Diffstat (limited to 'contrib/llvm/tools/clang/lib')
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp | 616
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/ARCMTActions.cpp | 60
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp | 257
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h | 181
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp | 2276
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/PlistReporter.cpp | 126
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransAPIUses.cpp | 108
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransARCAssign.cpp | 78
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransAutoreleasePool.cpp | 435
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransBlockObjCVariable.cpp | 147
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp | 251
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransGCAttrs.cpp | 355
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransGCCalls.cpp | 77
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransProperties.cpp | 379
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransProtectedScope.cpp | 202
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp | 455
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp | 469
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp | 78
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp | 227
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransformActions.cpp | 694
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp | 599
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h | 225
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/APValue.cpp | 651
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp | 32
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ASTContext.cpp | 8928
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp | 1935
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ASTDumper.cpp | 2406
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp | 5919
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ASTTypeTraits.cpp | 158
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp | 21
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/CXXABI.h | 79
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp | 688
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/Comment.cpp | 347
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/CommentBriefParser.cpp | 155
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/CommentCommandTraits.cpp | 139
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/CommentLexer.cpp | 851
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/CommentParser.cpp | 776
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/CommentSema.cpp | 1098
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/Decl.cpp | 4204
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclBase.cpp | 1716
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp | 2261
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp | 69
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclGroup.cpp | 33
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp | 2158
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclOpenMP.cpp | 54
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp | 1360
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp | 1259
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp | 590
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/Expr.cpp | 4034
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp | 1505
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp | 688
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp | 9578
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ExprObjC.cpp | 360
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ExternalASTSource.cpp | 130
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/InheritViz.cpp | 160
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp | 174
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp | 4244
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/Mangle.cpp | 273
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp | 272
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp | 2974
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/NSAPI.cpp | 592
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp | 684
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/OpenMPClause.cpp | 417
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ParentMap.cpp | 198
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/RawCommentList.cpp | 337
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp | 102
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp | 3282
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/SelectorLocationsKind.cpp | 128
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/Stmt.cpp | 1110
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/StmtCXX.cpp | 86
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/StmtIterator.cpp | 114
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/StmtObjC.cpp | 73
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/StmtOpenMP.cpp | 884
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp | 2458
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp | 1636
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/StmtViz.cpp | 62
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp | 586
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/TemplateName.cpp | 232
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/Type.cpp | 3784
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp | 508
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp | 1625
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/VTTBuilder.cpp | 209
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp | 3758
-rw-r--r--  contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchFinder.cpp | 995
-rw-r--r--  contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchersInternal.cpp | 344
-rw-r--r--  contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp | 222
-rw-r--r--  contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Marshallers.h | 716
-rw-r--r--  contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Parser.cpp | 613
-rw-r--r--  contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Registry.cpp | 559
-rw-r--r--  contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp | 392
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp | 604
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/BodyFarm.cpp | 469
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/BodyFarm.h | 51
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/CFG.cpp | 4649
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/CFGReachabilityAnalysis.cpp | 76
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/CFGStmtMap.cpp | 91
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp | 228
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp | 140
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/CodeInjector.cpp | 15
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/Consumed.cpp | 1453
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/Dominators.cpp | 14
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp | 907
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/FormatStringParsing.h | 74
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp | 611
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/ObjCNoReturn.cpp | 67
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/PostOrderCFGView.cpp | 49
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp | 934
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp | 52
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp | 227
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp | 679
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp | 553
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp | 2406
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/ThreadSafetyCommon.cpp | 938
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/ThreadSafetyLogical.cpp | 112
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/ThreadSafetyTIL.cpp | 336
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp | 925
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/Attributes.cpp | 17
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/Builtins.cpp | 133
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/CharInfo.cpp | 81
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp | 1013
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp | 713
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/DiagnosticOptions.cpp | 24
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/FileManager.cpp | 570
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/FileSystemStatCache.cpp | 126
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp | 663
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/LangOptions.cpp | 46
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/Module.cpp | 536
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/ObjCRuntime.cpp | 93
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/OpenMPKinds.cpp | 538
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/OperatorPrecedence.cpp | 76
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/SanitizerBlacklist.cpp | 46
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/Sanitizers.cpp | 37
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp | 142
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp | 2234
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp | 639
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/Targets.cpp | 7963
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/TokenKinds.cpp | 48
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/Version.cpp | 151
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/VersionTuple.cpp | 100
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/VirtualFileSystem.cpp | 1560
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/Warnings.cpp | 230
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h | 116
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/Address.h | 126
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp | 716
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGAtomic.cpp | 1873
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp | 2339
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h | 277
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h | 306
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp | 7289
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCUDANV.cpp | 319
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCUDARuntime.cpp | 55
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCUDARuntime.h | 65
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp | 326
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp | 332
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h | 572
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp | 3687
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCall.h | 178
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp | 2792
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp | 1211
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h | 642
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp | 3558
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h | 587
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp | 1863
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp | 605
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp | 1906
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp | 4046
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp | 1538
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp | 1950
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp | 1108
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp | 1607
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp | 3555
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGLoopInfo.cpp | 248
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGLoopInfo.h | 158
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp | 3103
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp | 2896
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp | 7245
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp | 391
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h | 312
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGOpenCLRuntime.cpp | 101
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGOpenCLRuntime.h | 52
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGOpenMPRuntime.cpp | 4258
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGOpenMPRuntime.h | 980
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h | 220
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp | 860
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp | 2193
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGStmtOpenMP.cpp | 2679
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp | 178
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp | 958
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h | 119
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGValue.h | 595
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenABITypes.cpp | 70
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp | 852
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp | 1939
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h | 3312
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp | 3996
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h | 1245
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenPGO.cpp | 831
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenPGO.h | 117
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp | 319
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h | 160
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypeCache.h | 108
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp | 760
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h | 335
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CoverageMappingGen.cpp | 1050
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CoverageMappingGen.h | 114
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/EHScopeStack.h | 420
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp | 3839
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp | 4175
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp | 249
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp | 300
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/SanitizerMetadata.cpp | 95
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/SanitizerMetadata.h | 53
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp | 7601
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h | 224
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Action.cpp | 165
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Compilation.cpp | 238
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/CrossWindowsToolChain.cpp | 122
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Driver.cpp | 2407
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp | 44
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/InputInfo.h | 89
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Job.cpp | 306
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/MSVCToolChain.cpp | 744
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/MinGWToolChain.cpp | 244
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Multilib.cpp | 294
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Phases.cpp | 27
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.cpp | 730
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Tool.cpp | 23
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp | 668
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp | 4533
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/ToolChains.h | 1146
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Tools.cpp | 10580
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Tools.h | 908
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Types.cpp | 262
-rw-r--r--  contrib/llvm/tools/clang/lib/Edit/Commit.cpp | 346
-rw-r--r--  contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp | 458
-rw-r--r--  contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp | 1170
-rw-r--r--  contrib/llvm/tools/clang/lib/Format/BreakableToken.cpp | 460
-rw-r--r--  contrib/llvm/tools/clang/lib/Format/BreakableToken.h | 245
-rw-r--r--  contrib/llvm/tools/clang/lib/Format/ContinuationIndenter.cpp | 1215
-rw-r--r--  contrib/llvm/tools/clang/lib/Format/ContinuationIndenter.h | 380
-rw-r--r--  contrib/llvm/tools/clang/lib/Format/Encoding.h | 146
-rw-r--r--  contrib/llvm/tools/clang/lib/Format/Format.cpp | 2041
-rw-r--r--  contrib/llvm/tools/clang/lib/Format/FormatToken.cpp | 300
-rw-r--r--  contrib/llvm/tools/clang/lib/Format/FormatToken.h | 621
-rw-r--r--  contrib/llvm/tools/clang/lib/Format/TokenAnnotator.cpp | 2412
-rw-r--r--  contrib/llvm/tools/clang/lib/Format/TokenAnnotator.h | 182
-rw-r--r--  contrib/llvm/tools/clang/lib/Format/UnwrappedLineFormatter.cpp | 965
-rw-r--r--  contrib/llvm/tools/clang/lib/Format/UnwrappedLineFormatter.h | 73
-rw-r--r--  contrib/llvm/tools/clang/lib/Format/UnwrappedLineParser.cpp | 1938
-rw-r--r--  contrib/llvm/tools/clang/lib/Format/UnwrappedLineParser.h | 220
-rw-r--r--  contrib/llvm/tools/clang/lib/Format/WhitespaceManager.cpp | 543
-rw-r--r--  contrib/llvm/tools/clang/lib/Format/WhitespaceManager.h | 210
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp | 489
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp | 114
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp | 2856
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp | 694
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/ChainedDiagnosticConsumer.cpp | 14
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp | 298
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/CodeGenOptions.cpp | 32
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp | 1732
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp | 2319
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp | 104
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp | 476
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/DependencyGraph.cpp | 138
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp | 667
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp | 592
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp | 749
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp | 32
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp | 154
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp | 675
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp | 1003
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/LangStandards.cpp | 43
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/LayoutOverrideSource.cpp | 208
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/LogDiagnosticPrinter.cpp | 165
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/ModuleDependencyCollector.cpp | 94
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp | 362
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/PCHContainerOperations.cpp | 66
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp | 784
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/Rewrite/FixItRewriter.cpp | 203
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/Rewrite/FrontendActions.cpp | 197
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/Rewrite/HTMLPrint.cpp | 95
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp | 603
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteMacros.cpp | 217
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp | 7708
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteObjC.cpp | 5913
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteTest.cpp | 39
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp | 886
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticReader.cpp | 295
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/TestModuleFileExtension.cpp | 123
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/TestModuleFileExtension.h | 72
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp | 1261
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticBuffer.cpp | 63
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp | 161
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp | 921
-rw-r--r--  contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp | 226
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/Intrin.h | 958
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/__clang_cuda_runtime_wrapper.h | 216
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/__stddef_max_align_t.h | 43
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/__wmmintrin_aes.h | 66
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/__wmmintrin_pclmul.h | 30
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/adxintrin.h | 86
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/altivec.h | 13919
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/ammintrin.h | 209
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/arm_acle.h | 308
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/avx2intrin.h | 1215
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/avx512bwintrin.h | 1542
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/avx512cdintrin.h | 131
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/avx512dqintrin.h | 778
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/avx512erintrin.h | 285
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/avx512fintrin.h | 3074
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/avx512vlbwintrin.h | 2336
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/avx512vldqintrin.h | 953
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/avx512vlintrin.h | 4606
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/avxintrin.h | 1318
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h | 95
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/bmiintrin.h | 155
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/cpuid.h | 209
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/cuda_builtin_vars.h | 110
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/emmintrin.h | 1491
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/f16cintrin.h | 45
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/float.h | 124
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/fma4intrin.h | 230
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/fmaintrin.h | 228
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/fxsrintrin.h | 55
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/htmintrin.h | 226
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/htmxlintrin.h | 363
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/ia32intrin.h | 77
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/immintrin.h | 174
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/inttypes.h | 102
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/iso646.h | 43
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/limits.h | 118
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/lzcntintrin.h | 68
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/mm3dnow.h | 171
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/mm_malloc.h | 75
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/mmintrin.h | 503
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/module.modulemap | 171
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/nmmintrin.h | 30
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/pkuintrin.h | 48
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/pmmintrin.h | 116
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/popcntintrin.h | 58
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/prfchwintrin.h | 45
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/rdseedintrin.h | 56
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/rtmintrin.h | 59
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/s390intrin.h | 39
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/shaintrin.h | 75
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/smmintrin.h | 508
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/stdalign.h | 35
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/stdarg.h | 52
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/stdatomic.h | 190
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/stdbool.h | 44
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/stddef.h | 137
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/stdint.h | 707
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/stdnoreturn.h | 30
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/tbmintrin.h | 154
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/tgmath.h | 1374
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/tmmintrin.h | 221
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/unwind.h | 282
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/vadefs.h | 65
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/varargs.h | 26
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/vecintrin.h | 8946
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/wmmintrin.h | 33
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/x86intrin.h | 57
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/xmmintrin.h | 1010
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/xopintrin.h | 782
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/xsavecintrin.h | 48
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/xsaveintrin.h | 58
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/xsaveoptintrin.h | 48
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/xsavesintrin.h | 58
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/xtestintrin.h | 41
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/CommentToXML.cpp | 1163
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/SimpleFormatContext.h | 75
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/USRGeneration.cpp | 878
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp | 237
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp | 1423
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/Lexer.cpp | 3650
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp | 1685
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp | 313
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp | 245
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp | 2460
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp | 118
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PPCallbacks.cpp | 14
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PPConditionalDirectiveRecord.cpp | 122
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp | 2567
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp | 798
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp | 756
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp | 1796
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp | 728
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/Pragma.cpp | 1490
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp | 488
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp | 913
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PreprocessorLexer.cpp | 58
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/ScratchBuffer.cpp | 76
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp | 287
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp | 853
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/UnicodeCharSets.h | 408
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp | 179
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp | 1215
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp | 6327
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp | 3942
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp | 2838
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp | 3153
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp | 544
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp | 3614
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseOpenMP.cpp | 1068
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp | 2130
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp | 2190
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseStmtAsm.cpp | 819
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp | 1416
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp | 1825
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/Parser.cpp | 2102
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h | 447
-rw-r--r--  contrib/llvm/tools/clang/lib/Rewrite/DeltaTree.cpp | 464
-rw-r--r--  contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp | 582
-rw-r--r--  contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp | 802
-rw-r--r--  contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp | 458
-rw-r--r--  contrib/llvm/tools/clang/lib/Rewrite/TokenRewriter.cpp | 99
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp | 2110
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp | 225
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp | 638
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp | 1257
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp | 72
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp | 422
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp | 855
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/MultiplexExternalSemaSource.cpp | 311
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/Scope.cpp | 224
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/ScopeInfo.cpp | 240
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/Sema.cpp | 1527
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp | 1913
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp | 619
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaCUDA.cpp | 424
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp | 1062
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp | 2506
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp | 9908
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp | 7518
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaConsumer.cpp | 14
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaCoroutine.cpp | 448
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp | 14810
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp | 5950
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp | 13837
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp | 4634
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp | 1197
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp | 14594
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp | 6856
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp | 1774
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp | 4300
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp | 222
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp | 7689
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp | 1703
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp | 4990
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp | 2451
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaOpenMP.cpp | 8544
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp | 12920
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp | 1664
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp | 3933
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaStmtAsm.cpp | 780
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaStmtAttr.cpp | 241
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp | 8466
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp | 5074
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp | 2868
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp | 4726
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp | 1038
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaType.cpp | 7029
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/TreeTransform.h | 11603
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/TypeLocBuilder.cpp | 136
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/TypeLocBuilder.h | 151
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp | 369
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h | 112
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp | 8687
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp | 3816
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ASTReaderInternals.h | 296
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp | 3378
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp | 5821
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp | 2187
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp | 2411
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp | 65
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/GlobalModuleIndex.cpp | 889
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/Module.cpp | 115
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ModuleFileExtension.cpp | 22
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp | 476
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/MultiOnDiskHashTable.h | 330
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.cpp | 24
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h | 31
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp | 140
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp | 93
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp | 306
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp | 1302
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp | 157
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp | 102
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp | 2165
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp | 191
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp | 598
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp | 144
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp | 75
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp | 248
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp | 140
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp | 778
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp | 93
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp | 322
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td | 647
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp | 157
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangCheckers.cpp | 32
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckers.h | 37
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp | 479
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp | 247
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp | 301
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp | 227
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp | 92
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp | 213
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp | 940
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp | 190
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp | 68
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp | 731
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp | 512
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h | 24
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp | 753
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp | 317
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp | 1201
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp | 623
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp | 128
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp | 2744
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp | 337
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp | 252
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp | 81
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp | 324
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp | 145
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp | 223
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp | 1066
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp | 94
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp | 174
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp | 175
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp | 263
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp | 439
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp | 188
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp | 314
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp | 70
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp | 77
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp | 334
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp | 4029
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp | 92
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp | 123
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/SelectorExtras.h | 68
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp | 287
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp | 253
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp | 424
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp | 62
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp | 265
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp | 107
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp | 110
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp | 104
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp | 101
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp | 64
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp | 96
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp | 382
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp | 252
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp | 185
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp | 218
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp | 246
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/APSIntType.cpp | 49
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp | 58
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp | 346
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp | 292
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BlockCounter.cpp | 85
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp | 3553
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp | 1643
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CallEvent.cpp | 1038
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp | 38
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp | 94
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp | 96
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp | 745
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerRegistry.cpp | 177
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp | 20
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp | 39
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp | 730
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp | 51
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp | 213
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp | 443
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp | 2749
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp | 1001
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp | 623
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp | 1009
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp | 262
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/FunctionSummary.cpp | 32
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp | 634
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/IssueHash.cpp | 196
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp | 68
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp | 1508
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp | 1183
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp | 497
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PrettyStackTraceLocationContext.h | 45
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp | 754
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp | 697
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp | 2469
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp | 556
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp | 322
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp | 334
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h | 121
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp | 945
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp | 510
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SubEngine.cpp | 14
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp | 560
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp | 813
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp | 139
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/FrontendActions.cpp | 28
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp | 42
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/ModelInjector.cpp | 117
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/ModelInjector.h | 74
-rw-r--r--  contrib/llvm/tools/clang/lib/Tooling/ArgumentsAdjusters.cpp | 86
-rw-r--r--  contrib/llvm/tools/clang/lib/Tooling/CommonOptionsParser.cpp | 149
-rw-r--r--  contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp | 333
-rw-r--r--  contrib/llvm/tools/clang/lib/Tooling/Core/Lookup.cpp | 113
-rw-r--r--  contrib/llvm/tools/clang/lib/Tooling/Core/Replacement.cpp | 419
-rw-r--r--  contrib/llvm/tools/clang/lib/Tooling/FileMatchTrie.cpp | 189
-rw-r--r--  contrib/llvm/tools/clang/lib/Tooling/JSONCompilationDatabase.cpp | 345
-rw-r--r--  contrib/llvm/tools/clang/lib/Tooling/Refactoring.cpp | 65
-rw-r--r--  contrib/llvm/tools/clang/lib/Tooling/RefactoringCallbacks.cpp | 81
-rw-r--r--  contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp | 507
616 files changed, 695766 insertions(+), 0 deletions(-)
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp
new file mode 100644
index 0000000..8c04c83
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp
@@ -0,0 +1,616 @@
+//===--- ARCMT.cpp - Migration to ARC mode --------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Internals.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/Basic/DiagnosticCategories.h"
+#include "clang/Frontend/ASTUnit.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendAction.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Frontend/Utils.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Rewrite/Core/Rewriter.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Serialization/ASTReader.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/MemoryBuffer.h"
+using namespace clang;
+using namespace arcmt;
+
+bool CapturedDiagList::clearDiagnostic(ArrayRef<unsigned> IDs,
+ SourceRange range) {
+ if (range.isInvalid())
+ return false;
+
+ bool cleared = false;
+ ListTy::iterator I = List.begin();
+ while (I != List.end()) {
+ FullSourceLoc diagLoc = I->getLocation();
+ if ((IDs.empty() || // empty means clear all diagnostics in the range.
+ std::find(IDs.begin(), IDs.end(), I->getID()) != IDs.end()) &&
+ !diagLoc.isBeforeInTranslationUnitThan(range.getBegin()) &&
+ (diagLoc == range.getEnd() ||
+ diagLoc.isBeforeInTranslationUnitThan(range.getEnd()))) {
+ cleared = true;
+ ListTy::iterator eraseS = I++;
+ if (eraseS->getLevel() != DiagnosticsEngine::Note)
+ while (I != List.end() && I->getLevel() == DiagnosticsEngine::Note)
+ ++I;
+ // Clear the diagnostic and any notes following it.
+ I = List.erase(eraseS, I);
+ continue;
+ }
+
+ ++I;
+ }
+
+ return cleared;
+}
+
+bool CapturedDiagList::hasDiagnostic(ArrayRef<unsigned> IDs,
+ SourceRange range) const {
+ if (range.isInvalid())
+ return false;
+
+ ListTy::const_iterator I = List.begin();
+ while (I != List.end()) {
+ FullSourceLoc diagLoc = I->getLocation();
+ if ((IDs.empty() || // empty means any diagnostic in the range.
+ std::find(IDs.begin(), IDs.end(), I->getID()) != IDs.end()) &&
+ !diagLoc.isBeforeInTranslationUnitThan(range.getBegin()) &&
+ (diagLoc == range.getEnd() ||
+ diagLoc.isBeforeInTranslationUnitThan(range.getEnd()))) {
+ return true;
+ }
+
+ ++I;
+ }
+
+ return false;
+}
+
+void CapturedDiagList::reportDiagnostics(DiagnosticsEngine &Diags) const {
+ for (ListTy::const_iterator I = List.begin(), E = List.end(); I != E; ++I)
+ Diags.Report(*I);
+}
+
+bool CapturedDiagList::hasErrors() const {
+ for (ListTy::const_iterator I = List.begin(), E = List.end(); I != E; ++I)
+ if (I->getLevel() >= DiagnosticsEngine::Error)
+ return true;
+
+ return false;
+}
+
+namespace {
+
+class CaptureDiagnosticConsumer : public DiagnosticConsumer {
+ DiagnosticsEngine &Diags;
+ DiagnosticConsumer &DiagClient;
+ CapturedDiagList &CapturedDiags;
+ bool HasBegunSourceFile;
+public:
+ CaptureDiagnosticConsumer(DiagnosticsEngine &diags,
+ DiagnosticConsumer &client,
+ CapturedDiagList &capturedDiags)
+ : Diags(diags), DiagClient(client), CapturedDiags(capturedDiags),
+ HasBegunSourceFile(false) { }
+
+ void BeginSourceFile(const LangOptions &Opts,
+ const Preprocessor *PP) override {
+ // Pass BeginSourceFile message onto DiagClient on first call.
+ // The corresponding EndSourceFile call will be made from an
+ // explicit call to FinishCapture.
+ if (!HasBegunSourceFile) {
+ DiagClient.BeginSourceFile(Opts, PP);
+ HasBegunSourceFile = true;
+ }
+ }
+
+ void FinishCapture() {
+ // Call EndSourceFile on DiagClient on completion of capture to
+ // enable VerifyDiagnosticConsumer to check diagnostics *after*
+ // it has received the diagnostic list.
+ if (HasBegunSourceFile) {
+ DiagClient.EndSourceFile();
+ HasBegunSourceFile = false;
+ }
+ }
+
+ ~CaptureDiagnosticConsumer() override {
+ assert(!HasBegunSourceFile && "FinishCapture not called!");
+ }
+
+ void HandleDiagnostic(DiagnosticsEngine::Level level,
+ const Diagnostic &Info) override {
+ if (DiagnosticIDs::isARCDiagnostic(Info.getID()) ||
+ level >= DiagnosticsEngine::Error || level == DiagnosticsEngine::Note) {
+ if (Info.getLocation().isValid())
+ CapturedDiags.push_back(StoredDiagnostic(level, Info));
+ return;
+ }
+
+ // Non-ARC warnings are ignored.
+ Diags.setLastDiagnosticIgnored();
+ }
+};
+
+} // end anonymous namespace
+
+static bool HasARCRuntime(CompilerInvocation &origCI) {
+ // This duplicates some functionality from Darwin::AddDeploymentTarget
+ // but this function is well defined, so keep it decoupled from the driver
+ // and avoid unrelated complications.
+ llvm::Triple triple(origCI.getTargetOpts().Triple);
+
+ if (triple.isiOS())
+ return triple.getOSMajorVersion() >= 5;
+
+ if (triple.isWatchOS())
+ return true;
+
+ if (triple.getOS() == llvm::Triple::Darwin)
+ return triple.getOSMajorVersion() >= 11;
+
+ if (triple.getOS() == llvm::Triple::MacOSX) {
+ unsigned Major, Minor, Micro;
+ triple.getOSVersion(Major, Minor, Micro);
+ return Major > 10 || (Major == 10 && Minor >= 7);
+ }
+
+ return false;
+}
+
+static CompilerInvocation *
+createInvocationForMigration(CompilerInvocation &origCI,
+ const PCHContainerReader &PCHContainerRdr) {
+ std::unique_ptr<CompilerInvocation> CInvok;
+ CInvok.reset(new CompilerInvocation(origCI));
+ PreprocessorOptions &PPOpts = CInvok->getPreprocessorOpts();
+ if (!PPOpts.ImplicitPCHInclude.empty()) {
+ // We can't use a PCH because it was likely built in non-ARC mode and we
+ // want to parse in ARC. Include the original header.
+ FileManager FileMgr(origCI.getFileSystemOpts());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagID, &origCI.getDiagnosticOpts(),
+ new IgnoringDiagConsumer()));
+ std::string OriginalFile = ASTReader::getOriginalSourceFile(
+ PPOpts.ImplicitPCHInclude, FileMgr, PCHContainerRdr, *Diags);
+ if (!OriginalFile.empty())
+ PPOpts.Includes.insert(PPOpts.Includes.begin(), OriginalFile);
+ PPOpts.ImplicitPCHInclude.clear();
+ }
+ // FIXME: Get the original header of a PTH as well.
+ CInvok->getPreprocessorOpts().ImplicitPTHInclude.clear();
+ std::string define = getARCMTMacroName();
+ define += '=';
+ CInvok->getPreprocessorOpts().addMacroDef(define);
+ CInvok->getLangOpts()->ObjCAutoRefCount = true;
+ CInvok->getLangOpts()->setGC(LangOptions::NonGC);
+ CInvok->getDiagnosticOpts().ErrorLimit = 0;
+ CInvok->getDiagnosticOpts().PedanticErrors = 0;
+
+ // Ignore -Werror flags when migrating.
+ std::vector<std::string> WarnOpts;
+ for (std::vector<std::string>::iterator
+ I = CInvok->getDiagnosticOpts().Warnings.begin(),
+ E = CInvok->getDiagnosticOpts().Warnings.end(); I != E; ++I) {
+ if (!StringRef(*I).startswith("error"))
+ WarnOpts.push_back(*I);
+ }
+ WarnOpts.push_back("error=arc-unsafe-retained-assign");
+ CInvok->getDiagnosticOpts().Warnings = std::move(WarnOpts);
+
+ CInvok->getLangOpts()->ObjCWeakRuntime = HasARCRuntime(origCI);
+ CInvok->getLangOpts()->ObjCWeak = CInvok->getLangOpts()->ObjCWeakRuntime;
+
+ return CInvok.release();
+}
+
+static void emitPremigrationErrors(const CapturedDiagList &arcDiags,
+ DiagnosticOptions *diagOpts,
+ Preprocessor &PP) {
+ TextDiagnosticPrinter printer(llvm::errs(), diagOpts);
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagID, diagOpts, &printer,
+ /*ShouldOwnClient=*/false));
+ Diags->setSourceManager(&PP.getSourceManager());
+
+ printer.BeginSourceFile(PP.getLangOpts(), &PP);
+ arcDiags.reportDiagnostics(*Diags);
+ printer.EndSourceFile();
+}
+
+//===----------------------------------------------------------------------===//
+// checkForManualIssues.
+//===----------------------------------------------------------------------===//
+
+bool arcmt::checkForManualIssues(
+ CompilerInvocation &origCI, const FrontendInputFile &Input,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ DiagnosticConsumer *DiagClient, bool emitPremigrationARCErrors,
+ StringRef plistOut) {
+ if (!origCI.getLangOpts()->ObjC1)
+ return false;
+
+ LangOptions::GCMode OrigGCMode = origCI.getLangOpts()->getGC();
+ bool NoNSAllocReallocError = origCI.getMigratorOpts().NoNSAllocReallocError;
+ bool NoFinalizeRemoval = origCI.getMigratorOpts().NoFinalizeRemoval;
+
+ std::vector<TransformFn> transforms = arcmt::getAllTransformations(OrigGCMode,
+ NoFinalizeRemoval);
+ assert(!transforms.empty());
+
+ std::unique_ptr<CompilerInvocation> CInvok;
+ CInvok.reset(
+ createInvocationForMigration(origCI, PCHContainerOps->getRawReader()));
+ CInvok->getFrontendOpts().Inputs.clear();
+ CInvok->getFrontendOpts().Inputs.push_back(Input);
+
+ CapturedDiagList capturedDiags;
+
+ assert(DiagClient);
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagID, &origCI.getDiagnosticOpts(),
+ DiagClient, /*ShouldOwnClient=*/false));
+
+ // Filter of all diagnostics.
+ CaptureDiagnosticConsumer errRec(*Diags, *DiagClient, capturedDiags);
+ Diags->setClient(&errRec, /*ShouldOwnClient=*/false);
+
+ std::unique_ptr<ASTUnit> Unit(ASTUnit::LoadFromCompilerInvocationAction(
+ CInvok.release(), PCHContainerOps, Diags));
+ if (!Unit) {
+ errRec.FinishCapture();
+ return true;
+ }
+
+ // Don't filter diagnostics anymore.
+ Diags->setClient(DiagClient, /*ShouldOwnClient=*/false);
+
+ ASTContext &Ctx = Unit->getASTContext();
+
+ if (Diags->hasFatalErrorOccurred()) {
+ Diags->Reset();
+ DiagClient->BeginSourceFile(Ctx.getLangOpts(), &Unit->getPreprocessor());
+ capturedDiags.reportDiagnostics(*Diags);
+ DiagClient->EndSourceFile();
+ errRec.FinishCapture();
+ return true;
+ }
+
+ if (emitPremigrationARCErrors)
+ emitPremigrationErrors(capturedDiags, &origCI.getDiagnosticOpts(),
+ Unit->getPreprocessor());
+ if (!plistOut.empty()) {
+ SmallVector<StoredDiagnostic, 8> arcDiags;
+ for (CapturedDiagList::iterator
+ I = capturedDiags.begin(), E = capturedDiags.end(); I != E; ++I)
+ arcDiags.push_back(*I);
+ writeARCDiagsToPlist(plistOut, arcDiags,
+ Ctx.getSourceManager(), Ctx.getLangOpts());
+ }
+
+ // After parsing of source files ended, we want to reuse the
+ // diagnostics objects to emit further diagnostics.
+ // We call BeginSourceFile because DiagnosticConsumer requires that
+ // diagnostics with source range information are emitted only in between
+ // BeginSourceFile() and EndSourceFile().
+ DiagClient->BeginSourceFile(Ctx.getLangOpts(), &Unit->getPreprocessor());
+
+ // No macros will be added since we are just checking and we won't modify
+ // source code.
+ std::vector<SourceLocation> ARCMTMacroLocs;
+
+ TransformActions testAct(*Diags, capturedDiags, Ctx, Unit->getPreprocessor());
+ MigrationPass pass(Ctx, OrigGCMode, Unit->getSema(), testAct, capturedDiags,
+ ARCMTMacroLocs);
+ pass.setNoFinalizeRemoval(NoFinalizeRemoval);
+ if (!NoNSAllocReallocError)
+ Diags->setSeverity(diag::warn_arcmt_nsalloc_realloc, diag::Severity::Error,
+ SourceLocation());
+
+ for (unsigned i=0, e = transforms.size(); i != e; ++i)
+ transforms[i](pass);
+
+ capturedDiags.reportDiagnostics(*Diags);
+
+ DiagClient->EndSourceFile();
+ errRec.FinishCapture();
+
+ return capturedDiags.hasErrors() || testAct.hasReportedErrors();
+}
+
+//===----------------------------------------------------------------------===//
+// applyTransformations.
+//===----------------------------------------------------------------------===//
+
+static bool
+applyTransforms(CompilerInvocation &origCI, const FrontendInputFile &Input,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ DiagnosticConsumer *DiagClient, StringRef outputDir,
+ bool emitPremigrationARCErrors, StringRef plistOut) {
+ if (!origCI.getLangOpts()->ObjC1)
+ return false;
+
+ LangOptions::GCMode OrigGCMode = origCI.getLangOpts()->getGC();
+
+ // Make sure checking is successful first.
+ CompilerInvocation CInvokForCheck(origCI);
+ if (arcmt::checkForManualIssues(CInvokForCheck, Input, PCHContainerOps,
+ DiagClient, emitPremigrationARCErrors,
+ plistOut))
+ return true;
+
+ CompilerInvocation CInvok(origCI);
+ CInvok.getFrontendOpts().Inputs.clear();
+ CInvok.getFrontendOpts().Inputs.push_back(Input);
+
+ MigrationProcess migration(CInvok, PCHContainerOps, DiagClient, outputDir);
+ bool NoFinalizeRemoval = origCI.getMigratorOpts().NoFinalizeRemoval;
+
+ std::vector<TransformFn> transforms = arcmt::getAllTransformations(OrigGCMode,
+ NoFinalizeRemoval);
+ assert(!transforms.empty());
+
+ for (unsigned i=0, e = transforms.size(); i != e; ++i) {
+ bool err = migration.applyTransform(transforms[i]);
+ if (err) return true;
+ }
+
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagID, &origCI.getDiagnosticOpts(),
+ DiagClient, /*ShouldOwnClient=*/false));
+
+ if (outputDir.empty()) {
+ origCI.getLangOpts()->ObjCAutoRefCount = true;
+ return migration.getRemapper().overwriteOriginal(*Diags);
+ } else {
+ return migration.getRemapper().flushToDisk(outputDir, *Diags);
+ }
+}
+
+bool arcmt::applyTransformations(
+ CompilerInvocation &origCI, const FrontendInputFile &Input,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ DiagnosticConsumer *DiagClient) {
+ return applyTransforms(origCI, Input, PCHContainerOps, DiagClient,
+ StringRef(), false, StringRef());
+}
+
+bool arcmt::migrateWithTemporaryFiles(
+ CompilerInvocation &origCI, const FrontendInputFile &Input,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ DiagnosticConsumer *DiagClient, StringRef outputDir,
+ bool emitPremigrationARCErrors, StringRef plistOut) {
+ assert(!outputDir.empty() && "Expected output directory path");
+ return applyTransforms(origCI, Input, PCHContainerOps, DiagClient, outputDir,
+ emitPremigrationARCErrors, plistOut);
+}
+
+bool arcmt::getFileRemappings(
+    std::vector<std::pair<std::string, std::string>> &remap,
+    StringRef outputDir, DiagnosticConsumer *DiagClient) {
+ assert(!outputDir.empty());
+
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagID, new DiagnosticOptions,
+ DiagClient, /*ShouldOwnClient=*/false));
+
+ FileRemapper remapper;
+ bool err = remapper.initFromDisk(outputDir, *Diags,
+ /*ignoreIfFilesChanged=*/true);
+ if (err)
+ return true;
+
+ PreprocessorOptions PPOpts;
+ remapper.applyMappings(PPOpts);
+ remap = PPOpts.RemappedFiles;
+
+ return false;
+}
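+
+// Illustrative use (a sketch; `OutputDir`, `Client`, and `PPOpts` are assumed
+// to come from the caller): recover the mappings that
+// migrateWithTemporaryFiles() left in the output directory and feed them to a
+// subsequent compilation.
+//
+//   std::vector<std::pair<std::string, std::string>> Remap;
+//   if (!arcmt::getFileRemappings(Remap, OutputDir, Client))
+//     for (const auto &P : Remap)
+//       PPOpts.addRemappedFile(P.first, P.second); // original -> rewritten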
+
+
+//===----------------------------------------------------------------------===//
+// CollectTransformActions.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class ARCMTMacroTrackerPPCallbacks : public PPCallbacks {
+ std::vector<SourceLocation> &ARCMTMacroLocs;
+
+public:
+ ARCMTMacroTrackerPPCallbacks(std::vector<SourceLocation> &ARCMTMacroLocs)
+ : ARCMTMacroLocs(ARCMTMacroLocs) { }
+
+ void MacroExpands(const Token &MacroNameTok, const MacroDefinition &MD,
+ SourceRange Range, const MacroArgs *Args) override {
+ if (MacroNameTok.getIdentifierInfo()->getName() == getARCMTMacroName())
+ ARCMTMacroLocs.push_back(MacroNameTok.getLocation());
+ }
+};
+
+class ARCMTMacroTrackerAction : public ASTFrontendAction {
+ std::vector<SourceLocation> &ARCMTMacroLocs;
+
+public:
+ ARCMTMacroTrackerAction(std::vector<SourceLocation> &ARCMTMacroLocs)
+ : ARCMTMacroLocs(ARCMTMacroLocs) { }
+
+ std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) override {
+ CI.getPreprocessor().addPPCallbacks(
+ llvm::make_unique<ARCMTMacroTrackerPPCallbacks>(ARCMTMacroLocs));
+ return llvm::make_unique<ASTConsumer>();
+ }
+};
+
+class RewritesApplicator : public TransformActions::RewriteReceiver {
+ Rewriter &rewriter;
+ MigrationProcess::RewriteListener *Listener;
+
+public:
+ RewritesApplicator(Rewriter &rewriter, ASTContext &ctx,
+ MigrationProcess::RewriteListener *listener)
+ : rewriter(rewriter), Listener(listener) {
+ if (Listener)
+ Listener->start(ctx);
+ }
+ ~RewritesApplicator() override {
+ if (Listener)
+ Listener->finish();
+ }
+
+ void insert(SourceLocation loc, StringRef text) override {
+ bool err = rewriter.InsertText(loc, text, /*InsertAfter=*/true,
+ /*indentNewLines=*/true);
+ if (!err && Listener)
+ Listener->insert(loc, text);
+ }
+
+ void remove(CharSourceRange range) override {
+ Rewriter::RewriteOptions removeOpts;
+ removeOpts.IncludeInsertsAtBeginOfRange = false;
+ removeOpts.IncludeInsertsAtEndOfRange = false;
+ removeOpts.RemoveLineIfEmpty = true;
+
+ bool err = rewriter.RemoveText(range, removeOpts);
+ if (!err && Listener)
+ Listener->remove(range);
+ }
+
+ void increaseIndentation(CharSourceRange range,
+ SourceLocation parentIndent) override {
+ rewriter.IncreaseIndentation(range, parentIndent);
+ }
+};
+
+} // end anonymous namespace.
+
+/// \brief Anchor for VTable.
+MigrationProcess::RewriteListener::~RewriteListener() { }
+
+MigrationProcess::MigrationProcess(
+ const CompilerInvocation &CI,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ DiagnosticConsumer *diagClient, StringRef outputDir)
+ : OrigCI(CI), PCHContainerOps(PCHContainerOps), DiagClient(diagClient),
+ HadARCErrors(false) {
+ if (!outputDir.empty()) {
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagID, &CI.getDiagnosticOpts(),
+ DiagClient, /*ShouldOwnClient=*/false));
+ Remapper.initFromDisk(outputDir, *Diags, /*ignoreIfFilesChanged=*/true);
+ }
+}
+
+bool MigrationProcess::applyTransform(TransformFn trans,
+ RewriteListener *listener) {
+ std::unique_ptr<CompilerInvocation> CInvok;
+ CInvok.reset(
+ createInvocationForMigration(OrigCI, PCHContainerOps->getRawReader()));
+ CInvok->getDiagnosticOpts().IgnoreWarnings = true;
+
+ Remapper.applyMappings(CInvok->getPreprocessorOpts());
+
+ CapturedDiagList capturedDiags;
+ std::vector<SourceLocation> ARCMTMacroLocs;
+
+ assert(DiagClient);
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagID, new DiagnosticOptions,
+ DiagClient, /*ShouldOwnClient=*/false));
+
+ // Filter all diagnostics.
+ CaptureDiagnosticConsumer errRec(*Diags, *DiagClient, capturedDiags);
+ Diags->setClient(&errRec, /*ShouldOwnClient=*/false);
+
+ std::unique_ptr<ARCMTMacroTrackerAction> ASTAction;
+ ASTAction.reset(new ARCMTMacroTrackerAction(ARCMTMacroLocs));
+
+ std::unique_ptr<ASTUnit> Unit(ASTUnit::LoadFromCompilerInvocationAction(
+ CInvok.release(), PCHContainerOps, Diags, ASTAction.get()));
+ if (!Unit) {
+ errRec.FinishCapture();
+ return true;
+ }
+ Unit->setOwnsRemappedFileBuffers(false); // FileRemapper manages that.
+
+ HadARCErrors = HadARCErrors || capturedDiags.hasErrors();
+
+ // Don't filter diagnostics anymore.
+ Diags->setClient(DiagClient, /*ShouldOwnClient=*/false);
+
+ ASTContext &Ctx = Unit->getASTContext();
+
+ if (Diags->hasFatalErrorOccurred()) {
+ Diags->Reset();
+ DiagClient->BeginSourceFile(Ctx.getLangOpts(), &Unit->getPreprocessor());
+ capturedDiags.reportDiagnostics(*Diags);
+ DiagClient->EndSourceFile();
+ errRec.FinishCapture();
+ return true;
+ }
+
+ // After parsing of the source files has ended, we want to reuse the
+ // diagnostics objects to emit further diagnostics.
+ // We call BeginSourceFile because DiagnosticConsumer requires that
+ // diagnostics with source-range information are emitted only in between
+ // BeginSourceFile() and EndSourceFile().
+ DiagClient->BeginSourceFile(Ctx.getLangOpts(), &Unit->getPreprocessor());
+
+ Rewriter rewriter(Ctx.getSourceManager(), Ctx.getLangOpts());
+ TransformActions TA(*Diags, capturedDiags, Ctx, Unit->getPreprocessor());
+ MigrationPass pass(Ctx, OrigCI.getLangOpts()->getGC(),
+ Unit->getSema(), TA, capturedDiags, ARCMTMacroLocs);
+
+ trans(pass);
+
+ {
+ RewritesApplicator applicator(rewriter, Ctx, listener);
+ TA.applyRewrites(applicator);
+ }
+
+ DiagClient->EndSourceFile();
+ errRec.FinishCapture();
+
+ if (DiagClient->getNumErrors())
+ return true;
+
+ for (Rewriter::buffer_iterator
+ I = rewriter.buffer_begin(), E = rewriter.buffer_end(); I != E; ++I) {
+ FileID FID = I->first;
+ RewriteBuffer &buf = I->second;
+ const FileEntry *file = Ctx.getSourceManager().getFileEntryForID(FID);
+ assert(file);
+ std::string newFname = file->getName();
+ newFname += "-trans";
+ SmallString<512> newText;
+ llvm::raw_svector_ostream vecOS(newText);
+ buf.write(vecOS);
+ std::unique_ptr<llvm::MemoryBuffer> memBuf(
+ llvm::MemoryBuffer::getMemBufferCopy(
+ StringRef(newText.data(), newText.size()), newFname));
+ SmallString<64> filePath(file->getName());
+ Unit->getFileManager().FixupRelativePath(filePath);
+ Remapper.remap(filePath.str(), std::move(memBuf));
+ }
+
+ return false;
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMTActions.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMTActions.cpp
new file mode 100644
index 0000000..39a922f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMTActions.cpp
@@ -0,0 +1,60 @@
+//===--- ARCMTActions.cpp - ARC Migrate Tool Frontend Actions ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/ARCMigrate/ARCMTActions.h"
+#include "clang/ARCMigrate/ARCMT.h"
+#include "clang/Frontend/CompilerInstance.h"
+
+using namespace clang;
+using namespace arcmt;
+
+bool CheckAction::BeginInvocation(CompilerInstance &CI) {
+ if (arcmt::checkForManualIssues(CI.getInvocation(), getCurrentInput(),
+ CI.getPCHContainerOperations(),
+ CI.getDiagnostics().getClient()))
+ return false; // errors, stop the action.
+
+ // We only want to see warnings reported from arcmt::checkForManualIssues.
+ CI.getDiagnostics().setIgnoreAllWarnings(true);
+ return true;
+}
+
+CheckAction::CheckAction(FrontendAction *WrappedAction)
+ : WrapperFrontendAction(WrappedAction) {}
+
+bool ModifyAction::BeginInvocation(CompilerInstance &CI) {
+ return !arcmt::applyTransformations(CI.getInvocation(), getCurrentInput(),
+ CI.getPCHContainerOperations(),
+ CI.getDiagnostics().getClient());
+}
+
+ModifyAction::ModifyAction(FrontendAction *WrappedAction)
+ : WrapperFrontendAction(WrappedAction) {}
+
+bool MigrateAction::BeginInvocation(CompilerInstance &CI) {
+ if (arcmt::migrateWithTemporaryFiles(
+ CI.getInvocation(), getCurrentInput(), CI.getPCHContainerOperations(),
+ CI.getDiagnostics().getClient(), MigrateDir, EmitPremigrationARCErros,
+ PlistOut))
+ return false; // errors, stop the action.
+
+ // We only want to see diagnostics emitted by migrateWithTemporaryFiles.
+ CI.getDiagnostics().setIgnoreAllWarnings(true);
+ return true;
+}
+
+MigrateAction::MigrateAction(FrontendAction *WrappedAction,
+ StringRef migrateDir,
+ StringRef plistOut,
+ bool emitPremigrationARCErrors)
+ : WrapperFrontendAction(WrappedAction), MigrateDir(migrateDir),
+ PlistOut(plistOut), EmitPremigrationARCErros(emitPremigrationARCErrors) {
+ if (MigrateDir.empty())
+ MigrateDir = "."; // user current directory if none is given.
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp
new file mode 100644
index 0000000..2cf2069
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp
@@ -0,0 +1,257 @@
+//===--- FileRemapper.cpp - File Remapping Helper -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/ARCMigrate/FileRemapper.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Lex/PreprocessorOptions.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include <fstream>
+
+using namespace clang;
+using namespace arcmt;
+
+FileRemapper::FileRemapper() {
+ FileMgr.reset(new FileManager(FileSystemOptions()));
+}
+
+FileRemapper::~FileRemapper() {
+ clear();
+}
+
+void FileRemapper::clear(StringRef outputDir) {
+ for (MappingsTy::iterator
+ I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I)
+ resetTarget(I->second);
+ FromToMappings.clear();
+ assert(ToFromMappings.empty());
+ if (!outputDir.empty()) {
+ std::string infoFile = getRemapInfoFile(outputDir);
+ llvm::sys::fs::remove(infoFile);
+ }
+}
+
+std::string FileRemapper::getRemapInfoFile(StringRef outputDir) {
+ assert(!outputDir.empty());
+ SmallString<128> InfoFile = outputDir;
+ llvm::sys::path::append(InfoFile, "remap");
+ return InfoFile.str();
+}
+
+bool FileRemapper::initFromDisk(StringRef outputDir, DiagnosticsEngine &Diag,
+ bool ignoreIfFilesChanged) {
+ std::string infoFile = getRemapInfoFile(outputDir);
+ return initFromFile(infoFile, Diag, ignoreIfFilesChanged);
+}
+
+bool FileRemapper::initFromFile(StringRef filePath, DiagnosticsEngine &Diag,
+ bool ignoreIfFilesChanged) {
+ assert(FromToMappings.empty() &&
+ "initFromDisk should be called before any remap calls");
+ std::string infoFile = filePath;
+ if (!llvm::sys::fs::exists(infoFile))
+ return false;
+
+ std::vector<std::pair<const FileEntry *, const FileEntry *> > pairs;
+
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileBuf =
+ llvm::MemoryBuffer::getFile(infoFile.c_str());
+ if (!fileBuf)
+ return report("Error opening file: " + infoFile, Diag);
+
+ SmallVector<StringRef, 64> lines;
+ fileBuf.get()->getBuffer().split(lines, "\n");
+
+ for (unsigned idx = 0; idx+3 <= lines.size(); idx += 3) {
+ StringRef fromFilename = lines[idx];
+ unsigned long long timeModified;
+ if (lines[idx+1].getAsInteger(10, timeModified))
+ return report("Invalid file data: '" + lines[idx+1] + "' not a number",
+ Diag);
+ StringRef toFilename = lines[idx+2];
+
+ const FileEntry *origFE = FileMgr->getFile(fromFilename);
+ if (!origFE) {
+ if (ignoreIfFilesChanged)
+ continue;
+ return report("File does not exist: " + fromFilename, Diag);
+ }
+ const FileEntry *newFE = FileMgr->getFile(toFilename);
+ if (!newFE) {
+ if (ignoreIfFilesChanged)
+ continue;
+ return report("File does not exist: " + toFilename, Diag);
+ }
+
+ if ((uint64_t)origFE->getModificationTime() != timeModified) {
+ if (ignoreIfFilesChanged)
+ continue;
+ return report("File was modified: " + fromFilename, Diag);
+ }
+
+ pairs.push_back(std::make_pair(origFE, newFE));
+ }
+
+ for (unsigned i = 0, e = pairs.size(); i != e; ++i)
+ remap(pairs[i].first, pairs[i].second);
+
+ return false;
+}
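+
+// For reference, the "remap" info file parsed above is a flat text file of
+// line triples: the original path, its modification time when the mapping was
+// recorded, and the path of the rewritten file. For example (paths are
+// hypothetical):
+//
+//   /project/Foo.m
+//   1334871823
+//   /outputDir/Foo-abc123.m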
+
+bool FileRemapper::flushToDisk(StringRef outputDir, DiagnosticsEngine &Diag) {
+ using namespace llvm::sys;
+
+ if (fs::create_directory(outputDir))
+ return report("Could not create directory: " + outputDir, Diag);
+
+ std::string infoFile = getRemapInfoFile(outputDir);
+ return flushToFile(infoFile, Diag);
+}
+
+bool FileRemapper::flushToFile(StringRef outputPath, DiagnosticsEngine &Diag) {
+ using namespace llvm::sys;
+
+ std::error_code EC;
+ std::string infoFile = outputPath;
+ llvm::raw_fd_ostream infoOut(infoFile, EC, llvm::sys::fs::F_None);
+ if (EC)
+ return report(EC.message(), Diag);
+
+ for (MappingsTy::iterator
+ I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) {
+
+ const FileEntry *origFE = I->first;
+ SmallString<200> origPath = StringRef(origFE->getName());
+ fs::make_absolute(origPath);
+ infoOut << origPath << '\n';
+ infoOut << (uint64_t)origFE->getModificationTime() << '\n';
+
+ if (const FileEntry *FE = I->second.dyn_cast<const FileEntry *>()) {
+ SmallString<200> newPath = StringRef(FE->getName());
+ fs::make_absolute(newPath);
+ infoOut << newPath << '\n';
+ } else {
+
+ SmallString<64> tempPath;
+ int fd;
+ if (fs::createTemporaryFile(path::filename(origFE->getName()),
+ path::extension(origFE->getName()).drop_front(), fd,
+ tempPath))
+ return report("Could not create file: " + tempPath.str(), Diag);
+
+ llvm::raw_fd_ostream newOut(fd, /*shouldClose=*/true);
+ llvm::MemoryBuffer *mem = I->second.get<llvm::MemoryBuffer *>();
+ newOut.write(mem->getBufferStart(), mem->getBufferSize());
+ newOut.close();
+
+ const FileEntry *newE = FileMgr->getFile(tempPath);
+ remap(origFE, newE);
+ infoOut << newE->getName() << '\n';
+ }
+ }
+
+ infoOut.close();
+ return false;
+}
+
+bool FileRemapper::overwriteOriginal(DiagnosticsEngine &Diag,
+ StringRef outputDir) {
+ using namespace llvm::sys;
+
+ for (MappingsTy::iterator
+ I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) {
+ const FileEntry *origFE = I->first;
+ assert(I->second.is<llvm::MemoryBuffer *>());
+ if (!fs::exists(origFE->getName()))
+ return report(StringRef("File does not exist: ") + origFE->getName(),
+ Diag);
+
+ std::error_code EC;
+ llvm::raw_fd_ostream Out(origFE->getName(), EC, llvm::sys::fs::F_None);
+ if (EC)
+ return report(EC.message(), Diag);
+
+ llvm::MemoryBuffer *mem = I->second.get<llvm::MemoryBuffer *>();
+ Out.write(mem->getBufferStart(), mem->getBufferSize());
+ Out.close();
+ }
+
+ clear(outputDir);
+ return false;
+}
+
+void FileRemapper::applyMappings(PreprocessorOptions &PPOpts) const {
+ for (MappingsTy::const_iterator
+ I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) {
+ if (const FileEntry *FE = I->second.dyn_cast<const FileEntry *>()) {
+ PPOpts.addRemappedFile(I->first->getName(), FE->getName());
+ } else {
+ llvm::MemoryBuffer *mem = I->second.get<llvm::MemoryBuffer *>();
+ PPOpts.addRemappedFile(I->first->getName(), mem);
+ }
+ }
+
+ PPOpts.RetainRemappedFileBuffers = true;
+}
+
+void FileRemapper::remap(StringRef filePath,
+ std::unique_ptr<llvm::MemoryBuffer> memBuf) {
+ remap(getOriginalFile(filePath), std::move(memBuf));
+}
+
+void FileRemapper::remap(const FileEntry *file,
+ std::unique_ptr<llvm::MemoryBuffer> memBuf) {
+ assert(file);
+ Target &targ = FromToMappings[file];
+ resetTarget(targ);
+ targ = memBuf.release();
+}
+
+void FileRemapper::remap(const FileEntry *file, const FileEntry *newfile) {
+ assert(file && newfile);
+ Target &targ = FromToMappings[file];
+ resetTarget(targ);
+ targ = newfile;
+ ToFromMappings[newfile] = file;
+}
+
+const FileEntry *FileRemapper::getOriginalFile(StringRef filePath) {
+ const FileEntry *file = FileMgr->getFile(filePath);
+ // If we are updating a file that overrode an original file,
+ // actually update the original file.
+ llvm::DenseMap<const FileEntry *, const FileEntry *>::iterator
+ I = ToFromMappings.find(file);
+ if (I != ToFromMappings.end()) {
+ file = I->second;
+ assert(FromToMappings.find(file) != FromToMappings.end() &&
+ "Original file not in mappings!");
+ }
+ return file;
+}
+
+void FileRemapper::resetTarget(Target &targ) {
+ if (!targ)
+ return;
+
+ if (llvm::MemoryBuffer *oldmem = targ.dyn_cast<llvm::MemoryBuffer *>()) {
+ delete oldmem;
+ } else {
+ const FileEntry *toFE = targ.get<const FileEntry *>();
+ ToFromMappings.erase(toFE);
+ }
+}
+
+bool FileRemapper::report(const Twine &err, DiagnosticsEngine &Diag) {
+ Diag.Report(Diag.getCustomDiagID(DiagnosticsEngine::Error, "%0"))
+ << err.str();
+ return true;
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h b/contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h
new file mode 100644
index 0000000..4f153b1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h
@@ -0,0 +1,181 @@
+//===--- Internals.h - Implementation Details -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_ARCMIGRATE_INTERNALS_H
+#define LLVM_CLANG_LIB_ARCMIGRATE_INTERNALS_H
+
+#include "clang/ARCMigrate/ARCMT.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include <list>
+
+namespace clang {
+ class Sema;
+ class Stmt;
+
+namespace arcmt {
+
+class CapturedDiagList {
+ typedef std::list<StoredDiagnostic> ListTy;
+ ListTy List;
+
+public:
+ void push_back(const StoredDiagnostic &diag) { List.push_back(diag); }
+
+ bool clearDiagnostic(ArrayRef<unsigned> IDs, SourceRange range);
+ bool hasDiagnostic(ArrayRef<unsigned> IDs, SourceRange range) const;
+
+ void reportDiagnostics(DiagnosticsEngine &diags) const;
+
+ bool hasErrors() const;
+
+ typedef ListTy::const_iterator iterator;
+ iterator begin() const { return List.begin(); }
+ iterator end() const { return List.end(); }
+};
+
+void writeARCDiagsToPlist(const std::string &outPath,
+ ArrayRef<StoredDiagnostic> diags,
+ SourceManager &SM, const LangOptions &LangOpts);
+
+class TransformActions {
+ DiagnosticsEngine &Diags;
+ CapturedDiagList &CapturedDiags;
+ void *Impl; // TransformActionsImpl.
+
+public:
+ TransformActions(DiagnosticsEngine &diag, CapturedDiagList &capturedDiags,
+ ASTContext &ctx, Preprocessor &PP);
+ ~TransformActions();
+
+ void startTransaction();
+ bool commitTransaction();
+ void abortTransaction();
+
+ void insert(SourceLocation loc, StringRef text);
+ void insertAfterToken(SourceLocation loc, StringRef text);
+ void remove(SourceRange range);
+ void removeStmt(Stmt *S);
+ void replace(SourceRange range, StringRef text);
+ void replace(SourceRange range, SourceRange replacementRange);
+ void replaceStmt(Stmt *S, StringRef text);
+ void replaceText(SourceLocation loc, StringRef text,
+ StringRef replacementText);
+ void increaseIndentation(SourceRange range,
+ SourceLocation parentIndent);
+
+ bool clearDiagnostic(ArrayRef<unsigned> IDs, SourceRange range);
+ bool clearAllDiagnostics(SourceRange range) {
+ return clearDiagnostic(None, range);
+ }
+ bool clearDiagnostic(unsigned ID1, unsigned ID2, SourceRange range) {
+ unsigned IDs[] = { ID1, ID2 };
+ return clearDiagnostic(IDs, range);
+ }
+ bool clearDiagnostic(unsigned ID1, unsigned ID2, unsigned ID3,
+ SourceRange range) {
+ unsigned IDs[] = { ID1, ID2, ID3 };
+ return clearDiagnostic(IDs, range);
+ }
+
+ bool hasDiagnostic(unsigned ID, SourceRange range) {
+ return CapturedDiags.hasDiagnostic(ID, range);
+ }
+
+ bool hasDiagnostic(unsigned ID1, unsigned ID2, SourceRange range) {
+ unsigned IDs[] = { ID1, ID2 };
+ return CapturedDiags.hasDiagnostic(IDs, range);
+ }
+
+ DiagnosticBuilder report(SourceLocation loc, unsigned diagId,
+ SourceRange range = SourceRange());
+ void reportError(StringRef error, SourceLocation loc,
+ SourceRange range = SourceRange());
+ void reportWarning(StringRef warning, SourceLocation loc,
+ SourceRange range = SourceRange());
+ void reportNote(StringRef note, SourceLocation loc,
+ SourceRange range = SourceRange());
+
+ bool hasReportedErrors() const {
+ return Diags.hasUnrecoverableErrorOccurred();
+ }
+
+ class RewriteReceiver {
+ public:
+ virtual ~RewriteReceiver();
+
+ virtual void insert(SourceLocation loc, StringRef text) = 0;
+ virtual void remove(CharSourceRange range) = 0;
+ virtual void increaseIndentation(CharSourceRange range,
+ SourceLocation parentIndent) = 0;
+ };
+
+ void applyRewrites(RewriteReceiver &receiver);
+};
+
+class Transaction {
+ TransformActions &TA;
+ bool Aborted;
+
+public:
+ Transaction(TransformActions &TA) : TA(TA), Aborted(false) {
+ TA.startTransaction();
+ }
+
+ ~Transaction() {
+ if (!isAborted())
+ TA.commitTransaction();
+ }
+
+ void abort() {
+ TA.abortTransaction();
+ Aborted = true;
+ }
+
+ bool isAborted() const { return Aborted; }
+};
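+
+// Illustrative use (a sketch; `TA` is a TransformActions instance and `S` a
+// Stmt the transform decided to rewrite; the failure condition is
+// hypothetical):
+//
+//   {
+//     Transaction Trans(TA);
+//     TA.replaceStmt(S, "newText");
+//     if (rewriteTurnedOutToBeInvalid)
+//       Trans.abort();  // discard the queued edits
+//   }                   // otherwise committed when Trans goes out of scope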
+
+class MigrationPass {
+public:
+ ASTContext &Ctx;
+ LangOptions::GCMode OrigGCMode;
+ MigratorOptions MigOptions;
+ Sema &SemaRef;
+ TransformActions &TA;
+ const CapturedDiagList &CapturedDiags;
+ std::vector<SourceLocation> &ARCMTMacroLocs;
+ Optional<bool> EnableCFBridgeFns;
+
+ MigrationPass(ASTContext &Ctx, LangOptions::GCMode OrigGCMode,
+ Sema &sema, TransformActions &TA,
+ const CapturedDiagList &capturedDiags,
+ std::vector<SourceLocation> &ARCMTMacroLocs)
+ : Ctx(Ctx), OrigGCMode(OrigGCMode), MigOptions(),
+ SemaRef(sema), TA(TA), CapturedDiags(capturedDiags),
+ ARCMTMacroLocs(ARCMTMacroLocs) { }
+
+ const CapturedDiagList &getDiags() const { return CapturedDiags; }
+
+ bool isGCMigration() const { return OrigGCMode != LangOptions::NonGC; }
+ bool noFinalizeRemoval() const { return MigOptions.NoFinalizeRemoval; }
+ void setNoFinalizeRemoval(bool val) { MigOptions.NoFinalizeRemoval = val; }
+
+ bool CFBridgingFunctionsDefined();
+};
+
+static inline StringRef getARCMTMacroName() {
+ return "__IMPL_ARCMT_REMOVED_EXPR__";
+}
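+
+// This name is used as a marker: ARCMTMacroTrackerPPCallbacks in ARCMT.cpp
+// records the location of every expansion of this macro into ARCMTMacroLocs,
+// so passes can later find the spots where a transform left this marker
+// behind.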
+
+} // end namespace arcmt
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp
new file mode 100644
index 0000000..50b1136
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp
@@ -0,0 +1,2276 @@
+//===--- ObjCMT.cpp - ObjC Migrate Tool -----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "clang/ARCMigrate/ARCMT.h"
+#include "clang/ARCMigrate/ARCMTActions.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/NSAPI.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Edit/Commit.h"
+#include "clang/Edit/EditedSource.h"
+#include "clang/Edit/EditsReceiver.h"
+#include "clang/Edit/Rewriters.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/MultiplexConsumer.h"
+#include "clang/Lex/PPConditionalDirectiveRecord.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Rewrite/Core/Rewriter.h"
+#include "clang/StaticAnalyzer/Checkers/ObjCRetainCount.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/YAMLParser.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace ento::objc_retain;
+
+namespace {
+
+class ObjCMigrateASTConsumer : public ASTConsumer {
+ enum CF_BRIDGING_KIND {
+ CF_BRIDGING_NONE,
+ CF_BRIDGING_ENABLE,
+ CF_BRIDGING_MAY_INCLUDE
+ };
+
+ void migrateDecl(Decl *D);
+ void migrateObjCContainerDecl(ASTContext &Ctx, ObjCContainerDecl *D);
+ void migrateProtocolConformance(ASTContext &Ctx,
+ const ObjCImplementationDecl *ImpDecl);
+ void CacheObjCNSIntegerTypedefed(const TypedefDecl *TypedefDcl);
+ bool migrateNSEnumDecl(ASTContext &Ctx, const EnumDecl *EnumDcl,
+ const TypedefDecl *TypedefDcl);
+ void migrateAllMethodInstaceType(ASTContext &Ctx, ObjCContainerDecl *CDecl);
+ void migrateMethodInstanceType(ASTContext &Ctx, ObjCContainerDecl *CDecl,
+ ObjCMethodDecl *OM);
+ bool migrateProperty(ASTContext &Ctx, ObjCContainerDecl *D, ObjCMethodDecl *OM);
+ void migrateNsReturnsInnerPointer(ASTContext &Ctx, ObjCMethodDecl *OM);
+ void migratePropertyNsReturnsInnerPointer(ASTContext &Ctx, ObjCPropertyDecl *P);
+ void migrateFactoryMethod(ASTContext &Ctx, ObjCContainerDecl *CDecl,
+ ObjCMethodDecl *OM,
+ ObjCInstanceTypeFamily OIT_Family = OIT_None);
+
+ void migrateCFAnnotation(ASTContext &Ctx, const Decl *Decl);
+ void AddCFAnnotations(ASTContext &Ctx, const CallEffects &CE,
+ const FunctionDecl *FuncDecl, bool ResultAnnotated);
+ void AddCFAnnotations(ASTContext &Ctx, const CallEffects &CE,
+ const ObjCMethodDecl *MethodDecl, bool ResultAnnotated);
+
+ void AnnotateImplicitBridging(ASTContext &Ctx);
+
+ CF_BRIDGING_KIND migrateAddFunctionAnnotation(ASTContext &Ctx,
+ const FunctionDecl *FuncDecl);
+
+ void migrateARCSafeAnnotation(ASTContext &Ctx, ObjCContainerDecl *CDecl);
+
+ void migrateAddMethodAnnotation(ASTContext &Ctx,
+ const ObjCMethodDecl *MethodDecl);
+
+ void inferDesignatedInitializers(ASTContext &Ctx,
+ const ObjCImplementationDecl *ImplD);
+
+ bool InsertFoundation(ASTContext &Ctx, SourceLocation Loc);
+
+public:
+ std::string MigrateDir;
+ unsigned ASTMigrateActions;
+ FileID FileId;
+ const TypedefDecl *NSIntegerTypedefed;
+ const TypedefDecl *NSUIntegerTypedefed;
+ std::unique_ptr<NSAPI> NSAPIObj;
+ std::unique_ptr<edit::EditedSource> Editor;
+ FileRemapper &Remapper;
+ FileManager &FileMgr;
+ const PPConditionalDirectiveRecord *PPRec;
+ Preprocessor &PP;
+ bool IsOutputFile;
+ bool FoundationIncluded;
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 32> ObjCProtocolDecls;
+ llvm::SmallVector<const Decl *, 8> CFFunctionIBCandidates;
+ llvm::StringSet<> WhiteListFilenames;
+
+ ObjCMigrateASTConsumer(StringRef migrateDir,
+ unsigned astMigrateActions,
+ FileRemapper &remapper,
+ FileManager &fileMgr,
+ const PPConditionalDirectiveRecord *PPRec,
+ Preprocessor &PP,
+ bool isOutputFile,
+ ArrayRef<std::string> WhiteList)
+ : MigrateDir(migrateDir),
+ ASTMigrateActions(astMigrateActions),
+ NSIntegerTypedefed(nullptr), NSUIntegerTypedefed(nullptr),
+ Remapper(remapper), FileMgr(fileMgr), PPRec(PPRec), PP(PP),
+ IsOutputFile(isOutputFile),
+ FoundationIncluded(false) {
+
+ // FIXME: StringSet should have insert(iter, iter) to use here.
+ for (const std::string &Val : WhiteList)
+ WhiteListFilenames.insert(Val);
+ }
+
+protected:
+ void Initialize(ASTContext &Context) override {
+ NSAPIObj.reset(new NSAPI(Context));
+ Editor.reset(new edit::EditedSource(Context.getSourceManager(),
+ Context.getLangOpts(),
+ PPRec));
+ }
+
+ bool HandleTopLevelDecl(DeclGroupRef DG) override {
+ for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
+ migrateDecl(*I);
+ return true;
+ }
+ void HandleInterestingDecl(DeclGroupRef DG) override {
+ // Ignore decls from the PCH.
+ }
+ void HandleTopLevelDeclInObjCContainer(DeclGroupRef DG) override {
+ ObjCMigrateASTConsumer::HandleTopLevelDecl(DG);
+ }
+
+ void HandleTranslationUnit(ASTContext &Ctx) override;
+
+ bool canModifyFile(StringRef Path) {
+ if (WhiteListFilenames.empty())
+ return true;
+ return WhiteListFilenames.find(llvm::sys::path::filename(Path))
+ != WhiteListFilenames.end();
+ }
+ bool canModifyFile(const FileEntry *FE) {
+ if (!FE)
+ return false;
+ return canModifyFile(FE->getName());
+ }
+ bool canModifyFile(FileID FID) {
+ if (FID.isInvalid())
+ return false;
+ return canModifyFile(PP.getSourceManager().getFileEntryForID(FID));
+ }
+
+ bool canModify(const Decl *D) {
+ if (!D)
+ return false;
+ if (const ObjCCategoryImplDecl *CatImpl = dyn_cast<ObjCCategoryImplDecl>(D))
+ return canModify(CatImpl->getCategoryDecl());
+ if (const ObjCImplementationDecl *Impl = dyn_cast<ObjCImplementationDecl>(D))
+ return canModify(Impl->getClassInterface());
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ return canModify(cast<Decl>(MD->getDeclContext()));
+
+ FileID FID = PP.getSourceManager().getFileID(D->getLocation());
+ return canModifyFile(FID);
+ }
+};
+
+} // end anonymous namespace.
+
+ObjCMigrateAction::ObjCMigrateAction(FrontendAction *WrappedAction,
+ StringRef migrateDir,
+ unsigned migrateAction)
+ : WrapperFrontendAction(WrappedAction), MigrateDir(migrateDir),
+ ObjCMigAction(migrateAction),
+ CompInst(nullptr) {
+ if (MigrateDir.empty())
+ MigrateDir = "."; // user current directory if none is given.
+}
+
+std::unique_ptr<ASTConsumer>
+ObjCMigrateAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ PPConditionalDirectiveRecord *
+ PPRec = new PPConditionalDirectiveRecord(CompInst->getSourceManager());
+ CI.getPreprocessor().addPPCallbacks(std::unique_ptr<PPCallbacks>(PPRec));
+ std::vector<std::unique_ptr<ASTConsumer>> Consumers;
+ Consumers.push_back(WrapperFrontendAction::CreateASTConsumer(CI, InFile));
+ Consumers.push_back(llvm::make_unique<ObjCMigrateASTConsumer>(
+ MigrateDir, ObjCMigAction, Remapper, CompInst->getFileManager(), PPRec,
+ CompInst->getPreprocessor(), false, None));
+ return llvm::make_unique<MultiplexConsumer>(std::move(Consumers));
+}
+
+bool ObjCMigrateAction::BeginInvocation(CompilerInstance &CI) {
+ Remapper.initFromDisk(MigrateDir, CI.getDiagnostics(),
+ /*ignoreIfFilesChanged=*/true);
+ CompInst = &CI;
+ CI.getDiagnostics().setIgnoreAllWarnings(true);
+ return true;
+}
+
+namespace {
+ // FIXME. This duplicates one in RewriteObjCFoundationAPI.cpp
+ bool subscriptOperatorNeedsParens(const Expr *FullExpr) {
+ const Expr* Expr = FullExpr->IgnoreImpCasts();
+ return !(isa<ArraySubscriptExpr>(Expr) || isa<CallExpr>(Expr) ||
+ isa<DeclRefExpr>(Expr) || isa<CXXNamedCastExpr>(Expr) ||
+ isa<CXXConstructExpr>(Expr) || isa<CXXThisExpr>(Expr) ||
+ isa<CXXTypeidExpr>(Expr) ||
+ isa<CXXUnresolvedConstructExpr>(Expr) ||
+ isa<ObjCMessageExpr>(Expr) || isa<ObjCPropertyRefExpr>(Expr) ||
+ isa<ObjCProtocolExpr>(Expr) || isa<MemberExpr>(Expr) ||
+ isa<ObjCIvarRefExpr>(Expr) || isa<ParenExpr>(FullExpr) ||
+ isa<ParenListExpr>(Expr) || isa<SizeOfPackExpr>(Expr));
+ }
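+
+ // For example (an illustrative case; `flag`, `a`, `b`, and `count` are
+ // hypothetical): a conditional-operator receiver is not in the list above,
+ // so a getter message on `flag ? a : b` must be rewritten as
+ // `(flag ? a : b).count` rather than the invalid `flag ? a : b.count`.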
+
+ /// \brief Rewrite message expressions for Objective-C setters and getters
+ /// into property-dot syntax.
+ bool rewriteToPropertyDotSyntax(const ObjCMessageExpr *Msg,
+ Preprocessor &PP,
+ const NSAPI &NS, edit::Commit &commit,
+ const ParentMap *PMap) {
+ if (!Msg || Msg->isImplicit() ||
+ (Msg->getReceiverKind() != ObjCMessageExpr::Instance &&
+ Msg->getReceiverKind() != ObjCMessageExpr::SuperInstance))
+ return false;
+ if (const Expr *Receiver = Msg->getInstanceReceiver())
+ if (Receiver->getType()->isObjCBuiltinType())
+ return false;
+
+ const ObjCMethodDecl *Method = Msg->getMethodDecl();
+ if (!Method)
+ return false;
+ if (!Method->isPropertyAccessor())
+ return false;
+
+ const ObjCPropertyDecl *Prop = Method->findPropertyDecl();
+ if (!Prop)
+ return false;
+
+ SourceRange MsgRange = Msg->getSourceRange();
+ bool ReceiverIsSuper =
+ (Msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
+ // For 'super', the receiver is nullptr.
+ const Expr *receiver = Msg->getInstanceReceiver();
+ bool NeedsParen =
+ ReceiverIsSuper ? false : subscriptOperatorNeedsParens(receiver);
+ bool IsGetter = (Msg->getNumArgs() == 0);
+ if (IsGetter) {
+ // Find the range of the space between the receiver expression and the
+ // getter selector.
+ SourceLocation BegLoc =
+ ReceiverIsSuper ? Msg->getSuperLoc() : receiver->getLocEnd();
+ BegLoc = PP.getLocForEndOfToken(BegLoc);
+ SourceLocation EndLoc = Msg->getSelectorLoc(0);
+ SourceRange SpaceRange(BegLoc, EndLoc);
+ std::string PropertyDotString;
+ // Rewrite the getter message expression into: receiver.property or
+ // (receiver).property
+ if (NeedsParen) {
+ commit.insertBefore(receiver->getLocStart(), "(");
+ PropertyDotString = ").";
+ }
+ else
+ PropertyDotString = ".";
+ PropertyDotString += Prop->getName();
+ commit.replace(SpaceRange, PropertyDotString);
+
+ // remove '[' ']'
+ commit.replace(SourceRange(MsgRange.getBegin(), MsgRange.getBegin()), "");
+ commit.replace(SourceRange(MsgRange.getEnd(), MsgRange.getEnd()), "");
+ } else {
+ if (NeedsParen)
+ commit.insertWrap("(", receiver->getSourceRange(), ")");
+ std::string PropertyDotString = ".";
+ PropertyDotString += Prop->getName();
+ PropertyDotString += " =";
+ const Expr*const* Args = Msg->getArgs();
+ const Expr *RHS = Args[0];
+ if (!RHS)
+ return false;
+ SourceLocation BegLoc =
+ ReceiverIsSuper ? Msg->getSuperLoc() : receiver->getLocEnd();
+ BegLoc = PP.getLocForEndOfToken(BegLoc);
+ SourceLocation EndLoc = RHS->getLocStart();
+ EndLoc = EndLoc.getLocWithOffset(-1);
+ const char *colon = PP.getSourceManager().getCharacterData(EndLoc);
+ // Add a space after '=' if there is no space between RHS and '='
+ if (colon && colon[0] == ':')
+ PropertyDotString += " ";
+ SourceRange Range(BegLoc, EndLoc);
+ commit.replace(Range, PropertyDotString);
+ // remove '[' ']'
+ commit.replace(SourceRange(MsgRange.getBegin(), MsgRange.getBegin()), "");
+ commit.replace(SourceRange(MsgRange.getEnd(), MsgRange.getEnd()), "");
+ }
+ return true;
+ }
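+
+ // For illustration (a sketch; `name` is a hypothetical property): the
+ // rewrites above turn
+ //
+ //   [self name]        into   self.name
+ //   [self setName:x]   into   self.name = x
+ //
+ // inserting parentheses around the receiver when it needs them.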
+
+
+class ObjCMigrator : public RecursiveASTVisitor<ObjCMigrator> {
+ ObjCMigrateASTConsumer &Consumer;
+ ParentMap &PMap;
+
+public:
+ ObjCMigrator(ObjCMigrateASTConsumer &consumer, ParentMap &PMap)
+ : Consumer(consumer), PMap(PMap) { }
+
+ bool shouldVisitTemplateInstantiations() const { return false; }
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ if (Consumer.ASTMigrateActions & FrontendOptions::ObjCMT_Literals) {
+ edit::Commit commit(*Consumer.Editor);
+ edit::rewriteToObjCLiteralSyntax(E, *Consumer.NSAPIObj, commit, &PMap);
+ Consumer.Editor->commit(commit);
+ }
+
+ if (Consumer.ASTMigrateActions & FrontendOptions::ObjCMT_Subscripting) {
+ edit::Commit commit(*Consumer.Editor);
+ edit::rewriteToObjCSubscriptSyntax(E, *Consumer.NSAPIObj, commit);
+ Consumer.Editor->commit(commit);
+ }
+
+ if (Consumer.ASTMigrateActions & FrontendOptions::ObjCMT_PropertyDotSyntax) {
+ edit::Commit commit(*Consumer.Editor);
+ rewriteToPropertyDotSyntax(E, Consumer.PP, *Consumer.NSAPIObj,
+ commit, &PMap);
+ Consumer.Editor->commit(commit);
+ }
+
+ return true;
+ }
+
+ bool TraverseObjCMessageExpr(ObjCMessageExpr *E) {
+ // Do a depth-first traversal; we want to rewrite the subexpressions first
+ // so that if we have to move expressions, they have already been rewritten.
+ for (Stmt *SubStmt : E->children())
+ if (!TraverseStmt(SubStmt))
+ return false;
+
+ return WalkUpFromObjCMessageExpr(E);
+ }
+};
+
+class BodyMigrator : public RecursiveASTVisitor<BodyMigrator> {
+ ObjCMigrateASTConsumer &Consumer;
+ std::unique_ptr<ParentMap> PMap;
+
+public:
+ BodyMigrator(ObjCMigrateASTConsumer &consumer) : Consumer(consumer) { }
+
+ bool shouldVisitTemplateInstantiations() const { return false; }
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool TraverseStmt(Stmt *S) {
+ PMap.reset(new ParentMap(S));
+ ObjCMigrator(Consumer, *PMap).TraverseStmt(S);
+ return true;
+ }
+};
+} // end anonymous namespace.
+
+void ObjCMigrateASTConsumer::migrateDecl(Decl *D) {
+ if (!D)
+ return;
+ if (isa<ObjCMethodDecl>(D))
+ return; // Wait for the ObjC container declaration.
+
+ BodyMigrator(*this).TraverseDecl(D);
+}
+
+static void append_attr(std::string &PropertyString, const char *attr,
+ bool &LParenAdded) {
+ if (!LParenAdded) {
+ PropertyString += "(";
+ LParenAdded = true;
+ }
+ else
+ PropertyString += ", ";
+ PropertyString += attr;
+}
+
+static
+void MigrateBlockOrFunctionPointerTypeVariable(std::string & PropertyString,
+ const std::string& TypeString,
+ const char *name) {
+ const char *argPtr = TypeString.c_str();
+ int paren = 0;
+ while (*argPtr) {
+ switch (*argPtr) {
+ case '(':
+ PropertyString += *argPtr;
+ paren++;
+ break;
+ case ')':
+ PropertyString += *argPtr;
+ paren--;
+ break;
+ case '^':
+ case '*':
+ PropertyString += (*argPtr);
+ if (paren == 1) {
+ PropertyString += name;
+ name = "";
+ }
+ break;
+ default:
+ PropertyString += *argPtr;
+ break;
+ }
+ argPtr++;
+ }
+}
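+
+// For illustration (a sketch): given the type string "void (^)(BOOL)" and the
+// name "handler", the loop above produces "void (^handler)(BOOL)"; the
+// property name is spliced in right after the '^' (or '*') that occurs at
+// parenthesis depth 1.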
+
+static const char *PropertyMemoryAttribute(ASTContext &Context, QualType ArgType) {
+ Qualifiers::ObjCLifetime propertyLifetime = ArgType.getObjCLifetime();
+ bool RetainableObject = ArgType->isObjCRetainableType();
+ if (RetainableObject &&
+ (propertyLifetime == Qualifiers::OCL_Strong
+ || propertyLifetime == Qualifiers::OCL_None)) {
+ if (const ObjCObjectPointerType *ObjPtrTy =
+ ArgType->getAs<ObjCObjectPointerType>()) {
+ ObjCInterfaceDecl *IDecl = ObjPtrTy->getObjectType()->getInterface();
+ if (IDecl &&
+ IDecl->lookupNestedProtocol(&Context.Idents.get("NSCopying")))
+ return "copy";
+ else
+ return "strong";
+ }
+ else if (ArgType->isBlockPointerType())
+ return "copy";
+ } else if (propertyLifetime == Qualifiers::OCL_Weak)
+ // TODO: A more precise determination of the 'weak' attribute requires
+ // looking into the setter's implementation for a backing weak ivar.
+ return "weak";
+ else if (RetainableObject)
+ return ArgType->isBlockPointerType() ? "copy" : "strong";
+ return nullptr;
+}
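+
+// For illustration (a sketch of the mapping above): an NSString * argument
+// yields "copy" (NSString adopts NSCopying), a plain NSObject * yields
+// "strong", a block pointer yields "copy", and a __weak object pointer
+// yields "weak".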
+
+static void rewriteToObjCProperty(const ObjCMethodDecl *Getter,
+ const ObjCMethodDecl *Setter,
+ const NSAPI &NS, edit::Commit &commit,
+ unsigned LengthOfPrefix,
+ bool Atomic, bool UseNsIosOnlyMacro,
+ bool AvailabilityArgsMatch) {
+ ASTContext &Context = NS.getASTContext();
+ bool LParenAdded = false;
+ std::string PropertyString = "@property ";
+ if (UseNsIosOnlyMacro && NS.isMacroDefined("NS_NONATOMIC_IOSONLY")) {
+ PropertyString += "(NS_NONATOMIC_IOSONLY";
+ LParenAdded = true;
+ } else if (!Atomic) {
+ PropertyString += "(nonatomic";
+ LParenAdded = true;
+ }
+
+ std::string PropertyNameString = Getter->getNameAsString();
+ StringRef PropertyName(PropertyNameString);
+ if (LengthOfPrefix > 0) {
+ if (!LParenAdded) {
+ PropertyString += "(getter=";
+ LParenAdded = true;
+ }
+ else
+ PropertyString += ", getter=";
+ PropertyString += PropertyNameString;
+ }
+ // A property with no setter may be suggested as a 'readonly' property.
+ if (!Setter)
+ append_attr(PropertyString, "readonly", LParenAdded);
+
+ // Short-circuit: properties whose name contains "delegate" or "dataSource",
+ // or whose name is exactly "target", get the 'assign' attribute.
+ if (PropertyName.equals("target") ||
+ (PropertyName.find("delegate") != StringRef::npos) ||
+ (PropertyName.find("dataSource") != StringRef::npos)) {
+ QualType QT = Getter->getReturnType();
+ if (!QT->isRealType())
+ append_attr(PropertyString, "assign", LParenAdded);
+ } else if (!Setter) {
+ QualType ResType = Context.getCanonicalType(Getter->getReturnType());
+ if (const char *MemoryManagementAttr = PropertyMemoryAttribute(Context, ResType))
+ append_attr(PropertyString, MemoryManagementAttr, LParenAdded);
+ } else {
+ const ParmVarDecl *argDecl = *Setter->param_begin();
+ QualType ArgType = Context.getCanonicalType(argDecl->getType());
+ if (const char *MemoryManagementAttr = PropertyMemoryAttribute(Context, ArgType))
+ append_attr(PropertyString, MemoryManagementAttr, LParenAdded);
+ }
+ if (LParenAdded)
+ PropertyString += ')';
+ QualType RT = Getter->getReturnType();
+ if (!isa<TypedefType>(RT)) {
+ // strip off any ARC lifetime qualifier.
+ QualType CanResultTy = Context.getCanonicalType(RT);
+ if (CanResultTy.getQualifiers().hasObjCLifetime()) {
+ Qualifiers Qs = CanResultTy.getQualifiers();
+ Qs.removeObjCLifetime();
+ RT = Context.getQualifiedType(CanResultTy.getUnqualifiedType(), Qs);
+ }
+ }
+ PropertyString += " ";
+ PrintingPolicy SubPolicy(Context.getPrintingPolicy());
+ SubPolicy.SuppressStrongLifetime = true;
+ SubPolicy.SuppressLifetimeQualifiers = true;
+ std::string TypeString = RT.getAsString(SubPolicy);
+ if (LengthOfPrefix > 0) {
+ // The property name must drop the "is" prefix and lowercase the first
+ // character after it; e.g. isContinuous becomes continuous.
+ StringRef PropertyNameStringRef(PropertyNameString);
+ PropertyNameStringRef = PropertyNameStringRef.drop_front(LengthOfPrefix);
+ PropertyNameString = PropertyNameStringRef;
+ bool NoLowering = (isUppercase(PropertyNameString[0]) &&
+ PropertyNameString.size() > 1 &&
+ isUppercase(PropertyNameString[1]));
+ if (!NoLowering)
+ PropertyNameString[0] = toLowercase(PropertyNameString[0]);
+ }
+ if (RT->isBlockPointerType() || RT->isFunctionPointerType())
+ MigrateBlockOrFunctionPointerTypeVariable(PropertyString,
+ TypeString,
+ PropertyNameString.c_str());
+ else {
+ char LastChar = TypeString[TypeString.size()-1];
+ PropertyString += TypeString;
+ if (LastChar != '*')
+ PropertyString += ' ';
+ PropertyString += PropertyNameString;
+ }
+ SourceLocation StartGetterSelectorLoc = Getter->getSelectorStartLoc();
+ Selector GetterSelector = Getter->getSelector();
+
+ SourceLocation EndGetterSelectorLoc =
+ StartGetterSelectorLoc.getLocWithOffset(GetterSelector.getNameForSlot(0).size());
+ commit.replace(CharSourceRange::getCharRange(Getter->getLocStart(),
+ EndGetterSelectorLoc),
+ PropertyString);
+ if (Setter && AvailabilityArgsMatch) {
+ SourceLocation EndLoc = Setter->getDeclaratorEndLoc();
+ // Get location past ';'
+ EndLoc = EndLoc.getLocWithOffset(1);
+ SourceLocation BeginOfSetterDclLoc = Setter->getLocStart();
+ // FIXME: This assumes that the setter declaration is immediately preceded
+ // by an end-of-line; it is trying to remove the setter declaration line
+ // entirely.
+ BeginOfSetterDclLoc = BeginOfSetterDclLoc.getLocWithOffset(-1);
+ commit.remove(SourceRange(BeginOfSetterDclLoc, EndLoc));
+ }
+}
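+
+// For illustration (a sketch; `title` is a hypothetical property): a
+// getter/setter pair such as
+//
+//   - (NSString *)title;
+//   - (void)setTitle:(NSString *)title;
+//
+// is rewritten by the code above to roughly
+//
+//   @property (nonatomic, copy) NSString *title;
+//
+// with the setter declaration line removed.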
+
+static bool IsCategoryNameWithDeprecatedSuffix(ObjCContainerDecl *D) {
+ if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(D)) {
+ StringRef Name = CatDecl->getName();
+ return Name.endswith("Deprecated");
+ }
+ return false;
+}
+
+void ObjCMigrateASTConsumer::migrateObjCContainerDecl(ASTContext &Ctx,
+ ObjCContainerDecl *D) {
+ if (D->isDeprecated() || IsCategoryNameWithDeprecatedSuffix(D))
+ return;
+
+ for (auto *Method : D->methods()) {
+ if (Method->isDeprecated())
+ continue;
+ bool PropertyInferred = migrateProperty(Ctx, D, Method);
+ // If a property was inferred, do not attempt to attach
+ // NS_RETURNS_INNER_POINTER to the getter method, as it would end up on the
+ // property itself, which we don't want unless the
+ // -objcmt-returns-innerpointer-property option is on.
+ if (!PropertyInferred ||
+ (ASTMigrateActions & FrontendOptions::ObjCMT_ReturnsInnerPointerProperty))
+ if (ASTMigrateActions & FrontendOptions::ObjCMT_Annotation)
+ migrateNsReturnsInnerPointer(Ctx, Method);
+ }
+ if (!(ASTMigrateActions & FrontendOptions::ObjCMT_ReturnsInnerPointerProperty))
+ return;
+
+ for (auto *Prop : D->properties()) {
+ if ((ASTMigrateActions & FrontendOptions::ObjCMT_Annotation) &&
+ !Prop->isDeprecated())
+ migratePropertyNsReturnsInnerPointer(Ctx, Prop);
+ }
+}
+
+static bool
+ClassImplementsAllMethodsAndProperties(ASTContext &Ctx,
+ const ObjCImplementationDecl *ImpDecl,
+ const ObjCInterfaceDecl *IDecl,
+ ObjCProtocolDecl *Protocol) {
+ // In auto-synthesis, protocol properties are not synthesized. So a
+ // conforming protocol must have its required properties declared in the
+ // class interface.
+ bool HasAtleastOneRequiredProperty = false;
+ if (const ObjCProtocolDecl *PDecl = Protocol->getDefinition())
+ for (const auto *Property : PDecl->properties()) {
+ if (Property->getPropertyImplementation() == ObjCPropertyDecl::Optional)
+ continue;
+ HasAtleastOneRequiredProperty = true;
+ DeclContext::lookup_result R = IDecl->lookup(Property->getDeclName());
+ if (R.size() == 0) {
+ // Relax the rule and look into the class's implementation for a
+ // @synthesize or @dynamic declaration. The class may be implementing a
+ // property coming from another protocol; this still makes the target
+ // protocol count as conforming.
+ if (!ImpDecl->FindPropertyImplDecl(
+ Property->getDeclName().getAsIdentifierInfo()))
+ return false;
+ }
+ else if (ObjCPropertyDecl *ClassProperty = dyn_cast<ObjCPropertyDecl>(R[0])) {
+ if ((ClassProperty->getPropertyAttributes()
+ != Property->getPropertyAttributes()) ||
+ !Ctx.hasSameType(ClassProperty->getType(), Property->getType()))
+ return false;
+ }
+ else
+ return false;
+ }
+
+ // At this point, all required properties in this protocol conform to those
+ // declared in the class.
+ // Check that the class implements the required methods of the protocol too.
+ bool HasAtleastOneRequiredMethod = false;
+ if (const ObjCProtocolDecl *PDecl = Protocol->getDefinition()) {
+ if (PDecl->meth_begin() == PDecl->meth_end())
+ return HasAtleastOneRequiredProperty;
+ for (const auto *MD : PDecl->methods()) {
+ if (MD->isImplicit())
+ continue;
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional)
+ continue;
+ DeclContext::lookup_result R = ImpDecl->lookup(MD->getDeclName());
+ if (R.size() == 0)
+ return false;
+ bool match = false;
+ HasAtleastOneRequiredMethod = true;
+ for (unsigned I = 0, N = R.size(); I != N; ++I)
+ if (ObjCMethodDecl *ImpMD = dyn_cast<ObjCMethodDecl>(R[0]))
+ if (Ctx.ObjCMethodsAreEqual(MD, ImpMD)) {
+ match = true;
+ break;
+ }
+ if (!match)
+ return false;
+ }
+ }
+ return HasAtleastOneRequiredProperty || HasAtleastOneRequiredMethod;
+}
+
+static bool rewriteToObjCInterfaceDecl(const ObjCInterfaceDecl *IDecl,
+ llvm::SmallVectorImpl<ObjCProtocolDecl*> &ConformingProtocols,
+ const NSAPI &NS, edit::Commit &commit) {
+ const ObjCList<ObjCProtocolDecl> &Protocols = IDecl->getReferencedProtocols();
+ std::string ClassString;
+ SourceLocation EndLoc =
+ IDecl->getSuperClass() ? IDecl->getSuperClassLoc() : IDecl->getLocation();
+
+ if (Protocols.empty()) {
+ ClassString = '<';
+ for (unsigned i = 0, e = ConformingProtocols.size(); i != e; i++) {
+ ClassString += ConformingProtocols[i]->getNameAsString();
+ if (i != (e-1))
+ ClassString += ", ";
+ }
+ ClassString += "> ";
+ }
+ else {
+ ClassString = ", ";
+ for (unsigned i = 0, e = ConformingProtocols.size(); i != e; i++) {
+ ClassString += ConformingProtocols[i]->getNameAsString();
+ if (i != (e-1))
+ ClassString += ", ";
+ }
+ ObjCInterfaceDecl::protocol_loc_iterator PL = IDecl->protocol_loc_end() - 1;
+ EndLoc = *PL;
+ }
+
+ commit.insertAfterToken(EndLoc, ClassString);
+ return true;
+}
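+
+// For illustration (a sketch; `Foo` and the protocol are hypothetical): if
+// class Foo is found to implicitly conform to NSCopying,
+//
+//   @interface Foo : NSObject
+//
+// becomes
+//
+//   @interface Foo : NSObject <NSCopying>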
+
+static StringRef GetUnsignedName(StringRef NSIntegerName) {
+ StringRef UnsignedName = llvm::StringSwitch<StringRef>(NSIntegerName)
+ .Case("int8_t", "uint8_t")
+ .Case("int16_t", "uint16_t")
+ .Case("int32_t", "uint32_t")
+ .Case("NSInteger", "NSUInteger")
+ .Case("int64_t", "uint64_t")
+ .Default(NSIntegerName);
+ return UnsignedName;
+}
+
+static bool rewriteToNSEnumDecl(const EnumDecl *EnumDcl,
+ const TypedefDecl *TypedefDcl,
+ const NSAPI &NS, edit::Commit &commit,
+ StringRef NSIntegerName,
+ bool NSOptions) {
+ std::string ClassString;
+ if (NSOptions) {
+ ClassString = "typedef NS_OPTIONS(";
+ ClassString += GetUnsignedName(NSIntegerName);
+ }
+ else {
+ ClassString = "typedef NS_ENUM(";
+ ClassString += NSIntegerName;
+ }
+ ClassString += ", ";
+
+ ClassString += TypedefDcl->getIdentifier()->getName();
+ ClassString += ')';
+ SourceRange R(EnumDcl->getLocStart(), EnumDcl->getLocStart());
+ commit.replace(R, ClassString);
+ SourceLocation EndOfEnumDclLoc = EnumDcl->getLocEnd();
+ EndOfEnumDclLoc = trans::findSemiAfterLocation(EndOfEnumDclLoc,
+ NS.getASTContext(), /*IsDecl*/true);
+ if (EndOfEnumDclLoc.isValid()) {
+ SourceRange EnumDclRange(EnumDcl->getLocStart(), EndOfEnumDclLoc);
+ commit.insertFromRange(TypedefDcl->getLocStart(), EnumDclRange);
+ }
+ else
+ return false;
+
+ SourceLocation EndTypedefDclLoc = TypedefDcl->getLocEnd();
+ EndTypedefDclLoc = trans::findSemiAfterLocation(EndTypedefDclLoc,
+ NS.getASTContext(), /*IsDecl*/true);
+ if (EndTypedefDclLoc.isValid()) {
+ SourceRange TDRange(TypedefDcl->getLocStart(), EndTypedefDclLoc);
+ commit.remove(TDRange);
+ }
+ else
+ return false;
+
+ EndOfEnumDclLoc = trans::findLocationAfterSemi(EnumDcl->getLocEnd(), NS.getASTContext(),
+ /*IsDecl*/true);
+ if (EndOfEnumDclLoc.isValid()) {
+ SourceLocation BeginOfEnumDclLoc = EnumDcl->getLocStart();
+ // FIXME: This assumes that the enum declaration is immediately preceded by
+ // an end-of-line; it is trying to remove the enum declaration lines
+ // entirely.
+ BeginOfEnumDclLoc = BeginOfEnumDclLoc.getLocWithOffset(-1);
+ commit.remove(SourceRange(BeginOfEnumDclLoc, EndOfEnumDclLoc));
+ return true;
+ }
+ return false;
+}
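+
+// For illustration (a sketch; names are hypothetical): an anonymous enum
+// followed by a matching integer typedef, e.g.
+//
+//   enum { SizeSmall, SizeLarge };
+//   typedef NSInteger Size;
+//
+// is rewritten by the code above to roughly
+//
+//   typedef NS_ENUM(NSInteger, Size) { SizeSmall, SizeLarge };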
+
+static void rewriteToNSMacroDecl(ASTContext &Ctx,
+ const EnumDecl *EnumDcl,
+ const TypedefDecl *TypedefDcl,
+ const NSAPI &NS, edit::Commit &commit,
+ bool IsNSIntegerType) {
+ QualType DesignatedEnumType = EnumDcl->getIntegerType();
+ assert(!DesignatedEnumType.isNull()
+ && "rewriteToNSMacroDecl - underlying enum type is null");
+
+ PrintingPolicy Policy(Ctx.getPrintingPolicy());
+ std::string TypeString = DesignatedEnumType.getAsString(Policy);
+ std::string ClassString = IsNSIntegerType ? "NS_ENUM(" : "NS_OPTIONS(";
+ ClassString += TypeString;
+ ClassString += ", ";
+
+ ClassString += TypedefDcl->getIdentifier()->getName();
+ ClassString += ')';
+ SourceLocation EndLoc;
+ if (EnumDcl->getIntegerTypeSourceInfo()) {
+ TypeSourceInfo *TSourceInfo = EnumDcl->getIntegerTypeSourceInfo();
+ TypeLoc TLoc = TSourceInfo->getTypeLoc();
+ EndLoc = TLoc.getLocEnd();
+ const char *lbrace = Ctx.getSourceManager().getCharacterData(EndLoc);
+ unsigned count = 0;
+ if (lbrace)
+ while (lbrace[count] != '{')
+ ++count;
+ if (count > 0)
+ EndLoc = EndLoc.getLocWithOffset(count-1);
+ }
+ else
+ EndLoc = EnumDcl->getLocStart();
+ SourceRange R(EnumDcl->getLocStart(), EndLoc);
+ commit.replace(R, ClassString);
+ // Remove the spaces between '}' and the typedef name.
+ SourceLocation StartTypedefLoc = EnumDcl->getLocEnd();
+ StartTypedefLoc = StartTypedefLoc.getLocWithOffset(+1);
+ SourceLocation EndTypedefLoc = TypedefDcl->getLocEnd();
+
+ commit.remove(SourceRange(StartTypedefLoc, EndTypedefLoc));
+}
+
+static bool UseNSOptionsMacro(Preprocessor &PP, ASTContext &Ctx,
+ const EnumDecl *EnumDcl) {
+ bool PowerOfTwo = true;
+ bool AllHexdecimalEnumerator = true;
+ uint64_t MaxPowerOfTwoVal = 0;
+ for (auto Enumerator : EnumDcl->enumerators()) {
+ const Expr *InitExpr = Enumerator->getInitExpr();
+ if (!InitExpr) {
+ PowerOfTwo = false;
+ AllHexdecimalEnumerator = false;
+ continue;
+ }
+ InitExpr = InitExpr->IgnoreParenCasts();
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(InitExpr))
+ if (BO->isShiftOp() || BO->isBitwiseOp())
+ return true;
+
+ uint64_t EnumVal = Enumerator->getInitVal().getZExtValue();
+ if (PowerOfTwo && EnumVal) {
+ if (!llvm::isPowerOf2_64(EnumVal))
+ PowerOfTwo = false;
+ else if (EnumVal > MaxPowerOfTwoVal)
+ MaxPowerOfTwoVal = EnumVal;
+ }
+ if (AllHexdecimalEnumerator && EnumVal) {
+ bool FoundHexdecimalEnumerator = false;
+ SourceLocation EndLoc = Enumerator->getLocEnd();
+ Token Tok;
+ if (!PP.getRawToken(EndLoc, Tok, /*IgnoreWhiteSpace=*/true))
+ if (Tok.isLiteral() && Tok.getLength() > 2) {
+ if (const char *StringLit = Tok.getLiteralData())
+ FoundHexdecimalEnumerator =
+ (StringLit[0] == '0' && (toLowercase(StringLit[1]) == 'x'));
+ }
+ if (!FoundHexdecimalEnumerator)
+ AllHexdecimalEnumerator = false;
+ }
+ }
+ return AllHexdecimalEnumerator || (PowerOfTwo && (MaxPowerOfTwoVal > 2));
+}
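+
+// For illustration: enumerators written as bit masks, e.g.
+//
+//   enum { FlagA = 1 << 0, FlagB = 1 << 1, FlagC = 0x04 };
+//
+// make this heuristic return true (shift or bitwise initializers, powers of
+// two, or all-hexadecimal enumerators), steering the migration toward
+// NS_OPTIONS rather than NS_ENUM.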
+
+void ObjCMigrateASTConsumer::migrateProtocolConformance(ASTContext &Ctx,
+ const ObjCImplementationDecl *ImpDecl) {
+ const ObjCInterfaceDecl *IDecl = ImpDecl->getClassInterface();
+ if (!IDecl || ObjCProtocolDecls.empty() || IDecl->isDeprecated())
+ return;
+ // Find all implicit conforming protocols for this class
+ // and make them explicit.
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ExplicitProtocols;
+ Ctx.CollectInheritedProtocols(IDecl, ExplicitProtocols);
+ llvm::SmallVector<ObjCProtocolDecl *, 8> PotentialImplicitProtocols;
+
+ for (ObjCProtocolDecl *ProtDecl : ObjCProtocolDecls)
+ if (!ExplicitProtocols.count(ProtDecl))
+ PotentialImplicitProtocols.push_back(ProtDecl);
+
+ if (PotentialImplicitProtocols.empty())
+ return;
+
+ // Go through the list of non-optional methods and properties in each
+ // protocol in the PotentialImplicitProtocols list. If the class implements
+ // every one of them, then it conforms to that protocol.
+ llvm::SmallVector<ObjCProtocolDecl*, 8> ConformingProtocols;
+ for (unsigned i = 0, e = PotentialImplicitProtocols.size(); i != e; i++)
+ if (ClassImplementsAllMethodsAndProperties(Ctx, ImpDecl, IDecl,
+ PotentialImplicitProtocols[i]))
+ ConformingProtocols.push_back(PotentialImplicitProtocols[i]);
+
+ if (ConformingProtocols.empty())
+ return;
+
+ // Further reduce the number of conforming protocols. If protocol P1 is
+ // inherited by another protocol in the list, P2 (P2<P1>), there is no need
+ // to include P1.
+ llvm::SmallVector<ObjCProtocolDecl*, 8> MinimalConformingProtocols;
+ for (unsigned i = 0, e = ConformingProtocols.size(); i != e; i++) {
+ bool DropIt = false;
+ ObjCProtocolDecl *TargetPDecl = ConformingProtocols[i];
+ for (unsigned i1 = 0, e1 = ConformingProtocols.size(); i1 != e1; i1++) {
+ ObjCProtocolDecl *PDecl = ConformingProtocols[i1];
+ if (PDecl == TargetPDecl)
+ continue;
+ if (PDecl->lookupProtocolNamed(
+ TargetPDecl->getDeclName().getAsIdentifierInfo())) {
+ DropIt = true;
+ break;
+ }
+ }
+ if (!DropIt)
+ MinimalConformingProtocols.push_back(TargetPDecl);
+ }
+ if (MinimalConformingProtocols.empty())
+ return;
+ edit::Commit commit(*Editor);
+ rewriteToObjCInterfaceDecl(IDecl, MinimalConformingProtocols,
+ *NSAPIObj, commit);
+ Editor->commit(commit);
+}
+
+void ObjCMigrateASTConsumer::CacheObjCNSIntegerTypedefed(
+ const TypedefDecl *TypedefDcl) {
+
+ QualType qt = TypedefDcl->getTypeSourceInfo()->getType();
+ if (NSAPIObj->isObjCNSIntegerType(qt))
+ NSIntegerTypedefed = TypedefDcl;
+ else if (NSAPIObj->isObjCNSUIntegerType(qt))
+ NSUIntegerTypedefed = TypedefDcl;
+}
+
+bool ObjCMigrateASTConsumer::migrateNSEnumDecl(ASTContext &Ctx,
+ const EnumDecl *EnumDcl,
+ const TypedefDecl *TypedefDcl) {
+ if (!EnumDcl->isCompleteDefinition() || EnumDcl->getIdentifier() ||
+ EnumDcl->isDeprecated())
+ return false;
+ if (!TypedefDcl) {
+ if (NSIntegerTypedefed) {
+ TypedefDcl = NSIntegerTypedefed;
+ NSIntegerTypedefed = nullptr;
+ }
+ else if (NSUIntegerTypedefed) {
+ TypedefDcl = NSUIntegerTypedefed;
+ NSUIntegerTypedefed = nullptr;
+ }
+ else
+ return false;
+ FileID FileIdOfTypedefDcl =
+ PP.getSourceManager().getFileID(TypedefDcl->getLocation());
+ FileID FileIdOfEnumDcl =
+ PP.getSourceManager().getFileID(EnumDcl->getLocation());
+ if (FileIdOfTypedefDcl != FileIdOfEnumDcl)
+ return false;
+ }
+ if (TypedefDcl->isDeprecated())
+ return false;
+
+ QualType qt = TypedefDcl->getTypeSourceInfo()->getType();
+ StringRef NSIntegerName = NSAPIObj->GetNSIntegralKind(qt);
+
+ if (NSIntegerName.empty()) {
+ // Also check for typedef enum {...} TD;
+ if (const EnumType *EnumTy = qt->getAs<EnumType>()) {
+ if (EnumTy->getDecl() == EnumDcl) {
+ bool NSOptions = UseNSOptionsMacro(PP, Ctx, EnumDcl);
+ if (!InsertFoundation(Ctx, TypedefDcl->getLocStart()))
+ return false;
+ edit::Commit commit(*Editor);
+ rewriteToNSMacroDecl(Ctx, EnumDcl, TypedefDcl, *NSAPIObj, commit, !NSOptions);
+ Editor->commit(commit);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // We may still use NS_OPTIONS based on what we find in the enumerator list.
+ bool NSOptions = UseNSOptionsMacro(PP, Ctx, EnumDcl);
+ if (!InsertFoundation(Ctx, TypedefDcl->getLocStart()))
+ return false;
+ edit::Commit commit(*Editor);
+ bool Res = rewriteToNSEnumDecl(EnumDcl, TypedefDcl, *NSAPIObj,
+ commit, NSIntegerName, NSOptions);
+ Editor->commit(commit);
+ return Res;
+}
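+
+// An illustrative sketch (hypothetical input; the rewriting itself is done by
+// rewriteToNSEnumDecl/rewriteToNSMacroDecl):
+//
+//   enum { Red, Green, Blue };
+//   typedef NSInteger Color;
+// ---->
+//   typedef NS_ENUM(NSInteger, Color) { Red, Green, Blue };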
+
+static void ReplaceWithInstancetype(ASTContext &Ctx,
+ const ObjCMigrateASTConsumer &ASTC,
+ ObjCMethodDecl *OM) {
+ if (OM->getReturnType() == Ctx.getObjCInstanceType())
+ return; // already has instancetype.
+
+ SourceRange R;
+ std::string ClassString;
+ if (TypeSourceInfo *TSInfo = OM->getReturnTypeSourceInfo()) {
+ TypeLoc TL = TSInfo->getTypeLoc();
+ R = SourceRange(TL.getBeginLoc(), TL.getEndLoc());
+ ClassString = "instancetype";
+ }
+ else {
+ R = SourceRange(OM->getLocStart(), OM->getLocStart());
+ ClassString = OM->isInstanceMethod() ? '-' : '+';
+ ClassString += " (instancetype)";
+ }
+ edit::Commit commit(*ASTC.Editor);
+ commit.replace(R, ClassString);
+ ASTC.Editor->commit(commit);
+}
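+
+// For example (illustrative):
+//   - (id)init;
+// ---->
+//   - (instancetype)init;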
+
+static void ReplaceWithClasstype(const ObjCMigrateASTConsumer &ASTC,
+ ObjCMethodDecl *OM) {
+ ObjCInterfaceDecl *IDecl = OM->getClassInterface();
+ SourceRange R;
+ std::string ClassString;
+ if (TypeSourceInfo *TSInfo = OM->getReturnTypeSourceInfo()) {
+ TypeLoc TL = TSInfo->getTypeLoc();
+ R = SourceRange(TL.getBeginLoc(), TL.getEndLoc());
+ ClassString = IDecl->getName();
+ ClassString += "*";
+ }
+ else {
+ R = SourceRange(OM->getLocStart(), OM->getLocStart());
+ ClassString = "+ (";
+ ClassString += IDecl->getName(); ClassString += "*)";
+ }
+ edit::Commit commit(*ASTC.Editor);
+ commit.replace(R, ClassString);
+ ASTC.Editor->commit(commit);
+}
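+
+// For example (illustrative, for a hypothetical class MyController):
+//   + (id)sharedController;
+// ---->
+//   + (MyController*)sharedController;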
+
+void ObjCMigrateASTConsumer::migrateMethodInstanceType(ASTContext &Ctx,
+ ObjCContainerDecl *CDecl,
+ ObjCMethodDecl *OM) {
+ ObjCInstanceTypeFamily OIT_Family =
+ Selector::getInstTypeMethodFamily(OM->getSelector());
+
+ std::string ClassName;
+ switch (OIT_Family) {
+ case OIT_None:
+ migrateFactoryMethod(Ctx, CDecl, OM);
+ return;
+ case OIT_Array:
+ ClassName = "NSArray";
+ break;
+ case OIT_Dictionary:
+ ClassName = "NSDictionary";
+ break;
+ case OIT_Singleton:
+ migrateFactoryMethod(Ctx, CDecl, OM, OIT_Singleton);
+ return;
+ case OIT_Init:
+ if (OM->getReturnType()->isObjCIdType())
+ ReplaceWithInstancetype(Ctx, *this, OM);
+ return;
+ case OIT_ReturnsSelf:
+ migrateFactoryMethod(Ctx, CDecl, OM, OIT_ReturnsSelf);
+ return;
+ }
+ if (!OM->getReturnType()->isObjCIdType())
+ return;
+
+ ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl);
+ if (!IDecl) {
+ if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CDecl))
+ IDecl = CatDecl->getClassInterface();
+ else if (ObjCImplDecl *ImpDecl = dyn_cast<ObjCImplDecl>(CDecl))
+ IDecl = ImpDecl->getClassInterface();
+ }
+ if (!IDecl ||
+ !IDecl->lookupInheritedClass(&Ctx.Idents.get(ClassName))) {
+ migrateFactoryMethod(Ctx, CDecl, OM);
+ return;
+ }
+ ReplaceWithInstancetype(Ctx, *this, OM);
+}
+
+static bool TypeIsInnerPointer(QualType T) {
+ if (!T->isAnyPointerType())
+ return false;
+ if (T->isObjCObjectPointerType() || T->isObjCBuiltinType() ||
+ T->isBlockPointerType() || T->isFunctionPointerType() ||
+ ento::coreFoundation::isCFObjectRef(T))
+ return false;
+ // Also, typedef-of-pointer-to-incomplete-struct is something that we assume
+ // is not an inner pointer type.
+ QualType OrigT = T;
+ while (const TypedefType *TD = dyn_cast<TypedefType>(T.getTypePtr()))
+ T = TD->getDecl()->getUnderlyingType();
+ if (OrigT == T || !T->isPointerType())
+ return true;
+ const PointerType* PT = T->getAs<PointerType>();
+ QualType UPointeeT = PT->getPointeeType().getUnqualifiedType();
+ if (UPointeeT->isRecordType()) {
+ const RecordType *RecordTy = UPointeeT->getAs<RecordType>();
+ if (!RecordTy->getDecl()->isCompleteDefinition())
+ return false;
+ }
+ return true;
+}
+
+/// \brief Check whether the two versions match.
+static bool versionsMatch(const VersionTuple &X, const VersionTuple &Y) {
+ return (X == Y);
+}
+
+/// AvailabilityAttrsMatch - This routine checks that, when comparing two
+/// availability attributes, all of their components match. It returns
+/// true if we are not dealing with availability attributes or when all
+/// components of the two availability attributes match. This routine is
+/// only called when the attributes are of the same kind.
+static bool AvailabilityAttrsMatch(Attr *At1, Attr *At2) {
+ const AvailabilityAttr *AA1 = dyn_cast<AvailabilityAttr>(At1);
+ if (!AA1)
+ return true;
+ const AvailabilityAttr *AA2 = dyn_cast<AvailabilityAttr>(At2);
+
+ VersionTuple Introduced1 = AA1->getIntroduced();
+ VersionTuple Deprecated1 = AA1->getDeprecated();
+ VersionTuple Obsoleted1 = AA1->getObsoleted();
+ bool IsUnavailable1 = AA1->getUnavailable();
+ VersionTuple Introduced2 = AA2->getIntroduced();
+ VersionTuple Deprecated2 = AA2->getDeprecated();
+ VersionTuple Obsoleted2 = AA2->getObsoleted();
+ bool IsUnavailable2 = AA2->getUnavailable();
+ return (versionsMatch(Introduced1, Introduced2) &&
+ versionsMatch(Deprecated1, Deprecated2) &&
+ versionsMatch(Obsoleted1, Obsoleted2) &&
+ IsUnavailable1 == IsUnavailable2);
+}
+
+static bool MatchTwoAttributeLists(const AttrVec &Attrs1, const AttrVec &Attrs2,
+ bool &AvailabilityArgsMatch) {
+ // This list is very small, so this need not be optimized.
+ for (unsigned i = 0, e = Attrs1.size(); i != e; i++) {
+ bool match = false;
+ for (unsigned j = 0, f = Attrs2.size(); j != f; j++) {
+ // Matching attribute kind only. Except for Availability attributes,
+ // we are not getting into the details of the attributes. For all
+ // practical purposes this is sufficient.
+ if (Attrs1[i]->getKind() == Attrs2[j]->getKind()) {
+ if (AvailabilityArgsMatch)
+ AvailabilityArgsMatch = AvailabilityAttrsMatch(Attrs1[i], Attrs2[j]);
+ match = true;
+ break;
+ }
+ }
+ if (!match)
+ return false;
+ }
+ return true;
+}
+
+/// AttributesMatch - This routine checks the lists of attributes on two
+/// decls. It returns false if there is a mismatch in the kinds of
+/// attributes seen in the decls. It returns true if the two decls have
+/// lists of the same kinds of attributes. Furthermore, when there are
+/// availability attributes on the two decls, it sets AvailabilityArgsMatch
+/// to false if the availability attributes have different versions, etc.
+static bool AttributesMatch(const Decl *Decl1, const Decl *Decl2,
+ bool &AvailabilityArgsMatch) {
+ if (!Decl1->hasAttrs() || !Decl2->hasAttrs()) {
+ AvailabilityArgsMatch = (Decl1->hasAttrs() == Decl2->hasAttrs());
+ return true;
+ }
+ AvailabilityArgsMatch = true;
+ const AttrVec &Attrs1 = Decl1->getAttrs();
+ const AttrVec &Attrs2 = Decl2->getAttrs();
+ bool match = MatchTwoAttributeLists(Attrs1, Attrs2, AvailabilityArgsMatch);
+ if (match && (Attrs2.size() > Attrs1.size()))
+ return MatchTwoAttributeLists(Attrs2, Attrs1, AvailabilityArgsMatch);
+ return match;
+}
+
+static bool IsValidIdentifier(ASTContext &Ctx,
+ const char *Name) {
+ if (!isIdentifierHead(Name[0]))
+ return false;
+ std::string NameString = Name;
+ NameString[0] = toLowercase(NameString[0]);
+ IdentifierInfo *II = &Ctx.Idents.get(NameString);
+ return II->getTokenID() == tok::identifier;
+}
+
+bool ObjCMigrateASTConsumer::migrateProperty(ASTContext &Ctx,
+ ObjCContainerDecl *D,
+ ObjCMethodDecl *Method) {
+ if (Method->isPropertyAccessor() || !Method->isInstanceMethod() ||
+ Method->param_size() != 0)
+ return false;
+ // Is this method candidate to be a getter?
+ QualType GRT = Method->getReturnType();
+ if (GRT->isVoidType())
+ return false;
+
+ Selector GetterSelector = Method->getSelector();
+ ObjCInstanceTypeFamily OIT_Family =
+ Selector::getInstTypeMethodFamily(GetterSelector);
+
+ if (OIT_Family != OIT_None)
+ return false;
+
+ IdentifierInfo *getterName = GetterSelector.getIdentifierInfoForSlot(0);
+ Selector SetterSelector =
+ SelectorTable::constructSetterSelector(PP.getIdentifierTable(),
+ PP.getSelectorTable(),
+ getterName);
+ ObjCMethodDecl *SetterMethod = D->getInstanceMethod(SetterSelector);
+ unsigned LengthOfPrefix = 0;
+ if (!SetterMethod) {
+ // try a different naming convention for getter: isXxxxx
+ StringRef getterNameString = getterName->getName();
+ bool IsPrefix = getterNameString.startswith("is");
+ // Note that we don't want to change an isXXX method of retainable object
+ // type to property (readonly or otherwise).
+ if (IsPrefix && GRT->isObjCRetainableType())
+ return false;
+ if (IsPrefix || getterNameString.startswith("get")) {
+ LengthOfPrefix = (IsPrefix ? 2 : 3);
+ const char *CGetterName = getterNameString.data() + LengthOfPrefix;
+ // Make sure that the first character after the "is" or "get" prefix can
+ // start an identifier.
+ if (!IsValidIdentifier(Ctx, CGetterName))
+ return false;
+ if (CGetterName[0] && isUppercase(CGetterName[0])) {
+ getterName = &Ctx.Idents.get(CGetterName);
+ SetterSelector =
+ SelectorTable::constructSetterSelector(PP.getIdentifierTable(),
+ PP.getSelectorTable(),
+ getterName);
+ SetterMethod = D->getInstanceMethod(SetterSelector);
+ }
+ }
+ }
+
+ if (SetterMethod) {
+ if ((ASTMigrateActions & FrontendOptions::ObjCMT_ReadwriteProperty) == 0)
+ return false;
+ bool AvailabilityArgsMatch;
+ if (SetterMethod->isDeprecated() ||
+ !AttributesMatch(Method, SetterMethod, AvailabilityArgsMatch))
+ return false;
+
+ // Is this a valid setter, matching the target getter?
+ QualType SRT = SetterMethod->getReturnType();
+ if (!SRT->isVoidType())
+ return false;
+ const ParmVarDecl *argDecl = *SetterMethod->param_begin();
+ QualType ArgType = argDecl->getType();
+ if (!Ctx.hasSameUnqualifiedType(ArgType, GRT))
+ return false;
+ edit::Commit commit(*Editor);
+ rewriteToObjCProperty(Method, SetterMethod, *NSAPIObj, commit,
+ LengthOfPrefix,
+ (ASTMigrateActions &
+ FrontendOptions::ObjCMT_AtomicProperty) != 0,
+ (ASTMigrateActions &
+ FrontendOptions::ObjCMT_NsAtomicIOSOnlyProperty) != 0,
+ AvailabilityArgsMatch);
+ Editor->commit(commit);
+ return true;
+ }
+ else if (ASTMigrateActions & FrontendOptions::ObjCMT_ReadonlyProperty) {
+ // Try a non-void method with no argument (and no setter or property of the
+ // same name) as a 'readonly' property.
+ edit::Commit commit(*Editor);
+ rewriteToObjCProperty(Method, nullptr /*SetterMethod*/, *NSAPIObj, commit,
+ LengthOfPrefix,
+ (ASTMigrateActions &
+ FrontendOptions::ObjCMT_AtomicProperty) != 0,
+ (ASTMigrateActions &
+ FrontendOptions::ObjCMT_NsAtomicIOSOnlyProperty) != 0,
+ /*AvailabilityArgsMatch*/false);
+ Editor->commit(commit);
+ return true;
+ }
+ return false;
+}
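+
+// An illustrative sketch of the getter/setter-to-property migration
+// (hypothetical declarations; the exact attribute list is chosen by
+// rewriteToObjCProperty):
+//
+//   - (NSString *)title;
+//   - (void)setTitle:(NSString *)newTitle;
+// ---->
+//   @property (nonatomic, copy) NSString *title;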
+
+void ObjCMigrateASTConsumer::migrateNsReturnsInnerPointer(ASTContext &Ctx,
+ ObjCMethodDecl *OM) {
+ if (OM->isImplicit() ||
+ !OM->isInstanceMethod() ||
+ OM->hasAttr<ObjCReturnsInnerPointerAttr>())
+ return;
+
+ QualType RT = OM->getReturnType();
+ if (!TypeIsInnerPointer(RT) ||
+ !NSAPIObj->isMacroDefined("NS_RETURNS_INNER_POINTER"))
+ return;
+
+ edit::Commit commit(*Editor);
+ commit.insertBefore(OM->getLocEnd(), " NS_RETURNS_INNER_POINTER");
+ Editor->commit(commit);
+}
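+
+// For example (illustrative):
+//   - (char *)UTF8String;
+// ---->
+//   - (char *)UTF8String NS_RETURNS_INNER_POINTER;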
+
+void ObjCMigrateASTConsumer::migratePropertyNsReturnsInnerPointer(ASTContext &Ctx,
+ ObjCPropertyDecl *P) {
+ QualType T = P->getType();
+
+ if (!TypeIsInnerPointer(T) ||
+ !NSAPIObj->isMacroDefined("NS_RETURNS_INNER_POINTER"))
+ return;
+ edit::Commit commit(*Editor);
+ commit.insertBefore(P->getLocEnd(), " NS_RETURNS_INNER_POINTER ");
+ Editor->commit(commit);
+}
+
+void ObjCMigrateASTConsumer::migrateAllMethodInstaceType(ASTContext &Ctx,
+ ObjCContainerDecl *CDecl) {
+ if (CDecl->isDeprecated() || IsCategoryNameWithDeprecatedSuffix(CDecl))
+ return;
+
+ // migrate methods which can have instancetype as their result type.
+ for (auto *Method : CDecl->methods()) {
+ if (Method->isDeprecated())
+ continue;
+ migrateMethodInstanceType(Ctx, CDecl, Method);
+ }
+}
+
+void ObjCMigrateASTConsumer::migrateFactoryMethod(ASTContext &Ctx,
+ ObjCContainerDecl *CDecl,
+ ObjCMethodDecl *OM,
+ ObjCInstanceTypeFamily OIT_Family) {
+ if (OM->isInstanceMethod() ||
+ OM->getReturnType() == Ctx.getObjCInstanceType() ||
+ !OM->getReturnType()->isObjCIdType())
+ return;
+
+ // Candidate factory methods are + (id) NaMeXXX : ... which belong to a class
+ // NSYYYNamE, where the matching portions of the names are at least 3
+ // characters long.
+ ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl);
+ if (!IDecl) {
+ if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CDecl))
+ IDecl = CatDecl->getClassInterface();
+ else if (ObjCImplDecl *ImpDecl = dyn_cast<ObjCImplDecl>(CDecl))
+ IDecl = ImpDecl->getClassInterface();
+ }
+ if (!IDecl)
+ return;
+
+ std::string StringClassName = IDecl->getName();
+ StringRef LoweredClassName(StringClassName);
+ std::string StringLoweredClassName = LoweredClassName.lower();
+ LoweredClassName = StringLoweredClassName;
+
+ IdentifierInfo *MethodIdName = OM->getSelector().getIdentifierInfoForSlot(0);
+ // Handle method with no name at its first selector slot; e.g. + (id):(int)x.
+ if (!MethodIdName)
+ return;
+
+ std::string MethodName = MethodIdName->getName();
+ if (OIT_Family == OIT_Singleton || OIT_Family == OIT_ReturnsSelf) {
+ StringRef STRefMethodName(MethodName);
+ size_t len = 0;
+ if (STRefMethodName.startswith("standard"))
+ len = strlen("standard");
+ else if (STRefMethodName.startswith("shared"))
+ len = strlen("shared");
+ else if (STRefMethodName.startswith("default"))
+ len = strlen("default");
+ else
+ return;
+ MethodName = STRefMethodName.substr(len);
+ }
+ std::string MethodNameSubStr = MethodName.substr(0, 3);
+ StringRef MethodNamePrefix(MethodNameSubStr);
+ std::string StringLoweredMethodNamePrefix = MethodNamePrefix.lower();
+ MethodNamePrefix = StringLoweredMethodNamePrefix;
+ size_t Ix = LoweredClassName.rfind(MethodNamePrefix);
+ if (Ix == StringRef::npos)
+ return;
+ std::string ClassNamePostfix = LoweredClassName.substr(Ix);
+ StringRef LoweredMethodName(MethodName);
+ std::string StringLoweredMethodName = LoweredMethodName.lower();
+ LoweredMethodName = StringLoweredMethodName;
+ if (!LoweredMethodName.startswith(ClassNamePostfix))
+ return;
+ if (OIT_Family == OIT_ReturnsSelf)
+ ReplaceWithClasstype(*this, OM);
+ else
+ ReplaceWithInstancetype(Ctx, *this, OM);
+}
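+
+// For example (illustrative): in NSArray,
+//   + (id)arrayWithCapacity:(NSUInteger)numItems;
+// has a method-name prefix ("arr") that matches the class name, so it is
+// rewritten to
+//   + (instancetype)arrayWithCapacity:(NSUInteger)numItems;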
+
+static bool IsVoidStarType(QualType Ty) {
+ if (!Ty->isPointerType())
+ return false;
+
+ while (const TypedefType *TD = dyn_cast<TypedefType>(Ty.getTypePtr()))
+ Ty = TD->getDecl()->getUnderlyingType();
+
+ // Is the type void*?
+ const PointerType* PT = Ty->getAs<PointerType>();
+ if (PT->getPointeeType().getUnqualifiedType()->isVoidType())
+ return true;
+ return IsVoidStarType(PT->getPointeeType());
+}
+
+/// AuditedType - This routine audits the type AT and returns false if it is
+/// one of the known CF object types or of the "void *" variety. It returns
+/// true if we don't care about the type, such as a non-pointer or a pointer
+/// which has no ownership issues (such as "int *").
+static bool AuditedType (QualType AT) {
+ if (!AT->isAnyPointerType() && !AT->isBlockPointerType())
+ return true;
+ // FIXME. There isn't much we can say about CF pointer type; or is there?
+ if (ento::coreFoundation::isCFObjectRef(AT) ||
+ IsVoidStarType(AT) ||
+ // If it is an ObjC object pointer type, assume that it is not a CF type
+ // and that it is un-audited.
+ AT->isObjCObjectPointerType() || AT->isObjCBuiltinType())
+ return false;
+ // All other pointers are assumed audited as harmless.
+ return true;
+}
+
+void ObjCMigrateASTConsumer::AnnotateImplicitBridging(ASTContext &Ctx) {
+ if (CFFunctionIBCandidates.empty())
+ return;
+ if (!NSAPIObj->isMacroDefined("CF_IMPLICIT_BRIDGING_ENABLED")) {
+ CFFunctionIBCandidates.clear();
+ FileId = FileID();
+ return;
+ }
+ // Insert CF_IMPLICIT_BRIDGING_ENABLED/CF_IMPLICIT_BRIDGING_DISABLED.
+ const Decl *FirstFD = CFFunctionIBCandidates[0];
+ const Decl *LastFD =
+ CFFunctionIBCandidates[CFFunctionIBCandidates.size()-1];
+ const char *PragmaString = "\nCF_IMPLICIT_BRIDGING_ENABLED\n\n";
+ edit::Commit commit(*Editor);
+ commit.insertBefore(FirstFD->getLocStart(), PragmaString);
+ PragmaString = "\n\nCF_IMPLICIT_BRIDGING_DISABLED\n";
+ SourceLocation EndLoc = LastFD->getLocEnd();
+ // get location just past end of function location.
+ EndLoc = PP.getLocForEndOfToken(EndLoc);
+ if (isa<FunctionDecl>(LastFD)) {
+ // For methods, EndLoc points to the ending semicolon, so none of this
+ // extra work is needed.
+ Token Tok;
+ // get location of token that comes after end of function.
+ bool Failed = PP.getRawToken(EndLoc, Tok, /*IgnoreWhiteSpace=*/true);
+ if (!Failed)
+ EndLoc = Tok.getLocation();
+ }
+ commit.insertAfterToken(EndLoc, PragmaString);
+ Editor->commit(commit);
+ FileId = FileID();
+ CFFunctionIBCandidates.clear();
+}
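+
+// The result, sketched on a hypothetical pair of audited CF functions:
+//
+//   CF_IMPLICIT_BRIDGING_ENABLED
+//
+//   CFStringRef MyCopyName(MyRef obj);
+//   CFStringRef MyGetLabel(MyRef obj);
+//
+//   CF_IMPLICIT_BRIDGING_DISABLED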
+
+void ObjCMigrateASTConsumer::migrateCFAnnotation(ASTContext &Ctx, const Decl *Decl) {
+ if (Decl->isDeprecated())
+ return;
+
+ if (Decl->hasAttr<CFAuditedTransferAttr>()) {
+ assert(CFFunctionIBCandidates.empty() &&
+ "Cannot have audited functions/methods inside user "
+ "provided CF_IMPLICIT_BRIDGING_ENABLE");
+ return;
+ }
+
+ // Function must be annotated first.
+ if (const FunctionDecl *FuncDecl = dyn_cast<FunctionDecl>(Decl)) {
+ CF_BRIDGING_KIND AuditKind = migrateAddFunctionAnnotation(Ctx, FuncDecl);
+ if (AuditKind == CF_BRIDGING_ENABLE) {
+ CFFunctionIBCandidates.push_back(Decl);
+ if (FileId.isInvalid())
+ FileId = PP.getSourceManager().getFileID(Decl->getLocation());
+ }
+ else if (AuditKind == CF_BRIDGING_MAY_INCLUDE) {
+ if (!CFFunctionIBCandidates.empty()) {
+ CFFunctionIBCandidates.push_back(Decl);
+ if (FileId.isInvalid())
+ FileId = PP.getSourceManager().getFileID(Decl->getLocation());
+ }
+ }
+ else
+ AnnotateImplicitBridging(Ctx);
+ }
+ else {
+ migrateAddMethodAnnotation(Ctx, cast<ObjCMethodDecl>(Decl));
+ AnnotateImplicitBridging(Ctx);
+ }
+}
+
+void ObjCMigrateASTConsumer::AddCFAnnotations(ASTContext &Ctx,
+ const CallEffects &CE,
+ const FunctionDecl *FuncDecl,
+ bool ResultAnnotated) {
+ // Annotate function.
+ if (!ResultAnnotated) {
+ RetEffect Ret = CE.getReturnValue();
+ const char *AnnotationString = nullptr;
+ if (Ret.getObjKind() == RetEffect::CF) {
+ if (Ret.isOwned() && NSAPIObj->isMacroDefined("CF_RETURNS_RETAINED"))
+ AnnotationString = " CF_RETURNS_RETAINED";
+ else if (Ret.notOwned() &&
+ NSAPIObj->isMacroDefined("CF_RETURNS_NOT_RETAINED"))
+ AnnotationString = " CF_RETURNS_NOT_RETAINED";
+ }
+ else if (Ret.getObjKind() == RetEffect::ObjC) {
+ if (Ret.isOwned() && NSAPIObj->isMacroDefined("NS_RETURNS_RETAINED"))
+ AnnotationString = " NS_RETURNS_RETAINED";
+ }
+
+ if (AnnotationString) {
+ edit::Commit commit(*Editor);
+ commit.insertAfterToken(FuncDecl->getLocEnd(), AnnotationString);
+ Editor->commit(commit);
+ }
+ }
+ ArrayRef<ArgEffect> AEArgs = CE.getArgs();
+ unsigned i = 0;
+ for (FunctionDecl::param_const_iterator pi = FuncDecl->param_begin(),
+ pe = FuncDecl->param_end(); pi != pe; ++pi, ++i) {
+ const ParmVarDecl *pd = *pi;
+ ArgEffect AE = AEArgs[i];
+ if (AE == DecRef && !pd->hasAttr<CFConsumedAttr>() &&
+ NSAPIObj->isMacroDefined("CF_CONSUMED")) {
+ edit::Commit commit(*Editor);
+ commit.insertBefore(pd->getLocation(), "CF_CONSUMED ");
+ Editor->commit(commit);
+ }
+ else if (AE == DecRefMsg && !pd->hasAttr<NSConsumedAttr>() &&
+ NSAPIObj->isMacroDefined("NS_CONSUMED")) {
+ edit::Commit commit(*Editor);
+ commit.insertBefore(pd->getLocation(), "NS_CONSUMED ");
+ Editor->commit(commit);
+ }
+ }
+}
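+
+// For example (illustrative): an unannotated function returning an owned
+// CF object would become
+//   CFStringRef MyCreateString(CFAllocatorRef alloc) CF_RETURNS_RETAINED;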
+
+
+ObjCMigrateASTConsumer::CF_BRIDGING_KIND
+ ObjCMigrateASTConsumer::migrateAddFunctionAnnotation(
+ ASTContext &Ctx,
+ const FunctionDecl *FuncDecl) {
+ if (FuncDecl->hasBody())
+ return CF_BRIDGING_NONE;
+
+ CallEffects CE = CallEffects::getEffect(FuncDecl);
+ bool FuncIsReturnAnnotated = (FuncDecl->hasAttr<CFReturnsRetainedAttr>() ||
+ FuncDecl->hasAttr<CFReturnsNotRetainedAttr>() ||
+ FuncDecl->hasAttr<NSReturnsRetainedAttr>() ||
+ FuncDecl->hasAttr<NSReturnsNotRetainedAttr>() ||
+ FuncDecl->hasAttr<NSReturnsAutoreleasedAttr>());
+
+ // Trivial case: the function is return-annotated and has no arguments.
+ if (FuncIsReturnAnnotated && FuncDecl->getNumParams() == 0)
+ return CF_BRIDGING_NONE;
+
+ bool ReturnCFAudited = false;
+ if (!FuncIsReturnAnnotated) {
+ RetEffect Ret = CE.getReturnValue();
+ if (Ret.getObjKind() == RetEffect::CF &&
+ (Ret.isOwned() || Ret.notOwned()))
+ ReturnCFAudited = true;
+ else if (!AuditedType(FuncDecl->getReturnType()))
+ return CF_BRIDGING_NONE;
+ }
+
+ // At this point the result type is audited for potential inclusion.
+ // Now, what about the argument types?
+ ArrayRef<ArgEffect> AEArgs = CE.getArgs();
+ unsigned i = 0;
+ bool ArgCFAudited = false;
+ for (FunctionDecl::param_const_iterator pi = FuncDecl->param_begin(),
+ pe = FuncDecl->param_end(); pi != pe; ++pi, ++i) {
+ const ParmVarDecl *pd = *pi;
+ ArgEffect AE = AEArgs[i];
+ if (AE == DecRef /*CFConsumed annotated*/ || AE == IncRef) {
+ if (AE == DecRef && !pd->hasAttr<CFConsumedAttr>())
+ ArgCFAudited = true;
+ else if (AE == IncRef)
+ ArgCFAudited = true;
+ }
+ else {
+ QualType AT = pd->getType();
+ if (!AuditedType(AT)) {
+ AddCFAnnotations(Ctx, CE, FuncDecl, FuncIsReturnAnnotated);
+ return CF_BRIDGING_NONE;
+ }
+ }
+ }
+ if (ReturnCFAudited || ArgCFAudited)
+ return CF_BRIDGING_ENABLE;
+
+ return CF_BRIDGING_MAY_INCLUDE;
+}
+
+void ObjCMigrateASTConsumer::migrateARCSafeAnnotation(ASTContext &Ctx,
+ ObjCContainerDecl *CDecl) {
+ if (!isa<ObjCInterfaceDecl>(CDecl) || CDecl->isDeprecated())
+ return;
+
+ // Annotate the methods with CF annotations where needed.
+ for (const auto *Method : CDecl->methods())
+ migrateCFAnnotation(Ctx, Method);
+}
+
+void ObjCMigrateASTConsumer::AddCFAnnotations(ASTContext &Ctx,
+ const CallEffects &CE,
+ const ObjCMethodDecl *MethodDecl,
+ bool ResultAnnotated) {
+ // Annotate function.
+ if (!ResultAnnotated) {
+ RetEffect Ret = CE.getReturnValue();
+ const char *AnnotationString = nullptr;
+ if (Ret.getObjKind() == RetEffect::CF) {
+ if (Ret.isOwned() && NSAPIObj->isMacroDefined("CF_RETURNS_RETAINED"))
+ AnnotationString = " CF_RETURNS_RETAINED";
+ else if (Ret.notOwned() &&
+ NSAPIObj->isMacroDefined("CF_RETURNS_NOT_RETAINED"))
+ AnnotationString = " CF_RETURNS_NOT_RETAINED";
+ }
+ else if (Ret.getObjKind() == RetEffect::ObjC) {
+ ObjCMethodFamily OMF = MethodDecl->getMethodFamily();
+ switch (OMF) {
+ case clang::OMF_alloc:
+ case clang::OMF_new:
+ case clang::OMF_copy:
+ case clang::OMF_init:
+ case clang::OMF_mutableCopy:
+ break;
+
+ default:
+ if (Ret.isOwned() && NSAPIObj->isMacroDefined("NS_RETURNS_RETAINED"))
+ AnnotationString = " NS_RETURNS_RETAINED";
+ break;
+ }
+ }
+
+ if (AnnotationString) {
+ edit::Commit commit(*Editor);
+ commit.insertBefore(MethodDecl->getLocEnd(), AnnotationString);
+ Editor->commit(commit);
+ }
+ }
+ ArrayRef<ArgEffect> AEArgs = CE.getArgs();
+ unsigned i = 0;
+ for (ObjCMethodDecl::param_const_iterator pi = MethodDecl->param_begin(),
+ pe = MethodDecl->param_end(); pi != pe; ++pi, ++i) {
+ const ParmVarDecl *pd = *pi;
+ ArgEffect AE = AEArgs[i];
+ if (AE == DecRef && !pd->hasAttr<CFConsumedAttr>() &&
+ NSAPIObj->isMacroDefined("CF_CONSUMED")) {
+ edit::Commit commit(*Editor);
+ commit.insertBefore(pd->getLocation(), "CF_CONSUMED ");
+ Editor->commit(commit);
+ }
+ }
+}
+
+void ObjCMigrateASTConsumer::migrateAddMethodAnnotation(
+ ASTContext &Ctx,
+ const ObjCMethodDecl *MethodDecl) {
+ if (MethodDecl->hasBody() || MethodDecl->isImplicit())
+ return;
+
+ CallEffects CE = CallEffects::getEffect(MethodDecl);
+ bool MethodIsReturnAnnotated = (MethodDecl->hasAttr<CFReturnsRetainedAttr>() ||
+ MethodDecl->hasAttr<CFReturnsNotRetainedAttr>() ||
+ MethodDecl->hasAttr<NSReturnsRetainedAttr>() ||
+ MethodDecl->hasAttr<NSReturnsNotRetainedAttr>() ||
+ MethodDecl->hasAttr<NSReturnsAutoreleasedAttr>());
+
+ if (CE.getReceiver() == DecRefMsg &&
+ !MethodDecl->hasAttr<NSConsumesSelfAttr>() &&
+ MethodDecl->getMethodFamily() != OMF_init &&
+ MethodDecl->getMethodFamily() != OMF_release &&
+ NSAPIObj->isMacroDefined("NS_CONSUMES_SELF")) {
+ edit::Commit commit(*Editor);
+ commit.insertBefore(MethodDecl->getLocEnd(), " NS_CONSUMES_SELF");
+ Editor->commit(commit);
+ }
+
+ // Trivial case: the method is return-annotated and has no arguments.
+ if (MethodIsReturnAnnotated &&
+ (MethodDecl->param_begin() == MethodDecl->param_end()))
+ return;
+
+ if (!MethodIsReturnAnnotated) {
+ RetEffect Ret = CE.getReturnValue();
+ if ((Ret.getObjKind() == RetEffect::CF ||
+ Ret.getObjKind() == RetEffect::ObjC) &&
+ (Ret.isOwned() || Ret.notOwned())) {
+ AddCFAnnotations(Ctx, CE, MethodDecl, false);
+ return;
+ } else if (!AuditedType(MethodDecl->getReturnType()))
+ return;
+ }
+
+ // At this point the result type is either annotated or audited.
+ // Now, what about the argument types?
+ ArrayRef<ArgEffect> AEArgs = CE.getArgs();
+ unsigned i = 0;
+ for (ObjCMethodDecl::param_const_iterator pi = MethodDecl->param_begin(),
+ pe = MethodDecl->param_end(); pi != pe; ++pi, ++i) {
+ const ParmVarDecl *pd = *pi;
+ ArgEffect AE = AEArgs[i];
+ if ((AE == DecRef && !pd->hasAttr<CFConsumedAttr>()) || AE == IncRef ||
+ !AuditedType(pd->getType())) {
+ AddCFAnnotations(Ctx, CE, MethodDecl, MethodIsReturnAnnotated);
+ return;
+ }
+ }
+ return;
+}
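+
+// For example (illustrative): an unannotated method returning an owned
+// CF object might become
+//   - (CFStringRef)copyName CF_RETURNS_RETAINED;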
+
+namespace {
+class SuperInitChecker : public RecursiveASTVisitor<SuperInitChecker> {
+public:
+ bool shouldVisitTemplateInstantiations() const { return false; }
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ if (E->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
+ if (E->getMethodFamily() == OMF_init)
+ return false;
+ }
+ return true;
+ }
+};
+} // anonymous namespace
+
+static bool hasSuperInitCall(const ObjCMethodDecl *MD) {
+ return !SuperInitChecker().TraverseStmt(MD->getBody());
+}
+
+void ObjCMigrateASTConsumer::inferDesignatedInitializers(
+ ASTContext &Ctx,
+ const ObjCImplementationDecl *ImplD) {
+
+ const ObjCInterfaceDecl *IFace = ImplD->getClassInterface();
+ if (!IFace || IFace->hasDesignatedInitializers())
+ return;
+ if (!NSAPIObj->isMacroDefined("NS_DESIGNATED_INITIALIZER"))
+ return;
+
+ for (const auto *MD : ImplD->instance_methods()) {
+ if (MD->isDeprecated() ||
+ MD->getMethodFamily() != OMF_init ||
+ MD->isDesignatedInitializerForTheInterface())
+ continue;
+ const ObjCMethodDecl *IFaceM = IFace->getMethod(MD->getSelector(),
+ /*isInstance=*/true);
+ if (!IFaceM)
+ continue;
+ if (hasSuperInitCall(MD)) {
+ edit::Commit commit(*Editor);
+ commit.insert(IFaceM->getLocEnd(), " NS_DESIGNATED_INITIALIZER");
+ Editor->commit(commit);
+ }
+ }
+}
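+
+// For example (illustrative): if the implementation of -initWithName: calls
+// [super init...], its declaration in the interface becomes
+//   - (instancetype)initWithName:(NSString *)name NS_DESIGNATED_INITIALIZER;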
+
+bool ObjCMigrateASTConsumer::InsertFoundation(ASTContext &Ctx,
+ SourceLocation Loc) {
+ if (FoundationIncluded)
+ return true;
+ if (Loc.isInvalid())
+ return false;
+ edit::Commit commit(*Editor);
+ if (Ctx.getLangOpts().Modules)
+ commit.insert(Loc, "#ifndef NS_ENUM\n@import Foundation;\n#endif\n");
+ else
+ commit.insert(Loc, "#ifndef NS_ENUM\n#import <Foundation/Foundation.h>\n#endif\n");
+ Editor->commit(commit);
+ FoundationIncluded = true;
+ return true;
+}
+
+namespace {
+
+class RewritesReceiver : public edit::EditsReceiver {
+ Rewriter &Rewrite;
+
+public:
+ RewritesReceiver(Rewriter &Rewrite) : Rewrite(Rewrite) { }
+
+ void insert(SourceLocation loc, StringRef text) override {
+ Rewrite.InsertText(loc, text);
+ }
+ void replace(CharSourceRange range, StringRef text) override {
+ Rewrite.ReplaceText(range.getBegin(), Rewrite.getRangeSize(range), text);
+ }
+};
+
+class JSONEditWriter : public edit::EditsReceiver {
+ SourceManager &SourceMgr;
+ llvm::raw_ostream &OS;
+
+public:
+ JSONEditWriter(SourceManager &SM, llvm::raw_ostream &OS)
+ : SourceMgr(SM), OS(OS) {
+ OS << "[\n";
+ }
+ ~JSONEditWriter() override { OS << "]\n"; }
+
+private:
+ struct EntryWriter {
+ SourceManager &SourceMgr;
+ llvm::raw_ostream &OS;
+
+ EntryWriter(SourceManager &SM, llvm::raw_ostream &OS)
+ : SourceMgr(SM), OS(OS) {
+ OS << " {\n";
+ }
+ ~EntryWriter() {
+ OS << " },\n";
+ }
+
+ void writeLoc(SourceLocation Loc) {
+ FileID FID;
+ unsigned Offset;
+ std::tie(FID, Offset) = SourceMgr.getDecomposedLoc(Loc);
+ assert(FID.isValid());
+ SmallString<200> Path =
+ StringRef(SourceMgr.getFileEntryForID(FID)->getName());
+ llvm::sys::fs::make_absolute(Path);
+ OS << " \"file\": \"";
+ OS.write_escaped(Path.str()) << "\",\n";
+ OS << " \"offset\": " << Offset << ",\n";
+ }
+
+ void writeRemove(CharSourceRange Range) {
+ assert(Range.isCharRange());
+ std::pair<FileID, unsigned> Begin =
+ SourceMgr.getDecomposedLoc(Range.getBegin());
+ std::pair<FileID, unsigned> End =
+ SourceMgr.getDecomposedLoc(Range.getEnd());
+ assert(Begin.first == End.first);
+ assert(Begin.second <= End.second);
+ unsigned Length = End.second - Begin.second;
+
+ OS << " \"remove\": " << Length << ",\n";
+ }
+
+ void writeText(StringRef Text) {
+ OS << " \"text\": \"";
+ OS.write_escaped(Text) << "\",\n";
+ }
+ };
+
+ void insert(SourceLocation Loc, StringRef Text) override {
+ EntryWriter Writer(SourceMgr, OS);
+ Writer.writeLoc(Loc);
+ Writer.writeText(Text);
+ }
+
+ void replace(CharSourceRange Range, StringRef Text) override {
+ EntryWriter Writer(SourceMgr, OS);
+ Writer.writeLoc(Range.getBegin());
+ Writer.writeRemove(Range);
+ Writer.writeText(Text);
+ }
+
+ void remove(CharSourceRange Range) override {
+ EntryWriter Writer(SourceMgr, OS);
+ Writer.writeLoc(Range.getBegin());
+ Writer.writeRemove(Range);
+ }
+};
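+
+// The emitted stream is a JSON-style array of edit entries, roughly of this
+// shape (illustrative; note the trailing commas that this writer produces):
+//
+//   [
+//    {
+//     "file": "/path/to/file.m",
+//     "offset": 123,
+//     "remove": 4,
+//     "text": "instancetype",
+//    },
+//   ]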
+
+}
+
+void ObjCMigrateASTConsumer::HandleTranslationUnit(ASTContext &Ctx) {
+
+ TranslationUnitDecl *TU = Ctx.getTranslationUnitDecl();
+ if (ASTMigrateActions & FrontendOptions::ObjCMT_MigrateDecls) {
+ for (DeclContext::decl_iterator D = TU->decls_begin(), DEnd = TU->decls_end();
+ D != DEnd; ++D) {
+ FileID FID = PP.getSourceManager().getFileID((*D)->getLocation());
+ if (FID.isValid())
+ if (FileId.isValid() && FileId != FID) {
+ if (ASTMigrateActions & FrontendOptions::ObjCMT_Annotation)
+ AnnotateImplicitBridging(Ctx);
+ }
+
+ if (ObjCInterfaceDecl *CDecl = dyn_cast<ObjCInterfaceDecl>(*D))
+ if (canModify(CDecl))
+ migrateObjCContainerDecl(Ctx, CDecl);
+ if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(*D)) {
+ if (canModify(CatDecl))
+ migrateObjCContainerDecl(Ctx, CatDecl);
+ }
+ else if (ObjCProtocolDecl *PDecl = dyn_cast<ObjCProtocolDecl>(*D)) {
+ ObjCProtocolDecls.insert(PDecl->getCanonicalDecl());
+ if (canModify(PDecl))
+ migrateObjCContainerDecl(Ctx, PDecl);
+ }
+ else if (const ObjCImplementationDecl *ImpDecl =
+ dyn_cast<ObjCImplementationDecl>(*D)) {
+ if ((ASTMigrateActions & FrontendOptions::ObjCMT_ProtocolConformance) &&
+ canModify(ImpDecl))
+ migrateProtocolConformance(Ctx, ImpDecl);
+ }
+ else if (const EnumDecl *ED = dyn_cast<EnumDecl>(*D)) {
+ if (!(ASTMigrateActions & FrontendOptions::ObjCMT_NsMacros))
+ continue;
+ if (!canModify(ED))
+ continue;
+ DeclContext::decl_iterator N = D;
+ if (++N != DEnd) {
+ const TypedefDecl *TD = dyn_cast<TypedefDecl>(*N);
+ if (migrateNSEnumDecl(Ctx, ED, TD) && TD)
+ D++;
+ }
+ else
+ migrateNSEnumDecl(Ctx, ED, /*TypedefDecl */nullptr);
+ }
+ else if (const TypedefDecl *TD = dyn_cast<TypedefDecl>(*D)) {
+ if (!(ASTMigrateActions & FrontendOptions::ObjCMT_NsMacros))
+ continue;
+ if (!canModify(TD))
+ continue;
+ DeclContext::decl_iterator N = D;
+ if (++N == DEnd)
+ continue;
+ if (const EnumDecl *ED = dyn_cast<EnumDecl>(*N)) {
+ if (++N != DEnd)
+ if (const TypedefDecl *TDF = dyn_cast<TypedefDecl>(*N)) {
+ // Prefer the typedef-follows-enum pattern to the enum-follows-typedef one.
+ if (migrateNSEnumDecl(Ctx, ED, TDF)) {
+ ++D; ++D;
+ CacheObjCNSIntegerTypedefed(TD);
+ continue;
+ }
+ }
+ if (migrateNSEnumDecl(Ctx, ED, TD)) {
+ ++D;
+ continue;
+ }
+ }
+ CacheObjCNSIntegerTypedefed(TD);
+ }
+ else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*D)) {
+ if ((ASTMigrateActions & FrontendOptions::ObjCMT_Annotation) &&
+ canModify(FD))
+ migrateCFAnnotation(Ctx, FD);
+ }
+
+ if (ObjCContainerDecl *CDecl = dyn_cast<ObjCContainerDecl>(*D)) {
+ bool CanModify = canModify(CDecl);
+ // migrate methods which can have instancetype as their result type.
+ if ((ASTMigrateActions & FrontendOptions::ObjCMT_Instancetype) &&
+ CanModify)
+ migrateAllMethodInstaceType(Ctx, CDecl);
+ // annotate methods with CF annotations.
+ if ((ASTMigrateActions & FrontendOptions::ObjCMT_Annotation) &&
+ CanModify)
+ migrateARCSafeAnnotation(Ctx, CDecl);
+ }
+
+ if (const ObjCImplementationDecl *
+ ImplD = dyn_cast<ObjCImplementationDecl>(*D)) {
+ if ((ASTMigrateActions & FrontendOptions::ObjCMT_DesignatedInitializer) &&
+ canModify(ImplD))
+ inferDesignatedInitializers(Ctx, ImplD);
+ }
+ }
+ if (ASTMigrateActions & FrontendOptions::ObjCMT_Annotation)
+ AnnotateImplicitBridging(Ctx);
+ }
+
+ if (IsOutputFile) {
+ std::error_code EC;
+ llvm::raw_fd_ostream OS(MigrateDir, EC, llvm::sys::fs::F_None);
+ if (EC) {
+ DiagnosticsEngine &Diags = Ctx.getDiagnostics();
+ Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Error, "%0"))
+ << EC.message();
+ return;
+ }
+
+ JSONEditWriter Writer(Ctx.getSourceManager(), OS);
+ Editor->applyRewrites(Writer);
+ return;
+ }
+
+ Rewriter rewriter(Ctx.getSourceManager(), Ctx.getLangOpts());
+ RewritesReceiver Rec(rewriter);
+ Editor->applyRewrites(Rec);
+
+ for (Rewriter::buffer_iterator
+ I = rewriter.buffer_begin(), E = rewriter.buffer_end(); I != E; ++I) {
+ FileID FID = I->first;
+ RewriteBuffer &buf = I->second;
+ const FileEntry *file = Ctx.getSourceManager().getFileEntryForID(FID);
+ assert(file);
+ SmallString<512> newText;
+ llvm::raw_svector_ostream vecOS(newText);
+ buf.write(vecOS);
+ std::unique_ptr<llvm::MemoryBuffer> memBuf(
+ llvm::MemoryBuffer::getMemBufferCopy(
+ StringRef(newText.data(), newText.size()), file->getName()));
+ SmallString<64> filePath(file->getName());
+ FileMgr.FixupRelativePath(filePath);
+ Remapper.remap(filePath.str(), std::move(memBuf));
+ }
+
+ if (IsOutputFile) {
+ Remapper.flushToFile(MigrateDir, Ctx.getDiagnostics());
+ } else {
+ Remapper.flushToDisk(MigrateDir, Ctx.getDiagnostics());
+ }
+}
+
+bool MigrateSourceAction::BeginInvocation(CompilerInstance &CI) {
+ CI.getDiagnostics().setIgnoreAllWarnings(true);
+ return true;
+}
+
+static std::vector<std::string> getWhiteListFilenames(StringRef DirPath) {
+ using namespace llvm::sys::fs;
+ using namespace llvm::sys::path;
+
+ std::vector<std::string> Filenames;
+ if (DirPath.empty() || !is_directory(DirPath))
+ return Filenames;
+
+ std::error_code EC;
+ directory_iterator DI = directory_iterator(DirPath, EC);
+ directory_iterator DE;
+ for (; !EC && DI != DE; DI = DI.increment(EC)) {
+ if (is_regular_file(DI->path()))
+ Filenames.push_back(filename(DI->path()));
+ }
+
+ return Filenames;
+}
+
+std::unique_ptr<ASTConsumer>
+MigrateSourceAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ PPConditionalDirectiveRecord *
+ PPRec = new PPConditionalDirectiveRecord(CI.getSourceManager());
+ unsigned ObjCMTAction = CI.getFrontendOpts().ObjCMTAction;
+ unsigned ObjCMTOpts = ObjCMTAction;
+ // These are companion flags; they do not enable transformations by themselves.
+ ObjCMTOpts &= ~(FrontendOptions::ObjCMT_AtomicProperty |
+ FrontendOptions::ObjCMT_NsAtomicIOSOnlyProperty);
+ if (ObjCMTOpts == FrontendOptions::ObjCMT_None) {
+ // If no specific option was given, enable literals+subscripting transforms
+ // by default.
+ ObjCMTAction |= FrontendOptions::ObjCMT_Literals |
+ FrontendOptions::ObjCMT_Subscripting;
+ }
+ CI.getPreprocessor().addPPCallbacks(std::unique_ptr<PPCallbacks>(PPRec));
+ std::vector<std::string> WhiteList =
+ getWhiteListFilenames(CI.getFrontendOpts().ObjCMTWhiteListPath);
+ return llvm::make_unique<ObjCMigrateASTConsumer>(
+ CI.getFrontendOpts().OutputFile, ObjCMTAction, Remapper,
+ CI.getFileManager(), PPRec, CI.getPreprocessor(),
+ /*isOutputFile=*/true, WhiteList);
+}
+
+namespace {
+struct EditEntry {
+ const FileEntry *File;
+ unsigned Offset;
+ unsigned RemoveLen;
+ std::string Text;
+
+ EditEntry() : File(), Offset(), RemoveLen() {}
+};
+}
+
+namespace llvm {
+template<> struct DenseMapInfo<EditEntry> {
+ static inline EditEntry getEmptyKey() {
+ EditEntry Entry;
+ Entry.Offset = unsigned(-1);
+ return Entry;
+ }
+ static inline EditEntry getTombstoneKey() {
+ EditEntry Entry;
+ Entry.Offset = unsigned(-2);
+ return Entry;
+ }
+ static unsigned getHashValue(const EditEntry& Val) {
+ llvm::FoldingSetNodeID ID;
+ ID.AddPointer(Val.File);
+ ID.AddInteger(Val.Offset);
+ ID.AddInteger(Val.RemoveLen);
+ ID.AddString(Val.Text);
+ return ID.ComputeHash();
+ }
+ static bool isEqual(const EditEntry &LHS, const EditEntry &RHS) {
+ return LHS.File == RHS.File &&
+ LHS.Offset == RHS.Offset &&
+ LHS.RemoveLen == RHS.RemoveLen &&
+ LHS.Text == RHS.Text;
+ }
+};
+}
+
+namespace {
+class RemapFileParser {
+ FileManager &FileMgr;
+
+public:
+ RemapFileParser(FileManager &FileMgr) : FileMgr(FileMgr) { }
+
+ bool parse(StringRef File, SmallVectorImpl<EditEntry> &Entries) {
+ using namespace llvm::yaml;
+
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> FileBufOrErr =
+ llvm::MemoryBuffer::getFile(File);
+ if (!FileBufOrErr)
+ return true;
+
+ llvm::SourceMgr SM;
+ Stream YAMLStream(FileBufOrErr.get()->getMemBufferRef(), SM);
+ document_iterator I = YAMLStream.begin();
+ if (I == YAMLStream.end())
+ return true;
+ Node *Root = I->getRoot();
+ if (!Root)
+ return true;
+
+ SequenceNode *SeqNode = dyn_cast<SequenceNode>(Root);
+ if (!SeqNode)
+ return true;
+
+ for (SequenceNode::iterator
+ AI = SeqNode->begin(), AE = SeqNode->end(); AI != AE; ++AI) {
+ MappingNode *MapNode = dyn_cast<MappingNode>(&*AI);
+ if (!MapNode)
+ continue;
+ parseEdit(MapNode, Entries);
+ }
+
+ return false;
+ }
+
+private:
+ void parseEdit(llvm::yaml::MappingNode *Node,
+ SmallVectorImpl<EditEntry> &Entries) {
+ using namespace llvm::yaml;
+ EditEntry Entry;
+ bool Ignore = false;
+
+ for (MappingNode::iterator
+ KVI = Node->begin(), KVE = Node->end(); KVI != KVE; ++KVI) {
+ ScalarNode *KeyString = dyn_cast<ScalarNode>((*KVI).getKey());
+ if (!KeyString)
+ continue;
+ SmallString<10> KeyStorage;
+ StringRef Key = KeyString->getValue(KeyStorage);
+
+ ScalarNode *ValueString = dyn_cast<ScalarNode>((*KVI).getValue());
+ if (!ValueString)
+ continue;
+ SmallString<64> ValueStorage;
+ StringRef Val = ValueString->getValue(ValueStorage);
+
+ if (Key == "file") {
+ const FileEntry *FE = FileMgr.getFile(Val);
+ if (!FE)
+ Ignore = true;
+ Entry.File = FE;
+ } else if (Key == "offset") {
+ if (Val.getAsInteger(10, Entry.Offset))
+ Ignore = true;
+ } else if (Key == "remove") {
+ if (Val.getAsInteger(10, Entry.RemoveLen))
+ Ignore = true;
+ } else if (Key == "text") {
+ Entry.Text = Val;
+ }
+ }
+
+ if (!Ignore)
+ Entries.push_back(Entry);
+ }
+};
+}
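+
+// The remap files parsed here are YAML sequences of edit entries; the
+// JSON-style output of JSONEditWriter above is accepted as well, e.g.
+// (illustrative):
+//
+//   [
+//    { "file": "/path/to/file.m", "offset": 123, "remove": 4, "text": "x" }
+//   ]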
+
+static bool reportDiag(const Twine &Err, DiagnosticsEngine &Diag) {
+ Diag.Report(Diag.getCustomDiagID(DiagnosticsEngine::Error, "%0"))
+ << Err.str();
+ return true;
+}
+
+static std::string applyEditsToTemp(const FileEntry *FE,
+ ArrayRef<EditEntry> Edits,
+ FileManager &FileMgr,
+ DiagnosticsEngine &Diag) {
+ using namespace llvm::sys;
+
+ SourceManager SM(Diag, FileMgr);
+ FileID FID = SM.createFileID(FE, SourceLocation(), SrcMgr::C_User);
+ LangOptions LangOpts;
+ edit::EditedSource Editor(SM, LangOpts);
+ for (ArrayRef<EditEntry>::iterator
+ I = Edits.begin(), E = Edits.end(); I != E; ++I) {
+ const EditEntry &Entry = *I;
+ assert(Entry.File == FE);
+ SourceLocation Loc =
+ SM.getLocForStartOfFile(FID).getLocWithOffset(Entry.Offset);
+ CharSourceRange Range;
+ if (Entry.RemoveLen != 0) {
+ Range = CharSourceRange::getCharRange(Loc,
+ Loc.getLocWithOffset(Entry.RemoveLen));
+ }
+
+ edit::Commit commit(Editor);
+ if (Range.isInvalid()) {
+ commit.insert(Loc, Entry.Text);
+ } else if (Entry.Text.empty()) {
+ commit.remove(Range);
+ } else {
+ commit.replace(Range, Entry.Text);
+ }
+ Editor.commit(commit);
+ }
+
+ Rewriter rewriter(SM, LangOpts);
+ RewritesReceiver Rec(rewriter);
+ Editor.applyRewrites(Rec);
+
+ const RewriteBuffer *Buf = rewriter.getRewriteBufferFor(FID);
+ SmallString<512> NewText;
+ llvm::raw_svector_ostream OS(NewText);
+ Buf->write(OS);
+
+ SmallString<64> TempPath;
+ int FD;
+ if (fs::createTemporaryFile(path::filename(FE->getName()),
+ path::extension(FE->getName()).drop_front(), FD,
+ TempPath)) {
+ reportDiag("Could not create file: " + TempPath.str(), Diag);
+ return std::string();
+ }
+
+ llvm::raw_fd_ostream TmpOut(FD, /*shouldClose=*/true);
+ TmpOut.write(NewText.data(), NewText.size());
+ TmpOut.close();
+
+ return TempPath.str();
+}
+
+bool arcmt::getFileRemappingsFromFileList(
+ std::vector<std::pair<std::string,std::string> > &remap,
+ ArrayRef<StringRef> remapFiles,
+ DiagnosticConsumer *DiagClient) {
+ bool hasErrorOccurred = false;
+
+ FileSystemOptions FSOpts;
+ FileManager FileMgr(FSOpts);
+ RemapFileParser Parser(FileMgr);
+
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagID, new DiagnosticOptions,
+ DiagClient, /*ShouldOwnClient=*/false));
+
+ typedef llvm::DenseMap<const FileEntry *, std::vector<EditEntry> >
+ FileEditEntriesTy;
+ FileEditEntriesTy FileEditEntries;
+
+ llvm::DenseSet<EditEntry> EntriesSet;
+
+ for (ArrayRef<StringRef>::iterator
+ I = remapFiles.begin(), E = remapFiles.end(); I != E; ++I) {
+ SmallVector<EditEntry, 16> Entries;
+ if (Parser.parse(*I, Entries))
+ continue;
+
+ for (SmallVectorImpl<EditEntry>::iterator
+ EI = Entries.begin(), EE = Entries.end(); EI != EE; ++EI) {
+ EditEntry &Entry = *EI;
+ if (!Entry.File)
+ continue;
+ std::pair<llvm::DenseSet<EditEntry>::iterator, bool>
+ Insert = EntriesSet.insert(Entry);
+ if (!Insert.second)
+ continue;
+
+ FileEditEntries[Entry.File].push_back(Entry);
+ }
+ }
+
+ for (FileEditEntriesTy::iterator
+ I = FileEditEntries.begin(), E = FileEditEntries.end(); I != E; ++I) {
+ std::string TempFile = applyEditsToTemp(I->first, I->second,
+ FileMgr, *Diags);
+ if (TempFile.empty()) {
+ hasErrorOccurred = true;
+ continue;
+ }
+
+ remap.emplace_back(I->first->getName(), TempFile);
+ }
+
+ return hasErrorOccurred;
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/PlistReporter.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/PlistReporter.cpp
new file mode 100644
index 0000000..9a51690
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/PlistReporter.cpp
@@ -0,0 +1,126 @@
+//===--- PlistReporter.cpp - ARC Migrate Tool Plist Reporter ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Internals.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/PlistSupport.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+using namespace clang;
+using namespace arcmt;
+using namespace markup;
+
+static StringRef getLevelName(DiagnosticsEngine::Level Level) {
+ switch (Level) {
+ case DiagnosticsEngine::Ignored:
+ llvm_unreachable("ignored");
+ case DiagnosticsEngine::Note:
+ return "note";
+ case DiagnosticsEngine::Remark:
+ case DiagnosticsEngine::Warning:
+ return "warning";
+ case DiagnosticsEngine::Fatal:
+ case DiagnosticsEngine::Error:
+ return "error";
+ }
+ llvm_unreachable("Invalid DiagnosticsEngine level!");
+}
+
+void arcmt::writeARCDiagsToPlist(const std::string &outPath,
+ ArrayRef<StoredDiagnostic> diags,
+ SourceManager &SM,
+ const LangOptions &LangOpts) {
+ DiagnosticIDs DiagIDs;
+
+ // Build up a set of FIDs that we use by scanning the locations and
+ // ranges of the diagnostics.
+ FIDMap FM;
+ SmallVector<FileID, 10> Fids;
+
+ for (ArrayRef<StoredDiagnostic>::iterator
+ I = diags.begin(), E = diags.end(); I != E; ++I) {
+ const StoredDiagnostic &D = *I;
+
+ AddFID(FM, Fids, SM, D.getLocation());
+
+ for (StoredDiagnostic::range_iterator
+ RI = D.range_begin(), RE = D.range_end(); RI != RE; ++RI) {
+ AddFID(FM, Fids, SM, RI->getBegin());
+ AddFID(FM, Fids, SM, RI->getEnd());
+ }
+ }
+
+ std::error_code EC;
+ llvm::raw_fd_ostream o(outPath, EC, llvm::sys::fs::F_Text);
+ if (EC) {
+ llvm::errs() << "error: could not create file: " << outPath << '\n';
+ return;
+ }
+
+ EmitPlistHeader(o);
+
+ // Write the root object: a <dict> containing...
+ // - "files", an <array> mapping from FIDs to file names
+ // - "diagnostics", an <array> containing the diagnostics
+ o << "<dict>\n"
+ " <key>files</key>\n"
+ " <array>\n";
+
+ for (FileID FID : Fids)
+ EmitString(o << " ", SM.getFileEntryForID(FID)->getName()) << '\n';
+
+ o << " </array>\n"
+ " <key>diagnostics</key>\n"
+ " <array>\n";
+
+ for (ArrayRef<StoredDiagnostic>::iterator
+ DI = diags.begin(), DE = diags.end(); DI != DE; ++DI) {
+
+ const StoredDiagnostic &D = *DI;
+
+ if (D.getLevel() == DiagnosticsEngine::Ignored)
+ continue;
+
+ o << " <dict>\n";
+
+ // Output the diagnostic.
+ o << " <key>description</key>";
+ EmitString(o, D.getMessage()) << '\n';
+ o << " <key>category</key>";
+ EmitString(o, DiagIDs.getCategoryNameFromID(
+ DiagIDs.getCategoryNumberForDiag(D.getID()))) << '\n';
+ o << " <key>type</key>";
+ EmitString(o, getLevelName(D.getLevel())) << '\n';
+
+ // Output the location of the bug.
+ o << " <key>location</key>\n";
+ EmitLocation(o, SM, D.getLocation(), FM, 2);
+
+ // Output the ranges (if any).
+ if (!D.getRanges().empty()) {
+ o << " <key>ranges</key>\n";
+ o << " <array>\n";
+ for (auto &R : D.getRanges()) {
+ CharSourceRange ExpansionRange(SM.getExpansionRange(R.getAsRange()),
+ R.isTokenRange());
+ EmitRange(o, SM, Lexer::getAsCharRange(ExpansionRange, SM, LangOpts),
+ FM, 4);
+ }
+ o << " </array>\n";
+ }
+
+ // Close up the entry.
+ o << " </dict>\n";
+ }
+
+ o << " </array>\n";
+
+ // Finish.
+ o << "</dict>\n</plist>";
+}
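+
+// The output is a plist shaped roughly like this (illustrative):
+//
+//   <dict>
+//    <key>files</key>
+//    <array> <string>/path/to/file.m</string> </array>
+//    <key>diagnostics</key>
+//    <array>
+//     <dict>
+//      <key>description</key><string>...</string>
+//      <key>category</key><string>...</string>
+//      <key>type</key><string>warning</string>
+//      <key>location</key> ...
+//     </dict>
+//    </array>
+//   </dict>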
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransAPIUses.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransAPIUses.cpp
new file mode 100644
index 0000000..40c8a07
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransAPIUses.cpp
@@ -0,0 +1,108 @@
+//===--- TransAPIUses.cpp - Transformations to ARC mode -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// checkAPIUses:
+//
+// Emits error/fix with some API uses that are obsolete or not safe in ARC mode:
+//
+// - NSInvocation's [get/set]ReturnValue and [get/set]Argument are only safe
+// with __unsafe_unretained objects.
+// - Calling -zone gets replaced with 'nil'.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/Sema/SemaDiagnostic.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class APIChecker : public RecursiveASTVisitor<APIChecker> {
+ MigrationPass &Pass;
+
+ Selector getReturnValueSel, setReturnValueSel;
+ Selector getArgumentSel, setArgumentSel;
+
+ Selector zoneSel;
+public:
+ APIChecker(MigrationPass &pass) : Pass(pass) {
+ SelectorTable &sels = Pass.Ctx.Selectors;
+ IdentifierTable &ids = Pass.Ctx.Idents;
+ getReturnValueSel = sels.getUnarySelector(&ids.get("getReturnValue"));
+ setReturnValueSel = sels.getUnarySelector(&ids.get("setReturnValue"));
+
+ IdentifierInfo *selIds[2];
+ selIds[0] = &ids.get("getArgument");
+ selIds[1] = &ids.get("atIndex");
+ getArgumentSel = sels.getSelector(2, selIds);
+ selIds[0] = &ids.get("setArgument");
+ setArgumentSel = sels.getSelector(2, selIds);
+
+ zoneSel = sels.getNullarySelector(&ids.get("zone"));
+ }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ // NSInvocation.
+ if (E->isInstanceMessage() &&
+ E->getReceiverInterface() &&
+ E->getReceiverInterface()->getName() == "NSInvocation") {
+ StringRef selName;
+ if (E->getSelector() == getReturnValueSel)
+ selName = "getReturnValue";
+ else if (E->getSelector() == setReturnValueSel)
+ selName = "setReturnValue";
+ else if (E->getSelector() == getArgumentSel)
+ selName = "getArgument";
+ else if (E->getSelector() == setArgumentSel)
+ selName = "setArgument";
+ else
+ return true;
+
+ Expr *parm = E->getArg(0)->IgnoreParenCasts();
+ QualType pointee = parm->getType()->getPointeeType();
+ if (pointee.isNull())
+ return true;
+
+ if (pointee.getObjCLifetime() > Qualifiers::OCL_ExplicitNone)
+ Pass.TA.report(parm->getLocStart(),
+ diag::err_arcmt_nsinvocation_ownership,
+ parm->getSourceRange())
+ << selName;
+
+ return true;
+ }
+
+ // -zone.
+ if (E->isInstanceMessage() &&
+ E->getInstanceReceiver() &&
+ E->getSelector() == zoneSel &&
+ Pass.TA.hasDiagnostic(diag::err_unavailable,
+ diag::err_unavailable_message,
+ E->getSelectorLoc(0))) {
+ // Calling -zone is meaningless in ARC, change it to nil.
+ Transaction Trans(Pass.TA);
+ Pass.TA.clearDiagnostic(diag::err_unavailable,
+ diag::err_unavailable_message,
+ E->getSelectorLoc(0));
+ Pass.TA.replace(E->getSourceRange(), getNilString(Pass));
+ }
+ return true;
+ }
+};
+
+} // anonymous namespace
+
+void trans::checkAPIUses(MigrationPass &pass) {
+ APIChecker(pass).TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
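+
+// For example (illustrative): in ARC mode,
+//   [obj zone]
+// is rewritten to nil, and
+//   [invocation getArgument:&arg atIndex:2]
+// is reported unless 'arg' is __unsafe_unretained.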
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransARCAssign.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransARCAssign.cpp
new file mode 100644
index 0000000..80bfd22
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransARCAssign.cpp
@@ -0,0 +1,78 @@
+//===--- TransARCAssign.cpp - Transformations to ARC mode -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// makeAssignARCSafe:
+//
+// Add '__strong' where appropriate.
+//
+// for (id x in collection) {
+// x = 0;
+// }
+// ---->
+// for (__strong id x in collection) {
+// x = 0;
+// }
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/Sema/SemaDiagnostic.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class ARCAssignChecker : public RecursiveASTVisitor<ARCAssignChecker> {
+ MigrationPass &Pass;
+ llvm::DenseSet<VarDecl *> ModifiedVars;
+
+public:
+ ARCAssignChecker(MigrationPass &pass) : Pass(pass) { }
+
+ bool VisitBinaryOperator(BinaryOperator *Exp) {
+ if (Exp->getType()->isDependentType())
+ return true;
+
+ Expr *E = Exp->getLHS();
+ SourceLocation OrigLoc = E->getExprLoc();
+ SourceLocation Loc = OrigLoc;
+ DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts());
+ if (declRef && isa<VarDecl>(declRef->getDecl())) {
+ ASTContext &Ctx = Pass.Ctx;
+ Expr::isModifiableLvalueResult IsLV = E->isModifiableLvalue(Ctx, &Loc);
+ if (IsLV != Expr::MLV_ConstQualified)
+ return true;
+ VarDecl *var = cast<VarDecl>(declRef->getDecl());
+ if (var->isARCPseudoStrong()) {
+ Transaction Trans(Pass.TA);
+ if (Pass.TA.clearDiagnostic(diag::err_typecheck_arr_assign_enumeration,
+ Exp->getOperatorLoc())) {
+ if (!ModifiedVars.count(var)) {
+ TypeLoc TLoc = var->getTypeSourceInfo()->getTypeLoc();
+ Pass.TA.insert(TLoc.getBeginLoc(), "__strong ");
+ ModifiedVars.insert(var);
+ }
+ }
+ }
+ }
+
+ return true;
+ }
+};
+
+} // anonymous namespace
+
+void trans::makeAssignARCSafe(MigrationPass &pass) {
+ ARCAssignChecker assignCheck(pass);
+ assignCheck.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransAutoreleasePool.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransAutoreleasePool.cpp
new file mode 100644
index 0000000..a8a99fa
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransAutoreleasePool.cpp
@@ -0,0 +1,435 @@
+//===--- TransAutoreleasePool.cpp - Transformations to ARC mode -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// rewriteAutoreleasePool:
+//
+// Calls to NSAutoreleasePool will be rewritten as an @autoreleasepool scope.
+//
+// NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+// ...
+// [pool release];
+// ---->
+// @autoreleasepool {
+// ...
+// }
+//
+// An NSAutoreleasePool will not be touched if:
+// - There is no corresponding -release/-drain in the same scope
+// - Not all references to the NSAutoreleasePool variable can be removed
+// - There is a variable that is declared inside the intended @autoreleasepool
+// scope which is also used outside of it.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include <map>
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class ReleaseCollector : public RecursiveASTVisitor<ReleaseCollector> {
+ Decl *Dcl;
+ SmallVectorImpl<ObjCMessageExpr *> &Releases;
+
+public:
+ ReleaseCollector(Decl *D, SmallVectorImpl<ObjCMessageExpr *> &releases)
+ : Dcl(D), Releases(releases) { }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ if (!E->isInstanceMessage())
+ return true;
+ if (E->getMethodFamily() != OMF_release)
+ return true;
+ Expr *instance = E->getInstanceReceiver()->IgnoreParenCasts();
+ if (DeclRefExpr *DE = dyn_cast<DeclRefExpr>(instance)) {
+ if (DE->getDecl() == Dcl)
+ Releases.push_back(E);
+ }
+ return true;
+ }
+};
+
+}
+
+namespace {
+
+class AutoreleasePoolRewriter
+ : public RecursiveASTVisitor<AutoreleasePoolRewriter> {
+public:
+ AutoreleasePoolRewriter(MigrationPass &pass)
+ : Body(nullptr), Pass(pass) {
+ PoolII = &pass.Ctx.Idents.get("NSAutoreleasePool");
+ DrainSel = pass.Ctx.Selectors.getNullarySelector(
+ &pass.Ctx.Idents.get("drain"));
+ }
+
+ void transformBody(Stmt *body, Decl *ParentD) {
+ Body = body;
+ TraverseStmt(body);
+ }
+
+ ~AutoreleasePoolRewriter() {
+ SmallVector<VarDecl *, 8> VarsToHandle;
+
+ for (std::map<VarDecl *, PoolVarInfo>::iterator
+ I = PoolVars.begin(), E = PoolVars.end(); I != E; ++I) {
+ VarDecl *var = I->first;
+ PoolVarInfo &info = I->second;
+
+ // Check that we can handle/rewrite all references of the pool.
+
+ clearRefsIn(info.Dcl, info.Refs);
+ for (SmallVectorImpl<PoolScope>::iterator
+ scpI = info.Scopes.begin(),
+ scpE = info.Scopes.end(); scpI != scpE; ++scpI) {
+ PoolScope &scope = *scpI;
+ clearRefsIn(*scope.Begin, info.Refs);
+ clearRefsIn(*scope.End, info.Refs);
+ clearRefsIn(scope.Releases.begin(), scope.Releases.end(), info.Refs);
+ }
+
+ // If even one reference is not handled, we will not do anything about
+ // that pool variable.
+ if (info.Refs.empty())
+ VarsToHandle.push_back(var);
+ }
+
+ for (unsigned i = 0, e = VarsToHandle.size(); i != e; ++i) {
+ PoolVarInfo &info = PoolVars[VarsToHandle[i]];
+
+ Transaction Trans(Pass.TA);
+
+ clearUnavailableDiags(info.Dcl);
+ Pass.TA.removeStmt(info.Dcl);
+
+ // Add "@autoreleasepool { }"
+ for (SmallVectorImpl<PoolScope>::iterator
+ scpI = info.Scopes.begin(),
+ scpE = info.Scopes.end(); scpI != scpE; ++scpI) {
+ PoolScope &scope = *scpI;
+ clearUnavailableDiags(*scope.Begin);
+ clearUnavailableDiags(*scope.End);
+ if (scope.IsFollowedBySimpleReturnStmt) {
+ // Include the return in the scope.
+ Pass.TA.replaceStmt(*scope.Begin, "@autoreleasepool {");
+ Pass.TA.removeStmt(*scope.End);
+ Stmt::child_iterator retI = scope.End;
+ ++retI;
+ SourceLocation afterSemi = findLocationAfterSemi((*retI)->getLocEnd(),
+ Pass.Ctx);
+ assert(afterSemi.isValid() &&
+ "Didn't we check before setting IsFollowedBySimpleReturnStmt "
+ "to true?");
+ Pass.TA.insertAfterToken(afterSemi, "\n}");
+ Pass.TA.increaseIndentation(
+ SourceRange(scope.getIndentedRange().getBegin(),
+ (*retI)->getLocEnd()),
+ scope.CompoundParent->getLocStart());
+ } else {
+ Pass.TA.replaceStmt(*scope.Begin, "@autoreleasepool {");
+ Pass.TA.replaceStmt(*scope.End, "}");
+ Pass.TA.increaseIndentation(scope.getIndentedRange(),
+ scope.CompoundParent->getLocStart());
+ }
+ }
+
+ // Remove rest of pool var references.
+ for (SmallVectorImpl<PoolScope>::iterator
+ scpI = info.Scopes.begin(),
+ scpE = info.Scopes.end(); scpI != scpE; ++scpI) {
+ PoolScope &scope = *scpI;
+ for (SmallVectorImpl<ObjCMessageExpr *>::iterator
+ relI = scope.Releases.begin(),
+ relE = scope.Releases.end(); relI != relE; ++relI) {
+ clearUnavailableDiags(*relI);
+ Pass.TA.removeStmt(*relI);
+ }
+ }
+ }
+ }
+
+ bool VisitCompoundStmt(CompoundStmt *S) {
+ SmallVector<PoolScope, 4> Scopes;
+
+ for (Stmt::child_iterator
+ I = S->body_begin(), E = S->body_end(); I != E; ++I) {
+ Stmt *child = getEssential(*I);
+ if (DeclStmt *DclS = dyn_cast<DeclStmt>(child)) {
+ if (DclS->isSingleDecl()) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(DclS->getSingleDecl())) {
+ if (isNSAutoreleasePool(VD->getType())) {
+ PoolVarInfo &info = PoolVars[VD];
+ info.Dcl = DclS;
+ collectRefs(VD, S, info.Refs);
+ // Does this statement follow the pattern:
+ // NSAutoreleasePool * pool = [NSAutoreleasePool new];
+ if (isPoolCreation(VD->getInit())) {
+ Scopes.push_back(PoolScope());
+ Scopes.back().PoolVar = VD;
+ Scopes.back().CompoundParent = S;
+ Scopes.back().Begin = I;
+ }
+ }
+ }
+ }
+ } else if (BinaryOperator *bop = dyn_cast<BinaryOperator>(child)) {
+ if (DeclRefExpr *dref = dyn_cast<DeclRefExpr>(bop->getLHS())) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(dref->getDecl())) {
+ // Does this statement follow the pattern:
+ // pool = [NSAutoreleasePool new];
+ if (isNSAutoreleasePool(VD->getType()) &&
+ isPoolCreation(bop->getRHS())) {
+ Scopes.push_back(PoolScope());
+ Scopes.back().PoolVar = VD;
+ Scopes.back().CompoundParent = S;
+ Scopes.back().Begin = I;
+ }
+ }
+ }
+ }
+
+ if (Scopes.empty())
+ continue;
+
+ if (isPoolDrain(Scopes.back().PoolVar, child)) {
+ PoolScope &scope = Scopes.back();
+ scope.End = I;
+ handlePoolScope(scope, S);
+ Scopes.pop_back();
+ }
+ }
+ return true;
+ }
+
+private:
+ void clearUnavailableDiags(Stmt *S) {
+ if (S)
+ Pass.TA.clearDiagnostic(diag::err_unavailable,
+ diag::err_unavailable_message,
+ S->getSourceRange());
+ }
+
+ struct PoolScope {
+ VarDecl *PoolVar;
+ CompoundStmt *CompoundParent;
+ Stmt::child_iterator Begin;
+ Stmt::child_iterator End;
+ bool IsFollowedBySimpleReturnStmt;
+ SmallVector<ObjCMessageExpr *, 4> Releases;
+
+ PoolScope() : PoolVar(nullptr), CompoundParent(nullptr), Begin(), End(),
+ IsFollowedBySimpleReturnStmt(false) { }
+
+ SourceRange getIndentedRange() const {
+ Stmt::child_iterator rangeS = Begin;
+ ++rangeS;
+ if (rangeS == End)
+ return SourceRange();
+ Stmt::child_iterator rangeE = Begin;
+ for (Stmt::child_iterator I = rangeS; I != End; ++I)
+ ++rangeE;
+ return SourceRange((*rangeS)->getLocStart(), (*rangeE)->getLocEnd());
+ }
+ };
+
+ class NameReferenceChecker : public RecursiveASTVisitor<NameReferenceChecker>{
+ ASTContext &Ctx;
+ SourceRange ScopeRange;
+ SourceLocation &referenceLoc, &declarationLoc;
+
+ public:
+ NameReferenceChecker(ASTContext &ctx, PoolScope &scope,
+ SourceLocation &referenceLoc,
+ SourceLocation &declarationLoc)
+ : Ctx(ctx), referenceLoc(referenceLoc),
+ declarationLoc(declarationLoc) {
+ ScopeRange = SourceRange((*scope.Begin)->getLocStart(),
+ (*scope.End)->getLocStart());
+ }
+
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ return checkRef(E->getLocation(), E->getDecl()->getLocation());
+ }
+
+ bool VisitTypedefTypeLoc(TypedefTypeLoc TL) {
+ return checkRef(TL.getBeginLoc(), TL.getTypedefNameDecl()->getLocation());
+ }
+
+ bool VisitTagTypeLoc(TagTypeLoc TL) {
+ return checkRef(TL.getBeginLoc(), TL.getDecl()->getLocation());
+ }
+
+ private:
+ bool checkRef(SourceLocation refLoc, SourceLocation declLoc) {
+ if (isInScope(declLoc)) {
+ referenceLoc = refLoc;
+ declarationLoc = declLoc;
+ return false;
+ }
+ return true;
+ }
+
+ bool isInScope(SourceLocation loc) {
+ if (loc.isInvalid())
+ return false;
+
+ SourceManager &SM = Ctx.getSourceManager();
+ if (SM.isBeforeInTranslationUnit(loc, ScopeRange.getBegin()))
+ return false;
+ return SM.isBeforeInTranslationUnit(loc, ScopeRange.getEnd());
+ }
+ };
+
+ void handlePoolScope(PoolScope &scope, CompoundStmt *compoundS) {
+ // Check that all names declared inside the scope are not used
+ // outside the scope.
+ {
+ bool nameUsedOutsideScope = false;
+ SourceLocation referenceLoc, declarationLoc;
+ Stmt::child_iterator SI = scope.End, SE = compoundS->body_end();
+ ++SI;
+ // Check if the autoreleasepool scope is followed by a simple return
+ // statement, in which case we will include the return in the scope.
+ if (SI != SE)
+ if (ReturnStmt *retS = dyn_cast<ReturnStmt>(*SI))
+ if ((retS->getRetValue() == nullptr ||
+ isa<DeclRefExpr>(retS->getRetValue()->IgnoreParenCasts())) &&
+ findLocationAfterSemi(retS->getLocEnd(), Pass.Ctx).isValid()) {
+ scope.IsFollowedBySimpleReturnStmt = true;
+ ++SI; // the return will be included in scope, don't check it.
+ }
+
+ for (; SI != SE; ++SI) {
+ nameUsedOutsideScope = !NameReferenceChecker(Pass.Ctx, scope,
+ referenceLoc,
+ declarationLoc).TraverseStmt(*SI);
+ if (nameUsedOutsideScope)
+ break;
+ }
+
+ // If not all references were cleared, it means that some variables,
+ // typenames, etc. declared inside the pool scope are used outside of it;
+ // we won't try to rewrite the pool.
+ if (nameUsedOutsideScope) {
+ Pass.TA.reportError("a name is referenced outside the "
+ "NSAutoreleasePool scope that it was declared in", referenceLoc);
+ Pass.TA.reportNote("name declared here", declarationLoc);
+ Pass.TA.reportNote("intended @autoreleasepool scope begins here",
+ (*scope.Begin)->getLocStart());
+ Pass.TA.reportNote("intended @autoreleasepool scope ends here",
+ (*scope.End)->getLocStart());
+ return;
+ }
+ }
+
+ // Collect all releases of the pool; they will be removed.
+ {
+ ReleaseCollector releaseColl(scope.PoolVar, scope.Releases);
+ Stmt::child_iterator I = scope.Begin;
+ ++I;
+ for (; I != scope.End; ++I)
+ releaseColl.TraverseStmt(*I);
+ }
+
+ PoolVars[scope.PoolVar].Scopes.push_back(scope);
+ }
+
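+ // Recognizes the two pool-creation forms handled below:
+ //   [NSAutoreleasePool new]  and  [[NSAutoreleasePool alloc] init]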
+ bool isPoolCreation(Expr *E) {
+ if (!E) return false;
+ E = getEssential(E);
+ ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E);
+ if (!ME) return false;
+ if (ME->getMethodFamily() == OMF_new &&
+ ME->getReceiverKind() == ObjCMessageExpr::Class &&
+ isNSAutoreleasePool(ME->getReceiverInterface()))
+ return true;
+ if (ME->getReceiverKind() == ObjCMessageExpr::Instance &&
+ ME->getMethodFamily() == OMF_init) {
+ Expr *rec = getEssential(ME->getInstanceReceiver());
+ if (ObjCMessageExpr *recME = dyn_cast_or_null<ObjCMessageExpr>(rec)) {
+ if (recME->getMethodFamily() == OMF_alloc &&
+ recME->getReceiverKind() == ObjCMessageExpr::Class &&
+ isNSAutoreleasePool(recME->getReceiverInterface()))
+ return true;
+ }
+ }
+
+ return false;
+ }
+
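+ // Recognizes '[pool release]' or '[pool drain]' sent to the given pool
+ // variable.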
+ bool isPoolDrain(VarDecl *poolVar, Stmt *S) {
+ if (!S) return false;
+ S = getEssential(S);
+ ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S);
+ if (!ME) return false;
+ if (ME->getReceiverKind() == ObjCMessageExpr::Instance) {
+ Expr *rec = getEssential(ME->getInstanceReceiver());
+ if (DeclRefExpr *dref = dyn_cast<DeclRefExpr>(rec))
+ if (dref->getDecl() == poolVar)
+ return ME->getMethodFamily() == OMF_release ||
+ ME->getSelector() == DrainSel;
+ }
+
+ return false;
+ }
+
+ bool isNSAutoreleasePool(ObjCInterfaceDecl *IDecl) {
+ return IDecl && IDecl->getIdentifier() == PoolII;
+ }
+
+ bool isNSAutoreleasePool(QualType Ty) {
+ QualType pointee = Ty->getPointeeType();
+ if (pointee.isNull())
+ return false;
+ if (const ObjCInterfaceType *interT = pointee->getAs<ObjCInterfaceType>())
+ return isNSAutoreleasePool(interT->getDecl());
+ return false;
+ }
+
+ static Expr *getEssential(Expr *E) {
+ return cast<Expr>(getEssential((Stmt*)E));
+ }
+ static Stmt *getEssential(Stmt *S) {
+ if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(S))
+ S = EWC->getSubExpr();
+ if (Expr *E = dyn_cast<Expr>(S))
+ S = E->IgnoreParenCasts();
+ return S;
+ }
+
+ Stmt *Body;
+ MigrationPass &Pass;
+
+ IdentifierInfo *PoolII;
+ Selector DrainSel;
+
+ struct PoolVarInfo {
+ DeclStmt *Dcl;
+ ExprSet Refs;
+ SmallVector<PoolScope, 2> Scopes;
+
+ PoolVarInfo() : Dcl(nullptr) { }
+ };
+
+ std::map<VarDecl *, PoolVarInfo> PoolVars;
+};
+
+} // anonymous namespace
+
+void trans::rewriteAutoreleasePool(MigrationPass &pass) {
+ BodyTransform<AutoreleasePoolRewriter> trans(pass);
+ trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransBlockObjCVariable.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransBlockObjCVariable.cpp
new file mode 100644
index 0000000..fac6a84
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransBlockObjCVariable.cpp
@@ -0,0 +1,147 @@
+//===--- TransBlockObjCVariable.cpp - Transformations to ARC mode ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// rewriteBlockObjCVariable:
+//
+// __block may be added to an Objective-C variable either because the variable
+// is used for output storage or because the user wanted to break a retain
+// cycle. This transformation checks whether the block actually needs a
+// reference to the variable (i.e. it is assigned to or its address is taken)
+// or only its value. If only the value is needed, it assumes __block was
+// added to break a cycle, so it removes '__block' and adds
+// __weak/__unsafe_unretained. e.g.
+//
+// __block Foo *x;
+// bar(^ { [x cake]; });
+// ---->
+// __weak Foo *x;
+// bar(^ { [x cake]; });
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/Basic/SourceManager.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class RootBlockObjCVarRewriter :
+ public RecursiveASTVisitor<RootBlockObjCVarRewriter> {
+ llvm::DenseSet<VarDecl *> &VarsToChange;
+
+ class BlockVarChecker : public RecursiveASTVisitor<BlockVarChecker> {
+ VarDecl *Var;
+
+ typedef RecursiveASTVisitor<BlockVarChecker> base;
+ public:
+ BlockVarChecker(VarDecl *var) : Var(var) { }
+
+ bool TraverseImplicitCastExpr(ImplicitCastExpr *castE) {
+ if (DeclRefExpr *
+ ref = dyn_cast<DeclRefExpr>(castE->getSubExpr())) {
+ if (ref->getDecl() == Var) {
+ if (castE->getCastKind() == CK_LValueToRValue)
+ return true; // Using the value of the variable.
+ if (castE->getCastKind() == CK_NoOp && castE->isLValue() &&
+ Var->getASTContext().getLangOpts().CPlusPlus)
+ return true; // Binding to const C++ reference.
+ }
+ }
+
+ return base::TraverseImplicitCastExpr(castE);
+ }
+
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ if (E->getDecl() == Var)
+ return false; // The reference of the variable, and not just its value,
+ // is needed.
+ return true;
+ }
+ };
+
+public:
+ RootBlockObjCVarRewriter(llvm::DenseSet<VarDecl *> &VarsToChange)
+ : VarsToChange(VarsToChange) { }
+
+ bool VisitBlockDecl(BlockDecl *block) {
+ SmallVector<VarDecl *, 4> BlockVars;
+
+ for (const auto &I : block->captures()) {
+ VarDecl *var = I.getVariable();
+ if (I.isByRef() &&
+ var->getType()->isObjCObjectPointerType() &&
+ isImplicitStrong(var->getType())) {
+ BlockVars.push_back(var);
+ }
+ }
+
+ for (unsigned i = 0, e = BlockVars.size(); i != e; ++i) {
+ VarDecl *var = BlockVars[i];
+
+ BlockVarChecker checker(var);
+ bool onlyValueOfVarIsNeeded = checker.TraverseStmt(block->getBody());
+ if (onlyValueOfVarIsNeeded)
+ VarsToChange.insert(var);
+ else
+ VarsToChange.erase(var);
+ }
+
+ return true;
+ }
+
+private:
+ bool isImplicitStrong(QualType ty) {
+ if (isa<AttributedType>(ty.getTypePtr()))
+ return false;
+ return ty.getLocalQualifiers().getObjCLifetime() == Qualifiers::OCL_Strong;
+ }
+};
+
+class BlockObjCVarRewriter : public RecursiveASTVisitor<BlockObjCVarRewriter> {
+ llvm::DenseSet<VarDecl *> &VarsToChange;
+
+public:
+ BlockObjCVarRewriter(llvm::DenseSet<VarDecl *> &VarsToChange)
+ : VarsToChange(VarsToChange) { }
+
+ bool TraverseBlockDecl(BlockDecl *block) {
+ RootBlockObjCVarRewriter(VarsToChange).TraverseDecl(block);
+ return true;
+ }
+};
+
+} // anonymous namespace
+
+void BlockObjCVariableTraverser::traverseBody(BodyContext &BodyCtx) {
+ MigrationPass &Pass = BodyCtx.getMigrationContext().Pass;
+ llvm::DenseSet<VarDecl *> VarsToChange;
+
+ BlockObjCVarRewriter trans(VarsToChange);
+ trans.TraverseStmt(BodyCtx.getTopStmt());
+
+ for (llvm::DenseSet<VarDecl *>::iterator
+ I = VarsToChange.begin(), E = VarsToChange.end(); I != E; ++I) {
+ VarDecl *var = *I;
+ BlocksAttr *attr = var->getAttr<BlocksAttr>();
+ if(!attr)
+ continue;
+ bool useWeak = canApplyWeak(Pass.Ctx, var->getType());
+ SourceManager &SM = Pass.Ctx.getSourceManager();
+ Transaction Trans(Pass.TA);
+ Pass.TA.replaceText(SM.getExpansionLoc(attr->getLocation()),
+ "__block",
+ useWeak ? "__weak" : "__unsafe_unretained");
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
new file mode 100644
index 0000000..d45d5d6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
@@ -0,0 +1,251 @@
+//===--- TransEmptyStatementsAndDealloc.cpp - Transformations to ARC mode -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// removeEmptyStatementsAndDealloc:
+//
+// Removes empty statements that are leftovers from previous transformations.
+// e.g. for
+//
+// [x retain];
+//
+// removeRetainReleaseDealloc will leave an empty ";" that removeEmptyStatements
+// will remove.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/SourceManager.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
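+// Returns true if the NullStmt's semicolon directly follows an expansion of
+// the ARCMT marker macro, i.e. it is a leftover of a previous transformation
+// rather than a semicolon the user wrote.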
+static bool isEmptyARCMTMacroStatement(NullStmt *S,
+ std::vector<SourceLocation> &MacroLocs,
+ ASTContext &Ctx) {
+ if (!S->hasLeadingEmptyMacro())
+ return false;
+
+ SourceLocation SemiLoc = S->getSemiLoc();
+ if (SemiLoc.isInvalid() || SemiLoc.isMacroID())
+ return false;
+
+ if (MacroLocs.empty())
+ return false;
+
+ SourceManager &SM = Ctx.getSourceManager();
+ std::vector<SourceLocation>::iterator
+ I = std::upper_bound(MacroLocs.begin(), MacroLocs.end(), SemiLoc,
+ BeforeThanCompare<SourceLocation>(SM));
+ --I;
+ SourceLocation
+ AfterMacroLoc = I->getLocWithOffset(getARCMTMacroName().size());
+ assert(AfterMacroLoc.isFileID());
+
+ if (AfterMacroLoc == SemiLoc)
+ return true;
+
+ int RelOffs = 0;
+ if (!SM.isInSameSLocAddrSpace(AfterMacroLoc, SemiLoc, &RelOffs))
+ return false;
+ if (RelOffs < 0)
+ return false;
+
+ // We make the reasonable assumption that a semicolon more than 100
+ // characters past the macro is not the next token after it. If this
+ // assumption fails it is not critical; we will just fail to clear out,
+ // e.g., an empty 'if'.
+ if (RelOffs - getARCMTMacroName().size() > 100)
+ return false;
+
+ SourceLocation AfterMacroSemiLoc = findSemiAfterLocation(AfterMacroLoc, Ctx);
+ return AfterMacroSemiLoc == SemiLoc;
+}
+
+namespace {
+
+/// \brief Visitor whose Visit methods return true if the statement became
+/// empty due to previous transformations.
+class EmptyChecker : public StmtVisitor<EmptyChecker, bool> {
+ ASTContext &Ctx;
+ std::vector<SourceLocation> &MacroLocs;
+
+public:
+ EmptyChecker(ASTContext &ctx, std::vector<SourceLocation> &macroLocs)
+ : Ctx(ctx), MacroLocs(macroLocs) { }
+
+ bool VisitNullStmt(NullStmt *S) {
+ return isEmptyARCMTMacroStatement(S, MacroLocs, Ctx);
+ }
+ bool VisitCompoundStmt(CompoundStmt *S) {
+ if (S->body_empty())
+ return false; // was already empty, not because of transformations.
+ for (auto *I : S->body())
+ if (!Visit(I))
+ return false;
+ return true;
+ }
+ bool VisitIfStmt(IfStmt *S) {
+ if (S->getConditionVariable())
+ return false;
+ Expr *condE = S->getCond();
+ if (!condE)
+ return false;
+ if (hasSideEffects(condE, Ctx))
+ return false;
+ if (!S->getThen() || !Visit(S->getThen()))
+ return false;
+ return !S->getElse() || Visit(S->getElse());
+ }
+ bool VisitWhileStmt(WhileStmt *S) {
+ if (S->getConditionVariable())
+ return false;
+ Expr *condE = S->getCond();
+ if (!condE)
+ return false;
+ if (hasSideEffects(condE, Ctx))
+ return false;
+ if (!S->getBody())
+ return false;
+ return Visit(S->getBody());
+ }
+ bool VisitDoStmt(DoStmt *S) {
+ Expr *condE = S->getCond();
+ if (!condE)
+ return false;
+ if (hasSideEffects(condE, Ctx))
+ return false;
+ if (!S->getBody())
+ return false;
+ return Visit(S->getBody());
+ }
+ bool VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
+ Expr *Exp = S->getCollection();
+ if (!Exp)
+ return false;
+ if (hasSideEffects(Exp, Ctx))
+ return false;
+ if (!S->getBody())
+ return false;
+ return Visit(S->getBody());
+ }
+ bool VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
+ if (!S->getSubStmt())
+ return false;
+ return Visit(S->getSubStmt());
+ }
+};
+
+class EmptyStatementsRemover :
+ public RecursiveASTVisitor<EmptyStatementsRemover> {
+ MigrationPass &Pass;
+
+public:
+ EmptyStatementsRemover(MigrationPass &pass) : Pass(pass) { }
+
+ bool TraverseStmtExpr(StmtExpr *E) {
+ CompoundStmt *S = E->getSubStmt();
+ for (CompoundStmt::body_iterator
+ I = S->body_begin(), E = S->body_end(); I != E; ++I) {
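+ // Don't check the last statement: it is the value of the statement
+ // expression and must be kept even if it became empty.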
+ if (I != E - 1)
+ check(*I);
+ TraverseStmt(*I);
+ }
+ return true;
+ }
+
+ bool VisitCompoundStmt(CompoundStmt *S) {
+ for (auto *I : S->body())
+ check(I);
+ return true;
+ }
+
+ ASTContext &getContext() { return Pass.Ctx; }
+
+private:
+ void check(Stmt *S) {
+ if (!S) return;
+ if (EmptyChecker(Pass.Ctx, Pass.ARCMTMacroLocs).Visit(S)) {
+ Transaction Trans(Pass.TA);
+ Pass.TA.removeStmt(S);
+ }
+ }
+};
+
+} // anonymous namespace
+
+static bool isBodyEmpty(CompoundStmt *body, ASTContext &Ctx,
+ std::vector<SourceLocation> &MacroLocs) {
+ for (auto *I : body->body())
+ if (!EmptyChecker(Ctx, MacroLocs).Visit(I))
+ return false;
+
+ return true;
+}
+
+static void cleanupDeallocOrFinalize(MigrationPass &pass) {
+ ASTContext &Ctx = pass.Ctx;
+ TransformActions &TA = pass.TA;
+ DeclContext *DC = Ctx.getTranslationUnitDecl();
+ Selector FinalizeSel =
+ Ctx.Selectors.getNullarySelector(&pass.Ctx.Idents.get("finalize"));
+
+ typedef DeclContext::specific_decl_iterator<ObjCImplementationDecl>
+ impl_iterator;
+ for (impl_iterator I = impl_iterator(DC->decls_begin()),
+ E = impl_iterator(DC->decls_end()); I != E; ++I) {
+ ObjCMethodDecl *DeallocM = nullptr;
+ ObjCMethodDecl *FinalizeM = nullptr;
+ for (auto *MD : I->instance_methods()) {
+ if (!MD->hasBody())
+ continue;
+
+ if (MD->getMethodFamily() == OMF_dealloc) {
+ DeallocM = MD;
+ } else if (MD->isInstanceMethod() && MD->getSelector() == FinalizeSel) {
+ FinalizeM = MD;
+ }
+ }
+
+ if (DeallocM) {
+ if (isBodyEmpty(DeallocM->getCompoundBody(), Ctx, pass.ARCMTMacroLocs)) {
+ Transaction Trans(TA);
+ TA.remove(DeallocM->getSourceRange());
+ }
+
+ if (FinalizeM) {
+ Transaction Trans(TA);
+ TA.remove(FinalizeM->getSourceRange());
+ }
+
+ } else if (FinalizeM) {
+ if (isBodyEmpty(FinalizeM->getCompoundBody(), Ctx, pass.ARCMTMacroLocs)) {
+ Transaction Trans(TA);
+ TA.remove(FinalizeM->getSourceRange());
+ } else {
+ Transaction Trans(TA);
+ TA.replaceText(FinalizeM->getSelectorStartLoc(), "finalize", "dealloc");
+ }
+ }
+ }
+}
+
+void trans::removeEmptyStatementsAndDeallocFinalize(MigrationPass &pass) {
+ EmptyStatementsRemover(pass).TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+
+ cleanupDeallocOrFinalize(pass);
+
+ for (unsigned i = 0, e = pass.ARCMTMacroLocs.size(); i != e; ++i) {
+ Transaction Trans(pass.TA);
+ pass.TA.remove(pass.ARCMTMacroLocs[i]);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCAttrs.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCAttrs.cpp
new file mode 100644
index 0000000..2ae6b78
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCAttrs.cpp
@@ -0,0 +1,355 @@
+//===--- TransGCAttrs.cpp - Transformations to ARC mode --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
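+//
+// Collects all occurrences of the GC ownership qualifiers __strong/__weak and
+// rewrites the ones that ARC cannot keep. An illustrative sketch (the class
+// name is hypothetical):
+//
+//   __weak SomeUnknownClass *x;
+//   ---->
+//   __unsafe_unretained SomeUnknownClass *x;
+//
+//===----------------------------------------------------------------------===//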
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/TinyPtrVector.h"
+#include "llvm/Support/SaveAndRestore.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+/// \brief Collects all the places where GC attributes __strong/__weak occur.
+class GCAttrsCollector : public RecursiveASTVisitor<GCAttrsCollector> {
+ MigrationContext &MigrateCtx;
+ bool FullyMigratable;
+ std::vector<ObjCPropertyDecl *> &AllProps;
+
+ typedef RecursiveASTVisitor<GCAttrsCollector> base;
+public:
+ GCAttrsCollector(MigrationContext &ctx,
+ std::vector<ObjCPropertyDecl *> &AllProps)
+ : MigrateCtx(ctx), FullyMigratable(false),
+ AllProps(AllProps) { }
+
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool VisitAttributedTypeLoc(AttributedTypeLoc TL) {
+ handleAttr(TL);
+ return true;
+ }
+
+ bool TraverseDecl(Decl *D) {
+ if (!D || D->isImplicit())
+ return true;
+
+ SaveAndRestore<bool> Save(FullyMigratable, isMigratable(D));
+
+ if (ObjCPropertyDecl *PropD = dyn_cast<ObjCPropertyDecl>(D)) {
+ lookForAttribute(PropD, PropD->getTypeSourceInfo());
+ AllProps.push_back(PropD);
+ } else if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D)) {
+ lookForAttribute(DD, DD->getTypeSourceInfo());
+ }
+ return base::TraverseDecl(D);
+ }
+
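+ // Walks the TypeLoc chain (through qualifiers, attributes, arrays,
+ // pointers, and references) looking for an objc_ownership attribute.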
+ void lookForAttribute(Decl *D, TypeSourceInfo *TInfo) {
+ if (!TInfo)
+ return;
+ TypeLoc TL = TInfo->getTypeLoc();
+ while (TL) {
+ if (QualifiedTypeLoc QL = TL.getAs<QualifiedTypeLoc>()) {
+ TL = QL.getUnqualifiedLoc();
+ } else if (AttributedTypeLoc Attr = TL.getAs<AttributedTypeLoc>()) {
+ if (handleAttr(Attr, D))
+ break;
+ TL = Attr.getModifiedLoc();
+ } else if (ArrayTypeLoc Arr = TL.getAs<ArrayTypeLoc>()) {
+ TL = Arr.getElementLoc();
+ } else if (PointerTypeLoc PT = TL.getAs<PointerTypeLoc>()) {
+ TL = PT.getPointeeLoc();
+ } else if (ReferenceTypeLoc RT = TL.getAs<ReferenceTypeLoc>())
+ TL = RT.getPointeeLoc();
+ else
+ break;
+ }
+ }
+
+ bool handleAttr(AttributedTypeLoc TL, Decl *D = nullptr) {
+ if (TL.getAttrKind() != AttributedType::attr_objc_ownership)
+ return false;
+
+ SourceLocation Loc = TL.getAttrNameLoc();
+ unsigned RawLoc = Loc.getRawEncoding();
+ if (MigrateCtx.AttrSet.count(RawLoc))
+ return true;
+
+ ASTContext &Ctx = MigrateCtx.Pass.Ctx;
+ SourceManager &SM = Ctx.getSourceManager();
+ if (Loc.isMacroID())
+ Loc = SM.getImmediateExpansionRange(Loc).first;
+ SmallString<32> Buf;
+ bool Invalid = false;
+ StringRef Spell = Lexer::getSpelling(
+ SM.getSpellingLoc(TL.getAttrEnumOperandLoc()),
+ Buf, SM, Ctx.getLangOpts(), &Invalid);
+ if (Invalid)
+ return false;
+ MigrationContext::GCAttrOccurrence::AttrKind Kind;
+ if (Spell == "strong")
+ Kind = MigrationContext::GCAttrOccurrence::Strong;
+ else if (Spell == "weak")
+ Kind = MigrationContext::GCAttrOccurrence::Weak;
+ else
+ return false;
+
+ MigrateCtx.AttrSet.insert(RawLoc);
+ MigrateCtx.GCAttrs.push_back(MigrationContext::GCAttrOccurrence());
+ MigrationContext::GCAttrOccurrence &Attr = MigrateCtx.GCAttrs.back();
+
+ Attr.Kind = Kind;
+ Attr.Loc = Loc;
+ Attr.ModifiedType = TL.getModifiedLoc().getType();
+ Attr.Dcl = D;
+ Attr.FullyMigratable = FullyMigratable;
+ return true;
+ }
+
+ bool isMigratable(Decl *D) {
+ if (isa<TranslationUnitDecl>(D))
+ return false;
+
+ if (isInMainFile(D))
+ return true;
+
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ return FD->hasBody();
+
+ if (ObjCContainerDecl *ContD = dyn_cast<ObjCContainerDecl>(D))
+ return hasObjCImpl(ContD);
+
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ for (const auto *MI : RD->methods()) {
+ if (MI->isOutOfLine())
+ return true;
+ }
+ return false;
+ }
+
+ return isMigratable(cast<Decl>(D->getDeclContext()));
+ }
+
+ static bool hasObjCImpl(Decl *D) {
+ if (!D)
+ return false;
+ if (ObjCContainerDecl *ContD = dyn_cast<ObjCContainerDecl>(D)) {
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(ContD))
+ return ID->getImplementation() != nullptr;
+ if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(ContD))
+ return CD->getImplementation() != nullptr;
+ return isa<ObjCImplDecl>(ContD);
+ }
+ return false;
+ }
+
+ bool isInMainFile(Decl *D) {
+ if (!D)
+ return false;
+
+ for (auto I : D->redecls())
+ if (!isInMainFile(I->getLocation()))
+ return false;
+
+ return true;
+ }
+
+ bool isInMainFile(SourceLocation Loc) {
+ if (Loc.isInvalid())
+ return false;
+
+ SourceManager &SM = MigrateCtx.Pass.Ctx.getSourceManager();
+ return SM.isInFileID(SM.getExpansionLoc(Loc), SM.getMainFileID());
+ }
+};
+
+} // anonymous namespace
+
+static void errorForGCAttrsOnNonObjC(MigrationContext &MigrateCtx) {
+ TransformActions &TA = MigrateCtx.Pass.TA;
+
+ for (unsigned i = 0, e = MigrateCtx.GCAttrs.size(); i != e; ++i) {
+ MigrationContext::GCAttrOccurrence &Attr = MigrateCtx.GCAttrs[i];
+ if (Attr.FullyMigratable && Attr.Dcl) {
+ if (Attr.ModifiedType.isNull())
+ continue;
+ if (!Attr.ModifiedType->isObjCRetainableType()) {
+ TA.reportError("GC managed memory will become unmanaged in ARC",
+ Attr.Loc);
+ }
+ }
+ }
+}
+
+static void checkWeakGCAttrs(MigrationContext &MigrateCtx) {
+ TransformActions &TA = MigrateCtx.Pass.TA;
+
+ for (unsigned i = 0, e = MigrateCtx.GCAttrs.size(); i != e; ++i) {
+ MigrationContext::GCAttrOccurrence &Attr = MigrateCtx.GCAttrs[i];
+ if (Attr.Kind == MigrationContext::GCAttrOccurrence::Weak) {
+ if (Attr.ModifiedType.isNull() ||
+ !Attr.ModifiedType->isObjCRetainableType())
+ continue;
+ if (!canApplyWeak(MigrateCtx.Pass.Ctx, Attr.ModifiedType,
+ /*AllowOnUnknownClass=*/true)) {
+ Transaction Trans(TA);
+ if (!MigrateCtx.RemovedAttrSet.count(Attr.Loc.getRawEncoding()))
+ TA.replaceText(Attr.Loc, "__weak", "__unsafe_unretained");
+ TA.clearDiagnostic(diag::err_arc_weak_no_runtime,
+ diag::err_arc_unsupported_weak_class,
+ Attr.Loc);
+ }
+ }
+ }
+}
+
+typedef llvm::TinyPtrVector<ObjCPropertyDecl *> IndivPropsTy;
+
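+// Checks the @property declarations that share a single '@' location: when
+// they consistently use GC __weak or __strong type attributes, the attribute
+// is removed and, where needed, the property ownership is rewritten for ARC
+// (e.g. 'assign' becomes 'weak', 'unsafe_unretained', or 'strong').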
+static void checkAllAtProps(MigrationContext &MigrateCtx,
+ SourceLocation AtLoc,
+ IndivPropsTy &IndProps) {
+ if (IndProps.empty())
+ return;
+
+ for (IndivPropsTy::iterator
+ PI = IndProps.begin(), PE = IndProps.end(); PI != PE; ++PI) {
+ QualType T = (*PI)->getType();
+ if (T.isNull() || !T->isObjCRetainableType())
+ return;
+ }
+
+ SmallVector<std::pair<AttributedTypeLoc, ObjCPropertyDecl *>, 4> ATLs;
+ bool hasWeak = false, hasStrong = false;
+ ObjCPropertyDecl::PropertyAttributeKind
+ Attrs = ObjCPropertyDecl::OBJC_PR_noattr;
+ for (IndivPropsTy::iterator
+ PI = IndProps.begin(), PE = IndProps.end(); PI != PE; ++PI) {
+ ObjCPropertyDecl *PD = *PI;
+ Attrs = PD->getPropertyAttributesAsWritten();
+ TypeSourceInfo *TInfo = PD->getTypeSourceInfo();
+ if (!TInfo)
+ return;
+ TypeLoc TL = TInfo->getTypeLoc();
+ if (AttributedTypeLoc ATL =
+ TL.getAs<AttributedTypeLoc>()) {
+ ATLs.push_back(std::make_pair(ATL, PD));
+ if (TInfo->getType().getObjCLifetime() == Qualifiers::OCL_Weak) {
+ hasWeak = true;
+ } else if (TInfo->getType().getObjCLifetime() == Qualifiers::OCL_Strong)
+ hasStrong = true;
+ else
+ return;
+ }
+ }
+ if (ATLs.empty())
+ return;
+ if (hasWeak && hasStrong)
+ return;
+
+ TransformActions &TA = MigrateCtx.Pass.TA;
+ Transaction Trans(TA);
+
+ if (GCAttrsCollector::hasObjCImpl(
+ cast<Decl>(IndProps.front()->getDeclContext()))) {
+ if (hasWeak)
+ MigrateCtx.AtPropsWeak.insert(AtLoc.getRawEncoding());
+
+ } else {
+ StringRef toAttr = "strong";
+ if (hasWeak) {
+ if (canApplyWeak(MigrateCtx.Pass.Ctx, IndProps.front()->getType(),
+ /*AllowOnUnknownClass=*/true))
+ toAttr = "weak";
+ else
+ toAttr = "unsafe_unretained";
+ }
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_assign)
+ MigrateCtx.rewritePropertyAttribute("assign", toAttr, AtLoc);
+ else
+ MigrateCtx.addPropertyAttribute(toAttr, AtLoc);
+ }
+
+ for (unsigned i = 0, e = ATLs.size(); i != e; ++i) {
+ SourceLocation Loc = ATLs[i].first.getAttrNameLoc();
+ if (Loc.isMacroID())
+ Loc = MigrateCtx.Pass.Ctx.getSourceManager()
+ .getImmediateExpansionRange(Loc).first;
+ TA.remove(Loc);
+ TA.clearDiagnostic(diag::err_objc_property_attr_mutually_exclusive, AtLoc);
+ TA.clearDiagnostic(diag::err_arc_inconsistent_property_ownership,
+ ATLs[i].second->getLocation());
+ MigrateCtx.RemovedAttrSet.insert(Loc.getRawEncoding());
+ }
+}
+
+static void checkAllProps(MigrationContext &MigrateCtx,
+ std::vector<ObjCPropertyDecl *> &AllProps) {
+ typedef llvm::TinyPtrVector<ObjCPropertyDecl *> IndivPropsTy;
+ llvm::DenseMap<unsigned, IndivPropsTy> AtProps;
+
+ for (unsigned i = 0, e = AllProps.size(); i != e; ++i) {
+ ObjCPropertyDecl *PD = AllProps[i];
+ if (PD->getPropertyAttributesAsWritten() &
+ (ObjCPropertyDecl::OBJC_PR_assign |
+ ObjCPropertyDecl::OBJC_PR_readonly)) {
+ SourceLocation AtLoc = PD->getAtLoc();
+ if (AtLoc.isInvalid())
+ continue;
+ unsigned RawAt = AtLoc.getRawEncoding();
+ AtProps[RawAt].push_back(PD);
+ }
+ }
+
+ for (llvm::DenseMap<unsigned, IndivPropsTy>::iterator
+ I = AtProps.begin(), E = AtProps.end(); I != E; ++I) {
+ SourceLocation AtLoc = SourceLocation::getFromRawEncoding(I->first);
+ IndivPropsTy &IndProps = I->second;
+ checkAllAtProps(MigrateCtx, AtLoc, IndProps);
+ }
+}
+
+void GCAttrsTraverser::traverseTU(MigrationContext &MigrateCtx) {
+ std::vector<ObjCPropertyDecl *> AllProps;
+ GCAttrsCollector(MigrateCtx, AllProps).TraverseDecl(
+ MigrateCtx.Pass.Ctx.getTranslationUnitDecl());
+
+ errorForGCAttrsOnNonObjC(MigrateCtx);
+ checkAllProps(MigrateCtx, AllProps);
+ checkWeakGCAttrs(MigrateCtx);
+}
+
+void MigrationContext::dumpGCAttrs() {
+ llvm::errs() << "\n################\n";
+ for (unsigned i = 0, e = GCAttrs.size(); i != e; ++i) {
+ GCAttrOccurrence &Attr = GCAttrs[i];
+ llvm::errs() << "KIND: "
+ << (Attr.Kind == GCAttrOccurrence::Strong ? "strong" : "weak");
+ llvm::errs() << "\nLOC: ";
+ Attr.Loc.dump(Pass.Ctx.getSourceManager());
+ llvm::errs() << "\nTYPE: ";
+ Attr.ModifiedType.dump();
+ if (Attr.Dcl) {
+ llvm::errs() << "DECL:\n";
+ Attr.Dcl->dump();
+ } else {
+ llvm::errs() << "DECL: NONE";
+ }
+ llvm::errs() << "\nMIGRATABLE: " << Attr.FullyMigratable;
+ llvm::errs() << "\n----------------\n";
+ }
+ llvm::errs() << "\n################\n";
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCCalls.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCCalls.cpp
new file mode 100644
index 0000000..3a236d3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCCalls.cpp
@@ -0,0 +1,77 @@
+//===--- TransGCCalls.cpp - Transformations to ARC mode -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
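+//
+// An illustrative sketch, inferred from the checker below (variable names are
+// made up):
+//
+//   id obj = NSMakeCollectable(cfRef);
+//   ---->
+//   id obj = CFBridgingRelease(cfRef);
+//
+// Calls to CFMakeCollectable are reported as errors, since under ARC the
+// object they receive would leak.
+//
+//===----------------------------------------------------------------------===//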
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/Sema/SemaDiagnostic.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class GCCollectableCallsChecker :
+ public RecursiveASTVisitor<GCCollectableCallsChecker> {
+ MigrationContext &MigrateCtx;
+ IdentifierInfo *NSMakeCollectableII;
+ IdentifierInfo *CFMakeCollectableII;
+
+public:
+ GCCollectableCallsChecker(MigrationContext &ctx)
+ : MigrateCtx(ctx) {
+ IdentifierTable &Ids = MigrateCtx.Pass.Ctx.Idents;
+ NSMakeCollectableII = &Ids.get("NSMakeCollectable");
+ CFMakeCollectableII = &Ids.get("CFMakeCollectable");
+ }
+
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool VisitCallExpr(CallExpr *E) {
+ TransformActions &TA = MigrateCtx.Pass.TA;
+
+ if (MigrateCtx.isGCOwnedNonObjC(E->getType())) {
+ TA.report(E->getLocStart(), diag::warn_arcmt_nsalloc_realloc,
+ E->getSourceRange());
+ return true;
+ }
+
+ Expr *CEE = E->getCallee()->IgnoreParenImpCasts();
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE)) {
+ if (FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(DRE->getDecl())) {
+ if (!FD->getDeclContext()->getRedeclContext()->isFileContext())
+ return true;
+
+ if (FD->getIdentifier() == NSMakeCollectableII) {
+ Transaction Trans(TA);
+ TA.clearDiagnostic(diag::err_unavailable,
+ diag::err_unavailable_message,
+ diag::err_ovl_deleted_call, // ObjC++
+ DRE->getSourceRange());
+ TA.replace(DRE->getSourceRange(), "CFBridgingRelease");
+
+ } else if (FD->getIdentifier() == CFMakeCollectableII) {
+ TA.reportError("CFMakeCollectable will leak the object that it "
+ "receives in ARC", DRE->getLocation(),
+ DRE->getSourceRange());
+ }
+ }
+ }
+
+ return true;
+ }
+};
+
+} // anonymous namespace
+
+void GCCollectableCallsTraverser::traverseBody(BodyContext &BodyCtx) {
+ GCCollectableCallsChecker(BodyCtx.getMigrationContext())
+ .TraverseStmt(BodyCtx.getTopStmt());
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransProperties.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransProperties.cpp
new file mode 100644
index 0000000..8667bc2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransProperties.cpp
@@ -0,0 +1,379 @@
+//===--- TransProperties.cpp - Transformations to ARC mode ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// rewriteProperties:
+//
+// - Adds strong/weak/unsafe_unretained ownership specifier to properties that
+// are missing one.
+// - Migrates properties from (retain) to (strong) and (assign) to
+// (unsafe_unretained/weak).
+// - If a property is synthesized, adds the ownership specifier in the ivar
+// backing the property.
+//
+// @interface Foo : NSObject {
+// NSObject *x;
+// }
+// @property (assign) id x;
+// @end
+// ---->
+// @interface Foo : NSObject {
+// NSObject *__weak x;
+// }
+// @property (weak) id x;
+// @end
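+//
+// and, illustratively, for the (retain) case:
+//
+// @property (retain) id x;
+// ---->
+// @property (strong) id x;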
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include <map>
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class PropertiesRewriter {
+ MigrationContext &MigrateCtx;
+ MigrationPass &Pass;
+ ObjCImplementationDecl *CurImplD;
+
+ enum PropActionKind {
+ PropAction_None,
+ PropAction_RetainReplacedWithStrong,
+ PropAction_AssignRemoved,
+ PropAction_AssignRewritten,
+ PropAction_MaybeAddWeakOrUnsafe
+ };
+
+ struct PropData {
+ ObjCPropertyDecl *PropD;
+ ObjCIvarDecl *IvarD;
+ ObjCPropertyImplDecl *ImplD;
+
+ PropData(ObjCPropertyDecl *propD)
+ : PropD(propD), IvarD(nullptr), ImplD(nullptr) {}
+ };
+
+ typedef SmallVector<PropData, 2> PropsTy;
+ typedef std::map<unsigned, PropsTy> AtPropDeclsTy;
+ AtPropDeclsTy AtProps;
+ llvm::DenseMap<IdentifierInfo *, PropActionKind> ActionOnProp;
+
+public:
+ explicit PropertiesRewriter(MigrationContext &MigrateCtx)
+ : MigrateCtx(MigrateCtx), Pass(MigrateCtx.Pass) { }
+
+ static void collectProperties(ObjCContainerDecl *D, AtPropDeclsTy &AtProps,
+ AtPropDeclsTy *PrevAtProps = nullptr) {
+ for (auto *Prop : D->properties()) {
+ if (Prop->getAtLoc().isInvalid())
+ continue;
+ unsigned RawLoc = Prop->getAtLoc().getRawEncoding();
+ if (PrevAtProps)
+ if (PrevAtProps->find(RawLoc) != PrevAtProps->end())
+ continue;
+ PropsTy &props = AtProps[RawLoc];
+ props.push_back(Prop);
+ }
+ }
+
+ void doTransform(ObjCImplementationDecl *D) {
+ CurImplD = D;
+ ObjCInterfaceDecl *iface = D->getClassInterface();
+ if (!iface)
+ return;
+
+ collectProperties(iface, AtProps);
+
+ // Look through extensions.
+ for (auto *Ext : iface->visible_extensions())
+ collectProperties(Ext, AtProps);
+
+ typedef DeclContext::specific_decl_iterator<ObjCPropertyImplDecl>
+ prop_impl_iterator;
+ for (prop_impl_iterator
+ I = prop_impl_iterator(D->decls_begin()),
+ E = prop_impl_iterator(D->decls_end()); I != E; ++I) {
+ ObjCPropertyImplDecl *implD = *I;
+ if (implD->getPropertyImplementation() != ObjCPropertyImplDecl::Synthesize)
+ continue;
+ ObjCPropertyDecl *propD = implD->getPropertyDecl();
+ if (!propD || propD->isInvalidDecl())
+ continue;
+ ObjCIvarDecl *ivarD = implD->getPropertyIvarDecl();
+ if (!ivarD || ivarD->isInvalidDecl())
+ continue;
+ unsigned rawAtLoc = propD->getAtLoc().getRawEncoding();
+ AtPropDeclsTy::iterator findAtLoc = AtProps.find(rawAtLoc);
+ if (findAtLoc == AtProps.end())
+ continue;
+
+ PropsTy &props = findAtLoc->second;
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
+ if (I->PropD == propD) {
+ I->IvarD = ivarD;
+ I->ImplD = implD;
+ break;
+ }
+ }
+ }
+
+ for (AtPropDeclsTy::iterator
+ I = AtProps.begin(), E = AtProps.end(); I != E; ++I) {
+ SourceLocation atLoc = SourceLocation::getFromRawEncoding(I->first);
+ PropsTy &props = I->second;
+ if (!getPropertyType(props)->isObjCRetainableType())
+ continue;
+ if (hasIvarWithExplicitARCOwnership(props))
+ continue;
+
+ Transaction Trans(Pass.TA);
+ rewriteProperty(props, atLoc);
+ }
+ }
+
+private:
+ void doPropAction(PropActionKind kind,
+ PropsTy &props, SourceLocation atLoc,
+ bool markAction = true) {
+ if (markAction)
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I)
+ ActionOnProp[I->PropD->getIdentifier()] = kind;
+
+ switch (kind) {
+ case PropAction_None:
+ return;
+ case PropAction_RetainReplacedWithStrong: {
+ StringRef toAttr = "strong";
+ MigrateCtx.rewritePropertyAttribute("retain", toAttr, atLoc);
+ return;
+ }
+ case PropAction_AssignRemoved:
+ return removeAssignForDefaultStrong(props, atLoc);
+ case PropAction_AssignRewritten:
+ return rewriteAssign(props, atLoc);
+ case PropAction_MaybeAddWeakOrUnsafe:
+ return maybeAddWeakOrUnsafeUnretainedAttr(props, atLoc);
+ }
+ }
+
+ void rewriteProperty(PropsTy &props, SourceLocation atLoc) {
+ ObjCPropertyDecl::PropertyAttributeKind propAttrs = getPropertyAttrs(props);
+
+ if (propAttrs & (ObjCPropertyDecl::OBJC_PR_copy |
+ ObjCPropertyDecl::OBJC_PR_unsafe_unretained |
+ ObjCPropertyDecl::OBJC_PR_strong |
+ ObjCPropertyDecl::OBJC_PR_weak))
+ return;
+
+ if (propAttrs & ObjCPropertyDecl::OBJC_PR_retain) {
+ // strong is the default.
+ return doPropAction(PropAction_RetainReplacedWithStrong, props, atLoc);
+ }
+
+ bool HasIvarAssignedAPlusOneObject = hasIvarAssignedAPlusOneObject(props);
+
+ if (propAttrs & ObjCPropertyDecl::OBJC_PR_assign) {
+ if (HasIvarAssignedAPlusOneObject)
+ return doPropAction(PropAction_AssignRemoved, props, atLoc);
+ return doPropAction(PropAction_AssignRewritten, props, atLoc);
+ }
+
+ if (HasIvarAssignedAPlusOneObject ||
+ (Pass.isGCMigration() && !hasGCWeak(props, atLoc)))
+ return; // 'strong' by default.
+
+ return doPropAction(PropAction_MaybeAddWeakOrUnsafe, props, atLoc);
+ }
+
+ void removeAssignForDefaultStrong(PropsTy &props,
+ SourceLocation atLoc) const {
+ removeAttribute("retain", atLoc);
+ if (!removeAttribute("assign", atLoc))
+ return;
+
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
+ if (I->ImplD)
+ Pass.TA.clearDiagnostic(diag::err_arc_strong_property_ownership,
+ diag::err_arc_assign_property_ownership,
+ diag::err_arc_inconsistent_property_ownership,
+ I->IvarD->getLocation());
+ }
+ }
+
+ void rewriteAssign(PropsTy &props, SourceLocation atLoc) const {
+ bool canUseWeak = canApplyWeak(Pass.Ctx, getPropertyType(props),
+ /*AllowOnUnknownClass=*/Pass.isGCMigration());
+ const char *toWhich =
+ (Pass.isGCMigration() && !hasGCWeak(props, atLoc)) ? "strong" :
+ (canUseWeak ? "weak" : "unsafe_unretained");
+
+ bool rewroteAttr = rewriteAttribute("assign", toWhich, atLoc);
+ if (!rewroteAttr)
+ canUseWeak = false;
+
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
+ if (isUserDeclared(I->IvarD)) {
+ if (I->IvarD &&
+ I->IvarD->getType().getObjCLifetime() != Qualifiers::OCL_Weak) {
+ const char *toWhich =
+ (Pass.isGCMigration() && !hasGCWeak(props, atLoc)) ? "__strong " :
+ (canUseWeak ? "__weak " : "__unsafe_unretained ");
+ Pass.TA.insert(I->IvarD->getLocation(), toWhich);
+ }
+ }
+ if (I->ImplD)
+ Pass.TA.clearDiagnostic(diag::err_arc_strong_property_ownership,
+ diag::err_arc_assign_property_ownership,
+ diag::err_arc_inconsistent_property_ownership,
+ I->IvarD->getLocation());
+ }
+ }
+
+ void maybeAddWeakOrUnsafeUnretainedAttr(PropsTy &props,
+ SourceLocation atLoc) const {
+ bool canUseWeak = canApplyWeak(Pass.Ctx, getPropertyType(props),
+ /*AllowOnUnknownClass=*/Pass.isGCMigration());
+
+ bool addedAttr = addAttribute(canUseWeak ? "weak" : "unsafe_unretained",
+ atLoc);
+ if (!addedAttr)
+ canUseWeak = false;
+
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
+ if (isUserDeclared(I->IvarD)) {
+ if (I->IvarD &&
+ I->IvarD->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
+ Pass.TA.insert(I->IvarD->getLocation(),
+ canUseWeak ? "__weak " : "__unsafe_unretained ");
+ }
+ if (I->ImplD) {
+ Pass.TA.clearDiagnostic(diag::err_arc_strong_property_ownership,
+ diag::err_arc_assign_property_ownership,
+ diag::err_arc_inconsistent_property_ownership,
+ I->IvarD->getLocation());
+ Pass.TA.clearDiagnostic(
+ diag::err_arc_objc_property_default_assign_on_object,
+ I->ImplD->getLocation());
+ }
+ }
+ }
+
+ bool removeAttribute(StringRef fromAttr, SourceLocation atLoc) const {
+ return MigrateCtx.removePropertyAttribute(fromAttr, atLoc);
+ }
+
+ bool rewriteAttribute(StringRef fromAttr, StringRef toAttr,
+ SourceLocation atLoc) const {
+ return MigrateCtx.rewritePropertyAttribute(fromAttr, toAttr, atLoc);
+ }
+
+ bool addAttribute(StringRef attr, SourceLocation atLoc) const {
+ return MigrateCtx.addPropertyAttribute(attr, atLoc);
+ }
+
+ class PlusOneAssign : public RecursiveASTVisitor<PlusOneAssign> {
+ ObjCIvarDecl *Ivar;
+ public:
+ PlusOneAssign(ObjCIvarDecl *D) : Ivar(D) {}
+
+ bool VisitBinAssign(BinaryOperator *E) {
+ Expr *lhs = E->getLHS()->IgnoreParenImpCasts();
+ if (ObjCIvarRefExpr *RE = dyn_cast<ObjCIvarRefExpr>(lhs)) {
+ if (RE->getDecl() != Ivar)
+ return true;
+
+ if (isPlusOneAssign(E))
+ return false;
+ }
+
+ return true;
+ }
+ };
+
+ bool hasIvarAssignedAPlusOneObject(PropsTy &props) const {
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
+ PlusOneAssign oneAssign(I->IvarD);
+ bool notFound = oneAssign.TraverseDecl(CurImplD);
+ if (!notFound)
+ return true;
+ }
+
+ return false;
+ }
+
+ bool hasIvarWithExplicitARCOwnership(PropsTy &props) const {
+ if (Pass.isGCMigration())
+ return false;
+
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
+ if (isUserDeclared(I->IvarD)) {
+ if (isa<AttributedType>(I->IvarD->getType()))
+ return true;
+ if (I->IvarD->getType().getLocalQualifiers().getObjCLifetime()
+ != Qualifiers::OCL_Strong)
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ /// \brief Returns true if all declarations in the @property have GC __weak.
+ bool hasGCWeak(PropsTy &props, SourceLocation atLoc) const {
+ if (!Pass.isGCMigration())
+ return false;
+ if (props.empty())
+ return false;
+ return MigrateCtx.AtPropsWeak.count(atLoc.getRawEncoding());
+ }
+
+ bool isUserDeclared(ObjCIvarDecl *ivarD) const {
+ return ivarD && !ivarD->getSynthesize();
+ }
+
+ QualType getPropertyType(PropsTy &props) const {
+ assert(!props.empty());
+ QualType ty = props[0].PropD->getType().getUnqualifiedType();
+
+#ifndef NDEBUG
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I)
+ assert(ty == I->PropD->getType().getUnqualifiedType());
+#endif
+
+ return ty;
+ }
+
+ ObjCPropertyDecl::PropertyAttributeKind
+ getPropertyAttrs(PropsTy &props) const {
+ assert(!props.empty());
+ ObjCPropertyDecl::PropertyAttributeKind
+ attrs = props[0].PropD->getPropertyAttributesAsWritten();
+
+#ifndef NDEBUG
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I)
+ assert(attrs == I->PropD->getPropertyAttributesAsWritten());
+#endif
+
+ return attrs;
+ }
+};
+
+} // anonymous namespace
+
+void PropertyRewriteTraverser::traverseObjCImplementation(
+ ObjCImplementationContext &ImplCtx) {
+ PropertiesRewriter(ImplCtx.getMigrationContext())
+ .doTransform(ImplCtx.getImplementationDecl());
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransProtectedScope.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransProtectedScope.cpp
new file mode 100644
index 0000000..0fcbcbe
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransProtectedScope.cpp
@@ -0,0 +1,202 @@
+//===--- TransProtectedScope.cpp - Transformations to ARC mode ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Adds braces to case statements that "contain" the initialization of a
+// retainable variable and thus emit the "switch case is in protected scope"
+// error.
+//
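+// An illustrative sketch (hypothetical input, not from the original header):
+//
+//   case 0:
+//     NSObject *obj = [[NSObject alloc] init];
+//     break;
+//   ---->
+//   case 0: {
+//     NSObject *obj = [[NSObject alloc] init];
+//     break;
+//   }
+//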
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/Sema/SemaDiagnostic.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class LocalRefsCollector : public RecursiveASTVisitor<LocalRefsCollector> {
+ SmallVectorImpl<DeclRefExpr *> &Refs;
+
+public:
+ LocalRefsCollector(SmallVectorImpl<DeclRefExpr *> &refs)
+ : Refs(refs) { }
+
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ if (ValueDecl *D = E->getDecl())
+ if (D->getDeclContext()->getRedeclContext()->isFunctionOrMethod())
+ Refs.push_back(E);
+ return true;
+ }
+};
+
+struct CaseInfo {
+ SwitchCase *SC;
+ SourceRange Range;
+ enum {
+ St_Unchecked,
+ St_CannotFix,
+ St_Fixed
+ } State;
+
+ CaseInfo() : SC(nullptr), State(St_Unchecked) {}
+ CaseInfo(SwitchCase *S, SourceRange Range)
+ : SC(S), Range(Range), State(St_Unchecked) {}
+};
+
+class CaseCollector : public RecursiveASTVisitor<CaseCollector> {
+ ParentMap &PMap;
+ SmallVectorImpl<CaseInfo> &Cases;
+
+public:
+ CaseCollector(ParentMap &PMap, SmallVectorImpl<CaseInfo> &Cases)
+ : PMap(PMap), Cases(Cases) { }
+
+ bool VisitSwitchStmt(SwitchStmt *S) {
+ SwitchCase *Curr = S->getSwitchCaseList();
+ if (!Curr)
+ return true;
+ Stmt *Parent = getCaseParent(Curr);
+ Curr = Curr->getNextSwitchCase();
+ // Make sure all case statements are in the same scope.
+ while (Curr) {
+ if (getCaseParent(Curr) != Parent)
+ return true;
+ Curr = Curr->getNextSwitchCase();
+ }
+
+ SourceLocation NextLoc = S->getLocEnd();
+ Curr = S->getSwitchCaseList();
+ // We iterate over case statements in reverse source-order.
+ while (Curr) {
+ Cases.push_back(CaseInfo(Curr, SourceRange(Curr->getLocStart(), NextLoc)));
+ NextLoc = Curr->getLocStart();
+ Curr = Curr->getNextSwitchCase();
+ }
+ return true;
+ }
+
+ Stmt *getCaseParent(SwitchCase *S) {
+ Stmt *Parent = PMap.getParent(S);
+ while (Parent && (isa<SwitchCase>(Parent) || isa<LabelStmt>(Parent)))
+ Parent = PMap.getParent(Parent);
+ return Parent;
+ }
+};
+
+class ProtectedScopeFixer {
+ MigrationPass &Pass;
+ SourceManager &SM;
+ SmallVector<CaseInfo, 16> Cases;
+ SmallVector<DeclRefExpr *, 16> LocalRefs;
+
+public:
+ ProtectedScopeFixer(BodyContext &BodyCtx)
+ : Pass(BodyCtx.getMigrationContext().Pass),
+ SM(Pass.Ctx.getSourceManager()) {
+
+ CaseCollector(BodyCtx.getParentMap(), Cases)
+ .TraverseStmt(BodyCtx.getTopStmt());
+ LocalRefsCollector(LocalRefs).TraverseStmt(BodyCtx.getTopStmt());
+
+ SourceRange BodyRange = BodyCtx.getTopStmt()->getSourceRange();
+ const CapturedDiagList &DiagList = Pass.getDiags();
+ // Copy the diagnostics so we don't have to worry about invalidating
+ // iterators from the diagnostic list.
+ SmallVector<StoredDiagnostic, 16> StoredDiags;
+ StoredDiags.append(DiagList.begin(), DiagList.end());
+ SmallVectorImpl<StoredDiagnostic>::iterator
+ I = StoredDiags.begin(), E = StoredDiags.end();
+ while (I != E) {
+ if (I->getID() == diag::err_switch_into_protected_scope &&
+ isInRange(I->getLocation(), BodyRange)) {
+ handleProtectedScopeError(I, E);
+ continue;
+ }
+ ++I;
+ }
+ }
+
+ void handleProtectedScopeError(
+ SmallVectorImpl<StoredDiagnostic>::iterator &DiagI,
+ SmallVectorImpl<StoredDiagnostic>::iterator DiagE){
+ Transaction Trans(Pass.TA);
+ assert(DiagI->getID() == diag::err_switch_into_protected_scope);
+ SourceLocation ErrLoc = DiagI->getLocation();
+ bool handledAllNotes = true;
+ ++DiagI;
+ for (; DiagI != DiagE && DiagI->getLevel() == DiagnosticsEngine::Note;
+ ++DiagI) {
+ if (!handleProtectedNote(*DiagI))
+ handledAllNotes = false;
+ }
+
+ if (handledAllNotes)
+ Pass.TA.clearDiagnostic(diag::err_switch_into_protected_scope, ErrLoc);
+ }
+
+ bool handleProtectedNote(const StoredDiagnostic &Diag) {
+ assert(Diag.getLevel() == DiagnosticsEngine::Note);
+
+ for (unsigned i = 0; i != Cases.size(); i++) {
+ CaseInfo &info = Cases[i];
+ if (isInRange(Diag.getLocation(), info.Range)) {
+
+ if (info.State == CaseInfo::St_Unchecked)
+ tryFixing(info);
+ assert(info.State != CaseInfo::St_Unchecked);
+
+ if (info.State == CaseInfo::St_Fixed) {
+ Pass.TA.clearDiagnostic(Diag.getID(), Diag.getLocation());
+ return true;
+ }
+ return false;
+ }
+ }
+
+ return false;
+ }
+
+ void tryFixing(CaseInfo &info) {
+ assert(info.State == CaseInfo::St_Unchecked);
+ if (hasVarReferencedOutside(info)) {
+ info.State = CaseInfo::St_CannotFix;
+ return;
+ }
+
+ Pass.TA.insertAfterToken(info.SC->getColonLoc(), " {");
+ Pass.TA.insert(info.Range.getEnd(), "}\n");
+ info.State = CaseInfo::St_Fixed;
+ }
+
+ bool hasVarReferencedOutside(CaseInfo &info) {
+ for (unsigned i = 0, e = LocalRefs.size(); i != e; ++i) {
+ DeclRefExpr *DRE = LocalRefs[i];
+ if (isInRange(DRE->getDecl()->getLocation(), info.Range) &&
+ !isInRange(DRE->getLocation(), info.Range))
+ return true;
+ }
+ return false;
+ }
+
+ bool isInRange(SourceLocation Loc, SourceRange R) {
+ if (Loc.isInvalid())
+ return false;
+ return !SM.isBeforeInTranslationUnit(Loc, R.getBegin()) &&
+ SM.isBeforeInTranslationUnit(Loc, R.getEnd());
+ }
+};
+
+} // anonymous namespace
+
+void ProtectedScopeTraverser::traverseBody(BodyContext &BodyCtx) {
+ ProtectedScopeFixer Fix(BodyCtx);
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
new file mode 100644
index 0000000..f81133f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
@@ -0,0 +1,455 @@
+//===--- TransRetainReleaseDealloc.cpp - Transformations to ARC mode ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// removeRetainReleaseDealloc:
+//
+// Removes retain/release/autorelease/dealloc messages.
+//
+// return [[foo retain] autorelease];
+// ---->
+// return foo;
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/ADT/StringSwitch.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class RetainReleaseDeallocRemover :
+ public RecursiveASTVisitor<RetainReleaseDeallocRemover> {
+ Stmt *Body;
+ MigrationPass &Pass;
+
+ ExprSet Removables;
+ std::unique_ptr<ParentMap> StmtMap;
+
+ Selector DelegateSel, FinalizeSel;
+
+public:
+ RetainReleaseDeallocRemover(MigrationPass &pass)
+ : Body(nullptr), Pass(pass) {
+ DelegateSel =
+ Pass.Ctx.Selectors.getNullarySelector(&Pass.Ctx.Idents.get("delegate"));
+ FinalizeSel =
+ Pass.Ctx.Selectors.getNullarySelector(&Pass.Ctx.Idents.get("finalize"));
+ }
+
+ void transformBody(Stmt *body, Decl *ParentD) {
+ Body = body;
+ collectRemovables(body, Removables);
+ StmtMap.reset(new ParentMap(body));
+ TraverseStmt(body);
+ }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ switch (E->getMethodFamily()) {
+ default:
+ if (E->isInstanceMessage() && E->getSelector() == FinalizeSel)
+ break;
+ return true;
+ case OMF_autorelease:
+ if (isRemovable(E)) {
+ if (!isCommonUnusedAutorelease(E)) {
+ // An unused autorelease is a problem: if we remove it, the receiver
+ // will likely die immediately, whereas previously it was kept alive
+ // by the autorelease pool. This is bad practice in general, so leave
+ // it and emit an error to force the user to restructure their code.
+ Pass.TA.reportError("it is not safe to remove an unused 'autorelease' "
+ "message; its receiver may be destroyed immediately",
+ E->getLocStart(), E->getSourceRange());
+ return true;
+ }
+ }
+ // Fall through.
+ case OMF_retain:
+ case OMF_release:
+ if (E->getReceiverKind() == ObjCMessageExpr::Instance)
+ if (Expr *rec = E->getInstanceReceiver()) {
+ rec = rec->IgnoreParenImpCasts();
+ if (rec->getType().getObjCLifetime() == Qualifiers::OCL_ExplicitNone &&
+ (E->getMethodFamily() != OMF_retain || isRemovable(E))) {
+ std::string err = "it is not safe to remove '";
+ err += E->getSelector().getAsString() + "' message on "
+ "an __unsafe_unretained type";
+ Pass.TA.reportError(err, rec->getLocStart());
+ return true;
+ }
+
+ if (isGlobalVar(rec) &&
+ (E->getMethodFamily() != OMF_retain || isRemovable(E))) {
+ std::string err = "it is not safe to remove '";
+ err += E->getSelector().getAsString() + "' message on "
+ "a global variable";
+ Pass.TA.reportError(err, rec->getLocStart());
+ return true;
+ }
+
+ if (E->getMethodFamily() == OMF_retain && isDelegateMessage(rec)) {
+ Pass.TA.reportError("it is not safe to remove 'retain' "
+ "message on the result of a 'delegate' message; "
+ "the object that was passed to 'setDelegate:' may not be "
+ "properly retained", rec->getLocStart());
+ return true;
+ }
+ }
+ case OMF_dealloc:
+ break;
+ }
+
+ switch (E->getReceiverKind()) {
+ default:
+ return true;
+ case ObjCMessageExpr::SuperInstance: {
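+ // Try to remove the message to super entirely; if that fails, rewrite
+ // it to 'self' so the expression keeps a value.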
+ Transaction Trans(Pass.TA);
+ clearDiagnostics(E->getSelectorLoc(0));
+ if (tryRemoving(E))
+ return true;
+ Pass.TA.replace(E->getSourceRange(), "self");
+ return true;
+ }
+ case ObjCMessageExpr::Instance:
+ break;
+ }
+
+ Expr *rec = E->getInstanceReceiver();
+ if (!rec) return true;
+
+ Transaction Trans(Pass.TA);
+ clearDiagnostics(E->getSelectorLoc(0));
+
+ ObjCMessageExpr *Msg = E;
+ Expr *RecContainer = Msg;
+ SourceRange RecRange = rec->getSourceRange();
+ checkForGCDOrXPC(Msg, RecContainer, rec, RecRange);
+
+ if (Msg->getMethodFamily() == OMF_release &&
+ isRemovable(RecContainer) && isInAtFinally(RecContainer)) {
+ // Change the -release to "receiver = nil" in a finally to avoid a leak
+ // when an exception is thrown.
+ Pass.TA.replace(RecContainer->getSourceRange(), RecRange);
+ std::string str = " = ";
+ str += getNilString(Pass);
+ Pass.TA.insertAfterToken(RecRange.getEnd(), str);
+ return true;
+ }
+
+ if (hasSideEffects(rec, Pass.Ctx) || !tryRemoving(RecContainer))
+ Pass.TA.replace(RecContainer->getSourceRange(), RecRange);
+
+ return true;
+ }
+
+private:
+ /// \brief Checks for idioms where an unused -autorelease is common.
+ ///
+ /// Returns true for this idiom which is common in property
+ /// setters:
+ ///
+ /// [backingValue autorelease];
+ /// backingValue = [newValue retain]; // in general a +1 assign
+ ///
+ /// For these as well:
+ ///
+ /// [[var retain] autorelease];
+ /// return var;
+ ///
+ bool isCommonUnusedAutorelease(ObjCMessageExpr *E) {
+ return isPlusOneAssignBeforeOrAfterAutorelease(E) ||
+ isReturnedAfterAutorelease(E);
+ }
+
+ bool isReturnedAfterAutorelease(ObjCMessageExpr *E) {
+ Expr *Rec = E->getInstanceReceiver();
+ if (!Rec)
+ return false;
+
+ Decl *RefD = getReferencedDecl(Rec);
+ if (!RefD)
+ return false;
+
+ Stmt *nextStmt = getNextStmt(E);
+ if (!nextStmt)
+ return false;
+
+ // Check for "return <variable>;".
+
+ if (ReturnStmt *RetS = dyn_cast<ReturnStmt>(nextStmt))
+ return RefD == getReferencedDecl(RetS->getRetValue());
+
+ return false;
+ }
+
+ bool isPlusOneAssignBeforeOrAfterAutorelease(ObjCMessageExpr *E) {
+ Expr *Rec = E->getInstanceReceiver();
+ if (!Rec)
+ return false;
+
+ Decl *RefD = getReferencedDecl(Rec);
+ if (!RefD)
+ return false;
+
+ Stmt *prevStmt, *nextStmt;
+ std::tie(prevStmt, nextStmt) = getPreviousAndNextStmt(E);
+
+ return isPlusOneAssignToVar(prevStmt, RefD) ||
+ isPlusOneAssignToVar(nextStmt, RefD);
+ }
+
+ bool isPlusOneAssignToVar(Stmt *S, Decl *RefD) {
+ if (!S)
+ return false;
+
+ // Check for "RefD = [+1 retained object];".
+
+ if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(S)) {
+ return (RefD == getReferencedDecl(Bop->getLHS())) && isPlusOneAssign(Bop);
+ }
+
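+    // Check for "SomeType *RefD = [+1 retained object];".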
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
+ if (DS->isSingleDecl() && DS->getSingleDecl() == RefD) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(RefD))
+ return isPlusOne(VD->getInit());
+ }
+ return false;
+ }
+
+ return false;
+ }
+
+ Stmt *getNextStmt(Expr *E) {
+ return getPreviousAndNextStmt(E).second;
+ }
+
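+  /// \brief Returns the statements that lexically precede and follow \p E
+  /// within its enclosing statement, looking through parens, casts, and
+  /// cleanups. E.g. for the autorelease in "x = [v retain]; [v autorelease];
+  /// return v;" the pair is the assignment and the return statement.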
+ std::pair<Stmt *, Stmt *> getPreviousAndNextStmt(Expr *E) {
+ Stmt *prevStmt = nullptr, *nextStmt = nullptr;
+ if (!E)
+ return std::make_pair(prevStmt, nextStmt);
+
+ Stmt *OuterS = E, *InnerS;
+ do {
+ InnerS = OuterS;
+ OuterS = StmtMap->getParent(InnerS);
+ }
+ while (OuterS && (isa<ParenExpr>(OuterS) ||
+ isa<CastExpr>(OuterS) ||
+ isa<ExprWithCleanups>(OuterS)));
+
+ if (!OuterS)
+ return std::make_pair(prevStmt, nextStmt);
+
+ Stmt::child_iterator currChildS = OuterS->child_begin();
+ Stmt::child_iterator childE = OuterS->child_end();
+ Stmt::child_iterator prevChildS = childE;
+ for (; currChildS != childE; ++currChildS) {
+ if (*currChildS == InnerS)
+ break;
+ prevChildS = currChildS;
+ }
+
+ if (prevChildS != childE) {
+ prevStmt = *prevChildS;
+ if (prevStmt)
+ prevStmt = prevStmt->IgnoreImplicit();
+ }
+
+ if (currChildS == childE)
+ return std::make_pair(prevStmt, nextStmt);
+ ++currChildS;
+ if (currChildS == childE)
+ return std::make_pair(prevStmt, nextStmt);
+
+ nextStmt = *currChildS;
+ if (nextStmt)
+ nextStmt = nextStmt->IgnoreImplicit();
+
+ return std::make_pair(prevStmt, nextStmt);
+ }
+
+ Decl *getReferencedDecl(Expr *E) {
+ if (!E)
+ return nullptr;
+
+ E = E->IgnoreParenCasts();
+ if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E)) {
+ switch (ME->getMethodFamily()) {
+ case OMF_copy:
+ case OMF_autorelease:
+ case OMF_release:
+ case OMF_retain:
+ return getReferencedDecl(ME->getInstanceReceiver());
+ default:
+ return nullptr;
+ }
+ }
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ return DRE->getDecl();
+ if (MemberExpr *ME = dyn_cast<MemberExpr>(E))
+ return ME->getMemberDecl();
+ if (ObjCIvarRefExpr *IRE = dyn_cast<ObjCIvarRefExpr>(E))
+ return IRE->getDecl();
+
+ return nullptr;
+ }
+
+  /// \brief Check if the retain/release is due to a GCD/XPC macro that is
+  /// defined as:
+ ///
+ /// #define dispatch_retain(object) ({ dispatch_object_t _o = (object); _dispatch_object_validate(_o); (void)[_o retain]; })
+ /// #define dispatch_release(object) ({ dispatch_object_t _o = (object); _dispatch_object_validate(_o); [_o release]; })
+ /// #define xpc_retain(object) ({ xpc_object_t _o = (object); _xpc_object_validate(_o); [_o retain]; })
+ /// #define xpc_release(object) ({ xpc_object_t _o = (object); _xpc_object_validate(_o); [_o release]; })
+ ///
+  /// and, if so, returns the top container (the StmtExpr) and the macro
+  /// argument expression via the out-parameters.
+ void checkForGCDOrXPC(ObjCMessageExpr *Msg, Expr *&RecContainer,
+ Expr *&Rec, SourceRange &RecRange) {
+ SourceLocation Loc = Msg->getExprLoc();
+ if (!Loc.isMacroID())
+ return;
+ SourceManager &SM = Pass.Ctx.getSourceManager();
+ StringRef MacroName = Lexer::getImmediateMacroName(Loc, SM,
+ Pass.Ctx.getLangOpts());
+ bool isGCDOrXPC = llvm::StringSwitch<bool>(MacroName)
+ .Case("dispatch_retain", true)
+ .Case("dispatch_release", true)
+ .Case("xpc_retain", true)
+ .Case("xpc_release", true)
+ .Default(false);
+ if (!isGCDOrXPC)
+ return;
+
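+    // Find the statement expression ("({...})") that the macro expanded to.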
+ StmtExpr *StmtE = nullptr;
+ Stmt *S = Msg;
+ while (S) {
+ if (StmtExpr *SE = dyn_cast<StmtExpr>(S)) {
+ StmtE = SE;
+ break;
+ }
+ S = StmtMap->getParent(S);
+ }
+
+ if (!StmtE)
+ return;
+
+ Stmt::child_range StmtExprChild = StmtE->children();
+ if (StmtExprChild.begin() == StmtExprChild.end())
+ return;
+ auto *CompS = dyn_cast_or_null<CompoundStmt>(*StmtExprChild.begin());
+ if (!CompS)
+ return;
+
+ Stmt::child_range CompStmtChild = CompS->children();
+ if (CompStmtChild.begin() == CompStmtChild.end())
+ return;
+ auto *DeclS = dyn_cast_or_null<DeclStmt>(*CompStmtChild.begin());
+ if (!DeclS)
+ return;
+ if (!DeclS->isSingleDecl())
+ return;
+ VarDecl *VD = dyn_cast_or_null<VarDecl>(DeclS->getSingleDecl());
+ if (!VD)
+ return;
+ Expr *Init = VD->getInit();
+ if (!Init)
+ return;
+
+ RecContainer = StmtE;
+ Rec = Init->IgnoreParenImpCasts();
+ if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Rec))
+ Rec = EWC->getSubExpr()->IgnoreParenImpCasts();
+ RecRange = Rec->getSourceRange();
+ if (SM.isMacroArgExpansion(RecRange.getBegin()))
+ RecRange.setBegin(SM.getImmediateSpellingLoc(RecRange.getBegin()));
+ if (SM.isMacroArgExpansion(RecRange.getEnd()))
+ RecRange.setEnd(SM.getImmediateSpellingLoc(RecRange.getEnd()));
+ }
+
+ void clearDiagnostics(SourceLocation loc) const {
+ Pass.TA.clearDiagnostic(diag::err_arc_illegal_explicit_message,
+ diag::err_unavailable,
+ diag::err_unavailable_message,
+ loc);
+ }
+
+ bool isDelegateMessage(Expr *E) const {
+ if (!E) return false;
+
+ E = E->IgnoreParenCasts();
+
+ // Also look through property-getter sugar.
+ if (PseudoObjectExpr *pseudoOp = dyn_cast<PseudoObjectExpr>(E))
+ E = pseudoOp->getResultExpr()->IgnoreImplicit();
+
+ if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E))
+ return (ME->isInstanceMessage() && ME->getSelector() == DelegateSel);
+
+ return false;
+ }
+
+ bool isInAtFinally(Expr *E) const {
+ assert(E);
+ Stmt *S = E;
+ while (S) {
+ if (isa<ObjCAtFinallyStmt>(S))
+ return true;
+ S = StmtMap->getParent(S);
+ }
+
+ return false;
+ }
+
+ bool isRemovable(Expr *E) const {
+ return Removables.count(E);
+ }
+
+ bool tryRemoving(Expr *E) const {
+ if (isRemovable(E)) {
+ Pass.TA.removeStmt(E);
+ return true;
+ }
+
+ Stmt *parent = StmtMap->getParent(E);
+
+ if (ImplicitCastExpr *castE = dyn_cast_or_null<ImplicitCastExpr>(parent))
+ return tryRemoving(castE);
+
+ if (ParenExpr *parenE = dyn_cast_or_null<ParenExpr>(parent))
+ return tryRemoving(parenE);
+
+ if (BinaryOperator *
+ bopE = dyn_cast_or_null<BinaryOperator>(parent)) {
+ if (bopE->getOpcode() == BO_Comma && bopE->getLHS() == E &&
+ isRemovable(bopE)) {
+ Pass.TA.replace(bopE->getSourceRange(), bopE->getRHS()->getSourceRange());
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+};
+
+} // anonymous namespace
+
+void trans::removeRetainReleaseDeallocFinalize(MigrationPass &pass) {
+ BodyTransform<RetainReleaseDeallocRemover> trans(pass);
+ trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp
new file mode 100644
index 0000000..7ca4955
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp
@@ -0,0 +1,469 @@
+//===--- TransUnbridgedCasts.cpp - Transformations to ARC mode ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// rewriteUnbridgedCasts:
+//
+// Checks casts of a non-ObjC pointer to an ObjC one. If the non-ObjC pointer
+// comes from a file-level variable, a __bridge cast is used to convert it.
+// For the result of a function call that we know is +1/+0,
+// CFBridgingRelease/__bridge is used, respectively.
+//
+// NSString *str = (NSString *)kUTTypePlainText;
+// str = b ? kUTTypeRTF : kUTTypePlainText;
+// NSString *_uuidString = (NSString *)CFUUIDCreateString(kCFAllocatorDefault,
+// _uuid);
+// ---->
+// NSString *str = (__bridge NSString *)kUTTypePlainText;
+// str = (__bridge NSString *)(b ? kUTTypeRTF : kUTTypePlainText);
+// NSString *_uuidString = (NSString *)
+// CFBridgingRelease(CFUUIDCreateString(kCFAllocatorDefault, _uuid));
+//
+// When casting an ObjC pointer, such as 'self', to a C pointer, __bridge is used.
+//
+// CFStringRef str = (CFStringRef)self;
+// ---->
+// CFStringRef str = (__bridge CFStringRef)self;
+//
+// Uses of Block_copy/Block_release macros are rewritten:
+//
+// c = Block_copy(b);
+// Block_release(c);
+// ---->
+// c = [b copy];
+// <removed>
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class UnbridgedCastRewriter : public RecursiveASTVisitor<UnbridgedCastRewriter>{
+ MigrationPass &Pass;
+ IdentifierInfo *SelfII;
+ std::unique_ptr<ParentMap> StmtMap;
+ Decl *ParentD;
+ Stmt *Body;
+ mutable std::unique_ptr<ExprSet> Removables;
+
+public:
+ UnbridgedCastRewriter(MigrationPass &pass)
+ : Pass(pass), ParentD(nullptr), Body(nullptr) {
+ SelfII = &Pass.Ctx.Idents.get("self");
+ }
+
+ void transformBody(Stmt *body, Decl *ParentD) {
+ this->ParentD = ParentD;
+ Body = body;
+ StmtMap.reset(new ParentMap(body));
+ TraverseStmt(body);
+ }
+
+ bool TraverseBlockDecl(BlockDecl *D) {
+ // ParentMap does not enter into a BlockDecl to record its stmts, so use a
+ // new UnbridgedCastRewriter to handle the block.
+ UnbridgedCastRewriter(Pass).transformBody(D->getBody(), D);
+ return true;
+ }
+
+ bool VisitCastExpr(CastExpr *E) {
+ if (E->getCastKind() != CK_CPointerToObjCPointerCast &&
+ E->getCastKind() != CK_BitCast &&
+ E->getCastKind() != CK_AnyPointerToBlockPointerCast)
+ return true;
+
+ QualType castType = E->getType();
+ Expr *castExpr = E->getSubExpr();
+ QualType castExprType = castExpr->getType();
+
+ if (castType->isObjCRetainableType() == castExprType->isObjCRetainableType())
+ return true;
+
+ bool exprRetainable = castExprType->isObjCIndirectLifetimeType();
+ bool castRetainable = castType->isObjCIndirectLifetimeType();
+ if (exprRetainable == castRetainable) return true;
+
+ if (castExpr->isNullPointerConstant(Pass.Ctx,
+ Expr::NPC_ValueDependentIsNull))
+ return true;
+
+ SourceLocation loc = castExpr->getExprLoc();
+ if (loc.isValid() && Pass.Ctx.getSourceManager().isInSystemHeader(loc))
+ return true;
+
+ if (castType->isObjCRetainableType())
+ transformNonObjCToObjCCast(E);
+ else
+ transformObjCToNonObjCCast(E);
+
+ return true;
+ }
+
+private:
+ void transformNonObjCToObjCCast(CastExpr *E) {
+ if (!E) return;
+
+    // Global variables are assumed to be cast as unretained.
+ if (isGlobalVar(E))
+ if (E->getSubExpr()->getType()->isPointerType()) {
+ castToObjCObject(E, /*retained=*/false);
+ return;
+ }
+
+    // If the cast is directly over the result of a Core Foundation function,
+    // try to figure out whether it should be cast as retained or unretained.
+ Expr *inner = E->IgnoreParenCasts();
+ if (CallExpr *callE = dyn_cast<CallExpr>(inner)) {
+ if (FunctionDecl *FD = callE->getDirectCallee()) {
+ if (FD->hasAttr<CFReturnsRetainedAttr>()) {
+ castToObjCObject(E, /*retained=*/true);
+ return;
+ }
+ if (FD->hasAttr<CFReturnsNotRetainedAttr>()) {
+ castToObjCObject(E, /*retained=*/false);
+ return;
+ }
+ if (FD->isGlobal() &&
+ FD->getIdentifier() &&
+ ento::cocoa::isRefType(E->getSubExpr()->getType(), "CF",
+ FD->getIdentifier()->getName())) {
+ StringRef fname = FD->getIdentifier()->getName();
+ if (fname.endswith("Retain") ||
+ fname.find("Create") != StringRef::npos ||
+ fname.find("Copy") != StringRef::npos) {
+          // Do not migrate to a pair of bridge transfer casts that cancel
+          // each other out. Leave the code unchanged so the error gets the
+          // user's attention instead.
+ if (FD->getName() == "CFRetain" &&
+ FD->getNumParams() == 1 &&
+ FD->getParent()->isTranslationUnit() &&
+ FD->isExternallyVisible()) {
+ Expr *Arg = callE->getArg(0);
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
+ const Expr *sub = ICE->getSubExpr();
+ QualType T = sub->getType();
+ if (T->isObjCObjectPointerType())
+ return;
+ }
+ }
+ castToObjCObject(E, /*retained=*/true);
+ return;
+ }
+
+ if (fname.find("Get") != StringRef::npos) {
+ castToObjCObject(E, /*retained=*/false);
+ return;
+ }
+ }
+ }
+ }
+
+ // If returning an ivar or a member of an ivar from a +0 method, use
+ // a __bridge cast.
+ Expr *base = inner->IgnoreParenImpCasts();
+ while (isa<MemberExpr>(base))
+ base = cast<MemberExpr>(base)->getBase()->IgnoreParenImpCasts();
+ if (isa<ObjCIvarRefExpr>(base) &&
+ isa<ReturnStmt>(StmtMap->getParentIgnoreParenCasts(E))) {
+ if (ObjCMethodDecl *method = dyn_cast_or_null<ObjCMethodDecl>(ParentD)) {
+ if (!method->hasAttr<NSReturnsRetainedAttr>()) {
+ castToObjCObject(E, /*retained=*/false);
+ return;
+ }
+ }
+ }
+ }
+
+ void castToObjCObject(CastExpr *E, bool retained) {
+ rewriteToBridgedCast(E, retained ? OBC_BridgeTransfer : OBC_Bridge);
+ }
+
+ void rewriteToBridgedCast(CastExpr *E, ObjCBridgeCastKind Kind) {
+ Transaction Trans(Pass.TA);
+ rewriteToBridgedCast(E, Kind, Trans);
+ }
+
+ void rewriteToBridgedCast(CastExpr *E, ObjCBridgeCastKind Kind,
+ Transaction &Trans) {
+ TransformActions &TA = Pass.TA;
+
+    // We will remove the compiler diagnostic; abort if it is not present.
+ if (!TA.hasDiagnostic(diag::err_arc_mismatched_cast,
+ diag::err_arc_cast_requires_bridge,
+ E->getLocStart())) {
+ Trans.abort();
+ return;
+ }
+
+ StringRef bridge;
+ switch(Kind) {
+ case OBC_Bridge:
+ bridge = "__bridge "; break;
+ case OBC_BridgeTransfer:
+ bridge = "__bridge_transfer "; break;
+ case OBC_BridgeRetained:
+ bridge = "__bridge_retained "; break;
+ }
+
+ TA.clearDiagnostic(diag::err_arc_mismatched_cast,
+ diag::err_arc_cast_requires_bridge,
+ E->getLocStart());
+ if (Kind == OBC_Bridge || !Pass.CFBridgingFunctionsDefined()) {
+ if (CStyleCastExpr *CCE = dyn_cast<CStyleCastExpr>(E)) {
+ TA.insertAfterToken(CCE->getLParenLoc(), bridge);
+ } else {
+ SourceLocation insertLoc = E->getSubExpr()->getLocStart();
+ SmallString<128> newCast;
+ newCast += '(';
+ newCast += bridge;
+ newCast += E->getType().getAsString(Pass.Ctx.getPrintingPolicy());
+ newCast += ')';
+
+ if (isa<ParenExpr>(E->getSubExpr())) {
+ TA.insert(insertLoc, newCast.str());
+ } else {
+ newCast += '(';
+ TA.insert(insertLoc, newCast.str());
+ TA.insertAfterToken(E->getLocEnd(), ")");
+ }
+ }
+ } else {
+ assert(Kind == OBC_BridgeTransfer || Kind == OBC_BridgeRetained);
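+      // Wrap the cast's sub-expression in the bridging call; e.g. the header
+      // example "(NSString *)CFUUIDCreateString(...)" becomes
+      // "(NSString *)CFBridgingRelease(CFUUIDCreateString(...))".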
+ SmallString<32> BridgeCall;
+
+ Expr *WrapE = E->getSubExpr();
+ SourceLocation InsertLoc = WrapE->getLocStart();
+
+ SourceManager &SM = Pass.Ctx.getSourceManager();
+ char PrevChar = *SM.getCharacterData(InsertLoc.getLocWithOffset(-1));
+ if (Lexer::isIdentifierBodyChar(PrevChar, Pass.Ctx.getLangOpts()))
+ BridgeCall += ' ';
+
+ if (Kind == OBC_BridgeTransfer)
+ BridgeCall += "CFBridgingRelease";
+ else
+ BridgeCall += "CFBridgingRetain";
+
+ if (isa<ParenExpr>(WrapE)) {
+ TA.insert(InsertLoc, BridgeCall);
+ } else {
+ BridgeCall += '(';
+ TA.insert(InsertLoc, BridgeCall);
+ TA.insertAfterToken(WrapE->getLocEnd(), ")");
+ }
+ }
+ }
+
+ void rewriteCastForCFRetain(CastExpr *castE, CallExpr *callE) {
+ Transaction Trans(Pass.TA);
+ Pass.TA.replace(callE->getSourceRange(), callE->getArg(0)->getSourceRange());
+ rewriteToBridgedCast(castE, OBC_BridgeRetained, Trans);
+ }
+
+ void getBlockMacroRanges(CastExpr *E, SourceRange &Outer, SourceRange &Inner) {
+ SourceManager &SM = Pass.Ctx.getSourceManager();
+ SourceLocation Loc = E->getExprLoc();
+ assert(Loc.isMacroID());
+ SourceLocation MacroBegin, MacroEnd;
+ std::tie(MacroBegin, MacroEnd) = SM.getImmediateExpansionRange(Loc);
+ SourceRange SubRange = E->getSubExpr()->IgnoreParenImpCasts()->getSourceRange();
+ SourceLocation InnerBegin = SM.getImmediateMacroCallerLoc(SubRange.getBegin());
+ SourceLocation InnerEnd = SM.getImmediateMacroCallerLoc(SubRange.getEnd());
+
+ Outer = SourceRange(MacroBegin, MacroEnd);
+ Inner = SourceRange(InnerBegin, InnerEnd);
+ }
+
+ void rewriteBlockCopyMacro(CastExpr *E) {
+ SourceRange OuterRange, InnerRange;
+ getBlockMacroRanges(E, OuterRange, InnerRange);
+
+ Transaction Trans(Pass.TA);
+ Pass.TA.replace(OuterRange, InnerRange);
+ Pass.TA.insert(InnerRange.getBegin(), "[");
+ Pass.TA.insertAfterToken(InnerRange.getEnd(), " copy]");
+ Pass.TA.clearDiagnostic(diag::err_arc_mismatched_cast,
+ diag::err_arc_cast_requires_bridge,
+ OuterRange);
+ }
+
+ void removeBlockReleaseMacro(CastExpr *E) {
+ SourceRange OuterRange, InnerRange;
+ getBlockMacroRanges(E, OuterRange, InnerRange);
+
+ Transaction Trans(Pass.TA);
+ Pass.TA.clearDiagnostic(diag::err_arc_mismatched_cast,
+ diag::err_arc_cast_requires_bridge,
+ OuterRange);
+ if (!hasSideEffects(E, Pass.Ctx)) {
+ if (tryRemoving(cast<Expr>(StmtMap->getParentIgnoreParenCasts(E))))
+ return;
+ }
+ Pass.TA.replace(OuterRange, InnerRange);
+ }
+
+ bool tryRemoving(Expr *E) const {
+ if (!Removables) {
+ Removables.reset(new ExprSet);
+ collectRemovables(Body, *Removables);
+ }
+
+ if (Removables->count(E)) {
+ Pass.TA.removeStmt(E);
+ return true;
+ }
+
+ return false;
+ }
+
+ void transformObjCToNonObjCCast(CastExpr *E) {
+ SourceLocation CastLoc = E->getExprLoc();
+ if (CastLoc.isMacroID()) {
+ StringRef MacroName = Lexer::getImmediateMacroName(CastLoc,
+ Pass.Ctx.getSourceManager(),
+ Pass.Ctx.getLangOpts());
+ if (MacroName == "Block_copy") {
+ rewriteBlockCopyMacro(E);
+ return;
+ }
+ if (MacroName == "Block_release") {
+ removeBlockReleaseMacro(E);
+ return;
+ }
+ }
+
+ if (isSelf(E->getSubExpr()))
+ return rewriteToBridgedCast(E, OBC_Bridge);
+
+ CallExpr *callE;
+ if (isPassedToCFRetain(E, callE))
+ return rewriteCastForCFRetain(E, callE);
+
+ ObjCMethodFamily family = getFamilyOfMessage(E->getSubExpr());
+ if (family == OMF_retain)
+ return rewriteToBridgedCast(E, OBC_BridgeRetained);
+
+ if (family == OMF_autorelease || family == OMF_release) {
+ std::string err = "it is not safe to cast to '";
+ err += E->getType().getAsString(Pass.Ctx.getPrintingPolicy());
+ err += "' the result of '";
+ err += family == OMF_autorelease ? "autorelease" : "release";
+ err += "' message; a __bridge cast may result in a pointer to a "
+ "destroyed object and a __bridge_retained may leak the object";
+ Pass.TA.reportError(err, E->getLocStart(),
+ E->getSubExpr()->getSourceRange());
+ Stmt *parent = E;
+ do {
+ parent = StmtMap->getParentIgnoreParenImpCasts(parent);
+ } while (parent && isa<ExprWithCleanups>(parent));
+
+ if (ReturnStmt *retS = dyn_cast_or_null<ReturnStmt>(parent)) {
+ std::string note = "remove the cast and change return type of function "
+ "to '";
+ note += E->getSubExpr()->getType().getAsString(Pass.Ctx.getPrintingPolicy());
+ note += "' to have the object automatically autoreleased";
+ Pass.TA.reportNote(note, retS->getLocStart());
+ }
+ }
+
+ Expr *subExpr = E->getSubExpr();
+
+ // Look through pseudo-object expressions.
+ if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(subExpr)) {
+ subExpr = pseudo->getResultExpr();
+ assert(subExpr && "no result for pseudo-object of non-void type?");
+ }
+
+ if (ImplicitCastExpr *implCE = dyn_cast<ImplicitCastExpr>(subExpr)) {
+ if (implCE->getCastKind() == CK_ARCConsumeObject)
+ return rewriteToBridgedCast(E, OBC_BridgeRetained);
+ if (implCE->getCastKind() == CK_ARCReclaimReturnedObject)
+ return rewriteToBridgedCast(E, OBC_Bridge);
+ }
+
+ bool isConsumed = false;
+ if (isPassedToCParamWithKnownOwnership(E, isConsumed))
+ return rewriteToBridgedCast(E, isConsumed ? OBC_BridgeRetained
+ : OBC_Bridge);
+ }
+
+ static ObjCMethodFamily getFamilyOfMessage(Expr *E) {
+ E = E->IgnoreParenCasts();
+ if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E))
+ return ME->getMethodFamily();
+
+ return OMF_None;
+ }
+
+ bool isPassedToCFRetain(Expr *E, CallExpr *&callE) const {
+ if ((callE = dyn_cast_or_null<CallExpr>(
+ StmtMap->getParentIgnoreParenImpCasts(E))))
+ if (FunctionDecl *
+ FD = dyn_cast_or_null<FunctionDecl>(callE->getCalleeDecl()))
+ if (FD->getName() == "CFRetain" && FD->getNumParams() == 1 &&
+ FD->getParent()->isTranslationUnit() &&
+ FD->isExternallyVisible())
+ return true;
+
+ return false;
+ }
+
+ bool isPassedToCParamWithKnownOwnership(Expr *E, bool &isConsumed) const {
+ if (CallExpr *callE = dyn_cast_or_null<CallExpr>(
+ StmtMap->getParentIgnoreParenImpCasts(E)))
+ if (FunctionDecl *
+ FD = dyn_cast_or_null<FunctionDecl>(callE->getCalleeDecl())) {
+ unsigned i = 0;
+ for (unsigned e = callE->getNumArgs(); i != e; ++i) {
+ Expr *arg = callE->getArg(i);
+ if (arg == E || arg->IgnoreParenImpCasts() == E)
+ break;
+ }
+ if (i < callE->getNumArgs() && i < FD->getNumParams()) {
+ ParmVarDecl *PD = FD->getParamDecl(i);
+ if (PD->hasAttr<CFConsumedAttr>()) {
+ isConsumed = true;
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ bool isSelf(Expr *E) const {
+ E = E->IgnoreParenLValueCasts();
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ if (ImplicitParamDecl *IPD = dyn_cast<ImplicitParamDecl>(DRE->getDecl()))
+ if (IPD->getIdentifier() == SelfII)
+ return true;
+
+ return false;
+ }
+};
+
+} // end anonymous namespace
+
+void trans::rewriteUnbridgedCasts(MigrationPass &pass) {
+ BodyTransform<UnbridgedCastRewriter> trans(pass);
+ trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp
new file mode 100644
index 0000000..70370ec
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp
@@ -0,0 +1,78 @@
+//===--- TransUnusedInitDelegate.cpp - Transformations to ARC mode --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Transformations:
+//===----------------------------------------------------------------------===//
+//
+// rewriteUnusedInitDelegate:
+//
+// Rewrites an unused result of calling a delegate initialization into an
+// assignment to 'self', guarded so that a failed initialization returns nil.
+// e.g.
+//  [self init];
+// ---->
+//  if (!(self = [self init])) return nil;
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/Sema/SemaDiagnostic.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class UnusedInitRewriter : public RecursiveASTVisitor<UnusedInitRewriter> {
+ Stmt *Body;
+ MigrationPass &Pass;
+
+ ExprSet Removables;
+
+public:
+ UnusedInitRewriter(MigrationPass &pass)
+ : Body(nullptr), Pass(pass) { }
+
+ void transformBody(Stmt *body, Decl *ParentD) {
+ Body = body;
+ collectRemovables(body, Removables);
+ TraverseStmt(body);
+ }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *ME) {
+ if (ME->isDelegateInitCall() &&
+ isRemovable(ME) &&
+ Pass.TA.hasDiagnostic(diag::err_arc_unused_init_message,
+ ME->getExprLoc())) {
+ Transaction Trans(Pass.TA);
+ Pass.TA.clearDiagnostic(diag::err_arc_unused_init_message,
+ ME->getExprLoc());
+ SourceRange ExprRange = ME->getSourceRange();
+ Pass.TA.insert(ExprRange.getBegin(), "if (!(self = ");
+ std::string retStr = ")) return ";
+ retStr += getNilString(Pass);
+ Pass.TA.insertAfterToken(ExprRange.getEnd(), retStr);
+ }
+ return true;
+ }
+
+private:
+ bool isRemovable(Expr *E) const {
+ return Removables.count(E);
+ }
+};
+
+} // anonymous namespace
+
+void trans::rewriteUnusedInitDelegate(MigrationPass &pass) {
+ BodyTransform<UnusedInitRewriter> trans(pass);
+ trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
new file mode 100644
index 0000000..76ce0ec
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
@@ -0,0 +1,227 @@
+//===--- TransZeroOutPropsInDealloc.cpp - Transformations to ARC mode -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// removeZeroOutPropsInDealloc:
+//
+// Removes zeroing out of "strong" @synthesized properties in a -dealloc method.
+//
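+// e.g. (an illustrative sketch):
+//
+//  - (void)dealloc {
+//    self.someProp = nil;
+//    [super dealloc];
+//  }
+// ---->
+//  - (void)dealloc {
+//    [super dealloc];
+//  }
+//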
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/AST/ASTContext.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class ZeroOutInDeallocRemover :
+ public RecursiveASTVisitor<ZeroOutInDeallocRemover> {
+ typedef RecursiveASTVisitor<ZeroOutInDeallocRemover> base;
+
+ MigrationPass &Pass;
+
+ llvm::DenseMap<ObjCPropertyDecl*, ObjCPropertyImplDecl*> SynthesizedProperties;
+ ImplicitParamDecl *SelfD;
+ ExprSet Removables;
+ Selector FinalizeSel;
+
+public:
+ ZeroOutInDeallocRemover(MigrationPass &pass) : Pass(pass), SelfD(nullptr) {
+ FinalizeSel =
+ Pass.Ctx.Selectors.getNullarySelector(&Pass.Ctx.Idents.get("finalize"));
+ }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *ME) {
+ ASTContext &Ctx = Pass.Ctx;
+ TransformActions &TA = Pass.TA;
+
+ if (ME->getReceiverKind() != ObjCMessageExpr::Instance)
+ return true;
+ Expr *receiver = ME->getInstanceReceiver();
+ if (!receiver)
+ return true;
+
+ DeclRefExpr *refE = dyn_cast<DeclRefExpr>(receiver->IgnoreParenCasts());
+ if (!refE || refE->getDecl() != SelfD)
+ return true;
+
+ bool BackedBySynthesizeSetter = false;
+ for (llvm::DenseMap<ObjCPropertyDecl*, ObjCPropertyImplDecl*>::iterator
+ P = SynthesizedProperties.begin(),
+ E = SynthesizedProperties.end(); P != E; ++P) {
+ ObjCPropertyDecl *PropDecl = P->first;
+ if (PropDecl->getSetterName() == ME->getSelector()) {
+ BackedBySynthesizeSetter = true;
+ break;
+ }
+ }
+ if (!BackedBySynthesizeSetter)
+ return true;
+
+    // Remove the setter message if the RHS is null.
+ Transaction Trans(TA);
+ Expr *RHS = ME->getArg(0);
+ bool RHSIsNull =
+ RHS->isNullPointerConstant(Ctx,
+ Expr::NPC_ValueDependentIsNull);
+ if (RHSIsNull && isRemovable(ME))
+ TA.removeStmt(ME);
+
+ return true;
+ }
+
+ bool VisitPseudoObjectExpr(PseudoObjectExpr *POE) {
+ if (isZeroingPropIvar(POE) && isRemovable(POE)) {
+ Transaction Trans(Pass.TA);
+ Pass.TA.removeStmt(POE);
+ }
+
+ return true;
+ }
+
+ bool VisitBinaryOperator(BinaryOperator *BOE) {
+ if (isZeroingPropIvar(BOE) && isRemovable(BOE)) {
+ Transaction Trans(Pass.TA);
+ Pass.TA.removeStmt(BOE);
+ }
+
+ return true;
+ }
+
+ bool TraverseObjCMethodDecl(ObjCMethodDecl *D) {
+ if (D->getMethodFamily() != OMF_dealloc &&
+ !(D->isInstanceMethod() && D->getSelector() == FinalizeSel))
+ return true;
+ if (!D->hasBody())
+ return true;
+
+ ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(D->getDeclContext());
+ if (!IMD)
+ return true;
+
+ SelfD = D->getSelfDecl();
+ collectRemovables(D->getBody(), Removables);
+
+    // For a 'dealloc' method, find all synthesized property implementations
+    // in this class implementation.
+ for (auto *PID : IMD->property_impls()) {
+ if (PID->getPropertyImplementation() ==
+ ObjCPropertyImplDecl::Synthesize) {
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCMethodDecl *setterM = PD->getSetterMethodDecl();
+ if (!(setterM && setterM->isDefined())) {
+ ObjCPropertyDecl::PropertyAttributeKind AttrKind =
+ PD->getPropertyAttributes();
+ if (AttrKind &
+ (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_copy |
+ ObjCPropertyDecl::OBJC_PR_strong))
+ SynthesizedProperties[PD] = PID;
+ }
+ }
+ }
+
+ // Now, remove all zeroing of ivars etc.
+ base::TraverseObjCMethodDecl(D);
+
+    // Clear out state for the next method.
+ SynthesizedProperties.clear();
+ SelfD = nullptr;
+ Removables.clear();
+ return true;
+ }
+
+ bool TraverseFunctionDecl(FunctionDecl *D) { return true; }
+ bool TraverseBlockDecl(BlockDecl *block) { return true; }
+ bool TraverseBlockExpr(BlockExpr *block) { return true; }
+
+private:
+ bool isRemovable(Expr *E) const {
+ return Removables.count(E);
+ }
+
+ bool isZeroingPropIvar(Expr *E) {
+ E = E->IgnoreParens();
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E))
+ return isZeroingPropIvar(BO);
+ if (PseudoObjectExpr *PO = dyn_cast<PseudoObjectExpr>(E))
+ return isZeroingPropIvar(PO);
+ return false;
+ }
+
+ bool isZeroingPropIvar(BinaryOperator *BOE) {
+ if (BOE->getOpcode() == BO_Comma)
+ return isZeroingPropIvar(BOE->getLHS()) &&
+ isZeroingPropIvar(BOE->getRHS());
+
+ if (BOE->getOpcode() != BO_Assign)
+ return false;
+
+ Expr *LHS = BOE->getLHS();
+ if (ObjCIvarRefExpr *IV = dyn_cast<ObjCIvarRefExpr>(LHS)) {
+ ObjCIvarDecl *IVDecl = IV->getDecl();
+ if (!IVDecl->getType()->isObjCObjectPointerType())
+ return false;
+ bool IvarBacksPropertySynthesis = false;
+ for (llvm::DenseMap<ObjCPropertyDecl*, ObjCPropertyImplDecl*>::iterator
+ P = SynthesizedProperties.begin(),
+ E = SynthesizedProperties.end(); P != E; ++P) {
+ ObjCPropertyImplDecl *PropImpDecl = P->second;
+ if (PropImpDecl && PropImpDecl->getPropertyIvarDecl() == IVDecl) {
+ IvarBacksPropertySynthesis = true;
+ break;
+ }
+ }
+ if (!IvarBacksPropertySynthesis)
+ return false;
+    } else
+      return false;
+
+ return isZero(BOE->getRHS());
+ }
+
+ bool isZeroingPropIvar(PseudoObjectExpr *PO) {
+ BinaryOperator *BO = dyn_cast<BinaryOperator>(PO->getSyntacticForm());
+ if (!BO) return false;
+ if (BO->getOpcode() != BO_Assign) return false;
+
+ ObjCPropertyRefExpr *PropRefExp =
+ dyn_cast<ObjCPropertyRefExpr>(BO->getLHS()->IgnoreParens());
+ if (!PropRefExp) return false;
+
+ // TODO: Using implicit property decl.
+ if (PropRefExp->isImplicitProperty())
+ return false;
+
+ if (ObjCPropertyDecl *PDecl = PropRefExp->getExplicitProperty()) {
+ if (!SynthesizedProperties.count(PDecl))
+ return false;
+ }
+
+ return isZero(cast<OpaqueValueExpr>(BO->getRHS())->getSourceExpr());
+ }
+
+ bool isZero(Expr *E) {
+ if (E->isNullPointerConstant(Pass.Ctx, Expr::NPC_ValueDependentIsNull))
+ return true;
+
+ return isZeroingPropIvar(E);
+ }
+};
+
+} // anonymous namespace
+
+void trans::removeZeroOutPropsInDeallocFinalize(MigrationPass &pass) {
+ ZeroOutInDeallocRemover trans(pass);
+ trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransformActions.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransformActions.cpp
new file mode 100644
index 0000000..c628b54
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransformActions.cpp
@@ -0,0 +1,694 @@
+//===--- TransformActions.cpp - Migration to ARC mode ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Internals.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/DenseSet.h"
+#include <map>
+using namespace clang;
+using namespace arcmt;
+
+namespace {
+
+/// \brief Collects transformations and merges them before applying them with
+/// applyRewrites(). E.g. if the same source range is requested to be removed
+/// twice, only one rewriter remove will be invoked.
+/// Rewrites happen in "transactions"; if one rewrite in the transaction cannot
+/// be done (e.g. it resides in a macro) all rewrites in the transaction are
+/// aborted.
+/// FIXME: "Transactional" rewrites support should be baked in the Rewriter.
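+///
+/// A typical use, as a sketch:
+///   startTransaction();
+///   removeStmt(S);        // cached, not applied yet
+///   commitTransaction();  // applies all cached actions, or aborts them all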
+class TransformActionsImpl {
+ CapturedDiagList &CapturedDiags;
+ ASTContext &Ctx;
+ Preprocessor &PP;
+
+ bool IsInTransaction;
+
+ enum ActionKind {
+ Act_Insert, Act_InsertAfterToken,
+ Act_Remove, Act_RemoveStmt,
+ Act_Replace, Act_ReplaceText,
+ Act_IncreaseIndentation,
+ Act_ClearDiagnostic
+ };
+
+ struct ActionData {
+ ActionKind Kind;
+ SourceLocation Loc;
+ SourceRange R1, R2;
+ StringRef Text1, Text2;
+ Stmt *S;
+ SmallVector<unsigned, 2> DiagIDs;
+ };
+
+ std::vector<ActionData> CachedActions;
+
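+  /// How one character range relates to another; e.g. [0,4) is Range_Before
+  /// [5,9), [2,9) Range_Contains [3,5), and [2,6) compared with [4,8) is
+  /// Range_ExtendsBegin.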
+ enum RangeComparison {
+ Range_Before,
+ Range_After,
+ Range_Contains,
+ Range_Contained,
+ Range_ExtendsBegin,
+ Range_ExtendsEnd
+ };
+
+ /// \brief A range to remove. It is a character range.
+ struct CharRange {
+ FullSourceLoc Begin, End;
+
+ CharRange(CharSourceRange range, SourceManager &srcMgr, Preprocessor &PP) {
+ SourceLocation beginLoc = range.getBegin(), endLoc = range.getEnd();
+ assert(beginLoc.isValid() && endLoc.isValid());
+ if (range.isTokenRange()) {
+ Begin = FullSourceLoc(srcMgr.getExpansionLoc(beginLoc), srcMgr);
+ End = FullSourceLoc(getLocForEndOfToken(endLoc, srcMgr, PP), srcMgr);
+ } else {
+ Begin = FullSourceLoc(srcMgr.getExpansionLoc(beginLoc), srcMgr);
+ End = FullSourceLoc(srcMgr.getExpansionLoc(endLoc), srcMgr);
+ }
+ assert(Begin.isValid() && End.isValid());
+ }
+
+ RangeComparison compareWith(const CharRange &RHS) const {
+ if (End.isBeforeInTranslationUnitThan(RHS.Begin))
+ return Range_Before;
+ if (RHS.End.isBeforeInTranslationUnitThan(Begin))
+ return Range_After;
+ if (!Begin.isBeforeInTranslationUnitThan(RHS.Begin) &&
+ !RHS.End.isBeforeInTranslationUnitThan(End))
+ return Range_Contained;
+ if (Begin.isBeforeInTranslationUnitThan(RHS.Begin) &&
+ RHS.End.isBeforeInTranslationUnitThan(End))
+ return Range_Contains;
+ if (Begin.isBeforeInTranslationUnitThan(RHS.Begin))
+ return Range_ExtendsBegin;
+ else
+ return Range_ExtendsEnd;
+ }
+
+ static RangeComparison compare(SourceRange LHS, SourceRange RHS,
+ SourceManager &SrcMgr, Preprocessor &PP) {
+ return CharRange(CharSourceRange::getTokenRange(LHS), SrcMgr, PP)
+ .compareWith(CharRange(CharSourceRange::getTokenRange(RHS),
+ SrcMgr, PP));
+ }
+ };
+
+ typedef SmallVector<StringRef, 2> TextsVec;
+ typedef std::map<FullSourceLoc, TextsVec, FullSourceLoc::BeforeThanCompare>
+ InsertsMap;
+ InsertsMap Inserts;
+ /// \brief A list of ranges to remove. They are always sorted and they never
+ /// intersect with each other.
+ std::list<CharRange> Removals;
+
+ llvm::DenseSet<Stmt *> StmtRemovals;
+
+ std::vector<std::pair<CharRange, SourceLocation> > IndentationRanges;
+
+ /// \brief Keeps text passed to transformation methods.
+ llvm::StringMap<bool> UniqueText;
+
+public:
+ TransformActionsImpl(CapturedDiagList &capturedDiags,
+ ASTContext &ctx, Preprocessor &PP)
+ : CapturedDiags(capturedDiags), Ctx(ctx), PP(PP), IsInTransaction(false) { }
+
+ ASTContext &getASTContext() { return Ctx; }
+
+ void startTransaction();
+ bool commitTransaction();
+ void abortTransaction();
+
+ bool isInTransaction() const { return IsInTransaction; }
+
+ void insert(SourceLocation loc, StringRef text);
+ void insertAfterToken(SourceLocation loc, StringRef text);
+ void remove(SourceRange range);
+ void removeStmt(Stmt *S);
+ void replace(SourceRange range, StringRef text);
+ void replace(SourceRange range, SourceRange replacementRange);
+ void replaceStmt(Stmt *S, StringRef text);
+ void replaceText(SourceLocation loc, StringRef text,
+ StringRef replacementText);
+ void increaseIndentation(SourceRange range,
+ SourceLocation parentIndent);
+
+ bool clearDiagnostic(ArrayRef<unsigned> IDs, SourceRange range);
+
+ void applyRewrites(TransformActions::RewriteReceiver &receiver);
+
+private:
+ bool canInsert(SourceLocation loc);
+ bool canInsertAfterToken(SourceLocation loc);
+ bool canRemoveRange(SourceRange range);
+ bool canReplaceRange(SourceRange range, SourceRange replacementRange);
+ bool canReplaceText(SourceLocation loc, StringRef text);
+
+ void commitInsert(SourceLocation loc, StringRef text);
+ void commitInsertAfterToken(SourceLocation loc, StringRef text);
+ void commitRemove(SourceRange range);
+ void commitRemoveStmt(Stmt *S);
+ void commitReplace(SourceRange range, SourceRange replacementRange);
+ void commitReplaceText(SourceLocation loc, StringRef text,
+ StringRef replacementText);
+ void commitIncreaseIndentation(SourceRange range,SourceLocation parentIndent);
+ void commitClearDiagnostic(ArrayRef<unsigned> IDs, SourceRange range);
+
+ void addRemoval(CharSourceRange range);
+ void addInsertion(SourceLocation loc, StringRef text);
+
+ /// \brief Stores text passed to the transformation methods to keep the string
+ /// "alive". Since the vast majority of text will be the same, we also unique
+ /// the strings using a StringMap.
+ StringRef getUniqueText(StringRef text);
+
+ /// \brief Computes the source location just past the end of the token at
+ /// the given source location. If the location points at a macro, the whole
+ /// macro expansion is skipped.
+ static SourceLocation getLocForEndOfToken(SourceLocation loc,
+ SourceManager &SM,Preprocessor &PP);
+};
+
+} // anonymous namespace
+
+void TransformActionsImpl::startTransaction() {
+ assert(!IsInTransaction &&
+ "Cannot start a transaction in the middle of another one");
+ IsInTransaction = true;
+}
+
+bool TransformActionsImpl::commitTransaction() {
+ assert(IsInTransaction && "No transaction started");
+
+ if (CachedActions.empty()) {
+ IsInTransaction = false;
+ return false;
+ }
+
+ // Verify that all actions are possible otherwise abort the whole transaction.
+ bool AllActionsPossible = true;
+ for (unsigned i = 0, e = CachedActions.size(); i != e; ++i) {
+ ActionData &act = CachedActions[i];
+ switch (act.Kind) {
+ case Act_Insert:
+ if (!canInsert(act.Loc))
+ AllActionsPossible = false;
+ break;
+ case Act_InsertAfterToken:
+ if (!canInsertAfterToken(act.Loc))
+ AllActionsPossible = false;
+ break;
+ case Act_Remove:
+ if (!canRemoveRange(act.R1))
+ AllActionsPossible = false;
+ break;
+ case Act_RemoveStmt:
+ assert(act.S);
+ if (!canRemoveRange(act.S->getSourceRange()))
+ AllActionsPossible = false;
+ break;
+ case Act_Replace:
+ if (!canReplaceRange(act.R1, act.R2))
+ AllActionsPossible = false;
+ break;
+ case Act_ReplaceText:
+ if (!canReplaceText(act.Loc, act.Text1))
+ AllActionsPossible = false;
+ break;
+ case Act_IncreaseIndentation:
+      // This is not important; we don't care if it fails.
+ break;
+ case Act_ClearDiagnostic:
+ // We are just checking source rewrites.
+ break;
+ }
+ if (!AllActionsPossible)
+ break;
+ }
+
+ if (!AllActionsPossible) {
+ abortTransaction();
+ return true;
+ }
+
+ for (unsigned i = 0, e = CachedActions.size(); i != e; ++i) {
+ ActionData &act = CachedActions[i];
+ switch (act.Kind) {
+ case Act_Insert:
+ commitInsert(act.Loc, act.Text1);
+ break;
+ case Act_InsertAfterToken:
+ commitInsertAfterToken(act.Loc, act.Text1);
+ break;
+ case Act_Remove:
+ commitRemove(act.R1);
+ break;
+ case Act_RemoveStmt:
+ commitRemoveStmt(act.S);
+ break;
+ case Act_Replace:
+ commitReplace(act.R1, act.R2);
+ break;
+ case Act_ReplaceText:
+ commitReplaceText(act.Loc, act.Text1, act.Text2);
+ break;
+ case Act_IncreaseIndentation:
+ commitIncreaseIndentation(act.R1, act.Loc);
+ break;
+ case Act_ClearDiagnostic:
+ commitClearDiagnostic(act.DiagIDs, act.R1);
+ break;
+ }
+ }
+
+ CachedActions.clear();
+ IsInTransaction = false;
+ return false;
+}
+
+void TransformActionsImpl::abortTransaction() {
+ assert(IsInTransaction && "No transaction started");
+ CachedActions.clear();
+ IsInTransaction = false;
+}
+
+void TransformActionsImpl::insert(SourceLocation loc, StringRef text) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ text = getUniqueText(text);
+ ActionData data;
+ data.Kind = Act_Insert;
+ data.Loc = loc;
+ data.Text1 = text;
+ CachedActions.push_back(data);
+}
+
+void TransformActionsImpl::insertAfterToken(SourceLocation loc, StringRef text) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ text = getUniqueText(text);
+ ActionData data;
+ data.Kind = Act_InsertAfterToken;
+ data.Loc = loc;
+ data.Text1 = text;
+ CachedActions.push_back(data);
+}
+
+void TransformActionsImpl::remove(SourceRange range) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ ActionData data;
+ data.Kind = Act_Remove;
+ data.R1 = range;
+ CachedActions.push_back(data);
+}
+
+void TransformActionsImpl::removeStmt(Stmt *S) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ ActionData data;
+ data.Kind = Act_RemoveStmt;
+ data.S = S->IgnoreImplicit(); // important for uniquing
+ CachedActions.push_back(data);
+}
+
+void TransformActionsImpl::replace(SourceRange range, StringRef text) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ text = getUniqueText(text);
+ remove(range);
+ insert(range.getBegin(), text);
+}
+
+void TransformActionsImpl::replace(SourceRange range,
+ SourceRange replacementRange) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ ActionData data;
+ data.Kind = Act_Replace;
+ data.R1 = range;
+ data.R2 = replacementRange;
+ CachedActions.push_back(data);
+}
+
+void TransformActionsImpl::replaceText(SourceLocation loc, StringRef text,
+ StringRef replacementText) {
+ text = getUniqueText(text);
+ replacementText = getUniqueText(replacementText);
+ ActionData data;
+ data.Kind = Act_ReplaceText;
+ data.Loc = loc;
+ data.Text1 = text;
+ data.Text2 = replacementText;
+ CachedActions.push_back(data);
+}
+
+void TransformActionsImpl::replaceStmt(Stmt *S, StringRef text) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ text = getUniqueText(text);
+ insert(S->getLocStart(), text);
+ removeStmt(S);
+}
+
+void TransformActionsImpl::increaseIndentation(SourceRange range,
+ SourceLocation parentIndent) {
+ if (range.isInvalid()) return;
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ ActionData data;
+ data.Kind = Act_IncreaseIndentation;
+ data.R1 = range;
+ data.Loc = parentIndent;
+ CachedActions.push_back(data);
+}
+
+bool TransformActionsImpl::clearDiagnostic(ArrayRef<unsigned> IDs,
+ SourceRange range) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ if (!CapturedDiags.hasDiagnostic(IDs, range))
+ return false;
+
+ ActionData data;
+ data.Kind = Act_ClearDiagnostic;
+ data.R1 = range;
+ data.DiagIDs.append(IDs.begin(), IDs.end());
+ CachedActions.push_back(data);
+ return true;
+}
+
+bool TransformActionsImpl::canInsert(SourceLocation loc) {
+ if (loc.isInvalid())
+ return false;
+
+ SourceManager &SM = Ctx.getSourceManager();
+ if (SM.isInSystemHeader(SM.getExpansionLoc(loc)))
+ return false;
+
+ if (loc.isFileID())
+ return true;
+ return PP.isAtStartOfMacroExpansion(loc);
+}
+
+bool TransformActionsImpl::canInsertAfterToken(SourceLocation loc) {
+ if (loc.isInvalid())
+ return false;
+
+ SourceManager &SM = Ctx.getSourceManager();
+ if (SM.isInSystemHeader(SM.getExpansionLoc(loc)))
+ return false;
+
+ if (loc.isFileID())
+ return true;
+ return PP.isAtEndOfMacroExpansion(loc);
+}
+
+bool TransformActionsImpl::canRemoveRange(SourceRange range) {
+ return canInsert(range.getBegin()) && canInsertAfterToken(range.getEnd());
+}
+
+bool TransformActionsImpl::canReplaceRange(SourceRange range,
+ SourceRange replacementRange) {
+ return canRemoveRange(range) && canRemoveRange(replacementRange);
+}
+
+bool TransformActionsImpl::canReplaceText(SourceLocation loc, StringRef text) {
+ if (!canInsert(loc))
+ return false;
+
+ SourceManager &SM = Ctx.getSourceManager();
+ loc = SM.getExpansionLoc(loc);
+
+ // Break down the source location.
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);
+
+ // Try to load the file buffer.
+ bool invalidTemp = false;
+ StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
+ if (invalidTemp)
+ return false;
+
+ return file.substr(locInfo.second).startswith(text);
+}
+
+void TransformActionsImpl::commitInsert(SourceLocation loc, StringRef text) {
+ addInsertion(loc, text);
+}
+
+void TransformActionsImpl::commitInsertAfterToken(SourceLocation loc,
+ StringRef text) {
+ addInsertion(getLocForEndOfToken(loc, Ctx.getSourceManager(), PP), text);
+}
+
+void TransformActionsImpl::commitRemove(SourceRange range) {
+ addRemoval(CharSourceRange::getTokenRange(range));
+}
+
+void TransformActionsImpl::commitRemoveStmt(Stmt *S) {
+ assert(S);
+ if (StmtRemovals.count(S))
+ return; // already removed.
+
+ if (Expr *E = dyn_cast<Expr>(S)) {
+ commitRemove(E->getSourceRange());
+ commitInsert(E->getSourceRange().getBegin(), getARCMTMacroName());
+ } else
+ commitRemove(S->getSourceRange());
+
+ StmtRemovals.insert(S);
+}
+
+void TransformActionsImpl::commitReplace(SourceRange range,
+ SourceRange replacementRange) {
+ RangeComparison comp = CharRange::compare(replacementRange, range,
+ Ctx.getSourceManager(), PP);
+ assert(comp == Range_Contained);
+ if (comp != Range_Contained)
+    return; // Although we asserted, be extra safe for release builds.
+ if (range.getBegin() != replacementRange.getBegin())
+ addRemoval(CharSourceRange::getCharRange(range.getBegin(),
+ replacementRange.getBegin()));
+ if (replacementRange.getEnd() != range.getEnd())
+ addRemoval(CharSourceRange::getTokenRange(
+ getLocForEndOfToken(replacementRange.getEnd(),
+ Ctx.getSourceManager(), PP),
+ range.getEnd()));
+}
+
+void TransformActionsImpl::commitReplaceText(SourceLocation loc,
+ StringRef text,
+ StringRef replacementText) {
+ SourceManager &SM = Ctx.getSourceManager();
+ loc = SM.getExpansionLoc(loc);
+ // canReplaceText already checked if loc points at text.
+ SourceLocation afterText = loc.getLocWithOffset(text.size());
+
+ addRemoval(CharSourceRange::getCharRange(loc, afterText));
+ commitInsert(loc, replacementText);
+}
+
+void TransformActionsImpl::commitIncreaseIndentation(SourceRange range,
+ SourceLocation parentIndent) {
+ SourceManager &SM = Ctx.getSourceManager();
+ IndentationRanges.push_back(
+ std::make_pair(CharRange(CharSourceRange::getTokenRange(range),
+ SM, PP),
+ SM.getExpansionLoc(parentIndent)));
+}
+
+void TransformActionsImpl::commitClearDiagnostic(ArrayRef<unsigned> IDs,
+ SourceRange range) {
+ CapturedDiags.clearDiagnostic(IDs, range);
+}
+
+void TransformActionsImpl::addInsertion(SourceLocation loc, StringRef text) {
+ SourceManager &SM = Ctx.getSourceManager();
+ loc = SM.getExpansionLoc(loc);
+ for (const CharRange &I : llvm::reverse(Removals)) {
+ if (!SM.isBeforeInTranslationUnit(loc, I.End))
+ break;
+ if (I.Begin.isBeforeInTranslationUnitThan(loc))
+ return;
+ }
+
+ Inserts[FullSourceLoc(loc, SM)].push_back(text);
+}
+
+void TransformActionsImpl::addRemoval(CharSourceRange range) {
+ CharRange newRange(range, Ctx.getSourceManager(), PP);
+ if (newRange.Begin == newRange.End)
+ return;
+
+ Inserts.erase(Inserts.upper_bound(newRange.Begin),
+ Inserts.lower_bound(newRange.End));
+
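+  // Walk the sorted removal list backwards, merging the new range with any
+  // ranges it overlaps so the list stays sorted and non-intersecting.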
+ std::list<CharRange>::iterator I = Removals.end();
+ while (I != Removals.begin()) {
+ std::list<CharRange>::iterator RI = I;
+ --RI;
+ RangeComparison comp = newRange.compareWith(*RI);
+ switch (comp) {
+ case Range_Before:
+ --I;
+ break;
+ case Range_After:
+ Removals.insert(I, newRange);
+ return;
+ case Range_Contained:
+ return;
+ case Range_Contains:
+ RI->End = newRange.End;
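+      // Fall through to erase the contained range.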
+ case Range_ExtendsBegin:
+ newRange.End = RI->End;
+ Removals.erase(RI);
+ break;
+ case Range_ExtendsEnd:
+ RI->End = newRange.End;
+ return;
+ }
+ }
+
+ Removals.insert(Removals.begin(), newRange);
+}
+
+void TransformActionsImpl::applyRewrites(
+ TransformActions::RewriteReceiver &receiver) {
+ for (InsertsMap::iterator I = Inserts.begin(), E = Inserts.end(); I!=E; ++I) {
+ SourceLocation loc = I->first;
+ for (TextsVec::iterator
+ TI = I->second.begin(), TE = I->second.end(); TI != TE; ++TI) {
+ receiver.insert(loc, *TI);
+ }
+ }
+
+ for (std::vector<std::pair<CharRange, SourceLocation> >::iterator
+ I = IndentationRanges.begin(), E = IndentationRanges.end(); I!=E; ++I) {
+ CharSourceRange range = CharSourceRange::getCharRange(I->first.Begin,
+ I->first.End);
+ receiver.increaseIndentation(range, I->second);
+ }
+
+ for (std::list<CharRange>::iterator
+ I = Removals.begin(), E = Removals.end(); I != E; ++I) {
+ CharSourceRange range = CharSourceRange::getCharRange(I->Begin, I->End);
+ receiver.remove(range);
+ }
+}
+
+/// \brief Stores text passed to the transformation methods to keep the string
+/// "alive". Since the vast majority of text will be the same, we also unique
+/// the strings using a StringMap.
+StringRef TransformActionsImpl::getUniqueText(StringRef text) {
+ return UniqueText.insert(std::make_pair(text, false)).first->first();
+}
+
+/// \brief Computes the source location just past the end of the token at
+/// the given source location. If the location points at a macro, the whole
+/// macro expansion is skipped.
+SourceLocation TransformActionsImpl::getLocForEndOfToken(SourceLocation loc,
+ SourceManager &SM,
+ Preprocessor &PP) {
+ if (loc.isMacroID())
+ loc = SM.getExpansionRange(loc).second;
+ return PP.getLocForEndOfToken(loc);
+}
+
+TransformActions::RewriteReceiver::~RewriteReceiver() { }
+
+TransformActions::TransformActions(DiagnosticsEngine &diag,
+ CapturedDiagList &capturedDiags,
+ ASTContext &ctx, Preprocessor &PP)
+ : Diags(diag), CapturedDiags(capturedDiags) {
+ Impl = new TransformActionsImpl(capturedDiags, ctx, PP);
+}
+
+TransformActions::~TransformActions() {
+ delete static_cast<TransformActionsImpl*>(Impl);
+}
+
+void TransformActions::startTransaction() {
+ static_cast<TransformActionsImpl*>(Impl)->startTransaction();
+}
+
+bool TransformActions::commitTransaction() {
+ return static_cast<TransformActionsImpl*>(Impl)->commitTransaction();
+}
+
+void TransformActions::abortTransaction() {
+ static_cast<TransformActionsImpl*>(Impl)->abortTransaction();
+}
+
+void TransformActions::insert(SourceLocation loc, StringRef text) {
+ static_cast<TransformActionsImpl*>(Impl)->insert(loc, text);
+}
+
+void TransformActions::insertAfterToken(SourceLocation loc,
+ StringRef text) {
+ static_cast<TransformActionsImpl*>(Impl)->insertAfterToken(loc, text);
+}
+
+void TransformActions::remove(SourceRange range) {
+ static_cast<TransformActionsImpl*>(Impl)->remove(range);
+}
+
+void TransformActions::removeStmt(Stmt *S) {
+ static_cast<TransformActionsImpl*>(Impl)->removeStmt(S);
+}
+
+void TransformActions::replace(SourceRange range, StringRef text) {
+ static_cast<TransformActionsImpl*>(Impl)->replace(range, text);
+}
+
+void TransformActions::replace(SourceRange range,
+ SourceRange replacementRange) {
+ static_cast<TransformActionsImpl*>(Impl)->replace(range, replacementRange);
+}
+
+void TransformActions::replaceStmt(Stmt *S, StringRef text) {
+ static_cast<TransformActionsImpl*>(Impl)->replaceStmt(S, text);
+}
+
+void TransformActions::replaceText(SourceLocation loc, StringRef text,
+ StringRef replacementText) {
+ static_cast<TransformActionsImpl*>(Impl)->replaceText(loc, text,
+ replacementText);
+}
+
+void TransformActions::increaseIndentation(SourceRange range,
+ SourceLocation parentIndent) {
+ static_cast<TransformActionsImpl*>(Impl)->increaseIndentation(range,
+ parentIndent);
+}
+
+bool TransformActions::clearDiagnostic(ArrayRef<unsigned> IDs,
+ SourceRange range) {
+ return static_cast<TransformActionsImpl*>(Impl)->clearDiagnostic(IDs, range);
+}
+
+void TransformActions::applyRewrites(RewriteReceiver &receiver) {
+ static_cast<TransformActionsImpl*>(Impl)->applyRewrites(receiver);
+}
+
+DiagnosticBuilder TransformActions::report(SourceLocation loc, unsigned diagId,
+ SourceRange range) {
+ assert(!static_cast<TransformActionsImpl *>(Impl)->isInTransaction() &&
+ "Errors should be emitted out of a transaction");
+ return Diags.Report(loc, diagId) << range;
+}
+
+void TransformActions::reportError(StringRef message, SourceLocation loc,
+ SourceRange range) {
+ report(loc, diag::err_mt_message, range) << message;
+}
+
+void TransformActions::reportWarning(StringRef message, SourceLocation loc,
+ SourceRange range) {
+ report(loc, diag::warn_mt_message, range) << message;
+}
+
+void TransformActions::reportNote(StringRef message, SourceLocation loc,
+ SourceRange range) {
+ report(loc, diag::note_mt_message, range) << message;
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp
new file mode 100644
index 0000000..3fd36ff
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp
@@ -0,0 +1,599 @@
+//===--- Transforms.cpp - Transformations to ARC mode ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/StringSwitch.h"
+#include <map>
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+ASTTraverser::~ASTTraverser() { }
+
+bool MigrationPass::CFBridgingFunctionsDefined() {
+ if (!EnableCFBridgeFns.hasValue())
+ EnableCFBridgeFns = SemaRef.isKnownName("CFBridgingRetain") &&
+ SemaRef.isKnownName("CFBridgingRelease");
+ return *EnableCFBridgeFns;
+}
+
+//===----------------------------------------------------------------------===//
+// Helpers.
+//===----------------------------------------------------------------------===//
+
+bool trans::canApplyWeak(ASTContext &Ctx, QualType type,
+ bool AllowOnUnknownClass) {
+ if (!Ctx.getLangOpts().ObjCWeakRuntime)
+ return false;
+
+ QualType T = type;
+ if (T.isNull())
+ return false;
+
+  // On iOS and watchOS it is always safe to use 'weak'.
+ if (Ctx.getTargetInfo().getTriple().isiOS() ||
+ Ctx.getTargetInfo().getTriple().isWatchOS())
+ AllowOnUnknownClass = true;
+
+ while (const PointerType *ptr = T->getAs<PointerType>())
+ T = ptr->getPointeeType();
+ if (const ObjCObjectPointerType *ObjT = T->getAs<ObjCObjectPointerType>()) {
+ ObjCInterfaceDecl *Class = ObjT->getInterfaceDecl();
+ if (!AllowOnUnknownClass && (!Class || Class->getName() == "NSObject"))
+ return false; // id/NSObject is not safe for weak.
+ if (!AllowOnUnknownClass && !Class->hasDefinition())
+ return false; // forward classes are not verifiable, therefore not safe.
+ if (Class && Class->isArcWeakrefUnavailable())
+ return false;
+ }
+
+ return true;
+}
+
+bool trans::isPlusOneAssign(const BinaryOperator *E) {
+ if (E->getOpcode() != BO_Assign)
+ return false;
+
+ return isPlusOne(E->getRHS());
+}
+
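+/// \brief Returns true if \p E yields a +1 (owned) object, e.g. the result of
+/// -retain, of a function marked cf_returns_retained, or of a CF function
+/// whose name ends in "Retain" or contains "Create" or "Copy".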
+bool trans::isPlusOne(const Expr *E) {
+ if (!E)
+ return false;
+ if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E))
+ E = EWC->getSubExpr();
+
+ if (const ObjCMessageExpr *
+ ME = dyn_cast<ObjCMessageExpr>(E->IgnoreParenCasts()))
+ if (ME->getMethodFamily() == OMF_retain)
+ return true;
+
+ if (const CallExpr *
+ callE = dyn_cast<CallExpr>(E->IgnoreParenCasts())) {
+ if (const FunctionDecl *FD = callE->getDirectCallee()) {
+ if (FD->hasAttr<CFReturnsRetainedAttr>())
+ return true;
+
+ if (FD->isGlobal() &&
+ FD->getIdentifier() &&
+ FD->getParent()->isTranslationUnit() &&
+ FD->isExternallyVisible() &&
+ ento::cocoa::isRefType(callE->getType(), "CF",
+ FD->getIdentifier()->getName())) {
+ StringRef fname = FD->getIdentifier()->getName();
+ if (fname.endswith("Retain") ||
+ fname.find("Create") != StringRef::npos ||
+ fname.find("Copy") != StringRef::npos) {
+ return true;
+ }
+ }
+ }
+ }
+
+ const ImplicitCastExpr *implCE = dyn_cast<ImplicitCastExpr>(E);
+ while (implCE && implCE->getCastKind() == CK_BitCast)
+ implCE = dyn_cast<ImplicitCastExpr>(implCE->getSubExpr());
+
+ return implCE && implCE->getCastKind() == CK_ARCConsumeObject;
+}
+
+/// \brief 'Loc' is the end of a statement range. This returns the location
+/// immediately after the semicolon following the statement.
+/// If no semicolon is found or the location is inside a macro, the returned
+/// source location will be invalid.
+SourceLocation trans::findLocationAfterSemi(SourceLocation loc,
+ ASTContext &Ctx, bool IsDecl) {
+ SourceLocation SemiLoc = findSemiAfterLocation(loc, Ctx, IsDecl);
+ if (SemiLoc.isInvalid())
+ return SourceLocation();
+ return SemiLoc.getLocWithOffset(1);
+}
+
+/// \brief \arg Loc is the end of a statement range. This returns the location
+/// of the semicolon following the statement.
+/// If no semicolon is found or the location is inside a macro, the returned
+/// source location will be invalid.
+SourceLocation trans::findSemiAfterLocation(SourceLocation loc,
+ ASTContext &Ctx,
+ bool IsDecl) {
+ SourceManager &SM = Ctx.getSourceManager();
+ if (loc.isMacroID()) {
+ if (!Lexer::isAtEndOfMacroExpansion(loc, SM, Ctx.getLangOpts(), &loc))
+ return SourceLocation();
+ }
+ loc = Lexer::getLocForEndOfToken(loc, /*Offset=*/0, SM, Ctx.getLangOpts());
+
+ // Break down the source location.
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);
+
+ // Try to load the file buffer.
+ bool invalidTemp = false;
+ StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
+ if (invalidTemp)
+ return SourceLocation();
+
+ const char *tokenBegin = file.data() + locInfo.second;
+
+ // Lex from the start of the given location.
+ Lexer lexer(SM.getLocForStartOfFile(locInfo.first),
+ Ctx.getLangOpts(),
+ file.begin(), tokenBegin, file.end());
+ Token tok;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::semi)) {
+ if (!IsDecl)
+ return SourceLocation();
+ // A declaration may be followed by other tokens (such as an __attribute__)
+ // before ending with a semicolon.
+ return findSemiAfterLocation(tok.getLocation(), Ctx, /*IsDecl*/true);
+ }
+
+ return tok.getLocation();
+}
+
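+// Illustrative sketch (hypothetical 'S' and 'Ctx'): callers typically use
+// this to extend a statement's range past its trailing ';', e.g.
+//
+//   SourceLocation AfterSemi =
+//       trans::findLocationAfterSemi(S->getLocEnd(), Ctx);
+//   if (AfterSemi.isInvalid())
+//     return; // no ';' found, or the location is inside a macro: bail out
+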
+bool trans::hasSideEffects(Expr *E, ASTContext &Ctx) {
+ if (!E || !E->HasSideEffects(Ctx))
+ return false;
+
+ E = E->IgnoreParenCasts();
+ ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E);
+ if (!ME)
+ return true;
+ switch (ME->getMethodFamily()) {
+ case OMF_autorelease:
+ case OMF_dealloc:
+ case OMF_release:
+ case OMF_retain:
+ switch (ME->getReceiverKind()) {
+ case ObjCMessageExpr::SuperInstance:
+ return false;
+ case ObjCMessageExpr::Instance:
+ return hasSideEffects(ME->getInstanceReceiver(), Ctx);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+bool trans::isGlobalVar(Expr *E) {
+ E = E->IgnoreParenCasts();
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ return DRE->getDecl()->getDeclContext()->isFileContext() &&
+ DRE->getDecl()->isExternallyVisible();
+ if (ConditionalOperator *condOp = dyn_cast<ConditionalOperator>(E))
+ return isGlobalVar(condOp->getTrueExpr()) &&
+ isGlobalVar(condOp->getFalseExpr());
+
+ return false;
+}
+
+StringRef trans::getNilString(MigrationPass &Pass) {
+ return Pass.SemaRef.PP.isMacroDefined("nil") ? "nil" : "0";
+}
+
+namespace {
+
+class ReferenceClear : public RecursiveASTVisitor<ReferenceClear> {
+ ExprSet &Refs;
+public:
+ ReferenceClear(ExprSet &refs) : Refs(refs) { }
+ bool VisitDeclRefExpr(DeclRefExpr *E) { Refs.erase(E); return true; }
+};
+
+class ReferenceCollector : public RecursiveASTVisitor<ReferenceCollector> {
+ ValueDecl *Dcl;
+ ExprSet &Refs;
+
+public:
+ ReferenceCollector(ValueDecl *D, ExprSet &refs)
+ : Dcl(D), Refs(refs) { }
+
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ if (E->getDecl() == Dcl)
+ Refs.insert(E);
+ return true;
+ }
+};
+
+class RemovablesCollector : public RecursiveASTVisitor<RemovablesCollector> {
+ ExprSet &Removables;
+
+public:
+ RemovablesCollector(ExprSet &removables)
+ : Removables(removables) { }
+
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool TraverseStmtExpr(StmtExpr *E) {
+ CompoundStmt *S = E->getSubStmt();
+ // All statements but the last are candidates for removal; the last one
+ // produces the value of the statement-expression.
+ for (CompoundStmt::body_iterator
+ I = S->body_begin(), BodyEnd = S->body_end(); I != BodyEnd; ++I) {
+ if (I != BodyEnd - 1)
+ mark(*I);
+ TraverseStmt(*I);
+ }
+ return true;
+ }
+
+ bool VisitCompoundStmt(CompoundStmt *S) {
+ for (auto *I : S->body())
+ mark(I);
+ return true;
+ }
+
+ bool VisitIfStmt(IfStmt *S) {
+ mark(S->getThen());
+ mark(S->getElse());
+ return true;
+ }
+
+ bool VisitWhileStmt(WhileStmt *S) {
+ mark(S->getBody());
+ return true;
+ }
+
+ bool VisitDoStmt(DoStmt *S) {
+ mark(S->getBody());
+ return true;
+ }
+
+ bool VisitForStmt(ForStmt *S) {
+ mark(S->getInit());
+ mark(S->getInc());
+ mark(S->getBody());
+ return true;
+ }
+
+private:
+ void mark(Stmt *S) {
+ if (!S) return;
+
+ while (LabelStmt *Label = dyn_cast<LabelStmt>(S))
+ S = Label->getSubStmt();
+ S = S->IgnoreImplicit();
+ if (Expr *E = dyn_cast<Expr>(S))
+ Removables.insert(E);
+ }
+};
+
+} // end anonymous namespace
+
+void trans::clearRefsIn(Stmt *S, ExprSet &refs) {
+ ReferenceClear(refs).TraverseStmt(S);
+}
+
+void trans::collectRefs(ValueDecl *D, Stmt *S, ExprSet &refs) {
+ ReferenceCollector(D, refs).TraverseStmt(S);
+}
+
+void trans::collectRemovables(Stmt *S, ExprSet &exprs) {
+ RemovablesCollector(exprs).TraverseStmt(S);
+}
+
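+// Illustrative usage sketch (hypothetical 'VD' and 'Body'): to gather every
+// reference to a local variable inside a function body:
+//
+//   ExprSet Refs;
+//   trans::collectRefs(VD, Body, Refs);
+//   for (Expr *Ref : Refs) {
+//     // inspect or rewrite each DeclRefExpr to 'VD'
+//   }
+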
+//===----------------------------------------------------------------------===//
+// MigrationContext
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class ASTTransform : public RecursiveASTVisitor<ASTTransform> {
+ MigrationContext &MigrateCtx;
+ typedef RecursiveASTVisitor<ASTTransform> base;
+
+public:
+ ASTTransform(MigrationContext &MigrateCtx) : MigrateCtx(MigrateCtx) { }
+
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool TraverseObjCImplementationDecl(ObjCImplementationDecl *D) {
+ ObjCImplementationContext ImplCtx(MigrateCtx, D);
+ for (MigrationContext::traverser_iterator
+ I = MigrateCtx.traversers_begin(),
+ E = MigrateCtx.traversers_end(); I != E; ++I)
+ (*I)->traverseObjCImplementation(ImplCtx);
+
+ return base::TraverseObjCImplementationDecl(D);
+ }
+
+ bool TraverseStmt(Stmt *rootS) {
+ if (!rootS)
+ return true;
+
+ BodyContext BodyCtx(MigrateCtx, rootS);
+ for (MigrationContext::traverser_iterator
+ I = MigrateCtx.traversers_begin(),
+ E = MigrateCtx.traversers_end(); I != E; ++I)
+ (*I)->traverseBody(BodyCtx);
+
+ return true;
+ }
+};
+
+} // end anonymous namespace
+
+MigrationContext::~MigrationContext() {
+ for (traverser_iterator
+ I = traversers_begin(), E = traversers_end(); I != E; ++I)
+ delete *I;
+}
+
+bool MigrationContext::isGCOwnedNonObjC(QualType T) {
+ while (!T.isNull()) {
+ if (const AttributedType *AttrT = T->getAs<AttributedType>()) {
+ if (AttrT->getAttrKind() == AttributedType::attr_objc_ownership)
+ return !AttrT->getModifiedType()->isObjCRetainableType();
+ }
+
+ if (T->isArrayType())
+ T = Pass.Ctx.getBaseElementType(T);
+ else if (const PointerType *PT = T->getAs<PointerType>())
+ T = PT->getPointeeType();
+ else if (const ReferenceType *RT = T->getAs<ReferenceType>())
+ T = RT->getPointeeType();
+ else
+ break;
+ }
+
+ return false;
+}
+
+bool MigrationContext::rewritePropertyAttribute(StringRef fromAttr,
+ StringRef toAttr,
+ SourceLocation atLoc) {
+ if (atLoc.isMacroID())
+ return false;
+
+ SourceManager &SM = Pass.Ctx.getSourceManager();
+
+ // Break down the source location.
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(atLoc);
+
+ // Try to load the file buffer.
+ bool invalidTemp = false;
+ StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
+ if (invalidTemp)
+ return false;
+
+ const char *tokenBegin = file.data() + locInfo.second;
+
+ // Lex from the start of the given location.
+ Lexer lexer(SM.getLocForStartOfFile(locInfo.first),
+ Pass.Ctx.getLangOpts(),
+ file.begin(), tokenBegin, file.end());
+ Token tok;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::at)) return false;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::raw_identifier)) return false;
+ if (tok.getRawIdentifier() != "property")
+ return false;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::l_paren)) return false;
+
+ Token BeforeTok = tok;
+ Token AfterTok;
+ AfterTok.startToken();
+ SourceLocation AttrLoc;
+
+ lexer.LexFromRawLexer(tok);
+ if (tok.is(tok::r_paren))
+ return false;
+
+ while (1) {
+ if (tok.isNot(tok::raw_identifier)) return false;
+ if (tok.getRawIdentifier() == fromAttr) {
+ if (!toAttr.empty()) {
+ Pass.TA.replaceText(tok.getLocation(), fromAttr, toAttr);
+ return true;
+ }
+ // We want to remove the attribute.
+ AttrLoc = tok.getLocation();
+ }
+
+ do {
+ lexer.LexFromRawLexer(tok);
+ if (AttrLoc.isValid() && AfterTok.is(tok::unknown))
+ AfterTok = tok;
+ } while (tok.isNot(tok::comma) && tok.isNot(tok::r_paren));
+ if (tok.is(tok::r_paren))
+ break;
+ if (AttrLoc.isInvalid())
+ BeforeTok = tok;
+ lexer.LexFromRawLexer(tok);
+ }
+
+ if (toAttr.empty() && AttrLoc.isValid() && AfterTok.isNot(tok::unknown)) {
+ // We want to remove the attribute.
+ if (BeforeTok.is(tok::l_paren) && AfterTok.is(tok::r_paren)) {
+ Pass.TA.remove(SourceRange(BeforeTok.getLocation(),
+ AfterTok.getLocation()));
+ } else if (BeforeTok.is(tok::l_paren) && AfterTok.is(tok::comma)) {
+ Pass.TA.remove(SourceRange(AttrLoc, AfterTok.getLocation()));
+ } else {
+ Pass.TA.remove(SourceRange(BeforeTok.getLocation(), AttrLoc));
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
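+// Illustrative sketch (hypothetical 'AtLoc' pointing at the leading '@'):
+//
+//   rewritePropertyAttribute("retain", "strong", AtLoc);
+//
+// turns '@property (retain) NSString *name;' into
+// '@property (strong) NSString *name;', while passing an empty 'toAttr'
+// (via removePropertyAttribute) deletes the attribute together with the
+// adjacent comma or parentheses.
+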
+bool MigrationContext::addPropertyAttribute(StringRef attr,
+ SourceLocation atLoc) {
+ if (atLoc.isMacroID())
+ return false;
+
+ SourceManager &SM = Pass.Ctx.getSourceManager();
+
+ // Break down the source location.
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(atLoc);
+
+ // Try to load the file buffer.
+ bool invalidTemp = false;
+ StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
+ if (invalidTemp)
+ return false;
+
+ const char *tokenBegin = file.data() + locInfo.second;
+
+ // Lex from the start of the given location.
+ Lexer lexer(SM.getLocForStartOfFile(locInfo.first),
+ Pass.Ctx.getLangOpts(),
+ file.begin(), tokenBegin, file.end());
+ Token tok;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::at)) return false;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::raw_identifier)) return false;
+ if (tok.getRawIdentifier() != "property")
+ return false;
+ lexer.LexFromRawLexer(tok);
+
+ if (tok.isNot(tok::l_paren)) {
+ Pass.TA.insert(tok.getLocation(), std::string("(") + attr.str() + ") ");
+ return true;
+ }
+
+ lexer.LexFromRawLexer(tok);
+ if (tok.is(tok::r_paren)) {
+ Pass.TA.insert(tok.getLocation(), attr);
+ return true;
+ }
+
+ if (tok.isNot(tok::raw_identifier)) return false;
+
+ Pass.TA.insert(tok.getLocation(), std::string(attr) + ", ");
+ return true;
+}
+
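+// Illustrative sketch (hypothetical 'AtLoc'):
+// addPropertyAttribute("nonatomic", AtLoc) rewrites
+//
+//   @property int x;           ->  @property (nonatomic) int x;
+//   @property (assign) int x;  ->  @property (nonatomic, assign) int x;
+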
+void MigrationContext::traverse(TranslationUnitDecl *TU) {
+ for (traverser_iterator
+ I = traversers_begin(), E = traversers_end(); I != E; ++I)
+ (*I)->traverseTU(*this);
+
+ ASTTransform(*this).TraverseDecl(TU);
+}
+
+static void GCRewriteFinalize(MigrationPass &pass) {
+ ASTContext &Ctx = pass.Ctx;
+ TransformActions &TA = pass.TA;
+ DeclContext *DC = Ctx.getTranslationUnitDecl();
+ Selector FinalizeSel =
+ Ctx.Selectors.getNullarySelector(&pass.Ctx.Idents.get("finalize"));
+
+ typedef DeclContext::specific_decl_iterator<ObjCImplementationDecl>
+ impl_iterator;
+ for (impl_iterator I = impl_iterator(DC->decls_begin()),
+ E = impl_iterator(DC->decls_end()); I != E; ++I) {
+ for (const auto *MD : I->instance_methods()) {
+ if (!MD->hasBody())
+ continue;
+
+ if (MD->isInstanceMethod() && MD->getSelector() == FinalizeSel) {
+ const ObjCMethodDecl *FinalizeM = MD;
+ Transaction Trans(TA);
+ TA.insert(FinalizeM->getSourceRange().getBegin(),
+ "#if !__has_feature(objc_arc)\n");
+ const SourceManager &SM = pass.Ctx.getSourceManager();
+ const LangOptions &LangOpts = pass.Ctx.getLangOpts();
+ bool Invalid = false;
+ std::string str = "\n#endif\n";
+ str += Lexer::getSourceText(
+ CharSourceRange::getTokenRange(FinalizeM->getSourceRange()),
+ SM, LangOpts, &Invalid);
+ TA.insertAfterToken(FinalizeM->getSourceRange().getEnd(), str);
+
+ break;
+ }
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// getAllTransformations.
+//===----------------------------------------------------------------------===//
+
+static void traverseAST(MigrationPass &pass) {
+ MigrationContext MigrateCtx(pass);
+
+ if (pass.isGCMigration()) {
+ MigrateCtx.addTraverser(new GCCollectableCallsTraverser);
+ MigrateCtx.addTraverser(new GCAttrsTraverser());
+ }
+ MigrateCtx.addTraverser(new PropertyRewriteTraverser());
+ MigrateCtx.addTraverser(new BlockObjCVariableTraverser());
+ MigrateCtx.addTraverser(new ProtectedScopeTraverser());
+
+ MigrateCtx.traverse(pass.Ctx.getTranslationUnitDecl());
+}
+
+static void independentTransforms(MigrationPass &pass) {
+ rewriteAutoreleasePool(pass);
+ removeRetainReleaseDeallocFinalize(pass);
+ rewriteUnusedInitDelegate(pass);
+ removeZeroOutPropsInDeallocFinalize(pass);
+ makeAssignARCSafe(pass);
+ rewriteUnbridgedCasts(pass);
+ checkAPIUses(pass);
+ traverseAST(pass);
+}
+
+std::vector<TransformFn> arcmt::getAllTransformations(
+ LangOptions::GCMode OrigGCMode,
+ bool NoFinalizeRemoval) {
+ std::vector<TransformFn> transforms;
+
+ if (OrigGCMode == LangOptions::GCOnly && NoFinalizeRemoval)
+ transforms.push_back(GCRewriteFinalize);
+ transforms.push_back(independentTransforms);
+ // This depends on previous transformations removing various expressions.
+ transforms.push_back(removeEmptyStatementsAndDeallocFinalize);
+
+ return transforms;
+}
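+
+// Illustrative driver sketch (hypothetical; the actual driver lives elsewhere
+// in the migrator): each returned TransformFn is applied to the pass in
+// order:
+//
+//   std::vector<TransformFn> Transforms =
+//       arcmt::getAllTransformations(OrigGCMode, NoFinalizeRemoval);
+//   for (TransformFn Fn : Transforms)
+//     Fn(Pass);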
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h
new file mode 100644
index 0000000..7e3dd34
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h
@@ -0,0 +1,225 @@
+//===-- Transforms.h - Transformations to ARC mode --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_ARCMIGRATE_TRANSFORMS_H
+#define LLVM_CLANG_LIB_ARCMIGRATE_TRANSFORMS_H
+
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/Support/SaveAndRestore.h"
+
+namespace clang {
+ class Decl;
+ class Stmt;
+ class BlockDecl;
+ class ObjCMethodDecl;
+ class FunctionDecl;
+
+namespace arcmt {
+ class MigrationPass;
+
+namespace trans {
+
+ class MigrationContext;
+
+//===----------------------------------------------------------------------===//
+// Transformations.
+//===----------------------------------------------------------------------===//
+
+void rewriteAutoreleasePool(MigrationPass &pass);
+void rewriteUnbridgedCasts(MigrationPass &pass);
+void makeAssignARCSafe(MigrationPass &pass);
+void removeRetainReleaseDeallocFinalize(MigrationPass &pass);
+void removeZeroOutPropsInDeallocFinalize(MigrationPass &pass);
+void rewriteUnusedInitDelegate(MigrationPass &pass);
+void checkAPIUses(MigrationPass &pass);
+
+void removeEmptyStatementsAndDeallocFinalize(MigrationPass &pass);
+
+class BodyContext {
+ MigrationContext &MigrateCtx;
+ ParentMap PMap;
+ Stmt *TopStmt;
+
+public:
+ BodyContext(MigrationContext &MigrateCtx, Stmt *S)
+ : MigrateCtx(MigrateCtx), PMap(S), TopStmt(S) {}
+
+ MigrationContext &getMigrationContext() { return MigrateCtx; }
+ ParentMap &getParentMap() { return PMap; }
+ Stmt *getTopStmt() { return TopStmt; }
+};
+
+class ObjCImplementationContext {
+ MigrationContext &MigrateCtx;
+ ObjCImplementationDecl *ImpD;
+
+public:
+ ObjCImplementationContext(MigrationContext &MigrateCtx,
+ ObjCImplementationDecl *D)
+ : MigrateCtx(MigrateCtx), ImpD(D) {}
+
+ MigrationContext &getMigrationContext() { return MigrateCtx; }
+ ObjCImplementationDecl *getImplementationDecl() { return ImpD; }
+};
+
+class ASTTraverser {
+public:
+ virtual ~ASTTraverser();
+ virtual void traverseTU(MigrationContext &MigrateCtx) { }
+ virtual void traverseBody(BodyContext &BodyCtx) { }
+ virtual void traverseObjCImplementation(ObjCImplementationContext &ImplCtx) {}
+};
+
+class MigrationContext {
+ std::vector<ASTTraverser *> Traversers;
+
+public:
+ MigrationPass &Pass;
+
+ struct GCAttrOccurrence {
+ enum AttrKind { Weak, Strong } Kind;
+ SourceLocation Loc;
+ QualType ModifiedType;
+ Decl *Dcl;
+ /// \brief true if the attribute is owned, e.g. it is in a body and not just
+ /// in an interface.
+ bool FullyMigratable;
+ };
+ std::vector<GCAttrOccurrence> GCAttrs;
+ llvm::DenseSet<unsigned> AttrSet;
+ llvm::DenseSet<unsigned> RemovedAttrSet;
+
+ /// \brief Set of raw '@' locations for groups of 'assign' properties that
+ /// contain GC __weak.
+ llvm::DenseSet<unsigned> AtPropsWeak;
+
+ explicit MigrationContext(MigrationPass &pass) : Pass(pass) {}
+ ~MigrationContext();
+
+ typedef std::vector<ASTTraverser *>::iterator traverser_iterator;
+ traverser_iterator traversers_begin() { return Traversers.begin(); }
+ traverser_iterator traversers_end() { return Traversers.end(); }
+
+ void addTraverser(ASTTraverser *traverser) {
+ Traversers.push_back(traverser);
+ }
+
+ bool isGCOwnedNonObjC(QualType T);
+ bool removePropertyAttribute(StringRef fromAttr, SourceLocation atLoc) {
+ return rewritePropertyAttribute(fromAttr, StringRef(), atLoc);
+ }
+ bool rewritePropertyAttribute(StringRef fromAttr, StringRef toAttr,
+ SourceLocation atLoc);
+ bool addPropertyAttribute(StringRef attr, SourceLocation atLoc);
+
+ void traverse(TranslationUnitDecl *TU);
+
+ void dumpGCAttrs();
+};
+
+class PropertyRewriteTraverser : public ASTTraverser {
+public:
+ void traverseObjCImplementation(ObjCImplementationContext &ImplCtx) override;
+};
+
+class BlockObjCVariableTraverser : public ASTTraverser {
+public:
+ void traverseBody(BodyContext &BodyCtx) override;
+};
+
+class ProtectedScopeTraverser : public ASTTraverser {
+public:
+ void traverseBody(BodyContext &BodyCtx) override;
+};
+
+// GC transformations
+
+class GCAttrsTraverser : public ASTTraverser {
+public:
+ void traverseTU(MigrationContext &MigrateCtx) override;
+};
+
+class GCCollectableCallsTraverser : public ASTTraverser {
+public:
+ void traverseBody(BodyContext &BodyCtx) override;
+};
+
+//===----------------------------------------------------------------------===//
+// Helpers.
+//===----------------------------------------------------------------------===//
+
+/// \brief Determine whether we can add weak to the given type.
+bool canApplyWeak(ASTContext &Ctx, QualType type,
+ bool AllowOnUnknownClass = false);
+
+bool isPlusOneAssign(const BinaryOperator *E);
+bool isPlusOne(const Expr *E);
+
+/// \brief 'Loc' is the end of a statement range. This returns the location
+/// immediately after the semicolon following the statement.
+/// If no semicolon is found or the location is inside a macro, the returned
+/// source location will be invalid.
+SourceLocation findLocationAfterSemi(SourceLocation loc, ASTContext &Ctx,
+ bool IsDecl = false);
+
+/// \brief 'Loc' is the end of a statement range. This returns the location
+/// of the semicolon following the statement.
+/// If no semicolon is found or the location is inside a macro, the returned
+/// source location will be invalid.
+SourceLocation findSemiAfterLocation(SourceLocation loc, ASTContext &Ctx,
+ bool IsDecl = false);
+
+bool hasSideEffects(Expr *E, ASTContext &Ctx);
+bool isGlobalVar(Expr *E);
+/// \brief Returns "nil", or "0" if the 'nil' macro is not actually defined.
+StringRef getNilString(MigrationPass &Pass);
+
+template <typename BODY_TRANS>
+class BodyTransform : public RecursiveASTVisitor<BodyTransform<BODY_TRANS> > {
+ MigrationPass &Pass;
+ Decl *ParentD;
+
+ typedef RecursiveASTVisitor<BodyTransform<BODY_TRANS> > base;
+public:
+ BodyTransform(MigrationPass &pass) : Pass(pass), ParentD(nullptr) { }
+
+ bool TraverseStmt(Stmt *rootS) {
+ if (rootS)
+ BODY_TRANS(Pass).transformBody(rootS, ParentD);
+ return true;
+ }
+
+ bool TraverseObjCMethodDecl(ObjCMethodDecl *D) {
+ SaveAndRestore<Decl *> SetParent(ParentD, D);
+ return base::TraverseObjCMethodDecl(D);
+ }
+};
+
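+// Illustrative usage sketch: a transformation class 'MyBodyTrans'
+// (hypothetical) exposing 'transformBody(Stmt *, Decl *ParentD)' is applied
+// to a whole translation unit with:
+//
+//   BodyTransform<MyBodyTrans> Trans(Pass);
+//   Trans.TraverseDecl(Pass.Ctx.getTranslationUnitDecl());
+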
+typedef llvm::DenseSet<Expr *> ExprSet;
+
+void clearRefsIn(Stmt *S, ExprSet &refs);
+template <typename iterator>
+void clearRefsIn(iterator begin, iterator end, ExprSet &refs) {
+ for (; begin != end; ++begin)
+ clearRefsIn(*begin, refs);
+}
+
+void collectRefs(ValueDecl *D, Stmt *S, ExprSet &refs);
+
+void collectRemovables(Stmt *S, ExprSet &exprs);
+
+} // end namespace trans
+
+} // end namespace arcmt
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/AST/APValue.cpp b/contrib/llvm/tools/clang/lib/AST/APValue.cpp
new file mode 100644
index 0000000..91f1e20
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/APValue.cpp
@@ -0,0 +1,651 @@
+//===--- APValue.cpp - Union class for APFloat/APSInt/Complex -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the APValue class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+namespace {
+ struct LVBase {
+ llvm::PointerIntPair<APValue::LValueBase, 1, bool> BaseAndIsOnePastTheEnd;
+ CharUnits Offset;
+ unsigned PathLength;
+ unsigned CallIndex;
+ };
+}
+
+struct APValue::LV : LVBase {
+ static const unsigned InlinePathSpace =
+ (DataSize - sizeof(LVBase)) / sizeof(LValuePathEntry);
+
+ /// Path - The sequence of base classes, fields and array indices to follow to
+ /// walk from Base to the subobject. When performing GCC-style folding, there
+ /// may not be such a path.
+ union {
+ LValuePathEntry Path[InlinePathSpace];
+ LValuePathEntry *PathPtr;
+ };
+
+ LV() { PathLength = (unsigned)-1; }
+ ~LV() { resizePath(0); }
+
+ void resizePath(unsigned Length) {
+ if (Length == PathLength)
+ return;
+ if (hasPathPtr())
+ delete [] PathPtr;
+ PathLength = Length;
+ if (hasPathPtr())
+ PathPtr = new LValuePathEntry[Length];
+ }
+
+ bool hasPath() const { return PathLength != (unsigned)-1; }
+ bool hasPathPtr() const { return hasPath() && PathLength > InlinePathSpace; }
+
+ LValuePathEntry *getPath() { return hasPathPtr() ? PathPtr : Path; }
+ const LValuePathEntry *getPath() const {
+ return hasPathPtr() ? PathPtr : Path;
+ }
+};
+
+namespace {
+ struct MemberPointerBase {
+ llvm::PointerIntPair<const ValueDecl*, 1, bool> MemberAndIsDerivedMember;
+ unsigned PathLength;
+ };
+}
+
+struct APValue::MemberPointerData : MemberPointerBase {
+ static const unsigned InlinePathSpace =
+ (DataSize - sizeof(MemberPointerBase)) / sizeof(const CXXRecordDecl*);
+ typedef const CXXRecordDecl *PathElem;
+ union {
+ PathElem Path[InlinePathSpace];
+ PathElem *PathPtr;
+ };
+
+ MemberPointerData() { PathLength = 0; }
+ ~MemberPointerData() { resizePath(0); }
+
+ void resizePath(unsigned Length) {
+ if (Length == PathLength)
+ return;
+ if (hasPathPtr())
+ delete [] PathPtr;
+ PathLength = Length;
+ if (hasPathPtr())
+ PathPtr = new PathElem[Length];
+ }
+
+ bool hasPathPtr() const { return PathLength > InlinePathSpace; }
+
+ PathElem *getPath() { return hasPathPtr() ? PathPtr : Path; }
+ const PathElem *getPath() const {
+ return hasPathPtr() ? PathPtr : Path;
+ }
+};
+
+// FIXME: Reduce the malloc traffic here.
+
+APValue::Arr::Arr(unsigned NumElts, unsigned Size) :
+ Elts(new APValue[NumElts + (NumElts != Size ? 1 : 0)]),
+ NumElts(NumElts), ArrSize(Size) {}
+APValue::Arr::~Arr() { delete [] Elts; }
+
+APValue::StructData::StructData(unsigned NumBases, unsigned NumFields) :
+ Elts(new APValue[NumBases+NumFields]),
+ NumBases(NumBases), NumFields(NumFields) {}
+APValue::StructData::~StructData() {
+ delete [] Elts;
+}
+
+APValue::UnionData::UnionData() : Field(nullptr), Value(new APValue) {}
+APValue::UnionData::~UnionData() {
+ delete Value;
+}
+
+APValue::APValue(const APValue &RHS) : Kind(Uninitialized) {
+ switch (RHS.getKind()) {
+ case Uninitialized:
+ break;
+ case Int:
+ MakeInt();
+ setInt(RHS.getInt());
+ break;
+ case Float:
+ MakeFloat();
+ setFloat(RHS.getFloat());
+ break;
+ case Vector:
+ MakeVector();
+ setVector(((const Vec *)(const char *)RHS.Data.buffer)->Elts,
+ RHS.getVectorLength());
+ break;
+ case ComplexInt:
+ MakeComplexInt();
+ setComplexInt(RHS.getComplexIntReal(), RHS.getComplexIntImag());
+ break;
+ case ComplexFloat:
+ MakeComplexFloat();
+ setComplexFloat(RHS.getComplexFloatReal(), RHS.getComplexFloatImag());
+ break;
+ case LValue:
+ MakeLValue();
+ if (RHS.hasLValuePath())
+ setLValue(RHS.getLValueBase(), RHS.getLValueOffset(), RHS.getLValuePath(),
+ RHS.isLValueOnePastTheEnd(), RHS.getLValueCallIndex());
+ else
+ setLValue(RHS.getLValueBase(), RHS.getLValueOffset(), NoLValuePath(),
+ RHS.getLValueCallIndex());
+ break;
+ case Array:
+ MakeArray(RHS.getArrayInitializedElts(), RHS.getArraySize());
+ for (unsigned I = 0, N = RHS.getArrayInitializedElts(); I != N; ++I)
+ getArrayInitializedElt(I) = RHS.getArrayInitializedElt(I);
+ if (RHS.hasArrayFiller())
+ getArrayFiller() = RHS.getArrayFiller();
+ break;
+ case Struct:
+ MakeStruct(RHS.getStructNumBases(), RHS.getStructNumFields());
+ for (unsigned I = 0, N = RHS.getStructNumBases(); I != N; ++I)
+ getStructBase(I) = RHS.getStructBase(I);
+ for (unsigned I = 0, N = RHS.getStructNumFields(); I != N; ++I)
+ getStructField(I) = RHS.getStructField(I);
+ break;
+ case Union:
+ MakeUnion();
+ setUnion(RHS.getUnionField(), RHS.getUnionValue());
+ break;
+ case MemberPointer:
+ MakeMemberPointer(RHS.getMemberPointerDecl(),
+ RHS.isMemberPointerToDerivedMember(),
+ RHS.getMemberPointerPath());
+ break;
+ case AddrLabelDiff:
+ MakeAddrLabelDiff();
+ setAddrLabelDiff(RHS.getAddrLabelDiffLHS(), RHS.getAddrLabelDiffRHS());
+ break;
+ }
+}
+
+void APValue::DestroyDataAndMakeUninit() {
+ if (Kind == Int)
+ ((APSInt*)(char*)Data.buffer)->~APSInt();
+ else if (Kind == Float)
+ ((APFloat*)(char*)Data.buffer)->~APFloat();
+ else if (Kind == Vector)
+ ((Vec*)(char*)Data.buffer)->~Vec();
+ else if (Kind == ComplexInt)
+ ((ComplexAPSInt*)(char*)Data.buffer)->~ComplexAPSInt();
+ else if (Kind == ComplexFloat)
+ ((ComplexAPFloat*)(char*)Data.buffer)->~ComplexAPFloat();
+ else if (Kind == LValue)
+ ((LV*)(char*)Data.buffer)->~LV();
+ else if (Kind == Array)
+ ((Arr*)(char*)Data.buffer)->~Arr();
+ else if (Kind == Struct)
+ ((StructData*)(char*)Data.buffer)->~StructData();
+ else if (Kind == Union)
+ ((UnionData*)(char*)Data.buffer)->~UnionData();
+ else if (Kind == MemberPointer)
+ ((MemberPointerData*)(char*)Data.buffer)->~MemberPointerData();
+ else if (Kind == AddrLabelDiff)
+ ((AddrLabelDiffData*)(char*)Data.buffer)->~AddrLabelDiffData();
+ Kind = Uninitialized;
+}
+
+bool APValue::needsCleanup() const {
+ switch (getKind()) {
+ case Uninitialized:
+ case AddrLabelDiff:
+ return false;
+ case Struct:
+ case Union:
+ case Array:
+ case Vector:
+ return true;
+ case Int:
+ return getInt().needsCleanup();
+ case Float:
+ return getFloat().needsCleanup();
+ case ComplexFloat:
+ assert(getComplexFloatImag().needsCleanup() ==
+ getComplexFloatReal().needsCleanup() &&
+ "In _Complex float types, real and imaginary values always have the "
+ "same size.");
+ return getComplexFloatReal().needsCleanup();
+ case ComplexInt:
+ assert(getComplexIntImag().needsCleanup() ==
+ getComplexIntReal().needsCleanup() &&
+ "In _Complex int types, real and imaginary values must have the "
+ "same size.");
+ return getComplexIntReal().needsCleanup();
+ case LValue:
+ return reinterpret_cast<const LV *>(Data.buffer)->hasPathPtr();
+ case MemberPointer:
+ return reinterpret_cast<const MemberPointerData *>(Data.buffer)
+ ->hasPathPtr();
+ }
+ llvm_unreachable("Unknown APValue kind!");
+}
+
+void APValue::swap(APValue &RHS) {
+ std::swap(Kind, RHS.Kind);
+ char TmpData[DataSize];
+ memcpy(TmpData, Data.buffer, DataSize);
+ memcpy(Data.buffer, RHS.Data.buffer, DataSize);
+ memcpy(RHS.Data.buffer, TmpData, DataSize);
+}
+
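+// Note on swap(): exchanging the raw bytes of Data.buffer relies on the
+// assumption that none of the stored representations hold pointers into
+// their own inline storage; heap-owning kinds only carry pointers that move
+// with the bytes. This is documented for readers, not enforced by the code.
+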
+void APValue::dump() const {
+ dump(llvm::errs());
+ llvm::errs() << '\n';
+}
+
+static double GetApproxValue(const llvm::APFloat &F) {
+ llvm::APFloat V = F;
+ bool ignored;
+ V.convert(llvm::APFloat::IEEEdouble, llvm::APFloat::rmNearestTiesToEven,
+ &ignored);
+ return V.convertToDouble();
+}
+
+void APValue::dump(raw_ostream &OS) const {
+ switch (getKind()) {
+ case Uninitialized:
+ OS << "Uninitialized";
+ return;
+ case Int:
+ OS << "Int: " << getInt();
+ return;
+ case Float:
+ OS << "Float: " << GetApproxValue(getFloat());
+ return;
+ case Vector:
+ OS << "Vector: ";
+ getVectorElt(0).dump(OS);
+ for (unsigned i = 1; i != getVectorLength(); ++i) {
+ OS << ", ";
+ getVectorElt(i).dump(OS);
+ }
+ return;
+ case ComplexInt:
+ OS << "ComplexInt: " << getComplexIntReal() << ", " << getComplexIntImag();
+ return;
+ case ComplexFloat:
+ OS << "ComplexFloat: " << GetApproxValue(getComplexFloatReal())
+ << ", " << GetApproxValue(getComplexFloatImag());
+ return;
+ case LValue:
+ OS << "LValue: <todo>";
+ return;
+ case Array:
+ OS << "Array: ";
+ for (unsigned I = 0, N = getArrayInitializedElts(); I != N; ++I) {
+ getArrayInitializedElt(I).dump(OS);
+ if (I != getArraySize() - 1) OS << ", ";
+ }
+ if (hasArrayFiller()) {
+ OS << getArraySize() - getArrayInitializedElts() << " x ";
+ getArrayFiller().dump(OS);
+ }
+ return;
+ case Struct:
+ OS << "Struct ";
+ if (unsigned N = getStructNumBases()) {
+ OS << " bases: ";
+ getStructBase(0).dump(OS);
+ for (unsigned I = 1; I != N; ++I) {
+ OS << ", ";
+ getStructBase(I).dump(OS);
+ }
+ }
+ if (unsigned N = getStructNumFields()) {
+ OS << " fields: ";
+ getStructField(0).dump(OS);
+ for (unsigned I = 1; I != N; ++I) {
+ OS << ", ";
+ getStructField(I).dump(OS);
+ }
+ }
+ return;
+ case Union:
+ OS << "Union: ";
+ getUnionValue().dump(OS);
+ return;
+ case MemberPointer:
+ OS << "MemberPointer: <todo>";
+ return;
+ case AddrLabelDiff:
+ OS << "AddrLabelDiff: <todo>";
+ return;
+ }
+ llvm_unreachable("Unknown APValue kind!");
+}
+
+void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const {
+ switch (getKind()) {
+ case APValue::Uninitialized:
+ Out << "<uninitialized>";
+ return;
+ case APValue::Int:
+ if (Ty->isBooleanType())
+ Out << (getInt().getBoolValue() ? "true" : "false");
+ else
+ Out << getInt();
+ return;
+ case APValue::Float:
+ Out << GetApproxValue(getFloat());
+ return;
+ case APValue::Vector: {
+ Out << '{';
+ QualType ElemTy = Ty->getAs<VectorType>()->getElementType();
+ getVectorElt(0).printPretty(Out, Ctx, ElemTy);
+ for (unsigned i = 1; i != getVectorLength(); ++i) {
+ Out << ", ";
+ getVectorElt(i).printPretty(Out, Ctx, ElemTy);
+ }
+ Out << '}';
+ return;
+ }
+ case APValue::ComplexInt:
+ Out << getComplexIntReal() << "+" << getComplexIntImag() << "i";
+ return;
+ case APValue::ComplexFloat:
+ Out << GetApproxValue(getComplexFloatReal()) << "+"
+ << GetApproxValue(getComplexFloatImag()) << "i";
+ return;
+ case APValue::LValue: {
+ LValueBase Base = getLValueBase();
+ if (!Base) {
+ Out << "0";
+ return;
+ }
+
+ bool IsReference = Ty->isReferenceType();
+ QualType InnerTy
+ = IsReference ? Ty.getNonReferenceType() : Ty->getPointeeType();
+ if (InnerTy.isNull())
+ InnerTy = Ty;
+
+ if (!hasLValuePath()) {
+ // No lvalue path: just print the offset.
+ CharUnits O = getLValueOffset();
+ CharUnits S = Ctx.getTypeSizeInChars(InnerTy);
+ if (!O.isZero()) {
+ if (IsReference)
+ Out << "*(";
+ if (O % S) {
+ Out << "(char*)";
+ S = CharUnits::One();
+ }
+ Out << '&';
+ } else if (!IsReference)
+ Out << '&';
+
+ if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>())
+ Out << *VD;
+ else {
+ assert(Base.get<const Expr *>() != nullptr &&
+ "Expecting non-null Expr");
+ Base.get<const Expr*>()->printPretty(Out, nullptr,
+ Ctx.getPrintingPolicy());
+ }
+
+ if (!O.isZero()) {
+ Out << " + " << (O / S);
+ if (IsReference)
+ Out << ')';
+ }
+ return;
+ }
+
+ // We have an lvalue path. Print it out nicely.
+ if (!IsReference)
+ Out << '&';
+ else if (isLValueOnePastTheEnd())
+ Out << "*(&";
+
+ QualType ElemTy;
+ if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) {
+ Out << *VD;
+ ElemTy = VD->getType();
+ } else {
+ const Expr *E = Base.get<const Expr*>();
+ assert(E != nullptr && "Expecting non-null Expr");
+ E->printPretty(Out, nullptr, Ctx.getPrintingPolicy());
+ ElemTy = E->getType();
+ }
+
+ ArrayRef<LValuePathEntry> Path = getLValuePath();
+ const CXXRecordDecl *CastToBase = nullptr;
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ if (ElemTy->getAs<RecordType>()) {
+ // The lvalue refers to a class type, so the next path entry is a base
+ // or member.
+ const Decl *BaseOrMember =
+ BaseOrMemberType::getFromOpaqueValue(Path[I].BaseOrMember).getPointer();
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(BaseOrMember)) {
+ CastToBase = RD;
+ ElemTy = Ctx.getRecordType(RD);
+ } else {
+ const ValueDecl *VD = cast<ValueDecl>(BaseOrMember);
+ Out << ".";
+ if (CastToBase)
+ Out << *CastToBase << "::";
+ Out << *VD;
+ ElemTy = VD->getType();
+ }
+ } else {
+ // The lvalue must refer to an array.
+ Out << '[' << Path[I].ArrayIndex << ']';
+ ElemTy = Ctx.getAsArrayType(ElemTy)->getElementType();
+ }
+ }
+
+ // Handle formatting of one-past-the-end lvalues.
+ if (isLValueOnePastTheEnd()) {
+ // FIXME: If CastToBase is non-0, we should prefix the output with
+ // "(CastToBase*)".
+ Out << " + 1";
+ if (IsReference)
+ Out << ')';
+ }
+ return;
+ }
+ case APValue::Array: {
+ const ArrayType *AT = Ctx.getAsArrayType(Ty);
+ QualType ElemTy = AT->getElementType();
+ Out << '{';
+ if (unsigned N = getArrayInitializedElts()) {
+ getArrayInitializedElt(0).printPretty(Out, Ctx, ElemTy);
+ for (unsigned I = 1; I != N; ++I) {
+ Out << ", ";
+ if (I == 10) {
+ // Avoid printing out the entire contents of large arrays.
+ Out << "...";
+ break;
+ }
+ getArrayInitializedElt(I).printPretty(Out, Ctx, ElemTy);
+ }
+ }
+ Out << '}';
+ return;
+ }
+ case APValue::Struct: {
+ Out << '{';
+ const RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
+ bool First = true;
+ if (unsigned N = getStructNumBases()) {
+ const CXXRecordDecl *CD = cast<CXXRecordDecl>(RD);
+ CXXRecordDecl::base_class_const_iterator BI = CD->bases_begin();
+ for (unsigned I = 0; I != N; ++I, ++BI) {
+ assert(BI != CD->bases_end());
+ if (!First)
+ Out << ", ";
+ getStructBase(I).printPretty(Out, Ctx, BI->getType());
+ First = false;
+ }
+ }
+ for (const auto *FI : RD->fields()) {
+ // Skip unnamed bitfields first so they do not leave stray ", "
+ // separators in the output.
+ if (FI->isUnnamedBitfield()) continue;
+ if (!First)
+ Out << ", ";
+ getStructField(FI->getFieldIndex()).
+ printPretty(Out, Ctx, FI->getType());
+ First = false;
+ }
+ Out << '}';
+ return;
+ }
+ case APValue::Union:
+ Out << '{';
+ if (const FieldDecl *FD = getUnionField()) {
+ Out << "." << *FD << " = ";
+ getUnionValue().printPretty(Out, Ctx, FD->getType());
+ }
+ Out << '}';
+ return;
+ case APValue::MemberPointer:
+ // FIXME: This is not enough to unambiguously identify the member in a
+ // multiple-inheritance scenario.
+ if (const ValueDecl *VD = getMemberPointerDecl()) {
+ Out << '&' << *cast<CXXRecordDecl>(VD->getDeclContext()) << "::" << *VD;
+ return;
+ }
+ Out << "0";
+ return;
+ case APValue::AddrLabelDiff:
+ Out << "&&" << getAddrLabelDiffLHS()->getLabel()->getName();
+ Out << " - ";
+ Out << "&&" << getAddrLabelDiffRHS()->getLabel()->getName();
+ return;
+ }
+ llvm_unreachable("Unknown APValue kind!");
+}
+
+std::string APValue::getAsString(ASTContext &Ctx, QualType Ty) const {
+ std::string Result;
+ llvm::raw_string_ostream Out(Result);
+ printPretty(Out, Ctx, Ty);
+ Out.flush();
+ return Result;
+}
+
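+// Illustrative usage sketch (hypothetical 'V', 'Ctx', and 'VD'):
+//
+//   std::string S = V.getAsString(Ctx, VD->getType());
+//   // e.g. yields "{1, 2, 3}" for a constant array value
+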
+const APValue::LValueBase APValue::getLValueBase() const {
+ assert(isLValue() && "Invalid accessor");
+ return ((const LV*)(const void*)Data.buffer)->BaseAndIsOnePastTheEnd.getPointer();
+}
+
+bool APValue::isLValueOnePastTheEnd() const {
+ assert(isLValue() && "Invalid accessor");
+ return ((const LV*)(const void*)Data.buffer)->BaseAndIsOnePastTheEnd.getInt();
+}
+
+CharUnits &APValue::getLValueOffset() {
+ assert(isLValue() && "Invalid accessor");
+ return ((LV*)(void*)Data.buffer)->Offset;
+}
+
+bool APValue::hasLValuePath() const {
+ assert(isLValue() && "Invalid accessor");
+ return ((const LV*)(const char*)Data.buffer)->hasPath();
+}
+
+ArrayRef<APValue::LValuePathEntry> APValue::getLValuePath() const {
+ assert(isLValue() && hasLValuePath() && "Invalid accessor");
+ const LV &LVal = *((const LV*)(const char*)Data.buffer);
+ return llvm::makeArrayRef(LVal.getPath(), LVal.PathLength);
+}
+
+unsigned APValue::getLValueCallIndex() const {
+ assert(isLValue() && "Invalid accessor");
+ return ((const LV*)(const char*)Data.buffer)->CallIndex;
+}
+
+void APValue::setLValue(LValueBase B, const CharUnits &O, NoLValuePath,
+ unsigned CallIndex) {
+ assert(isLValue() && "Invalid accessor");
+ LV &LVal = *((LV*)(char*)Data.buffer);
+ LVal.BaseAndIsOnePastTheEnd.setPointer(B);
+ LVal.BaseAndIsOnePastTheEnd.setInt(false);
+ LVal.Offset = O;
+ LVal.CallIndex = CallIndex;
+ LVal.resizePath((unsigned)-1);
+}
+
+void APValue::setLValue(LValueBase B, const CharUnits &O,
+ ArrayRef<LValuePathEntry> Path, bool IsOnePastTheEnd,
+ unsigned CallIndex) {
+ assert(isLValue() && "Invalid accessor");
+ LV &LVal = *((LV*)(char*)Data.buffer);
+ LVal.BaseAndIsOnePastTheEnd.setPointer(B);
+ LVal.BaseAndIsOnePastTheEnd.setInt(IsOnePastTheEnd);
+ LVal.Offset = O;
+ LVal.CallIndex = CallIndex;
+ LVal.resizePath(Path.size());
+ memcpy(LVal.getPath(), Path.data(), Path.size() * sizeof(LValuePathEntry));
+}
+
+const ValueDecl *APValue::getMemberPointerDecl() const {
+ assert(isMemberPointer() && "Invalid accessor");
+ const MemberPointerData &MPD =
+ *((const MemberPointerData *)(const char *)Data.buffer);
+ return MPD.MemberAndIsDerivedMember.getPointer();
+}
+
+bool APValue::isMemberPointerToDerivedMember() const {
+ assert(isMemberPointer() && "Invalid accessor");
+ const MemberPointerData &MPD =
+ *((const MemberPointerData *)(const char *)Data.buffer);
+ return MPD.MemberAndIsDerivedMember.getInt();
+}
+
+ArrayRef<const CXXRecordDecl*> APValue::getMemberPointerPath() const {
+ assert(isMemberPointer() && "Invalid accessor");
+ const MemberPointerData &MPD =
+ *((const MemberPointerData *)(const char *)Data.buffer);
+ return llvm::makeArrayRef(MPD.getPath(), MPD.PathLength);
+}
+
+void APValue::MakeLValue() {
+ assert(isUninit() && "Bad state change");
+ static_assert(sizeof(LV) <= DataSize, "LV too big");
+ new ((void*)(char*)Data.buffer) LV();
+ Kind = LValue;
+}
+
+void APValue::MakeArray(unsigned InitElts, unsigned Size) {
+ assert(isUninit() && "Bad state change");
+ new ((void*)(char*)Data.buffer) Arr(InitElts, Size);
+ Kind = Array;
+}
+
+void APValue::MakeMemberPointer(const ValueDecl *Member, bool IsDerivedMember,
+ ArrayRef<const CXXRecordDecl*> Path) {
+ assert(isUninit() && "Bad state change");
+ MemberPointerData *MPD = new ((void*)(char*)Data.buffer) MemberPointerData;
+ Kind = MemberPointer;
+ MPD->MemberAndIsDerivedMember.setPointer(Member);
+ MPD->MemberAndIsDerivedMember.setInt(IsDerivedMember);
+ MPD->resizePath(Path.size());
+ memcpy(MPD->getPath(), Path.data(), Path.size()*sizeof(const CXXRecordDecl*));
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp b/contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp
new file mode 100644
index 0000000..cff82e9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp
@@ -0,0 +1,32 @@
+//===--- ASTConsumer.cpp - Abstract interface for reading ASTs --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTConsumer class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTConsumer.h"
+#include "llvm/Bitcode/BitstreamReader.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclGroup.h"
+using namespace clang;
+
+bool ASTConsumer::HandleTopLevelDecl(DeclGroupRef D) {
+ return true;
+}
+
+void ASTConsumer::HandleInterestingDecl(DeclGroupRef D) {
+ HandleTopLevelDecl(D);
+}
+
+void ASTConsumer::HandleTopLevelDeclInObjCContainer(DeclGroupRef D) {}
+
+void ASTConsumer::HandleImplicitImportDecl(ImportDecl *D) {
+ HandleTopLevelDecl(DeclGroupRef(D));
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
new file mode 100644
index 0000000..d4abbe4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
@@ -0,0 +1,8928 @@
+//===--- ASTContext.cpp - Context to hold long-lived AST nodes ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ASTContext interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "CXXABI.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Comment.h"
+#include "clang/AST/CommentCommandTraits.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclContextInternals.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/Mangle.h"
+#include "clang/AST/MangleNumberingContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/VTableBuilder.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/Capacity.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <map>
+
+using namespace clang;
+
+unsigned ASTContext::NumImplicitDefaultConstructors;
+unsigned ASTContext::NumImplicitDefaultConstructorsDeclared;
+unsigned ASTContext::NumImplicitCopyConstructors;
+unsigned ASTContext::NumImplicitCopyConstructorsDeclared;
+unsigned ASTContext::NumImplicitMoveConstructors;
+unsigned ASTContext::NumImplicitMoveConstructorsDeclared;
+unsigned ASTContext::NumImplicitCopyAssignmentOperators;
+unsigned ASTContext::NumImplicitCopyAssignmentOperatorsDeclared;
+unsigned ASTContext::NumImplicitMoveAssignmentOperators;
+unsigned ASTContext::NumImplicitMoveAssignmentOperatorsDeclared;
+unsigned ASTContext::NumImplicitDestructors;
+unsigned ASTContext::NumImplicitDestructorsDeclared;
+
+enum FloatingRank {
+ HalfRank, FloatRank, DoubleRank, LongDoubleRank
+};
+
+RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
+ if (!CommentsLoaded && ExternalSource) {
+ ExternalSource->ReadComments();
+
+#ifndef NDEBUG
+ ArrayRef<RawComment *> RawComments = Comments.getComments();
+ assert(std::is_sorted(RawComments.begin(), RawComments.end(),
+ BeforeThanCompare<RawComment>(SourceMgr)));
+#endif
+
+ CommentsLoaded = true;
+ }
+
+ assert(D);
+
+ // Users cannot attach documentation to implicit declarations.
+ if (D->isImplicit())
+ return nullptr;
+
+ // Users cannot attach documentation to implicit instantiations.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
+ return nullptr;
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->isStaticDataMember() &&
+ VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
+ return nullptr;
+ }
+
+ if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(D)) {
+ if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
+ return nullptr;
+ }
+
+ if (const ClassTemplateSpecializationDecl *CTSD =
+ dyn_cast<ClassTemplateSpecializationDecl>(D)) {
+ TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
+ if (TSK == TSK_ImplicitInstantiation ||
+ TSK == TSK_Undeclared)
+ return nullptr;
+ }
+
+ if (const EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
+ if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
+ return nullptr;
+ }
+ if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ // When a tag declaration (but not its definition!) is part of the
+ // decl-specifier-seq of some other declaration, it doesn't get a comment.
+ if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
+ return nullptr;
+ }
+ // TODO: handle comments for function parameters properly.
+ if (isa<ParmVarDecl>(D))
+ return nullptr;
+
+ // TODO: we could look up template parameter documentation in the template
+ // documentation.
+ if (isa<TemplateTypeParmDecl>(D) ||
+ isa<NonTypeTemplateParmDecl>(D) ||
+ isa<TemplateTemplateParmDecl>(D))
+ return nullptr;
+
+ ArrayRef<RawComment *> RawComments = Comments.getComments();
+
+ // If there are no comments anywhere, we won't find anything.
+ if (RawComments.empty())
+ return nullptr;
+
+ // Find declaration location.
+ // For Objective-C declarations we generally don't expect to have multiple
+ // declarators, thus use declaration starting location as the "declaration
+ // location".
+ // For all other declarations multiple declarators are used quite frequently,
+ // so we use the location of the identifier as the "declaration location".
+ SourceLocation DeclLoc;
+ if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
+ isa<ObjCPropertyDecl>(D) ||
+ isa<RedeclarableTemplateDecl>(D) ||
+ isa<ClassTemplateSpecializationDecl>(D))
+ DeclLoc = D->getLocStart();
+ else {
+ DeclLoc = D->getLocation();
+ if (DeclLoc.isMacroID()) {
+ if (isa<TypedefDecl>(D)) {
+ // If the location of the typedef name is in a macro, it is because the
+ // typedef is being declared via a macro. Try using the declaration's
+ // starting location as the "declaration location".
+ DeclLoc = D->getLocStart();
+ } else if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ // If location of the tag decl is inside a macro, but the spelling of
+ // the tag name comes from a macro argument, it looks like a special
+ // macro like NS_ENUM is being used to define the tag decl. In that
+ // case, adjust the source location to the expansion loc so that we can
+ // attach the comment to the tag decl.
+ if (SourceMgr.isMacroArgExpansion(DeclLoc) &&
+ TD->isCompleteDefinition())
+ DeclLoc = SourceMgr.getExpansionLoc(DeclLoc);
+ }
+ }
+ }
+
+ // If the declaration doesn't map directly to a location in a file, we
+ // can't find the comment.
+ if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
+ return nullptr;
+
+ // Find the comment that occurs just after this declaration.
+ ArrayRef<RawComment *>::iterator Comment;
+ {
+ // When searching for comments during parsing, the comment we are looking
+ // for is usually among the last two comments we parsed -- check them
+ // first.
+ RawComment CommentAtDeclLoc(
+ SourceMgr, SourceRange(DeclLoc), false,
+ LangOpts.CommentOpts.ParseAllComments);
+ BeforeThanCompare<RawComment> Compare(SourceMgr);
+ ArrayRef<RawComment *>::iterator MaybeBeforeDecl = RawComments.end() - 1;
+ bool Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
+ if (!Found && RawComments.size() >= 2) {
+ MaybeBeforeDecl--;
+ Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
+ }
+
+ if (Found) {
+ Comment = MaybeBeforeDecl + 1;
+ assert(Comment == std::lower_bound(RawComments.begin(), RawComments.end(),
+ &CommentAtDeclLoc, Compare));
+ } else {
+ // Slow path.
+ Comment = std::lower_bound(RawComments.begin(), RawComments.end(),
+ &CommentAtDeclLoc, Compare);
+ }
+ }
+
+ // Decompose the location for the declaration and find the beginning of the
+ // file buffer.
+ std::pair<FileID, unsigned> DeclLocDecomp = SourceMgr.getDecomposedLoc(DeclLoc);
+
+ // First check whether we have a trailing comment.
+ if (Comment != RawComments.end() &&
+ (*Comment)->isDocumentation() && (*Comment)->isTrailingComment() &&
+ (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
+ isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {
+ std::pair<FileID, unsigned> CommentBeginDecomp
+ = SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getBegin());
+ // Check that Doxygen trailing comment comes after the declaration, starts
+ // on the same line and in the same file as the declaration.
+ if (DeclLocDecomp.first == CommentBeginDecomp.first &&
+ SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second)
+ == SourceMgr.getLineNumber(CommentBeginDecomp.first,
+ CommentBeginDecomp.second)) {
+ return *Comment;
+ }
+ }
+
+ // The comment just after the declaration was not a trailing comment.
+ // Let's look at the previous comment.
+ if (Comment == RawComments.begin())
+ return nullptr;
+ --Comment;
+
+ // Check that we actually have a non-member Doxygen comment.
+ if (!(*Comment)->isDocumentation() || (*Comment)->isTrailingComment())
+ return nullptr;
+
+ // Decompose the end of the comment.
+ std::pair<FileID, unsigned> CommentEndDecomp
+ = SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getEnd());
+
+ // If the comment and the declaration aren't in the same file, then they
+ // aren't related.
+ if (DeclLocDecomp.first != CommentEndDecomp.first)
+ return nullptr;
+
+ // Get the corresponding buffer.
+ bool Invalid = false;
+ const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
+ &Invalid).data();
+ if (Invalid)
+ return nullptr;
+
+ // Extract text between the comment and declaration.
+ StringRef Text(Buffer + CommentEndDecomp.second,
+ DeclLocDecomp.second - CommentEndDecomp.second);
+
+ // There should be no other declarations or preprocessor directives between
+ // comment and declaration.
+ if (Text.find_first_of(";{}#@") != StringRef::npos)
+ return nullptr;
+
+ return *Comment;
+}
+
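+// For reference, the ";{}#@" check above is what rejects attaching a comment
+// across an intervening declaration, e.g. (illustrative):
+//
+//   /// Doc for f.
+//   void f();
+//   void g();  // gets no comment: 'f's declaration sits in between
+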
+namespace {
+/// If we have a 'templated' declaration for a template, adjust 'D' to
+/// refer to the actual template.
+/// If we have an implicit instantiation, adjust 'D' to refer to the template.
+const Decl *adjustDeclToTemplate(const Decl *D) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // Is this function declaration part of a function template?
+ if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
+ return FTD;
+
+ // Nothing to do if function is not an implicit instantiation.
+ if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
+ return D;
+
+ // Function is an implicit instantiation of a function template?
+ if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
+ return FTD;
+
+ // Function is instantiated from a member definition of a class template?
+ if (const FunctionDecl *MemberDecl =
+ FD->getInstantiatedFromMemberFunction())
+ return MemberDecl;
+
+ return D;
+ }
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ // Static data member is instantiated from a member definition of a class
+ // template?
+ if (VD->isStaticDataMember())
+ if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
+ return MemberDecl;
+
+ return D;
+ }
+ if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(D)) {
+ // Is this class declaration part of a class template?
+ if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
+ return CTD;
+
+ // Class is an implicit instantiation of a class template or partial
+ // specialization?
+ if (const ClassTemplateSpecializationDecl *CTSD =
+ dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
+ if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
+ return D;
+ llvm::PointerUnion<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *>
+ PU = CTSD->getSpecializedTemplateOrPartial();
+ return PU.is<ClassTemplateDecl*>() ?
+ static_cast<const Decl*>(PU.get<ClassTemplateDecl *>()) :
+ static_cast<const Decl*>(
+ PU.get<ClassTemplatePartialSpecializationDecl *>());
+ }
+
+ // Class is instantiated from a member definition of a class template?
+ if (const MemberSpecializationInfo *Info =
+ CRD->getMemberSpecializationInfo())
+ return Info->getInstantiatedFrom();
+
+ return D;
+ }
+ if (const EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
+ // Enum is instantiated from a member definition of a class template?
+ if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
+ return MemberDecl;
+
+ return D;
+ }
+ // FIXME: Adjust alias templates?
+ return D;
+}
+} // anonymous namespace
+
+const RawComment *ASTContext::getRawCommentForAnyRedecl(
+ const Decl *D,
+ const Decl **OriginalDecl) const {
+ D = adjustDeclToTemplate(D);
+
+ // Check whether we have cached a comment for this declaration already.
+ {
+ llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
+ RedeclComments.find(D);
+ if (Pos != RedeclComments.end()) {
+ const RawCommentAndCacheFlags &Raw = Pos->second;
+ if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) {
+ if (OriginalDecl)
+ *OriginalDecl = Raw.getOriginalDecl();
+ return Raw.getRaw();
+ }
+ }
+ }
+
+ // Search for comments attached to declarations in the redeclaration chain.
+ const RawComment *RC = nullptr;
+ const Decl *OriginalDeclForRC = nullptr;
+ for (auto I : D->redecls()) {
+ llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
+ RedeclComments.find(I);
+ if (Pos != RedeclComments.end()) {
+ const RawCommentAndCacheFlags &Raw = Pos->second;
+ if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) {
+ RC = Raw.getRaw();
+ OriginalDeclForRC = Raw.getOriginalDecl();
+ break;
+ }
+ } else {
+ RC = getRawCommentForDeclNoCache(I);
+ OriginalDeclForRC = I;
+ RawCommentAndCacheFlags Raw;
+ if (RC) {
+ // Call order swapped to work around ICE in VS2015 RTM (Release Win32)
+ // https://connect.microsoft.com/VisualStudio/feedback/details/1741530
+ Raw.setKind(RawCommentAndCacheFlags::FromDecl);
+ Raw.setRaw(RC);
+ } else
+ Raw.setKind(RawCommentAndCacheFlags::NoCommentInDecl);
+ Raw.setOriginalDecl(I);
+ RedeclComments[I] = Raw;
+ if (RC)
+ break;
+ }
+ }
+
+ // If we found a comment, it should be a documentation comment.
+ assert(!RC || RC->isDocumentation());
+
+ if (OriginalDecl)
+ *OriginalDecl = OriginalDeclForRC;
+
+ // Update cache for every declaration in the redeclaration chain.
+ RawCommentAndCacheFlags Raw;
+ Raw.setRaw(RC);
+ Raw.setKind(RawCommentAndCacheFlags::FromRedecl);
+ Raw.setOriginalDecl(OriginalDeclForRC);
+
+ for (auto I : D->redecls()) {
+ RawCommentAndCacheFlags &R = RedeclComments[I];
+ if (R.getKind() == RawCommentAndCacheFlags::NoCommentInDecl)
+ R = Raw;
+ }
+
+ return RC;
+}
+
+static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
+ SmallVectorImpl<const NamedDecl *> &Redeclared) {
+ const DeclContext *DC = ObjCMethod->getDeclContext();
+ if (const ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(DC)) {
+ const ObjCInterfaceDecl *ID = IMD->getClassInterface();
+ if (!ID)
+ return;
+ // Add redeclared method here.
+ for (const auto *Ext : ID->known_extensions()) {
+ if (ObjCMethodDecl *RedeclaredMethod =
+ Ext->getMethod(ObjCMethod->getSelector(),
+ ObjCMethod->isInstanceMethod()))
+ Redeclared.push_back(RedeclaredMethod);
+ }
+ }
+}
+
+comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
+ const Decl *D) const {
+ comments::DeclInfo *ThisDeclInfo = new (*this) comments::DeclInfo;
+ ThisDeclInfo->CommentDecl = D;
+ ThisDeclInfo->IsFilled = false;
+ ThisDeclInfo->fill();
+ ThisDeclInfo->CommentDecl = FC->getDecl();
+ if (!ThisDeclInfo->TemplateParameters)
+ ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
+ comments::FullComment *CFC =
+ new (*this) comments::FullComment(FC->getBlocks(),
+ ThisDeclInfo);
+ return CFC;
+}
+
+comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
+ const RawComment *RC = getRawCommentForDeclNoCache(D);
+ return RC ? RC->parse(*this, nullptr, D) : nullptr;
+}
+
+comments::FullComment *ASTContext::getCommentForDecl(
+ const Decl *D,
+ const Preprocessor *PP) const {
+ if (D->isInvalidDecl())
+ return nullptr;
+ D = adjustDeclToTemplate(D);
+
+ const Decl *Canonical = D->getCanonicalDecl();
+ llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
+ ParsedComments.find(Canonical);
+
+ if (Pos != ParsedComments.end()) {
+ if (Canonical != D) {
+ comments::FullComment *FC = Pos->second;
+ comments::FullComment *CFC = cloneFullComment(FC, D);
+ return CFC;
+ }
+ return Pos->second;
+ }
+
+ const Decl *OriginalDecl;
+
+ const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
+ if (!RC) {
+ if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
+ SmallVector<const NamedDecl*, 8> Overridden;
+ const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D);
+ if (OMD && OMD->isPropertyAccessor())
+ if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
+ if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
+ return cloneFullComment(FC, D);
+ if (OMD)
+ addRedeclaredMethods(OMD, Overridden);
+ getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
+ for (unsigned i = 0, e = Overridden.size(); i < e; i++)
+ if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
+ return cloneFullComment(FC, D);
+ }
+ else if (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+ // Attach any tag type's documentation to its typedef if the latter
+ // does not have one of its own.
+ QualType QT = TD->getUnderlyingType();
+ if (const TagType *TT = QT->getAs<TagType>())
+ if (const Decl *TD = TT->getDecl())
+ if (comments::FullComment *FC = getCommentForDecl(TD, PP))
+ return cloneFullComment(FC, D);
+ }
+ else if (const ObjCInterfaceDecl *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
+ while (IC->getSuperClass()) {
+ IC = IC->getSuperClass();
+ if (comments::FullComment *FC = getCommentForDecl(IC, PP))
+ return cloneFullComment(FC, D);
+ }
+ }
+ else if (const ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(D)) {
+ if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
+ if (comments::FullComment *FC = getCommentForDecl(IC, PP))
+ return cloneFullComment(FC, D);
+ }
+ else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ if (!(RD = RD->getDefinition()))
+ return nullptr;
+ // Check non-virtual bases.
+ for (const auto &I : RD->bases()) {
+ if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
+ continue;
+ QualType Ty = I.getType();
+ if (Ty.isNull())
+ continue;
+ if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
+ if (!(NonVirtualBase = NonVirtualBase->getDefinition()))
+ continue;
+
+ if (comments::FullComment *FC = getCommentForDecl(NonVirtualBase, PP))
+ return cloneFullComment(FC, D);
+ }
+ }
+ // Check virtual bases.
+ for (const auto &I : RD->vbases()) {
+ if (I.getAccessSpecifier() != AS_public)
+ continue;
+ QualType Ty = I.getType();
+ if (Ty.isNull())
+ continue;
+ if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
+ if (!(VirtualBase = VirtualBase->getDefinition()))
+ continue;
+ if (comments::FullComment *FC = getCommentForDecl(VirtualBase, PP))
+ return cloneFullComment(FC, D);
+ }
+ }
+ }
+ return nullptr;
+ }
+
+ // If the RawComment was attached to other redeclaration of this Decl, we
+ // should parse the comment in context of that other Decl. This is important
+ // because comments can contain references to parameter names which can be
+ // different across redeclarations.
+ if (D != OriginalDecl)
+ return getCommentForDecl(OriginalDecl, PP);
+
+ comments::FullComment *FC = RC->parse(*this, PP, D);
+ ParsedComments[Canonical] = FC;
+ return FC;
+}
+
+void
+ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
+ TemplateTemplateParmDecl *Parm) {
+ ID.AddInteger(Parm->getDepth());
+ ID.AddInteger(Parm->getPosition());
+ ID.AddBoolean(Parm->isParameterPack());
+
+ TemplateParameterList *Params = Parm->getTemplateParameters();
+ ID.AddInteger(Params->size());
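+ // Each parameter is profiled with a kind tag so that, e.g., a type
+ // parameter and a non-type parameter at the same position hash
+ // differently: 0 = type, 1 = non-type, 2 = template template.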
+ for (TemplateParameterList::const_iterator P = Params->begin(),
+ PEnd = Params->end();
+ P != PEnd; ++P) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
+ ID.AddInteger(0);
+ ID.AddBoolean(TTP->isParameterPack());
+ continue;
+ }
+
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ ID.AddInteger(1);
+ ID.AddBoolean(NTTP->isParameterPack());
+ ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
+ if (NTTP->isExpandedParameterPack()) {
+ ID.AddBoolean(true);
+ ID.AddInteger(NTTP->getNumExpansionTypes());
+ for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
+ QualType T = NTTP->getExpansionType(I);
+ ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
+ }
+ } else
+ ID.AddBoolean(false);
+ continue;
+ }
+
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*P);
+ ID.AddInteger(2);
+ Profile(ID, TTP);
+ }
+}
+
+TemplateTemplateParmDecl *
+ASTContext::getCanonicalTemplateTemplateParmDecl(
+ TemplateTemplateParmDecl *TTP) const {
+ // Check if we already have a canonical template template parameter.
+ llvm::FoldingSetNodeID ID;
+ CanonicalTemplateTemplateParm::Profile(ID, TTP);
+ void *InsertPos = nullptr;
+ CanonicalTemplateTemplateParm *Canonical
+ = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
+ if (Canonical)
+ return Canonical->getParam();
+
+ // Build a canonical template parameter list.
+ TemplateParameterList *Params = TTP->getTemplateParameters();
+ SmallVector<NamedDecl *, 4> CanonParams;
+ CanonParams.reserve(Params->size());
+ for (TemplateParameterList::const_iterator P = Params->begin(),
+ PEnd = Params->end();
+ P != PEnd; ++P) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P))
+ CanonParams.push_back(
+ TemplateTypeParmDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(),
+ TTP->getDepth(),
+ TTP->getIndex(), nullptr, false,
+ TTP->isParameterPack()));
+ else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ QualType T = getCanonicalType(NTTP->getType());
+ TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
+ NonTypeTemplateParmDecl *Param;
+ if (NTTP->isExpandedParameterPack()) {
+ SmallVector<QualType, 2> ExpandedTypes;
+ SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
+ for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
+ ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
+ ExpandedTInfos.push_back(
+ getTrivialTypeSourceInfo(ExpandedTypes.back()));
+ }
+
+ Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(),
+ NTTP->getDepth(),
+ NTTP->getPosition(), nullptr,
+ T,
+ TInfo,
+ ExpandedTypes.data(),
+ ExpandedTypes.size(),
+ ExpandedTInfos.data());
+ } else {
+ Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(),
+ NTTP->getDepth(),
+ NTTP->getPosition(), nullptr,
+ T,
+ NTTP->isParameterPack(),
+ TInfo);
+ }
+ CanonParams.push_back(Param);
+
+ } else
+ CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
+ cast<TemplateTemplateParmDecl>(*P)));
+ }
+
+ TemplateTemplateParmDecl *CanonTTP
+ = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(), TTP->getDepth(),
+ TTP->getPosition(),
+ TTP->isParameterPack(),
+ nullptr,
+ TemplateParameterList::Create(*this, SourceLocation(),
+ SourceLocation(),
+ CanonParams,
+ SourceLocation()));
+
+ // Get the new insert position for the node we care about.
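+ // (Canonicalizing the parameters above may have recursively inserted
+ // other nodes into the folding set, invalidating the old InsertPos.)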
+ Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!Canonical && "Shouldn't be in the map!");
+ (void)Canonical;
+
+ // Create the canonical template template parameter entry.
+ Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
+ CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
+ return CanonTTP;
+}
+
+CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
+ if (!LangOpts.CPlusPlus) return nullptr;
+
+ switch (T.getCXXABI().getKind()) {
+ case TargetCXXABI::GenericARM: // Same as Itanium at this level
+ case TargetCXXABI::iOS:
+ case TargetCXXABI::iOS64:
+ case TargetCXXABI::WatchOS:
+ case TargetCXXABI::GenericAArch64:
+ case TargetCXXABI::GenericMIPS:
+ case TargetCXXABI::GenericItanium:
+ case TargetCXXABI::WebAssembly:
+ return CreateItaniumCXXABI(*this);
+ case TargetCXXABI::Microsoft:
+ return CreateMicrosoftCXXABI(*this);
+ }
+ llvm_unreachable("Invalid CXXABI type!");
+}
+
+static const LangAS::Map *getAddressSpaceMap(const TargetInfo &T,
+ const LangOptions &LOpts) {
+ if (LOpts.FakeAddressSpaceMap) {
+ // The fake address space map must have a distinct entry for each
+ // language-specific address space.
+ static const unsigned FakeAddrSpaceMap[] = {
+ 1, // opencl_global
+ 2, // opencl_local
+ 3, // opencl_constant
+ 4, // opencl_generic
+ 5, // cuda_device
+ 6, // cuda_constant
+ 7 // cuda_shared
+ };
+ return &FakeAddrSpaceMap;
+ } else {
+ return &T.getAddressSpaceMap();
+ }
+}
+
+static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
+ const LangOptions &LangOpts) {
+ switch (LangOpts.getAddressSpaceMapMangling()) {
+ case LangOptions::ASMM_Target:
+ return TI.useAddressSpaceMapMangling();
+ case LangOptions::ASMM_On:
+ return true;
+ case LangOptions::ASMM_Off:
+ return false;
+ }
+ llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
+}
+
+ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
+ IdentifierTable &idents, SelectorTable &sels,
+ Builtin::Context &builtins)
+ : FunctionProtoTypes(this_()), TemplateSpecializationTypes(this_()),
+ DependentTemplateSpecializationTypes(this_()),
+ SubstTemplateTemplateParmPacks(this_()),
+ GlobalNestedNameSpecifier(nullptr), Int128Decl(nullptr),
+ UInt128Decl(nullptr), Float128StubDecl(nullptr),
+ BuiltinVaListDecl(nullptr), BuiltinMSVaListDecl(nullptr),
+ ObjCIdDecl(nullptr), ObjCSelDecl(nullptr), ObjCClassDecl(nullptr),
+ ObjCProtocolClassDecl(nullptr), BOOLDecl(nullptr),
+ CFConstantStringTypeDecl(nullptr), ObjCInstanceTypeDecl(nullptr),
+ FILEDecl(nullptr), jmp_bufDecl(nullptr), sigjmp_bufDecl(nullptr),
+ ucontext_tDecl(nullptr), BlockDescriptorType(nullptr),
+ BlockDescriptorExtendedType(nullptr), cudaConfigureCallDecl(nullptr),
+ FirstLocalImport(), LastLocalImport(), ExternCContext(nullptr),
+ MakeIntegerSeqDecl(nullptr), SourceMgr(SM), LangOpts(LOpts),
+ SanitizerBL(new SanitizerBlacklist(LangOpts.SanitizerBlacklistFiles, SM)),
+ AddrSpaceMap(nullptr), Target(nullptr), AuxTarget(nullptr),
+ PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
+ BuiltinInfo(builtins), DeclarationNames(*this), ExternalSource(nullptr),
+ Listener(nullptr), Comments(SM), CommentsLoaded(false),
+ CommentCommandTraits(BumpAlloc, LOpts.CommentOpts), LastSDM(nullptr, 0) {
+ TUDecl = TranslationUnitDecl::Create(*this);
+}
+
+ASTContext::~ASTContext() {
+ ReleaseParentMapEntries();
+
+ // Release the DenseMaps associated with DeclContext objects.
+ // FIXME: Is this the ideal solution?
+ ReleaseDeclContextMaps();
+
+ // Call all of the deallocation functions on all of their targets.
+ for (auto &Pair : Deallocations)
+ (Pair.first)(Pair.second);
+
+ // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
+ // because they can contain DenseMaps.
+ for (llvm::DenseMap<const ObjCContainerDecl*,
+ const ASTRecordLayout*>::iterator
+ I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
+ // Increment in loop to prevent using deallocated memory.
+ if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
+ R->Destroy(*this);
+
+ for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
+ I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
+ // Increment in loop to prevent using deallocated memory.
+ if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
+ R->Destroy(*this);
+ }
+
+ for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
+ AEnd = DeclAttrs.end();
+ A != AEnd; ++A)
+ A->second->~AttrVec();
+
+ for (std::pair<const MaterializeTemporaryExpr *, APValue *> &MTVPair :
+ MaterializedTemporaryValues)
+ MTVPair.second->~APValue();
+
+ llvm::DeleteContainerSeconds(MangleNumberingContexts);
+}
+
+void ASTContext::ReleaseParentMapEntries() {
+ if (!PointerParents) return;
+ for (const auto &Entry : *PointerParents) {
+ if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
+ delete Entry.second.get<ast_type_traits::DynTypedNode *>();
+ } else if (Entry.second.is<ParentVector *>()) {
+ delete Entry.second.get<ParentVector *>();
+ }
+ }
+ for (const auto &Entry : *OtherParents) {
+ if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
+ delete Entry.second.get<ast_type_traits::DynTypedNode *>();
+ } else if (Entry.second.is<ParentVector *>()) {
+ delete Entry.second.get<ParentVector *>();
+ }
+ }
+}
+
+void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
+ Deallocations.push_back({Callback, Data});
+}
+
+void
+ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
+ ExternalSource = Source;
+}
+
+void ASTContext::PrintStats() const {
+ llvm::errs() << "\n*** AST Context Stats:\n";
+ llvm::errs() << " " << Types.size() << " types total.\n";
+
+ unsigned counts[] = {
+#define TYPE(Name, Parent) 0,
+#define ABSTRACT_TYPE(Name, Parent)
+#include "clang/AST/TypeNodes.def"
+ 0 // Extra
+ };
+
+ for (unsigned i = 0, e = Types.size(); i != e; ++i) {
+ Type *T = Types[i];
+ counts[(unsigned)T->getTypeClass()]++;
+ }
+
+ unsigned Idx = 0;
+ unsigned TotalBytes = 0;
+#define TYPE(Name, Parent) \
+ if (counts[Idx]) \
+ llvm::errs() << " " << counts[Idx] << " " << #Name \
+ << " types\n"; \
+ TotalBytes += counts[Idx] * sizeof(Name##Type); \
+ ++Idx;
+#define ABSTRACT_TYPE(Name, Parent)
+#include "clang/AST/TypeNodes.def"
+
+ llvm::errs() << "Total bytes = " << TotalBytes << "\n";
+
+ // Implicit special member functions.
+ llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
+ << NumImplicitDefaultConstructors
+ << " implicit default constructors created\n";
+ llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
+ << NumImplicitCopyConstructors
+ << " implicit copy constructors created\n";
+ if (getLangOpts().CPlusPlus)
+ llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
+ << NumImplicitMoveConstructors
+ << " implicit move constructors created\n";
+ llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
+ << NumImplicitCopyAssignmentOperators
+ << " implicit copy assignment operators created\n";
+ if (getLangOpts().CPlusPlus)
+ llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
+ << NumImplicitMoveAssignmentOperators
+ << " implicit move assignment operators created\n";
+ llvm::errs() << NumImplicitDestructorsDeclared << "/"
+ << NumImplicitDestructors
+ << " implicit destructors created\n";
+
+ if (ExternalSource) {
+ llvm::errs() << "\n";
+ ExternalSource->PrintStats();
+ }
+
+ BumpAlloc.PrintStats();
+}
+
+void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
+ bool NotifyListeners) {
+ if (NotifyListeners)
+ if (auto *Listener = getASTMutationListener())
+ Listener->RedefinedHiddenDefinition(ND, M);
+
+ if (getLangOpts().ModulesLocalVisibility)
+ MergedDefModules[ND].push_back(M);
+ else
+ ND->setHidden(false);
+}
+
+void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
+ auto It = MergedDefModules.find(ND);
+ if (It == MergedDefModules.end())
+ return;
+
+ auto &Merged = It->second;
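+ // Null out duplicates in place, then erase the nulls in a single pass
+ // (the erase-remove idiom), preserving the order of first occurrences.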
+ llvm::DenseSet<Module*> Found;
+ for (Module *&M : Merged)
+ if (!Found.insert(M).second)
+ M = nullptr;
+ Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr), Merged.end());
+}
+
+ExternCContextDecl *ASTContext::getExternCContextDecl() const {
+ if (!ExternCContext)
+ ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());
+
+ return ExternCContext;
+}
+
+BuiltinTemplateDecl *
+ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
+ const IdentifierInfo *II) const {
+ auto *BuiltinTemplate = BuiltinTemplateDecl::Create(*this, TUDecl, II, BTK);
+ BuiltinTemplate->setImplicit();
+ TUDecl->addDecl(BuiltinTemplate);
+
+ return BuiltinTemplate;
+}
+
+BuiltinTemplateDecl *
+ASTContext::getMakeIntegerSeqDecl() const {
+ if (!MakeIntegerSeqDecl)
+ MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
+ getMakeIntegerSeqName());
+ return MakeIntegerSeqDecl;
+}
+
+RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
+ RecordDecl::TagKind TK) const {
+ SourceLocation Loc;
+ RecordDecl *NewDecl;
+ if (getLangOpts().CPlusPlus)
+ NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
+ Loc, &Idents.get(Name));
+ else
+ NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
+ &Idents.get(Name));
+ NewDecl->setImplicit();
+ NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
+ const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
+ return NewDecl;
+}
+
+TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
+ StringRef Name) const {
+ TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
+ TypedefDecl *NewDecl = TypedefDecl::Create(
+ const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
+ NewDecl->setImplicit();
+ return NewDecl;
+}
+
+TypedefDecl *ASTContext::getInt128Decl() const {
+ if (!Int128Decl)
+ Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
+ return Int128Decl;
+}
+
+TypedefDecl *ASTContext::getUInt128Decl() const {
+ if (!UInt128Decl)
+ UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
+ return UInt128Decl;
+}
+
+TypeDecl *ASTContext::getFloat128StubType() const {
+ assert(LangOpts.CPlusPlus && "should only be called for c++");
+ if (!Float128StubDecl)
+ Float128StubDecl = buildImplicitRecord("__float128");
+
+ return Float128StubDecl;
+}
+
+void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
+ BuiltinType *Ty = new (*this, TypeAlignment) BuiltinType(K);
+ R = CanQualType::CreateUnsafe(QualType(Ty, 0));
+ Types.push_back(Ty);
+}
+
+void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
+ const TargetInfo *AuxTarget) {
+ assert((!this->Target || this->Target == &Target) &&
+ "Incorrect target reinitialization");
+ assert(VoidTy.isNull() && "Context reinitialized?");
+
+ this->Target = &Target;
+ this->AuxTarget = AuxTarget;
+
+ ABI.reset(createCXXABI(Target));
+ AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
+ AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);
+
+ // C99 6.2.5p19.
+ InitBuiltinType(VoidTy, BuiltinType::Void);
+
+ // C99 6.2.5p2.
+ InitBuiltinType(BoolTy, BuiltinType::Bool);
+ // C99 6.2.5p3.
+ if (LangOpts.CharIsSigned)
+ InitBuiltinType(CharTy, BuiltinType::Char_S);
+ else
+ InitBuiltinType(CharTy, BuiltinType::Char_U);
+ // C99 6.2.5p4.
+ InitBuiltinType(SignedCharTy, BuiltinType::SChar);
+ InitBuiltinType(ShortTy, BuiltinType::Short);
+ InitBuiltinType(IntTy, BuiltinType::Int);
+ InitBuiltinType(LongTy, BuiltinType::Long);
+ InitBuiltinType(LongLongTy, BuiltinType::LongLong);
+
+ // C99 6.2.5p6.
+ InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
+ InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
+ InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
+ InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
+ InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);
+
+ // C99 6.2.5p10.
+ InitBuiltinType(FloatTy, BuiltinType::Float);
+ InitBuiltinType(DoubleTy, BuiltinType::Double);
+ InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);
+
+ // GNU extension, 128-bit integers.
+ InitBuiltinType(Int128Ty, BuiltinType::Int128);
+ InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);
+
+ // C++ 3.9.1p5
+ if (TargetInfo::isTypeSigned(Target.getWCharType()))
+ InitBuiltinType(WCharTy, BuiltinType::WChar_S);
+ else // -fshort-wchar makes wchar_t be unsigned.
+ InitBuiltinType(WCharTy, BuiltinType::WChar_U);
+ if (LangOpts.CPlusPlus && LangOpts.WChar)
+ WideCharTy = WCharTy;
+ else {
+ // C99 (or C++ using -fno-wchar).
+ WideCharTy = getFromTargetType(Target.getWCharType());
+ }
+
+ WIntTy = getFromTargetType(Target.getWIntType());
+
+ if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
+ InitBuiltinType(Char16Ty, BuiltinType::Char16);
+ else // C99
+ Char16Ty = getFromTargetType(Target.getChar16Type());
+
+ if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
+ InitBuiltinType(Char32Ty, BuiltinType::Char32);
+ else // C99
+ Char32Ty = getFromTargetType(Target.getChar32Type());
+
+ // Placeholder type for type-dependent expressions whose type is
+ // completely unknown. No code should ever check a type against
+ // DependentTy and users should never see it; however, it is here to
+ // help diagnose failures to properly check for type-dependent
+ // expressions.
+ InitBuiltinType(DependentTy, BuiltinType::Dependent);
+
+ // Placeholder type for functions.
+ InitBuiltinType(OverloadTy, BuiltinType::Overload);
+
+ // Placeholder type for bound members.
+ InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);
+
+ // Placeholder type for pseudo-objects.
+ InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);
+
+ // "any" type; useful for debugger-like clients.
+ InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);
+
+ // Placeholder type for unbridged ARC casts.
+ InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);
+
+ // Placeholder type for builtin functions.
+ InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);
+
+ // Placeholder type for OMP array sections.
+ if (LangOpts.OpenMP)
+ InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
+
+ // C99 6.2.5p11.
+ FloatComplexTy = getComplexType(FloatTy);
+ DoubleComplexTy = getComplexType(DoubleTy);
+ LongDoubleComplexTy = getComplexType(LongDoubleTy);
+
+ // Builtin types for 'id', 'Class', and 'SEL'.
+ InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
+ InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
+ InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);
+
+ if (LangOpts.OpenCL) {
+ InitBuiltinType(OCLImage1dTy, BuiltinType::OCLImage1d);
+ InitBuiltinType(OCLImage1dArrayTy, BuiltinType::OCLImage1dArray);
+ InitBuiltinType(OCLImage1dBufferTy, BuiltinType::OCLImage1dBuffer);
+ InitBuiltinType(OCLImage2dTy, BuiltinType::OCLImage2d);
+ InitBuiltinType(OCLImage2dArrayTy, BuiltinType::OCLImage2dArray);
+ InitBuiltinType(OCLImage2dDepthTy, BuiltinType::OCLImage2dDepth);
+ InitBuiltinType(OCLImage2dArrayDepthTy, BuiltinType::OCLImage2dArrayDepth);
+ InitBuiltinType(OCLImage2dMSAATy, BuiltinType::OCLImage2dMSAA);
+ InitBuiltinType(OCLImage2dArrayMSAATy, BuiltinType::OCLImage2dArrayMSAA);
+ InitBuiltinType(OCLImage2dMSAADepthTy, BuiltinType::OCLImage2dMSAADepth);
+ InitBuiltinType(OCLImage2dArrayMSAADepthTy,
+ BuiltinType::OCLImage2dArrayMSAADepth);
+ InitBuiltinType(OCLImage3dTy, BuiltinType::OCLImage3d);
+
+ InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
+ InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
+ InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
+ InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
+ InitBuiltinType(OCLNDRangeTy, BuiltinType::OCLNDRange);
+ InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);
+ }
+
+ // Builtin type for __objc_yes and __objc_no
+ ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
+ SignedCharTy : BoolTy);
+
+ ObjCConstantStringType = QualType();
+
+ ObjCSuperType = QualType();
+
+ // void * type
+ VoidPtrTy = getPointerType(VoidTy);
+
+ // nullptr type (C++0x 2.14.7)
+ InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);
+
+ // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
+ InitBuiltinType(HalfTy, BuiltinType::Half);
+
+ // Builtin type used to help define __builtin_va_list.
+ VaListTagDecl = nullptr;
+}
+
+DiagnosticsEngine &ASTContext::getDiagnostics() const {
+ return SourceMgr.getDiagnostics();
+}
+
+AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
+ AttrVec *&Result = DeclAttrs[D];
+ if (!Result) {
+ void *Mem = Allocate(sizeof(AttrVec));
+ Result = new (Mem) AttrVec;
+ }
+
+ return *Result;
+}
+
+/// \brief Erase the attributes corresponding to the given declaration.
+void ASTContext::eraseDeclAttrs(const Decl *D) {
+ llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
+ if (Pos != DeclAttrs.end()) {
+ Pos->second->~AttrVec();
+ DeclAttrs.erase(Pos);
+ }
+}
+
+// FIXME: Remove?
+MemberSpecializationInfo *
+ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
+ assert(Var->isStaticDataMember() && "Not a static data member");
+ return getTemplateOrSpecializationInfo(Var)
+ .dyn_cast<MemberSpecializationInfo *>();
+}
+
+ASTContext::TemplateOrSpecializationInfo
+ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
+ llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
+ TemplateOrInstantiation.find(Var);
+ if (Pos == TemplateOrInstantiation.end())
+ return TemplateOrSpecializationInfo();
+
+ return Pos->second;
+}
+
+void
+ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
+ TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation) {
+ assert(Inst->isStaticDataMember() && "Not a static data member");
+ assert(Tmpl->isStaticDataMember() && "Not a static data member");
+ setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
+ Tmpl, TSK, PointOfInstantiation));
+}
+
+void
+ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
+ TemplateOrSpecializationInfo TSI) {
+ assert(!TemplateOrInstantiation[Inst] &&
+ "Already noted what the variable was instantiated from");
+ TemplateOrInstantiation[Inst] = TSI;
+}
+
+FunctionDecl *ASTContext::getClassScopeSpecializationPattern(
+ const FunctionDecl *FD){
+ assert(FD && "Specialization is 0");
+ llvm::DenseMap<const FunctionDecl*, FunctionDecl *>::const_iterator Pos
+ = ClassScopeSpecializationPattern.find(FD);
+ if (Pos == ClassScopeSpecializationPattern.end())
+ return nullptr;
+
+ return Pos->second;
+}
+
+void ASTContext::setClassScopeSpecializationPattern(FunctionDecl *FD,
+ FunctionDecl *Pattern) {
+ assert(FD && "Specialization is 0");
+ assert(Pattern && "Class scope specialization pattern is 0");
+ ClassScopeSpecializationPattern[FD] = Pattern;
+}
+
+NamedDecl *
+ASTContext::getInstantiatedFromUsingDecl(UsingDecl *UUD) {
+ llvm::DenseMap<UsingDecl *, NamedDecl *>::const_iterator Pos
+ = InstantiatedFromUsingDecl.find(UUD);
+ if (Pos == InstantiatedFromUsingDecl.end())
+ return nullptr;
+
+ return Pos->second;
+}
+
+void
+ASTContext::setInstantiatedFromUsingDecl(UsingDecl *Inst, NamedDecl *Pattern) {
+ assert((isa<UsingDecl>(Pattern) ||
+ isa<UnresolvedUsingValueDecl>(Pattern) ||
+ isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
+ "pattern decl is not a using decl");
+ assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
+ InstantiatedFromUsingDecl[Inst] = Pattern;
+}
+
+UsingShadowDecl *
+ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
+ llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
+ = InstantiatedFromUsingShadowDecl.find(Inst);
+ if (Pos == InstantiatedFromUsingShadowDecl.end())
+ return nullptr;
+
+ return Pos->second;
+}
+
+void
+ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
+ UsingShadowDecl *Pattern) {
+ assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
+ InstantiatedFromUsingShadowDecl[Inst] = Pattern;
+}
+
+FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
+ llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
+ = InstantiatedFromUnnamedFieldDecl.find(Field);
+ if (Pos == InstantiatedFromUnnamedFieldDecl.end())
+ return nullptr;
+
+ return Pos->second;
+}
+
+void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
+ FieldDecl *Tmpl) {
+ assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
+ assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
+ assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
+ "Already noted what unnamed field was instantiated from");
+
+ InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
+}
+
+ASTContext::overridden_cxx_method_iterator
+ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
+ llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
+ = OverriddenMethods.find(Method->getCanonicalDecl());
+ if (Pos == OverriddenMethods.end())
+ return nullptr;
+
+ return Pos->second.begin();
+}
+
+ASTContext::overridden_cxx_method_iterator
+ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
+ llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
+ = OverriddenMethods.find(Method->getCanonicalDecl());
+ if (Pos == OverriddenMethods.end())
+ return nullptr;
+
+ return Pos->second.end();
+}
+
+unsigned
+ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
+ llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
+ = OverriddenMethods.find(Method->getCanonicalDecl());
+ if (Pos == OverriddenMethods.end())
+ return 0;
+
+ return Pos->second.size();
+}
+
+void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
+ const CXXMethodDecl *Overridden) {
+ assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
+ OverriddenMethods[Method].push_back(Overridden);
+}
+
+void ASTContext::getOverriddenMethods(
+ const NamedDecl *D,
+ SmallVectorImpl<const NamedDecl *> &Overridden) const {
+ assert(D);
+
+ if (const CXXMethodDecl *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
+ Overridden.append(overridden_methods_begin(CXXMethod),
+ overridden_methods_end(CXXMethod));
+ return;
+ }
+
+ const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(D);
+ if (!Method)
+ return;
+
+ SmallVector<const ObjCMethodDecl *, 8> OverDecls;
+ Method->getOverriddenMethods(OverDecls);
+ Overridden.append(OverDecls.begin(), OverDecls.end());
+}
+
+void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
+ assert(!Import->NextLocalImport && "Import declaration already in the chain");
+ assert(!Import->isFromASTFile() && "Non-local import declaration");
+ if (!FirstLocalImport) {
+ FirstLocalImport = Import;
+ LastLocalImport = Import;
+ return;
+ }
+
+ LastLocalImport->NextLocalImport = Import;
+ LastLocalImport = Import;
+}
+
+//===----------------------------------------------------------------------===//
+// Type Sizing and Analysis
+//===----------------------------------------------------------------------===//
+
+/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
+/// scalar floating point type.
+const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
+ const BuiltinType *BT = T->getAs<BuiltinType>();
+ assert(BT && "Not a floating point type!");
+ switch (BT->getKind()) {
+ default: llvm_unreachable("Not a floating point type!");
+ case BuiltinType::Half: return Target->getHalfFormat();
+ case BuiltinType::Float: return Target->getFloatFormat();
+ case BuiltinType::Double: return Target->getDoubleFormat();
+ case BuiltinType::LongDouble: return Target->getLongDoubleFormat();
+ }
+}
+
+CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
+ unsigned Align = Target->getCharWidth();
+
+ bool UseAlignAttrOnly = false;
+ if (unsigned AlignFromAttr = D->getMaxAlignment()) {
+ Align = AlignFromAttr;
+
+ // __attribute__((aligned)) can increase or decrease alignment
+ // *except* on a struct or struct member, where it only increases
+ // alignment unless 'packed' is also specified.
+ //
+ // It is an error for alignas to decrease alignment, so we can
+ // ignore that possibility; Sema should diagnose it.
+ if (isa<FieldDecl>(D)) {
+ UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
+ cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
+ } else {
+ UseAlignAttrOnly = true;
+ }
+ }
+ else if (isa<FieldDecl>(D))
+ UseAlignAttrOnly =
+ D->hasAttr<PackedAttr>() ||
+ cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
+
+ // If we're using the align attribute only, just ignore everything
+ // else about the declaration and its type.
+ if (UseAlignAttrOnly) {
+ // do nothing
+
+ } else if (const ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
+ QualType T = VD->getType();
+ if (const ReferenceType *RT = T->getAs<ReferenceType>()) {
+ if (ForAlignof)
+ T = RT->getPointeeType();
+ else
+ T = getPointerType(RT->getPointeeType());
+ }
+ QualType BaseT = getBaseElementType(T);
+ if (!BaseT->isIncompleteType() && !T->isFunctionType()) {
+ // Adjust alignments of declarations with array type by the
+ // large-array alignment on the target.
+ if (const ArrayType *arrayType = getAsArrayType(T)) {
+ unsigned MinWidth = Target->getLargeArrayMinWidth();
+ if (!ForAlignof && MinWidth) {
+ if (isa<VariableArrayType>(arrayType))
+ Align = std::max(Align, Target->getLargeArrayAlign());
+ else if (isa<ConstantArrayType>(arrayType) &&
+ MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
+ Align = std::max(Align, Target->getLargeArrayAlign());
+ }
+ }
+ Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->hasGlobalStorage() && !ForAlignof)
+ Align = std::max(Align, getTargetInfo().getMinGlobalAlign());
+ }
+ }
+
+ // Fields can be subject to extra alignment constraints, like if
+ // the field is packed, the struct is packed, or the struct has
+ // a max-field-alignment constraint (#pragma pack). So calculate
+ // the actual alignment of the field within the struct, and then
+ // (as we're expected to) constrain that by the alignment of the type.
+ if (const FieldDecl *Field = dyn_cast<FieldDecl>(VD)) {
+ const RecordDecl *Parent = Field->getParent();
+ // We can only produce a sensible answer if the record is valid.
+ if (!Parent->isInvalidDecl()) {
+ const ASTRecordLayout &Layout = getASTRecordLayout(Parent);
+
+ // Start with the record's overall alignment.
+ unsigned FieldAlign = toBits(Layout.getAlignment());
+
+ // Use the GCD of that and the offset within the record.
+ uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
+ if (Offset > 0) {
+ // Alignment is always a power of 2, so the GCD will be a power of 2,
+ // which means we get to do this crazy thing instead of Euclid's.
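+ // e.g. a field at bit offset 40 (0b101000) yields LowBitOfOffset = 8,
+ // i.e. only 8-bit alignment is guaranteed by the offset alone.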
+ uint64_t LowBitOfOffset = Offset & (~Offset + 1);
+ if (LowBitOfOffset < FieldAlign)
+ FieldAlign = static_cast<unsigned>(LowBitOfOffset);
+ }
+
+ Align = std::min(Align, FieldAlign);
+ }
+ }
+ }
+
+ return toCharUnitsFromBits(Align);
+}
+
+// getTypeInfoDataSizeInChars - Return the size of a type, in
+// chars. If the type is a record, its data size is returned. This is
+// the size of the memcpy that's performed when assigning this type
+// using a trivial copy/move assignment operator.
+std::pair<CharUnits, CharUnits>
+ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
+ std::pair<CharUnits, CharUnits> sizeAndAlign = getTypeInfoInChars(T);
+
+ // In C++, objects can sometimes be allocated into the tail padding
+ // of a base-class subobject. We decide whether that's possible
+ // during class layout, so here we can just trust the layout results.
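+ // e.g. given 'struct B { B(); int i; char c; };', sizeof(B) may be 8 but
+ // its data size 5; a derived class may place its own members in the
+ // three bytes of tail padding, so only 5 bytes are safe to copy.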
+ if (getLangOpts().CPlusPlus) {
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
+ sizeAndAlign.first = layout.getDataSize();
+ }
+ }
+
+ return sizeAndAlign;
+}
+
+/// getConstantArrayInfoInChars - Performing the computation in CharUnits
+/// instead of in bits prevents overflowing the uint64_t for some large arrays.
+std::pair<CharUnits, CharUnits>
+static getConstantArrayInfoInChars(const ASTContext &Context,
+ const ConstantArrayType *CAT) {
+ std::pair<CharUnits, CharUnits> EltInfo =
+ Context.getTypeInfoInChars(CAT->getElementType());
+ uint64_t Size = CAT->getSize().getZExtValue();
+ assert((Size == 0 || static_cast<uint64_t>(EltInfo.first.getQuantity()) <=
+ (uint64_t)(-1)/Size) &&
+ "Overflow in array type char size evaluation");
+ uint64_t Width = EltInfo.first.getQuantity() * Size;
+ unsigned Align = EltInfo.second.getQuantity();
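+ // The 32-bit Microsoft ABI does not round an array's size up to the
+ // alignment of its elements; everywhere else the size is a multiple of
+ // the alignment.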
+ if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
+ Context.getTargetInfo().getPointerWidth(0) == 64)
+ Width = llvm::RoundUpToAlignment(Width, Align);
+ return std::make_pair(CharUnits::fromQuantity(Width),
+ CharUnits::fromQuantity(Align));
+}
+
+std::pair<CharUnits, CharUnits>
+ASTContext::getTypeInfoInChars(const Type *T) const {
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(T))
+ return getConstantArrayInfoInChars(*this, CAT);
+ TypeInfo Info = getTypeInfo(T);
+ return std::make_pair(toCharUnitsFromBits(Info.Width),
+ toCharUnitsFromBits(Info.Align));
+}
+
+std::pair<CharUnits, CharUnits>
+ASTContext::getTypeInfoInChars(QualType T) const {
+ return getTypeInfoInChars(T.getTypePtr());
+}
+
+bool ASTContext::isAlignmentRequired(const Type *T) const {
+ return getTypeInfo(T).AlignIsRequired;
+}
+
+bool ASTContext::isAlignmentRequired(QualType T) const {
+ return isAlignmentRequired(T.getTypePtr());
+}
+
+TypeInfo ASTContext::getTypeInfo(const Type *T) const {
+ TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
+ if (I != MemoizedTypeInfo.end())
+ return I->second;
+
+ // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
+ TypeInfo TI = getTypeInfoImpl(T);
+ MemoizedTypeInfo[T] = TI;
+ return TI;
+}
+
+/// getTypeInfoImpl - Return the size of the specified type, in bits. This
+/// method does not work on incomplete types.
+///
+/// FIXME: Pointers into different addr spaces could have different sizes and
+/// alignment requirements: getPointerInfo should take an AddrSpace, this
+/// should take a QualType, &c.
+TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
+ uint64_t Width = 0;
+ unsigned Align = 8;
+ bool AlignIsRequired = false;
+ switch (T->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
+ case Type::Class: \
+ assert(!T->isDependentType() && "should not see dependent types here"); \
+ return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Should not see dependent types");
+
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ // GCC extension: alignof(function) = 32 bits
+ Width = 0;
+ Align = 32;
+ break;
+
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ Width = 0;
+ Align = getTypeAlign(cast<ArrayType>(T)->getElementType());
+ break;
+
+ case Type::ConstantArray: {
+ const ConstantArrayType *CAT = cast<ConstantArrayType>(T);
+
+ TypeInfo EltInfo = getTypeInfo(CAT->getElementType());
+ uint64_t Size = CAT->getSize().getZExtValue();
+ assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
+ "Overflow in array type bit size evaluation");
+ Width = EltInfo.Width * Size;
+ Align = EltInfo.Align;
+ if (!getTargetInfo().getCXXABI().isMicrosoft() ||
+ getTargetInfo().getPointerWidth(0) == 64)
+ Width = llvm::RoundUpToAlignment(Width, Align);
+ break;
+ }
+ case Type::ExtVector:
+ case Type::Vector: {
+ const VectorType *VT = cast<VectorType>(T);
+ TypeInfo EltInfo = getTypeInfo(VT->getElementType());
+ Width = EltInfo.Width * VT->getNumElements();
+ Align = Width;
+ // If the alignment is not a power of 2, round up to the next power of 2.
+ // This happens for non-power-of-2 length vectors.
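+ // e.g. a 3-element float vector has Width = 96, which rounds up to
+ // Align = 128 and hence Width = 128.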
+ if (Align & (Align-1)) {
+ Align = llvm::NextPowerOf2(Align);
+ Width = llvm::RoundUpToAlignment(Width, Align);
+ }
+ // Adjust the alignment based on the target max.
+ uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
+ if (TargetVectorAlign && TargetVectorAlign < Align)
+ Align = TargetVectorAlign;
+ break;
+ }
+
+ case Type::Builtin:
+ switch (cast<BuiltinType>(T)->getKind()) {
+ default: llvm_unreachable("Unknown builtin type!");
+ case BuiltinType::Void:
+ // GCC extension: alignof(void) = 8 bits.
+ Width = 0;
+ Align = 8;
+ break;
+
+ case BuiltinType::Bool:
+ Width = Target->getBoolWidth();
+ Align = Target->getBoolAlign();
+ break;
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ case BuiltinType::SChar:
+ Width = Target->getCharWidth();
+ Align = Target->getCharAlign();
+ break;
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ Width = Target->getWCharWidth();
+ Align = Target->getWCharAlign();
+ break;
+ case BuiltinType::Char16:
+ Width = Target->getChar16Width();
+ Align = Target->getChar16Align();
+ break;
+ case BuiltinType::Char32:
+ Width = Target->getChar32Width();
+ Align = Target->getChar32Align();
+ break;
+ case BuiltinType::UShort:
+ case BuiltinType::Short:
+ Width = Target->getShortWidth();
+ Align = Target->getShortAlign();
+ break;
+ case BuiltinType::UInt:
+ case BuiltinType::Int:
+ Width = Target->getIntWidth();
+ Align = Target->getIntAlign();
+ break;
+ case BuiltinType::ULong:
+ case BuiltinType::Long:
+ Width = Target->getLongWidth();
+ Align = Target->getLongAlign();
+ break;
+ case BuiltinType::ULongLong:
+ case BuiltinType::LongLong:
+ Width = Target->getLongLongWidth();
+ Align = Target->getLongLongAlign();
+ break;
+ case BuiltinType::Int128:
+ case BuiltinType::UInt128:
+ Width = 128;
+ Align = 128; // int128_t is 128-bit aligned on all targets.
+ break;
+ case BuiltinType::Half:
+ Width = Target->getHalfWidth();
+ Align = Target->getHalfAlign();
+ break;
+ case BuiltinType::Float:
+ Width = Target->getFloatWidth();
+ Align = Target->getFloatAlign();
+ break;
+ case BuiltinType::Double:
+ Width = Target->getDoubleWidth();
+ Align = Target->getDoubleAlign();
+ break;
+ case BuiltinType::LongDouble:
+ Width = Target->getLongDoubleWidth();
+ Align = Target->getLongDoubleAlign();
+ break;
+ case BuiltinType::NullPtr:
+ Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t)
+ Align = Target->getPointerAlign(0); // == sizeof(void*)
+ break;
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ Width = Target->getPointerWidth(0);
+ Align = Target->getPointerAlign(0);
+ break;
+ case BuiltinType::OCLSampler:
+ // Samplers are modeled as integers.
+ Width = Target->getIntWidth();
+ Align = Target->getIntAlign();
+ break;
+ case BuiltinType::OCLEvent:
+ case BuiltinType::OCLClkEvent:
+ case BuiltinType::OCLQueue:
+ case BuiltinType::OCLNDRange:
+ case BuiltinType::OCLReserveID:
+ case BuiltinType::OCLImage1d:
+ case BuiltinType::OCLImage1dArray:
+ case BuiltinType::OCLImage1dBuffer:
+ case BuiltinType::OCLImage2d:
+ case BuiltinType::OCLImage2dArray:
+ case BuiltinType::OCLImage2dDepth:
+ case BuiltinType::OCLImage2dArrayDepth:
+ case BuiltinType::OCLImage2dMSAA:
+ case BuiltinType::OCLImage2dArrayMSAA:
+ case BuiltinType::OCLImage2dMSAADepth:
+ case BuiltinType::OCLImage2dArrayMSAADepth:
+ case BuiltinType::OCLImage3d:
+ // Currently these types are pointers to opaque types.
+ Width = Target->getPointerWidth(0);
+ Align = Target->getPointerAlign(0);
+ break;
+ }
+ break;
+ case Type::ObjCObjectPointer:
+ Width = Target->getPointerWidth(0);
+ Align = Target->getPointerAlign(0);
+ break;
+ case Type::BlockPointer: {
+ unsigned AS = getTargetAddressSpace(
+ cast<BlockPointerType>(T)->getPointeeType());
+ Width = Target->getPointerWidth(AS);
+ Align = Target->getPointerAlign(AS);
+ break;
+ }
+ case Type::LValueReference:
+ case Type::RValueReference: {
+ // alignof and sizeof should never enter this code path here, so we go
+ // the pointer route.
+ unsigned AS = getTargetAddressSpace(
+ cast<ReferenceType>(T)->getPointeeType());
+ Width = Target->getPointerWidth(AS);
+ Align = Target->getPointerAlign(AS);
+ break;
+ }
+ case Type::Pointer: {
+ unsigned AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType());
+ Width = Target->getPointerWidth(AS);
+ Align = Target->getPointerAlign(AS);
+ break;
+ }
+ case Type::MemberPointer: {
+ const MemberPointerType *MPT = cast<MemberPointerType>(T);
+ std::tie(Width, Align) = ABI->getMemberPointerWidthAndAlign(MPT);
+ break;
+ }
+ case Type::Complex: {
+ // Complex types have the same alignment as their elements, but twice the
+ // size.
+ TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType());
+ Width = EltInfo.Width * 2;
+ Align = EltInfo.Align;
+ break;
+ }
+ case Type::ObjCObject:
+ return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
+ case Type::Adjusted:
+ case Type::Decayed:
+ return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
+ case Type::ObjCInterface: {
+ const ObjCInterfaceType *ObjCI = cast<ObjCInterfaceType>(T);
+ const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
+ Width = toBits(Layout.getSize());
+ Align = toBits(Layout.getAlignment());
+ break;
+ }
+ case Type::Record:
+ case Type::Enum: {
+ const TagType *TT = cast<TagType>(T);
+
+ if (TT->getDecl()->isInvalidDecl()) {
+ Width = 8;
+ Align = 8;
+ break;
+ }
+
+ if (const EnumType *ET = dyn_cast<EnumType>(TT)) {
+ const EnumDecl *ED = ET->getDecl();
+ TypeInfo Info =
+ getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType());
+ if (unsigned AttrAlign = ED->getMaxAlignment()) {
+ Info.Align = AttrAlign;
+ Info.AlignIsRequired = true;
+ }
+ return Info;
+ }
+
+ const RecordType *RT = cast<RecordType>(TT);
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = getASTRecordLayout(RD);
+ Width = toBits(Layout.getSize());
+ Align = toBits(Layout.getAlignment());
+ AlignIsRequired = RD->hasAttr<AlignedAttr>();
+ break;
+ }
+
+ case Type::SubstTemplateTypeParm:
+ return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
+ getReplacementType().getTypePtr());
+
+ case Type::Auto: {
+ const AutoType *A = cast<AutoType>(T);
+ assert(!A->getDeducedType().isNull() &&
+ "cannot request the size of an undeduced or dependent auto type");
+ return getTypeInfo(A->getDeducedType().getTypePtr());
+ }
+
+ case Type::Paren:
+ return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
+
+ case Type::Typedef: {
+ const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl();
+ TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
+ // If the typedef has an aligned attribute on it, it overrides any computed
+ // alignment we have. This violates the GCC documentation (which says that
+ // attribute(aligned) can only round up) but matches its implementation.
+ if (unsigned AttrAlign = Typedef->getMaxAlignment()) {
+ Align = AttrAlign;
+ AlignIsRequired = true;
+ } else {
+ Align = Info.Align;
+ AlignIsRequired = Info.AlignIsRequired;
+ }
+ Width = Info.Width;
+ break;
+ }
+
+ case Type::Elaborated:
+ return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
+
+ case Type::Attributed:
+ return getTypeInfo(
+ cast<AttributedType>(T)->getEquivalentType().getTypePtr());
+
+ case Type::Atomic: {
+ // Start with the base type information.
+ TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
+ Width = Info.Width;
+ Align = Info.Align;
+
+ // If the size of the type doesn't exceed the platform's max
+ // atomic promotion width, make the size and alignment more
+ // favorable to atomic operations:
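+ // e.g. a 24-bit-wide value would get Width = 32 and Align = 32 here,
+ // assuming the target's max atomic promote width is at least 32.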
+ if (Width != 0 && Width <= Target->getMaxAtomicPromoteWidth()) {
+ // Round the size up to a power of 2.
+ if (!llvm::isPowerOf2_64(Width))
+ Width = llvm::NextPowerOf2(Width);
+
+ // Set the alignment equal to the size.
+ Align = static_cast<unsigned>(Width);
+ }
+ break;
+ }
+
+ }
+
+ assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
+ return TypeInfo(Width, Align, AlignIsRequired);
+}
+
+unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
+ unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
+ // Target ppc64 with QPX: the simd default alignment for a pointer to
+ // double is 32 bytes (256 bits).
+ if ((getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64 ||
+ getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64le) &&
+ getTargetInfo().getABI() == "elfv1-qpx" &&
+ T->isSpecificBuiltinType(BuiltinType::Double))
+ SimdAlign = 256;
+ return SimdAlign;
+}
+
+/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
+CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
+ return CharUnits::fromQuantity(BitSize / getCharWidth());
+}
+
+/// toBits - Convert a size in characters to a size in bits.
+int64_t ASTContext::toBits(CharUnits CharSize) const {
+ return CharSize.getQuantity() * getCharWidth();
+}
+
+/// getTypeSizeInChars - Return the size of the specified type, in characters.
+/// This method does not work on incomplete types.
+CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
+ return getTypeInfoInChars(T).first;
+}
+CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
+ return getTypeInfoInChars(T).first;
+}
+
+/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
+/// characters. This method does not work on incomplete types.
+CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
+ return toCharUnitsFromBits(getTypeAlign(T));
+}
+CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
+ return toCharUnitsFromBits(getTypeAlign(T));
+}
+
+/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
+/// type for the current target in bits. This can be different than the ABI
+/// alignment in cases where it is beneficial for performance to overalign
+/// a data type.
+unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
+ TypeInfo TI = getTypeInfo(T);
+ unsigned ABIAlign = TI.Align;
+
+ T = T->getBaseElementTypeUnsafe();
+
+ // The preferred alignment of member pointers is that of a pointer.
+ if (T->isMemberPointerType())
+ return getPreferredTypeAlign(getPointerDiffType().getTypePtr());
+
+ if (Target->getTriple().getArch() == llvm::Triple::xcore)
+ return ABIAlign; // Never overalign on XCore.
+
+ // Double and long long should be naturally aligned if possible.
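+ // e.g. on 32-bit x86, double has 4-byte ABI alignment but 8-byte
+ // preferred alignment.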
+ if (const ComplexType *CT = T->getAs<ComplexType>())
+ T = CT->getElementType().getTypePtr();
+ if (const EnumType *ET = T->getAs<EnumType>())
+ T = ET->getDecl()->getIntegerType().getTypePtr();
+ if (T->isSpecificBuiltinType(BuiltinType::Double) ||
+ T->isSpecificBuiltinType(BuiltinType::LongLong) ||
+ T->isSpecificBuiltinType(BuiltinType::ULongLong))
+ // Don't increase the alignment if an alignment attribute was specified on a
+ // typedef declaration.
+ if (!TI.AlignIsRequired)
+ return std::max(ABIAlign, (unsigned)getTypeSize(T));
+
+ return ABIAlign;
+}
+
+/// getTargetDefaultAlignForAttributeAligned - Return the default alignment
+/// for __attribute__((aligned)) on this target, to be used if no alignment
+/// value is specified.
+unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
+ return getTargetInfo().getDefaultAlignForAttributeAligned();
+}
+
+/// getAlignOfGlobalVar - Return the alignment in bits that should be given
+/// to a global variable of the specified type.
+unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
+ return std::max(getTypeAlign(T), getTargetInfo().getMinGlobalAlign());
+}
+
+/// getAlignOfGlobalVarInChars - Return the alignment in characters that
+/// should be given to a global variable of the specified type.
+CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const {
+ return toCharUnitsFromBits(getAlignOfGlobalVar(T));
+}
+
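+// Accumulate the offset from RD to the innermost base that shares a vbptr
+// with it, walking the chain of vbptr-sharing bases (used by the Microsoft
+// C++ ABI record layout).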
+CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
+ CharUnits Offset = CharUnits::Zero();
+ const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
+ while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
+ Offset += Layout->getBaseClassOffset(Base);
+ Layout = &getASTRecordLayout(Base);
+ }
+ return Offset;
+}
+
+/// DeepCollectObjCIvars -
+/// This routine first collects all declared, but not synthesized, ivars in
+/// the superclass chain and then collects all ivars, including those
+/// synthesized, for the current class. It is used when implementing the
+/// current class, once all ivars, declared and synthesized, are known.
+///
+void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
+ bool leafClass,
+ SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
+ if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
+ DeepCollectObjCIvars(SuperClass, false, Ivars);
+ if (!leafClass) {
+ for (const auto *I : OI->ivars())
+ Ivars.push_back(I);
+ } else {
+ ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
+ for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
+ Iv = Iv->getNextIvar())
+ Ivars.push_back(Iv);
+ }
+}
+
+/// CollectInheritedProtocols - Collect all protocols in the current class
+/// and those inherited by it.
+void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
+ llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
+ if (const ObjCInterfaceDecl *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
+ // We can use protocol_iterator here instead of
+ // all_referenced_protocol_iterator since we are walking all categories.
+ for (auto *Proto : OI->all_referenced_protocols()) {
+ CollectInheritedProtocols(Proto, Protocols);
+ }
+
+ // Categories of this Interface.
+ for (const auto *Cat : OI->visible_categories())
+ CollectInheritedProtocols(Cat, Protocols);
+
+ if (ObjCInterfaceDecl *SD = OI->getSuperClass())
+ while (SD) {
+ CollectInheritedProtocols(SD, Protocols);
+ SD = SD->getSuperClass();
+ }
+ } else if (const ObjCCategoryDecl *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
+ for (auto *Proto : OC->protocols()) {
+ CollectInheritedProtocols(Proto, Protocols);
+ }
+ } else if (const ObjCProtocolDecl *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
+ // Insert the protocol.
+ if (!Protocols.insert(
+ const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
+ return;
+
+ for (auto *Proto : OP->protocols())
+ CollectInheritedProtocols(Proto, Protocols);
+ }
+}
+
+unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
+ unsigned count = 0;
+ // Count ivars declared in class extension.
+ for (const auto *Ext : OI->known_extensions())
+ count += Ext->ivar_size();
+
+ // Count ivars defined in this class's implementation, including
+ // synthesized ivars.
+ if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
+ count += ImplDecl->ivar_size();
+
+ return count;
+}
+
+bool ASTContext::isSentinelNullExpr(const Expr *E) {
+ if (!E)
+ return false;
+
+ // nullptr_t is always treated as null.
+ if (E->getType()->isNullPtrType()) return true;
+
+ if (E->getType()->isAnyPointerType() &&
+ E->IgnoreParenCasts()->isNullPointerConstant(*this,
+ Expr::NPC_ValueDependentIsNull))
+ return true;
+
+ // Unfortunately, __null has type 'int'.
+ if (isa<GNUNullExpr>(E)) return true;
+
+ return false;
+}
+
+/// \brief Get the implementation of ObjCInterfaceDecl, or NULL if none
+/// exists.
+ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
+ llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
+ I = ObjCImpls.find(D);
+ if (I != ObjCImpls.end())
+ return cast<ObjCImplementationDecl>(I->second);
+ return nullptr;
+}
+/// \brief Get the implementation of ObjCCategoryDecl, or NULL if none exists.
+ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
+ llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
+ I = ObjCImpls.find(D);
+ if (I != ObjCImpls.end())
+ return cast<ObjCCategoryImplDecl>(I->second);
+ return nullptr;
+}
+
+/// \brief Set the implementation of ObjCInterfaceDecl.
+void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
+ ObjCImplementationDecl *ImplD) {
+ assert(IFaceD && ImplD && "Passed null params");
+ ObjCImpls[IFaceD] = ImplD;
+}
+/// \brief Set the implementation of ObjCCategoryDecl.
+void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
+ ObjCCategoryImplDecl *ImplD) {
+ assert(CatD && ImplD && "Passed null params");
+ ObjCImpls[CatD] = ImplD;
+}
+
+const ObjCMethodDecl *
+ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const {
+ return ObjCMethodRedecls.lookup(MD);
+}
+
+void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
+ const ObjCMethodDecl *Redecl) {
+ assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
+ ObjCMethodRedecls[MD] = Redecl;
+}
+
+const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
+ const NamedDecl *ND) const {
+ if (const ObjCInterfaceDecl *ID =
+ dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
+ return ID;
+ if (const ObjCCategoryDecl *CD =
+ dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
+ return CD->getClassInterface();
+ if (const ObjCImplDecl *IMD =
+ dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
+ return IMD->getClassInterface();
+
+ return nullptr;
+}
+
+/// \brief Get the copy initialization expression of a VarDecl, or NULL if
+/// none exists.
+Expr *ASTContext::getBlockVarCopyInits(const VarDecl*VD) {
+ assert(VD && "Passed null params");
+ assert(VD->hasAttr<BlocksAttr>() &&
+ "getBlockVarCopyInits - not __block var");
+ llvm::DenseMap<const VarDecl*, Expr*>::iterator
+ I = BlockVarCopyInits.find(VD);
+ return (I != BlockVarCopyInits.end()) ? cast<Expr>(I->second) : nullptr;
+}
+
+/// \brief Set the copy initialization expression of a block var decl.
+void ASTContext::setBlockVarCopyInits(VarDecl*VD, Expr* Init) {
+ assert(VD && Init && "Passed null params");
+ assert(VD->hasAttr<BlocksAttr>() &&
+ "setBlockVarCopyInits - not __block var");
+ BlockVarCopyInits[VD] = Init;
+}
+
+TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
+ unsigned DataSize) const {
+ if (!DataSize)
+ DataSize = TypeLoc::getFullDataSizeForType(T);
+ else
+ assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
+ "incorrect data size provided to CreateTypeSourceInfo!");
+
+ TypeSourceInfo *TInfo =
+ (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
+ new (TInfo) TypeSourceInfo(T);
+ return TInfo;
+}
+
+TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
+ SourceLocation L) const {
+ TypeSourceInfo *DI = CreateTypeSourceInfo(T);
+ DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
+ return DI;
+}
+
+const ASTRecordLayout &
+ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
+ return getObjCLayout(D, nullptr);
+}
+
+const ASTRecordLayout &
+ASTContext::getASTObjCImplementationLayout(
+ const ObjCImplementationDecl *D) const {
+ return getObjCLayout(D->getClassInterface(), D);
+}
+
+//===----------------------------------------------------------------------===//
+// Type creation/memoization methods
+//===----------------------------------------------------------------------===//
+
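+// The getters below all follow the same memoization pattern; here is an
+// illustrative sketch (PointerType stands in for any of the node kinds):
+//
+//   llvm::FoldingSetNodeID ID;
+//   PointerType::Profile(ID, PointeeTy);  // hash the structural key
+//   void *InsertPos = nullptr;
+//   if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
+//     return QualType(PT, 0);             // reuse the existing node
+//   // Otherwise build the canonical type first; doing so may insert into
+//   // the folding set and invalidate InsertPos, which is why each getter
+//   // re-runs FindNodeOrInsertPos before allocating and inserting the new
+//   // node at InsertPos.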
+QualType
+ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
+ unsigned fastQuals = quals.getFastQualifiers();
+ quals.removeFastQualifiers();
+
+ // Check if we've already instantiated this type.
+ llvm::FoldingSetNodeID ID;
+ ExtQuals::Profile(ID, baseType, quals);
+ void *insertPos = nullptr;
+ if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
+ assert(eq->getQualifiers() == quals);
+ return QualType(eq, fastQuals);
+ }
+
+ // If the base type is not canonical, make the appropriate canonical type.
+ QualType canon;
+ if (!baseType->isCanonicalUnqualified()) {
+ SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
+ canonSplit.Quals.addConsistentQualifiers(quals);
+ canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);
+
+ // Re-find the insert position.
+ (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
+ }
+
+ ExtQuals *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
+ ExtQualNodes.InsertNode(eq, insertPos);
+ return QualType(eq, fastQuals);
+}
+
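+/// Return the given type qualified with the given address space, merging
+/// it into any extended qualifiers already present on \p T.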
+QualType
+ASTContext::getAddrSpaceQualType(QualType T, unsigned AddressSpace) const {
+ QualType CanT = getCanonicalType(T);
+ if (CanT.getAddressSpace() == AddressSpace)
+ return T;
+
+ // If we are composing extended qualifiers together, merge together
+ // into one ExtQuals node.
+ QualifierCollector Quals;
+ const Type *TypeNode = Quals.strip(T);
+
+ // If this type already has an address space specified, it cannot get
+ // another one.
+ assert(!Quals.hasAddressSpace() &&
+ "Type cannot be in multiple addr spaces!");
+ Quals.addAddressSpace(AddressSpace);
+
+ return getExtQualType(TypeNode, Quals);
+}
+
+QualType ASTContext::getObjCGCQualType(QualType T,
+ Qualifiers::GC GCAttr) const {
+ QualType CanT = getCanonicalType(T);
+ if (CanT.getObjCGCAttr() == GCAttr)
+ return T;
+
+ if (const PointerType *ptr = T->getAs<PointerType>()) {
+ QualType Pointee = ptr->getPointeeType();
+ if (Pointee->isAnyPointerType()) {
+ QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
+ return getPointerType(ResultType);
+ }
+ }
+
+ // If we are composing extended qualifiers together, merge together
+ // into one ExtQuals node.
+ QualifierCollector Quals;
+ const Type *TypeNode = Quals.strip(T);
+
+ // If this type already has an ObjCGC specified, it cannot get
+ // another one.
+ assert(!Quals.hasObjCGCAttr() &&
+ "Type cannot have multiple ObjCGCs!");
+ Quals.addObjCGCAttr(GCAttr);
+
+ return getExtQualType(TypeNode, Quals);
+}
+
+const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
+ FunctionType::ExtInfo Info) {
+ if (T->getExtInfo() == Info)
+ return T;
+
+ QualType Result;
+ if (const FunctionNoProtoType *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
+ Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
+ } else {
+ const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ EPI.ExtInfo = Info;
+ Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI);
+ }
+
+ return cast<FunctionType>(Result.getTypePtr());
+}
+
+void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
+ QualType ResultType) {
+ FD = FD->getMostRecentDecl();
+ while (true) {
+ const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
+ if (FunctionDecl *Next = FD->getPreviousDecl())
+ FD = Next;
+ else
+ break;
+ }
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->DeducedReturnType(FD, ResultType);
+}
+
+/// Get a function type and produce the equivalent function type with the
+/// specified exception specification. Type sugar that can be present on a
+/// declaration of a function with an exception specification is permitted
+/// and preserved. Other type sugar (for instance, typedefs) is not.
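+///
+/// An illustrative use, mirroring adjustExceptionSpec below ('Ctx' here
+/// stands for the enclosing ASTContext):
+/// \code
+///   // Rebuild FD's type with exception specification ESI; any ParenType
+///   // or AttributedType sugar around the function type is preserved.
+///   QualType Updated =
+///       getFunctionTypeWithExceptionSpec(Ctx, FD->getType(), ESI);
+/// \endcode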
+static QualType getFunctionTypeWithExceptionSpec(
+ ASTContext &Context, QualType Orig,
+ const FunctionProtoType::ExceptionSpecInfo &ESI) {
+ // Might have some parens.
+ if (auto *PT = dyn_cast<ParenType>(Orig))
+ return Context.getParenType(
+ getFunctionTypeWithExceptionSpec(Context, PT->getInnerType(), ESI));
+
+ // Might have a calling-convention attribute.
+ if (auto *AT = dyn_cast<AttributedType>(Orig))
+ return Context.getAttributedType(
+ AT->getAttrKind(),
+ getFunctionTypeWithExceptionSpec(Context, AT->getModifiedType(), ESI),
+ getFunctionTypeWithExceptionSpec(Context, AT->getEquivalentType(),
+ ESI));
+
+ // Anything else must be a function type. Rebuild it with the new exception
+ // specification.
+ const FunctionProtoType *Proto = cast<FunctionProtoType>(Orig);
+ return Context.getFunctionType(
+ Proto->getReturnType(), Proto->getParamTypes(),
+ Proto->getExtProtoInfo().withExceptionSpec(ESI));
+}
+
+void ASTContext::adjustExceptionSpec(
+ FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
+ bool AsWritten) {
+ // Update the type.
+ QualType Updated =
+ getFunctionTypeWithExceptionSpec(*this, FD->getType(), ESI);
+ FD->setType(Updated);
+
+ if (!AsWritten)
+ return;
+
+ // Update the type in the type source information too.
+ if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
+ // If the type and the type-as-written differ, we may need to update
+ // the type-as-written too.
+ if (TSInfo->getType() != FD->getType())
+ Updated = getFunctionTypeWithExceptionSpec(*this, TSInfo->getType(), ESI);
+
+ // FIXME: When we get proper type location information for exceptions,
+ // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
+ // up the TypeSourceInfo.
+ assert(TypeLoc::getFullDataSizeForType(Updated) ==
+ TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
+ "TypeLoc size mismatch from updating exception specification");
+ TSInfo->overrideType(Updated);
+ }
+}
+
+/// getComplexType - Return the uniqued reference to the type for a complex
+/// number with the specified element type.
+QualType ASTContext::getComplexType(QualType T) const {
+ // Unique pointers, to guarantee there is only one pointer of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ ComplexType::Profile(ID, T);
+
+ void *InsertPos = nullptr;
+ if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(CT, 0);
+
+ // If the element type isn't canonical, this won't be a canonical type either,
+ // so fill in the canonical type field.
+ QualType Canonical;
+ if (!T.isCanonical()) {
+ Canonical = getComplexType(getCanonicalType(T));
+
+ // Get the new insert position for the node we care about.
+ ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ ComplexType *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
+ Types.push_back(New);
+ ComplexTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getPointerType - Return the uniqued reference to the type for a pointer to
+/// the specified type.
+QualType ASTContext::getPointerType(QualType T) const {
+ // Unique pointers, to guarantee there is only one pointer of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ PointerType::Profile(ID, T);
+
+ void *InsertPos = nullptr;
+ if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(PT, 0);
+
+ // If the pointee type isn't canonical, this won't be a canonical type either,
+ // so fill in the canonical type field.
+ QualType Canonical;
+ if (!T.isCanonical()) {
+ Canonical = getPointerType(getCanonicalType(T));
+
+ // Get the new insert position for the node we care about.
+ PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ PointerType *New = new (*this, TypeAlignment) PointerType(T, Canonical);
+ Types.push_back(New);
+ PointerTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
+ llvm::FoldingSetNodeID ID;
+ AdjustedType::Profile(ID, Orig, New);
+ void *InsertPos = nullptr;
+ AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (AT)
+ return QualType(AT, 0);
+
+ QualType Canonical = getCanonicalType(New);
+
+ // Get the new insert position for the node we care about.
+ AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!AT && "Shouldn't be in the map!");
+
+ AT = new (*this, TypeAlignment)
+ AdjustedType(Type::Adjusted, Orig, New, Canonical);
+ Types.push_back(AT);
+ AdjustedTypes.InsertNode(AT, InsertPos);
+ return QualType(AT, 0);
+}
+
+QualType ASTContext::getDecayedType(QualType T) const {
+ assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
+
+ QualType Decayed;
+
+ // C99 6.7.5.3p7:
+ // A declaration of a parameter as "array of type" shall be
+ // adjusted to "qualified pointer to type", where the type
+ // qualifiers (if any) are those specified within the [ and ] of
+ // the array type derivation.
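+ // For example, a parameter declared as 'int a[const 10]' is adjusted
+ // to 'int *const a'.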
+ if (T->isArrayType())
+ Decayed = getArrayDecayedType(T);
+
+ // C99 6.7.5.3p8:
+ // A declaration of a parameter as "function returning type"
+ // shall be adjusted to "pointer to function returning type", as
+ // in 6.3.2.1.
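+ // For example, a parameter declared as 'int f(void)' is adjusted to
+ // 'int (*f)(void)'.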
+ if (T->isFunctionType())
+ Decayed = getPointerType(T);
+
+ llvm::FoldingSetNodeID ID;
+ AdjustedType::Profile(ID, T, Decayed);
+ void *InsertPos = nullptr;
+ AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (AT)
+ return QualType(AT, 0);
+
+ QualType Canonical = getCanonicalType(Decayed);
+
+ // Get the new insert position for the node we care about.
+ AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!AT && "Shouldn't be in the map!");
+
+ AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical);
+ Types.push_back(AT);
+ AdjustedTypes.InsertNode(AT, InsertPos);
+ return QualType(AT, 0);
+}
+
+/// getBlockPointerType - Return the uniqued reference to the type for
+/// a pointer to the specified block.
+QualType ASTContext::getBlockPointerType(QualType T) const {
+ assert(T->isFunctionType() && "block of function types only");
+ // Unique pointers, to guarantee there is only one block of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ BlockPointerType::Profile(ID, T);
+
+ void *InsertPos = nullptr;
+ if (BlockPointerType *PT =
+ BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(PT, 0);
+
+ // If the block pointee type isn't canonical, this won't be a canonical
+ // type either so fill in the canonical type field.
+ QualType Canonical;
+ if (!T.isCanonical()) {
+ Canonical = getBlockPointerType(getCanonicalType(T));
+
+ // Get the new insert position for the node we care about.
+ BlockPointerType *NewIP =
+ BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ BlockPointerType *New
+ = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
+ Types.push_back(New);
+ BlockPointerTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getLValueReferenceType - Return the uniqued reference to the type for an
+/// lvalue reference to the specified type.
+QualType
+ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
+ assert(getCanonicalType(T) != OverloadTy &&
+ "Unresolved overloaded function type");
+
+ // Unique pointers, to guarantee there is only one pointer of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ ReferenceType::Profile(ID, T, SpelledAsLValue);
+
+ void *InsertPos = nullptr;
+ if (LValueReferenceType *RT =
+ LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(RT, 0);
+
+ const ReferenceType *InnerRef = T->getAs<ReferenceType>();
+
+ // If the referencee type isn't canonical, this won't be a canonical type
+ // either, so fill in the canonical type field.
+ QualType Canonical;
+ if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
+ QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
+ Canonical = getLValueReferenceType(getCanonicalType(PointeeType));
+
+ // Get the new insert position for the node we care about.
+ LValueReferenceType *NewIP =
+ LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
+ }
+
+ LValueReferenceType *New
+ = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
+ SpelledAsLValue);
+ Types.push_back(New);
+ LValueReferenceTypes.InsertNode(New, InsertPos);
+
+ return QualType(New, 0);
+}
+
+/// getRValueReferenceType - Return the uniqued reference to the type for an
+/// rvalue reference to the specified type.
+QualType ASTContext::getRValueReferenceType(QualType T) const {
+ // Unique pointers, to guarantee there is only one pointer of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ ReferenceType::Profile(ID, T, false);
+
+ void *InsertPos = nullptr;
+ if (RValueReferenceType *RT =
+ RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(RT, 0);
+
+ const ReferenceType *InnerRef = T->getAs<ReferenceType>();
+
+ // If the referencee type isn't canonical, this won't be a canonical type
+ // either, so fill in the canonical type field.
+ QualType Canonical;
+ if (InnerRef || !T.isCanonical()) {
+ QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
+ Canonical = getRValueReferenceType(getCanonicalType(PointeeType));
+
+ // Get the new insert position for the node we care about.
+ RValueReferenceType *NewIP =
+ RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
+ }
+
+ RValueReferenceType *New
+ = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
+ Types.push_back(New);
+ RValueReferenceTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getMemberPointerType - Return the uniqued reference to the type for a
+/// member pointer to the specified type, in the specified class.
+QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
+ // Unique pointers, to guarantee there is only one pointer of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ MemberPointerType::Profile(ID, T, Cls);
+
+ void *InsertPos = nullptr;
+ if (MemberPointerType *PT =
+ MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(PT, 0);
+
+ // If the pointee or class type isn't canonical, this won't be a canonical
+ // type either, so fill in the canonical type field.
+ QualType Canonical;
+ if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
+ Canonical = getMemberPointerType(getCanonicalType(T), getCanonicalType(Cls));
+
+ // Get the new insert position for the node we care about.
+ MemberPointerType *NewIP =
+ MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ MemberPointerType *New
+ = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
+ Types.push_back(New);
+ MemberPointerTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getConstantArrayType - Return the unique reference to the type for an
+/// array of the specified element type.
+QualType ASTContext::getConstantArrayType(QualType EltTy,
+ const llvm::APInt &ArySizeIn,
+ ArrayType::ArraySizeModifier ASM,
+ unsigned IndexTypeQuals) const {
+ assert((EltTy->isDependentType() ||
+ EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
+ "Constant array of VLAs is illegal!");
+
+ // Convert the array size into a canonical width matching the pointer size for
+ // the target.
+ llvm::APInt ArySize(ArySizeIn);
+ ArySize =
+ ArySize.zextOrTrunc(Target->getPointerWidth(getTargetAddressSpace(EltTy)));
+
+ llvm::FoldingSetNodeID ID;
+ ConstantArrayType::Profile(ID, EltTy, ArySize, ASM, IndexTypeQuals);
+
+ void *InsertPos = nullptr;
+ if (ConstantArrayType *ATP =
+ ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(ATP, 0);
+
+ // If the element type isn't canonical or has qualifiers, this won't
+ // be a canonical type either, so fill in the canonical type field.
+ QualType Canon;
+ if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
+ SplitQualType canonSplit = getCanonicalType(EltTy).split();
+ Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize,
+ ASM, IndexTypeQuals);
+ Canon = getQualifiedType(Canon, canonSplit.Quals);
+
+ // Get the new insert position for the node we care about.
+ ConstantArrayType *NewIP =
+ ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
+ }
+
+ ConstantArrayType *New = new (*this, TypeAlignment)
+ ConstantArrayType(EltTy, Canon, ArySize, ASM, IndexTypeQuals);
+ ConstantArrayTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+/// getVariableArrayDecayedType - Turns the given type, which may be
+/// variably-modified, into the corresponding type with all the known
+/// sizes replaced with [*].
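+/// For example, 'int (*)[n]' (a pointer to a VLA) becomes 'int (*)[*]'.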
+QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
+ // By far the most common case.
+ if (!type->isVariablyModifiedType()) return type;
+
+ QualType result;
+
+ SplitQualType split = type.getSplitDesugaredType();
+ const Type *ty = split.Ty;
+ switch (ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("didn't desugar past all non-canonical types?");
+
+ // These types should never be variably-modified.
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::DependentSizedExtVector:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ case Type::Record:
+ case Type::Enum:
+ case Type::UnresolvedUsing:
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::Decltype:
+ case Type::UnaryTransform:
+ case Type::DependentName:
+ case Type::InjectedClassName:
+ case Type::TemplateSpecialization:
+ case Type::DependentTemplateSpecialization:
+ case Type::TemplateTypeParm:
+ case Type::SubstTemplateTypeParmPack:
+ case Type::Auto:
+ case Type::PackExpansion:
+ llvm_unreachable("type should never be variably-modified");
+
+ // These types can be variably-modified but should never need to
+ // further decay.
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ case Type::BlockPointer:
+ case Type::MemberPointer:
+ return type;
+
+ // These types can be variably-modified. All these modifications
+ // preserve structure except as noted by comments.
+ // TODO: if we ever care about optimizing VLAs, there are no-op
+ // optimizations available here.
+ case Type::Pointer:
+ result = getPointerType(getVariableArrayDecayedType(
+ cast<PointerType>(ty)->getPointeeType()));
+ break;
+
+ case Type::LValueReference: {
+ const LValueReferenceType *lv = cast<LValueReferenceType>(ty);
+ result = getLValueReferenceType(
+ getVariableArrayDecayedType(lv->getPointeeType()),
+ lv->isSpelledAsLValue());
+ break;
+ }
+
+ case Type::RValueReference: {
+ const RValueReferenceType *lv = cast<RValueReferenceType>(ty);
+ result = getRValueReferenceType(
+ getVariableArrayDecayedType(lv->getPointeeType()));
+ break;
+ }
+
+ case Type::Atomic: {
+ const AtomicType *at = cast<AtomicType>(ty);
+ result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
+ break;
+ }
+
+ case Type::ConstantArray: {
+ const ConstantArrayType *cat = cast<ConstantArrayType>(ty);
+ result = getConstantArrayType(
+ getVariableArrayDecayedType(cat->getElementType()),
+ cat->getSize(),
+ cat->getSizeModifier(),
+ cat->getIndexTypeCVRQualifiers());
+ break;
+ }
+
+ case Type::DependentSizedArray: {
+ const DependentSizedArrayType *dat = cast<DependentSizedArrayType>(ty);
+ result = getDependentSizedArrayType(
+ getVariableArrayDecayedType(dat->getElementType()),
+ dat->getSizeExpr(),
+ dat->getSizeModifier(),
+ dat->getIndexTypeCVRQualifiers(),
+ dat->getBracketsRange());
+ break;
+ }
+
+ // Turn incomplete types into [*] types.
+ case Type::IncompleteArray: {
+ const IncompleteArrayType *iat = cast<IncompleteArrayType>(ty);
+ result = getVariableArrayType(
+ getVariableArrayDecayedType(iat->getElementType()),
+ /*size*/ nullptr,
+ ArrayType::Normal,
+ iat->getIndexTypeCVRQualifiers(),
+ SourceRange());
+ break;
+ }
+
+ // Turn VLA types into [*] types.
+ case Type::VariableArray: {
+ const VariableArrayType *vat = cast<VariableArrayType>(ty);
+ result = getVariableArrayType(
+ getVariableArrayDecayedType(vat->getElementType()),
+ /*size*/ nullptr,
+ ArrayType::Star,
+ vat->getIndexTypeCVRQualifiers(),
+ vat->getBracketsRange());
+ break;
+ }
+ }
+
+ // Apply the top-level qualifiers from the original.
+ return getQualifiedType(result, split.Quals);
+}
+
+/// getVariableArrayType - Returns a non-unique reference to the type for a
+/// variable array of the specified element type.
+QualType ASTContext::getVariableArrayType(QualType EltTy,
+ Expr *NumElts,
+ ArrayType::ArraySizeModifier ASM,
+ unsigned IndexTypeQuals,
+ SourceRange Brackets) const {
+ // Since we don't unique expressions, it isn't possible to unique VLA's
+ // that have an expression provided for their size.
+ QualType Canon;
+
+ // Be sure to pull qualifiers off the element type.
+ if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
+ SplitQualType canonSplit = getCanonicalType(EltTy).split();
+ Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
+ IndexTypeQuals, Brackets);
+ Canon = getQualifiedType(Canon, canonSplit.Quals);
+ }
+
+ VariableArrayType *New = new (*this, TypeAlignment)
+ VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);
+
+ VariableArrayTypes.push_back(New);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+/// getDependentSizedArrayType - Returns a non-unique reference to
+/// the type for a dependently-sized array of the specified element
+/// type.
+QualType ASTContext::getDependentSizedArrayType(QualType elementType,
+ Expr *numElements,
+ ArrayType::ArraySizeModifier ASM,
+ unsigned elementTypeQuals,
+ SourceRange brackets) const {
+ assert((!numElements || numElements->isTypeDependent() ||
+ numElements->isValueDependent()) &&
+ "Size must be type- or value-dependent!");
+
+ // Dependently-sized array types that do not have a specified number
+ // of elements will have their sizes deduced from a dependent
+ // initializer. We do no canonicalization here at all, which is okay
+ // because they can't be used in most locations.
+ if (!numElements) {
+ DependentSizedArrayType *newType
+ = new (*this, TypeAlignment)
+ DependentSizedArrayType(*this, elementType, QualType(),
+ numElements, ASM, elementTypeQuals,
+ brackets);
+ Types.push_back(newType);
+ return QualType(newType, 0);
+ }
+
+ // Otherwise, we actually build a new type every time, but we
+ // also build a canonical type.
+
+ SplitQualType canonElementType = getCanonicalType(elementType).split();
+
+ void *insertPos = nullptr;
+ llvm::FoldingSetNodeID ID;
+ DependentSizedArrayType::Profile(ID, *this,
+ QualType(canonElementType.Ty, 0),
+ ASM, elementTypeQuals, numElements);
+
+ // Look for an existing type with these properties.
+ DependentSizedArrayType *canonTy =
+ DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);
+
+ // If we don't have one, build one.
+ if (!canonTy) {
+ canonTy = new (*this, TypeAlignment)
+ DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0),
+ QualType(), numElements, ASM, elementTypeQuals,
+ brackets);
+ DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
+ Types.push_back(canonTy);
+ }
+
+ // Apply qualifiers from the element type to the array.
+ QualType canon = getQualifiedType(QualType(canonTy, 0),
+ canonElementType.Quals);
+
+ // If we didn't need extra canonicalization for the element type or the size
+ // expression, then just use that as our result.
+ if (QualType(canonElementType.Ty, 0) == elementType &&
+ canonTy->getSizeExpr() == numElements)
+ return canon;
+
+ // Otherwise, we need to build a type which follows the spelling
+ // of the element type.
+ DependentSizedArrayType *sugaredType
+ = new (*this, TypeAlignment)
+ DependentSizedArrayType(*this, elementType, canon, numElements,
+ ASM, elementTypeQuals, brackets);
+ Types.push_back(sugaredType);
+ return QualType(sugaredType, 0);
+}
+
+QualType ASTContext::getIncompleteArrayType(QualType elementType,
+ ArrayType::ArraySizeModifier ASM,
+ unsigned elementTypeQuals) const {
+ llvm::FoldingSetNodeID ID;
+ IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);
+
+ void *insertPos = nullptr;
+ if (IncompleteArrayType *iat =
+ IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
+ return QualType(iat, 0);
+
+ // If the element type isn't canonical, this won't be a canonical type
+ // either, so fill in the canonical type field. We also have to pull
+ // qualifiers off the element type.
+ QualType canon;
+
+ if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
+ SplitQualType canonSplit = getCanonicalType(elementType).split();
+ canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
+ ASM, elementTypeQuals);
+ canon = getQualifiedType(canon, canonSplit.Quals);
+
+ // Get the new insert position for the node we care about.
+ IncompleteArrayType *existing =
+ IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
+ assert(!existing && "Shouldn't be in the map!"); (void) existing;
+ }
+
+ IncompleteArrayType *newType = new (*this, TypeAlignment)
+ IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
+
+ IncompleteArrayTypes.InsertNode(newType, insertPos);
+ Types.push_back(newType);
+ return QualType(newType, 0);
+}
+
+/// getVectorType - Return the unique reference to a vector type of
+/// the specified element type and size. The element type must be a
+/// built-in type.
+QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
+ VectorType::VectorKind VecKind) const {
+ assert(vecType->isBuiltinType());
+
+ // Check if we've already instantiated a vector of this type.
+ llvm::FoldingSetNodeID ID;
+ VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);
+
+ void *InsertPos = nullptr;
+ if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(VTP, 0);
+
+ // If the element type isn't canonical, this won't be a canonical type either,
+ // so fill in the canonical type field.
+ QualType Canonical;
+ if (!vecType.isCanonical()) {
+ Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);
+
+ // Get the new insert position for the node we care about.
+ VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ VectorType *New = new (*this, TypeAlignment)
+ VectorType(vecType, NumElts, Canonical, VecKind);
+ VectorTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+/// getExtVectorType - Return the unique reference to an extended vector type
+/// of the specified element type and size. The element type must be a
+/// built-in or dependent type.
+QualType
+ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const {
+ assert(vecType->isBuiltinType() || vecType->isDependentType());
+
+ // Check if we've already instantiated a vector of this type.
+ llvm::FoldingSetNodeID ID;
+ VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
+ VectorType::GenericVector);
+ void *InsertPos = nullptr;
+ if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(VTP, 0);
+
+ // If the element type isn't canonical, this won't be a canonical type either,
+ // so fill in the canonical type field.
+ QualType Canonical;
+ if (!vecType.isCanonical()) {
+ Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);
+
+ // Get the new insert position for the node we care about.
+ VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ ExtVectorType *New = new (*this, TypeAlignment)
+ ExtVectorType(vecType, NumElts, Canonical);
+ VectorTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+QualType
+ASTContext::getDependentSizedExtVectorType(QualType vecType,
+ Expr *SizeExpr,
+ SourceLocation AttrLoc) const {
+ llvm::FoldingSetNodeID ID;
+ DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType),
+ SizeExpr);
+
+ void *InsertPos = nullptr;
+ DependentSizedExtVectorType *Canon
+ = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
+ DependentSizedExtVectorType *New;
+ if (Canon) {
+ // We already have a canonical version of this vector type; use it as
+ // the canonical type for a newly-built type.
+ New = new (*this, TypeAlignment)
+ DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0),
+ SizeExpr, AttrLoc);
+ } else {
+ QualType CanonVecTy = getCanonicalType(vecType);
+ if (CanonVecTy == vecType) {
+ New = new (*this, TypeAlignment)
+ DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr,
+ AttrLoc);
+
+ DependentSizedExtVectorType *CanonCheck
+ = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
+ (void)CanonCheck;
+ DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
+ } else {
+ QualType Canon = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
+ SourceLocation());
+ New = new (*this, TypeAlignment)
+ DependentSizedExtVectorType(*this, vecType, Canon, SizeExpr, AttrLoc);
+ }
+ }
+
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
+///
+QualType
+ASTContext::getFunctionNoProtoType(QualType ResultTy,
+ const FunctionType::ExtInfo &Info) const {
+ const CallingConv CallConv = Info.getCC();
+
+ // Unique functions, to guarantee there is only one function of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ FunctionNoProtoType::Profile(ID, ResultTy, Info);
+
+ void *InsertPos = nullptr;
+ if (FunctionNoProtoType *FT =
+ FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(FT, 0);
+
+ QualType Canonical;
+ if (!ResultTy.isCanonical()) {
+ Canonical = getFunctionNoProtoType(getCanonicalType(ResultTy), Info);
+
+ // Get the new insert position for the node we care about.
+ FunctionNoProtoType *NewIP =
+ FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
+ }
+
+ FunctionProtoType::ExtInfo newInfo = Info.withCallingConv(CallConv);
+ FunctionNoProtoType *New = new (*this, TypeAlignment)
+ FunctionNoProtoType(ResultTy, Canonical, newInfo);
+ Types.push_back(New);
+ FunctionNoProtoTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// \brief Determine whether \p T is canonical as the result type of a function.
+static bool isCanonicalResultType(QualType T) {
+ return T.isCanonical() &&
+ (T.getObjCLifetime() == Qualifiers::OCL_None ||
+ T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
+}
+
+CanQualType
+ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
+ CanQualType CanResultType = getCanonicalType(ResultType);
+
+ // Canonical result types do not have ARC lifetime qualifiers.
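+ // (For example, a result type written '__strong id' canonicalizes to
+ // plain 'id'.)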
+ if (CanResultType.getQualifiers().hasObjCLifetime()) {
+ Qualifiers Qs = CanResultType.getQualifiers();
+ Qs.removeObjCLifetime();
+ return CanQualType::CreateUnsafe(
+ getQualifiedType(CanResultType.getUnqualifiedType(), Qs));
+ }
+
+ return CanResultType;
+}
+
+QualType
+ASTContext::getFunctionType(QualType ResultTy, ArrayRef<QualType> ArgArray,
+ const FunctionProtoType::ExtProtoInfo &EPI) const {
+ size_t NumArgs = ArgArray.size();
+
+ // Unique functions, to guarantee there is only one function of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
+ *this);
+
+ void *InsertPos = nullptr;
+ if (FunctionProtoType *FTP =
+ FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(FTP, 0);
+
+ // Determine whether the type being created is already canonical or not.
+ bool isCanonical =
+ EPI.ExceptionSpec.Type == EST_None && isCanonicalResultType(ResultTy) &&
+ !EPI.HasTrailingReturn;
+ for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
+ if (!ArgArray[i].isCanonicalAsParam())
+ isCanonical = false;
+
+ // If this type isn't canonical, get the canonical version of it.
+ // The exception spec is not part of the canonical type.
+ QualType Canonical;
+ if (!isCanonical) {
+ SmallVector<QualType, 16> CanonicalArgs;
+ CanonicalArgs.reserve(NumArgs);
+ for (unsigned i = 0; i != NumArgs; ++i)
+ CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));
+
+ FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
+ CanonicalEPI.HasTrailingReturn = false;
+ CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
+
+ // Adjust the canonical function result type.
+ CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
+ Canonical = getFunctionType(CanResultTy, CanonicalArgs, CanonicalEPI);
+
+ // Get the new insert position for the node we care about.
+ FunctionProtoType *NewIP =
+ FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
+ }
+
+ // FunctionProtoType objects are allocated with extra bytes after
+ // them for three variable size arrays at the end:
+ // - parameter types
+ // - exception types
+ // - consumed-arguments flags
+ // Instead of the exception types, there could be a noexcept
+ // expression, or information used to resolve the exception
+ // specification.
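+ // For example, 'void (int, float) throw(int)' needs trailing storage
+ // for two parameter QualTypes plus one exception QualType.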
+ size_t Size = sizeof(FunctionProtoType) +
+ NumArgs * sizeof(QualType);
+ if (EPI.ExceptionSpec.Type == EST_Dynamic) {
+ Size += EPI.ExceptionSpec.Exceptions.size() * sizeof(QualType);
+ } else if (EPI.ExceptionSpec.Type == EST_ComputedNoexcept) {
+ Size += sizeof(Expr*);
+ } else if (EPI.ExceptionSpec.Type == EST_Uninstantiated) {
+ Size += 2 * sizeof(FunctionDecl*);
+ } else if (EPI.ExceptionSpec.Type == EST_Unevaluated) {
+ Size += sizeof(FunctionDecl*);
+ }
+ if (EPI.ConsumedParameters)
+ Size += NumArgs * sizeof(bool);
+
+ FunctionProtoType *FTP = (FunctionProtoType*) Allocate(Size, TypeAlignment);
+ FunctionProtoType::ExtProtoInfo newEPI = EPI;
+ new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
+ Types.push_back(FTP);
+ FunctionProtoTypes.InsertNode(FTP, InsertPos);
+ return QualType(FTP, 0);
+}
+
+#ifndef NDEBUG
+static bool NeedsInjectedClassNameType(const RecordDecl *D) {
+ if (!isa<CXXRecordDecl>(D)) return false;
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(D);
+ if (isa<ClassTemplatePartialSpecializationDecl>(RD))
+ return true;
+ if (RD->getDescribedClassTemplate() &&
+ !isa<ClassTemplateSpecializationDecl>(RD))
+ return true;
+ return false;
+}
+#endif
+
+/// getInjectedClassNameType - Return the unique reference to the
+/// injected class name type for the specified templated declaration.
+QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
+ QualType TST) const {
+ assert(NeedsInjectedClassNameType(Decl));
+ if (Decl->TypeForDecl) {
+ assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
+ } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
+ assert(PrevDecl->TypeForDecl && "previous declaration has no type");
+ Decl->TypeForDecl = PrevDecl->TypeForDecl;
+ assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
+ } else {
+ Type *newType =
+ new (*this, TypeAlignment) InjectedClassNameType(Decl, TST);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
+ }
+ return QualType(Decl->TypeForDecl, 0);
+}
+
+/// getTypeDeclTypeSlow - Return the unique reference to the type for the
+/// specified type declaration (the slow path of getTypeDeclType).
+QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
+ assert(Decl && "Passed null for Decl param");
+ assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
+
+ if (const TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Decl))
+ return getTypedefType(Typedef);
+
+ assert(!isa<TemplateTypeParmDecl>(Decl) &&
+ "Template type parameter types are always available.");
+
+ if (const RecordDecl *Record = dyn_cast<RecordDecl>(Decl)) {
+ assert(Record->isFirstDecl() && "struct/union has previous declaration");
+ assert(!NeedsInjectedClassNameType(Record));
+ return getRecordType(Record);
+ } else if (const EnumDecl *Enum = dyn_cast<EnumDecl>(Decl)) {
+ assert(Enum->isFirstDecl() && "enum has previous declaration");
+ return getEnumType(Enum);
+ } else if (const UnresolvedUsingTypenameDecl *Using =
+ dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
+ Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
+ } else
+ llvm_unreachable("TypeDecl without a type?");
+
+ return QualType(Decl->TypeForDecl, 0);
+}
+
+/// getTypedefType - Return the unique reference to the type for the
+/// specified typedef name decl.
+QualType
+ASTContext::getTypedefType(const TypedefNameDecl *Decl,
+ QualType Canonical) const {
+ if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+
+ if (Canonical.isNull())
+ Canonical = getCanonicalType(Decl->getUnderlyingType());
+ TypedefType *newType = new (*this, TypeAlignment)
+ TypedefType(Type::Typedef, Decl, Canonical);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
+ return QualType(newType, 0);
+}
+
+QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
+ if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+
+ if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
+ if (PrevDecl->TypeForDecl)
+ return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
+
+ RecordType *newType = new (*this, TypeAlignment) RecordType(Decl);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
+ return QualType(newType, 0);
+}
+
+QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
+ if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+
+ if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
+ if (PrevDecl->TypeForDecl)
+ return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
+
+ EnumType *newType = new (*this, TypeAlignment) EnumType(Decl);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
+ return QualType(newType, 0);
+}
+
+QualType ASTContext::getAttributedType(AttributedType::Kind attrKind,
+ QualType modifiedType,
+ QualType equivalentType) {
+ llvm::FoldingSetNodeID id;
+ AttributedType::Profile(id, attrKind, modifiedType, equivalentType);
+
+ void *insertPos = nullptr;
+ AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
+ if (type) return QualType(type, 0);
+
+ QualType canon = getCanonicalType(equivalentType);
+ type = new (*this, TypeAlignment)
+ AttributedType(canon, attrKind, modifiedType, equivalentType);
+
+ Types.push_back(type);
+ AttributedTypes.InsertNode(type, insertPos);
+
+ return QualType(type, 0);
+}
+
+/// \brief Retrieve a substitution-result type.
+QualType
+ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
+ QualType Replacement) const {
+ assert(Replacement.isCanonical()
+ && "replacement types must always be canonical");
+
+ llvm::FoldingSetNodeID ID;
+ SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
+ void *InsertPos = nullptr;
+ SubstTemplateTypeParmType *SubstParm
+ = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!SubstParm) {
+ SubstParm = new (*this, TypeAlignment)
+ SubstTemplateTypeParmType(Parm, Replacement);
+ Types.push_back(SubstParm);
+ SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
+ }
+
+ return QualType(SubstParm, 0);
+}
+
+/// \brief Retrieve a substitution-result type that records the substitution
+/// of an entire template argument pack for a template type parameter pack.
+QualType ASTContext::getSubstTemplateTypeParmPackType(
+ const TemplateTypeParmType *Parm,
+ const TemplateArgument &ArgPack) {
+#ifndef NDEBUG
+ for (const auto &P : ArgPack.pack_elements()) {
+ assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type");
+ assert(P.getAsType().isCanonical() && "Pack contains non-canonical type");
+ }
+#endif
+
+ llvm::FoldingSetNodeID ID;
+ SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
+ void *InsertPos = nullptr;
+ if (SubstTemplateTypeParmPackType *SubstParm
+ = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(SubstParm, 0);
+
+ QualType Canon;
+ if (!Parm->isCanonicalUnqualified()) {
+ Canon = getCanonicalType(QualType(Parm, 0));
+ Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
+ ArgPack);
+ SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ SubstTemplateTypeParmPackType *SubstParm
+ = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
+ ArgPack);
+ Types.push_back(SubstParm);
+ SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
+ return QualType(SubstParm, 0);
+}
+
+/// \brief Retrieve the template type parameter type for a template
+/// parameter or parameter pack with the given depth, index, and (optionally)
+/// name.
+QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
+ bool ParameterPack,
+ TemplateTypeParmDecl *TTPDecl) const {
+ llvm::FoldingSetNodeID ID;
+ TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
+ void *InsertPos = nullptr;
+ TemplateTypeParmType *TypeParm
+ = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (TypeParm)
+ return QualType(TypeParm, 0);
+
+ if (TTPDecl) {
+ QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
+ TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);
+
+ TemplateTypeParmType *TypeCheck
+ = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!TypeCheck && "Template type parameter canonical type broken");
+ (void)TypeCheck;
+ } else
+ TypeParm = new (*this, TypeAlignment)
+ TemplateTypeParmType(Depth, Index, ParameterPack);
+
+ Types.push_back(TypeParm);
+ TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
+
+ return QualType(TypeParm, 0);
+}
+
+TypeSourceInfo *
+ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
+ SourceLocation NameLoc,
+ const TemplateArgumentListInfo &Args,
+ QualType Underlying) const {
+ assert(!Name.getAsDependentTemplateName() &&
+ "No dependent template names here!");
+ QualType TST = getTemplateSpecializationType(Name, Args, Underlying);
+
+ TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
+ TemplateSpecializationTypeLoc TL =
+ DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
+ TL.setTemplateKeywordLoc(SourceLocation());
+ TL.setTemplateNameLoc(NameLoc);
+ TL.setLAngleLoc(Args.getLAngleLoc());
+ TL.setRAngleLoc(Args.getRAngleLoc());
+ for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
+ TL.setArgLocInfo(i, Args[i].getLocInfo());
+ return DI;
+}
+
+QualType
+ASTContext::getTemplateSpecializationType(TemplateName Template,
+ const TemplateArgumentListInfo &Args,
+ QualType Underlying) const {
+ assert(!Template.getAsDependentTemplateName() &&
+ "No dependent template names here!");
+
+ unsigned NumArgs = Args.size();
+
+ SmallVector<TemplateArgument, 4> ArgVec;
+ ArgVec.reserve(NumArgs);
+ for (unsigned i = 0; i != NumArgs; ++i)
+ ArgVec.push_back(Args[i].getArgument());
+
+ return getTemplateSpecializationType(Template, ArgVec.data(), NumArgs,
+ Underlying);
+}
+
+#ifndef NDEBUG
+static bool hasAnyPackExpansions(const TemplateArgument *Args,
+ unsigned NumArgs) {
+ for (unsigned I = 0; I != NumArgs; ++I)
+ if (Args[I].isPackExpansion())
+ return true;
+
+ return false;
+}
+#endif
+
+QualType
+ASTContext::getTemplateSpecializationType(TemplateName Template,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ QualType Underlying) const {
+ assert(!Template.getAsDependentTemplateName() &&
+ "No dependent template names here!");
+ // Look through qualified template names.
+ if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
+ Template = TemplateName(QTN->getTemplateDecl());
+
+ bool IsTypeAlias =
+ Template.getAsTemplateDecl() &&
+ isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
+ QualType CanonType;
+ if (!Underlying.isNull())
+ CanonType = getCanonicalType(Underlying);
+ else {
+ // We can get here with an alias template when the specialization contains
+ // a pack expansion that does not match up with a parameter pack.
+ assert((!IsTypeAlias || hasAnyPackExpansions(Args, NumArgs)) &&
+ "Caller must compute aliased type");
+ IsTypeAlias = false;
+ CanonType = getCanonicalTemplateSpecializationType(Template, Args,
+ NumArgs);
+ }
+
+ // Allocate the (non-canonical) template specialization type, but don't
+ // try to unique it: these types typically have location information that
+ // we don't unique and don't want to lose.
+ void *Mem = Allocate(sizeof(TemplateSpecializationType) +
+ sizeof(TemplateArgument) * NumArgs +
+ (IsTypeAlias? sizeof(QualType) : 0),
+ TypeAlignment);
+ TemplateSpecializationType *Spec
+ = new (Mem) TemplateSpecializationType(Template, Args, NumArgs, CanonType,
+ IsTypeAlias ? Underlying : QualType());
+
+ Types.push_back(Spec);
+ return QualType(Spec, 0);
+}
+
+QualType
+ASTContext::getCanonicalTemplateSpecializationType(TemplateName Template,
+ const TemplateArgument *Args,
+ unsigned NumArgs) const {
+ assert(!Template.getAsDependentTemplateName() &&
+ "No dependent template names here!");
+
+ // Look through qualified template names.
+ if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
+ Template = TemplateName(QTN->getTemplateDecl());
+
+ // Build the canonical template specialization type.
+ TemplateName CanonTemplate = getCanonicalTemplateName(Template);
+ SmallVector<TemplateArgument, 4> CanonArgs;
+ CanonArgs.reserve(NumArgs);
+ for (unsigned I = 0; I != NumArgs; ++I)
+ CanonArgs.push_back(getCanonicalTemplateArgument(Args[I]));
+
+ // Determine whether this canonical template specialization type already
+ // exists.
+ llvm::FoldingSetNodeID ID;
+ TemplateSpecializationType::Profile(ID, CanonTemplate,
+ CanonArgs.data(), NumArgs, *this);
+
+ void *InsertPos = nullptr;
+ TemplateSpecializationType *Spec
+ = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!Spec) {
+ // Allocate a new canonical template specialization type.
+ void *Mem = Allocate((sizeof(TemplateSpecializationType) +
+ sizeof(TemplateArgument) * NumArgs),
+ TypeAlignment);
+ Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
+ CanonArgs.data(), NumArgs,
+ QualType(), QualType());
+ Types.push_back(Spec);
+ TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
+ }
+
+ assert(Spec->isDependentType() &&
+ "Non-dependent template-id type must have a canonical type");
+ return QualType(Spec, 0);
+}
+
+QualType
+ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ QualType NamedType) const {
+ llvm::FoldingSetNodeID ID;
+ ElaboratedType::Profile(ID, Keyword, NNS, NamedType);
+
+ void *InsertPos = nullptr;
+ ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ QualType Canon = NamedType;
+ if (!Canon.isCanonical()) {
+ Canon = getCanonicalType(NamedType);
+ ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CheckT && "Elaborated canonical type broken");
+ (void)CheckT;
+ }
+
+ T = new (*this, TypeAlignment) ElaboratedType(Keyword, NNS, NamedType, Canon);
+ Types.push_back(T);
+ ElaboratedTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
+QualType
+ASTContext::getParenType(QualType InnerType) const {
+ llvm::FoldingSetNodeID ID;
+ ParenType::Profile(ID, InnerType);
+
+ void *InsertPos = nullptr;
+ ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ QualType Canon = InnerType;
+ if (!Canon.isCanonical()) {
+ Canon = getCanonicalType(InnerType);
+ ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CheckT && "Paren canonical type broken");
+ (void)CheckT;
+ }
+
+ T = new (*this, TypeAlignment) ParenType(InnerType, Canon);
+ Types.push_back(T);
+ ParenTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
+QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ QualType Canon) const {
+ if (Canon.isNull()) {
+ NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+ ElaboratedTypeKeyword CanonKeyword = Keyword;
+ if (Keyword == ETK_None)
+ CanonKeyword = ETK_Typename;
+
+ if (CanonNNS != NNS || CanonKeyword != Keyword)
+ Canon = getDependentNameType(CanonKeyword, CanonNNS, Name);
+ }
+
+ llvm::FoldingSetNodeID ID;
+ DependentNameType::Profile(ID, Keyword, NNS, Name);
+
+ void *InsertPos = nullptr;
+ DependentNameType *T
+ = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon);
+ Types.push_back(T);
+ DependentNameTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
+QualType
+ASTContext::getDependentTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ const TemplateArgumentListInfo &Args) const {
+ // TODO: avoid this copy
+ SmallVector<TemplateArgument, 16> ArgCopy;
+ for (unsigned I = 0, E = Args.size(); I != E; ++I)
+ ArgCopy.push_back(Args[I].getArgument());
+ return getDependentTemplateSpecializationType(Keyword, NNS, Name,
+ ArgCopy.size(),
+ ArgCopy.data());
+}
+
+QualType
+ASTContext::getDependentTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ unsigned NumArgs,
+ const TemplateArgument *Args) const {
+ assert((!NNS || NNS->isDependent()) &&
+ "nested-name-specifier must be dependent");
+
+ llvm::FoldingSetNodeID ID;
+ DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
+ Name, NumArgs, Args);
+
+ void *InsertPos = nullptr;
+ DependentTemplateSpecializationType *T
+ = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+
+ ElaboratedTypeKeyword CanonKeyword = Keyword;
+ if (Keyword == ETK_None) CanonKeyword = ETK_Typename;
+
+ bool AnyNonCanonArgs = false;
+ SmallVector<TemplateArgument, 16> CanonArgs(NumArgs);
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ CanonArgs[I] = getCanonicalTemplateArgument(Args[I]);
+ if (!CanonArgs[I].structurallyEquals(Args[I]))
+ AnyNonCanonArgs = true;
+ }
+
+ QualType Canon;
+ if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
+ Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
+ Name, NumArgs,
+ CanonArgs.data());
+
+ // Find the insert position again.
+ DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
+ sizeof(TemplateArgument) * NumArgs),
+ TypeAlignment);
+ T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
+ Name, NumArgs, Args, Canon);
+ Types.push_back(T);
+ DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
+QualType ASTContext::getPackExpansionType(QualType Pattern,
+ Optional<unsigned> NumExpansions) {
+ llvm::FoldingSetNodeID ID;
+ PackExpansionType::Profile(ID, Pattern, NumExpansions);
+
+ assert(Pattern->containsUnexpandedParameterPack() &&
+ "Pack expansions must expand one or more parameter packs");
+ void *InsertPos = nullptr;
+ PackExpansionType *T
+ = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ QualType Canon;
+ if (!Pattern.isCanonical()) {
+ Canon = getCanonicalType(Pattern);
+ // The canonical type might not contain an unexpanded parameter pack, if it
+ // contains an alias template specialization which ignores one of its
+ // parameters.
+ if (Canon->containsUnexpandedParameterPack()) {
+ Canon = getPackExpansionType(Canon, NumExpansions);
+
+ // Find the insert position again, in case we inserted an element into
+ // PackExpansionTypes and invalidated our insert position.
+ PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
+ }
+ }
+
+ T = new (*this, TypeAlignment)
+ PackExpansionType(Pattern, Canon, NumExpansions);
+ Types.push_back(T);
+ PackExpansionTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
+/// CmpProtocolNames - Comparison predicate for sorting protocols
+/// alphabetically.
+static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
+ ObjCProtocolDecl *const *RHS) {
+ return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName());
+}
+
+static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {
+ if (Protocols.empty()) return true;
+
+ if (Protocols[0]->getCanonicalDecl() != Protocols[0])
+ return false;
+
+ for (unsigned i = 1; i != Protocols.size(); ++i)
+ if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 ||
+ Protocols[i]->getCanonicalDecl() != Protocols[i])
+ return false;
+ return true;
+}
+
+static void
+SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) {
+ // Sort protocols, keyed by name.
+ llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames);
+
+ // Canonicalize.
+ for (ObjCProtocolDecl *&P : Protocols)
+ P = P->getCanonicalDecl();
+
+ // Remove duplicates.
+ auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end());
+ Protocols.erase(ProtocolsEnd, Protocols.end());
+}
+
+QualType ASTContext::getObjCObjectType(QualType BaseType,
+ ObjCProtocolDecl * const *Protocols,
+ unsigned NumProtocols) const {
+ return getObjCObjectType(BaseType, { },
+ llvm::makeArrayRef(Protocols, NumProtocols),
+ /*isKindOf=*/false);
+}
+
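+/// Build (or reuse) an ObjCObjectType for the given base type, applied
+/// type arguments, and protocol qualifiers; \p isKindOf marks a '__kindof'
+/// object type. The result is uniqued against a canonical form built from
+/// the canonical base, canonical type arguments, and a sorted-and-uniqued
+/// protocol list.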
+QualType ASTContext::getObjCObjectType(
+ QualType baseType,
+ ArrayRef<QualType> typeArgs,
+ ArrayRef<ObjCProtocolDecl *> protocols,
+ bool isKindOf) const {
+ // If the base type is an interface and there aren't any protocols or
+ // type arguments to add, then the interface type will do just fine.
+ if (typeArgs.empty() && protocols.empty() && !isKindOf &&
+ isa<ObjCInterfaceType>(baseType))
+ return baseType;
+
+ // Look in the folding set for an existing type.
+ llvm::FoldingSetNodeID ID;
+ ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf);
+ void *InsertPos = nullptr;
+ if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(QT, 0);
+
+ // Determine the type arguments to be used for canonicalization,
+ // which may be explicitly specified here or written on the base
+ // type.
+ ArrayRef<QualType> effectiveTypeArgs = typeArgs;
+ if (effectiveTypeArgs.empty()) {
+ if (auto baseObject = baseType->getAs<ObjCObjectType>())
+ effectiveTypeArgs = baseObject->getTypeArgs();
+ }
+
+ // Build the canonical type, which has the canonical base type and a
+ // sorted-and-uniqued list of protocols and the type arguments
+ // canonicalized.
+ QualType canonical;
+ bool typeArgsAreCanonical = std::all_of(effectiveTypeArgs.begin(),
+ effectiveTypeArgs.end(),
+ [&](QualType type) {
+ return type.isCanonical();
+ });
+ bool protocolsSorted = areSortedAndUniqued(protocols);
+ if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
+ // Determine the canonical type arguments.
+ ArrayRef<QualType> canonTypeArgs;
+ SmallVector<QualType, 4> canonTypeArgsVec;
+ if (!typeArgsAreCanonical) {
+ canonTypeArgsVec.reserve(effectiveTypeArgs.size());
+ for (auto typeArg : effectiveTypeArgs)
+ canonTypeArgsVec.push_back(getCanonicalType(typeArg));
+ canonTypeArgs = canonTypeArgsVec;
+ } else {
+ canonTypeArgs = effectiveTypeArgs;
+ }
+
+ ArrayRef<ObjCProtocolDecl *> canonProtocols;
+ SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
+ if (!protocolsSorted) {
+ canonProtocolsVec.append(protocols.begin(), protocols.end());
+ SortAndUniqueProtocols(canonProtocolsVec);
+ canonProtocols = canonProtocolsVec;
+ } else {
+ canonProtocols = protocols;
+ }
+
+ canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs,
+ canonProtocols, isKindOf);
+
+ // Regenerate InsertPos.
+ ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ unsigned size = sizeof(ObjCObjectTypeImpl);
+ size += typeArgs.size() * sizeof(QualType);
+ size += protocols.size() * sizeof(ObjCProtocolDecl *);
+ void *mem = Allocate(size, TypeAlignment);
+ ObjCObjectTypeImpl *T =
+ new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
+ isKindOf);
+
+ Types.push_back(T);
+ ObjCObjectTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
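+
+// For illustration, a specialized type such as NSArray<NSString *> * is
+// built through this routine roughly as follows (hypothetical caller;
+// the decl and type variables are assumed):
+//   QualType Base = Ctx.getObjCInterfaceType(NSArrayDecl);
+//   QualType Object = Ctx.getObjCObjectType(Base, { NSStringPtrTy }, { },
+//                                           /*isKindOf=*/false);
+//   QualType Pointer = Ctx.getObjCObjectPointerType(Object);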
+
+/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
+/// protocol list adopt all protocols in QT's qualified-id protocol
+/// list.
+bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,
+ ObjCInterfaceDecl *IC) {
+ if (!QT->isObjCQualifiedIdType())
+ return false;
+
+ if (const ObjCObjectPointerType *OPT = QT->getAs<ObjCObjectPointerType>()) {
+ // Both sides have qualifiers; check that IC implements every protocol
+ // in QT's qualifier list.
+ for (auto *Proto : OPT->quals()) {
+ if (!IC->ClassImplementsProtocol(Proto, false))
+ return false;
+ }
+ return true;
+ }
+ return false;
+}
+
+/// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
+/// QT's qualified-id protocol list adopt all protocols in IDecl's list
+/// of protocols.
+bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
+ ObjCInterfaceDecl *IDecl) {
+ if (!QT->isObjCQualifiedIdType())
+ return false;
+ const ObjCObjectPointerType *OPT = QT->getAs<ObjCObjectPointerType>();
+ if (!OPT)
+ return false;
+ if (!IDecl->hasDefinition())
+ return false;
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
+ CollectInheritedProtocols(IDecl, InheritedProtocols);
+ if (InheritedProtocols.empty())
+ return false;
+ // If every protocol in the id<plist> list conforms to a protocol in
+ // IDecl's list, then bridge casting is OK.
+ bool Conforms = false;
+ for (auto *Proto : OPT->quals()) {
+ Conforms = false;
+ for (auto *PI : InheritedProtocols) {
+ if (ProtocolCompatibleWithProtocol(Proto, PI)) {
+ Conforms = true;
+ break;
+ }
+ }
+ if (!Conforms)
+ break;
+ }
+ if (Conforms)
+ return true;
+
+ for (auto *PI : InheritedProtocols) {
+ // Check that each of IDecl's inherited protocols is adopted by some
+ // protocol in QT's qualifier list.
+ bool Adopts = false;
+ for (auto *Proto : OPT->quals()) {
+ // return 'true' if 'PI' is in the inheritance hierarchy of Proto
+ if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto)))
+ break;
+ }
+ if (!Adopts)
+ return false;
+ }
+ return true;
+}
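+
+// For illustration: given '@interface Foo : NSObject <NSCopying> @end', a
+// value of type id<NSCopying> passes both checks against Foo, since every
+// protocol in the qualified-id list is matched by one of Foo's inherited
+// protocols and vice versa. (Illustrative sketch of the intended use.)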
+
+/// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
+/// the given object type.
+QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
+ llvm::FoldingSetNodeID ID;
+ ObjCObjectPointerType::Profile(ID, ObjectT);
+
+ void *InsertPos = nullptr;
+ if (ObjCObjectPointerType *QT =
+ ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(QT, 0);
+
+ // Find the canonical object type.
+ QualType Canonical;
+ if (!ObjectT.isCanonical()) {
+ Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT));
+
+ // Regenerate InsertPos.
+ ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ // No match.
+ void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment);
+ ObjCObjectPointerType *QType =
+ new (Mem) ObjCObjectPointerType(Canonical, ObjectT);
+
+ Types.push_back(QType);
+ ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
+ return QualType(QType, 0);
+}
+
+/// getObjCInterfaceType - Return the unique reference to the type for the
+/// specified ObjC interface decl. The list of protocols is optional.
+QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
+ ObjCInterfaceDecl *PrevDecl) const {
+ if (Decl->TypeForDecl)
+ return QualType(Decl->TypeForDecl, 0);
+
+ if (PrevDecl) {
+ assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
+ Decl->TypeForDecl = PrevDecl->TypeForDecl;
+ return QualType(PrevDecl->TypeForDecl, 0);
+ }
+
+ // Prefer the definition, if there is one.
+ if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
+ Decl = Def;
+
+ void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
+ ObjCInterfaceType *T = new (Mem) ObjCInterfaceType(Decl);
+ Decl->TypeForDecl = T;
+ Types.push_back(T);
+ return QualType(T, 0);
+}
+
+/// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
+/// TypeOfExprType ASTs (since expressions are never shared). For example,
+/// multiple declarations that refer to "typeof(x)" all contain different
+/// DeclRefExprs. This doesn't affect the type checker, since it operates
+/// on canonical types (which are always unique).
+QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const {
+ TypeOfExprType *toe;
+ if (tofExpr->isTypeDependent()) {
+ llvm::FoldingSetNodeID ID;
+ DependentTypeOfExprType::Profile(ID, *this, tofExpr);
+
+ void *InsertPos = nullptr;
+ DependentTypeOfExprType *Canon
+ = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (Canon) {
+ // We already have a "canonical" version of an identical, dependent
+ // typeof(expr) type. Use that as our canonical type.
+ toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr,
+ QualType((TypeOfExprType*)Canon, 0));
+ } else {
+ // Build a new, canonical typeof(expr) type.
+ Canon
+ = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr);
+ DependentTypeOfExprTypes.InsertNode(Canon, InsertPos);
+ toe = Canon;
+ }
+ } else {
+ QualType Canonical = getCanonicalType(tofExpr->getType());
+ toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical);
+ }
+ Types.push_back(toe);
+ return QualType(toe, 0);
+}
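+
+// For illustration: two separate declarations both written as typeof(x)
+// produce two distinct TypeOfExprType nodes, each wrapping its own
+// DeclRefExpr, yet both share one canonical type, so the type checker
+// still considers them the same type.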
+
+/// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
+/// TypeOfType nodes. The only motivation to unique these nodes would be
+/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
+/// an issue. This doesn't affect the type checker, since it operates
+/// on canonical types (which are always unique).
+QualType ASTContext::getTypeOfType(QualType tofType) const {
+ QualType Canonical = getCanonicalType(tofType);
+ TypeOfType *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical);
+ Types.push_back(tot);
+ return QualType(tot, 0);
+}
+
+/// \brief Unlike many "get<Type>" functions, we don't unique DecltypeType
+/// nodes. This would never be helpful, since each such type has its own
+/// expression, and would not give a significant memory saving, since there
+/// is an Expr tree under each such type.
+QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
+ DecltypeType *dt;
+
+ // C++11 [temp.type]p2:
+ // If an expression e involves a template parameter, decltype(e) denotes a
+ // unique dependent type. Two such decltype-specifiers refer to the same
+ // type only if their expressions are equivalent (14.5.6.1).
+ if (e->isInstantiationDependent()) {
+ llvm::FoldingSetNodeID ID;
+ DependentDecltypeType::Profile(ID, *this, e);
+
+ void *InsertPos = nullptr;
+ DependentDecltypeType *Canon
+ = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (!Canon) {
+ // Build a new, canonical typeof(expr) type.
+ Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e);
+ DependentDecltypeTypes.InsertNode(Canon, InsertPos);
+ }
+ dt = new (*this, TypeAlignment)
+ DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0));
+ } else {
+ dt = new (*this, TypeAlignment)
+ DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType));
+ }
+ Types.push_back(dt);
+ return QualType(dt, 0);
+}
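+
+// For illustration: inside a template, two occurrences of decltype(T::x)
+// resolve to a single canonical DependentDecltypeType found through the
+// folding set above, so they compare equal; a non-dependent decltype(e)
+// simply canonicalizes to the canonical form of its underlying type.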
+
+/// getUnaryTransformType - We don't unique these, since the memory
+/// savings are minimal and these are rare.
+QualType ASTContext::getUnaryTransformType(QualType BaseType,
+ QualType UnderlyingType,
+ UnaryTransformType::UTTKind Kind)
+ const {
+ UnaryTransformType *Ty =
+ new (*this, TypeAlignment) UnaryTransformType (BaseType, UnderlyingType,
+ Kind,
+ UnderlyingType->isDependentType() ?
+ QualType() : getCanonicalType(UnderlyingType));
+ Types.push_back(Ty);
+ return QualType(Ty, 0);
+}
+
+/// getAutoType - Return the uniqued reference to the 'auto' type which has been
+/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
+/// canonical deduced-but-dependent 'auto' type.
+QualType ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
+ bool IsDependent) const {
+ if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && !IsDependent)
+ return getAutoDeductType();
+
+ // Look in the folding set for an existing type.
+ void *InsertPos = nullptr;
+ llvm::FoldingSetNodeID ID;
+ AutoType::Profile(ID, DeducedType, Keyword, IsDependent);
+ if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(AT, 0);
+
+ AutoType *AT = new (*this, TypeAlignment) AutoType(DeducedType,
+ Keyword,
+ IsDependent);
+ Types.push_back(AT);
+ if (InsertPos)
+ AutoTypes.InsertNode(AT, InsertPos);
+ return QualType(AT, 0);
+}
+
+/// getAtomicType - Return the uniqued reference to the atomic type for
+/// the given value type.
+QualType ASTContext::getAtomicType(QualType T) const {
+ // Unique atomic types, to guarantee there is only one instance of a
+ // particular structure.
+ llvm::FoldingSetNodeID ID;
+ AtomicType::Profile(ID, T);
+
+ void *InsertPos = nullptr;
+ if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(AT, 0);
+
+ // If the atomic value type isn't canonical, this won't be a canonical type
+ // either, so fill in the canonical type field.
+ QualType Canonical;
+ if (!T.isCanonical()) {
+ Canonical = getAtomicType(getCanonicalType(T));
+
+ // Get the new insert position for the node we care about.
+ AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ AtomicType *New = new (*this, TypeAlignment) AtomicType(T, Canonical);
+ Types.push_back(New);
+ AtomicTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getAutoDeductType - Get type pattern for deducing against 'auto'.
+QualType ASTContext::getAutoDeductType() const {
+ if (AutoDeductTy.isNull())
+ AutoDeductTy = QualType(
+ new (*this, TypeAlignment) AutoType(QualType(), AutoTypeKeyword::Auto,
+ /*dependent*/false),
+ 0);
+ return AutoDeductTy;
+}
+
+/// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
+QualType ASTContext::getAutoRRefDeductType() const {
+ if (AutoRRefDeductTy.isNull())
+ AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType());
+ assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
+ return AutoRRefDeductTy;
+}
+
+/// getTagDeclType - Return the unique reference to the type for the
+/// specified TagDecl (struct/union/class/enum) decl.
+QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
+ assert (Decl);
+ // FIXME: What is the design on getTagDeclType when it requires casting
+ // away const? mutable?
+ return getTypeDeclType(const_cast<TagDecl*>(Decl));
+}
+
+/// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
+/// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
+/// needs to agree with the definition in <stddef.h>.
+CanQualType ASTContext::getSizeType() const {
+ return getFromTargetType(Target->getSizeType());
+}
+
+/// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
+CanQualType ASTContext::getIntMaxType() const {
+ return getFromTargetType(Target->getIntMaxType());
+}
+
+/// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
+CanQualType ASTContext::getUIntMaxType() const {
+ return getFromTargetType(Target->getUIntMaxType());
+}
+
+/// getSignedWCharType - Return the type of "signed wchar_t".
+/// Used when in C++, as a GCC extension.
+QualType ASTContext::getSignedWCharType() const {
+ // FIXME: derive from "Target" ?
+ return WCharTy;
+}
+
+/// getUnsignedWCharType - Return the type of "unsigned wchar_t".
+/// Used when in C++, as a GCC extension.
+QualType ASTContext::getUnsignedWCharType() const {
+ // FIXME: derive from "Target" ?
+ return UnsignedIntTy;
+}
+
+QualType ASTContext::getIntPtrType() const {
+ return getFromTargetType(Target->getIntPtrType());
+}
+
+QualType ASTContext::getUIntPtrType() const {
+ return getCorrespondingUnsignedType(getIntPtrType());
+}
+
+/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
+/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
+QualType ASTContext::getPointerDiffType() const {
+ return getFromTargetType(Target->getPtrDiffType(0));
+}
+
+/// \brief Return the unique type for "pid_t" defined in
+/// <sys/types.h>. We need this to compute the correct type for vfork().
+QualType ASTContext::getProcessIDType() const {
+ return getFromTargetType(Target->getProcessIDType());
+}
+
+//===----------------------------------------------------------------------===//
+// Type Operators
+//===----------------------------------------------------------------------===//
+
+CanQualType ASTContext::getCanonicalParamType(QualType T) const {
+ // Push qualifiers into arrays, and then discard any remaining
+ // qualifiers.
+ T = getCanonicalType(T);
+ T = getVariableArrayDecayedType(T);
+ const Type *Ty = T.getTypePtr();
+ QualType Result;
+ if (isa<ArrayType>(Ty)) {
+ Result = getArrayDecayedType(QualType(Ty,0));
+ } else if (isa<FunctionType>(Ty)) {
+ Result = getPointerType(QualType(Ty, 0));
+ } else {
+ Result = QualType(Ty, 0);
+ }
+
+ return CanQualType::CreateUnsafe(Result);
+}
+
+QualType ASTContext::getUnqualifiedArrayType(QualType type,
+ Qualifiers &quals) {
+ SplitQualType splitType = type.getSplitUnqualifiedType();
+
+ // FIXME: getSplitUnqualifiedType() actually walks all the way to
+ // the unqualified desugared type and then drops it on the floor.
+ // We then have to strip that sugar back off with
+ // getUnqualifiedDesugaredType(), which is silly.
+ const ArrayType *AT =
+ dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());
+
+ // If we don't have an array, just use the results in splitType.
+ if (!AT) {
+ quals = splitType.Quals;
+ return QualType(splitType.Ty, 0);
+ }
+
+ // Otherwise, recurse on the array's element type.
+ QualType elementType = AT->getElementType();
+ QualType unqualElementType = getUnqualifiedArrayType(elementType, quals);
+
+ // If that didn't change the element type, AT has no qualifiers, so we
+ // can just use the results in splitType.
+ if (elementType == unqualElementType) {
+ assert(quals.empty()); // from the recursive call
+ quals = splitType.Quals;
+ return QualType(splitType.Ty, 0);
+ }
+
+ // Otherwise, add in the qualifiers from the outermost type, then
+ // build the type back up.
+ quals.addConsistentQualifiers(splitType.Quals);
+
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) {
+ return getConstantArrayType(unqualElementType, CAT->getSize(),
+ CAT->getSizeModifier(), 0);
+ }
+
+ if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(AT)) {
+ return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0);
+ }
+
+ if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(AT)) {
+ return getVariableArrayType(unqualElementType,
+ VAT->getSizeExpr(),
+ VAT->getSizeModifier(),
+ VAT->getIndexTypeCVRQualifiers(),
+ VAT->getBracketsRange());
+ }
+
+ const DependentSizedArrayType *DSAT = cast<DependentSizedArrayType>(AT);
+ return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
+ DSAT->getSizeModifier(), 0,
+ SourceRange());
+}
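+
+// For illustration: given the type 'const int[3][4]', this returns
+// 'int[3][4]' and leaves 'const' in 'quals'; the qualifiers gathered at
+// each array level are handed back to the caller.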
+
+/// UnwrapSimilarPointerTypes - If T1 and T2 are pointer types that
+/// may be similar (C++ 4.4), replaces T1 and T2 with the type that
+/// they point to and return true. If T1 and T2 aren't pointer types
+/// or pointer-to-member types, or if they are not similar at this
+/// level, returns false and leaves T1 and T2 unchanged. Top-level
+/// qualifiers on T1 and T2 are ignored. This function will typically
+/// be called in a loop that successively "unwraps" pointer and
+/// pointer-to-member types to compare them at each level.
+bool ASTContext::UnwrapSimilarPointerTypes(QualType &T1, QualType &T2) {
+ const PointerType *T1PtrType = T1->getAs<PointerType>(),
+ *T2PtrType = T2->getAs<PointerType>();
+ if (T1PtrType && T2PtrType) {
+ T1 = T1PtrType->getPointeeType();
+ T2 = T2PtrType->getPointeeType();
+ return true;
+ }
+
+ const MemberPointerType *T1MPType = T1->getAs<MemberPointerType>(),
+ *T2MPType = T2->getAs<MemberPointerType>();
+ if (T1MPType && T2MPType &&
+ hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0),
+ QualType(T2MPType->getClass(), 0))) {
+ T1 = T1MPType->getPointeeType();
+ T2 = T2MPType->getPointeeType();
+ return true;
+ }
+
+ if (getLangOpts().ObjC1) {
+ const ObjCObjectPointerType *T1OPType = T1->getAs<ObjCObjectPointerType>(),
+ *T2OPType = T2->getAs<ObjCObjectPointerType>();
+ if (T1OPType && T2OPType) {
+ T1 = T1OPType->getPointeeType();
+ T2 = T2OPType->getPointeeType();
+ return true;
+ }
+ }
+
+ // FIXME: Block pointers, too?
+
+ return false;
+}
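+
+// For illustration, callers typically unwrap level by level (a minimal
+// sketch, assuming T1 and T2 are QualTypes and Ctx is this ASTContext):
+//   while (Ctx.UnwrapSimilarPointerTypes(T1, T2)) {
+//     // compare the qualifiers of T1 and T2 at this level
+//   }
+// e.g. 'int **' versus 'const int **' unwraps twice before the pointees
+// stop being pointers.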
+
+DeclarationNameInfo
+ASTContext::getNameForTemplate(TemplateName Name,
+ SourceLocation NameLoc) const {
+ switch (Name.getKind()) {
+ case TemplateName::QualifiedTemplate:
+ case TemplateName::Template:
+ // DNInfo work in progress: CHECKME: what about DNLoc?
+ return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
+ NameLoc);
+
+ case TemplateName::OverloadedTemplate: {
+ OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
+ // DNInfo work in progress: CHECKME: what about DNLoc?
+ return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
+ }
+
+ case TemplateName::DependentTemplate: {
+ DependentTemplateName *DTN = Name.getAsDependentTemplateName();
+ DeclarationName DName;
+ if (DTN->isIdentifier()) {
+ DName = DeclarationNames.getIdentifier(DTN->getIdentifier());
+ return DeclarationNameInfo(DName, NameLoc);
+ } else {
+ DName = DeclarationNames.getCXXOperatorName(DTN->getOperator());
+ // DNInfo work in progress: FIXME: source locations?
+ DeclarationNameLoc DNLoc;
+ DNLoc.CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding();
+ DNLoc.CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding();
+ return DeclarationNameInfo(DName, NameLoc, DNLoc);
+ }
+ }
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ SubstTemplateTemplateParmStorage *subst
+ = Name.getAsSubstTemplateTemplateParm();
+ return DeclarationNameInfo(subst->getParameter()->getDeclName(),
+ NameLoc);
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ SubstTemplateTemplateParmPackStorage *subst
+ = Name.getAsSubstTemplateTemplateParmPack();
+ return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
+ NameLoc);
+ }
+ }
+
+ llvm_unreachable("bad template name kind!");
+}
+
+TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
+ switch (Name.getKind()) {
+ case TemplateName::QualifiedTemplate:
+ case TemplateName::Template: {
+ TemplateDecl *Template = Name.getAsTemplateDecl();
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(Template))
+ Template = getCanonicalTemplateTemplateParmDecl(TTP);
+
+ // The canonical template name is the canonical template declaration.
+ return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
+ }
+
+ case TemplateName::OverloadedTemplate:
+ llvm_unreachable("cannot canonicalize overloaded template");
+
+ case TemplateName::DependentTemplate: {
+ DependentTemplateName *DTN = Name.getAsDependentTemplateName();
+ assert(DTN && "Non-dependent template names must refer to template decls.");
+ return DTN->CanonicalTemplateName;
+ }
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ SubstTemplateTemplateParmStorage *subst
+ = Name.getAsSubstTemplateTemplateParm();
+ return getCanonicalTemplateName(subst->getReplacement());
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ SubstTemplateTemplateParmPackStorage *subst
+ = Name.getAsSubstTemplateTemplateParmPack();
+ TemplateTemplateParmDecl *canonParameter
+ = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack());
+ TemplateArgument canonArgPack
+ = getCanonicalTemplateArgument(subst->getArgumentPack());
+ return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack);
+ }
+ }
+
+ llvm_unreachable("bad template name!");
+}
+
+bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) {
+ X = getCanonicalTemplateName(X);
+ Y = getCanonicalTemplateName(Y);
+ return X.getAsVoidPointer() == Y.getAsVoidPointer();
+}
+
+TemplateArgument
+ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ return Arg;
+
+ case TemplateArgument::Expression:
+ return Arg;
+
+ case TemplateArgument::Declaration: {
+ ValueDecl *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
+ return TemplateArgument(D, Arg.getParamTypeForDecl());
+ }
+
+ case TemplateArgument::NullPtr:
+ return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
+ /*isNullPtr*/true);
+
+ case TemplateArgument::Template:
+ return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));
+
+ case TemplateArgument::TemplateExpansion:
+ return TemplateArgument(getCanonicalTemplateName(
+ Arg.getAsTemplateOrTemplatePattern()),
+ Arg.getNumTemplateExpansions());
+
+ case TemplateArgument::Integral:
+ return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));
+
+ case TemplateArgument::Type:
+ return TemplateArgument(getCanonicalType(Arg.getAsType()));
+
+ case TemplateArgument::Pack: {
+ if (Arg.pack_size() == 0)
+ return Arg;
+
+ TemplateArgument *CanonArgs
+ = new (*this) TemplateArgument[Arg.pack_size()];
+ unsigned Idx = 0;
+ for (TemplateArgument::pack_iterator A = Arg.pack_begin(),
+ AEnd = Arg.pack_end();
+ A != AEnd; (void)++A, ++Idx)
+ CanonArgs[Idx] = getCanonicalTemplateArgument(*A);
+
+ return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size()));
+ }
+ }
+
+ // Silence GCC warning
+ llvm_unreachable("Unhandled template argument kind");
+}
+
+NestedNameSpecifier *
+ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
+ if (!NNS)
+ return nullptr;
+
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ // Canonicalize the prefix but keep the identifier the same.
+ return NestedNameSpecifier::Create(*this,
+ getCanonicalNestedNameSpecifier(NNS->getPrefix()),
+ NNS->getAsIdentifier());
+
+ case NestedNameSpecifier::Namespace:
+ // A namespace is canonical; build a nested-name-specifier with
+ // this namespace and no prefix.
+ return NestedNameSpecifier::Create(*this, nullptr,
+ NNS->getAsNamespace()->getOriginalNamespace());
+
+ case NestedNameSpecifier::NamespaceAlias:
+ // A namespace alias resolves to its underlying namespace, which is
+ // canonical; build a nested-name-specifier with that namespace and no
+ // prefix.
+ return NestedNameSpecifier::Create(*this, nullptr,
+ NNS->getAsNamespaceAlias()->getNamespace()
+ ->getOriginalNamespace());
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ QualType T = getCanonicalType(QualType(NNS->getAsType(), 0));
+
+ // If we have some kind of dependent-named type (e.g., "typename T::type"),
+ // break it apart into its prefix and identifier, then reconstitute those
+ // as the canonical nested-name-specifier. This is required to canonicalize
+ // a dependent nested-name-specifier involving typedefs of dependent-name
+ // types, e.g.,
+ // typedef typename T::type T1;
+ // typedef typename T1::type T2;
+ if (const DependentNameType *DNT = T->getAs<DependentNameType>())
+ return NestedNameSpecifier::Create(*this, DNT->getQualifier(),
+ const_cast<IdentifierInfo *>(DNT->getIdentifier()));
+
+ // Otherwise, just canonicalize the type, and force it to be a TypeSpec.
+ // FIXME: Why are TypeSpec and TypeSpecWithTemplate distinct in the
+ // first place?
+ return NestedNameSpecifier::Create(*this, nullptr, false,
+ const_cast<Type *>(T.getTypePtr()));
+ }
+
+ case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Super:
+ // The global specifier and __super specifier are canonical and unique.
+ return NNS;
+ }
+
+ llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
+}
+
+const ArrayType *ASTContext::getAsArrayType(QualType T) const {
+ // Handle the non-qualified case efficiently.
+ if (!T.hasLocalQualifiers()) {
+ // Handle the common positive case fast.
+ if (const ArrayType *AT = dyn_cast<ArrayType>(T))
+ return AT;
+ }
+
+ // Handle the common negative case fast.
+ if (!isa<ArrayType>(T.getCanonicalType()))
+ return nullptr;
+
+ // Apply any qualifiers from the array type to the element type. This
+ // implements C99 6.7.3p8: "If the specification of an array type includes
+ // any type qualifiers, the element type is so qualified, not the array type."
+
+ // If we get here, we either have type qualifiers on the type, or we have
+ // sugar such as a typedef in the way. If we have type qualifiers on the type
+ // we must propagate them down into the element type.
+
+ SplitQualType split = T.getSplitDesugaredType();
+ Qualifiers qs = split.Quals;
+
+ // If we have a simple case, just return now.
+ const ArrayType *ATy = dyn_cast<ArrayType>(split.Ty);
+ if (!ATy || qs.empty())
+ return ATy;
+
+ // Otherwise, we have an array and we have qualifiers on it. Push the
+ // qualifiers into the array element type and return a new array type.
+ QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);
+
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(ATy))
+ return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
+ CAT->getSizeModifier(),
+ CAT->getIndexTypeCVRQualifiers()));
+ if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(ATy))
+ return cast<ArrayType>(getIncompleteArrayType(NewEltTy,
+ IAT->getSizeModifier(),
+ IAT->getIndexTypeCVRQualifiers()));
+
+ if (const DependentSizedArrayType *DSAT
+ = dyn_cast<DependentSizedArrayType>(ATy))
+ return cast<ArrayType>(
+ getDependentSizedArrayType(NewEltTy,
+ DSAT->getSizeExpr(),
+ DSAT->getSizeModifier(),
+ DSAT->getIndexTypeCVRQualifiers(),
+ DSAT->getBracketsRange()));
+
+ const VariableArrayType *VAT = cast<VariableArrayType>(ATy);
+ return cast<ArrayType>(getVariableArrayType(NewEltTy,
+ VAT->getSizeExpr(),
+ VAT->getSizeModifier(),
+ VAT->getIndexTypeCVRQualifiers(),
+ VAT->getBracketsRange()));
+}
+
+QualType ASTContext::getAdjustedParameterType(QualType T) const {
+ if (T->isArrayType() || T->isFunctionType())
+ return getDecayedType(T);
+ return T;
+}
+
+QualType ASTContext::getSignatureParameterType(QualType T) const {
+ T = getVariableArrayDecayedType(T);
+ T = getAdjustedParameterType(T);
+ return T.getUnqualifiedType();
+}
+
+QualType ASTContext::getExceptionObjectType(QualType T) const {
+ // C++ [except.throw]p3:
+ // A throw-expression initializes a temporary object, called the exception
+ // object, the type of which is determined by removing any top-level
+ // cv-qualifiers from the static type of the operand of throw and adjusting
+ // the type from "array of T" or "function returning T" to "pointer to T"
+ // or "pointer to function returning T", [...]
+ T = getVariableArrayDecayedType(T);
+ if (T->isArrayType() || T->isFunctionType())
+ T = getDecayedType(T);
+ return T.getUnqualifiedType();
+}
+
+/// getArrayDecayedType - Return the properly qualified result of decaying the
+/// specified array type to a pointer. This operation is non-trivial when
+/// handling typedefs etc. The canonical type of "Ty" must be an array type;
+/// this returns a pointer to a properly qualified element of the array.
+///
+/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
+QualType ASTContext::getArrayDecayedType(QualType Ty) const {
+ // Get the element type with 'getAsArrayType' so that we don't lose any
+ // typedefs in the element type of the array. This also handles propagation
+ // of type qualifiers from the array type into the element type if present
+ // (C99 6.7.3p8).
+ const ArrayType *PrettyArrayType = getAsArrayType(Ty);
+ assert(PrettyArrayType && "Not an array type!");
+
+ QualType PtrTy = getPointerType(PrettyArrayType->getElementType());
+
+ // int x[restrict 4] -> int *restrict
+ return getQualifiedType(PtrTy, PrettyArrayType->getIndexTypeQualifiers());
+}
+
+QualType ASTContext::getBaseElementType(const ArrayType *array) const {
+ return getBaseElementType(array->getElementType());
+}
+
+QualType ASTContext::getBaseElementType(QualType type) const {
+ Qualifiers qs;
+ while (true) {
+ SplitQualType split = type.getSplitDesugaredType();
+ const ArrayType *array = split.Ty->getAsArrayTypeUnsafe();
+ if (!array) break;
+
+ type = array->getElementType();
+ qs.addConsistentQualifiers(split.Quals);
+ }
+
+ return getQualifiedType(type, qs);
+}
+
+/// getConstantArrayElementCount - Returns number of constant array elements.
+uint64_t
+ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const {
+ uint64_t ElementCount = 1;
+ do {
+ ElementCount *= CA->getSize().getZExtValue();
+ CA = dyn_cast_or_null<ConstantArrayType>(
+ CA->getElementType()->getAsArrayTypeUnsafe());
+ } while (CA);
+ return ElementCount;
+}
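+
+// For illustration: for 'int a[2][3]' the result is 2 * 3 = 6; the loop
+// multiplies together the bound of each nested constant array level.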
+
+/// getFloatingRank - Return a relative rank for floating point types.
+/// This routine will assert if passed a built-in type that isn't a float.
+static FloatingRank getFloatingRank(QualType T) {
+ if (const ComplexType *CT = T->getAs<ComplexType>())
+ return getFloatingRank(CT->getElementType());
+
+ assert(T->getAs<BuiltinType>() && "getFloatingRank(): not a floating type");
+ switch (T->getAs<BuiltinType>()->getKind()) {
+ default: llvm_unreachable("getFloatingRank(): not a floating type");
+ case BuiltinType::Half: return HalfRank;
+ case BuiltinType::Float: return FloatRank;
+ case BuiltinType::Double: return DoubleRank;
+ case BuiltinType::LongDouble: return LongDoubleRank;
+ }
+}
+
+/// getFloatingTypeOfSizeWithinDomain - Returns a real floating
+/// point or a complex type (based on Domain/Size).
+/// 'Domain' is a real floating point or complex type.
+/// 'Size' is a real floating point or complex type.
+QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
+ QualType Domain) const {
+ FloatingRank EltRank = getFloatingRank(Size);
+ if (Domain->isComplexType()) {
+ switch (EltRank) {
+ case HalfRank: llvm_unreachable("Complex half is not supported");
+ case FloatRank: return FloatComplexTy;
+ case DoubleRank: return DoubleComplexTy;
+ case LongDoubleRank: return LongDoubleComplexTy;
+ }
+ }
+
+ assert(Domain->isRealFloatingType() && "Unknown domain!");
+ switch (EltRank) {
+ case HalfRank: return HalfTy;
+ case FloatRank: return FloatTy;
+ case DoubleRank: return DoubleTy;
+ case LongDoubleRank: return LongDoubleTy;
+ }
+ llvm_unreachable("getFloatingRank(): illegal value for rank");
+}
+
+/// getFloatingTypeOrder - Compare the rank of the two specified floating
+/// point types, ignoring the domain of the type (i.e. 'double' ==
+/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
+/// LHS < RHS, return -1.
+int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
+ FloatingRank LHSR = getFloatingRank(LHS);
+ FloatingRank RHSR = getFloatingRank(RHS);
+
+ if (LHSR == RHSR)
+ return 0;
+ if (LHSR > RHSR)
+ return 1;
+ return -1;
+}
+
+/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
+/// routine will assert if passed a built-in type that isn't an integer or enum,
+/// or if it is not canonicalized.
+unsigned ASTContext::getIntegerRank(const Type *T) const {
+ assert(T->isCanonicalUnqualified() && "T should be canonicalized");
+
+ switch (cast<BuiltinType>(T)->getKind()) {
+ default: llvm_unreachable("getIntegerRank(): not a built-in integer");
+ case BuiltinType::Bool:
+ return 1 + (getIntWidth(BoolTy) << 3);
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ return 2 + (getIntWidth(CharTy) << 3);
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ return 3 + (getIntWidth(ShortTy) << 3);
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return 4 + (getIntWidth(IntTy) << 3);
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ return 5 + (getIntWidth(LongTy) << 3);
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ return 6 + (getIntWidth(LongLongTy) << 3);
+ case BuiltinType::Int128:
+ case BuiltinType::UInt128:
+ return 7 + (getIntWidth(Int128Ty) << 3);
+ }
+}
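+
+// For illustration: the rank packs the type's bit-width into the high
+// bits and a small ordinal into the low bits, so with a 32-bit 'int' the
+// rank is 4 + (32 << 3) = 260, beating a 16-bit 'short' at
+// 3 + (16 << 3) = 131. A wider type always outranks a narrower one; the
+// ordinal only breaks ties between types of equal width.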
+
+/// \brief Whether this is a promotable bitfield reference according
+/// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
+///
+/// \returns the type this bit-field will promote to, or NULL if no
+/// promotion occurs.
+QualType ASTContext::isPromotableBitField(Expr *E) const {
+ if (E->isTypeDependent() || E->isValueDependent())
+ return QualType();
+
+ // FIXME: We should not do this unless E->refersToBitField() is true. This
+ // matters in C where getSourceBitField() will find bit-fields for various
+ // cases where the source expression is not a bit-field designator.
+
+ FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields?
+ if (!Field)
+ return QualType();
+
+ QualType FT = Field->getType();
+
+ uint64_t BitWidth = Field->getBitWidthValue(*this);
+ uint64_t IntSize = getTypeSize(IntTy);
+ // C++ [conv.prom]p5:
+ // A prvalue for an integral bit-field can be converted to a prvalue of type
+ // int if int can represent all the values of the bit-field; otherwise, it
+ // can be converted to unsigned int if unsigned int can represent all the
+ // values of the bit-field. If the bit-field is larger yet, no integral
+ // promotion applies to it.
+ // C11 6.3.1.1/2:
+ // [For a bit-field of type _Bool, int, signed int, or unsigned int:]
+ // If an int can represent all values of the original type (as restricted by
+ // the width, for a bit-field), the value is converted to an int; otherwise,
+ // it is converted to an unsigned int.
+ //
+ // FIXME: C does not permit promotion of a 'long : 3' bitfield to int.
+ // We perform that promotion here to match GCC and C++.
+ if (BitWidth < IntSize)
+ return IntTy;
+
+ if (BitWidth == IntSize)
+ return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;
+
+ // Types bigger than int are not subject to promotions, and therefore act
+ // like the base type. GCC has some weird bugs in this area that we
+ // deliberately do not follow (GCC follows a pre-standard resolution to
+ // C's DR315 which treats bit-width as being part of the type, and this leaks
+ // into their semantics in some cases).
+ return QualType();
+}
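+
+// For illustration, assuming a 32-bit 'int': in 'struct S { unsigned x : 15; }'
+// the field 's.x' promotes to 'int' (15 < 32), while 'unsigned y : 32'
+// promotes to 'unsigned int', since 'int' cannot represent all its values.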
+
+/// getPromotedIntegerType - Returns the type that Promotable will
+/// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
+/// integer type.
+QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
+ assert(!Promotable.isNull());
+ assert(Promotable->isPromotableIntegerType());
+ if (const EnumType *ET = Promotable->getAs<EnumType>())
+ return ET->getDecl()->getPromotionType();
+
+ if (const BuiltinType *BT = Promotable->getAs<BuiltinType>()) {
+ // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
+ // (3.9.1) can be converted to a prvalue of the first of the following
+ // types that can represent all the values of its underlying type:
+ // int, unsigned int, long int, unsigned long int, long long int, or
+ // unsigned long long int [...]
+ // FIXME: Is there some better way to compute this?
+ if (BT->getKind() == BuiltinType::WChar_S ||
+ BT->getKind() == BuiltinType::WChar_U ||
+ BT->getKind() == BuiltinType::Char16 ||
+ BT->getKind() == BuiltinType::Char32) {
+ bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
+ uint64_t FromSize = getTypeSize(BT);
+ QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
+ LongLongTy, UnsignedLongLongTy };
+ for (size_t Idx = 0; Idx < llvm::array_lengthof(PromoteTypes); ++Idx) {
+ uint64_t ToSize = getTypeSize(PromoteTypes[Idx]);
+ if (FromSize < ToSize ||
+ (FromSize == ToSize &&
+ FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType()))
+ return PromoteTypes[Idx];
+ }
+ llvm_unreachable("char type should fit into long long");
+ }
+ }
+
+ // At this point, we should have a signed or unsigned integer type.
+ if (Promotable->isSignedIntegerType())
+ return IntTy;
+ uint64_t PromotableSize = getIntWidth(Promotable);
+ uint64_t IntSize = getIntWidth(IntTy);
+ assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
+ return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
+}
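+
+// For illustration: on a target where wchar_t is an unsigned 16-bit type
+// and int is 32 bits, wchar_t promotes to 'int', the first listed type
+// that can represent all of its values; a plain 'unsigned short' likewise
+// promotes to 'int' through the generic path at the end.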
+
+/// \brief Recurses in pointer/array types until it finds an objc retainable
+/// type and returns its ownership.
+Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
+ while (!T.isNull()) {
+ if (T.getObjCLifetime() != Qualifiers::OCL_None)
+ return T.getObjCLifetime();
+ if (T->isArrayType())
+ T = getBaseElementType(T);
+ else if (const PointerType *PT = T->getAs<PointerType>())
+ T = PT->getPointeeType();
+ else if (const ReferenceType *RT = T->getAs<ReferenceType>())
+ T = RT->getPointeeType();
+ else
+ break;
+ }
+
+ return Qualifiers::OCL_None;
+}
+
+static const Type *getIntegerTypeForEnum(const EnumType *ET) {
+ // Incomplete enum types are not treated as integer types.
+ // FIXME: In C++, enum types are never integer types.
+ if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
+ return ET->getDecl()->getIntegerType().getTypePtr();
+ return nullptr;
+}
+
+/// getIntegerTypeOrder - Returns the highest ranked integer type:
+/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
+/// LHS < RHS, return -1.
+int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
+ const Type *LHSC = getCanonicalType(LHS).getTypePtr();
+ const Type *RHSC = getCanonicalType(RHS).getTypePtr();
+
+ // Unwrap enums to their underlying type.
+ if (const EnumType *ET = dyn_cast<EnumType>(LHSC))
+ LHSC = getIntegerTypeForEnum(ET);
+ if (const EnumType *ET = dyn_cast<EnumType>(RHSC))
+ RHSC = getIntegerTypeForEnum(ET);
+
+ if (LHSC == RHSC) return 0;
+
+ bool LHSUnsigned = LHSC->isUnsignedIntegerType();
+ bool RHSUnsigned = RHSC->isUnsignedIntegerType();
+
+ unsigned LHSRank = getIntegerRank(LHSC);
+ unsigned RHSRank = getIntegerRank(RHSC);
+
+ if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned.
+ if (LHSRank == RHSRank) return 0;
+ return LHSRank > RHSRank ? 1 : -1;
+ }
+
+ // Otherwise, the LHS is signed and the RHS is unsigned or vice versa.
+ if (LHSUnsigned) {
+ // If the unsigned [LHS] type is larger, return it.
+ if (LHSRank >= RHSRank)
+ return 1;
+
+ // If the signed type can represent all values of the unsigned type, it
+ // wins. Because we are dealing with 2's complement and types that are
+ // powers of two larger than each other, this is always safe.
+ return -1;
+ }
+
+ // If the unsigned [RHS] type is larger, return it.
+ if (RHSRank >= LHSRank)
+ return -1;
+
+ // If the signed type can represent all values of the unsigned type, it
+ // wins. Because we are dealing with 2's complement and types that are
+ // powers of two larger than each other, this is always safe.
+ return 1;
+}
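+
+// For illustration: getIntegerTypeOrder(UnsignedIntTy, LongTy) is -1 on a
+// target with 64-bit 'long', because the signed 'long' can represent
+// every 32-bit unsigned value; had the unsigned type's rank been at least
+// as large, the unsigned side would have won instead.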
+
+// getCFConstantStringType - Return the type used for constant CFStrings.
+QualType ASTContext::getCFConstantStringType() const {
+ if (!CFConstantStringTypeDecl) {
+ CFConstantStringTypeDecl = buildImplicitRecord("NSConstantString");
+ CFConstantStringTypeDecl->startDefinition();
+
+ QualType FieldTypes[4];
+
+ // const int *isa;
+ FieldTypes[0] = getPointerType(IntTy.withConst());
+ // int flags;
+ FieldTypes[1] = IntTy;
+ // const char *str;
+ FieldTypes[2] = getPointerType(CharTy.withConst());
+ // long length;
+ FieldTypes[3] = LongTy;
+
+ // Create fields
+ for (unsigned i = 0; i < 4; ++i) {
+ FieldDecl *Field = FieldDecl::Create(*this, CFConstantStringTypeDecl,
+ SourceLocation(),
+ SourceLocation(), nullptr,
+ FieldTypes[i], /*TInfo=*/nullptr,
+ /*BitWidth=*/nullptr,
+ /*Mutable=*/false,
+ ICIS_NoInit);
+ Field->setAccess(AS_public);
+ CFConstantStringTypeDecl->addDecl(Field);
+ }
+
+ CFConstantStringTypeDecl->completeDefinition();
+ }
+
+ return getTagDeclType(CFConstantStringTypeDecl);
+}
+
+QualType ASTContext::getObjCSuperType() const {
+ if (ObjCSuperType.isNull()) {
+ RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super");
+ TUDecl->addDecl(ObjCSuperTypeDecl);
+ ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl);
+ }
+ return ObjCSuperType;
+}
+
+void ASTContext::setCFConstantStringType(QualType T) {
+ const RecordType *Rec = T->getAs<RecordType>();
+ assert(Rec && "Invalid CFConstantStringType");
+ CFConstantStringTypeDecl = Rec->getDecl();
+}
+
+QualType ASTContext::getBlockDescriptorType() const {
+ if (BlockDescriptorType)
+ return getTagDeclType(BlockDescriptorType);
+
+ RecordDecl *RD;
+ // FIXME: Needs the FlagAppleBlock bit.
+ RD = buildImplicitRecord("__block_descriptor");
+ RD->startDefinition();
+
+ QualType FieldTypes[] = {
+ UnsignedLongTy,
+ UnsignedLongTy,
+ };
+
+ static const char *const FieldNames[] = {
+ "reserved",
+ "Size"
+ };
+
+ for (size_t i = 0; i < 2; ++i) {
+ FieldDecl *Field = FieldDecl::Create(
+ *this, RD, SourceLocation(), SourceLocation(),
+ &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr,
+ /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit);
+ Field->setAccess(AS_public);
+ RD->addDecl(Field);
+ }
+
+ RD->completeDefinition();
+
+ BlockDescriptorType = RD;
+
+ return getTagDeclType(BlockDescriptorType);
+}
+
+QualType ASTContext::getBlockDescriptorExtendedType() const {
+ if (BlockDescriptorExtendedType)
+ return getTagDeclType(BlockDescriptorExtendedType);
+
+ RecordDecl *RD;
+ // FIXME: Needs the FlagAppleBlock bit.
+ RD = buildImplicitRecord("__block_descriptor_withcopydispose");
+ RD->startDefinition();
+
+ QualType FieldTypes[] = {
+ UnsignedLongTy,
+ UnsignedLongTy,
+ getPointerType(VoidPtrTy),
+ getPointerType(VoidPtrTy)
+ };
+
+ static const char *const FieldNames[] = {
+ "reserved",
+ "Size",
+ "CopyFuncPtr",
+ "DestroyFuncPtr"
+ };
+
+ for (size_t i = 0; i < 4; ++i) {
+ FieldDecl *Field = FieldDecl::Create(
+ *this, RD, SourceLocation(), SourceLocation(),
+ &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr,
+ /*BitWidth=*/nullptr,
+ /*Mutable=*/false, ICIS_NoInit);
+ Field->setAccess(AS_public);
+ RD->addDecl(Field);
+ }
+
+ RD->completeDefinition();
+
+ BlockDescriptorExtendedType = RD;
+ return getTagDeclType(BlockDescriptorExtendedType);
+}
+
+/// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty"
+/// requires copy/dispose. Note that this must match the logic
+/// in buildByrefHelpers.
+bool ASTContext::BlockRequiresCopying(QualType Ty,
+ const VarDecl *D) {
+ if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) {
+ const Expr *copyExpr = getBlockVarCopyInits(D);
+ if (!copyExpr && record->hasTrivialDestructor()) return false;
+
+ return true;
+ }
+
+ if (!Ty->isObjCRetainableType()) return false;
+
+ Qualifiers qs = Ty.getQualifiers();
+
+ // If we have lifetime, that dominates.
+ if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
+ switch (lifetime) {
+ case Qualifiers::OCL_None: llvm_unreachable("impossible");
+
+ // These are just bits as far as the runtime is concerned.
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ // Tell the runtime that this is ARC __weak, called by the
+ // byref routines.
+ case Qualifiers::OCL_Weak:
+ // ARC __strong __block variables need to be retained.
+ case Qualifiers::OCL_Strong:
+ return true;
+ }
+ llvm_unreachable("fell out of lifetime switch!");
+ }
+ return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) ||
+ Ty->isObjCObjectPointerType());
+}
+
+bool ASTContext::getByrefLifetime(QualType Ty,
+ Qualifiers::ObjCLifetime &LifeTime,
+ bool &HasByrefExtendedLayout) const {
+
+ if (!getLangOpts().ObjC1 ||
+ getLangOpts().getGC() != LangOptions::NonGC)
+ return false;
+
+ HasByrefExtendedLayout = false;
+ if (Ty->isRecordType()) {
+ HasByrefExtendedLayout = true;
+ LifeTime = Qualifiers::OCL_None;
+ } else if ((LifeTime = Ty.getObjCLifetime())) {
+ // Honor the ARC qualifiers.
+ } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) {
+ // The MRR rule.
+ LifeTime = Qualifiers::OCL_ExplicitNone;
+ } else {
+ LifeTime = Qualifiers::OCL_None;
+ }
+ return true;
+}
+
+TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
+ if (!ObjCInstanceTypeDecl)
+ ObjCInstanceTypeDecl =
+ buildImplicitTypedef(getObjCIdType(), "instancetype");
+ return ObjCInstanceTypeDecl;
+}
+
+// This returns true if a type has been typedefed to BOOL:
+// typedef <type> BOOL;
+static bool isTypeTypedefedAsBOOL(QualType T) {
+ if (const TypedefType *TT = dyn_cast<TypedefType>(T))
+ if (IdentifierInfo *II = TT->getDecl()->getIdentifier())
+ return II->isStr("BOOL");
+
+ return false;
+}
+
+/// getObjCEncodingTypeSize - Returns the size of the given type for
+/// Objective-C encoding purposes.
+CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const {
+ if (!type->isIncompleteArrayType() && type->isIncompleteType())
+ return CharUnits::Zero();
+
+ CharUnits sz = getTypeSizeInChars(type);
+
+ // Make all integer and enum types at least as large as an int
+ if (sz.isPositive() && type->isIntegralOrEnumerationType())
+ sz = std::max(sz, getTypeSizeInChars(IntTy));
+ // Treat arrays as pointers, since that's how they're passed in.
+ else if (type->isArrayType())
+ sz = getTypeSizeInChars(VoidPtrTy);
+ return sz;
+}
+
+bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const {
+ return getTargetInfo().getCXXABI().isMicrosoft() &&
+ VD->isStaticDataMember() &&
+ VD->getType()->isIntegralOrEnumerationType() &&
+ !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit();
+}
+
+static inline
+std::string charUnitsToString(const CharUnits &CU) {
+ return llvm::itostr(CU.getQuantity());
+}
+
+/// getObjCEncodingForBlock - Return the encoded type for this block
+/// declaration.
+std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
+ std::string S;
+
+ const BlockDecl *Decl = Expr->getBlockDecl();
+ QualType BlockTy =
+ Expr->getType()->getAs<BlockPointerType>()->getPointeeType();
+ // Encode result type.
+ if (getLangOpts().EncodeExtendedBlockSig)
+ getObjCEncodingForMethodParameter(
+ Decl::OBJC_TQ_None, BlockTy->getAs<FunctionType>()->getReturnType(), S,
+ true /*Extended*/);
+ else
+ getObjCEncodingForType(BlockTy->getAs<FunctionType>()->getReturnType(), S);
+ // Compute size of all parameters.
+ // Start with computing size of a pointer in number of bytes.
+ // FIXME: There might(should) be a better way of doing this computation!
+ SourceLocation Loc;
+ CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
+ CharUnits ParmOffset = PtrSize;
+ for (auto PI : Decl->params()) {
+ QualType PType = PI->getType();
+ CharUnits sz = getObjCEncodingTypeSize(PType);
+ if (sz.isZero())
+ continue;
+ assert (sz.isPositive() && "BlockExpr - Incomplete param type");
+ ParmOffset += sz;
+ }
+ // Size of the argument frame
+ S += charUnitsToString(ParmOffset);
+ // Block pointer and offset.
+ S += "@?0";
+
+ // Argument types.
+ ParmOffset = PtrSize;
+ for (auto PVDecl : Decl->params()) {
+ QualType PType = PVDecl->getOriginalType();
+ if (const ArrayType *AT =
+ dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
+ // Use array's original type only if it has known number of
+ // elements.
+ if (!isa<ConstantArrayType>(AT))
+ PType = PVDecl->getType();
+ } else if (PType->isFunctionType())
+ PType = PVDecl->getType();
+ if (getLangOpts().EncodeExtendedBlockSig)
+ getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType,
+ S, true /*Extended*/);
+ else
+ getObjCEncodingForType(PType, S);
+ S += charUnitsToString(ParmOffset);
+ ParmOffset += getObjCEncodingTypeSize(PType);
+ }
+
+ return S;
+}
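+
+// For illustration (assuming a 64-bit target with 8-byte pointers): a
+// block of type 'void (^)(int)' is encoded as "v12@?0i8" -- 'v' for the
+// void result, 12 for the argument frame size, '@?0' for the block
+// pointer at offset 0, and 'i8' for the int parameter at offset 8.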
+
+bool ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl,
+ std::string& S) {
+ // Encode result type.
+ getObjCEncodingForType(Decl->getReturnType(), S);
+ CharUnits ParmOffset;
+ // Compute size of all parameters.
+ for (auto PI : Decl->params()) {
+ QualType PType = PI->getType();
+ CharUnits sz = getObjCEncodingTypeSize(PType);
+ if (sz.isZero())
+ continue;
+
+ assert (sz.isPositive() &&
+ "getObjCEncodingForFunctionDecl - Incomplete param type");
+ ParmOffset += sz;
+ }
+ S += charUnitsToString(ParmOffset);
+ ParmOffset = CharUnits::Zero();
+
+ // Argument types.
+ for (auto PVDecl : Decl->params()) {
+ QualType PType = PVDecl->getOriginalType();
+ if (const ArrayType *AT =
+ dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
+ // Use array's original type only if it has known number of
+ // elements.
+ if (!isa<ConstantArrayType>(AT))
+ PType = PVDecl->getType();
+ } else if (PType->isFunctionType())
+ PType = PVDecl->getType();
+ getObjCEncodingForType(PType, S);
+ S += charUnitsToString(ParmOffset);
+ ParmOffset += getObjCEncodingTypeSize(PType);
+ }
+
+ return false;
+}
+
+/// getObjCEncodingForMethodParameter - Return the encoded type for a single
+/// method parameter or return type. If Extended, include class names and
+/// block object types.
+void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
+ QualType T, std::string& S,
+ bool Extended) const {
+ // Encode type qualifier, 'in', 'inout', etc. for the parameter.
+ getObjCEncodingForTypeQualifier(QT, S);
+ // Encode parameter type.
+ getObjCEncodingForTypeImpl(T, S, true, true, nullptr,
+ true /*OutermostType*/,
+ false /*EncodingProperty*/,
+ false /*StructField*/,
+ Extended /*EncodeBlockParameters*/,
+ Extended /*EncodeClassNames*/);
+}
+
+/// getObjCEncodingForMethodDecl - Return the encoded type for this method
+/// declaration.
+bool ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
+ std::string& S,
+ bool Extended) const {
+ // FIXME: This is not very efficient.
+ // Encode return type.
+ getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(),
+ Decl->getReturnType(), S, Extended);
+ // Compute size of all parameters.
+ // Start with computing size of a pointer in number of bytes.
+ // FIXME: There might(should) be a better way of doing this computation!
+ SourceLocation Loc;
+ CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
+ // The first two arguments (self and _cmd) are pointers; account for
+ // their size.
+ CharUnits ParmOffset = 2 * PtrSize;
+ for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
+ E = Decl->sel_param_end(); PI != E; ++PI) {
+ QualType PType = (*PI)->getType();
+ CharUnits sz = getObjCEncodingTypeSize(PType);
+ if (sz.isZero())
+ continue;
+
+ assert (sz.isPositive() &&
+ "getObjCEncodingForMethodDecl - Incomplete param type");
+ ParmOffset += sz;
+ }
+ S += charUnitsToString(ParmOffset);
+ S += "@0:";
+ S += charUnitsToString(PtrSize);
+
+ // Argument types.
+ ParmOffset = 2 * PtrSize;
+ for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
+ E = Decl->sel_param_end(); PI != E; ++PI) {
+ const ParmVarDecl *PVDecl = *PI;
+ QualType PType = PVDecl->getOriginalType();
+ if (const ArrayType *AT =
+ dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
+ // Use array's original type only if it has known number of
+ // elements.
+ if (!isa<ConstantArrayType>(AT))
+ PType = PVDecl->getType();
+ } else if (PType->isFunctionType())
+ PType = PVDecl->getType();
+ getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(),
+ PType, S, Extended);
+ S += charUnitsToString(ParmOffset);
+ ParmOffset += getObjCEncodingTypeSize(PType);
+ }
+
+ return false;
+}
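+
+// For illustration (assuming a 64-bit target): the method
+//   - (void)setValue:(int)v;
+// encodes as "v20@0:8i16" -- 'v' return type, a 20-byte frame, self
+// ('@') at offset 0, _cmd (':') at offset 8, and the int ('i') at 16.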
+
+ObjCPropertyImplDecl *
+ASTContext::getObjCPropertyImplDeclForPropertyDecl(
+ const ObjCPropertyDecl *PD,
+ const Decl *Container) const {
+ if (!Container)
+ return nullptr;
+ if (const ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(Container)) {
+ for (auto *PID : CID->property_impls())
+ if (PID->getPropertyDecl() == PD)
+ return PID;
+ } else {
+ const ObjCImplementationDecl *OID=cast<ObjCImplementationDecl>(Container);
+ for (auto *PID : OID->property_impls())
+ if (PID->getPropertyDecl() == PD)
+ return PID;
+ }
+ return nullptr;
+}
+
+/// getObjCEncodingForPropertyDecl - Return the encoded type for this
+/// property declaration. If non-NULL, Container must be either an
+/// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
+/// NULL when getting encodings for protocol properties.
+/// Property attributes are stored as a comma-delimited C string. The simple
+/// attributes readonly and bycopy are encoded as single characters. The
+/// parametrized attributes, getter=name, setter=name, and ivar=name, are
+/// encoded as single characters, followed by an identifier. Property types
+/// are also encoded as a parametrized attribute. The characters used to encode
+/// these attributes are defined by the following enumeration:
+/// @code
+/// enum PropertyAttributes {
+/// kPropertyReadOnly = 'R', // property is read-only.
+/// kPropertyBycopy = 'C', // property is a copy of the value last assigned
+/// kPropertyByref = '&', // property is a reference to the value last assigned
+/// kPropertyDynamic = 'D', // property is dynamic
+/// kPropertyGetter = 'G', // followed by getter selector name
+/// kPropertySetter = 'S', // followed by setter selector name
+/// kPropertyInstanceVariable = 'V' // followed by instance variable name
+/// kPropertyType = 'T' // followed by old-style type encoding.
+/// kPropertyWeak = 'W' // 'weak' property
+/// kPropertyStrong = 'P' // property GC'able
+/// kPropertyNonAtomic = 'N' // property non-atomic
+/// };
+/// @endcode
+void ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
+ const Decl *Container,
+ std::string& S) const {
+ // Collect information from the property implementation decl(s).
+ bool Dynamic = false;
+ ObjCPropertyImplDecl *SynthesizePID = nullptr;
+
+ if (ObjCPropertyImplDecl *PropertyImpDecl =
+ getObjCPropertyImplDeclForPropertyDecl(PD, Container)) {
+ if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ Dynamic = true;
+ else
+ SynthesizePID = PropertyImpDecl;
+ }
+
+ // FIXME: This is not very efficient.
+ S = "T";
+
+ // Encode result type.
+ // GCC has some special rules regarding the encoding of properties,
+ // which closely resemble the encoding of ivars.
+ getObjCEncodingForPropertyType(PD->getType(), S);
+
+ if (PD->isReadOnly()) {
+ S += ",R";
+ if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_copy)
+ S += ",C";
+ if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_retain)
+ S += ",&";
+ if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)
+ S += ",W";
+ } else {
+ switch (PD->getSetterKind()) {
+ case ObjCPropertyDecl::Assign: break;
+ case ObjCPropertyDecl::Copy: S += ",C"; break;
+ case ObjCPropertyDecl::Retain: S += ",&"; break;
+ case ObjCPropertyDecl::Weak: S += ",W"; break;
+ }
+ }
+
+ // It really isn't clear at all what this means, since properties
+ // are "dynamic by default".
+ if (Dynamic)
+ S += ",D";
+
+ if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ S += ",N";
+
+ if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) {
+ S += ",G";
+ S += PD->getGetterName().getAsString();
+ }
+
+ if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) {
+ S += ",S";
+ S += PD->getSetterName().getAsString();
+ }
+
+ if (SynthesizePID) {
+ const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl();
+ S += ",V";
+ S += OID->getNameAsString();
+ }
+
+ // FIXME: OBJCGC: weak & strong
+}
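+
+// For illustration: a declaration such as
+//   @property (nonatomic, copy) NSString *name;
+// synthesized to the ivar '_name' yields roughly the encoded string
+// T@"NSString",C,N,V_name (type, copy, nonatomic, ivar name), per the
+// attribute table above.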
+
+/// getLegacyIntegralTypeEncoding -
+/// Another legacy compatibility encoding: 32-bit longs are encoded as
+/// 'l' or 'L', but not always. For typedefs, we need to use
+/// 'i' or 'I' instead when encoding a struct field or a pointer.
+///
+void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const {
+ if (isa<TypedefType>(PointeeTy.getTypePtr())) {
+ if (const BuiltinType *BT = PointeeTy->getAs<BuiltinType>()) {
+ if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32)
+ PointeeTy = UnsignedIntTy;
+ else
+ if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32)
+ PointeeTy = IntTy;
+ }
+ }
+}
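+
+// For illustration: on a 32-bit target, a struct field declared through a
+// typedef of 'unsigned long' is encoded as 'I' rather than 'L', keeping
+// the encoding compatible with the legacy 'int'-based form.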
+
+void ASTContext::getObjCEncodingForType(QualType T, std::string& S,
+ const FieldDecl *Field,
+ QualType *NotEncodedT) const {
+ // We follow the behavior of gcc, expanding structures which are
+ // directly pointed to, and expanding embedded structures. Note that
+ // these rules are sufficient to prevent recursive encoding of the
+ // same type.
+ getObjCEncodingForTypeImpl(T, S, true, true, Field,
+ true /* outermost type */, false, false,
+ false, false, false, NotEncodedT);
+}
+
+void ASTContext::getObjCEncodingForPropertyType(QualType T,
+ std::string& S) const {
+ // Encode result type.
+ // GCC has some special rules regarding encoding of properties which
+ // closely resemble the encoding of ivars.
+ getObjCEncodingForTypeImpl(T, S, true, true, nullptr,
+ true /* outermost type */,
+ true /* encoding property */);
+}
+
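+/// Map a builtin type to its one-character @encode string; for example
+/// (illustrative), float maps to 'f', long long to 'q', and unsigned long
+/// to 'L' on targets where long is 32 bits wide but 'Q' elsewhere.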
+static char getObjCEncodingForPrimitiveKind(const ASTContext *C,
+ BuiltinType::Kind kind) {
+ switch (kind) {
+ case BuiltinType::Void: return 'v';
+ case BuiltinType::Bool: return 'B';
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar: return 'C';
+ case BuiltinType::Char16:
+ case BuiltinType::UShort: return 'S';
+ case BuiltinType::Char32:
+ case BuiltinType::UInt: return 'I';
+ case BuiltinType::ULong:
+ return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q';
+ case BuiltinType::UInt128: return 'T';
+ case BuiltinType::ULongLong: return 'Q';
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar: return 'c';
+ case BuiltinType::Short: return 's';
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ case BuiltinType::Int: return 'i';
+ case BuiltinType::Long:
+ return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q';
+ case BuiltinType::LongLong: return 'q';
+ case BuiltinType::Int128: return 't';
+ case BuiltinType::Float: return 'f';
+ case BuiltinType::Double: return 'd';
+ case BuiltinType::LongDouble: return 'D';
+ case BuiltinType::NullPtr: return '*'; // like char*
+
+ case BuiltinType::Half:
+ // FIXME: potentially need @encodes for these!
+ return ' ';
+
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ llvm_unreachable("@encoding ObjC primitive type");
+
+ // OpenCL and placeholder types don't need @encodings.
+ case BuiltinType::OCLImage1d:
+ case BuiltinType::OCLImage1dArray:
+ case BuiltinType::OCLImage1dBuffer:
+ case BuiltinType::OCLImage2d:
+ case BuiltinType::OCLImage2dArray:
+ case BuiltinType::OCLImage2dDepth:
+ case BuiltinType::OCLImage2dArrayDepth:
+ case BuiltinType::OCLImage2dMSAA:
+ case BuiltinType::OCLImage2dArrayMSAA:
+ case BuiltinType::OCLImage2dMSAADepth:
+ case BuiltinType::OCLImage2dArrayMSAADepth:
+ case BuiltinType::OCLImage3d:
+ case BuiltinType::OCLEvent:
+ case BuiltinType::OCLClkEvent:
+ case BuiltinType::OCLQueue:
+ case BuiltinType::OCLNDRange:
+ case BuiltinType::OCLReserveID:
+ case BuiltinType::OCLSampler:
+ case BuiltinType::Dependent:
+#define BUILTIN_TYPE(KIND, ID)
+#define PLACEHOLDER_TYPE(KIND, ID) \
+ case BuiltinType::KIND:
+#include "clang/AST/BuiltinTypes.def"
+ llvm_unreachable("invalid builtin type for @encode");
+ }
+ llvm_unreachable("invalid BuiltinType::Kind value");
+}
+
+static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) {
+ EnumDecl *Enum = ET->getDecl();
+
+ // The encoding of a non-fixed enum type is always 'i', regardless of size.
+ if (!Enum->isFixed())
+ return 'i';
+
+ // The encoding of a fixed enum type matches its fixed underlying type.
+ const BuiltinType *BT = Enum->getIntegerType()->castAs<BuiltinType>();
+ return getObjCEncodingForPrimitiveKind(C, BT->getKind());
+}
+
+static void EncodeBitField(const ASTContext *Ctx, std::string& S,
+ QualType T, const FieldDecl *FD) {
+ assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl");
+ S += 'b';
+ // The NeXT runtime encodes bit fields as b followed by the number of bits.
+ // The GNU runtime requires more information; bitfields are encoded as b,
+ // then the offset (in bits) of the first element, then the type of the
+ // bitfield, then the size in bits. For example, in this structure:
+ //
+ // struct
+ // {
+ // int integer;
+ // int flags:2;
+ // };
+ // On a 32-bit system, the encoding for flags would be b2 for the NeXT
+ // runtime, but b32i2 for the GNU runtime. The reason for this extra
+ // information is not especially sensible, but we're stuck with it for
+ // compatibility with GCC, although providing it breaks anything that
+ // actually uses runtime introspection and wants to work on both runtimes...
+ if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) {
+ const RecordDecl *RD = FD->getParent();
+ const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD);
+ S += llvm::utostr(RL.getFieldOffset(FD->getFieldIndex()));
+ if (const EnumType *ET = T->getAs<EnumType>())
+ S += ObjCEncodingForEnumType(Ctx, ET);
+ else {
+ const BuiltinType *BT = T->castAs<BuiltinType>();
+ S += getObjCEncodingForPrimitiveKind(Ctx, BT->getKind());
+ }
+ }
+ S += llvm::utostr(FD->getBitWidthValue(*Ctx));
+}
+
+// FIXME: Use SmallString for accumulating string.
+void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
+ bool ExpandPointedToStructures,
+ bool ExpandStructures,
+ const FieldDecl *FD,
+ bool OutermostType,
+ bool EncodingProperty,
+ bool StructField,
+ bool EncodeBlockParameters,
+ bool EncodeClassNames,
+ bool EncodePointerToObjCTypedef,
+ QualType *NotEncodedT) const {
+ CanQualType CT = getCanonicalType(T);
+ switch (CT->getTypeClass()) {
+ case Type::Builtin:
+ case Type::Enum:
+ if (FD && FD->isBitField())
+ return EncodeBitField(this, S, T, FD);
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CT))
+ S += getObjCEncodingForPrimitiveKind(this, BT->getKind());
+ else
+ S += ObjCEncodingForEnumType(this, cast<EnumType>(CT));
+ return;
+
+ case Type::Complex: {
+ const ComplexType *CT = T->castAs<ComplexType>();
+ S += 'j';
+ getObjCEncodingForTypeImpl(CT->getElementType(), S, false, false, nullptr);
+ return;
+ }
+
+ case Type::Atomic: {
+ const AtomicType *AT = T->castAs<AtomicType>();
+ S += 'A';
+ getObjCEncodingForTypeImpl(AT->getValueType(), S, false, false, nullptr);
+ return;
+ }
+
+ // encoding for pointer or reference types.
+ case Type::Pointer:
+ case Type::LValueReference:
+ case Type::RValueReference: {
+ QualType PointeeTy;
+ if (isa<PointerType>(CT)) {
+ const PointerType *PT = T->castAs<PointerType>();
+ if (PT->isObjCSelType()) {
+ S += ':';
+ return;
+ }
+ PointeeTy = PT->getPointeeType();
+ } else {
+ PointeeTy = T->castAs<ReferenceType>()->getPointeeType();
+ }
+
+ bool isReadOnly = false;
+ // For historical/compatibility reasons, the read-only qualifier of the
+ // pointee gets emitted _before_ the '^'. The read-only qualifier of
+ // the pointer itself gets ignored, _unless_ we are looking at a typedef!
+ // Also, do not emit the 'r' for anything but the outermost type!
+ if (isa<TypedefType>(T.getTypePtr())) {
+ if (OutermostType && T.isConstQualified()) {
+ isReadOnly = true;
+ S += 'r';
+ }
+ } else if (OutermostType) {
+ QualType P = PointeeTy;
+ while (P->getAs<PointerType>())
+ P = P->getAs<PointerType>()->getPointeeType();
+ if (P.isConstQualified()) {
+ isReadOnly = true;
+ S += 'r';
+ }
+ }
+ if (isReadOnly) {
+ // Another legacy compatibility encoding. Some ObjC qualifier and type
+ // combinations need to be rearranged.
+ // Rewrite "in const" from "nr" to "rn"
+ if (StringRef(S).endswith("nr"))
+ S.replace(S.end()-2, S.end(), "rn");
+ }
+
+ if (PointeeTy->isCharType()) {
+ // A char pointer is encoded as '*' unless the pointee is a
+ // type that has been typedef'd to 'BOOL'.
+ if (!isTypeTypedefedAsBOOL(PointeeTy)) {
+ S += '*';
+ return;
+ }
+ } else if (const RecordType *RTy = PointeeTy->getAs<RecordType>()) {
+ // GCC binary compat: Need to convert "struct objc_class *" to "#".
+ if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) {
+ S += '#';
+ return;
+ }
+ // GCC binary compat: Need to convert "struct objc_object *" to "@".
+ if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) {
+ S += '@';
+ return;
+ }
+ // fall through...
+ }
+ S += '^';
+ getLegacyIntegralTypeEncoding(PointeeTy);
+
+ getObjCEncodingForTypeImpl(PointeeTy, S, false, ExpandPointedToStructures,
+ nullptr, false, false, false, false, false, false,
+ NotEncodedT);
+ return;
+ }
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray: {
+ const ArrayType *AT = cast<ArrayType>(CT);
+
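+ // Illustrative examples: 'int[4]' is encoded as "[4i]", a variable-length
+ // array of char as "[0c]", and an incomplete array that is not a struct
+ // field as a pointer to its element type.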
+ if (isa<IncompleteArrayType>(AT) && !StructField) {
+ // Incomplete arrays are encoded as a pointer to the array element.
+ S += '^';
+
+ getObjCEncodingForTypeImpl(AT->getElementType(), S,
+ false, ExpandStructures, FD);
+ } else {
+ S += '[';
+
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
+ S += llvm::utostr(CAT->getSize().getZExtValue());
+ else {
+ // Variable-length arrays are encoded as a regular array with 0 elements.
+ assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) &&
+ "Unknown array type!");
+ S += '0';
+ }
+
+ getObjCEncodingForTypeImpl(AT->getElementType(), S,
+ false, ExpandStructures, FD,
+ false, false, false, false, false, false,
+ NotEncodedT);
+ S += ']';
+ }
+ return;
+ }
+
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ S += '?';
+ return;
+
+ case Type::Record: {
+ RecordDecl *RDecl = cast<RecordType>(CT)->getDecl();
+ S += RDecl->isUnion() ? '(' : '{';
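+ // Illustrative examples: 'struct Point { int x; int y; };' is encoded as
+ // "{Point=ii}" when structures are expanded, and an anonymous union of a
+ // float and an int as "(?=fi)".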
+ // Anonymous structures print as '?'
+ if (const IdentifierInfo *II = RDecl->getIdentifier()) {
+ S += II->getName();
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) {
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ llvm::raw_string_ostream OS(S);
+ TemplateSpecializationType::PrintTemplateArgumentList(OS,
+ TemplateArgs.data(),
+ TemplateArgs.size(),
+ (*this).getPrintingPolicy());
+ }
+ } else {
+ S += '?';
+ }
+ if (ExpandStructures) {
+ S += '=';
+ if (!RDecl->isUnion()) {
+ getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT);
+ } else {
+ for (const auto *Field : RDecl->fields()) {
+ if (FD) {
+ S += '"';
+ S += Field->getNameAsString();
+ S += '"';
+ }
+
+ // Special case bit-fields.
+ if (Field->isBitField()) {
+ getObjCEncodingForTypeImpl(Field->getType(), S, false, true,
+ Field);
+ } else {
+ QualType qt = Field->getType();
+ getLegacyIntegralTypeEncoding(qt);
+ getObjCEncodingForTypeImpl(qt, S, false, true,
+ FD, /*OutermostType*/false,
+ /*EncodingProperty*/false,
+ /*StructField*/true,
+ false, false, false, NotEncodedT);
+ }
+ }
+ }
+ }
+ S += RDecl->isUnion() ? ')' : '}';
+ return;
+ }
+
+ case Type::BlockPointer: {
+ const BlockPointerType *BT = T->castAs<BlockPointerType>();
+ S += "@?"; // Unlike a pointer-to-function, which is "^?".
+ if (EncodeBlockParameters) {
+ const FunctionType *FT = BT->getPointeeType()->castAs<FunctionType>();
+
+ S += '<';
+ // Block return type
+ getObjCEncodingForTypeImpl(
+ FT->getReturnType(), S, ExpandPointedToStructures, ExpandStructures,
+ FD, false /* OutermostType */, EncodingProperty,
+ false /* StructField */, EncodeBlockParameters, EncodeClassNames, false,
+ NotEncodedT);
+ // Block self
+ S += "@?";
+ // Block parameters
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
+ for (const auto &I : FPT->param_types())
+ getObjCEncodingForTypeImpl(
+ I, S, ExpandPointedToStructures, ExpandStructures, FD,
+ false /* OutermostType */, EncodingProperty,
+ false /* StructField */, EncodeBlockParameters, EncodeClassNames,
+ false, NotEncodedT);
+ }
+ S += '>';
+ }
+ return;
+ }
+
+ case Type::ObjCObject: {
+ // hack to match legacy encoding of *id and *Class
+ QualType Ty = getObjCObjectPointerType(CT);
+ if (Ty->isObjCIdType()) {
+ S += "{objc_object=}";
+ return;
+ }
+ else if (Ty->isObjCClassType()) {
+ S += "{objc_class=}";
+ return;
+ }
+ }
+
+ case Type::ObjCInterface: {
+ // Ignore protocol qualifiers when mangling at this level.
+ // @encode(class_name)
+ ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface();
+ S += '{';
+ S += OI->getObjCRuntimeNameAsString();
+ S += '=';
+ SmallVector<const ObjCIvarDecl*, 32> Ivars;
+ DeepCollectObjCIvars(OI, true, Ivars);
+ for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
+ const FieldDecl *Field = cast<FieldDecl>(Ivars[i]);
+ if (Field->isBitField())
+ getObjCEncodingForTypeImpl(Field->getType(), S, false, true, Field);
+ else
+ getObjCEncodingForTypeImpl(Field->getType(), S, false, true, FD,
+ false, false, false, false, false,
+ EncodePointerToObjCTypedef,
+ NotEncodedT);
+ }
+ S += '}';
+ return;
+ }
+
+ case Type::ObjCObjectPointer: {
+ const ObjCObjectPointerType *OPT = T->castAs<ObjCObjectPointerType>();
+ if (OPT->isObjCIdType()) {
+ S += '@';
+ return;
+ }
+
+ if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) {
+ // FIXME: Consider if we need to output qualifiers for 'Class<p>'.
+ // Since this is a binary compatibility issue, we need to consult with
+ // the runtime folks. Fortunately, this is a *very* obscure construct.
+ S += '#';
+ return;
+ }
+
+ if (OPT->isObjCQualifiedIdType()) {
+ getObjCEncodingForTypeImpl(getObjCIdType(), S,
+ ExpandPointedToStructures,
+ ExpandStructures, FD);
+ if (FD || EncodingProperty || EncodeClassNames) {
+ // Note that we do extended encoding of the protocol qualifier list
+ // only when doing ivar or property encoding.
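+ // e.g. (illustrative) an ivar of type id<NSCopying> is encoded as
+ // @"<NSCopying>".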
+ S += '"';
+ for (const auto *I : OPT->quals()) {
+ S += '<';
+ S += I->getObjCRuntimeNameAsString();
+ S += '>';
+ }
+ S += '"';
+ }
+ return;
+ }
+
+ QualType PointeeTy = OPT->getPointeeType();
+ if (!EncodingProperty &&
+ isa<TypedefType>(PointeeTy.getTypePtr()) &&
+ !EncodePointerToObjCTypedef) {
+ // For another historical/compatibility reason, we encode the
+ // underlying type, which comes out as
+ // {...};
+ S += '^';
+ if (FD && OPT->getInterfaceDecl()) {
+ // Prevent recursive encoding of fields in some rare cases.
+ ObjCInterfaceDecl *OI = OPT->getInterfaceDecl();
+ SmallVector<const ObjCIvarDecl*, 32> Ivars;
+ DeepCollectObjCIvars(OI, true, Ivars);
+ for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
+ if (cast<FieldDecl>(Ivars[i]) == FD) {
+ S += '{';
+ S += OI->getObjCRuntimeNameAsString();
+ S += '}';
+ return;
+ }
+ }
+ }
+ getObjCEncodingForTypeImpl(PointeeTy, S,
+ false, ExpandPointedToStructures,
+ nullptr,
+ false, false, false, false, false,
+ /*EncodePointerToObjCTypedef*/true);
+ return;
+ }
+
+ S += '@';
+ if (OPT->getInterfaceDecl() &&
+ (FD || EncodingProperty || EncodeClassNames)) {
+ S += '"';
+ S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString();
+ for (const auto *I : OPT->quals()) {
+ S += '<';
+ S += I->getObjCRuntimeNameAsString();
+ S += '>';
+ }
+ S += '"';
+ }
+ return;
+ }
+
+ // gcc just blithely ignores member pointers.
+ // FIXME: we should do better than that. 'M' is available.
+ case Type::MemberPointer:
+ // This matches gcc's encoding, even though technically it is insufficient.
+ // FIXME: We should do a better job than gcc.
+ case Type::Vector:
+ case Type::ExtVector:
+ // Until we have a coherent encoding of these three types, issue a warning.
+ { if (NotEncodedT)
+ *NotEncodedT = T;
+ return;
+ }
+
+ // We could see an undeduced auto type here during error recovery.
+ // Just ignore it.
+ case Type::Auto:
+ return;
+
+#define ABSTRACT_TYPE(KIND, BASE)
+#define TYPE(KIND, BASE)
+#define DEPENDENT_TYPE(KIND, BASE) \
+ case Type::KIND:
+#define NON_CANONICAL_TYPE(KIND, BASE) \
+ case Type::KIND:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \
+ case Type::KIND:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("@encode for dependent type!");
+ }
+ llvm_unreachable("bad type kind!");
+}
+
+void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
+ std::string &S,
+ const FieldDecl *FD,
+ bool includeVBases,
+ QualType *NotEncodedT) const {
+ assert(RDecl && "Expected non-null RecordDecl");
+ assert(!RDecl->isUnion() && "Should not be called for unions");
+ if (!RDecl->getDefinition())
+ return;
+
+ CXXRecordDecl *CXXRec = dyn_cast<CXXRecordDecl>(RDecl);
+ std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets;
+ const ASTRecordLayout &layout = getASTRecordLayout(RDecl);
+
+ if (CXXRec) {
+ for (const auto &BI : CXXRec->bases()) {
+ if (!BI.isVirtual()) {
+ CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
+ if (base->isEmpty())
+ continue;
+ uint64_t offs = toBits(layout.getBaseClassOffset(base));
+ FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
+ std::make_pair(offs, base));
+ }
+ }
+ }
+
+ unsigned i = 0;
+ for (auto *Field : RDecl->fields()) {
+ uint64_t offs = layout.getFieldOffset(i);
+ FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
+ std::make_pair(offs, Field));
+ ++i;
+ }
+
+ if (CXXRec && includeVBases) {
+ for (const auto &BI : CXXRec->vbases()) {
+ CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
+ if (base->isEmpty())
+ continue;
+ uint64_t offs = toBits(layout.getVBaseClassOffset(base));
+ if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) &&
+ FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end())
+ FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(),
+ std::make_pair(offs, base));
+ }
+ }
+
+ CharUnits size;
+ if (CXXRec) {
+ size = includeVBases ? layout.getSize() : layout.getNonVirtualSize();
+ } else {
+ size = layout.getSize();
+ }
+
+#ifndef NDEBUG
+ uint64_t CurOffs = 0;
+#endif
+ std::multimap<uint64_t, NamedDecl *>::iterator
+ CurLayObj = FieldOrBaseOffsets.begin();
+
+ if (CXXRec && CXXRec->isDynamicClass() &&
+ (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) {
+ if (FD) {
+ S += "\"_vptr$";
+ std::string recname = CXXRec->getNameAsString();
+ if (recname.empty()) recname = "?";
+ S += recname;
+ S += '"';
+ }
+ S += "^^?";
+#ifndef NDEBUG
+ CurOffs += getTypeSize(VoidPtrTy);
+#endif
+ }
+
+ if (!RDecl->hasFlexibleArrayMember()) {
+ // Mark the end of the structure.
+ uint64_t offs = toBits(size);
+ FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
+ std::make_pair(offs, nullptr));
+ }
+
+ for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
+#ifndef NDEBUG
+ assert(CurOffs <= CurLayObj->first);
+ if (CurOffs < CurLayObj->first) {
+ uint64_t padding = CurLayObj->first - CurOffs;
+ // FIXME: There doesn't seem to be a way to indicate in the encoding that
+ // packing/alignment of members is different than normal, in which case
+ // the encoding will be out-of-sync with the real layout.
+ // If the runtime switches to just consider the size of types without
+ // taking into account alignment, we could make padding explicit in the
+ // encoding (e.g. using arrays of chars). The encoding strings would be
+ // longer, though.
+ CurOffs += padding;
+ }
+#endif
+
+ NamedDecl *dcl = CurLayObj->second;
+ if (!dcl)
+ break; // reached end of structure.
+
+ if (CXXRecordDecl *base = dyn_cast<CXXRecordDecl>(dcl)) {
+ // We expand the bases without their virtual bases since those are going
+ // in the initial structure. Note that this differs from gcc which
+ // expands virtual bases each time one is encountered in the hierarchy,
+ // making the encoding type bigger than it really is.
+ getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false,
+ NotEncodedT);
+ assert(!base->isEmpty());
+#ifndef NDEBUG
+ CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize());
+#endif
+ } else {
+ FieldDecl *field = cast<FieldDecl>(dcl);
+ if (FD) {
+ S += '"';
+ S += field->getNameAsString();
+ S += '"';
+ }
+
+ if (field->isBitField()) {
+ EncodeBitField(this, S, field->getType(), field);
+#ifndef NDEBUG
+ CurOffs += field->getBitWidthValue(*this);
+#endif
+ } else {
+ QualType qt = field->getType();
+ getLegacyIntegralTypeEncoding(qt);
+ getObjCEncodingForTypeImpl(qt, S, false, true, FD,
+ /*OutermostType*/false,
+ /*EncodingProperty*/false,
+ /*StructField*/true,
+ false, false, false, NotEncodedT);
+#ifndef NDEBUG
+ CurOffs += getTypeSize(field->getType());
+#endif
+ }
+ }
+ }
+}
+
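+/// Encode the Objective-C method parameter qualifiers; for example
+/// (illustrative), an 'inout' parameter contributes 'N' and a 'oneway'
+/// result contributes 'V'.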
+void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT,
+ std::string& S) const {
+ if (QT & Decl::OBJC_TQ_In)
+ S += 'n';
+ if (QT & Decl::OBJC_TQ_Inout)
+ S += 'N';
+ if (QT & Decl::OBJC_TQ_Out)
+ S += 'o';
+ if (QT & Decl::OBJC_TQ_Bycopy)
+ S += 'O';
+ if (QT & Decl::OBJC_TQ_Byref)
+ S += 'R';
+ if (QT & Decl::OBJC_TQ_Oneway)
+ S += 'V';
+}
+
+TypedefDecl *ASTContext::getObjCIdDecl() const {
+ if (!ObjCIdDecl) {
+ QualType T = getObjCObjectType(ObjCBuiltinIdTy, { }, { });
+ T = getObjCObjectPointerType(T);
+ ObjCIdDecl = buildImplicitTypedef(T, "id");
+ }
+ return ObjCIdDecl;
+}
+
+TypedefDecl *ASTContext::getObjCSelDecl() const {
+ if (!ObjCSelDecl) {
+ QualType T = getPointerType(ObjCBuiltinSelTy);
+ ObjCSelDecl = buildImplicitTypedef(T, "SEL");
+ }
+ return ObjCSelDecl;
+}
+
+TypedefDecl *ASTContext::getObjCClassDecl() const {
+ if (!ObjCClassDecl) {
+ QualType T = getObjCObjectType(ObjCBuiltinClassTy, { }, { });
+ T = getObjCObjectPointerType(T);
+ ObjCClassDecl = buildImplicitTypedef(T, "Class");
+ }
+ return ObjCClassDecl;
+}
+
+ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const {
+ if (!ObjCProtocolClassDecl) {
+ ObjCProtocolClassDecl
+ = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(),
+ &Idents.get("Protocol"),
+ /*typeParamList=*/nullptr,
+ /*PrevDecl=*/nullptr,
+ SourceLocation(), true);
+ }
+
+ return ObjCProtocolClassDecl;
+}
+
+//===----------------------------------------------------------------------===//
+// __builtin_va_list Construction Functions
+//===----------------------------------------------------------------------===//
+
+static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context,
+ StringRef Name) {
+ // typedef char* __builtin[_ms]_va_list;
+ QualType T = Context->getPointerType(Context->CharTy);
+ return Context->buildImplicitTypedef(T, Name);
+}
+
+static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) {
+ return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list");
+}
+
+static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) {
+ return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list");
+}
+
+static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) {
+ // typedef void* __builtin_va_list;
+ QualType T = Context->getPointerType(Context->VoidTy);
+ return Context->buildImplicitTypedef(T, "__builtin_va_list");
+}
+
+static TypedefDecl *
+CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) {
+ // struct __va_list
+ RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list");
+ if (Context->getLangOpts().CPlusPlus) {
+ // namespace std { struct __va_list {
+ NamespaceDecl *NS;
+ NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context),
+ Context->getTranslationUnitDecl(),
+ /*Inline*/ false, SourceLocation(),
+ SourceLocation(), &Context->Idents.get("std"),
+ /*PrevDecl*/ nullptr);
+ NS->setImplicit();
+ VaListTagDecl->setDeclContext(NS);
+ }
+
+ VaListTagDecl->startDefinition();
+
+ const size_t NumFields = 5;
+ QualType FieldTypes[NumFields];
+ const char *FieldNames[NumFields];
+
+ // void *__stack;
+ FieldTypes[0] = Context->getPointerType(Context->VoidTy);
+ FieldNames[0] = "__stack";
+
+ // void *__gr_top;
+ FieldTypes[1] = Context->getPointerType(Context->VoidTy);
+ FieldNames[1] = "__gr_top";
+
+ // void *__vr_top;
+ FieldTypes[2] = Context->getPointerType(Context->VoidTy);
+ FieldNames[2] = "__vr_top";
+
+ // int __gr_offs;
+ FieldTypes[3] = Context->IntTy;
+ FieldNames[3] = "__gr_offs";
+
+ // int __vr_offs;
+ FieldTypes[4] = Context->IntTy;
+ FieldNames[4] = "__vr_offs";
+
+ // Create fields
+ for (unsigned i = 0; i < NumFields; ++i) {
+ FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context),
+ VaListTagDecl,
+ SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get(FieldNames[i]),
+ FieldTypes[i], /*TInfo=*/nullptr,
+ /*BitWidth=*/nullptr,
+ /*Mutable=*/false,
+ ICIS_NoInit);
+ Field->setAccess(AS_public);
+ VaListTagDecl->addDecl(Field);
+ }
+ VaListTagDecl->completeDefinition();
+ Context->VaListTagDecl = VaListTagDecl;
+ QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+
+ // } __builtin_va_list;
+ return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list");
+}
+
+static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) {
+ // typedef struct __va_list_tag {
+ RecordDecl *VaListTagDecl;
+
+ VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
+ VaListTagDecl->startDefinition();
+
+ const size_t NumFields = 5;
+ QualType FieldTypes[NumFields];
+ const char *FieldNames[NumFields];
+
+ // unsigned char gpr;
+ FieldTypes[0] = Context->UnsignedCharTy;
+ FieldNames[0] = "gpr";
+
+ // unsigned char fpr;
+ FieldTypes[1] = Context->UnsignedCharTy;
+ FieldNames[1] = "fpr";
+
+ // unsigned short reserved;
+ FieldTypes[2] = Context->UnsignedShortTy;
+ FieldNames[2] = "reserved";
+
+ // void* overflow_arg_area;
+ FieldTypes[3] = Context->getPointerType(Context->VoidTy);
+ FieldNames[3] = "overflow_arg_area";
+
+ // void* reg_save_area;
+ FieldTypes[4] = Context->getPointerType(Context->VoidTy);
+ FieldNames[4] = "reg_save_area";
+
+ // Create fields
+ for (unsigned i = 0; i < NumFields; ++i) {
+ FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl,
+ SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get(FieldNames[i]),
+ FieldTypes[i], /*TInfo=*/nullptr,
+ /*BitWidth=*/nullptr,
+ /*Mutable=*/false,
+ ICIS_NoInit);
+ Field->setAccess(AS_public);
+ VaListTagDecl->addDecl(Field);
+ }
+ VaListTagDecl->completeDefinition();
+ Context->VaListTagDecl = VaListTagDecl;
+ QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+
+ // } __va_list_tag;
+ TypedefDecl *VaListTagTypedefDecl =
+ Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
+
+ QualType VaListTagTypedefType =
+ Context->getTypedefType(VaListTagTypedefDecl);
+
+ // typedef __va_list_tag __builtin_va_list[1];
+ llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
+ QualType VaListTagArrayType
+ = Context->getConstantArrayType(VaListTagTypedefType,
+ Size, ArrayType::Normal, 0);
+ return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
+}
+
+static TypedefDecl *
+CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) {
+ // struct __va_list_tag {
+ RecordDecl *VaListTagDecl;
+ VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
+ VaListTagDecl->startDefinition();
+
+ const size_t NumFields = 4;
+ QualType FieldTypes[NumFields];
+ const char *FieldNames[NumFields];
+
+ // unsigned gp_offset;
+ FieldTypes[0] = Context->UnsignedIntTy;
+ FieldNames[0] = "gp_offset";
+
+ // unsigned fp_offset;
+ FieldTypes[1] = Context->UnsignedIntTy;
+ FieldNames[1] = "fp_offset";
+
+ // void* overflow_arg_area;
+ FieldTypes[2] = Context->getPointerType(Context->VoidTy);
+ FieldNames[2] = "overflow_arg_area";
+
+ // void* reg_save_area;
+ FieldTypes[3] = Context->getPointerType(Context->VoidTy);
+ FieldNames[3] = "reg_save_area";
+
+ // Create fields
+ for (unsigned i = 0; i < NumFields; ++i) {
+ FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context),
+ VaListTagDecl,
+ SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get(FieldNames[i]),
+ FieldTypes[i], /*TInfo=*/nullptr,
+ /*BitWidth=*/nullptr,
+ /*Mutable=*/false,
+ ICIS_NoInit);
+ Field->setAccess(AS_public);
+ VaListTagDecl->addDecl(Field);
+ }
+ VaListTagDecl->completeDefinition();
+ Context->VaListTagDecl = VaListTagDecl;
+ QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+
+ // };
+
+ // typedef struct __va_list_tag __builtin_va_list[1];
+ llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
+ QualType VaListTagArrayType =
+ Context->getConstantArrayType(VaListTagType, Size, ArrayType::Normal, 0);
+ return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
+}
+
+static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) {
+ // typedef int __builtin_va_list[4];
+ llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4);
+ QualType IntArrayType
+ = Context->getConstantArrayType(Context->IntTy,
+ Size, ArrayType::Normal, 0);
+ return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list");
+}
+
+static TypedefDecl *
+CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) {
+ // struct __va_list
+ RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list");
+ if (Context->getLangOpts().CPlusPlus) {
+ // namespace std { struct __va_list {
+ NamespaceDecl *NS;
+ NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context),
+ Context->getTranslationUnitDecl(),
+ /*Inline*/false, SourceLocation(),
+ SourceLocation(), &Context->Idents.get("std"),
+ /*PrevDecl*/ nullptr);
+ NS->setImplicit();
+ VaListDecl->setDeclContext(NS);
+ }
+
+ VaListDecl->startDefinition();
+
+ // void * __ap;
+ FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context),
+ VaListDecl,
+ SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get("__ap"),
+ Context->getPointerType(Context->VoidTy),
+ /*TInfo=*/nullptr,
+ /*BitWidth=*/nullptr,
+ /*Mutable=*/false,
+ ICIS_NoInit);
+ Field->setAccess(AS_public);
+ VaListDecl->addDecl(Field);
+
+ // };
+ VaListDecl->completeDefinition();
+
+ // typedef struct __va_list __builtin_va_list;
+ QualType T = Context->getRecordType(VaListDecl);
+ return Context->buildImplicitTypedef(T, "__builtin_va_list");
+}
+
+static TypedefDecl *
+CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
+ // struct __va_list_tag {
+ RecordDecl *VaListTagDecl;
+ VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
+ VaListTagDecl->startDefinition();
+
+ const size_t NumFields = 4;
+ QualType FieldTypes[NumFields];
+ const char *FieldNames[NumFields];
+
+ // long __gpr;
+ FieldTypes[0] = Context->LongTy;
+ FieldNames[0] = "__gpr";
+
+ // long __fpr;
+ FieldTypes[1] = Context->LongTy;
+ FieldNames[1] = "__fpr";
+
+ // void *__overflow_arg_area;
+ FieldTypes[2] = Context->getPointerType(Context->VoidTy);
+ FieldNames[2] = "__overflow_arg_area";
+
+ // void *__reg_save_area;
+ FieldTypes[3] = Context->getPointerType(Context->VoidTy);
+ FieldNames[3] = "__reg_save_area";
+
+ // Create fields
+ for (unsigned i = 0; i < NumFields; ++i) {
+ FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context),
+ VaListTagDecl,
+ SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get(FieldNames[i]),
+ FieldTypes[i], /*TInfo=*/nullptr,
+ /*BitWidth=*/nullptr,
+ /*Mutable=*/false,
+ ICIS_NoInit);
+ Field->setAccess(AS_public);
+ VaListTagDecl->addDecl(Field);
+ }
+ VaListTagDecl->completeDefinition();
+ Context->VaListTagDecl = VaListTagDecl;
+ QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+
+ // };
+
+ // typedef __va_list_tag __builtin_va_list[1];
+ llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
+ QualType VaListTagArrayType =
+ Context->getConstantArrayType(VaListTagType, Size, ArrayType::Normal, 0);
+
+ return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
+}
+
+static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
+ TargetInfo::BuiltinVaListKind Kind) {
+ switch (Kind) {
+ case TargetInfo::CharPtrBuiltinVaList:
+ return CreateCharPtrBuiltinVaListDecl(Context);
+ case TargetInfo::VoidPtrBuiltinVaList:
+ return CreateVoidPtrBuiltinVaListDecl(Context);
+ case TargetInfo::AArch64ABIBuiltinVaList:
+ return CreateAArch64ABIBuiltinVaListDecl(Context);
+ case TargetInfo::PowerABIBuiltinVaList:
+ return CreatePowerABIBuiltinVaListDecl(Context);
+ case TargetInfo::X86_64ABIBuiltinVaList:
+ return CreateX86_64ABIBuiltinVaListDecl(Context);
+ case TargetInfo::PNaClABIBuiltinVaList:
+ return CreatePNaClABIBuiltinVaListDecl(Context);
+ case TargetInfo::AAPCSABIBuiltinVaList:
+ return CreateAAPCSABIBuiltinVaListDecl(Context);
+ case TargetInfo::SystemZBuiltinVaList:
+ return CreateSystemZBuiltinVaListDecl(Context);
+ }
+
+ llvm_unreachable("Unhandled __builtin_va_list type kind");
+}
+
+TypedefDecl *ASTContext::getBuiltinVaListDecl() const {
+ if (!BuiltinVaListDecl) {
+ BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind());
+ assert(BuiltinVaListDecl->isImplicit());
+ }
+
+ return BuiltinVaListDecl;
+}
+
+Decl *ASTContext::getVaListTagDecl() const {
+ // Force the creation of VaListTagDecl by building the __builtin_va_list
+ // declaration.
+ if (!VaListTagDecl)
+ (void)getBuiltinVaListDecl();
+
+ return VaListTagDecl;
+}
+
+TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const {
+ if (!BuiltinMSVaListDecl)
+ BuiltinMSVaListDecl = CreateMSVaListDecl(this);
+
+ return BuiltinMSVaListDecl;
+}
+
+void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
+ assert(ObjCConstantStringType.isNull() &&
+ "'NSConstantString' type already set!");
+
+ ObjCConstantStringType = getObjCInterfaceType(Decl);
+}
+
+/// \brief Retrieve the template name that corresponds to a non-empty
+/// lookup.
+TemplateName
+ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End) const {
+ unsigned size = End - Begin;
+ assert(size > 1 && "set is not overloaded!");
+
+ void *memory = Allocate(sizeof(OverloadedTemplateStorage) +
+ size * sizeof(FunctionTemplateDecl*));
+ OverloadedTemplateStorage *OT = new(memory) OverloadedTemplateStorage(size);
+
+ NamedDecl **Storage = OT->getStorage();
+ for (UnresolvedSetIterator I = Begin; I != End; ++I) {
+ NamedDecl *D = *I;
+ assert(isa<FunctionTemplateDecl>(D) ||
+ (isa<UsingShadowDecl>(D) &&
+ isa<FunctionTemplateDecl>(D->getUnderlyingDecl())));
+ *Storage++ = D;
+ }
+
+ return TemplateName(OT);
+}
+
+/// \brief Retrieve the template name that represents a qualified
+/// template name such as \c std::vector.
+TemplateName
+ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
+ bool TemplateKeyword,
+ TemplateDecl *Template) const {
+ assert(NNS && "Missing nested-name-specifier in qualified template name");
+
+ // FIXME: Canonicalization?
+ llvm::FoldingSetNodeID ID;
+ QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template);
+
+ void *InsertPos = nullptr;
+ QualifiedTemplateName *QTN =
+ QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
+ if (!QTN) {
+ QTN = new (*this, llvm::alignOf<QualifiedTemplateName>())
+ QualifiedTemplateName(NNS, TemplateKeyword, Template);
+ QualifiedTemplateNames.InsertNode(QTN, InsertPos);
+ }
+
+ return TemplateName(QTN);
+}
+
+/// \brief Retrieve the template name that represents a dependent
+/// template name such as \c MetaFun::template apply.
+TemplateName
+ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name) const {
+ assert((!NNS || NNS->isDependent()) &&
+ "Nested name specifier must be dependent");
+
+ llvm::FoldingSetNodeID ID;
+ DependentTemplateName::Profile(ID, NNS, Name);
+
+ void *InsertPos = nullptr;
+ DependentTemplateName *QTN =
+ DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (QTN)
+ return TemplateName(QTN);
+
+ NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+ if (CanonNNS == NNS) {
+ QTN = new (*this, llvm::alignOf<DependentTemplateName>())
+ DependentTemplateName(NNS, Name);
+ } else {
+ TemplateName Canon = getDependentTemplateName(CanonNNS, Name);
+ QTN = new (*this, llvm::alignOf<DependentTemplateName>())
+ DependentTemplateName(NNS, Name, Canon);
+ DependentTemplateName *CheckQTN =
+ DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CheckQTN && "Dependent type name canonicalization broken");
+ (void)CheckQTN;
+ }
+
+ DependentTemplateNames.InsertNode(QTN, InsertPos);
+ return TemplateName(QTN);
+}
+
+/// \brief Retrieve the template name that represents a dependent
+/// template name such as \c MetaFun::template operator+.
+TemplateName
+ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
+ OverloadedOperatorKind Operator) const {
+ assert((!NNS || NNS->isDependent()) &&
+ "Nested name specifier must be dependent");
+
+ llvm::FoldingSetNodeID ID;
+ DependentTemplateName::Profile(ID, NNS, Operator);
+
+ void *InsertPos = nullptr;
+ DependentTemplateName *QTN
+ = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (QTN)
+ return TemplateName(QTN);
+
+ NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+ if (CanonNNS == NNS) {
+ QTN = new (*this, llvm::alignOf<DependentTemplateName>())
+ DependentTemplateName(NNS, Operator);
+ } else {
+ TemplateName Canon = getDependentTemplateName(CanonNNS, Operator);
+ QTN = new (*this, llvm::alignOf<DependentTemplateName>())
+ DependentTemplateName(NNS, Operator, Canon);
+
+ DependentTemplateName *CheckQTN
+ = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CheckQTN && "Dependent template name canonicalization broken");
+ (void)CheckQTN;
+ }
+
+ DependentTemplateNames.InsertNode(QTN, InsertPos);
+ return TemplateName(QTN);
+}
+
+TemplateName
+ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param,
+ TemplateName replacement) const {
+ llvm::FoldingSetNodeID ID;
+ SubstTemplateTemplateParmStorage::Profile(ID, param, replacement);
+
+ void *insertPos = nullptr;
+ SubstTemplateTemplateParmStorage *subst
+ = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos);
+
+ if (!subst) {
+ subst = new (*this) SubstTemplateTemplateParmStorage(param, replacement);
+ SubstTemplateTemplateParms.InsertNode(subst, insertPos);
+ }
+
+ return TemplateName(subst);
+}
+
+TemplateName
+ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param,
+ const TemplateArgument &ArgPack) const {
+ ASTContext &Self = const_cast<ASTContext &>(*this);
+ llvm::FoldingSetNodeID ID;
+ SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack);
+
+ void *InsertPos = nullptr;
+ SubstTemplateTemplateParmPackStorage *Subst
+ = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!Subst) {
+ Subst = new (*this) SubstTemplateTemplateParmPackStorage(Param,
+ ArgPack.pack_size(),
+ ArgPack.pack_begin());
+ SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos);
+ }
+
+ return TemplateName(Subst);
+}
+
+/// getFromTargetType - Given one of the integer types provided by
+/// TargetInfo, produce the corresponding type. The unsigned @p Type
+/// is actually a value of type @c TargetInfo::IntType.
+CanQualType ASTContext::getFromTargetType(unsigned Type) const {
+ switch (Type) {
+ case TargetInfo::NoInt: return CanQualType();
+ case TargetInfo::SignedChar: return SignedCharTy;
+ case TargetInfo::UnsignedChar: return UnsignedCharTy;
+ case TargetInfo::SignedShort: return ShortTy;
+ case TargetInfo::UnsignedShort: return UnsignedShortTy;
+ case TargetInfo::SignedInt: return IntTy;
+ case TargetInfo::UnsignedInt: return UnsignedIntTy;
+ case TargetInfo::SignedLong: return LongTy;
+ case TargetInfo::UnsignedLong: return UnsignedLongTy;
+ case TargetInfo::SignedLongLong: return LongLongTy;
+ case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy;
+ }
+
+ llvm_unreachable("Unhandled TargetInfo::IntType value");
+}
+
+//===----------------------------------------------------------------------===//
+// Type Predicates.
+//===----------------------------------------------------------------------===//
+
+/// getObjCGCAttrKind - Returns one of Objective-C's garbage collection
+/// attributes: GCNone, Weak or Strong.
+///
+Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
+ if (getLangOpts().getGC() == LangOptions::NonGC)
+ return Qualifiers::GCNone;
+
+ assert(getLangOpts().ObjC1);
+ Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();
+
+ // The default behaviour under Objective-C's GC is for ObjC pointers
+ // (or pointers to them) to be treated as though they were declared
+ // as __strong.
+ if (GCAttrs == Qualifiers::GCNone) {
+ if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
+ return Qualifiers::Strong;
+ else if (Ty->isPointerType())
+ return getObjCGCAttrKind(Ty->getAs<PointerType>()->getPointeeType());
+ } else {
+ // It's not valid to set GC attributes on anything that isn't a
+ // pointer.
+#ifndef NDEBUG
+ QualType CT = Ty->getCanonicalTypeInternal();
+ while (const ArrayType *AT = dyn_cast<ArrayType>(CT))
+ CT = AT->getElementType();
+ assert(CT->isAnyPointerType() || CT->isBlockPointerType());
+#endif
+ }
+ return GCAttrs;
+}
+
+//===----------------------------------------------------------------------===//
+// Type Compatibility Testing
+//===----------------------------------------------------------------------===//
+
+/// areCompatVectorTypes - Return true if the two specified vector types are
+/// compatible.
+static bool areCompatVectorTypes(const VectorType *LHS,
+ const VectorType *RHS) {
+ assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
+ return LHS->getElementType() == RHS->getElementType() &&
+ LHS->getNumElements() == RHS->getNumElements();
+}
+
+bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
+ QualType SecondVec) {
+ assert(FirstVec->isVectorType() && "FirstVec should be a vector type");
+ assert(SecondVec->isVectorType() && "SecondVec should be a vector type");
+
+ if (hasSameUnqualifiedType(FirstVec, SecondVec))
+ return true;
+
+ // Treat Neon vector types and most AltiVec vector types as if they are the
+ // equivalent GCC vector types.
+ const VectorType *First = FirstVec->getAs<VectorType>();
+ const VectorType *Second = SecondVec->getAs<VectorType>();
+ if (First->getNumElements() == Second->getNumElements() &&
+ hasSameType(First->getElementType(), Second->getElementType()) &&
+ First->getVectorKind() != VectorType::AltiVecPixel &&
+ First->getVectorKind() != VectorType::AltiVecBool &&
+ Second->getVectorKind() != VectorType::AltiVecPixel &&
+ Second->getVectorKind() != VectorType::AltiVecBool)
+ return true;
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's.
+//===----------------------------------------------------------------------===//
+
+/// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the
+/// inheritance hierarchy of 'rProto'.
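+/// For example (illustrative): given @protocol P2 <P1> @end, a P1 qualifier
+/// is compatible with a P2 qualifier, since P2 inherits P1.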
+bool
+ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
+ ObjCProtocolDecl *rProto) const {
+ if (declaresSameEntity(lProto, rProto))
+ return true;
+ for (auto *PI : rProto->protocols())
+ if (ProtocolCompatibleWithProtocol(lProto, PI))
+ return true;
+ return false;
+}
+
+/// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and
+/// Class<pr1, ...>.
+bool ASTContext::ObjCQualifiedClassTypesAreCompatible(QualType lhs,
+ QualType rhs) {
+ const ObjCObjectPointerType *lhsQID = lhs->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
+ assert ((lhsQID && rhsOPT) && "ObjCQualifiedClassTypesAreCompatible");
+
+ for (auto *lhsProto : lhsQID->quals()) {
+ bool match = false;
+ for (auto *rhsProto : rhsOPT->quals()) {
+ if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) {
+ match = true;
+ break;
+ }
+ }
+ if (!match)
+ return false;
+ }
+ return true;
+}
+
+/// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an
+/// ObjCQualifiedIDType.
+bool ASTContext::ObjCQualifiedIdTypesAreCompatible(QualType lhs, QualType rhs,
+ bool compare) {
+ // Allow id<P..> and an 'id' or void* type in all cases.
+ if (lhs->isVoidPointerType() ||
+ lhs->isObjCIdType() || lhs->isObjCClassType())
+ return true;
+ else if (rhs->isVoidPointerType() ||
+ rhs->isObjCIdType() || rhs->isObjCClassType())
+ return true;
+
+ if (const ObjCObjectPointerType *lhsQID = lhs->getAsObjCQualifiedIdType()) {
+ const ObjCObjectPointerType *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
+
+ if (!rhsOPT) return false;
+
+ if (rhsOPT->qual_empty()) {
+ // If the RHS is an unqualified interface pointer "NSString*",
+ // make sure we check the class hierarchy.
+ if (ObjCInterfaceDecl *rhsID = rhsOPT->getInterfaceDecl()) {
+ for (auto *I : lhsQID->quals()) {
+ // when comparing an id<P> on lhs with a static type on rhs,
+ // see if static class implements all of id's protocols, directly or
+ // through its super class and categories.
+ if (!rhsID->ClassImplementsProtocol(I, true))
+ return false;
+ }
+ }
+ // If there are no qualifiers and no interface, we have an 'id'.
+ return true;
+ }
+ // Both the right and left sides have qualifiers.
+ for (auto *lhsProto : lhsQID->quals()) {
+ bool match = false;
+
+ // when comparing an id<P> on lhs with a static type on rhs,
+ // see if static class implements all of id's protocols, directly or
+ // through its super class and categories.
+ for (auto *rhsProto : rhsOPT->quals()) {
+ if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
+ (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
+ match = true;
+ break;
+ }
+ }
+ // If the RHS is a qualified interface pointer "NSString<P>*",
+ // make sure we check the class hierarchy.
+ if (ObjCInterfaceDecl *rhsID = rhsOPT->getInterfaceDecl()) {
+ for (auto *I : lhsQID->quals()) {
+ // when comparing an id<P> on lhs with a static type on rhs,
+ // see if static class implements all of id's protocols, directly or
+ // through its super class and categories.
+ if (rhsID->ClassImplementsProtocol(I, true)) {
+ match = true;
+ break;
+ }
+ }
+ }
+ if (!match)
+ return false;
+ }
+
+ return true;
+ }
+
+ const ObjCObjectPointerType *rhsQID = rhs->getAsObjCQualifiedIdType();
+ assert(rhsQID && "One of the LHS/RHS should be id<x>");
+
+ if (const ObjCObjectPointerType *lhsOPT =
+ lhs->getAsObjCInterfacePointerType()) {
+ // Both the right and left sides have qualifiers.
+ for (auto *lhsProto : lhsOPT->quals()) {
+ bool match = false;
+
+ // when comparing an id<P> on rhs with a static type on lhs,
+ // see if static class implements all of id's protocols, directly or
+ // through its super class and categories.
+ // First, each lhs protocol in the qualifier list must be found,
+ // directly or indirectly, in rhs's qualifier list, or it is a mismatch.
+ for (auto *rhsProto : rhsQID->quals()) {
+ if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
+ (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
+ match = true;
+ break;
+ }
+ }
+ if (!match)
+ return false;
+ }
+
+ // The static class's protocols, or its superclass or category protocols,
+ // must be found, directly or indirectly, in rhs's qualifier list, or it is a mismatch.
+ if (ObjCInterfaceDecl *lhsID = lhsOPT->getInterfaceDecl()) {
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols;
+ CollectInheritedProtocols(lhsID, LHSInheritedProtocols);
+ // This is rather dubious but matches gcc's behavior. If lhs has
+ // no type qualifier and its class has no static protocol(s),
+ // assume that it is a mismatch.
+ if (LHSInheritedProtocols.empty() && lhsOPT->qual_empty())
+ return false;
+ for (auto *lhsProto : LHSInheritedProtocols) {
+ bool match = false;
+ for (auto *rhsProto : rhsQID->quals()) {
+ if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
+ (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
+ match = true;
+ break;
+ }
+ }
+ if (!match)
+ return false;
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
+/// canAssignObjCInterfaces - Return true if the two interface types are
+/// compatible for assignment from RHS to LHS. This handles validation of any
+/// protocol qualifiers on the LHS or RHS.
+///
+bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT,
+ const ObjCObjectPointerType *RHSOPT) {
+ const ObjCObjectType* LHS = LHSOPT->getObjectType();
+ const ObjCObjectType* RHS = RHSOPT->getObjectType();
+
+ // If either type represents the built-in 'id' or 'Class' types, return true.
+ if (LHS->isObjCUnqualifiedIdOrClass() ||
+ RHS->isObjCUnqualifiedIdOrClass())
+ return true;
+
+ // Function object that propagates a successful result or handles
+ // __kindof types.
+ auto finish = [&](bool succeeded) -> bool {
+ if (succeeded)
+ return true;
+
+ if (!RHS->isKindOfType())
+ return false;
+
+ // Strip off __kindof and protocol qualifiers, then check whether
+ // we can assign the other way.
+ return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this),
+ LHSOPT->stripObjCKindOfTypeAndQuals(*this));
+ };
+
+ if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) {
+ return finish(ObjCQualifiedIdTypesAreCompatible(QualType(LHSOPT,0),
+ QualType(RHSOPT,0),
+ false));
+ }
+
+ if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) {
+ return finish(ObjCQualifiedClassTypesAreCompatible(QualType(LHSOPT,0),
+ QualType(RHSOPT,0)));
+ }
+
+ // If we have 2 user-defined types, fall into that path.
+ if (LHS->getInterface() && RHS->getInterface()) {
+ return finish(canAssignObjCInterfaces(LHS, RHS));
+ }
+
+ return false;
+}
+
+/// canAssignObjCInterfacesInBlockPointer - This routine is specifically written
+/// for providing type-safety for objective-c pointers used to pass/return
+/// arguments in block literals. When passed as arguments, passing 'A*' where
+/// 'id' is expected is not OK. Passing 'Sub *' where 'Super *' is expected is
+/// not OK. For the return type, the opposite is not OK.
+bool ASTContext::canAssignObjCInterfacesInBlockPointer(
+ const ObjCObjectPointerType *LHSOPT,
+ const ObjCObjectPointerType *RHSOPT,
+ bool BlockReturnType) {
+
+ // Function object that propagates a successful result or handles
+ // __kindof types.
+ auto finish = [&](bool succeeded) -> bool {
+ if (succeeded)
+ return true;
+
+ const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT;
+ if (!Expected->isKindOfType())
+ return false;
+
+ // Strip off __kindof and protocol qualifiers, then check whether
+ // we can assign the other way.
+ return canAssignObjCInterfacesInBlockPointer(
+ RHSOPT->stripObjCKindOfTypeAndQuals(*this),
+ LHSOPT->stripObjCKindOfTypeAndQuals(*this),
+ BlockReturnType);
+ };
+
+ if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType())
+ return true;
+
+ if (LHSOPT->isObjCBuiltinType()) {
+ return finish(RHSOPT->isObjCBuiltinType() ||
+ RHSOPT->isObjCQualifiedIdType());
+ }
+
+ if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType())
+ return finish(ObjCQualifiedIdTypesAreCompatible(QualType(LHSOPT,0),
+ QualType(RHSOPT,0),
+ false));
+
+ const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
+ const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
+ if (LHS && RHS) { // We have 2 user-defined types.
+ if (LHS != RHS) {
+ if (LHS->getDecl()->isSuperClassOf(RHS->getDecl()))
+ return finish(BlockReturnType);
+ if (RHS->getDecl()->isSuperClassOf(LHS->getDecl()))
+ return finish(!BlockReturnType);
+ }
+ else
+ return true;
+ }
+ return false;
+}
+
+/// Comparison routine for Objective-C protocols to be used with
+/// llvm::array_pod_sort.
+static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs,
+ ObjCProtocolDecl * const *rhs) {
+ return (*lhs)->getName().compare((*rhs)->getName());
+}
+
+/// getIntersectionOfProtocols - This routine finds the intersection of the
+/// sets of protocols inherited from two distinct Objective-C pointer objects
+/// with the given common base.
+/// It is used to build the composite qualifier list of the composite type of
+/// a conditional expression involving two Objective-C pointer objects.
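+///
+/// For example (illustrative): for A<P1,P2> * and B<P2,P3> * whose classes
+/// adopt no other common protocols and whose common base implies none of
+/// them, the composite qualifier list is <P2>.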
+static
+void getIntersectionOfProtocols(ASTContext &Context,
+ const ObjCInterfaceDecl *CommonBase,
+ const ObjCObjectPointerType *LHSOPT,
+ const ObjCObjectPointerType *RHSOPT,
+ SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) {
+
+ const ObjCObjectType* LHS = LHSOPT->getObjectType();
+ const ObjCObjectType* RHS = RHSOPT->getObjectType();
+ assert(LHS->getInterface() && "LHS must have an interface base");
+ assert(RHS->getInterface() && "RHS must have an interface base");
+
+ // Add all of the protocols for the LHS.
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet;
+
+ // Start with the protocol qualifiers.
+ for (auto proto : LHS->quals()) {
+ Context.CollectInheritedProtocols(proto, LHSProtocolSet);
+ }
+
+ // Also add the protocols associated with the LHS interface.
+ Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet);
+
+ // Add all of the protocols for the RHS.
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet;
+
+ // Start with the protocol qualifiers.
+ for (auto proto : RHS->quals()) {
+ Context.CollectInheritedProtocols(proto, RHSProtocolSet);
+ }
+
+ // Also add the protocols associated with the RHS interface.
+ Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet);
+
+ // Compute the intersection of the collected protocol sets.
+ for (auto proto : LHSProtocolSet) {
+ if (RHSProtocolSet.count(proto))
+ IntersectionSet.push_back(proto);
+ }
+
+ // Compute the set of protocols that is implied by either the common type or
+ // the protocols within the intersection.
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols;
+ Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols);
+
+ // Remove any implied protocols from the list of inherited protocols.
+ if (!ImpliedProtocols.empty()) {
+ IntersectionSet.erase(
+ std::remove_if(IntersectionSet.begin(),
+ IntersectionSet.end(),
+ [&](ObjCProtocolDecl *proto) -> bool {
+ return ImpliedProtocols.count(proto) > 0;
+ }),
+ IntersectionSet.end());
+ }
+
+ // Sort the remaining protocols by name.
+ llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(),
+ compareObjCProtocolsByName);
+}
+
+/// Determine whether the first type is a subtype of the second.
+static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs,
+ QualType rhs) {
+ // Common case: two object pointers.
+ const ObjCObjectPointerType *lhsOPT = lhs->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
+ if (lhsOPT && rhsOPT)
+ return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT);
+
+ // Two block pointers.
+ const BlockPointerType *lhsBlock = lhs->getAs<BlockPointerType>();
+ const BlockPointerType *rhsBlock = rhs->getAs<BlockPointerType>();
+ if (lhsBlock && rhsBlock)
+ return ctx.typesAreBlockPointerCompatible(lhs, rhs);
+
+ // If either is an unqualified 'id' and the other is a block, it's
+ // acceptable.
+ if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) ||
+ (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock))
+ return true;
+
+ return false;
+}
+
+// Check that the given Objective-C type argument lists are equivalent.
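+// For example (illustrative): with a covariant type parameter, as on
+// NSArray's element type, an lhs of NSArray<NSString *> * matches an rhs of
+// NSArray<NSMutableString *> *.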
+static bool sameObjCTypeArgs(ASTContext &ctx,
+ const ObjCInterfaceDecl *iface,
+ ArrayRef<QualType> lhsArgs,
+ ArrayRef<QualType> rhsArgs,
+ bool stripKindOf) {
+ if (lhsArgs.size() != rhsArgs.size())
+ return false;
+
+ ObjCTypeParamList *typeParams = iface->getTypeParamList();
+ for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) {
+ if (ctx.hasSameType(lhsArgs[i], rhsArgs[i]))
+ continue;
+
+ switch (typeParams->begin()[i]->getVariance()) {
+ case ObjCTypeParamVariance::Invariant:
+ if (!stripKindOf ||
+ !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx),
+ rhsArgs[i].stripObjCKindOfType(ctx))) {
+ return false;
+ }
+ break;
+
+ case ObjCTypeParamVariance::Covariant:
+ if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i]))
+ return false;
+ break;
+
+ case ObjCTypeParamVariance::Contravariant:
+ if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i]))
+ return false;
+ break;
+ }
+ }
+
+ return true;
+}
+
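+/// Compute the composite pointer type of two Objective-C object pointer
+/// types by walking their class hierarchies; for example (illustrative),
+/// NSString * and NSNumber * compose to NSObject * when both interfaces
+/// descend from NSObject.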
+QualType ASTContext::areCommonBaseCompatible(
+ const ObjCObjectPointerType *Lptr,
+ const ObjCObjectPointerType *Rptr) {
+ const ObjCObjectType *LHS = Lptr->getObjectType();
+ const ObjCObjectType *RHS = Rptr->getObjectType();
+ const ObjCInterfaceDecl* LDecl = LHS->getInterface();
+ const ObjCInterfaceDecl* RDecl = RHS->getInterface();
+
+ if (!LDecl || !RDecl)
+ return QualType();
+
+ // Follow the left-hand side up the class hierarchy until we either hit a
+ // root or find the RHS. Record the ancestors in case we don't find it.
+ llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4>
+ LHSAncestors;
+ while (true) {
+ // Record this ancestor. We'll need this if the common type isn't in the
+ // path from the LHS to the root.
+ LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS;
+
+ if (declaresSameEntity(LHS->getInterface(), RDecl)) {
+ // Get the type arguments.
+ ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten();
+ bool anyChanges = false;
+ if (LHS->isSpecialized() && RHS->isSpecialized()) {
+ // Both have type arguments, compare them.
+ if (!sameObjCTypeArgs(*this, LHS->getInterface(),
+ LHS->getTypeArgs(), RHS->getTypeArgs(),
+ /*stripKindOf=*/true))
+ return QualType();
+ } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
+ // If only one has type arguments, the result will not have type
+ // arguments.
+ LHSTypeArgs = { };
+ anyChanges = true;
+ }
+
+ // Compute the intersection of protocols.
+ SmallVector<ObjCProtocolDecl *, 8> Protocols;
+ getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr,
+ Protocols);
+ if (!Protocols.empty())
+ anyChanges = true;
+
+ // If anything in the LHS will have changed, build a new result type.
+ if (anyChanges) {
+ QualType Result = getObjCInterfaceType(LHS->getInterface());
+ Result = getObjCObjectType(Result, LHSTypeArgs, Protocols,
+ LHS->isKindOfType());
+ return getObjCObjectPointerType(Result);
+ }
+
+ return getObjCObjectPointerType(QualType(LHS, 0));
+ }
+
+ // Find the superclass.
+ QualType LHSSuperType = LHS->getSuperClassType();
+ if (LHSSuperType.isNull())
+ break;
+
+ LHS = LHSSuperType->castAs<ObjCObjectType>();
+ }
+
+ // We didn't find anything by following the LHS to its root; now check
+ // the RHS against the cached set of ancestors.
+ while (true) {
+ auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl());
+ if (KnownLHS != LHSAncestors.end()) {
+ LHS = KnownLHS->second;
+
+ // Get the type arguments.
+ ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten();
+ bool anyChanges = false;
+ if (LHS->isSpecialized() && RHS->isSpecialized()) {
+ // Both have type arguments, compare them.
+ if (!sameObjCTypeArgs(*this, LHS->getInterface(),
+ LHS->getTypeArgs(), RHS->getTypeArgs(),
+ /*stripKindOf=*/true))
+ return QualType();
+ } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
+ // If only one has type arguments, the result will not have type
+ // arguments.
+ RHSTypeArgs = { };
+ anyChanges = true;
+ }
+
+ // Compute the intersection of protocols.
+ SmallVector<ObjCProtocolDecl *, 8> Protocols;
+ getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr,
+ Protocols);
+ if (!Protocols.empty())
+ anyChanges = true;
+
+ if (anyChanges) {
+ QualType Result = getObjCInterfaceType(RHS->getInterface());
+ Result = getObjCObjectType(Result, RHSTypeArgs, Protocols,
+ RHS->isKindOfType());
+ return getObjCObjectPointerType(Result);
+ }
+
+ return getObjCObjectPointerType(QualType(RHS, 0));
+ }
+
+ // Find the superclass of the RHS.
+ QualType RHSSuperType = RHS->getSuperClassType();
+ if (RHSSuperType.isNull())
+ break;
+
+ RHS = RHSSuperType->castAs<ObjCObjectType>();
+ }
+
+ return QualType();
+}
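+
+// A sketch of how the two walks above are typically exercised (e.g. by a
+// caller merging the branches of a conditional); the names are illustrative:
+//
+//   NSString *s; NSNumber *n;
+//   ... flag ? s : n ...
+//
+// Neither branch type appears on the other's superclass chain, but both
+// record NSObject as an ancestor, so the merged type can be NSObject *
+// (subject to the protocol and type-argument intersection computed above).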
+
+bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS,
+ const ObjCObjectType *RHS) {
+ assert(LHS->getInterface() && "LHS is not an interface type");
+ assert(RHS->getInterface() && "RHS is not an interface type");
+
+ // Verify that the base decls are compatible: the RHS must be a subclass of
+ // the LHS.
+ ObjCInterfaceDecl *LHSInterface = LHS->getInterface();
+ bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface());
+ if (!IsSuperClass)
+ return false;
+
+ // If the LHS has protocol qualifiers, determine whether all of them are
+ // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the
+ // LHS).
+ if (LHS->getNumProtocols() > 0) {
+ // It is OK if conversion of LHS to SuperClass results in narrowing of
+ // types; i.e., SuperClass may implement at least one of the protocols
+ // in LHS's protocol list. For example, SuperObj<P1> = lhs<P1,P2> is OK,
+ // but SuperObj<P1,P2,P3> = lhs<P1,P2> is not.
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
+ CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols);
+ // Also, if RHS has explicit protocol qualifiers, include them when
+ // comparing against LHS's qualifiers.
+ for (auto *RHSPI : RHS->quals())
+ CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols);
+ // If there are no protocols associated with RHS, it is not a match.
+ if (SuperClassInheritedProtocols.empty())
+ return false;
+
+ for (const auto *LHSProto : LHS->quals()) {
+ bool SuperImplementsProtocol = false;
+ for (auto *SuperClassProto : SuperClassInheritedProtocols)
+ if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
+ SuperImplementsProtocol = true;
+ break;
+ }
+ if (!SuperImplementsProtocol)
+ return false;
+ }
+ }
+
+ // If the LHS is specialized, we may need to check type arguments.
+ if (LHS->isSpecialized()) {
+ // Follow the superclass chain until we've matched the LHS class in the
+ // hierarchy. This substitutes type arguments through.
+ const ObjCObjectType *RHSSuper = RHS;
+ while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface))
+ RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();
+
+ // If the RHS is specialized, compare type arguments.
+ if (RHSSuper->isSpecialized() &&
+ !sameObjCTypeArgs(*this, LHS->getInterface(),
+ LHS->getTypeArgs(), RHSSuper->getTypeArgs(),
+ /*stripKindOf=*/true)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
+ // Get the pointed-to types.
+ const ObjCObjectPointerType *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *RHSOPT = RHS->getAs<ObjCObjectPointerType>();
+
+ if (!LHSOPT || !RHSOPT)
+ return false;
+
+ return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
+ canAssignObjCInterfaces(RHSOPT, LHSOPT);
+}
+
+bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
+ return canAssignObjCInterfaces(
+ getObjCObjectPointerType(To)->getAs<ObjCObjectPointerType>(),
+ getObjCObjectPointerType(From)->getAs<ObjCObjectPointerType>());
+}
+
+/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
+/// both shall have the identically qualified version of a compatible type.
+/// C99 6.2.7p1: Two types have compatible types if their types are the
+/// same. See 6.7.[2,3,5] for additional rules.
+bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS,
+ bool CompareUnqualified) {
+ if (getLangOpts().CPlusPlus)
+ return hasSameType(LHS, RHS);
+
+ return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull();
+}
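+
+// For example (a sketch, C only; in C++ the check above degenerates to type
+// identity): these two declarations have compatible types under C99
+// 6.7.5.3p15, because 'int' is unchanged by the default argument promotions:
+//
+//   int f();
+//   int f(int x);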
+
+bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) {
+ return typesAreCompatible(LHS, RHS);
+}
+
+bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) {
+ return !mergeTypes(LHS, RHS, true).isNull();
+}
+
+/// mergeTransparentUnionType - if T is a transparent union type and a member
+/// of T is compatible with SubType, return the merged type; otherwise return
+/// QualType().
+QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType,
+ bool OfBlockPointer,
+ bool Unqualified) {
+ if (const RecordType *UT = T->getAsUnionType()) {
+ RecordDecl *UD = UT->getDecl();
+ if (UD->hasAttr<TransparentUnionAttr>()) {
+ for (const auto *I : UD->fields()) {
+ QualType ET = I->getType().getUnqualifiedType();
+ QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified);
+ if (!MT.isNull())
+ return MT;
+ }
+ }
+ }
+
+ return QualType();
+}
+
+/// mergeFunctionParameterTypes - merge two types which appear as function
+/// parameter types.
+QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs,
+ bool OfBlockPointer,
+ bool Unqualified) {
+ // GNU extension: two types are compatible if they appear as a function
+ // argument, one of the types is a transparent union type, and the other
+ // type is compatible with a union member.
+ QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer,
+ Unqualified);
+ if (!lmerge.isNull())
+ return lmerge;
+
+ QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer,
+ Unqualified);
+ if (!rmerge.isNull())
+ return rmerge;
+
+ return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified);
+}
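+
+// A sketch of the GNU transparent_union extension handled above; the type
+// and function names are hypothetical:
+//
+//   typedef union {
+//     int *ip;
+//     float *fp;
+//   } arg_t __attribute__((transparent_union));
+//
+//   void use(arg_t);
+//   void use(int *);   // compatible redeclaration: int * matches the
+//                      // union member 'ip', so the parameter types merge.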
+
+QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
+ bool OfBlockPointer,
+ bool Unqualified) {
+ const FunctionType *lbase = lhs->getAs<FunctionType>();
+ const FunctionType *rbase = rhs->getAs<FunctionType>();
+ const FunctionProtoType *lproto = dyn_cast<FunctionProtoType>(lbase);
+ const FunctionProtoType *rproto = dyn_cast<FunctionProtoType>(rbase);
+ bool allLTypes = true;
+ bool allRTypes = true;
+
+ // Check return type
+ QualType retType;
+ if (OfBlockPointer) {
+ QualType RHS = rbase->getReturnType();
+ QualType LHS = lbase->getReturnType();
+ bool UnqualifiedResult = Unqualified;
+ if (!UnqualifiedResult)
+ UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers());
+ retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true);
+ }
+ else
+ retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false,
+ Unqualified);
+ if (retType.isNull()) return QualType();
+
+ if (Unqualified)
+ retType = retType.getUnqualifiedType();
+
+ CanQualType LRetType = getCanonicalType(lbase->getReturnType());
+ CanQualType RRetType = getCanonicalType(rbase->getReturnType());
+ if (Unqualified) {
+ LRetType = LRetType.getUnqualifiedType();
+ RRetType = RRetType.getUnqualifiedType();
+ }
+
+ if (getCanonicalType(retType) != LRetType)
+ allLTypes = false;
+ if (getCanonicalType(retType) != RRetType)
+ allRTypes = false;
+
+ // FIXME: double check this
+ // FIXME: should we error if lbase->getRegParmAttr() != 0 &&
+ // rbase->getRegParmAttr() != 0 &&
+ // lbase->getRegParmAttr() != rbase->getRegParmAttr()?
+ FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo();
+ FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo();
+
+ // Compatible functions must have compatible calling conventions
+ if (lbaseInfo.getCC() != rbaseInfo.getCC())
+ return QualType();
+
+ // Regparm is part of the calling convention.
+ if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm())
+ return QualType();
+ if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm())
+ return QualType();
+
+ if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult())
+ return QualType();
+
+ // FIXME: some uses, e.g. conditional exprs, really want this to be 'both'.
+ bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
+
+ if (lbaseInfo.getNoReturn() != NoReturn)
+ allLTypes = false;
+ if (rbaseInfo.getNoReturn() != NoReturn)
+ allRTypes = false;
+
+ FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn);
+
+ if (lproto && rproto) { // two C99 style function prototypes
+ assert(!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec() &&
+ "C++ shouldn't be here");
+ // Compatible functions must have the same number of parameters
+ if (lproto->getNumParams() != rproto->getNumParams())
+ return QualType();
+
+ // Variadic and non-variadic functions aren't compatible
+ if (lproto->isVariadic() != rproto->isVariadic())
+ return QualType();
+
+ if (lproto->getTypeQuals() != rproto->getTypeQuals())
+ return QualType();
+
+ if (LangOpts.ObjCAutoRefCount &&
+ !FunctionTypesMatchOnNSConsumedAttrs(rproto, lproto))
+ return QualType();
+
+ // Check parameter type compatibility
+ SmallVector<QualType, 10> types;
+ for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) {
+ QualType lParamType = lproto->getParamType(i).getUnqualifiedType();
+ QualType rParamType = rproto->getParamType(i).getUnqualifiedType();
+ QualType paramType = mergeFunctionParameterTypes(
+ lParamType, rParamType, OfBlockPointer, Unqualified);
+ if (paramType.isNull())
+ return QualType();
+
+ if (Unqualified)
+ paramType = paramType.getUnqualifiedType();
+
+ types.push_back(paramType);
+ if (Unqualified) {
+ lParamType = lParamType.getUnqualifiedType();
+ rParamType = rParamType.getUnqualifiedType();
+ }
+
+ if (getCanonicalType(paramType) != getCanonicalType(lParamType))
+ allLTypes = false;
+ if (getCanonicalType(paramType) != getCanonicalType(rParamType))
+ allRTypes = false;
+ }
+
+ if (allLTypes) return lhs;
+ if (allRTypes) return rhs;
+
+ FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo();
+ EPI.ExtInfo = einfo;
+ return getFunctionType(retType, types, EPI);
+ }
+
+ if (lproto) allRTypes = false;
+ if (rproto) allLTypes = false;
+
+ const FunctionProtoType *proto = lproto ? lproto : rproto;
+ if (proto) {
+ assert(!proto->hasExceptionSpec() && "C++ shouldn't be here");
+ if (proto->isVariadic()) return QualType();
+ // Check that the types are compatible with the types that
+ // would result from default argument promotions (C99 6.7.5.3p15).
+ // The only types actually affected are promotable integer
+ // types and floats, which would be passed as a different
+ // type depending on whether the prototype is visible.
+ for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) {
+ QualType paramTy = proto->getParamType(i);
+
+ // Look at the converted type of enum types, since that is the type used
+ // to pass enum values.
+ if (const EnumType *Enum = paramTy->getAs<EnumType>()) {
+ paramTy = Enum->getDecl()->getIntegerType();
+ if (paramTy.isNull())
+ return QualType();
+ }
+
+ if (paramTy->isPromotableIntegerType() ||
+ getCanonicalType(paramTy).getUnqualifiedType() == FloatTy)
+ return QualType();
+ }
+
+ if (allLTypes) return lhs;
+ if (allRTypes) return rhs;
+
+ FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo();
+ EPI.ExtInfo = einfo;
+ return getFunctionType(retType, proto->getParamTypes(), EPI);
+ }
+
+ if (allLTypes) return lhs;
+ if (allRTypes) return rhs;
+ return getFunctionNoProtoType(retType, einfo);
+}
+
+/// Given that we have an enum type and a non-enum type, try to merge them.
+static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET,
+ QualType other, bool isBlockReturnType) {
+ // C99 6.7.2.2p4: Each enumerated type shall be compatible with char,
+ // a signed integer type, or an unsigned integer type.
+ // Compatibility is based on the underlying type, not the promotion
+ // type.
+ QualType underlyingType = ET->getDecl()->getIntegerType();
+ if (underlyingType.isNull()) return QualType();
+ if (Context.hasSameType(underlyingType, other))
+ return other;
+
+ // In block return types, we're more permissive and accept any
+ // integral type of the same size.
+ if (isBlockReturnType && other->isIntegerType() &&
+ Context.getTypeSize(underlyingType) == Context.getTypeSize(other))
+ return other;
+
+ return QualType();
+}
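+
+// A sketch of the rule above (the enum name is illustrative): if the
+// implementation selects 'unsigned int' as the underlying type of
+//
+//   enum Color { Red, Green, Blue };
+//
+// then 'enum Color' merges with 'unsigned int'. In a block return type the
+// check is looser: any integer type of the same width (e.g. 'int') is also
+// accepted.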
+
+QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
+ bool OfBlockPointer,
+ bool Unqualified, bool BlockReturnType) {
+ // C++ [expr]: If an expression initially has the type "reference to T", the
+ // type is adjusted to "T" prior to any further analysis, the expression
+ // designates the object or function denoted by the reference, and the
+ // expression is an lvalue unless the reference is an rvalue reference and
+ // the expression is a function call (possibly inside parentheses).
+ assert(!LHS->getAs<ReferenceType>() && "LHS is a reference type?");
+ assert(!RHS->getAs<ReferenceType>() && "RHS is a reference type?");
+
+ if (Unqualified) {
+ LHS = LHS.getUnqualifiedType();
+ RHS = RHS.getUnqualifiedType();
+ }
+
+ QualType LHSCan = getCanonicalType(LHS),
+ RHSCan = getCanonicalType(RHS);
+
+ // If two types are identical, they are compatible.
+ if (LHSCan == RHSCan)
+ return LHS;
+
+ // If the qualifiers are different, the types aren't compatible... mostly.
+ Qualifiers LQuals = LHSCan.getLocalQualifiers();
+ Qualifiers RQuals = RHSCan.getLocalQualifiers();
+ if (LQuals != RQuals) {
+ // If any of these qualifiers are different, we have a type
+ // mismatch.
+ if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
+ LQuals.getAddressSpace() != RQuals.getAddressSpace() ||
+ LQuals.getObjCLifetime() != RQuals.getObjCLifetime())
+ return QualType();
+
+ // Exactly one GC qualifier difference is allowed: __strong is
+ // okay if the other type has no GC qualifier but is an Objective
+ // C object pointer (i.e. implicitly strong by default). We fix
+ // this by pretending that the unqualified type was actually
+ // qualified __strong.
+ Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
+ Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
+ assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");
+
+ if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
+ return QualType();
+
+ if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) {
+ return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong));
+ }
+ if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) {
+ return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS);
+ }
+ return QualType();
+ }
+
+ // Okay, qualifiers are equal.
+
+ Type::TypeClass LHSClass = LHSCan->getTypeClass();
+ Type::TypeClass RHSClass = RHSCan->getTypeClass();
+
+ // We want to consider the two function types to be the same for these
+ // comparisons, just force one to the other.
+ if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto;
+ if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto;
+
+ // Same as above for arrays
+ if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray)
+ LHSClass = Type::ConstantArray;
+ if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray)
+ RHSClass = Type::ConstantArray;
+
+ // ObjCInterfaces are just specialized ObjCObjects.
+ if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject;
+ if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject;
+
+ // Canonicalize ExtVector -> Vector.
+ if (LHSClass == Type::ExtVector) LHSClass = Type::Vector;
+ if (RHSClass == Type::ExtVector) RHSClass = Type::Vector;
+
+ // If the canonical type classes don't match.
+ if (LHSClass != RHSClass) {
+ // Note that we only have special rules for turning block enum
+ // returns into block int returns, not vice-versa.
+ if (const EnumType* ETy = LHS->getAs<EnumType>()) {
+ return mergeEnumWithInteger(*this, ETy, RHS, false);
+ }
+ if (const EnumType* ETy = RHS->getAs<EnumType>()) {
+ return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType);
+ }
+ // Allow a block pointer type to match an 'id' type.
+ if (OfBlockPointer && !BlockReturnType) {
+ if (LHS->isObjCIdType() && RHS->isBlockPointerType())
+ return LHS;
+ if (RHS->isObjCIdType() && LHS->isBlockPointerType())
+ return RHS;
+ }
+
+ return QualType();
+ }
+
+ // The canonical type classes match.
+ switch (LHSClass) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Non-canonical and dependent types shouldn't get here");
+
+ case Type::Auto:
+ case Type::LValueReference:
+ case Type::RValueReference:
+ case Type::MemberPointer:
+ llvm_unreachable("C++ should never be in mergeTypes");
+
+ case Type::ObjCInterface:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::FunctionProto:
+ case Type::ExtVector:
+ llvm_unreachable("Types are eliminated above");
+
+ case Type::Pointer:
+ {
+ // Merge two pointer types, while trying to preserve typedef info
+ QualType LHSPointee = LHS->getAs<PointerType>()->getPointeeType();
+ QualType RHSPointee = RHS->getAs<PointerType>()->getPointeeType();
+ if (Unqualified) {
+ LHSPointee = LHSPointee.getUnqualifiedType();
+ RHSPointee = RHSPointee.getUnqualifiedType();
+ }
+ QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false,
+ Unqualified);
+ if (ResultType.isNull()) return QualType();
+ if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType))
+ return LHS;
+ if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType))
+ return RHS;
+ return getPointerType(ResultType);
+ }
+ case Type::BlockPointer:
+ {
+ // Merge two block pointer types, while trying to preserve typedef info
+ QualType LHSPointee = LHS->getAs<BlockPointerType>()->getPointeeType();
+ QualType RHSPointee = RHS->getAs<BlockPointerType>()->getPointeeType();
+ if (Unqualified) {
+ LHSPointee = LHSPointee.getUnqualifiedType();
+ RHSPointee = RHSPointee.getUnqualifiedType();
+ }
+ QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer,
+ Unqualified);
+ if (ResultType.isNull()) return QualType();
+ if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType))
+ return LHS;
+ if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType))
+ return RHS;
+ return getBlockPointerType(ResultType);
+ }
+ case Type::Atomic:
+ {
+ // Merge two atomic types, while trying to preserve typedef info
+ QualType LHSValue = LHS->getAs<AtomicType>()->getValueType();
+ QualType RHSValue = RHS->getAs<AtomicType>()->getValueType();
+ if (Unqualified) {
+ LHSValue = LHSValue.getUnqualifiedType();
+ RHSValue = RHSValue.getUnqualifiedType();
+ }
+ QualType ResultType = mergeTypes(LHSValue, RHSValue, false,
+ Unqualified);
+ if (ResultType.isNull()) return QualType();
+ if (getCanonicalType(LHSValue) == getCanonicalType(ResultType))
+ return LHS;
+ if (getCanonicalType(RHSValue) == getCanonicalType(ResultType))
+ return RHS;
+ return getAtomicType(ResultType);
+ }
+ case Type::ConstantArray:
+ {
+ const ConstantArrayType* LCAT = getAsConstantArrayType(LHS);
+ const ConstantArrayType* RCAT = getAsConstantArrayType(RHS);
+ if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize())
+ return QualType();
+
+ QualType LHSElem = getAsArrayType(LHS)->getElementType();
+ QualType RHSElem = getAsArrayType(RHS)->getElementType();
+ if (Unqualified) {
+ LHSElem = LHSElem.getUnqualifiedType();
+ RHSElem = RHSElem.getUnqualifiedType();
+ }
+
+ QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified);
+ if (ResultType.isNull()) return QualType();
+ if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType))
+ return LHS;
+ if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType))
+ return RHS;
+ if (LCAT) return getConstantArrayType(ResultType, LCAT->getSize(),
+ ArrayType::ArraySizeModifier(), 0);
+ if (RCAT) return getConstantArrayType(ResultType, RCAT->getSize(),
+ ArrayType::ArraySizeModifier(), 0);
+ const VariableArrayType* LVAT = getAsVariableArrayType(LHS);
+ const VariableArrayType* RVAT = getAsVariableArrayType(RHS);
+ if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType))
+ return LHS;
+ if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType))
+ return RHS;
+ if (LVAT) {
+ // FIXME: This isn't correct! But tricky to implement because
+ // the array's size has to be the size of LHS, but the type
+ // has to be different.
+ return LHS;
+ }
+ if (RVAT) {
+ // FIXME: This isn't correct! But tricky to implement because
+ // the array's size has to be the size of RHS, but the type
+ // has to be different.
+ return RHS;
+ }
+ if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS;
+ if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS;
+ return getIncompleteArrayType(ResultType,
+ ArrayType::ArraySizeModifier(), 0);
+ }
+ case Type::FunctionNoProto:
+ return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified);
+ case Type::Record:
+ case Type::Enum:
+ return QualType();
+ case Type::Builtin:
+ // Only exactly equal builtin types are compatible, which is tested above.
+ return QualType();
+ case Type::Complex:
+ // Distinct complex types are incompatible.
+ return QualType();
+ case Type::Vector:
+ // FIXME: The merged type should be an ExtVector!
+ if (areCompatVectorTypes(LHSCan->getAs<VectorType>(),
+ RHSCan->getAs<VectorType>()))
+ return LHS;
+ return QualType();
+ case Type::ObjCObject: {
+ // Check if the types are assignment compatible.
+ // FIXME: This should be type compatibility, e.g. whether
+ // "LHS x; RHS x;" at global scope is legal.
+ const ObjCObjectType* LHSIface = LHS->getAs<ObjCObjectType>();
+ const ObjCObjectType* RHSIface = RHS->getAs<ObjCObjectType>();
+ if (canAssignObjCInterfaces(LHSIface, RHSIface))
+ return LHS;
+
+ return QualType();
+ }
+ case Type::ObjCObjectPointer: {
+ if (OfBlockPointer) {
+ if (canAssignObjCInterfacesInBlockPointer(
+ LHS->getAs<ObjCObjectPointerType>(),
+ RHS->getAs<ObjCObjectPointerType>(),
+ BlockReturnType))
+ return LHS;
+ return QualType();
+ }
+ if (canAssignObjCInterfaces(LHS->getAs<ObjCObjectPointerType>(),
+ RHS->getAs<ObjCObjectPointerType>()))
+ return LHS;
+
+ return QualType();
+ }
+ }
+
+ llvm_unreachable("Invalid Type::Class!");
+}
+
+bool ASTContext::FunctionTypesMatchOnNSConsumedAttrs(
+ const FunctionProtoType *FromFunctionType,
+ const FunctionProtoType *ToFunctionType) {
+ if (FromFunctionType->hasAnyConsumedParams() !=
+ ToFunctionType->hasAnyConsumedParams())
+ return false;
+ FunctionProtoType::ExtProtoInfo FromEPI =
+ FromFunctionType->getExtProtoInfo();
+ FunctionProtoType::ExtProtoInfo ToEPI =
+ ToFunctionType->getExtProtoInfo();
+ if (FromEPI.ConsumedParameters && ToEPI.ConsumedParameters)
+ for (unsigned i = 0, n = FromFunctionType->getNumParams(); i != n; ++i) {
+ if (FromEPI.ConsumedParameters[i] != ToEPI.ConsumedParameters[i])
+ return false;
+ }
+ return true;
+}
+
+void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) {
+ ObjCLayouts[CD] = nullptr;
+}
+
+/// mergeObjCGCQualifiers - This routine merges the ObjC GC attributes of
+/// 'LHS' and 'RHS' and returns the merged version, including for function
+/// return types.
+QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
+ QualType LHSCan = getCanonicalType(LHS),
+ RHSCan = getCanonicalType(RHS);
+ // If two types are identical, they are compatible.
+ if (LHSCan == RHSCan)
+ return LHS;
+ if (RHSCan->isFunctionType()) {
+ if (!LHSCan->isFunctionType())
+ return QualType();
+ QualType OldReturnType =
+ cast<FunctionType>(RHSCan.getTypePtr())->getReturnType();
+ QualType NewReturnType =
+ cast<FunctionType>(LHSCan.getTypePtr())->getReturnType();
+ QualType ResReturnType =
+ mergeObjCGCQualifiers(NewReturnType, OldReturnType);
+ if (ResReturnType.isNull())
+ return QualType();
+ if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) {
+ // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo();
+ // In either case, use OldReturnType to build the new function type.
+ const FunctionType *F = LHS->getAs<FunctionType>();
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(F)) {
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ EPI.ExtInfo = getFunctionExtInfo(LHS);
+ QualType ResultType =
+ getFunctionType(OldReturnType, FPT->getParamTypes(), EPI);
+ return ResultType;
+ }
+ }
+ return QualType();
+ }
+
+ // If the qualifiers are different, the types can still be merged.
+ Qualifiers LQuals = LHSCan.getLocalQualifiers();
+ Qualifiers RQuals = RHSCan.getLocalQualifiers();
+ if (LQuals != RQuals) {
+ // If any of these qualifiers are different, we have a type mismatch.
+ if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
+ LQuals.getAddressSpace() != RQuals.getAddressSpace())
+ return QualType();
+
+ // Exactly one GC qualifier difference is allowed: __strong is
+ // okay if the other type has no GC qualifier but is an Objective
+ // C object pointer (i.e. implicitly strong by default). We fix
+ // this by pretending that the unqualified type was actually
+ // qualified __strong.
+ Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
+ Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
+ assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");
+
+ if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
+ return QualType();
+
+ if (GC_L == Qualifiers::Strong)
+ return LHS;
+ if (GC_R == Qualifiers::Strong)
+ return RHS;
+ return QualType();
+ }
+
+ if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) {
+ QualType LHSBaseQT = LHS->getAs<ObjCObjectPointerType>()->getPointeeType();
+ QualType RHSBaseQT = RHS->getAs<ObjCObjectPointerType>()->getPointeeType();
+ QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT);
+ if (ResQT == LHSBaseQT)
+ return LHS;
+ if (ResQT == RHSBaseQT)
+ return RHS;
+ }
+ return QualType();
+}
+
+//===----------------------------------------------------------------------===//
+// Integer Predicates
+//===----------------------------------------------------------------------===//
+
+unsigned ASTContext::getIntWidth(QualType T) const {
+ if (const EnumType *ET = T->getAs<EnumType>())
+ T = ET->getDecl()->getIntegerType();
+ if (T->isBooleanType())
+ return 1;
+ // For builtin types, just use the standard type sizing method
+ return (unsigned)getTypeSize(T);
+}
+
+QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
+ assert(T->hasSignedIntegerRepresentation() && "Unexpected type");
+
+ // Turn <4 x signed int> -> <4 x unsigned int>
+ if (const VectorType *VTy = T->getAs<VectorType>())
+ return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()),
+ VTy->getNumElements(), VTy->getVectorKind());
+
+ // For enums, we return the unsigned version of the base type.
+ if (const EnumType *ETy = T->getAs<EnumType>())
+ T = ETy->getDecl()->getIntegerType();
+
+ const BuiltinType *BTy = T->getAs<BuiltinType>();
+ assert(BTy && "Unexpected signed integer type");
+ switch (BTy->getKind()) {
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ return UnsignedCharTy;
+ case BuiltinType::Short:
+ return UnsignedShortTy;
+ case BuiltinType::Int:
+ return UnsignedIntTy;
+ case BuiltinType::Long:
+ return UnsignedLongTy;
+ case BuiltinType::LongLong:
+ return UnsignedLongLongTy;
+ case BuiltinType::Int128:
+ return UnsignedInt128Ty;
+ default:
+ llvm_unreachable("Unexpected signed integer type");
+ }
+}
+
+ASTMutationListener::~ASTMutationListener() { }
+
+void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD,
+ QualType ReturnType) {}
+
+//===----------------------------------------------------------------------===//
+// Builtin Type Computation
+//===----------------------------------------------------------------------===//
+
+/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing
+/// the pointer over the consumed characters. It returns the resultant type.
+/// If AllowTypeModifiers is false then modifiers like '*' are not parsed,
+/// just basic types. This allows "v2i*" to be parsed as a pointer to a v2i
+/// instead of a vector of "i*".
+///
+/// RequiresICE is filled in on return to indicate whether the value is required
+/// to be an Integer Constant Expression.
+static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
+ ASTContext::GetBuiltinTypeError &Error,
+ bool &RequiresICE,
+ bool AllowTypeModifiers) {
+ // Modifiers.
+ int HowLong = 0;
+ bool Signed = false, Unsigned = false;
+ RequiresICE = false;
+
+ // Read the prefixed modifiers first.
+ bool Done = false;
+ while (!Done) {
+ switch (*Str++) {
+ default: Done = true; --Str; break;
+ case 'I':
+ RequiresICE = true;
+ break;
+ case 'S':
+ assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!");
+ assert(!Signed && "Can't use 'S' modifier multiple times!");
+ Signed = true;
+ break;
+ case 'U':
+ assert(!Signed && "Can't use both 'S' and 'U' modifiers!");
+ assert(!Unsigned && "Can't use 'U' modifier multiple times!");
+ Unsigned = true;
+ break;
+ case 'L':
+ assert(HowLong <= 2 && "Can't have LLLL modifier");
+ ++HowLong;
+ break;
+ case 'W':
+ // This modifier represents int64 type.
+ assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!");
+ switch (Context.getTargetInfo().getInt64Type()) {
+ default:
+ llvm_unreachable("Unexpected integer type");
+ case TargetInfo::SignedLong:
+ HowLong = 1;
+ break;
+ case TargetInfo::SignedLongLong:
+ HowLong = 2;
+ break;
+ }
+ }
+ }
+
+ QualType Type;
+
+ // Read the base type.
+ switch (*Str++) {
+ default: llvm_unreachable("Unknown builtin type letter!");
+ case 'v':
+ assert(HowLong == 0 && !Signed && !Unsigned &&
+ "Bad modifiers used with 'v'!");
+ Type = Context.VoidTy;
+ break;
+ case 'h':
+ assert(HowLong == 0 && !Signed && !Unsigned &&
+ "Bad modifiers used with 'h'!");
+ Type = Context.HalfTy;
+ break;
+ case 'f':
+ assert(HowLong == 0 && !Signed && !Unsigned &&
+ "Bad modifiers used with 'f'!");
+ Type = Context.FloatTy;
+ break;
+ case 'd':
+ assert(HowLong < 2 && !Signed && !Unsigned &&
+ "Bad modifiers used with 'd'!");
+ if (HowLong)
+ Type = Context.LongDoubleTy;
+ else
+ Type = Context.DoubleTy;
+ break;
+ case 's':
+ assert(HowLong == 0 && "Bad modifiers used with 's'!");
+ if (Unsigned)
+ Type = Context.UnsignedShortTy;
+ else
+ Type = Context.ShortTy;
+ break;
+ case 'i':
+ if (HowLong == 3)
+ Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty;
+ else if (HowLong == 2)
+ Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy;
+ else if (HowLong == 1)
+ Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy;
+ else
+ Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy;
+ break;
+ case 'c':
+ assert(HowLong == 0 && "Bad modifiers used with 'c'!");
+ if (Signed)
+ Type = Context.SignedCharTy;
+ else if (Unsigned)
+ Type = Context.UnsignedCharTy;
+ else
+ Type = Context.CharTy;
+ break;
+ case 'b': // boolean
+ assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!");
+ Type = Context.BoolTy;
+ break;
+ case 'z': // size_t.
+ assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!");
+ Type = Context.getSizeType();
+ break;
+ case 'F':
+ Type = Context.getCFConstantStringType();
+ break;
+ case 'G':
+ Type = Context.getObjCIdType();
+ break;
+ case 'H':
+ Type = Context.getObjCSelType();
+ break;
+ case 'M':
+ Type = Context.getObjCSuperType();
+ break;
+ case 'a':
+ Type = Context.getBuiltinVaListType();
+ assert(!Type.isNull() && "builtin va list type not initialized!");
+ break;
+ case 'A':
+ // This is a "reference" to a va_list; however, what exactly
+ // this means depends on how va_list is defined. There are two
+ // different kinds of va_list: ones passed by value, and ones
+ // passed by reference. An example of a by-value va_list is
+ // x86, where va_list is a char*. An example of by-ref va_list
+ // is x86-64, where va_list is a __va_list_tag[1]. For x86,
+ // we want this argument to be a char*&; for x86-64, we want
+ // it to be a __va_list_tag*.
+ Type = Context.getBuiltinVaListType();
+ assert(!Type.isNull() && "builtin va list type not initialized!");
+ if (Type->isArrayType())
+ Type = Context.getArrayDecayedType(Type);
+ else
+ Type = Context.getLValueReferenceType(Type);
+ break;
+ case 'V': {
+ char *End;
+ unsigned NumElements = strtoul(Str, &End, 10);
+ assert(End != Str && "Missing vector size");
+ Str = End;
+
+ QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
+ RequiresICE, false);
+ assert(!RequiresICE && "Can't require vector ICE");
+
+ // TODO: No way to make AltiVec vectors in builtins yet.
+ Type = Context.getVectorType(ElementType, NumElements,
+ VectorType::GenericVector);
+ break;
+ }
+ case 'E': {
+ char *End;
+
+ unsigned NumElements = strtoul(Str, &End, 10);
+ assert(End != Str && "Missing vector size");
+
+ Str = End;
+
+ QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
+ false);
+ Type = Context.getExtVectorType(ElementType, NumElements);
+ break;
+ }
+ case 'X': {
+ QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
+ false);
+ assert(!RequiresICE && "Can't require complex ICE");
+ Type = Context.getComplexType(ElementType);
+ break;
+ }
+ case 'Y' : {
+ Type = Context.getPointerDiffType();
+ break;
+ }
+ case 'P':
+ Type = Context.getFILEType();
+ if (Type.isNull()) {
+ Error = ASTContext::GE_Missing_stdio;
+ return QualType();
+ }
+ break;
+ case 'J':
+ if (Signed)
+ Type = Context.getsigjmp_bufType();
+ else
+ Type = Context.getjmp_bufType();
+
+ if (Type.isNull()) {
+ Error = ASTContext::GE_Missing_setjmp;
+ return QualType();
+ }
+ break;
+ case 'K':
+ assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!");
+ Type = Context.getucontext_tType();
+
+ if (Type.isNull()) {
+ Error = ASTContext::GE_Missing_ucontext;
+ return QualType();
+ }
+ break;
+ case 'p':
+ Type = Context.getProcessIDType();
+ break;
+ }
+
+ // If there are modifiers and if we're allowed to parse them, go for it.
+ Done = !AllowTypeModifiers;
+ while (!Done) {
+ switch (char c = *Str++) {
+ default: Done = true; --Str; break;
+ case '*':
+ case '&': {
+ // Both pointers and references can have their pointee types
+ // qualified with an address space.
+ char *End;
+ unsigned AddrSpace = strtoul(Str, &End, 10);
+ if (End != Str && AddrSpace != 0) {
+ Type = Context.getAddrSpaceQualType(Type, AddrSpace);
+ Str = End;
+ }
+ if (c == '*')
+ Type = Context.getPointerType(Type);
+ else
+ Type = Context.getLValueReferenceType(Type);
+ break;
+ }
+ // FIXME: There's no way to have a built-in with an rvalue ref arg.
+ case 'C':
+ Type = Type.withConst();
+ break;
+ case 'D':
+ Type = Context.getVolatileType(Type);
+ break;
+ case 'R':
+ Type = Type.withRestrict();
+ break;
+ }
+ }
+
+ assert((!RequiresICE || Type->isIntegralOrEnumerationType()) &&
+ "Integer constant 'I' type must be an integer");
+
+ return Type;
+}
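+
+// Worked examples of the encoding decoded above, using only the letters
+// handled in the switches: "v" is void; "ULLi" is unsigned long long;
+// "cC*" is 'const char *' ('c' char, then suffix 'C' const and '*' pointer);
+// "V4f" is a generic vector of 4 floats; and a leading 'I' marks a value
+// that must be an integer constant expression.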
+
+/// GetBuiltinType - Return the type for the specified builtin.
+QualType ASTContext::GetBuiltinType(unsigned Id,
+ GetBuiltinTypeError &Error,
+ unsigned *IntegerConstantArgs) const {
+ const char *TypeStr = BuiltinInfo.getTypeString(Id);
+
+ SmallVector<QualType, 8> ArgTypes;
+
+ bool RequiresICE = false;
+ Error = GE_None;
+ QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error,
+ RequiresICE, true);
+ if (Error != GE_None)
+ return QualType();
+
+ assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE");
+
+ while (TypeStr[0] && TypeStr[0] != '.') {
+ QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true);
+ if (Error != GE_None)
+ return QualType();
+
+ // If this argument is required to be an IntegerConstantExpression and the
+ // caller cares, fill in the bitmask we return.
+ if (RequiresICE && IntegerConstantArgs)
+ *IntegerConstantArgs |= 1 << ArgTypes.size();
+
+ // Do array -> pointer decay. The builtin should use the decayed type.
+ if (Ty->isArrayType())
+ Ty = getArrayDecayedType(Ty);
+
+ ArgTypes.push_back(Ty);
+ }
+
+ if (Id == Builtin::BI__GetExceptionInfo)
+ return QualType();
+
+ assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
+ "'.' should only occur at end of builtin type list!");
+
+ FunctionType::ExtInfo EI(CC_C);
+ if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true);
+
+ bool Variadic = (TypeStr[0] == '.');
+
+ // We really shouldn't be making a no-proto type here, especially in C++.
+ if (ArgTypes.empty() && Variadic)
+ return getFunctionNoProtoType(ResType, EI);
+
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExtInfo = EI;
+ EPI.Variadic = Variadic;
+
+ return getFunctionType(ResType, ArgTypes, EPI);
+}
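+
+// For example, a builtin declared with the type string "zcC*" (the encoding
+// used for strlen-like builtins in Builtins.def) decodes to a return type of
+// size_t ('z') and a single 'const char *' parameter, i.e. the function type
+// 'size_t (const char *)'.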
+
+static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
+ const FunctionDecl *FD) {
+ if (!FD->isExternallyVisible())
+ return GVA_Internal;
+
+ GVALinkage External = GVA_StrongExternal;
+ switch (FD->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ External = GVA_StrongExternal;
+ break;
+
+ case TSK_ExplicitInstantiationDefinition:
+ return GVA_StrongODR;
+
+ // C++11 [temp.explicit]p10:
+ // [ Note: The intent is that an inline function that is the subject of
+ // an explicit instantiation declaration will still be implicitly
+ // instantiated when used so that the body can be considered for
+ // inlining, but that no out-of-line copy of the inline function would be
+ // generated in the translation unit. -- end note ]
+ case TSK_ExplicitInstantiationDeclaration:
+ return GVA_AvailableExternally;
+
+ case TSK_ImplicitInstantiation:
+ External = GVA_DiscardableODR;
+ break;
+ }
+
+ if (!FD->isInlined())
+ return External;
+
+ if ((!Context.getLangOpts().CPlusPlus &&
+ !Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ !FD->hasAttr<DLLExportAttr>()) ||
+ FD->hasAttr<GNUInlineAttr>()) {
+ // FIXME: This doesn't match gcc's behavior for dllexport inline functions.
+
+ // GNU or C99 inline semantics. Determine whether this symbol should be
+ // externally visible.
+ if (FD->isInlineDefinitionExternallyVisible())
+ return External;
+
+ // C99 inline semantics, where the symbol is not externally visible.
+ return GVA_AvailableExternally;
+ }
+
+ // Functions specified with extern and inline in -fms-compatibility mode
+ // forcibly get emitted. While the body of the function cannot be later
+ // replaced, the function definition cannot be discarded.
+ if (FD->isMSExternInline())
+ return GVA_StrongODR;
+
+ return GVA_DiscardableODR;
+}
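+
+// A sketch of the inline cases above for a C99 translation unit:
+//
+//   inline int f(int x) { return x; }         // inline definition that is
+//                                             // not externally visible:
+//                                             // GVA_AvailableExternally
+//   extern inline int g(int x) { return x; }  // externally-visible
+//                                             // definition: GVA_StrongExternal
+//
+// In C++, a plain 'inline' function instead gets GVA_DiscardableODR, so any
+// translation unit may emit (and later discard) its own copy.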
+
+static GVALinkage adjustGVALinkageForAttributes(GVALinkage L, const Decl *D) {
+ // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx
+ // dllexport/dllimport on inline functions.
+ if (D->hasAttr<DLLImportAttr>()) {
+ if (L == GVA_DiscardableODR || L == GVA_StrongODR)
+ return GVA_AvailableExternally;
+ } else if (D->hasAttr<DLLExportAttr>() || D->hasAttr<CUDAGlobalAttr>()) {
+ if (L == GVA_DiscardableODR)
+ return GVA_StrongODR;
+ }
+ return L;
+}
+
+GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
+ return adjustGVALinkageForAttributes(basicGVALinkageForFunction(*this, FD),
+ FD);
+}
+
+static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
+ const VarDecl *VD) {
+ if (!VD->isExternallyVisible())
+ return GVA_Internal;
+
+ if (VD->isStaticLocal()) {
+ GVALinkage StaticLocalLinkage = GVA_DiscardableODR;
+ const DeclContext *LexicalContext = VD->getParentFunctionOrMethod();
+ while (LexicalContext && !isa<FunctionDecl>(LexicalContext))
+ LexicalContext = LexicalContext->getLexicalParent();
+
+ // Let the static local variable inherit its linkage from the nearest
+ // enclosing function.
+ if (LexicalContext)
+ StaticLocalLinkage =
+ Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext));
+
+ // GVA_StrongODR function linkage is stronger than what we need;
+ // downgrade to GVA_DiscardableODR, which allows us to discard the
+ // variable if we never end up needing it.
+ return StaticLocalLinkage == GVA_StrongODR ? GVA_DiscardableODR
+ : StaticLocalLinkage;
+ }
+
+ // MSVC treats in-class initialized static data members as definitions.
+ // By giving them non-strong linkage, out-of-line definitions won't
+ // cause link errors.
+ if (Context.isMSStaticDataMemberInlineDefinition(VD))
+ return GVA_DiscardableODR;
+
+ switch (VD->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ return GVA_StrongExternal;
+
+ case TSK_ExplicitSpecialization:
+ return Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ VD->isStaticDataMember()
+ ? GVA_StrongODR
+ : GVA_StrongExternal;
+
+ case TSK_ExplicitInstantiationDefinition:
+ return GVA_StrongODR;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ return GVA_AvailableExternally;
+
+ case TSK_ImplicitInstantiation:
+ return GVA_DiscardableODR;
+ }
+
+ llvm_unreachable("Invalid Linkage!");
+}
+
+GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) {
+ return adjustGVALinkageForAttributes(basicGVALinkageForVariable(*this, VD),
+ VD);
+}
+
+bool ASTContext::DeclMustBeEmitted(const Decl *D) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (!VD->isFileVarDecl())
+ return false;
+ // Global named register variables (GNU extension) are never emitted.
+ if (VD->getStorageClass() == SC_Register)
+ return false;
+ if (VD->getDescribedVarTemplate() ||
+ isa<VarTemplatePartialSpecializationDecl>(VD))
+ return false;
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // We never need to emit an uninstantiated function template.
+ if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
+ return false;
+ } else if (isa<OMPThreadPrivateDecl>(D))
+ return true;
+ else
+ return false;
+
+ // If this is a member of a class template, we do not need to emit it.
+ if (D->getDeclContext()->isDependentContext())
+ return false;
+
+ // Weak references don't produce any output by themselves.
+ if (D->hasAttr<WeakRefAttr>())
+ return false;
+
+ // Aliases and used decls are required.
+ if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
+ return true;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // Forward declarations aren't required.
+ if (!FD->doesThisDeclarationHaveABody())
+ return FD->doesDeclarationForceExternallyVisibleDefinition();
+
+ // Constructors and destructors are required.
+ if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
+ return true;
+
+ // The key function for a class is required. This rule only comes
+ // into play when inline functions can be key functions, though.
+ if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ const CXXRecordDecl *RD = MD->getParent();
+ if (MD->isOutOfLine() && RD->isDynamicClass()) {
+ const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD);
+ if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
+ return true;
+ }
+ }
+ }
+
+ GVALinkage Linkage = GetGVALinkageForFunction(FD);
+
+ // static, static inline, always_inline, and extern inline functions can
+ // always be deferred. Normal inline functions can be deferred in C99/C++.
+ // Implicit template instantiations can also be deferred in C++.
+ if (Linkage == GVA_Internal || Linkage == GVA_AvailableExternally ||
+ Linkage == GVA_DiscardableODR)
+ return false;
+ return true;
+ }
+
+ const VarDecl *VD = cast<VarDecl>(D);
+ assert(VD->isFileVarDecl() && "Expected file scoped var");
+
+ if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly &&
+ !isMSStaticDataMemberInlineDefinition(VD))
+ return false;
+
+ // Variables that can be needed in other TUs are required.
+ GVALinkage L = GetGVALinkageForVariable(VD);
+ if (L != GVA_Internal && L != GVA_AvailableExternally &&
+ L != GVA_DiscardableODR)
+ return true;
+
+ // Variables that have destruction with side-effects are required.
+ if (VD->getType().isDestructedType())
+ return true;
+
+ // Variables that have initialization with side-effects are required.
+ if (VD->getInit() && VD->getInit()->HasSideEffects(*this) &&
+ !VD->evaluateValue())
+ return true;
+
+ return false;
+}
+
+CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
+ bool IsCXXMethod) const {
+ // Pass through to the C++ ABI object
+ if (IsCXXMethod)
+ return ABI->getDefaultMethodCallConv(IsVariadic);
+
+ if (LangOpts.MRTD && !IsVariadic) return CC_X86StdCall;
+
+ return Target->getDefaultCallingConv(TargetInfo::CCMT_Unknown);
+}
+
+bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
+ // Pass through to the C++ ABI object
+ return ABI->isNearlyEmpty(RD);
+}
+
+VTableContextBase *ASTContext::getVTableContext() {
+ if (!VTContext.get()) {
+ if (Target->getCXXABI().isMicrosoft())
+ VTContext.reset(new MicrosoftVTableContext(*this));
+ else
+ VTContext.reset(new ItaniumVTableContext(*this));
+ }
+ return VTContext.get();
+}
+
+MangleContext *ASTContext::createMangleContext() {
+ switch (Target->getCXXABI().getKind()) {
+ case TargetCXXABI::GenericAArch64:
+ case TargetCXXABI::GenericItanium:
+ case TargetCXXABI::GenericARM:
+ case TargetCXXABI::GenericMIPS:
+ case TargetCXXABI::iOS:
+ case TargetCXXABI::iOS64:
+ case TargetCXXABI::WebAssembly:
+ case TargetCXXABI::WatchOS:
+ return ItaniumMangleContext::create(*this, getDiagnostics());
+ case TargetCXXABI::Microsoft:
+ return MicrosoftMangleContext::create(*this, getDiagnostics());
+ }
+ llvm_unreachable("Unsupported ABI");
+}
+
+CXXABI::~CXXABI() {}
+
+size_t ASTContext::getSideTableAllocatedMemory() const {
+ return ASTRecordLayouts.getMemorySize() +
+ llvm::capacity_in_bytes(ObjCLayouts) +
+ llvm::capacity_in_bytes(KeyFunctions) +
+ llvm::capacity_in_bytes(ObjCImpls) +
+ llvm::capacity_in_bytes(BlockVarCopyInits) +
+ llvm::capacity_in_bytes(DeclAttrs) +
+ llvm::capacity_in_bytes(TemplateOrInstantiation) +
+ llvm::capacity_in_bytes(InstantiatedFromUsingDecl) +
+ llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) +
+ llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) +
+ llvm::capacity_in_bytes(OverriddenMethods) +
+ llvm::capacity_in_bytes(Types) +
+ llvm::capacity_in_bytes(VariableArrayTypes) +
+ llvm::capacity_in_bytes(ClassScopeSpecializationPattern);
+}
+
+/// getIntTypeForBitwidth -
+/// Returns the integer QualType of the specified bitwidth and signedness.
+/// Returns the empty type if the target has no appropriate integer type.
+QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
+ unsigned Signed) const {
+ TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed);
+ CanQualType QualTy = getFromTargetType(Ty);
+ if (!QualTy && DestWidth == 128)
+ return Signed ? Int128Ty : UnsignedInt128Ty;
+ return QualTy;
+}
+
+/// getRealTypeForBitwidth -
+/// Returns the floating-point QualType of the specified bitwidth.
+/// Returns the empty type if the target has no appropriate floating-point
+/// type.
+QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth) const {
+ TargetInfo::RealType Ty = getTargetInfo().getRealTypeByWidth(DestWidth);
+ switch (Ty) {
+ case TargetInfo::Float:
+ return FloatTy;
+ case TargetInfo::Double:
+ return DoubleTy;
+ case TargetInfo::LongDouble:
+ return LongDoubleTy;
+ case TargetInfo::NoFloat:
+ return QualType();
+ }
+
+ llvm_unreachable("Unhandled TargetInfo::RealType value");
+}
+
+void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
+ if (Number > 1)
+ MangleNumbers[ND] = Number;
+}
+
+unsigned ASTContext::getManglingNumber(const NamedDecl *ND) const {
+ llvm::DenseMap<const NamedDecl *, unsigned>::const_iterator I =
+ MangleNumbers.find(ND);
+ return I != MangleNumbers.end() ? I->second : 1;
+}
+
+void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
+ if (Number > 1)
+ StaticLocalNumbers[VD] = Number;
+}
+
+unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
+ llvm::DenseMap<const VarDecl *, unsigned>::const_iterator I =
+ StaticLocalNumbers.find(VD);
+ return I != StaticLocalNumbers.end() ? I->second : 1;
+}
+
+MangleNumberingContext &
+ASTContext::getManglingNumberContext(const DeclContext *DC) {
+ assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
+ MangleNumberingContext *&MCtx = MangleNumberingContexts[DC];
+ if (!MCtx)
+ MCtx = createMangleNumberingContext();
+ return *MCtx;
+}
+
+MangleNumberingContext *ASTContext::createMangleNumberingContext() const {
+ return ABI->createMangleNumberingContext();
+}
+
+const CXXConstructorDecl *
+ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) {
+ return ABI->getCopyConstructorForExceptionObject(
+ cast<CXXRecordDecl>(RD->getFirstDecl()));
+}
+
+void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
+ CXXConstructorDecl *CD) {
+ return ABI->addCopyConstructorForExceptionObject(
+ cast<CXXRecordDecl>(RD->getFirstDecl()),
+ cast<CXXConstructorDecl>(CD->getFirstDecl()));
+}
+
+void ASTContext::addDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
+ unsigned ParmIdx, Expr *DAE) {
+ ABI->addDefaultArgExprForConstructor(
+ cast<CXXConstructorDecl>(CD->getFirstDecl()), ParmIdx, DAE);
+}
+
+Expr *ASTContext::getDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
+ unsigned ParmIdx) {
+ return ABI->getDefaultArgExprForConstructor(
+ cast<CXXConstructorDecl>(CD->getFirstDecl()), ParmIdx);
+}
+
+void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD,
+ TypedefNameDecl *DD) {
+ return ABI->addTypedefNameForUnnamedTagDecl(TD, DD);
+}
+
+TypedefNameDecl *
+ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) {
+ return ABI->getTypedefNameForUnnamedTagDecl(TD);
+}
+
+void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD,
+ DeclaratorDecl *DD) {
+ return ABI->addDeclaratorForUnnamedTagDecl(TD, DD);
+}
+
+DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) {
+ return ABI->getDeclaratorForUnnamedTagDecl(TD);
+}
+
+void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
+ ParamIndices[D] = index;
+}
+
+unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
+ ParameterIndexTable::const_iterator I = ParamIndices.find(D);
+ assert(I != ParamIndices.end() &&
+ "ParmIndices lacks entry set by ParmVarDecl");
+ return I->second;
+}
+
+APValue *
+ASTContext::getMaterializedTemporaryValue(const MaterializeTemporaryExpr *E,
+ bool MayCreate) {
+ assert(E && E->getStorageDuration() == SD_Static &&
+ "don't need to cache the computed value for this temporary");
+ if (MayCreate) {
+ APValue *&MTVI = MaterializedTemporaryValues[E];
+ if (!MTVI)
+ MTVI = new (*this) APValue;
+ return MTVI;
+ }
+
+ return MaterializedTemporaryValues.lookup(E);
+}
+
+bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
+ const llvm::Triple &T = getTargetInfo().getTriple();
+ if (!T.isOSDarwin())
+ return false;
+
+ if (!(T.isiOS() && T.isOSVersionLT(7)) &&
+ !(T.isMacOSX() && T.isOSVersionLT(10, 9)))
+ return false;
+
+ QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
+ CharUnits sizeChars = getTypeSizeInChars(AtomicTy);
+ uint64_t Size = sizeChars.getQuantity();
+ CharUnits alignChars = getTypeAlignInChars(AtomicTy);
+ unsigned Align = alignChars.getQuantity();
+ unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
+ return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits);
+}
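+
+// A sketch of the size/alignment test above (the struct is hypothetical):
+//
+//   _Atomic struct { char c[3]; } a;   // size 3, alignment 1
+//
+// Size != Align here, so on the affected Darwin targets operations on 'a'
+// would lower to one of the unsupported runtime libcalls.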
+
+namespace {
+
+ast_type_traits::DynTypedNode getSingleDynTypedNodeFromParentMap(
+ ASTContext::ParentMapPointers::mapped_type U) {
+ if (const auto *D = U.dyn_cast<const Decl *>())
+ return ast_type_traits::DynTypedNode::create(*D);
+ if (const auto *S = U.dyn_cast<const Stmt *>())
+ return ast_type_traits::DynTypedNode::create(*S);
+ return *U.get<ast_type_traits::DynTypedNode *>();
+}
+
+/// Template specializations to abstract away from pointers and TypeLocs.
+/// @{
+template <typename T>
+ast_type_traits::DynTypedNode createDynTypedNode(const T &Node) {
+ return ast_type_traits::DynTypedNode::create(*Node);
+}
+template <>
+ast_type_traits::DynTypedNode createDynTypedNode(const TypeLoc &Node) {
+ return ast_type_traits::DynTypedNode::create(Node);
+}
+template <>
+ast_type_traits::DynTypedNode
+createDynTypedNode(const NestedNameSpecifierLoc &Node) {
+ return ast_type_traits::DynTypedNode::create(Node);
+}
+/// @}
+
+/// \brief A \c RecursiveASTVisitor that builds a map from nodes to their
+/// parents as defined by the \c RecursiveASTVisitor.
+///
+/// Note that the relationship described here is purely in terms of AST
+/// traversal - there are other relationships (for example declaration context)
+/// in the AST that are better modeled by special matchers.
+///
+/// FIXME: Currently only builds up the map using \c Stmt and \c Decl nodes.
+class ParentMapASTVisitor : public RecursiveASTVisitor<ParentMapASTVisitor> {
+public:
+  /// \brief Builds and returns the translation unit's parent map.
+  ///
+  /// The caller takes ownership of the returned \c ParentMap.
+  static std::pair<ASTContext::ParentMapPointers *,
+                   ASTContext::ParentMapOtherNodes *>
+  buildMap(TranslationUnitDecl &TU) {
+    ParentMapASTVisitor Visitor(new ASTContext::ParentMapPointers,
+                                new ASTContext::ParentMapOtherNodes);
+    Visitor.TraverseDecl(&TU);
+    return std::make_pair(Visitor.Parents, Visitor.OtherParents);
+  }
+
+private:
+  typedef RecursiveASTVisitor<ParentMapASTVisitor> VisitorBase;
+
+  ParentMapASTVisitor(ASTContext::ParentMapPointers *Parents,
+                      ASTContext::ParentMapOtherNodes *OtherParents)
+      : Parents(Parents), OtherParents(OtherParents) {}
+
+  bool shouldVisitTemplateInstantiations() const {
+    return true;
+  }
+  bool shouldVisitImplicitCode() const {
+    return true;
+  }
+
+  template <typename T, typename MapNodeTy, typename BaseTraverseFn,
+            typename MapTy>
+  bool TraverseNode(T Node, MapNodeTy MapNode,
+                    BaseTraverseFn BaseTraverse, MapTy *Parents) {
+    if (!Node)
+      return true;
+    if (ParentStack.size() > 0) {
+      // FIXME: Currently we add the same parent multiple times, but only
+      // when no memoization data is available for the type.
+      // For example when we visit all subexpressions of template
+      // instantiations; this is suboptimal, but benign: the only way to
+      // visit those is with hasAncestor / hasParent, and those do not create
+      // new matches.
+      // The plan is to enable DynTypedNode to be storable in a map or hash
+      // map. The main problem there is to implement hash functions /
+      // comparison operators for all types that DynTypedNode supports that
+      // do not have pointer identity.
+      auto &NodeOrVector = (*Parents)[MapNode];
+      if (NodeOrVector.isNull()) {
+        if (const auto *D = ParentStack.back().get<Decl>())
+          NodeOrVector = D;
+        else if (const auto *S = ParentStack.back().get<Stmt>())
+          NodeOrVector = S;
+        else
+          NodeOrVector =
+              new ast_type_traits::DynTypedNode(ParentStack.back());
+      } else {
+        if (!NodeOrVector.template is<ASTContext::ParentVector *>()) {
+          auto *Vector = new ASTContext::ParentVector(
+              1, getSingleDynTypedNodeFromParentMap(NodeOrVector));
+          if (auto *Node =
+                  NodeOrVector
+                      .template dyn_cast<ast_type_traits::DynTypedNode *>())
+            delete Node;
+          NodeOrVector = Vector;
+        }
+
+        auto *Vector =
+            NodeOrVector.template get<ASTContext::ParentVector *>();
+        // Skip duplicates for types that have memoization data.
+        // We must check that the type has memoization data before calling
+        // std::find() because DynTypedNode::operator== can't compare all
+        // types.
+        bool Found = ParentStack.back().getMemoizationData() &&
+                     std::find(Vector->begin(), Vector->end(),
+                               ParentStack.back()) != Vector->end();
+        if (!Found)
+          Vector->push_back(ParentStack.back());
+      }
+    }
+    ParentStack.push_back(createDynTypedNode(Node));
+    bool Result = BaseTraverse();
+    ParentStack.pop_back();
+    return Result;
+  }
+
+  bool TraverseDecl(Decl *DeclNode) {
+    return TraverseNode(DeclNode, DeclNode,
+                        [&] { return VisitorBase::TraverseDecl(DeclNode); },
+                        Parents);
+  }
+
+  bool TraverseStmt(Stmt *StmtNode) {
+    return TraverseNode(StmtNode, StmtNode,
+                        [&] { return VisitorBase::TraverseStmt(StmtNode); },
+                        Parents);
+  }
+
+  bool TraverseTypeLoc(TypeLoc TypeLocNode) {
+    return TraverseNode(
+        TypeLocNode, ast_type_traits::DynTypedNode::create(TypeLocNode),
+        [&] { return VisitorBase::TraverseTypeLoc(TypeLocNode); },
+        OtherParents);
+  }
+
+  bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNSLocNode) {
+    return TraverseNode(
+        NNSLocNode, ast_type_traits::DynTypedNode::create(NNSLocNode),
+        [&] {
+          return VisitorBase::TraverseNestedNameSpecifierLoc(NNSLocNode);
+        },
+        OtherParents);
+  }
+
+  ASTContext::ParentMapPointers *Parents;
+  ASTContext::ParentMapOtherNodes *OtherParents;
+  llvm::SmallVector<ast_type_traits::DynTypedNode, 16> ParentStack;
+
+  friend class RecursiveASTVisitor<ParentMapASTVisitor>;
+};
+
+} // anonymous namespace
+
+template <typename NodeTy, typename MapTy>
+static ASTContext::DynTypedNodeList getDynNodeFromMap(const NodeTy &Node,
+                                                      const MapTy &Map) {
+  auto I = Map.find(Node);
+  if (I == Map.end()) {
+    return llvm::ArrayRef<ast_type_traits::DynTypedNode>();
+  }
+  if (auto *V = I->second.template dyn_cast<ASTContext::ParentVector *>()) {
+    return llvm::makeArrayRef(*V);
+  }
+  return getSingleDynTypedNodeFromParentMap(I->second);
+}
+
+ASTContext::DynTypedNodeList
+ASTContext::getParents(const ast_type_traits::DynTypedNode &Node) {
+  if (!PointerParents) {
+    // We always need to run over the whole translation unit, as
+    // hasAncestor can escape any subtree.
+    auto Maps = ParentMapASTVisitor::buildMap(*getTranslationUnitDecl());
+    PointerParents.reset(Maps.first);
+    OtherParents.reset(Maps.second);
+  }
+  if (Node.getNodeKind().hasPointerIdentity())
+    return getDynNodeFromMap(Node.getMemoizationData(), *PointerParents);
+  return getDynNodeFromMap(Node, *OtherParents);
+}
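+// Editor's note: a minimal usage sketch, not part of the original sources.
+// Assuming an ASTContext &Ctx and an Expr *E obtained elsewhere, climbing to
+// the enclosing FunctionDecl through the lazily-built parent map above could
+// look like this:
+//
+//   ast_type_traits::DynTypedNode Cur =
+//       ast_type_traits::DynTypedNode::create(*E);
+//   const FunctionDecl *Enclosing = nullptr;
+//   while (!Enclosing) {
+//     ASTContext::DynTypedNodeList Parents = Ctx.getParents(Cur);
+//     if (Parents.empty())
+//       break;                // reached the TranslationUnitDecl
+//     Cur = Parents[0];       // template code may yield several parents
+//     Enclosing = Cur.get<FunctionDecl>();
+//   }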
+
+bool
+ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
+                                const ObjCMethodDecl *MethodImpl) {
+  // No point trying to match an unavailable/deprecated method.
+  if (MethodDecl->hasAttr<UnavailableAttr>()
+      || MethodDecl->hasAttr<DeprecatedAttr>())
+    return false;
+  if (MethodDecl->getObjCDeclQualifier() !=
+      MethodImpl->getObjCDeclQualifier())
+    return false;
+  if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType()))
+    return false;
+
+  if (MethodDecl->param_size() != MethodImpl->param_size())
+    return false;
+
+  for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(),
+       IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(),
+       EF = MethodDecl->param_end();
+       IM != EM && IF != EF; ++IM, ++IF) {
+    const ParmVarDecl *DeclVar = (*IF);
+    const ParmVarDecl *ImplVar = (*IM);
+    if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier())
+      return false;
+    if (!hasSameType(DeclVar->getType(), ImplVar->getType()))
+      return false;
+  }
+  return (MethodDecl->isVariadic() == MethodImpl->isVariadic());
+}
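+// Editor's note: an illustrative sketch, not part of the original sources.
+// A declaration/implementation pair such as
+//
+//   - (void)setValue:(int)v;          // @interface
+//   - (void)setValue:(float)v { ... } // @implementation
+//
+// fails the hasSameType() check on the first parameter above, so the two
+// methods are reported as unequal.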
+
+// Explicitly instantiate this in case a Redeclarable<T> is used from a TU that
+// doesn't include ASTContext.h
+template
+clang::LazyGenerationalUpdatePtr<
+    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType
+clang::LazyGenerationalUpdatePtr<
+    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue(
+        const clang::ASTContext &Ctx, Decl *Value);
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp b/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp
new file mode 100644
index 0000000..0ab1fa7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp
@@ -0,0 +1,1935 @@
+//===--- ASTDiagnostic.cpp - Diagnostic Printing Hooks for AST Nodes ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a diagnostic formatting hook for AST elements.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTLambda.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/Type.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+// Returns a desugared version of the QualType, and marks ShouldAKA as true
+// whenever we remove significant sugar from the type.
+static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
+ QualifierCollector QC;
+
+ while (true) {
+ const Type *Ty = QC.strip(QT);
+
+ // Don't aka just because we saw an elaborated type...
+ if (const ElaboratedType *ET = dyn_cast<ElaboratedType>(Ty)) {
+ QT = ET->desugar();
+ continue;
+ }
+ // ... or a paren type ...
+ if (const ParenType *PT = dyn_cast<ParenType>(Ty)) {
+ QT = PT->desugar();
+ continue;
+ }
+ // ...or a substituted template type parameter ...
+ if (const SubstTemplateTypeParmType *ST =
+ dyn_cast<SubstTemplateTypeParmType>(Ty)) {
+ QT = ST->desugar();
+ continue;
+ }
+ // ...or an attributed type...
+ if (const AttributedType *AT = dyn_cast<AttributedType>(Ty)) {
+ QT = AT->desugar();
+ continue;
+ }
+ // ...or an adjusted type...
+ if (const AdjustedType *AT = dyn_cast<AdjustedType>(Ty)) {
+ QT = AT->desugar();
+ continue;
+ }
+ // ... or an auto type.
+ if (const AutoType *AT = dyn_cast<AutoType>(Ty)) {
+ if (!AT->isSugared())
+ break;
+ QT = AT->desugar();
+ continue;
+ }
+
+ // Desugar FunctionType if return type or any parameter type should be
+ // desugared. Preserve nullability attribute on desugared types.
+ if (const FunctionType *FT = dyn_cast<FunctionType>(Ty)) {
+ bool DesugarReturn = false;
+ QualType SugarRT = FT->getReturnType();
+ QualType RT = Desugar(Context, SugarRT, DesugarReturn);
+ if (auto nullability = AttributedType::stripOuterNullability(SugarRT)) {
+ RT = Context.getAttributedType(
+ AttributedType::getNullabilityAttrKind(*nullability), RT, RT);
+ }
+
+ bool DesugarArgument = false;
+ SmallVector<QualType, 4> Args;
+ const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT);
+ if (FPT) {
+ for (QualType SugarPT : FPT->param_types()) {
+ QualType PT = Desugar(Context, SugarPT, DesugarArgument);
+ if (auto nullability =
+ AttributedType::stripOuterNullability(SugarPT)) {
+ PT = Context.getAttributedType(
+ AttributedType::getNullabilityAttrKind(*nullability), PT, PT);
+ }
+ Args.push_back(PT);
+ }
+ }
+
+ if (DesugarReturn || DesugarArgument) {
+ ShouldAKA = true;
+ QT = FPT ? Context.getFunctionType(RT, Args, FPT->getExtProtoInfo())
+ : Context.getFunctionNoProtoType(RT, FT->getExtInfo());
+ break;
+ }
+ }
+
+ // Desugar template specializations if any template argument should be
+ // desugared.
+ if (const TemplateSpecializationType *TST =
+ dyn_cast<TemplateSpecializationType>(Ty)) {
+ if (!TST->isTypeAlias()) {
+ bool DesugarArgument = false;
+ SmallVector<TemplateArgument, 4> Args;
+ for (unsigned I = 0, N = TST->getNumArgs(); I != N; ++I) {
+ const TemplateArgument &Arg = TST->getArg(I);
+ if (Arg.getKind() == TemplateArgument::Type)
+ Args.push_back(Desugar(Context, Arg.getAsType(), DesugarArgument));
+ else
+ Args.push_back(Arg);
+ }
+
+ if (DesugarArgument) {
+ ShouldAKA = true;
+ QT = Context.getTemplateSpecializationType(
+ TST->getTemplateName(), Args.data(), Args.size(), QT);
+ }
+ break;
+ }
+ }
+
+ // Don't desugar magic Objective-C types.
+ if (QualType(Ty,0) == Context.getObjCIdType() ||
+ QualType(Ty,0) == Context.getObjCClassType() ||
+ QualType(Ty,0) == Context.getObjCSelType() ||
+ QualType(Ty,0) == Context.getObjCProtoType())
+ break;
+
+ // Don't desugar va_list.
+ if (QualType(Ty, 0) == Context.getBuiltinVaListType() ||
+ QualType(Ty, 0) == Context.getBuiltinMSVaListType())
+ break;
+
+ // Otherwise, do a single-step desugar.
+ QualType Underlying;
+ bool IsSugar = false;
+ switch (Ty->getTypeClass()) {
+#define ABSTRACT_TYPE(Class, Base)
+#define TYPE(Class, Base) \
+    case Type::Class: { \
+      const Class##Type *CTy = cast<Class##Type>(Ty); \
+      if (CTy->isSugared()) { \
+        IsSugar = true; \
+        Underlying = CTy->desugar(); \
+      } \
+      break; \
+    }
+#include "clang/AST/TypeNodes.def"
+ }
+
+ // If it wasn't sugared, we're done.
+ if (!IsSugar)
+ break;
+
+    // If the desugared type is a vector type, we don't want to expand it;
+    // it will turn into an attribute mess. People want their "vec4".
+ if (isa<VectorType>(Underlying))
+ break;
+
+ // Don't desugar through the primary typedef of an anonymous type.
+ if (const TagType *UTT = Underlying->getAs<TagType>())
+ if (const TypedefType *QTT = dyn_cast<TypedefType>(QT))
+ if (UTT->getDecl()->getTypedefNameForAnonDecl() == QTT->getDecl())
+ break;
+
+ // Record that we actually looked through an opaque type here.
+ ShouldAKA = true;
+ QT = Underlying;
+ }
+
+ // If we have a pointer-like type, desugar the pointee as well.
+ // FIXME: Handle other pointer-like types.
+ if (const PointerType *Ty = QT->getAs<PointerType>()) {
+ QT = Context.getPointerType(Desugar(Context, Ty->getPointeeType(),
+ ShouldAKA));
+ } else if (const auto *Ty = QT->getAs<ObjCObjectPointerType>()) {
+ QT = Context.getObjCObjectPointerType(Desugar(Context, Ty->getPointeeType(),
+ ShouldAKA));
+ } else if (const LValueReferenceType *Ty = QT->getAs<LValueReferenceType>()) {
+ QT = Context.getLValueReferenceType(Desugar(Context, Ty->getPointeeType(),
+ ShouldAKA));
+ } else if (const RValueReferenceType *Ty = QT->getAs<RValueReferenceType>()) {
+ QT = Context.getRValueReferenceType(Desugar(Context, Ty->getPointeeType(),
+ ShouldAKA));
+ } else if (const auto *Ty = QT->getAs<ObjCObjectType>()) {
+ if (Ty->getBaseType().getTypePtr() != Ty && !ShouldAKA) {
+ QualType BaseType = Desugar(Context, Ty->getBaseType(), ShouldAKA);
+ QT = Context.getObjCObjectType(BaseType, Ty->getTypeArgsAsWritten(),
+ llvm::makeArrayRef(Ty->qual_begin(),
+ Ty->getNumProtocols()),
+ Ty->isKindOfTypeAsWritten());
+ }
+ }
+
+ return QC.apply(Context, QT);
+}
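+// Editor's note: an illustrative sketch, not part of the original sources.
+// For a declaration such as
+//
+//   typedef unsigned long MyInt;
+//
+// Desugar() peels the typedef sugar one step at a time and sets ShouldAKA,
+// so a later diagnostic can print 'MyInt' (aka 'unsigned long'), while
+// va_list, the magic Objective-C types, and vector types are deliberately
+// kept in their familiar sugared spelling.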
+
+/// \brief Convert the given type to a string suitable for printing as part of
+/// a diagnostic.
+///
+/// There are four main criteria when determining whether we should have an
+/// a.k.a. clause when pretty-printing a type:
+///
+/// 1) Some types provide very minimal sugar that doesn't impede the
+/// user's understanding --- for example, elaborated type
+/// specifiers. If this is all the sugar we see, we don't want an
+/// a.k.a. clause.
+/// 2) Some types are technically sugared but are much more familiar
+/// when seen in their sugared form --- for example, va_list,
+/// vector types, and the magic Objective C types. We don't
+/// want to desugar these, even if we do produce an a.k.a. clause.
+/// 3) Some types may have already been desugared previously in this
+/// diagnostic. If this is the case, doing another "aka" would just be
+/// clutter.
+/// 4) Two different types within the same diagnostic have the same output
+/// string. In this case, force an a.k.a. with the desugared type when
+/// doing so will provide additional information.
+///
+/// \param Context the context in which the type was allocated
+/// \param Ty the type to print
+/// \param QualTypeVals pointer values to QualTypes which are used in the
+/// diagnostic message
+static std::string
+ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
+ ArrayRef<DiagnosticsEngine::ArgumentValue> PrevArgs,
+ ArrayRef<intptr_t> QualTypeVals) {
+ // FIXME: Playing with std::string is really slow.
+ bool ForceAKA = false;
+ QualType CanTy = Ty.getCanonicalType();
+ std::string S = Ty.getAsString(Context.getPrintingPolicy());
+ std::string CanS = CanTy.getAsString(Context.getPrintingPolicy());
+
+ for (unsigned I = 0, E = QualTypeVals.size(); I != E; ++I) {
+ QualType CompareTy =
+ QualType::getFromOpaquePtr(reinterpret_cast<void*>(QualTypeVals[I]));
+ if (CompareTy.isNull())
+ continue;
+ if (CompareTy == Ty)
+ continue; // Same types
+ QualType CompareCanTy = CompareTy.getCanonicalType();
+ if (CompareCanTy == CanTy)
+ continue; // Same canonical types
+ std::string CompareS = CompareTy.getAsString(Context.getPrintingPolicy());
+ bool ShouldAKA = false;
+ QualType CompareDesugar = Desugar(Context, CompareTy, ShouldAKA);
+ std::string CompareDesugarStr =
+ CompareDesugar.getAsString(Context.getPrintingPolicy());
+ if (CompareS != S && CompareDesugarStr != S)
+ continue; // The type string is different than the comparison string
+ // and the desugared comparison string.
+ std::string CompareCanS =
+ CompareCanTy.getAsString(Context.getPrintingPolicy());
+
+ if (CompareCanS == CanS)
+ continue; // No new info from canonical type
+
+ ForceAKA = true;
+ break;
+ }
+
+ // Check to see if we already desugared this type in this
+ // diagnostic. If so, don't do it again.
+ bool Repeated = false;
+ for (unsigned i = 0, e = PrevArgs.size(); i != e; ++i) {
+ // TODO: Handle ak_declcontext case.
+ if (PrevArgs[i].first == DiagnosticsEngine::ak_qualtype) {
+ void *Ptr = (void*)PrevArgs[i].second;
+ QualType PrevTy(QualType::getFromOpaquePtr(Ptr));
+ if (PrevTy == Ty) {
+ Repeated = true;
+ break;
+ }
+ }
+ }
+
+ // Consider producing an a.k.a. clause if removing all the direct
+ // sugar gives us something "significantly different".
+ if (!Repeated) {
+ bool ShouldAKA = false;
+ QualType DesugaredTy = Desugar(Context, Ty, ShouldAKA);
+ if (ShouldAKA || ForceAKA) {
+ if (DesugaredTy == Ty) {
+ DesugaredTy = Ty.getCanonicalType();
+ }
+ std::string akaStr = DesugaredTy.getAsString(Context.getPrintingPolicy());
+ if (akaStr != S) {
+ S = "'" + S + "' (aka '" + akaStr + "')";
+ return S;
+ }
+ }
+
+    // Give some additional info on vector types. These are either not
+    // desugared or display complex __attribute__ expressions when they are,
+    // so add details of the element type and count instead.
+ if (Ty->isVectorType()) {
+ const VectorType *VTy = Ty->getAs<VectorType>();
+ std::string DecoratedString;
+ llvm::raw_string_ostream OS(DecoratedString);
+ const char *Values = VTy->getNumElements() > 1 ? "values" : "value";
+ OS << "'" << S << "' (vector of " << VTy->getNumElements() << " '"
+ << VTy->getElementType().getAsString(Context.getPrintingPolicy())
+ << "' " << Values << ")";
+ return OS.str();
+ }
+ }
+
+ S = "'" + S + "'";
+ return S;
+}
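+// Editor's note: illustrative outputs of the function above, assumed rather
+// than taken from the original sources:
+//
+//   'int'                               // no significant sugar
+//   'MyInt' (aka 'unsigned long')       // significant sugar was removed
+//   'vec4' (vector of 4 'float' values) // vector types get element details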
+
+static bool FormatTemplateTypeDiff(ASTContext &Context, QualType FromType,
+ QualType ToType, bool PrintTree,
+ bool PrintFromType, bool ElideType,
+ bool ShowColors, raw_ostream &OS);
+
+void clang::FormatASTNodeDiagnosticArgument(
+ DiagnosticsEngine::ArgumentKind Kind,
+ intptr_t Val,
+ StringRef Modifier,
+ StringRef Argument,
+ ArrayRef<DiagnosticsEngine::ArgumentValue> PrevArgs,
+ SmallVectorImpl<char> &Output,
+ void *Cookie,
+ ArrayRef<intptr_t> QualTypeVals) {
+ ASTContext &Context = *static_cast<ASTContext*>(Cookie);
+
+ size_t OldEnd = Output.size();
+ llvm::raw_svector_ostream OS(Output);
+ bool NeedQuotes = true;
+
+ switch (Kind) {
+ default: llvm_unreachable("unknown ArgumentKind");
+ case DiagnosticsEngine::ak_qualtype_pair: {
+ TemplateDiffTypes &TDT = *reinterpret_cast<TemplateDiffTypes*>(Val);
+ QualType FromType =
+ QualType::getFromOpaquePtr(reinterpret_cast<void*>(TDT.FromType));
+ QualType ToType =
+ QualType::getFromOpaquePtr(reinterpret_cast<void*>(TDT.ToType));
+
+ if (FormatTemplateTypeDiff(Context, FromType, ToType, TDT.PrintTree,
+ TDT.PrintFromType, TDT.ElideType,
+ TDT.ShowColors, OS)) {
+ NeedQuotes = !TDT.PrintTree;
+ TDT.TemplateDiffUsed = true;
+ break;
+ }
+
+    // Don't fall back during tree printing. The caller will handle
+ // this case.
+ if (TDT.PrintTree)
+ return;
+
+ // Attempting to do a template diff on non-templates. Set the variables
+ // and continue with regular type printing of the appropriate type.
+ Val = TDT.PrintFromType ? TDT.FromType : TDT.ToType;
+ Modifier = StringRef();
+ Argument = StringRef();
+ // Fall through
+ }
+ case DiagnosticsEngine::ak_qualtype: {
+ assert(Modifier.empty() && Argument.empty() &&
+ "Invalid modifier for QualType argument");
+
+ QualType Ty(QualType::getFromOpaquePtr(reinterpret_cast<void*>(Val)));
+ OS << ConvertTypeToDiagnosticString(Context, Ty, PrevArgs, QualTypeVals);
+ NeedQuotes = false;
+ break;
+ }
+ case DiagnosticsEngine::ak_declarationname: {
+ if (Modifier == "objcclass" && Argument.empty())
+ OS << '+';
+ else if (Modifier == "objcinstance" && Argument.empty())
+ OS << '-';
+ else
+ assert(Modifier.empty() && Argument.empty() &&
+ "Invalid modifier for DeclarationName argument");
+
+ OS << DeclarationName::getFromOpaqueInteger(Val);
+ break;
+ }
+ case DiagnosticsEngine::ak_nameddecl: {
+ bool Qualified;
+ if (Modifier == "q" && Argument.empty())
+ Qualified = true;
+ else {
+ assert(Modifier.empty() && Argument.empty() &&
+ "Invalid modifier for NamedDecl* argument");
+ Qualified = false;
+ }
+ const NamedDecl *ND = reinterpret_cast<const NamedDecl*>(Val);
+ ND->getNameForDiagnostic(OS, Context.getPrintingPolicy(), Qualified);
+ break;
+ }
+ case DiagnosticsEngine::ak_nestednamespec: {
+ NestedNameSpecifier *NNS = reinterpret_cast<NestedNameSpecifier*>(Val);
+ NNS->print(OS, Context.getPrintingPolicy());
+ NeedQuotes = false;
+ break;
+ }
+ case DiagnosticsEngine::ak_declcontext: {
+ DeclContext *DC = reinterpret_cast<DeclContext *> (Val);
+ assert(DC && "Should never have a null declaration context");
+ NeedQuotes = false;
+
+ // FIXME: Get the strings for DeclContext from some localized place
+ if (DC->isTranslationUnit()) {
+ if (Context.getLangOpts().CPlusPlus)
+ OS << "the global namespace";
+ else
+ OS << "the global scope";
+ } else if (DC->isClosure()) {
+ OS << "block literal";
+ } else if (isLambdaCallOperator(DC)) {
+ OS << "lambda expression";
+ } else if (TypeDecl *Type = dyn_cast<TypeDecl>(DC)) {
+ OS << ConvertTypeToDiagnosticString(Context,
+ Context.getTypeDeclType(Type),
+ PrevArgs, QualTypeVals);
+ } else {
+ assert(isa<NamedDecl>(DC) && "Expected a NamedDecl");
+ NamedDecl *ND = cast<NamedDecl>(DC);
+ if (isa<NamespaceDecl>(ND))
+ OS << "namespace ";
+ else if (isa<ObjCMethodDecl>(ND))
+ OS << "method ";
+ else if (isa<FunctionDecl>(ND))
+ OS << "function ";
+
+ OS << '\'';
+ ND->getNameForDiagnostic(OS, Context.getPrintingPolicy(), true);
+ OS << '\'';
+ }
+ break;
+ }
+ case DiagnosticsEngine::ak_attr: {
+ const Attr *At = reinterpret_cast<Attr *>(Val);
+ assert(At && "Received null Attr object!");
+ OS << '\'' << At->getSpelling() << '\'';
+ NeedQuotes = false;
+ break;
+ }
+
+ }
+
+ if (NeedQuotes) {
+ Output.insert(Output.begin()+OldEnd, '\'');
+ Output.push_back('\'');
+ }
+}
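+// Editor's note: a hedged sketch of how the hook above is wired up; the real
+// registration lives elsewhere (e.g. when Sema attaches an ASTContext to the
+// DiagnosticsEngine):
+//
+//   Diags.SetArgToStringFn(&clang::FormatASTNodeDiagnosticArgument, &Context);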
+
+/// TemplateDiff - A class that constructs a pretty string for a pair of
+/// QualTypes. For the pair of types, a diff tree will be created containing
+/// all the information about the templates and template arguments. Afterwards,
+/// the tree is transformed to a string according to the options passed in.
+namespace {
+class TemplateDiff {
+ /// Context - The ASTContext which is used for comparing template arguments.
+ ASTContext &Context;
+
+ /// Policy - Used during expression printing.
+ PrintingPolicy Policy;
+
+ /// ElideType - Option to elide identical types.
+ bool ElideType;
+
+ /// PrintTree - Format output string as a tree.
+ bool PrintTree;
+
+ /// ShowColor - Diagnostics support color, so bolding will be used.
+ bool ShowColor;
+
+  /// FromType - When single type printing is selected, this is the type to
+  /// be printed. When tree printing is selected, this type will show up first
+ /// in the tree.
+ QualType FromType;
+
+ /// ToType - The type that FromType is compared to. Only in tree printing
+  /// will this type be output.
+ QualType ToType;
+
+ /// OS - The stream used to construct the output strings.
+ raw_ostream &OS;
+
+ /// IsBold - Keeps track of the bold formatting for the output string.
+ bool IsBold;
+
+  /// DiffTree - A tree representation of the differences between two types.
+ class DiffTree {
+ public:
+ /// DiffKind - The difference in a DiffNode and which fields are used.
+ enum DiffKind {
+ /// Incomplete or invalid node.
+ Invalid,
+ /// Another level of templates, uses TemplateDecl and Qualifiers
+ Template,
+ /// Type difference, uses QualType
+ Type,
+ /// Expression difference, uses Expr
+ Expression,
+ /// Template argument difference, uses TemplateDecl
+ TemplateTemplate,
+ /// Integer difference, uses APSInt and Expr
+ Integer,
+ /// Declaration difference, uses ValueDecl
+ Declaration
+ };
+ private:
+    /// DiffNode - The root node stores the original type. Each child node
+    /// stores the template arguments of its parent. For templated types,
+    /// the template decl is also stored.
+ struct DiffNode {
+ DiffKind Kind;
+
+ /// NextNode - The index of the next sibling node or 0.
+ unsigned NextNode;
+
+ /// ChildNode - The index of the first child node or 0.
+ unsigned ChildNode;
+
+ /// ParentNode - The index of the parent node.
+ unsigned ParentNode;
+
+ /// FromType, ToType - The type arguments.
+ QualType FromType, ToType;
+
+ /// FromExpr, ToExpr - The expression arguments.
+ Expr *FromExpr, *ToExpr;
+
+      /// FromNullPtr, ToNullPtr - Whether the template argument is a nullptr.
+ bool FromNullPtr, ToNullPtr;
+
+ /// FromTD, ToTD - The template decl for template template
+ /// arguments or the type arguments that are templates.
+ TemplateDecl *FromTD, *ToTD;
+
+ /// FromQual, ToQual - Qualifiers for template types.
+ Qualifiers FromQual, ToQual;
+
+ /// FromInt, ToInt - APSInt's for integral arguments.
+ llvm::APSInt FromInt, ToInt;
+
+ /// IsValidFromInt, IsValidToInt - Whether the APSInt's are valid.
+ bool IsValidFromInt, IsValidToInt;
+
+      /// FromValueDecl, ToValueDecl - The Decl arguments, if any.
+ ValueDecl *FromValueDecl, *ToValueDecl;
+
+ /// FromAddressOf, ToAddressOf - Whether the ValueDecl needs an address of
+ /// operator before it.
+ bool FromAddressOf, ToAddressOf;
+
+ /// FromDefault, ToDefault - Whether the argument is a default argument.
+ bool FromDefault, ToDefault;
+
+ /// Same - Whether the two arguments evaluate to the same value.
+ bool Same;
+
+ DiffNode(unsigned ParentNode = 0)
+ : Kind(Invalid), NextNode(0), ChildNode(0), ParentNode(ParentNode),
+ FromType(), ToType(), FromExpr(nullptr), ToExpr(nullptr),
+ FromNullPtr(false), ToNullPtr(false),
+ FromTD(nullptr), ToTD(nullptr), IsValidFromInt(false),
+ IsValidToInt(false), FromValueDecl(nullptr), ToValueDecl(nullptr),
+ FromAddressOf(false), ToAddressOf(false), FromDefault(false),
+ ToDefault(false), Same(false) {}
+ };
+
+ /// FlatTree - A flattened tree used to store the DiffNodes.
+ SmallVector<DiffNode, 16> FlatTree;
+
+ /// CurrentNode - The index of the current node being used.
+ unsigned CurrentNode;
+
+ /// NextFreeNode - The index of the next unused node. Used when creating
+ /// child nodes.
+ unsigned NextFreeNode;
+
+ /// ReadNode - The index of the current node being read.
+ unsigned ReadNode;
+
+ public:
+ DiffTree() :
+ CurrentNode(0), NextFreeNode(1) {
+ FlatTree.push_back(DiffNode());
+ }
+
+ // Node writing functions.
+ /// SetNode - Sets FromTD and ToTD of the current node.
+ void SetNode(TemplateDecl *FromTD, TemplateDecl *ToTD) {
+ FlatTree[CurrentNode].FromTD = FromTD;
+ FlatTree[CurrentNode].ToTD = ToTD;
+ }
+
+ /// SetNode - Sets FromType and ToType of the current node.
+ void SetNode(QualType FromType, QualType ToType) {
+ FlatTree[CurrentNode].FromType = FromType;
+ FlatTree[CurrentNode].ToType = ToType;
+ }
+
+ /// SetNode - Set FromExpr and ToExpr of the current node.
+ void SetNode(Expr *FromExpr, Expr *ToExpr) {
+ FlatTree[CurrentNode].FromExpr = FromExpr;
+ FlatTree[CurrentNode].ToExpr = ToExpr;
+ }
+
+ /// SetNode - Set FromInt and ToInt of the current node.
+ void SetNode(llvm::APSInt FromInt, llvm::APSInt ToInt,
+ bool IsValidFromInt, bool IsValidToInt) {
+ FlatTree[CurrentNode].FromInt = FromInt;
+ FlatTree[CurrentNode].ToInt = ToInt;
+ FlatTree[CurrentNode].IsValidFromInt = IsValidFromInt;
+ FlatTree[CurrentNode].IsValidToInt = IsValidToInt;
+ }
+
+ /// SetNode - Set FromQual and ToQual of the current node.
+ void SetNode(Qualifiers FromQual, Qualifiers ToQual) {
+ FlatTree[CurrentNode].FromQual = FromQual;
+ FlatTree[CurrentNode].ToQual = ToQual;
+ }
+
+ /// SetNode - Set FromValueDecl and ToValueDecl of the current node.
+ void SetNode(ValueDecl *FromValueDecl, ValueDecl *ToValueDecl,
+ bool FromAddressOf, bool ToAddressOf) {
+ FlatTree[CurrentNode].FromValueDecl = FromValueDecl;
+ FlatTree[CurrentNode].ToValueDecl = ToValueDecl;
+ FlatTree[CurrentNode].FromAddressOf = FromAddressOf;
+ FlatTree[CurrentNode].ToAddressOf = ToAddressOf;
+ }
+
+ /// SetSame - Sets the same flag of the current node.
+ void SetSame(bool Same) {
+ FlatTree[CurrentNode].Same = Same;
+ }
+
+ /// SetNullPtr - Sets the NullPtr flags of the current node.
+ void SetNullPtr(bool FromNullPtr, bool ToNullPtr) {
+ FlatTree[CurrentNode].FromNullPtr = FromNullPtr;
+ FlatTree[CurrentNode].ToNullPtr = ToNullPtr;
+ }
+
+ /// SetDefault - Sets FromDefault and ToDefault flags of the current node.
+ void SetDefault(bool FromDefault, bool ToDefault) {
+ FlatTree[CurrentNode].FromDefault = FromDefault;
+ FlatTree[CurrentNode].ToDefault = ToDefault;
+ }
+
+ /// SetKind - Sets the current node's type.
+ void SetKind(DiffKind Kind) {
+ FlatTree[CurrentNode].Kind = Kind;
+ }
+
+ /// Up - Changes the node to the parent of the current node.
+ void Up() {
+ CurrentNode = FlatTree[CurrentNode].ParentNode;
+ }
+
+    /// AddNode - Adds a child node to the current node, then sets that node
+    /// as the current node.
+ void AddNode() {
+ FlatTree.push_back(DiffNode(CurrentNode));
+ DiffNode &Node = FlatTree[CurrentNode];
+ if (Node.ChildNode == 0) {
+ // If a child node doesn't exist, add one.
+ Node.ChildNode = NextFreeNode;
+ } else {
+ // If a child node exists, find the last child node and add a
+ // next node to it.
+ unsigned i;
+ for (i = Node.ChildNode; FlatTree[i].NextNode != 0;
+ i = FlatTree[i].NextNode) {
+ }
+ FlatTree[i].NextNode = NextFreeNode;
+ }
+ CurrentNode = NextFreeNode;
+ ++NextFreeNode;
+ }
+
+ // Node reading functions.
+ /// StartTraverse - Prepares the tree for recursive traversal.
+ void StartTraverse() {
+ ReadNode = 0;
+ CurrentNode = NextFreeNode;
+ NextFreeNode = 0;
+ }
+
+ /// Parent - Move the current read node to its parent.
+ void Parent() {
+ ReadNode = FlatTree[ReadNode].ParentNode;
+ }
+
+ /// GetNode - Gets the FromType and ToType.
+ void GetNode(QualType &FromType, QualType &ToType) {
+ FromType = FlatTree[ReadNode].FromType;
+ ToType = FlatTree[ReadNode].ToType;
+ }
+
+ /// GetNode - Gets the FromExpr and ToExpr.
+ void GetNode(Expr *&FromExpr, Expr *&ToExpr) {
+ FromExpr = FlatTree[ReadNode].FromExpr;
+ ToExpr = FlatTree[ReadNode].ToExpr;
+ }
+
+ /// GetNode - Gets the FromTD and ToTD.
+ void GetNode(TemplateDecl *&FromTD, TemplateDecl *&ToTD) {
+ FromTD = FlatTree[ReadNode].FromTD;
+ ToTD = FlatTree[ReadNode].ToTD;
+ }
+
+ /// GetNode - Gets the FromInt and ToInt.
+ void GetNode(llvm::APSInt &FromInt, llvm::APSInt &ToInt,
+ bool &IsValidFromInt, bool &IsValidToInt) {
+ FromInt = FlatTree[ReadNode].FromInt;
+ ToInt = FlatTree[ReadNode].ToInt;
+ IsValidFromInt = FlatTree[ReadNode].IsValidFromInt;
+ IsValidToInt = FlatTree[ReadNode].IsValidToInt;
+ }
+
+ /// GetNode - Gets the FromQual and ToQual.
+ void GetNode(Qualifiers &FromQual, Qualifiers &ToQual) {
+ FromQual = FlatTree[ReadNode].FromQual;
+ ToQual = FlatTree[ReadNode].ToQual;
+ }
+
+ /// GetNode - Gets the FromValueDecl and ToValueDecl.
+ void GetNode(ValueDecl *&FromValueDecl, ValueDecl *&ToValueDecl,
+ bool &FromAddressOf, bool &ToAddressOf) {
+ FromValueDecl = FlatTree[ReadNode].FromValueDecl;
+ ToValueDecl = FlatTree[ReadNode].ToValueDecl;
+ FromAddressOf = FlatTree[ReadNode].FromAddressOf;
+ ToAddressOf = FlatTree[ReadNode].ToAddressOf;
+ }
+
+    /// NodeIsSame - Returns true if the arguments are the same.
+ bool NodeIsSame() {
+ return FlatTree[ReadNode].Same;
+ }
+
+    /// HasChildren - Returns true if the node has children.
+ bool HasChildren() {
+ return FlatTree[ReadNode].ChildNode != 0;
+ }
+
+ /// MoveToChild - Moves from the current node to its child.
+ void MoveToChild() {
+ ReadNode = FlatTree[ReadNode].ChildNode;
+ }
+
+ /// AdvanceSibling - If there is a next sibling, advance to it and return
+ /// true. Otherwise, return false.
+ bool AdvanceSibling() {
+ if (FlatTree[ReadNode].NextNode == 0)
+ return false;
+
+ ReadNode = FlatTree[ReadNode].NextNode;
+ return true;
+ }
+
+ /// HasNextSibling - Return true if the node has a next sibling.
+ bool HasNextSibling() {
+ return FlatTree[ReadNode].NextNode != 0;
+ }
+
+ /// FromNullPtr - Returns true if the from argument is null.
+ bool FromNullPtr() {
+ return FlatTree[ReadNode].FromNullPtr;
+ }
+
+ /// ToNullPtr - Returns true if the to argument is null.
+ bool ToNullPtr() {
+ return FlatTree[ReadNode].ToNullPtr;
+ }
+
+ /// FromDefault - Return true if the from argument is the default.
+ bool FromDefault() {
+ return FlatTree[ReadNode].FromDefault;
+ }
+
+ /// ToDefault - Return true if the to argument is the default.
+ bool ToDefault() {
+ return FlatTree[ReadNode].ToDefault;
+ }
+
+ /// Empty - Returns true if the tree has no information.
+ bool Empty() {
+ return GetKind() == Invalid;
+ }
+
+ /// GetKind - Returns the current node's type.
+ DiffKind GetKind() {
+ return FlatTree[ReadNode].Kind;
+ }
+ };
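+  // Editor's note: a minimal sketch of the DiffTree write/read protocol used
+  // by the code below; the FromTD/ToTD and FromArgQT/ToArgQT values are
+  // assumed, not from the original sources:
+  //
+  //   DiffTree T;
+  //   T.SetNode(FromTD, ToTD);       // root describes the two templates
+  //   T.SetKind(DiffTree::Template);
+  //   T.AddNode();                   // child for the first argument pair
+  //   T.SetNode(FromArgQT, ToArgQT);
+  //   T.SetKind(DiffTree::Type);
+  //   T.Up();                        // back to the root
+  //
+  //   T.StartTraverse();             // switch to read mode
+  //   if (!T.Empty() && T.HasChildren())
+  //     T.MoveToChild();             // then read with GetKind() / GetNode()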
+
+ DiffTree Tree;
+
+  /// TSTiterator - An iterator that is used to enter a
+ /// TemplateSpecializationType and read TemplateArguments inside template
+ /// parameter packs in order with the rest of the TemplateArguments.
+ struct TSTiterator {
+ typedef const TemplateArgument& reference;
+ typedef const TemplateArgument* pointer;
+
+ /// TST - the template specialization whose arguments this iterator
+    /// traverses over.
+ const TemplateSpecializationType *TST;
+
+    /// DesugarTST - desugared template specialization used to extract
+    /// default argument information.
+ const TemplateSpecializationType *DesugarTST;
+
+ /// Index - the index of the template argument in TST.
+ unsigned Index;
+
+ /// CurrentTA - if CurrentTA is not the same as EndTA, then CurrentTA
+ /// points to a TemplateArgument within a parameter pack.
+ TemplateArgument::pack_iterator CurrentTA;
+
+ /// EndTA - the end iterator of a parameter pack
+ TemplateArgument::pack_iterator EndTA;
+
+ /// TSTiterator - Constructs an iterator and sets it to the first template
+ /// argument.
+ TSTiterator(ASTContext &Context, const TemplateSpecializationType *TST)
+ : TST(TST),
+ DesugarTST(GetTemplateSpecializationType(Context, TST->desugar())),
+ Index(0), CurrentTA(nullptr), EndTA(nullptr) {
+ if (isEnd()) return;
+
+ // Set to first template argument. If not a parameter pack, done.
+ TemplateArgument TA = TST->getArg(0);
+ if (TA.getKind() != TemplateArgument::Pack) return;
+
+ // Start looking into the parameter pack.
+ CurrentTA = TA.pack_begin();
+ EndTA = TA.pack_end();
+
+ // Found a valid template argument.
+ if (CurrentTA != EndTA) return;
+
+ // Parameter pack is empty, use the increment to get to a valid
+ // template argument.
+ ++(*this);
+ }
+
+ /// isEnd - Returns true if the iterator is one past the end.
+ bool isEnd() const {
+ return Index >= TST->getNumArgs();
+ }
+
+ /// &operator++ - Increment the iterator to the next template argument.
+ TSTiterator &operator++() {
+ // After the end, Index should be the default argument position in
+ // DesugarTST, if it exists.
+ if (isEnd()) {
+ ++Index;
+ return *this;
+ }
+
+ // If in a parameter pack, advance in the parameter pack.
+ if (CurrentTA != EndTA) {
+ ++CurrentTA;
+ if (CurrentTA != EndTA)
+ return *this;
+ }
+
+ // Loop until a template argument is found, or the end is reached.
+ while (true) {
+ // Advance to the next template argument. Break if reached the end.
+ if (++Index == TST->getNumArgs()) break;
+
+ // If the TemplateArgument is not a parameter pack, done.
+ TemplateArgument TA = TST->getArg(Index);
+ if (TA.getKind() != TemplateArgument::Pack) break;
+
+ // Handle parameter packs.
+ CurrentTA = TA.pack_begin();
+ EndTA = TA.pack_end();
+
+ // If the parameter pack is empty, try to advance again.
+ if (CurrentTA != EndTA) break;
+ }
+ return *this;
+ }
+
+ /// operator* - Returns the appropriate TemplateArgument.
+ reference operator*() const {
+ assert(!isEnd() && "Index exceeds number of arguments.");
+ if (CurrentTA == EndTA)
+ return TST->getArg(Index);
+ else
+ return *CurrentTA;
+ }
+
+ /// operator-> - Allow access to the underlying TemplateArgument.
+ pointer operator->() const {
+ return &operator*();
+ }
+
+    /// getDesugar - Returns the deduced template argument from DesugarTST.
+ reference getDesugar() const {
+ return DesugarTST->getArg(Index);
+ }
+ };
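+  // Editor's note: an illustrative sketch, not part of the original sources.
+  // For a specialization such as
+  //
+  //   template <typename... Ts> struct S {};
+  //   S<int, char, double> s;
+  //
+  // the single Pack argument {int, char, double} is flattened, so a loop like
+  //
+  //   for (TSTiterator I(Context, TST); !I.isEnd(); ++I) {
+  //     QualType T = I->getAsType();
+  //   }
+  //
+  // sees int, char and double as three consecutive arguments.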
+
+ // These functions build up the template diff tree, including functions to
+ // retrieve and compare template arguments.
+
+  static const TemplateSpecializationType *GetTemplateSpecializationType(
+ ASTContext &Context, QualType Ty) {
+ if (const TemplateSpecializationType *TST =
+ Ty->getAs<TemplateSpecializationType>())
+ return TST;
+
+ const RecordType *RT = Ty->getAs<RecordType>();
+
+ if (!RT)
+ return nullptr;
+
+ const ClassTemplateSpecializationDecl *CTSD =
+ dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
+
+ if (!CTSD)
+ return nullptr;
+
+ Ty = Context.getTemplateSpecializationType(
+ TemplateName(CTSD->getSpecializedTemplate()),
+ CTSD->getTemplateArgs().data(),
+ CTSD->getTemplateArgs().size(),
+ Ty.getLocalUnqualifiedType().getCanonicalType());
+
+ return Ty->getAs<TemplateSpecializationType>();
+ }
+
+ /// DiffTypes - Fills a DiffNode with information about a type difference.
+ void DiffTypes(const TSTiterator &FromIter, const TSTiterator &ToIter,
+ TemplateTypeParmDecl *FromDefaultTypeDecl,
+ TemplateTypeParmDecl *ToDefaultTypeDecl) {
+ QualType FromType = GetType(FromIter, FromDefaultTypeDecl);
+ QualType ToType = GetType(ToIter, ToDefaultTypeDecl);
+
+ Tree.SetNode(FromType, ToType);
+ Tree.SetDefault(FromIter.isEnd() && !FromType.isNull(),
+ ToIter.isEnd() && !ToType.isNull());
+ Tree.SetKind(DiffTree::Type);
+ if (FromType.isNull() || ToType.isNull())
+ return;
+
+ if (Context.hasSameType(FromType, ToType)) {
+ Tree.SetSame(true);
+ return;
+ }
+
+ const TemplateSpecializationType *FromArgTST =
+ GetTemplateSpecializationType(Context, FromType);
+ if (!FromArgTST)
+ return;
+
+ const TemplateSpecializationType *ToArgTST =
+ GetTemplateSpecializationType(Context, ToType);
+ if (!ToArgTST)
+ return;
+
+ if (!hasSameTemplate(FromArgTST, ToArgTST))
+ return;
+
+ Qualifiers FromQual = FromType.getQualifiers(),
+ ToQual = ToType.getQualifiers();
+ FromQual -= QualType(FromArgTST, 0).getQualifiers();
+ ToQual -= QualType(ToArgTST, 0).getQualifiers();
+ Tree.SetNode(FromArgTST->getTemplateName().getAsTemplateDecl(),
+ ToArgTST->getTemplateName().getAsTemplateDecl());
+ Tree.SetNode(FromQual, ToQual);
+ Tree.SetKind(DiffTree::Template);
+ DiffTemplate(FromArgTST, ToArgTST);
+ }
+
+ /// DiffTemplateTemplates - Fills a DiffNode with information about a
+ /// template template difference.
+ void DiffTemplateTemplates(const TSTiterator &FromIter,
+ const TSTiterator &ToIter,
+ TemplateTemplateParmDecl *FromDefaultTemplateDecl,
+ TemplateTemplateParmDecl *ToDefaultTemplateDecl) {
+ TemplateDecl *FromDecl = GetTemplateDecl(FromIter, FromDefaultTemplateDecl);
+ TemplateDecl *ToDecl = GetTemplateDecl(ToIter, ToDefaultTemplateDecl);
+ Tree.SetNode(FromDecl, ToDecl);
+ Tree.SetSame(FromDecl && ToDecl &&
+ FromDecl->getCanonicalDecl() == ToDecl->getCanonicalDecl());
+ Tree.SetDefault(FromIter.isEnd() && FromDecl, ToIter.isEnd() && ToDecl);
+ Tree.SetKind(DiffTree::TemplateTemplate);
+ }
+
+ /// InitializeNonTypeDiffVariables - Helper function for DiffNonTypes
+ static void InitializeNonTypeDiffVariables(
+ ASTContext &Context, const TSTiterator &Iter,
+ NonTypeTemplateParmDecl *Default, bool &HasInt, bool &HasValueDecl,
+ bool &IsNullPtr, Expr *&E, llvm::APSInt &Value, ValueDecl *&VD) {
+ HasInt = !Iter.isEnd() && Iter->getKind() == TemplateArgument::Integral;
+
+ HasValueDecl =
+ !Iter.isEnd() && Iter->getKind() == TemplateArgument::Declaration;
+
+ IsNullPtr = !Iter.isEnd() && Iter->getKind() == TemplateArgument::NullPtr;
+
+ if (HasInt)
+ Value = Iter->getAsIntegral();
+ else if (HasValueDecl)
+ VD = Iter->getAsDecl();
+ else if (!IsNullPtr)
+ E = GetExpr(Iter, Default);
+
+ if (E && Default->getType()->isPointerType())
+ IsNullPtr = CheckForNullPtr(Context, E);
+ }
+
+ /// NeedsAddressOf - Helper function for DiffNonTypes. Returns true if the
+ /// ValueDecl needs a '&' when printed.
+ static bool NeedsAddressOf(ValueDecl *VD, Expr *E,
+ NonTypeTemplateParmDecl *Default) {
+ if (!VD)
+ return false;
+
+ if (E) {
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
+ if (UO->getOpcode() == UO_AddrOf) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ if (!Default->getType()->isReferenceType()) {
+ return true;
+ }
+
+ return false;
+ }
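+  // Editor's note: illustrative cases for NeedsAddressOf, assumed rather
+  // than taken from the original sources. Given 'int X;':
+  //
+  //   template <int *P> struct P1 {};  P1<&X> p1;  // prints '&X'
+  //   template <int &R> struct P2 {};  P2<X>  p2;  // prints 'X', no '&'
+  //
+  // because a reference parameter binds without an explicit address-of.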
+
+  /// DiffNonTypes - Handles any template parameters not handled by DiffTypes
+  /// or DiffTemplateTemplates, such as integer and declaration parameters.
+ void DiffNonTypes(const TSTiterator &FromIter, const TSTiterator &ToIter,
+ NonTypeTemplateParmDecl *FromDefaultNonTypeDecl,
+ NonTypeTemplateParmDecl *ToDefaultNonTypeDecl) {
+ Expr *FromExpr = nullptr, *ToExpr = nullptr;
+ llvm::APSInt FromInt, ToInt;
+ ValueDecl *FromValueDecl = nullptr, *ToValueDecl = nullptr;
+ bool HasFromInt = false, HasToInt = false, HasFromValueDecl = false,
+ HasToValueDecl = false, FromNullPtr = false, ToNullPtr = false;
+ InitializeNonTypeDiffVariables(Context, FromIter, FromDefaultNonTypeDecl,
+ HasFromInt, HasFromValueDecl, FromNullPtr,
+ FromExpr, FromInt, FromValueDecl);
+ InitializeNonTypeDiffVariables(Context, ToIter, ToDefaultNonTypeDecl,
+ HasToInt, HasToValueDecl, ToNullPtr,
+ ToExpr, ToInt, ToValueDecl);
+
+ assert(((!HasFromInt && !HasToInt) ||
+ (!HasFromValueDecl && !HasToValueDecl)) &&
+ "Template argument cannot be both integer and declaration");
+
+ if (!HasFromInt && !HasToInt && !HasFromValueDecl && !HasToValueDecl) {
+ Tree.SetNode(FromExpr, ToExpr);
+ Tree.SetDefault(FromIter.isEnd() && FromExpr, ToIter.isEnd() && ToExpr);
+ if (FromDefaultNonTypeDecl->getType()->isIntegralOrEnumerationType()) {
+ if (FromExpr)
+ HasFromInt = GetInt(Context, FromIter, FromExpr, FromInt,
+ FromDefaultNonTypeDecl->getType());
+ if (ToExpr)
+ HasToInt = GetInt(Context, ToIter, ToExpr, ToInt,
+ ToDefaultNonTypeDecl->getType());
+ }
+ if (HasFromInt && HasToInt) {
+ Tree.SetNode(FromInt, ToInt, HasFromInt, HasToInt);
+ Tree.SetSame(FromInt == ToInt);
+ Tree.SetKind(DiffTree::Integer);
+ } else if (HasFromInt || HasToInt) {
+ Tree.SetNode(FromInt, ToInt, HasFromInt, HasToInt);
+ Tree.SetSame(false);
+ Tree.SetKind(DiffTree::Integer);
+ } else {
+ Tree.SetSame(IsEqualExpr(Context, FromExpr, ToExpr) ||
+ (FromNullPtr && ToNullPtr));
+ Tree.SetNullPtr(FromNullPtr, ToNullPtr);
+ Tree.SetKind(DiffTree::Expression);
+ }
+ return;
+ }
+
+ if (HasFromInt || HasToInt) {
+ if (!HasFromInt && FromExpr)
+ HasFromInt = GetInt(Context, FromIter, FromExpr, FromInt,
+ FromDefaultNonTypeDecl->getType());
+ if (!HasToInt && ToExpr)
+ HasToInt = GetInt(Context, ToIter, ToExpr, ToInt,
+ ToDefaultNonTypeDecl->getType());
+ Tree.SetNode(FromInt, ToInt, HasFromInt, HasToInt);
+ if (HasFromInt && HasToInt) {
+ Tree.SetSame(FromInt == ToInt);
+ } else {
+ Tree.SetSame(false);
+ }
+ Tree.SetDefault(FromIter.isEnd() && HasFromInt,
+ ToIter.isEnd() && HasToInt);
+ Tree.SetKind(DiffTree::Integer);
+ return;
+ }
+
+ if (!HasFromValueDecl && FromExpr)
+ FromValueDecl = GetValueDecl(FromIter, FromExpr);
+ if (!HasToValueDecl && ToExpr)
+ ToValueDecl = GetValueDecl(ToIter, ToExpr);
+
+ bool FromAddressOf =
+ NeedsAddressOf(FromValueDecl, FromExpr, FromDefaultNonTypeDecl);
+ bool ToAddressOf =
+ NeedsAddressOf(ToValueDecl, ToExpr, ToDefaultNonTypeDecl);
+
+ Tree.SetNullPtr(FromNullPtr, ToNullPtr);
+ Tree.SetNode(FromValueDecl, ToValueDecl, FromAddressOf, ToAddressOf);
+ Tree.SetSame(FromValueDecl && ToValueDecl &&
+ FromValueDecl->getCanonicalDecl() ==
+ ToValueDecl->getCanonicalDecl());
+ Tree.SetDefault(FromIter.isEnd() && FromValueDecl,
+ ToIter.isEnd() && ToValueDecl);
+ Tree.SetKind(DiffTree::Declaration);
+ }
+
+ /// DiffTemplate - recursively visits template arguments and stores the
+ /// argument info into a tree.
+ void DiffTemplate(const TemplateSpecializationType *FromTST,
+ const TemplateSpecializationType *ToTST) {
+ // Begin descent into diffing template tree.
+ TemplateParameterList *ParamsFrom =
+ FromTST->getTemplateName().getAsTemplateDecl()->getTemplateParameters();
+ TemplateParameterList *ParamsTo =
+ ToTST->getTemplateName().getAsTemplateDecl()->getTemplateParameters();
+ unsigned TotalArgs = 0;
+ for (TSTiterator FromIter(Context, FromTST), ToIter(Context, ToTST);
+ !FromIter.isEnd() || !ToIter.isEnd(); ++TotalArgs) {
+ Tree.AddNode();
+
+ // Get the parameter at index TotalArgs. If index is larger
+ // than the total number of parameters, then there is an
+ // argument pack, so re-use the last parameter.
+ unsigned FromParamIndex = std::min(TotalArgs, ParamsFrom->size() - 1);
+ unsigned ToParamIndex = std::min(TotalArgs, ParamsTo->size() - 1);
+ NamedDecl *FromParamND = ParamsFrom->getParam(FromParamIndex);
+ NamedDecl *ToParamND = ParamsTo->getParam(ToParamIndex);
+
+ TemplateTypeParmDecl *FromDefaultTypeDecl =
+ dyn_cast<TemplateTypeParmDecl>(FromParamND);
+ TemplateTypeParmDecl *ToDefaultTypeDecl =
+ dyn_cast<TemplateTypeParmDecl>(ToParamND);
+ if (FromDefaultTypeDecl && ToDefaultTypeDecl)
+ DiffTypes(FromIter, ToIter, FromDefaultTypeDecl, ToDefaultTypeDecl);
+
+ TemplateTemplateParmDecl *FromDefaultTemplateDecl =
+ dyn_cast<TemplateTemplateParmDecl>(FromParamND);
+ TemplateTemplateParmDecl *ToDefaultTemplateDecl =
+ dyn_cast<TemplateTemplateParmDecl>(ToParamND);
+ if (FromDefaultTemplateDecl && ToDefaultTemplateDecl)
+ DiffTemplateTemplates(FromIter, ToIter, FromDefaultTemplateDecl,
+ ToDefaultTemplateDecl);
+
+ NonTypeTemplateParmDecl *FromDefaultNonTypeDecl =
+ dyn_cast<NonTypeTemplateParmDecl>(FromParamND);
+ NonTypeTemplateParmDecl *ToDefaultNonTypeDecl =
+ dyn_cast<NonTypeTemplateParmDecl>(ToParamND);
+ if (FromDefaultNonTypeDecl && ToDefaultNonTypeDecl)
+ DiffNonTypes(FromIter, ToIter, FromDefaultNonTypeDecl,
+ ToDefaultNonTypeDecl);
+
+ ++FromIter;
+ ++ToIter;
+ Tree.Up();
+ }
+ }
+
+ /// makeTemplateList - Dump every template alias into the vector.
+ static void makeTemplateList(
+ SmallVectorImpl<const TemplateSpecializationType *> &TemplateList,
+ const TemplateSpecializationType *TST) {
+ while (TST) {
+ TemplateList.push_back(TST);
+ if (!TST->isTypeAlias())
+ return;
+ TST = TST->getAliasedType()->getAs<TemplateSpecializationType>();
+ }
+ }
+
+ /// hasSameBaseTemplate - Returns true when the base templates are the same,
+ /// even if the template arguments are not.
+ static bool hasSameBaseTemplate(const TemplateSpecializationType *FromTST,
+ const TemplateSpecializationType *ToTST) {
+ return FromTST->getTemplateName().getAsTemplateDecl()->getCanonicalDecl() ==
+ ToTST->getTemplateName().getAsTemplateDecl()->getCanonicalDecl();
+ }
+
+ /// hasSameTemplate - Returns true if both types are specialized from the
+ /// same template declaration. If they come from different template aliases,
+ /// do a parallel ascension search to determine the highest template alias in
+ /// common and set the arguments to them.
+ static bool hasSameTemplate(const TemplateSpecializationType *&FromTST,
+ const TemplateSpecializationType *&ToTST) {
+ // Check the top templates if they are the same.
+ if (hasSameBaseTemplate(FromTST, ToTST))
+ return true;
+
+ // Create vectors of template aliases.
+ SmallVector<const TemplateSpecializationType*, 1> FromTemplateList,
+ ToTemplateList;
+
+ makeTemplateList(FromTemplateList, FromTST);
+ makeTemplateList(ToTemplateList, ToTST);
+
+ SmallVectorImpl<const TemplateSpecializationType *>::reverse_iterator
+ FromIter = FromTemplateList.rbegin(), FromEnd = FromTemplateList.rend(),
+ ToIter = ToTemplateList.rbegin(), ToEnd = ToTemplateList.rend();
+
+ // Check if the lowest template types are the same. If not, return.
+ if (!hasSameBaseTemplate(*FromIter, *ToIter))
+ return false;
+
+    // Begin searching up the template aliases. The bottommost template
+    // matches, so move up until one pair does not match. Use the template
+    // right before that one.
+ for (; FromIter != FromEnd && ToIter != ToEnd; ++FromIter, ++ToIter) {
+ if (!hasSameBaseTemplate(*FromIter, *ToIter))
+ break;
+ }
+
+ FromTST = FromIter[-1];
+ ToTST = ToIter[-1];
+
+ return true;
+ }
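+  // Editor's note: an illustrative sketch, not part of the original sources.
+  // Given
+  //
+  //   template <typename T> using Vec  = std::vector<T>;
+  //   template <typename T> using Vec2 = Vec<T>;
+  //
+  // comparing Vec2<int> against Vec<double> walks both alias chains upward
+  // from the shared std::vector base and settles on the highest alias both
+  // sides have in common, so the diff is reported in terms of Vec<...>.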
+
+ /// GetType - Retrieves the template type arguments, including default
+ /// arguments.
+ static QualType GetType(const TSTiterator &Iter,
+ TemplateTypeParmDecl *DefaultTTPD) {
+ bool isVariadic = DefaultTTPD->isParameterPack();
+
+ if (!Iter.isEnd())
+ return Iter->getAsType();
+ if (isVariadic)
+ return QualType();
+
+ QualType ArgType = DefaultTTPD->getDefaultArgument();
+ if (ArgType->isDependentType())
+ return Iter.getDesugar().getAsType();
+
+ return ArgType;
+ }
+
+ /// GetExpr - Retrieves the template expression argument, including default
+ /// arguments.
+ static Expr *GetExpr(const TSTiterator &Iter,
+ NonTypeTemplateParmDecl *DefaultNTTPD) {
+ Expr *ArgExpr = nullptr;
+ bool isVariadic = DefaultNTTPD->isParameterPack();
+
+ if (!Iter.isEnd())
+ ArgExpr = Iter->getAsExpr();
+ else if (!isVariadic)
+ ArgExpr = DefaultNTTPD->getDefaultArgument();
+
+ if (ArgExpr)
+ while (SubstNonTypeTemplateParmExpr *SNTTPE =
+ dyn_cast<SubstNonTypeTemplateParmExpr>(ArgExpr))
+ ArgExpr = SNTTPE->getReplacement();
+
+ return ArgExpr;
+ }
+
+ /// GetInt - Retrieves the template integer argument, including evaluating
+ /// default arguments. If the value comes from an expression, extend the
+  /// APSInt to the size of IntegerType to match the behavior in
+  /// Sema::CheckTemplateArgument.
+ static bool GetInt(ASTContext &Context, const TSTiterator &Iter,
+ Expr *ArgExpr, llvm::APSInt &Int, QualType IntegerType) {
+    // Default, value-dependent expressions require fetching
+    // from the desugared TemplateArgument; otherwise, the expression needs
+    // to be evaluatable.
+ if (Iter.isEnd() && ArgExpr->isValueDependent()) {
+ switch (Iter.getDesugar().getKind()) {
+ case TemplateArgument::Integral:
+ Int = Iter.getDesugar().getAsIntegral();
+ return true;
+ case TemplateArgument::Expression:
+ ArgExpr = Iter.getDesugar().getAsExpr();
+ Int = ArgExpr->EvaluateKnownConstInt(Context);
+ Int = Int.extOrTrunc(Context.getTypeSize(IntegerType));
+ return true;
+ default:
+ llvm_unreachable("Unexpected template argument kind");
+ }
+ } else if (ArgExpr->isEvaluatable(Context)) {
+ Int = ArgExpr->EvaluateKnownConstInt(Context);
+ Int = Int.extOrTrunc(Context.getTypeSize(IntegerType));
+ return true;
+ }
+
+ return false;
+ }
+
+ /// GetValueDecl - Retrieves the template Decl argument, including
+ /// default expression argument.
+ static ValueDecl *GetValueDecl(const TSTiterator &Iter, Expr *ArgExpr) {
+    // Default, value-dependent expressions require fetching
+    // from the desugared TemplateArgument.
+ if (Iter.isEnd() && ArgExpr->isValueDependent())
+ switch (Iter.getDesugar().getKind()) {
+ case TemplateArgument::Declaration:
+ return Iter.getDesugar().getAsDecl();
+ case TemplateArgument::Expression:
+ ArgExpr = Iter.getDesugar().getAsExpr();
+ return cast<DeclRefExpr>(ArgExpr)->getDecl();
+ default:
+ llvm_unreachable("Unexpected template argument kind");
+ }
+ DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ArgExpr);
+ if (!DRE) {
+ UnaryOperator *UO = dyn_cast<UnaryOperator>(ArgExpr->IgnoreParens());
+ if (!UO)
+ return nullptr;
+ DRE = cast<DeclRefExpr>(UO->getSubExpr());
+ }
+
+ return DRE->getDecl();
+ }
+
+  /// CheckForNullPtr - Returns true if the expression can be evaluated as
+  /// a null pointer.
+ static bool CheckForNullPtr(ASTContext &Context, Expr *E) {
+ assert(E && "Expected expression");
+
+ E = E->IgnoreParenCasts();
+ if (E->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull))
+ return true;
+
+ DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E);
+ if (!DRE)
+ return false;
+
+ VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl());
+ if (!VD || !VD->hasInit())
+ return false;
+
+ return VD->getInit()->IgnoreParenCasts()->isNullPointerConstant(
+ Context, Expr::NPC_ValueDependentIsNull);
+ }
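+  // Editor's note: illustrative behavior, assumed rather than taken from the
+  // original sources. Both the literal argument in 'S<nullptr>' and a
+  // variable argument whose initializer is a null pointer constant are
+  // recognized, the latter by peeking through the DeclRefExpr to the
+  // VarDecl's initializer.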
+
+ /// GetTemplateDecl - Retrieves the template template arguments, including
+ /// default arguments.
+ static TemplateDecl *GetTemplateDecl(const TSTiterator &Iter,
+ TemplateTemplateParmDecl *DefaultTTPD) {
+ bool isVariadic = DefaultTTPD->isParameterPack();
+
+ TemplateArgument TA = DefaultTTPD->getDefaultArgument().getArgument();
+ TemplateDecl *DefaultTD = nullptr;
+ if (TA.getKind() != TemplateArgument::Null)
+ DefaultTD = TA.getAsTemplate().getAsTemplateDecl();
+
+ if (!Iter.isEnd())
+ return Iter->getAsTemplate().getAsTemplateDecl();
+ if (!isVariadic)
+ return DefaultTD;
+
+ return nullptr;
+ }
+
+ /// IsEqualExpr - Returns true if the expressions evaluate to the same value.
+ static bool IsEqualExpr(ASTContext &Context, Expr *FromExpr, Expr *ToExpr) {
+ if (FromExpr == ToExpr)
+ return true;
+
+ if (!FromExpr || !ToExpr)
+ return false;
+
+ DeclRefExpr *FromDRE = dyn_cast<DeclRefExpr>(FromExpr->IgnoreParens()),
+ *ToDRE = dyn_cast<DeclRefExpr>(ToExpr->IgnoreParens());
+
+ if (FromDRE || ToDRE) {
+ if (!FromDRE || !ToDRE)
+ return false;
+ return FromDRE->getDecl() == ToDRE->getDecl();
+ }
+
+ Expr::EvalResult FromResult, ToResult;
+ if (!FromExpr->EvaluateAsRValue(FromResult, Context) ||
+ !ToExpr->EvaluateAsRValue(ToResult, Context)) {
+ llvm::FoldingSetNodeID FromID, ToID;
+ FromExpr->Profile(FromID, Context, true);
+ ToExpr->Profile(ToID, Context, true);
+ return FromID == ToID;
+ }
+
+ APValue &FromVal = FromResult.Val;
+ APValue &ToVal = ToResult.Val;
+
+ if (FromVal.getKind() != ToVal.getKind()) return false;
+
+ switch (FromVal.getKind()) {
+ case APValue::Int:
+ return FromVal.getInt() == ToVal.getInt();
+ case APValue::LValue: {
+ APValue::LValueBase FromBase = FromVal.getLValueBase();
+ APValue::LValueBase ToBase = ToVal.getLValueBase();
+ if (FromBase.isNull() && ToBase.isNull())
+ return true;
+ if (FromBase.isNull() || ToBase.isNull())
+ return false;
+ return FromBase.get<const ValueDecl*>() ==
+ ToBase.get<const ValueDecl*>();
+ }
+ case APValue::MemberPointer:
+ return FromVal.getMemberPointerDecl() == ToVal.getMemberPointerDecl();
+ default:
+ llvm_unreachable("Unknown template argument expression.");
+ }
+ }
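+  // Editor's note: illustrative behavior, assumed rather than taken from the
+  // original sources. The non-type arguments in S<2 + 2> and S<4> both
+  // evaluate to the APValue integer 4 and compare equal, while two
+  // expressions that cannot be evaluated fall back to the structural
+  // Profile() comparison above.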
+
+  // These functions convert the tree representation of the template
+ // differences into the internal character vector.
+
+ /// TreeToString - Converts the Tree object into a character stream which
+ /// will later be turned into the output string.
+ void TreeToString(int Indent = 1) {
+ if (PrintTree) {
+ OS << '\n';
+ OS.indent(2 * Indent);
+ ++Indent;
+ }
+
+ // Handle cases where the difference is not templates with different
+ // arguments.
+ switch (Tree.GetKind()) {
+ case DiffTree::Invalid:
+ llvm_unreachable("Template diffing failed with bad DiffNode");
+ case DiffTree::Type: {
+ QualType FromType, ToType;
+ Tree.GetNode(FromType, ToType);
+ PrintTypeNames(FromType, ToType, Tree.FromDefault(), Tree.ToDefault(),
+ Tree.NodeIsSame());
+ return;
+ }
+ case DiffTree::Expression: {
+ Expr *FromExpr, *ToExpr;
+ Tree.GetNode(FromExpr, ToExpr);
+ PrintExpr(FromExpr, ToExpr, Tree.FromNullPtr(), Tree.ToNullPtr(),
+ Tree.FromDefault(), Tree.ToDefault(), Tree.NodeIsSame());
+ return;
+ }
+ case DiffTree::TemplateTemplate: {
+ TemplateDecl *FromTD, *ToTD;
+ Tree.GetNode(FromTD, ToTD);
+ PrintTemplateTemplate(FromTD, ToTD, Tree.FromDefault(),
+ Tree.ToDefault(), Tree.NodeIsSame());
+ return;
+ }
+ case DiffTree::Integer: {
+ llvm::APSInt FromInt, ToInt;
+ Expr *FromExpr, *ToExpr;
+ bool IsValidFromInt, IsValidToInt;
+ Tree.GetNode(FromExpr, ToExpr);
+ Tree.GetNode(FromInt, ToInt, IsValidFromInt, IsValidToInt);
+ PrintAPSInt(FromInt, ToInt, IsValidFromInt, IsValidToInt,
+ FromExpr, ToExpr, Tree.FromDefault(), Tree.ToDefault(),
+ Tree.NodeIsSame());
+ return;
+ }
+ case DiffTree::Declaration: {
+ ValueDecl *FromValueDecl, *ToValueDecl;
+ bool FromAddressOf, ToAddressOf;
+ Tree.GetNode(FromValueDecl, ToValueDecl, FromAddressOf, ToAddressOf);
+ PrintValueDecl(FromValueDecl, ToValueDecl, FromAddressOf, ToAddressOf,
+ Tree.FromNullPtr(), Tree.ToNullPtr(), Tree.FromDefault(),
+ Tree.ToDefault(), Tree.NodeIsSame());
+ return;
+ }
+ case DiffTree::Template: {
+ // Node is root of template. Recurse on children.
+ TemplateDecl *FromTD, *ToTD;
+ Tree.GetNode(FromTD, ToTD);
+
+ if (!Tree.HasChildren()) {
+ // If we're dealing with a template specialization with zero
+ // arguments, there are no children; special-case this.
+ OS << FromTD->getNameAsString() << "<>";
+ return;
+ }
+
+ Qualifiers FromQual, ToQual;
+ Tree.GetNode(FromQual, ToQual);
+ PrintQualifiers(FromQual, ToQual);
+
+ OS << FromTD->getNameAsString() << '<';
+ Tree.MoveToChild();
+ unsigned NumElideArgs = 0;
+ do {
+ if (ElideType) {
+ if (Tree.NodeIsSame()) {
+ ++NumElideArgs;
+ continue;
+ }
+ if (NumElideArgs > 0) {
+ PrintElideArgs(NumElideArgs, Indent);
+ NumElideArgs = 0;
+ OS << ", ";
+ }
+ }
+ TreeToString(Indent);
+ if (Tree.HasNextSibling())
+ OS << ", ";
+ } while (Tree.AdvanceSibling());
+ if (NumElideArgs > 0)
+ PrintElideArgs(NumElideArgs, Indent);
+
+ Tree.Parent();
+ OS << ">";
+ return;
+ }
+ }
+ }
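+  // Editor's note: illustrative output, assumed rather than taken from the
+  // original sources. Diffing vector<int> against vector<double> produces a
+  // Template node with a single Type child; without PrintTree the from side
+  // renders as vector<int> with 'int' wrapped in ToggleHighlight markers for
+  // bolding, while tree mode renders both sides as vector<[int != double]>
+  // (modulo the newlines and indentation it inserts).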
+
+ // To signal to the text printer that a certain text needs to be bolded,
+ // a special character is injected into the character stream which the
+ // text printer will later strip out.
+
+ /// Bold - Start bolding text.
+ void Bold() {
+ assert(!IsBold && "Attempting to bold text that is already bold.");
+ IsBold = true;
+ if (ShowColor)
+ OS << ToggleHighlight;
+ }
+
+ /// Unbold - Stop bolding text.
+ void Unbold() {
+ assert(IsBold && "Attempting to remove bold from unbold text.");
+ IsBold = false;
+ if (ShowColor)
+ OS << ToggleHighlight;
+ }
+
+ // Functions to print out the arguments and highlighting the difference.
+
+  /// PrintTypeNames - Prints the typenames, bolding differences. Will detect
+ /// typenames that are the same and attempt to disambiguate them by using
+ /// canonical typenames.
+ void PrintTypeNames(QualType FromType, QualType ToType,
+ bool FromDefault, bool ToDefault, bool Same) {
+ assert((!FromType.isNull() || !ToType.isNull()) &&
+ "Only one template argument may be missing.");
+
+ if (Same) {
+ OS << FromType.getAsString(Policy);
+ return;
+ }
+
+ if (!FromType.isNull() && !ToType.isNull() &&
+ FromType.getLocalUnqualifiedType() ==
+ ToType.getLocalUnqualifiedType()) {
+ Qualifiers FromQual = FromType.getLocalQualifiers(),
+ ToQual = ToType.getLocalQualifiers();
+ PrintQualifiers(FromQual, ToQual);
+ FromType.getLocalUnqualifiedType().print(OS, Policy);
+ return;
+ }
+
+ std::string FromTypeStr = FromType.isNull() ? "(no argument)"
+ : FromType.getAsString(Policy);
+ std::string ToTypeStr = ToType.isNull() ? "(no argument)"
+ : ToType.getAsString(Policy);
+ // Switch to canonical typename if it is better.
+ // TODO: merge this with other aka printing above.
+ if (FromTypeStr == ToTypeStr) {
+ std::string FromCanTypeStr =
+ FromType.getCanonicalType().getAsString(Policy);
+ std::string ToCanTypeStr = ToType.getCanonicalType().getAsString(Policy);
+ if (FromCanTypeStr != ToCanTypeStr) {
+ FromTypeStr = FromCanTypeStr;
+ ToTypeStr = ToCanTypeStr;
+ }
+ }
+
+ if (PrintTree) OS << '[';
+ OS << (FromDefault ? "(default) " : "");
+ Bold();
+ OS << FromTypeStr;
+ Unbold();
+ if (PrintTree) {
+ OS << " != " << (ToDefault ? "(default) " : "");
+ Bold();
+ OS << ToTypeStr;
+ Unbold();
+ OS << "]";
+ }
+ return;
+ }
+
+ /// PrintExpr - Prints out the expr template arguments, highlighting argument
+ /// differences.
+ void PrintExpr(const Expr *FromExpr, const Expr *ToExpr, bool FromNullPtr,
+ bool ToNullPtr, bool FromDefault, bool ToDefault, bool Same) {
+ assert((FromExpr || ToExpr) &&
+ "Only one template argument may be missing.");
+ if (Same) {
+ PrintExpr(FromExpr, FromNullPtr);
+ } else if (!PrintTree) {
+ OS << (FromDefault ? "(default) " : "");
+ Bold();
+ PrintExpr(FromExpr, FromNullPtr);
+ Unbold();
+ } else {
+ OS << (FromDefault ? "[(default) " : "[");
+ Bold();
+ PrintExpr(FromExpr, FromNullPtr);
+ Unbold();
+ OS << " != " << (ToDefault ? "(default) " : "");
+ Bold();
+ PrintExpr(ToExpr, ToNullPtr);
+ Unbold();
+ OS << ']';
+ }
+ }
+
+ /// PrintExpr - Actual formatting and printing of expressions.
+ void PrintExpr(const Expr *E, bool NullPtr = false) {
+ if (E) {
+ E->printPretty(OS, nullptr, Policy);
+ return;
+ }
+ if (NullPtr) {
+ OS << "nullptr";
+ return;
+ }
+ OS << "(no argument)";
+ }
+
+ /// PrintTemplateTemplate - Handles printing of template template arguments,
+ /// highlighting argument differences.
+ void PrintTemplateTemplate(TemplateDecl *FromTD, TemplateDecl *ToTD,
+ bool FromDefault, bool ToDefault, bool Same) {
+ assert((FromTD || ToTD) && "Only one template argument may be missing.");
+
+ std::string FromName = FromTD ? FromTD->getName() : "(no argument)";
+ std::string ToName = ToTD ? ToTD->getName() : "(no argument)";
+ if (FromTD && ToTD && FromName == ToName) {
+ FromName = FromTD->getQualifiedNameAsString();
+ ToName = ToTD->getQualifiedNameAsString();
+ }
+
+ if (Same) {
+ OS << "template " << FromTD->getNameAsString();
+ } else if (!PrintTree) {
+ OS << (FromDefault ? "(default) template " : "template ");
+ Bold();
+ OS << FromName;
+ Unbold();
+ } else {
+ OS << (FromDefault ? "[(default) template " : "[template ");
+ Bold();
+ OS << FromName;
+ Unbold();
+ OS << " != " << (ToDefault ? "(default) template " : "template ");
+ Bold();
+ OS << ToName;
+ Unbold();
+ OS << ']';
+ }
+ }
+
+ /// PrintAPSInt - Handles printing of integral arguments, highlighting
+ /// argument differences.
+ void PrintAPSInt(llvm::APSInt FromInt, llvm::APSInt ToInt,
+ bool IsValidFromInt, bool IsValidToInt, Expr *FromExpr,
+ Expr *ToExpr, bool FromDefault, bool ToDefault, bool Same) {
+ assert((IsValidFromInt || IsValidToInt) &&
+ "Only one integral argument may be missing.");
+
+ if (Same) {
+ OS << FromInt.toString(10);
+ } else if (!PrintTree) {
+ OS << (FromDefault ? "(default) " : "");
+ PrintAPSInt(FromInt, FromExpr, IsValidFromInt);
+ } else {
+ OS << (FromDefault ? "[(default) " : "[");
+ PrintAPSInt(FromInt, FromExpr, IsValidFromInt);
+ OS << " != " << (ToDefault ? "(default) " : "");
+ PrintAPSInt(ToInt, ToExpr, IsValidToInt);
+ OS << ']';
+ }
+ }
+
+  /// PrintAPSInt - If valid, print the APSInt. If the expression gives
+  /// more information, print it too.
+ void PrintAPSInt(llvm::APSInt Val, Expr *E, bool Valid) {
+ Bold();
+ if (Valid) {
+ if (HasExtraInfo(E)) {
+ PrintExpr(E);
+ Unbold();
+ OS << " aka ";
+ Bold();
+ }
+ OS << Val.toString(10);
+ } else if (E) {
+ PrintExpr(E);
+ } else {
+ OS << "(no argument)";
+ }
+ Unbold();
+ }
+
+  /// HasExtraInfo - Returns true if E is neither an integer literal nor
+  /// the negation of an integer literal.
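+  /// (e.g. "sizeof(int)" carries extra information, while plain "42" or
+  /// "-1" does not).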
+ bool HasExtraInfo(Expr *E) {
+ if (!E) return false;
+
+ E = E->IgnoreImpCasts();
+
+ if (isa<IntegerLiteral>(E)) return false;
+
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E))
+ if (UO->getOpcode() == UO_Minus)
+ if (isa<IntegerLiteral>(UO->getSubExpr()))
+ return false;
+
+ return true;
+ }
+
+ void PrintValueDecl(ValueDecl *VD, bool AddressOf, bool NullPtr) {
+ if (VD) {
+ if (AddressOf)
+ OS << "&";
+ OS << VD->getName();
+ return;
+ }
+
+ if (NullPtr) {
+ OS << "nullptr";
+ return;
+ }
+
+ OS << "(no argument)";
+ }
+
+  /// PrintValueDecl - Handles printing of Decl arguments, highlighting
+  /// argument differences.
+ void PrintValueDecl(ValueDecl *FromValueDecl, ValueDecl *ToValueDecl,
+ bool FromAddressOf, bool ToAddressOf, bool FromNullPtr,
+ bool ToNullPtr, bool FromDefault, bool ToDefault,
+ bool Same) {
+ assert((FromValueDecl || FromNullPtr || ToValueDecl || ToNullPtr) &&
+ "Only one Decl argument may be NULL");
+
+ if (Same) {
+ PrintValueDecl(FromValueDecl, FromAddressOf, FromNullPtr);
+ } else if (!PrintTree) {
+ OS << (FromDefault ? "(default) " : "");
+ Bold();
+ PrintValueDecl(FromValueDecl, FromAddressOf, FromNullPtr);
+ Unbold();
+ } else {
+ OS << (FromDefault ? "[(default) " : "[");
+ Bold();
+ PrintValueDecl(FromValueDecl, FromAddressOf, FromNullPtr);
+ Unbold();
+ OS << " != " << (ToDefault ? "(default) " : "");
+ Bold();
+ PrintValueDecl(ToValueDecl, ToAddressOf, ToNullPtr);
+ Unbold();
+ OS << ']';
+ }
+ }
+
+ // Prints the appropriate placeholder for elided template arguments.
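+  // A single elided argument prints as "[...]", a run of N as "[N * ...]".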
+ void PrintElideArgs(unsigned NumElideArgs, unsigned Indent) {
+ if (PrintTree) {
+ OS << '\n';
+ for (unsigned i = 0; i < Indent; ++i)
+ OS << " ";
+ }
+ if (NumElideArgs == 0) return;
+ if (NumElideArgs == 1)
+ OS << "[...]";
+ else
+ OS << "[" << NumElideArgs << " * ...]";
+ }
+
+ // Prints and highlights differences in Qualifiers.
+ void PrintQualifiers(Qualifiers FromQual, Qualifiers ToQual) {
+ // Both types have no qualifiers
+ if (FromQual.empty() && ToQual.empty())
+ return;
+
+    // Both types have the same qualifiers
+ if (FromQual == ToQual) {
+ PrintQualifier(FromQual, /*ApplyBold*/false);
+ return;
+ }
+
+ // Find common qualifiers and strip them from FromQual and ToQual.
+ Qualifiers CommonQual = Qualifiers::removeCommonQualifiers(FromQual,
+ ToQual);
+
+ // The qualifiers are printed before the template name.
+ // Inline printing:
+ // The common qualifiers are printed. Then, qualifiers only in this type
+ // are printed and highlighted. Finally, qualifiers only in the other
+ // type are printed and highlighted inside parentheses after "missing".
+ // Tree printing:
+ // Qualifiers are printed next to each other, inside brackets, and
+ // separated by "!=". The printing order is:
+ // common qualifiers, highlighted from qualifiers, "!=",
+ // common qualifiers, highlighted to qualifiers
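+    // e.g. diffing "const volatile T" against "const restrict T" prints,
+    // in tree mode, "[const volatile != const restrict]" with "volatile"
+    // and "restrict" highlighted.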
+ if (PrintTree) {
+ OS << "[";
+ if (CommonQual.empty() && FromQual.empty()) {
+ Bold();
+ OS << "(no qualifiers) ";
+ Unbold();
+ } else {
+ PrintQualifier(CommonQual, /*ApplyBold*/false);
+ PrintQualifier(FromQual, /*ApplyBold*/true);
+ }
+ OS << "!= ";
+ if (CommonQual.empty() && ToQual.empty()) {
+ Bold();
+ OS << "(no qualifiers)";
+ Unbold();
+ } else {
+ PrintQualifier(CommonQual, /*ApplyBold*/false,
+ /*appendSpaceIfNonEmpty*/!ToQual.empty());
+ PrintQualifier(ToQual, /*ApplyBold*/true,
+ /*appendSpaceIfNonEmpty*/false);
+ }
+ OS << "] ";
+ } else {
+ PrintQualifier(CommonQual, /*ApplyBold*/false);
+ PrintQualifier(FromQual, /*ApplyBold*/true);
+ }
+ }
+
+ void PrintQualifier(Qualifiers Q, bool ApplyBold,
+ bool AppendSpaceIfNonEmpty = true) {
+ if (Q.empty()) return;
+ if (ApplyBold) Bold();
+ Q.print(OS, Policy, AppendSpaceIfNonEmpty);
+ if (ApplyBold) Unbold();
+ }
+
+public:
+
+ TemplateDiff(raw_ostream &OS, ASTContext &Context, QualType FromType,
+ QualType ToType, bool PrintTree, bool PrintFromType,
+ bool ElideType, bool ShowColor)
+ : Context(Context),
+ Policy(Context.getLangOpts()),
+ ElideType(ElideType),
+ PrintTree(PrintTree),
+ ShowColor(ShowColor),
+ // When printing a single type, the FromType is the one printed.
+ FromType(PrintFromType ? FromType : ToType),
+ ToType(PrintFromType ? ToType : FromType),
+ OS(OS),
+ IsBold(false) {
+ }
+
+ /// DiffTemplate - Start the template type diffing.
+ void DiffTemplate() {
+ Qualifiers FromQual = FromType.getQualifiers(),
+ ToQual = ToType.getQualifiers();
+
+ const TemplateSpecializationType *FromOrigTST =
+ GetTemplateSpecializationType(Context, FromType);
+ const TemplateSpecializationType *ToOrigTST =
+ GetTemplateSpecializationType(Context, ToType);
+
+ // Only checking templates.
+ if (!FromOrigTST || !ToOrigTST)
+ return;
+
+ // Different base templates.
+ if (!hasSameTemplate(FromOrigTST, ToOrigTST)) {
+ return;
+ }
+
+ FromQual -= QualType(FromOrigTST, 0).getQualifiers();
+ ToQual -= QualType(ToOrigTST, 0).getQualifiers();
+ Tree.SetNode(FromType, ToType);
+ Tree.SetNode(FromQual, ToQual);
+ Tree.SetKind(DiffTree::Template);
+
+ // Same base template, but different arguments.
+ Tree.SetNode(FromOrigTST->getTemplateName().getAsTemplateDecl(),
+ ToOrigTST->getTemplateName().getAsTemplateDecl());
+
+ DiffTemplate(FromOrigTST, ToOrigTST);
+ }
+
+  /// Emit - If the two types given are templated types with the same
+  /// base template, emit a string representation of the type difference
+  /// to the stream and return true. Otherwise, return false.
+ bool Emit() {
+ Tree.StartTraverse();
+ if (Tree.Empty())
+ return false;
+
+ TreeToString();
+ assert(!IsBold && "Bold is applied to end of string.");
+ return true;
+ }
+}; // end class TemplateDiff
+} // end namespace
+
+/// FormatTemplateTypeDiff - A helper static function to start the template
+/// diff and emit the properly formatted string to the stream. Returns true
+/// if the diff was emitted.
+static bool FormatTemplateTypeDiff(ASTContext &Context, QualType FromType,
+ QualType ToType, bool PrintTree,
+ bool PrintFromType, bool ElideType,
+ bool ShowColors, raw_ostream &OS) {
+ if (PrintTree)
+ PrintFromType = true;
+ TemplateDiff TD(OS, Context, FromType, ToType, PrintTree, PrintFromType,
+ ElideType, ShowColors);
+ TD.DiffTemplate();
+ return TD.Emit();
+}
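+// Hypothetical usage sketch (Ctx, FromTy and ToTy are assumed caller-side
+// names, not part of this file): a diagnostic formatter would try the
+// template diff first and fall back to plain type names when the two types
+// do not share a base template:
+//
+//   std::string Str;
+//   llvm::raw_string_ostream TmpOS(Str);
+//   if (!FormatTemplateTypeDiff(Ctx, FromTy, ToTy, /*PrintTree=*/false,
+//                               /*PrintFromType=*/true, /*ElideType=*/true,
+//                               /*ShowColors=*/false, TmpOS))
+//     TmpOS << FromTy.getAsString() << " vs " << ToTy.getAsString();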
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTDumper.cpp b/contrib/llvm/tools/clang/lib/AST/ASTDumper.cpp
new file mode 100644
index 0000000..e7fee03
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ASTDumper.cpp
@@ -0,0 +1,2406 @@
+//===--- ASTDumper.cpp - Dumping implementation for ASTs ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the AST dump methods, which dump out the
+// AST in a form that exposes type details and other fields.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/CommentVisitor.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclLookups.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TypeVisitor.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/Module.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Sema/LocInfoType.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace clang::comments;
+
+//===----------------------------------------------------------------------===//
+// ASTDumper Visitor
+//===----------------------------------------------------------------------===//
+
+namespace {
+ // Colors used for various parts of the AST dump
+  // Do not use bold yellow for any text. It is hard to read on white backgrounds.
+
+ struct TerminalColor {
+ raw_ostream::Colors Color;
+ bool Bold;
+ };
+
+ // Red - CastColor
+ // Green - TypeColor
+ // Bold Green - DeclKindNameColor, UndeserializedColor
+ // Yellow - AddressColor, LocationColor
+ // Blue - CommentColor, NullColor, IndentColor
+ // Bold Blue - AttrColor
+ // Bold Magenta - StmtColor
+ // Cyan - ValueKindColor, ObjectKindColor
+ // Bold Cyan - ValueColor, DeclNameColor
+
+ // Decl kind names (VarDecl, FunctionDecl, etc)
+ static const TerminalColor DeclKindNameColor = { raw_ostream::GREEN, true };
+ // Attr names (CleanupAttr, GuardedByAttr, etc)
+ static const TerminalColor AttrColor = { raw_ostream::BLUE, true };
+ // Statement names (DeclStmt, ImplicitCastExpr, etc)
+ static const TerminalColor StmtColor = { raw_ostream::MAGENTA, true };
+ // Comment names (FullComment, ParagraphComment, TextComment, etc)
+ static const TerminalColor CommentColor = { raw_ostream::BLUE, false };
+
+ // Type names (int, float, etc, plus user defined types)
+ static const TerminalColor TypeColor = { raw_ostream::GREEN, false };
+
+ // Pointer address
+ static const TerminalColor AddressColor = { raw_ostream::YELLOW, false };
+ // Source locations
+ static const TerminalColor LocationColor = { raw_ostream::YELLOW, false };
+
+ // lvalue/xvalue
+ static const TerminalColor ValueKindColor = { raw_ostream::CYAN, false };
+ // bitfield/objcproperty/objcsubscript/vectorcomponent
+ static const TerminalColor ObjectKindColor = { raw_ostream::CYAN, false };
+
+ // Null statements
+ static const TerminalColor NullColor = { raw_ostream::BLUE, false };
+
+ // Undeserialized entities
+ static const TerminalColor UndeserializedColor = { raw_ostream::GREEN, true };
+
+ // CastKind from CastExpr's
+ static const TerminalColor CastColor = { raw_ostream::RED, false };
+
+ // Value of the statement
+ static const TerminalColor ValueColor = { raw_ostream::CYAN, true };
+ // Decl names
+ static const TerminalColor DeclNameColor = { raw_ostream::CYAN, true };
+
+  // Indents ( `, -, | )
+ static const TerminalColor IndentColor = { raw_ostream::BLUE, false };
+
+ class ASTDumper
+ : public ConstDeclVisitor<ASTDumper>, public ConstStmtVisitor<ASTDumper>,
+ public ConstCommentVisitor<ASTDumper>, public TypeVisitor<ASTDumper> {
+ raw_ostream &OS;
+ const CommandTraits *Traits;
+ const SourceManager *SM;
+
+ /// Pending[i] is an action to dump an entity at level i.
+ llvm::SmallVector<std::function<void(bool isLastChild)>, 32> Pending;
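+    /// Child dumps are deferred: each callback is queued here and run only
+    /// once we know whether another sibling follows, so the correct
+    /// connector ('|-' vs '`-') can be chosen.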
+
+ /// Indicates whether we're at the top level.
+ bool TopLevel;
+
+ /// Indicates if we're handling the first child after entering a new depth.
+ bool FirstChild;
+
+ /// Prefix for currently-being-dumped entity.
+ std::string Prefix;
+
+    /// Keep track of the last location we printed so that we can print
+    /// deltas from then on.
+ const char *LastLocFilename;
+ unsigned LastLocLine;
+
+ /// The \c FullComment parent of the comment being dumped.
+ const FullComment *FC;
+
+ bool ShowColors;
+
+ /// Dump a child of the current node.
+ template<typename Fn> void dumpChild(Fn doDumpChild) {
+ // If we're at the top level, there's nothing interesting to do; just
+ // run the dumper.
+ if (TopLevel) {
+ TopLevel = false;
+ doDumpChild();
+ while (!Pending.empty()) {
+ Pending.back()(true);
+ Pending.pop_back();
+ }
+ Prefix.clear();
+ OS << "\n";
+ TopLevel = true;
+ return;
+ }
+
+ const FullComment *OrigFC = FC;
+ auto dumpWithIndent = [this, doDumpChild, OrigFC](bool isLastChild) {
+ // Print out the appropriate tree structure and work out the prefix for
+ // children of this node. For instance:
+ //
+ // A Prefix = ""
+ // |-B Prefix = "| "
+ // | `-C Prefix = "| "
+ // `-D Prefix = " "
+ // |-E Prefix = " | "
+ // `-F Prefix = " "
+ // G Prefix = ""
+ //
+ // Note that the first level gets no prefix.
+ {
+ OS << '\n';
+ ColorScope Color(*this, IndentColor);
+ OS << Prefix << (isLastChild ? '`' : '|') << '-';
+ this->Prefix.push_back(isLastChild ? ' ' : '|');
+ this->Prefix.push_back(' ');
+ }
+
+ FirstChild = true;
+ unsigned Depth = Pending.size();
+
+ FC = OrigFC;
+ doDumpChild();
+
+ // If any children are left, they're the last at their nesting level.
+ // Dump those ones out now.
+ while (Depth < Pending.size()) {
+ Pending.back()(true);
+ this->Pending.pop_back();
+ }
+
+ // Restore the old prefix.
+ this->Prefix.resize(Prefix.size() - 2);
+ };
+
+ if (FirstChild) {
+ Pending.push_back(std::move(dumpWithIndent));
+ } else {
+ Pending.back()(false);
+ Pending.back() = std::move(dumpWithIndent);
+ }
+ FirstChild = false;
+ }
+
+ class ColorScope {
+ ASTDumper &Dumper;
+ public:
+ ColorScope(ASTDumper &Dumper, TerminalColor Color)
+ : Dumper(Dumper) {
+ if (Dumper.ShowColors)
+ Dumper.OS.changeColor(Color.Color, Color.Bold);
+ }
+ ~ColorScope() {
+ if (Dumper.ShowColors)
+ Dumper.OS.resetColor();
+ }
+ };
+
+ public:
+ ASTDumper(raw_ostream &OS, const CommandTraits *Traits,
+ const SourceManager *SM)
+ : OS(OS), Traits(Traits), SM(SM), TopLevel(true), FirstChild(true),
+ LastLocFilename(""), LastLocLine(~0U), FC(nullptr),
+ ShowColors(SM && SM->getDiagnostics().getShowColors()) { }
+
+ ASTDumper(raw_ostream &OS, const CommandTraits *Traits,
+ const SourceManager *SM, bool ShowColors)
+ : OS(OS), Traits(Traits), SM(SM), TopLevel(true), FirstChild(true),
+        LastLocFilename(""), LastLocLine(~0U), FC(nullptr),
+ ShowColors(ShowColors) { }
+
+ void dumpDecl(const Decl *D);
+ void dumpStmt(const Stmt *S);
+ void dumpFullComment(const FullComment *C);
+
+ // Utilities
+ void dumpPointer(const void *Ptr);
+ void dumpSourceRange(SourceRange R);
+ void dumpLocation(SourceLocation Loc);
+ void dumpBareType(QualType T, bool Desugar = true);
+ void dumpType(QualType T);
+ void dumpTypeAsChild(QualType T);
+ void dumpTypeAsChild(const Type *T);
+ void dumpBareDeclRef(const Decl *Node);
+ void dumpDeclRef(const Decl *Node, const char *Label = nullptr);
+ void dumpName(const NamedDecl *D);
+ bool hasNodes(const DeclContext *DC);
+ void dumpDeclContext(const DeclContext *DC);
+ void dumpLookups(const DeclContext *DC, bool DumpDecls);
+ void dumpAttr(const Attr *A);
+
+ // C++ Utilities
+ void dumpAccessSpecifier(AccessSpecifier AS);
+ void dumpCXXCtorInitializer(const CXXCtorInitializer *Init);
+ void dumpTemplateParameters(const TemplateParameterList *TPL);
+ void dumpTemplateArgumentListInfo(const TemplateArgumentListInfo &TALI);
+ void dumpTemplateArgumentLoc(const TemplateArgumentLoc &A);
+ void dumpTemplateArgumentList(const TemplateArgumentList &TAL);
+ void dumpTemplateArgument(const TemplateArgument &A,
+ SourceRange R = SourceRange());
+
+ // Objective-C utilities.
+ void dumpObjCTypeParamList(const ObjCTypeParamList *typeParams);
+
+ // Types
+ void VisitComplexType(const ComplexType *T) {
+ dumpTypeAsChild(T->getElementType());
+ }
+ void VisitPointerType(const PointerType *T) {
+ dumpTypeAsChild(T->getPointeeType());
+ }
+ void VisitBlockPointerType(const BlockPointerType *T) {
+ dumpTypeAsChild(T->getPointeeType());
+ }
+ void VisitReferenceType(const ReferenceType *T) {
+ dumpTypeAsChild(T->getPointeeType());
+ }
+ void VisitRValueReferenceType(const ReferenceType *T) {
+ if (T->isSpelledAsLValue())
+ OS << " written as lvalue reference";
+ VisitReferenceType(T);
+ }
+ void VisitMemberPointerType(const MemberPointerType *T) {
+ dumpTypeAsChild(T->getClass());
+ dumpTypeAsChild(T->getPointeeType());
+ }
+ void VisitArrayType(const ArrayType *T) {
+ switch (T->getSizeModifier()) {
+ case ArrayType::Normal: break;
+ case ArrayType::Static: OS << " static"; break;
+ case ArrayType::Star: OS << " *"; break;
+ }
+ OS << " " << T->getIndexTypeQualifiers().getAsString();
+ dumpTypeAsChild(T->getElementType());
+ }
+ void VisitConstantArrayType(const ConstantArrayType *T) {
+ OS << " " << T->getSize();
+ VisitArrayType(T);
+ }
+ void VisitVariableArrayType(const VariableArrayType *T) {
+ OS << " ";
+ dumpSourceRange(T->getBracketsRange());
+ VisitArrayType(T);
+ dumpStmt(T->getSizeExpr());
+ }
+ void VisitDependentSizedArrayType(const DependentSizedArrayType *T) {
+ VisitArrayType(T);
+ OS << " ";
+ dumpSourceRange(T->getBracketsRange());
+ dumpStmt(T->getSizeExpr());
+ }
+ void VisitDependentSizedExtVectorType(
+ const DependentSizedExtVectorType *T) {
+ OS << " ";
+ dumpLocation(T->getAttributeLoc());
+ dumpTypeAsChild(T->getElementType());
+ dumpStmt(T->getSizeExpr());
+ }
+ void VisitVectorType(const VectorType *T) {
+ switch (T->getVectorKind()) {
+ case VectorType::GenericVector: break;
+ case VectorType::AltiVecVector: OS << " altivec"; break;
+ case VectorType::AltiVecPixel: OS << " altivec pixel"; break;
+ case VectorType::AltiVecBool: OS << " altivec bool"; break;
+ case VectorType::NeonVector: OS << " neon"; break;
+ case VectorType::NeonPolyVector: OS << " neon poly"; break;
+ }
+ OS << " " << T->getNumElements();
+ dumpTypeAsChild(T->getElementType());
+ }
+ void VisitFunctionType(const FunctionType *T) {
+ auto EI = T->getExtInfo();
+ if (EI.getNoReturn()) OS << " noreturn";
+ if (EI.getProducesResult()) OS << " produces_result";
+ if (EI.getHasRegParm()) OS << " regparm " << EI.getRegParm();
+ OS << " " << FunctionType::getNameForCallConv(EI.getCC());
+ dumpTypeAsChild(T->getReturnType());
+ }
+ void VisitFunctionProtoType(const FunctionProtoType *T) {
+ auto EPI = T->getExtProtoInfo();
+ if (EPI.HasTrailingReturn) OS << " trailing_return";
+ if (T->isConst()) OS << " const";
+ if (T->isVolatile()) OS << " volatile";
+ if (T->isRestrict()) OS << " restrict";
+ switch (EPI.RefQualifier) {
+ case RQ_None: break;
+ case RQ_LValue: OS << " &"; break;
+ case RQ_RValue: OS << " &&"; break;
+ }
+ // FIXME: Exception specification.
+ // FIXME: Consumed parameters.
+ VisitFunctionType(T);
+ for (QualType PT : T->getParamTypes())
+ dumpTypeAsChild(PT);
+ if (EPI.Variadic)
+ dumpChild([=] { OS << "..."; });
+ }
+ void VisitUnresolvedUsingType(const UnresolvedUsingType *T) {
+ dumpDeclRef(T->getDecl());
+ }
+ void VisitTypedefType(const TypedefType *T) {
+ dumpDeclRef(T->getDecl());
+ }
+ void VisitTypeOfExprType(const TypeOfExprType *T) {
+ dumpStmt(T->getUnderlyingExpr());
+ }
+ void VisitDecltypeType(const DecltypeType *T) {
+ dumpStmt(T->getUnderlyingExpr());
+ }
+ void VisitUnaryTransformType(const UnaryTransformType *T) {
+ switch (T->getUTTKind()) {
+ case UnaryTransformType::EnumUnderlyingType:
+ OS << " underlying_type";
+ break;
+ }
+ dumpTypeAsChild(T->getBaseType());
+ }
+ void VisitTagType(const TagType *T) {
+ dumpDeclRef(T->getDecl());
+ }
+ void VisitAttributedType(const AttributedType *T) {
+ // FIXME: AttrKind
+ dumpTypeAsChild(T->getModifiedType());
+ }
+ void VisitTemplateTypeParmType(const TemplateTypeParmType *T) {
+ OS << " depth " << T->getDepth() << " index " << T->getIndex();
+ if (T->isParameterPack()) OS << " pack";
+ dumpDeclRef(T->getDecl());
+ }
+ void VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) {
+ dumpTypeAsChild(T->getReplacedParameter());
+ }
+ void VisitSubstTemplateTypeParmPackType(
+ const SubstTemplateTypeParmPackType *T) {
+ dumpTypeAsChild(T->getReplacedParameter());
+ dumpTemplateArgument(T->getArgumentPack());
+ }
+ void VisitAutoType(const AutoType *T) {
+ if (T->isDecltypeAuto()) OS << " decltype(auto)";
+ if (!T->isDeduced())
+ OS << " undeduced";
+ }
+ void VisitTemplateSpecializationType(const TemplateSpecializationType *T) {
+ if (T->isTypeAlias()) OS << " alias";
+ OS << " "; T->getTemplateName().dump(OS);
+ for (auto &Arg : *T)
+ dumpTemplateArgument(Arg);
+ if (T->isTypeAlias())
+ dumpTypeAsChild(T->getAliasedType());
+ }
+ void VisitInjectedClassNameType(const InjectedClassNameType *T) {
+ dumpDeclRef(T->getDecl());
+ }
+ void VisitObjCInterfaceType(const ObjCInterfaceType *T) {
+ dumpDeclRef(T->getDecl());
+ }
+ void VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
+ dumpTypeAsChild(T->getPointeeType());
+ }
+ void VisitAtomicType(const AtomicType *T) {
+ dumpTypeAsChild(T->getValueType());
+ }
+ void VisitAdjustedType(const AdjustedType *T) {
+ dumpTypeAsChild(T->getOriginalType());
+ }
+ void VisitPackExpansionType(const PackExpansionType *T) {
+ if (auto N = T->getNumExpansions()) OS << " expansions " << *N;
+ if (!T->isSugared())
+ dumpTypeAsChild(T->getPattern());
+ }
+ // FIXME: ElaboratedType, DependentNameType,
+ // DependentTemplateSpecializationType, ObjCObjectType
+
+ // Decls
+ void VisitLabelDecl(const LabelDecl *D);
+ void VisitTypedefDecl(const TypedefDecl *D);
+ void VisitEnumDecl(const EnumDecl *D);
+ void VisitRecordDecl(const RecordDecl *D);
+ void VisitEnumConstantDecl(const EnumConstantDecl *D);
+ void VisitIndirectFieldDecl(const IndirectFieldDecl *D);
+ void VisitFunctionDecl(const FunctionDecl *D);
+ void VisitFieldDecl(const FieldDecl *D);
+ void VisitVarDecl(const VarDecl *D);
+ void VisitFileScopeAsmDecl(const FileScopeAsmDecl *D);
+ void VisitImportDecl(const ImportDecl *D);
+
+ // C++ Decls
+ void VisitNamespaceDecl(const NamespaceDecl *D);
+ void VisitUsingDirectiveDecl(const UsingDirectiveDecl *D);
+ void VisitNamespaceAliasDecl(const NamespaceAliasDecl *D);
+ void VisitTypeAliasDecl(const TypeAliasDecl *D);
+ void VisitTypeAliasTemplateDecl(const TypeAliasTemplateDecl *D);
+ void VisitCXXRecordDecl(const CXXRecordDecl *D);
+ void VisitStaticAssertDecl(const StaticAssertDecl *D);
+ template<typename SpecializationDecl>
+ void VisitTemplateDeclSpecialization(const SpecializationDecl *D,
+ bool DumpExplicitInst,
+ bool DumpRefOnly);
+ template<typename TemplateDecl>
+ void VisitTemplateDecl(const TemplateDecl *D, bool DumpExplicitInst);
+ void VisitFunctionTemplateDecl(const FunctionTemplateDecl *D);
+ void VisitClassTemplateDecl(const ClassTemplateDecl *D);
+ void VisitClassTemplateSpecializationDecl(
+ const ClassTemplateSpecializationDecl *D);
+ void VisitClassTemplatePartialSpecializationDecl(
+ const ClassTemplatePartialSpecializationDecl *D);
+ void VisitClassScopeFunctionSpecializationDecl(
+ const ClassScopeFunctionSpecializationDecl *D);
+ void VisitBuiltinTemplateDecl(const BuiltinTemplateDecl *D);
+ void VisitVarTemplateDecl(const VarTemplateDecl *D);
+ void VisitVarTemplateSpecializationDecl(
+ const VarTemplateSpecializationDecl *D);
+ void VisitVarTemplatePartialSpecializationDecl(
+ const VarTemplatePartialSpecializationDecl *D);
+ void VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D);
+ void VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D);
+ void VisitTemplateTemplateParmDecl(const TemplateTemplateParmDecl *D);
+ void VisitUsingDecl(const UsingDecl *D);
+ void VisitUnresolvedUsingTypenameDecl(const UnresolvedUsingTypenameDecl *D);
+ void VisitUnresolvedUsingValueDecl(const UnresolvedUsingValueDecl *D);
+ void VisitUsingShadowDecl(const UsingShadowDecl *D);
+ void VisitLinkageSpecDecl(const LinkageSpecDecl *D);
+ void VisitAccessSpecDecl(const AccessSpecDecl *D);
+ void VisitFriendDecl(const FriendDecl *D);
+
+ // ObjC Decls
+ void VisitObjCIvarDecl(const ObjCIvarDecl *D);
+ void VisitObjCMethodDecl(const ObjCMethodDecl *D);
+ void VisitObjCTypeParamDecl(const ObjCTypeParamDecl *D);
+ void VisitObjCCategoryDecl(const ObjCCategoryDecl *D);
+ void VisitObjCCategoryImplDecl(const ObjCCategoryImplDecl *D);
+ void VisitObjCProtocolDecl(const ObjCProtocolDecl *D);
+ void VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D);
+ void VisitObjCImplementationDecl(const ObjCImplementationDecl *D);
+ void VisitObjCCompatibleAliasDecl(const ObjCCompatibleAliasDecl *D);
+ void VisitObjCPropertyDecl(const ObjCPropertyDecl *D);
+ void VisitObjCPropertyImplDecl(const ObjCPropertyImplDecl *D);
+ void VisitBlockDecl(const BlockDecl *D);
+
+ // Stmts.
+ void VisitStmt(const Stmt *Node);
+ void VisitDeclStmt(const DeclStmt *Node);
+ void VisitAttributedStmt(const AttributedStmt *Node);
+ void VisitLabelStmt(const LabelStmt *Node);
+ void VisitGotoStmt(const GotoStmt *Node);
+ void VisitCXXCatchStmt(const CXXCatchStmt *Node);
+
+ // Exprs
+ void VisitExpr(const Expr *Node);
+ void VisitCastExpr(const CastExpr *Node);
+ void VisitDeclRefExpr(const DeclRefExpr *Node);
+ void VisitPredefinedExpr(const PredefinedExpr *Node);
+ void VisitCharacterLiteral(const CharacterLiteral *Node);
+ void VisitIntegerLiteral(const IntegerLiteral *Node);
+ void VisitFloatingLiteral(const FloatingLiteral *Node);
+ void VisitStringLiteral(const StringLiteral *Str);
+ void VisitInitListExpr(const InitListExpr *ILE);
+ void VisitUnaryOperator(const UnaryOperator *Node);
+ void VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Node);
+ void VisitMemberExpr(const MemberExpr *Node);
+ void VisitExtVectorElementExpr(const ExtVectorElementExpr *Node);
+ void VisitBinaryOperator(const BinaryOperator *Node);
+ void VisitCompoundAssignOperator(const CompoundAssignOperator *Node);
+ void VisitAddrLabelExpr(const AddrLabelExpr *Node);
+ void VisitBlockExpr(const BlockExpr *Node);
+ void VisitOpaqueValueExpr(const OpaqueValueExpr *Node);
+
+ // C++
+ void VisitCXXNamedCastExpr(const CXXNamedCastExpr *Node);
+ void VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *Node);
+ void VisitCXXThisExpr(const CXXThisExpr *Node);
+ void VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *Node);
+ void VisitCXXConstructExpr(const CXXConstructExpr *Node);
+ void VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *Node);
+ void VisitCXXNewExpr(const CXXNewExpr *Node);
+ void VisitCXXDeleteExpr(const CXXDeleteExpr *Node);
+ void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Node);
+ void VisitExprWithCleanups(const ExprWithCleanups *Node);
+ void VisitUnresolvedLookupExpr(const UnresolvedLookupExpr *Node);
+ void dumpCXXTemporary(const CXXTemporary *Temporary);
+ void VisitLambdaExpr(const LambdaExpr *Node) {
+ VisitExpr(Node);
+ dumpDecl(Node->getLambdaClass());
+ }
+ void VisitSizeOfPackExpr(const SizeOfPackExpr *Node);
+
+ // ObjC
+ void VisitObjCAtCatchStmt(const ObjCAtCatchStmt *Node);
+ void VisitObjCEncodeExpr(const ObjCEncodeExpr *Node);
+ void VisitObjCMessageExpr(const ObjCMessageExpr *Node);
+ void VisitObjCBoxedExpr(const ObjCBoxedExpr *Node);
+ void VisitObjCSelectorExpr(const ObjCSelectorExpr *Node);
+ void VisitObjCProtocolExpr(const ObjCProtocolExpr *Node);
+ void VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *Node);
+ void VisitObjCSubscriptRefExpr(const ObjCSubscriptRefExpr *Node);
+ void VisitObjCIvarRefExpr(const ObjCIvarRefExpr *Node);
+ void VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *Node);
+
+ // Comments.
+ const char *getCommandName(unsigned CommandID);
+ void dumpComment(const Comment *C);
+
+ // Inline comments.
+ void visitTextComment(const TextComment *C);
+ void visitInlineCommandComment(const InlineCommandComment *C);
+ void visitHTMLStartTagComment(const HTMLStartTagComment *C);
+ void visitHTMLEndTagComment(const HTMLEndTagComment *C);
+
+ // Block comments.
+ void visitBlockCommandComment(const BlockCommandComment *C);
+ void visitParamCommandComment(const ParamCommandComment *C);
+ void visitTParamCommandComment(const TParamCommandComment *C);
+ void visitVerbatimBlockComment(const VerbatimBlockComment *C);
+ void visitVerbatimBlockLineComment(const VerbatimBlockLineComment *C);
+ void visitVerbatimLineComment(const VerbatimLineComment *C);
+ };
+}
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+void ASTDumper::dumpPointer(const void *Ptr) {
+ ColorScope Color(*this, AddressColor);
+ OS << ' ' << Ptr;
+}
+
+void ASTDumper::dumpLocation(SourceLocation Loc) {
+ if (!SM)
+ return;
+
+ ColorScope Color(*this, LocationColor);
+ SourceLocation SpellingLoc = SM->getSpellingLoc(Loc);
+
+ // The general format we print out is filename:line:col, but we drop pieces
+ // that haven't changed since the last loc printed.
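+  // e.g. "foo.c:3:7" for the first loc, then "line:5:2" for a later loc
+  // in the same file, then "col:9" for a loc on that same line.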
+ PresumedLoc PLoc = SM->getPresumedLoc(SpellingLoc);
+
+ if (PLoc.isInvalid()) {
+ OS << "<invalid sloc>";
+ return;
+ }
+
+ if (strcmp(PLoc.getFilename(), LastLocFilename) != 0) {
+ OS << PLoc.getFilename() << ':' << PLoc.getLine()
+ << ':' << PLoc.getColumn();
+ LastLocFilename = PLoc.getFilename();
+ LastLocLine = PLoc.getLine();
+ } else if (PLoc.getLine() != LastLocLine) {
+ OS << "line" << ':' << PLoc.getLine()
+ << ':' << PLoc.getColumn();
+ LastLocLine = PLoc.getLine();
+ } else {
+ OS << "col" << ':' << PLoc.getColumn();
+ }
+}
+
+void ASTDumper::dumpSourceRange(SourceRange R) {
+ // Can't translate locations if a SourceManager isn't available.
+ if (!SM)
+ return;
+
+ OS << " <";
+ dumpLocation(R.getBegin());
+ if (R.getBegin() != R.getEnd()) {
+ OS << ", ";
+ dumpLocation(R.getEnd());
+ }
+ OS << ">";
+
+  // Example output: " <t2.c:123:421, t2.c:412:321>"
+}
+
+void ASTDumper::dumpBareType(QualType T, bool Desugar) {
+ ColorScope Color(*this, TypeColor);
+
+ SplitQualType T_split = T.split();
+ OS << "'" << QualType::getAsString(T_split) << "'";
+
+ if (Desugar && !T.isNull()) {
+ // If the type is sugared, also dump a (shallow) desugared type.
+ SplitQualType D_split = T.getSplitDesugaredType();
+ if (T_split != D_split)
+ OS << ":'" << QualType::getAsString(D_split) << "'";
+ }
+}
+
+void ASTDumper::dumpType(QualType T) {
+ OS << ' ';
+ dumpBareType(T);
+}
+
+void ASTDumper::dumpTypeAsChild(QualType T) {
+ SplitQualType SQT = T.split();
+ if (!SQT.Quals.hasQualifiers())
+ return dumpTypeAsChild(SQT.Ty);
+
+ dumpChild([=] {
+ OS << "QualType";
+ dumpPointer(T.getAsOpaquePtr());
+ OS << " ";
+ dumpBareType(T, false);
+ OS << " " << T.split().Quals.getAsString();
+ dumpTypeAsChild(T.split().Ty);
+ });
+}
+
+void ASTDumper::dumpTypeAsChild(const Type *T) {
+ dumpChild([=] {
+ if (!T) {
+ ColorScope Color(*this, NullColor);
+ OS << "<<<NULL>>>";
+ return;
+ }
+ if (const LocInfoType *LIT = llvm::dyn_cast<LocInfoType>(T)) {
+ {
+ ColorScope Color(*this, TypeColor);
+ OS << "LocInfo Type";
+ }
+ dumpPointer(T);
+ dumpTypeAsChild(LIT->getTypeSourceInfo()->getType());
+ return;
+ }
+
+ {
+ ColorScope Color(*this, TypeColor);
+ OS << T->getTypeClassName() << "Type";
+ }
+ dumpPointer(T);
+ OS << " ";
+ dumpBareType(QualType(T, 0), false);
+
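+    // Flag types that still carry sugar; the single-step-desugared type is
+    // dumped as an extra child below so each layer of sugar appears once.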
+ QualType SingleStepDesugar =
+ T->getLocallyUnqualifiedSingleStepDesugaredType();
+ if (SingleStepDesugar != QualType(T, 0))
+ OS << " sugar";
+ if (T->isDependentType())
+ OS << " dependent";
+ else if (T->isInstantiationDependentType())
+ OS << " instantiation_dependent";
+ if (T->isVariablyModifiedType())
+ OS << " variably_modified";
+ if (T->containsUnexpandedParameterPack())
+ OS << " contains_unexpanded_pack";
+ if (T->isFromAST())
+ OS << " imported";
+
+ TypeVisitor<ASTDumper>::Visit(T);
+
+ if (SingleStepDesugar != QualType(T, 0))
+ dumpTypeAsChild(SingleStepDesugar);
+ });
+}
+
+void ASTDumper::dumpBareDeclRef(const Decl *D) {
+ {
+ ColorScope Color(*this, DeclKindNameColor);
+ OS << D->getDeclKindName();
+ }
+ dumpPointer(D);
+
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
+ ColorScope Color(*this, DeclNameColor);
+ OS << " '" << ND->getDeclName() << '\'';
+ }
+
+ if (const ValueDecl *VD = dyn_cast<ValueDecl>(D))
+ dumpType(VD->getType());
+}
+
+void ASTDumper::dumpDeclRef(const Decl *D, const char *Label) {
+ if (!D)
+ return;
+
+ dumpChild([=]{
+ if (Label)
+ OS << Label << ' ';
+ dumpBareDeclRef(D);
+ });
+}
+
+void ASTDumper::dumpName(const NamedDecl *ND) {
+ if (ND->getDeclName()) {
+ ColorScope Color(*this, DeclNameColor);
+ OS << ' ' << ND->getNameAsString();
+ }
+}
+
+bool ASTDumper::hasNodes(const DeclContext *DC) {
+ if (!DC)
+ return false;
+
+ return DC->hasExternalLexicalStorage() ||
+ DC->noload_decls_begin() != DC->noload_decls_end();
+}
+
+void ASTDumper::dumpDeclContext(const DeclContext *DC) {
+ if (!DC)
+ return;
+
+ for (auto *D : DC->noload_decls())
+ dumpDecl(D);
+
+ if (DC->hasExternalLexicalStorage()) {
+ dumpChild([=]{
+ ColorScope Color(*this, UndeserializedColor);
+ OS << "<undeserialized declarations>";
+ });
+ }
+}
+
+void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
+ dumpChild([=] {
+ OS << "StoredDeclsMap ";
+ dumpBareDeclRef(cast<Decl>(DC));
+
+ const DeclContext *Primary = DC->getPrimaryContext();
+ if (Primary != DC) {
+ OS << " primary";
+ dumpPointer(cast<Decl>(Primary));
+ }
+
+ bool HasUndeserializedLookups = Primary->hasExternalVisibleStorage();
+
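+    // Iterate with the noload_* API so that dumping never triggers
+    // deserialization; lookups still in external storage are reported
+    // below as "<undeserialized lookups>".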
+ DeclContext::all_lookups_iterator I = Primary->noload_lookups_begin(),
+ E = Primary->noload_lookups_end();
+ while (I != E) {
+ DeclarationName Name = I.getLookupName();
+ DeclContextLookupResult R = *I++;
+
+ dumpChild([=] {
+ OS << "DeclarationName ";
+ {
+ ColorScope Color(*this, DeclNameColor);
+ OS << '\'' << Name << '\'';
+ }
+
+ for (DeclContextLookupResult::iterator RI = R.begin(), RE = R.end();
+ RI != RE; ++RI) {
+ dumpChild([=] {
+ dumpBareDeclRef(*RI);
+
+ if ((*RI)->isHidden())
+ OS << " hidden";
+
+ // If requested, dump the redecl chain for this lookup.
+ if (DumpDecls) {
+ // Dump earliest decl first.
+ std::function<void(Decl *)> DumpWithPrev = [&](Decl *D) {
+ if (Decl *Prev = D->getPreviousDecl())
+ DumpWithPrev(Prev);
+ dumpDecl(D);
+ };
+ DumpWithPrev(*RI);
+ }
+ });
+ }
+ });
+ }
+
+ if (HasUndeserializedLookups) {
+ dumpChild([=] {
+ ColorScope Color(*this, UndeserializedColor);
+ OS << "<undeserialized lookups>";
+ });
+ }
+ });
+}
+
+void ASTDumper::dumpAttr(const Attr *A) {
+ dumpChild([=] {
+ {
+ ColorScope Color(*this, AttrColor);
+
+ switch (A->getKind()) {
+#define ATTR(X) case attr::X: OS << #X; break;
+#include "clang/Basic/AttrList.inc"
+ default:
+ llvm_unreachable("unexpected attribute kind");
+ }
+ OS << "Attr";
+ }
+ dumpPointer(A);
+ dumpSourceRange(A->getRange());
+ if (A->isInherited())
+ OS << " Inherited";
+ if (A->isImplicit())
+ OS << " Implicit";
+#include "clang/AST/AttrDump.inc"
+ });
+}
+
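+// Fallback overload: selected only when D is neither Mergeable nor
+// Redeclarable, i.e. there is no redeclaration chain to report.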
+static void dumpPreviousDeclImpl(raw_ostream &OS, ...) {}
+
+template<typename T>
+static void dumpPreviousDeclImpl(raw_ostream &OS, const Mergeable<T> *D) {
+ const T *First = D->getFirstDecl();
+ if (First != D)
+ OS << " first " << First;
+}
+
+template<typename T>
+static void dumpPreviousDeclImpl(raw_ostream &OS, const Redeclarable<T> *D) {
+ const T *Prev = D->getPreviousDecl();
+ if (Prev)
+ OS << " prev " << Prev;
+}
+
+/// Dump the previous declaration in the redeclaration chain for a declaration,
+/// if any.
+static void dumpPreviousDecl(raw_ostream &OS, const Decl *D) {
+ switch (D->getKind()) {
+#define DECL(DERIVED, BASE) \
+ case Decl::DERIVED: \
+ return dumpPreviousDeclImpl(OS, cast<DERIVED##Decl>(D));
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+ }
+ llvm_unreachable("Decl that isn't part of DeclNodes.inc!");
+}
+
+//===----------------------------------------------------------------------===//
+// C++ Utilities
+//===----------------------------------------------------------------------===//
+
+void ASTDumper::dumpAccessSpecifier(AccessSpecifier AS) {
+ switch (AS) {
+ case AS_none:
+ break;
+ case AS_public:
+ OS << "public";
+ break;
+ case AS_protected:
+ OS << "protected";
+ break;
+ case AS_private:
+ OS << "private";
+ break;
+ }
+}
+
+void ASTDumper::dumpCXXCtorInitializer(const CXXCtorInitializer *Init) {
+ dumpChild([=] {
+ OS << "CXXCtorInitializer";
+ if (Init->isAnyMemberInitializer()) {
+ OS << ' ';
+ dumpBareDeclRef(Init->getAnyMember());
+ } else if (Init->isBaseInitializer()) {
+ dumpType(QualType(Init->getBaseClass(), 0));
+ } else if (Init->isDelegatingInitializer()) {
+ dumpType(Init->getTypeSourceInfo()->getType());
+ } else {
+ llvm_unreachable("Unknown initializer type");
+ }
+ dumpStmt(Init->getInit());
+ });
+}
+
+void ASTDumper::dumpTemplateParameters(const TemplateParameterList *TPL) {
+ if (!TPL)
+ return;
+
+ for (TemplateParameterList::const_iterator I = TPL->begin(), E = TPL->end();
+ I != E; ++I)
+ dumpDecl(*I);
+}
+
+void ASTDumper::dumpTemplateArgumentListInfo(
+ const TemplateArgumentListInfo &TALI) {
+ for (unsigned i = 0, e = TALI.size(); i < e; ++i)
+ dumpTemplateArgumentLoc(TALI[i]);
+}
+
+void ASTDumper::dumpTemplateArgumentLoc(const TemplateArgumentLoc &A) {
+ dumpTemplateArgument(A.getArgument(), A.getSourceRange());
+}
+
+void ASTDumper::dumpTemplateArgumentList(const TemplateArgumentList &TAL) {
+ for (unsigned i = 0, e = TAL.size(); i < e; ++i)
+ dumpTemplateArgument(TAL[i]);
+}
+
+void ASTDumper::dumpTemplateArgument(const TemplateArgument &A, SourceRange R) {
+ dumpChild([=] {
+ OS << "TemplateArgument";
+ if (R.isValid())
+ dumpSourceRange(R);
+
+ switch (A.getKind()) {
+ case TemplateArgument::Null:
+ OS << " null";
+ break;
+ case TemplateArgument::Type:
+ OS << " type";
+ dumpType(A.getAsType());
+ break;
+ case TemplateArgument::Declaration:
+ OS << " decl";
+ dumpDeclRef(A.getAsDecl());
+ break;
+ case TemplateArgument::NullPtr:
+ OS << " nullptr";
+ break;
+ case TemplateArgument::Integral:
+ OS << " integral " << A.getAsIntegral();
+ break;
+ case TemplateArgument::Template:
+ OS << " template ";
+ A.getAsTemplate().dump(OS);
+ break;
+ case TemplateArgument::TemplateExpansion:
+ OS << " template expansion";
+ A.getAsTemplateOrTemplatePattern().dump(OS);
+ break;
+ case TemplateArgument::Expression:
+ OS << " expr";
+ dumpStmt(A.getAsExpr());
+ break;
+ case TemplateArgument::Pack:
+ OS << " pack";
+ for (TemplateArgument::pack_iterator I = A.pack_begin(), E = A.pack_end();
+ I != E; ++I)
+ dumpTemplateArgument(*I);
+ break;
+ }
+ });
+}
+
+//===----------------------------------------------------------------------===//
+// Objective-C Utilities
+//===----------------------------------------------------------------------===//
+void ASTDumper::dumpObjCTypeParamList(const ObjCTypeParamList *typeParams) {
+ if (!typeParams)
+ return;
+
+ for (auto typeParam : *typeParams) {
+ dumpDecl(typeParam);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Decl dumping methods.
+//===----------------------------------------------------------------------===//
+
+void ASTDumper::dumpDecl(const Decl *D) {
+ dumpChild([=] {
+ if (!D) {
+ ColorScope Color(*this, NullColor);
+ OS << "<<<NULL>>>";
+ return;
+ }
+
+ {
+ ColorScope Color(*this, DeclKindNameColor);
+ OS << D->getDeclKindName() << "Decl";
+ }
+ dumpPointer(D);
+ if (D->getLexicalDeclContext() != D->getDeclContext())
+ OS << " parent " << cast<Decl>(D->getDeclContext());
+ dumpPreviousDecl(OS, D);
+ dumpSourceRange(D->getSourceRange());
+ OS << ' ';
+ dumpLocation(D->getLocation());
+ if (Module *M = D->getImportedOwningModule())
+ OS << " in " << M->getFullModuleName();
+ else if (Module *M = D->getLocalOwningModule())
+ OS << " in (local) " << M->getFullModuleName();
+ if (auto *ND = dyn_cast<NamedDecl>(D))
+ for (Module *M : D->getASTContext().getModulesWithMergedDefinition(
+ const_cast<NamedDecl *>(ND)))
+ dumpChild([=] { OS << "also in " << M->getFullModuleName(); });
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ if (ND->isHidden())
+ OS << " hidden";
+ if (D->isImplicit())
+ OS << " implicit";
+ if (D->isUsed())
+ OS << " used";
+ else if (D->isThisDeclarationReferenced())
+ OS << " referenced";
+ if (D->isInvalidDecl())
+ OS << " invalid";
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isConstexpr())
+ OS << " constexpr";
+
+ ConstDeclVisitor<ASTDumper>::Visit(D);
+
+ for (Decl::attr_iterator I = D->attr_begin(), E = D->attr_end(); I != E;
+ ++I)
+ dumpAttr(*I);
+
+ if (const FullComment *Comment =
+ D->getASTContext().getLocalCommentForDeclUncached(D))
+ dumpFullComment(Comment);
+
+ // Decls within functions are visited by the body.
+ if (!isa<FunctionDecl>(*D) && !isa<ObjCMethodDecl>(*D) &&
+ hasNodes(dyn_cast<DeclContext>(D)))
+ dumpDeclContext(cast<DeclContext>(D));
+ });
+}
+
+void ASTDumper::VisitLabelDecl(const LabelDecl *D) {
+ dumpName(D);
+}
+
+void ASTDumper::VisitTypedefDecl(const TypedefDecl *D) {
+ dumpName(D);
+ dumpType(D->getUnderlyingType());
+ if (D->isModulePrivate())
+ OS << " __module_private__";
+}
+
+void ASTDumper::VisitEnumDecl(const EnumDecl *D) {
+ if (D->isScoped()) {
+ if (D->isScopedUsingClassTag())
+ OS << " class";
+ else
+ OS << " struct";
+ }
+ dumpName(D);
+ if (D->isModulePrivate())
+ OS << " __module_private__";
+ if (D->isFixed())
+ dumpType(D->getIntegerType());
+}
+
+void ASTDumper::VisitRecordDecl(const RecordDecl *D) {
+ OS << ' ' << D->getKindName();
+ dumpName(D);
+ if (D->isModulePrivate())
+ OS << " __module_private__";
+ if (D->isCompleteDefinition())
+ OS << " definition";
+}
+
+void ASTDumper::VisitEnumConstantDecl(const EnumConstantDecl *D) {
+ dumpName(D);
+ dumpType(D->getType());
+ if (const Expr *Init = D->getInitExpr())
+ dumpStmt(Init);
+}
+
+void ASTDumper::VisitIndirectFieldDecl(const IndirectFieldDecl *D) {
+ dumpName(D);
+ dumpType(D->getType());
+
+ for (auto *Child : D->chain())
+ dumpDeclRef(Child);
+}
+
+void ASTDumper::VisitFunctionDecl(const FunctionDecl *D) {
+ dumpName(D);
+ dumpType(D->getType());
+
+ StorageClass SC = D->getStorageClass();
+ if (SC != SC_None)
+ OS << ' ' << VarDecl::getStorageClassSpecifierString(SC);
+ if (D->isInlineSpecified())
+ OS << " inline";
+ if (D->isVirtualAsWritten())
+ OS << " virtual";
+ if (D->isModulePrivate())
+ OS << " __module_private__";
+
+ if (D->isPure())
+ OS << " pure";
+ else if (D->isDeletedAsWritten())
+ OS << " delete";
+
+ if (const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>()) {
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ switch (EPI.ExceptionSpec.Type) {
+ default: break;
+ case EST_Unevaluated:
+ OS << " noexcept-unevaluated " << EPI.ExceptionSpec.SourceDecl;
+ break;
+ case EST_Uninstantiated:
+ OS << " noexcept-uninstantiated " << EPI.ExceptionSpec.SourceTemplate;
+ break;
+ }
+ }
+
+ if (const FunctionTemplateSpecializationInfo *FTSI =
+ D->getTemplateSpecializationInfo())
+ dumpTemplateArgumentList(*FTSI->TemplateArguments);
+
+ for (ArrayRef<NamedDecl *>::iterator
+ I = D->getDeclsInPrototypeScope().begin(),
+ E = D->getDeclsInPrototypeScope().end(); I != E; ++I)
+ dumpDecl(*I);
+
+ if (!D->param_begin() && D->getNumParams())
+ dumpChild([=] { OS << "<<NULL params x " << D->getNumParams() << ">>"; });
+ else
+ for (FunctionDecl::param_const_iterator I = D->param_begin(),
+ E = D->param_end();
+ I != E; ++I)
+ dumpDecl(*I);
+
+ if (const CXXConstructorDecl *C = dyn_cast<CXXConstructorDecl>(D))
+ for (CXXConstructorDecl::init_const_iterator I = C->init_begin(),
+ E = C->init_end();
+ I != E; ++I)
+ dumpCXXCtorInitializer(*I);
+
+ if (D->doesThisDeclarationHaveABody())
+ dumpStmt(D->getBody());
+}
+
+void ASTDumper::VisitFieldDecl(const FieldDecl *D) {
+ dumpName(D);
+ dumpType(D->getType());
+ if (D->isMutable())
+ OS << " mutable";
+ if (D->isModulePrivate())
+ OS << " __module_private__";
+
+ if (D->isBitField())
+ dumpStmt(D->getBitWidth());
+ if (Expr *Init = D->getInClassInitializer())
+ dumpStmt(Init);
+}
+
+void ASTDumper::VisitVarDecl(const VarDecl *D) {
+ dumpName(D);
+ dumpType(D->getType());
+ StorageClass SC = D->getStorageClass();
+ if (SC != SC_None)
+ OS << ' ' << VarDecl::getStorageClassSpecifierString(SC);
+ switch (D->getTLSKind()) {
+ case VarDecl::TLS_None: break;
+ case VarDecl::TLS_Static: OS << " tls"; break;
+ case VarDecl::TLS_Dynamic: OS << " tls_dynamic"; break;
+ }
+ if (D->isModulePrivate())
+ OS << " __module_private__";
+ if (D->isNRVOVariable())
+ OS << " nrvo";
+ if (D->hasInit()) {
+ switch (D->getInitStyle()) {
+ case VarDecl::CInit: OS << " cinit"; break;
+ case VarDecl::CallInit: OS << " callinit"; break;
+ case VarDecl::ListInit: OS << " listinit"; break;
+ }
+ dumpStmt(D->getInit());
+ }
+}
+
+void ASTDumper::VisitFileScopeAsmDecl(const FileScopeAsmDecl *D) {
+ dumpStmt(D->getAsmString());
+}
+
+void ASTDumper::VisitImportDecl(const ImportDecl *D) {
+ OS << ' ' << D->getImportedModule()->getFullModuleName();
+}
+
+//===----------------------------------------------------------------------===//
+// C++ Declarations
+//===----------------------------------------------------------------------===//
+
+void ASTDumper::VisitNamespaceDecl(const NamespaceDecl *D) {
+ dumpName(D);
+ if (D->isInline())
+ OS << " inline";
+ if (!D->isOriginalNamespace())
+ dumpDeclRef(D->getOriginalNamespace(), "original");
+}
+
+void ASTDumper::VisitUsingDirectiveDecl(const UsingDirectiveDecl *D) {
+ OS << ' ';
+ dumpBareDeclRef(D->getNominatedNamespace());
+}
+
+void ASTDumper::VisitNamespaceAliasDecl(const NamespaceAliasDecl *D) {
+ dumpName(D);
+ dumpDeclRef(D->getAliasedNamespace());
+}
+
+void ASTDumper::VisitTypeAliasDecl(const TypeAliasDecl *D) {
+ dumpName(D);
+ dumpType(D->getUnderlyingType());
+}
+
+void ASTDumper::VisitTypeAliasTemplateDecl(const TypeAliasTemplateDecl *D) {
+ dumpName(D);
+ dumpTemplateParameters(D->getTemplateParameters());
+ dumpDecl(D->getTemplatedDecl());
+}
+
+void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
+ VisitRecordDecl(D);
+ if (!D->isCompleteDefinition())
+ return;
+
+ for (const auto &I : D->bases()) {
+ dumpChild([=] {
+ if (I.isVirtual())
+ OS << "virtual ";
+ dumpAccessSpecifier(I.getAccessSpecifier());
+ dumpType(I.getType());
+ if (I.isPackExpansion())
+ OS << "...";
+ });
+ }
+}
+
+void ASTDumper::VisitStaticAssertDecl(const StaticAssertDecl *D) {
+ dumpStmt(D->getAssertExpr());
+ dumpStmt(D->getMessage());
+}
+
+template<typename SpecializationDecl>
+void ASTDumper::VisitTemplateDeclSpecialization(const SpecializationDecl *D,
+ bool DumpExplicitInst,
+ bool DumpRefOnly) {
+ bool DumpedAny = false;
+ for (auto *RedeclWithBadType : D->redecls()) {
+ // FIXME: The redecls() range sometimes has elements of a less-specific
+ // type. (In particular, ClassTemplateSpecializationDecl::redecls() gives
+ // us TagDecls, and should give CXXRecordDecls).
+ auto *Redecl = dyn_cast<SpecializationDecl>(RedeclWithBadType);
+ if (!Redecl) {
+ // Found the injected-class-name for a class template. This will be dumped
+ // as part of its surrounding class so we don't need to dump it here.
+ assert(isa<CXXRecordDecl>(RedeclWithBadType) &&
+ "expected an injected-class-name");
+ continue;
+ }
+
+ switch (Redecl->getTemplateSpecializationKind()) {
+ case TSK_ExplicitInstantiationDeclaration:
+ case TSK_ExplicitInstantiationDefinition:
+ if (!DumpExplicitInst)
+ break;
+ // Fall through.
+ case TSK_Undeclared:
+ case TSK_ImplicitInstantiation:
+ if (DumpRefOnly)
+ dumpDeclRef(Redecl);
+ else
+ dumpDecl(Redecl);
+ DumpedAny = true;
+ break;
+ case TSK_ExplicitSpecialization:
+ break;
+ }
+ }
+
+ // Ensure we dump at least one decl for each specialization.
+ if (!DumpedAny)
+ dumpDeclRef(D);
+}
+
+template<typename TemplateDecl>
+void ASTDumper::VisitTemplateDecl(const TemplateDecl *D,
+ bool DumpExplicitInst) {
+ dumpName(D);
+ dumpTemplateParameters(D->getTemplateParameters());
+
+ dumpDecl(D->getTemplatedDecl());
+
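+  // Only the canonical declaration dumps its specializations in full;
+  // redeclarations emit just a reference so each one is dumped once.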
+ for (auto *Child : D->specializations())
+ VisitTemplateDeclSpecialization(Child, DumpExplicitInst,
+ !D->isCanonicalDecl());
+}
+
+void ASTDumper::VisitFunctionTemplateDecl(const FunctionTemplateDecl *D) {
+ // FIXME: We don't add a declaration of a function template specialization
+ // to its context when it's explicitly instantiated, so dump explicit
+ // instantiations when we dump the template itself.
+ VisitTemplateDecl(D, true);
+}
+
+void ASTDumper::VisitClassTemplateDecl(const ClassTemplateDecl *D) {
+ VisitTemplateDecl(D, false);
+}
+
+void ASTDumper::VisitClassTemplateSpecializationDecl(
+ const ClassTemplateSpecializationDecl *D) {
+ VisitCXXRecordDecl(D);
+ dumpTemplateArgumentList(D->getTemplateArgs());
+}
+
+void ASTDumper::VisitClassTemplatePartialSpecializationDecl(
+ const ClassTemplatePartialSpecializationDecl *D) {
+ VisitClassTemplateSpecializationDecl(D);
+ dumpTemplateParameters(D->getTemplateParameters());
+}
+
+void ASTDumper::VisitClassScopeFunctionSpecializationDecl(
+ const ClassScopeFunctionSpecializationDecl *D) {
+ dumpDeclRef(D->getSpecialization());
+ if (D->hasExplicitTemplateArgs())
+ dumpTemplateArgumentListInfo(D->templateArgs());
+}
+
+void ASTDumper::VisitVarTemplateDecl(const VarTemplateDecl *D) {
+ VisitTemplateDecl(D, false);
+}
+
+void ASTDumper::VisitBuiltinTemplateDecl(const BuiltinTemplateDecl *D) {
+ dumpName(D);
+ dumpTemplateParameters(D->getTemplateParameters());
+}
+
+void ASTDumper::VisitVarTemplateSpecializationDecl(
+ const VarTemplateSpecializationDecl *D) {
+ dumpTemplateArgumentList(D->getTemplateArgs());
+ VisitVarDecl(D);
+}
+
+void ASTDumper::VisitVarTemplatePartialSpecializationDecl(
+ const VarTemplatePartialSpecializationDecl *D) {
+ dumpTemplateParameters(D->getTemplateParameters());
+ VisitVarTemplateSpecializationDecl(D);
+}
+
+void ASTDumper::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) {
+ if (D->wasDeclaredWithTypename())
+ OS << " typename";
+ else
+ OS << " class";
+ if (D->isParameterPack())
+ OS << " ...";
+ dumpName(D);
+ if (D->hasDefaultArgument())
+ dumpTemplateArgument(D->getDefaultArgument());
+}
+
+void ASTDumper::VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D) {
+ dumpType(D->getType());
+ if (D->isParameterPack())
+ OS << " ...";
+ dumpName(D);
+ if (D->hasDefaultArgument())
+ dumpTemplateArgument(D->getDefaultArgument());
+}
+
+void ASTDumper::VisitTemplateTemplateParmDecl(
+ const TemplateTemplateParmDecl *D) {
+ if (D->isParameterPack())
+ OS << " ...";
+ dumpName(D);
+ dumpTemplateParameters(D->getTemplateParameters());
+ if (D->hasDefaultArgument())
+ dumpTemplateArgumentLoc(D->getDefaultArgument());
+}
+
+void ASTDumper::VisitUsingDecl(const UsingDecl *D) {
+ OS << ' ';
+ if (D->getQualifier())
+ D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
+ OS << D->getNameAsString();
+}
+
+void ASTDumper::VisitUnresolvedUsingTypenameDecl(
+ const UnresolvedUsingTypenameDecl *D) {
+ OS << ' ';
+ if (D->getQualifier())
+ D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
+ OS << D->getNameAsString();
+}
+
+void ASTDumper::VisitUnresolvedUsingValueDecl(const UnresolvedUsingValueDecl *D) {
+ OS << ' ';
+ if (D->getQualifier())
+ D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
+ OS << D->getNameAsString();
+ dumpType(D->getType());
+}
+
+void ASTDumper::VisitUsingShadowDecl(const UsingShadowDecl *D) {
+ OS << ' ';
+ dumpBareDeclRef(D->getTargetDecl());
+}
+
+void ASTDumper::VisitLinkageSpecDecl(const LinkageSpecDecl *D) {
+ switch (D->getLanguage()) {
+ case LinkageSpecDecl::lang_c: OS << " C"; break;
+ case LinkageSpecDecl::lang_cxx: OS << " C++"; break;
+ }
+}
+
+void ASTDumper::VisitAccessSpecDecl(const AccessSpecDecl *D) {
+ OS << ' ';
+ dumpAccessSpecifier(D->getAccess());
+}
+
+void ASTDumper::VisitFriendDecl(const FriendDecl *D) {
+ if (TypeSourceInfo *T = D->getFriendType())
+ dumpType(T->getType());
+ else
+ dumpDecl(D->getFriendDecl());
+}
+
+//===----------------------------------------------------------------------===//
+// Obj-C Declarations
+//===----------------------------------------------------------------------===//
+
+void ASTDumper::VisitObjCIvarDecl(const ObjCIvarDecl *D) {
+ dumpName(D);
+ dumpType(D->getType());
+ if (D->getSynthesize())
+ OS << " synthesize";
+
+ switch (D->getAccessControl()) {
+ case ObjCIvarDecl::None:
+ OS << " none";
+ break;
+ case ObjCIvarDecl::Private:
+ OS << " private";
+ break;
+ case ObjCIvarDecl::Protected:
+ OS << " protected";
+ break;
+ case ObjCIvarDecl::Public:
+ OS << " public";
+ break;
+ case ObjCIvarDecl::Package:
+ OS << " package";
+ break;
+ }
+}
+
+void ASTDumper::VisitObjCMethodDecl(const ObjCMethodDecl *D) {
+ if (D->isInstanceMethod())
+ OS << " -";
+ else
+ OS << " +";
+ dumpName(D);
+ dumpType(D->getReturnType());
+
+ if (D->isThisDeclarationADefinition()) {
+ dumpDeclContext(D);
+ } else {
+ for (ObjCMethodDecl::param_const_iterator I = D->param_begin(),
+ E = D->param_end();
+ I != E; ++I)
+ dumpDecl(*I);
+ }
+
+ if (D->isVariadic())
+ dumpChild([=] { OS << "..."; });
+
+ if (D->hasBody())
+ dumpStmt(D->getBody());
+}
+
+void ASTDumper::VisitObjCTypeParamDecl(const ObjCTypeParamDecl *D) {
+ dumpName(D);
+ switch (D->getVariance()) {
+ case ObjCTypeParamVariance::Invariant:
+ break;
+
+ case ObjCTypeParamVariance::Covariant:
+ OS << " covariant";
+ break;
+
+ case ObjCTypeParamVariance::Contravariant:
+ OS << " contravariant";
+ break;
+ }
+
+ if (D->hasExplicitBound())
+ OS << " bounded";
+ dumpType(D->getUnderlyingType());
+}
+
+void ASTDumper::VisitObjCCategoryDecl(const ObjCCategoryDecl *D) {
+ dumpName(D);
+ dumpDeclRef(D->getClassInterface());
+ dumpObjCTypeParamList(D->getTypeParamList());
+ dumpDeclRef(D->getImplementation());
+ for (ObjCCategoryDecl::protocol_iterator I = D->protocol_begin(),
+ E = D->protocol_end();
+ I != E; ++I)
+ dumpDeclRef(*I);
+}
+
+void ASTDumper::VisitObjCCategoryImplDecl(const ObjCCategoryImplDecl *D) {
+ dumpName(D);
+ dumpDeclRef(D->getClassInterface());
+ dumpDeclRef(D->getCategoryDecl());
+}
+
+void ASTDumper::VisitObjCProtocolDecl(const ObjCProtocolDecl *D) {
+ dumpName(D);
+
+ for (auto *Child : D->protocols())
+ dumpDeclRef(Child);
+}
+
+void ASTDumper::VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D) {
+ dumpName(D);
+ dumpObjCTypeParamList(D->getTypeParamListAsWritten());
+ dumpDeclRef(D->getSuperClass(), "super");
+
+ dumpDeclRef(D->getImplementation());
+ for (auto *Child : D->protocols())
+ dumpDeclRef(Child);
+}
+
+void ASTDumper::VisitObjCImplementationDecl(const ObjCImplementationDecl *D) {
+ dumpName(D);
+ dumpDeclRef(D->getSuperClass(), "super");
+ dumpDeclRef(D->getClassInterface());
+ for (ObjCImplementationDecl::init_const_iterator I = D->init_begin(),
+ E = D->init_end();
+ I != E; ++I)
+ dumpCXXCtorInitializer(*I);
+}
+
+void ASTDumper::VisitObjCCompatibleAliasDecl(const ObjCCompatibleAliasDecl *D) {
+ dumpName(D);
+ dumpDeclRef(D->getClassInterface());
+}
+
+void ASTDumper::VisitObjCPropertyDecl(const ObjCPropertyDecl *D) {
+ dumpName(D);
+ dumpType(D->getType());
+
+ if (D->getPropertyImplementation() == ObjCPropertyDecl::Required)
+ OS << " required";
+ else if (D->getPropertyImplementation() == ObjCPropertyDecl::Optional)
+ OS << " optional";
+
+ ObjCPropertyDecl::PropertyAttributeKind Attrs = D->getPropertyAttributes();
+ if (Attrs != ObjCPropertyDecl::OBJC_PR_noattr) {
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_readonly)
+ OS << " readonly";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_assign)
+ OS << " assign";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_readwrite)
+ OS << " readwrite";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_retain)
+ OS << " retain";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_copy)
+ OS << " copy";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ OS << " nonatomic";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_atomic)
+ OS << " atomic";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_weak)
+ OS << " weak";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_strong)
+ OS << " strong";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_unsafe_unretained)
+ OS << " unsafe_unretained";
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_getter)
+ dumpDeclRef(D->getGetterMethodDecl(), "getter");
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_setter)
+ dumpDeclRef(D->getSetterMethodDecl(), "setter");
+ }
+}
+
+void ASTDumper::VisitObjCPropertyImplDecl(const ObjCPropertyImplDecl *D) {
+ dumpName(D->getPropertyDecl());
+ if (D->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize)
+ OS << " synthesize";
+ else
+ OS << " dynamic";
+ dumpDeclRef(D->getPropertyDecl());
+ dumpDeclRef(D->getPropertyIvarDecl());
+}
+
+void ASTDumper::VisitBlockDecl(const BlockDecl *D) {
+ for (auto I : D->params())
+ dumpDecl(I);
+
+ if (D->isVariadic())
+ dumpChild([=]{ OS << "..."; });
+
+ if (D->capturesCXXThis())
+ dumpChild([=]{ OS << "capture this"; });
+
+ for (const auto &I : D->captures()) {
+ dumpChild([=] {
+ OS << "capture";
+ if (I.isByRef())
+ OS << " byref";
+ if (I.isNested())
+ OS << " nested";
+ if (I.getVariable()) {
+ OS << ' ';
+ dumpBareDeclRef(I.getVariable());
+ }
+ if (I.hasCopyExpr())
+ dumpStmt(I.getCopyExpr());
+ });
+ }
+ dumpStmt(D->getBody());
+}
+
+//===----------------------------------------------------------------------===//
+// Stmt dumping methods.
+//===----------------------------------------------------------------------===//
+
+void ASTDumper::dumpStmt(const Stmt *S) {
+ dumpChild([=] {
+ if (!S) {
+ ColorScope Color(*this, NullColor);
+ OS << "<<<NULL>>>";
+ return;
+ }
+
+ if (const DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
+ VisitDeclStmt(DS);
+ return;
+ }
+
+ ConstStmtVisitor<ASTDumper>::Visit(S);
+
+ for (const Stmt *SubStmt : S->children())
+ dumpStmt(SubStmt);
+ });
+}
+
+void ASTDumper::VisitStmt(const Stmt *Node) {
+ {
+ ColorScope Color(*this, StmtColor);
+ OS << Node->getStmtClassName();
+ }
+ dumpPointer(Node);
+ dumpSourceRange(Node->getSourceRange());
+}
+
+void ASTDumper::VisitDeclStmt(const DeclStmt *Node) {
+ VisitStmt(Node);
+ for (DeclStmt::const_decl_iterator I = Node->decl_begin(),
+ E = Node->decl_end();
+ I != E; ++I)
+ dumpDecl(*I);
+}
+
+void ASTDumper::VisitAttributedStmt(const AttributedStmt *Node) {
+ VisitStmt(Node);
+ for (ArrayRef<const Attr *>::iterator I = Node->getAttrs().begin(),
+ E = Node->getAttrs().end();
+ I != E; ++I)
+ dumpAttr(*I);
+}
+
+void ASTDumper::VisitLabelStmt(const LabelStmt *Node) {
+ VisitStmt(Node);
+ OS << " '" << Node->getName() << "'";
+}
+
+void ASTDumper::VisitGotoStmt(const GotoStmt *Node) {
+ VisitStmt(Node);
+ OS << " '" << Node->getLabel()->getName() << "'";
+ dumpPointer(Node->getLabel());
+}
+
+void ASTDumper::VisitCXXCatchStmt(const CXXCatchStmt *Node) {
+ VisitStmt(Node);
+ dumpDecl(Node->getExceptionDecl());
+}
+
+//===----------------------------------------------------------------------===//
+// Expr dumping methods.
+//===----------------------------------------------------------------------===//
+
+void ASTDumper::VisitExpr(const Expr *Node) {
+ VisitStmt(Node);
+ dumpType(Node->getType());
+
+ {
+ ColorScope Color(*this, ValueKindColor);
+ switch (Node->getValueKind()) {
+ case VK_RValue:
+ break;
+ case VK_LValue:
+ OS << " lvalue";
+ break;
+ case VK_XValue:
+ OS << " xvalue";
+ break;
+ }
+ }
+
+ {
+ ColorScope Color(*this, ObjectKindColor);
+ switch (Node->getObjectKind()) {
+ case OK_Ordinary:
+ break;
+ case OK_BitField:
+ OS << " bitfield";
+ break;
+ case OK_ObjCProperty:
+ OS << " objcproperty";
+ break;
+ case OK_ObjCSubscript:
+ OS << " objcsubscript";
+ break;
+ case OK_VectorComponent:
+ OS << " vectorcomponent";
+ break;
+ }
+ }
+}
+
+static void dumpBasePath(raw_ostream &OS, const CastExpr *Node) {
+ if (Node->path_empty())
+ return;
+
+ OS << " (";
+ bool First = true;
+ for (CastExpr::path_const_iterator I = Node->path_begin(),
+ E = Node->path_end();
+ I != E; ++I) {
+ const CXXBaseSpecifier *Base = *I;
+ if (!First)
+ OS << " -> ";
+
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ if (Base->isVirtual())
+ OS << "virtual ";
+ OS << RD->getName();
+ First = false;
+ }
+
+ OS << ')';
+}
+
+void ASTDumper::VisitCastExpr(const CastExpr *Node) {
+ VisitExpr(Node);
+ OS << " <";
+ {
+ ColorScope Color(*this, CastColor);
+ OS << Node->getCastKindName();
+ }
+ dumpBasePath(OS, Node);
+ OS << ">";
+}
+
+void ASTDumper::VisitDeclRefExpr(const DeclRefExpr *Node) {
+ VisitExpr(Node);
+
+ OS << " ";
+ dumpBareDeclRef(Node->getDecl());
+ if (Node->getDecl() != Node->getFoundDecl()) {
+ OS << " (";
+ dumpBareDeclRef(Node->getFoundDecl());
+ OS << ")";
+ }
+}
+
+void ASTDumper::VisitUnresolvedLookupExpr(const UnresolvedLookupExpr *Node) {
+ VisitExpr(Node);
+ OS << " (";
+ if (!Node->requiresADL())
+ OS << "no ";
+ OS << "ADL) = '" << Node->getName() << '\'';
+
+ UnresolvedLookupExpr::decls_iterator
+ I = Node->decls_begin(), E = Node->decls_end();
+ if (I == E)
+ OS << " empty";
+ for (; I != E; ++I)
+ dumpPointer(*I);
+}
+
+void ASTDumper::VisitObjCIvarRefExpr(const ObjCIvarRefExpr *Node) {
+ VisitExpr(Node);
+
+ {
+ ColorScope Color(*this, DeclKindNameColor);
+ OS << " " << Node->getDecl()->getDeclKindName() << "Decl";
+ }
+ OS << "='" << *Node->getDecl() << "'";
+ dumpPointer(Node->getDecl());
+ if (Node->isFreeIvar())
+ OS << " isFreeIvar";
+}
+
+void ASTDumper::VisitPredefinedExpr(const PredefinedExpr *Node) {
+ VisitExpr(Node);
+ OS << " " << PredefinedExpr::getIdentTypeName(Node->getIdentType());
+}
+
+void ASTDumper::VisitCharacterLiteral(const CharacterLiteral *Node) {
+ VisitExpr(Node);
+ ColorScope Color(*this, ValueColor);
+ OS << " " << Node->getValue();
+}
+
+void ASTDumper::VisitIntegerLiteral(const IntegerLiteral *Node) {
+ VisitExpr(Node);
+
+ bool isSigned = Node->getType()->isSignedIntegerType();
+ ColorScope Color(*this, ValueColor);
+ OS << " " << Node->getValue().toString(10, isSigned);
+}
+
+void ASTDumper::VisitFloatingLiteral(const FloatingLiteral *Node) {
+ VisitExpr(Node);
+ ColorScope Color(*this, ValueColor);
+ OS << " " << Node->getValueAsApproximateDouble();
+}
+
+void ASTDumper::VisitStringLiteral(const StringLiteral *Str) {
+ VisitExpr(Str);
+ ColorScope Color(*this, ValueColor);
+ OS << " ";
+ Str->outputString(OS);
+}
+
+void ASTDumper::VisitInitListExpr(const InitListExpr *ILE) {
+ VisitExpr(ILE);
+ if (auto *Filler = ILE->getArrayFiller()) {
+ dumpChild([=] {
+ OS << "array filler";
+ dumpStmt(Filler);
+ });
+ }
+ if (auto *Field = ILE->getInitializedFieldInUnion()) {
+ OS << " field ";
+ dumpBareDeclRef(Field);
+ }
+}
+
+void ASTDumper::VisitUnaryOperator(const UnaryOperator *Node) {
+ VisitExpr(Node);
+ OS << " " << (Node->isPostfix() ? "postfix" : "prefix")
+ << " '" << UnaryOperator::getOpcodeStr(Node->getOpcode()) << "'";
+}
+
+void ASTDumper::VisitUnaryExprOrTypeTraitExpr(
+ const UnaryExprOrTypeTraitExpr *Node) {
+ VisitExpr(Node);
+ switch (Node->getKind()) {
+ case UETT_SizeOf:
+ OS << " sizeof";
+ break;
+ case UETT_AlignOf:
+ OS << " alignof";
+ break;
+ case UETT_VecStep:
+ OS << " vec_step";
+ break;
+ case UETT_OpenMPRequiredSimdAlign:
+ OS << " __builtin_omp_required_simd_align";
+ break;
+ }
+ if (Node->isArgumentType())
+ dumpType(Node->getArgumentType());
+}
+
+void ASTDumper::VisitMemberExpr(const MemberExpr *Node) {
+ VisitExpr(Node);
+ OS << " " << (Node->isArrow() ? "->" : ".") << *Node->getMemberDecl();
+ dumpPointer(Node->getMemberDecl());
+}
+
+void ASTDumper::VisitExtVectorElementExpr(const ExtVectorElementExpr *Node) {
+ VisitExpr(Node);
+ OS << " " << Node->getAccessor().getNameStart();
+}
+
+void ASTDumper::VisitBinaryOperator(const BinaryOperator *Node) {
+ VisitExpr(Node);
+ OS << " '" << BinaryOperator::getOpcodeStr(Node->getOpcode()) << "'";
+}
+
+void ASTDumper::VisitCompoundAssignOperator(
+ const CompoundAssignOperator *Node) {
+ VisitExpr(Node);
+ OS << " '" << BinaryOperator::getOpcodeStr(Node->getOpcode())
+ << "' ComputeLHSTy=";
+ dumpBareType(Node->getComputationLHSType());
+ OS << " ComputeResultTy=";
+ dumpBareType(Node->getComputationResultType());
+}
+
+void ASTDumper::VisitBlockExpr(const BlockExpr *Node) {
+ VisitExpr(Node);
+ dumpDecl(Node->getBlockDecl());
+}
+
+void ASTDumper::VisitOpaqueValueExpr(const OpaqueValueExpr *Node) {
+ VisitExpr(Node);
+
+ if (Expr *Source = Node->getSourceExpr())
+ dumpStmt(Source);
+}
+
+// GNU extensions.
+
+void ASTDumper::VisitAddrLabelExpr(const AddrLabelExpr *Node) {
+ VisitExpr(Node);
+ OS << " " << Node->getLabel()->getName();
+ dumpPointer(Node->getLabel());
+}
+
+//===----------------------------------------------------------------------===//
+// C++ Expressions
+//===----------------------------------------------------------------------===//
+
+void ASTDumper::VisitCXXNamedCastExpr(const CXXNamedCastExpr *Node) {
+ VisitExpr(Node);
+ OS << " " << Node->getCastName()
+ << "<" << Node->getTypeAsWritten().getAsString() << ">"
+ << " <" << Node->getCastKindName();
+ dumpBasePath(OS, Node);
+ OS << ">";
+}
+
+void ASTDumper::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *Node) {
+ VisitExpr(Node);
+ OS << " " << (Node->getValue() ? "true" : "false");
+}
+
+void ASTDumper::VisitCXXThisExpr(const CXXThisExpr *Node) {
+ VisitExpr(Node);
+ OS << " this";
+}
+
+void ASTDumper::VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *Node) {
+ VisitExpr(Node);
+ OS << " functional cast to " << Node->getTypeAsWritten().getAsString()
+ << " <" << Node->getCastKindName() << ">";
+}
+
+void ASTDumper::VisitCXXConstructExpr(const CXXConstructExpr *Node) {
+ VisitExpr(Node);
+ CXXConstructorDecl *Ctor = Node->getConstructor();
+ dumpType(Ctor->getType());
+ if (Node->isElidable())
+ OS << " elidable";
+ if (Node->requiresZeroInitialization())
+ OS << " zeroing";
+}
+
+void ASTDumper::VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *Node) {
+ VisitExpr(Node);
+ OS << " ";
+ dumpCXXTemporary(Node->getTemporary());
+}
+
+void ASTDumper::VisitCXXNewExpr(const CXXNewExpr *Node) {
+ VisitExpr(Node);
+ if (Node->isGlobalNew())
+ OS << " global";
+ if (Node->isArray())
+ OS << " array";
+ if (Node->getOperatorNew()) {
+ OS << ' ';
+ dumpBareDeclRef(Node->getOperatorNew());
+ }
+ // We could dump the deallocation function used in case of error, but it's
+ // usually not that interesting.
+}
+
+void ASTDumper::VisitCXXDeleteExpr(const CXXDeleteExpr *Node) {
+ VisitExpr(Node);
+ if (Node->isGlobalDelete())
+ OS << " global";
+ if (Node->isArrayForm())
+ OS << " array";
+ if (Node->getOperatorDelete()) {
+ OS << ' ';
+ dumpBareDeclRef(Node->getOperatorDelete());
+ }
+}
+
+void
+ASTDumper::VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Node) {
+ VisitExpr(Node);
+ if (const ValueDecl *VD = Node->getExtendingDecl()) {
+ OS << " extended by ";
+ dumpBareDeclRef(VD);
+ }
+}
+
+void ASTDumper::VisitExprWithCleanups(const ExprWithCleanups *Node) {
+ VisitExpr(Node);
+ for (unsigned i = 0, e = Node->getNumObjects(); i != e; ++i)
+ dumpDeclRef(Node->getObject(i), "cleanup");
+}
+
+void ASTDumper::dumpCXXTemporary(const CXXTemporary *Temporary) {
+ OS << "(CXXTemporary";
+ dumpPointer(Temporary);
+ OS << ")";
+}
+
+void ASTDumper::VisitSizeOfPackExpr(const SizeOfPackExpr *Node) {
+ VisitExpr(Node);
+ dumpPointer(Node->getPack());
+ dumpName(Node->getPack());
+ if (Node->isPartiallySubstituted())
+ for (const auto &A : Node->getPartialArguments())
+ dumpTemplateArgument(A);
+}
+
+//===----------------------------------------------------------------------===//
+// Obj-C Expressions
+//===----------------------------------------------------------------------===//
+
+void ASTDumper::VisitObjCMessageExpr(const ObjCMessageExpr *Node) {
+ VisitExpr(Node);
+ OS << " selector=";
+ Node->getSelector().print(OS);
+ switch (Node->getReceiverKind()) {
+ case ObjCMessageExpr::Instance:
+ break;
+
+ case ObjCMessageExpr::Class:
+ OS << " class=";
+ dumpBareType(Node->getClassReceiver());
+ break;
+
+ case ObjCMessageExpr::SuperInstance:
+ OS << " super (instance)";
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ OS << " super (class)";
+ break;
+ }
+}
+
+void ASTDumper::VisitObjCBoxedExpr(const ObjCBoxedExpr *Node) {
+ VisitExpr(Node);
+ OS << " selector=";
+ Node->getBoxingMethod()->getSelector().print(OS);
+}
+
+void ASTDumper::VisitObjCAtCatchStmt(const ObjCAtCatchStmt *Node) {
+ VisitStmt(Node);
+ if (const VarDecl *CatchParam = Node->getCatchParamDecl())
+ dumpDecl(CatchParam);
+ else
+ OS << " catch all";
+}
+
+void ASTDumper::VisitObjCEncodeExpr(const ObjCEncodeExpr *Node) {
+ VisitExpr(Node);
+ dumpType(Node->getEncodedType());
+}
+
+void ASTDumper::VisitObjCSelectorExpr(const ObjCSelectorExpr *Node) {
+ VisitExpr(Node);
+
+ OS << " ";
+ Node->getSelector().print(OS);
+}
+
+void ASTDumper::VisitObjCProtocolExpr(const ObjCProtocolExpr *Node) {
+ VisitExpr(Node);
+
+ OS << ' ' << *Node->getProtocol();
+}
+
+void ASTDumper::VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *Node) {
+ VisitExpr(Node);
+ if (Node->isImplicitProperty()) {
+ OS << " Kind=MethodRef Getter=\"";
+ if (Node->getImplicitPropertyGetter())
+ Node->getImplicitPropertyGetter()->getSelector().print(OS);
+ else
+ OS << "(null)";
+
+ OS << "\" Setter=\"";
+ if (ObjCMethodDecl *Setter = Node->getImplicitPropertySetter())
+ Setter->getSelector().print(OS);
+ else
+ OS << "(null)";
+ OS << "\"";
+ } else {
+ OS << " Kind=PropertyRef Property=\"" << *Node->getExplicitProperty() <<'"';
+ }
+
+ if (Node->isSuperReceiver())
+ OS << " super";
+
+ OS << " Messaging=";
+ if (Node->isMessagingGetter() && Node->isMessagingSetter())
+ OS << "Getter&Setter";
+ else if (Node->isMessagingGetter())
+ OS << "Getter";
+ else if (Node->isMessagingSetter())
+ OS << "Setter";
+}
+
+void ASTDumper::VisitObjCSubscriptRefExpr(const ObjCSubscriptRefExpr *Node) {
+ VisitExpr(Node);
+ if (Node->isArraySubscriptRefExpr())
+ OS << " Kind=ArraySubscript GetterForArray=\"";
+ else
+ OS << " Kind=DictionarySubscript GetterForDictionary=\"";
+ if (Node->getAtIndexMethodDecl())
+ Node->getAtIndexMethodDecl()->getSelector().print(OS);
+ else
+ OS << "(null)";
+
+ if (Node->isArraySubscriptRefExpr())
+ OS << "\" SetterForArray=\"";
+ else
+ OS << "\" SetterForDictionary=\"";
+ if (Node->setAtIndexMethodDecl())
+ Node->setAtIndexMethodDecl()->getSelector().print(OS);
+ else
+ OS << "(null)";
+}
+
+void ASTDumper::VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *Node) {
+ VisitExpr(Node);
+ OS << " " << (Node->getValue() ? "__objc_yes" : "__objc_no");
+}
+
+//===----------------------------------------------------------------------===//
+// Comments
+//===----------------------------------------------------------------------===//
+
+const char *ASTDumper::getCommandName(unsigned CommandID) {
+ if (Traits)
+ return Traits->getCommandInfo(CommandID)->Name;
+ const CommandInfo *Info = CommandTraits::getBuiltinCommandInfo(CommandID);
+ if (Info)
+ return Info->Name;
+ return "<not a builtin command>";
+}
+
+void ASTDumper::dumpFullComment(const FullComment *C) {
+ if (!C)
+ return;
+
+ FC = C;
+ dumpComment(C);
+ FC = nullptr;
+}
+
+void ASTDumper::dumpComment(const Comment *C) {
+ dumpChild([=] {
+ if (!C) {
+ ColorScope Color(*this, NullColor);
+ OS << "<<<NULL>>>";
+ return;
+ }
+
+ {
+ ColorScope Color(*this, CommentColor);
+ OS << C->getCommentKindName();
+ }
+ dumpPointer(C);
+ dumpSourceRange(C->getSourceRange());
+ ConstCommentVisitor<ASTDumper>::visit(C);
+ for (Comment::child_iterator I = C->child_begin(), E = C->child_end();
+ I != E; ++I)
+ dumpComment(*I);
+ });
+}
+
+void ASTDumper::visitTextComment(const TextComment *C) {
+ OS << " Text=\"" << C->getText() << "\"";
+}
+
+void ASTDumper::visitInlineCommandComment(const InlineCommandComment *C) {
+ OS << " Name=\"" << getCommandName(C->getCommandID()) << "\"";
+ switch (C->getRenderKind()) {
+ case InlineCommandComment::RenderNormal:
+ OS << " RenderNormal";
+ break;
+ case InlineCommandComment::RenderBold:
+ OS << " RenderBold";
+ break;
+ case InlineCommandComment::RenderMonospaced:
+ OS << " RenderMonospaced";
+ break;
+ case InlineCommandComment::RenderEmphasized:
+ OS << " RenderEmphasized";
+ break;
+ }
+
+ for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i)
+ OS << " Arg[" << i << "]=\"" << C->getArgText(i) << "\"";
+}
+
+void ASTDumper::visitHTMLStartTagComment(const HTMLStartTagComment *C) {
+ OS << " Name=\"" << C->getTagName() << "\"";
+ if (C->getNumAttrs() != 0) {
+ OS << " Attrs: ";
+ for (unsigned i = 0, e = C->getNumAttrs(); i != e; ++i) {
+ const HTMLStartTagComment::Attribute &Attr = C->getAttr(i);
+ OS << " \"" << Attr.Name << "=\"" << Attr.Value << "\"";
+ }
+ }
+ if (C->isSelfClosing())
+ OS << " SelfClosing";
+}
+
+void ASTDumper::visitHTMLEndTagComment(const HTMLEndTagComment *C) {
+ OS << " Name=\"" << C->getTagName() << "\"";
+}
+
+void ASTDumper::visitBlockCommandComment(const BlockCommandComment *C) {
+ OS << " Name=\"" << getCommandName(C->getCommandID()) << "\"";
+ for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i)
+ OS << " Arg[" << i << "]=\"" << C->getArgText(i) << "\"";
+}
+
+void ASTDumper::visitParamCommandComment(const ParamCommandComment *C) {
+ OS << " " << ParamCommandComment::getDirectionAsString(C->getDirection());
+
+ if (C->isDirectionExplicit())
+ OS << " explicitly";
+ else
+ OS << " implicitly";
+
+ if (C->hasParamName()) {
+ if (C->isParamIndexValid())
+ OS << " Param=\"" << C->getParamName(FC) << "\"";
+ else
+ OS << " Param=\"" << C->getParamNameAsWritten() << "\"";
+ }
+
+ if (C->isParamIndexValid() && !C->isVarArgParam())
+ OS << " ParamIndex=" << C->getParamIndex();
+}
+
+void ASTDumper::visitTParamCommandComment(const TParamCommandComment *C) {
+ if (C->hasParamName()) {
+ if (C->isPositionValid())
+ OS << " Param=\"" << C->getParamName(FC) << "\"";
+ else
+ OS << " Param=\"" << C->getParamNameAsWritten() << "\"";
+ }
+
+ if (C->isPositionValid()) {
+ OS << " Position=<";
+ for (unsigned i = 0, e = C->getDepth(); i != e; ++i) {
+ OS << C->getIndex(i);
+ if (i != e - 1)
+ OS << ", ";
+ }
+ OS << ">";
+ }
+}
+
+void ASTDumper::visitVerbatimBlockComment(const VerbatimBlockComment *C) {
+ OS << " Name=\"" << getCommandName(C->getCommandID()) << "\""
+ " CloseName=\"" << C->getCloseName() << "\"";
+}
+
+void ASTDumper::visitVerbatimBlockLineComment(
+ const VerbatimBlockLineComment *C) {
+ OS << " Text=\"" << C->getText() << "\"";
+}
+
+void ASTDumper::visitVerbatimLineComment(const VerbatimLineComment *C) {
+ OS << " Text=\"" << C->getText() << "\"";
+}
+
+//===----------------------------------------------------------------------===//
+// Type method implementations
+//===----------------------------------------------------------------------===//
+
+void QualType::dump(const char *msg) const {
+ if (msg)
+ llvm::errs() << msg << ": ";
+ dump();
+}
+
+LLVM_DUMP_METHOD void QualType::dump() const {
+ ASTDumper Dumper(llvm::errs(), nullptr, nullptr);
+ Dumper.dumpTypeAsChild(*this);
+}
+
+LLVM_DUMP_METHOD void Type::dump() const { QualType(this, 0).dump(); }
+
+//===----------------------------------------------------------------------===//
+// Decl method implementations
+//===----------------------------------------------------------------------===//
+
+LLVM_DUMP_METHOD void Decl::dump() const { dump(llvm::errs()); }
+
+LLVM_DUMP_METHOD void Decl::dump(raw_ostream &OS) const {
+ ASTDumper P(OS, &getASTContext().getCommentCommandTraits(),
+ &getASTContext().getSourceManager());
+ P.dumpDecl(this);
+}
+
+LLVM_DUMP_METHOD void Decl::dumpColor() const {
+ ASTDumper P(llvm::errs(), &getASTContext().getCommentCommandTraits(),
+ &getASTContext().getSourceManager(), /*ShowColors*/true);
+ P.dumpDecl(this);
+}
+
+LLVM_DUMP_METHOD void DeclContext::dumpLookups() const {
+ dumpLookups(llvm::errs());
+}
+
+LLVM_DUMP_METHOD void DeclContext::dumpLookups(raw_ostream &OS,
+ bool DumpDecls) const {
+ const DeclContext *DC = this;
+ while (!DC->isTranslationUnit())
+ DC = DC->getParent();
+ ASTContext &Ctx = cast<TranslationUnitDecl>(DC)->getASTContext();
+ ASTDumper P(OS, &Ctx.getCommentCommandTraits(), &Ctx.getSourceManager());
+ P.dumpLookups(this, DumpDecls);
+}
+
+//===----------------------------------------------------------------------===//
+// Stmt method implementations
+//===----------------------------------------------------------------------===//
+
+LLVM_DUMP_METHOD void Stmt::dump(SourceManager &SM) const {
+ dump(llvm::errs(), SM);
+}
+
+LLVM_DUMP_METHOD void Stmt::dump(raw_ostream &OS, SourceManager &SM) const {
+ ASTDumper P(OS, nullptr, &SM);
+ P.dumpStmt(this);
+}
+
+LLVM_DUMP_METHOD void Stmt::dump(raw_ostream &OS) const {
+ ASTDumper P(OS, nullptr, nullptr);
+ P.dumpStmt(this);
+}
+
+LLVM_DUMP_METHOD void Stmt::dump() const {
+ ASTDumper P(llvm::errs(), nullptr, nullptr);
+ P.dumpStmt(this);
+}
+
+LLVM_DUMP_METHOD void Stmt::dumpColor() const {
+ ASTDumper P(llvm::errs(), nullptr, nullptr, /*ShowColors*/true);
+ P.dumpStmt(this);
+}
+
+//===----------------------------------------------------------------------===//
+// Comment method implementations
+//===----------------------------------------------------------------------===//
+
+LLVM_DUMP_METHOD void Comment::dump() const {
+ dump(llvm::errs(), nullptr, nullptr);
+}
+
+LLVM_DUMP_METHOD void Comment::dump(const ASTContext &Context) const {
+ dump(llvm::errs(), &Context.getCommentCommandTraits(),
+ &Context.getSourceManager());
+}
+
+void Comment::dump(raw_ostream &OS, const CommandTraits *Traits,
+ const SourceManager *SM) const {
+ const FullComment *FC = dyn_cast<FullComment>(this);
+ ASTDumper D(OS, Traits, SM);
+ D.dumpFullComment(FC);
+}
+
+LLVM_DUMP_METHOD void Comment::dumpColor() const {
+ const FullComment *FC = dyn_cast<FullComment>(this);
+ ASTDumper D(llvm::errs(), nullptr, nullptr, /*ShowColors*/true);
+ D.dumpFullComment(FC);
+}
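+
+// Illustrative usage sketch, not part of the imported file: the dump entry
+// points above are what debugger sessions and quick printf-style debugging
+// typically call, e.g. from an ASTConsumer::HandleTranslationUnit override:
+//
+// void HandleTranslationUnit(ASTContext &Ctx) override {
+// Ctx.getTranslationUnitDecl()->dump(); // full AST to llvm::errs()
+// Ctx.getTranslationUnitDecl()->dumpColor(); // same, with color output
+// }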
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp b/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
new file mode 100644
index 0000000..359db1b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
@@ -0,0 +1,5919 @@
+//===--- ASTImporter.cpp - Importing ASTs from other Contexts ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTImporter class which imports AST nodes from one
+// context into another context.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTImporter.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TypeVisitor.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <deque>
+
+namespace clang {
+ class ASTNodeImporter : public TypeVisitor<ASTNodeImporter, QualType>,
+ public DeclVisitor<ASTNodeImporter, Decl *>,
+ public StmtVisitor<ASTNodeImporter, Stmt *> {
+ ASTImporter &Importer;
+
+ public:
+ explicit ASTNodeImporter(ASTImporter &Importer) : Importer(Importer) { }
+
+ using TypeVisitor<ASTNodeImporter, QualType>::Visit;
+ using DeclVisitor<ASTNodeImporter, Decl *>::Visit;
+ using StmtVisitor<ASTNodeImporter, Stmt *>::Visit;
+
+ // Importing types
+ QualType VisitType(const Type *T);
+ QualType VisitBuiltinType(const BuiltinType *T);
+ QualType VisitComplexType(const ComplexType *T);
+ QualType VisitPointerType(const PointerType *T);
+ QualType VisitBlockPointerType(const BlockPointerType *T);
+ QualType VisitLValueReferenceType(const LValueReferenceType *T);
+ QualType VisitRValueReferenceType(const RValueReferenceType *T);
+ QualType VisitMemberPointerType(const MemberPointerType *T);
+ QualType VisitConstantArrayType(const ConstantArrayType *T);
+ QualType VisitIncompleteArrayType(const IncompleteArrayType *T);
+ QualType VisitVariableArrayType(const VariableArrayType *T);
+ // FIXME: DependentSizedArrayType
+ // FIXME: DependentSizedExtVectorType
+ QualType VisitVectorType(const VectorType *T);
+ QualType VisitExtVectorType(const ExtVectorType *T);
+ QualType VisitFunctionNoProtoType(const FunctionNoProtoType *T);
+ QualType VisitFunctionProtoType(const FunctionProtoType *T);
+ // FIXME: UnresolvedUsingType
+ QualType VisitParenType(const ParenType *T);
+ QualType VisitTypedefType(const TypedefType *T);
+ QualType VisitTypeOfExprType(const TypeOfExprType *T);
+ // FIXME: DependentTypeOfExprType
+ QualType VisitTypeOfType(const TypeOfType *T);
+ QualType VisitDecltypeType(const DecltypeType *T);
+ QualType VisitUnaryTransformType(const UnaryTransformType *T);
+ QualType VisitAutoType(const AutoType *T);
+ // FIXME: DependentDecltypeType
+ QualType VisitRecordType(const RecordType *T);
+ QualType VisitEnumType(const EnumType *T);
+ QualType VisitAttributedType(const AttributedType *T);
+ // FIXME: TemplateTypeParmType
+ // FIXME: SubstTemplateTypeParmType
+ QualType VisitTemplateSpecializationType(const TemplateSpecializationType *T);
+ QualType VisitElaboratedType(const ElaboratedType *T);
+ // FIXME: DependentNameType
+ // FIXME: DependentTemplateSpecializationType
+ QualType VisitObjCInterfaceType(const ObjCInterfaceType *T);
+ QualType VisitObjCObjectType(const ObjCObjectType *T);
+ QualType VisitObjCObjectPointerType(const ObjCObjectPointerType *T);
+
+ // Importing declarations
+ bool ImportDeclParts(NamedDecl *D, DeclContext *&DC,
+ DeclContext *&LexicalDC, DeclarationName &Name,
+ NamedDecl *&ToD, SourceLocation &Loc);
+ void ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD = nullptr);
+ void ImportDeclarationNameLoc(const DeclarationNameInfo &From,
+ DeclarationNameInfo& To);
+ void ImportDeclContext(DeclContext *FromDC, bool ForceImport = false);
+
+ /// \brief What we should import from the definition.
+ enum ImportDefinitionKind {
+ /// \brief Import the default subset of the definition, which might be
+ /// nothing (if minimal import is set) or might be everything (if minimal
+ /// import is not set).
+ IDK_Default,
+ /// \brief Import everything.
+ IDK_Everything,
+ /// \brief Import only the bare bones needed to establish a valid
+ /// DeclContext.
+ IDK_Basic
+ };
+
+ bool shouldForceImportDeclContext(ImportDefinitionKind IDK) {
+ return IDK == IDK_Everything ||
+ (IDK == IDK_Default && !Importer.isMinimalImport());
+ }
+
+ bool ImportDefinition(RecordDecl *From, RecordDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ bool ImportDefinition(VarDecl *From, VarDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ bool ImportDefinition(EnumDecl *From, EnumDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ bool ImportDefinition(ObjCInterfaceDecl *From, ObjCInterfaceDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ bool ImportDefinition(ObjCProtocolDecl *From, ObjCProtocolDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ TemplateParameterList *ImportTemplateParameterList(
+ TemplateParameterList *Params);
+ TemplateArgument ImportTemplateArgument(const TemplateArgument &From);
+ bool ImportTemplateArguments(const TemplateArgument *FromArgs,
+ unsigned NumFromArgs,
+ SmallVectorImpl<TemplateArgument> &ToArgs);
+ bool IsStructuralMatch(RecordDecl *FromRecord, RecordDecl *ToRecord,
+ bool Complain = true);
+ bool IsStructuralMatch(VarDecl *FromVar, VarDecl *ToVar,
+ bool Complain = true);
+ bool IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToRecord);
+ bool IsStructuralMatch(EnumConstantDecl *FromEC, EnumConstantDecl *ToEC);
+ bool IsStructuralMatch(ClassTemplateDecl *From, ClassTemplateDecl *To);
+ bool IsStructuralMatch(VarTemplateDecl *From, VarTemplateDecl *To);
+ Decl *VisitDecl(Decl *D);
+ Decl *VisitTranslationUnitDecl(TranslationUnitDecl *D);
+ Decl *VisitNamespaceDecl(NamespaceDecl *D);
+ Decl *VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias);
+ Decl *VisitTypedefDecl(TypedefDecl *D);
+ Decl *VisitTypeAliasDecl(TypeAliasDecl *D);
+ Decl *VisitEnumDecl(EnumDecl *D);
+ Decl *VisitRecordDecl(RecordDecl *D);
+ Decl *VisitEnumConstantDecl(EnumConstantDecl *D);
+ Decl *VisitFunctionDecl(FunctionDecl *D);
+ Decl *VisitCXXMethodDecl(CXXMethodDecl *D);
+ Decl *VisitCXXConstructorDecl(CXXConstructorDecl *D);
+ Decl *VisitCXXDestructorDecl(CXXDestructorDecl *D);
+ Decl *VisitCXXConversionDecl(CXXConversionDecl *D);
+ Decl *VisitFieldDecl(FieldDecl *D);
+ Decl *VisitIndirectFieldDecl(IndirectFieldDecl *D);
+ Decl *VisitObjCIvarDecl(ObjCIvarDecl *D);
+ Decl *VisitVarDecl(VarDecl *D);
+ Decl *VisitImplicitParamDecl(ImplicitParamDecl *D);
+ Decl *VisitParmVarDecl(ParmVarDecl *D);
+ Decl *VisitObjCMethodDecl(ObjCMethodDecl *D);
+ Decl *VisitObjCTypeParamDecl(ObjCTypeParamDecl *D);
+ Decl *VisitObjCCategoryDecl(ObjCCategoryDecl *D);
+ Decl *VisitObjCProtocolDecl(ObjCProtocolDecl *D);
+ Decl *VisitLinkageSpecDecl(LinkageSpecDecl *D);
+
+ ObjCTypeParamList *ImportObjCTypeParamList(ObjCTypeParamList *list);
+ Decl *VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
+ Decl *VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
+ Decl *VisitObjCImplementationDecl(ObjCImplementationDecl *D);
+ Decl *VisitObjCPropertyDecl(ObjCPropertyDecl *D);
+ Decl *VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
+ Decl *VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
+ Decl *VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
+ Decl *VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
+ Decl *VisitClassTemplateDecl(ClassTemplateDecl *D);
+ Decl *VisitClassTemplateSpecializationDecl(
+ ClassTemplateSpecializationDecl *D);
+ Decl *VisitVarTemplateDecl(VarTemplateDecl *D);
+ Decl *VisitVarTemplateSpecializationDecl(VarTemplateSpecializationDecl *D);
+
+ // Importing statements
+ DeclGroupRef ImportDeclGroup(DeclGroupRef DG);
+
+ Stmt *VisitStmt(Stmt *S);
+ Stmt *VisitDeclStmt(DeclStmt *S);
+ Stmt *VisitNullStmt(NullStmt *S);
+ Stmt *VisitCompoundStmt(CompoundStmt *S);
+ Stmt *VisitCaseStmt(CaseStmt *S);
+ Stmt *VisitDefaultStmt(DefaultStmt *S);
+ Stmt *VisitLabelStmt(LabelStmt *S);
+ Stmt *VisitAttributedStmt(AttributedStmt *S);
+ Stmt *VisitIfStmt(IfStmt *S);
+ Stmt *VisitSwitchStmt(SwitchStmt *S);
+ Stmt *VisitWhileStmt(WhileStmt *S);
+ Stmt *VisitDoStmt(DoStmt *S);
+ Stmt *VisitForStmt(ForStmt *S);
+ Stmt *VisitGotoStmt(GotoStmt *S);
+ Stmt *VisitIndirectGotoStmt(IndirectGotoStmt *S);
+ Stmt *VisitContinueStmt(ContinueStmt *S);
+ Stmt *VisitBreakStmt(BreakStmt *S);
+ Stmt *VisitReturnStmt(ReturnStmt *S);
+ // FIXME: GCCAsmStmt
+ // FIXME: MSAsmStmt
+ // FIXME: SEHExceptStmt
+ // FIXME: SEHFinallyStmt
+ // FIXME: SEHTryStmt
+ // FIXME: SEHLeaveStmt
+ // FIXME: CapturedStmt
+ Stmt *VisitCXXCatchStmt(CXXCatchStmt *S);
+ Stmt *VisitCXXTryStmt(CXXTryStmt *S);
+ Stmt *VisitCXXForRangeStmt(CXXForRangeStmt *S);
+ // FIXME: MSDependentExistsStmt
+ Stmt *VisitObjCForCollectionStmt(ObjCForCollectionStmt *S);
+ Stmt *VisitObjCAtCatchStmt(ObjCAtCatchStmt *S);
+ Stmt *VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S);
+ Stmt *VisitObjCAtTryStmt(ObjCAtTryStmt *S);
+ Stmt *VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S);
+ Stmt *VisitObjCAtThrowStmt(ObjCAtThrowStmt *S);
+ Stmt *VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S);
+
+ // Importing expressions
+ Expr *VisitExpr(Expr *E);
+ Expr *VisitDeclRefExpr(DeclRefExpr *E);
+ Expr *VisitIntegerLiteral(IntegerLiteral *E);
+ Expr *VisitCharacterLiteral(CharacterLiteral *E);
+ Expr *VisitParenExpr(ParenExpr *E);
+ Expr *VisitUnaryOperator(UnaryOperator *E);
+ Expr *VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E);
+ Expr *VisitBinaryOperator(BinaryOperator *E);
+ Expr *VisitCompoundAssignOperator(CompoundAssignOperator *E);
+ Expr *VisitImplicitCastExpr(ImplicitCastExpr *E);
+ Expr *VisitCStyleCastExpr(CStyleCastExpr *E);
+ Expr *VisitCXXConstructExpr(CXXConstructExpr *E);
+ Expr *VisitMemberExpr(MemberExpr *E);
+ Expr *VisitCallExpr(CallExpr *E);
+ };
+}
+using namespace clang;
+
+//----------------------------------------------------------------------------
+// Structural Equivalence
+//----------------------------------------------------------------------------
+
+namespace {
+ struct StructuralEquivalenceContext {
+ /// \brief AST contexts for which we are checking structural equivalence.
+ ASTContext &C1, &C2;
+
+ /// \brief The set of "tentative" equivalences between two canonical
+ /// declarations, mapping from a declaration in the first context to the
+ /// declaration in the second context that we believe to be equivalent.
+ llvm::DenseMap<Decl *, Decl *> TentativeEquivalences;
+
+ /// \brief Queue of declarations in the first context whose equivalence
+ /// with a declaration in the second context still needs to be verified.
+ std::deque<Decl *> DeclsToCheck;
+
+ /// \brief Declaration (from, to) pairs that are known not to be equivalent
+ /// (which we have already complained about).
+ llvm::DenseSet<std::pair<Decl *, Decl *> > &NonEquivalentDecls;
+
+ /// \brief Whether we're being strict about the spelling of types when
+ /// unifying two types.
+ bool StrictTypeSpelling;
+
+ /// \brief Whether to complain about failures.
+ bool Complain;
+
+ /// \brief \c true if the last diagnostic came from C2.
+ bool LastDiagFromC2;
+
+ StructuralEquivalenceContext(ASTContext &C1, ASTContext &C2,
+ llvm::DenseSet<std::pair<Decl *, Decl *> > &NonEquivalentDecls,
+ bool StrictTypeSpelling = false,
+ bool Complain = true)
+ : C1(C1), C2(C2), NonEquivalentDecls(NonEquivalentDecls),
+ StrictTypeSpelling(StrictTypeSpelling), Complain(Complain),
+ LastDiagFromC2(false) {}
+
+ /// \brief Determine whether the two declarations are structurally
+ /// equivalent.
+ bool IsStructurallyEquivalent(Decl *D1, Decl *D2);
+
+ /// \brief Determine whether the two types are structurally equivalent.
+ bool IsStructurallyEquivalent(QualType T1, QualType T2);
+
+ private:
+ /// \brief Finish checking all of the structural equivalences.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool Finish();
+
+ public:
+ DiagnosticBuilder Diag1(SourceLocation Loc, unsigned DiagID) {
+ assert(Complain && "Not allowed to complain");
+ if (LastDiagFromC2)
+ C1.getDiagnostics().notePriorDiagnosticFrom(C2.getDiagnostics());
+ LastDiagFromC2 = false;
+ return C1.getDiagnostics().Report(Loc, DiagID);
+ }
+
+ DiagnosticBuilder Diag2(SourceLocation Loc, unsigned DiagID) {
+ assert(Complain && "Not allowed to complain");
+ if (!LastDiagFromC2)
+ C2.getDiagnostics().notePriorDiagnosticFrom(C1.getDiagnostics());
+ LastDiagFromC2 = true;
+ return C2.getDiagnostics().Report(Loc, DiagID);
+ }
+ };
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ QualType T1, QualType T2);
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ Decl *D1, Decl *D2);
+
+/// \brief Determine structural equivalence of two expressions.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ Expr *E1, Expr *E2) {
+ if (!E1 || !E2)
+ return E1 == E2;
+
+ // FIXME: Actually perform a structural comparison!
+ return true;
+}
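+
+// A minimal sketch, not the upstream fix, of what the missing comparison
+// could start with: reject mismatched statement classes before recursing,
+// e.g.
+//
+// if (E1->getStmtClass() != E2->getStmtClass())
+// return false;
+//
+// with per-class checks (opcodes, literal values, referenced declarations)
+// filling in what the FIXME above leaves undone.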
+
+/// \brief Determine whether two identifiers are equivalent.
+static bool IsStructurallyEquivalent(const IdentifierInfo *Name1,
+ const IdentifierInfo *Name2) {
+ if (!Name1 || !Name2)
+ return Name1 == Name2;
+
+ return Name1->getName() == Name2->getName();
+}
+
+/// \brief Determine whether two nested-name-specifiers are equivalent.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ NestedNameSpecifier *NNS1,
+ NestedNameSpecifier *NNS2) {
+ // FIXME: Implement!
+ return true;
+}
+
+/// \brief Determine whether two template arguments are equivalent.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ const TemplateArgument &Arg1,
+ const TemplateArgument &Arg2) {
+ if (Arg1.getKind() != Arg2.getKind())
+ return false;
+
+ switch (Arg1.getKind()) {
+ case TemplateArgument::Null:
+ return true;
+
+ case TemplateArgument::Type:
+ return Context.IsStructurallyEquivalent(Arg1.getAsType(), Arg2.getAsType());
+
+ case TemplateArgument::Integral:
+ if (!Context.IsStructurallyEquivalent(Arg1.getIntegralType(),
+ Arg2.getIntegralType()))
+ return false;
+
+ return llvm::APSInt::isSameValue(Arg1.getAsIntegral(), Arg2.getAsIntegral());
+
+ case TemplateArgument::Declaration:
+ return Context.IsStructurallyEquivalent(Arg1.getAsDecl(), Arg2.getAsDecl());
+
+ case TemplateArgument::NullPtr:
+ return true; // FIXME: Is this correct?
+
+ case TemplateArgument::Template:
+ return IsStructurallyEquivalent(Context,
+ Arg1.getAsTemplate(),
+ Arg2.getAsTemplate());
+
+ case TemplateArgument::TemplateExpansion:
+ return IsStructurallyEquivalent(Context,
+ Arg1.getAsTemplateOrTemplatePattern(),
+ Arg2.getAsTemplateOrTemplatePattern());
+
+ case TemplateArgument::Expression:
+ return IsStructurallyEquivalent(Context,
+ Arg1.getAsExpr(), Arg2.getAsExpr());
+
+ case TemplateArgument::Pack:
+ if (Arg1.pack_size() != Arg2.pack_size())
+ return false;
+
+ for (unsigned I = 0, N = Arg1.pack_size(); I != N; ++I)
+ if (!IsStructurallyEquivalent(Context,
+ Arg1.pack_begin()[I],
+ Arg2.pack_begin()[I]))
+ return false;
+
+ return true;
+ }
+
+ llvm_unreachable("Invalid template argument kind");
+}
+
+/// \brief Determine structural equivalence for the common part of array
+/// types.
+static bool IsArrayStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ const ArrayType *Array1,
+ const ArrayType *Array2) {
+ if (!IsStructurallyEquivalent(Context,
+ Array1->getElementType(),
+ Array2->getElementType()))
+ return false;
+ if (Array1->getSizeModifier() != Array2->getSizeModifier())
+ return false;
+ if (Array1->getIndexTypeQualifiers() != Array2->getIndexTypeQualifiers())
+ return false;
+
+ return true;
+}
+
+/// \brief Determine structural equivalence of two types.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ QualType T1, QualType T2) {
+ if (T1.isNull() || T2.isNull())
+ return T1.isNull() && T2.isNull();
+
+ if (!Context.StrictTypeSpelling) {
+ // We aren't being strict about token-to-token equivalence of types,
+ // so map down to the canonical type.
+ T1 = Context.C1.getCanonicalType(T1);
+ T2 = Context.C2.getCanonicalType(T2);
+ }
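+
+ // For example, given 'typedef int T;', the written types 'T *' and
+ // 'int *' both canonicalize to 'int *' and therefore compare equivalent
+ // here unless StrictTypeSpelling was requested.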
+
+ if (T1.getQualifiers() != T2.getQualifiers())
+ return false;
+
+ Type::TypeClass TC = T1->getTypeClass();
+
+ if (T1->getTypeClass() != T2->getTypeClass()) {
+ // Compare function types with prototypes vs. without prototypes as if
+ // both did not have prototypes.
+ if (T1->getTypeClass() == Type::FunctionProto &&
+ T2->getTypeClass() == Type::FunctionNoProto)
+ TC = Type::FunctionNoProto;
+ else if (T1->getTypeClass() == Type::FunctionNoProto &&
+ T2->getTypeClass() == Type::FunctionProto)
+ TC = Type::FunctionNoProto;
+ else
+ return false;
+ }
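+
+ // For example, a K&R-style 'int f()' (FunctionNoProto) and a prototyped
+ // 'int f(int)' are both handled by the FunctionNoProto case below, so
+ // only the return type and the ExtInfo bits are compared.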
+
+ switch (TC) {
+ case Type::Builtin:
+ // FIXME: Deal with Char_S/Char_U.
+ if (cast<BuiltinType>(T1)->getKind() != cast<BuiltinType>(T2)->getKind())
+ return false;
+ break;
+
+ case Type::Complex:
+ if (!IsStructurallyEquivalent(Context,
+ cast<ComplexType>(T1)->getElementType(),
+ cast<ComplexType>(T2)->getElementType()))
+ return false;
+ break;
+
+ case Type::Adjusted:
+ case Type::Decayed:
+ if (!IsStructurallyEquivalent(Context,
+ cast<AdjustedType>(T1)->getOriginalType(),
+ cast<AdjustedType>(T2)->getOriginalType()))
+ return false;
+ break;
+
+ case Type::Pointer:
+ if (!IsStructurallyEquivalent(Context,
+ cast<PointerType>(T1)->getPointeeType(),
+ cast<PointerType>(T2)->getPointeeType()))
+ return false;
+ break;
+
+ case Type::BlockPointer:
+ if (!IsStructurallyEquivalent(Context,
+ cast<BlockPointerType>(T1)->getPointeeType(),
+ cast<BlockPointerType>(T2)->getPointeeType()))
+ return false;
+ break;
+
+ case Type::LValueReference:
+ case Type::RValueReference: {
+ const ReferenceType *Ref1 = cast<ReferenceType>(T1);
+ const ReferenceType *Ref2 = cast<ReferenceType>(T2);
+ if (Ref1->isSpelledAsLValue() != Ref2->isSpelledAsLValue())
+ return false;
+ if (Ref1->isInnerRef() != Ref2->isInnerRef())
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ Ref1->getPointeeTypeAsWritten(),
+ Ref2->getPointeeTypeAsWritten()))
+ return false;
+ break;
+ }
+
+ case Type::MemberPointer: {
+ const MemberPointerType *MemPtr1 = cast<MemberPointerType>(T1);
+ const MemberPointerType *MemPtr2 = cast<MemberPointerType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ MemPtr1->getPointeeType(),
+ MemPtr2->getPointeeType()))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ QualType(MemPtr1->getClass(), 0),
+ QualType(MemPtr2->getClass(), 0)))
+ return false;
+ break;
+ }
+
+ case Type::ConstantArray: {
+ const ConstantArrayType *Array1 = cast<ConstantArrayType>(T1);
+ const ConstantArrayType *Array2 = cast<ConstantArrayType>(T2);
+ if (!llvm::APInt::isSameValue(Array1->getSize(), Array2->getSize()))
+ return false;
+
+ if (!IsArrayStructurallyEquivalent(Context, Array1, Array2))
+ return false;
+ break;
+ }
+
+ case Type::IncompleteArray:
+ if (!IsArrayStructurallyEquivalent(Context,
+ cast<ArrayType>(T1),
+ cast<ArrayType>(T2)))
+ return false;
+ break;
+
+ case Type::VariableArray: {
+ const VariableArrayType *Array1 = cast<VariableArrayType>(T1);
+ const VariableArrayType *Array2 = cast<VariableArrayType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Array1->getSizeExpr(), Array2->getSizeExpr()))
+ return false;
+
+ if (!IsArrayStructurallyEquivalent(Context, Array1, Array2))
+ return false;
+
+ break;
+ }
+
+ case Type::DependentSizedArray: {
+ const DependentSizedArrayType *Array1 = cast<DependentSizedArrayType>(T1);
+ const DependentSizedArrayType *Array2 = cast<DependentSizedArrayType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Array1->getSizeExpr(), Array2->getSizeExpr()))
+ return false;
+
+ if (!IsArrayStructurallyEquivalent(Context, Array1, Array2))
+ return false;
+
+ break;
+ }
+
+ case Type::DependentSizedExtVector: {
+ const DependentSizedExtVectorType *Vec1
+ = cast<DependentSizedExtVectorType>(T1);
+ const DependentSizedExtVectorType *Vec2
+ = cast<DependentSizedExtVectorType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Vec1->getSizeExpr(), Vec2->getSizeExpr()))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ Vec1->getElementType(),
+ Vec2->getElementType()))
+ return false;
+ break;
+ }
+
+ case Type::Vector:
+ case Type::ExtVector: {
+ const VectorType *Vec1 = cast<VectorType>(T1);
+ const VectorType *Vec2 = cast<VectorType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Vec1->getElementType(),
+ Vec2->getElementType()))
+ return false;
+ if (Vec1->getNumElements() != Vec2->getNumElements())
+ return false;
+ if (Vec1->getVectorKind() != Vec2->getVectorKind())
+ return false;
+ break;
+ }
+
+ case Type::FunctionProto: {
+ const FunctionProtoType *Proto1 = cast<FunctionProtoType>(T1);
+ const FunctionProtoType *Proto2 = cast<FunctionProtoType>(T2);
+ if (Proto1->getNumParams() != Proto2->getNumParams())
+ return false;
+ for (unsigned I = 0, N = Proto1->getNumParams(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context, Proto1->getParamType(I),
+ Proto2->getParamType(I)))
+ return false;
+ }
+ if (Proto1->isVariadic() != Proto2->isVariadic())
+ return false;
+ if (Proto1->getExceptionSpecType() != Proto2->getExceptionSpecType())
+ return false;
+ if (Proto1->getExceptionSpecType() == EST_Dynamic) {
+ if (Proto1->getNumExceptions() != Proto2->getNumExceptions())
+ return false;
+ for (unsigned I = 0, N = Proto1->getNumExceptions(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context,
+ Proto1->getExceptionType(I),
+ Proto2->getExceptionType(I)))
+ return false;
+ }
+ } else if (Proto1->getExceptionSpecType() == EST_ComputedNoexcept) {
+ if (!IsStructurallyEquivalent(Context,
+ Proto1->getNoexceptExpr(),
+ Proto2->getNoexceptExpr()))
+ return false;
+ }
+ if (Proto1->getTypeQuals() != Proto2->getTypeQuals())
+ return false;
+
+ // Fall through to check the bits common with FunctionNoProtoType.
+ }
+
+ case Type::FunctionNoProto: {
+ const FunctionType *Function1 = cast<FunctionType>(T1);
+ const FunctionType *Function2 = cast<FunctionType>(T2);
+ if (!IsStructurallyEquivalent(Context, Function1->getReturnType(),
+ Function2->getReturnType()))
+ return false;
+ if (Function1->getExtInfo() != Function2->getExtInfo())
+ return false;
+ break;
+ }
+
+ case Type::UnresolvedUsing:
+ if (!IsStructurallyEquivalent(Context,
+ cast<UnresolvedUsingType>(T1)->getDecl(),
+ cast<UnresolvedUsingType>(T2)->getDecl()))
+ return false;
+
+ break;
+
+ case Type::Attributed:
+ if (!IsStructurallyEquivalent(Context,
+ cast<AttributedType>(T1)->getModifiedType(),
+ cast<AttributedType>(T2)->getModifiedType()))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ cast<AttributedType>(T1)->getEquivalentType(),
+ cast<AttributedType>(T2)->getEquivalentType()))
+ return false;
+ break;
+
+ case Type::Paren:
+ if (!IsStructurallyEquivalent(Context,
+ cast<ParenType>(T1)->getInnerType(),
+ cast<ParenType>(T2)->getInnerType()))
+ return false;
+ break;
+
+ case Type::Typedef:
+ if (!IsStructurallyEquivalent(Context,
+ cast<TypedefType>(T1)->getDecl(),
+ cast<TypedefType>(T2)->getDecl()))
+ return false;
+ break;
+
+ case Type::TypeOfExpr:
+ if (!IsStructurallyEquivalent(Context,
+ cast<TypeOfExprType>(T1)->getUnderlyingExpr(),
+ cast<TypeOfExprType>(T2)->getUnderlyingExpr()))
+ return false;
+ break;
+
+ case Type::TypeOf:
+ if (!IsStructurallyEquivalent(Context,
+ cast<TypeOfType>(T1)->getUnderlyingType(),
+ cast<TypeOfType>(T2)->getUnderlyingType()))
+ return false;
+ break;
+
+ case Type::UnaryTransform:
+ if (!IsStructurallyEquivalent(Context,
+ cast<UnaryTransformType>(T1)->getUnderlyingType(),
+ cast<UnaryTransformType>(T2)->getUnderlyingType()))
+ return false;
+ break;
+
+ case Type::Decltype:
+ if (!IsStructurallyEquivalent(Context,
+ cast<DecltypeType>(T1)->getUnderlyingExpr(),
+ cast<DecltypeType>(T2)->getUnderlyingExpr()))
+ return false;
+ break;
+
+ case Type::Auto:
+ if (!IsStructurallyEquivalent(Context,
+ cast<AutoType>(T1)->getDeducedType(),
+ cast<AutoType>(T2)->getDeducedType()))
+ return false;
+ break;
+
+ case Type::Record:
+ case Type::Enum:
+ if (!IsStructurallyEquivalent(Context,
+ cast<TagType>(T1)->getDecl(),
+ cast<TagType>(T2)->getDecl()))
+ return false;
+ break;
+
+ case Type::TemplateTypeParm: {
+ const TemplateTypeParmType *Parm1 = cast<TemplateTypeParmType>(T1);
+ const TemplateTypeParmType *Parm2 = cast<TemplateTypeParmType>(T2);
+ if (Parm1->getDepth() != Parm2->getDepth())
+ return false;
+ if (Parm1->getIndex() != Parm2->getIndex())
+ return false;
+ if (Parm1->isParameterPack() != Parm2->isParameterPack())
+ return false;
+
+ // Names of template type parameters are never significant.
+ break;
+ }
+
+ case Type::SubstTemplateTypeParm: {
+ const SubstTemplateTypeParmType *Subst1
+ = cast<SubstTemplateTypeParmType>(T1);
+ const SubstTemplateTypeParmType *Subst2
+ = cast<SubstTemplateTypeParmType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ QualType(Subst1->getReplacedParameter(), 0),
+ QualType(Subst2->getReplacedParameter(), 0)))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ Subst1->getReplacementType(),
+ Subst2->getReplacementType()))
+ return false;
+ break;
+ }
+
+ case Type::SubstTemplateTypeParmPack: {
+ const SubstTemplateTypeParmPackType *Subst1
+ = cast<SubstTemplateTypeParmPackType>(T1);
+ const SubstTemplateTypeParmPackType *Subst2
+ = cast<SubstTemplateTypeParmPackType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ QualType(Subst1->getReplacedParameter(), 0),
+ QualType(Subst2->getReplacedParameter(), 0)))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ Subst1->getArgumentPack(),
+ Subst2->getArgumentPack()))
+ return false;
+ break;
+ }
+ case Type::TemplateSpecialization: {
+ const TemplateSpecializationType *Spec1
+ = cast<TemplateSpecializationType>(T1);
+ const TemplateSpecializationType *Spec2
+ = cast<TemplateSpecializationType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Spec1->getTemplateName(),
+ Spec2->getTemplateName()))
+ return false;
+ if (Spec1->getNumArgs() != Spec2->getNumArgs())
+ return false;
+ for (unsigned I = 0, N = Spec1->getNumArgs(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context,
+ Spec1->getArg(I), Spec2->getArg(I)))
+ return false;
+ }
+ break;
+ }
+
+ case Type::Elaborated: {
+ const ElaboratedType *Elab1 = cast<ElaboratedType>(T1);
+ const ElaboratedType *Elab2 = cast<ElaboratedType>(T2);
+ // CHECKME: what if a keyword is ETK_None or ETK_typename?
+ if (Elab1->getKeyword() != Elab2->getKeyword())
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ Elab1->getQualifier(),
+ Elab2->getQualifier()))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ Elab1->getNamedType(),
+ Elab2->getNamedType()))
+ return false;
+ break;
+ }
+
+ case Type::InjectedClassName: {
+ const InjectedClassNameType *Inj1 = cast<InjectedClassNameType>(T1);
+ const InjectedClassNameType *Inj2 = cast<InjectedClassNameType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Inj1->getInjectedSpecializationType(),
+ Inj2->getInjectedSpecializationType()))
+ return false;
+ break;
+ }
+
+ case Type::DependentName: {
+ const DependentNameType *Typename1 = cast<DependentNameType>(T1);
+ const DependentNameType *Typename2 = cast<DependentNameType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Typename1->getQualifier(),
+ Typename2->getQualifier()))
+ return false;
+ if (!IsStructurallyEquivalent(Typename1->getIdentifier(),
+ Typename2->getIdentifier()))
+ return false;
+
+ break;
+ }
+
+ case Type::DependentTemplateSpecialization: {
+ const DependentTemplateSpecializationType *Spec1 =
+ cast<DependentTemplateSpecializationType>(T1);
+ const DependentTemplateSpecializationType *Spec2 =
+ cast<DependentTemplateSpecializationType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Spec1->getQualifier(),
+ Spec2->getQualifier()))
+ return false;
+ if (!IsStructurallyEquivalent(Spec1->getIdentifier(),
+ Spec2->getIdentifier()))
+ return false;
+ if (Spec1->getNumArgs() != Spec2->getNumArgs())
+ return false;
+ for (unsigned I = 0, N = Spec1->getNumArgs(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context,
+ Spec1->getArg(I), Spec2->getArg(I)))
+ return false;
+ }
+ break;
+ }
+
+ case Type::PackExpansion:
+ if (!IsStructurallyEquivalent(Context,
+ cast<PackExpansionType>(T1)->getPattern(),
+ cast<PackExpansionType>(T2)->getPattern()))
+ return false;
+ break;
+
+ case Type::ObjCInterface: {
+ const ObjCInterfaceType *Iface1 = cast<ObjCInterfaceType>(T1);
+ const ObjCInterfaceType *Iface2 = cast<ObjCInterfaceType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Iface1->getDecl(), Iface2->getDecl()))
+ return false;
+ break;
+ }
+
+ case Type::ObjCObject: {
+ const ObjCObjectType *Obj1 = cast<ObjCObjectType>(T1);
+ const ObjCObjectType *Obj2 = cast<ObjCObjectType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Obj1->getBaseType(),
+ Obj2->getBaseType()))
+ return false;
+ if (Obj1->getNumProtocols() != Obj2->getNumProtocols())
+ return false;
+ for (unsigned I = 0, N = Obj1->getNumProtocols(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context,
+ Obj1->getProtocol(I),
+ Obj2->getProtocol(I)))
+ return false;
+ }
+ break;
+ }
+
+ case Type::ObjCObjectPointer: {
+ const ObjCObjectPointerType *Ptr1 = cast<ObjCObjectPointerType>(T1);
+ const ObjCObjectPointerType *Ptr2 = cast<ObjCObjectPointerType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Ptr1->getPointeeType(),
+ Ptr2->getPointeeType()))
+ return false;
+ break;
+ }
+
+ case Type::Atomic: {
+ if (!IsStructurallyEquivalent(Context,
+ cast<AtomicType>(T1)->getValueType(),
+ cast<AtomicType>(T2)->getValueType()))
+ return false;
+ break;
+ }
+
+ } // end switch
+
+ return true;
+}
+
+/// \brief Determine structural equivalence of two fields.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ FieldDecl *Field1, FieldDecl *Field2) {
+ RecordDecl *Owner2 = cast<RecordDecl>(Field2->getDeclContext());
+
+ // For anonymous structs/unions, match up the anonymous struct/union type
+ // declarations directly, so that we don't go off searching for anonymous
+ // types.
+ if (Field1->isAnonymousStructOrUnion() &&
+ Field2->isAnonymousStructOrUnion()) {
+ RecordDecl *D1 = Field1->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl *D2 = Field2->getType()->castAs<RecordType>()->getDecl();
+ return IsStructurallyEquivalent(Context, D1, D2);
+ }
+
+ // Check for equivalent field names.
+ IdentifierInfo *Name1 = Field1->getIdentifier();
+ IdentifierInfo *Name2 = Field2->getIdentifier();
+ if (!::IsStructurallyEquivalent(Name1, Name2))
+ return false;
+
+ if (!IsStructurallyEquivalent(Context,
+ Field1->getType(), Field2->getType())) {
+ if (Context.Complain) {
+ Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(Owner2);
+ Context.Diag2(Field2->getLocation(), diag::note_odr_field)
+ << Field2->getDeclName() << Field2->getType();
+ Context.Diag1(Field1->getLocation(), diag::note_odr_field)
+ << Field1->getDeclName() << Field1->getType();
+ }
+ return false;
+ }
+
+ if (Field1->isBitField() != Field2->isBitField()) {
+ if (Context.Complain) {
+ Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(Owner2);
+ if (Field1->isBitField()) {
+ Context.Diag1(Field1->getLocation(), diag::note_odr_bit_field)
+ << Field1->getDeclName() << Field1->getType()
+ << Field1->getBitWidthValue(Context.C1);
+ Context.Diag2(Field2->getLocation(), diag::note_odr_not_bit_field)
+ << Field2->getDeclName();
+ } else {
+ Context.Diag2(Field2->getLocation(), diag::note_odr_bit_field)
+ << Field2->getDeclName() << Field2->getType()
+ << Field2->getBitWidthValue(Context.C2);
+ Context.Diag1(Field1->getLocation(), diag::note_odr_not_bit_field)
+ << Field1->getDeclName();
+ }
+ }
+ return false;
+ }
+
+ if (Field1->isBitField()) {
+ // Make sure that the bit-fields are the same length.
+ unsigned Bits1 = Field1->getBitWidthValue(Context.C1);
+ unsigned Bits2 = Field2->getBitWidthValue(Context.C2);
+
+ if (Bits1 != Bits2) {
+ if (Context.Complain) {
+ Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(Owner2);
+ Context.Diag2(Field2->getLocation(), diag::note_odr_bit_field)
+ << Field2->getDeclName() << Field2->getType() << Bits2;
+ Context.Diag1(Field1->getLocation(), diag::note_odr_bit_field)
+ << Field1->getDeclName() << Field1->getType() << Bits1;
+ }
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/// \brief Find the index of the given anonymous struct/union within its
+/// context.
+///
+/// \returns Returns the index of this anonymous struct/union in its context,
+/// including the next assigned index (if none of them match). Returns an
+/// empty option if the context is not a record, i.e., if the anonymous
+/// struct/union is at namespace or block scope.
+static Optional<unsigned> findAnonymousStructOrUnionIndex(RecordDecl *Anon) {
+ ASTContext &Context = Anon->getASTContext();
+ QualType AnonTy = Context.getRecordType(Anon);
+
+ RecordDecl *Owner = dyn_cast<RecordDecl>(Anon->getDeclContext());
+ if (!Owner)
+ return None;
+
+ unsigned Index = 0;
+ for (const auto *D : Owner->noload_decls()) {
+ const auto *F = dyn_cast<FieldDecl>(D);
+ if (!F || !F->isAnonymousStructOrUnion())
+ continue;
+
+ if (Context.hasSameType(F->getType(), AnonTy))
+ break;
+
+ ++Index;
+ }
+
+ return Index;
+}
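+
+// For instance, given
+// struct S { union { int a; }; union { float b; }; };
+// the first anonymous union has index 0 and the second index 1; if the
+// queried type matches neither, the loop falls off the end and the next
+// unassigned index (here 2) is returned.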
+
+/// \brief Determine structural equivalence of two records.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ RecordDecl *D1, RecordDecl *D2) {
+ if (D1->isUnion() != D2->isUnion()) {
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag1(D1->getLocation(), diag::note_odr_tag_kind_here)
+ << D1->getDeclName() << (unsigned)D1->getTagKind();
+ }
+ return false;
+ }
+
+ if (D1->isAnonymousStructOrUnion() && D2->isAnonymousStructOrUnion()) {
+ // If both anonymous structs/unions are in a record context, make sure
+ // they occur in the same location in the context records.
+ if (Optional<unsigned> Index1 = findAnonymousStructOrUnionIndex(D1)) {
+ if (Optional<unsigned> Index2 = findAnonymousStructOrUnionIndex(D2)) {
+ if (*Index1 != *Index2)
+ return false;
+ }
+ }
+ }
+
+ // If both declarations are class template specializations, we know
+ // the ODR applies, so check the template and template arguments.
+ ClassTemplateSpecializationDecl *Spec1
+ = dyn_cast<ClassTemplateSpecializationDecl>(D1);
+ ClassTemplateSpecializationDecl *Spec2
+ = dyn_cast<ClassTemplateSpecializationDecl>(D2);
+ if (Spec1 && Spec2) {
+ // Check that the specialized templates are the same.
+ if (!IsStructurallyEquivalent(Context, Spec1->getSpecializedTemplate(),
+ Spec2->getSpecializedTemplate()))
+ return false;
+
+ // Check that the template arguments are the same.
+ if (Spec1->getTemplateArgs().size() != Spec2->getTemplateArgs().size())
+ return false;
+
+ for (unsigned I = 0, N = Spec1->getTemplateArgs().size(); I != N; ++I)
+ if (!IsStructurallyEquivalent(Context,
+ Spec1->getTemplateArgs().get(I),
+ Spec2->getTemplateArgs().get(I)))
+ return false;
+ }
+ // If one is a class template specialization and the other is not, these
+ // structures are different.
+ else if (Spec1 || Spec2)
+ return false;
+
+ // Compare the definitions of these two records. If either or both are
+ // incomplete, we assume that they are equivalent.
+ D1 = D1->getDefinition();
+ D2 = D2->getDefinition();
+ if (!D1 || !D2)
+ return true;
+
+ if (CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(D1)) {
+ if (CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(D2)) {
+ if (D1CXX->getNumBases() != D2CXX->getNumBases()) {
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(D2->getLocation(), diag::note_odr_number_of_bases)
+ << D2CXX->getNumBases();
+ Context.Diag1(D1->getLocation(), diag::note_odr_number_of_bases)
+ << D1CXX->getNumBases();
+ }
+ return false;
+ }
+
+ // Check the base classes.
+ for (CXXRecordDecl::base_class_iterator Base1 = D1CXX->bases_begin(),
+ BaseEnd1 = D1CXX->bases_end(),
+ Base2 = D2CXX->bases_begin();
+ Base1 != BaseEnd1;
+ ++Base1, ++Base2) {
+ if (!IsStructurallyEquivalent(Context,
+ Base1->getType(), Base2->getType())) {
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(Base2->getLocStart(), diag::note_odr_base)
+ << Base2->getType()
+ << Base2->getSourceRange();
+ Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
+ << Base1->getType()
+ << Base1->getSourceRange();
+ }
+ return false;
+ }
+
+ // Check virtual vs. non-virtual inheritance mismatch.
+ if (Base1->isVirtual() != Base2->isVirtual()) {
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(Base2->getLocStart(),
+ diag::note_odr_virtual_base)
+ << Base2->isVirtual() << Base2->getSourceRange();
+ Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
+ << Base1->isVirtual()
+ << Base1->getSourceRange();
+ }
+ return false;
+ }
+ }
+ } else if (D1CXX->getNumBases() > 0) {
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ const CXXBaseSpecifier *Base1 = D1CXX->bases_begin();
+ Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
+ << Base1->getType()
+ << Base1->getSourceRange();
+ Context.Diag2(D2->getLocation(), diag::note_odr_missing_base);
+ }
+ return false;
+ }
+ }
+
+ // Check the fields for consistency.
+ RecordDecl::field_iterator Field2 = D2->field_begin(),
+ Field2End = D2->field_end();
+ for (RecordDecl::field_iterator Field1 = D1->field_begin(),
+ Field1End = D1->field_end();
+ Field1 != Field1End;
+ ++Field1, ++Field2) {
+ if (Field2 == Field2End) {
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag1(Field1->getLocation(), diag::note_odr_field)
+ << Field1->getDeclName() << Field1->getType();
+ Context.Diag2(D2->getLocation(), diag::note_odr_missing_field);
+ }
+ return false;
+ }
+
+ if (!IsStructurallyEquivalent(Context, *Field1, *Field2))
+ return false;
+ }
+
+ if (Field2 != Field2End) {
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(Field2->getLocation(), diag::note_odr_field)
+ << Field2->getDeclName() << Field2->getType();
+ Context.Diag1(D1->getLocation(), diag::note_odr_missing_field);
+ }
+ return false;
+ }
+
+ return true;
+}
+
+/// \brief Determine structural equivalence of two enums.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ EnumDecl *D1, EnumDecl *D2) {
+ EnumDecl::enumerator_iterator EC2 = D2->enumerator_begin(),
+ EC2End = D2->enumerator_end();
+ for (EnumDecl::enumerator_iterator EC1 = D1->enumerator_begin(),
+ EC1End = D1->enumerator_end();
+ EC1 != EC1End; ++EC1, ++EC2) {
+ if (EC2 == EC2End) {
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag1(EC1->getLocation(), diag::note_odr_enumerator)
+ << EC1->getDeclName()
+ << EC1->getInitVal().toString(10);
+ Context.Diag2(D2->getLocation(), diag::note_odr_missing_enumerator);
+ }
+ return false;
+ }
+
+ llvm::APSInt Val1 = EC1->getInitVal();
+ llvm::APSInt Val2 = EC2->getInitVal();
+ if (!llvm::APSInt::isSameValue(Val1, Val2) ||
+ !IsStructurallyEquivalent(EC1->getIdentifier(), EC2->getIdentifier())) {
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(EC2->getLocation(), diag::note_odr_enumerator)
+ << EC2->getDeclName()
+ << EC2->getInitVal().toString(10);
+ Context.Diag1(EC1->getLocation(), diag::note_odr_enumerator)
+ << EC1->getDeclName()
+ << EC1->getInitVal().toString(10);
+ }
+ return false;
+ }
+ }
+
+ if (EC2 != EC2End) {
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(EC2->getLocation(), diag::note_odr_enumerator)
+ << EC2->getDeclName()
+ << EC2->getInitVal().toString(10);
+ Context.Diag1(D1->getLocation(), diag::note_odr_missing_enumerator);
+ }
+ return false;
+ }
+
+ return true;
+}
+
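+/// \brief Determine structural equivalence of two template parameter lists.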
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ TemplateParameterList *Params1,
+ TemplateParameterList *Params2) {
+ if (Params1->size() != Params2->size()) {
+ if (Context.Complain) {
+ Context.Diag2(Params2->getTemplateLoc(),
+ diag::err_odr_different_num_template_parameters)
+ << Params1->size() << Params2->size();
+ Context.Diag1(Params1->getTemplateLoc(),
+ diag::note_odr_template_parameter_list);
+ }
+ return false;
+ }
+
+ for (unsigned I = 0, N = Params1->size(); I != N; ++I) {
+ if (Params1->getParam(I)->getKind() != Params2->getParam(I)->getKind()) {
+ if (Context.Complain) {
+ Context.Diag2(Params2->getParam(I)->getLocation(),
+ diag::err_odr_different_template_parameter_kind);
+ Context.Diag1(Params1->getParam(I)->getLocation(),
+ diag::note_odr_template_parameter_here);
+ }
+ return false;
+ }
+
+ if (!Context.IsStructurallyEquivalent(Params1->getParam(I),
+ Params2->getParam(I))) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
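+/// \brief Determine structural equivalence of two template type parameters.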
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ TemplateTypeParmDecl *D1,
+ TemplateTypeParmDecl *D2) {
+ if (D1->isParameterPack() != D2->isParameterPack()) {
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
+ << D2->isParameterPack();
+ Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
+ << D1->isParameterPack();
+ }
+ return false;
+ }
+
+ return true;
+}
+
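+/// \brief Determine structural equivalence of two non-type template
+/// parameters.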
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ NonTypeTemplateParmDecl *D1,
+ NonTypeTemplateParmDecl *D2) {
+ if (D1->isParameterPack() != D2->isParameterPack()) {
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
+ << D2->isParameterPack();
+ Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
+ << D1->isParameterPack();
+ }
+ return false;
+ }
+
+ // Check types.
+ if (!Context.IsStructurallyEquivalent(D1->getType(), D2->getType())) {
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(),
+ diag::err_odr_non_type_parameter_type_inconsistent)
+ << D2->getType() << D1->getType();
+ Context.Diag1(D1->getLocation(), diag::note_odr_value_here)
+ << D1->getType();
+ }
+ return false;
+ }
+
+ return true;
+}
+
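+/// \brief Determine structural equivalence of two template template
+/// parameters.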
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ TemplateTemplateParmDecl *D1,
+ TemplateTemplateParmDecl *D2) {
+ if (D1->isParameterPack() != D2->isParameterPack()) {
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
+ << D2->isParameterPack();
+ Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
+ << D1->isParameterPack();
+ }
+ return false;
+ }
+
+ // Check template parameter lists.
+ return IsStructurallyEquivalent(Context, D1->getTemplateParameters(),
+ D2->getTemplateParameters());
+}
+
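+/// \brief Determine structural equivalence of two class templates.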
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ ClassTemplateDecl *D1,
+ ClassTemplateDecl *D2) {
+ // Check template parameters.
+ if (!IsStructurallyEquivalent(Context,
+ D1->getTemplateParameters(),
+ D2->getTemplateParameters()))
+ return false;
+
+ // Check the templated declaration.
+ return Context.IsStructurallyEquivalent(D1->getTemplatedDecl(),
+ D2->getTemplatedDecl());
+}
+
+/// \brief Determine structural equivalence of two declarations.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ Decl *D1, Decl *D2) {
+ // FIXME: Check for known structural equivalences via a callback of some sort.
+
+ // Check whether we already know that these two declarations are not
+ // structurally equivalent.
+ if (Context.NonEquivalentDecls.count(std::make_pair(D1->getCanonicalDecl(),
+ D2->getCanonicalDecl())))
+ return false;
+
+ // Determine whether we've already produced a tentative equivalence for D1.
+ Decl *&EquivToD1 = Context.TentativeEquivalences[D1->getCanonicalDecl()];
+ if (EquivToD1)
+ return EquivToD1 == D2->getCanonicalDecl();
+
+ // Produce a tentative equivalence D1 <-> D2, which will be checked later.
+ EquivToD1 = D2->getCanonicalDecl();
+ Context.DeclsToCheck.push_back(D1->getCanonicalDecl());
+ return true;
+}
+
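+/// \brief Public entry point for declarations: queue D1 and D2 as
+/// tentatively equivalent, then drain the worklist via Finish(). Note the
+/// inversion: Finish() returns true when a mismatch is found, so equivalence
+/// holds only when it returns false.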
+bool StructuralEquivalenceContext::IsStructurallyEquivalent(Decl *D1,
+ Decl *D2) {
+ if (!::IsStructurallyEquivalent(*this, D1, D2))
+ return false;
+
+ return !Finish();
+}
+
+bool StructuralEquivalenceContext::IsStructurallyEquivalent(QualType T1,
+ QualType T2) {
+ if (!::IsStructurallyEquivalent(*this, T1, T2))
+ return false;
+
+ return !Finish();
+}
+
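+/// \brief Drain the worklist of tentative equivalences recorded by the
+/// static helpers above, checking each queued pair for real. Returns true if
+/// some pair turns out not to be equivalent; that pair is cached in
+/// NonEquivalentDecls so later queries fail fast.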
+bool StructuralEquivalenceContext::Finish() {
+ while (!DeclsToCheck.empty()) {
+ // Check the next declaration.
+ Decl *D1 = DeclsToCheck.front();
+ DeclsToCheck.pop_front();
+
+ Decl *D2 = TentativeEquivalences[D1];
+ assert(D2 && "Unrecorded tentative equivalence?");
+
+ bool Equivalent = true;
+
+ // FIXME: Switch on all declaration kinds. For now, we're just going to
+ // check the obvious ones.
+ if (RecordDecl *Record1 = dyn_cast<RecordDecl>(D1)) {
+ if (RecordDecl *Record2 = dyn_cast<RecordDecl>(D2)) {
+ // Check for equivalent structure names.
+ IdentifierInfo *Name1 = Record1->getIdentifier();
+ if (!Name1 && Record1->getTypedefNameForAnonDecl())
+ Name1 = Record1->getTypedefNameForAnonDecl()->getIdentifier();
+ IdentifierInfo *Name2 = Record2->getIdentifier();
+ if (!Name2 && Record2->getTypedefNameForAnonDecl())
+ Name2 = Record2->getTypedefNameForAnonDecl()->getIdentifier();
+ if (!::IsStructurallyEquivalent(Name1, Name2) ||
+ !::IsStructurallyEquivalent(*this, Record1, Record2))
+ Equivalent = false;
+ } else {
+ // Record/non-record mismatch.
+ Equivalent = false;
+ }
+ } else if (EnumDecl *Enum1 = dyn_cast<EnumDecl>(D1)) {
+ if (EnumDecl *Enum2 = dyn_cast<EnumDecl>(D2)) {
+ // Check for equivalent enum names.
+ IdentifierInfo *Name1 = Enum1->getIdentifier();
+ if (!Name1 && Enum1->getTypedefNameForAnonDecl())
+ Name1 = Enum1->getTypedefNameForAnonDecl()->getIdentifier();
+ IdentifierInfo *Name2 = Enum2->getIdentifier();
+ if (!Name2 && Enum2->getTypedefNameForAnonDecl())
+ Name2 = Enum2->getTypedefNameForAnonDecl()->getIdentifier();
+ if (!::IsStructurallyEquivalent(Name1, Name2) ||
+ !::IsStructurallyEquivalent(*this, Enum1, Enum2))
+ Equivalent = false;
+ } else {
+ // Enum/non-enum mismatch.
+ Equivalent = false;
+ }
+ } else if (TypedefNameDecl *Typedef1 = dyn_cast<TypedefNameDecl>(D1)) {
+ if (TypedefNameDecl *Typedef2 = dyn_cast<TypedefNameDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(Typedef1->getIdentifier(),
+ Typedef2->getIdentifier()) ||
+ !::IsStructurallyEquivalent(*this,
+ Typedef1->getUnderlyingType(),
+ Typedef2->getUnderlyingType()))
+ Equivalent = false;
+ } else {
+ // Typedef/non-typedef mismatch.
+ Equivalent = false;
+ }
+ } else if (ClassTemplateDecl *ClassTemplate1
+ = dyn_cast<ClassTemplateDecl>(D1)) {
+ if (ClassTemplateDecl *ClassTemplate2 = dyn_cast<ClassTemplateDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(ClassTemplate1->getIdentifier(),
+ ClassTemplate2->getIdentifier()) ||
+ !::IsStructurallyEquivalent(*this, ClassTemplate1, ClassTemplate2))
+ Equivalent = false;
+ } else {
+ // Class template/non-class-template mismatch.
+ Equivalent = false;
+ }
+ } else if (TemplateTypeParmDecl *TTP1 = dyn_cast<TemplateTypeParmDecl>(D1)) {
+ if (TemplateTypeParmDecl *TTP2 = dyn_cast<TemplateTypeParmDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, TTP1, TTP2))
+ Equivalent = false;
+ } else {
+ // Kind mismatch.
+ Equivalent = false;
+ }
+ } else if (NonTypeTemplateParmDecl *NTTP1
+ = dyn_cast<NonTypeTemplateParmDecl>(D1)) {
+ if (NonTypeTemplateParmDecl *NTTP2
+ = dyn_cast<NonTypeTemplateParmDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, NTTP1, NTTP2))
+ Equivalent = false;
+ } else {
+ // Kind mismatch.
+ Equivalent = false;
+ }
+ } else if (TemplateTemplateParmDecl *TTP1
+ = dyn_cast<TemplateTemplateParmDecl>(D1)) {
+ if (TemplateTemplateParmDecl *TTP2
+ = dyn_cast<TemplateTemplateParmDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, TTP1, TTP2))
+ Equivalent = false;
+ } else {
+ // Kind mismatch.
+ Equivalent = false;
+ }
+ }
+
+ if (!Equivalent) {
+ // Note that these two declarations are not equivalent (and we already
+ // know about it).
+ NonEquivalentDecls.insert(std::make_pair(D1->getCanonicalDecl(),
+ D2->getCanonicalDecl()));
+ return true;
+ }
+ // FIXME: Check other declaration kinds!
+ }
+
+ return false;
+}
+
+//----------------------------------------------------------------------------
+// Import Types
+//----------------------------------------------------------------------------
+
+QualType ASTNodeImporter::VisitType(const Type *T) {
+ Importer.FromDiag(SourceLocation(), diag::err_unsupported_ast_node)
+ << T->getTypeClassName();
+ return QualType();
+}
+
+QualType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) {
+ switch (T->getKind()) {
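+ // Most builtin kinds map one-to-one onto the matching singleton type in
+ // the destination context; BuiltinTypes.def expands to one case per kind.
+ // Only the target-dependent 'char' and 'wchar_t' variants below need
+ // explicit translation.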
+#define SHARED_SINGLETON_TYPE(Expansion)
+#define BUILTIN_TYPE(Id, SingletonId) \
+ case BuiltinType::Id: return Importer.getToContext().SingletonId;
+#include "clang/AST/BuiltinTypes.def"
+
+ // FIXME: for Char16, Char32, and NullPtr, make sure that the "to"
+ // context supports C++.
+
+ // FIXME: for ObjCId, ObjCClass, and ObjCSel, make sure that the "to"
+ // context supports ObjC.
+
+ case BuiltinType::Char_U:
+ // The context we're importing from has an unsigned 'char'. If we're
+ // importing into a context with a signed 'char', translate to
+ // 'unsigned char' instead.
+ if (Importer.getToContext().getLangOpts().CharIsSigned)
+ return Importer.getToContext().UnsignedCharTy;
+
+ return Importer.getToContext().CharTy;
+
+ case BuiltinType::Char_S:
+ // The context we're importing from has a signed 'char'. If we're
+ // importing into a context with an unsigned 'char', translate to
+ // 'signed char' instead.
+ if (!Importer.getToContext().getLangOpts().CharIsSigned)
+ return Importer.getToContext().SignedCharTy;
+
+ return Importer.getToContext().CharTy;
+
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ // FIXME: If not in C++, shall we translate to the C equivalent of
+ // wchar_t?
+ return Importer.getToContext().WCharTy;
+ }
+
+ llvm_unreachable("Invalid BuiltinType Kind!");
+}
+
+QualType ASTNodeImporter::VisitComplexType(const ComplexType *T) {
+ QualType ToElementType = Importer.Import(T->getElementType());
+ if (ToElementType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getComplexType(ToElementType);
+}
+
+QualType ASTNodeImporter::VisitPointerType(const PointerType *T) {
+ QualType ToPointeeType = Importer.Import(T->getPointeeType());
+ if (ToPointeeType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getPointerType(ToPointeeType);
+}
+
+QualType ASTNodeImporter::VisitBlockPointerType(const BlockPointerType *T) {
+ // FIXME: Check for blocks support in "to" context.
+ QualType ToPointeeType = Importer.Import(T->getPointeeType());
+ if (ToPointeeType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getBlockPointerType(ToPointeeType);
+}
+
+QualType
+ASTNodeImporter::VisitLValueReferenceType(const LValueReferenceType *T) {
+ // FIXME: Check for C++ support in "to" context.
+ QualType ToPointeeType = Importer.Import(T->getPointeeTypeAsWritten());
+ if (ToPointeeType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getLValueReferenceType(ToPointeeType);
+}
+
+QualType
+ASTNodeImporter::VisitRValueReferenceType(const RValueReferenceType *T) {
+ // FIXME: Check for C++0x support in "to" context.
+ QualType ToPointeeType = Importer.Import(T->getPointeeTypeAsWritten());
+ if (ToPointeeType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getRValueReferenceType(ToPointeeType);
+}
+
+QualType ASTNodeImporter::VisitMemberPointerType(const MemberPointerType *T) {
+ // FIXME: Check for C++ support in "to" context.
+ QualType ToPointeeType = Importer.Import(T->getPointeeType());
+ if (ToPointeeType.isNull())
+ return QualType();
+
+ QualType ClassType = Importer.Import(QualType(T->getClass(), 0));
+ return Importer.getToContext().getMemberPointerType(ToPointeeType,
+ ClassType.getTypePtr());
+}
+
+QualType ASTNodeImporter::VisitConstantArrayType(const ConstantArrayType *T) {
+ QualType ToElementType = Importer.Import(T->getElementType());
+ if (ToElementType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getConstantArrayType(ToElementType,
+ T->getSize(),
+ T->getSizeModifier(),
+ T->getIndexTypeCVRQualifiers());
+}
+
+QualType
+ASTNodeImporter::VisitIncompleteArrayType(const IncompleteArrayType *T) {
+ QualType ToElementType = Importer.Import(T->getElementType());
+ if (ToElementType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getIncompleteArrayType(ToElementType,
+ T->getSizeModifier(),
+ T->getIndexTypeCVRQualifiers());
+}
+
+QualType ASTNodeImporter::VisitVariableArrayType(const VariableArrayType *T) {
+ QualType ToElementType = Importer.Import(T->getElementType());
+ if (ToElementType.isNull())
+ return QualType();
+
+ Expr *Size = Importer.Import(T->getSizeExpr());
+ if (!Size)
+ return QualType();
+
+ SourceRange Brackets = Importer.Import(T->getBracketsRange());
+ return Importer.getToContext().getVariableArrayType(ToElementType, Size,
+ T->getSizeModifier(),
+ T->getIndexTypeCVRQualifiers(),
+ Brackets);
+}
+
+QualType ASTNodeImporter::VisitVectorType(const VectorType *T) {
+ QualType ToElementType = Importer.Import(T->getElementType());
+ if (ToElementType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getVectorType(ToElementType,
+ T->getNumElements(),
+ T->getVectorKind());
+}
+
+QualType ASTNodeImporter::VisitExtVectorType(const ExtVectorType *T) {
+ QualType ToElementType = Importer.Import(T->getElementType());
+ if (ToElementType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getExtVectorType(ToElementType,
+ T->getNumElements());
+}
+
+QualType
+ASTNodeImporter::VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
+ // FIXME: What happens if we're importing a function without a prototype
+ // into C++? Should we make it variadic?
+ QualType ToResultType = Importer.Import(T->getReturnType());
+ if (ToResultType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getFunctionNoProtoType(ToResultType,
+ T->getExtInfo());
+}
+
+QualType ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
+ QualType ToResultType = Importer.Import(T->getReturnType());
+ if (ToResultType.isNull())
+ return QualType();
+
+ // Import argument types
+ SmallVector<QualType, 4> ArgTypes;
+ for (const auto &A : T->param_types()) {
+ QualType ArgType = Importer.Import(A);
+ if (ArgType.isNull())
+ return QualType();
+ ArgTypes.push_back(ArgType);
+ }
+
+ // Import exception types
+ SmallVector<QualType, 4> ExceptionTypes;
+ for (const auto &E : T->exceptions()) {
+ QualType ExceptionType = Importer.Import(E);
+ if (ExceptionType.isNull())
+ return QualType();
+ ExceptionTypes.push_back(ExceptionType);
+ }
+
+ FunctionProtoType::ExtProtoInfo FromEPI = T->getExtProtoInfo();
+ FunctionProtoType::ExtProtoInfo ToEPI;
+
+ ToEPI.ExtInfo = FromEPI.ExtInfo;
+ ToEPI.Variadic = FromEPI.Variadic;
+ ToEPI.HasTrailingReturn = FromEPI.HasTrailingReturn;
+ ToEPI.TypeQuals = FromEPI.TypeQuals;
+ ToEPI.RefQualifier = FromEPI.RefQualifier;
+ ToEPI.ExceptionSpec.Type = FromEPI.ExceptionSpec.Type;
+ ToEPI.ExceptionSpec.Exceptions = ExceptionTypes;
+ ToEPI.ExceptionSpec.NoexceptExpr =
+ Importer.Import(FromEPI.ExceptionSpec.NoexceptExpr);
+ ToEPI.ExceptionSpec.SourceDecl = cast_or_null<FunctionDecl>(
+ Importer.Import(FromEPI.ExceptionSpec.SourceDecl));
+ ToEPI.ExceptionSpec.SourceTemplate = cast_or_null<FunctionDecl>(
+ Importer.Import(FromEPI.ExceptionSpec.SourceTemplate));
+
+ return Importer.getToContext().getFunctionType(ToResultType, ArgTypes, ToEPI);
+}
+
+QualType ASTNodeImporter::VisitParenType(const ParenType *T) {
+ QualType ToInnerType = Importer.Import(T->getInnerType());
+ if (ToInnerType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getParenType(ToInnerType);
+}
+
+QualType ASTNodeImporter::VisitTypedefType(const TypedefType *T) {
+ TypedefNameDecl *ToDecl
+ = dyn_cast_or_null<TypedefNameDecl>(Importer.Import(T->getDecl()));
+ if (!ToDecl)
+ return QualType();
+
+ return Importer.getToContext().getTypeDeclType(ToDecl);
+}
+
+QualType ASTNodeImporter::VisitTypeOfExprType(const TypeOfExprType *T) {
+ Expr *ToExpr = Importer.Import(T->getUnderlyingExpr());
+ if (!ToExpr)
+ return QualType();
+
+ return Importer.getToContext().getTypeOfExprType(ToExpr);
+}
+
+QualType ASTNodeImporter::VisitTypeOfType(const TypeOfType *T) {
+ QualType ToUnderlyingType = Importer.Import(T->getUnderlyingType());
+ if (ToUnderlyingType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getTypeOfType(ToUnderlyingType);
+}
+
+QualType ASTNodeImporter::VisitDecltypeType(const DecltypeType *T) {
+ // FIXME: Make sure that the "to" context supports C++0x!
+ Expr *ToExpr = Importer.Import(T->getUnderlyingExpr());
+ if (!ToExpr)
+ return QualType();
+
+ QualType UnderlyingType = Importer.Import(T->getUnderlyingType());
+ if (UnderlyingType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getDecltypeType(ToExpr, UnderlyingType);
+}
+
+QualType ASTNodeImporter::VisitUnaryTransformType(const UnaryTransformType *T) {
+ QualType ToBaseType = Importer.Import(T->getBaseType());
+ QualType ToUnderlyingType = Importer.Import(T->getUnderlyingType());
+ if (ToBaseType.isNull() || ToUnderlyingType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getUnaryTransformType(ToBaseType,
+ ToUnderlyingType,
+ T->getUTTKind());
+}
+
+QualType ASTNodeImporter::VisitAutoType(const AutoType *T) {
+ // FIXME: Make sure that the "to" context supports C++11!
+ QualType FromDeduced = T->getDeducedType();
+ QualType ToDeduced;
+ if (!FromDeduced.isNull()) {
+ ToDeduced = Importer.Import(FromDeduced);
+ if (ToDeduced.isNull())
+ return QualType();
+ }
+
+ return Importer.getToContext().getAutoType(ToDeduced, T->getKeyword(),
+ /*IsDependent*/false);
+}
+
+QualType ASTNodeImporter::VisitRecordType(const RecordType *T) {
+ RecordDecl *ToDecl
+ = dyn_cast_or_null<RecordDecl>(Importer.Import(T->getDecl()));
+ if (!ToDecl)
+ return QualType();
+
+ return Importer.getToContext().getTagDeclType(ToDecl);
+}
+
+QualType ASTNodeImporter::VisitEnumType(const EnumType *T) {
+ EnumDecl *ToDecl
+ = dyn_cast_or_null<EnumDecl>(Importer.Import(T->getDecl()));
+ if (!ToDecl)
+ return QualType();
+
+ return Importer.getToContext().getTagDeclType(ToDecl);
+}
+
+QualType ASTNodeImporter::VisitAttributedType(const AttributedType *T) {
+ QualType FromModifiedType = T->getModifiedType();
+ QualType FromEquivalentType = T->getEquivalentType();
+ QualType ToModifiedType;
+ QualType ToEquivalentType;
+
+ if (!FromModifiedType.isNull()) {
+ ToModifiedType = Importer.Import(FromModifiedType);
+ if (ToModifiedType.isNull())
+ return QualType();
+ }
+ if (!FromEquivalentType.isNull()) {
+ ToEquivalentType = Importer.Import(FromEquivalentType);
+ if (ToEquivalentType.isNull())
+ return QualType();
+ }
+
+ return Importer.getToContext().getAttributedType(T->getAttrKind(),
+ ToModifiedType, ToEquivalentType);
+}
+
+QualType ASTNodeImporter::VisitTemplateSpecializationType(
+ const TemplateSpecializationType *T) {
+ TemplateName ToTemplate = Importer.Import(T->getTemplateName());
+ if (ToTemplate.isNull())
+ return QualType();
+
+ SmallVector<TemplateArgument, 2> ToTemplateArgs;
+ if (ImportTemplateArguments(T->getArgs(), T->getNumArgs(), ToTemplateArgs))
+ return QualType();
+
+ QualType ToCanonType;
+ if (!QualType(T, 0).isCanonical()) {
+ QualType FromCanonType
+ = Importer.getFromContext().getCanonicalType(QualType(T, 0));
+ ToCanonType = Importer.Import(FromCanonType);
+ if (ToCanonType.isNull())
+ return QualType();
+ }
+ return Importer.getToContext().getTemplateSpecializationType(ToTemplate,
+ ToTemplateArgs.data(),
+ ToTemplateArgs.size(),
+ ToCanonType);
+}
+
+QualType ASTNodeImporter::VisitElaboratedType(const ElaboratedType *T) {
+ NestedNameSpecifier *ToQualifier = nullptr;
+ // Note: the qualifier in an ElaboratedType is optional.
+ if (T->getQualifier()) {
+ ToQualifier = Importer.Import(T->getQualifier());
+ if (!ToQualifier)
+ return QualType();
+ }
+
+ QualType ToNamedType = Importer.Import(T->getNamedType());
+ if (ToNamedType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getElaboratedType(T->getKeyword(),
+ ToQualifier, ToNamedType);
+}
+
+QualType ASTNodeImporter::VisitObjCInterfaceType(const ObjCInterfaceType *T) {
+ ObjCInterfaceDecl *Class
+ = dyn_cast_or_null<ObjCInterfaceDecl>(Importer.Import(T->getDecl()));
+ if (!Class)
+ return QualType();
+
+ return Importer.getToContext().getObjCInterfaceType(Class);
+}
+
+QualType ASTNodeImporter::VisitObjCObjectType(const ObjCObjectType *T) {
+ QualType ToBaseType = Importer.Import(T->getBaseType());
+ if (ToBaseType.isNull())
+ return QualType();
+
+ SmallVector<QualType, 4> TypeArgs;
+ for (auto TypeArg : T->getTypeArgsAsWritten()) {
+ QualType ImportedTypeArg = Importer.Import(TypeArg);
+ if (ImportedTypeArg.isNull())
+ return QualType();
+
+ TypeArgs.push_back(ImportedTypeArg);
+ }
+
+ SmallVector<ObjCProtocolDecl *, 4> Protocols;
+ for (auto *P : T->quals()) {
+ ObjCProtocolDecl *Protocol
+ = dyn_cast_or_null<ObjCProtocolDecl>(Importer.Import(P));
+ if (!Protocol)
+ return QualType();
+ Protocols.push_back(Protocol);
+ }
+
+ return Importer.getToContext().getObjCObjectType(ToBaseType, TypeArgs,
+ Protocols,
+ T->isKindOfTypeAsWritten());
+}
+
+QualType
+ASTNodeImporter::VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
+ QualType ToPointeeType = Importer.Import(T->getPointeeType());
+ if (ToPointeeType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getObjCObjectPointerType(ToPointeeType);
+}
+
+//----------------------------------------------------------------------------
+// Import Declarations
+//----------------------------------------------------------------------------
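+/// \brief Import the kind-independent parts of a declaration: its
+/// declaration context, lexical context, name, and location. Returns true on
+/// error; if the declaration has already been imported, ToD is set to that
+/// earlier result.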
+bool ASTNodeImporter::ImportDeclParts(NamedDecl *D, DeclContext *&DC,
+ DeclContext *&LexicalDC,
+ DeclarationName &Name,
+ NamedDecl *&ToD,
+ SourceLocation &Loc) {
+ // Import the context of this declaration.
+ DC = Importer.ImportContext(D->getDeclContext());
+ if (!DC)
+ return true;
+
+ LexicalDC = DC;
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return true;
+ }
+
+ // Import the name of this declaration.
+ Name = Importer.Import(D->getDeclName());
+ if (D->getDeclName() && !Name)
+ return true;
+
+ // Import the location of this declaration.
+ Loc = Importer.Import(D->getLocation());
+ ToD = cast_or_null<NamedDecl>(Importer.GetAlreadyImportedOrNull(D));
+ return false;
+}
+
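+/// \brief Make sure the definition of FromD, if any, exists on its imported
+/// counterpart as well; FromD is imported first when no ToD is supplied.
+/// Only record and enum definitions are handled here.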
+void ASTNodeImporter::ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD) {
+ if (!FromD)
+ return;
+
+ if (!ToD) {
+ ToD = Importer.Import(FromD);
+ if (!ToD)
+ return;
+ }
+
+ if (RecordDecl *FromRecord = dyn_cast<RecordDecl>(FromD)) {
+ if (RecordDecl *ToRecord = cast_or_null<RecordDecl>(ToD)) {
+ if (FromRecord->getDefinition() && FromRecord->isCompleteDefinition() && !ToRecord->getDefinition()) {
+ ImportDefinition(FromRecord, ToRecord);
+ }
+ }
+ return;
+ }
+
+ if (EnumDecl *FromEnum = dyn_cast<EnumDecl>(FromD)) {
+ if (EnumDecl *ToEnum = cast_or_null<EnumDecl>(ToD)) {
+ if (FromEnum->getDefinition() && !ToEnum->getDefinition()) {
+ ImportDefinition(FromEnum, ToEnum);
+ }
+ }
+ return;
+ }
+}
+
+void
+ASTNodeImporter::ImportDeclarationNameLoc(const DeclarationNameInfo &From,
+ DeclarationNameInfo& To) {
+ // NOTE: To.Name and To.Loc are already imported.
+ // We only have to import To.LocInfo.
+ switch (To.getName().getNameKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXUsingDirective:
+ return;
+
+ case DeclarationName::CXXOperatorName: {
+ SourceRange Range = From.getCXXOperatorNameRange();
+ To.setCXXOperatorNameRange(Importer.Import(Range));
+ return;
+ }
+ case DeclarationName::CXXLiteralOperatorName: {
+ SourceLocation Loc = From.getCXXLiteralOperatorNameLoc();
+ To.setCXXLiteralOperatorNameLoc(Importer.Import(Loc));
+ return;
+ }
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName: {
+ TypeSourceInfo *FromTInfo = From.getNamedTypeInfo();
+ To.setNamedTypeInfo(Importer.Import(FromTInfo));
+ return;
+ }
+ }
+ llvm_unreachable("Unknown name kind.");
+}
+
+void ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
+ if (Importer.isMinimalImport() && !ForceImport) {
+ Importer.ImportContext(FromDC);
+ return;
+ }
+
+ for (auto *From : FromDC->decls())
+ Importer.Import(From);
+}
+
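+/// \brief Import the definition of a record, copying the CXXRecordDecl
+/// DefinitionData flags and the base-class list for C++ records. Returns
+/// true on error, following the convention of the other ImportDefinition
+/// overloads.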
+bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To,
+ ImportDefinitionKind Kind) {
+ if (To->getDefinition() || To->isBeingDefined()) {
+ if (Kind == IDK_Everything)
+ ImportDeclContext(From, /*ForceImport=*/true);
+
+ return false;
+ }
+
+ To->startDefinition();
+
+ // Add base classes.
+ if (CXXRecordDecl *ToCXX = dyn_cast<CXXRecordDecl>(To)) {
+ CXXRecordDecl *FromCXX = cast<CXXRecordDecl>(From);
+
+ struct CXXRecordDecl::DefinitionData &ToData = ToCXX->data();
+ struct CXXRecordDecl::DefinitionData &FromData = FromCXX->data();
+ ToData.UserDeclaredConstructor = FromData.UserDeclaredConstructor;
+ ToData.UserDeclaredSpecialMembers = FromData.UserDeclaredSpecialMembers;
+ ToData.Aggregate = FromData.Aggregate;
+ ToData.PlainOldData = FromData.PlainOldData;
+ ToData.Empty = FromData.Empty;
+ ToData.Polymorphic = FromData.Polymorphic;
+ ToData.Abstract = FromData.Abstract;
+ ToData.IsStandardLayout = FromData.IsStandardLayout;
+ ToData.HasNoNonEmptyBases = FromData.HasNoNonEmptyBases;
+ ToData.HasPrivateFields = FromData.HasPrivateFields;
+ ToData.HasProtectedFields = FromData.HasProtectedFields;
+ ToData.HasPublicFields = FromData.HasPublicFields;
+ ToData.HasMutableFields = FromData.HasMutableFields;
+ ToData.HasVariantMembers = FromData.HasVariantMembers;
+ ToData.HasOnlyCMembers = FromData.HasOnlyCMembers;
+ ToData.HasInClassInitializer = FromData.HasInClassInitializer;
+ ToData.HasUninitializedReferenceMember
+ = FromData.HasUninitializedReferenceMember;
+ ToData.NeedOverloadResolutionForMoveConstructor
+ = FromData.NeedOverloadResolutionForMoveConstructor;
+ ToData.NeedOverloadResolutionForMoveAssignment
+ = FromData.NeedOverloadResolutionForMoveAssignment;
+ ToData.NeedOverloadResolutionForDestructor
+ = FromData.NeedOverloadResolutionForDestructor;
+ ToData.DefaultedMoveConstructorIsDeleted
+ = FromData.DefaultedMoveConstructorIsDeleted;
+ ToData.DefaultedMoveAssignmentIsDeleted
+ = FromData.DefaultedMoveAssignmentIsDeleted;
+ ToData.DefaultedDestructorIsDeleted = FromData.DefaultedDestructorIsDeleted;
+ ToData.HasTrivialSpecialMembers = FromData.HasTrivialSpecialMembers;
+ ToData.HasIrrelevantDestructor = FromData.HasIrrelevantDestructor;
+ ToData.HasConstexprNonCopyMoveConstructor
+ = FromData.HasConstexprNonCopyMoveConstructor;
+ ToData.DefaultedDefaultConstructorIsConstexpr
+ = FromData.DefaultedDefaultConstructorIsConstexpr;
+ ToData.HasConstexprDefaultConstructor
+ = FromData.HasConstexprDefaultConstructor;
+ ToData.HasNonLiteralTypeFieldsOrBases
+ = FromData.HasNonLiteralTypeFieldsOrBases;
+ // ComputedVisibleConversions not imported.
+ ToData.UserProvidedDefaultConstructor
+ = FromData.UserProvidedDefaultConstructor;
+ ToData.DeclaredSpecialMembers = FromData.DeclaredSpecialMembers;
+ ToData.ImplicitCopyConstructorHasConstParam
+ = FromData.ImplicitCopyConstructorHasConstParam;
+ ToData.ImplicitCopyAssignmentHasConstParam
+ = FromData.ImplicitCopyAssignmentHasConstParam;
+ ToData.HasDeclaredCopyConstructorWithConstParam
+ = FromData.HasDeclaredCopyConstructorWithConstParam;
+ ToData.HasDeclaredCopyAssignmentWithConstParam
+ = FromData.HasDeclaredCopyAssignmentWithConstParam;
+ ToData.IsLambda = FromData.IsLambda;
+
+ SmallVector<CXXBaseSpecifier *, 4> Bases;
+ for (const auto &Base1 : FromCXX->bases()) {
+ QualType T = Importer.Import(Base1.getType());
+ if (T.isNull())
+ return true;
+
+ SourceLocation EllipsisLoc;
+ if (Base1.isPackExpansion())
+ EllipsisLoc = Importer.Import(Base1.getEllipsisLoc());
+
+ // Ensure that we have a definition for the base.
+ ImportDefinitionIfNeeded(Base1.getType()->getAsCXXRecordDecl());
+
+ Bases.push_back(
+ new (Importer.getToContext())
+ CXXBaseSpecifier(Importer.Import(Base1.getSourceRange()),
+ Base1.isVirtual(),
+ Base1.isBaseOfClass(),
+ Base1.getAccessSpecifierAsWritten(),
+ Importer.Import(Base1.getTypeSourceInfo()),
+ EllipsisLoc));
+ }
+ if (!Bases.empty())
+ ToCXX->setBases(Bases.data(), Bases.size());
+ }
+
+ if (shouldForceImportDeclContext(Kind))
+ ImportDeclContext(From, /*ForceImport=*/true);
+
+ To->completeDefinition();
+ return false;
+}
+
+bool ASTNodeImporter::ImportDefinition(VarDecl *From, VarDecl *To,
+ ImportDefinitionKind Kind) {
+ if (To->getAnyInitializer())
+ return false;
+
+ // FIXME: Can we really import any initializer? Alternatively, we could force
+ // ourselves to import every declaration of a variable and then only use
+ // getInit() here.
+ To->setInit(Importer.Import(const_cast<Expr *>(From->getAnyInitializer())));
+
+ // FIXME: Other bits to merge?
+
+ return false;
+}
+
+bool ASTNodeImporter::ImportDefinition(EnumDecl *From, EnumDecl *To,
+ ImportDefinitionKind Kind) {
+ if (To->getDefinition() || To->isBeingDefined()) {
+ if (Kind == IDK_Everything)
+ ImportDeclContext(From, /*ForceImport=*/true);
+ return false;
+ }
+
+ To->startDefinition();
+
+ QualType T = Importer.Import(Importer.getFromContext().getTypeDeclType(From));
+ if (T.isNull())
+ return true;
+
+ QualType ToPromotionType = Importer.Import(From->getPromotionType());
+ if (ToPromotionType.isNull())
+ return true;
+
+ if (shouldForceImportDeclContext(Kind))
+ ImportDeclContext(From, /*ForceImport=*/true);
+
+ // FIXME: we might need to merge the number of positive or negative bits
+ // if the enumerator lists don't match.
+ To->completeDefinition(T, ToPromotionType,
+ From->getNumPositiveBits(),
+ From->getNumNegativeBits());
+ return false;
+}
+
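+/// \brief Import every parameter of a template parameter list, rebuilding
+/// the list in the destination context. Returns null if any parameter fails
+/// to import.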
+TemplateParameterList *ASTNodeImporter::ImportTemplateParameterList(
+ TemplateParameterList *Params) {
+ SmallVector<NamedDecl *, 4> ToParams;
+ ToParams.reserve(Params->size());
+ for (TemplateParameterList::iterator P = Params->begin(),
+ PEnd = Params->end();
+ P != PEnd; ++P) {
+ Decl *To = Importer.Import(*P);
+ if (!To)
+ return nullptr;
+
+ ToParams.push_back(cast<NamedDecl>(To));
+ }
+
+ return TemplateParameterList::Create(Importer.getToContext(),
+ Importer.Import(Params->getTemplateLoc()),
+ Importer.Import(Params->getLAngleLoc()),
+ ToParams,
+ Importer.Import(Params->getRAngleLoc()));
+}
+
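+/// \brief Import a single template argument, dispatching on its kind. A
+/// default-constructed (null) TemplateArgument signals failure; callers must
+/// also check the source argument, since a genuine Null argument imports to
+/// a null one as well.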
+TemplateArgument
+ASTNodeImporter::ImportTemplateArgument(const TemplateArgument &From) {
+ switch (From.getKind()) {
+ case TemplateArgument::Null:
+ return TemplateArgument();
+
+ case TemplateArgument::Type: {
+ QualType ToType = Importer.Import(From.getAsType());
+ if (ToType.isNull())
+ return TemplateArgument();
+ return TemplateArgument(ToType);
+ }
+
+ case TemplateArgument::Integral: {
+ QualType ToType = Importer.Import(From.getIntegralType());
+ if (ToType.isNull())
+ return TemplateArgument();
+ return TemplateArgument(From, ToType);
+ }
+
+ case TemplateArgument::Declaration: {
+ ValueDecl *To = cast_or_null<ValueDecl>(Importer.Import(From.getAsDecl()));
+ QualType ToType = Importer.Import(From.getParamTypeForDecl());
+ if (!To || ToType.isNull())
+ return TemplateArgument();
+ return TemplateArgument(To, ToType);
+ }
+
+ case TemplateArgument::NullPtr: {
+ QualType ToType = Importer.Import(From.getNullPtrType());
+ if (ToType.isNull())
+ return TemplateArgument();
+ return TemplateArgument(ToType, /*isNullPtr*/true);
+ }
+
+ case TemplateArgument::Template: {
+ TemplateName ToTemplate = Importer.Import(From.getAsTemplate());
+ if (ToTemplate.isNull())
+ return TemplateArgument();
+
+ return TemplateArgument(ToTemplate);
+ }
+
+ case TemplateArgument::TemplateExpansion: {
+ TemplateName ToTemplate
+ = Importer.Import(From.getAsTemplateOrTemplatePattern());
+ if (ToTemplate.isNull())
+ return TemplateArgument();
+
+ return TemplateArgument(ToTemplate, From.getNumTemplateExpansions());
+ }
+
+ case TemplateArgument::Expression:
+ if (Expr *ToExpr = Importer.Import(From.getAsExpr()))
+ return TemplateArgument(ToExpr);
+ return TemplateArgument();
+
+ case TemplateArgument::Pack: {
+ SmallVector<TemplateArgument, 2> ToPack;
+ ToPack.reserve(From.pack_size());
+ if (ImportTemplateArguments(From.pack_begin(), From.pack_size(), ToPack))
+ return TemplateArgument();
+
+ return TemplateArgument(
+ llvm::makeArrayRef(ToPack).copy(Importer.getToContext()));
+ }
+ }
+
+ llvm_unreachable("Invalid template argument kind");
+}
+
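+/// \brief Import a sequence of template arguments into ToArgs, returning
+/// true if any of them fails to import.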
+bool ASTNodeImporter::ImportTemplateArguments(const TemplateArgument *FromArgs,
+ unsigned NumFromArgs,
+ SmallVectorImpl<TemplateArgument> &ToArgs) {
+ for (unsigned I = 0; I != NumFromArgs; ++I) {
+ TemplateArgument To = ImportTemplateArgument(FromArgs[I]);
+ if (To.isNull() && !FromArgs[I].isNull())
+ return true;
+
+ ToArgs.push_back(To);
+ }
+
+ return false;
+}
+
+bool ASTNodeImporter::IsStructuralMatch(RecordDecl *FromRecord,
+ RecordDecl *ToRecord, bool Complain) {
+ // Eliminate a potential failure point where we attempt to re-import
+ // something we're trying to import while completing ToRecord.
+ Decl *ToOrigin = Importer.GetOriginalDecl(ToRecord);
+ if (ToOrigin) {
+ RecordDecl *ToOriginRecord = dyn_cast<RecordDecl>(ToOrigin);
+ if (ToOriginRecord)
+ ToRecord = ToOriginRecord;
+ }
+
+ StructuralEquivalenceContext Ctx(Importer.getFromContext(),
+ ToRecord->getASTContext(),
+ Importer.getNonEquivalentDecls(),
+ false, Complain);
+ return Ctx.IsStructurallyEquivalent(FromRecord, ToRecord);
+}
+
+bool ASTNodeImporter::IsStructuralMatch(VarDecl *FromVar, VarDecl *ToVar,
+ bool Complain) {
+ StructuralEquivalenceContext Ctx(
+ Importer.getFromContext(), Importer.getToContext(),
+ Importer.getNonEquivalentDecls(), false, Complain);
+ return Ctx.IsStructurallyEquivalent(FromVar, ToVar);
+}
+
+bool ASTNodeImporter::IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToEnum) {
+ StructuralEquivalenceContext Ctx(Importer.getFromContext(),
+ Importer.getToContext(),
+ Importer.getNonEquivalentDecls());
+ return Ctx.IsStructurallyEquivalent(FromEnum, ToEnum);
+}
+
+bool ASTNodeImporter::IsStructuralMatch(EnumConstantDecl *FromEC,
+ EnumConstantDecl *ToEC) {
+ const llvm::APSInt &FromVal = FromEC->getInitVal();
+ const llvm::APSInt &ToVal = ToEC->getInitVal();
+
+ return FromVal.isSigned() == ToVal.isSigned() &&
+ FromVal.getBitWidth() == ToVal.getBitWidth() &&
+ FromVal == ToVal;
+}
+
+bool ASTNodeImporter::IsStructuralMatch(ClassTemplateDecl *From,
+ ClassTemplateDecl *To) {
+ StructuralEquivalenceContext Ctx(Importer.getFromContext(),
+ Importer.getToContext(),
+ Importer.getNonEquivalentDecls());
+ return Ctx.IsStructurallyEquivalent(From, To);
+}
+
+bool ASTNodeImporter::IsStructuralMatch(VarTemplateDecl *From,
+ VarTemplateDecl *To) {
+ StructuralEquivalenceContext Ctx(Importer.getFromContext(),
+ Importer.getToContext(),
+ Importer.getNonEquivalentDecls());
+ return Ctx.IsStructurallyEquivalent(From, To);
+}
+
+Decl *ASTNodeImporter::VisitDecl(Decl *D) {
+ Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
+ << D->getDeclKindName();
+ return nullptr;
+}
+
+Decl *ASTNodeImporter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
+ TranslationUnitDecl *ToD =
+ Importer.getToContext().getTranslationUnitDecl();
+
+ Importer.Imported(D, ToD);
+
+ return ToD;
+}
+
+Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
+ // Import the major distinguishing characteristics of this namespace.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return nullptr;
+ if (ToD)
+ return ToD;
+
+ NamespaceDecl *MergeWithNamespace = nullptr;
+ if (!Name) {
+ // This is an anonymous namespace. Adopt an existing anonymous
+ // namespace if we can.
+ // FIXME: Not testable.
+ if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(DC))
+ MergeWithNamespace = TU->getAnonymousNamespace();
+ else
+ MergeWithNamespace = cast<NamespaceDecl>(DC)->getAnonymousNamespace();
+ } else {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Namespace))
+ continue;
+
+ if (NamespaceDecl *FoundNS = dyn_cast<NamespaceDecl>(FoundDecls[I])) {
+ MergeWithNamespace = FoundNS;
+ ConflictingDecls.clear();
+ break;
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, Decl::IDNS_Namespace,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ }
+ }
+
+ // Create the "to" namespace, if needed.
+ NamespaceDecl *ToNamespace = MergeWithNamespace;
+ if (!ToNamespace) {
+ ToNamespace = NamespaceDecl::Create(Importer.getToContext(), DC,
+ D->isInline(),
+ Importer.Import(D->getLocStart()),
+ Loc, Name.getAsIdentifierInfo(),
+ /*PrevDecl=*/nullptr);
+ ToNamespace->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToNamespace);
+
+ // If this is an anonymous namespace, register it as the anonymous
+ // namespace within its context.
+ if (!Name) {
+ if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(DC))
+ TU->setAnonymousNamespace(ToNamespace);
+ else
+ cast<NamespaceDecl>(DC)->setAnonymousNamespace(ToNamespace);
+ }
+ }
+ Importer.Imported(D, ToNamespace);
+
+ ImportDeclContext(D);
+
+ return ToNamespace;
+}
+
+Decl *ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
+ // Import the major distinguishing characteristics of this typedef.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return nullptr;
+ if (ToD)
+ return ToD;
+
+ // If this typedef is not in block scope, determine whether we've
+ // seen a typedef with the same name (that we can merge with) or any
+ // other entity by that name (which name lookup could conflict with).
+ if (!DC->isFunctionOrMethod()) {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ unsigned IDNS = Decl::IDNS_Ordinary;
+ SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ continue;
+ if (TypedefNameDecl *FoundTypedef =
+ dyn_cast<TypedefNameDecl>(FoundDecls[I])) {
+ if (Importer.IsStructurallyEquivalent(D->getUnderlyingType(),
+ FoundTypedef->getUnderlyingType()))
+ return Importer.Imported(D, FoundTypedef);
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, IDNS,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ if (!Name)
+ return nullptr;
+ }
+ }
+
+ // Import the underlying type of this typedef.
+ QualType T = Importer.Import(D->getUnderlyingType());
+ if (T.isNull())
+ return nullptr;
+
+ // Create the new typedef node.
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ SourceLocation StartL = Importer.Import(D->getLocStart());
+ TypedefNameDecl *ToTypedef;
+ if (IsAlias)
+ ToTypedef = TypeAliasDecl::Create(Importer.getToContext(), DC,
+ StartL, Loc,
+ Name.getAsIdentifierInfo(),
+ TInfo);
+ else
+ ToTypedef = TypedefDecl::Create(Importer.getToContext(), DC,
+ StartL, Loc,
+ Name.getAsIdentifierInfo(),
+ TInfo);
+
+ ToTypedef->setAccess(D->getAccess());
+ ToTypedef->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToTypedef);
+ LexicalDC->addDeclInternal(ToTypedef);
+
+ return ToTypedef;
+}
+
+Decl *ASTNodeImporter::VisitTypedefDecl(TypedefDecl *D) {
+ return VisitTypedefNameDecl(D, /*IsAlias=*/false);
+}
+
+Decl *ASTNodeImporter::VisitTypeAliasDecl(TypeAliasDecl *D) {
+ return VisitTypedefNameDecl(D, /*IsAlias=*/true);
+}
+
+Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
+ // Import the major distinguishing characteristics of this enum.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return nullptr;
+ if (ToD)
+ return ToD;
+
+ // Figure out what enum name we're looking for.
+ unsigned IDNS = Decl::IDNS_Tag;
+ DeclarationName SearchName = Name;
+ if (!SearchName && D->getTypedefNameForAnonDecl()) {
+ SearchName = Importer.Import(D->getTypedefNameForAnonDecl()->getDeclName());
+ IDNS = Decl::IDNS_Ordinary;
+ } else if (Importer.getToContext().getLangOpts().CPlusPlus)
+ IDNS |= Decl::IDNS_Ordinary;
+
+ // We may already have an enum of the same name; try to find and match it.
+ if (!DC->isFunctionOrMethod() && SearchName) {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ SmallVector<NamedDecl *, 2> FoundDecls;
+ // Use SearchName so an anonymous enum named via typedef is found too.
+ DC->getRedeclContext()->localUncachedLookup(SearchName, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ continue;
+
+ Decl *Found = FoundDecls[I];
+ if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Found)) {
+ if (const TagType *Tag = Typedef->getUnderlyingType()->getAs<TagType>())
+ Found = Tag->getDecl();
+ }
+
+ if (EnumDecl *FoundEnum = dyn_cast<EnumDecl>(Found)) {
+ if (IsStructuralMatch(D, FoundEnum))
+ return Importer.Imported(D, FoundEnum);
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, IDNS,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ }
+ }
+
+ // Create the enum declaration.
+ EnumDecl *D2 = EnumDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getLocStart()),
+ Loc, Name.getAsIdentifierInfo(), nullptr,
+ D->isScoped(), D->isScopedUsingClassTag(),
+ D->isFixed());
+ // Import the qualifier, if any.
+ D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ D2->setAccess(D->getAccess());
+ D2->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, D2);
+ LexicalDC->addDeclInternal(D2);
+
+ // Import the integer type.
+ QualType ToIntegerType = Importer.Import(D->getIntegerType());
+ if (ToIntegerType.isNull())
+ return nullptr;
+ D2->setIntegerType(ToIntegerType);
+
+ // Import the definition
+ if (D->isCompleteDefinition() && ImportDefinition(D, D2))
+ return nullptr;
+
+ return D2;
+}
+
+Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
+ // If this record has a definition in the translation unit we're coming from,
+ // but this particular declaration is not that definition, import the
+ // definition and map to that.
+ TagDecl *Definition = D->getDefinition();
+ if (Definition && Definition != D) {
+ Decl *ImportedDef = Importer.Import(Definition);
+ if (!ImportedDef)
+ return nullptr;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
+ // Import the major distinguishing characteristics of this record.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return nullptr;
+ if (ToD)
+ return ToD;
+
+ // Figure out what structure name we're looking for.
+ unsigned IDNS = Decl::IDNS_Tag;
+ DeclarationName SearchName = Name;
+ if (!SearchName && D->getTypedefNameForAnonDecl()) {
+ SearchName = Importer.Import(D->getTypedefNameForAnonDecl()->getDeclName());
+ IDNS = Decl::IDNS_Ordinary;
+ } else if (Importer.getToContext().getLangOpts().CPlusPlus)
+ IDNS |= Decl::IDNS_Ordinary;
+
+ // We may already have a record of the same name; try to find and match it.
+ RecordDecl *AdoptDecl = nullptr;
+ if (!DC->isFunctionOrMethod()) {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ SmallVector<NamedDecl *, 2> FoundDecls;
+ // As above, search by SearchName so typedef-named anonymous records match.
+ DC->getRedeclContext()->localUncachedLookup(SearchName, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ continue;
+
+ Decl *Found = FoundDecls[I];
+ if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Found)) {
+ if (const TagType *Tag = Typedef->getUnderlyingType()->getAs<TagType>())
+ Found = Tag->getDecl();
+ }
+
+ if (RecordDecl *FoundRecord = dyn_cast<RecordDecl>(Found)) {
+ if (D->isAnonymousStructOrUnion() &&
+ FoundRecord->isAnonymousStructOrUnion()) {
+ // If both anonymous structs/unions are in a record context, make sure
+ // they occur in the same location in the context records.
+ if (Optional<unsigned> Index1
+ = findAnonymousStructOrUnionIndex(D)) {
+ if (Optional<unsigned> Index2 =
+ findAnonymousStructOrUnionIndex(FoundRecord)) {
+ if (*Index1 != *Index2)
+ continue;
+ }
+ }
+ }
+
+ if (RecordDecl *FoundDef = FoundRecord->getDefinition()) {
+ if ((SearchName && !D->isCompleteDefinition())
+ || (D->isCompleteDefinition() &&
+ D->isAnonymousStructOrUnion()
+ == FoundDef->isAnonymousStructOrUnion() &&
+ IsStructuralMatch(D, FoundDef))) {
+ // The record types structurally match, or the "from" translation
+ // unit only had a forward declaration anyway; call it the same
+ // function.
+ // FIXME: For C++, we should also merge methods here.
+ return Importer.Imported(D, FoundDef);
+ }
+ } else if (!D->isCompleteDefinition()) {
+ // We have a forward declaration of this type, so adopt that forward
+ // declaration rather than building a new one.
+
+ // If one or both can be completed from external storage then try one
+ // last time to complete and compare them before doing this.
+
+ if (FoundRecord->hasExternalLexicalStorage() &&
+ !FoundRecord->isCompleteDefinition())
+ FoundRecord->getASTContext().getExternalSource()->CompleteType(FoundRecord);
+ if (D->hasExternalLexicalStorage())
+ D->getASTContext().getExternalSource()->CompleteType(D);
+
+ if (FoundRecord->isCompleteDefinition() &&
+ D->isCompleteDefinition() &&
+ !IsStructuralMatch(D, FoundRecord))
+ continue;
+
+ AdoptDecl = FoundRecord;
+ continue;
+ } else if (!SearchName) {
+ continue;
+ }
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty() && SearchName) {
+ Name = Importer.HandleNameConflict(Name, DC, IDNS,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ }
+ }
+
+ // Create the record declaration.
+ RecordDecl *D2 = AdoptDecl;
+ SourceLocation StartLoc = Importer.Import(D->getLocStart());
+ if (!D2) {
+ if (isa<CXXRecordDecl>(D)) {
+ CXXRecordDecl *D2CXX = CXXRecordDecl::Create(Importer.getToContext(),
+ D->getTagKind(),
+ DC, StartLoc, Loc,
+ Name.getAsIdentifierInfo());
+ D2 = D2CXX;
+ D2->setAccess(D->getAccess());
+ } else {
+ D2 = RecordDecl::Create(Importer.getToContext(), D->getTagKind(),
+ DC, StartLoc, Loc, Name.getAsIdentifierInfo());
+ }
+
+ D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ D2->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(D2);
+ if (D->isAnonymousStructOrUnion())
+ D2->setAnonymousStructOrUnion(true);
+ }
+
+ Importer.Imported(D, D2);
+
+ if (D->isCompleteDefinition() && ImportDefinition(D, D2, IDK_Default))
+ return nullptr;
+
+ return D2;
+}
+
+Decl *ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
+ // Import the major distinguishing characteristics of this enumerator.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return nullptr;
+ if (ToD)
+ return ToD;
+
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return nullptr;
+
+ // Determine whether there are any other declarations with the same name and
+ // in the same context.
+ if (!LexicalDC->isFunctionOrMethod()) {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ unsigned IDNS = Decl::IDNS_Ordinary;
+ SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ continue;
+
+ if (EnumConstantDecl *FoundEnumConstant
+ = dyn_cast<EnumConstantDecl>(FoundDecls[I])) {
+ if (IsStructuralMatch(D, FoundEnumConstant))
+ return Importer.Imported(D, FoundEnumConstant);
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, IDNS,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ if (!Name)
+ return nullptr;
+ }
+ }
+
+ Expr *Init = Importer.Import(D->getInitExpr());
+ if (D->getInitExpr() && !Init)
+ return nullptr;
+
+ EnumConstantDecl *ToEnumerator
+ = EnumConstantDecl::Create(Importer.getToContext(), cast<EnumDecl>(DC), Loc,
+ Name.getAsIdentifierInfo(), T,
+ Init, D->getInitVal());
+ ToEnumerator->setAccess(D->getAccess());
+ ToEnumerator->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToEnumerator);
+ LexicalDC->addDeclInternal(ToEnumerator);
+ return ToEnumerator;
+}
+
+Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
+ // Import the major distinguishing characteristics of this function.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return nullptr;
+ if (ToD)
+ return ToD;
+
+ // Try to find a function in our own ("to") context with the same name, same
+ // type, and in the same context as the function we're importing.
+ if (!LexicalDC->isFunctionOrMethod()) {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ unsigned IDNS = Decl::IDNS_Ordinary;
+ SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ continue;
+
+ if (FunctionDecl *FoundFunction = dyn_cast<FunctionDecl>(FoundDecls[I])) {
+ if (FoundFunction->hasExternalFormalLinkage() &&
+ D->hasExternalFormalLinkage()) {
+ if (Importer.IsStructurallyEquivalent(D->getType(),
+ FoundFunction->getType())) {
+ // FIXME: Actually try to merge the body and other attributes.
+ return Importer.Imported(D, FoundFunction);
+ }
+
+ // FIXME: Check for overloading more carefully, e.g., by boosting
+ // Sema::IsOverload out to the AST library.
+
+ // Function overloading is okay in C++.
+ if (Importer.getToContext().getLangOpts().CPlusPlus)
+ continue;
+
+ // Complain about inconsistent function types.
+ Importer.ToDiag(Loc, diag::err_odr_function_type_inconsistent)
+ << Name << D->getType() << FoundFunction->getType();
+ Importer.ToDiag(FoundFunction->getLocation(),
+ diag::note_odr_value_here)
+ << FoundFunction->getType();
+ }
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, IDNS,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ if (!Name)
+ return nullptr;
+ }
+ }
+
+ DeclarationNameInfo NameInfo(Name, Loc);
+ // Import additional name location/type info.
+ ImportDeclarationNameLoc(D->getNameInfo(), NameInfo);
+
+ QualType FromTy = D->getType();
+ bool usedDifferentExceptionSpec = false;
+
+  if (const FunctionProtoType *FromFPT =
+          D->getType()->getAs<FunctionProtoType>()) {
+ FunctionProtoType::ExtProtoInfo FromEPI = FromFPT->getExtProtoInfo();
+ // FunctionProtoType::ExtProtoInfo's ExceptionSpecDecl can point to the
+ // FunctionDecl that we are importing the FunctionProtoType for.
+ // To avoid an infinite recursion when importing, create the FunctionDecl
+ // with a simplified function type and update it afterwards.
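+    // (For example, a 'noexcept(expr)' specification whose expression names
+    // the function being imported would otherwise recurse into this import.)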
+ if (FromEPI.ExceptionSpec.SourceDecl ||
+ FromEPI.ExceptionSpec.SourceTemplate ||
+ FromEPI.ExceptionSpec.NoexceptExpr) {
+ FunctionProtoType::ExtProtoInfo DefaultEPI;
+ FromTy = Importer.getFromContext().getFunctionType(
+ FromFPT->getReturnType(), FromFPT->getParamTypes(), DefaultEPI);
+ usedDifferentExceptionSpec = true;
+ }
+ }
+
+ // Import the type.
+ QualType T = Importer.Import(FromTy);
+ if (T.isNull())
+ return nullptr;
+
+ // Import the function parameters.
+ SmallVector<ParmVarDecl *, 8> Parameters;
+ for (auto P : D->params()) {
+ ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(P));
+ if (!ToP)
+ return nullptr;
+
+ Parameters.push_back(ToP);
+ }
+
+ // Create the imported function.
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ FunctionDecl *ToFunction = nullptr;
+ SourceLocation InnerLocStart = Importer.Import(D->getInnerLocStart());
+ if (CXXConstructorDecl *FromConstructor = dyn_cast<CXXConstructorDecl>(D)) {
+ ToFunction = CXXConstructorDecl::Create(Importer.getToContext(),
+ cast<CXXRecordDecl>(DC),
+ InnerLocStart,
+ NameInfo, T, TInfo,
+ FromConstructor->isExplicit(),
+ D->isInlineSpecified(),
+ D->isImplicit(),
+ D->isConstexpr());
+ } else if (isa<CXXDestructorDecl>(D)) {
+ ToFunction = CXXDestructorDecl::Create(Importer.getToContext(),
+ cast<CXXRecordDecl>(DC),
+ InnerLocStart,
+ NameInfo, T, TInfo,
+ D->isInlineSpecified(),
+ D->isImplicit());
+ } else if (CXXConversionDecl *FromConversion
+ = dyn_cast<CXXConversionDecl>(D)) {
+ ToFunction = CXXConversionDecl::Create(Importer.getToContext(),
+ cast<CXXRecordDecl>(DC),
+ InnerLocStart,
+ NameInfo, T, TInfo,
+ D->isInlineSpecified(),
+ FromConversion->isExplicit(),
+ D->isConstexpr(),
+ Importer.Import(D->getLocEnd()));
+ } else if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ ToFunction = CXXMethodDecl::Create(Importer.getToContext(),
+ cast<CXXRecordDecl>(DC),
+ InnerLocStart,
+ NameInfo, T, TInfo,
+ Method->getStorageClass(),
+ Method->isInlineSpecified(),
+ D->isConstexpr(),
+ Importer.Import(D->getLocEnd()));
+ } else {
+ ToFunction = FunctionDecl::Create(Importer.getToContext(), DC,
+ InnerLocStart,
+ NameInfo, T, TInfo, D->getStorageClass(),
+ D->isInlineSpecified(),
+ D->hasWrittenPrototype(),
+ D->isConstexpr());
+ }
+
+ // Import the qualifier, if any.
+ ToFunction->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ ToFunction->setAccess(D->getAccess());
+ ToFunction->setLexicalDeclContext(LexicalDC);
+ ToFunction->setVirtualAsWritten(D->isVirtualAsWritten());
+ ToFunction->setTrivial(D->isTrivial());
+ ToFunction->setPure(D->isPure());
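+  // Record the mapping from D to ToFunction before importing anything else,
+  // so that recursive references from the body or from the re-imported
+  // exception specification below resolve to ToFunction rather than
+  // triggering a second import of D.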
+ Importer.Imported(D, ToFunction);
+
+ // Set the parameters.
+ for (unsigned I = 0, N = Parameters.size(); I != N; ++I) {
+ Parameters[I]->setOwningFunction(ToFunction);
+ ToFunction->addDeclInternal(Parameters[I]);
+ }
+ ToFunction->setParams(Parameters);
+
+ if (usedDifferentExceptionSpec) {
+ // Update FunctionProtoType::ExtProtoInfo.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return nullptr;
+ ToFunction->setType(T);
+ }
+
+ // Import the body, if any.
+ if (Stmt *FromBody = D->getBody()) {
+ if (Stmt *ToBody = Importer.Import(FromBody)) {
+ ToFunction->setBody(ToBody);
+ }
+ }
+
+ // FIXME: Other bits to merge?
+
+ // Add this function to the lexical context.
+ LexicalDC->addDeclInternal(ToFunction);
+
+ return ToFunction;
+}
+
+Decl *ASTNodeImporter::VisitCXXMethodDecl(CXXMethodDecl *D) {
+ return VisitFunctionDecl(D);
+}
+
+Decl *ASTNodeImporter::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
+ return VisitCXXMethodDecl(D);
+}
+
+Decl *ASTNodeImporter::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
+ return VisitCXXMethodDecl(D);
+}
+
+Decl *ASTNodeImporter::VisitCXXConversionDecl(CXXConversionDecl *D) {
+ return VisitCXXMethodDecl(D);
+}
+
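+// Compute the 1-based position of the field F among the fields and indirect
+// fields of its owning record. Anonymous fields carry no name to look up, so
+// the importer matches them by this index instead.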
+static unsigned getFieldIndex(Decl *F) {
+ RecordDecl *Owner = dyn_cast<RecordDecl>(F->getDeclContext());
+ if (!Owner)
+ return 0;
+
+ unsigned Index = 1;
+ for (const auto *D : Owner->noload_decls()) {
+ if (D == F)
+ return Index;
+
+ if (isa<FieldDecl>(*D) || isa<IndirectFieldDecl>(*D))
+ ++Index;
+ }
+
+ return Index;
+}
+
+Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
+  // Import the major distinguishing characteristics of a field.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return nullptr;
+ if (ToD)
+ return ToD;
+
+ // Determine whether we've already imported this field.
+ SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (FieldDecl *FoundField = dyn_cast<FieldDecl>(FoundDecls[I])) {
+ // For anonymous fields, match up by index.
+ if (!Name && getFieldIndex(D) != getFieldIndex(FoundField))
+ continue;
+
+ if (Importer.IsStructurallyEquivalent(D->getType(),
+ FoundField->getType())) {
+ Importer.Imported(D, FoundField);
+ return FoundField;
+ }
+
+ Importer.ToDiag(Loc, diag::err_odr_field_type_inconsistent)
+ << Name << D->getType() << FoundField->getType();
+ Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
+ << FoundField->getType();
+ return nullptr;
+ }
+ }
+
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return nullptr;
+
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ Expr *BitWidth = Importer.Import(D->getBitWidth());
+ if (!BitWidth && D->getBitWidth())
+ return nullptr;
+
+ FieldDecl *ToField = FieldDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getInnerLocStart()),
+ Loc, Name.getAsIdentifierInfo(),
+ T, TInfo, BitWidth, D->isMutable(),
+ D->getInClassInitStyle());
+ ToField->setAccess(D->getAccess());
+ ToField->setLexicalDeclContext(LexicalDC);
+ if (ToField->hasInClassInitializer())
+ ToField->setInClassInitializer(D->getInClassInitializer());
+ ToField->setImplicit(D->isImplicit());
+ Importer.Imported(D, ToField);
+ LexicalDC->addDeclInternal(ToField);
+ return ToField;
+}
+
+Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
+  // Import the major distinguishing characteristics of an indirect field.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return nullptr;
+ if (ToD)
+ return ToD;
+
+ // Determine whether we've already imported this field.
+ SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (IndirectFieldDecl *FoundField
+ = dyn_cast<IndirectFieldDecl>(FoundDecls[I])) {
+ // For anonymous indirect fields, match up by index.
+ if (!Name && getFieldIndex(D) != getFieldIndex(FoundField))
+ continue;
+
+ if (Importer.IsStructurallyEquivalent(D->getType(),
+ FoundField->getType(),
+ !Name.isEmpty())) {
+ Importer.Imported(D, FoundField);
+ return FoundField;
+ }
+
+ // If there are more anonymous fields to check, continue.
+ if (!Name && I < N-1)
+ continue;
+
+ Importer.ToDiag(Loc, diag::err_odr_field_type_inconsistent)
+ << Name << D->getType() << FoundField->getType();
+ Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
+ << FoundField->getType();
+ return nullptr;
+ }
+ }
+
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return nullptr;
+
+ NamedDecl **NamedChain =
+    new (Importer.getToContext()) NamedDecl*[D->getChainingSize()];
+
+ unsigned i = 0;
+ for (auto *PI : D->chain()) {
+    Decl *Chained = Importer.Import(PI);
+    if (!Chained)
+      return nullptr;
+    NamedChain[i++] = cast<NamedDecl>(Chained);
+ }
+
+ IndirectFieldDecl *ToIndirectField = IndirectFieldDecl::Create(
+ Importer.getToContext(), DC, Loc, Name.getAsIdentifierInfo(), T,
+ NamedChain, D->getChainingSize());
+
+ for (const auto *Attr : D->attrs())
+ ToIndirectField->addAttr(Attr->clone(Importer.getToContext()));
+
+ ToIndirectField->setAccess(D->getAccess());
+ ToIndirectField->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToIndirectField);
+ LexicalDC->addDeclInternal(ToIndirectField);
+ return ToIndirectField;
+}
+
+Decl *ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
+ // Import the major distinguishing characteristics of an ivar.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return nullptr;
+ if (ToD)
+ return ToD;
+
+  // Determine whether we've already imported this ivar.
+ SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (ObjCIvarDecl *FoundIvar = dyn_cast<ObjCIvarDecl>(FoundDecls[I])) {
+ if (Importer.IsStructurallyEquivalent(D->getType(),
+ FoundIvar->getType())) {
+ Importer.Imported(D, FoundIvar);
+ return FoundIvar;
+ }
+
+ Importer.ToDiag(Loc, diag::err_odr_ivar_type_inconsistent)
+ << Name << D->getType() << FoundIvar->getType();
+ Importer.ToDiag(FoundIvar->getLocation(), diag::note_odr_value_here)
+ << FoundIvar->getType();
+ return nullptr;
+ }
+ }
+
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return nullptr;
+
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ Expr *BitWidth = Importer.Import(D->getBitWidth());
+ if (!BitWidth && D->getBitWidth())
+ return nullptr;
+
+ ObjCIvarDecl *ToIvar = ObjCIvarDecl::Create(Importer.getToContext(),
+ cast<ObjCContainerDecl>(DC),
+ Importer.Import(D->getInnerLocStart()),
+ Loc, Name.getAsIdentifierInfo(),
+ T, TInfo, D->getAccessControl(),
+ BitWidth, D->getSynthesize());
+ ToIvar->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToIvar);
+ LexicalDC->addDeclInternal(ToIvar);
+ return ToIvar;
+}
+
+Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
+ // Import the major distinguishing characteristics of a variable.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return nullptr;
+ if (ToD)
+ return ToD;
+
+ // Try to find a variable in our own ("to") context with the same name and
+ // in the same context as the variable we're importing.
+ if (D->isFileVarDecl()) {
+ VarDecl *MergeWithVar = nullptr;
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ unsigned IDNS = Decl::IDNS_Ordinary;
+ SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ continue;
+
+ if (VarDecl *FoundVar = dyn_cast<VarDecl>(FoundDecls[I])) {
+ // We have found a variable that we may need to merge with. Check it.
+ if (FoundVar->hasExternalFormalLinkage() &&
+ D->hasExternalFormalLinkage()) {
+ if (Importer.IsStructurallyEquivalent(D->getType(),
+ FoundVar->getType())) {
+ MergeWithVar = FoundVar;
+ break;
+ }
+
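+          // The types differ, but an incomplete array declaration (e.g.
+          // 'extern int a[];') is compatible with a constant-sized
+          // definition of the same array; keep the complete type.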
+ const ArrayType *FoundArray
+ = Importer.getToContext().getAsArrayType(FoundVar->getType());
+ const ArrayType *TArray
+ = Importer.getToContext().getAsArrayType(D->getType());
+ if (FoundArray && TArray) {
+ if (isa<IncompleteArrayType>(FoundArray) &&
+ isa<ConstantArrayType>(TArray)) {
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return nullptr;
+
+ FoundVar->setType(T);
+ MergeWithVar = FoundVar;
+ break;
+ } else if (isa<IncompleteArrayType>(TArray) &&
+ isa<ConstantArrayType>(FoundArray)) {
+ MergeWithVar = FoundVar;
+ break;
+ }
+ }
+
+ Importer.ToDiag(Loc, diag::err_odr_variable_type_inconsistent)
+ << Name << D->getType() << FoundVar->getType();
+ Importer.ToDiag(FoundVar->getLocation(), diag::note_odr_value_here)
+ << FoundVar->getType();
+ }
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (MergeWithVar) {
+ // An equivalent variable with external linkage has been found. Link
+ // the two declarations, then merge them.
+ Importer.Imported(D, MergeWithVar);
+
+ if (VarDecl *DDef = D->getDefinition()) {
+ if (VarDecl *ExistingDef = MergeWithVar->getDefinition()) {
+ Importer.ToDiag(ExistingDef->getLocation(),
+ diag::err_odr_variable_multiple_def)
+ << Name;
+ Importer.FromDiag(DDef->getLocation(), diag::note_odr_defined_here);
+ } else {
+ Expr *Init = Importer.Import(DDef->getInit());
+ MergeWithVar->setInit(Init);
+ if (DDef->isInitKnownICE()) {
+ EvaluatedStmt *Eval = MergeWithVar->ensureEvaluatedStmt();
+ Eval->CheckedICE = true;
+ Eval->IsICE = DDef->isInitICE();
+ }
+ }
+ }
+
+ return MergeWithVar;
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, IDNS,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ if (!Name)
+ return nullptr;
+ }
+ }
+
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return nullptr;
+
+ // Create the imported variable.
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ VarDecl *ToVar = VarDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getInnerLocStart()),
+ Loc, Name.getAsIdentifierInfo(),
+ T, TInfo,
+ D->getStorageClass());
+ ToVar->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ ToVar->setAccess(D->getAccess());
+ ToVar->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToVar);
+ LexicalDC->addDeclInternal(ToVar);
+
+ if (!D->isFileVarDecl() &&
+ D->isUsed())
+ ToVar->setIsUsed();
+
+ // Merge the initializer.
+ if (ImportDefinition(D, ToVar))
+ return nullptr;
+
+ return ToVar;
+}
+
+Decl *ASTNodeImporter::VisitImplicitParamDecl(ImplicitParamDecl *D) {
+ // Parameters are created in the translation unit's context, then moved
+ // into the function declaration's context afterward.
+ DeclContext *DC = Importer.getToContext().getTranslationUnitDecl();
+
+ // Import the name of this declaration.
+ DeclarationName Name = Importer.Import(D->getDeclName());
+ if (D->getDeclName() && !Name)
+ return nullptr;
+
+ // Import the location of this declaration.
+ SourceLocation Loc = Importer.Import(D->getLocation());
+
+ // Import the parameter's type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return nullptr;
+
+ // Create the imported parameter.
+ ImplicitParamDecl *ToParm
+ = ImplicitParamDecl::Create(Importer.getToContext(), DC,
+ Loc, Name.getAsIdentifierInfo(),
+ T);
+ return Importer.Imported(D, ToParm);
+}
+
+Decl *ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
+ // Parameters are created in the translation unit's context, then moved
+ // into the function declaration's context afterward.
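+  // (The move happens when the owning function or method is imported and
+  // calls setOwningFunction on each of its parameters.)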
+ DeclContext *DC = Importer.getToContext().getTranslationUnitDecl();
+
+ // Import the name of this declaration.
+ DeclarationName Name = Importer.Import(D->getDeclName());
+ if (D->getDeclName() && !Name)
+ return nullptr;
+
+ // Import the location of this declaration.
+ SourceLocation Loc = Importer.Import(D->getLocation());
+
+ // Import the parameter's type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return nullptr;
+
+ // Create the imported parameter.
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ ParmVarDecl *ToParm = ParmVarDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getInnerLocStart()),
+ Loc, Name.getAsIdentifierInfo(),
+ T, TInfo, D->getStorageClass(),
+ /*FIXME: Default argument*/nullptr);
+ ToParm->setHasInheritedDefaultArg(D->hasInheritedDefaultArg());
+
+ if (D->isUsed())
+ ToParm->setIsUsed();
+
+ return Importer.Imported(D, ToParm);
+}
+
+Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
+ // Import the major distinguishing characteristics of a method.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return nullptr;
+ if (ToD)
+ return ToD;
+
+ SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (ObjCMethodDecl *FoundMethod = dyn_cast<ObjCMethodDecl>(FoundDecls[I])) {
+ if (FoundMethod->isInstanceMethod() != D->isInstanceMethod())
+ continue;
+
+ // Check return types.
+ if (!Importer.IsStructurallyEquivalent(D->getReturnType(),
+ FoundMethod->getReturnType())) {
+ Importer.ToDiag(Loc, diag::err_odr_objc_method_result_type_inconsistent)
+ << D->isInstanceMethod() << Name << D->getReturnType()
+ << FoundMethod->getReturnType();
+ Importer.ToDiag(FoundMethod->getLocation(),
+ diag::note_odr_objc_method_here)
+ << D->isInstanceMethod() << Name;
+ return nullptr;
+ }
+
+ // Check the number of parameters.
+ if (D->param_size() != FoundMethod->param_size()) {
+ Importer.ToDiag(Loc, diag::err_odr_objc_method_num_params_inconsistent)
+ << D->isInstanceMethod() << Name
+ << D->param_size() << FoundMethod->param_size();
+ Importer.ToDiag(FoundMethod->getLocation(),
+ diag::note_odr_objc_method_here)
+ << D->isInstanceMethod() << Name;
+ return nullptr;
+ }
+
+ // Check parameter types.
+ for (ObjCMethodDecl::param_iterator P = D->param_begin(),
+ PEnd = D->param_end(), FoundP = FoundMethod->param_begin();
+ P != PEnd; ++P, ++FoundP) {
+ if (!Importer.IsStructurallyEquivalent((*P)->getType(),
+ (*FoundP)->getType())) {
+ Importer.FromDiag((*P)->getLocation(),
+ diag::err_odr_objc_method_param_type_inconsistent)
+ << D->isInstanceMethod() << Name
+ << (*P)->getType() << (*FoundP)->getType();
+ Importer.ToDiag((*FoundP)->getLocation(), diag::note_odr_value_here)
+ << (*FoundP)->getType();
+ return nullptr;
+ }
+ }
+
+      // Check variadic/non-variadic.
+ if (D->isVariadic() != FoundMethod->isVariadic()) {
+ Importer.ToDiag(Loc, diag::err_odr_objc_method_variadic_inconsistent)
+ << D->isInstanceMethod() << Name;
+ Importer.ToDiag(FoundMethod->getLocation(),
+ diag::note_odr_objc_method_here)
+ << D->isInstanceMethod() << Name;
+ return nullptr;
+ }
+
+ // FIXME: Any other bits we need to merge?
+ return Importer.Imported(D, FoundMethod);
+ }
+ }
+
+ // Import the result type.
+ QualType ResultTy = Importer.Import(D->getReturnType());
+ if (ResultTy.isNull())
+ return nullptr;
+
+ TypeSourceInfo *ReturnTInfo = Importer.Import(D->getReturnTypeSourceInfo());
+
+ ObjCMethodDecl *ToMethod = ObjCMethodDecl::Create(
+ Importer.getToContext(), Loc, Importer.Import(D->getLocEnd()),
+ Name.getObjCSelector(), ResultTy, ReturnTInfo, DC, D->isInstanceMethod(),
+ D->isVariadic(), D->isPropertyAccessor(), D->isImplicit(), D->isDefined(),
+ D->getImplementationControl(), D->hasRelatedResultType());
+
+ // FIXME: When we decide to merge method definitions, we'll need to
+ // deal with implicit parameters.
+
+ // Import the parameters
+ SmallVector<ParmVarDecl *, 5> ToParams;
+ for (auto *FromP : D->params()) {
+ ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(FromP));
+ if (!ToP)
+ return nullptr;
+
+ ToParams.push_back(ToP);
+ }
+
+ // Set the parameters.
+ for (unsigned I = 0, N = ToParams.size(); I != N; ++I) {
+ ToParams[I]->setOwningFunction(ToMethod);
+ ToMethod->addDeclInternal(ToParams[I]);
+ }
+ SmallVector<SourceLocation, 12> SelLocs;
+ D->getSelectorLocs(SelLocs);
+ ToMethod->setMethodParams(Importer.getToContext(), ToParams, SelLocs);
+
+ ToMethod->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToMethod);
+ LexicalDC->addDeclInternal(ToMethod);
+ return ToMethod;
+}
+
+Decl *ASTNodeImporter::VisitObjCTypeParamDecl(ObjCTypeParamDecl *D) {
+  // Import the major distinguishing characteristics of a type parameter.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return nullptr;
+ if (ToD)
+ return ToD;
+
+ TypeSourceInfo *BoundInfo = Importer.Import(D->getTypeSourceInfo());
+ if (!BoundInfo)
+ return nullptr;
+
+ ObjCTypeParamDecl *Result = ObjCTypeParamDecl::Create(
+ Importer.getToContext(), DC,
+ D->getVariance(),
+ Importer.Import(D->getVarianceLoc()),
+ D->getIndex(),
+ Importer.Import(D->getLocation()),
+ Name.getAsIdentifierInfo(),
+ Importer.Import(D->getColonLoc()),
+ BoundInfo);
+ Importer.Imported(D, Result);
+ Result->setLexicalDeclContext(LexicalDC);
+ return Result;
+}
+
+Decl *ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
+ // Import the major distinguishing characteristics of a category.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return nullptr;
+ if (ToD)
+ return ToD;
+
+ ObjCInterfaceDecl *ToInterface
+ = cast_or_null<ObjCInterfaceDecl>(Importer.Import(D->getClassInterface()));
+ if (!ToInterface)
+ return nullptr;
+
+ // Determine if we've already encountered this category.
+ ObjCCategoryDecl *MergeWithCategory
+ = ToInterface->FindCategoryDeclaration(Name.getAsIdentifierInfo());
+ ObjCCategoryDecl *ToCategory = MergeWithCategory;
+ if (!ToCategory) {
+ ToCategory = ObjCCategoryDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getAtStartLoc()),
+ Loc,
+ Importer.Import(D->getCategoryNameLoc()),
+ Name.getAsIdentifierInfo(),
+ ToInterface,
+ /*TypeParamList=*/nullptr,
+ Importer.Import(D->getIvarLBraceLoc()),
+ Importer.Import(D->getIvarRBraceLoc()));
+ ToCategory->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToCategory);
+ Importer.Imported(D, ToCategory);
+ // Import the type parameter list after calling Imported, to avoid
+ // loops when bringing in their DeclContext.
+ ToCategory->setTypeParamList(ImportObjCTypeParamList(
+ D->getTypeParamList()));
+
+ // Import protocols
+ SmallVector<ObjCProtocolDecl *, 4> Protocols;
+ SmallVector<SourceLocation, 4> ProtocolLocs;
+ ObjCCategoryDecl::protocol_loc_iterator FromProtoLoc
+ = D->protocol_loc_begin();
+ for (ObjCCategoryDecl::protocol_iterator FromProto = D->protocol_begin(),
+ FromProtoEnd = D->protocol_end();
+ FromProto != FromProtoEnd;
+ ++FromProto, ++FromProtoLoc) {
+ ObjCProtocolDecl *ToProto
+ = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
+ if (!ToProto)
+ return nullptr;
+ Protocols.push_back(ToProto);
+ ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+ }
+
+ // FIXME: If we're merging, make sure that the protocol list is the same.
+ ToCategory->setProtocolList(Protocols.data(), Protocols.size(),
+ ProtocolLocs.data(), Importer.getToContext());
+
+ } else {
+ Importer.Imported(D, ToCategory);
+ }
+
+ // Import all of the members of this category.
+ ImportDeclContext(D);
+
+ // If we have an implementation, import it as well.
+ if (D->getImplementation()) {
+ ObjCCategoryImplDecl *Impl
+ = cast_or_null<ObjCCategoryImplDecl>(
+ Importer.Import(D->getImplementation()));
+ if (!Impl)
+ return nullptr;
+
+ ToCategory->setImplementation(Impl);
+ }
+
+ return ToCategory;
+}
+
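+// Import the definition of an Objective-C protocol: start the definition,
+// import the inherited protocol list and, when forced, the protocol's
+// members. Returns true on failure, false on success.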
+bool ASTNodeImporter::ImportDefinition(ObjCProtocolDecl *From,
+ ObjCProtocolDecl *To,
+ ImportDefinitionKind Kind) {
+ if (To->getDefinition()) {
+ if (shouldForceImportDeclContext(Kind))
+ ImportDeclContext(From);
+ return false;
+ }
+
+ // Start the protocol definition
+ To->startDefinition();
+
+ // Import protocols
+ SmallVector<ObjCProtocolDecl *, 4> Protocols;
+ SmallVector<SourceLocation, 4> ProtocolLocs;
+ ObjCProtocolDecl::protocol_loc_iterator
+ FromProtoLoc = From->protocol_loc_begin();
+ for (ObjCProtocolDecl::protocol_iterator FromProto = From->protocol_begin(),
+ FromProtoEnd = From->protocol_end();
+ FromProto != FromProtoEnd;
+ ++FromProto, ++FromProtoLoc) {
+ ObjCProtocolDecl *ToProto
+ = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
+ if (!ToProto)
+ return true;
+ Protocols.push_back(ToProto);
+ ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+ }
+
+ // FIXME: If we're merging, make sure that the protocol list is the same.
+ To->setProtocolList(Protocols.data(), Protocols.size(),
+ ProtocolLocs.data(), Importer.getToContext());
+
+ if (shouldForceImportDeclContext(Kind)) {
+ // Import all of the members of this protocol.
+ ImportDeclContext(From, /*ForceImport=*/true);
+ }
+ return false;
+}
+
+Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
+ // If this protocol has a definition in the translation unit we're coming
+ // from, but this particular declaration is not that definition, import the
+ // definition and map to that.
+ ObjCProtocolDecl *Definition = D->getDefinition();
+ if (Definition && Definition != D) {
+ Decl *ImportedDef = Importer.Import(Definition);
+ if (!ImportedDef)
+ return nullptr;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
+ // Import the major distinguishing characteristics of a protocol.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return nullptr;
+ if (ToD)
+ return ToD;
+
+ ObjCProtocolDecl *MergeWithProtocol = nullptr;
+ SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_ObjCProtocol))
+ continue;
+
+ if ((MergeWithProtocol = dyn_cast<ObjCProtocolDecl>(FoundDecls[I])))
+ break;
+ }
+
+ ObjCProtocolDecl *ToProto = MergeWithProtocol;
+ if (!ToProto) {
+ ToProto = ObjCProtocolDecl::Create(Importer.getToContext(), DC,
+ Name.getAsIdentifierInfo(), Loc,
+ Importer.Import(D->getAtStartLoc()),
+ /*PrevDecl=*/nullptr);
+ ToProto->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToProto);
+ }
+
+ Importer.Imported(D, ToProto);
+
+ if (D->isThisDeclarationADefinition() && ImportDefinition(D, ToProto))
+ return nullptr;
+
+ return ToProto;
+}
+
+Decl *ASTNodeImporter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
+ DeclContext *DC = Importer.ImportContext(D->getDeclContext());
+ DeclContext *LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+
+ SourceLocation ExternLoc = Importer.Import(D->getExternLoc());
+ SourceLocation LangLoc = Importer.Import(D->getLocation());
+
+ bool HasBraces = D->hasBraces();
+
+ LinkageSpecDecl *ToLinkageSpec =
+ LinkageSpecDecl::Create(Importer.getToContext(),
+ DC,
+ ExternLoc,
+ LangLoc,
+ D->getLanguage(),
+ HasBraces);
+
+ if (HasBraces) {
+ SourceLocation RBraceLoc = Importer.Import(D->getRBraceLoc());
+ ToLinkageSpec->setRBraceLoc(RBraceLoc);
+ }
+
+ ToLinkageSpec->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToLinkageSpec);
+
+ Importer.Imported(D, ToLinkageSpec);
+
+ return ToLinkageSpec;
+}
+
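+// Import the definition of an Objective-C class: superclass, protocol list,
+// known categories, and @implementation, diagnosing superclass mismatches
+// against a definition that already exists in the "to" context. Returns true
+// on failure, false on success.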
+bool ASTNodeImporter::ImportDefinition(ObjCInterfaceDecl *From,
+ ObjCInterfaceDecl *To,
+ ImportDefinitionKind Kind) {
+ if (To->getDefinition()) {
+ // Check consistency of superclass.
+ ObjCInterfaceDecl *FromSuper = From->getSuperClass();
+ if (FromSuper) {
+ FromSuper = cast_or_null<ObjCInterfaceDecl>(Importer.Import(FromSuper));
+ if (!FromSuper)
+ return true;
+ }
+
+ ObjCInterfaceDecl *ToSuper = To->getSuperClass();
+ if ((bool)FromSuper != (bool)ToSuper ||
+ (FromSuper && !declaresSameEntity(FromSuper, ToSuper))) {
+ Importer.ToDiag(To->getLocation(),
+ diag::err_odr_objc_superclass_inconsistent)
+ << To->getDeclName();
+ if (ToSuper)
+ Importer.ToDiag(To->getSuperClassLoc(), diag::note_odr_objc_superclass)
+ << To->getSuperClass()->getDeclName();
+ else
+ Importer.ToDiag(To->getLocation(),
+ diag::note_odr_objc_missing_superclass);
+ if (From->getSuperClass())
+ Importer.FromDiag(From->getSuperClassLoc(),
+ diag::note_odr_objc_superclass)
+ << From->getSuperClass()->getDeclName();
+ else
+ Importer.FromDiag(From->getLocation(),
+ diag::note_odr_objc_missing_superclass);
+ }
+
+ if (shouldForceImportDeclContext(Kind))
+ ImportDeclContext(From);
+ return false;
+ }
+
+ // Start the definition.
+ To->startDefinition();
+
+ // If this class has a superclass, import it.
+ if (From->getSuperClass()) {
+ TypeSourceInfo *SuperTInfo = Importer.Import(From->getSuperClassTInfo());
+ if (!SuperTInfo)
+ return true;
+
+ To->setSuperClass(SuperTInfo);
+ }
+
+ // Import protocols
+ SmallVector<ObjCProtocolDecl *, 4> Protocols;
+ SmallVector<SourceLocation, 4> ProtocolLocs;
+ ObjCInterfaceDecl::protocol_loc_iterator
+ FromProtoLoc = From->protocol_loc_begin();
+
+ for (ObjCInterfaceDecl::protocol_iterator FromProto = From->protocol_begin(),
+ FromProtoEnd = From->protocol_end();
+ FromProto != FromProtoEnd;
+ ++FromProto, ++FromProtoLoc) {
+ ObjCProtocolDecl *ToProto
+ = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
+ if (!ToProto)
+ return true;
+ Protocols.push_back(ToProto);
+ ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+ }
+
+ // FIXME: If we're merging, make sure that the protocol list is the same.
+ To->setProtocolList(Protocols.data(), Protocols.size(),
+ ProtocolLocs.data(), Importer.getToContext());
+
+ // Import categories. When the categories themselves are imported, they'll
+ // hook themselves into this interface.
+ for (auto *Cat : From->known_categories())
+ Importer.Import(Cat);
+
+ // If we have an @implementation, import it as well.
+ if (From->getImplementation()) {
+ ObjCImplementationDecl *Impl = cast_or_null<ObjCImplementationDecl>(
+ Importer.Import(From->getImplementation()));
+ if (!Impl)
+ return true;
+
+ To->setImplementation(Impl);
+ }
+
+ if (shouldForceImportDeclContext(Kind)) {
+ // Import all of the members of this class.
+ ImportDeclContext(From, /*ForceImport=*/true);
+ }
+ return false;
+}
+
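+// Import an Objective-C type parameter list, returning null if the list is
+// absent or any of its parameters fails to import.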
+ObjCTypeParamList *
+ASTNodeImporter::ImportObjCTypeParamList(ObjCTypeParamList *list) {
+ if (!list)
+ return nullptr;
+
+ SmallVector<ObjCTypeParamDecl *, 4> toTypeParams;
+ for (auto fromTypeParam : *list) {
+ auto toTypeParam = cast_or_null<ObjCTypeParamDecl>(
+ Importer.Import(fromTypeParam));
+ if (!toTypeParam)
+ return nullptr;
+
+ toTypeParams.push_back(toTypeParam);
+ }
+
+ return ObjCTypeParamList::create(Importer.getToContext(),
+ Importer.Import(list->getLAngleLoc()),
+ toTypeParams,
+ Importer.Import(list->getRAngleLoc()));
+}
+
+Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
+ // If this class has a definition in the translation unit we're coming from,
+ // but this particular declaration is not that definition, import the
+ // definition and map to that.
+ ObjCInterfaceDecl *Definition = D->getDefinition();
+ if (Definition && Definition != D) {
+ Decl *ImportedDef = Importer.Import(Definition);
+ if (!ImportedDef)
+ return nullptr;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
+ // Import the major distinguishing characteristics of an @interface.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return nullptr;
+ if (ToD)
+ return ToD;
+
+ // Look for an existing interface with the same name.
+ ObjCInterfaceDecl *MergeWithIface = nullptr;
+ SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+ continue;
+
+ if ((MergeWithIface = dyn_cast<ObjCInterfaceDecl>(FoundDecls[I])))
+ break;
+ }
+
+ // Create an interface declaration, if one does not already exist.
+ ObjCInterfaceDecl *ToIface = MergeWithIface;
+ if (!ToIface) {
+ ToIface = ObjCInterfaceDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getAtStartLoc()),
+ Name.getAsIdentifierInfo(),
+ /*TypeParamList=*/nullptr,
+ /*PrevDecl=*/nullptr, Loc,
+ D->isImplicitInterfaceDecl());
+ ToIface->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToIface);
+ }
+ Importer.Imported(D, ToIface);
+ // Import the type parameter list after calling Imported, to avoid
+ // loops when bringing in their DeclContext.
+ ToIface->setTypeParamList(ImportObjCTypeParamList(
+ D->getTypeParamListAsWritten()));
+
+ if (D->isThisDeclarationADefinition() && ImportDefinition(D, ToIface))
+ return nullptr;
+
+ return ToIface;
+}
+
+Decl *ASTNodeImporter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
+ ObjCCategoryDecl *Category = cast_or_null<ObjCCategoryDecl>(
+ Importer.Import(D->getCategoryDecl()));
+ if (!Category)
+ return nullptr;
+
+ ObjCCategoryImplDecl *ToImpl = Category->getImplementation();
+ if (!ToImpl) {
+ DeclContext *DC = Importer.ImportContext(D->getDeclContext());
+ if (!DC)
+ return nullptr;
+
+ SourceLocation CategoryNameLoc = Importer.Import(D->getCategoryNameLoc());
+ ToImpl = ObjCCategoryImplDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getIdentifier()),
+ Category->getClassInterface(),
+ Importer.Import(D->getLocation()),
+ Importer.Import(D->getAtStartLoc()),
+ CategoryNameLoc);
+
+ DeclContext *LexicalDC = DC;
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return nullptr;
+
+ ToImpl->setLexicalDeclContext(LexicalDC);
+ }
+
+ LexicalDC->addDeclInternal(ToImpl);
+ Category->setImplementation(ToImpl);
+ }
+
+ Importer.Imported(D, ToImpl);
+ ImportDeclContext(D);
+ return ToImpl;
+}
+
+Decl *ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
+ // Find the corresponding interface.
+ ObjCInterfaceDecl *Iface = cast_or_null<ObjCInterfaceDecl>(
+ Importer.Import(D->getClassInterface()));
+ if (!Iface)
+ return nullptr;
+
+ // Import the superclass, if any.
+ ObjCInterfaceDecl *Super = nullptr;
+ if (D->getSuperClass()) {
+ Super = cast_or_null<ObjCInterfaceDecl>(
+ Importer.Import(D->getSuperClass()));
+ if (!Super)
+ return nullptr;
+ }
+
+ ObjCImplementationDecl *Impl = Iface->getImplementation();
+ if (!Impl) {
+ // We haven't imported an implementation yet. Create a new @implementation
+ // now.
+ Impl = ObjCImplementationDecl::Create(Importer.getToContext(),
+ Importer.ImportContext(D->getDeclContext()),
+ Iface, Super,
+ Importer.Import(D->getLocation()),
+ Importer.Import(D->getAtStartLoc()),
+ Importer.Import(D->getSuperClassLoc()),
+ Importer.Import(D->getIvarLBraceLoc()),
+ Importer.Import(D->getIvarRBraceLoc()));
+
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ DeclContext *LexicalDC
+ = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return nullptr;
+ Impl->setLexicalDeclContext(LexicalDC);
+ }
+
+ // Associate the implementation with the class it implements.
+ Iface->setImplementation(Impl);
+ Importer.Imported(D, Iface->getImplementation());
+ } else {
+ Importer.Imported(D, Iface->getImplementation());
+
+ // Verify that the existing @implementation has the same superclass.
+ if ((Super && !Impl->getSuperClass()) ||
+ (!Super && Impl->getSuperClass()) ||
+ (Super && Impl->getSuperClass() &&
+ !declaresSameEntity(Super->getCanonicalDecl(),
+ Impl->getSuperClass()))) {
+ Importer.ToDiag(Impl->getLocation(),
+ diag::err_odr_objc_superclass_inconsistent)
+ << Iface->getDeclName();
+ // FIXME: It would be nice to have the location of the superclass
+ // below.
+ if (Impl->getSuperClass())
+ Importer.ToDiag(Impl->getLocation(),
+ diag::note_odr_objc_superclass)
+ << Impl->getSuperClass()->getDeclName();
+ else
+ Importer.ToDiag(Impl->getLocation(),
+ diag::note_odr_objc_missing_superclass);
+ if (D->getSuperClass())
+ Importer.FromDiag(D->getLocation(),
+ diag::note_odr_objc_superclass)
+ << D->getSuperClass()->getDeclName();
+ else
+ Importer.FromDiag(D->getLocation(),
+ diag::note_odr_objc_missing_superclass);
+ return nullptr;
+ }
+ }
+
+ // Import all of the members of this @implementation.
+ ImportDeclContext(D);
+
+ return Impl;
+}
+
+Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
+ // Import the major distinguishing characteristics of an @property.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return nullptr;
+ if (ToD)
+ return ToD;
+
+ // Check whether we have already imported this property.
+ SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (ObjCPropertyDecl *FoundProp
+ = dyn_cast<ObjCPropertyDecl>(FoundDecls[I])) {
+ // Check property types.
+ if (!Importer.IsStructurallyEquivalent(D->getType(),
+ FoundProp->getType())) {
+ Importer.ToDiag(Loc, diag::err_odr_objc_property_type_inconsistent)
+ << Name << D->getType() << FoundProp->getType();
+ Importer.ToDiag(FoundProp->getLocation(), diag::note_odr_value_here)
+ << FoundProp->getType();
+ return nullptr;
+ }
+
+ // FIXME: Check property attributes, getters, setters, etc.?
+
+ // Consider these properties to be equivalent.
+ Importer.Imported(D, FoundProp);
+ return FoundProp;
+ }
+ }
+
+ // Import the type.
+ TypeSourceInfo *TSI = Importer.Import(D->getTypeSourceInfo());
+ if (!TSI)
+ return nullptr;
+
+ // Create the new property.
+ ObjCPropertyDecl *ToProperty
+ = ObjCPropertyDecl::Create(Importer.getToContext(), DC, Loc,
+ Name.getAsIdentifierInfo(),
+ Importer.Import(D->getAtLoc()),
+ Importer.Import(D->getLParenLoc()),
+ Importer.Import(D->getType()),
+ TSI,
+ D->getPropertyImplementation());
+ Importer.Imported(D, ToProperty);
+ ToProperty->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToProperty);
+
+ ToProperty->setPropertyAttributes(D->getPropertyAttributes());
+ ToProperty->setPropertyAttributesAsWritten(
+ D->getPropertyAttributesAsWritten());
+ ToProperty->setGetterName(Importer.Import(D->getGetterName()));
+ ToProperty->setSetterName(Importer.Import(D->getSetterName()));
+ ToProperty->setGetterMethodDecl(
+ cast_or_null<ObjCMethodDecl>(Importer.Import(D->getGetterMethodDecl())));
+ ToProperty->setSetterMethodDecl(
+ cast_or_null<ObjCMethodDecl>(Importer.Import(D->getSetterMethodDecl())));
+ ToProperty->setPropertyIvarDecl(
+ cast_or_null<ObjCIvarDecl>(Importer.Import(D->getPropertyIvarDecl())));
+ return ToProperty;
+}
+
+Decl *ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
+ ObjCPropertyDecl *Property = cast_or_null<ObjCPropertyDecl>(
+ Importer.Import(D->getPropertyDecl()));
+ if (!Property)
+ return nullptr;
+
+ DeclContext *DC = Importer.ImportContext(D->getDeclContext());
+ if (!DC)
+ return nullptr;
+
+ // Import the lexical declaration context.
+ DeclContext *LexicalDC = DC;
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return nullptr;
+ }
+
+ ObjCImplDecl *InImpl = dyn_cast<ObjCImplDecl>(LexicalDC);
+ if (!InImpl)
+ return nullptr;
+
+ // Import the ivar (for an @synthesize).
+ ObjCIvarDecl *Ivar = nullptr;
+ if (D->getPropertyIvarDecl()) {
+ Ivar = cast_or_null<ObjCIvarDecl>(
+ Importer.Import(D->getPropertyIvarDecl()));
+ if (!Ivar)
+ return nullptr;
+ }
+
+ ObjCPropertyImplDecl *ToImpl
+ = InImpl->FindPropertyImplDecl(Property->getIdentifier());
+ if (!ToImpl) {
+ ToImpl = ObjCPropertyImplDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getLocStart()),
+ Importer.Import(D->getLocation()),
+ Property,
+ D->getPropertyImplementation(),
+ Ivar,
+ Importer.Import(D->getPropertyIvarDeclLoc()));
+ ToImpl->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToImpl);
+ LexicalDC->addDeclInternal(ToImpl);
+ } else {
+ // Check that we have the same kind of property implementation (@synthesize
+ // vs. @dynamic).
+ if (D->getPropertyImplementation() != ToImpl->getPropertyImplementation()) {
+ Importer.ToDiag(ToImpl->getLocation(),
+ diag::err_odr_objc_property_impl_kind_inconsistent)
+ << Property->getDeclName()
+ << (ToImpl->getPropertyImplementation()
+ == ObjCPropertyImplDecl::Dynamic);
+ Importer.FromDiag(D->getLocation(),
+ diag::note_odr_objc_property_impl_kind)
+ << D->getPropertyDecl()->getDeclName()
+ << (D->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic);
+ return nullptr;
+ }
+
+    // For @synthesize, check that we have the same ivar.
+ if (D->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize &&
+ Ivar != ToImpl->getPropertyIvarDecl()) {
+ Importer.ToDiag(ToImpl->getPropertyIvarDeclLoc(),
+ diag::err_odr_objc_synthesize_ivar_inconsistent)
+ << Property->getDeclName()
+ << ToImpl->getPropertyIvarDecl()->getDeclName()
+ << Ivar->getDeclName();
+ Importer.FromDiag(D->getPropertyIvarDeclLoc(),
+ diag::note_odr_objc_synthesize_ivar_here)
+ << D->getPropertyIvarDecl()->getDeclName();
+ return nullptr;
+ }
+
+ // Merge the existing implementation with the new implementation.
+ Importer.Imported(D, ToImpl);
+ }
+
+ return ToImpl;
+}
+
+Decl *ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
+  // For template parameters, we adopt the translation unit as our declaration
+  // context. This context will be fixed when the actual template declaration
+  // is created.
+
+ // FIXME: Import default argument.
+ return TemplateTypeParmDecl::Create(Importer.getToContext(),
+ Importer.getToContext().getTranslationUnitDecl(),
+ Importer.Import(D->getLocStart()),
+ Importer.Import(D->getLocation()),
+ D->getDepth(),
+ D->getIndex(),
+ Importer.Import(D->getIdentifier()),
+ D->wasDeclaredWithTypename(),
+ D->isParameterPack());
+}
+
+Decl *
+ASTNodeImporter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
+ // Import the name of this declaration.
+ DeclarationName Name = Importer.Import(D->getDeclName());
+ if (D->getDeclName() && !Name)
+ return nullptr;
+
+ // Import the location of this declaration.
+ SourceLocation Loc = Importer.Import(D->getLocation());
+
+ // Import the type of this declaration.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return nullptr;
+
+ // Import type-source information.
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ if (D->getTypeSourceInfo() && !TInfo)
+ return nullptr;
+
+ // FIXME: Import default argument.
+
+ return NonTypeTemplateParmDecl::Create(Importer.getToContext(),
+ Importer.getToContext().getTranslationUnitDecl(),
+ Importer.Import(D->getInnerLocStart()),
+ Loc, D->getDepth(), D->getPosition(),
+ Name.getAsIdentifierInfo(),
+ T, D->isParameterPack(), TInfo);
+}
+
+Decl *
+ASTNodeImporter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
+ // Import the name of this declaration.
+ DeclarationName Name = Importer.Import(D->getDeclName());
+ if (D->getDeclName() && !Name)
+ return nullptr;
+
+ // Import the location of this declaration.
+ SourceLocation Loc = Importer.Import(D->getLocation());
+
+ // Import template parameters.
+ TemplateParameterList *TemplateParams
+ = ImportTemplateParameterList(D->getTemplateParameters());
+ if (!TemplateParams)
+ return nullptr;
+
+ // FIXME: Import default argument.
+
+ return TemplateTemplateParmDecl::Create(Importer.getToContext(),
+ Importer.getToContext().getTranslationUnitDecl(),
+ Loc, D->getDepth(), D->getPosition(),
+ D->isParameterPack(),
+ Name.getAsIdentifierInfo(),
+ TemplateParams);
+}
+
+Decl *ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
+ // If this record has a definition in the translation unit we're coming from,
+ // but this particular declaration is not that definition, import the
+ // definition and map to that.
+ CXXRecordDecl *Definition
+ = cast_or_null<CXXRecordDecl>(D->getTemplatedDecl()->getDefinition());
+ if (Definition && Definition != D->getTemplatedDecl()) {
+ Decl *ImportedDef
+ = Importer.Import(Definition->getDescribedClassTemplate());
+ if (!ImportedDef)
+ return nullptr;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
+ // Import the major distinguishing characteristics of this class template.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return nullptr;
+ if (ToD)
+ return ToD;
+
+ // We may already have a template of the same name; try to find and match it.
+ if (!DC->isFunctionOrMethod()) {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+ continue;
+
+ Decl *Found = FoundDecls[I];
+ if (ClassTemplateDecl *FoundTemplate
+ = dyn_cast<ClassTemplateDecl>(Found)) {
+ if (IsStructuralMatch(D, FoundTemplate)) {
+ // The class templates structurally match; call it the same template.
+ // FIXME: We may be filling in a forward declaration here. Handle
+ // this case!
+ Importer.Imported(D->getTemplatedDecl(),
+ FoundTemplate->getTemplatedDecl());
+ return Importer.Imported(D, FoundTemplate);
+ }
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, Decl::IDNS_Ordinary,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ }
+
+ if (!Name)
+ return nullptr;
+ }
+
+ CXXRecordDecl *DTemplated = D->getTemplatedDecl();
+
+ // Create the declaration that is being templated.
+ SourceLocation StartLoc = Importer.Import(DTemplated->getLocStart());
+ SourceLocation IdLoc = Importer.Import(DTemplated->getLocation());
+ CXXRecordDecl *D2Templated = CXXRecordDecl::Create(Importer.getToContext(),
+ DTemplated->getTagKind(),
+ DC, StartLoc, IdLoc,
+ Name.getAsIdentifierInfo());
+ D2Templated->setAccess(DTemplated->getAccess());
+ D2Templated->setQualifierInfo(Importer.Import(DTemplated->getQualifierLoc()));
+ D2Templated->setLexicalDeclContext(LexicalDC);
+
+ // Create the class template declaration itself.
+ TemplateParameterList *TemplateParams
+ = ImportTemplateParameterList(D->getTemplateParameters());
+ if (!TemplateParams)
+ return nullptr;
+
+ ClassTemplateDecl *D2 = ClassTemplateDecl::Create(Importer.getToContext(), DC,
+ Loc, Name, TemplateParams,
+ D2Templated,
+ /*PrevDecl=*/nullptr);
+ D2Templated->setDescribedClassTemplate(D2);
+
+ D2->setAccess(D->getAccess());
+ D2->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(D2);
+
+ // Note the relationship between the class templates.
+ Importer.Imported(D, D2);
+ Importer.Imported(DTemplated, D2Templated);
+
+ if (DTemplated->isCompleteDefinition() &&
+ !D2Templated->isCompleteDefinition()) {
+ // FIXME: Import definition!
+ }
+
+ return D2;
+}
+
+Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
+ ClassTemplateSpecializationDecl *D) {
+ // If this record has a definition in the translation unit we're coming from,
+ // but this particular declaration is not that definition, import the
+ // definition and map to that.
+ TagDecl *Definition = D->getDefinition();
+ if (Definition && Definition != D) {
+ Decl *ImportedDef = Importer.Import(Definition);
+ if (!ImportedDef)
+ return nullptr;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
+ ClassTemplateDecl *ClassTemplate
+ = cast_or_null<ClassTemplateDecl>(Importer.Import(
+ D->getSpecializedTemplate()));
+ if (!ClassTemplate)
+ return nullptr;
+
+ // Import the context of this declaration.
+ DeclContext *DC = ClassTemplate->getDeclContext();
+ if (!DC)
+ return nullptr;
+
+ DeclContext *LexicalDC = DC;
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return nullptr;
+ }
+
+ // Import the location of this declaration.
+ SourceLocation StartLoc = Importer.Import(D->getLocStart());
+ SourceLocation IdLoc = Importer.Import(D->getLocation());
+
+ // Import template arguments.
+ SmallVector<TemplateArgument, 2> TemplateArgs;
+ if (ImportTemplateArguments(D->getTemplateArgs().data(),
+ D->getTemplateArgs().size(),
+ TemplateArgs))
+ return nullptr;
+
+ // Try to find an existing specialization with these template arguments.
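+  // InsertPos remembers where a newly created specialization would be
+  // inserted into the template's folding set if no match is found.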
+ void *InsertPos = nullptr;
+ ClassTemplateSpecializationDecl *D2
+ = ClassTemplate->findSpecialization(TemplateArgs, InsertPos);
+ if (D2) {
+ // We already have a class template specialization with these template
+ // arguments.
+
+ // FIXME: Check for specialization vs. instantiation errors.
+
+ if (RecordDecl *FoundDef = D2->getDefinition()) {
+ if (!D->isCompleteDefinition() || IsStructuralMatch(D, FoundDef)) {
+        // The record types structurally match, or the "from" translation
+        // unit only had a forward declaration anyway; call it the same
+        // specialization.
+ return Importer.Imported(D, FoundDef);
+ }
+ }
+ } else {
+ // Create a new specialization.
+ D2 = ClassTemplateSpecializationDecl::Create(Importer.getToContext(),
+ D->getTagKind(), DC,
+ StartLoc, IdLoc,
+ ClassTemplate,
+ TemplateArgs.data(),
+ TemplateArgs.size(),
+ /*PrevDecl=*/nullptr);
+ D2->setSpecializationKind(D->getSpecializationKind());
+
+ // Add this specialization to the class template.
+ ClassTemplate->AddSpecialization(D2, InsertPos);
+
+ // Import the qualifier, if any.
+ D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+
+ // Add the specialization to this context.
+ D2->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(D2);
+ }
+ Importer.Imported(D, D2);
+
+ if (D->isCompleteDefinition() && ImportDefinition(D, D2))
+ return nullptr;
+
+ return D2;
+}
+
+Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
+  // If this variable has a definition in the translation unit we're coming
+  // from, but this particular declaration is not that definition, import the
+  // definition and map to that.
+ VarDecl *Definition =
+ cast_or_null<VarDecl>(D->getTemplatedDecl()->getDefinition());
+ if (Definition && Definition != D->getTemplatedDecl()) {
+ Decl *ImportedDef = Importer.Import(Definition->getDescribedVarTemplate());
+ if (!ImportedDef)
+ return nullptr;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
+ // Import the major distinguishing characteristics of this variable template.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return nullptr;
+ if (ToD)
+ return ToD;
+
+ // We may already have a template of the same name; try to find and match it.
+ assert(!DC->isFunctionOrMethod() &&
+ "Variable templates cannot be declared at function scope");
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+ continue;
+
+ Decl *Found = FoundDecls[I];
+ if (VarTemplateDecl *FoundTemplate = dyn_cast<VarTemplateDecl>(Found)) {
+ if (IsStructuralMatch(D, FoundTemplate)) {
+ // The variable templates structurally match; call it the same template.
+ Importer.Imported(D->getTemplatedDecl(),
+ FoundTemplate->getTemplatedDecl());
+ return Importer.Imported(D, FoundTemplate);
+ }
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, Decl::IDNS_Ordinary,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ }
+
+ if (!Name)
+ return nullptr;
+
+ VarDecl *DTemplated = D->getTemplatedDecl();
+
+ // Import the type.
+ QualType T = Importer.Import(DTemplated->getType());
+ if (T.isNull())
+ return nullptr;
+
+ // Create the declaration that is being templated.
+ SourceLocation StartLoc = Importer.Import(DTemplated->getLocStart());
+ SourceLocation IdLoc = Importer.Import(DTemplated->getLocation());
+ TypeSourceInfo *TInfo = Importer.Import(DTemplated->getTypeSourceInfo());
+ VarDecl *D2Templated = VarDecl::Create(Importer.getToContext(), DC, StartLoc,
+ IdLoc, Name.getAsIdentifierInfo(), T,
+ TInfo, DTemplated->getStorageClass());
+ D2Templated->setAccess(DTemplated->getAccess());
+ D2Templated->setQualifierInfo(Importer.Import(DTemplated->getQualifierLoc()));
+ D2Templated->setLexicalDeclContext(LexicalDC);
+
+ // Importer.Imported(DTemplated, D2Templated);
+ // LexicalDC->addDeclInternal(D2Templated);
+
+ // Merge the initializer.
+ if (ImportDefinition(DTemplated, D2Templated))
+ return nullptr;
+
+ // Create the variable template declaration itself.
+ TemplateParameterList *TemplateParams =
+ ImportTemplateParameterList(D->getTemplateParameters());
+ if (!TemplateParams)
+ return nullptr;
+
+ VarTemplateDecl *D2 = VarTemplateDecl::Create(
+ Importer.getToContext(), DC, Loc, Name, TemplateParams, D2Templated);
+ D2Templated->setDescribedVarTemplate(D2);
+
+ D2->setAccess(D->getAccess());
+ D2->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(D2);
+
+ // Note the relationship between the variable templates.
+ Importer.Imported(D, D2);
+ Importer.Imported(DTemplated, D2Templated);
+
+ if (DTemplated->isThisDeclarationADefinition() &&
+ !D2Templated->isThisDeclarationADefinition()) {
+ // FIXME: Import definition!
+ }
+
+ return D2;
+}
+
+Decl *ASTNodeImporter::VisitVarTemplateSpecializationDecl(
+ VarTemplateSpecializationDecl *D) {
+ // If this record has a definition in the translation unit we're coming from,
+ // but this particular declaration is not that definition, import the
+ // definition and map to that.
+ VarDecl *Definition = D->getDefinition();
+ if (Definition && Definition != D) {
+ Decl *ImportedDef = Importer.Import(Definition);
+ if (!ImportedDef)
+ return nullptr;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
+ VarTemplateDecl *VarTemplate = cast_or_null<VarTemplateDecl>(
+ Importer.Import(D->getSpecializedTemplate()));
+ if (!VarTemplate)
+ return nullptr;
+
+ // Import the context of this declaration.
+ DeclContext *DC = VarTemplate->getDeclContext();
+ if (!DC)
+ return nullptr;
+
+ DeclContext *LexicalDC = DC;
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return nullptr;
+ }
+
+ // Import the location of this declaration.
+ SourceLocation StartLoc = Importer.Import(D->getLocStart());
+ SourceLocation IdLoc = Importer.Import(D->getLocation());
+
+ // Import template arguments.
+ SmallVector<TemplateArgument, 2> TemplateArgs;
+ if (ImportTemplateArguments(D->getTemplateArgs().data(),
+ D->getTemplateArgs().size(), TemplateArgs))
+ return nullptr;
+
+ // Try to find an existing specialization with these template arguments.
+ void *InsertPos = nullptr;
+ VarTemplateSpecializationDecl *D2 = VarTemplate->findSpecialization(
+ TemplateArgs, InsertPos);
+ if (D2) {
+ // We already have a variable template specialization with these template
+ // arguments.
+
+ // FIXME: Check for specialization vs. instantiation errors.
+
+ if (VarDecl *FoundDef = D2->getDefinition()) {
+ if (!D->isThisDeclarationADefinition() ||
+ IsStructuralMatch(D, FoundDef)) {
+        // The variable types structurally match, or the "from" translation
+        // unit only had a forward declaration anyway; call it the same
+        // variable.
+ return Importer.Imported(D, FoundDef);
+ }
+ }
+  } else {
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return nullptr;
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+
+ // Create a new specialization.
+ D2 = VarTemplateSpecializationDecl::Create(
+ Importer.getToContext(), DC, StartLoc, IdLoc, VarTemplate, T, TInfo,
+ D->getStorageClass(), TemplateArgs.data(), TemplateArgs.size());
+ D2->setSpecializationKind(D->getSpecializationKind());
+ D2->setTemplateArgsInfo(D->getTemplateArgsInfo());
+
+    // Add this specialization to the variable template.
+ VarTemplate->AddSpecialization(D2, InsertPos);
+
+ // Import the qualifier, if any.
+ D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+
+ // Add the specialization to this context.
+ D2->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(D2);
+ }
+ Importer.Imported(D, D2);
+
+ if (D->isThisDeclarationADefinition() && ImportDefinition(D, D2))
+ return nullptr;
+
+ return D2;
+}
+
+//----------------------------------------------------------------------------
+// Import Statements
+//----------------------------------------------------------------------------
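+// Every VisitFooStmt below follows the same recipe: import each child node
+// and source location into the destination context, treat a null result for
+// a child that was non-null in the source as failure, and only then allocate
+// the new statement in the "to" ASTContext. A minimal sketch of the pattern
+// ('FooStmt' is illustrative, not a real statement class):
+//
+// Stmt *ASTNodeImporter::VisitFooStmt(FooStmt *S) {
+// Stmt *ToSub = Importer.Import(S->getSubStmt());
+// if (!ToSub && S->getSubStmt())
+// return nullptr; // child existed but failed to import
+// SourceLocation ToLoc = Importer.Import(S->getFooLoc());
+// return new (Importer.getToContext()) FooStmt(ToLoc, ToSub);
+// }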
+
+DeclGroupRef ASTNodeImporter::ImportDeclGroup(DeclGroupRef DG) {
+ if (DG.isNull())
+ return DeclGroupRef::Create(Importer.getToContext(), nullptr, 0);
+ size_t NumDecls = DG.end() - DG.begin();
+ SmallVector<Decl *, 1> ToDecls(NumDecls);
+ auto &_Importer = this->Importer;
+ std::transform(DG.begin(), DG.end(), ToDecls.begin(),
+ [&_Importer](Decl *D) -> Decl * {
+ return _Importer.Import(D);
+ });
+ return DeclGroupRef::Create(Importer.getToContext(),
+ ToDecls.begin(),
+ NumDecls);
+}
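+// A failed element import leaves a null slot in the resulting group;
+// DeclGroupRef carries no error state of its own, so callers such as
+// VisitDeclStmt below scan the group for nulls to detect failure.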
+
+Stmt *ASTNodeImporter::VisitStmt(Stmt *S) {
+ Importer.FromDiag(S->getLocStart(), diag::err_unsupported_ast_node)
+ << S->getStmtClassName();
+ return nullptr;
+}
+
+Stmt *ASTNodeImporter::VisitDeclStmt(DeclStmt *S) {
+ DeclGroupRef ToDG = ImportDeclGroup(S->getDeclGroup());
+ for (Decl *ToD : ToDG) {
+ if (!ToD)
+ return nullptr;
+ }
+ SourceLocation ToStartLoc = Importer.Import(S->getStartLoc());
+ SourceLocation ToEndLoc = Importer.Import(S->getEndLoc());
+ return new (Importer.getToContext()) DeclStmt(ToDG, ToStartLoc, ToEndLoc);
+}
+
+Stmt *ASTNodeImporter::VisitNullStmt(NullStmt *S) {
+ SourceLocation ToSemiLoc = Importer.Import(S->getSemiLoc());
+ return new (Importer.getToContext()) NullStmt(ToSemiLoc,
+ S->hasLeadingEmptyMacro());
+}
+
+Stmt *ASTNodeImporter::VisitCompoundStmt(CompoundStmt *S) {
+ SmallVector<Stmt *, 4> ToStmts(S->size());
+ auto &_Importer = this->Importer;
+ std::transform(S->body_begin(), S->body_end(), ToStmts.begin(),
+ [&_Importer](Stmt *CS) -> Stmt * {
+ return _Importer.Import(CS);
+ });
+ for (Stmt *ToS : ToStmts) {
+ if (!ToS)
+ return nullptr;
+ }
+ SourceLocation ToLBraceLoc = Importer.Import(S->getLBracLoc());
+ SourceLocation ToRBraceLoc = Importer.Import(S->getRBracLoc());
+ return new (Importer.getToContext()) CompoundStmt(Importer.getToContext(),
+ ToStmts,
+ ToLBraceLoc, ToRBraceLoc);
+}
+
+Stmt *ASTNodeImporter::VisitCaseStmt(CaseStmt *S) {
+ Expr *ToLHS = Importer.Import(S->getLHS());
+ if (!ToLHS)
+ return nullptr;
+ Expr *ToRHS = Importer.Import(S->getRHS());
+ if (!ToRHS && S->getRHS())
+ return nullptr;
+ SourceLocation ToCaseLoc = Importer.Import(S->getCaseLoc());
+ SourceLocation ToEllipsisLoc = Importer.Import(S->getEllipsisLoc());
+ SourceLocation ToColonLoc = Importer.Import(S->getColonLoc());
+ return new (Importer.getToContext()) CaseStmt(ToLHS, ToRHS,
+ ToCaseLoc, ToEllipsisLoc,
+ ToColonLoc);
+}
+
+Stmt *ASTNodeImporter::VisitDefaultStmt(DefaultStmt *S) {
+ SourceLocation ToDefaultLoc = Importer.Import(S->getDefaultLoc());
+ SourceLocation ToColonLoc = Importer.Import(S->getColonLoc());
+ Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
+ if (!ToSubStmt && S->getSubStmt())
+ return nullptr;
+ return new (Importer.getToContext()) DefaultStmt(ToDefaultLoc, ToColonLoc,
+ ToSubStmt);
+}
+
+Stmt *ASTNodeImporter::VisitLabelStmt(LabelStmt *S) {
+ SourceLocation ToIdentLoc = Importer.Import(S->getIdentLoc());
+ LabelDecl *ToLabelDecl =
+ cast_or_null<LabelDecl>(Importer.Import(S->getDecl()));
+ if (!ToLabelDecl && S->getDecl())
+ return nullptr;
+ Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
+ if (!ToSubStmt && S->getSubStmt())
+ return nullptr;
+ return new (Importer.getToContext()) LabelStmt(ToIdentLoc, ToLabelDecl,
+ ToSubStmt);
+}
+
+Stmt *ASTNodeImporter::VisitAttributedStmt(AttributedStmt *S) {
+ SourceLocation ToAttrLoc = Importer.Import(S->getAttrLoc());
+ ArrayRef<const Attr*> FromAttrs(S->getAttrs());
+ SmallVector<const Attr *, 1> ToAttrs(FromAttrs.size());
+ ASTContext &_ToContext = Importer.getToContext();
+ std::transform(FromAttrs.begin(), FromAttrs.end(), ToAttrs.begin(),
+ [&_ToContext](const Attr *A) -> const Attr * {
+ return A->clone(_ToContext);
+ });
+ for (const Attr *ToA : ToAttrs) {
+ if (!ToA)
+ return nullptr;
+ }
+ Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
+ if (!ToSubStmt && S->getSubStmt())
+ return nullptr;
+ return AttributedStmt::Create(Importer.getToContext(), ToAttrLoc,
+ ToAttrs, ToSubStmt);
+}
+
+Stmt *ASTNodeImporter::VisitIfStmt(IfStmt *S) {
+ SourceLocation ToIfLoc = Importer.Import(S->getIfLoc());
+ VarDecl *ToConditionVariable = nullptr;
+ if (VarDecl *FromConditionVariable = S->getConditionVariable()) {
+ ToConditionVariable =
+ dyn_cast_or_null<VarDecl>(Importer.Import(FromConditionVariable));
+ if (!ToConditionVariable)
+ return nullptr;
+ }
+ Expr *ToCondition = Importer.Import(S->getCond());
+ if (!ToCondition && S->getCond())
+ return nullptr;
+ Stmt *ToThenStmt = Importer.Import(S->getThen());
+ if (!ToThenStmt && S->getThen())
+ return nullptr;
+ SourceLocation ToElseLoc = Importer.Import(S->getElseLoc());
+ Stmt *ToElseStmt = Importer.Import(S->getElse());
+ if (!ToElseStmt && S->getElse())
+ return nullptr;
+ return new (Importer.getToContext()) IfStmt(Importer.getToContext(),
+ ToIfLoc, ToConditionVariable,
+ ToCondition, ToThenStmt,
+ ToElseLoc, ToElseStmt);
+}
+
+Stmt *ASTNodeImporter::VisitSwitchStmt(SwitchStmt *S) {
+ VarDecl *ToConditionVariable = nullptr;
+ if (VarDecl *FromConditionVariable = S->getConditionVariable()) {
+ ToConditionVariable =
+ dyn_cast_or_null<VarDecl>(Importer.Import(FromConditionVariable));
+ if (!ToConditionVariable)
+ return nullptr;
+ }
+ Expr *ToCondition = Importer.Import(S->getCond());
+ if (!ToCondition && S->getCond())
+ return nullptr;
+ SwitchStmt *ToStmt = new (Importer.getToContext()) SwitchStmt(
+ Importer.getToContext(), ToConditionVariable,
+ ToCondition);
+ Stmt *ToBody = Importer.Import(S->getBody());
+ if (!ToBody && S->getBody())
+ return nullptr;
+ ToStmt->setBody(ToBody);
+ ToStmt->setSwitchLoc(Importer.Import(S->getSwitchLoc()));
+ // Now we have to re-chain the cases.
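+ // The SwitchCase nodes form an intrusive singly-linked list threaded
+ // through the function's AST, so the imported copies must be re-linked by
+ // hand; e.g. for "switch (x) { case 0: case 1: break; }" the imported
+ // CaseStmt for 0 must point at the imported CaseStmt for 1, not back into
+ // the source AST.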
+ SwitchCase *LastChainedSwitchCase = nullptr;
+ for (SwitchCase *SC = S->getSwitchCaseList(); SC != nullptr;
+ SC = SC->getNextSwitchCase()) {
+ SwitchCase *ToSC = dyn_cast_or_null<SwitchCase>(Importer.Import(SC));
+ if (!ToSC)
+ return nullptr;
+ if (LastChainedSwitchCase)
+ LastChainedSwitchCase->setNextSwitchCase(ToSC);
+ else
+ ToStmt->setSwitchCaseList(ToSC);
+ LastChainedSwitchCase = ToSC;
+ }
+ return ToStmt;
+}
+
+Stmt *ASTNodeImporter::VisitWhileStmt(WhileStmt *S) {
+ VarDecl *ToConditionVariable = nullptr;
+ if (VarDecl *FromConditionVariable = S->getConditionVariable()) {
+ ToConditionVariable =
+ dyn_cast_or_null<VarDecl>(Importer.Import(FromConditionVariable));
+ if (!ToConditionVariable)
+ return nullptr;
+ }
+ Expr *ToCondition = Importer.Import(S->getCond());
+ if (!ToCondition && S->getCond())
+ return nullptr;
+ Stmt *ToBody = Importer.Import(S->getBody());
+ if (!ToBody && S->getBody())
+ return nullptr;
+ SourceLocation ToWhileLoc = Importer.Import(S->getWhileLoc());
+ return new (Importer.getToContext()) WhileStmt(Importer.getToContext(),
+ ToConditionVariable,
+ ToCondition, ToBody,
+ ToWhileLoc);
+}
+
+Stmt *ASTNodeImporter::VisitDoStmt(DoStmt *S) {
+ Stmt *ToBody = Importer.Import(S->getBody());
+ if (!ToBody && S->getBody())
+ return nullptr;
+ Expr *ToCondition = Importer.Import(S->getCond());
+ if (!ToCondition && S->getCond())
+ return nullptr;
+ SourceLocation ToDoLoc = Importer.Import(S->getDoLoc());
+ SourceLocation ToWhileLoc = Importer.Import(S->getWhileLoc());
+ SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
+ return new (Importer.getToContext()) DoStmt(ToBody, ToCondition,
+ ToDoLoc, ToWhileLoc,
+ ToRParenLoc);
+}
+
+Stmt *ASTNodeImporter::VisitForStmt(ForStmt *S) {
+ Stmt *ToInit = Importer.Import(S->getInit());
+ if (!ToInit && S->getInit())
+ return nullptr;
+ Expr *ToCondition = Importer.Import(S->getCond());
+ if (!ToCondition && S->getCond())
+ return nullptr;
+ VarDecl *ToConditionVariable = nullptr;
+ if (VarDecl *FromConditionVariable = S->getConditionVariable()) {
+ ToConditionVariable =
+ dyn_cast_or_null<VarDecl>(Importer.Import(FromConditionVariable));
+ if (!ToConditionVariable)
+ return nullptr;
+ }
+ Expr *ToInc = Importer.Import(S->getInc());
+ if (!ToInc && S->getInc())
+ return nullptr;
+ Stmt *ToBody = Importer.Import(S->getBody());
+ if (!ToBody && S->getBody())
+ return nullptr;
+ SourceLocation ToForLoc = Importer.Import(S->getForLoc());
+ SourceLocation ToLParenLoc = Importer.Import(S->getLParenLoc());
+ SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
+ return new (Importer.getToContext()) ForStmt(Importer.getToContext(),
+ ToInit, ToCondition,
+ ToConditionVariable,
+ ToInc, ToBody,
+ ToForLoc, ToLParenLoc,
+ ToRParenLoc);
+}
+
+Stmt *ASTNodeImporter::VisitGotoStmt(GotoStmt *S) {
+ LabelDecl *ToLabel = nullptr;
+ if (LabelDecl *FromLabel = S->getLabel()) {
+ ToLabel = dyn_cast_or_null<LabelDecl>(Importer.Import(FromLabel));
+ if (!ToLabel)
+ return nullptr;
+ }
+ SourceLocation ToGotoLoc = Importer.Import(S->getGotoLoc());
+ SourceLocation ToLabelLoc = Importer.Import(S->getLabelLoc());
+ return new (Importer.getToContext()) GotoStmt(ToLabel,
+ ToGotoLoc, ToLabelLoc);
+}
+
+Stmt *ASTNodeImporter::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
+ SourceLocation ToGotoLoc = Importer.Import(S->getGotoLoc());
+ SourceLocation ToStarLoc = Importer.Import(S->getStarLoc());
+ Expr *ToTarget = Importer.Import(S->getTarget());
+ if (!ToTarget && S->getTarget())
+ return nullptr;
+ return new (Importer.getToContext()) IndirectGotoStmt(ToGotoLoc, ToStarLoc,
+ ToTarget);
+}
+
+Stmt *ASTNodeImporter::VisitContinueStmt(ContinueStmt *S) {
+ SourceLocation ToContinueLoc = Importer.Import(S->getContinueLoc());
+ return new (Importer.getToContext()) ContinueStmt(ToContinueLoc);
+}
+
+Stmt *ASTNodeImporter::VisitBreakStmt(BreakStmt *S) {
+ SourceLocation ToBreakLoc = Importer.Import(S->getBreakLoc());
+ return new (Importer.getToContext()) BreakStmt(ToBreakLoc);
+}
+
+Stmt *ASTNodeImporter::VisitReturnStmt(ReturnStmt *S) {
+ SourceLocation ToRetLoc = Importer.Import(S->getReturnLoc());
+ Expr *ToRetExpr = Importer.Import(S->getRetValue());
+ if (!ToRetExpr && S->getRetValue())
+ return nullptr;
+ VarDecl *NRVOCandidate = const_cast<VarDecl*>(S->getNRVOCandidate());
+ VarDecl *ToNRVOCandidate = cast_or_null<VarDecl>(Importer.Import(NRVOCandidate));
+ if (!ToNRVOCandidate && NRVOCandidate)
+ return nullptr;
+ return new (Importer.getToContext()) ReturnStmt(ToRetLoc, ToRetExpr,
+ ToNRVOCandidate);
+}
+
+Stmt *ASTNodeImporter::VisitCXXCatchStmt(CXXCatchStmt *S) {
+ SourceLocation ToCatchLoc = Importer.Import(S->getCatchLoc());
+ VarDecl *ToExceptionDecl = nullptr;
+ if (VarDecl *FromExceptionDecl = S->getExceptionDecl()) {
+ ToExceptionDecl =
+ dyn_cast_or_null<VarDecl>(Importer.Import(FromExceptionDecl));
+ if (!ToExceptionDecl)
+ return nullptr;
+ }
+ Stmt *ToHandlerBlock = Importer.Import(S->getHandlerBlock());
+ if (!ToHandlerBlock && S->getHandlerBlock())
+ return nullptr;
+ return new (Importer.getToContext()) CXXCatchStmt(ToCatchLoc,
+ ToExceptionDecl,
+ ToHandlerBlock);
+}
+
+Stmt *ASTNodeImporter::VisitCXXTryStmt(CXXTryStmt *S) {
+ SourceLocation ToTryLoc = Importer.Import(S->getTryLoc());
+ Stmt *ToTryBlock = Importer.Import(S->getTryBlock());
+ if (!ToTryBlock && S->getTryBlock())
+ return nullptr;
+ SmallVector<Stmt *, 1> ToHandlers(S->getNumHandlers());
+ for (unsigned HI = 0, HE = S->getNumHandlers(); HI != HE; ++HI) {
+ CXXCatchStmt *FromHandler = S->getHandler(HI);
+ if (Stmt *ToHandler = Importer.Import(FromHandler))
+ ToHandlers[HI] = ToHandler;
+ else
+ return nullptr;
+ }
+ return CXXTryStmt::Create(Importer.getToContext(), ToTryLoc, ToTryBlock,
+ ToHandlers);
+}
+
+Stmt *ASTNodeImporter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
+ DeclStmt *ToRange =
+ dyn_cast_or_null<DeclStmt>(Importer.Import(S->getRangeStmt()));
+ if (!ToRange && S->getRangeStmt())
+ return nullptr;
+ DeclStmt *ToBeginEnd =
+ dyn_cast_or_null<DeclStmt>(Importer.Import(S->getBeginEndStmt()));
+ if (!ToBeginEnd && S->getBeginEndStmt())
+ return nullptr;
+ Expr *ToCond = Importer.Import(S->getCond());
+ if (!ToCond && S->getCond())
+ return nullptr;
+ Expr *ToInc = Importer.Import(S->getInc());
+ if (!ToInc && S->getInc())
+ return nullptr;
+ DeclStmt *ToLoopVar =
+ dyn_cast_or_null<DeclStmt>(Importer.Import(S->getLoopVarStmt()));
+ if (!ToLoopVar && S->getLoopVarStmt())
+ return nullptr;
+ Stmt *ToBody = Importer.Import(S->getBody());
+ if (!ToBody && S->getBody())
+ return nullptr;
+ SourceLocation ToForLoc = Importer.Import(S->getForLoc());
+ SourceLocation ToCoawaitLoc = Importer.Import(S->getCoawaitLoc());
+ SourceLocation ToColonLoc = Importer.Import(S->getColonLoc());
+ SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
+ return new (Importer.getToContext()) CXXForRangeStmt(ToRange, ToBeginEnd,
+ ToCond, ToInc,
+ ToLoopVar, ToBody,
+ ToForLoc, ToCoawaitLoc,
+ ToColonLoc, ToRParenLoc);
+}
+
+Stmt *ASTNodeImporter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
+ Stmt *ToElem = Importer.Import(S->getElement());
+ if (!ToElem && S->getElement())
+ return nullptr;
+ Expr *ToCollect = Importer.Import(S->getCollection());
+ if (!ToCollect && S->getCollection())
+ return nullptr;
+ Stmt *ToBody = Importer.Import(S->getBody());
+ if (!ToBody && S->getBody())
+ return nullptr;
+ SourceLocation ToForLoc = Importer.Import(S->getForLoc());
+ SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
+ return new (Importer.getToContext()) ObjCForCollectionStmt(ToElem,
+ ToCollect,
+ ToBody, ToForLoc,
+ ToRParenLoc);
+}
+
+Stmt *ASTNodeImporter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+ SourceLocation ToAtCatchLoc = Importer.Import(S->getAtCatchLoc());
+ SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
+ VarDecl *ToExceptionDecl = nullptr;
+ if (VarDecl *FromExceptionDecl = S->getCatchParamDecl()) {
+ ToExceptionDecl =
+ dyn_cast_or_null<VarDecl>(Importer.Import(FromExceptionDecl));
+ if (!ToExceptionDecl)
+ return nullptr;
+ }
+ Stmt *ToBody = Importer.Import(S->getCatchBody());
+ if (!ToBody && S->getCatchBody())
+ return nullptr;
+ return new (Importer.getToContext()) ObjCAtCatchStmt(ToAtCatchLoc,
+ ToRParenLoc,
+ ToExceptionDecl,
+ ToBody);
+}
+
+Stmt *ASTNodeImporter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
+ SourceLocation ToAtFinallyLoc = Importer.Import(S->getAtFinallyLoc());
+ Stmt *ToAtFinallyStmt = Importer.Import(S->getFinallyBody());
+ if (!ToAtFinallyStmt && S->getFinallyBody())
+ return nullptr;
+ return new (Importer.getToContext()) ObjCAtFinallyStmt(ToAtFinallyLoc,
+ ToAtFinallyStmt);
+}
+
+Stmt *ASTNodeImporter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
+ SourceLocation ToAtTryLoc = Importer.Import(S->getAtTryLoc());
+ Stmt *ToAtTryStmt = Importer.Import(S->getTryBody());
+ if (!ToAtTryStmt && S->getTryBody())
+ return nullptr;
+ SmallVector<Stmt *, 1> ToCatchStmts(S->getNumCatchStmts());
+ for (unsigned CI = 0, CE = S->getNumCatchStmts(); CI != CE; ++CI) {
+ ObjCAtCatchStmt *FromCatchStmt = S->getCatchStmt(CI);
+ if (Stmt *ToCatchStmt = Importer.Import(FromCatchStmt))
+ ToCatchStmts[CI] = ToCatchStmt;
+ else
+ return nullptr;
+ }
+ Stmt *ToAtFinallyStmt = Importer.Import(S->getFinallyStmt());
+ if (!ToAtFinallyStmt && S->getFinallyStmt())
+ return nullptr;
+ return ObjCAtTryStmt::Create(Importer.getToContext(),
+ ToAtTryLoc, ToAtTryStmt,
+ ToCatchStmts.begin(), ToCatchStmts.size(),
+ ToAtFinallyStmt);
+}
+
+Stmt *ASTNodeImporter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
+ SourceLocation ToAtSynchronizedLoc =
+ Importer.Import(S->getAtSynchronizedLoc());
+ Expr *ToSynchExpr = Importer.Import(S->getSynchExpr());
+ if (!ToSynchExpr && S->getSynchExpr())
+ return nullptr;
+ Stmt *ToSynchBody = Importer.Import(S->getSynchBody());
+ if (!ToSynchBody && S->getSynchBody())
+ return nullptr;
+ return new (Importer.getToContext()) ObjCAtSynchronizedStmt(
+ ToAtSynchronizedLoc, ToSynchExpr, ToSynchBody);
+}
+
+Stmt *ASTNodeImporter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
+ SourceLocation ToAtThrowLoc = Importer.Import(S->getThrowLoc());
+ Expr *ToThrow = Importer.Import(S->getThrowExpr());
+ if (!ToThrow && S->getThrowExpr())
+ return nullptr;
+ return new (Importer.getToContext()) ObjCAtThrowStmt(ToAtThrowLoc, ToThrow);
+}
+
+Stmt *
+ASTNodeImporter::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
+ SourceLocation ToAtLoc = Importer.Import(S->getAtLoc());
+ Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
+ if (!ToSubStmt && S->getSubStmt())
+ return nullptr;
+ return new (Importer.getToContext()) ObjCAutoreleasePoolStmt(ToAtLoc,
+ ToSubStmt);
+}
+
+//----------------------------------------------------------------------------
+// Import Expressions
+//----------------------------------------------------------------------------
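+// The expression importers follow the same null-propagation pattern as the
+// statement importers above, with one addition: each must also import the
+// expression's result type, and a null QualType aborts the import.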
+Expr *ASTNodeImporter::VisitExpr(Expr *E) {
+ Importer.FromDiag(E->getLocStart(), diag::err_unsupported_ast_node)
+ << E->getStmtClassName();
+ return nullptr;
+}
+
+Expr *ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
+ ValueDecl *ToD = cast_or_null<ValueDecl>(Importer.Import(E->getDecl()));
+ if (!ToD)
+ return nullptr;
+
+ NamedDecl *FoundD = nullptr;
+ if (E->getDecl() != E->getFoundDecl()) {
+ FoundD = cast_or_null<NamedDecl>(Importer.Import(E->getFoundDecl()));
+ if (!FoundD)
+ return nullptr;
+ }
+
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ DeclRefExpr *DRE = DeclRefExpr::Create(Importer.getToContext(),
+ Importer.Import(E->getQualifierLoc()),
+ Importer.Import(E->getTemplateKeywordLoc()),
+ ToD,
+ E->refersToEnclosingVariableOrCapture(),
+ Importer.Import(E->getLocation()),
+ T, E->getValueKind(),
+ FoundD,
+ /*FIXME:TemplateArgs=*/nullptr);
+ if (E->hadMultipleCandidates())
+ DRE->setHadMultipleCandidates(true);
+ return DRE;
+}
+
+Expr *ASTNodeImporter::VisitIntegerLiteral(IntegerLiteral *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ return IntegerLiteral::Create(Importer.getToContext(),
+ E->getValue(), T,
+ Importer.Import(E->getLocation()));
+}
+
+Expr *ASTNodeImporter::VisitCharacterLiteral(CharacterLiteral *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ return new (Importer.getToContext()) CharacterLiteral(E->getValue(),
+ E->getKind(), T,
+ Importer.Import(E->getLocation()));
+}
+
+Expr *ASTNodeImporter::VisitParenExpr(ParenExpr *E) {
+ Expr *SubExpr = Importer.Import(E->getSubExpr());
+ if (!SubExpr)
+ return nullptr;
+
+ return new (Importer.getToContext())
+ ParenExpr(Importer.Import(E->getLParen()),
+ Importer.Import(E->getRParen()),
+ SubExpr);
+}
+
+Expr *ASTNodeImporter::VisitUnaryOperator(UnaryOperator *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ Expr *SubExpr = Importer.Import(E->getSubExpr());
+ if (!SubExpr)
+ return nullptr;
+
+ return new (Importer.getToContext()) UnaryOperator(SubExpr, E->getOpcode(),
+ T, E->getValueKind(),
+ E->getObjectKind(),
+ Importer.Import(E->getOperatorLoc()));
+}
+
+Expr *ASTNodeImporter::VisitUnaryExprOrTypeTraitExpr(
+ UnaryExprOrTypeTraitExpr *E) {
+ QualType ResultType = Importer.Import(E->getType());
+
+ if (E->isArgumentType()) {
+ TypeSourceInfo *TInfo = Importer.Import(E->getArgumentTypeInfo());
+ if (!TInfo)
+ return nullptr;
+
+ return new (Importer.getToContext()) UnaryExprOrTypeTraitExpr(E->getKind(),
+ TInfo, ResultType,
+ Importer.Import(E->getOperatorLoc()),
+ Importer.Import(E->getRParenLoc()));
+ }
+
+ Expr *SubExpr = Importer.Import(E->getArgumentExpr());
+ if (!SubExpr)
+ return nullptr;
+
+ return new (Importer.getToContext()) UnaryExprOrTypeTraitExpr(E->getKind(),
+ SubExpr, ResultType,
+ Importer.Import(E->getOperatorLoc()),
+ Importer.Import(E->getRParenLoc()));
+}
+
+Expr *ASTNodeImporter::VisitBinaryOperator(BinaryOperator *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ Expr *LHS = Importer.Import(E->getLHS());
+ if (!LHS)
+ return nullptr;
+
+ Expr *RHS = Importer.Import(E->getRHS());
+ if (!RHS)
+ return nullptr;
+
+ return new (Importer.getToContext()) BinaryOperator(LHS, RHS, E->getOpcode(),
+ T, E->getValueKind(),
+ E->getObjectKind(),
+ Importer.Import(E->getOperatorLoc()),
+ E->isFPContractable());
+}
+
+Expr *ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ QualType CompLHSType = Importer.Import(E->getComputationLHSType());
+ if (CompLHSType.isNull())
+ return nullptr;
+
+ QualType CompResultType = Importer.Import(E->getComputationResultType());
+ if (CompResultType.isNull())
+ return nullptr;
+
+ Expr *LHS = Importer.Import(E->getLHS());
+ if (!LHS)
+ return nullptr;
+
+ Expr *RHS = Importer.Import(E->getRHS());
+ if (!RHS)
+ return nullptr;
+
+ return new (Importer.getToContext())
+ CompoundAssignOperator(LHS, RHS, E->getOpcode(),
+ T, E->getValueKind(),
+ E->getObjectKind(),
+ CompLHSType, CompResultType,
+ Importer.Import(E->getOperatorLoc()),
+ E->isFPContractable());
+}
+
+static bool ImportCastPath(CastExpr *E, CXXCastPath &Path) {
+ if (E->path_empty()) return false;
+
+ // TODO: import cast paths. Until that is implemented, report failure
+ // (true) for any cast that carries a non-empty inheritance path.
+ return true;
+}
+
+Expr *ASTNodeImporter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ Expr *SubExpr = Importer.Import(E->getSubExpr());
+ if (!SubExpr)
+ return nullptr;
+
+ CXXCastPath BasePath;
+ if (ImportCastPath(E, BasePath))
+ return nullptr;
+
+ return ImplicitCastExpr::Create(Importer.getToContext(), T, E->getCastKind(),
+ SubExpr, &BasePath, E->getValueKind());
+}
+
+Expr *ASTNodeImporter::VisitCStyleCastExpr(CStyleCastExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ Expr *SubExpr = Importer.Import(E->getSubExpr());
+ if (!SubExpr)
+ return nullptr;
+
+ TypeSourceInfo *TInfo = Importer.Import(E->getTypeInfoAsWritten());
+ if (!TInfo && E->getTypeInfoAsWritten())
+ return nullptr;
+
+ CXXCastPath BasePath;
+ if (ImportCastPath(E, BasePath))
+ return nullptr;
+
+ return CStyleCastExpr::Create(Importer.getToContext(), T,
+ E->getValueKind(), E->getCastKind(),
+ SubExpr, &BasePath, TInfo,
+ Importer.Import(E->getLParenLoc()),
+ Importer.Import(E->getRParenLoc()));
+}
+
+Expr *ASTNodeImporter::VisitCXXConstructExpr(CXXConstructExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ CXXConstructorDecl *ToCCD =
+ dyn_cast_or_null<CXXConstructorDecl>(Importer.Import(E->getConstructor()));
+ if (!ToCCD && E->getConstructor())
+ return nullptr;
+
+ size_t NumArgs = E->getNumArgs();
+ SmallVector<Expr *, 1> ToArgs(NumArgs);
+ ASTImporter &_Importer = Importer;
+ std::transform(E->arg_begin(), E->arg_end(), ToArgs.begin(),
+ [&_Importer](Expr *AE) -> Expr * {
+ return _Importer.Import(AE);
+ });
+ for (Expr *ToA : ToArgs) {
+ if (!ToA)
+ return nullptr;
+ }
+
+ return CXXConstructExpr::Create(Importer.getToContext(), T,
+ Importer.Import(E->getLocation()),
+ ToCCD, E->isElidable(),
+ ToArgs, E->hadMultipleCandidates(),
+ E->isListInitialization(),
+ E->isStdInitListInitialization(),
+ E->requiresZeroInitialization(),
+ E->getConstructionKind(),
+ Importer.Import(E->getParenOrBraceRange()));
+}
+
+Expr *ASTNodeImporter::VisitMemberExpr(MemberExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ Expr *ToBase = Importer.Import(E->getBase());
+ if (!ToBase && E->getBase())
+ return nullptr;
+
+ ValueDecl *ToMember =
+ dyn_cast_or_null<ValueDecl>(Importer.Import(E->getMemberDecl()));
+ if (!ToMember && E->getMemberDecl())
+ return nullptr;
+
+ DeclAccessPair ToFoundDecl = DeclAccessPair::make(
+ dyn_cast_or_null<NamedDecl>(Importer.Import(E->getFoundDecl().getDecl())),
+ E->getFoundDecl().getAccess());
+
+ DeclarationNameInfo ToMemberNameInfo(
+ Importer.Import(E->getMemberNameInfo().getName()),
+ Importer.Import(E->getMemberNameInfo().getLoc()));
+
+ if (E->hasExplicitTemplateArgs()) {
+ return nullptr; // FIXME: handle template arguments
+ }
+
+ return MemberExpr::Create(Importer.getToContext(), ToBase,
+ E->isArrow(),
+ Importer.Import(E->getOperatorLoc()),
+ Importer.Import(E->getQualifierLoc()),
+ Importer.Import(E->getTemplateKeywordLoc()),
+ ToMember, ToFoundDecl, ToMemberNameInfo,
+ nullptr, T, E->getValueKind(),
+ E->getObjectKind());
+}
+
+Expr *ASTNodeImporter::VisitCallExpr(CallExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ Expr *ToCallee = Importer.Import(E->getCallee());
+ if (!ToCallee && E->getCallee())
+ return nullptr;
+
+ unsigned NumArgs = E->getNumArgs();
+
+ llvm::SmallVector<Expr *, 2> ToArgs(NumArgs);
+
+ for (unsigned ai = 0, ae = NumArgs; ai != ae; ++ai) {
+ Expr *FromArg = E->getArg(ai);
+ Expr *ToArg = Importer.Import(FromArg);
+ if (!ToArg)
+ return nullptr;
+ ToArgs[ai] = ToArg;
+ }
+
+ Expr **ToArgs_Copied = new (Importer.getToContext())
+ Expr*[NumArgs];
+
+ for (unsigned ai = 0, ae = NumArgs; ai != ae; ++ai)
+ ToArgs_Copied[ai] = ToArgs[ai];
+
+ return new (Importer.getToContext())
+ CallExpr(Importer.getToContext(), ToCallee,
+ llvm::makeArrayRef(ToArgs_Copied, NumArgs), T, E->getValueKind(),
+ Importer.Import(E->getRParenLoc()));
+}
+
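+// A typical client wires two contexts together like this (a sketch, not part
+// of this file; 'FromUnit' and 'ToUnit' are hypothetical ASTUnits owning the
+// two translation units):
+//
+// ASTImporter Importer(ToUnit->getASTContext(), ToUnit->getFileManager(),
+// FromUnit->getASTContext(), FromUnit->getFileManager(),
+// /*MinimalImport=*/false);
+// Decl *ToD = Importer.Import(SomeFromDecl); // null on failure
+//
+// The two translation unit decls are pre-linked in the constructor below so
+// that an imported top-level declaration always has a parent context to land
+// in.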
+ASTImporter::ASTImporter(ASTContext &ToContext, FileManager &ToFileManager,
+ ASTContext &FromContext, FileManager &FromFileManager,
+ bool MinimalImport)
+ : ToContext(ToContext), FromContext(FromContext),
+ ToFileManager(ToFileManager), FromFileManager(FromFileManager),
+ Minimal(MinimalImport), LastDiagFromFrom(false)
+{
+ ImportedDecls[FromContext.getTranslationUnitDecl()]
+ = ToContext.getTranslationUnitDecl();
+}
+
+ASTImporter::~ASTImporter() { }
+
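+// Local qualifiers are stripped before the cache lookup and layered back on
+// afterwards, so importing 'const Foo' and 'volatile Foo' shares a single
+// cached entry for the underlying 'Foo' type.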
+QualType ASTImporter::Import(QualType FromT) {
+ if (FromT.isNull())
+ return QualType();
+
+ const Type *fromTy = FromT.getTypePtr();
+
+ // Check whether we've already imported this type.
+ llvm::DenseMap<const Type *, const Type *>::iterator Pos
+ = ImportedTypes.find(fromTy);
+ if (Pos != ImportedTypes.end())
+ return ToContext.getQualifiedType(Pos->second, FromT.getLocalQualifiers());
+
+ // Import the type
+ ASTNodeImporter Importer(*this);
+ QualType ToT = Importer.Visit(fromTy);
+ if (ToT.isNull())
+ return ToT;
+
+ // Record the imported type.
+ ImportedTypes[fromTy] = ToT.getTypePtr();
+
+ return ToContext.getQualifiedType(ToT, FromT.getLocalQualifiers());
+}
+
+TypeSourceInfo *ASTImporter::Import(TypeSourceInfo *FromTSI) {
+ if (!FromTSI)
+ return FromTSI;
+
+ // FIXME: For now we just create a "trivial" type source info based
+ // on the type and a single location. Implement a real version of this.
+ QualType T = Import(FromTSI->getType());
+ if (T.isNull())
+ return nullptr;
+
+ return ToContext.getTrivialTypeSourceInfo(T,
+ Import(FromTSI->getTypeLoc().getLocStart()));
+}
+
+Decl *ASTImporter::GetAlreadyImportedOrNull(Decl *FromD) {
+ llvm::DenseMap<Decl *, Decl *>::iterator Pos = ImportedDecls.find(FromD);
+ if (Pos != ImportedDecls.end()) {
+ Decl *ToD = Pos->second;
+ ASTNodeImporter(*this).ImportDefinitionIfNeeded(FromD, ToD);
+ return ToD;
+ } else {
+ return nullptr;
+ }
+}
+
+Decl *ASTImporter::Import(Decl *FromD) {
+ if (!FromD)
+ return nullptr;
+
+ ASTNodeImporter Importer(*this);
+
+ // Check whether we've already imported this declaration.
+ llvm::DenseMap<Decl *, Decl *>::iterator Pos = ImportedDecls.find(FromD);
+ if (Pos != ImportedDecls.end()) {
+ Decl *ToD = Pos->second;
+ Importer.ImportDefinitionIfNeeded(FromD, ToD);
+ return ToD;
+ }
+
+ // Import the declaration.
+ Decl *ToD = Importer.Visit(FromD);
+ if (!ToD)
+ return nullptr;
+
+ // Record the imported declaration.
+ ImportedDecls[FromD] = ToD;
+
+ if (TagDecl *FromTag = dyn_cast<TagDecl>(FromD)) {
+ // Keep track of anonymous tags that have an associated typedef.
+ if (FromTag->getTypedefNameForAnonDecl())
+ AnonTagsWithPendingTypedefs.push_back(FromTag);
+ } else if (TypedefNameDecl *FromTypedef = dyn_cast<TypedefNameDecl>(FromD)) {
+ // When we've finished transforming a typedef, see whether it was the
+ // typedef for an anonymous tag.
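+ // This pairing handles the common C idiom
+ // typedef struct { int x; } Point;
+ // where the anonymous struct is imported first and only later matched
+ // with the typedef that names it.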
+ for (SmallVectorImpl<TagDecl *>::iterator
+ FromTag = AnonTagsWithPendingTypedefs.begin(),
+ FromTagEnd = AnonTagsWithPendingTypedefs.end();
+ FromTag != FromTagEnd; ++FromTag) {
+ if ((*FromTag)->getTypedefNameForAnonDecl() == FromTypedef) {
+ if (TagDecl *ToTag = cast_or_null<TagDecl>(Import(*FromTag))) {
+ // We found the typedef for an anonymous tag; link them.
+ ToTag->setTypedefNameForAnonDecl(cast<TypedefNameDecl>(ToD));
+ AnonTagsWithPendingTypedefs.erase(FromTag);
+ break;
+ }
+ }
+ }
+ }
+
+ return ToD;
+}
+
+DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
+ if (!FromDC)
+ return FromDC;
+
+ DeclContext *ToDC = cast_or_null<DeclContext>(Import(cast<Decl>(FromDC)));
+ if (!ToDC)
+ return nullptr;
+
+ // When we're using a record/enum/Objective-C class/protocol as a context, we
+ // need it to have a definition.
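+ // (Declarations are about to be imported into it, and consumers of the
+ // destination AST generally assume that a context being populated has at
+ // least a started definition.)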
+ if (RecordDecl *ToRecord = dyn_cast<RecordDecl>(ToDC)) {
+ RecordDecl *FromRecord = cast<RecordDecl>(FromDC);
+ if (ToRecord->isCompleteDefinition()) {
+ // Do nothing.
+ } else if (FromRecord->isCompleteDefinition()) {
+ ASTNodeImporter(*this).ImportDefinition(FromRecord, ToRecord,
+ ASTNodeImporter::IDK_Basic);
+ } else {
+ CompleteDecl(ToRecord);
+ }
+ } else if (EnumDecl *ToEnum = dyn_cast<EnumDecl>(ToDC)) {
+ EnumDecl *FromEnum = cast<EnumDecl>(FromDC);
+ if (ToEnum->isCompleteDefinition()) {
+ // Do nothing.
+ } else if (FromEnum->isCompleteDefinition()) {
+ ASTNodeImporter(*this).ImportDefinition(FromEnum, ToEnum,
+ ASTNodeImporter::IDK_Basic);
+ } else {
+ CompleteDecl(ToEnum);
+ }
+ } else if (ObjCInterfaceDecl *ToClass = dyn_cast<ObjCInterfaceDecl>(ToDC)) {
+ ObjCInterfaceDecl *FromClass = cast<ObjCInterfaceDecl>(FromDC);
+ if (ToClass->getDefinition()) {
+ // Do nothing.
+ } else if (ObjCInterfaceDecl *FromDef = FromClass->getDefinition()) {
+ ASTNodeImporter(*this).ImportDefinition(FromDef, ToClass,
+ ASTNodeImporter::IDK_Basic);
+ } else {
+ CompleteDecl(ToClass);
+ }
+ } else if (ObjCProtocolDecl *ToProto = dyn_cast<ObjCProtocolDecl>(ToDC)) {
+ ObjCProtocolDecl *FromProto = cast<ObjCProtocolDecl>(FromDC);
+ if (ToProto->getDefinition()) {
+ // Do nothing.
+ } else if (ObjCProtocolDecl *FromDef = FromProto->getDefinition()) {
+ ASTNodeImporter(*this).ImportDefinition(FromDef, ToProto,
+ ASTNodeImporter::IDK_Basic);
+ } else {
+ CompleteDecl(ToProto);
+ }
+ }
+
+ return ToDC;
+}
+
+Expr *ASTImporter::Import(Expr *FromE) {
+ if (!FromE)
+ return nullptr;
+
+ return cast_or_null<Expr>(Import(cast<Stmt>(FromE)));
+}
+
+Stmt *ASTImporter::Import(Stmt *FromS) {
+ if (!FromS)
+ return nullptr;
+
+ // Check whether we've already imported this statement.
+ llvm::DenseMap<Stmt *, Stmt *>::iterator Pos = ImportedStmts.find(FromS);
+ if (Pos != ImportedStmts.end())
+ return Pos->second;
+
+ // Import the statement.
+ ASTNodeImporter Importer(*this);
+ Stmt *ToS = Importer.Visit(FromS);
+ if (!ToS)
+ return nullptr;
+
+ // Record the imported statement.
+ ImportedStmts[FromS] = ToS;
+ return ToS;
+}
+
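+// Example: importing the qualifier 'std::vector<int>::' recurses through the
+// prefix chain, rebuilding each link in the destination context: first the
+// namespace 'std', then the TypeSpec 'vector<int>'.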
+NestedNameSpecifier *ASTImporter::Import(NestedNameSpecifier *FromNNS) {
+ if (!FromNNS)
+ return nullptr;
+
+ NestedNameSpecifier *prefix = Import(FromNNS->getPrefix());
+
+ switch (FromNNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ if (IdentifierInfo *II = Import(FromNNS->getAsIdentifier())) {
+ return NestedNameSpecifier::Create(ToContext, prefix, II);
+ }
+ return nullptr;
+
+ case NestedNameSpecifier::Namespace:
+ if (NamespaceDecl *NS =
+ cast<NamespaceDecl>(Import(FromNNS->getAsNamespace()))) {
+ return NestedNameSpecifier::Create(ToContext, prefix, NS);
+ }
+ return nullptr;
+
+ case NestedNameSpecifier::NamespaceAlias:
+ if (NamespaceAliasDecl *NSAD =
+ cast<NamespaceAliasDecl>(Import(FromNNS->getAsNamespaceAlias()))) {
+ return NestedNameSpecifier::Create(ToContext, prefix, NSAD);
+ }
+ return nullptr;
+
+ case NestedNameSpecifier::Global:
+ return NestedNameSpecifier::GlobalSpecifier(ToContext);
+
+ case NestedNameSpecifier::Super:
+ if (CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(Import(FromNNS->getAsRecordDecl()))) {
+ return NestedNameSpecifier::SuperSpecifier(ToContext, RD);
+ }
+ return nullptr;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ QualType T = Import(QualType(FromNNS->getAsType(), 0u));
+ if (!T.isNull()) {
+ bool bTemplate = FromNNS->getKind() ==
+ NestedNameSpecifier::TypeSpecWithTemplate;
+ return NestedNameSpecifier::Create(ToContext, prefix,
+ bTemplate, T.getTypePtr());
+ }
+ }
+ return nullptr;
+ }
+
+ llvm_unreachable("Invalid nested name specifier kind");
+}
+
+NestedNameSpecifierLoc ASTImporter::Import(NestedNameSpecifierLoc FromNNS) {
+ // FIXME: Implement!
+ return NestedNameSpecifierLoc();
+}
+
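+// Example: a template name written 'ns::vec' arrives here as a
+// QualifiedTemplateName (the qualifier 'ns::' plus the underlying
+// TemplateDecl), while a template member of a dependent base arrives as a
+// DependentTemplateName keyed by identifier; each case below rebuilds the
+// corresponding representation in the destination context.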
+TemplateName ASTImporter::Import(TemplateName From) {
+ switch (From.getKind()) {
+ case TemplateName::Template:
+ if (TemplateDecl *ToTemplate
+ = cast_or_null<TemplateDecl>(Import(From.getAsTemplateDecl())))
+ return TemplateName(ToTemplate);
+
+ return TemplateName();
+
+ case TemplateName::OverloadedTemplate: {
+ OverloadedTemplateStorage *FromStorage = From.getAsOverloadedTemplate();
+ UnresolvedSet<2> ToTemplates;
+ for (OverloadedTemplateStorage::iterator I = FromStorage->begin(),
+ E = FromStorage->end();
+ I != E; ++I) {
+ if (NamedDecl *To = cast_or_null<NamedDecl>(Import(*I)))
+ ToTemplates.addDecl(To);
+ else
+ return TemplateName();
+ }
+ return ToContext.getOverloadedTemplateName(ToTemplates.begin(),
+ ToTemplates.end());
+ }
+
+ case TemplateName::QualifiedTemplate: {
+ QualifiedTemplateName *QTN = From.getAsQualifiedTemplateName();
+ NestedNameSpecifier *Qualifier = Import(QTN->getQualifier());
+ if (!Qualifier)
+ return TemplateName();
+
+ if (TemplateDecl *ToTemplate
+ = cast_or_null<TemplateDecl>(Import(From.getAsTemplateDecl())))
+ return ToContext.getQualifiedTemplateName(Qualifier,
+ QTN->hasTemplateKeyword(),
+ ToTemplate);
+
+ return TemplateName();
+ }
+
+ case TemplateName::DependentTemplate: {
+ DependentTemplateName *DTN = From.getAsDependentTemplateName();
+ NestedNameSpecifier *Qualifier = Import(DTN->getQualifier());
+ if (!Qualifier)
+ return TemplateName();
+
+ if (DTN->isIdentifier()) {
+ return ToContext.getDependentTemplateName(Qualifier,
+ Import(DTN->getIdentifier()));
+ }
+
+ return ToContext.getDependentTemplateName(Qualifier, DTN->getOperator());
+ }
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ SubstTemplateTemplateParmStorage *subst
+ = From.getAsSubstTemplateTemplateParm();
+ TemplateTemplateParmDecl *param
+ = cast_or_null<TemplateTemplateParmDecl>(Import(subst->getParameter()));
+ if (!param)
+ return TemplateName();
+
+ TemplateName replacement = Import(subst->getReplacement());
+ if (replacement.isNull()) return TemplateName();
+
+ return ToContext.getSubstTemplateTemplateParm(param, replacement);
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ SubstTemplateTemplateParmPackStorage *SubstPack
+ = From.getAsSubstTemplateTemplateParmPack();
+ TemplateTemplateParmDecl *Param
+ = cast_or_null<TemplateTemplateParmDecl>(
+ Import(SubstPack->getParameterPack()));
+ if (!Param)
+ return TemplateName();
+
+ ASTNodeImporter Importer(*this);
+ TemplateArgument ArgPack
+ = Importer.ImportTemplateArgument(SubstPack->getArgumentPack());
+ if (ArgPack.isNull())
+ return TemplateName();
+
+ return ToContext.getSubstTemplateTemplateParmPack(Param, ArgPack);
+ }
+ }
+
+ llvm_unreachable("Invalid template name kind");
+}
+
+SourceLocation ASTImporter::Import(SourceLocation FromLoc) {
+ if (FromLoc.isInvalid())
+ return SourceLocation();
+
+ SourceManager &FromSM = FromContext.getSourceManager();
+
+ // For now, map everything down to its spelling location, so that we
+ // don't have to import macro expansions.
+ // FIXME: Import macro expansions!
+ FromLoc = FromSM.getSpellingLoc(FromLoc);
+ std::pair<FileID, unsigned> Decomposed = FromSM.getDecomposedLoc(FromLoc);
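+ // From here on the location is just a (FileID, offset) pair: e.g. offset
+ // 42 in "a.c" on the "from" side becomes offset 42 within the FileID
+ // created for "a.c" on the "to" side.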
+ SourceManager &ToSM = ToContext.getSourceManager();
+ FileID ToFileID = Import(Decomposed.first);
+ if (ToFileID.isInvalid())
+ return SourceLocation();
+ SourceLocation ret = ToSM.getLocForStartOfFile(ToFileID)
+ .getLocWithOffset(Decomposed.second);
+ return ret;
+}
+
+SourceRange ASTImporter::Import(SourceRange FromRange) {
+ return SourceRange(Import(FromRange.getBegin()), Import(FromRange.getEnd()));
+}
+
+FileID ASTImporter::Import(FileID FromID) {
+ llvm::DenseMap<FileID, FileID>::iterator Pos
+ = ImportedFileIDs.find(FromID);
+ if (Pos != ImportedFileIDs.end())
+ return Pos->second;
+
+ SourceManager &FromSM = FromContext.getSourceManager();
+ SourceManager &ToSM = ToContext.getSourceManager();
+ const SrcMgr::SLocEntry &FromSLoc = FromSM.getSLocEntry(FromID);
+ assert(FromSLoc.isFile() && "Cannot handle macro expansions yet");
+
+ // Include location of this file.
+ SourceLocation ToIncludeLoc = Import(FromSLoc.getFile().getIncludeLoc());
+
+ // Map the FileID into the "to" source manager.
+ FileID ToID;
+ const SrcMgr::ContentCache *Cache = FromSLoc.getFile().getContentCache();
+ if (Cache->OrigEntry && Cache->OrigEntry->getDir()) {
+ // FIXME: We probably want to use getVirtualFile(), so we don't hit the
+ // disk again
+ // FIXME: We definitely want to re-use the existing MemoryBuffer, rather
+ // than mmap the files several times.
+ const FileEntry *Entry = ToFileManager.getFile(Cache->OrigEntry->getName());
+ if (!Entry)
+ return FileID();
+ ToID = ToSM.createFileID(Entry, ToIncludeLoc,
+ FromSLoc.getFile().getFileCharacteristic());
+ } else {
+ // FIXME: We want to re-use the existing MemoryBuffer!
+ const llvm::MemoryBuffer *
+ FromBuf = Cache->getBuffer(FromContext.getDiagnostics(), FromSM);
+ std::unique_ptr<llvm::MemoryBuffer> ToBuf
+ = llvm::MemoryBuffer::getMemBufferCopy(FromBuf->getBuffer(),
+ FromBuf->getBufferIdentifier());
+ ToID = ToSM.createFileID(std::move(ToBuf),
+ FromSLoc.getFile().getFileCharacteristic());
+ }
+
+
+ return ToID;
+}
+
+void ASTImporter::ImportDefinition(Decl *From) {
+ Decl *To = Import(From);
+ if (!To)
+ return;
+
+ if (DeclContext *FromDC = dyn_cast<DeclContext>(From)) {
+ ASTNodeImporter Importer(*this);
+
+ if (RecordDecl *ToRecord = dyn_cast<RecordDecl>(To)) {
+ if (!ToRecord->getDefinition()) {
+ Importer.ImportDefinition(cast<RecordDecl>(FromDC), ToRecord,
+ ASTNodeImporter::IDK_Everything);
+ return;
+ }
+ }
+
+ if (EnumDecl *ToEnum = dyn_cast<EnumDecl>(To)) {
+ if (!ToEnum->getDefinition()) {
+ Importer.ImportDefinition(cast<EnumDecl>(FromDC), ToEnum,
+ ASTNodeImporter::IDK_Everything);
+ return;
+ }
+ }
+
+ if (ObjCInterfaceDecl *ToIFace = dyn_cast<ObjCInterfaceDecl>(To)) {
+ if (!ToIFace->getDefinition()) {
+ Importer.ImportDefinition(cast<ObjCInterfaceDecl>(FromDC), ToIFace,
+ ASTNodeImporter::IDK_Everything);
+ return;
+ }
+ }
+
+ if (ObjCProtocolDecl *ToProto = dyn_cast<ObjCProtocolDecl>(To)) {
+ if (!ToProto->getDefinition()) {
+ Importer.ImportDefinition(cast<ObjCProtocolDecl>(FromDC), ToProto,
+ ASTNodeImporter::IDK_Everything);
+ return;
+ }
+ }
+
+ Importer.ImportDeclContext(FromDC, true);
+ }
+}
+
+DeclarationName ASTImporter::Import(DeclarationName FromName) {
+ if (!FromName)
+ return DeclarationName();
+
+ switch (FromName.getNameKind()) {
+ case DeclarationName::Identifier:
+ return Import(FromName.getAsIdentifierInfo());
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ return Import(FromName.getObjCSelector());
+
+ case DeclarationName::CXXConstructorName: {
+ QualType T = Import(FromName.getCXXNameType());
+ if (T.isNull())
+ return DeclarationName();
+
+ return ToContext.DeclarationNames.getCXXConstructorName(
+ ToContext.getCanonicalType(T));
+ }
+
+ case DeclarationName::CXXDestructorName: {
+ QualType T = Import(FromName.getCXXNameType());
+ if (T.isNull())
+ return DeclarationName();
+
+ return ToContext.DeclarationNames.getCXXDestructorName(
+ ToContext.getCanonicalType(T));
+ }
+
+ case DeclarationName::CXXConversionFunctionName: {
+ QualType T = Import(FromName.getCXXNameType());
+ if (T.isNull())
+ return DeclarationName();
+
+ return ToContext.DeclarationNames.getCXXConversionFunctionName(
+ ToContext.getCanonicalType(T));
+ }
+
+ case DeclarationName::CXXOperatorName:
+ return ToContext.DeclarationNames.getCXXOperatorName(
+ FromName.getCXXOverloadedOperator());
+
+ case DeclarationName::CXXLiteralOperatorName:
+ return ToContext.DeclarationNames.getCXXLiteralOperatorName(
+ Import(FromName.getCXXLiteralIdentifier()));
+
+ case DeclarationName::CXXUsingDirective:
+ // FIXME: STATICS!
+ return DeclarationName::getUsingDirectiveName();
+ }
+
+ llvm_unreachable("Invalid DeclarationName Kind!");
+}
+
+IdentifierInfo *ASTImporter::Import(const IdentifierInfo *FromId) {
+ if (!FromId)
+ return nullptr;
+
+ return &ToContext.Idents.get(FromId->getName());
+}
+
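+// Example: the two-slot selector 'insertObject:atIndex:' imports the
+// identifier for each slot and rebuilds the selector in the destination
+// table. Slot 0 is imported unconditionally below because even a zero-arg
+// selector carries one identifier.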
+Selector ASTImporter::Import(Selector FromSel) {
+ if (FromSel.isNull())
+ return Selector();
+
+ SmallVector<IdentifierInfo *, 4> Idents;
+ Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(0)));
+ for (unsigned I = 1, N = FromSel.getNumArgs(); I < N; ++I)
+ Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(I)));
+ return ToContext.Selectors.getSelector(FromSel.getNumArgs(), Idents.data());
+}
+
+DeclarationName ASTImporter::HandleNameConflict(DeclarationName Name,
+ DeclContext *DC,
+ unsigned IDNS,
+ NamedDecl **Decls,
+ unsigned NumDecls) {
+ return Name;
+}
+
+DiagnosticBuilder ASTImporter::ToDiag(SourceLocation Loc, unsigned DiagID) {
+ if (LastDiagFromFrom)
+ ToContext.getDiagnostics().notePriorDiagnosticFrom(
+ FromContext.getDiagnostics());
+ LastDiagFromFrom = false;
+ return ToContext.getDiagnostics().Report(Loc, DiagID);
+}
+
+DiagnosticBuilder ASTImporter::FromDiag(SourceLocation Loc, unsigned DiagID) {
+ if (!LastDiagFromFrom)
+ FromContext.getDiagnostics().notePriorDiagnosticFrom(
+ ToContext.getDiagnostics());
+ LastDiagFromFrom = true;
+ return FromContext.getDiagnostics().Report(Loc, DiagID);
+}
+
+void ASTImporter::CompleteDecl(Decl *D) {
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
+ if (!ID->getDefinition())
+ ID->startDefinition();
+ } else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) {
+ if (!PD->getDefinition())
+ PD->startDefinition();
+ } else if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ if (!TD->getDefinition() && !TD->isBeingDefined()) {
+ TD->startDefinition();
+ TD->setCompleteDefinition(true);
+ }
+ } else {
+ assert(0 && "CompleteDecl called on a Decl that can't be completed");
+ }
+}
+
+Decl *ASTImporter::Imported(Decl *From, Decl *To) {
+ ImportedDecls[From] = To;
+ return To;
+}
+
+bool ASTImporter::IsStructurallyEquivalent(QualType From, QualType To,
+ bool Complain) {
+ llvm::DenseMap<const Type *, const Type *>::iterator Pos
+ = ImportedTypes.find(From.getTypePtr());
+ if (Pos != ImportedTypes.end() && ToContext.hasSameType(Import(From), To))
+ return true;
+
+ StructuralEquivalenceContext Ctx(FromContext, ToContext, NonEquivalentDecls,
+ false, Complain);
+ return Ctx.IsStructurallyEquivalent(From, To);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTTypeTraits.cpp b/contrib/llvm/tools/clang/lib/AST/ASTTypeTraits.cpp
new file mode 100644
index 0000000..ec0671c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ASTTypeTraits.cpp
@@ -0,0 +1,158 @@
+//===--- ASTTypeTraits.cpp --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Provides a dynamic type identifier and a dynamically typed node container
+// that can be used to store an AST base node at runtime in the same storage in
+// a type safe way.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTTypeTraits.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+
+namespace clang {
+namespace ast_type_traits {
+
+const ASTNodeKind::KindInfo ASTNodeKind::AllKindInfo[] = {
+ { NKI_None, "<None>" },
+ { NKI_None, "CXXCtorInitializer" },
+ { NKI_None, "TemplateArgument" },
+ { NKI_None, "NestedNameSpecifier" },
+ { NKI_None, "NestedNameSpecifierLoc" },
+ { NKI_None, "QualType" },
+ { NKI_None, "TypeLoc" },
+ { NKI_None, "Decl" },
+#define DECL(DERIVED, BASE) { NKI_##BASE, #DERIVED "Decl" },
+#include "clang/AST/DeclNodes.inc"
+ { NKI_None, "Stmt" },
+#define STMT(DERIVED, BASE) { NKI_##BASE, #DERIVED },
+#include "clang/AST/StmtNodes.inc"
+ { NKI_None, "Type" },
+#define TYPE(DERIVED, BASE) { NKI_##BASE, #DERIVED "Type" },
+#include "clang/AST/TypeNodes.def"
+};
+
+bool ASTNodeKind::isBaseOf(ASTNodeKind Other, unsigned *Distance) const {
+ return isBaseOf(KindId, Other.KindId, Distance);
+}
+
+bool ASTNodeKind::isSame(ASTNodeKind Other) const {
+ return KindId != NKI_None && KindId == Other.KindId;
+}
+
+bool ASTNodeKind::isBaseOf(NodeKindId Base, NodeKindId Derived,
+ unsigned *Distance) {
+ if (Base == NKI_None || Derived == NKI_None) return false;
+ unsigned Dist = 0;
+ while (Derived != Base && Derived != NKI_None) {
+ Derived = AllKindInfo[Derived].ParentId;
+ ++Dist;
+ }
+ if (Distance)
+ *Distance = Dist;
+ return Derived == Base;
+}
+
+StringRef ASTNodeKind::asStringRef() const { return AllKindInfo[KindId].Name; }
+
+ASTNodeKind ASTNodeKind::getMostDerivedType(ASTNodeKind Kind1,
+ ASTNodeKind Kind2) {
+ if (Kind1.isBaseOf(Kind2)) return Kind2;
+ if (Kind2.isBaseOf(Kind1)) return Kind1;
+ return ASTNodeKind();
+}
+
+ASTNodeKind ASTNodeKind::getMostDerivedCommonAncestor(ASTNodeKind Kind1,
+ ASTNodeKind Kind2) {
+ NodeKindId Parent = Kind1.KindId;
+ while (!isBaseOf(Parent, Kind2.KindId, nullptr) && Parent != NKI_None) {
+ Parent = AllKindInfo[Parent].ParentId;
+ }
+ return ASTNodeKind(Parent);
+}
+
+ASTNodeKind ASTNodeKind::getFromNode(const Decl &D) {
+ switch (D.getKind()) {
+#define DECL(DERIVED, BASE) \
+ case Decl::DERIVED: return ASTNodeKind(NKI_##DERIVED##Decl);
+#define ABSTRACT_DECL(D)
+#include "clang/AST/DeclNodes.inc"
+ }
+ llvm_unreachable("invalid decl kind");
+}
+
+ASTNodeKind ASTNodeKind::getFromNode(const Stmt &S) {
+ switch (S.getStmtClass()) {
+ case Stmt::NoStmtClass: return NKI_None;
+#define STMT(CLASS, PARENT) \
+ case Stmt::CLASS##Class: return ASTNodeKind(NKI_##CLASS);
+#define ABSTRACT_STMT(S)
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("invalid stmt kind");
+}
+
+ASTNodeKind ASTNodeKind::getFromNode(const Type &T) {
+ switch (T.getTypeClass()) {
+#define TYPE(Class, Base) \
+ case Type::Class: return ASTNodeKind(NKI_##Class##Type);
+#define ABSTRACT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+ }
+ llvm_unreachable("invalid type kind");
+}
+
+void DynTypedNode::print(llvm::raw_ostream &OS,
+ const PrintingPolicy &PP) const {
+ if (const TemplateArgument *TA = get<TemplateArgument>())
+ TA->print(PP, OS);
+ else if (const NestedNameSpecifier *NNS = get<NestedNameSpecifier>())
+ NNS->print(OS, PP);
+ else if (const NestedNameSpecifierLoc *NNSL = get<NestedNameSpecifierLoc>())
+ NNSL->getNestedNameSpecifier()->print(OS, PP);
+ else if (const QualType *QT = get<QualType>())
+ QT->print(OS, PP);
+ else if (const TypeLoc *TL = get<TypeLoc>())
+ TL->getType().print(OS, PP);
+ else if (const Decl *D = get<Decl>())
+ D->print(OS, PP);
+ else if (const Stmt *S = get<Stmt>())
+ S->printPretty(OS, nullptr, PP);
+ else if (const Type *T = get<Type>())
+ QualType(T, 0).print(OS, PP);
+ else
+ OS << "Unable to print values of type " << NodeKind.asStringRef() << "\n";
+}
+
+void DynTypedNode::dump(llvm::raw_ostream &OS, SourceManager &SM) const {
+ if (const Decl *D = get<Decl>())
+ D->dump(OS);
+ else if (const Stmt *S = get<Stmt>())
+ S->dump(OS, SM);
+ else
+ OS << "Unable to dump values of type " << NodeKind.asStringRef() << "\n";
+}
+
+SourceRange DynTypedNode::getSourceRange() const {
+ if (const CXXCtorInitializer *CCI = get<CXXCtorInitializer>())
+ return CCI->getSourceRange();
+ if (const NestedNameSpecifierLoc *NNSL = get<NestedNameSpecifierLoc>())
+ return NNSL->getSourceRange();
+ if (const TypeLoc *TL = get<TypeLoc>())
+ return TL->getSourceRange();
+ if (const Decl *D = get<Decl>())
+ return D->getSourceRange();
+ if (const Stmt *S = get<Stmt>())
+ return S->getSourceRange();
+ return SourceRange();
+}
+
+} // end namespace ast_type_traits
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp b/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp
new file mode 100644
index 0000000..cb60870
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp
@@ -0,0 +1,21 @@
+//===--- AttrImpl.cpp - Classes for representing attributes -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains out-of-line methods for Attr classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Attr.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+#include "llvm/ADT/StringSwitch.h"
+using namespace clang;
+
+#include "clang/AST/AttrImpl.inc"
diff --git a/contrib/llvm/tools/clang/lib/AST/CXXABI.h b/contrib/llvm/tools/clang/lib/AST/CXXABI.h
new file mode 100644
index 0000000..c23b919
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/CXXABI.h
@@ -0,0 +1,79 @@
+//===----- CXXABI.h - Interface to C++ ABIs ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for C++ AST support. Concrete
+// subclasses of this implement AST support for specific C++ ABIs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_AST_CXXABI_H
+#define LLVM_CLANG_LIB_AST_CXXABI_H
+
+#include "clang/AST/Type.h"
+
+namespace clang {
+
+class ASTContext;
+class CXXConstructorDecl;
+class DeclaratorDecl;
+class Expr;
+class MemberPointerType;
+class MangleNumberingContext;
+
+/// Implements C++ ABI-specific semantic analysis functions.
+class CXXABI {
+public:
+ virtual ~CXXABI();
+
+ /// Returns the width and alignment of a member pointer in bits.
+ virtual std::pair<uint64_t, unsigned>
+ getMemberPointerWidthAndAlign(const MemberPointerType *MPT) const = 0;
+
+ /// Returns the default calling convention for C++ methods.
+ virtual CallingConv getDefaultMethodCallConv(bool isVariadic) const = 0;
+
+ /// Returns whether the given class is nearly empty, with just virtual
+ /// pointers and no data except possibly virtual bases.
+ virtual bool isNearlyEmpty(const CXXRecordDecl *RD) const = 0;
+
+ /// Returns a new mangling number context for this C++ ABI.
+ virtual MangleNumberingContext *createMangleNumberingContext() const = 0;
+
+ /// Adds a mapping from class to copy constructor for this C++ ABI.
+ virtual void addCopyConstructorForExceptionObject(CXXRecordDecl *,
+ CXXConstructorDecl *) = 0;
+
+ /// Retrieves the mapping from class to copy constructor for this C++ ABI.
+ virtual const CXXConstructorDecl *
+ getCopyConstructorForExceptionObject(CXXRecordDecl *) = 0;
+
+ virtual void addDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
+ unsigned ParmIdx, Expr *DAE) = 0;
+
+ virtual Expr *getDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
+ unsigned ParmIdx) = 0;
+
+ virtual void addTypedefNameForUnnamedTagDecl(TagDecl *TD,
+ TypedefNameDecl *DD) = 0;
+
+ virtual TypedefNameDecl *
+ getTypedefNameForUnnamedTagDecl(const TagDecl *TD) = 0;
+
+ virtual void addDeclaratorForUnnamedTagDecl(TagDecl *TD,
+ DeclaratorDecl *DD) = 0;
+
+ virtual DeclaratorDecl *getDeclaratorForUnnamedTagDecl(const TagDecl *TD) = 0;
+};
+
+/// Creates an instance of a C++ ABI class.
+CXXABI *CreateItaniumCXXABI(ASTContext &Ctx);
+CXXABI *CreateMicrosoftCXXABI(ASTContext &Ctx);
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp b/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
new file mode 100644
index 0000000..6785a0c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
@@ -0,0 +1,688 @@
+//===------ CXXInheritance.cpp - C++ Inheritance ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides routines that help analyzing C++ inheritance hierarchies.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/RecordLayout.h"
+#include "llvm/ADT/SetVector.h"
+#include <algorithm>
+#include <set>
+
+using namespace clang;
+
+/// \brief Computes the set of declarations referenced by these base
+/// paths.
+void CXXBasePaths::ComputeDeclsFound() {
+ assert(NumDeclsFound == 0 && !DeclsFound &&
+ "Already computed the set of declarations");
+
+ llvm::SetVector<NamedDecl *, SmallVector<NamedDecl *, 8> > Decls;
+ for (paths_iterator Path = begin(), PathEnd = end(); Path != PathEnd; ++Path)
+ Decls.insert(Path->Decls.front());
+
+ NumDeclsFound = Decls.size();
+ DeclsFound = llvm::make_unique<NamedDecl *[]>(NumDeclsFound);
+ std::copy(Decls.begin(), Decls.end(), DeclsFound.get());
+}
+
+CXXBasePaths::decl_range CXXBasePaths::found_decls() {
+ if (NumDeclsFound == 0)
+ ComputeDeclsFound();
+
+ return decl_range(decl_iterator(DeclsFound.get()),
+ decl_iterator(DeclsFound.get() + NumDeclsFound));
+}
+
+/// isAmbiguous - Determines whether the set of paths provided is
+/// ambiguous, i.e., there are two or more paths that refer to
+/// different base class subobjects of the same type. BaseType must be
+/// an unqualified, canonical class type.
+bool CXXBasePaths::isAmbiguous(CanQualType BaseType) {
+ BaseType = BaseType.getUnqualifiedType();
+ std::pair<bool, unsigned>& Subobjects = ClassSubobjects[BaseType];
+ return Subobjects.second + (Subobjects.first ? 1 : 0) > 1;
+}
+
+/// clear - Clear out all prior path information.
+void CXXBasePaths::clear() {
+ Paths.clear();
+ ClassSubobjects.clear();
+ ScratchPath.clear();
+ DetectedVirtual = nullptr;
+}
+
+/// \brief Swaps the contents of this CXXBasePaths structure with the
+/// contents of Other.
+void CXXBasePaths::swap(CXXBasePaths &Other) {
+ std::swap(Origin, Other.Origin);
+ Paths.swap(Other.Paths);
+ ClassSubobjects.swap(Other.ClassSubobjects);
+ std::swap(FindAmbiguities, Other.FindAmbiguities);
+ std::swap(RecordPaths, Other.RecordPaths);
+ std::swap(DetectVirtual, Other.DetectVirtual);
+ std::swap(DetectedVirtual, Other.DetectedVirtual);
+}
+
+bool CXXRecordDecl::isDerivedFrom(const CXXRecordDecl *Base) const {
+ CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/false,
+ /*DetectVirtual=*/false);
+ return isDerivedFrom(Base, Paths);
+}
+
+bool CXXRecordDecl::isDerivedFrom(const CXXRecordDecl *Base,
+ CXXBasePaths &Paths) const {
+ if (getCanonicalDecl() == Base->getCanonicalDecl())
+ return false;
+
+ Paths.setOrigin(const_cast<CXXRecordDecl*>(this));
+
+ const CXXRecordDecl *BaseDecl = Base->getCanonicalDecl();
+ // FIXME: Capturing 'this' is a workaround for name lookup bugs in GCC 4.7.
+ return lookupInBases(
+ [this, BaseDecl](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
+ return FindBaseClass(Specifier, Path, BaseDecl);
+ },
+ Paths);
+}
+
+bool CXXRecordDecl::isVirtuallyDerivedFrom(const CXXRecordDecl *Base) const {
+ if (!getNumVBases())
+ return false;
+
+ CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/false,
+ /*DetectVirtual=*/false);
+
+ if (getCanonicalDecl() == Base->getCanonicalDecl())
+ return false;
+
+ Paths.setOrigin(const_cast<CXXRecordDecl*>(this));
+
+ const CXXRecordDecl *BaseDecl = Base->getCanonicalDecl();
+ // FIXME: Capturing 'this' is a workaround for name lookup bugs in GCC 4.7.
+ return lookupInBases(
+ [this, BaseDecl](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
+ return FindVirtualBaseClass(Specifier, Path, BaseDecl);
+ },
+ Paths);
+}
+
+bool CXXRecordDecl::isProvablyNotDerivedFrom(const CXXRecordDecl *Base) const {
+ const CXXRecordDecl *TargetDecl = Base->getCanonicalDecl();
+ return forallBases([TargetDecl](const CXXRecordDecl *Base) {
+ return Base->getCanonicalDecl() != TargetDecl;
+ });
+}
+
+bool
+CXXRecordDecl::isCurrentInstantiation(const DeclContext *CurContext) const {
+ assert(isDependentContext());
+
+ for (; !CurContext->isFileContext(); CurContext = CurContext->getParent())
+ if (CurContext->Equals(this))
+ return true;
+
+ return false;
+}
+
+bool CXXRecordDecl::forallBases(ForallBasesCallback BaseMatches,
+ bool AllowShortCircuit) const {
+ SmallVector<const CXXRecordDecl*, 8> Queue;
+
+ const CXXRecordDecl *Record = this;
+ bool AllMatches = true;
+ while (true) {
+ for (const auto &I : Record->bases()) {
+ const RecordType *Ty = I.getType()->getAs<RecordType>();
+ if (!Ty) {
+ if (AllowShortCircuit) return false;
+ AllMatches = false;
+ continue;
+ }
+
+ CXXRecordDecl *Base =
+ cast_or_null<CXXRecordDecl>(Ty->getDecl()->getDefinition());
+ if (!Base ||
+ (Base->isDependentContext() &&
+ !Base->isCurrentInstantiation(Record))) {
+ if (AllowShortCircuit) return false;
+ AllMatches = false;
+ continue;
+ }
+
+ Queue.push_back(Base);
+ if (!BaseMatches(Base)) {
+ if (AllowShortCircuit) return false;
+ AllMatches = false;
+ continue;
+ }
+ }
+
+ if (Queue.empty())
+ break;
+ Record = Queue.pop_back_val(); // not actually a queue.
+ }
+
+ return AllMatches;
+}
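+
+// Usage sketch (editorial note, illustrative): isProvablyNotDerivedFrom
+// above is the canonical client. A hypothetical caller could test a
+// property of every transitive base the same way:
+//   bool AllBasesEmpty = RD->forallBases(
+//       [](const CXXRecordDecl *Base) { return Base->isEmpty(); });
+// With AllowShortCircuit (the default), the walk stops at the first base
+// that fails the predicate or whose definition is unavailable.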
+
+bool CXXBasePaths::lookupInBases(
+ ASTContext &Context, const CXXRecordDecl *Record,
+ CXXRecordDecl::BaseMatchesCallback BaseMatches) {
+ bool FoundPath = false;
+
+ // The access of the path down to this record.
+ AccessSpecifier AccessToHere = ScratchPath.Access;
+ bool IsFirstStep = ScratchPath.empty();
+
+ for (const auto &BaseSpec : Record->bases()) {
+ // Find the record of the base class subobjects for this type.
+ QualType BaseType =
+ Context.getCanonicalType(BaseSpec.getType()).getUnqualifiedType();
+
+ // C++ [temp.dep]p3:
+ // In the definition of a class template or a member of a class template,
+ // if a base class of the class template depends on a template-parameter,
+ // the base class scope is not examined during unqualified name lookup
+ // either at the point of definition of the class template or member or
+ // during an instantiation of the class template or member.
+ if (BaseType->isDependentType())
+ continue;
+
+ // Determine whether we need to visit this base class at all,
+ // updating the count of subobjects appropriately.
+ std::pair<bool, unsigned>& Subobjects = ClassSubobjects[BaseType];
+ bool VisitBase = true;
+ bool SetVirtual = false;
+ if (BaseSpec.isVirtual()) {
+ VisitBase = !Subobjects.first;
+ Subobjects.first = true;
+ if (isDetectingVirtual() && DetectedVirtual == nullptr) {
+ // If this is the first virtual we find, remember it. If it turns out
+ // there is no base path here, we'll reset it later.
+ DetectedVirtual = BaseType->getAs<RecordType>();
+ SetVirtual = true;
+ }
+ } else
+ ++Subobjects.second;
+
+ if (isRecordingPaths()) {
+ // Add this base specifier to the current path.
+ CXXBasePathElement Element;
+ Element.Base = &BaseSpec;
+ Element.Class = Record;
+ if (BaseSpec.isVirtual())
+ Element.SubobjectNumber = 0;
+ else
+ Element.SubobjectNumber = Subobjects.second;
+ ScratchPath.push_back(Element);
+
+ // Calculate the "top-down" access to this base class.
+ // The spec actually describes this bottom-up, but top-down is
+ // equivalent because the definition works out as follows:
+ // 1. Write down the access along each step in the inheritance
+ // chain, followed by the access of the decl itself.
+ // For example, in
+ // class A { public: int foo; };
+ // class B : protected A {};
+ // class C : public B {};
+ // class D : private C {};
+ // we would write:
+ // private public protected public
+ // 2. If 'private' appears anywhere except far-left, access is denied.
+ // 3. Otherwise, overall access is determined by the most restrictive
+ // access in the sequence.
+ if (IsFirstStep)
+ ScratchPath.Access = BaseSpec.getAccessSpecifier();
+ else
+ ScratchPath.Access = CXXRecordDecl::MergeAccess(AccessToHere,
+ BaseSpec.getAccessSpecifier());
+ }
+
+ // Track whether there's a path involving this specific base.
+ bool FoundPathThroughBase = false;
+
+ if (BaseMatches(&BaseSpec, ScratchPath)) {
+ // We've found a path that terminates at this base.
+ FoundPath = FoundPathThroughBase = true;
+ if (isRecordingPaths()) {
+ // We have a path. Make a copy of it before moving on.
+ Paths.push_back(ScratchPath);
+ } else if (!isFindingAmbiguities()) {
+ // We found a path and we don't care about ambiguities;
+ // return immediately.
+ return FoundPath;
+ }
+ } else if (VisitBase) {
+ CXXRecordDecl *BaseRecord
+ = cast<CXXRecordDecl>(BaseSpec.getType()->castAs<RecordType>()
+ ->getDecl());
+ if (lookupInBases(Context, BaseRecord, BaseMatches)) {
+ // C++ [class.member.lookup]p2:
+ // A member name f in one sub-object B hides a member name f in
+ // a sub-object A if A is a base class sub-object of B. Any
+ // declarations that are so hidden are eliminated from
+ // consideration.
+
+ // There is a path to a base class that meets the criteria. If we're
+ // not collecting paths or finding ambiguities, we're done.
+ FoundPath = FoundPathThroughBase = true;
+ if (!isFindingAmbiguities())
+ return FoundPath;
+ }
+ }
+
+ // Pop this base specifier off the current path (if we're
+ // collecting paths).
+ if (isRecordingPaths()) {
+ ScratchPath.pop_back();
+ }
+
+ // If we set a virtual earlier, and this isn't a path, forget it again.
+ if (SetVirtual && !FoundPathThroughBase) {
+ DetectedVirtual = nullptr;
+ }
+ }
+
+ // Reset the scratch path access.
+ ScratchPath.Access = AccessToHere;
+
+ return FoundPath;
+}
+
+bool CXXRecordDecl::lookupInBases(BaseMatchesCallback BaseMatches,
+ CXXBasePaths &Paths) const {
+ // If we didn't find anything, report that.
+ if (!Paths.lookupInBases(getASTContext(), this, BaseMatches))
+ return false;
+
+ // If we're not recording paths or we won't ever find ambiguities,
+ // we're done.
+ if (!Paths.isRecordingPaths() || !Paths.isFindingAmbiguities())
+ return true;
+
+ // C++ [class.member.lookup]p6:
+ // When virtual base classes are used, a hidden declaration can be
+ // reached along a path through the sub-object lattice that does
+ // not pass through the hiding declaration. This is not an
+ // ambiguity. The identical use with nonvirtual base classes is an
+ // ambiguity; in that case there is no unique instance of the name
+ // that hides all the others.
+ //
+ // FIXME: This is an O(N^2) algorithm, but DPG doesn't see an easy
+ // way to make it any faster.
+ Paths.Paths.remove_if([&Paths](const CXXBasePath &Path) {
+ for (const CXXBasePathElement &PE : Path) {
+ if (!PE.Base->isVirtual())
+ continue;
+
+ CXXRecordDecl *VBase = nullptr;
+ if (const RecordType *Record = PE.Base->getType()->getAs<RecordType>())
+ VBase = cast<CXXRecordDecl>(Record->getDecl());
+ if (!VBase)
+ break;
+
+ // The declaration(s) we found along this path were found in a
+ // subobject of a virtual base. Check whether this virtual
+ // base is a subobject of any other path; if so, the
+ // declarations in this path are hidden by that path.
+ for (const CXXBasePath &HidingP : Paths) {
+ CXXRecordDecl *HidingClass = nullptr;
+ if (const RecordType *Record =
+ HidingP.back().Base->getType()->getAs<RecordType>())
+ HidingClass = cast<CXXRecordDecl>(Record->getDecl());
+ if (!HidingClass)
+ break;
+
+ if (HidingClass->isVirtuallyDerivedFrom(VBase))
+ return true;
+ }
+ }
+ return false;
+ });
+
+ return true;
+}
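+
+// Usage sketch (editorial note, illustrative; 'Name' is a hypothetical
+// DeclarationName being looked up):
+//   CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+//                      /*DetectVirtual=*/false);
+//   bool Found = RD->lookupInBases(
+//       [Name](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
+//         return CXXRecordDecl::FindOrdinaryMember(Specifier, Path, Name);
+//       },
+//       Paths);
+// Each surviving path carries its matches in Path.Decls; paths hidden
+// through a virtual base have already been pruned by the remove_if above.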
+
+bool CXXRecordDecl::FindBaseClass(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ const CXXRecordDecl *BaseRecord) {
+ assert(BaseRecord->getCanonicalDecl() == BaseRecord &&
+ "User data for FindBaseClass is not canonical!");
+ return Specifier->getType()->castAs<RecordType>()->getDecl()
+ ->getCanonicalDecl() == BaseRecord;
+}
+
+bool CXXRecordDecl::FindVirtualBaseClass(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ const CXXRecordDecl *BaseRecord) {
+ assert(BaseRecord->getCanonicalDecl() == BaseRecord &&
+ "User data for FindBaseClass is not canonical!");
+ return Specifier->isVirtual() &&
+ Specifier->getType()->castAs<RecordType>()->getDecl()
+ ->getCanonicalDecl() == BaseRecord;
+}
+
+bool CXXRecordDecl::FindTagMember(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ DeclarationName Name) {
+ RecordDecl *BaseRecord =
+ Specifier->getType()->castAs<RecordType>()->getDecl();
+
+ for (Path.Decls = BaseRecord->lookup(Name);
+ !Path.Decls.empty();
+ Path.Decls = Path.Decls.slice(1)) {
+ if (Path.Decls.front()->isInIdentifierNamespace(IDNS_Tag))
+ return true;
+ }
+
+ return false;
+}
+
+bool CXXRecordDecl::FindOrdinaryMember(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ DeclarationName Name) {
+ RecordDecl *BaseRecord =
+ Specifier->getType()->castAs<RecordType>()->getDecl();
+
+ const unsigned IDNS = IDNS_Ordinary | IDNS_Tag | IDNS_Member;
+ for (Path.Decls = BaseRecord->lookup(Name);
+ !Path.Decls.empty();
+ Path.Decls = Path.Decls.slice(1)) {
+ if (Path.Decls.front()->isInIdentifierNamespace(IDNS))
+ return true;
+ }
+
+ return false;
+}
+
+bool CXXRecordDecl::
+FindNestedNameSpecifierMember(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ DeclarationName Name) {
+ RecordDecl *BaseRecord =
+ Specifier->getType()->castAs<RecordType>()->getDecl();
+
+ for (Path.Decls = BaseRecord->lookup(Name);
+ !Path.Decls.empty();
+ Path.Decls = Path.Decls.slice(1)) {
+ // FIXME: Refactor the "is it a nested-name-specifier?" check
+ if (isa<TypedefNameDecl>(Path.Decls.front()) ||
+ Path.Decls.front()->isInIdentifierNamespace(IDNS_Tag))
+ return true;
+ }
+
+ return false;
+}
+
+void OverridingMethods::add(unsigned OverriddenSubobject,
+ UniqueVirtualMethod Overriding) {
+ SmallVectorImpl<UniqueVirtualMethod> &SubobjectOverrides
+ = Overrides[OverriddenSubobject];
+ if (std::find(SubobjectOverrides.begin(), SubobjectOverrides.end(),
+ Overriding) == SubobjectOverrides.end())
+ SubobjectOverrides.push_back(Overriding);
+}
+
+void OverridingMethods::add(const OverridingMethods &Other) {
+ for (const_iterator I = Other.begin(), IE = Other.end(); I != IE; ++I) {
+ for (overriding_const_iterator M = I->second.begin(),
+ MEnd = I->second.end();
+ M != MEnd;
+ ++M)
+ add(I->first, *M);
+ }
+}
+
+void OverridingMethods::replaceAll(UniqueVirtualMethod Overriding) {
+ for (iterator I = begin(), IEnd = end(); I != IEnd; ++I) {
+ I->second.clear();
+ I->second.push_back(Overriding);
+ }
+}
+
+
+namespace {
+ class FinalOverriderCollector {
+ /// \brief The number of subobjects of a given class type that
+ /// occur within the class hierarchy.
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> SubobjectCount;
+
+ /// \brief Overriders for each virtual base subobject.
+ llvm::DenseMap<const CXXRecordDecl *, CXXFinalOverriderMap *> VirtualOverriders;
+
+ CXXFinalOverriderMap FinalOverriders;
+
+ public:
+ ~FinalOverriderCollector();
+
+ void Collect(const CXXRecordDecl *RD, bool VirtualBase,
+ const CXXRecordDecl *InVirtualSubobject,
+ CXXFinalOverriderMap &Overriders);
+ };
+}
+
+void FinalOverriderCollector::Collect(const CXXRecordDecl *RD,
+ bool VirtualBase,
+ const CXXRecordDecl *InVirtualSubobject,
+ CXXFinalOverriderMap &Overriders) {
+ unsigned SubobjectNumber = 0;
+ if (!VirtualBase)
+ SubobjectNumber
+ = ++SubobjectCount[cast<CXXRecordDecl>(RD->getCanonicalDecl())];
+
+ for (const auto &Base : RD->bases()) {
+ if (const RecordType *RT = Base.getType()->getAs<RecordType>()) {
+ const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (!BaseDecl->isPolymorphic())
+ continue;
+
+ if (Overriders.empty() && !Base.isVirtual()) {
+ // There are no other overriders of virtual member functions,
+ // so let the base class fill in our overriders for us.
+ Collect(BaseDecl, false, InVirtualSubobject, Overriders);
+ continue;
+ }
+
+ // Collect all of the overriders from the base class subobject
+ // and merge them into the set of overriders for this class.
+ // For virtual base classes, populate or use the cached virtual
+ // overrides so that we do not walk the virtual base class (and
+ // its base classes) more than once.
+ CXXFinalOverriderMap ComputedBaseOverriders;
+ CXXFinalOverriderMap *BaseOverriders = &ComputedBaseOverriders;
+ if (Base.isVirtual()) {
+ CXXFinalOverriderMap *&MyVirtualOverriders = VirtualOverriders[BaseDecl];
+ BaseOverriders = MyVirtualOverriders;
+ if (!MyVirtualOverriders) {
+ MyVirtualOverriders = new CXXFinalOverriderMap;
+
+ // Collect may cause VirtualOverriders to reallocate, invalidating the
+ // MyVirtualOverriders reference. Set BaseOverriders to the right
+ // value now.
+ BaseOverriders = MyVirtualOverriders;
+
+ Collect(BaseDecl, true, BaseDecl, *MyVirtualOverriders);
+ }
+ } else
+ Collect(BaseDecl, false, InVirtualSubobject, ComputedBaseOverriders);
+
+ // Merge the overriders from this base class into our own set of
+ // overriders.
+ for (CXXFinalOverriderMap::iterator OM = BaseOverriders->begin(),
+ OMEnd = BaseOverriders->end();
+ OM != OMEnd;
+ ++OM) {
+ const CXXMethodDecl *CanonOM
+ = cast<CXXMethodDecl>(OM->first->getCanonicalDecl());
+ Overriders[CanonOM].add(OM->second);
+ }
+ }
+ }
+
+ for (auto *M : RD->methods()) {
+ // We only care about virtual methods.
+ if (!M->isVirtual())
+ continue;
+
+ CXXMethodDecl *CanonM = cast<CXXMethodDecl>(M->getCanonicalDecl());
+
+ if (CanonM->begin_overridden_methods()
+ == CanonM->end_overridden_methods()) {
+ // This is a new virtual function that does not override any
+ // other virtual function. Add it to the map of virtual
+ // functions for which we are tracking overriders.
+
+ // C++ [class.virtual]p2:
+ // For convenience we say that any virtual function overrides itself.
+ Overriders[CanonM].add(SubobjectNumber,
+ UniqueVirtualMethod(CanonM, SubobjectNumber,
+ InVirtualSubobject));
+ continue;
+ }
+
+ // This virtual method overrides other virtual methods, so it does
+ // not add any new slots into the set of overriders. Instead, we
+ // replace entries in the set of overriders with the new
+ // overrider. To do so, we dig down to the original virtual
+ // functions using data recursion and update all of the methods it
+ // overrides.
+ typedef llvm::iterator_range<CXXMethodDecl::method_iterator>
+ OverriddenMethods;
+ SmallVector<OverriddenMethods, 4> Stack;
+ Stack.push_back(llvm::make_range(CanonM->begin_overridden_methods(),
+ CanonM->end_overridden_methods()));
+ while (!Stack.empty()) {
+ for (const CXXMethodDecl *OM : Stack.pop_back_val()) {
+ const CXXMethodDecl *CanonOM = OM->getCanonicalDecl();
+
+ // C++ [class.virtual]p2:
+ // A virtual member function C::vf of a class object S is
+ // a final overrider unless the most derived class (1.8)
+ // of which S is a base class subobject (if any) declares
+ // or inherits another member function that overrides vf.
+ //
+ // Treating this object like the most derived class, we
+ // replace any overrides from base classes with this
+ // overriding virtual function.
+ Overriders[CanonOM].replaceAll(
+ UniqueVirtualMethod(CanonM, SubobjectNumber,
+ InVirtualSubobject));
+
+ if (CanonOM->begin_overridden_methods()
+ == CanonOM->end_overridden_methods())
+ continue;
+
+ // Continue recursion to the methods that this virtual method
+ // overrides.
+ Stack.push_back(llvm::make_range(CanonOM->begin_overridden_methods(),
+ CanonOM->end_overridden_methods()));
+ }
+ }
+
+ // C++ [class.virtual]p2:
+ // For convenience we say that any virtual function overrides itself.
+ Overriders[CanonM].add(SubobjectNumber,
+ UniqueVirtualMethod(CanonM, SubobjectNumber,
+ InVirtualSubobject));
+ }
+}
+
+FinalOverriderCollector::~FinalOverriderCollector() {
+ for (llvm::DenseMap<const CXXRecordDecl *, CXXFinalOverriderMap *>::iterator
+ VO = VirtualOverriders.begin(), VOEnd = VirtualOverriders.end();
+ VO != VOEnd;
+ ++VO)
+ delete VO->second;
+}
+
+void
+CXXRecordDecl::getFinalOverriders(CXXFinalOverriderMap &FinalOverriders) const {
+ FinalOverriderCollector Collector;
+ Collector.Collect(this, false, nullptr, FinalOverriders);
+
+ // Weed out any final overriders that come from virtual base class
+ // subobjects that were hidden by other subobjects along any path.
+ // This is the final-overrider variant of C++ [class.member.lookup]p10.
+ for (auto &OM : FinalOverriders) {
+ for (auto &SO : OM.second) {
+ SmallVectorImpl<UniqueVirtualMethod> &Overriding = SO.second;
+ if (Overriding.size() < 2)
+ continue;
+
+ auto IsHidden = [&Overriding](const UniqueVirtualMethod &M) {
+ if (!M.InVirtualSubobject)
+ return false;
+
+ // We have an overriding method in a virtual base class
+ // subobject (or non-virtual base class subobject thereof);
+ // determine whether there exists another overriding method
+ // in a base class subobject that hides the virtual base class
+ // subobject.
+ for (const UniqueVirtualMethod &OP : Overriding)
+ if (&M != &OP &&
+ OP.Method->getParent()->isVirtuallyDerivedFrom(
+ M.InVirtualSubobject))
+ return true;
+ return false;
+ };
+
+ Overriding.erase(
+ std::remove_if(Overriding.begin(), Overriding.end(), IsHidden),
+ Overriding.end());
+ }
+ }
+}
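+
+// Usage sketch (editorial note, illustrative): enumerating the final
+// overrider(s) for every virtual method and subobject:
+//   CXXFinalOverriderMap FinalOverriders;
+//   RD->getFinalOverriders(FinalOverriders);
+//   for (const auto &OM : FinalOverriders)    // method -> subobjects
+//     for (const auto &SO : OM.second)        // subobject -> overriders
+//       for (const UniqueVirtualMethod &UVM : SO.second) {
+//         // OM.first in subobject SO.first is finally overridden by
+//         // UVM.Method
+//       }
+// More than one entry left in SO.second after the pruning above means the
+// method has no unique final overrider in that subobject.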
+
+static void
+AddIndirectPrimaryBases(const CXXRecordDecl *RD, ASTContext &Context,
+ CXXIndirectPrimaryBaseSet& Bases) {
+ // If the record has a virtual primary base class, add it to our set.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ if (Layout.isPrimaryBaseVirtual())
+ Bases.insert(Layout.getPrimaryBase());
+
+ for (const auto &I : RD->bases()) {
+ assert(!I.getType()->isDependentType() &&
+ "Cannot get indirect primary bases for class with dependent bases.");
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+
+ // Only bases with virtual bases participate in computing the
+ // indirect primary virtual base classes.
+ if (BaseDecl->getNumVBases())
+ AddIndirectPrimaryBases(BaseDecl, Context, Bases);
+ }
+
+}
+
+void
+CXXRecordDecl::getIndirectPrimaryBases(CXXIndirectPrimaryBaseSet& Bases) const {
+ ASTContext &Context = getASTContext();
+
+ if (!getNumVBases())
+ return;
+
+ for (const auto &I : bases()) {
+ assert(!I.getType()->isDependentType() &&
+ "Cannot get indirect primary bases for class with dependent bases.");
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+
+ // Only bases with virtual bases participate in computing the
+ // indirect primary virtual base classes.
+ if (BaseDecl->getNumVBases())
+ AddIndirectPrimaryBases(BaseDecl, Context, Bases);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/Comment.cpp b/contrib/llvm/tools/clang/lib/AST/Comment.cpp
new file mode 100644
index 0000000..d05c5de
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/Comment.cpp
@@ -0,0 +1,347 @@
+//===--- Comment.cpp - Comment AST node implementation --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Comment.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Basic/CharInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+namespace comments {
+
+const char *Comment::getCommentKindName() const {
+ switch (getCommentKind()) {
+ case NoCommentKind: return "NoCommentKind";
+#define ABSTRACT_COMMENT(COMMENT)
+#define COMMENT(CLASS, PARENT) \
+ case CLASS##Kind: \
+ return #CLASS;
+#include "clang/AST/CommentNodes.inc"
+#undef COMMENT
+#undef ABSTRACT_COMMENT
+ }
+ llvm_unreachable("Unknown comment kind!");
+}
+
+namespace {
+struct good {};
+struct bad {};
+
+template <typename T>
+good implements_child_begin_end(Comment::child_iterator (T::*)() const) {
+ return good();
+}
+
+LLVM_ATTRIBUTE_UNUSED
+static inline bad implements_child_begin_end(
+ Comment::child_iterator (Comment::*)() const) {
+ return bad();
+}
+
+#define ASSERT_IMPLEMENTS_child_begin(function) \
+ (void) good(implements_child_begin_end(function))
+
+LLVM_ATTRIBUTE_UNUSED
+static inline void CheckCommentASTNodes() {
+#define ABSTRACT_COMMENT(COMMENT)
+#define COMMENT(CLASS, PARENT) \
+ ASSERT_IMPLEMENTS_child_begin(&CLASS::child_begin); \
+ ASSERT_IMPLEMENTS_child_begin(&CLASS::child_end);
+#include "clang/AST/CommentNodes.inc"
+#undef COMMENT
+#undef ABSTRACT_COMMENT
+}
+
+#undef ASSERT_IMPLEMENTS_child_begin
+
+} // end unnamed namespace
+
+Comment::child_iterator Comment::child_begin() const {
+ switch (getCommentKind()) {
+ case NoCommentKind: llvm_unreachable("comment without a kind");
+#define ABSTRACT_COMMENT(COMMENT)
+#define COMMENT(CLASS, PARENT) \
+ case CLASS##Kind: \
+ return static_cast<const CLASS *>(this)->child_begin();
+#include "clang/AST/CommentNodes.inc"
+#undef COMMENT
+#undef ABSTRACT_COMMENT
+ }
+ llvm_unreachable("Unknown comment kind!");
+}
+
+Comment::child_iterator Comment::child_end() const {
+ switch (getCommentKind()) {
+ case NoCommentKind: llvm_unreachable("comment without a kind");
+#define ABSTRACT_COMMENT(COMMENT)
+#define COMMENT(CLASS, PARENT) \
+ case CLASS##Kind: \
+ return static_cast<const CLASS *>(this)->child_end();
+#include "clang/AST/CommentNodes.inc"
+#undef COMMENT
+#undef ABSTRACT_COMMENT
+ }
+ llvm_unreachable("Unknown comment kind!");
+}
+
+bool TextComment::isWhitespaceNoCache() const {
+ for (StringRef::const_iterator I = Text.begin(), E = Text.end();
+ I != E; ++I) {
+ if (!clang::isWhitespace(*I))
+ return false;
+ }
+ return true;
+}
+
+bool ParagraphComment::isWhitespaceNoCache() const {
+ for (child_iterator I = child_begin(), E = child_end(); I != E; ++I) {
+ if (const TextComment *TC = dyn_cast<TextComment>(*I)) {
+ if (!TC->isWhitespace())
+ return false;
+ } else
+ return false;
+ }
+ return true;
+}
+
+const char *ParamCommandComment::getDirectionAsString(PassDirection D) {
+ switch (D) {
+ case ParamCommandComment::In:
+ return "[in]";
+ case ParamCommandComment::Out:
+ return "[out]";
+ case ParamCommandComment::InOut:
+ return "[in,out]";
+ }
+ llvm_unreachable("unknown PassDirection");
+}
+
+void DeclInfo::fill() {
+ assert(!IsFilled);
+
+ // Set defaults.
+ Kind = OtherKind;
+ TemplateKind = NotTemplate;
+ IsObjCMethod = false;
+ IsInstanceMethod = false;
+ IsClassMethod = false;
+ ParamVars = None;
+ TemplateParameters = nullptr;
+
+ if (!CommentDecl) {
+ // If there is no declaration, the defaults are our only guess.
+ IsFilled = true;
+ return;
+ }
+ CurrentDecl = CommentDecl;
+
+ Decl::Kind K = CommentDecl->getKind();
+ switch (K) {
+ default:
+ // Defaults should be good for declarations we don't handle explicitly.
+ break;
+ case Decl::Function:
+ case Decl::CXXMethod:
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor:
+ case Decl::CXXConversion: {
+ const FunctionDecl *FD = cast<FunctionDecl>(CommentDecl);
+ Kind = FunctionKind;
+ ParamVars = llvm::makeArrayRef(FD->param_begin(), FD->getNumParams());
+ ReturnType = FD->getReturnType();
+ unsigned NumLists = FD->getNumTemplateParameterLists();
+ if (NumLists != 0) {
+ TemplateKind = TemplateSpecialization;
+ TemplateParameters =
+ FD->getTemplateParameterList(NumLists - 1);
+ }
+
+ if (K == Decl::CXXMethod || K == Decl::CXXConstructor ||
+ K == Decl::CXXDestructor || K == Decl::CXXConversion) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CommentDecl);
+ IsInstanceMethod = MD->isInstance();
+ IsClassMethod = !IsInstanceMethod;
+ }
+ break;
+ }
+ case Decl::ObjCMethod: {
+ const ObjCMethodDecl *MD = cast<ObjCMethodDecl>(CommentDecl);
+ Kind = FunctionKind;
+ ParamVars = llvm::makeArrayRef(MD->param_begin(), MD->param_size());
+ ReturnType = MD->getReturnType();
+ IsObjCMethod = true;
+ IsInstanceMethod = MD->isInstanceMethod();
+ IsClassMethod = !IsInstanceMethod;
+ break;
+ }
+ case Decl::FunctionTemplate: {
+ const FunctionTemplateDecl *FTD = cast<FunctionTemplateDecl>(CommentDecl);
+ Kind = FunctionKind;
+ TemplateKind = Template;
+ const FunctionDecl *FD = FTD->getTemplatedDecl();
+ ParamVars = llvm::makeArrayRef(FD->param_begin(), FD->getNumParams());
+ ReturnType = FD->getReturnType();
+ TemplateParameters = FTD->getTemplateParameters();
+ break;
+ }
+ case Decl::ClassTemplate: {
+ const ClassTemplateDecl *CTD = cast<ClassTemplateDecl>(CommentDecl);
+ Kind = ClassKind;
+ TemplateKind = Template;
+ TemplateParameters = CTD->getTemplateParameters();
+ break;
+ }
+ case Decl::ClassTemplatePartialSpecialization: {
+ const ClassTemplatePartialSpecializationDecl *CTPSD =
+ cast<ClassTemplatePartialSpecializationDecl>(CommentDecl);
+ Kind = ClassKind;
+ TemplateKind = TemplatePartialSpecialization;
+ TemplateParameters = CTPSD->getTemplateParameters();
+ break;
+ }
+ case Decl::ClassTemplateSpecialization:
+ Kind = ClassKind;
+ TemplateKind = TemplateSpecialization;
+ break;
+ case Decl::Record:
+ case Decl::CXXRecord:
+ Kind = ClassKind;
+ break;
+ case Decl::Var:
+ case Decl::Field:
+ case Decl::EnumConstant:
+ case Decl::ObjCIvar:
+ case Decl::ObjCAtDefsField:
+ Kind = VariableKind;
+ break;
+ case Decl::Namespace:
+ Kind = NamespaceKind;
+ break;
+ case Decl::Typedef: {
+ Kind = TypedefKind;
+ // If this is a typedef to something we consider a function, extract
+ // arguments and return type.
+ const TypedefDecl *TD = cast<TypedefDecl>(CommentDecl);
+ const TypeSourceInfo *TSI = TD->getTypeSourceInfo();
+ if (!TSI)
+ break;
+ TypeLoc TL = TSI->getTypeLoc().getUnqualifiedLoc();
+ while (true) {
+ TL = TL.IgnoreParens();
+ // Look through qualified types.
+ if (QualifiedTypeLoc QualifiedTL = TL.getAs<QualifiedTypeLoc>()) {
+ TL = QualifiedTL.getUnqualifiedLoc();
+ continue;
+ }
+ // Look through pointer types.
+ if (PointerTypeLoc PointerTL = TL.getAs<PointerTypeLoc>()) {
+ TL = PointerTL.getPointeeLoc().getUnqualifiedLoc();
+ continue;
+ }
+ // Look through reference types.
+ if (ReferenceTypeLoc ReferenceTL = TL.getAs<ReferenceTypeLoc>()) {
+ TL = ReferenceTL.getPointeeLoc().getUnqualifiedLoc();
+ continue;
+ }
+ // Look through adjusted types.
+ if (AdjustedTypeLoc ATL = TL.getAs<AdjustedTypeLoc>()) {
+ TL = ATL.getOriginalLoc();
+ continue;
+ }
+ if (BlockPointerTypeLoc BlockPointerTL =
+ TL.getAs<BlockPointerTypeLoc>()) {
+ TL = BlockPointerTL.getPointeeLoc().getUnqualifiedLoc();
+ continue;
+ }
+ if (MemberPointerTypeLoc MemberPointerTL =
+ TL.getAs<MemberPointerTypeLoc>()) {
+ TL = MemberPointerTL.getPointeeLoc().getUnqualifiedLoc();
+ continue;
+ }
+ if (ElaboratedTypeLoc ETL = TL.getAs<ElaboratedTypeLoc>()) {
+ TL = ETL.getNamedTypeLoc();
+ continue;
+ }
+ // Is this a typedef for a function type?
+ if (FunctionTypeLoc FTL = TL.getAs<FunctionTypeLoc>()) {
+ Kind = FunctionKind;
+ ParamVars = FTL.getParams();
+ ReturnType = FTL.getReturnLoc().getType();
+ break;
+ }
+ if (TemplateSpecializationTypeLoc STL =
+ TL.getAs<TemplateSpecializationTypeLoc>()) {
+ // If we have a typedef to a template specialization with exactly one
+ // template argument of a function type, this looks like std::function,
+ // boost::function, or other function wrapper. Treat these typedefs as
+ // functions.
+ if (STL.getNumArgs() != 1)
+ break;
+ TemplateArgumentLoc MaybeFunction = STL.getArgLoc(0);
+ if (MaybeFunction.getArgument().getKind() != TemplateArgument::Type)
+ break;
+ TypeSourceInfo *MaybeFunctionTSI = MaybeFunction.getTypeSourceInfo();
+ TypeLoc TL = MaybeFunctionTSI->getTypeLoc().getUnqualifiedLoc();
+ if (FunctionTypeLoc FTL = TL.getAs<FunctionTypeLoc>()) {
+ Kind = FunctionKind;
+ ParamVars = FTL.getParams();
+ ReturnType = FTL.getReturnLoc().getType();
+ }
+ break;
+ }
+ break;
+ }
+ break;
+ }
+ case Decl::TypeAlias:
+ Kind = TypedefKind;
+ break;
+ case Decl::TypeAliasTemplate: {
+ const TypeAliasTemplateDecl *TAT = cast<TypeAliasTemplateDecl>(CommentDecl);
+ Kind = TypedefKind;
+ TemplateKind = Template;
+ TemplateParameters = TAT->getTemplateParameters();
+ break;
+ }
+ case Decl::Enum:
+ Kind = EnumKind;
+ break;
+ }
+
+ IsFilled = true;
+}
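+
+// Illustrative effect of the typedef unwrapping above (editorial note):
+// for a hypothetical
+//   typedef void (*Callback)(int status);
+// fill() reaches the FunctionTypeLoc through the pointer, so Kind becomes
+// FunctionKind and ParamVars holds the single 'status' parameter, letting
+// \param comments attach to function-pointer typedefs. The same
+// single-argument unwrapping is attempted for wrappers shaped like
+// std::function<void(int)>.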
+
+StringRef ParamCommandComment::getParamName(const FullComment *FC) const {
+ assert(isParamIndexValid());
+ if (isVarArgParam())
+ return "...";
+ return FC->getDeclInfo()->ParamVars[getParamIndex()]->getName();
+}
+
+StringRef TParamCommandComment::getParamName(const FullComment *FC) const {
+ assert(isPositionValid());
+ const TemplateParameterList *TPL = FC->getDeclInfo()->TemplateParameters;
+ for (unsigned i = 0, e = getDepth(); i != e; ++i) {
+ if (i == e-1)
+ return TPL->getParam(getIndex(i))->getName();
+ const NamedDecl *Param = TPL->getParam(getIndex(i));
+ if (const TemplateTemplateParmDecl *TTP =
+ dyn_cast<TemplateTemplateParmDecl>(Param))
+ TPL = TTP->getTemplateParameters();
+ }
+ return "";
+}
+
+} // end namespace comments
+} // end namespace clang
+
diff --git a/contrib/llvm/tools/clang/lib/AST/CommentBriefParser.cpp b/contrib/llvm/tools/clang/lib/AST/CommentBriefParser.cpp
new file mode 100644
index 0000000..090b921
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/CommentBriefParser.cpp
@@ -0,0 +1,155 @@
+//===--- CommentBriefParser.cpp - Dumb comment parser ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/CommentBriefParser.h"
+#include "clang/AST/CommentCommandTraits.h"
+#include "llvm/ADT/StringSwitch.h"
+
+namespace clang {
+namespace comments {
+
+namespace {
+inline bool isWhitespace(char C) {
+ return C == ' ' || C == '\n' || C == '\r' ||
+ C == '\t' || C == '\f' || C == '\v';
+}
+
+/// Convert all whitespace into spaces, remove leading and trailing spaces,
+/// compress multiple spaces into one.
+void cleanupBrief(std::string &S) {
+ bool PrevWasSpace = true;
+ std::string::iterator O = S.begin();
+ for (std::string::iterator I = S.begin(), E = S.end();
+ I != E; ++I) {
+ const char C = *I;
+ if (isWhitespace(C)) {
+ if (!PrevWasSpace) {
+ *O++ = ' ';
+ PrevWasSpace = true;
+ }
+ continue;
+ } else {
+ *O++ = C;
+ PrevWasSpace = false;
+ }
+ }
+ if (O != S.begin() && *(O - 1) == ' ')
+ --O;
+
+ S.resize(O - S.begin());
+}
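+
+// Worked example (editorial note, illustrative): cleanupBrief turns
+//   "  Adds  a\n new\tentry. "
+// into
+//   "Adds a new entry."
+// Every whitespace run collapses to a single space and both edges are
+// trimmed.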
+
+bool isWhitespace(StringRef Text) {
+ for (StringRef::const_iterator I = Text.begin(), E = Text.end();
+ I != E; ++I) {
+ if (!isWhitespace(*I))
+ return false;
+ }
+ return true;
+}
+} // unnamed namespace
+
+BriefParser::BriefParser(Lexer &L, const CommandTraits &Traits) :
+ L(L), Traits(Traits) {
+ // Get lookahead token.
+ ConsumeToken();
+}
+
+std::string BriefParser::Parse() {
+ std::string FirstParagraphOrBrief;
+ std::string ReturnsParagraph;
+ bool InFirstParagraph = true;
+ bool InBrief = false;
+ bool InReturns = false;
+
+ while (Tok.isNot(tok::eof)) {
+ if (Tok.is(tok::text)) {
+ if (InFirstParagraph || InBrief)
+ FirstParagraphOrBrief += Tok.getText();
+ else if (InReturns)
+ ReturnsParagraph += Tok.getText();
+ ConsumeToken();
+ continue;
+ }
+
+ if (Tok.is(tok::backslash_command) || Tok.is(tok::at_command)) {
+ const CommandInfo *Info = Traits.getCommandInfo(Tok.getCommandID());
+ if (Info->IsBriefCommand) {
+ FirstParagraphOrBrief.clear();
+ InBrief = true;
+ ConsumeToken();
+ continue;
+ }
+ if (Info->IsReturnsCommand) {
+ InReturns = true;
+ InBrief = false;
+ InFirstParagraph = false;
+ ReturnsParagraph += "Returns ";
+ ConsumeToken();
+ continue;
+ }
+ // Block commands implicitly start a new paragraph.
+ if (Info->IsBlockCommand) {
+ // We found an implicit paragraph end.
+ InFirstParagraph = false;
+ if (InBrief)
+ break;
+ }
+ }
+
+ if (Tok.is(tok::newline)) {
+ if (InFirstParagraph || InBrief)
+ FirstParagraphOrBrief += ' ';
+ else if (InReturns)
+ ReturnsParagraph += ' ';
+ ConsumeToken();
+
+ // If the next token is whitespace-only text, ignore it. Thus we allow
+ // two paragraphs to be separated by a line that contains only whitespace.
+ //
+ // We don't need to add a space to the parsed text because we just added
+ // a space for the newline.
+ if (Tok.is(tok::text)) {
+ if (isWhitespace(Tok.getText()))
+ ConsumeToken();
+ }
+
+ if (Tok.is(tok::newline)) {
+ ConsumeToken();
+ // We found a paragraph end. This ends the brief description if
+ // a \\brief command or its equivalent was explicitly used.
+ // Stop scanning text because an explicit \\brief paragraph is the
+ // preferred one.
+ if (InBrief)
+ break;
+ // End first paragraph if we found some non-whitespace text.
+ if (InFirstParagraph && !isWhitespace(FirstParagraphOrBrief))
+ InFirstParagraph = false;
+ // End the \\returns paragraph because we found the paragraph end.
+ InReturns = false;
+ }
+ continue;
+ }
+
+ // We didn't handle this token, so just drop it.
+ ConsumeToken();
+ }
+
+ cleanupBrief(FirstParagraphOrBrief);
+ if (!FirstParagraphOrBrief.empty())
+ return FirstParagraphOrBrief;
+
+ cleanupBrief(ReturnsParagraph);
+ return ReturnsParagraph;
+}
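+
+// Worked example (editorial note, illustrative): for the comment
+//   /// \brief Opens the file.
+//   ///
+//   /// Long description follows.
+// Parse() returns "Opens the file.". Without an explicit \brief the first
+// paragraph is used instead, and failing that the "Returns ..." paragraph
+// accumulated from a \returns command.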
+
+} // end namespace comments
+} // end namespace clang
+
+
diff --git a/contrib/llvm/tools/clang/lib/AST/CommentCommandTraits.cpp b/contrib/llvm/tools/clang/lib/AST/CommentCommandTraits.cpp
new file mode 100644
index 0000000..7378a7c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/CommentCommandTraits.cpp
@@ -0,0 +1,139 @@
+//===--- CommentCommandTraits.cpp - Comment command properties --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/CommentCommandTraits.h"
+#include "llvm/ADT/STLExtras.h"
+
+namespace clang {
+namespace comments {
+
+#include "clang/AST/CommentCommandInfo.inc"
+
+CommandTraits::CommandTraits(llvm::BumpPtrAllocator &Allocator,
+ const CommentOptions &CommentOptions) :
+ NextID(llvm::array_lengthof(Commands)), Allocator(Allocator) {
+ registerCommentOptions(CommentOptions);
+}
+
+void CommandTraits::registerCommentOptions(
+ const CommentOptions &CommentOptions) {
+ for (CommentOptions::BlockCommandNamesTy::const_iterator
+ I = CommentOptions.BlockCommandNames.begin(),
+ E = CommentOptions.BlockCommandNames.end();
+ I != E; I++) {
+ registerBlockCommand(*I);
+ }
+}
+
+const CommandInfo *CommandTraits::getCommandInfoOrNULL(StringRef Name) const {
+ if (const CommandInfo *Info = getBuiltinCommandInfo(Name))
+ return Info;
+ return getRegisteredCommandInfo(Name);
+}
+
+const CommandInfo *CommandTraits::getCommandInfo(unsigned CommandID) const {
+ if (const CommandInfo *Info = getBuiltinCommandInfo(CommandID))
+ return Info;
+ return getRegisteredCommandInfo(CommandID);
+}
+
+const CommandInfo *
+CommandTraits::getTypoCorrectCommandInfo(StringRef Typo) const {
+ // Single-character command imposters, such as \t or \n, should not go
+ // through the fixit logic.
+ if (Typo.size() <= 1)
+ return nullptr;
+
+ // The maximum edit distance we're prepared to accept.
+ const unsigned MaxEditDistance = 1;
+
+ unsigned BestEditDistance = MaxEditDistance;
+ SmallVector<const CommandInfo *, 2> BestCommand;
+
+ auto ConsiderCorrection = [&](const CommandInfo *Command) {
+ StringRef Name = Command->Name;
+
+ unsigned MinPossibleEditDistance = abs((int)Name.size() - (int)Typo.size());
+ if (MinPossibleEditDistance <= BestEditDistance) {
+ unsigned EditDistance = Typo.edit_distance(Name, true, BestEditDistance);
+ if (EditDistance < BestEditDistance) {
+ BestEditDistance = EditDistance;
+ BestCommand.clear();
+ }
+ if (EditDistance == BestEditDistance)
+ BestCommand.push_back(Command);
+ }
+ };
+
+ for (const auto &Command : Commands)
+ ConsiderCorrection(&Command);
+
+ for (const auto *Command : RegisteredCommands)
+ if (!Command->IsUnknownCommand)
+ ConsiderCorrection(Command);
+
+ return BestCommand.size() == 1 ? BestCommand[0] : nullptr;
+}
+
+CommandInfo *CommandTraits::createCommandInfoWithName(StringRef CommandName) {
+ char *Name = Allocator.Allocate<char>(CommandName.size() + 1);
+ memcpy(Name, CommandName.data(), CommandName.size());
+ Name[CommandName.size()] = '\0';
+
+ // Value-initialize (=zero-initialize in this case) a new CommandInfo.
+ CommandInfo *Info = new (Allocator) CommandInfo();
+ Info->Name = Name;
+ // We only have a limited number of bits to encode command IDs in the
+ // CommandInfo structure, so the ID numbers can potentially wrap around.
+ assert((NextID < (1 << CommandInfo::NumCommandIDBits))
+ && "Too many commands. We have limited bits for the command ID.");
+ Info->ID = NextID++;
+
+ RegisteredCommands.push_back(Info);
+
+ return Info;
+}
+
+const CommandInfo *CommandTraits::registerUnknownCommand(
+ StringRef CommandName) {
+ CommandInfo *Info = createCommandInfoWithName(CommandName);
+ Info->IsUnknownCommand = true;
+ return Info;
+}
+
+const CommandInfo *CommandTraits::registerBlockCommand(StringRef CommandName) {
+ CommandInfo *Info = createCommandInfoWithName(CommandName);
+ Info->IsBlockCommand = true;
+ return Info;
+}
+
+const CommandInfo *CommandTraits::getBuiltinCommandInfo(
+ unsigned CommandID) {
+ if (CommandID < llvm::array_lengthof(Commands))
+ return &Commands[CommandID];
+ return nullptr;
+}
+
+const CommandInfo *CommandTraits::getRegisteredCommandInfo(
+ StringRef Name) const {
+ for (unsigned i = 0, e = RegisteredCommands.size(); i != e; ++i) {
+ if (RegisteredCommands[i]->Name == Name)
+ return RegisteredCommands[i];
+ }
+ return nullptr;
+}
+
+const CommandInfo *CommandTraits::getRegisteredCommandInfo(
+ unsigned CommandID) const {
+ return RegisteredCommands[CommandID - llvm::array_lengthof(Commands)];
+}
+
+} // end namespace comments
+} // end namespace clang
+
diff --git a/contrib/llvm/tools/clang/lib/AST/CommentLexer.cpp b/contrib/llvm/tools/clang/lib/AST/CommentLexer.cpp
new file mode 100644
index 0000000..98b7e36
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/CommentLexer.cpp
@@ -0,0 +1,851 @@
+#include "clang/AST/CommentLexer.h"
+#include "clang/AST/CommentCommandTraits.h"
+#include "clang/AST/CommentDiagnostic.h"
+#include "clang/Basic/CharInfo.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace clang {
+namespace comments {
+
+void Token::dump(const Lexer &L, const SourceManager &SM) const {
+ llvm::errs() << "comments::Token Kind=" << Kind << " ";
+ Loc.dump(SM);
+ llvm::errs() << " " << Length << " \"" << L.getSpelling(*this, SM) << "\"\n";
+}
+
+static inline bool isHTMLNamedCharacterReferenceCharacter(char C) {
+ return isLetter(C);
+}
+
+static inline bool isHTMLDecimalCharacterReferenceCharacter(char C) {
+ return isDigit(C);
+}
+
+static inline bool isHTMLHexCharacterReferenceCharacter(char C) {
+ return isHexDigit(C);
+}
+
+static inline StringRef convertCodePointToUTF8(
+ llvm::BumpPtrAllocator &Allocator,
+ unsigned CodePoint) {
+ char *Resolved = Allocator.Allocate<char>(UNI_MAX_UTF8_BYTES_PER_CODE_POINT);
+ char *ResolvedPtr = Resolved;
+ if (llvm::ConvertCodePointToUTF8(CodePoint, ResolvedPtr))
+ return StringRef(Resolved, ResolvedPtr - Resolved);
+ else
+ return StringRef();
+}
+
+namespace {
+
+#include "clang/AST/CommentHTMLTags.inc"
+#include "clang/AST/CommentHTMLNamedCharacterReferences.inc"
+
+} // unnamed namespace
+
+StringRef Lexer::resolveHTMLNamedCharacterReference(StringRef Name) const {
+ // Fast path, first check a few most widely used named character references.
+ return llvm::StringSwitch<StringRef>(Name)
+ .Case("amp", "&")
+ .Case("lt", "<")
+ .Case("gt", ">")
+ .Case("quot", "\"")
+ .Case("apos", "\'")
+ // Slow path.
+ .Default(translateHTMLNamedCharacterReferenceToUTF8(Name));
+}
+
+StringRef Lexer::resolveHTMLDecimalCharacterReference(StringRef Name) const {
+ unsigned CodePoint = 0;
+ for (unsigned i = 0, e = Name.size(); i != e; ++i) {
+ assert(isHTMLDecimalCharacterReferenceCharacter(Name[i]));
+ CodePoint *= 10;
+ CodePoint += Name[i] - '0';
+ }
+ return convertCodePointToUTF8(Allocator, CodePoint);
+}
+
+StringRef Lexer::resolveHTMLHexCharacterReference(StringRef Name) const {
+ unsigned CodePoint = 0;
+ for (unsigned i = 0, e = Name.size(); i != e; ++i) {
+ CodePoint *= 16;
+ const char C = Name[i];
+ assert(isHTMLHexCharacterReferenceCharacter(C));
+ CodePoint += llvm::hexDigitValue(C);
+ }
+ return convertCodePointToUTF8(Allocator, CodePoint);
+}
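+
+// Worked examples (editorial note, illustrative):
+// resolveHTMLDecimalCharacterReference("65") computes code point 65 and
+// returns the UTF-8 string "A"; resolveHTMLHexCharacterReference("41")
+// computes 0x41 and returns the same "A". Both results are allocated on
+// the lexer's BumpPtrAllocator.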
+
+void Lexer::skipLineStartingDecorations() {
+ // This function should be called only for C comments
+ assert(CommentState == LCS_InsideCComment);
+
+ if (BufferPtr == CommentEnd)
+ return;
+
+ switch (*BufferPtr) {
+ case ' ':
+ case '\t':
+ case '\f':
+ case '\v': {
+ const char *NewBufferPtr = BufferPtr;
+ NewBufferPtr++;
+ if (NewBufferPtr == CommentEnd)
+ return;
+
+ char C = *NewBufferPtr;
+ while (isHorizontalWhitespace(C)) {
+ NewBufferPtr++;
+ if (NewBufferPtr == CommentEnd)
+ return;
+ C = *NewBufferPtr;
+ }
+ if (C == '*')
+ BufferPtr = NewBufferPtr + 1;
+ break;
+ }
+ case '*':
+ BufferPtr++;
+ break;
+ }
+}
+
+namespace {
+/// Returns a pointer to the first newline character in the string.
+const char *findNewline(const char *BufferPtr, const char *BufferEnd) {
+ for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+ if (isVerticalWhitespace(*BufferPtr))
+ return BufferPtr;
+ }
+ return BufferEnd;
+}
+
+const char *skipNewline(const char *BufferPtr, const char *BufferEnd) {
+ if (BufferPtr == BufferEnd)
+ return BufferPtr;
+
+ if (*BufferPtr == '\n')
+ BufferPtr++;
+ else {
+ assert(*BufferPtr == '\r');
+ BufferPtr++;
+ if (BufferPtr != BufferEnd && *BufferPtr == '\n')
+ BufferPtr++;
+ }
+ return BufferPtr;
+}
+
+const char *skipNamedCharacterReference(const char *BufferPtr,
+ const char *BufferEnd) {
+ for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+ if (!isHTMLNamedCharacterReferenceCharacter(*BufferPtr))
+ return BufferPtr;
+ }
+ return BufferEnd;
+}
+
+const char *skipDecimalCharacterReference(const char *BufferPtr,
+ const char *BufferEnd) {
+ for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+ if (!isHTMLDecimalCharacterReferenceCharacter(*BufferPtr))
+ return BufferPtr;
+ }
+ return BufferEnd;
+}
+
+const char *skipHexCharacterReference(const char *BufferPtr,
+ const char *BufferEnd) {
+ for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+ if (!isHTMLHexCharacterReferenceCharacter(*BufferPtr))
+ return BufferPtr;
+ }
+ return BufferEnd;
+}
+
+bool isHTMLIdentifierStartingCharacter(char C) {
+ return isLetter(C);
+}
+
+bool isHTMLIdentifierCharacter(char C) {
+ return isAlphanumeric(C);
+}
+
+const char *skipHTMLIdentifier(const char *BufferPtr, const char *BufferEnd) {
+ for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+ if (!isHTMLIdentifierCharacter(*BufferPtr))
+ return BufferPtr;
+ }
+ return BufferEnd;
+}
+
+/// Skip an HTML string quoted in single or double quotes. Escaped quotes
+/// inside the string are allowed.
+///
+/// Returns a pointer to the closing quote.
+const char *skipHTMLQuotedString(const char *BufferPtr, const char *BufferEnd)
+{
+ const char Quote = *BufferPtr;
+ assert(Quote == '\"' || Quote == '\'');
+
+ BufferPtr++;
+ for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+ const char C = *BufferPtr;
+ if (C == Quote && BufferPtr[-1] != '\\')
+ return BufferPtr;
+ }
+ return BufferEnd;
+}
+
+const char *skipWhitespace(const char *BufferPtr, const char *BufferEnd) {
+ for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+ if (!isWhitespace(*BufferPtr))
+ return BufferPtr;
+ }
+ return BufferEnd;
+}
+
+bool isWhitespace(const char *BufferPtr, const char *BufferEnd) {
+ return skipWhitespace(BufferPtr, BufferEnd) == BufferEnd;
+}
+
+bool isCommandNameStartCharacter(char C) {
+ return isLetter(C);
+}
+
+bool isCommandNameCharacter(char C) {
+ return isAlphanumeric(C);
+}
+
+const char *skipCommandName(const char *BufferPtr, const char *BufferEnd) {
+ for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+ if (!isCommandNameCharacter(*BufferPtr))
+ return BufferPtr;
+ }
+ return BufferEnd;
+}
+
+/// Return the one-past-the-end pointer for BCPL comments.
+/// Handles newlines escaped with a backslash or with the trigraph for
+/// backslash.
+const char *findBCPLCommentEnd(const char *BufferPtr, const char *BufferEnd) {
+ const char *CurPtr = BufferPtr;
+ while (CurPtr != BufferEnd) {
+ while (!isVerticalWhitespace(*CurPtr)) {
+ CurPtr++;
+ if (CurPtr == BufferEnd)
+ return BufferEnd;
+ }
+ // We found a newline, check if it is escaped.
+ const char *EscapePtr = CurPtr - 1;
+ while(isHorizontalWhitespace(*EscapePtr))
+ EscapePtr--;
+
+ if (*EscapePtr == '\\' ||
+ (EscapePtr - 2 >= BufferPtr && EscapePtr[0] == '/' &&
+ EscapePtr[-1] == '?' && EscapePtr[-2] == '?')) {
+ // We found an escaped newline.
+ CurPtr = skipNewline(CurPtr, BufferEnd);
+ } else
+ return CurPtr; // Not an escaped newline.
+ }
+ return BufferEnd;
+}
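+
+// Worked example (editorial note, illustrative): in a BCPL comment whose
+// line ends with a backslash (or with the trigraph "??/"), the newline is
+// escaped, so findBCPLCommentEnd keeps scanning and the next physical line
+// still belongs to the same comment; only an unescaped newline ends it.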
+
+/// Return the one-past-the-end pointer for C comments.
+/// Very dumb: does not handle escaped newlines or trigraphs.
+const char *findCCommentEnd(const char *BufferPtr, const char *BufferEnd) {
+ for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+ if (*BufferPtr == '*') {
+ assert(BufferPtr + 1 != BufferEnd);
+ if (*(BufferPtr + 1) == '/')
+ return BufferPtr;
+ }
+ }
+ llvm_unreachable("buffer end hit before '*/' was seen");
+}
+
+} // unnamed namespace
+
+void Lexer::formTokenWithChars(Token &Result, const char *TokEnd,
+ tok::TokenKind Kind) {
+ const unsigned TokLen = TokEnd - BufferPtr;
+ Result.setLocation(getSourceLocation(BufferPtr));
+ Result.setKind(Kind);
+ Result.setLength(TokLen);
+#ifndef NDEBUG
+ Result.TextPtr = "<UNSET>";
+ Result.IntVal = 7;
+#endif
+ BufferPtr = TokEnd;
+}
+
+void Lexer::lexCommentText(Token &T) {
+ assert(CommentState == LCS_InsideBCPLComment ||
+ CommentState == LCS_InsideCComment);
+
+ switch (State) {
+ case LS_Normal:
+ break;
+ case LS_VerbatimBlockFirstLine:
+ lexVerbatimBlockFirstLine(T);
+ return;
+ case LS_VerbatimBlockBody:
+ lexVerbatimBlockBody(T);
+ return;
+ case LS_VerbatimLineText:
+ lexVerbatimLineText(T);
+ return;
+ case LS_HTMLStartTag:
+ lexHTMLStartTag(T);
+ return;
+ case LS_HTMLEndTag:
+ lexHTMLEndTag(T);
+ return;
+ }
+
+ assert(State == LS_Normal);
+
+ const char *TokenPtr = BufferPtr;
+ assert(TokenPtr < CommentEnd);
+ while (TokenPtr != CommentEnd) {
+ switch(*TokenPtr) {
+ case '\\':
+ case '@': {
+ // Commands that start with a backslash and commands that start with
+ // 'at' have equivalent semantics. But we keep information about the
+ // exact syntax in the comment AST.
+ tok::TokenKind CommandKind =
+ (*TokenPtr == '@') ? tok::at_command : tok::backslash_command;
+ TokenPtr++;
+ if (TokenPtr == CommentEnd) {
+ formTextToken(T, TokenPtr);
+ return;
+ }
+ char C = *TokenPtr;
+ switch (C) {
+ default:
+ break;
+
+ case '\\': case '@': case '&': case '$':
+ case '#': case '<': case '>': case '%':
+ case '\"': case '.': case ':':
+ // This is one of \\ \@ \& \$ etc escape sequences.
+ TokenPtr++;
+ if (C == ':' && TokenPtr != CommentEnd && *TokenPtr == ':') {
+ // This is the \:: escape sequence.
+ TokenPtr++;
+ }
+ StringRef UnescapedText(BufferPtr + 1, TokenPtr - (BufferPtr + 1));
+ formTokenWithChars(T, TokenPtr, tok::text);
+ T.setText(UnescapedText);
+ return;
+ }
+
+ // Don't make zero-length commands.
+ if (!isCommandNameStartCharacter(*TokenPtr)) {
+ formTextToken(T, TokenPtr);
+ return;
+ }
+
+ TokenPtr = skipCommandName(TokenPtr, CommentEnd);
+ unsigned Length = TokenPtr - (BufferPtr + 1);
+
+ // Hardcoded support for lexing LaTeX formula commands
+ // \f$ \f[ \f] \f{ \f} as a single command.
+ if (Length == 1 && TokenPtr[-1] == 'f' && TokenPtr != CommentEnd) {
+ C = *TokenPtr;
+ if (C == '$' || C == '[' || C == ']' || C == '{' || C == '}') {
+ TokenPtr++;
+ Length++;
+ }
+ }
+
+ StringRef CommandName(BufferPtr + 1, Length);
+
+ const CommandInfo *Info = Traits.getCommandInfoOrNULL(CommandName);
+ if (!Info) {
+ if ((Info = Traits.getTypoCorrectCommandInfo(CommandName))) {
+ StringRef CorrectedName = Info->Name;
+ SourceLocation Loc = getSourceLocation(BufferPtr);
+ SourceRange CommandRange(Loc.getLocWithOffset(1),
+ getSourceLocation(TokenPtr));
+ Diag(Loc, diag::warn_correct_comment_command_name)
+ << CommandName << CorrectedName
+ << FixItHint::CreateReplacement(CommandRange, CorrectedName);
+ } else {
+ formTokenWithChars(T, TokenPtr, tok::unknown_command);
+ T.setUnknownCommandName(CommandName);
+ Diag(T.getLocation(), diag::warn_unknown_comment_command_name);
+ return;
+ }
+ }
+ if (Info->IsVerbatimBlockCommand) {
+ setupAndLexVerbatimBlock(T, TokenPtr, *BufferPtr, Info);
+ return;
+ }
+ if (Info->IsVerbatimLineCommand) {
+ setupAndLexVerbatimLine(T, TokenPtr, Info);
+ return;
+ }
+ formTokenWithChars(T, TokenPtr, CommandKind);
+ T.setCommandID(Info->getID());
+ return;
+ }
+
+ case '&':
+ lexHTMLCharacterReference(T);
+ return;
+
+ case '<': {
+ TokenPtr++;
+ if (TokenPtr == CommentEnd) {
+ formTextToken(T, TokenPtr);
+ return;
+ }
+ const char C = *TokenPtr;
+ if (isHTMLIdentifierStartingCharacter(C))
+ setupAndLexHTMLStartTag(T);
+ else if (C == '/')
+ setupAndLexHTMLEndTag(T);
+ else
+ formTextToken(T, TokenPtr);
+
+ return;
+ }
+
+ case '\n':
+ case '\r':
+ TokenPtr = skipNewline(TokenPtr, CommentEnd);
+ formTokenWithChars(T, TokenPtr, tok::newline);
+
+ if (CommentState == LCS_InsideCComment)
+ skipLineStartingDecorations();
+ return;
+
+ default: {
+ size_t End = StringRef(TokenPtr, CommentEnd - TokenPtr).
+ find_first_of("\n\r\\@&<");
+ if (End != StringRef::npos)
+ TokenPtr += End;
+ else
+ TokenPtr = CommentEnd;
+ formTextToken(T, TokenPtr);
+ return;
+ }
+ }
+ }
+}
+
+void Lexer::setupAndLexVerbatimBlock(Token &T,
+ const char *TextBegin,
+ char Marker, const CommandInfo *Info) {
+ assert(Info->IsVerbatimBlockCommand);
+
+ VerbatimBlockEndCommandName.clear();
+ VerbatimBlockEndCommandName.append(Marker == '\\' ? "\\" : "@");
+ VerbatimBlockEndCommandName.append(Info->EndCommandName);
+
+ formTokenWithChars(T, TextBegin, tok::verbatim_block_begin);
+ T.setVerbatimBlockID(Info->getID());
+
+ // If there is a newline following the verbatim opening command, skip the
+ // newline so that we don't create a tok::verbatim_block_line with empty
+ // text content.
+ if (BufferPtr != CommentEnd &&
+ isVerticalWhitespace(*BufferPtr)) {
+ BufferPtr = skipNewline(BufferPtr, CommentEnd);
+ State = LS_VerbatimBlockBody;
+ return;
+ }
+
+ State = LS_VerbatimBlockFirstLine;
+}
+
+void Lexer::lexVerbatimBlockFirstLine(Token &T) {
+again:
+ assert(BufferPtr < CommentEnd);
+
+ // FIXME: It would be better to scan the text once, finding either the block
+ // end command or newline.
+ //
+ // Extract current line.
+ const char *Newline = findNewline(BufferPtr, CommentEnd);
+ StringRef Line(BufferPtr, Newline - BufferPtr);
+
+ // Look for end command in current line.
+ size_t Pos = Line.find(VerbatimBlockEndCommandName);
+ const char *TextEnd;
+ const char *NextLine;
+ if (Pos == StringRef::npos) {
+ // Current line is completely verbatim.
+ TextEnd = Newline;
+ NextLine = skipNewline(Newline, CommentEnd);
+ } else if (Pos == 0) {
+ // Current line contains just an end command.
+ const char *End = BufferPtr + VerbatimBlockEndCommandName.size();
+ StringRef Name(BufferPtr + 1, End - (BufferPtr + 1));
+ formTokenWithChars(T, End, tok::verbatim_block_end);
+ T.setVerbatimBlockID(Traits.getCommandInfo(Name)->getID());
+ State = LS_Normal;
+ return;
+ } else {
+ // There is some text, followed by end command. Extract text first.
+ TextEnd = BufferPtr + Pos;
+ NextLine = TextEnd;
+ // If there is only whitespace before end command, skip whitespace.
+ if (isWhitespace(BufferPtr, TextEnd)) {
+ BufferPtr = TextEnd;
+ goto again;
+ }
+ }
+
+ StringRef Text(BufferPtr, TextEnd - BufferPtr);
+ formTokenWithChars(T, NextLine, tok::verbatim_block_line);
+ T.setVerbatimBlockText(Text);
+
+ State = LS_VerbatimBlockBody;
+}
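+
+// Worked example (editorial note, illustrative): for
+//   \verbatim
+//   raw line
+//   \endverbatim
+// the lexer emits verbatim_block_begin, one verbatim_block_line carrying
+// "raw line" untouched, and verbatim_block_end once the saved
+// VerbatimBlockEndCommandName ("\endverbatim") is found at the start of a
+// line.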
+
+void Lexer::lexVerbatimBlockBody(Token &T) {
+ assert(State == LS_VerbatimBlockBody);
+
+ if (CommentState == LCS_InsideCComment)
+ skipLineStartingDecorations();
+
+ if (BufferPtr == CommentEnd) {
+ formTokenWithChars(T, BufferPtr, tok::verbatim_block_line);
+ T.setVerbatimBlockText("");
+ return;
+ }
+
+ lexVerbatimBlockFirstLine(T);
+}
+
+void Lexer::setupAndLexVerbatimLine(Token &T, const char *TextBegin,
+ const CommandInfo *Info) {
+ assert(Info->IsVerbatimLineCommand);
+ formTokenWithChars(T, TextBegin, tok::verbatim_line_name);
+ T.setVerbatimLineID(Info->getID());
+
+ State = LS_VerbatimLineText;
+}
+
+void Lexer::lexVerbatimLineText(Token &T) {
+ assert(State == LS_VerbatimLineText);
+
+ // Extract current line.
+ const char *Newline = findNewline(BufferPtr, CommentEnd);
+ StringRef Text(BufferPtr, Newline - BufferPtr);
+ formTokenWithChars(T, Newline, tok::verbatim_line_text);
+ T.setVerbatimLineText(Text);
+
+ State = LS_Normal;
+}
+
+void Lexer::lexHTMLCharacterReference(Token &T) {
+ const char *TokenPtr = BufferPtr;
+ assert(*TokenPtr == '&');
+ TokenPtr++;
+ if (TokenPtr == CommentEnd) {
+ formTextToken(T, TokenPtr);
+ return;
+ }
+ const char *NamePtr;
+ bool isNamed = false;
+ bool isDecimal = false;
+ char C = *TokenPtr;
+ if (isHTMLNamedCharacterReferenceCharacter(C)) {
+ NamePtr = TokenPtr;
+ TokenPtr = skipNamedCharacterReference(TokenPtr, CommentEnd);
+ isNamed = true;
+ } else if (C == '#') {
+ TokenPtr++;
+ if (TokenPtr == CommentEnd) {
+ formTextToken(T, TokenPtr);
+ return;
+ }
+ C = *TokenPtr;
+ if (isHTMLDecimalCharacterReferenceCharacter(C)) {
+ NamePtr = TokenPtr;
+ TokenPtr = skipDecimalCharacterReference(TokenPtr, CommentEnd);
+ isDecimal = true;
+ } else if (C == 'x' || C == 'X') {
+ TokenPtr++;
+ NamePtr = TokenPtr;
+ TokenPtr = skipHexCharacterReference(TokenPtr, CommentEnd);
+ } else {
+ formTextToken(T, TokenPtr);
+ return;
+ }
+ } else {
+ formTextToken(T, TokenPtr);
+ return;
+ }
+ if (NamePtr == TokenPtr || TokenPtr == CommentEnd ||
+ *TokenPtr != ';') {
+ formTextToken(T, TokenPtr);
+ return;
+ }
+ StringRef Name(NamePtr, TokenPtr - NamePtr);
+ TokenPtr++; // Skip semicolon.
+ StringRef Resolved;
+ if (isNamed)
+ Resolved = resolveHTMLNamedCharacterReference(Name);
+ else if (isDecimal)
+ Resolved = resolveHTMLDecimalCharacterReference(Name);
+ else
+ Resolved = resolveHTMLHexCharacterReference(Name);
+
+ if (Resolved.empty()) {
+ formTextToken(T, TokenPtr);
+ return;
+ }
+ formTokenWithChars(T, TokenPtr, tok::text);
+ T.setText(Resolved);
+ return;
+}
+
+void Lexer::setupAndLexHTMLStartTag(Token &T) {
+ assert(BufferPtr[0] == '<' &&
+ isHTMLIdentifierStartingCharacter(BufferPtr[1]));
+ const char *TagNameEnd = skipHTMLIdentifier(BufferPtr + 2, CommentEnd);
+ StringRef Name(BufferPtr + 1, TagNameEnd - (BufferPtr + 1));
+ if (!isHTMLTagName(Name)) {
+ formTextToken(T, TagNameEnd);
+ return;
+ }
+
+ formTokenWithChars(T, TagNameEnd, tok::html_start_tag);
+ T.setHTMLTagStartName(Name);
+
+ BufferPtr = skipWhitespace(BufferPtr, CommentEnd);
+
+ // Check the bound before reading: when BufferPtr == CommentEnd,
+ // dereferencing it may touch memory one past the end of the comment.
+ if (BufferPtr != CommentEnd) {
+ const char C = *BufferPtr;
+ if (C == '>' || C == '/' || isHTMLIdentifierStartingCharacter(C))
+ State = LS_HTMLStartTag;
+ }
+}
+
+void Lexer::lexHTMLStartTag(Token &T) {
+ assert(State == LS_HTMLStartTag);
+
+ const char *TokenPtr = BufferPtr;
+ char C = *TokenPtr;
+ if (isHTMLIdentifierCharacter(C)) {
+ TokenPtr = skipHTMLIdentifier(TokenPtr, CommentEnd);
+ StringRef Ident(BufferPtr, TokenPtr - BufferPtr);
+ formTokenWithChars(T, TokenPtr, tok::html_ident);
+ T.setHTMLIdent(Ident);
+ } else {
+ switch (C) {
+ case '=':
+ TokenPtr++;
+ formTokenWithChars(T, TokenPtr, tok::html_equals);
+ break;
+ case '\"':
+ case '\'': {
+ const char *OpenQuote = TokenPtr;
+ TokenPtr = skipHTMLQuotedString(TokenPtr, CommentEnd);
+ const char *ClosingQuote = TokenPtr;
+ if (TokenPtr != CommentEnd) // Skip closing quote.
+ TokenPtr++;
+ formTokenWithChars(T, TokenPtr, tok::html_quoted_string);
+ T.setHTMLQuotedString(StringRef(OpenQuote + 1,
+ ClosingQuote - (OpenQuote + 1)));
+ break;
+ }
+ case '>':
+ TokenPtr++;
+ formTokenWithChars(T, TokenPtr, tok::html_greater);
+ State = LS_Normal;
+ return;
+ case '/':
+ TokenPtr++;
+ if (TokenPtr != CommentEnd && *TokenPtr == '>') {
+ TokenPtr++;
+ formTokenWithChars(T, TokenPtr, tok::html_slash_greater);
+ } else
+ formTextToken(T, TokenPtr);
+
+ State = LS_Normal;
+ return;
+ }
+ }
+
+ // Now look ahead and return to normal state if we don't see any HTML tokens
+ // ahead.
+ BufferPtr = skipWhitespace(BufferPtr, CommentEnd);
+ if (BufferPtr == CommentEnd) {
+ State = LS_Normal;
+ return;
+ }
+
+ C = *BufferPtr;
+ if (!isHTMLIdentifierStartingCharacter(C) &&
+ C != '=' && C != '\"' && C != '\'' && C != '>') {
+ State = LS_Normal;
+ return;
+ }
+}
+
+void Lexer::setupAndLexHTMLEndTag(Token &T) {
+ assert(BufferPtr[0] == '<' && BufferPtr[1] == '/');
+
+ const char *TagNameBegin = skipWhitespace(BufferPtr + 2, CommentEnd);
+ const char *TagNameEnd = skipHTMLIdentifier(TagNameBegin, CommentEnd);
+ StringRef Name(TagNameBegin, TagNameEnd - TagNameBegin);
+ if (!isHTMLTagName(Name)) {
+ formTextToken(T, TagNameEnd);
+ return;
+ }
+
+ const char *End = skipWhitespace(TagNameEnd, CommentEnd);
+
+ formTokenWithChars(T, End, tok::html_end_tag);
+ T.setHTMLTagEndName(Name);
+
+ if (BufferPtr != CommentEnd && *BufferPtr == '>')
+ State = LS_HTMLEndTag;
+}
+
+void Lexer::lexHTMLEndTag(Token &T) {
+ assert(BufferPtr != CommentEnd && *BufferPtr == '>');
+
+ formTokenWithChars(T, BufferPtr + 1, tok::html_greater);
+ State = LS_Normal;
+}
+
+Lexer::Lexer(llvm::BumpPtrAllocator &Allocator, DiagnosticsEngine &Diags,
+ const CommandTraits &Traits,
+ SourceLocation FileLoc,
+ const char *BufferStart, const char *BufferEnd):
+ Allocator(Allocator), Diags(Diags), Traits(Traits),
+ BufferStart(BufferStart), BufferEnd(BufferEnd),
+ FileLoc(FileLoc), BufferPtr(BufferStart),
+ CommentState(LCS_BeforeComment), State(LS_Normal) {
+}
+
+void Lexer::lex(Token &T) {
+again:
+ switch (CommentState) {
+ case LCS_BeforeComment:
+ if (BufferPtr == BufferEnd) {
+ formTokenWithChars(T, BufferPtr, tok::eof);
+ return;
+ }
+
+ assert(*BufferPtr == '/');
+ BufferPtr++; // Skip first slash.
+ switch (*BufferPtr) {
+ case '/': { // BCPL comment.
+ BufferPtr++; // Skip second slash.
+
+ if (BufferPtr != BufferEnd) {
+ // Skip Doxygen magic marker, if it is present.
+ // It might be missing because of a typo //< or /*<, or because we
+ // merged this non-Doxygen comment into a bunch of Doxygen comments
+ // around it: /** ... */ /* ... */ /** ... */
+ const char C = *BufferPtr;
+ if (C == '/' || C == '!')
+ BufferPtr++;
+ }
+
+ // Skip less-than symbol that marks trailing comments.
+ // Skip it even if the comment is not a Doxygen one, because //< and /*<
+ // are frequent typos.
+ if (BufferPtr != BufferEnd && *BufferPtr == '<')
+ BufferPtr++;
+
+ CommentState = LCS_InsideBCPLComment;
+ if (State != LS_VerbatimBlockBody && State != LS_VerbatimBlockFirstLine)
+ State = LS_Normal;
+ CommentEnd = findBCPLCommentEnd(BufferPtr, BufferEnd);
+ goto again;
+ }
+ case '*': { // C comment.
+ BufferPtr++; // Skip star.
+
+ // Skip Doxygen magic marker.
+ const char C = *BufferPtr;
+ if ((C == '*' && *(BufferPtr + 1) != '/') || C == '!')
+ BufferPtr++;
+
+ // Skip less-than symbol that marks trailing comments.
+ if (BufferPtr != BufferEnd && *BufferPtr == '<')
+ BufferPtr++;
+
+ CommentState = LCS_InsideCComment;
+ State = LS_Normal;
+ CommentEnd = findCCommentEnd(BufferPtr, BufferEnd);
+ goto again;
+ }
+ default:
+ llvm_unreachable("second character of comment should be '/' or '*'");
+ }
+
+ case LCS_BetweenComments: {
+ // Consecutive comments are extracted only when nothing but whitespace
+ // separates them, so we can simply search for the start of the next
+ // comment.
+ const char *EndWhitespace = BufferPtr;
+ while (EndWhitespace != BufferEnd && *EndWhitespace != '/')
+ EndWhitespace++;
+
+ // Turn the whitespace between comments (and it is only whitespace --
+ // guaranteed by comment extraction) into a single newline token. In
+ // total we get two newlines between C comments: the first one was
+ // synthesized right after the previous comment.
+ formTokenWithChars(T, EndWhitespace, tok::newline);
+
+ CommentState = LCS_BeforeComment;
+ break;
+ }
+
+ case LCS_InsideBCPLComment:
+ case LCS_InsideCComment:
+ if (BufferPtr != CommentEnd) {
+ lexCommentText(T);
+ break;
+ } else {
+ // Skip C comment closing sequence.
+ if (CommentState == LCS_InsideCComment) {
+ assert(BufferPtr[0] == '*' && BufferPtr[1] == '/');
+ BufferPtr += 2;
+ assert(BufferPtr <= BufferEnd);
+
+ // Synthesize a newline just after the C comment, regardless of
+ // whether there is actually a newline.
+ formTokenWithChars(T, BufferPtr, tok::newline);
+
+ CommentState = LCS_BetweenComments;
+ break;
+ } else {
+ // Don't synthesize a newline after a BCPL comment.
+ CommentState = LCS_BetweenComments;
+ goto again;
+ }
+ }
+ }
+}
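+
+ // Summary of the comment-level state machine above:
+ //
+ //   LCS_BeforeComment     --"//"--> LCS_InsideBCPLComment
+ //   LCS_BeforeComment     --"/*"--> LCS_InsideCComment
+ //   LCS_InsideCComment    --"*/"--> synthesized tok::newline,
+ //                                   LCS_BetweenComments
+ //   LCS_InsideBCPLComment --end---> LCS_BetweenComments (no synthesized
+ //                                   newline; the gap token covers it)
+ //   LCS_BetweenComments   --gap---> one tok::newline, LCS_BeforeComment
+ //
+ // So for `/** a */ /** b */` the stream carries two newline tokens between
+ // the comments: one synthesized after "*/" and one for the whitespace gap.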
+
+StringRef Lexer::getSpelling(const Token &Tok,
+ const SourceManager &SourceMgr,
+ bool *Invalid) const {
+ SourceLocation Loc = Tok.getLocation();
+ std::pair<FileID, unsigned> LocInfo = SourceMgr.getDecomposedLoc(Loc);
+
+ bool InvalidTemp = false;
+ StringRef File = SourceMgr.getBufferData(LocInfo.first, &InvalidTemp);
+ if (InvalidTemp) {
+ *Invalid = true;
+ return StringRef();
+ }
+
+ const char *Begin = File.data() + LocInfo.second;
+ return StringRef(Begin, Tok.getLength());
+}
+
+} // end namespace comments
+} // end namespace clang
+
diff --git a/contrib/llvm/tools/clang/lib/AST/CommentParser.cpp b/contrib/llvm/tools/clang/lib/AST/CommentParser.cpp
new file mode 100644
index 0000000..cb37ec3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/CommentParser.cpp
@@ -0,0 +1,776 @@
+//===--- CommentParser.cpp - Doxygen comment parser -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/CommentParser.h"
+#include "clang/AST/CommentCommandTraits.h"
+#include "clang/AST/CommentDiagnostic.h"
+#include "clang/AST/CommentSema.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace clang {
+
+static inline bool isWhitespace(llvm::StringRef S) {
+ for (StringRef::const_iterator I = S.begin(), E = S.end(); I != E; ++I) {
+ if (!isWhitespace(*I))
+ return false;
+ }
+ return true;
+}
+
+namespace comments {
+
+/// Re-lexes a sequence of tok::text tokens.
+class TextTokenRetokenizer {
+ llvm::BumpPtrAllocator &Allocator;
+ Parser &P;
+
+ /// This flag is set when there are no more tokens we can fetch from lexer.
+ bool NoMoreInterestingTokens;
+
+ /// Token buffer: tokens we have processed and lookahead.
+ SmallVector<Token, 16> Toks;
+
+ /// A position in \c Toks.
+ struct Position {
+ unsigned CurToken;
+ const char *BufferStart;
+ const char *BufferEnd;
+ const char *BufferPtr;
+ SourceLocation BufferStartLoc;
+ };
+
+ /// Current position in Toks.
+ Position Pos;
+
+ bool isEnd() const {
+ return Pos.CurToken >= Toks.size();
+ }
+
+ /// Sets up the buffer pointers to point to current token.
+ void setupBuffer() {
+ assert(!isEnd());
+ const Token &Tok = Toks[Pos.CurToken];
+
+ Pos.BufferStart = Tok.getText().begin();
+ Pos.BufferEnd = Tok.getText().end();
+ Pos.BufferPtr = Pos.BufferStart;
+ Pos.BufferStartLoc = Tok.getLocation();
+ }
+
+ SourceLocation getSourceLocation() const {
+ const unsigned CharNo = Pos.BufferPtr - Pos.BufferStart;
+ return Pos.BufferStartLoc.getLocWithOffset(CharNo);
+ }
+
+ char peek() const {
+ assert(!isEnd());
+ assert(Pos.BufferPtr != Pos.BufferEnd);
+ return *Pos.BufferPtr;
+ }
+
+ void consumeChar() {
+ assert(!isEnd());
+ assert(Pos.BufferPtr != Pos.BufferEnd);
+ Pos.BufferPtr++;
+ if (Pos.BufferPtr == Pos.BufferEnd) {
+ Pos.CurToken++;
+ if (isEnd() && !addToken())
+ return;
+
+ assert(!isEnd());
+ setupBuffer();
+ }
+ }
+
+ /// Fetch one more token from the lexer and add it to the buffer.
+ /// Returns true on success, false if there are no interesting tokens
+ /// left to fetch from the lexer.
+ bool addToken() {
+ if (NoMoreInterestingTokens)
+ return false;
+
+ if (P.Tok.is(tok::newline)) {
+ // If we see a single newline token between text tokens, skip it.
+ Token Newline = P.Tok;
+ P.consumeToken();
+ if (P.Tok.isNot(tok::text)) {
+ P.putBack(Newline);
+ NoMoreInterestingTokens = true;
+ return false;
+ }
+ }
+ if (P.Tok.isNot(tok::text)) {
+ NoMoreInterestingTokens = true;
+ return false;
+ }
+
+ Toks.push_back(P.Tok);
+ P.consumeToken();
+ if (Toks.size() == 1)
+ setupBuffer();
+ return true;
+ }
+
+ void consumeWhitespace() {
+ while (!isEnd()) {
+ if (isWhitespace(peek()))
+ consumeChar();
+ else
+ break;
+ }
+ }
+
+ void formTokenWithChars(Token &Result,
+ SourceLocation Loc,
+ const char *TokBegin,
+ unsigned TokLength,
+ StringRef Text) {
+ Result.setLocation(Loc);
+ Result.setKind(tok::text);
+ Result.setLength(TokLength);
+#ifndef NDEBUG
+ Result.TextPtr = "<UNSET>";
+ Result.IntVal = 7;
+#endif
+ Result.setText(Text);
+ }
+
+public:
+ TextTokenRetokenizer(llvm::BumpPtrAllocator &Allocator, Parser &P):
+ Allocator(Allocator), P(P), NoMoreInterestingTokens(false) {
+ Pos.CurToken = 0;
+ addToken();
+ }
+
+ /// Extract a word -- sequence of non-whitespace characters.
+ bool lexWord(Token &Tok) {
+ if (isEnd())
+ return false;
+
+ Position SavedPos = Pos;
+
+ consumeWhitespace();
+ SmallString<32> WordText;
+ const char *WordBegin = Pos.BufferPtr;
+ SourceLocation Loc = getSourceLocation();
+ while (!isEnd()) {
+ const char C = peek();
+ if (!isWhitespace(C)) {
+ WordText.push_back(C);
+ consumeChar();
+ } else
+ break;
+ }
+ const unsigned Length = WordText.size();
+ if (Length == 0) {
+ Pos = SavedPos;
+ return false;
+ }
+
+ char *TextPtr = Allocator.Allocate<char>(Length + 1);
+
+ memcpy(TextPtr, WordText.c_str(), Length + 1);
+ StringRef Text = StringRef(TextPtr, Length);
+
+ formTokenWithChars(Tok, Loc, WordBegin, Length, Text);
+ return true;
+ }
+
+ bool lexDelimitedSeq(Token &Tok, char OpenDelim, char CloseDelim) {
+ if (isEnd())
+ return false;
+
+ Position SavedPos = Pos;
+
+ consumeWhitespace();
+ SmallString<32> WordText;
+ const char *WordBegin = Pos.BufferPtr;
+ SourceLocation Loc = getSourceLocation();
+ bool Error = false;
+ if (!isEnd()) {
+ const char C = peek();
+ if (C == OpenDelim) {
+ WordText.push_back(C);
+ consumeChar();
+ } else
+ Error = true;
+ }
+ char C = '\0';
+ while (!Error && !isEnd()) {
+ C = peek();
+ WordText.push_back(C);
+ consumeChar();
+ if (C == CloseDelim)
+ break;
+ }
+ if (!Error && C != CloseDelim)
+ Error = true;
+
+ if (Error) {
+ Pos = SavedPos;
+ return false;
+ }
+
+ const unsigned Length = WordText.size();
+ char *TextPtr = Allocator.Allocate<char>(Length + 1);
+
+ memcpy(TextPtr, WordText.c_str(), Length + 1);
+ StringRef Text = StringRef(TextPtr, Length);
+
+ formTokenWithChars(Tok, Loc, WordBegin,
+ Pos.BufferPtr - WordBegin, Text);
+ return true;
+ }
+
+ /// Put back tokens that we didn't consume.
+ void putBackLeftoverTokens() {
+ if (isEnd())
+ return;
+
+ bool HavePartialTok = false;
+ Token PartialTok;
+ if (Pos.BufferPtr != Pos.BufferStart) {
+ formTokenWithChars(PartialTok, getSourceLocation(),
+ Pos.BufferPtr, Pos.BufferEnd - Pos.BufferPtr,
+ StringRef(Pos.BufferPtr,
+ Pos.BufferEnd - Pos.BufferPtr));
+ HavePartialTok = true;
+ Pos.CurToken++;
+ }
+
+ P.putBack(llvm::makeArrayRef(Toks.begin() + Pos.CurToken, Toks.end()));
+ Pos.CurToken = Toks.size();
+
+ if (HavePartialTok)
+ P.putBack(PartialTok);
+ }
+};
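+
+ // A standalone sketch (an eager simplification of the lazy retokenizer
+ // above; lexWords is an illustrative name, not clang API): text chunks are
+ // treated as one character stream, so a word can span a chunk boundary.
+ #include <cassert>
+ #include <cctype>
+ #include <string>
+ #include <vector>
+
+ static std::vector<std::string>
+ lexWords(const std::vector<std::string> &Chunks) {
+   std::string Stream;
+   for (const std::string &C : Chunks)
+     Stream += C; // single newlines between text tokens were already dropped
+   std::vector<std::string> Words;
+   std::string Cur;
+   for (char Ch : Stream) {
+     if (std::isspace(static_cast<unsigned char>(Ch))) {
+       if (!Cur.empty())
+         Words.push_back(Cur);
+       Cur.clear();
+     } else
+       Cur.push_back(Ch);
+   }
+   if (!Cur.empty())
+     Words.push_back(Cur);
+   return Words;
+ }
+
+ int main() {
+   // "va" + "lue" arrive as two text tokens but lex as a single word.
+   std::vector<std::string> W = lexWords({"[in] va", "lue"});
+   assert(W.size() == 2 && W[0] == "[in]" && W[1] == "value");
+ }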
+
+Parser::Parser(Lexer &L, Sema &S, llvm::BumpPtrAllocator &Allocator,
+ const SourceManager &SourceMgr, DiagnosticsEngine &Diags,
+ const CommandTraits &Traits):
+ L(L), S(S), Allocator(Allocator), SourceMgr(SourceMgr), Diags(Diags),
+ Traits(Traits) {
+ consumeToken();
+}
+
+void Parser::parseParamCommandArgs(ParamCommandComment *PC,
+ TextTokenRetokenizer &Retokenizer) {
+ Token Arg;
+ // Check if argument looks like direction specification: [dir]
+ // e.g., [in], [out], [in,out]
+ if (Retokenizer.lexDelimitedSeq(Arg, '[', ']'))
+ S.actOnParamCommandDirectionArg(PC,
+ Arg.getLocation(),
+ Arg.getEndLocation(),
+ Arg.getText());
+
+ if (Retokenizer.lexWord(Arg))
+ S.actOnParamCommandParamNameArg(PC,
+ Arg.getLocation(),
+ Arg.getEndLocation(),
+ Arg.getText());
+}
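+
+ // Illustrative example: for `\param [in] Count number of items`, the
+ // retokenizer first yields the delimited sequence "[in]" (the direction),
+ // then the word "Count" (the parameter name); "number of items" is put
+ // back and becomes the command's attached paragraph.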
+
+void Parser::parseTParamCommandArgs(TParamCommandComment *TPC,
+ TextTokenRetokenizer &Retokenizer) {
+ Token Arg;
+ if (Retokenizer.lexWord(Arg))
+ S.actOnTParamCommandParamNameArg(TPC,
+ Arg.getLocation(),
+ Arg.getEndLocation(),
+ Arg.getText());
+}
+
+void Parser::parseBlockCommandArgs(BlockCommandComment *BC,
+ TextTokenRetokenizer &Retokenizer,
+ unsigned NumArgs) {
+ typedef BlockCommandComment::Argument Argument;
+ Argument *Args =
+ new (Allocator.Allocate<Argument>(NumArgs)) Argument[NumArgs];
+ unsigned ParsedArgs = 0;
+ Token Arg;
+ while (ParsedArgs < NumArgs && Retokenizer.lexWord(Arg)) {
+ Args[ParsedArgs] = Argument(SourceRange(Arg.getLocation(),
+ Arg.getEndLocation()),
+ Arg.getText());
+ ParsedArgs++;
+ }
+
+ S.actOnBlockCommandArgs(BC, llvm::makeArrayRef(Args, ParsedArgs));
+}
+
+BlockCommandComment *Parser::parseBlockCommand() {
+ assert(Tok.is(tok::backslash_command) || Tok.is(tok::at_command));
+
+ ParamCommandComment *PC = nullptr;
+ TParamCommandComment *TPC = nullptr;
+ BlockCommandComment *BC = nullptr;
+ const CommandInfo *Info = Traits.getCommandInfo(Tok.getCommandID());
+ CommandMarkerKind CommandMarker =
+ Tok.is(tok::backslash_command) ? CMK_Backslash : CMK_At;
+ if (Info->IsParamCommand) {
+ PC = S.actOnParamCommandStart(Tok.getLocation(),
+ Tok.getEndLocation(),
+ Tok.getCommandID(),
+ CommandMarker);
+ } else if (Info->IsTParamCommand) {
+ TPC = S.actOnTParamCommandStart(Tok.getLocation(),
+ Tok.getEndLocation(),
+ Tok.getCommandID(),
+ CommandMarker);
+ } else {
+ BC = S.actOnBlockCommandStart(Tok.getLocation(),
+ Tok.getEndLocation(),
+ Tok.getCommandID(),
+ CommandMarker);
+ }
+ consumeToken();
+
+ if (isTokBlockCommand()) {
+ // Block command ahead. We can't nest block commands, so pretend that this
+ // command has an empty argument.
+ ParagraphComment *Paragraph = S.actOnParagraphComment(None);
+ if (PC) {
+ S.actOnParamCommandFinish(PC, Paragraph);
+ return PC;
+ } else if (TPC) {
+ S.actOnTParamCommandFinish(TPC, Paragraph);
+ return TPC;
+ } else {
+ S.actOnBlockCommandFinish(BC, Paragraph);
+ return BC;
+ }
+ }
+
+ if (PC || TPC || Info->NumArgs > 0) {
+ // In order to parse command arguments we need to retokenize a few
+ // following text tokens.
+ TextTokenRetokenizer Retokenizer(Allocator, *this);
+
+ if (PC)
+ parseParamCommandArgs(PC, Retokenizer);
+ else if (TPC)
+ parseTParamCommandArgs(TPC, Retokenizer);
+ else
+ parseBlockCommandArgs(BC, Retokenizer, Info->NumArgs);
+
+ Retokenizer.putBackLeftoverTokens();
+ }
+
+ // If there's a block command ahead, we will attach an empty paragraph to
+ // this command.
+ bool EmptyParagraph = false;
+ if (isTokBlockCommand())
+ EmptyParagraph = true;
+ else if (Tok.is(tok::newline)) {
+ Token PrevTok = Tok;
+ consumeToken();
+ EmptyParagraph = isTokBlockCommand();
+ putBack(PrevTok);
+ }
+
+ ParagraphComment *Paragraph;
+ if (EmptyParagraph)
+ Paragraph = S.actOnParagraphComment(None);
+ else {
+ BlockContentComment *Block = parseParagraphOrBlockCommand();
+ // Since we have checked for a block command, we should have parsed a
+ // paragraph.
+ Paragraph = cast<ParagraphComment>(Block);
+ }
+
+ if (PC) {
+ S.actOnParamCommandFinish(PC, Paragraph);
+ return PC;
+ } else if (TPC) {
+ S.actOnTParamCommandFinish(TPC, Paragraph);
+ return TPC;
+ } else {
+ S.actOnBlockCommandFinish(BC, Paragraph);
+ return BC;
+ }
+}
+
+InlineCommandComment *Parser::parseInlineCommand() {
+ assert(Tok.is(tok::backslash_command) || Tok.is(tok::at_command));
+
+ const Token CommandTok = Tok;
+ consumeToken();
+
+ TextTokenRetokenizer Retokenizer(Allocator, *this);
+
+ Token ArgTok;
+ bool ArgTokValid = Retokenizer.lexWord(ArgTok);
+
+ InlineCommandComment *IC;
+ if (ArgTokValid) {
+ IC = S.actOnInlineCommand(CommandTok.getLocation(),
+ CommandTok.getEndLocation(),
+ CommandTok.getCommandID(),
+ ArgTok.getLocation(),
+ ArgTok.getEndLocation(),
+ ArgTok.getText());
+ } else {
+ IC = S.actOnInlineCommand(CommandTok.getLocation(),
+ CommandTok.getEndLocation(),
+ CommandTok.getCommandID());
+ }
+
+ Retokenizer.putBackLeftoverTokens();
+
+ return IC;
+}
+
+HTMLStartTagComment *Parser::parseHTMLStartTag() {
+ assert(Tok.is(tok::html_start_tag));
+ HTMLStartTagComment *HST =
+ S.actOnHTMLStartTagStart(Tok.getLocation(),
+ Tok.getHTMLTagStartName());
+ consumeToken();
+
+ SmallVector<HTMLStartTagComment::Attribute, 2> Attrs;
+ while (true) {
+ switch (Tok.getKind()) {
+ case tok::html_ident: {
+ Token Ident = Tok;
+ consumeToken();
+ if (Tok.isNot(tok::html_equals)) {
+ Attrs.push_back(HTMLStartTagComment::Attribute(Ident.getLocation(),
+ Ident.getHTMLIdent()));
+ continue;
+ }
+ Token Equals = Tok;
+ consumeToken();
+ if (Tok.isNot(tok::html_quoted_string)) {
+ Diag(Tok.getLocation(),
+ diag::warn_doc_html_start_tag_expected_quoted_string)
+ << SourceRange(Equals.getLocation());
+ Attrs.push_back(HTMLStartTagComment::Attribute(Ident.getLocation(),
+ Ident.getHTMLIdent()));
+ while (Tok.is(tok::html_equals) ||
+ Tok.is(tok::html_quoted_string))
+ consumeToken();
+ continue;
+ }
+ Attrs.push_back(HTMLStartTagComment::Attribute(
+ Ident.getLocation(),
+ Ident.getHTMLIdent(),
+ Equals.getLocation(),
+ SourceRange(Tok.getLocation(),
+ Tok.getEndLocation()),
+ Tok.getHTMLQuotedString()));
+ consumeToken();
+ continue;
+ }
+
+ case tok::html_greater:
+ S.actOnHTMLStartTagFinish(HST,
+ S.copyArray(llvm::makeArrayRef(Attrs)),
+ Tok.getLocation(),
+ /* IsSelfClosing = */ false);
+ consumeToken();
+ return HST;
+
+ case tok::html_slash_greater:
+ S.actOnHTMLStartTagFinish(HST,
+ S.copyArray(llvm::makeArrayRef(Attrs)),
+ Tok.getLocation(),
+ /* IsSelfClosing = */ true);
+ consumeToken();
+ return HST;
+
+ case tok::html_equals:
+ case tok::html_quoted_string:
+ Diag(Tok.getLocation(),
+ diag::warn_doc_html_start_tag_expected_ident_or_greater);
+ while (Tok.is(tok::html_equals) ||
+ Tok.is(tok::html_quoted_string))
+ consumeToken();
+ if (Tok.is(tok::html_ident) ||
+ Tok.is(tok::html_greater) ||
+ Tok.is(tok::html_slash_greater))
+ continue;
+
+ S.actOnHTMLStartTagFinish(HST,
+ S.copyArray(llvm::makeArrayRef(Attrs)),
+ SourceLocation(),
+ /* IsSelfClosing = */ false);
+ return HST;
+
+ default:
+ // Not a token from an HTML start tag; the HTML tag ended prematurely.
+ S.actOnHTMLStartTagFinish(HST,
+ S.copyArray(llvm::makeArrayRef(Attrs)),
+ SourceLocation(),
+ /* IsSelfClosing = */ false);
+ bool StartLineInvalid;
+ const unsigned StartLine = SourceMgr.getPresumedLineNumber(
+ HST->getLocation(),
+ &StartLineInvalid);
+ bool EndLineInvalid;
+ const unsigned EndLine = SourceMgr.getPresumedLineNumber(
+ Tok.getLocation(),
+ &EndLineInvalid);
+ if (StartLineInvalid || EndLineInvalid || StartLine == EndLine)
+ Diag(Tok.getLocation(),
+ diag::warn_doc_html_start_tag_expected_ident_or_greater)
+ << HST->getSourceRange();
+ else {
+ Diag(Tok.getLocation(),
+ diag::warn_doc_html_start_tag_expected_ident_or_greater);
+ Diag(HST->getLocation(), diag::note_doc_html_tag_started_here)
+ << HST->getSourceRange();
+ }
+ return HST;
+ }
+ }
+}
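+
+ // Illustrative examples: `<table border="1" nowrap>` yields one attribute
+ // with a value (border="1") and one value-less attribute (nowrap). For
+ // `<em a="x" ="y">`, the stray html_equals after the first attribute hits
+ // the recovery path above: a warning is emitted, the '=' and the quoted
+ // string are skipped, and parsing resumes at '>'.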
+
+HTMLEndTagComment *Parser::parseHTMLEndTag() {
+ assert(Tok.is(tok::html_end_tag));
+ Token TokEndTag = Tok;
+ consumeToken();
+ SourceLocation Loc;
+ if (Tok.is(tok::html_greater)) {
+ Loc = Tok.getLocation();
+ consumeToken();
+ }
+
+ return S.actOnHTMLEndTag(TokEndTag.getLocation(),
+ Loc,
+ TokEndTag.getHTMLTagEndName());
+}
+
+BlockContentComment *Parser::parseParagraphOrBlockCommand() {
+ SmallVector<InlineContentComment *, 8> Content;
+
+ while (true) {
+ switch (Tok.getKind()) {
+ case tok::verbatim_block_begin:
+ case tok::verbatim_line_name:
+ case tok::eof:
+ assert(Content.size() != 0);
+ break; // Block content or EOF ahead, finish this paragraph.
+
+ case tok::unknown_command:
+ Content.push_back(S.actOnUnknownCommand(Tok.getLocation(),
+ Tok.getEndLocation(),
+ Tok.getUnknownCommandName()));
+ consumeToken();
+ continue;
+
+ case tok::backslash_command:
+ case tok::at_command: {
+ const CommandInfo *Info = Traits.getCommandInfo(Tok.getCommandID());
+ if (Info->IsBlockCommand) {
+ if (Content.size() == 0)
+ return parseBlockCommand();
+ break; // Block command ahead, finish this paragraph.
+ }
+ if (Info->IsVerbatimBlockEndCommand) {
+ Diag(Tok.getLocation(),
+ diag::warn_verbatim_block_end_without_start)
+ << Tok.is(tok::at_command)
+ << Info->Name
+ << SourceRange(Tok.getLocation(), Tok.getEndLocation());
+ consumeToken();
+ continue;
+ }
+ if (Info->IsUnknownCommand) {
+ Content.push_back(S.actOnUnknownCommand(Tok.getLocation(),
+ Tok.getEndLocation(),
+ Info->getID()));
+ consumeToken();
+ continue;
+ }
+ assert(Info->IsInlineCommand);
+ Content.push_back(parseInlineCommand());
+ continue;
+ }
+
+ case tok::newline: {
+ consumeToken();
+ if (Tok.is(tok::newline) || Tok.is(tok::eof)) {
+ consumeToken();
+ break; // Two newlines -- end of paragraph.
+ }
+ // Also allow [tok::newline, tok::text, tok::newline] if the middle
+ // tok::text is just whitespace.
+ if (Tok.is(tok::text) && isWhitespace(Tok.getText())) {
+ Token WhitespaceTok = Tok;
+ consumeToken();
+ if (Tok.is(tok::newline) || Tok.is(tok::eof)) {
+ consumeToken();
+ break;
+ }
+ // We have [tok::newline, tok::text, non-newline]. Put back tok::text.
+ putBack(WhitespaceTok);
+ }
+ if (Content.size() > 0)
+ Content.back()->addTrailingNewline();
+ continue;
+ }
+
+ // Don't deal with HTML tag soup now.
+ case tok::html_start_tag:
+ Content.push_back(parseHTMLStartTag());
+ continue;
+
+ case tok::html_end_tag:
+ Content.push_back(parseHTMLEndTag());
+ continue;
+
+ case tok::text:
+ Content.push_back(S.actOnText(Tok.getLocation(),
+ Tok.getEndLocation(),
+ Tok.getText()));
+ consumeToken();
+ continue;
+
+ case tok::verbatim_block_line:
+ case tok::verbatim_block_end:
+ case tok::verbatim_line_text:
+ case tok::html_ident:
+ case tok::html_equals:
+ case tok::html_quoted_string:
+ case tok::html_greater:
+ case tok::html_slash_greater:
+ llvm_unreachable("should not see this token");
+ }
+ break;
+ }
+
+ return S.actOnParagraphComment(S.copyArray(llvm::makeArrayRef(Content)));
+}
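+
+ // Illustrative example: in
+ //   /// First sentence.
+ //   ///
+ //   /// Second paragraph.
+ // the empty middle comment contributes two consecutive tok::newline
+ // tokens, which ends the first paragraph; a middle line holding only
+ // whitespace is handled the same way by the
+ // [tok::newline, whitespace text, tok::newline] rule above.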
+
+VerbatimBlockComment *Parser::parseVerbatimBlock() {
+ assert(Tok.is(tok::verbatim_block_begin));
+
+ VerbatimBlockComment *VB =
+ S.actOnVerbatimBlockStart(Tok.getLocation(),
+ Tok.getVerbatimBlockID());
+ consumeToken();
+
+ // Don't create an empty line if the verbatim opening command is
+ // followed by a newline.
+ if (Tok.is(tok::newline))
+ consumeToken();
+
+ SmallVector<VerbatimBlockLineComment *, 8> Lines;
+ while (Tok.is(tok::verbatim_block_line) ||
+ Tok.is(tok::newline)) {
+ VerbatimBlockLineComment *Line;
+ if (Tok.is(tok::verbatim_block_line)) {
+ Line = S.actOnVerbatimBlockLine(Tok.getLocation(),
+ Tok.getVerbatimBlockText());
+ consumeToken();
+ if (Tok.is(tok::newline)) {
+ consumeToken();
+ }
+ } else {
+ // Empty line, just a tok::newline.
+ Line = S.actOnVerbatimBlockLine(Tok.getLocation(), "");
+ consumeToken();
+ }
+ Lines.push_back(Line);
+ }
+
+ if (Tok.is(tok::verbatim_block_end)) {
+ const CommandInfo *Info = Traits.getCommandInfo(Tok.getVerbatimBlockID());
+ S.actOnVerbatimBlockFinish(VB, Tok.getLocation(),
+ Info->Name,
+ S.copyArray(llvm::makeArrayRef(Lines)));
+ consumeToken();
+ } else {
+ // Unterminated \\verbatim block
+ S.actOnVerbatimBlockFinish(VB, SourceLocation(), "",
+ S.copyArray(llvm::makeArrayRef(Lines)));
+ }
+
+ return VB;
+}
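+
+ // Illustrative example: parsing
+ //   \verbatim
+ //   line one
+ //
+ //   line two
+ //   \endverbatim
+ // yields one VerbatimBlockComment with three lines ("line one", "", and
+ // "line two"). A comment that ends before \endverbatim still produces the
+ // block; it just gets an empty close name.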
+
+VerbatimLineComment *Parser::parseVerbatimLine() {
+ assert(Tok.is(tok::verbatim_line_name));
+
+ Token NameTok = Tok;
+ consumeToken();
+
+ SourceLocation TextBegin;
+ StringRef Text;
+ // The next token might not be a tok::verbatim_line_text if the
+ // verbatim-line command comes just before a newline or the end of the
+ // comment.
+ if (Tok.is(tok::verbatim_line_text)) {
+ TextBegin = Tok.getLocation();
+ Text = Tok.getVerbatimLineText();
+ } else {
+ TextBegin = NameTok.getEndLocation();
+ Text = "";
+ }
+
+ VerbatimLineComment *VL = S.actOnVerbatimLine(NameTok.getLocation(),
+ NameTok.getVerbatimLineID(),
+ TextBegin,
+ Text);
+ consumeToken();
+ return VL;
+}
+
+BlockContentComment *Parser::parseBlockContent() {
+ switch (Tok.getKind()) {
+ case tok::text:
+ case tok::unknown_command:
+ case tok::backslash_command:
+ case tok::at_command:
+ case tok::html_start_tag:
+ case tok::html_end_tag:
+ return parseParagraphOrBlockCommand();
+
+ case tok::verbatim_block_begin:
+ return parseVerbatimBlock();
+
+ case tok::verbatim_line_name:
+ return parseVerbatimLine();
+
+ case tok::eof:
+ case tok::newline:
+ case tok::verbatim_block_line:
+ case tok::verbatim_block_end:
+ case tok::verbatim_line_text:
+ case tok::html_ident:
+ case tok::html_equals:
+ case tok::html_quoted_string:
+ case tok::html_greater:
+ case tok::html_slash_greater:
+ llvm_unreachable("should not see this token");
+ }
+ llvm_unreachable("bogus token kind");
+}
+
+FullComment *Parser::parseFullComment() {
+ // Skip newlines at the beginning of the comment.
+ while (Tok.is(tok::newline))
+ consumeToken();
+
+ SmallVector<BlockContentComment *, 8> Blocks;
+ while (Tok.isNot(tok::eof)) {
+ Blocks.push_back(parseBlockContent());
+
+ // Skip extra newlines after paragraph end.
+ while (Tok.is(tok::newline))
+ consumeToken();
+ }
+ return S.actOnFullComment(S.copyArray(llvm::makeArrayRef(Blocks)));
+}
+
+} // end namespace comments
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/AST/CommentSema.cpp b/contrib/llvm/tools/clang/lib/AST/CommentSema.cpp
new file mode 100644
index 0000000..12823c3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/CommentSema.cpp
@@ -0,0 +1,1098 @@
+//===--- CommentSema.cpp - Doxygen comment semantic analysis --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/CommentSema.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/CommentCommandTraits.h"
+#include "clang/AST/CommentDiagnostic.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+
+namespace clang {
+namespace comments {
+
+namespace {
+#include "clang/AST/CommentHTMLTagsProperties.inc"
+} // unnamed namespace
+
+Sema::Sema(llvm::BumpPtrAllocator &Allocator, const SourceManager &SourceMgr,
+ DiagnosticsEngine &Diags, CommandTraits &Traits,
+ const Preprocessor *PP) :
+ Allocator(Allocator), SourceMgr(SourceMgr), Diags(Diags), Traits(Traits),
+ PP(PP), ThisDeclInfo(nullptr), BriefCommand(nullptr),
+ HeaderfileCommand(nullptr) {
+}
+
+void Sema::setDecl(const Decl *D) {
+ if (!D)
+ return;
+
+ ThisDeclInfo = new (Allocator) DeclInfo;
+ ThisDeclInfo->CommentDecl = D;
+ ThisDeclInfo->IsFilled = false;
+}
+
+ParagraphComment *Sema::actOnParagraphComment(
+ ArrayRef<InlineContentComment *> Content) {
+ return new (Allocator) ParagraphComment(Content);
+}
+
+BlockCommandComment *Sema::actOnBlockCommandStart(
+ SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ unsigned CommandID,
+ CommandMarkerKind CommandMarker) {
+ BlockCommandComment *BC = new (Allocator) BlockCommandComment(LocBegin, LocEnd,
+ CommandID,
+ CommandMarker);
+ checkContainerDecl(BC);
+ return BC;
+}
+
+void Sema::actOnBlockCommandArgs(BlockCommandComment *Command,
+ ArrayRef<BlockCommandComment::Argument> Args) {
+ Command->setArgs(Args);
+}
+
+void Sema::actOnBlockCommandFinish(BlockCommandComment *Command,
+ ParagraphComment *Paragraph) {
+ Command->setParagraph(Paragraph);
+ checkBlockCommandEmptyParagraph(Command);
+ checkBlockCommandDuplicate(Command);
+ if (ThisDeclInfo) {
+ // These checks only make sense if the comment is attached to a
+ // declaration.
+ checkReturnsCommand(Command);
+ checkDeprecatedCommand(Command);
+ }
+}
+
+ParamCommandComment *Sema::actOnParamCommandStart(
+ SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ unsigned CommandID,
+ CommandMarkerKind CommandMarker) {
+ ParamCommandComment *Command =
+ new (Allocator) ParamCommandComment(LocBegin, LocEnd, CommandID,
+ CommandMarker);
+
+ if (!isFunctionDecl())
+ Diag(Command->getLocation(),
+ diag::warn_doc_param_not_attached_to_a_function_decl)
+ << CommandMarker
+ << Command->getCommandNameRange(Traits);
+
+ return Command;
+}
+
+void Sema::checkFunctionDeclVerbatimLine(const BlockCommandComment *Comment) {
+ const CommandInfo *Info = Traits.getCommandInfo(Comment->getCommandID());
+ if (!Info->IsFunctionDeclarationCommand)
+ return;
+
+ unsigned DiagSelect;
+ switch (Comment->getCommandID()) {
+ case CommandTraits::KCI_function:
+ DiagSelect = (!isAnyFunctionDecl() && !isFunctionTemplateDecl())? 1 : 0;
+ break;
+ case CommandTraits::KCI_functiongroup:
+ DiagSelect = (!isAnyFunctionDecl() && !isFunctionTemplateDecl())? 2 : 0;
+ break;
+ case CommandTraits::KCI_method:
+ DiagSelect = !isObjCMethodDecl() ? 3 : 0;
+ break;
+ case CommandTraits::KCI_methodgroup:
+ DiagSelect = !isObjCMethodDecl() ? 4 : 0;
+ break;
+ case CommandTraits::KCI_callback:
+ DiagSelect = !isFunctionPointerVarDecl() ? 5 : 0;
+ break;
+ default:
+ DiagSelect = 0;
+ break;
+ }
+ if (DiagSelect)
+ Diag(Comment->getLocation(), diag::warn_doc_function_method_decl_mismatch)
+ << Comment->getCommandMarker()
+ << (DiagSelect-1) << (DiagSelect-1)
+ << Comment->getSourceRange();
+}
+
+void Sema::checkContainerDeclVerbatimLine(const BlockCommandComment *Comment) {
+ const CommandInfo *Info = Traits.getCommandInfo(Comment->getCommandID());
+ if (!Info->IsRecordLikeDeclarationCommand)
+ return;
+ unsigned DiagSelect;
+ switch (Comment->getCommandID()) {
+ case CommandTraits::KCI_class:
+ DiagSelect = (!isClassOrStructDecl() && !isClassTemplateDecl()) ? 1 : 0;
+ // Allow the @class command on @interface declarations.
+ // FIXME: Currently, \class and @class are indistinguishable, so
+ // \class is also allowed on an @interface declaration.
+ if (DiagSelect && Comment->getCommandMarker() && isObjCInterfaceDecl())
+ DiagSelect = 0;
+ break;
+ case CommandTraits::KCI_interface:
+ DiagSelect = !isObjCInterfaceDecl() ? 2 : 0;
+ break;
+ case CommandTraits::KCI_protocol:
+ DiagSelect = !isObjCProtocolDecl() ? 3 : 0;
+ break;
+ case CommandTraits::KCI_struct:
+ DiagSelect = !isClassOrStructDecl() ? 4 : 0;
+ break;
+ case CommandTraits::KCI_union:
+ DiagSelect = !isUnionDecl() ? 5 : 0;
+ break;
+ default:
+ DiagSelect = 0;
+ break;
+ }
+ if (DiagSelect)
+ Diag(Comment->getLocation(), diag::warn_doc_api_container_decl_mismatch)
+ << Comment->getCommandMarker()
+ << (DiagSelect-1) << (DiagSelect-1)
+ << Comment->getSourceRange();
+}
+
+void Sema::checkContainerDecl(const BlockCommandComment *Comment) {
+ const CommandInfo *Info = Traits.getCommandInfo(Comment->getCommandID());
+ if (!Info->IsRecordLikeDetailCommand || isRecordLikeDecl())
+ return;
+ unsigned DiagSelect;
+ switch (Comment->getCommandID()) {
+ case CommandTraits::KCI_classdesign:
+ DiagSelect = 1;
+ break;
+ case CommandTraits::KCI_coclass:
+ DiagSelect = 2;
+ break;
+ case CommandTraits::KCI_dependency:
+ DiagSelect = 3;
+ break;
+ case CommandTraits::KCI_helper:
+ DiagSelect = 4;
+ break;
+ case CommandTraits::KCI_helperclass:
+ DiagSelect = 5;
+ break;
+ case CommandTraits::KCI_helps:
+ DiagSelect = 6;
+ break;
+ case CommandTraits::KCI_instancesize:
+ DiagSelect = 7;
+ break;
+ case CommandTraits::KCI_ownership:
+ DiagSelect = 8;
+ break;
+ case CommandTraits::KCI_performance:
+ DiagSelect = 9;
+ break;
+ case CommandTraits::KCI_security:
+ DiagSelect = 10;
+ break;
+ case CommandTraits::KCI_superclass:
+ DiagSelect = 11;
+ break;
+ default:
+ DiagSelect = 0;
+ break;
+ }
+ if (DiagSelect)
+ Diag(Comment->getLocation(), diag::warn_doc_container_decl_mismatch)
+ << Comment->getCommandMarker()
+ << (DiagSelect-1)
+ << Comment->getSourceRange();
+}
+
+/// \brief Turn a string into the corresponding PassDirection or -1 if it's not
+/// valid.
+static int getParamPassDirection(StringRef Arg) {
+ return llvm::StringSwitch<int>(Arg)
+ .Case("[in]", ParamCommandComment::In)
+ .Case("[out]", ParamCommandComment::Out)
+ .Cases("[in,out]", "[out,in]", ParamCommandComment::InOut)
+ .Default(-1);
+}
+
+void Sema::actOnParamCommandDirectionArg(ParamCommandComment *Command,
+ SourceLocation ArgLocBegin,
+ SourceLocation ArgLocEnd,
+ StringRef Arg) {
+ std::string ArgLower = Arg.lower();
+ int Direction = getParamPassDirection(ArgLower);
+
+ if (Direction == -1) {
+ // Try again with whitespace removed.
+ ArgLower.erase(
+ std::remove_if(ArgLower.begin(), ArgLower.end(), clang::isWhitespace),
+ ArgLower.end());
+ Direction = getParamPassDirection(ArgLower);
+
+ SourceRange ArgRange(ArgLocBegin, ArgLocEnd);
+ if (Direction != -1) {
+ const char *FixedName = ParamCommandComment::getDirectionAsString(
+ (ParamCommandComment::PassDirection)Direction);
+ Diag(ArgLocBegin, diag::warn_doc_param_spaces_in_direction)
+ << ArgRange << FixItHint::CreateReplacement(ArgRange, FixedName);
+ } else {
+ Diag(ArgLocBegin, diag::warn_doc_param_invalid_direction) << ArgRange;
+ Direction = ParamCommandComment::In; // Sane fallback.
+ }
+ }
+ Command->setDirection((ParamCommandComment::PassDirection)Direction,
+ /*Explicit=*/true);
+}
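+
+ // Illustrative examples: "[in]" -> In, "[out]" -> Out, "[in,out]" and
+ // "[out,in]" -> InOut. "[ In, Out ]" is lowercased first, fails the lookup
+ // because of the spaces, then resolves to InOut once whitespace is
+ // stripped (with a fix-it warning); "[inout]" never resolves and falls
+ // back to In with a warning.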
+
+void Sema::actOnParamCommandParamNameArg(ParamCommandComment *Command,
+ SourceLocation ArgLocBegin,
+ SourceLocation ArgLocEnd,
+ StringRef Arg) {
+ // Parser will not feed us more arguments than needed.
+ assert(Command->getNumArgs() == 0);
+
+ if (!Command->isDirectionExplicit()) {
+ // User didn't provide a direction argument.
+ Command->setDirection(ParamCommandComment::In, /* Explicit = */ false);
+ }
+ typedef BlockCommandComment::Argument Argument;
+ Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin,
+ ArgLocEnd),
+ Arg);
+ Command->setArgs(llvm::makeArrayRef(A, 1));
+}
+
+void Sema::actOnParamCommandFinish(ParamCommandComment *Command,
+ ParagraphComment *Paragraph) {
+ Command->setParagraph(Paragraph);
+ checkBlockCommandEmptyParagraph(Command);
+}
+
+TParamCommandComment *Sema::actOnTParamCommandStart(
+ SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ unsigned CommandID,
+ CommandMarkerKind CommandMarker) {
+ TParamCommandComment *Command =
+ new (Allocator) TParamCommandComment(LocBegin, LocEnd, CommandID,
+ CommandMarker);
+
+ if (!isTemplateOrSpecialization())
+ Diag(Command->getLocation(),
+ diag::warn_doc_tparam_not_attached_to_a_template_decl)
+ << CommandMarker
+ << Command->getCommandNameRange(Traits);
+
+ return Command;
+}
+
+void Sema::actOnTParamCommandParamNameArg(TParamCommandComment *Command,
+ SourceLocation ArgLocBegin,
+ SourceLocation ArgLocEnd,
+ StringRef Arg) {
+ // Parser will not feed us more arguments than needed.
+ assert(Command->getNumArgs() == 0);
+
+ typedef BlockCommandComment::Argument Argument;
+ Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin,
+ ArgLocEnd),
+ Arg);
+ Command->setArgs(llvm::makeArrayRef(A, 1));
+
+ if (!isTemplateOrSpecialization()) {
+ // We already warned that this \\tparam is not attached to a template decl.
+ return;
+ }
+
+ const TemplateParameterList *TemplateParameters =
+ ThisDeclInfo->TemplateParameters;
+ SmallVector<unsigned, 2> Position;
+ if (resolveTParamReference(Arg, TemplateParameters, &Position)) {
+ Command->setPosition(copyArray(llvm::makeArrayRef(Position)));
+ TParamCommandComment *&PrevCommand = TemplateParameterDocs[Arg];
+ if (PrevCommand) {
+ SourceRange ArgRange(ArgLocBegin, ArgLocEnd);
+ Diag(ArgLocBegin, diag::warn_doc_tparam_duplicate)
+ << Arg << ArgRange;
+ Diag(PrevCommand->getLocation(), diag::note_doc_tparam_previous)
+ << PrevCommand->getParamNameRange();
+ }
+ PrevCommand = Command;
+ return;
+ }
+
+ SourceRange ArgRange(ArgLocBegin, ArgLocEnd);
+ Diag(ArgLocBegin, diag::warn_doc_tparam_not_found)
+ << Arg << ArgRange;
+
+ if (!TemplateParameters || TemplateParameters->size() == 0)
+ return;
+
+ StringRef CorrectedName;
+ if (TemplateParameters->size() == 1) {
+ const NamedDecl *Param = TemplateParameters->getParam(0);
+ const IdentifierInfo *II = Param->getIdentifier();
+ if (II)
+ CorrectedName = II->getName();
+ } else {
+ CorrectedName = correctTypoInTParamReference(Arg, TemplateParameters);
+ }
+
+ if (!CorrectedName.empty()) {
+ Diag(ArgLocBegin, diag::note_doc_tparam_name_suggestion)
+ << CorrectedName
+ << FixItHint::CreateReplacement(ArgRange, CorrectedName);
+ }
+
+ return;
+}
+
+void Sema::actOnTParamCommandFinish(TParamCommandComment *Command,
+ ParagraphComment *Paragraph) {
+ Command->setParagraph(Paragraph);
+ checkBlockCommandEmptyParagraph(Command);
+}
+
+InlineCommandComment *Sema::actOnInlineCommand(SourceLocation CommandLocBegin,
+ SourceLocation CommandLocEnd,
+ unsigned CommandID) {
+ ArrayRef<InlineCommandComment::Argument> Args;
+ StringRef CommandName = Traits.getCommandInfo(CommandID)->Name;
+ return new (Allocator) InlineCommandComment(
+ CommandLocBegin,
+ CommandLocEnd,
+ CommandID,
+ getInlineCommandRenderKind(CommandName),
+ Args);
+}
+
+InlineCommandComment *Sema::actOnInlineCommand(SourceLocation CommandLocBegin,
+ SourceLocation CommandLocEnd,
+ unsigned CommandID,
+ SourceLocation ArgLocBegin,
+ SourceLocation ArgLocEnd,
+ StringRef Arg) {
+ typedef InlineCommandComment::Argument Argument;
+ Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin,
+ ArgLocEnd),
+ Arg);
+ StringRef CommandName = Traits.getCommandInfo(CommandID)->Name;
+
+ return new (Allocator) InlineCommandComment(
+ CommandLocBegin,
+ CommandLocEnd,
+ CommandID,
+ getInlineCommandRenderKind(CommandName),
+ llvm::makeArrayRef(A, 1));
+}
+
+InlineContentComment *Sema::actOnUnknownCommand(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef CommandName) {
+ unsigned CommandID = Traits.registerUnknownCommand(CommandName)->getID();
+ return actOnUnknownCommand(LocBegin, LocEnd, CommandID);
+}
+
+InlineContentComment *Sema::actOnUnknownCommand(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ unsigned CommandID) {
+ ArrayRef<InlineCommandComment::Argument> Args;
+ return new (Allocator) InlineCommandComment(
+ LocBegin, LocEnd, CommandID,
+ InlineCommandComment::RenderNormal,
+ Args);
+}
+
+TextComment *Sema::actOnText(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef Text) {
+ return new (Allocator) TextComment(LocBegin, LocEnd, Text);
+}
+
+VerbatimBlockComment *Sema::actOnVerbatimBlockStart(SourceLocation Loc,
+ unsigned CommandID) {
+ StringRef CommandName = Traits.getCommandInfo(CommandID)->Name;
+ return new (Allocator) VerbatimBlockComment(
+ Loc,
+ Loc.getLocWithOffset(1 + CommandName.size()),
+ CommandID);
+}
+
+VerbatimBlockLineComment *Sema::actOnVerbatimBlockLine(SourceLocation Loc,
+ StringRef Text) {
+ return new (Allocator) VerbatimBlockLineComment(Loc, Text);
+}
+
+void Sema::actOnVerbatimBlockFinish(
+ VerbatimBlockComment *Block,
+ SourceLocation CloseNameLocBegin,
+ StringRef CloseName,
+ ArrayRef<VerbatimBlockLineComment *> Lines) {
+ Block->setCloseName(CloseName, CloseNameLocBegin);
+ Block->setLines(Lines);
+}
+
+VerbatimLineComment *Sema::actOnVerbatimLine(SourceLocation LocBegin,
+ unsigned CommandID,
+ SourceLocation TextBegin,
+ StringRef Text) {
+ VerbatimLineComment *VL = new (Allocator) VerbatimLineComment(
+ LocBegin,
+ TextBegin.getLocWithOffset(Text.size()),
+ CommandID,
+ TextBegin,
+ Text);
+ checkFunctionDeclVerbatimLine(VL);
+ checkContainerDeclVerbatimLine(VL);
+ return VL;
+}
+
+HTMLStartTagComment *Sema::actOnHTMLStartTagStart(SourceLocation LocBegin,
+ StringRef TagName) {
+ return new (Allocator) HTMLStartTagComment(LocBegin, TagName);
+}
+
+void Sema::actOnHTMLStartTagFinish(
+ HTMLStartTagComment *Tag,
+ ArrayRef<HTMLStartTagComment::Attribute> Attrs,
+ SourceLocation GreaterLoc,
+ bool IsSelfClosing) {
+ Tag->setAttrs(Attrs);
+ Tag->setGreaterLoc(GreaterLoc);
+ if (IsSelfClosing)
+ Tag->setSelfClosing();
+ else if (!isHTMLEndTagForbidden(Tag->getTagName()))
+ HTMLOpenTags.push_back(Tag);
+}
+
+HTMLEndTagComment *Sema::actOnHTMLEndTag(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef TagName) {
+ HTMLEndTagComment *HET =
+ new (Allocator) HTMLEndTagComment(LocBegin, LocEnd, TagName);
+ if (isHTMLEndTagForbidden(TagName)) {
+ Diag(HET->getLocation(), diag::warn_doc_html_end_forbidden)
+ << TagName << HET->getSourceRange();
+ HET->setIsMalformed();
+ return HET;
+ }
+
+ bool FoundOpen = false;
+ for (SmallVectorImpl<HTMLStartTagComment *>::const_reverse_iterator
+ I = HTMLOpenTags.rbegin(), E = HTMLOpenTags.rend();
+ I != E; ++I) {
+ if ((*I)->getTagName() == TagName) {
+ FoundOpen = true;
+ break;
+ }
+ }
+ if (!FoundOpen) {
+ Diag(HET->getLocation(), diag::warn_doc_html_end_unbalanced)
+ << HET->getSourceRange();
+ HET->setIsMalformed();
+ return HET;
+ }
+
+ while (!HTMLOpenTags.empty()) {
+ HTMLStartTagComment *HST = HTMLOpenTags.pop_back_val();
+ StringRef LastNotClosedTagName = HST->getTagName();
+ if (LastNotClosedTagName == TagName) {
+ // If the start tag is malformed, the end tag is malformed as well.
+ if (HST->isMalformed())
+ HET->setIsMalformed();
+ break;
+ }
+
+ if (isHTMLEndTagOptional(LastNotClosedTagName))
+ continue;
+
+ bool OpenLineInvalid;
+ const unsigned OpenLine = SourceMgr.getPresumedLineNumber(
+ HST->getLocation(),
+ &OpenLineInvalid);
+ bool CloseLineInvalid;
+ const unsigned CloseLine = SourceMgr.getPresumedLineNumber(
+ HET->getLocation(),
+ &CloseLineInvalid);
+
+ if (OpenLineInvalid || CloseLineInvalid || OpenLine == CloseLine) {
+ Diag(HST->getLocation(), diag::warn_doc_html_start_end_mismatch)
+ << HST->getTagName() << HET->getTagName()
+ << HST->getSourceRange() << HET->getSourceRange();
+ HST->setIsMalformed();
+ } else {
+ Diag(HST->getLocation(), diag::warn_doc_html_start_end_mismatch)
+ << HST->getTagName() << HET->getTagName()
+ << HST->getSourceRange();
+ Diag(HET->getLocation(), diag::note_doc_html_end_tag)
+ << HET->getSourceRange();
+ HST->setIsMalformed();
+ }
+ }
+
+ return HET;
+}
+
+FullComment *Sema::actOnFullComment(
+ ArrayRef<BlockContentComment *> Blocks) {
+ FullComment *FC = new (Allocator) FullComment(Blocks, ThisDeclInfo);
+ resolveParamCommandIndexes(FC);
+
+ // Complain about HTML tags that are not closed.
+ while (!HTMLOpenTags.empty()) {
+ HTMLStartTagComment *HST = HTMLOpenTags.pop_back_val();
+ if (isHTMLEndTagOptional(HST->getTagName()))
+ continue;
+
+ Diag(HST->getLocation(), diag::warn_doc_html_missing_end_tag)
+ << HST->getTagName() << HST->getSourceRange();
+ HST->setIsMalformed();
+ }
+
+ return FC;
+}
+
+void Sema::checkBlockCommandEmptyParagraph(BlockCommandComment *Command) {
+ if (Traits.getCommandInfo(Command->getCommandID())->IsEmptyParagraphAllowed)
+ return;
+
+ ParagraphComment *Paragraph = Command->getParagraph();
+ if (Paragraph->isWhitespace()) {
+ SourceLocation DiagLoc;
+ if (Command->getNumArgs() > 0)
+ DiagLoc = Command->getArgRange(Command->getNumArgs() - 1).getEnd();
+ if (!DiagLoc.isValid())
+ DiagLoc = Command->getCommandNameRange(Traits).getEnd();
+ Diag(DiagLoc, diag::warn_doc_block_command_empty_paragraph)
+ << Command->getCommandMarker()
+ << Command->getCommandName(Traits)
+ << Command->getSourceRange();
+ }
+}
+
+void Sema::checkReturnsCommand(const BlockCommandComment *Command) {
+ if (!Traits.getCommandInfo(Command->getCommandID())->IsReturnsCommand)
+ return;
+
+ assert(ThisDeclInfo && "should not call this check on a bare comment");
+
+ if (isFunctionDecl()) {
+ if (ThisDeclInfo->ReturnType->isVoidType()) {
+ unsigned DiagKind;
+ switch (ThisDeclInfo->CommentDecl->getKind()) {
+ default:
+ if (ThisDeclInfo->IsObjCMethod)
+ DiagKind = 3;
+ else
+ DiagKind = 0;
+ break;
+ case Decl::CXXConstructor:
+ DiagKind = 1;
+ break;
+ case Decl::CXXDestructor:
+ DiagKind = 2;
+ break;
+ }
+ Diag(Command->getLocation(),
+ diag::warn_doc_returns_attached_to_a_void_function)
+ << Command->getCommandMarker()
+ << Command->getCommandName(Traits)
+ << DiagKind
+ << Command->getSourceRange();
+ }
+ return;
+ } else if (isObjCPropertyDecl())
+ return;
+
+ Diag(Command->getLocation(),
+ diag::warn_doc_returns_not_attached_to_a_function_decl)
+ << Command->getCommandMarker()
+ << Command->getCommandName(Traits)
+ << Command->getSourceRange();
+}
+
+void Sema::checkBlockCommandDuplicate(const BlockCommandComment *Command) {
+ const CommandInfo *Info = Traits.getCommandInfo(Command->getCommandID());
+ const BlockCommandComment *PrevCommand = nullptr;
+ if (Info->IsBriefCommand) {
+ if (!BriefCommand) {
+ BriefCommand = Command;
+ return;
+ }
+ PrevCommand = BriefCommand;
+ } else if (Info->IsHeaderfileCommand) {
+ if (!HeaderfileCommand) {
+ HeaderfileCommand = Command;
+ return;
+ }
+ PrevCommand = HeaderfileCommand;
+ } else {
+ // We don't want to check this command for duplicates.
+ return;
+ }
+ StringRef CommandName = Command->getCommandName(Traits);
+ StringRef PrevCommandName = PrevCommand->getCommandName(Traits);
+ Diag(Command->getLocation(), diag::warn_doc_block_command_duplicate)
+ << Command->getCommandMarker()
+ << CommandName
+ << Command->getSourceRange();
+ if (CommandName == PrevCommandName)
+ Diag(PrevCommand->getLocation(), diag::note_doc_block_command_previous)
+ << PrevCommand->getCommandMarker()
+ << PrevCommandName
+ << PrevCommand->getSourceRange();
+ else
+ Diag(PrevCommand->getLocation(),
+ diag::note_doc_block_command_previous_alias)
+ << PrevCommand->getCommandMarker()
+ << PrevCommandName
+ << CommandName;
+}
+
+void Sema::checkDeprecatedCommand(const BlockCommandComment *Command) {
+ if (!Traits.getCommandInfo(Command->getCommandID())->IsDeprecatedCommand)
+ return;
+
+ assert(ThisDeclInfo && "should not call this check on a bare comment");
+
+ const Decl *D = ThisDeclInfo->CommentDecl;
+ if (!D)
+ return;
+
+ if (D->hasAttr<DeprecatedAttr>() ||
+ D->hasAttr<AvailabilityAttr>() ||
+ D->hasAttr<UnavailableAttr>())
+ return;
+
+ Diag(Command->getLocation(),
+ diag::warn_doc_deprecated_not_sync)
+ << Command->getSourceRange();
+
+ // Try to emit a fixit with a deprecation attribute.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // Don't emit a Fix-It for non-member function definitions. GCC does not
+ // accept attributes on them.
+ const DeclContext *Ctx = FD->getDeclContext();
+ if ((!Ctx || !Ctx->isRecord()) &&
+ FD->doesThisDeclarationHaveABody())
+ return;
+
+ StringRef AttributeSpelling = "__attribute__((deprecated))";
+ if (PP) {
+ TokenValue Tokens[] = {
+ tok::kw___attribute, tok::l_paren, tok::l_paren,
+ PP->getIdentifierInfo("deprecated"),
+ tok::r_paren, tok::r_paren
+ };
+ StringRef MacroName = PP->getLastMacroWithSpelling(FD->getLocation(),
+ Tokens);
+ if (!MacroName.empty())
+ AttributeSpelling = MacroName;
+ }
+
+ SmallString<64> TextToInsert(" ");
+ TextToInsert += AttributeSpelling;
+ Diag(FD->getLocEnd(),
+ diag::note_add_deprecation_attr)
+ << FixItHint::CreateInsertion(FD->getLocEnd().getLocWithOffset(1),
+ TextToInsert);
+ }
+}
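+
+ // Illustrative fix-it: for
+ //   /// \deprecated Use newFoo() instead.
+ //   void foo();
+ // with no deprecation attribute present, the note suggests rewriting the
+ // declaration as `void foo() __attribute__((deprecated));` -- or inserting
+ // the project's own deprecation macro if one with that token spelling is
+ // visible at that location. Non-member function definitions are skipped
+ // because GCC rejects attributes on them.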
+
+void Sema::resolveParamCommandIndexes(const FullComment *FC) {
+ if (!isFunctionDecl()) {
+ // We already warned that \\param commands are not attached to a function
+ // decl.
+ return;
+ }
+
+ SmallVector<ParamCommandComment *, 8> UnresolvedParamCommands;
+
+ // Comment AST nodes that correspond to \c ParamVars for which we have
+ // found a \\param command or NULL if no documentation was found so far.
+ SmallVector<ParamCommandComment *, 8> ParamVarDocs;
+
+ ArrayRef<const ParmVarDecl *> ParamVars = getParamVars();
+ ParamVarDocs.resize(ParamVars.size(), nullptr);
+
+ // First pass over all \\param commands: resolve all parameter names.
+ for (Comment::child_iterator I = FC->child_begin(), E = FC->child_end();
+ I != E; ++I) {
+ ParamCommandComment *PCC = dyn_cast<ParamCommandComment>(*I);
+ if (!PCC || !PCC->hasParamName())
+ continue;
+ StringRef ParamName = PCC->getParamNameAsWritten();
+
+ // Check that referenced parameter name is in the function decl.
+ const unsigned ResolvedParamIndex = resolveParmVarReference(ParamName,
+ ParamVars);
+ if (ResolvedParamIndex == ParamCommandComment::VarArgParamIndex) {
+ PCC->setIsVarArgParam();
+ continue;
+ }
+ if (ResolvedParamIndex == ParamCommandComment::InvalidParamIndex) {
+ UnresolvedParamCommands.push_back(PCC);
+ continue;
+ }
+ PCC->setParamIndex(ResolvedParamIndex);
+ if (ParamVarDocs[ResolvedParamIndex]) {
+ SourceRange ArgRange = PCC->getParamNameRange();
+ Diag(ArgRange.getBegin(), diag::warn_doc_param_duplicate)
+ << ParamName << ArgRange;
+ ParamCommandComment *PrevCommand = ParamVarDocs[ResolvedParamIndex];
+ Diag(PrevCommand->getLocation(), diag::note_doc_param_previous)
+ << PrevCommand->getParamNameRange();
+ }
+ ParamVarDocs[ResolvedParamIndex] = PCC;
+ }
+
+ // Find parameter declarations that have no corresponding \\param.
+ SmallVector<const ParmVarDecl *, 8> OrphanedParamDecls;
+ for (unsigned i = 0, e = ParamVarDocs.size(); i != e; ++i) {
+ if (!ParamVarDocs[i])
+ OrphanedParamDecls.push_back(ParamVars[i]);
+ }
+
+ // Second pass over unresolved \\param commands: do typo correction.
+ // Suggest corrections from a set of parameter declarations that have no
+ // corresponding \\param.
+ for (unsigned i = 0, e = UnresolvedParamCommands.size(); i != e; ++i) {
+ const ParamCommandComment *PCC = UnresolvedParamCommands[i];
+
+ SourceRange ArgRange = PCC->getParamNameRange();
+ StringRef ParamName = PCC->getParamNameAsWritten();
+ Diag(ArgRange.getBegin(), diag::warn_doc_param_not_found)
+ << ParamName << ArgRange;
+
+ // All parameters documented -- can't suggest a correction.
+ if (OrphanedParamDecls.size() == 0)
+ continue;
+
+ unsigned CorrectedParamIndex = ParamCommandComment::InvalidParamIndex;
+ if (OrphanedParamDecls.size() == 1) {
+ // If one parameter is not documented then that parameter is the only
+ // possible suggestion.
+ CorrectedParamIndex = 0;
+ } else {
+ // Do typo correction.
+ CorrectedParamIndex = correctTypoInParmVarReference(ParamName,
+ OrphanedParamDecls);
+ }
+ if (CorrectedParamIndex != ParamCommandComment::InvalidParamIndex) {
+ const ParmVarDecl *CorrectedPVD = OrphanedParamDecls[CorrectedParamIndex];
+ if (const IdentifierInfo *CorrectedII = CorrectedPVD->getIdentifier())
+ Diag(ArgRange.getBegin(), diag::note_doc_param_name_suggestion)
+ << CorrectedII->getName()
+ << FixItHint::CreateReplacement(ArgRange, CorrectedII->getName());
+ }
+ }
+}
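+
+ // Illustrative example: for
+ //   /// \param Lenght the length
+ //   int measure(int Length, int Width);
+ // the first pass leaves "Lenght" unresolved and records both Length and
+ // Width as undocumented; the second pass typo-corrects "Lenght" against
+ // that orphaned set and emits a fix-it suggesting "Length".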
+
+bool Sema::isFunctionDecl() {
+ if (!ThisDeclInfo)
+ return false;
+ if (!ThisDeclInfo->IsFilled)
+ inspectThisDecl();
+ return ThisDeclInfo->getKind() == DeclInfo::FunctionKind;
+}
+
+bool Sema::isAnyFunctionDecl() {
+ return isFunctionDecl() && ThisDeclInfo->CurrentDecl &&
+ isa<FunctionDecl>(ThisDeclInfo->CurrentDecl);
+}
+
+bool Sema::isFunctionOrMethodVariadic() {
+ if (!isAnyFunctionDecl() && !isObjCMethodDecl() && !isFunctionTemplateDecl())
+ return false;
+ if (const FunctionDecl *FD =
+ dyn_cast<FunctionDecl>(ThisDeclInfo->CurrentDecl))
+ return FD->isVariadic();
+ if (const FunctionTemplateDecl *FTD =
+ dyn_cast<FunctionTemplateDecl>(ThisDeclInfo->CurrentDecl))
+ return FTD->getTemplatedDecl()->isVariadic();
+ if (const ObjCMethodDecl *MD =
+ dyn_cast<ObjCMethodDecl>(ThisDeclInfo->CurrentDecl))
+ return MD->isVariadic();
+ return false;
+}
+
+bool Sema::isObjCMethodDecl() {
+ return isFunctionDecl() && ThisDeclInfo->CurrentDecl &&
+ isa<ObjCMethodDecl>(ThisDeclInfo->CurrentDecl);
+}
+
+bool Sema::isFunctionPointerVarDecl() {
+ if (!ThisDeclInfo)
+ return false;
+ if (!ThisDeclInfo->IsFilled)
+ inspectThisDecl();
+ if (ThisDeclInfo->getKind() == DeclInfo::VariableKind) {
+ if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(ThisDeclInfo->CurrentDecl)) {
+ QualType QT = VD->getType();
+ return QT->isFunctionPointerType();
+ }
+ }
+ return false;
+}
+
+bool Sema::isObjCPropertyDecl() {
+ if (!ThisDeclInfo)
+ return false;
+ if (!ThisDeclInfo->IsFilled)
+ inspectThisDecl();
+ return ThisDeclInfo->CurrentDecl->getKind() == Decl::ObjCProperty;
+}
+
+bool Sema::isTemplateOrSpecialization() {
+ if (!ThisDeclInfo)
+ return false;
+ if (!ThisDeclInfo->IsFilled)
+ inspectThisDecl();
+ return ThisDeclInfo->getTemplateKind() != DeclInfo::NotTemplate;
+}
+
+bool Sema::isRecordLikeDecl() {
+ if (!ThisDeclInfo)
+ return false;
+ if (!ThisDeclInfo->IsFilled)
+ inspectThisDecl();
+ return isUnionDecl() || isClassOrStructDecl() || isObjCInterfaceDecl() ||
+ isObjCProtocolDecl();
+}
+
+bool Sema::isUnionDecl() {
+ if (!ThisDeclInfo)
+ return false;
+ if (!ThisDeclInfo->IsFilled)
+ inspectThisDecl();
+ if (const RecordDecl *RD =
+ dyn_cast_or_null<RecordDecl>(ThisDeclInfo->CurrentDecl))
+ return RD->isUnion();
+ return false;
+}
+
+bool Sema::isClassOrStructDecl() {
+ if (!ThisDeclInfo)
+ return false;
+ if (!ThisDeclInfo->IsFilled)
+ inspectThisDecl();
+ return ThisDeclInfo->CurrentDecl &&
+ isa<RecordDecl>(ThisDeclInfo->CurrentDecl) &&
+ !isUnionDecl();
+}
+
+bool Sema::isClassTemplateDecl() {
+ if (!ThisDeclInfo)
+ return false;
+ if (!ThisDeclInfo->IsFilled)
+ inspectThisDecl();
+ return ThisDeclInfo->CurrentDecl &&
+ (isa<ClassTemplateDecl>(ThisDeclInfo->CurrentDecl));
+}
+
+bool Sema::isFunctionTemplateDecl() {
+ if (!ThisDeclInfo)
+ return false;
+ if (!ThisDeclInfo->IsFilled)
+ inspectThisDecl();
+ return ThisDeclInfo->CurrentDecl &&
+ (isa<FunctionTemplateDecl>(ThisDeclInfo->CurrentDecl));
+}
+
+bool Sema::isObjCInterfaceDecl() {
+ if (!ThisDeclInfo)
+ return false;
+ if (!ThisDeclInfo->IsFilled)
+ inspectThisDecl();
+ return ThisDeclInfo->CurrentDecl &&
+ isa<ObjCInterfaceDecl>(ThisDeclInfo->CurrentDecl);
+}
+
+bool Sema::isObjCProtocolDecl() {
+ if (!ThisDeclInfo)
+ return false;
+ if (!ThisDeclInfo->IsFilled)
+ inspectThisDecl();
+ return ThisDeclInfo->CurrentDecl &&
+ isa<ObjCProtocolDecl>(ThisDeclInfo->CurrentDecl);
+}
+
+ArrayRef<const ParmVarDecl *> Sema::getParamVars() {
+ if (!ThisDeclInfo->IsFilled)
+ inspectThisDecl();
+ return ThisDeclInfo->ParamVars;
+}
+
+void Sema::inspectThisDecl() {
+ ThisDeclInfo->fill();
+}
+
+unsigned Sema::resolveParmVarReference(StringRef Name,
+ ArrayRef<const ParmVarDecl *> ParamVars) {
+ for (unsigned i = 0, e = ParamVars.size(); i != e; ++i) {
+ const IdentifierInfo *II = ParamVars[i]->getIdentifier();
+ if (II && II->getName() == Name)
+ return i;
+ }
+ if (Name == "..." && isFunctionOrMethodVariadic())
+ return ParamCommandComment::VarArgParamIndex;
+ return ParamCommandComment::InvalidParamIndex;
+}
+
+namespace {
+class SimpleTypoCorrector {
+ StringRef Typo;
+ const unsigned MaxEditDistance;
+
+ const NamedDecl *BestDecl;
+ unsigned BestEditDistance;
+ unsigned BestIndex;
+ unsigned NextIndex;
+
+public:
+ SimpleTypoCorrector(StringRef Typo) :
+ Typo(Typo), MaxEditDistance((Typo.size() + 2) / 3),
+ BestDecl(nullptr), BestEditDistance(MaxEditDistance + 1),
+ BestIndex(0), NextIndex(0)
+ { }
+
+ void addDecl(const NamedDecl *ND);
+
+ const NamedDecl *getBestDecl() const {
+ if (BestEditDistance > MaxEditDistance)
+ return nullptr;
+
+ return BestDecl;
+ }
+
+ unsigned getBestDeclIndex() const {
+ assert(getBestDecl());
+ return BestIndex;
+ }
+};
+
+void SimpleTypoCorrector::addDecl(const NamedDecl *ND) {
+ unsigned CurrIndex = NextIndex++;
+
+ const IdentifierInfo *II = ND->getIdentifier();
+ if (!II)
+ return;
+
+ StringRef Name = II->getName();
+ unsigned MinPossibleEditDistance = abs((int)Name.size() - (int)Typo.size());
+ if (MinPossibleEditDistance > 0 &&
+ Typo.size() / MinPossibleEditDistance < 3)
+ return;
+
+ unsigned EditDistance = Typo.edit_distance(Name, true, MaxEditDistance);
+ if (EditDistance < BestEditDistance) {
+ BestEditDistance = EditDistance;
+ BestDecl = ND;
+ BestIndex = CurrIndex;
+ }
+}
+} // unnamed namespace
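+
+ // A standalone sketch (editDistance is an illustrative helper, not the
+ // llvm::StringRef API used above) of the acceptance bound: a typo of size
+ // N tolerates at most (N + 2) / 3 edits. For Typo = "Lenght" that bound is
+ // 2, so "Length" (distance 2: the transposed "ht" costs two substitutions)
+ // is accepted while "Width" is rejected.
+ #include <algorithm>
+ #include <cassert>
+ #include <string>
+ #include <vector>
+
+ static unsigned editDistance(const std::string &A, const std::string &B) {
+   // Single-row Levenshtein dynamic program.
+   std::vector<unsigned> Row(B.size() + 1);
+   for (unsigned J = 0; J <= B.size(); ++J)
+     Row[J] = J;
+   for (unsigned I = 1; I <= A.size(); ++I) {
+     unsigned Prev = Row[0]; // previous row, column J-1
+     Row[0] = I;
+     for (unsigned J = 1; J <= B.size(); ++J) {
+       unsigned Cur = Row[J]; // previous row, column J
+       Row[J] = std::min({Row[J] + 1, Row[J - 1] + 1,
+                          Prev + (A[I - 1] != B[J - 1] ? 1u : 0u)});
+       Prev = Cur;
+     }
+   }
+   return Row[B.size()];
+ }
+
+ int main() {
+   std::string Typo = "Lenght";
+   unsigned MaxEditDistance = (Typo.size() + 2) / 3;
+   assert(MaxEditDistance == 2);
+   assert(editDistance(Typo, "Length") <= MaxEditDistance); // accepted
+   assert(editDistance(Typo, "Width") > MaxEditDistance);   // rejected
+ }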
+
+unsigned Sema::correctTypoInParmVarReference(
+ StringRef Typo,
+ ArrayRef<const ParmVarDecl *> ParamVars) {
+ SimpleTypoCorrector Corrector(Typo);
+ for (unsigned i = 0, e = ParamVars.size(); i != e; ++i)
+ Corrector.addDecl(ParamVars[i]);
+ if (Corrector.getBestDecl())
+ return Corrector.getBestDeclIndex();
+ else
+ return ParamCommandComment::InvalidParamIndex;
+}
+
+namespace {
+bool ResolveTParamReferenceHelper(
+ StringRef Name,
+ const TemplateParameterList *TemplateParameters,
+ SmallVectorImpl<unsigned> *Position) {
+ for (unsigned i = 0, e = TemplateParameters->size(); i != e; ++i) {
+ const NamedDecl *Param = TemplateParameters->getParam(i);
+ const IdentifierInfo *II = Param->getIdentifier();
+ if (II && II->getName() == Name) {
+ Position->push_back(i);
+ return true;
+ }
+
+ if (const TemplateTemplateParmDecl *TTP =
+ dyn_cast<TemplateTemplateParmDecl>(Param)) {
+ Position->push_back(i);
+ if (ResolveTParamReferenceHelper(Name, TTP->getTemplateParameters(),
+ Position))
+ return true;
+ Position->pop_back();
+ }
+ }
+ return false;
+}
+} // unnamed namespace
+
+bool Sema::resolveTParamReference(
+ StringRef Name,
+ const TemplateParameterList *TemplateParameters,
+ SmallVectorImpl<unsigned> *Position) {
+ Position->clear();
+ if (!TemplateParameters)
+ return false;
+
+ return ResolveTParamReferenceHelper(Name, TemplateParameters, Position);
+}
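+
+ // Illustrative example: for
+ //   template <typename T, template <typename U> class C> struct S;
+ // resolving "U" yields Position = {1, 0}: index 1 selects the template
+ // template parameter C in the outer list, and index 0 selects U inside
+ // C's own parameter list.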
+
+namespace {
+void CorrectTypoInTParamReferenceHelper(
+ const TemplateParameterList *TemplateParameters,
+ SimpleTypoCorrector &Corrector) {
+ for (unsigned i = 0, e = TemplateParameters->size(); i != e; ++i) {
+ const NamedDecl *Param = TemplateParameters->getParam(i);
+ Corrector.addDecl(Param);
+
+ if (const TemplateTemplateParmDecl *TTP =
+ dyn_cast<TemplateTemplateParmDecl>(Param))
+ CorrectTypoInTParamReferenceHelper(TTP->getTemplateParameters(),
+ Corrector);
+ }
+}
+} // unnamed namespace
+
+StringRef Sema::correctTypoInTParamReference(
+ StringRef Typo,
+ const TemplateParameterList *TemplateParameters) {
+ SimpleTypoCorrector Corrector(Typo);
+ CorrectTypoInTParamReferenceHelper(TemplateParameters, Corrector);
+ if (const NamedDecl *ND = Corrector.getBestDecl()) {
+ const IdentifierInfo *II = ND->getIdentifier();
+ assert(II && "SimpleTypoCorrector should not return this decl");
+ return II->getName();
+ }
+ return StringRef();
+}
+
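+/// For example (illustrative): "\b word" is rendered bold, "\c word" and
+/// "\p word" monospaced, and "\a word", "\e word", and "\em word"
+/// emphasized; any other inline command renders normally.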
+InlineCommandComment::RenderKind
+Sema::getInlineCommandRenderKind(StringRef Name) const {
+ assert(Traits.getCommandInfo(Name)->IsInlineCommand);
+
+ return llvm::StringSwitch<InlineCommandComment::RenderKind>(Name)
+ .Case("b", InlineCommandComment::RenderBold)
+ .Cases("c", "p", InlineCommandComment::RenderMonospaced)
+ .Cases("a", "e", "em", InlineCommandComment::RenderEmphasized)
+ .Default(InlineCommandComment::RenderNormal);
+}
+
+} // end namespace comments
+} // end namespace clang
+
diff --git a/contrib/llvm/tools/clang/lib/AST/Decl.cpp b/contrib/llvm/tools/clang/lib/AST/Decl.cpp
new file mode 100644
index 0000000..42bebc5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/Decl.cpp
@@ -0,0 +1,4204 @@
+//===--- Decl.cpp - Declaration AST Node Implementation -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Decl subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTLambda.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/Module.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+
+using namespace clang;
+
+Decl *clang::getPrimaryMergedDecl(Decl *D) {
+ return D->getASTContext().getPrimaryMergedDecl(D);
+}
+
+// Defined here so that it can be inlined into its direct callers.
+bool Decl::isOutOfLine() const {
+ return !getLexicalDeclContext()->Equals(getDeclContext());
+}
+
+TranslationUnitDecl::TranslationUnitDecl(ASTContext &ctx)
+ : Decl(TranslationUnit, nullptr, SourceLocation()),
+ DeclContext(TranslationUnit), Ctx(ctx), AnonymousNamespace(nullptr) {
+ Hidden = Ctx.getLangOpts().ModulesLocalVisibility;
+}
+
+//===----------------------------------------------------------------------===//
+// NamedDecl Implementation
+//===----------------------------------------------------------------------===//
+
+// Visibility rules aren't rigorously externally specified, but here
+// are the basic principles behind what we implement:
+//
+// 1. An explicit visibility attribute is generally a direct expression
+// of the user's intent and should be honored. Only the innermost
+// visibility attribute applies. If no visibility attribute applies,
+// global visibility settings are considered.
+//
+// 2. There is one caveat to the above: on or in a template pattern,
+// an explicit visibility attribute is just a default rule, and
+// visibility can be decreased by the visibility of template
+// arguments. But this, too, has an exception: an attribute on an
+// explicit specialization or instantiation causes all the visibility
+// restrictions of the template arguments to be ignored.
+//
+// 3. A variable that does not otherwise have explicit visibility can
+// be restricted by the visibility of its type.
+//
+// 4. A visibility restriction is explicit if it comes from an
+// attribute (or something like it), not a global visibility setting.
+// When emitting a reference to an external symbol, visibility
+// restrictions are ignored unless they are explicit.
+//
+// 5. When computing the visibility of a non-type, including a
+// non-type member of a class, only non-type visibility restrictions
+// are considered: the 'visibility' attribute, global value-visibility
+// settings, and a few special cases like __private_extern__.
+//
+// 6. When computing the visibility of a type, including a type member
+// of a class, only type visibility restrictions are considered:
+// the 'type_visibility' attribute and global type-visibility settings.
+// However, a 'visibility' attribute counts as a 'type_visibility'
+// attribute on any declaration that only has the former.
+//
+// The visibility of a "secondary" entity, like a template argument,
+// is computed using the kind of that entity, not the kind of the
+// primary entity for which we are computing visibility. For example,
+// the visibility of a specialization of either of these templates:
+// template <class T, bool (&compare)(T, X)> bool has_match(list<T>, X);
+// template <class T, bool (&compare)(T, X)> class matcher;
+// is restricted according to the type visibility of the argument 'T',
+// the type visibility of 'bool(&)(T,X)', and the value visibility of
+// the argument function 'compare'. That 'has_match' is a value
+// and 'matcher' is a type only matters when looking for attributes
+// and settings from the immediate context.
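+//
+// For example (illustrative of rule 1): the innermost attribute wins, so
+// given
+//   struct __attribute__((visibility("hidden"))) S {
+//     __attribute__((visibility("default"))) void f();
+//   };
+// 'f' has default visibility while other members of S are hidden.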
+
+const unsigned IgnoreExplicitVisibilityBit = 2;
+const unsigned IgnoreAllVisibilityBit = 4;
+
+/// Kinds of LV computation. The linkage side of the computation is
+/// always the same, but different things can change how visibility is
+/// computed.
+enum LVComputationKind {
+ /// Do an LV computation for, ultimately, a type.
+ /// Visibility may be restricted by type visibility settings and
+ /// the visibility of template arguments.
+ LVForType = NamedDecl::VisibilityForType,
+
+ /// Do an LV computation for, ultimately, a non-type declaration.
+ /// Visibility may be restricted by value visibility settings and
+ /// the visibility of template arguments.
+ LVForValue = NamedDecl::VisibilityForValue,
+
+ /// Do an LV computation for, ultimately, a type that already has
+ /// some sort of explicit visibility. Visibility may only be
+ /// restricted by the visibility of template arguments.
+ LVForExplicitType = (LVForType | IgnoreExplicitVisibilityBit),
+
+ /// Do an LV computation for, ultimately, a non-type declaration
+ /// that already has some sort of explicit visibility. Visibility
+ /// may only be restricted by the visibility of template arguments.
+ LVForExplicitValue = (LVForValue | IgnoreExplicitVisibilityBit),
+
+ /// Do an LV computation when we only care about the linkage.
+ LVForLinkageOnly =
+ LVForValue | IgnoreExplicitVisibilityBit | IgnoreAllVisibilityBit
+};
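+
+// For example (illustrative): LVForExplicitType is LVForType with the
+// IgnoreExplicitVisibilityBit set, so hasExplicitVisibilityAlready() below
+// is true for it and false for plain LVForType.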
+
+/// Does this computation kind permit us to consider additional
+/// visibility settings from attributes and the like?
+static bool hasExplicitVisibilityAlready(LVComputationKind computation) {
+ return ((unsigned(computation) & IgnoreExplicitVisibilityBit) != 0);
+}
+
+/// Given an LVComputationKind, return one of the same type/value sort
+/// that records that it already has explicit visibility.
+static LVComputationKind
+withExplicitVisibilityAlready(LVComputationKind oldKind) {
+ LVComputationKind newKind =
+ static_cast<LVComputationKind>(unsigned(oldKind) |
+ IgnoreExplicitVisibilityBit);
+ assert(oldKind != LVForType || newKind == LVForExplicitType);
+ assert(oldKind != LVForValue || newKind == LVForExplicitValue);
+ assert(oldKind != LVForExplicitType || newKind == LVForExplicitType);
+ assert(oldKind != LVForExplicitValue || newKind == LVForExplicitValue);
+ return newKind;
+}
+
+static Optional<Visibility> getExplicitVisibility(const NamedDecl *D,
+ LVComputationKind kind) {
+ assert(!hasExplicitVisibilityAlready(kind) &&
+ "asking for explicit visibility when we shouldn't be");
+ return D->getExplicitVisibility((NamedDecl::ExplicitVisibilityKind) kind);
+}
+
+/// Is the given declaration a "type" or a "value" for the purposes of
+/// visibility computation?
+static bool usesTypeVisibility(const NamedDecl *D) {
+ return isa<TypeDecl>(D) ||
+ isa<ClassTemplateDecl>(D) ||
+ isa<ObjCInterfaceDecl>(D);
+}
+
+/// Does the given declaration have member specialization information,
+/// and if so, is it an explicit specialization?
+template <class T> static typename
+std::enable_if<!std::is_base_of<RedeclarableTemplateDecl, T>::value, bool>::type
+isExplicitMemberSpecialization(const T *D) {
+ if (const MemberSpecializationInfo *member =
+ D->getMemberSpecializationInfo()) {
+ return member->isExplicitSpecialization();
+ }
+ return false;
+}
+
+/// For templates, this question is easier: a member template can't be
+/// explicitly instantiated, so there's a single bit indicating whether
+/// or not this is an explicit member specialization.
+static bool isExplicitMemberSpecialization(const RedeclarableTemplateDecl *D) {
+ return D->isMemberSpecialization();
+}
+
+/// Given a visibility attribute, return the explicit visibility
+/// associated with it.
+template <class T>
+static Visibility getVisibilityFromAttr(const T *attr) {
+ switch (attr->getVisibility()) {
+ case T::Default:
+ return DefaultVisibility;
+ case T::Hidden:
+ return HiddenVisibility;
+ case T::Protected:
+ return ProtectedVisibility;
+ }
+ llvm_unreachable("bad visibility kind");
+}
+
+/// Return the explicit visibility of the given declaration.
+static Optional<Visibility> getVisibilityOf(const NamedDecl *D,
+ NamedDecl::ExplicitVisibilityKind kind) {
+ // If we're ultimately computing the visibility of a type, look for
+ // a 'type_visibility' attribute before looking for 'visibility'.
+ if (kind == NamedDecl::VisibilityForType) {
+ if (const auto *A = D->getAttr<TypeVisibilityAttr>()) {
+ return getVisibilityFromAttr(A);
+ }
+ }
+
+ // If this declaration has an explicit visibility attribute, use it.
+ if (const auto *A = D->getAttr<VisibilityAttr>()) {
+ return getVisibilityFromAttr(A);
+ }
+
+  // On Mac OS X, an 'availability' attribute for Mac OS X implies
+  // visibility(default).
+ if (D->getASTContext().getTargetInfo().getTriple().isOSDarwin()) {
+ for (const auto *A : D->specific_attrs<AvailabilityAttr>())
+ if (A->getPlatform()->getName().equals("macosx"))
+ return DefaultVisibility;
+ }
+
+ return None;
+}
+
+static LinkageInfo
+getLVForType(const Type &T, LVComputationKind computation) {
+ if (computation == LVForLinkageOnly)
+ return LinkageInfo(T.getLinkage(), DefaultVisibility, true);
+ return T.getLinkageAndVisibility();
+}
+
+/// \brief Get the most restrictive linkage for the types in the given
+/// template parameter list. For visibility purposes, template
+/// parameters are part of the signature of a template.
+static LinkageInfo
+getLVForTemplateParameterList(const TemplateParameterList *Params,
+ LVComputationKind computation) {
+ LinkageInfo LV;
+ for (const NamedDecl *P : *Params) {
+ // Template type parameters are the most common and never
+ // contribute to visibility, pack or not.
+ if (isa<TemplateTypeParmDecl>(P))
+ continue;
+
+ // Non-type template parameters can be restricted by the value type, e.g.
+ // template <enum X> class A { ... };
+ // We have to be careful here, though, because we can be dealing with
+ // dependent types.
+ if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(P)) {
+ // Handle the non-pack case first.
+ if (!NTTP->isExpandedParameterPack()) {
+ if (!NTTP->getType()->isDependentType()) {
+ LV.merge(getLVForType(*NTTP->getType(), computation));
+ }
+ continue;
+ }
+
+ // Look at all the types in an expanded pack.
+ for (unsigned i = 0, n = NTTP->getNumExpansionTypes(); i != n; ++i) {
+ QualType type = NTTP->getExpansionType(i);
+ if (!type->isDependentType())
+ LV.merge(type->getLinkageAndVisibility());
+ }
+ continue;
+ }
+
+ // Template template parameters can be restricted by their
+ // template parameters, recursively.
+ const auto *TTP = cast<TemplateTemplateParmDecl>(P);
+
+ // Handle the non-pack case first.
+ if (!TTP->isExpandedParameterPack()) {
+ LV.merge(getLVForTemplateParameterList(TTP->getTemplateParameters(),
+ computation));
+ continue;
+ }
+
+ // Look at all expansions in an expanded pack.
+ for (unsigned i = 0, n = TTP->getNumExpansionTemplateParameters();
+ i != n; ++i) {
+ LV.merge(getLVForTemplateParameterList(
+ TTP->getExpansionTemplateParameters(i), computation));
+ }
+ }
+
+ return LV;
+}
+
+/// getLVForDecl - Get the linkage and visibility for the given declaration.
+static LinkageInfo getLVForDecl(const NamedDecl *D,
+ LVComputationKind computation);
+
+static const Decl *getOutermostFuncOrBlockContext(const Decl *D) {
+ const Decl *Ret = nullptr;
+ const DeclContext *DC = D->getDeclContext();
+ while (DC->getDeclKind() != Decl::TranslationUnit) {
+ if (isa<FunctionDecl>(DC) || isa<BlockDecl>(DC))
+ Ret = cast<Decl>(DC);
+ DC = DC->getParent();
+ }
+ return Ret;
+}
+
+/// \brief Get the most restrictive linkage for the types and
+/// declarations in the given template argument list.
+///
+/// Note that the visibility of template arguments is always honored in the
+/// same way; the LVComputationKind is threaded through only so that
+/// linkage-only computations can skip visibility analysis.
+static LinkageInfo getLVForTemplateArgumentList(ArrayRef<TemplateArgument> Args,
+ LVComputationKind computation) {
+ LinkageInfo LV;
+
+ for (const TemplateArgument &Arg : Args) {
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Expression:
+ continue;
+
+ case TemplateArgument::Type:
+ LV.merge(getLVForType(*Arg.getAsType(), computation));
+ continue;
+
+ case TemplateArgument::Declaration:
+ if (const auto *ND = dyn_cast<NamedDecl>(Arg.getAsDecl())) {
+ assert(!usesTypeVisibility(ND));
+ LV.merge(getLVForDecl(ND, computation));
+ }
+ continue;
+
+ case TemplateArgument::NullPtr:
+ LV.merge(Arg.getNullPtrType()->getLinkageAndVisibility());
+ continue;
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ if (TemplateDecl *Template =
+ Arg.getAsTemplateOrTemplatePattern().getAsTemplateDecl())
+ LV.merge(getLVForDecl(Template, computation));
+ continue;
+
+ case TemplateArgument::Pack:
+ LV.merge(getLVForTemplateArgumentList(Arg.getPackAsArray(), computation));
+ continue;
+ }
+ llvm_unreachable("bad template argument kind");
+ }
+
+ return LV;
+}
+
+static LinkageInfo
+getLVForTemplateArgumentList(const TemplateArgumentList &TArgs,
+ LVComputationKind computation) {
+ return getLVForTemplateArgumentList(TArgs.asArray(), computation);
+}
+
+static bool shouldConsiderTemplateVisibility(const FunctionDecl *fn,
+ const FunctionTemplateSpecializationInfo *specInfo) {
+ // Include visibility from the template parameters and arguments
+ // only if this is not an explicit instantiation or specialization
+ // with direct explicit visibility. (Implicit instantiations won't
+ // have a direct attribute.)
+ if (!specInfo->isExplicitInstantiationOrSpecialization())
+ return true;
+
+ return !fn->hasAttr<VisibilityAttr>();
+}
+
+/// Merge in template-related linkage and visibility for the given
+/// function template specialization.
+///
+/// The computation kind is simply threaded through; function template
+/// specializations are values, so LVForValue semantics apply.
+///
+/// \param[out] LV the linkage and visibility to merge the result into
+static void
+mergeTemplateLV(LinkageInfo &LV, const FunctionDecl *fn,
+ const FunctionTemplateSpecializationInfo *specInfo,
+ LVComputationKind computation) {
+ bool considerVisibility =
+ shouldConsiderTemplateVisibility(fn, specInfo);
+
+ // Merge information from the template parameters.
+ FunctionTemplateDecl *temp = specInfo->getTemplate();
+ LinkageInfo tempLV =
+ getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
+ LV.mergeMaybeWithVisibility(tempLV, considerVisibility);
+
+ // Merge information from the template arguments.
+ const TemplateArgumentList &templateArgs = *specInfo->TemplateArguments;
+ LinkageInfo argsLV = getLVForTemplateArgumentList(templateArgs, computation);
+ LV.mergeMaybeWithVisibility(argsLV, considerVisibility);
+}
+
+/// Does the given declaration have a direct visibility attribute
+/// that would match the given rules?
+static bool hasDirectVisibilityAttribute(const NamedDecl *D,
+ LVComputationKind computation) {
+ switch (computation) {
+ case LVForType:
+ case LVForExplicitType:
+ if (D->hasAttr<TypeVisibilityAttr>())
+ return true;
+ // fallthrough
+ case LVForValue:
+ case LVForExplicitValue:
+ if (D->hasAttr<VisibilityAttr>())
+ return true;
+ return false;
+ case LVForLinkageOnly:
+ return false;
+ }
+ llvm_unreachable("bad visibility computation kind");
+}
+
+/// Should we consider visibility associated with the template
+/// arguments and parameters of the given class template specialization?
+static bool shouldConsiderTemplateVisibility(
+ const ClassTemplateSpecializationDecl *spec,
+ LVComputationKind computation) {
+ // Include visibility from the template parameters and arguments
+ // only if this is not an explicit instantiation or specialization
+ // with direct explicit visibility (and note that implicit
+ // instantiations won't have a direct attribute).
+ //
+ // Furthermore, we want to ignore template parameters and arguments
+ // for an explicit specialization when computing the visibility of a
+ // member thereof with explicit visibility.
+ //
+ // This is a bit complex; let's unpack it.
+ //
+ // An explicit class specialization is an independent, top-level
+ // declaration. As such, if it or any of its members has an
+ // explicit visibility attribute, that must directly express the
+ // user's intent, and we should honor it. The same logic applies to
+ // an explicit instantiation of a member of such a thing.
+
+ // Fast path: if this is not an explicit instantiation or
+ // specialization, we always want to consider template-related
+ // visibility restrictions.
+ if (!spec->isExplicitInstantiationOrSpecialization())
+ return true;
+
+ // This is the 'member thereof' check.
+ if (spec->isExplicitSpecialization() &&
+ hasExplicitVisibilityAlready(computation))
+ return false;
+
+ return !hasDirectVisibilityAttribute(spec, computation);
+}
+
+/// Merge in template-related linkage and visibility for the given
+/// class template specialization.
+static void mergeTemplateLV(LinkageInfo &LV,
+ const ClassTemplateSpecializationDecl *spec,
+ LVComputationKind computation) {
+ bool considerVisibility = shouldConsiderTemplateVisibility(spec, computation);
+
+ // Merge information from the template parameters, but ignore
+ // visibility if we're only considering template arguments.
+
+ ClassTemplateDecl *temp = spec->getSpecializedTemplate();
+ LinkageInfo tempLV =
+ getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
+ LV.mergeMaybeWithVisibility(tempLV,
+ considerVisibility && !hasExplicitVisibilityAlready(computation));
+
+ // Merge information from the template arguments. We ignore
+ // template-argument visibility if we've got an explicit
+ // instantiation with a visibility attribute.
+ const TemplateArgumentList &templateArgs = spec->getTemplateArgs();
+ LinkageInfo argsLV = getLVForTemplateArgumentList(templateArgs, computation);
+ if (considerVisibility)
+ LV.mergeVisibility(argsLV);
+ LV.mergeExternalVisibility(argsLV);
+}
+
+/// Should we consider visibility associated with the template
+/// arguments and parameters of the given variable template
+/// specialization? This mirrors the class template specialization
+/// logic, except that initialization is handled separately.
+static bool shouldConsiderTemplateVisibility(
+ const VarTemplateSpecializationDecl *spec,
+ LVComputationKind computation) {
+ // Include visibility from the template parameters and arguments
+ // only if this is not an explicit instantiation or specialization
+ // with direct explicit visibility (and note that implicit
+ // instantiations won't have a direct attribute).
+ if (!spec->isExplicitInstantiationOrSpecialization())
+ return true;
+
+ // An explicit variable specialization is an independent, top-level
+ // declaration. As such, if it has an explicit visibility attribute,
+ // that must directly express the user's intent, and we should honor
+ // it.
+ if (spec->isExplicitSpecialization() &&
+ hasExplicitVisibilityAlready(computation))
+ return false;
+
+ return !hasDirectVisibilityAttribute(spec, computation);
+}
+
+/// Merge in template-related linkage and visibility for the given
+/// variable template specialization. This mirrors the class template
+/// specialization logic, except that initialization is handled separately.
+static void mergeTemplateLV(LinkageInfo &LV,
+ const VarTemplateSpecializationDecl *spec,
+ LVComputationKind computation) {
+ bool considerVisibility = shouldConsiderTemplateVisibility(spec, computation);
+
+ // Merge information from the template parameters, but ignore
+ // visibility if we're only considering template arguments.
+
+ VarTemplateDecl *temp = spec->getSpecializedTemplate();
+ LinkageInfo tempLV =
+ getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
+ LV.mergeMaybeWithVisibility(tempLV,
+ considerVisibility && !hasExplicitVisibilityAlready(computation));
+
+ // Merge information from the template arguments. We ignore
+ // template-argument visibility if we've got an explicit
+ // instantiation with a visibility attribute.
+ const TemplateArgumentList &templateArgs = spec->getTemplateArgs();
+ LinkageInfo argsLV = getLVForTemplateArgumentList(templateArgs, computation);
+ if (considerVisibility)
+ LV.mergeVisibility(argsLV);
+ LV.mergeExternalVisibility(argsLV);
+}
+
+static bool useInlineVisibilityHidden(const NamedDecl *D) {
+  // FIXME: we should warn if -fvisibility-inlines-hidden is used with C.
+ const LangOptions &Opts = D->getASTContext().getLangOpts();
+ if (!Opts.CPlusPlus || !Opts.InlineVisibilityHidden)
+ return false;
+
+ const auto *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD)
+ return false;
+
+ TemplateSpecializationKind TSK = TSK_Undeclared;
+ if (FunctionTemplateSpecializationInfo *spec
+ = FD->getTemplateSpecializationInfo()) {
+ TSK = spec->getTemplateSpecializationKind();
+ } else if (MemberSpecializationInfo *MSI =
+ FD->getMemberSpecializationInfo()) {
+ TSK = MSI->getTemplateSpecializationKind();
+ }
+
+ const FunctionDecl *Def = nullptr;
+ // InlineVisibilityHidden only applies to definitions, and
+ // isInlined() only gives meaningful answers on definitions
+ // anyway.
+ return TSK != TSK_ExplicitInstantiationDeclaration &&
+ TSK != TSK_ExplicitInstantiationDefinition &&
+ FD->hasBody(Def) && Def->isInlined() && !Def->hasAttr<GNUInlineAttr>();
+}
+
+template <typename T> static bool isFirstInExternCContext(T *D) {
+ const T *First = D->getFirstDecl();
+ return First->isInExternCContext();
+}
+
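+/// Is the given declaration directly inside an unbraced language linkage
+/// specification? E.g. (illustrative):
+///   extern "C" int x;      // true for 'x'
+///   extern "C" { int y; }  // false for 'y': the specification has braces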
+static bool isSingleLineLanguageLinkage(const Decl &D) {
+ if (const auto *SD = dyn_cast<LinkageSpecDecl>(D.getDeclContext()))
+ if (!SD->hasBraces())
+ return true;
+ return false;
+}
+
+static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D,
+ LVComputationKind computation) {
+ assert(D->getDeclContext()->getRedeclContext()->isFileContext() &&
+ "Not a name having namespace scope");
+ ASTContext &Context = D->getASTContext();
+
+ // C++ [basic.link]p3:
+ // A name having namespace scope (3.3.6) has internal linkage if it
+ // is the name of
+ // - an object, reference, function or function template that is
+ // explicitly declared static; or,
+ // (This bullet corresponds to C99 6.2.2p3.)
+ if (const auto *Var = dyn_cast<VarDecl>(D)) {
+ // Explicitly declared static.
+ if (Var->getStorageClass() == SC_Static)
+ return LinkageInfo::internal();
+
+ // - a non-volatile object or reference that is explicitly declared const
+ // or constexpr and neither explicitly declared extern nor previously
+ // declared to have external linkage; or (there is no equivalent in C99)
+ if (Context.getLangOpts().CPlusPlus &&
+ Var->getType().isConstQualified() &&
+ !Var->getType().isVolatileQualified()) {
+ const VarDecl *PrevVar = Var->getPreviousDecl();
+ if (PrevVar)
+ return getLVForDecl(PrevVar, computation);
+
+ if (Var->getStorageClass() != SC_Extern &&
+ Var->getStorageClass() != SC_PrivateExtern &&
+ !isSingleLineLanguageLinkage(*Var))
+ return LinkageInfo::internal();
+ }
+
+ for (const VarDecl *PrevVar = Var->getPreviousDecl(); PrevVar;
+ PrevVar = PrevVar->getPreviousDecl()) {
+ if (PrevVar->getStorageClass() == SC_PrivateExtern &&
+ Var->getStorageClass() == SC_None)
+ return PrevVar->getLinkageAndVisibility();
+ // Explicitly declared static.
+ if (PrevVar->getStorageClass() == SC_Static)
+ return LinkageInfo::internal();
+ }
+ } else if (const FunctionDecl *Function = D->getAsFunction()) {
+ // C++ [temp]p4:
+ // A non-member function template can have internal linkage; any
+ // other template name shall have external linkage.
+
+ // Explicitly declared static.
+ if (Function->getCanonicalDecl()->getStorageClass() == SC_Static)
+ return LinkageInfo(InternalLinkage, DefaultVisibility, false);
+ } else if (const auto *IFD = dyn_cast<IndirectFieldDecl>(D)) {
+ // - a data member of an anonymous union.
+ const VarDecl *VD = IFD->getVarDecl();
+ assert(VD && "Expected a VarDecl in this IndirectFieldDecl!");
+ return getLVForNamespaceScopeDecl(VD, computation);
+ }
+ assert(!isa<FieldDecl>(D) && "Didn't expect a FieldDecl!");
+
+ if (D->isInAnonymousNamespace()) {
+ const auto *Var = dyn_cast<VarDecl>(D);
+ const auto *Func = dyn_cast<FunctionDecl>(D);
+ // FIXME: In C++11 onwards, anonymous namespaces should give decls
+ // within them internal linkage, not unique external linkage.
+ if ((!Var || !isFirstInExternCContext(Var)) &&
+ (!Func || !isFirstInExternCContext(Func)))
+ return LinkageInfo::uniqueExternal();
+ }
+
+ // Set up the defaults.
+
+ // C99 6.2.2p5:
+ // If the declaration of an identifier for an object has file
+ // scope and no storage-class specifier, its linkage is
+ // external.
+ LinkageInfo LV;
+
+ if (!hasExplicitVisibilityAlready(computation)) {
+ if (Optional<Visibility> Vis = getExplicitVisibility(D, computation)) {
+ LV.mergeVisibility(*Vis, true);
+ } else {
+ // If we're declared in a namespace with a visibility attribute,
+ // use that namespace's visibility, and it still counts as explicit.
+ for (const DeclContext *DC = D->getDeclContext();
+ !isa<TranslationUnitDecl>(DC);
+ DC = DC->getParent()) {
+ const auto *ND = dyn_cast<NamespaceDecl>(DC);
+ if (!ND) continue;
+ if (Optional<Visibility> Vis = getExplicitVisibility(ND, computation)) {
+ LV.mergeVisibility(*Vis, true);
+ break;
+ }
+ }
+ }
+
+ // Add in global settings if the above didn't give us direct visibility.
+ if (!LV.isVisibilityExplicit()) {
+ // Use global type/value visibility as appropriate.
+ Visibility globalVisibility;
+ if (computation == LVForValue) {
+ globalVisibility = Context.getLangOpts().getValueVisibilityMode();
+ } else {
+ assert(computation == LVForType);
+ globalVisibility = Context.getLangOpts().getTypeVisibilityMode();
+ }
+ LV.mergeVisibility(globalVisibility, /*explicit*/ false);
+
+      // If we're paying attention to global visibility, apply
+      // -fvisibility-inlines-hidden if this is an inline method.
+ if (useInlineVisibilityHidden(D))
+ LV.mergeVisibility(HiddenVisibility, true);
+ }
+ }
+
+ // C++ [basic.link]p4:
+
+ // A name having namespace scope has external linkage if it is the
+ // name of
+ //
+ // - an object or reference, unless it has internal linkage; or
+ if (const auto *Var = dyn_cast<VarDecl>(D)) {
+ // GCC applies the following optimization to variables and static
+ // data members, but not to functions:
+ //
+ // Modify the variable's LV by the LV of its type unless this is
+ // C or extern "C". This follows from [basic.link]p9:
+ // A type without linkage shall not be used as the type of a
+ // variable or function with external linkage unless
+ // - the entity has C language linkage, or
+ // - the entity is declared within an unnamed namespace, or
+ // - the entity is not used or is defined in the same
+ // translation unit.
+ // and [basic.link]p10:
+ // ...the types specified by all declarations referring to a
+ // given variable or function shall be identical...
+ // C does not have an equivalent rule.
+ //
+ // Ignore this if we've got an explicit attribute; the user
+ // probably knows what they're doing.
+ //
+ // Note that we don't want to make the variable non-external
+ // because of this, but unique-external linkage suits us.
+ if (Context.getLangOpts().CPlusPlus && !isFirstInExternCContext(Var)) {
+ LinkageInfo TypeLV = getLVForType(*Var->getType(), computation);
+ if (TypeLV.getLinkage() != ExternalLinkage)
+ return LinkageInfo::uniqueExternal();
+ if (!LV.isVisibilityExplicit())
+ LV.mergeVisibility(TypeLV);
+ }
+
+ if (Var->getStorageClass() == SC_PrivateExtern)
+ LV.mergeVisibility(HiddenVisibility, true);
+
+ // Note that Sema::MergeVarDecl already takes care of implementing
+ // C99 6.2.2p4 and propagating the visibility attribute, so we don't have
+ // to do it here.
+
+ // As per function and class template specializations (below),
+ // consider LV for the template and template arguments. We're at file
+ // scope, so we do not need to worry about nested specializations.
+ if (const auto *spec = dyn_cast<VarTemplateSpecializationDecl>(Var)) {
+ mergeTemplateLV(LV, spec, computation);
+ }
+
+ // - a function, unless it has internal linkage; or
+ } else if (const auto *Function = dyn_cast<FunctionDecl>(D)) {
+ // In theory, we can modify the function's LV by the LV of its
+ // type unless it has C linkage (see comment above about variables
+ // for justification). In practice, GCC doesn't do this, so it's
+ // just too painful to make work.
+
+ if (Function->getStorageClass() == SC_PrivateExtern)
+ LV.mergeVisibility(HiddenVisibility, true);
+
+ // Note that Sema::MergeCompatibleFunctionDecls already takes care of
+ // merging storage classes and visibility attributes, so we don't have to
+ // look at previous decls in here.
+
+ // In C++, then if the type of the function uses a type with
+ // unique-external linkage, it's not legally usable from outside
+ // this translation unit. However, we should use the C linkage
+ // rules instead for extern "C" declarations.
+ if (Context.getLangOpts().CPlusPlus &&
+ !Function->isInExternCContext()) {
+ // Only look at the type-as-written. If this function has an auto-deduced
+ // return type, we can't compute the linkage of that type because it could
+ // require looking at the linkage of this function, and we don't need this
+ // for correctness because the type is not part of the function's
+ // signature.
+ // FIXME: This is a hack. We should be able to solve this circularity and
+ // the one in getLVForClassMember for Functions some other way.
+ QualType TypeAsWritten = Function->getType();
+ if (TypeSourceInfo *TSI = Function->getTypeSourceInfo())
+ TypeAsWritten = TSI->getType();
+ if (TypeAsWritten->getLinkage() == UniqueExternalLinkage)
+ return LinkageInfo::uniqueExternal();
+ }
+
+ // Consider LV from the template and the template arguments.
+ // We're at file scope, so we do not need to worry about nested
+ // specializations.
+ if (FunctionTemplateSpecializationInfo *specInfo
+ = Function->getTemplateSpecializationInfo()) {
+ mergeTemplateLV(LV, Function, specInfo, computation);
+ }
+
+ // - a named class (Clause 9), or an unnamed class defined in a
+ // typedef declaration in which the class has the typedef name
+ // for linkage purposes (7.1.3); or
+ // - a named enumeration (7.2), or an unnamed enumeration
+ // defined in a typedef declaration in which the enumeration
+ // has the typedef name for linkage purposes (7.1.3); or
+ } else if (const auto *Tag = dyn_cast<TagDecl>(D)) {
+ // Unnamed tags have no linkage.
+ if (!Tag->hasNameForLinkage())
+ return LinkageInfo::none();
+
+ // If this is a class template specialization, consider the
+ // linkage of the template and template arguments. We're at file
+ // scope, so we do not need to worry about nested specializations.
+ if (const auto *spec = dyn_cast<ClassTemplateSpecializationDecl>(Tag)) {
+ mergeTemplateLV(LV, spec, computation);
+ }
+
+ // - an enumerator belonging to an enumeration with external linkage;
+ } else if (isa<EnumConstantDecl>(D)) {
+ LinkageInfo EnumLV = getLVForDecl(cast<NamedDecl>(D->getDeclContext()),
+ computation);
+ if (!isExternalFormalLinkage(EnumLV.getLinkage()))
+ return LinkageInfo::none();
+ LV.merge(EnumLV);
+
+ // - a template, unless it is a function template that has
+ // internal linkage (Clause 14);
+ } else if (const auto *temp = dyn_cast<TemplateDecl>(D)) {
+ bool considerVisibility = !hasExplicitVisibilityAlready(computation);
+ LinkageInfo tempLV =
+ getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
+ LV.mergeMaybeWithVisibility(tempLV, considerVisibility);
+
+ // - a namespace (7.3), unless it is declared within an unnamed
+ // namespace.
+ } else if (isa<NamespaceDecl>(D) && !D->isInAnonymousNamespace()) {
+ return LV;
+
+ // By extension, we assign external linkage to Objective-C
+ // interfaces.
+ } else if (isa<ObjCInterfaceDecl>(D)) {
+    // Fall out of the if-chain and use the defaults computed above.
+
+ } else if (auto *TD = dyn_cast<TypedefNameDecl>(D)) {
+ // A typedef declaration has linkage if it gives a type a name for
+ // linkage purposes.
+ if (!TD->getAnonDeclWithTypedefName(/*AnyRedecl*/true))
+ return LinkageInfo::none();
+
+ // Everything not covered here has no linkage.
+ } else {
+ return LinkageInfo::none();
+ }
+
+ // If we ended up with non-external linkage, visibility should
+ // always be default.
+ if (LV.getLinkage() != ExternalLinkage)
+ return LinkageInfo(LV.getLinkage(), DefaultVisibility, false);
+
+ return LV;
+}
+
+static LinkageInfo getLVForClassMember(const NamedDecl *D,
+ LVComputationKind computation) {
+ // Only certain class members have linkage. Note that fields don't
+ // really have linkage, but it's convenient to say they do for the
+ // purposes of calculating linkage of pointer-to-data-member
+ // template arguments.
+ //
+ // Templates also don't officially have linkage, but since we ignore
+ // the C++ standard and look at template arguments when determining
+ // linkage and visibility of a template specialization, we might hit
+ // a template template argument that way. If we do, we need to
+ // consider its linkage.
+ if (!(isa<CXXMethodDecl>(D) ||
+ isa<VarDecl>(D) ||
+ isa<FieldDecl>(D) ||
+ isa<IndirectFieldDecl>(D) ||
+ isa<TagDecl>(D) ||
+ isa<TemplateDecl>(D)))
+ return LinkageInfo::none();
+
+ LinkageInfo LV;
+
+ // If we have an explicit visibility attribute, merge that in.
+ if (!hasExplicitVisibilityAlready(computation)) {
+ if (Optional<Visibility> Vis = getExplicitVisibility(D, computation))
+ LV.mergeVisibility(*Vis, true);
+    // If we're paying attention to global visibility, apply
+    // -fvisibility-inlines-hidden if this is an inline method.
+ //
+ // Note that we do this before merging information about
+ // the class visibility.
+ if (!LV.isVisibilityExplicit() && useInlineVisibilityHidden(D))
+ LV.mergeVisibility(HiddenVisibility, true);
+ }
+
+ // If this class member has an explicit visibility attribute, the only
+ // thing that can change its visibility is the template arguments, so
+ // only look for them when processing the class.
+ LVComputationKind classComputation = computation;
+ if (LV.isVisibilityExplicit())
+ classComputation = withExplicitVisibilityAlready(computation);
+
+ LinkageInfo classLV =
+ getLVForDecl(cast<RecordDecl>(D->getDeclContext()), classComputation);
+ // If the class already has unique-external linkage, we can't improve.
+ if (classLV.getLinkage() == UniqueExternalLinkage)
+ return LinkageInfo::uniqueExternal();
+
+ if (!isExternallyVisible(classLV.getLinkage()))
+ return LinkageInfo::none();
+
+ // Otherwise, don't merge in classLV yet, because in certain cases
+ // we need to completely ignore the visibility from it.
+
+ // Specifically, if this decl exists and has an explicit attribute.
+ const NamedDecl *explicitSpecSuppressor = nullptr;
+
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) {
+ // If the type of the function uses a type with unique-external
+ // linkage, it's not legally usable from outside this translation unit.
+ // But only look at the type-as-written. If this function has an
+ // auto-deduced return type, we can't compute the linkage of that type
+ // because it could require looking at the linkage of this function, and we
+ // don't need this for correctness because the type is not part of the
+ // function's signature.
+ // FIXME: This is a hack. We should be able to solve this circularity and
+ // the one in getLVForNamespaceScopeDecl for Functions some other way.
+ {
+ QualType TypeAsWritten = MD->getType();
+ if (TypeSourceInfo *TSI = MD->getTypeSourceInfo())
+ TypeAsWritten = TSI->getType();
+ if (TypeAsWritten->getLinkage() == UniqueExternalLinkage)
+ return LinkageInfo::uniqueExternal();
+ }
+ // If this is a method template specialization, use the linkage for
+ // the template parameters and arguments.
+ if (FunctionTemplateSpecializationInfo *spec
+ = MD->getTemplateSpecializationInfo()) {
+ mergeTemplateLV(LV, MD, spec, computation);
+ if (spec->isExplicitSpecialization()) {
+ explicitSpecSuppressor = MD;
+ } else if (isExplicitMemberSpecialization(spec->getTemplate())) {
+ explicitSpecSuppressor = spec->getTemplate()->getTemplatedDecl();
+ }
+ } else if (isExplicitMemberSpecialization(MD)) {
+ explicitSpecSuppressor = MD;
+ }
+
+ } else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
+ if (const auto *spec = dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
+ mergeTemplateLV(LV, spec, computation);
+ if (spec->isExplicitSpecialization()) {
+ explicitSpecSuppressor = spec;
+ } else {
+ const ClassTemplateDecl *temp = spec->getSpecializedTemplate();
+ if (isExplicitMemberSpecialization(temp)) {
+ explicitSpecSuppressor = temp->getTemplatedDecl();
+ }
+ }
+ } else if (isExplicitMemberSpecialization(RD)) {
+ explicitSpecSuppressor = RD;
+ }
+
+ // Static data members.
+ } else if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ if (const auto *spec = dyn_cast<VarTemplateSpecializationDecl>(VD))
+ mergeTemplateLV(LV, spec, computation);
+
+ // Modify the variable's linkage by its type, but ignore the
+ // type's visibility unless it's a definition.
+ LinkageInfo typeLV = getLVForType(*VD->getType(), computation);
+ if (!LV.isVisibilityExplicit() && !classLV.isVisibilityExplicit())
+ LV.mergeVisibility(typeLV);
+ LV.mergeExternalVisibility(typeLV);
+
+ if (isExplicitMemberSpecialization(VD)) {
+ explicitSpecSuppressor = VD;
+ }
+
+ // Template members.
+ } else if (const auto *temp = dyn_cast<TemplateDecl>(D)) {
+ bool considerVisibility =
+ (!LV.isVisibilityExplicit() &&
+ !classLV.isVisibilityExplicit() &&
+ !hasExplicitVisibilityAlready(computation));
+ LinkageInfo tempLV =
+ getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
+ LV.mergeMaybeWithVisibility(tempLV, considerVisibility);
+
+ if (const auto *redeclTemp = dyn_cast<RedeclarableTemplateDecl>(temp)) {
+ if (isExplicitMemberSpecialization(redeclTemp)) {
+ explicitSpecSuppressor = temp->getTemplatedDecl();
+ }
+ }
+ }
+
+ // We should never be looking for an attribute directly on a template.
+ assert(!explicitSpecSuppressor || !isa<TemplateDecl>(explicitSpecSuppressor));
+
+ // If this member is an explicit member specialization, and it has
+ // an explicit attribute, ignore visibility from the parent.
+ bool considerClassVisibility = true;
+ if (explicitSpecSuppressor &&
+ // optimization: hasDVA() is true only with explicit visibility.
+ LV.isVisibilityExplicit() &&
+ classLV.getVisibility() != DefaultVisibility &&
+ hasDirectVisibilityAttribute(explicitSpecSuppressor, computation)) {
+ considerClassVisibility = false;
+ }
+
+ // Finally, merge in information from the class.
+ LV.mergeMaybeWithVisibility(classLV, considerClassVisibility);
+ return LV;
+}
+
+void NamedDecl::anchor() { }
+
+static LinkageInfo computeLVForDecl(const NamedDecl *D,
+ LVComputationKind computation);
+
+bool NamedDecl::isLinkageValid() const {
+ if (!hasCachedLinkage())
+ return true;
+
+ return computeLVForDecl(this, LVForLinkageOnly).getLinkage() ==
+ getCachedLinkage();
+}
+
+ObjCStringFormatFamily NamedDecl::getObjCFStringFormattingFamily() const {
+ StringRef name = getName();
+ if (name.empty()) return SFF_None;
+
+ if (name.front() == 'C')
+ if (name == "CFStringCreateWithFormat" ||
+ name == "CFStringCreateWithFormatAndArguments" ||
+ name == "CFStringAppendFormat" ||
+ name == "CFStringAppendFormatAndArguments")
+ return SFF_CFString;
+ return SFF_None;
+}
+
+Linkage NamedDecl::getLinkageInternal() const {
+ // We don't care about visibility here, so ask for the cheapest
+ // possible visibility analysis.
+ return getLVForDecl(this, LVForLinkageOnly).getLinkage();
+}
+
+LinkageInfo NamedDecl::getLinkageAndVisibility() const {
+ LVComputationKind computation =
+ (usesTypeVisibility(this) ? LVForType : LVForValue);
+ return getLVForDecl(this, computation);
+}
+
+static Optional<Visibility>
+getExplicitVisibilityAux(const NamedDecl *ND,
+ NamedDecl::ExplicitVisibilityKind kind,
+ bool IsMostRecent) {
+ assert(!IsMostRecent || ND == ND->getMostRecentDecl());
+
+ // Check the declaration itself first.
+ if (Optional<Visibility> V = getVisibilityOf(ND, kind))
+ return V;
+
+ // If this is a member class of a specialization of a class template
+ // and the corresponding decl has explicit visibility, use that.
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(ND)) {
+ CXXRecordDecl *InstantiatedFrom = RD->getInstantiatedFromMemberClass();
+ if (InstantiatedFrom)
+ return getVisibilityOf(InstantiatedFrom, kind);
+ }
+
+ // If there wasn't explicit visibility there, and this is a
+ // specialization of a class template, check for visibility
+ // on the pattern.
+ if (const auto *spec = dyn_cast<ClassTemplateSpecializationDecl>(ND))
+ return getVisibilityOf(spec->getSpecializedTemplate()->getTemplatedDecl(),
+ kind);
+
+ // Use the most recent declaration.
+ if (!IsMostRecent && !isa<NamespaceDecl>(ND)) {
+ const NamedDecl *MostRecent = ND->getMostRecentDecl();
+ if (MostRecent != ND)
+ return getExplicitVisibilityAux(MostRecent, kind, true);
+ }
+
+ if (const auto *Var = dyn_cast<VarDecl>(ND)) {
+ if (Var->isStaticDataMember()) {
+ VarDecl *InstantiatedFrom = Var->getInstantiatedFromStaticDataMember();
+ if (InstantiatedFrom)
+ return getVisibilityOf(InstantiatedFrom, kind);
+ }
+
+ if (const auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(Var))
+ return getVisibilityOf(VTSD->getSpecializedTemplate()->getTemplatedDecl(),
+ kind);
+
+ return None;
+ }
+ // Also handle function template specializations.
+ if (const auto *fn = dyn_cast<FunctionDecl>(ND)) {
+ // If the function is a specialization of a template with an
+ // explicit visibility attribute, use that.
+ if (FunctionTemplateSpecializationInfo *templateInfo
+ = fn->getTemplateSpecializationInfo())
+ return getVisibilityOf(templateInfo->getTemplate()->getTemplatedDecl(),
+ kind);
+
+ // If the function is a member of a specialization of a class template
+ // and the corresponding decl has explicit visibility, use that.
+ FunctionDecl *InstantiatedFrom = fn->getInstantiatedFromMemberFunction();
+ if (InstantiatedFrom)
+ return getVisibilityOf(InstantiatedFrom, kind);
+
+ return None;
+ }
+
+ // The visibility of a template is stored in the templated decl.
+ if (const auto *TD = dyn_cast<TemplateDecl>(ND))
+ return getVisibilityOf(TD->getTemplatedDecl(), kind);
+
+ return None;
+}
+
+Optional<Visibility>
+NamedDecl::getExplicitVisibility(ExplicitVisibilityKind kind) const {
+ return getExplicitVisibilityAux(this, kind, false);
+}
+
+static LinkageInfo getLVForClosure(const DeclContext *DC, Decl *ContextDecl,
+ LVComputationKind computation) {
+ // This lambda has its linkage/visibility determined by its owner.
+ if (ContextDecl) {
+ if (isa<ParmVarDecl>(ContextDecl))
+ DC = ContextDecl->getDeclContext()->getRedeclContext();
+ else
+ return getLVForDecl(cast<NamedDecl>(ContextDecl), computation);
+ }
+
+ if (const auto *ND = dyn_cast<NamedDecl>(DC))
+ return getLVForDecl(ND, computation);
+
+ return LinkageInfo::external();
+}
+
+static LinkageInfo getLVForLocalDecl(const NamedDecl *D,
+ LVComputationKind computation) {
+ if (const auto *Function = dyn_cast<FunctionDecl>(D)) {
+ if (Function->isInAnonymousNamespace() &&
+ !Function->isInExternCContext())
+ return LinkageInfo::uniqueExternal();
+
+ // This is a "void f();" which got merged with a file static.
+ if (Function->getCanonicalDecl()->getStorageClass() == SC_Static)
+ return LinkageInfo::internal();
+
+ LinkageInfo LV;
+ if (!hasExplicitVisibilityAlready(computation)) {
+ if (Optional<Visibility> Vis =
+ getExplicitVisibility(Function, computation))
+ LV.mergeVisibility(*Vis, true);
+ }
+
+ // Note that Sema::MergeCompatibleFunctionDecls already takes care of
+ // merging storage classes and visibility attributes, so we don't have to
+ // look at previous decls in here.
+
+ return LV;
+ }
+
+ if (const auto *Var = dyn_cast<VarDecl>(D)) {
+ if (Var->hasExternalStorage()) {
+ if (Var->isInAnonymousNamespace() && !Var->isInExternCContext())
+ return LinkageInfo::uniqueExternal();
+
+ LinkageInfo LV;
+ if (Var->getStorageClass() == SC_PrivateExtern)
+ LV.mergeVisibility(HiddenVisibility, true);
+ else if (!hasExplicitVisibilityAlready(computation)) {
+ if (Optional<Visibility> Vis = getExplicitVisibility(Var, computation))
+ LV.mergeVisibility(*Vis, true);
+ }
+
+ if (const VarDecl *Prev = Var->getPreviousDecl()) {
+ LinkageInfo PrevLV = getLVForDecl(Prev, computation);
+ if (PrevLV.getLinkage())
+ LV.setLinkage(PrevLV.getLinkage());
+ LV.mergeVisibility(PrevLV);
+ }
+
+ return LV;
+ }
+
+ if (!Var->isStaticLocal())
+ return LinkageInfo::none();
+ }
+
+ ASTContext &Context = D->getASTContext();
+ if (!Context.getLangOpts().CPlusPlus)
+ return LinkageInfo::none();
+
+ const Decl *OuterD = getOutermostFuncOrBlockContext(D);
+ if (!OuterD)
+ return LinkageInfo::none();
+
+ LinkageInfo LV;
+ if (const auto *BD = dyn_cast<BlockDecl>(OuterD)) {
+ if (!BD->getBlockManglingNumber())
+ return LinkageInfo::none();
+
+ LV = getLVForClosure(BD->getDeclContext()->getRedeclContext(),
+ BD->getBlockManglingContextDecl(), computation);
+ } else {
+ const auto *FD = cast<FunctionDecl>(OuterD);
+ if (!FD->isInlined() &&
+ !isTemplateInstantiation(FD->getTemplateSpecializationKind()))
+ return LinkageInfo::none();
+
+ LV = getLVForDecl(FD, computation);
+ }
+ if (!isExternallyVisible(LV.getLinkage()))
+ return LinkageInfo::none();
+ return LinkageInfo(VisibleNoLinkage, LV.getVisibility(),
+ LV.isVisibilityExplicit());
+}
+
+static inline const CXXRecordDecl*
+getOutermostEnclosingLambda(const CXXRecordDecl *Record) {
+ const CXXRecordDecl *Ret = Record;
+ while (Record && Record->isLambda()) {
+ Ret = Record;
+ if (!Record->getParent()) break;
+    // Get the containing class of this lambda class.
+ Record = dyn_cast_or_null<CXXRecordDecl>(
+ Record->getParent()->getParent());
+ }
+ return Ret;
+}
+
+static LinkageInfo computeLVForDecl(const NamedDecl *D,
+ LVComputationKind computation) {
+ // Internal_linkage attribute overrides other considerations.
+ if (D->hasAttr<InternalLinkageAttr>())
+ return LinkageInfo::internal();
+
+ // Objective-C: treat all Objective-C declarations as having external
+ // linkage.
+ switch (D->getKind()) {
+ default:
+ break;
+
+ // Per C++ [basic.link]p2, only the names of objects, references,
+ // functions, types, templates, namespaces, and values ever have linkage.
+ //
+ // Note that the name of a typedef, namespace alias, using declaration,
+ // and so on are not the name of the corresponding type, namespace, or
+ // declaration, so they do *not* have linkage.
+ case Decl::ImplicitParam:
+ case Decl::Label:
+ case Decl::NamespaceAlias:
+ case Decl::ParmVar:
+ case Decl::Using:
+ case Decl::UsingShadow:
+ case Decl::UsingDirective:
+ return LinkageInfo::none();
+
+ case Decl::EnumConstant:
+ // C++ [basic.link]p4: an enumerator has the linkage of its enumeration.
+ return getLVForDecl(cast<EnumDecl>(D->getDeclContext()), computation);
+
+ case Decl::Typedef:
+ case Decl::TypeAlias:
+ // A typedef declaration has linkage if it gives a type a name for
+ // linkage purposes.
+ if (!D->getASTContext().getLangOpts().CPlusPlus ||
+ !cast<TypedefNameDecl>(D)
+ ->getAnonDeclWithTypedefName(/*AnyRedecl*/true))
+ return LinkageInfo::none();
+ break;
+
+ case Decl::TemplateTemplateParm: // count these as external
+ case Decl::NonTypeTemplateParm:
+ case Decl::ObjCAtDefsField:
+ case Decl::ObjCCategory:
+ case Decl::ObjCCategoryImpl:
+ case Decl::ObjCCompatibleAlias:
+ case Decl::ObjCImplementation:
+ case Decl::ObjCMethod:
+ case Decl::ObjCProperty:
+ case Decl::ObjCPropertyImpl:
+ case Decl::ObjCProtocol:
+ return LinkageInfo::external();
+
+ case Decl::CXXRecord: {
+ const auto *Record = cast<CXXRecordDecl>(D);
+ if (Record->isLambda()) {
+ if (!Record->getLambdaManglingNumber()) {
+ // This lambda has no mangling number, so it's internal.
+ return LinkageInfo::internal();
+ }
+
+      // This lambda has its linkage/visibility determined:
+      //  - either by the outermost enclosing lambda, if that lambda has no
+      //    mangling number (making this lambda internal), or
+      //  - by the parent of the outermost enclosing lambda.
+      // This prevents infinite recursion in settings such as nested lambdas
+      // used in NSDMIs, e.g.
+ // struct L {
+ // int t{};
+ // int t2 = ([](int a) { return [](int b) { return b; };})(t)(t);
+ // };
+ const CXXRecordDecl *OuterMostLambda =
+ getOutermostEnclosingLambda(Record);
+ if (!OuterMostLambda->getLambdaManglingNumber())
+ return LinkageInfo::internal();
+
+ return getLVForClosure(
+ OuterMostLambda->getDeclContext()->getRedeclContext(),
+ OuterMostLambda->getLambdaContextDecl(), computation);
+ }
+
+ break;
+ }
+ }
+
+ // Handle linkage for namespace-scope names.
+ if (D->getDeclContext()->getRedeclContext()->isFileContext())
+ return getLVForNamespaceScopeDecl(D, computation);
+
+ // C++ [basic.link]p5:
+ // In addition, a member function, static data member, a named
+ // class or enumeration of class scope, or an unnamed class or
+ // enumeration defined in a class-scope typedef declaration such
+ // that the class or enumeration has the typedef name for linkage
+ // purposes (7.1.3), has external linkage if the name of the class
+ // has external linkage.
+ if (D->getDeclContext()->isRecord())
+ return getLVForClassMember(D, computation);
+
+ // C++ [basic.link]p6:
+ // The name of a function declared in block scope and the name of
+ // an object declared by a block scope extern declaration have
+ // linkage. If there is a visible declaration of an entity with
+ // linkage having the same name and type, ignoring entities
+ // declared outside the innermost enclosing namespace scope, the
+ // block scope declaration declares that same entity and receives
+ // the linkage of the previous declaration. If there is more than
+ // one such matching entity, the program is ill-formed. Otherwise,
+ // if no matching entity is found, the block scope entity receives
+ // external linkage.
+ if (D->getDeclContext()->isFunctionOrMethod())
+ return getLVForLocalDecl(D, computation);
+
+ // C++ [basic.link]p6:
+ // Names not covered by these rules have no linkage.
+ return LinkageInfo::none();
+}
+
+namespace clang {
+class LinkageComputer {
+public:
+ static LinkageInfo getLVForDecl(const NamedDecl *D,
+ LVComputationKind computation) {
+ // Internal_linkage attribute overrides other considerations.
+ if (D->hasAttr<InternalLinkageAttr>())
+ return LinkageInfo::internal();
+
+ if (computation == LVForLinkageOnly && D->hasCachedLinkage())
+ return LinkageInfo(D->getCachedLinkage(), DefaultVisibility, false);
+
+ LinkageInfo LV = computeLVForDecl(D, computation);
+ if (D->hasCachedLinkage())
+ assert(D->getCachedLinkage() == LV.getLinkage());
+
+ D->setCachedLinkage(LV.getLinkage());
+
+#ifndef NDEBUG
+    // In C (because of GNU inline) and in C++ with Microsoft extensions, a
+    // static can follow an extern, so we can have two decls with different
+    // linkages.
+ const LangOptions &Opts = D->getASTContext().getLangOpts();
+ if (!Opts.CPlusPlus || Opts.MicrosoftExt)
+ return LV;
+
+    // We have just computed the linkage for this decl. By induction we know
+    // that all other computed linkages match; check that the one we just
+    // computed does as well.
+ NamedDecl *Old = nullptr;
+ for (auto I : D->redecls()) {
+ auto *T = cast<NamedDecl>(I);
+ if (T == D)
+ continue;
+ if (!T->isInvalidDecl() && T->hasCachedLinkage()) {
+ Old = T;
+ break;
+ }
+ }
+ assert(!Old || Old->getCachedLinkage() == D->getCachedLinkage());
+#endif
+
+ return LV;
+ }
+};
+}
+
+static LinkageInfo getLVForDecl(const NamedDecl *D,
+ LVComputationKind computation) {
+ return clang::LinkageComputer::getLVForDecl(D, computation);
+}
+
+std::string NamedDecl::getQualifiedNameAsString() const {
+ std::string QualName;
+ llvm::raw_string_ostream OS(QualName);
+ printQualifiedName(OS, getASTContext().getPrintingPolicy());
+ return OS.str();
+}
+
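+/// Print the fully qualified name, e.g. (illustrative)
+/// "ns::Template<int>::method(int, ...)". By default, anonymous scopes are
+/// printed as "(anonymous namespace)" or "(anonymous <tag-kind>)".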
+void NamedDecl::printQualifiedName(raw_ostream &OS) const {
+ printQualifiedName(OS, getASTContext().getPrintingPolicy());
+}
+
+void NamedDecl::printQualifiedName(raw_ostream &OS,
+ const PrintingPolicy &P) const {
+ const DeclContext *Ctx = getDeclContext();
+
+ if (Ctx->isFunctionOrMethod()) {
+ printName(OS);
+ return;
+ }
+
+ typedef SmallVector<const DeclContext *, 8> ContextsTy;
+ ContextsTy Contexts;
+
+ // Collect contexts.
+ while (Ctx && isa<NamedDecl>(Ctx)) {
+ Contexts.push_back(Ctx);
+ Ctx = Ctx->getParent();
+ }
+
+ for (ContextsTy::reverse_iterator I = Contexts.rbegin(), E = Contexts.rend();
+ I != E; ++I) {
+ if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(*I)) {
+ OS << Spec->getName();
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ TemplateSpecializationType::PrintTemplateArgumentList(OS,
+ TemplateArgs.data(),
+ TemplateArgs.size(),
+ P);
+ } else if (const auto *ND = dyn_cast<NamespaceDecl>(*I)) {
+ if (P.SuppressUnwrittenScope &&
+ (ND->isAnonymousNamespace() || ND->isInline()))
+ continue;
+ if (ND->isAnonymousNamespace()) {
+ OS << (P.MSVCFormatting ? "`anonymous namespace\'"
+ : "(anonymous namespace)");
+ }
+ else
+ OS << *ND;
+ } else if (const auto *RD = dyn_cast<RecordDecl>(*I)) {
+ if (!RD->getIdentifier())
+ OS << "(anonymous " << RD->getKindName() << ')';
+ else
+ OS << *RD;
+ } else if (const auto *FD = dyn_cast<FunctionDecl>(*I)) {
+ const FunctionProtoType *FT = nullptr;
+ if (FD->hasWrittenPrototype())
+ FT = dyn_cast<FunctionProtoType>(FD->getType()->castAs<FunctionType>());
+
+ OS << *FD << '(';
+ if (FT) {
+ unsigned NumParams = FD->getNumParams();
+ for (unsigned i = 0; i < NumParams; ++i) {
+ if (i)
+ OS << ", ";
+ OS << FD->getParamDecl(i)->getType().stream(P);
+ }
+
+ if (FT->isVariadic()) {
+ if (NumParams > 0)
+ OS << ", ";
+ OS << "...";
+ }
+ }
+ OS << ')';
+ } else if (const auto *ED = dyn_cast<EnumDecl>(*I)) {
+ // C++ [dcl.enum]p10: Each enum-name and each unscoped
+ // enumerator is declared in the scope that immediately contains
+ // the enum-specifier. Each scoped enumerator is declared in the
+ // scope of the enumeration.
+ if (ED->isScoped() || ED->getIdentifier())
+ OS << *ED;
+ else
+ continue;
+ } else {
+ OS << *cast<NamedDecl>(*I);
+ }
+ OS << "::";
+ }
+
+ if (getDeclName())
+ OS << *this;
+ else
+ OS << "(anonymous)";
+}
+
+void NamedDecl::getNameForDiagnostic(raw_ostream &OS,
+ const PrintingPolicy &Policy,
+ bool Qualified) const {
+ if (Qualified)
+ printQualifiedName(OS, Policy);
+ else
+ printName(OS);
+}
+
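+// A note on the overload trick below: passing a null Type##Decl pointer
+// selects the Redeclarable<T>* overload whenever Type##Decl derives from
+// Redeclarable<Type##Decl>, and the C-style variadic fallback otherwise, so
+// each case in the switch statically encodes whether that Decl kind is
+// redeclarable.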
+template<typename T> static bool isRedeclarableImpl(Redeclarable<T> *) {
+ return true;
+}
+static bool isRedeclarableImpl(...) { return false; }
+static bool isRedeclarable(Decl::Kind K) {
+ switch (K) {
+#define DECL(Type, Base) \
+ case Decl::Type: \
+ return isRedeclarableImpl((Type##Decl *)nullptr);
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+ }
+ llvm_unreachable("unknown decl kind");
+}
+
+bool NamedDecl::declarationReplaces(NamedDecl *OldD, bool IsKnownNewer) const {
+ assert(getDeclName() == OldD->getDeclName() && "Declaration name mismatch");
+
+ // Never replace one imported declaration with another; we need both results
+ // when re-exporting.
+ if (OldD->isFromASTFile() && isFromASTFile())
+ return false;
+
+ // A kind mismatch implies that the declaration is not replaced.
+ if (OldD->getKind() != getKind())
+ return false;
+
+ // For method declarations, we never replace. (Why?)
+ if (isa<ObjCMethodDecl>(this))
+ return false;
+
+ // For parameters, pick the newer one. This is either an error or (in
+ // Objective-C) permitted as an extension.
+ if (isa<ParmVarDecl>(this))
+ return true;
+
+ // Inline namespaces can give us two declarations with the same
+ // name and kind in the same scope but different contexts; we should
+ // keep both declarations in this case.
+ if (!this->getDeclContext()->getRedeclContext()->Equals(
+ OldD->getDeclContext()->getRedeclContext()))
+ return false;
+
+ // Using declarations can be replaced if they import the same name from the
+ // same context.
+ if (auto *UD = dyn_cast<UsingDecl>(this)) {
+ ASTContext &Context = getASTContext();
+ return Context.getCanonicalNestedNameSpecifier(UD->getQualifier()) ==
+ Context.getCanonicalNestedNameSpecifier(
+ cast<UsingDecl>(OldD)->getQualifier());
+ }
+ if (auto *UUVD = dyn_cast<UnresolvedUsingValueDecl>(this)) {
+ ASTContext &Context = getASTContext();
+ return Context.getCanonicalNestedNameSpecifier(UUVD->getQualifier()) ==
+ Context.getCanonicalNestedNameSpecifier(
+ cast<UnresolvedUsingValueDecl>(OldD)->getQualifier());
+ }
+
+ // UsingDirectiveDecls are not really NamedDecls, and they all have the
+ // same name. They can be replaced if they nominate the same namespace.
+ // FIXME: Is this true even if they have different module visibility?
+ if (auto *UD = dyn_cast<UsingDirectiveDecl>(this))
+ return UD->getNominatedNamespace()->getOriginalNamespace() ==
+ cast<UsingDirectiveDecl>(OldD)->getNominatedNamespace()
+ ->getOriginalNamespace();
+
+ if (isRedeclarable(getKind())) {
+ if (getCanonicalDecl() != OldD->getCanonicalDecl())
+ return false;
+
+ if (IsKnownNewer)
+ return true;
+
+ // Check whether this is actually newer than OldD. We want to keep the
+ // newer declaration. This loop will usually only iterate once, because
+ // OldD is usually the previous declaration.
+ for (auto D : redecls()) {
+ if (D == OldD)
+ break;
+
+ // If we reach the canonical declaration, then OldD is not actually older
+ // than this one.
+ //
+ // FIXME: In this case, we should not add this decl to the lookup table.
+ if (D->isCanonicalDecl())
+ return false;
+ }
+
+ // It's a newer declaration of the same kind of declaration in the same
+ // scope: we want this decl instead of the existing one.
+ return true;
+ }
+
+ // In all other cases, we need to keep both declarations in case they have
+ // different visibility. Any attempt to use the name will result in an
+ // ambiguity if more than one is visible.
+ return false;
+}
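+
+// Illustrative sketch (hypothetical declarations, for exposition): for a
+// redeclarable entity such as
+//   extern int x; // OldD
+//   int x = 1;    // this
+// this declaration replaces the old one in the lookup table: both share a
+// canonical declaration, and the redecls() walk above confirms this one is
+// newer.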
+
+bool NamedDecl::hasLinkage() const {
+ return getFormalLinkage() != NoLinkage;
+}
+
+NamedDecl *NamedDecl::getUnderlyingDeclImpl() {
+ NamedDecl *ND = this;
+ while (auto *UD = dyn_cast<UsingShadowDecl>(ND))
+ ND = UD->getTargetDecl();
+
+ if (auto *AD = dyn_cast<ObjCCompatibleAliasDecl>(ND))
+ return AD->getClassInterface();
+
+ if (auto *AD = dyn_cast<NamespaceAliasDecl>(ND))
+ return AD->getNamespace();
+
+ return ND;
+}
+
+bool NamedDecl::isCXXInstanceMember() const {
+ if (!isCXXClassMember())
+ return false;
+
+ const NamedDecl *D = this;
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+
+ if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D) || isa<MSPropertyDecl>(D))
+ return true;
+ if (const auto *MD = dyn_cast_or_null<CXXMethodDecl>(D->getAsFunction()))
+ return MD->isInstance();
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// DeclaratorDecl Implementation
+//===----------------------------------------------------------------------===//
+
+template <typename DeclT>
+static SourceLocation getTemplateOrInnerLocStart(const DeclT *decl) {
+ if (decl->getNumTemplateParameterLists() > 0)
+ return decl->getTemplateParameterList(0)->getTemplateLoc();
+ else
+ return decl->getInnerLocStart();
+}
+
+SourceLocation DeclaratorDecl::getTypeSpecStartLoc() const {
+ TypeSourceInfo *TSI = getTypeSourceInfo();
+ if (TSI) return TSI->getTypeLoc().getBeginLoc();
+ return SourceLocation();
+}
+
+void DeclaratorDecl::setQualifierInfo(NestedNameSpecifierLoc QualifierLoc) {
+ if (QualifierLoc) {
+ // Make sure the extended decl info is allocated.
+ if (!hasExtInfo()) {
+ // Save (non-extended) type source info pointer.
+ auto *savedTInfo = DeclInfo.get<TypeSourceInfo*>();
+ // Allocate external info struct.
+ DeclInfo = new (getASTContext()) ExtInfo;
+ // Restore savedTInfo into (extended) decl info.
+ getExtInfo()->TInfo = savedTInfo;
+ }
+ // Set qualifier info.
+ getExtInfo()->QualifierLoc = QualifierLoc;
+ } else {
+ // Here QualifierLoc is null, i.e., we are removing the qualifier (if any).
+ if (hasExtInfo()) {
+ if (getExtInfo()->NumTemplParamLists == 0) {
+ // Save type source info pointer.
+ TypeSourceInfo *savedTInfo = getExtInfo()->TInfo;
+ // Deallocate the extended decl info.
+ getASTContext().Deallocate(getExtInfo());
+ // Restore savedTInfo into (non-extended) decl info.
+ DeclInfo = savedTInfo;
+ } else
+ getExtInfo()->QualifierLoc = QualifierLoc;
+ }
+ }
+}
+
+void DeclaratorDecl::setTemplateParameterListsInfo(
+ ASTContext &Context, ArrayRef<TemplateParameterList *> TPLists) {
+ assert(!TPLists.empty());
+ // Make sure the extended decl info is allocated.
+ if (!hasExtInfo()) {
+ // Save (non-extended) type source info pointer.
+ auto *savedTInfo = DeclInfo.get<TypeSourceInfo*>();
+ // Allocate external info struct.
+ DeclInfo = new (getASTContext()) ExtInfo;
+ // Restore savedTInfo into (extended) decl info.
+ getExtInfo()->TInfo = savedTInfo;
+ }
+ // Set the template parameter lists info.
+ getExtInfo()->setTemplateParameterListsInfo(Context, TPLists);
+}
+
+SourceLocation DeclaratorDecl::getOuterLocStart() const {
+ return getTemplateOrInnerLocStart(this);
+}
+
+namespace {
+
+// Helper function: returns true if QT is or contains a type
+// having a postfix component.
+bool typeIsPostfix(clang::QualType QT) {
+ while (true) {
+ const Type* T = QT.getTypePtr();
+ switch (T->getTypeClass()) {
+ default:
+ return false;
+ case Type::Pointer:
+ QT = cast<PointerType>(T)->getPointeeType();
+ break;
+ case Type::BlockPointer:
+ QT = cast<BlockPointerType>(T)->getPointeeType();
+ break;
+ case Type::MemberPointer:
+ QT = cast<MemberPointerType>(T)->getPointeeType();
+ break;
+ case Type::LValueReference:
+ case Type::RValueReference:
+ QT = cast<ReferenceType>(T)->getPointeeType();
+ break;
+ case Type::PackExpansion:
+ QT = cast<PackExpansionType>(T)->getPattern();
+ break;
+ case Type::Paren:
+ case Type::ConstantArray:
+ case Type::DependentSizedArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ return true;
+ }
+ }
+}
+
+} // namespace
+
+SourceRange DeclaratorDecl::getSourceRange() const {
+ SourceLocation RangeEnd = getLocation();
+ if (TypeSourceInfo *TInfo = getTypeSourceInfo()) {
+ // If the declaration has no name, or the type extends past the name, take
+ // the end location of the type.
+ if (!getDeclName() || typeIsPostfix(TInfo->getType()))
+ RangeEnd = TInfo->getTypeLoc().getSourceRange().getEnd();
+ }
+ return SourceRange(getOuterLocStart(), RangeEnd);
+}
+
+void QualifierInfo::setTemplateParameterListsInfo(
+ ASTContext &Context, ArrayRef<TemplateParameterList *> TPLists) {
+ // Free previous template parameters (if any).
+ if (NumTemplParamLists > 0) {
+ Context.Deallocate(TemplParamLists);
+ TemplParamLists = nullptr;
+ NumTemplParamLists = 0;
+ }
+ // Set info on matched template parameter lists (if any).
+ if (!TPLists.empty()) {
+ TemplParamLists = new (Context) TemplateParameterList *[TPLists.size()];
+ NumTemplParamLists = TPLists.size();
+ std::copy(TPLists.begin(), TPLists.end(), TemplParamLists);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// VarDecl Implementation
+//===----------------------------------------------------------------------===//
+
+const char *VarDecl::getStorageClassSpecifierString(StorageClass SC) {
+ switch (SC) {
+ case SC_None: break;
+ case SC_Auto: return "auto";
+ case SC_Extern: return "extern";
+ case SC_PrivateExtern: return "__private_extern__";
+ case SC_Register: return "register";
+ case SC_Static: return "static";
+ }
+
+ llvm_unreachable("Invalid storage class");
+}
+
+VarDecl::VarDecl(Kind DK, ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
+ StorageClass SC)
+ : DeclaratorDecl(DK, DC, IdLoc, Id, T, TInfo, StartLoc),
+ redeclarable_base(C), Init() {
+ static_assert(sizeof(VarDeclBitfields) <= sizeof(unsigned),
+ "VarDeclBitfields too large!");
+ static_assert(sizeof(ParmVarDeclBitfields) <= sizeof(unsigned),
+ "ParmVarDeclBitfields too large!");
+ static_assert(sizeof(NonParmVarDeclBitfields) <= sizeof(unsigned),
+ "NonParmVarDeclBitfields too large!");
+ AllBits = 0;
+ VarDeclBits.SClass = SC;
+ // Everything else is implicitly initialized to false.
+}
+
+VarDecl *VarDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartL, SourceLocation IdL,
+ IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
+ StorageClass S) {
+ return new (C, DC) VarDecl(Var, C, DC, StartL, IdL, Id, T, TInfo, S);
+}
+
+VarDecl *VarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID)
+ VarDecl(Var, C, nullptr, SourceLocation(), SourceLocation(), nullptr,
+ QualType(), nullptr, SC_None);
+}
+
+void VarDecl::setStorageClass(StorageClass SC) {
+ assert(isLegalForVariable(SC));
+ VarDeclBits.SClass = SC;
+}
+
+VarDecl::TLSKind VarDecl::getTLSKind() const {
+ switch (VarDeclBits.TSCSpec) {
+ case TSCS_unspecified:
+ if (!hasAttr<ThreadAttr>() &&
+ !(getASTContext().getLangOpts().OpenMPUseTLS &&
+ getASTContext().getTargetInfo().isTLSSupported() &&
+ hasAttr<OMPThreadPrivateDeclAttr>()))
+ return TLS_None;
+ return ((getASTContext().getLangOpts().isCompatibleWithMSVC(
+ LangOptions::MSVC2015)) ||
+ hasAttr<OMPThreadPrivateDeclAttr>())
+ ? TLS_Dynamic
+ : TLS_Static;
+ case TSCS___thread: // Fall through.
+ case TSCS__Thread_local:
+ return TLS_Static;
+ case TSCS_thread_local:
+ return TLS_Dynamic;
+ }
+ llvm_unreachable("Unknown thread storage class specifier!");
+}
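+
+// Illustrative mapping (hypothetical declarations, assuming a target with
+// TLS support):
+//   __thread int a;      // TSCS___thread      -> TLS_Static
+//   _Thread_local int b; // TSCS__Thread_local -> TLS_Static
+//   thread_local int c;  // TSCS_thread_local  -> TLS_Dynamic (may require
+//                        //                       dynamic initialization)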
+
+SourceRange VarDecl::getSourceRange() const {
+ if (const Expr *Init = getInit()) {
+ SourceLocation InitEnd = Init->getLocEnd();
+ // If Init is implicit, ignore its source range and fall back on
+ // DeclaratorDecl::getSourceRange() to handle postfix elements.
+ if (InitEnd.isValid() && InitEnd != getLocation())
+ return SourceRange(getOuterLocStart(), InitEnd);
+ }
+ return DeclaratorDecl::getSourceRange();
+}
+
+template<typename T>
+static LanguageLinkage getDeclLanguageLinkage(const T &D) {
+ // C++ [dcl.link]p1: All function types, function names with external linkage,
+ // and variable names with external linkage have a language linkage.
+ if (!D.hasExternalFormalLinkage())
+ return NoLanguageLinkage;
+
+ // Language linkage is a C++ concept, but saying that everything else in C has
+ // C language linkage fits the implementation nicely.
+ ASTContext &Context = D.getASTContext();
+ if (!Context.getLangOpts().CPlusPlus)
+ return CLanguageLinkage;
+
+ // C++ [dcl.link]p4: A C language linkage is ignored in determining the
+ // language linkage of the names of class members and the function type of
+ // class member functions.
+ const DeclContext *DC = D.getDeclContext();
+ if (DC->isRecord())
+ return CXXLanguageLinkage;
+
+ // If the first decl is in an extern "C" context, any other redeclaration
+ // will have C language linkage. If the first one is not in an extern "C"
+ // context, we would have reported an error for any other decl being in one.
+ if (isFirstInExternCContext(&D))
+ return CLanguageLinkage;
+ return CXXLanguageLinkage;
+}
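+
+// Illustrative examples (hypothetical declarations):
+//   extern "C" int f();     // CLanguageLinkage
+//   int g();                // CXXLanguageLinkage in C++, CLanguageLinkage in C
+//   static int h();         // NoLanguageLinkage (no external formal linkage)
+//   struct S { void m(); }; // m has CXXLanguageLinkage; extern "C" is
+//                           // ignored for class members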
+
+template<typename T>
+static bool isDeclExternC(const T &D) {
+ // Since the context is ignored for class members, they can only have C++
+ // language linkage or no language linkage.
+ const DeclContext *DC = D.getDeclContext();
+ if (DC->isRecord()) {
+ assert(D.getASTContext().getLangOpts().CPlusPlus);
+ return false;
+ }
+
+ return D.getLanguageLinkage() == CLanguageLinkage;
+}
+
+LanguageLinkage VarDecl::getLanguageLinkage() const {
+ return getDeclLanguageLinkage(*this);
+}
+
+bool VarDecl::isExternC() const {
+ return isDeclExternC(*this);
+}
+
+bool VarDecl::isInExternCContext() const {
+ return getLexicalDeclContext()->isExternCContext();
+}
+
+bool VarDecl::isInExternCXXContext() const {
+ return getLexicalDeclContext()->isExternCXXContext();
+}
+
+VarDecl *VarDecl::getCanonicalDecl() { return getFirstDecl(); }
+
+VarDecl::DefinitionKind
+VarDecl::isThisDeclarationADefinition(ASTContext &C) const {
+ // C++ [basic.def]p2:
+ // A declaration is a definition unless [...] it contains the 'extern'
+ // specifier or a linkage-specification and neither an initializer [...],
+ // it declares a static data member in a class declaration [...].
+ // C++1y [temp.expl.spec]p15:
+ // An explicit specialization of a static data member or an explicit
+ // specialization of a static data member template is a definition if the
+ // declaration includes an initializer; otherwise, it is a declaration.
+ //
+ // FIXME: How do you declare (but not define) a partial specialization of
+ // a static data member template outside the containing class?
+ if (isStaticDataMember()) {
+ if (isOutOfLine() &&
+ (hasInit() ||
+ // If the first declaration is out-of-line, this may be an
+ // instantiation of an out-of-line partial specialization of a variable
+ // template for which we have not yet instantiated the initializer.
+ (getFirstDecl()->isOutOfLine()
+ ? getTemplateSpecializationKind() == TSK_Undeclared
+ : getTemplateSpecializationKind() !=
+ TSK_ExplicitSpecialization) ||
+ isa<VarTemplatePartialSpecializationDecl>(this)))
+ return Definition;
+ else
+ return DeclarationOnly;
+ }
+ // C99 6.7p5:
+ // A definition of an identifier is a declaration for that identifier that
+ // [...] causes storage to be reserved for that object.
+ // Note: that applies for all non-file-scope objects.
+ // C99 6.9.2p1:
+ // If the declaration of an identifier for an object has file scope and an
+ // initializer, the declaration is an external definition for the identifier.
+ if (hasInit())
+ return Definition;
+
+ if (hasAttr<AliasAttr>())
+ return Definition;
+
+ if (const auto *SAA = getAttr<SelectAnyAttr>())
+ if (!SAA->isInherited())
+ return Definition;
+
+ // A variable template specialization (other than a static data member
+ // template or an explicit specialization) is a declaration until we
+ // instantiate its initializer.
+ if (isa<VarTemplateSpecializationDecl>(this) &&
+ getTemplateSpecializationKind() != TSK_ExplicitSpecialization)
+ return DeclarationOnly;
+
+ if (hasExternalStorage())
+ return DeclarationOnly;
+
+ // C++ [dcl.link]p7:
+ // A declaration directly contained in a linkage-specification is treated
+ // as if it contains the extern specifier for the purpose of determining
+ // the linkage of the declared name and whether it is a definition.
+ if (isSingleLineLanguageLinkage(*this))
+ return DeclarationOnly;
+
+ // C99 6.9.2p2:
+ // A declaration of an object that has file scope without an initializer,
+ // and without a storage class specifier or the scs 'static', constitutes
+ // a tentative definition.
+ // No such thing in C++.
+ if (!C.getLangOpts().CPlusPlus && isFileVarDecl())
+ return TentativeDefinition;
+
+ // What's left is (in C, block-scope) declarations without initializers or
+ // external storage. These are definitions.
+ return Definition;
+}
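+
+// Illustrative file-scope examples in C (hypothetical declarations):
+//   int a = 1;    // Definition (has an initializer)
+//   extern int b; // DeclarationOnly (extern, no initializer)
+//   int c;        // TentativeDefinition in C; a definition in C++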
+
+VarDecl *VarDecl::getActingDefinition() {
+ DefinitionKind Kind = isThisDeclarationADefinition();
+ if (Kind != TentativeDefinition)
+ return nullptr;
+
+ VarDecl *LastTentative = nullptr;
+ VarDecl *First = getFirstDecl();
+ for (auto I : First->redecls()) {
+ Kind = I->isThisDeclarationADefinition();
+ if (Kind == Definition)
+ return nullptr;
+ else if (Kind == TentativeDefinition)
+ LastTentative = I;
+ }
+ return LastTentative;
+}
+
+VarDecl *VarDecl::getDefinition(ASTContext &C) {
+ VarDecl *First = getFirstDecl();
+ for (auto I : First->redecls()) {
+ if (I->isThisDeclarationADefinition(C) == Definition)
+ return I;
+ }
+ return nullptr;
+}
+
+VarDecl::DefinitionKind VarDecl::hasDefinition(ASTContext &C) const {
+ DefinitionKind Kind = DeclarationOnly;
+
+ const VarDecl *First = getFirstDecl();
+ for (auto I : First->redecls()) {
+ Kind = std::max(Kind, I->isThisDeclarationADefinition(C));
+ if (Kind == Definition)
+ break;
+ }
+
+ return Kind;
+}
+
+const Expr *VarDecl::getAnyInitializer(const VarDecl *&D) const {
+ for (auto I : redecls()) {
+ if (auto *E = I->getInit()) {
+ D = I;
+ return E;
+ }
+ }
+ return nullptr;
+}
+
+bool VarDecl::hasInit() const {
+ if (auto *P = dyn_cast<ParmVarDecl>(this))
+ if (P->hasUnparsedDefaultArg() || P->hasUninstantiatedDefaultArg())
+ return false;
+
+ return !Init.isNull();
+}
+
+Expr *VarDecl::getInit() {
+ if (!hasInit())
+ return nullptr;
+
+ if (auto *S = Init.dyn_cast<Stmt *>())
+ return cast<Expr>(S);
+
+ return cast_or_null<Expr>(Init.get<EvaluatedStmt *>()->Value);
+}
+
+Stmt **VarDecl::getInitAddress() {
+ if (auto *ES = Init.dyn_cast<EvaluatedStmt *>())
+ return &ES->Value;
+
+ return Init.getAddrOfPtr1();
+}
+
+bool VarDecl::isOutOfLine() const {
+ if (Decl::isOutOfLine())
+ return true;
+
+ if (!isStaticDataMember())
+ return false;
+
+ // If this static data member was instantiated from a static data member of
+ // a class template, check whether that static data member was defined
+ // out-of-line.
+ if (VarDecl *VD = getInstantiatedFromStaticDataMember())
+ return VD->isOutOfLine();
+
+ return false;
+}
+
+VarDecl *VarDecl::getOutOfLineDefinition() {
+ if (!isStaticDataMember())
+ return nullptr;
+
+ for (auto RD : redecls()) {
+ if (RD->getLexicalDeclContext()->isFileContext())
+ return RD;
+ }
+
+ return nullptr;
+}
+
+void VarDecl::setInit(Expr *I) {
+ if (auto *Eval = Init.dyn_cast<EvaluatedStmt *>()) {
+ Eval->~EvaluatedStmt();
+ getASTContext().Deallocate(Eval);
+ }
+
+ Init = I;
+}
+
+bool VarDecl::isUsableInConstantExpressions(ASTContext &C) const {
+ const LangOptions &Lang = C.getLangOpts();
+
+ if (!Lang.CPlusPlus)
+ return false;
+
+ // In C++11, any variable of reference type can be used in a constant
+ // expression if it is initialized by a constant expression.
+ if (Lang.CPlusPlus11 && getType()->isReferenceType())
+ return true;
+
+ // Only const objects can be used in constant expressions in C++. C++98 does
+ // not require the variable to be non-volatile, but we consider this to be a
+ // defect.
+ if (!getType().isConstQualified() || getType().isVolatileQualified())
+ return false;
+
+ // In C++, const, non-volatile variables of integral or enumeration types
+ // can be used in constant expressions.
+ if (getType()->isIntegralOrEnumerationType())
+ return true;
+
+ // Additionally, in C++11, non-volatile constexpr variables can be used in
+ // constant expressions.
+ return Lang.CPlusPlus11 && isConstexpr();
+}
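+
+// Illustrative examples in C++11 (hypothetical declarations):
+//   const int a = 1;          // usable: const integral type
+//   volatile const int b = 1; // not usable: volatile-qualified
+//   constexpr double d = 1.0; // usable: constexpr
+//   const double e = 1.0;     // not usable: const alone is not enough for
+//                             // non-integral, non-constexpr types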
+
+/// Convert the initializer for this declaration to the elaborated EvaluatedStmt
+/// form, which contains extra information on the evaluated value of the
+/// initializer.
+EvaluatedStmt *VarDecl::ensureEvaluatedStmt() const {
+ auto *Eval = Init.dyn_cast<EvaluatedStmt *>();
+ if (!Eval) {
+ // Note: EvaluatedStmt contains an APValue, which usually holds
+ // resources not allocated from the ASTContext. We need to do some
+ // work to avoid leaking those, but we do so in VarDecl::evaluateValue
+ // where we can detect whether there's anything to clean up or not.
+ Eval = new (getASTContext()) EvaluatedStmt;
+ Eval->Value = Init.get<Stmt *>();
+ Init = Eval;
+ }
+ return Eval;
+}
+
+APValue *VarDecl::evaluateValue() const {
+ SmallVector<PartialDiagnosticAt, 8> Notes;
+ return evaluateValue(Notes);
+}
+
+namespace {
+// Destroy an APValue that was allocated in an ASTContext.
+void DestroyAPValue(void* UntypedValue) {
+ static_cast<APValue*>(UntypedValue)->~APValue();
+}
+} // namespace
+
+APValue *VarDecl::evaluateValue(
+ SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
+ EvaluatedStmt *Eval = ensureEvaluatedStmt();
+
+ // We only produce notes indicating why an initializer is non-constant the
+ // first time it is evaluated. FIXME: The notes won't always be emitted the
+ // first time we try evaluation, so they might not be produced at all.
+ if (Eval->WasEvaluated)
+ return Eval->Evaluated.isUninit() ? nullptr : &Eval->Evaluated;
+
+ const auto *Init = cast<Expr>(Eval->Value);
+ assert(!Init->isValueDependent());
+
+ if (Eval->IsEvaluating) {
+ // FIXME: Produce a diagnostic for self-initialization.
+ Eval->CheckedICE = true;
+ Eval->IsICE = false;
+ return nullptr;
+ }
+
+ Eval->IsEvaluating = true;
+
+ bool Result = Init->EvaluateAsInitializer(Eval->Evaluated, getASTContext(),
+ this, Notes);
+
+ // Ensure the computed APValue is cleaned up later if evaluation succeeded,
+ // or that it's empty (so that there's nothing to clean up) if evaluation
+ // failed.
+ if (!Result)
+ Eval->Evaluated = APValue();
+ else if (Eval->Evaluated.needsCleanup())
+ getASTContext().AddDeallocation(DestroyAPValue, &Eval->Evaluated);
+
+ Eval->IsEvaluating = false;
+ Eval->WasEvaluated = true;
+
+ // In C++11, we have determined whether the initializer was a constant
+ // expression as a side-effect.
+ if (getASTContext().getLangOpts().CPlusPlus11 && !Eval->CheckedICE) {
+ Eval->CheckedICE = true;
+ Eval->IsICE = Result && Notes.empty();
+ }
+
+ return Result ? &Eval->Evaluated : nullptr;
+}
+
+APValue *VarDecl::getEvaluatedValue() const {
+ if (EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>())
+ if (Eval->WasEvaluated)
+ return &Eval->Evaluated;
+
+ return nullptr;
+}
+
+bool VarDecl::isInitKnownICE() const {
+ if (EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>())
+ return Eval->CheckedICE;
+
+ return false;
+}
+
+bool VarDecl::isInitICE() const {
+ assert(isInitKnownICE() &&
+ "Check whether we already know that the initializer is an ICE");
+ return Init.get<EvaluatedStmt *>()->IsICE;
+}
+
+bool VarDecl::checkInitIsICE() const {
+ // Initializers of weak variables are never ICEs.
+ if (isWeak())
+ return false;
+
+ EvaluatedStmt *Eval = ensureEvaluatedStmt();
+ if (Eval->CheckedICE)
+ // We have already checked whether this subexpression is an
+ // integral constant expression.
+ return Eval->IsICE;
+
+ const auto *Init = cast<Expr>(Eval->Value);
+ assert(!Init->isValueDependent());
+
+ // In C++11, evaluate the initializer to check whether it's a constant
+ // expression.
+ if (getASTContext().getLangOpts().CPlusPlus11) {
+ SmallVector<PartialDiagnosticAt, 8> Notes;
+ evaluateValue(Notes);
+ return Eval->IsICE;
+ }
+
+ // It's an ICE whether or not the definition we found is
+ // out-of-line. See DR 721 and the discussion in Clang PR
+ // 6206 for details.
+
+ if (Eval->CheckingICE)
+ return false;
+ Eval->CheckingICE = true;
+
+ Eval->IsICE = Init->isIntegerConstantExpr(getASTContext());
+ Eval->CheckingICE = false;
+ Eval->CheckedICE = true;
+ return Eval->IsICE;
+}
+
+VarDecl *VarDecl::getInstantiatedFromStaticDataMember() const {
+ if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
+ return cast<VarDecl>(MSI->getInstantiatedFrom());
+
+ return nullptr;
+}
+
+TemplateSpecializationKind VarDecl::getTemplateSpecializationKind() const {
+ if (const auto *Spec = dyn_cast<VarTemplateSpecializationDecl>(this))
+ return Spec->getSpecializationKind();
+
+ if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
+ return MSI->getTemplateSpecializationKind();
+
+ return TSK_Undeclared;
+}
+
+SourceLocation VarDecl::getPointOfInstantiation() const {
+ if (const auto *Spec = dyn_cast<VarTemplateSpecializationDecl>(this))
+ return Spec->getPointOfInstantiation();
+
+ if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
+ return MSI->getPointOfInstantiation();
+
+ return SourceLocation();
+}
+
+VarTemplateDecl *VarDecl::getDescribedVarTemplate() const {
+ return getASTContext().getTemplateOrSpecializationInfo(this)
+ .dyn_cast<VarTemplateDecl *>();
+}
+
+void VarDecl::setDescribedVarTemplate(VarTemplateDecl *Template) {
+ getASTContext().setTemplateOrSpecializationInfo(this, Template);
+}
+
+MemberSpecializationInfo *VarDecl::getMemberSpecializationInfo() const {
+ if (isStaticDataMember())
+ // FIXME: Remove ?
+ // return getASTContext().getInstantiatedFromStaticDataMember(this);
+ return getASTContext().getTemplateOrSpecializationInfo(this)
+ .dyn_cast<MemberSpecializationInfo *>();
+ return nullptr;
+}
+
+void VarDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation) {
+ assert((isa<VarTemplateSpecializationDecl>(this) ||
+ getMemberSpecializationInfo()) &&
+ "not a variable or static data member template specialization");
+
+ if (VarTemplateSpecializationDecl *Spec =
+ dyn_cast<VarTemplateSpecializationDecl>(this)) {
+ Spec->setSpecializationKind(TSK);
+ if (TSK != TSK_ExplicitSpecialization && PointOfInstantiation.isValid() &&
+ Spec->getPointOfInstantiation().isInvalid())
+ Spec->setPointOfInstantiation(PointOfInstantiation);
+ }
+
+ if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo()) {
+ MSI->setTemplateSpecializationKind(TSK);
+ if (TSK != TSK_ExplicitSpecialization && PointOfInstantiation.isValid() &&
+ MSI->getPointOfInstantiation().isInvalid())
+ MSI->setPointOfInstantiation(PointOfInstantiation);
+ }
+}
+
+void
+VarDecl::setInstantiationOfStaticDataMember(VarDecl *VD,
+ TemplateSpecializationKind TSK) {
+ assert(getASTContext().getTemplateOrSpecializationInfo(this).isNull() &&
+ "Previous template or instantiation?");
+ getASTContext().setInstantiatedFromStaticDataMember(this, VD, TSK);
+}
+
+//===----------------------------------------------------------------------===//
+// ParmVarDecl Implementation
+//===----------------------------------------------------------------------===//
+
+ParmVarDecl *ParmVarDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ QualType T, TypeSourceInfo *TInfo,
+ StorageClass S, Expr *DefArg) {
+ return new (C, DC) ParmVarDecl(ParmVar, C, DC, StartLoc, IdLoc, Id, T, TInfo,
+ S, DefArg);
+}
+
+QualType ParmVarDecl::getOriginalType() const {
+ TypeSourceInfo *TSI = getTypeSourceInfo();
+ QualType T = TSI ? TSI->getType() : getType();
+ if (const auto *DT = dyn_cast<DecayedType>(T))
+ return DT->getOriginalType();
+ return T;
+}
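+
+// Illustrative example (hypothetical declaration): for a parameter declared
+// as
+//   void f(int arr[10]);
+// getType() yields the adjusted (decayed) type 'int *', while
+// getOriginalType() recovers the as-written type 'int [10]' through the
+// DecayedType sugar node.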
+
+ParmVarDecl *ParmVarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID)
+ ParmVarDecl(ParmVar, C, nullptr, SourceLocation(), SourceLocation(),
+ nullptr, QualType(), nullptr, SC_None, nullptr);
+}
+
+SourceRange ParmVarDecl::getSourceRange() const {
+ if (!hasInheritedDefaultArg()) {
+ SourceRange ArgRange = getDefaultArgRange();
+ if (ArgRange.isValid())
+ return SourceRange(getOuterLocStart(), ArgRange.getEnd());
+ }
+
+ // DeclaratorDecl considers the range of postfix types as overlapping with the
+ // declaration name, but this is not the case with parameters in ObjC methods.
+ if (isa<ObjCMethodDecl>(getDeclContext()))
+ return SourceRange(DeclaratorDecl::getLocStart(), getLocation());
+
+ return DeclaratorDecl::getSourceRange();
+}
+
+Expr *ParmVarDecl::getDefaultArg() {
+ assert(!hasUnparsedDefaultArg() && "Default argument is not yet parsed!");
+ assert(!hasUninstantiatedDefaultArg() &&
+ "Default argument is not yet instantiated!");
+
+ Expr *Arg = getInit();
+ if (auto *E = dyn_cast_or_null<ExprWithCleanups>(Arg))
+ return E->getSubExpr();
+
+ return Arg;
+}
+
+void ParmVarDecl::setDefaultArg(Expr *defarg) {
+ ParmVarDeclBits.DefaultArgKind = DAK_Normal;
+ Init = defarg;
+}
+
+SourceRange ParmVarDecl::getDefaultArgRange() const {
+ switch (ParmVarDeclBits.DefaultArgKind) {
+ case DAK_None:
+ case DAK_Unparsed:
+ // Nothing we can do here.
+ return SourceRange();
+
+ case DAK_Uninstantiated:
+ return getUninstantiatedDefaultArg()->getSourceRange();
+
+ case DAK_Normal:
+ if (const Expr *E = getInit())
+ return E->getSourceRange();
+
+ // Missing an actual expression; the default argument may be invalid.
+ return SourceRange();
+ }
+ llvm_unreachable("Invalid default argument kind.");
+}
+
+void ParmVarDecl::setUninstantiatedDefaultArg(Expr *arg) {
+ ParmVarDeclBits.DefaultArgKind = DAK_Uninstantiated;
+ Init = arg;
+}
+
+Expr *ParmVarDecl::getUninstantiatedDefaultArg() {
+ assert(hasUninstantiatedDefaultArg() &&
+ "Wrong kind of initialization expression!");
+ return cast_or_null<Expr>(Init.get<Stmt *>());
+}
+
+bool ParmVarDecl::hasDefaultArg() const {
+ // FIXME: We should just return false for DAK_None here once callers are
+ // prepared for the case that we encountered an invalid default argument and
+ // were unable to even build an invalid expression.
+ return hasUnparsedDefaultArg() || hasUninstantiatedDefaultArg() ||
+ !Init.isNull();
+}
+
+bool ParmVarDecl::isParameterPack() const {
+ return isa<PackExpansionType>(getType());
+}
+
+void ParmVarDecl::setParameterIndexLarge(unsigned parameterIndex) {
+ getASTContext().setParameterIndex(this, parameterIndex);
+ ParmVarDeclBits.ParameterIndex = ParameterIndexSentinel;
+}
+
+unsigned ParmVarDecl::getParameterIndexLarge() const {
+ return getASTContext().getParameterIndex(this);
+}
+
+//===----------------------------------------------------------------------===//
+// FunctionDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void FunctionDecl::getNameForDiagnostic(
+ raw_ostream &OS, const PrintingPolicy &Policy, bool Qualified) const {
+ NamedDecl::getNameForDiagnostic(OS, Policy, Qualified);
+ const TemplateArgumentList *TemplateArgs = getTemplateSpecializationArgs();
+ if (TemplateArgs)
+ TemplateSpecializationType::PrintTemplateArgumentList(
+ OS, TemplateArgs->data(), TemplateArgs->size(), Policy);
+}
+
+bool FunctionDecl::isVariadic() const {
+ if (const auto *FT = getType()->getAs<FunctionProtoType>())
+ return FT->isVariadic();
+ return false;
+}
+
+bool FunctionDecl::hasBody(const FunctionDecl *&Definition) const {
+ for (auto I : redecls()) {
+ if (I->Body || I->IsLateTemplateParsed) {
+ Definition = I;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool FunctionDecl::hasTrivialBody() const {
+ Stmt *S = getBody();
+ if (!S) {
+ // Since we don't have a body for this function, we don't know if it's
+ // trivial or not.
+ return false;
+ }
+
+ return isa<CompoundStmt>(S) && cast<CompoundStmt>(S)->body_empty();
+}
+
+bool FunctionDecl::isDefined(const FunctionDecl *&Definition) const {
+ for (auto I : redecls()) {
+ if (I->IsDeleted || I->IsDefaulted || I->Body || I->IsLateTemplateParsed ||
+ I->hasAttr<AliasAttr>()) {
+ Definition = I->IsDeleted ? I->getCanonicalDecl() : I;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+Stmt *FunctionDecl::getBody(const FunctionDecl *&Definition) const {
+ if (!hasBody(Definition))
+ return nullptr;
+
+ if (Definition->Body)
+ return Definition->Body.get(getASTContext().getExternalSource());
+
+ return nullptr;
+}
+
+void FunctionDecl::setBody(Stmt *B) {
+ Body = B;
+ if (B)
+ EndRangeLoc = B->getLocEnd();
+}
+
+void FunctionDecl::setPure(bool P) {
+ IsPure = P;
+ if (P)
+ if (auto *Parent = dyn_cast<CXXRecordDecl>(getDeclContext()))
+ Parent->markedVirtualFunctionPure();
+}
+
+template<std::size_t Len>
+static bool isNamed(const NamedDecl *ND, const char (&Str)[Len]) {
+ IdentifierInfo *II = ND->getIdentifier();
+ return II && II->isStr(Str);
+}
+
+bool FunctionDecl::isMain() const {
+ const TranslationUnitDecl *tunit =
+ dyn_cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext());
+ return tunit &&
+ !tunit->getASTContext().getLangOpts().Freestanding &&
+ isNamed(this, "main");
+}
+
+bool FunctionDecl::isMSVCRTEntryPoint() const {
+ const TranslationUnitDecl *TUnit =
+ dyn_cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext());
+ if (!TUnit)
+ return false;
+
+ // Even though we aren't really targeting MSVCRT if we are freestanding,
+ // semantic analysis for these functions remains the same.
+
+ // MSVCRT entry points only exist on MSVCRT targets.
+ if (!TUnit->getASTContext().getTargetInfo().getTriple().isOSMSVCRT())
+ return false;
+
+ // Nameless functions like constructors cannot be entry points.
+ if (!getIdentifier())
+ return false;
+
+ return llvm::StringSwitch<bool>(getName())
+ .Cases("main", // an ANSI console app
+ "wmain", // a Unicode console App
+ "WinMain", // an ANSI GUI app
+ "wWinMain", // a Unicode GUI app
+ "DllMain", // a DLL
+ true)
+ .Default(false);
+}
+
+bool FunctionDecl::isReservedGlobalPlacementOperator() const {
+ assert(getDeclName().getNameKind() == DeclarationName::CXXOperatorName);
+ assert(getDeclName().getCXXOverloadedOperator() == OO_New ||
+ getDeclName().getCXXOverloadedOperator() == OO_Delete ||
+ getDeclName().getCXXOverloadedOperator() == OO_Array_New ||
+ getDeclName().getCXXOverloadedOperator() == OO_Array_Delete);
+
+ if (!getDeclContext()->getRedeclContext()->isTranslationUnit())
+ return false;
+
+ const auto *proto = getType()->castAs<FunctionProtoType>();
+ if (proto->getNumParams() != 2 || proto->isVariadic())
+ return false;
+
+ ASTContext &Context =
+ cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext())
+ ->getASTContext();
+
+ // The result type and first argument type are constant across all
+ // these operators. The second argument must be exactly void*.
+ return (proto->getParamType(1).getCanonicalType() == Context.VoidPtrTy);
+}
+
+bool FunctionDecl::isReplaceableGlobalAllocationFunction() const {
+ if (getDeclName().getNameKind() != DeclarationName::CXXOperatorName)
+ return false;
+ if (getDeclName().getCXXOverloadedOperator() != OO_New &&
+ getDeclName().getCXXOverloadedOperator() != OO_Delete &&
+ getDeclName().getCXXOverloadedOperator() != OO_Array_New &&
+ getDeclName().getCXXOverloadedOperator() != OO_Array_Delete)
+ return false;
+
+ if (isa<CXXRecordDecl>(getDeclContext()))
+ return false;
+
+ // This can only fail for an invalid 'operator new' declaration.
+ if (!getDeclContext()->getRedeclContext()->isTranslationUnit())
+ return false;
+
+ const auto *FPT = getType()->castAs<FunctionProtoType>();
+ if (FPT->getNumParams() == 0 || FPT->getNumParams() > 2 || FPT->isVariadic())
+ return false;
+
+ // If this is a single-parameter function, it must be a replaceable global
+ // allocation or deallocation function.
+ if (FPT->getNumParams() == 1)
+ return true;
+
+ // Otherwise, we're looking for a second parameter whose type is
+ // 'const std::nothrow_t &', or, in C++1y, 'std::size_t'.
+ QualType Ty = FPT->getParamType(1);
+ ASTContext &Ctx = getASTContext();
+ if (Ctx.getLangOpts().SizedDeallocation &&
+ Ctx.hasSameType(Ty, Ctx.getSizeType()))
+ return true;
+ if (!Ty->isReferenceType())
+ return false;
+ Ty = Ty->getPointeeType();
+ if (Ty.getCVRQualifiers() != Qualifiers::Const)
+ return false;
+ const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+ return RD && isNamed(RD, "nothrow_t") && RD->isInStdNamespace();
+}
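+
+// Illustrative examples (hypothetical declarations at global scope):
+//   void *operator new(std::size_t);                  // replaceable
+//   void *operator new(std::size_t,
+//                      const std::nothrow_t &);       // replaceable
+//   void operator delete(void *, std::size_t);        // replaceable only
+//                                                     // under sized
+//                                                     // deallocation
+//   struct S { void *operator new(std::size_t); };    // not replaceable:
+//                                                     // class member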
+
+LanguageLinkage FunctionDecl::getLanguageLinkage() const {
+ return getDeclLanguageLinkage(*this);
+}
+
+bool FunctionDecl::isExternC() const {
+ return isDeclExternC(*this);
+}
+
+bool FunctionDecl::isInExternCContext() const {
+ return getLexicalDeclContext()->isExternCContext();
+}
+
+bool FunctionDecl::isInExternCXXContext() const {
+ return getLexicalDeclContext()->isExternCXXContext();
+}
+
+bool FunctionDecl::isGlobal() const {
+ if (const auto *Method = dyn_cast<CXXMethodDecl>(this))
+ return Method->isStatic();
+
+ if (getCanonicalDecl()->getStorageClass() == SC_Static)
+ return false;
+
+ for (const DeclContext *DC = getDeclContext();
+ DC->isNamespace();
+ DC = DC->getParent()) {
+ if (const auto *Namespace = cast<NamespaceDecl>(DC)) {
+ if (!Namespace->getDeclName())
+ return false;
+ break;
+ }
+ }
+
+ return true;
+}
+
+bool FunctionDecl::isNoReturn() const {
+ return hasAttr<NoReturnAttr>() || hasAttr<CXX11NoReturnAttr>() ||
+ hasAttr<C11NoReturnAttr>() ||
+ getType()->getAs<FunctionType>()->getNoReturnAttr();
+}
+
+void
+FunctionDecl::setPreviousDeclaration(FunctionDecl *PrevDecl) {
+ redeclarable_base::setPreviousDecl(PrevDecl);
+
+ if (FunctionTemplateDecl *FunTmpl = getDescribedFunctionTemplate()) {
+ FunctionTemplateDecl *PrevFunTmpl
+ = PrevDecl? PrevDecl->getDescribedFunctionTemplate() : nullptr;
+ assert((!PrevDecl || PrevFunTmpl) && "Function/function template mismatch");
+ FunTmpl->setPreviousDecl(PrevFunTmpl);
+ }
+
+ if (PrevDecl && PrevDecl->IsInline)
+ IsInline = true;
+}
+
+FunctionDecl *FunctionDecl::getCanonicalDecl() { return getFirstDecl(); }
+
+/// \brief Returns a value indicating whether this function
+/// corresponds to a builtin function.
+///
+/// The function corresponds to a built-in function if it is
+/// declared at translation scope or within an extern "C" block and
+/// its name matches the name of a builtin. The returned value
+/// will be 0 for functions that do not correspond to a builtin, a
+/// value of type \c Builtin::ID if in the target-independent range
+/// \c [1,Builtin::First), or a target-specific builtin value.
+unsigned FunctionDecl::getBuiltinID() const {
+ if (!getIdentifier())
+ return 0;
+
+ unsigned BuiltinID = getIdentifier()->getBuiltinID();
+ if (!BuiltinID)
+ return 0;
+
+ ASTContext &Context = getASTContext();
+ if (Context.getLangOpts().CPlusPlus) {
+ const auto *LinkageDecl =
+ dyn_cast<LinkageSpecDecl>(getFirstDecl()->getDeclContext());
+ // In C++, the first declaration of a builtin is always inside an implicit
+ // extern "C".
+ // FIXME: A recognized library function may not be directly in an extern "C"
+ // declaration, for instance "extern "C" { namespace std { decl } }".
+ if (!LinkageDecl) {
+ if (BuiltinID == Builtin::BI__GetExceptionInfo &&
+ Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ isInStdNamespace())
+ return Builtin::BI__GetExceptionInfo;
+ return 0;
+ }
+ if (LinkageDecl->getLanguage() != LinkageSpecDecl::lang_c)
+ return 0;
+ }
+
+ // If the function is marked "overloadable", it has a different mangled name
+ // and is not the C library function.
+ if (hasAttr<OverloadableAttr>())
+ return 0;
+
+ if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+ return BuiltinID;
+
+ // This function has the name of a known C library
+ // function. Determine whether it actually refers to the C library
+ // function or whether it just has the same name.
+
+ // If this is a static function, it's not a builtin.
+ if (getStorageClass() == SC_Static)
+ return 0;
+
+ return BuiltinID;
+}
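+
+// Illustrative behaviour (hypothetical declarations in a C++ TU):
+//   extern "C" int printf(const char *, ...); // treated as the library
+//                                             // builtin (Builtin::BIprintf)
+//   static int printf(const char *, ...);     // returns 0: a static
+//                                             // function merely shares the
+//                                             // builtin's name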
+
+/// getNumParams - Return the number of parameters this function must have
+/// based on its FunctionType. This is the length of the ParamInfo array
+/// after it has been created.
+unsigned FunctionDecl::getNumParams() const {
+ const auto *FPT = getType()->getAs<FunctionProtoType>();
+ return FPT ? FPT->getNumParams() : 0;
+}
+
+void FunctionDecl::setParams(ASTContext &C,
+ ArrayRef<ParmVarDecl *> NewParamInfo) {
+ assert(!ParamInfo && "Already has param info!");
+ assert(NewParamInfo.size() == getNumParams() && "Parameter count mismatch!");
+
+ // Zero params -> null pointer.
+ if (!NewParamInfo.empty()) {
+ ParamInfo = new (C) ParmVarDecl*[NewParamInfo.size()];
+ std::copy(NewParamInfo.begin(), NewParamInfo.end(), ParamInfo);
+ }
+}
+
+void FunctionDecl::setDeclsInPrototypeScope(ArrayRef<NamedDecl *> NewDecls) {
+ assert(DeclsInPrototypeScope.empty() && "Already has prototype decls!");
+
+ if (!NewDecls.empty()) {
+ NamedDecl **A = new (getASTContext()) NamedDecl*[NewDecls.size()];
+ std::copy(NewDecls.begin(), NewDecls.end(), A);
+ DeclsInPrototypeScope = llvm::makeArrayRef(A, NewDecls.size());
+ // Move declarations introduced in prototype to the function context.
+ for (auto I : NewDecls) {
+ DeclContext *DC = I->getDeclContext();
+ // A forward-declared reference to an enumeration is not added to the
+ // declaration scope, so skip any declaration that is absent from its
+ // declaration context.
+ if (DC->containsDecl(I)) {
+ DC->removeDecl(I);
+ I->setDeclContext(this);
+ addDecl(I);
+ }
+ }
+ }
+}
+
+/// getMinRequiredArguments - Returns the minimum number of arguments
+/// needed to call this function. This may be fewer than the number of
+/// function parameters, if some of the parameters have default
+/// arguments (in C++) or are parameter packs (C++11).
+unsigned FunctionDecl::getMinRequiredArguments() const {
+ if (!getASTContext().getLangOpts().CPlusPlus)
+ return getNumParams();
+
+ unsigned NumRequiredArgs = 0;
+ for (auto *Param : params())
+ if (!Param->isParameterPack() && !Param->hasDefaultArg())
+ ++NumRequiredArgs;
+ return NumRequiredArgs;
+}
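+
+// Illustrative example (hypothetical C++11 declaration):
+//   template <typename... Ts>
+//   void f(int a, int b = 0, Ts... rest);
+// getMinRequiredArguments() returns 1: 'b' has a default argument and
+// 'rest' is a parameter pack, so neither is counted.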
+
+/// \brief The combination of the extern and inline keywords under MSVC forces
+/// the function to be required, i.e. an externally visible definition must be
+/// emitted.
+///
+/// Note: This function assumes that we will only get called when isInlined()
+/// would return true for this FunctionDecl.
+bool FunctionDecl::isMSExternInline() const {
+ assert(isInlined() && "expected to get called on an inlined function!");
+
+ const ASTContext &Context = getASTContext();
+ if (!Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ !hasAttr<DLLExportAttr>())
+ return false;
+
+ for (const FunctionDecl *FD = getMostRecentDecl(); FD;
+ FD = FD->getPreviousDecl())
+ if (!FD->isImplicit() && FD->getStorageClass() == SC_Extern)
+ return true;
+
+ return false;
+}
+
+static bool redeclForcesDefMSVC(const FunctionDecl *Redecl) {
+ if (Redecl->getStorageClass() != SC_Extern)
+ return false;
+
+ for (const FunctionDecl *FD = Redecl->getPreviousDecl(); FD;
+ FD = FD->getPreviousDecl())
+ if (!FD->isImplicit() && FD->getStorageClass() == SC_Extern)
+ return false;
+
+ return true;
+}
+
+static bool RedeclForcesDefC99(const FunctionDecl *Redecl) {
+ // Only consider file-scope declarations in this test.
+ if (!Redecl->getLexicalDeclContext()->isTranslationUnit())
+ return false;
+
+ // Only consider explicit declarations; the presence of a builtin for a
+ // libcall shouldn't affect whether a definition is externally visible.
+ if (Redecl->isImplicit())
+ return false;
+
+ if (!Redecl->isInlineSpecified() || Redecl->getStorageClass() == SC_Extern)
+ return true; // Not an inline definition
+
+ return false;
+}
+
+/// \brief For a function declaration in C or C++, determine whether this
+/// declaration causes the definition to be externally visible.
+///
+/// For instance, this determines if adding the current declaration to the set
+/// of redeclarations of the given function causes
+/// isInlineDefinitionExternallyVisible to change from false to true.
+bool FunctionDecl::doesDeclarationForceExternallyVisibleDefinition() const {
+ assert(!doesThisDeclarationHaveABody() &&
+ "Must have a declaration without a body.");
+
+ ASTContext &Context = getASTContext();
+
+ if (Context.getLangOpts().MSVCCompat) {
+ const FunctionDecl *Definition;
+ if (hasBody(Definition) && Definition->isInlined() &&
+ redeclForcesDefMSVC(this))
+ return true;
+ }
+
+ if (Context.getLangOpts().GNUInline || hasAttr<GNUInlineAttr>()) {
+ // With GNU inlining, a declaration with 'inline' but not 'extern' forces
+ // an externally visible definition.
+ //
+ // FIXME: What happens if gnu_inline gets added on after the first
+ // declaration?
+ if (!isInlineSpecified() || getStorageClass() == SC_Extern)
+ return false;
+
+ const FunctionDecl *Prev = this;
+ bool FoundBody = false;
+ while ((Prev = Prev->getPreviousDecl())) {
+ FoundBody |= Prev->Body.isValid();
+
+ if (Prev->Body) {
+ // If it's not the case that both 'inline' and 'extern' are
+ // specified on the definition, then it is always externally visible.
+ if (!Prev->isInlineSpecified() ||
+ Prev->getStorageClass() != SC_Extern)
+ return false;
+ } else if (Prev->isInlineSpecified() &&
+ Prev->getStorageClass() != SC_Extern) {
+ return false;
+ }
+ }
+ return FoundBody;
+ }
+
+ if (Context.getLangOpts().CPlusPlus)
+ return false;
+
+ // C99 6.7.4p6:
+ // [...] If all of the file scope declarations for a function in a
+ // translation unit include the inline function specifier without extern,
+ // then the definition in that translation unit is an inline definition.
+ if (isInlineSpecified() && getStorageClass() != SC_Extern)
+ return false;
+ const FunctionDecl *Prev = this;
+ bool FoundBody = false;
+ while ((Prev = Prev->getPreviousDecl())) {
+ FoundBody |= Prev->Body.isValid();
+ if (RedeclForcesDefC99(Prev))
+ return false;
+ }
+ return FoundBody;
+}
+
+SourceRange FunctionDecl::getReturnTypeSourceRange() const {
+ const TypeSourceInfo *TSI = getTypeSourceInfo();
+ if (!TSI)
+ return SourceRange();
+ FunctionTypeLoc FTL =
+ TSI->getTypeLoc().IgnoreParens().getAs<FunctionTypeLoc>();
+ if (!FTL)
+ return SourceRange();
+
+ // Skip self-referential return types.
+ const SourceManager &SM = getASTContext().getSourceManager();
+ SourceRange RTRange = FTL.getReturnLoc().getSourceRange();
+ SourceLocation Boundary = getNameInfo().getLocStart();
+ if (RTRange.isInvalid() || Boundary.isInvalid() ||
+ !SM.isBeforeInTranslationUnit(RTRange.getEnd(), Boundary))
+ return SourceRange();
+
+ return RTRange;
+}
+
+bool FunctionDecl::hasUnusedResultAttr() const {
+ QualType RetType = getReturnType();
+ if (RetType->isRecordType()) {
+ const CXXRecordDecl *Ret = RetType->getAsCXXRecordDecl();
+ const auto *MD = dyn_cast<CXXMethodDecl>(this);
+ if (Ret && Ret->hasAttr<WarnUnusedResultAttr>() &&
+ !(MD && MD->getCorrespondingMethodInClass(Ret, true)))
+ return true;
+ }
+ return hasAttr<WarnUnusedResultAttr>();
+}
+
+/// \brief For an inline function definition in C, or for a gnu_inline function
+/// in C++, determine whether the definition will be externally visible.
+///
+/// Inline function definitions are always available for inlining optimizations.
+/// However, depending on the language dialect, declaration specifiers, and
+/// attributes, the definition of an inline function may or may not be
+/// "externally" visible to other translation units in the program.
+///
+/// In C99, inline definitions are not externally visible by default. However,
+/// if even one of the global-scope declarations is marked "extern inline", the
+/// inline definition becomes externally visible (C99 6.7.4p6).
+///
+/// In GNU89 mode, or if the gnu_inline attribute is attached to the function
+/// definition, we use the GNU semantics for inline, which are nearly the
+/// opposite of C99 semantics. In particular, "inline" by itself will create
+/// an externally visible symbol, but "extern inline" will not create an
+/// externally visible symbol.
+bool FunctionDecl::isInlineDefinitionExternallyVisible() const {
+ assert(doesThisDeclarationHaveABody() && "Must have the function definition");
+ assert(isInlined() && "Function must be inline");
+ ASTContext &Context = getASTContext();
+
+ if (Context.getLangOpts().GNUInline || hasAttr<GNUInlineAttr>()) {
+ // Note: If you change the logic here, please change
+ // doesDeclarationForceExternallyVisibleDefinition as well.
+ //
+ // If it's not the case that both 'inline' and 'extern' are
+ // specified on the definition, then this inline definition is
+ // externally visible.
+ if (!(isInlineSpecified() && getStorageClass() == SC_Extern))
+ return true;
+
+ // If any declaration is 'inline' but not 'extern', then this definition
+ // is externally visible.
+ for (auto Redecl : redecls()) {
+ if (Redecl->isInlineSpecified() &&
+ Redecl->getStorageClass() != SC_Extern)
+ return true;
+ }
+
+ return false;
+ }
+
+ // The rest of this function is C-only.
+ assert(!Context.getLangOpts().CPlusPlus &&
+ "should not use C inline rules in C++");
+
+ // C99 6.7.4p6:
+ // [...] If all of the file scope declarations for a function in a
+ // translation unit include the inline function specifier without extern,
+ // then the definition in that translation unit is an inline definition.
+ for (auto Redecl : redecls()) {
+ if (RedeclForcesDefC99(Redecl))
+ return true;
+ }
+
+ // C99 6.7.4p6:
+ // An inline definition does not provide an external definition for the
+ // function, and does not forbid an external definition in another
+ // translation unit.
+ return false;
+}
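+
+// Illustrative C99 example (hypothetical translation unit):
+//   inline int f(void) { return 0; } // inline definition: not externally
+//                                    // visible by itself
+//   extern inline int f(void);       // this redeclaration forces an
+//                                    // externally visible definition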
+
+/// getOverloadedOperator - Which C++ overloaded operator this
+/// function represents, if any.
+OverloadedOperatorKind FunctionDecl::getOverloadedOperator() const {
+ if (getDeclName().getNameKind() == DeclarationName::CXXOperatorName)
+ return getDeclName().getCXXOverloadedOperator();
+ else
+ return OO_None;
+}
+
+/// getLiteralIdentifier - The literal suffix identifier this function
+/// represents, if any.
+const IdentifierInfo *FunctionDecl::getLiteralIdentifier() const {
+ if (getDeclName().getNameKind() == DeclarationName::CXXLiteralOperatorName)
+ return getDeclName().getCXXLiteralIdentifier();
+ else
+ return nullptr;
+}
+
+FunctionDecl::TemplatedKind FunctionDecl::getTemplatedKind() const {
+ if (TemplateOrSpecialization.isNull())
+ return TK_NonTemplate;
+ if (TemplateOrSpecialization.is<FunctionTemplateDecl *>())
+ return TK_FunctionTemplate;
+ if (TemplateOrSpecialization.is<MemberSpecializationInfo *>())
+ return TK_MemberSpecialization;
+ if (TemplateOrSpecialization.is<FunctionTemplateSpecializationInfo *>())
+ return TK_FunctionTemplateSpecialization;
+ if (TemplateOrSpecialization
+ .is<DependentFunctionTemplateSpecializationInfo *>())
+ return TK_DependentFunctionTemplateSpecialization;
+
+ llvm_unreachable("Did we miss a TemplateOrSpecialization type?");
+}
+
+FunctionDecl *FunctionDecl::getInstantiatedFromMemberFunction() const {
+ if (MemberSpecializationInfo *Info = getMemberSpecializationInfo())
+ return cast<FunctionDecl>(Info->getInstantiatedFrom());
+
+ return nullptr;
+}
+
+MemberSpecializationInfo *FunctionDecl::getMemberSpecializationInfo() const {
+ return TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo *>();
+}
+
+void
+FunctionDecl::setInstantiationOfMemberFunction(ASTContext &C,
+ FunctionDecl *FD,
+ TemplateSpecializationKind TSK) {
+ assert(TemplateOrSpecialization.isNull() &&
+ "Member function is already a specialization");
+ MemberSpecializationInfo *Info
+ = new (C) MemberSpecializationInfo(FD, TSK);
+ TemplateOrSpecialization = Info;
+}
+
+FunctionTemplateDecl *FunctionDecl::getDescribedFunctionTemplate() const {
+ return TemplateOrSpecialization.dyn_cast<FunctionTemplateDecl *>();
+}
+
+void FunctionDecl::setDescribedFunctionTemplate(FunctionTemplateDecl *Template) {
+ TemplateOrSpecialization = Template;
+}
+
+bool FunctionDecl::isImplicitlyInstantiable() const {
+ // If the function is invalid, it can't be implicitly instantiated.
+ if (isInvalidDecl())
+ return false;
+
+ switch (getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitInstantiationDefinition:
+ return false;
+
+ case TSK_ImplicitInstantiation:
+ return true;
+
+ // It is possible to instantiate a TSK_ExplicitSpecialization
+ // if the FunctionDecl has a class scope specialization pattern.
+ case TSK_ExplicitSpecialization:
+ return getClassScopeSpecializationPattern() != nullptr;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ // Handled below.
+ break;
+ }
+
+ // Find the actual template from which we will instantiate.
+ const FunctionDecl *PatternDecl = getTemplateInstantiationPattern();
+ bool HasPattern = false;
+ if (PatternDecl)
+ HasPattern = PatternDecl->hasBody(PatternDecl);
+
+ // C++0x [temp.explicit]p9:
+ // Except for inline functions, other explicit instantiation declarations
+ // have the effect of suppressing the implicit instantiation of the entity
+ // to which they refer.
+ if (!HasPattern || !PatternDecl)
+ return true;
+
+ return PatternDecl->isInlined();
+}
+
+bool FunctionDecl::isTemplateInstantiation() const {
+ switch (getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ return false;
+ case TSK_ImplicitInstantiation:
+ case TSK_ExplicitInstantiationDeclaration:
+ case TSK_ExplicitInstantiationDefinition:
+ return true;
+ }
+ llvm_unreachable("All TSK values handled.");
+}
+
+FunctionDecl *FunctionDecl::getTemplateInstantiationPattern() const {
+ // Handle the special case of a class-scope explicit specialization.
+ if (getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
+ return getClassScopeSpecializationPattern();
+
+ // If this is a generic lambda call operator specialization, its
+ // instantiation pattern is always its primary template's pattern
+ // even if its primary template was instantiated from another
+ // member template (which happens with nested generic lambdas).
+ // Since a lambda's call operator's body is transformed eagerly,
+ // we don't have to go hunting for a prototype definition template
+ // (i.e. instantiated-from-member-template) to use as an instantiation
+ // pattern.
+
+ if (isGenericLambdaCallOperatorSpecialization(
+ dyn_cast<CXXMethodDecl>(this))) {
+ assert(getPrimaryTemplate() && "A generic lambda specialization must be "
+ "generated from a primary call operator "
+ "template");
+ assert(getPrimaryTemplate()->getTemplatedDecl()->getBody() &&
+ "A generic lambda call operator template must always have a body - "
+ "even if instantiated from a prototype (i.e. as written) member "
+ "template");
+ return getPrimaryTemplate()->getTemplatedDecl();
+ }
+
+ if (FunctionTemplateDecl *Primary = getPrimaryTemplate()) {
+ while (Primary->getInstantiatedFromMemberTemplate()) {
+ // If we have hit a point where the user provided a specialization of
+ // this template, we're done looking.
+ if (Primary->isMemberSpecialization())
+ break;
+ Primary = Primary->getInstantiatedFromMemberTemplate();
+ }
+
+ return Primary->getTemplatedDecl();
+ }
+
+ return getInstantiatedFromMemberFunction();
+}
+
+FunctionTemplateDecl *FunctionDecl::getPrimaryTemplate() const {
+ if (FunctionTemplateSpecializationInfo *Info
+ = TemplateOrSpecialization
+ .dyn_cast<FunctionTemplateSpecializationInfo*>()) {
+ return Info->Template.getPointer();
+ }
+ return nullptr;
+}
+
+FunctionDecl *FunctionDecl::getClassScopeSpecializationPattern() const {
+ return getASTContext().getClassScopeSpecializationPattern(this);
+}
+
+FunctionTemplateSpecializationInfo *
+FunctionDecl::getTemplateSpecializationInfo() const {
+ return TemplateOrSpecialization
+ .dyn_cast<FunctionTemplateSpecializationInfo *>();
+}
+
+const TemplateArgumentList *
+FunctionDecl::getTemplateSpecializationArgs() const {
+ if (FunctionTemplateSpecializationInfo *Info
+ = TemplateOrSpecialization
+ .dyn_cast<FunctionTemplateSpecializationInfo*>()) {
+ return Info->TemplateArguments;
+ }
+ return nullptr;
+}
+
+const ASTTemplateArgumentListInfo *
+FunctionDecl::getTemplateSpecializationArgsAsWritten() const {
+ if (FunctionTemplateSpecializationInfo *Info
+ = TemplateOrSpecialization
+ .dyn_cast<FunctionTemplateSpecializationInfo*>()) {
+ return Info->TemplateArgumentsAsWritten;
+ }
+ return nullptr;
+}
+
+void
+FunctionDecl::setFunctionTemplateSpecialization(ASTContext &C,
+ FunctionTemplateDecl *Template,
+ const TemplateArgumentList *TemplateArgs,
+ void *InsertPos,
+ TemplateSpecializationKind TSK,
+ const TemplateArgumentListInfo *TemplateArgsAsWritten,
+ SourceLocation PointOfInstantiation) {
+ assert(TSK != TSK_Undeclared &&
+ "Must specify the type of function template specialization");
+ FunctionTemplateSpecializationInfo *Info
+ = TemplateOrSpecialization.dyn_cast<FunctionTemplateSpecializationInfo*>();
+ if (!Info)
+ Info = FunctionTemplateSpecializationInfo::Create(C, this, Template, TSK,
+ TemplateArgs,
+ TemplateArgsAsWritten,
+ PointOfInstantiation);
+ TemplateOrSpecialization = Info;
+ Template->addSpecialization(Info, InsertPos);
+}
+
+void
+FunctionDecl::setDependentTemplateSpecialization(ASTContext &Context,
+ const UnresolvedSetImpl &Templates,
+ const TemplateArgumentListInfo &TemplateArgs) {
+ assert(TemplateOrSpecialization.isNull());
+ DependentFunctionTemplateSpecializationInfo *Info =
+ DependentFunctionTemplateSpecializationInfo::Create(Context, Templates,
+ TemplateArgs);
+ TemplateOrSpecialization = Info;
+}
+
+DependentFunctionTemplateSpecializationInfo *
+FunctionDecl::getDependentSpecializationInfo() const {
+ return TemplateOrSpecialization
+ .dyn_cast<DependentFunctionTemplateSpecializationInfo *>();
+}
+
+DependentFunctionTemplateSpecializationInfo *
+DependentFunctionTemplateSpecializationInfo::Create(
+ ASTContext &Context, const UnresolvedSetImpl &Ts,
+ const TemplateArgumentListInfo &TArgs) {
+ void *Buffer = Context.Allocate(
+ totalSizeToAlloc<TemplateArgumentLoc, FunctionTemplateDecl *>(
+ TArgs.size(), Ts.size()));
+ return new (Buffer) DependentFunctionTemplateSpecializationInfo(Ts, TArgs);
+}
+
+DependentFunctionTemplateSpecializationInfo::
+DependentFunctionTemplateSpecializationInfo(const UnresolvedSetImpl &Ts,
+ const TemplateArgumentListInfo &TArgs)
+ : AngleLocs(TArgs.getLAngleLoc(), TArgs.getRAngleLoc()) {
+
+ NumTemplates = Ts.size();
+ NumArgs = TArgs.size();
+
+ FunctionTemplateDecl **TsArray = getTrailingObjects<FunctionTemplateDecl *>();
+ for (unsigned I = 0, E = Ts.size(); I != E; ++I)
+ TsArray[I] = cast<FunctionTemplateDecl>(Ts[I]->getUnderlyingDecl());
+
+ TemplateArgumentLoc *ArgsArray = getTrailingObjects<TemplateArgumentLoc>();
+ for (unsigned I = 0, E = TArgs.size(); I != E; ++I)
+ new (&ArgsArray[I]) TemplateArgumentLoc(TArgs[I]);
+}
+
+TemplateSpecializationKind FunctionDecl::getTemplateSpecializationKind() const {
+ // For a function template specialization, query the specialization
+ // information object.
+ FunctionTemplateSpecializationInfo *FTSInfo
+ = TemplateOrSpecialization.dyn_cast<FunctionTemplateSpecializationInfo*>();
+ if (FTSInfo)
+ return FTSInfo->getTemplateSpecializationKind();
+
+ MemberSpecializationInfo *MSInfo
+ = TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo*>();
+ if (MSInfo)
+ return MSInfo->getTemplateSpecializationKind();
+
+ return TSK_Undeclared;
+}
+
+void
+FunctionDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation) {
+ if (FunctionTemplateSpecializationInfo *FTSInfo
+ = TemplateOrSpecialization.dyn_cast<
+ FunctionTemplateSpecializationInfo*>()) {
+ FTSInfo->setTemplateSpecializationKind(TSK);
+ if (TSK != TSK_ExplicitSpecialization &&
+ PointOfInstantiation.isValid() &&
+ FTSInfo->getPointOfInstantiation().isInvalid())
+ FTSInfo->setPointOfInstantiation(PointOfInstantiation);
+ } else if (MemberSpecializationInfo *MSInfo
+ = TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo*>()) {
+ MSInfo->setTemplateSpecializationKind(TSK);
+ if (TSK != TSK_ExplicitSpecialization &&
+ PointOfInstantiation.isValid() &&
+ MSInfo->getPointOfInstantiation().isInvalid())
+ MSInfo->setPointOfInstantiation(PointOfInstantiation);
+ } else
+ llvm_unreachable("Function cannot have a template specialization kind");
+}
+
+SourceLocation FunctionDecl::getPointOfInstantiation() const {
+ if (FunctionTemplateSpecializationInfo *FTSInfo
+ = TemplateOrSpecialization.dyn_cast<
+ FunctionTemplateSpecializationInfo*>())
+ return FTSInfo->getPointOfInstantiation();
+ else if (MemberSpecializationInfo *MSInfo
+ = TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo*>())
+ return MSInfo->getPointOfInstantiation();
+
+ return SourceLocation();
+}
+
+bool FunctionDecl::isOutOfLine() const {
+ if (Decl::isOutOfLine())
+ return true;
+
+ // If this function was instantiated from a member function of a
+ // class template, check whether that member function was defined out-of-line.
+ if (FunctionDecl *FD = getInstantiatedFromMemberFunction()) {
+ const FunctionDecl *Definition;
+ if (FD->hasBody(Definition))
+ return Definition->isOutOfLine();
+ }
+
+ // If this function was instantiated from a function template,
+ // check whether that function template was defined out-of-line.
+ if (FunctionTemplateDecl *FunTmpl = getPrimaryTemplate()) {
+ const FunctionDecl *Definition;
+ if (FunTmpl->getTemplatedDecl()->hasBody(Definition))
+ return Definition->isOutOfLine();
+ }
+
+ return false;
+}
+
+SourceRange FunctionDecl::getSourceRange() const {
+ return SourceRange(getOuterLocStart(), EndRangeLoc);
+}
+
+unsigned FunctionDecl::getMemoryFunctionKind() const {
+ IdentifierInfo *FnInfo = getIdentifier();
+
+ if (!FnInfo)
+ return 0;
+
+ // Builtin handling.
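+  // Each routine's __builtin_ and checked (_chk) variants map onto the ID
+  // of the plain library builtin, e.g. __builtin___memcpy_chk and
+  // __builtin_memcpy both report as Builtin::BImemcpy.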
+ switch (getBuiltinID()) {
+ case Builtin::BI__builtin_memset:
+ case Builtin::BI__builtin___memset_chk:
+ case Builtin::BImemset:
+ return Builtin::BImemset;
+
+ case Builtin::BI__builtin_memcpy:
+ case Builtin::BI__builtin___memcpy_chk:
+ case Builtin::BImemcpy:
+ return Builtin::BImemcpy;
+
+ case Builtin::BI__builtin_memmove:
+ case Builtin::BI__builtin___memmove_chk:
+ case Builtin::BImemmove:
+ return Builtin::BImemmove;
+
+ case Builtin::BIstrlcpy:
+ case Builtin::BI__builtin___strlcpy_chk:
+ return Builtin::BIstrlcpy;
+
+ case Builtin::BIstrlcat:
+ case Builtin::BI__builtin___strlcat_chk:
+ return Builtin::BIstrlcat;
+
+ case Builtin::BI__builtin_memcmp:
+ case Builtin::BImemcmp:
+ return Builtin::BImemcmp;
+
+ case Builtin::BI__builtin_strncpy:
+ case Builtin::BI__builtin___strncpy_chk:
+ case Builtin::BIstrncpy:
+ return Builtin::BIstrncpy;
+
+ case Builtin::BI__builtin_strncmp:
+ case Builtin::BIstrncmp:
+ return Builtin::BIstrncmp;
+
+ case Builtin::BI__builtin_strncasecmp:
+ case Builtin::BIstrncasecmp:
+ return Builtin::BIstrncasecmp;
+
+ case Builtin::BI__builtin_strncat:
+ case Builtin::BI__builtin___strncat_chk:
+ case Builtin::BIstrncat:
+ return Builtin::BIstrncat;
+
+ case Builtin::BI__builtin_strndup:
+ case Builtin::BIstrndup:
+ return Builtin::BIstrndup;
+
+ case Builtin::BI__builtin_strlen:
+ case Builtin::BIstrlen:
+ return Builtin::BIstrlen;
+
+ default:
+ if (isExternC()) {
+ if (FnInfo->isStr("memset"))
+ return Builtin::BImemset;
+ else if (FnInfo->isStr("memcpy"))
+ return Builtin::BImemcpy;
+ else if (FnInfo->isStr("memmove"))
+ return Builtin::BImemmove;
+ else if (FnInfo->isStr("memcmp"))
+ return Builtin::BImemcmp;
+ else if (FnInfo->isStr("strncpy"))
+ return Builtin::BIstrncpy;
+ else if (FnInfo->isStr("strncmp"))
+ return Builtin::BIstrncmp;
+ else if (FnInfo->isStr("strncasecmp"))
+ return Builtin::BIstrncasecmp;
+ else if (FnInfo->isStr("strncat"))
+ return Builtin::BIstrncat;
+ else if (FnInfo->isStr("strndup"))
+ return Builtin::BIstrndup;
+ else if (FnInfo->isStr("strlen"))
+ return Builtin::BIstrlen;
+ }
+ break;
+ }
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+// FieldDecl Implementation
+//===----------------------------------------------------------------------===//
+
+FieldDecl *FieldDecl::Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, QualType T,
+ TypeSourceInfo *TInfo, Expr *BW, bool Mutable,
+ InClassInitStyle InitStyle) {
+ return new (C, DC) FieldDecl(Decl::Field, DC, StartLoc, IdLoc, Id, T, TInfo,
+ BW, Mutable, InitStyle);
+}
+
+FieldDecl *FieldDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) FieldDecl(Field, nullptr, SourceLocation(),
+ SourceLocation(), nullptr, QualType(), nullptr,
+ nullptr, false, ICIS_NoInit);
+}
+
+bool FieldDecl::isAnonymousStructOrUnion() const {
+ if (!isImplicit() || getDeclName())
+ return false;
+
+ if (const auto *Record = getType()->getAs<RecordType>())
+ return Record->getDecl()->isAnonymousStructOrUnion();
+
+ return false;
+}
+
+unsigned FieldDecl::getBitWidthValue(const ASTContext &Ctx) const {
+ assert(isBitField() && "not a bitfield");
+ auto *BitWidth = static_cast<Expr *>(InitStorage.getPointer());
+ return BitWidth->EvaluateKnownConstInt(Ctx).getZExtValue();
+}
+
+unsigned FieldDecl::getFieldIndex() const {
+ const FieldDecl *Canonical = getCanonicalDecl();
+ if (Canonical != this)
+ return Canonical->getFieldIndex();
+
+ if (CachedFieldIndex) return CachedFieldIndex - 1;
+
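+  // Walk the parent's fields once, caching 1-based indices on the
+  // canonical declarations; 0 is reserved to mean "not yet computed".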
+ unsigned Index = 0;
+ const RecordDecl *RD = getParent();
+
+ for (auto *Field : RD->fields()) {
+ Field->getCanonicalDecl()->CachedFieldIndex = Index + 1;
+ ++Index;
+ }
+
+ assert(CachedFieldIndex && "failed to find field in parent");
+ return CachedFieldIndex - 1;
+}
+
+SourceRange FieldDecl::getSourceRange() const {
+ switch (InitStorage.getInt()) {
+ // All three of these cases store an optional Expr*.
+ case ISK_BitWidthOrNothing:
+ case ISK_InClassCopyInit:
+ case ISK_InClassListInit:
+ if (const auto *E = static_cast<const Expr *>(InitStorage.getPointer()))
+ return SourceRange(getInnerLocStart(), E->getLocEnd());
+ // FALLTHROUGH
+
+ case ISK_CapturedVLAType:
+ return DeclaratorDecl::getSourceRange();
+ }
+ llvm_unreachable("bad init storage kind");
+}
+
+void FieldDecl::setCapturedVLAType(const VariableArrayType *VLAType) {
+  assert((getParent()->isLambda() || getParent()->isCapturedRecord()) &&
+         "capturing type outside a lambda or captured record.");
+ assert(InitStorage.getInt() == ISK_BitWidthOrNothing &&
+ InitStorage.getPointer() == nullptr &&
+ "bit width, initializer or captured type already set");
+ InitStorage.setPointerAndInt(const_cast<VariableArrayType *>(VLAType),
+ ISK_CapturedVLAType);
+}
+
+//===----------------------------------------------------------------------===//
+// TagDecl Implementation
+//===----------------------------------------------------------------------===//
+
+SourceLocation TagDecl::getOuterLocStart() const {
+ return getTemplateOrInnerLocStart(this);
+}
+
+SourceRange TagDecl::getSourceRange() const {
+ SourceLocation E = RBraceLoc.isValid() ? RBraceLoc : getLocation();
+ return SourceRange(getOuterLocStart(), E);
+}
+
+TagDecl *TagDecl::getCanonicalDecl() { return getFirstDecl(); }
+
+void TagDecl::setTypedefNameForAnonDecl(TypedefNameDecl *TDD) {
+ TypedefNameDeclOrQualifier = TDD;
+ if (const Type *T = getTypeForDecl()) {
+ (void)T;
+ assert(T->isLinkageValid());
+ }
+ assert(isLinkageValid());
+}
+
+void TagDecl::startDefinition() {
+ IsBeingDefined = true;
+
+ if (auto *D = dyn_cast<CXXRecordDecl>(this)) {
+ struct CXXRecordDecl::DefinitionData *Data =
+ new (getASTContext()) struct CXXRecordDecl::DefinitionData(D);
+ for (auto I : redecls())
+ cast<CXXRecordDecl>(I)->DefinitionData = Data;
+ }
+}
+
+void TagDecl::completeDefinition() {
+ assert((!isa<CXXRecordDecl>(this) ||
+ cast<CXXRecordDecl>(this)->hasDefinition()) &&
+ "definition completed but not started");
+
+ IsCompleteDefinition = true;
+ IsBeingDefined = false;
+
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->CompletedTagDefinition(this);
+}
+
+TagDecl *TagDecl::getDefinition() const {
+ if (isCompleteDefinition())
+ return const_cast<TagDecl *>(this);
+
+ // If it's possible for us to have an out-of-date definition, check now.
+ if (MayHaveOutOfDateDef) {
+ if (IdentifierInfo *II = getIdentifier()) {
+ if (II->isOutOfDate()) {
+ updateOutOfDate(*II);
+ }
+ }
+ }
+
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(this))
+ return CXXRD->getDefinition();
+
+ for (auto R : redecls())
+ if (R->isCompleteDefinition())
+ return R;
+
+ return nullptr;
+}
+
+void TagDecl::setQualifierInfo(NestedNameSpecifierLoc QualifierLoc) {
+ if (QualifierLoc) {
+ // Make sure the extended qualifier info is allocated.
+ if (!hasExtInfo())
+ TypedefNameDeclOrQualifier = new (getASTContext()) ExtInfo;
+ // Set qualifier info.
+ getExtInfo()->QualifierLoc = QualifierLoc;
+ } else {
+    // Here QualifierLoc is empty, i.e., we are removing the qualifier (if any).
+ if (hasExtInfo()) {
+ if (getExtInfo()->NumTemplParamLists == 0) {
+ getASTContext().Deallocate(getExtInfo());
+ TypedefNameDeclOrQualifier = (TypedefNameDecl *)nullptr;
+ }
+ else
+ getExtInfo()->QualifierLoc = QualifierLoc;
+ }
+ }
+}
+
+void TagDecl::setTemplateParameterListsInfo(
+ ASTContext &Context, ArrayRef<TemplateParameterList *> TPLists) {
+ assert(!TPLists.empty());
+ // Make sure the extended decl info is allocated.
+ if (!hasExtInfo())
+ // Allocate external info struct.
+ TypedefNameDeclOrQualifier = new (getASTContext()) ExtInfo;
+ // Set the template parameter lists info.
+ getExtInfo()->setTemplateParameterListsInfo(Context, TPLists);
+}
+
+//===----------------------------------------------------------------------===//
+// EnumDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void EnumDecl::anchor() { }
+
+EnumDecl *EnumDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id,
+ EnumDecl *PrevDecl, bool IsScoped,
+ bool IsScopedUsingClassTag, bool IsFixed) {
+ auto *Enum = new (C, DC) EnumDecl(C, DC, StartLoc, IdLoc, Id, PrevDecl,
+ IsScoped, IsScopedUsingClassTag, IsFixed);
+ Enum->MayHaveOutOfDateDef = C.getLangOpts().Modules;
+ C.getTypeDeclType(Enum, PrevDecl);
+ return Enum;
+}
+
+EnumDecl *EnumDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ EnumDecl *Enum =
+ new (C, ID) EnumDecl(C, nullptr, SourceLocation(), SourceLocation(),
+ nullptr, nullptr, false, false, false);
+ Enum->MayHaveOutOfDateDef = C.getLangOpts().Modules;
+ return Enum;
+}
+
+SourceRange EnumDecl::getIntegerTypeRange() const {
+ if (const TypeSourceInfo *TI = getIntegerTypeSourceInfo())
+ return TI->getTypeLoc().getSourceRange();
+ return SourceRange();
+}
+
+void EnumDecl::completeDefinition(QualType NewType,
+ QualType NewPromotionType,
+ unsigned NumPositiveBits,
+ unsigned NumNegativeBits) {
+ assert(!isCompleteDefinition() && "Cannot redefine enums!");
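+  // Keep an underlying type that was already recorded (e.g. for an enum
+  // declared with a fixed underlying type); otherwise adopt NewType.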
+ if (!IntegerType)
+ IntegerType = NewType.getTypePtr();
+ PromotionType = NewPromotionType;
+ setNumPositiveBits(NumPositiveBits);
+ setNumNegativeBits(NumNegativeBits);
+ TagDecl::completeDefinition();
+}
+
+TemplateSpecializationKind EnumDecl::getTemplateSpecializationKind() const {
+ if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
+ return MSI->getTemplateSpecializationKind();
+
+ return TSK_Undeclared;
+}
+
+void EnumDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation) {
+ MemberSpecializationInfo *MSI = getMemberSpecializationInfo();
+ assert(MSI && "Not an instantiated member enumeration?");
+ MSI->setTemplateSpecializationKind(TSK);
+ if (TSK != TSK_ExplicitSpecialization &&
+ PointOfInstantiation.isValid() &&
+ MSI->getPointOfInstantiation().isInvalid())
+ MSI->setPointOfInstantiation(PointOfInstantiation);
+}
+
+EnumDecl *EnumDecl::getInstantiatedFromMemberEnum() const {
+ if (SpecializationInfo)
+ return cast<EnumDecl>(SpecializationInfo->getInstantiatedFrom());
+
+ return nullptr;
+}
+
+void EnumDecl::setInstantiationOfMemberEnum(ASTContext &C, EnumDecl *ED,
+ TemplateSpecializationKind TSK) {
+ assert(!SpecializationInfo && "Member enum is already a specialization");
+ SpecializationInfo = new (C) MemberSpecializationInfo(ED, TSK);
+}
+
+//===----------------------------------------------------------------------===//
+// RecordDecl Implementation
+//===----------------------------------------------------------------------===//
+
+RecordDecl::RecordDecl(Kind DK, TagKind TK, const ASTContext &C,
+ DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ RecordDecl *PrevDecl)
+ : TagDecl(DK, TK, C, DC, IdLoc, Id, PrevDecl, StartLoc) {
+ HasFlexibleArrayMember = false;
+ AnonymousStructOrUnion = false;
+ HasObjectMember = false;
+ HasVolatileMember = false;
+ LoadedFieldsFromExternalStorage = false;
+ assert(classof(static_cast<Decl*>(this)) && "Invalid Kind!");
+}
+
+RecordDecl *RecordDecl::Create(const ASTContext &C, TagKind TK, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, RecordDecl* PrevDecl) {
+ RecordDecl *R = new (C, DC) RecordDecl(Record, TK, C, DC,
+ StartLoc, IdLoc, Id, PrevDecl);
+ R->MayHaveOutOfDateDef = C.getLangOpts().Modules;
+
+ C.getTypeDeclType(R, PrevDecl);
+ return R;
+}
+
+RecordDecl *RecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
+ RecordDecl *R =
+ new (C, ID) RecordDecl(Record, TTK_Struct, C, nullptr, SourceLocation(),
+ SourceLocation(), nullptr, nullptr);
+ R->MayHaveOutOfDateDef = C.getLangOpts().Modules;
+ return R;
+}
+
+bool RecordDecl::isInjectedClassName() const {
+ return isImplicit() && getDeclName() && getDeclContext()->isRecord() &&
+ cast<RecordDecl>(getDeclContext())->getDeclName() == getDeclName();
+}
+
+bool RecordDecl::isLambda() const {
+ if (auto RD = dyn_cast<CXXRecordDecl>(this))
+ return RD->isLambda();
+ return false;
+}
+
+bool RecordDecl::isCapturedRecord() const {
+ return hasAttr<CapturedRecordAttr>();
+}
+
+void RecordDecl::setCapturedRecord() {
+ addAttr(CapturedRecordAttr::CreateImplicit(getASTContext()));
+}
+
+RecordDecl::field_iterator RecordDecl::field_begin() const {
+ if (hasExternalLexicalStorage() && !LoadedFieldsFromExternalStorage)
+ LoadFieldsFromExternalStorage();
+
+ return field_iterator(decl_iterator(FirstDecl));
+}
+
+/// completeDefinition - Notes that the definition of this type is now
+/// complete.
+void RecordDecl::completeDefinition() {
+ assert(!isCompleteDefinition() && "Cannot redefine record!");
+ TagDecl::completeDefinition();
+}
+
+/// isMsStruct - Get whether or not this record uses ms_struct layout.
+/// This can be turned on with an attribute, pragma, or the
+/// -mms-bitfields command-line option.
+bool RecordDecl::isMsStruct(const ASTContext &C) const {
+ return hasAttr<MSStructAttr>() || C.getLangOpts().MSBitfields == 1;
+}
+
+void RecordDecl::LoadFieldsFromExternalStorage() const {
+ ExternalASTSource *Source = getASTContext().getExternalSource();
+ assert(hasExternalLexicalStorage() && Source && "No external storage?");
+
+ // Notify that we have a RecordDecl doing some initialization.
+ ExternalASTSource::Deserializing TheFields(Source);
+
+ SmallVector<Decl*, 64> Decls;
+ LoadedFieldsFromExternalStorage = true;
+ Source->FindExternalLexicalDecls(this, [](Decl::Kind K) {
+ return FieldDecl::classofKind(K) || IndirectFieldDecl::classofKind(K);
+ }, Decls);
+
+#ifndef NDEBUG
+ // Check that all decls we got were FieldDecls.
+ for (unsigned i=0, e=Decls.size(); i != e; ++i)
+ assert(isa<FieldDecl>(Decls[i]) || isa<IndirectFieldDecl>(Decls[i]));
+#endif
+
+ if (Decls.empty())
+ return;
+
+ std::tie(FirstDecl, LastDecl) = BuildDeclChain(Decls,
+ /*FieldsAlreadyLoaded=*/false);
+}
+
+bool RecordDecl::mayInsertExtraPadding(bool EmitRemark) const {
+ ASTContext &Context = getASTContext();
+ if (!Context.getLangOpts().Sanitize.hasOneOf(
+ SanitizerKind::Address | SanitizerKind::KernelAddress) ||
+ !Context.getLangOpts().SanitizeAddressFieldPadding)
+ return false;
+ const auto &Blacklist = Context.getSanitizerBlacklist();
+ const auto *CXXRD = dyn_cast<CXXRecordDecl>(this);
+ // We may be able to relax some of these requirements.
+ int ReasonToReject = -1;
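+  // ReasonToReject doubles as the operand streamed into the rejection
+  // remark below; -1 means no reason to reject was found.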
+ if (!CXXRD || CXXRD->isExternCContext())
+ ReasonToReject = 0; // is not C++.
+ else if (CXXRD->hasAttr<PackedAttr>())
+ ReasonToReject = 1; // is packed.
+ else if (CXXRD->isUnion())
+ ReasonToReject = 2; // is a union.
+ else if (CXXRD->isTriviallyCopyable())
+ ReasonToReject = 3; // is trivially copyable.
+ else if (CXXRD->hasTrivialDestructor())
+ ReasonToReject = 4; // has trivial destructor.
+ else if (CXXRD->isStandardLayout())
+ ReasonToReject = 5; // is standard layout.
+ else if (Blacklist.isBlacklistedLocation(getLocation(), "field-padding"))
+ ReasonToReject = 6; // is in a blacklisted file.
+ else if (Blacklist.isBlacklistedType(getQualifiedNameAsString(),
+ "field-padding"))
+ ReasonToReject = 7; // is blacklisted.
+
+ if (EmitRemark) {
+ if (ReasonToReject >= 0)
+ Context.getDiagnostics().Report(
+ getLocation(),
+ diag::remark_sanitize_address_insert_extra_padding_rejected)
+ << getQualifiedNameAsString() << ReasonToReject;
+ else
+ Context.getDiagnostics().Report(
+ getLocation(),
+ diag::remark_sanitize_address_insert_extra_padding_accepted)
+ << getQualifiedNameAsString();
+ }
+ return ReasonToReject < 0;
+}
+
+const FieldDecl *RecordDecl::findFirstNamedDataMember() const {
+ for (const auto *I : fields()) {
+ if (I->getIdentifier())
+ return I;
+
+ if (const auto *RT = I->getType()->getAs<RecordType>())
+ if (const FieldDecl *NamedDataMember =
+ RT->getDecl()->findFirstNamedDataMember())
+ return NamedDataMember;
+ }
+
+ // We didn't find a named data member.
+ return nullptr;
+}
+
+
+//===----------------------------------------------------------------------===//
+// BlockDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void BlockDecl::setParams(ArrayRef<ParmVarDecl *> NewParamInfo) {
+ assert(!ParamInfo && "Already has param info!");
+
+ // Zero params -> null pointer.
+ if (!NewParamInfo.empty()) {
+ NumParams = NewParamInfo.size();
+ ParamInfo = new (getASTContext()) ParmVarDecl*[NewParamInfo.size()];
+ std::copy(NewParamInfo.begin(), NewParamInfo.end(), ParamInfo);
+ }
+}
+
+void BlockDecl::setCaptures(ASTContext &Context, ArrayRef<Capture> Captures,
+ bool CapturesCXXThis) {
+ this->CapturesCXXThis = CapturesCXXThis;
+ this->NumCaptures = Captures.size();
+
+ if (Captures.empty()) {
+ this->Captures = nullptr;
+ return;
+ }
+
+ this->Captures = Captures.copy(Context).data();
+}
+
+bool BlockDecl::capturesVariable(const VarDecl *variable) const {
+ for (const auto &I : captures())
+ // Only auto vars can be captured, so no redeclaration worries.
+ if (I.getVariable() == variable)
+ return true;
+
+ return false;
+}
+
+SourceRange BlockDecl::getSourceRange() const {
+ return SourceRange(getLocation(), Body? Body->getLocEnd() : getLocation());
+}
+
+//===----------------------------------------------------------------------===//
+// Other Decl Allocation/Deallocation Method Implementations
+//===----------------------------------------------------------------------===//
+
+void TranslationUnitDecl::anchor() { }
+
+TranslationUnitDecl *TranslationUnitDecl::Create(ASTContext &C) {
+ return new (C, (DeclContext *)nullptr) TranslationUnitDecl(C);
+}
+
+void ExternCContextDecl::anchor() { }
+
+ExternCContextDecl *ExternCContextDecl::Create(const ASTContext &C,
+ TranslationUnitDecl *DC) {
+ return new (C, DC) ExternCContextDecl(DC);
+}
+
+void LabelDecl::anchor() { }
+
+LabelDecl *LabelDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation IdentL, IdentifierInfo *II) {
+ return new (C, DC) LabelDecl(DC, IdentL, II, nullptr, IdentL);
+}
+
+LabelDecl *LabelDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation IdentL, IdentifierInfo *II,
+ SourceLocation GnuLabelL) {
+ assert(GnuLabelL != IdentL && "Use this only for GNU local labels");
+ return new (C, DC) LabelDecl(DC, IdentL, II, nullptr, GnuLabelL);
+}
+
+LabelDecl *LabelDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) LabelDecl(nullptr, SourceLocation(), nullptr, nullptr,
+ SourceLocation());
+}
+
+void LabelDecl::setMSAsmLabel(StringRef Name) {
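+  // Copy the name into ASTContext-owned storage, NUL-terminated; byte
+  // alignment is sufficient for a raw character buffer.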
+ char *Buffer = new (getASTContext(), 1) char[Name.size() + 1];
+ memcpy(Buffer, Name.data(), Name.size());
+ Buffer[Name.size()] = '\0';
+ MSAsmName = Buffer;
+}
+
+void ValueDecl::anchor() { }
+
+bool ValueDecl::isWeak() const {
+ for (const auto *I : attrs())
+ if (isa<WeakAttr>(I) || isa<WeakRefAttr>(I))
+ return true;
+
+ return isWeakImported();
+}
+
+void ImplicitParamDecl::anchor() { }
+
+ImplicitParamDecl *ImplicitParamDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation IdLoc,
+ IdentifierInfo *Id,
+ QualType Type) {
+ return new (C, DC) ImplicitParamDecl(C, DC, IdLoc, Id, Type);
+}
+
+ImplicitParamDecl *ImplicitParamDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) ImplicitParamDecl(C, nullptr, SourceLocation(), nullptr,
+ QualType());
+}
+
+FunctionDecl *FunctionDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ StorageClass SC,
+ bool isInlineSpecified,
+ bool hasWrittenPrototype,
+ bool isConstexprSpecified) {
+ FunctionDecl *New =
+ new (C, DC) FunctionDecl(Function, C, DC, StartLoc, NameInfo, T, TInfo,
+ SC, isInlineSpecified, isConstexprSpecified);
+ New->HasWrittenPrototype = hasWrittenPrototype;
+ return New;
+}
+
+FunctionDecl *FunctionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) FunctionDecl(Function, C, nullptr, SourceLocation(),
+ DeclarationNameInfo(), QualType(), nullptr,
+ SC_None, false, false);
+}
+
+BlockDecl *BlockDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) {
+ return new (C, DC) BlockDecl(DC, L);
+}
+
+BlockDecl *BlockDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) BlockDecl(nullptr, SourceLocation());
+}
+
+CapturedDecl::CapturedDecl(DeclContext *DC, unsigned NumParams)
+ : Decl(Captured, DC, SourceLocation()), DeclContext(Captured),
+ NumParams(NumParams), ContextParam(0), BodyAndNothrow(nullptr, false) {}
+
+CapturedDecl *CapturedDecl::Create(ASTContext &C, DeclContext *DC,
+ unsigned NumParams) {
+ return new (C, DC, additionalSizeToAlloc<ImplicitParamDecl *>(NumParams))
+ CapturedDecl(DC, NumParams);
+}
+
+CapturedDecl *CapturedDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+ unsigned NumParams) {
+ return new (C, ID, additionalSizeToAlloc<ImplicitParamDecl *>(NumParams))
+ CapturedDecl(nullptr, NumParams);
+}
+
+Stmt *CapturedDecl::getBody() const { return BodyAndNothrow.getPointer(); }
+void CapturedDecl::setBody(Stmt *B) { BodyAndNothrow.setPointer(B); }
+
+bool CapturedDecl::isNothrow() const { return BodyAndNothrow.getInt(); }
+void CapturedDecl::setNothrow(bool Nothrow) { BodyAndNothrow.setInt(Nothrow); }
+
+EnumConstantDecl *EnumConstantDecl::Create(ASTContext &C, EnumDecl *CD,
+ SourceLocation L,
+ IdentifierInfo *Id, QualType T,
+ Expr *E, const llvm::APSInt &V) {
+ return new (C, CD) EnumConstantDecl(CD, L, Id, T, E, V);
+}
+
+EnumConstantDecl *
+EnumConstantDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) EnumConstantDecl(nullptr, SourceLocation(), nullptr,
+ QualType(), nullptr, llvm::APSInt());
+}
+
+void IndirectFieldDecl::anchor() { }
+
+IndirectFieldDecl *
+IndirectFieldDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
+ IdentifierInfo *Id, QualType T, NamedDecl **CH,
+ unsigned CHS) {
+ return new (C, DC) IndirectFieldDecl(DC, L, Id, T, CH, CHS);
+}
+
+IndirectFieldDecl *IndirectFieldDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) IndirectFieldDecl(nullptr, SourceLocation(),
+ DeclarationName(), QualType(), nullptr,
+ 0);
+}
+
+SourceRange EnumConstantDecl::getSourceRange() const {
+ SourceLocation End = getLocation();
+ if (Init)
+ End = Init->getLocEnd();
+ return SourceRange(getLocation(), End);
+}
+
+void TypeDecl::anchor() { }
+
+TypedefDecl *TypedefDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, TypeSourceInfo *TInfo) {
+ return new (C, DC) TypedefDecl(C, DC, StartLoc, IdLoc, Id, TInfo);
+}
+
+void TypedefNameDecl::anchor() { }
+
+TagDecl *TypedefNameDecl::getAnonDeclWithTypedefName(bool AnyRedecl) const {
+ if (auto *TT = getTypeSourceInfo()->getType()->getAs<TagType>()) {
+ auto *OwningTypedef = TT->getDecl()->getTypedefNameForAnonDecl();
+ auto *ThisTypedef = this;
+ if (AnyRedecl && OwningTypedef) {
+ OwningTypedef = OwningTypedef->getCanonicalDecl();
+ ThisTypedef = ThisTypedef->getCanonicalDecl();
+ }
+ if (OwningTypedef == ThisTypedef)
+ return TT->getDecl();
+ }
+
+ return nullptr;
+}
+
+TypedefDecl *TypedefDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) TypedefDecl(C, nullptr, SourceLocation(), SourceLocation(),
+ nullptr, nullptr);
+}
+
+TypeAliasDecl *TypeAliasDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ TypeSourceInfo *TInfo) {
+ return new (C, DC) TypeAliasDecl(C, DC, StartLoc, IdLoc, Id, TInfo);
+}
+
+TypeAliasDecl *TypeAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) TypeAliasDecl(C, nullptr, SourceLocation(),
+ SourceLocation(), nullptr, nullptr);
+}
+
+SourceRange TypedefDecl::getSourceRange() const {
+ SourceLocation RangeEnd = getLocation();
+ if (TypeSourceInfo *TInfo = getTypeSourceInfo()) {
+ if (typeIsPostfix(TInfo->getType()))
+ RangeEnd = TInfo->getTypeLoc().getSourceRange().getEnd();
+ }
+ return SourceRange(getLocStart(), RangeEnd);
+}
+
+SourceRange TypeAliasDecl::getSourceRange() const {
+ SourceLocation RangeEnd = getLocStart();
+ if (TypeSourceInfo *TInfo = getTypeSourceInfo())
+ RangeEnd = TInfo->getTypeLoc().getSourceRange().getEnd();
+ return SourceRange(getLocStart(), RangeEnd);
+}
+
+void FileScopeAsmDecl::anchor() { }
+
+FileScopeAsmDecl *FileScopeAsmDecl::Create(ASTContext &C, DeclContext *DC,
+ StringLiteral *Str,
+ SourceLocation AsmLoc,
+ SourceLocation RParenLoc) {
+ return new (C, DC) FileScopeAsmDecl(DC, Str, AsmLoc, RParenLoc);
+}
+
+FileScopeAsmDecl *FileScopeAsmDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) FileScopeAsmDecl(nullptr, nullptr, SourceLocation(),
+ SourceLocation());
+}
+
+void EmptyDecl::anchor() {}
+
+EmptyDecl *EmptyDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) {
+ return new (C, DC) EmptyDecl(DC, L);
+}
+
+EmptyDecl *EmptyDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) EmptyDecl(nullptr, SourceLocation());
+}
+
+//===----------------------------------------------------------------------===//
+// ImportDecl Implementation
+//===----------------------------------------------------------------------===//
+
+/// \brief Retrieve the number of module identifiers needed to name the given
+/// module.
+static unsigned getNumModuleIdentifiers(Module *Mod) {
+ unsigned Result = 1;
+ while (Mod->Parent) {
+ Mod = Mod->Parent;
+ ++Result;
+ }
+ return Result;
+}
+
+ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc,
+ Module *Imported,
+ ArrayRef<SourceLocation> IdentifierLocs)
+ : Decl(Import, DC, StartLoc), ImportedAndComplete(Imported, true),
+ NextLocalImport()
+{
+ assert(getNumModuleIdentifiers(Imported) == IdentifierLocs.size());
+ auto *StoredLocs = getTrailingObjects<SourceLocation>();
+ std::uninitialized_copy(IdentifierLocs.begin(), IdentifierLocs.end(),
+ StoredLocs);
+}
+
+ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc,
+ Module *Imported, SourceLocation EndLoc)
+ : Decl(Import, DC, StartLoc), ImportedAndComplete(Imported, false),
+ NextLocalImport()
+{
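+  // When the module name is not written out identifier-by-identifier, the
+  // single trailing location records where the import ends instead.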
+ *getTrailingObjects<SourceLocation>() = EndLoc;
+}
+
+ImportDecl *ImportDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, Module *Imported,
+ ArrayRef<SourceLocation> IdentifierLocs) {
+ return new (C, DC,
+ additionalSizeToAlloc<SourceLocation>(IdentifierLocs.size()))
+ ImportDecl(DC, StartLoc, Imported, IdentifierLocs);
+}
+
+ImportDecl *ImportDecl::CreateImplicit(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc,
+ Module *Imported,
+ SourceLocation EndLoc) {
+ ImportDecl *Import = new (C, DC, additionalSizeToAlloc<SourceLocation>(1))
+ ImportDecl(DC, StartLoc, Imported, EndLoc);
+ Import->setImplicit();
+ return Import;
+}
+
+ImportDecl *ImportDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+ unsigned NumLocations) {
+ return new (C, ID, additionalSizeToAlloc<SourceLocation>(NumLocations))
+ ImportDecl(EmptyShell());
+}
+
+ArrayRef<SourceLocation> ImportDecl::getIdentifierLocs() const {
+ if (!ImportedAndComplete.getInt())
+ return None;
+
+ const auto *StoredLocs = getTrailingObjects<SourceLocation>();
+ return llvm::makeArrayRef(StoredLocs,
+ getNumModuleIdentifiers(getImportedModule()));
+}
+
+SourceRange ImportDecl::getSourceRange() const {
+ if (!ImportedAndComplete.getInt())
+ return SourceRange(getLocation(), *getTrailingObjects<SourceLocation>());
+
+ return SourceRange(getLocation(), getIdentifierLocs().back());
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
new file mode 100644
index 0000000..16394e8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
@@ -0,0 +1,1716 @@
+//===--- DeclBase.cpp - Declaration AST Node Implementation ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Decl and DeclContext classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclContextInternals.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclOpenMP.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DependentDiagnostic.h"
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Statistics
+//===----------------------------------------------------------------------===//
+
+#define DECL(DERIVED, BASE) static int n##DERIVED##s = 0;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+
+void Decl::updateOutOfDate(IdentifierInfo &II) const {
+ getASTContext().getExternalSource()->updateOutOfDateIdentifier(II);
+}
+
+#define DECL(DERIVED, BASE) \
+ static_assert(Decl::DeclObjAlignment >= \
+ llvm::AlignOf<DERIVED##Decl>::Alignment, \
+ "Alignment sufficient after objects prepended to " #DERIVED);
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+
+void *Decl::operator new(std::size_t Size, const ASTContext &Context,
+ unsigned ID, std::size_t Extra) {
+ // Allocate an extra 8 bytes worth of storage, which ensures that the
+ // resulting pointer will still be 8-byte aligned.
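+  //
+  // The allocation is laid out as:
+  //
+  //   [owning module ID : 4 bytes][global decl ID : 4 bytes][Decl object...]
+  //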
+ static_assert(sizeof(unsigned) * 2 >= DeclObjAlignment,
+ "Decl won't be misaligned");
+ void *Start = Context.Allocate(Size + Extra + 8);
+ void *Result = (char*)Start + 8;
+
+ unsigned *PrefixPtr = (unsigned *)Result - 2;
+
+ // Zero out the first 4 bytes; this is used to store the owning module ID.
+ PrefixPtr[0] = 0;
+
+ // Store the global declaration ID in the second 4 bytes.
+ PrefixPtr[1] = ID;
+
+ return Result;
+}
+
+void *Decl::operator new(std::size_t Size, const ASTContext &Ctx,
+ DeclContext *Parent, std::size_t Extra) {
+ assert(!Parent || &Parent->getParentASTContext() == &Ctx);
+ // With local visibility enabled, we track the owning module even for local
+ // declarations.
+ if (Ctx.getLangOpts().ModulesLocalVisibility) {
+ // Ensure required alignment of the resulting object by adding extra
+ // padding at the start if required.
+ size_t ExtraAlign =
+ llvm::OffsetToAlignment(sizeof(Module *), DeclObjAlignment);
+ char *Buffer = reinterpret_cast<char *>(
+ ::operator new(ExtraAlign + sizeof(Module *) + Size + Extra, Ctx));
+ Buffer += ExtraAlign;
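+    // Construct a null Module* slot at the front of the buffer and return
+    // the address just past it, where the Decl itself will be constructed.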
+ return new (Buffer) Module*(nullptr) + 1;
+ }
+ return ::operator new(Size + Extra, Ctx);
+}
+
+Module *Decl::getOwningModuleSlow() const {
+ assert(isFromASTFile() && "Not from AST file?");
+ return getASTContext().getExternalSource()->getModule(getOwningModuleID());
+}
+
+bool Decl::hasLocalOwningModuleStorage() const {
+ return getASTContext().getLangOpts().ModulesLocalVisibility;
+}
+
+const char *Decl::getDeclKindName() const {
+ switch (DeclKind) {
+ default: llvm_unreachable("Declaration not in DeclNodes.inc!");
+#define DECL(DERIVED, BASE) case DERIVED: return #DERIVED;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+ }
+}
+
+void Decl::setInvalidDecl(bool Invalid) {
+ InvalidDecl = Invalid;
+ assert(!isa<TagDecl>(this) || !cast<TagDecl>(this)->isCompleteDefinition());
+ if (Invalid && !isa<ParmVarDecl>(this)) {
+ // Defensive maneuver for ill-formed code: we're likely not to make it to
+ // a point where we set the access specifier, so default it to "public"
+ // to avoid triggering asserts elsewhere in the front end.
+ setAccess(AS_public);
+ }
+}
+
+const char *DeclContext::getDeclKindName() const {
+ switch (DeclKind) {
+ default: llvm_unreachable("Declaration context not in DeclNodes.inc!");
+#define DECL(DERIVED, BASE) case Decl::DERIVED: return #DERIVED;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+ }
+}
+
+bool Decl::StatisticsEnabled = false;
+void Decl::EnableStatistics() {
+ StatisticsEnabled = true;
+}
+
+void Decl::PrintStats() {
+ llvm::errs() << "\n*** Decl Stats:\n";
+
+ int totalDecls = 0;
+#define DECL(DERIVED, BASE) totalDecls += n##DERIVED##s;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+ llvm::errs() << " " << totalDecls << " decls total.\n";
+
+ int totalBytes = 0;
+#define DECL(DERIVED, BASE) \
+ if (n##DERIVED##s > 0) { \
+ totalBytes += (int)(n##DERIVED##s * sizeof(DERIVED##Decl)); \
+ llvm::errs() << " " << n##DERIVED##s << " " #DERIVED " decls, " \
+ << sizeof(DERIVED##Decl) << " each (" \
+ << n##DERIVED##s * sizeof(DERIVED##Decl) \
+ << " bytes)\n"; \
+ }
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+
+ llvm::errs() << "Total bytes = " << totalBytes << "\n";
+}
+
+void Decl::add(Kind k) {
+ switch (k) {
+#define DECL(DERIVED, BASE) case DERIVED: ++n##DERIVED##s; break;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+ }
+}
+
+bool Decl::isTemplateParameterPack() const {
+ if (const TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(this))
+ return TTP->isParameterPack();
+ if (const NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(this))
+ return NTTP->isParameterPack();
+ if (const TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(this))
+ return TTP->isParameterPack();
+ return false;
+}
+
+bool Decl::isParameterPack() const {
+ if (const ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(this))
+ return Parm->isParameterPack();
+
+ return isTemplateParameterPack();
+}
+
+FunctionDecl *Decl::getAsFunction() {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(this))
+ return FD;
+ if (const FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(this))
+ return FTD->getTemplatedDecl();
+ return nullptr;
+}
+
+bool Decl::isTemplateDecl() const {
+ return isa<TemplateDecl>(this);
+}
+
+const DeclContext *Decl::getParentFunctionOrMethod() const {
+ for (const DeclContext *DC = getDeclContext();
+ DC && !DC->isTranslationUnit() && !DC->isNamespace();
+ DC = DC->getParent())
+ if (DC->isFunctionOrMethod())
+ return DC;
+
+ return nullptr;
+}
+
+
+//===----------------------------------------------------------------------===//
+// PrettyStackTraceDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void PrettyStackTraceDecl::print(raw_ostream &OS) const {
+ SourceLocation TheLoc = Loc;
+ if (TheLoc.isInvalid() && TheDecl)
+ TheLoc = TheDecl->getLocation();
+
+ if (TheLoc.isValid()) {
+ TheLoc.print(OS, SM);
+ OS << ": ";
+ }
+
+ OS << Message;
+
+ if (const NamedDecl *DN = dyn_cast_or_null<NamedDecl>(TheDecl)) {
+ OS << " '";
+ DN->printQualifiedName(OS);
+ OS << '\'';
+ }
+ OS << '\n';
+}
+
+//===----------------------------------------------------------------------===//
+// Decl Implementation
+//===----------------------------------------------------------------------===//
+
+// Out-of-line virtual method providing a home for Decl.
+Decl::~Decl() { }
+
+void Decl::setDeclContext(DeclContext *DC) {
+ DeclCtx = DC;
+}
+
+void Decl::setLexicalDeclContext(DeclContext *DC) {
+ if (DC == getLexicalDeclContext())
+ return;
+
+ if (isInSemaDC()) {
+ setDeclContextsImpl(getDeclContext(), DC, getASTContext());
+ } else {
+ getMultipleDC()->LexicalDC = DC;
+ }
+ Hidden = cast<Decl>(DC)->Hidden;
+}
+
+void Decl::setDeclContextsImpl(DeclContext *SemaDC, DeclContext *LexicalDC,
+ ASTContext &Ctx) {
+ if (SemaDC == LexicalDC) {
+ DeclCtx = SemaDC;
+ } else {
+ Decl::MultipleDC *MDC = new (Ctx) Decl::MultipleDC();
+ MDC->SemanticDC = SemaDC;
+ MDC->LexicalDC = LexicalDC;
+ DeclCtx = MDC;
+ }
+}
+
+bool Decl::isLexicallyWithinFunctionOrMethod() const {
+ const DeclContext *LDC = getLexicalDeclContext();
+ while (true) {
+ if (LDC->isFunctionOrMethod())
+ return true;
+ if (!isa<TagDecl>(LDC))
+ return false;
+ LDC = LDC->getLexicalParent();
+ }
+}
+
+bool Decl::isInAnonymousNamespace() const {
+ const DeclContext *DC = getDeclContext();
+ do {
+ if (const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC))
+ if (ND->isAnonymousNamespace())
+ return true;
+ } while ((DC = DC->getParent()));
+
+ return false;
+}
+
+bool Decl::isInStdNamespace() const {
+ return getDeclContext()->isStdNamespace();
+}
+
+TranslationUnitDecl *Decl::getTranslationUnitDecl() {
+ if (TranslationUnitDecl *TUD = dyn_cast<TranslationUnitDecl>(this))
+ return TUD;
+
+ DeclContext *DC = getDeclContext();
+ assert(DC && "This decl is not contained in a translation unit!");
+
+ while (!DC->isTranslationUnit()) {
+ DC = DC->getParent();
+ assert(DC && "This decl is not contained in a translation unit!");
+ }
+
+ return cast<TranslationUnitDecl>(DC);
+}
+
+ASTContext &Decl::getASTContext() const {
+ return getTranslationUnitDecl()->getASTContext();
+}
+
+ASTMutationListener *Decl::getASTMutationListener() const {
+ return getASTContext().getASTMutationListener();
+}
+
+unsigned Decl::getMaxAlignment() const {
+ if (!hasAttrs())
+ return 0;
+
+ unsigned Align = 0;
+ const AttrVec &V = getAttrs();
+ ASTContext &Ctx = getASTContext();
+ specific_attr_iterator<AlignedAttr> I(V.begin()), E(V.end());
+ for (; I != E; ++I)
+ Align = std::max(Align, I->getAlignment(Ctx));
+ return Align;
+}
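+  // All redeclarations of a CXXRecordDecl share a single DefinitionData,
+  // so point every redeclaration at the newly allocated block.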
+
+bool Decl::isUsed(bool CheckUsedAttr) const {
+ if (Used)
+ return true;
+
+ // Check for used attribute.
+ if (CheckUsedAttr && hasAttr<UsedAttr>())
+ return true;
+
+ return false;
+}
+
+void Decl::markUsed(ASTContext &C) {
+ if (Used)
+ return;
+
+ if (C.getASTMutationListener())
+ C.getASTMutationListener()->DeclarationMarkedUsed(this);
+
+ Used = true;
+}
+
+bool Decl::isReferenced() const {
+ if (Referenced)
+ return true;
+
+ // Check redeclarations.
+ for (auto I : redecls())
+ if (I->Referenced)
+ return true;
+
+ return false;
+}
+
+/// \brief Determine the availability of the given declaration based on
+/// the target platform.
+///
+/// When it returns an availability result other than \c AR_Available,
+/// if the \p Message parameter is non-NULL, it will be set to a
+/// string describing why the entity is unavailable.
+///
+/// FIXME: Make these strings localizable, since they end up in
+/// diagnostics.
+static AvailabilityResult CheckAvailability(ASTContext &Context,
+ const AvailabilityAttr *A,
+ std::string *Message) {
+ VersionTuple TargetMinVersion =
+ Context.getTargetInfo().getPlatformMinVersion();
+
+ if (TargetMinVersion.empty())
+ return AR_Available;
+
+ // Check if this is an App Extension "platform", and if so chop off
+ // the suffix for matching with the actual platform.
+ StringRef ActualPlatform = A->getPlatform()->getName();
+ StringRef RealizedPlatform = ActualPlatform;
+ if (Context.getLangOpts().AppExt) {
+ size_t suffix = RealizedPlatform.rfind("_app_extension");
+ if (suffix != StringRef::npos)
+ RealizedPlatform = RealizedPlatform.slice(0, suffix);
+ }
+
+ StringRef TargetPlatform = Context.getTargetInfo().getPlatformName();
+
+ // Match the platform name.
+ if (RealizedPlatform != TargetPlatform)
+ return AR_Available;
+
+ StringRef PrettyPlatformName
+ = AvailabilityAttr::getPrettyPlatformName(ActualPlatform);
+
+ if (PrettyPlatformName.empty())
+ PrettyPlatformName = ActualPlatform;
+
+ std::string HintMessage;
+ if (!A->getMessage().empty()) {
+ HintMessage = " - ";
+ HintMessage += A->getMessage();
+ }
+
+ // Make sure that this declaration has not been marked 'unavailable'.
+ if (A->getUnavailable()) {
+ if (Message) {
+ Message->clear();
+ llvm::raw_string_ostream Out(*Message);
+ Out << "not available on " << PrettyPlatformName
+ << HintMessage;
+ }
+
+ return AR_Unavailable;
+ }
+
+ // Make sure that this declaration has already been introduced.
+ if (!A->getIntroduced().empty() &&
+ TargetMinVersion < A->getIntroduced()) {
+ if (Message) {
+ Message->clear();
+ llvm::raw_string_ostream Out(*Message);
+ VersionTuple VTI(A->getIntroduced());
+ VTI.UseDotAsSeparator();
+ Out << "introduced in " << PrettyPlatformName << ' '
+ << VTI << HintMessage;
+ }
+
+ return AR_NotYetIntroduced;
+ }
+
+ // Make sure that this declaration hasn't been obsoleted.
+ if (!A->getObsoleted().empty() && TargetMinVersion >= A->getObsoleted()) {
+ if (Message) {
+ Message->clear();
+ llvm::raw_string_ostream Out(*Message);
+ VersionTuple VTO(A->getObsoleted());
+ VTO.UseDotAsSeparator();
+ Out << "obsoleted in " << PrettyPlatformName << ' '
+ << VTO << HintMessage;
+ }
+
+ return AR_Unavailable;
+ }
+
+ // Make sure that this declaration hasn't been deprecated.
+ if (!A->getDeprecated().empty() && TargetMinVersion >= A->getDeprecated()) {
+ if (Message) {
+ Message->clear();
+ llvm::raw_string_ostream Out(*Message);
+ VersionTuple VTD(A->getDeprecated());
+ VTD.UseDotAsSeparator();
+ Out << "first deprecated in " << PrettyPlatformName << ' '
+ << VTD << HintMessage;
+ }
+
+ return AR_Deprecated;
+ }
+
+ return AR_Available;
+}
+
+AvailabilityResult Decl::getAvailability(std::string *Message) const {
+ AvailabilityResult Result = AR_Available;
+ std::string ResultMessage;
+
+ for (const auto *A : attrs()) {
+ if (const auto *Deprecated = dyn_cast<DeprecatedAttr>(A)) {
+ if (Result >= AR_Deprecated)
+ continue;
+
+ if (Message)
+ ResultMessage = Deprecated->getMessage();
+
+ Result = AR_Deprecated;
+ continue;
+ }
+
+ if (const auto *Unavailable = dyn_cast<UnavailableAttr>(A)) {
+ if (Message)
+ *Message = Unavailable->getMessage();
+ return AR_Unavailable;
+ }
+
+ if (const auto *Availability = dyn_cast<AvailabilityAttr>(A)) {
+ AvailabilityResult AR = CheckAvailability(getASTContext(), Availability,
+ Message);
+
+ if (AR == AR_Unavailable)
+ return AR_Unavailable;
+
+ if (AR > Result) {
+ Result = AR;
+ if (Message)
+ ResultMessage.swap(*Message);
+ }
+ continue;
+ }
+ }
+
+ if (Message)
+ Message->swap(ResultMessage);
+ return Result;
+}
+
+bool Decl::canBeWeakImported(bool &IsDefinition) const {
+ IsDefinition = false;
+
+ // Variables, if they aren't definitions.
+ if (const VarDecl *Var = dyn_cast<VarDecl>(this)) {
+ if (Var->isThisDeclarationADefinition()) {
+ IsDefinition = true;
+ return false;
+ }
+ return true;
+
+ // Functions, if they aren't definitions.
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this)) {
+ if (FD->hasBody()) {
+ IsDefinition = true;
+ return false;
+ }
+ return true;
+
+ // Objective-C classes, if this is the non-fragile runtime.
+ } else if (isa<ObjCInterfaceDecl>(this) &&
+ getASTContext().getLangOpts().ObjCRuntime.hasWeakClassImport()) {
+ return true;
+
+ // Nothing else.
+ } else {
+ return false;
+ }
+}
+
+bool Decl::isWeakImported() const {
+ bool IsDefinition;
+ if (!canBeWeakImported(IsDefinition))
+ return false;
+
+ for (const auto *A : attrs()) {
+ if (isa<WeakImportAttr>(A))
+ return true;
+
+ if (const auto *Availability = dyn_cast<AvailabilityAttr>(A)) {
+ if (CheckAvailability(getASTContext(), Availability,
+ nullptr) == AR_NotYetIntroduced)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
+ switch (DeclKind) {
+ case Function:
+ case CXXMethod:
+ case CXXConstructor:
+ case CXXDestructor:
+ case CXXConversion:
+ case EnumConstant:
+ case Var:
+ case ImplicitParam:
+ case ParmVar:
+ case NonTypeTemplateParm:
+ case ObjCMethod:
+ case ObjCProperty:
+ case MSProperty:
+ return IDNS_Ordinary;
+ case Label:
+ return IDNS_Label;
+ case IndirectField:
+ return IDNS_Ordinary | IDNS_Member;
+
+ case ObjCCompatibleAlias:
+ case ObjCInterface:
+ return IDNS_Ordinary | IDNS_Type;
+
+ case Typedef:
+ case TypeAlias:
+ case TypeAliasTemplate:
+ case UnresolvedUsingTypename:
+ case TemplateTypeParm:
+ case ObjCTypeParam:
+ return IDNS_Ordinary | IDNS_Type;
+
+ case UsingShadow:
+ return 0; // we'll actually overwrite this later
+
+ case UnresolvedUsingValue:
+ return IDNS_Ordinary | IDNS_Using;
+
+ case Using:
+ return IDNS_Using;
+
+ case ObjCProtocol:
+ return IDNS_ObjCProtocol;
+
+ case Field:
+ case ObjCAtDefsField:
+ case ObjCIvar:
+ return IDNS_Member;
+
+ case Record:
+ case CXXRecord:
+ case Enum:
+ return IDNS_Tag | IDNS_Type;
+
+ case Namespace:
+ case NamespaceAlias:
+ return IDNS_Namespace;
+
+ case FunctionTemplate:
+ case VarTemplate:
+ return IDNS_Ordinary;
+
+ case ClassTemplate:
+ case TemplateTemplateParm:
+ return IDNS_Ordinary | IDNS_Tag | IDNS_Type;
+
+ // Never have names.
+ case Friend:
+ case FriendTemplate:
+ case AccessSpec:
+ case LinkageSpec:
+ case FileScopeAsm:
+ case StaticAssert:
+ case ObjCPropertyImpl:
+ case Block:
+ case Captured:
+ case TranslationUnit:
+ case ExternCContext:
+
+ case UsingDirective:
+ case BuiltinTemplate:
+ case ClassTemplateSpecialization:
+ case ClassTemplatePartialSpecialization:
+ case ClassScopeFunctionSpecialization:
+ case VarTemplateSpecialization:
+ case VarTemplatePartialSpecialization:
+ case ObjCImplementation:
+ case ObjCCategory:
+ case ObjCCategoryImpl:
+ case Import:
+ case OMPThreadPrivate:
+ case Empty:
+ // Never looked up by name.
+ return 0;
+ }
+
+ llvm_unreachable("Invalid DeclKind!");
+}
+
+void Decl::setAttrsImpl(const AttrVec &attrs, ASTContext &Ctx) {
+ assert(!HasAttrs && "Decl already contains attrs.");
+
+ AttrVec &AttrBlank = Ctx.getDeclAttrs(this);
+ assert(AttrBlank.empty() && "HasAttrs was wrong?");
+
+ AttrBlank = attrs;
+ HasAttrs = true;
+}
+
+void Decl::dropAttrs() {
+ if (!HasAttrs) return;
+
+ HasAttrs = false;
+ getASTContext().eraseDeclAttrs(this);
+}
+
+const AttrVec &Decl::getAttrs() const {
+ assert(HasAttrs && "No attrs to get!");
+ return getASTContext().getDeclAttrs(this);
+}
+
+Decl *Decl::castFromDeclContext (const DeclContext *D) {
+ Decl::Kind DK = D->getDeclKind();
+ switch(DK) {
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT(NAME) \
+ case Decl::NAME: \
+ return static_cast<NAME##Decl*>(const_cast<DeclContext*>(D));
+#define DECL_CONTEXT_BASE(NAME)
+#include "clang/AST/DeclNodes.inc"
+ default:
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT_BASE(NAME) \
+ if (DK >= first##NAME && DK <= last##NAME) \
+ return static_cast<NAME##Decl*>(const_cast<DeclContext*>(D));
+#include "clang/AST/DeclNodes.inc"
+ llvm_unreachable("a decl that inherits DeclContext isn't handled");
+ }
+}
+
+DeclContext *Decl::castToDeclContext(const Decl *D) {
+ Decl::Kind DK = D->getKind();
+ switch(DK) {
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT(NAME) \
+ case Decl::NAME: \
+ return static_cast<NAME##Decl*>(const_cast<Decl*>(D));
+#define DECL_CONTEXT_BASE(NAME)
+#include "clang/AST/DeclNodes.inc"
+ default:
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT_BASE(NAME) \
+ if (DK >= first##NAME && DK <= last##NAME) \
+ return static_cast<NAME##Decl*>(const_cast<Decl*>(D));
+#include "clang/AST/DeclNodes.inc"
+ llvm_unreachable("a decl that inherits DeclContext isn't handled");
+ }
+}
+
+SourceLocation Decl::getBodyRBrace() const {
+ // Special handling of FunctionDecl to avoid de-serializing the body from PCH.
+ // FunctionDecl stores EndRangeLoc for this purpose.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this)) {
+ const FunctionDecl *Definition;
+ if (FD->hasBody(Definition))
+ return Definition->getSourceRange().getEnd();
+ return SourceLocation();
+ }
+
+ if (Stmt *Body = getBody())
+ return Body->getSourceRange().getEnd();
+
+ return SourceLocation();
+}
+
+bool Decl::AccessDeclContextSanity() const {
+#ifndef NDEBUG
+ // Suppress this check if any of the following hold:
+ // 1. this is the translation unit (and thus has no parent)
+ // 2. this is a template parameter (and thus doesn't belong to its context)
+ // 3. this is a non-type template parameter
+ // 4. the context is not a record
+ // 5. it's invalid
+ // 6. it's a C++0x static_assert.
+ if (isa<TranslationUnitDecl>(this) ||
+ isa<TemplateTypeParmDecl>(this) ||
+ isa<NonTypeTemplateParmDecl>(this) ||
+ !isa<CXXRecordDecl>(getDeclContext()) ||
+ isInvalidDecl() ||
+ isa<StaticAssertDecl>(this) ||
+ // FIXME: a ParmVarDecl can have ClassTemplateSpecialization
+ // as DeclContext (?).
+ isa<ParmVarDecl>(this) ||
+ // FIXME: a ClassTemplateSpecialization or CXXRecordDecl can have
+ // AS_none as access specifier.
+ isa<CXXRecordDecl>(this) ||
+ isa<ClassScopeFunctionSpecializationDecl>(this))
+ return true;
+
+ assert(Access != AS_none &&
+ "Access specifier is AS_none inside a record decl");
+#endif
+ return true;
+}
+
+static Decl::Kind getKind(const Decl *D) { return D->getKind(); }
+static Decl::Kind getKind(const DeclContext *DC) { return DC->getDeclKind(); }
+
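+/// Look through the type of a value or typedef declaration, peeling off a
+/// function pointer (or, when \p BlocksToo is set, a block pointer) to
+/// reach the underlying function type, if any.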
+const FunctionType *Decl::getFunctionType(bool BlocksToo) const {
+ QualType Ty;
+ if (const ValueDecl *D = dyn_cast<ValueDecl>(this))
+ Ty = D->getType();
+ else if (const TypedefNameDecl *D = dyn_cast<TypedefNameDecl>(this))
+ Ty = D->getUnderlyingType();
+ else
+ return nullptr;
+
+ if (Ty->isFunctionPointerType())
+ Ty = Ty->getAs<PointerType>()->getPointeeType();
+ else if (BlocksToo && Ty->isBlockPointerType())
+ Ty = Ty->getAs<BlockPointerType>()->getPointeeType();
+
+ return Ty->getAs<FunctionType>();
+}
+
+
+/// Starting at a given context (a Decl or DeclContext), look for a
+/// code context that is not a closure (a lambda, block, etc.).
+template <class T> static Decl *getNonClosureContext(T *D) {
+ if (getKind(D) == Decl::CXXMethod) {
+ CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
+ if (MD->getOverloadedOperator() == OO_Call &&
+ MD->getParent()->isLambda())
+ return getNonClosureContext(MD->getParent()->getParent());
+ return MD;
+ } else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ return FD;
+ } else if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ return MD;
+ } else if (BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ return getNonClosureContext(BD->getParent());
+ } else if (CapturedDecl *CD = dyn_cast<CapturedDecl>(D)) {
+ return getNonClosureContext(CD->getParent());
+ } else {
+ return nullptr;
+ }
+}
+
+Decl *Decl::getNonClosureContext() {
+ return ::getNonClosureContext(this);
+}
+
+Decl *DeclContext::getNonClosureAncestor() {
+ return ::getNonClosureContext(this);
+}
+
+//===----------------------------------------------------------------------===//
+// DeclContext Implementation
+//===----------------------------------------------------------------------===//
+
+bool DeclContext::classof(const Decl *D) {
+ switch (D->getKind()) {
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT(NAME) case Decl::NAME:
+#define DECL_CONTEXT_BASE(NAME)
+#include "clang/AST/DeclNodes.inc"
+ return true;
+ default:
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT_BASE(NAME) \
+ if (D->getKind() >= Decl::first##NAME && \
+ D->getKind() <= Decl::last##NAME) \
+ return true;
+#include "clang/AST/DeclNodes.inc"
+ return false;
+ }
+}
+
+DeclContext::~DeclContext() { }
+
+/// \brief Find the parent context of this context that will be
+/// used for unqualified name lookup.
+///
+/// Generally, the parent lookup context is the semantic context. However, for
+/// a friend function the parent lookup context is the lexical context, which
+/// is the class in which the friend is declared.
+DeclContext *DeclContext::getLookupParent() {
+ // FIXME: Find a better way to identify friends
+ if (isa<FunctionDecl>(this))
+ if (getParent()->getRedeclContext()->isFileContext() &&
+ getLexicalParent()->getRedeclContext()->isRecord())
+ return getLexicalParent();
+
+ return getParent();
+}
+
+bool DeclContext::isInlineNamespace() const {
+ return isNamespace() &&
+ cast<NamespaceDecl>(this)->isInline();
+}
+
+bool DeclContext::isStdNamespace() const {
+ if (!isNamespace())
+ return false;
+
+ const NamespaceDecl *ND = cast<NamespaceDecl>(this);
+ if (ND->isInline()) {
+ return ND->getParent()->isStdNamespace();
+ }
+
+ if (!getParent()->getRedeclContext()->isTranslationUnit())
+ return false;
+
+ const IdentifierInfo *II = ND->getIdentifier();
+ return II && II->isStr("std");
+}
+
+bool DeclContext::isDependentContext() const {
+ if (isFileContext())
+ return false;
+
+ if (isa<ClassTemplatePartialSpecializationDecl>(this))
+ return true;
+
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(this)) {
+ if (Record->getDescribedClassTemplate())
+ return true;
+
+ if (Record->isDependentLambda())
+ return true;
+ }
+
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(this)) {
+ if (Function->getDescribedFunctionTemplate())
+ return true;
+
+ // Friend function declarations are dependent if their *lexical*
+ // context is dependent.
+ if (cast<Decl>(this)->getFriendObjectKind())
+ return getLexicalParent()->isDependentContext();
+ }
+
+ // FIXME: A variable template is a dependent context, but is not a
+ // DeclContext. A context within it (such as a lambda-expression)
+ // should be considered dependent.
+
+ return getParent() && getParent()->isDependentContext();
+}
+
+bool DeclContext::isTransparentContext() const {
+ if (DeclKind == Decl::Enum)
+ return !cast<EnumDecl>(this)->isScoped();
+ else if (DeclKind == Decl::LinkageSpec)
+ return true;
+
+ return false;
+}
+
+static bool isLinkageSpecContext(const DeclContext *DC,
+ LinkageSpecDecl::LanguageIDs ID) {
+ while (DC->getDeclKind() != Decl::TranslationUnit) {
+ if (DC->getDeclKind() == Decl::LinkageSpec)
+ return cast<LinkageSpecDecl>(DC)->getLanguage() == ID;
+ DC = DC->getLexicalParent();
+ }
+ return false;
+}
+
+bool DeclContext::isExternCContext() const {
+ return isLinkageSpecContext(this, clang::LinkageSpecDecl::lang_c);
+}
+
+bool DeclContext::isExternCXXContext() const {
+ return isLinkageSpecContext(this, clang::LinkageSpecDecl::lang_cxx);
+}
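+
+// For example, inside 'extern "C" { void f(); }' the linkage spec context
+// satisfies isExternCContext(); the walk above follows lexical parents, so
+// the spec is found even when it is not the immediate context.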
+
+bool DeclContext::Encloses(const DeclContext *DC) const {
+ if (getPrimaryContext() != this)
+ return getPrimaryContext()->Encloses(DC);
+
+ for (; DC; DC = DC->getParent())
+ if (DC->getPrimaryContext() == this)
+ return true;
+ return false;
+}
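+
+// For example, in 'namespace N { struct S { void m(); }; }', Encloses() holds
+// for both the translation unit and N with respect to the context of S::m;
+// comparing primary contexts makes this robust across redeclarations of N.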
+
+DeclContext *DeclContext::getPrimaryContext() {
+ switch (DeclKind) {
+ case Decl::TranslationUnit:
+ case Decl::ExternCContext:
+ case Decl::LinkageSpec:
+ case Decl::Block:
+ case Decl::Captured:
+ // There is only one DeclContext for these entities.
+ return this;
+
+ case Decl::Namespace:
+ // The original namespace is our primary context.
+ return static_cast<NamespaceDecl*>(this)->getOriginalNamespace();
+
+ case Decl::ObjCMethod:
+ return this;
+
+ case Decl::ObjCInterface:
+ if (ObjCInterfaceDecl *Def = cast<ObjCInterfaceDecl>(this)->getDefinition())
+ return Def;
+
+ return this;
+
+ case Decl::ObjCProtocol:
+ if (ObjCProtocolDecl *Def = cast<ObjCProtocolDecl>(this)->getDefinition())
+ return Def;
+
+ return this;
+
+ case Decl::ObjCCategory:
+ return this;
+
+ case Decl::ObjCImplementation:
+ case Decl::ObjCCategoryImpl:
+ return this;
+
+ default:
+ if (DeclKind >= Decl::firstTag && DeclKind <= Decl::lastTag) {
+ // If this is a tag type that has a definition or is currently
+ // being defined, that definition is our primary context.
+ TagDecl *Tag = cast<TagDecl>(this);
+
+ if (TagDecl *Def = Tag->getDefinition())
+ return Def;
+
+ if (const TagType *TagTy = dyn_cast<TagType>(Tag->getTypeForDecl())) {
+ // Note, TagType::getDecl returns the (partial) definition if one exists.
+ TagDecl *PossiblePartialDef = TagTy->getDecl();
+ if (PossiblePartialDef->isBeingDefined())
+ return PossiblePartialDef;
+ } else {
+ assert(isa<InjectedClassNameType>(Tag->getTypeForDecl()));
+ }
+
+ return Tag;
+ }
+
+ assert(DeclKind >= Decl::firstFunction && DeclKind <= Decl::lastFunction &&
+ "Unknown DeclContext kind");
+ return this;
+ }
+}
+
+void
+DeclContext::collectAllContexts(SmallVectorImpl<DeclContext *> &Contexts) {
+ Contexts.clear();
+
+ if (DeclKind != Decl::Namespace) {
+ Contexts.push_back(this);
+ return;
+ }
+
+ NamespaceDecl *Self = static_cast<NamespaceDecl *>(this);
+ for (NamespaceDecl *N = Self->getMostRecentDecl(); N;
+ N = N->getPreviousDecl())
+ Contexts.push_back(N);
+
+ std::reverse(Contexts.begin(), Contexts.end());
+}
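+
+// For example, after 'namespace N { int a; } namespace N { int b; }',
+// collecting the contexts of N yields both NamespaceDecls, oldest first.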
+
+std::pair<Decl *, Decl *>
+DeclContext::BuildDeclChain(ArrayRef<Decl*> Decls,
+ bool FieldsAlreadyLoaded) {
+ // Build up a chain of declarations via the Decl::NextInContextAndBits field.
+ Decl *FirstNewDecl = nullptr;
+ Decl *PrevDecl = nullptr;
+ for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
+ if (FieldsAlreadyLoaded && isa<FieldDecl>(Decls[I]))
+ continue;
+
+ Decl *D = Decls[I];
+ if (PrevDecl)
+ PrevDecl->NextInContextAndBits.setPointer(D);
+ else
+ FirstNewDecl = D;
+
+ PrevDecl = D;
+ }
+
+ return std::make_pair(FirstNewDecl, PrevDecl);
+}
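+
+// For example, given Decls = {A, B, C} (and no preloaded fields), the chain
+// built above is A -> B -> C via NextInContextAndBits, and the pair (A, C)
+// is returned.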
+
+/// \brief We have just acquired external visible storage, and we have already
+/// built a lookup map. For every name in the map, pull in the new names from
+/// the external storage.
+void DeclContext::reconcileExternalVisibleStorage() const {
+ assert(NeedToReconcileExternalVisibleStorage && LookupPtr);
+ NeedToReconcileExternalVisibleStorage = false;
+
+ for (auto &Lookup : *LookupPtr)
+ Lookup.second.setHasExternalDecls();
+}
+
+/// \brief Load the declarations within this lexical storage from an
+/// external source.
+/// \return \c true if any declarations were added.
+bool
+DeclContext::LoadLexicalDeclsFromExternalStorage() const {
+ ExternalASTSource *Source = getParentASTContext().getExternalSource();
+ assert(hasExternalLexicalStorage() && Source && "No external storage?");
+
+ // Notify that we have a DeclContext that is initializing.
+ ExternalASTSource::Deserializing ADeclContext(Source);
+
+ // Load the external declarations, if any.
+ SmallVector<Decl*, 64> Decls;
+ ExternalLexicalStorage = false;
+ Source->FindExternalLexicalDecls(this, Decls);
+
+ if (Decls.empty())
+ return false;
+
+ // We may have already loaded just the fields of this record, in which case
+ // we need to ignore them.
+ bool FieldsAlreadyLoaded = false;
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(this))
+ FieldsAlreadyLoaded = RD->LoadedFieldsFromExternalStorage;
+
+ // Splice the newly-read declarations into the beginning of the list
+ // of declarations.
+ Decl *ExternalFirst, *ExternalLast;
+ std::tie(ExternalFirst, ExternalLast) =
+ BuildDeclChain(Decls, FieldsAlreadyLoaded);
+ ExternalLast->NextInContextAndBits.setPointer(FirstDecl);
+ FirstDecl = ExternalFirst;
+ if (!LastDecl)
+ LastDecl = ExternalLast;
+ return true;
+}
+
+DeclContext::lookup_result
+ExternalASTSource::SetNoExternalVisibleDeclsForName(const DeclContext *DC,
+ DeclarationName Name) {
+ ASTContext &Context = DC->getParentASTContext();
+ StoredDeclsMap *Map;
+ if (!(Map = DC->LookupPtr))
+ Map = DC->CreateStoredDeclsMap(Context);
+ if (DC->NeedToReconcileExternalVisibleStorage)
+ DC->reconcileExternalVisibleStorage();
+
+ (*Map)[Name].removeExternalDecls();
+
+ return DeclContext::lookup_result();
+}
+
+DeclContext::lookup_result
+ExternalASTSource::SetExternalVisibleDeclsForName(const DeclContext *DC,
+ DeclarationName Name,
+ ArrayRef<NamedDecl*> Decls) {
+ ASTContext &Context = DC->getParentASTContext();
+ StoredDeclsMap *Map;
+ if (!(Map = DC->LookupPtr))
+ Map = DC->CreateStoredDeclsMap(Context);
+ if (DC->NeedToReconcileExternalVisibleStorage)
+ DC->reconcileExternalVisibleStorage();
+
+ StoredDeclsList &List = (*Map)[Name];
+
+ // Clear out any old external visible declarations, to avoid quadratic
+ // performance in the redeclaration checks below.
+ List.removeExternalDecls();
+
+ if (!List.isNull()) {
+ // We have both existing declarations and new declarations for this name.
+ // Some of the declarations may simply replace existing ones. Handle those
+ // first.
+ llvm::SmallVector<unsigned, 8> Skip;
+ for (unsigned I = 0, N = Decls.size(); I != N; ++I)
+ if (List.HandleRedeclaration(Decls[I], /*IsKnownNewer*/false))
+ Skip.push_back(I);
+ Skip.push_back(Decls.size());
+
+ // Add in any new declarations.
+ unsigned SkipPos = 0;
+ for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
+ if (I == Skip[SkipPos])
+ ++SkipPos;
+ else
+ List.AddSubsequentDecl(Decls[I]);
+ }
+ } else {
+ // Convert the array to a StoredDeclsList.
+ for (ArrayRef<NamedDecl*>::iterator
+ I = Decls.begin(), E = Decls.end(); I != E; ++I) {
+ if (List.isNull())
+ List.setOnlyValue(*I);
+ else
+ List.AddSubsequentDecl(*I);
+ }
+ }
+
+ return List.getLookupResult();
+}
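+
+// For example, if Decls = {D0, D1, D2} and only D1 replaces an existing
+// declaration, Skip becomes {1, 3} (3 being the sentinel); the second loop
+// then adds D0 and D2 and leaves the already-handled D1 alone.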
+
+DeclContext::decl_iterator DeclContext::decls_begin() const {
+ if (hasExternalLexicalStorage())
+ LoadLexicalDeclsFromExternalStorage();
+ return decl_iterator(FirstDecl);
+}
+
+bool DeclContext::decls_empty() const {
+ if (hasExternalLexicalStorage())
+ LoadLexicalDeclsFromExternalStorage();
+
+ return !FirstDecl;
+}
+
+bool DeclContext::containsDecl(Decl *D) const {
+ return (D->getLexicalDeclContext() == this &&
+ (D->NextInContextAndBits.getPointer() || D == LastDecl));
+}
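+
+// Note: a decl whose next pointer is null is in this list only if it is
+// LastDecl; removeDecl below clears the next pointer of anything it splices
+// out, which is what keeps this O(1) check sound.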
+
+void DeclContext::removeDecl(Decl *D) {
+ assert(D->getLexicalDeclContext() == this &&
+ "decl being removed from non-lexical context");
+ assert((D->NextInContextAndBits.getPointer() || D == LastDecl) &&
+ "decl is not in decls list");
+
+ // Remove D from the decl chain. This is O(n) but hopefully rare.
+ if (D == FirstDecl) {
+ if (D == LastDecl)
+ FirstDecl = LastDecl = nullptr;
+ else
+ FirstDecl = D->NextInContextAndBits.getPointer();
+ } else {
+ for (Decl *I = FirstDecl; true; I = I->NextInContextAndBits.getPointer()) {
+ assert(I && "decl not found in linked list");
+ if (I->NextInContextAndBits.getPointer() == D) {
+ I->NextInContextAndBits.setPointer(D->NextInContextAndBits.getPointer());
+ if (D == LastDecl) LastDecl = I;
+ break;
+ }
+ }
+ }
+
+ // Mark that D is no longer in the decl chain.
+ D->NextInContextAndBits.setPointer(nullptr);
+
+ // Remove D from the lookup table if necessary.
+ if (isa<NamedDecl>(D)) {
+ NamedDecl *ND = cast<NamedDecl>(D);
+
+ // Remove only decls that have a name
+ if (!ND->getDeclName()) return;
+
+ auto *DC = this;
+ do {
+ StoredDeclsMap *Map = DC->getPrimaryContext()->LookupPtr;
+ if (Map) {
+ StoredDeclsMap::iterator Pos = Map->find(ND->getDeclName());
+ assert(Pos != Map->end() && "no lookup entry for decl");
+ if (Pos->second.getAsVector() || Pos->second.getAsDecl() == ND)
+ Pos->second.remove(ND);
+ }
+ } while (DC->isTransparentContext() && (DC = DC->getParent()));
+ }
+}
+
+void DeclContext::addHiddenDecl(Decl *D) {
+ assert(D->getLexicalDeclContext() == this &&
+ "Decl inserted into wrong lexical context");
+ assert(!D->getNextDeclInContext() && D != LastDecl &&
+ "Decl already inserted into a DeclContext");
+
+ if (FirstDecl) {
+ LastDecl->NextInContextAndBits.setPointer(D);
+ LastDecl = D;
+ } else {
+ FirstDecl = LastDecl = D;
+ }
+
+ // Notify a C++ record declaration that we've added a member, so it can
+ // update its class-specific state.
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(this))
+ Record->addedMember(D);
+
+ // If this is a newly-created (not de-serialized) import declaration, wire
+ // it in to the list of local import declarations.
+ if (!D->isFromASTFile()) {
+ if (ImportDecl *Import = dyn_cast<ImportDecl>(D))
+ D->getASTContext().addedLocalImportDecl(Import);
+ }
+}
+
+void DeclContext::addDecl(Decl *D) {
+ addHiddenDecl(D);
+
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ ND->getDeclContext()->getPrimaryContext()->
+ makeDeclVisibleInContextWithFlags(ND, false, true);
+}
+
+void DeclContext::addDeclInternal(Decl *D) {
+ addHiddenDecl(D);
+
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ ND->getDeclContext()->getPrimaryContext()->
+ makeDeclVisibleInContextWithFlags(ND, true, true);
+}
+
+/// shouldBeHidden - Determine whether a declaration which was declared
+/// within its semantic context should be invisible to qualified name lookup.
+static bool shouldBeHidden(NamedDecl *D) {
+ // Skip unnamed declarations.
+ if (!D->getDeclName())
+ return true;
+
+ // Skip entities that can't be found by name lookup into a particular
+ // context.
+ if ((D->getIdentifierNamespace() == 0 && !isa<UsingDirectiveDecl>(D)) ||
+ D->isTemplateParameter())
+ return true;
+
+ // Skip template specializations.
+ // FIXME: This feels like a hack. Should DeclarationName support
+ // template-ids, or is there a better way to keep specializations
+ // from being visible?
+ if (isa<ClassTemplateSpecializationDecl>(D))
+ return true;
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isFunctionTemplateSpecialization())
+ return true;
+
+ return false;
+}
+
+/// buildLookup - Build the lookup data structure with all of the
+/// declarations in this DeclContext (and any other contexts linked
+/// to it or transparent contexts nested within it) and return it.
+///
+/// Note that the produced map may omit declarations from an
+/// external source. If it does, those entries will be marked with
+/// the 'hasExternalDecls' flag.
+StoredDeclsMap *DeclContext::buildLookup() {
+ assert(this == getPrimaryContext() && "buildLookup called on non-primary DC");
+
+ if (!HasLazyLocalLexicalLookups && !HasLazyExternalLexicalLookups)
+ return LookupPtr;
+
+ SmallVector<DeclContext *, 2> Contexts;
+ collectAllContexts(Contexts);
+
+ if (HasLazyExternalLexicalLookups) {
+ HasLazyExternalLexicalLookups = false;
+ for (auto *DC : Contexts) {
+ if (DC->hasExternalLexicalStorage())
+ HasLazyLocalLexicalLookups |=
+ DC->LoadLexicalDeclsFromExternalStorage();
+ }
+
+ if (!HasLazyLocalLexicalLookups)
+ return LookupPtr;
+ }
+
+ for (auto *DC : Contexts)
+ buildLookupImpl(DC, hasExternalVisibleStorage());
+
+ // We no longer have any lazy decls.
+ HasLazyLocalLexicalLookups = false;
+ return LookupPtr;
+}
+
+/// buildLookupImpl - Build part of the lookup data structure for the
+/// declarations contained within DCtx, which will either be this
+/// DeclContext, a DeclContext linked to it, or a transparent context
+/// nested within it.
+void DeclContext::buildLookupImpl(DeclContext *DCtx, bool Internal) {
+ for (Decl *D : DCtx->noload_decls()) {
+ // Insert this declaration into the lookup structure, but only if
+ // it's semantically within its decl context. Any other decls which
+ // should be found in this context are added eagerly.
+ //
+ // If it's from an AST file, don't add it now. It'll get handled by
+ // FindExternalVisibleDeclsByName if needed. Exception: if we're not
+ // in C++, we do not track external visible decls for the TU, so in
+ // that case we need to collect them all here.
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ if (ND->getDeclContext() == DCtx && !shouldBeHidden(ND) &&
+ (!ND->isFromASTFile() ||
+ (isTranslationUnit() &&
+ !getParentASTContext().getLangOpts().CPlusPlus)))
+ makeDeclVisibleInContextImpl(ND, Internal);
+
+ // If this declaration is itself a transparent declaration context
+ // or inline namespace, add the members of this declaration of that
+ // context (recursively).
+ if (DeclContext *InnerCtx = dyn_cast<DeclContext>(D))
+ if (InnerCtx->isTransparentContext() || InnerCtx->isInlineNamespace())
+ buildLookupImpl(InnerCtx, Internal);
+ }
+}
+
+NamedDecl *const DeclContextLookupResult::SingleElementDummyList = nullptr;
+
+DeclContext::lookup_result
+DeclContext::lookup(DeclarationName Name) const {
+ assert(DeclKind != Decl::LinkageSpec &&
+ "Should not perform lookups into linkage specs!");
+
+ const DeclContext *PrimaryContext = getPrimaryContext();
+ if (PrimaryContext != this)
+ return PrimaryContext->lookup(Name);
+
+ // If we have an external source, ensure that any later redeclarations of this
+ // context have been loaded, since they may add names to the result of this
+ // lookup (or add external visible storage).
+ ExternalASTSource *Source = getParentASTContext().getExternalSource();
+ if (Source)
+ (void)cast<Decl>(this)->getMostRecentDecl();
+
+ if (hasExternalVisibleStorage()) {
+ assert(Source && "external visible storage but no external source?");
+
+ if (NeedToReconcileExternalVisibleStorage)
+ reconcileExternalVisibleStorage();
+
+ StoredDeclsMap *Map = LookupPtr;
+
+ if (HasLazyLocalLexicalLookups || HasLazyExternalLexicalLookups)
+ // FIXME: Make buildLookup const?
+ Map = const_cast<DeclContext*>(this)->buildLookup();
+
+ if (!Map)
+ Map = CreateStoredDeclsMap(getParentASTContext());
+
+ // If we have a lookup result with no external decls, we are done.
+ std::pair<StoredDeclsMap::iterator, bool> R =
+ Map->insert(std::make_pair(Name, StoredDeclsList()));
+ if (!R.second && !R.first->second.hasExternalDecls())
+ return R.first->second.getLookupResult();
+
+ if (Source->FindExternalVisibleDeclsByName(this, Name) || !R.second) {
+ if (StoredDeclsMap *Map = LookupPtr) {
+ StoredDeclsMap::iterator I = Map->find(Name);
+ if (I != Map->end())
+ return I->second.getLookupResult();
+ }
+ }
+
+ return lookup_result();
+ }
+
+ StoredDeclsMap *Map = LookupPtr;
+ if (HasLazyLocalLexicalLookups || HasLazyExternalLexicalLookups)
+ Map = const_cast<DeclContext*>(this)->buildLookup();
+
+ if (!Map)
+ return lookup_result();
+
+ StoredDeclsMap::iterator I = Map->find(Name);
+ if (I == Map->end())
+ return lookup_result();
+
+ return I->second.getLookupResult();
+}
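+
+// Typical use (sketch): iterate the result directly, e.g.
+//   for (NamedDecl *D : DC->lookup(Name))
+//     ...
+// where DC is a DeclContext* and Name a DeclarationName already in hand.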
+
+DeclContext::lookup_result
+DeclContext::noload_lookup(DeclarationName Name) {
+ assert(DeclKind != Decl::LinkageSpec &&
+ "Should not perform lookups into linkage specs!");
+
+ DeclContext *PrimaryContext = getPrimaryContext();
+ if (PrimaryContext != this)
+ return PrimaryContext->noload_lookup(Name);
+
+ // If we have any lazy lexical declarations not in our lookup map, add them
+ // now. Don't import any external declarations, not even if we know we have
+ // some missing from the external visible lookups.
+ if (HasLazyLocalLexicalLookups) {
+ SmallVector<DeclContext *, 2> Contexts;
+ collectAllContexts(Contexts);
+ for (unsigned I = 0, N = Contexts.size(); I != N; ++I)
+ buildLookupImpl(Contexts[I], hasExternalVisibleStorage());
+ HasLazyLocalLexicalLookups = false;
+ }
+
+ StoredDeclsMap *Map = LookupPtr;
+ if (!Map)
+ return lookup_result();
+
+ StoredDeclsMap::iterator I = Map->find(Name);
+ return I != Map->end() ? I->second.getLookupResult()
+ : lookup_result();
+}
+
+void DeclContext::localUncachedLookup(DeclarationName Name,
+ SmallVectorImpl<NamedDecl *> &Results) {
+ Results.clear();
+
+ // If there's no external storage, just perform a normal lookup and copy
+ // the results.
+ if (!hasExternalVisibleStorage() && !hasExternalLexicalStorage() && Name) {
+ lookup_result LookupResults = lookup(Name);
+ Results.insert(Results.end(), LookupResults.begin(), LookupResults.end());
+ return;
+ }
+
+ // If we have a lookup table, check there first. Maybe we'll get lucky.
+ // FIXME: Should we be checking these flags on the primary context?
+ if (Name && !HasLazyLocalLexicalLookups && !HasLazyExternalLexicalLookups) {
+ if (StoredDeclsMap *Map = LookupPtr) {
+ StoredDeclsMap::iterator Pos = Map->find(Name);
+ if (Pos != Map->end()) {
+ Results.insert(Results.end(),
+ Pos->second.getLookupResult().begin(),
+ Pos->second.getLookupResult().end());
+ return;
+ }
+ }
+ }
+
+ // Slow case: grovel through the declarations in our chain looking for
+ // matches.
+ // FIXME: If we have lazy external declarations, this will not find them!
+ // FIXME: Should we CollectAllContexts and walk them all here?
+ for (Decl *D = FirstDecl; D; D = D->getNextDeclInContext()) {
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ if (ND->getDeclName() == Name)
+ Results.push_back(ND);
+ }
+}
+
+DeclContext *DeclContext::getRedeclContext() {
+ DeclContext *Ctx = this;
+ // Skip through transparent contexts.
+ while (Ctx->isTransparentContext())
+ Ctx = Ctx->getParent();
+ return Ctx;
+}
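+
+// For example, a function declared inside 'extern "C++" { }' at file scope
+// has the linkage spec as its context, but getRedeclContext() skips that
+// transparent context and returns the translation unit, which is where
+// redeclaration matching happens.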
+
+DeclContext *DeclContext::getEnclosingNamespaceContext() {
+ DeclContext *Ctx = this;
+ // Skip through non-namespace, non-translation-unit contexts.
+ while (!Ctx->isFileContext())
+ Ctx = Ctx->getParent();
+ return Ctx->getPrimaryContext();
+}
+
+RecordDecl *DeclContext::getOuterLexicalRecordContext() {
+ // Loop until we find a non-record context.
+ RecordDecl *OutermostRD = nullptr;
+ DeclContext *DC = this;
+ while (DC->isRecord()) {
+ OutermostRD = cast<RecordDecl>(DC);
+ DC = DC->getLexicalParent();
+ }
+ return OutermostRD;
+}
+
+bool DeclContext::InEnclosingNamespaceSetOf(const DeclContext *O) const {
+ // For non-file contexts, this is equivalent to Equals.
+ if (!isFileContext())
+ return O->Equals(this);
+
+ do {
+ if (O->Equals(this))
+ return true;
+
+ const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(O);
+ if (!NS || !NS->isInline())
+ break;
+ O = NS->getParent();
+ } while (O);
+
+ return false;
+}
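+
+// For example, with 'namespace A { inline namespace B { } }', the enclosing
+// namespace set of B is {B, A}: A->InEnclosingNamespaceSetOf(B) is true,
+// while B->InEnclosingNamespaceSetOf(A) is false.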
+
+void DeclContext::makeDeclVisibleInContext(NamedDecl *D) {
+ DeclContext *PrimaryDC = this->getPrimaryContext();
+ DeclContext *DeclDC = D->getDeclContext()->getPrimaryContext();
+ // If the decl is being added outside of its semantic decl context, we
+ // need to ensure that we eagerly build the lookup information for it.
+ PrimaryDC->makeDeclVisibleInContextWithFlags(D, false, PrimaryDC == DeclDC);
+}
+
+void DeclContext::makeDeclVisibleInContextWithFlags(NamedDecl *D, bool Internal,
+ bool Recoverable) {
+ assert(this == getPrimaryContext() && "expected a primary DC");
+
+ // Skip declarations within functions.
+ if (isFunctionOrMethod())
+ return;
+
+ // Skip declarations which should be invisible to name lookup.
+ if (shouldBeHidden(D))
+ return;
+
+ // If we already have a lookup data structure, perform the insertion into
+ // it. If we might have externally-stored decls with this name, look them
+ // up and perform the insertion. If this decl was declared outside its
+ // semantic context, buildLookup won't add it, so add it now.
+ //
+ // FIXME: As a performance hack, don't add such decls into the translation
+ // unit unless we're in C++, since qualified lookup into the TU is never
+ // performed.
+ if (LookupPtr || hasExternalVisibleStorage() ||
+ ((!Recoverable || D->getDeclContext() != D->getLexicalDeclContext()) &&
+ (getParentASTContext().getLangOpts().CPlusPlus ||
+ !isTranslationUnit()))) {
+ // If we have lazily omitted any decls, they might have the same name as
+ // the decl which we are adding, so build a full lookup table before adding
+ // this decl.
+ buildLookup();
+ makeDeclVisibleInContextImpl(D, Internal);
+ } else {
+ HasLazyLocalLexicalLookups = true;
+ }
+
+ // If we are a transparent context or inline namespace, insert into our
+ // parent context, too. This operation is recursive.
+ if (isTransparentContext() || isInlineNamespace())
+ getParent()->getPrimaryContext()->
+ makeDeclVisibleInContextWithFlags(D, Internal, Recoverable);
+
+ Decl *DCAsDecl = cast<Decl>(this);
+ // Notify that a decl was made visible unless we are a Tag being defined.
+ if (!(isa<TagDecl>(DCAsDecl) && cast<TagDecl>(DCAsDecl)->isBeingDefined()))
+ if (ASTMutationListener *L = DCAsDecl->getASTMutationListener())
+ L->AddedVisibleDecl(this, D);
+}
+
+void DeclContext::makeDeclVisibleInContextImpl(NamedDecl *D, bool Internal) {
+ // Find or create the stored declaration map.
+ StoredDeclsMap *Map = LookupPtr;
+ if (!Map) {
+ ASTContext *C = &getParentASTContext();
+ Map = CreateStoredDeclsMap(*C);
+ }
+
+ // If there is an external AST source, load any declarations it knows about
+ // with this declaration's name.
+ // If the lookup table contains an entry about this name it means that we
+ // have already checked the external source.
+ if (!Internal)
+ if (ExternalASTSource *Source = getParentASTContext().getExternalSource())
+ if (hasExternalVisibleStorage() &&
+ Map->find(D->getDeclName()) == Map->end())
+ Source->FindExternalVisibleDeclsByName(this, D->getDeclName());
+
+ // Insert this declaration into the map.
+ StoredDeclsList &DeclNameEntries = (*Map)[D->getDeclName()];
+
+ if (Internal) {
+ // If this is being added as part of loading an external declaration,
+ // this may not be the only external declaration with this name.
+ // In this case, we never try to replace an existing declaration; we'll
+ // handle that when we finalize the list of declarations for this name.
+ DeclNameEntries.setHasExternalDecls();
+ DeclNameEntries.AddSubsequentDecl(D);
+ return;
+ }
+
+ if (DeclNameEntries.isNull()) {
+ DeclNameEntries.setOnlyValue(D);
+ return;
+ }
+
+ if (DeclNameEntries.HandleRedeclaration(D, /*IsKnownNewer*/!Internal)) {
+ // This declaration has replaced an existing one for which
+ // declarationReplaces returns true.
+ return;
+ }
+
+ // Put this declaration into the appropriate slot.
+ DeclNameEntries.AddSubsequentDecl(D);
+}
+
+UsingDirectiveDecl *DeclContext::udir_iterator::operator*() const {
+ return cast<UsingDirectiveDecl>(*I);
+}
+
+/// Returns iterator range [First, Last) of UsingDirectiveDecls stored within
+/// this context.
+DeclContext::udir_range DeclContext::using_directives() const {
+ // FIXME: Use something more efficient than normal lookup for using
+ // directives. In C++, using directives are looked up more than anything else.
+ lookup_result Result = lookup(UsingDirectiveDecl::getName());
+ return udir_range(Result.begin(), Result.end());
+}
+
+//===----------------------------------------------------------------------===//
+// Creation and Destruction of StoredDeclsMaps
+//===----------------------------------------------------------------------===//
+
+StoredDeclsMap *DeclContext::CreateStoredDeclsMap(ASTContext &C) const {
+ assert(!LookupPtr && "context already has a decls map");
+ assert(getPrimaryContext() == this &&
+ "creating decls map on non-primary context");
+
+ StoredDeclsMap *M;
+ bool Dependent = isDependentContext();
+ if (Dependent)
+ M = new DependentStoredDeclsMap();
+ else
+ M = new StoredDeclsMap();
+ M->Previous = C.LastSDM;
+ C.LastSDM = llvm::PointerIntPair<StoredDeclsMap*,1>(M, Dependent);
+ LookupPtr = M;
+ return M;
+}
+
+void ASTContext::ReleaseDeclContextMaps() {
+ // It's okay to delete DependentStoredDeclsMaps via a StoredDeclsMap
+ // pointer because the subclass doesn't add anything that needs to
+ // be deleted.
+ StoredDeclsMap::DestroyAll(LastSDM.getPointer(), LastSDM.getInt());
+}
+
+void StoredDeclsMap::DestroyAll(StoredDeclsMap *Map, bool Dependent) {
+ while (Map) {
+ // Advance the iteration before we invalidate memory.
+ llvm::PointerIntPair<StoredDeclsMap*,1> Next = Map->Previous;
+
+ if (Dependent)
+ delete static_cast<DependentStoredDeclsMap*>(Map);
+ else
+ delete Map;
+
+ Map = Next.getPointer();
+ Dependent = Next.getInt();
+ }
+}
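+
+// The maps form a singly-linked list through 'Previous'; each link also
+// carries the dependent bit of the map it points at, so the loop above can
+// invoke the right destructor without any vtable.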
+
+DependentDiagnostic *DependentDiagnostic::Create(ASTContext &C,
+ DeclContext *Parent,
+ const PartialDiagnostic &PDiag) {
+ assert(Parent->isDependentContext()
+ && "cannot iterate dependent diagnostics of non-dependent context");
+ Parent = Parent->getPrimaryContext();
+ if (!Parent->LookupPtr)
+ Parent->CreateStoredDeclsMap(C);
+
+ DependentStoredDeclsMap *Map =
+ static_cast<DependentStoredDeclsMap *>(Parent->LookupPtr);
+
+ // Allocate the copy of the PartialDiagnostic via the ASTContext's
+ // BumpPtrAllocator, rather than the ASTContext itself.
+ PartialDiagnostic::Storage *DiagStorage = nullptr;
+ if (PDiag.hasStorage())
+ DiagStorage = new (C) PartialDiagnostic::Storage;
+
+ DependentDiagnostic *DD = new (C) DependentDiagnostic(PDiag, DiagStorage);
+
+ // TODO: Maybe we shouldn't reverse the order during insertion.
+ DD->NextDiagnostic = Map->FirstDiagnostic;
+ Map->FirstDiagnostic = DD;
+
+ return DD;
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp b/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
new file mode 100644
index 0000000..4f24fdc
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
@@ -0,0 +1,2261 @@
+//===--- DeclCXX.cpp - C++ Declaration AST Node Implementation ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the C++ related Decl classes.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTLambda.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Decl Allocation/Deallocation Method Implementations
+//===----------------------------------------------------------------------===//
+
+void AccessSpecDecl::anchor() { }
+
+AccessSpecDecl *AccessSpecDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) AccessSpecDecl(EmptyShell());
+}
+
+void LazyASTUnresolvedSet::getFromExternalSource(ASTContext &C) const {
+ ExternalASTSource *Source = C.getExternalSource();
+ assert(Impl.Decls.isLazy() && "getFromExternalSource for non-lazy set");
+ assert(Source && "getFromExternalSource with no external source");
+
+ for (ASTUnresolvedSet::iterator I = Impl.begin(); I != Impl.end(); ++I)
+ I.setDecl(cast<NamedDecl>(Source->GetExternalDecl(
+ reinterpret_cast<uintptr_t>(I.getDecl()) >> 2)));
+ Impl.Decls.setLazy(false);
+}
+
+CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
+ : UserDeclaredConstructor(false), UserDeclaredSpecialMembers(0),
+ Aggregate(true), PlainOldData(true), Empty(true), Polymorphic(false),
+ Abstract(false), IsStandardLayout(true), HasNoNonEmptyBases(true),
+ HasPrivateFields(false), HasProtectedFields(false), HasPublicFields(false),
+ HasMutableFields(false), HasVariantMembers(false), HasOnlyCMembers(true),
+ HasInClassInitializer(false), HasUninitializedReferenceMember(false),
+ NeedOverloadResolutionForMoveConstructor(false),
+ NeedOverloadResolutionForMoveAssignment(false),
+ NeedOverloadResolutionForDestructor(false),
+ DefaultedMoveConstructorIsDeleted(false),
+ DefaultedMoveAssignmentIsDeleted(false),
+ DefaultedDestructorIsDeleted(false),
+ HasTrivialSpecialMembers(SMF_All),
+ DeclaredNonTrivialSpecialMembers(0),
+ HasIrrelevantDestructor(true),
+ HasConstexprNonCopyMoveConstructor(false),
+ DefaultedDefaultConstructorIsConstexpr(true),
+ HasConstexprDefaultConstructor(false),
+ HasNonLiteralTypeFieldsOrBases(false), ComputedVisibleConversions(false),
+ UserProvidedDefaultConstructor(false), DeclaredSpecialMembers(0),
+ ImplicitCopyConstructorHasConstParam(true),
+ ImplicitCopyAssignmentHasConstParam(true),
+ HasDeclaredCopyConstructorWithConstParam(false),
+ HasDeclaredCopyAssignmentWithConstParam(false),
+ IsLambda(false), IsParsingBaseSpecifiers(false), NumBases(0), NumVBases(0),
+ Bases(), VBases(),
+ Definition(D), FirstFriend() {
+}
+
+CXXBaseSpecifier *CXXRecordDecl::DefinitionData::getBasesSlowCase() const {
+ return Bases.get(Definition->getASTContext().getExternalSource());
+}
+
+CXXBaseSpecifier *CXXRecordDecl::DefinitionData::getVBasesSlowCase() const {
+ return VBases.get(Definition->getASTContext().getExternalSource());
+}
+
+CXXRecordDecl::CXXRecordDecl(Kind K, TagKind TK, const ASTContext &C,
+ DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ CXXRecordDecl *PrevDecl)
+ : RecordDecl(K, TK, C, DC, StartLoc, IdLoc, Id, PrevDecl),
+ DefinitionData(PrevDecl ? PrevDecl->DefinitionData
+ : DefinitionDataPtr(this)),
+ TemplateOrInstantiation() {}
+
+CXXRecordDecl *CXXRecordDecl::Create(const ASTContext &C, TagKind TK,
+ DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ CXXRecordDecl* PrevDecl,
+ bool DelayTypeCreation) {
+ CXXRecordDecl *R = new (C, DC) CXXRecordDecl(CXXRecord, TK, C, DC, StartLoc,
+ IdLoc, Id, PrevDecl);
+ R->MayHaveOutOfDateDef = C.getLangOpts().Modules;
+
+ // FIXME: DelayTypeCreation seems like such a hack
+ if (!DelayTypeCreation)
+ C.getTypeDeclType(R, PrevDecl);
+ return R;
+}
+
+CXXRecordDecl *
+CXXRecordDecl::CreateLambda(const ASTContext &C, DeclContext *DC,
+ TypeSourceInfo *Info, SourceLocation Loc,
+ bool Dependent, bool IsGeneric,
+ LambdaCaptureDefault CaptureDefault) {
+ CXXRecordDecl *R =
+ new (C, DC) CXXRecordDecl(CXXRecord, TTK_Class, C, DC, Loc, Loc,
+ nullptr, nullptr);
+ R->IsBeingDefined = true;
+ R->DefinitionData =
+ new (C) struct LambdaDefinitionData(R, Info, Dependent, IsGeneric,
+ CaptureDefault);
+ R->MayHaveOutOfDateDef = false;
+ R->setImplicit(true);
+ C.getTypeDeclType(R, /*PrevDecl=*/nullptr);
+ return R;
+}
+
+CXXRecordDecl *
+CXXRecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
+ CXXRecordDecl *R = new (C, ID) CXXRecordDecl(
+ CXXRecord, TTK_Struct, C, nullptr, SourceLocation(), SourceLocation(),
+ nullptr, nullptr);
+ R->MayHaveOutOfDateDef = false;
+ return R;
+}
+
+void
+CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
+ unsigned NumBases) {
+ ASTContext &C = getASTContext();
+
+ if (!data().Bases.isOffset() && data().NumBases > 0)
+ C.Deallocate(data().getBases());
+
+ if (NumBases) {
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is [...] a class with [...] no base classes [...].
+ data().Aggregate = false;
+
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class...
+ data().PlainOldData = false;
+ }
+
+ // The set of seen virtual base types.
+ llvm::SmallPtrSet<CanQualType, 8> SeenVBaseTypes;
+
+ // The virtual bases of this class.
+ SmallVector<const CXXBaseSpecifier *, 8> VBases;
+
+ data().Bases = new(C) CXXBaseSpecifier [NumBases];
+ data().NumBases = NumBases;
+ for (unsigned i = 0; i < NumBases; ++i) {
+ data().getBases()[i] = *Bases[i];
+ // Keep track of inherited vbases for this base class.
+ const CXXBaseSpecifier *Base = Bases[i];
+ QualType BaseType = Base->getType();
+ // Skip dependent types; we can't do any checking on them now.
+ if (BaseType->isDependentType())
+ continue;
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
+
+ // A class with a non-empty base class is not empty.
+ // FIXME: Standard ref?
+ if (!BaseClassDecl->isEmpty()) {
+ if (!data().Empty) {
+ // C++0x [class]p7:
+ // A standard-layout class is a class that:
+ // [...]
+ // -- either has no non-static data members in the most derived
+ // class and at most one base class with non-static data members,
+ // or has no base classes with non-static data members, and
+ // If this is the second non-empty base, then neither of these two
+ // clauses can be true.
+ data().IsStandardLayout = false;
+ }
+
+ data().Empty = false;
+ data().HasNoNonEmptyBases = false;
+ }
+
+ // C++ [class.virtual]p1:
+ // A class that declares or inherits a virtual function is called a
+ // polymorphic class.
+ if (BaseClassDecl->isPolymorphic())
+ data().Polymorphic = true;
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that: [...]
+ // -- has no non-standard-layout base classes
+ if (!BaseClassDecl->isStandardLayout())
+ data().IsStandardLayout = false;
+
+ // Record if this base is the first non-literal field or base.
+ if (!hasNonLiteralTypeFieldsOrBases() && !BaseType->isLiteralType(C))
+ data().HasNonLiteralTypeFieldsOrBases = true;
+
+ // Now go through all virtual bases of this base and add them.
+ for (const auto &VBase : BaseClassDecl->vbases()) {
+ // Add this base if it's not already in the list.
+ if (SeenVBaseTypes.insert(C.getCanonicalType(VBase.getType())).second) {
+ VBases.push_back(&VBase);
+
+ // C++11 [class.copy]p8:
+ // The implicitly-declared copy constructor for a class X will have
+ // the form 'X::X(const X&)' if each [...] virtual base class B of X
+ // has a copy constructor whose first parameter is of type
+ // 'const B&' or 'const volatile B&' [...]
+ if (CXXRecordDecl *VBaseDecl = VBase.getType()->getAsCXXRecordDecl())
+ if (!VBaseDecl->hasCopyConstructorWithConstParam())
+ data().ImplicitCopyConstructorHasConstParam = false;
+ }
+ }
+
+ if (Base->isVirtual()) {
+ // Add this base if it's not already in the list.
+ if (SeenVBaseTypes.insert(C.getCanonicalType(BaseType)).second)
+ VBases.push_back(Base);
+
+ // C++0x [meta.unary.prop] is_empty:
+ // T is a class type, but not a union type, with ... no virtual base
+ // classes
+ data().Empty = false;
+
+ // C++11 [class.ctor]p5, C++11 [class.copy]p12, C++11 [class.copy]p25:
+ // A [default constructor, copy/move constructor, or copy/move assignment
+ // operator for a class X] is trivial [...] if:
+ // -- class X has [...] no virtual base classes
+ data().HasTrivialSpecialMembers &= SMF_Destructor;
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that: [...]
+ // -- has [...] no virtual base classes
+ data().IsStandardLayout = false;
+
+ // C++11 [dcl.constexpr]p4:
+ // In the definition of a constexpr constructor [...]
+ // -- the class shall not have any virtual base classes
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+ } else {
+ // C++ [class.ctor]p5:
+ // A default constructor is trivial [...] if:
+ // -- all the direct base classes of its class have trivial default
+ // constructors.
+ if (!BaseClassDecl->hasTrivialDefaultConstructor())
+ data().HasTrivialSpecialMembers &= ~SMF_DefaultConstructor;
+
+ // C++0x [class.copy]p13:
+ // A copy/move constructor for class X is trivial if [...]
+ // [...]
+ // -- the constructor selected to copy/move each direct base class
+ // subobject is trivial, and
+ if (!BaseClassDecl->hasTrivialCopyConstructor())
+ data().HasTrivialSpecialMembers &= ~SMF_CopyConstructor;
+ // If the base class doesn't have a simple move constructor, we'll eagerly
+ // declare it and perform overload resolution to determine which function
+ // it actually calls. If it does have a simple move constructor, this
+ // check is correct.
+ if (!BaseClassDecl->hasTrivialMoveConstructor())
+ data().HasTrivialSpecialMembers &= ~SMF_MoveConstructor;
+
+ // C++0x [class.copy]p27:
+ // A copy/move assignment operator for class X is trivial if [...]
+ // [...]
+ // -- the assignment operator selected to copy/move each direct base
+ // class subobject is trivial, and
+ if (!BaseClassDecl->hasTrivialCopyAssignment())
+ data().HasTrivialSpecialMembers &= ~SMF_CopyAssignment;
+ // If the base class doesn't have a simple move assignment, we'll eagerly
+ // declare it and perform overload resolution to determine which function
+ // it actually calls. If it does have a simple move assignment, this
+ // check is correct.
+ if (!BaseClassDecl->hasTrivialMoveAssignment())
+ data().HasTrivialSpecialMembers &= ~SMF_MoveAssignment;
+
+ // C++11 [class.ctor]p6:
+ // If that user-written default constructor would satisfy the
+ // requirements of a constexpr constructor, the implicitly-defined
+ // default constructor is constexpr.
+ if (!BaseClassDecl->hasConstexprDefaultConstructor())
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+ }
+
+ // C++ [class.ctor]p3:
+ // A destructor is trivial if all the direct base classes of its class
+ // have trivial destructors.
+ if (!BaseClassDecl->hasTrivialDestructor())
+ data().HasTrivialSpecialMembers &= ~SMF_Destructor;
+
+ if (!BaseClassDecl->hasIrrelevantDestructor())
+ data().HasIrrelevantDestructor = false;
+
+ // C++11 [class.copy]p18:
+ // The implicitly-declared copy assignment operator for a class X will
+ // have the form 'X& X::operator=(const X&)' if each direct base class B
+ // of X has a copy assignment operator whose parameter is of type 'const
+ // B&', 'const volatile B&', or 'B' [...]
+ if (!BaseClassDecl->hasCopyAssignmentWithConstParam())
+ data().ImplicitCopyAssignmentHasConstParam = false;
+
+ // C++11 [class.copy]p8:
+ // The implicitly-declared copy constructor for a class X will have
+ // the form 'X::X(const X&)' if each direct [...] base class B of X
+ // has a copy constructor whose first parameter is of type
+ // 'const B&' or 'const volatile B&' [...]
+ if (!BaseClassDecl->hasCopyConstructorWithConstParam())
+ data().ImplicitCopyConstructorHasConstParam = false;
+
+ // A class has an Objective-C object member if... or any of its bases
+ // has an Objective-C object member.
+ if (BaseClassDecl->hasObjectMember())
+ setHasObjectMember(true);
+
+ if (BaseClassDecl->hasVolatileMember())
+ setHasVolatileMember(true);
+
+ // Keep track of the presence of mutable fields.
+ if (BaseClassDecl->hasMutableFields())
+ data().HasMutableFields = true;
+
+ if (BaseClassDecl->hasUninitializedReferenceMember())
+ data().HasUninitializedReferenceMember = true;
+
+ addedClassSubobject(BaseClassDecl);
+ }
+
+ if (VBases.empty()) {
+ data().IsParsingBaseSpecifiers = false;
+ return;
+ }
+
+ // Create base specifier for any direct or indirect virtual bases.
+ data().VBases = new (C) CXXBaseSpecifier[VBases.size()];
+ data().NumVBases = VBases.size();
+ for (int I = 0, E = VBases.size(); I != E; ++I) {
+ QualType Type = VBases[I]->getType();
+ if (!Type->isDependentType())
+ addedClassSubobject(Type->getAsCXXRecordDecl());
+ data().getVBases()[I] = *VBases[I];
+ }
+
+ data().IsParsingBaseSpecifiers = false;
+}
+
+void CXXRecordDecl::addedClassSubobject(CXXRecordDecl *Subobj) {
+ // C++11 [class.copy]p11:
+ // A defaulted copy/move constructor for a class X is defined as
+ // deleted if X has:
+ // -- a direct or virtual base class B that cannot be copied/moved [...]
+ // -- a non-static data member of class type M (or array thereof)
+ // that cannot be copied or moved [...]
+ if (!Subobj->hasSimpleMoveConstructor())
+ data().NeedOverloadResolutionForMoveConstructor = true;
+
+ // C++11 [class.copy]p23:
+ // A defaulted copy/move assignment operator for a class X is defined as
+ // deleted if X has:
+ // -- a direct or virtual base class B that cannot be copied/moved [...]
+ // -- a non-static data member of class type M (or array thereof)
+ // that cannot be copied or moved [...]
+ if (!Subobj->hasSimpleMoveAssignment())
+ data().NeedOverloadResolutionForMoveAssignment = true;
+
+ // C++11 [class.ctor]p5, C++11 [class.copy]p11, C++11 [class.dtor]p5:
+ // A defaulted [ctor or dtor] for a class X is defined as
+ // deleted if X has:
+ // -- any direct or virtual base class [...] has a type with a destructor
+ // that is deleted or inaccessible from the defaulted [ctor or dtor].
+ // -- any non-static data member has a type with a destructor
+ // that is deleted or inaccessible from the defaulted [ctor or dtor].
+ if (!Subobj->hasSimpleDestructor()) {
+ data().NeedOverloadResolutionForMoveConstructor = true;
+ data().NeedOverloadResolutionForDestructor = true;
+ }
+}
+
+bool CXXRecordDecl::hasAnyDependentBases() const {
+ if (!isDependentContext())
+ return false;
+
+ return !forallBases([](const CXXRecordDecl *) { return true; });
+}
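+
+// Roughly: forallBases with an always-true predicate succeeds only if every
+// base can be resolved; a dependent (or otherwise unresolvable) base makes it
+// fail, so negating the result answers the query.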
+
+bool CXXRecordDecl::isTriviallyCopyable() const {
+ // C++0x [class]p5:
+ // A trivially copyable class is a class that:
+ // -- has no non-trivial copy constructors,
+ if (hasNonTrivialCopyConstructor()) return false;
+ // -- has no non-trivial move constructors,
+ if (hasNonTrivialMoveConstructor()) return false;
+ // -- has no non-trivial copy assignment operators,
+ if (hasNonTrivialCopyAssignment()) return false;
+ // -- has no non-trivial move assignment operators, and
+ if (hasNonTrivialMoveAssignment()) return false;
+ // -- has a trivial destructor.
+ if (!hasTrivialDestructor()) return false;
+
+ return true;
+}
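+
+// For example, 'struct P { int x; };' is trivially copyable; adding a
+// user-provided 'P(const P&)' or '~P()' marks the corresponding special
+// member non-trivial, and the check above then fails.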
+
+void CXXRecordDecl::markedVirtualFunctionPure() {
+ // C++ [class.abstract]p2:
+ // A class is abstract if it has at least one pure virtual function.
+ data().Abstract = true;
+}
+
+void CXXRecordDecl::addedMember(Decl *D) {
+ if (!D->isImplicit() &&
+ !isa<FieldDecl>(D) &&
+ !isa<IndirectFieldDecl>(D) &&
+ (!isa<TagDecl>(D) || cast<TagDecl>(D)->getTagKind() == TTK_Class ||
+ cast<TagDecl>(D)->getTagKind() == TTK_Interface))
+ data().HasOnlyCMembers = false;
+
+ // Ignore friends and invalid declarations.
+ if (D->getFriendObjectKind() || D->isInvalidDecl())
+ return;
+
+ FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D);
+ if (FunTmpl)
+ D = FunTmpl->getTemplatedDecl();
+
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (Method->isVirtual()) {
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is an array or a class with [...] no virtual functions.
+ data().Aggregate = false;
+
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class...
+ data().PlainOldData = false;
+
+ // Virtual functions make the class non-empty.
+ // FIXME: Standard ref?
+ data().Empty = false;
+
+ // C++ [class.virtual]p1:
+ // A class that declares or inherits a virtual function is called a
+ // polymorphic class.
+ data().Polymorphic = true;
+
+ // C++11 [class.ctor]p5, C++11 [class.copy]p12, C++11 [class.copy]p25:
+ // A [default constructor, copy/move constructor, or copy/move
+ // assignment operator for a class X] is trivial [...] if:
+ // -- class X has no virtual functions [...]
+ data().HasTrivialSpecialMembers &= SMF_Destructor;
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that: [...]
+ // -- has no virtual functions
+ data().IsStandardLayout = false;
+ }
+ }
+
+ // Notify the listener if an implicit member was added after the definition
+ // was completed.
+ if (!isBeingDefined() && D->isImplicit())
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->AddedCXXImplicitMember(data().Definition, D);
+
+ // The kind of special member this declaration is, if any.
+ unsigned SMKind = 0;
+
+ // Handle constructors.
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
+ if (!Constructor->isImplicit()) {
+ // Note that we have a user-declared constructor.
+ data().UserDeclaredConstructor = true;
+
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class [...]
+ // Since the POD bit is meant to be C++03 POD-ness, clear it even if the
+ // type is technically an aggregate in C++0x since it wouldn't be in 03.
+ data().PlainOldData = false;
+ }
+
+ // Technically, "user-provided" is only defined for special member
+ // functions, but the intent of the standard is clearly that it should apply
+ // to all functions.
+ bool UserProvided = Constructor->isUserProvided();
+
+ if (Constructor->isDefaultConstructor()) {
+ SMKind |= SMF_DefaultConstructor;
+
+ if (UserProvided)
+ data().UserProvidedDefaultConstructor = true;
+ if (Constructor->isConstexpr())
+ data().HasConstexprDefaultConstructor = true;
+ }
+
+ if (!FunTmpl) {
+ unsigned Quals;
+ if (Constructor->isCopyConstructor(Quals)) {
+ SMKind |= SMF_CopyConstructor;
+
+ if (Quals & Qualifiers::Const)
+ data().HasDeclaredCopyConstructorWithConstParam = true;
+ } else if (Constructor->isMoveConstructor())
+ SMKind |= SMF_MoveConstructor;
+ }
+
+ // Record if we see any constexpr constructors which are neither copy
+ // nor move constructors.
+ if (Constructor->isConstexpr() && !Constructor->isCopyOrMoveConstructor())
+ data().HasConstexprNonCopyMoveConstructor = true;
+
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is an array or a class with no user-declared
+ // constructors [...].
+ // C++11 [dcl.init.aggr]p1:
+ // An aggregate is an array or a class with no user-provided
+ // constructors [...].
+ if (getASTContext().getLangOpts().CPlusPlus11
+ ? UserProvided : !Constructor->isImplicit())
+ data().Aggregate = false;
+ }
+
+ // Handle destructors.
+ if (CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D)) {
+ SMKind |= SMF_Destructor;
+
+ if (DD->isUserProvided())
+ data().HasIrrelevantDestructor = false;
+ // If the destructor is explicitly defaulted and not trivial or not public
+ // or if the destructor is deleted, we clear HasIrrelevantDestructor in
+ // finishedDefaultedOrDeletedMember.
+
+ // C++11 [class.dtor]p5:
+ // A destructor is trivial if [...] the destructor is not virtual.
+ if (DD->isVirtual())
+ data().HasTrivialSpecialMembers &= ~SMF_Destructor;
+ }
+
+ // Handle member functions.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (Method->isCopyAssignmentOperator()) {
+ SMKind |= SMF_CopyAssignment;
+
+ const ReferenceType *ParamTy =
+ Method->getParamDecl(0)->getType()->getAs<ReferenceType>();
+ if (!ParamTy || ParamTy->getPointeeType().isConstQualified())
+ data().HasDeclaredCopyAssignmentWithConstParam = true;
+ }
+
+ if (Method->isMoveAssignmentOperator())
+ SMKind |= SMF_MoveAssignment;
+
+ // Keep the list of conversion functions up-to-date.
+ if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(D)) {
+ // FIXME: We use the 'unsafe' accessor for the access specifier here,
+ // because Sema may not have set it yet. That's really just a misdesign
+ // in Sema. However, LLDB *will* have set the access specifier correctly,
+ // and adds declarations after the class is technically completed,
+ // so completeDefinition()'s overriding of the access specifiers doesn't
+ // work.
+ AccessSpecifier AS = Conversion->getAccessUnsafe();
+
+ if (Conversion->getPrimaryTemplate()) {
+ // We don't record specializations.
+ } else {
+ ASTContext &Ctx = getASTContext();
+ ASTUnresolvedSet &Conversions = data().Conversions.get(Ctx);
+ NamedDecl *Primary =
+ FunTmpl ? cast<NamedDecl>(FunTmpl) : cast<NamedDecl>(Conversion);
+ if (Primary->getPreviousDecl())
+ Conversions.replace(cast<NamedDecl>(Primary->getPreviousDecl()),
+ Primary, AS);
+ else
+ Conversions.addDecl(Ctx, Primary, AS);
+ }
+ }
+
+ if (SMKind) {
+ // If this is the first declaration of a special member, we no longer have
+ // an implicit trivial special member.
+ data().HasTrivialSpecialMembers &=
+ data().DeclaredSpecialMembers | ~SMKind;
+
+ if (!Method->isImplicit() && !Method->isUserProvided()) {
+ // This method is user-declared but not user-provided. We can't work out
+ // whether it's trivial yet (not until we get to the end of the class).
+ // We'll handle this method in finishedDefaultedOrDeletedMember.
+ } else if (Method->isTrivial())
+ data().HasTrivialSpecialMembers |= SMKind;
+ else
+ data().DeclaredNonTrivialSpecialMembers |= SMKind;
+
+ // Note that we have declared this special member, and suppress its
+ // implicit declaration.
+ data().DeclaredSpecialMembers |= SMKind;
+
+ if (!Method->isImplicit()) {
+ data().UserDeclaredSpecialMembers |= SMKind;
+
+ // C++03 [class]p4:
+ // A POD-struct is an aggregate class that has [...] no user-defined
+ // copy assignment operator and no user-defined destructor.
+ //
+ // Since the POD bit is meant to be C++03 POD-ness, and in C++03,
+ // aggregates could not have any constructors, clear it even for an
+ // explicitly defaulted or deleted constructor, even though the type is
+ // technically an aggregate in C++0x when it wouldn't be in 03.
+ //
+ // Also, a user-declared move assignment operator makes a class non-POD.
+ // This is an extension in C++03.
+ data().PlainOldData = false;
+ }
+ }
+
+ return;
+ }
+
+ // Handle non-static data members.
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(D)) {
+ // C++ [class.bit]p2:
+ // A declaration for a bit-field that omits the identifier declares an
+ // unnamed bit-field. Unnamed bit-fields are not members and cannot be
+ // initialized.
+ if (Field->isUnnamedBitfield())
+ return;
+
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is an array or a class (clause 9) with [...] no
+ // private or protected non-static data members (clause 11).
+ //
+ // A POD must be an aggregate.
+ if (D->getAccess() == AS_private || D->getAccess() == AS_protected) {
+ data().Aggregate = false;
+ data().PlainOldData = false;
+ }
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that:
+ // [...]
+ // -- has the same access control for all non-static data members,
+ switch (D->getAccess()) {
+ case AS_private: data().HasPrivateFields = true; break;
+ case AS_protected: data().HasProtectedFields = true; break;
+ case AS_public: data().HasPublicFields = true; break;
+ case AS_none: llvm_unreachable("Invalid access specifier");
+ }
+ if ((data().HasPrivateFields + data().HasProtectedFields +
+ data().HasPublicFields) > 1)
+ data().IsStandardLayout = false;
+
+ // Keep track of the presence of mutable fields.
+ if (Field->isMutable())
+ data().HasMutableFields = true;
+
+ // C++11 [class.union]p8, DR1460:
+ // If X is a union, a non-static data member of X that is not an anonymous
+ // union is a variant member of X.
+ if (isUnion() && !Field->isAnonymousStructOrUnion())
+ data().HasVariantMembers = true;
+
+ // C++0x [class]p9:
+ // A POD struct is a class that is both a trivial class and a
+ // standard-layout class, and has no non-static data members of type
+ // non-POD struct, non-POD union (or array of such types).
+ //
+ // Automatic Reference Counting: the presence of a member of Objective-C
+ // pointer type whose lifetime is not explicitly 'none' makes the class
+ // a non-POD type.
+ ASTContext &Context = getASTContext();
+ QualType T = Context.getBaseElementType(Field->getType());
+ if (T->isObjCRetainableType() || T.isObjCGCStrong()) {
+ if (!Context.getLangOpts().ObjCAutoRefCount) {
+ setHasObjectMember(true);
+ } else if (T.getObjCLifetime() != Qualifiers::OCL_ExplicitNone) {
+ // Objective-C Automatic Reference Counting:
+ // If a class has a non-static data member of Objective-C pointer
+ // type (or array thereof), it is a non-POD type and its
+ // default constructor (if any), copy constructor, move constructor,
+ // copy assignment operator, move assignment operator, and destructor are
+ // non-trivial.
+ setHasObjectMember(true);
+ struct DefinitionData &Data = data();
+ Data.PlainOldData = false;
+ Data.HasTrivialSpecialMembers = 0;
+ Data.HasIrrelevantDestructor = false;
+ }
+ } else if (!T.isCXX98PODType(Context))
+ data().PlainOldData = false;
+
+ if (T->isReferenceType()) {
+ if (!Field->hasInClassInitializer())
+ data().HasUninitializedReferenceMember = true;
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that:
+ // -- has no non-static data members of type [...] reference,
+ data().IsStandardLayout = false;
+ }
+
+ // Record if this field is the first non-literal or volatile field or base.
+ if (!T->isLiteralType(Context) || T.isVolatileQualified())
+ data().HasNonLiteralTypeFieldsOrBases = true;
+
+ if (Field->hasInClassInitializer() ||
+ (Field->isAnonymousStructOrUnion() &&
+ Field->getType()->getAsCXXRecordDecl()->hasInClassInitializer())) {
+ data().HasInClassInitializer = true;
+
+ // C++11 [class]p5:
+ // A default constructor is trivial if [...] no non-static data member
+ // of its class has a brace-or-equal-initializer.
+ data().HasTrivialSpecialMembers &= ~SMF_DefaultConstructor;
+
+ // C++11 [dcl.init.aggr]p1:
+ // An aggregate is a [...] class with [...] no
+ // brace-or-equal-initializers for non-static data members.
+ //
+ // This rule was removed in C++1y.
+ if (!getASTContext().getLangOpts().CPlusPlus14)
+ data().Aggregate = false;
+
+ // C++11 [class]p10:
+ // A POD struct is [...] a trivial class.
+ data().PlainOldData = false;
+ }
+
+ // C++11 [class.copy]p23:
+ // A defaulted copy/move assignment operator for a class X is defined
+ // as deleted if X has:
+ // -- a non-static data member of reference type
+ if (T->isReferenceType())
+ data().DefaultedMoveAssignmentIsDeleted = true;
+
+ if (const RecordType *RecordTy = T->getAs<RecordType>()) {
+ CXXRecordDecl* FieldRec = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (FieldRec->getDefinition()) {
+ addedClassSubobject(FieldRec);
+
+ // We may need to perform overload resolution to determine whether a
+ // field can be moved if it's const or volatile qualified.
+ if (T.getCVRQualifiers() & (Qualifiers::Const | Qualifiers::Volatile)) {
+ data().NeedOverloadResolutionForMoveConstructor = true;
+ data().NeedOverloadResolutionForMoveAssignment = true;
+ }
+
+ // C++11 [class.ctor]p5, C++11 [class.copy]p11:
+ // A defaulted [special member] for a class X is defined as
+ // deleted if:
+ // -- X is a union-like class that has a variant member with a
+ // non-trivial [corresponding special member]
+ if (isUnion()) {
+ if (FieldRec->hasNonTrivialMoveConstructor())
+ data().DefaultedMoveConstructorIsDeleted = true;
+ if (FieldRec->hasNonTrivialMoveAssignment())
+ data().DefaultedMoveAssignmentIsDeleted = true;
+ if (FieldRec->hasNonTrivialDestructor())
+ data().DefaultedDestructorIsDeleted = true;
+ }
+
+ // C++0x [class.ctor]p5:
+ // A default constructor is trivial [...] if:
+ // -- for all the non-static data members of its class that are of
+ // class type (or array thereof), each such class has a trivial
+ // default constructor.
+ if (!FieldRec->hasTrivialDefaultConstructor())
+ data().HasTrivialSpecialMembers &= ~SMF_DefaultConstructor;
+
+ // C++0x [class.copy]p13:
+ // A copy/move constructor for class X is trivial if [...]
+ // [...]
+ // -- for each non-static data member of X that is of class type (or
+ // an array thereof), the constructor selected to copy/move that
+ // member is trivial;
+ if (!FieldRec->hasTrivialCopyConstructor())
+ data().HasTrivialSpecialMembers &= ~SMF_CopyConstructor;
+ // If the field doesn't have a simple move constructor, we'll eagerly
+ // declare the move constructor for this class and we'll decide whether
+ // it's trivial then.
+ if (!FieldRec->hasTrivialMoveConstructor())
+ data().HasTrivialSpecialMembers &= ~SMF_MoveConstructor;
+
+ // C++0x [class.copy]p27:
+ // A copy/move assignment operator for class X is trivial if [...]
+ // [...]
+ // -- for each non-static data member of X that is of class type (or
+ // an array thereof), the assignment operator selected to
+ // copy/move that member is trivial;
+ if (!FieldRec->hasTrivialCopyAssignment())
+ data().HasTrivialSpecialMembers &= ~SMF_CopyAssignment;
+ // If the field doesn't have a simple move assignment, we'll eagerly
+ // declare the move assignment for this class and we'll decide whether
+ // it's trivial then.
+ if (!FieldRec->hasTrivialMoveAssignment())
+ data().HasTrivialSpecialMembers &= ~SMF_MoveAssignment;
+
+ if (!FieldRec->hasTrivialDestructor())
+ data().HasTrivialSpecialMembers &= ~SMF_Destructor;
+ if (!FieldRec->hasIrrelevantDestructor())
+ data().HasIrrelevantDestructor = false;
+ if (FieldRec->hasObjectMember())
+ setHasObjectMember(true);
+ if (FieldRec->hasVolatileMember())
+ setHasVolatileMember(true);
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that:
+ // -- has no non-static data members of type non-standard-layout
+ // class (or array of such types) [...]
+ if (!FieldRec->isStandardLayout())
+ data().IsStandardLayout = false;
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that:
+ // [...]
+ // -- has no base classes of the same type as the first non-static
+ // data member.
+ // We don't want to expend bits in the state of the record decl
+ // tracking whether this is the first non-static data member so we
+ // cheat a bit and use some of the existing state: the empty bit.
+ // Virtual bases and virtual methods make a class non-empty, but they
+ // also make it non-standard-layout so we needn't check here.
+ // A non-empty base class may leave the class standard-layout, but not
+ // if we have arrived here, and have at least one non-static data
+ // member. If IsStandardLayout remains true, then the first non-static
+ // data member must come through here with Empty still true, and Empty
+ // will subsequently be set to false below.
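+      // e.g. (sketch): 'struct B {}; struct D : B { B b; };' is not
+      // standard-layout, because the base B has the same type as the first
+      // non-static data member.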
+ if (data().IsStandardLayout && data().Empty) {
+ for (const auto &BI : bases()) {
+ if (Context.hasSameUnqualifiedType(BI.getType(), T)) {
+ data().IsStandardLayout = false;
+ break;
+ }
+ }
+ }
+
+ // Keep track of the presence of mutable fields.
+ if (FieldRec->hasMutableFields())
+ data().HasMutableFields = true;
+
+ // C++11 [class.copy]p13:
+ // If the implicitly-defined constructor would satisfy the
+ // requirements of a constexpr constructor, the implicitly-defined
+ // constructor is constexpr.
+ // C++11 [dcl.constexpr]p4:
+ // -- every constructor involved in initializing non-static data
+ // members [...] shall be a constexpr constructor
+ if (!Field->hasInClassInitializer() &&
+ !FieldRec->hasConstexprDefaultConstructor() && !isUnion())
+ // The standard requires any in-class initializer to be a constant
+ // expression. We consider this to be a defect.
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+
+ // C++11 [class.copy]p8:
+ // The implicitly-declared copy constructor for a class X will have
+ // the form 'X::X(const X&)' if [...] for all the non-static data
+ // members of X that are of a class type M (or array thereof), each
+ // such class type has a copy constructor whose first parameter is
+ // of type 'const M&' or 'const volatile M&'.
+ if (!FieldRec->hasCopyConstructorWithConstParam())
+ data().ImplicitCopyConstructorHasConstParam = false;
+
+ // C++11 [class.copy]p18:
+      //   The implicitly-declared copy assignment operator for a class X will
+ // have the form 'X& X::operator=(const X&)' if [...] for all the
+ // non-static data members of X that are of a class type M (or array
+ // thereof), each such class type has a copy assignment operator whose
+ // parameter is of type 'const M&', 'const volatile M&' or 'M'.
+ if (!FieldRec->hasCopyAssignmentWithConstParam())
+ data().ImplicitCopyAssignmentHasConstParam = false;
+
+ if (FieldRec->hasUninitializedReferenceMember() &&
+ !Field->hasInClassInitializer())
+ data().HasUninitializedReferenceMember = true;
+
+ // C++11 [class.union]p8, DR1460:
+ // a non-static data member of an anonymous union that is a member of
+ // X is also a variant member of X.
+ if (FieldRec->hasVariantMembers() &&
+ Field->isAnonymousStructOrUnion())
+ data().HasVariantMembers = true;
+ }
+ } else {
+ // Base element type of field is a non-class type.
+ if (!T->isLiteralType(Context) ||
+ (!Field->hasInClassInitializer() && !isUnion()))
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+
+ // C++11 [class.copy]p23:
+ // A defaulted copy/move assignment operator for a class X is defined
+ // as deleted if X has:
+ // -- a non-static data member of const non-class type (or array
+ // thereof)
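+      // e.g. (illustrative) a member declared 'const int N;' makes the
+      // defaulted move assignment operator deleted.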
+ if (T.isConstQualified())
+ data().DefaultedMoveAssignmentIsDeleted = true;
+ }
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that:
+ // [...]
+ // -- either has no non-static data members in the most derived
+ // class and at most one base class with non-static data members,
+ // or has no base classes with non-static data members, and
+ // At this point we know that we have a non-static data member, so the last
+ // clause holds.
+ if (!data().HasNoNonEmptyBases)
+ data().IsStandardLayout = false;
+
+ // If this is not a zero-length bit-field, then the class is not empty.
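+    // e.g. (sketch): 'struct S { int : 0; };' stays empty, while any other
+    // field, including a nonzero-width bit-field, makes the class non-empty.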
+ if (data().Empty) {
+ if (!Field->isBitField() ||
+ (!Field->getBitWidth()->isTypeDependent() &&
+ !Field->getBitWidth()->isValueDependent() &&
+ Field->getBitWidthValue(Context) != 0))
+ data().Empty = false;
+ }
+ }
+
+ // Handle using declarations of conversion functions.
+ if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(D)) {
+ if (Shadow->getDeclName().getNameKind()
+ == DeclarationName::CXXConversionFunctionName) {
+ ASTContext &Ctx = getASTContext();
+ data().Conversions.get(Ctx).addDecl(Ctx, Shadow, Shadow->getAccess());
+ }
+ }
+}
+
+void CXXRecordDecl::finishedDefaultedOrDeletedMember(CXXMethodDecl *D) {
+ assert(!D->isImplicit() && !D->isUserProvided());
+
+ // The kind of special member this declaration is, if any.
+ unsigned SMKind = 0;
+
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
+ if (Constructor->isDefaultConstructor()) {
+ SMKind |= SMF_DefaultConstructor;
+ if (Constructor->isConstexpr())
+ data().HasConstexprDefaultConstructor = true;
+ }
+ if (Constructor->isCopyConstructor())
+ SMKind |= SMF_CopyConstructor;
+ else if (Constructor->isMoveConstructor())
+ SMKind |= SMF_MoveConstructor;
+ else if (Constructor->isConstexpr())
+ // We may now know that the constructor is constexpr.
+ data().HasConstexprNonCopyMoveConstructor = true;
+ } else if (isa<CXXDestructorDecl>(D)) {
+ SMKind |= SMF_Destructor;
+ if (!D->isTrivial() || D->getAccess() != AS_public || D->isDeleted())
+ data().HasIrrelevantDestructor = false;
+ } else if (D->isCopyAssignmentOperator())
+ SMKind |= SMF_CopyAssignment;
+ else if (D->isMoveAssignmentOperator())
+ SMKind |= SMF_MoveAssignment;
+
+ // Update which trivial / non-trivial special members we have.
+ // addedMember will have skipped this step for this member.
+ if (D->isTrivial())
+ data().HasTrivialSpecialMembers |= SMKind;
+ else
+ data().DeclaredNonTrivialSpecialMembers |= SMKind;
+}
+
+bool CXXRecordDecl::isCLike() const {
+ if (getTagKind() == TTK_Class || getTagKind() == TTK_Interface ||
+ !TemplateOrInstantiation.isNull())
+ return false;
+ if (!hasDefinition())
+ return true;
+
+ return isPOD() && data().HasOnlyCMembers;
+}
+
+bool CXXRecordDecl::isGenericLambda() const {
+ if (!isLambda()) return false;
+ return getLambdaData().IsGenericLambda;
+}
+
+CXXMethodDecl* CXXRecordDecl::getLambdaCallOperator() const {
+ if (!isLambda()) return nullptr;
+ DeclarationName Name =
+ getASTContext().DeclarationNames.getCXXOperatorName(OO_Call);
+ DeclContext::lookup_result Calls = lookup(Name);
+
+ assert(!Calls.empty() && "Missing lambda call operator!");
+ assert(Calls.size() == 1 && "More than one lambda call operator!");
+
+ NamedDecl *CallOp = Calls.front();
+ if (FunctionTemplateDecl *CallOpTmpl =
+ dyn_cast<FunctionTemplateDecl>(CallOp))
+ return cast<CXXMethodDecl>(CallOpTmpl->getTemplatedDecl());
+
+ return cast<CXXMethodDecl>(CallOp);
+}
+
+CXXMethodDecl* CXXRecordDecl::getLambdaStaticInvoker() const {
+ if (!isLambda()) return nullptr;
+ DeclarationName Name =
+ &getASTContext().Idents.get(getLambdaStaticInvokerName());
+ DeclContext::lookup_result Invoker = lookup(Name);
+ if (Invoker.empty()) return nullptr;
+ assert(Invoker.size() == 1 && "More than one static invoker operator!");
+ NamedDecl *InvokerFun = Invoker.front();
+ if (FunctionTemplateDecl *InvokerTemplate =
+ dyn_cast<FunctionTemplateDecl>(InvokerFun))
+ return cast<CXXMethodDecl>(InvokerTemplate->getTemplatedDecl());
+
+ return cast<CXXMethodDecl>(InvokerFun);
+}
+
+void CXXRecordDecl::getCaptureFields(
+ llvm::DenseMap<const VarDecl *, FieldDecl *> &Captures,
+ FieldDecl *&ThisCapture) const {
+ Captures.clear();
+ ThisCapture = nullptr;
+
+ LambdaDefinitionData &Lambda = getLambdaData();
+ RecordDecl::field_iterator Field = field_begin();
+ for (const LambdaCapture *C = Lambda.Captures, *CEnd = C + Lambda.NumCaptures;
+ C != CEnd; ++C, ++Field) {
+ if (C->capturesThis())
+ ThisCapture = *Field;
+ else if (C->capturesVariable())
+ Captures[C->getCapturedVar()] = *Field;
+ }
+ assert(Field == field_end());
+}
+
+TemplateParameterList *
+CXXRecordDecl::getGenericLambdaTemplateParameterList() const {
+ if (!isLambda()) return nullptr;
+ CXXMethodDecl *CallOp = getLambdaCallOperator();
+ if (FunctionTemplateDecl *Tmpl = CallOp->getDescribedFunctionTemplate())
+ return Tmpl->getTemplateParameters();
+ return nullptr;
+}
+
+static CanQualType GetConversionType(ASTContext &Context, NamedDecl *Conv) {
+ QualType T =
+ cast<CXXConversionDecl>(Conv->getUnderlyingDecl()->getAsFunction())
+ ->getConversionType();
+ return Context.getCanonicalType(T);
+}
+
+/// Collect the visible conversions of a base class.
+///
+/// \param Record a base class of the class we're considering
+/// \param InVirtual whether this base class is a virtual base (or a base
+/// of a virtual base)
+/// \param Access the access along the inheritance path to this base
+/// \param ParentHiddenTypes the conversions provided by the inheritors
+/// of this base
+/// \param Output the set to which to add conversions from non-virtual bases
+/// \param VOutput the set to which to add conversions from virtual bases
+/// \param HiddenVBaseCs the set of conversions which were hidden in a
+/// virtual base along some inheritance path
+static void CollectVisibleConversions(ASTContext &Context,
+ CXXRecordDecl *Record,
+ bool InVirtual,
+ AccessSpecifier Access,
+ const llvm::SmallPtrSet<CanQualType, 8> &ParentHiddenTypes,
+ ASTUnresolvedSet &Output,
+ UnresolvedSetImpl &VOutput,
+ llvm::SmallPtrSet<NamedDecl*, 8> &HiddenVBaseCs) {
+ // The set of types which have conversions in this class or its
+ // subclasses. As an optimization, we don't copy the derived set
+ // unless it might change.
+ const llvm::SmallPtrSet<CanQualType, 8> *HiddenTypes = &ParentHiddenTypes;
+ llvm::SmallPtrSet<CanQualType, 8> HiddenTypesBuffer;
+
+ // Collect the direct conversions and figure out which conversions
+ // will be hidden in the subclasses.
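+  // For example (illustrative): if a base class declares 'operator int()'
+  // and a derived class declares its own 'operator int()', the base's
+  // conversion is hidden along that path.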
+ CXXRecordDecl::conversion_iterator ConvI = Record->conversion_begin();
+ CXXRecordDecl::conversion_iterator ConvE = Record->conversion_end();
+ if (ConvI != ConvE) {
+ HiddenTypesBuffer = ParentHiddenTypes;
+ HiddenTypes = &HiddenTypesBuffer;
+
+ for (CXXRecordDecl::conversion_iterator I = ConvI; I != ConvE; ++I) {
+ CanQualType ConvType(GetConversionType(Context, I.getDecl()));
+ bool Hidden = ParentHiddenTypes.count(ConvType);
+ if (!Hidden)
+ HiddenTypesBuffer.insert(ConvType);
+
+ // If this conversion is hidden and we're in a virtual base,
+ // remember that it's hidden along some inheritance path.
+ if (Hidden && InVirtual)
+ HiddenVBaseCs.insert(cast<NamedDecl>(I.getDecl()->getCanonicalDecl()));
+
+ // If this conversion isn't hidden, add it to the appropriate output.
+ else if (!Hidden) {
+ AccessSpecifier IAccess
+ = CXXRecordDecl::MergeAccess(Access, I.getAccess());
+
+ if (InVirtual)
+ VOutput.addDecl(I.getDecl(), IAccess);
+ else
+ Output.addDecl(Context, I.getDecl(), IAccess);
+ }
+ }
+ }
+
+ // Collect information recursively from any base classes.
+ for (const auto &I : Record->bases()) {
+ const RecordType *RT = I.getType()->getAs<RecordType>();
+ if (!RT) continue;
+
+ AccessSpecifier BaseAccess
+ = CXXRecordDecl::MergeAccess(Access, I.getAccessSpecifier());
+ bool BaseInVirtual = InVirtual || I.isVirtual();
+
+ CXXRecordDecl *Base = cast<CXXRecordDecl>(RT->getDecl());
+ CollectVisibleConversions(Context, Base, BaseInVirtual, BaseAccess,
+ *HiddenTypes, Output, VOutput, HiddenVBaseCs);
+ }
+}
+
+/// Collect the visible conversions of a class.
+///
+/// This would be extremely straightforward if it weren't for virtual
+/// bases. It might be worth special-casing that, really.
+static void CollectVisibleConversions(ASTContext &Context,
+ CXXRecordDecl *Record,
+ ASTUnresolvedSet &Output) {
+ // The collection of all conversions in virtual bases that we've
+ // found. These will be added to the output as long as they don't
+ // appear in the hidden-conversions set.
+ UnresolvedSet<8> VBaseCs;
+
+ // The set of conversions in virtual bases that we've determined to
+ // be hidden.
+ llvm::SmallPtrSet<NamedDecl*, 8> HiddenVBaseCs;
+
+ // The set of types hidden by classes derived from this one.
+ llvm::SmallPtrSet<CanQualType, 8> HiddenTypes;
+
+ // Go ahead and collect the direct conversions and add them to the
+ // hidden-types set.
+ CXXRecordDecl::conversion_iterator ConvI = Record->conversion_begin();
+ CXXRecordDecl::conversion_iterator ConvE = Record->conversion_end();
+ Output.append(Context, ConvI, ConvE);
+ for (; ConvI != ConvE; ++ConvI)
+ HiddenTypes.insert(GetConversionType(Context, ConvI.getDecl()));
+
+ // Recursively collect conversions from base classes.
+ for (const auto &I : Record->bases()) {
+ const RecordType *RT = I.getType()->getAs<RecordType>();
+ if (!RT) continue;
+
+ CollectVisibleConversions(Context, cast<CXXRecordDecl>(RT->getDecl()),
+ I.isVirtual(), I.getAccessSpecifier(),
+ HiddenTypes, Output, VBaseCs, HiddenVBaseCs);
+ }
+
+ // Add any unhidden conversions provided by virtual bases.
+ for (UnresolvedSetIterator I = VBaseCs.begin(), E = VBaseCs.end();
+ I != E; ++I) {
+ if (!HiddenVBaseCs.count(cast<NamedDecl>(I.getDecl()->getCanonicalDecl())))
+ Output.addDecl(Context, I.getDecl(), I.getAccess());
+ }
+}
+
+/// getVisibleConversionFunctions - Get all conversion functions visible
+/// in the current class, including conversion function templates.
+llvm::iterator_range<CXXRecordDecl::conversion_iterator>
+CXXRecordDecl::getVisibleConversionFunctions() {
+ ASTContext &Ctx = getASTContext();
+
+ ASTUnresolvedSet *Set;
+ if (bases_begin() == bases_end()) {
+    // If this is a root class, all of its conversions are visible.
+ Set = &data().Conversions.get(Ctx);
+ } else {
+ Set = &data().VisibleConversions.get(Ctx);
+    // If the visible conversions have not been computed yet, compute them.
+ if (!data().ComputedVisibleConversions) {
+ CollectVisibleConversions(Ctx, this, *Set);
+ data().ComputedVisibleConversions = true;
+ }
+ }
+ return llvm::make_range(Set->begin(), Set->end());
+}
+
+void CXXRecordDecl::removeConversion(const NamedDecl *ConvDecl) {
+ // This operation is O(N) but extremely rare. Sema only uses it to
+ // remove UsingShadowDecls in a class that were followed by a direct
+ // declaration, e.g.:
+ // class A : B {
+ // using B::operator int;
+ // operator int();
+ // };
+ // This is uncommon by itself and even more uncommon in conjunction
+ // with sufficiently large numbers of directly-declared conversions
+ // that asymptotic behavior matters.
+
+ ASTUnresolvedSet &Convs = data().Conversions.get(getASTContext());
+ for (unsigned I = 0, E = Convs.size(); I != E; ++I) {
+ if (Convs[I].getDecl() == ConvDecl) {
+ Convs.erase(I);
+ assert(std::find(Convs.begin(), Convs.end(), ConvDecl) == Convs.end()
+ && "conversion was found multiple times in unresolved set");
+ return;
+ }
+ }
+
+ llvm_unreachable("conversion not found in set!");
+}
+
+CXXRecordDecl *CXXRecordDecl::getInstantiatedFromMemberClass() const {
+ if (MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo())
+ return cast<CXXRecordDecl>(MSInfo->getInstantiatedFrom());
+
+ return nullptr;
+}
+
+MemberSpecializationInfo *CXXRecordDecl::getMemberSpecializationInfo() const {
+ return TemplateOrInstantiation.dyn_cast<MemberSpecializationInfo *>();
+}
+
+void
+CXXRecordDecl::setInstantiationOfMemberClass(CXXRecordDecl *RD,
+ TemplateSpecializationKind TSK) {
+ assert(TemplateOrInstantiation.isNull() &&
+ "Previous template or instantiation?");
+ assert(!isa<ClassTemplatePartialSpecializationDecl>(this));
+ TemplateOrInstantiation
+ = new (getASTContext()) MemberSpecializationInfo(RD, TSK);
+}
+
+ClassTemplateDecl *CXXRecordDecl::getDescribedClassTemplate() const {
+ return TemplateOrInstantiation.dyn_cast<ClassTemplateDecl *>();
+}
+
+void CXXRecordDecl::setDescribedClassTemplate(ClassTemplateDecl *Template) {
+ TemplateOrInstantiation = Template;
+}
+
+TemplateSpecializationKind CXXRecordDecl::getTemplateSpecializationKind() const{
+ if (const ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(this))
+ return Spec->getSpecializationKind();
+
+ if (MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo())
+ return MSInfo->getTemplateSpecializationKind();
+
+ return TSK_Undeclared;
+}
+
+void
+CXXRecordDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK) {
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(this)) {
+ Spec->setSpecializationKind(TSK);
+ return;
+ }
+
+ if (MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo()) {
+ MSInfo->setTemplateSpecializationKind(TSK);
+ return;
+ }
+
+ llvm_unreachable("Not a class template or member class specialization");
+}
+
+const CXXRecordDecl *CXXRecordDecl::getTemplateInstantiationPattern() const {
+ // If it's a class template specialization, find the template or partial
+ // specialization from which it was instantiated.
+ if (auto *TD = dyn_cast<ClassTemplateSpecializationDecl>(this)) {
+ auto From = TD->getInstantiatedFrom();
+ if (auto *CTD = From.dyn_cast<ClassTemplateDecl *>()) {
+ while (auto *NewCTD = CTD->getInstantiatedFromMemberTemplate()) {
+ if (NewCTD->isMemberSpecialization())
+ break;
+ CTD = NewCTD;
+ }
+ return CTD->getTemplatedDecl()->getDefinition();
+ }
+ if (auto *CTPSD =
+ From.dyn_cast<ClassTemplatePartialSpecializationDecl *>()) {
+ while (auto *NewCTPSD = CTPSD->getInstantiatedFromMember()) {
+ if (NewCTPSD->isMemberSpecialization())
+ break;
+ CTPSD = NewCTPSD;
+ }
+ return CTPSD->getDefinition();
+ }
+ }
+
+ if (MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo()) {
+ if (isTemplateInstantiation(MSInfo->getTemplateSpecializationKind())) {
+ const CXXRecordDecl *RD = this;
+ while (auto *NewRD = RD->getInstantiatedFromMemberClass())
+ RD = NewRD;
+ return RD->getDefinition();
+ }
+ }
+
+ assert(!isTemplateInstantiation(this->getTemplateSpecializationKind()) &&
+ "couldn't find pattern for class template instantiation");
+ return nullptr;
+}
+
+CXXDestructorDecl *CXXRecordDecl::getDestructor() const {
+ ASTContext &Context = getASTContext();
+ QualType ClassType = Context.getTypeDeclType(this);
+
+ DeclarationName Name
+ = Context.DeclarationNames.getCXXDestructorName(
+ Context.getCanonicalType(ClassType));
+
+ DeclContext::lookup_result R = lookup(Name);
+ if (R.empty())
+ return nullptr;
+
+ CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(R.front());
+ return Dtor;
+}
+
+bool CXXRecordDecl::isAnyDestructorNoReturn() const {
+  // Check whether this class's own destructor is noreturn.
+ if (const CXXDestructorDecl *Destructor = getDestructor())
+ if (Destructor->isNoReturn())
+ return true;
+
+  // Check the base classes' destructors for noreturn.
+ for (const auto &Base : bases())
+ if (Base.getType()->getAsCXXRecordDecl()->isAnyDestructorNoReturn())
+ return true;
+
+ // Check fields for noreturn.
+ for (const auto *Field : fields())
+ if (const CXXRecordDecl *RD =
+ Field->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl())
+ if (RD->isAnyDestructorNoReturn())
+ return true;
+
+  // None of the destructors is noreturn.
+ return false;
+}
+
+void CXXRecordDecl::completeDefinition() {
+ completeDefinition(nullptr);
+}
+
+void CXXRecordDecl::completeDefinition(CXXFinalOverriderMap *FinalOverriders) {
+ RecordDecl::completeDefinition();
+
+ // If the class may be abstract (but hasn't been marked as such), check for
+ // any pure final overriders.
+ if (mayBeAbstract()) {
+ CXXFinalOverriderMap MyFinalOverriders;
+ if (!FinalOverriders) {
+ getFinalOverriders(MyFinalOverriders);
+ FinalOverriders = &MyFinalOverriders;
+ }
+
+ bool Done = false;
+ for (CXXFinalOverriderMap::iterator M = FinalOverriders->begin(),
+ MEnd = FinalOverriders->end();
+ M != MEnd && !Done; ++M) {
+ for (OverridingMethods::iterator SO = M->second.begin(),
+ SOEnd = M->second.end();
+ SO != SOEnd && !Done; ++SO) {
+ assert(SO->second.size() > 0 &&
+               "All virtual functions have overriding virtual functions");
+
+ // C++ [class.abstract]p4:
+ // A class is abstract if it contains or inherits at least one
+ // pure virtual function for which the final overrider is pure
+ // virtual.
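+          // e.g. (sketch): 'struct A { virtual void f() = 0; };' with
+          // 'struct B : A { void f() override = 0; };' leaves B abstract,
+          // since the final overrider of f is still pure.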
+ if (SO->second.front().Method->isPure()) {
+ data().Abstract = true;
+ Done = true;
+ break;
+ }
+ }
+ }
+ }
+
+ // Set access bits correctly on the directly-declared conversions.
+ for (conversion_iterator I = conversion_begin(), E = conversion_end();
+ I != E; ++I)
+ I.setAccess((*I)->getAccess());
+}
+
+bool CXXRecordDecl::mayBeAbstract() const {
+ if (data().Abstract || isInvalidDecl() || !data().Polymorphic ||
+ isDependentContext())
+ return false;
+
+ for (const auto &B : bases()) {
+ CXXRecordDecl *BaseDecl
+ = cast<CXXRecordDecl>(B.getType()->getAs<RecordType>()->getDecl());
+ if (BaseDecl->isAbstract())
+ return true;
+ }
+
+ return false;
+}
+
+void CXXMethodDecl::anchor() { }
+
+bool CXXMethodDecl::isStatic() const {
+ const CXXMethodDecl *MD = getCanonicalDecl();
+
+ if (MD->getStorageClass() == SC_Static)
+ return true;
+
+ OverloadedOperatorKind OOK = getDeclName().getCXXOverloadedOperator();
+ return isStaticOverloadedOperator(OOK);
+}
+
+static bool recursivelyOverrides(const CXXMethodDecl *DerivedMD,
+ const CXXMethodDecl *BaseMD) {
+ for (CXXMethodDecl::method_iterator I = DerivedMD->begin_overridden_methods(),
+ E = DerivedMD->end_overridden_methods(); I != E; ++I) {
+ const CXXMethodDecl *MD = *I;
+ if (MD->getCanonicalDecl() == BaseMD->getCanonicalDecl())
+ return true;
+ if (recursivelyOverrides(MD, BaseMD))
+ return true;
+ }
+ return false;
+}
+
+CXXMethodDecl *
+CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD,
+ bool MayBeBase) {
+ if (this->getParent()->getCanonicalDecl() == RD->getCanonicalDecl())
+ return this;
+
+ // Lookup doesn't work for destructors, so handle them separately.
+ if (isa<CXXDestructorDecl>(this)) {
+ CXXMethodDecl *MD = RD->getDestructor();
+ if (MD) {
+ if (recursivelyOverrides(MD, this))
+ return MD;
+ if (MayBeBase && recursivelyOverrides(this, MD))
+ return MD;
+ }
+ return nullptr;
+ }
+
+ for (auto *ND : RD->lookup(getDeclName())) {
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND);
+ if (!MD)
+ continue;
+ if (recursivelyOverrides(MD, this))
+ return MD;
+ if (MayBeBase && recursivelyOverrides(this, MD))
+ return MD;
+ }
+
+ for (const auto &I : RD->bases()) {
+ const RecordType *RT = I.getType()->getAs<RecordType>();
+ if (!RT)
+ continue;
+ const CXXRecordDecl *Base = cast<CXXRecordDecl>(RT->getDecl());
+ CXXMethodDecl *T = this->getCorrespondingMethodInClass(Base);
+ if (T)
+ return T;
+ }
+
+ return nullptr;
+}
+
+CXXMethodDecl *
+CXXMethodDecl::Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ StorageClass SC, bool isInline,
+ bool isConstexpr, SourceLocation EndLocation) {
+ return new (C, RD) CXXMethodDecl(CXXMethod, C, RD, StartLoc, NameInfo,
+ T, TInfo, SC, isInline, isConstexpr,
+ EndLocation);
+}
+
+CXXMethodDecl *CXXMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) CXXMethodDecl(CXXMethod, C, nullptr, SourceLocation(),
+ DeclarationNameInfo(), QualType(), nullptr,
+ SC_None, false, false, SourceLocation());
+}
+
+bool CXXMethodDecl::isUsualDeallocationFunction() const {
+ if (getOverloadedOperator() != OO_Delete &&
+ getOverloadedOperator() != OO_Array_Delete)
+ return false;
+
+ // C++ [basic.stc.dynamic.deallocation]p2:
+ // A template instance is never a usual deallocation function,
+ // regardless of its signature.
+ if (getPrimaryTemplate())
+ return false;
+
+ // C++ [basic.stc.dynamic.deallocation]p2:
+ // If a class T has a member deallocation function named operator delete
+ // with exactly one parameter, then that function is a usual (non-placement)
+ // deallocation function. [...]
+ if (getNumParams() == 1)
+ return true;
+
+ // C++ [basic.stc.dynamic.deallocation]p2:
+ // [...] If class T does not declare such an operator delete but does
+ // declare a member deallocation function named operator delete with
+ // exactly two parameters, the second of which has type std::size_t (18.1),
+ // then this function is a usual deallocation function.
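+  // For example (illustrative):
+  //   struct S {
+  //     void operator delete(void *);              // usual
+  //     void operator delete(void *, std::size_t); // usual only if the
+  //                                                // one-parameter form is
+  //                                                // not declared
+  //   };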
+ ASTContext &Context = getASTContext();
+ if (getNumParams() != 2 ||
+ !Context.hasSameUnqualifiedType(getParamDecl(1)->getType(),
+ Context.getSizeType()))
+ return false;
+
+ // This function is a usual deallocation function if there are no
+ // single-parameter deallocation functions of the same kind.
+ DeclContext::lookup_result R = getDeclContext()->lookup(getDeclName());
+ for (DeclContext::lookup_result::iterator I = R.begin(), E = R.end();
+ I != E; ++I) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I))
+ if (FD->getNumParams() == 1)
+ return false;
+ }
+
+ return true;
+}
+
+bool CXXMethodDecl::isCopyAssignmentOperator() const {
+ // C++0x [class.copy]p17:
+ // A user-declared copy assignment operator X::operator= is a non-static
+ // non-template member function of class X with exactly one parameter of
+ // type X, X&, const X&, volatile X& or const volatile X&.
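+  // e.g. (sketch) 'X &X::operator=(const X&)' and 'X &X::operator=(X)'
+  // qualify; 'X &X::operator=(X&&)' is a move assignment operator instead.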
+ if (/*operator=*/getOverloadedOperator() != OO_Equal ||
+ /*non-static*/ isStatic() ||
+ /*non-template*/getPrimaryTemplate() || getDescribedFunctionTemplate() ||
+ getNumParams() != 1)
+ return false;
+
+ QualType ParamType = getParamDecl(0)->getType();
+ if (const LValueReferenceType *Ref = ParamType->getAs<LValueReferenceType>())
+ ParamType = Ref->getPointeeType();
+
+ ASTContext &Context = getASTContext();
+ QualType ClassType
+ = Context.getCanonicalType(Context.getTypeDeclType(getParent()));
+ return Context.hasSameUnqualifiedType(ClassType, ParamType);
+}
+
+bool CXXMethodDecl::isMoveAssignmentOperator() const {
+ // C++0x [class.copy]p19:
+ // A user-declared move assignment operator X::operator= is a non-static
+ // non-template member function of class X with exactly one parameter of type
+ // X&&, const X&&, volatile X&&, or const volatile X&&.
+ if (getOverloadedOperator() != OO_Equal || isStatic() ||
+ getPrimaryTemplate() || getDescribedFunctionTemplate() ||
+ getNumParams() != 1)
+ return false;
+
+ QualType ParamType = getParamDecl(0)->getType();
+ if (!isa<RValueReferenceType>(ParamType))
+ return false;
+ ParamType = ParamType->getPointeeType();
+
+ ASTContext &Context = getASTContext();
+ QualType ClassType
+ = Context.getCanonicalType(Context.getTypeDeclType(getParent()));
+ return Context.hasSameUnqualifiedType(ClassType, ParamType);
+}
+
+void CXXMethodDecl::addOverriddenMethod(const CXXMethodDecl *MD) {
+ assert(MD->isCanonicalDecl() && "Method is not canonical!");
+ assert(!MD->getParent()->isDependentContext() &&
+ "Can't add an overridden method to a class template!");
+ assert(MD->isVirtual() && "Method is not virtual!");
+
+ getASTContext().addOverriddenMethod(this, MD);
+}
+
+CXXMethodDecl::method_iterator CXXMethodDecl::begin_overridden_methods() const {
+ if (isa<CXXConstructorDecl>(this)) return nullptr;
+ return getASTContext().overridden_methods_begin(this);
+}
+
+CXXMethodDecl::method_iterator CXXMethodDecl::end_overridden_methods() const {
+ if (isa<CXXConstructorDecl>(this)) return nullptr;
+ return getASTContext().overridden_methods_end(this);
+}
+
+unsigned CXXMethodDecl::size_overridden_methods() const {
+ if (isa<CXXConstructorDecl>(this)) return 0;
+ return getASTContext().overridden_methods_size(this);
+}
+
+QualType CXXMethodDecl::getThisType(ASTContext &C) const {
+ // C++ 9.3.2p1: The type of this in a member function of a class X is X*.
+ // If the member function is declared const, the type of this is const X*,
+ // if the member function is declared volatile, the type of this is
+ // volatile X*, and if the member function is declared const volatile,
+ // the type of this is const volatile X*.
+
+ assert(isInstance() && "No 'this' for static methods!");
+
+ QualType ClassTy = C.getTypeDeclType(getParent());
+ ClassTy = C.getQualifiedType(ClassTy,
+ Qualifiers::fromCVRMask(getTypeQualifiers()));
+ return C.getPointerType(ClassTy);
+}
+
+bool CXXMethodDecl::hasInlineBody() const {
+ // If this function is a template instantiation, look at the template from
+ // which it was instantiated.
+ const FunctionDecl *CheckFn = getTemplateInstantiationPattern();
+ if (!CheckFn)
+ CheckFn = this;
+
+ const FunctionDecl *fn;
+ return CheckFn->hasBody(fn) && !fn->isOutOfLine();
+}
+
+bool CXXMethodDecl::isLambdaStaticInvoker() const {
+ const CXXRecordDecl *P = getParent();
+ if (P->isLambda()) {
+ if (const CXXMethodDecl *StaticInvoker = P->getLambdaStaticInvoker()) {
+ if (StaticInvoker == this) return true;
+ if (P->isGenericLambda() && this->isFunctionTemplateSpecialization())
+ return StaticInvoker == this->getPrimaryTemplate()->getTemplatedDecl();
+ }
+ }
+ return false;
+}
+
+CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
+ TypeSourceInfo *TInfo, bool IsVirtual,
+ SourceLocation L, Expr *Init,
+ SourceLocation R,
+ SourceLocation EllipsisLoc)
+ : Initializee(TInfo), MemberOrEllipsisLocation(EllipsisLoc), Init(Init),
+ LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(IsVirtual),
+ IsWritten(false), SourceOrderOrNumArrayIndices(0)
+{
+}
+
+CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
+ FieldDecl *Member,
+ SourceLocation MemberLoc,
+ SourceLocation L, Expr *Init,
+ SourceLocation R)
+ : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
+ LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(false),
+ IsWritten(false), SourceOrderOrNumArrayIndices(0)
+{
+}
+
+CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
+ IndirectFieldDecl *Member,
+ SourceLocation MemberLoc,
+ SourceLocation L, Expr *Init,
+ SourceLocation R)
+ : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
+ LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(false),
+ IsWritten(false), SourceOrderOrNumArrayIndices(0)
+{
+}
+
+CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
+ TypeSourceInfo *TInfo,
+ SourceLocation L, Expr *Init,
+ SourceLocation R)
+ : Initializee(TInfo), MemberOrEllipsisLocation(), Init(Init),
+ LParenLoc(L), RParenLoc(R), IsDelegating(true), IsVirtual(false),
+ IsWritten(false), SourceOrderOrNumArrayIndices(0)
+{
+}
+
+CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
+ FieldDecl *Member,
+ SourceLocation MemberLoc,
+ SourceLocation L, Expr *Init,
+ SourceLocation R,
+ VarDecl **Indices,
+ unsigned NumIndices)
+ : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
+ LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(false),
+ IsWritten(false), SourceOrderOrNumArrayIndices(NumIndices)
+{
+ std::uninitialized_copy(Indices, Indices + NumIndices,
+ getTrailingObjects<VarDecl *>());
+}
+
+CXXCtorInitializer *CXXCtorInitializer::Create(ASTContext &Context,
+ FieldDecl *Member,
+ SourceLocation MemberLoc,
+ SourceLocation L, Expr *Init,
+ SourceLocation R,
+ VarDecl **Indices,
+ unsigned NumIndices) {
+ void *Mem = Context.Allocate(totalSizeToAlloc<VarDecl *>(NumIndices),
+ llvm::alignOf<CXXCtorInitializer>());
+ return new (Mem) CXXCtorInitializer(Context, Member, MemberLoc, L, Init, R,
+ Indices, NumIndices);
+}
+
+TypeLoc CXXCtorInitializer::getBaseClassLoc() const {
+ if (isBaseInitializer())
+ return Initializee.get<TypeSourceInfo*>()->getTypeLoc();
+ else
+ return TypeLoc();
+}
+
+const Type *CXXCtorInitializer::getBaseClass() const {
+ if (isBaseInitializer())
+ return Initializee.get<TypeSourceInfo*>()->getType().getTypePtr();
+ else
+ return nullptr;
+}
+
+SourceLocation CXXCtorInitializer::getSourceLocation() const {
+ if (isInClassMemberInitializer())
+ return getAnyMember()->getLocation();
+
+ if (isAnyMemberInitializer())
+ return getMemberLocation();
+
+ if (TypeSourceInfo *TSInfo = Initializee.get<TypeSourceInfo*>())
+ return TSInfo->getTypeLoc().getLocalSourceRange().getBegin();
+
+ return SourceLocation();
+}
+
+SourceRange CXXCtorInitializer::getSourceRange() const {
+ if (isInClassMemberInitializer()) {
+ FieldDecl *D = getAnyMember();
+ if (Expr *I = D->getInClassInitializer())
+ return I->getSourceRange();
+ return SourceRange();
+ }
+
+ return SourceRange(getSourceLocation(), getRParenLoc());
+}
+
+void CXXConstructorDecl::anchor() { }
+
+CXXConstructorDecl *
+CXXConstructorDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) CXXConstructorDecl(C, nullptr, SourceLocation(),
+ DeclarationNameInfo(), QualType(),
+ nullptr, false, false, false, false);
+}
+
+CXXConstructorDecl *
+CXXConstructorDecl::Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ bool isExplicit, bool isInline,
+ bool isImplicitlyDeclared, bool isConstexpr) {
+ assert(NameInfo.getName().getNameKind()
+ == DeclarationName::CXXConstructorName &&
+ "Name must refer to a constructor");
+ return new (C, RD) CXXConstructorDecl(C, RD, StartLoc, NameInfo, T, TInfo,
+ isExplicit, isInline,
+ isImplicitlyDeclared, isConstexpr);
+}
+
+CXXConstructorDecl::init_const_iterator CXXConstructorDecl::init_begin() const {
+ return CtorInitializers.get(getASTContext().getExternalSource());
+}
+
+CXXConstructorDecl *CXXConstructorDecl::getTargetConstructor() const {
+ assert(isDelegatingConstructor() && "Not a delegating constructor!");
+ Expr *E = (*init_begin())->getInit()->IgnoreImplicit();
+ if (CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(E))
+ return Construct->getConstructor();
+
+ return nullptr;
+}
+
+bool CXXConstructorDecl::isDefaultConstructor() const {
+ // C++ [class.ctor]p5:
+ // A default constructor for a class X is a constructor of class
+ // X that can be called without an argument.
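+  // e.g. (illustrative) both 'X()' and 'X(int = 0)' are default
+  // constructors.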
+ return (getNumParams() == 0) ||
+ (getNumParams() > 0 && getParamDecl(0)->hasDefaultArg());
+}
+
+bool
+CXXConstructorDecl::isCopyConstructor(unsigned &TypeQuals) const {
+ return isCopyOrMoveConstructor(TypeQuals) &&
+ getParamDecl(0)->getType()->isLValueReferenceType();
+}
+
+bool CXXConstructorDecl::isMoveConstructor(unsigned &TypeQuals) const {
+ return isCopyOrMoveConstructor(TypeQuals) &&
+ getParamDecl(0)->getType()->isRValueReferenceType();
+}
+
+/// \brief Determine whether this is a copy or move constructor.
+bool CXXConstructorDecl::isCopyOrMoveConstructor(unsigned &TypeQuals) const {
+ // C++ [class.copy]p2:
+ // A non-template constructor for class X is a copy constructor
+ // if its first parameter is of type X&, const X&, volatile X& or
+ // const volatile X&, and either there are no other parameters
+ // or else all other parameters have default arguments (8.3.6).
+ // C++0x [class.copy]p3:
+ // A non-template constructor for class X is a move constructor if its
+ // first parameter is of type X&&, const X&&, volatile X&&, or
+ // const volatile X&&, and either there are no other parameters or else
+ // all other parameters have default arguments.
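+  // For instance (illustrative): 'X(const X&)' is a copy constructor with
+  // TypeQuals set to Qualifiers::Const, while 'X(X&&, int = 0)' is a move
+  // constructor, since trailing parameters may have default arguments.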
+ if ((getNumParams() < 1) ||
+ (getNumParams() > 1 && !getParamDecl(1)->hasDefaultArg()) ||
+ (getPrimaryTemplate() != nullptr) ||
+ (getDescribedFunctionTemplate() != nullptr))
+ return false;
+
+ const ParmVarDecl *Param = getParamDecl(0);
+
+ // Do we have a reference type?
+ const ReferenceType *ParamRefType = Param->getType()->getAs<ReferenceType>();
+ if (!ParamRefType)
+ return false;
+
+ // Is it a reference to our class type?
+ ASTContext &Context = getASTContext();
+
+ CanQualType PointeeType
+ = Context.getCanonicalType(ParamRefType->getPointeeType());
+ CanQualType ClassTy
+ = Context.getCanonicalType(Context.getTagDeclType(getParent()));
+ if (PointeeType.getUnqualifiedType() != ClassTy)
+ return false;
+
+ // FIXME: other qualifiers?
+
+ // We have a copy or move constructor.
+ TypeQuals = PointeeType.getCVRQualifiers();
+ return true;
+}
+
+bool CXXConstructorDecl::isConvertingConstructor(bool AllowExplicit) const {
+ // C++ [class.conv.ctor]p1:
+ // A constructor declared without the function-specifier explicit
+ // that can be called with a single parameter specifies a
+ // conversion from the type of its first parameter to the type of
+ // its class. Such a constructor is called a converting
+ // constructor.
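+  // e.g. (sketch): 'X(int)' and 'X(int, long = 0)' are converting
+  // constructors; an explicit constructor counts only when AllowExplicit
+  // is true.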
+ if (isExplicit() && !AllowExplicit)
+ return false;
+
+ return (getNumParams() == 0 &&
+ getType()->getAs<FunctionProtoType>()->isVariadic()) ||
+ (getNumParams() == 1) ||
+ (getNumParams() > 1 &&
+ (getParamDecl(1)->hasDefaultArg() ||
+ getParamDecl(1)->isParameterPack()));
+}
+
+bool CXXConstructorDecl::isSpecializationCopyingObject() const {
+ if ((getNumParams() < 1) ||
+ (getNumParams() > 1 && !getParamDecl(1)->hasDefaultArg()) ||
+ (getDescribedFunctionTemplate() != nullptr))
+ return false;
+
+ const ParmVarDecl *Param = getParamDecl(0);
+
+ ASTContext &Context = getASTContext();
+ CanQualType ParamType = Context.getCanonicalType(Param->getType());
+
+  // Is it the same as our class type?
+ CanQualType ClassTy
+ = Context.getCanonicalType(Context.getTagDeclType(getParent()));
+ if (ParamType.getUnqualifiedType() != ClassTy)
+ return false;
+
+ return true;
+}
+
+const CXXConstructorDecl *CXXConstructorDecl::getInheritedConstructor() const {
+ // Hack: we store the inherited constructor in the overridden method table
+ method_iterator It = getASTContext().overridden_methods_begin(this);
+ if (It == getASTContext().overridden_methods_end(this))
+ return nullptr;
+
+ return cast<CXXConstructorDecl>(*It);
+}
+
+void
+CXXConstructorDecl::setInheritedConstructor(const CXXConstructorDecl *BaseCtor){
+ // Hack: we store the inherited constructor in the overridden method table
+ assert(getASTContext().overridden_methods_size(this) == 0 &&
+ "Base ctor already set.");
+ getASTContext().addOverriddenMethod(this, BaseCtor);
+}
+
+void CXXDestructorDecl::anchor() { }
+
+CXXDestructorDecl *
+CXXDestructorDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID)
+ CXXDestructorDecl(C, nullptr, SourceLocation(), DeclarationNameInfo(),
+ QualType(), nullptr, false, false);
+}
+
+CXXDestructorDecl *
+CXXDestructorDecl::Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ bool isInline, bool isImplicitlyDeclared) {
+ assert(NameInfo.getName().getNameKind()
+ == DeclarationName::CXXDestructorName &&
+ "Name must refer to a destructor");
+ return new (C, RD) CXXDestructorDecl(C, RD, StartLoc, NameInfo, T, TInfo,
+ isInline, isImplicitlyDeclared);
+}
+
+void CXXDestructorDecl::setOperatorDelete(FunctionDecl *OD) {
+ auto *First = cast<CXXDestructorDecl>(getFirstDecl());
+ if (OD && !First->OperatorDelete) {
+ First->OperatorDelete = OD;
+ if (auto *L = getASTMutationListener())
+ L->ResolvedOperatorDelete(First, OD);
+ }
+}
+
+void CXXConversionDecl::anchor() { }
+
+CXXConversionDecl *
+CXXConversionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) CXXConversionDecl(C, nullptr, SourceLocation(),
+ DeclarationNameInfo(), QualType(),
+ nullptr, false, false, false,
+ SourceLocation());
+}
+
+CXXConversionDecl *
+CXXConversionDecl::Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ bool isInline, bool isExplicit,
+ bool isConstexpr, SourceLocation EndLocation) {
+ assert(NameInfo.getName().getNameKind()
+ == DeclarationName::CXXConversionFunctionName &&
+ "Name must refer to a conversion function");
+ return new (C, RD) CXXConversionDecl(C, RD, StartLoc, NameInfo, T, TInfo,
+ isInline, isExplicit, isConstexpr,
+ EndLocation);
+}
+
+bool CXXConversionDecl::isLambdaToBlockPointerConversion() const {
+ return isImplicit() && getParent()->isLambda() &&
+ getConversionType()->isBlockPointerType();
+}
+
+void LinkageSpecDecl::anchor() { }
+
+LinkageSpecDecl *LinkageSpecDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation ExternLoc,
+ SourceLocation LangLoc,
+ LanguageIDs Lang,
+ bool HasBraces) {
+ return new (C, DC) LinkageSpecDecl(DC, ExternLoc, LangLoc, Lang, HasBraces);
+}
+
+LinkageSpecDecl *LinkageSpecDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) LinkageSpecDecl(nullptr, SourceLocation(),
+ SourceLocation(), lang_c, false);
+}
+
+void UsingDirectiveDecl::anchor() { }
+
+UsingDirectiveDecl *UsingDirectiveDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ SourceLocation NamespaceLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation IdentLoc,
+ NamedDecl *Used,
+ DeclContext *CommonAncestor) {
+ if (NamespaceDecl *NS = dyn_cast_or_null<NamespaceDecl>(Used))
+ Used = NS->getOriginalNamespace();
+ return new (C, DC) UsingDirectiveDecl(DC, L, NamespaceLoc, QualifierLoc,
+ IdentLoc, Used, CommonAncestor);
+}
+
+UsingDirectiveDecl *UsingDirectiveDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) UsingDirectiveDecl(nullptr, SourceLocation(),
+ SourceLocation(),
+ NestedNameSpecifierLoc(),
+ SourceLocation(), nullptr, nullptr);
+}
+
+NamespaceDecl *UsingDirectiveDecl::getNominatedNamespace() {
+ if (NamespaceAliasDecl *NA =
+ dyn_cast_or_null<NamespaceAliasDecl>(NominatedNamespace))
+ return NA->getNamespace();
+ return cast_or_null<NamespaceDecl>(NominatedNamespace);
+}
+
+NamespaceDecl::NamespaceDecl(ASTContext &C, DeclContext *DC, bool Inline,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, NamespaceDecl *PrevDecl)
+ : NamedDecl(Namespace, DC, IdLoc, Id), DeclContext(Namespace),
+ redeclarable_base(C), LocStart(StartLoc), RBraceLoc(),
+ AnonOrFirstNamespaceAndInline(nullptr, Inline) {
+ setPreviousDecl(PrevDecl);
+
+ if (PrevDecl)
+ AnonOrFirstNamespaceAndInline.setPointer(PrevDecl->getOriginalNamespace());
+}
+
+NamespaceDecl *NamespaceDecl::Create(ASTContext &C, DeclContext *DC,
+ bool Inline, SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ NamespaceDecl *PrevDecl) {
+ return new (C, DC) NamespaceDecl(C, DC, Inline, StartLoc, IdLoc, Id,
+ PrevDecl);
+}
+
+NamespaceDecl *NamespaceDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) NamespaceDecl(C, nullptr, false, SourceLocation(),
+ SourceLocation(), nullptr, nullptr);
+}
+
+NamespaceDecl *NamespaceDecl::getOriginalNamespace() {
+ if (isFirstDecl())
+ return this;
+
+ return AnonOrFirstNamespaceAndInline.getPointer();
+}
+
+const NamespaceDecl *NamespaceDecl::getOriginalNamespace() const {
+ if (isFirstDecl())
+ return this;
+
+ return AnonOrFirstNamespaceAndInline.getPointer();
+}
+
+bool NamespaceDecl::isOriginalNamespace() const { return isFirstDecl(); }
+
+NamespaceDecl *NamespaceDecl::getNextRedeclarationImpl() {
+ return getNextRedeclaration();
+}
+NamespaceDecl *NamespaceDecl::getPreviousDeclImpl() {
+ return getPreviousDecl();
+}
+NamespaceDecl *NamespaceDecl::getMostRecentDeclImpl() {
+ return getMostRecentDecl();
+}
+
+void NamespaceAliasDecl::anchor() { }
+
+NamespaceAliasDecl *NamespaceAliasDecl::getNextRedeclarationImpl() {
+ return getNextRedeclaration();
+}
+NamespaceAliasDecl *NamespaceAliasDecl::getPreviousDeclImpl() {
+ return getPreviousDecl();
+}
+NamespaceAliasDecl *NamespaceAliasDecl::getMostRecentDeclImpl() {
+ return getMostRecentDecl();
+}
+
+NamespaceAliasDecl *NamespaceAliasDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation UsingLoc,
+ SourceLocation AliasLoc,
+ IdentifierInfo *Alias,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation IdentLoc,
+ NamedDecl *Namespace) {
+ // FIXME: Preserve the aliased namespace as written.
+ if (NamespaceDecl *NS = dyn_cast_or_null<NamespaceDecl>(Namespace))
+ Namespace = NS->getOriginalNamespace();
+ return new (C, DC) NamespaceAliasDecl(C, DC, UsingLoc, AliasLoc, Alias,
+ QualifierLoc, IdentLoc, Namespace);
+}
+
+NamespaceAliasDecl *
+NamespaceAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) NamespaceAliasDecl(C, nullptr, SourceLocation(),
+ SourceLocation(), nullptr,
+ NestedNameSpecifierLoc(),
+ SourceLocation(), nullptr);
+}
+
+void UsingShadowDecl::anchor() { }
+
+UsingShadowDecl *
+UsingShadowDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) UsingShadowDecl(C, nullptr, SourceLocation(),
+ nullptr, nullptr);
+}
+
+UsingDecl *UsingShadowDecl::getUsingDecl() const {
+ const UsingShadowDecl *Shadow = this;
+ while (const UsingShadowDecl *NextShadow =
+ dyn_cast<UsingShadowDecl>(Shadow->UsingOrNextShadow))
+ Shadow = NextShadow;
+ return cast<UsingDecl>(Shadow->UsingOrNextShadow);
+}
+
+void UsingDecl::anchor() { }
+
+void UsingDecl::addShadowDecl(UsingShadowDecl *S) {
+ assert(std::find(shadow_begin(), shadow_end(), S) == shadow_end() &&
+ "declaration already in set");
+ assert(S->getUsingDecl() == this);
+
+ if (FirstUsingShadow.getPointer())
+ S->UsingOrNextShadow = FirstUsingShadow.getPointer();
+ FirstUsingShadow.setPointer(S);
+}
+
+void UsingDecl::removeShadowDecl(UsingShadowDecl *S) {
+ assert(std::find(shadow_begin(), shadow_end(), S) != shadow_end() &&
+ "declaration not in set");
+ assert(S->getUsingDecl() == this);
+
+ // Remove S from the shadow decl chain. This is O(n) but hopefully rare.
+
+ if (FirstUsingShadow.getPointer() == S) {
+ FirstUsingShadow.setPointer(
+ dyn_cast<UsingShadowDecl>(S->UsingOrNextShadow));
+ S->UsingOrNextShadow = this;
+ return;
+ }
+
+ UsingShadowDecl *Prev = FirstUsingShadow.getPointer();
+ while (Prev->UsingOrNextShadow != S)
+ Prev = cast<UsingShadowDecl>(Prev->UsingOrNextShadow);
+ Prev->UsingOrNextShadow = S->UsingOrNextShadow;
+ S->UsingOrNextShadow = this;
+}
+
+UsingDecl *UsingDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation UL,
+ NestedNameSpecifierLoc QualifierLoc,
+ const DeclarationNameInfo &NameInfo,
+ bool HasTypename) {
+ return new (C, DC) UsingDecl(DC, UL, QualifierLoc, NameInfo, HasTypename);
+}
+
+UsingDecl *UsingDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) UsingDecl(nullptr, SourceLocation(),
+ NestedNameSpecifierLoc(), DeclarationNameInfo(),
+ false);
+}
+
+SourceRange UsingDecl::getSourceRange() const {
+ SourceLocation Begin = isAccessDeclaration()
+ ? getQualifierLoc().getBeginLoc() : UsingLocation;
+ return SourceRange(Begin, getNameInfo().getEndLoc());
+}
+
+void UnresolvedUsingValueDecl::anchor() { }
+
+UnresolvedUsingValueDecl *
+UnresolvedUsingValueDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation UsingLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ const DeclarationNameInfo &NameInfo) {
+ return new (C, DC) UnresolvedUsingValueDecl(DC, C.DependentTy, UsingLoc,
+ QualifierLoc, NameInfo);
+}
+
+UnresolvedUsingValueDecl *
+UnresolvedUsingValueDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) UnresolvedUsingValueDecl(nullptr, QualType(),
+ SourceLocation(),
+ NestedNameSpecifierLoc(),
+ DeclarationNameInfo());
+}
+
+SourceRange UnresolvedUsingValueDecl::getSourceRange() const {
+ SourceLocation Begin = isAccessDeclaration()
+ ? getQualifierLoc().getBeginLoc() : UsingLocation;
+ return SourceRange(Begin, getNameInfo().getEndLoc());
+}
+
+void UnresolvedUsingTypenameDecl::anchor() { }
+
+UnresolvedUsingTypenameDecl *
+UnresolvedUsingTypenameDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation UsingLoc,
+ SourceLocation TypenameLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TargetNameLoc,
+ DeclarationName TargetName) {
+ return new (C, DC) UnresolvedUsingTypenameDecl(
+ DC, UsingLoc, TypenameLoc, QualifierLoc, TargetNameLoc,
+ TargetName.getAsIdentifierInfo());
+}
+
+UnresolvedUsingTypenameDecl *
+UnresolvedUsingTypenameDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) UnresolvedUsingTypenameDecl(
+ nullptr, SourceLocation(), SourceLocation(), NestedNameSpecifierLoc(),
+ SourceLocation(), nullptr);
+}
+
+void StaticAssertDecl::anchor() { }
+
+StaticAssertDecl *StaticAssertDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StaticAssertLoc,
+ Expr *AssertExpr,
+ StringLiteral *Message,
+ SourceLocation RParenLoc,
+ bool Failed) {
+ return new (C, DC) StaticAssertDecl(DC, StaticAssertLoc, AssertExpr, Message,
+ RParenLoc, Failed);
+}
+
+StaticAssertDecl *StaticAssertDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) StaticAssertDecl(nullptr, SourceLocation(), nullptr,
+ nullptr, SourceLocation(), false);
+}
+
+MSPropertyDecl *MSPropertyDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L, DeclarationName N,
+ QualType T, TypeSourceInfo *TInfo,
+ SourceLocation StartL,
+ IdentifierInfo *Getter,
+ IdentifierInfo *Setter) {
+ return new (C, DC) MSPropertyDecl(DC, L, N, T, TInfo, StartL, Getter, Setter);
+}
+
+MSPropertyDecl *MSPropertyDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) MSPropertyDecl(nullptr, SourceLocation(),
+ DeclarationName(), QualType(), nullptr,
+ SourceLocation(), nullptr, nullptr);
+}
+
+static const char *getAccessName(AccessSpecifier AS) {
+ switch (AS) {
+ case AS_none:
+ llvm_unreachable("Invalid access specifier!");
+ case AS_public:
+ return "public";
+ case AS_private:
+ return "private";
+ case AS_protected:
+ return "protected";
+ }
+ llvm_unreachable("Invalid access specifier!");
+}
+
+const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
+ AccessSpecifier AS) {
+ return DB << getAccessName(AS);
+}
+
+const PartialDiagnostic &clang::operator<<(const PartialDiagnostic &DB,
+ AccessSpecifier AS) {
+ return DB << getAccessName(AS);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp b/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp
new file mode 100644
index 0000000..121403b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp
@@ -0,0 +1,69 @@
+//===--- DeclFriend.cpp - C++ Friend Declaration AST Node Implementation --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the AST classes related to C++ friend
+// declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclTemplate.h"
+using namespace clang;
+
+void FriendDecl::anchor() { }
+
+FriendDecl *FriendDecl::getNextFriendSlowCase() {
+ return cast_or_null<FriendDecl>(
+ NextFriend.get(getASTContext().getExternalSource()));
+}
+
+FriendDecl *FriendDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ FriendUnion Friend,
+ SourceLocation FriendL,
+ ArrayRef<TemplateParameterList*> FriendTypeTPLists) {
+#ifndef NDEBUG
+ if (Friend.is<NamedDecl*>()) {
+ NamedDecl *D = Friend.get<NamedDecl*>();
+ assert(isa<FunctionDecl>(D) ||
+ isa<CXXRecordDecl>(D) ||
+ isa<FunctionTemplateDecl>(D) ||
+ isa<ClassTemplateDecl>(D));
+
+ // As a temporary hack, we permit template instantiation to point
+ // to the original declaration when instantiating members.
+ assert(D->getFriendObjectKind() ||
+ (cast<CXXRecordDecl>(DC)->getTemplateSpecializationKind()));
+ // These template parameters are for friend types only.
+ assert(FriendTypeTPLists.size() == 0);
+ }
+#endif
+
+ std::size_t Extra =
+ FriendDecl::additionalSizeToAlloc<TemplateParameterList *>(
+ FriendTypeTPLists.size());
+ FriendDecl *FD = new (C, DC, Extra) FriendDecl(DC, L, Friend, FriendL,
+ FriendTypeTPLists);
+ cast<CXXRecordDecl>(DC)->pushFriendDecl(FD);
+ return FD;
+}
+
+FriendDecl *FriendDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+ unsigned FriendTypeNumTPLists) {
+ std::size_t Extra =
+ additionalSizeToAlloc<TemplateParameterList *>(FriendTypeNumTPLists);
+ return new (C, ID, Extra) FriendDecl(EmptyShell(), FriendTypeNumTPLists);
+}
+
+FriendDecl *CXXRecordDecl::getFirstFriend() const {
+ ExternalASTSource *Source = getParentASTContext().getExternalSource();
+ Decl *First = data().FirstFriend.get(Source);
+ return First ? cast<FriendDecl>(First) : nullptr;
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclGroup.cpp b/contrib/llvm/tools/clang/lib/AST/DeclGroup.cpp
new file mode 100644
index 0000000..f162e6d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/DeclGroup.cpp
@@ -0,0 +1,33 @@
+//===--- DeclGroup.cpp - Classes for representing groups of Decls -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DeclGroup and DeclGroupRef classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclGroup.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "llvm/Support/Allocator.h"
+using namespace clang;
+
+DeclGroup* DeclGroup::Create(ASTContext &C, Decl **Decls, unsigned NumDecls) {
+ assert(NumDecls > 1 && "Invalid DeclGroup");
+ unsigned Size = totalSizeToAlloc<Decl *>(NumDecls);
+ void* Mem = C.Allocate(Size, llvm::AlignOf<DeclGroup>::Alignment);
+ new (Mem) DeclGroup(NumDecls, Decls);
+ return static_cast<DeclGroup*>(Mem);
+}
+
+DeclGroup::DeclGroup(unsigned numdecls, Decl** decls) : NumDecls(numdecls) {
+ assert(numdecls > 0);
+ assert(decls);
+ std::uninitialized_copy(decls, decls + numdecls,
+ getTrailingObjects<Decl *>());
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp b/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
new file mode 100644
index 0000000..050a0f5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
@@ -0,0 +1,2158 @@
+//===--- DeclObjC.cpp - ObjC Declaration AST Node Implementation ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Objective-C related Decl classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Stmt.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// ObjCListBase
+//===----------------------------------------------------------------------===//
+
+void ObjCListBase::set(void *const* InList, unsigned Elts, ASTContext &Ctx) {
+ List = nullptr;
+ if (Elts == 0) return; // Setting to an empty list is a noop.
+
+ List = new (Ctx) void*[Elts];
+ NumElts = Elts;
+ memcpy(List, InList, sizeof(void*)*Elts);
+}
+
+void ObjCProtocolList::set(ObjCProtocolDecl* const* InList, unsigned Elts,
+ const SourceLocation *Locs, ASTContext &Ctx) {
+ if (Elts == 0)
+ return;
+
+ Locations = new (Ctx) SourceLocation[Elts];
+ memcpy(Locations, Locs, sizeof(SourceLocation) * Elts);
+ set(InList, Elts, Ctx);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCInterfaceDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCContainerDecl::anchor() { }
+
+/// getIvarDecl - This method looks up an ivar in this DeclContext.
+///
+ObjCIvarDecl *
+ObjCContainerDecl::getIvarDecl(IdentifierInfo *Id) const {
+ lookup_result R = lookup(Id);
+ for (lookup_iterator Ivar = R.begin(), IvarEnd = R.end();
+ Ivar != IvarEnd; ++Ivar) {
+ if (ObjCIvarDecl *ivar = dyn_cast<ObjCIvarDecl>(*Ivar))
+ return ivar;
+ }
+ return nullptr;
+}
+
+// Get the local instance/class method declared in this interface.
+ObjCMethodDecl *
+ObjCContainerDecl::getMethod(Selector Sel, bool isInstance,
+ bool AllowHidden) const {
+ // If this context is a hidden protocol definition, don't find any
+ // methods there.
+ if (const ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(this)) {
+ if (const ObjCProtocolDecl *Def = Proto->getDefinition())
+ if (Def->isHidden() && !AllowHidden)
+ return nullptr;
+ }
+
+ // Since instance & class methods can have the same name, the loop below
+ // ensures we get the correct method.
+ //
+ // @interface Whatever
+ // - (int) class_method;
+ // + (float) class_method;
+ // @end
+ //
+ lookup_result R = lookup(Sel);
+ for (lookup_iterator Meth = R.begin(), MethEnd = R.end();
+ Meth != MethEnd; ++Meth) {
+ ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(*Meth);
+ if (MD && MD->isInstanceMethod() == isInstance)
+ return MD;
+ }
+ return nullptr;
+}
+
+/// \brief This routine returns 'true' if a user-declared setter method was
+/// found in the class, its protocols, its superclasses, or its categories.
+/// It also returns 'true' if one of its categories has declared a 'readwrite'
+/// property, because the user must provide a setter method for a
+/// category's 'readwrite' property.
+bool ObjCContainerDecl::HasUserDeclaredSetterMethod(
+ const ObjCPropertyDecl *Property) const {
+ Selector Sel = Property->getSetterName();
+ lookup_result R = lookup(Sel);
+ for (lookup_iterator Meth = R.begin(), MethEnd = R.end();
+ Meth != MethEnd; ++Meth) {
+ ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(*Meth);
+ if (MD && MD->isInstanceMethod() && !MD->isImplicit())
+ return true;
+ }
+
+ if (const ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(this)) {
+ // Also look into categories, including class extensions, looking
+ // for a user declared instance method.
+ for (const auto *Cat : ID->visible_categories()) {
+ if (ObjCMethodDecl *MD = Cat->getInstanceMethod(Sel))
+ if (!MD->isImplicit())
+ return true;
+ if (Cat->IsClassExtension())
+ continue;
+ // Also search through the categories looking for a 'readwrite'
+      // declaration of this property. If one is found, presumably a setter
+      // will be provided (properties declared in categories will not get
+ // auto-synthesized).
+ for (const auto *P : Cat->properties())
+ if (P->getIdentifier() == Property->getIdentifier()) {
+ if (P->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readwrite)
+ return true;
+ break;
+ }
+ }
+
+ // Also look into protocols, for a user declared instance method.
+ for (const auto *Proto : ID->all_referenced_protocols())
+ if (Proto->HasUserDeclaredSetterMethod(Property))
+ return true;
+
+ // And in its super class.
+ ObjCInterfaceDecl *OSC = ID->getSuperClass();
+ while (OSC) {
+ if (OSC->HasUserDeclaredSetterMethod(Property))
+ return true;
+ OSC = OSC->getSuperClass();
+ }
+ }
+ if (const ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(this))
+ for (const auto *PI : PD->protocols())
+ if (PI->HasUserDeclaredSetterMethod(Property))
+ return true;
+ return false;
+}
+
+ObjCPropertyDecl *
+ObjCPropertyDecl::findPropertyDecl(const DeclContext *DC,
+ const IdentifierInfo *propertyID) {
+ // If this context is a hidden protocol definition, don't find any
+ // property.
+ if (const ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(DC)) {
+ if (const ObjCProtocolDecl *Def = Proto->getDefinition())
+ if (Def->isHidden())
+ return nullptr;
+ }
+
+  // If the context is a class, then look up the property in its extensions.
+  // This comes before the property is looked up in the primary class.
+ if (auto *IDecl = dyn_cast<ObjCInterfaceDecl>(DC)) {
+ for (const auto *Ext : IDecl->known_extensions())
+ if (ObjCPropertyDecl *PD = ObjCPropertyDecl::findPropertyDecl(Ext,
+ propertyID))
+ return PD;
+ }
+
+ DeclContext::lookup_result R = DC->lookup(propertyID);
+ for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E;
+ ++I)
+ if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(*I))
+ return PD;
+
+ return nullptr;
+}
+
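+/// getDefaultSynthIvarName - Returns the name the default-synthesized ivar
+/// backing this property would have: the property name prefixed with an
+/// underscore (e.g. property 'title' is backed by ivar '_title').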
+IdentifierInfo *
+ObjCPropertyDecl::getDefaultSynthIvarName(ASTContext &Ctx) const {
+ SmallString<128> ivarName;
+ {
+ llvm::raw_svector_ostream os(ivarName);
+ os << '_' << getIdentifier()->getName();
+ }
+ return &Ctx.Idents.get(ivarName.str());
+}
+
+/// FindPropertyDeclaration - Finds declaration of the property given its name
+/// in 'PropertyId' and returns it. It returns nullptr if not found.
+ObjCPropertyDecl *ObjCContainerDecl::FindPropertyDeclaration(
+ const IdentifierInfo *PropertyId) const {
+ // Don't find properties within hidden protocol definitions.
+ if (const ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(this)) {
+ if (const ObjCProtocolDecl *Def = Proto->getDefinition())
+ if (Def->isHidden())
+ return nullptr;
+ }
+
+ // Search the extensions of a class first; they override what's in
+ // the class itself.
+ if (const auto *ClassDecl = dyn_cast<ObjCInterfaceDecl>(this)) {
+ for (const auto *Ext : ClassDecl->visible_extensions()) {
+ if (auto *P = Ext->FindPropertyDeclaration(PropertyId))
+ return P;
+ }
+ }
+
+ if (ObjCPropertyDecl *PD =
+ ObjCPropertyDecl::findPropertyDecl(cast<DeclContext>(this), PropertyId))
+ return PD;
+
+ switch (getKind()) {
+ default:
+ break;
+ case Decl::ObjCProtocol: {
+ const ObjCProtocolDecl *PID = cast<ObjCProtocolDecl>(this);
+ for (const auto *I : PID->protocols())
+ if (ObjCPropertyDecl *P = I->FindPropertyDeclaration(PropertyId))
+ return P;
+ break;
+ }
+ case Decl::ObjCInterface: {
+ const ObjCInterfaceDecl *OID = cast<ObjCInterfaceDecl>(this);
+ // Look through categories (but not extensions; they were handled above).
+ for (const auto *Cat : OID->visible_categories()) {
+ if (!Cat->IsClassExtension())
+ if (ObjCPropertyDecl *P = Cat->FindPropertyDeclaration(PropertyId))
+ return P;
+ }
+
+ // Look through protocols.
+ for (const auto *I : OID->all_referenced_protocols())
+ if (ObjCPropertyDecl *P = I->FindPropertyDeclaration(PropertyId))
+ return P;
+
+ // Finally, check the super class.
+ if (const ObjCInterfaceDecl *superClass = OID->getSuperClass())
+ return superClass->FindPropertyDeclaration(PropertyId);
+ break;
+ }
+ case Decl::ObjCCategory: {
+ const ObjCCategoryDecl *OCD = cast<ObjCCategoryDecl>(this);
+ // Look through protocols.
+ if (!OCD->IsClassExtension())
+ for (const auto *I : OCD->protocols())
+ if (ObjCPropertyDecl *P = I->FindPropertyDeclaration(PropertyId))
+ return P;
+ break;
+ }
+ }
+ return nullptr;
+}
+
+void ObjCInterfaceDecl::anchor() { }
+
+ObjCTypeParamList *ObjCInterfaceDecl::getTypeParamList() const {
+ // If this particular declaration has a type parameter list, return it.
+ if (ObjCTypeParamList *written = getTypeParamListAsWritten())
+ return written;
+
+ // If there is a definition, return its type parameter list.
+ if (const ObjCInterfaceDecl *def = getDefinition())
+ return def->getTypeParamListAsWritten();
+
+ // Otherwise, look at previous declarations to determine whether any
+ // of them has a type parameter list, skipping over those
+ // declarations that do not.
+ for (auto decl = getMostRecentDecl(); decl; decl = decl->getPreviousDecl()) {
+ if (ObjCTypeParamList *written = decl->getTypeParamListAsWritten())
+ return written;
+ }
+
+ return nullptr;
+}
+
+void ObjCInterfaceDecl::setTypeParamList(ObjCTypeParamList *TPL) {
+ TypeParamList = TPL;
+ if (!TPL)
+ return;
+ // Set the declaration context of each of the type parameters.
+ for (auto typeParam : *TypeParamList)
+ typeParam->setDeclContext(this);
+}
+
+ObjCInterfaceDecl *ObjCInterfaceDecl::getSuperClass() const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return nullptr;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ if (const ObjCObjectType *superType = getSuperClassType()) {
+ if (ObjCInterfaceDecl *superDecl = superType->getInterface()) {
+ if (ObjCInterfaceDecl *superDef = superDecl->getDefinition())
+ return superDef;
+
+ return superDecl;
+ }
+ }
+
+ return nullptr;
+}
+
+SourceLocation ObjCInterfaceDecl::getSuperClassLoc() const {
+ if (TypeSourceInfo *superTInfo = getSuperClassTInfo())
+ return superTInfo->getTypeLoc().getLocStart();
+
+ return SourceLocation();
+}
+
+/// FindPropertyVisibleInPrimaryClass - Finds declaration of the property
+/// with name 'PropertyId' in the primary class; including those in protocols
+/// (direct or indirect) used by the primary class.
+///
+ObjCPropertyDecl *
+ObjCInterfaceDecl::FindPropertyVisibleInPrimaryClass(
+ IdentifierInfo *PropertyId) const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return nullptr;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ if (ObjCPropertyDecl *PD =
+ ObjCPropertyDecl::findPropertyDecl(cast<DeclContext>(this), PropertyId))
+ return PD;
+
+ // Look through protocols.
+ for (const auto *I : all_referenced_protocols())
+ if (ObjCPropertyDecl *P = I->FindPropertyDeclaration(PropertyId))
+ return P;
+
+ return nullptr;
+}
+
+void ObjCInterfaceDecl::collectPropertiesToImplement(PropertyMap &PM,
+ PropertyDeclOrder &PO) const {
+ for (auto *Prop : properties()) {
+ PM[Prop->getIdentifier()] = Prop;
+ PO.push_back(Prop);
+ }
+ for (const auto *Ext : known_extensions()) {
+ const ObjCCategoryDecl *ClassExt = Ext;
+ for (auto *Prop : ClassExt->properties()) {
+ PM[Prop->getIdentifier()] = Prop;
+ PO.push_back(Prop);
+ }
+ }
+ for (const auto *PI : all_referenced_protocols())
+ PI->collectPropertiesToImplement(PM, PO);
+  // Note: properties declared only in class extensions are still copied
+  // into the main @interface's property list, and therefore we don't
+  // explicitly have to search class extension properties.
+}
+
+bool ObjCInterfaceDecl::isArcWeakrefUnavailable() const {
+ const ObjCInterfaceDecl *Class = this;
+ while (Class) {
+ if (Class->hasAttr<ArcWeakrefUnavailableAttr>())
+ return true;
+ Class = Class->getSuperClass();
+ }
+ return false;
+}
+
+const ObjCInterfaceDecl *ObjCInterfaceDecl::isObjCRequiresPropertyDefs() const {
+ const ObjCInterfaceDecl *Class = this;
+ while (Class) {
+ if (Class->hasAttr<ObjCRequiresPropertyDefsAttr>())
+ return Class;
+ Class = Class->getSuperClass();
+ }
+ return nullptr;
+}
+
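+/// mergeClassExtensionProtocolList - Merge the protocol list of a class
+/// extension into the class's list of referenced protocols, dropping any
+/// protocol the class already conforms to.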
+void ObjCInterfaceDecl::mergeClassExtensionProtocolList(
+ ObjCProtocolDecl *const* ExtList, unsigned ExtNum,
+ ASTContext &C)
+{
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ if (data().AllReferencedProtocols.empty() &&
+ data().ReferencedProtocols.empty()) {
+ data().AllReferencedProtocols.set(ExtList, ExtNum, C);
+ return;
+ }
+
+  // Check for duplicate protocols in the class's protocol list.
+  // This is O(n*m), but duplicates are extremely rare and the number of
+  // protocols in a class or its extensions is very small.
+ SmallVector<ObjCProtocolDecl*, 8> ProtocolRefs;
+ for (unsigned i = 0; i < ExtNum; i++) {
+ bool protocolExists = false;
+ ObjCProtocolDecl *ProtoInExtension = ExtList[i];
+ for (auto *Proto : all_referenced_protocols()) {
+ if (C.ProtocolCompatibleWithProtocol(ProtoInExtension, Proto)) {
+ protocolExists = true;
+ break;
+ }
+ }
+    // Do we want to warn on a protocol in a class extension which
+    // already exists in the class? Probably not.
+ if (!protocolExists)
+ ProtocolRefs.push_back(ProtoInExtension);
+ }
+
+ if (ProtocolRefs.empty())
+ return;
+
+  // Merge ProtocolRefs into the class's protocol list.
+ ProtocolRefs.append(all_referenced_protocol_begin(),
+ all_referenced_protocol_end());
+
+ data().AllReferencedProtocols.set(ProtocolRefs.data(), ProtocolRefs.size(),C);
+}
+
+const ObjCInterfaceDecl *
+ObjCInterfaceDecl::findInterfaceWithDesignatedInitializers() const {
+ const ObjCInterfaceDecl *IFace = this;
+ while (IFace) {
+ if (IFace->hasDesignatedInitializers())
+ return IFace;
+ if (!IFace->inheritsDesignatedInitializers())
+ break;
+ IFace = IFace->getSuperClass();
+ }
+ return nullptr;
+}
+
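+/// Returns true if the class (including its visible extensions and its
+/// implementation) declares an 'init' family method of its own, i.e. one
+/// that is not overriding a superclass initializer.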
+static bool isIntroducingInitializers(const ObjCInterfaceDecl *D) {
+ for (const auto *MD : D->instance_methods()) {
+ if (MD->getMethodFamily() == OMF_init && !MD->isOverriding())
+ return true;
+ }
+ for (const auto *Ext : D->visible_extensions()) {
+ for (const auto *MD : Ext->instance_methods()) {
+ if (MD->getMethodFamily() == OMF_init && !MD->isOverriding())
+ return true;
+ }
+ }
+ if (const auto *ImplD = D->getImplementation()) {
+ for (const auto *MD : ImplD->instance_methods()) {
+ if (MD->getMethodFamily() == OMF_init && !MD->isOverriding())
+ return true;
+ }
+ }
+ return false;
+}
+
+bool ObjCInterfaceDecl::inheritsDesignatedInitializers() const {
+ switch (data().InheritedDesignatedInitializers) {
+ case DefinitionData::IDI_Inherited:
+ return true;
+ case DefinitionData::IDI_NotInherited:
+ return false;
+ case DefinitionData::IDI_Unknown: {
+    // If the class introduced initializers, we conservatively assume that we
+ // don't know if any of them is a designated initializer to avoid possible
+ // misleading warnings.
+ if (isIntroducingInitializers(this)) {
+ data().InheritedDesignatedInitializers = DefinitionData::IDI_NotInherited;
+ } else {
+ if (auto SuperD = getSuperClass()) {
+ data().InheritedDesignatedInitializers =
+ SuperD->declaresOrInheritsDesignatedInitializers() ?
+ DefinitionData::IDI_Inherited :
+ DefinitionData::IDI_NotInherited;
+ } else {
+ data().InheritedDesignatedInitializers =
+ DefinitionData::IDI_NotInherited;
+ }
+ }
+ assert(data().InheritedDesignatedInitializers
+ != DefinitionData::IDI_Unknown);
+ return data().InheritedDesignatedInitializers ==
+ DefinitionData::IDI_Inherited;
+ }
+ }
+
+ llvm_unreachable("unexpected InheritedDesignatedInitializers value");
+}
+
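+/// Collect the designated initializers of this class, taken from the nearest
+/// class in the hierarchy that declares any (see
+/// findInterfaceWithDesignatedInitializers).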
+void ObjCInterfaceDecl::getDesignatedInitializers(
+ llvm::SmallVectorImpl<const ObjCMethodDecl *> &Methods) const {
+ // Check for a complete definition and recover if not so.
+ if (!isThisDeclarationADefinition())
+ return;
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+  const ObjCInterfaceDecl *IFace = findInterfaceWithDesignatedInitializers();
+ if (!IFace)
+ return;
+
+ for (const auto *MD : IFace->instance_methods())
+ if (MD->isThisDeclarationADesignatedInitializer())
+ Methods.push_back(MD);
+ for (const auto *Ext : IFace->visible_extensions()) {
+ for (const auto *MD : Ext->instance_methods())
+ if (MD->isThisDeclarationADesignatedInitializer())
+ Methods.push_back(MD);
+ }
+}
+
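+/// Returns true if 'Sel' names a designated initializer of this class, again
+/// consulting the nearest class in the hierarchy that declares designated
+/// initializers. Illustrative declaration (NS_DESIGNATED_INITIALIZER is the
+/// Foundation spelling of the objc_designated_initializer attribute):
+/// \code
+///   - (instancetype)initWithName:(NSString *)name NS_DESIGNATED_INITIALIZER;
+/// \endcode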
+bool ObjCInterfaceDecl::isDesignatedInitializer(Selector Sel,
+ const ObjCMethodDecl **InitMethod) const {
+ // Check for a complete definition and recover if not so.
+ if (!isThisDeclarationADefinition())
+ return false;
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+  const ObjCInterfaceDecl *IFace = findInterfaceWithDesignatedInitializers();
+ if (!IFace)
+ return false;
+
+ if (const ObjCMethodDecl *MD = IFace->getInstanceMethod(Sel)) {
+ if (MD->isThisDeclarationADesignatedInitializer()) {
+ if (InitMethod)
+ *InitMethod = MD;
+ return true;
+ }
+ }
+ for (const auto *Ext : IFace->visible_extensions()) {
+ if (const ObjCMethodDecl *MD = Ext->getInstanceMethod(Sel)) {
+ if (MD->isThisDeclarationADesignatedInitializer()) {
+ if (InitMethod)
+ *InitMethod = MD;
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+void ObjCInterfaceDecl::allocateDefinitionData() {
+ assert(!hasDefinition() && "ObjC class already has a definition");
+ Data.setPointer(new (getASTContext()) DefinitionData());
+ Data.getPointer()->Definition = this;
+
+ // Make the type point at the definition, now that we have one.
+ if (TypeForDecl)
+ cast<ObjCInterfaceType>(TypeForDecl)->Decl = this;
+}
+
+void ObjCInterfaceDecl::startDefinition() {
+ allocateDefinitionData();
+
+ // Update all of the declarations with a pointer to the definition.
+ for (auto RD : redecls()) {
+ if (RD != this)
+ RD->Data = Data;
+ }
+}
+
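+/// lookupInstanceVariable - Look up an ivar by name in this class, its class
+/// extensions, and then its superclass chain; on success, 'clsDeclared' is
+/// set to the class that actually declares the ivar.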
+ObjCIvarDecl *ObjCInterfaceDecl::lookupInstanceVariable(IdentifierInfo *ID,
+ ObjCInterfaceDecl *&clsDeclared) {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return nullptr;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ ObjCInterfaceDecl* ClassDecl = this;
+ while (ClassDecl != nullptr) {
+ if (ObjCIvarDecl *I = ClassDecl->getIvarDecl(ID)) {
+ clsDeclared = ClassDecl;
+ return I;
+ }
+
+ for (const auto *Ext : ClassDecl->visible_extensions()) {
+ if (ObjCIvarDecl *I = Ext->getIvarDecl(ID)) {
+ clsDeclared = ClassDecl;
+ return I;
+ }
+ }
+
+ ClassDecl = ClassDecl->getSuperClass();
+ }
+ return nullptr;
+}
+
+/// lookupInheritedClass - This method returns the ObjCInterfaceDecl * of the
+/// super class whose name is passed as the argument. If it is not one of the
+/// super classes, it returns nullptr.
+ObjCInterfaceDecl *ObjCInterfaceDecl::lookupInheritedClass(
+ const IdentifierInfo*ICName) {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return nullptr;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ ObjCInterfaceDecl* ClassDecl = this;
+ while (ClassDecl != nullptr) {
+ if (ClassDecl->getIdentifier() == ICName)
+ return ClassDecl;
+ ClassDecl = ClassDecl->getSuperClass();
+ }
+ return nullptr;
+}
+
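+/// Look up a protocol by name among the protocols referenced by this class
+/// (searching each referenced protocol's inherited protocols as well), then
+/// up the superclass chain.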
+ObjCProtocolDecl *
+ObjCInterfaceDecl::lookupNestedProtocol(IdentifierInfo *Name) {
+ for (auto *P : all_referenced_protocols())
+ if (P->lookupProtocolNamed(Name))
+ return P;
+ ObjCInterfaceDecl *SuperClass = getSuperClass();
+ return SuperClass ? SuperClass->lookupNestedProtocol(Name) : nullptr;
+}
+
+/// lookupMethod - This method returns an instance/class method by looking in
+/// the class, its categories, and its super classes (using a linear search).
+/// When argument category "C" is specified, any implicit method found
+/// in this category is ignored.
+ObjCMethodDecl *ObjCInterfaceDecl::lookupMethod(Selector Sel,
+ bool isInstance,
+ bool shallowCategoryLookup,
+ bool followSuper,
+ const ObjCCategoryDecl *C) const
+{
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return nullptr;
+
+ const ObjCInterfaceDecl* ClassDecl = this;
+ ObjCMethodDecl *MethodDecl = nullptr;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ while (ClassDecl) {
+ // 1. Look through primary class.
+ if ((MethodDecl = ClassDecl->getMethod(Sel, isInstance)))
+ return MethodDecl;
+
+ // 2. Didn't find one yet - now look through categories.
+ for (const auto *Cat : ClassDecl->visible_categories())
+ if ((MethodDecl = Cat->getMethod(Sel, isInstance)))
+ if (C != Cat || !MethodDecl->isImplicit())
+ return MethodDecl;
+
+ // 3. Didn't find one yet - look through primary class's protocols.
+ for (const auto *I : ClassDecl->protocols())
+ if ((MethodDecl = I->lookupMethod(Sel, isInstance)))
+ return MethodDecl;
+
+ // 4. Didn't find one yet - now look through categories' protocols
+ if (!shallowCategoryLookup)
+ for (const auto *Cat : ClassDecl->visible_categories()) {
+ // Didn't find one yet - look through protocols.
+ const ObjCList<ObjCProtocolDecl> &Protocols =
+ Cat->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end(); I != E; ++I)
+ if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance)))
+ if (C != Cat || !MethodDecl->isImplicit())
+ return MethodDecl;
+ }
+
+ if (!followSuper)
+ return nullptr;
+
+ // 5. Get to the super class (if any).
+ ClassDecl = ClassDecl->getSuperClass();
+ }
+ return nullptr;
+}
+
+// Will search "local" class/category implementations for a method decl.
+// If that fails, we search the class's root class for an instance method.
+// Returns nullptr if no method is found.
+ObjCMethodDecl *ObjCInterfaceDecl::lookupPrivateMethod(
+ const Selector &Sel,
+ bool Instance) const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return nullptr;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ ObjCMethodDecl *Method = nullptr;
+ if (ObjCImplementationDecl *ImpDecl = getImplementation())
+ Method = Instance ? ImpDecl->getInstanceMethod(Sel)
+ : ImpDecl->getClassMethod(Sel);
+
+ // Look through local category implementations associated with the class.
+ if (!Method)
+ Method = getCategoryMethod(Sel, Instance);
+
+ // Before we give up, check if the selector is an instance method.
+ // But only in the root. This matches gcc's behavior and what the
+ // runtime expects.
+ if (!Instance && !Method && !getSuperClass()) {
+ Method = lookupInstanceMethod(Sel);
+ // Look through local category implementations associated
+ // with the root class.
+ if (!Method)
+ Method = lookupPrivateMethod(Sel, true);
+ }
+
+ if (!Method && getSuperClass())
+ return getSuperClass()->lookupPrivateMethod(Sel, Instance);
+ return Method;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCMethodDecl
+//===----------------------------------------------------------------------===//
+
+ObjCMethodDecl *ObjCMethodDecl::Create(
+ ASTContext &C, SourceLocation beginLoc, SourceLocation endLoc,
+ Selector SelInfo, QualType T, TypeSourceInfo *ReturnTInfo,
+ DeclContext *contextDecl, bool isInstance, bool isVariadic,
+ bool isPropertyAccessor, bool isImplicitlyDeclared, bool isDefined,
+ ImplementationControl impControl, bool HasRelatedResultType) {
+ return new (C, contextDecl) ObjCMethodDecl(
+ beginLoc, endLoc, SelInfo, T, ReturnTInfo, contextDecl, isInstance,
+ isVariadic, isPropertyAccessor, isImplicitlyDeclared, isDefined,
+ impControl, HasRelatedResultType);
+}
+
+ObjCMethodDecl *ObjCMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) ObjCMethodDecl(SourceLocation(), SourceLocation(),
+ Selector(), QualType(), nullptr, nullptr);
+}
+
+bool ObjCMethodDecl::isThisDeclarationADesignatedInitializer() const {
+ return getMethodFamily() == OMF_init &&
+ hasAttr<ObjCDesignatedInitializerAttr>();
+}
+
+bool ObjCMethodDecl::isDesignatedInitializerForTheInterface(
+ const ObjCMethodDecl **InitMethod) const {
+ if (getMethodFamily() != OMF_init)
+ return false;
+ const DeclContext *DC = getDeclContext();
+ if (isa<ObjCProtocolDecl>(DC))
+ return false;
+ if (const ObjCInterfaceDecl *ID = getClassInterface())
+ return ID->isDesignatedInitializer(getSelector(), InitMethod);
+ return false;
+}
+
+Stmt *ObjCMethodDecl::getBody() const {
+ return Body.get(getASTContext().getExternalSource());
+}
+
+void ObjCMethodDecl::setAsRedeclaration(const ObjCMethodDecl *PrevMethod) {
+ assert(PrevMethod);
+ getASTContext().setObjCMethodRedeclaration(PrevMethod, this);
+ IsRedeclaration = true;
+ PrevMethod->HasRedeclaration = true;
+}
+
+void ObjCMethodDecl::setParamsAndSelLocs(ASTContext &C,
+ ArrayRef<ParmVarDecl*> Params,
+ ArrayRef<SourceLocation> SelLocs) {
+ ParamsAndSelLocs = nullptr;
+ NumParams = Params.size();
+ if (Params.empty() && SelLocs.empty())
+ return;
+
+ static_assert(llvm::AlignOf<ParmVarDecl *>::Alignment >=
+ llvm::AlignOf<SourceLocation>::Alignment,
+ "Alignment not sufficient for SourceLocation");
+
+ unsigned Size = sizeof(ParmVarDecl *) * NumParams +
+ sizeof(SourceLocation) * SelLocs.size();
+ ParamsAndSelLocs = C.Allocate(Size);
+ std::copy(Params.begin(), Params.end(), getParams());
+ std::copy(SelLocs.begin(), SelLocs.end(), getStoredSelLocs());
+}
+
+void ObjCMethodDecl::getSelectorLocs(
+ SmallVectorImpl<SourceLocation> &SelLocs) const {
+ for (unsigned i = 0, e = getNumSelectorLocs(); i != e; ++i)
+ SelLocs.push_back(getSelectorLoc(i));
+}
+
+void ObjCMethodDecl::setMethodParams(ASTContext &C,
+ ArrayRef<ParmVarDecl*> Params,
+ ArrayRef<SourceLocation> SelLocs) {
+ assert((!SelLocs.empty() || isImplicit()) &&
+ "No selector locs for non-implicit method");
+ if (isImplicit())
+ return setParamsAndSelLocs(C, Params, llvm::None);
+
+ SelLocsKind = hasStandardSelectorLocs(getSelector(), SelLocs, Params,
+ DeclEndLoc);
+ if (SelLocsKind != SelLoc_NonStandard)
+ return setParamsAndSelLocs(C, Params, llvm::None);
+
+ setParamsAndSelLocs(C, Params, SelLocs);
+}
+
+/// \brief A definition will return its interface declaration.
+/// An interface declaration will return its definition.
+/// Otherwise it will return itself.
+ObjCMethodDecl *ObjCMethodDecl::getNextRedeclarationImpl() {
+ ASTContext &Ctx = getASTContext();
+ ObjCMethodDecl *Redecl = nullptr;
+ if (HasRedeclaration)
+ Redecl = const_cast<ObjCMethodDecl*>(Ctx.getObjCMethodRedeclaration(this));
+ if (Redecl)
+ return Redecl;
+
+ Decl *CtxD = cast<Decl>(getDeclContext());
+
+ if (!CtxD->isInvalidDecl()) {
+ if (ObjCInterfaceDecl *IFD = dyn_cast<ObjCInterfaceDecl>(CtxD)) {
+ if (ObjCImplementationDecl *ImplD = Ctx.getObjCImplementation(IFD))
+ if (!ImplD->isInvalidDecl())
+ Redecl = ImplD->getMethod(getSelector(), isInstanceMethod());
+
+ } else if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(CtxD)) {
+ if (ObjCCategoryImplDecl *ImplD = Ctx.getObjCImplementation(CD))
+ if (!ImplD->isInvalidDecl())
+ Redecl = ImplD->getMethod(getSelector(), isInstanceMethod());
+
+ } else if (ObjCImplementationDecl *ImplD =
+ dyn_cast<ObjCImplementationDecl>(CtxD)) {
+ if (ObjCInterfaceDecl *IFD = ImplD->getClassInterface())
+ if (!IFD->isInvalidDecl())
+ Redecl = IFD->getMethod(getSelector(), isInstanceMethod());
+
+ } else if (ObjCCategoryImplDecl *CImplD =
+ dyn_cast<ObjCCategoryImplDecl>(CtxD)) {
+ if (ObjCCategoryDecl *CatD = CImplD->getCategoryDecl())
+ if (!CatD->isInvalidDecl())
+ Redecl = CatD->getMethod(getSelector(), isInstanceMethod());
+ }
+ }
+
+ if (!Redecl && isRedeclaration()) {
+ // This is the last redeclaration, go back to the first method.
+ return cast<ObjCContainerDecl>(CtxD)->getMethod(getSelector(),
+ isInstanceMethod());
+ }
+
+ return Redecl ? Redecl : this;
+}
+
+ObjCMethodDecl *ObjCMethodDecl::getCanonicalDecl() {
+ Decl *CtxD = cast<Decl>(getDeclContext());
+
+ if (ObjCImplementationDecl *ImplD = dyn_cast<ObjCImplementationDecl>(CtxD)) {
+ if (ObjCInterfaceDecl *IFD = ImplD->getClassInterface())
+ if (ObjCMethodDecl *MD = IFD->getMethod(getSelector(),
+ isInstanceMethod()))
+ return MD;
+
+ } else if (ObjCCategoryImplDecl *CImplD =
+ dyn_cast<ObjCCategoryImplDecl>(CtxD)) {
+ if (ObjCCategoryDecl *CatD = CImplD->getCategoryDecl())
+ if (ObjCMethodDecl *MD = CatD->getMethod(getSelector(),
+ isInstanceMethod()))
+ return MD;
+ }
+
+ if (isRedeclaration())
+ return cast<ObjCContainerDecl>(CtxD)->getMethod(getSelector(),
+ isInstanceMethod());
+
+ return this;
+}
+
+SourceLocation ObjCMethodDecl::getLocEnd() const {
+ if (Stmt *Body = getBody())
+ return Body->getLocEnd();
+ return DeclEndLoc;
+}
+
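+/// Determine the family (alloc, init, copy, ...) of this method. An explicit
+/// objc_method_family attribute takes precedence; otherwise the family is
+/// inferred from the selector and validated against the method's shape.
+/// The result is cached in 'Family'.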
+ObjCMethodFamily ObjCMethodDecl::getMethodFamily() const {
+ ObjCMethodFamily family = static_cast<ObjCMethodFamily>(Family);
+ if (family != static_cast<unsigned>(InvalidObjCMethodFamily))
+ return family;
+
+ // Check for an explicit attribute.
+ if (const ObjCMethodFamilyAttr *attr = getAttr<ObjCMethodFamilyAttr>()) {
+ // The unfortunate necessity of mapping between enums here is due
+ // to the attributes framework.
+ switch (attr->getFamily()) {
+ case ObjCMethodFamilyAttr::OMF_None: family = OMF_None; break;
+ case ObjCMethodFamilyAttr::OMF_alloc: family = OMF_alloc; break;
+ case ObjCMethodFamilyAttr::OMF_copy: family = OMF_copy; break;
+ case ObjCMethodFamilyAttr::OMF_init: family = OMF_init; break;
+ case ObjCMethodFamilyAttr::OMF_mutableCopy: family = OMF_mutableCopy; break;
+ case ObjCMethodFamilyAttr::OMF_new: family = OMF_new; break;
+ }
+ Family = static_cast<unsigned>(family);
+ return family;
+ }
+
+ family = getSelector().getMethodFamily();
+ switch (family) {
+ case OMF_None: break;
+
+ // init only has a conventional meaning for an instance method, and
+ // it has to return an object.
+ case OMF_init:
+ if (!isInstanceMethod() || !getReturnType()->isObjCObjectPointerType())
+ family = OMF_None;
+ break;
+
+ // alloc/copy/new have a conventional meaning for both class and
+ // instance methods, but they require an object return.
+ case OMF_alloc:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ case OMF_new:
+ if (!getReturnType()->isObjCObjectPointerType())
+ family = OMF_None;
+ break;
+
+ // These selectors have a conventional meaning only for instance methods.
+ case OMF_dealloc:
+ case OMF_finalize:
+ case OMF_retain:
+ case OMF_release:
+ case OMF_autorelease:
+ case OMF_retainCount:
+ case OMF_self:
+ if (!isInstanceMethod())
+ family = OMF_None;
+ break;
+
+ case OMF_initialize:
+ if (isInstanceMethod() || !getReturnType()->isVoidType())
+ family = OMF_None;
+ break;
+
+ case OMF_performSelector:
+ if (!isInstanceMethod() || !getReturnType()->isObjCIdType())
+ family = OMF_None;
+ else {
+ unsigned noParams = param_size();
+ if (noParams < 1 || noParams > 3)
+ family = OMF_None;
+ else {
+ ObjCMethodDecl::param_type_iterator it = param_type_begin();
+ QualType ArgT = (*it);
+ if (!ArgT->isObjCSelType()) {
+ family = OMF_None;
+ break;
+ }
+ while (--noParams) {
+ it++;
+ ArgT = (*it);
+ if (!ArgT->isObjCIdType()) {
+ family = OMF_None;
+ break;
+ }
+ }
+ }
+ }
+ break;
+
+ }
+
+ // Cache the result.
+ Family = static_cast<unsigned>(family);
+ return family;
+}
+
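+/// Compute the type of the implicit 'self' parameter: a pointer to the
+/// interface type for instance methods (falling back to 'id' on invalid
+/// declarations), or 'Class' for class methods, with ARC ownership
+/// qualifiers applied under -fobjc-arc.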
+QualType ObjCMethodDecl::getSelfType(ASTContext &Context,
+ const ObjCInterfaceDecl *OID,
+ bool &selfIsPseudoStrong,
+ bool &selfIsConsumed) {
+ QualType selfTy;
+ selfIsPseudoStrong = false;
+ selfIsConsumed = false;
+ if (isInstanceMethod()) {
+    // There may be no interface context due to an error in the declaration
+    // of the interface (which has already been reported). Recover gracefully.
+ if (OID) {
+ selfTy = Context.getObjCInterfaceType(OID);
+ selfTy = Context.getObjCObjectPointerType(selfTy);
+ } else {
+ selfTy = Context.getObjCIdType();
+ }
+ } else // we have a factory method.
+ selfTy = Context.getObjCClassType();
+
+ if (Context.getLangOpts().ObjCAutoRefCount) {
+ if (isInstanceMethod()) {
+ selfIsConsumed = hasAttr<NSConsumesSelfAttr>();
+
+ // 'self' is always __strong. It's actually pseudo-strong except
+ // in init methods (or methods labeled ns_consumes_self), though.
+ Qualifiers qs;
+ qs.setObjCLifetime(Qualifiers::OCL_Strong);
+ selfTy = Context.getQualifiedType(selfTy, qs);
+
+ // In addition, 'self' is const unless this is an init method.
+ if (getMethodFamily() != OMF_init && !selfIsConsumed) {
+ selfTy = selfTy.withConst();
+ selfIsPseudoStrong = true;
+ }
+ }
+ else {
+ assert(isClassMethod());
+ // 'self' is always const in class methods.
+ selfTy = selfTy.withConst();
+ selfIsPseudoStrong = true;
+ }
+ }
+ return selfTy;
+}
+
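+/// createImplicitParams - Build the implicit 'self' and '_cmd' parameters
+/// that every Objective-C method body receives.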
+void ObjCMethodDecl::createImplicitParams(ASTContext &Context,
+ const ObjCInterfaceDecl *OID) {
+ bool selfIsPseudoStrong, selfIsConsumed;
+ QualType selfTy =
+ getSelfType(Context, OID, selfIsPseudoStrong, selfIsConsumed);
+ ImplicitParamDecl *self
+ = ImplicitParamDecl::Create(Context, this, SourceLocation(),
+ &Context.Idents.get("self"), selfTy);
+ setSelfDecl(self);
+
+ if (selfIsConsumed)
+ self->addAttr(NSConsumedAttr::CreateImplicit(Context));
+
+ if (selfIsPseudoStrong)
+ self->setARCPseudoStrong(true);
+
+ setCmdDecl(ImplicitParamDecl::Create(Context, this, SourceLocation(),
+ &Context.Idents.get("_cmd"),
+ Context.getObjCSelType()));
+}
+
+ObjCInterfaceDecl *ObjCMethodDecl::getClassInterface() {
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(getDeclContext()))
+ return ID;
+ if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(getDeclContext()))
+ return CD->getClassInterface();
+ if (ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(getDeclContext()))
+ return IMD->getClassInterface();
+ if (isa<ObjCProtocolDecl>(getDeclContext()))
+ return nullptr;
+ llvm_unreachable("unknown method context");
+}
+
+SourceRange ObjCMethodDecl::getReturnTypeSourceRange() const {
+ const auto *TSI = getReturnTypeSourceInfo();
+ if (TSI)
+ return TSI->getTypeLoc().getSourceRange();
+ return SourceRange();
+}
+
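+/// getSendResultType - The type that a message send of this method produces:
+/// the declared result type adjusted for use as an expression, with any
+/// Objective-C type parameters substituted away.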
+QualType ObjCMethodDecl::getSendResultType() const {
+ ASTContext &Ctx = getASTContext();
+ return getReturnType().getNonLValueExprType(Ctx)
+ .substObjCTypeArgs(Ctx, {}, ObjCSubstitutionContext::Result);
+}
+
+QualType ObjCMethodDecl::getSendResultType(QualType receiverType) const {
+ // FIXME: Handle related result types here.
+
+ return getReturnType().getNonLValueExprType(getASTContext())
+ .substObjCMemberType(receiverType, getDeclContext(),
+ ObjCSubstitutionContext::Result);
+}
+
+static void CollectOverriddenMethodsRecurse(const ObjCContainerDecl *Container,
+ const ObjCMethodDecl *Method,
+ SmallVectorImpl<const ObjCMethodDecl *> &Methods,
+ bool MovedToSuper) {
+ if (!Container)
+ return;
+
+  // In categories, look for overridden methods from protocols. A method from
+  // a category is not "overridden" since it is considered the "same" method
+  // (same USR) as the one from the interface.
+ if (const ObjCCategoryDecl *
+ Category = dyn_cast<ObjCCategoryDecl>(Container)) {
+ // Check whether we have a matching method at this category but only if we
+ // are at the super class level.
+ if (MovedToSuper)
+ if (ObjCMethodDecl *
+ Overridden = Container->getMethod(Method->getSelector(),
+ Method->isInstanceMethod(),
+ /*AllowHidden=*/true))
+ if (Method != Overridden) {
+ // We found an override at this category; there is no need to look
+ // into its protocols.
+ Methods.push_back(Overridden);
+ return;
+ }
+
+ for (const auto *P : Category->protocols())
+ CollectOverriddenMethodsRecurse(P, Method, Methods, MovedToSuper);
+ return;
+ }
+
+ // Check whether we have a matching method at this level.
+ if (const ObjCMethodDecl *
+ Overridden = Container->getMethod(Method->getSelector(),
+ Method->isInstanceMethod(),
+ /*AllowHidden=*/true))
+ if (Method != Overridden) {
+ // We found an override at this level; there is no need to look
+ // into other protocols or categories.
+ Methods.push_back(Overridden);
+ return;
+ }
+
+ if (const ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)){
+ for (const auto *P : Protocol->protocols())
+ CollectOverriddenMethodsRecurse(P, Method, Methods, MovedToSuper);
+ }
+
+ if (const ObjCInterfaceDecl *
+ Interface = dyn_cast<ObjCInterfaceDecl>(Container)) {
+ for (const auto *P : Interface->protocols())
+ CollectOverriddenMethodsRecurse(P, Method, Methods, MovedToSuper);
+
+ for (const auto *Cat : Interface->known_categories())
+ CollectOverriddenMethodsRecurse(Cat, Method, Methods, MovedToSuper);
+
+ if (const ObjCInterfaceDecl *Super = Interface->getSuperClass())
+ return CollectOverriddenMethodsRecurse(Super, Method, Methods,
+ /*MovedToSuper=*/true);
+ }
+}
+
+static inline void CollectOverriddenMethods(const ObjCContainerDecl *Container,
+ const ObjCMethodDecl *Method,
+ SmallVectorImpl<const ObjCMethodDecl *> &Methods) {
+ CollectOverriddenMethodsRecurse(Container, Method, Methods,
+ /*MovedToSuper=*/false);
+}
+
+static void collectOverriddenMethodsSlow(const ObjCMethodDecl *Method,
+ SmallVectorImpl<const ObjCMethodDecl *> &overridden) {
+ assert(Method->isOverriding());
+
+ if (const ObjCProtocolDecl *
+ ProtD = dyn_cast<ObjCProtocolDecl>(Method->getDeclContext())) {
+ CollectOverriddenMethods(ProtD, Method, overridden);
+
+ } else if (const ObjCImplDecl *
+ IMD = dyn_cast<ObjCImplDecl>(Method->getDeclContext())) {
+ const ObjCInterfaceDecl *ID = IMD->getClassInterface();
+ if (!ID)
+ return;
+ // Start searching for overridden methods using the method from the
+ // interface as starting point.
+ if (const ObjCMethodDecl *IFaceMeth = ID->getMethod(Method->getSelector(),
+ Method->isInstanceMethod(),
+ /*AllowHidden=*/true))
+ Method = IFaceMeth;
+ CollectOverriddenMethods(ID, Method, overridden);
+
+ } else if (const ObjCCategoryDecl *
+ CatD = dyn_cast<ObjCCategoryDecl>(Method->getDeclContext())) {
+ const ObjCInterfaceDecl *ID = CatD->getClassInterface();
+ if (!ID)
+ return;
+ // Start searching for overridden methods using the method from the
+ // interface as starting point.
+ if (const ObjCMethodDecl *IFaceMeth = ID->getMethod(Method->getSelector(),
+ Method->isInstanceMethod(),
+ /*AllowHidden=*/true))
+ Method = IFaceMeth;
+ CollectOverriddenMethods(ID, Method, overridden);
+
+ } else {
+ CollectOverriddenMethods(
+ dyn_cast_or_null<ObjCContainerDecl>(Method->getDeclContext()),
+ Method, overridden);
+ }
+}
+
+void ObjCMethodDecl::getOverriddenMethods(
+ SmallVectorImpl<const ObjCMethodDecl *> &Overridden) const {
+ const ObjCMethodDecl *Method = this;
+
+ if (Method->isRedeclaration()) {
+ Method = cast<ObjCContainerDecl>(Method->getDeclContext())->
+ getMethod(Method->getSelector(), Method->isInstanceMethod());
+ }
+
+ if (Method->isOverriding()) {
+ collectOverriddenMethodsSlow(Method, Overridden);
+ assert(!Overridden.empty() &&
+ "ObjCMethodDecl's overriding bit is not as expected");
+ }
+}
+
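+/// findPropertyDecl - If this method is a property accessor, find the
+/// property it gets or sets; when 'CheckOverrides' is set, methods this one
+/// overrides are consulted as well.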
+const ObjCPropertyDecl *
+ObjCMethodDecl::findPropertyDecl(bool CheckOverrides) const {
+ Selector Sel = getSelector();
+ unsigned NumArgs = Sel.getNumArgs();
+ if (NumArgs > 1)
+ return nullptr;
+
+ if (!isInstanceMethod())
+ return nullptr;
+
+ if (isPropertyAccessor()) {
+ const ObjCContainerDecl *Container = cast<ObjCContainerDecl>(getParent());
+ bool IsGetter = (NumArgs == 0);
+
+ /// Local function that attempts to find a matching property within the
+ /// given Objective-C container.
+ auto findMatchingProperty =
+ [&](const ObjCContainerDecl *Container) -> const ObjCPropertyDecl * {
+
+ for (const auto *I : Container->properties()) {
+ Selector NextSel = IsGetter ? I->getGetterName()
+ : I->getSetterName();
+ if (NextSel == Sel)
+ return I;
+ }
+
+ return nullptr;
+ };
+
+ // Look in the container we were given.
+ if (const auto *Found = findMatchingProperty(Container))
+ return Found;
+
+ // If we're in a category or extension, look in the main class.
+ const ObjCInterfaceDecl *ClassDecl = nullptr;
+ if (const auto *Category = dyn_cast<ObjCCategoryDecl>(Container)) {
+ ClassDecl = Category->getClassInterface();
+ if (const auto *Found = findMatchingProperty(ClassDecl))
+ return Found;
+ } else {
+ // Determine whether the container is a class.
+ ClassDecl = dyn_cast<ObjCInterfaceDecl>(Container);
+ }
+
+ // If we have a class, check its visible extensions.
+ if (ClassDecl) {
+ for (const auto *Ext : ClassDecl->visible_extensions()) {
+ if (Ext == Container)
+ continue;
+
+ if (const auto *Found = findMatchingProperty(Ext))
+ return Found;
+ }
+ }
+
+ llvm_unreachable("Marked as a property accessor but no property found!");
+ }
+
+ if (!CheckOverrides)
+ return nullptr;
+
+ typedef SmallVector<const ObjCMethodDecl *, 8> OverridesTy;
+ OverridesTy Overrides;
+ getOverriddenMethods(Overrides);
+ for (OverridesTy::const_iterator I = Overrides.begin(), E = Overrides.end();
+ I != E; ++I) {
+ if (const ObjCPropertyDecl *Prop = (*I)->findPropertyDecl(false))
+ return Prop;
+ }
+
+ return nullptr;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCTypeParamDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCTypeParamDecl::anchor() { }
+
+ObjCTypeParamDecl *ObjCTypeParamDecl::Create(ASTContext &ctx, DeclContext *dc,
+ ObjCTypeParamVariance variance,
+ SourceLocation varianceLoc,
+ unsigned index,
+ SourceLocation nameLoc,
+ IdentifierInfo *name,
+ SourceLocation colonLoc,
+ TypeSourceInfo *boundInfo) {
+ return new (ctx, dc) ObjCTypeParamDecl(ctx, dc, variance, varianceLoc, index,
+ nameLoc, name, colonLoc, boundInfo);
+}
+
+ObjCTypeParamDecl *ObjCTypeParamDecl::CreateDeserialized(ASTContext &ctx,
+ unsigned ID) {
+ return new (ctx, ID) ObjCTypeParamDecl(ctx, nullptr,
+ ObjCTypeParamVariance::Invariant,
+ SourceLocation(), 0, SourceLocation(),
+ nullptr, SourceLocation(), nullptr);
+}
+
+SourceRange ObjCTypeParamDecl::getSourceRange() const {
+ SourceLocation startLoc = VarianceLoc;
+ if (startLoc.isInvalid())
+ startLoc = getLocation();
+
+ if (hasExplicitBound()) {
+ return SourceRange(startLoc,
+ getTypeSourceInfo()->getTypeLoc().getEndLoc());
+ }
+
+ return SourceRange(startLoc);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCTypeParamList
+//===----------------------------------------------------------------------===//
+ObjCTypeParamList::ObjCTypeParamList(SourceLocation lAngleLoc,
+ ArrayRef<ObjCTypeParamDecl *> typeParams,
+ SourceLocation rAngleLoc)
+ : NumParams(typeParams.size())
+{
+ Brackets.Begin = lAngleLoc.getRawEncoding();
+ Brackets.End = rAngleLoc.getRawEncoding();
+ std::copy(typeParams.begin(), typeParams.end(), begin());
+}
+
+ObjCTypeParamList *ObjCTypeParamList::create(
+ ASTContext &ctx,
+ SourceLocation lAngleLoc,
+ ArrayRef<ObjCTypeParamDecl *> typeParams,
+ SourceLocation rAngleLoc) {
+ void *mem =
+ ctx.Allocate(totalSizeToAlloc<ObjCTypeParamDecl *>(typeParams.size()),
+ llvm::alignOf<ObjCTypeParamList>());
+ return new (mem) ObjCTypeParamList(lAngleLoc, typeParams, rAngleLoc);
+}
+
+void ObjCTypeParamList::gatherDefaultTypeArgs(
+ SmallVectorImpl<QualType> &typeArgs) const {
+ typeArgs.reserve(size());
+ for (auto typeParam : *this)
+ typeArgs.push_back(typeParam->getUnderlyingType());
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCInterfaceDecl
+//===----------------------------------------------------------------------===//
+
+ObjCInterfaceDecl *ObjCInterfaceDecl::Create(const ASTContext &C,
+ DeclContext *DC,
+ SourceLocation atLoc,
+ IdentifierInfo *Id,
+ ObjCTypeParamList *typeParamList,
+ ObjCInterfaceDecl *PrevDecl,
+ SourceLocation ClassLoc,
+ bool isInternal){
+ ObjCInterfaceDecl *Result = new (C, DC)
+ ObjCInterfaceDecl(C, DC, atLoc, Id, typeParamList, ClassLoc, PrevDecl,
+ isInternal);
+ Result->Data.setInt(!C.getLangOpts().Modules);
+ C.getObjCInterfaceType(Result, PrevDecl);
+ return Result;
+}
+
+ObjCInterfaceDecl *ObjCInterfaceDecl::CreateDeserialized(const ASTContext &C,
+ unsigned ID) {
+ ObjCInterfaceDecl *Result = new (C, ID) ObjCInterfaceDecl(C, nullptr,
+ SourceLocation(),
+ nullptr,
+ nullptr,
+ SourceLocation(),
+ nullptr, false);
+ Result->Data.setInt(!C.getLangOpts().Modules);
+ return Result;
+}
+
+ObjCInterfaceDecl::ObjCInterfaceDecl(const ASTContext &C, DeclContext *DC,
+ SourceLocation AtLoc, IdentifierInfo *Id,
+ ObjCTypeParamList *typeParamList,
+ SourceLocation CLoc,
+ ObjCInterfaceDecl *PrevDecl,
+ bool IsInternal)
+ : ObjCContainerDecl(ObjCInterface, DC, Id, CLoc, AtLoc),
+ redeclarable_base(C), TypeForDecl(nullptr), TypeParamList(nullptr),
+ Data() {
+ setPreviousDecl(PrevDecl);
+
+ // Copy the 'data' pointer over.
+ if (PrevDecl)
+ Data = PrevDecl->Data;
+
+ setImplicit(IsInternal);
+
+ setTypeParamList(typeParamList);
+}
+
+void ObjCInterfaceDecl::LoadExternalDefinition() const {
+ assert(data().ExternallyCompleted && "Class is not externally completed");
+ data().ExternallyCompleted = false;
+ getASTContext().getExternalSource()->CompleteType(
+ const_cast<ObjCInterfaceDecl *>(this));
+}
+
+void ObjCInterfaceDecl::setExternallyCompleted() {
+ assert(getASTContext().getExternalSource() &&
+ "Class can't be externally completed without an external source");
+ assert(hasDefinition() &&
+ "Forward declarations can't be externally completed");
+ data().ExternallyCompleted = true;
+}
+
+void ObjCInterfaceDecl::setHasDesignatedInitializers() {
+ // Check for a complete definition and recover if not so.
+ if (!isThisDeclarationADefinition())
+ return;
+ data().HasDesignatedInitializers = true;
+}
+
+bool ObjCInterfaceDecl::hasDesignatedInitializers() const {
+ // Check for a complete definition and recover if not so.
+ if (!isThisDeclarationADefinition())
+ return false;
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ return data().HasDesignatedInitializers;
+}
+
+StringRef
+ObjCInterfaceDecl::getObjCRuntimeNameAsString() const {
+ if (ObjCRuntimeNameAttr *ObjCRTName = getAttr<ObjCRuntimeNameAttr>())
+ return ObjCRTName->getMetadataName();
+
+ return getName();
+}
+
+StringRef
+ObjCImplementationDecl::getObjCRuntimeNameAsString() const {
+ if (ObjCInterfaceDecl *ID =
+ const_cast<ObjCImplementationDecl*>(this)->getClassInterface())
+ return ID->getObjCRuntimeNameAsString();
+
+ return getName();
+}
+
+ObjCImplementationDecl *ObjCInterfaceDecl::getImplementation() const {
+ if (const ObjCInterfaceDecl *Def = getDefinition()) {
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ return getASTContext().getObjCImplementation(
+ const_cast<ObjCInterfaceDecl*>(Def));
+ }
+
+ // FIXME: Should make sure no callers ever do this.
+ return nullptr;
+}
+
+void ObjCInterfaceDecl::setImplementation(ObjCImplementationDecl *ImplD) {
+ getASTContext().setObjCImplementation(getDefinition(), ImplD);
+}
+
+namespace {
+ struct SynthesizeIvarChunk {
+ uint64_t Size;
+ ObjCIvarDecl *Ivar;
+ SynthesizeIvarChunk(uint64_t size, ObjCIvarDecl *ivar)
+ : Size(size), Ivar(ivar) {}
+ };
+
+ bool operator<(const SynthesizeIvarChunk & LHS,
+ const SynthesizeIvarChunk &RHS) {
+ return LHS.Size < RHS.Size;
+ }
+}
+
+/// all_declared_ivar_begin - return first ivar declared in this class,
+/// its extensions and its implementation. Lazily build the list on first
+/// access.
+///
+/// Caveat: The list returned by this method reflects the current
+/// state of the parser. The cache will be updated for every ivar
+/// added by an extension or the implementation when they are
+/// encountered.
+/// See also ObjCIvarDecl::Create().
+ObjCIvarDecl *ObjCInterfaceDecl::all_declared_ivar_begin() {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return nullptr;
+
+ ObjCIvarDecl *curIvar = nullptr;
+ if (!data().IvarList) {
+ if (!ivar_empty()) {
+ ObjCInterfaceDecl::ivar_iterator I = ivar_begin(), E = ivar_end();
+ data().IvarList = *I; ++I;
+ for (curIvar = data().IvarList; I != E; curIvar = *I, ++I)
+ curIvar->setNextIvar(*I);
+ }
+
+ for (const auto *Ext : known_extensions()) {
+ if (!Ext->ivar_empty()) {
+ ObjCCategoryDecl::ivar_iterator
+ I = Ext->ivar_begin(),
+ E = Ext->ivar_end();
+ if (!data().IvarList) {
+ data().IvarList = *I; ++I;
+ curIvar = data().IvarList;
+ }
+ for ( ;I != E; curIvar = *I, ++I)
+ curIvar->setNextIvar(*I);
+ }
+ }
+ data().IvarListMissingImplementation = true;
+ }
+
+ // cached and complete!
+ if (!data().IvarListMissingImplementation)
+ return data().IvarList;
+
+ if (ObjCImplementationDecl *ImplDecl = getImplementation()) {
+ data().IvarListMissingImplementation = false;
+ if (!ImplDecl->ivar_empty()) {
+ SmallVector<SynthesizeIvarChunk, 16> layout;
+ for (auto *IV : ImplDecl->ivars()) {
+ if (IV->getSynthesize() && !IV->isInvalidDecl()) {
+ layout.push_back(SynthesizeIvarChunk(
+ IV->getASTContext().getTypeSize(IV->getType()), IV));
+ continue;
+ }
+ if (!data().IvarList)
+ data().IvarList = IV;
+ else
+ curIvar->setNextIvar(IV);
+ curIvar = IV;
+ }
+
+ if (!layout.empty()) {
+ // Order synthesized ivars by their size.
+ std::stable_sort(layout.begin(), layout.end());
+ unsigned Ix = 0, EIx = layout.size();
+ if (!data().IvarList) {
+ data().IvarList = layout[0].Ivar; Ix++;
+ curIvar = data().IvarList;
+ }
+ for ( ; Ix != EIx; curIvar = layout[Ix].Ivar, Ix++)
+ curIvar->setNextIvar(layout[Ix].Ivar);
+ }
+ }
+ }
+ return data().IvarList;
+}
+
+/// FindCategoryDeclaration - Finds the category declaration whose name is
+/// passed in 'CategoryId' in the list of categories for this class and
+/// returns it. If the category is not found, returns nullptr.
+///
+ObjCCategoryDecl *
+ObjCInterfaceDecl::FindCategoryDeclaration(IdentifierInfo *CategoryId) const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return nullptr;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ for (auto *Cat : visible_categories())
+ if (Cat->getIdentifier() == CategoryId)
+ return Cat;
+
+ return nullptr;
+}
+
+ObjCMethodDecl *
+ObjCInterfaceDecl::getCategoryInstanceMethod(Selector Sel) const {
+ for (const auto *Cat : visible_categories()) {
+ if (ObjCCategoryImplDecl *Impl = Cat->getImplementation())
+ if (ObjCMethodDecl *MD = Impl->getInstanceMethod(Sel))
+ return MD;
+ }
+
+ return nullptr;
+}
+
+ObjCMethodDecl *ObjCInterfaceDecl::getCategoryClassMethod(Selector Sel) const {
+ for (const auto *Cat : visible_categories()) {
+ if (ObjCCategoryImplDecl *Impl = Cat->getImplementation())
+ if (ObjCMethodDecl *MD = Impl->getClassMethod(Sel))
+ return MD;
+ }
+
+ return nullptr;
+}
+
+/// ClassImplementsProtocol - Checks whether the 'lProto' protocol has been
+/// implemented in the IDecl class, its super class, or its categories (if
+/// lookupCategory is true).
+bool ObjCInterfaceDecl::ClassImplementsProtocol(ObjCProtocolDecl *lProto,
+ bool lookupCategory,
+ bool RHSIsQualifiedID) {
+ if (!hasDefinition())
+ return false;
+
+ ObjCInterfaceDecl *IDecl = this;
+ // 1st, look up the class.
+ for (auto *PI : IDecl->protocols()){
+ if (getASTContext().ProtocolCompatibleWithProtocol(lProto, PI))
+ return true;
+    // This is dubious and is added to be compatible with gcc. gcc also
+    // allows assigning a protocol-qualified 'id' type to an LHS object
+    // when a protocol in the qualified LHS is in the list of protocols of
+    // the RHS 'id' object. This, IMO, should be a bug.
+ // FIXME: Treat this as an extension, and flag this as an error when GCC
+ // extensions are not enabled.
+ if (RHSIsQualifiedID &&
+ getASTContext().ProtocolCompatibleWithProtocol(PI, lProto))
+ return true;
+ }
+
+ // 2nd, look up the category.
+ if (lookupCategory)
+ for (const auto *Cat : visible_categories()) {
+ for (auto *PI : Cat->protocols())
+ if (getASTContext().ProtocolCompatibleWithProtocol(lProto, PI))
+ return true;
+ }
+
+  // 3rd, look up the super class(es).
+ if (IDecl->getSuperClass())
+ return
+ IDecl->getSuperClass()->ClassImplementsProtocol(lProto, lookupCategory,
+ RHSIsQualifiedID);
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCIvarDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCIvarDecl::anchor() { }
+
+ObjCIvarDecl *ObjCIvarDecl::Create(ASTContext &C, ObjCContainerDecl *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ QualType T, TypeSourceInfo *TInfo,
+ AccessControl ac, Expr *BW,
+ bool synthesized) {
+ if (DC) {
+    // Ivars can only appear in interfaces, implementations (via synthesized
+ // properties), and class extensions (via direct declaration, or synthesized
+ // properties).
+ //
+ // FIXME: This should really be asserting this:
+ // (isa<ObjCCategoryDecl>(DC) &&
+ // cast<ObjCCategoryDecl>(DC)->IsClassExtension()))
+ // but unfortunately we sometimes place ivars into non-class extension
+ // categories on error. This breaks an AST invariant, and should not be
+ // fixed.
+ assert((isa<ObjCInterfaceDecl>(DC) || isa<ObjCImplementationDecl>(DC) ||
+ isa<ObjCCategoryDecl>(DC)) &&
+ "Invalid ivar decl context!");
+ // Once a new ivar is created in any of class/class-extension/implementation
+ // decl contexts, the previously built IvarList must be rebuilt.
+ ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(DC);
+ if (!ID) {
+ if (ObjCImplementationDecl *IM = dyn_cast<ObjCImplementationDecl>(DC))
+ ID = IM->getClassInterface();
+ else
+ ID = cast<ObjCCategoryDecl>(DC)->getClassInterface();
+ }
+ ID->setIvarList(nullptr);
+ }
+
+ return new (C, DC) ObjCIvarDecl(DC, StartLoc, IdLoc, Id, T, TInfo, ac, BW,
+ synthesized);
+}
+
+ObjCIvarDecl *ObjCIvarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) ObjCIvarDecl(nullptr, SourceLocation(), SourceLocation(),
+ nullptr, QualType(), nullptr,
+ ObjCIvarDecl::None, nullptr, false);
+}
+
+const ObjCInterfaceDecl *ObjCIvarDecl::getContainingInterface() const {
+ const ObjCContainerDecl *DC = cast<ObjCContainerDecl>(getDeclContext());
+
+ switch (DC->getKind()) {
+ default:
+ case ObjCCategoryImpl:
+ case ObjCProtocol:
+ llvm_unreachable("invalid ivar container!");
+
+ // Ivars can only appear in class extension categories.
+ case ObjCCategory: {
+ const ObjCCategoryDecl *CD = cast<ObjCCategoryDecl>(DC);
+ assert(CD->IsClassExtension() && "invalid container for ivar!");
+ return CD->getClassInterface();
+ }
+
+ case ObjCImplementation:
+ return cast<ObjCImplementationDecl>(DC)->getClassInterface();
+
+ case ObjCInterface:
+ return cast<ObjCInterfaceDecl>(DC);
+ }
+}
+
+QualType ObjCIvarDecl::getUsageType(QualType objectType) const {
+ return getType().substObjCMemberType(objectType, getDeclContext(),
+ ObjCSubstitutionContext::Property);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCAtDefsFieldDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCAtDefsFieldDecl::anchor() { }
+
+ObjCAtDefsFieldDecl
+*ObjCAtDefsFieldDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, QualType T, Expr *BW) {
+ return new (C, DC) ObjCAtDefsFieldDecl(DC, StartLoc, IdLoc, Id, T, BW);
+}
+
+ObjCAtDefsFieldDecl *ObjCAtDefsFieldDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) ObjCAtDefsFieldDecl(nullptr, SourceLocation(),
+ SourceLocation(), nullptr, QualType(),
+ nullptr);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCProtocolDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCProtocolDecl::anchor() { }
+
+ObjCProtocolDecl::ObjCProtocolDecl(ASTContext &C, DeclContext *DC,
+ IdentifierInfo *Id, SourceLocation nameLoc,
+ SourceLocation atStartLoc,
+ ObjCProtocolDecl *PrevDecl)
+ : ObjCContainerDecl(ObjCProtocol, DC, Id, nameLoc, atStartLoc),
+ redeclarable_base(C), Data() {
+ setPreviousDecl(PrevDecl);
+ if (PrevDecl)
+ Data = PrevDecl->Data;
+}
+
+ObjCProtocolDecl *ObjCProtocolDecl::Create(ASTContext &C, DeclContext *DC,
+ IdentifierInfo *Id,
+ SourceLocation nameLoc,
+ SourceLocation atStartLoc,
+ ObjCProtocolDecl *PrevDecl) {
+ ObjCProtocolDecl *Result =
+ new (C, DC) ObjCProtocolDecl(C, DC, Id, nameLoc, atStartLoc, PrevDecl);
+ Result->Data.setInt(!C.getLangOpts().Modules);
+ return Result;
+}
+
+ObjCProtocolDecl *ObjCProtocolDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ ObjCProtocolDecl *Result =
+ new (C, ID) ObjCProtocolDecl(C, nullptr, nullptr, SourceLocation(),
+ SourceLocation(), nullptr);
+ Result->Data.setInt(!C.getLangOpts().Modules);
+ return Result;
+}
+
+ObjCProtocolDecl *ObjCProtocolDecl::lookupProtocolNamed(IdentifierInfo *Name) {
+ ObjCProtocolDecl *PDecl = this;
+
+ if (Name == getIdentifier())
+ return PDecl;
+
+ for (auto *I : protocols())
+ if ((PDecl = I->lookupProtocolNamed(Name)))
+ return PDecl;
+
+ return nullptr;
+}
+
+// lookupMethod - Look up an instance/class method in the protocol and the
+// protocols it inherits.
+ObjCMethodDecl *ObjCProtocolDecl::lookupMethod(Selector Sel,
+ bool isInstance) const {
+ ObjCMethodDecl *MethodDecl = nullptr;
+
+ // If there is no definition or the definition is hidden, we don't find
+ // anything.
+ const ObjCProtocolDecl *Def = getDefinition();
+ if (!Def || Def->isHidden())
+ return nullptr;
+
+ if ((MethodDecl = getMethod(Sel, isInstance)))
+ return MethodDecl;
+
+ for (const auto *I : protocols())
+ if ((MethodDecl = I->lookupMethod(Sel, isInstance)))
+ return MethodDecl;
+ return nullptr;
+}
+
+void ObjCProtocolDecl::allocateDefinitionData() {
+ assert(!Data.getPointer() && "Protocol already has a definition!");
+ Data.setPointer(new (getASTContext()) DefinitionData);
+ Data.getPointer()->Definition = this;
+}
+
+void ObjCProtocolDecl::startDefinition() {
+ allocateDefinitionData();
+
+ // Update all of the declarations with a pointer to the definition.
+ for (auto RD : redecls())
+ RD->Data = this->Data;
+}
+
+void ObjCProtocolDecl::collectPropertiesToImplement(PropertyMap &PM,
+ PropertyDeclOrder &PO) const {
+ if (const ObjCProtocolDecl *PDecl = getDefinition()) {
+ for (auto *Prop : PDecl->properties()) {
+ // Insert into PM if not there already.
+ PM.insert(std::make_pair(Prop->getIdentifier(), Prop));
+ PO.push_back(Prop);
+ }
+ // Scan through protocol's protocols.
+ for (const auto *PI : PDecl->protocols())
+ PI->collectPropertiesToImplement(PM, PO);
+ }
+}
+
+void ObjCProtocolDecl::collectInheritedProtocolProperties(
+ const ObjCPropertyDecl *Property,
+ ProtocolPropertyMap &PM) const {
+ if (const ObjCProtocolDecl *PDecl = getDefinition()) {
+ bool MatchFound = false;
+ for (auto *Prop : PDecl->properties()) {
+ if (Prop == Property)
+ continue;
+ if (Prop->getIdentifier() == Property->getIdentifier()) {
+ PM[PDecl] = Prop;
+ MatchFound = true;
+ break;
+ }
+ }
+    // If no matching property was found, scan through the protocol's protocols.
+ if (!MatchFound)
+ for (const auto *PI : PDecl->protocols())
+ PI->collectInheritedProtocolProperties(Property, PM);
+ }
+}
+
+StringRef
+ObjCProtocolDecl::getObjCRuntimeNameAsString() const {
+ if (ObjCRuntimeNameAttr *ObjCRTName = getAttr<ObjCRuntimeNameAttr>())
+ return ObjCRTName->getMetadataName();
+
+ return getName();
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCCategoryDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCCategoryDecl::anchor() { }
+
+ObjCCategoryDecl::ObjCCategoryDecl(DeclContext *DC, SourceLocation AtLoc,
+ SourceLocation ClassNameLoc,
+ SourceLocation CategoryNameLoc,
+ IdentifierInfo *Id, ObjCInterfaceDecl *IDecl,
+ ObjCTypeParamList *typeParamList,
+ SourceLocation IvarLBraceLoc,
+ SourceLocation IvarRBraceLoc)
+ : ObjCContainerDecl(ObjCCategory, DC, Id, ClassNameLoc, AtLoc),
+ ClassInterface(IDecl), TypeParamList(nullptr),
+ NextClassCategory(nullptr), CategoryNameLoc(CategoryNameLoc),
+ IvarLBraceLoc(IvarLBraceLoc), IvarRBraceLoc(IvarRBraceLoc)
+{
+ setTypeParamList(typeParamList);
+}
+
+ObjCCategoryDecl *ObjCCategoryDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation AtLoc,
+ SourceLocation ClassNameLoc,
+ SourceLocation CategoryNameLoc,
+ IdentifierInfo *Id,
+ ObjCInterfaceDecl *IDecl,
+ ObjCTypeParamList *typeParamList,
+ SourceLocation IvarLBraceLoc,
+ SourceLocation IvarRBraceLoc) {
+ ObjCCategoryDecl *CatDecl =
+ new (C, DC) ObjCCategoryDecl(DC, AtLoc, ClassNameLoc, CategoryNameLoc, Id,
+ IDecl, typeParamList, IvarLBraceLoc,
+ IvarRBraceLoc);
+ if (IDecl) {
+ // Link this category into its class's category list.
+ CatDecl->NextClassCategory = IDecl->getCategoryListRaw();
+ if (IDecl->hasDefinition()) {
+ IDecl->setCategoryListRaw(CatDecl);
+ if (ASTMutationListener *L = C.getASTMutationListener())
+ L->AddedObjCCategoryToInterface(CatDecl, IDecl);
+ }
+ }
+
+ return CatDecl;
+}
+
+ObjCCategoryDecl *ObjCCategoryDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) ObjCCategoryDecl(nullptr, SourceLocation(),
+ SourceLocation(), SourceLocation(),
+ nullptr, nullptr, nullptr);
+}
+
+ObjCCategoryImplDecl *ObjCCategoryDecl::getImplementation() const {
+ return getASTContext().getObjCImplementation(
+ const_cast<ObjCCategoryDecl*>(this));
+}
+
+void ObjCCategoryDecl::setImplementation(ObjCCategoryImplDecl *ImplD) {
+ getASTContext().setObjCImplementation(this, ImplD);
+}
+
+void ObjCCategoryDecl::setTypeParamList(ObjCTypeParamList *TPL) {
+ TypeParamList = TPL;
+ if (!TPL)
+ return;
+ // Set the declaration context of each of the type parameters.
+ for (auto typeParam : *TypeParamList)
+ typeParam->setDeclContext(this);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCCategoryImplDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCCategoryImplDecl::anchor() { }
+
+ObjCCategoryImplDecl *
+ObjCCategoryImplDecl::Create(ASTContext &C, DeclContext *DC,
+ IdentifierInfo *Id,
+ ObjCInterfaceDecl *ClassInterface,
+ SourceLocation nameLoc,
+ SourceLocation atStartLoc,
+ SourceLocation CategoryNameLoc) {
+ if (ClassInterface && ClassInterface->hasDefinition())
+ ClassInterface = ClassInterface->getDefinition();
+ return new (C, DC) ObjCCategoryImplDecl(DC, Id, ClassInterface, nameLoc,
+ atStartLoc, CategoryNameLoc);
+}
+
+ObjCCategoryImplDecl *ObjCCategoryImplDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) ObjCCategoryImplDecl(nullptr, nullptr, nullptr,
+ SourceLocation(), SourceLocation(),
+ SourceLocation());
+}
+
+ObjCCategoryDecl *ObjCCategoryImplDecl::getCategoryDecl() const {
+ // The class interface might be NULL if we are working with invalid code.
+ if (const ObjCInterfaceDecl *ID = getClassInterface())
+ return ID->FindCategoryDeclaration(getIdentifier());
+ return nullptr;
+}
+
+void ObjCImplDecl::anchor() { }
+
+void ObjCImplDecl::addPropertyImplementation(ObjCPropertyImplDecl *property) {
+ // FIXME: The context should be correct before we get here.
+ property->setLexicalDeclContext(this);
+ addDecl(property);
+}
+
+void ObjCImplDecl::setClassInterface(ObjCInterfaceDecl *IFace) {
+ ASTContext &Ctx = getASTContext();
+
+ if (ObjCImplementationDecl *ImplD
+ = dyn_cast_or_null<ObjCImplementationDecl>(this)) {
+ if (IFace)
+ Ctx.setObjCImplementation(IFace, ImplD);
+
+ } else if (ObjCCategoryImplDecl *ImplD =
+ dyn_cast_or_null<ObjCCategoryImplDecl>(this)) {
+ if (ObjCCategoryDecl *CD = IFace->FindCategoryDeclaration(getIdentifier()))
+ Ctx.setObjCImplementation(CD, ImplD);
+ }
+
+ ClassInterface = IFace;
+}
+
+/// FindPropertyImplIvarDecl - This method looks up the given ivar in the list
+/// of properties implemented in this \@implementation block and returns the
+/// implemented property that uses it.
+///
+ObjCPropertyImplDecl *ObjCImplDecl::
+FindPropertyImplIvarDecl(IdentifierInfo *ivarId) const {
+ for (auto *PID : property_impls())
+ if (PID->getPropertyIvarDecl() &&
+ PID->getPropertyIvarDecl()->getIdentifier() == ivarId)
+ return PID;
+ return nullptr;
+}
+
+/// FindPropertyImplDecl - This method looks up a previous ObjCPropertyImplDecl
+/// added to the list of those properties \@synthesized/\@dynamic in this
+/// category \@implementation block.
+///
+ObjCPropertyImplDecl *ObjCImplDecl::
+FindPropertyImplDecl(IdentifierInfo *Id) const {
+ for (auto *PID : property_impls())
+ if (PID->getPropertyDecl()->getIdentifier() == Id)
+ return PID;
+ return nullptr;
+}
+
+raw_ostream &clang::operator<<(raw_ostream &OS,
+ const ObjCCategoryImplDecl &CID) {
+ OS << CID.getName();
+ return OS;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCImplementationDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCImplementationDecl::anchor() { }
+
+ObjCImplementationDecl *
+ObjCImplementationDecl::Create(ASTContext &C, DeclContext *DC,
+ ObjCInterfaceDecl *ClassInterface,
+ ObjCInterfaceDecl *SuperDecl,
+ SourceLocation nameLoc,
+ SourceLocation atStartLoc,
+ SourceLocation superLoc,
+ SourceLocation IvarLBraceLoc,
+ SourceLocation IvarRBraceLoc) {
+ if (ClassInterface && ClassInterface->hasDefinition())
+ ClassInterface = ClassInterface->getDefinition();
+ return new (C, DC) ObjCImplementationDecl(DC, ClassInterface, SuperDecl,
+ nameLoc, atStartLoc, superLoc,
+ IvarLBraceLoc, IvarRBraceLoc);
+}
+
+ObjCImplementationDecl *
+ObjCImplementationDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) ObjCImplementationDecl(nullptr, nullptr, nullptr,
+ SourceLocation(), SourceLocation());
+}
+
+void ObjCImplementationDecl::setIvarInitializers(ASTContext &C,
+ CXXCtorInitializer ** initializers,
+ unsigned numInitializers) {
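+  // The initializers are copied into ASTContext-allocated storage, so the
+  // caller's array does not need to outlive this call.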
+ if (numInitializers > 0) {
+ NumIvarInitializers = numInitializers;
+ CXXCtorInitializer **ivarInitializers =
+ new (C) CXXCtorInitializer*[NumIvarInitializers];
+ memcpy(ivarInitializers, initializers,
+ numInitializers * sizeof(CXXCtorInitializer*));
+ IvarInitializers = ivarInitializers;
+ }
+}
+
+ObjCImplementationDecl::init_const_iterator
+ObjCImplementationDecl::init_begin() const {
+ return IvarInitializers.get(getASTContext().getExternalSource());
+}
+
+raw_ostream &clang::operator<<(raw_ostream &OS,
+ const ObjCImplementationDecl &ID) {
+ OS << ID.getName();
+ return OS;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCCompatibleAliasDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCCompatibleAliasDecl::anchor() { }
+
+ObjCCompatibleAliasDecl *
+ObjCCompatibleAliasDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ IdentifierInfo *Id,
+ ObjCInterfaceDecl* AliasedClass) {
+ return new (C, DC) ObjCCompatibleAliasDecl(DC, L, Id, AliasedClass);
+}
+
+ObjCCompatibleAliasDecl *
+ObjCCompatibleAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) ObjCCompatibleAliasDecl(nullptr, SourceLocation(),
+ nullptr, nullptr);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCPropertyDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCPropertyDecl::anchor() { }
+
+ObjCPropertyDecl *ObjCPropertyDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ IdentifierInfo *Id,
+ SourceLocation AtLoc,
+ SourceLocation LParenLoc,
+ QualType T,
+ TypeSourceInfo *TSI,
+ PropertyControl propControl) {
+ return new (C, DC) ObjCPropertyDecl(DC, L, Id, AtLoc, LParenLoc, T, TSI,
+ propControl);
+}
+
+ObjCPropertyDecl *ObjCPropertyDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) ObjCPropertyDecl(nullptr, SourceLocation(), nullptr,
+ SourceLocation(), SourceLocation(),
+ QualType(), nullptr, None);
+}
+
+QualType ObjCPropertyDecl::getUsageType(QualType objectType) const {
+ return DeclType.substObjCMemberType(objectType, getDeclContext(),
+ ObjCSubstitutionContext::Property);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCPropertyImplDecl
+//===----------------------------------------------------------------------===//
+
+ObjCPropertyImplDecl *ObjCPropertyImplDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation atLoc,
+ SourceLocation L,
+ ObjCPropertyDecl *property,
+ Kind PK,
+ ObjCIvarDecl *ivar,
+ SourceLocation ivarLoc) {
+ return new (C, DC) ObjCPropertyImplDecl(DC, atLoc, L, property, PK, ivar,
+ ivarLoc);
+}
+
+ObjCPropertyImplDecl *ObjCPropertyImplDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) ObjCPropertyImplDecl(nullptr, SourceLocation(),
+ SourceLocation(), nullptr, Dynamic,
+ nullptr, SourceLocation());
+}
+
+SourceRange ObjCPropertyImplDecl::getSourceRange() const {
+ SourceLocation EndLoc = getLocation();
+ if (IvarLoc.isValid())
+ EndLoc = IvarLoc;
+
+ return SourceRange(AtLoc, EndLoc);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclOpenMP.cpp b/contrib/llvm/tools/clang/lib/AST/DeclOpenMP.cpp
new file mode 100644
index 0000000..493e2cd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/DeclOpenMP.cpp
@@ -0,0 +1,54 @@
+//===--- DeclOpenMP.cpp - Declaration OpenMP AST Node Implementation ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// \brief This file implements the OMPThreadPrivateDecl class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclOpenMP.h"
+#include "clang/AST/Expr.h"
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// OMPThreadPrivateDecl Implementation.
+//===----------------------------------------------------------------------===//
+
+void OMPThreadPrivateDecl::anchor() { }
+
+OMPThreadPrivateDecl *OMPThreadPrivateDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation L,
+ ArrayRef<Expr *> VL) {
+ OMPThreadPrivateDecl *D =
+ new (C, DC, additionalSizeToAlloc<Expr *>(VL.size()))
+ OMPThreadPrivateDecl(OMPThreadPrivate, DC, L);
+ D->NumVars = VL.size();
+ D->setVars(VL);
+ return D;
+}
+
+OMPThreadPrivateDecl *OMPThreadPrivateDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID,
+ unsigned N) {
+ OMPThreadPrivateDecl *D = new (C, ID, additionalSizeToAlloc<Expr *>(N))
+ OMPThreadPrivateDecl(OMPThreadPrivate, nullptr, SourceLocation());
+ D->NumVars = N;
+ return D;
+}
+
+void OMPThreadPrivateDecl::setVars(ArrayRef<Expr *> VL) {
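+  // The variables live in trailing storage reserved by Create() via
+  // additionalSizeToAlloc, so this only copies the expression pointers.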
+ assert(VL.size() == NumVars &&
+ "Number of variables is not the same as the preallocated buffer");
+ std::uninitialized_copy(VL.begin(), VL.end(), getTrailingObjects<Expr *>());
+}
+
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp b/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
new file mode 100644
index 0000000..5c6002d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
@@ -0,0 +1,1360 @@
+//===--- DeclPrinter.cpp - Printing implementation for Decl ASTs ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Decl::print method, which pretty prints the
+// AST back out to C/Objective-C/C++/Objective-C++ code.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/Basic/Module.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+namespace {
+ class DeclPrinter : public DeclVisitor<DeclPrinter> {
+ raw_ostream &Out;
+ PrintingPolicy Policy;
+ unsigned Indentation;
+ bool PrintInstantiation;
+
+ raw_ostream& Indent() { return Indent(Indentation); }
+ raw_ostream& Indent(unsigned Indentation);
+ void ProcessDeclGroup(SmallVectorImpl<Decl*>& Decls);
+
+ void Print(AccessSpecifier AS);
+
+ /// Print an Objective-C method type in parentheses.
+ ///
+ /// \param Quals The Objective-C declaration qualifiers.
+ /// \param T The type to print.
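+    /// For example, an 'inout NSString *' parameter is printed as
+    /// "(inout NSString *)".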
+ void PrintObjCMethodType(ASTContext &Ctx, Decl::ObjCDeclQualifier Quals,
+ QualType T);
+
+ void PrintObjCTypeParams(ObjCTypeParamList *Params);
+
+ public:
+ DeclPrinter(raw_ostream &Out, const PrintingPolicy &Policy,
+ unsigned Indentation = 0, bool PrintInstantiation = false)
+ : Out(Out), Policy(Policy), Indentation(Indentation),
+ PrintInstantiation(PrintInstantiation) { }
+
+ void VisitDeclContext(DeclContext *DC, bool Indent = true);
+
+ void VisitTranslationUnitDecl(TranslationUnitDecl *D);
+ void VisitTypedefDecl(TypedefDecl *D);
+ void VisitTypeAliasDecl(TypeAliasDecl *D);
+ void VisitEnumDecl(EnumDecl *D);
+ void VisitRecordDecl(RecordDecl *D);
+ void VisitEnumConstantDecl(EnumConstantDecl *D);
+ void VisitEmptyDecl(EmptyDecl *D);
+ void VisitFunctionDecl(FunctionDecl *D);
+ void VisitFriendDecl(FriendDecl *D);
+ void VisitFieldDecl(FieldDecl *D);
+ void VisitVarDecl(VarDecl *D);
+ void VisitLabelDecl(LabelDecl *D);
+ void VisitParmVarDecl(ParmVarDecl *D);
+ void VisitFileScopeAsmDecl(FileScopeAsmDecl *D);
+ void VisitImportDecl(ImportDecl *D);
+ void VisitStaticAssertDecl(StaticAssertDecl *D);
+ void VisitNamespaceDecl(NamespaceDecl *D);
+ void VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
+ void VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
+ void VisitCXXRecordDecl(CXXRecordDecl *D);
+ void VisitLinkageSpecDecl(LinkageSpecDecl *D);
+ void VisitTemplateDecl(const TemplateDecl *D);
+ void VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
+ void VisitClassTemplateDecl(ClassTemplateDecl *D);
+ void VisitObjCMethodDecl(ObjCMethodDecl *D);
+ void VisitObjCImplementationDecl(ObjCImplementationDecl *D);
+ void VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
+ void VisitObjCProtocolDecl(ObjCProtocolDecl *D);
+ void VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
+ void VisitObjCCategoryDecl(ObjCCategoryDecl *D);
+ void VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *D);
+ void VisitObjCPropertyDecl(ObjCPropertyDecl *D);
+ void VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
+ void VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
+ void VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
+ void VisitUsingDecl(UsingDecl *D);
+ void VisitUsingShadowDecl(UsingShadowDecl *D);
+ void VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D);
+
+ void PrintTemplateParameters(const TemplateParameterList *Params,
+ const TemplateArgumentList *Args = nullptr);
+ void prettyPrintAttributes(Decl *D);
+ void prettyPrintPragmas(Decl *D);
+ void printDeclType(QualType T, StringRef DeclName, bool Pack = false);
+ };
+}
+
+void Decl::print(raw_ostream &Out, unsigned Indentation,
+ bool PrintInstantiation) const {
+ print(Out, getASTContext().getPrintingPolicy(), Indentation, PrintInstantiation);
+}
+
+void Decl::print(raw_ostream &Out, const PrintingPolicy &Policy,
+ unsigned Indentation, bool PrintInstantiation) const {
+ DeclPrinter Printer(Out, Policy, Indentation, PrintInstantiation);
+ Printer.Visit(const_cast<Decl*>(this));
+}
+
+static QualType GetBaseType(QualType T) {
+ // FIXME: This should be on the Type class!
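+  // For example, 'float (*fp)(int)' peels the pointer and then the function's
+  // return type, yielding 'float'.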
+ QualType BaseType = T;
+ while (!BaseType->isSpecifierType()) {
+ if (isa<TypedefType>(BaseType))
+ break;
+ else if (const PointerType* PTy = BaseType->getAs<PointerType>())
+ BaseType = PTy->getPointeeType();
+ else if (const BlockPointerType *BPy = BaseType->getAs<BlockPointerType>())
+ BaseType = BPy->getPointeeType();
+ else if (const ArrayType* ATy = dyn_cast<ArrayType>(BaseType))
+ BaseType = ATy->getElementType();
+ else if (const FunctionType* FTy = BaseType->getAs<FunctionType>())
+ BaseType = FTy->getReturnType();
+ else if (const VectorType *VTy = BaseType->getAs<VectorType>())
+ BaseType = VTy->getElementType();
+ else if (const ReferenceType *RTy = BaseType->getAs<ReferenceType>())
+ BaseType = RTy->getPointeeType();
+ else
+ llvm_unreachable("Unknown declarator!");
+ }
+ return BaseType;
+}
+
+static QualType getDeclType(Decl* D) {
+ if (TypedefNameDecl* TDD = dyn_cast<TypedefNameDecl>(D))
+ return TDD->getUnderlyingType();
+ if (ValueDecl* VD = dyn_cast<ValueDecl>(D))
+ return VD->getType();
+ return QualType();
+}
+
+void Decl::printGroup(Decl** Begin, unsigned NumDecls,
+ raw_ostream &Out, const PrintingPolicy &Policy,
+ unsigned Indentation) {
+ if (NumDecls == 1) {
+ (*Begin)->print(Out, Policy, Indentation);
+ return;
+ }
+
+ Decl** End = Begin + NumDecls;
+ TagDecl* TD = dyn_cast<TagDecl>(*Begin);
+ if (TD)
+ ++Begin;
+
+ PrintingPolicy SubPolicy(Policy);
+ if (TD && TD->isCompleteDefinition()) {
+ TD->print(Out, Policy, Indentation);
+ Out << " ";
+ SubPolicy.SuppressTag = true;
+ }
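+  // For 'struct { int x; } a, b;', the tag definition was printed once above;
+  // the declarators 'a' and 'b' now follow with duplicate specifiers
+  // suppressed.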
+
+  bool isFirst = true;
+  for ( ; Begin != End; ++Begin) {
+    if (isFirst) {
+      SubPolicy.SuppressSpecifiers = false;
+      isFirst = false;
+    } else {
+      Out << ", ";
+      SubPolicy.SuppressSpecifiers = true;
+    }
+
+ (*Begin)->print(Out, SubPolicy, Indentation);
+ }
+}
+
+LLVM_DUMP_METHOD void DeclContext::dumpDeclContext() const {
+ // Get the translation unit
+ const DeclContext *DC = this;
+ while (!DC->isTranslationUnit())
+ DC = DC->getParent();
+
+ ASTContext &Ctx = cast<TranslationUnitDecl>(DC)->getASTContext();
+ DeclPrinter Printer(llvm::errs(), Ctx.getPrintingPolicy(), 0);
+ Printer.VisitDeclContext(const_cast<DeclContext *>(this), /*Indent=*/false);
+}
+
+raw_ostream& DeclPrinter::Indent(unsigned Indentation) {
+ for (unsigned i = 0; i != Indentation; ++i)
+ Out << " ";
+ return Out;
+}
+
+void DeclPrinter::prettyPrintAttributes(Decl *D) {
+ if (Policy.PolishForDeclaration)
+ return;
+
+ if (D->hasAttrs()) {
+ AttrVec &Attrs = D->getAttrs();
+ for (auto *A : Attrs) {
+ switch (A->getKind()) {
+#define ATTR(X)
+#define PRAGMA_SPELLING_ATTR(X) case attr::X:
+#include "clang/Basic/AttrList.inc"
+ break;
+ default:
+ A->printPretty(Out, Policy);
+ break;
+ }
+ }
+ }
+}
+
+void DeclPrinter::prettyPrintPragmas(Decl *D) {
+ if (Policy.PolishForDeclaration)
+ return;
+
+ if (D->hasAttrs()) {
+ AttrVec &Attrs = D->getAttrs();
+ for (auto *A : Attrs) {
+ switch (A->getKind()) {
+#define ATTR(X)
+#define PRAGMA_SPELLING_ATTR(X) case attr::X:
+#include "clang/Basic/AttrList.inc"
+ A->printPretty(Out, Policy);
+ Indent();
+ break;
+ default:
+ break;
+ }
+ }
+ }
+}
+
+void DeclPrinter::printDeclType(QualType T, StringRef DeclName, bool Pack) {
+ // Normally, a PackExpansionType is written as T[3]... (for instance, as a
+ // template argument), but if it is the type of a declaration, the ellipsis
+ // is placed before the name being declared.
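+  // For example, a parameter pack 'Ts ...args' prints the ellipsis attached
+  // to the name rather than to the type.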
+ if (auto *PET = T->getAs<PackExpansionType>()) {
+ Pack = true;
+ T = PET->getPattern();
+ }
+ T.print(Out, Policy, (Pack ? "..." : "") + DeclName);
+}
+
+void DeclPrinter::ProcessDeclGroup(SmallVectorImpl<Decl*>& Decls) {
+ this->Indent();
+ Decl::printGroup(Decls.data(), Decls.size(), Out, Policy, Indentation);
+ Out << ";\n";
+ Decls.clear();
+}
+
+void DeclPrinter::Print(AccessSpecifier AS) {
+ switch(AS) {
+ case AS_none: llvm_unreachable("No access specifier!");
+ case AS_public: Out << "public"; break;
+ case AS_protected: Out << "protected"; break;
+ case AS_private: Out << "private"; break;
+ }
+}
+
+//----------------------------------------------------------------------------
+// Common C declarations
+//----------------------------------------------------------------------------
+
+void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
+ if (Policy.TerseOutput)
+ return;
+
+ if (Indent)
+ Indentation += Policy.Indentation;
+
+ SmallVector<Decl*, 2> Decls;
+ for (DeclContext::decl_iterator D = DC->decls_begin(), DEnd = DC->decls_end();
+ D != DEnd; ++D) {
+
+ // Don't print ObjCIvarDecls, as they are printed when visiting the
+ // containing ObjCInterfaceDecl.
+ if (isa<ObjCIvarDecl>(*D))
+ continue;
+
+ // Skip over implicit declarations in pretty-printing mode.
+ if (D->isImplicit())
+ continue;
+
+    // The next bits of code handle cases like "struct {int x;} a,b"; we're
+ // forced to merge the declarations because there's no other way to
+ // refer to the struct in question. This limited merging is safe without
+ // a bunch of other checks because it only merges declarations directly
+ // referring to the tag, not typedefs.
+ //
+ // Check whether the current declaration should be grouped with a previous
+ // unnamed struct.
+ QualType CurDeclType = getDeclType(*D);
+ if (!Decls.empty() && !CurDeclType.isNull()) {
+ QualType BaseType = GetBaseType(CurDeclType);
+ if (!BaseType.isNull() && isa<ElaboratedType>(BaseType))
+ BaseType = cast<ElaboratedType>(BaseType)->getNamedType();
+ if (!BaseType.isNull() && isa<TagType>(BaseType) &&
+ cast<TagType>(BaseType)->getDecl() == Decls[0]) {
+ Decls.push_back(*D);
+ continue;
+ }
+ }
+
+ // If we have a merged group waiting to be handled, handle it now.
+ if (!Decls.empty())
+ ProcessDeclGroup(Decls);
+
+ // If the current declaration is an unnamed tag type, save it
+ // so we can merge it with the subsequent declaration(s) using it.
+ if (isa<TagDecl>(*D) && !cast<TagDecl>(*D)->getIdentifier()) {
+ Decls.push_back(*D);
+ continue;
+ }
+
+ if (isa<AccessSpecDecl>(*D)) {
+ Indentation -= Policy.Indentation;
+ this->Indent();
+ Print(D->getAccess());
+ Out << ":\n";
+ Indentation += Policy.Indentation;
+ continue;
+ }
+
+ this->Indent();
+ Visit(*D);
+
+    // FIXME: Need to be able to tell the DeclPrinter when a declaration
+    // requires a terminator (e.g. ';') and when it does not.
+ const char *Terminator = nullptr;
+ if (isa<OMPThreadPrivateDecl>(*D))
+ Terminator = nullptr;
+ else if (isa<FunctionDecl>(*D) &&
+ cast<FunctionDecl>(*D)->isThisDeclarationADefinition())
+ Terminator = nullptr;
+ else if (isa<ObjCMethodDecl>(*D) && cast<ObjCMethodDecl>(*D)->getBody())
+ Terminator = nullptr;
+ else if (isa<NamespaceDecl>(*D) || isa<LinkageSpecDecl>(*D) ||
+ isa<ObjCImplementationDecl>(*D) ||
+ isa<ObjCInterfaceDecl>(*D) ||
+ isa<ObjCProtocolDecl>(*D) ||
+ isa<ObjCCategoryImplDecl>(*D) ||
+ isa<ObjCCategoryDecl>(*D))
+ Terminator = nullptr;
+ else if (isa<EnumConstantDecl>(*D)) {
+ DeclContext::decl_iterator Next = D;
+ ++Next;
+ if (Next != DEnd)
+ Terminator = ",";
+ } else
+ Terminator = ";";
+
+ if (Terminator)
+ Out << Terminator;
+ Out << "\n";
+ }
+
+ if (!Decls.empty())
+ ProcessDeclGroup(Decls);
+
+ if (Indent)
+ Indentation -= Policy.Indentation;
+}
+
+void DeclPrinter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
+ VisitDeclContext(D, false);
+}
+
+void DeclPrinter::VisitTypedefDecl(TypedefDecl *D) {
+ if (!Policy.SuppressSpecifiers) {
+ Out << "typedef ";
+
+ if (D->isModulePrivate())
+ Out << "__module_private__ ";
+ }
+ D->getTypeSourceInfo()->getType().print(Out, Policy, D->getName());
+ prettyPrintAttributes(D);
+}
+
+void DeclPrinter::VisitTypeAliasDecl(TypeAliasDecl *D) {
+ Out << "using " << *D;
+ prettyPrintAttributes(D);
+ Out << " = " << D->getTypeSourceInfo()->getType().getAsString(Policy);
+}
+
+void DeclPrinter::VisitEnumDecl(EnumDecl *D) {
+ if (!Policy.SuppressSpecifiers && D->isModulePrivate())
+ Out << "__module_private__ ";
+ Out << "enum ";
+ if (D->isScoped()) {
+ if (D->isScopedUsingClassTag())
+ Out << "class ";
+ else
+ Out << "struct ";
+ }
+ Out << *D;
+
+ if (D->isFixed())
+ Out << " : " << D->getIntegerType().stream(Policy);
+
+ if (D->isCompleteDefinition()) {
+ Out << " {\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+ }
+ prettyPrintAttributes(D);
+}
+
+void DeclPrinter::VisitRecordDecl(RecordDecl *D) {
+ if (!Policy.SuppressSpecifiers && D->isModulePrivate())
+ Out << "__module_private__ ";
+ Out << D->getKindName();
+
+ prettyPrintAttributes(D);
+
+ if (D->getIdentifier())
+ Out << ' ' << *D;
+
+ if (D->isCompleteDefinition()) {
+ Out << " {\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+ }
+}
+
+void DeclPrinter::VisitEnumConstantDecl(EnumConstantDecl *D) {
+ Out << *D;
+ if (Expr *Init = D->getInitExpr()) {
+ Out << " = ";
+ Init->printPretty(Out, nullptr, Policy, Indentation);
+ }
+}
+
+void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
+ if (!D->getDescribedFunctionTemplate() &&
+ !D->isFunctionTemplateSpecialization())
+ prettyPrintPragmas(D);
+
+ CXXConstructorDecl *CDecl = dyn_cast<CXXConstructorDecl>(D);
+ CXXConversionDecl *ConversionDecl = dyn_cast<CXXConversionDecl>(D);
+ if (!Policy.SuppressSpecifiers) {
+ switch (D->getStorageClass()) {
+ case SC_None: break;
+ case SC_Extern: Out << "extern "; break;
+ case SC_Static: Out << "static "; break;
+ case SC_PrivateExtern: Out << "__private_extern__ "; break;
+ case SC_Auto: case SC_Register:
+ llvm_unreachable("invalid for functions");
+ }
+
+ if (D->isInlineSpecified()) Out << "inline ";
+ if (D->isVirtualAsWritten()) Out << "virtual ";
+ if (D->isModulePrivate()) Out << "__module_private__ ";
+ if (D->isConstexpr() && !D->isExplicitlyDefaulted()) Out << "constexpr ";
+ if ((CDecl && CDecl->isExplicitSpecified()) ||
+ (ConversionDecl && ConversionDecl->isExplicit()))
+ Out << "explicit ";
+ }
+
+ PrintingPolicy SubPolicy(Policy);
+ SubPolicy.SuppressSpecifiers = false;
+ std::string Proto = D->getNameInfo().getAsString();
+
+ QualType Ty = D->getType();
+ while (const ParenType *PT = dyn_cast<ParenType>(Ty)) {
+ Proto = '(' + Proto + ')';
+ Ty = PT->getInnerType();
+ }
+
+ if (const FunctionType *AFT = Ty->getAs<FunctionType>()) {
+ const FunctionProtoType *FT = nullptr;
+ if (D->hasWrittenPrototype())
+ FT = dyn_cast<FunctionProtoType>(AFT);
+
+ Proto += "(";
+ if (FT) {
+ llvm::raw_string_ostream POut(Proto);
+ DeclPrinter ParamPrinter(POut, SubPolicy, Indentation);
+ for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
+ if (i) POut << ", ";
+ ParamPrinter.VisitParmVarDecl(D->getParamDecl(i));
+ }
+
+ if (FT->isVariadic()) {
+ if (D->getNumParams()) POut << ", ";
+ POut << "...";
+ }
+ } else if (D->doesThisDeclarationHaveABody() && !D->hasPrototype()) {
+ for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
+ if (i)
+ Proto += ", ";
+ Proto += D->getParamDecl(i)->getNameAsString();
+ }
+ }
+
+ Proto += ")";
+
+ if (FT) {
+ if (FT->isConst())
+ Proto += " const";
+ if (FT->isVolatile())
+ Proto += " volatile";
+ if (FT->isRestrict())
+ Proto += " restrict";
+
+ switch (FT->getRefQualifier()) {
+ case RQ_None:
+ break;
+ case RQ_LValue:
+ Proto += " &";
+ break;
+ case RQ_RValue:
+ Proto += " &&";
+ break;
+ }
+ }
+
+ if (FT && FT->hasDynamicExceptionSpec()) {
+ Proto += " throw(";
+ if (FT->getExceptionSpecType() == EST_MSAny)
+ Proto += "...";
+ else
+ for (unsigned I = 0, N = FT->getNumExceptions(); I != N; ++I) {
+ if (I)
+ Proto += ", ";
+
+ Proto += FT->getExceptionType(I).getAsString(SubPolicy);
+ }
+ Proto += ")";
+ } else if (FT && isNoexceptExceptionSpec(FT->getExceptionSpecType())) {
+ Proto += " noexcept";
+      if (FT->getExceptionSpecType() == EST_ComputedNoexcept) {
+        Proto += "(";
+        llvm::raw_string_ostream EOut(Proto);
+        FT->getNoexceptExpr()->printPretty(EOut, nullptr, SubPolicy,
+                                           Indentation);
+        // flush() already writes the buffered expression into Proto;
+        // appending EOut.str() as well would duplicate the entire string.
+        EOut.flush();
+        Proto += ")";
+      }
+ }
+
+ if (CDecl) {
+ bool HasInitializerList = false;
+ for (const auto *BMInitializer : CDecl->inits()) {
+ if (BMInitializer->isInClassMemberInitializer())
+ continue;
+
+ if (!HasInitializerList) {
+ Proto += " : ";
+ Out << Proto;
+ Proto.clear();
+ HasInitializerList = true;
+ } else
+ Out << ", ";
+
+ if (BMInitializer->isAnyMemberInitializer()) {
+ FieldDecl *FD = BMInitializer->getAnyMember();
+ Out << *FD;
+ } else {
+ Out << QualType(BMInitializer->getBaseClass(), 0).getAsString(Policy);
+ }
+
+ Out << "(";
+ if (!BMInitializer->getInit()) {
+ // Nothing to print
+ } else {
+ Expr *Init = BMInitializer->getInit();
+ if (ExprWithCleanups *Tmp = dyn_cast<ExprWithCleanups>(Init))
+ Init = Tmp->getSubExpr();
+
+ Init = Init->IgnoreParens();
+
+ Expr *SimpleInit = nullptr;
+ Expr **Args = nullptr;
+ unsigned NumArgs = 0;
+ if (ParenListExpr *ParenList = dyn_cast<ParenListExpr>(Init)) {
+ Args = ParenList->getExprs();
+ NumArgs = ParenList->getNumExprs();
+ } else if (CXXConstructExpr *Construct
+ = dyn_cast<CXXConstructExpr>(Init)) {
+ Args = Construct->getArgs();
+ NumArgs = Construct->getNumArgs();
+ } else
+ SimpleInit = Init;
+
+ if (SimpleInit)
+ SimpleInit->printPretty(Out, nullptr, Policy, Indentation);
+ else {
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ assert(Args[I] != nullptr && "Expected non-null Expr");
+ if (isa<CXXDefaultArgExpr>(Args[I]))
+ break;
+
+ if (I)
+ Out << ", ";
+ Args[I]->printPretty(Out, nullptr, Policy, Indentation);
+ }
+ }
+ }
+ Out << ")";
+ if (BMInitializer->isPackExpansion())
+ Out << "...";
+ }
+ } else if (!ConversionDecl && !isa<CXXDestructorDecl>(D)) {
+ if (FT && FT->hasTrailingReturn()) {
+ Out << "auto " << Proto << " -> ";
+ Proto.clear();
+ }
+ AFT->getReturnType().print(Out, Policy, Proto);
+ Proto.clear();
+ }
+ Out << Proto;
+ } else {
+ Ty.print(Out, Policy, Proto);
+ }
+
+ prettyPrintAttributes(D);
+
+ if (D->isPure())
+ Out << " = 0";
+ else if (D->isDeletedAsWritten())
+ Out << " = delete";
+ else if (D->isExplicitlyDefaulted())
+ Out << " = default";
+ else if (D->doesThisDeclarationHaveABody() && !Policy.TerseOutput) {
+ if (!D->hasPrototype() && D->getNumParams()) {
+ // This is a K&R function definition, so we need to print the
+ // parameters.
+ Out << '\n';
+ DeclPrinter ParamPrinter(Out, SubPolicy, Indentation);
+ Indentation += Policy.Indentation;
+ for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
+ Indent();
+ ParamPrinter.VisitParmVarDecl(D->getParamDecl(i));
+ Out << ";\n";
+ }
+ Indentation -= Policy.Indentation;
+ } else
+ Out << ' ';
+
+ if (D->getBody())
+ D->getBody()->printPretty(Out, nullptr, SubPolicy, Indentation);
+ Out << '\n';
+ }
+}
+
+void DeclPrinter::VisitFriendDecl(FriendDecl *D) {
+ if (TypeSourceInfo *TSI = D->getFriendType()) {
+ unsigned NumTPLists = D->getFriendTypeNumTemplateParameterLists();
+ for (unsigned i = 0; i < NumTPLists; ++i)
+ PrintTemplateParameters(D->getFriendTypeTemplateParameterList(i));
+ Out << "friend ";
+ Out << " " << TSI->getType().getAsString(Policy);
+ }
+ else if (FunctionDecl *FD =
+ dyn_cast<FunctionDecl>(D->getFriendDecl())) {
+ Out << "friend ";
+ VisitFunctionDecl(FD);
+ }
+ else if (FunctionTemplateDecl *FTD =
+ dyn_cast<FunctionTemplateDecl>(D->getFriendDecl())) {
+ Out << "friend ";
+ VisitFunctionTemplateDecl(FTD);
+ }
+ else if (ClassTemplateDecl *CTD =
+ dyn_cast<ClassTemplateDecl>(D->getFriendDecl())) {
+ Out << "friend ";
+ VisitRedeclarableTemplateDecl(CTD);
+ }
+}
+
+void DeclPrinter::VisitFieldDecl(FieldDecl *D) {
+ // FIXME: add printing of pragma attributes if required.
+ if (!Policy.SuppressSpecifiers && D->isMutable())
+ Out << "mutable ";
+ if (!Policy.SuppressSpecifiers && D->isModulePrivate())
+ Out << "__module_private__ ";
+
+ Out << D->getASTContext().getUnqualifiedObjCPointerType(D->getType()).
+ stream(Policy, D->getName());
+
+ if (D->isBitField()) {
+ Out << " : ";
+ D->getBitWidth()->printPretty(Out, nullptr, Policy, Indentation);
+ }
+
+ Expr *Init = D->getInClassInitializer();
+ if (!Policy.SuppressInitializers && Init) {
+ if (D->getInClassInitStyle() == ICIS_ListInit)
+ Out << " ";
+ else
+ Out << " = ";
+ Init->printPretty(Out, nullptr, Policy, Indentation);
+ }
+ prettyPrintAttributes(D);
+}
+
+void DeclPrinter::VisitLabelDecl(LabelDecl *D) {
+ Out << *D << ":";
+}
+
+void DeclPrinter::VisitVarDecl(VarDecl *D) {
+ prettyPrintPragmas(D);
+ if (!Policy.SuppressSpecifiers) {
+ StorageClass SC = D->getStorageClass();
+ if (SC != SC_None)
+ Out << VarDecl::getStorageClassSpecifierString(SC) << " ";
+
+ switch (D->getTSCSpec()) {
+ case TSCS_unspecified:
+ break;
+ case TSCS___thread:
+ Out << "__thread ";
+ break;
+ case TSCS__Thread_local:
+ Out << "_Thread_local ";
+ break;
+ case TSCS_thread_local:
+ Out << "thread_local ";
+ break;
+ }
+
+ if (D->isModulePrivate())
+ Out << "__module_private__ ";
+ }
+
+ QualType T = D->getTypeSourceInfo()
+ ? D->getTypeSourceInfo()->getType()
+ : D->getASTContext().getUnqualifiedObjCPointerType(D->getType());
+ printDeclType(T, D->getName());
+ Expr *Init = D->getInit();
+ if (!Policy.SuppressInitializers && Init) {
+ bool ImplicitInit = false;
+ if (CXXConstructExpr *Construct =
+ dyn_cast<CXXConstructExpr>(Init->IgnoreImplicit())) {
+ if (D->getInitStyle() == VarDecl::CallInit &&
+ !Construct->isListInitialization()) {
+ ImplicitInit = Construct->getNumArgs() == 0 ||
+ Construct->getArg(0)->isDefaultArgument();
+ }
+ }
+ if (!ImplicitInit) {
+ if ((D->getInitStyle() == VarDecl::CallInit) && !isa<ParenListExpr>(Init))
+ Out << "(";
+ else if (D->getInitStyle() == VarDecl::CInit) {
+ Out << " = ";
+ }
+ Init->printPretty(Out, nullptr, Policy, Indentation);
+ if ((D->getInitStyle() == VarDecl::CallInit) && !isa<ParenListExpr>(Init))
+ Out << ")";
+ }
+ }
+ prettyPrintAttributes(D);
+}
+
+void DeclPrinter::VisitParmVarDecl(ParmVarDecl *D) {
+ VisitVarDecl(D);
+}
+
+void DeclPrinter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) {
+ Out << "__asm (";
+ D->getAsmString()->printPretty(Out, nullptr, Policy, Indentation);
+ Out << ")";
+}
+
+void DeclPrinter::VisitImportDecl(ImportDecl *D) {
+ Out << "@import " << D->getImportedModule()->getFullModuleName()
+ << ";\n";
+}
+
+void DeclPrinter::VisitStaticAssertDecl(StaticAssertDecl *D) {
+ Out << "static_assert(";
+ D->getAssertExpr()->printPretty(Out, nullptr, Policy, Indentation);
+ if (StringLiteral *SL = D->getMessage()) {
+ Out << ", ";
+ SL->printPretty(Out, nullptr, Policy, Indentation);
+ }
+ Out << ")";
+}
+
+//----------------------------------------------------------------------------
+// C++ declarations
+//----------------------------------------------------------------------------
+void DeclPrinter::VisitNamespaceDecl(NamespaceDecl *D) {
+ if (D->isInline())
+ Out << "inline ";
+ Out << "namespace " << *D << " {\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+}
+
+void DeclPrinter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
+ Out << "using namespace ";
+ if (D->getQualifier())
+ D->getQualifier()->print(Out, Policy);
+ Out << *D->getNominatedNamespaceAsWritten();
+}
+
+void DeclPrinter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
+ Out << "namespace " << *D << " = ";
+ if (D->getQualifier())
+ D->getQualifier()->print(Out, Policy);
+ Out << *D->getAliasedNamespace();
+}
+
+void DeclPrinter::VisitEmptyDecl(EmptyDecl *D) {
+ prettyPrintAttributes(D);
+}
+
+void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
+ // FIXME: add printing of pragma attributes if required.
+ if (!Policy.SuppressSpecifiers && D->isModulePrivate())
+ Out << "__module_private__ ";
+ Out << D->getKindName();
+
+ prettyPrintAttributes(D);
+
+ if (D->getIdentifier())
+ Out << ' ' << *D;
+
+ if (D->isCompleteDefinition()) {
+ // Print the base classes
+ if (D->getNumBases()) {
+ Out << " : ";
+ for (CXXRecordDecl::base_class_iterator Base = D->bases_begin(),
+ BaseEnd = D->bases_end(); Base != BaseEnd; ++Base) {
+ if (Base != D->bases_begin())
+ Out << ", ";
+
+ if (Base->isVirtual())
+ Out << "virtual ";
+
+ AccessSpecifier AS = Base->getAccessSpecifierAsWritten();
+ if (AS != AS_none) {
+ Print(AS);
+ Out << " ";
+ }
+ Out << Base->getType().getAsString(Policy);
+
+ if (Base->isPackExpansion())
+ Out << "...";
+ }
+ }
+
+ // Print the class definition
+ // FIXME: Doesn't print access specifiers, e.g., "public:"
+ Out << " {\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+ }
+}
+
+void DeclPrinter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
+ const char *l;
+ if (D->getLanguage() == LinkageSpecDecl::lang_c)
+ l = "C";
+ else {
+ assert(D->getLanguage() == LinkageSpecDecl::lang_cxx &&
+ "unknown language in linkage specification");
+ l = "C++";
+ }
+
+ Out << "extern \"" << l << "\" ";
+ if (D->hasBraces()) {
+ Out << "{\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+ } else
+ Visit(*D->decls_begin());
+}
+
+void DeclPrinter::PrintTemplateParameters(const TemplateParameterList *Params,
+ const TemplateArgumentList *Args) {
+ assert(Params);
+ assert(!Args || Params->size() == Args->size());
+
+ Out << "template <";
+
+ for (unsigned i = 0, e = Params->size(); i != e; ++i) {
+ if (i != 0)
+ Out << ", ";
+
+ const Decl *Param = Params->getParam(i);
+ if (const TemplateTypeParmDecl *TTP =
+ dyn_cast<TemplateTypeParmDecl>(Param)) {
+
+ if (TTP->wasDeclaredWithTypename())
+ Out << "typename ";
+ else
+ Out << "class ";
+
+ if (TTP->isParameterPack())
+ Out << "...";
+
+ Out << *TTP;
+
+ if (Args) {
+ Out << " = ";
+ Args->get(i).print(Policy, Out);
+ } else if (TTP->hasDefaultArgument()) {
+ Out << " = ";
+ Out << TTP->getDefaultArgument().getAsString(Policy);
+      }
+ } else if (const NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ StringRef Name;
+ if (IdentifierInfo *II = NTTP->getIdentifier())
+ Name = II->getName();
+ printDeclType(NTTP->getType(), Name, NTTP->isParameterPack());
+
+ if (Args) {
+ Out << " = ";
+ Args->get(i).print(Policy, Out);
+ } else if (NTTP->hasDefaultArgument()) {
+ Out << " = ";
+ NTTP->getDefaultArgument()->printPretty(Out, nullptr, Policy,
+ Indentation);
+ }
+ } else if (const TemplateTemplateParmDecl *TTPD =
+ dyn_cast<TemplateTemplateParmDecl>(Param)) {
+ VisitTemplateDecl(TTPD);
+ // FIXME: print the default argument, if present.
+ }
+ }
+
+ Out << "> ";
+}
+
+void DeclPrinter::VisitTemplateDecl(const TemplateDecl *D) {
+ PrintTemplateParameters(D->getTemplateParameters());
+
+ if (const TemplateTemplateParmDecl *TTP =
+ dyn_cast<TemplateTemplateParmDecl>(D)) {
+ Out << "class ";
+ if (TTP->isParameterPack())
+ Out << "...";
+ Out << D->getName();
+ } else {
+ Visit(D->getTemplatedDecl());
+ }
+}
+
+void DeclPrinter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
+ if (PrintInstantiation) {
+ TemplateParameterList *Params = D->getTemplateParameters();
+ for (auto *I : D->specializations()) {
+ prettyPrintPragmas(I);
+ PrintTemplateParameters(Params, I->getTemplateSpecializationArgs());
+ Visit(I);
+ }
+ }
+
+ prettyPrintPragmas(D->getTemplatedDecl());
+ return VisitRedeclarableTemplateDecl(D);
+}
+
+void DeclPrinter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
+ if (PrintInstantiation) {
+ TemplateParameterList *Params = D->getTemplateParameters();
+ for (auto *I : D->specializations()) {
+ PrintTemplateParameters(Params, &I->getTemplateArgs());
+ Visit(I);
+ Out << '\n';
+ }
+ }
+
+ return VisitRedeclarableTemplateDecl(D);
+}
+
+//----------------------------------------------------------------------------
+// Objective-C declarations
+//----------------------------------------------------------------------------
+
+void DeclPrinter::PrintObjCMethodType(ASTContext &Ctx,
+ Decl::ObjCDeclQualifier Quals,
+ QualType T) {
+ Out << '(';
+ if (Quals & Decl::ObjCDeclQualifier::OBJC_TQ_In)
+ Out << "in ";
+ if (Quals & Decl::ObjCDeclQualifier::OBJC_TQ_Inout)
+ Out << "inout ";
+ if (Quals & Decl::ObjCDeclQualifier::OBJC_TQ_Out)
+ Out << "out ";
+ if (Quals & Decl::ObjCDeclQualifier::OBJC_TQ_Bycopy)
+ Out << "bycopy ";
+ if (Quals & Decl::ObjCDeclQualifier::OBJC_TQ_Byref)
+ Out << "byref ";
+ if (Quals & Decl::ObjCDeclQualifier::OBJC_TQ_Oneway)
+ Out << "oneway ";
+ if (Quals & Decl::ObjCDeclQualifier::OBJC_TQ_CSNullability) {
+ if (auto nullability = AttributedType::stripOuterNullability(T))
+ Out << getNullabilitySpelling(*nullability, true) << ' ';
+ }
+
+ Out << Ctx.getUnqualifiedObjCPointerType(T).getAsString(Policy);
+ Out << ')';
+}
+
+void DeclPrinter::PrintObjCTypeParams(ObjCTypeParamList *Params) {
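+  // Produces e.g. "<__covariant T : NSObject *, U>" for a parameterized
+  // Objective-C class.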
+ Out << "<";
+  bool First = true;
+ for (auto *Param : *Params) {
+ if (First) {
+ First = false;
+ } else {
+ Out << ", ";
+ }
+
+ switch (Param->getVariance()) {
+ case ObjCTypeParamVariance::Invariant:
+ break;
+
+ case ObjCTypeParamVariance::Covariant:
+ Out << "__covariant ";
+ break;
+
+ case ObjCTypeParamVariance::Contravariant:
+ Out << "__contravariant ";
+ break;
+ }
+
+ Out << Param->getDeclName().getAsString();
+
+ if (Param->hasExplicitBound()) {
+ Out << " : " << Param->getUnderlyingType().getAsString(Policy);
+ }
+ }
+ Out << ">";
+}
+
+void DeclPrinter::VisitObjCMethodDecl(ObjCMethodDecl *OMD) {
+ if (OMD->isInstanceMethod())
+ Out << "- ";
+ else
+ Out << "+ ";
+ if (!OMD->getReturnType().isNull()) {
+ PrintObjCMethodType(OMD->getASTContext(), OMD->getObjCDeclQualifier(),
+ OMD->getReturnType());
+ }
+
+ std::string name = OMD->getSelector().getAsString();
+ std::string::size_type pos, lastPos = 0;
+ for (const auto *PI : OMD->params()) {
+ // FIXME: selector is missing here!
+ pos = name.find_first_of(':', lastPos);
+ Out << " " << name.substr(lastPos, pos - lastPos) << ':';
+ PrintObjCMethodType(OMD->getASTContext(),
+ PI->getObjCDeclQualifier(),
+ PI->getType());
+ Out << *PI;
+ lastPos = pos + 1;
+ }
+
+ if (OMD->param_begin() == OMD->param_end())
+ Out << " " << name;
+
+ if (OMD->isVariadic())
+ Out << ", ...";
+
+ prettyPrintAttributes(OMD);
+
+ if (OMD->getBody() && !Policy.TerseOutput) {
+ Out << ' ';
+ OMD->getBody()->printPretty(Out, nullptr, Policy);
+ }
+ else if (Policy.PolishForDeclaration)
+ Out << ';';
+}
+
+void DeclPrinter::VisitObjCImplementationDecl(ObjCImplementationDecl *OID) {
+ std::string I = OID->getNameAsString();
+ ObjCInterfaceDecl *SID = OID->getSuperClass();
+
+ bool eolnOut = false;
+ if (SID)
+ Out << "@implementation " << I << " : " << *SID;
+ else
+ Out << "@implementation " << I;
+
+ if (OID->ivar_size() > 0) {
+ Out << "{\n";
+ eolnOut = true;
+ Indentation += Policy.Indentation;
+ for (const auto *I : OID->ivars()) {
+ Indent() << I->getASTContext().getUnqualifiedObjCPointerType(I->getType()).
+ getAsString(Policy) << ' ' << *I << ";\n";
+ }
+ Indentation -= Policy.Indentation;
+ Out << "}\n";
+ }
+ else if (SID || (OID->decls_begin() != OID->decls_end())) {
+ Out << "\n";
+ eolnOut = true;
+ }
+ VisitDeclContext(OID, false);
+ if (!eolnOut)
+ Out << "\n";
+ Out << "@end";
+}
+
+void DeclPrinter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *OID) {
+ std::string I = OID->getNameAsString();
+ ObjCInterfaceDecl *SID = OID->getSuperClass();
+
+ if (!OID->isThisDeclarationADefinition()) {
+ Out << "@class " << I;
+
+ if (auto TypeParams = OID->getTypeParamListAsWritten()) {
+ PrintObjCTypeParams(TypeParams);
+ }
+
+ Out << ";";
+ return;
+ }
+ bool eolnOut = false;
+ Out << "@interface " << I;
+
+ if (auto TypeParams = OID->getTypeParamListAsWritten()) {
+ PrintObjCTypeParams(TypeParams);
+ }
+
+ if (SID)
+ Out << " : " << QualType(OID->getSuperClassType(), 0).getAsString(Policy);
+
+ // Protocols?
+ const ObjCList<ObjCProtocolDecl> &Protocols = OID->getReferencedProtocols();
+ if (!Protocols.empty()) {
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end(); I != E; ++I)
+ Out << (I == Protocols.begin() ? '<' : ',') << **I;
+ Out << "> ";
+ }
+
+ if (OID->ivar_size() > 0) {
+ Out << "{\n";
+ eolnOut = true;
+ Indentation += Policy.Indentation;
+ for (const auto *I : OID->ivars()) {
+ Indent() << I->getASTContext()
+ .getUnqualifiedObjCPointerType(I->getType())
+ .getAsString(Policy) << ' ' << *I << ";\n";
+ }
+ Indentation -= Policy.Indentation;
+ Out << "}\n";
+ }
+ else if (SID || (OID->decls_begin() != OID->decls_end())) {
+ Out << "\n";
+ eolnOut = true;
+ }
+
+ VisitDeclContext(OID, false);
+ if (!eolnOut)
+ Out << "\n";
+ Out << "@end";
+ // FIXME: implement the rest...
+}
+
+void DeclPrinter::VisitObjCProtocolDecl(ObjCProtocolDecl *PID) {
+ if (!PID->isThisDeclarationADefinition()) {
+ Out << "@protocol " << *PID << ";\n";
+ return;
+ }
+ // Protocols?
+ const ObjCList<ObjCProtocolDecl> &Protocols = PID->getReferencedProtocols();
+ if (!Protocols.empty()) {
+ Out << "@protocol " << *PID;
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end(); I != E; ++I)
+ Out << (I == Protocols.begin() ? '<' : ',') << **I;
+ Out << ">\n";
+ } else
+ Out << "@protocol " << *PID << '\n';
+ VisitDeclContext(PID, false);
+ Out << "@end";
+}
+
+void DeclPrinter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *PID) {
+ Out << "@implementation " << *PID->getClassInterface() << '(' << *PID <<")\n";
+
+ VisitDeclContext(PID, false);
+ Out << "@end";
+ // FIXME: implement the rest...
+}
+
+void DeclPrinter::VisitObjCCategoryDecl(ObjCCategoryDecl *PID) {
+ Out << "@interface " << *PID->getClassInterface();
+ if (auto TypeParams = PID->getTypeParamList()) {
+ PrintObjCTypeParams(TypeParams);
+ }
+ Out << "(" << *PID << ")\n";
+ if (PID->ivar_size() > 0) {
+ Out << "{\n";
+ Indentation += Policy.Indentation;
+ for (const auto *I : PID->ivars())
+ Indent() << I->getASTContext().getUnqualifiedObjCPointerType(I->getType()).
+ getAsString(Policy) << ' ' << *I << ";\n";
+ Indentation -= Policy.Indentation;
+ Out << "}\n";
+ }
+
+ VisitDeclContext(PID, false);
+ Out << "@end";
+
+ // FIXME: implement the rest...
+}
+
+void DeclPrinter::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *AID) {
+ Out << "@compatibility_alias " << *AID
+ << ' ' << *AID->getClassInterface() << ";\n";
+}
+
+/// PrintObjCPropertyDecl - print a property declaration.
+///
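+/// Produces e.g. "@property ( copy,nonatomic ) NSString * title".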
+void DeclPrinter::VisitObjCPropertyDecl(ObjCPropertyDecl *PDecl) {
+ if (PDecl->getPropertyImplementation() == ObjCPropertyDecl::Required)
+ Out << "@required\n";
+ else if (PDecl->getPropertyImplementation() == ObjCPropertyDecl::Optional)
+ Out << "@optional\n";
+
+ QualType T = PDecl->getType();
+
+ Out << "@property";
+ if (PDecl->getPropertyAttributes() != ObjCPropertyDecl::OBJC_PR_noattr) {
+ bool first = true;
+ Out << " (";
+ if (PDecl->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_readonly) {
+ Out << (first ? ' ' : ',') << "readonly";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) {
+ Out << (first ? ' ' : ',') << "getter = ";
+ PDecl->getGetterName().print(Out);
+ first = false;
+ }
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) {
+ Out << (first ? ' ' : ',') << "setter = ";
+ PDecl->getSetterName().print(Out);
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_assign) {
+ Out << (first ? ' ' : ',') << "assign";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_readwrite) {
+ Out << (first ? ' ' : ',') << "readwrite";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_retain) {
+ Out << (first ? ' ' : ',') << "retain";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_strong) {
+ Out << (first ? ' ' : ',') << "strong";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_copy) {
+ Out << (first ? ' ' : ',') << "copy";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_nonatomic) {
+ Out << (first ? ' ' : ',') << "nonatomic";
+ first = false;
+ }
+ if (PDecl->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_atomic) {
+ Out << (first ? ' ' : ',') << "atomic";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_nullability) {
+ if (auto nullability = AttributedType::stripOuterNullability(T)) {
+ if (*nullability == NullabilityKind::Unspecified &&
+ (PDecl->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_null_resettable)) {
+ Out << (first ? ' ' : ',') << "null_resettable";
+ } else {
+ Out << (first ? ' ' : ',')
+ << getNullabilitySpelling(*nullability, true);
+ }
+ first = false;
+ }
+ }
+
+ (void) first; // Silence dead store warning due to idiomatic code.
+ Out << " )";
+ }
+ Out << ' ' << PDecl->getASTContext().getUnqualifiedObjCPointerType(T).
+ getAsString(Policy) << ' ' << *PDecl;
+ if (Policy.PolishForDeclaration)
+ Out << ';';
+}
+
+void DeclPrinter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *PID) {
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize)
+ Out << "@synthesize ";
+ else
+ Out << "@dynamic ";
+ Out << *PID->getPropertyDecl();
+ if (PID->getPropertyIvarDecl())
+ Out << '=' << *PID->getPropertyIvarDecl();
+}
+
+void DeclPrinter::VisitUsingDecl(UsingDecl *D) {
+ if (!D->isAccessDeclaration())
+ Out << "using ";
+ if (D->hasTypename())
+ Out << "typename ";
+ D->getQualifier()->print(Out, Policy);
+ Out << *D;
+}
+
+void
+DeclPrinter::VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D) {
+ Out << "using typename ";
+ D->getQualifier()->print(Out, Policy);
+ Out << D->getDeclName();
+}
+
+void DeclPrinter::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
+ if (!D->isAccessDeclaration())
+ Out << "using ";
+ D->getQualifier()->print(Out, Policy);
+ Out << D->getDeclName();
+}
+
+void DeclPrinter::VisitUsingShadowDecl(UsingShadowDecl *D) {
+ // ignore
+}
+
+void DeclPrinter::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) {
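+  // Produces e.g. "#pragma omp threadprivate(a,b)", printing each variable's
+  // qualified name.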
+ Out << "#pragma omp threadprivate";
+ if (!D->varlist_empty()) {
+ for (OMPThreadPrivateDecl::varlist_iterator I = D->varlist_begin(),
+ E = D->varlist_end();
+ I != E; ++I) {
+ Out << (I == D->varlist_begin() ? '(' : ',');
+ NamedDecl *ND = cast<NamedDecl>(cast<DeclRefExpr>(*I)->getDecl());
+ ND->printQualifiedName(Out);
+ }
+ Out << ")";
+ }
+}
+
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp b/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp
new file mode 100644
index 0000000..de3ebd2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp
@@ -0,0 +1,1259 @@
+//===--- DeclTemplate.cpp - Template Declaration AST Node Implementation --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the C++ related Decl classes for templates.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/STLExtras.h"
+#include <memory>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// TemplateParameterList Implementation
+//===----------------------------------------------------------------------===//
+
+TemplateParameterList::TemplateParameterList(SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ ArrayRef<NamedDecl *> Params,
+ SourceLocation RAngleLoc)
+ : TemplateLoc(TemplateLoc), LAngleLoc(LAngleLoc), RAngleLoc(RAngleLoc),
+ NumParams(Params.size()), ContainsUnexpandedParameterPack(false) {
+  assert(this->NumParams == Params.size() && "Too many template parameters");
+ for (unsigned Idx = 0; Idx < NumParams; ++Idx) {
+ NamedDecl *P = Params[Idx];
+ begin()[Idx] = P;
+
+ if (!P->isTemplateParameterPack()) {
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(P))
+ if (NTTP->getType()->containsUnexpandedParameterPack())
+ ContainsUnexpandedParameterPack = true;
+
+ if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(P))
+ if (TTP->getTemplateParameters()->containsUnexpandedParameterPack())
+ ContainsUnexpandedParameterPack = true;
+
+ // FIXME: If a default argument contains an unexpanded parameter pack, the
+ // template parameter list does too.
+ }
+ }
+}
+
+TemplateParameterList *TemplateParameterList::Create(
+ const ASTContext &C, SourceLocation TemplateLoc, SourceLocation LAngleLoc,
+ ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc) {
+ void *Mem = C.Allocate(totalSizeToAlloc<NamedDecl *>(Params.size()),
+ llvm::alignOf<TemplateParameterList>());
+ return new (Mem) TemplateParameterList(TemplateLoc, LAngleLoc, Params,
+ RAngleLoc);
+}
+
+unsigned TemplateParameterList::getMinRequiredArguments() const {
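+  // Counting stops at the first parameter pack or the first parameter with a
+  // default argument, so 'template<typename T, typename U = int>' requires
+  // one explicit argument.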
+ unsigned NumRequiredArgs = 0;
+ for (iterator P = const_cast<TemplateParameterList *>(this)->begin(),
+ PEnd = const_cast<TemplateParameterList *>(this)->end();
+ P != PEnd; ++P) {
+ if ((*P)->isTemplateParameterPack()) {
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P))
+ if (NTTP->isExpandedParameterPack()) {
+ NumRequiredArgs += NTTP->getNumExpansionTypes();
+ continue;
+ }
+
+ break;
+ }
+
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
+ if (TTP->hasDefaultArgument())
+ break;
+ } else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ if (NTTP->hasDefaultArgument())
+ break;
+ } else if (cast<TemplateTemplateParmDecl>(*P)->hasDefaultArgument())
+ break;
+
+ ++NumRequiredArgs;
+ }
+
+ return NumRequiredArgs;
+}
+
+unsigned TemplateParameterList::getDepth() const {
+ if (size() == 0)
+ return 0;
+
+ const NamedDecl *FirstParm = getParam(0);
+ if (const TemplateTypeParmDecl *TTP
+ = dyn_cast<TemplateTypeParmDecl>(FirstParm))
+ return TTP->getDepth();
+ else if (const NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(FirstParm))
+ return NTTP->getDepth();
+ else
+ return cast<TemplateTemplateParmDecl>(FirstParm)->getDepth();
+}
+
+static void AdoptTemplateParameterList(TemplateParameterList *Params,
+ DeclContext *Owner) {
+ for (TemplateParameterList::iterator P = Params->begin(),
+ PEnd = Params->end();
+ P != PEnd; ++P) {
+ (*P)->setDeclContext(Owner);
+
+ if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(*P))
+ AdoptTemplateParameterList(TTP->getTemplateParameters(), Owner);
+ }
+}
+
+namespace clang {
+void *allocateDefaultArgStorageChain(const ASTContext &C) {
+ return new (C) char[sizeof(void*) * 2];
+}
+}
+
+//===----------------------------------------------------------------------===//
+// RedeclarableTemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+RedeclarableTemplateDecl::CommonBase *
+RedeclarableTemplateDecl::getCommonPtr() const {
+ if (Common)
+ return Common;
+
+ // Walk the previous-declaration chain until we either find a declaration
+ // with a common pointer or we run out of previous declarations.
+ SmallVector<const RedeclarableTemplateDecl *, 2> PrevDecls;
+ for (const RedeclarableTemplateDecl *Prev = getPreviousDecl(); Prev;
+ Prev = Prev->getPreviousDecl()) {
+ if (Prev->Common) {
+ Common = Prev->Common;
+ break;
+ }
+
+ PrevDecls.push_back(Prev);
+ }
+
+ // If we never found a common pointer, allocate one now.
+ if (!Common) {
+ // FIXME: If any of the declarations is from an AST file, we probably
+ // need an update record to add the common data.
+
+ Common = newCommon(getASTContext());
+ }
+
+ // Update any previous declarations we saw with the common pointer.
+ for (unsigned I = 0, N = PrevDecls.size(); I != N; ++I)
+ PrevDecls[I]->Common = Common;
+
+ return Common;
+}
+
+template<class EntryType>
+typename RedeclarableTemplateDecl::SpecEntryTraits<EntryType>::DeclType *
+RedeclarableTemplateDecl::findSpecializationImpl(
+ llvm::FoldingSetVector<EntryType> &Specs, ArrayRef<TemplateArgument> Args,
+ void *&InsertPos) {
+ typedef SpecEntryTraits<EntryType> SETraits;
+ llvm::FoldingSetNodeID ID;
+  EntryType::Profile(ID, Args, getASTContext());
+ EntryType *Entry = Specs.FindNodeOrInsertPos(ID, InsertPos);
+ return Entry ? SETraits::getDecl(Entry)->getMostRecentDecl() : nullptr;
+}
+
+template<class Derived, class EntryType>
+void RedeclarableTemplateDecl::addSpecializationImpl(
+ llvm::FoldingSetVector<EntryType> &Specializations, EntryType *Entry,
+ void *InsertPos) {
+ typedef SpecEntryTraits<EntryType> SETraits;
+ if (InsertPos) {
+#ifndef NDEBUG
+ void *CorrectInsertPos;
+ assert(!findSpecializationImpl(Specializations,
+ SETraits::getTemplateArgs(Entry),
+ CorrectInsertPos) &&
+ InsertPos == CorrectInsertPos &&
+ "given incorrect InsertPos for specialization");
+#endif
+ Specializations.InsertNode(Entry, InsertPos);
+ } else {
+ EntryType *Existing = Specializations.GetOrInsertNode(Entry);
+ (void)Existing;
+ assert(SETraits::getDecl(Existing)->isCanonicalDecl() &&
+ "non-canonical specialization?");
+ }
+
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->AddedCXXTemplateSpecialization(cast<Derived>(this),
+ SETraits::getDecl(Entry));
+}
+
+/// \brief Generate the injected template arguments for the given template
+/// parameter list, e.g., for the injected-class-name of a class template.
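+/// For 'template<typename T, int N> class Array', this produces the
+/// arguments 'T' and 'N', so the injected-class-name is 'Array<T, N>'.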
+static void GenerateInjectedTemplateArgs(ASTContext &Context,
+ TemplateParameterList *Params,
+ TemplateArgument *Args) {
+ for (TemplateParameterList::iterator Param = Params->begin(),
+ ParamEnd = Params->end();
+ Param != ParamEnd; ++Param) {
+ TemplateArgument Arg;
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*Param)) {
+ QualType ArgType = Context.getTypeDeclType(TTP);
+ if (TTP->isParameterPack())
+ ArgType = Context.getPackExpansionType(ArgType, None);
+
+ Arg = TemplateArgument(ArgType);
+ } else if (NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
+ Expr *E = new (Context) DeclRefExpr(NTTP, /*enclosing*/ false,
+ NTTP->getType().getNonLValueExprType(Context),
+ Expr::getValueKindForType(NTTP->getType()),
+ NTTP->getLocation());
+
+ if (NTTP->isParameterPack())
+ E = new (Context) PackExpansionExpr(Context.DependentTy, E,
+ NTTP->getLocation(), None);
+ Arg = TemplateArgument(E);
+ } else {
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*Param);
+ if (TTP->isParameterPack())
+ Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>());
+ else
+ Arg = TemplateArgument(TemplateName(TTP));
+ }
+
+ if ((*Param)->isTemplateParameterPack())
+ Arg = TemplateArgument::CreatePackCopy(Context, Arg);
+
+ *Args++ = Arg;
+ }
+}
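+
+// Example (illustrative): for
+//   template <typename T, int N, typename ...Ts> class X;
+// the injected arguments are <T, N, Ts...> -- each parameter reused as an
+// argument, with parameter packs wrapped in pack expansions.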
+
+//===----------------------------------------------------------------------===//
+// FunctionTemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void FunctionTemplateDecl::DeallocateCommon(void *Ptr) {
+ static_cast<Common *>(Ptr)->~Common();
+}
+
+FunctionTemplateDecl *FunctionTemplateDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation L,
+ DeclarationName Name,
+ TemplateParameterList *Params,
+ NamedDecl *Decl) {
+ AdoptTemplateParameterList(Params, cast<DeclContext>(Decl));
+ return new (C, DC) FunctionTemplateDecl(C, DC, L, Name, Params, Decl);
+}
+
+FunctionTemplateDecl *FunctionTemplateDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) FunctionTemplateDecl(C, nullptr, SourceLocation(),
+ DeclarationName(), nullptr, nullptr);
+}
+
+RedeclarableTemplateDecl::CommonBase *
+FunctionTemplateDecl::newCommon(ASTContext &C) const {
+ Common *CommonPtr = new (C) Common;
+ C.AddDeallocation(DeallocateCommon, CommonPtr);
+ return CommonPtr;
+}
+
+void FunctionTemplateDecl::LoadLazySpecializations() const {
+ // Grab the most recent declaration to ensure we've loaded any lazy
+ // redeclarations of this template.
+ //
+ // FIXME: Avoid walking the entire redeclaration chain here.
+ Common *CommonPtr = getMostRecentDecl()->getCommonPtr();
+ if (CommonPtr->LazySpecializations) {
+ ASTContext &Context = getASTContext();
+ uint32_t *Specs = CommonPtr->LazySpecializations;
+ CommonPtr->LazySpecializations = nullptr;
+ for (uint32_t I = 0, N = *Specs++; I != N; ++I)
+ (void)Context.getExternalSource()->GetExternalDecl(Specs[I]);
+ }
+}
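+
+// Note: LazySpecializations is a counted array -- the first element holds
+// the number of entries and the remaining elements hold external
+// declaration IDs -- which is why the loop above reads the count before
+// walking the entries.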
+
+llvm::FoldingSetVector<FunctionTemplateSpecializationInfo> &
+FunctionTemplateDecl::getSpecializations() const {
+ LoadLazySpecializations();
+ return getCommonPtr()->Specializations;
+}
+
+FunctionDecl *
+FunctionTemplateDecl::findSpecialization(ArrayRef<TemplateArgument> Args,
+ void *&InsertPos) {
+ return findSpecializationImpl(getSpecializations(), Args, InsertPos);
+}
+
+void FunctionTemplateDecl::addSpecialization(
+ FunctionTemplateSpecializationInfo *Info, void *InsertPos) {
+ addSpecializationImpl<FunctionTemplateDecl>(getSpecializations(), Info,
+ InsertPos);
+}
+
+ArrayRef<TemplateArgument> FunctionTemplateDecl::getInjectedTemplateArgs() {
+ TemplateParameterList *Params = getTemplateParameters();
+ Common *CommonPtr = getCommonPtr();
+ if (!CommonPtr->InjectedArgs) {
+ CommonPtr->InjectedArgs
+ = new (getASTContext()) TemplateArgument[Params->size()];
+ GenerateInjectedTemplateArgs(getASTContext(), Params,
+ CommonPtr->InjectedArgs);
+ }
+
+ return llvm::makeArrayRef(CommonPtr->InjectedArgs, Params->size());
+}
+
+//===----------------------------------------------------------------------===//
+// ClassTemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void ClassTemplateDecl::DeallocateCommon(void *Ptr) {
+ static_cast<Common *>(Ptr)->~Common();
+}
+
+ClassTemplateDecl *ClassTemplateDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation L,
+ DeclarationName Name,
+ TemplateParameterList *Params,
+ NamedDecl *Decl,
+ ClassTemplateDecl *PrevDecl) {
+ AdoptTemplateParameterList(Params, cast<DeclContext>(Decl));
+ ClassTemplateDecl *New = new (C, DC) ClassTemplateDecl(C, DC, L, Name,
+ Params, Decl);
+ New->setPreviousDecl(PrevDecl);
+ return New;
+}
+
+ClassTemplateDecl *ClassTemplateDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) ClassTemplateDecl(C, nullptr, SourceLocation(),
+ DeclarationName(), nullptr, nullptr);
+}
+
+void ClassTemplateDecl::LoadLazySpecializations() const {
+ // Grab the most recent declaration to ensure we've loaded any lazy
+ // redeclarations of this template.
+ //
+ // FIXME: Avoid walking the entire redeclaration chain here.
+ Common *CommonPtr = getMostRecentDecl()->getCommonPtr();
+ if (CommonPtr->LazySpecializations) {
+ ASTContext &Context = getASTContext();
+ uint32_t *Specs = CommonPtr->LazySpecializations;
+ CommonPtr->LazySpecializations = nullptr;
+ for (uint32_t I = 0, N = *Specs++; I != N; ++I)
+ (void)Context.getExternalSource()->GetExternalDecl(Specs[I]);
+ }
+}
+
+llvm::FoldingSetVector<ClassTemplateSpecializationDecl> &
+ClassTemplateDecl::getSpecializations() const {
+ LoadLazySpecializations();
+ return getCommonPtr()->Specializations;
+}
+
+llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl> &
+ClassTemplateDecl::getPartialSpecializations() {
+ LoadLazySpecializations();
+ return getCommonPtr()->PartialSpecializations;
+}
+
+RedeclarableTemplateDecl::CommonBase *
+ClassTemplateDecl::newCommon(ASTContext &C) const {
+ Common *CommonPtr = new (C) Common;
+ C.AddDeallocation(DeallocateCommon, CommonPtr);
+ return CommonPtr;
+}
+
+ClassTemplateSpecializationDecl *
+ClassTemplateDecl::findSpecialization(ArrayRef<TemplateArgument> Args,
+ void *&InsertPos) {
+ return findSpecializationImpl(getSpecializations(), Args, InsertPos);
+}
+
+void ClassTemplateDecl::AddSpecialization(ClassTemplateSpecializationDecl *D,
+ void *InsertPos) {
+ addSpecializationImpl<ClassTemplateDecl>(getSpecializations(), D, InsertPos);
+}
+
+ClassTemplatePartialSpecializationDecl *
+ClassTemplateDecl::findPartialSpecialization(ArrayRef<TemplateArgument> Args,
+ void *&InsertPos) {
+ return findSpecializationImpl(getPartialSpecializations(), Args, InsertPos);
+}
+
+void ClassTemplateDecl::AddPartialSpecialization(
+ ClassTemplatePartialSpecializationDecl *D,
+ void *InsertPos) {
+ if (InsertPos)
+ getPartialSpecializations().InsertNode(D, InsertPos);
+ else {
+ ClassTemplatePartialSpecializationDecl *Existing
+ = getPartialSpecializations().GetOrInsertNode(D);
+ (void)Existing;
+ assert(Existing->isCanonicalDecl() && "Non-canonical specialization?");
+ }
+
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->AddedCXXTemplateSpecialization(this, D);
+}
+
+void ClassTemplateDecl::getPartialSpecializations(
+ SmallVectorImpl<ClassTemplatePartialSpecializationDecl *> &PS) {
+ llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl> &PartialSpecs
+ = getPartialSpecializations();
+ PS.clear();
+ PS.reserve(PartialSpecs.size());
+ for (llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl>::iterator
+ P = PartialSpecs.begin(), PEnd = PartialSpecs.end();
+ P != PEnd; ++P)
+ PS.push_back(P->getMostRecentDecl());
+}
+
+ClassTemplatePartialSpecializationDecl *
+ClassTemplateDecl::findPartialSpecialization(QualType T) {
+ ASTContext &Context = getASTContext();
+ using llvm::FoldingSetVector;
+ typedef FoldingSetVector<ClassTemplatePartialSpecializationDecl>::iterator
+ partial_spec_iterator;
+ for (partial_spec_iterator P = getPartialSpecializations().begin(),
+ PEnd = getPartialSpecializations().end();
+ P != PEnd; ++P) {
+ if (Context.hasSameType(P->getInjectedSpecializationType(), T))
+ return P->getMostRecentDecl();
+ }
+
+ return nullptr;
+}
+
+ClassTemplatePartialSpecializationDecl *
+ClassTemplateDecl::findPartialSpecInstantiatedFromMember(
+ ClassTemplatePartialSpecializationDecl *D) {
+ Decl *DCanon = D->getCanonicalDecl();
+ for (llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl>::iterator
+ P = getPartialSpecializations().begin(),
+ PEnd = getPartialSpecializations().end();
+ P != PEnd; ++P) {
+ if (P->getInstantiatedFromMember()->getCanonicalDecl() == DCanon)
+ return P->getMostRecentDecl();
+ }
+
+ return nullptr;
+}
+
+QualType
+ClassTemplateDecl::getInjectedClassNameSpecialization() {
+ Common *CommonPtr = getCommonPtr();
+ if (!CommonPtr->InjectedClassNameType.isNull())
+ return CommonPtr->InjectedClassNameType;
+
+ // C++0x [temp.dep.type]p2:
+ // The template argument list of a primary template is a template argument
+ // list in which the nth template argument has the value of the nth template
+ // parameter of the class template. If the nth template parameter is a
+ // template parameter pack (14.5.3), the nth template argument is a pack
+ // expansion (14.5.3) whose pattern is the name of the template parameter
+ // pack.
+ ASTContext &Context = getASTContext();
+ TemplateParameterList *Params = getTemplateParameters();
+ SmallVector<TemplateArgument, 16> TemplateArgs;
+ TemplateArgs.resize(Params->size());
+ GenerateInjectedTemplateArgs(Context, Params, TemplateArgs.data());
+ CommonPtr->InjectedClassNameType
+ = Context.getTemplateSpecializationType(TemplateName(this),
+ &TemplateArgs[0],
+ TemplateArgs.size());
+ return CommonPtr->InjectedClassNameType;
+}
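+
+// Example (illustrative): within
+//   template <typename T, typename ...Ts> class vec { /* ... */ };
+// the injected-class-name 'vec' denotes vec<T, Ts...>; the computed type
+// is cached in Common, so the work above happens once per template.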
+
+//===----------------------------------------------------------------------===//
+// TemplateTypeParm Allocation/Deallocation Method Implementations
+//===----------------------------------------------------------------------===//
+
+TemplateTypeParmDecl *
+TemplateTypeParmDecl::Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation KeyLoc, SourceLocation NameLoc,
+ unsigned D, unsigned P, IdentifierInfo *Id,
+ bool Typename, bool ParameterPack) {
+ TemplateTypeParmDecl *TTPDecl =
+ new (C, DC) TemplateTypeParmDecl(DC, KeyLoc, NameLoc, Id, Typename);
+ QualType TTPType = C.getTemplateTypeParmType(D, P, ParameterPack, TTPDecl);
+ TTPDecl->setTypeForDecl(TTPType.getTypePtr());
+ return TTPDecl;
+}
+
+TemplateTypeParmDecl *
+TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
+ return new (C, ID) TemplateTypeParmDecl(nullptr, SourceLocation(),
+ SourceLocation(), nullptr, false);
+}
+
+SourceLocation TemplateTypeParmDecl::getDefaultArgumentLoc() const {
+ return hasDefaultArgument()
+ ? getDefaultArgumentInfo()->getTypeLoc().getBeginLoc()
+ : SourceLocation();
+}
+
+SourceRange TemplateTypeParmDecl::getSourceRange() const {
+ if (hasDefaultArgument() && !defaultArgumentWasInherited())
+ return SourceRange(getLocStart(),
+ getDefaultArgumentInfo()->getTypeLoc().getEndLoc());
+ else
+ return TypeDecl::getSourceRange();
+}
+
+unsigned TemplateTypeParmDecl::getDepth() const {
+ return getTypeForDecl()->getAs<TemplateTypeParmType>()->getDepth();
+}
+
+unsigned TemplateTypeParmDecl::getIndex() const {
+ return getTypeForDecl()->getAs<TemplateTypeParmType>()->getIndex();
+}
+
+bool TemplateTypeParmDecl::isParameterPack() const {
+ return getTypeForDecl()->getAs<TemplateTypeParmType>()->isParameterPack();
+}
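+
+// Note: the three accessors above recover depth, index and pack-ness from
+// the canonical TemplateTypeParmType installed by Create(), rather than
+// storing redundant copies on the declaration itself.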
+
+//===----------------------------------------------------------------------===//
+// NonTypeTemplateParmDecl Method Implementations
+//===----------------------------------------------------------------------===//
+
+NonTypeTemplateParmDecl::NonTypeTemplateParmDecl(DeclContext *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc,
+ unsigned D, unsigned P,
+ IdentifierInfo *Id,
+ QualType T,
+ TypeSourceInfo *TInfo,
+ const QualType *ExpandedTypes,
+ unsigned NumExpandedTypes,
+ TypeSourceInfo **ExpandedTInfos)
+ : DeclaratorDecl(NonTypeTemplateParm, DC, IdLoc, Id, T, TInfo, StartLoc),
+ TemplateParmPosition(D, P), ParameterPack(true),
+ ExpandedParameterPack(true), NumExpandedTypes(NumExpandedTypes) {
+ if (ExpandedTypes && ExpandedTInfos) {
+ auto TypesAndInfos =
+ getTrailingObjects<std::pair<QualType, TypeSourceInfo *>>();
+ for (unsigned I = 0; I != NumExpandedTypes; ++I) {
+ new (&TypesAndInfos[I].first) QualType(ExpandedTypes[I]);
+ TypesAndInfos[I].second = ExpandedTInfos[I];
+ }
+ }
+}
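+
+// Note: for an expanded parameter pack, the (QualType, TypeSourceInfo *)
+// pairs live in trailing storage sized by the Create() overloads below,
+// so the expansion costs no separate heap allocation.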
+
+NonTypeTemplateParmDecl *
+NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ unsigned D, unsigned P, IdentifierInfo *Id,
+ QualType T, bool ParameterPack,
+ TypeSourceInfo *TInfo) {
+ return new (C, DC) NonTypeTemplateParmDecl(DC, StartLoc, IdLoc, D, P, Id,
+ T, ParameterPack, TInfo);
+}
+
+NonTypeTemplateParmDecl *
+NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ unsigned D, unsigned P,
+ IdentifierInfo *Id, QualType T,
+ TypeSourceInfo *TInfo,
+ const QualType *ExpandedTypes,
+ unsigned NumExpandedTypes,
+ TypeSourceInfo **ExpandedTInfos) {
+ return new (C, DC,
+ additionalSizeToAlloc<std::pair<QualType, TypeSourceInfo *>>(
+ NumExpandedTypes))
+ NonTypeTemplateParmDecl(DC, StartLoc, IdLoc, D, P, Id, T, TInfo,
+ ExpandedTypes, NumExpandedTypes, ExpandedTInfos);
+}
+
+NonTypeTemplateParmDecl *
+NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) NonTypeTemplateParmDecl(nullptr, SourceLocation(),
+ SourceLocation(), 0, 0, nullptr,
+ QualType(), false, nullptr);
+}
+
+NonTypeTemplateParmDecl *
+NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+ unsigned NumExpandedTypes) {
+ return new (C, ID,
+ additionalSizeToAlloc<std::pair<QualType, TypeSourceInfo *>>(
+ NumExpandedTypes))
+ NonTypeTemplateParmDecl(nullptr, SourceLocation(), SourceLocation(), 0, 0,
+ nullptr, QualType(), nullptr, nullptr,
+ NumExpandedTypes, nullptr);
+}
+
+SourceRange NonTypeTemplateParmDecl::getSourceRange() const {
+ if (hasDefaultArgument() && !defaultArgumentWasInherited())
+ return SourceRange(getOuterLocStart(),
+ getDefaultArgument()->getSourceRange().getEnd());
+ return DeclaratorDecl::getSourceRange();
+}
+
+SourceLocation NonTypeTemplateParmDecl::getDefaultArgumentLoc() const {
+ return hasDefaultArgument()
+ ? getDefaultArgument()->getSourceRange().getBegin()
+ : SourceLocation();
+}
+
+//===----------------------------------------------------------------------===//
+// TemplateTemplateParmDecl Method Implementations
+//===----------------------------------------------------------------------===//
+
+void TemplateTemplateParmDecl::anchor() { }
+
+TemplateTemplateParmDecl::TemplateTemplateParmDecl(
+ DeclContext *DC, SourceLocation L, unsigned D, unsigned P,
+ IdentifierInfo *Id, TemplateParameterList *Params,
+ unsigned NumExpansions, TemplateParameterList * const *Expansions)
+ : TemplateDecl(TemplateTemplateParm, DC, L, Id, Params),
+ TemplateParmPosition(D, P), ParameterPack(true),
+ ExpandedParameterPack(true), NumExpandedParams(NumExpansions) {
+ if (Expansions)
+ std::uninitialized_copy(Expansions, Expansions + NumExpandedParams,
+ getTrailingObjects<TemplateParameterList *>());
+}
+
+TemplateTemplateParmDecl *
+TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation L, unsigned D, unsigned P,
+ bool ParameterPack, IdentifierInfo *Id,
+ TemplateParameterList *Params) {
+ return new (C, DC) TemplateTemplateParmDecl(DC, L, D, P, ParameterPack, Id,
+ Params);
+}
+
+TemplateTemplateParmDecl *
+TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation L, unsigned D, unsigned P,
+ IdentifierInfo *Id,
+ TemplateParameterList *Params,
+ ArrayRef<TemplateParameterList *> Expansions) {
+ return new (C, DC,
+ additionalSizeToAlloc<TemplateParameterList *>(Expansions.size()))
+ TemplateTemplateParmDecl(DC, L, D, P, Id, Params, Expansions.size(),
+ Expansions.data());
+}
+
+TemplateTemplateParmDecl *
+TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) TemplateTemplateParmDecl(nullptr, SourceLocation(), 0, 0,
+ false, nullptr, nullptr);
+}
+
+TemplateTemplateParmDecl *
+TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+ unsigned NumExpansions) {
+ return new (C, ID,
+ additionalSizeToAlloc<TemplateParameterList *>(NumExpansions))
+ TemplateTemplateParmDecl(nullptr, SourceLocation(), 0, 0, nullptr,
+ nullptr, NumExpansions, nullptr);
+}
+
+SourceLocation TemplateTemplateParmDecl::getDefaultArgumentLoc() const {
+ return hasDefaultArgument() ? getDefaultArgument().getLocation()
+ : SourceLocation();
+}
+
+void TemplateTemplateParmDecl::setDefaultArgument(
+ const ASTContext &C, const TemplateArgumentLoc &DefArg) {
+ if (DefArg.getArgument().isNull())
+ DefaultArgument.set(nullptr);
+ else
+ DefaultArgument.set(new (C) TemplateArgumentLoc(DefArg));
+}
+
+//===----------------------------------------------------------------------===//
+// TemplateArgumentList Implementation
+//===----------------------------------------------------------------------===//
+TemplateArgumentList::TemplateArgumentList(const TemplateArgument *Args,
+ unsigned NumArgs)
+ : Arguments(getTrailingObjects<TemplateArgument>()), NumArguments(NumArgs) {
+ std::uninitialized_copy(Args, Args + NumArgs,
+ getTrailingObjects<TemplateArgument>());
+}
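+
+// Note: the arguments live in trailing storage, so a list and its
+// arguments occupy a single allocation; CreateCopy below sizes it with
+// totalSizeToAlloc before placement-constructing the list.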
+
+TemplateArgumentList *
+TemplateArgumentList::CreateCopy(ASTContext &Context,
+ const TemplateArgument *Args,
+ unsigned NumArgs) {
+ void *Mem = Context.Allocate(totalSizeToAlloc<TemplateArgument>(NumArgs));
+ return new (Mem) TemplateArgumentList(Args, NumArgs);
+}
+
+FunctionTemplateSpecializationInfo *
+FunctionTemplateSpecializationInfo::Create(ASTContext &C, FunctionDecl *FD,
+ FunctionTemplateDecl *Template,
+ TemplateSpecializationKind TSK,
+ const TemplateArgumentList *TemplateArgs,
+ const TemplateArgumentListInfo *TemplateArgsAsWritten,
+ SourceLocation POI) {
+ const ASTTemplateArgumentListInfo *ArgsAsWritten = nullptr;
+ if (TemplateArgsAsWritten)
+ ArgsAsWritten = ASTTemplateArgumentListInfo::Create(C,
+ *TemplateArgsAsWritten);
+
+ return new (C) FunctionTemplateSpecializationInfo(FD, Template, TSK,
+ TemplateArgs,
+ ArgsAsWritten,
+ POI);
+}
+
+//===----------------------------------------------------------------------===//
+// TemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void TemplateDecl::anchor() { }
+
+//===----------------------------------------------------------------------===//
+// ClassTemplateSpecializationDecl Implementation
+//===----------------------------------------------------------------------===//
+ClassTemplateSpecializationDecl::
+ClassTemplateSpecializationDecl(ASTContext &Context, Kind DK, TagKind TK,
+ DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc,
+ ClassTemplateDecl *SpecializedTemplate,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ ClassTemplateSpecializationDecl *PrevDecl)
+ : CXXRecordDecl(DK, TK, Context, DC, StartLoc, IdLoc,
+ SpecializedTemplate->getIdentifier(),
+ PrevDecl),
+ SpecializedTemplate(SpecializedTemplate),
+ ExplicitInfo(nullptr),
+ TemplateArgs(TemplateArgumentList::CreateCopy(Context, Args, NumArgs)),
+ SpecializationKind(TSK_Undeclared) {
+}
+
+ClassTemplateSpecializationDecl::ClassTemplateSpecializationDecl(ASTContext &C,
+ Kind DK)
+ : CXXRecordDecl(DK, TTK_Struct, C, nullptr, SourceLocation(),
+ SourceLocation(), nullptr, nullptr),
+ ExplicitInfo(nullptr), SpecializationKind(TSK_Undeclared) {}
+
+ClassTemplateSpecializationDecl *
+ClassTemplateSpecializationDecl::Create(ASTContext &Context, TagKind TK,
+ DeclContext *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc,
+ ClassTemplateDecl *SpecializedTemplate,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ ClassTemplateSpecializationDecl *PrevDecl) {
+ ClassTemplateSpecializationDecl *Result =
+ new (Context, DC) ClassTemplateSpecializationDecl(
+ Context, ClassTemplateSpecialization, TK, DC, StartLoc, IdLoc,
+ SpecializedTemplate, Args, NumArgs, PrevDecl);
+ Result->MayHaveOutOfDateDef = false;
+
+ Context.getTypeDeclType(Result, PrevDecl);
+ return Result;
+}
+
+ClassTemplateSpecializationDecl *
+ClassTemplateSpecializationDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ ClassTemplateSpecializationDecl *Result =
+ new (C, ID) ClassTemplateSpecializationDecl(C, ClassTemplateSpecialization);
+ Result->MayHaveOutOfDateDef = false;
+ return Result;
+}
+
+void ClassTemplateSpecializationDecl::getNameForDiagnostic(
+ raw_ostream &OS, const PrintingPolicy &Policy, bool Qualified) const {
+ NamedDecl::getNameForDiagnostic(OS, Policy, Qualified);
+
+ const TemplateArgumentList &TemplateArgs = getTemplateArgs();
+ TemplateSpecializationType::PrintTemplateArgumentList(
+ OS, TemplateArgs.data(), TemplateArgs.size(), Policy);
+}
+
+ClassTemplateDecl *
+ClassTemplateSpecializationDecl::getSpecializedTemplate() const {
+ if (SpecializedPartialSpecialization *PartialSpec
+ = SpecializedTemplate.dyn_cast<SpecializedPartialSpecialization*>())
+ return PartialSpec->PartialSpecialization->getSpecializedTemplate();
+ return SpecializedTemplate.get<ClassTemplateDecl*>();
+}
+
+SourceRange
+ClassTemplateSpecializationDecl::getSourceRange() const {
+ if (ExplicitInfo) {
+ SourceLocation Begin = getTemplateKeywordLoc();
+ if (Begin.isValid()) {
+ // Here we have an explicit (partial) specialization or instantiation.
+ assert(getSpecializationKind() == TSK_ExplicitSpecialization ||
+ getSpecializationKind() == TSK_ExplicitInstantiationDeclaration ||
+ getSpecializationKind() == TSK_ExplicitInstantiationDefinition);
+ if (getExternLoc().isValid())
+ Begin = getExternLoc();
+ SourceLocation End = getRBraceLoc();
+ if (End.isInvalid())
+ End = getTypeAsWritten()->getTypeLoc().getEndLoc();
+ return SourceRange(Begin, End);
+ }
+ // An implicit instantiation of a class template partial specialization
+ // uses ExplicitInfo to record the TypeAsWritten, but the source
+ // locations should be retrieved from the instantiation pattern.
+ typedef ClassTemplatePartialSpecializationDecl CTPSDecl;
+ CTPSDecl *ctpsd = const_cast<CTPSDecl*>(cast<CTPSDecl>(this));
+ CTPSDecl *inst_from = ctpsd->getInstantiatedFromMember();
+ assert(inst_from != nullptr);
+ return inst_from->getSourceRange();
+ }
+ else {
+ // No explicit info available.
+ llvm::PointerUnion<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *>
+ inst_from = getInstantiatedFrom();
+ if (inst_from.isNull())
+ return getSpecializedTemplate()->getSourceRange();
+ if (ClassTemplateDecl *ctd = inst_from.dyn_cast<ClassTemplateDecl*>())
+ return ctd->getSourceRange();
+ return inst_from.get<ClassTemplatePartialSpecializationDecl*>()
+ ->getSourceRange();
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// ClassTemplatePartialSpecializationDecl Implementation
+//===----------------------------------------------------------------------===//
+void ClassTemplatePartialSpecializationDecl::anchor() { }
+
+ClassTemplatePartialSpecializationDecl::
+ClassTemplatePartialSpecializationDecl(ASTContext &Context, TagKind TK,
+ DeclContext *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc,
+ TemplateParameterList *Params,
+ ClassTemplateDecl *SpecializedTemplate,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ const ASTTemplateArgumentListInfo *ArgInfos,
+ ClassTemplatePartialSpecializationDecl *PrevDecl)
+ : ClassTemplateSpecializationDecl(Context,
+ ClassTemplatePartialSpecialization,
+ TK, DC, StartLoc, IdLoc,
+ SpecializedTemplate,
+ Args, NumArgs, PrevDecl),
+ TemplateParams(Params), ArgsAsWritten(ArgInfos),
+ InstantiatedFromMember(nullptr, false)
+{
+ AdoptTemplateParameterList(Params, this);
+}
+
+ClassTemplatePartialSpecializationDecl *
+ClassTemplatePartialSpecializationDecl::
+Create(ASTContext &Context, TagKind TK, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ TemplateParameterList *Params,
+ ClassTemplateDecl *SpecializedTemplate,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ const TemplateArgumentListInfo &ArgInfos,
+ QualType CanonInjectedType,
+ ClassTemplatePartialSpecializationDecl *PrevDecl) {
+ const ASTTemplateArgumentListInfo *ASTArgInfos =
+ ASTTemplateArgumentListInfo::Create(Context, ArgInfos);
+
+ ClassTemplatePartialSpecializationDecl *Result = new (Context, DC)
+ ClassTemplatePartialSpecializationDecl(Context, TK, DC, StartLoc, IdLoc,
+ Params, SpecializedTemplate, Args,
+ NumArgs, ASTArgInfos, PrevDecl);
+ Result->setSpecializationKind(TSK_ExplicitSpecialization);
+ Result->MayHaveOutOfDateDef = false;
+
+ Context.getInjectedClassNameType(Result, CanonInjectedType);
+ return Result;
+}
+
+ClassTemplatePartialSpecializationDecl *
+ClassTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ ClassTemplatePartialSpecializationDecl *Result =
+ new (C, ID) ClassTemplatePartialSpecializationDecl(C);
+ Result->MayHaveOutOfDateDef = false;
+ return Result;
+}
+
+//===----------------------------------------------------------------------===//
+// FriendTemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void FriendTemplateDecl::anchor() { }
+
+FriendTemplateDecl *FriendTemplateDecl::Create(ASTContext &Context,
+ DeclContext *DC,
+ SourceLocation L,
+ unsigned NParams,
+ TemplateParameterList **Params,
+ FriendUnion Friend,
+ SourceLocation FLoc) {
+ return new (Context, DC) FriendTemplateDecl(DC, L, NParams, Params,
+ Friend, FLoc);
+}
+
+FriendTemplateDecl *FriendTemplateDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) FriendTemplateDecl(EmptyShell());
+}
+
+//===----------------------------------------------------------------------===//
+// TypeAliasTemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+TypeAliasTemplateDecl *TypeAliasTemplateDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation L,
+ DeclarationName Name,
+ TemplateParameterList *Params,
+ NamedDecl *Decl) {
+ AdoptTemplateParameterList(Params, DC);
+ return new (C, DC) TypeAliasTemplateDecl(C, DC, L, Name, Params, Decl);
+}
+
+TypeAliasTemplateDecl *TypeAliasTemplateDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) TypeAliasTemplateDecl(C, nullptr, SourceLocation(),
+ DeclarationName(), nullptr, nullptr);
+}
+
+void TypeAliasTemplateDecl::DeallocateCommon(void *Ptr) {
+ static_cast<Common *>(Ptr)->~Common();
+}
+RedeclarableTemplateDecl::CommonBase *
+TypeAliasTemplateDecl::newCommon(ASTContext &C) const {
+ Common *CommonPtr = new (C) Common;
+ C.AddDeallocation(DeallocateCommon, CommonPtr);
+ return CommonPtr;
+}
+
+//===----------------------------------------------------------------------===//
+// ClassScopeFunctionSpecializationDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void ClassScopeFunctionSpecializationDecl::anchor() { }
+
+ClassScopeFunctionSpecializationDecl *
+ClassScopeFunctionSpecializationDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) ClassScopeFunctionSpecializationDecl(
+ nullptr, SourceLocation(), nullptr, false, TemplateArgumentListInfo());
+}
+
+//===----------------------------------------------------------------------===//
+// VarTemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void VarTemplateDecl::DeallocateCommon(void *Ptr) {
+ static_cast<Common *>(Ptr)->~Common();
+}
+
+VarTemplateDecl *VarTemplateDecl::getDefinition() {
+ VarTemplateDecl *CurD = this;
+ while (CurD) {
+ if (CurD->isThisDeclarationADefinition())
+ return CurD;
+ CurD = CurD->getPreviousDecl();
+ }
+ return nullptr;
+}
+
+VarTemplateDecl *VarTemplateDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L, DeclarationName Name,
+ TemplateParameterList *Params,
+ VarDecl *Decl) {
+ return new (C, DC) VarTemplateDecl(C, DC, L, Name, Params, Decl);
+}
+
+VarTemplateDecl *VarTemplateDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) VarTemplateDecl(C, nullptr, SourceLocation(),
+ DeclarationName(), nullptr, nullptr);
+}
+
+// TODO: Unify across class, function and variable templates?
+// May require moving this and Common to RedeclarableTemplateDecl.
+void VarTemplateDecl::LoadLazySpecializations() const {
+ // Grab the most recent declaration to ensure we've loaded any lazy
+ // redeclarations of this template.
+ //
+ // FIXME: Avoid walking the entire redeclaration chain here.
+ Common *CommonPtr = getMostRecentDecl()->getCommonPtr();
+ if (CommonPtr->LazySpecializations) {
+ ASTContext &Context = getASTContext();
+ uint32_t *Specs = CommonPtr->LazySpecializations;
+ CommonPtr->LazySpecializations = nullptr;
+ for (uint32_t I = 0, N = *Specs++; I != N; ++I)
+ (void)Context.getExternalSource()->GetExternalDecl(Specs[I]);
+ }
+}
+
+llvm::FoldingSetVector<VarTemplateSpecializationDecl> &
+VarTemplateDecl::getSpecializations() const {
+ LoadLazySpecializations();
+ return getCommonPtr()->Specializations;
+}
+
+llvm::FoldingSetVector<VarTemplatePartialSpecializationDecl> &
+VarTemplateDecl::getPartialSpecializations() {
+ LoadLazySpecializations();
+ return getCommonPtr()->PartialSpecializations;
+}
+
+RedeclarableTemplateDecl::CommonBase *
+VarTemplateDecl::newCommon(ASTContext &C) const {
+ Common *CommonPtr = new (C) Common;
+ C.AddDeallocation(DeallocateCommon, CommonPtr);
+ return CommonPtr;
+}
+
+VarTemplateSpecializationDecl *
+VarTemplateDecl::findSpecialization(ArrayRef<TemplateArgument> Args,
+ void *&InsertPos) {
+ return findSpecializationImpl(getSpecializations(), Args, InsertPos);
+}
+
+void VarTemplateDecl::AddSpecialization(VarTemplateSpecializationDecl *D,
+ void *InsertPos) {
+ addSpecializationImpl<VarTemplateDecl>(getSpecializations(), D, InsertPos);
+}
+
+VarTemplatePartialSpecializationDecl *
+VarTemplateDecl::findPartialSpecialization(ArrayRef<TemplateArgument> Args,
+ void *&InsertPos) {
+ return findSpecializationImpl(getPartialSpecializations(), Args, InsertPos);
+}
+
+void VarTemplateDecl::AddPartialSpecialization(
+ VarTemplatePartialSpecializationDecl *D, void *InsertPos) {
+ if (InsertPos)
+ getPartialSpecializations().InsertNode(D, InsertPos);
+ else {
+ VarTemplatePartialSpecializationDecl *Existing =
+ getPartialSpecializations().GetOrInsertNode(D);
+ (void)Existing;
+ assert(Existing->isCanonicalDecl() && "Non-canonical specialization?");
+ }
+
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->AddedCXXTemplateSpecialization(this, D);
+}
+
+void VarTemplateDecl::getPartialSpecializations(
+ SmallVectorImpl<VarTemplatePartialSpecializationDecl *> &PS) {
+ llvm::FoldingSetVector<VarTemplatePartialSpecializationDecl> &PartialSpecs =
+ getPartialSpecializations();
+ PS.clear();
+ PS.reserve(PartialSpecs.size());
+ for (llvm::FoldingSetVector<VarTemplatePartialSpecializationDecl>::iterator
+ P = PartialSpecs.begin(),
+ PEnd = PartialSpecs.end();
+ P != PEnd; ++P)
+ PS.push_back(P->getMostRecentDecl());
+}
+
+VarTemplatePartialSpecializationDecl *
+VarTemplateDecl::findPartialSpecInstantiatedFromMember(
+ VarTemplatePartialSpecializationDecl *D) {
+ Decl *DCanon = D->getCanonicalDecl();
+ for (llvm::FoldingSetVector<VarTemplatePartialSpecializationDecl>::iterator
+ P = getPartialSpecializations().begin(),
+ PEnd = getPartialSpecializations().end();
+ P != PEnd; ++P) {
+ if (P->getInstantiatedFromMember()->getCanonicalDecl() == DCanon)
+ return P->getMostRecentDecl();
+ }
+
+ return nullptr;
+}
+
+//===----------------------------------------------------------------------===//
+// VarTemplateSpecializationDecl Implementation
+//===----------------------------------------------------------------------===//
+VarTemplateSpecializationDecl::VarTemplateSpecializationDecl(
+ Kind DK, ASTContext &Context, DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, VarTemplateDecl *SpecializedTemplate, QualType T,
+ TypeSourceInfo *TInfo, StorageClass S, const TemplateArgument *Args,
+ unsigned NumArgs)
+ : VarDecl(DK, Context, DC, StartLoc, IdLoc,
+ SpecializedTemplate->getIdentifier(), T, TInfo, S),
+ SpecializedTemplate(SpecializedTemplate), ExplicitInfo(nullptr),
+ TemplateArgs(TemplateArgumentList::CreateCopy(Context, Args, NumArgs)),
+ SpecializationKind(TSK_Undeclared) {}
+
+VarTemplateSpecializationDecl::VarTemplateSpecializationDecl(Kind DK,
+ ASTContext &C)
+ : VarDecl(DK, C, nullptr, SourceLocation(), SourceLocation(), nullptr,
+ QualType(), nullptr, SC_None),
+ ExplicitInfo(nullptr), SpecializationKind(TSK_Undeclared) {}
+
+VarTemplateSpecializationDecl *VarTemplateSpecializationDecl::Create(
+ ASTContext &Context, DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, VarTemplateDecl *SpecializedTemplate, QualType T,
+ TypeSourceInfo *TInfo, StorageClass S, const TemplateArgument *Args,
+ unsigned NumArgs) {
+ return new (Context, DC) VarTemplateSpecializationDecl(
+ VarTemplateSpecialization, Context, DC, StartLoc, IdLoc,
+ SpecializedTemplate, T, TInfo, S, Args, NumArgs);
+}
+
+VarTemplateSpecializationDecl *
+VarTemplateSpecializationDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID)
+ VarTemplateSpecializationDecl(VarTemplateSpecialization, C);
+}
+
+void VarTemplateSpecializationDecl::getNameForDiagnostic(
+ raw_ostream &OS, const PrintingPolicy &Policy, bool Qualified) const {
+ NamedDecl::getNameForDiagnostic(OS, Policy, Qualified);
+
+ const TemplateArgumentList &TemplateArgs = getTemplateArgs();
+ TemplateSpecializationType::PrintTemplateArgumentList(
+ OS, TemplateArgs.data(), TemplateArgs.size(), Policy);
+}
+
+VarTemplateDecl *VarTemplateSpecializationDecl::getSpecializedTemplate() const {
+ if (SpecializedPartialSpecialization *PartialSpec =
+ SpecializedTemplate.dyn_cast<SpecializedPartialSpecialization *>())
+ return PartialSpec->PartialSpecialization->getSpecializedTemplate();
+ return SpecializedTemplate.get<VarTemplateDecl *>();
+}
+
+void VarTemplateSpecializationDecl::setTemplateArgsInfo(
+ const TemplateArgumentListInfo &ArgsInfo) {
+ unsigned N = ArgsInfo.size();
+ TemplateArgsInfo.setLAngleLoc(ArgsInfo.getLAngleLoc());
+ TemplateArgsInfo.setRAngleLoc(ArgsInfo.getRAngleLoc());
+ for (unsigned I = 0; I != N; ++I)
+ TemplateArgsInfo.addArgument(ArgsInfo[I]);
+}
+
+//===----------------------------------------------------------------------===//
+// VarTemplatePartialSpecializationDecl Implementation
+//===----------------------------------------------------------------------===//
+void VarTemplatePartialSpecializationDecl::anchor() {}
+
+VarTemplatePartialSpecializationDecl::VarTemplatePartialSpecializationDecl(
+ ASTContext &Context, DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, TemplateParameterList *Params,
+ VarTemplateDecl *SpecializedTemplate, QualType T, TypeSourceInfo *TInfo,
+ StorageClass S, const TemplateArgument *Args, unsigned NumArgs,
+ const ASTTemplateArgumentListInfo *ArgInfos)
+ : VarTemplateSpecializationDecl(VarTemplatePartialSpecialization, Context,
+ DC, StartLoc, IdLoc, SpecializedTemplate, T,
+ TInfo, S, Args, NumArgs),
+ TemplateParams(Params), ArgsAsWritten(ArgInfos),
+ InstantiatedFromMember(nullptr, false) {
+ // TODO: The template parameters should be in DC by now. Verify.
+ // AdoptTemplateParameterList(Params, DC);
+}
+
+VarTemplatePartialSpecializationDecl *
+VarTemplatePartialSpecializationDecl::Create(
+ ASTContext &Context, DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, TemplateParameterList *Params,
+ VarTemplateDecl *SpecializedTemplate, QualType T, TypeSourceInfo *TInfo,
+ StorageClass S, const TemplateArgument *Args, unsigned NumArgs,
+ const TemplateArgumentListInfo &ArgInfos) {
+ const ASTTemplateArgumentListInfo *ASTArgInfos
+ = ASTTemplateArgumentListInfo::Create(Context, ArgInfos);
+
+ VarTemplatePartialSpecializationDecl *Result =
+ new (Context, DC) VarTemplatePartialSpecializationDecl(
+ Context, DC, StartLoc, IdLoc, Params, SpecializedTemplate, T, TInfo,
+ S, Args, NumArgs, ASTArgInfos);
+ Result->setSpecializationKind(TSK_ExplicitSpecialization);
+ return Result;
+}
+
+VarTemplatePartialSpecializationDecl *
+VarTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) VarTemplatePartialSpecializationDecl(C);
+}
+
+static TemplateParameterList *
+createMakeIntegerSeqParameterList(const ASTContext &C, DeclContext *DC) {
+ // typename T
+ auto *T = TemplateTypeParmDecl::Create(
+ C, DC, SourceLocation(), SourceLocation(), /*Depth=*/1, /*Position=*/0,
+ /*Id=*/nullptr, /*Typename=*/true, /*ParameterPack=*/false);
+ T->setImplicit(true);
+
+ // T ...Ints
+ TypeSourceInfo *TI =
+ C.getTrivialTypeSourceInfo(QualType(T->getTypeForDecl(), 0));
+ auto *N = NonTypeTemplateParmDecl::Create(
+ C, DC, SourceLocation(), SourceLocation(), /*Depth=*/1, /*Position=*/1,
+ /*Id=*/nullptr, TI->getType(), /*ParameterPack=*/true, TI);
+ N->setImplicit(true);
+
+ // <typename T, T ...Ints>
+ NamedDecl *P[2] = {T, N};
+ auto *TPL = TemplateParameterList::Create(
+ C, SourceLocation(), SourceLocation(), P, SourceLocation());
+
+ // template <typename T, T ...Ints> class IntSeq
+ auto *TemplateTemplateParm = TemplateTemplateParmDecl::Create(
+ C, DC, SourceLocation(), /*Depth=*/0, /*Position=*/0,
+ /*ParameterPack=*/false, /*Id=*/nullptr, TPL);
+ TemplateTemplateParm->setImplicit(true);
+
+ // typename T
+ auto *TemplateTypeParm = TemplateTypeParmDecl::Create(
+ C, DC, SourceLocation(), SourceLocation(), /*Depth=*/0, /*Position=*/1,
+ /*Id=*/nullptr, /*Typename=*/true, /*ParameterPack=*/false);
+ TemplateTypeParm->setImplicit(true);
+
+ // T N
+ TypeSourceInfo *TInfo = C.getTrivialTypeSourceInfo(
+ QualType(TemplateTypeParm->getTypeForDecl(), 0));
+ auto *NonTypeTemplateParm = NonTypeTemplateParmDecl::Create(
+ C, DC, SourceLocation(), SourceLocation(), /*Depth=*/0, /*Position=*/2,
+ /*Id=*/nullptr, TInfo->getType(), /*ParameterPack=*/false, TInfo);
+ NamedDecl *Params[] = {TemplateTemplateParm, TemplateTypeParm,
+ NonTypeTemplateParm};
+
+ // template <template <typename T, T ...Ints> class IntSeq, typename T, T N>
+ return TemplateParameterList::Create(C, SourceLocation(), SourceLocation(),
+ Params, SourceLocation());
+}
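+
+// Putting the pieces together, the list built above spells out (sketch):
+//   template <template <typename T, T ...Ints> class IntSeq,
+//             typename T, T N>
+// which is the parameter list the __make_integer_seq builtin is given.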
+
+static TemplateParameterList *createBuiltinTemplateParameterList(
+ const ASTContext &C, DeclContext *DC, BuiltinTemplateKind BTK) {
+ switch (BTK) {
+ case BTK__make_integer_seq:
+ return createMakeIntegerSeqParameterList(C, DC);
+ }
+
+ llvm_unreachable("unhandled BuiltinTemplateKind!");
+}
+
+void BuiltinTemplateDecl::anchor() {}
+
+BuiltinTemplateDecl::BuiltinTemplateDecl(const ASTContext &C, DeclContext *DC,
+ DeclarationName Name,
+ BuiltinTemplateKind BTK)
+ : TemplateDecl(BuiltinTemplate, DC, SourceLocation(), Name,
+ createBuiltinTemplateParameterList(C, DC, BTK)),
+ BTK(BTK) {}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp b/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp
new file mode 100644
index 0000000..b2f2727
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp
@@ -0,0 +1,590 @@
+//===-- DeclarationName.cpp - Declaration names implementation --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the DeclarationName and DeclarationNameTable
+// classes.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/TypeOrdering.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+namespace clang {
+/// CXXSpecialName - Records the type associated with one of the
+/// "special" kinds of declaration names in C++, e.g., constructors,
+/// destructors, and conversion functions.
+class CXXSpecialName
+ : public DeclarationNameExtra, public llvm::FoldingSetNode {
+public:
+ /// Type - The type associated with this declaration name.
+ QualType Type;
+
+ /// FETokenInfo - Extra information associated with this declaration
+ /// name that can be used by the front end.
+ void *FETokenInfo;
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ ID.AddInteger(ExtraKindOrNumArgs);
+ ID.AddPointer(Type.getAsOpaquePtr());
+ }
+};
+
+/// CXXOperatorIdName - Contains extra information for the name of an
+/// overloaded operator in C++, such as "operator+.
+class CXXOperatorIdName : public DeclarationNameExtra {
+public:
+ /// FETokenInfo - Extra information associated with this operator
+ /// name that can be used by the front end.
+ void *FETokenInfo;
+};
+
+/// CXXLiteralOperatorName - Contains the actual identifier that makes up the
+/// name.
+///
+/// This identifier is stored here rather than directly in DeclarationName so as
+/// to allow Objective-C selectors, which are about a million times more common,
+/// to consume minimal memory.
+class CXXLiteralOperatorIdName
+ : public DeclarationNameExtra, public llvm::FoldingSetNode {
+public:
+ IdentifierInfo *ID;
+
+ /// FETokenInfo - Extra information associated with this operator
+ /// name that can be used by the front end.
+ void *FETokenInfo;
+
+ void Profile(llvm::FoldingSetNodeID &FSID) {
+ FSID.AddPointer(ID);
+ }
+};
+
+static int compareInt(unsigned A, unsigned B) {
+ return (A < B ? -1 : (A > B ? 1 : 0));
+}
+
+int DeclarationName::compare(DeclarationName LHS, DeclarationName RHS) {
+ if (LHS.getNameKind() != RHS.getNameKind())
+ return (LHS.getNameKind() < RHS.getNameKind() ? -1 : 1);
+
+ switch (LHS.getNameKind()) {
+ case DeclarationName::Identifier: {
+ IdentifierInfo *LII = LHS.getAsIdentifierInfo();
+ IdentifierInfo *RII = RHS.getAsIdentifierInfo();
+ if (!LII) return RII ? -1 : 0;
+ if (!RII) return 1;
+
+ return LII->getName().compare(RII->getName());
+ }
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector: {
+ Selector LHSSelector = LHS.getObjCSelector();
+ Selector RHSSelector = RHS.getObjCSelector();
+ unsigned LN = LHSSelector.getNumArgs(), RN = RHSSelector.getNumArgs();
+ for (unsigned I = 0, N = std::min(LN, RN); I != N; ++I) {
+ switch (LHSSelector.getNameForSlot(I).compare(
+ RHSSelector.getNameForSlot(I))) {
+ case -1: return -1;
+ case 1: return 1;
+ default: break;
+ }
+ }
+
+ return compareInt(LN, RN);
+ }
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ if (QualTypeOrdering()(LHS.getCXXNameType(), RHS.getCXXNameType()))
+ return -1;
+ if (QualTypeOrdering()(RHS.getCXXNameType(), LHS.getCXXNameType()))
+ return 1;
+ return 0;
+
+ case DeclarationName::CXXOperatorName:
+ return compareInt(LHS.getCXXOverloadedOperator(),
+ RHS.getCXXOverloadedOperator());
+
+ case DeclarationName::CXXLiteralOperatorName:
+ return LHS.getCXXLiteralIdentifier()->getName().compare(
+ RHS.getCXXLiteralIdentifier()->getName());
+
+ case DeclarationName::CXXUsingDirective:
+ return 0;
+ }
+
+ llvm_unreachable("Invalid DeclarationName Kind!");
+}
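+
+// Resulting order (illustrative): names compare first by kind, then by a
+// kind-specific key -- identifier spelling, selector slots then arity, an
+// internal type ordering, operator kind, or literal-operator suffix.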
+
+raw_ostream &operator<<(raw_ostream &OS, DeclarationName N) {
+ switch (N.getNameKind()) {
+ case DeclarationName::Identifier:
+ if (const IdentifierInfo *II = N.getAsIdentifierInfo())
+ OS << II->getName();
+ return OS;
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ N.getObjCSelector().print(OS);
+ return OS;
+
+ case DeclarationName::CXXConstructorName: {
+ QualType ClassType = N.getCXXNameType();
+ if (const RecordType *ClassRec = ClassType->getAs<RecordType>())
+ return OS << *ClassRec->getDecl();
+ LangOptions LO;
+ LO.CPlusPlus = true;
+ return OS << ClassType.getAsString(PrintingPolicy(LO));
+ }
+
+ case DeclarationName::CXXDestructorName: {
+ OS << '~';
+ QualType Type = N.getCXXNameType();
+ if (const RecordType *Rec = Type->getAs<RecordType>())
+ return OS << *Rec->getDecl();
+ LangOptions LO;
+ LO.CPlusPlus = true;
+ return OS << Type.getAsString(PrintingPolicy(LO));
+ }
+
+ case DeclarationName::CXXOperatorName: {
+ static const char* const OperatorNames[NUM_OVERLOADED_OPERATORS] = {
+ nullptr,
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ Spelling,
+#include "clang/Basic/OperatorKinds.def"
+ };
+ const char *OpName = OperatorNames[N.getCXXOverloadedOperator()];
+ assert(OpName && "not an overloaded operator");
+
+ OS << "operator";
+ if (OpName[0] >= 'a' && OpName[0] <= 'z')
+ OS << ' ';
+ return OS << OpName;
+ }
+
+ case DeclarationName::CXXLiteralOperatorName:
+ return OS << "operator\"\"" << N.getCXXLiteralIdentifier()->getName();
+
+ case DeclarationName::CXXConversionFunctionName: {
+ OS << "operator ";
+ QualType Type = N.getCXXNameType();
+ if (const RecordType *Rec = Type->getAs<RecordType>())
+ return OS << *Rec->getDecl();
+ LangOptions LO;
+ LO.CPlusPlus = true;
+ LO.Bool = true;
+ return OS << Type.getAsString(PrintingPolicy(LO));
+ }
+ case DeclarationName::CXXUsingDirective:
+ return OS << "<using-directive>";
+ }
+
+ llvm_unreachable("Unexpected declaration name kind");
+}
+
+} // end namespace clang
+
+DeclarationName::NameKind DeclarationName::getNameKind() const {
+ switch (getStoredNameKind()) {
+ case StoredIdentifier: return Identifier;
+ case StoredObjCZeroArgSelector: return ObjCZeroArgSelector;
+ case StoredObjCOneArgSelector: return ObjCOneArgSelector;
+
+ case StoredDeclarationNameExtra:
+ switch (getExtra()->ExtraKindOrNumArgs) {
+ case DeclarationNameExtra::CXXConstructor:
+ return CXXConstructorName;
+
+ case DeclarationNameExtra::CXXDestructor:
+ return CXXDestructorName;
+
+ case DeclarationNameExtra::CXXConversionFunction:
+ return CXXConversionFunctionName;
+
+ case DeclarationNameExtra::CXXLiteralOperator:
+ return CXXLiteralOperatorName;
+
+ case DeclarationNameExtra::CXXUsingDirective:
+ return CXXUsingDirective;
+
+ default:
+ // Check if we have one of the CXXOperator* enumeration values.
+ if (getExtra()->ExtraKindOrNumArgs <
+ DeclarationNameExtra::CXXUsingDirective)
+ return CXXOperatorName;
+
+ return ObjCMultiArgSelector;
+ }
+ }
+
+ // Can't actually get here.
+ llvm_unreachable("This should be unreachable!");
+}
+
+bool DeclarationName::isDependentName() const {
+ QualType T = getCXXNameType();
+ return !T.isNull() && T->isDependentType();
+}
+
+std::string DeclarationName::getAsString() const {
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+ OS << *this;
+ return OS.str();
+}
+
+QualType DeclarationName::getCXXNameType() const {
+ if (CXXSpecialName *CXXName = getAsCXXSpecialName())
+ return CXXName->Type;
+ else
+ return QualType();
+}
+
+OverloadedOperatorKind DeclarationName::getCXXOverloadedOperator() const {
+ if (CXXOperatorIdName *CXXOp = getAsCXXOperatorIdName()) {
+ unsigned value
+ = CXXOp->ExtraKindOrNumArgs - DeclarationNameExtra::CXXConversionFunction;
+ return static_cast<OverloadedOperatorKind>(value);
+ } else {
+ return OO_None;
+ }
+}
+
+IdentifierInfo *DeclarationName::getCXXLiteralIdentifier() const {
+ if (CXXLiteralOperatorIdName *CXXLit = getAsCXXLiteralOperatorIdName())
+ return CXXLit->ID;
+ else
+ return nullptr;
+}
+
+void *DeclarationName::getFETokenInfoAsVoidSlow() const {
+ switch (getNameKind()) {
+ case Identifier:
+ llvm_unreachable("Handled by getFETokenInfo()");
+
+ case CXXConstructorName:
+ case CXXDestructorName:
+ case CXXConversionFunctionName:
+ return getAsCXXSpecialName()->FETokenInfo;
+
+ case CXXOperatorName:
+ return getAsCXXOperatorIdName()->FETokenInfo;
+
+ case CXXLiteralOperatorName:
+ return getAsCXXLiteralOperatorIdName()->FETokenInfo;
+
+ default:
+ llvm_unreachable("Declaration name has no FETokenInfo");
+ }
+}
+
+void DeclarationName::setFETokenInfo(void *T) {
+ switch (getNameKind()) {
+ case Identifier:
+ getAsIdentifierInfo()->setFETokenInfo(T);
+ break;
+
+ case CXXConstructorName:
+ case CXXDestructorName:
+ case CXXConversionFunctionName:
+ getAsCXXSpecialName()->FETokenInfo = T;
+ break;
+
+ case CXXOperatorName:
+ getAsCXXOperatorIdName()->FETokenInfo = T;
+ break;
+
+ case CXXLiteralOperatorName:
+ getAsCXXLiteralOperatorIdName()->FETokenInfo = T;
+ break;
+
+ default:
+ llvm_unreachable("Declaration name has no FETokenInfo");
+ }
+}
+
+DeclarationName DeclarationName::getUsingDirectiveName() {
+ // Single instance of DeclarationNameExtra for using-directive
+ static const DeclarationNameExtra UDirExtra =
+ { DeclarationNameExtra::CXXUsingDirective };
+
+ uintptr_t Ptr = reinterpret_cast<uintptr_t>(&UDirExtra);
+ Ptr |= StoredDeclarationNameExtra;
+
+ return DeclarationName(Ptr);
+}
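+
+// Implementation note: a DeclarationName is a single tagged pointer; the
+// OR above stamps StoredDeclarationNameExtra into its low bits, so every
+// using-directive name refers to the one shared extra record and compares
+// equal.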
+
+void DeclarationName::dump() const {
+ llvm::errs() << *this << '\n';
+}
+
+DeclarationNameTable::DeclarationNameTable(const ASTContext &C) : Ctx(C) {
+ CXXSpecialNamesImpl = new llvm::FoldingSet<CXXSpecialName>;
+ CXXLiteralOperatorNames = new llvm::FoldingSet<CXXLiteralOperatorIdName>;
+
+ // Initialize the overloaded operator names.
+ CXXOperatorNames = new (Ctx) CXXOperatorIdName[NUM_OVERLOADED_OPERATORS];
+ for (unsigned Op = 0; Op < NUM_OVERLOADED_OPERATORS; ++Op) {
+ CXXOperatorNames[Op].ExtraKindOrNumArgs
+ = Op + DeclarationNameExtra::CXXConversionFunction;
+ CXXOperatorNames[Op].FETokenInfo = nullptr;
+ }
+}
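+
+// Note: operator names are preallocated here with ExtraKindOrNumArgs set
+// to Op + CXXConversionFunction; getCXXOverloadedOperator() undoes
+// exactly this offset when decoding the operator kind.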
+
+DeclarationNameTable::~DeclarationNameTable() {
+ llvm::FoldingSet<CXXSpecialName> *SpecialNames =
+ static_cast<llvm::FoldingSet<CXXSpecialName>*>(CXXSpecialNamesImpl);
+ llvm::FoldingSet<CXXLiteralOperatorIdName> *LiteralNames
+ = static_cast<llvm::FoldingSet<CXXLiteralOperatorIdName>*>
+ (CXXLiteralOperatorNames);
+
+ delete SpecialNames;
+ delete LiteralNames;
+}
+
+DeclarationName DeclarationNameTable::getCXXConstructorName(CanQualType Ty) {
+ return getCXXSpecialName(DeclarationName::CXXConstructorName,
+ Ty.getUnqualifiedType());
+}
+
+DeclarationName DeclarationNameTable::getCXXDestructorName(CanQualType Ty) {
+ return getCXXSpecialName(DeclarationName::CXXDestructorName,
+ Ty.getUnqualifiedType());
+}
+
+DeclarationName
+DeclarationNameTable::getCXXConversionFunctionName(CanQualType Ty) {
+ return getCXXSpecialName(DeclarationName::CXXConversionFunctionName, Ty);
+}
+
+DeclarationName
+DeclarationNameTable::getCXXSpecialName(DeclarationName::NameKind Kind,
+ CanQualType Ty) {
+ assert(Kind >= DeclarationName::CXXConstructorName &&
+ Kind <= DeclarationName::CXXConversionFunctionName &&
+ "Kind must be a C++ special name kind");
+ llvm::FoldingSet<CXXSpecialName> *SpecialNames
+ = static_cast<llvm::FoldingSet<CXXSpecialName>*>(CXXSpecialNamesImpl);
+
+ DeclarationNameExtra::ExtraKind EKind;
+ switch (Kind) {
+ case DeclarationName::CXXConstructorName:
+ EKind = DeclarationNameExtra::CXXConstructor;
+ assert(!Ty.hasQualifiers() &&"Constructor type must be unqualified");
+ break;
+ case DeclarationName::CXXDestructorName:
+ EKind = DeclarationNameExtra::CXXDestructor;
+ assert(!Ty.hasQualifiers() && "Destructor type must be unqualified");
+ break;
+ case DeclarationName::CXXConversionFunctionName:
+ EKind = DeclarationNameExtra::CXXConversionFunction;
+ break;
+ default:
+ return DeclarationName();
+ }
+
+ // Unique selector, to guarantee there is one per name.
+ llvm::FoldingSetNodeID ID;
+ ID.AddInteger(EKind);
+ ID.AddPointer(Ty.getAsOpaquePtr());
+
+ void *InsertPos = nullptr;
+ if (CXXSpecialName *Name = SpecialNames->FindNodeOrInsertPos(ID, InsertPos))
+ return DeclarationName(Name);
+
+ CXXSpecialName *SpecialName = new (Ctx) CXXSpecialName;
+ SpecialName->ExtraKindOrNumArgs = EKind;
+ SpecialName->Type = Ty;
+ SpecialName->FETokenInfo = nullptr;
+
+ SpecialNames->InsertNode(SpecialName, InsertPos);
+ return DeclarationName(SpecialName);
+}
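+
+// Because special names are uniqued through the FoldingSet, equal kinds
+// and types yield pointer-equal names (sketch; 'Table' is a hypothetical
+// DeclarationNameTable):
+//   DeclarationName A = Table.getCXXConstructorName(Ty);
+//   DeclarationName B = Table.getCXXConstructorName(Ty);
+//   assert(A == B);  // same underlying CXXSpecialName node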
+
+DeclarationName
+DeclarationNameTable::getCXXOperatorName(OverloadedOperatorKind Op) {
+ return DeclarationName(&CXXOperatorNames[(unsigned)Op]);
+}
+
+DeclarationName
+DeclarationNameTable::getCXXLiteralOperatorName(IdentifierInfo *II) {
+ llvm::FoldingSet<CXXLiteralOperatorIdName> *LiteralNames
+ = static_cast<llvm::FoldingSet<CXXLiteralOperatorIdName>*>
+ (CXXLiteralOperatorNames);
+
+ llvm::FoldingSetNodeID ID;
+ ID.AddPointer(II);
+
+ void *InsertPos = nullptr;
+ if (CXXLiteralOperatorIdName *Name =
+ LiteralNames->FindNodeOrInsertPos(ID, InsertPos))
+ return DeclarationName(Name);
+
+ CXXLiteralOperatorIdName *LiteralName = new (Ctx) CXXLiteralOperatorIdName;
+ LiteralName->ExtraKindOrNumArgs = DeclarationNameExtra::CXXLiteralOperator;
+ LiteralName->ID = II;
+ LiteralName->FETokenInfo = nullptr;
+
+ LiteralNames->InsertNode(LiteralName, InsertPos);
+ return DeclarationName(LiteralName);
+}
+
+DeclarationNameLoc::DeclarationNameLoc(DeclarationName Name) {
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ break;
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ NamedType.TInfo = nullptr;
+ break;
+ case DeclarationName::CXXOperatorName:
+ CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding();
+ CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding();
+ break;
+ case DeclarationName::CXXLiteralOperatorName:
+ CXXLiteralOperatorName.OpNameLoc = SourceLocation().getRawEncoding();
+ break;
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ // FIXME: ?
+ break;
+ case DeclarationName::CXXUsingDirective:
+ break;
+ }
+}
+
+bool DeclarationNameInfo::containsUnexpandedParameterPack() const {
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXOperatorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXUsingDirective:
+ return false;
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo)
+ return TInfo->getType()->containsUnexpandedParameterPack();
+
+ return Name.getCXXNameType()->containsUnexpandedParameterPack();
+ }
+ llvm_unreachable("All name kinds handled.");
+}
+
+bool DeclarationNameInfo::isInstantiationDependent() const {
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXOperatorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXUsingDirective:
+ return false;
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo)
+ return TInfo->getType()->isInstantiationDependentType();
+
+ return Name.getCXXNameType()->isInstantiationDependentType();
+ }
+ llvm_unreachable("All name kinds handled.");
+}
+
+std::string DeclarationNameInfo::getAsString() const {
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+ printName(OS);
+ return OS.str();
+}
+
+void DeclarationNameInfo::printName(raw_ostream &OS) const {
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXOperatorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXUsingDirective:
+ OS << Name;
+ return;
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo) {
+ if (Name.getNameKind() == DeclarationName::CXXDestructorName)
+ OS << '~';
+ else if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName)
+ OS << "operator ";
+ LangOptions LO;
+ LO.CPlusPlus = true;
+ LO.Bool = true;
+ OS << TInfo->getType().getAsString(PrintingPolicy(LO));
+ } else
+ OS << Name;
+ return;
+ }
+ llvm_unreachable("Unexpected declaration name kind");
+}
+
+SourceLocation DeclarationNameInfo::getEndLoc() const {
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ return NameLoc;
+
+ case DeclarationName::CXXOperatorName: {
+ unsigned raw = LocInfo.CXXOperatorName.EndOpNameLoc;
+ return SourceLocation::getFromRawEncoding(raw);
+ }
+
+ case DeclarationName::CXXLiteralOperatorName: {
+ unsigned raw = LocInfo.CXXLiteralOperatorName.OpNameLoc;
+ return SourceLocation::getFromRawEncoding(raw);
+ }
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo)
+ return TInfo->getTypeLoc().getEndLoc();
+ else
+ return NameLoc;
+
+ // DNInfo work in progress: FIXME.
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXUsingDirective:
+ return NameLoc;
+ }
+ llvm_unreachable("Unexpected declaration name kind");
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/Expr.cpp b/contrib/llvm/tools/clang/lib/AST/Expr.cpp
new file mode 100644
index 0000000..f9757b2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/Expr.cpp
@@ -0,0 +1,4034 @@
+//===--- Expr.cpp - Expression AST Node Implementation --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Expr class and subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Mangle.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cstring>
+using namespace clang;
+
+const CXXRecordDecl *Expr::getBestDynamicClassType() const {
+ const Expr *E = ignoreParenBaseCasts();
+
+ QualType DerivedType = E->getType();
+ if (const PointerType *PTy = DerivedType->getAs<PointerType>())
+ DerivedType = PTy->getPointeeType();
+
+ if (DerivedType->isDependentType())
+ return nullptr;
+
+ const RecordType *Ty = DerivedType->castAs<RecordType>();
+ Decl *D = Ty->getDecl();
+ return cast<CXXRecordDecl>(D);
+}
+
+const Expr *Expr::skipRValueSubobjectAdjustments(
+ SmallVectorImpl<const Expr *> &CommaLHSs,
+ SmallVectorImpl<SubobjectAdjustment> &Adjustments) const {
+ const Expr *E = this;
+ while (true) {
+ E = E->IgnoreParens();
+
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if ((CE->getCastKind() == CK_DerivedToBase ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase) &&
+ E->getType()->isRecordType()) {
+ E = CE->getSubExpr();
+ CXXRecordDecl *Derived
+ = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
+ Adjustments.push_back(SubobjectAdjustment(CE, Derived));
+ continue;
+ }
+
+ if (CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ if (!ME->isArrow()) {
+ assert(ME->getBase()->getType()->isRecordType());
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ if (!Field->isBitField() && !Field->getType()->isReferenceType()) {
+ E = ME->getBase();
+ Adjustments.push_back(SubobjectAdjustment(Field));
+ continue;
+ }
+ }
+ }
+ } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->isPtrMemOp()) {
+ assert(BO->getRHS()->isRValue());
+ E = BO->getLHS();
+ const MemberPointerType *MPT =
+ BO->getRHS()->getType()->getAs<MemberPointerType>();
+ Adjustments.push_back(SubobjectAdjustment(MPT, BO->getRHS()));
+ continue;
+ } else if (BO->getOpcode() == BO_Comma) {
+ CommaLHSs.push_back(BO->getLHS());
+ E = BO->getRHS();
+ continue;
+ }
+ }
+
+ // Nothing changed.
+ break;
+ }
+ return E;
+}
+
+/// isKnownToHaveBooleanValue - Return true if this is an integer expression
+/// that is known to return 0 or 1. This happens for _Bool/bool expressions
+/// but also int expressions which are produced by things like comparisons in
+/// C.
+bool Expr::isKnownToHaveBooleanValue() const {
+ const Expr *E = IgnoreParens();
+
+ // If this value has _Bool type, it is obvious 0/1.
+ if (E->getType()->isBooleanType()) return true;
+ // If this is a non-scalar-integer type, we don't care enough to try.
+ if (!E->getType()->isIntegralOrEnumerationType()) return false;
+
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ switch (UO->getOpcode()) {
+ case UO_Plus:
+ return UO->getSubExpr()->isKnownToHaveBooleanValue();
+ case UO_LNot:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ // Only look through implicit casts. If the user writes
+ // '(int) (a && b)' treat it as an arbitrary int.
+ if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E))
+ return CE->getSubExpr()->isKnownToHaveBooleanValue();
+
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ switch (BO->getOpcode()) {
+ default: return false;
+ case BO_LT: // Relational operators.
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ case BO_EQ: // Equality operators.
+ case BO_NE:
+ case BO_LAnd: // AND operator.
+ case BO_LOr: // Logical OR operator.
+ return true;
+
+ case BO_And: // Bitwise AND operator.
+ case BO_Xor: // Bitwise XOR operator.
+ case BO_Or: // Bitwise OR operator.
+ // Handle things like (x==2)|(y==12).
+ return BO->getLHS()->isKnownToHaveBooleanValue() &&
+ BO->getRHS()->isKnownToHaveBooleanValue();
+
+ case BO_Comma:
+ case BO_Assign:
+ return BO->getRHS()->isKnownToHaveBooleanValue();
+ }
+ }
+
+ if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E))
+ return CO->getTrueExpr()->isKnownToHaveBooleanValue() &&
+ CO->getFalseExpr()->isKnownToHaveBooleanValue();
+
+ return false;
+}
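+
+// For example, '(x == 2) | (y == 12)' is known to be boolean because both
+// operands of the bitwise-or are comparisons, while an explicit
+// '(int)(a && b)' is treated as an arbitrary int because only implicit casts
+// are looked through above.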
+
+// Amusing macro metaprogramming hack: check whether a class provides
+// a more specific implementation of getExprLoc().
+//
+// See also Stmt.cpp:{getLocStart(),getLocEnd()}.
+namespace {
+ /// This implementation is used when a class provides a custom
+ /// implementation of getExprLoc.
+ template <class E, class T>
+ SourceLocation getExprLocImpl(const Expr *expr,
+ SourceLocation (T::*v)() const) {
+ return static_cast<const E*>(expr)->getExprLoc();
+ }
+
+ /// This implementation is used when a class doesn't provide
+ /// a custom implementation of getExprLoc. Overload resolution
+ /// should pick it over the implementation above because it's
+ /// more specialized according to function template partial ordering.
+ template <class E>
+ SourceLocation getExprLocImpl(const Expr *expr,
+ SourceLocation (Expr::*v)() const) {
+ return static_cast<const E*>(expr)->getLocStart();
+ }
+}
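+
+// Dispatch sketch: for a node class that declares its own getExprLoc(),
+// '&Node::getExprLoc' has type 'SourceLocation (Node::*)() const', so only
+// the <E, T> overload above matches. If the class merely inherits
+// Expr::getExprLoc, the member pointer has type
+// 'SourceLocation (Expr::*)() const' and the more specialized <E> overload is
+// chosen, falling back to getLocStart().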
+
+SourceLocation Expr::getExprLoc() const {
+ switch (getStmtClass()) {
+ case Stmt::NoStmtClass: llvm_unreachable("statement without class");
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ case Stmt::type##Class: break;
+#define EXPR(type, base) \
+ case Stmt::type##Class: return getExprLocImpl<type>(this, &type::getExprLoc);
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("unknown expression kind");
+}
+
+//===----------------------------------------------------------------------===//
+// Primary Expressions.
+//===----------------------------------------------------------------------===//
+
+/// \brief Compute the type-, value-, and instantiation-dependence of a
+/// declaration reference based on the declaration being referenced.
+static void computeDeclRefDependence(const ASTContext &Ctx, NamedDecl *D,
+ QualType T, bool &TypeDependent,
+ bool &ValueDependent,
+ bool &InstantiationDependent) {
+ TypeDependent = false;
+ ValueDependent = false;
+ InstantiationDependent = false;
+
+ // (TD) C++ [temp.dep.expr]p3:
+ // An id-expression is type-dependent if it contains:
+ //
+ // and
+ //
+ // (VD) C++ [temp.dep.constexpr]p2:
+ // An identifier is value-dependent if it is:
+
+ // (TD) - an identifier that was declared with dependent type
+ // (VD) - a name declared with a dependent type,
+ if (T->isDependentType()) {
+ TypeDependent = true;
+ ValueDependent = true;
+ InstantiationDependent = true;
+ return;
+ } else if (T->isInstantiationDependentType()) {
+ InstantiationDependent = true;
+ }
+
+ // (TD) - a conversion-function-id that specifies a dependent type
+ if (D->getDeclName().getNameKind()
+ == DeclarationName::CXXConversionFunctionName) {
+ QualType T = D->getDeclName().getCXXNameType();
+ if (T->isDependentType()) {
+ TypeDependent = true;
+ ValueDependent = true;
+ InstantiationDependent = true;
+ return;
+ }
+
+ if (T->isInstantiationDependentType())
+ InstantiationDependent = true;
+ }
+
+ // (VD) - the name of a non-type template parameter,
+ if (isa<NonTypeTemplateParmDecl>(D)) {
+ ValueDependent = true;
+ InstantiationDependent = true;
+ return;
+ }
+
+ // (VD) - a constant with integral or enumeration type and is
+ // initialized with an expression that is value-dependent.
+ // (VD) - a constant with literal type and is initialized with an
+ // expression that is value-dependent [C++11].
+ // (VD) - FIXME: Missing from the standard:
+ // - an entity with reference type and is initialized with an
+ // expression that is value-dependent [C++11]
+ if (VarDecl *Var = dyn_cast<VarDecl>(D)) {
+ if ((Ctx.getLangOpts().CPlusPlus11 ?
+ Var->getType()->isLiteralType(Ctx) :
+ Var->getType()->isIntegralOrEnumerationType()) &&
+ (Var->getType().isConstQualified() ||
+ Var->getType()->isReferenceType())) {
+ if (const Expr *Init = Var->getAnyInitializer())
+ if (Init->isValueDependent()) {
+ ValueDependent = true;
+ InstantiationDependent = true;
+ }
+ }
+
+ // (VD) - FIXME: Missing from the standard:
+ // - a member function or a static data member of the current
+ // instantiation
+ if (Var->isStaticDataMember() &&
+ Var->getDeclContext()->isDependentContext()) {
+ ValueDependent = true;
+ InstantiationDependent = true;
+ TypeSourceInfo *TInfo = Var->getFirstDecl()->getTypeSourceInfo();
+ if (TInfo->getType()->isIncompleteArrayType())
+ TypeDependent = true;
+ }
+
+ return;
+ }
+
+ // (VD) - FIXME: Missing from the standard:
+ // - a member function or a static data member of the current
+ // instantiation
+ if (isa<CXXMethodDecl>(D) && D->getDeclContext()->isDependentContext()) {
+ ValueDependent = true;
+ InstantiationDependent = true;
+ }
+}
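+
+// Example of the (VD) constant case: in 'template <int N> void f() {
+// const int K = N; }', a DeclRefExpr naming 'K' is value-dependent (its
+// initializer 'N' is value-dependent) but not type-dependent, since 'K' has
+// the non-dependent type 'const int'.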
+
+void DeclRefExpr::computeDependence(const ASTContext &Ctx) {
+ bool TypeDependent = false;
+ bool ValueDependent = false;
+ bool InstantiationDependent = false;
+ computeDeclRefDependence(Ctx, getDecl(), getType(), TypeDependent,
+ ValueDependent, InstantiationDependent);
+
+ ExprBits.TypeDependent |= TypeDependent;
+ ExprBits.ValueDependent |= ValueDependent;
+ ExprBits.InstantiationDependent |= InstantiationDependent;
+
+ // Is the declaration a parameter pack?
+ if (getDecl()->isParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+}
+
+DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ ValueDecl *D, bool RefersToEnclosingVariableOrCapture,
+ const DeclarationNameInfo &NameInfo,
+ NamedDecl *FoundD,
+ const TemplateArgumentListInfo *TemplateArgs,
+ QualType T, ExprValueKind VK)
+ : Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false, false),
+ D(D), Loc(NameInfo.getLoc()), DNLoc(NameInfo.getInfo()) {
+ DeclRefExprBits.HasQualifier = QualifierLoc ? 1 : 0;
+ if (QualifierLoc) {
+ new (getTrailingObjects<NestedNameSpecifierLoc>())
+ NestedNameSpecifierLoc(QualifierLoc);
+ auto *NNS = QualifierLoc.getNestedNameSpecifier();
+ if (NNS->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (NNS->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+ }
+ DeclRefExprBits.HasFoundDecl = FoundD ? 1 : 0;
+ if (FoundD)
+ *getTrailingObjects<NamedDecl *>() = FoundD;
+ DeclRefExprBits.HasTemplateKWAndArgsInfo
+ = (TemplateArgs || TemplateKWLoc.isValid()) ? 1 : 0;
+ DeclRefExprBits.RefersToEnclosingVariableOrCapture =
+ RefersToEnclosingVariableOrCapture;
+ if (TemplateArgs) {
+ bool Dependent = false;
+ bool InstantiationDependent = false;
+ bool ContainsUnexpandedParameterPack = false;
+ getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
+ TemplateKWLoc, *TemplateArgs, getTrailingObjects<TemplateArgumentLoc>(),
+ Dependent, InstantiationDependent, ContainsUnexpandedParameterPack);
+ assert(!Dependent && "built a DeclRefExpr with dependent template args");
+ ExprBits.InstantiationDependent |= InstantiationDependent;
+ ExprBits.ContainsUnexpandedParameterPack |= ContainsUnexpandedParameterPack;
+ } else if (TemplateKWLoc.isValid()) {
+ getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
+ TemplateKWLoc);
+ }
+ DeclRefExprBits.HadMultipleCandidates = 0;
+
+ computeDependence(Ctx);
+}
+
+DeclRefExpr *DeclRefExpr::Create(const ASTContext &Context,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ ValueDecl *D,
+ bool RefersToEnclosingVariableOrCapture,
+ SourceLocation NameLoc,
+ QualType T,
+ ExprValueKind VK,
+ NamedDecl *FoundD,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ return Create(Context, QualifierLoc, TemplateKWLoc, D,
+ RefersToEnclosingVariableOrCapture,
+ DeclarationNameInfo(D->getDeclName(), NameLoc),
+ T, VK, FoundD, TemplateArgs);
+}
+
+DeclRefExpr *DeclRefExpr::Create(const ASTContext &Context,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ ValueDecl *D,
+ bool RefersToEnclosingVariableOrCapture,
+ const DeclarationNameInfo &NameInfo,
+ QualType T,
+ ExprValueKind VK,
+ NamedDecl *FoundD,
+ const TemplateArgumentListInfo *TemplateArgs) {
+  // Filter out cases where the found Decl is the same as the value referenced.
+ if (D == FoundD)
+ FoundD = nullptr;
+
+ bool HasTemplateKWAndArgsInfo = TemplateArgs || TemplateKWLoc.isValid();
+ std::size_t Size =
+ totalSizeToAlloc<NestedNameSpecifierLoc, NamedDecl *,
+ ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
+ QualifierLoc ? 1 : 0, FoundD ? 1 : 0,
+ HasTemplateKWAndArgsInfo ? 1 : 0,
+ TemplateArgs ? TemplateArgs->size() : 0);
+
+ void *Mem = Context.Allocate(Size, llvm::alignOf<DeclRefExpr>());
+ return new (Mem) DeclRefExpr(Context, QualifierLoc, TemplateKWLoc, D,
+ RefersToEnclosingVariableOrCapture,
+ NameInfo, FoundD, TemplateArgs, T, VK);
+}
+
+DeclRefExpr *DeclRefExpr::CreateEmpty(const ASTContext &Context,
+ bool HasQualifier,
+ bool HasFoundDecl,
+ bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs) {
+ assert(NumTemplateArgs == 0 || HasTemplateKWAndArgsInfo);
+ std::size_t Size =
+ totalSizeToAlloc<NestedNameSpecifierLoc, NamedDecl *,
+ ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
+ HasQualifier ? 1 : 0, HasFoundDecl ? 1 : 0, HasTemplateKWAndArgsInfo,
+ NumTemplateArgs);
+ void *Mem = Context.Allocate(Size, llvm::alignOf<DeclRefExpr>());
+ return new (Mem) DeclRefExpr(EmptyShell());
+}
+
+SourceLocation DeclRefExpr::getLocStart() const {
+ if (hasQualifier())
+ return getQualifierLoc().getBeginLoc();
+ return getNameInfo().getLocStart();
+}
+SourceLocation DeclRefExpr::getLocEnd() const {
+ if (hasExplicitTemplateArgs())
+ return getRAngleLoc();
+ return getNameInfo().getLocEnd();
+}
+
+PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FNTy, IdentType IT,
+ StringLiteral *SL)
+ : Expr(PredefinedExprClass, FNTy, VK_LValue, OK_Ordinary,
+ FNTy->isDependentType(), FNTy->isDependentType(),
+ FNTy->isInstantiationDependentType(),
+ /*ContainsUnexpandedParameterPack=*/false),
+ Loc(L), Type(IT), FnName(SL) {}
+
+StringLiteral *PredefinedExpr::getFunctionName() {
+ return cast_or_null<StringLiteral>(FnName);
+}
+
+StringRef PredefinedExpr::getIdentTypeName(PredefinedExpr::IdentType IT) {
+ switch (IT) {
+ case Func:
+ return "__func__";
+ case Function:
+ return "__FUNCTION__";
+ case FuncDName:
+ return "__FUNCDNAME__";
+ case LFunction:
+ return "L__FUNCTION__";
+ case PrettyFunction:
+ return "__PRETTY_FUNCTION__";
+ case FuncSig:
+ return "__FUNCSIG__";
+ case PrettyFunctionNoVirtual:
+ break;
+ }
+ llvm_unreachable("Unknown ident type for PredefinedExpr");
+}
+
+// FIXME: Maybe this should use DeclPrinter with a special "print predefined
+// expr" policy instead.
+std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
+ ASTContext &Context = CurrentDecl->getASTContext();
+
+ if (IT == PredefinedExpr::FuncDName) {
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(CurrentDecl)) {
+ std::unique_ptr<MangleContext> MC;
+ MC.reset(Context.createMangleContext());
+
+ if (MC->shouldMangleDeclName(ND)) {
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(ND))
+ MC->mangleCXXCtor(CD, Ctor_Base, Out);
+ else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(ND))
+ MC->mangleCXXDtor(DD, Dtor_Base, Out);
+ else
+ MC->mangleName(ND, Out);
+
+ if (!Buffer.empty() && Buffer.front() == '\01')
+ return Buffer.substr(1);
+ return Buffer.str();
+ } else
+ return ND->getIdentifier()->getName();
+ }
+ return "";
+ }
+ if (auto *BD = dyn_cast<BlockDecl>(CurrentDecl)) {
+ std::unique_ptr<MangleContext> MC;
+ MC.reset(Context.createMangleContext());
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ auto DC = CurrentDecl->getDeclContext();
+ if (DC->isFileContext())
+ MC->mangleGlobalBlock(BD, /*ID*/ nullptr, Out);
+ else if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
+ MC->mangleCtorBlock(CD, /*CT*/ Ctor_Complete, BD, Out);
+ else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
+ MC->mangleDtorBlock(DD, /*DT*/ Dtor_Complete, BD, Out);
+ else
+ MC->mangleBlock(DC, BD, Out);
+ return Out.str();
+ }
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurrentDecl)) {
+ if (IT != PrettyFunction && IT != PrettyFunctionNoVirtual && IT != FuncSig)
+ return FD->getNameAsString();
+
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ if (MD->isVirtual() && IT != PrettyFunctionNoVirtual)
+ Out << "virtual ";
+ if (MD->isStatic())
+ Out << "static ";
+ }
+
+ PrintingPolicy Policy(Context.getLangOpts());
+ std::string Proto;
+ llvm::raw_string_ostream POut(Proto);
+
+ const FunctionDecl *Decl = FD;
+ if (const FunctionDecl* Pattern = FD->getTemplateInstantiationPattern())
+ Decl = Pattern;
+ const FunctionType *AFT = Decl->getType()->getAs<FunctionType>();
+ const FunctionProtoType *FT = nullptr;
+ if (FD->hasWrittenPrototype())
+ FT = dyn_cast<FunctionProtoType>(AFT);
+
+ if (IT == FuncSig) {
+      switch (AFT->getCallConv()) {
+ case CC_C: POut << "__cdecl "; break;
+ case CC_X86StdCall: POut << "__stdcall "; break;
+ case CC_X86FastCall: POut << "__fastcall "; break;
+ case CC_X86ThisCall: POut << "__thiscall "; break;
+ case CC_X86VectorCall: POut << "__vectorcall "; break;
+ // Only bother printing the conventions that MSVC knows about.
+ default: break;
+ }
+ }
+
+ FD->printQualifiedName(POut, Policy);
+
+ POut << "(";
+ if (FT) {
+ for (unsigned i = 0, e = Decl->getNumParams(); i != e; ++i) {
+ if (i) POut << ", ";
+ POut << Decl->getParamDecl(i)->getType().stream(Policy);
+ }
+
+ if (FT->isVariadic()) {
+ if (FD->getNumParams()) POut << ", ";
+ POut << "...";
+ }
+ }
+ POut << ")";
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ const FunctionType *FT = MD->getType()->castAs<FunctionType>();
+ if (FT->isConst())
+ POut << " const";
+ if (FT->isVolatile())
+ POut << " volatile";
+ RefQualifierKind Ref = MD->getRefQualifier();
+ if (Ref == RQ_LValue)
+ POut << " &";
+ else if (Ref == RQ_RValue)
+ POut << " &&";
+ }
+
+ typedef SmallVector<const ClassTemplateSpecializationDecl *, 8> SpecsTy;
+ SpecsTy Specs;
+ const DeclContext *Ctx = FD->getDeclContext();
+ while (Ctx && isa<NamedDecl>(Ctx)) {
+ const ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Ctx);
+ if (Spec && !Spec->isExplicitSpecialization())
+ Specs.push_back(Spec);
+ Ctx = Ctx->getParent();
+ }
+
+ std::string TemplateParams;
+ llvm::raw_string_ostream TOut(TemplateParams);
+ for (SpecsTy::reverse_iterator I = Specs.rbegin(), E = Specs.rend();
+ I != E; ++I) {
+ const TemplateParameterList *Params
+ = (*I)->getSpecializedTemplate()->getTemplateParameters();
+ const TemplateArgumentList &Args = (*I)->getTemplateArgs();
+ assert(Params->size() == Args.size());
+ for (unsigned i = 0, numParams = Params->size(); i != numParams; ++i) {
+ StringRef Param = Params->getParam(i)->getName();
+ if (Param.empty()) continue;
+ TOut << Param << " = ";
+ Args.get(i).print(Policy, TOut);
+ TOut << ", ";
+ }
+ }
+
+ FunctionTemplateSpecializationInfo *FSI
+ = FD->getTemplateSpecializationInfo();
+ if (FSI && !FSI->isExplicitSpecialization()) {
+ const TemplateParameterList* Params
+ = FSI->getTemplate()->getTemplateParameters();
+ const TemplateArgumentList* Args = FSI->TemplateArguments;
+ assert(Params->size() == Args->size());
+ for (unsigned i = 0, e = Params->size(); i != e; ++i) {
+ StringRef Param = Params->getParam(i)->getName();
+ if (Param.empty()) continue;
+ TOut << Param << " = ";
+ Args->get(i).print(Policy, TOut);
+ TOut << ", ";
+ }
+ }
+
+ TOut.flush();
+ if (!TemplateParams.empty()) {
+ // remove the trailing comma and space
+ TemplateParams.resize(TemplateParams.size() - 2);
+ POut << " [" << TemplateParams << "]";
+ }
+
+ POut.flush();
+
+ // Print "auto" for all deduced return types. This includes C++1y return
+ // type deduction and lambdas. For trailing return types resolve the
+ // decltype expression. Otherwise print the real type when this is
+ // not a constructor or destructor.
+ if (isa<CXXMethodDecl>(FD) &&
+ cast<CXXMethodDecl>(FD)->getParent()->isLambda())
+ Proto = "auto " + Proto;
+ else if (FT && FT->getReturnType()->getAs<DecltypeType>())
+ FT->getReturnType()
+ ->getAs<DecltypeType>()
+ ->getUnderlyingType()
+ .getAsStringInternal(Proto, Policy);
+ else if (!isa<CXXConstructorDecl>(FD) && !isa<CXXDestructorDecl>(FD))
+ AFT->getReturnType().getAsStringInternal(Proto, Policy);
+
+ Out << Proto;
+
+ return Name.str().str();
+ }
+ if (const CapturedDecl *CD = dyn_cast<CapturedDecl>(CurrentDecl)) {
+ for (const DeclContext *DC = CD->getParent(); DC; DC = DC->getParent())
+ // Skip to its enclosing function or method, but not its enclosing
+ // CapturedDecl.
+ if (DC->isFunctionOrMethod() && (DC->getDeclKind() != Decl::Captured)) {
+ const Decl *D = Decl::castFromDeclContext(DC);
+ return ComputeName(IT, D);
+ }
+ llvm_unreachable("CapturedDecl not inside a function or method");
+ }
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(CurrentDecl)) {
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ Out << (MD->isInstanceMethod() ? '-' : '+');
+ Out << '[';
+
+ // For incorrect code, there might not be an ObjCInterfaceDecl. Do
+ // a null check to avoid a crash.
+ if (const ObjCInterfaceDecl *ID = MD->getClassInterface())
+ Out << *ID;
+
+ if (const ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(MD->getDeclContext()))
+ Out << '(' << *CID << ')';
+
+ Out << ' ';
+ MD->getSelector().print(Out);
+ Out << ']';
+
+ return Name.str().str();
+ }
+ if (isa<TranslationUnitDecl>(CurrentDecl) && IT == PrettyFunction) {
+ // __PRETTY_FUNCTION__ -> "top level", the others produce an empty string.
+ return "top level";
+ }
+ return "";
+}
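+
+// For example, with 'template <typename T> void func(T) {}' instantiated at
+// T = int, __func__ yields "func" while __PRETTY_FUNCTION__ yields output of
+// the form "void func(T) [T = int]": the return type, qualified name,
+// parameter list, and the template parameter bindings collected above.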
+
+void APNumericStorage::setIntValue(const ASTContext &C,
+ const llvm::APInt &Val) {
+ if (hasAllocation())
+ C.Deallocate(pVal);
+
+ BitWidth = Val.getBitWidth();
+ unsigned NumWords = Val.getNumWords();
+ const uint64_t* Words = Val.getRawData();
+ if (NumWords > 1) {
+ pVal = new (C) uint64_t[NumWords];
+ std::copy(Words, Words + NumWords, pVal);
+ } else if (NumWords == 1)
+ VAL = Words[0];
+ else
+ VAL = 0;
+}
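+
+// E.g. a 64-bit value is stored inline in VAL, while a 128-bit APInt
+// (NumWords == 2) is copied into a context-allocated array referenced by
+// pVal; hasAllocation() distinguishes the two representations when the value
+// is later replaced or read back.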
+
+IntegerLiteral::IntegerLiteral(const ASTContext &C, const llvm::APInt &V,
+ QualType type, SourceLocation l)
+ : Expr(IntegerLiteralClass, type, VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ Loc(l) {
+ assert(type->isIntegerType() && "Illegal type in IntegerLiteral");
+ assert(V.getBitWidth() == C.getIntWidth(type) &&
+ "Integer type is not the correct size for constant.");
+ setValue(C, V);
+}
+
+IntegerLiteral *
+IntegerLiteral::Create(const ASTContext &C, const llvm::APInt &V,
+ QualType type, SourceLocation l) {
+ return new (C) IntegerLiteral(C, V, type, l);
+}
+
+IntegerLiteral *
+IntegerLiteral::Create(const ASTContext &C, EmptyShell Empty) {
+ return new (C) IntegerLiteral(Empty);
+}
+
+FloatingLiteral::FloatingLiteral(const ASTContext &C, const llvm::APFloat &V,
+ bool isexact, QualType Type, SourceLocation L)
+ : Expr(FloatingLiteralClass, Type, VK_RValue, OK_Ordinary, false, false,
+ false, false), Loc(L) {
+ setSemantics(V.getSemantics());
+ FloatingLiteralBits.IsExact = isexact;
+ setValue(C, V);
+}
+
+FloatingLiteral::FloatingLiteral(const ASTContext &C, EmptyShell Empty)
+ : Expr(FloatingLiteralClass, Empty) {
+ setRawSemantics(IEEEhalf);
+ FloatingLiteralBits.IsExact = false;
+}
+
+FloatingLiteral *
+FloatingLiteral::Create(const ASTContext &C, const llvm::APFloat &V,
+ bool isexact, QualType Type, SourceLocation L) {
+ return new (C) FloatingLiteral(C, V, isexact, Type, L);
+}
+
+FloatingLiteral *
+FloatingLiteral::Create(const ASTContext &C, EmptyShell Empty) {
+ return new (C) FloatingLiteral(C, Empty);
+}
+
+const llvm::fltSemantics &FloatingLiteral::getSemantics() const {
+ switch(FloatingLiteralBits.Semantics) {
+ case IEEEhalf:
+ return llvm::APFloat::IEEEhalf;
+ case IEEEsingle:
+ return llvm::APFloat::IEEEsingle;
+ case IEEEdouble:
+ return llvm::APFloat::IEEEdouble;
+ case x87DoubleExtended:
+ return llvm::APFloat::x87DoubleExtended;
+ case IEEEquad:
+ return llvm::APFloat::IEEEquad;
+ case PPCDoubleDouble:
+ return llvm::APFloat::PPCDoubleDouble;
+ }
+ llvm_unreachable("Unrecognised floating semantics");
+}
+
+void FloatingLiteral::setSemantics(const llvm::fltSemantics &Sem) {
+ if (&Sem == &llvm::APFloat::IEEEhalf)
+ FloatingLiteralBits.Semantics = IEEEhalf;
+ else if (&Sem == &llvm::APFloat::IEEEsingle)
+ FloatingLiteralBits.Semantics = IEEEsingle;
+ else if (&Sem == &llvm::APFloat::IEEEdouble)
+ FloatingLiteralBits.Semantics = IEEEdouble;
+ else if (&Sem == &llvm::APFloat::x87DoubleExtended)
+ FloatingLiteralBits.Semantics = x87DoubleExtended;
+ else if (&Sem == &llvm::APFloat::IEEEquad)
+ FloatingLiteralBits.Semantics = IEEEquad;
+ else if (&Sem == &llvm::APFloat::PPCDoubleDouble)
+ FloatingLiteralBits.Semantics = PPCDoubleDouble;
+ else
+ llvm_unreachable("Unknown floating semantics");
+}
+
+/// getValueAsApproximateDouble - This returns the value as an inaccurate
+/// double. Note that this may cause loss of precision, but is useful for
+/// debugging dumps, etc.
+double FloatingLiteral::getValueAsApproximateDouble() const {
+ llvm::APFloat V = getValue();
+ bool ignored;
+ V.convert(llvm::APFloat::IEEEdouble, llvm::APFloat::rmNearestTiesToEven,
+ &ignored);
+ return V.convertToDouble();
+}
+
+int StringLiteral::mapCharByteWidth(const TargetInfo &target, StringKind k) {
+ int CharByteWidth = 0;
+ switch(k) {
+ case Ascii:
+ case UTF8:
+ CharByteWidth = target.getCharWidth();
+ break;
+ case Wide:
+ CharByteWidth = target.getWCharWidth();
+ break;
+ case UTF16:
+ CharByteWidth = target.getChar16Width();
+ break;
+ case UTF32:
+ CharByteWidth = target.getChar32Width();
+ break;
+ }
+ assert((CharByteWidth & 7) == 0 && "Assumes character size is byte multiple");
+ CharByteWidth /= 8;
+ assert((CharByteWidth==1 || CharByteWidth==2 || CharByteWidth==4)
+ && "character byte widths supported are 1, 2, and 4 only");
+ return CharByteWidth;
+}
+
+StringLiteral *StringLiteral::Create(const ASTContext &C, StringRef Str,
+ StringKind Kind, bool Pascal, QualType Ty,
+ const SourceLocation *Loc,
+ unsigned NumStrs) {
+ assert(C.getAsConstantArrayType(Ty) &&
+ "StringLiteral must be of constant array type!");
+
+ // Allocate enough space for the StringLiteral plus an array of locations for
+ // any concatenated string tokens.
+ void *Mem = C.Allocate(sizeof(StringLiteral)+
+ sizeof(SourceLocation)*(NumStrs-1),
+ llvm::alignOf<StringLiteral>());
+ StringLiteral *SL = new (Mem) StringLiteral(Ty);
+
+ // OPTIMIZE: could allocate this appended to the StringLiteral.
+ SL->setString(C,Str,Kind,Pascal);
+
+ SL->TokLocs[0] = Loc[0];
+ SL->NumConcatenated = NumStrs;
+
+ if (NumStrs != 1)
+ memcpy(&SL->TokLocs[1], Loc+1, sizeof(SourceLocation)*(NumStrs-1));
+ return SL;
+}
+
+StringLiteral *StringLiteral::CreateEmpty(const ASTContext &C,
+ unsigned NumStrs) {
+ void *Mem = C.Allocate(sizeof(StringLiteral)+
+ sizeof(SourceLocation)*(NumStrs-1),
+ llvm::alignOf<StringLiteral>());
+ StringLiteral *SL = new (Mem) StringLiteral(QualType());
+ SL->CharByteWidth = 0;
+ SL->Length = 0;
+ SL->NumConcatenated = NumStrs;
+ return SL;
+}
+
+void StringLiteral::outputString(raw_ostream &OS) const {
+ switch (getKind()) {
+ case Ascii: break; // no prefix.
+ case Wide: OS << 'L'; break;
+ case UTF8: OS << "u8"; break;
+ case UTF16: OS << 'u'; break;
+ case UTF32: OS << 'U'; break;
+ }
+ OS << '"';
+ static const char Hex[] = "0123456789ABCDEF";
+
+ unsigned LastSlashX = getLength();
+ for (unsigned I = 0, N = getLength(); I != N; ++I) {
+ switch (uint32_t Char = getCodeUnit(I)) {
+ default:
+ // FIXME: Convert UTF-8 back to codepoints before rendering.
+
+ // Convert UTF-16 surrogate pairs back to codepoints before rendering.
+ // Leave invalid surrogates alone; we'll use \x for those.
+ if (getKind() == UTF16 && I != N - 1 && Char >= 0xd800 &&
+ Char <= 0xdbff) {
+ uint32_t Trail = getCodeUnit(I + 1);
+ if (Trail >= 0xdc00 && Trail <= 0xdfff) {
+ Char = 0x10000 + ((Char - 0xd800) << 10) + (Trail - 0xdc00);
+ ++I;
+ }
+ }
+
+ if (Char > 0xff) {
+ // If this is a wide string, output characters over 0xff using \x
+ // escapes. Otherwise, this is a UTF-16 or UTF-32 string, and Char is a
+ // codepoint: use \x escapes for invalid codepoints.
+ if (getKind() == Wide ||
+ (Char >= 0xd800 && Char <= 0xdfff) || Char >= 0x110000) {
+ // FIXME: Is this the best way to print wchar_t?
+ OS << "\\x";
+ int Shift = 28;
+ while ((Char >> Shift) == 0)
+ Shift -= 4;
+ for (/**/; Shift >= 0; Shift -= 4)
+ OS << Hex[(Char >> Shift) & 15];
+ LastSlashX = I;
+ break;
+ }
+
+ if (Char > 0xffff)
+ OS << "\\U00"
+ << Hex[(Char >> 20) & 15]
+ << Hex[(Char >> 16) & 15];
+ else
+ OS << "\\u";
+ OS << Hex[(Char >> 12) & 15]
+ << Hex[(Char >> 8) & 15]
+ << Hex[(Char >> 4) & 15]
+ << Hex[(Char >> 0) & 15];
+ break;
+ }
+
+ // If we used \x... for the previous character, and this character is a
+ // hexadecimal digit, prevent it being slurped as part of the \x.
+ if (LastSlashX + 1 == I) {
+ switch (Char) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ OS << "\"\"";
+ }
+ }
+
+ assert(Char <= 0xff &&
+ "Characters above 0xff should already have been handled.");
+
+ if (isPrintable(Char))
+ OS << (char)Char;
+ else // Output anything hard as an octal escape.
+ OS << '\\'
+ << (char)('0' + ((Char >> 6) & 7))
+ << (char)('0' + ((Char >> 3) & 7))
+ << (char)('0' + ((Char >> 0) & 7));
+ break;
+ // Handle some common non-printable cases to make dumps prettier.
+ case '\\': OS << "\\\\"; break;
+ case '"': OS << "\\\""; break;
+ case '\n': OS << "\\n"; break;
+ case '\t': OS << "\\t"; break;
+ case '\a': OS << "\\a"; break;
+ case '\b': OS << "\\b"; break;
+ }
+ }
+ OS << '"';
+}
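+
+// For example, a UTF-16 literal holding the surrogate pair 0xD83D 0xDE00 is
+// printed as u"\U0001F600" (the pair is recombined above), and L"\xFFFF"
+// followed by the letter 'f' prints as L"\xFFFF""f" so the hex digit is not
+// slurped into the preceding \x escape.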
+
+void StringLiteral::setString(const ASTContext &C, StringRef Str,
+ StringKind Kind, bool IsPascal) {
+  // FIXME: we assume that the string data comes from a target that uses the
+  // same code unit size and endianness for the type of string.
+ this->Kind = Kind;
+ this->IsPascal = IsPascal;
+
+ CharByteWidth = mapCharByteWidth(C.getTargetInfo(),Kind);
+ assert((Str.size()%CharByteWidth == 0)
+ && "size of data must be multiple of CharByteWidth");
+ Length = Str.size()/CharByteWidth;
+
+ switch(CharByteWidth) {
+ case 1: {
+ char *AStrData = new (C) char[Length];
+ std::memcpy(AStrData,Str.data(),Length*sizeof(*AStrData));
+ StrData.asChar = AStrData;
+ break;
+ }
+ case 2: {
+ uint16_t *AStrData = new (C) uint16_t[Length];
+ std::memcpy(AStrData,Str.data(),Length*sizeof(*AStrData));
+ StrData.asUInt16 = AStrData;
+ break;
+ }
+ case 4: {
+ uint32_t *AStrData = new (C) uint32_t[Length];
+ std::memcpy(AStrData,Str.data(),Length*sizeof(*AStrData));
+ StrData.asUInt32 = AStrData;
+ break;
+ }
+ default:
+ assert(false && "unsupported CharByteWidth");
+ }
+}
+
+/// getLocationOfByte - Return a source location that points to the specified
+/// byte of this string literal.
+///
+/// Strings are amazingly complex. They can be formed from multiple tokens and
+/// can have escape sequences in them in addition to the usual trigraph and
+/// escaped newline business. This routine handles this complexity.
+///
+/// The *StartToken sets the first token to be searched in this function and
+/// the *StartTokenByteOffset is the byte offset of the first token. Before
+/// returning, it updates the *StartToken to the TokNo of the token being found
+/// and sets *StartTokenByteOffset to the byte offset of the token in the
+/// string.
+/// Using these two parameters can reduce the time complexity from O(n^2) to
+/// O(n) when the locations of all the bytes in the string are queried in
+/// order.
+///
+SourceLocation
+StringLiteral::getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
+ const LangOptions &Features,
+ const TargetInfo &Target, unsigned *StartToken,
+ unsigned *StartTokenByteOffset) const {
+ assert((Kind == StringLiteral::Ascii || Kind == StringLiteral::UTF8) &&
+ "Only narrow string literals are currently supported");
+
+ // Loop over all of the tokens in this string until we find the one that
+ // contains the byte we're looking for.
+ unsigned TokNo = 0;
+ unsigned StringOffset = 0;
+ if (StartToken)
+ TokNo = *StartToken;
+ if (StartTokenByteOffset) {
+ StringOffset = *StartTokenByteOffset;
+ ByteNo -= StringOffset;
+ }
+ while (1) {
+ assert(TokNo < getNumConcatenated() && "Invalid byte number!");
+ SourceLocation StrTokLoc = getStrTokenLoc(TokNo);
+
+ // Get the spelling of the string so that we can get the data that makes up
+ // the string literal, not the identifier for the macro it is potentially
+ // expanded through.
+ SourceLocation StrTokSpellingLoc = SM.getSpellingLoc(StrTokLoc);
+
+ // Re-lex the token to get its length and original spelling.
+ std::pair<FileID, unsigned> LocInfo =
+ SM.getDecomposedLoc(StrTokSpellingLoc);
+ bool Invalid = false;
+ StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
+ if (Invalid) {
+ if (StartTokenByteOffset != nullptr)
+ *StartTokenByteOffset = StringOffset;
+ if (StartToken != nullptr)
+ *StartToken = TokNo;
+ return StrTokSpellingLoc;
+ }
+
+ const char *StrData = Buffer.data()+LocInfo.second;
+
+ // Create a lexer starting at the beginning of this token.
+ Lexer TheLexer(SM.getLocForStartOfFile(LocInfo.first), Features,
+ Buffer.begin(), StrData, Buffer.end());
+ Token TheTok;
+ TheLexer.LexFromRawLexer(TheTok);
+
+ // Use the StringLiteralParser to compute the length of the string in bytes.
+ StringLiteralParser SLP(TheTok, SM, Features, Target);
+ unsigned TokNumBytes = SLP.GetStringLength();
+
+ // If the byte is in this token, return the location of the byte.
+ if (ByteNo < TokNumBytes ||
+ (ByteNo == TokNumBytes && TokNo == getNumConcatenated() - 1)) {
+ unsigned Offset = SLP.getOffsetOfStringByte(TheTok, ByteNo);
+
+ // Now that we know the offset of the token in the spelling, use the
+ // preprocessor to get the offset in the original source.
+ if (StartTokenByteOffset != nullptr)
+ *StartTokenByteOffset = StringOffset;
+ if (StartToken != nullptr)
+ *StartToken = TokNo;
+ return Lexer::AdvanceToTokenCharacter(StrTokLoc, Offset, SM, Features);
+ }
+
+ // Move to the next string token.
+ StringOffset += TokNumBytes;
+ ++TokNo;
+ ByteNo -= TokNumBytes;
+ }
+}
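+
+// Illustrative O(n) scan over a narrow literal (a sketch; 'SL', 'SM',
+// 'LangOpts', and 'Target' are assumed to be in scope), reusing the cursor
+// parameters so each string token is re-lexed only once:
+//   unsigned Tok = 0, TokOffset = 0;
+//   for (unsigned i = 0, n = SL->getByteLength(); i != n; ++i)
+//     SL->getLocationOfByte(i, SM, LangOpts, Target, &Tok, &TokOffset);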
+
+
+
+/// getOpcodeStr - Turn an Opcode enum value into the punctuation char it
+/// corresponds to, e.g. "&" or "[pre]++".
+StringRef UnaryOperator::getOpcodeStr(Opcode Op) {
+ switch (Op) {
+ case UO_PostInc: return "++";
+ case UO_PostDec: return "--";
+ case UO_PreInc: return "++";
+ case UO_PreDec: return "--";
+ case UO_AddrOf: return "&";
+ case UO_Deref: return "*";
+ case UO_Plus: return "+";
+ case UO_Minus: return "-";
+ case UO_Not: return "~";
+ case UO_LNot: return "!";
+ case UO_Real: return "__real";
+ case UO_Imag: return "__imag";
+ case UO_Extension: return "__extension__";
+ case UO_Coawait: return "co_await";
+ }
+ llvm_unreachable("Unknown unary operator");
+}
+
+UnaryOperatorKind
+UnaryOperator::getOverloadedOpcode(OverloadedOperatorKind OO, bool Postfix) {
+ switch (OO) {
+ default: llvm_unreachable("No unary operator for overloaded function");
+ case OO_PlusPlus: return Postfix ? UO_PostInc : UO_PreInc;
+ case OO_MinusMinus: return Postfix ? UO_PostDec : UO_PreDec;
+ case OO_Amp: return UO_AddrOf;
+ case OO_Star: return UO_Deref;
+ case OO_Plus: return UO_Plus;
+ case OO_Minus: return UO_Minus;
+ case OO_Tilde: return UO_Not;
+ case OO_Exclaim: return UO_LNot;
+ case OO_Coawait: return UO_Coawait;
+ }
+}
+
+OverloadedOperatorKind UnaryOperator::getOverloadedOperator(Opcode Opc) {
+ switch (Opc) {
+ case UO_PostInc: case UO_PreInc: return OO_PlusPlus;
+ case UO_PostDec: case UO_PreDec: return OO_MinusMinus;
+ case UO_AddrOf: return OO_Amp;
+ case UO_Deref: return OO_Star;
+ case UO_Plus: return OO_Plus;
+ case UO_Minus: return OO_Minus;
+ case UO_Not: return OO_Tilde;
+ case UO_LNot: return OO_Exclaim;
+ case UO_Coawait: return OO_Coawait;
+ default: return OO_None;
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// Postfix Operators.
+//===----------------------------------------------------------------------===//
+
+CallExpr::CallExpr(const ASTContext& C, StmtClass SC, Expr *fn,
+ unsigned NumPreArgs, ArrayRef<Expr*> args, QualType t,
+ ExprValueKind VK, SourceLocation rparenloc)
+ : Expr(SC, t, VK, OK_Ordinary,
+ fn->isTypeDependent(),
+ fn->isValueDependent(),
+ fn->isInstantiationDependent(),
+ fn->containsUnexpandedParameterPack()),
+ NumArgs(args.size()) {
+
+ SubExprs = new (C) Stmt*[args.size()+PREARGS_START+NumPreArgs];
+ SubExprs[FN] = fn;
+ for (unsigned i = 0; i != args.size(); ++i) {
+ if (args[i]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (args[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (args[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (args[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i+PREARGS_START+NumPreArgs] = args[i];
+ }
+
+ CallExprBits.NumPreArgs = NumPreArgs;
+ RParenLoc = rparenloc;
+}
+
+CallExpr::CallExpr(const ASTContext &C, Expr *fn, ArrayRef<Expr *> args,
+ QualType t, ExprValueKind VK, SourceLocation rparenloc)
+ : CallExpr(C, CallExprClass, fn, /*NumPreArgs=*/0, args, t, VK, rparenloc) {
+}
+
+CallExpr::CallExpr(const ASTContext &C, StmtClass SC, EmptyShell Empty)
+ : CallExpr(C, SC, /*NumPreArgs=*/0, Empty) {}
+
+CallExpr::CallExpr(const ASTContext &C, StmtClass SC, unsigned NumPreArgs,
+ EmptyShell Empty)
+ : Expr(SC, Empty), SubExprs(nullptr), NumArgs(0) {
+ // FIXME: Why do we allocate this?
+ SubExprs = new (C) Stmt*[PREARGS_START+NumPreArgs];
+ CallExprBits.NumPreArgs = NumPreArgs;
+}
+
+Decl *CallExpr::getCalleeDecl() {
+ Expr *CEE = getCallee()->IgnoreParenImpCasts();
+
+ while (SubstNonTypeTemplateParmExpr *NTTP
+ = dyn_cast<SubstNonTypeTemplateParmExpr>(CEE)) {
+ CEE = NTTP->getReplacement()->IgnoreParenCasts();
+ }
+
+ // If we're calling a dereference, look at the pointer instead.
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CEE)) {
+ if (BO->isPtrMemOp())
+ CEE = BO->getRHS()->IgnoreParenCasts();
+ } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(CEE)) {
+ if (UO->getOpcode() == UO_Deref)
+ CEE = UO->getSubExpr()->IgnoreParenCasts();
+ }
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE))
+ return DRE->getDecl();
+ if (MemberExpr *ME = dyn_cast<MemberExpr>(CEE))
+ return ME->getMemberDecl();
+
+ return nullptr;
+}
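+
+// For example, given 'void f(); void (*fp)() = f;', the call '(*fp)()' strips
+// the dereference above and returns the VarDecl 'fp' (not 'f'), whereas a
+// direct call 'f()' returns the FunctionDecl for 'f'.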
+
+FunctionDecl *CallExpr::getDirectCallee() {
+ return dyn_cast_or_null<FunctionDecl>(getCalleeDecl());
+}
+
+/// setNumArgs - This changes the number of arguments present in this call.
+/// Any orphaned expressions are deleted by this, and any new operands are set
+/// to null.
+void CallExpr::setNumArgs(const ASTContext& C, unsigned NumArgs) {
+ // No change, just return.
+ if (NumArgs == getNumArgs()) return;
+
+  // If shrinking # arguments, just delete the extras and forget them.
+ if (NumArgs < getNumArgs()) {
+ this->NumArgs = NumArgs;
+ return;
+ }
+
+  // Otherwise, we are growing the # arguments. Allocate a bigger argument array.
+ unsigned NumPreArgs = getNumPreArgs();
+ Stmt **NewSubExprs = new (C) Stmt*[NumArgs+PREARGS_START+NumPreArgs];
+ // Copy over args.
+ for (unsigned i = 0; i != getNumArgs()+PREARGS_START+NumPreArgs; ++i)
+ NewSubExprs[i] = SubExprs[i];
+ // Null out new args.
+ for (unsigned i = getNumArgs()+PREARGS_START+NumPreArgs;
+ i != NumArgs+PREARGS_START+NumPreArgs; ++i)
+ NewSubExprs[i] = nullptr;
+
+ if (SubExprs) C.Deallocate(SubExprs);
+ SubExprs = NewSubExprs;
+ this->NumArgs = NumArgs;
+}
+
+/// getBuiltinCallee - If this is a call to a builtin, return the builtin ID. If
+/// not, return 0.
+unsigned CallExpr::getBuiltinCallee() const {
+ // All simple function calls (e.g. func()) are implicitly cast to pointer to
+  // function. As a result, we try to obtain the DeclRefExpr from the
+ // ImplicitCastExpr.
+ const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(getCallee());
+ if (!ICE) // FIXME: deal with more complex calls (e.g. (func)(), (*func)()).
+ return 0;
+
+ const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr());
+ if (!DRE)
+ return 0;
+
+ const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(DRE->getDecl());
+ if (!FDecl)
+ return 0;
+
+ if (!FDecl->getIdentifier())
+ return 0;
+
+ return FDecl->getBuiltinID();
+}
+
+bool CallExpr::isUnevaluatedBuiltinCall(const ASTContext &Ctx) const {
+ if (unsigned BI = getBuiltinCallee())
+ return Ctx.BuiltinInfo.isUnevaluated(BI);
+ return false;
+}
+
+QualType CallExpr::getCallReturnType(const ASTContext &Ctx) const {
+ const Expr *Callee = getCallee();
+ QualType CalleeType = Callee->getType();
+ if (const auto *FnTypePtr = CalleeType->getAs<PointerType>()) {
+ CalleeType = FnTypePtr->getPointeeType();
+ } else if (const auto *BPT = CalleeType->getAs<BlockPointerType>()) {
+ CalleeType = BPT->getPointeeType();
+ } else if (CalleeType->isSpecificPlaceholderType(BuiltinType::BoundMember)) {
+ if (isa<CXXPseudoDestructorExpr>(Callee->IgnoreParens()))
+ return Ctx.VoidTy;
+
+ // This should never be overloaded and so should never return null.
+ CalleeType = Expr::findBoundMemberType(Callee);
+ }
+
+ const FunctionType *FnType = CalleeType->castAs<FunctionType>();
+ return FnType->getReturnType();
+}
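+
+// E.g. for 'int (*fp)(void);', the callee type of 'fp()' is peeled from the
+// PointerType down to 'int (void)' and the return type 'int' is read off the
+// FunctionType; block calls peel a BlockPointerType the same way.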
+
+SourceLocation CallExpr::getLocStart() const {
+ if (isa<CXXOperatorCallExpr>(this))
+ return cast<CXXOperatorCallExpr>(this)->getLocStart();
+
+ SourceLocation begin = getCallee()->getLocStart();
+ if (begin.isInvalid() && getNumArgs() > 0 && getArg(0))
+ begin = getArg(0)->getLocStart();
+ return begin;
+}
+SourceLocation CallExpr::getLocEnd() const {
+ if (isa<CXXOperatorCallExpr>(this))
+ return cast<CXXOperatorCallExpr>(this)->getLocEnd();
+
+ SourceLocation end = getRParenLoc();
+ if (end.isInvalid() && getNumArgs() > 0 && getArg(getNumArgs() - 1))
+ end = getArg(getNumArgs() - 1)->getLocEnd();
+ return end;
+}
+
+OffsetOfExpr *OffsetOfExpr::Create(const ASTContext &C, QualType type,
+ SourceLocation OperatorLoc,
+ TypeSourceInfo *tsi,
+ ArrayRef<OffsetOfNode> comps,
+ ArrayRef<Expr*> exprs,
+ SourceLocation RParenLoc) {
+ void *Mem = C.Allocate(
+ totalSizeToAlloc<OffsetOfNode, Expr *>(comps.size(), exprs.size()));
+
+ return new (Mem) OffsetOfExpr(C, type, OperatorLoc, tsi, comps, exprs,
+ RParenLoc);
+}
+
+OffsetOfExpr *OffsetOfExpr::CreateEmpty(const ASTContext &C,
+ unsigned numComps, unsigned numExprs) {
+ void *Mem =
+ C.Allocate(totalSizeToAlloc<OffsetOfNode, Expr *>(numComps, numExprs));
+ return new (Mem) OffsetOfExpr(numComps, numExprs);
+}
+
+OffsetOfExpr::OffsetOfExpr(const ASTContext &C, QualType type,
+ SourceLocation OperatorLoc, TypeSourceInfo *tsi,
+ ArrayRef<OffsetOfNode> comps, ArrayRef<Expr*> exprs,
+ SourceLocation RParenLoc)
+ : Expr(OffsetOfExprClass, type, VK_RValue, OK_Ordinary,
+ /*TypeDependent=*/false,
+ /*ValueDependent=*/tsi->getType()->isDependentType(),
+ tsi->getType()->isInstantiationDependentType(),
+ tsi->getType()->containsUnexpandedParameterPack()),
+ OperatorLoc(OperatorLoc), RParenLoc(RParenLoc), TSInfo(tsi),
+ NumComps(comps.size()), NumExprs(exprs.size())
+{
+ for (unsigned i = 0; i != comps.size(); ++i) {
+ setComponent(i, comps[i]);
+ }
+
+ for (unsigned i = 0; i != exprs.size(); ++i) {
+ if (exprs[i]->isTypeDependent() || exprs[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (exprs[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ setIndexExpr(i, exprs[i]);
+ }
+}
+
+IdentifierInfo *OffsetOfNode::getFieldName() const {
+ assert(getKind() == Field || getKind() == Identifier);
+ if (getKind() == Field)
+ return getField()->getIdentifier();
+
+ return reinterpret_cast<IdentifierInfo *> (Data & ~(uintptr_t)Mask);
+}
+
+UnaryExprOrTypeTraitExpr::UnaryExprOrTypeTraitExpr(
+ UnaryExprOrTypeTrait ExprKind, Expr *E, QualType resultType,
+ SourceLocation op, SourceLocation rp)
+ : Expr(UnaryExprOrTypeTraitExprClass, resultType, VK_RValue, OK_Ordinary,
+ false, // Never type-dependent (C++ [temp.dep.expr]p3).
+ // Value-dependent if the argument is type-dependent.
+ E->isTypeDependent(), E->isInstantiationDependent(),
+ E->containsUnexpandedParameterPack()),
+ OpLoc(op), RParenLoc(rp) {
+ UnaryExprOrTypeTraitExprBits.Kind = ExprKind;
+ UnaryExprOrTypeTraitExprBits.IsType = false;
+ Argument.Ex = E;
+
+ // Check to see if we are in the situation where alignof(decl) should be
+ // dependent because decl's alignment is dependent.
+ if (ExprKind == UETT_AlignOf) {
+ if (!isValueDependent() || !isInstantiationDependent()) {
+ E = E->IgnoreParens();
+
+ const ValueDecl *D = nullptr;
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
+ D = DRE->getDecl();
+ else if (const auto *ME = dyn_cast<MemberExpr>(E))
+ D = ME->getMemberDecl();
+
+ if (D) {
+ for (const auto *I : D->specific_attrs<AlignedAttr>()) {
+ if (I->isAlignmentDependent()) {
+ setValueDependent(true);
+ setInstantiationDependent(true);
+ break;
+ }
+ }
+ }
+ }
+ }
+}
+
+MemberExpr *MemberExpr::Create(
+ const ASTContext &C, Expr *base, bool isarrow, SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc,
+ ValueDecl *memberdecl, DeclAccessPair founddecl,
+ DeclarationNameInfo nameinfo, const TemplateArgumentListInfo *targs,
+ QualType ty, ExprValueKind vk, ExprObjectKind ok) {
+
+ bool hasQualOrFound = (QualifierLoc ||
+ founddecl.getDecl() != memberdecl ||
+ founddecl.getAccess() != memberdecl->getAccess());
+
+ bool HasTemplateKWAndArgsInfo = targs || TemplateKWLoc.isValid();
+ std::size_t Size =
+ totalSizeToAlloc<MemberExprNameQualifier, ASTTemplateKWAndArgsInfo,
+ TemplateArgumentLoc>(hasQualOrFound ? 1 : 0,
+ HasTemplateKWAndArgsInfo ? 1 : 0,
+ targs ? targs->size() : 0);
+
+ void *Mem = C.Allocate(Size, llvm::alignOf<MemberExpr>());
+ MemberExpr *E = new (Mem)
+ MemberExpr(base, isarrow, OperatorLoc, memberdecl, nameinfo, ty, vk, ok);
+
+ if (hasQualOrFound) {
+ // FIXME: Wrong. We should be looking at the member declaration we found.
+ if (QualifierLoc && QualifierLoc.getNestedNameSpecifier()->isDependent()) {
+ E->setValueDependent(true);
+ E->setTypeDependent(true);
+ E->setInstantiationDependent(true);
+ }
+ else if (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())
+ E->setInstantiationDependent(true);
+
+ E->HasQualifierOrFoundDecl = true;
+
+ MemberExprNameQualifier *NQ =
+ E->getTrailingObjects<MemberExprNameQualifier>();
+ NQ->QualifierLoc = QualifierLoc;
+ NQ->FoundDecl = founddecl;
+ }
+
+ E->HasTemplateKWAndArgsInfo = (targs || TemplateKWLoc.isValid());
+
+ if (targs) {
+ bool Dependent = false;
+ bool InstantiationDependent = false;
+ bool ContainsUnexpandedParameterPack = false;
+ E->getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
+ TemplateKWLoc, *targs, E->getTrailingObjects<TemplateArgumentLoc>(),
+ Dependent, InstantiationDependent, ContainsUnexpandedParameterPack);
+ if (InstantiationDependent)
+ E->setInstantiationDependent(true);
+ } else if (TemplateKWLoc.isValid()) {
+ E->getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
+ TemplateKWLoc);
+ }
+
+ return E;
+}
+
+SourceLocation MemberExpr::getLocStart() const {
+ if (isImplicitAccess()) {
+ if (hasQualifier())
+ return getQualifierLoc().getBeginLoc();
+ return MemberLoc;
+ }
+
+ // FIXME: We don't want this to happen. Rather, we should be able to
+ // detect all kinds of implicit accesses more cleanly.
+ SourceLocation BaseStartLoc = getBase()->getLocStart();
+ if (BaseStartLoc.isValid())
+ return BaseStartLoc;
+ return MemberLoc;
+}
+SourceLocation MemberExpr::getLocEnd() const {
+ SourceLocation EndLoc = getMemberNameInfo().getEndLoc();
+ if (hasExplicitTemplateArgs())
+ EndLoc = getRAngleLoc();
+ else if (EndLoc.isInvalid())
+ EndLoc = getBase()->getLocEnd();
+ return EndLoc;
+}
+
+bool CastExpr::CastConsistency() const {
+ switch (getCastKind()) {
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_BaseToDerived:
+ case CK_BaseToDerivedMemberPointer:
+ assert(!path_empty() && "Cast kind should have a base path!");
+ break;
+
+ case CK_CPointerToObjCPointerCast:
+ assert(getType()->isObjCObjectPointerType());
+ assert(getSubExpr()->getType()->isPointerType());
+ goto CheckNoBasePath;
+
+ case CK_BlockPointerToObjCPointerCast:
+ assert(getType()->isObjCObjectPointerType());
+ assert(getSubExpr()->getType()->isBlockPointerType());
+ goto CheckNoBasePath;
+
+ case CK_ReinterpretMemberPointer:
+ assert(getType()->isMemberPointerType());
+ assert(getSubExpr()->getType()->isMemberPointerType());
+ goto CheckNoBasePath;
+
+ case CK_BitCast:
+ // Arbitrary casts to C pointer types count as bitcasts.
+ // Otherwise, we should only have block and ObjC pointer casts
+ // here if they stay within the type kind.
+ if (!getType()->isPointerType()) {
+ assert(getType()->isObjCObjectPointerType() ==
+ getSubExpr()->getType()->isObjCObjectPointerType());
+ assert(getType()->isBlockPointerType() ==
+ getSubExpr()->getType()->isBlockPointerType());
+ }
+ goto CheckNoBasePath;
+
+ case CK_AnyPointerToBlockPointerCast:
+ assert(getType()->isBlockPointerType());
+ assert(getSubExpr()->getType()->isAnyPointerType() &&
+ !getSubExpr()->getType()->isBlockPointerType());
+ goto CheckNoBasePath;
+
+ case CK_CopyAndAutoreleaseBlockObject:
+ assert(getType()->isBlockPointerType());
+ assert(getSubExpr()->getType()->isBlockPointerType());
+ goto CheckNoBasePath;
+
+ case CK_FunctionToPointerDecay:
+ assert(getType()->isPointerType());
+ assert(getSubExpr()->getType()->isFunctionType());
+ goto CheckNoBasePath;
+
+ case CK_AddressSpaceConversion:
+ assert(getType()->isPointerType());
+ assert(getSubExpr()->getType()->isPointerType());
+ assert(getType()->getPointeeType().getAddressSpace() !=
+ getSubExpr()->getType()->getPointeeType().getAddressSpace());
+ // These should not have an inheritance path.
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_ArrayToPointerDecay:
+ case CK_NullToMemberPointer:
+ case CK_NullToPointer:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_ZeroToOCLEvent:
+ assert(!getType()->isBooleanType() && "unheralded conversion to bool");
+ goto CheckNoBasePath;
+
+ case CK_Dependent:
+ case CK_LValueToRValue:
+ case CK_NoOp:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_PointerToBoolean:
+ case CK_IntegralToBoolean:
+ case CK_FloatingToBoolean:
+ case CK_MemberPointerToBoolean:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToBoolean:
+ case CK_LValueBitCast: // -> bool&
+ case CK_UserDefinedConversion: // operator bool()
+ case CK_BuiltinFnToFnPtr:
+ CheckNoBasePath:
+ assert(path_empty() && "Cast kind should not have a base path!");
+ break;
+ }
+ return true;
+}
+
+const char *CastExpr::getCastKindName() const {
+ switch (getCastKind()) {
+ case CK_Dependent:
+ return "Dependent";
+ case CK_BitCast:
+ return "BitCast";
+ case CK_LValueBitCast:
+ return "LValueBitCast";
+ case CK_LValueToRValue:
+ return "LValueToRValue";
+ case CK_NoOp:
+ return "NoOp";
+ case CK_BaseToDerived:
+ return "BaseToDerived";
+ case CK_DerivedToBase:
+ return "DerivedToBase";
+ case CK_UncheckedDerivedToBase:
+ return "UncheckedDerivedToBase";
+ case CK_Dynamic:
+ return "Dynamic";
+ case CK_ToUnion:
+ return "ToUnion";
+ case CK_ArrayToPointerDecay:
+ return "ArrayToPointerDecay";
+ case CK_FunctionToPointerDecay:
+ return "FunctionToPointerDecay";
+ case CK_NullToMemberPointer:
+ return "NullToMemberPointer";
+ case CK_NullToPointer:
+ return "NullToPointer";
+ case CK_BaseToDerivedMemberPointer:
+ return "BaseToDerivedMemberPointer";
+ case CK_DerivedToBaseMemberPointer:
+ return "DerivedToBaseMemberPointer";
+ case CK_ReinterpretMemberPointer:
+ return "ReinterpretMemberPointer";
+ case CK_UserDefinedConversion:
+ return "UserDefinedConversion";
+ case CK_ConstructorConversion:
+ return "ConstructorConversion";
+ case CK_IntegralToPointer:
+ return "IntegralToPointer";
+ case CK_PointerToIntegral:
+ return "PointerToIntegral";
+ case CK_PointerToBoolean:
+ return "PointerToBoolean";
+ case CK_ToVoid:
+ return "ToVoid";
+ case CK_VectorSplat:
+ return "VectorSplat";
+ case CK_IntegralCast:
+ return "IntegralCast";
+ case CK_IntegralToBoolean:
+ return "IntegralToBoolean";
+ case CK_IntegralToFloating:
+ return "IntegralToFloating";
+ case CK_FloatingToIntegral:
+ return "FloatingToIntegral";
+ case CK_FloatingCast:
+ return "FloatingCast";
+ case CK_FloatingToBoolean:
+ return "FloatingToBoolean";
+ case CK_MemberPointerToBoolean:
+ return "MemberPointerToBoolean";
+ case CK_CPointerToObjCPointerCast:
+ return "CPointerToObjCPointerCast";
+ case CK_BlockPointerToObjCPointerCast:
+ return "BlockPointerToObjCPointerCast";
+ case CK_AnyPointerToBlockPointerCast:
+ return "AnyPointerToBlockPointerCast";
+ case CK_ObjCObjectLValueCast:
+ return "ObjCObjectLValueCast";
+ case CK_FloatingRealToComplex:
+ return "FloatingRealToComplex";
+ case CK_FloatingComplexToReal:
+ return "FloatingComplexToReal";
+ case CK_FloatingComplexToBoolean:
+ return "FloatingComplexToBoolean";
+ case CK_FloatingComplexCast:
+ return "FloatingComplexCast";
+ case CK_FloatingComplexToIntegralComplex:
+ return "FloatingComplexToIntegralComplex";
+ case CK_IntegralRealToComplex:
+ return "IntegralRealToComplex";
+ case CK_IntegralComplexToReal:
+ return "IntegralComplexToReal";
+ case CK_IntegralComplexToBoolean:
+ return "IntegralComplexToBoolean";
+ case CK_IntegralComplexCast:
+ return "IntegralComplexCast";
+ case CK_IntegralComplexToFloatingComplex:
+ return "IntegralComplexToFloatingComplex";
+ case CK_ARCConsumeObject:
+ return "ARCConsumeObject";
+ case CK_ARCProduceObject:
+ return "ARCProduceObject";
+ case CK_ARCReclaimReturnedObject:
+ return "ARCReclaimReturnedObject";
+ case CK_ARCExtendBlockObject:
+ return "ARCExtendBlockObject";
+ case CK_AtomicToNonAtomic:
+ return "AtomicToNonAtomic";
+ case CK_NonAtomicToAtomic:
+ return "NonAtomicToAtomic";
+ case CK_CopyAndAutoreleaseBlockObject:
+ return "CopyAndAutoreleaseBlockObject";
+ case CK_BuiltinFnToFnPtr:
+ return "BuiltinFnToFnPtr";
+ case CK_ZeroToOCLEvent:
+ return "ZeroToOCLEvent";
+ case CK_AddressSpaceConversion:
+ return "AddressSpaceConversion";
+ }
+
+ llvm_unreachable("Unhandled cast kind!");
+}
+
+Expr *CastExpr::getSubExprAsWritten() {
+ Expr *SubExpr = nullptr;
+ CastExpr *E = this;
+ do {
+ SubExpr = E->getSubExpr();
+
+ // Skip through reference binding to temporary.
+ if (MaterializeTemporaryExpr *Materialize
+ = dyn_cast<MaterializeTemporaryExpr>(SubExpr))
+ SubExpr = Materialize->GetTemporaryExpr();
+
+ // Skip any temporary bindings; they're implicit.
+ if (CXXBindTemporaryExpr *Binder = dyn_cast<CXXBindTemporaryExpr>(SubExpr))
+ SubExpr = Binder->getSubExpr();
+
+ // Conversions by constructor and conversion functions have a
+ // subexpression describing the call; strip it off.
+ if (E->getCastKind() == CK_ConstructorConversion)
+ SubExpr = cast<CXXConstructExpr>(SubExpr)->getArg(0);
+ else if (E->getCastKind() == CK_UserDefinedConversion)
+ SubExpr = cast<CXXMemberCallExpr>(SubExpr)->getImplicitObjectArgument();
+
+ // If the subexpression we're left with is an implicit cast, look
+ // through that, too.
+ } while ((E = dyn_cast<ImplicitCastExpr>(SubExpr)));
+
+ return SubExpr;
+}
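+
+// Illustrative sketch of the traversal above: for `std::string s = "abc";`,
+// the initializer is a CK_ConstructorConversion cast around a
+// CXXConstructExpr whose argument is itself an implicit array-to-pointer
+// cast of the StringLiteral; getSubExprAsWritten() unwinds all of these and
+// returns the StringLiteral the programmer actually wrote.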
+
+CXXBaseSpecifier **CastExpr::path_buffer() {
+ switch (getStmtClass()) {
+#define ABSTRACT_STMT(x)
+#define CASTEXPR(Type, Base) \
+ case Stmt::Type##Class: \
+ return static_cast<Type *>(this)->getTrailingObjects<CXXBaseSpecifier *>();
+#define STMT(Type, Base)
+#include "clang/AST/StmtNodes.inc"
+ default:
+ llvm_unreachable("non-cast expressions not possible here");
+ }
+}
+
+ImplicitCastExpr *ImplicitCastExpr::Create(const ASTContext &C, QualType T,
+ CastKind Kind, Expr *Operand,
+ const CXXCastPath *BasePath,
+ ExprValueKind VK) {
+ unsigned PathSize = (BasePath ? BasePath->size() : 0);
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
+ ImplicitCastExpr *E =
+ new (Buffer) ImplicitCastExpr(T, Kind, Operand, PathSize, VK);
+ if (PathSize)
+ std::uninitialized_copy_n(BasePath->data(), BasePath->size(),
+ E->getTrailingObjects<CXXBaseSpecifier *>());
+ return E;
+}
+
+ImplicitCastExpr *ImplicitCastExpr::CreateEmpty(const ASTContext &C,
+ unsigned PathSize) {
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
+ return new (Buffer) ImplicitCastExpr(EmptyShell(), PathSize);
+}
+
+CStyleCastExpr *CStyleCastExpr::Create(const ASTContext &C, QualType T,
+ ExprValueKind VK, CastKind K, Expr *Op,
+ const CXXCastPath *BasePath,
+ TypeSourceInfo *WrittenTy,
+ SourceLocation L, SourceLocation R) {
+ unsigned PathSize = (BasePath ? BasePath->size() : 0);
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
+ CStyleCastExpr *E =
+ new (Buffer) CStyleCastExpr(T, VK, K, Op, PathSize, WrittenTy, L, R);
+ if (PathSize)
+ std::uninitialized_copy_n(BasePath->data(), BasePath->size(),
+ E->getTrailingObjects<CXXBaseSpecifier *>());
+ return E;
+}
+
+CStyleCastExpr *CStyleCastExpr::CreateEmpty(const ASTContext &C,
+ unsigned PathSize) {
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
+ return new (Buffer) CStyleCastExpr(EmptyShell(), PathSize);
+}
+
+/// getOpcodeStr - Turn an Opcode enum value into the punctuation string it
+/// corresponds to, e.g. "<<=".
+StringRef BinaryOperator::getOpcodeStr(Opcode Op) {
+ switch (Op) {
+ case BO_PtrMemD: return ".*";
+ case BO_PtrMemI: return "->*";
+ case BO_Mul: return "*";
+ case BO_Div: return "/";
+ case BO_Rem: return "%";
+ case BO_Add: return "+";
+ case BO_Sub: return "-";
+ case BO_Shl: return "<<";
+ case BO_Shr: return ">>";
+ case BO_LT: return "<";
+ case BO_GT: return ">";
+ case BO_LE: return "<=";
+ case BO_GE: return ">=";
+ case BO_EQ: return "==";
+ case BO_NE: return "!=";
+ case BO_And: return "&";
+ case BO_Xor: return "^";
+ case BO_Or: return "|";
+ case BO_LAnd: return "&&";
+ case BO_LOr: return "||";
+ case BO_Assign: return "=";
+ case BO_MulAssign: return "*=";
+ case BO_DivAssign: return "/=";
+ case BO_RemAssign: return "%=";
+ case BO_AddAssign: return "+=";
+ case BO_SubAssign: return "-=";
+ case BO_ShlAssign: return "<<=";
+ case BO_ShrAssign: return ">>=";
+ case BO_AndAssign: return "&=";
+ case BO_XorAssign: return "^=";
+ case BO_OrAssign: return "|=";
+ case BO_Comma: return ",";
+ }
+
+ llvm_unreachable("Invalid OpCode!");
+}
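+
+// Hypothetical usage: BinaryOperator::getOpcodeStr(BO_ShlAssign) returns
+// "<<=", and the resulting StringRef can be streamed straight into a
+// diagnostic or dump.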
+
+BinaryOperatorKind
+BinaryOperator::getOverloadedOpcode(OverloadedOperatorKind OO) {
+ switch (OO) {
+ default: llvm_unreachable("Not an overloadable binary operator");
+ case OO_Plus: return BO_Add;
+ case OO_Minus: return BO_Sub;
+ case OO_Star: return BO_Mul;
+ case OO_Slash: return BO_Div;
+ case OO_Percent: return BO_Rem;
+ case OO_Caret: return BO_Xor;
+ case OO_Amp: return BO_And;
+ case OO_Pipe: return BO_Or;
+ case OO_Equal: return BO_Assign;
+ case OO_Less: return BO_LT;
+ case OO_Greater: return BO_GT;
+ case OO_PlusEqual: return BO_AddAssign;
+ case OO_MinusEqual: return BO_SubAssign;
+ case OO_StarEqual: return BO_MulAssign;
+ case OO_SlashEqual: return BO_DivAssign;
+ case OO_PercentEqual: return BO_RemAssign;
+ case OO_CaretEqual: return BO_XorAssign;
+ case OO_AmpEqual: return BO_AndAssign;
+ case OO_PipeEqual: return BO_OrAssign;
+ case OO_LessLess: return BO_Shl;
+ case OO_GreaterGreater: return BO_Shr;
+ case OO_LessLessEqual: return BO_ShlAssign;
+ case OO_GreaterGreaterEqual: return BO_ShrAssign;
+ case OO_EqualEqual: return BO_EQ;
+ case OO_ExclaimEqual: return BO_NE;
+ case OO_LessEqual: return BO_LE;
+ case OO_GreaterEqual: return BO_GE;
+ case OO_AmpAmp: return BO_LAnd;
+ case OO_PipePipe: return BO_LOr;
+ case OO_Comma: return BO_Comma;
+ case OO_ArrowStar: return BO_PtrMemI;
+ }
+}
+
+OverloadedOperatorKind BinaryOperator::getOverloadedOperator(Opcode Opc) {
+ static const OverloadedOperatorKind OverOps[] = {
+    /* .* Cannot be overloaded */ OO_None, OO_ArrowStar,
+ OO_Star, OO_Slash, OO_Percent,
+ OO_Plus, OO_Minus,
+ OO_LessLess, OO_GreaterGreater,
+ OO_Less, OO_Greater, OO_LessEqual, OO_GreaterEqual,
+ OO_EqualEqual, OO_ExclaimEqual,
+ OO_Amp,
+ OO_Caret,
+ OO_Pipe,
+ OO_AmpAmp,
+ OO_PipePipe,
+ OO_Equal, OO_StarEqual,
+ OO_SlashEqual, OO_PercentEqual,
+ OO_PlusEqual, OO_MinusEqual,
+ OO_LessLessEqual, OO_GreaterGreaterEqual,
+ OO_AmpEqual, OO_CaretEqual,
+ OO_PipeEqual,
+ OO_Comma
+ };
+ return OverOps[Opc];
+}
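+
+// Note: OverOps is indexed directly by Opcode, so its entries must stay in
+// the exact order of the BinaryOperatorKind enumeration, from BO_PtrMemD
+// through BO_Comma; getOverloadedOperator(BO_Shl), for example, lands on
+// OO_LessLess.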
+
+InitListExpr::InitListExpr(const ASTContext &C, SourceLocation lbraceloc,
+ ArrayRef<Expr*> initExprs, SourceLocation rbraceloc)
+ : Expr(InitListExprClass, QualType(), VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ InitExprs(C, initExprs.size()),
+ LBraceLoc(lbraceloc), RBraceLoc(rbraceloc), AltForm(nullptr, true)
+{
+ sawArrayRangeDesignator(false);
+ for (unsigned I = 0; I != initExprs.size(); ++I) {
+ if (initExprs[I]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (initExprs[I]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (initExprs[I]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (initExprs[I]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+ }
+
+ InitExprs.insert(C, InitExprs.end(), initExprs.begin(), initExprs.end());
+}
+
+void InitListExpr::reserveInits(const ASTContext &C, unsigned NumInits) {
+ if (NumInits > InitExprs.size())
+ InitExprs.reserve(C, NumInits);
+}
+
+void InitListExpr::resizeInits(const ASTContext &C, unsigned NumInits) {
+ InitExprs.resize(C, NumInits, nullptr);
+}
+
+Expr *InitListExpr::updateInit(const ASTContext &C, unsigned Init, Expr *expr) {
+ if (Init >= InitExprs.size()) {
+ InitExprs.insert(C, InitExprs.end(), Init - InitExprs.size() + 1, nullptr);
+ setInit(Init, expr);
+ return nullptr;
+ }
+
+ Expr *Result = cast_or_null<Expr>(InitExprs[Init]);
+ setInit(Init, expr);
+ return Result;
+}
+
+void InitListExpr::setArrayFiller(Expr *filler) {
+ assert(!hasArrayFiller() && "Filler already set!");
+ ArrayFillerOrUnionFieldInit = filler;
+ // Fill out any "holes" in the array due to designated initializers.
+ Expr **inits = getInits();
+ for (unsigned i = 0, e = getNumInits(); i != e; ++i)
+ if (inits[i] == nullptr)
+ inits[i] = filler;
+}
+
+bool InitListExpr::isStringLiteralInit() const {
+ if (getNumInits() != 1)
+ return false;
+ const ArrayType *AT = getType()->getAsArrayTypeUnsafe();
+ if (!AT || !AT->getElementType()->isIntegerType())
+ return false;
+ // It is possible for getInit() to return null.
+ const Expr *Init = getInit(0);
+ if (!Init)
+ return false;
+ Init = Init->IgnoreParens();
+ return isa<StringLiteral>(Init) || isa<ObjCEncodeExpr>(Init);
+}
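+
+// Example (illustrative): `char buf[4] = {"abc"};` is modeled as an
+// InitListExpr of array type whose single initializer is the StringLiteral,
+// so isStringLiteralInit() returns true; `int x[1] = {0};` fails the
+// StringLiteral check and returns false.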
+
+SourceLocation InitListExpr::getLocStart() const {
+ if (InitListExpr *SyntacticForm = getSyntacticForm())
+ return SyntacticForm->getLocStart();
+ SourceLocation Beg = LBraceLoc;
+ if (Beg.isInvalid()) {
+ // Find the first non-null initializer.
+ for (InitExprsTy::const_iterator I = InitExprs.begin(),
+ E = InitExprs.end();
+ I != E; ++I) {
+ if (Stmt *S = *I) {
+ Beg = S->getLocStart();
+ break;
+ }
+ }
+ }
+ return Beg;
+}
+
+SourceLocation InitListExpr::getLocEnd() const {
+ if (InitListExpr *SyntacticForm = getSyntacticForm())
+ return SyntacticForm->getLocEnd();
+ SourceLocation End = RBraceLoc;
+ if (End.isInvalid()) {
+ // Find the first non-null initializer from the end.
+ for (InitExprsTy::const_reverse_iterator I = InitExprs.rbegin(),
+ E = InitExprs.rend();
+ I != E; ++I) {
+ if (Stmt *S = *I) {
+ End = S->getLocEnd();
+ break;
+ }
+ }
+ }
+ return End;
+}
+
+/// getFunctionType - Return the underlying function type for this block.
+///
+const FunctionProtoType *BlockExpr::getFunctionType() const {
+ // The block pointer is never sugared, but the function type might be.
+ return cast<BlockPointerType>(getType())
+ ->getPointeeType()->castAs<FunctionProtoType>();
+}
+
+SourceLocation BlockExpr::getCaretLocation() const {
+ return TheBlock->getCaretLocation();
+}
+const Stmt *BlockExpr::getBody() const {
+ return TheBlock->getBody();
+}
+Stmt *BlockExpr::getBody() {
+ return TheBlock->getBody();
+}
+
+//===----------------------------------------------------------------------===//
+// Generic Expression Routines
+//===----------------------------------------------------------------------===//
+
+/// isUnusedResultAWarning - Return true if this immediate expression should
+/// be warned about if the result is unused. If so, fill in Loc and Ranges
+/// with location to warn on and the source range[s] to report with the
+/// warning.
+bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
+ SourceRange &R1, SourceRange &R2,
+ ASTContext &Ctx) const {
+ // Don't warn if the expr is type dependent. The type could end up
+ // instantiating to void.
+ if (isTypeDependent())
+ return false;
+
+ switch (getStmtClass()) {
+ default:
+ if (getType()->isVoidType())
+ return false;
+ WarnE = this;
+ Loc = getExprLoc();
+ R1 = getSourceRange();
+ return true;
+ case ParenExprClass:
+ return cast<ParenExpr>(this)->getSubExpr()->
+ isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
+ case GenericSelectionExprClass:
+ return cast<GenericSelectionExpr>(this)->getResultExpr()->
+ isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
+ case ChooseExprClass:
+ return cast<ChooseExpr>(this)->getChosenSubExpr()->
+ isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
+ case UnaryOperatorClass: {
+ const UnaryOperator *UO = cast<UnaryOperator>(this);
+
+ switch (UO->getOpcode()) {
+ case UO_Plus:
+ case UO_Minus:
+ case UO_AddrOf:
+ case UO_Not:
+ case UO_LNot:
+ case UO_Deref:
+ break;
+ case UO_Coawait:
+ // This is just the 'operator co_await' call inside the guts of a
+ // dependent co_await call.
+ case UO_PostInc:
+ case UO_PostDec:
+ case UO_PreInc:
+ case UO_PreDec: // ++/--
+ return false; // Not a warning.
+ case UO_Real:
+ case UO_Imag:
+      // Accessing a piece of a volatile complex is a side-effect.
+ if (Ctx.getCanonicalType(UO->getSubExpr()->getType())
+ .isVolatileQualified())
+ return false;
+ break;
+ case UO_Extension:
+ return UO->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
+ }
+ WarnE = this;
+ Loc = UO->getOperatorLoc();
+ R1 = UO->getSubExpr()->getSourceRange();
+ return true;
+ }
+ case BinaryOperatorClass: {
+ const BinaryOperator *BO = cast<BinaryOperator>(this);
+ switch (BO->getOpcode()) {
+ default:
+ break;
+ // Consider the RHS of comma for side effects. LHS was checked by
+ // Sema::CheckCommaOperands.
+ case BO_Comma:
+ // ((foo = <blah>), 0) is an idiom for hiding the result (and
+ // lvalue-ness) of an assignment written in a macro.
+ if (IntegerLiteral *IE =
+ dyn_cast<IntegerLiteral>(BO->getRHS()->IgnoreParens()))
+ if (IE->getValue() == 0)
+ return false;
+ return BO->getRHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
+ // Consider '||', '&&' to have side effects if the LHS or RHS does.
+ case BO_LAnd:
+ case BO_LOr:
+ if (!BO->getLHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx) ||
+ !BO->getRHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx))
+ return false;
+ break;
+ }
+ if (BO->isAssignmentOp())
+ return false;
+ WarnE = this;
+ Loc = BO->getOperatorLoc();
+ R1 = BO->getLHS()->getSourceRange();
+ R2 = BO->getRHS()->getSourceRange();
+ return true;
+ }
+ case CompoundAssignOperatorClass:
+ case VAArgExprClass:
+ case AtomicExprClass:
+ return false;
+
+ case ConditionalOperatorClass: {
+    // If only one of the LHS or RHS is a warning, the operator might be
+    // used for control flow. Only warn if both the LHS and RHS are
+    // warnings.
+ const ConditionalOperator *Exp = cast<ConditionalOperator>(this);
+ if (!Exp->getRHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx))
+ return false;
+ if (!Exp->getLHS())
+ return true;
+ return Exp->getLHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
+ }
+
+ case MemberExprClass:
+ WarnE = this;
+ Loc = cast<MemberExpr>(this)->getMemberLoc();
+ R1 = SourceRange(Loc, Loc);
+ R2 = cast<MemberExpr>(this)->getBase()->getSourceRange();
+ return true;
+
+ case ArraySubscriptExprClass:
+ WarnE = this;
+ Loc = cast<ArraySubscriptExpr>(this)->getRBracketLoc();
+ R1 = cast<ArraySubscriptExpr>(this)->getLHS()->getSourceRange();
+ R2 = cast<ArraySubscriptExpr>(this)->getRHS()->getSourceRange();
+ return true;
+
+ case CXXOperatorCallExprClass: {
+    // Warn about the comparison operators ==, !=, <, >, <=, and >= even for
+    // user-defined operator overloads, as there is no reasonable way to
+    // define these such that they have non-trivial, desirable side-effects.
+    // See the -Wunused-comparison warning: operators == and != are commonly
+    // typo'ed, so warning on them provides additional value as well. If this
+    // list is updated, DiagnoseUnusedComparison should be as well.
+ const CXXOperatorCallExpr *Op = cast<CXXOperatorCallExpr>(this);
+ switch (Op->getOperator()) {
+ default:
+ break;
+ case OO_EqualEqual:
+ case OO_ExclaimEqual:
+ case OO_Less:
+ case OO_Greater:
+ case OO_GreaterEqual:
+ case OO_LessEqual:
+ if (Op->getCallReturnType(Ctx)->isReferenceType() ||
+ Op->getCallReturnType(Ctx)->isVoidType())
+ break;
+ WarnE = this;
+ Loc = Op->getOperatorLoc();
+ R1 = Op->getSourceRange();
+ return true;
+ }
+
+ // Fallthrough for generic call handling.
+ }
+ case CallExprClass:
+ case CXXMemberCallExprClass:
+ case UserDefinedLiteralClass: {
+ // If this is a direct call, get the callee.
+ const CallExpr *CE = cast<CallExpr>(this);
+ if (const Decl *FD = CE->getCalleeDecl()) {
+ const FunctionDecl *Func = dyn_cast<FunctionDecl>(FD);
+ bool HasWarnUnusedResultAttr = Func ? Func->hasUnusedResultAttr()
+ : FD->hasAttr<WarnUnusedResultAttr>();
+
+ // If the callee has attribute pure, const, or warn_unused_result, warn
+ // about it. void foo() { strlen("bar"); } should warn.
+ //
+ // Note: If new cases are added here, DiagnoseUnusedExprResult should be
+ // updated to match for QoI.
+ if (HasWarnUnusedResultAttr ||
+ FD->hasAttr<PureAttr>() || FD->hasAttr<ConstAttr>()) {
+ WarnE = this;
+ Loc = CE->getCallee()->getLocStart();
+ R1 = CE->getCallee()->getSourceRange();
+
+ if (unsigned NumArgs = CE->getNumArgs())
+ R2 = SourceRange(CE->getArg(0)->getLocStart(),
+ CE->getArg(NumArgs-1)->getLocEnd());
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // If we don't know precisely what we're looking at, let's not warn.
+ case UnresolvedLookupExprClass:
+ case CXXUnresolvedConstructExprClass:
+ return false;
+
+ case CXXTemporaryObjectExprClass:
+ case CXXConstructExprClass: {
+ if (const CXXRecordDecl *Type = getType()->getAsCXXRecordDecl()) {
+ if (Type->hasAttr<WarnUnusedAttr>()) {
+ WarnE = this;
+ Loc = getLocStart();
+ R1 = getSourceRange();
+ return true;
+ }
+ }
+ return false;
+ }
+
+ case ObjCMessageExprClass: {
+ const ObjCMessageExpr *ME = cast<ObjCMessageExpr>(this);
+ if (Ctx.getLangOpts().ObjCAutoRefCount &&
+ ME->isInstanceMessage() &&
+ !ME->getType()->isVoidType() &&
+ ME->getMethodFamily() == OMF_init) {
+ WarnE = this;
+ Loc = getExprLoc();
+ R1 = ME->getSourceRange();
+ return true;
+ }
+
+ if (const ObjCMethodDecl *MD = ME->getMethodDecl())
+ if (MD->hasAttr<WarnUnusedResultAttr>()) {
+ WarnE = this;
+ Loc = getExprLoc();
+ return true;
+ }
+
+ return false;
+ }
+
+ case ObjCPropertyRefExprClass:
+ WarnE = this;
+ Loc = getExprLoc();
+ R1 = getSourceRange();
+ return true;
+
+ case PseudoObjectExprClass: {
+ const PseudoObjectExpr *PO = cast<PseudoObjectExpr>(this);
+
+ // Only complain about things that have the form of a getter.
+ if (isa<UnaryOperator>(PO->getSyntacticForm()) ||
+ isa<BinaryOperator>(PO->getSyntacticForm()))
+ return false;
+
+ WarnE = this;
+ Loc = getExprLoc();
+ R1 = getSourceRange();
+ return true;
+ }
+
+ case StmtExprClass: {
+    // Statement exprs don't logically have side effects themselves, but are
+    // sometimes used in macros in ways that give them a type that is unused.
+    // For example, ({ blah; foo(); }) will end up with a type if foo has a
+    // type. However, if the result of the stmt expr is dead, we don't want
+    // to emit a warning.
+ const CompoundStmt *CS = cast<StmtExpr>(this)->getSubStmt();
+ if (!CS->body_empty()) {
+ if (const Expr *E = dyn_cast<Expr>(CS->body_back()))
+ return E->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
+ if (const LabelStmt *Label = dyn_cast<LabelStmt>(CS->body_back()))
+ if (const Expr *E = dyn_cast<Expr>(Label->getSubStmt()))
+ return E->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
+ }
+
+ if (getType()->isVoidType())
+ return false;
+ WarnE = this;
+ Loc = cast<StmtExpr>(this)->getLParenLoc();
+ R1 = getSourceRange();
+ return true;
+ }
+ case CXXFunctionalCastExprClass:
+ case CStyleCastExprClass: {
+ // Ignore an explicit cast to void unless the operand is a non-trivial
+ // volatile lvalue.
+ const CastExpr *CE = cast<CastExpr>(this);
+ if (CE->getCastKind() == CK_ToVoid) {
+ if (CE->getSubExpr()->isGLValue() &&
+ CE->getSubExpr()->getType().isVolatileQualified()) {
+ const DeclRefExpr *DRE =
+ dyn_cast<DeclRefExpr>(CE->getSubExpr()->IgnoreParens());
+ if (!(DRE && isa<VarDecl>(DRE->getDecl()) &&
+ cast<VarDecl>(DRE->getDecl())->hasLocalStorage())) {
+ return CE->getSubExpr()->isUnusedResultAWarning(WarnE, Loc,
+ R1, R2, Ctx);
+ }
+ }
+ return false;
+ }
+
+ // If this is a cast to a constructor conversion, check the operand.
+ // Otherwise, the result of the cast is unused.
+ if (CE->getCastKind() == CK_ConstructorConversion)
+ return CE->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
+
+ WarnE = this;
+ if (const CXXFunctionalCastExpr *CXXCE =
+ dyn_cast<CXXFunctionalCastExpr>(this)) {
+ Loc = CXXCE->getLocStart();
+ R1 = CXXCE->getSubExpr()->getSourceRange();
+ } else {
+ const CStyleCastExpr *CStyleCE = cast<CStyleCastExpr>(this);
+ Loc = CStyleCE->getLParenLoc();
+ R1 = CStyleCE->getSubExpr()->getSourceRange();
+ }
+ return true;
+ }
+ case ImplicitCastExprClass: {
+ const CastExpr *ICE = cast<ImplicitCastExpr>(this);
+
+ // lvalue-to-rvalue conversion on a volatile lvalue is a side-effect.
+ if (ICE->getCastKind() == CK_LValueToRValue &&
+ ICE->getSubExpr()->getType().isVolatileQualified())
+ return false;
+
+ return ICE->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
+ }
+ case CXXDefaultArgExprClass:
+ return (cast<CXXDefaultArgExpr>(this)
+ ->getExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx));
+ case CXXDefaultInitExprClass:
+ return (cast<CXXDefaultInitExpr>(this)
+ ->getExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx));
+
+ case CXXNewExprClass:
+ // FIXME: In theory, there might be new expressions that don't have side
+ // effects (e.g. a placement new with an uninitialized POD).
+ case CXXDeleteExprClass:
+ return false;
+ case CXXBindTemporaryExprClass:
+ return (cast<CXXBindTemporaryExpr>(this)
+ ->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx));
+ case ExprWithCleanupsClass:
+ return (cast<ExprWithCleanups>(this)
+ ->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx));
+ }
+}
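+
+// Example (illustrative): for a discarded comparison statement such as
+// `x == 1;`, the BinaryOperatorClass case above fires, setting WarnE to the
+// whole expression, Loc to the position of `==`, and R1/R2 to the operand
+// ranges that the unused-value diagnostic then underlines.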
+
+/// isOBJCGCCandidate - Check if an expression is objc gc'able. Returns true
+/// if it is; false otherwise.
+bool Expr::isOBJCGCCandidate(ASTContext &Ctx) const {
+ const Expr *E = IgnoreParens();
+ switch (E->getStmtClass()) {
+ default:
+ return false;
+ case ObjCIvarRefExprClass:
+ return true;
+ case Expr::UnaryOperatorClass:
+ return cast<UnaryOperator>(E)->getSubExpr()->isOBJCGCCandidate(Ctx);
+ case ImplicitCastExprClass:
+ return cast<ImplicitCastExpr>(E)->getSubExpr()->isOBJCGCCandidate(Ctx);
+ case MaterializeTemporaryExprClass:
+ return cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr()
+ ->isOBJCGCCandidate(Ctx);
+ case CStyleCastExprClass:
+ return cast<CStyleCastExpr>(E)->getSubExpr()->isOBJCGCCandidate(Ctx);
+ case DeclRefExprClass: {
+ const Decl *D = cast<DeclRefExpr>(E)->getDecl();
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->hasGlobalStorage())
+ return true;
+ QualType T = VD->getType();
+      // Dereferencing a pointer is always a gc'able candidate,
+      // unless the pointer is __weak.
+ return T->isPointerType() &&
+ (Ctx.getObjCGCAttrKind(T) != Qualifiers::Weak);
+ }
+ return false;
+ }
+ case MemberExprClass: {
+ const MemberExpr *M = cast<MemberExpr>(E);
+ return M->getBase()->isOBJCGCCandidate(Ctx);
+ }
+ case ArraySubscriptExprClass:
+ return cast<ArraySubscriptExpr>(E)->getBase()->isOBJCGCCandidate(Ctx);
+ }
+}
+
+bool Expr::isBoundMemberFunction(ASTContext &Ctx) const {
+ if (isTypeDependent())
+ return false;
+ return ClassifyLValue(Ctx) == Expr::LV_MemberFunction;
+}
+
+QualType Expr::findBoundMemberType(const Expr *expr) {
+ assert(expr->hasPlaceholderType(BuiltinType::BoundMember));
+
+ // Bound member expressions are always one of these possibilities:
+ // x->m x.m x->*y x.*y
+ // (possibly parenthesized)
+
+ expr = expr->IgnoreParens();
+ if (const MemberExpr *mem = dyn_cast<MemberExpr>(expr)) {
+ assert(isa<CXXMethodDecl>(mem->getMemberDecl()));
+ return mem->getMemberDecl()->getType();
+ }
+
+ if (const BinaryOperator *op = dyn_cast<BinaryOperator>(expr)) {
+ QualType type = op->getRHS()->getType()->castAs<MemberPointerType>()
+ ->getPointeeType();
+ assert(type->isFunctionType());
+ return type;
+ }
+
+ assert(isa<UnresolvedMemberExpr>(expr) || isa<CXXPseudoDestructorExpr>(expr));
+ return QualType();
+}
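+
+// Example (illustrative): for `obj.method` written without a call, the
+// MemberExpr path above returns the method's function type; for
+// `obj.*memfn`, the pointee type of the MemberPointerType on the RHS is
+// returned instead.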
+
+Expr* Expr::IgnoreParens() {
+ Expr* E = this;
+ while (true) {
+ if (ParenExpr* P = dyn_cast<ParenExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ }
+ if (UnaryOperator* P = dyn_cast<UnaryOperator>(E)) {
+ if (P->getOpcode() == UO_Extension) {
+ E = P->getSubExpr();
+ continue;
+ }
+ }
+ if (GenericSelectionExpr* P = dyn_cast<GenericSelectionExpr>(E)) {
+ if (!P->isResultDependent()) {
+ E = P->getResultExpr();
+ continue;
+ }
+ }
+ if (ChooseExpr* P = dyn_cast<ChooseExpr>(E)) {
+ if (!P->isConditionDependent()) {
+ E = P->getChosenSubExpr();
+ continue;
+ }
+ }
+ return E;
+ }
+}
+
+/// IgnoreParenCasts - Ignore parentheses and casts. Strip off any ParenExpr
+/// or CastExprs or ImplicitCastExprs, returning their operand.
+Expr *Expr::IgnoreParenCasts() {
+ Expr *E = this;
+ while (true) {
+ E = E->IgnoreParens();
+ if (CastExpr *P = dyn_cast<CastExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ }
+ if (MaterializeTemporaryExpr *Materialize
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = Materialize->GetTemporaryExpr();
+ continue;
+ }
+ if (SubstNonTypeTemplateParmExpr *NTTP
+ = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
+ E = NTTP->getReplacement();
+ continue;
+ }
+ return E;
+ }
+}
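+
+// Example (illustrative): given `((int)(x))`, IgnoreParenCasts() peels both
+// ParenExprs and the CStyleCastExpr and returns the DeclRefExpr for `x`,
+// whereas IgnoreParens() alone would stop at the cast.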
+
+Expr *Expr::IgnoreCasts() {
+ Expr *E = this;
+ while (true) {
+ if (CastExpr *P = dyn_cast<CastExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ }
+ if (MaterializeTemporaryExpr *Materialize
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = Materialize->GetTemporaryExpr();
+ continue;
+ }
+ if (SubstNonTypeTemplateParmExpr *NTTP
+ = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
+ E = NTTP->getReplacement();
+ continue;
+ }
+ return E;
+ }
+}
+
+/// IgnoreParenLValueCasts - Ignore parentheses and lvalue-to-rvalue
+/// casts. This is intended purely as a temporary workaround for code
+/// that hasn't yet been rewritten to do the right thing about those
+/// casts, and may disappear along with the last internal use.
+Expr *Expr::IgnoreParenLValueCasts() {
+ Expr *E = this;
+ while (true) {
+ E = E->IgnoreParens();
+ if (CastExpr *P = dyn_cast<CastExpr>(E)) {
+ if (P->getCastKind() == CK_LValueToRValue) {
+ E = P->getSubExpr();
+ continue;
+ }
+ } else if (MaterializeTemporaryExpr *Materialize
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = Materialize->GetTemporaryExpr();
+ continue;
+ } else if (SubstNonTypeTemplateParmExpr *NTTP
+ = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
+ E = NTTP->getReplacement();
+ continue;
+ }
+ break;
+ }
+ return E;
+}
+
+Expr *Expr::ignoreParenBaseCasts() {
+ Expr *E = this;
+ while (true) {
+ E = E->IgnoreParens();
+ if (CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if (CE->getCastKind() == CK_DerivedToBase ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase ||
+ CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ }
+
+ return E;
+ }
+}
+
+Expr *Expr::IgnoreParenImpCasts() {
+ Expr *E = this;
+ while (true) {
+ E = E->IgnoreParens();
+ if (ImplicitCastExpr *P = dyn_cast<ImplicitCastExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ }
+ if (MaterializeTemporaryExpr *Materialize
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = Materialize->GetTemporaryExpr();
+ continue;
+ }
+ if (SubstNonTypeTemplateParmExpr *NTTP
+ = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
+ E = NTTP->getReplacement();
+ continue;
+ }
+ return E;
+ }
+}
+
+Expr *Expr::IgnoreConversionOperator() {
+ if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(this)) {
+ if (MCE->getMethodDecl() && isa<CXXConversionDecl>(MCE->getMethodDecl()))
+ return MCE->getImplicitObjectArgument();
+ }
+ return this;
+}
+
+/// IgnoreParenNoopCasts - Ignore parentheses and casts that do not change the
+/// value (including ptr->int casts of the same size). Strip off any
+/// ParenExpr or CastExprs, returning their operand.
+Expr *Expr::IgnoreParenNoopCasts(ASTContext &Ctx) {
+ Expr *E = this;
+ while (true) {
+ E = E->IgnoreParens();
+
+ if (CastExpr *P = dyn_cast<CastExpr>(E)) {
+      // We ignore integer <-> integer casts of the same width, ptr <-> ptr
+      // casts, and ptr <-> int casts of the same width. We also ignore all
+      // identity casts.
+ Expr *SE = P->getSubExpr();
+
+ if (Ctx.hasSameUnqualifiedType(E->getType(), SE->getType())) {
+ E = SE;
+ continue;
+ }
+
+ if ((E->getType()->isPointerType() ||
+ E->getType()->isIntegralType(Ctx)) &&
+ (SE->getType()->isPointerType() ||
+ SE->getType()->isIntegralType(Ctx)) &&
+ Ctx.getTypeSize(E->getType()) == Ctx.getTypeSize(SE->getType())) {
+ E = SE;
+ continue;
+ }
+ }
+
+ if (SubstNonTypeTemplateParmExpr *NTTP
+ = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
+ E = NTTP->getReplacement();
+ continue;
+ }
+
+ return E;
+ }
+}
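+
+// Example (illustrative): on typical targets, IgnoreParenNoopCasts() looks
+// through `(unsigned)i` for an int `i`, since both sides are integral types
+// of equal width, while a narrowing cast such as `(short)i` is preserved.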
+
+bool Expr::isDefaultArgument() const {
+ const Expr *E = this;
+ if (const MaterializeTemporaryExpr *M = dyn_cast<MaterializeTemporaryExpr>(E))
+ E = M->GetTemporaryExpr();
+
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
+ E = ICE->getSubExprAsWritten();
+
+ return isa<CXXDefaultArgExpr>(E);
+}
+
+/// \brief Skip over any no-op casts and any temporary-binding
+/// expressions.
+static const Expr *skipTemporaryBindingsNoOpCastsAndParens(const Expr *E) {
+ if (const MaterializeTemporaryExpr *M = dyn_cast<MaterializeTemporaryExpr>(E))
+ E = M->GetTemporaryExpr();
+
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() == CK_NoOp)
+ E = ICE->getSubExpr();
+ else
+ break;
+ }
+
+ while (const CXXBindTemporaryExpr *BE = dyn_cast<CXXBindTemporaryExpr>(E))
+ E = BE->getSubExpr();
+
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() == CK_NoOp)
+ E = ICE->getSubExpr();
+ else
+ break;
+ }
+
+ return E->IgnoreParens();
+}
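+
+// Note: the no-op-cast loop runs both before and after the
+// CXXBindTemporaryExpr loop because either kind of node may wrap the other;
+// a single pass of each could leave a no-op cast sitting under a temporary
+// binding unstripped.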
+
+/// isTemporaryObject - Determines if this expression produces a
+/// temporary of the given class type.
+bool Expr::isTemporaryObject(ASTContext &C, const CXXRecordDecl *TempTy) const {
+ if (!C.hasSameUnqualifiedType(getType(), C.getTypeDeclType(TempTy)))
+ return false;
+
+ const Expr *E = skipTemporaryBindingsNoOpCastsAndParens(this);
+
+ // Temporaries are by definition pr-values of class type.
+ if (!E->Classify(C).isPRValue()) {
+    // In this context, a property reference is a message send and is a
+    // pr-value.
+ if (!isa<ObjCPropertyRefExpr>(E))
+ return false;
+ }
+
+ // Black-list a few cases which yield pr-values of class type that don't
+ // refer to temporaries of that type:
+
+ // - implicit derived-to-base conversions
+ if (isa<ImplicitCastExpr>(E)) {
+ switch (cast<ImplicitCastExpr>(E)->getCastKind()) {
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ return false;
+ default:
+ break;
+ }
+ }
+
+ // - member expressions (all)
+ if (isa<MemberExpr>(E))
+ return false;
+
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E))
+ if (BO->isPtrMemOp())
+ return false;
+
+ // - opaque values (all)
+ if (isa<OpaqueValueExpr>(E))
+ return false;
+
+ return true;
+}
+
+bool Expr::isImplicitCXXThis() const {
+ const Expr *E = this;
+
+ // Strip away parentheses and casts we don't care about.
+ while (true) {
+ if (const ParenExpr *Paren = dyn_cast<ParenExpr>(E)) {
+ E = Paren->getSubExpr();
+ continue;
+ }
+
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() == CK_NoOp ||
+ ICE->getCastKind() == CK_LValueToRValue ||
+ ICE->getCastKind() == CK_DerivedToBase ||
+ ICE->getCastKind() == CK_UncheckedDerivedToBase) {
+ E = ICE->getSubExpr();
+ continue;
+ }
+ }
+
+ if (const UnaryOperator* UnOp = dyn_cast<UnaryOperator>(E)) {
+ if (UnOp->getOpcode() == UO_Extension) {
+ E = UnOp->getSubExpr();
+ continue;
+ }
+ }
+
+ if (const MaterializeTemporaryExpr *M
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = M->GetTemporaryExpr();
+ continue;
+ }
+
+ break;
+ }
+
+ if (const CXXThisExpr *This = dyn_cast<CXXThisExpr>(E))
+ return This->isImplicit();
+
+ return false;
+}
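+
+// Example (illustrative): inside a member function, the base of an
+// unqualified member access `x` is an implicit CXXThisExpr, so
+// isImplicitCXXThis() on that base returns true; an explicitly written
+// `this->x` has a non-implicit CXXThisExpr base and returns false.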
+
+/// hasAnyTypeDependentArguments - Determines if any of the expressions
+/// in Exprs is type-dependent.
+bool Expr::hasAnyTypeDependentArguments(ArrayRef<Expr *> Exprs) {
+ for (unsigned I = 0; I < Exprs.size(); ++I)
+ if (Exprs[I]->isTypeDependent())
+ return true;
+
+ return false;
+}
+
+bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
+ const Expr **Culprit) const {
+  // This function determines whether an expression is an initializer
+  // which can be evaluated at compile-time. It very closely parallels
+  // ConstExprEmitter in CGExprConstant.cpp; if they don't match, it
+  // will lead to unexpected results. Like ConstExprEmitter, it falls back
+  // to isEvaluatable most of the time.
+ //
+ // If we ever capture reference-binding directly in the AST, we can
+ // kill the second parameter.
+
+ if (IsForRef) {
+ EvalResult Result;
+ if (EvaluateAsLValue(Result, Ctx) && !Result.HasSideEffects)
+ return true;
+ if (Culprit)
+ *Culprit = this;
+ return false;
+ }
+
+ switch (getStmtClass()) {
+ default: break;
+ case StringLiteralClass:
+ case ObjCEncodeExprClass:
+ return true;
+ case CXXTemporaryObjectExprClass:
+ case CXXConstructExprClass: {
+ const CXXConstructExpr *CE = cast<CXXConstructExpr>(this);
+
+ if (CE->getConstructor()->isTrivial() &&
+ CE->getConstructor()->getParent()->hasTrivialDestructor()) {
+ // Trivial default constructor
+ if (!CE->getNumArgs()) return true;
+
+ // Trivial copy constructor
+ assert(CE->getNumArgs() == 1 && "trivial ctor with > 1 argument");
+ return CE->getArg(0)->isConstantInitializer(Ctx, false, Culprit);
+ }
+
+ break;
+ }
+ case CompoundLiteralExprClass: {
+ // This handles gcc's extension that allows global initializers like
+ // "struct x {int x;} x = (struct x) {};".
+ // FIXME: This accepts other cases it shouldn't!
+ const Expr *Exp = cast<CompoundLiteralExpr>(this)->getInitializer();
+ return Exp->isConstantInitializer(Ctx, false, Culprit);
+ }
+ case DesignatedInitUpdateExprClass: {
+ const DesignatedInitUpdateExpr *DIUE = cast<DesignatedInitUpdateExpr>(this);
+ return DIUE->getBase()->isConstantInitializer(Ctx, false, Culprit) &&
+ DIUE->getUpdater()->isConstantInitializer(Ctx, false, Culprit);
+ }
+ case InitListExprClass: {
+ const InitListExpr *ILE = cast<InitListExpr>(this);
+ if (ILE->getType()->isArrayType()) {
+ unsigned numInits = ILE->getNumInits();
+ for (unsigned i = 0; i < numInits; i++) {
+ if (!ILE->getInit(i)->isConstantInitializer(Ctx, false, Culprit))
+ return false;
+ }
+ return true;
+ }
+
+ if (ILE->getType()->isRecordType()) {
+ unsigned ElementNo = 0;
+ RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
+ for (const auto *Field : RD->fields()) {
+ // If this is a union, skip all the fields that aren't being initialized.
+ if (RD->isUnion() && ILE->getInitializedFieldInUnion() != Field)
+ continue;
+
+ // Don't emit anonymous bitfields, they just affect layout.
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ if (ElementNo < ILE->getNumInits()) {
+ const Expr *Elt = ILE->getInit(ElementNo++);
+ if (Field->isBitField()) {
+ // Bitfields have to evaluate to an integer.
+ llvm::APSInt ResultTmp;
+ if (!Elt->EvaluateAsInt(ResultTmp, Ctx)) {
+ if (Culprit)
+ *Culprit = Elt;
+ return false;
+ }
+ } else {
+ bool RefType = Field->getType()->isReferenceType();
+ if (!Elt->isConstantInitializer(Ctx, RefType, Culprit))
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ break;
+ }
+ case ImplicitValueInitExprClass:
+ case NoInitExprClass:
+ return true;
+ case ParenExprClass:
+ return cast<ParenExpr>(this)->getSubExpr()
+ ->isConstantInitializer(Ctx, IsForRef, Culprit);
+ case GenericSelectionExprClass:
+ return cast<GenericSelectionExpr>(this)->getResultExpr()
+ ->isConstantInitializer(Ctx, IsForRef, Culprit);
+ case ChooseExprClass:
+ if (cast<ChooseExpr>(this)->isConditionDependent()) {
+ if (Culprit)
+ *Culprit = this;
+ return false;
+ }
+ return cast<ChooseExpr>(this)->getChosenSubExpr()
+ ->isConstantInitializer(Ctx, IsForRef, Culprit);
+ case UnaryOperatorClass: {
+ const UnaryOperator* Exp = cast<UnaryOperator>(this);
+ if (Exp->getOpcode() == UO_Extension)
+ return Exp->getSubExpr()->isConstantInitializer(Ctx, false, Culprit);
+ break;
+ }
+ case CXXFunctionalCastExprClass:
+ case CXXStaticCastExprClass:
+ case ImplicitCastExprClass:
+ case CStyleCastExprClass:
+ case ObjCBridgedCastExprClass:
+ case CXXDynamicCastExprClass:
+ case CXXReinterpretCastExprClass:
+ case CXXConstCastExprClass: {
+ const CastExpr *CE = cast<CastExpr>(this);
+
+ // Handle misc casts we want to ignore.
+ if (CE->getCastKind() == CK_NoOp ||
+ CE->getCastKind() == CK_LValueToRValue ||
+ CE->getCastKind() == CK_ToUnion ||
+ CE->getCastKind() == CK_ConstructorConversion ||
+ CE->getCastKind() == CK_NonAtomicToAtomic ||
+ CE->getCastKind() == CK_AtomicToNonAtomic)
+ return CE->getSubExpr()->isConstantInitializer(Ctx, false, Culprit);
+
+ break;
+ }
+ case MaterializeTemporaryExprClass:
+ return cast<MaterializeTemporaryExpr>(this)->GetTemporaryExpr()
+ ->isConstantInitializer(Ctx, false, Culprit);
+
+ case SubstNonTypeTemplateParmExprClass:
+ return cast<SubstNonTypeTemplateParmExpr>(this)->getReplacement()
+ ->isConstantInitializer(Ctx, false, Culprit);
+ case CXXDefaultArgExprClass:
+ return cast<CXXDefaultArgExpr>(this)->getExpr()
+ ->isConstantInitializer(Ctx, false, Culprit);
+ case CXXDefaultInitExprClass:
+ return cast<CXXDefaultInitExpr>(this)->getExpr()
+ ->isConstantInitializer(Ctx, false, Culprit);
+ }
+ // Allow certain forms of UB in constant initializers: signed integer
+ // overflow and floating-point division by zero. We'll give a warning on
+ // these, but they're common enough that we have to accept them.
+ if (isEvaluatable(Ctx, SE_AllowUndefinedBehavior))
+ return true;
+ if (Culprit)
+ *Culprit = this;
+ return false;
+}
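+
+// Example (illustrative): at file scope, `int x = (1 + 2) * 3;` is accepted
+// through the final isEvaluatable() fallback, while `int y = f();` for a
+// non-constexpr `f` fails and reports the call expression as the culprit.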
+
+namespace {
+ /// \brief Look for any side effects within a Stmt.
+ class SideEffectFinder : public ConstEvaluatedExprVisitor<SideEffectFinder> {
+ typedef ConstEvaluatedExprVisitor<SideEffectFinder> Inherited;
+ const bool IncludePossibleEffects;
+ bool HasSideEffects;
+
+ public:
+ explicit SideEffectFinder(const ASTContext &Context, bool IncludePossible)
+ : Inherited(Context),
+ IncludePossibleEffects(IncludePossible), HasSideEffects(false) { }
+
+ bool hasSideEffects() const { return HasSideEffects; }
+
+ void VisitExpr(const Expr *E) {
+ if (!HasSideEffects &&
+ E->HasSideEffects(Context, IncludePossibleEffects))
+ HasSideEffects = true;
+ }
+ };
+}
+
+bool Expr::HasSideEffects(const ASTContext &Ctx,
+ bool IncludePossibleEffects) const {
+  // In circumstances where we care about definite side effects instead of
+  // potential side effects, we want to ignore expressions that are part of
+  // a macro expansion, counting them as at most a potential side effect.
+ if (!IncludePossibleEffects && getExprLoc().isMacroID())
+ return false;
+
+ if (isInstantiationDependent())
+ return IncludePossibleEffects;
+
+ switch (getStmtClass()) {
+ case NoStmtClass:
+ #define ABSTRACT_STMT(Type)
+ #define STMT(Type, Base) case Type##Class:
+ #define EXPR(Type, Base)
+ #include "clang/AST/StmtNodes.inc"
+ llvm_unreachable("unexpected Expr kind");
+
+ case DependentScopeDeclRefExprClass:
+ case CXXUnresolvedConstructExprClass:
+ case CXXDependentScopeMemberExprClass:
+ case UnresolvedLookupExprClass:
+ case UnresolvedMemberExprClass:
+ case PackExpansionExprClass:
+ case SubstNonTypeTemplateParmPackExprClass:
+ case FunctionParmPackExprClass:
+ case TypoExprClass:
+ case CXXFoldExprClass:
+ llvm_unreachable("shouldn't see dependent / unresolved nodes here");
+
+ case DeclRefExprClass:
+ case ObjCIvarRefExprClass:
+ case PredefinedExprClass:
+ case IntegerLiteralClass:
+ case FloatingLiteralClass:
+ case ImaginaryLiteralClass:
+ case StringLiteralClass:
+ case CharacterLiteralClass:
+ case OffsetOfExprClass:
+ case ImplicitValueInitExprClass:
+ case UnaryExprOrTypeTraitExprClass:
+ case AddrLabelExprClass:
+ case GNUNullExprClass:
+ case NoInitExprClass:
+ case CXXBoolLiteralExprClass:
+ case CXXNullPtrLiteralExprClass:
+ case CXXThisExprClass:
+ case CXXScalarValueInitExprClass:
+ case TypeTraitExprClass:
+ case ArrayTypeTraitExprClass:
+ case ExpressionTraitExprClass:
+ case CXXNoexceptExprClass:
+ case SizeOfPackExprClass:
+ case ObjCStringLiteralClass:
+ case ObjCEncodeExprClass:
+ case ObjCBoolLiteralExprClass:
+ case CXXUuidofExprClass:
+ case OpaqueValueExprClass:
+ // These never have a side-effect.
+ return false;
+
+ case CallExprClass:
+ case CXXOperatorCallExprClass:
+ case CXXMemberCallExprClass:
+ case CUDAKernelCallExprClass:
+ case UserDefinedLiteralClass: {
+    // We don't know whether a call definitely has side effects, except for
+    // calls to pure/const functions, which definitely don't.
+    // If the call itself is considered side-effect free, check the operands.
+ const Decl *FD = cast<CallExpr>(this)->getCalleeDecl();
+ bool IsPure = FD && (FD->hasAttr<ConstAttr>() || FD->hasAttr<PureAttr>());
+ if (IsPure || !IncludePossibleEffects)
+ break;
+ return true;
+ }
+
+ case BlockExprClass:
+ case CXXBindTemporaryExprClass:
+ if (!IncludePossibleEffects)
+ break;
+ return true;
+
+ case MSPropertyRefExprClass:
+ case MSPropertySubscriptExprClass:
+ case CompoundAssignOperatorClass:
+ case VAArgExprClass:
+ case AtomicExprClass:
+ case CXXThrowExprClass:
+ case CXXNewExprClass:
+ case CXXDeleteExprClass:
+ case ExprWithCleanupsClass:
+ case CoawaitExprClass:
+ case CoyieldExprClass:
+ // These always have a side-effect.
+ return true;
+
+ case StmtExprClass: {
+ // StmtExprs have a side-effect if any substatement does.
+ SideEffectFinder Finder(Ctx, IncludePossibleEffects);
+ Finder.Visit(cast<StmtExpr>(this)->getSubStmt());
+ return Finder.hasSideEffects();
+ }
+
+ case ParenExprClass:
+ case ArraySubscriptExprClass:
+ case OMPArraySectionExprClass:
+ case MemberExprClass:
+ case ConditionalOperatorClass:
+ case BinaryConditionalOperatorClass:
+ case CompoundLiteralExprClass:
+ case ExtVectorElementExprClass:
+ case DesignatedInitExprClass:
+ case DesignatedInitUpdateExprClass:
+ case ParenListExprClass:
+ case CXXPseudoDestructorExprClass:
+ case CXXStdInitializerListExprClass:
+ case SubstNonTypeTemplateParmExprClass:
+ case MaterializeTemporaryExprClass:
+ case ShuffleVectorExprClass:
+ case ConvertVectorExprClass:
+ case AsTypeExprClass:
+ // These have a side-effect if any subexpression does.
+ break;
+
+ case UnaryOperatorClass:
+ if (cast<UnaryOperator>(this)->isIncrementDecrementOp())
+ return true;
+ break;
+
+ case BinaryOperatorClass:
+ if (cast<BinaryOperator>(this)->isAssignmentOp())
+ return true;
+ break;
+
+ case InitListExprClass:
+    // FIXME: The children of an InitListExpr don't include the array filler.
+ if (const Expr *E = cast<InitListExpr>(this)->getArrayFiller())
+ if (E->HasSideEffects(Ctx, IncludePossibleEffects))
+ return true;
+ break;
+
+ case GenericSelectionExprClass:
+ return cast<GenericSelectionExpr>(this)->getResultExpr()->
+ HasSideEffects(Ctx, IncludePossibleEffects);
+
+ case ChooseExprClass:
+ return cast<ChooseExpr>(this)->getChosenSubExpr()->HasSideEffects(
+ Ctx, IncludePossibleEffects);
+
+ case CXXDefaultArgExprClass:
+ return cast<CXXDefaultArgExpr>(this)->getExpr()->HasSideEffects(
+ Ctx, IncludePossibleEffects);
+
+ case CXXDefaultInitExprClass: {
+ const FieldDecl *FD = cast<CXXDefaultInitExpr>(this)->getField();
+ if (const Expr *E = FD->getInClassInitializer())
+ return E->HasSideEffects(Ctx, IncludePossibleEffects);
+ // If we've not yet parsed the initializer, assume it has side-effects.
+ return true;
+ }
+
+ case CXXDynamicCastExprClass: {
+ // A dynamic_cast expression has side-effects if it can throw.
+ const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(this);
+ if (DCE->getTypeAsWritten()->isReferenceType() &&
+ DCE->getCastKind() == CK_Dynamic)
+ return true;
+ } // Fall through.
+ case ImplicitCastExprClass:
+ case CStyleCastExprClass:
+ case CXXStaticCastExprClass:
+ case CXXReinterpretCastExprClass:
+ case CXXConstCastExprClass:
+ case CXXFunctionalCastExprClass: {
+ // While volatile reads are side-effecting in both C and C++, we treat them
+ // as having possible (not definite) side-effects. This allows idiomatic
+ // code to behave without warning, such as sizeof(*v) for a volatile-
+ // qualified pointer.
+ if (!IncludePossibleEffects)
+ break;
+
+ const CastExpr *CE = cast<CastExpr>(this);
+ if (CE->getCastKind() == CK_LValueToRValue &&
+ CE->getSubExpr()->getType().isVolatileQualified())
+ return true;
+ break;
+ }
+
+ case CXXTypeidExprClass:
+    // typeid might throw if its subexpression is potentially-evaluated, so it
+    // has side-effects in that case whether or not its subexpression does.
+ return cast<CXXTypeidExpr>(this)->isPotentiallyEvaluated();
+
+ case CXXConstructExprClass:
+ case CXXTemporaryObjectExprClass: {
+ const CXXConstructExpr *CE = cast<CXXConstructExpr>(this);
+ if (!CE->getConstructor()->isTrivial() && IncludePossibleEffects)
+ return true;
+ // A trivial constructor does not add any side-effects of its own. Just look
+ // at its arguments.
+ break;
+ }
+
+ case LambdaExprClass: {
+ const LambdaExpr *LE = cast<LambdaExpr>(this);
+ for (LambdaExpr::capture_iterator I = LE->capture_begin(),
+ E = LE->capture_end(); I != E; ++I)
+ if (I->getCaptureKind() == LCK_ByCopy)
+ // FIXME: Only has a side-effect if the variable is volatile or if
+ // the copy would invoke a non-trivial copy constructor.
+ return true;
+ return false;
+ }
+
+ case PseudoObjectExprClass: {
+ // Only look for side-effects in the semantic form, and look past
+ // OpaqueValueExpr bindings in that form.
+ const PseudoObjectExpr *PO = cast<PseudoObjectExpr>(this);
+ for (PseudoObjectExpr::const_semantics_iterator I = PO->semantics_begin(),
+ E = PO->semantics_end();
+ I != E; ++I) {
+ const Expr *Subexpr = *I;
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Subexpr))
+ Subexpr = OVE->getSourceExpr();
+ if (Subexpr->HasSideEffects(Ctx, IncludePossibleEffects))
+ return true;
+ }
+ return false;
+ }
+
+ case ObjCBoxedExprClass:
+ case ObjCArrayLiteralClass:
+ case ObjCDictionaryLiteralClass:
+ case ObjCSelectorExprClass:
+ case ObjCProtocolExprClass:
+ case ObjCIsaExprClass:
+ case ObjCIndirectCopyRestoreExprClass:
+ case ObjCSubscriptRefExprClass:
+ case ObjCBridgedCastExprClass:
+ case ObjCMessageExprClass:
+ case ObjCPropertyRefExprClass:
+ // FIXME: Classify these cases better.
+ if (IncludePossibleEffects)
+ return true;
+ break;
+ }
+
+ // Recurse to children.
+ for (const Stmt *SubStmt : children())
+ if (SubStmt &&
+ cast<Expr>(SubStmt)->HasSideEffects(Ctx, IncludePossibleEffects))
+ return true;
+
+ return false;
+}
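+
+// Example (illustrative): `i + 1` has no side effects under either mode and
+// `i++` always does, while a volatile load (an LValueToRValue cast of a
+// volatile-qualified operand) is reported only when IncludePossibleEffects
+// is true, per the cast handling above.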
+
+namespace {
+ /// \brief Look for a call to a non-trivial function within an expression.
+  class NonTrivialCallFinder
+      : public ConstEvaluatedExprVisitor<NonTrivialCallFinder> {
+ typedef ConstEvaluatedExprVisitor<NonTrivialCallFinder> Inherited;
+
+ bool NonTrivial;
+
+ public:
+ explicit NonTrivialCallFinder(const ASTContext &Context)
+ : Inherited(Context), NonTrivial(false) { }
+
+ bool hasNonTrivialCall() const { return NonTrivial; }
+
+ void VisitCallExpr(const CallExpr *E) {
+ if (const CXXMethodDecl *Method
+ = dyn_cast_or_null<const CXXMethodDecl>(E->getCalleeDecl())) {
+ if (Method->isTrivial()) {
+ // Recurse to children of the call.
+ Inherited::VisitStmt(E);
+ return;
+ }
+ }
+
+ NonTrivial = true;
+ }
+
+ void VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ if (E->getConstructor()->isTrivial()) {
+ // Recurse to children of the call.
+ Inherited::VisitStmt(E);
+ return;
+ }
+
+ NonTrivial = true;
+ }
+
+ void VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *E) {
+ if (E->getTemporary()->getDestructor()->isTrivial()) {
+ Inherited::VisitStmt(E);
+ return;
+ }
+
+ NonTrivial = true;
+ }
+ };
+}
+
+bool Expr::hasNonTrivialCall(const ASTContext &Ctx) const {
+ NonTrivialCallFinder Finder(Ctx);
+ Finder.Visit(this);
+ return Finder.hasNonTrivialCall();
+}
+
+/// isNullPointerConstant - C99 6.3.2.3p3 - Return whether this is a null
+/// pointer constant or not, as well as the specific kind of constant detected.
+/// Null pointer constants can be integer constant expressions with the
+/// value zero, casts of zero to void*, nullptr (C++11), or __null
+/// (a GNU extension).
+Expr::NullPointerConstantKind
+Expr::isNullPointerConstant(ASTContext &Ctx,
+ NullPointerConstantValueDependence NPC) const {
+ if (isValueDependent() &&
+ (!Ctx.getLangOpts().CPlusPlus11 || Ctx.getLangOpts().MSVCCompat)) {
+ switch (NPC) {
+ case NPC_NeverValueDependent:
+ llvm_unreachable("Unexpected value dependent expression!");
+ case NPC_ValueDependentIsNull:
+ if (isTypeDependent() || getType()->isIntegralType(Ctx))
+ return NPCK_ZeroExpression;
+ else
+ return NPCK_NotNull;
+
+ case NPC_ValueDependentIsNotNull:
+ return NPCK_NotNull;
+ }
+ }
+
+ // Strip off a cast to void*, if it exists. Except in C++.
+ if (const ExplicitCastExpr *CE = dyn_cast<ExplicitCastExpr>(this)) {
+ if (!Ctx.getLangOpts().CPlusPlus) {
+ // Check that it is a cast to void*.
+ if (const PointerType *PT = CE->getType()->getAs<PointerType>()) {
+ QualType Pointee = PT->getPointeeType();
+ Qualifiers Q = Pointee.getQualifiers();
+ // In OpenCL v2.0 generic address space acts as a placeholder
+ // and should be ignored.
+ bool IsASValid = true;
+ if (Ctx.getLangOpts().OpenCLVersion >= 200) {
+ if (Pointee.getAddressSpace() == LangAS::opencl_generic)
+ Q.removeAddressSpace();
+ else
+ IsASValid = false;
+ }
+
+ if (IsASValid && !Q.hasQualifiers() &&
+ Pointee->isVoidType() && // to void*
+ CE->getSubExpr()->getType()->isIntegerType()) // from int.
+ return CE->getSubExpr()->isNullPointerConstant(Ctx, NPC);
+ }
+ }
+ } else if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(this)) {
+ // Ignore the ImplicitCastExpr type entirely.
+ return ICE->getSubExpr()->isNullPointerConstant(Ctx, NPC);
+ } else if (const ParenExpr *PE = dyn_cast<ParenExpr>(this)) {
+ // Accept ((void*)0) as a null pointer constant, as many other
+ // implementations do.
+ return PE->getSubExpr()->isNullPointerConstant(Ctx, NPC);
+ } else if (const GenericSelectionExpr *GE =
+ dyn_cast<GenericSelectionExpr>(this)) {
+ if (GE->isResultDependent())
+ return NPCK_NotNull;
+ return GE->getResultExpr()->isNullPointerConstant(Ctx, NPC);
+ } else if (const ChooseExpr *CE = dyn_cast<ChooseExpr>(this)) {
+ if (CE->isConditionDependent())
+ return NPCK_NotNull;
+ return CE->getChosenSubExpr()->isNullPointerConstant(Ctx, NPC);
+ } else if (const CXXDefaultArgExpr *DefaultArg
+ = dyn_cast<CXXDefaultArgExpr>(this)) {
+ // See through default argument expressions.
+ return DefaultArg->getExpr()->isNullPointerConstant(Ctx, NPC);
+ } else if (const CXXDefaultInitExpr *DefaultInit
+ = dyn_cast<CXXDefaultInitExpr>(this)) {
+ // See through default initializer expressions.
+ return DefaultInit->getExpr()->isNullPointerConstant(Ctx, NPC);
+ } else if (isa<GNUNullExpr>(this)) {
+ // The GNU __null extension is always a null pointer constant.
+ return NPCK_GNUNull;
+ } else if (const MaterializeTemporaryExpr *M
+ = dyn_cast<MaterializeTemporaryExpr>(this)) {
+ return M->GetTemporaryExpr()->isNullPointerConstant(Ctx, NPC);
+ } else if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(this)) {
+ if (const Expr *Source = OVE->getSourceExpr())
+ return Source->isNullPointerConstant(Ctx, NPC);
+ }
+
+ // C++11 nullptr_t is always a null pointer constant.
+ if (getType()->isNullPtrType())
+ return NPCK_CXX11_nullptr;
+
+ if (const RecordType *UT = getType()->getAsUnionType())
+ if (!Ctx.getLangOpts().CPlusPlus11 &&
+ UT && UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ if (const CompoundLiteralExpr *CLE = dyn_cast<CompoundLiteralExpr>(this)){
+ const Expr *InitExpr = CLE->getInitializer();
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(InitExpr))
+ return ILE->getInit(0)->isNullPointerConstant(Ctx, NPC);
+ }
+  // Otherwise, the expression must have an integer type.
+ if (!getType()->isIntegerType() ||
+ (Ctx.getLangOpts().CPlusPlus && getType()->isEnumeralType()))
+ return NPCK_NotNull;
+
+ if (Ctx.getLangOpts().CPlusPlus11) {
+ // C++11 [conv.ptr]p1: A null pointer constant is an integer literal with
+ // value zero or a prvalue of type std::nullptr_t.
+ // Microsoft mode permits C++98 rules reflecting MSVC behavior.
+ const IntegerLiteral *Lit = dyn_cast<IntegerLiteral>(this);
+ if (Lit && !Lit->getValue())
+ return NPCK_ZeroLiteral;
+ else if (!Ctx.getLangOpts().MSVCCompat || !isCXX98IntegralConstantExpr(Ctx))
+ return NPCK_NotNull;
+ } else {
+ // If we have an integer constant expression, we need to *evaluate* it and
+ // test for the value 0.
+ if (!isIntegerConstantExpr(Ctx))
+ return NPCK_NotNull;
+ }
+
+ if (EvaluateKnownConstInt(Ctx) != 0)
+ return NPCK_NotNull;
+
+ if (isa<IntegerLiteral>(this))
+ return NPCK_ZeroLiteral;
+ return NPCK_ZeroExpression;
+}
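+
+// Examples (illustrative): in C, `(void*)0` recurses through the explicit
+// cast and classifies the inner literal, yielding NPCK_ZeroLiteral, while
+// `(1 - 1)` yields NPCK_ZeroExpression; `__null` maps to NPCK_GNUNull and
+// `nullptr` to NPCK_CXX11_nullptr.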
+
+/// \brief If this expression is an l-value for an Objective C
+/// property, find the underlying property reference expression.
+const ObjCPropertyRefExpr *Expr::getObjCProperty() const {
+ const Expr *E = this;
+ while (true) {
+ assert((E->getValueKind() == VK_LValue &&
+ E->getObjectKind() == OK_ObjCProperty) &&
+ "expression is not a property reference");
+ E = E->IgnoreParenCasts();
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_Comma) {
+ E = BO->getRHS();
+ continue;
+ }
+ }
+
+ break;
+ }
+
+ return cast<ObjCPropertyRefExpr>(E);
+}
+
+bool Expr::isObjCSelfExpr() const {
+ const Expr *E = IgnoreParenImpCasts();
+
+ const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E);
+ if (!DRE)
+ return false;
+
+ const ImplicitParamDecl *Param = dyn_cast<ImplicitParamDecl>(DRE->getDecl());
+ if (!Param)
+ return false;
+
+ const ObjCMethodDecl *M = dyn_cast<ObjCMethodDecl>(Param->getDeclContext());
+ if (!M)
+ return false;
+
+ return M->getSelfDecl() == Param;
+}
+
+FieldDecl *Expr::getSourceBitField() {
+ Expr *E = this->IgnoreParens();
+
+ while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() == CK_LValueToRValue ||
+ (ICE->getValueKind() != VK_RValue && ICE->getCastKind() == CK_NoOp))
+ E = ICE->getSubExpr()->IgnoreParens();
+ else
+ break;
+ }
+
+ if (MemberExpr *MemRef = dyn_cast<MemberExpr>(E))
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(MemRef->getMemberDecl()))
+ if (Field->isBitField())
+ return Field;
+
+ if (ObjCIvarRefExpr *IvarRef = dyn_cast<ObjCIvarRefExpr>(E))
+ if (FieldDecl *Ivar = dyn_cast<FieldDecl>(IvarRef->getDecl()))
+ if (Ivar->isBitField())
+ return Ivar;
+
+ if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E))
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(DeclRef->getDecl()))
+ if (Field->isBitField())
+ return Field;
+
+ if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(E)) {
+ if (BinOp->isAssignmentOp() && BinOp->getLHS())
+ return BinOp->getLHS()->getSourceBitField();
+
+ if (BinOp->getOpcode() == BO_Comma && BinOp->getRHS())
+ return BinOp->getRHS()->getSourceBitField();
+ }
+
+ if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(E))
+ if (UnOp->isPrefix() && UnOp->isIncrementDecrementOp())
+ return UnOp->getSubExpr()->getSourceBitField();
+
+ return nullptr;
+}
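+
+// Example (illustrative): given `struct S { int bf : 3; } s;`, the
+// expressions `s.bf`, `s.bf = 1`, and `++s.bf` all resolve to the FieldDecl
+// for `bf`, via the member, assignment, and prefix-increment paths above.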
+
+bool Expr::refersToVectorElement() const {
+ const Expr *E = this->IgnoreParens();
+
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getValueKind() != VK_RValue &&
+ ICE->getCastKind() == CK_NoOp)
+ E = ICE->getSubExpr()->IgnoreParens();
+ else
+ break;
+ }
+
+ if (const ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(E))
+ return ASE->getBase()->getType()->isVectorType();
+
+ if (isa<ExtVectorElementExpr>(E))
+ return true;
+
+ return false;
+}
+
+bool Expr::refersToGlobalRegisterVar() const {
+ const Expr *E = this->IgnoreParenImpCasts();
+
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (VD->getStorageClass() == SC_Register &&
+ VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
+ return true;
+
+ return false;
+}
+
+/// isArrow - Return true if the base expression is a pointer to vector,
+/// return false if the base expression is a vector.
+bool ExtVectorElementExpr::isArrow() const {
+ return getBase()->getType()->isPointerType();
+}
+
+unsigned ExtVectorElementExpr::getNumElements() const {
+ if (const VectorType *VT = getType()->getAs<VectorType>())
+ return VT->getNumElements();
+ return 1;
+}
+
+/// containsDuplicateElements - Return true if any element access is repeated.
+bool ExtVectorElementExpr::containsDuplicateElements() const {
+ // FIXME: Refactor this code to an accessor on the AST node which returns the
+ // "type" of component access, and share with code below and in Sema.
+ StringRef Comp = Accessor->getName();
+
+ // Halving swizzles do not contain duplicate elements.
+ if (Comp == "hi" || Comp == "lo" || Comp == "even" || Comp == "odd")
+ return false;
+
+ // Advance past s-char prefix on hex swizzles.
+ if (Comp[0] == 's' || Comp[0] == 'S')
+ Comp = Comp.substr(1);
+
+ for (unsigned i = 0, e = Comp.size(); i != e; ++i)
+ if (Comp.substr(i + 1).find(Comp[i]) != StringRef::npos)
+ return true;
+
+ return false;
+}
+
+/// getEncodedElementAccess - We encode the fields as an llvm ConstantArray.
+void ExtVectorElementExpr::getEncodedElementAccess(
+ SmallVectorImpl<uint32_t> &Elts) const {
+ StringRef Comp = Accessor->getName();
+ if (Comp[0] == 's' || Comp[0] == 'S')
+ Comp = Comp.substr(1);
+
+ bool isHi = Comp == "hi";
+ bool isLo = Comp == "lo";
+ bool isEven = Comp == "even";
+ bool isOdd = Comp == "odd";
+
+ for (unsigned i = 0, e = getNumElements(); i != e; ++i) {
+ uint64_t Index;
+
+ if (isHi)
+ Index = e + i;
+ else if (isLo)
+ Index = i;
+ else if (isEven)
+ Index = 2 * i;
+ else if (isOdd)
+ Index = 2 * i + 1;
+ else
+ Index = ExtVectorType::getAccessorIdx(Comp[i]);
+
+ Elts.push_back(Index);
+ }
+}
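+
+// Example (illustrative): for a 4-element base vector, the swizzle `.hi`
+// yields a 2-element result, so the loop emits indices {2, 3}; `.even`
+// emits {0, 2}, and `.xyz` emits the per-character accessor indices
+// {0, 1, 2}.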
+
+ShuffleVectorExpr::ShuffleVectorExpr(const ASTContext &C, ArrayRef<Expr*> args,
+ QualType Type, SourceLocation BLoc,
+ SourceLocation RP)
+ : Expr(ShuffleVectorExprClass, Type, VK_RValue, OK_Ordinary,
+ Type->isDependentType(), Type->isDependentType(),
+ Type->isInstantiationDependentType(),
+ Type->containsUnexpandedParameterPack()),
+ BuiltinLoc(BLoc), RParenLoc(RP), NumExprs(args.size())
+{
+ SubExprs = new (C) Stmt*[args.size()];
+ for (unsigned i = 0; i != args.size(); i++) {
+ if (args[i]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (args[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (args[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (args[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i] = args[i];
+ }
+}
+
+void ShuffleVectorExpr::setExprs(const ASTContext &C, ArrayRef<Expr *> Exprs) {
+ if (SubExprs) C.Deallocate(SubExprs);
+
+ this->NumExprs = Exprs.size();
+ SubExprs = new (C) Stmt*[NumExprs];
+ memcpy(SubExprs, Exprs.data(), sizeof(Expr *) * Exprs.size());
+}
+
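+// C11 _Generic(controlling-expr, type1: expr1, ..., default: exprN) selects
+// the association whose type matches the controlling expression; ResultIndex
+// records the chosen one. The second constructor below builds the
+// result-dependent form used in templates, where no association is chosen yet.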
+GenericSelectionExpr::GenericSelectionExpr(const ASTContext &Context,
+ SourceLocation GenericLoc, Expr *ControllingExpr,
+ ArrayRef<TypeSourceInfo*> AssocTypes,
+ ArrayRef<Expr*> AssocExprs,
+ SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ bool ContainsUnexpandedParameterPack,
+ unsigned ResultIndex)
+ : Expr(GenericSelectionExprClass,
+ AssocExprs[ResultIndex]->getType(),
+ AssocExprs[ResultIndex]->getValueKind(),
+ AssocExprs[ResultIndex]->getObjectKind(),
+ AssocExprs[ResultIndex]->isTypeDependent(),
+ AssocExprs[ResultIndex]->isValueDependent(),
+ AssocExprs[ResultIndex]->isInstantiationDependent(),
+ ContainsUnexpandedParameterPack),
+ AssocTypes(new (Context) TypeSourceInfo*[AssocTypes.size()]),
+ SubExprs(new (Context) Stmt*[END_EXPR+AssocExprs.size()]),
+ NumAssocs(AssocExprs.size()), ResultIndex(ResultIndex),
+ GenericLoc(GenericLoc), DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
+ SubExprs[CONTROLLING] = ControllingExpr;
+ assert(AssocTypes.size() == AssocExprs.size());
+ std::copy(AssocTypes.begin(), AssocTypes.end(), this->AssocTypes);
+ std::copy(AssocExprs.begin(), AssocExprs.end(), SubExprs+END_EXPR);
+}
+
+GenericSelectionExpr::GenericSelectionExpr(const ASTContext &Context,
+ SourceLocation GenericLoc, Expr *ControllingExpr,
+ ArrayRef<TypeSourceInfo*> AssocTypes,
+ ArrayRef<Expr*> AssocExprs,
+ SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ bool ContainsUnexpandedParameterPack)
+ : Expr(GenericSelectionExprClass,
+ Context.DependentTy,
+ VK_RValue,
+ OK_Ordinary,
+ /*isTypeDependent=*/true,
+ /*isValueDependent=*/true,
+ /*isInstantiationDependent=*/true,
+ ContainsUnexpandedParameterPack),
+ AssocTypes(new (Context) TypeSourceInfo*[AssocTypes.size()]),
+ SubExprs(new (Context) Stmt*[END_EXPR+AssocExprs.size()]),
+ NumAssocs(AssocExprs.size()), ResultIndex(-1U), GenericLoc(GenericLoc),
+ DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
+ SubExprs[CONTROLLING] = ControllingExpr;
+ assert(AssocTypes.size() == AssocExprs.size());
+ std::copy(AssocTypes.begin(), AssocTypes.end(), this->AssocTypes);
+ std::copy(AssocExprs.begin(), AssocExprs.end(), SubExprs+END_EXPR);
+}
+
+//===----------------------------------------------------------------------===//
+// DesignatedInitExpr
+//===----------------------------------------------------------------------===//
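+
+// A designated initializer such as '{ .x = 1, [2] = 3, [4 ... 6] = 5 }' is
+// stored as a sequence of field, array, and (GNU) array-range designators
+// together with the initializer expression itself.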
+
+IdentifierInfo *DesignatedInitExpr::Designator::getFieldName() const {
+ assert(Kind == FieldDesignator && "Only valid on a field designator");
+ if (Field.NameOrField & 0x01)
+ return reinterpret_cast<IdentifierInfo *>(Field.NameOrField&~0x01);
+ else
+ return getField()->getIdentifier();
+}
+
+DesignatedInitExpr::DesignatedInitExpr(const ASTContext &C, QualType Ty,
+ unsigned NumDesignators,
+ const Designator *Designators,
+ SourceLocation EqualOrColonLoc,
+ bool GNUSyntax,
+ ArrayRef<Expr*> IndexExprs,
+ Expr *Init)
+ : Expr(DesignatedInitExprClass, Ty,
+ Init->getValueKind(), Init->getObjectKind(),
+ Init->isTypeDependent(), Init->isValueDependent(),
+ Init->isInstantiationDependent(),
+ Init->containsUnexpandedParameterPack()),
+ EqualOrColonLoc(EqualOrColonLoc), GNUSyntax(GNUSyntax),
+ NumDesignators(NumDesignators), NumSubExprs(IndexExprs.size() + 1) {
+ this->Designators = new (C) Designator[NumDesignators];
+
+ // Record the initializer itself.
+ child_iterator Child = child_begin();
+ *Child++ = Init;
+
+ // Copy the designators and their subexpressions, computing
+ // value-dependence along the way.
+ unsigned IndexIdx = 0;
+ for (unsigned I = 0; I != NumDesignators; ++I) {
+ this->Designators[I] = Designators[I];
+
+ if (this->Designators[I].isArrayDesignator()) {
+ // Compute type- and value-dependence.
+ Expr *Index = IndexExprs[IndexIdx];
+ if (Index->isTypeDependent() || Index->isValueDependent())
+ ExprBits.TypeDependent = ExprBits.ValueDependent = true;
+ if (Index->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ // Propagate unexpanded parameter packs.
+ if (Index->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ // Copy the index expressions into permanent storage.
+ *Child++ = IndexExprs[IndexIdx++];
+ } else if (this->Designators[I].isArrayRangeDesignator()) {
+ // Compute type- and value-dependence.
+ Expr *Start = IndexExprs[IndexIdx];
+ Expr *End = IndexExprs[IndexIdx + 1];
+ if (Start->isTypeDependent() || Start->isValueDependent() ||
+ End->isTypeDependent() || End->isValueDependent()) {
+ ExprBits.TypeDependent = ExprBits.ValueDependent = true;
+ ExprBits.InstantiationDependent = true;
+ } else if (Start->isInstantiationDependent() ||
+ End->isInstantiationDependent()) {
+ ExprBits.InstantiationDependent = true;
+ }
+
+ // Propagate unexpanded parameter packs.
+ if (Start->containsUnexpandedParameterPack() ||
+ End->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ // Copy the start/end expressions into permanent storage.
+ *Child++ = IndexExprs[IndexIdx++];
+ *Child++ = IndexExprs[IndexIdx++];
+ }
+ }
+
+ assert(IndexIdx == IndexExprs.size() && "Wrong number of index expressions");
+}
+
+DesignatedInitExpr *
+DesignatedInitExpr::Create(const ASTContext &C, Designator *Designators,
+ unsigned NumDesignators,
+ ArrayRef<Expr*> IndexExprs,
+ SourceLocation ColonOrEqualLoc,
+ bool UsesColonSyntax, Expr *Init) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Stmt *>(IndexExprs.size() + 1),
+ llvm::alignOf<DesignatedInitExpr>());
+ return new (Mem) DesignatedInitExpr(C, C.VoidTy, NumDesignators, Designators,
+ ColonOrEqualLoc, UsesColonSyntax,
+ IndexExprs, Init);
+}
+
+DesignatedInitExpr *DesignatedInitExpr::CreateEmpty(const ASTContext &C,
+ unsigned NumIndexExprs) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Stmt *>(NumIndexExprs + 1),
+ llvm::alignOf<DesignatedInitExpr>());
+ return new (Mem) DesignatedInitExpr(NumIndexExprs + 1);
+}
+
+void DesignatedInitExpr::setDesignators(const ASTContext &C,
+ const Designator *Desigs,
+ unsigned NumDesigs) {
+ Designators = new (C) Designator[NumDesigs];
+ NumDesignators = NumDesigs;
+ for (unsigned I = 0; I != NumDesigs; ++I)
+ Designators[I] = Desigs[I];
+}
+
+SourceRange DesignatedInitExpr::getDesignatorsSourceRange() const {
+ DesignatedInitExpr *DIE = const_cast<DesignatedInitExpr*>(this);
+ if (size() == 1)
+ return DIE->getDesignator(0)->getSourceRange();
+ return SourceRange(DIE->getDesignator(0)->getLocStart(),
+ DIE->getDesignator(size()-1)->getLocEnd());
+}
+
+SourceLocation DesignatedInitExpr::getLocStart() const {
+ SourceLocation StartLoc;
+ Designator &First =
+ *const_cast<DesignatedInitExpr*>(this)->designators_begin();
+ if (First.isFieldDesignator()) {
+ if (GNUSyntax)
+ StartLoc = SourceLocation::getFromRawEncoding(First.Field.FieldLoc);
+ else
+ StartLoc = SourceLocation::getFromRawEncoding(First.Field.DotLoc);
+ } else
+ StartLoc =
+ SourceLocation::getFromRawEncoding(First.ArrayOrRange.LBracketLoc);
+ return StartLoc;
+}
+
+SourceLocation DesignatedInitExpr::getLocEnd() const {
+ return getInit()->getLocEnd();
+}
+
+Expr *DesignatedInitExpr::getArrayIndex(const Designator& D) const {
+ assert(D.Kind == Designator::ArrayDesignator && "Requires array designator");
+ return getSubExpr(D.ArrayOrRange.Index + 1);
+}
+
+Expr *DesignatedInitExpr::getArrayRangeStart(const Designator &D) const {
+ assert(D.Kind == Designator::ArrayRangeDesignator &&
+ "Requires array range designator");
+ return getSubExpr(D.ArrayOrRange.Index + 1);
+}
+
+Expr *DesignatedInitExpr::getArrayRangeEnd(const Designator &D) const {
+ assert(D.Kind == Designator::ArrayRangeDesignator &&
+ "Requires array range designator");
+ return getSubExpr(D.ArrayOrRange.Index + 2);
+}
+
+/// \brief Replaces the designator at index @p Idx with the series
+/// of designators in [First, Last).
+void DesignatedInitExpr::ExpandDesignator(const ASTContext &C, unsigned Idx,
+ const Designator *First,
+ const Designator *Last) {
+ unsigned NumNewDesignators = Last - First;
+ if (NumNewDesignators == 0) {
+ std::copy_backward(Designators + Idx + 1,
+ Designators + NumDesignators,
+ Designators + Idx);
+ --NumDesignators;
+ return;
+ } else if (NumNewDesignators == 1) {
+ Designators[Idx] = *First;
+ return;
+ }
+
+ Designator *NewDesignators
+ = new (C) Designator[NumDesignators - 1 + NumNewDesignators];
+ std::copy(Designators, Designators + Idx, NewDesignators);
+ std::copy(First, Last, NewDesignators + Idx);
+ std::copy(Designators + Idx + 1, Designators + NumDesignators,
+ NewDesignators + Idx + NumNewDesignators);
+ Designators = NewDesignators;
+ NumDesignators = NumDesignators - 1 + NumNewDesignators;
+}
+
+DesignatedInitUpdateExpr::DesignatedInitUpdateExpr(const ASTContext &C,
+ SourceLocation lBraceLoc, Expr *baseExpr, SourceLocation rBraceLoc)
+ : Expr(DesignatedInitUpdateExprClass, baseExpr->getType(), VK_RValue,
+ OK_Ordinary, false, false, false, false) {
+ BaseAndUpdaterExprs[0] = baseExpr;
+
+ InitListExpr *ILE = new (C) InitListExpr(C, lBraceLoc, None, rBraceLoc);
+ ILE->setType(baseExpr->getType());
+ BaseAndUpdaterExprs[1] = ILE;
+}
+
+SourceLocation DesignatedInitUpdateExpr::getLocStart() const {
+ return getBase()->getLocStart();
+}
+
+SourceLocation DesignatedInitUpdateExpr::getLocEnd() const {
+ return getBase()->getLocEnd();
+}
+
+ParenListExpr::ParenListExpr(const ASTContext& C, SourceLocation lparenloc,
+ ArrayRef<Expr*> exprs,
+ SourceLocation rparenloc)
+ : Expr(ParenListExprClass, QualType(), VK_RValue, OK_Ordinary,
+ false, false, false, false),
+ NumExprs(exprs.size()), LParenLoc(lparenloc), RParenLoc(rparenloc) {
+ Exprs = new (C) Stmt*[exprs.size()];
+ for (unsigned i = 0; i != exprs.size(); ++i) {
+ if (exprs[i]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (exprs[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (exprs[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (exprs[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ Exprs[i] = exprs[i];
+ }
+}
+
+const OpaqueValueExpr *OpaqueValueExpr::findInCopyConstruct(const Expr *e) {
+ if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(e))
+ e = ewc->getSubExpr();
+ if (const MaterializeTemporaryExpr *m = dyn_cast<MaterializeTemporaryExpr>(e))
+ e = m->GetTemporaryExpr();
+ e = cast<CXXConstructExpr>(e)->getArg(0);
+ while (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(e))
+ e = ice->getSubExpr();
+ return cast<OpaqueValueExpr>(e);
+}
+
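+// A PseudoObjectExpr pairs one syntactic form with the semantic expressions
+// that implement it; e.g. an Objective-C property assignment 'obj.x = v' is
+// backed by a setter message send, with OpaqueValueExprs binding the shared
+// subexpressions.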
+PseudoObjectExpr *PseudoObjectExpr::Create(const ASTContext &Context,
+ EmptyShell sh,
+ unsigned numSemanticExprs) {
+ void *buffer =
+ Context.Allocate(totalSizeToAlloc<Expr *>(1 + numSemanticExprs),
+ llvm::alignOf<PseudoObjectExpr>());
+ return new(buffer) PseudoObjectExpr(sh, numSemanticExprs);
+}
+
+PseudoObjectExpr::PseudoObjectExpr(EmptyShell shell, unsigned numSemanticExprs)
+ : Expr(PseudoObjectExprClass, shell) {
+ PseudoObjectExprBits.NumSubExprs = numSemanticExprs + 1;
+}
+
+PseudoObjectExpr *PseudoObjectExpr::Create(const ASTContext &C, Expr *syntax,
+ ArrayRef<Expr*> semantics,
+ unsigned resultIndex) {
+ assert(syntax && "no syntactic expression!");
+ assert(semantics.size() && "no semantic expressions!");
+
+ QualType type;
+ ExprValueKind VK;
+ if (resultIndex == NoResult) {
+ type = C.VoidTy;
+ VK = VK_RValue;
+ } else {
+ assert(resultIndex < semantics.size());
+ type = semantics[resultIndex]->getType();
+ VK = semantics[resultIndex]->getValueKind();
+ assert(semantics[resultIndex]->getObjectKind() == OK_Ordinary);
+ }
+
+ void *buffer = C.Allocate(totalSizeToAlloc<Expr *>(semantics.size() + 1),
+ llvm::alignOf<PseudoObjectExpr>());
+ return new(buffer) PseudoObjectExpr(type, VK, syntax, semantics,
+ resultIndex);
+}
+
+PseudoObjectExpr::PseudoObjectExpr(QualType type, ExprValueKind VK,
+ Expr *syntax, ArrayRef<Expr*> semantics,
+ unsigned resultIndex)
+ : Expr(PseudoObjectExprClass, type, VK, OK_Ordinary,
+ /*filled in at end of ctor*/ false, false, false, false) {
+ PseudoObjectExprBits.NumSubExprs = semantics.size() + 1;
+ PseudoObjectExprBits.ResultIndex = resultIndex + 1;
+
+ for (unsigned i = 0, e = semantics.size() + 1; i != e; ++i) {
+ Expr *E = (i == 0 ? syntax : semantics[i-1]);
+ getSubExprsBuffer()[i] = E;
+
+ if (E->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (E->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (E->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (E->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ if (isa<OpaqueValueExpr>(E))
+ assert(cast<OpaqueValueExpr>(E)->getSourceExpr() != nullptr &&
+ "opaque-value semantic expressions for pseudo-object "
+ "operations must have sources");
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Child Iterators for iterating over subexpressions/substatements
+//===----------------------------------------------------------------------===//
+
+// UnaryExprOrTypeTraitExpr
+Stmt::child_range UnaryExprOrTypeTraitExpr::children() {
+ // If this is of a type and the type is a VLA type (and not a typedef), the
+ // size expression of the VLA needs to be treated as an executable expression.
+ // Why isn't this weirdness documented better in StmtIterator?
+ if (isArgumentType()) {
+ if (const VariableArrayType* T = dyn_cast<VariableArrayType>(
+ getArgumentType().getTypePtr()))
+ return child_range(child_iterator(T), child_iterator());
+ return child_range(child_iterator(), child_iterator());
+ }
+ return child_range(&Argument.Ex, &Argument.Ex + 1);
+}
+
+AtomicExpr::AtomicExpr(SourceLocation BLoc, ArrayRef<Expr*> args,
+ QualType t, AtomicOp op, SourceLocation RP)
+ : Expr(AtomicExprClass, t, VK_RValue, OK_Ordinary,
+ false, false, false, false),
+ NumSubExprs(args.size()), BuiltinLoc(BLoc), RParenLoc(RP), Op(op)
+{
+ assert(args.size() == getNumSubExprs(op) && "wrong number of subexpressions");
+ for (unsigned i = 0; i != args.size(); i++) {
+ if (args[i]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (args[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (args[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (args[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i] = args[i];
+ }
+}
+
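+// The subexpression count covers the pointer operand plus any value and
+// memory-order operands; e.g. __c11_atomic_fetch_add(ptr, val, order) carries
+// three.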
+unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
+ switch (Op) {
+ case AO__c11_atomic_init:
+ case AO__c11_atomic_load:
+ case AO__atomic_load_n:
+ return 2;
+
+ case AO__c11_atomic_store:
+ case AO__c11_atomic_exchange:
+ case AO__atomic_load:
+ case AO__atomic_store:
+ case AO__atomic_store_n:
+ case AO__atomic_exchange_n:
+ case AO__c11_atomic_fetch_add:
+ case AO__c11_atomic_fetch_sub:
+ case AO__c11_atomic_fetch_and:
+ case AO__c11_atomic_fetch_or:
+ case AO__c11_atomic_fetch_xor:
+ case AO__atomic_fetch_add:
+ case AO__atomic_fetch_sub:
+ case AO__atomic_fetch_and:
+ case AO__atomic_fetch_or:
+ case AO__atomic_fetch_xor:
+ case AO__atomic_fetch_nand:
+ case AO__atomic_add_fetch:
+ case AO__atomic_sub_fetch:
+ case AO__atomic_and_fetch:
+ case AO__atomic_or_fetch:
+ case AO__atomic_xor_fetch:
+ case AO__atomic_nand_fetch:
+ return 3;
+
+ case AO__atomic_exchange:
+ return 4;
+
+ case AO__c11_atomic_compare_exchange_strong:
+ case AO__c11_atomic_compare_exchange_weak:
+ return 5;
+
+ case AO__atomic_compare_exchange:
+ case AO__atomic_compare_exchange_n:
+ return 6;
+ }
+ llvm_unreachable("unknown atomic op");
+}
+
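+// Walk past array sections and subscripts to the underlying base and rebuild
+// the type they index, preferring a parameter's original (pre-decay) array
+// type over its adjusted pointer type.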
+QualType OMPArraySectionExpr::getBaseOriginalType(Expr *Base) {
+ unsigned ArraySectionCount = 0;
+ while (auto *OASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParens())) {
+ Base = OASE->getBase();
+ ++ArraySectionCount;
+ }
+ while (auto *ASE = dyn_cast<ArraySubscriptExpr>(Base->IgnoreParens())) {
+ Base = ASE->getBase();
+ ++ArraySectionCount;
+ }
+ auto OriginalTy = Base->getType();
+ if (auto *DRE = dyn_cast<DeclRefExpr>(Base))
+ if (auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl()))
+ OriginalTy = PVD->getOriginalType().getNonReferenceType();
+
+ for (unsigned Cnt = 0; Cnt < ArraySectionCount; ++Cnt) {
+ if (OriginalTy->isAnyPointerType())
+ OriginalTy = OriginalTy->getPointeeType();
+ else {
+ assert(OriginalTy->isArrayType());
+ OriginalTy = OriginalTy->castAsArrayTypeUnsafe()->getElementType();
+ }
+ }
+ return OriginalTy;
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp b/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp
new file mode 100644
index 0000000..d35efcb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp
@@ -0,0 +1,1505 @@
+//===--- ExprCXX.cpp - (C++) Expression AST Node Implementation -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the subclasses of the Expr class declared in ExprCXX.h.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/IdentifierTable.h"
+using namespace clang;
+
+
+//===----------------------------------------------------------------------===//
+// Child Iterators for iterating over subexpressions/substatements
+//===----------------------------------------------------------------------===//
+
+bool CXXTypeidExpr::isPotentiallyEvaluated() const {
+ if (isTypeOperand())
+ return false;
+
+ // C++11 [expr.typeid]p3:
+ // When typeid is applied to an expression other than a glvalue of
+ // polymorphic class type, [...] the expression is an unevaluated operand.
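+ // For example, 'typeid(*p)' with 'p' a pointer to a polymorphic class is
+ // evaluated (and throws std::bad_typeid if 'p' is null), whereas
+ // 'typeid(int)' and typeid of a non-polymorphic lvalue are not.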
+ const Expr *E = getExprOperand();
+ if (const CXXRecordDecl *RD = E->getType()->getAsCXXRecordDecl())
+ if (RD->isPolymorphic() && E->isGLValue())
+ return true;
+
+ return false;
+}
+
+QualType CXXTypeidExpr::getTypeOperand(ASTContext &Context) const {
+ assert(isTypeOperand() && "Cannot call getTypeOperand for typeid(expr)");
+ Qualifiers Quals;
+ return Context.getUnqualifiedArrayType(
+ Operand.get<TypeSourceInfo *>()->getType().getNonReferenceType(), Quals);
+}
+
+QualType CXXUuidofExpr::getTypeOperand(ASTContext &Context) const {
+ assert(isTypeOperand() && "Cannot call getTypeOperand for __uuidof(expr)");
+ Qualifiers Quals;
+ return Context.getUnqualifiedArrayType(
+ Operand.get<TypeSourceInfo *>()->getType().getNonReferenceType(), Quals);
+}
+
+// static
+const UuidAttr *CXXUuidofExpr::GetUuidAttrOfType(QualType QT,
+ bool *RDHasMultipleGUIDsPtr) {
+ // Optionally remove one level of pointer, reference or array indirection.
+ const Type *Ty = QT.getTypePtr();
+ if (QT->isPointerType() || QT->isReferenceType())
+ Ty = QT->getPointeeType().getTypePtr();
+ else if (QT->isArrayType())
+ Ty = Ty->getBaseElementTypeUnsafe();
+
+ const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+ if (!RD)
+ return nullptr;
+
+ if (const UuidAttr *Uuid = RD->getMostRecentDecl()->getAttr<UuidAttr>())
+ return Uuid;
+
+ // __uuidof can grab UUIDs from template arguments.
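+ // (e.g. __uuidof(SmartPtr<IFace>) may resolve to IFace's uuid attribute when
+ // SmartPtr itself has none; names here are illustrative.)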
+ if (const ClassTemplateSpecializationDecl *CTSD =
+ dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
+ const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
+ const UuidAttr *UuidForRD = nullptr;
+
+ for (const TemplateArgument &TA : TAL.asArray()) {
+ bool SeenMultipleGUIDs = false;
+
+ const UuidAttr *UuidForTA = nullptr;
+ if (TA.getKind() == TemplateArgument::Type)
+ UuidForTA = GetUuidAttrOfType(TA.getAsType(), &SeenMultipleGUIDs);
+ else if (TA.getKind() == TemplateArgument::Declaration)
+ UuidForTA =
+ GetUuidAttrOfType(TA.getAsDecl()->getType(), &SeenMultipleGUIDs);
+
+ // If the template argument has a UUID, there are three cases:
+ // - This is the first UUID seen for this RecordDecl.
+ // - This is a different UUID than previously seen for this RecordDecl.
+ // - This is the same UUID as previously seen for this RecordDecl.
+ if (UuidForTA) {
+ if (!UuidForRD)
+ UuidForRD = UuidForTA;
+ else if (UuidForRD != UuidForTA)
+ SeenMultipleGUIDs = true;
+ }
+
+ // Seeing multiple distinct UUIDs means we couldn't find a unique UUID.
+ if (SeenMultipleGUIDs) {
+ if (RDHasMultipleGUIDsPtr)
+ *RDHasMultipleGUIDsPtr = true;
+ return nullptr;
+ }
+ }
+
+ return UuidForRD;
+ }
+
+ return nullptr;
+}
+
+StringRef CXXUuidofExpr::getUuidAsStringRef(ASTContext &Context) const {
+ StringRef Uuid;
+ if (isTypeOperand())
+ Uuid = CXXUuidofExpr::GetUuidAttrOfType(getTypeOperand(Context))->getGuid();
+ else {
+ // Special case: __uuidof(0) means an all-zero GUID.
+ Expr *Op = getExprOperand();
+ if (!Op->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull))
+ Uuid = CXXUuidofExpr::GetUuidAttrOfType(Op->getType())->getGuid();
+ else
+ Uuid = "00000000-0000-0000-0000-000000000000";
+ }
+ return Uuid;
+}
+
+// CXXScalarValueInitExpr
+SourceLocation CXXScalarValueInitExpr::getLocStart() const {
+ return TypeInfo ? TypeInfo->getTypeLoc().getBeginLoc() : RParenLoc;
+}
+
+// CXXNewExpr
+CXXNewExpr::CXXNewExpr(const ASTContext &C, bool globalNew,
+ FunctionDecl *operatorNew, FunctionDecl *operatorDelete,
+ bool usualArrayDeleteWantsSize,
+ ArrayRef<Expr*> placementArgs,
+ SourceRange typeIdParens, Expr *arraySize,
+ InitializationStyle initializationStyle,
+ Expr *initializer, QualType ty,
+ TypeSourceInfo *allocatedTypeInfo,
+ SourceRange Range, SourceRange directInitRange)
+ : Expr(CXXNewExprClass, ty, VK_RValue, OK_Ordinary,
+ ty->isDependentType(), ty->isDependentType(),
+ ty->isInstantiationDependentType(),
+ ty->containsUnexpandedParameterPack()),
+ SubExprs(nullptr), OperatorNew(operatorNew), OperatorDelete(operatorDelete),
+ AllocatedTypeInfo(allocatedTypeInfo), TypeIdParens(typeIdParens),
+ Range(Range), DirectInitRange(directInitRange),
+ GlobalNew(globalNew), UsualArrayDeleteWantsSize(usualArrayDeleteWantsSize) {
+ assert((initializer != nullptr || initializationStyle == NoInit) &&
+ "Only NoInit can have no initializer.");
+ StoredInitializationStyle = initializer ? initializationStyle + 1 : 0;
+ AllocateArgsArray(C, arraySize != nullptr, placementArgs.size(),
+ initializer != nullptr);
+ unsigned i = 0;
+ if (Array) {
+ if (arraySize->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+
+ if (arraySize->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i++] = arraySize;
+ }
+
+ if (initializer) {
+ if (initializer->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+
+ if (initializer->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i++] = initializer;
+ }
+
+ for (unsigned j = 0; j != placementArgs.size(); ++j) {
+ if (placementArgs[j]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (placementArgs[j]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i++] = placementArgs[j];
+ }
+
+ switch (getInitializationStyle()) {
+ case CallInit:
+ this->Range.setEnd(DirectInitRange.getEnd()); break;
+ case ListInit:
+ this->Range.setEnd(getInitializer()->getSourceRange().getEnd()); break;
+ default:
+ if (TypeIdParens.isValid())
+ this->Range.setEnd(TypeIdParens.getEnd());
+ break;
+ }
+}
+
+void CXXNewExpr::AllocateArgsArray(const ASTContext &C, bool isArray,
+ unsigned numPlaceArgs, bool hasInitializer){
+ assert(SubExprs == nullptr && "SubExprs already allocated");
+ Array = isArray;
+ NumPlacementArgs = numPlaceArgs;
+
+ unsigned TotalSize = Array + hasInitializer + NumPlacementArgs;
+ SubExprs = new (C) Stmt*[TotalSize];
+}
+
+bool CXXNewExpr::shouldNullCheckAllocation(const ASTContext &Ctx) const {
+ return getOperatorNew()->getType()->castAs<FunctionProtoType>()->isNothrow(
+ Ctx) &&
+ !getOperatorNew()->isReservedGlobalPlacementOperator();
+}
+
+// CXXDeleteExpr
+QualType CXXDeleteExpr::getDestroyedType() const {
+ const Expr *Arg = getArgument();
+ // The type-to-delete may not be a pointer if it's a dependent type.
+ const QualType ArgType = Arg->getType();
+
+ if (ArgType->isDependentType() && !ArgType->isPointerType())
+ return QualType();
+
+ return ArgType->getAs<PointerType>()->getPointeeType();
+}
+
+// CXXPseudoDestructorExpr
+PseudoDestructorTypeStorage::PseudoDestructorTypeStorage(TypeSourceInfo *Info)
+ : Type(Info)
+{
+ Location = Info->getTypeLoc().getLocalSourceRange().getBegin();
+}
+
+CXXPseudoDestructorExpr::CXXPseudoDestructorExpr(const ASTContext &Context,
+ Expr *Base, bool isArrow, SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc, TypeSourceInfo *ScopeType,
+ SourceLocation ColonColonLoc, SourceLocation TildeLoc,
+ PseudoDestructorTypeStorage DestroyedType)
+ : Expr(CXXPseudoDestructorExprClass,
+ Context.BoundMemberTy,
+ VK_RValue, OK_Ordinary,
+ /*isTypeDependent=*/(Base->isTypeDependent() ||
+ (DestroyedType.getTypeSourceInfo() &&
+ DestroyedType.getTypeSourceInfo()->getType()->isDependentType())),
+ /*isValueDependent=*/Base->isValueDependent(),
+ (Base->isInstantiationDependent() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent()) ||
+ (ScopeType &&
+ ScopeType->getType()->isInstantiationDependentType()) ||
+ (DestroyedType.getTypeSourceInfo() &&
+ DestroyedType.getTypeSourceInfo()->getType()
+ ->isInstantiationDependentType())),
+ // ContainsUnexpandedParameterPack
+ (Base->containsUnexpandedParameterPack() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()
+ ->containsUnexpandedParameterPack()) ||
+ (ScopeType &&
+ ScopeType->getType()->containsUnexpandedParameterPack()) ||
+ (DestroyedType.getTypeSourceInfo() &&
+ DestroyedType.getTypeSourceInfo()->getType()
+ ->containsUnexpandedParameterPack()))),
+ Base(static_cast<Stmt *>(Base)), IsArrow(isArrow),
+ OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
+ ScopeType(ScopeType), ColonColonLoc(ColonColonLoc), TildeLoc(TildeLoc),
+ DestroyedType(DestroyedType) { }
+
+QualType CXXPseudoDestructorExpr::getDestroyedType() const {
+ if (TypeSourceInfo *TInfo = DestroyedType.getTypeSourceInfo())
+ return TInfo->getType();
+
+ return QualType();
+}
+
+SourceLocation CXXPseudoDestructorExpr::getLocEnd() const {
+ SourceLocation End = DestroyedType.getLocation();
+ if (TypeSourceInfo *TInfo = DestroyedType.getTypeSourceInfo())
+ End = TInfo->getTypeLoc().getLocalSourceRange().getEnd();
+ return End;
+}
+
+// UnresolvedLookupExpr
+UnresolvedLookupExpr *
+UnresolvedLookupExpr::Create(const ASTContext &C,
+ CXXRecordDecl *NamingClass,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ bool ADL,
+ const TemplateArgumentListInfo *Args,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End)
+{
+ assert(Args || TemplateKWLoc.isValid());
+ unsigned num_args = Args ? Args->size() : 0;
+
+ std::size_t Size =
+ totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(1,
+ num_args);
+ void *Mem = C.Allocate(Size, llvm::alignOf<UnresolvedLookupExpr>());
+ return new (Mem) UnresolvedLookupExpr(C, NamingClass, QualifierLoc,
+ TemplateKWLoc, NameInfo,
+ ADL, /*Overload*/ true, Args,
+ Begin, End);
+}
+
+UnresolvedLookupExpr *
+UnresolvedLookupExpr::CreateEmpty(const ASTContext &C,
+ bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs) {
+ assert(NumTemplateArgs == 0 || HasTemplateKWAndArgsInfo);
+ std::size_t Size =
+ totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
+ HasTemplateKWAndArgsInfo, NumTemplateArgs);
+ void *Mem = C.Allocate(Size, llvm::alignOf<UnresolvedLookupExpr>());
+ UnresolvedLookupExpr *E = new (Mem) UnresolvedLookupExpr(EmptyShell());
+ E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
+ return E;
+}
+
+OverloadExpr::OverloadExpr(StmtClass K, const ASTContext &C,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End,
+ bool KnownDependent,
+ bool KnownInstantiationDependent,
+ bool KnownContainsUnexpandedParameterPack)
+ : Expr(K, C.OverloadTy, VK_LValue, OK_Ordinary, KnownDependent,
+ KnownDependent,
+ (KnownInstantiationDependent ||
+ NameInfo.isInstantiationDependent() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())),
+ (KnownContainsUnexpandedParameterPack ||
+ NameInfo.containsUnexpandedParameterPack() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()
+ ->containsUnexpandedParameterPack()))),
+ NameInfo(NameInfo), QualifierLoc(QualifierLoc),
+ Results(nullptr), NumResults(End - Begin),
+ HasTemplateKWAndArgsInfo(TemplateArgs != nullptr ||
+ TemplateKWLoc.isValid()) {
+ NumResults = End - Begin;
+ if (NumResults) {
+ // Determine whether this expression is type-dependent.
+ for (UnresolvedSetImpl::const_iterator I = Begin; I != End; ++I) {
+ if ((*I)->getDeclContext()->isDependentContext() ||
+ isa<UnresolvedUsingValueDecl>(*I)) {
+ ExprBits.TypeDependent = true;
+ ExprBits.ValueDependent = true;
+ ExprBits.InstantiationDependent = true;
+ }
+ }
+
+ Results = static_cast<DeclAccessPair *>(
+ C.Allocate(sizeof(DeclAccessPair) * NumResults,
+ llvm::alignOf<DeclAccessPair>()));
+ memcpy(Results, Begin.I, NumResults * sizeof(DeclAccessPair));
+ }
+
+ // If we have explicit template arguments, check for dependent
+ // template arguments and whether they contain any unexpanded pack
+ // expansions.
+ if (TemplateArgs) {
+ bool Dependent = false;
+ bool InstantiationDependent = false;
+ bool ContainsUnexpandedParameterPack = false;
+ getTrailingASTTemplateKWAndArgsInfo()->initializeFrom(
+ TemplateKWLoc, *TemplateArgs, getTrailingTemplateArgumentLoc(),
+ Dependent, InstantiationDependent, ContainsUnexpandedParameterPack);
+
+ if (Dependent) {
+ ExprBits.TypeDependent = true;
+ ExprBits.ValueDependent = true;
+ }
+ if (InstantiationDependent)
+ ExprBits.InstantiationDependent = true;
+ if (ContainsUnexpandedParameterPack)
+ ExprBits.ContainsUnexpandedParameterPack = true;
+ } else if (TemplateKWLoc.isValid()) {
+ getTrailingASTTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
+ }
+
+ if (isTypeDependent())
+ setType(C.DependentTy);
+}
+
+void OverloadExpr::initializeResults(const ASTContext &C,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End) {
+ assert(!Results && "Results already initialized!");
+ NumResults = End - Begin;
+ if (NumResults) {
+ Results = static_cast<DeclAccessPair *>(
+ C.Allocate(sizeof(DeclAccessPair) * NumResults,
+ llvm::alignOf<DeclAccessPair>()));
+ memcpy(Results, Begin.I, NumResults * sizeof(DeclAccessPair));
+ }
+}
+
+CXXRecordDecl *OverloadExpr::getNamingClass() const {
+ if (isa<UnresolvedLookupExpr>(this))
+ return cast<UnresolvedLookupExpr>(this)->getNamingClass();
+ else
+ return cast<UnresolvedMemberExpr>(this)->getNamingClass();
+}
+
+// DependentScopeDeclRefExpr
+DependentScopeDeclRefExpr::DependentScopeDeclRefExpr(QualType T,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *Args)
+ : Expr(DependentScopeDeclRefExprClass, T, VK_LValue, OK_Ordinary,
+ true, true,
+ (NameInfo.isInstantiationDependent() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())),
+ (NameInfo.containsUnexpandedParameterPack() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()
+ ->containsUnexpandedParameterPack()))),
+ QualifierLoc(QualifierLoc), NameInfo(NameInfo),
+ HasTemplateKWAndArgsInfo(Args != nullptr || TemplateKWLoc.isValid())
+{
+ if (Args) {
+ bool Dependent = true;
+ bool InstantiationDependent = true;
+ bool ContainsUnexpandedParameterPack
+ = ExprBits.ContainsUnexpandedParameterPack;
+ getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
+ TemplateKWLoc, *Args, getTrailingObjects<TemplateArgumentLoc>(),
+ Dependent, InstantiationDependent, ContainsUnexpandedParameterPack);
+ ExprBits.ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
+ } else if (TemplateKWLoc.isValid()) {
+ getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
+ TemplateKWLoc);
+ }
+}
+
+DependentScopeDeclRefExpr *
+DependentScopeDeclRefExpr::Create(const ASTContext &C,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *Args) {
+ assert(QualifierLoc && "should be created for dependent qualifiers");
+ bool HasTemplateKWAndArgsInfo = Args || TemplateKWLoc.isValid();
+ std::size_t Size =
+ totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
+ HasTemplateKWAndArgsInfo, Args ? Args->size() : 0);
+ void *Mem = C.Allocate(Size);
+ return new (Mem) DependentScopeDeclRefExpr(C.DependentTy, QualifierLoc,
+ TemplateKWLoc, NameInfo, Args);
+}
+
+DependentScopeDeclRefExpr *
+DependentScopeDeclRefExpr::CreateEmpty(const ASTContext &C,
+ bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs) {
+ assert(NumTemplateArgs == 0 || HasTemplateKWAndArgsInfo);
+ std::size_t Size =
+ totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
+ HasTemplateKWAndArgsInfo, NumTemplateArgs);
+ void *Mem = C.Allocate(Size);
+ DependentScopeDeclRefExpr *E
+ = new (Mem) DependentScopeDeclRefExpr(QualType(), NestedNameSpecifierLoc(),
+ SourceLocation(),
+ DeclarationNameInfo(), nullptr);
+ E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
+ return E;
+}
+
+SourceLocation CXXConstructExpr::getLocStart() const {
+ if (isa<CXXTemporaryObjectExpr>(this))
+ return cast<CXXTemporaryObjectExpr>(this)->getLocStart();
+ return Loc;
+}
+
+SourceLocation CXXConstructExpr::getLocEnd() const {
+ if (isa<CXXTemporaryObjectExpr>(this))
+ return cast<CXXTemporaryObjectExpr>(this)->getLocEnd();
+
+ if (ParenOrBraceRange.isValid())
+ return ParenOrBraceRange.getEnd();
+
+ SourceLocation End = Loc;
+ for (unsigned I = getNumArgs(); I > 0; --I) {
+ const Expr *Arg = getArg(I-1);
+ if (!Arg->isDefaultArgument()) {
+ SourceLocation NewEnd = Arg->getLocEnd();
+ if (NewEnd.isValid()) {
+ End = NewEnd;
+ break;
+ }
+ }
+ }
+
+ return End;
+}
+
+SourceRange CXXOperatorCallExpr::getSourceRangeImpl() const {
+ OverloadedOperatorKind Kind = getOperator();
+ if (Kind == OO_PlusPlus || Kind == OO_MinusMinus) {
+ if (getNumArgs() == 1)
+ // Prefix operator
+ return SourceRange(getOperatorLoc(), getArg(0)->getLocEnd());
+ else
+ // Postfix operator
+ return SourceRange(getArg(0)->getLocStart(), getOperatorLoc());
+ } else if (Kind == OO_Arrow) {
+ return getArg(0)->getSourceRange();
+ } else if (Kind == OO_Call) {
+ return SourceRange(getArg(0)->getLocStart(), getRParenLoc());
+ } else if (Kind == OO_Subscript) {
+ return SourceRange(getArg(0)->getLocStart(), getRParenLoc());
+ } else if (getNumArgs() == 1) {
+ return SourceRange(getOperatorLoc(), getArg(0)->getLocEnd());
+ } else if (getNumArgs() == 2) {
+ return SourceRange(getArg(0)->getLocStart(), getArg(1)->getLocEnd());
+ } else {
+ return getOperatorLoc();
+ }
+}
+
+Expr *CXXMemberCallExpr::getImplicitObjectArgument() const {
+ const Expr *Callee = getCallee()->IgnoreParens();
+ if (const MemberExpr *MemExpr = dyn_cast<MemberExpr>(Callee))
+ return MemExpr->getBase();
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(Callee))
+ if (BO->getOpcode() == BO_PtrMemD || BO->getOpcode() == BO_PtrMemI)
+ return BO->getLHS();
+
+ // FIXME: Will eventually need to cope with member pointers.
+ return nullptr;
+}
+
+CXXMethodDecl *CXXMemberCallExpr::getMethodDecl() const {
+ if (const MemberExpr *MemExpr =
+ dyn_cast<MemberExpr>(getCallee()->IgnoreParens()))
+ return cast<CXXMethodDecl>(MemExpr->getMemberDecl());
+
+ // FIXME: Will eventually need to cope with member pointers.
+ return nullptr;
+}
+
+
+CXXRecordDecl *CXXMemberCallExpr::getRecordDecl() const {
+ Expr* ThisArg = getImplicitObjectArgument();
+ if (!ThisArg)
+ return nullptr;
+
+ if (ThisArg->getType()->isAnyPointerType())
+ return ThisArg->getType()->getPointeeType()->getAsCXXRecordDecl();
+
+ return ThisArg->getType()->getAsCXXRecordDecl();
+}
+
+
+//===----------------------------------------------------------------------===//
+// Named casts
+//===----------------------------------------------------------------------===//
+
+/// getCastName - Get the name of the C++ cast being used, e.g.,
+/// "static_cast", "dynamic_cast", "reinterpret_cast", or
+/// "const_cast". The returned pointer must not be freed.
+const char *CXXNamedCastExpr::getCastName() const {
+ switch (getStmtClass()) {
+ case CXXStaticCastExprClass: return "static_cast";
+ case CXXDynamicCastExprClass: return "dynamic_cast";
+ case CXXReinterpretCastExprClass: return "reinterpret_cast";
+ case CXXConstCastExprClass: return "const_cast";
+ default: return "<invalid cast>";
+ }
+}
+
+CXXStaticCastExpr *CXXStaticCastExpr::Create(const ASTContext &C, QualType T,
+ ExprValueKind VK,
+ CastKind K, Expr *Op,
+ const CXXCastPath *BasePath,
+ TypeSourceInfo *WrittenTy,
+ SourceLocation L,
+ SourceLocation RParenLoc,
+ SourceRange AngleBrackets) {
+ unsigned PathSize = (BasePath ? BasePath->size() : 0);
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
+ CXXStaticCastExpr *E =
+ new (Buffer) CXXStaticCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
+ RParenLoc, AngleBrackets);
+ if (PathSize)
+ std::uninitialized_copy_n(BasePath->data(), BasePath->size(),
+ E->getTrailingObjects<CXXBaseSpecifier *>());
+ return E;
+}
+
+CXXStaticCastExpr *CXXStaticCastExpr::CreateEmpty(const ASTContext &C,
+ unsigned PathSize) {
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
+ return new (Buffer) CXXStaticCastExpr(EmptyShell(), PathSize);
+}
+
+CXXDynamicCastExpr *CXXDynamicCastExpr::Create(const ASTContext &C, QualType T,
+ ExprValueKind VK,
+ CastKind K, Expr *Op,
+ const CXXCastPath *BasePath,
+ TypeSourceInfo *WrittenTy,
+ SourceLocation L,
+ SourceLocation RParenLoc,
+ SourceRange AngleBrackets) {
+ unsigned PathSize = (BasePath ? BasePath->size() : 0);
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
+ CXXDynamicCastExpr *E =
+ new (Buffer) CXXDynamicCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
+ RParenLoc, AngleBrackets);
+ if (PathSize)
+ std::uninitialized_copy_n(BasePath->data(), BasePath->size(),
+ E->getTrailingObjects<CXXBaseSpecifier *>());
+ return E;
+}
+
+CXXDynamicCastExpr *CXXDynamicCastExpr::CreateEmpty(const ASTContext &C,
+ unsigned PathSize) {
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
+ return new (Buffer) CXXDynamicCastExpr(EmptyShell(), PathSize);
+}
+
+/// isAlwaysNull - Return whether the result of the dynamic_cast is proven
+/// to always be null. For example:
+///
+/// struct A { };
+/// struct B final : A { };
+/// struct C { };
+///
+/// C *f(B* b) { return dynamic_cast<C*>(b); }
+bool CXXDynamicCastExpr::isAlwaysNull() const
+{
+ QualType SrcType = getSubExpr()->getType();
+ QualType DestType = getType();
+
+ if (const PointerType *SrcPTy = SrcType->getAs<PointerType>()) {
+ SrcType = SrcPTy->getPointeeType();
+ DestType = DestType->castAs<PointerType>()->getPointeeType();
+ }
+
+ if (DestType->isVoidType())
+ return false;
+
+ const CXXRecordDecl *SrcRD =
+ cast<CXXRecordDecl>(SrcType->castAs<RecordType>()->getDecl());
+
+ if (!SrcRD->hasAttr<FinalAttr>())
+ return false;
+
+ const CXXRecordDecl *DestRD =
+ cast<CXXRecordDecl>(DestType->castAs<RecordType>()->getDecl());
+
+ return !DestRD->isDerivedFrom(SrcRD);
+}
+
+CXXReinterpretCastExpr *
+CXXReinterpretCastExpr::Create(const ASTContext &C, QualType T,
+ ExprValueKind VK, CastKind K, Expr *Op,
+ const CXXCastPath *BasePath,
+ TypeSourceInfo *WrittenTy, SourceLocation L,
+ SourceLocation RParenLoc,
+ SourceRange AngleBrackets) {
+ unsigned PathSize = (BasePath ? BasePath->size() : 0);
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
+ CXXReinterpretCastExpr *E =
+ new (Buffer) CXXReinterpretCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
+ RParenLoc, AngleBrackets);
+ if (PathSize)
+ std::uninitialized_copy_n(BasePath->data(), BasePath->size(),
+ E->getTrailingObjects<CXXBaseSpecifier *>());
+ return E;
+}
+
+CXXReinterpretCastExpr *
+CXXReinterpretCastExpr::CreateEmpty(const ASTContext &C, unsigned PathSize) {
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
+ return new (Buffer) CXXReinterpretCastExpr(EmptyShell(), PathSize);
+}
+
+CXXConstCastExpr *CXXConstCastExpr::Create(const ASTContext &C, QualType T,
+ ExprValueKind VK, Expr *Op,
+ TypeSourceInfo *WrittenTy,
+ SourceLocation L,
+ SourceLocation RParenLoc,
+ SourceRange AngleBrackets) {
+ return new (C) CXXConstCastExpr(T, VK, Op, WrittenTy, L, RParenLoc, AngleBrackets);
+}
+
+CXXConstCastExpr *CXXConstCastExpr::CreateEmpty(const ASTContext &C) {
+ return new (C) CXXConstCastExpr(EmptyShell());
+}
+
+CXXFunctionalCastExpr *
+CXXFunctionalCastExpr::Create(const ASTContext &C, QualType T, ExprValueKind VK,
+ TypeSourceInfo *Written, CastKind K, Expr *Op,
+ const CXXCastPath *BasePath,
+ SourceLocation L, SourceLocation R) {
+ unsigned PathSize = (BasePath ? BasePath->size() : 0);
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
+ CXXFunctionalCastExpr *E =
+ new (Buffer) CXXFunctionalCastExpr(T, VK, Written, K, Op, PathSize, L, R);
+ if (PathSize)
+ std::uninitialized_copy_n(BasePath->data(), BasePath->size(),
+ E->getTrailingObjects<CXXBaseSpecifier *>());
+ return E;
+}
+
+CXXFunctionalCastExpr *
+CXXFunctionalCastExpr::CreateEmpty(const ASTContext &C, unsigned PathSize) {
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
+ return new (Buffer) CXXFunctionalCastExpr(EmptyShell(), PathSize);
+}
+
+SourceLocation CXXFunctionalCastExpr::getLocStart() const {
+ return getTypeInfoAsWritten()->getTypeLoc().getLocStart();
+}
+
+SourceLocation CXXFunctionalCastExpr::getLocEnd() const {
+ return RParenLoc.isValid() ? RParenLoc : getSubExpr()->getLocEnd();
+}
+
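+// The kind of a literal operator follows from its parameters: no parameters
+// means the template form, two means the string form (e.g.
+// 'operator""_x(const char *, size_t)'), and a single pointer, character,
+// integer, or floating-point parameter selects the corresponding form.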
+UserDefinedLiteral::LiteralOperatorKind
+UserDefinedLiteral::getLiteralOperatorKind() const {
+ if (getNumArgs() == 0)
+ return LOK_Template;
+ if (getNumArgs() == 2)
+ return LOK_String;
+
+ assert(getNumArgs() == 1 && "unexpected #args in literal operator call");
+ QualType ParamTy =
+ cast<FunctionDecl>(getCalleeDecl())->getParamDecl(0)->getType();
+ if (ParamTy->isPointerType())
+ return LOK_Raw;
+ if (ParamTy->isAnyCharacterType())
+ return LOK_Character;
+ if (ParamTy->isIntegerType())
+ return LOK_Integer;
+ if (ParamTy->isFloatingType())
+ return LOK_Floating;
+
+ llvm_unreachable("unknown kind of literal operator");
+}
+
+Expr *UserDefinedLiteral::getCookedLiteral() {
+#ifndef NDEBUG
+ LiteralOperatorKind LOK = getLiteralOperatorKind();
+ assert(LOK != LOK_Template && LOK != LOK_Raw && "not a cooked literal");
+#endif
+ return getArg(0);
+}
+
+const IdentifierInfo *UserDefinedLiteral::getUDSuffix() const {
+ return cast<FunctionDecl>(getCalleeDecl())->getLiteralIdentifier();
+}
+
+CXXDefaultArgExpr *
+CXXDefaultArgExpr::Create(const ASTContext &C, SourceLocation Loc,
+ ParmVarDecl *Param, Expr *SubExpr) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(1));
+ return new (Mem) CXXDefaultArgExpr(CXXDefaultArgExprClass, Loc, Param,
+ SubExpr);
+}
+
+CXXDefaultInitExpr::CXXDefaultInitExpr(const ASTContext &C, SourceLocation Loc,
+ FieldDecl *Field, QualType T)
+ : Expr(CXXDefaultInitExprClass, T.getNonLValueExprType(C),
+ T->isLValueReferenceType() ? VK_LValue : T->isRValueReferenceType()
+ ? VK_XValue
+ : VK_RValue,
+ /*FIXME*/ OK_Ordinary, false, false, false, false),
+ Field(Field), Loc(Loc) {
+ assert(Field->hasInClassInitializer());
+}
+
+CXXTemporary *CXXTemporary::Create(const ASTContext &C,
+ const CXXDestructorDecl *Destructor) {
+ return new (C) CXXTemporary(Destructor);
+}
+
+CXXBindTemporaryExpr *CXXBindTemporaryExpr::Create(const ASTContext &C,
+ CXXTemporary *Temp,
+ Expr* SubExpr) {
+ assert((SubExpr->getType()->isRecordType() ||
+ SubExpr->getType()->isArrayType()) &&
+ "Expression bound to a temporary must have record or array type!");
+
+ return new (C) CXXBindTemporaryExpr(Temp, SubExpr);
+}
+
+CXXTemporaryObjectExpr::CXXTemporaryObjectExpr(const ASTContext &C,
+ CXXConstructorDecl *Cons,
+ TypeSourceInfo *Type,
+ ArrayRef<Expr*> Args,
+ SourceRange ParenOrBraceRange,
+ bool HadMultipleCandidates,
+ bool ListInitialization,
+ bool StdInitListInitialization,
+ bool ZeroInitialization)
+ : CXXConstructExpr(C, CXXTemporaryObjectExprClass,
+ Type->getType().getNonReferenceType(),
+ Type->getTypeLoc().getBeginLoc(),
+ Cons, false, Args,
+ HadMultipleCandidates,
+ ListInitialization,
+ StdInitListInitialization,
+ ZeroInitialization,
+ CXXConstructExpr::CK_Complete, ParenOrBraceRange),
+ Type(Type) {
+}
+
+SourceLocation CXXTemporaryObjectExpr::getLocStart() const {
+ return Type->getTypeLoc().getBeginLoc();
+}
+
+SourceLocation CXXTemporaryObjectExpr::getLocEnd() const {
+ SourceLocation Loc = getParenOrBraceRange().getEnd();
+ if (Loc.isInvalid() && getNumArgs())
+ Loc = getArg(getNumArgs()-1)->getLocEnd();
+ return Loc;
+}
+
+CXXConstructExpr *CXXConstructExpr::Create(const ASTContext &C, QualType T,
+ SourceLocation Loc,
+ CXXConstructorDecl *D, bool Elidable,
+ ArrayRef<Expr*> Args,
+ bool HadMultipleCandidates,
+ bool ListInitialization,
+ bool StdInitListInitialization,
+ bool ZeroInitialization,
+ ConstructionKind ConstructKind,
+ SourceRange ParenOrBraceRange) {
+ return new (C) CXXConstructExpr(C, CXXConstructExprClass, T, Loc, D,
+ Elidable, Args,
+ HadMultipleCandidates, ListInitialization,
+ StdInitListInitialization,
+ ZeroInitialization, ConstructKind,
+ ParenOrBraceRange);
+}
+
+CXXConstructExpr::CXXConstructExpr(const ASTContext &C, StmtClass SC,
+ QualType T, SourceLocation Loc,
+ CXXConstructorDecl *D, bool elidable,
+ ArrayRef<Expr*> args,
+ bool HadMultipleCandidates,
+ bool ListInitialization,
+ bool StdInitListInitialization,
+ bool ZeroInitialization,
+ ConstructionKind ConstructKind,
+ SourceRange ParenOrBraceRange)
+ : Expr(SC, T, VK_RValue, OK_Ordinary,
+ T->isDependentType(), T->isDependentType(),
+ T->isInstantiationDependentType(),
+ T->containsUnexpandedParameterPack()),
+ Constructor(D), Loc(Loc), ParenOrBraceRange(ParenOrBraceRange),
+ NumArgs(args.size()),
+ Elidable(elidable), HadMultipleCandidates(HadMultipleCandidates),
+ ListInitialization(ListInitialization),
+ StdInitListInitialization(StdInitListInitialization),
+ ZeroInitialization(ZeroInitialization),
+ ConstructKind(ConstructKind), Args(nullptr)
+{
+ if (NumArgs) {
+ Args = new (C) Stmt*[args.size()];
+
+ for (unsigned i = 0; i != args.size(); ++i) {
+ assert(args[i] && "NULL argument in CXXConstructExpr");
+
+ if (args[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (args[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (args[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ Args[i] = args[i];
+ }
+ }
+}
+
+LambdaCapture::LambdaCapture(SourceLocation Loc, bool Implicit,
+ LambdaCaptureKind Kind, VarDecl *Var,
+ SourceLocation EllipsisLoc)
+ : DeclAndBits(Var, 0), Loc(Loc), EllipsisLoc(EllipsisLoc)
+{
+ unsigned Bits = 0;
+ if (Implicit)
+ Bits |= Capture_Implicit;
+
+ switch (Kind) {
+ case LCK_This:
+ assert(!Var && "'this' capture cannot have a variable!");
+ break;
+
+ case LCK_ByCopy:
+ Bits |= Capture_ByCopy;
+ // Fall through
+ case LCK_ByRef:
+ assert(Var && "capture must have a variable!");
+ break;
+ case LCK_VLAType:
+ assert(!Var && "VLA type capture cannot have a variable!");
+ Bits |= Capture_ByCopy;
+ break;
+ }
+ DeclAndBits.setInt(Bits);
+}
+
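+// The capture kind is packed into the declaration/flags pair: a null
+// declaration means a 'this' capture, or a VLA-type capture when the by-copy
+// bit is also set.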
+LambdaCaptureKind LambdaCapture::getCaptureKind() const {
+ Decl *D = DeclAndBits.getPointer();
+ bool CapByCopy = DeclAndBits.getInt() & Capture_ByCopy;
+ if (!D)
+ return CapByCopy ? LCK_VLAType : LCK_This;
+
+ return CapByCopy ? LCK_ByCopy : LCK_ByRef;
+}
+
+LambdaExpr::LambdaExpr(QualType T, SourceRange IntroducerRange,
+ LambdaCaptureDefault CaptureDefault,
+ SourceLocation CaptureDefaultLoc,
+ ArrayRef<LambdaCapture> Captures, bool ExplicitParams,
+ bool ExplicitResultType, ArrayRef<Expr *> CaptureInits,
+ ArrayRef<VarDecl *> ArrayIndexVars,
+ ArrayRef<unsigned> ArrayIndexStarts,
+ SourceLocation ClosingBrace,
+ bool ContainsUnexpandedParameterPack)
+ : Expr(LambdaExprClass, T, VK_RValue, OK_Ordinary, T->isDependentType(),
+ T->isDependentType(), T->isDependentType(),
+ ContainsUnexpandedParameterPack),
+ IntroducerRange(IntroducerRange), CaptureDefaultLoc(CaptureDefaultLoc),
+ NumCaptures(Captures.size()), CaptureDefault(CaptureDefault),
+ ExplicitParams(ExplicitParams), ExplicitResultType(ExplicitResultType),
+ ClosingBrace(ClosingBrace) {
+ assert(CaptureInits.size() == Captures.size() && "Wrong number of arguments");
+ CXXRecordDecl *Class = getLambdaClass();
+ CXXRecordDecl::LambdaDefinitionData &Data = Class->getLambdaData();
+
+ // FIXME: Propagate "has unexpanded parameter pack" bit.
+
+ // Copy captures.
+ const ASTContext &Context = Class->getASTContext();
+ Data.NumCaptures = NumCaptures;
+ Data.NumExplicitCaptures = 0;
+ Data.Captures =
+ (LambdaCapture *)Context.Allocate(sizeof(LambdaCapture) * NumCaptures);
+ LambdaCapture *ToCapture = Data.Captures;
+ for (unsigned I = 0, N = Captures.size(); I != N; ++I) {
+ if (Captures[I].isExplicit())
+ ++Data.NumExplicitCaptures;
+
+ *ToCapture++ = Captures[I];
+ }
+
+ // Copy initialization expressions for the non-static data members.
+ Stmt **Stored = getStoredStmts();
+ for (unsigned I = 0, N = CaptureInits.size(); I != N; ++I)
+ *Stored++ = CaptureInits[I];
+
+ // Copy the body of the lambda.
+ *Stored++ = getCallOperator()->getBody();
+
+ // Copy the array index variables, if any.
+ HasArrayIndexVars = !ArrayIndexVars.empty();
+ if (HasArrayIndexVars) {
+ assert(ArrayIndexStarts.size() == NumCaptures);
+ memcpy(getArrayIndexVars(), ArrayIndexVars.data(),
+ sizeof(VarDecl *) * ArrayIndexVars.size());
+ memcpy(getArrayIndexStarts(), ArrayIndexStarts.data(),
+ sizeof(unsigned) * Captures.size());
+ getArrayIndexStarts()[Captures.size()] = ArrayIndexVars.size();
+ }
+}
+
+LambdaExpr *LambdaExpr::Create(
+ const ASTContext &Context, CXXRecordDecl *Class,
+ SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault,
+ SourceLocation CaptureDefaultLoc, ArrayRef<LambdaCapture> Captures,
+ bool ExplicitParams, bool ExplicitResultType, ArrayRef<Expr *> CaptureInits,
+ ArrayRef<VarDecl *> ArrayIndexVars, ArrayRef<unsigned> ArrayIndexStarts,
+ SourceLocation ClosingBrace, bool ContainsUnexpandedParameterPack) {
+ // Determine the type of the expression (i.e., the type of the
+ // function object we're creating).
+ QualType T = Context.getTypeDeclType(Class);
+
+ unsigned Size = totalSizeToAlloc<Stmt *, unsigned, VarDecl *>(
+ Captures.size() + 1, ArrayIndexVars.empty() ? 0 : Captures.size() + 1,
+ ArrayIndexVars.size());
+ void *Mem = Context.Allocate(Size);
+ return new (Mem) LambdaExpr(T, IntroducerRange,
+ CaptureDefault, CaptureDefaultLoc, Captures,
+ ExplicitParams, ExplicitResultType,
+ CaptureInits, ArrayIndexVars, ArrayIndexStarts,
+ ClosingBrace, ContainsUnexpandedParameterPack);
+}
+
+LambdaExpr *LambdaExpr::CreateDeserialized(const ASTContext &C,
+ unsigned NumCaptures,
+ unsigned NumArrayIndexVars) {
+ unsigned Size = totalSizeToAlloc<Stmt *, unsigned, VarDecl *>(
+ NumCaptures + 1, NumArrayIndexVars ? NumCaptures + 1 : 0,
+ NumArrayIndexVars);
+ void *Mem = C.Allocate(Size);
+ return new (Mem) LambdaExpr(EmptyShell(), NumCaptures, NumArrayIndexVars > 0);
+}
+
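+// An init-capture such as '[x = 0]' introduces a variable declared within the
+// lambda's call operator, which is what the check below relies on.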
+bool LambdaExpr::isInitCapture(const LambdaCapture *C) const {
+ return (C->capturesVariable() && C->getCapturedVar()->isInitCapture() &&
+ (getCallOperator() == C->getCapturedVar()->getDeclContext()));
+}
+
+LambdaExpr::capture_iterator LambdaExpr::capture_begin() const {
+ return getLambdaClass()->getLambdaData().Captures;
+}
+
+LambdaExpr::capture_iterator LambdaExpr::capture_end() const {
+ return capture_begin() + NumCaptures;
+}
+
+LambdaExpr::capture_range LambdaExpr::captures() const {
+ return capture_range(capture_begin(), capture_end());
+}
+
+LambdaExpr::capture_iterator LambdaExpr::explicit_capture_begin() const {
+ return capture_begin();
+}
+
+LambdaExpr::capture_iterator LambdaExpr::explicit_capture_end() const {
+ struct CXXRecordDecl::LambdaDefinitionData &Data
+ = getLambdaClass()->getLambdaData();
+ return Data.Captures + Data.NumExplicitCaptures;
+}
+
+LambdaExpr::capture_range LambdaExpr::explicit_captures() const {
+ return capture_range(explicit_capture_begin(), explicit_capture_end());
+}
+
+LambdaExpr::capture_iterator LambdaExpr::implicit_capture_begin() const {
+ return explicit_capture_end();
+}
+
+LambdaExpr::capture_iterator LambdaExpr::implicit_capture_end() const {
+ return capture_end();
+}
+
+LambdaExpr::capture_range LambdaExpr::implicit_captures() const {
+ return capture_range(implicit_capture_begin(), implicit_capture_end());
+}
+
+ArrayRef<VarDecl *>
+LambdaExpr::getCaptureInitIndexVars(const_capture_init_iterator Iter) const {
+ assert(HasArrayIndexVars && "No array index-var data?");
+
+ unsigned Index = Iter - capture_init_begin();
+ assert(Index < getLambdaClass()->getLambdaData().NumCaptures &&
+ "Capture index out-of-range");
+ VarDecl *const *IndexVars = getArrayIndexVars();
+ const unsigned *IndexStarts = getArrayIndexStarts();
+ return llvm::makeArrayRef(IndexVars + IndexStarts[Index],
+ IndexVars + IndexStarts[Index + 1]);
+}
+
+CXXRecordDecl *LambdaExpr::getLambdaClass() const {
+ return getType()->getAsCXXRecordDecl();
+}
+
+CXXMethodDecl *LambdaExpr::getCallOperator() const {
+ CXXRecordDecl *Record = getLambdaClass();
+ return Record->getLambdaCallOperator();
+}
+
+TemplateParameterList *LambdaExpr::getTemplateParameterList() const {
+ CXXRecordDecl *Record = getLambdaClass();
+ return Record->getGenericLambdaTemplateParameterList();
+}
+
+CompoundStmt *LambdaExpr::getBody() const {
+ // FIXME: this mutation in getBody is bogus. It should be
+ // initialized in ASTStmtReader::VisitLambdaExpr, but for reasons I
+ // don't understand, that doesn't work.
+ if (!getStoredStmts()[NumCaptures])
+ *const_cast<clang::Stmt **>(&getStoredStmts()[NumCaptures]) =
+ getCallOperator()->getBody();
+
+ return static_cast<CompoundStmt *>(getStoredStmts()[NumCaptures]);
+}
+
+bool LambdaExpr::isMutable() const {
+ return !getCallOperator()->isConst();
+}
+
+ExprWithCleanups::ExprWithCleanups(Expr *subexpr,
+ ArrayRef<CleanupObject> objects)
+ : Expr(ExprWithCleanupsClass, subexpr->getType(),
+ subexpr->getValueKind(), subexpr->getObjectKind(),
+ subexpr->isTypeDependent(), subexpr->isValueDependent(),
+ subexpr->isInstantiationDependent(),
+ subexpr->containsUnexpandedParameterPack()),
+ SubExpr(subexpr) {
+ ExprWithCleanupsBits.NumObjects = objects.size();
+ for (unsigned i = 0, e = objects.size(); i != e; ++i)
+ getTrailingObjects<CleanupObject>()[i] = objects[i];
+}
+
+ExprWithCleanups *ExprWithCleanups::Create(const ASTContext &C, Expr *subexpr,
+ ArrayRef<CleanupObject> objects) {
+ void *buffer = C.Allocate(totalSizeToAlloc<CleanupObject>(objects.size()),
+ llvm::alignOf<ExprWithCleanups>());
+ return new (buffer) ExprWithCleanups(subexpr, objects);
+}
+
+ExprWithCleanups::ExprWithCleanups(EmptyShell empty, unsigned numObjects)
+ : Expr(ExprWithCleanupsClass, empty) {
+ ExprWithCleanupsBits.NumObjects = numObjects;
+}
+
+ExprWithCleanups *ExprWithCleanups::Create(const ASTContext &C,
+ EmptyShell empty,
+ unsigned numObjects) {
+ void *buffer = C.Allocate(totalSizeToAlloc<CleanupObject>(numObjects),
+ llvm::alignOf<ExprWithCleanups>());
+ return new (buffer) ExprWithCleanups(empty, numObjects);
+}
+
+CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(TypeSourceInfo *Type,
+ SourceLocation LParenLoc,
+ ArrayRef<Expr*> Args,
+ SourceLocation RParenLoc)
+ : Expr(CXXUnresolvedConstructExprClass,
+ Type->getType().getNonReferenceType(),
+         (Type->getType()->isLValueReferenceType() ? VK_LValue
+          : Type->getType()->isRValueReferenceType() ? VK_XValue
+          : VK_RValue),
+ OK_Ordinary,
+ Type->getType()->isDependentType(), true, true,
+ Type->getType()->containsUnexpandedParameterPack()),
+ Type(Type),
+ LParenLoc(LParenLoc),
+ RParenLoc(RParenLoc),
+ NumArgs(Args.size()) {
+ Expr **StoredArgs = getTrailingObjects<Expr *>();
+ for (unsigned I = 0; I != Args.size(); ++I) {
+ if (Args[I]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ StoredArgs[I] = Args[I];
+ }
+}
+
+CXXUnresolvedConstructExpr *
+CXXUnresolvedConstructExpr::Create(const ASTContext &C,
+ TypeSourceInfo *Type,
+ SourceLocation LParenLoc,
+ ArrayRef<Expr*> Args,
+ SourceLocation RParenLoc) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(Args.size()));
+ return new (Mem) CXXUnresolvedConstructExpr(Type, LParenLoc, Args, RParenLoc);
+}
+
+CXXUnresolvedConstructExpr *
+CXXUnresolvedConstructExpr::CreateEmpty(const ASTContext &C, unsigned NumArgs) {
+ Stmt::EmptyShell Empty;
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(NumArgs));
+ return new (Mem) CXXUnresolvedConstructExpr(Empty, NumArgs);
+}
+
+SourceLocation CXXUnresolvedConstructExpr::getLocStart() const {
+ return Type->getTypeLoc().getBeginLoc();
+}
+
+CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(
+ const ASTContext &C, Expr *Base, QualType BaseType, bool IsArrow,
+ SourceLocation OperatorLoc, NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierFoundInScope,
+ DeclarationNameInfo MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs)
+ : Expr(CXXDependentScopeMemberExprClass, C.DependentTy, VK_LValue,
+ OK_Ordinary, true, true, true,
+ ((Base && Base->containsUnexpandedParameterPack()) ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()
+ ->containsUnexpandedParameterPack()) ||
+ MemberNameInfo.containsUnexpandedParameterPack())),
+ Base(Base), BaseType(BaseType), IsArrow(IsArrow),
+ HasTemplateKWAndArgsInfo(TemplateArgs != nullptr ||
+ TemplateKWLoc.isValid()),
+ OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
+ FirstQualifierFoundInScope(FirstQualifierFoundInScope),
+ MemberNameInfo(MemberNameInfo) {
+ if (TemplateArgs) {
+ bool Dependent = true;
+ bool InstantiationDependent = true;
+ bool ContainsUnexpandedParameterPack = false;
+ getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
+ TemplateKWLoc, *TemplateArgs, getTrailingObjects<TemplateArgumentLoc>(),
+ Dependent, InstantiationDependent, ContainsUnexpandedParameterPack);
+ if (ContainsUnexpandedParameterPack)
+ ExprBits.ContainsUnexpandedParameterPack = true;
+ } else if (TemplateKWLoc.isValid()) {
+ getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
+ TemplateKWLoc);
+ }
+}
+
+CXXDependentScopeMemberExpr *
+CXXDependentScopeMemberExpr::Create(const ASTContext &C,
+ Expr *Base, QualType BaseType, bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierFoundInScope,
+ DeclarationNameInfo MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ bool HasTemplateKWAndArgsInfo = TemplateArgs || TemplateKWLoc.isValid();
+ unsigned NumTemplateArgs = TemplateArgs ? TemplateArgs->size() : 0;
+ std::size_t Size =
+ totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
+ HasTemplateKWAndArgsInfo, NumTemplateArgs);
+
+ void *Mem = C.Allocate(Size, llvm::alignOf<CXXDependentScopeMemberExpr>());
+ return new (Mem) CXXDependentScopeMemberExpr(C, Base, BaseType,
+ IsArrow, OperatorLoc,
+ QualifierLoc,
+ TemplateKWLoc,
+ FirstQualifierFoundInScope,
+ MemberNameInfo, TemplateArgs);
+}
+
+CXXDependentScopeMemberExpr *
+CXXDependentScopeMemberExpr::CreateEmpty(const ASTContext &C,
+ bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs) {
+ assert(NumTemplateArgs == 0 || HasTemplateKWAndArgsInfo);
+ std::size_t Size =
+ totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
+ HasTemplateKWAndArgsInfo, NumTemplateArgs);
+ void *Mem = C.Allocate(Size, llvm::alignOf<CXXDependentScopeMemberExpr>());
+ CXXDependentScopeMemberExpr *E
+ = new (Mem) CXXDependentScopeMemberExpr(C, nullptr, QualType(),
+                                            false, SourceLocation(),
+ NestedNameSpecifierLoc(),
+ SourceLocation(), nullptr,
+ DeclarationNameInfo(), nullptr);
+ E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
+ return E;
+}
+
+bool CXXDependentScopeMemberExpr::isImplicitAccess() const {
+ if (!Base)
+ return true;
+
+ return cast<Expr>(Base)->isImplicitCXXThis();
+}
+
+static bool hasOnlyNonStaticMemberFunctions(UnresolvedSetIterator begin,
+ UnresolvedSetIterator end) {
+ do {
+ NamedDecl *decl = *begin;
+ if (isa<UnresolvedUsingValueDecl>(decl))
+ return false;
+
+ // Unresolved member expressions should only contain methods and
+ // method templates.
+ if (cast<CXXMethodDecl>(decl->getUnderlyingDecl()->getAsFunction())
+ ->isStatic())
+ return false;
+ } while (++begin != end);
+
+ return true;
+}
+
+UnresolvedMemberExpr::UnresolvedMemberExpr(const ASTContext &C,
+ bool HasUnresolvedUsing,
+ Expr *Base, QualType BaseType,
+ bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End)
+ : OverloadExpr(UnresolvedMemberExprClass, C, QualifierLoc, TemplateKWLoc,
+ MemberNameInfo, TemplateArgs, Begin, End,
+ // Dependent
+ ((Base && Base->isTypeDependent()) ||
+ BaseType->isDependentType()),
+ ((Base && Base->isInstantiationDependent()) ||
+ BaseType->isInstantiationDependentType()),
+ // Contains unexpanded parameter pack
+ ((Base && Base->containsUnexpandedParameterPack()) ||
+ BaseType->containsUnexpandedParameterPack())),
+ IsArrow(IsArrow), HasUnresolvedUsing(HasUnresolvedUsing),
+ Base(Base), BaseType(BaseType), OperatorLoc(OperatorLoc) {
+
+  // Check whether all of the members are non-static member functions,
+  // and if so, give this the bound-member type instead of the overload type.
+ if (hasOnlyNonStaticMemberFunctions(Begin, End))
+ setType(C.BoundMemberTy);
+}
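+
+// For illustration: given `struct S { void f(); void f(int); } s;`, the
+// overloaded reference `s.f` names only non-static member functions, so the
+// resulting UnresolvedMemberExpr gets the BoundMemberTy placeholder type
+// rather than OverloadTy.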
+
+bool UnresolvedMemberExpr::isImplicitAccess() const {
+ if (!Base)
+ return true;
+
+ return cast<Expr>(Base)->isImplicitCXXThis();
+}
+
+UnresolvedMemberExpr *UnresolvedMemberExpr::Create(
+ const ASTContext &C, bool HasUnresolvedUsing, Expr *Base, QualType BaseType,
+ bool IsArrow, SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs, UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End) {
+ bool HasTemplateKWAndArgsInfo = TemplateArgs || TemplateKWLoc.isValid();
+ std::size_t Size =
+ totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
+ HasTemplateKWAndArgsInfo, TemplateArgs ? TemplateArgs->size() : 0);
+
+ void *Mem = C.Allocate(Size, llvm::alignOf<UnresolvedMemberExpr>());
+ return new (Mem) UnresolvedMemberExpr(
+ C, HasUnresolvedUsing, Base, BaseType, IsArrow, OperatorLoc, QualifierLoc,
+ TemplateKWLoc, MemberNameInfo, TemplateArgs, Begin, End);
+}
+
+UnresolvedMemberExpr *
+UnresolvedMemberExpr::CreateEmpty(const ASTContext &C,
+ bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs) {
+ assert(NumTemplateArgs == 0 || HasTemplateKWAndArgsInfo);
+ std::size_t Size =
+ totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
+ HasTemplateKWAndArgsInfo, NumTemplateArgs);
+
+ void *Mem = C.Allocate(Size, llvm::alignOf<UnresolvedMemberExpr>());
+ UnresolvedMemberExpr *E = new (Mem) UnresolvedMemberExpr(EmptyShell());
+ E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
+ return E;
+}
+
+CXXRecordDecl *UnresolvedMemberExpr::getNamingClass() const {
+ // Unlike for UnresolvedLookupExpr, it is very easy to re-derive this.
+
+ // If there was a nested name specifier, it names the naming class.
+ // It can't be dependent: after all, we were actually able to do the
+ // lookup.
+ CXXRecordDecl *Record = nullptr;
+ auto *NNS = getQualifier();
+ if (NNS && NNS->getKind() != NestedNameSpecifier::Super) {
+ const Type *T = getQualifier()->getAsType();
+ assert(T && "qualifier in member expression does not name type");
+ Record = T->getAsCXXRecordDecl();
+ assert(Record && "qualifier in member expression does not name record");
+ }
+ // Otherwise the naming class must have been the base class.
+ else {
+ QualType BaseType = getBaseType().getNonReferenceType();
+ if (isArrow()) {
+ const PointerType *PT = BaseType->getAs<PointerType>();
+ assert(PT && "base of arrow member access is not pointer");
+ BaseType = PT->getPointeeType();
+ }
+
+ Record = BaseType->getAsCXXRecordDecl();
+ assert(Record && "base of member expression does not name record");
+ }
+
+ return Record;
+}
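+
+// For illustration: in `d.Base::f(...)` the nested-name-specifier names
+// `Base`, which is therefore the naming class; in `p->f(...)` with
+// `Derived *p` there is no qualifier, so the naming class is recovered from
+// the pointee type of the base expression, i.e. `Derived`.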
+
+SizeOfPackExpr *
+SizeOfPackExpr::Create(ASTContext &Context, SourceLocation OperatorLoc,
+ NamedDecl *Pack, SourceLocation PackLoc,
+ SourceLocation RParenLoc,
+ Optional<unsigned> Length,
+ ArrayRef<TemplateArgument> PartialArgs) {
+ void *Storage =
+ Context.Allocate(totalSizeToAlloc<TemplateArgument>(PartialArgs.size()));
+ return new (Storage) SizeOfPackExpr(Context.getSizeType(), OperatorLoc, Pack,
+ PackLoc, RParenLoc, Length, PartialArgs);
+}
+
+SizeOfPackExpr *SizeOfPackExpr::CreateDeserialized(ASTContext &Context,
+ unsigned NumPartialArgs) {
+ void *Storage =
+ Context.Allocate(totalSizeToAlloc<TemplateArgument>(NumPartialArgs));
+ return new (Storage) SizeOfPackExpr(EmptyShell(), NumPartialArgs);
+}
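+
+// For illustration: `sizeof...(Ts)` is represented as a SizeOfPackExpr; once
+// `Ts` is known to contain, say, three arguments, Length is 3, and during
+// partial substitution the arguments substituted so far are retained in
+// PartialArgs.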
+
+SubstNonTypeTemplateParmPackExpr::
+SubstNonTypeTemplateParmPackExpr(QualType T,
+ NonTypeTemplateParmDecl *Param,
+ SourceLocation NameLoc,
+ const TemplateArgument &ArgPack)
+ : Expr(SubstNonTypeTemplateParmPackExprClass, T, VK_RValue, OK_Ordinary,
+ true, true, true, true),
+ Param(Param), Arguments(ArgPack.pack_begin()),
+ NumArguments(ArgPack.pack_size()), NameLoc(NameLoc) { }
+
+TemplateArgument SubstNonTypeTemplateParmPackExpr::getArgumentPack() const {
+ return TemplateArgument(llvm::makeArrayRef(Arguments, NumArguments));
+}
+
+FunctionParmPackExpr::FunctionParmPackExpr(QualType T, ParmVarDecl *ParamPack,
+ SourceLocation NameLoc,
+ unsigned NumParams,
+ ParmVarDecl *const *Params)
+ : Expr(FunctionParmPackExprClass, T, VK_LValue, OK_Ordinary, true, true,
+ true, true),
+ ParamPack(ParamPack), NameLoc(NameLoc), NumParameters(NumParams) {
+ if (Params)
+ std::uninitialized_copy(Params, Params + NumParams,
+ getTrailingObjects<ParmVarDecl *>());
+}
+
+FunctionParmPackExpr *
+FunctionParmPackExpr::Create(const ASTContext &Context, QualType T,
+ ParmVarDecl *ParamPack, SourceLocation NameLoc,
+ ArrayRef<ParmVarDecl *> Params) {
+ return new (Context.Allocate(totalSizeToAlloc<ParmVarDecl *>(Params.size())))
+ FunctionParmPackExpr(T, ParamPack, NameLoc, Params.size(), Params.data());
+}
+
+FunctionParmPackExpr *
+FunctionParmPackExpr::CreateEmpty(const ASTContext &Context,
+ unsigned NumParams) {
+ return new (Context.Allocate(totalSizeToAlloc<ParmVarDecl *>(NumParams)))
+ FunctionParmPackExpr(QualType(), nullptr, SourceLocation(), 0, nullptr);
+}
+
+void MaterializeTemporaryExpr::setExtendingDecl(const ValueDecl *ExtendedBy,
+ unsigned ManglingNumber) {
+ // We only need extra state if we have to remember more than just the Stmt.
+ if (!ExtendedBy)
+ return;
+
+ // We may need to allocate extra storage for the mangling number and the
+ // extended-by ValueDecl.
+ if (!State.is<ExtraState *>()) {
+ auto ES = new (ExtendedBy->getASTContext()) ExtraState;
+ ES->Temporary = State.get<Stmt *>();
+ State = ES;
+ }
+
+ auto ES = State.get<ExtraState *>();
+ ES->ExtendingDecl = ExtendedBy;
+ ES->ManglingNumber = ManglingNumber;
+}
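+
+// For illustration: for `const std::string &r = std::string("tmp");` the
+// materialized temporary is lifetime-extended, and setExtendingDecl() records
+// the VarDecl for `r` along with a mangling number that disambiguates
+// multiple lifetime-extended temporaries of the same declaration.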
+
+TypeTraitExpr::TypeTraitExpr(QualType T, SourceLocation Loc, TypeTrait Kind,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc,
+ bool Value)
+ : Expr(TypeTraitExprClass, T, VK_RValue, OK_Ordinary,
+ /*TypeDependent=*/false,
+ /*ValueDependent=*/false,
+ /*InstantiationDependent=*/false,
+ /*ContainsUnexpandedParameterPack=*/false),
+ Loc(Loc), RParenLoc(RParenLoc)
+{
+ TypeTraitExprBits.Kind = Kind;
+ TypeTraitExprBits.Value = Value;
+ TypeTraitExprBits.NumArgs = Args.size();
+
+ TypeSourceInfo **ToArgs = getTrailingObjects<TypeSourceInfo *>();
+
+ for (unsigned I = 0, N = Args.size(); I != N; ++I) {
+ if (Args[I]->getType()->isDependentType())
+ setValueDependent(true);
+ if (Args[I]->getType()->isInstantiationDependentType())
+ setInstantiationDependent(true);
+ if (Args[I]->getType()->containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack(true);
+
+ ToArgs[I] = Args[I];
+ }
+}
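+
+// For illustration: inside a template, `__is_constructible(T, int)` has a
+// dependent argument `T`, so the TypeTraitExpr is marked value-dependent and
+// instantiation-dependent here instead of being evaluated immediately.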
+
+TypeTraitExpr *TypeTraitExpr::Create(const ASTContext &C, QualType T,
+ SourceLocation Loc,
+ TypeTrait Kind,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc,
+ bool Value) {
+ void *Mem = C.Allocate(totalSizeToAlloc<TypeSourceInfo *>(Args.size()));
+ return new (Mem) TypeTraitExpr(T, Loc, Kind, Args, RParenLoc, Value);
+}
+
+TypeTraitExpr *TypeTraitExpr::CreateDeserialized(const ASTContext &C,
+ unsigned NumArgs) {
+ void *Mem = C.Allocate(totalSizeToAlloc<TypeSourceInfo *>(NumArgs));
+ return new (Mem) TypeTraitExpr(EmptyShell());
+}
+
+void ArrayTypeTraitExpr::anchor() { }
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp b/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp
new file mode 100644
index 0000000..a47b03c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp
@@ -0,0 +1,688 @@
+//===--- ExprClassification.cpp - Expression Classification ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements Expr::classify.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Expr.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+
+typedef Expr::Classification Cl;
+
+static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E);
+static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D);
+static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T);
+static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E);
+static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E);
+static Cl::Kinds ClassifyConditional(ASTContext &Ctx,
+ const Expr *trueExpr,
+ const Expr *falseExpr);
+static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
+ Cl::Kinds Kind, SourceLocation &Loc);
+
+Cl Expr::ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const {
+ assert(!TR->isReferenceType() && "Expressions can't have reference type.");
+
+ Cl::Kinds kind = ClassifyInternal(Ctx, this);
+ // C99 6.3.2.1: An lvalue is an expression with an object type or an
+ // incomplete type other than void.
+ if (!Ctx.getLangOpts().CPlusPlus) {
+ // Thus, no functions.
+ if (TR->isFunctionType() || TR == Ctx.OverloadTy)
+ kind = Cl::CL_Function;
+ // No void either, but qualified void is OK because it is "other than void".
+ // Void "lvalues" are classified as addressable void values, which are void
+ // expressions whose address can be taken.
+ else if (TR->isVoidType() && !TR.hasQualifiers())
+ kind = (kind == Cl::CL_LValue ? Cl::CL_AddressableVoid : Cl::CL_Void);
+ }
+
+ // Enable this assertion for testing.
+ switch (kind) {
+ case Cl::CL_LValue: assert(getValueKind() == VK_LValue); break;
+ case Cl::CL_XValue: assert(getValueKind() == VK_XValue); break;
+ case Cl::CL_Function:
+ case Cl::CL_Void:
+ case Cl::CL_AddressableVoid:
+ case Cl::CL_DuplicateVectorComponents:
+ case Cl::CL_MemberFunction:
+ case Cl::CL_SubObjCPropertySetting:
+ case Cl::CL_ClassTemporary:
+ case Cl::CL_ArrayTemporary:
+ case Cl::CL_ObjCMessageRValue:
+ case Cl::CL_PRValue: assert(getValueKind() == VK_RValue); break;
+ }
+
+ Cl::ModifiableType modifiable = Cl::CM_Untested;
+ if (Loc)
+ modifiable = IsModifiable(Ctx, this, kind, *Loc);
+ return Classification(kind, modifiable);
+}
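+
+// For illustration: in C, a function designator `f` (given `void f(void);`)
+// classifies as CL_Function rather than CL_LValue, the cast `(void)x` yields
+// CL_Void, and a GNU-style void lvalue such as `*(void *)p` yields
+// CL_AddressableVoid.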
+
+/// Classify an expression which creates a temporary, based on its type.
+static Cl::Kinds ClassifyTemporary(QualType T) {
+ if (T->isRecordType())
+ return Cl::CL_ClassTemporary;
+ if (T->isArrayType())
+ return Cl::CL_ArrayTemporary;
+
+ // No special classification: these don't behave differently from normal
+ // prvalues.
+ return Cl::CL_PRValue;
+}
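+
+// For illustration: in C++, `std::string()` classifies as CL_ClassTemporary
+// and a prvalue of array type as CL_ArrayTemporary, while `int()` remains a
+// plain CL_PRValue.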
+
+static Cl::Kinds ClassifyExprValueKind(const LangOptions &Lang,
+ const Expr *E,
+ ExprValueKind Kind) {
+ switch (Kind) {
+ case VK_RValue:
+ return Lang.CPlusPlus ? ClassifyTemporary(E->getType()) : Cl::CL_PRValue;
+ case VK_LValue:
+ return Cl::CL_LValue;
+ case VK_XValue:
+ return Cl::CL_XValue;
+ }
+ llvm_unreachable("Invalid value category of implicit cast.");
+}
+
+static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
+ // This function takes the first stab at classifying expressions.
+ const LangOptions &Lang = Ctx.getLangOpts();
+
+ switch (E->getStmtClass()) {
+ case Stmt::NoStmtClass:
+#define ABSTRACT_STMT(Kind)
+#define STMT(Kind, Base) case Expr::Kind##Class:
+#define EXPR(Kind, Base)
+#include "clang/AST/StmtNodes.inc"
+ llvm_unreachable("cannot classify a statement");
+
+  // First come the expressions that are always lvalues.
+ case Expr::ObjCIsaExprClass:
+ // C++ [expr.prim.general]p1: A string literal is an lvalue.
+ case Expr::StringLiteralClass:
+ // @encode is equivalent to its string
+ case Expr::ObjCEncodeExprClass:
+ // __func__ and friends are too.
+ case Expr::PredefinedExprClass:
+ // Property references are lvalues
+ case Expr::ObjCSubscriptRefExprClass:
+ case Expr::ObjCPropertyRefExprClass:
+ // C++ [expr.typeid]p1: The result of a typeid expression is an lvalue of...
+ case Expr::CXXTypeidExprClass:
+ // Unresolved lookups and uncorrected typos get classified as lvalues.
+ // FIXME: Is this wise? Should they get their own kind?
+ case Expr::UnresolvedLookupExprClass:
+ case Expr::UnresolvedMemberExprClass:
+ case Expr::TypoExprClass:
+ case Expr::CXXDependentScopeMemberExprClass:
+ case Expr::DependentScopeDeclRefExprClass:
+ // ObjC instance variables are lvalues
+ // FIXME: ObjC++0x might have different rules
+ case Expr::ObjCIvarRefExprClass:
+ case Expr::FunctionParmPackExprClass:
+ case Expr::MSPropertyRefExprClass:
+ case Expr::MSPropertySubscriptExprClass:
+ case Expr::OMPArraySectionExprClass:
+ return Cl::CL_LValue;
+
+ // C99 6.5.2.5p5 says that compound literals are lvalues.
+ // In C++, they're prvalue temporaries.
+ case Expr::CompoundLiteralExprClass:
+ return Ctx.getLangOpts().CPlusPlus ? ClassifyTemporary(E->getType())
+ : Cl::CL_LValue;
+
+ // Expressions that are prvalues.
+ case Expr::CXXBoolLiteralExprClass:
+ case Expr::CXXPseudoDestructorExprClass:
+ case Expr::UnaryExprOrTypeTraitExprClass:
+ case Expr::CXXNewExprClass:
+ case Expr::CXXThisExprClass:
+ case Expr::CXXNullPtrLiteralExprClass:
+ case Expr::ImaginaryLiteralClass:
+ case Expr::GNUNullExprClass:
+ case Expr::OffsetOfExprClass:
+ case Expr::CXXThrowExprClass:
+ case Expr::ShuffleVectorExprClass:
+ case Expr::ConvertVectorExprClass:
+ case Expr::IntegerLiteralClass:
+ case Expr::CharacterLiteralClass:
+ case Expr::AddrLabelExprClass:
+ case Expr::CXXDeleteExprClass:
+ case Expr::ImplicitValueInitExprClass:
+ case Expr::BlockExprClass:
+ case Expr::FloatingLiteralClass:
+ case Expr::CXXNoexceptExprClass:
+ case Expr::CXXScalarValueInitExprClass:
+ case Expr::TypeTraitExprClass:
+ case Expr::ArrayTypeTraitExprClass:
+ case Expr::ExpressionTraitExprClass:
+ case Expr::ObjCSelectorExprClass:
+ case Expr::ObjCProtocolExprClass:
+ case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCBoxedExprClass:
+ case Expr::ObjCArrayLiteralClass:
+ case Expr::ObjCDictionaryLiteralClass:
+ case Expr::ObjCBoolLiteralExprClass:
+ case Expr::ParenListExprClass:
+ case Expr::SizeOfPackExprClass:
+ case Expr::SubstNonTypeTemplateParmPackExprClass:
+ case Expr::AsTypeExprClass:
+ case Expr::ObjCIndirectCopyRestoreExprClass:
+ case Expr::AtomicExprClass:
+ case Expr::CXXFoldExprClass:
+ case Expr::NoInitExprClass:
+ case Expr::DesignatedInitUpdateExprClass:
+ case Expr::CoyieldExprClass:
+ return Cl::CL_PRValue;
+
+ // Next come the complicated cases.
+ case Expr::SubstNonTypeTemplateParmExprClass:
+ return ClassifyInternal(Ctx,
+ cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
+
+ // C++ [expr.sub]p1: The result is an lvalue of type "T".
+ // However, subscripting vector types is more like member access.
+ case Expr::ArraySubscriptExprClass:
+ if (cast<ArraySubscriptExpr>(E)->getBase()->getType()->isVectorType())
+ return ClassifyInternal(Ctx, cast<ArraySubscriptExpr>(E)->getBase());
+ return Cl::CL_LValue;
+
+ // C++ [expr.prim.general]p3: The result is an lvalue if the entity is a
+ // function or variable and a prvalue otherwise.
+ case Expr::DeclRefExprClass:
+ if (E->getType() == Ctx.UnknownAnyTy)
+ return isa<FunctionDecl>(cast<DeclRefExpr>(E)->getDecl())
+ ? Cl::CL_PRValue : Cl::CL_LValue;
+ return ClassifyDecl(Ctx, cast<DeclRefExpr>(E)->getDecl());
+
+ // Member access is complex.
+ case Expr::MemberExprClass:
+ return ClassifyMemberExpr(Ctx, cast<MemberExpr>(E));
+
+ case Expr::UnaryOperatorClass:
+ switch (cast<UnaryOperator>(E)->getOpcode()) {
+ // C++ [expr.unary.op]p1: The unary * operator performs indirection:
+ // [...] the result is an lvalue referring to the object or function
+ // to which the expression points.
+ case UO_Deref:
+ return Cl::CL_LValue;
+
+ // GNU extensions, simply look through them.
+ case UO_Extension:
+ return ClassifyInternal(Ctx, cast<UnaryOperator>(E)->getSubExpr());
+
+ // Treat _Real and _Imag basically as if they were member
+ // expressions: l-value only if the operand is a true l-value.
+ case UO_Real:
+ case UO_Imag: {
+ const Expr *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
+ Cl::Kinds K = ClassifyInternal(Ctx, Op);
+ if (K != Cl::CL_LValue) return K;
+
+ if (isa<ObjCPropertyRefExpr>(Op))
+ return Cl::CL_SubObjCPropertySetting;
+ return Cl::CL_LValue;
+ }
+
+ // C++ [expr.pre.incr]p1: The result is the updated operand; it is an
+ // lvalue, [...]
+ // Not so in C.
+ case UO_PreInc:
+ case UO_PreDec:
+ return Lang.CPlusPlus ? Cl::CL_LValue : Cl::CL_PRValue;
+
+ default:
+ return Cl::CL_PRValue;
+ }
+
+ case Expr::OpaqueValueExprClass:
+ return ClassifyExprValueKind(Lang, E, E->getValueKind());
+
+ // Pseudo-object expressions can produce l-values with reference magic.
+ case Expr::PseudoObjectExprClass:
+ return ClassifyExprValueKind(Lang, E,
+ cast<PseudoObjectExpr>(E)->getValueKind());
+
+ // Implicit casts are lvalues if they're lvalue casts. Other than that, we
+ // only specifically record class temporaries.
+ case Expr::ImplicitCastExprClass:
+ return ClassifyExprValueKind(Lang, E, E->getValueKind());
+
+ // C++ [expr.prim.general]p4: The presence of parentheses does not affect
+ // whether the expression is an lvalue.
+ case Expr::ParenExprClass:
+ return ClassifyInternal(Ctx, cast<ParenExpr>(E)->getSubExpr());
+
+ // C11 6.5.1.1p4: [A generic selection] is an lvalue, a function designator,
+ // or a void expression if its result expression is, respectively, an
+ // lvalue, a function designator, or a void expression.
+ case Expr::GenericSelectionExprClass:
+ if (cast<GenericSelectionExpr>(E)->isResultDependent())
+ return Cl::CL_PRValue;
+    return ClassifyInternal(Ctx,
+                            cast<GenericSelectionExpr>(E)->getResultExpr());
+
+ case Expr::BinaryOperatorClass:
+ case Expr::CompoundAssignOperatorClass:
+ // C doesn't have any binary expressions that are lvalues.
+ if (Lang.CPlusPlus)
+ return ClassifyBinaryOp(Ctx, cast<BinaryOperator>(E));
+ return Cl::CL_PRValue;
+
+ case Expr::CallExprClass:
+ case Expr::CXXOperatorCallExprClass:
+ case Expr::CXXMemberCallExprClass:
+ case Expr::UserDefinedLiteralClass:
+ case Expr::CUDAKernelCallExprClass:
+ return ClassifyUnnamed(Ctx, cast<CallExpr>(E)->getCallReturnType(Ctx));
+
+ // __builtin_choose_expr is equivalent to the chosen expression.
+ case Expr::ChooseExprClass:
+ return ClassifyInternal(Ctx, cast<ChooseExpr>(E)->getChosenSubExpr());
+
+ // Extended vector element access is an lvalue unless there are duplicates
+ // in the shuffle expression.
+ case Expr::ExtVectorElementExprClass:
+ if (cast<ExtVectorElementExpr>(E)->containsDuplicateElements())
+ return Cl::CL_DuplicateVectorComponents;
+ if (cast<ExtVectorElementExpr>(E)->isArrow())
+ return Cl::CL_LValue;
+ return ClassifyInternal(Ctx, cast<ExtVectorElementExpr>(E)->getBase());
+
+ // Simply look at the actual default argument.
+ case Expr::CXXDefaultArgExprClass:
+ return ClassifyInternal(Ctx, cast<CXXDefaultArgExpr>(E)->getExpr());
+
+ // Same idea for default initializers.
+ case Expr::CXXDefaultInitExprClass:
+ return ClassifyInternal(Ctx, cast<CXXDefaultInitExpr>(E)->getExpr());
+
+ // Same idea for temporary binding.
+ case Expr::CXXBindTemporaryExprClass:
+ return ClassifyInternal(Ctx, cast<CXXBindTemporaryExpr>(E)->getSubExpr());
+
+ // And the cleanups guard.
+ case Expr::ExprWithCleanupsClass:
+ return ClassifyInternal(Ctx, cast<ExprWithCleanups>(E)->getSubExpr());
+
+ // Casts depend completely on the target type. All casts work the same.
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXFunctionalCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXDynamicCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXConstCastExprClass:
+ case Expr::ObjCBridgedCastExprClass:
+ // Only in C++ can casts be interesting at all.
+ if (!Lang.CPlusPlus) return Cl::CL_PRValue;
+ return ClassifyUnnamed(Ctx, cast<ExplicitCastExpr>(E)->getTypeAsWritten());
+
+ case Expr::CXXUnresolvedConstructExprClass:
+ return ClassifyUnnamed(Ctx,
+ cast<CXXUnresolvedConstructExpr>(E)->getTypeAsWritten());
+
+ case Expr::BinaryConditionalOperatorClass: {
+ if (!Lang.CPlusPlus) return Cl::CL_PRValue;
+ const BinaryConditionalOperator *co = cast<BinaryConditionalOperator>(E);
+ return ClassifyConditional(Ctx, co->getTrueExpr(), co->getFalseExpr());
+ }
+
+ case Expr::ConditionalOperatorClass: {
+ // Once again, only C++ is interesting.
+ if (!Lang.CPlusPlus) return Cl::CL_PRValue;
+ const ConditionalOperator *co = cast<ConditionalOperator>(E);
+ return ClassifyConditional(Ctx, co->getTrueExpr(), co->getFalseExpr());
+ }
+
+ // ObjC message sends are effectively function calls, if the target function
+ // is known.
+ case Expr::ObjCMessageExprClass:
+ if (const ObjCMethodDecl *Method =
+ cast<ObjCMessageExpr>(E)->getMethodDecl()) {
+ Cl::Kinds kind = ClassifyUnnamed(Ctx, Method->getReturnType());
+ return (kind == Cl::CL_PRValue) ? Cl::CL_ObjCMessageRValue : kind;
+ }
+ return Cl::CL_PRValue;
+
+ // Some C++ expressions are always class temporaries.
+ case Expr::CXXConstructExprClass:
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::LambdaExprClass:
+ case Expr::CXXStdInitializerListExprClass:
+ return Cl::CL_ClassTemporary;
+
+ case Expr::VAArgExprClass:
+ return ClassifyUnnamed(Ctx, E->getType());
+
+ case Expr::DesignatedInitExprClass:
+ return ClassifyInternal(Ctx, cast<DesignatedInitExpr>(E)->getInit());
+
+ case Expr::StmtExprClass: {
+ const CompoundStmt *S = cast<StmtExpr>(E)->getSubStmt();
+ if (const Expr *LastExpr = dyn_cast_or_null<Expr>(S->body_back()))
+ return ClassifyUnnamed(Ctx, LastExpr->getType());
+ return Cl::CL_PRValue;
+ }
+
+ case Expr::CXXUuidofExprClass:
+ return Cl::CL_LValue;
+
+ case Expr::PackExpansionExprClass:
+ return ClassifyInternal(Ctx, cast<PackExpansionExpr>(E)->getPattern());
+
+ case Expr::MaterializeTemporaryExprClass:
+ return cast<MaterializeTemporaryExpr>(E)->isBoundToLvalueReference()
+ ? Cl::CL_LValue
+ : Cl::CL_XValue;
+
+ case Expr::InitListExprClass:
+ // An init list can be an lvalue if it is bound to a reference and
+ // contains only one element. In that case, we look at that element
+ // for an exact classification. Init list creation takes care of the
+ // value kind for us, so we only need to fine-tune.
+ if (E->isRValue())
+ return ClassifyExprValueKind(Lang, E, E->getValueKind());
+ assert(cast<InitListExpr>(E)->getNumInits() == 1 &&
+ "Only 1-element init lists can be glvalues.");
+ return ClassifyInternal(Ctx, cast<InitListExpr>(E)->getInit(0));
+
+ case Expr::CoawaitExprClass:
+ return ClassifyInternal(Ctx, cast<CoawaitExpr>(E)->getResumeExpr());
+ }
+
+ llvm_unreachable("unhandled expression kind in classification");
+}
+
+/// ClassifyDecl - Return the classification of an expression referencing the
+/// given declaration.
+static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D) {
+ // C++ [expr.prim.general]p6: The result is an lvalue if the entity is a
+ // function, variable, or data member and a prvalue otherwise.
+ // In C, functions are not lvalues.
+ // In addition, NonTypeTemplateParmDecl derives from VarDecl but isn't an
+ // lvalue unless it's a reference type (C++ [temp.param]p6), so we need to
+ // special-case this.
+
+ if (isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance())
+ return Cl::CL_MemberFunction;
+
+ bool islvalue;
+ if (const NonTypeTemplateParmDecl *NTTParm =
+ dyn_cast<NonTypeTemplateParmDecl>(D))
+ islvalue = NTTParm->getType()->isReferenceType();
+ else
+ islvalue = isa<VarDecl>(D) || isa<FieldDecl>(D) ||
+ isa<IndirectFieldDecl>(D) ||
+ (Ctx.getLangOpts().CPlusPlus &&
+ (isa<FunctionDecl>(D) || isa<MSPropertyDecl>(D) ||
+ isa<FunctionTemplateDecl>(D)));
+
+ return islvalue ? Cl::CL_LValue : Cl::CL_PRValue;
+}
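+
+// For illustration: given `int x; void f();`, a DeclRefExpr naming `x` is
+// CL_LValue in both C and C++, while one naming `f` is CL_LValue only in
+// C++; a non-type template parameter `N` from `template <int N>` classifies
+// as CL_PRValue unless it was declared with reference type.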
+
+/// ClassifyUnnamed - Return the classification of an expression yielding an
+/// unnamed value of the given type. This applies in particular to function
+/// calls and casts.
+static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T) {
+ // In C, function calls are always rvalues.
+ if (!Ctx.getLangOpts().CPlusPlus) return Cl::CL_PRValue;
+
+ // C++ [expr.call]p10: A function call is an lvalue if the result type is an
+ // lvalue reference type or an rvalue reference to function type, an xvalue
+ // if the result type is an rvalue reference to object type, and a prvalue
+ // otherwise.
+ if (T->isLValueReferenceType())
+ return Cl::CL_LValue;
+ const RValueReferenceType *RV = T->getAs<RValueReferenceType>();
+ if (!RV) // Could still be a class temporary, though.
+ return ClassifyTemporary(T);
+
+ return RV->getPointeeType()->isFunctionType() ? Cl::CL_LValue : Cl::CL_XValue;
+}
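+
+// For illustration: given `int &f(); int &&g(); int h();`, the call `f()`
+// classifies as CL_LValue, `g()` as CL_XValue, and `h()` falls through to
+// ClassifyTemporary, yielding CL_PRValue.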
+
+static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E) {
+ if (E->getType() == Ctx.UnknownAnyTy)
+ return (isa<FunctionDecl>(E->getMemberDecl())
+ ? Cl::CL_PRValue : Cl::CL_LValue);
+
+  // Handle C first; it's easier.
+ if (!Ctx.getLangOpts().CPlusPlus) {
+ // C99 6.5.2.3p3
+ // For dot access, the expression is an lvalue if the first part is. For
+ // arrow access, it always is an lvalue.
+ if (E->isArrow())
+ return Cl::CL_LValue;
+ // ObjC property accesses are not lvalues, but get special treatment.
+ Expr *Base = E->getBase()->IgnoreParens();
+ if (isa<ObjCPropertyRefExpr>(Base))
+ return Cl::CL_SubObjCPropertySetting;
+ return ClassifyInternal(Ctx, Base);
+ }
+
+ NamedDecl *Member = E->getMemberDecl();
+ // C++ [expr.ref]p3: E1->E2 is converted to the equivalent form (*(E1)).E2.
+ // C++ [expr.ref]p4: If E2 is declared to have type "reference to T", then
+ // E1.E2 is an lvalue.
+ if (ValueDecl *Value = dyn_cast<ValueDecl>(Member))
+ if (Value->getType()->isReferenceType())
+ return Cl::CL_LValue;
+
+ // Otherwise, one of the following rules applies.
+ // -- If E2 is a static member [...] then E1.E2 is an lvalue.
+ if (isa<VarDecl>(Member) && Member->getDeclContext()->isRecord())
+ return Cl::CL_LValue;
+
+ // -- If E2 is a non-static data member [...]. If E1 is an lvalue, then
+ // E1.E2 is an lvalue; if E1 is an xvalue, then E1.E2 is an xvalue;
+ // otherwise, it is a prvalue.
+ if (isa<FieldDecl>(Member)) {
+ // *E1 is an lvalue
+ if (E->isArrow())
+ return Cl::CL_LValue;
+ Expr *Base = E->getBase()->IgnoreParenImpCasts();
+ if (isa<ObjCPropertyRefExpr>(Base))
+ return Cl::CL_SubObjCPropertySetting;
+ return ClassifyInternal(Ctx, E->getBase());
+ }
+
+ // -- If E2 is a [...] member function, [...]
+ // -- If it refers to a static member function [...], then E1.E2 is an
+ // lvalue; [...]
+ // -- Otherwise [...] E1.E2 is a prvalue.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Member))
+ return Method->isStatic() ? Cl::CL_LValue : Cl::CL_MemberFunction;
+
+ // -- If E2 is a member enumerator [...], the expression E1.E2 is a prvalue.
+ // So is everything else we haven't handled yet.
+ return Cl::CL_PRValue;
+}
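+
+// For illustration: given `struct S { int i; static int sm; int f(); } x;`,
+// `x.i` and `x.sm` classify as CL_LValue (the latter via the static-member
+// rule), `x.f` is CL_MemberFunction, and `static_cast<S &&>(x).i` is
+// CL_XValue because the base expression is an xvalue.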
+
+static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E) {
+ assert(Ctx.getLangOpts().CPlusPlus &&
+ "This is only relevant for C++.");
+ // C++ [expr.ass]p1: All [...] return an lvalue referring to the left operand.
+ // Except we override this for writes to ObjC properties.
+ if (E->isAssignmentOp())
+ return (E->getLHS()->getObjectKind() == OK_ObjCProperty
+ ? Cl::CL_PRValue : Cl::CL_LValue);
+
+ // C++ [expr.comma]p1: the result is of the same value category as its right
+ // operand, [...].
+ if (E->getOpcode() == BO_Comma)
+ return ClassifyInternal(Ctx, E->getRHS());
+
+ // C++ [expr.mptr.oper]p6: The result of a .* expression whose second operand
+ // is a pointer to a data member is of the same value category as its first
+ // operand.
+ if (E->getOpcode() == BO_PtrMemD)
+ return (E->getType()->isFunctionType() ||
+ E->hasPlaceholderType(BuiltinType::BoundMember))
+ ? Cl::CL_MemberFunction
+ : ClassifyInternal(Ctx, E->getLHS());
+
+ // C++ [expr.mptr.oper]p6: The result of an ->* expression is an lvalue if its
+ // second operand is a pointer to data member and a prvalue otherwise.
+ if (E->getOpcode() == BO_PtrMemI)
+ return (E->getType()->isFunctionType() ||
+ E->hasPlaceholderType(BuiltinType::BoundMember))
+ ? Cl::CL_MemberFunction
+ : Cl::CL_LValue;
+
+ // All other binary operations are prvalues.
+ return Cl::CL_PRValue;
+}
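+
+// For illustration: in C++, `a = 1` and `a += 1` classify as CL_LValue,
+// `(a, b)` takes the classification of `b`, and given `int S::*pm`, the
+// expression `x.*pm` follows the classification of `x` while `p->*pm` is
+// CL_LValue.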
+
+static Cl::Kinds ClassifyConditional(ASTContext &Ctx, const Expr *True,
+ const Expr *False) {
+ assert(Ctx.getLangOpts().CPlusPlus &&
+ "This is only relevant for C++.");
+
+ // C++ [expr.cond]p2
+ // If either the second or the third operand has type (cv) void,
+ // one of the following shall hold:
+ if (True->getType()->isVoidType() || False->getType()->isVoidType()) {
+ // The second or the third operand (but not both) is a (possibly
+ // parenthesized) throw-expression; the result is of the [...] value
+ // category of the other.
+ bool TrueIsThrow = isa<CXXThrowExpr>(True->IgnoreParenImpCasts());
+ bool FalseIsThrow = isa<CXXThrowExpr>(False->IgnoreParenImpCasts());
+ if (const Expr *NonThrow = TrueIsThrow ? (FalseIsThrow ? nullptr : False)
+ : (FalseIsThrow ? True : nullptr))
+ return ClassifyInternal(Ctx, NonThrow);
+
+ // [Otherwise] the result [...] is a prvalue.
+ return Cl::CL_PRValue;
+ }
+
+ // Note that at this point, we have already performed all conversions
+ // according to [expr.cond]p3.
+ // C++ [expr.cond]p4: If the second and third operands are glvalues of the
+ // same value category [...], the result is of that [...] value category.
+ // C++ [expr.cond]p5: Otherwise, the result is a prvalue.
+ Cl::Kinds LCl = ClassifyInternal(Ctx, True),
+ RCl = ClassifyInternal(Ctx, False);
+ return LCl == RCl ? LCl : Cl::CL_PRValue;
+}
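+
+// For illustration: with `int a, b;`, `c ? a : b` classifies as CL_LValue,
+// `c ? a : 0` as CL_PRValue (the operands were already converted to
+// prvalues per [expr.cond]p3), and `c ? a : throw 0` takes the
+// classification of the non-throw operand, i.e. CL_LValue.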
+
+static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
+ Cl::Kinds Kind, SourceLocation &Loc) {
+ // As a general rule, we only care about lvalues. But there are some rvalues
+ // for which we want to generate special results.
+ if (Kind == Cl::CL_PRValue) {
+ // For the sake of better diagnostics, we want to specifically recognize
+ // use of the GCC cast-as-lvalue extension.
+ if (const ExplicitCastExpr *CE =
+ dyn_cast<ExplicitCastExpr>(E->IgnoreParens())) {
+ if (CE->getSubExpr()->IgnoreParenImpCasts()->isLValue()) {
+ Loc = CE->getExprLoc();
+ return Cl::CM_LValueCast;
+ }
+ }
+ }
+ if (Kind != Cl::CL_LValue)
+ return Cl::CM_RValue;
+
+ // This is the lvalue case.
+ // Functions are lvalues in C++, but not modifiable. (C++ [basic.lval]p6)
+ if (Ctx.getLangOpts().CPlusPlus && E->getType()->isFunctionType())
+ return Cl::CM_Function;
+
+ // Assignment to a property in ObjC is an implicit setter access. But a
+ // setter might not exist.
+ if (const ObjCPropertyRefExpr *Expr = dyn_cast<ObjCPropertyRefExpr>(E)) {
+ if (Expr->isImplicitProperty() &&
+ Expr->getImplicitPropertySetter() == nullptr)
+ return Cl::CM_NoSetterProperty;
+ }
+
+ CanQualType CT = Ctx.getCanonicalType(E->getType());
+ // Const stuff is obviously not modifiable.
+ if (CT.isConstQualified())
+ return Cl::CM_ConstQualified;
+ if (CT.getQualifiers().getAddressSpace() == LangAS::opencl_constant)
+ return Cl::CM_ConstAddrSpace;
+
+ // Arrays are not modifiable, only their elements are.
+ if (CT->isArrayType())
+ return Cl::CM_ArrayType;
+ // Incomplete types are not modifiable.
+ if (CT->isIncompleteType())
+ return Cl::CM_IncompleteType;
+
+ // Records with any const fields (recursively) are not modifiable.
+ if (const RecordType *R = CT->getAs<RecordType>())
+ if (R->hasConstFields())
+ return Cl::CM_ConstQualified;
+
+ return Cl::CM_Modifiable;
+}
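+
+// For illustration: `const int c = 0;` makes `c` CM_ConstQualified, an array
+// variable classifies as CM_ArrayType, an ObjC implicit property reference
+// without a setter is CM_NoSetterProperty, and the GCC cast-as-lvalue
+// extension, e.g. `(int)x = 1`, is reported as CM_LValueCast so a targeted
+// diagnostic can be emitted.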
+
+Expr::LValueClassification Expr::ClassifyLValue(ASTContext &Ctx) const {
+ Classification VC = Classify(Ctx);
+ switch (VC.getKind()) {
+ case Cl::CL_LValue: return LV_Valid;
+ case Cl::CL_XValue: return LV_InvalidExpression;
+ case Cl::CL_Function: return LV_NotObjectType;
+ case Cl::CL_Void: return LV_InvalidExpression;
+ case Cl::CL_AddressableVoid: return LV_IncompleteVoidType;
+ case Cl::CL_DuplicateVectorComponents: return LV_DuplicateVectorComponents;
+ case Cl::CL_MemberFunction: return LV_MemberFunction;
+ case Cl::CL_SubObjCPropertySetting: return LV_SubObjCPropertySetting;
+ case Cl::CL_ClassTemporary: return LV_ClassTemporary;
+ case Cl::CL_ArrayTemporary: return LV_ArrayTemporary;
+ case Cl::CL_ObjCMessageRValue: return LV_InvalidMessageExpression;
+ case Cl::CL_PRValue: return LV_InvalidExpression;
+ }
+ llvm_unreachable("Unhandled kind");
+}
+
+Expr::isModifiableLvalueResult
+Expr::isModifiableLvalue(ASTContext &Ctx, SourceLocation *Loc) const {
+ SourceLocation dummy;
+ Classification VC = ClassifyModifiable(Ctx, Loc ? *Loc : dummy);
+ switch (VC.getKind()) {
+ case Cl::CL_LValue: break;
+ case Cl::CL_XValue: return MLV_InvalidExpression;
+ case Cl::CL_Function: return MLV_NotObjectType;
+ case Cl::CL_Void: return MLV_InvalidExpression;
+ case Cl::CL_AddressableVoid: return MLV_IncompleteVoidType;
+ case Cl::CL_DuplicateVectorComponents: return MLV_DuplicateVectorComponents;
+ case Cl::CL_MemberFunction: return MLV_MemberFunction;
+ case Cl::CL_SubObjCPropertySetting: return MLV_SubObjCPropertySetting;
+ case Cl::CL_ClassTemporary: return MLV_ClassTemporary;
+ case Cl::CL_ArrayTemporary: return MLV_ArrayTemporary;
+ case Cl::CL_ObjCMessageRValue: return MLV_InvalidMessageExpression;
+ case Cl::CL_PRValue:
+ return VC.getModifiable() == Cl::CM_LValueCast ?
+ MLV_LValueCast : MLV_InvalidExpression;
+ }
+ assert(VC.getKind() == Cl::CL_LValue && "Unhandled kind");
+ switch (VC.getModifiable()) {
+ case Cl::CM_Untested: llvm_unreachable("Did not test modifiability");
+ case Cl::CM_Modifiable: return MLV_Valid;
+ case Cl::CM_RValue: llvm_unreachable("CM_RValue and CL_LValue don't match");
+ case Cl::CM_Function: return MLV_NotObjectType;
+ case Cl::CM_LValueCast:
+ llvm_unreachable("CM_LValueCast and CL_LValue don't match");
+ case Cl::CM_NoSetterProperty: return MLV_NoSetterProperty;
+ case Cl::CM_ConstQualified: return MLV_ConstQualified;
+ case Cl::CM_ConstAddrSpace: return MLV_ConstAddrSpace;
+ case Cl::CM_ArrayType: return MLV_ArrayType;
+ case Cl::CM_IncompleteType: return MLV_IncompleteType;
+ }
+ llvm_unreachable("Unhandled modifiable type");
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp b/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp
new file mode 100644
index 0000000..c4c4398
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp
@@ -0,0 +1,9578 @@
+//===--- ExprConstant.cpp - Expression Constant Evaluator -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Expr constant evaluator.
+//
+// Constant expression evaluation produces four main results:
+//
+// * A success/failure flag indicating whether constant folding was successful.
+// This is the 'bool' return value used by most of the code in this file. A
+// 'false' return value indicates that constant folding has failed, and any
+// appropriate diagnostic has already been produced.
+//
+// * An evaluated result, valid only if constant folding has not failed.
+//
+// * A flag indicating if evaluation encountered (unevaluated) side-effects.
+// These arise in cases such as (sideEffect(), 0) and (sideEffect() || 1),
+// where it is possible to determine the evaluated result regardless.
+//
+// * A set of notes indicating why the evaluation was not a constant expression
+// (under the C++11 / C++1y rules only, at the moment), or, if folding failed
+// too, why the expression could not be folded.
+//
+// If we are checking for a potential constant expression, failure to constant
+// fold a potential constant sub-expression will be indicated by a 'false'
+// return value (the expression could not be folded) and no diagnostic (the
+// expression is not necessarily non-constant).
+//
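+// For illustration: under the side-effect-tolerant modes, `(sideEffect(), 0)`
+// folds to 0 with the side-effects flag set, while a C++11 initializer that
+// calls a non-constexpr function fails with notes identifying that call as
+// the first non-constant construct.
+//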
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstring>
+#include <functional>
+
+using namespace clang;
+using llvm::APSInt;
+using llvm::APFloat;
+
+static bool IsGlobalLValue(APValue::LValueBase B);
+
+namespace {
+ struct LValue;
+ struct CallStackFrame;
+ struct EvalInfo;
+
+ static QualType getType(APValue::LValueBase B) {
+ if (!B) return QualType();
+ if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>())
+ return D->getType();
+
+ const Expr *Base = B.get<const Expr*>();
+
+ // For a materialized temporary, the type of the temporary we materialized
+ // may not be the type of the expression.
+ if (const MaterializeTemporaryExpr *MTE =
+ dyn_cast<MaterializeTemporaryExpr>(Base)) {
+ SmallVector<const Expr *, 2> CommaLHSs;
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
+ const Expr *Temp = MTE->GetTemporaryExpr();
+ const Expr *Inner = Temp->skipRValueSubobjectAdjustments(CommaLHSs,
+ Adjustments);
+ // Keep any cv-qualifiers from the reference if we generated a temporary
+ // for it.
+ if (Inner != Temp)
+ return Inner->getType();
+ }
+
+ return Base->getType();
+ }
+
+ /// Get an LValue path entry, which is known to not be an array index, as a
+ /// field or base class.
+ static
+ APValue::BaseOrMemberType getAsBaseOrMember(APValue::LValuePathEntry E) {
+ APValue::BaseOrMemberType Value;
+ Value.setFromOpaqueValue(E.BaseOrMember);
+ return Value;
+ }
+
+ /// Get an LValue path entry, which is known to not be an array index, as a
+ /// field declaration.
+ static const FieldDecl *getAsField(APValue::LValuePathEntry E) {
+ return dyn_cast<FieldDecl>(getAsBaseOrMember(E).getPointer());
+ }
+ /// Get an LValue path entry, which is known to not be an array index, as a
+ /// base class declaration.
+ static const CXXRecordDecl *getAsBaseClass(APValue::LValuePathEntry E) {
+ return dyn_cast<CXXRecordDecl>(getAsBaseOrMember(E).getPointer());
+ }
+ /// Determine whether this LValue path entry for a base class names a virtual
+ /// base class.
+ static bool isVirtualBaseClass(APValue::LValuePathEntry E) {
+ return getAsBaseOrMember(E).getInt();
+ }
+
+ /// Find the path length and type of the most-derived subobject in the given
+ /// path, and find the size of the containing array, if any.
+ static
+ unsigned findMostDerivedSubobject(ASTContext &Ctx, QualType Base,
+ ArrayRef<APValue::LValuePathEntry> Path,
+ uint64_t &ArraySize, QualType &Type,
+ bool &IsArray) {
+ unsigned MostDerivedLength = 0;
+ Type = Base;
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ if (Type->isArrayType()) {
+ const ConstantArrayType *CAT =
+ cast<ConstantArrayType>(Ctx.getAsArrayType(Type));
+ Type = CAT->getElementType();
+ ArraySize = CAT->getSize().getZExtValue();
+ MostDerivedLength = I + 1;
+ IsArray = true;
+ } else if (Type->isAnyComplexType()) {
+ const ComplexType *CT = Type->castAs<ComplexType>();
+ Type = CT->getElementType();
+ ArraySize = 2;
+ MostDerivedLength = I + 1;
+ IsArray = true;
+ } else if (const FieldDecl *FD = getAsField(Path[I])) {
+ Type = FD->getType();
+ ArraySize = 0;
+ MostDerivedLength = I + 1;
+ IsArray = false;
+ } else {
+ // Path[I] describes a base class.
+ ArraySize = 0;
+ IsArray = false;
+ }
+ }
+ return MostDerivedLength;
+ }
+
+ // The order of this enum is important for diagnostics.
+ enum CheckSubobjectKind {
+ CSK_Base, CSK_Derived, CSK_Field, CSK_ArrayToPointer, CSK_ArrayIndex,
+ CSK_This, CSK_Real, CSK_Imag
+ };
+
+ /// A path from a glvalue to a subobject of that glvalue.
+ struct SubobjectDesignator {
+ /// True if the subobject was named in a manner not supported by C++11. Such
+ /// lvalues can still be folded, but they are not core constant expressions
+ /// and we cannot perform lvalue-to-rvalue conversions on them.
+ bool Invalid : 1;
+
+ /// Is this a pointer one past the end of an object?
+ bool IsOnePastTheEnd : 1;
+
+ /// Indicator of whether the most-derived object is an array element.
+ bool MostDerivedIsArrayElement : 1;
+
+ /// The length of the path to the most-derived object of which this is a
+ /// subobject.
+ unsigned MostDerivedPathLength : 29;
+
+ /// The size of the array of which the most-derived object is an element.
+ /// This will always be 0 if the most-derived object is not an array
+ /// element. 0 is not an indicator of whether or not the most-derived object
+ /// is an array, however, because 0-length arrays are allowed.
+ uint64_t MostDerivedArraySize;
+
+ /// The type of the most derived object referred to by this address.
+ QualType MostDerivedType;
+
+ typedef APValue::LValuePathEntry PathEntry;
+
+ /// The entries on the path from the glvalue to the designated subobject.
+ SmallVector<PathEntry, 8> Entries;
+
+ SubobjectDesignator() : Invalid(true) {}
+
+ explicit SubobjectDesignator(QualType T)
+ : Invalid(false), IsOnePastTheEnd(false),
+ MostDerivedIsArrayElement(false), MostDerivedPathLength(0),
+ MostDerivedArraySize(0), MostDerivedType(T) {}
+
+ SubobjectDesignator(ASTContext &Ctx, const APValue &V)
+ : Invalid(!V.isLValue() || !V.hasLValuePath()), IsOnePastTheEnd(false),
+ MostDerivedIsArrayElement(false), MostDerivedPathLength(0),
+ MostDerivedArraySize(0) {
+ if (!Invalid) {
+ IsOnePastTheEnd = V.isLValueOnePastTheEnd();
+ ArrayRef<PathEntry> VEntries = V.getLValuePath();
+ Entries.insert(Entries.end(), VEntries.begin(), VEntries.end());
+ if (V.getLValueBase()) {
+ bool IsArray = false;
+ MostDerivedPathLength =
+ findMostDerivedSubobject(Ctx, getType(V.getLValueBase()),
+ V.getLValuePath(), MostDerivedArraySize,
+ MostDerivedType, IsArray);
+ MostDerivedIsArrayElement = IsArray;
+ }
+ }
+ }
+
+ void setInvalid() {
+ Invalid = true;
+ Entries.clear();
+ }
+
+ /// Determine whether this is a one-past-the-end pointer.
+ bool isOnePastTheEnd() const {
+ assert(!Invalid);
+ if (IsOnePastTheEnd)
+ return true;
+ if (MostDerivedIsArrayElement &&
+ Entries[MostDerivedPathLength - 1].ArrayIndex == MostDerivedArraySize)
+ return true;
+ return false;
+ }
+
+ /// Check that this refers to a valid subobject.
+ bool isValidSubobject() const {
+ if (Invalid)
+ return false;
+ return !isOnePastTheEnd();
+ }
+ /// Check that this refers to a valid subobject, and if not, produce a
+ /// relevant diagnostic and set the designator as invalid.
+ bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK);
+
+ /// Update this designator to refer to the first element within this array.
+ void addArrayUnchecked(const ConstantArrayType *CAT) {
+ PathEntry Entry;
+ Entry.ArrayIndex = 0;
+ Entries.push_back(Entry);
+
+ // This is a most-derived object.
+ MostDerivedType = CAT->getElementType();
+ MostDerivedIsArrayElement = true;
+ MostDerivedArraySize = CAT->getSize().getZExtValue();
+ MostDerivedPathLength = Entries.size();
+ }
+ /// Update this designator to refer to the given base or member of this
+ /// object.
+ void addDeclUnchecked(const Decl *D, bool Virtual = false) {
+ PathEntry Entry;
+ APValue::BaseOrMemberType Value(D, Virtual);
+ Entry.BaseOrMember = Value.getOpaqueValue();
+ Entries.push_back(Entry);
+
+ // If this isn't a base class, it's a new most-derived object.
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
+ MostDerivedType = FD->getType();
+ MostDerivedIsArrayElement = false;
+ MostDerivedArraySize = 0;
+ MostDerivedPathLength = Entries.size();
+ }
+ }
+ /// Update this designator to refer to the given complex component.
+ void addComplexUnchecked(QualType EltTy, bool Imag) {
+ PathEntry Entry;
+ Entry.ArrayIndex = Imag;
+ Entries.push_back(Entry);
+
+ // This is technically a most-derived object, though in practice this
+ // is unlikely to matter.
+ MostDerivedType = EltTy;
+ MostDerivedIsArrayElement = true;
+ MostDerivedArraySize = 2;
+ MostDerivedPathLength = Entries.size();
+ }
+ void diagnosePointerArithmetic(EvalInfo &Info, const Expr *E, uint64_t N);
+ /// Add N to the address of this subobject.
+ void adjustIndex(EvalInfo &Info, const Expr *E, uint64_t N) {
+ if (Invalid) return;
+ if (MostDerivedPathLength == Entries.size() &&
+ MostDerivedIsArrayElement) {
+ Entries.back().ArrayIndex += N;
+ if (Entries.back().ArrayIndex > MostDerivedArraySize) {
+ diagnosePointerArithmetic(Info, E, Entries.back().ArrayIndex);
+ setInvalid();
+ }
+ return;
+ }
+ // [expr.add]p4: For the purposes of these operators, a pointer to a
+ // nonarray object behaves the same as a pointer to the first element of
+ // an array of length one with the type of the object as its element type.
+ if (IsOnePastTheEnd && N == (uint64_t)-1)
+ IsOnePastTheEnd = false;
+ else if (!IsOnePastTheEnd && N == 1)
+ IsOnePastTheEnd = true;
+ else if (N != 0) {
+ diagnosePointerArithmetic(Info, E, uint64_t(IsOnePastTheEnd) + N);
+ setInvalid();
+ }
+ }
+ };
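+
+  // For illustration: for `int a[4];`, a designator addressing `a[4]` (one
+  // past the end) reports isOnePastTheEnd() as true, and adjusting its index
+  // by a further +1 calls diagnosePointerArithmetic() and invalidates the
+  // designator, matching the [expr.add]p4 model above.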
+
+ /// A stack frame in the constexpr call stack.
+ struct CallStackFrame {
+ EvalInfo &Info;
+
+ /// Parent - The caller of this stack frame.
+ CallStackFrame *Caller;
+
+ /// CallLoc - The location of the call expression for this call.
+ SourceLocation CallLoc;
+
+ /// Callee - The function which was called.
+ const FunctionDecl *Callee;
+
+ /// Index - The call index of this call.
+ unsigned Index;
+
+ /// This - The binding for the this pointer in this call, if any.
+ const LValue *This;
+
+ /// Arguments - Parameter bindings for this function call, indexed by
+ /// parameters' function scope indices.
+ APValue *Arguments;
+
+ // Note that we intentionally use std::map here so that references to
+ // values are stable.
+ typedef std::map<const void*, APValue> MapTy;
+ typedef MapTy::const_iterator temp_iterator;
+ /// Temporaries - Temporary lvalues materialized within this stack frame.
+ MapTy Temporaries;
+
+ CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
+ const FunctionDecl *Callee, const LValue *This,
+ APValue *Arguments);
+ ~CallStackFrame();
+
+ APValue *getTemporary(const void *Key) {
+ MapTy::iterator I = Temporaries.find(Key);
+ return I == Temporaries.end() ? nullptr : &I->second;
+ }
+ APValue &createTemporary(const void *Key, bool IsLifetimeExtended);
+ };
+
+ /// Temporarily override 'this'.
+ class ThisOverrideRAII {
+ public:
+ ThisOverrideRAII(CallStackFrame &Frame, const LValue *NewThis, bool Enable)
+ : Frame(Frame), OldThis(Frame.This) {
+ if (Enable)
+ Frame.This = NewThis;
+ }
+ ~ThisOverrideRAII() {
+ Frame.This = OldThis;
+ }
+ private:
+ CallStackFrame &Frame;
+ const LValue *OldThis;
+ };
+
+ /// A partial diagnostic which we might know in advance that we are not going
+ /// to emit.
+ class OptionalDiagnostic {
+ PartialDiagnostic *Diag;
+
+ public:
+ explicit OptionalDiagnostic(PartialDiagnostic *Diag = nullptr)
+ : Diag(Diag) {}
+
+ template<typename T>
+ OptionalDiagnostic &operator<<(const T &v) {
+ if (Diag)
+ *Diag << v;
+ return *this;
+ }
+
+ OptionalDiagnostic &operator<<(const APSInt &I) {
+ if (Diag) {
+ SmallVector<char, 32> Buffer;
+ I.toString(Buffer);
+ *Diag << StringRef(Buffer.data(), Buffer.size());
+ }
+ return *this;
+ }
+
+ OptionalDiagnostic &operator<<(const APFloat &F) {
+ if (Diag) {
+ // FIXME: Force the precision of the source value down so we don't
+ // print digits which are usually useless (we don't really care here if
+ // we truncate a digit by accident in edge cases). Ideally,
+ // APFloat::toString would automatically print the shortest
+ // representation which rounds to the correct value, but it's a bit
+ // tricky to implement.
+ unsigned precision =
+ llvm::APFloat::semanticsPrecision(F.getSemantics());
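+      // 59/196 approximates log10(2), so this converts the significand's
+      // bit count into a rounded-up count of decimal digits.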
+ precision = (precision * 59 + 195) / 196;
+ SmallVector<char, 32> Buffer;
+ F.toString(Buffer, precision);
+ *Diag << StringRef(Buffer.data(), Buffer.size());
+ }
+ return *this;
+ }
+ };
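+
+  // For illustration: an OptionalDiagnostic wrapping a null
+  // PartialDiagnostic discards everything streamed into it, so evaluation
+  // code can stream values and notes unconditionally without first checking
+  // whether a diagnostic is actually being captured.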
+
+ /// A cleanup, and a flag indicating whether it is lifetime-extended.
+ class Cleanup {
+ llvm::PointerIntPair<APValue*, 1, bool> Value;
+
+ public:
+ Cleanup(APValue *Val, bool IsLifetimeExtended)
+ : Value(Val, IsLifetimeExtended) {}
+
+ bool isLifetimeExtended() const { return Value.getInt(); }
+ void endLifetime() {
+ *Value.getPointer() = APValue();
+ }
+ };
+
+ /// EvalInfo - This is a private struct used by the evaluator to capture
+ /// information about a subexpression as it is folded. It retains information
+ /// about the AST context, but also maintains information about the folded
+ /// expression.
+ ///
+ /// If an expression could be evaluated, it is still possible it is not a C
+ /// "integer constant expression" or constant expression. If not, this struct
+ /// captures information about how and why not.
+ ///
+ /// One bit of information passed *into* the request for constant folding
+ /// indicates whether the subexpression is "evaluated" or not according to C
+ /// rules. For example, the RHS of (0 && foo()) is not evaluated. We can
+ /// evaluate the expression regardless of what the RHS is, but C only allows
+ /// certain things in certain situations.
+ struct EvalInfo {
+ ASTContext &Ctx;
+
+ /// EvalStatus - Contains information about the evaluation.
+ Expr::EvalStatus &EvalStatus;
+
+ /// CurrentCall - The top of the constexpr call stack.
+ CallStackFrame *CurrentCall;
+
+ /// CallStackDepth - The number of calls in the call stack right now.
+ unsigned CallStackDepth;
+
+ /// NextCallIndex - The next call index to assign.
+ unsigned NextCallIndex;
+
+ /// StepsLeft - The remaining number of evaluation steps we're permitted
+ /// to perform. This is essentially a limit for the number of statements
+ /// we will evaluate.
+ unsigned StepsLeft;
+
+ /// BottomFrame - The frame in which evaluation started. This must be
+ /// initialized after CurrentCall and CallStackDepth.
+ CallStackFrame BottomFrame;
+
+ /// A stack of values whose lifetimes end at the end of some surrounding
+ /// evaluation frame.
+ llvm::SmallVector<Cleanup, 16> CleanupStack;
+
+ /// EvaluatingDecl - This is the declaration whose initializer is being
+ /// evaluated, if any.
+ APValue::LValueBase EvaluatingDecl;
+
+ /// EvaluatingDeclValue - This is the value being constructed for the
+ /// declaration whose initializer is being evaluated, if any.
+ APValue *EvaluatingDeclValue;
+
+ /// HasActiveDiagnostic - Was the previous diagnostic stored? If so, further
+ /// notes attached to it will also be stored, otherwise they will not be.
+ bool HasActiveDiagnostic;
+
+ /// \brief Have we emitted a diagnostic explaining why we couldn't constant
+ /// fold (not just why it's not strictly a constant expression)?
+ bool HasFoldFailureDiagnostic;
+
+ enum EvaluationMode {
+ /// Evaluate as a constant expression. Stop if we find that the expression
+ /// is not a constant expression.
+ EM_ConstantExpression,
+
+ /// Evaluate as a potential constant expression. Keep going if we hit a
+ /// construct that we can't evaluate yet (because we don't yet know the
+ /// value of something) but stop if we hit something that could never be
+ /// a constant expression.
+ EM_PotentialConstantExpression,
+
+ /// Fold the expression to a constant. Stop if we hit a side-effect that
+ /// we can't model.
+ EM_ConstantFold,
+
+ /// Evaluate the expression looking for integer overflow and similar
+ /// issues. Don't worry about side-effects, and try to visit all
+ /// subexpressions.
+ EM_EvaluateForOverflow,
+
+ /// Evaluate in any way we know how. Don't worry about side-effects that
+ /// can't be modeled.
+ EM_IgnoreSideEffects,
+
+ /// Evaluate as a constant expression. Stop if we find that the expression
+ /// is not a constant expression. Some expressions can be retried in the
+ /// optimizer if we don't constant fold them here, but in an unevaluated
+ /// context we try to fold them immediately since the optimizer never
+ /// gets a chance to look at them.
+ EM_ConstantExpressionUnevaluated,
+
+ /// Evaluate as a potential constant expression. Keep going if we hit a
+ /// construct that we can't evaluate yet (because we don't yet know the
+ /// value of something) but stop if we hit something that could never be
+ /// a constant expression. Some expressions can be retried in the
+ /// optimizer if we don't constant fold them here, but in an unevaluated
+ /// context we try to fold them immediately since the optimizer never
+ /// gets a chance to look at them.
+ EM_PotentialConstantExpressionUnevaluated,
+
+ /// Evaluate as a constant expression. Continue evaluating if we find a
+ /// MemberExpr with a base that can't be evaluated.
+ EM_DesignatorFold,
+ } EvalMode;
+
+ /// Are we checking whether the expression is a potential constant
+ /// expression?
+ bool checkingPotentialConstantExpression() const {
+ return EvalMode == EM_PotentialConstantExpression ||
+ EvalMode == EM_PotentialConstantExpressionUnevaluated;
+ }
+
+ /// Are we checking an expression for overflow?
+ // FIXME: We should check for any kind of undefined or suspicious behavior
+ // in such constructs, not just overflow.
+ bool checkingForOverflow() const {
+ return EvalMode == EM_EvaluateForOverflow;
+ }
+
+ EvalInfo(const ASTContext &C, Expr::EvalStatus &S, EvaluationMode Mode)
+ : Ctx(const_cast<ASTContext &>(C)), EvalStatus(S), CurrentCall(nullptr),
+ CallStackDepth(0), NextCallIndex(1),
+ StepsLeft(getLangOpts().ConstexprStepLimit),
+ BottomFrame(*this, SourceLocation(), nullptr, nullptr, nullptr),
+ EvaluatingDecl((const ValueDecl *)nullptr),
+ EvaluatingDeclValue(nullptr), HasActiveDiagnostic(false),
+ HasFoldFailureDiagnostic(false), EvalMode(Mode) {}
+
+ void setEvaluatingDecl(APValue::LValueBase Base, APValue &Value) {
+ EvaluatingDecl = Base;
+ EvaluatingDeclValue = &Value;
+ }
+
+ const LangOptions &getLangOpts() const { return Ctx.getLangOpts(); }
+
+ bool CheckCallLimit(SourceLocation Loc) {
+ // Don't perform any constexpr calls (other than the call we're checking)
+ // when checking a potential constant expression.
+ if (checkingPotentialConstantExpression() && CallStackDepth > 1)
+ return false;
+ if (NextCallIndex == 0) {
+ // NextCallIndex has wrapped around.
+ Diag(Loc, diag::note_constexpr_call_limit_exceeded);
+ return false;
+ }
+ if (CallStackDepth <= getLangOpts().ConstexprCallDepth)
+ return true;
+ Diag(Loc, diag::note_constexpr_depth_limit_exceeded)
+ << getLangOpts().ConstexprCallDepth;
+ return false;
+ }
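+
+ // For example, assuming the default -fconstexpr-depth=512, illustrative
+ // user code such as
+ // constexpr int f(int n) { return n ? f(n - 1) + 1 : 0; }
+ // constexpr int k = f(600); // exceeds the depth limit checked above
+ // is rejected with note_constexpr_depth_limit_exceeded.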
+
+ CallStackFrame *getCallFrame(unsigned CallIndex) {
+ assert(CallIndex && "no call index in getCallFrame");
+ // We will eventually hit BottomFrame, which has Index 1, so Frame can't
+ // be null in this loop.
+ CallStackFrame *Frame = CurrentCall;
+ while (Frame->Index > CallIndex)
+ Frame = Frame->Caller;
+ return (Frame->Index == CallIndex) ? Frame : nullptr;
+ }
+
+ bool nextStep(const Stmt *S) {
+ if (!StepsLeft) {
+ Diag(S->getLocStart(), diag::note_constexpr_step_limit_exceeded);
+ return false;
+ }
+ --StepsLeft;
+ return true;
+ }
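+
+ // For example, with an artificially low -fconstexpr-steps=100, this
+ // illustrative C++14 user code runs out of steps:
+ // constexpr int f() {
+ // int r = 0;
+ // for (int i = 0; i != 1000; ++i) ++r; // each statement costs a step
+ // return r;
+ // }
+ // constexpr int k = f(); // note_constexpr_step_limit_exceeded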
+
+ private:
+ /// Add a diagnostic to the diagnostics list.
+ PartialDiagnostic &addDiag(SourceLocation Loc, diag::kind DiagId) {
+ PartialDiagnostic PD(DiagId, Ctx.getDiagAllocator());
+ EvalStatus.Diag->push_back(std::make_pair(Loc, PD));
+ return EvalStatus.Diag->back().second;
+ }
+
+ /// Add notes containing a call stack to the current point of evaluation.
+ void addCallStack(unsigned Limit);
+
+ public:
+ /// Diagnose that the evaluation cannot be folded.
+ OptionalDiagnostic Diag(SourceLocation Loc, diag::kind DiagId
+ = diag::note_invalid_subexpr_in_const_expr,
+ unsigned ExtraNotes = 0, bool IsCCEDiag = false) {
+ if (EvalStatus.Diag) {
+ // If we have a prior diagnostic, it will be noting that the expression
+ // isn't a constant expression. This diagnostic is more important,
+ // unless we require this evaluation to produce a constant expression.
+ //
+ // FIXME: We might want to show both diagnostics to the user in
+ // EM_ConstantFold mode.
+ if (!EvalStatus.Diag->empty()) {
+ switch (EvalMode) {
+ case EM_ConstantFold:
+ case EM_IgnoreSideEffects:
+ case EM_EvaluateForOverflow:
+ if (!HasFoldFailureDiagnostic)
+ break;
+ // We've already failed to fold something. Keep that diagnostic.
+ case EM_ConstantExpression:
+ case EM_PotentialConstantExpression:
+ case EM_ConstantExpressionUnevaluated:
+ case EM_PotentialConstantExpressionUnevaluated:
+ case EM_DesignatorFold:
+ HasActiveDiagnostic = false;
+ return OptionalDiagnostic();
+ }
+ }
+
+ unsigned CallStackNotes = CallStackDepth - 1;
+ unsigned Limit = Ctx.getDiagnostics().getConstexprBacktraceLimit();
+ if (Limit)
+ CallStackNotes = std::min(CallStackNotes, Limit + 1);
+ if (checkingPotentialConstantExpression())
+ CallStackNotes = 0;
+
+ HasActiveDiagnostic = true;
+ HasFoldFailureDiagnostic = !IsCCEDiag;
+ EvalStatus.Diag->clear();
+ EvalStatus.Diag->reserve(1 + ExtraNotes + CallStackNotes);
+ addDiag(Loc, DiagId);
+ if (!checkingPotentialConstantExpression())
+ addCallStack(Limit);
+ return OptionalDiagnostic(&(*EvalStatus.Diag)[0].second);
+ }
+ HasActiveDiagnostic = false;
+ return OptionalDiagnostic();
+ }
+
+ OptionalDiagnostic Diag(const Expr *E, diag::kind DiagId
+ = diag::note_invalid_subexpr_in_const_expr,
+ unsigned ExtraNotes = 0, bool IsCCEDiag = false) {
+ if (EvalStatus.Diag)
+ return Diag(E->getExprLoc(), DiagId, ExtraNotes, IsCCEDiag);
+ HasActiveDiagnostic = false;
+ return OptionalDiagnostic();
+ }
+
+ /// Diagnose that the evaluation does not produce a C++11 core constant
+ /// expression.
+ ///
+ /// FIXME: Stop evaluating if we're in EM_ConstantExpression or
+ /// EM_PotentialConstantExpression mode and we produce one of these.
+ template<typename LocArg>
+ OptionalDiagnostic CCEDiag(LocArg Loc, diag::kind DiagId
+ = diag::note_invalid_subexpr_in_const_expr,
+ unsigned ExtraNotes = 0) {
+ // Don't override a previous diagnostic. Don't bother collecting
+ // diagnostics if we're evaluating for overflow.
+ if (!EvalStatus.Diag || !EvalStatus.Diag->empty()) {
+ HasActiveDiagnostic = false;
+ return OptionalDiagnostic();
+ }
+ return Diag(Loc, DiagId, ExtraNotes, true);
+ }
+
+ /// Add a note to a prior diagnostic.
+ OptionalDiagnostic Note(SourceLocation Loc, diag::kind DiagId) {
+ if (!HasActiveDiagnostic)
+ return OptionalDiagnostic();
+ return OptionalDiagnostic(&addDiag(Loc, DiagId));
+ }
+
+ /// Add a stack of notes to a prior diagnostic.
+ void addNotes(ArrayRef<PartialDiagnosticAt> Diags) {
+ if (HasActiveDiagnostic) {
+ EvalStatus.Diag->insert(EvalStatus.Diag->end(),
+ Diags.begin(), Diags.end());
+ }
+ }
+
+ /// Should we continue evaluation after encountering a side-effect that we
+ /// couldn't model?
+ bool keepEvaluatingAfterSideEffect() {
+ switch (EvalMode) {
+ case EM_PotentialConstantExpression:
+ case EM_PotentialConstantExpressionUnevaluated:
+ case EM_EvaluateForOverflow:
+ case EM_IgnoreSideEffects:
+ return true;
+
+ case EM_ConstantExpression:
+ case EM_ConstantExpressionUnevaluated:
+ case EM_ConstantFold:
+ case EM_DesignatorFold:
+ return false;
+ }
+ llvm_unreachable("Missed EvalMode case");
+ }
+
+ /// Note that we have had a side-effect, and determine whether we should
+ /// keep evaluating.
+ bool noteSideEffect() {
+ EvalStatus.HasSideEffects = true;
+ return keepEvaluatingAfterSideEffect();
+ }
+
+ /// Should we continue evaluation after encountering undefined behavior?
+ bool keepEvaluatingAfterUndefinedBehavior() {
+ switch (EvalMode) {
+ case EM_EvaluateForOverflow:
+ case EM_IgnoreSideEffects:
+ case EM_ConstantFold:
+ case EM_DesignatorFold:
+ return true;
+
+ case EM_PotentialConstantExpression:
+ case EM_PotentialConstantExpressionUnevaluated:
+ case EM_ConstantExpression:
+ case EM_ConstantExpressionUnevaluated:
+ return false;
+ }
+ llvm_unreachable("Missed EvalMode case");
+ }
+
+ /// Note that we hit something that was technically undefined behavior, but
+ /// that we can evaluate past it (such as signed overflow or floating-point
+ /// division by zero).
+ bool noteUndefinedBehavior() {
+ EvalStatus.HasUndefinedBehavior = true;
+ return keepEvaluatingAfterUndefinedBehavior();
+ }
+
+ /// Should we continue evaluation as much as possible after encountering a
+ /// construct which can't be reduced to a value?
+ bool keepEvaluatingAfterFailure() {
+ if (!StepsLeft)
+ return false;
+
+ switch (EvalMode) {
+ case EM_PotentialConstantExpression:
+ case EM_PotentialConstantExpressionUnevaluated:
+ case EM_EvaluateForOverflow:
+ return true;
+
+ case EM_ConstantExpression:
+ case EM_ConstantExpressionUnevaluated:
+ case EM_ConstantFold:
+ case EM_IgnoreSideEffects:
+ case EM_DesignatorFold:
+ return false;
+ }
+ llvm_unreachable("Missed EvalMode case");
+ }
+
+ bool allowInvalidBaseExpr() const {
+ return EvalMode == EM_DesignatorFold;
+ }
+ };
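+
+ // A rough user-level view of a few of these modes (illustrative):
+ // constexpr int n = f(); // initializer must be a constant expression,
+ // // so it is evaluated in EM_ConstantExpression.
+ // A constexpr function definition with unknown argument values is vetted
+ // in EM_PotentialConstantExpression, and Sema's integer-overflow warnings
+ // walk expressions in EM_EvaluateForOverflow.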
+
+ /// RAII object used to treat all foldable expressions as constant
+ /// expressions.
+ struct FoldConstant {
+ EvalInfo &Info;
+ bool Enabled;
+ bool HadNoPriorDiags;
+ EvalInfo::EvaluationMode OldMode;
+
+ explicit FoldConstant(EvalInfo &Info, bool Enabled)
+ : Info(Info),
+ Enabled(Enabled),
+ HadNoPriorDiags(Info.EvalStatus.Diag &&
+ Info.EvalStatus.Diag->empty() &&
+ !Info.EvalStatus.HasSideEffects),
+ OldMode(Info.EvalMode) {
+ if (Enabled &&
+ (Info.EvalMode == EvalInfo::EM_ConstantExpression ||
+ Info.EvalMode == EvalInfo::EM_ConstantExpressionUnevaluated))
+ Info.EvalMode = EvalInfo::EM_ConstantFold;
+ }
+ void keepDiagnostics() { Enabled = false; }
+ ~FoldConstant() {
+ if (Enabled && HadNoPriorDiags && !Info.EvalStatus.Diag->empty() &&
+ !Info.EvalStatus.HasSideEffects)
+ Info.EvalStatus.Diag->clear();
+ Info.EvalMode = OldMode;
+ }
+ };
+
+ /// RAII object used to treat the current evaluation as the correct pointer
+ /// offset fold for the current EvalMode.
+ struct FoldOffsetRAII {
+ EvalInfo &Info;
+ EvalInfo::EvaluationMode OldMode;
+ explicit FoldOffsetRAII(EvalInfo &Info, bool Subobject)
+ : Info(Info), OldMode(Info.EvalMode) {
+ if (!Info.checkingPotentialConstantExpression())
+ Info.EvalMode = Subobject ? EvalInfo::EM_DesignatorFold
+ : EvalInfo::EM_ConstantFold;
+ }
+
+ ~FoldOffsetRAII() { Info.EvalMode = OldMode; }
+ };
+
+ /// RAII object used to suppress diagnostics and side-effects from a
+ /// speculative evaluation.
+ class SpeculativeEvaluationRAII {
+ EvalInfo &Info;
+ Expr::EvalStatus Old;
+
+ public:
+ SpeculativeEvaluationRAII(EvalInfo &Info,
+ SmallVectorImpl<PartialDiagnosticAt> *NewDiag = nullptr)
+ : Info(Info), Old(Info.EvalStatus) {
+ Info.EvalStatus.Diag = NewDiag;
+ // If we're speculatively evaluating, we may have skipped over some
+ // evaluations and missed a side effect.
+ Info.EvalStatus.HasSideEffects = true;
+ }
+ ~SpeculativeEvaluationRAII() {
+ Info.EvalStatus = Old;
+ }
+ };
+
+ /// RAII object wrapping a full-expression or block scope, and handling
+ /// the ending of the lifetime of temporaries created within it.
+ template<bool IsFullExpression>
+ class ScopeRAII {
+ EvalInfo &Info;
+ unsigned OldStackSize;
+ public:
+ ScopeRAII(EvalInfo &Info)
+ : Info(Info), OldStackSize(Info.CleanupStack.size()) {}
+ ~ScopeRAII() {
+ // Body moved to a static method to encourage the compiler to inline away
+ // instances of this class.
+ cleanup(Info, OldStackSize);
+ }
+ private:
+ static void cleanup(EvalInfo &Info, unsigned OldStackSize) {
+ unsigned NewEnd = OldStackSize;
+ for (unsigned I = OldStackSize, N = Info.CleanupStack.size();
+ I != N; ++I) {
+ if (IsFullExpression && Info.CleanupStack[I].isLifetimeExtended()) {
+ // Full-expression cleanup of a lifetime-extended temporary: nothing
+ // to do, just move this cleanup to the right place in the stack.
+ std::swap(Info.CleanupStack[I], Info.CleanupStack[NewEnd]);
+ ++NewEnd;
+ } else {
+ // End the lifetime of the object.
+ Info.CleanupStack[I].endLifetime();
+ }
+ }
+ Info.CleanupStack.erase(Info.CleanupStack.begin() + NewEnd,
+ Info.CleanupStack.end());
+ }
+ };
+ typedef ScopeRAII<false> BlockScopeRAII;
+ typedef ScopeRAII<true> FullExpressionRAII;
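+
+ // An illustrative user-level view of the two instantiations:
+ // constexpr int f() {
+ // const int &r = g(); // temporary lifetime-extended to match r
+ // { int x = 1; } // x dies at the closing brace
+ // return r;
+ // }
+ // The inner braces form a BlockScopeRAII, which ends x's lifetime eagerly;
+ // the declaration of r is a full-expression, whose FullExpressionRAII
+ // leaves the lifetime-extended cleanup on the stack for an enclosing scope.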
+}
+
+bool SubobjectDesignator::checkSubobject(EvalInfo &Info, const Expr *E,
+ CheckSubobjectKind CSK) {
+ if (Invalid)
+ return false;
+ if (isOnePastTheEnd()) {
+ Info.CCEDiag(E, diag::note_constexpr_past_end_subobject)
+ << CSK;
+ setInvalid();
+ return false;
+ }
+ return true;
+}
+
+void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
+ const Expr *E, uint64_t N) {
+ if (MostDerivedPathLength == Entries.size() && MostDerivedIsArrayElement)
+ Info.CCEDiag(E, diag::note_constexpr_array_index)
+ << static_cast<int>(N) << /*array*/ 0
+ << static_cast<unsigned>(MostDerivedArraySize);
+ else
+ Info.CCEDiag(E, diag::note_constexpr_array_index)
+ << static_cast<int>(N) << /*non-array*/ 1;
+ setInvalid();
+}
+
+CallStackFrame::CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
+ const FunctionDecl *Callee, const LValue *This,
+ APValue *Arguments)
+ : Info(Info), Caller(Info.CurrentCall), CallLoc(CallLoc), Callee(Callee),
+ Index(Info.NextCallIndex++), This(This), Arguments(Arguments) {
+ Info.CurrentCall = this;
+ ++Info.CallStackDepth;
+}
+
+CallStackFrame::~CallStackFrame() {
+ assert(Info.CurrentCall == this && "calls retired out of order");
+ --Info.CallStackDepth;
+ Info.CurrentCall = Caller;
+}
+
+APValue &CallStackFrame::createTemporary(const void *Key,
+ bool IsLifetimeExtended) {
+ APValue &Result = Temporaries[Key];
+ assert(Result.isUninit() && "temporary created multiple times");
+ Info.CleanupStack.push_back(Cleanup(&Result, IsLifetimeExtended));
+ return Result;
+}
+
+static void describeCall(CallStackFrame *Frame, raw_ostream &Out);
+
+void EvalInfo::addCallStack(unsigned Limit) {
+ // Determine which calls to skip, if any.
+ unsigned ActiveCalls = CallStackDepth - 1;
+ unsigned SkipStart = ActiveCalls, SkipEnd = SkipStart;
+ if (Limit && Limit < ActiveCalls) {
+ SkipStart = Limit / 2 + Limit % 2;
+ SkipEnd = ActiveCalls - Limit / 2;
+ }
+
+ // Walk the call stack and add the diagnostics.
+ unsigned CallIdx = 0;
+ for (CallStackFrame *Frame = CurrentCall; Frame != &BottomFrame;
+ Frame = Frame->Caller, ++CallIdx) {
+ // Skip this call?
+ if (CallIdx >= SkipStart && CallIdx < SkipEnd) {
+ if (CallIdx == SkipStart) {
+ // Note that we're skipping calls.
+ addDiag(Frame->CallLoc, diag::note_constexpr_calls_suppressed)
+ << unsigned(ActiveCalls - Limit);
+ }
+ continue;
+ }
+
+ SmallVector<char, 128> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ describeCall(Frame, Out);
+ addDiag(Frame->CallLoc, diag::note_constexpr_call_here) << Out.str();
+ }
+}
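+
+// For example, with ActiveCalls == 10 and -fconstexpr-backtrace-limit=4, the
+// arithmetic above yields SkipStart == 2 and SkipEnd == 8: the two innermost
+// and two outermost frames are printed, and the six frames in between are
+// collapsed into a single note_constexpr_calls_suppressed note.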
+
+namespace {
+ struct ComplexValue {
+ private:
+ bool IsInt;
+
+ public:
+ APSInt IntReal, IntImag;
+ APFloat FloatReal, FloatImag;
+
+ ComplexValue() : FloatReal(APFloat::Bogus), FloatImag(APFloat::Bogus) {}
+
+ void makeComplexFloat() { IsInt = false; }
+ bool isComplexFloat() const { return !IsInt; }
+ APFloat &getComplexFloatReal() { return FloatReal; }
+ APFloat &getComplexFloatImag() { return FloatImag; }
+
+ void makeComplexInt() { IsInt = true; }
+ bool isComplexInt() const { return IsInt; }
+ APSInt &getComplexIntReal() { return IntReal; }
+ APSInt &getComplexIntImag() { return IntImag; }
+
+ void moveInto(APValue &v) const {
+ if (isComplexFloat())
+ v = APValue(FloatReal, FloatImag);
+ else
+ v = APValue(IntReal, IntImag);
+ }
+ void setFrom(const APValue &v) {
+ assert(v.isComplexFloat() || v.isComplexInt());
+ if (v.isComplexFloat()) {
+ makeComplexFloat();
+ FloatReal = v.getComplexFloatReal();
+ FloatImag = v.getComplexFloatImag();
+ } else {
+ makeComplexInt();
+ IntReal = v.getComplexIntReal();
+ IntImag = v.getComplexIntImag();
+ }
+ }
+ };
+
+ struct LValue {
+ APValue::LValueBase Base;
+ CharUnits Offset;
+ bool InvalidBase : 1;
+ unsigned CallIndex : 31;
+ SubobjectDesignator Designator;
+
+ const APValue::LValueBase getLValueBase() const { return Base; }
+ CharUnits &getLValueOffset() { return Offset; }
+ const CharUnits &getLValueOffset() const { return Offset; }
+ unsigned getLValueCallIndex() const { return CallIndex; }
+ SubobjectDesignator &getLValueDesignator() { return Designator; }
+ const SubobjectDesignator &getLValueDesignator() const { return Designator;}
+
+ void moveInto(APValue &V) const {
+ if (Designator.Invalid)
+ V = APValue(Base, Offset, APValue::NoLValuePath(), CallIndex);
+ else
+ V = APValue(Base, Offset, Designator.Entries,
+ Designator.IsOnePastTheEnd, CallIndex);
+ }
+ void setFrom(ASTContext &Ctx, const APValue &V) {
+ assert(V.isLValue());
+ Base = V.getLValueBase();
+ Offset = V.getLValueOffset();
+ InvalidBase = false;
+ CallIndex = V.getLValueCallIndex();
+ Designator = SubobjectDesignator(Ctx, V);
+ }
+
+ void set(APValue::LValueBase B, unsigned I = 0, bool BInvalid = false) {
+ Base = B;
+ Offset = CharUnits::Zero();
+ InvalidBase = BInvalid;
+ CallIndex = I;
+ Designator = SubobjectDesignator(getType(B));
+ }
+
+ void setInvalid(APValue::LValueBase B, unsigned I = 0) {
+ set(B, I, true);
+ }
+
+ // Check that this LValue is not based on a null pointer. If it is, produce
+ // a diagnostic and mark the designator as invalid.
+ bool checkNullPointer(EvalInfo &Info, const Expr *E,
+ CheckSubobjectKind CSK) {
+ if (Designator.Invalid)
+ return false;
+ if (!Base) {
+ Info.CCEDiag(E, diag::note_constexpr_null_subobject)
+ << CSK;
+ Designator.setInvalid();
+ return false;
+ }
+ return true;
+ }
+
+ // Check that this LValue refers to an object. If not, set the designator
+ // to be invalid and emit a diagnostic.
+ bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK) {
+ return (CSK == CSK_ArrayToPointer || checkNullPointer(Info, E, CSK)) &&
+ Designator.checkSubobject(Info, E, CSK);
+ }
+
+ void addDecl(EvalInfo &Info, const Expr *E,
+ const Decl *D, bool Virtual = false) {
+ if (checkSubobject(Info, E, isa<FieldDecl>(D) ? CSK_Field : CSK_Base))
+ Designator.addDeclUnchecked(D, Virtual);
+ }
+ void addArray(EvalInfo &Info, const Expr *E, const ConstantArrayType *CAT) {
+ if (checkSubobject(Info, E, CSK_ArrayToPointer))
+ Designator.addArrayUnchecked(CAT);
+ }
+ void addComplex(EvalInfo &Info, const Expr *E, QualType EltTy, bool Imag) {
+ if (checkSubobject(Info, E, Imag ? CSK_Imag : CSK_Real))
+ Designator.addComplexUnchecked(EltTy, Imag);
+ }
+ void adjustIndex(EvalInfo &Info, const Expr *E, uint64_t N) {
+ if (N && checkNullPointer(Info, E, CSK_ArrayIndex))
+ Designator.adjustIndex(Info, E, N);
+ }
+ };
+
+ struct MemberPtr {
+ MemberPtr() {}
+ explicit MemberPtr(const ValueDecl *Decl) :
+ DeclAndIsDerivedMember(Decl, false), Path() {}
+
+ /// The member or (direct or indirect) field referred to by this member
+ /// pointer, or 0 if this is a null member pointer.
+ const ValueDecl *getDecl() const {
+ return DeclAndIsDerivedMember.getPointer();
+ }
+ /// Is this actually a member of some type derived from the relevant class?
+ bool isDerivedMember() const {
+ return DeclAndIsDerivedMember.getInt();
+ }
+ /// Get the class which the declaration actually lives in.
+ const CXXRecordDecl *getContainingRecord() const {
+ return cast<CXXRecordDecl>(
+ DeclAndIsDerivedMember.getPointer()->getDeclContext());
+ }
+
+ void moveInto(APValue &V) const {
+ V = APValue(getDecl(), isDerivedMember(), Path);
+ }
+ void setFrom(const APValue &V) {
+ assert(V.isMemberPointer());
+ DeclAndIsDerivedMember.setPointer(V.getMemberPointerDecl());
+ DeclAndIsDerivedMember.setInt(V.isMemberPointerToDerivedMember());
+ Path.clear();
+ ArrayRef<const CXXRecordDecl*> P = V.getMemberPointerPath();
+ Path.insert(Path.end(), P.begin(), P.end());
+ }
+
+ /// DeclAndIsDerivedMember - The member declaration, and a flag indicating
+ /// whether the member is a member of some class derived from the class type
+ /// of the member pointer.
+ llvm::PointerIntPair<const ValueDecl*, 1, bool> DeclAndIsDerivedMember;
+ /// Path - The path of base/derived classes from the member declaration's
+ /// class (exclusive) to the class type of the member pointer (inclusive).
+ SmallVector<const CXXRecordDecl*, 4> Path;
+
+ /// Perform a cast towards the class of the Decl (either up or down the
+ /// hierarchy).
+ bool castBack(const CXXRecordDecl *Class) {
+ assert(!Path.empty());
+ const CXXRecordDecl *Expected;
+ if (Path.size() >= 2)
+ Expected = Path[Path.size() - 2];
+ else
+ Expected = getContainingRecord();
+ if (Expected->getCanonicalDecl() != Class->getCanonicalDecl()) {
+ // C++11 [expr.static.cast]p12: In a conversion from (D::*) to (B::*),
+ // if B does not contain the original member and is not a base or
+ // derived class of the class containing the original member, the result
+ // of the cast is undefined.
+ // C++11 [conv.mem]p2 does not cover this case for a cast from (B::*) to
+ // (D::*). We consider that to be a language defect.
+ return false;
+ }
+ Path.pop_back();
+ return true;
+ }
+ /// Perform a base-to-derived member pointer cast.
+ bool castToDerived(const CXXRecordDecl *Derived) {
+ if (!getDecl())
+ return true;
+ if (!isDerivedMember()) {
+ Path.push_back(Derived);
+ return true;
+ }
+ if (!castBack(Derived))
+ return false;
+ if (Path.empty())
+ DeclAndIsDerivedMember.setInt(false);
+ return true;
+ }
+ /// Perform a derived-to-base member pointer cast.
+ bool castToBase(const CXXRecordDecl *Base) {
+ if (!getDecl())
+ return true;
+ if (Path.empty())
+ DeclAndIsDerivedMember.setInt(true);
+ if (isDerivedMember()) {
+ Path.push_back(Base);
+ return true;
+ }
+ return castBack(Base);
+ }
+ };
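+
+ // Path bookkeeping sketch (illustrative):
+ // struct B { int m; };
+ // struct D : B {};
+ // int B::*pb = &B::m; // Path == {}
+ // int D::*pd = pb; // castToDerived(D) pushes D
+ // int B::*pb2 = static_cast<int B::*>(pd); // castToBase(B) pops it back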
+
+ /// Compare two member pointers, which are assumed to be of the same type.
+ static bool operator==(const MemberPtr &LHS, const MemberPtr &RHS) {
+ if (!LHS.getDecl() || !RHS.getDecl())
+ return !LHS.getDecl() && !RHS.getDecl();
+ if (LHS.getDecl()->getCanonicalDecl() != RHS.getDecl()->getCanonicalDecl())
+ return false;
+ return LHS.Path == RHS.Path;
+ }
+}
+
+static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E);
+static bool EvaluateInPlace(APValue &Result, EvalInfo &Info,
+ const LValue &This, const Expr *E,
+ bool AllowNonLiteralTypes = false);
+static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info);
+static bool EvaluatePointer(const Expr *E, LValue &Result, EvalInfo &Info);
+static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
+ EvalInfo &Info);
+static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info);
+static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info);
+static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
+ EvalInfo &Info);
+static bool EvaluateFloat(const Expr *E, APFloat &Result, EvalInfo &Info);
+static bool EvaluateComplex(const Expr *E, ComplexValue &Res, EvalInfo &Info);
+static bool EvaluateAtomic(const Expr *E, APValue &Result, EvalInfo &Info);
+static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result);
+
+//===----------------------------------------------------------------------===//
+// Misc utilities
+//===----------------------------------------------------------------------===//
+
+/// Produce a string describing the given constexpr call.
+static void describeCall(CallStackFrame *Frame, raw_ostream &Out) {
+ unsigned ArgIndex = 0;
+ bool IsMemberCall = isa<CXXMethodDecl>(Frame->Callee) &&
+ !isa<CXXConstructorDecl>(Frame->Callee) &&
+ cast<CXXMethodDecl>(Frame->Callee)->isInstance();
+
+ if (!IsMemberCall)
+ Out << *Frame->Callee << '(';
+
+ if (Frame->This && IsMemberCall) {
+ APValue Val;
+ Frame->This->moveInto(Val);
+ Val.printPretty(Out, Frame->Info.Ctx,
+ Frame->This->Designator.MostDerivedType);
+ // FIXME: Add parens around Val if needed.
+ Out << "->" << *Frame->Callee << '(';
+ IsMemberCall = false;
+ }
+
+ for (FunctionDecl::param_const_iterator I = Frame->Callee->param_begin(),
+ E = Frame->Callee->param_end(); I != E; ++I, ++ArgIndex) {
+ if (ArgIndex > (unsigned)IsMemberCall)
+ Out << ", ";
+
+ const ParmVarDecl *Param = *I;
+ const APValue &Arg = Frame->Arguments[ArgIndex];
+ Arg.printPretty(Out, Frame->Info.Ctx, Param->getType());
+
+ if (ArgIndex == 0 && IsMemberCall)
+ Out << "->" << *Frame->Callee << '(';
+ }
+
+ Out << ')';
+}
+
+/// Evaluate an expression to see if it had side-effects, and discard its
+/// result.
+/// \return \c true if the caller should keep evaluating.
+static bool EvaluateIgnoredValue(EvalInfo &Info, const Expr *E) {
+ APValue Scratch;
+ if (!Evaluate(Scratch, Info, E))
+ // We don't need the value, but we might have skipped a side effect here.
+ return Info.noteSideEffect();
+ return true;
+}
+
+/// Sign- or zero-extend a value to 64 bits. If it's already 64 bits, just
+/// return its existing value.
+static int64_t getExtValue(const APSInt &Value) {
+ return Value.isSigned() ? Value.getSExtValue()
+ : static_cast<int64_t>(Value.getZExtValue());
+}
+
+/// Should this call expression be treated as a string literal?
+static bool IsStringLiteralCall(const CallExpr *E) {
+ unsigned Builtin = E->getBuiltinCallee();
+ return (Builtin == Builtin::BI__builtin___CFStringMakeConstantString ||
+ Builtin == Builtin::BI__builtin___NSStringMakeConstantString);
+}
+
+static bool IsGlobalLValue(APValue::LValueBase B) {
+ // C++11 [expr.const]p3: An address constant expression is a prvalue core
+ // constant expression of pointer type that evaluates to...
+
+ // ... a null pointer value, or a prvalue core constant expression of type
+ // std::nullptr_t.
+ if (!B) return true;
+
+ if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
+ // ... the address of an object with static storage duration,
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ return VD->hasGlobalStorage();
+ // ... the address of a function,
+ return isa<FunctionDecl>(D);
+ }
+
+ const Expr *E = B.get<const Expr*>();
+ switch (E->getStmtClass()) {
+ default:
+ return false;
+ case Expr::CompoundLiteralExprClass: {
+ const CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
+ return CLE->isFileScope() && CLE->isLValue();
+ }
+ case Expr::MaterializeTemporaryExprClass:
+ // A materialized temporary might have been lifetime-extended to static
+ // storage duration.
+ return cast<MaterializeTemporaryExpr>(E)->getStorageDuration() == SD_Static;
+ // A string literal has static storage duration.
+ case Expr::StringLiteralClass:
+ case Expr::PredefinedExprClass:
+ case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCEncodeExprClass:
+ case Expr::CXXTypeidExprClass:
+ case Expr::CXXUuidofExprClass:
+ return true;
+ case Expr::CallExprClass:
+ return IsStringLiteralCall(cast<CallExpr>(E));
+ // For GCC compatibility, &&label has static storage duration.
+ case Expr::AddrLabelExprClass:
+ return true;
+ // A Block literal expression may be used as the initialization value for
+ // Block variables at global or local static scope.
+ case Expr::BlockExprClass:
+ return !cast<BlockExpr>(E)->getBlockDecl()->hasCaptures();
+ case Expr::ImplicitValueInitExprClass:
+ // FIXME:
+ // We can never form an lvalue with an implicit value initialization as its
+ // base through expression evaluation, so these only appear in one case: the
+ // implicit variable declaration we invent when checking whether a constexpr
+ // constructor can produce a constant expression. We must assume that such
+ // an expression might be a global lvalue.
+ return true;
+ }
+}
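+
+// Illustrative examples of the cases above:
+// static int g;
+// constexpr int *p = &g; // VarDecl with static storage: global
+// constexpr const char *s = "str"; // string literal: global
+// void f() { int l; constexpr int *q = &l; } // automatic storage:
+// // not a global lvalue, rejected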
+
+static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) {
+ assert(Base && "no location for a null lvalue");
+ const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
+ if (VD)
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ else
+ Info.Note(Base.get<const Expr*>()->getExprLoc(),
+ diag::note_constexpr_temporary_here);
+}
+
+/// Check that this reference or pointer core constant expression is a valid
+/// value for an address or reference constant expression. Return true if we
+/// can fold this expression, whether or not it's a constant expression.
+static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
+ QualType Type, const LValue &LVal) {
+ bool IsReferenceType = Type->isReferenceType();
+
+ APValue::LValueBase Base = LVal.getLValueBase();
+ const SubobjectDesignator &Designator = LVal.getLValueDesignator();
+
+ // Check that the object is a global. Note that the fake 'this' object we
+ // manufacture when checking potential constant expressions is conservatively
+ // assumed to be global here.
+ if (!IsGlobalLValue(Base)) {
+ if (Info.getLangOpts().CPlusPlus11) {
+ const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
+ Info.Diag(Loc, diag::note_constexpr_non_global, 1)
+ << IsReferenceType << !Designator.Entries.empty()
+ << !!VD << VD;
+ NoteLValueLocation(Info, Base);
+ } else {
+ Info.Diag(Loc);
+ }
+ // Don't allow references to temporaries to escape.
+ return false;
+ }
+ assert((Info.checkingPotentialConstantExpression() ||
+ LVal.getLValueCallIndex() == 0) &&
+ "have call index for global lvalue");
+
+ if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) {
+ if (const VarDecl *Var = dyn_cast<const VarDecl>(VD)) {
+ // Check if this is a thread-local variable.
+ if (Var->getTLSKind())
+ return false;
+
+ // A dllimport variable never acts like a constant.
+ if (Var->hasAttr<DLLImportAttr>())
+ return false;
+ }
+ if (const auto *FD = dyn_cast<const FunctionDecl>(VD)) {
+ // __declspec(dllimport) must be handled very carefully:
+ // We must never initialize an expression with the thunk in C++.
+ // Doing otherwise would allow the same id-expression to yield
+ // different addresses for the same function in different translation
+ // units. However, this means that we must dynamically initialize the
+ // expression with the contents of the import address table at runtime.
+ //
+ // The C language has no notion of ODR; furthermore, it has no notion of
+ // dynamic initialization. This means that we are permitted to
+ // perform initialization with the address of the thunk.
+ if (Info.getLangOpts().CPlusPlus && FD->hasAttr<DLLImportAttr>())
+ return false;
+ }
+ }
+
+ // Allow address constant expressions to be past-the-end pointers. This is
+ // an extension: the standard requires them to point to an object.
+ if (!IsReferenceType)
+ return true;
+
+ // A reference constant expression must refer to an object.
+ if (!Base) {
+ // FIXME: diagnostic
+ Info.CCEDiag(Loc);
+ return true;
+ }
+
+ // Does this refer to one past the end of some object?
+ if (!Designator.Invalid && Designator.isOnePastTheEnd()) {
+ const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
+ Info.Diag(Loc, diag::note_constexpr_past_end, 1)
+ << !Designator.Entries.empty() << !!VD << VD;
+ NoteLValueLocation(Info, Base);
+ }
+
+ return true;
+}
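+
+// An illustrative consequence of the rules above:
+// static int arr[2];
+// constexpr int *e = arr + 2; // accepted: past-the-end address is allowed
+// constexpr int &r = arr[2]; // diagnosed: a reference constant expression
+// // must refer to an object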
+
+/// Check that this core constant expression is of literal type, and if not,
+/// produce an appropriate diagnostic.
+static bool CheckLiteralType(EvalInfo &Info, const Expr *E,
+ const LValue *This = nullptr) {
+ if (!E->isRValue() || E->getType()->isLiteralType(Info.Ctx))
+ return true;
+
+ // C++1y: A constant initializer for an object o [...] may also invoke
+ // constexpr constructors for o and its subobjects even if those objects
+ // are of non-literal class types.
+ if (Info.getLangOpts().CPlusPlus14 && This &&
+ Info.EvaluatingDecl == This->getLValueBase())
+ return true;
+
+ // Prvalue constant expressions must be of literal types.
+ if (Info.getLangOpts().CPlusPlus11)
+ Info.Diag(E, diag::note_constexpr_nonliteral)
+ << E->getType();
+ else
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+}
+
+/// Check that this core constant expression value is a valid value for a
+/// constant expression. If not, report an appropriate diagnostic. Does not
+/// check that the expression is of literal type.
+static bool CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc,
+ QualType Type, const APValue &Value) {
+ if (Value.isUninit()) {
+ Info.Diag(DiagLoc, diag::note_constexpr_uninitialized)
+ << true << Type;
+ return false;
+ }
+
+ // We allow _Atomic(T) to be initialized from anything that T can be
+ // initialized from.
+ if (const AtomicType *AT = Type->getAs<AtomicType>())
+ Type = AT->getValueType();
+
+ // Core issue 1454: For a literal constant expression of array or class type,
+ // each subobject of its value shall have been initialized by a constant
+ // expression.
+ if (Value.isArray()) {
+ QualType EltTy = Type->castAsArrayTypeUnsafe()->getElementType();
+ for (unsigned I = 0, N = Value.getArrayInitializedElts(); I != N; ++I) {
+ if (!CheckConstantExpression(Info, DiagLoc, EltTy,
+ Value.getArrayInitializedElt(I)))
+ return false;
+ }
+ if (!Value.hasArrayFiller())
+ return true;
+ return CheckConstantExpression(Info, DiagLoc, EltTy,
+ Value.getArrayFiller());
+ }
+ if (Value.isUnion() && Value.getUnionField()) {
+ return CheckConstantExpression(Info, DiagLoc,
+ Value.getUnionField()->getType(),
+ Value.getUnionValue());
+ }
+ if (Value.isStruct()) {
+ RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
+ if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
+ unsigned BaseIndex = 0;
+ for (CXXRecordDecl::base_class_const_iterator I = CD->bases_begin(),
+ End = CD->bases_end(); I != End; ++I, ++BaseIndex) {
+ if (!CheckConstantExpression(Info, DiagLoc, I->getType(),
+ Value.getStructBase(BaseIndex)))
+ return false;
+ }
+ }
+ for (const auto *I : RD->fields()) {
+ if (!CheckConstantExpression(Info, DiagLoc, I->getType(),
+ Value.getStructField(I->getFieldIndex())))
+ return false;
+ }
+ }
+
+ if (Value.isLValue()) {
+ LValue LVal;
+ LVal.setFrom(Info.Ctx, Value);
+ return CheckLValueConstantExpression(Info, DiagLoc, Type, LVal);
+ }
+
+ // Everything else is fine.
+ return true;
+}
+
+static const ValueDecl *GetLValueBaseDecl(const LValue &LVal) {
+ return LVal.Base.dyn_cast<const ValueDecl*>();
+}
+
+static bool IsLiteralLValue(const LValue &Value) {
+ if (Value.CallIndex)
+ return false;
+ const Expr *E = Value.Base.dyn_cast<const Expr*>();
+ return E && !isa<MaterializeTemporaryExpr>(E);
+}
+
+static bool IsWeakLValue(const LValue &Value) {
+ const ValueDecl *Decl = GetLValueBaseDecl(Value);
+ return Decl && Decl->isWeak();
+}
+
+static bool isZeroSized(const LValue &Value) {
+ const ValueDecl *Decl = GetLValueBaseDecl(Value);
+ if (Decl && isa<VarDecl>(Decl)) {
+ QualType Ty = Decl->getType();
+ if (Ty->isArrayType())
+ return Ty->isIncompleteType() ||
+ Decl->getASTContext().getTypeSize(Ty) == 0;
+ }
+ return false;
+}
+
+static bool EvalPointerValueAsBool(const APValue &Value, bool &Result) {
+ // A null base expression indicates a null pointer. These are always
+ // evaluatable, and they are false unless the offset is zero.
+ if (!Value.getLValueBase()) {
+ Result = !Value.getLValueOffset().isZero();
+ return true;
+ }
+
+ // We have a non-null base. These are generally known to be true, but if it's
+ // a weak declaration it can be null at runtime.
+ Result = true;
+ const ValueDecl *Decl = Value.getLValueBase().dyn_cast<const ValueDecl*>();
+ return !Decl || !Decl->isWeak();
+}
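+
+// For example (illustrative): whether a weak symbol is non-null is only
+// known at link or load time, so converting its address to bool is refused:
+// extern int w __attribute__((weak));
+// constexpr bool b = &w; // not a constant expression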
+
+static bool HandleConversionToBool(const APValue &Val, bool &Result) {
+ switch (Val.getKind()) {
+ case APValue::Uninitialized:
+ return false;
+ case APValue::Int:
+ Result = Val.getInt().getBoolValue();
+ return true;
+ case APValue::Float:
+ Result = !Val.getFloat().isZero();
+ return true;
+ case APValue::ComplexInt:
+ Result = Val.getComplexIntReal().getBoolValue() ||
+ Val.getComplexIntImag().getBoolValue();
+ return true;
+ case APValue::ComplexFloat:
+ Result = !Val.getComplexFloatReal().isZero() ||
+ !Val.getComplexFloatImag().isZero();
+ return true;
+ case APValue::LValue:
+ return EvalPointerValueAsBool(Val, Result);
+ case APValue::MemberPointer:
+ Result = Val.getMemberPointerDecl();
+ return true;
+ case APValue::Vector:
+ case APValue::Array:
+ case APValue::Struct:
+ case APValue::Union:
+ case APValue::AddrLabelDiff:
+ return false;
+ }
+
+ llvm_unreachable("unknown APValue kind");
+}
+
+static bool EvaluateAsBooleanCondition(const Expr *E, bool &Result,
+ EvalInfo &Info) {
+ assert(E->isRValue() && "missing lvalue-to-rvalue conv in bool condition");
+ APValue Val;
+ if (!Evaluate(Val, Info, E))
+ return false;
+ return HandleConversionToBool(Val, Result);
+}
+
+template<typename T>
+static bool HandleOverflow(EvalInfo &Info, const Expr *E,
+ const T &SrcValue, QualType DestType) {
+ Info.CCEDiag(E, diag::note_constexpr_overflow)
+ << SrcValue << DestType;
+ return Info.noteUndefinedBehavior();
+}
+
+static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E,
+ QualType SrcType, const APFloat &Value,
+ QualType DestType, APSInt &Result) {
+ unsigned DestWidth = Info.Ctx.getIntWidth(DestType);
+ // Determine whether we are converting to unsigned or signed.
+ bool DestSigned = DestType->isSignedIntegerOrEnumerationType();
+
+ Result = APSInt(DestWidth, !DestSigned);
+ bool ignored;
+ if (Value.convertToInteger(Result, llvm::APFloat::rmTowardZero, &ignored)
+ & APFloat::opInvalidOp)
+ return HandleOverflow(Info, E, Value, DestType);
+ return true;
+}
+
+static bool HandleFloatToFloatCast(EvalInfo &Info, const Expr *E,
+ QualType SrcType, QualType DestType,
+ APFloat &Result) {
+ APFloat Value = Result;
+ bool ignored;
+ if (Result.convert(Info.Ctx.getFloatTypeSemantics(DestType),
+ APFloat::rmNearestTiesToEven, &ignored)
+ & APFloat::opOverflow)
+ return HandleOverflow(Info, E, Value, DestType);
+ return true;
+}
+
+static APSInt HandleIntToIntCast(EvalInfo &Info, const Expr *E,
+ QualType DestType, QualType SrcType,
+ const APSInt &Value) {
+ unsigned DestWidth = Info.Ctx.getIntWidth(DestType);
+ APSInt Result = Value;
+ // Figure out if this is a truncate, extend or noop cast.
+ // If the input is signed, do a sign extend, noop, or truncate.
+ Result = Result.extOrTrunc(DestWidth);
+ Result.setIsUnsigned(DestType->isUnsignedIntegerOrEnumerationType());
+ return Result;
+}
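+
+// A worked example of the extend-or-truncate behavior above (illustrative):
+// converting the 32-bit int value 300 to unsigned char keeps the low eight
+// bits, 300 - 256 == 44, which is the conversion this function models.
+static_assert((unsigned char)300 == 44,
+ "int-to-int conversion truncates modulo 2^width");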
+
+static bool HandleIntToFloatCast(EvalInfo &Info, const Expr *E,
+ QualType SrcType, const APSInt &Value,
+ QualType DestType, APFloat &Result) {
+ Result = APFloat(Info.Ctx.getFloatTypeSemantics(DestType), 1);
+ if (Result.convertFromAPInt(Value, Value.isSigned(),
+ APFloat::rmNearestTiesToEven)
+ & APFloat::opOverflow)
+ return HandleOverflow(Info, E, Value, DestType);
+ return true;
+}
+
+static bool truncateBitfieldValue(EvalInfo &Info, const Expr *E,
+ APValue &Value, const FieldDecl *FD) {
+ assert(FD->isBitField() && "truncateBitfieldValue on non-bitfield");
+
+ if (!Value.isInt()) {
+ // Trying to store a pointer-cast-to-integer into a bitfield.
+ // FIXME: In this case, we should provide the diagnostic for casting
+ // a pointer to an integer.
+ assert(Value.isLValue() && "integral value neither int nor lvalue?");
+ Info.Diag(E);
+ return false;
+ }
+
+ APSInt &Int = Value.getInt();
+ unsigned OldBitWidth = Int.getBitWidth();
+ unsigned NewBitWidth = FD->getBitWidthValue(Info.Ctx);
+ if (NewBitWidth < OldBitWidth)
+ Int = Int.trunc(NewBitWidth).extend(OldBitWidth);
+ return true;
+}
+
+static bool EvalAndBitcastToAPInt(EvalInfo &Info, const Expr *E,
+ llvm::APInt &Res) {
+ APValue SVal;
+ if (!Evaluate(SVal, Info, E))
+ return false;
+ if (SVal.isInt()) {
+ Res = SVal.getInt();
+ return true;
+ }
+ if (SVal.isFloat()) {
+ Res = SVal.getFloat().bitcastToAPInt();
+ return true;
+ }
+ if (SVal.isVector()) {
+ QualType VecTy = E->getType();
+ unsigned VecSize = Info.Ctx.getTypeSize(VecTy);
+ QualType EltTy = VecTy->castAs<VectorType>()->getElementType();
+ unsigned EltSize = Info.Ctx.getTypeSize(EltTy);
+ bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();
+ Res = llvm::APInt::getNullValue(VecSize);
+ for (unsigned i = 0; i < SVal.getVectorLength(); i++) {
+ APValue &Elt = SVal.getVectorElt(i);
+ llvm::APInt EltAsInt;
+ if (Elt.isInt()) {
+ EltAsInt = Elt.getInt();
+ } else if (Elt.isFloat()) {
+ EltAsInt = Elt.getFloat().bitcastToAPInt();
+ } else {
+ // Don't try to handle vectors of anything other than int or float
+ // (not sure if it's possible to hit this case).
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ unsigned BaseEltSize = EltAsInt.getBitWidth();
+ if (BigEndian)
+ Res |= EltAsInt.zextOrTrunc(VecSize).rotr(i*EltSize+BaseEltSize);
+ else
+ Res |= EltAsInt.zextOrTrunc(VecSize).rotl(i*EltSize);
+ }
+ return true;
+ }
+ // Give up if the input isn't an int, float, or vector. For example, we
+ // reject "(v4i16)(intptr_t)&a".
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+}
+
+/// Perform the given integer operation, which is known to need at most BitWidth
+/// bits, and check for overflow in the original type (if that type was not an
+/// unsigned type).
+template<typename Operation>
+static bool CheckedIntArithmetic(EvalInfo &Info, const Expr *E,
+ const APSInt &LHS, const APSInt &RHS,
+ unsigned BitWidth, Operation Op,
+ APSInt &Result) {
+ if (LHS.isUnsigned()) {
+ Result = Op(LHS, RHS);
+ return true;
+ }
+
+ APSInt Value(Op(LHS.extend(BitWidth), RHS.extend(BitWidth)), false);
+ Result = Value.trunc(LHS.getBitWidth());
+ if (Result.extend(BitWidth) != Value) {
+ if (Info.checkingForOverflow())
+ Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
+ diag::warn_integer_constant_overflow)
+ << Result.toString(10) << E->getType();
+ else
+ return HandleOverflow(Info, E, Value, E->getType());
+ }
+ return true;
+}
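+
+// Worked example (illustrative, 32-bit int): for INT_MAX + 1 the sum is
+// computed in 33 bits as 2147483648. Truncating back to 32 bits yields
+// -2147483648, which re-extends to a different 33-bit value, so the
+// operation is reported as an overflow instead of silently wrapping.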
+
+/// Perform the given binary integer operation.
+static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS,
+ BinaryOperatorKind Opcode, APSInt RHS,
+ APSInt &Result) {
+ switch (Opcode) {
+ default:
+ Info.Diag(E);
+ return false;
+ case BO_Mul:
+ return CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() * 2,
+ std::multiplies<APSInt>(), Result);
+ case BO_Add:
+ return CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() + 1,
+ std::plus<APSInt>(), Result);
+ case BO_Sub:
+ return CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() + 1,
+ std::minus<APSInt>(), Result);
+ case BO_And: Result = LHS & RHS; return true;
+ case BO_Xor: Result = LHS ^ RHS; return true;
+ case BO_Or: Result = LHS | RHS; return true;
+ case BO_Div:
+ case BO_Rem:
+ if (RHS == 0) {
+ Info.Diag(E, diag::note_expr_divide_by_zero);
+ return false;
+ }
+ Result = (Opcode == BO_Rem ? LHS % RHS : LHS / RHS);
+ // Check for the overflow case: INT_MIN / -1 or INT_MIN % -1. APSInt
+ // supports this operation and gives the two's complement result.
+ if (RHS.isNegative() && RHS.isAllOnesValue() &&
+ LHS.isSigned() && LHS.isMinSignedValue())
+ return HandleOverflow(Info, E, -LHS.extend(LHS.getBitWidth() + 1),
+ E->getType());
+ return true;
+ case BO_Shl: {
+ if (Info.getLangOpts().OpenCL)
+ // OpenCL 6.3j: shift values are effectively % word size of LHS.
+ RHS &= APSInt(llvm::APInt(RHS.getBitWidth(),
+ static_cast<uint64_t>(LHS.getBitWidth() - 1)),
+ RHS.isUnsigned());
+ else if (RHS.isSigned() && RHS.isNegative()) {
+ // During constant-folding, a negative shift is an opposite shift. Such
+ // a shift is not a constant expression.
+ Info.CCEDiag(E, diag::note_constexpr_negative_shift) << RHS;
+ RHS = -RHS;
+ goto shift_right;
+ }
+ shift_left:
+ // C++11 [expr.shift]p1: Shift width must be less than the bit width of
+ // the shifted type.
+ unsigned SA = (unsigned) RHS.getLimitedValue(LHS.getBitWidth()-1);
+ if (SA != RHS) {
+ Info.CCEDiag(E, diag::note_constexpr_large_shift)
+ << RHS << E->getType() << LHS.getBitWidth();
+ } else if (LHS.isSigned()) {
+ // C++11 [expr.shift]p2: A signed left shift must have a non-negative
+ // operand, and must not overflow the corresponding unsigned type.
+ if (LHS.isNegative())
+ Info.CCEDiag(E, diag::note_constexpr_lshift_of_negative) << LHS;
+ else if (LHS.countLeadingZeros() < SA)
+ Info.CCEDiag(E, diag::note_constexpr_lshift_discards);
+ }
+ Result = LHS << SA;
+ return true;
+ }
+ case BO_Shr: {
+ if (Info.getLangOpts().OpenCL)
+ // OpenCL 6.3j: shift values are effectively % word size of LHS.
+ RHS &= APSInt(llvm::APInt(RHS.getBitWidth(),
+ static_cast<uint64_t>(LHS.getBitWidth() - 1)),
+ RHS.isUnsigned());
+ else if (RHS.isSigned() && RHS.isNegative()) {
+ // During constant-folding, a negative shift is an opposite shift. Such a
+ // shift is not a constant expression.
+ Info.CCEDiag(E, diag::note_constexpr_negative_shift) << RHS;
+ RHS = -RHS;
+ goto shift_left;
+ }
+ shift_right:
+ // C++11 [expr.shift]p1: Shift width must be less than the bit width of the
+ // shifted type.
+ unsigned SA = (unsigned) RHS.getLimitedValue(LHS.getBitWidth()-1);
+ if (SA != RHS)
+ Info.CCEDiag(E, diag::note_constexpr_large_shift)
+ << RHS << E->getType() << LHS.getBitWidth();
+ Result = LHS >> SA;
+ return true;
+ }
+
+ case BO_LT: Result = LHS < RHS; return true;
+ case BO_GT: Result = LHS > RHS; return true;
+ case BO_LE: Result = LHS <= RHS; return true;
+ case BO_GE: Result = LHS >= RHS; return true;
+ case BO_EQ: Result = LHS == RHS; return true;
+ case BO_NE: Result = LHS != RHS; return true;
+ }
+}
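+
+// Illustrative shifts diagnosed above (assuming 32-bit int):
+// 1 << 32 // note_constexpr_large_shift: count >= bit width
+// 1 << -1 // note_constexpr_negative_shift, then folded as 1 >> 1
+// -1 << 3 // note_constexpr_lshift_of_negative
+// 4 << 30 // note_constexpr_lshift_discards: set bits shifted out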
+
+/// Perform the given binary floating-point operation, in-place, on LHS.
+static bool handleFloatFloatBinOp(EvalInfo &Info, const Expr *E,
+ APFloat &LHS, BinaryOperatorKind Opcode,
+ const APFloat &RHS) {
+ switch (Opcode) {
+ default:
+ Info.Diag(E);
+ return false;
+ case BO_Mul:
+ LHS.multiply(RHS, APFloat::rmNearestTiesToEven);
+ break;
+ case BO_Add:
+ LHS.add(RHS, APFloat::rmNearestTiesToEven);
+ break;
+ case BO_Sub:
+ LHS.subtract(RHS, APFloat::rmNearestTiesToEven);
+ break;
+ case BO_Div:
+ LHS.divide(RHS, APFloat::rmNearestTiesToEven);
+ break;
+ }
+
+ if (LHS.isInfinity() || LHS.isNaN()) {
+ Info.CCEDiag(E, diag::note_constexpr_float_arithmetic) << LHS.isNaN();
+ return Info.noteUndefinedBehavior();
+ }
+ return true;
+}
+
+/// Cast an lvalue referring to a base subobject to a derived class, by
+/// truncating the lvalue's path to the given length.
+static bool CastToDerivedClass(EvalInfo &Info, const Expr *E, LValue &Result,
+ const RecordDecl *TruncatedType,
+ unsigned TruncatedElements) {
+ SubobjectDesignator &D = Result.Designator;
+
+ // Check we actually point to a derived class object.
+ if (TruncatedElements == D.Entries.size())
+ return true;
+ assert(TruncatedElements >= D.MostDerivedPathLength &&
+ "not casting to a derived class");
+ if (!Result.checkSubobject(Info, E, CSK_Derived))
+ return false;
+
+ // Truncate the path to the subobject, and remove any derived-to-base offsets.
+ const RecordDecl *RD = TruncatedType;
+ for (unsigned I = TruncatedElements, N = D.Entries.size(); I != N; ++I) {
+ if (RD->isInvalidDecl()) return false;
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+ const CXXRecordDecl *Base = getAsBaseClass(D.Entries[I]);
+ if (isVirtualBaseClass(D.Entries[I]))
+ Result.Offset -= Layout.getVBaseClassOffset(Base);
+ else
+ Result.Offset -= Layout.getBaseClassOffset(Base);
+ RD = Base;
+ }
+ D.Entries.resize(TruncatedElements);
+ return true;
+}
+
+static bool HandleLValueDirectBase(EvalInfo &Info, const Expr *E, LValue &Obj,
+ const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base,
+ const ASTRecordLayout *RL = nullptr) {
+ if (!RL) {
+ if (Derived->isInvalidDecl()) return false;
+ RL = &Info.Ctx.getASTRecordLayout(Derived);
+ }
+
+ Obj.getLValueOffset() += RL->getBaseClassOffset(Base);
+ Obj.addDecl(Info, E, Base, /*Virtual*/ false);
+ return true;
+}
+
+static bool HandleLValueBase(EvalInfo &Info, const Expr *E, LValue &Obj,
+ const CXXRecordDecl *DerivedDecl,
+ const CXXBaseSpecifier *Base) {
+ const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();
+
+ if (!Base->isVirtual())
+ return HandleLValueDirectBase(Info, E, Obj, DerivedDecl, BaseDecl);
+
+ SubobjectDesignator &D = Obj.Designator;
+ if (D.Invalid)
+ return false;
+
+ // Extract most-derived object and corresponding type.
+ DerivedDecl = D.MostDerivedType->getAsCXXRecordDecl();
+ if (!CastToDerivedClass(Info, E, Obj, DerivedDecl, D.MostDerivedPathLength))
+ return false;
+
+ // Find the virtual base class.
+ if (DerivedDecl->isInvalidDecl()) return false;
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(DerivedDecl);
+ Obj.getLValueOffset() += Layout.getVBaseClassOffset(BaseDecl);
+ Obj.addDecl(Info, E, BaseDecl, /*Virtual*/ true);
+ return true;
+}
+
+static bool HandleLValueBasePath(EvalInfo &Info, const CastExpr *E,
+ QualType Type, LValue &Result) {
+ for (CastExpr::path_const_iterator PathI = E->path_begin(),
+ PathE = E->path_end();
+ PathI != PathE; ++PathI) {
+ if (!HandleLValueBase(Info, E, Result, Type->getAsCXXRecordDecl(),
+ *PathI))
+ return false;
+ Type = (*PathI)->getType();
+ }
+ return true;
+}
+
+/// Update LVal to refer to the given field, which must be a member of the type
+/// currently described by LVal.
+static bool HandleLValueMember(EvalInfo &Info, const Expr *E, LValue &LVal,
+ const FieldDecl *FD,
+ const ASTRecordLayout *RL = nullptr) {
+ if (!RL) {
+ if (FD->getParent()->isInvalidDecl()) return false;
+ RL = &Info.Ctx.getASTRecordLayout(FD->getParent());
+ }
+
+ unsigned I = FD->getFieldIndex();
+ LVal.Offset += Info.Ctx.toCharUnitsFromBits(RL->getFieldOffset(I));
+ LVal.addDecl(Info, E, FD);
+ return true;
+}
+
+/// Update LVal to refer to the given indirect field.
+static bool HandleLValueIndirectMember(EvalInfo &Info, const Expr *E,
+ LValue &LVal,
+ const IndirectFieldDecl *IFD) {
+ for (const auto *C : IFD->chain())
+ if (!HandleLValueMember(Info, E, LVal, cast<FieldDecl>(C)))
+ return false;
+ return true;
+}
+
+/// Get the size of the given type in char units.
+static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc,
+ QualType Type, CharUnits &Size) {
+ // sizeof(void), __alignof__(void), sizeof(function) = 1 as a gcc
+ // extension.
+ if (Type->isVoidType() || Type->isFunctionType()) {
+ Size = CharUnits::One();
+ return true;
+ }
+
+ if (!Type->isConstantSizeType()) {
+ // sizeof(vla) is not a constant expression: C99 6.5.3.4p2.
+ // FIXME: Better diagnostic.
+ Info.Diag(Loc);
+ return false;
+ }
+
+ Size = Info.Ctx.getTypeSizeInChars(Type);
+ return true;
+}
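+
+// Illustrative cases:
+// sizeof(void) == 1 // GNU extension handled above
+// void f(int n) { (void)sizeof(int[n]); } // VLA: diagnosed, not folded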
+
+/// Update a pointer value to model pointer arithmetic.
+/// \param Info - Information about the ongoing evaluation.
+/// \param E - The expression being evaluated, for diagnostic purposes.
+/// \param LVal - The pointer value to be updated.
+/// \param EltTy - The pointee type represented by LVal.
+/// \param Adjustment - The adjustment, in objects of type EltTy, to add.
+static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E,
+ LValue &LVal, QualType EltTy,
+ int64_t Adjustment) {
+ CharUnits SizeOfPointee;
+ if (!HandleSizeof(Info, E->getExprLoc(), EltTy, SizeOfPointee))
+ return false;
+
+ // Compute the new offset in the appropriate width.
+ LVal.Offset += Adjustment * SizeOfPointee;
+ LVal.adjustIndex(Info, E, Adjustment);
+ return true;
+}
+
+/// Update an lvalue to refer to a component of a complex number.
+/// \param Info - Information about the ongoing evaluation.
+/// \param LVal - The lvalue to be updated.
+/// \param EltTy - The complex number's component type.
+/// \param Imag - False for the real component, true for the imaginary.
+static bool HandleLValueComplexElement(EvalInfo &Info, const Expr *E,
+ LValue &LVal, QualType EltTy,
+ bool Imag) {
+ if (Imag) {
+ CharUnits SizeOfComponent;
+ if (!HandleSizeof(Info, E->getExprLoc(), EltTy, SizeOfComponent))
+ return false;
+ LVal.Offset += SizeOfComponent;
+ }
+ LVal.addComplex(Info, E, EltTy, Imag);
+ return true;
+}
+
+/// Try to evaluate the initializer for a variable declaration.
+///
+/// \param Info Information about the ongoing evaluation.
+/// \param E An expression to be used when printing diagnostics.
+/// \param VD The variable whose initializer should be obtained.
+/// \param Frame The frame in which the variable was created. Must be null
+/// if this variable is not local to the evaluation.
+/// \param Result Filled in with a pointer to the value of the variable.
+static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
+ const VarDecl *VD, CallStackFrame *Frame,
+ APValue *&Result) {
+ // If this is a parameter to an active constexpr function call, perform
+ // argument substitution.
+ if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD)) {
+ // Assume arguments of a potential constant expression are unknown
+ // constant expressions.
+ if (Info.checkingPotentialConstantExpression())
+ return false;
+ if (!Frame || !Frame->Arguments) {
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ Result = &Frame->Arguments[PVD->getFunctionScopeIndex()];
+ return true;
+ }
+
+ // If this is a local variable, dig out its value.
+ if (Frame) {
+ Result = Frame->getTemporary(VD);
+ assert(Result && "missing value for local variable");
+ return true;
+ }
+
+ // Dig out the initializer, and use the declaration which it's attached to.
+ const Expr *Init = VD->getAnyInitializer(VD);
+ if (!Init || Init->isValueDependent()) {
+ // If we're checking a potential constant expression, the variable could be
+ // initialized later.
+ if (!Info.checkingPotentialConstantExpression())
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+
+ // If we're currently evaluating the initializer of this declaration, use that
+ // in-flight value.
+ if (Info.EvaluatingDecl.dyn_cast<const ValueDecl*>() == VD) {
+ Result = Info.EvaluatingDeclValue;
+ return true;
+ }
+
+ // Never evaluate the initializer of a weak variable. We can't be sure that
+ // this is the definition which will be used.
+ if (VD->isWeak()) {
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+
+ // Check that we can fold the initializer. In C++, we will have already done
+ // this in the cases where it matters for conformance.
+ SmallVector<PartialDiagnosticAt, 8> Notes;
+ if (!VD->evaluateValue(Notes)) {
+ Info.Diag(E, diag::note_constexpr_var_init_non_constant,
+ Notes.size() + 1) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ Info.addNotes(Notes);
+ return false;
+ } else if (!VD->checkInitIsICE()) {
+ Info.CCEDiag(E, diag::note_constexpr_var_init_non_constant,
+ Notes.size() + 1) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ Info.addNotes(Notes);
+ }
+
+ Result = VD->getEvaluatedValue();
+ return true;
+}
+
+static bool IsConstNonVolatile(QualType T) {
+ Qualifiers Quals = T.getQualifiers();
+ return Quals.hasConst() && !Quals.hasVolatile();
+}
+
+/// Get the base index of the given base class within an APValue representing
+/// the given derived class.
+static unsigned getBaseIndex(const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base) {
+ Base = Base->getCanonicalDecl();
+ unsigned Index = 0;
+ for (CXXRecordDecl::base_class_const_iterator I = Derived->bases_begin(),
+ E = Derived->bases_end(); I != E; ++I, ++Index) {
+ if (I->getType()->getAsCXXRecordDecl()->getCanonicalDecl() == Base)
+ return Index;
+ }
+
+ llvm_unreachable("base class missing from derived class's bases list");
+}
+
+/// Extract the value of a character from a string literal.
+static APSInt extractStringLiteralCharacter(EvalInfo &Info, const Expr *Lit,
+ uint64_t Index) {
+ // FIXME: Support ObjCEncodeExpr, MakeStringConstant
+ if (auto PE = dyn_cast<PredefinedExpr>(Lit))
+ Lit = PE->getFunctionName();
+ const StringLiteral *S = cast<StringLiteral>(Lit);
+ const ConstantArrayType *CAT =
+ Info.Ctx.getAsConstantArrayType(S->getType());
+ assert(CAT && "string literal isn't an array");
+ QualType CharType = CAT->getElementType();
+ assert(CharType->isIntegerType() && "unexpected character type");
+
+ APSInt Value(S->getCharByteWidth() * Info.Ctx.getCharWidth(),
+ CharType->isUnsignedIntegerType());
+ if (Index < S->getLength())
+ Value = S->getCodeUnit(Index);
+ return Value;
+}
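+
+// For example, for the literal "hi" (type char[3], getLength() == 2),
+// indices 0 and 1 yield 'h' and 'i', while index 2 is past the literal's
+// length, so the zero-initialized Value stands for the implicit '\0'.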
+
+// Expand a string literal into an array of characters.
+static void expandStringLiteral(EvalInfo &Info, const Expr *Lit,
+ APValue &Result) {
+ const StringLiteral *S = cast<StringLiteral>(Lit);
+ const ConstantArrayType *CAT =
+ Info.Ctx.getAsConstantArrayType(S->getType());
+ assert(CAT && "string literal isn't an array");
+ QualType CharType = CAT->getElementType();
+ assert(CharType->isIntegerType() && "unexpected character type");
+
+ unsigned Elts = CAT->getSize().getZExtValue();
+ Result = APValue(APValue::UninitArray(),
+ std::min(S->getLength(), Elts), Elts);
+ APSInt Value(S->getCharByteWidth() * Info.Ctx.getCharWidth(),
+ CharType->isUnsignedIntegerType());
+ if (Result.hasArrayFiller())
+ Result.getArrayFiller() = APValue(Value);
+ for (unsigned I = 0, N = Result.getArrayInitializedElts(); I != N; ++I) {
+ Value = S->getCodeUnit(I);
+ Result.getArrayInitializedElt(I) = APValue(Value);
+ }
+}
+
+// Expand an array so that it has at least Index + 1 explicitly initialized
+// elements.
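+// Growth sketch: NewElts = min(Size, max(Index + 1, OldElts * 2, 8)), so a
+// sequence of writes grows the representation geometrically rather than one
+// element at a time.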
+static void expandArray(APValue &Array, unsigned Index) {
+ unsigned Size = Array.getArraySize();
+ assert(Index < Size);
+
+ // Always at least double the number of elements for which we store a value.
+ unsigned OldElts = Array.getArrayInitializedElts();
+ unsigned NewElts = std::max(Index+1, OldElts * 2);
+ NewElts = std::min(Size, std::max(NewElts, 8u));
+
+ // Copy the data across.
+ APValue NewValue(APValue::UninitArray(), NewElts, Size);
+ for (unsigned I = 0; I != OldElts; ++I)
+ NewValue.getArrayInitializedElt(I).swap(Array.getArrayInitializedElt(I));
+ for (unsigned I = OldElts; I != NewElts; ++I)
+ NewValue.getArrayInitializedElt(I) = Array.getArrayFiller();
+ if (NewValue.hasArrayFiller())
+ NewValue.getArrayFiller() = Array.getArrayFiller();
+ Array.swap(NewValue);
+}
+
+/// Determine whether a type would actually be read by an lvalue-to-rvalue
+/// conversion. If it's of class type, we may assume that the copy operation
+/// is trivial. Note that this is always true for a union type with fields
+/// (because the copy always "reads" the active member) and for any
+/// non-class type.
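+/// For example, copying an empty struct reads nothing, while copying
+/// 'struct S { int x; }' reads S::x.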
+static bool isReadByLvalueToRvalueConversion(QualType T) {
+ CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ if (!RD || (RD->isUnion() && !RD->field_empty()))
+ return true;
+ if (RD->isEmpty())
+ return false;
+
+ for (auto *Field : RD->fields())
+ if (isReadByLvalueToRvalueConversion(Field->getType()))
+ return true;
+
+ for (auto &BaseSpec : RD->bases())
+ if (isReadByLvalueToRvalueConversion(BaseSpec.getType()))
+ return true;
+
+ return false;
+}
+
+/// Diagnose an attempt to read from any unreadable field within the specified
+/// type, which might be a class type.
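+/// For example (illustrative):
+///   struct S { mutable int m; };
+///   constexpr S s{0};
+///   constexpr S t = s; // not a constant expression: reads mutable 's.m'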
+static bool diagnoseUnreadableFields(EvalInfo &Info, const Expr *E,
+ QualType T) {
+ CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ if (!RD)
+ return false;
+
+ if (!RD->hasMutableFields())
+ return false;
+
+ for (auto *Field : RD->fields()) {
+ // If we're actually going to read this field in some way, then it can't
+ // be mutable. If we're in a union, then assigning to a mutable field
+ // (even an empty one) can change the active member, so that's not OK.
+ // FIXME: Add core issue number for the union case.
+ if (Field->isMutable() &&
+ (RD->isUnion() || isReadByLvalueToRvalueConversion(Field->getType()))) {
+ Info.Diag(E, diag::note_constexpr_ltor_mutable, 1) << Field;
+ Info.Note(Field->getLocation(), diag::note_declared_at);
+ return true;
+ }
+
+ if (diagnoseUnreadableFields(Info, E, Field->getType()))
+ return true;
+ }
+
+ for (auto &BaseSpec : RD->bases())
+ if (diagnoseUnreadableFields(Info, E, BaseSpec.getType()))
+ return true;
+
+ // All mutable fields were empty, and thus not actually read.
+ return false;
+}
+
+/// Kinds of access we can perform on an object, for diagnostics.
+enum AccessKinds {
+ AK_Read,
+ AK_Assign,
+ AK_Increment,
+ AK_Decrement
+};
+
+namespace {
+/// A handle to a complete object (an object that is not a subobject of
+/// another object).
+struct CompleteObject {
+ /// The value of the complete object.
+ APValue *Value;
+ /// The type of the complete object.
+ QualType Type;
+
+ CompleteObject() : Value(nullptr) {}
+ CompleteObject(APValue *Value, QualType Type)
+ : Value(Value), Type(Type) {
+ assert(Value && "missing value for complete object");
+ }
+
+ explicit operator bool() const { return Value; }
+};
+} // end anonymous namespace
+
+/// Find the designated sub-object of an rvalue.
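+/// Walks the designator's entries in order; e.g. for 's.arr[3].x' it steps
+/// through the field 'arr', then array element 3, then the field 'x', and
+/// hands the final subobject and its cv-qualified type to the handler.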
+template<typename SubobjectHandler>
+typename SubobjectHandler::result_type
+findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
+ const SubobjectDesignator &Sub, SubobjectHandler &handler) {
+ if (Sub.Invalid)
+ // A diagnostic will have already been produced.
+ return handler.failed();
+ if (Sub.isOnePastTheEnd()) {
+ if (Info.getLangOpts().CPlusPlus11)
+ Info.Diag(E, diag::note_constexpr_access_past_end)
+ << handler.AccessKind;
+ else
+ Info.Diag(E);
+ return handler.failed();
+ }
+
+ APValue *O = Obj.Value;
+ QualType ObjType = Obj.Type;
+ const FieldDecl *LastField = nullptr;
+
+ // Walk the designator's path to find the subobject.
+ for (unsigned I = 0, N = Sub.Entries.size(); /**/; ++I) {
+ if (O->isUninit()) {
+ if (!Info.checkingPotentialConstantExpression())
+ Info.Diag(E, diag::note_constexpr_access_uninit) << handler.AccessKind;
+ return handler.failed();
+ }
+
+ if (I == N) {
+ // If we are reading an object of class type, there may still be more
+ // things we need to check: if there are any mutable subobjects, we
+ // cannot perform this read. (This only happens when performing a trivial
+ // copy or assignment.)
+ if (ObjType->isRecordType() && handler.AccessKind == AK_Read &&
+ diagnoseUnreadableFields(Info, E, ObjType))
+ return handler.failed();
+
+ if (!handler.found(*O, ObjType))
+ return false;
+
+ // If we modified a bit-field, truncate it to the right width.
+ if (handler.AccessKind != AK_Read &&
+ LastField && LastField->isBitField() &&
+ !truncateBitfieldValue(Info, E, *O, LastField))
+ return false;
+
+ return true;
+ }
+
+ LastField = nullptr;
+ if (ObjType->isArrayType()) {
+ // Next subobject is an array element.
+ const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(ObjType);
+ assert(CAT && "vla in literal type?");
+ uint64_t Index = Sub.Entries[I].ArrayIndex;
+ if (CAT->getSize().ule(Index)) {
+ // Note that it should not be possible to form a pointer with a valid
+ // designator that points more than one past the end of the array.
+ if (Info.getLangOpts().CPlusPlus11)
+ Info.Diag(E, diag::note_constexpr_access_past_end)
+ << handler.AccessKind;
+ else
+ Info.Diag(E);
+ return handler.failed();
+ }
+
+ ObjType = CAT->getElementType();
+
+ // An array object is represented as either an Array APValue or as an
+ // LValue which refers to a string literal.
+ if (O->isLValue()) {
+ assert(I == N - 1 && "extracting subobject of character?");
+ assert(!O->hasLValuePath() || O->getLValuePath().empty());
+ if (handler.AccessKind != AK_Read)
+ expandStringLiteral(Info, O->getLValueBase().get<const Expr *>(),
+ *O);
+ else
+ return handler.foundString(*O, ObjType, Index);
+ }
+
+ if (O->getArrayInitializedElts() > Index)
+ O = &O->getArrayInitializedElt(Index);
+ else if (handler.AccessKind != AK_Read) {
+ expandArray(*O, Index);
+ O = &O->getArrayInitializedElt(Index);
+ } else
+ O = &O->getArrayFiller();
+ } else if (ObjType->isAnyComplexType()) {
+ // Next subobject is a complex number.
+ uint64_t Index = Sub.Entries[I].ArrayIndex;
+ if (Index > 1) {
+ if (Info.getLangOpts().CPlusPlus11)
+ Info.Diag(E, diag::note_constexpr_access_past_end)
+ << handler.AccessKind;
+ else
+ Info.Diag(E);
+ return handler.failed();
+ }
+
+ bool WasConstQualified = ObjType.isConstQualified();
+ ObjType = ObjType->castAs<ComplexType>()->getElementType();
+ if (WasConstQualified)
+ ObjType.addConst();
+
+ assert(I == N - 1 && "extracting subobject of scalar?");
+ if (O->isComplexInt()) {
+ return handler.found(Index ? O->getComplexIntImag()
+ : O->getComplexIntReal(), ObjType);
+ } else {
+ assert(O->isComplexFloat());
+ return handler.found(Index ? O->getComplexFloatImag()
+ : O->getComplexFloatReal(), ObjType);
+ }
+ } else if (const FieldDecl *Field = getAsField(Sub.Entries[I])) {
+ if (Field->isMutable() && handler.AccessKind == AK_Read) {
+ Info.Diag(E, diag::note_constexpr_ltor_mutable, 1)
+ << Field;
+ Info.Note(Field->getLocation(), diag::note_declared_at);
+ return handler.failed();
+ }
+
+ // Next subobject is a class, struct or union field.
+ RecordDecl *RD = ObjType->castAs<RecordType>()->getDecl();
+ if (RD->isUnion()) {
+ const FieldDecl *UnionField = O->getUnionField();
+ if (!UnionField ||
+ UnionField->getCanonicalDecl() != Field->getCanonicalDecl()) {
+ Info.Diag(E, diag::note_constexpr_access_inactive_union_member)
+ << handler.AccessKind << Field << !UnionField << UnionField;
+ return handler.failed();
+ }
+ O = &O->getUnionValue();
+ } else
+ O = &O->getStructField(Field->getFieldIndex());
+
+ bool WasConstQualified = ObjType.isConstQualified();
+ ObjType = Field->getType();
+ if (WasConstQualified && !Field->isMutable())
+ ObjType.addConst();
+
+ if (ObjType.isVolatileQualified()) {
+ if (Info.getLangOpts().CPlusPlus) {
+ // FIXME: Include a description of the path to the volatile subobject.
+ Info.Diag(E, diag::note_constexpr_access_volatile_obj, 1)
+ << handler.AccessKind << 2 << Field;
+ Info.Note(Field->getLocation(), diag::note_declared_at);
+ } else {
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ }
+ return handler.failed();
+ }
+
+ LastField = Field;
+ } else {
+ // Next subobject is a base class.
+ const CXXRecordDecl *Derived = ObjType->getAsCXXRecordDecl();
+ const CXXRecordDecl *Base = getAsBaseClass(Sub.Entries[I]);
+ O = &O->getStructBase(getBaseIndex(Derived, Base));
+
+ bool WasConstQualified = ObjType.isConstQualified();
+ ObjType = Info.Ctx.getRecordType(Base);
+ if (WasConstQualified)
+ ObjType.addConst();
+ }
+ }
+}
+
+namespace {
+struct ExtractSubobjectHandler {
+ EvalInfo &Info;
+ APValue &Result;
+
+ static const AccessKinds AccessKind = AK_Read;
+
+ typedef bool result_type;
+ bool failed() { return false; }
+ bool found(APValue &Subobj, QualType SubobjType) {
+ Result = Subobj;
+ return true;
+ }
+ bool found(APSInt &Value, QualType SubobjType) {
+ Result = APValue(Value);
+ return true;
+ }
+ bool found(APFloat &Value, QualType SubobjType) {
+ Result = APValue(Value);
+ return true;
+ }
+ bool foundString(APValue &Subobj, QualType SubobjType, uint64_t Character) {
+ Result = APValue(extractStringLiteralCharacter(
+ Info, Subobj.getLValueBase().get<const Expr *>(), Character));
+ return true;
+ }
+};
+} // end anonymous namespace
+
+const AccessKinds ExtractSubobjectHandler::AccessKind;
+
+/// Extract the designated sub-object of an rvalue.
+static bool extractSubobject(EvalInfo &Info, const Expr *E,
+ const CompleteObject &Obj,
+ const SubobjectDesignator &Sub,
+ APValue &Result) {
+ ExtractSubobjectHandler Handler = { Info, Result };
+ return findSubobject(Info, E, Obj, Sub, Handler);
+}
+
+namespace {
+struct ModifySubobjectHandler {
+ EvalInfo &Info;
+ APValue &NewVal;
+ const Expr *E;
+
+ typedef bool result_type;
+ static const AccessKinds AccessKind = AK_Assign;
+
+ bool checkConst(QualType QT) {
+ // Assigning to a const object has undefined behavior.
+ if (QT.isConstQualified()) {
+ Info.Diag(E, diag::note_constexpr_modify_const_type) << QT;
+ return false;
+ }
+ return true;
+ }
+
+ bool failed() { return false; }
+ bool found(APValue &Subobj, QualType SubobjType) {
+ if (!checkConst(SubobjType))
+ return false;
+ // We've been given ownership of NewVal, so just swap it in.
+ Subobj.swap(NewVal);
+ return true;
+ }
+ bool found(APSInt &Value, QualType SubobjType) {
+ if (!checkConst(SubobjType))
+ return false;
+ if (!NewVal.isInt()) {
+ // Maybe trying to write a cast pointer value into a complex?
+ Info.Diag(E);
+ return false;
+ }
+ Value = NewVal.getInt();
+ return true;
+ }
+ bool found(APFloat &Value, QualType SubobjType) {
+ if (!checkConst(SubobjType))
+ return false;
+ Value = NewVal.getFloat();
+ return true;
+ }
+ bool foundString(APValue &Subobj, QualType SubobjType, uint64_t Character) {
+ llvm_unreachable("shouldn't encounter string elements with ExpandArrays");
+ }
+};
+} // end anonymous namespace
+
+const AccessKinds ModifySubobjectHandler::AccessKind;
+
+/// Update the designated sub-object of an rvalue to the given value.
+static bool modifySubobject(EvalInfo &Info, const Expr *E,
+ const CompleteObject &Obj,
+ const SubobjectDesignator &Sub,
+ APValue &NewVal) {
+ ModifySubobjectHandler Handler = { Info, NewVal, E };
+ return findSubobject(Info, E, Obj, Sub, Handler);
+}
+
+/// Find the position where two subobject designators diverge, or equivalently
+/// the length of the common initial subsequence.
+static unsigned FindDesignatorMismatch(QualType ObjType,
+ const SubobjectDesignator &A,
+ const SubobjectDesignator &B,
+ bool &WasArrayIndex) {
+ unsigned I = 0, N = std::min(A.Entries.size(), B.Entries.size());
+ for (/**/; I != N; ++I) {
+ if (!ObjType.isNull() &&
+ (ObjType->isArrayType() || ObjType->isAnyComplexType())) {
+ // Next subobject is an array element.
+ if (A.Entries[I].ArrayIndex != B.Entries[I].ArrayIndex) {
+ WasArrayIndex = true;
+ return I;
+ }
+ if (ObjType->isAnyComplexType())
+ ObjType = ObjType->castAs<ComplexType>()->getElementType();
+ else
+ ObjType = ObjType->castAsArrayTypeUnsafe()->getElementType();
+ } else {
+ if (A.Entries[I].BaseOrMember != B.Entries[I].BaseOrMember) {
+ WasArrayIndex = false;
+ return I;
+ }
+ if (const FieldDecl *FD = getAsField(A.Entries[I]))
+ // Next subobject is a field.
+ ObjType = FD->getType();
+ else
+ // Next subobject is a base class.
+ ObjType = QualType();
+ }
+ }
+ WasArrayIndex = false;
+ return I;
+}
+
+/// Determine whether the given subobject designators refer to elements of the
+/// same array object.
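+/// For example, &a[1] and &a[4] designate elements of the same array, while
+/// &a[1].x and &a[2].x do not (each designates a subobject of an element).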
+static bool AreElementsOfSameArray(QualType ObjType,
+ const SubobjectDesignator &A,
+ const SubobjectDesignator &B) {
+ if (A.Entries.size() != B.Entries.size())
+ return false;
+
+ bool IsArray = A.MostDerivedIsArrayElement;
+ if (IsArray && A.MostDerivedPathLength != A.Entries.size())
+ // A is a subobject of the array element.
+ return false;
+
+ // If A (and B) designates an array element, the last entry will be the array
+ // index. That doesn't have to match. Otherwise, we're in the 'implicit array
+ // of length 1' case, and the entire path must match.
+ bool WasArrayIndex;
+ unsigned CommonLength = FindDesignatorMismatch(ObjType, A, B, WasArrayIndex);
+ return CommonLength >= A.Entries.size() - IsArray;
+}
+
+/// Find the complete object to which an LValue refers.
+static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
+ AccessKinds AK, const LValue &LVal,
+ QualType LValType) {
+ if (!LVal.Base) {
+ Info.Diag(E, diag::note_constexpr_access_null) << AK;
+ return CompleteObject();
+ }
+
+ CallStackFrame *Frame = nullptr;
+ if (LVal.CallIndex) {
+ Frame = Info.getCallFrame(LVal.CallIndex);
+ if (!Frame) {
+ Info.Diag(E, diag::note_constexpr_lifetime_ended, 1)
+ << AK << LVal.Base.is<const ValueDecl*>();
+ NoteLValueLocation(Info, LVal.Base);
+ return CompleteObject();
+ }
+ }
+
+ // C++11 DR1311: An lvalue-to-rvalue conversion on a volatile-qualified type
+ // is not a constant expression (even if the object is non-volatile). We also
+ // apply this rule to C++98, in order to conform to the expected 'volatile'
+ // semantics.
+ if (LValType.isVolatileQualified()) {
+ if (Info.getLangOpts().CPlusPlus)
+ Info.Diag(E, diag::note_constexpr_access_volatile_type)
+ << AK << LValType;
+ else
+ Info.Diag(E);
+ return CompleteObject();
+ }
+
+ // Compute value storage location and type of base object.
+ APValue *BaseVal = nullptr;
+ QualType BaseType = getType(LVal.Base);
+
+ if (const ValueDecl *D = LVal.Base.dyn_cast<const ValueDecl*>()) {
+ // In C++98, const, non-volatile integers initialized with ICEs are ICEs.
+ // In C++11, constexpr, non-volatile variables initialized with constant
+ // expressions are constant expressions too. Inside constexpr functions,
+ // parameters are constant expressions even if they're non-const.
+ // In C++1y, objects local to a constant expression (those with a Frame) are
+ // both readable and writable inside constant expressions.
+ // In C, such things can also be folded, although they are not ICEs.
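+ // For example, 'const int n = 5;' at namespace scope is readable here even
+ // in C++98, while a non-const global is only accessible in C++14 when we
+ // are in the middle of evaluating its own initializer.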
+ const VarDecl *VD = dyn_cast<VarDecl>(D);
+ if (VD) {
+ if (const VarDecl *VDef = VD->getDefinition(Info.Ctx))
+ VD = VDef;
+ }
+ if (!VD || VD->isInvalidDecl()) {
+ Info.Diag(E);
+ return CompleteObject();
+ }
+
+ // Accesses of volatile-qualified objects are not allowed.
+ if (BaseType.isVolatileQualified()) {
+ if (Info.getLangOpts().CPlusPlus) {
+ Info.Diag(E, diag::note_constexpr_access_volatile_obj, 1)
+ << AK << 1 << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ } else {
+ Info.Diag(E);
+ }
+ return CompleteObject();
+ }
+
+ // Unless we're looking at a local variable or argument in a constexpr call,
+ // the variable we're reading must be const.
+ if (!Frame) {
+ if (Info.getLangOpts().CPlusPlus14 &&
+ VD == Info.EvaluatingDecl.dyn_cast<const ValueDecl *>()) {
+ // OK, we can read and modify an object if we're in the process of
+ // evaluating its initializer, because its lifetime began in this
+ // evaluation.
+ } else if (AK != AK_Read) {
+ // All the remaining cases only permit reading.
+ Info.Diag(E, diag::note_constexpr_modify_global);
+ return CompleteObject();
+ } else if (VD->isConstexpr()) {
+ // OK, we can read this variable.
+ } else if (BaseType->isIntegralOrEnumerationType()) {
+ if (!BaseType.isConstQualified()) {
+ if (Info.getLangOpts().CPlusPlus) {
+ Info.Diag(E, diag::note_constexpr_ltor_non_const_int, 1) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ } else {
+ Info.Diag(E);
+ }
+ return CompleteObject();
+ }
+ } else if (BaseType->isFloatingType() && BaseType.isConstQualified()) {
+ // We support folding of const floating-point types, in order to make
+ // static const data members of such types (supported as an extension)
+ // more useful.
+ if (Info.getLangOpts().CPlusPlus11) {
+ Info.CCEDiag(E, diag::note_constexpr_ltor_non_constexpr, 1) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ } else {
+ Info.CCEDiag(E);
+ }
+ } else {
+ // FIXME: Allow folding of values of any literal type in all languages.
+ if (Info.getLangOpts().CPlusPlus11) {
+ Info.Diag(E, diag::note_constexpr_ltor_non_constexpr, 1) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ } else {
+ Info.Diag(E);
+ }
+ return CompleteObject();
+ }
+ }
+
+ if (!evaluateVarDeclInit(Info, E, VD, Frame, BaseVal))
+ return CompleteObject();
+ } else {
+ const Expr *Base = LVal.Base.dyn_cast<const Expr*>();
+
+ if (!Frame) {
+ if (const MaterializeTemporaryExpr *MTE =
+ dyn_cast<MaterializeTemporaryExpr>(Base)) {
+ assert(MTE->getStorageDuration() == SD_Static &&
+ "should have a frame for a non-global materialized temporary");
+
+ // Per C++1y [expr.const]p2:
+ // an lvalue-to-rvalue conversion [is not allowed unless it applies to]
+ // - a [...] glvalue of integral or enumeration type that refers to
+ // a non-volatile const object [...]
+ // [...]
+ // - a [...] glvalue of literal type that refers to a non-volatile
+ // object whose lifetime began within the evaluation of e.
+ //
+ // C++11 misses the 'began within the evaluation of e' check and
+ // instead allows all temporaries, including things like:
+ // int &&r = 1;
+ // int x = ++r;
+ // constexpr int k = r;
+ // Therefore we use the C++1y rules in C++11 too.
+ const ValueDecl *VD = Info.EvaluatingDecl.dyn_cast<const ValueDecl*>();
+ const ValueDecl *ED = MTE->getExtendingDecl();
+ if (!(BaseType.isConstQualified() &&
+ BaseType->isIntegralOrEnumerationType()) &&
+ !(VD && VD->getCanonicalDecl() == ED->getCanonicalDecl())) {
+ Info.Diag(E, diag::note_constexpr_access_static_temporary, 1) << AK;
+ Info.Note(MTE->getExprLoc(), diag::note_constexpr_temporary_here);
+ return CompleteObject();
+ }
+
+ BaseVal = Info.Ctx.getMaterializedTemporaryValue(MTE, false);
+ assert(BaseVal && "got reference to unevaluated temporary");
+ } else {
+ Info.Diag(E);
+ return CompleteObject();
+ }
+ } else {
+ BaseVal = Frame->getTemporary(Base);
+ assert(BaseVal && "missing value for temporary");
+ }
+
+ // Volatile temporary objects cannot be accessed in constant expressions.
+ if (BaseType.isVolatileQualified()) {
+ if (Info.getLangOpts().CPlusPlus) {
+ Info.Diag(E, diag::note_constexpr_access_volatile_obj, 1)
+ << AK << 0;
+ Info.Note(Base->getExprLoc(), diag::note_constexpr_temporary_here);
+ } else {
+ Info.Diag(E);
+ }
+ return CompleteObject();
+ }
+ }
+
+ // During the construction of an object, it is not yet 'const'.
+ // FIXME: We don't set up EvaluatingDecl for local variables or temporaries,
+ // and this doesn't do quite the right thing for const subobjects of the
+ // object under construction.
+ if (LVal.getLValueBase() == Info.EvaluatingDecl) {
+ BaseType = Info.Ctx.getCanonicalType(BaseType);
+ BaseType.removeLocalConst();
+ }
+
+ // In C++1y, we can't safely access any mutable state when we might be
+ // evaluating after an unmodeled side effect or an evaluation failure.
+ //
+ // FIXME: Not all local state is mutable. Allow local constant subobjects
+ // to be read here (but take care with 'mutable' fields).
+ if (Frame && Info.getLangOpts().CPlusPlus14 &&
+ (Info.EvalStatus.HasSideEffects || Info.keepEvaluatingAfterFailure()))
+ return CompleteObject();
+
+ return CompleteObject(BaseVal, BaseType);
+}
+
+/// \brief Perform an lvalue-to-rvalue conversion on the given glvalue. This
+/// can also be used for 'lvalue-to-lvalue' conversions for looking up the
+/// glvalue referred to by an entity of reference type.
+///
+/// \param Info - Information about the ongoing evaluation.
+/// \param Conv - The expression for which we are performing the conversion.
+/// Used for diagnostics.
+/// \param Type - The type of the glvalue (before stripping cv-qualifiers in the
+/// case of a non-class type).
+/// \param LVal - The glvalue on which we are attempting to perform this action.
+/// \param RVal - The produced value will be placed here.
+static bool handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv,
+ QualType Type,
+ const LValue &LVal, APValue &RVal) {
+ if (LVal.Designator.Invalid)
+ return false;
+
+ // Check for special cases where there is no existing APValue to look at.
+ const Expr *Base = LVal.Base.dyn_cast<const Expr*>();
+ if (Base && !LVal.CallIndex && !Type.isVolatileQualified()) {
+ if (const CompoundLiteralExpr *CLE = dyn_cast<CompoundLiteralExpr>(Base)) {
+ // In C99, a CompoundLiteralExpr is an lvalue, and we defer evaluating the
+ // initializer until now for such expressions. Such an expression can't be
+ // an ICE in C, so this only matters for constant folding.
+ assert(!Info.getLangOpts().CPlusPlus && "lvalue compound literal in c++?");
+ if (Type.isVolatileQualified()) {
+ Info.Diag(Conv);
+ return false;
+ }
+ APValue Lit;
+ if (!Evaluate(Lit, Info, CLE->getInitializer()))
+ return false;
+ CompleteObject LitObj(&Lit, Base->getType());
+ return extractSubobject(Info, Conv, LitObj, LVal.Designator, RVal);
+ } else if (isa<StringLiteral>(Base) || isa<PredefinedExpr>(Base)) {
+ // We represent a string literal array as an lvalue pointing at the
+ // corresponding expression, rather than building an array of chars.
+ // FIXME: Support ObjCEncodeExpr, MakeStringConstant
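+ // For example (illustrative), the array backing "abc" stays an LValue
+ // designating the StringLiteral; characters are extracted on demand via
+ // extractStringLiteralCharacter rather than being materialized up front.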
+ APValue Str(Base, CharUnits::Zero(), APValue::NoLValuePath(), 0);
+ CompleteObject StrObj(&Str, Base->getType());
+ return extractSubobject(Info, Conv, StrObj, LVal.Designator, RVal);
+ }
+ }
+
+ CompleteObject Obj = findCompleteObject(Info, Conv, AK_Read, LVal, Type);
+ return Obj && extractSubobject(Info, Conv, Obj, LVal.Designator, RVal);
+}
+
+/// Perform an assignment of Val to LVal. Takes ownership of Val.
+static bool handleAssignment(EvalInfo &Info, const Expr *E, const LValue &LVal,
+ QualType LValType, APValue &Val) {
+ if (LVal.Designator.Invalid)
+ return false;
+
+ if (!Info.getLangOpts().CPlusPlus14) {
+ Info.Diag(E);
+ return false;
+ }
+
+ CompleteObject Obj = findCompleteObject(Info, E, AK_Assign, LVal, LValType);
+ return Obj && modifySubobject(Info, E, Obj, LVal.Designator, Val);
+}
+
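+/// Signed types at least as wide as 'int' do their arithmetic in their own
+/// width and can therefore overflow; narrower types promote to 'int' first,
+/// so an increment on them cannot overflow the stored type.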
+static bool isOverflowingIntegerType(ASTContext &Ctx, QualType T) {
+ return T->isSignedIntegerType() &&
+ Ctx.getIntWidth(T) >= Ctx.getIntWidth(Ctx.IntTy);
+}
+
+namespace {
+struct CompoundAssignSubobjectHandler {
+ EvalInfo &Info;
+ const Expr *E;
+ QualType PromotedLHSType;
+ BinaryOperatorKind Opcode;
+ const APValue &RHS;
+
+ static const AccessKinds AccessKind = AK_Assign;
+
+ typedef bool result_type;
+
+ bool checkConst(QualType QT) {
+ // Assigning to a const object has undefined behavior.
+ if (QT.isConstQualified()) {
+ Info.Diag(E, diag::note_constexpr_modify_const_type) << QT;
+ return false;
+ }
+ return true;
+ }
+
+ bool failed() { return false; }
+ bool found(APValue &Subobj, QualType SubobjType) {
+ switch (Subobj.getKind()) {
+ case APValue::Int:
+ return found(Subobj.getInt(), SubobjType);
+ case APValue::Float:
+ return found(Subobj.getFloat(), SubobjType);
+ case APValue::ComplexInt:
+ case APValue::ComplexFloat:
+ // FIXME: Implement complex compound assignment.
+ Info.Diag(E);
+ return false;
+ case APValue::LValue:
+ return foundPointer(Subobj, SubobjType);
+ default:
+ // FIXME: can this happen?
+ Info.Diag(E);
+ return false;
+ }
+ }
+ bool found(APSInt &Value, QualType SubobjType) {
+ if (!checkConst(SubobjType))
+ return false;
+
+ if (!SubobjType->isIntegerType() || !RHS.isInt()) {
+ // We don't support compound assignment on integer-cast-to-pointer
+ // values.
+ Info.Diag(E);
+ return false;
+ }
+
+ APSInt LHS = HandleIntToIntCast(Info, E, PromotedLHSType,
+ SubobjType, Value);
+ if (!handleIntIntBinOp(Info, E, LHS, Opcode, RHS.getInt(), LHS))
+ return false;
+ Value = HandleIntToIntCast(Info, E, SubobjType, PromotedLHSType, LHS);
+ return true;
+ }
+ bool found(APFloat &Value, QualType SubobjType) {
+ return checkConst(SubobjType) &&
+ HandleFloatToFloatCast(Info, E, SubobjType, PromotedLHSType,
+ Value) &&
+ handleFloatFloatBinOp(Info, E, Value, Opcode, RHS.getFloat()) &&
+ HandleFloatToFloatCast(Info, E, PromotedLHSType, SubobjType, Value);
+ }
+ bool foundPointer(APValue &Subobj, QualType SubobjType) {
+ if (!checkConst(SubobjType))
+ return false;
+
+ QualType PointeeType;
+ if (const PointerType *PT = SubobjType->getAs<PointerType>())
+ PointeeType = PT->getPointeeType();
+
+ if (PointeeType.isNull() || !RHS.isInt() ||
+ (Opcode != BO_Add && Opcode != BO_Sub)) {
+ Info.Diag(E);
+ return false;
+ }
+
+ int64_t Offset = getExtValue(RHS.getInt());
+ if (Opcode == BO_Sub)
+ Offset = -Offset;
+
+ LValue LVal;
+ LVal.setFrom(Info.Ctx, Subobj);
+ if (!HandleLValueArrayAdjustment(Info, E, LVal, PointeeType, Offset))
+ return false;
+ LVal.moveInto(Subobj);
+ return true;
+ }
+ bool foundString(APValue &Subobj, QualType SubobjType, uint64_t Character) {
+ llvm_unreachable("shouldn't encounter string elements here");
+ }
+};
+} // end anonymous namespace
+
+const AccessKinds CompoundAssignSubobjectHandler::AccessKind;
+
+/// Perform a compound assignment of LVal <op>= RVal.
+static bool handleCompoundAssignment(
+ EvalInfo &Info, const Expr *E,
+ const LValue &LVal, QualType LValType, QualType PromotedLValType,
+ BinaryOperatorKind Opcode, const APValue &RVal) {
+ if (LVal.Designator.Invalid)
+ return false;
+
+ if (!Info.getLangOpts().CPlusPlus14) {
+ Info.Diag(E);
+ return false;
+ }
+
+ CompleteObject Obj = findCompleteObject(Info, E, AK_Assign, LVal, LValType);
+ CompoundAssignSubobjectHandler Handler = { Info, E, PromotedLValType, Opcode,
+ RVal };
+ return Obj && findSubobject(Info, E, Obj, LVal.Designator, Handler);
+}
+
+namespace {
+struct IncDecSubobjectHandler {
+ EvalInfo &Info;
+ const Expr *E;
+ AccessKinds AccessKind;
+ APValue *Old;
+
+ typedef bool result_type;
+
+ bool checkConst(QualType QT) {
+ // Assigning to a const object has undefined behavior.
+ if (QT.isConstQualified()) {
+ Info.Diag(E, diag::note_constexpr_modify_const_type) << QT;
+ return false;
+ }
+ return true;
+ }
+
+ bool failed() { return false; }
+ bool found(APValue &Subobj, QualType SubobjType) {
+ // Stash the old value. Also clear Old, so we don't clobber it later
+ // if we're post-incrementing a complex.
+ if (Old) {
+ *Old = Subobj;
+ Old = nullptr;
+ }
+
+ switch (Subobj.getKind()) {
+ case APValue::Int:
+ return found(Subobj.getInt(), SubobjType);
+ case APValue::Float:
+ return found(Subobj.getFloat(), SubobjType);
+ case APValue::ComplexInt:
+ return found(Subobj.getComplexIntReal(),
+ SubobjType->castAs<ComplexType>()->getElementType()
+ .withCVRQualifiers(SubobjType.getCVRQualifiers()));
+ case APValue::ComplexFloat:
+ return found(Subobj.getComplexFloatReal(),
+ SubobjType->castAs<ComplexType>()->getElementType()
+ .withCVRQualifiers(SubobjType.getCVRQualifiers()));
+ case APValue::LValue:
+ return foundPointer(Subobj, SubobjType);
+ default:
+ // FIXME: can this happen?
+ Info.Diag(E);
+ return false;
+ }
+ }
+ bool found(APSInt &Value, QualType SubobjType) {
+ if (!checkConst(SubobjType))
+ return false;
+
+ if (!SubobjType->isIntegerType()) {
+ // We don't support increment / decrement on integer-cast-to-pointer
+ // values.
+ Info.Diag(E);
+ return false;
+ }
+
+ if (Old) *Old = APValue(Value);
+
+ // bool arithmetic promotes to int, and the conversion back to bool
+ // doesn't reduce mod 2^n, so special-case it.
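+ // For example, ++b always yields true (bool(2) is true, it does not wrap
+ // to false), while --b toggles the stored value.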
+ if (SubobjType->isBooleanType()) {
+ if (AccessKind == AK_Increment)
+ Value = 1;
+ else
+ Value = !Value;
+ return true;
+ }
+
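+ // Overflow sketch: incrementing INT_MAX flips the sign bit; we detect that
+ // below and report the mathematically exact value (INT_MAX + 1) by
+ // reinterpreting the wrapped bits as unsigned.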
+ bool WasNegative = Value.isNegative();
+ if (AccessKind == AK_Increment) {
+ ++Value;
+
+ if (!WasNegative && Value.isNegative() &&
+ isOverflowingIntegerType(Info.Ctx, SubobjType)) {
+ APSInt ActualValue(Value, /*IsUnsigned*/true);
+ return HandleOverflow(Info, E, ActualValue, SubobjType);
+ }
+ } else {
+ --Value;
+
+ if (WasNegative && !Value.isNegative() &&
+ isOverflowingIntegerType(Info.Ctx, SubobjType)) {
+ unsigned BitWidth = Value.getBitWidth();
+ APSInt ActualValue(Value.sext(BitWidth + 1), /*IsUnsigned*/false);
+ ActualValue.setBit(BitWidth);
+ return HandleOverflow(Info, E, ActualValue, SubobjType);
+ }
+ }
+ return true;
+ }
+ bool found(APFloat &Value, QualType SubobjType) {
+ if (!checkConst(SubobjType))
+ return false;
+
+ if (Old) *Old = APValue(Value);
+
+ APFloat One(Value.getSemantics(), 1);
+ if (AccessKind == AK_Increment)
+ Value.add(One, APFloat::rmNearestTiesToEven);
+ else
+ Value.subtract(One, APFloat::rmNearestTiesToEven);
+ return true;
+ }
+ bool foundPointer(APValue &Subobj, QualType SubobjType) {
+ if (!checkConst(SubobjType))
+ return false;
+
+ QualType PointeeType;
+ if (const PointerType *PT = SubobjType->getAs<PointerType>())
+ PointeeType = PT->getPointeeType();
+ else {
+ Info.Diag(E);
+ return false;
+ }
+
+ LValue LVal;
+ LVal.setFrom(Info.Ctx, Subobj);
+ if (!HandleLValueArrayAdjustment(Info, E, LVal, PointeeType,
+ AccessKind == AK_Increment ? 1 : -1))
+ return false;
+ LVal.moveInto(Subobj);
+ return true;
+ }
+ bool foundString(APValue &Subobj, QualType SubobjType, uint64_t Character) {
+ llvm_unreachable("shouldn't encounter string elements here");
+ }
+};
+} // end anonymous namespace
+
+/// Perform an increment or decrement on LVal.
+static bool handleIncDec(EvalInfo &Info, const Expr *E, const LValue &LVal,
+ QualType LValType, bool IsIncrement, APValue *Old) {
+ if (LVal.Designator.Invalid)
+ return false;
+
+ if (!Info.getLangOpts().CPlusPlus14) {
+ Info.Diag(E);
+ return false;
+ }
+
+ AccessKinds AK = IsIncrement ? AK_Increment : AK_Decrement;
+ CompleteObject Obj = findCompleteObject(Info, E, AK, LVal, LValType);
+ IncDecSubobjectHandler Handler = { Info, E, AK, Old };
+ return Obj && findSubobject(Info, E, Obj, LVal.Designator, Handler);
+}
+
+/// Build an lvalue for the object argument of a member function call.
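+/// The object expression may be a pointer (e.g. the 'p' in 'p->f()'), a
+/// glvalue (e.g. the 'x' in 'x.f()'), or a prvalue of literal class type
+/// that must first be materialized as a temporary.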
+static bool EvaluateObjectArgument(EvalInfo &Info, const Expr *Object,
+ LValue &This) {
+ if (Object->getType()->isPointerType())
+ return EvaluatePointer(Object, This, Info);
+
+ if (Object->isGLValue())
+ return EvaluateLValue(Object, This, Info);
+
+ if (Object->getType()->isLiteralType(Info.Ctx))
+ return EvaluateTemporary(Object, This, Info);
+
+ Info.Diag(Object, diag::note_constexpr_nonliteral) << Object->getType();
+ return false;
+}
+
+/// HandleMemberPointerAccess - Evaluate a member access operation and build an
+/// lvalue referring to the result.
+///
+/// \param Info - Information about the ongoing evaluation.
+/// \param LV - An lvalue referring to the base of the member pointer.
+/// \param RHS - The member pointer expression.
+/// \param IncludeMember - Specifies whether the member itself is included in
+/// the resulting LValue subobject designator. This is not possible when
+/// creating a bound member function.
+/// \return The field or method declaration to which the member pointer refers,
+/// or 0 if evaluation fails.
+static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info,
+ QualType LVType,
+ LValue &LV,
+ const Expr *RHS,
+ bool IncludeMember = true) {
+ MemberPtr MemPtr;
+ if (!EvaluateMemberPointer(RHS, MemPtr, Info))
+ return nullptr;
+
+ // C++11 [expr.mptr.oper]p6: If the second operand is the null pointer to
+ // member value, the behavior is undefined.
+ if (!MemPtr.getDecl()) {
+ // FIXME: Specific diagnostic.
+ Info.Diag(RHS);
+ return nullptr;
+ }
+
+ if (MemPtr.isDerivedMember()) {
+ // This is a member of some derived class. Truncate LV appropriately.
+ // The end of the derived-to-base path for the base object must match the
+ // derived-to-base path for the member pointer.
+ if (LV.Designator.MostDerivedPathLength + MemPtr.Path.size() >
+ LV.Designator.Entries.size()) {
+ Info.Diag(RHS);
+ return nullptr;
+ }
+ unsigned PathLengthToMember =
+ LV.Designator.Entries.size() - MemPtr.Path.size();
+ for (unsigned I = 0, N = MemPtr.Path.size(); I != N; ++I) {
+ const CXXRecordDecl *LVDecl = getAsBaseClass(
+ LV.Designator.Entries[PathLengthToMember + I]);
+ const CXXRecordDecl *MPDecl = MemPtr.Path[I];
+ if (LVDecl->getCanonicalDecl() != MPDecl->getCanonicalDecl()) {
+ Info.Diag(RHS);
+ return nullptr;
+ }
+ }
+
+ // Truncate the lvalue to the appropriate derived class.
+ if (!CastToDerivedClass(Info, RHS, LV, MemPtr.getContainingRecord(),
+ PathLengthToMember))
+ return nullptr;
+ } else if (!MemPtr.Path.empty()) {
+ // Extend the LValue path with the member pointer's path.
+ LV.Designator.Entries.reserve(LV.Designator.Entries.size() +
+ MemPtr.Path.size() + IncludeMember);
+
+ // Walk down to the appropriate base class.
+ if (const PointerType *PT = LVType->getAs<PointerType>())
+ LVType = PT->getPointeeType();
+ const CXXRecordDecl *RD = LVType->getAsCXXRecordDecl();
+ assert(RD && "member pointer access on non-class-type expression");
+ // The first class in the path is that of the lvalue.
+ for (unsigned I = 1, N = MemPtr.Path.size(); I != N; ++I) {
+ const CXXRecordDecl *Base = MemPtr.Path[N - I - 1];
+ if (!HandleLValueDirectBase(Info, RHS, LV, RD, Base))
+ return nullptr;
+ RD = Base;
+ }
+ // Finally cast to the class containing the member.
+ if (!HandleLValueDirectBase(Info, RHS, LV, RD,
+ MemPtr.getContainingRecord()))
+ return nullptr;
+ }
+
+ // Add the member. Note that we cannot build bound member functions here.
+ if (IncludeMember) {
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(MemPtr.getDecl())) {
+ if (!HandleLValueMember(Info, RHS, LV, FD))
+ return nullptr;
+ } else if (const IndirectFieldDecl *IFD =
+ dyn_cast<IndirectFieldDecl>(MemPtr.getDecl())) {
+ if (!HandleLValueIndirectMember(Info, RHS, LV, IFD))
+ return nullptr;
+ } else {
+ llvm_unreachable("can't construct reference to bound member function");
+ }
+ }
+
+ return MemPtr.getDecl();
+}
+
+static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info,
+ const BinaryOperator *BO,
+ LValue &LV,
+ bool IncludeMember = true) {
+ assert(BO->getOpcode() == BO_PtrMemD || BO->getOpcode() == BO_PtrMemI);
+
+ if (!EvaluateObjectArgument(Info, BO->getLHS(), LV)) {
+ if (Info.keepEvaluatingAfterFailure()) {
+ MemberPtr MemPtr;
+ EvaluateMemberPointer(BO->getRHS(), MemPtr, Info);
+ }
+ return nullptr;
+ }
+
+ return HandleMemberPointerAccess(Info, BO->getLHS()->getType(), LV,
+ BO->getRHS(), IncludeMember);
+}
+
+/// HandleBaseToDerivedCast - Apply the given base-to-derived cast operation on
+/// the provided lvalue, which currently refers to the base object.
+static bool HandleBaseToDerivedCast(EvalInfo &Info, const CastExpr *E,
+ LValue &Result) {
+ SubobjectDesignator &D = Result.Designator;
+ if (D.Invalid || !Result.checkNullPointer(Info, E, CSK_Derived))
+ return false;
+
+ QualType TargetQT = E->getType();
+ if (const PointerType *PT = TargetQT->getAs<PointerType>())
+ TargetQT = PT->getPointeeType();
+
+ // Check that this cast lands within the final derived-to-base subobject
+ // path.
+ if (D.MostDerivedPathLength + E->path_size() > D.Entries.size()) {
+ Info.CCEDiag(E, diag::note_constexpr_invalid_downcast)
+ << D.MostDerivedType << TargetQT;
+ return false;
+ }
+
+ // Check the type of the final cast. We don't need to check the path,
+ // since a cast can only be formed if the path is unique.
+ unsigned NewEntriesSize = D.Entries.size() - E->path_size();
+ const CXXRecordDecl *TargetType = TargetQT->getAsCXXRecordDecl();
+ const CXXRecordDecl *FinalType;
+ if (NewEntriesSize == D.MostDerivedPathLength)
+ FinalType = D.MostDerivedType->getAsCXXRecordDecl();
+ else
+ FinalType = getAsBaseClass(D.Entries[NewEntriesSize - 1]);
+ if (FinalType->getCanonicalDecl() != TargetType->getCanonicalDecl()) {
+ Info.CCEDiag(E, diag::note_constexpr_invalid_downcast)
+ << D.MostDerivedType << TargetQT;
+ return false;
+ }
+
+ // Truncate the lvalue to the appropriate derived class.
+ return CastToDerivedClass(Info, E, Result, TargetType, NewEntriesSize);
+}
+
+namespace {
+enum EvalStmtResult {
+ /// Evaluation failed.
+ ESR_Failed,
+ /// Hit a 'return' statement.
+ ESR_Returned,
+ /// Evaluation succeeded.
+ ESR_Succeeded,
+ /// Hit a 'continue' statement.
+ ESR_Continue,
+ /// Hit a 'break' statement.
+ ESR_Break,
+ /// Still scanning for 'case' or 'default' statement.
+ ESR_CaseNotFound
+};
+}
+
+static bool EvaluateDecl(EvalInfo &Info, const Decl *D) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ // We don't need to evaluate the initializer for a static local.
+ if (!VD->hasLocalStorage())
+ return true;
+
+ LValue Result;
+ Result.set(VD, Info.CurrentCall->Index);
+ APValue &Val = Info.CurrentCall->createTemporary(VD, true);
+
+ const Expr *InitE = VD->getInit();
+ if (!InitE) {
+ Info.Diag(D->getLocStart(), diag::note_constexpr_uninitialized)
+ << false << VD->getType();
+ Val = APValue();
+ return false;
+ }
+
+ if (InitE->isValueDependent())
+ return false;
+
+ if (!EvaluateInPlace(Val, Info, Result, InitE)) {
+ // Wipe out any partially-computed value, to allow tracking that this
+ // evaluation failed.
+ Val = APValue();
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/// Evaluate a condition (either a variable declaration or an expression).
+static bool EvaluateCond(EvalInfo &Info, const VarDecl *CondDecl,
+ const Expr *Cond, bool &Result) {
+ FullExpressionRAII Scope(Info);
+ if (CondDecl && !EvaluateDecl(Info, CondDecl))
+ return false;
+ return EvaluateAsBooleanCondition(Cond, Result, Info);
+}
+
+/// \brief A location where the result (returned value) of evaluating a
+/// statement should be stored.
+struct StmtResult {
+ /// The APValue that should be filled in with the returned value.
+ APValue &Value;
+ /// The location containing the result, if any (used to support RVO).
+ const LValue *Slot;
+};
+
+static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
+ const Stmt *S,
+ const SwitchCase *SC = nullptr);
+
+/// Evaluate the body of a loop, and translate the result as appropriate.
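+/// For example, a 'break' in the body terminates the loop successfully
+/// (ESR_Succeeded), while 'continue' or normal completion yields
+/// ESR_Continue, telling the caller to run the next iteration.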
+static EvalStmtResult EvaluateLoopBody(StmtResult &Result, EvalInfo &Info,
+ const Stmt *Body,
+ const SwitchCase *Case = nullptr) {
+ BlockScopeRAII Scope(Info);
+ switch (EvalStmtResult ESR = EvaluateStmt(Result, Info, Body, Case)) {
+ case ESR_Break:
+ return ESR_Succeeded;
+ case ESR_Succeeded:
+ case ESR_Continue:
+ return ESR_Continue;
+ case ESR_Failed:
+ case ESR_Returned:
+ case ESR_CaseNotFound:
+ return ESR;
+ }
+ llvm_unreachable("Invalid EvalStmtResult!");
+}
+
+/// Evaluate a switch statement.
+static EvalStmtResult EvaluateSwitch(StmtResult &Result, EvalInfo &Info,
+ const SwitchStmt *SS) {
+ BlockScopeRAII Scope(Info);
+
+ // Evaluate the switch condition.
+ APSInt Value;
+ {
+ FullExpressionRAII Scope(Info);
+ if (SS->getConditionVariable() &&
+ !EvaluateDecl(Info, SS->getConditionVariable()))
+ return ESR_Failed;
+ if (!EvaluateInteger(SS->getCond(), Value, Info))
+ return ESR_Failed;
+ }
+
+ // Find the switch case corresponding to the value of the condition.
+ // FIXME: Cache this lookup.
+ const SwitchCase *Found = nullptr;
+ for (const SwitchCase *SC = SS->getSwitchCaseList(); SC;
+ SC = SC->getNextSwitchCase()) {
+ if (isa<DefaultStmt>(SC)) {
+ Found = SC;
+ continue;
+ }
+
+ const CaseStmt *CS = cast<CaseStmt>(SC);
+ APSInt LHS = CS->getLHS()->EvaluateKnownConstInt(Info.Ctx);
+ APSInt RHS = CS->getRHS() ? CS->getRHS()->EvaluateKnownConstInt(Info.Ctx)
+ : LHS;
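+ // A GNU case range ('case 1 ... 4:') makes RHS differ from LHS; for an
+ // ordinary 'case' RHS == LHS and the test below is an equality check.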
+ if (LHS <= Value && Value <= RHS) {
+ Found = SC;
+ break;
+ }
+ }
+
+ if (!Found)
+ return ESR_Succeeded;
+
+ // Search the switch body for the switch case and evaluate it from there.
+ switch (EvalStmtResult ESR = EvaluateStmt(Result, Info, SS->getBody(), Found)) {
+ case ESR_Break:
+ return ESR_Succeeded;
+ case ESR_Succeeded:
+ case ESR_Continue:
+ case ESR_Failed:
+ case ESR_Returned:
+ return ESR;
+ case ESR_CaseNotFound:
+ // This can only happen if the switch case is nested within a statement
+ // expression. We have no intention of supporting that.
+ Info.Diag(Found->getLocStart(), diag::note_constexpr_stmt_expr_unsupported);
+ return ESR_Failed;
+ }
+ llvm_unreachable("Invalid EvalStmtResult!");
+}
+
+// Evaluate a statement.
+static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
+ const Stmt *S, const SwitchCase *Case) {
+ if (!Info.nextStep(S))
+ return ESR_Failed;
+
+ // If we're hunting down a 'case' or 'default' label, recurse through
+ // substatements until we hit the label.
+ if (Case) {
+ // FIXME: We don't start the lifetime of objects whose initialization we
+ // jump over. However, such objects must be of class type with a trivial
+ // default constructor that initializes all subobjects, so must be empty,
+ // so this almost never matters.
+ switch (S->getStmtClass()) {
+ case Stmt::CompoundStmtClass:
+ // FIXME: Precompute which substatement of a compound statement we
+ // would jump to, and go straight there rather than performing a
+ // linear scan each time.
+ case Stmt::LabelStmtClass:
+ case Stmt::AttributedStmtClass:
+ case Stmt::DoStmtClass:
+ break;
+
+ case Stmt::CaseStmtClass:
+ case Stmt::DefaultStmtClass:
+ if (Case == S)
+ Case = nullptr;
+ break;
+
+ case Stmt::IfStmtClass: {
+ // FIXME: Precompute which side of an 'if' we would jump to, and go
+ // straight there rather than scanning both sides.
+ const IfStmt *IS = cast<IfStmt>(S);
+
+ // Wrap the evaluation in a block scope, in case it's a DeclStmt
+ // preceded by our switch label.
+ BlockScopeRAII Scope(Info);
+
+ EvalStmtResult ESR = EvaluateStmt(Result, Info, IS->getThen(), Case);
+ if (ESR != ESR_CaseNotFound || !IS->getElse())
+ return ESR;
+ return EvaluateStmt(Result, Info, IS->getElse(), Case);
+ }
+
+ case Stmt::WhileStmtClass: {
+ EvalStmtResult ESR =
+ EvaluateLoopBody(Result, Info, cast<WhileStmt>(S)->getBody(), Case);
+ if (ESR != ESR_Continue)
+ return ESR;
+ break;
+ }
+
+ case Stmt::ForStmtClass: {
+ const ForStmt *FS = cast<ForStmt>(S);
+ EvalStmtResult ESR =
+ EvaluateLoopBody(Result, Info, FS->getBody(), Case);
+ if (ESR != ESR_Continue)
+ return ESR;
+ if (FS->getInc()) {
+ FullExpressionRAII IncScope(Info);
+ if (!EvaluateIgnoredValue(Info, FS->getInc()))
+ return ESR_Failed;
+ }
+ break;
+ }
+
+ case Stmt::DeclStmtClass:
+ // FIXME: If the variable has initialization that can't be jumped over,
+ // bail out of any immediately-surrounding compound-statement too.
+ default:
+ return ESR_CaseNotFound;
+ }
+ }
+
+ switch (S->getStmtClass()) {
+ default:
+ if (const Expr *E = dyn_cast<Expr>(S)) {
+ // Don't bother evaluating beyond an expression-statement which couldn't
+ // be evaluated.
+ FullExpressionRAII Scope(Info);
+ if (!EvaluateIgnoredValue(Info, E))
+ return ESR_Failed;
+ return ESR_Succeeded;
+ }
+
+ Info.Diag(S->getLocStart());
+ return ESR_Failed;
+
+ case Stmt::NullStmtClass:
+ return ESR_Succeeded;
+
+ case Stmt::DeclStmtClass: {
+ const DeclStmt *DS = cast<DeclStmt>(S);
+ for (const auto *DclIt : DS->decls()) {
+ // Each declaration initialization is its own full-expression.
+ // FIXME: This isn't quite right; if we're performing aggregate
+ // initialization, each braced subexpression is its own full-expression.
+ FullExpressionRAII Scope(Info);
+ if (!EvaluateDecl(Info, DclIt) && !Info.keepEvaluatingAfterFailure())
+ return ESR_Failed;
+ }
+ return ESR_Succeeded;
+ }
+
+ case Stmt::ReturnStmtClass: {
+ const Expr *RetExpr = cast<ReturnStmt>(S)->getRetValue();
+ FullExpressionRAII Scope(Info);
+ if (RetExpr &&
+ !(Result.Slot
+ ? EvaluateInPlace(Result.Value, Info, *Result.Slot, RetExpr)
+ : Evaluate(Result.Value, Info, RetExpr)))
+ return ESR_Failed;
+ return ESR_Returned;
+ }
+
+ case Stmt::CompoundStmtClass: {
+ BlockScopeRAII Scope(Info);
+
+ const CompoundStmt *CS = cast<CompoundStmt>(S);
+ for (const auto *BI : CS->body()) {
+ EvalStmtResult ESR = EvaluateStmt(Result, Info, BI, Case);
+ if (ESR == ESR_Succeeded)
+ Case = nullptr;
+ else if (ESR != ESR_CaseNotFound)
+ return ESR;
+ }
+ return Case ? ESR_CaseNotFound : ESR_Succeeded;
+ }
+
+ case Stmt::IfStmtClass: {
+ const IfStmt *IS = cast<IfStmt>(S);
+
+ // Evaluate the condition, as either a var decl or as an expression.
+ BlockScopeRAII Scope(Info);
+ bool Cond;
+ if (!EvaluateCond(Info, IS->getConditionVariable(), IS->getCond(), Cond))
+ return ESR_Failed;
+
+ if (const Stmt *SubStmt = Cond ? IS->getThen() : IS->getElse()) {
+ EvalStmtResult ESR = EvaluateStmt(Result, Info, SubStmt);
+ if (ESR != ESR_Succeeded)
+ return ESR;
+ }
+ return ESR_Succeeded;
+ }
+
+ case Stmt::WhileStmtClass: {
+ const WhileStmt *WS = cast<WhileStmt>(S);
+ while (true) {
+ BlockScopeRAII Scope(Info);
+ bool Continue;
+ if (!EvaluateCond(Info, WS->getConditionVariable(), WS->getCond(),
+ Continue))
+ return ESR_Failed;
+ if (!Continue)
+ break;
+
+ EvalStmtResult ESR = EvaluateLoopBody(Result, Info, WS->getBody());
+ if (ESR != ESR_Continue)
+ return ESR;
+ }
+ return ESR_Succeeded;
+ }
+
+ case Stmt::DoStmtClass: {
+ const DoStmt *DS = cast<DoStmt>(S);
+ bool Continue;
+ do {
+ EvalStmtResult ESR = EvaluateLoopBody(Result, Info, DS->getBody(), Case);
+ if (ESR != ESR_Continue)
+ return ESR;
+ Case = nullptr;
+
+ FullExpressionRAII CondScope(Info);
+ if (!EvaluateAsBooleanCondition(DS->getCond(), Continue, Info))
+ return ESR_Failed;
+ } while (Continue);
+ return ESR_Succeeded;
+ }
+
+ case Stmt::ForStmtClass: {
+ const ForStmt *FS = cast<ForStmt>(S);
+ BlockScopeRAII Scope(Info);
+ if (FS->getInit()) {
+ EvalStmtResult ESR = EvaluateStmt(Result, Info, FS->getInit());
+ if (ESR != ESR_Succeeded)
+ return ESR;
+ }
+ while (true) {
+ BlockScopeRAII Scope(Info);
+ bool Continue = true;
+ if (FS->getCond() && !EvaluateCond(Info, FS->getConditionVariable(),
+ FS->getCond(), Continue))
+ return ESR_Failed;
+ if (!Continue)
+ break;
+
+ EvalStmtResult ESR = EvaluateLoopBody(Result, Info, FS->getBody());
+ if (ESR != ESR_Continue)
+ return ESR;
+
+ if (FS->getInc()) {
+ FullExpressionRAII IncScope(Info);
+ if (!EvaluateIgnoredValue(Info, FS->getInc()))
+ return ESR_Failed;
+ }
+ }
+ return ESR_Succeeded;
+ }
+
+ case Stmt::CXXForRangeStmtClass: {
+ const CXXForRangeStmt *FS = cast<CXXForRangeStmt>(S);
+ BlockScopeRAII Scope(Info);
+
+ // Initialize the __range variable.
+ EvalStmtResult ESR = EvaluateStmt(Result, Info, FS->getRangeStmt());
+ if (ESR != ESR_Succeeded)
+ return ESR;
+
+ // Create the __begin and __end iterators.
+ ESR = EvaluateStmt(Result, Info, FS->getBeginEndStmt());
+ if (ESR != ESR_Succeeded)
+ return ESR;
+
+ while (true) {
+ // Condition: __begin != __end.
+ {
+ bool Continue = true;
+ FullExpressionRAII CondExpr(Info);
+ if (!EvaluateAsBooleanCondition(FS->getCond(), Continue, Info))
+ return ESR_Failed;
+ if (!Continue)
+ break;
+ }
+
+ // User's variable declaration, initialized by *__begin.
+ BlockScopeRAII InnerScope(Info);
+ ESR = EvaluateStmt(Result, Info, FS->getLoopVarStmt());
+ if (ESR != ESR_Succeeded)
+ return ESR;
+
+ // Loop body.
+ ESR = EvaluateLoopBody(Result, Info, FS->getBody());
+ if (ESR != ESR_Continue)
+ return ESR;
+
+ // Increment: ++__begin
+ if (!EvaluateIgnoredValue(Info, FS->getInc()))
+ return ESR_Failed;
+ }
+
+ return ESR_Succeeded;
+ }
+
+ case Stmt::SwitchStmtClass:
+ return EvaluateSwitch(Result, Info, cast<SwitchStmt>(S));
+
+ case Stmt::ContinueStmtClass:
+ return ESR_Continue;
+
+ case Stmt::BreakStmtClass:
+ return ESR_Break;
+
+ case Stmt::LabelStmtClass:
+ return EvaluateStmt(Result, Info, cast<LabelStmt>(S)->getSubStmt(), Case);
+
+ case Stmt::AttributedStmtClass:
+ // As a general principle, C++11 attributes can be ignored without
+ // any semantic impact.
+ return EvaluateStmt(Result, Info, cast<AttributedStmt>(S)->getSubStmt(),
+ Case);
+
+ case Stmt::CaseStmtClass:
+ case Stmt::DefaultStmtClass:
+ return EvaluateStmt(Result, Info, cast<SwitchCase>(S)->getSubStmt(), Case);
+ }
+}
+
+/// CheckTrivialDefaultConstructor - Check whether a constructor is a trivial
+/// default constructor. If so, we'll fold it whether or not it's marked as
+/// constexpr. If it is marked as constexpr, we will never implicitly define it,
+/// so we need special handling.
+static bool CheckTrivialDefaultConstructor(EvalInfo &Info, SourceLocation Loc,
+ const CXXConstructorDecl *CD,
+ bool IsValueInitialization) {
+ if (!CD->isTrivial() || !CD->isDefaultConstructor())
+ return false;
+
+ // Value-initialization does not call a trivial default constructor, so such a
+ // call is a core constant expression whether or not the constructor is
+ // constexpr.
+ if (!CD->isConstexpr() && !IsValueInitialization) {
+ if (Info.getLangOpts().CPlusPlus11) {
+ // FIXME: If DiagDecl is an implicitly-declared special member function,
+ // we should be much more explicit about why it's not constexpr.
+ Info.CCEDiag(Loc, diag::note_constexpr_invalid_function, 1)
+ << /*IsConstexpr*/0 << /*IsConstructor*/1 << CD;
+ Info.Note(CD->getLocation(), diag::note_declared_at);
+ } else {
+ Info.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
+ }
+ }
+ return true;
+}
+
+/// CheckConstexprFunction - Check that a function can be called in a constant
+/// expression.
+static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
+ const FunctionDecl *Declaration,
+ const FunctionDecl *Definition) {
+ // Potential constant expressions can contain calls to declared, but not yet
+ // defined, constexpr functions.
+ if (Info.checkingPotentialConstantExpression() && !Definition &&
+ Declaration->isConstexpr())
+ return false;
+
+ // Bail out with no diagnostic if the function declaration itself is invalid.
+ // We will have produced a relevant diagnostic while parsing it.
+ if (Declaration->isInvalidDecl())
+ return false;
+
+ // Can we evaluate this function call?
+ if (Definition && Definition->isConstexpr() && !Definition->isInvalidDecl())
+ return true;
+
+ if (Info.getLangOpts().CPlusPlus11) {
+ const FunctionDecl *DiagDecl = Definition ? Definition : Declaration;
+ // FIXME: If DiagDecl is an implicitly-declared special member function, we
+ // should be much more explicit about why it's not constexpr.
+ Info.Diag(CallLoc, diag::note_constexpr_invalid_function, 1)
+ << DiagDecl->isConstexpr() << isa<CXXConstructorDecl>(DiagDecl)
+ << DiagDecl;
+ Info.Note(DiagDecl->getLocation(), diag::note_declared_at);
+ } else {
+ Info.Diag(CallLoc, diag::note_invalid_subexpr_in_const_expr);
+ }
+ return false;
+}
+
+/// Determine if a class has any fields that might need to be copied by a
+/// trivial copy or move operation.
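+/// For example, a class whose only members are unnamed bit-fields (and whose
+/// bases likewise have none) has nothing a trivial copy would read.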
+static bool hasFields(const CXXRecordDecl *RD) {
+ if (!RD || RD->isEmpty())
+ return false;
+ for (auto *FD : RD->fields()) {
+ if (FD->isUnnamedBitfield())
+ continue;
+ return true;
+ }
+ for (auto &Base : RD->bases())
+ if (hasFields(Base.getType()->getAsCXXRecordDecl()))
+ return true;
+ return false;
+}
+
+namespace {
+typedef SmallVector<APValue, 8> ArgVector;
+}
+
+/// EvaluateArgs - Evaluate the arguments to a function call.
+static bool EvaluateArgs(ArrayRef<const Expr*> Args, ArgVector &ArgValues,
+ EvalInfo &Info) {
+ bool Success = true;
+ for (ArrayRef<const Expr*>::iterator I = Args.begin(), E = Args.end();
+ I != E; ++I) {
+ if (!Evaluate(ArgValues[I - Args.begin()], Info, *I)) {
+ // If we're checking for a potential constant expression, evaluate all
+ // initializers even if some of them fail.
+ if (!Info.keepEvaluatingAfterFailure())
+ return false;
+ Success = false;
+ }
+ }
+ return Success;
+}
+
+/// Evaluate a function call.
+static bool HandleFunctionCall(SourceLocation CallLoc,
+ const FunctionDecl *Callee, const LValue *This,
+ ArrayRef<const Expr*> Args, const Stmt *Body,
+ EvalInfo &Info, APValue &Result,
+ const LValue *ResultSlot) {
+ ArgVector ArgValues(Args.size());
+ if (!EvaluateArgs(Args, ArgValues, Info))
+ return false;
+
+ if (!Info.CheckCallLimit(CallLoc))
+ return false;
+
+ CallStackFrame Frame(Info, CallLoc, Callee, This, ArgValues.data());
+
+ // For a trivial copy or move assignment, perform an APValue copy. This is
+ // essential for unions, where the operations performed by the assignment
+ // operator cannot be represented as statements.
+ //
+ // Skip this for non-union classes with no fields; in that case, the defaulted
+ // copy/move does not actually read the object.
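+ // For example (illustrative), for 'union U { int a; float f; };' a
+ // defaulted operator= must copy the object representation wholesale, which
+ // we model as a single APValue copy of the active member.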
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Callee);
+ if (MD && MD->isDefaulted() &&
+ (MD->getParent()->isUnion() ||
+ (MD->isTrivial() && hasFields(MD->getParent())))) {
+ assert(This &&
+ (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()));
+ LValue RHS;
+ RHS.setFrom(Info.Ctx, ArgValues[0]);
+ APValue RHSValue;
+ if (!handleLValueToRValueConversion(Info, Args[0], Args[0]->getType(),
+ RHS, RHSValue))
+ return false;
+ if (!handleAssignment(Info, Args[0], *This, MD->getThisType(Info.Ctx),
+ RHSValue))
+ return false;
+ This->moveInto(Result);
+ return true;
+ }
+
+ StmtResult Ret = {Result, ResultSlot};
+ EvalStmtResult ESR = EvaluateStmt(Ret, Info, Body);
+ if (ESR == ESR_Succeeded) {
+ if (Callee->getReturnType()->isVoidType())
+ return true;
+ Info.Diag(Callee->getLocEnd(), diag::note_constexpr_no_return);
+ }
+ return ESR == ESR_Returned;
+}
+
+/// Evaluate a constructor call.
+static bool HandleConstructorCall(SourceLocation CallLoc, const LValue &This,
+ ArrayRef<const Expr*> Args,
+ const CXXConstructorDecl *Definition,
+ EvalInfo &Info, APValue &Result) {
+ ArgVector ArgValues(Args.size());
+ if (!EvaluateArgs(Args, ArgValues, Info))
+ return false;
+
+ if (!Info.CheckCallLimit(CallLoc))
+ return false;
+
+ const CXXRecordDecl *RD = Definition->getParent();
+ if (RD->getNumVBases()) {
+ Info.Diag(CallLoc, diag::note_constexpr_virtual_base) << RD;
+ return false;
+ }
+
+ CallStackFrame Frame(Info, CallLoc, Definition, &This, ArgValues.data());
+
+ // FIXME: Creating an APValue just to hold a nonexistent return value is
+ // wasteful.
+ APValue RetVal;
+ StmtResult Ret = {RetVal, nullptr};
+
+ // If it's a delegating constructor, just delegate.
+ if (Definition->isDelegatingConstructor()) {
+ CXXConstructorDecl::init_const_iterator I = Definition->init_begin();
+ {
+ FullExpressionRAII InitScope(Info);
+ if (!EvaluateInPlace(Result, Info, This, (*I)->getInit()))
+ return false;
+ }
+ return EvaluateStmt(Ret, Info, Definition->getBody()) != ESR_Failed;
+ }
+
+ // For a trivial copy or move constructor, perform an APValue copy. This is
+ // essential for unions (or classes with anonymous union members), where the
+ // operations performed by the constructor cannot be represented by
+ // ctor-initializers.
+ //
+ // Skip this for empty non-union classes; we should not perform an
+ // lvalue-to-rvalue conversion on them because their copy constructor does not
+ // actually read them.
+ if (Definition->isDefaulted() && Definition->isCopyOrMoveConstructor() &&
+ (Definition->getParent()->isUnion() ||
+ (Definition->isTrivial() && hasFields(Definition->getParent())))) {
+ LValue RHS;
+ RHS.setFrom(Info.Ctx, ArgValues[0]);
+ return handleLValueToRValueConversion(Info, Args[0], Args[0]->getType(),
+ RHS, Result);
+ }
+
+ // Reserve space for the struct members.
+ if (!RD->isUnion() && Result.isUninit())
+ Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
+ std::distance(RD->field_begin(), RD->field_end()));
+
+ if (RD->isInvalidDecl()) return false;
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+
+ // A scope for temporaries lifetime-extended by reference members.
+ BlockScopeRAII LifetimeExtendedScope(Info);
+
+ bool Success = true;
+ unsigned BasesSeen = 0;
+#ifndef NDEBUG
+ CXXRecordDecl::base_class_const_iterator BaseIt = RD->bases_begin();
+#endif
+ for (const auto *I : Definition->inits()) {
+ LValue Subobject = This;
+ APValue *Value = &Result;
+
+ // Determine the subobject to initialize.
+ FieldDecl *FD = nullptr;
+ if (I->isBaseInitializer()) {
+ QualType BaseType(I->getBaseClass(), 0);
+#ifndef NDEBUG
+      // Non-virtual base classes are initialized in the order in which they
+      // appear in the class definition. We have already checked for virtual
+      // base classes.
+ assert(!BaseIt->isVirtual() && "virtual base for literal type");
+ assert(Info.Ctx.hasSameType(BaseIt->getType(), BaseType) &&
+ "base class initializers not in expected order");
+ ++BaseIt;
+#endif
+ if (!HandleLValueDirectBase(Info, I->getInit(), Subobject, RD,
+ BaseType->getAsCXXRecordDecl(), &Layout))
+ return false;
+ Value = &Result.getStructBase(BasesSeen++);
+ } else if ((FD = I->getMember())) {
+ if (!HandleLValueMember(Info, I->getInit(), Subobject, FD, &Layout))
+ return false;
+ if (RD->isUnion()) {
+ Result = APValue(FD);
+ Value = &Result.getUnionValue();
+ } else {
+ Value = &Result.getStructField(FD->getFieldIndex());
+ }
+ } else if (IndirectFieldDecl *IFD = I->getIndirectMember()) {
+ // Walk the indirect field decl's chain to find the object to initialize,
+ // and make sure we've initialized every step along it.
+ for (auto *C : IFD->chain()) {
+ FD = cast<FieldDecl>(C);
+ CXXRecordDecl *CD = cast<CXXRecordDecl>(FD->getParent());
+ // Switch the union field if it differs. This happens if we had
+ // preceding zero-initialization, and we're now initializing a union
+ // subobject other than the first.
+ // FIXME: In this case, the values of the other subobjects are
+ // specified, since zero-initialization sets all padding bits to zero.
+ if (Value->isUninit() ||
+ (Value->isUnion() && Value->getUnionField() != FD)) {
+ if (CD->isUnion())
+ *Value = APValue(FD);
+ else
+ *Value = APValue(APValue::UninitStruct(), CD->getNumBases(),
+ std::distance(CD->field_begin(), CD->field_end()));
+ }
+ if (!HandleLValueMember(Info, I->getInit(), Subobject, FD))
+ return false;
+ if (CD->isUnion())
+ Value = &Value->getUnionValue();
+ else
+ Value = &Value->getStructField(FD->getFieldIndex());
+ }
+ } else {
+ llvm_unreachable("unknown base initializer kind");
+ }
+
+ FullExpressionRAII InitScope(Info);
+ if (!EvaluateInPlace(*Value, Info, Subobject, I->getInit()) ||
+ (FD && FD->isBitField() && !truncateBitfieldValue(Info, I->getInit(),
+ *Value, FD))) {
+ // If we're checking for a potential constant expression, evaluate all
+ // initializers even if some of them fail.
+ if (!Info.keepEvaluatingAfterFailure())
+ return false;
+ Success = false;
+ }
+ }
+
+ return Success &&
+ EvaluateStmt(Ret, Info, Definition->getBody()) != ESR_Failed;
+}
+
+//===----------------------------------------------------------------------===//
+// Generic Evaluation
+//===----------------------------------------------------------------------===//
+namespace {
+
+template <class Derived>
+class ExprEvaluatorBase
+ : public ConstStmtVisitor<Derived, bool> {
+private:
+ Derived &getDerived() { return static_cast<Derived&>(*this); }
+ bool DerivedSuccess(const APValue &V, const Expr *E) {
+ return getDerived().Success(V, E);
+ }
+ bool DerivedZeroInitialization(const Expr *E) {
+ return getDerived().ZeroInitialization(E);
+ }
+
+ // Check whether a conditional operator with a non-constant condition is a
+ // potential constant expression. If neither arm is a potential constant
+ // expression, then the conditional operator is not either.
+ template<typename ConditionalOperator>
+ void CheckPotentialConstantConditional(const ConditionalOperator *E) {
+ assert(Info.checkingPotentialConstantExpression());
+
+ // Speculatively evaluate both arms.
+ {
+ SmallVector<PartialDiagnosticAt, 8> Diag;
+ SpeculativeEvaluationRAII Speculate(Info, &Diag);
+
+ StmtVisitorTy::Visit(E->getFalseExpr());
+ if (Diag.empty())
+ return;
+
+ Diag.clear();
+ StmtVisitorTy::Visit(E->getTrueExpr());
+ if (Diag.empty())
+ return;
+ }
+
+ Error(E, diag::note_constexpr_conditional_never_const);
+ }
+
+ template<typename ConditionalOperator>
+ bool HandleConditionalOperator(const ConditionalOperator *E) {
+ bool BoolResult;
+ if (!EvaluateAsBooleanCondition(E->getCond(), BoolResult, Info)) {
+ if (Info.checkingPotentialConstantExpression())
+ CheckPotentialConstantConditional(E);
+ return false;
+ }
+
+ Expr *EvalExpr = BoolResult ? E->getTrueExpr() : E->getFalseExpr();
+ return StmtVisitorTy::Visit(EvalExpr);
+ }
+
+protected:
+ EvalInfo &Info;
+ typedef ConstStmtVisitor<Derived, bool> StmtVisitorTy;
+ typedef ExprEvaluatorBase ExprEvaluatorBaseTy;
+
+ OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) {
+ return Info.CCEDiag(E, D);
+ }
+
+ bool ZeroInitialization(const Expr *E) { return Error(E); }
+
+public:
+ ExprEvaluatorBase(EvalInfo &Info) : Info(Info) {}
+
+ EvalInfo &getEvalInfo() { return Info; }
+
+ /// Report an evaluation error. This should only be called when an error is
+ /// first discovered. When propagating an error, just return false.
+ bool Error(const Expr *E, diag::kind D) {
+ Info.Diag(E, D);
+ return false;
+ }
+ bool Error(const Expr *E) {
+ return Error(E, diag::note_invalid_subexpr_in_const_expr);
+ }
+
+ bool VisitStmt(const Stmt *) {
+ llvm_unreachable("Expression evaluator should not be called on stmts");
+ }
+ bool VisitExpr(const Expr *E) {
+ return Error(E);
+ }
+
+ bool VisitParenExpr(const ParenExpr *E)
+ { return StmtVisitorTy::Visit(E->getSubExpr()); }
+ bool VisitUnaryExtension(const UnaryOperator *E)
+ { return StmtVisitorTy::Visit(E->getSubExpr()); }
+ bool VisitUnaryPlus(const UnaryOperator *E)
+ { return StmtVisitorTy::Visit(E->getSubExpr()); }
+ bool VisitChooseExpr(const ChooseExpr *E)
+ { return StmtVisitorTy::Visit(E->getChosenSubExpr()); }
+ bool VisitGenericSelectionExpr(const GenericSelectionExpr *E)
+ { return StmtVisitorTy::Visit(E->getResultExpr()); }
+ bool VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E)
+ { return StmtVisitorTy::Visit(E->getReplacement()); }
+ bool VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E)
+ { return StmtVisitorTy::Visit(E->getExpr()); }
+ bool VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E) {
+ // The initializer may not have been parsed yet, or might be erroneous.
+ if (!E->getExpr())
+ return Error(E);
+ return StmtVisitorTy::Visit(E->getExpr());
+ }
+ // We cannot create any objects for which cleanups are required, so there is
+ // nothing to do here; all cleanups must come from unevaluated subexpressions.
+ bool VisitExprWithCleanups(const ExprWithCleanups *E)
+ { return StmtVisitorTy::Visit(E->getSubExpr()); }
+
+ bool VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *E) {
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 0;
+ return static_cast<Derived*>(this)->VisitCastExpr(E);
+ }
+ bool VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *E) {
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 1;
+ return static_cast<Derived*>(this)->VisitCastExpr(E);
+ }
+
+ bool VisitBinaryOperator(const BinaryOperator *E) {
+ switch (E->getOpcode()) {
+ default:
+ return Error(E);
+
+ case BO_Comma:
+ VisitIgnoredValue(E->getLHS());
+ return StmtVisitorTy::Visit(E->getRHS());
+
+ case BO_PtrMemD:
+ case BO_PtrMemI: {
+ LValue Obj;
+ if (!HandleMemberPointerAccess(Info, E, Obj))
+ return false;
+ APValue Result;
+ if (!handleLValueToRValueConversion(Info, E, E->getType(), Obj, Result))
+ return false;
+ return DerivedSuccess(Result, E);
+ }
+ }
+ }
+
+ bool VisitBinaryConditionalOperator(const BinaryConditionalOperator *E) {
+ // Evaluate and cache the common expression. We treat it as a temporary,
+ // even though it's not quite the same thing.
+ if (!Evaluate(Info.CurrentCall->createTemporary(E->getOpaqueValue(), false),
+ Info, E->getCommon()))
+ return false;
+
+ return HandleConditionalOperator(E);
+ }
+
+ bool VisitConditionalOperator(const ConditionalOperator *E) {
+ bool IsBcpCall = false;
+ // If the condition (ignoring parens) is a __builtin_constant_p call,
+ // the result is a constant expression if it can be folded without
+ // side-effects. This is an important GNU extension. See GCC PR38377
+ // for discussion.
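+    //
+    // For example, '__builtin_constant_p(n) ? n : -1' is treated as a
+    // constant whenever 'n' folds without side effects, even though 'n'
+    // alone need not be a constant expression.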
+ if (const CallExpr *CallCE =
+ dyn_cast<CallExpr>(E->getCond()->IgnoreParenCasts()))
+ if (CallCE->getBuiltinCallee() == Builtin::BI__builtin_constant_p)
+ IsBcpCall = true;
+
+ // Always assume __builtin_constant_p(...) ? ... : ... is a potential
+ // constant expression; we can't check whether it's potentially foldable.
+ if (Info.checkingPotentialConstantExpression() && IsBcpCall)
+ return false;
+
+ FoldConstant Fold(Info, IsBcpCall);
+ if (!HandleConditionalOperator(E)) {
+ Fold.keepDiagnostics();
+ return false;
+ }
+
+ return true;
+ }
+
+ bool VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
+ if (APValue *Value = Info.CurrentCall->getTemporary(E))
+ return DerivedSuccess(*Value, E);
+
+ const Expr *Source = E->getSourceExpr();
+ if (!Source)
+ return Error(E);
+ if (Source == E) { // sanity checking.
+ assert(0 && "OpaqueValueExpr recursively refers to itself");
+ return Error(E);
+ }
+ return StmtVisitorTy::Visit(Source);
+ }
+
+ bool VisitCallExpr(const CallExpr *E) {
+ APValue Result;
+ if (!handleCallExpr(E, Result, nullptr))
+ return false;
+ return DerivedSuccess(Result, E);
+ }
+
+ bool handleCallExpr(const CallExpr *E, APValue &Result,
+ const LValue *ResultSlot) {
+ const Expr *Callee = E->getCallee()->IgnoreParens();
+ QualType CalleeType = Callee->getType();
+
+ const FunctionDecl *FD = nullptr;
+ LValue *This = nullptr, ThisVal;
+ auto Args = llvm::makeArrayRef(E->getArgs(), E->getNumArgs());
+ bool HasQualifier = false;
+
+ // Extract function decl and 'this' pointer from the callee.
+ if (CalleeType->isSpecificBuiltinType(BuiltinType::BoundMember)) {
+ const ValueDecl *Member = nullptr;
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(Callee)) {
+ // Explicit bound member calls, such as x.f() or p->g();
+ if (!EvaluateObjectArgument(Info, ME->getBase(), ThisVal))
+ return false;
+ Member = ME->getMemberDecl();
+ This = &ThisVal;
+ HasQualifier = ME->hasQualifier();
+ } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(Callee)) {
+ // Indirect bound member calls ('.*' or '->*').
+ Member = HandleMemberPointerAccess(Info, BE, ThisVal, false);
+ if (!Member) return false;
+ This = &ThisVal;
+ } else
+ return Error(Callee);
+
+ FD = dyn_cast<FunctionDecl>(Member);
+ if (!FD)
+ return Error(Callee);
+ } else if (CalleeType->isFunctionPointerType()) {
+ LValue Call;
+ if (!EvaluatePointer(Callee, Call, Info))
+ return false;
+
+ if (!Call.getLValueOffset().isZero())
+ return Error(Callee);
+ FD = dyn_cast_or_null<FunctionDecl>(
+ Call.getLValueBase().dyn_cast<const ValueDecl*>());
+ if (!FD)
+ return Error(Callee);
+
+ // Overloaded operator calls to member functions are represented as normal
+ // calls with '*this' as the first argument.
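+      //
+      // For example, if 'S' declares a member 'operator+', 's1 + s2' is
+      // modeled as 'operator+(s1, s2)', and Args[0] supplies '*this'.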
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
+ if (MD && !MD->isStatic()) {
+ // FIXME: When selecting an implicit conversion for an overloaded
+ // operator delete, we sometimes try to evaluate calls to conversion
+ // operators without a 'this' parameter!
+ if (Args.empty())
+ return Error(E);
+
+ if (!EvaluateObjectArgument(Info, Args[0], ThisVal))
+ return false;
+ This = &ThisVal;
+ Args = Args.slice(1);
+ }
+
+ // Don't call function pointers which have been cast to some other type.
+ if (!Info.Ctx.hasSameType(CalleeType->getPointeeType(), FD->getType()))
+ return Error(E);
+ } else
+ return Error(E);
+
+ if (This && !This->checkSubobject(Info, E, CSK_This))
+ return false;
+
+ // DR1358 allows virtual constexpr functions in some cases. Don't allow
+ // calls to such functions in constant expressions.
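+    //
+    // For example, 'p->f()' with virtual 'f' is rejected, but a qualified
+    // call such as 'p->B::f()' does not dispatch virtually and is allowed
+    // to proceed.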
+ if (This && !HasQualifier &&
+ isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isVirtual())
+ return Error(E, diag::note_constexpr_virtual_call);
+
+ const FunctionDecl *Definition = nullptr;
+ Stmt *Body = FD->getBody(Definition);
+
+ if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition) ||
+ !HandleFunctionCall(E->getExprLoc(), Definition, This, Args, Body, Info,
+ Result, ResultSlot))
+ return false;
+
+ return true;
+ }
+
+ bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
+ return StmtVisitorTy::Visit(E->getInitializer());
+ }
+ bool VisitInitListExpr(const InitListExpr *E) {
+ if (E->getNumInits() == 0)
+ return DerivedZeroInitialization(E);
+ if (E->getNumInits() == 1)
+ return StmtVisitorTy::Visit(E->getInit(0));
+ return Error(E);
+ }
+ bool VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
+ return DerivedZeroInitialization(E);
+ }
+ bool VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
+ return DerivedZeroInitialization(E);
+ }
+ bool VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
+ return DerivedZeroInitialization(E);
+ }
+
+ /// A member expression where the object is a prvalue is itself a prvalue.
+ bool VisitMemberExpr(const MemberExpr *E) {
+ assert(!E->isArrow() && "missing call to bound member function?");
+
+ APValue Val;
+ if (!Evaluate(Val, Info, E->getBase()))
+ return false;
+
+ QualType BaseTy = E->getBase()->getType();
+
+ const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
+ if (!FD) return Error(E);
+ assert(!FD->getType()->isReferenceType() && "prvalue reference?");
+ assert(BaseTy->castAs<RecordType>()->getDecl()->getCanonicalDecl() ==
+ FD->getParent()->getCanonicalDecl() && "record / field mismatch");
+
+ CompleteObject Obj(&Val, BaseTy);
+ SubobjectDesignator Designator(BaseTy);
+ Designator.addDeclUnchecked(FD);
+
+ APValue Result;
+ return extractSubobject(Info, E, Obj, Designator, Result) &&
+ DerivedSuccess(Result, E);
+ }
+
+ bool VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ break;
+
+ case CK_AtomicToNonAtomic: {
+ APValue AtomicVal;
+ if (!EvaluateAtomic(E->getSubExpr(), AtomicVal, Info))
+ return false;
+ return DerivedSuccess(AtomicVal, E);
+ }
+
+ case CK_NoOp:
+ case CK_UserDefinedConversion:
+ return StmtVisitorTy::Visit(E->getSubExpr());
+
+ case CK_LValueToRValue: {
+ LValue LVal;
+ if (!EvaluateLValue(E->getSubExpr(), LVal, Info))
+ return false;
+ APValue RVal;
+ // Note, we use the subexpression's type in order to retain cv-qualifiers.
+ if (!handleLValueToRValueConversion(Info, E, E->getSubExpr()->getType(),
+ LVal, RVal))
+ return false;
+ return DerivedSuccess(RVal, E);
+ }
+ }
+
+ return Error(E);
+ }
+
+ bool VisitUnaryPostInc(const UnaryOperator *UO) {
+ return VisitUnaryPostIncDec(UO);
+ }
+ bool VisitUnaryPostDec(const UnaryOperator *UO) {
+ return VisitUnaryPostIncDec(UO);
+ }
+ bool VisitUnaryPostIncDec(const UnaryOperator *UO) {
+ if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
+ return Error(UO);
+
+ LValue LVal;
+ if (!EvaluateLValue(UO->getSubExpr(), LVal, Info))
+ return false;
+ APValue RVal;
+ if (!handleIncDec(this->Info, UO, LVal, UO->getSubExpr()->getType(),
+ UO->isIncrementOp(), &RVal))
+ return false;
+ return DerivedSuccess(RVal, UO);
+ }
+
+ bool VisitStmtExpr(const StmtExpr *E) {
+ // We will have checked the full-expressions inside the statement expression
+ // when they were completed, and don't need to check them again now.
+ if (Info.checkingForOverflow())
+ return Error(E);
+
+ BlockScopeRAII Scope(Info);
+ const CompoundStmt *CS = E->getSubStmt();
+ if (CS->body_empty())
+ return true;
+
+ for (CompoundStmt::const_body_iterator BI = CS->body_begin(),
+ BE = CS->body_end();
+ /**/; ++BI) {
+ if (BI + 1 == BE) {
+ const Expr *FinalExpr = dyn_cast<Expr>(*BI);
+ if (!FinalExpr) {
+ Info.Diag((*BI)->getLocStart(),
+ diag::note_constexpr_stmt_expr_unsupported);
+ return false;
+ }
+ return this->Visit(FinalExpr);
+ }
+
+ APValue ReturnValue;
+ StmtResult Result = { ReturnValue, nullptr };
+ EvalStmtResult ESR = EvaluateStmt(Result, Info, *BI);
+ if (ESR != ESR_Succeeded) {
+ // FIXME: If the statement-expression terminated due to 'return',
+ // 'break', or 'continue', it would be nice to propagate that to
+ // the outer statement evaluation rather than bailing out.
+ if (ESR != ESR_Failed)
+ Info.Diag((*BI)->getLocStart(),
+ diag::note_constexpr_stmt_expr_unsupported);
+ return false;
+ }
+ }
+
+  llvm_unreachable("Should have returned from within the loop above");
+ }
+
+ /// Visit a value which is evaluated, but whose value is ignored.
+ void VisitIgnoredValue(const Expr *E) {
+ EvaluateIgnoredValue(Info, E);
+ }
+};
+
+}
+
+//===----------------------------------------------------------------------===//
+// Common base class for lvalue and temporary evaluation.
+//===----------------------------------------------------------------------===//
+namespace {
+template<class Derived>
+class LValueExprEvaluatorBase
+ : public ExprEvaluatorBase<Derived> {
+protected:
+ LValue &Result;
+ typedef LValueExprEvaluatorBase LValueExprEvaluatorBaseTy;
+ typedef ExprEvaluatorBase<Derived> ExprEvaluatorBaseTy;
+
+ bool Success(APValue::LValueBase B) {
+ Result.set(B);
+ return true;
+ }
+
+public:
+ LValueExprEvaluatorBase(EvalInfo &Info, LValue &Result) :
+ ExprEvaluatorBaseTy(Info), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *E) {
+ Result.setFrom(this->Info.Ctx, V);
+ return true;
+ }
+
+ bool VisitMemberExpr(const MemberExpr *E) {
+ // Handle non-static data members.
+ QualType BaseTy;
+ bool EvalOK;
+ if (E->isArrow()) {
+ EvalOK = EvaluatePointer(E->getBase(), Result, this->Info);
+ BaseTy = E->getBase()->getType()->castAs<PointerType>()->getPointeeType();
+ } else if (E->getBase()->isRValue()) {
+ assert(E->getBase()->getType()->isRecordType());
+ EvalOK = EvaluateTemporary(E->getBase(), Result, this->Info);
+ BaseTy = E->getBase()->getType();
+ } else {
+ EvalOK = this->Visit(E->getBase());
+ BaseTy = E->getBase()->getType();
+ }
+ if (!EvalOK) {
+ if (!this->Info.allowInvalidBaseExpr())
+ return false;
+ Result.setInvalid(E);
+ return true;
+ }
+
+ const ValueDecl *MD = E->getMemberDecl();
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl())) {
+ assert(BaseTy->getAs<RecordType>()->getDecl()->getCanonicalDecl() ==
+ FD->getParent()->getCanonicalDecl() && "record / field mismatch");
+ (void)BaseTy;
+ if (!HandleLValueMember(this->Info, E, Result, FD))
+ return false;
+ } else if (const IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(MD)) {
+ if (!HandleLValueIndirectMember(this->Info, E, Result, IFD))
+ return false;
+ } else
+ return this->Error(E);
+
+ if (MD->getType()->isReferenceType()) {
+ APValue RefValue;
+ if (!handleLValueToRValueConversion(this->Info, E, MD->getType(), Result,
+ RefValue))
+ return false;
+ return Success(RefValue, E);
+ }
+ return true;
+ }
+
+ bool VisitBinaryOperator(const BinaryOperator *E) {
+ switch (E->getOpcode()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ return HandleMemberPointerAccess(this->Info, E, Result);
+ }
+ }
+
+ bool VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ if (!this->Visit(E->getSubExpr()))
+ return false;
+
+ // Now figure out the necessary offset to add to the base LV to get from
+ // the derived class to the base class.
+ return HandleLValueBasePath(this->Info, E, E->getSubExpr()->getType(),
+ Result);
+ }
+ }
+};
+}
+
+//===----------------------------------------------------------------------===//
+// LValue Evaluation
+//
+// This is used for evaluating lvalues (in C and C++), xvalues (in C++11),
+// function designators (in C), decl references to void objects (in C), and
+// temporaries (if building with -Wno-address-of-temporary).
+//
+// LValue evaluation produces values comprising a base expression of one of the
+// following types:
+// - Declarations
+// * VarDecl
+// * FunctionDecl
+// - Literals
+// * CompoundLiteralExpr in C
+// * StringLiteral
+// * CXXTypeidExpr
+// * PredefinedExpr
+// * ObjCStringLiteralExpr
+// * ObjCEncodeExpr
+// * AddrLabelExpr
+// * BlockExpr
+// * CallExpr for a MakeStringConstant builtin
+// - Locals and temporaries
+// * MaterializeTemporaryExpr
+// * Any Expr, with a CallIndex indicating the function in which the temporary
+// was evaluated, for cases where the MaterializeTemporaryExpr is missing
+// from the AST (FIXME).
+// * A MaterializeTemporaryExpr that has static storage duration, with no
+// CallIndex, for a lifetime-extended temporary.
+// plus an offset in bytes.
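+//
+// For example, the glvalue 'arr[2].f' evaluates to the base 'arr' (a
+// VarDecl) plus the byte offset of member 'f' within element 2, with a
+// designator recording the subobject path.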
+//===----------------------------------------------------------------------===//
+namespace {
+class LValueExprEvaluator
+ : public LValueExprEvaluatorBase<LValueExprEvaluator> {
+public:
+ LValueExprEvaluator(EvalInfo &Info, LValue &Result) :
+ LValueExprEvaluatorBaseTy(Info, Result) {}
+
+ bool VisitVarDecl(const Expr *E, const VarDecl *VD);
+ bool VisitUnaryPreIncDec(const UnaryOperator *UO);
+
+ bool VisitDeclRefExpr(const DeclRefExpr *E);
+ bool VisitPredefinedExpr(const PredefinedExpr *E) { return Success(E); }
+ bool VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
+ bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
+ bool VisitMemberExpr(const MemberExpr *E);
+ bool VisitStringLiteral(const StringLiteral *E) { return Success(E); }
+ bool VisitObjCEncodeExpr(const ObjCEncodeExpr *E) { return Success(E); }
+ bool VisitCXXTypeidExpr(const CXXTypeidExpr *E);
+ bool VisitCXXUuidofExpr(const CXXUuidofExpr *E);
+ bool VisitArraySubscriptExpr(const ArraySubscriptExpr *E);
+ bool VisitUnaryDeref(const UnaryOperator *E);
+ bool VisitUnaryReal(const UnaryOperator *E);
+ bool VisitUnaryImag(const UnaryOperator *E);
+ bool VisitUnaryPreInc(const UnaryOperator *UO) {
+ return VisitUnaryPreIncDec(UO);
+ }
+ bool VisitUnaryPreDec(const UnaryOperator *UO) {
+ return VisitUnaryPreIncDec(UO);
+ }
+ bool VisitBinAssign(const BinaryOperator *BO);
+ bool VisitCompoundAssignOperator(const CompoundAssignOperator *CAO);
+
+ bool VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return LValueExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_LValueBitCast:
+ this->CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
+ if (!Visit(E->getSubExpr()))
+ return false;
+ Result.Designator.setInvalid();
+ return true;
+
+ case CK_BaseToDerived:
+ if (!Visit(E->getSubExpr()))
+ return false;
+ return HandleBaseToDerivedCast(Info, E, Result);
+ }
+ }
+};
+} // end anonymous namespace
+
+/// Evaluate an expression as an lvalue. This can be legitimately called on
+/// expressions which are not glvalues, in three cases:
+/// * function designators in C,
+/// * "extern void" objects, and
+/// * @selector() expressions in Objective-C
+static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info) {
+ assert(E->isGLValue() || E->getType()->isFunctionType() ||
+ E->getType()->isVoidType() || isa<ObjCSelectorExpr>(E));
+ return LValueExprEvaluator(Info, Result).Visit(E);
+}
+
+bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(E->getDecl()))
+ return Success(FD);
+ if (const VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()))
+ return VisitVarDecl(E, VD);
+ return Error(E);
+}
+
+bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) {
+ CallStackFrame *Frame = nullptr;
+ if (VD->hasLocalStorage() && Info.CurrentCall->Index > 1)
+ Frame = Info.CurrentCall;
+
+ if (!VD->getType()->isReferenceType()) {
+ if (Frame) {
+ Result.set(VD, Frame->Index);
+ return true;
+ }
+ return Success(VD);
+ }
+
+ APValue *V;
+ if (!evaluateVarDeclInit(Info, E, VD, Frame, V))
+ return false;
+ if (V->isUninit()) {
+ if (!Info.checkingPotentialConstantExpression())
+ Info.Diag(E, diag::note_constexpr_use_uninit_reference);
+ return false;
+ }
+ return Success(*V, E);
+}
+
+bool LValueExprEvaluator::VisitMaterializeTemporaryExpr(
+ const MaterializeTemporaryExpr *E) {
+ // Walk through the expression to find the materialized temporary itself.
+ SmallVector<const Expr *, 2> CommaLHSs;
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
+ const Expr *Inner = E->GetTemporaryExpr()->
+ skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
+
+ // If we passed any comma operators, evaluate their LHSs.
+ for (unsigned I = 0, N = CommaLHSs.size(); I != N; ++I)
+ if (!EvaluateIgnoredValue(Info, CommaLHSs[I]))
+ return false;
+
+ // A materialized temporary with static storage duration can appear within the
+ // result of a constant expression evaluation, so we need to preserve its
+ // value for use outside this evaluation.
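+  //
+  // For example, in 'constexpr const int &r = 42;' the temporary holding
+  // 42 is lifetime-extended to static storage duration, so its value must
+  // survive beyond this evaluation.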
+ APValue *Value;
+ if (E->getStorageDuration() == SD_Static) {
+ Value = Info.Ctx.getMaterializedTemporaryValue(E, true);
+ *Value = APValue();
+ Result.set(E);
+ } else {
+ Value = &Info.CurrentCall->
+ createTemporary(E, E->getStorageDuration() == SD_Automatic);
+ Result.set(E, Info.CurrentCall->Index);
+ }
+
+ QualType Type = Inner->getType();
+
+ // Materialize the temporary itself.
+ if (!EvaluateInPlace(*Value, Info, Result, Inner) ||
+ (E->getStorageDuration() == SD_Static &&
+ !CheckConstantExpression(Info, E->getExprLoc(), Type, *Value))) {
+ *Value = APValue();
+ return false;
+ }
+
+ // Adjust our lvalue to refer to the desired subobject.
+ for (unsigned I = Adjustments.size(); I != 0; /**/) {
+ --I;
+ switch (Adjustments[I].Kind) {
+ case SubobjectAdjustment::DerivedToBaseAdjustment:
+ if (!HandleLValueBasePath(Info, Adjustments[I].DerivedToBase.BasePath,
+ Type, Result))
+ return false;
+ Type = Adjustments[I].DerivedToBase.BasePath->getType();
+ break;
+
+ case SubobjectAdjustment::FieldAdjustment:
+ if (!HandleLValueMember(Info, E, Result, Adjustments[I].Field))
+ return false;
+ Type = Adjustments[I].Field->getType();
+ break;
+
+ case SubobjectAdjustment::MemberPointerAdjustment:
+ if (!HandleMemberPointerAccess(this->Info, Type, Result,
+ Adjustments[I].Ptr.RHS))
+ return false;
+ Type = Adjustments[I].Ptr.MPT->getPointeeType();
+ break;
+ }
+ }
+
+ return true;
+}
+
+bool
+LValueExprEvaluator::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
+ assert(!Info.getLangOpts().CPlusPlus && "lvalue compound literal in c++?");
+ // Defer visiting the literal until the lvalue-to-rvalue conversion. We can
+ // only see this when folding in C, so there's no standard to follow here.
+ return Success(E);
+}
+
+bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
+ if (!E->isPotentiallyEvaluated())
+ return Success(E);
+
+ Info.Diag(E, diag::note_constexpr_typeid_polymorphic)
+ << E->getExprOperand()->getType()
+ << E->getExprOperand()->getSourceRange();
+ return false;
+}
+
+bool LValueExprEvaluator::VisitCXXUuidofExpr(const CXXUuidofExpr *E) {
+ return Success(E);
+}
+
+bool LValueExprEvaluator::VisitMemberExpr(const MemberExpr *E) {
+ // Handle static data members.
+ if (const VarDecl *VD = dyn_cast<VarDecl>(E->getMemberDecl())) {
+ VisitIgnoredValue(E->getBase());
+ return VisitVarDecl(E, VD);
+ }
+
+ // Handle static member functions.
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(E->getMemberDecl())) {
+ if (MD->isStatic()) {
+ VisitIgnoredValue(E->getBase());
+ return Success(MD);
+ }
+ }
+
+ // Handle non-static data members.
+ return LValueExprEvaluatorBaseTy::VisitMemberExpr(E);
+}
+
+bool LValueExprEvaluator::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
+ // FIXME: Deal with vectors as array subscript bases.
+ if (E->getBase()->getType()->isVectorType())
+ return Error(E);
+
+ if (!EvaluatePointer(E->getBase(), Result, Info))
+ return false;
+
+ APSInt Index;
+ if (!EvaluateInteger(E->getIdx(), Index, Info))
+ return false;
+
+ return HandleLValueArrayAdjustment(Info, E, Result, E->getType(),
+ getExtValue(Index));
+}
+
+bool LValueExprEvaluator::VisitUnaryDeref(const UnaryOperator *E) {
+ return EvaluatePointer(E->getSubExpr(), Result, Info);
+}
+
+bool LValueExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
+ if (!Visit(E->getSubExpr()))
+ return false;
+ // __real is a no-op on scalar lvalues.
+ if (E->getSubExpr()->getType()->isAnyComplexType())
+ HandleLValueComplexElement(Info, E, Result, E->getType(), false);
+ return true;
+}
+
+bool LValueExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
+ assert(E->getSubExpr()->getType()->isAnyComplexType() &&
+ "lvalue __imag__ on scalar?");
+ if (!Visit(E->getSubExpr()))
+ return false;
+ HandleLValueComplexElement(Info, E, Result, E->getType(), true);
+ return true;
+}
+
+bool LValueExprEvaluator::VisitUnaryPreIncDec(const UnaryOperator *UO) {
+ if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
+ return Error(UO);
+
+ if (!this->Visit(UO->getSubExpr()))
+ return false;
+
+ return handleIncDec(
+ this->Info, UO, Result, UO->getSubExpr()->getType(),
+ UO->isIncrementOp(), nullptr);
+}
+
+bool LValueExprEvaluator::VisitCompoundAssignOperator(
+ const CompoundAssignOperator *CAO) {
+ if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
+ return Error(CAO);
+
+ APValue RHS;
+
+ // The overall lvalue result is the result of evaluating the LHS.
+ if (!this->Visit(CAO->getLHS())) {
+ if (Info.keepEvaluatingAfterFailure())
+ Evaluate(RHS, this->Info, CAO->getRHS());
+ return false;
+ }
+
+ if (!Evaluate(RHS, this->Info, CAO->getRHS()))
+ return false;
+
+ return handleCompoundAssignment(
+ this->Info, CAO,
+ Result, CAO->getLHS()->getType(), CAO->getComputationLHSType(),
+ CAO->getOpForCompoundAssignment(CAO->getOpcode()), RHS);
+}
+
+bool LValueExprEvaluator::VisitBinAssign(const BinaryOperator *E) {
+ if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
+ return Error(E);
+
+ APValue NewVal;
+
+ if (!this->Visit(E->getLHS())) {
+ if (Info.keepEvaluatingAfterFailure())
+ Evaluate(NewVal, this->Info, E->getRHS());
+ return false;
+ }
+
+ if (!Evaluate(NewVal, this->Info, E->getRHS()))
+ return false;
+
+ return handleAssignment(this->Info, E, Result, E->getLHS()->getType(),
+ NewVal);
+}
+
+//===----------------------------------------------------------------------===//
+// Pointer Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+class PointerExprEvaluator
+ : public ExprEvaluatorBase<PointerExprEvaluator> {
+ LValue &Result;
+
+ bool Success(const Expr *E) {
+ Result.set(E);
+ return true;
+ }
+public:
+
+ PointerExprEvaluator(EvalInfo &info, LValue &Result)
+ : ExprEvaluatorBaseTy(info), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *E) {
+ Result.setFrom(Info.Ctx, V);
+ return true;
+ }
+ bool ZeroInitialization(const Expr *E) {
+ return Success((Expr*)nullptr);
+ }
+
+ bool VisitBinaryOperator(const BinaryOperator *E);
+ bool VisitCastExpr(const CastExpr* E);
+ bool VisitUnaryAddrOf(const UnaryOperator *E);
+ bool VisitObjCStringLiteral(const ObjCStringLiteral *E)
+ { return Success(E); }
+ bool VisitObjCBoxedExpr(const ObjCBoxedExpr *E)
+ { return Success(E); }
+ bool VisitAddrLabelExpr(const AddrLabelExpr *E)
+ { return Success(E); }
+ bool VisitCallExpr(const CallExpr *E);
+ bool VisitBlockExpr(const BlockExpr *E) {
+ if (!E->getBlockDecl()->hasCaptures())
+ return Success(E);
+ return Error(E);
+ }
+ bool VisitCXXThisExpr(const CXXThisExpr *E) {
+ // Can't look at 'this' when checking a potential constant expression.
+ if (Info.checkingPotentialConstantExpression())
+ return false;
+ if (!Info.CurrentCall->This) {
+ if (Info.getLangOpts().CPlusPlus11)
+ Info.Diag(E, diag::note_constexpr_this) << E->isImplicit();
+ else
+ Info.Diag(E);
+ return false;
+ }
+ Result = *Info.CurrentCall->This;
+ return true;
+ }
+
+ // FIXME: Missing: @protocol, @selector
+};
+} // end anonymous namespace
+
+static bool EvaluatePointer(const Expr* E, LValue& Result, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->hasPointerRepresentation());
+ return PointerExprEvaluator(Info, Result).Visit(E);
+}
+
+bool PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->getOpcode() != BO_Add &&
+ E->getOpcode() != BO_Sub)
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+
+ const Expr *PExp = E->getLHS();
+ const Expr *IExp = E->getRHS();
+ if (IExp->getType()->isPointerType())
+ std::swap(PExp, IExp);
+
+ bool EvalPtrOK = EvaluatePointer(PExp, Result, Info);
+ if (!EvalPtrOK && !Info.keepEvaluatingAfterFailure())
+ return false;
+
+ llvm::APSInt Offset;
+ if (!EvaluateInteger(IExp, Offset, Info) || !EvalPtrOK)
+ return false;
+
+ int64_t AdditionalOffset = getExtValue(Offset);
+ if (E->getOpcode() == BO_Sub)
+ AdditionalOffset = -AdditionalOffset;
+
+ QualType Pointee = PExp->getType()->castAs<PointerType>()->getPointeeType();
+ return HandleLValueArrayAdjustment(Info, E, Result, Pointee,
+ AdditionalOffset);
+}
+
+bool PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
+ return EvaluateLValue(E->getSubExpr(), Result, Info);
+}
+
+bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
+ const Expr* SubExpr = E->getSubExpr();
+
+ switch (E->getCastKind()) {
+ default:
+ break;
+
+ case CK_BitCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_AddressSpaceConversion:
+ if (!Visit(SubExpr))
+ return false;
+ // Bitcasts to cv void* are static_casts, not reinterpret_casts, so are
+ // permitted in constant expressions in C++11. Bitcasts from cv void* are
+ // also static_casts, but we disallow them as a resolution to DR1312.
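+    //
+    // For example, given 'constexpr int i = 0;':
+    //
+    //   constexpr const void *p = &i;             // OK: T* -> cv void*
+    //   constexpr const int *q = (const int *)p;  // diagnosed: void* -> T*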
+ if (!E->getType()->isVoidPointerType()) {
+ Result.Designator.setInvalid();
+ if (SubExpr->getType()->isVoidPointerType())
+ CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << 3 << SubExpr->getType();
+ else
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
+ }
+ return true;
+
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ if (!EvaluatePointer(E->getSubExpr(), Result, Info))
+ return false;
+ if (!Result.Base && Result.Offset.isZero())
+ return true;
+
+ // Now figure out the necessary offset to add to the base LV to get from
+ // the derived class to the base class.
+ return HandleLValueBasePath(Info, E, E->getSubExpr()->getType()->
+ castAs<PointerType>()->getPointeeType(),
+ Result);
+
+ case CK_BaseToDerived:
+ if (!Visit(E->getSubExpr()))
+ return false;
+ if (!Result.Base && Result.Offset.isZero())
+ return true;
+ return HandleBaseToDerivedCast(Info, E, Result);
+
+ case CK_NullToPointer:
+ VisitIgnoredValue(E->getSubExpr());
+ return ZeroInitialization(E);
+
+ case CK_IntegralToPointer: {
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
+
+ APValue Value;
+ if (!EvaluateIntegerOrLValue(SubExpr, Value, Info))
+ break;
+
+ if (Value.isInt()) {
+ unsigned Size = Info.Ctx.getTypeSize(E->getType());
+ uint64_t N = Value.getInt().extOrTrunc(Size).getZExtValue();
+ Result.Base = (Expr*)nullptr;
+ Result.InvalidBase = false;
+ Result.Offset = CharUnits::fromQuantity(N);
+ Result.CallIndex = 0;
+ Result.Designator.setInvalid();
+ return true;
+ } else {
+ // Cast is of an lvalue, no need to change value.
+ Result.setFrom(Info.Ctx, Value);
+ return true;
+ }
+ }
+ case CK_ArrayToPointerDecay:
+ if (SubExpr->isGLValue()) {
+ if (!EvaluateLValue(SubExpr, Result, Info))
+ return false;
+ } else {
+ Result.set(SubExpr, Info.CurrentCall->Index);
+ if (!EvaluateInPlace(Info.CurrentCall->createTemporary(SubExpr, false),
+ Info, Result, SubExpr))
+ return false;
+ }
+ // The result is a pointer to the first element of the array.
+ if (const ConstantArrayType *CAT
+ = Info.Ctx.getAsConstantArrayType(SubExpr->getType()))
+ Result.addArray(Info, E, CAT);
+ else
+ Result.Designator.setInvalid();
+ return true;
+
+ case CK_FunctionToPointerDecay:
+ return EvaluateLValue(SubExpr, Result, Info);
+ }
+
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+}
+
+static CharUnits GetAlignOfType(EvalInfo &Info, QualType T) {
+ // C++ [expr.alignof]p3:
+ // When alignof is applied to a reference type, the result is the
+ // alignment of the referenced type.
+ if (const ReferenceType *Ref = T->getAs<ReferenceType>())
+ T = Ref->getPointeeType();
+
+ // __alignof is defined to return the preferred alignment.
+ return Info.Ctx.toCharUnitsFromBits(
+ Info.Ctx.getPreferredTypeAlign(T.getTypePtr()));
+}
+
+static CharUnits GetAlignOfExpr(EvalInfo &Info, const Expr *E) {
+ E = E->IgnoreParens();
+
+ // The kinds of expressions that we have special-case logic here for
+ // should be kept up to date with the special checks for those
+ // expressions in Sema.
+
+ // alignof decl is always accepted, even if it doesn't make sense: we default
+ // to 1 in those cases.
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ return Info.Ctx.getDeclAlign(DRE->getDecl(),
+ /*RefAsPointee*/true);
+
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(E))
+ return Info.Ctx.getDeclAlign(ME->getMemberDecl(),
+ /*RefAsPointee*/true);
+
+ return GetAlignOfType(Info, E->getType());
+}
+
+bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) {
+ if (IsStringLiteralCall(E))
+ return Success(E);
+
+ switch (E->getBuiltinCallee()) {
+ case Builtin::BI__builtin_addressof:
+ return EvaluateLValue(E->getArg(0), Result, Info);
+ case Builtin::BI__builtin_assume_aligned: {
+    // We need to be very careful here: if the pointer does not have the
+    // asserted alignment, then the behavior is undefined, and undefined
+    // behavior is non-constant.
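+    //
+    // For example, '__builtin_assume_aligned(&x, 16)' only folds when '&x'
+    // can be shown to be 16-byte aligned; an unprovable assumption makes
+    // the evaluation fail here rather than fold to an unsound constant.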
+ if (!EvaluatePointer(E->getArg(0), Result, Info))
+ return false;
+
+ LValue OffsetResult(Result);
+ APSInt Alignment;
+ if (!EvaluateInteger(E->getArg(1), Alignment, Info))
+ return false;
+ CharUnits Align = CharUnits::fromQuantity(getExtValue(Alignment));
+
+ if (E->getNumArgs() > 2) {
+ APSInt Offset;
+ if (!EvaluateInteger(E->getArg(2), Offset, Info))
+ return false;
+
+ int64_t AdditionalOffset = -getExtValue(Offset);
+ OffsetResult.Offset += CharUnits::fromQuantity(AdditionalOffset);
+ }
+
+ // If there is a base object, then it must have the correct alignment.
+ if (OffsetResult.Base) {
+ CharUnits BaseAlignment;
+ if (const ValueDecl *VD =
+ OffsetResult.Base.dyn_cast<const ValueDecl*>()) {
+ BaseAlignment = Info.Ctx.getDeclAlign(VD);
+ } else {
+ BaseAlignment =
+ GetAlignOfExpr(Info, OffsetResult.Base.get<const Expr*>());
+ }
+
+ if (BaseAlignment < Align) {
+ Result.Designator.setInvalid();
+ // FIXME: Quantities here cast to integers because the plural modifier
+ // does not work on APSInts yet.
+ CCEDiag(E->getArg(0),
+ diag::note_constexpr_baa_insufficient_alignment) << 0
+ << (int) BaseAlignment.getQuantity()
+ << (unsigned) getExtValue(Alignment);
+ return false;
+ }
+ }
+
+ // The offset must also have the correct alignment.
+ if (OffsetResult.Offset.RoundUpToAlignment(Align) != OffsetResult.Offset) {
+ Result.Designator.setInvalid();
+ APSInt Offset(64, false);
+ Offset = OffsetResult.Offset.getQuantity();
+
+ if (OffsetResult.Base)
+ CCEDiag(E->getArg(0),
+ diag::note_constexpr_baa_insufficient_alignment) << 1
+ << (int) getExtValue(Offset) << (unsigned) getExtValue(Alignment);
+ else
+ CCEDiag(E->getArg(0),
+ diag::note_constexpr_baa_value_insufficient_alignment)
+ << Offset << (unsigned) getExtValue(Alignment);
+
+ return false;
+ }
+
+ return true;
+ }
+ default:
+ return ExprEvaluatorBaseTy::VisitCallExpr(E);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Member Pointer Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+class MemberPointerExprEvaluator
+ : public ExprEvaluatorBase<MemberPointerExprEvaluator> {
+ MemberPtr &Result;
+
+ bool Success(const ValueDecl *D) {
+ Result = MemberPtr(D);
+ return true;
+ }
+public:
+
+ MemberPointerExprEvaluator(EvalInfo &Info, MemberPtr &Result)
+ : ExprEvaluatorBaseTy(Info), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *E) {
+ Result.setFrom(V);
+ return true;
+ }
+ bool ZeroInitialization(const Expr *E) {
+ return Success((const ValueDecl*)nullptr);
+ }
+
+ bool VisitCastExpr(const CastExpr *E);
+ bool VisitUnaryAddrOf(const UnaryOperator *E);
+};
+} // end anonymous namespace
+
+static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
+ EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isMemberPointerType());
+ return MemberPointerExprEvaluator(Info, Result).Visit(E);
+}
+
+bool MemberPointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_NullToMemberPointer:
+ VisitIgnoredValue(E->getSubExpr());
+ return ZeroInitialization(E);
+
+ case CK_BaseToDerivedMemberPointer: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+ if (E->path_empty())
+ return true;
+ // Base-to-derived member pointer casts store the path in derived-to-base
+ // order, so iterate backwards. The CXXBaseSpecifier also provides us with
+ // the wrong end of the derived->base arc, so stagger the path by one class.
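+    //
+    // For example, converting '&B::m' to 'int D::*' (with 'struct D : B')
+    // stores the path D -> B, which is therefore walked here in reverse.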
+ typedef std::reverse_iterator<CastExpr::path_const_iterator> ReverseIter;
+ for (ReverseIter PathI(E->path_end() - 1), PathE(E->path_begin());
+ PathI != PathE; ++PathI) {
+ assert(!(*PathI)->isVirtual() && "memptr cast through vbase");
+ const CXXRecordDecl *Derived = (*PathI)->getType()->getAsCXXRecordDecl();
+ if (!Result.castToDerived(Derived))
+ return Error(E);
+ }
+ const Type *FinalTy = E->getType()->castAs<MemberPointerType>()->getClass();
+ if (!Result.castToDerived(FinalTy->getAsCXXRecordDecl()))
+ return Error(E);
+ return true;
+ }
+
+ case CK_DerivedToBaseMemberPointer:
+ if (!Visit(E->getSubExpr()))
+ return false;
+ for (CastExpr::path_const_iterator PathI = E->path_begin(),
+ PathE = E->path_end(); PathI != PathE; ++PathI) {
+ assert(!(*PathI)->isVirtual() && "memptr cast through vbase");
+ const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl();
+ if (!Result.castToBase(Base))
+ return Error(E);
+ }
+ return true;
+ }
+}
+
+bool MemberPointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
+ // C++11 [expr.unary.op]p3 has very strict rules on how the address of a
+ // member can be formed.
+ return Success(cast<DeclRefExpr>(E->getSubExpr())->getDecl());
+}
+
+//===----------------------------------------------------------------------===//
+// Record Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class RecordExprEvaluator
+ : public ExprEvaluatorBase<RecordExprEvaluator> {
+ const LValue &This;
+ APValue &Result;
+ public:
+
+ RecordExprEvaluator(EvalInfo &info, const LValue &This, APValue &Result)
+ : ExprEvaluatorBaseTy(info), This(This), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *E) {
+ Result = V;
+ return true;
+ }
+ bool ZeroInitialization(const Expr *E);
+
+ bool VisitCallExpr(const CallExpr *E) {
+ return handleCallExpr(E, Result, &This);
+ }
+ bool VisitCastExpr(const CastExpr *E);
+ bool VisitInitListExpr(const InitListExpr *E);
+ bool VisitCXXConstructExpr(const CXXConstructExpr *E);
+ bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E);
+ };
+}
+
+/// Perform zero-initialization on an object of non-union class type.
+/// C++11 [dcl.init]p5:
+/// To zero-initialize an object or reference of type T means:
+/// [...]
+/// -- if T is a (possibly cv-qualified) non-union class type,
+/// each non-static data member and each base-class subobject is
+/// zero-initialized
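+/// For example, zero-initializing 'struct S : B { int x; }' recurses into
+/// the B base-class subobject and then zero-initializes the member 'x'.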
+static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
+ const RecordDecl *RD,
+ const LValue &This, APValue &Result) {
+ assert(!RD->isUnion() && "Expected non-union class type");
+ const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
+ Result = APValue(APValue::UninitStruct(), CD ? CD->getNumBases() : 0,
+ std::distance(RD->field_begin(), RD->field_end()));
+
+ if (RD->isInvalidDecl()) return false;
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+
+ if (CD) {
+ unsigned Index = 0;
+ for (CXXRecordDecl::base_class_const_iterator I = CD->bases_begin(),
+ End = CD->bases_end(); I != End; ++I, ++Index) {
+ const CXXRecordDecl *Base = I->getType()->getAsCXXRecordDecl();
+ LValue Subobject = This;
+ if (!HandleLValueDirectBase(Info, E, Subobject, CD, Base, &Layout))
+ return false;
+ if (!HandleClassZeroInitialization(Info, E, Base, Subobject,
+ Result.getStructBase(Index)))
+ return false;
+ }
+ }
+
+ for (const auto *I : RD->fields()) {
+ // -- if T is a reference type, no initialization is performed.
+ if (I->getType()->isReferenceType())
+ continue;
+
+ LValue Subobject = This;
+ if (!HandleLValueMember(Info, E, Subobject, I, &Layout))
+ return false;
+
+ ImplicitValueInitExpr VIE(I->getType());
+ if (!EvaluateInPlace(
+ Result.getStructField(I->getFieldIndex()), Info, Subobject, &VIE))
+ return false;
+ }
+
+ return true;
+}
+
+bool RecordExprEvaluator::ZeroInitialization(const Expr *E) {
+ const RecordDecl *RD = E->getType()->castAs<RecordType>()->getDecl();
+ if (RD->isInvalidDecl()) return false;
+ if (RD->isUnion()) {
+ // C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the
+ // object's first non-static named data member is zero-initialized
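+    // For example, zero-initializing 'union U { int a; float f; };'
+    // zero-initializes only 'a', the first named member.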
+ RecordDecl::field_iterator I = RD->field_begin();
+ if (I == RD->field_end()) {
+ Result = APValue((const FieldDecl*)nullptr);
+ return true;
+ }
+
+ LValue Subobject = This;
+ if (!HandleLValueMember(Info, E, Subobject, *I))
+ return false;
+ Result = APValue(*I);
+ ImplicitValueInitExpr VIE(I->getType());
+ return EvaluateInPlace(Result.getUnionValue(), Info, Subobject, &VIE);
+ }
+
+ if (isa<CXXRecordDecl>(RD) && cast<CXXRecordDecl>(RD)->getNumVBases()) {
+ Info.Diag(E, diag::note_constexpr_virtual_base) << RD;
+ return false;
+ }
+
+ return HandleClassZeroInitialization(Info, E, RD, This, Result);
+}
+
+bool RecordExprEvaluator::VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_ConstructorConversion:
+ return Visit(E->getSubExpr());
+
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase: {
+ APValue DerivedObject;
+ if (!Evaluate(DerivedObject, Info, E->getSubExpr()))
+ return false;
+ if (!DerivedObject.isStruct())
+ return Error(E->getSubExpr());
+
+ // Derived-to-base rvalue conversion: just slice off the derived part.
+ APValue *Value = &DerivedObject;
+ const CXXRecordDecl *RD = E->getSubExpr()->getType()->getAsCXXRecordDecl();
+ for (CastExpr::path_const_iterator PathI = E->path_begin(),
+ PathE = E->path_end(); PathI != PathE; ++PathI) {
+ assert(!(*PathI)->isVirtual() && "record rvalue with virtual base");
+ const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl();
+ Value = &Value->getStructBase(getBaseIndex(RD, Base));
+ RD = Base;
+ }
+ Result = *Value;
+ return true;
+ }
+ }
+}
+
+bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
+ const RecordDecl *RD = E->getType()->castAs<RecordType>()->getDecl();
+ if (RD->isInvalidDecl()) return false;
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+
+ if (RD->isUnion()) {
+ const FieldDecl *Field = E->getInitializedFieldInUnion();
+ Result = APValue(Field);
+ if (!Field)
+ return true;
+
+ // If the initializer list for a union does not contain any elements, the
+ // first element of the union is value-initialized.
+ // FIXME: The element should be initialized from an initializer list.
+ // Is this difference ever observable for initializer lists which
+ // we don't build?
+ ImplicitValueInitExpr VIE(Field->getType());
+ const Expr *InitExpr = E->getNumInits() ? E->getInit(0) : &VIE;
+
+ LValue Subobject = This;
+ if (!HandleLValueMember(Info, InitExpr, Subobject, Field, &Layout))
+ return false;
+
+ // Temporarily override This, in case there's a CXXDefaultInitExpr in here.
+ ThisOverrideRAII ThisOverride(*Info.CurrentCall, &This,
+ isa<CXXDefaultInitExpr>(InitExpr));
+
+ return EvaluateInPlace(Result.getUnionValue(), Info, Subobject, InitExpr);
+ }
+
+ assert((!isa<CXXRecordDecl>(RD) || !cast<CXXRecordDecl>(RD)->getNumBases()) &&
+ "initializer list for class with base classes");
+ Result = APValue(APValue::UninitStruct(), 0,
+ std::distance(RD->field_begin(), RD->field_end()));
+ unsigned ElementNo = 0;
+ bool Success = true;
+ for (const auto *Field : RD->fields()) {
+ // Anonymous bit-fields are not considered members of the class for
+ // purposes of aggregate initialization.
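+    // For example, in 'struct S { int a; int : 8; int b; };' the list
+    // '{1, 2}' initializes 'a' and 'b'; the unnamed bit-field is skipped.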
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ LValue Subobject = This;
+
+ bool HaveInit = ElementNo < E->getNumInits();
+
+ // FIXME: Diagnostics here should point to the end of the initializer
+ // list, not the start.
+ if (!HandleLValueMember(Info, HaveInit ? E->getInit(ElementNo) : E,
+ Subobject, Field, &Layout))
+ return false;
+
+ // Perform an implicit value-initialization for members beyond the end of
+ // the initializer list.
+ ImplicitValueInitExpr VIE(HaveInit ? Info.Ctx.IntTy : Field->getType());
+ const Expr *Init = HaveInit ? E->getInit(ElementNo++) : &VIE;
+
+ // Temporarily override This, in case there's a CXXDefaultInitExpr in here.
+ ThisOverrideRAII ThisOverride(*Info.CurrentCall, &This,
+ isa<CXXDefaultInitExpr>(Init));
+
+ APValue &FieldVal = Result.getStructField(Field->getFieldIndex());
+ if (!EvaluateInPlace(FieldVal, Info, Subobject, Init) ||
+ (Field->isBitField() && !truncateBitfieldValue(Info, Init,
+ FieldVal, Field))) {
+ if (!Info.keepEvaluatingAfterFailure())
+ return false;
+ Success = false;
+ }
+ }
+
+ return Success;
+}
+
+bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ const CXXConstructorDecl *FD = E->getConstructor();
+ if (FD->isInvalidDecl() || FD->getParent()->isInvalidDecl()) return false;
+
+ bool ZeroInit = E->requiresZeroInitialization();
+ if (CheckTrivialDefaultConstructor(Info, E->getExprLoc(), FD, ZeroInit)) {
+ // If we've already performed zero-initialization, we're already done.
+ if (!Result.isUninit())
+ return true;
+
+ // We can get here in two different ways:
+ // 1) We're performing value-initialization, and should zero-initialize
+ // the object, or
+ // 2) We're performing default-initialization of an object with a trivial
+ // constexpr default constructor, in which case we should start the
+ // lifetimes of all the base subobjects (there can be no data member
+ // subobjects in this case) per [basic.life]p1.
+ // Either way, ZeroInitialization is appropriate.
+ return ZeroInitialization(E);
+ }
+
+ const FunctionDecl *Definition = nullptr;
+ FD->getBody(Definition);
+
+ if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition))
+ return false;
+
+ // Avoid materializing a temporary for an elidable copy/move constructor.
+ if (E->isElidable() && !ZeroInit)
+ if (const MaterializeTemporaryExpr *ME
+ = dyn_cast<MaterializeTemporaryExpr>(E->getArg(0)))
+ return Visit(ME->GetTemporaryExpr());
+
+ if (ZeroInit && !ZeroInitialization(E))
+ return false;
+
+ auto Args = llvm::makeArrayRef(E->getArgs(), E->getNumArgs());
+ return HandleConstructorCall(E->getExprLoc(), This, Args,
+ cast<CXXConstructorDecl>(Definition), Info,
+ Result);
+}
+
+bool RecordExprEvaluator::VisitCXXStdInitializerListExpr(
+ const CXXStdInitializerListExpr *E) {
+ const ConstantArrayType *ArrayType =
+ Info.Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
+
+ LValue Array;
+ if (!EvaluateLValue(E->getSubExpr(), Array, Info))
+ return false;
+
+ // Get a pointer to the first element of the array.
+ Array.addArray(Info, E, ArrayType);
+
+ // FIXME: Perform the checks on the field types in SemaInit.
+ RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl::field_iterator Field = Record->field_begin();
+ if (Field == Record->field_end())
+ return Error(E);
+
+ // Start pointer.
+ if (!Field->getType()->isPointerType() ||
+ !Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
+ ArrayType->getElementType()))
+ return Error(E);
+
+ // FIXME: What if the initializer_list type has base classes, etc?
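+  // The checks below accept either layout: {T *begin, T *end} or
+  // {T *begin, size_t size}.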
+ Result = APValue(APValue::UninitStruct(), 0, 2);
+ Array.moveInto(Result.getStructField(0));
+
+ if (++Field == Record->field_end())
+ return Error(E);
+
+ if (Field->getType()->isPointerType() &&
+ Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
+ ArrayType->getElementType())) {
+ // End pointer.
+ if (!HandleLValueArrayAdjustment(Info, E, Array,
+ ArrayType->getElementType(),
+ ArrayType->getSize().getZExtValue()))
+ return false;
+ Array.moveInto(Result.getStructField(1));
+ } else if (Info.Ctx.hasSameType(Field->getType(), Info.Ctx.getSizeType()))
+ // Length.
+ Result.getStructField(1) = APValue(APSInt(ArrayType->getSize()));
+ else
+ return Error(E);
+
+ if (++Field != Record->field_end())
+ return Error(E);
+
+ return true;
+}
+
+static bool EvaluateRecord(const Expr *E, const LValue &This,
+ APValue &Result, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isRecordType() &&
+ "can't evaluate expression as a record rvalue");
+ return RecordExprEvaluator(Info, This, Result).Visit(E);
+}
+
+//===----------------------------------------------------------------------===//
+// Temporary Evaluation
+//
+// Temporaries are represented in the AST as rvalues, but generally behave like
+// lvalues. The full-object of which the temporary is a subobject is implicitly
+// materialized so that a reference can bind to it.
+//===----------------------------------------------------------------------===//
+namespace {
+class TemporaryExprEvaluator
+ : public LValueExprEvaluatorBase<TemporaryExprEvaluator> {
+public:
+ TemporaryExprEvaluator(EvalInfo &Info, LValue &Result) :
+ LValueExprEvaluatorBaseTy(Info, Result) {}
+
+ /// Visit an expression which constructs the value of this temporary.
+ bool VisitConstructExpr(const Expr *E) {
+ Result.set(E, Info.CurrentCall->Index);
+ return EvaluateInPlace(Info.CurrentCall->createTemporary(E, false),
+ Info, Result, E);
+ }
+
+ bool VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return LValueExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_ConstructorConversion:
+ return VisitConstructExpr(E->getSubExpr());
+ }
+ }
+ bool VisitInitListExpr(const InitListExpr *E) {
+ return VisitConstructExpr(E);
+ }
+ bool VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ return VisitConstructExpr(E);
+ }
+ bool VisitCallExpr(const CallExpr *E) {
+ return VisitConstructExpr(E);
+ }
+ bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E) {
+ return VisitConstructExpr(E);
+ }
+};
+} // end anonymous namespace
+
+/// Evaluate an expression of record type as a temporary.
+static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isRecordType());
+ return TemporaryExprEvaluator(Info, Result).Visit(E);
+}
+
+//===----------------------------------------------------------------------===//
+// Vector Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class VectorExprEvaluator
+ : public ExprEvaluatorBase<VectorExprEvaluator> {
+ APValue &Result;
+ public:
+
+ VectorExprEvaluator(EvalInfo &info, APValue &Result)
+ : ExprEvaluatorBaseTy(info), Result(Result) {}
+
+ bool Success(ArrayRef<APValue> V, const Expr *E) {
+ assert(V.size() == E->getType()->castAs<VectorType>()->getNumElements());
+ // FIXME: remove this APValue copy.
+ Result = APValue(V.data(), V.size());
+ return true;
+ }
+ bool Success(const APValue &V, const Expr *E) {
+ assert(V.isVector());
+ Result = V;
+ return true;
+ }
+ bool ZeroInitialization(const Expr *E);
+
+ bool VisitUnaryReal(const UnaryOperator *E)
+ { return Visit(E->getSubExpr()); }
+ bool VisitCastExpr(const CastExpr* E);
+ bool VisitInitListExpr(const InitListExpr *E);
+ bool VisitUnaryImag(const UnaryOperator *E);
+ // FIXME: Missing: unary -, unary ~, binary add/sub/mul/div,
+ // binary comparisons, binary and/or/xor,
+ // shufflevector, ExtVectorElementExpr
+ };
+} // end anonymous namespace
+
+static bool EvaluateVector(const Expr* E, APValue& Result, EvalInfo &Info) {
+  assert(E->isRValue() && E->getType()->isVectorType() &&
+         "not a vector rvalue");
+ return VectorExprEvaluator(Info, Result).Visit(E);
+}
+
+bool VectorExprEvaluator::VisitCastExpr(const CastExpr *E) {
+ const VectorType *VTy = E->getType()->castAs<VectorType>();
+ unsigned NElts = VTy->getNumElements();
+
+ const Expr *SE = E->getSubExpr();
+ QualType SETy = SE->getType();
+
+ switch (E->getCastKind()) {
+ case CK_VectorSplat: {
+ APValue Val = APValue();
+ if (SETy->isIntegerType()) {
+ APSInt IntResult;
+ if (!EvaluateInteger(SE, IntResult, Info))
+ return false;
+ Val = APValue(std::move(IntResult));
+ } else if (SETy->isRealFloatingType()) {
+ APFloat FloatResult(0.0);
+ if (!EvaluateFloat(SE, FloatResult, Info))
+ return false;
+ Val = APValue(std::move(FloatResult));
+ } else {
+ return Error(E);
+ }
+
+ // Splat and create vector APValue.
+ SmallVector<APValue, 4> Elts(NElts, Val);
+ return Success(Elts, E);
+ }
+ case CK_BitCast: {
+ // Evaluate the operand into an APInt we can extract from.
+ llvm::APInt SValInt;
+ if (!EvalAndBitcastToAPInt(Info, SE, SValInt))
+ return false;
+ // Extract the elements
+ QualType EltTy = VTy->getElementType();
+ unsigned EltSize = Info.Ctx.getTypeSize(EltTy);
+ bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();
+ SmallVector<APValue, 4> Elts;
+ if (EltTy->isRealFloatingType()) {
+ const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(EltTy);
+ unsigned FloatEltSize = EltSize;
+ if (&Sem == &APFloat::x87DoubleExtended)
+ FloatEltSize = 80;
+ for (unsigned i = 0; i < NElts; i++) {
+ llvm::APInt Elt;
+ if (BigEndian)
+ Elt = SValInt.rotl(i*EltSize+FloatEltSize).trunc(FloatEltSize);
+ else
+ Elt = SValInt.rotr(i*EltSize).trunc(FloatEltSize);
+ Elts.push_back(APValue(APFloat(Sem, Elt)));
+ }
+ } else if (EltTy->isIntegerType()) {
+ for (unsigned i = 0; i < NElts; i++) {
+ llvm::APInt Elt;
+ if (BigEndian)
+ Elt = SValInt.rotl(i*EltSize+EltSize).zextOrTrunc(EltSize);
+ else
+ Elt = SValInt.rotr(i*EltSize).zextOrTrunc(EltSize);
+ Elts.push_back(APValue(APSInt(Elt, EltTy->isSignedIntegerType())));
+ }
+ } else {
+ return Error(E);
+ }
+ return Success(Elts, E);
+ }
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+ }
+}
+
+bool
+VectorExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
+ const VectorType *VT = E->getType()->castAs<VectorType>();
+ unsigned NumInits = E->getNumInits();
+ unsigned NumElements = VT->getNumElements();
+
+ QualType EltTy = VT->getElementType();
+ SmallVector<APValue, 4> Elements;
+
+ // The number of initializers can be less than the number of
+ // vector elements. For OpenCL, this can be due to nested vector
+ // initialization. For GCC compatibility, missing trailing elements
+ // should be initialized with zeroes.
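+  // For instance, with the GCC vector extension (an illustrative sketch, not
+  // from the original sources):
+  //   typedef int v4si __attribute__((vector_size(16)));
+  //   v4si v = {1, 2}; // elements 2 and 3 are zero-initialized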
+ unsigned CountInits = 0, CountElts = 0;
+ while (CountElts < NumElements) {
+ // Handle nested vector initialization.
+ if (CountInits < NumInits
+ && E->getInit(CountInits)->getType()->isVectorType()) {
+ APValue v;
+ if (!EvaluateVector(E->getInit(CountInits), v, Info))
+ return Error(E);
+ unsigned vlen = v.getVectorLength();
+ for (unsigned j = 0; j < vlen; j++)
+ Elements.push_back(v.getVectorElt(j));
+ CountElts += vlen;
+ } else if (EltTy->isIntegerType()) {
+ llvm::APSInt sInt(32);
+ if (CountInits < NumInits) {
+ if (!EvaluateInteger(E->getInit(CountInits), sInt, Info))
+ return false;
+ } else // trailing integer zero.
+ sInt = Info.Ctx.MakeIntValue(0, EltTy);
+ Elements.push_back(APValue(sInt));
+ CountElts++;
+ } else {
+ llvm::APFloat f(0.0);
+ if (CountInits < NumInits) {
+ if (!EvaluateFloat(E->getInit(CountInits), f, Info))
+ return false;
+ } else // trailing float zero.
+ f = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy));
+ Elements.push_back(APValue(f));
+ CountElts++;
+ }
+ CountInits++;
+ }
+ return Success(Elements, E);
+}
+
+bool
+VectorExprEvaluator::ZeroInitialization(const Expr *E) {
+ const VectorType *VT = E->getType()->getAs<VectorType>();
+ QualType EltTy = VT->getElementType();
+ APValue ZeroElement;
+ if (EltTy->isIntegerType())
+ ZeroElement = APValue(Info.Ctx.MakeIntValue(0, EltTy));
+ else
+ ZeroElement =
+ APValue(APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy)));
+
+ SmallVector<APValue, 4> Elements(VT->getNumElements(), ZeroElement);
+ return Success(Elements, E);
+}
+
+bool VectorExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
+ VisitIgnoredValue(E->getSubExpr());
+ return ZeroInitialization(E);
+}
+
+//===----------------------------------------------------------------------===//
+// Array Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class ArrayExprEvaluator
+ : public ExprEvaluatorBase<ArrayExprEvaluator> {
+ const LValue &This;
+ APValue &Result;
+ public:
+
+ ArrayExprEvaluator(EvalInfo &Info, const LValue &This, APValue &Result)
+ : ExprEvaluatorBaseTy(Info), This(This), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *E) {
+ assert((V.isArray() || V.isLValue()) &&
+ "expected array or string literal");
+ Result = V;
+ return true;
+ }
+
+ bool ZeroInitialization(const Expr *E) {
+ const ConstantArrayType *CAT =
+ Info.Ctx.getAsConstantArrayType(E->getType());
+ if (!CAT)
+ return Error(E);
+
+ Result = APValue(APValue::UninitArray(), 0,
+ CAT->getSize().getZExtValue());
+ if (!Result.hasArrayFiller()) return true;
+
+ // Zero-initialize all elements.
+ LValue Subobject = This;
+ Subobject.addArray(Info, E, CAT);
+ ImplicitValueInitExpr VIE(CAT->getElementType());
+ return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, &VIE);
+ }
+
+ bool VisitCallExpr(const CallExpr *E) {
+ return handleCallExpr(E, Result, &This);
+ }
+ bool VisitInitListExpr(const InitListExpr *E);
+ bool VisitCXXConstructExpr(const CXXConstructExpr *E);
+ bool VisitCXXConstructExpr(const CXXConstructExpr *E,
+ const LValue &Subobject,
+ APValue *Value, QualType Type);
+ };
+} // end anonymous namespace
+
+static bool EvaluateArray(const Expr *E, const LValue &This,
+ APValue &Result, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isArrayType() && "not an array rvalue");
+ return ArrayExprEvaluator(Info, This, Result).Visit(E);
+}
+
+bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
+ const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(E->getType());
+ if (!CAT)
+ return Error(E);
+
+ // C++11 [dcl.init.string]p1: A char array [...] can be initialized by [...]
+ // an appropriately-typed string literal enclosed in braces.
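+  // For example (illustrative): char buf[4] = {"abc"}; takes the string
+  // literal path below rather than element-by-element evaluation.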
+ if (E->isStringLiteralInit()) {
+ LValue LV;
+ if (!EvaluateLValue(E->getInit(0), LV, Info))
+ return false;
+ APValue Val;
+ LV.moveInto(Val);
+ return Success(Val, E);
+ }
+
+ bool Success = true;
+
+ assert((!Result.isArray() || Result.getArrayInitializedElts() == 0) &&
+ "zero-initialized array shouldn't have any initialized elts");
+ APValue Filler;
+ if (Result.isArray() && Result.hasArrayFiller())
+ Filler = Result.getArrayFiller();
+
+ unsigned NumEltsToInit = E->getNumInits();
+ unsigned NumElts = CAT->getSize().getZExtValue();
+ const Expr *FillerExpr = E->hasArrayFiller() ? E->getArrayFiller() : nullptr;
+
+ // If the initializer might depend on the array index, run it for each
+ // array element. For now, just whitelist non-class value-initialization.
+ if (NumEltsToInit != NumElts && !isa<ImplicitValueInitExpr>(FillerExpr))
+ NumEltsToInit = NumElts;
+
+ Result = APValue(APValue::UninitArray(), NumEltsToInit, NumElts);
+
+ // If the array was previously zero-initialized, preserve the
+ // zero-initialized values.
+ if (!Filler.isUninit()) {
+ for (unsigned I = 0, E = Result.getArrayInitializedElts(); I != E; ++I)
+ Result.getArrayInitializedElt(I) = Filler;
+ if (Result.hasArrayFiller())
+ Result.getArrayFiller() = Filler;
+ }
+
+ LValue Subobject = This;
+ Subobject.addArray(Info, E, CAT);
+ for (unsigned Index = 0; Index != NumEltsToInit; ++Index) {
+ const Expr *Init =
+ Index < E->getNumInits() ? E->getInit(Index) : FillerExpr;
+ if (!EvaluateInPlace(Result.getArrayInitializedElt(Index),
+ Info, Subobject, Init) ||
+ !HandleLValueArrayAdjustment(Info, Init, Subobject,
+ CAT->getElementType(), 1)) {
+ if (!Info.keepEvaluatingAfterFailure())
+ return false;
+ Success = false;
+ }
+ }
+
+ if (!Result.hasArrayFiller())
+ return Success;
+
+ // If we get here, we have a trivial filler, which we can just evaluate
+ // once and splat over the rest of the array elements.
+ assert(FillerExpr && "no array filler for incomplete init list");
+ return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject,
+ FillerExpr) && Success;
+}
+
+bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ return VisitCXXConstructExpr(E, This, &Result, E->getType());
+}
+
+bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
+ const LValue &Subobject,
+ APValue *Value,
+ QualType Type) {
+ bool HadZeroInit = !Value->isUninit();
+
+ if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(Type)) {
+ unsigned N = CAT->getSize().getZExtValue();
+
+ // Preserve the array filler if we had prior zero-initialization.
+ APValue Filler =
+ HadZeroInit && Value->hasArrayFiller() ? Value->getArrayFiller()
+ : APValue();
+
+ *Value = APValue(APValue::UninitArray(), N, N);
+
+ if (HadZeroInit)
+ for (unsigned I = 0; I != N; ++I)
+ Value->getArrayInitializedElt(I) = Filler;
+
+ // Initialize the elements.
+ LValue ArrayElt = Subobject;
+ ArrayElt.addArray(Info, E, CAT);
+ for (unsigned I = 0; I != N; ++I)
+ if (!VisitCXXConstructExpr(E, ArrayElt, &Value->getArrayInitializedElt(I),
+ CAT->getElementType()) ||
+ !HandleLValueArrayAdjustment(Info, E, ArrayElt,
+ CAT->getElementType(), 1))
+ return false;
+
+ return true;
+ }
+
+ if (!Type->isRecordType())
+ return Error(E);
+
+ const CXXConstructorDecl *FD = E->getConstructor();
+
+ bool ZeroInit = E->requiresZeroInitialization();
+ if (CheckTrivialDefaultConstructor(Info, E->getExprLoc(), FD, ZeroInit)) {
+ if (HadZeroInit)
+ return true;
+
+ // See RecordExprEvaluator::VisitCXXConstructExpr for explanation.
+ ImplicitValueInitExpr VIE(Type);
+ return EvaluateInPlace(*Value, Info, Subobject, &VIE);
+ }
+
+ const FunctionDecl *Definition = nullptr;
+ FD->getBody(Definition);
+
+ if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition))
+ return false;
+
+ if (ZeroInit && !HadZeroInit) {
+ ImplicitValueInitExpr VIE(Type);
+ if (!EvaluateInPlace(*Value, Info, Subobject, &VIE))
+ return false;
+ }
+
+ auto Args = llvm::makeArrayRef(E->getArgs(), E->getNumArgs());
+ return HandleConstructorCall(E->getExprLoc(), Subobject, Args,
+ cast<CXXConstructorDecl>(Definition),
+ Info, *Value);
+}
+
+//===----------------------------------------------------------------------===//
+// Integer Evaluation
+//
+// As a GNU extension, we support casting pointers to sufficiently-wide integer
+// types and back in constant folding. Integer values are thus represented
+// either as an integer-valued APValue, or as an lvalue-valued APValue.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class IntExprEvaluator
+ : public ExprEvaluatorBase<IntExprEvaluator> {
+ APValue &Result;
+public:
+ IntExprEvaluator(EvalInfo &info, APValue &result)
+ : ExprEvaluatorBaseTy(info), Result(result) {}
+
+ bool Success(const llvm::APSInt &SI, const Expr *E, APValue &Result) {
+ assert(E->getType()->isIntegralOrEnumerationType() &&
+ "Invalid evaluation result.");
+ assert(SI.isSigned() == E->getType()->isSignedIntegerOrEnumerationType() &&
+ "Invalid evaluation result.");
+ assert(SI.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
+ "Invalid evaluation result.");
+ Result = APValue(SI);
+ return true;
+ }
+ bool Success(const llvm::APSInt &SI, const Expr *E) {
+ return Success(SI, E, Result);
+ }
+
+ bool Success(const llvm::APInt &I, const Expr *E, APValue &Result) {
+ assert(E->getType()->isIntegralOrEnumerationType() &&
+ "Invalid evaluation result.");
+ assert(I.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
+ "Invalid evaluation result.");
+ Result = APValue(APSInt(I));
+ Result.getInt().setIsUnsigned(
+ E->getType()->isUnsignedIntegerOrEnumerationType());
+ return true;
+ }
+ bool Success(const llvm::APInt &I, const Expr *E) {
+ return Success(I, E, Result);
+ }
+
+ bool Success(uint64_t Value, const Expr *E, APValue &Result) {
+ assert(E->getType()->isIntegralOrEnumerationType() &&
+ "Invalid evaluation result.");
+ Result = APValue(Info.Ctx.MakeIntValue(Value, E->getType()));
+ return true;
+ }
+ bool Success(uint64_t Value, const Expr *E) {
+ return Success(Value, E, Result);
+ }
+
+ bool Success(CharUnits Size, const Expr *E) {
+ return Success(Size.getQuantity(), E);
+ }
+
+ bool Success(const APValue &V, const Expr *E) {
+ if (V.isLValue() || V.isAddrLabelDiff()) {
+ Result = V;
+ return true;
+ }
+ return Success(V.getInt(), E);
+ }
+
+ bool ZeroInitialization(const Expr *E) { return Success(0, E); }
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ bool VisitIntegerLiteral(const IntegerLiteral *E) {
+ return Success(E->getValue(), E);
+ }
+ bool VisitCharacterLiteral(const CharacterLiteral *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool CheckReferencedDecl(const Expr *E, const Decl *D);
+ bool VisitDeclRefExpr(const DeclRefExpr *E) {
+ if (CheckReferencedDecl(E, E->getDecl()))
+ return true;
+
+ return ExprEvaluatorBaseTy::VisitDeclRefExpr(E);
+ }
+ bool VisitMemberExpr(const MemberExpr *E) {
+ if (CheckReferencedDecl(E, E->getMemberDecl())) {
+ VisitIgnoredValue(E->getBase());
+ return true;
+ }
+
+ return ExprEvaluatorBaseTy::VisitMemberExpr(E);
+ }
+
+ bool VisitCallExpr(const CallExpr *E);
+ bool VisitBinaryOperator(const BinaryOperator *E);
+ bool VisitOffsetOfExpr(const OffsetOfExpr *E);
+ bool VisitUnaryOperator(const UnaryOperator *E);
+
+ bool VisitCastExpr(const CastExpr* E);
+ bool VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
+
+ bool VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
+  // Note: GNU defines __null as an integer, not a pointer.
+ bool VisitGNUNullExpr(const GNUNullExpr *E) {
+ return ZeroInitialization(E);
+ }
+
+ bool VisitTypeTraitExpr(const TypeTraitExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool VisitUnaryReal(const UnaryOperator *E);
+ bool VisitUnaryImag(const UnaryOperator *E);
+
+ bool VisitCXXNoexceptExpr(const CXXNoexceptExpr *E);
+ bool VisitSizeOfPackExpr(const SizeOfPackExpr *E);
+
+private:
+ bool TryEvaluateBuiltinObjectSize(const CallExpr *E, unsigned Type);
+ // FIXME: Missing: array subscript of vector, member of vector
+};
+} // end anonymous namespace
+
+/// EvaluateIntegerOrLValue - Evaluate an rvalue integral-typed expression, and
+/// produce either the integer value or a pointer.
+///
+/// GCC has a heinous extension which folds casts between pointer types and
+/// pointer-sized integral types. We support this by allowing the evaluation of
+/// an integer rvalue to produce a pointer (represented as an lvalue) instead.
+/// Some simple arithmetic on such values is supported (they are treated much
+/// like char*).
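+///
+/// For example (an illustrative sketch): in "(unsigned long)&x + 4", the cast
+/// operand folds to an lvalue designating x, and the addition is then applied
+/// to the lvalue's offset, much as it would be for a char* value.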
+static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
+ EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isIntegralOrEnumerationType());
+ return IntExprEvaluator(Info, Result).Visit(E);
+}
+
+static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info) {
+ APValue Val;
+ if (!EvaluateIntegerOrLValue(E, Val, Info))
+ return false;
+ if (!Val.isInt()) {
+ // FIXME: It would be better to produce the diagnostic for casting
+ // a pointer to an integer.
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ Result = Val.getInt();
+ return true;
+}
+
+/// Check whether the given declaration can be directly converted to an integral
+/// rvalue. If not, no diagnostic is produced; there are other things we can
+/// try.
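+///
+/// For example (illustrative): given "enum E { A = 3 };", a DeclRefExpr
+/// naming A folds directly to the value 3 through this path.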
+bool IntExprEvaluator::CheckReferencedDecl(const Expr* E, const Decl* D) {
+ // Enums are integer constant exprs.
+ if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(D)) {
+ // Check for signedness/width mismatches between E type and ECD value.
+ bool SameSign = (ECD->getInitVal().isSigned()
+ == E->getType()->isSignedIntegerOrEnumerationType());
+ bool SameWidth = (ECD->getInitVal().getBitWidth()
+ == Info.Ctx.getIntWidth(E->getType()));
+ if (SameSign && SameWidth)
+ return Success(ECD->getInitVal(), E);
+ else {
+ // Get rid of mismatch (otherwise Success assertions will fail)
+ // by computing a new value matching the type of E.
+ llvm::APSInt Val = ECD->getInitVal();
+ if (!SameSign)
+ Val.setIsSigned(!ECD->getInitVal().isSigned());
+ if (!SameWidth)
+ Val = Val.extOrTrunc(Info.Ctx.getIntWidth(E->getType()));
+ return Success(Val, E);
+ }
+ }
+ return false;
+}
+
+/// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way
+/// as GCC.
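+///
+/// Illustrative folds, in terms of the enum below (a sketch, not exhaustive):
+///   __builtin_classify_type(0)        -> integer_type_class
+///   __builtin_classify_type((void*)0) -> pointer_type_class
+///   __builtin_classify_type(0.0)      -> real_type_class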
+static int EvaluateBuiltinClassifyType(const CallExpr *E) {
+ // The following enum mimics the values returned by GCC.
+  // FIXME: Does GCC differentiate between lvalue and rvalue references here?
+ enum gcc_type_class {
+ no_type_class = -1,
+ void_type_class, integer_type_class, char_type_class,
+ enumeral_type_class, boolean_type_class,
+ pointer_type_class, reference_type_class, offset_type_class,
+ real_type_class, complex_type_class,
+ function_type_class, method_type_class,
+ record_type_class, union_type_class,
+ array_type_class, string_type_class,
+ lang_type_class
+ };
+
+  // If no argument was supplied, default to "no_type_class". This isn't
+  // ideal, but it matches what GCC does.
+ if (E->getNumArgs() == 0)
+ return no_type_class;
+
+ QualType ArgTy = E->getArg(0)->getType();
+ if (ArgTy->isVoidType())
+ return void_type_class;
+ else if (ArgTy->isEnumeralType())
+ return enumeral_type_class;
+ else if (ArgTy->isBooleanType())
+ return boolean_type_class;
+ else if (ArgTy->isCharType())
+ return string_type_class; // gcc doesn't appear to use char_type_class
+ else if (ArgTy->isIntegerType())
+ return integer_type_class;
+ else if (ArgTy->isPointerType())
+ return pointer_type_class;
+ else if (ArgTy->isReferenceType())
+ return reference_type_class;
+ else if (ArgTy->isRealType())
+ return real_type_class;
+ else if (ArgTy->isComplexType())
+ return complex_type_class;
+ else if (ArgTy->isFunctionType())
+ return function_type_class;
+ else if (ArgTy->isStructureOrClassType())
+ return record_type_class;
+ else if (ArgTy->isUnionType())
+ return union_type_class;
+ else if (ArgTy->isArrayType())
+ return array_type_class;
+ else // FIXME: offset_type_class, method_type_class, & lang_type_class?
+ llvm_unreachable("CallExpr::isBuiltinClassifyType(): unimplemented type");
+}
+
+/// EvaluateBuiltinConstantPForLValue - Determine the result of
+/// __builtin_constant_p when applied to the given lvalue.
+///
+/// An lvalue is only "constant" if it is a pointer or reference to the first
+/// character of a string literal.
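+///
+/// Under that rule (an illustrative sketch): __builtin_constant_p("foo")
+/// evaluates to 1, while __builtin_constant_p("foo" + 1) evaluates to 0,
+/// since the latter no longer points at the first character.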
+template<typename LValue>
+static bool EvaluateBuiltinConstantPForLValue(const LValue &LV) {
+ const Expr *E = LV.getLValueBase().template dyn_cast<const Expr*>();
+ return E && isa<StringLiteral>(E) && LV.getLValueOffset().isZero();
+}
+
+/// EvaluateBuiltinConstantP - Evaluate __builtin_constant_p as similarly to
+/// GCC as we can manage.
+static bool EvaluateBuiltinConstantP(ASTContext &Ctx, const Expr *Arg) {
+ QualType ArgType = Arg->getType();
+
+ // __builtin_constant_p always has one operand. The rules which gcc follows
+ // are not precisely documented, but are as follows:
+ //
+ // - If the operand is of integral, floating, complex or enumeration type,
+ // and can be folded to a known value of that type, it returns 1.
+  //  - If the operand can be folded to a pointer to the first character
+ // of a string literal (or such a pointer cast to an integral type), it
+ // returns 1.
+ //
+ // Otherwise, it returns 0.
+ //
+ // FIXME: GCC also intends to return 1 for literals of aggregate types, but
+ // its support for this does not currently work.
+ if (ArgType->isIntegralOrEnumerationType()) {
+ Expr::EvalResult Result;
+ if (!Arg->EvaluateAsRValue(Result, Ctx) || Result.HasSideEffects)
+ return false;
+
+ APValue &V = Result.Val;
+ if (V.getKind() == APValue::Int)
+ return true;
+ if (V.getKind() == APValue::LValue)
+ return EvaluateBuiltinConstantPForLValue(V);
+ } else if (ArgType->isFloatingType() || ArgType->isAnyComplexType()) {
+ return Arg->isEvaluatable(Ctx);
+ } else if (ArgType->isPointerType() || Arg->isGLValue()) {
+ LValue LV;
+ Expr::EvalStatus Status;
+ EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantFold);
+ if ((Arg->isGLValue() ? EvaluateLValue(Arg, LV, Info)
+ : EvaluatePointer(Arg, LV, Info)) &&
+ !Status.HasSideEffects)
+ return EvaluateBuiltinConstantPForLValue(LV);
+ }
+
+ // Anything else isn't considered to be sufficiently constant.
+ return false;
+}
+
+/// Retrieves the "underlying object type" of the given expression,
+/// as used by __builtin_object_size.
+static QualType getObjectType(APValue::LValueBase B) {
+ if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ return VD->getType();
+ } else if (const Expr *E = B.get<const Expr*>()) {
+ if (isa<CompoundLiteralExpr>(E))
+ return E->getType();
+ }
+
+ return QualType();
+}
+
+/// A more selective version of E->IgnoreParenCasts for
+/// TryEvaluateBuiltinObjectSize. This ignores some casts/parens that serve only
+/// to change the type of E.
+/// Ex. For E = `(short*)((char*)(&foo))`, returns `&foo`
+///
+/// Always returns an RValue with a pointer representation.
+static const Expr *ignorePointerCastsAndParens(const Expr *E) {
+ assert(E->isRValue() && E->getType()->hasPointerRepresentation());
+
+ auto *NoParens = E->IgnoreParens();
+ auto *Cast = dyn_cast<CastExpr>(NoParens);
+ if (Cast == nullptr)
+ return NoParens;
+
+ // We only conservatively allow a few kinds of casts, because this code is
+ // inherently a simple solution that seeks to support the common case.
+ auto CastKind = Cast->getCastKind();
+ if (CastKind != CK_NoOp && CastKind != CK_BitCast &&
+ CastKind != CK_AddressSpaceConversion)
+ return NoParens;
+
+ auto *SubExpr = Cast->getSubExpr();
+ if (!SubExpr->getType()->hasPointerRepresentation() || !SubExpr->isRValue())
+ return NoParens;
+ return ignorePointerCastsAndParens(SubExpr);
+}
+
+/// Checks to see if the given LValue's Designator is at the end of the LValue's
+/// record layout. e.g.
+/// struct { struct { int a, b; } fst, snd; } obj;
+/// obj.fst // no
+/// obj.snd // yes
+/// obj.fst.a // no
+/// obj.fst.b // no
+/// obj.snd.a // no
+/// obj.snd.b // yes
+///
+/// Please note: this function is specialized for how __builtin_object_size
+/// views "objects".
+static bool isDesignatorAtObjectEnd(const ASTContext &Ctx, const LValue &LVal) {
+ assert(!LVal.Designator.Invalid);
+
+ auto IsLastFieldDecl = [&Ctx](const FieldDecl *FD) {
+ if (FD->getParent()->isUnion())
+ return true;
+ const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent());
+ return FD->getFieldIndex() + 1 == Layout.getFieldCount();
+ };
+
+ auto &Base = LVal.getLValueBase();
+ if (auto *ME = dyn_cast_or_null<MemberExpr>(Base.dyn_cast<const Expr *>())) {
+ if (auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ if (!IsLastFieldDecl(FD))
+ return false;
+ } else if (auto *IFD = dyn_cast<IndirectFieldDecl>(ME->getMemberDecl())) {
+ for (auto *FD : IFD->chain())
+ if (!IsLastFieldDecl(cast<FieldDecl>(FD)))
+ return false;
+ }
+ }
+
+ QualType BaseType = getType(Base);
+ for (int I = 0, E = LVal.Designator.Entries.size(); I != E; ++I) {
+ if (BaseType->isArrayType()) {
+ // Because __builtin_object_size treats arrays as objects, we can ignore
+ // the index iff this is the last array in the Designator.
+ if (I + 1 == E)
+ return true;
+ auto *CAT = cast<ConstantArrayType>(Ctx.getAsArrayType(BaseType));
+ uint64_t Index = LVal.Designator.Entries[I].ArrayIndex;
+ if (Index + 1 != CAT->getSize())
+ return false;
+ BaseType = CAT->getElementType();
+ } else if (BaseType->isAnyComplexType()) {
+ auto *CT = BaseType->castAs<ComplexType>();
+ uint64_t Index = LVal.Designator.Entries[I].ArrayIndex;
+ if (Index != 1)
+ return false;
+ BaseType = CT->getElementType();
+ } else if (auto *FD = getAsField(LVal.Designator.Entries[I])) {
+ if (!IsLastFieldDecl(FD))
+ return false;
+ BaseType = FD->getType();
+ } else {
+ assert(getAsBaseClass(LVal.Designator.Entries[I]) != nullptr &&
+ "Expecting cast to a base class");
+ return false;
+ }
+ }
+ return true;
+}
+
+/// Tests whether the LValue refers to the complete object, i.e. it has no
+/// designator entries and its base has not been invalidated.
+static bool refersToCompleteObject(const LValue &LVal) {
+ if (LVal.Designator.Invalid || !LVal.Designator.Entries.empty())
+ return false;
+
+ if (!LVal.InvalidBase)
+ return true;
+
+ auto *E = LVal.Base.dyn_cast<const Expr *>();
+ (void)E;
+ assert(E != nullptr && isa<MemberExpr>(E));
+ return false;
+}
+
+/// Tries to evaluate the __builtin_object_size for @p E. If successful, returns
+/// true and stores the result in @p Size.
+///
+/// If @p WasError is non-null, this will report whether the failure to evaluate
+/// is to be treated as an Error in IntExprEvaluator.
+static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
+ EvalInfo &Info, uint64_t &Size,
+ bool *WasError = nullptr) {
+ if (WasError != nullptr)
+ *WasError = false;
+
+ auto Error = [&](const Expr *E) {
+ if (WasError != nullptr)
+ *WasError = true;
+ return false;
+ };
+
+ auto Success = [&](uint64_t S, const Expr *E) {
+ Size = S;
+ return true;
+ };
+
+ // Determine the denoted object.
+ LValue Base;
+ {
+ // The operand of __builtin_object_size is never evaluated for side-effects.
+ // If there are any, but we can determine the pointed-to object anyway, then
+ // ignore the side-effects.
+ SpeculativeEvaluationRAII SpeculativeEval(Info);
+ FoldOffsetRAII Fold(Info, Type & 1);
+
+ if (E->isGLValue()) {
+ // It's possible for us to be given GLValues if we're called via
+ // Expr::tryEvaluateObjectSize.
+ APValue RVal;
+ if (!EvaluateAsRValue(Info, E, RVal))
+ return false;
+ Base.setFrom(Info.Ctx, RVal);
+ } else if (!EvaluatePointer(ignorePointerCastsAndParens(E), Base, Info))
+ return false;
+ }
+
+ CharUnits BaseOffset = Base.getLValueOffset();
+ // If we point to before the start of the object, there are no accessible
+ // bytes.
+ if (BaseOffset.isNegative())
+ return Success(0, E);
+
+ // In the case where we're not dealing with a subobject, we discard the
+ // subobject bit.
+ bool SubobjectOnly = (Type & 1) != 0 && !refersToCompleteObject(Base);
+
+ // If Type & 1 is 0, we need to be able to statically guarantee that the bytes
+ // exist. If we can't verify the base, then we can't do that.
+ //
+ // As a special case, we produce a valid object size for an unknown object
+ // with a known designator if Type & 1 is 1. For instance:
+ //
+ // extern struct X { char buff[32]; int a, b, c; } *p;
+ // int a = __builtin_object_size(p->buff + 4, 3); // returns 28
+ // int b = __builtin_object_size(p->buff + 4, 2); // returns 0, not 40
+ //
+ // This matches GCC's behavior.
+ if (Base.InvalidBase && !SubobjectOnly)
+ return Error(E);
+
+ // If we're not examining only the subobject, then we reset to a complete
+ // object designator
+ //
+ // If Type is 1 and we've lost track of the subobject, just find the complete
+ // object instead. (If Type is 3, that's not correct behavior and we should
+ // return 0 instead.)
+ LValue End = Base;
+ if (!SubobjectOnly || (End.Designator.Invalid && Type == 1)) {
+ QualType T = getObjectType(End.getLValueBase());
+ if (T.isNull())
+ End.Designator.setInvalid();
+ else {
+ End.Designator = SubobjectDesignator(T);
+ End.Offset = CharUnits::Zero();
+ }
+ }
+
+ // If it is not possible to determine which objects ptr points to at compile
+ // time, __builtin_object_size should return (size_t) -1 for type 0 or 1
+ // and (size_t) 0 for type 2 or 3.
+ if (End.Designator.Invalid)
+ return false;
+
+ // According to the GCC documentation, we want the size of the subobject
+ // denoted by the pointer. But that's not quite right -- what we actually
+ // want is the size of the immediately-enclosing array, if there is one.
+ int64_t AmountToAdd = 1;
+ if (End.Designator.MostDerivedIsArrayElement &&
+ End.Designator.Entries.size() == End.Designator.MostDerivedPathLength) {
+ // We got a pointer to an array. Step to its end.
+ AmountToAdd = End.Designator.MostDerivedArraySize -
+ End.Designator.Entries.back().ArrayIndex;
+ } else if (End.Designator.isOnePastTheEnd()) {
+ // We're already pointing at the end of the object.
+ AmountToAdd = 0;
+ }
+
+ QualType PointeeType = End.Designator.MostDerivedType;
+ assert(!PointeeType.isNull());
+ if (PointeeType->isIncompleteType() || PointeeType->isFunctionType())
+ return Error(E);
+
+ if (!HandleLValueArrayAdjustment(Info, E, End, End.Designator.MostDerivedType,
+ AmountToAdd))
+ return false;
+
+ auto EndOffset = End.getLValueOffset();
+
+ // The following is a moderately common idiom in C:
+ //
+ // struct Foo { int a; char c[1]; };
+ // struct Foo *F = (struct Foo *)malloc(sizeof(struct Foo) + strlen(Bar));
+ // strcpy(&F->c[0], Bar);
+ //
+ // So, if we see that we're examining a 1-length (or 0-length) array at the
+ // end of a struct with an unknown base, we give up instead of breaking code
+ // that behaves this way. Note that we only do this when Type=1, because
+ // Type=3 is a lower bound, so answering conservatively is fine.
+ if (End.InvalidBase && SubobjectOnly && Type == 1 &&
+ End.Designator.Entries.size() == End.Designator.MostDerivedPathLength &&
+ End.Designator.MostDerivedIsArrayElement &&
+ End.Designator.MostDerivedArraySize < 2 &&
+ isDesignatorAtObjectEnd(Info.Ctx, End))
+ return false;
+
+ if (BaseOffset > EndOffset)
+ return Success(0, E);
+
+ return Success((EndOffset - BaseOffset).getQuantity(), E);
+}
+
+bool IntExprEvaluator::TryEvaluateBuiltinObjectSize(const CallExpr *E,
+ unsigned Type) {
+ uint64_t Size;
+ bool WasError;
+ if (::tryEvaluateBuiltinObjectSize(E->getArg(0), Type, Info, Size, &WasError))
+ return Success(Size, E);
+ if (WasError)
+ return Error(E);
+ return false;
+}
+
+bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
+ switch (unsigned BuiltinOp = E->getBuiltinCallee()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCallExpr(E);
+
+ case Builtin::BI__builtin_object_size: {
+ // The type was checked when we built the expression.
+ unsigned Type =
+ E->getArg(1)->EvaluateKnownConstInt(Info.Ctx).getZExtValue();
+ assert(Type <= 3 && "unexpected type");
+
+ if (TryEvaluateBuiltinObjectSize(E, Type))
+ return true;
+
+ if (E->getArg(0)->HasSideEffects(Info.Ctx))
+ return Success((Type & 2) ? 0 : -1, E);
+
+ // Expression had no side effects, but we couldn't statically determine the
+ // size of the referenced object.
+ switch (Info.EvalMode) {
+ case EvalInfo::EM_ConstantExpression:
+ case EvalInfo::EM_PotentialConstantExpression:
+ case EvalInfo::EM_ConstantFold:
+ case EvalInfo::EM_EvaluateForOverflow:
+ case EvalInfo::EM_IgnoreSideEffects:
+ case EvalInfo::EM_DesignatorFold:
+ // Leave it to IR generation.
+ return Error(E);
+ case EvalInfo::EM_ConstantExpressionUnevaluated:
+ case EvalInfo::EM_PotentialConstantExpressionUnevaluated:
+ // Reduce it to a constant now.
+ return Success((Type & 2) ? 0 : -1, E);
+    }
+    llvm_unreachable("unexpected EvalMode");
+  }
+
+ case Builtin::BI__builtin_bswap16:
+ case Builtin::BI__builtin_bswap32:
+ case Builtin::BI__builtin_bswap64: {
+ APSInt Val;
+ if (!EvaluateInteger(E->getArg(0), Val, Info))
+ return false;
+
+ return Success(Val.byteSwap(), E);
+ }
+
+ case Builtin::BI__builtin_classify_type:
+ return Success(EvaluateBuiltinClassifyType(E), E);
+
+ // FIXME: BI__builtin_clrsb
+ // FIXME: BI__builtin_clrsbl
+ // FIXME: BI__builtin_clrsbll
+
+ case Builtin::BI__builtin_clz:
+ case Builtin::BI__builtin_clzl:
+ case Builtin::BI__builtin_clzll:
+ case Builtin::BI__builtin_clzs: {
+ APSInt Val;
+ if (!EvaluateInteger(E->getArg(0), Val, Info))
+ return false;
+ if (!Val)
+ return Error(E);
+
+ return Success(Val.countLeadingZeros(), E);
+ }
+
+ case Builtin::BI__builtin_constant_p:
+ return Success(EvaluateBuiltinConstantP(Info.Ctx, E->getArg(0)), E);
+
+ case Builtin::BI__builtin_ctz:
+ case Builtin::BI__builtin_ctzl:
+ case Builtin::BI__builtin_ctzll:
+ case Builtin::BI__builtin_ctzs: {
+ APSInt Val;
+ if (!EvaluateInteger(E->getArg(0), Val, Info))
+ return false;
+ if (!Val)
+ return Error(E);
+
+ return Success(Val.countTrailingZeros(), E);
+ }
+
+ case Builtin::BI__builtin_eh_return_data_regno: {
+ int Operand = E->getArg(0)->EvaluateKnownConstInt(Info.Ctx).getZExtValue();
+ Operand = Info.Ctx.getTargetInfo().getEHDataRegisterNumber(Operand);
+ return Success(Operand, E);
+ }
+
+ case Builtin::BI__builtin_expect:
+ return Visit(E->getArg(0));
+
+ case Builtin::BI__builtin_ffs:
+ case Builtin::BI__builtin_ffsl:
+ case Builtin::BI__builtin_ffsll: {
+ APSInt Val;
+ if (!EvaluateInteger(E->getArg(0), Val, Info))
+ return false;
+
+ unsigned N = Val.countTrailingZeros();
+ return Success(N == Val.getBitWidth() ? 0 : N + 1, E);
+ }
+
+ case Builtin::BI__builtin_fpclassify: {
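+    // Arguments 0-4 are the values to produce for the NaN, infinite, normal,
+    // denormal and zero categories, respectively. For example (illustrative):
+    // __builtin_fpclassify(0, 1, 2, 3, 4, 0.0) folds to 4.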
+ APFloat Val(0.0);
+ if (!EvaluateFloat(E->getArg(5), Val, Info))
+ return false;
+ unsigned Arg;
+ switch (Val.getCategory()) {
+ case APFloat::fcNaN: Arg = 0; break;
+ case APFloat::fcInfinity: Arg = 1; break;
+ case APFloat::fcNormal: Arg = Val.isDenormal() ? 3 : 2; break;
+ case APFloat::fcZero: Arg = 4; break;
+ }
+ return Visit(E->getArg(Arg));
+ }
+
+ case Builtin::BI__builtin_isinf_sign: {
+ APFloat Val(0.0);
+ return EvaluateFloat(E->getArg(0), Val, Info) &&
+ Success(Val.isInfinity() ? (Val.isNegative() ? -1 : 1) : 0, E);
+ }
+
+ case Builtin::BI__builtin_isinf: {
+ APFloat Val(0.0);
+ return EvaluateFloat(E->getArg(0), Val, Info) &&
+ Success(Val.isInfinity() ? 1 : 0, E);
+ }
+
+ case Builtin::BI__builtin_isfinite: {
+ APFloat Val(0.0);
+ return EvaluateFloat(E->getArg(0), Val, Info) &&
+ Success(Val.isFinite() ? 1 : 0, E);
+ }
+
+ case Builtin::BI__builtin_isnan: {
+ APFloat Val(0.0);
+ return EvaluateFloat(E->getArg(0), Val, Info) &&
+ Success(Val.isNaN() ? 1 : 0, E);
+ }
+
+ case Builtin::BI__builtin_isnormal: {
+ APFloat Val(0.0);
+ return EvaluateFloat(E->getArg(0), Val, Info) &&
+ Success(Val.isNormal() ? 1 : 0, E);
+ }
+
+ case Builtin::BI__builtin_parity:
+ case Builtin::BI__builtin_parityl:
+ case Builtin::BI__builtin_parityll: {
+ APSInt Val;
+ if (!EvaluateInteger(E->getArg(0), Val, Info))
+ return false;
+
+ return Success(Val.countPopulation() % 2, E);
+ }
+
+ case Builtin::BI__builtin_popcount:
+ case Builtin::BI__builtin_popcountl:
+ case Builtin::BI__builtin_popcountll: {
+ APSInt Val;
+ if (!EvaluateInteger(E->getArg(0), Val, Info))
+ return false;
+
+ return Success(Val.countPopulation(), E);
+ }
+
+ case Builtin::BIstrlen:
+ // A call to strlen is not a constant expression.
+ if (Info.getLangOpts().CPlusPlus11)
+ Info.CCEDiag(E, diag::note_constexpr_invalid_function)
+ << /*isConstexpr*/0 << /*isConstructor*/0 << "'strlen'";
+ else
+ Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
+ // Fall through.
+ case Builtin::BI__builtin_strlen: {
+ // As an extension, we support __builtin_strlen() as a constant expression,
+ // and support folding strlen() to a constant.
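+    // For example (illustrative): __builtin_strlen("ab\0cd") folds to 2; the
+    // embedded null terminates the scan on the fast path below.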
+ LValue String;
+ if (!EvaluatePointer(E->getArg(0), String, Info))
+ return false;
+
+ // Fast path: if it's a string literal, search the string value.
+ if (const StringLiteral *S = dyn_cast_or_null<StringLiteral>(
+ String.getLValueBase().dyn_cast<const Expr *>())) {
+ // The string literal may have embedded null characters. Find the first
+ // one and truncate there.
+ StringRef Str = S->getBytes();
+ int64_t Off = String.Offset.getQuantity();
+ if (Off >= 0 && (uint64_t)Off <= (uint64_t)Str.size() &&
+ S->getCharByteWidth() == 1) {
+ Str = Str.substr(Off);
+
+ StringRef::size_type Pos = Str.find(0);
+ if (Pos != StringRef::npos)
+ Str = Str.substr(0, Pos);
+
+ return Success(Str.size(), E);
+ }
+
+ // Fall through to slow path to issue appropriate diagnostic.
+ }
+
+ // Slow path: scan the bytes of the string looking for the terminating 0.
+ QualType CharTy = E->getArg(0)->getType()->getPointeeType();
+ for (uint64_t Strlen = 0; /**/; ++Strlen) {
+ APValue Char;
+ if (!handleLValueToRValueConversion(Info, E, CharTy, String, Char) ||
+ !Char.isInt())
+ return false;
+ if (!Char.getInt())
+ return Success(Strlen, E);
+ if (!HandleLValueArrayAdjustment(Info, E, String, CharTy, 1))
+ return false;
+ }
+ }
+
+ case Builtin::BI__atomic_always_lock_free:
+ case Builtin::BI__atomic_is_lock_free:
+ case Builtin::BI__c11_atomic_is_lock_free: {
+ APSInt SizeVal;
+ if (!EvaluateInteger(E->getArg(0), SizeVal, Info))
+ return false;
+
+ // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
+ // of two less than the maximum inline atomic width, we know it is
+ // lock-free. If the size isn't a power of two, or greater than the
+ // maximum alignment where we promote atomics, we know it is not lock-free
+ // (at least not in the sense of atomic_is_lock_free). Otherwise,
+ // the answer can only be determined at runtime; for example, 16-byte
+ // atomics have lock-free implementations on some, but not all,
+ // x86-64 processors.
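+    //
+    // Illustrative outcomes on a typical 64-bit target (hedged: the inline
+    // width is target-specific):
+    //   __atomic_always_lock_free(4, 0)  -> folds to 1
+    //   __atomic_always_lock_free(16, 0) -> folds to 0
+    //   __atomic_is_lock_free(16, 0)     -> not folded; left to the runtime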
+
+ // Check power-of-two.
+ CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
+ if (Size.isPowerOfTwo()) {
+ // Check against inlining width.
+ unsigned InlineWidthBits =
+ Info.Ctx.getTargetInfo().getMaxAtomicInlineWidth();
+ if (Size <= Info.Ctx.toCharUnitsFromBits(InlineWidthBits)) {
+ if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
+ Size == CharUnits::One() ||
+ E->getArg(1)->isNullPointerConstant(Info.Ctx,
+ Expr::NPC_NeverValueDependent))
+ // OK, we will inline appropriately-aligned operations of this size,
+ // and _Atomic(T) is appropriately-aligned.
+ return Success(1, E);
+
+ QualType PointeeType = E->getArg(1)->IgnoreImpCasts()->getType()->
+ castAs<PointerType>()->getPointeeType();
+ if (!PointeeType->isIncompleteType() &&
+ Info.Ctx.getTypeAlignInChars(PointeeType) >= Size) {
+ // OK, we will inline operations on this object.
+ return Success(1, E);
+ }
+ }
+ }
+
+ return BuiltinOp == Builtin::BI__atomic_always_lock_free ?
+ Success(0, E) : Error(E);
+ }
+ }
+}
+
+static bool HasSameBase(const LValue &A, const LValue &B) {
+ if (!A.getLValueBase())
+ return !B.getLValueBase();
+ if (!B.getLValueBase())
+ return false;
+
+ if (A.getLValueBase().getOpaqueValue() !=
+ B.getLValueBase().getOpaqueValue()) {
+ const Decl *ADecl = GetLValueBaseDecl(A);
+ if (!ADecl)
+ return false;
+ const Decl *BDecl = GetLValueBaseDecl(B);
+ if (!BDecl || ADecl->getCanonicalDecl() != BDecl->getCanonicalDecl())
+ return false;
+ }
+
+ return IsGlobalLValue(A.getLValueBase()) ||
+ A.getLValueCallIndex() == B.getLValueCallIndex();
+}
+
+/// \brief Determine whether this is a pointer past the end of the complete
+/// object referred to by the lvalue.
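+///
+/// For example (illustrative): given "int x;", "&x + 1" is one past the end
+/// of the complete object x, while "&x" itself is not.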
+static bool isOnePastTheEndOfCompleteObject(const ASTContext &Ctx,
+ const LValue &LV) {
+ // A null pointer can be viewed as being "past the end" but we don't
+ // choose to look at it that way here.
+ if (!LV.getLValueBase())
+ return false;
+
+ // If the designator is valid and refers to a subobject, we're not pointing
+ // past the end.
+ if (!LV.getLValueDesignator().Invalid &&
+ !LV.getLValueDesignator().isOnePastTheEnd())
+ return false;
+
+ // A pointer to an incomplete type might be past-the-end if the type's size is
+ // zero. We cannot tell because the type is incomplete.
+ QualType Ty = getType(LV.getLValueBase());
+ if (Ty->isIncompleteType())
+ return true;
+
+ // We're a past-the-end pointer if we point to the byte after the object,
+ // no matter what our type or path is.
+ auto Size = Ctx.getTypeSizeInChars(Ty);
+ return LV.getLValueOffset() == Size;
+}
+
+namespace {
+
+/// \brief Data recursive integer evaluator of certain binary operators.
+///
+/// We use a data recursive algorithm for binary operators so that we are able
+/// to handle extreme cases of chained binary operators without causing stack
+/// overflow.
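+///
+/// For example (an illustrative worst case), a generated expression such as
+/// "1 + 1 + 1 + ... + 1" with tens of thousands of terms forms a deeply
+/// left-leaning tree; the explicit Queue below evaluates it iteratively
+/// rather than recursing once per operator.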
+class DataRecursiveIntBinOpEvaluator {
+ struct EvalResult {
+ APValue Val;
+ bool Failed;
+
+ EvalResult() : Failed(false) { }
+
+ void swap(EvalResult &RHS) {
+ Val.swap(RHS.Val);
+ Failed = RHS.Failed;
+ RHS.Failed = false;
+ }
+ };
+
+ struct Job {
+ const Expr *E;
+    EvalResult LHSResult; // meaningful only for binary operator expressions.
+ enum { AnyExprKind, BinOpKind, BinOpVisitedLHSKind } Kind;
+
+ Job() = default;
+ Job(Job &&J)
+ : E(J.E), LHSResult(J.LHSResult), Kind(J.Kind),
+ StoredInfo(J.StoredInfo), OldEvalStatus(J.OldEvalStatus) {
+ J.StoredInfo = nullptr;
+ }
+
+ void startSpeculativeEval(EvalInfo &Info) {
+ OldEvalStatus = Info.EvalStatus;
+ Info.EvalStatus.Diag = nullptr;
+ StoredInfo = &Info;
+ }
+ ~Job() {
+ if (StoredInfo) {
+ StoredInfo->EvalStatus = OldEvalStatus;
+ }
+ }
+ private:
+ EvalInfo *StoredInfo = nullptr; // non-null if status changed.
+ Expr::EvalStatus OldEvalStatus;
+ };
+
+ SmallVector<Job, 16> Queue;
+
+ IntExprEvaluator &IntEval;
+ EvalInfo &Info;
+ APValue &FinalResult;
+
+public:
+ DataRecursiveIntBinOpEvaluator(IntExprEvaluator &IntEval, APValue &Result)
+ : IntEval(IntEval), Info(IntEval.getEvalInfo()), FinalResult(Result) { }
+
+ /// \brief True if \param E is a binary operator that we are going to handle
+ /// data recursively.
+ /// We handle binary operators that are comma, logical, or that have operands
+ /// with integral or enumeration type.
+ static bool shouldEnqueue(const BinaryOperator *E) {
+ return E->getOpcode() == BO_Comma ||
+ E->isLogicalOp() ||
+ (E->getLHS()->getType()->isIntegralOrEnumerationType() &&
+ E->getRHS()->getType()->isIntegralOrEnumerationType());
+ }
+
+ bool Traverse(const BinaryOperator *E) {
+ enqueue(E);
+ EvalResult PrevResult;
+ while (!Queue.empty())
+ process(PrevResult);
+
+ if (PrevResult.Failed) return false;
+
+ FinalResult.swap(PrevResult.Val);
+ return true;
+ }
+
+private:
+ bool Success(uint64_t Value, const Expr *E, APValue &Result) {
+ return IntEval.Success(Value, E, Result);
+ }
+ bool Success(const APSInt &Value, const Expr *E, APValue &Result) {
+ return IntEval.Success(Value, E, Result);
+ }
+ bool Error(const Expr *E) {
+ return IntEval.Error(E);
+ }
+ bool Error(const Expr *E, diag::kind D) {
+ return IntEval.Error(E, D);
+ }
+
+ OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) {
+ return Info.CCEDiag(E, D);
+ }
+
+ // \brief Returns true if visiting the RHS is necessary, false otherwise.
+ bool VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E,
+ bool &SuppressRHSDiags);
+
+ bool VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult,
+ const BinaryOperator *E, APValue &Result);
+
+ void EvaluateExpr(const Expr *E, EvalResult &Result) {
+ Result.Failed = !Evaluate(Result.Val, Info, E);
+ if (Result.Failed)
+ Result.Val = APValue();
+ }
+
+ void process(EvalResult &Result);
+
+ void enqueue(const Expr *E) {
+ E = E->IgnoreParens();
+ Queue.resize(Queue.size()+1);
+ Queue.back().E = E;
+ Queue.back().Kind = Job::AnyExprKind;
+ }
+};
+
+} // end anonymous namespace
+
+bool DataRecursiveIntBinOpEvaluator::
+ VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E,
+ bool &SuppressRHSDiags) {
+ if (E->getOpcode() == BO_Comma) {
+ // Ignore LHS but note if we could not evaluate it.
+ if (LHSResult.Failed)
+ return Info.noteSideEffect();
+ return true;
+ }
+
+ if (E->isLogicalOp()) {
+ bool LHSAsBool;
+ if (!LHSResult.Failed && HandleConversionToBool(LHSResult.Val, LHSAsBool)) {
+ // We were able to evaluate the LHS, see if we can get away with not
+ // evaluating the RHS: 0 && X -> 0, 1 || X -> 1
+ if (LHSAsBool == (E->getOpcode() == BO_LOr)) {
+ Success(LHSAsBool, E, LHSResult.Val);
+ return false; // Ignore RHS
+ }
+ } else {
+ LHSResult.Failed = true;
+
+ // Since we weren't able to evaluate the left hand side, it
+ // must have had side effects.
+ if (!Info.noteSideEffect())
+ return false;
+
+ // We can't evaluate the LHS; however, sometimes the result
+ // is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
+ // Don't ignore RHS and suppress diagnostics from this arm.
+ SuppressRHSDiags = true;
+ }
+
+ return true;
+ }
+
+ assert(E->getLHS()->getType()->isIntegralOrEnumerationType() &&
+ E->getRHS()->getType()->isIntegralOrEnumerationType());
+
+ if (LHSResult.Failed && !Info.keepEvaluatingAfterFailure())
+      return false; // Ignore RHS.
+
+ return true;
+}
+
+bool DataRecursiveIntBinOpEvaluator::
+ VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult,
+ const BinaryOperator *E, APValue &Result) {
+ if (E->getOpcode() == BO_Comma) {
+ if (RHSResult.Failed)
+ return false;
+ Result = RHSResult.Val;
+ return true;
+ }
+
+ if (E->isLogicalOp()) {
+ bool lhsResult, rhsResult;
+ bool LHSIsOK = HandleConversionToBool(LHSResult.Val, lhsResult);
+ bool RHSIsOK = HandleConversionToBool(RHSResult.Val, rhsResult);
+
+ if (LHSIsOK) {
+ if (RHSIsOK) {
+ if (E->getOpcode() == BO_LOr)
+ return Success(lhsResult || rhsResult, E, Result);
+ else
+ return Success(lhsResult && rhsResult, E, Result);
+ }
+ } else {
+ if (RHSIsOK) {
+ // We can't evaluate the LHS; however, sometimes the result
+ // is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
+ if (rhsResult == (E->getOpcode() == BO_LOr))
+ return Success(rhsResult, E, Result);
+ }
+ }
+
+ return false;
+ }
+
+ assert(E->getLHS()->getType()->isIntegralOrEnumerationType() &&
+ E->getRHS()->getType()->isIntegralOrEnumerationType());
+
+ if (LHSResult.Failed || RHSResult.Failed)
+ return false;
+
+ const APValue &LHSVal = LHSResult.Val;
+ const APValue &RHSVal = RHSResult.Val;
+
+ // Handle cases like (unsigned long)&a + 4.
+ if (E->isAdditiveOp() && LHSVal.isLValue() && RHSVal.isInt()) {
+ Result = LHSVal;
+ CharUnits AdditionalOffset =
+ CharUnits::fromQuantity(RHSVal.getInt().getZExtValue());
+ if (E->getOpcode() == BO_Add)
+ Result.getLValueOffset() += AdditionalOffset;
+ else
+ Result.getLValueOffset() -= AdditionalOffset;
+ return true;
+ }
+
+ // Handle cases like 4 + (unsigned long)&a
+ if (E->getOpcode() == BO_Add &&
+ RHSVal.isLValue() && LHSVal.isInt()) {
+ Result = RHSVal;
+ Result.getLValueOffset() +=
+ CharUnits::fromQuantity(LHSVal.getInt().getZExtValue());
+ return true;
+ }
+
+ if (E->getOpcode() == BO_Sub && LHSVal.isLValue() && RHSVal.isLValue()) {
+ // Handle (intptr_t)&&A - (intptr_t)&&B.
+ if (!LHSVal.getLValueOffset().isZero() ||
+ !RHSVal.getLValueOffset().isZero())
+ return false;
+ const Expr *LHSExpr = LHSVal.getLValueBase().dyn_cast<const Expr*>();
+ const Expr *RHSExpr = RHSVal.getLValueBase().dyn_cast<const Expr*>();
+ if (!LHSExpr || !RHSExpr)
+ return false;
+ const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr);
+ const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(RHSExpr);
+ if (!LHSAddrExpr || !RHSAddrExpr)
+ return false;
+ // Make sure both labels come from the same function.
+ if (LHSAddrExpr->getLabel()->getDeclContext() !=
+ RHSAddrExpr->getLabel()->getDeclContext())
+ return false;
+ Result = APValue(LHSAddrExpr, RHSAddrExpr);
+ return true;
+ }
+
+ // All the remaining cases expect both operands to be an integer
+ if (!LHSVal.isInt() || !RHSVal.isInt())
+ return Error(E);
+
+ // Set up the width and signedness manually, in case it can't be deduced
+ // from the operation we're performing.
+ // FIXME: Don't do this in the cases where we can deduce it.
+ APSInt Value(Info.Ctx.getIntWidth(E->getType()),
+ E->getType()->isUnsignedIntegerOrEnumerationType());
+ if (!handleIntIntBinOp(Info, E, LHSVal.getInt(), E->getOpcode(),
+ RHSVal.getInt(), Value))
+ return false;
+ return Success(Value, E, Result);
+}
+
+void DataRecursiveIntBinOpEvaluator::process(EvalResult &Result) {
+ Job &job = Queue.back();
+
+ switch (job.Kind) {
+ case Job::AnyExprKind: {
+ if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(job.E)) {
+ if (shouldEnqueue(Bop)) {
+ job.Kind = Job::BinOpKind;
+ enqueue(Bop->getLHS());
+ return;
+ }
+ }
+
+ EvaluateExpr(job.E, Result);
+ Queue.pop_back();
+ return;
+ }
+
+ case Job::BinOpKind: {
+ const BinaryOperator *Bop = cast<BinaryOperator>(job.E);
+ bool SuppressRHSDiags = false;
+ if (!VisitBinOpLHSOnly(Result, Bop, SuppressRHSDiags)) {
+ Queue.pop_back();
+ return;
+ }
+ if (SuppressRHSDiags)
+ job.startSpeculativeEval(Info);
+ job.LHSResult.swap(Result);
+ job.Kind = Job::BinOpVisitedLHSKind;
+ enqueue(Bop->getRHS());
+ return;
+ }
+
+ case Job::BinOpVisitedLHSKind: {
+ const BinaryOperator *Bop = cast<BinaryOperator>(job.E);
+ EvalResult RHS;
+ RHS.swap(Result);
+ Result.Failed = !VisitBinOp(job.LHSResult, RHS, Bop, Result.Val);
+ Queue.pop_back();
+ return;
+ }
+ }
+
+ llvm_unreachable("Invalid Job::Kind!");
+}
+
+bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ if (!Info.keepEvaluatingAfterFailure() && E->isAssignmentOp())
+ return Error(E);
+
+ if (DataRecursiveIntBinOpEvaluator::shouldEnqueue(E))
+ return DataRecursiveIntBinOpEvaluator(*this, Result).Traverse(E);
+
+ QualType LHSTy = E->getLHS()->getType();
+ QualType RHSTy = E->getRHS()->getType();
+
+ if (LHSTy->isAnyComplexType() || RHSTy->isAnyComplexType()) {
+ ComplexValue LHS, RHS;
+ bool LHSOK;
+ if (E->isAssignmentOp()) {
+ LValue LV;
+ EvaluateLValue(E->getLHS(), LV, Info);
+ LHSOK = false;
+ } else if (LHSTy->isRealFloatingType()) {
+ LHSOK = EvaluateFloat(E->getLHS(), LHS.FloatReal, Info);
+ if (LHSOK) {
+ LHS.makeComplexFloat();
+ LHS.FloatImag = APFloat(LHS.FloatReal.getSemantics());
+ }
+ } else {
+ LHSOK = EvaluateComplex(E->getLHS(), LHS, Info);
+ }
+ if (!LHSOK && !Info.keepEvaluatingAfterFailure())
+ return false;
+
+ if (E->getRHS()->getType()->isRealFloatingType()) {
+ if (!EvaluateFloat(E->getRHS(), RHS.FloatReal, Info) || !LHSOK)
+ return false;
+ RHS.makeComplexFloat();
+ RHS.FloatImag = APFloat(RHS.FloatReal.getSemantics());
+ } else if (!EvaluateComplex(E->getRHS(), RHS, Info) || !LHSOK)
+ return false;
+
+ if (LHS.isComplexFloat()) {
+ APFloat::cmpResult CR_r =
+ LHS.getComplexFloatReal().compare(RHS.getComplexFloatReal());
+ APFloat::cmpResult CR_i =
+ LHS.getComplexFloatImag().compare(RHS.getComplexFloatImag());
+
+ if (E->getOpcode() == BO_EQ)
+ return Success((CR_r == APFloat::cmpEqual &&
+ CR_i == APFloat::cmpEqual), E);
+ else {
+ assert(E->getOpcode() == BO_NE &&
+ "Invalid complex comparison.");
+ return Success(((CR_r == APFloat::cmpGreaterThan ||
+ CR_r == APFloat::cmpLessThan ||
+ CR_r == APFloat::cmpUnordered) ||
+ (CR_i == APFloat::cmpGreaterThan ||
+ CR_i == APFloat::cmpLessThan ||
+ CR_i == APFloat::cmpUnordered)), E);
+ }
+ } else {
+ if (E->getOpcode() == BO_EQ)
+ return Success((LHS.getComplexIntReal() == RHS.getComplexIntReal() &&
+ LHS.getComplexIntImag() == RHS.getComplexIntImag()), E);
+ else {
+ assert(E->getOpcode() == BO_NE &&
+ "Invalid compex comparison.");
+ return Success((LHS.getComplexIntReal() != RHS.getComplexIntReal() ||
+ LHS.getComplexIntImag() != RHS.getComplexIntImag()), E);
+ }
+ }
+ }
+
+ if (LHSTy->isRealFloatingType() &&
+ RHSTy->isRealFloatingType()) {
+ APFloat RHS(0.0), LHS(0.0);
+
+ bool LHSOK = EvaluateFloat(E->getRHS(), RHS, Info);
+ if (!LHSOK && !Info.keepEvaluatingAfterFailure())
+ return false;
+
+ if (!EvaluateFloat(E->getLHS(), LHS, Info) || !LHSOK)
+ return false;
+
+ APFloat::cmpResult CR = LHS.compare(RHS);
+
+ switch (E->getOpcode()) {
+ default:
+ llvm_unreachable("Invalid binary operator!");
+ case BO_LT:
+ return Success(CR == APFloat::cmpLessThan, E);
+ case BO_GT:
+ return Success(CR == APFloat::cmpGreaterThan, E);
+ case BO_LE:
+ return Success(CR == APFloat::cmpLessThan || CR == APFloat::cmpEqual, E);
+ case BO_GE:
+ return Success(CR == APFloat::cmpGreaterThan || CR == APFloat::cmpEqual,
+ E);
+ case BO_EQ:
+ return Success(CR == APFloat::cmpEqual, E);
+ case BO_NE:
+ return Success(CR == APFloat::cmpGreaterThan
+ || CR == APFloat::cmpLessThan
+ || CR == APFloat::cmpUnordered, E);
+ }
+ }
+
+ if (LHSTy->isPointerType() && RHSTy->isPointerType()) {
+ if (E->getOpcode() == BO_Sub || E->isComparisonOp()) {
+ LValue LHSValue, RHSValue;
+
+ bool LHSOK = EvaluatePointer(E->getLHS(), LHSValue, Info);
+ if (!LHSOK && !Info.keepEvaluatingAfterFailure())
+ return false;
+
+ if (!EvaluatePointer(E->getRHS(), RHSValue, Info) || !LHSOK)
+ return false;
+
+ // Reject differing bases from the normal codepath; we special-case
+ // comparisons to null.
+ if (!HasSameBase(LHSValue, RHSValue)) {
+ if (E->getOpcode() == BO_Sub) {
+ // Handle &&A - &&B.
+ if (!LHSValue.Offset.isZero() || !RHSValue.Offset.isZero())
+ return Error(E);
+ const Expr *LHSExpr = LHSValue.Base.dyn_cast<const Expr*>();
+ const Expr *RHSExpr = RHSValue.Base.dyn_cast<const Expr*>();
+ if (!LHSExpr || !RHSExpr)
+ return Error(E);
+ const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr);
+ const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(RHSExpr);
+ if (!LHSAddrExpr || !RHSAddrExpr)
+ return Error(E);
+ // Make sure both labels come from the same function.
+ if (LHSAddrExpr->getLabel()->getDeclContext() !=
+ RHSAddrExpr->getLabel()->getDeclContext())
+ return Error(E);
+ return Success(APValue(LHSAddrExpr, RHSAddrExpr), E);
+ }
+ // Inequalities and subtractions between unrelated pointers have
+ // unspecified or undefined behavior.
+ if (!E->isEqualityOp())
+ return Error(E);
+ // A constant address may compare equal to the address of a symbol.
+ // The one exception is that address of an object cannot compare equal
+ // to a null pointer constant.
+ if ((!LHSValue.Base && !LHSValue.Offset.isZero()) ||
+ (!RHSValue.Base && !RHSValue.Offset.isZero()))
+ return Error(E);
+ // It's implementation-defined whether distinct literals will have
+ // distinct addresses. In clang, the result of such a comparison is
+ // unspecified, so it is not a constant expression. However, we do know
+ // that the address of a literal will be non-null.
+ if ((IsLiteralLValue(LHSValue) || IsLiteralLValue(RHSValue)) &&
+ LHSValue.Base && RHSValue.Base)
+ return Error(E);
+ // We can't tell whether weak symbols will end up pointing to the same
+ // object.
+ if (IsWeakLValue(LHSValue) || IsWeakLValue(RHSValue))
+ return Error(E);
+ // We can't compare the address of the start of one object with the
+ // past-the-end address of another object, per C++ DR1652.
+ if ((LHSValue.Base && LHSValue.Offset.isZero() &&
+ isOnePastTheEndOfCompleteObject(Info.Ctx, RHSValue)) ||
+ (RHSValue.Base && RHSValue.Offset.isZero() &&
+ isOnePastTheEndOfCompleteObject(Info.Ctx, LHSValue)))
+ return Error(E);
+ // We can't tell whether an object is at the same address as another
+ // zero sized object.
+ if ((RHSValue.Base && isZeroSized(LHSValue)) ||
+ (LHSValue.Base && isZeroSized(RHSValue)))
+ return Error(E);
+ // Pointers with different bases cannot represent the same object.
+ // (Note that clang defaults to -fmerge-all-constants, which can
+ // lead to inconsistent results for comparisons involving the address
+ // of a constant; this generally doesn't matter in practice.)
+ return Success(E->getOpcode() == BO_NE, E);
+ }
+
+ const CharUnits &LHSOffset = LHSValue.getLValueOffset();
+ const CharUnits &RHSOffset = RHSValue.getLValueOffset();
+
+ SubobjectDesignator &LHSDesignator = LHSValue.getLValueDesignator();
+ SubobjectDesignator &RHSDesignator = RHSValue.getLValueDesignator();
+
+ if (E->getOpcode() == BO_Sub) {
+ // C++11 [expr.add]p6:
+ // Unless both pointers point to elements of the same array object, or
+ // one past the last element of the array object, the behavior is
+ // undefined.
+ if (!LHSDesignator.Invalid && !RHSDesignator.Invalid &&
+ !AreElementsOfSameArray(getType(LHSValue.Base),
+ LHSDesignator, RHSDesignator))
+ CCEDiag(E, diag::note_constexpr_pointer_subtraction_not_same_array);
+
+ QualType Type = E->getLHS()->getType();
+ QualType ElementType = Type->getAs<PointerType>()->getPointeeType();
+
+ CharUnits ElementSize;
+ if (!HandleSizeof(Info, E->getExprLoc(), ElementType, ElementSize))
+ return false;
+
+ // As an extension, a type may have zero size (empty struct or union in
+ // C, array of zero length). Pointer subtraction in such cases has
+ // undefined behavior, so is not constant.
+ if (ElementSize.isZero()) {
+ Info.Diag(E, diag::note_constexpr_pointer_subtraction_zero_size)
+ << ElementType;
+ return false;
+ }
+
+ // FIXME: LLVM and GCC both compute LHSOffset - RHSOffset at runtime,
+ // and produce incorrect results when it overflows. Such behavior
+ // appears to be non-conforming, but is common, so perhaps we should
+ // assume the standard intended for such cases to be undefined behavior
+ // and check for them.
+
+ // Compute (LHSOffset - RHSOffset) / Size carefully, checking for
+ // overflow in the final conversion to ptrdiff_t.
+ APSInt LHS(
+ llvm::APInt(65, (int64_t)LHSOffset.getQuantity(), true), false);
+ APSInt RHS(
+ llvm::APInt(65, (int64_t)RHSOffset.getQuantity(), true), false);
+ APSInt ElemSize(
+ llvm::APInt(65, (int64_t)ElementSize.getQuantity(), true), false);
+ APSInt TrueResult = (LHS - RHS) / ElemSize;
+ APSInt Result = TrueResult.trunc(Info.Ctx.getIntWidth(E->getType()));
+
+ if (Result.extend(65) != TrueResult &&
+ !HandleOverflow(Info, E, TrueResult, E->getType()))
+ return false;
+ return Success(Result, E);
+ }
+
+ // C++11 [expr.rel]p3:
+ // Pointers to void (after pointer conversions) can be compared, with a
+ // result defined as follows: If both pointers represent the same
+ // address or are both the null pointer value, the result is true if the
+ // operator is <= or >= and false otherwise; otherwise the result is
+ // unspecified.
+ // We interpret this as applying to pointers to *cv* void.
+ if (LHSTy->isVoidPointerType() && LHSOffset != RHSOffset &&
+ E->isRelationalOp())
+ CCEDiag(E, diag::note_constexpr_void_comparison);
+
+ // C++11 [expr.rel]p2:
+ // - If two pointers point to non-static data members of the same object,
+  //   or to subobjects or array elements of such members, recursively, the
+ // pointer to the later declared member compares greater provided the
+ // two members have the same access control and provided their class is
+ // not a union.
+ // [...]
+ // - Otherwise pointer comparisons are unspecified.
+ if (!LHSDesignator.Invalid && !RHSDesignator.Invalid &&
+ E->isRelationalOp()) {
+ bool WasArrayIndex;
+ unsigned Mismatch =
+ FindDesignatorMismatch(getType(LHSValue.Base), LHSDesignator,
+ RHSDesignator, WasArrayIndex);
+ // At the point where the designators diverge, the comparison has a
+ // specified value if:
+ // - we are comparing array indices
+ // - we are comparing fields of a union, or fields with the same access
+ // Otherwise, the result is unspecified and thus the comparison is not a
+ // constant expression.
+ if (!WasArrayIndex && Mismatch < LHSDesignator.Entries.size() &&
+ Mismatch < RHSDesignator.Entries.size()) {
+ const FieldDecl *LF = getAsField(LHSDesignator.Entries[Mismatch]);
+ const FieldDecl *RF = getAsField(RHSDesignator.Entries[Mismatch]);
+ if (!LF && !RF)
+ CCEDiag(E, diag::note_constexpr_pointer_comparison_base_classes);
+ else if (!LF)
+ CCEDiag(E, diag::note_constexpr_pointer_comparison_base_field)
+ << getAsBaseClass(LHSDesignator.Entries[Mismatch])
+ << RF->getParent() << RF;
+ else if (!RF)
+ CCEDiag(E, diag::note_constexpr_pointer_comparison_base_field)
+ << getAsBaseClass(RHSDesignator.Entries[Mismatch])
+ << LF->getParent() << LF;
+ else if (!LF->getParent()->isUnion() &&
+ LF->getAccess() != RF->getAccess())
+ CCEDiag(E, diag::note_constexpr_pointer_comparison_differing_access)
+ << LF << LF->getAccess() << RF << RF->getAccess()
+ << LF->getParent();
+ }
+ }
+
+ // The comparison here must be unsigned, and performed with the same
+ // width as the pointer.
+ unsigned PtrSize = Info.Ctx.getTypeSize(LHSTy);
+ uint64_t CompareLHS = LHSOffset.getQuantity();
+ uint64_t CompareRHS = RHSOffset.getQuantity();
+ assert(PtrSize <= 64 && "Unexpected pointer width");
+ uint64_t Mask = ~0ULL >> (64 - PtrSize);
+ CompareLHS &= Mask;
+ CompareRHS &= Mask;
+
+ // If there is a base and this is a relational operator, we can only
+ // compare pointers within the object in question; otherwise, the result
+ // depends on where the object is located in memory.
+ if (!LHSValue.Base.isNull() && E->isRelationalOp()) {
+ QualType BaseTy = getType(LHSValue.Base);
+ if (BaseTy->isIncompleteType())
+ return Error(E);
+ CharUnits Size = Info.Ctx.getTypeSizeInChars(BaseTy);
+ uint64_t OffsetLimit = Size.getQuantity();
+ if (CompareLHS > OffsetLimit || CompareRHS > OffsetLimit)
+ return Error(E);
+ }
+
+ switch (E->getOpcode()) {
+ default: llvm_unreachable("missing comparison operator");
+ case BO_LT: return Success(CompareLHS < CompareRHS, E);
+ case BO_GT: return Success(CompareLHS > CompareRHS, E);
+ case BO_LE: return Success(CompareLHS <= CompareRHS, E);
+ case BO_GE: return Success(CompareLHS >= CompareRHS, E);
+ case BO_EQ: return Success(CompareLHS == CompareRHS, E);
+ case BO_NE: return Success(CompareLHS != CompareRHS, E);
+ }
+ }
+ }
+
+ if (LHSTy->isMemberPointerType()) {
+ assert(E->isEqualityOp() && "unexpected member pointer operation");
+ assert(RHSTy->isMemberPointerType() && "invalid comparison");
+
+ MemberPtr LHSValue, RHSValue;
+
+ bool LHSOK = EvaluateMemberPointer(E->getLHS(), LHSValue, Info);
+ if (!LHSOK && Info.keepEvaluatingAfterFailure())
+ return false;
+
+ if (!EvaluateMemberPointer(E->getRHS(), RHSValue, Info) || !LHSOK)
+ return false;
+
+ // C++11 [expr.eq]p2:
+ // If both operands are null, they compare equal. Otherwise if only one is
+ // null, they compare unequal.
+ if (!LHSValue.getDecl() || !RHSValue.getDecl()) {
+ bool Equal = !LHSValue.getDecl() && !RHSValue.getDecl();
+ return Success(E->getOpcode() == BO_EQ ? Equal : !Equal, E);
+ }
+
+ // Otherwise if either is a pointer to a virtual member function, the
+ // result is unspecified.
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(LHSValue.getDecl()))
+ if (MD->isVirtual())
+ CCEDiag(E, diag::note_constexpr_compare_virtual_mem_ptr) << MD;
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(RHSValue.getDecl()))
+ if (MD->isVirtual())
+ CCEDiag(E, diag::note_constexpr_compare_virtual_mem_ptr) << MD;
+
+ // Otherwise they compare equal if and only if they would refer to the
+ // same member of the same most derived object or the same subobject if
+ // they were dereferenced with a hypothetical object of the associated
+ // class type.
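+    // e.g. "&S::a == &S::a" folds to true, and pointers to two distinct
+    // non-static data members of S compare unequal.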
+ bool Equal = LHSValue == RHSValue;
+ return Success(E->getOpcode() == BO_EQ ? Equal : !Equal, E);
+ }
+
+ if (LHSTy->isNullPtrType()) {
+ assert(E->isComparisonOp() && "unexpected nullptr operation");
+ assert(RHSTy->isNullPtrType() && "missing pointer conversion");
+ // C++11 [expr.rel]p4, [expr.eq]p3: If two operands of type std::nullptr_t
+    // are compared, the result is true if the operator is <=, >= or ==, and
+ // false otherwise.
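+    // e.g. "nullptr == nullptr" and "nullptr <= nullptr" fold to true,
+    // while "nullptr < nullptr" folds to false.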
+ BinaryOperator::Opcode Opcode = E->getOpcode();
+ return Success(Opcode == BO_EQ || Opcode == BO_LE || Opcode == BO_GE, E);
+ }
+
+ assert((!LHSTy->isIntegralOrEnumerationType() ||
+ !RHSTy->isIntegralOrEnumerationType()) &&
+ "DataRecursiveIntBinOpEvaluator should have handled integral types");
+ // We can't continue from here for non-integral types.
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+}
+
+/// VisitUnaryExprOrTypeTraitExpr - Evaluate a sizeof, alignof or vec_step,
+/// yielding a result of the expression's type.
+bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
+ const UnaryExprOrTypeTraitExpr *E) {
+ switch(E->getKind()) {
+ case UETT_AlignOf: {
+ if (E->isArgumentType())
+ return Success(GetAlignOfType(Info, E->getArgumentType()), E);
+ else
+ return Success(GetAlignOfExpr(Info, E->getArgumentExpr()), E);
+ }
+
+ case UETT_VecStep: {
+ QualType Ty = E->getTypeOfArgument();
+
+ if (Ty->isVectorType()) {
+ unsigned n = Ty->castAs<VectorType>()->getNumElements();
+
+ // The vec_step built-in functions that take a 3-component
+ // vector return 4. (OpenCL 1.1 spec 6.11.12)
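+      // e.g. vec_step of a float3 is 4, of a float4 is 4, and of any
+      // scalar type is 1 (handled below).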
+ if (n == 3)
+ n = 4;
+
+ return Success(n, E);
+ } else
+ return Success(1, E);
+ }
+
+ case UETT_SizeOf: {
+ QualType SrcTy = E->getTypeOfArgument();
+ // C++ [expr.sizeof]p2: "When applied to a reference or a reference type,
+ // the result is the size of the referenced type."
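+    // Thus sizeof(int&) evaluates to sizeof(int).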
+ if (const ReferenceType *Ref = SrcTy->getAs<ReferenceType>())
+ SrcTy = Ref->getPointeeType();
+
+ CharUnits Sizeof;
+ if (!HandleSizeof(Info, E->getExprLoc(), SrcTy, Sizeof))
+ return false;
+ return Success(Sizeof, E);
+ }
+ case UETT_OpenMPRequiredSimdAlign:
+ assert(E->isArgumentType());
+ return Success(
+ Info.Ctx.toCharUnitsFromBits(
+ Info.Ctx.getOpenMPDefaultSimdAlign(E->getArgumentType()))
+ .getQuantity(),
+ E);
+ }
+
+ llvm_unreachable("unknown expr/type trait");
+}
+
+bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
+ CharUnits Result;
+ unsigned n = OOE->getNumComponents();
+ if (n == 0)
+ return Error(OOE);
+ QualType CurrentType = OOE->getTypeSourceInfo()->getType();
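+  // e.g. for __builtin_offsetof(struct { char c; int i; }, i), the single
+  // Field component below contributes the ABI-dependent offset of 'i'
+  // (typically 4 when int has 4-byte alignment).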
+ for (unsigned i = 0; i != n; ++i) {
+ OffsetOfNode ON = OOE->getComponent(i);
+ switch (ON.getKind()) {
+ case OffsetOfNode::Array: {
+ const Expr *Idx = OOE->getIndexExpr(ON.getArrayExprIndex());
+ APSInt IdxResult;
+ if (!EvaluateInteger(Idx, IdxResult, Info))
+ return false;
+ const ArrayType *AT = Info.Ctx.getAsArrayType(CurrentType);
+ if (!AT)
+ return Error(OOE);
+ CurrentType = AT->getElementType();
+ CharUnits ElementSize = Info.Ctx.getTypeSizeInChars(CurrentType);
+ Result += IdxResult.getSExtValue() * ElementSize;
+ break;
+ }
+
+ case OffsetOfNode::Field: {
+ FieldDecl *MemberDecl = ON.getField();
+ const RecordType *RT = CurrentType->getAs<RecordType>();
+ if (!RT)
+ return Error(OOE);
+ RecordDecl *RD = RT->getDecl();
+ if (RD->isInvalidDecl()) return false;
+ const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
+ unsigned i = MemberDecl->getFieldIndex();
+ assert(i < RL.getFieldCount() && "offsetof field in wrong type");
+ Result += Info.Ctx.toCharUnitsFromBits(RL.getFieldOffset(i));
+ CurrentType = MemberDecl->getType().getNonReferenceType();
+ break;
+ }
+
+ case OffsetOfNode::Identifier:
+ llvm_unreachable("dependent __builtin_offsetof");
+
+ case OffsetOfNode::Base: {
+ CXXBaseSpecifier *BaseSpec = ON.getBase();
+ if (BaseSpec->isVirtual())
+ return Error(OOE);
+
+ // Find the layout of the class whose base we are looking into.
+ const RecordType *RT = CurrentType->getAs<RecordType>();
+ if (!RT)
+ return Error(OOE);
+ RecordDecl *RD = RT->getDecl();
+ if (RD->isInvalidDecl()) return false;
+ const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
+
+ // Find the base class itself.
+ CurrentType = BaseSpec->getType();
+ const RecordType *BaseRT = CurrentType->getAs<RecordType>();
+ if (!BaseRT)
+ return Error(OOE);
+
+ // Add the offset to the base.
+ Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
+ break;
+ }
+ }
+ }
+ return Success(Result, OOE);
+}
+
+bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
+ switch (E->getOpcode()) {
+ default:
+  // Address, indirect, pre/post inc/dec, etc. are not valid constant exprs.
+ // See C99 6.6p3.
+ return Error(E);
+ case UO_Extension:
+ // FIXME: Should extension allow i-c-e extension expressions in its scope?
+ // If so, we could clear the diagnostic ID.
+ return Visit(E->getSubExpr());
+ case UO_Plus:
+ // The result is just the value.
+ return Visit(E->getSubExpr());
+ case UO_Minus: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+ if (!Result.isInt()) return Error(E);
+ const APSInt &Value = Result.getInt();
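+    // Negating the most negative value (e.g. -INT_MIN) has no in-range
+    // result; widen by one bit so HandleOverflow sees the exact value.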
+ if (Value.isSigned() && Value.isMinSignedValue() &&
+ !HandleOverflow(Info, E, -Value.extend(Value.getBitWidth() + 1),
+ E->getType()))
+ return false;
+ return Success(-Value, E);
+ }
+ case UO_Not: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+ if (!Result.isInt()) return Error(E);
+ return Success(~Result.getInt(), E);
+ }
+ case UO_LNot: {
+ bool bres;
+ if (!EvaluateAsBooleanCondition(E->getSubExpr(), bres, Info))
+ return false;
+ return Success(!bres, E);
+ }
+ }
+}
+
+/// HandleCast - This is used to evaluate implicit or explicit casts where the
+/// result type is integer.
+bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
+ const Expr *SubExpr = E->getSubExpr();
+ QualType DestType = E->getType();
+ QualType SrcType = SubExpr->getType();
+
+ switch (E->getCastKind()) {
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_ReinterpretMemberPointer:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralToFloating:
+ case CK_FloatingCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_BuiltinFnToFnPtr:
+ case CK_ZeroToOCLEvent:
+ case CK_NonAtomicToAtomic:
+ case CK_AddressSpaceConversion:
+ llvm_unreachable("invalid cast kind for integral value");
+
+ case CK_BitCast:
+ case CK_Dependent:
+ case CK_LValueBitCast:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ return Error(E);
+
+ case CK_UserDefinedConversion:
+ case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NoOp:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_MemberPointerToBoolean:
+ case CK_PointerToBoolean:
+ case CK_IntegralToBoolean:
+ case CK_FloatingToBoolean:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToBoolean: {
+ bool BoolResult;
+ if (!EvaluateAsBooleanCondition(SubExpr, BoolResult, Info))
+ return false;
+ return Success(BoolResult, E);
+ }
+
+ case CK_IntegralCast: {
+ if (!Visit(SubExpr))
+ return false;
+
+ if (!Result.isInt()) {
+ // Allow casts of address-of-label differences if they are no-ops
+ // or narrowing. (The narrowing case isn't actually guaranteed to
+ // be constant-evaluatable except in some narrow cases which are hard
+ // to detect here. We let it through on the assumption the user knows
+ // what they are doing.)
+ if (Result.isAddrLabelDiff())
+ return Info.Ctx.getTypeSize(DestType) <= Info.Ctx.getTypeSize(SrcType);
+ // Only allow casts of lvalues if they are lossless.
+ return Info.Ctx.getTypeSize(DestType) == Info.Ctx.getTypeSize(SrcType);
+ }
+
+ return Success(HandleIntToIntCast(Info, E, DestType, SrcType,
+ Result.getInt()), E);
+ }
+
+ case CK_PointerToIntegral: {
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
+
+ LValue LV;
+ if (!EvaluatePointer(SubExpr, LV, Info))
+ return false;
+
+ if (LV.getLValueBase()) {
+ // Only allow based lvalue casts if they are lossless.
+ // FIXME: Allow a larger integer size than the pointer size, and allow
+ // narrowing back down to pointer width in subsequent integral casts.
+ // FIXME: Check integer type's active bits, not its type size.
+ if (Info.Ctx.getTypeSize(DestType) != Info.Ctx.getTypeSize(SrcType))
+ return Error(E);
+
+ LV.Designator.setInvalid();
+ LV.moveInto(Result);
+ return true;
+ }
+
+ APSInt AsInt = Info.Ctx.MakeIntValue(LV.getLValueOffset().getQuantity(),
+ SrcType);
+ return Success(HandleIntToIntCast(Info, E, DestType, SrcType, AsInt), E);
+ }
+
+ case CK_IntegralComplexToReal: {
+ ComplexValue C;
+ if (!EvaluateComplex(SubExpr, C, Info))
+ return false;
+ return Success(C.getComplexIntReal(), E);
+ }
+
+ case CK_FloatingToIntegral: {
+ APFloat F(0.0);
+ if (!EvaluateFloat(SubExpr, F, Info))
+ return false;
+
+ APSInt Value;
+ if (!HandleFloatToIntCast(Info, E, SrcType, F, DestType, Value))
+ return false;
+ return Success(Value, E);
+ }
+ }
+
+ llvm_unreachable("unknown cast resulting in integral value");
+}
+
+bool IntExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
+ if (E->getSubExpr()->getType()->isAnyComplexType()) {
+ ComplexValue LV;
+ if (!EvaluateComplex(E->getSubExpr(), LV, Info))
+ return false;
+ if (!LV.isComplexInt())
+ return Error(E);
+ return Success(LV.getComplexIntReal(), E);
+ }
+
+ return Visit(E->getSubExpr());
+}
+
+bool IntExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
+ if (E->getSubExpr()->getType()->isComplexIntegerType()) {
+ ComplexValue LV;
+ if (!EvaluateComplex(E->getSubExpr(), LV, Info))
+ return false;
+ if (!LV.isComplexInt())
+ return Error(E);
+ return Success(LV.getComplexIntImag(), E);
+ }
+
+ VisitIgnoredValue(E->getSubExpr());
+ return Success(0, E);
+}
+
+bool IntExprEvaluator::VisitSizeOfPackExpr(const SizeOfPackExpr *E) {
+ return Success(E->getPackLength(), E);
+}
+
+bool IntExprEvaluator::VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
+ return Success(E->getValue(), E);
+}
+
+//===----------------------------------------------------------------------===//
+// Float Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+class FloatExprEvaluator
+ : public ExprEvaluatorBase<FloatExprEvaluator> {
+ APFloat &Result;
+public:
+ FloatExprEvaluator(EvalInfo &info, APFloat &result)
+ : ExprEvaluatorBaseTy(info), Result(result) {}
+
+ bool Success(const APValue &V, const Expr *e) {
+ Result = V.getFloat();
+ return true;
+ }
+
+ bool ZeroInitialization(const Expr *E) {
+ Result = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(E->getType()));
+ return true;
+ }
+
+ bool VisitCallExpr(const CallExpr *E);
+
+ bool VisitUnaryOperator(const UnaryOperator *E);
+ bool VisitBinaryOperator(const BinaryOperator *E);
+ bool VisitFloatingLiteral(const FloatingLiteral *E);
+ bool VisitCastExpr(const CastExpr *E);
+
+ bool VisitUnaryReal(const UnaryOperator *E);
+ bool VisitUnaryImag(const UnaryOperator *E);
+
+ // FIXME: Missing: array subscript of vector, member of vector
+};
+} // end anonymous namespace
+
+static bool EvaluateFloat(const Expr* E, APFloat& Result, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isRealFloatingType());
+ return FloatExprEvaluator(Info, Result).Visit(E);
+}
+
+static bool TryEvaluateBuiltinNaN(const ASTContext &Context,
+ QualType ResultTy,
+ const Expr *Arg,
+ bool SNaN,
+ llvm::APFloat &Result) {
+ const StringLiteral *S = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
+ if (!S) return false;
+
+ const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(ResultTy);
+
+ llvm::APInt fill;
+
+ // Treat empty strings as if they were zero.
+ if (S->getString().empty())
+ fill = llvm::APInt(32, 0);
+ else if (S->getString().getAsInteger(0, fill))
+ return false;
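+  // At this point, e.g. __builtin_nan("") yields the default quiet NaN
+  // (payload 0) and __builtin_nan("0x7") requests payload 7.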
+
+ if (Context.getTargetInfo().isNan2008()) {
+ if (SNaN)
+ Result = llvm::APFloat::getSNaN(Sem, false, &fill);
+ else
+ Result = llvm::APFloat::getQNaN(Sem, false, &fill);
+ } else {
+ // Prior to IEEE 754-2008, architectures were allowed to choose whether
+ // the first bit of their significand was set for qNaN or sNaN. MIPS chose
+ // a different encoding to what became a standard in 2008, and for pre-
+    // 2008 revisions, MIPS interpreted sNaN-2008 as qNaN and qNaN-2008 as
+ // sNaN. This is now known as "legacy NaN" encoding.
+ if (SNaN)
+ Result = llvm::APFloat::getQNaN(Sem, false, &fill);
+ else
+ Result = llvm::APFloat::getSNaN(Sem, false, &fill);
+ }
+
+ return true;
+}
+
+bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
+ switch (E->getBuiltinCallee()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCallExpr(E);
+
+ case Builtin::BI__builtin_huge_val:
+ case Builtin::BI__builtin_huge_valf:
+ case Builtin::BI__builtin_huge_vall:
+ case Builtin::BI__builtin_inf:
+ case Builtin::BI__builtin_inff:
+ case Builtin::BI__builtin_infl: {
+ const llvm::fltSemantics &Sem =
+ Info.Ctx.getFloatTypeSemantics(E->getType());
+ Result = llvm::APFloat::getInf(Sem);
+ return true;
+ }
+
+ case Builtin::BI__builtin_nans:
+ case Builtin::BI__builtin_nansf:
+ case Builtin::BI__builtin_nansl:
+ if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
+ true, Result))
+ return Error(E);
+ return true;
+
+ case Builtin::BI__builtin_nan:
+ case Builtin::BI__builtin_nanf:
+ case Builtin::BI__builtin_nanl:
+    // If this is __builtin_nan(), turn this into a NaN; otherwise we
+    // can't constant fold it.
+ if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
+ false, Result))
+ return Error(E);
+ return true;
+
+ case Builtin::BI__builtin_fabs:
+ case Builtin::BI__builtin_fabsf:
+ case Builtin::BI__builtin_fabsl:
+ if (!EvaluateFloat(E->getArg(0), Result, Info))
+ return false;
+
+ if (Result.isNegative())
+ Result.changeSign();
+ return true;
+
+ // FIXME: Builtin::BI__builtin_powi
+ // FIXME: Builtin::BI__builtin_powif
+ // FIXME: Builtin::BI__builtin_powil
+
+ case Builtin::BI__builtin_copysign:
+ case Builtin::BI__builtin_copysignf:
+ case Builtin::BI__builtin_copysignl: {
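+    // e.g. __builtin_copysign(3.0, -0.0) folds to -3.0: the magnitude of
+    // the first argument with the sign of the second.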
+ APFloat RHS(0.);
+ if (!EvaluateFloat(E->getArg(0), Result, Info) ||
+ !EvaluateFloat(E->getArg(1), RHS, Info))
+ return false;
+ Result.copySign(RHS);
+ return true;
+ }
+ }
+}
+
+bool FloatExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
+ if (E->getSubExpr()->getType()->isAnyComplexType()) {
+ ComplexValue CV;
+ if (!EvaluateComplex(E->getSubExpr(), CV, Info))
+ return false;
+ Result = CV.FloatReal;
+ return true;
+ }
+
+ return Visit(E->getSubExpr());
+}
+
+bool FloatExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
+ if (E->getSubExpr()->getType()->isAnyComplexType()) {
+ ComplexValue CV;
+ if (!EvaluateComplex(E->getSubExpr(), CV, Info))
+ return false;
+ Result = CV.FloatImag;
+ return true;
+ }
+
+ VisitIgnoredValue(E->getSubExpr());
+ const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(E->getType());
+ Result = llvm::APFloat::getZero(Sem);
+ return true;
+}
+
+bool FloatExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
+ switch (E->getOpcode()) {
+ default: return Error(E);
+ case UO_Plus:
+ return EvaluateFloat(E->getSubExpr(), Result, Info);
+ case UO_Minus:
+ if (!EvaluateFloat(E->getSubExpr(), Result, Info))
+ return false;
+ Result.changeSign();
+ return true;
+ }
+}
+
+bool FloatExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+
+ APFloat RHS(0.0);
+ bool LHSOK = EvaluateFloat(E->getLHS(), Result, Info);
+ if (!LHSOK && !Info.keepEvaluatingAfterFailure())
+ return false;
+ return EvaluateFloat(E->getRHS(), RHS, Info) && LHSOK &&
+ handleFloatFloatBinOp(Info, E, Result, E->getOpcode(), RHS);
+}
+
+bool FloatExprEvaluator::VisitFloatingLiteral(const FloatingLiteral *E) {
+ Result = E->getValue();
+ return true;
+}
+
+bool FloatExprEvaluator::VisitCastExpr(const CastExpr *E) {
+ const Expr* SubExpr = E->getSubExpr();
+
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_IntegralToFloating: {
+ APSInt IntResult;
+ return EvaluateInteger(SubExpr, IntResult, Info) &&
+ HandleIntToFloatCast(Info, E, SubExpr->getType(), IntResult,
+ E->getType(), Result);
+ }
+
+ case CK_FloatingCast: {
+ if (!Visit(SubExpr))
+ return false;
+ return HandleFloatToFloatCast(Info, E, SubExpr->getType(), E->getType(),
+ Result);
+ }
+
+ case CK_FloatingComplexToReal: {
+ ComplexValue V;
+ if (!EvaluateComplex(SubExpr, V, Info))
+ return false;
+ Result = V.getComplexFloatReal();
+ return true;
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Complex Evaluation (for float and integer)
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ComplexExprEvaluator
+ : public ExprEvaluatorBase<ComplexExprEvaluator> {
+ ComplexValue &Result;
+
+public:
+ ComplexExprEvaluator(EvalInfo &info, ComplexValue &Result)
+ : ExprEvaluatorBaseTy(info), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *e) {
+ Result.setFrom(V);
+ return true;
+ }
+
+ bool ZeroInitialization(const Expr *E);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ bool VisitImaginaryLiteral(const ImaginaryLiteral *E);
+ bool VisitCastExpr(const CastExpr *E);
+ bool VisitBinaryOperator(const BinaryOperator *E);
+ bool VisitUnaryOperator(const UnaryOperator *E);
+ bool VisitInitListExpr(const InitListExpr *E);
+};
+} // end anonymous namespace
+
+static bool EvaluateComplex(const Expr *E, ComplexValue &Result,
+ EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isAnyComplexType());
+ return ComplexExprEvaluator(Info, Result).Visit(E);
+}
+
+bool ComplexExprEvaluator::ZeroInitialization(const Expr *E) {
+ QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
+ if (ElemTy->isRealFloatingType()) {
+ Result.makeComplexFloat();
+ APFloat Zero = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(ElemTy));
+ Result.FloatReal = Zero;
+ Result.FloatImag = Zero;
+ } else {
+ Result.makeComplexInt();
+ APSInt Zero = Info.Ctx.MakeIntValue(0, ElemTy);
+ Result.IntReal = Zero;
+ Result.IntImag = Zero;
+ }
+ return true;
+}
+
+bool ComplexExprEvaluator::VisitImaginaryLiteral(const ImaginaryLiteral *E) {
+ const Expr* SubExpr = E->getSubExpr();
+
+ if (SubExpr->getType()->isRealFloatingType()) {
+ Result.makeComplexFloat();
+ APFloat &Imag = Result.FloatImag;
+ if (!EvaluateFloat(SubExpr, Imag, Info))
+ return false;
+
+ Result.FloatReal = APFloat(Imag.getSemantics());
+ return true;
+ } else {
+ assert(SubExpr->getType()->isIntegerType() &&
+ "Unexpected imaginary literal.");
+
+ Result.makeComplexInt();
+ APSInt &Imag = Result.IntImag;
+ if (!EvaluateInteger(SubExpr, Imag, Info))
+ return false;
+
+ Result.IntReal = APSInt(Imag.getBitWidth(), !Imag.isSigned());
+ return true;
+ }
+}
+
+bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
+
+ switch (E->getCastKind()) {
+ case CK_BitCast:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ case CK_BuiltinFnToFnPtr:
+ case CK_ZeroToOCLEvent:
+ case CK_NonAtomicToAtomic:
+ case CK_AddressSpaceConversion:
+ llvm_unreachable("invalid cast kind for complex value");
+
+ case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NoOp:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_Dependent:
+ case CK_LValueBitCast:
+ case CK_UserDefinedConversion:
+ return Error(E);
+
+ case CK_FloatingRealToComplex: {
+ APFloat &Real = Result.FloatReal;
+ if (!EvaluateFloat(E->getSubExpr(), Real, Info))
+ return false;
+
+ Result.makeComplexFloat();
+ Result.FloatImag = APFloat(Real.getSemantics());
+ return true;
+ }
+
+ case CK_FloatingComplexCast: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+
+ QualType To = E->getType()->getAs<ComplexType>()->getElementType();
+ QualType From
+ = E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
+
+ return HandleFloatToFloatCast(Info, E, From, To, Result.FloatReal) &&
+ HandleFloatToFloatCast(Info, E, From, To, Result.FloatImag);
+ }
+
+ case CK_FloatingComplexToIntegralComplex: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+
+ QualType To = E->getType()->getAs<ComplexType>()->getElementType();
+ QualType From
+ = E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
+ Result.makeComplexInt();
+ return HandleFloatToIntCast(Info, E, From, Result.FloatReal,
+ To, Result.IntReal) &&
+ HandleFloatToIntCast(Info, E, From, Result.FloatImag,
+ To, Result.IntImag);
+ }
+
+ case CK_IntegralRealToComplex: {
+ APSInt &Real = Result.IntReal;
+ if (!EvaluateInteger(E->getSubExpr(), Real, Info))
+ return false;
+
+ Result.makeComplexInt();
+ Result.IntImag = APSInt(Real.getBitWidth(), !Real.isSigned());
+ return true;
+ }
+
+ case CK_IntegralComplexCast: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+
+ QualType To = E->getType()->getAs<ComplexType>()->getElementType();
+ QualType From
+ = E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
+
+ Result.IntReal = HandleIntToIntCast(Info, E, To, From, Result.IntReal);
+ Result.IntImag = HandleIntToIntCast(Info, E, To, From, Result.IntImag);
+ return true;
+ }
+
+ case CK_IntegralComplexToFloatingComplex: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+
+ QualType To = E->getType()->castAs<ComplexType>()->getElementType();
+ QualType From
+ = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();
+ Result.makeComplexFloat();
+ return HandleIntToFloatCast(Info, E, From, Result.IntReal,
+ To, Result.FloatReal) &&
+ HandleIntToFloatCast(Info, E, From, Result.IntImag,
+ To, Result.FloatImag);
+ }
+ }
+
+ llvm_unreachable("unknown cast resulting in complex value");
+}
+
+bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+
+  // Track whether the LHS or RHS is real at the type system level. When this
+  // is the case, we can simplify our evaluation strategy.
+ bool LHSReal = false, RHSReal = false;
+
+ bool LHSOK;
+ if (E->getLHS()->getType()->isRealFloatingType()) {
+ LHSReal = true;
+ APFloat &Real = Result.FloatReal;
+ LHSOK = EvaluateFloat(E->getLHS(), Real, Info);
+ if (LHSOK) {
+ Result.makeComplexFloat();
+ Result.FloatImag = APFloat(Real.getSemantics());
+ }
+ } else {
+ LHSOK = Visit(E->getLHS());
+ }
+ if (!LHSOK && !Info.keepEvaluatingAfterFailure())
+ return false;
+
+ ComplexValue RHS;
+ if (E->getRHS()->getType()->isRealFloatingType()) {
+ RHSReal = true;
+ APFloat &Real = RHS.FloatReal;
+ if (!EvaluateFloat(E->getRHS(), Real, Info) || !LHSOK)
+ return false;
+ RHS.makeComplexFloat();
+ RHS.FloatImag = APFloat(Real.getSemantics());
+ } else if (!EvaluateComplex(E->getRHS(), RHS, Info) || !LHSOK)
+ return false;
+
+ assert(!(LHSReal && RHSReal) &&
+ "Cannot have both operands of a complex operation be real.");
+ switch (E->getOpcode()) {
+ default: return Error(E);
+ case BO_Add:
+ if (Result.isComplexFloat()) {
+ Result.getComplexFloatReal().add(RHS.getComplexFloatReal(),
+ APFloat::rmNearestTiesToEven);
+ if (LHSReal)
+ Result.getComplexFloatImag() = RHS.getComplexFloatImag();
+ else if (!RHSReal)
+ Result.getComplexFloatImag().add(RHS.getComplexFloatImag(),
+ APFloat::rmNearestTiesToEven);
+ } else {
+ Result.getComplexIntReal() += RHS.getComplexIntReal();
+ Result.getComplexIntImag() += RHS.getComplexIntImag();
+ }
+ break;
+ case BO_Sub:
+ if (Result.isComplexFloat()) {
+ Result.getComplexFloatReal().subtract(RHS.getComplexFloatReal(),
+ APFloat::rmNearestTiesToEven);
+ if (LHSReal) {
+ Result.getComplexFloatImag() = RHS.getComplexFloatImag();
+ Result.getComplexFloatImag().changeSign();
+ } else if (!RHSReal) {
+ Result.getComplexFloatImag().subtract(RHS.getComplexFloatImag(),
+ APFloat::rmNearestTiesToEven);
+ }
+ } else {
+ Result.getComplexIntReal() -= RHS.getComplexIntReal();
+ Result.getComplexIntImag() -= RHS.getComplexIntImag();
+ }
+ break;
+ case BO_Mul:
+ if (Result.isComplexFloat()) {
+ // This is an implementation of complex multiplication according to the
+      // constraints laid out in C11 Annex G. The implementation uses the
+ // following naming scheme:
+ // (a + ib) * (c + id)
+ ComplexValue LHS = Result;
+ APFloat &A = LHS.getComplexFloatReal();
+ APFloat &B = LHS.getComplexFloatImag();
+ APFloat &C = RHS.getComplexFloatReal();
+ APFloat &D = RHS.getComplexFloatImag();
+ APFloat &ResR = Result.getComplexFloatReal();
+ APFloat &ResI = Result.getComplexFloatImag();
+ if (LHSReal) {
+ assert(!RHSReal && "Cannot have two real operands for a complex op!");
+ ResR = A * C;
+ ResI = A * D;
+ } else if (RHSReal) {
+ ResR = C * A;
+ ResI = C * B;
+ } else {
+ // In the fully general case, we need to handle NaNs and infinities
+ // robustly.
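+        // e.g. (inf + inf*i) * (0 + 1*i): the naive products make both
+        // parts NaN, yet Annex G requires an infinite result; the Recalc
+        // path below substitutes +/-1 and +/-0 for the special operands
+        // and rescales by infinity to recover (-inf + inf*i).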
+ APFloat AC = A * C;
+ APFloat BD = B * D;
+ APFloat AD = A * D;
+ APFloat BC = B * C;
+ ResR = AC - BD;
+ ResI = AD + BC;
+ if (ResR.isNaN() && ResI.isNaN()) {
+ bool Recalc = false;
+ if (A.isInfinity() || B.isInfinity()) {
+ A = APFloat::copySign(
+ APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0), A);
+ B = APFloat::copySign(
+ APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0), B);
+ if (C.isNaN())
+ C = APFloat::copySign(APFloat(C.getSemantics()), C);
+ if (D.isNaN())
+ D = APFloat::copySign(APFloat(D.getSemantics()), D);
+ Recalc = true;
+ }
+ if (C.isInfinity() || D.isInfinity()) {
+ C = APFloat::copySign(
+ APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0), C);
+ D = APFloat::copySign(
+ APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0), D);
+ if (A.isNaN())
+ A = APFloat::copySign(APFloat(A.getSemantics()), A);
+ if (B.isNaN())
+ B = APFloat::copySign(APFloat(B.getSemantics()), B);
+ Recalc = true;
+ }
+ if (!Recalc && (AC.isInfinity() || BD.isInfinity() ||
+ AD.isInfinity() || BC.isInfinity())) {
+ if (A.isNaN())
+ A = APFloat::copySign(APFloat(A.getSemantics()), A);
+ if (B.isNaN())
+ B = APFloat::copySign(APFloat(B.getSemantics()), B);
+ if (C.isNaN())
+ C = APFloat::copySign(APFloat(C.getSemantics()), C);
+ if (D.isNaN())
+ D = APFloat::copySign(APFloat(D.getSemantics()), D);
+ Recalc = true;
+ }
+ if (Recalc) {
+ ResR = APFloat::getInf(A.getSemantics()) * (A * C - B * D);
+ ResI = APFloat::getInf(A.getSemantics()) * (A * D + B * C);
+ }
+ }
+ }
+ } else {
+ ComplexValue LHS = Result;
+ Result.getComplexIntReal() =
+ (LHS.getComplexIntReal() * RHS.getComplexIntReal() -
+ LHS.getComplexIntImag() * RHS.getComplexIntImag());
+ Result.getComplexIntImag() =
+ (LHS.getComplexIntReal() * RHS.getComplexIntImag() +
+ LHS.getComplexIntImag() * RHS.getComplexIntReal());
+ }
+ break;
+ case BO_Div:
+ if (Result.isComplexFloat()) {
+ // This is an implementation of complex division according to the
+      // constraints laid out in C11 Annex G. The implementation uses the
+ // following naming scheme:
+ // (a + ib) / (c + id)
+ ComplexValue LHS = Result;
+ APFloat &A = LHS.getComplexFloatReal();
+ APFloat &B = LHS.getComplexFloatImag();
+ APFloat &C = RHS.getComplexFloatReal();
+ APFloat &D = RHS.getComplexFloatImag();
+ APFloat &ResR = Result.getComplexFloatReal();
+ APFloat &ResI = Result.getComplexFloatImag();
+ if (RHSReal) {
+ ResR = A / C;
+ ResI = B / C;
+ } else {
+ if (LHSReal) {
+ // No real optimizations we can do here, stub out with zero.
+ B = APFloat::getZero(A.getSemantics());
+ }
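+        // Scale the denominator by 2^-ilogb(max(|c|,|d|)), as in the C11
+        // Annex G sample implementation, so that C*C + D*D below cannot
+        // prematurely overflow or underflow; the quotient is rescaled by
+        // -DenomLogB afterwards.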
+ int DenomLogB = 0;
+ APFloat MaxCD = maxnum(abs(C), abs(D));
+ if (MaxCD.isFinite()) {
+ DenomLogB = ilogb(MaxCD);
+ C = scalbn(C, -DenomLogB);
+ D = scalbn(D, -DenomLogB);
+ }
+ APFloat Denom = C * C + D * D;
+ ResR = scalbn((A * C + B * D) / Denom, -DenomLogB);
+ ResI = scalbn((B * C - A * D) / Denom, -DenomLogB);
+ if (ResR.isNaN() && ResI.isNaN()) {
+ if (Denom.isPosZero() && (!A.isNaN() || !B.isNaN())) {
+ ResR = APFloat::getInf(ResR.getSemantics(), C.isNegative()) * A;
+ ResI = APFloat::getInf(ResR.getSemantics(), C.isNegative()) * B;
+ } else if ((A.isInfinity() || B.isInfinity()) && C.isFinite() &&
+ D.isFinite()) {
+ A = APFloat::copySign(
+ APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0), A);
+ B = APFloat::copySign(
+ APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0), B);
+ ResR = APFloat::getInf(ResR.getSemantics()) * (A * C + B * D);
+ ResI = APFloat::getInf(ResI.getSemantics()) * (B * C - A * D);
+ } else if (MaxCD.isInfinity() && A.isFinite() && B.isFinite()) {
+ C = APFloat::copySign(
+ APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0), C);
+ D = APFloat::copySign(
+ APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0), D);
+ ResR = APFloat::getZero(ResR.getSemantics()) * (A * C + B * D);
+ ResI = APFloat::getZero(ResI.getSemantics()) * (B * C - A * D);
+ }
+ }
+ }
+ } else {
+ if (RHS.getComplexIntReal() == 0 && RHS.getComplexIntImag() == 0)
+ return Error(E, diag::note_expr_divide_by_zero);
+
+ ComplexValue LHS = Result;
+ APSInt Den = RHS.getComplexIntReal() * RHS.getComplexIntReal() +
+ RHS.getComplexIntImag() * RHS.getComplexIntImag();
+ Result.getComplexIntReal() =
+ (LHS.getComplexIntReal() * RHS.getComplexIntReal() +
+ LHS.getComplexIntImag() * RHS.getComplexIntImag()) / Den;
+ Result.getComplexIntImag() =
+ (LHS.getComplexIntImag() * RHS.getComplexIntReal() -
+ LHS.getComplexIntReal() * RHS.getComplexIntImag()) / Den;
+ }
+ break;
+ }
+
+ return true;
+}
+
+bool ComplexExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
+ // Get the operand value into 'Result'.
+ if (!Visit(E->getSubExpr()))
+ return false;
+
+ switch (E->getOpcode()) {
+ default:
+ return Error(E);
+ case UO_Extension:
+ return true;
+ case UO_Plus:
+ // The result is always just the subexpr.
+ return true;
+ case UO_Minus:
+ if (Result.isComplexFloat()) {
+ Result.getComplexFloatReal().changeSign();
+ Result.getComplexFloatImag().changeSign();
+ }
+ else {
+ Result.getComplexIntReal() = -Result.getComplexIntReal();
+ Result.getComplexIntImag() = -Result.getComplexIntImag();
+ }
+ return true;
+ case UO_Not:
+ if (Result.isComplexFloat())
+ Result.getComplexFloatImag().changeSign();
+ else
+ Result.getComplexIntImag() = -Result.getComplexIntImag();
+ return true;
+ }
+}
+
+bool ComplexExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
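+  // A two-element initializer list supplies the real and imaginary parts
+  // directly, e.g. "_Complex double d = { 1.0, 2.0 };".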
+ if (E->getNumInits() == 2) {
+ if (E->getType()->isComplexType()) {
+ Result.makeComplexFloat();
+ if (!EvaluateFloat(E->getInit(0), Result.FloatReal, Info))
+ return false;
+ if (!EvaluateFloat(E->getInit(1), Result.FloatImag, Info))
+ return false;
+ } else {
+ Result.makeComplexInt();
+ if (!EvaluateInteger(E->getInit(0), Result.IntReal, Info))
+ return false;
+ if (!EvaluateInteger(E->getInit(1), Result.IntImag, Info))
+ return false;
+ }
+ return true;
+ }
+ return ExprEvaluatorBaseTy::VisitInitListExpr(E);
+}
+
+//===----------------------------------------------------------------------===//
+// Atomic expression evaluation, essentially just handling the NonAtomicToAtomic
+// implicit conversion.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class AtomicExprEvaluator :
+ public ExprEvaluatorBase<AtomicExprEvaluator> {
+ APValue &Result;
+public:
+ AtomicExprEvaluator(EvalInfo &Info, APValue &Result)
+ : ExprEvaluatorBaseTy(Info), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *E) {
+ Result = V;
+ return true;
+ }
+
+ bool ZeroInitialization(const Expr *E) {
+ ImplicitValueInitExpr VIE(
+ E->getType()->castAs<AtomicType>()->getValueType());
+ return Evaluate(Result, Info, &VIE);
+ }
+
+ bool VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+ case CK_NonAtomicToAtomic:
+ return Evaluate(Result, Info, E->getSubExpr());
+ }
+ }
+};
+} // end anonymous namespace
+
+static bool EvaluateAtomic(const Expr *E, APValue &Result, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isAtomicType());
+ return AtomicExprEvaluator(Info, Result).Visit(E);
+}
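+// e.g. an initializer for "_Atomic(int) x" arrives wrapped in a
+// CK_NonAtomicToAtomic cast, which the evaluator above simply unwraps.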
+
+//===----------------------------------------------------------------------===//
+// Void expression evaluation, primarily for a cast to void on the LHS of a
+// comma operator
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VoidExprEvaluator
+ : public ExprEvaluatorBase<VoidExprEvaluator> {
+public:
+ VoidExprEvaluator(EvalInfo &Info) : ExprEvaluatorBaseTy(Info) {}
+
+ bool Success(const APValue &V, const Expr *e) { return true; }
+
+ bool VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+ case CK_ToVoid:
+ VisitIgnoredValue(E->getSubExpr());
+ return true;
+ }
+ }
+
+ bool VisitCallExpr(const CallExpr *E) {
+ switch (E->getBuiltinCallee()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCallExpr(E);
+ case Builtin::BI__assume:
+ case Builtin::BI__builtin_assume:
+ // The argument is not evaluated!
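+      // e.g. __builtin_assume(g() > 0) folds even when g() itself cannot
+      // be constant-evaluated.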
+ return true;
+ }
+ }
+};
+} // end anonymous namespace
+
+static bool EvaluateVoid(const Expr *E, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isVoidType());
+ return VoidExprEvaluator(Info).Visit(E);
+}
+
+//===----------------------------------------------------------------------===//
+// Top level Expr::EvaluateAsRValue method.
+//===----------------------------------------------------------------------===//
+
+static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
+ // In C, function designators are not lvalues, but we evaluate them as if they
+ // are.
+ QualType T = E->getType();
+ if (E->isGLValue() || T->isFunctionType()) {
+ LValue LV;
+ if (!EvaluateLValue(E, LV, Info))
+ return false;
+ LV.moveInto(Result);
+ } else if (T->isVectorType()) {
+ if (!EvaluateVector(E, Result, Info))
+ return false;
+ } else if (T->isIntegralOrEnumerationType()) {
+ if (!IntExprEvaluator(Info, Result).Visit(E))
+ return false;
+ } else if (T->hasPointerRepresentation()) {
+ LValue LV;
+ if (!EvaluatePointer(E, LV, Info))
+ return false;
+ LV.moveInto(Result);
+ } else if (T->isRealFloatingType()) {
+ llvm::APFloat F(0.0);
+ if (!EvaluateFloat(E, F, Info))
+ return false;
+ Result = APValue(F);
+ } else if (T->isAnyComplexType()) {
+ ComplexValue C;
+ if (!EvaluateComplex(E, C, Info))
+ return false;
+ C.moveInto(Result);
+ } else if (T->isMemberPointerType()) {
+ MemberPtr P;
+ if (!EvaluateMemberPointer(E, P, Info))
+ return false;
+ P.moveInto(Result);
+ return true;
+ } else if (T->isArrayType()) {
+ LValue LV;
+ LV.set(E, Info.CurrentCall->Index);
+ APValue &Value = Info.CurrentCall->createTemporary(E, false);
+ if (!EvaluateArray(E, LV, Value, Info))
+ return false;
+ Result = Value;
+ } else if (T->isRecordType()) {
+ LValue LV;
+ LV.set(E, Info.CurrentCall->Index);
+ APValue &Value = Info.CurrentCall->createTemporary(E, false);
+ if (!EvaluateRecord(E, LV, Value, Info))
+ return false;
+ Result = Value;
+ } else if (T->isVoidType()) {
+ if (!Info.getLangOpts().CPlusPlus11)
+ Info.CCEDiag(E, diag::note_constexpr_nonliteral)
+ << E->getType();
+ if (!EvaluateVoid(E, Info))
+ return false;
+ } else if (T->isAtomicType()) {
+ if (!EvaluateAtomic(E, Result, Info))
+ return false;
+ } else if (Info.getLangOpts().CPlusPlus11) {
+ Info.Diag(E, diag::note_constexpr_nonliteral) << E->getType();
+ return false;
+ } else {
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+
+ return true;
+}
+
+/// EvaluateInPlace - Evaluate an expression in-place in an APValue. In some
+/// cases, the in-place evaluation is essential, since later initializers for
+/// an object can indirectly refer to subobjects which were initialized earlier.
+static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, const LValue &This,
+ const Expr *E, bool AllowNonLiteralTypes) {
+ assert(!E->isValueDependent());
+
+ if (!AllowNonLiteralTypes && !CheckLiteralType(Info, E, &This))
+ return false;
+
+ if (E->isRValue()) {
+ // Evaluate arrays and record types in-place, so that later initializers can
+ // refer to earlier-initialized members of the object.
+ if (E->getType()->isArrayType())
+ return EvaluateArray(E, This, Result, Info);
+ else if (E->getType()->isRecordType())
+ return EvaluateRecord(E, This, Result, Info);
+ }
+
+ // For any other type, in-place evaluation is unimportant.
+ return Evaluate(Result, Info, E);
+}
+
+/// EvaluateAsRValue - Try to evaluate this expression, performing an implicit
+/// lvalue-to-rvalue cast if it is an lvalue.
+static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result) {
+ if (E->getType().isNull())
+ return false;
+
+ if (!CheckLiteralType(Info, E))
+ return false;
+
+ if (!::Evaluate(Result, Info, E))
+ return false;
+
+ if (E->isGLValue()) {
+ LValue LV;
+ LV.setFrom(Info.Ctx, Result);
+ if (!handleLValueToRValueConversion(Info, E, E->getType(), LV, Result))
+ return false;
+ }
+
+ // Check this core constant expression is a constant expression.
+ return CheckConstantExpression(Info, E->getExprLoc(), E->getType(), Result);
+}
+
+static bool FastEvaluateAsRValue(const Expr *Exp, Expr::EvalResult &Result,
+ const ASTContext &Ctx, bool &IsConst) {
+ // Fast-path evaluations of integer literals, since we sometimes see files
+ // containing vast quantities of these.
+ if (const IntegerLiteral *L = dyn_cast<IntegerLiteral>(Exp)) {
+ Result.Val = APValue(APSInt(L->getValue(),
+ L->getType()->isUnsignedIntegerType()));
+ IsConst = true;
+ return true;
+ }
+
+  // This case should be rare, but we need to check it before we check
+  // the type below.
+ if (Exp->getType().isNull()) {
+ IsConst = false;
+ return true;
+ }
+
+ // FIXME: Evaluating values of large array and record types can cause
+ // performance problems. Only do so in C++11 for now.
+ if (Exp->isRValue() && (Exp->getType()->isArrayType() ||
+ Exp->getType()->isRecordType()) &&
+ !Ctx.getLangOpts().CPlusPlus11) {
+ IsConst = false;
+ return true;
+ }
+ return false;
+}
+
+
+/// EvaluateAsRValue - Return true if this is a constant which we can fold using
+/// any crazy technique (that has nothing to do with language standards) that
+/// we want to. If this function returns true, it returns the folded constant
+/// in Result. If this expression is a glvalue, an lvalue-to-rvalue conversion
+/// will be applied to the result.
+bool Expr::EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx) const {
+ bool IsConst;
+ if (FastEvaluateAsRValue(this, Result, Ctx, IsConst))
+ return IsConst;
+
+ EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects);
+ return ::EvaluateAsRValue(Info, this, Result.Val);
+}
+
+bool Expr::EvaluateAsBooleanCondition(bool &Result,
+ const ASTContext &Ctx) const {
+ EvalResult Scratch;
+ return EvaluateAsRValue(Scratch, Ctx) &&
+ HandleConversionToBool(Scratch.Val, Result);
+}
+
+static bool hasUnacceptableSideEffect(Expr::EvalStatus &Result,
+ Expr::SideEffectsKind SEK) {
+ return (SEK < Expr::SE_AllowSideEffects && Result.HasSideEffects) ||
+ (SEK < Expr::SE_AllowUndefinedBehavior && Result.HasUndefinedBehavior);
+}
+
+bool Expr::EvaluateAsInt(APSInt &Result, const ASTContext &Ctx,
+ SideEffectsKind AllowSideEffects) const {
+ if (!getType()->isIntegralOrEnumerationType())
+ return false;
+
+ EvalResult ExprResult;
+ if (!EvaluateAsRValue(ExprResult, Ctx) || !ExprResult.Val.isInt() ||
+ hasUnacceptableSideEffect(ExprResult, AllowSideEffects))
+ return false;
+
+ Result = ExprResult.Val.getInt();
+ return true;
+}
+
+bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx) const {
+ EvalInfo Info(Ctx, Result, EvalInfo::EM_ConstantFold);
+
+ LValue LV;
+ if (!EvaluateLValue(this, LV, Info) || Result.HasSideEffects ||
+ !CheckLValueConstantExpression(Info, getExprLoc(),
+ Ctx.getLValueReferenceType(getType()), LV))
+ return false;
+
+ LV.moveInto(Result.Val);
+ return true;
+}
+
+bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
+ const VarDecl *VD,
+ SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
+ // FIXME: Evaluating initializers for large array and record types can cause
+ // performance problems. Only do so in C++11 for now.
+ if (isRValue() && (getType()->isArrayType() || getType()->isRecordType()) &&
+ !Ctx.getLangOpts().CPlusPlus11)
+ return false;
+
+ Expr::EvalStatus EStatus;
+ EStatus.Diag = &Notes;
+
+ EvalInfo InitInfo(Ctx, EStatus, VD->isConstexpr()
+ ? EvalInfo::EM_ConstantExpression
+ : EvalInfo::EM_ConstantFold);
+ InitInfo.setEvaluatingDecl(VD, Value);
+
+ LValue LVal;
+ LVal.set(VD);
+
+ // C++11 [basic.start.init]p2:
+ // Variables with static storage duration or thread storage duration shall be
+ // zero-initialized before any other initialization takes place.
+ // This behavior is not present in C.
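+  // e.g. for a static "struct { int a, b; } s = { 1 };" the zero-init
+  // pass runs first, so 'b' already holds 0 while the initializer for
+  // 'a' is evaluated in place.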
+ if (Ctx.getLangOpts().CPlusPlus && !VD->hasLocalStorage() &&
+ !VD->getType()->isReferenceType()) {
+ ImplicitValueInitExpr VIE(VD->getType());
+ if (!EvaluateInPlace(Value, InitInfo, LVal, &VIE,
+ /*AllowNonLiteralTypes=*/true))
+ return false;
+ }
+
+ if (!EvaluateInPlace(Value, InitInfo, LVal, this,
+ /*AllowNonLiteralTypes=*/true) ||
+ EStatus.HasSideEffects)
+ return false;
+
+ return CheckConstantExpression(InitInfo, VD->getLocation(), VD->getType(),
+ Value);
+}
+
+/// isEvaluatable - Call EvaluateAsRValue to see if this expression can be
+/// constant folded, but discard the result.
+bool Expr::isEvaluatable(const ASTContext &Ctx, SideEffectsKind SEK) const {
+ EvalResult Result;
+ return EvaluateAsRValue(Result, Ctx) &&
+ !hasUnacceptableSideEffect(Result, SEK);
+}
+
+APSInt Expr::EvaluateKnownConstInt(const ASTContext &Ctx,
+ SmallVectorImpl<PartialDiagnosticAt> *Diag) const {
+ EvalResult EvalResult;
+ EvalResult.Diag = Diag;
+ bool Result = EvaluateAsRValue(EvalResult, Ctx);
+ (void)Result;
+ assert(Result && "Could not evaluate expression");
+ assert(EvalResult.Val.isInt() && "Expression did not evaluate to integer");
+
+ return EvalResult.Val.getInt();
+}
+
+void Expr::EvaluateForOverflow(const ASTContext &Ctx) const {
+ bool IsConst;
+ EvalResult EvalResult;
+ if (!FastEvaluateAsRValue(this, EvalResult, Ctx, IsConst)) {
+ EvalInfo Info(Ctx, EvalResult, EvalInfo::EM_EvaluateForOverflow);
+ (void)::EvaluateAsRValue(Info, this, EvalResult.Val);
+ }
+}
+
+bool Expr::EvalResult::isGlobalLValue() const {
+ assert(Val.isLValue());
+ return IsGlobalLValue(Val.getLValueBase());
+}
+
+
+/// isIntegerConstantExpr - This recursive routine tests whether an expression
+/// is an integer constant expression.
+///
+/// FIXME: Pass up a reason why! Invalid operation in i-c-e, division by zero,
+/// comma, etc.
+
+// CheckICE - This function does the fundamental ICE checking: the returned
+// ICEDiag contains an ICEKind indicating whether the expression is an ICE,
+// and a (possibly null) SourceLocation indicating the location of the problem.
+//
+// Note that to reduce code duplication, this helper does no evaluation
+// itself; the caller checks whether the expression is evaluatable, and
+// in the rare cases where CheckICE actually cares about the evaluated
+// value, it calls into Evaluate.
+
+namespace {
+
+enum ICEKind {
+ /// This expression is an ICE.
+ IK_ICE,
+ /// This expression is not an ICE, but if it isn't evaluated, it's
+ /// a legal subexpression for an ICE. This return value is used to handle
+ /// the comma operator in C99 mode, and non-constant subexpressions.
+ IK_ICEIfUnevaluated,
+ /// This expression is not an ICE, and is not a legal subexpression for one.
+ IK_NotICE
+};
+
+struct ICEDiag {
+ ICEKind Kind;
+ SourceLocation Loc;
+
+ ICEDiag(ICEKind IK, SourceLocation l) : Kind(IK), Loc(l) {}
+};
+
+}
+
+static ICEDiag NoDiag() { return ICEDiag(IK_ICE, SourceLocation()); }
+
+static ICEDiag Worst(ICEDiag A, ICEDiag B) { return A.Kind >= B.Kind ? A : B; }
+
+static ICEDiag CheckEvalInICE(const Expr* E, const ASTContext &Ctx) {
+ Expr::EvalResult EVResult;
+ if (!E->EvaluateAsRValue(EVResult, Ctx) || EVResult.HasSideEffects ||
+ !EVResult.Val.isInt())
+ return ICEDiag(IK_NotICE, E->getLocStart());
+
+ return NoDiag();
+}
+
+static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
+ assert(!E->isValueDependent() && "Should not see value dependent exprs!");
+ if (!E->getType()->isIntegralOrEnumerationType())
+ return ICEDiag(IK_NotICE, E->getLocStart());
+
+ switch (E->getStmtClass()) {
+#define ABSTRACT_STMT(Node)
+#define STMT(Node, Base) case Expr::Node##Class:
+#define EXPR(Node, Base)
+#include "clang/AST/StmtNodes.inc"
+ case Expr::PredefinedExprClass:
+ case Expr::FloatingLiteralClass:
+ case Expr::ImaginaryLiteralClass:
+ case Expr::StringLiteralClass:
+ case Expr::ArraySubscriptExprClass:
+ case Expr::OMPArraySectionExprClass:
+ case Expr::MemberExprClass:
+ case Expr::CompoundAssignOperatorClass:
+ case Expr::CompoundLiteralExprClass:
+ case Expr::ExtVectorElementExprClass:
+ case Expr::DesignatedInitExprClass:
+ case Expr::NoInitExprClass:
+ case Expr::DesignatedInitUpdateExprClass:
+ case Expr::ImplicitValueInitExprClass:
+ case Expr::ParenListExprClass:
+ case Expr::VAArgExprClass:
+ case Expr::AddrLabelExprClass:
+ case Expr::StmtExprClass:
+ case Expr::CXXMemberCallExprClass:
+ case Expr::CUDAKernelCallExprClass:
+ case Expr::CXXDynamicCastExprClass:
+ case Expr::CXXTypeidExprClass:
+ case Expr::CXXUuidofExprClass:
+ case Expr::MSPropertyRefExprClass:
+ case Expr::MSPropertySubscriptExprClass:
+ case Expr::CXXNullPtrLiteralExprClass:
+ case Expr::UserDefinedLiteralClass:
+ case Expr::CXXThisExprClass:
+ case Expr::CXXThrowExprClass:
+ case Expr::CXXNewExprClass:
+ case Expr::CXXDeleteExprClass:
+ case Expr::CXXPseudoDestructorExprClass:
+ case Expr::UnresolvedLookupExprClass:
+ case Expr::TypoExprClass:
+ case Expr::DependentScopeDeclRefExprClass:
+ case Expr::CXXConstructExprClass:
+ case Expr::CXXStdInitializerListExprClass:
+ case Expr::CXXBindTemporaryExprClass:
+ case Expr::ExprWithCleanupsClass:
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::CXXUnresolvedConstructExprClass:
+ case Expr::CXXDependentScopeMemberExprClass:
+ case Expr::UnresolvedMemberExprClass:
+ case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCBoxedExprClass:
+ case Expr::ObjCArrayLiteralClass:
+ case Expr::ObjCDictionaryLiteralClass:
+ case Expr::ObjCEncodeExprClass:
+ case Expr::ObjCMessageExprClass:
+ case Expr::ObjCSelectorExprClass:
+ case Expr::ObjCProtocolExprClass:
+ case Expr::ObjCIvarRefExprClass:
+ case Expr::ObjCPropertyRefExprClass:
+ case Expr::ObjCSubscriptRefExprClass:
+ case Expr::ObjCIsaExprClass:
+ case Expr::ShuffleVectorExprClass:
+ case Expr::ConvertVectorExprClass:
+ case Expr::BlockExprClass:
+ case Expr::NoStmtClass:
+ case Expr::OpaqueValueExprClass:
+ case Expr::PackExpansionExprClass:
+ case Expr::SubstNonTypeTemplateParmPackExprClass:
+ case Expr::FunctionParmPackExprClass:
+ case Expr::AsTypeExprClass:
+ case Expr::ObjCIndirectCopyRestoreExprClass:
+ case Expr::MaterializeTemporaryExprClass:
+ case Expr::PseudoObjectExprClass:
+ case Expr::AtomicExprClass:
+ case Expr::LambdaExprClass:
+ case Expr::CXXFoldExprClass:
+ case Expr::CoawaitExprClass:
+ case Expr::CoyieldExprClass:
+ return ICEDiag(IK_NotICE, E->getLocStart());
+
+ case Expr::InitListExprClass: {
+ // C++03 [dcl.init]p13: If T is a scalar type, then a declaration of the
+ // form "T x = { a };" is equivalent to "T x = a;".
+ // Unless we're initializing a reference, T is a scalar as it is known to be
+ // of integral or enumeration type.
+ if (E->isRValue())
+ if (cast<InitListExpr>(E)->getNumInits() == 1)
+ return CheckICE(cast<InitListExpr>(E)->getInit(0), Ctx);
+ return ICEDiag(IK_NotICE, E->getLocStart());
+ }
+
+ case Expr::SizeOfPackExprClass:
+ case Expr::GNUNullExprClass:
+ // GCC considers the GNU __null value to be an integral constant expression.
+ return NoDiag();
+
+ case Expr::SubstNonTypeTemplateParmExprClass:
+ return
+ CheckICE(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(), Ctx);
+
+ case Expr::ParenExprClass:
+ return CheckICE(cast<ParenExpr>(E)->getSubExpr(), Ctx);
+ case Expr::GenericSelectionExprClass:
+ return CheckICE(cast<GenericSelectionExpr>(E)->getResultExpr(), Ctx);
+ case Expr::IntegerLiteralClass:
+ case Expr::CharacterLiteralClass:
+ case Expr::ObjCBoolLiteralExprClass:
+ case Expr::CXXBoolLiteralExprClass:
+ case Expr::CXXScalarValueInitExprClass:
+ case Expr::TypeTraitExprClass:
+ case Expr::ArrayTypeTraitExprClass:
+ case Expr::ExpressionTraitExprClass:
+ case Expr::CXXNoexceptExprClass:
+ return NoDiag();
+ case Expr::CallExprClass:
+ case Expr::CXXOperatorCallExprClass: {
+ // C99 6.6/3 allows function calls within unevaluated subexpressions of
+ // constant expressions, but they can never be ICEs because an ICE cannot
+ // contain an operand of (pointer to) function type.
+ const CallExpr *CE = cast<CallExpr>(E);
+ if (CE->getBuiltinCallee())
+ return CheckEvalInICE(E, Ctx);
+ return ICEDiag(IK_NotICE, E->getLocStart());
+ }
+ case Expr::DeclRefExprClass: {
+ if (isa<EnumConstantDecl>(cast<DeclRefExpr>(E)->getDecl()))
+ return NoDiag();
+ const ValueDecl *D = dyn_cast<ValueDecl>(cast<DeclRefExpr>(E)->getDecl());
+ if (Ctx.getLangOpts().CPlusPlus &&
+ D && IsConstNonVolatile(D->getType())) {
+ // Parameter variables are never constants. Without this check,
+ // getAnyInitializer() can find a default argument, which leads
+ // to chaos.
+ if (isa<ParmVarDecl>(D))
+ return ICEDiag(IK_NotICE, cast<DeclRefExpr>(E)->getLocation());
+
+ // C++ 7.1.5.1p2
+ // A variable of non-volatile const-qualified integral or enumeration
+ // type initialized by an ICE can be used in ICEs.
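+      // e.g. given "const int N = 5;", "int arr[N];" uses N as an ICE in
+      // C++; in C, N is not an ICE and such an array would be a VLA.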
+ if (const VarDecl *Dcl = dyn_cast<VarDecl>(D)) {
+ if (!Dcl->getType()->isIntegralOrEnumerationType())
+ return ICEDiag(IK_NotICE, cast<DeclRefExpr>(E)->getLocation());
+
+ const VarDecl *VD;
+ // Look for a declaration of this variable that has an initializer, and
+ // check whether it is an ICE.
+ if (Dcl->getAnyInitializer(VD) && VD->checkInitIsICE())
+ return NoDiag();
+ else
+ return ICEDiag(IK_NotICE, cast<DeclRefExpr>(E)->getLocation());
+ }
+ }
+ return ICEDiag(IK_NotICE, E->getLocStart());
+ }
+ case Expr::UnaryOperatorClass: {
+ const UnaryOperator *Exp = cast<UnaryOperator>(E);
+ switch (Exp->getOpcode()) {
+ case UO_PostInc:
+ case UO_PostDec:
+ case UO_PreInc:
+ case UO_PreDec:
+ case UO_AddrOf:
+ case UO_Deref:
+ case UO_Coawait:
+ // C99 6.6/3 allows increment and decrement within unevaluated
+ // subexpressions of constant expressions, but they can never be ICEs
+ // because an ICE cannot contain an lvalue operand.
+ return ICEDiag(IK_NotICE, E->getLocStart());
+ case UO_Extension:
+ case UO_LNot:
+ case UO_Plus:
+ case UO_Minus:
+ case UO_Not:
+ case UO_Real:
+ case UO_Imag:
+ return CheckICE(Exp->getSubExpr(), Ctx);
+ }
+
+    // Every opcode above returns, so control never falls through to the
+    // OffsetOf case below.
+ }
+ case Expr::OffsetOfExprClass: {
+ // Note that per C99, offsetof must be an ICE. And AFAIK, using
+ // EvaluateAsRValue matches the proposed gcc behavior for cases like
+ // "offsetof(struct s{int x[4];}, x[1.0])". This doesn't affect
+ // compliance: we should warn earlier for offsetof expressions with
+ // array subscripts that aren't ICEs, and if the array subscripts
+ // are ICEs, the value of the offsetof must be an integer constant.
+ return CheckEvalInICE(E, Ctx);
+ }
+ case Expr::UnaryExprOrTypeTraitExprClass: {
+ const UnaryExprOrTypeTraitExpr *Exp = cast<UnaryExprOrTypeTraitExpr>(E);
+ if ((Exp->getKind() == UETT_SizeOf) &&
+ Exp->getTypeOfArgument()->isVariableArrayType())
+ return ICEDiag(IK_NotICE, E->getLocStart());
+ return NoDiag();
+ }
+ case Expr::BinaryOperatorClass: {
+ const BinaryOperator *Exp = cast<BinaryOperator>(E);
+ switch (Exp->getOpcode()) {
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ case BO_Assign:
+ case BO_MulAssign:
+ case BO_DivAssign:
+ case BO_RemAssign:
+ case BO_AddAssign:
+ case BO_SubAssign:
+ case BO_ShlAssign:
+ case BO_ShrAssign:
+ case BO_AndAssign:
+ case BO_XorAssign:
+ case BO_OrAssign:
+ // C99 6.6/3 allows assignments within unevaluated subexpressions of
+ // constant expressions, but they can never be ICEs because an ICE cannot
+ // contain an lvalue operand.
+ return ICEDiag(IK_NotICE, E->getLocStart());
+
+ case BO_Mul:
+ case BO_Div:
+ case BO_Rem:
+ case BO_Add:
+ case BO_Sub:
+ case BO_Shl:
+ case BO_Shr:
+ case BO_LT:
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ case BO_EQ:
+ case BO_NE:
+ case BO_And:
+ case BO_Xor:
+ case BO_Or:
+ case BO_Comma: {
+ ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
+ ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
+ if (Exp->getOpcode() == BO_Div ||
+ Exp->getOpcode() == BO_Rem) {
+ // EvaluateAsRValue gives an error for undefined Div/Rem, so make sure
+ // we don't evaluate one.
+ if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICE) {
+ llvm::APSInt REval = Exp->getRHS()->EvaluateKnownConstInt(Ctx);
+ if (REval == 0)
+ return ICEDiag(IK_ICEIfUnevaluated, E->getLocStart());
+ if (REval.isSigned() && REval.isAllOnesValue()) {
+ llvm::APSInt LEval = Exp->getLHS()->EvaluateKnownConstInt(Ctx);
+ if (LEval.isMinSignedValue())
+ return ICEDiag(IK_ICEIfUnevaluated, E->getLocStart());
+ }
+ }
+ }
+ if (Exp->getOpcode() == BO_Comma) {
+ if (Ctx.getLangOpts().C99) {
+ // C99 6.6p3 introduces a strange edge case: comma can be in an ICE
+ // if it isn't evaluated.
+ if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICE)
+ return ICEDiag(IK_ICEIfUnevaluated, E->getLocStart());
+ } else {
+ // In both C89 and C++, commas in ICEs are illegal.
+ return ICEDiag(IK_NotICE, E->getLocStart());
+ }
+ }
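+      // Illustrative example (not in the original source), for C99:
+      //   enum { N = 1 ? 2 : (3, 4) };  // OK: the comma is never evaluated
+      //   enum { M = (3, 4) };          // invalid: the comma is evaluated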
+ return Worst(LHSResult, RHSResult);
+ }
+ case BO_LAnd:
+ case BO_LOr: {
+ ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
+ ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
+ if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICEIfUnevaluated) {
+ // Rare case where the RHS has a comma "side-effect"; we need
+ // to actually check the condition to see whether the side
+ // with the comma is evaluated.
+ if ((Exp->getOpcode() == BO_LAnd) !=
+ (Exp->getLHS()->EvaluateKnownConstInt(Ctx) == 0))
+ return RHSResult;
+ return NoDiag();
+ }
+
+ return Worst(LHSResult, RHSResult);
+ }
+ }
+ }
+ case Expr::ImplicitCastExprClass:
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXFunctionalCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXConstCastExprClass:
+ case Expr::ObjCBridgedCastExprClass: {
+ const Expr *SubExpr = cast<CastExpr>(E)->getSubExpr();
+ if (isa<ExplicitCastExpr>(E)) {
+ if (const FloatingLiteral *FL
+ = dyn_cast<FloatingLiteral>(SubExpr->IgnoreParenImpCasts())) {
+ unsigned DestWidth = Ctx.getIntWidth(E->getType());
+ bool DestSigned = E->getType()->isSignedIntegerOrEnumerationType();
+ APSInt IgnoredVal(DestWidth, !DestSigned);
+ bool Ignored;
+ // If the value does not fit in the destination type, the behavior is
+ // undefined, so we are not required to treat it as a constant
+ // expression.
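+      // For instance (illustrative, not in the original source), (int)3.5
+      // folds to the ICE 3, while (int)1e100 overflows int and need not be
+      // accepted as an ICE.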
+ if (FL->getValue().convertToInteger(IgnoredVal,
+ llvm::APFloat::rmTowardZero,
+ &Ignored) & APFloat::opInvalidOp)
+ return ICEDiag(IK_NotICE, E->getLocStart());
+ return NoDiag();
+ }
+ }
+ switch (cast<CastExpr>(E)->getCastKind()) {
+ case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_NoOp:
+ case CK_IntegralToBoolean:
+ case CK_IntegralCast:
+ return CheckICE(SubExpr, Ctx);
+ default:
+ return ICEDiag(IK_NotICE, E->getLocStart());
+ }
+ }
+ case Expr::BinaryConditionalOperatorClass: {
+ const BinaryConditionalOperator *Exp = cast<BinaryConditionalOperator>(E);
+ ICEDiag CommonResult = CheckICE(Exp->getCommon(), Ctx);
+ if (CommonResult.Kind == IK_NotICE) return CommonResult;
+ ICEDiag FalseResult = CheckICE(Exp->getFalseExpr(), Ctx);
+ if (FalseResult.Kind == IK_NotICE) return FalseResult;
+ if (CommonResult.Kind == IK_ICEIfUnevaluated) return CommonResult;
+ if (FalseResult.Kind == IK_ICEIfUnevaluated &&
+ Exp->getCommon()->EvaluateKnownConstInt(Ctx) != 0) return NoDiag();
+ return FalseResult;
+ }
+ case Expr::ConditionalOperatorClass: {
+ const ConditionalOperator *Exp = cast<ConditionalOperator>(E);
+ // If the condition (ignoring parens) is a __builtin_constant_p call,
+ // then only the true side is actually considered in an integer constant
+ // expression, and it is fully evaluated. This is an important GNU
+ // extension. See GCC PR38377 for discussion.
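+    // Illustrative example (not in the original source), a common GNU idiom:
+    //   char Buf[__builtin_constant_p(n) ? n : 64];
+    // The whole conditional folds to a constant whether or not n does.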
+ if (const CallExpr *CallCE
+ = dyn_cast<CallExpr>(Exp->getCond()->IgnoreParenCasts()))
+ if (CallCE->getBuiltinCallee() == Builtin::BI__builtin_constant_p)
+ return CheckEvalInICE(E, Ctx);
+ ICEDiag CondResult = CheckICE(Exp->getCond(), Ctx);
+ if (CondResult.Kind == IK_NotICE)
+ return CondResult;
+
+ ICEDiag TrueResult = CheckICE(Exp->getTrueExpr(), Ctx);
+ ICEDiag FalseResult = CheckICE(Exp->getFalseExpr(), Ctx);
+
+ if (TrueResult.Kind == IK_NotICE)
+ return TrueResult;
+ if (FalseResult.Kind == IK_NotICE)
+ return FalseResult;
+ if (CondResult.Kind == IK_ICEIfUnevaluated)
+ return CondResult;
+ if (TrueResult.Kind == IK_ICE && FalseResult.Kind == IK_ICE)
+ return NoDiag();
+    // Rare case where the diagnostics depend on which side is evaluated.
+    // Note that if we get here, CondResult is IK_ICE, and at least one of
+    // TrueResult and FalseResult is IK_ICEIfUnevaluated.
+ if (Exp->getCond()->EvaluateKnownConstInt(Ctx) == 0)
+ return FalseResult;
+ return TrueResult;
+ }
+ case Expr::CXXDefaultArgExprClass:
+ return CheckICE(cast<CXXDefaultArgExpr>(E)->getExpr(), Ctx);
+ case Expr::CXXDefaultInitExprClass:
+ return CheckICE(cast<CXXDefaultInitExpr>(E)->getExpr(), Ctx);
+ case Expr::ChooseExprClass: {
+ return CheckICE(cast<ChooseExpr>(E)->getChosenSubExpr(), Ctx);
+ }
+ }
+
+ llvm_unreachable("Invalid StmtClass!");
+}
+
+/// Evaluate an expression as a C++11 integral constant expression.
+static bool EvaluateCPlusPlus11IntegralConstantExpr(const ASTContext &Ctx,
+ const Expr *E,
+ llvm::APSInt *Value,
+ SourceLocation *Loc) {
+ if (!E->getType()->isIntegralOrEnumerationType()) {
+ if (Loc) *Loc = E->getExprLoc();
+ return false;
+ }
+
+ APValue Result;
+ if (!E->isCXX11ConstantExpr(Ctx, &Result, Loc))
+ return false;
+
+ if (!Result.isInt()) {
+ if (Loc) *Loc = E->getExprLoc();
+ return false;
+ }
+
+ if (Value) *Value = Result.getInt();
+ return true;
+}
+
+bool Expr::isIntegerConstantExpr(const ASTContext &Ctx,
+ SourceLocation *Loc) const {
+ if (Ctx.getLangOpts().CPlusPlus11)
+ return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, nullptr, Loc);
+
+ ICEDiag D = CheckICE(this, Ctx);
+ if (D.Kind != IK_ICE) {
+ if (Loc) *Loc = D.Loc;
+ return false;
+ }
+ return true;
+}
+
+bool Expr::isIntegerConstantExpr(llvm::APSInt &Value, const ASTContext &Ctx,
+ SourceLocation *Loc, bool isEvaluated) const {
+ if (Ctx.getLangOpts().CPlusPlus11)
+ return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, &Value, Loc);
+
+ if (!isIntegerConstantExpr(Ctx, Loc))
+ return false;
+ // The only possible side-effects here are due to UB discovered in the
+ // evaluation (for instance, INT_MAX + 1). In such a case, we are still
+ // required to treat the expression as an ICE, so we produce the folded
+ // value.
+ if (!EvaluateAsInt(Value, Ctx, SE_AllowSideEffects))
+ llvm_unreachable("ICE cannot be evaluated!");
+ return true;
+}
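+
+// Illustrative use (not in the original source), assuming an Expr *E and an
+// ASTContext Ctx; the trailing parameters default in the declaration:
+//   llvm::APSInt V;
+//   if (E->isIntegerConstantExpr(V, Ctx))
+//     llvm::errs() << "ICE value: " << V << "\n";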
+
+bool Expr::isCXX98IntegralConstantExpr(const ASTContext &Ctx) const {
+ return CheckICE(this, Ctx).Kind == IK_ICE;
+}
+
+bool Expr::isCXX11ConstantExpr(const ASTContext &Ctx, APValue *Result,
+ SourceLocation *Loc) const {
+ // We support this checking in C++98 mode in order to diagnose compatibility
+ // issues.
+ assert(Ctx.getLangOpts().CPlusPlus);
+
+ // Build evaluation settings.
+ Expr::EvalStatus Status;
+ SmallVector<PartialDiagnosticAt, 8> Diags;
+ Status.Diag = &Diags;
+ EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpression);
+
+ APValue Scratch;
+ bool IsConstExpr = ::EvaluateAsRValue(Info, this, Result ? *Result : Scratch);
+
+ if (!Diags.empty()) {
+ IsConstExpr = false;
+ if (Loc) *Loc = Diags[0].first;
+ } else if (!IsConstExpr) {
+ // FIXME: This shouldn't happen.
+ if (Loc) *Loc = getExprLoc();
+ }
+
+ return IsConstExpr;
+}
+
+bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx,
+ const FunctionDecl *Callee,
+ ArrayRef<const Expr*> Args) const {
+ Expr::EvalStatus Status;
+ EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpressionUnevaluated);
+
+ ArgVector ArgValues(Args.size());
+ for (ArrayRef<const Expr*>::iterator I = Args.begin(), E = Args.end();
+ I != E; ++I) {
+ if ((*I)->isValueDependent() ||
+ !Evaluate(ArgValues[I - Args.begin()], Info, *I))
+ // If evaluation fails, throw away the argument entirely.
+ ArgValues[I - Args.begin()] = APValue();
+ if (Info.EvalStatus.HasSideEffects)
+ return false;
+ }
+
+ // Build fake call to Callee.
+ CallStackFrame Frame(Info, Callee->getLocation(), Callee, /*This*/nullptr,
+ ArgValues.data());
+ return Evaluate(Value, Info, this) && !Info.EvalStatus.HasSideEffects;
+}
+
+bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
+ SmallVectorImpl<
+ PartialDiagnosticAt> &Diags) {
+ // FIXME: It would be useful to check constexpr function templates, but at the
+ // moment the constant expression evaluator cannot cope with the non-rigorous
+ // ASTs which we build for dependent expressions.
+ if (FD->isDependentContext())
+ return true;
+
+ Expr::EvalStatus Status;
+ Status.Diag = &Diags;
+
+ EvalInfo Info(FD->getASTContext(), Status,
+ EvalInfo::EM_PotentialConstantExpression);
+
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
+ const CXXRecordDecl *RD = MD ? MD->getParent()->getCanonicalDecl() : nullptr;
+
+ // Fabricate an arbitrary expression on the stack and pretend that it
+ // is a temporary being used as the 'this' pointer.
+ LValue This;
+ ImplicitValueInitExpr VIE(RD ? Info.Ctx.getRecordType(RD) : Info.Ctx.IntTy);
+ This.set(&VIE, Info.CurrentCall->Index);
+
+ ArrayRef<const Expr*> Args;
+
+ SourceLocation Loc = FD->getLocation();
+
+ APValue Scratch;
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) {
+ // Evaluate the call as a constant initializer, to allow the construction
+ // of objects of non-literal types.
+ Info.setEvaluatingDecl(This.getLValueBase(), Scratch);
+ HandleConstructorCall(Loc, This, Args, CD, Info, Scratch);
+ } else
+ HandleFunctionCall(Loc, FD, (MD && MD->isInstance()) ? &This : nullptr,
+ Args, FD->getBody(), Info, Scratch, nullptr);
+
+ return Diags.empty();
+}
+
+bool Expr::isPotentialConstantExprUnevaluated(Expr *E,
+ const FunctionDecl *FD,
+ SmallVectorImpl<
+ PartialDiagnosticAt> &Diags) {
+ Expr::EvalStatus Status;
+ Status.Diag = &Diags;
+
+ EvalInfo Info(FD->getASTContext(), Status,
+ EvalInfo::EM_PotentialConstantExpressionUnevaluated);
+
+ // Fabricate a call stack frame to give the arguments a plausible cover story.
+ ArrayRef<const Expr*> Args;
+ ArgVector ArgValues(0);
+ bool Success = EvaluateArgs(Args, ArgValues, Info);
+ (void)Success;
+ assert(Success &&
+ "Failed to set up arguments for potential constant evaluation");
+ CallStackFrame Frame(Info, SourceLocation(), FD, nullptr, ArgValues.data());
+
+ APValue ResultScratch;
+ Evaluate(ResultScratch, Info, E);
+ return Diags.empty();
+}
+
+bool Expr::tryEvaluateObjectSize(uint64_t &Result, ASTContext &Ctx,
+ unsigned Type) const {
+ if (!getType()->isPointerType())
+ return false;
+
+ Expr::EvalStatus Status;
+ EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantFold);
+ return ::tryEvaluateBuiltinObjectSize(this, Type, Info, Result);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprObjC.cpp b/contrib/llvm/tools/clang/lib/AST/ExprObjC.cpp
new file mode 100644
index 0000000..0936a81
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ExprObjC.cpp
@@ -0,0 +1,360 @@
+//===--- ExprObjC.cpp - (ObjC) Expression AST Node Implementation ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Expr subclasses declared in ExprObjC.h.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ExprObjC.h"
+
+#include "clang/AST/ASTContext.h"
+
+using namespace clang;
+
+ObjCArrayLiteral::ObjCArrayLiteral(ArrayRef<Expr *> Elements, QualType T,
+ ObjCMethodDecl *Method, SourceRange SR)
+ : Expr(ObjCArrayLiteralClass, T, VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ NumElements(Elements.size()), Range(SR), ArrayWithObjectsMethod(Method) {
+ Expr **SaveElements = getElements();
+ for (unsigned I = 0, N = Elements.size(); I != N; ++I) {
+ if (Elements[I]->isTypeDependent() || Elements[I]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (Elements[I]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (Elements[I]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SaveElements[I] = Elements[I];
+ }
+}
+
+ObjCArrayLiteral *ObjCArrayLiteral::Create(const ASTContext &C,
+ ArrayRef<Expr *> Elements,
+ QualType T, ObjCMethodDecl *Method,
+ SourceRange SR) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(Elements.size()));
+ return new (Mem) ObjCArrayLiteral(Elements, T, Method, SR);
+}
+
+ObjCArrayLiteral *ObjCArrayLiteral::CreateEmpty(const ASTContext &C,
+ unsigned NumElements) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(NumElements));
+ return new (Mem) ObjCArrayLiteral(EmptyShell(), NumElements);
+}
+
+ObjCDictionaryLiteral::ObjCDictionaryLiteral(ArrayRef<ObjCDictionaryElement> VK,
+ bool HasPackExpansions, QualType T,
+ ObjCMethodDecl *method,
+ SourceRange SR)
+ : Expr(ObjCDictionaryLiteralClass, T, VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ NumElements(VK.size()), HasPackExpansions(HasPackExpansions), Range(SR),
+ DictWithObjectsMethod(method) {
+ KeyValuePair *KeyValues = getTrailingObjects<KeyValuePair>();
+ ExpansionData *Expansions =
+ HasPackExpansions ? getTrailingObjects<ExpansionData>() : nullptr;
+ for (unsigned I = 0; I < NumElements; I++) {
+ if (VK[I].Key->isTypeDependent() || VK[I].Key->isValueDependent() ||
+ VK[I].Value->isTypeDependent() || VK[I].Value->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (VK[I].Key->isInstantiationDependent() ||
+ VK[I].Value->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (VK[I].EllipsisLoc.isInvalid() &&
+ (VK[I].Key->containsUnexpandedParameterPack() ||
+ VK[I].Value->containsUnexpandedParameterPack()))
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ KeyValues[I].Key = VK[I].Key;
+ KeyValues[I].Value = VK[I].Value;
+ if (Expansions) {
+ Expansions[I].EllipsisLoc = VK[I].EllipsisLoc;
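+      // NumExpansionsPlusOne encodes "unknown" as 0, so a known expansion
+      // count N is stored as N + 1.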
+ if (VK[I].NumExpansions)
+ Expansions[I].NumExpansionsPlusOne = *VK[I].NumExpansions + 1;
+ else
+ Expansions[I].NumExpansionsPlusOne = 0;
+ }
+ }
+}
+
+ObjCDictionaryLiteral *
+ObjCDictionaryLiteral::Create(const ASTContext &C,
+ ArrayRef<ObjCDictionaryElement> VK,
+ bool HasPackExpansions, QualType T,
+ ObjCMethodDecl *method, SourceRange SR) {
+ void *Mem = C.Allocate(totalSizeToAlloc<KeyValuePair, ExpansionData>(
+ VK.size(), HasPackExpansions ? VK.size() : 0));
+ return new (Mem) ObjCDictionaryLiteral(VK, HasPackExpansions, T, method, SR);
+}
+
+ObjCDictionaryLiteral *
+ObjCDictionaryLiteral::CreateEmpty(const ASTContext &C, unsigned NumElements,
+ bool HasPackExpansions) {
+ void *Mem = C.Allocate(totalSizeToAlloc<KeyValuePair, ExpansionData>(
+ NumElements, HasPackExpansions ? NumElements : 0));
+ return new (Mem)
+ ObjCDictionaryLiteral(EmptyShell(), NumElements, HasPackExpansions);
+}
+
+QualType ObjCPropertyRefExpr::getReceiverType(const ASTContext &ctx) const {
+ if (isClassReceiver())
+ return ctx.getObjCInterfaceType(getClassReceiver());
+
+ if (isSuperReceiver())
+ return getSuperReceiverType();
+
+ return getBase()->getType();
+}
+
+ObjCMessageExpr::ObjCMessageExpr(QualType T, ExprValueKind VK,
+ SourceLocation LBracLoc,
+ SourceLocation SuperLoc, bool IsInstanceSuper,
+ QualType SuperType, Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ SelectorLocationsKind SelLocsK,
+ ObjCMethodDecl *Method, ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc, bool isImplicit)
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary,
+ /*TypeDependent=*/false, /*ValueDependent=*/false,
+ /*InstantiationDependent=*/false,
+ /*ContainsUnexpandedParameterPack=*/false),
+ SelectorOrMethod(
+ reinterpret_cast<uintptr_t>(Method ? Method : Sel.getAsOpaquePtr())),
+ Kind(IsInstanceSuper ? SuperInstance : SuperClass),
+ HasMethod(Method != nullptr), IsDelegateInitCall(false),
+ IsImplicit(isImplicit), SuperLoc(SuperLoc), LBracLoc(LBracLoc),
+ RBracLoc(RBracLoc) {
+ initArgsAndSelLocs(Args, SelLocs, SelLocsK);
+ setReceiverPointer(SuperType.getAsOpaquePtr());
+}
+
+ObjCMessageExpr::ObjCMessageExpr(QualType T, ExprValueKind VK,
+ SourceLocation LBracLoc,
+ TypeSourceInfo *Receiver, Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ SelectorLocationsKind SelLocsK,
+ ObjCMethodDecl *Method, ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc, bool isImplicit)
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary, T->isDependentType(),
+ T->isDependentType(), T->isInstantiationDependentType(),
+ T->containsUnexpandedParameterPack()),
+ SelectorOrMethod(
+ reinterpret_cast<uintptr_t>(Method ? Method : Sel.getAsOpaquePtr())),
+ Kind(Class), HasMethod(Method != nullptr), IsDelegateInitCall(false),
+ IsImplicit(isImplicit), LBracLoc(LBracLoc), RBracLoc(RBracLoc) {
+ initArgsAndSelLocs(Args, SelLocs, SelLocsK);
+ setReceiverPointer(Receiver);
+}
+
+ObjCMessageExpr::ObjCMessageExpr(QualType T, ExprValueKind VK,
+ SourceLocation LBracLoc, Expr *Receiver,
+ Selector Sel, ArrayRef<SourceLocation> SelLocs,
+ SelectorLocationsKind SelLocsK,
+ ObjCMethodDecl *Method, ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc, bool isImplicit)
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary,
+ Receiver->isTypeDependent(), Receiver->isTypeDependent(),
+ Receiver->isInstantiationDependent(),
+ Receiver->containsUnexpandedParameterPack()),
+ SelectorOrMethod(
+ reinterpret_cast<uintptr_t>(Method ? Method : Sel.getAsOpaquePtr())),
+ Kind(Instance), HasMethod(Method != nullptr), IsDelegateInitCall(false),
+ IsImplicit(isImplicit), LBracLoc(LBracLoc), RBracLoc(RBracLoc) {
+ initArgsAndSelLocs(Args, SelLocs, SelLocsK);
+ setReceiverPointer(Receiver);
+}
+
+void ObjCMessageExpr::initArgsAndSelLocs(ArrayRef<Expr *> Args,
+ ArrayRef<SourceLocation> SelLocs,
+ SelectorLocationsKind SelLocsK) {
+ setNumArgs(Args.size());
+ Expr **MyArgs = getArgs();
+ for (unsigned I = 0; I != Args.size(); ++I) {
+ if (Args[I]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (Args[I]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (Args[I]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (Args[I]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ MyArgs[I] = Args[I];
+ }
+
+ SelLocsKind = SelLocsK;
+ if (!isImplicit()) {
+ if (SelLocsK == SelLoc_NonStandard)
+ std::copy(SelLocs.begin(), SelLocs.end(), getStoredSelLocs());
+ }
+}
+
+ObjCMessageExpr *
+ObjCMessageExpr::Create(const ASTContext &Context, QualType T, ExprValueKind VK,
+ SourceLocation LBracLoc, SourceLocation SuperLoc,
+ bool IsInstanceSuper, QualType SuperType, Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ObjCMethodDecl *Method, ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc, bool isImplicit) {
+ assert((!SelLocs.empty() || isImplicit) &&
+ "No selector locs for non-implicit message");
+ ObjCMessageExpr *Mem;
+ SelectorLocationsKind SelLocsK = SelectorLocationsKind();
+ if (isImplicit)
+ Mem = alloc(Context, Args.size(), 0);
+ else
+ Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
+ return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, SuperLoc, IsInstanceSuper,
+ SuperType, Sel, SelLocs, SelLocsK, Method,
+ Args, RBracLoc, isImplicit);
+}
+
+ObjCMessageExpr *
+ObjCMessageExpr::Create(const ASTContext &Context, QualType T, ExprValueKind VK,
+ SourceLocation LBracLoc, TypeSourceInfo *Receiver,
+ Selector Sel, ArrayRef<SourceLocation> SelLocs,
+ ObjCMethodDecl *Method, ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc, bool isImplicit) {
+ assert((!SelLocs.empty() || isImplicit) &&
+ "No selector locs for non-implicit message");
+ ObjCMessageExpr *Mem;
+ SelectorLocationsKind SelLocsK = SelectorLocationsKind();
+ if (isImplicit)
+ Mem = alloc(Context, Args.size(), 0);
+ else
+ Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
+ return new (Mem)
+ ObjCMessageExpr(T, VK, LBracLoc, Receiver, Sel, SelLocs, SelLocsK, Method,
+ Args, RBracLoc, isImplicit);
+}
+
+ObjCMessageExpr *
+ObjCMessageExpr::Create(const ASTContext &Context, QualType T, ExprValueKind VK,
+ SourceLocation LBracLoc, Expr *Receiver, Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ObjCMethodDecl *Method, ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc, bool isImplicit) {
+ assert((!SelLocs.empty() || isImplicit) &&
+ "No selector locs for non-implicit message");
+ ObjCMessageExpr *Mem;
+ SelectorLocationsKind SelLocsK = SelectorLocationsKind();
+ if (isImplicit)
+ Mem = alloc(Context, Args.size(), 0);
+ else
+ Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
+ return new (Mem)
+ ObjCMessageExpr(T, VK, LBracLoc, Receiver, Sel, SelLocs, SelLocsK, Method,
+ Args, RBracLoc, isImplicit);
+}
+
+ObjCMessageExpr *ObjCMessageExpr::CreateEmpty(const ASTContext &Context,
+ unsigned NumArgs,
+ unsigned NumStoredSelLocs) {
+ ObjCMessageExpr *Mem = alloc(Context, NumArgs, NumStoredSelLocs);
+ return new (Mem) ObjCMessageExpr(EmptyShell(), NumArgs);
+}
+
+ObjCMessageExpr *ObjCMessageExpr::alloc(const ASTContext &C,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBraceLoc,
+ ArrayRef<SourceLocation> SelLocs,
+ Selector Sel,
+ SelectorLocationsKind &SelLocsK) {
+ SelLocsK = hasStandardSelectorLocs(Sel, SelLocs, Args, RBraceLoc);
+ unsigned NumStoredSelLocs =
+ (SelLocsK == SelLoc_NonStandard) ? SelLocs.size() : 0;
+ return alloc(C, Args.size(), NumStoredSelLocs);
+}
+
+ObjCMessageExpr *ObjCMessageExpr::alloc(const ASTContext &C, unsigned NumArgs,
+ unsigned NumStoredSelLocs) {
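+  // The trailing storage holds one void* for the receiver (or receiver
+  // type) followed by NumArgs argument pointers, hence NumArgs + 1 slots,
+  // plus any non-standard selector locations.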
+ return (ObjCMessageExpr *)C.Allocate(
+ totalSizeToAlloc<void *, SourceLocation>(NumArgs + 1, NumStoredSelLocs),
+ llvm::AlignOf<ObjCMessageExpr>::Alignment);
+}
+
+void ObjCMessageExpr::getSelectorLocs(
+ SmallVectorImpl<SourceLocation> &SelLocs) const {
+ for (unsigned i = 0, e = getNumSelectorLocs(); i != e; ++i)
+ SelLocs.push_back(getSelectorLoc(i));
+}
+
+SourceRange ObjCMessageExpr::getReceiverRange() const {
+ switch (getReceiverKind()) {
+ case Instance:
+ return getInstanceReceiver()->getSourceRange();
+
+ case Class:
+ return getClassReceiverTypeInfo()->getTypeLoc().getSourceRange();
+
+ case SuperInstance:
+ case SuperClass:
+ return getSuperLoc();
+ }
+
+ llvm_unreachable("Invalid ReceiverKind!");
+}
+
+Selector ObjCMessageExpr::getSelector() const {
+ if (HasMethod)
+ return reinterpret_cast<const ObjCMethodDecl *>(SelectorOrMethod)
+ ->getSelector();
+ return Selector(SelectorOrMethod);
+}
+
+QualType ObjCMessageExpr::getReceiverType() const {
+ switch (getReceiverKind()) {
+ case Instance:
+ return getInstanceReceiver()->getType();
+ case Class:
+ return getClassReceiver();
+ case SuperInstance:
+ case SuperClass:
+ return getSuperType();
+ }
+
+ llvm_unreachable("unexpected receiver kind");
+}
+
+ObjCInterfaceDecl *ObjCMessageExpr::getReceiverInterface() const {
+ QualType T = getReceiverType();
+
+ if (const ObjCObjectPointerType *Ptr = T->getAs<ObjCObjectPointerType>())
+ return Ptr->getInterfaceDecl();
+
+ if (const ObjCObjectType *Ty = T->getAs<ObjCObjectType>())
+ return Ty->getInterface();
+
+ return nullptr;
+}
+
+Stmt::child_range ObjCMessageExpr::children() {
+ Stmt **begin;
+ if (getReceiverKind() == Instance)
+ begin = reinterpret_cast<Stmt **>(getTrailingObjects<void *>());
+ else
+ begin = reinterpret_cast<Stmt **>(getArgs());
+ return child_range(begin,
+ reinterpret_cast<Stmt **>(getArgs() + getNumArgs()));
+}
+
+StringRef ObjCBridgedCastExpr::getBridgeKindName() const {
+ switch (getBridgeKind()) {
+ case OBC_Bridge:
+ return "__bridge";
+ case OBC_BridgeTransfer:
+ return "__bridge_transfer";
+ case OBC_BridgeRetained:
+ return "__bridge_retained";
+ }
+
+ llvm_unreachable("Invalid BridgeKind!");
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ExternalASTSource.cpp b/contrib/llvm/tools/clang/lib/AST/ExternalASTSource.cpp
new file mode 100644
index 0000000..e3de8c5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ExternalASTSource.cpp
@@ -0,0 +1,130 @@
+//===- ExternalASTSource.cpp - Abstract External AST Interface --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the default implementation of the ExternalASTSource
+// interface, which enables construction of AST nodes from some external
+// source.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/Basic/Module.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace clang;
+
+ExternalASTSource::~ExternalASTSource() { }
+
+llvm::Optional<ExternalASTSource::ASTSourceDescriptor>
+ExternalASTSource::getSourceDescriptor(unsigned ID) {
+ return None;
+}
+
+ExternalASTSource::ASTSourceDescriptor::ASTSourceDescriptor(const Module &M)
+ : Signature(M.Signature), ClangModule(&M) {
+ if (M.Directory)
+ Path = M.Directory->getName();
+ if (auto *File = M.getASTFile())
+ ASTFile = File->getName();
+}
+
+std::string ExternalASTSource::ASTSourceDescriptor::getModuleName() const {
+ if (ClangModule)
+ return ClangModule->Name;
+ else
+ return PCHModuleName;
+}
+
+void ExternalASTSource::FindFileRegionDecls(FileID File, unsigned Offset,
+ unsigned Length,
+ SmallVectorImpl<Decl *> &Decls) {}
+
+void ExternalASTSource::CompleteRedeclChain(const Decl *D) {}
+
+void ExternalASTSource::CompleteType(TagDecl *Tag) {}
+
+void ExternalASTSource::CompleteType(ObjCInterfaceDecl *Class) {}
+
+void ExternalASTSource::ReadComments() {}
+
+void ExternalASTSource::StartedDeserializing() {}
+
+void ExternalASTSource::FinishedDeserializing() {}
+
+void ExternalASTSource::StartTranslationUnit(ASTConsumer *Consumer) {}
+
+void ExternalASTSource::PrintStats() { }
+
+bool ExternalASTSource::layoutRecordType(
+ const RecordDecl *Record, uint64_t &Size, uint64_t &Alignment,
+ llvm::DenseMap<const FieldDecl *, uint64_t> &FieldOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &BaseOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &VirtualBaseOffsets) {
+ return false;
+}
+
+Decl *ExternalASTSource::GetExternalDecl(uint32_t ID) {
+ return nullptr;
+}
+
+Selector ExternalASTSource::GetExternalSelector(uint32_t ID) {
+ return Selector();
+}
+
+uint32_t ExternalASTSource::GetNumExternalSelectors() {
+ return 0;
+}
+
+Stmt *ExternalASTSource::GetExternalDeclStmt(uint64_t Offset) {
+ return nullptr;
+}
+
+CXXCtorInitializer **
+ExternalASTSource::GetExternalCXXCtorInitializers(uint64_t Offset) {
+ return nullptr;
+}
+
+CXXBaseSpecifier *
+ExternalASTSource::GetExternalCXXBaseSpecifiers(uint64_t Offset) {
+ return nullptr;
+}
+
+bool
+ExternalASTSource::FindExternalVisibleDeclsByName(const DeclContext *DC,
+ DeclarationName Name) {
+ return false;
+}
+
+void ExternalASTSource::completeVisibleDeclsMap(const DeclContext *DC) {}
+
+void ExternalASTSource::FindExternalLexicalDecls(
+ const DeclContext *DC, llvm::function_ref<bool(Decl::Kind)> IsKindWeWant,
+ SmallVectorImpl<Decl *> &Result) {}
+
+void ExternalASTSource::getMemoryBufferSizes(MemoryBufferSizes &sizes) const {}
+
+uint32_t ExternalASTSource::incrementGeneration(ASTContext &C) {
+ uint32_t OldGeneration = CurrentGeneration;
+
+ // Make sure the generation of the topmost external source for the context is
+ // incremented. That might not be us.
+ auto *P = C.getExternalSource();
+ if (P && P != this)
+ CurrentGeneration = P->incrementGeneration(C);
+ else {
+ // FIXME: Only bump the generation counter if the current generation number
+ // has been observed?
+ if (!++CurrentGeneration)
+ llvm::report_fatal_error("generation counter overflowed", false);
+ }
+
+ return OldGeneration;
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/InheritViz.cpp b/contrib/llvm/tools/clang/lib/AST/InheritViz.cpp
new file mode 100644
index 0000000..0b82da1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/InheritViz.cpp
@@ -0,0 +1,160 @@
+//===- InheritViz.cpp - Graphviz visualization for inheritance --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements CXXRecordDecl::viewInheritance, which
+// generates a GraphViz DOT file that depicts the class inheritance
+// diagram and then calls Graphviz/dot+gv on it.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/TypeOrdering.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/GraphWriter.h"
+#include "llvm/Support/raw_ostream.h"
+#include <map>
+#include <set>
+using namespace clang;
+
+namespace {
+/// InheritanceHierarchyWriter - Helper class that writes out a
+/// GraphViz file that diagrams the inheritance hierarchy starting at
+/// a given C++ class type. Note that we do not use LLVM's
+/// GraphWriter, because the interface does not permit us to properly
+/// differentiate between uses of types as virtual bases
+/// vs. non-virtual bases.
+class InheritanceHierarchyWriter {
+ ASTContext& Context;
+ raw_ostream &Out;
+ std::map<QualType, int, QualTypeOrdering> DirectBaseCount;
+ std::set<QualType, QualTypeOrdering> KnownVirtualBases;
+
+public:
+ InheritanceHierarchyWriter(ASTContext& Context, raw_ostream& Out)
+ : Context(Context), Out(Out) { }
+
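+  // The emitted graph has the shape (node ids are pointer-derived and
+  // elided here):
+  //   digraph "C" {
+  //     Class_<id>_0 [ shape="box", label="C" ];
+  //     Class_<id>_0 -> Class_<id>_1;
+  //   }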
+ void WriteGraph(QualType Type) {
+ Out << "digraph \"" << llvm::DOT::EscapeString(Type.getAsString())
+ << "\" {\n";
+ WriteNode(Type, false);
+ Out << "}\n";
+ }
+
+protected:
+ /// WriteNode - Write out the description of node in the inheritance
+ /// diagram, which may be a base class or it may be the root node.
+ void WriteNode(QualType Type, bool FromVirtual);
+
+ /// WriteNodeReference - Write out a reference to the given node,
+ /// using a unique identifier for each direct base and for the
+ /// (only) virtual base.
+ raw_ostream& WriteNodeReference(QualType Type, bool FromVirtual);
+};
+} // namespace
+
+void InheritanceHierarchyWriter::WriteNode(QualType Type, bool FromVirtual) {
+ QualType CanonType = Context.getCanonicalType(Type);
+
+ if (FromVirtual) {
+ if (KnownVirtualBases.find(CanonType) != KnownVirtualBases.end())
+ return;
+
+ // We haven't seen this virtual base before, so display it and
+ // its bases.
+ KnownVirtualBases.insert(CanonType);
+ }
+
+ // Declare the node itself.
+ Out << " ";
+ WriteNodeReference(Type, FromVirtual);
+
+ // Give the node a label based on the name of the class.
+ std::string TypeName = Type.getAsString();
+ Out << " [ shape=\"box\", label=\"" << llvm::DOT::EscapeString(TypeName);
+
+ // If the name of the class was a typedef or something different
+ // from the "real" class name, show the real class name in
+ // parentheses so we don't confuse ourselves.
+ if (TypeName != CanonType.getAsString()) {
+ Out << "\\n(" << CanonType.getAsString() << ")";
+ }
+
+ // Finished describing the node.
+ Out << " \"];\n";
+
+ // Display the base classes.
+ const CXXRecordDecl *Decl
+ = static_cast<const CXXRecordDecl *>(Type->getAs<RecordType>()->getDecl());
+ for (const auto &Base : Decl->bases()) {
+ QualType CanonBaseType = Context.getCanonicalType(Base.getType());
+
+ // If this is not virtual inheritance, bump the direct base
+ // count for the type.
+ if (!Base.isVirtual())
+ ++DirectBaseCount[CanonBaseType];
+
+ // Write out the node (if we need to).
+ WriteNode(Base.getType(), Base.isVirtual());
+
+ // Write out the edge.
+ Out << " ";
+ WriteNodeReference(Type, FromVirtual);
+ Out << " -> ";
+ WriteNodeReference(Base.getType(), Base.isVirtual());
+
+ // Write out edge attributes to show the kind of inheritance.
+ if (Base.isVirtual()) {
+ Out << " [ style=\"dashed\" ]";
+ }
+ Out << ";";
+ }
+}
+
+/// WriteNodeReference - Write out a reference to the given node,
+/// using a unique identifier for each direct base and for the
+/// (only) virtual base.
+raw_ostream&
+InheritanceHierarchyWriter::WriteNodeReference(QualType Type,
+ bool FromVirtual) {
+ QualType CanonType = Context.getCanonicalType(Type);
+
+ Out << "Class_" << CanonType.getAsOpaquePtr();
+ if (!FromVirtual)
+ Out << "_" << DirectBaseCount[CanonType];
+ return Out;
+}
+
+/// viewInheritance - Display the inheritance hierarchy of this C++
+/// class using GraphViz.
+void CXXRecordDecl::viewInheritance(ASTContext& Context) const {
+ QualType Self = Context.getTypeDeclType(this);
+
+ int FD;
+ SmallString<128> Filename;
+ if (std::error_code EC = llvm::sys::fs::createTemporaryFile(
+ Self.getAsString(), "dot", FD, Filename)) {
+ llvm::errs() << "Error: " << EC.message() << "\n";
+ return;
+ }
+
+ llvm::errs() << "Writing '" << Filename << "'... ";
+
+ llvm::raw_fd_ostream O(FD, true);
+
+ InheritanceHierarchyWriter Writer(Context, O);
+ Writer.WriteGraph(Self);
+ llvm::errs() << " done. \n";
+
+ O.close();
+
+ // Display the graph
+ DisplayGraph(Filename);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp b/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp
new file mode 100644
index 0000000..8a2cc0f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp
@@ -0,0 +1,174 @@
+//===------- ItaniumCXXABI.cpp - AST support for the Itanium C++ ABI ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ AST support targeting the Itanium C++ ABI, which is
+// documented at:
+// http://www.codesourcery.com/public/cxx-abi/abi.html
+// http://www.codesourcery.com/public/cxx-abi/abi-eh.html
+//
+// It also supports the closely-related ARM C++ ABI, documented at:
+// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
+//
+//===----------------------------------------------------------------------===//
+
+#include "CXXABI.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/MangleNumberingContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/TargetInfo.h"
+
+using namespace clang;
+
+namespace {
+
+/// According to Itanium C++ ABI 5.1.2:
+/// the name of an anonymous union is considered to be
+/// the name of the first named data member found by a pre-order,
+/// depth-first, declaration-order walk of the data members of
+/// the anonymous union.
+/// If there is no such data member (i.e., if all of the data members
+/// in the union are unnamed), then there is no way for a program to
+/// refer to the anonymous union, and there is therefore no need to mangle
+/// its name.
+///
+/// Returns the name of the anonymous union VarDecl, or nullptr if no named
+/// data member exists.
+static const IdentifierInfo *findAnonymousUnionVarDeclName(const VarDecl& VD) {
+ const RecordType *RT = VD.getType()->getAs<RecordType>();
+ assert(RT && "type of VarDecl is expected to be RecordType.");
+ assert(RT->getDecl()->isUnion() && "RecordType is expected to be a union.");
+ if (const FieldDecl *FD = RT->getDecl()->findFirstNamedDataMember()) {
+ return FD->getIdentifier();
+ }
+
+ return nullptr;
+}
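+
+// Illustrative example (not in the original source): given
+//   static union { int a; double b; };
+// the first named data member "a" supplies the name used for mangling.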
+
+/// \brief Keeps track of the mangled names of lambda expressions and block
+/// literals within a particular context.
+class ItaniumNumberingContext : public MangleNumberingContext {
+ llvm::DenseMap<const Type *, unsigned> ManglingNumbers;
+ llvm::DenseMap<const IdentifierInfo *, unsigned> VarManglingNumbers;
+ llvm::DenseMap<const IdentifierInfo *, unsigned> TagManglingNumbers;
+
+public:
+ unsigned getManglingNumber(const CXXMethodDecl *CallOperator) override {
+ const FunctionProtoType *Proto =
+ CallOperator->getType()->getAs<FunctionProtoType>();
+ ASTContext &Context = CallOperator->getASTContext();
+
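+    // The numbering key is the call operator's parameter list (with a void
+    // result type), so lambdas in one context with identical parameter
+    // types share a single counter.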
+ QualType Key =
+ Context.getFunctionType(Context.VoidTy, Proto->getParamTypes(),
+ FunctionProtoType::ExtProtoInfo());
+ Key = Context.getCanonicalType(Key);
+ return ++ManglingNumbers[Key->castAs<FunctionProtoType>()];
+ }
+
+ unsigned getManglingNumber(const BlockDecl *BD) override {
+ const Type *Ty = nullptr;
+ return ++ManglingNumbers[Ty];
+ }
+
+ unsigned getStaticLocalNumber(const VarDecl *VD) override {
+ return 0;
+ }
+
+ /// Variable decls are numbered by identifier.
+ unsigned getManglingNumber(const VarDecl *VD, unsigned) override {
+ const IdentifierInfo *Identifier = VD->getIdentifier();
+ if (!Identifier) {
+      // A VarDecl without an identifier represents an anonymous union
+      // declaration.
+ Identifier = findAnonymousUnionVarDeclName(*VD);
+ }
+ return ++VarManglingNumbers[Identifier];
+ }
+
+ unsigned getManglingNumber(const TagDecl *TD, unsigned) override {
+ return ++TagManglingNumbers[TD->getIdentifier()];
+ }
+};
+
+class ItaniumCXXABI : public CXXABI {
+protected:
+ ASTContext &Context;
+public:
+ ItaniumCXXABI(ASTContext &Ctx) : Context(Ctx) { }
+
+ std::pair<uint64_t, unsigned>
+ getMemberPointerWidthAndAlign(const MemberPointerType *MPT) const override {
+ const TargetInfo &Target = Context.getTargetInfo();
+ TargetInfo::IntType PtrDiff = Target.getPtrDiffType(0);
+ uint64_t Width = Target.getTypeWidth(PtrDiff);
+ unsigned Align = Target.getTypeAlign(PtrDiff);
+ if (MPT->isMemberFunctionPointer())
+ Width = 2 * Width;
+ return std::make_pair(Width, Align);
+ }
+
+ CallingConv getDefaultMethodCallConv(bool isVariadic) const override {
+ const llvm::Triple &T = Context.getTargetInfo().getTriple();
+ if (!isVariadic && T.isWindowsGNUEnvironment() &&
+ T.getArch() == llvm::Triple::x86)
+ return CC_X86ThisCall;
+ return CC_C;
+ }
+
+ // We cheat and just check that the class has a vtable pointer, and that it's
+ // only big enough to have a vtable pointer and nothing more (or less).
+ bool isNearlyEmpty(const CXXRecordDecl *RD) const override {
+ // Check that the class has a vtable pointer.
+ if (!RD->isDynamicClass())
+ return false;
+
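+    // e.g. "struct A { virtual void f(); };" is nearly empty, while adding
+    // any non-static data member grows it past a single pointer.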
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ CharUnits PointerSize =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ return Layout.getNonVirtualSize() == PointerSize;
+ }
+
+ const CXXConstructorDecl *
+ getCopyConstructorForExceptionObject(CXXRecordDecl *RD) override {
+ return nullptr;
+ }
+
+ void addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
+ CXXConstructorDecl *CD) override {}
+
+ void addDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
+ unsigned ParmIdx, Expr *DAE) override {}
+
+ Expr *getDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
+ unsigned ParmIdx) override {
+ return nullptr;
+ }
+
+ void addTypedefNameForUnnamedTagDecl(TagDecl *TD,
+ TypedefNameDecl *DD) override {}
+
+ TypedefNameDecl *getTypedefNameForUnnamedTagDecl(const TagDecl *TD) override {
+ return nullptr;
+ }
+
+ void addDeclaratorForUnnamedTagDecl(TagDecl *TD,
+ DeclaratorDecl *DD) override {}
+
+ DeclaratorDecl *getDeclaratorForUnnamedTagDecl(const TagDecl *TD) override {
+ return nullptr;
+ }
+
+ MangleNumberingContext *createMangleNumberingContext() const override {
+ return new ItaniumNumberingContext();
+ }
+};
+} // namespace
+
+CXXABI *clang::CreateItaniumCXXABI(ASTContext &Ctx) {
+ return new ItaniumCXXABI(Ctx);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp b/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp
new file mode 100644
index 0000000..8018188
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp
@@ -0,0 +1,4244 @@
+//===--- ItaniumMangle.cpp - Itanium C++ Name Mangling ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements C++ name mangling according to the Itanium C++ ABI,
+// which is used in GCC 3.2 and newer (and many compilers that are
+// ABI-compatible with GCC):
+//
+// http://mentorembedded.github.io/cxx-abi/abi.html#mangling
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/ABI.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define MANGLE_CHECKER 0
+
+#if MANGLE_CHECKER
+#include <cxxabi.h>
+#endif
+
+using namespace clang;
+
+namespace {
+
+/// Retrieve the declaration context that should be used when mangling the given
+/// declaration.
+static const DeclContext *getEffectiveDeclContext(const Decl *D) {
+ // The ABI assumes that lambda closure types that occur within
+ // default arguments live in the context of the function. However, due to
+ // the way in which Clang parses and creates function declarations, this is
+ // not the case: the lambda closure type ends up living in the context
+ // where the function itself resides, because the function declaration itself
+ // had not yet been created. Fix the context here.
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ if (RD->isLambda())
+ if (ParmVarDecl *ContextParam
+ = dyn_cast_or_null<ParmVarDecl>(RD->getLambdaContextDecl()))
+ return ContextParam->getDeclContext();
+ }
+
+ // Perform the same check for block literals.
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ if (ParmVarDecl *ContextParam
+ = dyn_cast_or_null<ParmVarDecl>(BD->getBlockManglingContextDecl()))
+ return ContextParam->getDeclContext();
+ }
+
+ const DeclContext *DC = D->getDeclContext();
+ if (const CapturedDecl *CD = dyn_cast<CapturedDecl>(DC))
+ return getEffectiveDeclContext(CD);
+
+ if (const auto *VD = dyn_cast<VarDecl>(D))
+ if (VD->isExternC())
+ return VD->getASTContext().getTranslationUnitDecl();
+
+ if (const auto *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isExternC())
+ return FD->getASTContext().getTranslationUnitDecl();
+
+ return DC;
+}
+
+static const DeclContext *getEffectiveParentContext(const DeclContext *DC) {
+ return getEffectiveDeclContext(cast<Decl>(DC));
+}
+
+static bool isLocalContainerContext(const DeclContext *DC) {
+ return isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC) || isa<BlockDecl>(DC);
+}
+
+static const RecordDecl *GetLocalClassDecl(const Decl *D) {
+ const DeclContext *DC = getEffectiveDeclContext(D);
+ while (!DC->isNamespace() && !DC->isTranslationUnit()) {
+ if (isLocalContainerContext(DC))
+ return dyn_cast<RecordDecl>(D);
+ D = cast<Decl>(DC);
+ DC = getEffectiveDeclContext(D);
+ }
+ return nullptr;
+}
+
+static const FunctionDecl *getStructor(const FunctionDecl *fn) {
+ if (const FunctionTemplateDecl *ftd = fn->getPrimaryTemplate())
+ return ftd->getTemplatedDecl();
+
+ return fn;
+}
+
+static const NamedDecl *getStructor(const NamedDecl *decl) {
+ const FunctionDecl *fn = dyn_cast_or_null<FunctionDecl>(decl);
+ return (fn ? getStructor(fn) : decl);
+}
+
+static bool isLambda(const NamedDecl *ND) {
+ const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(ND);
+ if (!Record)
+ return false;
+
+ return Record->isLambda();
+}
+
+static const unsigned UnknownArity = ~0U;
+
+class ItaniumMangleContextImpl : public ItaniumMangleContext {
+ typedef std::pair<const DeclContext*, IdentifierInfo*> DiscriminatorKeyTy;
+ llvm::DenseMap<DiscriminatorKeyTy, unsigned> Discriminator;
+ llvm::DenseMap<const NamedDecl*, unsigned> Uniquifier;
+
+public:
+ explicit ItaniumMangleContextImpl(ASTContext &Context,
+ DiagnosticsEngine &Diags)
+ : ItaniumMangleContext(Context, Diags) {}
+
+ /// @name Mangler Entry Points
+ /// @{
+
+ bool shouldMangleCXXName(const NamedDecl *D) override;
+ bool shouldMangleStringLiteral(const StringLiteral *) override {
+ return false;
+ }
+ void mangleCXXName(const NamedDecl *D, raw_ostream &) override;
+ void mangleThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk,
+ raw_ostream &) override;
+ void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ raw_ostream &) override;
+ void mangleReferenceTemporary(const VarDecl *D, unsigned ManglingNumber,
+ raw_ostream &) override;
+ void mangleCXXVTable(const CXXRecordDecl *RD, raw_ostream &) override;
+ void mangleCXXVTT(const CXXRecordDecl *RD, raw_ostream &) override;
+ void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
+ const CXXRecordDecl *Type, raw_ostream &) override;
+ void mangleCXXRTTI(QualType T, raw_ostream &) override;
+ void mangleCXXRTTIName(QualType T, raw_ostream &) override;
+ void mangleTypeName(QualType T, raw_ostream &) override;
+ void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ raw_ostream &) override;
+ void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ raw_ostream &) override;
+
+ void mangleCXXCtorComdat(const CXXConstructorDecl *D, raw_ostream &) override;
+ void mangleCXXDtorComdat(const CXXDestructorDecl *D, raw_ostream &) override;
+ void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &) override;
+ void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out) override;
+ void mangleDynamicAtExitDestructor(const VarDecl *D,
+ raw_ostream &Out) override;
+ void mangleSEHFilterExpression(const NamedDecl *EnclosingDecl,
+ raw_ostream &Out) override;
+ void mangleSEHFinallyBlock(const NamedDecl *EnclosingDecl,
+ raw_ostream &Out) override;
+ void mangleItaniumThreadLocalInit(const VarDecl *D, raw_ostream &) override;
+ void mangleItaniumThreadLocalWrapper(const VarDecl *D,
+ raw_ostream &) override;
+
+ void mangleStringLiteral(const StringLiteral *, raw_ostream &) override;
+
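+  // Illustrative example (not in the original source): in
+  //   void f() { { static int x; } { static int x; } }
+  // the first "x" mangles as _ZZ1fvE1x and the second as _ZZ1fvE1x_0; a
+  // discriminator is only emitted from the second occurrence onward.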
+ bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
+ // Lambda closure types are already numbered.
+ if (isLambda(ND))
+ return false;
+
+ // Anonymous tags are already numbered.
+ if (const TagDecl *Tag = dyn_cast<TagDecl>(ND)) {
+ if (Tag->getName().empty() && !Tag->getTypedefNameForAnonDecl())
+ return false;
+ }
+
+ // Use the canonical number for externally visible decls.
+ if (ND->isExternallyVisible()) {
+ unsigned discriminator = getASTContext().getManglingNumber(ND);
+ if (discriminator == 1)
+ return false;
+ disc = discriminator - 2;
+ return true;
+ }
+
+ // Make up a reasonable number for internal decls.
+ unsigned &discriminator = Uniquifier[ND];
+ if (!discriminator) {
+ const DeclContext *DC = getEffectiveDeclContext(ND);
+ discriminator = ++Discriminator[std::make_pair(DC, ND->getIdentifier())];
+ }
+ if (discriminator == 1)
+ return false;
+ disc = discriminator-2;
+ return true;
+ }
+ /// @}
+};
+
+/// Manage the mangling of a single name.
+class CXXNameMangler {
+ ItaniumMangleContextImpl &Context;
+ raw_ostream &Out;
+
+ /// The "structor" is the top-level declaration being mangled, if
+ /// that's not a template specialization; otherwise it's the pattern
+ /// for that specialization.
+ const NamedDecl *Structor;
+ unsigned StructorType;
+
+ /// The next substitution sequence number.
+ unsigned SeqID;
+
+ class FunctionTypeDepthState {
+ unsigned Bits;
+
+ enum { InResultTypeMask = 1 };
+
+ public:
+ FunctionTypeDepthState() : Bits(0) {}
+
+ /// The number of function types we're inside.
+ unsigned getDepth() const {
+ return Bits >> 1;
+ }
+
+ /// True if we're in the return type of the innermost function type.
+ bool isInResultType() const {
+ return Bits & InResultTypeMask;
+ }
+
+ FunctionTypeDepthState push() {
+ FunctionTypeDepthState tmp = *this;
+ Bits = (Bits & ~InResultTypeMask) + 2;
+ return tmp;
+ }
+
+ void enterResultType() {
+ Bits |= InResultTypeMask;
+ }
+
+ void leaveResultType() {
+ Bits &= ~InResultTypeMask;
+ }
+
+ void pop(FunctionTypeDepthState saved) {
+ assert(getDepth() == saved.getDepth() + 1);
+ Bits = saved.Bits;
+ }
+
+ } FunctionTypeDepth;
+
+ llvm::DenseMap<uintptr_t, unsigned> Substitutions;
+
+ ASTContext &getASTContext() const { return Context.getASTContext(); }
+
+public:
+ CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_,
+ const NamedDecl *D = nullptr)
+ : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(0),
+ SeqID(0) {
+ // These can't be mangled without a ctor type or dtor type.
+ assert(!D || (!isa<CXXDestructorDecl>(D) &&
+ !isa<CXXConstructorDecl>(D)));
+ }
+ CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_,
+ const CXXConstructorDecl *D, CXXCtorType Type)
+ : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
+ SeqID(0) { }
+ CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_,
+ const CXXDestructorDecl *D, CXXDtorType Type)
+ : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
+ SeqID(0) { }
+
+#if MANGLE_CHECKER
+ ~CXXNameMangler() {
+ if (Out.str()[0] == '\01')
+ return;
+
+ int status = 0;
+ char *result = abi::__cxa_demangle(Out.str().str().c_str(), 0, 0, &status);
+ assert(status == 0 && "Could not demangle mangled name!");
+ free(result);
+ }
+#endif
+ raw_ostream &getStream() { return Out; }
+
+ void mangle(const NamedDecl *D);
+ void mangleCallOffset(int64_t NonVirtual, int64_t Virtual);
+ void mangleNumber(const llvm::APSInt &I);
+ void mangleNumber(int64_t Number);
+ void mangleFloat(const llvm::APFloat &F);
+ void mangleFunctionEncoding(const FunctionDecl *FD);
+ void mangleSeqID(unsigned SeqID);
+ void mangleName(const NamedDecl *ND);
+ void mangleType(QualType T);
+ void mangleNameOrStandardSubstitution(const NamedDecl *ND);
+
+private:
+
+ bool mangleSubstitution(const NamedDecl *ND);
+ bool mangleSubstitution(QualType T);
+ bool mangleSubstitution(TemplateName Template);
+ bool mangleSubstitution(uintptr_t Ptr);
+
+ void mangleExistingSubstitution(QualType type);
+ void mangleExistingSubstitution(TemplateName name);
+
+ bool mangleStandardSubstitution(const NamedDecl *ND);
+
+ void addSubstitution(const NamedDecl *ND) {
+ ND = cast<NamedDecl>(ND->getCanonicalDecl());
+
+ addSubstitution(reinterpret_cast<uintptr_t>(ND));
+ }
+ void addSubstitution(QualType T);
+ void addSubstitution(TemplateName Template);
+ void addSubstitution(uintptr_t Ptr);
+
+ void mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
+ bool recursive = false);
+ void mangleUnresolvedName(NestedNameSpecifier *qualifier,
+ DeclarationName name,
+ unsigned KnownArity = UnknownArity);
+
+ void mangleName(const TemplateDecl *TD,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs);
+ void mangleUnqualifiedName(const NamedDecl *ND) {
+ mangleUnqualifiedName(ND, ND->getDeclName(), UnknownArity);
+ }
+ void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name,
+ unsigned KnownArity);
+ void mangleUnscopedName(const NamedDecl *ND);
+ void mangleUnscopedTemplateName(const TemplateDecl *ND);
+ void mangleUnscopedTemplateName(TemplateName);
+ void mangleSourceName(const IdentifierInfo *II);
+ void mangleLocalName(const Decl *D);
+ void mangleBlockForPrefix(const BlockDecl *Block);
+ void mangleUnqualifiedBlock(const BlockDecl *Block);
+ void mangleLambda(const CXXRecordDecl *Lambda);
+ void mangleNestedName(const NamedDecl *ND, const DeclContext *DC,
+ bool NoFunction=false);
+ void mangleNestedName(const TemplateDecl *TD,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs);
+ void manglePrefix(NestedNameSpecifier *qualifier);
+ void manglePrefix(const DeclContext *DC, bool NoFunction=false);
+ void manglePrefix(QualType type);
+ void mangleTemplatePrefix(const TemplateDecl *ND, bool NoFunction=false);
+ void mangleTemplatePrefix(TemplateName Template);
+ bool mangleUnresolvedTypeOrSimpleId(QualType DestroyedType,
+ StringRef Prefix = "");
+ void mangleOperatorName(DeclarationName Name, unsigned Arity);
+ void mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity);
+ void mangleQualifiers(Qualifiers Quals);
+ void mangleRefQualifier(RefQualifierKind RefQualifier);
+
+ void mangleObjCMethodName(const ObjCMethodDecl *MD);
+
+ // Declare manglers for every type class.
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T);
+#include "clang/AST/TypeNodes.def"
+
+ void mangleType(const TagType*);
+ void mangleType(TemplateName);
+ void mangleBareFunctionType(const FunctionType *T, bool MangleReturnType,
+ const FunctionDecl *FD = nullptr);
+ void mangleNeonVectorType(const VectorType *T);
+ void mangleAArch64NeonVectorType(const VectorType *T);
+
+ void mangleIntegerLiteral(QualType T, const llvm::APSInt &Value);
+ void mangleMemberExprBase(const Expr *base, bool isArrow);
+ void mangleMemberExpr(const Expr *base, bool isArrow,
+ NestedNameSpecifier *qualifier,
+ NamedDecl *firstQualifierLookup,
+ DeclarationName name,
+ unsigned knownArity);
+ void mangleCastExpression(const Expr *E, StringRef CastEncoding);
+ void mangleInitListElements(const InitListExpr *InitList);
+ void mangleExpression(const Expr *E, unsigned Arity = UnknownArity);
+ void mangleCXXCtorType(CXXCtorType T);
+ void mangleCXXDtorType(CXXDtorType T);
+
+ void mangleTemplateArgs(const TemplateArgumentLoc *TemplateArgs,
+ unsigned NumTemplateArgs);
+ void mangleTemplateArgs(const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs);
+ void mangleTemplateArgs(const TemplateArgumentList &AL);
+ void mangleTemplateArg(TemplateArgument A);
+
+ void mangleTemplateParameter(unsigned Index);
+
+ void mangleFunctionParam(const ParmVarDecl *parm);
+};
+
+}
+
+bool ItaniumMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (FD) {
+ LanguageLinkage L = FD->getLanguageLinkage();
+ // Overloadable functions need mangling.
+ if (FD->hasAttr<OverloadableAttr>())
+ return true;
+
+ // "main" is not mangled.
+ if (FD->isMain())
+ return false;
+
+ // C++ functions and those whose names are not a simple identifier need
+ // mangling.
+ if (!FD->getDeclName().isIdentifier() || L == CXXLanguageLinkage)
+ return true;
+
+ // C functions are not mangled.
+ if (L == CLanguageLinkage)
+ return false;
+ }
+
+ // Otherwise, no mangling is done outside C++ mode.
+ if (!getASTContext().getLangOpts().CPlusPlus)
+ return false;
+
+ const VarDecl *VD = dyn_cast<VarDecl>(D);
+ if (VD) {
+ // C variables are not mangled.
+ if (VD->isExternC())
+ return false;
+
+ // Variables at global scope with non-internal linkage are not mangled.
+ const DeclContext *DC = getEffectiveDeclContext(D);
+ // Check for extern variable declared locally.
+ if (DC->isFunctionOrMethod() && D->hasLinkage())
+ while (!DC->isNamespace() && !DC->isTranslationUnit())
+ DC = getEffectiveParentContext(DC);
+ if (DC->isTranslationUnit() && D->getFormalLinkage() != InternalLinkage &&
+ !isa<VarTemplateSpecializationDecl>(D))
+ return false;
+ }
+
+ return true;
+}
+
+void CXXNameMangler::mangle(const NamedDecl *D) {
+ // <mangled-name> ::= _Z <encoding>
+ // ::= <data name>
+ // ::= <special-name>
+ Out << "_Z";
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ mangleFunctionEncoding(FD);
+ else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ mangleName(VD);
+ else if (const IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(D))
+ mangleName(IFD->getAnonField());
+ else
+ mangleName(cast<FieldDecl>(D));
+}
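+
+// Illustrative examples (hypothetical declarations; standard Itanium
+// manglings):
+//   int f(int);              // f mangles as _Z1fi
+//   namespace ns { int x; }  // x mangles as _ZN2ns1xE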
+
+void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
+ // <encoding> ::= <function name> <bare-function-type>
+ mangleName(FD);
+
+ // Don't mangle in the type if this isn't a decl we should typically mangle.
+ if (!Context.shouldMangleDeclName(FD))
+ return;
+
+ if (FD->hasAttr<EnableIfAttr>()) {
+ FunctionTypeDepthState Saved = FunctionTypeDepth.push();
+ Out << "Ua9enable_ifI";
+ // FIXME: specific_attr_iterator iterates in reverse order. Fix that and use
+ // it here.
+ for (AttrVec::const_reverse_iterator I = FD->getAttrs().rbegin(),
+ E = FD->getAttrs().rend();
+ I != E; ++I) {
+ EnableIfAttr *EIA = dyn_cast<EnableIfAttr>(*I);
+ if (!EIA)
+ continue;
+ Out << 'X';
+ mangleExpression(EIA->getCond());
+ Out << 'E';
+ }
+ Out << 'E';
+ FunctionTypeDepth.pop(Saved);
+ }
+
+ // Whether the mangling of a function type includes the return type depends on
+ // the context and the nature of the function. The rules for deciding whether
+ // the return type is included are:
+ //
+ // 1. Template functions (names or types) have return types encoded, with
+ // the exceptions listed below.
+ // 2. Function types not appearing as part of a function name mangling,
+ // e.g. parameters, pointer types, etc., have return type encoded, with the
+ // exceptions listed below.
+ // 3. Non-template function names do not have return types encoded.
+ //
+ // The exceptions mentioned in (1) and (2) above, for which the return type is
+ // never included, are
+ // 1. Constructors.
+ // 2. Destructors.
+ // 3. Conversion operator functions, e.g. operator int.
+ bool MangleReturnType = false;
+ if (FunctionTemplateDecl *PrimaryTemplate = FD->getPrimaryTemplate()) {
+ if (!(isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD) ||
+ isa<CXXConversionDecl>(FD)))
+ MangleReturnType = true;
+
+ // Mangle the type of the primary template.
+ FD = PrimaryTemplate->getTemplatedDecl();
+ }
+
+ mangleBareFunctionType(FD->getType()->getAs<FunctionType>(),
+ MangleReturnType, FD);
+}
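+
+// Reference examples for the return-type rules above (hypothetical
+// declarations; standard Itanium manglings):
+//   void g(int);                   // _Z1gi -- rule 3: no return type
+//   template<class T> T max(T,T);  // max<int> is _Z3maxIiET_S0_S0_ --
+//                                  // rule 1: the return type T_ is encoded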
+
+static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
+ while (isa<LinkageSpecDecl>(DC)) {
+ DC = getEffectiveParentContext(DC);
+ }
+
+ return DC;
+}
+
+/// Return whether a given namespace is the 'std' namespace.
+static bool isStd(const NamespaceDecl *NS) {
+ if (!IgnoreLinkageSpecDecls(getEffectiveParentContext(NS))
+ ->isTranslationUnit())
+ return false;
+
+ const IdentifierInfo *II = NS->getOriginalNamespace()->getIdentifier();
+ return II && II->isStr("std");
+}
+
+// isStdNamespace - Return whether a given decl context is a toplevel 'std'
+// namespace.
+static bool isStdNamespace(const DeclContext *DC) {
+ if (!DC->isNamespace())
+ return false;
+
+ return isStd(cast<NamespaceDecl>(DC));
+}
+
+static const TemplateDecl *
+isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) {
+ // Check if we have a function template.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ if (const TemplateDecl *TD = FD->getPrimaryTemplate()) {
+ TemplateArgs = FD->getTemplateSpecializationArgs();
+ return TD;
+ }
+ }
+
+ // Check if we have a class template.
+ if (const ClassTemplateSpecializationDecl *Spec =
+ dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
+ TemplateArgs = &Spec->getTemplateArgs();
+ return Spec->getSpecializedTemplate();
+ }
+
+ // Check if we have a variable template.
+ if (const VarTemplateSpecializationDecl *Spec =
+ dyn_cast<VarTemplateSpecializationDecl>(ND)) {
+ TemplateArgs = &Spec->getTemplateArgs();
+ return Spec->getSpecializedTemplate();
+ }
+
+ return nullptr;
+}
+
+void CXXNameMangler::mangleName(const NamedDecl *ND) {
+ // <name> ::= <nested-name>
+ // ::= <unscoped-name>
+ // ::= <unscoped-template-name> <template-args>
+ // ::= <local-name>
+ //
+ const DeclContext *DC = getEffectiveDeclContext(ND);
+
+ // If this is an extern variable declared locally, the relevant DeclContext
+ // is that of the containing namespace, or the translation unit.
+ // FIXME: This is a hack; extern variables declared locally should have
+ // a proper semantic declaration context!
+ if (isLocalContainerContext(DC) && ND->hasLinkage() && !isLambda(ND))
+ while (!DC->isNamespace() && !DC->isTranslationUnit())
+ DC = getEffectiveParentContext(DC);
+ else if (GetLocalClassDecl(ND)) {
+ mangleLocalName(ND);
+ return;
+ }
+
+ DC = IgnoreLinkageSpecDecls(DC);
+
+ if (DC->isTranslationUnit() || isStdNamespace(DC)) {
+ // Check if we have a template.
+ const TemplateArgumentList *TemplateArgs = nullptr;
+ if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ mangleUnscopedTemplateName(TD);
+ mangleTemplateArgs(*TemplateArgs);
+ return;
+ }
+
+ mangleUnscopedName(ND);
+ return;
+ }
+
+ if (isLocalContainerContext(DC)) {
+ mangleLocalName(ND);
+ return;
+ }
+
+ mangleNestedName(ND, DC);
+}
+void CXXNameMangler::mangleName(const TemplateDecl *TD,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ const DeclContext *DC = IgnoreLinkageSpecDecls(getEffectiveDeclContext(TD));
+
+ if (DC->isTranslationUnit() || isStdNamespace(DC)) {
+ mangleUnscopedTemplateName(TD);
+ mangleTemplateArgs(TemplateArgs, NumTemplateArgs);
+ } else {
+ mangleNestedName(TD, TemplateArgs, NumTemplateArgs);
+ }
+}
+
+void CXXNameMangler::mangleUnscopedName(const NamedDecl *ND) {
+ // <unscoped-name> ::= <unqualified-name>
+ // ::= St <unqualified-name> # ::std::
+
+ if (isStdNamespace(IgnoreLinkageSpecDecls(getEffectiveDeclContext(ND))))
+ Out << "St";
+
+ mangleUnqualifiedName(ND);
+}
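+
+// E.g. std::terminate() mangles as _ZSt9terminatev; the "St" abbreviation
+// applies only when 'std' is the entire (top-level) qualification.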
+
+void CXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *ND) {
+ // <unscoped-template-name> ::= <unscoped-name>
+ // ::= <substitution>
+ if (mangleSubstitution(ND))
+ return;
+
+ // <template-template-param> ::= <template-param>
+ if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(ND))
+ mangleTemplateParameter(TTP->getIndex());
+ else
+ mangleUnscopedName(ND->getTemplatedDecl());
+
+ addSubstitution(ND);
+}
+
+void CXXNameMangler::mangleUnscopedTemplateName(TemplateName Template) {
+ // <unscoped-template-name> ::= <unscoped-name>
+ // ::= <substitution>
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleUnscopedTemplateName(TD);
+
+ if (mangleSubstitution(Template))
+ return;
+
+ DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
+ assert(Dependent && "Not a dependent template name?");
+ if (const IdentifierInfo *Id = Dependent->getIdentifier())
+ mangleSourceName(Id);
+ else
+ mangleOperatorName(Dependent->getOperator(), UnknownArity);
+
+ addSubstitution(Template);
+}
+
+void CXXNameMangler::mangleFloat(const llvm::APFloat &f) {
+ // ABI:
+ // Floating-point literals are encoded using a fixed-length
+ // lowercase hexadecimal string corresponding to the internal
+ // representation (IEEE on Itanium), high-order bytes first,
+ // without leading zeroes. For example: "Lf bf800000 E" is -1.0f
+ // on Itanium.
+ // The 'without leading zeroes' thing seems to be an editorial
+ // mistake; see the discussion on cxx-abi-dev beginning on
+ // 2012-01-16.
+
+ // Our requirements here are just barely weird enough to justify
+ // using a custom algorithm instead of post-processing APInt::toString().
+
+ llvm::APInt valueBits = f.bitcastToAPInt();
+ unsigned numCharacters = (valueBits.getBitWidth() + 3) / 4;
+ assert(numCharacters != 0);
+
+ // Allocate a buffer of the right number of characters.
+ SmallVector<char, 20> buffer(numCharacters);
+
+ // Fill the buffer left-to-right.
+ for (unsigned stringIndex = 0; stringIndex != numCharacters; ++stringIndex) {
+ // The bit-index of the next hex digit.
+ unsigned digitBitIndex = 4 * (numCharacters - stringIndex - 1);
+
+ // Project out 4 bits starting at 'digitBitIndex'.
+ llvm::integerPart hexDigit
+ = valueBits.getRawData()[digitBitIndex / llvm::integerPartWidth];
+ hexDigit >>= (digitBitIndex % llvm::integerPartWidth);
+ hexDigit &= 0xF;
+
+ // Map that over to a lowercase hex digit.
+ static const char charForHex[16] = {
+ '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
+ };
+ buffer[stringIndex] = charForHex[hexDigit];
+ }
+
+ Out.write(buffer.data(), numCharacters);
+}
+
+void CXXNameMangler::mangleNumber(const llvm::APSInt &Value) {
+ if (Value.isSigned() && Value.isNegative()) {
+ Out << 'n';
+ Value.abs().print(Out, /*signed*/ false);
+ } else {
+ Value.print(Out, /*signed*/ false);
+ }
+}
+
+void CXXNameMangler::mangleNumber(int64_t Number) {
+ // <number> ::= [n] <non-negative decimal integer>
+ if (Number < 0) {
+ Out << 'n';
+ Number = -Number;
+ }
+
+ Out << Number;
+}
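+
+// E.g. 42 is emitted as "42" and -42 as "n42".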
+
+void CXXNameMangler::mangleCallOffset(int64_t NonVirtual, int64_t Virtual) {
+ // <call-offset> ::= h <nv-offset> _
+ // ::= v <v-offset> _
+ // <nv-offset> ::= <offset number> # non-virtual base override
+ // <v-offset> ::= <offset number> _ <virtual offset number>
+ // # virtual base override, with vcall offset
+ if (!Virtual) {
+ Out << 'h';
+ mangleNumber(NonVirtual);
+ Out << '_';
+ return;
+ }
+
+ Out << 'v';
+ mangleNumber(NonVirtual);
+ Out << '_';
+ mangleNumber(Virtual);
+ Out << '_';
+}
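+
+// E.g. a thunk adjusting 'this' by -16 non-virtually emits "hn16_",
+// yielding thunk symbols of the form _ZThn16_N1D1fEv (illustrative).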
+
+void CXXNameMangler::manglePrefix(QualType type) {
+ if (const auto *TST = type->getAs<TemplateSpecializationType>()) {
+ if (!mangleSubstitution(QualType(TST, 0))) {
+ mangleTemplatePrefix(TST->getTemplateName());
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(TST->getArgs(), TST->getNumArgs());
+ addSubstitution(QualType(TST, 0));
+ }
+ } else if (const auto *DTST =
+ type->getAs<DependentTemplateSpecializationType>()) {
+ if (!mangleSubstitution(QualType(DTST, 0))) {
+ TemplateName Template = getASTContext().getDependentTemplateName(
+ DTST->getQualifier(), DTST->getIdentifier());
+ mangleTemplatePrefix(Template);
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(DTST->getArgs(), DTST->getNumArgs());
+ addSubstitution(QualType(DTST, 0));
+ }
+ } else {
+ // We use the QualType mangle type variant here because it handles
+ // substitutions.
+ mangleType(type);
+ }
+}
+
+/// Mangle everything prior to the base-unresolved-name in an unresolved-name.
+///
+/// \param recursive - true if this is being called recursively,
+/// i.e. if there is more prefix "to the right".
+void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
+ bool recursive) {
+
+ // x, ::x
+ // <unresolved-name> ::= [gs] <base-unresolved-name>
+
+ // T::x / decltype(p)::x
+ // <unresolved-name> ::= sr <unresolved-type> <base-unresolved-name>
+
+ // T::N::x / decltype(p)::N::x
+ // <unresolved-name> ::= srN <unresolved-type> <unresolved-qualifier-level>+ E
+ // <base-unresolved-name>
+
+ // A::x, N::y, A<T>::z; "gs" means leading "::"
+ // <unresolved-name> ::= [gs] sr <unresolved-qualifier-level>+ E
+ // <base-unresolved-name>
+
+ switch (qualifier->getKind()) {
+ case NestedNameSpecifier::Global:
+ Out << "gs";
+
+ // We want an 'sr' unless this is the entire NNS.
+ if (recursive)
+ Out << "sr";
+
+ // We never want an 'E' here.
+ return;
+
+ case NestedNameSpecifier::Super:
+ llvm_unreachable("Can't mangle __super specifier");
+
+ case NestedNameSpecifier::Namespace:
+ if (qualifier->getPrefix())
+ mangleUnresolvedPrefix(qualifier->getPrefix(),
+ /*recursive*/ true);
+ else
+ Out << "sr";
+ mangleSourceName(qualifier->getAsNamespace()->getIdentifier());
+ break;
+ case NestedNameSpecifier::NamespaceAlias:
+ if (qualifier->getPrefix())
+ mangleUnresolvedPrefix(qualifier->getPrefix(),
+ /*recursive*/ true);
+ else
+ Out << "sr";
+ mangleSourceName(qualifier->getAsNamespaceAlias()->getIdentifier());
+ break;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ const Type *type = qualifier->getAsType();
+
+ // We only want to use an unresolved-type encoding if this is one of:
+ // - a decltype
+ // - a template type parameter
+ // - a template template parameter with arguments
+ // In all of these cases, we should have no prefix.
+ if (qualifier->getPrefix()) {
+ mangleUnresolvedPrefix(qualifier->getPrefix(),
+ /*recursive*/ true);
+ } else {
+ // Otherwise, all the cases want this.
+ Out << "sr";
+ }
+
+ if (mangleUnresolvedTypeOrSimpleId(QualType(type, 0), recursive ? "N" : ""))
+ return;
+
+ break;
+ }
+
+ case NestedNameSpecifier::Identifier:
+ // Member expressions can have these without prefixes.
+ if (qualifier->getPrefix())
+ mangleUnresolvedPrefix(qualifier->getPrefix(),
+ /*recursive*/ true);
+ else
+ Out << "sr";
+
+ mangleSourceName(qualifier->getAsIdentifier());
+ break;
+ }
+
+ // If this was the innermost part of the NNS, and we fell out to
+ // here, append an 'E'.
+ if (!recursive)
+ Out << 'E';
+}
+
+/// Mangle an unresolved-name, which is generally used for names which
+/// weren't resolved to specific entities.
+void CXXNameMangler::mangleUnresolvedName(NestedNameSpecifier *qualifier,
+ DeclarationName name,
+ unsigned knownArity) {
+ if (qualifier) mangleUnresolvedPrefix(qualifier);
+ switch (name.getNameKind()) {
+ // <base-unresolved-name> ::= <simple-id>
+ case DeclarationName::Identifier:
+ mangleSourceName(name.getAsIdentifierInfo());
+ break;
+ // <base-unresolved-name> ::= dn <destructor-name>
+ case DeclarationName::CXXDestructorName:
+ Out << "dn";
+ mangleUnresolvedTypeOrSimpleId(name.getCXXNameType());
+ break;
+ // <base-unresolved-name> ::= on <operator-name>
+ case DeclarationName::CXXConversionFunctionName:
+ case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXOperatorName:
+ Out << "on";
+ mangleOperatorName(name, knownArity);
+ break;
+ case DeclarationName::CXXConstructorName:
+ llvm_unreachable("Can't mangle a constructor name!");
+ case DeclarationName::CXXUsingDirective:
+ llvm_unreachable("Can't mangle a using directive name!");
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCZeroArgSelector:
+ llvm_unreachable("Can't mangle Objective-C selector names here!");
+ }
+}
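+
+// Illustrative (assuming T is a template type parameter): the dependent
+// name T::x mangles as "srT_1x", while "::x" mangles as "gs1x".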
+
+void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
+ DeclarationName Name,
+ unsigned KnownArity) {
+ unsigned Arity = KnownArity;
+ // <unqualified-name> ::= <operator-name>
+ // ::= <ctor-dtor-name>
+ // ::= <source-name>
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier: {
+ if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
+ // We must avoid conflicts between internally- and externally-
+ // linked variable and function declaration names in the same TU:
+ // void test() { extern void foo(); }
+ // static void foo();
+ // This naming convention is the same as that followed by GCC,
+ // though it shouldn't actually matter.
+ if (ND && ND->getFormalLinkage() == InternalLinkage &&
+ getEffectiveDeclContext(ND)->isFileContext())
+ Out << 'L';
+
+ mangleSourceName(II);
+ break;
+ }
+
+ // Otherwise, an anonymous entity. We must have a declaration.
+ assert(ND && "mangling empty name without declaration");
+
+ if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
+ if (NS->isAnonymousNamespace()) {
+ // This is how gcc mangles these names.
+ Out << "12_GLOBAL__N_1";
+ break;
+ }
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
+ // We must have an anonymous union or struct declaration.
+ const RecordDecl *RD =
+ cast<RecordDecl>(VD->getType()->getAs<RecordType>()->getDecl());
+
+ // Itanium C++ ABI 5.1.2:
+ //
+ // For the purposes of mangling, the name of an anonymous union is
+ // considered to be the name of the first named data member found by a
+ // pre-order, depth-first, declaration-order walk of the data members of
+ // the anonymous union. If there is no such data member (i.e., if all of
+ // the data members in the union are unnamed), then there is no way for
+ // a program to refer to the anonymous union, and there is therefore no
+ // need to mangle its name.
+ assert(RD->isAnonymousStructOrUnion()
+ && "Expected anonymous struct or union!");
+ const FieldDecl *FD = RD->findFirstNamedDataMember();
+
+ // It's actually possible for various reasons for us to get here
+ // with an empty anonymous struct / union. Fortunately, it
+ // doesn't really matter what name we generate.
+ if (!FD) break;
+ assert(FD->getIdentifier() && "Data member name isn't an identifier!");
+
+ mangleSourceName(FD->getIdentifier());
+ break;
+ }
+
+ // Class extensions have no name as a category, and it's possible
+ // for them to be the semantic parent of certain declarations
+ // (primarily, tag decls defined within declarations). Such
+ // declarations will always have internal linkage, so the name
+ // doesn't really matter, but we shouldn't crash on them. For
+ // safety, just handle all ObjC containers here.
+ if (isa<ObjCContainerDecl>(ND))
+ break;
+
+ // We must have an anonymous struct.
+ const TagDecl *TD = cast<TagDecl>(ND);
+ if (const TypedefNameDecl *D = TD->getTypedefNameForAnonDecl()) {
+ assert(TD->getDeclContext() == D->getDeclContext() &&
+ "Typedef should not be in another decl context!");
+ assert(D->getDeclName().getAsIdentifierInfo() &&
+ "Typedef was not named!");
+ mangleSourceName(D->getDeclName().getAsIdentifierInfo());
+ break;
+ }
+
+ // <unnamed-type-name> ::= <closure-type-name>
+ //
+ // <closure-type-name> ::= Ul <lambda-sig> E [ <nonnegative number> ] _
+ // <lambda-sig> ::= <parameter-type>+ # Parameter types or 'v' for 'void'.
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(TD)) {
+ if (Record->isLambda() && Record->getLambdaManglingNumber()) {
+ mangleLambda(Record);
+ break;
+ }
+ }
+
+ if (TD->isExternallyVisible()) {
+ unsigned UnnamedMangle = getASTContext().getManglingNumber(TD);
+ Out << "Ut";
+ if (UnnamedMangle > 1)
+ Out << UnnamedMangle - 2;
+ Out << '_';
+ break;
+ }
+
+ // Get a unique id for the anonymous struct.
+ unsigned AnonStructId = Context.getAnonymousStructId(TD);
+
+ // Mangle it as a source name in the form
+ // [n] $_<id>
+ // where n is the length of the string.
+ SmallString<8> Str;
+ Str += "$_";
+ Str += llvm::utostr(AnonStructId);
+
+ Out << Str.size();
+ Out << Str;
+ break;
+ }
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ llvm_unreachable("Can't mangle Objective-C selector names here!");
+
+ case DeclarationName::CXXConstructorName:
+ if (ND == Structor)
+ // If the named decl is the C++ constructor we're mangling, use the type
+ // we were given.
+ mangleCXXCtorType(static_cast<CXXCtorType>(StructorType));
+ else
+ // Otherwise, use the complete constructor name. This is relevant if a
+ // class with a constructor is declared within a constructor.
+ mangleCXXCtorType(Ctor_Complete);
+ break;
+
+ case DeclarationName::CXXDestructorName:
+ if (ND == Structor)
+ // If the named decl is the C++ destructor we're mangling, use the type we
+ // were given.
+ mangleCXXDtorType(static_cast<CXXDtorType>(StructorType));
+ else
+ // Otherwise, use the complete destructor name. This is relevant if a
+ // class with a destructor is declared within a destructor.
+ mangleCXXDtorType(Dtor_Complete);
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ if (ND && Arity == UnknownArity) {
+ Arity = cast<FunctionDecl>(ND)->getNumParams();
+
+ // If we have a member function, we need to include the 'this' pointer.
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(ND))
+ if (!MD->isStatic())
+ Arity++;
+ }
+ // FALLTHROUGH
+ case DeclarationName::CXXConversionFunctionName:
+ case DeclarationName::CXXLiteralOperatorName:
+ mangleOperatorName(Name, Arity);
+ break;
+
+ case DeclarationName::CXXUsingDirective:
+ llvm_unreachable("Can't mangle a using directive name!");
+ }
+}
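+
+// Reference examples for the Identifier cases above (hypothetical
+// declarations; standard manglings):
+//   static int x;            // internal linkage at file scope: _ZL1x
+//   namespace { void f(); }  // anonymous namespace: _ZN12_GLOBAL__N_11fEv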
+
+void CXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
+ // <source-name> ::= <positive length number> <identifier>
+ // <number> ::= [n] <non-negative decimal integer>
+ // <identifier> ::= <unqualified source code identifier>
+ Out << II->getLength() << II->getName();
+}
+
+void CXXNameMangler::mangleNestedName(const NamedDecl *ND,
+ const DeclContext *DC,
+ bool NoFunction) {
+ // <nested-name>
+ // ::= N [<CV-qualifiers>] [<ref-qualifier>] <prefix> <unqualified-name> E
+ // ::= N [<CV-qualifiers>] [<ref-qualifier>] <template-prefix>
+ // <template-args> E
+
+ Out << 'N';
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(ND)) {
+ Qualifiers MethodQuals =
+ Qualifiers::fromCVRMask(Method->getTypeQualifiers());
+ // We do not consider restrict a distinguishing attribute for overloading
+ // purposes so we must not mangle it.
+ MethodQuals.removeRestrict();
+ mangleQualifiers(MethodQuals);
+ mangleRefQualifier(Method->getRefQualifier());
+ }
+
+ // Check if we have a template.
+ const TemplateArgumentList *TemplateArgs = nullptr;
+ if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ mangleTemplatePrefix(TD, NoFunction);
+ mangleTemplateArgs(*TemplateArgs);
+ } else {
+ manglePrefix(DC, NoFunction);
+ mangleUnqualifiedName(ND);
+ }
+
+ Out << 'E';
+}
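+
+// E.g. "void N::C::f() const" mangles as _ZNK1N1C1fEv: 'N', 'K' for the
+// const 'this' qualifier, the prefix 1N1C, the name 1f, the closing 'E',
+// and 'v' for the empty parameter list (illustrative).
+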
+void CXXNameMangler::mangleNestedName(const TemplateDecl *TD,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ // <nested-name> ::= N [<CV-qualifiers>] <template-prefix> <template-args> E
+
+ Out << 'N';
+
+ mangleTemplatePrefix(TD);
+ mangleTemplateArgs(TemplateArgs, NumTemplateArgs);
+
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleLocalName(const Decl *D) {
+ // <local-name> := Z <function encoding> E <entity name> [<discriminator>]
+ // := Z <function encoding> E s [<discriminator>]
+ // <local-name> := Z <function encoding> E d [ <parameter number> ]
+ // _ <entity name>
+ // <discriminator> := _ <non-negative number>
+ assert(isa<NamedDecl>(D) || isa<BlockDecl>(D));
+ const RecordDecl *RD = GetLocalClassDecl(D);
+ const DeclContext *DC = getEffectiveDeclContext(RD ? RD : D);
+
+ Out << 'Z';
+
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(DC))
+ mangleObjCMethodName(MD);
+ else if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC))
+ mangleBlockForPrefix(BD);
+ else
+ mangleFunctionEncoding(cast<FunctionDecl>(DC));
+
+ Out << 'E';
+
+ if (RD) {
+ // The parameter number is omitted for the last parameter, 0 for the
+ // second-to-last parameter, 1 for the third-to-last parameter, etc. The
+ // <entity name> will of course contain a <closure-type-name>: Its
+ // numbering will be local to the particular argument in which it appears
+ // -- other default arguments do not affect its encoding.
+ const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD);
+ if (CXXRD->isLambda()) {
+ if (const ParmVarDecl *Parm
+ = dyn_cast_or_null<ParmVarDecl>(CXXRD->getLambdaContextDecl())) {
+ if (const FunctionDecl *Func
+ = dyn_cast<FunctionDecl>(Parm->getDeclContext())) {
+ Out << 'd';
+ unsigned Num = Func->getNumParams() - Parm->getFunctionScopeIndex();
+ if (Num > 1)
+ mangleNumber(Num - 2);
+ Out << '_';
+ }
+ }
+ }
+
+ // Mangle the name relative to the closest enclosing function.
+ // The equality test is OK because RD was derived from D above.
+ if (D == RD) {
+ mangleUnqualifiedName(RD);
+ } else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ manglePrefix(getEffectiveDeclContext(BD), true /*NoFunction*/);
+ mangleUnqualifiedBlock(BD);
+ } else {
+ const NamedDecl *ND = cast<NamedDecl>(D);
+ mangleNestedName(ND, getEffectiveDeclContext(ND), true /*NoFunction*/);
+ }
+ } else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ // Mangle a block in a default parameter; see above explanation for
+ // lambdas.
+ if (const ParmVarDecl *Parm
+ = dyn_cast_or_null<ParmVarDecl>(BD->getBlockManglingContextDecl())) {
+ if (const FunctionDecl *Func
+ = dyn_cast<FunctionDecl>(Parm->getDeclContext())) {
+ Out << 'd';
+ unsigned Num = Func->getNumParams() - Parm->getFunctionScopeIndex();
+ if (Num > 1)
+ mangleNumber(Num - 2);
+ Out << '_';
+ }
+ }
+
+ mangleUnqualifiedBlock(BD);
+ } else {
+ mangleUnqualifiedName(cast<NamedDecl>(D));
+ }
+
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(RD ? RD : D)) {
+ unsigned disc;
+ if (Context.getNextDiscriminator(ND, disc)) {
+ if (disc < 10)
+ Out << '_' << disc;
+ else
+ Out << "__" << disc << '_';
+ }
+ }
+}
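+
+// E.g. "void f() { static int n; }" mangles n as _ZZ1fvE1n; a second
+// block-scope "n" elsewhere in f gets the discriminator form _ZZ1fvE1n_0.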
+
+void CXXNameMangler::mangleBlockForPrefix(const BlockDecl *Block) {
+ if (GetLocalClassDecl(Block)) {
+ mangleLocalName(Block);
+ return;
+ }
+ const DeclContext *DC = getEffectiveDeclContext(Block);
+ if (isLocalContainerContext(DC)) {
+ mangleLocalName(Block);
+ return;
+ }
+ manglePrefix(getEffectiveDeclContext(Block));
+ mangleUnqualifiedBlock(Block);
+}
+
+void CXXNameMangler::mangleUnqualifiedBlock(const BlockDecl *Block) {
+ if (Decl *Context = Block->getBlockManglingContextDecl()) {
+ if ((isa<VarDecl>(Context) || isa<FieldDecl>(Context)) &&
+ Context->getDeclContext()->isRecord()) {
+ if (const IdentifierInfo *Name
+ = cast<NamedDecl>(Context)->getIdentifier()) {
+ mangleSourceName(Name);
+ Out << 'M';
+ }
+ }
+ }
+
+ // If we have a block mangling number, use it.
+ unsigned Number = Block->getBlockManglingNumber();
+ // Otherwise, just make up a number. It doesn't matter what it is because
+ // the symbol in question isn't externally visible.
+ if (!Number)
+ Number = Context.getBlockId(Block, false);
+ Out << "Ub";
+ if (Number > 0)
+ Out << Number - 1;
+ Out << '_';
+}
+
+void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
+ // If the context of a closure type is an initializer for a class member
+ // (static or nonstatic), it is encoded in a qualified name with a final
+ // <prefix> of the form:
+ //
+ // <data-member-prefix> := <member source-name> M
+ //
+ // Technically, the data-member-prefix is part of the <prefix>. However,
+ // since a closure type will always be mangled with a prefix, it's easier
+ // to emit that last part of the prefix here.
+ if (Decl *Context = Lambda->getLambdaContextDecl()) {
+ if ((isa<VarDecl>(Context) || isa<FieldDecl>(Context)) &&
+ Context->getDeclContext()->isRecord()) {
+ if (const IdentifierInfo *Name
+ = cast<NamedDecl>(Context)->getIdentifier()) {
+ mangleSourceName(Name);
+ Out << 'M';
+ }
+ }
+ }
+
+ Out << "Ul";
+ const FunctionProtoType *Proto = Lambda->getLambdaTypeInfo()->getType()->
+ getAs<FunctionProtoType>();
+ mangleBareFunctionType(Proto, /*MangleReturnType=*/false,
+ Lambda->getLambdaStaticInvoker());
+ Out << "E";
+
+ // The number is omitted for the first closure type with a given
+ // <lambda-sig> in a given context; it is n-2 for the nth closure type
+ // (in lexical order) with that same <lambda-sig> and context.
+ //
+ // The AST keeps track of the number for us.
+ unsigned Number = Lambda->getLambdaManglingNumber();
+ assert(Number > 0 && "Lambda should be mangled as an unnamed class");
+ if (Number > 1)
+ mangleNumber(Number - 2);
+ Out << '_';
+}
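+
+// E.g. the first "[](int) { ... }" in "void f()" has closure-type-name
+// UliE_, so its call operator mangles as _ZZ1fvENKUliE_clEi (illustrative).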
+
+void CXXNameMangler::manglePrefix(NestedNameSpecifier *qualifier) {
+ switch (qualifier->getKind()) {
+ case NestedNameSpecifier::Global:
+ // nothing
+ return;
+
+ case NestedNameSpecifier::Super:
+ llvm_unreachable("Can't mangle __super specifier");
+
+ case NestedNameSpecifier::Namespace:
+ mangleName(qualifier->getAsNamespace());
+ return;
+
+ case NestedNameSpecifier::NamespaceAlias:
+ mangleName(qualifier->getAsNamespaceAlias()->getNamespace());
+ return;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ manglePrefix(QualType(qualifier->getAsType(), 0));
+ return;
+
+ case NestedNameSpecifier::Identifier:
+ // Member expressions can have these without prefixes, but that
+ // should end up in mangleUnresolvedPrefix instead.
+ assert(qualifier->getPrefix());
+ manglePrefix(qualifier->getPrefix());
+
+ mangleSourceName(qualifier->getAsIdentifier());
+ return;
+ }
+
+ llvm_unreachable("unexpected nested name specifier");
+}
+
+void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
+ // <prefix> ::= <prefix> <unqualified-name>
+ // ::= <template-prefix> <template-args>
+ // ::= <template-param>
+ // ::= # empty
+ // ::= <substitution>
+
+ DC = IgnoreLinkageSpecDecls(DC);
+
+ if (DC->isTranslationUnit())
+ return;
+
+ if (NoFunction && isLocalContainerContext(DC))
+ return;
+
+ assert(!isLocalContainerContext(DC));
+
+ const NamedDecl *ND = cast<NamedDecl>(DC);
+ if (mangleSubstitution(ND))
+ return;
+
+ // Check if we have a template.
+ const TemplateArgumentList *TemplateArgs = nullptr;
+ if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ mangleTemplatePrefix(TD);
+ mangleTemplateArgs(*TemplateArgs);
+ } else {
+ manglePrefix(getEffectiveDeclContext(ND), NoFunction);
+ mangleUnqualifiedName(ND);
+ }
+
+ addSubstitution(ND);
+}
+
+void CXXNameMangler::mangleTemplatePrefix(TemplateName Template) {
+ // <template-prefix> ::= <prefix> <template unqualified-name>
+ // ::= <template-param>
+ // ::= <substitution>
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleTemplatePrefix(TD);
+
+ if (QualifiedTemplateName *Qualified = Template.getAsQualifiedTemplateName())
+ manglePrefix(Qualified->getQualifier());
+
+ if (OverloadedTemplateStorage *Overloaded
+ = Template.getAsOverloadedTemplate()) {
+ mangleUnqualifiedName(nullptr, (*Overloaded->begin())->getDeclName(),
+ UnknownArity);
+ return;
+ }
+
+ DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
+ assert(Dependent && "Unknown template name kind?");
+ if (NestedNameSpecifier *Qualifier = Dependent->getQualifier())
+ manglePrefix(Qualifier);
+ mangleUnscopedTemplateName(Template);
+}
+
+void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND,
+ bool NoFunction) {
+ // <template-prefix> ::= <prefix> <template unqualified-name>
+ // ::= <template-param>
+ // ::= <substitution>
+ // <template-template-param> ::= <template-param>
+ // <substitution>
+
+ if (mangleSubstitution(ND))
+ return;
+
+ // <template-template-param> ::= <template-param>
+ if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(ND)) {
+ mangleTemplateParameter(TTP->getIndex());
+ } else {
+ manglePrefix(getEffectiveDeclContext(ND), NoFunction);
+ mangleUnqualifiedName(ND->getTemplatedDecl());
+ }
+
+ addSubstitution(ND);
+}
+
+/// Mangles a template name under the production <type>. Required for
+/// template template arguments.
+/// <type> ::= <class-enum-type>
+/// ::= <template-param>
+/// ::= <substitution>
+void CXXNameMangler::mangleType(TemplateName TN) {
+ if (mangleSubstitution(TN))
+ return;
+
+ TemplateDecl *TD = nullptr;
+
+ switch (TN.getKind()) {
+ case TemplateName::QualifiedTemplate:
+ TD = TN.getAsQualifiedTemplateName()->getTemplateDecl();
+ goto HaveDecl;
+
+ case TemplateName::Template:
+ TD = TN.getAsTemplateDecl();
+ goto HaveDecl;
+
+ HaveDecl:
+ if (isa<TemplateTemplateParmDecl>(TD))
+ mangleTemplateParameter(cast<TemplateTemplateParmDecl>(TD)->getIndex());
+ else
+ mangleName(TD);
+ break;
+
+ case TemplateName::OverloadedTemplate:
+ llvm_unreachable("can't mangle an overloaded template name as a <type>");
+
+ case TemplateName::DependentTemplate: {
+ const DependentTemplateName *Dependent = TN.getAsDependentTemplateName();
+ assert(Dependent->isIdentifier());
+
+ // <class-enum-type> ::= <name>
+ // <name> ::= <nested-name>
+ mangleUnresolvedPrefix(Dependent->getQualifier());
+ mangleSourceName(Dependent->getIdentifier());
+ break;
+ }
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ // Substituted template parameters are mangled as the substituted
+ // template. This will check for the substitution twice, which is
+ // fine, but we have to return early so that we don't try to *add*
+ // the substitution twice.
+ SubstTemplateTemplateParmStorage *subst
+ = TN.getAsSubstTemplateTemplateParm();
+ mangleType(subst->getReplacement());
+ return;
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ // FIXME: not clear how to mangle this!
+ // template <template <class> class T...> class A {
+ // template <template <class> class U...> void foo(B<T,U> x...);
+ // };
+ Out << "_SUBSTPACK_";
+ break;
+ }
+ }
+
+ addSubstitution(TN);
+}
+
+bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
+ StringRef Prefix) {
+ // Only certain other types are valid as prefixes; enumerate them.
+ switch (Ty->getTypeClass()) {
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Adjusted:
+ case Type::Decayed:
+ case Type::Pointer:
+ case Type::BlockPointer:
+ case Type::LValueReference:
+ case Type::RValueReference:
+ case Type::MemberPointer:
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::DependentSizedArray:
+ case Type::DependentSizedExtVector:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ case Type::Paren:
+ case Type::Attributed:
+ case Type::Auto:
+ case Type::PackExpansion:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ case Type::Atomic:
+ llvm_unreachable("type is illegal as a nested name specifier");
+
+ case Type::SubstTemplateTypeParmPack:
+ // FIXME: not clear how to mangle this!
+ // template <class T...> class A {
+ // template <class U...> void foo(decltype(T::foo(U())) x...);
+ // };
+ Out << "_SUBSTPACK_";
+ break;
+
+ // <unresolved-type> ::= <template-param>
+ // ::= <decltype>
+ // ::= <template-template-param> <template-args>
+ // (this last is not official yet)
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::Decltype:
+ case Type::TemplateTypeParm:
+ case Type::UnaryTransform:
+ case Type::SubstTemplateTypeParm:
+ unresolvedType:
+ // Some callers want a prefix before the mangled type.
+ Out << Prefix;
+
+ // This seems to do everything we want. It's not really
+ // sanctioned for a substituted template parameter, though.
+ mangleType(Ty);
+
+ // We never want to print 'E' directly after an unresolved-type,
+ // so we return directly.
+ return true;
+
+ case Type::Typedef:
+ mangleSourceName(cast<TypedefType>(Ty)->getDecl()->getIdentifier());
+ break;
+
+ case Type::UnresolvedUsing:
+ mangleSourceName(
+ cast<UnresolvedUsingType>(Ty)->getDecl()->getIdentifier());
+ break;
+
+ case Type::Enum:
+ case Type::Record:
+ mangleSourceName(cast<TagType>(Ty)->getDecl()->getIdentifier());
+ break;
+
+ case Type::TemplateSpecialization: {
+ const TemplateSpecializationType *TST =
+ cast<TemplateSpecializationType>(Ty);
+ TemplateName TN = TST->getTemplateName();
+ switch (TN.getKind()) {
+ case TemplateName::Template:
+ case TemplateName::QualifiedTemplate: {
+ TemplateDecl *TD = TN.getAsTemplateDecl();
+
+ // If the base is a template template parameter, this is an
+ // unresolved type.
+ assert(TD && "no template for template specialization type");
+ if (isa<TemplateTemplateParmDecl>(TD))
+ goto unresolvedType;
+
+ mangleSourceName(TD->getIdentifier());
+ break;
+ }
+
+ case TemplateName::OverloadedTemplate:
+ case TemplateName::DependentTemplate:
+ llvm_unreachable("invalid base for a template specialization type");
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ SubstTemplateTemplateParmStorage *subst =
+ TN.getAsSubstTemplateTemplateParm();
+ mangleExistingSubstitution(subst->getReplacement());
+ break;
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ // FIXME: not clear how to mangle this!
+ // template <template <class U> class T...> class A {
+ // template <class U...> void foo(decltype(T<U>::foo) x...);
+ // };
+ Out << "_SUBSTPACK_";
+ break;
+ }
+ }
+
+ mangleTemplateArgs(TST->getArgs(), TST->getNumArgs());
+ break;
+ }
+
+ case Type::InjectedClassName:
+ mangleSourceName(
+ cast<InjectedClassNameType>(Ty)->getDecl()->getIdentifier());
+ break;
+
+ case Type::DependentName:
+ mangleSourceName(cast<DependentNameType>(Ty)->getIdentifier());
+ break;
+
+ case Type::DependentTemplateSpecialization: {
+ const DependentTemplateSpecializationType *DTST =
+ cast<DependentTemplateSpecializationType>(Ty);
+ mangleSourceName(DTST->getIdentifier());
+ mangleTemplateArgs(DTST->getArgs(), DTST->getNumArgs());
+ break;
+ }
+
+ case Type::Elaborated:
+ return mangleUnresolvedTypeOrSimpleId(
+ cast<ElaboratedType>(Ty)->getNamedType(), Prefix);
+ }
+
+ return false;
+}
+
+void CXXNameMangler::mangleOperatorName(DeclarationName Name, unsigned Arity) {
+ switch (Name.getNameKind()) {
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXUsingDirective:
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCZeroArgSelector:
+ llvm_unreachable("Not an operator name");
+
+ case DeclarationName::CXXConversionFunctionName:
+ // <operator-name> ::= cv <type> # (cast)
+ Out << "cv";
+ mangleType(Name.getCXXNameType());
+ break;
+
+ case DeclarationName::CXXLiteralOperatorName:
+ Out << "li";
+ mangleSourceName(Name.getCXXLiteralIdentifier());
+ return;
+
+ case DeclarationName::CXXOperatorName:
+ mangleOperatorName(Name.getCXXOverloadedOperator(), Arity);
+ break;
+ }
+}
+
+void
+CXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity) {
+ switch (OO) {
+ // <operator-name> ::= nw # new
+ case OO_New: Out << "nw"; break;
+ // ::= na # new[]
+ case OO_Array_New: Out << "na"; break;
+ // ::= dl # delete
+ case OO_Delete: Out << "dl"; break;
+ // ::= da # delete[]
+ case OO_Array_Delete: Out << "da"; break;
+ // ::= ps # + (unary)
+ // ::= pl # + (binary or unknown)
+ case OO_Plus:
+ Out << (Arity == 1? "ps" : "pl"); break;
+ // ::= ng # - (unary)
+ // ::= mi # - (binary or unknown)
+ case OO_Minus:
+ Out << (Arity == 1? "ng" : "mi"); break;
+ // ::= ad # & (unary)
+ // ::= an # & (binary or unknown)
+ case OO_Amp:
+ Out << (Arity == 1? "ad" : "an"); break;
+ // ::= de # * (unary)
+ // ::= ml # * (binary or unknown)
+ case OO_Star:
+ // Use binary when unknown.
+ Out << (Arity == 1? "de" : "ml"); break;
+ // ::= co # ~
+ case OO_Tilde: Out << "co"; break;
+ // ::= dv # /
+ case OO_Slash: Out << "dv"; break;
+ // ::= rm # %
+ case OO_Percent: Out << "rm"; break;
+ // ::= or # |
+ case OO_Pipe: Out << "or"; break;
+ // ::= eo # ^
+ case OO_Caret: Out << "eo"; break;
+ // ::= aS # =
+ case OO_Equal: Out << "aS"; break;
+ // ::= pL # +=
+ case OO_PlusEqual: Out << "pL"; break;
+ // ::= mI # -=
+ case OO_MinusEqual: Out << "mI"; break;
+ // ::= mL # *=
+ case OO_StarEqual: Out << "mL"; break;
+ // ::= dV # /=
+ case OO_SlashEqual: Out << "dV"; break;
+ // ::= rM # %=
+ case OO_PercentEqual: Out << "rM"; break;
+ // ::= aN # &=
+ case OO_AmpEqual: Out << "aN"; break;
+ // ::= oR # |=
+ case OO_PipeEqual: Out << "oR"; break;
+ // ::= eO # ^=
+ case OO_CaretEqual: Out << "eO"; break;
+ // ::= ls # <<
+ case OO_LessLess: Out << "ls"; break;
+ // ::= rs # >>
+ case OO_GreaterGreater: Out << "rs"; break;
+ // ::= lS # <<=
+ case OO_LessLessEqual: Out << "lS"; break;
+ // ::= rS # >>=
+ case OO_GreaterGreaterEqual: Out << "rS"; break;
+ // ::= eq # ==
+ case OO_EqualEqual: Out << "eq"; break;
+ // ::= ne # !=
+ case OO_ExclaimEqual: Out << "ne"; break;
+ // ::= lt # <
+ case OO_Less: Out << "lt"; break;
+ // ::= gt # >
+ case OO_Greater: Out << "gt"; break;
+ // ::= le # <=
+ case OO_LessEqual: Out << "le"; break;
+ // ::= ge # >=
+ case OO_GreaterEqual: Out << "ge"; break;
+ // ::= nt # !
+ case OO_Exclaim: Out << "nt"; break;
+ // ::= aa # &&
+ case OO_AmpAmp: Out << "aa"; break;
+ // ::= oo # ||
+ case OO_PipePipe: Out << "oo"; break;
+ // ::= pp # ++
+ case OO_PlusPlus: Out << "pp"; break;
+ // ::= mm # --
+ case OO_MinusMinus: Out << "mm"; break;
+ // ::= cm # ,
+ case OO_Comma: Out << "cm"; break;
+ // ::= pm # ->*
+ case OO_ArrowStar: Out << "pm"; break;
+ // ::= pt # ->
+ case OO_Arrow: Out << "pt"; break;
+ // ::= cl # ()
+ case OO_Call: Out << "cl"; break;
+ // ::= ix # []
+ case OO_Subscript: Out << "ix"; break;
+
+ // ::= qu # ?
+ // The conditional operator can't be overloaded, but we still handle it when
+ // mangling expressions.
+ case OO_Conditional: Out << "qu"; break;
+ // Proposal on cxx-abi-dev, 2015-10-21.
+ // ::= aw # co_await
+ case OO_Coawait: Out << "aw"; break;
+
+ case OO_None:
+ case NUM_OVERLOADED_OPERATORS:
+ llvm_unreachable("Not an overloaded operator");
+ }
+}
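+
+// E.g. for a class A, the free "A operator+(A, A)" mangles as _Zpl1AS_:
+// "pl" for binary plus, then the parameter types, with the repeated A
+// compressed to the substitution S_ (illustrative).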
+
+void CXXNameMangler::mangleQualifiers(Qualifiers Quals) {
+ // <CV-qualifiers> ::= [r] [V] [K] # restrict (C99), volatile, const
+ if (Quals.hasRestrict())
+ Out << 'r';
+ if (Quals.hasVolatile())
+ Out << 'V';
+ if (Quals.hasConst())
+ Out << 'K';
+
+ if (Quals.hasAddressSpace()) {
+ // Address space extension:
+ //
+ // <type> ::= U <target-addrspace>
+ // <type> ::= U <OpenCL-addrspace>
+ // <type> ::= U <CUDA-addrspace>
+
+ SmallString<64> ASString;
+ unsigned AS = Quals.getAddressSpace();
+
+ if (Context.getASTContext().addressSpaceMapManglingFor(AS)) {
+ // <target-addrspace> ::= "AS" <address-space-number>
+ unsigned TargetAS = Context.getASTContext().getTargetAddressSpace(AS);
+ ASString = "AS" + llvm::utostr_32(TargetAS);
+ } else {
+ switch (AS) {
+ default: llvm_unreachable("Not a language specific address space");
+ // <OpenCL-addrspace> ::= "CL" [ "global" | "local" | "constant" ]
+ case LangAS::opencl_global: ASString = "CLglobal"; break;
+ case LangAS::opencl_local: ASString = "CLlocal"; break;
+ case LangAS::opencl_constant: ASString = "CLconstant"; break;
+ // <CUDA-addrspace> ::= "CU" [ "device" | "constant" | "shared" ]
+ case LangAS::cuda_device: ASString = "CUdevice"; break;
+ case LangAS::cuda_constant: ASString = "CUconstant"; break;
+ case LangAS::cuda_shared: ASString = "CUshared"; break;
+ }
+ }
+ Out << 'U' << ASString.size() << ASString;
+ }
+
+ StringRef LifetimeName;
+ switch (Quals.getObjCLifetime()) {
+ // Objective-C ARC Extension:
+ //
+ // <type> ::= U "__strong"
+ // <type> ::= U "__weak"
+ // <type> ::= U "__autoreleasing"
+ case Qualifiers::OCL_None:
+ break;
+
+ case Qualifiers::OCL_Weak:
+ LifetimeName = "__weak";
+ break;
+
+ case Qualifiers::OCL_Strong:
+ LifetimeName = "__strong";
+ break;
+
+ case Qualifiers::OCL_Autoreleasing:
+ LifetimeName = "__autoreleasing";
+ break;
+
+ case Qualifiers::OCL_ExplicitNone:
+ // The __unsafe_unretained qualifier is *not* mangled, so that
+ // __unsafe_unretained types in ARC produce the same manglings as the
+ // equivalent (but, naturally, unqualified) types in non-ARC, providing
+ // better ABI compatibility.
+ //
+ // It's safe to do this because unqualified 'id' won't show up
+ // in any type signatures that need to be mangled.
+ break;
+ }
+ if (!LifetimeName.empty())
+ Out << 'U' << LifetimeName.size() << LifetimeName;
+}
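+
+// E.g. "const volatile int *" mangles as PVKi: the qualifiers are emitted
+// in the fixed order r, V, K immediately before the type they qualify.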
+
+void CXXNameMangler::mangleRefQualifier(RefQualifierKind RefQualifier) {
+ // <ref-qualifier> ::= R # lvalue reference
+ // ::= O # rvalue-reference
+ switch (RefQualifier) {
+ case RQ_None:
+ break;
+
+ case RQ_LValue:
+ Out << 'R';
+ break;
+
+ case RQ_RValue:
+ Out << 'O';
+ break;
+ }
+}
+
+void CXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
+ Context.mangleObjCMethodName(MD, Out);
+}
+
+static bool isTypeSubstitutable(Qualifiers Quals, const Type *Ty) {
+ if (Quals)
+ return true;
+ if (Ty->isSpecificBuiltinType(BuiltinType::ObjCSel))
+ return true;
+ if (Ty->isOpenCLSpecificType())
+ return true;
+ if (Ty->isBuiltinType())
+ return false;
+
+ return true;
+}
+
+void CXXNameMangler::mangleType(QualType T) {
+ // If our type is instantiation-dependent but not dependent, we mangle
+ // it as it was written in the source, removing any top-level sugar.
+ // Otherwise, use the canonical type.
+ //
+ // FIXME: This is an approximation of the instantiation-dependent name
+ // mangling rules, since we should really be using the type as written and
+ // augmented via semantic analysis (i.e., with implicit conversions and
+ // default template arguments) for any instantiation-dependent type.
+ // Unfortunately, that requires several changes to our AST:
+ // - Instantiation-dependent TemplateSpecializationTypes will need to be
+ // uniqued, so that we can handle substitutions properly
+ // - Default template arguments will need to be represented in the
+ // TemplateSpecializationType, since they need to be mangled even though
+ // they aren't written.
+ // - Conversions on non-type template arguments need to be expressed, since
+ // they can affect the mangling of sizeof/alignof.
+ if (!T->isInstantiationDependentType() || T->isDependentType())
+ T = T.getCanonicalType();
+ else {
+ // Desugar any types that are purely sugar.
+ do {
+ // Don't desugar through template specialization types that aren't
+ // type aliases. We need to mangle the template arguments as written.
+ if (const TemplateSpecializationType *TST
+ = dyn_cast<TemplateSpecializationType>(T))
+ if (!TST->isTypeAlias())
+ break;
+
+ QualType Desugared
+ = T.getSingleStepDesugaredType(Context.getASTContext());
+ if (Desugared == T)
+ break;
+
+ T = Desugared;
+ } while (true);
+ }
+ SplitQualType split = T.split();
+ Qualifiers quals = split.Quals;
+ const Type *ty = split.Ty;
+
+ bool isSubstitutable = isTypeSubstitutable(quals, ty);
+ if (isSubstitutable && mangleSubstitution(T))
+ return;
+
+ // If we're mangling a qualified array type, push the qualifiers to
+ // the element type.
+ if (quals && isa<ArrayType>(T)) {
+ ty = Context.getASTContext().getAsArrayType(T);
+ quals = Qualifiers();
+
+ // Note that we don't update T: we want to add the
+ // substitution at the original type.
+ }
+
+ if (quals) {
+ mangleQualifiers(quals);
+ // Recurse: even if the qualified type isn't yet substitutable,
+ // the unqualified type might be.
+ mangleType(QualType(ty, 0));
+ } else {
+ switch (ty->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT) \
+ case Type::CLASS: \
+ llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
+ return;
+#define TYPE(CLASS, PARENT) \
+ case Type::CLASS: \
+ mangleType(static_cast<const CLASS##Type*>(ty)); \
+ break;
+#include "clang/AST/TypeNodes.def"
+ }
+ }
+
+ // Add the substitution.
+ if (isSubstitutable)
+ addSubstitution(T);
+}
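+
+// Substitution in action (illustrative): "void f(A*, A*)" mangles as
+// _Z1fP1AS0_ -- the first parameter introduces S_ (A) and S0_ (A*), and
+// the identical second parameter is emitted as S0_.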
+
+void CXXNameMangler::mangleNameOrStandardSubstitution(const NamedDecl *ND) {
+ if (!mangleStandardSubstitution(ND))
+ mangleName(ND);
+}
+
+void CXXNameMangler::mangleType(const BuiltinType *T) {
+ // <type> ::= <builtin-type>
+ // <builtin-type> ::= v # void
+ // ::= w # wchar_t
+ // ::= b # bool
+ // ::= c # char
+ // ::= a # signed char
+ // ::= h # unsigned char
+ // ::= s # short
+ // ::= t # unsigned short
+ // ::= i # int
+ // ::= j # unsigned int
+ // ::= l # long
+ // ::= m # unsigned long
+ // ::= x # long long, __int64
+ // ::= y # unsigned long long, __int64
+ // ::= n # __int128
+ // ::= o # unsigned __int128
+ // ::= f # float
+ // ::= d # double
+ // ::= e # long double, __float80
+ // UNSUPPORTED: ::= g # __float128
+ // UNSUPPORTED: ::= Dd # IEEE 754r decimal floating point (64 bits)
+ // UNSUPPORTED: ::= De # IEEE 754r decimal floating point (128 bits)
+ // UNSUPPORTED: ::= Df # IEEE 754r decimal floating point (32 bits)
+ // ::= Dh # IEEE 754r half-precision floating point (16 bits)
+ // ::= Di # char32_t
+ // ::= Ds # char16_t
+ // ::= Dn # std::nullptr_t (i.e., decltype(nullptr))
+ // ::= u <source-name> # vendor extended type
+ switch (T->getKind()) {
+ case BuiltinType::Void:
+ Out << 'v';
+ break;
+ case BuiltinType::Bool:
+ Out << 'b';
+ break;
+ case BuiltinType::Char_U:
+ case BuiltinType::Char_S:
+ Out << 'c';
+ break;
+ case BuiltinType::UChar:
+ Out << 'h';
+ break;
+ case BuiltinType::UShort:
+ Out << 't';
+ break;
+ case BuiltinType::UInt:
+ Out << 'j';
+ break;
+ case BuiltinType::ULong:
+ Out << 'm';
+ break;
+ case BuiltinType::ULongLong:
+ Out << 'y';
+ break;
+ case BuiltinType::UInt128:
+ Out << 'o';
+ break;
+ case BuiltinType::SChar:
+ Out << 'a';
+ break;
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ Out << 'w';
+ break;
+ case BuiltinType::Char16:
+ Out << "Ds";
+ break;
+ case BuiltinType::Char32:
+ Out << "Di";
+ break;
+ case BuiltinType::Short:
+ Out << 's';
+ break;
+ case BuiltinType::Int:
+ Out << 'i';
+ break;
+ case BuiltinType::Long:
+ Out << 'l';
+ break;
+ case BuiltinType::LongLong:
+ Out << 'x';
+ break;
+ case BuiltinType::Int128:
+ Out << 'n';
+ break;
+ case BuiltinType::Half:
+ Out << "Dh";
+ break;
+ case BuiltinType::Float:
+ Out << 'f';
+ break;
+ case BuiltinType::Double:
+ Out << 'd';
+ break;
+ case BuiltinType::LongDouble:
+ Out << (getASTContext().getTargetInfo().useFloat128ManglingForLongDouble()
+ ? 'g'
+ : 'e');
+ break;
+ case BuiltinType::NullPtr:
+ Out << "Dn";
+ break;
+
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+ case BuiltinType::Dependent:
+ llvm_unreachable("mangling a placeholder type");
+ case BuiltinType::ObjCId:
+ Out << "11objc_object";
+ break;
+ case BuiltinType::ObjCClass:
+ Out << "10objc_class";
+ break;
+ case BuiltinType::ObjCSel:
+ Out << "13objc_selector";
+ break;
+ case BuiltinType::OCLImage1d:
+ Out << "11ocl_image1d";
+ break;
+ case BuiltinType::OCLImage1dArray:
+ Out << "16ocl_image1darray";
+ break;
+ case BuiltinType::OCLImage1dBuffer:
+ Out << "17ocl_image1dbuffer";
+ break;
+ case BuiltinType::OCLImage2d:
+ Out << "11ocl_image2d";
+ break;
+ case BuiltinType::OCLImage2dArray:
+ Out << "16ocl_image2darray";
+ break;
+ case BuiltinType::OCLImage2dDepth:
+ Out << "16ocl_image2ddepth";
+ break;
+ case BuiltinType::OCLImage2dArrayDepth:
+ Out << "21ocl_image2darraydepth";
+ break;
+ case BuiltinType::OCLImage2dMSAA:
+ Out << "15ocl_image2dmsaa";
+ break;
+ case BuiltinType::OCLImage2dArrayMSAA:
+ Out << "20ocl_image2darraymsaa";
+ break;
+ case BuiltinType::OCLImage2dMSAADepth:
+ Out << "20ocl_image2dmsaadepth";
+ break;
+ case BuiltinType::OCLImage2dArrayMSAADepth:
+ Out << "35ocl_image2darraymsaadepth";
+ break;
+ case BuiltinType::OCLImage3d:
+ Out << "11ocl_image3d";
+ break;
+ case BuiltinType::OCLSampler:
+ Out << "11ocl_sampler";
+ break;
+ case BuiltinType::OCLEvent:
+ Out << "9ocl_event";
+ break;
+ case BuiltinType::OCLClkEvent:
+ Out << "12ocl_clkevent";
+ break;
+ case BuiltinType::OCLQueue:
+ Out << "9ocl_queue";
+ break;
+ case BuiltinType::OCLNDRange:
+ Out << "11ocl_ndrange";
+ break;
+ case BuiltinType::OCLReserveID:
+ Out << "13ocl_reserveid";
+ break;
+ }
+}
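+
+// E.g. "void f(unsigned long long, wchar_t)" mangles as _Z1fyw.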
+
+// <type> ::= <function-type>
+// <function-type> ::= [<CV-qualifiers>] F [Y]
+// <bare-function-type> [<ref-qualifier>] E
+void CXXNameMangler::mangleType(const FunctionProtoType *T) {
+ // Mangle CV-qualifiers, if present. These are 'this' qualifiers,
+ // e.g. "const" in "int (A::*)() const".
+ mangleQualifiers(Qualifiers::fromCVRMask(T->getTypeQuals()));
+
+ Out << 'F';
+
+ // FIXME: We don't have enough information in the AST to produce the 'Y'
+ // encoding for extern "C" function types.
+ mangleBareFunctionType(T, /*MangleReturnType=*/true);
+
+ // Mangle the ref-qualifier, if present.
+ mangleRefQualifier(T->getRefQualifier());
+
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleType(const FunctionNoProtoType *T) {
+ // Function types without prototypes can arise when mangling a function type
+ // within an overloadable function in C. We mangle these as the absence of any
+ // parameter types (not even an empty parameter list).
+ Out << 'F';
+
+ FunctionTypeDepthState saved = FunctionTypeDepth.push();
+
+ FunctionTypeDepth.enterResultType();
+ mangleType(T->getReturnType());
+ FunctionTypeDepth.leaveResultType();
+
+ FunctionTypeDepth.pop(saved);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleBareFunctionType(const FunctionType *T,
+ bool MangleReturnType,
+ const FunctionDecl *FD) {
+ // We should never be mangling something without a prototype.
+ const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
+
+ // Record that we're in a function type. See mangleFunctionParam
+ // for details on what we're trying to achieve here.
+ FunctionTypeDepthState saved = FunctionTypeDepth.push();
+
+ // <bare-function-type> ::= <signature type>+
+ if (MangleReturnType) {
+ FunctionTypeDepth.enterResultType();
+ mangleType(Proto->getReturnType());
+ FunctionTypeDepth.leaveResultType();
+ }
+
+ if (Proto->getNumParams() == 0 && !Proto->isVariadic()) {
+ // <builtin-type> ::= v # void
+ Out << 'v';
+
+ FunctionTypeDepth.pop(saved);
+ return;
+ }
+
+ assert(!FD || FD->getNumParams() == Proto->getNumParams());
+ for (unsigned I = 0, E = Proto->getNumParams(); I != E; ++I) {
+ const auto &ParamTy = Proto->getParamType(I);
+ mangleType(Context.getASTContext().getSignatureParameterType(ParamTy));
+
+ if (FD) {
+ if (auto *Attr = FD->getParamDecl(I)->getAttr<PassObjectSizeAttr>()) {
+ // Attr can only take 1 character, so we can hardcode the length below.
+ assert(Attr->getType() <= 9 && Attr->getType() >= 0);
+ Out << "U17pass_object_size" << Attr->getType();
+ }
+ }
+ }
+
+ FunctionTypeDepth.pop(saved);
+
+ // <builtin-type> ::= z # ellipsis
+ if (Proto->isVariadic())
+ Out << 'z';
+}
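+
+// E.g. "void f()" mangles as _Z1fv ('v' stands in for the empty parameter
+// list), while the variadic "void f(int, ...)" mangles as _Z1fiz.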
+
+// <type> ::= <class-enum-type>
+// <class-enum-type> ::= <name>
+void CXXNameMangler::mangleType(const UnresolvedUsingType *T) {
+ mangleName(T->getDecl());
+}
+
+// <type> ::= <class-enum-type>
+// <class-enum-type> ::= <name>
+void CXXNameMangler::mangleType(const EnumType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void CXXNameMangler::mangleType(const RecordType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void CXXNameMangler::mangleType(const TagType *T) {
+ mangleName(T->getDecl());
+}
+
+// <type> ::= <array-type>
+// <array-type> ::= A <positive dimension number> _ <element type>
+// ::= A [<dimension expression>] _ <element type>
+void CXXNameMangler::mangleType(const ConstantArrayType *T) {
+ Out << 'A' << T->getSize() << '_';
+ mangleType(T->getElementType());
+}
+void CXXNameMangler::mangleType(const VariableArrayType *T) {
+ Out << 'A';
+ // Decayed VLA types (size 0) will just be skipped.
+ if (T->getSizeExpr())
+ mangleExpression(T->getSizeExpr());
+ Out << '_';
+ mangleType(T->getElementType());
+}
+void CXXNameMangler::mangleType(const DependentSizedArrayType *T) {
+ Out << 'A';
+ mangleExpression(T->getSizeExpr());
+ Out << '_';
+ mangleType(T->getElementType());
+}
+void CXXNameMangler::mangleType(const IncompleteArrayType *T) {
+ Out << "A_";
+ mangleType(T->getElementType());
+}
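+
+// E.g. "void f(int (&)[10])" mangles as _Z1fRA10_i; an array parameter
+// passed by value decays and mangles as a plain pointer (Pi) instead.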
+
+// <type> ::= <pointer-to-member-type>
+// <pointer-to-member-type> ::= M <class type> <member type>
+void CXXNameMangler::mangleType(const MemberPointerType *T) {
+ Out << 'M';
+ mangleType(QualType(T->getClass(), 0));
+ QualType PointeeType = T->getPointeeType();
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
+ mangleType(FPT);
+
+ // Itanium C++ ABI 5.1.8:
+ //
+ // The type of a non-static member function is considered to be different,
+ // for the purposes of substitution, from the type of a namespace-scope or
+ // static member function whose type appears similar. The types of two
+ // non-static member functions are considered to be different, for the
+ // purposes of substitution, if the functions are members of different
+ // classes. In other words, for the purposes of substitution, the class of
+ // which the function is a member is considered part of the type of
+ // function.
+
+ // Given that we already substitute member function pointers as a
+ // whole, the net effect of this rule is just to unconditionally
+ // suppress substitution on the function type in a member pointer.
+ // We increment the SeqID here to emulate adding an entry to the
+ // substitution table.
+ ++SeqID;
+ } else
+ mangleType(PointeeType);
+}
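+
+// Worked example (sketch; S is a placeholder class): "int S::*" mangles as
+// M1Si, and a member function pointer "void (S::*)()" as M1SFvvE, where the
+// F..E wraps the function type whose substitution is suppressed above.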
+
+// <type> ::= <template-param>
+void CXXNameMangler::mangleType(const TemplateTypeParmType *T) {
+ mangleTemplateParameter(T->getIndex());
+}
+
+// <type> ::= <template-param>
+void CXXNameMangler::mangleType(const SubstTemplateTypeParmPackType *T) {
+ // FIXME: not clear how to mangle this!
+ // template <class... T> class A {
+ // template <class... U> void foo(T(*)(U) x...);
+ // };
+ Out << "_SUBSTPACK_";
+}
+
+// <type> ::= P <type> # pointer-to
+void CXXNameMangler::mangleType(const PointerType *T) {
+ Out << 'P';
+ mangleType(T->getPointeeType());
+}
+void CXXNameMangler::mangleType(const ObjCObjectPointerType *T) {
+ Out << 'P';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= R <type> # reference-to
+void CXXNameMangler::mangleType(const LValueReferenceType *T) {
+ Out << 'R';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= O <type> # rvalue reference-to (C++0x)
+void CXXNameMangler::mangleType(const RValueReferenceType *T) {
+ Out << 'O';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= C <type> # complex pair (C 2000)
+void CXXNameMangler::mangleType(const ComplexType *T) {
+ Out << 'C';
+ mangleType(T->getElementType());
+}
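+
+// Worked examples (sketch): "int*" -> Pi, "int&" -> Ri, "int&&" -> Oi, and
+// "_Complex int" -> Ci; each one-letter prefix wraps the recursive mangling
+// of the pointee or element type.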
+
+// ARM's ABI for Neon vector types specifies that they should be mangled as
+// if they are structs (to match ARM's initial implementation). The
+// vector type must be one of the special types predefined by ARM.
+void CXXNameMangler::mangleNeonVectorType(const VectorType *T) {
+ QualType EltType = T->getElementType();
+ assert(EltType->isBuiltinType() && "Neon vector element not a BuiltinType");
+ const char *EltName = nullptr;
+ if (T->getVectorKind() == VectorType::NeonPolyVector) {
+ switch (cast<BuiltinType>(EltType)->getKind()) {
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ EltName = "poly8_t";
+ break;
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ EltName = "poly16_t";
+ break;
+ case BuiltinType::ULongLong:
+ EltName = "poly64_t";
+ break;
+ default: llvm_unreachable("unexpected Neon polynomial vector element type");
+ }
+ } else {
+ switch (cast<BuiltinType>(EltType)->getKind()) {
+ case BuiltinType::SChar: EltName = "int8_t"; break;
+ case BuiltinType::UChar: EltName = "uint8_t"; break;
+ case BuiltinType::Short: EltName = "int16_t"; break;
+ case BuiltinType::UShort: EltName = "uint16_t"; break;
+ case BuiltinType::Int: EltName = "int32_t"; break;
+ case BuiltinType::UInt: EltName = "uint32_t"; break;
+ case BuiltinType::LongLong: EltName = "int64_t"; break;
+ case BuiltinType::ULongLong: EltName = "uint64_t"; break;
+ case BuiltinType::Double: EltName = "float64_t"; break;
+ case BuiltinType::Float: EltName = "float32_t"; break;
+ case BuiltinType::Half: EltName = "float16_t"; break;
+ default:
+ llvm_unreachable("unexpected Neon vector element type");
+ }
+ }
+ const char *BaseName = nullptr;
+ unsigned BitSize = (T->getNumElements() *
+ getASTContext().getTypeSize(EltType));
+ if (BitSize == 64)
+ BaseName = "__simd64_";
+ else {
+ assert(BitSize == 128 && "Neon vector type not 64 or 128 bits");
+ BaseName = "__simd128_";
+ }
+ Out << strlen(BaseName) + strlen(EltName);
+ Out << BaseName << EltName;
+}
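+
+// Example (assuming the predefined ARM types above): the 128-bit int32x4_t
+// vector is emitted as 17__simd128_int32_t, where 17 is
+// strlen("__simd128_") + strlen("int32_t").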
+
+static StringRef mangleAArch64VectorBase(const BuiltinType *EltType) {
+ switch (EltType->getKind()) {
+ case BuiltinType::SChar:
+ return "Int8";
+ case BuiltinType::Short:
+ return "Int16";
+ case BuiltinType::Int:
+ return "Int32";
+ case BuiltinType::Long:
+ case BuiltinType::LongLong:
+ return "Int64";
+ case BuiltinType::UChar:
+ return "Uint8";
+ case BuiltinType::UShort:
+ return "Uint16";
+ case BuiltinType::UInt:
+ return "Uint32";
+ case BuiltinType::ULong:
+ case BuiltinType::ULongLong:
+ return "Uint64";
+ case BuiltinType::Half:
+ return "Float16";
+ case BuiltinType::Float:
+ return "Float32";
+ case BuiltinType::Double:
+ return "Float64";
+ default:
+ llvm_unreachable("Unexpected vector element base type");
+ }
+}
+
+// AArch64's ABI for Neon vector types specifies that they should be mangled as
+// the equivalent internal name. The vector type must be one of the special
+// types predefined by ARM.
+void CXXNameMangler::mangleAArch64NeonVectorType(const VectorType *T) {
+ QualType EltType = T->getElementType();
+ assert(EltType->isBuiltinType() && "Neon vector element not a BuiltinType");
+ unsigned BitSize =
+ (T->getNumElements() * getASTContext().getTypeSize(EltType));
+ (void)BitSize; // Silence warning.
+
+ assert((BitSize == 64 || BitSize == 128) &&
+ "Neon vector type not 64 or 128 bits");
+
+ StringRef EltName;
+ if (T->getVectorKind() == VectorType::NeonPolyVector) {
+ switch (cast<BuiltinType>(EltType)->getKind()) {
+ case BuiltinType::UChar:
+ EltName = "Poly8";
+ break;
+ case BuiltinType::UShort:
+ EltName = "Poly16";
+ break;
+ case BuiltinType::ULong:
+ case BuiltinType::ULongLong:
+ EltName = "Poly64";
+ break;
+ default:
+ llvm_unreachable("unexpected Neon polynomial vector element type");
+ }
+ } else
+ EltName = mangleAArch64VectorBase(cast<BuiltinType>(EltType));
+
+ std::string TypeName =
+ ("__" + EltName + "x" + Twine(T->getNumElements()) + "_t").str();
+ Out << TypeName.length() << TypeName;
+}
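+
+// Example (assuming the AArch64 scheme above): int32x4_t becomes the internal
+// name __Int32x4_t and is emitted with its length as 11__Int32x4_t.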
+
+// GNU extension: vector types
+// <type> ::= <vector-type>
+// <vector-type> ::= Dv <positive dimension number> _
+// <extended element type>
+// ::= Dv [<dimension expression>] _ <element type>
+// <extended element type> ::= <element type>
+// ::= p # AltiVec vector pixel
+// ::= b # Altivec vector bool
+void CXXNameMangler::mangleType(const VectorType *T) {
+ if (T->getVectorKind() == VectorType::NeonVector ||
+ T->getVectorKind() == VectorType::NeonPolyVector) {
+ llvm::Triple Target = getASTContext().getTargetInfo().getTriple();
+ llvm::Triple::ArchType Arch = Target.getArch();
+ if ((Arch == llvm::Triple::aarch64 ||
+ Arch == llvm::Triple::aarch64_be) && !Target.isOSDarwin())
+ mangleAArch64NeonVectorType(T);
+ else
+ mangleNeonVectorType(T);
+ return;
+ }
+ Out << "Dv" << T->getNumElements() << '_';
+ if (T->getVectorKind() == VectorType::AltiVecPixel)
+ Out << 'p';
+ else if (T->getVectorKind() == VectorType::AltiVecBool)
+ Out << 'b';
+ else
+ mangleType(T->getElementType());
+}
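+
+// Example (sketch): a GNU vector "int __attribute__((vector_size(16)))" has
+// four elements and mangles as Dv4_i; AltiVec pixel and bool vectors end in
+// p and b instead of an element type, per the grammar above.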
+void CXXNameMangler::mangleType(const ExtVectorType *T) {
+ mangleType(static_cast<const VectorType*>(T));
+}
+void CXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
+ Out << "Dv";
+ mangleExpression(T->getSizeExpr());
+ Out << '_';
+ mangleType(T->getElementType());
+}
+
+void CXXNameMangler::mangleType(const PackExpansionType *T) {
+ // <type> ::= Dp <type> # pack expansion (C++0x)
+ Out << "Dp";
+ mangleType(T->getPattern());
+}
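+
+// Example (sketch; f is a placeholder): for "template <class... T> void
+// f(T...)", the specialization f<int> mangles as _Z1fIJiEEvDpT_, where J..E
+// is the template argument pack and DpT_ the pack expansion of T.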
+
+void CXXNameMangler::mangleType(const ObjCInterfaceType *T) {
+ mangleSourceName(T->getDecl()->getIdentifier());
+}
+
+void CXXNameMangler::mangleType(const ObjCObjectType *T) {
+ // Treat __kindof as a vendor extended type qualifier.
+ if (T->isKindOfType())
+ Out << "U8__kindof";
+
+ if (!T->qual_empty()) {
+ // Mangle protocol qualifiers.
+ SmallString<64> QualStr;
+ llvm::raw_svector_ostream QualOS(QualStr);
+ QualOS << "objcproto";
+ for (const auto *I : T->quals()) {
+ StringRef name = I->getName();
+ QualOS << name.size() << name;
+ }
+ Out << 'U' << QualStr.size() << QualStr;
+ }
+
+ mangleType(T->getBaseType());
+
+ if (T->isSpecialized()) {
+ // Mangle type arguments as I <type>+ E
+ Out << 'I';
+ for (auto typeArg : T->getTypeArgs())
+ mangleType(typeArg);
+ Out << 'E';
+ }
+}
+
+void CXXNameMangler::mangleType(const BlockPointerType *T) {
+ Out << "U13block_pointer";
+ mangleType(T->getPointeeType());
+}
+
+void CXXNameMangler::mangleType(const InjectedClassNameType *T) {
+ // Mangle injected class name types as if the user had written the
+ // specialization out fully. It may not actually be possible to see
+ // this mangling, though.
+ mangleType(T->getInjectedSpecializationType());
+}
+
+void CXXNameMangler::mangleType(const TemplateSpecializationType *T) {
+ if (TemplateDecl *TD = T->getTemplateName().getAsTemplateDecl()) {
+ mangleName(TD, T->getArgs(), T->getNumArgs());
+ } else {
+ if (mangleSubstitution(QualType(T, 0)))
+ return;
+
+ mangleTemplatePrefix(T->getTemplateName());
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(T->getArgs(), T->getNumArgs());
+ addSubstitution(QualType(T, 0));
+ }
+}
+
+void CXXNameMangler::mangleType(const DependentNameType *T) {
+ // Proposal by cxx-abi-dev, 2014-03-26
+ // <class-enum-type> ::= <name> # non-dependent or dependent type name or
+ // # dependent elaborated type specifier using
+ // # 'typename'
+ // ::= Ts <name> # dependent elaborated type specifier using
+ // # 'struct' or 'class'
+ // ::= Tu <name> # dependent elaborated type specifier using
+ // # 'union'
+ // ::= Te <name> # dependent elaborated type specifier using
+ // # 'enum'
+ switch (T->getKeyword()) {
+ case ETK_Typename:
+ break;
+ case ETK_Struct:
+ case ETK_Class:
+ case ETK_Interface:
+ Out << "Ts";
+ break;
+ case ETK_Union:
+ Out << "Tu";
+ break;
+ case ETK_Enum:
+ Out << "Te";
+ break;
+ default:
+ llvm_unreachable("unexpected keyword for dependent type name");
+ }
+ // Typename types are always nested
+ Out << 'N';
+ manglePrefix(T->getQualifier());
+ mangleSourceName(T->getIdentifier());
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleType(const DependentTemplateSpecializationType *T) {
+ // Dependently-scoped template types are nested if they have a prefix.
+ Out << 'N';
+
+ // TODO: avoid making this TemplateName.
+ TemplateName Prefix =
+ getASTContext().getDependentTemplateName(T->getQualifier(),
+ T->getIdentifier());
+ mangleTemplatePrefix(Prefix);
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(T->getArgs(), T->getNumArgs());
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleType(const TypeOfType *T) {
+ // FIXME: this is pretty unsatisfactory, but there isn't an obvious
+ // "extension with parameters" mangling.
+ Out << "u6typeof";
+}
+
+void CXXNameMangler::mangleType(const TypeOfExprType *T) {
+ // FIXME: this is pretty unsatisfactory, but there isn't an obvious
+ // "extension with parameters" mangling.
+ Out << "u6typeof";
+}
+
+void CXXNameMangler::mangleType(const DecltypeType *T) {
+ Expr *E = T->getUnderlyingExpr();
+
+ // type ::= Dt <expression> E # decltype of an id-expression
+ // # or class member access
+ // ::= DT <expression> E # decltype of an expression
+
+ // This purports to be an exhaustive list of id-expressions and
+ // class member accesses. Note that we do not ignore parentheses;
+ // parentheses change the semantics of decltype for these
+ // expressions (and cause the mangler to use the other form).
+ if (isa<DeclRefExpr>(E) ||
+ isa<MemberExpr>(E) ||
+ isa<UnresolvedLookupExpr>(E) ||
+ isa<DependentScopeDeclRefExpr>(E) ||
+ isa<CXXDependentScopeMemberExpr>(E) ||
+ isa<UnresolvedMemberExpr>(E))
+ Out << "Dt";
+ else
+ Out << "DT";
+ mangleExpression(E);
+ Out << 'E';
+}
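+
+// Example (sketch; f is a placeholder): "template <class T> auto f(T x)
+// -> decltype(x + 1)" instantiated at int mangles as _Z1fIiEDTplfp_Li1EET_;
+// x + 1 is not an id-expression, so the DT form is chosen.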
+
+void CXXNameMangler::mangleType(const UnaryTransformType *T) {
+ // If this is dependent, we need to record that. If not, we simply
+ // mangle it as the underlying type since they are equivalent.
+ if (T->isDependentType()) {
+ Out << 'U';
+
+ switch (T->getUTTKind()) {
+ case UnaryTransformType::EnumUnderlyingType:
+ Out << "3eut";
+ break;
+ }
+ }
+
+ mangleType(T->getUnderlyingType());
+}
+
+void CXXNameMangler::mangleType(const AutoType *T) {
+ QualType D = T->getDeducedType();
+ // <builtin-type> ::= Da # dependent auto
+ if (D.isNull()) {
+ assert(T->getKeyword() != AutoTypeKeyword::GNUAutoType &&
+ "shouldn't need to mangle __auto_type!");
+ Out << (T->isDecltypeAuto() ? "Dc" : "Da");
+ } else
+ mangleType(D);
+}
+
+void CXXNameMangler::mangleType(const AtomicType *T) {
+ // <type> ::= U <source-name> <type> # vendor extended type qualifier
+ // (Until there's a standardized mangling...)
+ Out << "U7_Atomic";
+ mangleType(T->getValueType());
+}
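+
+// Example (sketch): "_Atomic(int)" mangles as U7_Atomici under the vendor
+// qualifier convention above.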
+
+void CXXNameMangler::mangleIntegerLiteral(QualType T,
+ const llvm::APSInt &Value) {
+ // <expr-primary> ::= L <type> <value number> E # integer literal
+ Out << 'L';
+
+ mangleType(T);
+ if (T->isBooleanType()) {
+ // Boolean values are encoded as 0/1.
+ Out << (Value.getBoolValue() ? '1' : '0');
+ } else {
+ mangleNumber(Value);
+ }
+ Out << 'E';
+}
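+
+// Examples (sketch): an int template argument of 5 is emitted as Li5E, and a
+// bool argument of true as Lb1E, using the 0/1 encoding above.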
+
+void CXXNameMangler::mangleMemberExprBase(const Expr *Base, bool IsArrow) {
+ // Ignore member expressions involving anonymous unions.
+ while (const auto *RT = Base->getType()->getAs<RecordType>()) {
+ if (!RT->getDecl()->isAnonymousStructOrUnion())
+ break;
+ const auto *ME = dyn_cast<MemberExpr>(Base);
+ if (!ME)
+ break;
+ Base = ME->getBase();
+ IsArrow = ME->isArrow();
+ }
+
+ if (Base->isImplicitCXXThis()) {
+ // Note: GCC mangles member expressions to the implicit 'this' as
+ // *this., whereas we represent them as this->. The Itanium C++ ABI
+ // does not specify anything here, so we follow GCC.
+ Out << "dtdefpT";
+ } else {
+ Out << (IsArrow ? "pt" : "dt");
+ mangleExpression(Base);
+ }
+}
+
+/// Mangles a member expression.
+void CXXNameMangler::mangleMemberExpr(const Expr *base,
+ bool isArrow,
+ NestedNameSpecifier *qualifier,
+ NamedDecl *firstQualifierLookup,
+ DeclarationName member,
+ unsigned arity) {
+ // <expression> ::= dt <expression> <unresolved-name>
+ // ::= pt <expression> <unresolved-name>
+ if (base)
+ mangleMemberExprBase(base, isArrow);
+ mangleUnresolvedName(qualifier, member, arity);
+}
+
+/// Look at the callee of the given call expression and determine if
+/// it's a parenthesized id-expression which would have triggered ADL
+/// otherwise.
+static bool isParenthesizedADLCallee(const CallExpr *call) {
+ const Expr *callee = call->getCallee();
+ const Expr *fn = callee->IgnoreParens();
+
+ // Must be parenthesized. IgnoreParens() skips __extension__ nodes,
+ // too, but for those to appear in the callee, it would have to be
+ // parenthesized.
+ if (callee == fn) return false;
+
+ // Must be an unresolved lookup.
+ const UnresolvedLookupExpr *lookup = dyn_cast<UnresolvedLookupExpr>(fn);
+ if (!lookup) return false;
+
+ assert(!lookup->requiresADL());
+
+ // Must be an unqualified lookup.
+ if (lookup->getQualifier()) return false;
+
+ // Must not have found a class member. Note that if one is a class
+ // member, they're all class members.
+ if (lookup->getNumDecls() > 0 &&
+ (*lookup->decls_begin())->isCXXClassMember())
+ return false;
+
+ // Otherwise, ADL would have been triggered.
+ return true;
+}
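+
+// Example (sketch; g is a placeholder): in a dependent call written "(g)(x)",
+// the parentheses suppress ADL, so the callee below is mangled with cp rather
+// than the ordinary cl call form.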
+
+void CXXNameMangler::mangleCastExpression(const Expr *E, StringRef CastEncoding) {
+ const ExplicitCastExpr *ECE = cast<ExplicitCastExpr>(E);
+ Out << CastEncoding;
+ mangleType(ECE->getType());
+ mangleExpression(ECE->getSubExpr());
+}
+
+void CXXNameMangler::mangleInitListElements(const InitListExpr *InitList) {
+ if (auto *Syntactic = InitList->getSyntacticForm())
+ InitList = Syntactic;
+ for (unsigned i = 0, e = InitList->getNumInits(); i != e; ++i)
+ mangleExpression(InitList->getInit(i));
+}
+
+void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
+ // <expression> ::= <unary operator-name> <expression>
+ // ::= <binary operator-name> <expression> <expression>
+ // ::= <trinary operator-name> <expression> <expression> <expression>
+ // ::= cv <type> expression # conversion with one argument
+ // ::= cv <type> _ <expression>* E # conversion with a different number of arguments
+ // ::= dc <type> <expression> # dynamic_cast<type> (expression)
+ // ::= sc <type> <expression> # static_cast<type> (expression)
+ // ::= cc <type> <expression> # const_cast<type> (expression)
+ // ::= rc <type> <expression> # reinterpret_cast<type> (expression)
+ // ::= st <type> # sizeof (a type)
+ // ::= at <type> # alignof (a type)
+ // ::= <template-param>
+ // ::= <function-param>
+ // ::= sr <type> <unqualified-name> # dependent name
+ // ::= sr <type> <unqualified-name> <template-args> # dependent template-id
+ // ::= ds <expression> <expression> # expr.*expr
+ // ::= sZ <template-param> # size of a parameter pack
+ // ::= sZ <function-param> # size of a function parameter pack
+ // ::= <expr-primary>
+ // <expr-primary> ::= L <type> <value number> E # integer literal
+ // ::= L <type> <value float> E # floating literal
+ // ::= L <mangled-name> E # external name
+ // ::= fpT # 'this' expression
+ QualType ImplicitlyConvertedToType;
+
+recurse:
+ switch (E->getStmtClass()) {
+ case Expr::NoStmtClass:
+#define ABSTRACT_STMT(Type)
+#define EXPR(Type, Base)
+#define STMT(Type, Base) \
+ case Expr::Type##Class:
+#include "clang/AST/StmtNodes.inc"
+ // fallthrough
+
+ // These all can only appear in local or variable-initialization
+ // contexts and so should never appear in a mangling.
+ case Expr::AddrLabelExprClass:
+ case Expr::DesignatedInitUpdateExprClass:
+ case Expr::ImplicitValueInitExprClass:
+ case Expr::NoInitExprClass:
+ case Expr::ParenListExprClass:
+ case Expr::LambdaExprClass:
+ case Expr::MSPropertyRefExprClass:
+ case Expr::MSPropertySubscriptExprClass:
+ case Expr::TypoExprClass: // This should no longer exist in the AST by now.
+ case Expr::OMPArraySectionExprClass:
+ llvm_unreachable("unexpected statement kind");
+
+ // FIXME: invent manglings for all these.
+ case Expr::BlockExprClass:
+ case Expr::ChooseExprClass:
+ case Expr::CompoundLiteralExprClass:
+ case Expr::DesignatedInitExprClass:
+ case Expr::ExtVectorElementExprClass:
+ case Expr::GenericSelectionExprClass:
+ case Expr::ObjCEncodeExprClass:
+ case Expr::ObjCIsaExprClass:
+ case Expr::ObjCIvarRefExprClass:
+ case Expr::ObjCMessageExprClass:
+ case Expr::ObjCPropertyRefExprClass:
+ case Expr::ObjCProtocolExprClass:
+ case Expr::ObjCSelectorExprClass:
+ case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCBoxedExprClass:
+ case Expr::ObjCArrayLiteralClass:
+ case Expr::ObjCDictionaryLiteralClass:
+ case Expr::ObjCSubscriptRefExprClass:
+ case Expr::ObjCIndirectCopyRestoreExprClass:
+ case Expr::OffsetOfExprClass:
+ case Expr::PredefinedExprClass:
+ case Expr::ShuffleVectorExprClass:
+ case Expr::ConvertVectorExprClass:
+ case Expr::StmtExprClass:
+ case Expr::TypeTraitExprClass:
+ case Expr::ArrayTypeTraitExprClass:
+ case Expr::ExpressionTraitExprClass:
+ case Expr::VAArgExprClass:
+ case Expr::CUDAKernelCallExprClass:
+ case Expr::AsTypeExprClass:
+ case Expr::PseudoObjectExprClass:
+ case Expr::AtomicExprClass:
+ {
+ // As bad as this diagnostic is, it's better than crashing.
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot yet mangle expression type %0");
+ Diags.Report(E->getExprLoc(), DiagID)
+ << E->getStmtClassName() << E->getSourceRange();
+ break;
+ }
+
+ case Expr::CXXUuidofExprClass: {
+ const CXXUuidofExpr *UE = cast<CXXUuidofExpr>(E);
+ if (UE->isTypeOperand()) {
+ QualType UuidT = UE->getTypeOperand(Context.getASTContext());
+ Out << "u8__uuidoft";
+ mangleType(UuidT);
+ } else {
+ Expr *UuidExp = UE->getExprOperand();
+ Out << "u8__uuidofz";
+ mangleExpression(UuidExp, Arity);
+ }
+ break;
+ }
+
+ // Even gcc-4.5 doesn't mangle this.
+ case Expr::BinaryConditionalOperatorClass: {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID =
+ Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "?: operator with omitted middle operand cannot be mangled");
+ Diags.Report(E->getExprLoc(), DiagID)
+ << E->getStmtClassName() << E->getSourceRange();
+ break;
+ }
+
+ // These are used for internal purposes and cannot be meaningfully mangled.
+ case Expr::OpaqueValueExprClass:
+ llvm_unreachable("cannot mangle opaque value; mangling wrong thing?");
+
+ case Expr::InitListExprClass: {
+ Out << "il";
+ mangleInitListElements(cast<InitListExpr>(E));
+ Out << "E";
+ break;
+ }
+
+ case Expr::CXXDefaultArgExprClass:
+ mangleExpression(cast<CXXDefaultArgExpr>(E)->getExpr(), Arity);
+ break;
+
+ case Expr::CXXDefaultInitExprClass:
+ mangleExpression(cast<CXXDefaultInitExpr>(E)->getExpr(), Arity);
+ break;
+
+ case Expr::CXXStdInitializerListExprClass:
+ mangleExpression(cast<CXXStdInitializerListExpr>(E)->getSubExpr(), Arity);
+ break;
+
+ case Expr::SubstNonTypeTemplateParmExprClass:
+ mangleExpression(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
+ Arity);
+ break;
+
+ case Expr::UserDefinedLiteralClass:
+ // We follow g++'s approach of mangling a UDL as a call to the literal
+ // operator.
+ case Expr::CXXMemberCallExprClass: // fallthrough
+ case Expr::CallExprClass: {
+ const CallExpr *CE = cast<CallExpr>(E);
+
+ // <expression> ::= cp <simple-id> <expression>* E
+ // We use this mangling only when the call would use ADL except
+ // for being parenthesized. Per discussion with David
+ // Vandevoorde, 2011.04.25.
+ if (isParenthesizedADLCallee(CE)) {
+ Out << "cp";
+ // The callee here is a parenthesized UnresolvedLookupExpr with
+ // no qualifier and should always get mangled as a <simple-id>
+ // anyway.
+
+ // <expression> ::= cl <expression>* E
+ } else {
+ Out << "cl";
+ }
+
+ unsigned CallArity = CE->getNumArgs();
+ for (const Expr *Arg : CE->arguments())
+ if (isa<PackExpansionExpr>(Arg))
+ CallArity = UnknownArity;
+
+ mangleExpression(CE->getCallee(), CallArity);
+ for (const Expr *Arg : CE->arguments())
+ mangleExpression(Arg);
+ Out << 'E';
+ break;
+ }
+
+ case Expr::CXXNewExprClass: {
+ const CXXNewExpr *New = cast<CXXNewExpr>(E);
+ if (New->isGlobalNew()) Out << "gs";
+ Out << (New->isArray() ? "na" : "nw");
+ for (CXXNewExpr::const_arg_iterator I = New->placement_arg_begin(),
+ E = New->placement_arg_end(); I != E; ++I)
+ mangleExpression(*I);
+ Out << '_';
+ mangleType(New->getAllocatedType());
+ if (New->hasInitializer()) {
+ if (New->getInitializationStyle() == CXXNewExpr::ListInit)
+ Out << "il";
+ else
+ Out << "pi";
+ const Expr *Init = New->getInitializer();
+ if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
+ // Directly inline the initializers.
+ for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(),
+ E = CCE->arg_end();
+ I != E; ++I)
+ mangleExpression(*I);
+ } else if (const ParenListExpr *PLE = dyn_cast<ParenListExpr>(Init)) {
+ for (unsigned i = 0, e = PLE->getNumExprs(); i != e; ++i)
+ mangleExpression(PLE->getExpr(i));
+ } else if (New->getInitializationStyle() == CXXNewExpr::ListInit &&
+ isa<InitListExpr>(Init)) {
+ // Only take InitListExprs apart for list-initialization.
+ mangleInitListElements(cast<InitListExpr>(Init));
+ } else
+ mangleExpression(Init);
+ }
+ Out << 'E';
+ break;
+ }
+
+ case Expr::CXXPseudoDestructorExprClass: {
+ const auto *PDE = cast<CXXPseudoDestructorExpr>(E);
+ if (const Expr *Base = PDE->getBase())
+ mangleMemberExprBase(Base, PDE->isArrow());
+ NestedNameSpecifier *Qualifier = PDE->getQualifier();
+ QualType ScopeType;
+ if (TypeSourceInfo *ScopeInfo = PDE->getScopeTypeInfo()) {
+ if (Qualifier) {
+ mangleUnresolvedPrefix(Qualifier,
+ /*Recursive=*/true);
+ mangleUnresolvedTypeOrSimpleId(ScopeInfo->getType());
+ Out << 'E';
+ } else {
+ Out << "sr";
+ if (!mangleUnresolvedTypeOrSimpleId(ScopeInfo->getType()))
+ Out << 'E';
+ }
+ } else if (Qualifier) {
+ mangleUnresolvedPrefix(Qualifier);
+ }
+ // <base-unresolved-name> ::= dn <destructor-name>
+ Out << "dn";
+ QualType DestroyedType = PDE->getDestroyedType();
+ mangleUnresolvedTypeOrSimpleId(DestroyedType);
+ break;
+ }
+
+ case Expr::MemberExprClass: {
+ const MemberExpr *ME = cast<MemberExpr>(E);
+ mangleMemberExpr(ME->getBase(), ME->isArrow(),
+ ME->getQualifier(), nullptr,
+ ME->getMemberDecl()->getDeclName(), Arity);
+ break;
+ }
+
+ case Expr::UnresolvedMemberExprClass: {
+ const UnresolvedMemberExpr *ME = cast<UnresolvedMemberExpr>(E);
+ mangleMemberExpr(ME->isImplicitAccess() ? nullptr : ME->getBase(),
+ ME->isArrow(), ME->getQualifier(), nullptr,
+ ME->getMemberName(), Arity);
+ if (ME->hasExplicitTemplateArgs())
+ mangleTemplateArgs(ME->getTemplateArgs(), ME->getNumTemplateArgs());
+ break;
+ }
+
+ case Expr::CXXDependentScopeMemberExprClass: {
+ const CXXDependentScopeMemberExpr *ME
+ = cast<CXXDependentScopeMemberExpr>(E);
+ mangleMemberExpr(ME->isImplicitAccess() ? nullptr : ME->getBase(),
+ ME->isArrow(), ME->getQualifier(),
+ ME->getFirstQualifierFoundInScope(),
+ ME->getMember(), Arity);
+ if (ME->hasExplicitTemplateArgs())
+ mangleTemplateArgs(ME->getTemplateArgs(), ME->getNumTemplateArgs());
+ break;
+ }
+
+ case Expr::UnresolvedLookupExprClass: {
+ const UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(E);
+ mangleUnresolvedName(ULE->getQualifier(), ULE->getName(), Arity);
+
+ // All the <unresolved-name> productions end in a
+ // base-unresolved-name, where <template-args> are just tacked
+ // onto the end.
+ if (ULE->hasExplicitTemplateArgs())
+ mangleTemplateArgs(ULE->getTemplateArgs(), ULE->getNumTemplateArgs());
+ break;
+ }
+
+ case Expr::CXXUnresolvedConstructExprClass: {
+ const CXXUnresolvedConstructExpr *CE = cast<CXXUnresolvedConstructExpr>(E);
+ unsigned N = CE->arg_size();
+
+ Out << "cv";
+ mangleType(CE->getType());
+ if (N != 1) Out << '_';
+ for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I));
+ if (N != 1) Out << 'E';
+ break;
+ }
+
+ case Expr::CXXConstructExprClass: {
+ const auto *CE = cast<CXXConstructExpr>(E);
+ if (!CE->isListInitialization() || CE->isStdInitListInitialization()) {
+ assert(
+ CE->getNumArgs() >= 1 &&
+ (CE->getNumArgs() == 1 || isa<CXXDefaultArgExpr>(CE->getArg(1))) &&
+ "implicit CXXConstructExpr must have one argument (plus any defaults)");
+ return mangleExpression(CE->getArg(0));
+ }
+ Out << "il";
+ for (auto *E : CE->arguments())
+ mangleExpression(E);
+ Out << "E";
+ break;
+ }
+
+ case Expr::CXXTemporaryObjectExprClass: {
+ const auto *CE = cast<CXXTemporaryObjectExpr>(E);
+ unsigned N = CE->getNumArgs();
+ bool List = CE->isListInitialization();
+
+ if (List)
+ Out << "tl";
+ else
+ Out << "cv";
+ mangleType(CE->getType());
+ if (!List && N != 1)
+ Out << '_';
+ if (CE->isStdInitListInitialization()) {
+ // We implicitly created a std::initializer_list<T> for the first argument
+ // of a constructor of type U in an expression of the form U{a, b, c}.
+ // Strip all the semantic gunk off the initializer list.
+ auto *SILE =
+ cast<CXXStdInitializerListExpr>(CE->getArg(0)->IgnoreImplicit());
+ auto *ILE = cast<InitListExpr>(SILE->getSubExpr()->IgnoreImplicit());
+ mangleInitListElements(ILE);
+ } else {
+ for (auto *E : CE->arguments())
+ mangleExpression(E);
+ }
+ if (List || N != 1)
+ Out << 'E';
+ break;
+ }
+
+ case Expr::CXXScalarValueInitExprClass:
+ Out << "cv";
+ mangleType(E->getType());
+ Out << "_E";
+ break;
+
+ case Expr::CXXNoexceptExprClass:
+ Out << "nx";
+ mangleExpression(cast<CXXNoexceptExpr>(E)->getOperand());
+ break;
+
+ case Expr::UnaryExprOrTypeTraitExprClass: {
+ const UnaryExprOrTypeTraitExpr *SAE = cast<UnaryExprOrTypeTraitExpr>(E);
+
+ if (!SAE->isInstantiationDependent()) {
+ // Itanium C++ ABI:
+ // If the operand of a sizeof or alignof operator is not
+ // instantiation-dependent it is encoded as an integer literal
+ // reflecting the result of the operator.
+ //
+ // If the result of the operator is implicitly converted to a known
+ // integer type, that type is used for the literal; otherwise, the type
+ // of std::size_t or std::ptrdiff_t is used.
+ QualType T = (ImplicitlyConvertedToType.isNull() ||
+ !ImplicitlyConvertedToType->isIntegerType())
+ ? SAE->getType()
+ : ImplicitlyConvertedToType;
+ llvm::APSInt V = SAE->EvaluateKnownConstInt(Context.getASTContext());
+ mangleIntegerLiteral(T, V);
+ break;
+ }
+
+ switch(SAE->getKind()) {
+ case UETT_SizeOf:
+ Out << 's';
+ break;
+ case UETT_AlignOf:
+ Out << 'a';
+ break;
+ case UETT_VecStep: {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot yet mangle vec_step expression");
+ Diags.Report(DiagID);
+ return;
+ }
+ case UETT_OpenMPRequiredSimdAlign: {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "cannot yet mangle __builtin_omp_required_simd_align expression");
+ Diags.Report(DiagID);
+ return;
+ }
+ }
+ if (SAE->isArgumentType()) {
+ Out << 't';
+ mangleType(SAE->getArgumentType());
+ } else {
+ Out << 'z';
+ mangleExpression(SAE->getArgumentExpr());
+ }
+ break;
+ }
+
+ case Expr::CXXThrowExprClass: {
+ const CXXThrowExpr *TE = cast<CXXThrowExpr>(E);
+ // <expression> ::= tw <expression> # throw expression
+ // ::= tr # rethrow
+ if (TE->getSubExpr()) {
+ Out << "tw";
+ mangleExpression(TE->getSubExpr());
+ } else {
+ Out << "tr";
+ }
+ break;
+ }
+
+ case Expr::CXXTypeidExprClass: {
+ const CXXTypeidExpr *TIE = cast<CXXTypeidExpr>(E);
+ // <expression> ::= ti <type> # typeid (type)
+ // ::= te <expression> # typeid (expression)
+ if (TIE->isTypeOperand()) {
+ Out << "ti";
+ mangleType(TIE->getTypeOperand(Context.getASTContext()));
+ } else {
+ Out << "te";
+ mangleExpression(TIE->getExprOperand());
+ }
+ break;
+ }
+
+ case Expr::CXXDeleteExprClass: {
+ const CXXDeleteExpr *DE = cast<CXXDeleteExpr>(E);
+ // <expression> ::= [gs] dl <expression> # [::] delete expr
+ // ::= [gs] da <expression> # [::] delete [] expr
+ if (DE->isGlobalDelete()) Out << "gs";
+ Out << (DE->isArrayForm() ? "da" : "dl");
+ mangleExpression(DE->getArgument());
+ break;
+ }
+
+ case Expr::UnaryOperatorClass: {
+ const UnaryOperator *UO = cast<UnaryOperator>(E);
+ mangleOperatorName(UnaryOperator::getOverloadedOperator(UO->getOpcode()),
+ /*Arity=*/1);
+ mangleExpression(UO->getSubExpr());
+ break;
+ }
+
+ case Expr::ArraySubscriptExprClass: {
+ const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(E);
+
+ // Array subscript is treated as a syntactically weird form of
+ // binary operator.
+ Out << "ix";
+ mangleExpression(AE->getLHS());
+ mangleExpression(AE->getRHS());
+ break;
+ }
+
+ case Expr::CompoundAssignOperatorClass: // fallthrough
+ case Expr::BinaryOperatorClass: {
+ const BinaryOperator *BO = cast<BinaryOperator>(E);
+ if (BO->getOpcode() == BO_PtrMemD)
+ Out << "ds";
+ else
+ mangleOperatorName(BinaryOperator::getOverloadedOperator(BO->getOpcode()),
+ /*Arity=*/2);
+ mangleExpression(BO->getLHS());
+ mangleExpression(BO->getRHS());
+ break;
+ }
+
+ case Expr::ConditionalOperatorClass: {
+ const ConditionalOperator *CO = cast<ConditionalOperator>(E);
+ mangleOperatorName(OO_Conditional, /*Arity=*/3);
+ mangleExpression(CO->getCond());
+ mangleExpression(CO->getLHS(), Arity);
+ mangleExpression(CO->getRHS(), Arity);
+ break;
+ }
+
+ case Expr::ImplicitCastExprClass: {
+ ImplicitlyConvertedToType = E->getType();
+ E = cast<ImplicitCastExpr>(E)->getSubExpr();
+ goto recurse;
+ }
+
+ case Expr::ObjCBridgedCastExprClass: {
+ // Mangle ownership casts as a vendor extended operator __bridge,
+ // __bridge_transfer, or __bridge_retain.
+ StringRef Kind = cast<ObjCBridgedCastExpr>(E)->getBridgeKindName();
+ Out << "v1U" << Kind.size() << Kind;
+ }
+ // Fall through to mangle the cast itself.
+
+ case Expr::CStyleCastExprClass:
+ mangleCastExpression(E, "cv");
+ break;
+
+ case Expr::CXXFunctionalCastExprClass: {
+ auto *Sub = cast<ExplicitCastExpr>(E)->getSubExpr()->IgnoreImplicit();
+ // FIXME: Add isImplicit to CXXConstructExpr.
+ if (auto *CCE = dyn_cast<CXXConstructExpr>(Sub))
+ if (CCE->getParenOrBraceRange().isInvalid())
+ Sub = CCE->getArg(0)->IgnoreImplicit();
+ if (auto *StdInitList = dyn_cast<CXXStdInitializerListExpr>(Sub))
+ Sub = StdInitList->getSubExpr()->IgnoreImplicit();
+ if (auto *IL = dyn_cast<InitListExpr>(Sub)) {
+ Out << "tl";
+ mangleType(E->getType());
+ mangleInitListElements(IL);
+ Out << "E";
+ } else {
+ mangleCastExpression(E, "cv");
+ }
+ break;
+ }
+
+ case Expr::CXXStaticCastExprClass:
+ mangleCastExpression(E, "sc");
+ break;
+ case Expr::CXXDynamicCastExprClass:
+ mangleCastExpression(E, "dc");
+ break;
+ case Expr::CXXReinterpretCastExprClass:
+ mangleCastExpression(E, "rc");
+ break;
+ case Expr::CXXConstCastExprClass:
+ mangleCastExpression(E, "cc");
+ break;
+
+ case Expr::CXXOperatorCallExprClass: {
+ const CXXOperatorCallExpr *CE = cast<CXXOperatorCallExpr>(E);
+ unsigned NumArgs = CE->getNumArgs();
+ mangleOperatorName(CE->getOperator(), /*Arity=*/NumArgs);
+ // Mangle the arguments.
+ for (unsigned i = 0; i != NumArgs; ++i)
+ mangleExpression(CE->getArg(i));
+ break;
+ }
+
+ case Expr::ParenExprClass:
+ mangleExpression(cast<ParenExpr>(E)->getSubExpr(), Arity);
+ break;
+
+ case Expr::DeclRefExprClass: {
+ const NamedDecl *D = cast<DeclRefExpr>(E)->getDecl();
+
+ switch (D->getKind()) {
+ default:
+ // <expr-primary> ::= L <mangled-name> E # external name
+ Out << 'L';
+ mangle(D);
+ Out << 'E';
+ break;
+
+ case Decl::ParmVar:
+ mangleFunctionParam(cast<ParmVarDecl>(D));
+ break;
+
+ case Decl::EnumConstant: {
+ const EnumConstantDecl *ED = cast<EnumConstantDecl>(D);
+ mangleIntegerLiteral(ED->getType(), ED->getInitVal());
+ break;
+ }
+
+ case Decl::NonTypeTemplateParm: {
+ const NonTypeTemplateParmDecl *PD = cast<NonTypeTemplateParmDecl>(D);
+ mangleTemplateParameter(PD->getIndex());
+ break;
+ }
+
+ }
+
+ break;
+ }
+
+ case Expr::SubstNonTypeTemplateParmPackExprClass:
+ // FIXME: not clear how to mangle this!
+ // template <unsigned... N> class A {
+ // template <class... U> void foo(U (&x)[N]...);
+ // };
+ Out << "_SUBSTPACK_";
+ break;
+
+ case Expr::FunctionParmPackExprClass: {
+ // FIXME: not clear how to mangle this!
+ const FunctionParmPackExpr *FPPE = cast<FunctionParmPackExpr>(E);
+ Out << "v110_SUBSTPACK";
+ mangleFunctionParam(FPPE->getParameterPack());
+ break;
+ }
+
+ case Expr::DependentScopeDeclRefExprClass: {
+ const DependentScopeDeclRefExpr *DRE = cast<DependentScopeDeclRefExpr>(E);
+ mangleUnresolvedName(DRE->getQualifier(), DRE->getDeclName(), Arity);
+
+ // All the <unresolved-name> productions end in a
+ // base-unresolved-name, where <template-args> are just tacked
+ // onto the end.
+ if (DRE->hasExplicitTemplateArgs())
+ mangleTemplateArgs(DRE->getTemplateArgs(), DRE->getNumTemplateArgs());
+ break;
+ }
+
+ case Expr::CXXBindTemporaryExprClass:
+ mangleExpression(cast<CXXBindTemporaryExpr>(E)->getSubExpr());
+ break;
+
+ case Expr::ExprWithCleanupsClass:
+ mangleExpression(cast<ExprWithCleanups>(E)->getSubExpr(), Arity);
+ break;
+
+ case Expr::FloatingLiteralClass: {
+ const FloatingLiteral *FL = cast<FloatingLiteral>(E);
+ Out << 'L';
+ mangleType(FL->getType());
+ mangleFloat(FL->getValue());
+ Out << 'E';
+ break;
+ }
+
+ case Expr::CharacterLiteralClass:
+ Out << 'L';
+ mangleType(E->getType());
+ Out << cast<CharacterLiteral>(E)->getValue();
+ Out << 'E';
+ break;
+
+ // FIXME: __objc_yes/__objc_no are mangled the same as true/false.
+ case Expr::ObjCBoolLiteralExprClass:
+ Out << "Lb";
+ Out << (cast<ObjCBoolLiteralExpr>(E)->getValue() ? '1' : '0');
+ Out << 'E';
+ break;
+
+ case Expr::CXXBoolLiteralExprClass:
+ Out << "Lb";
+ Out << (cast<CXXBoolLiteralExpr>(E)->getValue() ? '1' : '0');
+ Out << 'E';
+ break;
+
+ case Expr::IntegerLiteralClass: {
+ llvm::APSInt Value(cast<IntegerLiteral>(E)->getValue());
+ if (E->getType()->isSignedIntegerType())
+ Value.setIsSigned(true);
+ mangleIntegerLiteral(E->getType(), Value);
+ break;
+ }
+
+ case Expr::ImaginaryLiteralClass: {
+ const ImaginaryLiteral *IE = cast<ImaginaryLiteral>(E);
+ // Mangle as if a complex literal.
+ // Proposal from David Vandevoorde, 2010.06.30.
+ Out << 'L';
+ mangleType(E->getType());
+ if (const FloatingLiteral *Imag =
+ dyn_cast<FloatingLiteral>(IE->getSubExpr())) {
+ // Mangle a floating-point zero of the appropriate type.
+ mangleFloat(llvm::APFloat(Imag->getValue().getSemantics()));
+ Out << '_';
+ mangleFloat(Imag->getValue());
+ } else {
+ Out << "0_";
+ llvm::APSInt Value(cast<IntegerLiteral>(IE->getSubExpr())->getValue());
+ if (IE->getSubExpr()->getType()->isSignedIntegerType())
+ Value.setIsSigned(true);
+ mangleNumber(Value);
+ }
+ Out << 'E';
+ break;
+ }
+
+ case Expr::StringLiteralClass: {
+ // Revised proposal from David Vandevoorde, 2010.07.15.
+ Out << 'L';
+ assert(isa<ConstantArrayType>(E->getType()));
+ mangleType(E->getType());
+ Out << 'E';
+ break;
+ }
+
+ case Expr::GNUNullExprClass:
+ // FIXME: should this really be mangled the same as nullptr?
+ // fallthrough
+
+ case Expr::CXXNullPtrLiteralExprClass: {
+ Out << "LDnE";
+ break;
+ }
+
+ case Expr::PackExpansionExprClass:
+ Out << "sp";
+ mangleExpression(cast<PackExpansionExpr>(E)->getPattern());
+ break;
+
+ case Expr::SizeOfPackExprClass: {
+ auto *SPE = cast<SizeOfPackExpr>(E);
+ if (SPE->isPartiallySubstituted()) {
+ Out << "sP";
+ for (const auto &A : SPE->getPartialArguments())
+ mangleTemplateArg(A);
+ Out << "E";
+ break;
+ }
+
+ Out << "sZ";
+ const NamedDecl *Pack = SPE->getPack();
+ if (const TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Pack))
+ mangleTemplateParameter(TTP->getIndex());
+ else if (const NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(Pack))
+ mangleTemplateParameter(NTTP->getIndex());
+ else if (const TemplateTemplateParmDecl *TempTP
+ = dyn_cast<TemplateTemplateParmDecl>(Pack))
+ mangleTemplateParameter(TempTP->getIndex());
+ else
+ mangleFunctionParam(cast<ParmVarDecl>(Pack));
+ break;
+ }
+
+ case Expr::MaterializeTemporaryExprClass: {
+ mangleExpression(cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr());
+ break;
+ }
+
+ case Expr::CXXFoldExprClass: {
+ auto *FE = cast<CXXFoldExpr>(E);
+ if (FE->isLeftFold())
+ Out << (FE->getInit() ? "fL" : "fl");
+ else
+ Out << (FE->getInit() ? "fR" : "fr");
+
+ if (FE->getOperator() == BO_PtrMemD)
+ Out << "ds";
+ else
+ mangleOperatorName(
+ BinaryOperator::getOverloadedOperator(FE->getOperator()),
+ /*Arity=*/2);
+
+ if (FE->getLHS())
+ mangleExpression(FE->getLHS());
+ if (FE->getRHS())
+ mangleExpression(FE->getRHS());
+ break;
+ }
+
+ case Expr::CXXThisExprClass:
+ Out << "fpT";
+ break;
+
+ case Expr::CoawaitExprClass:
+ // FIXME: Propose a non-vendor mangling.
+ Out << "v18co_await";
+ mangleExpression(cast<CoawaitExpr>(E)->getOperand());
+ break;
+
+ case Expr::CoyieldExprClass:
+ // FIXME: Propose a non-vendor mangling.
+ Out << "v18co_yield";
+ mangleExpression(cast<CoyieldExpr>(E)->getOperand());
+ break;
+ }
+}
+
+/// Mangle an expression which refers to a parameter variable.
+///
+/// <expression> ::= <function-param>
+/// <function-param> ::= fp <top-level CV-qualifiers> _ # L == 0, I == 0
+/// <function-param> ::= fp <top-level CV-qualifiers>
+/// <parameter-2 non-negative number> _ # L == 0, I > 0
+/// <function-param> ::= fL <L-1 non-negative number>
+/// p <top-level CV-qualifiers> _ # L > 0, I == 0
+/// <function-param> ::= fL <L-1 non-negative number>
+/// p <top-level CV-qualifiers>
+/// <I-1 non-negative number> _ # L > 0, I > 0
+///
+/// L is the nesting depth of the parameter, defined as 1 if the
+/// parameter comes from the innermost function prototype scope
+/// enclosing the current context, 2 if from the next enclosing
+/// function prototype scope, and so on, with one special case: if
+/// we've processed the full parameter clause for the innermost
+/// function type, then L is one less. This definition conveniently
+/// makes it irrelevant whether a function's result type was written
+/// trailing or leading, but is otherwise overly complicated; the
+/// numbering was first designed without considering references to
+/// parameters in locations other than return types, and then the
+/// mangling had to be generalized without changing the existing
+/// manglings.
+///
+/// I is the zero-based index of the parameter within its parameter
+/// declaration clause. Note that the original ABI document describes
+/// this using 1-based ordinals.
+void CXXNameMangler::mangleFunctionParam(const ParmVarDecl *parm) {
+ unsigned parmDepth = parm->getFunctionScopeDepth();
+ unsigned parmIndex = parm->getFunctionScopeIndex();
+
+ // Compute 'L'.
+ // parmDepth does not include the declaring function prototype.
+ // FunctionTypeDepth does account for that.
+ assert(parmDepth < FunctionTypeDepth.getDepth());
+ unsigned nestingDepth = FunctionTypeDepth.getDepth() - parmDepth;
+ if (FunctionTypeDepth.isInResultType())
+ nestingDepth--;
+
+ if (nestingDepth == 0) {
+ Out << "fp";
+ } else {
+ Out << "fL" << (nestingDepth - 1) << 'p';
+ }
+
+ // Top-level qualifiers. We don't have to worry about arrays here,
+ // because parameters declared as arrays should already have been
+ // transformed to have pointer type. FIXME: apparently these don't
+ // get mangled if used as an rvalue of a known non-class type?
+ assert(!parm->getType()->isArrayType()
+ && "parameter's type is still an array type?");
+ mangleQualifiers(parm->getType().getQualifiers());
+
+ // Parameter index.
+ if (parmIndex != 0) {
+ Out << (parmIndex - 1);
+ }
+ Out << '_';
+}
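+
+// Example (sketch; f and g are placeholders): in "template <class T> auto
+// f(T x) -> decltype(g(x))", the reference to x from the trailing return type
+// has nesting depth 0 and index 0 and mangles as fp_; a second parameter
+// referenced the same way would be fp0_.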
+
+void CXXNameMangler::mangleCXXCtorType(CXXCtorType T) {
+ // <ctor-dtor-name> ::= C1 # complete object constructor
+ // ::= C2 # base object constructor
+ //
+ // In addition, C5 is a comdat name with C1 and C2 in it.
+ switch (T) {
+ case Ctor_Complete:
+ Out << "C1";
+ break;
+ case Ctor_Base:
+ Out << "C2";
+ break;
+ case Ctor_Comdat:
+ Out << "C5";
+ break;
+ case Ctor_DefaultClosure:
+ case Ctor_CopyingClosure:
+ llvm_unreachable("closure constructors don't exist for the Itanium ABI!");
+ }
+}
+
+void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
+ // <ctor-dtor-name> ::= D0 # deleting destructor
+ // ::= D1 # complete object destructor
+ // ::= D2 # base object destructor
+ //
+ // In addition, D5 is a comdat name with D1, D2 and, if virtual, D0 in it.
+ switch (T) {
+ case Dtor_Deleting:
+ Out << "D0";
+ break;
+ case Dtor_Complete:
+ Out << "D1";
+ break;
+ case Dtor_Base:
+ Out << "D2";
+ break;
+ case Dtor_Comdat:
+ Out << "D5";
+ break;
+ }
+}
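+
+// Examples (sketch; S is a placeholder class): the complete-object
+// constructor of S is _ZN1SC1Ev and the base-object form _ZN1SC2Ev; a virtual
+// destructor additionally gets the deleting variant _ZN1SD0Ev alongside
+// _ZN1SD1Ev and _ZN1SD2Ev.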
+
+void CXXNameMangler::mangleTemplateArgs(const TemplateArgumentLoc *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ // <template-args> ::= I <template-arg>+ E
+ Out << 'I';
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ mangleTemplateArg(TemplateArgs[i].getArgument());
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleTemplateArgs(const TemplateArgumentList &AL) {
+ // <template-args> ::= I <template-arg>+ E
+ Out << 'I';
+ for (unsigned i = 0, e = AL.size(); i != e; ++i)
+ mangleTemplateArg(AL[i]);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleTemplateArgs(const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ // <template-args> ::= I <template-arg>+ E
+ Out << 'I';
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ mangleTemplateArg(TemplateArgs[i]);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleTemplateArg(TemplateArgument A) {
+ // <template-arg> ::= <type> # type or template
+ // ::= X <expression> E # expression
+ // ::= <expr-primary> # simple expressions
+ // ::= J <template-arg>* E # argument pack
+ if (!A.isInstantiationDependent() || A.isDependent())
+ A = Context.getASTContext().getCanonicalTemplateArgument(A);
+
+ switch (A.getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("Cannot mangle NULL template argument");
+
+ case TemplateArgument::Type:
+ mangleType(A.getAsType());
+ break;
+ case TemplateArgument::Template:
+ // This is mangled as <type>.
+ mangleType(A.getAsTemplate());
+ break;
+ case TemplateArgument::TemplateExpansion:
+ // <type> ::= Dp <type> # pack expansion (C++0x)
+ Out << "Dp";
+ mangleType(A.getAsTemplateOrTemplatePattern());
+ break;
+ case TemplateArgument::Expression: {
+ // It's possible to end up with a DeclRefExpr here in certain
+ // dependent cases, in which case we should mangle as a
+ // declaration.
+ const Expr *E = A.getAsExpr()->IgnoreParens();
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ const ValueDecl *D = DRE->getDecl();
+ if (isa<VarDecl>(D) || isa<FunctionDecl>(D)) {
+ Out << 'L';
+ mangle(D);
+ Out << 'E';
+ break;
+ }
+ }
+
+ Out << 'X';
+ mangleExpression(E);
+ Out << 'E';
+ break;
+ }
+ case TemplateArgument::Integral:
+ mangleIntegerLiteral(A.getIntegralType(), A.getAsIntegral());
+ break;
+ case TemplateArgument::Declaration: {
+ // <expr-primary> ::= L <mangled-name> E # external name
+ // Clang produces ASTs where pointer-to-member-function expressions
+ // and pointer-to-function expressions are represented as declarations rather
+ // than expressions. We compensate for that here to produce the correct mangling.
+ ValueDecl *D = A.getAsDecl();
+ bool compensateMangling = !A.getParamTypeForDecl()->isReferenceType();
+ if (compensateMangling) {
+ Out << 'X';
+ mangleOperatorName(OO_Amp, 1);
+ }
+
+ Out << 'L';
+ // References to external entities use the mangled name; if the name would
+ // not normally be mangled then mangle it as unqualified.
+ mangle(D);
+ Out << 'E';
+
+ if (compensateMangling)
+ Out << 'E';
+
+ break;
+ }
+ case TemplateArgument::NullPtr: {
+ // <expr-primary> ::= L <type> 0 E
+ Out << 'L';
+ mangleType(A.getNullPtrType());
+ Out << "0E";
+ break;
+ }
+ case TemplateArgument::Pack: {
+ // <template-arg> ::= J <template-arg>* E
+ Out << 'J';
+ for (const auto &P : A.pack_elements())
+ mangleTemplateArg(P);
+ Out << 'E';
+ }
+ }
+}
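+
+// Examples (sketch; A is a placeholder template): A<5, int> emits its
+// argument list as ILi5EiE, and a pack of {int, char} appears as JicE inside
+// the enclosing I..E.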
+
+void CXXNameMangler::mangleTemplateParameter(unsigned Index) {
+ // <template-param> ::= T_ # first template parameter
+ // ::= T <parameter-2 non-negative number> _
+ if (Index == 0)
+ Out << "T_";
+ else
+ Out << 'T' << (Index - 1) << '_';
+}
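+
+// Examples: the first template parameter mangles as T_, the second as T0_,
+// and the third as T1_, mirroring the off-by-one numbering of <seq-id>.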
+
+void CXXNameMangler::mangleSeqID(unsigned SeqID) {
+ if (SeqID == 1)
+ Out << '0';
+ else if (SeqID > 1) {
+ SeqID--;
+
+ // <seq-id> is encoded in base-36, using digits and upper case letters.
+ char Buffer[7]; // log(2**32) / log(36) < 7, so 7 base-36 digits suffice.
+ MutableArrayRef<char> BufferRef(Buffer);
+ MutableArrayRef<char>::reverse_iterator I = BufferRef.rbegin();
+
+ for (; SeqID != 0; SeqID /= 36) {
+ unsigned C = SeqID % 36;
+ *I++ = (C < 10 ? '0' + C : 'A' + C - 10);
+ }
+
+ Out.write(I.base(), I - BufferRef.rbegin());
+ }
+ Out << '_';
+}
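+
+// Example (sketch): substitution references therefore read S_, S0_, S1_, ...
+// S9_, SA_, ..., SZ_, S10_ in base 36, with SeqID 0 producing the bare S_.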
+
+void CXXNameMangler::mangleExistingSubstitution(QualType type) {
+ bool result = mangleSubstitution(type);
+ assert(result && "no existing substitution for type");
+ (void) result;
+}
+
+void CXXNameMangler::mangleExistingSubstitution(TemplateName tname) {
+ bool result = mangleSubstitution(tname);
+ assert(result && "no existing substitution for template name");
+ (void) result;
+}
+
+// <substitution> ::= S <seq-id> _
+// ::= S_
+bool CXXNameMangler::mangleSubstitution(const NamedDecl *ND) {
+ // Try one of the standard substitutions first.
+ if (mangleStandardSubstitution(ND))
+ return true;
+
+ ND = cast<NamedDecl>(ND->getCanonicalDecl());
+ return mangleSubstitution(reinterpret_cast<uintptr_t>(ND));
+}
+
+/// Determine whether the given type has any qualifiers that are relevant for
+/// substitutions.
+static bool hasMangledSubstitutionQualifiers(QualType T) {
+ Qualifiers Qs = T.getQualifiers();
+ return Qs.getCVRQualifiers() || Qs.hasAddressSpace();
+}
+
+bool CXXNameMangler::mangleSubstitution(QualType T) {
+ if (!hasMangledSubstitutionQualifiers(T)) {
+ if (const RecordType *RT = T->getAs<RecordType>())
+ return mangleSubstitution(RT->getDecl());
+ }
+
+ uintptr_t TypePtr = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
+
+ return mangleSubstitution(TypePtr);
+}
+
+bool CXXNameMangler::mangleSubstitution(TemplateName Template) {
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleSubstitution(TD);
+
+ Template = Context.getASTContext().getCanonicalTemplateName(Template);
+ return mangleSubstitution(
+ reinterpret_cast<uintptr_t>(Template.getAsVoidPointer()));
+}
+
+bool CXXNameMangler::mangleSubstitution(uintptr_t Ptr) {
+ llvm::DenseMap<uintptr_t, unsigned>::iterator I = Substitutions.find(Ptr);
+ if (I == Substitutions.end())
+ return false;
+
+ unsigned SeqID = I->second;
+ Out << 'S';
+ mangleSeqID(SeqID);
+
+ return true;
+}
+
+static bool isCharType(QualType T) {
+ if (T.isNull())
+ return false;
+
+ return T->isSpecificBuiltinType(BuiltinType::Char_S) ||
+ T->isSpecificBuiltinType(BuiltinType::Char_U);
+}
+
+/// Returns whether a given type is a template specialization of a given name
+/// with a single argument of type char.
+static bool isCharSpecialization(QualType T, const char *Name) {
+ if (T.isNull())
+ return false;
+
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ const ClassTemplateSpecializationDecl *SD =
+ dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
+ if (!SD)
+ return false;
+
+ if (!isStdNamespace(getEffectiveDeclContext(SD)))
+ return false;
+
+ const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
+ if (TemplateArgs.size() != 1)
+ return false;
+
+ if (!isCharType(TemplateArgs[0].getAsType()))
+ return false;
+
+ return SD->getIdentifier()->getName() == Name;
+}
+
+template <std::size_t StrLen>
+static bool isStreamCharSpecialization(const ClassTemplateSpecializationDecl*SD,
+ const char (&Str)[StrLen]) {
+ if (!SD->getIdentifier()->isStr(Str))
+ return false;
+
+ const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
+ if (TemplateArgs.size() != 2)
+ return false;
+
+ if (!isCharType(TemplateArgs[0].getAsType()))
+ return false;
+
+ if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits"))
+ return false;
+
+ return true;
+}
+
+bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) {
+ // <substitution> ::= St # ::std::
+ if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
+ if (isStd(NS)) {
+ Out << "St";
+ return true;
+ }
+ }
+
+ if (const ClassTemplateDecl *TD = dyn_cast<ClassTemplateDecl>(ND)) {
+ if (!isStdNamespace(getEffectiveDeclContext(TD)))
+ return false;
+
+ // <substitution> ::= Sa # ::std::allocator
+ if (TD->getIdentifier()->isStr("allocator")) {
+ Out << "Sa";
+ return true;
+ }
+
+ // <substitution> ::= Sb # ::std::basic_string
+ if (TD->getIdentifier()->isStr("basic_string")) {
+ Out << "Sb";
+ return true;
+ }
+ }
+
+ if (const ClassTemplateSpecializationDecl *SD =
+ dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
+ if (!isStdNamespace(getEffectiveDeclContext(SD)))
+ return false;
+
+ // <substitution> ::= Ss # ::std::basic_string<char,
+ // ::std::char_traits<char>,
+ // ::std::allocator<char> >
+ if (SD->getIdentifier()->isStr("basic_string")) {
+ const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
+
+ if (TemplateArgs.size() != 3)
+ return false;
+
+ if (!isCharType(TemplateArgs[0].getAsType()))
+ return false;
+
+ if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits"))
+ return false;
+
+ if (!isCharSpecialization(TemplateArgs[2].getAsType(), "allocator"))
+ return false;
+
+ Out << "Ss";
+ return true;
+ }
+
+ // <substitution> ::= Si # ::std::basic_istream<char,
+ // ::std::char_traits<char> >
+ if (isStreamCharSpecialization(SD, "basic_istream")) {
+ Out << "Si";
+ return true;
+ }
+
+ // <substitution> ::= So # ::std::basic_ostream<char,
+ // ::std::char_traits<char> >
+ if (isStreamCharSpecialization(SD, "basic_ostream")) {
+ Out << "So";
+ return true;
+ }
+
+ // <substitution> ::= Sd # ::std::basic_iostream<char,
+ // ::std::char_traits<char> >
+ if (isStreamCharSpecialization(SD, "basic_iostream")) {
+ Out << "Sd";
+ return true;
+ }
+ }
+ return false;
+}
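+
+// Examples (sketch, under the pre-C++11 libstdc++ layout these rules assume):
+// "void f(std::string)" mangles as _Z1fSs, and stream operator<< overloads
+// taking std::ostream& commonly begin with _ZlsRSo.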
+
+void CXXNameMangler::addSubstitution(QualType T) {
+ if (!hasMangledSubstitutionQualifiers(T)) {
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ addSubstitution(RT->getDecl());
+ return;
+ }
+ }
+
+ uintptr_t TypePtr = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
+ addSubstitution(TypePtr);
+}
+
+void CXXNameMangler::addSubstitution(TemplateName Template) {
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return addSubstitution(TD);
+
+ Template = Context.getASTContext().getCanonicalTemplateName(Template);
+ addSubstitution(reinterpret_cast<uintptr_t>(Template.getAsVoidPointer()));
+}
+
+void CXXNameMangler::addSubstitution(uintptr_t Ptr) {
+ assert(!Substitutions.count(Ptr) && "Substitution already exists!");
+ Substitutions[Ptr] = SeqID++;
+}
+
+//
+
+/// Mangles the name of the declaration D and emits that name to the given
+/// output stream.
+///
+/// The declaration must be one that requires a mangled name; when no mangling
+/// is required, the caller should instead emit the identifier of the
+/// declaration (\c D->getIdentifier()) as its name.
+void ItaniumMangleContextImpl::mangleCXXName(const NamedDecl *D,
+ raw_ostream &Out) {
+ assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
+ "Invalid mangleName() call, argument is not a variable or function!");
+ assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
+ "Invalid mangleName() call on 'structor decl!");
+
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ getASTContext().getSourceManager(),
+ "Mangling declaration");
+
+ CXXNameMangler Mangler(*this, Out, D);
+ Mangler.mangle(D);
+}
+
+void ItaniumMangleContextImpl::mangleCXXCtor(const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ raw_ostream &Out) {
+ CXXNameMangler Mangler(*this, Out, D, Type);
+ Mangler.mangle(D);
+}
+
+void ItaniumMangleContextImpl::mangleCXXDtor(const CXXDestructorDecl *D,
+ CXXDtorType Type,
+ raw_ostream &Out) {
+ CXXNameMangler Mangler(*this, Out, D, Type);
+ Mangler.mangle(D);
+}
+
+void ItaniumMangleContextImpl::mangleCXXCtorComdat(const CXXConstructorDecl *D,
+ raw_ostream &Out) {
+ CXXNameMangler Mangler(*this, Out, D, Ctor_Comdat);
+ Mangler.mangle(D);
+}
+
+void ItaniumMangleContextImpl::mangleCXXDtorComdat(const CXXDestructorDecl *D,
+ raw_ostream &Out) {
+ CXXNameMangler Mangler(*this, Out, D, Dtor_Comdat);
+ Mangler.mangle(D);
+}
+
+void ItaniumMangleContextImpl::mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ raw_ostream &Out) {
+ // <special-name> ::= T <call-offset> <base encoding>
+ // # base is the nominal target function of thunk
+ // <special-name> ::= Tc <call-offset> <call-offset> <base encoding>
+ // # base is the nominal target function of thunk
+ // # first call-offset is 'this' adjustment
+ // # second call-offset is result adjustment
+
+ assert(!isa<CXXDestructorDecl>(MD) &&
+ "Use mangleCXXDtor for destructor decls!");
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZT";
+ if (!Thunk.Return.isEmpty())
+ Mangler.getStream() << 'c';
+
+ // Mangle the 'this' pointer adjustment.
+ Mangler.mangleCallOffset(Thunk.This.NonVirtual,
+ Thunk.This.Virtual.Itanium.VCallOffsetOffset);
+
+ // Mangle the return pointer adjustment if there is one.
+ if (!Thunk.Return.isEmpty())
+ Mangler.mangleCallOffset(Thunk.Return.NonVirtual,
+ Thunk.Return.Virtual.Itanium.VBaseOffsetOffset);
+
+ Mangler.mangleFunctionEncoding(MD);
+}
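+
+// Example (sketch; D::f is a placeholder): a thunk that only adjusts 'this'
+// by -8 bytes non-virtually mangles as _ZThn8_N1D1fEv; the Tc form with two
+// call-offsets appears only for covariant return thunks.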
+
+void ItaniumMangleContextImpl::mangleCXXDtorThunk(
+ const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment, raw_ostream &Out) {
+ // <special-name> ::= T <call-offset> <base encoding>
+ // # base is the nominal target function of thunk
+ CXXNameMangler Mangler(*this, Out, DD, Type);
+ Mangler.getStream() << "_ZT";
+
+ // Mangle the 'this' pointer adjustment.
+ Mangler.mangleCallOffset(ThisAdjustment.NonVirtual,
+ ThisAdjustment.Virtual.Itanium.VCallOffsetOffset);
+
+ Mangler.mangleFunctionEncoding(DD);
+}
+
+/// Emits the mangled name of the guard variable for the passed-in VarDecl.
+void ItaniumMangleContextImpl::mangleStaticGuardVariable(const VarDecl *D,
+ raw_ostream &Out) {
+ // <special-name> ::= GV <object name> # Guard variable for one-time
+ // # initialization
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZGV";
+ Mangler.mangleName(D);
+}
+
+void ItaniumMangleContextImpl::mangleDynamicInitializer(const VarDecl *MD,
+ raw_ostream &Out) {
+ // These symbols are internal in the Itanium ABI, so the names don't matter.
+ // Clang has traditionally used this symbol and allowed LLVM to adjust it to
+ // avoid duplicate symbols.
+ Out << "__cxx_global_var_init";
+}
+
+void ItaniumMangleContextImpl::mangleDynamicAtExitDestructor(const VarDecl *D,
+ raw_ostream &Out) {
+ // Prefix the mangling of D with __dtor_.
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "__dtor_";
+ if (shouldMangleDeclName(D))
+ Mangler.mangle(D);
+ else
+ Mangler.getStream() << D->getName();
+}
+
+void ItaniumMangleContextImpl::mangleSEHFilterExpression(
+ const NamedDecl *EnclosingDecl, raw_ostream &Out) {
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "__filt_";
+ if (shouldMangleDeclName(EnclosingDecl))
+ Mangler.mangle(EnclosingDecl);
+ else
+ Mangler.getStream() << EnclosingDecl->getName();
+}
+
+void ItaniumMangleContextImpl::mangleSEHFinallyBlock(
+ const NamedDecl *EnclosingDecl, raw_ostream &Out) {
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "__fin_";
+ if (shouldMangleDeclName(EnclosingDecl))
+ Mangler.mangle(EnclosingDecl);
+ else
+ Mangler.getStream() << EnclosingDecl->getName();
+}
+
+void ItaniumMangleContextImpl::mangleItaniumThreadLocalInit(const VarDecl *D,
+ raw_ostream &Out) {
+ // <special-name> ::= TH <object name>
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZTH";
+ Mangler.mangleName(D);
+}
+
+void
+ItaniumMangleContextImpl::mangleItaniumThreadLocalWrapper(const VarDecl *D,
+ raw_ostream &Out) {
+ // <special-name> ::= TW <object name>
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZTW";
+ Mangler.mangleName(D);
+}
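+
+// For a thread_local variable x at namespace scope these produce "_ZTH1x"
+// (the init function) and "_ZTW1x" (the wrapper), for example.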
+
+void ItaniumMangleContextImpl::mangleReferenceTemporary(const VarDecl *D,
+ unsigned ManglingNumber,
+ raw_ostream &Out) {
+ // We match the GCC mangling here.
+ // <special-name> ::= GR <object name>
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZGR";
+ Mangler.mangleName(D);
+ assert(ManglingNumber > 0 && "Reference temporary mangling number is zero!");
+ Mangler.mangleSeqID(ManglingNumber - 1);
+}
+
+void ItaniumMangleContextImpl::mangleCXXVTable(const CXXRecordDecl *RD,
+ raw_ostream &Out) {
+ // <special-name> ::= TV <type> # virtual table
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZTV";
+ Mangler.mangleNameOrStandardSubstitution(RD);
+}
+
+void ItaniumMangleContextImpl::mangleCXXVTT(const CXXRecordDecl *RD,
+ raw_ostream &Out) {
+ // <special-name> ::= TT <type> # VTT structure
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZTT";
+ Mangler.mangleNameOrStandardSubstitution(RD);
+}
+
+void ItaniumMangleContextImpl::mangleCXXCtorVTable(const CXXRecordDecl *RD,
+ int64_t Offset,
+ const CXXRecordDecl *Type,
+ raw_ostream &Out) {
+ // <special-name> ::= TC <type> <offset number> _ <base type>
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZTC";
+ Mangler.mangleNameOrStandardSubstitution(RD);
+ Mangler.getStream() << Offset;
+ Mangler.getStream() << '_';
+ Mangler.mangleNameOrStandardSubstitution(Type);
+}
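+
+// For example, the construction vtable for a base B at offset 0 within a
+// derived class D is "_ZTC1D0_1B" ("construction vtable for B-in-D");
+// compare the ordinary vtable "_ZTV1D".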
+
+void ItaniumMangleContextImpl::mangleCXXRTTI(QualType Ty, raw_ostream &Out) {
+ // <special-name> ::= TI <type> # typeinfo structure
+ assert(!Ty.hasQualifiers() && "RTTI info cannot have top-level qualifiers");
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZTI";
+ Mangler.mangleType(Ty);
+}
+
+void ItaniumMangleContextImpl::mangleCXXRTTIName(QualType Ty,
+ raw_ostream &Out) {
+ // <special-name> ::= TS <type> # typeinfo name (null terminated byte string)
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZTS";
+ Mangler.mangleType(Ty);
+}
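+
+// For a class S, the two entries above yield "_ZTI1S" (typeinfo structure)
+// and "_ZTS1S" (typeinfo name).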
+
+void ItaniumMangleContextImpl::mangleTypeName(QualType Ty, raw_ostream &Out) {
+ mangleCXXRTTIName(Ty, Out);
+}
+
+void ItaniumMangleContextImpl::mangleStringLiteral(const StringLiteral *,
+                                                   raw_ostream &) {
+ llvm_unreachable("Can't mangle string literals");
+}
+
+ItaniumMangleContext *
+ItaniumMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) {
+ return new ItaniumMangleContextImpl(Context, Diags);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/Mangle.cpp b/contrib/llvm/tools/clang/lib/AST/Mangle.cpp
new file mode 100644
index 0000000..014338f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/Mangle.cpp
@@ -0,0 +1,273 @@
+//===--- Mangle.cpp - Mangle C++ Names --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements generic name mangling support for blocks and Objective-C.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/Attr.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Mangle.h"
+#include "clang/Basic/ABI.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define MANGLE_CHECKER 0
+
+#if MANGLE_CHECKER
+#include <cxxabi.h>
+#endif
+
+using namespace clang;
+
+// FIXME: For blocks we currently mimic GCC's mangling scheme, which leaves
+// much to be desired. Come up with a better mangling scheme.
+
+static void mangleFunctionBlock(MangleContext &Context,
+ StringRef Outer,
+ const BlockDecl *BD,
+ raw_ostream &Out) {
+ unsigned discriminator = Context.getBlockId(BD, true);
+ if (discriminator == 0)
+ Out << "__" << Outer << "_block_invoke";
+ else
+ Out << "__" << Outer << "_block_invoke_" << discriminator+1;
+}
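+
+// For example, the first block in a function whose mangling is "foo" becomes
+// "__foo_block_invoke"; later blocks get a 1-based suffix, so the block with
+// discriminator 1 becomes "__foo_block_invoke_2".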
+
+void MangleContext::anchor() { }
+
+enum CCMangling {
+ CCM_Other,
+ CCM_Fast,
+ CCM_Vector,
+ CCM_Std
+};
+
+static bool isExternC(const NamedDecl *ND) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
+ return FD->isExternC();
+ return cast<VarDecl>(ND)->isExternC();
+}
+
+static CCMangling getCallingConvMangling(const ASTContext &Context,
+ const NamedDecl *ND) {
+ const TargetInfo &TI = Context.getTargetInfo();
+ const llvm::Triple &Triple = TI.getTriple();
+ if (!Triple.isOSWindows() ||
+ !(Triple.getArch() == llvm::Triple::x86 ||
+ Triple.getArch() == llvm::Triple::x86_64))
+ return CCM_Other;
+
+ if (Context.getLangOpts().CPlusPlus && !isExternC(ND) &&
+ TI.getCXXABI() == TargetCXXABI::Microsoft)
+ return CCM_Other;
+
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND);
+ if (!FD)
+ return CCM_Other;
+ QualType T = FD->getType();
+
+ const FunctionType *FT = T->castAs<FunctionType>();
+
+ CallingConv CC = FT->getCallConv();
+ switch (CC) {
+ default:
+ return CCM_Other;
+ case CC_X86FastCall:
+ return CCM_Fast;
+ case CC_X86StdCall:
+ return CCM_Std;
+ case CC_X86VectorCall:
+ return CCM_Vector;
+ }
+}
+
+bool MangleContext::shouldMangleDeclName(const NamedDecl *D) {
+ const ASTContext &ASTContext = getASTContext();
+
+ CCMangling CC = getCallingConvMangling(ASTContext, D);
+ if (CC != CCM_Other)
+ return true;
+
+ // In C, functions with no attributes never need to be mangled. Fastpath them.
+ if (!getASTContext().getLangOpts().CPlusPlus && !D->hasAttrs())
+ return false;
+
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (D->hasAttr<AsmLabelAttr>())
+ return true;
+
+ return shouldMangleCXXName(D);
+}
+
+void MangleContext::mangleName(const NamedDecl *D, raw_ostream &Out) {
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
+ // If we have an asm name, then we use it as the mangling.
+
+ // Adding the prefix can cause problems when one file has a "foo" and
+ // another has a "\01foo". That is known to happen on ELF with the
+ // tricks normally used for producing aliases (PR9177). Fortunately the
+ // llvm mangler on ELF is a nop, so we can just avoid adding the \01
+ // marker. We also avoid adding the marker if this is an alias for an
+ // LLVM intrinsic.
+ StringRef UserLabelPrefix =
+ getASTContext().getTargetInfo().getUserLabelPrefix();
+ if (!UserLabelPrefix.empty() && !ALA->getLabel().startswith("llvm."))
+ Out << '\01'; // LLVM IR Marker for __asm("foo")
+
+ Out << ALA->getLabel();
+ return;
+ }
+
+ const ASTContext &ASTContext = getASTContext();
+ CCMangling CC = getCallingConvMangling(ASTContext, D);
+ bool MCXX = shouldMangleCXXName(D);
+ const TargetInfo &TI = Context.getTargetInfo();
+ if (CC == CCM_Other || (MCXX && TI.getCXXABI() == TargetCXXABI::Microsoft)) {
+ if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D))
+ mangleObjCMethodName(OMD, Out);
+ else
+ mangleCXXName(D, Out);
+ return;
+ }
+
+ Out << '\01';
+ if (CC == CCM_Std)
+ Out << '_';
+ else if (CC == CCM_Fast)
+ Out << '@';
+
+ if (!MCXX)
+ Out << D->getIdentifier()->getName();
+ else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D))
+ mangleObjCMethodName(OMD, Out);
+ else
+ mangleCXXName(D, Out);
+
+ const FunctionDecl *FD = cast<FunctionDecl>(D);
+ const FunctionType *FT = FD->getType()->castAs<FunctionType>();
+ const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FT);
+ if (CC == CCM_Vector)
+ Out << '@';
+ Out << '@';
+ if (!Proto) {
+ Out << '0';
+ return;
+ }
+ assert(!Proto->isVariadic());
+ unsigned ArgWords = 0;
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
+ if (!MD->isStatic())
+ ++ArgWords;
+ for (const auto &AT : Proto->param_types())
+ // Size should be aligned to pointer size.
+ ArgWords += llvm::RoundUpToAlignment(ASTContext.getTypeSize(AT),
+ TI.getPointerWidth(0)) /
+ TI.getPointerWidth(0);
+ Out << ((TI.getPointerWidth(0) / 8) * ArgWords);
+}
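+
+// A worked example with hypothetical declarations: on x86,
+// "int __stdcall f(int)" decorates to "_f@4", "int __fastcall f(int)" to
+// "@f@4", and "int __vectorcall f(int)" to "f@@4", where 4 is the number of
+// argument bytes. The leading '\01' is only an LLVM marker that suppresses
+// the user-label prefix; it never reaches the object file.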
+
+void MangleContext::mangleGlobalBlock(const BlockDecl *BD,
+ const NamedDecl *ID,
+ raw_ostream &Out) {
+ unsigned discriminator = getBlockId(BD, false);
+ if (ID) {
+ if (shouldMangleDeclName(ID))
+ mangleName(ID, Out);
+ else {
+ Out << ID->getIdentifier()->getName();
+ }
+ }
+ if (discriminator == 0)
+ Out << "_block_invoke";
+ else
+ Out << "_block_invoke_" << discriminator+1;
+}
+
+void MangleContext::mangleCtorBlock(const CXXConstructorDecl *CD,
+ CXXCtorType CT, const BlockDecl *BD,
+ raw_ostream &ResStream) {
+ SmallString<64> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ mangleCXXCtor(CD, CT, Out);
+ mangleFunctionBlock(*this, Buffer, BD, ResStream);
+}
+
+void MangleContext::mangleDtorBlock(const CXXDestructorDecl *DD,
+ CXXDtorType DT, const BlockDecl *BD,
+ raw_ostream &ResStream) {
+ SmallString<64> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ mangleCXXDtor(DD, DT, Out);
+ mangleFunctionBlock(*this, Buffer, BD, ResStream);
+}
+
+void MangleContext::mangleBlock(const DeclContext *DC, const BlockDecl *BD,
+ raw_ostream &Out) {
+ assert(!isa<CXXConstructorDecl>(DC) && !isa<CXXDestructorDecl>(DC));
+
+ SmallString<64> Buffer;
+ llvm::raw_svector_ostream Stream(Buffer);
+ if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC)) {
+ mangleObjCMethodName(Method, Stream);
+ } else {
+ assert((isa<NamedDecl>(DC) || isa<BlockDecl>(DC)) &&
+ "expected a NamedDecl or BlockDecl");
+ if (isa<BlockDecl>(DC))
+ for (; DC && isa<BlockDecl>(DC); DC = DC->getParent())
+ (void) getBlockId(cast<BlockDecl>(DC), true);
+ assert((isa<TranslationUnitDecl>(DC) || isa<NamedDecl>(DC)) &&
+ "expected a TranslationUnitDecl or a NamedDecl");
+ if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
+ mangleCtorBlock(CD, /*CT*/ Ctor_Complete, BD, Out);
+ else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
+ mangleDtorBlock(DD, /*DT*/ Dtor_Complete, BD, Out);
+ else if (auto ND = dyn_cast<NamedDecl>(DC)) {
+ if (!shouldMangleDeclName(ND) && ND->getIdentifier())
+ Stream << ND->getIdentifier()->getName();
+ else {
+ // FIXME: We were doing a mangleUnqualifiedName() before, but that's
+ // a private member of a class that will soon itself be private to the
+ // Itanium C++ ABI object. What should we do now? Right now, I'm just
+ // calling the mangleName() method on the MangleContext; is there a
+ // better way?
+ mangleName(ND, Stream);
+ }
+ }
+ }
+ mangleFunctionBlock(*this, Buffer, BD, Out);
+}
+
+void MangleContext::mangleObjCMethodName(const ObjCMethodDecl *MD,
+ raw_ostream &Out) {
+ SmallString<64> Name;
+ llvm::raw_svector_ostream OS(Name);
+
+ const ObjCContainerDecl *CD =
+ dyn_cast<ObjCContainerDecl>(MD->getDeclContext());
+ assert (CD && "Missing container decl in GetNameForMethod");
+ OS << (MD->isInstanceMethod() ? '-' : '+') << '[' << CD->getName();
+ if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD))
+ OS << '(' << *CID << ')';
+ OS << ' ';
+ MD->getSelector().print(OS);
+ OS << ']';
+
+ Out << OS.str().size() << OS.str();
+}
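+
+// For example, "-[MyClass doSomething:]" is 23 characters, so the emitted
+// symbol is the length-prefixed string "23-[MyClass doSomething:]".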
diff --git a/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp b/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp
new file mode 100644
index 0000000..6ba31cc
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp
@@ -0,0 +1,272 @@
+//===------- MicrosoftCXXABI.cpp - AST support for the Microsoft C++ ABI --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ AST support targeting the Microsoft Visual C++
+// ABI.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CXXABI.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/MangleNumberingContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/TargetInfo.h"
+
+using namespace clang;
+
+namespace {
+
+/// \brief Numbers things which need to correspond across multiple TUs.
+/// Typically these are things like static locals, lambdas, or blocks.
+class MicrosoftNumberingContext : public MangleNumberingContext {
+ llvm::DenseMap<const Type *, unsigned> ManglingNumbers;
+ unsigned LambdaManglingNumber;
+ unsigned StaticLocalNumber;
+ unsigned StaticThreadlocalNumber;
+
+public:
+ MicrosoftNumberingContext()
+ : MangleNumberingContext(), LambdaManglingNumber(0),
+ StaticLocalNumber(0), StaticThreadlocalNumber(0) {}
+
+ unsigned getManglingNumber(const CXXMethodDecl *CallOperator) override {
+ return ++LambdaManglingNumber;
+ }
+
+ unsigned getManglingNumber(const BlockDecl *BD) override {
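+    // All blocks share a single counter: the map key is always the null
+    // Type, so this hands out 1, 2, 3, ... in request order.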
+ const Type *Ty = nullptr;
+ return ++ManglingNumbers[Ty];
+ }
+
+ unsigned getStaticLocalNumber(const VarDecl *VD) override {
+ if (VD->getTLSKind())
+ return ++StaticThreadlocalNumber;
+ return ++StaticLocalNumber;
+ }
+
+ unsigned getManglingNumber(const VarDecl *VD,
+ unsigned MSLocalManglingNumber) override {
+ return MSLocalManglingNumber;
+ }
+
+ unsigned getManglingNumber(const TagDecl *TD,
+ unsigned MSLocalManglingNumber) override {
+ return MSLocalManglingNumber;
+ }
+};
+
+class MicrosoftCXXABI : public CXXABI {
+ ASTContext &Context;
+ llvm::SmallDenseMap<CXXRecordDecl *, CXXConstructorDecl *> RecordToCopyCtor;
+ llvm::SmallDenseMap<std::pair<const CXXConstructorDecl *, unsigned>, Expr *>
+ CtorToDefaultArgExpr;
+
+ llvm::SmallDenseMap<TagDecl *, DeclaratorDecl *>
+ UnnamedTagDeclToDeclaratorDecl;
+ llvm::SmallDenseMap<TagDecl *, TypedefNameDecl *>
+ UnnamedTagDeclToTypedefNameDecl;
+
+public:
+ MicrosoftCXXABI(ASTContext &Ctx) : Context(Ctx) { }
+
+ std::pair<uint64_t, unsigned>
+ getMemberPointerWidthAndAlign(const MemberPointerType *MPT) const override;
+
+ CallingConv getDefaultMethodCallConv(bool isVariadic) const override {
+ if (!isVariadic &&
+ Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
+ return CC_X86ThisCall;
+ return CC_C;
+ }
+
+ bool isNearlyEmpty(const CXXRecordDecl *RD) const override {
+ llvm_unreachable("unapplicable to the MS ABI");
+ }
+
+ void addDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
+ unsigned ParmIdx, Expr *DAE) override {
+ CtorToDefaultArgExpr[std::make_pair(CD, ParmIdx)] = DAE;
+ }
+
+ Expr *getDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
+ unsigned ParmIdx) override {
+ return CtorToDefaultArgExpr[std::make_pair(CD, ParmIdx)];
+ }
+
+ const CXXConstructorDecl *
+ getCopyConstructorForExceptionObject(CXXRecordDecl *RD) override {
+ return RecordToCopyCtor[RD];
+ }
+
+ void
+ addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
+ CXXConstructorDecl *CD) override {
+ assert(CD != nullptr);
+ assert(RecordToCopyCtor[RD] == nullptr || RecordToCopyCtor[RD] == CD);
+ RecordToCopyCtor[RD] = CD;
+ }
+
+ void addTypedefNameForUnnamedTagDecl(TagDecl *TD,
+ TypedefNameDecl *DD) override {
+ TD = TD->getCanonicalDecl();
+ DD = cast<TypedefNameDecl>(DD->getCanonicalDecl());
+ TypedefNameDecl *&I = UnnamedTagDeclToTypedefNameDecl[TD];
+ if (!I)
+ I = DD;
+ }
+
+ TypedefNameDecl *getTypedefNameForUnnamedTagDecl(const TagDecl *TD) override {
+ return UnnamedTagDeclToTypedefNameDecl.lookup(
+ const_cast<TagDecl *>(TD->getCanonicalDecl()));
+ }
+
+ void addDeclaratorForUnnamedTagDecl(TagDecl *TD,
+ DeclaratorDecl *DD) override {
+ TD = TD->getCanonicalDecl();
+ DD = cast<DeclaratorDecl>(DD->getCanonicalDecl());
+ DeclaratorDecl *&I = UnnamedTagDeclToDeclaratorDecl[TD];
+ if (!I)
+ I = DD;
+ }
+
+ DeclaratorDecl *getDeclaratorForUnnamedTagDecl(const TagDecl *TD) override {
+ return UnnamedTagDeclToDeclaratorDecl.lookup(
+ const_cast<TagDecl *>(TD->getCanonicalDecl()));
+ }
+
+ MangleNumberingContext *createMangleNumberingContext() const override {
+ return new MicrosoftNumberingContext();
+ }
+};
+}
+
+// getNumBases() seems to only give us the number of direct bases, and not the
+// total. This function tells us if we inherit from anybody that uses MI, or if
+// we have a non-primary base class, which uses the multiple inheritance model.
+static bool usesMultipleInheritanceModel(const CXXRecordDecl *RD) {
+ while (RD->getNumBases() > 0) {
+ if (RD->getNumBases() > 1)
+ return true;
+ assert(RD->getNumBases() == 1);
+ const CXXRecordDecl *Base =
+ RD->bases_begin()->getType()->getAsCXXRecordDecl();
+ if (RD->isPolymorphic() && !Base->isPolymorphic())
+ return true;
+ RD = Base;
+ }
+ return false;
+}
+
+MSInheritanceAttr::Spelling CXXRecordDecl::calculateInheritanceModel() const {
+ if (!hasDefinition() || isParsingBaseSpecifiers())
+ return MSInheritanceAttr::Keyword_unspecified_inheritance;
+ if (getNumVBases() > 0)
+ return MSInheritanceAttr::Keyword_virtual_inheritance;
+ if (usesMultipleInheritanceModel(this))
+ return MSInheritanceAttr::Keyword_multiple_inheritance;
+ return MSInheritanceAttr::Keyword_single_inheritance;
+}
+
+MSInheritanceAttr::Spelling
+CXXRecordDecl::getMSInheritanceModel() const {
+ MSInheritanceAttr *IA = getAttr<MSInheritanceAttr>();
+ assert(IA && "Expected MSInheritanceAttr on the CXXRecordDecl!");
+ return IA->getSemanticSpelling();
+}
+
+MSVtorDispAttr::Mode CXXRecordDecl::getMSVtorDispMode() const {
+ if (MSVtorDispAttr *VDA = getAttr<MSVtorDispAttr>())
+ return VDA->getVtorDispMode();
+ return MSVtorDispAttr::Mode(getASTContext().getLangOpts().VtorDispMode);
+}
+
+// Returns the number of pointer and integer slots used to represent a member
+// pointer in the MS C++ ABI.
+//
+// Member function pointers have the following general form; however, fields
+// are dropped as permitted (under the MSVC interpretation) by the inheritance
+// model of the actual class.
+//
+// struct {
+// // A pointer to the member function to call. If the member function is
+// // virtual, this will be a thunk that forwards to the appropriate vftable
+// // slot.
+// void *FunctionPointerOrVirtualThunk;
+//
+// // An offset to add to the address of the vbtable pointer after
+// // (possibly) selecting the virtual base but before resolving and calling
+// // the function.
+// // Only needed if the class has any virtual bases or bases at a non-zero
+// // offset.
+// int NonVirtualBaseAdjustment;
+//
+// // The offset of the vb-table pointer within the object. Only needed for
+// // incomplete types.
+// int VBPtrOffset;
+//
+// // An offset within the vb-table that selects the virtual base containing
+// // the member. Loading from this offset produces a new offset that is
+// // added to the address of the vb-table pointer to produce the base.
+// int VirtualBaseAdjustmentOffset;
+// };
+static std::pair<unsigned, unsigned>
+getMSMemberPointerSlots(const MemberPointerType *MPT) {
+ const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+ MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
+ unsigned Ptrs = 0;
+ unsigned Ints = 0;
+ if (MPT->isMemberFunctionPointer())
+ Ptrs = 1;
+ else
+ Ints = 1;
+ if (MSInheritanceAttr::hasNVOffsetField(MPT->isMemberFunctionPointer(),
+ Inheritance))
+ Ints++;
+ if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance))
+ Ints++;
+ if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance))
+ Ints++;
+ return std::make_pair(Ptrs, Ints);
+}
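+
+// Illustrative sizes implied by the above on 32-bit x86 (4-byte pointers and
+// ints): function member pointers occupy 4/8/12/16 bytes under single,
+// multiple, virtual, and unspecified inheritance respectively; data member
+// pointers occupy 4 bytes, except 8 under virtual and 12 under unspecified
+// inheritance.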
+
+std::pair<uint64_t, unsigned> MicrosoftCXXABI::getMemberPointerWidthAndAlign(
+ const MemberPointerType *MPT) const {
+ // The nominal struct is laid out with pointers followed by ints and aligned
+ // to a pointer width if any are present and an int width otherwise.
+ const TargetInfo &Target = Context.getTargetInfo();
+ unsigned PtrSize = Target.getPointerWidth(0);
+ unsigned IntSize = Target.getIntWidth();
+
+ unsigned Ptrs, Ints;
+ std::tie(Ptrs, Ints) = getMSMemberPointerSlots(MPT);
+ uint64_t Width = Ptrs * PtrSize + Ints * IntSize;
+ unsigned Align;
+
+ // When MSVC does x86_32 record layout, it aligns aggregate member pointers to
+ // 8 bytes. However, __alignof usually returns 4 for data memptrs and 8 for
+ // function memptrs.
+ if (Ptrs + Ints > 1 && Target.getTriple().isArch32Bit())
+ Align = 64;
+ else if (Ptrs)
+ Align = Target.getPointerAlign(0);
+ else
+ Align = Target.getIntAlign();
+
+ if (Target.getTriple().isArch64Bit())
+ Width = llvm::RoundUpToAlignment(Width, Align);
+ return std::make_pair(Width, Align);
+}
+
+CXXABI *clang::CreateMicrosoftCXXABI(ASTContext &Ctx) {
+ return new MicrosoftCXXABI(Ctx);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp b/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp
new file mode 100644
index 0000000..d45232b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp
@@ -0,0 +1,2974 @@
+//===--- MicrosoftMangle.cpp - Microsoft Visual C++ Name Mangling ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ name mangling targeting the Microsoft Visual C++ ABI.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/VTableBuilder.h"
+#include "clang/Basic/ABI.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/JamCRC.h"
+
+using namespace clang;
+
+namespace {
+
+/// \brief Retrieve the declaration context that should be used when mangling
+/// the given declaration.
+static const DeclContext *getEffectiveDeclContext(const Decl *D) {
+ // The ABI assumes that lambda closure types that occur within
+ // default arguments live in the context of the function. However, due to
+ // the way in which Clang parses and creates function declarations, this is
+ // not the case: the lambda closure type ends up living in the context
+ // where the function itself resides, because the function declaration itself
+ // had not yet been created. Fix the context here.
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ if (RD->isLambda())
+ if (ParmVarDecl *ContextParam =
+ dyn_cast_or_null<ParmVarDecl>(RD->getLambdaContextDecl()))
+ return ContextParam->getDeclContext();
+ }
+
+ // Perform the same check for block literals.
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ if (ParmVarDecl *ContextParam =
+ dyn_cast_or_null<ParmVarDecl>(BD->getBlockManglingContextDecl()))
+ return ContextParam->getDeclContext();
+ }
+
+ const DeclContext *DC = D->getDeclContext();
+ if (const CapturedDecl *CD = dyn_cast<CapturedDecl>(DC))
+ return getEffectiveDeclContext(CD);
+
+ return DC;
+}
+
+static const DeclContext *getEffectiveParentContext(const DeclContext *DC) {
+ return getEffectiveDeclContext(cast<Decl>(DC));
+}
+
+static const FunctionDecl *getStructor(const NamedDecl *ND) {
+ if (const auto *FTD = dyn_cast<FunctionTemplateDecl>(ND))
+ return FTD->getTemplatedDecl();
+
+ const auto *FD = cast<FunctionDecl>(ND);
+ if (const auto *FTD = FD->getPrimaryTemplate())
+ return FTD->getTemplatedDecl();
+
+ return FD;
+}
+
+static bool isLambda(const NamedDecl *ND) {
+ const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(ND);
+ if (!Record)
+ return false;
+
+ return Record->isLambda();
+}
+
+/// MicrosoftMangleContextImpl - Overrides the default MangleContext for the
+/// Microsoft Visual C++ ABI.
+class MicrosoftMangleContextImpl : public MicrosoftMangleContext {
+ typedef std::pair<const DeclContext *, IdentifierInfo *> DiscriminatorKeyTy;
+ llvm::DenseMap<DiscriminatorKeyTy, unsigned> Discriminator;
+ llvm::DenseMap<const NamedDecl *, unsigned> Uniquifier;
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> LambdaIds;
+ llvm::DenseMap<const NamedDecl *, unsigned> SEHFilterIds;
+ llvm::DenseMap<const NamedDecl *, unsigned> SEHFinallyIds;
+
+public:
+ MicrosoftMangleContextImpl(ASTContext &Context, DiagnosticsEngine &Diags)
+ : MicrosoftMangleContext(Context, Diags) {}
+ bool shouldMangleCXXName(const NamedDecl *D) override;
+ bool shouldMangleStringLiteral(const StringLiteral *SL) override;
+ void mangleCXXName(const NamedDecl *D, raw_ostream &Out) override;
+ void mangleVirtualMemPtrThunk(const CXXMethodDecl *MD,
+ raw_ostream &) override;
+ void mangleThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk,
+ raw_ostream &) override;
+ void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ raw_ostream &) override;
+ void mangleCXXVFTable(const CXXRecordDecl *Derived,
+ ArrayRef<const CXXRecordDecl *> BasePath,
+ raw_ostream &Out) override;
+ void mangleCXXVBTable(const CXXRecordDecl *Derived,
+ ArrayRef<const CXXRecordDecl *> BasePath,
+ raw_ostream &Out) override;
+ void mangleCXXVirtualDisplacementMap(const CXXRecordDecl *SrcRD,
+ const CXXRecordDecl *DstRD,
+ raw_ostream &Out) override;
+ void mangleCXXThrowInfo(QualType T, bool IsConst, bool IsVolatile,
+ uint32_t NumEntries, raw_ostream &Out) override;
+ void mangleCXXCatchableTypeArray(QualType T, uint32_t NumEntries,
+ raw_ostream &Out) override;
+ void mangleCXXCatchableType(QualType T, const CXXConstructorDecl *CD,
+ CXXCtorType CT, uint32_t Size, uint32_t NVOffset,
+ int32_t VBPtrOffset, uint32_t VBIndex,
+ raw_ostream &Out) override;
+ void mangleCXXRTTI(QualType T, raw_ostream &Out) override;
+ void mangleCXXRTTIName(QualType T, raw_ostream &Out) override;
+ void mangleCXXRTTIBaseClassDescriptor(const CXXRecordDecl *Derived,
+ uint32_t NVOffset, int32_t VBPtrOffset,
+ uint32_t VBTableOffset, uint32_t Flags,
+ raw_ostream &Out) override;
+ void mangleCXXRTTIBaseClassArray(const CXXRecordDecl *Derived,
+ raw_ostream &Out) override;
+ void mangleCXXRTTIClassHierarchyDescriptor(const CXXRecordDecl *Derived,
+ raw_ostream &Out) override;
+ void
+ mangleCXXRTTICompleteObjectLocator(const CXXRecordDecl *Derived,
+ ArrayRef<const CXXRecordDecl *> BasePath,
+ raw_ostream &Out) override;
+ void mangleTypeName(QualType T, raw_ostream &) override;
+ void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ raw_ostream &) override;
+ void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ raw_ostream &) override;
+ void mangleReferenceTemporary(const VarDecl *, unsigned ManglingNumber,
+ raw_ostream &) override;
+ void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &Out) override;
+ void mangleThreadSafeStaticGuardVariable(const VarDecl *D, unsigned GuardNum,
+ raw_ostream &Out) override;
+ void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out) override;
+ void mangleDynamicAtExitDestructor(const VarDecl *D,
+ raw_ostream &Out) override;
+ void mangleSEHFilterExpression(const NamedDecl *EnclosingDecl,
+ raw_ostream &Out) override;
+ void mangleSEHFinallyBlock(const NamedDecl *EnclosingDecl,
+ raw_ostream &Out) override;
+ void mangleStringLiteral(const StringLiteral *SL, raw_ostream &Out) override;
+ bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
+ // Lambda closure types are already numbered.
+ if (isLambda(ND))
+ return false;
+
+ const DeclContext *DC = getEffectiveDeclContext(ND);
+ if (!DC->isFunctionOrMethod())
+ return false;
+
+ // Use the canonical number for externally visible decls.
+ if (ND->isExternallyVisible()) {
+ disc = getASTContext().getManglingNumber(ND);
+ return true;
+ }
+
+ // Anonymous tags are already numbered.
+ if (const TagDecl *Tag = dyn_cast<TagDecl>(ND)) {
+ if (!Tag->hasNameForLinkage() &&
+ !getASTContext().getDeclaratorForUnnamedTagDecl(Tag) &&
+ !getASTContext().getTypedefNameForUnnamedTagDecl(Tag))
+ return false;
+ }
+
+ // Make up a reasonable number for internal decls.
+ unsigned &discriminator = Uniquifier[ND];
+ if (!discriminator)
+ discriminator = ++Discriminator[std::make_pair(DC, ND->getIdentifier())];
+ disc = discriminator + 1;
+ return true;
+ }
+
+ unsigned getLambdaId(const CXXRecordDecl *RD) {
+ assert(RD->isLambda() && "RD must be a lambda!");
+ assert(!RD->isExternallyVisible() && "RD must not be visible!");
+ assert(RD->getLambdaManglingNumber() == 0 &&
+ "RD must not have a mangling number!");
+ std::pair<llvm::DenseMap<const CXXRecordDecl *, unsigned>::iterator, bool>
+ Result = LambdaIds.insert(std::make_pair(RD, LambdaIds.size()));
+ return Result.first->second;
+ }
+
+private:
+ void mangleInitFiniStub(const VarDecl *D, raw_ostream &Out, char CharCode);
+};
+
+/// MicrosoftCXXNameMangler - Manage the mangling of a single name for the
+/// Microsoft Visual C++ ABI.
+class MicrosoftCXXNameMangler {
+ MicrosoftMangleContextImpl &Context;
+ raw_ostream &Out;
+
+ /// The "structor" is the top-level declaration being mangled, if
+ /// that's not a template specialization; otherwise it's the pattern
+ /// for that specialization.
+ const NamedDecl *Structor;
+ unsigned StructorType;
+
+ typedef llvm::SmallVector<std::string, 10> BackRefVec;
+ BackRefVec NameBackReferences;
+
+ typedef llvm::DenseMap<const void *, unsigned> ArgBackRefMap;
+ ArgBackRefMap TypeBackReferences;
+
+ typedef std::set<int> PassObjectSizeArgsSet;
+ PassObjectSizeArgsSet PassObjectSizeArgs;
+
+ ASTContext &getASTContext() const { return Context.getASTContext(); }
+
+ // FIXME: If we add support for __ptr32/64 qualifiers, then we should push
+ // this check into mangleQualifiers().
+ const bool PointersAre64Bit;
+
+public:
+ enum QualifierMangleMode { QMM_Drop, QMM_Mangle, QMM_Escape, QMM_Result };
+
+ MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_)
+ : Context(C), Out(Out_), Structor(nullptr), StructorType(-1),
+ PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) ==
+ 64) {}
+
+ MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_,
+ const CXXConstructorDecl *D, CXXCtorType Type)
+ : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
+ PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) ==
+ 64) {}
+
+ MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_,
+ const CXXDestructorDecl *D, CXXDtorType Type)
+ : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
+ PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) ==
+ 64) {}
+
+ raw_ostream &getStream() const { return Out; }
+
+ void mangle(const NamedDecl *D, StringRef Prefix = "\01?");
+ void mangleName(const NamedDecl *ND);
+ void mangleFunctionEncoding(const FunctionDecl *FD, bool ShouldMangle);
+ void mangleVariableEncoding(const VarDecl *VD);
+ void mangleMemberDataPointer(const CXXRecordDecl *RD, const ValueDecl *VD);
+ void mangleMemberFunctionPointer(const CXXRecordDecl *RD,
+ const CXXMethodDecl *MD);
+ void mangleVirtualMemPtrThunk(
+ const CXXMethodDecl *MD,
+ const MicrosoftVTableContext::MethodVFTableLocation &ML);
+ void mangleNumber(int64_t Number);
+ void mangleTagTypeKind(TagTypeKind TK);
+ void mangleArtificalTagType(TagTypeKind TK, StringRef UnqualifiedName,
+ ArrayRef<StringRef> NestedNames = None);
+ void mangleType(QualType T, SourceRange Range,
+ QualifierMangleMode QMM = QMM_Mangle);
+ void mangleFunctionType(const FunctionType *T,
+ const FunctionDecl *D = nullptr,
+ bool ForceThisQuals = false);
+ void mangleNestedName(const NamedDecl *ND);
+
+private:
+ void mangleUnqualifiedName(const NamedDecl *ND) {
+ mangleUnqualifiedName(ND, ND->getDeclName());
+ }
+ void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name);
+ void mangleSourceName(StringRef Name);
+ void mangleOperatorName(OverloadedOperatorKind OO, SourceLocation Loc);
+ void mangleCXXDtorType(CXXDtorType T);
+ void mangleQualifiers(Qualifiers Quals, bool IsMember);
+ void mangleRefQualifier(RefQualifierKind RefQualifier);
+ void manglePointerCVQualifiers(Qualifiers Quals);
+ void manglePointerExtQualifiers(Qualifiers Quals, QualType PointeeType);
+
+ void mangleUnscopedTemplateName(const TemplateDecl *ND);
+ void
+ mangleTemplateInstantiationName(const TemplateDecl *TD,
+ const TemplateArgumentList &TemplateArgs);
+ void mangleObjCMethodName(const ObjCMethodDecl *MD);
+
+ void mangleArgumentType(QualType T, SourceRange Range);
+ void manglePassObjectSizeArg(const PassObjectSizeAttr *POSA);
+
+ // Declare manglers for every type class.
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T, \
+ Qualifiers Quals, \
+ SourceRange Range);
+#include "clang/AST/TypeNodes.def"
+#undef ABSTRACT_TYPE
+#undef NON_CANONICAL_TYPE
+#undef TYPE
+
+ void mangleType(const TagDecl *TD);
+ void mangleDecayedArrayType(const ArrayType *T);
+ void mangleArrayType(const ArrayType *T);
+ void mangleFunctionClass(const FunctionDecl *FD);
+ void mangleCallingConvention(CallingConv CC);
+ void mangleCallingConvention(const FunctionType *T);
+ void mangleIntegerLiteral(const llvm::APSInt &Number, bool IsBoolean);
+ void mangleExpression(const Expr *E);
+ void mangleThrowSpecification(const FunctionProtoType *T);
+
+ void mangleTemplateArgs(const TemplateDecl *TD,
+ const TemplateArgumentList &TemplateArgs);
+ void mangleTemplateArg(const TemplateDecl *TD, const TemplateArgument &TA,
+ const NamedDecl *Parm);
+};
+}
+
+bool MicrosoftMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ LanguageLinkage L = FD->getLanguageLinkage();
+ // Overloadable functions need mangling.
+ if (FD->hasAttr<OverloadableAttr>())
+ return true;
+
+ // The ABI expects that we would never mangle "typical" user-defined entry
+ // points regardless of visibility or freestanding-ness.
+ //
+ // N.B. This is distinct from asking about "main". "main" has a lot of
+ // special rules associated with it in the standard while these
+ // user-defined entry points are outside of the purview of the standard.
+    // For example, there can be only one definition for "main" in a
+    // standards-compliant program; however, nothing forbids the existence of
+    // wmain and WinMain in the same translation unit.
+ if (FD->isMSVCRTEntryPoint())
+ return false;
+
+ // C++ functions and those whose names are not a simple identifier need
+ // mangling.
+ if (!FD->getDeclName().isIdentifier() || L == CXXLanguageLinkage)
+ return true;
+
+ // C functions are not mangled.
+ if (L == CLanguageLinkage)
+ return false;
+ }
+
+ // Otherwise, no mangling is done outside C++ mode.
+ if (!getASTContext().getLangOpts().CPlusPlus)
+ return false;
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ // C variables are not mangled.
+ if (VD->isExternC())
+ return false;
+
+ // Variables at global scope with non-internal linkage are not mangled.
+ const DeclContext *DC = getEffectiveDeclContext(D);
+ // Check for extern variable declared locally.
+ if (DC->isFunctionOrMethod() && D->hasLinkage())
+ while (!DC->isNamespace() && !DC->isTranslationUnit())
+ DC = getEffectiveParentContext(DC);
+
+ if (DC->isTranslationUnit() && D->getFormalLinkage() == InternalLinkage &&
+ !isa<VarTemplateSpecializationDecl>(D) &&
+ D->getIdentifier() != nullptr)
+ return false;
+ }
+
+ return true;
+}
+
+bool
+MicrosoftMangleContextImpl::shouldMangleStringLiteral(const StringLiteral *SL) {
+ return true;
+}
+
+void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) {
+ // MSVC doesn't mangle C++ names the same way it mangles extern "C" names.
+ // Therefore it's really important that we don't decorate the
+ // name with leading underscores or leading/trailing at signs. So, by
+ // default, we emit an asm marker at the start so we get the name right.
+ // Callers can override this with a custom prefix.
+
+ // <mangled-name> ::= ? <name> <type-encoding>
+ Out << Prefix;
+ mangleName(D);
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ mangleFunctionEncoding(FD, Context.shouldMangleDeclName(FD));
+ else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ mangleVariableEncoding(VD);
+ else
+ llvm_unreachable("Tried to mangle unexpected NamedDecl!");
+}
+
+void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD,
+ bool ShouldMangle) {
+ // <type-encoding> ::= <function-class> <function-type>
+
+ // Since MSVC operates on the type as written and not the canonical type, it
+ // actually matters which decl we have here. MSVC appears to choose the
+ // first, since it is most likely to be the declaration in a header file.
+ FD = FD->getFirstDecl();
+
+ // We should never ever see a FunctionNoProtoType at this point.
+ // We don't even know how to mangle their types anyway :).
+ const FunctionProtoType *FT = FD->getType()->castAs<FunctionProtoType>();
+
+ // extern "C" functions can hold entities that must be mangled.
+ // As it stands, these functions still need to get expressed in the full
+ // external name. They have their class and type omitted, replaced with '9'.
+ if (ShouldMangle) {
+ // We would like to mangle all extern "C" functions using this additional
+ // component but this would break compatibility with MSVC's behavior.
+ // Instead, do this when we know that compatibility isn't important (in
+ // other words, when it is an overloaded extern "C" function).
+ if (FD->isExternC() && FD->hasAttr<OverloadableAttr>())
+ Out << "$$J0";
+
+ mangleFunctionClass(FD);
+
+ mangleFunctionType(FT, FD);
+ } else {
+ Out << '9';
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) {
+ // <type-encoding> ::= <storage-class> <variable-type>
+ // <storage-class> ::= 0 # private static member
+ // ::= 1 # protected static member
+ // ::= 2 # public static member
+ // ::= 3 # global
+ // ::= 4 # static local
+
+ // The first character in the encoding (after the name) is the storage class.
+ if (VD->isStaticDataMember()) {
+ // If it's a static member, it also encodes the access level.
+ switch (VD->getAccess()) {
+ default:
+ case AS_private: Out << '0'; break;
+ case AS_protected: Out << '1'; break;
+ case AS_public: Out << '2'; break;
+ }
+ }
+ else if (!VD->isStaticLocal())
+ Out << '3';
+ else
+ Out << '4';
+ // Now mangle the type.
+ // <variable-type> ::= <type> <cvr-qualifiers>
+ // ::= <type> <pointee-cvr-qualifiers> # pointers, references
+ // Pointers and references are odd. The type of 'int * const foo;' gets
+ // mangled as 'QAHA' instead of 'PAHB', for example.
+ SourceRange SR = VD->getSourceRange();
+ QualType Ty = VD->getType();
+ if (Ty->isPointerType() || Ty->isReferenceType() ||
+ Ty->isMemberPointerType()) {
+ mangleType(Ty, SR, QMM_Drop);
+ manglePointerExtQualifiers(
+ Ty.getDesugaredType(getASTContext()).getLocalQualifiers(), QualType());
+ if (const MemberPointerType *MPT = Ty->getAs<MemberPointerType>()) {
+ mangleQualifiers(MPT->getPointeeType().getQualifiers(), true);
+ // Member pointers are suffixed with a back reference to the member
+ // pointer's class name.
+ mangleName(MPT->getClass()->getAsCXXRecordDecl());
+ } else
+ mangleQualifiers(Ty->getPointeeType().getQualifiers(), false);
+ } else if (const ArrayType *AT = getASTContext().getAsArrayType(Ty)) {
+ // Global arrays are funny, too.
+ mangleDecayedArrayType(AT);
+ if (AT->getElementType()->isArrayType())
+ Out << 'A';
+ else
+ mangleQualifiers(Ty.getQualifiers(), false);
+ } else {
+ mangleType(Ty, SR, QMM_Drop);
+ mangleQualifiers(Ty.getQualifiers(), false);
+ }
+}
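+
+// Worked examples with hypothetical declarations: a global "int x;" encodes
+// as "?x@@3HA" (3 = global, H = int, A = unqualified), and a public static
+// member "static int S::s;" as "?s@S@@2HA".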
+
+void MicrosoftCXXNameMangler::mangleMemberDataPointer(const CXXRecordDecl *RD,
+ const ValueDecl *VD) {
+ // <member-data-pointer> ::= <integer-literal>
+ // ::= $F <number> <number>
+ // ::= $G <number> <number> <number>
+
+ int64_t FieldOffset;
+ int64_t VBTableOffset;
+ MSInheritanceAttr::Spelling IM = RD->getMSInheritanceModel();
+ if (VD) {
+ FieldOffset = getASTContext().getFieldOffset(VD);
+ assert(FieldOffset % getASTContext().getCharWidth() == 0 &&
+ "cannot take address of bitfield");
+ FieldOffset /= getASTContext().getCharWidth();
+
+ VBTableOffset = 0;
+
+ if (IM == MSInheritanceAttr::Keyword_virtual_inheritance)
+ FieldOffset -= getASTContext().getOffsetOfBaseWithVBPtr(RD).getQuantity();
+ } else {
+ FieldOffset = RD->nullFieldOffsetIsZero() ? 0 : -1;
+
+ VBTableOffset = -1;
+ }
+
+ char Code = '\0';
+ switch (IM) {
+ case MSInheritanceAttr::Keyword_single_inheritance: Code = '0'; break;
+ case MSInheritanceAttr::Keyword_multiple_inheritance: Code = '0'; break;
+ case MSInheritanceAttr::Keyword_virtual_inheritance: Code = 'F'; break;
+ case MSInheritanceAttr::Keyword_unspecified_inheritance: Code = 'G'; break;
+ }
+
+ Out << '$' << Code;
+
+ mangleNumber(FieldOffset);
+
+ // The C++ standard doesn't allow base-to-derived member pointer conversions
+ // in template parameter contexts, so the vbptr offset of data member pointers
+ // is always zero.
+ if (MSInheritanceAttr::hasVBPtrOffsetField(IM))
+ mangleNumber(0);
+ if (MSInheritanceAttr::hasVBTableOffsetField(IM))
+ mangleNumber(VBTableOffset);
+}
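+
+// Illustrative example: under single inheritance, "&S::m" with m at byte
+// offset 4 encodes as "$0" plus the offset number, i.e. "$03" (mangleNumber
+// renders 4 as the digit '3').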
+
+void
+MicrosoftCXXNameMangler::mangleMemberFunctionPointer(const CXXRecordDecl *RD,
+ const CXXMethodDecl *MD) {
+ // <member-function-pointer> ::= $1? <name>
+ // ::= $H? <name> <number>
+ // ::= $I? <name> <number> <number>
+ // ::= $J? <name> <number> <number> <number>
+
+ MSInheritanceAttr::Spelling IM = RD->getMSInheritanceModel();
+
+ char Code = '\0';
+ switch (IM) {
+ case MSInheritanceAttr::Keyword_single_inheritance: Code = '1'; break;
+ case MSInheritanceAttr::Keyword_multiple_inheritance: Code = 'H'; break;
+ case MSInheritanceAttr::Keyword_virtual_inheritance: Code = 'I'; break;
+ case MSInheritanceAttr::Keyword_unspecified_inheritance: Code = 'J'; break;
+ }
+
+ // If non-virtual, mangle the name. If virtual, mangle as a virtual memptr
+ // thunk.
+ uint64_t NVOffset = 0;
+ uint64_t VBTableOffset = 0;
+ uint64_t VBPtrOffset = 0;
+ if (MD) {
+ Out << '$' << Code << '?';
+ if (MD->isVirtual()) {
+ MicrosoftVTableContext *VTContext =
+ cast<MicrosoftVTableContext>(getASTContext().getVTableContext());
+ const MicrosoftVTableContext::MethodVFTableLocation &ML =
+ VTContext->getMethodVFTableLocation(GlobalDecl(MD));
+ mangleVirtualMemPtrThunk(MD, ML);
+ NVOffset = ML.VFPtrOffset.getQuantity();
+ VBTableOffset = ML.VBTableIndex * 4;
+ if (ML.VBase) {
+ const ASTRecordLayout &Layout = getASTContext().getASTRecordLayout(RD);
+ VBPtrOffset = Layout.getVBPtrOffset().getQuantity();
+ }
+ } else {
+ mangleName(MD);
+ mangleFunctionEncoding(MD, /*ShouldMangle=*/true);
+ }
+
+ if (VBTableOffset == 0 &&
+ IM == MSInheritanceAttr::Keyword_virtual_inheritance)
+ NVOffset -= getASTContext().getOffsetOfBaseWithVBPtr(RD).getQuantity();
+ } else {
+ // Null single inheritance member functions are encoded as a simple nullptr.
+ if (IM == MSInheritanceAttr::Keyword_single_inheritance) {
+ Out << "$0A@";
+ return;
+ }
+ if (IM == MSInheritanceAttr::Keyword_unspecified_inheritance)
+ VBTableOffset = -1;
+ Out << '$' << Code;
+ }
+
+ if (MSInheritanceAttr::hasNVOffsetField(/*IsMemberFunction=*/true, IM))
+ mangleNumber(static_cast<uint32_t>(NVOffset));
+ if (MSInheritanceAttr::hasVBPtrOffsetField(IM))
+ mangleNumber(VBPtrOffset);
+ if (MSInheritanceAttr::hasVBTableOffsetField(IM))
+ mangleNumber(VBTableOffset);
+}
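+
+// Illustrative example: under single inheritance, a non-virtual "&S::f" used
+// as a template argument encodes as "$1?" followed by f's mangled name and
+// type, e.g. "$1?f@S@@QAEXXZ" for a public void() member on x86.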
+
+void MicrosoftCXXNameMangler::mangleVirtualMemPtrThunk(
+ const CXXMethodDecl *MD,
+ const MicrosoftVTableContext::MethodVFTableLocation &ML) {
+ // Get the vftable offset.
+ CharUnits PointerWidth = getASTContext().toCharUnitsFromBits(
+ getASTContext().getTargetInfo().getPointerWidth(0));
+ uint64_t OffsetInVFTable = ML.Index * PointerWidth.getQuantity();
+
+ Out << "?_9";
+ mangleName(MD->getParent());
+ Out << "$B";
+ mangleNumber(OffsetInVFTable);
+ Out << 'A';
+ mangleCallingConvention(MD->getType()->getAs<FunctionProtoType>());
+}
+
+void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) {
+ // <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @
+
+ // Always start with the unqualified name.
+ mangleUnqualifiedName(ND);
+
+ mangleNestedName(ND);
+
+ // Terminate the whole name with an '@'.
+ Out << '@';
+}
+
+void MicrosoftCXXNameMangler::mangleNumber(int64_t Number) {
+ // <non-negative integer> ::= A@ # when Number == 0
+ // ::= <decimal digit> # when 1 <= Number <= 10
+  //                        ::= <hex digit>+ @ # when Number > 10
+ //
+ // <number> ::= [?] <non-negative integer>
+
+ uint64_t Value = static_cast<uint64_t>(Number);
+ if (Number < 0) {
+ Value = -Value;
+ Out << '?';
+ }
+
+ if (Value == 0)
+ Out << "A@";
+ else if (Value >= 1 && Value <= 10)
+ Out << (Value - 1);
+ else {
+ // Numbers that are not encoded as decimal digits are represented as nibbles
+ // in the range of ASCII characters 'A' to 'P'.
+ // The number 0x123450 would be encoded as 'BCDEFA'
+ char EncodedNumberBuffer[sizeof(uint64_t) * 2];
+ MutableArrayRef<char> BufferRef(EncodedNumberBuffer);
+ MutableArrayRef<char>::reverse_iterator I = BufferRef.rbegin();
+ for (; Value != 0; Value >>= 4)
+ *I++ = 'A' + (Value & 0xf);
+ Out.write(I.base(), I - BufferRef.rbegin());
+ Out << '@';
+ }
+}
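+
+// Worked examples of the scheme above: 0 -> "A@", 1 -> "0", 10 -> "9",
+// 11 -> "L@" (hex B, one nibble), 16 -> "BA@" (hex 10), -2 -> "?1".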
+
+static const TemplateDecl *
+isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) {
+ // Check if we have a function template.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ if (const TemplateDecl *TD = FD->getPrimaryTemplate()) {
+ TemplateArgs = FD->getTemplateSpecializationArgs();
+ return TD;
+ }
+ }
+
+ // Check if we have a class template.
+ if (const ClassTemplateSpecializationDecl *Spec =
+ dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
+ TemplateArgs = &Spec->getTemplateArgs();
+ return Spec->getSpecializedTemplate();
+ }
+
+ // Check if we have a variable template.
+ if (const VarTemplateSpecializationDecl *Spec =
+ dyn_cast<VarTemplateSpecializationDecl>(ND)) {
+ TemplateArgs = &Spec->getTemplateArgs();
+ return Spec->getSpecializedTemplate();
+ }
+
+ return nullptr;
+}
+
+void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
+ DeclarationName Name) {
+ // <unqualified-name> ::= <operator-name>
+ // ::= <ctor-dtor-name>
+ // ::= <source-name>
+ // ::= <template-name>
+
+ // Check if we have a template.
+ const TemplateArgumentList *TemplateArgs = nullptr;
+ if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ // Function templates aren't considered for name back referencing. This
+ // makes sense since function templates aren't likely to occur multiple
+ // times in a symbol.
+ if (isa<FunctionTemplateDecl>(TD)) {
+ mangleTemplateInstantiationName(TD, *TemplateArgs);
+ Out << '@';
+ return;
+ }
+
+ // Here comes the tricky thing: if we need to mangle something like
+ // void foo(A::X<Y>, B::X<Y>),
+ // the X<Y> part is aliased. However, if you need to mangle
+ // void foo(A::X<A::Y>, A::X<B::Y>),
+ // the A::X<> part is not aliased.
+ // That said, from the mangler's perspective we have a structure like this:
+ // namespace[s] -> type[ -> template-parameters]
+ // but from the Clang perspective we have
+ // type [ -> template-parameters]
+ // \-> namespace[s]
+    // To resolve this, we create a new mangler, mangle the same type (without
+    // a namespace suffix) to a string using the extra mangler, and then use
+    // the mangled type name as a key to check the mangling of different types
+    // for aliasing.
+
+ llvm::SmallString<64> TemplateMangling;
+ llvm::raw_svector_ostream Stream(TemplateMangling);
+ MicrosoftCXXNameMangler Extra(Context, Stream);
+ Extra.mangleTemplateInstantiationName(TD, *TemplateArgs);
+
+ mangleSourceName(TemplateMangling);
+ return;
+ }
+
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier: {
+ if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
+ mangleSourceName(II->getName());
+ break;
+ }
+
+ // Otherwise, an anonymous entity. We must have a declaration.
+ assert(ND && "mangling empty name without declaration");
+
+ if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
+ if (NS->isAnonymousNamespace()) {
+ Out << "?A@";
+ break;
+ }
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
+ // We must have an anonymous union or struct declaration.
+ const CXXRecordDecl *RD = VD->getType()->getAsCXXRecordDecl();
+ assert(RD && "expected variable decl to have a record type");
+ // Anonymous types with no tag or typedef get the name of their
+ // declarator mangled in. If they have no declarator, number them with
+ // a $S prefix.
+ llvm::SmallString<64> Name("$S");
+ // Get a unique id for the anonymous struct.
+ Name += llvm::utostr(Context.getAnonymousStructId(RD) + 1);
+ mangleSourceName(Name.str());
+ break;
+ }
+
+ // We must have an anonymous struct.
+ const TagDecl *TD = cast<TagDecl>(ND);
+ if (const TypedefNameDecl *D = TD->getTypedefNameForAnonDecl()) {
+ assert(TD->getDeclContext() == D->getDeclContext() &&
+ "Typedef should not be in another decl context!");
+ assert(D->getDeclName().getAsIdentifierInfo() &&
+ "Typedef was not named!");
+ mangleSourceName(D->getDeclName().getAsIdentifierInfo()->getName());
+ break;
+ }
+
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(TD)) {
+ if (Record->isLambda()) {
+ llvm::SmallString<10> Name("<lambda_");
+ unsigned LambdaId;
+ if (Record->getLambdaManglingNumber())
+ LambdaId = Record->getLambdaManglingNumber();
+ else
+ LambdaId = Context.getLambdaId(Record);
+
+ Name += llvm::utostr(LambdaId);
+ Name += ">";
+
+ mangleSourceName(Name);
+ break;
+ }
+ }
+
+ llvm::SmallString<64> Name("<unnamed-type-");
+ if (DeclaratorDecl *DD =
+ Context.getASTContext().getDeclaratorForUnnamedTagDecl(TD)) {
+ // Anonymous types without a name for linkage purposes have their
+ // declarator mangled in if they have one.
+ Name += DD->getName();
+ } else if (TypedefNameDecl *TND =
+ Context.getASTContext().getTypedefNameForUnnamedTagDecl(
+ TD)) {
+ // Anonymous types without a name for linkage purposes have their
+ // associate typedef mangled in if they have one.
+ Name += TND->getName();
+ } else {
+ // Otherwise, number the types using a $S prefix.
+ Name += "$S";
+ Name += llvm::utostr(Context.getAnonymousStructId(TD) + 1);
+ }
+ Name += ">";
+ mangleSourceName(Name.str());
+ break;
+ }
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ llvm_unreachable("Can't mangle Objective-C selector names here!");
+
+ case DeclarationName::CXXConstructorName:
+ if (Structor == getStructor(ND)) {
+ if (StructorType == Ctor_CopyingClosure) {
+ Out << "?_O";
+ return;
+ }
+ if (StructorType == Ctor_DefaultClosure) {
+ Out << "?_F";
+ return;
+ }
+ }
+ Out << "?0";
+ return;
+
+ case DeclarationName::CXXDestructorName:
+ if (ND == Structor)
+ // If the named decl is the C++ destructor we're mangling,
+ // use the type we were given.
+ mangleCXXDtorType(static_cast<CXXDtorType>(StructorType));
+ else
+ // Otherwise, use the base destructor name. This is relevant if a
+ // class with a destructor is declared within a destructor.
+ mangleCXXDtorType(Dtor_Base);
+ break;
+
+ case DeclarationName::CXXConversionFunctionName:
+ // <operator-name> ::= ?B # (cast)
+ // The target type is encoded as the return type.
+ Out << "?B";
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ mangleOperatorName(Name.getCXXOverloadedOperator(), ND->getLocation());
+ break;
+
+ case DeclarationName::CXXLiteralOperatorName: {
+ Out << "?__K";
+ mangleSourceName(Name.getCXXLiteralIdentifier()->getName());
+ break;
+ }
+
+ case DeclarationName::CXXUsingDirective:
+ llvm_unreachable("Can't mangle a using directive name!");
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleNestedName(const NamedDecl *ND) {
+ // <postfix> ::= <unqualified-name> [<postfix>]
+ // ::= <substitution> [<postfix>]
+ const DeclContext *DC = getEffectiveDeclContext(ND);
+
+ while (!DC->isTranslationUnit()) {
+ if (isa<TagDecl>(ND) || isa<VarDecl>(ND)) {
+ unsigned Disc;
+ if (Context.getNextDiscriminator(ND, Disc)) {
+ Out << '?';
+ mangleNumber(Disc);
+ Out << '?';
+ }
+ }
+
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID =
+ Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle a local inside this block yet");
+ Diags.Report(BD->getLocation(), DiagID);
+
+ // FIXME: This is completely, utterly, wrong; see ItaniumMangle
+ // for how this should be done.
+ Out << "__block_invoke" << Context.getBlockId(BD, false);
+ Out << '@';
+      // Advance to the parent context before continuing; a bare 'continue'
+      // would revisit the same BlockDecl forever.
+      DC = DC->getParent();
+      continue;
+ } else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC)) {
+ mangleObjCMethodName(Method);
+ } else if (isa<NamedDecl>(DC)) {
+ ND = cast<NamedDecl>(DC);
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ mangle(FD, "?");
+ break;
+ } else
+ mangleUnqualifiedName(ND);
+ }
+ DC = DC->getParent();
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
+ // Microsoft uses the names on the case labels for these dtor variants. Clang
+ // uses the Itanium terminology internally. Everything in this ABI delegates
+ // towards the base dtor.
+ switch (T) {
+ // <operator-name> ::= ?1 # destructor
+ case Dtor_Base: Out << "?1"; return;
+ // <operator-name> ::= ?_D # vbase destructor
+ case Dtor_Complete: Out << "?_D"; return;
+ // <operator-name> ::= ?_G # scalar deleting destructor
+ case Dtor_Deleting: Out << "?_G"; return;
+ // <operator-name> ::= ?_E # vector deleting destructor
+ // FIXME: Add a vector deleting dtor type. It goes in the vtable, so we need
+ // it.
+ case Dtor_Comdat:
+ llvm_unreachable("not expecting a COMDAT");
+ }
+ llvm_unreachable("Unsupported dtor type?");
+}
+
+void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO,
+ SourceLocation Loc) {
+ switch (OO) {
+ // ?0 # constructor
+ // ?1 # destructor
+ // <operator-name> ::= ?2 # new
+ case OO_New: Out << "?2"; break;
+ // <operator-name> ::= ?3 # delete
+ case OO_Delete: Out << "?3"; break;
+ // <operator-name> ::= ?4 # =
+ case OO_Equal: Out << "?4"; break;
+ // <operator-name> ::= ?5 # >>
+ case OO_GreaterGreater: Out << "?5"; break;
+ // <operator-name> ::= ?6 # <<
+ case OO_LessLess: Out << "?6"; break;
+ // <operator-name> ::= ?7 # !
+ case OO_Exclaim: Out << "?7"; break;
+ // <operator-name> ::= ?8 # ==
+ case OO_EqualEqual: Out << "?8"; break;
+ // <operator-name> ::= ?9 # !=
+ case OO_ExclaimEqual: Out << "?9"; break;
+ // <operator-name> ::= ?A # []
+ case OO_Subscript: Out << "?A"; break;
+ // ?B # conversion
+ // <operator-name> ::= ?C # ->
+ case OO_Arrow: Out << "?C"; break;
+ // <operator-name> ::= ?D # *
+ case OO_Star: Out << "?D"; break;
+ // <operator-name> ::= ?E # ++
+ case OO_PlusPlus: Out << "?E"; break;
+ // <operator-name> ::= ?F # --
+ case OO_MinusMinus: Out << "?F"; break;
+ // <operator-name> ::= ?G # -
+ case OO_Minus: Out << "?G"; break;
+ // <operator-name> ::= ?H # +
+ case OO_Plus: Out << "?H"; break;
+ // <operator-name> ::= ?I # &
+ case OO_Amp: Out << "?I"; break;
+ // <operator-name> ::= ?J # ->*
+ case OO_ArrowStar: Out << "?J"; break;
+ // <operator-name> ::= ?K # /
+ case OO_Slash: Out << "?K"; break;
+ // <operator-name> ::= ?L # %
+ case OO_Percent: Out << "?L"; break;
+ // <operator-name> ::= ?M # <
+ case OO_Less: Out << "?M"; break;
+ // <operator-name> ::= ?N # <=
+ case OO_LessEqual: Out << "?N"; break;
+ // <operator-name> ::= ?O # >
+ case OO_Greater: Out << "?O"; break;
+ // <operator-name> ::= ?P # >=
+ case OO_GreaterEqual: Out << "?P"; break;
+ // <operator-name> ::= ?Q # ,
+ case OO_Comma: Out << "?Q"; break;
+ // <operator-name> ::= ?R # ()
+ case OO_Call: Out << "?R"; break;
+ // <operator-name> ::= ?S # ~
+ case OO_Tilde: Out << "?S"; break;
+ // <operator-name> ::= ?T # ^
+ case OO_Caret: Out << "?T"; break;
+ // <operator-name> ::= ?U # |
+ case OO_Pipe: Out << "?U"; break;
+ // <operator-name> ::= ?V # &&
+ case OO_AmpAmp: Out << "?V"; break;
+ // <operator-name> ::= ?W # ||
+ case OO_PipePipe: Out << "?W"; break;
+ // <operator-name> ::= ?X # *=
+ case OO_StarEqual: Out << "?X"; break;
+ // <operator-name> ::= ?Y # +=
+ case OO_PlusEqual: Out << "?Y"; break;
+ // <operator-name> ::= ?Z # -=
+ case OO_MinusEqual: Out << "?Z"; break;
+ // <operator-name> ::= ?_0 # /=
+ case OO_SlashEqual: Out << "?_0"; break;
+ // <operator-name> ::= ?_1 # %=
+ case OO_PercentEqual: Out << "?_1"; break;
+ // <operator-name> ::= ?_2 # >>=
+ case OO_GreaterGreaterEqual: Out << "?_2"; break;
+ // <operator-name> ::= ?_3 # <<=
+ case OO_LessLessEqual: Out << "?_3"; break;
+ // <operator-name> ::= ?_4 # &=
+ case OO_AmpEqual: Out << "?_4"; break;
+ // <operator-name> ::= ?_5 # |=
+ case OO_PipeEqual: Out << "?_5"; break;
+ // <operator-name> ::= ?_6 # ^=
+ case OO_CaretEqual: Out << "?_6"; break;
+ // ?_7 # vftable
+ // ?_8 # vbtable
+ // ?_9 # vcall
+ // ?_A # typeof
+ // ?_B # local static guard
+ // ?_C # string
+ // ?_D # vbase destructor
+ // ?_E # vector deleting destructor
+ // ?_F # default constructor closure
+ // ?_G # scalar deleting destructor
+ // ?_H # vector constructor iterator
+ // ?_I # vector destructor iterator
+ // ?_J # vector vbase constructor iterator
+ // ?_K # virtual displacement map
+ // ?_L # eh vector constructor iterator
+ // ?_M # eh vector destructor iterator
+ // ?_N # eh vector vbase constructor iterator
+ // ?_O # copy constructor closure
+ // ?_P<name> # udt returning <name>
+ // ?_Q # <unknown>
+ // ?_R0 # RTTI Type Descriptor
+ // ?_R1 # RTTI Base Class Descriptor at (a,b,c,d)
+ // ?_R2 # RTTI Base Class Array
+ // ?_R3 # RTTI Class Hierarchy Descriptor
+ // ?_R4 # RTTI Complete Object Locator
+ // ?_S # local vftable
+ // ?_T # local vftable constructor closure
+ // <operator-name> ::= ?_U # new[]
+ case OO_Array_New: Out << "?_U"; break;
+ // <operator-name> ::= ?_V # delete[]
+ case OO_Array_Delete: Out << "?_V"; break;
+
+ case OO_Conditional: {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this conditional operator yet");
+ Diags.Report(Loc, DiagID);
+ break;
+ }
+
+ case OO_Coawait: {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this operator co_await yet");
+ Diags.Report(Loc, DiagID);
+ break;
+ }
+
+ case OO_None:
+ case NUM_OVERLOADED_OPERATORS:
+ llvm_unreachable("Not an overloaded operator");
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleSourceName(StringRef Name) {
+ // <source name> ::= <identifier> @
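+ // A repeated identifier is emitted as its zero-based back-reference slot
+ // instead; e.g. (sketch) if "Foo" was the first name mangled, a later
+ // occurrence is emitted as the digit '0' rather than "Foo@".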
+ BackRefVec::iterator Found =
+ std::find(NameBackReferences.begin(), NameBackReferences.end(), Name);
+ if (Found == NameBackReferences.end()) {
+ if (NameBackReferences.size() < 10)
+ NameBackReferences.push_back(Name);
+ Out << Name << '@';
+ } else {
+ Out << (Found - NameBackReferences.begin());
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
+ Context.mangleObjCMethodName(MD, Out);
+}
+
+void MicrosoftCXXNameMangler::mangleTemplateInstantiationName(
+ const TemplateDecl *TD, const TemplateArgumentList &TemplateArgs) {
+ // <template-name> ::= <unscoped-template-name> <template-args>
+ // ::= <substitution>
+ // Always start with the unqualified name.
+
+ // Templates have their own context for back references.
+ ArgBackRefMap OuterArgsContext;
+ BackRefVec OuterTemplateContext;
+ PassObjectSizeArgsSet OuterPassObjectSizeArgs;
+ NameBackReferences.swap(OuterTemplateContext);
+ TypeBackReferences.swap(OuterArgsContext);
+ PassObjectSizeArgs.swap(OuterPassObjectSizeArgs);
+
+ mangleUnscopedTemplateName(TD);
+ mangleTemplateArgs(TD, TemplateArgs);
+
+ // Restore the previous back reference contexts.
+ NameBackReferences.swap(OuterTemplateContext);
+ TypeBackReferences.swap(OuterArgsContext);
+ PassObjectSizeArgs.swap(OuterPassObjectSizeArgs);
+}
+
+void
+MicrosoftCXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *TD) {
+ // <unscoped-template-name> ::= ?$ <unqualified-name>
+ Out << "?$";
+ mangleUnqualifiedName(TD);
+}
+
+void MicrosoftCXXNameMangler::mangleIntegerLiteral(const llvm::APSInt &Value,
+ bool IsBoolean) {
+ // <integer-literal> ::= $0 <number>
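+ // e.g. (a sketch of the <number> encoding) the value 5 mangles as "$04",
+ // 0 as "$0A@", and a boolean true as "$00".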
+ Out << "$0";
+ // Make sure booleans are encoded as 0/1.
+ if (IsBoolean && Value.getBoolValue())
+ mangleNumber(1);
+ else if (Value.isSigned())
+ mangleNumber(Value.getSExtValue());
+ else
+ mangleNumber(Value.getZExtValue());
+}
+
+void MicrosoftCXXNameMangler::mangleExpression(const Expr *E) {
+ // See if this is a constant expression.
+ llvm::APSInt Value;
+ if (E->isIntegerConstantExpr(Value, Context.getASTContext())) {
+ mangleIntegerLiteral(Value, E->getType()->isBooleanType());
+ return;
+ }
+
+ // Look through no-op casts like template parameter substitutions.
+ E = E->IgnoreParenNoopCasts(Context.getASTContext());
+
+ const CXXUuidofExpr *UE = nullptr;
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ if (UO->getOpcode() == UO_AddrOf)
+ UE = dyn_cast<CXXUuidofExpr>(UO->getSubExpr());
+ } else
+ UE = dyn_cast<CXXUuidofExpr>(E);
+
+ if (UE) {
+ // If we had to peek through an address-of operator, treat this like we are
+ // dealing with a pointer type. Otherwise, treat it like a const reference.
+ //
+ // N.B. This matches up with the handling of TemplateArgument::Declaration
+ // in mangleTemplateArg
+ if (UE == E)
+ Out << "$E?";
+ else
+ Out << "$1?";
+
+ // This CXXUuidofExpr is mangled as if it were actually a VarDecl declared
+ // as: const __s_GUID _GUID_{lowercase UUID with underscores}
+ StringRef Uuid = UE->getUuidAsStringRef(Context.getASTContext());
+ std::string Name = "_GUID_" + Uuid.lower();
+ std::replace(Name.begin(), Name.end(), '-', '_');
+
+ mangleSourceName(Name);
+ // Terminate the whole name with an '@'.
+ Out << '@';
+ // It's a global variable.
+ Out << '3';
+ // It's a struct called __s_GUID.
+ mangleArtificalTagType(TTK_Struct, "__s_GUID");
+ // It's const.
+ Out << 'B';
+ return;
+ }
+
+ // As bad as this diagnostic is, it's better than crashing.
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error, "cannot yet mangle expression type %0");
+ Diags.Report(E->getExprLoc(), DiagID) << E->getStmtClassName()
+ << E->getSourceRange();
+}
+
+void MicrosoftCXXNameMangler::mangleTemplateArgs(
+ const TemplateDecl *TD, const TemplateArgumentList &TemplateArgs) {
+ // <template-args> ::= <template-arg>+
+ const TemplateParameterList *TPL = TD->getTemplateParameters();
+ assert(TPL->size() == TemplateArgs.size() &&
+ "size mismatch between args and parms!");
+
+ unsigned Idx = 0;
+ for (const TemplateArgument &TA : TemplateArgs.asArray())
+ mangleTemplateArg(TD, TA, TPL->getParam(Idx++));
+}
+
+void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
+ const TemplateArgument &TA,
+ const NamedDecl *Parm) {
+ // <template-arg> ::= <type>
+ // ::= <integer-literal>
+ // ::= <member-data-pointer>
+ // ::= <member-function-pointer>
+ // ::= $E? <name> <type-encoding>
+ // ::= $1? <name> <type-encoding>
+ // ::= $0A@
+ // ::= <template-args>
+
+ switch (TA.getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("Can't mangle null template arguments!");
+ case TemplateArgument::TemplateExpansion:
+ llvm_unreachable("Can't mangle template expansion arguments!");
+ case TemplateArgument::Type: {
+ QualType T = TA.getAsType();
+ mangleType(T, SourceRange(), QMM_Escape);
+ break;
+ }
+ case TemplateArgument::Declaration: {
+ const NamedDecl *ND = cast<NamedDecl>(TA.getAsDecl());
+ if (isa<FieldDecl>(ND) || isa<IndirectFieldDecl>(ND)) {
+ mangleMemberDataPointer(
+ cast<CXXRecordDecl>(ND->getDeclContext())->getMostRecentDecl(),
+ cast<ValueDecl>(ND));
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
+ if (MD && MD->isInstance()) {
+ mangleMemberFunctionPointer(MD->getParent()->getMostRecentDecl(), MD);
+ } else {
+ Out << "$1?";
+ mangleName(FD);
+ mangleFunctionEncoding(FD, /*ShouldMangle=*/true);
+ }
+ } else {
+ mangle(ND, TA.getParamTypeForDecl()->isReferenceType() ? "$E?" : "$1?");
+ }
+ break;
+ }
+ case TemplateArgument::Integral:
+ mangleIntegerLiteral(TA.getAsIntegral(),
+ TA.getIntegralType()->isBooleanType());
+ break;
+ case TemplateArgument::NullPtr: {
+ QualType T = TA.getNullPtrType();
+ if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
+ const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+ if (MPT->isMemberFunctionPointerType() &&
+ !isa<FunctionTemplateDecl>(TD)) {
+ mangleMemberFunctionPointer(RD, nullptr);
+ return;
+ }
+ if (MPT->isMemberDataPointer()) {
+ if (!isa<FunctionTemplateDecl>(TD)) {
+ mangleMemberDataPointer(RD, nullptr);
+ return;
+ }
+ // nullptr data pointers are always represented with a single field
+ // which is initialized with either 0 or -1. Why -1? Well, we need to
+ // distinguish the case where the data member is at offset zero in the
+ // record.
+ // However, we are free to use 0 *if* we would use multiple fields for
+ // non-nullptr member pointers.
+ if (!RD->nullFieldOffsetIsZero()) {
+ mangleIntegerLiteral(llvm::APSInt::get(-1), /*IsBoolean=*/false);
+ return;
+ }
+ }
+ }
+ mangleIntegerLiteral(llvm::APSInt::getUnsigned(0), /*IsBoolean=*/false);
+ break;
+ }
+ case TemplateArgument::Expression:
+ mangleExpression(TA.getAsExpr());
+ break;
+ case TemplateArgument::Pack: {
+ ArrayRef<TemplateArgument> TemplateArgs = TA.getPackAsArray();
+ if (TemplateArgs.empty()) {
+ if (isa<TemplateTypeParmDecl>(Parm) ||
+ isa<TemplateTemplateParmDecl>(Parm))
+ // MSVC 2015 changed the mangling for empty expanded template packs;
+ // use the old mangling for link compatibility with older versions.
+ Out << (Context.getASTContext().getLangOpts().isCompatibleWithMSVC(
+ LangOptions::MSVC2015)
+ ? "$$V"
+ : "$$$V");
+ else if (isa<NonTypeTemplateParmDecl>(Parm))
+ Out << "$S";
+ else
+ llvm_unreachable("unexpected template parameter decl!");
+ } else {
+ for (const TemplateArgument &PA : TemplateArgs)
+ mangleTemplateArg(TD, PA, Parm);
+ }
+ break;
+ }
+ case TemplateArgument::Template: {
+ const NamedDecl *ND =
+ TA.getAsTemplate().getAsTemplateDecl()->getTemplatedDecl();
+ if (const auto *TD = dyn_cast<TagDecl>(ND)) {
+ mangleType(TD);
+ } else if (isa<TypeAliasDecl>(ND)) {
+ Out << "$$Y";
+ mangleName(ND);
+ } else {
+ llvm_unreachable("unexpected template template NamedDecl!");
+ }
+ break;
+ }
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals,
+ bool IsMember) {
+ // <cvr-qualifiers> ::= [E] [F] [I] <base-cvr-qualifiers>
+ // 'E' means __ptr64 (32-bit only); 'F' means __unaligned (32/64-bit only);
+ // 'I' means __restrict (32/64-bit).
+ // Note that the MSVC __restrict keyword isn't the same as the C99 restrict
+ // keyword!
+ // <base-cvr-qualifiers> ::= A # near
+ // ::= B # near const
+ // ::= C # near volatile
+ // ::= D # near const volatile
+ // ::= E # far (16-bit)
+ // ::= F # far const (16-bit)
+ // ::= G # far volatile (16-bit)
+ // ::= H # far const volatile (16-bit)
+ // ::= I # huge (16-bit)
+ // ::= J # huge const (16-bit)
+ // ::= K # huge volatile (16-bit)
+ // ::= L # huge const volatile (16-bit)
+ // ::= M <basis> # based
+ // ::= N <basis> # based const
+ // ::= O <basis> # based volatile
+ // ::= P <basis> # based const volatile
+ // ::= Q # near member
+ // ::= R # near const member
+ // ::= S # near volatile member
+ // ::= T # near const volatile member
+ // ::= U # far member (16-bit)
+ // ::= V # far const member (16-bit)
+ // ::= W # far volatile member (16-bit)
+ // ::= X # far const volatile member (16-bit)
+ // ::= Y # huge member (16-bit)
+ // ::= Z # huge const member (16-bit)
+ // ::= 0 # huge volatile member (16-bit)
+ // ::= 1 # huge const volatile member (16-bit)
+ // ::= 2 <basis> # based member
+ // ::= 3 <basis> # based const member
+ // ::= 4 <basis> # based volatile member
+ // ::= 5 <basis> # based const volatile member
+ // ::= 6 # near function (pointers only)
+ // ::= 7 # far function (pointers only)
+ // ::= 8 # near method (pointers only)
+ // ::= 9 # far method (pointers only)
+ // ::= _A <basis> # based function (pointers only)
+ // ::= _B <basis> # based function (far?) (pointers only)
+ // ::= _C <basis> # based method (pointers only)
+ // ::= _D <basis> # based method (far?) (pointers only)
+ // ::= _E # block (Clang)
+ // <basis> ::= 0 # __based(void)
+ // ::= 1 # __based(segment)?
+ // ::= 2 <name> # __based(name)
+ // ::= 3 # ?
+ // ::= 4 # ?
+ // ::= 5 # not really based
+ bool HasConst = Quals.hasConst(),
+ HasVolatile = Quals.hasVolatile();
+
+ if (!IsMember) {
+ if (HasConst && HasVolatile) {
+ Out << 'D';
+ } else if (HasVolatile) {
+ Out << 'C';
+ } else if (HasConst) {
+ Out << 'B';
+ } else {
+ Out << 'A';
+ }
+ } else {
+ if (HasConst && HasVolatile) {
+ Out << 'T';
+ } else if (HasVolatile) {
+ Out << 'S';
+ } else if (HasConst) {
+ Out << 'R';
+ } else {
+ Out << 'Q';
+ }
+ }
+
+ // FIXME: For now, just drop all extension qualifiers on the floor.
+}
+
+void
+MicrosoftCXXNameMangler::mangleRefQualifier(RefQualifierKind RefQualifier) {
+ // <ref-qualifier> ::= G # lvalue reference
+ // ::= H # rvalue-reference
+ switch (RefQualifier) {
+ case RQ_None:
+ break;
+
+ case RQ_LValue:
+ Out << 'G';
+ break;
+
+ case RQ_RValue:
+ Out << 'H';
+ break;
+ }
+}
+
+void MicrosoftCXXNameMangler::manglePointerExtQualifiers(Qualifiers Quals,
+ QualType PointeeType) {
+ bool HasRestrict = Quals.hasRestrict();
+ if (PointersAre64Bit &&
+ (PointeeType.isNull() || !PointeeType->isFunctionType()))
+ Out << 'E';
+
+ if (HasRestrict)
+ Out << 'I';
+}
+
+void MicrosoftCXXNameMangler::manglePointerCVQualifiers(Qualifiers Quals) {
+ // <pointer-cv-qualifiers> ::= P # no qualifiers
+ // ::= Q # const
+ // ::= R # volatile
+ // ::= S # const volatile
+ bool HasConst = Quals.hasConst(),
+ HasVolatile = Quals.hasVolatile();
+
+ if (HasConst && HasVolatile) {
+ Out << 'S';
+ } else if (HasVolatile) {
+ Out << 'R';
+ } else if (HasConst) {
+ Out << 'Q';
+ } else {
+ Out << 'P';
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleArgumentType(QualType T,
+ SourceRange Range) {
+ // MSVC will backreference two canonically equivalent types that have slightly
+ // different manglings when mangled alone.
+
+ // Decayed types do not match up with non-decayed versions of the same type.
+ //
+ // e.g.
+ // void (*x)(void) will not form a backreference with void x(void)
+ void *TypePtr;
+ if (const auto *DT = T->getAs<DecayedType>()) {
+ QualType OriginalType = DT->getOriginalType();
+ // All decayed ArrayTypes should be treated identically, as if they were
+ // a decayed IncompleteArrayType.
+ if (const auto *AT = getASTContext().getAsArrayType(OriginalType))
+ OriginalType = getASTContext().getIncompleteArrayType(
+ AT->getElementType(), AT->getSizeModifier(),
+ AT->getIndexTypeCVRQualifiers());
+
+ TypePtr = OriginalType.getCanonicalType().getAsOpaquePtr();
+ // If the original parameter was textually written as an array,
+ // instead treat the decayed parameter like it's const.
+ //
+ // e.g.
+ // int [] -> int * const
+ if (OriginalType->isArrayType())
+ T = T.withConst();
+ } else {
+ TypePtr = T.getCanonicalType().getAsOpaquePtr();
+ }
+
+ ArgBackRefMap::iterator Found = TypeBackReferences.find(TypePtr);
+
+ if (Found == TypeBackReferences.end()) {
+ size_t OutSizeBefore = Out.tell();
+
+ mangleType(T, Range, QMM_Drop);
+
+ // See if it's worth creating a back reference.
+ // Only types longer than one character are considered, and only 10
+ // back-reference slots are available:
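+ // e.g. (a sketch, x86 __cdecl) "void f(S a, S b)" mangles as
+ // "?f@@YAXUS@@0@Z": the second "US@@" is replaced by back reference '0'.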
+ bool LongerThanOneChar = (Out.tell() - OutSizeBefore > 1);
+ if (LongerThanOneChar && TypeBackReferences.size() < 10) {
+ size_t Size = TypeBackReferences.size();
+ TypeBackReferences[TypePtr] = Size;
+ }
+ } else {
+ Out << Found->second;
+ }
+}
+
+void MicrosoftCXXNameMangler::manglePassObjectSizeArg(
+ const PassObjectSizeAttr *POSA) {
+ int Type = POSA->getType();
+
+ auto Iter = PassObjectSizeArgs.insert(Type).first;
+ auto *TypePtr = (const void *)&*Iter;
+ ArgBackRefMap::iterator Found = TypeBackReferences.find(TypePtr);
+
+ if (Found == TypeBackReferences.end()) {
+ mangleArtificalTagType(TTK_Enum, "__pass_object_size" + llvm::utostr(Type),
+ {"__clang"});
+
+ if (TypeBackReferences.size() < 10) {
+ size_t Size = TypeBackReferences.size();
+ TypeBackReferences[TypePtr] = Size;
+ }
+ } else {
+ Out << Found->second;
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleType(QualType T, SourceRange Range,
+ QualifierMangleMode QMM) {
+ // Don't use the canonical types. MSVC includes things like 'const' on
+ // pointer arguments to function pointers that canonicalization strips away.
+ T = T.getDesugaredType(getASTContext());
+ Qualifiers Quals = T.getLocalQualifiers();
+ if (const ArrayType *AT = getASTContext().getAsArrayType(T)) {
+ // If there were any Quals, getAsArrayType() pushed them onto the array
+ // element type.
+ if (QMM == QMM_Mangle)
+ Out << 'A';
+ else if (QMM == QMM_Escape || QMM == QMM_Result)
+ Out << "$$B";
+ mangleArrayType(AT);
+ return;
+ }
+
+ bool IsPointer = T->isAnyPointerType() || T->isMemberPointerType() ||
+ T->isReferenceType() || T->isBlockPointerType();
+
+ switch (QMM) {
+ case QMM_Drop:
+ break;
+ case QMM_Mangle:
+ if (const FunctionType *FT = dyn_cast<FunctionType>(T)) {
+ Out << '6';
+ mangleFunctionType(FT);
+ return;
+ }
+ mangleQualifiers(Quals, false);
+ break;
+ case QMM_Escape:
+ if (!IsPointer && Quals) {
+ Out << "$$C";
+ mangleQualifiers(Quals, false);
+ }
+ break;
+ case QMM_Result:
+ if ((!IsPointer && Quals) || isa<TagType>(T)) {
+ Out << '?';
+ mangleQualifiers(Quals, false);
+ }
+ break;
+ }
+
+ const Type *ty = T.getTypePtr();
+
+ switch (ty->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT) \
+ case Type::CLASS: \
+ llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
+ return;
+#define TYPE(CLASS, PARENT) \
+ case Type::CLASS: \
+ mangleType(cast<CLASS##Type>(ty), Quals, Range); \
+ break;
+#include "clang/AST/TypeNodes.def"
+#undef ABSTRACT_TYPE
+#undef NON_CANONICAL_TYPE
+#undef TYPE
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
+ SourceRange Range) {
+ // <type> ::= <builtin-type>
+ // <builtin-type> ::= X # void
+ // ::= C # signed char
+ // ::= D # char
+ // ::= E # unsigned char
+ // ::= F # short
+ // ::= G # unsigned short (or wchar_t if it's not a builtin)
+ // ::= H # int
+ // ::= I # unsigned int
+ // ::= J # long
+ // ::= K # unsigned long
+ // L # <none>
+ // ::= M # float
+ // ::= N # double
+ // ::= O # long double (__float80 is mangled differently)
+ // ::= _J # long long, __int64
+ // ::= _K # unsigned long long, __int64
+ // ::= _L # __int128
+ // ::= _M # unsigned __int128
+ // ::= _N # bool
+ // _O # <array in parameter>
+ // ::= _T # __float80 (Intel)
+ // ::= _W # wchar_t
+ // ::= _Z # __float80 (Digital Mars)
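+ // e.g. (a sketch) "int f(double)" mangles as "?f@@YAHN@Z": 'H' for the int
+ // return type, 'N' for the double parameter.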
+ switch (T->getKind()) {
+ case BuiltinType::Void:
+ Out << 'X';
+ break;
+ case BuiltinType::SChar:
+ Out << 'C';
+ break;
+ case BuiltinType::Char_U:
+ case BuiltinType::Char_S:
+ Out << 'D';
+ break;
+ case BuiltinType::UChar:
+ Out << 'E';
+ break;
+ case BuiltinType::Short:
+ Out << 'F';
+ break;
+ case BuiltinType::UShort:
+ Out << 'G';
+ break;
+ case BuiltinType::Int:
+ Out << 'H';
+ break;
+ case BuiltinType::UInt:
+ Out << 'I';
+ break;
+ case BuiltinType::Long:
+ Out << 'J';
+ break;
+ case BuiltinType::ULong:
+ Out << 'K';
+ break;
+ case BuiltinType::Float:
+ Out << 'M';
+ break;
+ case BuiltinType::Double:
+ Out << 'N';
+ break;
+ // TODO: Determine size and mangle accordingly
+ case BuiltinType::LongDouble:
+ Out << 'O';
+ break;
+ case BuiltinType::LongLong:
+ Out << "_J";
+ break;
+ case BuiltinType::ULongLong:
+ Out << "_K";
+ break;
+ case BuiltinType::Int128:
+ Out << "_L";
+ break;
+ case BuiltinType::UInt128:
+ Out << "_M";
+ break;
+ case BuiltinType::Bool:
+ Out << "_N";
+ break;
+ case BuiltinType::Char16:
+ Out << "_S";
+ break;
+ case BuiltinType::Char32:
+ Out << "_U";
+ break;
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ Out << "_W";
+ break;
+
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+ case BuiltinType::Dependent:
+ llvm_unreachable("placeholder types shouldn't get to name mangling");
+
+ case BuiltinType::ObjCId:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "objc_object");
+ break;
+ case BuiltinType::ObjCClass:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "objc_class");
+ break;
+ case BuiltinType::ObjCSel:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "objc_selector");
+ break;
+
+ case BuiltinType::OCLImage1d:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image1d");
+ break;
+ case BuiltinType::OCLImage1dArray:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image1darray");
+ break;
+ case BuiltinType::OCLImage1dBuffer:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image1dbuffer");
+ break;
+ case BuiltinType::OCLImage2d:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image2d");
+ break;
+ case BuiltinType::OCLImage2dArray:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image2darray");
+ break;
+ case BuiltinType::OCLImage2dDepth:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image2ddepth");
+ break;
+ case BuiltinType::OCLImage2dArrayDepth:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image2darraydepth");
+ break;
+ case BuiltinType::OCLImage2dMSAA:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image2dmsaa");
+ break;
+ case BuiltinType::OCLImage2dArrayMSAA:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image2darraymsaa");
+ break;
+ case BuiltinType::OCLImage2dMSAADepth:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image2dmsaadepth");
+ break;
+ case BuiltinType::OCLImage2dArrayMSAADepth:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image2darraymsaadepth");
+ break;
+ case BuiltinType::OCLImage3d:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image3d");
+ break;
+ case BuiltinType::OCLSampler:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_sampler");
+ break;
+ case BuiltinType::OCLEvent:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_event");
+ break;
+ case BuiltinType::OCLClkEvent:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_clkevent");
+ break;
+ case BuiltinType::OCLQueue:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_queue");
+ break;
+ case BuiltinType::OCLNDRange:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_ndrange");
+ break;
+ case BuiltinType::OCLReserveID:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_reserveid");
+ break;
+
+ case BuiltinType::NullPtr:
+ Out << "$$T";
+ break;
+
+ case BuiltinType::Half: {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error, "cannot mangle this built-in %0 type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << T->getName(Context.getASTContext().getPrintingPolicy()) << Range;
+ break;
+ }
+ }
+}
+
+// <type> ::= <function-type>
+void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T, Qualifiers,
+ SourceRange) {
+ // Structors only appear in decls, so at this point we know it's not a
+ // structor type.
+ // FIXME: This may not be lambda-friendly.
+ if (T->getTypeQuals() || T->getRefQualifier() != RQ_None) {
+ Out << "$$A8@@";
+ mangleFunctionType(T, /*D=*/nullptr, /*ForceThisQuals=*/true);
+ } else {
+ Out << "$$A6";
+ mangleFunctionType(T);
+ }
+}
+void MicrosoftCXXNameMangler::mangleType(const FunctionNoProtoType *T,
+ Qualifiers, SourceRange) {
+ Out << "$$A6";
+ mangleFunctionType(T);
+}
+
+void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
+ const FunctionDecl *D,
+ bool ForceThisQuals) {
+ // <function-type> ::= <this-cvr-qualifiers> <calling-convention>
+ // <return-type> <argument-list> <throw-spec>
+ const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(T);
+
+ SourceRange Range;
+ if (D) Range = D->getSourceRange();
+
+ bool IsStructor = false, HasThisQuals = ForceThisQuals, IsCtorClosure = false;
+ CallingConv CC = T->getCallConv();
+ if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(D)) {
+ if (MD->isInstance())
+ HasThisQuals = true;
+ if (isa<CXXDestructorDecl>(MD)) {
+ IsStructor = true;
+ } else if (isa<CXXConstructorDecl>(MD)) {
+ IsStructor = true;
+ IsCtorClosure = (StructorType == Ctor_CopyingClosure ||
+ StructorType == Ctor_DefaultClosure) &&
+ getStructor(MD) == Structor;
+ if (IsCtorClosure)
+ CC = getASTContext().getDefaultCallingConvention(
+ /*IsVariadic=*/false, /*IsCXXMethod=*/true);
+ }
+ }
+
+ // If this is a C++ instance method, mangle the CVR qualifiers for the
+ // this pointer.
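+ // e.g. (a sketch, x86) "int S::f() const" mangles as "?f@S@@QBEHXZ", where
+ // 'B' encodes the const 'this' pointer.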
+ if (HasThisQuals) {
+ Qualifiers Quals = Qualifiers::fromCVRMask(Proto->getTypeQuals());
+ manglePointerExtQualifiers(Quals, /*PointeeType=*/QualType());
+ mangleRefQualifier(Proto->getRefQualifier());
+ mangleQualifiers(Quals, /*IsMember=*/false);
+ }
+
+ mangleCallingConvention(CC);
+
+ // <return-type> ::= <type>
+ // ::= @ # structors (they have no declared return type)
+ if (IsStructor) {
+ if (isa<CXXDestructorDecl>(D) && D == Structor &&
+ StructorType == Dtor_Deleting) {
+ // The scalar deleting destructor takes an extra int argument.
+ // However, the FunctionType generated has 0 arguments.
+ // FIXME: This is a temporary hack.
+ // Maybe we should fix the FunctionType creation instead?
+ Out << (PointersAre64Bit ? "PEAXI@Z" : "PAXI@Z");
+ return;
+ }
+ if (IsCtorClosure) {
+ // Default constructor closure and copy constructor closure both return
+ // void.
+ Out << 'X';
+
+ if (StructorType == Ctor_DefaultClosure) {
+ // Default constructor closure always has no arguments.
+ Out << 'X';
+ } else if (StructorType == Ctor_CopyingClosure) {
+ // Copy constructor closure always takes an unqualified reference.
+ mangleArgumentType(getASTContext().getLValueReferenceType(
+ Proto->getParamType(0)
+ ->getAs<LValueReferenceType>()
+ ->getPointeeType(),
+ /*SpelledAsLValue=*/true),
+ Range);
+ Out << '@';
+ } else {
+ llvm_unreachable("unexpected constructor closure!");
+ }
+ Out << 'Z';
+ return;
+ }
+ Out << '@';
+ } else {
+ QualType ResultType = T->getReturnType();
+ if (const auto *AT =
+ dyn_cast_or_null<AutoType>(ResultType->getContainedAutoType())) {
+ Out << '?';
+ mangleQualifiers(ResultType.getLocalQualifiers(), /*IsMember=*/false);
+ Out << '?';
+ assert(AT->getKeyword() != AutoTypeKeyword::GNUAutoType &&
+ "shouldn't need to mangle __auto_type!");
+ mangleSourceName(AT->isDecltypeAuto() ? "<decltype-auto>" : "<auto>");
+ Out << '@';
+ } else {
+ if (ResultType->isVoidType())
+ ResultType = ResultType.getUnqualifiedType();
+ mangleType(ResultType, Range, QMM_Result);
+ }
+ }
+
+ // <argument-list> ::= X # void
+ // ::= <type>+ @
+ // ::= <type>* Z # varargs
+ if (!Proto) {
+ // Function types without prototypes can arise when mangling a function type
+ // within an overloadable function in C. We mangle these as the absence of
+ // any parameter types (not even an empty parameter list).
+ Out << '@';
+ } else if (Proto->getNumParams() == 0 && !Proto->isVariadic()) {
+ Out << 'X';
+ } else {
+ // This happens for function pointer type arguments, for example.
+ for (unsigned I = 0, E = Proto->getNumParams(); I != E; ++I) {
+ mangleArgumentType(Proto->getParamType(I), Range);
+ // Mangle each pass_object_size parameter as if it's a parameter of enum
+ // type passed directly after the parameter with the pass_object_size
+ // attribute. The aforementioned enum's name is __pass_object_size, and we
+ // pretend it resides in a top-level namespace called __clang.
+ //
+ // FIXME: Is there a defined extension notation for the MS ABI, or is it
+ // necessary to just cross our fingers and hope this type+namespace
+ // combination doesn't conflict with anything?
+ if (D)
+ if (const auto *P = D->getParamDecl(I)->getAttr<PassObjectSizeAttr>())
+ manglePassObjectSizeArg(P);
+ }
+ // <builtin-type> ::= Z # ellipsis
+ if (Proto->isVariadic())
+ Out << 'Z';
+ else
+ Out << '@';
+ }
+
+ mangleThrowSpecification(Proto);
+}
+
+void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) {
+ // <function-class> ::= <member-function> E? # E designates a 64-bit 'this'
+ // # pointer. In 64-bit mode *all*
+ // # 'this' pointers are 64-bit.
+ // ::= <global-function>
+ // <member-function> ::= A # private: near
+ // ::= B # private: far
+ // ::= C # private: static near
+ // ::= D # private: static far
+ // ::= E # private: virtual near
+ // ::= F # private: virtual far
+ // ::= I # protected: near
+ // ::= J # protected: far
+ // ::= K # protected: static near
+ // ::= L # protected: static far
+ // ::= M # protected: virtual near
+ // ::= N # protected: virtual far
+ // ::= Q # public: near
+ // ::= R # public: far
+ // ::= S # public: static near
+ // ::= T # public: static far
+ // ::= U # public: virtual near
+ // ::= V # public: virtual far
+ // <global-function> ::= Y # global near
+ // ::= Z # global far
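+ // e.g. (a sketch) "static void S::g()" begins "?g@S@@SA": 'S' for public
+ // static near, 'A' for __cdecl.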
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ switch (MD->getAccess()) {
+ case AS_none:
+ llvm_unreachable("Unsupported access specifier");
+ case AS_private:
+ if (MD->isStatic())
+ Out << 'C';
+ else if (MD->isVirtual())
+ Out << 'E';
+ else
+ Out << 'A';
+ break;
+ case AS_protected:
+ if (MD->isStatic())
+ Out << 'K';
+ else if (MD->isVirtual())
+ Out << 'M';
+ else
+ Out << 'I';
+ break;
+ case AS_public:
+ if (MD->isStatic())
+ Out << 'S';
+ else if (MD->isVirtual())
+ Out << 'U';
+ else
+ Out << 'Q';
+ }
+ } else {
+ Out << 'Y';
+ }
+}
+void MicrosoftCXXNameMangler::mangleCallingConvention(CallingConv CC) {
+ // <calling-convention> ::= A # __cdecl
+ // ::= B # __export __cdecl
+ // ::= C # __pascal
+ // ::= D # __export __pascal
+ // ::= E # __thiscall
+ // ::= F # __export __thiscall
+ // ::= G # __stdcall
+ // ::= H # __export __stdcall
+ // ::= I # __fastcall
+ // ::= J # __export __fastcall
+ // ::= Q # __vectorcall
+ // The 'export' calling conventions are from a bygone era
+ // (*cough*Win16*cough*) when functions were declared for export with
+ // that keyword. (It didn't actually export them, it just made them so
+ // that they could be in a DLL and somebody from another module could call
+ // them.)
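+ // e.g. (a sketch) "void __stdcall f()" on x86 mangles as "?f@@YGXXZ", where
+ // 'G' selects __stdcall.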
+
+ switch (CC) {
+ default:
+ llvm_unreachable("Unsupported CC for mangling");
+ case CC_X86_64Win64:
+ case CC_X86_64SysV:
+ case CC_C: Out << 'A'; break;
+ case CC_X86Pascal: Out << 'C'; break;
+ case CC_X86ThisCall: Out << 'E'; break;
+ case CC_X86StdCall: Out << 'G'; break;
+ case CC_X86FastCall: Out << 'I'; break;
+ case CC_X86VectorCall: Out << 'Q'; break;
+ }
+}
+void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) {
+ mangleCallingConvention(T->getCallConv());
+}
+void MicrosoftCXXNameMangler::mangleThrowSpecification(
+ const FunctionProtoType *FT) {
+ // <throw-spec> ::= Z # throw(...) (default)
+ // ::= @ # throw() or __declspec/__attribute__((nothrow))
+ // ::= <type>+
+ // NOTE: Since the Microsoft compiler ignores throw specifications, they are
+ // all actually mangled as 'Z'. (They're ignored because their associated
+ // functionality isn't implemented, and probably never will be.)
+ Out << 'Z';
+}
+
+void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T,
+ Qualifiers, SourceRange Range) {
+ // Probably should be mangled as a template instantiation; need to see what
+ // VC does first.
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this unresolved dependent type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+// <type> ::= <union-type> | <struct-type> | <class-type> | <enum-type>
+// <union-type> ::= T <name>
+// <struct-type> ::= U <name>
+// <class-type> ::= V <name>
+// <enum-type> ::= W4 <name>
+void MicrosoftCXXNameMangler::mangleTagTypeKind(TagTypeKind TTK) {
+ switch (TTK) {
+ case TTK_Union:
+ Out << 'T';
+ break;
+ case TTK_Struct:
+ case TTK_Interface:
+ Out << 'U';
+ break;
+ case TTK_Class:
+ Out << 'V';
+ break;
+ case TTK_Enum:
+ Out << "W4";
+ break;
+ }
+}
+void MicrosoftCXXNameMangler::mangleType(const EnumType *T, Qualifiers,
+ SourceRange) {
+ mangleType(cast<TagType>(T)->getDecl());
+}
+void MicrosoftCXXNameMangler::mangleType(const RecordType *T, Qualifiers,
+ SourceRange) {
+ mangleType(cast<TagType>(T)->getDecl());
+}
+void MicrosoftCXXNameMangler::mangleType(const TagDecl *TD) {
+ mangleTagTypeKind(TD->getTagKind());
+ mangleName(TD);
+}
+void MicrosoftCXXNameMangler::mangleArtificalTagType(
+ TagTypeKind TK, StringRef UnqualifiedName, ArrayRef<StringRef> NestedNames) {
+ // <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @
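+ // e.g. (a sketch) TTK_Struct with UnqualifiedName "objc_object" emits
+ // "Uobjc_object@@"; with NestedNames {"__clang"} a name comes out as
+ // "U<name>@__clang@@".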
+ mangleTagTypeKind(TK);
+
+ // Always start with the unqualified name.
+ mangleSourceName(UnqualifiedName);
+
+ for (auto I = NestedNames.rbegin(), E = NestedNames.rend(); I != E; ++I)
+ mangleSourceName(*I);
+
+ // Terminate the whole name with an '@'.
+ Out << '@';
+}
+
+// <type> ::= <array-type>
+// <array-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers>
+// [Y <dimension-count> <dimension>+]
+// <element-type> # as global, E is never required
+// It's supposed to be the other way around, but for some strange reason, it
+// isn't. Today this behavior is retained for the sole purpose of backwards
+// compatibility.
+void MicrosoftCXXNameMangler::mangleDecayedArrayType(const ArrayType *T) {
+ // This isn't a recursive mangling, so now we have to do it all in this
+ // one call.
+ manglePointerCVQualifiers(T->getElementType().getQualifiers());
+ mangleType(T->getElementType(), SourceRange());
+}
+void MicrosoftCXXNameMangler::mangleType(const ConstantArrayType *T, Qualifiers,
+ SourceRange) {
+ llvm_unreachable("Should have been special cased");
+}
+void MicrosoftCXXNameMangler::mangleType(const VariableArrayType *T, Qualifiers,
+ SourceRange) {
+ llvm_unreachable("Should have been special cased");
+}
+void MicrosoftCXXNameMangler::mangleType(const DependentSizedArrayType *T,
+ Qualifiers, SourceRange) {
+ llvm_unreachable("Should have been special cased");
+}
+void MicrosoftCXXNameMangler::mangleType(const IncompleteArrayType *T,
+ Qualifiers, SourceRange) {
+ llvm_unreachable("Should have been special cased");
+}
+void MicrosoftCXXNameMangler::mangleArrayType(const ArrayType *T) {
+ QualType ElementTy(T, 0);
+ SmallVector<llvm::APInt, 3> Dimensions;
+ for (;;) {
+ if (ElementTy->isConstantArrayType()) {
+ const ConstantArrayType *CAT =
+ getASTContext().getAsConstantArrayType(ElementTy);
+ Dimensions.push_back(CAT->getSize());
+ ElementTy = CAT->getElementType();
+ } else if (ElementTy->isIncompleteArrayType()) {
+ const IncompleteArrayType *IAT =
+ getASTContext().getAsIncompleteArrayType(ElementTy);
+ Dimensions.push_back(llvm::APInt(32, 0));
+ ElementTy = IAT->getElementType();
+ } else if (ElementTy->isVariableArrayType()) {
+ const VariableArrayType *VAT =
+ getASTContext().getAsVariableArrayType(ElementTy);
+ Dimensions.push_back(llvm::APInt(32, 0));
+ ElementTy = VAT->getElementType();
+ } else if (ElementTy->isDependentSizedArrayType()) {
+ // The dependent expression has to be folded into a constant (TODO).
+ const DependentSizedArrayType *DSAT =
+ getASTContext().getAsDependentSizedArrayType(ElementTy);
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this dependent-length array yet");
+ Diags.Report(DSAT->getSizeExpr()->getExprLoc(), DiagID)
+ << DSAT->getBracketsRange();
+ return;
+ } else {
+ break;
+ }
+ }
+ Out << 'Y';
+ // <dimension-count> ::= <number> # number of extra dimensions
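+ // e.g. (a sketch) "int [2][3]" should mangle as "Y112H": dimension count 2
+ // ('1'), extents 2 ('1') and 3 ('2'), then 'H' for the int element type.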
+ mangleNumber(Dimensions.size());
+ for (const llvm::APInt &Dimension : Dimensions)
+ mangleNumber(Dimension.getLimitedValue());
+ mangleType(ElementTy, SourceRange(), QMM_Escape);
+}
+
+// <type> ::= <pointer-to-member-type>
+// <pointer-to-member-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers>
+// <class name> <type>
+void MicrosoftCXXNameMangler::mangleType(const MemberPointerType *T, Qualifiers Quals,
+ SourceRange Range) {
+ QualType PointeeType = T->getPointeeType();
+ manglePointerCVQualifiers(Quals);
+ manglePointerExtQualifiers(Quals, PointeeType);
+ if (const FunctionProtoType *FPT = PointeeType->getAs<FunctionProtoType>()) {
+ Out << '8';
+ mangleName(T->getClass()->castAs<RecordType>()->getDecl());
+ mangleFunctionType(FPT, nullptr, true);
+ } else {
+ mangleQualifiers(PointeeType.getQualifiers(), true);
+ mangleName(T->getClass()->castAs<RecordType>()->getDecl());
+ mangleType(PointeeType, Range, QMM_Drop);
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TemplateTypeParmType *T,
+ Qualifiers, SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this template type parameter type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const SubstTemplateTypeParmPackType *T,
+ Qualifiers, SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this substituted parameter pack yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+// <type> ::= <pointer-type>
+// <pointer-type> ::= E? <pointer-cvr-qualifiers> <cvr-qualifiers> <type>
+// # the E is required for 64-bit non-static pointers
+void MicrosoftCXXNameMangler::mangleType(const PointerType *T, Qualifiers Quals,
+ SourceRange Range) {
+ QualType PointeeType = T->getPointeeType();
+ manglePointerCVQualifiers(Quals);
+ manglePointerExtQualifiers(Quals, PointeeType);
+ mangleType(PointeeType, Range);
+}
+void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T,
+ Qualifiers Quals, SourceRange Range) {
+ QualType PointeeType = T->getPointeeType();
+ manglePointerCVQualifiers(Quals);
+ manglePointerExtQualifiers(Quals, PointeeType);
+ // Object pointers never have qualifiers.
+ Out << 'A';
+ mangleType(PointeeType, Range);
+}
+
+// <type> ::= <reference-type>
+// <reference-type> ::= A E? <cvr-qualifiers> <type>
+// # the E is required for 64-bit non-static lvalue references
+void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T,
+ Qualifiers Quals, SourceRange Range) {
+ QualType PointeeType = T->getPointeeType();
+ assert(!Quals.hasConst() && !Quals.hasVolatile() && "unexpected qualifier!");
+ Out << 'A';
+ manglePointerExtQualifiers(Quals, PointeeType);
+ mangleType(PointeeType, Range);
+}
+
+// <type> ::= <r-value-reference-type>
+// <r-value-reference-type> ::= $$Q E? <cvr-qualifiers> <type>
+// # the E is required for 64-bit non-static rvalue references
+void MicrosoftCXXNameMangler::mangleType(const RValueReferenceType *T,
+ Qualifiers Quals, SourceRange Range) {
+ QualType PointeeType = T->getPointeeType();
+ assert(!Quals.hasConst() && !Quals.hasVolatile() && "unexpected qualifier!");
+ Out << "$$Q";
+ manglePointerExtQualifiers(Quals, PointeeType);
+ mangleType(PointeeType, Range);
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ComplexType *T, Qualifiers,
+ SourceRange Range) {
+ QualType ElementType = T->getElementType();
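+ // _Complex T is encoded as the artificial template "__clang::_Complex<T>";
+ // e.g. (sketch) "_Complex float" should come out as "U?$_Complex@M@__clang@@".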
+
+ llvm::SmallString<64> TemplateMangling;
+ llvm::raw_svector_ostream Stream(TemplateMangling);
+ MicrosoftCXXNameMangler Extra(Context, Stream);
+ Stream << "?$";
+ Extra.mangleSourceName("_Complex");
+ Extra.mangleType(ElementType, Range, QMM_Escape);
+
+ mangleArtificalTagType(TTK_Struct, TemplateMangling, {"__clang"});
+}
+
+void MicrosoftCXXNameMangler::mangleType(const VectorType *T, Qualifiers Quals,
+ SourceRange Range) {
+ const BuiltinType *ET = T->getElementType()->getAs<BuiltinType>();
+ assert(ET && "vectors with non-builtin elements are unsupported");
+ uint64_t Width = getASTContext().getTypeSize(T);
+ // Pattern match exactly the typedefs in our intrinsic headers. Anything that
+ // doesn't match the Intel types uses a custom mangling below.
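+ // e.g. (a sketch) a 128-bit vector of float is mangled as the union "__m128",
+ // i.e. "T__m128@@".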
+ size_t OutSizeBefore = Out.tell();
+ llvm::Triple::ArchType AT =
+ getASTContext().getTargetInfo().getTriple().getArch();
+ if (AT == llvm::Triple::x86 || AT == llvm::Triple::x86_64) {
+ if (Width == 64 && ET->getKind() == BuiltinType::LongLong) {
+ mangleArtificalTagType(TTK_Union, "__m64");
+ } else if (Width >= 128) {
+ if (ET->getKind() == BuiltinType::Float)
+ mangleArtificalTagType(TTK_Union, "__m" + llvm::utostr(Width));
+ else if (ET->getKind() == BuiltinType::LongLong)
+ mangleArtificalTagType(TTK_Union, "__m" + llvm::utostr(Width) + 'i');
+ else if (ET->getKind() == BuiltinType::Double)
+ mangleArtificalTagType(TTK_Struct, "__m" + llvm::utostr(Width) + 'd');
+ }
+ }
+
+ bool IsBuiltin = Out.tell() != OutSizeBefore;
+ if (!IsBuiltin) {
+ // The MS ABI doesn't have a special mangling for vector types, so we define
+ // our own mangling to handle uses of __vector_size__ on user-specified
+ // types, and for extensions like __v4sf.
+
+ llvm::SmallString<64> TemplateMangling;
+ llvm::raw_svector_ostream Stream(TemplateMangling);
+ MicrosoftCXXNameMangler Extra(Context, Stream);
+ Stream << "?$";
+ Extra.mangleSourceName("__vector");
+ Extra.mangleType(QualType(ET, 0), Range, QMM_Escape);
+ Extra.mangleIntegerLiteral(llvm::APSInt::getUnsigned(T->getNumElements()),
+ /*IsBoolean=*/false);
+
+ mangleArtificalTagType(TTK_Union, TemplateMangling, {"__clang"});
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ExtVectorType *T,
+ Qualifiers Quals, SourceRange Range) {
+ mangleType(static_cast<const VectorType *>(T), Quals, Range);
+}
+void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T,
+ Qualifiers, SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this dependent-sized extended vector type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T, Qualifiers,
+ SourceRange) {
+ // ObjC interfaces have structs underlying them.
+ mangleTagTypeKind(TTK_Struct);
+ mangleName(T->getDecl());
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T, Qualifiers,
+ SourceRange Range) {
+ // We don't allow overloading by different protocol qualification,
+ // so mangling them isn't necessary.
+ mangleType(T->getBaseType(), Range);
+}
+
+void MicrosoftCXXNameMangler::mangleType(const BlockPointerType *T,
+ Qualifiers Quals, SourceRange Range) {
+ QualType PointeeType = T->getPointeeType();
+ manglePointerCVQualifiers(Quals);
+ manglePointerExtQualifiers(Quals, PointeeType);
+
+ Out << "_E";
+
+ mangleFunctionType(PointeeType->castAs<FunctionProtoType>());
+}
+
+void MicrosoftCXXNameMangler::mangleType(const InjectedClassNameType *,
+ Qualifiers, SourceRange) {
+ llvm_unreachable("Cannot mangle injected class name type.");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TemplateSpecializationType *T,
+ Qualifiers, SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this template specialization type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const DependentNameType *T, Qualifiers,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this dependent name type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(
+ const DependentTemplateSpecializationType *T, Qualifiers,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this dependent template specialization type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const PackExpansionType *T, Qualifiers,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this pack expansion yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TypeOfType *T, Qualifiers,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this typeof(type) yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TypeOfExprType *T, Qualifiers,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this typeof(expression) yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const DecltypeType *T, Qualifiers,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this decltype() yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const UnaryTransformType *T,
+ Qualifiers, SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this unary transform type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const AutoType *T, Qualifiers,
+ SourceRange Range) {
+ assert(T->getDeducedType().isNull() && "expecting a dependent type!");
+
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this 'auto' type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const AtomicType *T, Qualifiers,
+ SourceRange Range) {
+ QualType ValueType = T->getValueType();
+
+ llvm::SmallString<64> TemplateMangling;
+ llvm::raw_svector_ostream Stream(TemplateMangling);
+ MicrosoftCXXNameMangler Extra(Context, Stream);
+ Stream << "?$";
+ Extra.mangleSourceName("_Atomic");
+ Extra.mangleType(ValueType, Range, QMM_Escape);
+
+ mangleArtificalTagType(TTK_Struct, TemplateMangling, {"__clang"});
+}
+
+void MicrosoftMangleContextImpl::mangleCXXName(const NamedDecl *D,
+ raw_ostream &Out) {
+ assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
+ "Invalid mangleName() call, argument is not a variable or function!");
+ assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
+ "Invalid mangleName() call on 'structor decl!");
+
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ getASTContext().getSourceManager(),
+ "Mangling declaration");
+
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ return Mangler.mangle(D);
+}
+
+// <this-adjustment> ::= <no-adjustment> | <static-adjustment> |
+// <virtual-adjustment>
+// <no-adjustment> ::= A # private near
+// ::= B # private far
+// ::= I # protected near
+// ::= J # protected far
+// ::= Q # public near
+// ::= R # public far
+// <static-adjustment> ::= G <static-offset> # private near
+// ::= H <static-offset> # private far
+// ::= O <static-offset> # protected near
+// ::= P <static-offset> # protected far
+// ::= W <static-offset> # public near
+// ::= X <static-offset> # public far
+// <virtual-adjustment> ::= $0 <virtual-shift> <static-offset> # private near
+// ::= $1 <virtual-shift> <static-offset> # private far
+// ::= $2 <virtual-shift> <static-offset> # protected near
+// ::= $3 <virtual-shift> <static-offset> # protected far
+// ::= $4 <virtual-shift> <static-offset> # public near
+// ::= $5 <virtual-shift> <static-offset> # public far
+// <virtual-shift> ::= <vtordisp-shift> | <vtordispex-shift>
+// <vtordisp-shift> ::= <offset-to-vtordisp>
+// <vtordispex-shift> ::= <offset-to-vbptr> <vbase-offset-offset>
+// <offset-to-vtordisp>
+static void mangleThunkThisAdjustment(const CXXMethodDecl *MD,
+ const ThisAdjustment &Adjustment,
+ MicrosoftCXXNameMangler &Mangler,
+ raw_ostream &Out) {
+ if (!Adjustment.Virtual.isEmpty()) {
+ Out << '$';
+ char AccessSpec;
+ switch (MD->getAccess()) {
+ case AS_none:
+ llvm_unreachable("Unsupported access specifier");
+ case AS_private:
+ AccessSpec = '0';
+ break;
+ case AS_protected:
+ AccessSpec = '2';
+ break;
+ case AS_public:
+ AccessSpec = '4';
+ }
+ if (Adjustment.Virtual.Microsoft.VBPtrOffset) {
+ Out << 'R' << AccessSpec;
+ Mangler.mangleNumber(
+ static_cast<uint32_t>(Adjustment.Virtual.Microsoft.VBPtrOffset));
+ Mangler.mangleNumber(
+ static_cast<uint32_t>(Adjustment.Virtual.Microsoft.VBOffsetOffset));
+ Mangler.mangleNumber(
+ static_cast<uint32_t>(Adjustment.Virtual.Microsoft.VtordispOffset));
+ Mangler.mangleNumber(static_cast<uint32_t>(Adjustment.NonVirtual));
+ } else {
+ Out << AccessSpec;
+ Mangler.mangleNumber(
+ static_cast<uint32_t>(Adjustment.Virtual.Microsoft.VtordispOffset));
+ Mangler.mangleNumber(-static_cast<uint32_t>(Adjustment.NonVirtual));
+ }
+ } else if (Adjustment.NonVirtual != 0) {
+ switch (MD->getAccess()) {
+ case AS_none:
+ llvm_unreachable("Unsupported access specifier");
+ case AS_private:
+ Out << 'G';
+ break;
+ case AS_protected:
+ Out << 'O';
+ break;
+ case AS_public:
+ Out << 'W';
+ }
+ Mangler.mangleNumber(-static_cast<uint32_t>(Adjustment.NonVirtual));
+ } else {
+ switch (MD->getAccess()) {
+ case AS_none:
+ llvm_unreachable("Unsupported access specifier");
+ case AS_private:
+ Out << 'A';
+ break;
+ case AS_protected:
+ Out << 'I';
+ break;
+ case AS_public:
+ Out << 'Q';
+ }
+ }
+}
+
+void
+MicrosoftMangleContextImpl::mangleVirtualMemPtrThunk(const CXXMethodDecl *MD,
+ raw_ostream &Out) {
+ MicrosoftVTableContext *VTContext =
+ cast<MicrosoftVTableContext>(getASTContext().getVTableContext());
+ const MicrosoftVTableContext::MethodVFTableLocation &ML =
+ VTContext->getMethodVFTableLocation(GlobalDecl(MD));
+
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "\01?";
+ Mangler.mangleVirtualMemPtrThunk(MD, ML);
+}
+
+void MicrosoftMangleContextImpl::mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ raw_ostream &Out) {
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ Out << "\01?";
+ Mangler.mangleName(MD);
+ mangleThunkThisAdjustment(MD, Thunk.This, Mangler, Out);
+ if (!Thunk.Return.isEmpty())
+ assert(Thunk.Method != nullptr &&
+ "Thunk info should hold the overridee decl");
+
+ const CXXMethodDecl *DeclForFPT = Thunk.Method ? Thunk.Method : MD;
+ Mangler.mangleFunctionType(
+ DeclForFPT->getType()->castAs<FunctionProtoType>(), MD);
+}
+
+void MicrosoftMangleContextImpl::mangleCXXDtorThunk(
+ const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &Adjustment, raw_ostream &Out) {
+ // FIXME: Actually, the dtor thunk should be emitted for vector deleting
+ // dtors rather than scalar deleting dtors. Just use the vector deleting dtor
+ // mangling manually until we support both deleting dtor types.
+ assert(Type == Dtor_Deleting);
+ MicrosoftCXXNameMangler Mangler(*this, Out, DD, Type);
+ Out << "\01??_E";
+ Mangler.mangleName(DD->getParent());
+ mangleThunkThisAdjustment(DD, Adjustment, Mangler, Out);
+ Mangler.mangleFunctionType(DD->getType()->castAs<FunctionProtoType>(), DD);
+}
+
+void MicrosoftMangleContextImpl::mangleCXXVFTable(
+ const CXXRecordDecl *Derived, ArrayRef<const CXXRecordDecl *> BasePath,
+ raw_ostream &Out) {
+ // <mangled-name> ::= ?_7 <class-name> <storage-class>
+ // <cvr-qualifiers> [<name>] @
+ // NOTE: <cvr-qualifiers> here is always 'B' (const). <storage-class>
+ // is always '6' for vftables.
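+ // e.g. (a sketch) the vftable of "struct S" mangles as "??_7S@@6B@".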
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "\01??_7";
+ Mangler.mangleName(Derived);
+ Mangler.getStream() << "6B"; // '6' for vftable, 'B' for const.
+ for (const CXXRecordDecl *RD : BasePath)
+ Mangler.mangleName(RD);
+ Mangler.getStream() << '@';
+}
+
+void MicrosoftMangleContextImpl::mangleCXXVBTable(
+ const CXXRecordDecl *Derived, ArrayRef<const CXXRecordDecl *> BasePath,
+ raw_ostream &Out) {
+ // <mangled-name> ::= ?_8 <class-name> <storage-class>
+ // <cvr-qualifiers> [<name>] @
+ // NOTE: <cvr-qualifiers> here is always 'B' (const). <storage-class>
+ // is always '7' for vbtables.
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "\01??_8";
+ Mangler.mangleName(Derived);
+ Mangler.getStream() << "7B"; // '7' for vbtable, 'B' for const.
+ for (const CXXRecordDecl *RD : BasePath)
+ Mangler.mangleName(RD);
+ Mangler.getStream() << '@';
+}
+
+void MicrosoftMangleContextImpl::mangleCXXRTTI(QualType T, raw_ostream &Out) {
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "\01??_R0";
+ Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result);
+ Mangler.getStream() << "@8";
+}
+
+void MicrosoftMangleContextImpl::mangleCXXRTTIName(QualType T,
+ raw_ostream &Out) {
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << '.';
+ Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result);
+}
+
+void MicrosoftMangleContextImpl::mangleCXXVirtualDisplacementMap(
+ const CXXRecordDecl *SrcRD, const CXXRecordDecl *DstRD, raw_ostream &Out) {
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "\01??_K";
+ Mangler.mangleName(SrcRD);
+ Mangler.getStream() << "$C";
+ Mangler.mangleName(DstRD);
+}
+
+void MicrosoftMangleContextImpl::mangleCXXThrowInfo(QualType T,
+ bool IsConst,
+ bool IsVolatile,
+ uint32_t NumEntries,
+ raw_ostream &Out) {
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_TI";
+ if (IsConst)
+ Mangler.getStream() << 'C';
+ if (IsVolatile)
+ Mangler.getStream() << 'V';
+ Mangler.getStream() << NumEntries;
+ Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result);
+}
+
+void MicrosoftMangleContextImpl::mangleCXXCatchableTypeArray(
+ QualType T, uint32_t NumEntries, raw_ostream &Out) {
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_CTA";
+ Mangler.getStream() << NumEntries;
+ Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result);
+}
+
+void MicrosoftMangleContextImpl::mangleCXXCatchableType(
+ QualType T, const CXXConstructorDecl *CD, CXXCtorType CT, uint32_t Size,
+ uint32_t NVOffset, int32_t VBPtrOffset, uint32_t VBIndex,
+ raw_ostream &Out) {
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_CT";
+
+ llvm::SmallString<64> RTTIMangling;
+ {
+ llvm::raw_svector_ostream Stream(RTTIMangling);
+ mangleCXXRTTI(T, Stream);
+ }
+ Mangler.getStream() << RTTIMangling.substr(1);
+
+ // VS2015 CTP6 omits the copy-constructor in the mangled name. This name is,
+ // in fact, superfluous, but I'm not sure the change was made consciously.
+ // TODO: Revisit this when VS2015 gets released.
+ llvm::SmallString<64> CopyCtorMangling;
+ if (CD) {
+ llvm::raw_svector_ostream Stream(CopyCtorMangling);
+ mangleCXXCtor(CD, CT, Stream);
+ }
+ Mangler.getStream() << CopyCtorMangling.substr(1);
+
+ Mangler.getStream() << Size;
+ if (VBPtrOffset == -1) {
+ if (NVOffset) {
+ Mangler.getStream() << NVOffset;
+ }
+ } else {
+ Mangler.getStream() << NVOffset;
+ Mangler.getStream() << VBPtrOffset;
+ Mangler.getStream() << VBIndex;
+ }
+}
+
+void MicrosoftMangleContextImpl::mangleCXXRTTIBaseClassDescriptor(
+ const CXXRecordDecl *Derived, uint32_t NVOffset, int32_t VBPtrOffset,
+ uint32_t VBTableOffset, uint32_t Flags, raw_ostream &Out) {
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "\01??_R1";
+ Mangler.mangleNumber(NVOffset);
+ Mangler.mangleNumber(VBPtrOffset);
+ Mangler.mangleNumber(VBTableOffset);
+ Mangler.mangleNumber(Flags);
+ Mangler.mangleName(Derived);
+ Mangler.getStream() << "8";
+}
+
+void MicrosoftMangleContextImpl::mangleCXXRTTIBaseClassArray(
+ const CXXRecordDecl *Derived, raw_ostream &Out) {
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "\01??_R2";
+ Mangler.mangleName(Derived);
+ Mangler.getStream() << "8";
+}
+
+void MicrosoftMangleContextImpl::mangleCXXRTTIClassHierarchyDescriptor(
+ const CXXRecordDecl *Derived, raw_ostream &Out) {
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "\01??_R3";
+ Mangler.mangleName(Derived);
+ Mangler.getStream() << "8";
+}
+
+void MicrosoftMangleContextImpl::mangleCXXRTTICompleteObjectLocator(
+ const CXXRecordDecl *Derived, ArrayRef<const CXXRecordDecl *> BasePath,
+ raw_ostream &Out) {
+ // <mangled-name> ::= ?_R4 <class-name> <storage-class>
+ // <cvr-qualifiers> [<name>] @
+ // NOTE: <cvr-qualifiers> here is always 'B' (const). <storage-class>
+ // is always '6' for vftables.
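+ //
+ // e.g. for a plain class Foo with an empty base path, this yields
+ // "??_R4Foo@@6B@" (illustrative).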
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "\01??_R4";
+ Mangler.mangleName(Derived);
+ Mangler.getStream() << "6B"; // '6' for vftable, 'B' for const.
+ for (const CXXRecordDecl *RD : BasePath)
+ Mangler.mangleName(RD);
+ Mangler.getStream() << '@';
+}
+
+void MicrosoftMangleContextImpl::mangleSEHFilterExpression(
+ const NamedDecl *EnclosingDecl, raw_ostream &Out) {
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ // The function body is in the same comdat as the function with the handler,
+ // so the numbering here doesn't have to be the same across TUs.
+ //
+ // <mangled-name> ::= ?filt$ <filter-number> @0
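+ //
+ // e.g. the first filter in main() would be named "?filt$0@0@main@@"
+ // (illustrative).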
+ Mangler.getStream() << "\01?filt$" << SEHFilterIds[EnclosingDecl]++ << "@0@";
+ Mangler.mangleName(EnclosingDecl);
+}
+
+void MicrosoftMangleContextImpl::mangleSEHFinallyBlock(
+ const NamedDecl *EnclosingDecl, raw_ostream &Out) {
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ // The function body is in the same comdat as the function with the handler,
+ // so the numbering here doesn't have to be the same across TUs.
+ //
+ // <mangled-name> ::= ?fin$ <filter-number> @0
+ Mangler.getStream() << "\01?fin$" << SEHFinallyIds[EnclosingDecl]++ << "@0@";
+ Mangler.mangleName(EnclosingDecl);
+}
+
+void MicrosoftMangleContextImpl::mangleTypeName(QualType T, raw_ostream &Out) {
+ // This is just a made-up unique string for the purposes of TBAA. undname
+ // does *not* know how to demangle it.
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << '?';
+ Mangler.mangleType(T, SourceRange());
+}
+
+void MicrosoftMangleContextImpl::mangleCXXCtor(const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ raw_ostream &Out) {
+ MicrosoftCXXNameMangler mangler(*this, Out, D, Type);
+ mangler.mangle(D);
+}
+
+void MicrosoftMangleContextImpl::mangleCXXDtor(const CXXDestructorDecl *D,
+ CXXDtorType Type,
+ raw_ostream &Out) {
+ MicrosoftCXXNameMangler mangler(*this, Out, D, Type);
+ mangler.mangle(D);
+}
+
+void MicrosoftMangleContextImpl::mangleReferenceTemporary(
+ const VarDecl *VD, unsigned ManglingNumber, raw_ostream &Out) {
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+
+ Mangler.getStream() << "\01?$RT" << ManglingNumber << '@';
+ Mangler.mangle(VD, "");
+}
+
+void MicrosoftMangleContextImpl::mangleThreadSafeStaticGuardVariable(
+ const VarDecl *VD, unsigned GuardNum, raw_ostream &Out) {
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+
+ Mangler.getStream() << "\01?$TSS" << GuardNum << '@';
+ Mangler.mangleNestedName(VD);
+}
+
+void MicrosoftMangleContextImpl::mangleStaticGuardVariable(const VarDecl *VD,
+ raw_ostream &Out) {
+ // <guard-name> ::= ?_B <postfix> @5 <scope-depth>
+ // ::= ?__J <postfix> @5 <scope-depth>
+ // ::= ?$S <guard-num> @ <postfix> @4IA
+
+ // The first mangling is what MSVC uses to guard static locals in inline
+ // functions. It uses a different mangling in external functions to support
+ // guarding more than 32 variables. MSVC rejects inline functions with more
+ // than 32 static locals. We don't fully implement the second mangling
+ // because those guards are not externally visible, and instead use LLVM's
+ // default renaming when creating a new guard variable.
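+ //
+ // e.g. the guard for a static local in "inline void f()" looks roughly
+ // like "??_B?1??f@@YAXXZ@51" (illustrative; the digits after '@5' encode
+ // the scope depth).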
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+
+ bool Visible = VD->isExternallyVisible();
+ if (Visible) {
+ Mangler.getStream() << (VD->getTLSKind() ? "\01??__J" : "\01??_B");
+ } else {
+ Mangler.getStream() << "\01?$S1@";
+ }
+ unsigned ScopeDepth = 0;
+ if (Visible && !getNextDiscriminator(VD, ScopeDepth))
+ // If we do not have a discriminator and are emitting a guard variable for
+ // use at global scope, then mangling the nested name will not be enough to
+ // remove ambiguities.
+ Mangler.mangle(VD, "");
+ else
+ Mangler.mangleNestedName(VD);
+ Mangler.getStream() << (Visible ? "@5" : "@4IA");
+ if (ScopeDepth)
+ Mangler.mangleNumber(ScopeDepth);
+}
+
+void MicrosoftMangleContextImpl::mangleInitFiniStub(const VarDecl *D,
+ raw_ostream &Out,
+ char CharCode) {
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "\01??__" << CharCode;
+ Mangler.mangleName(D);
+ if (D->isStaticDataMember()) {
+ Mangler.mangleVariableEncoding(D);
+ Mangler.getStream() << '@';
+ }
+ // This is the function class mangling. These stubs are global, non-variadic,
+ // cdecl functions that return void and take no args.
+ Mangler.getStream() << "YAXXZ";
+}
+
+void MicrosoftMangleContextImpl::mangleDynamicInitializer(const VarDecl *D,
+ raw_ostream &Out) {
+ // <initializer-name> ::= ?__E <name> YAXXZ
+ mangleInitFiniStub(D, Out, 'E');
+}
+
+void
+MicrosoftMangleContextImpl::mangleDynamicAtExitDestructor(const VarDecl *D,
+ raw_ostream &Out) {
+ // <destructor-name> ::= ?__F <name> YAXXZ
+ mangleInitFiniStub(D, Out, 'F');
+}
+
+void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
+ raw_ostream &Out) {
+ // <char-type> ::= 0 # char
+ // ::= 1 # wchar_t
+ // ::= ??? # char16_t/char32_t will need a mangling too...
+ //
+ // <literal-length> ::= <non-negative integer> # the length of the literal
+ //
+ // <encoded-crc> ::= <hex digit>+ @ # crc of the literal including
+ // # null-terminator
+ //
+ // <encoded-string> ::= <simple character> # uninteresting character
+ // ::= '?$' <hex digit> <hex digit> # these two nibbles
+ // # encode the byte for the
+ // # character
+ // ::= '?' [a-z] # \xe1 - \xfa
+ // ::= '?' [A-Z] # \xc1 - \xda
+ // ::= '?' [0-9] # [,/\:. \n\t'-]
+ //
+ // <literal> ::= '??_C@_' <char-type> <literal-length> <encoded-crc>
+ // <encoded-string> '@'
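+ //
+ // For example (illustrative, CRC elided), the narrow literal "ab" mangles
+ // as "??_C@_02<crc>@ab?$AA@": '0' selects char, '2' encodes the length of
+ // 3 bytes (including the NUL), and the NUL byte itself is encoded "?$AA".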
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "\01??_C@_";
+
+ // <char-type>: The "kind" of string literal is encoded into the mangled name.
+ if (SL->isWide())
+ Mangler.getStream() << '1';
+ else
+ Mangler.getStream() << '0';
+
+ // <literal-length>: The next part of the mangled name consists of the length
+ // of the string.
+ // The StringLiteral does not consider the NUL terminator byte(s) but the
+ // mangling does.
+ // N.B. The length is in terms of bytes, not characters.
+ Mangler.mangleNumber(SL->getByteLength() + SL->getCharByteWidth());
+
+ auto GetLittleEndianByte = [&Mangler, &SL](unsigned Index) {
+ unsigned CharByteWidth = SL->getCharByteWidth();
+ uint32_t CodeUnit = SL->getCodeUnit(Index / CharByteWidth);
+ unsigned OffsetInCodeUnit = Index % CharByteWidth;
+ return static_cast<char>((CodeUnit >> (8 * OffsetInCodeUnit)) & 0xff);
+ };
+
+ auto GetBigEndianByte = [&Mangler, &SL](unsigned Index) {
+ unsigned CharByteWidth = SL->getCharByteWidth();
+ uint32_t CodeUnit = SL->getCodeUnit(Index / CharByteWidth);
+ unsigned OffsetInCodeUnit = (CharByteWidth - 1) - (Index % CharByteWidth);
+ return static_cast<char>((CodeUnit >> (8 * OffsetInCodeUnit)) & 0xff);
+ };
+
+ // CRC all the bytes of the StringLiteral.
+ llvm::JamCRC JC;
+ for (unsigned I = 0, E = SL->getByteLength(); I != E; ++I)
+ JC.update(GetLittleEndianByte(I));
+
+ // The NUL terminator byte(s) were not present earlier, so we need to
+ // manually process those bytes into the CRC.
+ for (unsigned NullTerminator = 0; NullTerminator < SL->getCharByteWidth();
+ ++NullTerminator)
+ JC.update('\x00');
+
+ // <encoded-crc>: The CRC is encoded utilizing the standard number mangling
+ // scheme.
+ Mangler.mangleNumber(JC.getCRC());
+
+ // <encoded-string>: The mangled name also contains the first 32 _characters_
+ // (including null-terminator bytes) of the StringLiteral.
+ // Each character is encoded by splitting them into bytes and then encoding
+ // the constituent bytes.
+ auto MangleByte = [&Mangler](char Byte) {
+ // There are five different manglings for characters:
+ // - [a-zA-Z0-9_$]: A one-to-one mapping.
+ // - ?[a-z]: The range from \xe1 to \xfa.
+ // - ?[A-Z]: The range from \xc1 to \xda.
+ // - ?[0-9]: The set of [,/\:. \n\t'-].
+ // - ?$XX: A fallback which maps nibbles.
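+ //
+ // For instance, under these rules 'a' stays 'a', '\xe1' becomes "?a",
+ // ',' becomes "?0", and '\x1b' becomes "?$BL".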
+ if (isIdentifierBody(Byte, /*AllowDollar=*/true)) {
+ Mangler.getStream() << Byte;
+ } else if (isLetter(Byte & 0x7f)) {
+ Mangler.getStream() << '?' << static_cast<char>(Byte & 0x7f);
+ } else {
+ const char SpecialChars[] = {',', '/', '\\', ':', '.',
+ ' ', '\n', '\t', '\'', '-'};
+ const char *Pos =
+ std::find(std::begin(SpecialChars), std::end(SpecialChars), Byte);
+ if (Pos != std::end(SpecialChars)) {
+ Mangler.getStream() << '?' << (Pos - std::begin(SpecialChars));
+ } else {
+ Mangler.getStream() << "?$";
+ Mangler.getStream() << static_cast<char>('A' + ((Byte >> 4) & 0xf));
+ Mangler.getStream() << static_cast<char>('A' + (Byte & 0xf));
+ }
+ }
+ };
+
+ // Enforce our 32 character max.
+ unsigned NumCharsToMangle = std::min(32U, SL->getLength());
+ for (unsigned I = 0, E = NumCharsToMangle * SL->getCharByteWidth(); I != E;
+ ++I)
+ if (SL->isWide())
+ MangleByte(GetBigEndianByte(I));
+ else
+ MangleByte(GetLittleEndianByte(I));
+
+ // Encode the NUL terminator if there is room.
+ if (NumCharsToMangle < 32)
+ for (unsigned NullTerminator = 0; NullTerminator < SL->getCharByteWidth();
+ ++NullTerminator)
+ MangleByte(0);
+
+ Mangler.getStream() << '@';
+}
+
+MicrosoftMangleContext *
+MicrosoftMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) {
+ return new MicrosoftMangleContextImpl(Context, Diags);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp b/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp
new file mode 100644
index 0000000..c562dae
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp
@@ -0,0 +1,592 @@
+//===--- NSAPI.cpp - NSFoundation APIs ------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/NSAPI.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "llvm/ADT/StringSwitch.h"
+
+using namespace clang;
+
+NSAPI::NSAPI(ASTContext &ctx)
+ : Ctx(ctx), ClassIds(), BOOLId(nullptr), NSIntegerId(nullptr),
+ NSUIntegerId(nullptr), NSASCIIStringEncodingId(nullptr),
+ NSUTF8StringEncodingId(nullptr) {}
+
+IdentifierInfo *NSAPI::getNSClassId(NSClassIdKindKind K) const {
+ static const char *ClassName[NumClassIds] = {
+ "NSObject",
+ "NSString",
+ "NSArray",
+ "NSMutableArray",
+ "NSDictionary",
+ "NSMutableDictionary",
+ "NSNumber",
+ "NSMutableSet",
+ "NSMutableOrderedSet",
+ "NSValue"
+ };
+
+ if (!ClassIds[K])
+ return (ClassIds[K] = &Ctx.Idents.get(ClassName[K]));
+
+ return ClassIds[K];
+}
+
+Selector NSAPI::getNSStringSelector(NSStringMethodKind MK) const {
+ if (NSStringSelectors[MK].isNull()) {
+ Selector Sel;
+ switch (MK) {
+ case NSStr_stringWithString:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("stringWithString"));
+ break;
+ case NSStr_stringWithUTF8String:
+ Sel = Ctx.Selectors.getUnarySelector(
+ &Ctx.Idents.get("stringWithUTF8String"));
+ break;
+ case NSStr_initWithUTF8String:
+ Sel = Ctx.Selectors.getUnarySelector(
+ &Ctx.Idents.get("initWithUTF8String"));
+ break;
+ case NSStr_stringWithCStringEncoding: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("stringWithCString"),
+ &Ctx.Idents.get("encoding")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSStr_stringWithCString:
+ Sel= Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("stringWithCString"));
+ break;
+ case NSStr_initWithString:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("initWithString"));
+ break;
+ }
+ return (NSStringSelectors[MK] = Sel);
+ }
+
+ return NSStringSelectors[MK];
+}
+
+Optional<NSAPI::NSStringMethodKind>
+NSAPI::getNSStringMethodKind(Selector Sel) const {
+ for (unsigned i = 0; i != NumNSStringMethods; ++i) {
+ NSStringMethodKind MK = NSStringMethodKind(i);
+ if (Sel == getNSStringSelector(MK))
+ return MK;
+ }
+
+ return None;
+}
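+
+// Illustrative use (assuming an ObjCMessageExpr *Msg is at hand):
+//   if (Optional<NSAPI::NSStringMethodKind> MK =
+//           API.getNSStringMethodKind(Msg->getSelector()))
+//     ; // MK identifies which NSString factory/initializer was messaged.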
+
+Selector NSAPI::getNSArraySelector(NSArrayMethodKind MK) const {
+ if (NSArraySelectors[MK].isNull()) {
+ Selector Sel;
+ switch (MK) {
+ case NSArr_array:
+ Sel = Ctx.Selectors.getNullarySelector(&Ctx.Idents.get("array"));
+ break;
+ case NSArr_arrayWithArray:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithArray"));
+ break;
+ case NSArr_arrayWithObject:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithObject"));
+ break;
+ case NSArr_arrayWithObjects:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithObjects"));
+ break;
+ case NSArr_arrayWithObjectsCount: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("arrayWithObjects"),
+ &Ctx.Idents.get("count")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSArr_initWithArray:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("initWithArray"));
+ break;
+ case NSArr_initWithObjects:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("initWithObjects"));
+ break;
+ case NSArr_objectAtIndex:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("objectAtIndex"));
+ break;
+ case NSMutableArr_replaceObjectAtIndex: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("replaceObjectAtIndex"),
+ &Ctx.Idents.get("withObject")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSMutableArr_addObject:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("addObject"));
+ break;
+ case NSMutableArr_insertObjectAtIndex: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("insertObject"),
+ &Ctx.Idents.get("atIndex")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSMutableArr_setObjectAtIndexedSubscript: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("setObject"),
+ &Ctx.Idents.get("atIndexedSubscript")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ }
+ return (NSArraySelectors[MK] = Sel);
+ }
+
+ return NSArraySelectors[MK];
+}
+
+Optional<NSAPI::NSArrayMethodKind> NSAPI::getNSArrayMethodKind(Selector Sel) {
+ for (unsigned i = 0; i != NumNSArrayMethods; ++i) {
+ NSArrayMethodKind MK = NSArrayMethodKind(i);
+ if (Sel == getNSArraySelector(MK))
+ return MK;
+ }
+
+ return None;
+}
+
+Selector NSAPI::getNSDictionarySelector(
+ NSDictionaryMethodKind MK) const {
+ if (NSDictionarySelectors[MK].isNull()) {
+ Selector Sel;
+ switch (MK) {
+ case NSDict_dictionary:
+ Sel = Ctx.Selectors.getNullarySelector(&Ctx.Idents.get("dictionary"));
+ break;
+ case NSDict_dictionaryWithDictionary:
+ Sel = Ctx.Selectors.getUnarySelector(
+ &Ctx.Idents.get("dictionaryWithDictionary"));
+ break;
+ case NSDict_dictionaryWithObjectForKey: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("dictionaryWithObject"),
+ &Ctx.Idents.get("forKey")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSDict_dictionaryWithObjectsForKeys: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("dictionaryWithObjects"),
+ &Ctx.Idents.get("forKeys")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSDict_dictionaryWithObjectsForKeysCount: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("dictionaryWithObjects"),
+ &Ctx.Idents.get("forKeys"),
+ &Ctx.Idents.get("count")
+ };
+ Sel = Ctx.Selectors.getSelector(3, KeyIdents);
+ break;
+ }
+ case NSDict_dictionaryWithObjectsAndKeys:
+ Sel = Ctx.Selectors.getUnarySelector(
+ &Ctx.Idents.get("dictionaryWithObjectsAndKeys"));
+ break;
+ case NSDict_initWithDictionary:
+ Sel = Ctx.Selectors.getUnarySelector(
+ &Ctx.Idents.get("initWithDictionary"));
+ break;
+ case NSDict_initWithObjectsAndKeys:
+ Sel = Ctx.Selectors.getUnarySelector(
+ &Ctx.Idents.get("initWithObjectsAndKeys"));
+ break;
+ case NSDict_initWithObjectsForKeys: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("initWithObjects"),
+ &Ctx.Idents.get("forKeys")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSDict_objectForKey:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("objectForKey"));
+ break;
+ case NSMutableDict_setObjectForKey: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("setObject"),
+ &Ctx.Idents.get("forKey")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSMutableDict_setObjectForKeyedSubscript: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("setObject"),
+ &Ctx.Idents.get("forKeyedSubscript")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSMutableDict_setValueForKey: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("setValue"),
+ &Ctx.Idents.get("forKey")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ }
+ return (NSDictionarySelectors[MK] = Sel);
+ }
+
+ return NSDictionarySelectors[MK];
+}
+
+Optional<NSAPI::NSDictionaryMethodKind>
+NSAPI::getNSDictionaryMethodKind(Selector Sel) {
+ for (unsigned i = 0; i != NumNSDictionaryMethods; ++i) {
+ NSDictionaryMethodKind MK = NSDictionaryMethodKind(i);
+ if (Sel == getNSDictionarySelector(MK))
+ return MK;
+ }
+
+ return None;
+}
+
+Selector NSAPI::getNSSetSelector(NSSetMethodKind MK) const {
+ if (NSSetSelectors[MK].isNull()) {
+ Selector Sel;
+ switch (MK) {
+ case NSMutableSet_addObject:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("addObject"));
+ break;
+ case NSOrderedSet_insertObjectAtIndex: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("insertObject"),
+ &Ctx.Idents.get("atIndex")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSOrderedSet_setObjectAtIndex: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("setObject"),
+ &Ctx.Idents.get("atIndex")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSOrderedSet_setObjectAtIndexedSubscript: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("setObject"),
+ &Ctx.Idents.get("atIndexedSubscript")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSOrderedSet_replaceObjectAtIndexWithObject: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("replaceObjectAtIndex"),
+ &Ctx.Idents.get("withObject")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ }
+ return (NSSetSelectors[MK] = Sel);
+ }
+
+ return NSSetSelectors[MK];
+}
+
+Optional<NSAPI::NSSetMethodKind>
+NSAPI::getNSSetMethodKind(Selector Sel) {
+ for (unsigned i = 0; i != NumNSSetMethods; ++i) {
+ NSSetMethodKind MK = NSSetMethodKind(i);
+ if (Sel == getNSSetSelector(MK))
+ return MK;
+ }
+
+ return None;
+}
+
+Selector NSAPI::getNSNumberLiteralSelector(NSNumberLiteralMethodKind MK,
+ bool Instance) const {
+ static const char *ClassSelectorName[NumNSNumberLiteralMethods] = {
+ "numberWithChar",
+ "numberWithUnsignedChar",
+ "numberWithShort",
+ "numberWithUnsignedShort",
+ "numberWithInt",
+ "numberWithUnsignedInt",
+ "numberWithLong",
+ "numberWithUnsignedLong",
+ "numberWithLongLong",
+ "numberWithUnsignedLongLong",
+ "numberWithFloat",
+ "numberWithDouble",
+ "numberWithBool",
+ "numberWithInteger",
+ "numberWithUnsignedInteger"
+ };
+ static const char *InstanceSelectorName[NumNSNumberLiteralMethods] = {
+ "initWithChar",
+ "initWithUnsignedChar",
+ "initWithShort",
+ "initWithUnsignedShort",
+ "initWithInt",
+ "initWithUnsignedInt",
+ "initWithLong",
+ "initWithUnsignedLong",
+ "initWithLongLong",
+ "initWithUnsignedLongLong",
+ "initWithFloat",
+ "initWithDouble",
+ "initWithBool",
+ "initWithInteger",
+ "initWithUnsignedInteger"
+ };
+
+ Selector *Sels;
+ const char **Names;
+ if (Instance) {
+ Sels = NSNumberInstanceSelectors;
+ Names = InstanceSelectorName;
+ } else {
+ Sels = NSNumberClassSelectors;
+ Names = ClassSelectorName;
+ }
+
+ if (Sels[MK].isNull())
+ Sels[MK] = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get(Names[MK]));
+ return Sels[MK];
+}
+
+Optional<NSAPI::NSNumberLiteralMethodKind>
+NSAPI::getNSNumberLiteralMethodKind(Selector Sel) const {
+ for (unsigned i = 0; i != NumNSNumberLiteralMethods; ++i) {
+ NSNumberLiteralMethodKind MK = NSNumberLiteralMethodKind(i);
+ if (isNSNumberLiteralSelector(MK, Sel))
+ return MK;
+ }
+
+ return None;
+}
+
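+/// \brief Map a builtin type (or a BOOL/NSInteger/NSUInteger typedef) to the
+/// NSNumber factory method kind that would wrap a value of that type, if any.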
+Optional<NSAPI::NSNumberLiteralMethodKind>
+NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
+ const BuiltinType *BT = T->getAs<BuiltinType>();
+ if (!BT)
+ return None;
+
+ const TypedefType *TDT = T->getAs<TypedefType>();
+ if (TDT) {
+ QualType TDTTy = QualType(TDT, 0);
+ if (isObjCBOOLType(TDTTy))
+ return NSAPI::NSNumberWithBool;
+ if (isObjCNSIntegerType(TDTTy))
+ return NSAPI::NSNumberWithInteger;
+ if (isObjCNSUIntegerType(TDTTy))
+ return NSAPI::NSNumberWithUnsignedInteger;
+ }
+
+ switch (BT->getKind()) {
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ return NSAPI::NSNumberWithChar;
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ return NSAPI::NSNumberWithUnsignedChar;
+ case BuiltinType::Short:
+ return NSAPI::NSNumberWithShort;
+ case BuiltinType::UShort:
+ return NSAPI::NSNumberWithUnsignedShort;
+ case BuiltinType::Int:
+ return NSAPI::NSNumberWithInt;
+ case BuiltinType::UInt:
+ return NSAPI::NSNumberWithUnsignedInt;
+ case BuiltinType::Long:
+ return NSAPI::NSNumberWithLong;
+ case BuiltinType::ULong:
+ return NSAPI::NSNumberWithUnsignedLong;
+ case BuiltinType::LongLong:
+ return NSAPI::NSNumberWithLongLong;
+ case BuiltinType::ULongLong:
+ return NSAPI::NSNumberWithUnsignedLongLong;
+ case BuiltinType::Float:
+ return NSAPI::NSNumberWithFloat;
+ case BuiltinType::Double:
+ return NSAPI::NSNumberWithDouble;
+ case BuiltinType::Bool:
+ return NSAPI::NSNumberWithBool;
+
+ case BuiltinType::Void:
+ case BuiltinType::WChar_U:
+ case BuiltinType::WChar_S:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::Int128:
+ case BuiltinType::LongDouble:
+ case BuiltinType::UInt128:
+ case BuiltinType::NullPtr:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCSel:
+ case BuiltinType::OCLImage1d:
+ case BuiltinType::OCLImage1dArray:
+ case BuiltinType::OCLImage1dBuffer:
+ case BuiltinType::OCLImage2d:
+ case BuiltinType::OCLImage2dArray:
+ case BuiltinType::OCLImage2dDepth:
+ case BuiltinType::OCLImage2dArrayDepth:
+ case BuiltinType::OCLImage2dMSAA:
+ case BuiltinType::OCLImage2dArrayMSAA:
+ case BuiltinType::OCLImage2dMSAADepth:
+ case BuiltinType::OCLImage2dArrayMSAADepth:
+ case BuiltinType::OCLImage3d:
+ case BuiltinType::OCLSampler:
+ case BuiltinType::OCLEvent:
+ case BuiltinType::OCLClkEvent:
+ case BuiltinType::OCLQueue:
+ case BuiltinType::OCLNDRange:
+ case BuiltinType::OCLReserveID:
+ case BuiltinType::BoundMember:
+ case BuiltinType::Dependent:
+ case BuiltinType::Overload:
+ case BuiltinType::UnknownAny:
+ case BuiltinType::ARCUnbridgedCast:
+ case BuiltinType::Half:
+ case BuiltinType::PseudoObject:
+ case BuiltinType::BuiltinFn:
+ case BuiltinType::OMPArraySection:
+ break;
+ }
+
+ return None;
+}
+
+/// \brief Returns true if \p T is a typedef of "BOOL" in Objective-C.
+bool NSAPI::isObjCBOOLType(QualType T) const {
+ return isObjCTypedef(T, "BOOL", BOOLId);
+}
+/// \brief Returns true if \p T is a typedef of "NSInteger" in Objective-C.
+bool NSAPI::isObjCNSIntegerType(QualType T) const {
+ return isObjCTypedef(T, "NSInteger", NSIntegerId);
+}
+/// \brief Returns true if \p T is a typedef of "NSUInteger" in Objective-C.
+bool NSAPI::isObjCNSUIntegerType(QualType T) const {
+ return isObjCTypedef(T, "NSUInteger", NSUIntegerId);
+}
+
+StringRef NSAPI::GetNSIntegralKind(QualType T) const {
+ if (!Ctx.getLangOpts().ObjC1 || T.isNull())
+ return StringRef();
+
+ while (const TypedefType *TDT = T->getAs<TypedefType>()) {
+ StringRef NSIntegralResult =
+ llvm::StringSwitch<StringRef>(
+ TDT->getDecl()->getDeclName().getAsIdentifierInfo()->getName())
+ .Case("int8_t", "int8_t")
+ .Case("int16_t", "int16_t")
+ .Case("int32_t", "int32_t")
+ .Case("NSInteger", "NSInteger")
+ .Case("int64_t", "int64_t")
+ .Case("uint8_t", "uint8_t")
+ .Case("uint16_t", "uint16_t")
+ .Case("uint32_t", "uint32_t")
+ .Case("NSUInteger", "NSUInteger")
+ .Case("uint64_t", "uint64_t")
+ .Default(StringRef());
+ if (!NSIntegralResult.empty())
+ return NSIntegralResult;
+ T = TDT->desugar();
+ }
+ return StringRef();
+}
+
+bool NSAPI::isMacroDefined(StringRef Id) const {
+ // FIXME: Check whether the relevant module macros are visible.
+ return Ctx.Idents.get(Id).hasMacroDefinition();
+}
+
+bool NSAPI::isSubclassOfNSClass(ObjCInterfaceDecl *InterfaceDecl,
+ NSClassIdKindKind NSClassKind) const {
+ if (!InterfaceDecl) {
+ return false;
+ }
+
+ IdentifierInfo *NSClassID = getNSClassId(NSClassKind);
+
+ bool IsSubclass = false;
+ do {
+ IsSubclass = NSClassID == InterfaceDecl->getIdentifier();
+
+ if (IsSubclass) {
+ break;
+ }
+ } while ((InterfaceDecl = InterfaceDecl->getSuperClass()));
+
+ return IsSubclass;
+}
+
+bool NSAPI::isObjCTypedef(QualType T,
+ StringRef name, IdentifierInfo *&II) const {
+ if (!Ctx.getLangOpts().ObjC1)
+ return false;
+ if (T.isNull())
+ return false;
+
+ if (!II)
+ II = &Ctx.Idents.get(name);
+
+ while (const TypedefType *TDT = T->getAs<TypedefType>()) {
+ if (TDT->getDecl()->getDeclName().getAsIdentifierInfo() == II)
+ return true;
+ T = TDT->desugar();
+ }
+
+ return false;
+}
+
+bool NSAPI::isObjCEnumerator(const Expr *E,
+ StringRef name, IdentifierInfo *&II) const {
+ if (!Ctx.getLangOpts().ObjC1)
+ return false;
+ if (!E)
+ return false;
+
+ if (!II)
+ II = &Ctx.Idents.get(name);
+
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
+ if (const EnumConstantDecl *
+ EnumD = dyn_cast_or_null<EnumConstantDecl>(DRE->getDecl()))
+ return EnumD->getIdentifier() == II;
+
+ return false;
+}
+
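+/// \brief Lazily build the selector formed from \p Ids and cache it in
+/// \p Sel.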
+Selector NSAPI::getOrInitSelector(ArrayRef<StringRef> Ids,
+ Selector &Sel) const {
+ if (Sel.isNull()) {
+ SmallVector<IdentifierInfo *, 4> Idents;
+ for (ArrayRef<StringRef>::const_iterator
+ I = Ids.begin(), E = Ids.end(); I != E; ++I)
+ Idents.push_back(&Ctx.Idents.get(*I));
+ Sel = Ctx.Selectors.getSelector(Idents.size(), Idents.data());
+ }
+ return Sel;
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp b/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp
new file mode 100644
index 0000000..d2370c8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp
@@ -0,0 +1,684 @@
+//===--- NestedNameSpecifier.cpp - C++ nested name specifiers -----*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the NestedNameSpecifier class, which represents
+// a C++ nested-name-specifier.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+
+using namespace clang;
+
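+/// \brief Return the uniqued copy of \p Mockup from \p Context's folding set,
+/// allocating and inserting a new node if none exists yet.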
+NestedNameSpecifier *
+NestedNameSpecifier::FindOrInsert(const ASTContext &Context,
+ const NestedNameSpecifier &Mockup) {
+ llvm::FoldingSetNodeID ID;
+ Mockup.Profile(ID);
+
+ void *InsertPos = nullptr;
+ NestedNameSpecifier *NNS
+ = Context.NestedNameSpecifiers.FindNodeOrInsertPos(ID, InsertPos);
+ if (!NNS) {
+ NNS = new (Context, llvm::alignOf<NestedNameSpecifier>())
+ NestedNameSpecifier(Mockup);
+ Context.NestedNameSpecifiers.InsertNode(NNS, InsertPos);
+ }
+
+ return NNS;
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::Create(const ASTContext &Context,
+ NestedNameSpecifier *Prefix, IdentifierInfo *II) {
+ assert(II && "Identifier cannot be NULL");
+ assert((!Prefix || Prefix->isDependent()) && "Prefix must be dependent");
+
+ NestedNameSpecifier Mockup;
+ Mockup.Prefix.setPointer(Prefix);
+ Mockup.Prefix.setInt(StoredIdentifier);
+ Mockup.Specifier = II;
+ return FindOrInsert(Context, Mockup);
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::Create(const ASTContext &Context,
+ NestedNameSpecifier *Prefix,
+ const NamespaceDecl *NS) {
+ assert(NS && "Namespace cannot be NULL");
+ assert((!Prefix ||
+ (Prefix->getAsType() == nullptr &&
+ Prefix->getAsIdentifier() == nullptr)) &&
+ "Broken nested name specifier");
+ NestedNameSpecifier Mockup;
+ Mockup.Prefix.setPointer(Prefix);
+ Mockup.Prefix.setInt(StoredDecl);
+ Mockup.Specifier = const_cast<NamespaceDecl *>(NS);
+ return FindOrInsert(Context, Mockup);
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::Create(const ASTContext &Context,
+ NestedNameSpecifier *Prefix,
+ NamespaceAliasDecl *Alias) {
+ assert(Alias && "Namespace alias cannot be NULL");
+ assert((!Prefix ||
+ (Prefix->getAsType() == nullptr &&
+ Prefix->getAsIdentifier() == nullptr)) &&
+ "Broken nested name specifier");
+ NestedNameSpecifier Mockup;
+ Mockup.Prefix.setPointer(Prefix);
+ Mockup.Prefix.setInt(StoredDecl);
+ Mockup.Specifier = Alias;
+ return FindOrInsert(Context, Mockup);
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::Create(const ASTContext &Context,
+ NestedNameSpecifier *Prefix,
+ bool Template, const Type *T) {
+ assert(T && "Type cannot be NULL");
+ NestedNameSpecifier Mockup;
+ Mockup.Prefix.setPointer(Prefix);
+ Mockup.Prefix.setInt(Template? StoredTypeSpecWithTemplate : StoredTypeSpec);
+ Mockup.Specifier = const_cast<Type*>(T);
+ return FindOrInsert(Context, Mockup);
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::Create(const ASTContext &Context, IdentifierInfo *II) {
+ assert(II && "Identifier cannot be NULL");
+ NestedNameSpecifier Mockup;
+ Mockup.Prefix.setPointer(nullptr);
+ Mockup.Prefix.setInt(StoredIdentifier);
+ Mockup.Specifier = II;
+ return FindOrInsert(Context, Mockup);
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::GlobalSpecifier(const ASTContext &Context) {
+ if (!Context.GlobalNestedNameSpecifier)
+ Context.GlobalNestedNameSpecifier =
+ new (Context, llvm::alignOf<NestedNameSpecifier>())
+ NestedNameSpecifier();
+ return Context.GlobalNestedNameSpecifier;
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::SuperSpecifier(const ASTContext &Context,
+ CXXRecordDecl *RD) {
+ NestedNameSpecifier Mockup;
+ Mockup.Prefix.setPointer(nullptr);
+ Mockup.Prefix.setInt(StoredDecl);
+ Mockup.Specifier = RD;
+ return FindOrInsert(Context, Mockup);
+}
+
+NestedNameSpecifier::SpecifierKind NestedNameSpecifier::getKind() const {
+ if (!Specifier)
+ return Global;
+
+ switch (Prefix.getInt()) {
+ case StoredIdentifier:
+ return Identifier;
+
+ case StoredDecl: {
+ NamedDecl *ND = static_cast<NamedDecl *>(Specifier);
+ if (isa<CXXRecordDecl>(ND))
+ return Super;
+ return isa<NamespaceDecl>(ND) ? Namespace : NamespaceAlias;
+ }
+
+ case StoredTypeSpec:
+ return TypeSpec;
+
+ case StoredTypeSpecWithTemplate:
+ return TypeSpecWithTemplate;
+ }
+
+ llvm_unreachable("Invalid NNS Kind!");
+}
+
+/// \brief Retrieve the namespace stored in this nested name specifier.
+NamespaceDecl *NestedNameSpecifier::getAsNamespace() const {
+ if (Prefix.getInt() == StoredDecl)
+ return dyn_cast<NamespaceDecl>(static_cast<NamedDecl *>(Specifier));
+
+ return nullptr;
+}
+
+/// \brief Retrieve the namespace alias stored in this nested name specifier.
+NamespaceAliasDecl *NestedNameSpecifier::getAsNamespaceAlias() const {
+ if (Prefix.getInt() == StoredDecl)
+ return dyn_cast<NamespaceAliasDecl>(static_cast<NamedDecl *>(Specifier));
+
+ return nullptr;
+}
+
+/// \brief Retrieve the record declaration stored in this nested name specifier.
+CXXRecordDecl *NestedNameSpecifier::getAsRecordDecl() const {
+ if (Prefix.getInt() == StoredDecl)
+ return dyn_cast<CXXRecordDecl>(static_cast<NamedDecl *>(Specifier));
+
+ return nullptr;
+}
+
+/// \brief Whether this nested name specifier refers to a dependent
+/// type or not.
+bool NestedNameSpecifier::isDependent() const {
+ switch (getKind()) {
+ case Identifier:
+ // Identifier specifiers always represent dependent types.
+ return true;
+
+ case Namespace:
+ case NamespaceAlias:
+ case Global:
+ return false;
+
+ case Super: {
+ CXXRecordDecl *RD = static_cast<CXXRecordDecl *>(Specifier);
+ for (const auto &Base : RD->bases())
+ if (Base.getType()->isDependentType())
+ return true;
+
+ return false;
+ }
+
+ case TypeSpec:
+ case TypeSpecWithTemplate:
+ return getAsType()->isDependentType();
+ }
+
+ llvm_unreachable("Invalid NNS Kind!");
+}
+
+/// \brief Whether this nested name specifier is instantiation-dependent
+/// or not.
+bool NestedNameSpecifier::isInstantiationDependent() const {
+ switch (getKind()) {
+ case Identifier:
+ // Identifier specifiers always represent dependent types.
+ return true;
+
+ case Namespace:
+ case NamespaceAlias:
+ case Global:
+ case Super:
+ return false;
+
+ case TypeSpec:
+ case TypeSpecWithTemplate:
+ return getAsType()->isInstantiationDependentType();
+ }
+
+ llvm_unreachable("Invalid NNS Kind!");
+}
+
+bool NestedNameSpecifier::containsUnexpandedParameterPack() const {
+ switch (getKind()) {
+ case Identifier:
+ return getPrefix() && getPrefix()->containsUnexpandedParameterPack();
+
+ case Namespace:
+ case NamespaceAlias:
+ case Global:
+ case Super:
+ return false;
+
+ case TypeSpec:
+ case TypeSpecWithTemplate:
+ return getAsType()->containsUnexpandedParameterPack();
+ }
+
+ llvm_unreachable("Invalid NNS Kind!");
+}
+
+/// \brief Print this nested name specifier to the given output
+/// stream.
+void
+NestedNameSpecifier::print(raw_ostream &OS,
+ const PrintingPolicy &Policy) const {
+ if (getPrefix())
+ getPrefix()->print(OS, Policy);
+
+ switch (getKind()) {
+ case Identifier:
+ OS << getAsIdentifier()->getName();
+ break;
+
+ case Namespace:
+ if (getAsNamespace()->isAnonymousNamespace())
+ return;
+
+ OS << getAsNamespace()->getName();
+ break;
+
+ case NamespaceAlias:
+ OS << getAsNamespaceAlias()->getName();
+ break;
+
+ case Global:
+ break;
+
+ case Super:
+ OS << "__super";
+ break;
+
+ case TypeSpecWithTemplate:
+ OS << "template ";
+ // Fall through to print the type.
+
+ case TypeSpec: {
+ const Type *T = getAsType();
+
+ PrintingPolicy InnerPolicy(Policy);
+ InnerPolicy.SuppressScope = true;
+
+ // Nested-name-specifiers are intended to contain minimally-qualified
+ // types. An actual ElaboratedType will not occur, since we'll store
+ // just the type that is referred to in the nested-name-specifier (e.g.,
+ // a TypedefType, TagType, etc.). However, when we are dealing with
+ // dependent template-id types (e.g., Outer<T>::template Inner<U>),
+ // the type requires its own nested-name-specifier for uniqueness, so we
+ // suppress that nested-name-specifier during printing.
+ assert(!isa<ElaboratedType>(T) &&
+ "Elaborated type in nested-name-specifier");
+ if (const TemplateSpecializationType *SpecType
+ = dyn_cast<TemplateSpecializationType>(T)) {
+ // Print the template name without its corresponding
+ // nested-name-specifier.
+ SpecType->getTemplateName().print(OS, InnerPolicy, true);
+
+ // Print the template argument list.
+ TemplateSpecializationType::PrintTemplateArgumentList(
+ OS, SpecType->getArgs(), SpecType->getNumArgs(), InnerPolicy);
+ } else {
+ // Print the type normally
+ QualType(T, 0).print(OS, InnerPolicy);
+ }
+ break;
+ }
+ }
+
+ OS << "::";
+}
+
+void NestedNameSpecifier::dump(const LangOptions &LO) const {
+ print(llvm::errs(), PrintingPolicy(LO));
+}
+
+void NestedNameSpecifier::dump() const {
+ LangOptions LO;
+ print(llvm::errs(), PrintingPolicy(LO));
+}
+
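+/// \brief Bytes of source-location data for the innermost qualifier alone:
+/// the trailing '::' location plus, depending on the kind, either a name
+/// location or a pointer to the TypeLoc data.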
+unsigned
+NestedNameSpecifierLoc::getLocalDataLength(NestedNameSpecifier *Qualifier) {
+ assert(Qualifier && "Expected a non-NULL qualifier");
+
+ // Location of the trailing '::'.
+ unsigned Length = sizeof(unsigned);
+
+ switch (Qualifier->getKind()) {
+ case NestedNameSpecifier::Global:
+ // Nothing more to add.
+ break;
+
+ case NestedNameSpecifier::Identifier:
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ case NestedNameSpecifier::Super:
+ // The location of the identifier or namespace name.
+ Length += sizeof(unsigned);
+ break;
+
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ case NestedNameSpecifier::TypeSpec:
+ // The "void*" that points at the TypeLoc data.
+ // Note: the 'template' keyword is part of the TypeLoc.
+ Length += sizeof(void *);
+ break;
+ }
+
+ return Length;
+}
+
+unsigned
+NestedNameSpecifierLoc::getDataLength(NestedNameSpecifier *Qualifier) {
+ unsigned Length = 0;
+ for (; Qualifier; Qualifier = Qualifier->getPrefix())
+ Length += getLocalDataLength(Qualifier);
+ return Length;
+}
+
+namespace {
+ /// \brief Load a (possibly unaligned) source location from a given address
+ /// and offset.
+ SourceLocation LoadSourceLocation(void *Data, unsigned Offset) {
+ unsigned Raw;
+ memcpy(&Raw, static_cast<char *>(Data) + Offset, sizeof(unsigned));
+ return SourceLocation::getFromRawEncoding(Raw);
+ }
+
+ /// \brief Load a (possibly unaligned) pointer from a given address and
+ /// offset.
+ void *LoadPointer(void *Data, unsigned Offset) {
+ void *Result;
+ memcpy(&Result, static_cast<char *>(Data) + Offset, sizeof(void*));
+ return Result;
+ }
+}
+
+SourceRange NestedNameSpecifierLoc::getSourceRange() const {
+ if (!Qualifier)
+ return SourceRange();
+
+ NestedNameSpecifierLoc First = *this;
+ while (NestedNameSpecifierLoc Prefix = First.getPrefix())
+ First = Prefix;
+
+ return SourceRange(First.getLocalSourceRange().getBegin(),
+ getLocalSourceRange().getEnd());
+}
+
+SourceRange NestedNameSpecifierLoc::getLocalSourceRange() const {
+ if (!Qualifier)
+ return SourceRange();
+
+ unsigned Offset = getDataLength(Qualifier->getPrefix());
+ switch (Qualifier->getKind()) {
+ case NestedNameSpecifier::Global:
+ return LoadSourceLocation(Data, Offset);
+
+ case NestedNameSpecifier::Identifier:
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ case NestedNameSpecifier::Super:
+ return SourceRange(LoadSourceLocation(Data, Offset),
+ LoadSourceLocation(Data, Offset + sizeof(unsigned)));
+
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ case NestedNameSpecifier::TypeSpec: {
+ // The "void*" that points at the TypeLoc data.
+ // Note: the 'template' keyword is part of the TypeLoc.
+ void *TypeData = LoadPointer(Data, Offset);
+ TypeLoc TL(Qualifier->getAsType(), TypeData);
+ return SourceRange(TL.getBeginLoc(),
+ LoadSourceLocation(Data, Offset + sizeof(void*)));
+ }
+ }
+
+ llvm_unreachable("Invalid NNS Kind!");
+}
+
+TypeLoc NestedNameSpecifierLoc::getTypeLoc() const {
+ assert((Qualifier->getKind() == NestedNameSpecifier::TypeSpec ||
+ Qualifier->getKind() == NestedNameSpecifier::TypeSpecWithTemplate) &&
+ "Nested-name-specifier location is not a type");
+
+ // The "void*" that points at the TypeLoc data.
+ unsigned Offset = getDataLength(Qualifier->getPrefix());
+ void *TypeData = LoadPointer(Data, Offset);
+ return TypeLoc(Qualifier->getAsType(), TypeData);
+}
+
+namespace {
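+ /// \brief Append the bytes in [Start, End) to Buffer, at least doubling
+ /// its capacity whenever more room is needed.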
+ void Append(char *Start, char *End, char *&Buffer, unsigned &BufferSize,
+ unsigned &BufferCapacity) {
+ if (Start == End)
+ return;
+
+ if (BufferSize + (End - Start) > BufferCapacity) {
+ // Reallocate the buffer.
+ unsigned NewCapacity = std::max(
+ (unsigned)(BufferCapacity ? BufferCapacity * 2 : sizeof(void *) * 2),
+ (unsigned)(BufferSize + (End - Start)));
+ char *NewBuffer = static_cast<char *>(malloc(NewCapacity));
+ if (BufferCapacity) {
+ memcpy(NewBuffer, Buffer, BufferSize);
+ free(Buffer);
+ }
+ Buffer = NewBuffer;
+ BufferCapacity = NewCapacity;
+ }
+
+ memcpy(Buffer + BufferSize, Start, End - Start);
+ BufferSize += End-Start;
+ }
+
+ /// \brief Save a source location to the given buffer.
+ void SaveSourceLocation(SourceLocation Loc, char *&Buffer,
+ unsigned &BufferSize, unsigned &BufferCapacity) {
+ unsigned Raw = Loc.getRawEncoding();
+ Append(reinterpret_cast<char *>(&Raw),
+ reinterpret_cast<char *>(&Raw) + sizeof(unsigned),
+ Buffer, BufferSize, BufferCapacity);
+ }
+
+ /// \brief Save a pointer to the given buffer.
+ void SavePointer(void *Ptr, char *&Buffer, unsigned &BufferSize,
+ unsigned &BufferCapacity) {
+ Append(reinterpret_cast<char *>(&Ptr),
+ reinterpret_cast<char *>(&Ptr) + sizeof(void *),
+ Buffer, BufferSize, BufferCapacity);
+ }
+}
+
+NestedNameSpecifierLocBuilder::
+NestedNameSpecifierLocBuilder(const NestedNameSpecifierLocBuilder &Other)
+ : Representation(Other.Representation), Buffer(nullptr),
+ BufferSize(0), BufferCapacity(0)
+{
+ if (!Other.Buffer)
+ return;
+
+ if (Other.BufferCapacity == 0) {
+ // Shallow copy is okay.
+ Buffer = Other.Buffer;
+ BufferSize = Other.BufferSize;
+ return;
+ }
+
+ // Deep copy
+ Append(Other.Buffer, Other.Buffer + Other.BufferSize, Buffer, BufferSize,
+ BufferCapacity);
+}
+
+NestedNameSpecifierLocBuilder &
+NestedNameSpecifierLocBuilder::
+operator=(const NestedNameSpecifierLocBuilder &Other) {
+ Representation = Other.Representation;
+
+ if (Buffer && Other.Buffer && BufferCapacity >= Other.BufferSize) {
+ // Re-use our storage.
+ BufferSize = Other.BufferSize;
+ memcpy(Buffer, Other.Buffer, BufferSize);
+ return *this;
+ }
+
+ // Free our storage, if we have any.
+ if (BufferCapacity) {
+ free(Buffer);
+ BufferCapacity = 0;
+ }
+
+ if (!Other.Buffer) {
+ // Empty.
+ Buffer = nullptr;
+ BufferSize = 0;
+ return *this;
+ }
+
+ if (Other.BufferCapacity == 0) {
+ // Shallow copy is okay.
+ Buffer = Other.Buffer;
+ BufferSize = Other.BufferSize;
+ return *this;
+ }
+
+ // Deep copy.
+ Append(Other.Buffer, Other.Buffer + Other.BufferSize, Buffer, BufferSize,
+ BufferCapacity);
+ return *this;
+}
+
+void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
+ SourceLocation TemplateKWLoc,
+ TypeLoc TL,
+ SourceLocation ColonColonLoc) {
+ Representation = NestedNameSpecifier::Create(Context, Representation,
+ TemplateKWLoc.isValid(),
+ TL.getTypePtr());
+
+ // Push source-location info into the buffer.
+ SavePointer(TL.getOpaqueData(), Buffer, BufferSize, BufferCapacity);
+ SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
+}
+
+void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
+ IdentifierInfo *Identifier,
+ SourceLocation IdentifierLoc,
+ SourceLocation ColonColonLoc) {
+ Representation = NestedNameSpecifier::Create(Context, Representation,
+ Identifier);
+
+ // Push source-location info into the buffer.
+ SaveSourceLocation(IdentifierLoc, Buffer, BufferSize, BufferCapacity);
+ SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
+}
+
+void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
+ NamespaceDecl *Namespace,
+ SourceLocation NamespaceLoc,
+ SourceLocation ColonColonLoc) {
+ Representation = NestedNameSpecifier::Create(Context, Representation,
+ Namespace);
+
+ // Push source-location info into the buffer.
+ SaveSourceLocation(NamespaceLoc, Buffer, BufferSize, BufferCapacity);
+ SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
+}
+
+void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
+ NamespaceAliasDecl *Alias,
+ SourceLocation AliasLoc,
+ SourceLocation ColonColonLoc) {
+ Representation = NestedNameSpecifier::Create(Context, Representation, Alias);
+
+ // Push source-location info into the buffer.
+ SaveSourceLocation(AliasLoc, Buffer, BufferSize, BufferCapacity);
+ SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
+}
+
+void NestedNameSpecifierLocBuilder::MakeGlobal(ASTContext &Context,
+ SourceLocation ColonColonLoc) {
+ assert(!Representation && "Already have a nested-name-specifier!?");
+ Representation = NestedNameSpecifier::GlobalSpecifier(Context);
+
+ // Push source-location info into the buffer.
+ SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
+}
+
+void NestedNameSpecifierLocBuilder::MakeSuper(ASTContext &Context,
+ CXXRecordDecl *RD,
+ SourceLocation SuperLoc,
+ SourceLocation ColonColonLoc) {
+ Representation = NestedNameSpecifier::SuperSpecifier(Context, RD);
+
+ // Push source-location info into the buffer.
+ SaveSourceLocation(SuperLoc, Buffer, BufferSize, BufferCapacity);
+ SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
+}
+
+void NestedNameSpecifierLocBuilder::MakeTrivial(ASTContext &Context,
+ NestedNameSpecifier *Qualifier,
+ SourceRange R) {
+ Representation = Qualifier;
+
+ // Construct bogus (but well-formed) source information for the
+ // nested-name-specifier.
+ BufferSize = 0;
+ SmallVector<NestedNameSpecifier *, 4> Stack;
+ for (NestedNameSpecifier *NNS = Qualifier; NNS; NNS = NNS->getPrefix())
+ Stack.push_back(NNS);
+ while (!Stack.empty()) {
+ NestedNameSpecifier *NNS = Stack.pop_back_val();
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ SaveSourceLocation(R.getBegin(), Buffer, BufferSize, BufferCapacity);
+ break;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ TypeSourceInfo *TSInfo
+ = Context.getTrivialTypeSourceInfo(QualType(NNS->getAsType(), 0),
+ R.getBegin());
+ SavePointer(TSInfo->getTypeLoc().getOpaqueData(), Buffer, BufferSize,
+ BufferCapacity);
+ break;
+ }
+
+ case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Super:
+ break;
+ }
+
+ // Save the location of the '::'.
+ SaveSourceLocation(Stack.empty()? R.getEnd() : R.getBegin(),
+ Buffer, BufferSize, BufferCapacity);
+ }
+}
+
+void NestedNameSpecifierLocBuilder::Adopt(NestedNameSpecifierLoc Other) {
+ if (BufferCapacity)
+ free(Buffer);
+
+ if (!Other) {
+ Representation = nullptr;
+ BufferSize = 0;
+ return;
+ }
+
+ // Rather than copying the data (which is wasteful), "adopt" the
+ // pointer (which points into the ASTContext) but set the capacity to zero to
+ // indicate that we don't own it.
+ Representation = Other.getNestedNameSpecifier();
+ Buffer = static_cast<char *>(Other.getOpaqueData());
+ BufferSize = Other.getDataLength();
+ BufferCapacity = 0;
+}
+
+NestedNameSpecifierLoc
+NestedNameSpecifierLocBuilder::getWithLocInContext(ASTContext &Context) const {
+ if (!Representation)
+ return NestedNameSpecifierLoc();
+
+ // If we adopted our data pointer from elsewhere in the AST context, there's
+ // no need to copy the memory.
+ if (BufferCapacity == 0)
+ return NestedNameSpecifierLoc(Representation, Buffer);
+
+ // FIXME: After copying the source-location information, should we free
+ // our (temporary) buffer and adopt the ASTContext-allocated memory?
+ // Doing so would optimize repeated calls to getWithLocInContext().
+ void *Mem = Context.Allocate(BufferSize, llvm::alignOf<void *>());
+ memcpy(Mem, Buffer, BufferSize);
+ return NestedNameSpecifierLoc(Representation, Mem);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/OpenMPClause.cpp b/contrib/llvm/tools/clang/lib/AST/OpenMPClause.cpp
new file mode 100644
index 0000000..1ef43f7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/OpenMPClause.cpp
@@ -0,0 +1,417 @@
+//===--- OpenMPClause.cpp - Classes for OpenMP clauses --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the subclasses of the OMPClause class declared in
+// OpenMPClause.h
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/OpenMPClause.h"
+
+#include "clang/AST/ASTContext.h"
+
+using namespace clang;
+
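+// Dispatch to the concrete clause's children() implementation, using the
+// clause list from OpenMPKinds.def.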
+OMPClause::child_range OMPClause::children() {
+ switch (getClauseKind()) {
+ default:
+ break;
+#define OPENMP_CLAUSE(Name, Class) \
+ case OMPC_##Name: \
+ return static_cast<Class *>(this)->children();
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("unknown OMPClause");
+}
+
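+// For each of these clauses the expression lists live in trailing storage:
+// the variable references come first, followed by the companion lists in
+// the order the set*() methods copy them in.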
+void OMPPrivateClause::setPrivateCopies(ArrayRef<Expr *> VL) {
+ assert(VL.size() == varlist_size() &&
+ "Number of private copies is not the same as the preallocated buffer");
+ std::copy(VL.begin(), VL.end(), varlist_end());
+}
+
+OMPPrivateClause *
+OMPPrivateClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc,
+ ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL) {
+ // Allocate space for the variable references and their private copies.
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(2 * VL.size()));
+ OMPPrivateClause *Clause =
+ new (Mem) OMPPrivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ Clause->setPrivateCopies(PrivateVL);
+ return Clause;
+}
+
+OMPPrivateClause *OMPPrivateClause::CreateEmpty(const ASTContext &C,
+ unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(2 * N));
+ return new (Mem) OMPPrivateClause(N);
+}
+
+void OMPFirstprivateClause::setPrivateCopies(ArrayRef<Expr *> VL) {
+ assert(VL.size() == varlist_size() &&
+ "Number of private copies is not the same as the preallocated buffer");
+ std::copy(VL.begin(), VL.end(), varlist_end());
+}
+
+void OMPFirstprivateClause::setInits(ArrayRef<Expr *> VL) {
+ assert(VL.size() == varlist_size() &&
+ "Number of inits is not the same as the preallocated buffer");
+ std::copy(VL.begin(), VL.end(), getPrivateCopies().end());
+}
+
+OMPFirstprivateClause *
+OMPFirstprivateClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc,
+ ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
+ ArrayRef<Expr *> InitVL) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(3 * VL.size()));
+ OMPFirstprivateClause *Clause =
+ new (Mem) OMPFirstprivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ Clause->setPrivateCopies(PrivateVL);
+ Clause->setInits(InitVL);
+ return Clause;
+}
+
+OMPFirstprivateClause *OMPFirstprivateClause::CreateEmpty(const ASTContext &C,
+ unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(3 * N));
+ return new (Mem) OMPFirstprivateClause(N);
+}
+
+void OMPLastprivateClause::setPrivateCopies(ArrayRef<Expr *> PrivateCopies) {
+ assert(PrivateCopies.size() == varlist_size() &&
+ "Number of private copies is not the same as the preallocated buffer");
+ std::copy(PrivateCopies.begin(), PrivateCopies.end(), varlist_end());
+}
+
+void OMPLastprivateClause::setSourceExprs(ArrayRef<Expr *> SrcExprs) {
+ assert(SrcExprs.size() == varlist_size() && "Number of source expressions is "
+ "not the same as the "
+ "preallocated buffer");
+ std::copy(SrcExprs.begin(), SrcExprs.end(), getPrivateCopies().end());
+}
+
+void OMPLastprivateClause::setDestinationExprs(ArrayRef<Expr *> DstExprs) {
+ assert(DstExprs.size() == varlist_size() && "Number of destination "
+ "expressions is not the same as "
+ "the preallocated buffer");
+ std::copy(DstExprs.begin(), DstExprs.end(), getSourceExprs().end());
+}
+
+void OMPLastprivateClause::setAssignmentOps(ArrayRef<Expr *> AssignmentOps) {
+ assert(AssignmentOps.size() == varlist_size() &&
+ "Number of assignment expressions is not the same as the preallocated "
+ "buffer");
+ std::copy(AssignmentOps.begin(), AssignmentOps.end(),
+ getDestinationExprs().end());
+}
+
+OMPLastprivateClause *OMPLastprivateClause::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
+ ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * VL.size()));
+ OMPLastprivateClause *Clause =
+ new (Mem) OMPLastprivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ Clause->setSourceExprs(SrcExprs);
+ Clause->setDestinationExprs(DstExprs);
+ Clause->setAssignmentOps(AssignmentOps);
+ return Clause;
+}
+
+OMPLastprivateClause *OMPLastprivateClause::CreateEmpty(const ASTContext &C,
+ unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * N));
+ return new (Mem) OMPLastprivateClause(N);
+}
+
+OMPSharedClause *OMPSharedClause::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc,
+ ArrayRef<Expr *> VL) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size()));
+ OMPSharedClause *Clause =
+ new (Mem) OMPSharedClause(StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ return Clause;
+}
+
+OMPSharedClause *OMPSharedClause::CreateEmpty(const ASTContext &C, unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
+ return new (Mem) OMPSharedClause(N);
+}
+
+void OMPLinearClause::setPrivates(ArrayRef<Expr *> PL) {
+ assert(PL.size() == varlist_size() &&
+ "Number of privates is not the same as the preallocated buffer");
+ std::copy(PL.begin(), PL.end(), varlist_end());
+}
+
+void OMPLinearClause::setInits(ArrayRef<Expr *> IL) {
+ assert(IL.size() == varlist_size() &&
+ "Number of inits is not the same as the preallocated buffer");
+ std::copy(IL.begin(), IL.end(), getPrivates().end());
+}
+
+void OMPLinearClause::setUpdates(ArrayRef<Expr *> UL) {
+ assert(UL.size() == varlist_size() &&
+ "Number of updates is not the same as the preallocated buffer");
+ std::copy(UL.begin(), UL.end(), getInits().end());
+}
+
+void OMPLinearClause::setFinals(ArrayRef<Expr *> FL) {
+ assert(FL.size() == varlist_size() &&
+ "Number of final updates is not the same as the preallocated buffer");
+ std::copy(FL.begin(), FL.end(), getUpdates().end());
+}
+
+OMPLinearClause *OMPLinearClause::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
+ OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
+ SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
+ ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep) {
+ // Allocate space for 5 lists (Vars, Privates, Inits, Updates, Finals) and
+ // 2 expressions (Step and CalcStep).
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * VL.size() + 2));
+ OMPLinearClause *Clause = new (Mem) OMPLinearClause(
+ StartLoc, LParenLoc, Modifier, ModifierLoc, ColonLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ Clause->setPrivates(PL);
+ Clause->setInits(IL);
+ // Fill the update and final expressions with nulls; they are supplied
+ // later, after the directive has been constructed.
+ std::fill(Clause->getInits().end(), Clause->getInits().end() + VL.size(),
+ nullptr);
+ std::fill(Clause->getUpdates().end(), Clause->getUpdates().end() + VL.size(),
+ nullptr);
+ Clause->setStep(Step);
+ Clause->setCalcStep(CalcStep);
+ return Clause;
+}
+
+OMPLinearClause *OMPLinearClause::CreateEmpty(const ASTContext &C,
+ unsigned NumVars) {
+ // Allocate space for 5 lists (Vars, Privates, Inits, Updates, Finals) and
+ // 2 expressions (Step and CalcStep).
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * NumVars + 2));
+ return new (Mem) OMPLinearClause(NumVars);
+}
+
+OMPAlignedClause *
+OMPAlignedClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation ColonLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> VL, Expr *A) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size() + 1));
+ OMPAlignedClause *Clause = new (Mem)
+ OMPAlignedClause(StartLoc, LParenLoc, ColonLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ Clause->setAlignment(A);
+ return Clause;
+}
+
+OMPAlignedClause *OMPAlignedClause::CreateEmpty(const ASTContext &C,
+ unsigned NumVars) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(NumVars + 1));
+ return new (Mem) OMPAlignedClause(NumVars);
+}
+
+void OMPCopyinClause::setSourceExprs(ArrayRef<Expr *> SrcExprs) {
+ assert(SrcExprs.size() == varlist_size() && "Number of source expressions is "
+ "not the same as the "
+ "preallocated buffer");
+ std::copy(SrcExprs.begin(), SrcExprs.end(), varlist_end());
+}
+
+void OMPCopyinClause::setDestinationExprs(ArrayRef<Expr *> DstExprs) {
+ assert(DstExprs.size() == varlist_size() && "Number of destination "
+ "expressions is not the same as "
+ "the preallocated buffer");
+ std::copy(DstExprs.begin(), DstExprs.end(), getSourceExprs().end());
+}
+
+void OMPCopyinClause::setAssignmentOps(ArrayRef<Expr *> AssignmentOps) {
+ assert(AssignmentOps.size() == varlist_size() &&
+ "Number of assignment expressions is not the same as the preallocated "
+ "buffer");
+ std::copy(AssignmentOps.begin(), AssignmentOps.end(),
+ getDestinationExprs().end());
+}
+
+OMPCopyinClause *OMPCopyinClause::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
+ ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(4 * VL.size()));
+ OMPCopyinClause *Clause =
+ new (Mem) OMPCopyinClause(StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ Clause->setSourceExprs(SrcExprs);
+ Clause->setDestinationExprs(DstExprs);
+ Clause->setAssignmentOps(AssignmentOps);
+ return Clause;
+}
+
+OMPCopyinClause *OMPCopyinClause::CreateEmpty(const ASTContext &C, unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(4 * N));
+ return new (Mem) OMPCopyinClause(N);
+}
+
+void OMPCopyprivateClause::setSourceExprs(ArrayRef<Expr *> SrcExprs) {
+ assert(SrcExprs.size() == varlist_size() && "Number of source expressions is "
+ "not the same as the "
+ "preallocated buffer");
+ std::copy(SrcExprs.begin(), SrcExprs.end(), varlist_end());
+}
+
+void OMPCopyprivateClause::setDestinationExprs(ArrayRef<Expr *> DstExprs) {
+ assert(DstExprs.size() == varlist_size() && "Number of destination "
+ "expressions is not the same as "
+ "the preallocated buffer");
+ std::copy(DstExprs.begin(), DstExprs.end(), getSourceExprs().end());
+}
+
+void OMPCopyprivateClause::setAssignmentOps(ArrayRef<Expr *> AssignmentOps) {
+ assert(AssignmentOps.size() == varlist_size() &&
+ "Number of assignment expressions is not the same as the preallocated "
+ "buffer");
+ std::copy(AssignmentOps.begin(), AssignmentOps.end(),
+ getDestinationExprs().end());
+}
+
+OMPCopyprivateClause *OMPCopyprivateClause::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
+ ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(4 * VL.size()));
+ OMPCopyprivateClause *Clause =
+ new (Mem) OMPCopyprivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ Clause->setSourceExprs(SrcExprs);
+ Clause->setDestinationExprs(DstExprs);
+ Clause->setAssignmentOps(AssignmentOps);
+ return Clause;
+}
+
+OMPCopyprivateClause *OMPCopyprivateClause::CreateEmpty(const ASTContext &C,
+ unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(4 * N));
+ return new (Mem) OMPCopyprivateClause(N);
+}
+
+void OMPReductionClause::setPrivates(ArrayRef<Expr *> Privates) {
+ assert(Privates.size() == varlist_size() &&
+ "Number of private copies is not the same as the preallocated buffer");
+ std::copy(Privates.begin(), Privates.end(), varlist_end());
+}
+
+void OMPReductionClause::setLHSExprs(ArrayRef<Expr *> LHSExprs) {
+ assert(
+ LHSExprs.size() == varlist_size() &&
+ "Number of LHS expressions is not the same as the preallocated buffer");
+ std::copy(LHSExprs.begin(), LHSExprs.end(), getPrivates().end());
+}
+
+void OMPReductionClause::setRHSExprs(ArrayRef<Expr *> RHSExprs) {
+ assert(
+ RHSExprs.size() == varlist_size() &&
+ "Number of RHS expressions is not the same as the preallocated buffer");
+ std::copy(RHSExprs.begin(), RHSExprs.end(), getLHSExprs().end());
+}
+
+void OMPReductionClause::setReductionOps(ArrayRef<Expr *> ReductionOps) {
+ assert(ReductionOps.size() == varlist_size() && "Number of reduction "
+ "expressions is not the same "
+ "as the preallocated buffer");
+ std::copy(ReductionOps.begin(), ReductionOps.end(), getRHSExprs().end());
+}
+
+OMPReductionClause *OMPReductionClause::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL,
+ NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo,
+ ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs,
+ ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps) {
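+ // Trailing storage holds 5 lists of VL.size() expressions each, laid out
+ // back to back: [Vars][Privates][LHSExprs][RHSExprs][ReductionOps].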
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * VL.size()));
+ OMPReductionClause *Clause = new (Mem) OMPReductionClause(
+ StartLoc, LParenLoc, EndLoc, ColonLoc, VL.size(), QualifierLoc, NameInfo);
+ Clause->setVarRefs(VL);
+ Clause->setPrivates(Privates);
+ Clause->setLHSExprs(LHSExprs);
+ Clause->setRHSExprs(RHSExprs);
+ Clause->setReductionOps(ReductionOps);
+ return Clause;
+}
+
+OMPReductionClause *OMPReductionClause::CreateEmpty(const ASTContext &C,
+ unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * N));
+ return new (Mem) OMPReductionClause(N);
+}
+
+OMPFlushClause *OMPFlushClause::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc,
+ ArrayRef<Expr *> VL) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size()));
+ OMPFlushClause *Clause =
+ new (Mem) OMPFlushClause(StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ return Clause;
+}
+
+OMPFlushClause *OMPFlushClause::CreateEmpty(const ASTContext &C, unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
+ return new (Mem) OMPFlushClause(N);
+}
+
+OMPDependClause *
+OMPDependClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc,
+ OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
+ SourceLocation ColonLoc, ArrayRef<Expr *> VL) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size()));
+ OMPDependClause *Clause =
+ new (Mem) OMPDependClause(StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ Clause->setDependencyKind(DepKind);
+ Clause->setDependencyLoc(DepLoc);
+ Clause->setColonLoc(ColonLoc);
+ return Clause;
+}
+
+OMPDependClause *OMPDependClause::CreateEmpty(const ASTContext &C, unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
+ return new (Mem) OMPDependClause(N);
+}
+
+OMPMapClause *OMPMapClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> VL,
+ OpenMPMapClauseKind TypeModifier,
+ OpenMPMapClauseKind Type,
+ SourceLocation TypeLoc) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size()));
+ OMPMapClause *Clause = new (Mem) OMPMapClause(
+ TypeModifier, Type, TypeLoc, StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ Clause->setMapTypeModifier(TypeModifier);
+ Clause->setMapType(Type);
+ Clause->setMapLoc(TypeLoc);
+ return Clause;
+}
+
+OMPMapClause *OMPMapClause::CreateEmpty(const ASTContext &C, unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
+ return new (Mem) OMPMapClause(N);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp b/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp
new file mode 100644
index 0000000..d7d5f9c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp
@@ -0,0 +1,198 @@
+//===--- ParentMap.cpp - Mappings from Stmts to their Parents ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ParentMap class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "llvm/ADT/DenseMap.h"
+
+using namespace clang;
+
+typedef llvm::DenseMap<Stmt*, Stmt*> MapTy;
+
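+// OV_Transparent: an OpaqueValueExpr becomes the parent of its source
+// expression, replacing any previously recorded parent.
+// OV_Opaque: the source expression only gets a parent if it does not have
+// one already (see the OpaqueValueExprClass case below).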
+enum OpaqueValueMode {
+ OV_Transparent,
+ OV_Opaque
+};
+
+static void BuildParentMap(MapTy& M, Stmt* S,
+ OpaqueValueMode OVMode = OV_Transparent) {
+
+ switch (S->getStmtClass()) {
+ case Stmt::PseudoObjectExprClass: {
+ assert(OVMode == OV_Transparent && "Should not appear alongside OVEs");
+ PseudoObjectExpr *POE = cast<PseudoObjectExpr>(S);
+
+ // If we are rebuilding the map, clear out any existing state.
+ if (M[POE->getSyntacticForm()])
+ for (Stmt *SubStmt : S->children())
+ M[SubStmt] = nullptr;
+
+ M[POE->getSyntacticForm()] = S;
+ BuildParentMap(M, POE->getSyntacticForm(), OV_Transparent);
+
+ for (PseudoObjectExpr::semantics_iterator I = POE->semantics_begin(),
+ E = POE->semantics_end();
+ I != E; ++I) {
+ M[*I] = S;
+ BuildParentMap(M, *I, OV_Opaque);
+ }
+ break;
+ }
+ case Stmt::BinaryConditionalOperatorClass: {
+ assert(OVMode == OV_Transparent && "Should not appear alongside OVEs");
+ BinaryConditionalOperator *BCO = cast<BinaryConditionalOperator>(S);
+
+ M[BCO->getCommon()] = S;
+ BuildParentMap(M, BCO->getCommon(), OV_Transparent);
+
+ M[BCO->getCond()] = S;
+ BuildParentMap(M, BCO->getCond(), OV_Opaque);
+
+ M[BCO->getTrueExpr()] = S;
+ BuildParentMap(M, BCO->getTrueExpr(), OV_Opaque);
+
+ M[BCO->getFalseExpr()] = S;
+ BuildParentMap(M, BCO->getFalseExpr(), OV_Transparent);
+
+ break;
+ }
+ case Stmt::OpaqueValueExprClass: {
+ // FIXME: This isn't correct; it assumes that multiple OpaqueValueExprs
+ // share a single source expression, but in the AST a single
+ // OpaqueValueExpr is shared among multiple parent expressions.
+ // The right thing to do is to give the OpaqueValueExpr its syntactic
+ // parent, then not reassign that when traversing the semantic expressions.
+ OpaqueValueExpr *OVE = cast<OpaqueValueExpr>(S);
+ if (OVMode == OV_Transparent || !M[OVE->getSourceExpr()]) {
+ M[OVE->getSourceExpr()] = S;
+ BuildParentMap(M, OVE->getSourceExpr(), OV_Transparent);
+ }
+ break;
+ }
+ default:
+ for (Stmt *SubStmt : S->children()) {
+ if (SubStmt) {
+ M[SubStmt] = S;
+ BuildParentMap(M, SubStmt, OVMode);
+ }
+ }
+ break;
+ }
+}
+
+ParentMap::ParentMap(Stmt *S) : Impl(nullptr) {
+ if (S) {
+ MapTy *M = new MapTy();
+ BuildParentMap(*M, S);
+ Impl = M;
+ }
+}
+
+ParentMap::~ParentMap() {
+ delete (MapTy*) Impl;
+}
+
+void ParentMap::addStmt(Stmt* S) {
+ if (S) {
+ BuildParentMap(*(MapTy*) Impl, S);
+ }
+}
+
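+// Note that DenseMap::insert does not overwrite: if S already has a
+// recorded parent, this call leaves it unchanged.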
+void ParentMap::setParent(const Stmt *S, const Stmt *Parent) {
+ assert(S);
+ assert(Parent);
+ MapTy *M = reinterpret_cast<MapTy *>(Impl);
+ M->insert(std::make_pair(const_cast<Stmt *>(S), const_cast<Stmt *>(Parent)));
+}
+
+Stmt* ParentMap::getParent(Stmt* S) const {
+ MapTy* M = (MapTy*) Impl;
+ MapTy::iterator I = M->find(S);
+ return I == M->end() ? nullptr : I->second;
+}
+
+Stmt *ParentMap::getParentIgnoreParens(Stmt *S) const {
+ do { S = getParent(S); } while (S && isa<ParenExpr>(S));
+ return S;
+}
+
+Stmt *ParentMap::getParentIgnoreParenCasts(Stmt *S) const {
+ do {
+ S = getParent(S);
+ }
+ while (S && (isa<ParenExpr>(S) || isa<CastExpr>(S)));
+
+ return S;
+}
+
+Stmt *ParentMap::getParentIgnoreParenImpCasts(Stmt *S) const {
+ do {
+ S = getParent(S);
+ } while (S && isa<Expr>(S) && cast<Expr>(S)->IgnoreParenImpCasts() != S);
+
+ return S;
+}
+
+Stmt *ParentMap::getOuterParenParent(Stmt *S) const {
+ Stmt *Paren = nullptr;
+ while (isa<ParenExpr>(S)) {
+ Paren = S;
+ S = getParent(S);
+ }
+ return Paren;
+}
+
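+// Returns whether the value of E is actually used by its context, e.g. as
+// the right-hand side of a comma operator, the condition of a loop or
+// branch, a DeclStmt initializer, or a returned value; a bare expression
+// statement or the left-hand side of a comma is not consumed.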
+bool ParentMap::isConsumedExpr(Expr* E) const {
+ Stmt *P = getParent(E);
+ Stmt *DirectChild = E;
+
+ // Ignore parents that don't guarantee consumption.
+ while (P && (isa<ParenExpr>(P) || isa<CastExpr>(P) ||
+ isa<ExprWithCleanups>(P))) {
+ DirectChild = P;
+ P = getParent(P);
+ }
+
+ if (!P)
+ return false;
+
+ switch (P->getStmtClass()) {
+ default:
+ return isa<Expr>(P);
+ case Stmt::DeclStmtClass:
+ return true;
+ case Stmt::BinaryOperatorClass: {
+ BinaryOperator *BE = cast<BinaryOperator>(P);
+ // If it is a comma, only the right side is consumed.
+ // If it isn't a comma, both sides are consumed.
+ return BE->getOpcode() != BO_Comma || DirectChild == BE->getRHS();
+ }
+ case Stmt::ForStmtClass:
+ return DirectChild == cast<ForStmt>(P)->getCond();
+ case Stmt::WhileStmtClass:
+ return DirectChild == cast<WhileStmt>(P)->getCond();
+ case Stmt::DoStmtClass:
+ return DirectChild == cast<DoStmt>(P)->getCond();
+ case Stmt::IfStmtClass:
+ return DirectChild == cast<IfStmt>(P)->getCond();
+ case Stmt::IndirectGotoStmtClass:
+ return DirectChild == cast<IndirectGotoStmt>(P)->getTarget();
+ case Stmt::SwitchStmtClass:
+ return DirectChild == cast<SwitchStmt>(P)->getCond();
+ case Stmt::ReturnStmtClass:
+ return true;
+ }
+}
+
diff --git a/contrib/llvm/tools/clang/lib/AST/RawCommentList.cpp b/contrib/llvm/tools/clang/lib/AST/RawCommentList.cpp
new file mode 100644
index 0000000..8317f76
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/RawCommentList.cpp
@@ -0,0 +1,337 @@
+//===--- RawCommentList.cpp - Processing raw comments -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/RawCommentList.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Comment.h"
+#include "clang/AST/CommentBriefParser.h"
+#include "clang/AST/CommentCommandTraits.h"
+#include "clang/AST/CommentLexer.h"
+#include "clang/AST/CommentParser.h"
+#include "clang/AST/CommentSema.h"
+#include "clang/Basic/CharInfo.h"
+#include "llvm/ADT/STLExtras.h"
+
+using namespace clang;
+
+namespace {
+/// Get the comment kind and a bool describing whether it is a trailing
+/// comment.
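+/// The marker prefixes recognized here are:
+///   "///" -> RCK_BCPLSlash, "//!" -> RCK_BCPLExcl,
+///   "/**" -> RCK_JavaDoc,   "/*!" -> RCK_Qt;
+/// anything else is an ordinary (or invalid) comment, and a '<' in the
+/// fourth position (e.g. "///<") marks a trailing comment.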
+std::pair<RawComment::CommentKind, bool> getCommentKind(StringRef Comment,
+ bool ParseAllComments) {
+ const size_t MinCommentLength = ParseAllComments ? 2 : 3;
+ if ((Comment.size() < MinCommentLength) || Comment[0] != '/')
+ return std::make_pair(RawComment::RCK_Invalid, false);
+
+ RawComment::CommentKind K;
+ if (Comment[1] == '/') {
+ if (Comment.size() < 3)
+ return std::make_pair(RawComment::RCK_OrdinaryBCPL, false);
+
+ if (Comment[2] == '/')
+ K = RawComment::RCK_BCPLSlash;
+ else if (Comment[2] == '!')
+ K = RawComment::RCK_BCPLExcl;
+ else
+ return std::make_pair(RawComment::RCK_OrdinaryBCPL, false);
+ } else {
+ assert(Comment.size() >= 4);
+
+ // Comment lexer does not understand escapes in comment markers, so pretend
+ // that this is not a comment.
+ if (Comment[1] != '*' ||
+ Comment[Comment.size() - 2] != '*' ||
+ Comment[Comment.size() - 1] != '/')
+ return std::make_pair(RawComment::RCK_Invalid, false);
+
+ if (Comment[2] == '*')
+ K = RawComment::RCK_JavaDoc;
+ else if (Comment[2] == '!')
+ K = RawComment::RCK_Qt;
+ else
+ return std::make_pair(RawComment::RCK_OrdinaryC, false);
+ }
+ const bool TrailingComment = (Comment.size() > 3) && (Comment[3] == '<');
+ return std::make_pair(K, TrailingComment);
+}
+
+bool mergedCommentIsTrailingComment(StringRef Comment) {
+ return (Comment.size() > 3) && (Comment[3] == '<');
+}
+
+/// Returns true if R1 and R2 both have valid locations that start on the same
+/// column.
+bool commentsStartOnSameColumn(const SourceManager &SM, const RawComment &R1,
+ const RawComment &R2) {
+ SourceLocation L1 = R1.getLocStart();
+ SourceLocation L2 = R2.getLocStart();
+ bool Invalid = false;
+ unsigned C1 = SM.getPresumedColumnNumber(L1, &Invalid);
+ if (!Invalid) {
+ unsigned C2 = SM.getPresumedColumnNumber(L2, &Invalid);
+ return !Invalid && (C1 == C2);
+ }
+ return false;
+}
+} // unnamed namespace
+
+/// \brief Determines whether there is only whitespace in `Buffer` between `P`
+/// and the previous line.
+/// \param Buffer The buffer to search in.
+/// \param P The offset from the beginning of `Buffer` to start from.
+/// \return true if all of the characters in `Buffer` ranging from the closest
+/// line-ending character before `P` (or the beginning of `Buffer`) to `P - 1`
+/// are whitespace.
+static bool onlyWhitespaceOnLineBefore(const char *Buffer, unsigned P) {
+ // Search backwards until we see linefeed or carriage return.
+ for (unsigned I = P; I != 0; --I) {
+ char C = Buffer[I - 1];
+ if (isVerticalWhitespace(C))
+ return true;
+ if (!isHorizontalWhitespace(C))
+ return false;
+ }
+ // We hit the beginning of the buffer.
+ return true;
+}
+
+/// Returns whether `K` is an ordinary comment kind.
+static bool isOrdinaryKind(RawComment::CommentKind K) {
+ return (K == RawComment::RCK_OrdinaryBCPL) ||
+ (K == RawComment::RCK_OrdinaryC);
+}
+
+RawComment::RawComment(const SourceManager &SourceMgr, SourceRange SR,
+ bool Merged, bool ParseAllComments) :
+ Range(SR), RawTextValid(false), BriefTextValid(false),
+ IsAttached(false), IsTrailingComment(false), IsAlmostTrailingComment(false),
+ ParseAllComments(ParseAllComments) {
+ // Extract raw comment text, if possible.
+ if (SR.getBegin() == SR.getEnd() || getRawText(SourceMgr).empty()) {
+ Kind = RCK_Invalid;
+ return;
+ }
+
+ // Guess comment kind.
+ std::pair<CommentKind, bool> K = getCommentKind(RawText, ParseAllComments);
+
+ // Guess whether an ordinary comment is trailing.
+ if (ParseAllComments && isOrdinaryKind(K.first)) {
+ FileID BeginFileID;
+ unsigned BeginOffset;
+ std::tie(BeginFileID, BeginOffset) =
+ SourceMgr.getDecomposedLoc(Range.getBegin());
+ if (BeginOffset != 0) {
+ bool Invalid = false;
+ const char *Buffer =
+ SourceMgr.getBufferData(BeginFileID, &Invalid).data();
+ IsTrailingComment |=
+ (!Invalid && !onlyWhitespaceOnLineBefore(Buffer, BeginOffset));
+ }
+ }
+
+ if (!Merged) {
+ Kind = K.first;
+ IsTrailingComment |= K.second;
+
+ IsAlmostTrailingComment = RawText.startswith("//<") ||
+ RawText.startswith("/*<");
+ } else {
+ Kind = RCK_Merged;
+ IsTrailingComment =
+ IsTrailingComment || mergedCommentIsTrailingComment(RawText);
+ }
+}
+
+StringRef RawComment::getRawTextSlow(const SourceManager &SourceMgr) const {
+ FileID BeginFileID;
+ FileID EndFileID;
+ unsigned BeginOffset;
+ unsigned EndOffset;
+
+ std::tie(BeginFileID, BeginOffset) =
+ SourceMgr.getDecomposedLoc(Range.getBegin());
+ std::tie(EndFileID, EndOffset) = SourceMgr.getDecomposedLoc(Range.getEnd());
+
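+ // Even the shortest comment ("//") spans two characters, so a shorter
+ // range cannot contain a comment.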
+ const unsigned Length = EndOffset - BeginOffset;
+ if (Length < 2)
+ return StringRef();
+
+ // The comment can't begin in one file and end in another.
+ assert(BeginFileID == EndFileID);
+
+ bool Invalid = false;
+ const char *BufferStart = SourceMgr.getBufferData(BeginFileID,
+ &Invalid).data();
+ if (Invalid)
+ return StringRef();
+
+ return StringRef(BufferStart + BeginOffset, Length);
+}
+
+const char *RawComment::extractBriefText(const ASTContext &Context) const {
+ // Make sure that RawText is valid.
+ getRawText(Context.getSourceManager());
+
+ // Since we will be copying the resulting text, all allocations made during
+ // parsing are garbage after resulting string is formed. Thus we can use
+ // a separate allocator for all temporary stuff.
+ llvm::BumpPtrAllocator Allocator;
+
+ comments::Lexer L(Allocator, Context.getDiagnostics(),
+ Context.getCommentCommandTraits(),
+ Range.getBegin(),
+ RawText.begin(), RawText.end());
+ comments::BriefParser P(L, Context.getCommentCommandTraits());
+
+ const std::string Result = P.Parse();
+ const unsigned BriefTextLength = Result.size();
+ char *BriefTextPtr = new (Context) char[BriefTextLength + 1];
+ memcpy(BriefTextPtr, Result.c_str(), BriefTextLength + 1);
+ BriefText = BriefTextPtr;
+ BriefTextValid = true;
+
+ return BriefTextPtr;
+}
+
+comments::FullComment *RawComment::parse(const ASTContext &Context,
+ const Preprocessor *PP,
+ const Decl *D) const {
+ // Make sure that RawText is valid.
+ getRawText(Context.getSourceManager());
+
+ comments::Lexer L(Context.getAllocator(), Context.getDiagnostics(),
+ Context.getCommentCommandTraits(),
+ getSourceRange().getBegin(),
+ RawText.begin(), RawText.end());
+ comments::Sema S(Context.getAllocator(), Context.getSourceManager(),
+ Context.getDiagnostics(),
+ Context.getCommentCommandTraits(),
+ PP);
+ S.setDecl(D);
+ comments::Parser P(L, S, Context.getAllocator(), Context.getSourceManager(),
+ Context.getDiagnostics(),
+ Context.getCommentCommandTraits());
+
+ return P.parseFullComment();
+}
+
+static bool onlyWhitespaceBetween(SourceManager &SM,
+ SourceLocation Loc1, SourceLocation Loc2,
+ unsigned MaxNewlinesAllowed) {
+ std::pair<FileID, unsigned> Loc1Info = SM.getDecomposedLoc(Loc1);
+ std::pair<FileID, unsigned> Loc2Info = SM.getDecomposedLoc(Loc2);
+
+ // The question does not make sense if the locations are in different files.
+ if (Loc1Info.first != Loc2Info.first)
+ return false;
+
+ bool Invalid = false;
+ const char *Buffer = SM.getBufferData(Loc1Info.first, &Invalid).data();
+ if (Invalid)
+ return false;
+
+ unsigned NumNewlines = 0;
+ assert(Loc1Info.second <= Loc2Info.second && "Loc1 after Loc2!");
+ // Look for non-whitespace characters and remember any newlines seen.
+ for (unsigned I = Loc1Info.second; I != Loc2Info.second; ++I) {
+ switch (Buffer[I]) {
+ default:
+ return false;
+ case ' ':
+ case '\t':
+ case '\f':
+ case '\v':
+ break;
+ case '\r':
+ case '\n':
+ ++NumNewlines;
+
+ // Check if we have found more than the maximum allowed number of
+ // newlines.
+ if (NumNewlines > MaxNewlinesAllowed)
+ return false;
+
+ // Collapse \r\n and \n\r into a single newline.
+ if (I + 1 != Loc2Info.second &&
+ (Buffer[I + 1] == '\n' || Buffer[I + 1] == '\r') &&
+ Buffer[I] != Buffer[I + 1])
+ ++I;
+ break;
+ }
+ }
+
+ return true;
+}
+
+void RawCommentList::addComment(const RawComment &RC,
+ llvm::BumpPtrAllocator &Allocator) {
+ if (RC.isInvalid())
+ return;
+
+ // Check whether the new comment is in source order with respect to the
+ // comments already collected.
+ while (!Comments.empty() &&
+ !SourceMgr.isBeforeInTranslationUnit(Comments.back()->getLocStart(),
+ RC.getLocStart())) {
+ // If it is not, pop the last few comments that don't fit.
+ // This happens if an \#include directive contains comments.
+ Comments.pop_back();
+ }
+
+ // Ordinary comments are not interesting for us.
+ if (RC.isOrdinary())
+ return;
+
+ // If this is the first Doxygen comment, save it (because there isn't
+ // anything to merge it with).
+ if (Comments.empty()) {
+ Comments.push_back(new (Allocator) RawComment(RC));
+ return;
+ }
+
+ const RawComment &C1 = *Comments.back();
+ const RawComment &C2 = RC;
+
+ // Merge comments only if there is only whitespace between them.
+ // Can't merge a trailing and a non-trailing comment unless the second is a
+ // non-trailing ordinary comment in the same column, as in the case:
+ // int x; // documents x
+ // // more text
+ // versus:
+ // int x; // documents x
+ // int y; // documents y
+ // or:
+ // int x; // documents x
+ // // documents y
+ // int y;
+ // Merge comments if they are on same or consecutive lines.
+ if ((C1.isTrailingComment() == C2.isTrailingComment() ||
+ (C1.isTrailingComment() && !C2.isTrailingComment() &&
+ isOrdinaryKind(C2.getKind()) &&
+ commentsStartOnSameColumn(SourceMgr, C1, C2))) &&
+ onlyWhitespaceBetween(SourceMgr, C1.getLocEnd(), C2.getLocStart(),
+ /*MaxNewlinesAllowed=*/1)) {
+ SourceRange MergedRange(C1.getLocStart(), C2.getLocEnd());
+ *Comments.back() = RawComment(SourceMgr, MergedRange, true,
+ RC.isParseAllComments());
+ } else {
+ Comments.push_back(new (Allocator) RawComment(RC));
+ }
+}
+
+void RawCommentList::addDeserializedComments(
+    ArrayRef<RawComment *> DeserializedComments) {
+ std::vector<RawComment *> MergedComments;
+ MergedComments.reserve(Comments.size() + DeserializedComments.size());
+
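+ // std::merge requires both input ranges to be sorted: Comments is kept in
+ // source order by addComment(), and the deserialized comments are assumed
+ // to arrive already sorted.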
+ std::merge(Comments.begin(), Comments.end(),
+ DeserializedComments.begin(), DeserializedComments.end(),
+ std::back_inserter(MergedComments),
+ BeforeThanCompare<RawComment>(SourceMgr));
+ std::swap(Comments, MergedComments);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp b/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp
new file mode 100644
index 0000000..b2c244e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp
@@ -0,0 +1,102 @@
+//===-- RecordLayout.cpp - Layout information for a struct/union -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RecordLayout interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/TargetInfo.h"
+
+using namespace clang;
+
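+// ASTRecordLayout objects are allocated from the ASTContext's allocator
+// (note the placement new in the constructors below), so they must be
+// destroyed explicitly here rather than through operator delete.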
+void ASTRecordLayout::Destroy(ASTContext &Ctx) {
+ if (FieldOffsets)
+ Ctx.Deallocate(FieldOffsets);
+ if (CXXInfo) {
+ CXXInfo->~CXXRecordLayoutInfo();
+ Ctx.Deallocate(CXXInfo);
+ }
+ this->~ASTRecordLayout();
+ Ctx.Deallocate(this);
+}
+
+ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx, CharUnits size,
+ CharUnits alignment,
+ CharUnits requiredAlignment,
+ CharUnits datasize,
+ const uint64_t *fieldoffsets,
+ unsigned fieldcount)
+ : Size(size), DataSize(datasize), Alignment(alignment),
+ RequiredAlignment(requiredAlignment), FieldOffsets(nullptr),
+ FieldCount(fieldcount), CXXInfo(nullptr) {
+ if (FieldCount > 0) {
+ FieldOffsets = new (Ctx) uint64_t[FieldCount];
+ memcpy(FieldOffsets, fieldoffsets, FieldCount * sizeof(*FieldOffsets));
+ }
+}
+
+// Constructor for C++ records.
+ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx,
+ CharUnits size, CharUnits alignment,
+ CharUnits requiredAlignment,
+ bool hasOwnVFPtr, bool hasExtendableVFPtr,
+ CharUnits vbptroffset,
+ CharUnits datasize,
+ const uint64_t *fieldoffsets,
+ unsigned fieldcount,
+ CharUnits nonvirtualsize,
+ CharUnits nonvirtualalignment,
+ CharUnits SizeOfLargestEmptySubobject,
+ const CXXRecordDecl *PrimaryBase,
+ bool IsPrimaryBaseVirtual,
+ const CXXRecordDecl *BaseSharingVBPtr,
+ bool HasZeroSizedSubObject,
+ bool LeadsWithZeroSizedBase,
+ const BaseOffsetsMapTy& BaseOffsets,
+ const VBaseOffsetsMapTy& VBaseOffsets)
+ : Size(size), DataSize(datasize), Alignment(alignment),
+ RequiredAlignment(requiredAlignment), FieldOffsets(nullptr),
+ FieldCount(fieldcount), CXXInfo(new (Ctx) CXXRecordLayoutInfo)
+{
+ if (FieldCount > 0) {
+ FieldOffsets = new (Ctx) uint64_t[FieldCount];
+ memcpy(FieldOffsets, fieldoffsets, FieldCount * sizeof(*FieldOffsets));
+ }
+
+ CXXInfo->PrimaryBase.setPointer(PrimaryBase);
+ CXXInfo->PrimaryBase.setInt(IsPrimaryBaseVirtual);
+ CXXInfo->NonVirtualSize = nonvirtualsize;
+ CXXInfo->NonVirtualAlignment = nonvirtualalignment;
+ CXXInfo->SizeOfLargestEmptySubobject = SizeOfLargestEmptySubobject;
+ CXXInfo->BaseOffsets = BaseOffsets;
+ CXXInfo->VBaseOffsets = VBaseOffsets;
+ CXXInfo->HasOwnVFPtr = hasOwnVFPtr;
+ CXXInfo->VBPtrOffset = vbptroffset;
+ CXXInfo->HasExtendableVFPtr = hasExtendableVFPtr;
+ CXXInfo->BaseSharingVBPtr = BaseSharingVBPtr;
+ CXXInfo->HasZeroSizedSubObject = HasZeroSizedSubObject;
+ CXXInfo->LeadsWithZeroSizedBase = LeadsWithZeroSizedBase;
+
+#ifndef NDEBUG
+ if (const CXXRecordDecl *PrimaryBase = getPrimaryBase()) {
+ if (isPrimaryBaseVirtual()) {
+ if (Ctx.getTargetInfo().getCXXABI().hasPrimaryVBases()) {
+ assert(getVBaseClassOffset(PrimaryBase).isZero() &&
+ "Primary virtual base must be at offset 0!");
+ }
+ } else {
+ assert(getBaseClassOffset(PrimaryBase).isZero() &&
+ "Primary base must be at offset 0!");
+ }
+ }
+#endif
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp
new file mode 100644
index 0000000..bc3c2a8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -0,0 +1,3282 @@
+//=== RecordLayoutBuilder.cpp - Helper class for building record layouts ---==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MathExtras.h"
+
+using namespace clang;
+
+namespace {
+
+/// BaseSubobjectInfo - Represents a single base subobject in a complete class.
+/// For a class hierarchy like
+///
+/// class A { };
+/// class B : A { };
+/// class C : A, B { };
+///
+/// The BaseSubobjectInfo graph for C will have three BaseSubobjectInfo
+/// instances, one for B and two for A.
+///
+/// If a base is virtual, it will only have one BaseSubobjectInfo allocated.
+struct BaseSubobjectInfo {
+ /// Class - The class for this base info.
+ const CXXRecordDecl *Class;
+
+ /// IsVirtual - Whether the BaseInfo represents a virtual base or not.
+ bool IsVirtual;
+
+ /// Bases - Information about the base subobjects.
+ SmallVector<BaseSubobjectInfo*, 4> Bases;
+
+ /// PrimaryVirtualBaseInfo - Holds the base info for the primary virtual base
+ /// of this base info (if one exists).
+ BaseSubobjectInfo *PrimaryVirtualBaseInfo;
+
+ // FIXME: Document.
+ const BaseSubobjectInfo *Derived;
+};
+
+/// \brief Externally provided layout. Typically used when the AST source, such
+/// as DWARF, lacks all the information that was available at compile time, such
+/// as alignment attributes on fields and pragmas in effect.
+struct ExternalLayout {
+ ExternalLayout() : Size(0), Align(0) {}
+
+ /// \brief Overall record size in bits.
+ uint64_t Size;
+
+ /// \brief Overall record alignment in bits.
+ uint64_t Align;
+
+ /// \brief Record field offsets in bits.
+ llvm::DenseMap<const FieldDecl *, uint64_t> FieldOffsets;
+
+ /// \brief Direct, non-virtual base offsets.
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsets;
+
+ /// \brief Virtual base offsets.
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> VirtualBaseOffsets;
+
+ /// Get the offset of the given field. The external source must provide
+ /// entries for all fields in the record.
+ uint64_t getExternalFieldOffset(const FieldDecl *FD) {
+ assert(FieldOffsets.count(FD) &&
+ "Field does not have an external offset");
+ return FieldOffsets[FD];
+ }
+
+ bool getExternalNVBaseOffset(const CXXRecordDecl *RD, CharUnits &BaseOffset) {
+ auto Known = BaseOffsets.find(RD);
+ if (Known == BaseOffsets.end())
+ return false;
+ BaseOffset = Known->second;
+ return true;
+ }
+
+ bool getExternalVBaseOffset(const CXXRecordDecl *RD, CharUnits &BaseOffset) {
+ auto Known = VirtualBaseOffsets.find(RD);
+ if (Known == VirtualBaseOffsets.end())
+ return false;
+ BaseOffset = Known->second;
+ return true;
+ }
+};
+
+/// EmptySubobjectMap - Keeps track of which empty subobjects exist at different
+/// offsets while laying out a C++ class.
+class EmptySubobjectMap {
+ const ASTContext &Context;
+ uint64_t CharWidth;
+
+ /// Class - The class whose empty entries we're keeping track of.
+ const CXXRecordDecl *Class;
+
+ /// EmptyClassOffsets - A map from offsets to empty record decls.
+ typedef llvm::TinyPtrVector<const CXXRecordDecl *> ClassVectorTy;
+ typedef llvm::DenseMap<CharUnits, ClassVectorTy> EmptyClassOffsetsMapTy;
+ EmptyClassOffsetsMapTy EmptyClassOffsets;
+
+ /// MaxEmptyClassOffset - The highest offset known to contain an empty
+ /// base subobject.
+ CharUnits MaxEmptyClassOffset;
+
+ /// ComputeEmptySubobjectSizes - Compute the size of the largest base or
+ /// member subobject that is empty.
+ void ComputeEmptySubobjectSizes();
+
+ void AddSubobjectAtOffset(const CXXRecordDecl *RD, CharUnits Offset);
+
+ void UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info,
+ CharUnits Offset, bool PlacingEmptyBase);
+
+ void UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Class,
+ CharUnits Offset);
+ void UpdateEmptyFieldSubobjects(const FieldDecl *FD, CharUnits Offset);
+
+ /// AnyEmptySubobjectsBeyondOffset - Returns whether there are any empty
+ /// subobjects at or beyond the given offset.
+ bool AnyEmptySubobjectsBeyondOffset(CharUnits Offset) const {
+ return Offset <= MaxEmptyClassOffset;
+ }
+
+ CharUnits
+ getFieldOffset(const ASTRecordLayout &Layout, unsigned FieldNo) const {
+ uint64_t FieldOffset = Layout.getFieldOffset(FieldNo);
+ assert(FieldOffset % CharWidth == 0 &&
+ "Field offset not at char boundary!");
+
+ return Context.toCharUnitsFromBits(FieldOffset);
+ }
+
+protected:
+ bool CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD,
+ CharUnits Offset) const;
+
+ bool CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info,
+ CharUnits Offset);
+
+ bool CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Class,
+ CharUnits Offset) const;
+ bool CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD,
+ CharUnits Offset) const;
+
+public:
+ /// This holds the size of the largest empty subobject (either a base
+ /// or a member). Will be zero if the record being built doesn't contain
+ /// any empty classes.
+ CharUnits SizeOfLargestEmptySubobject;
+
+ EmptySubobjectMap(const ASTContext &Context, const CXXRecordDecl *Class)
+ : Context(Context), CharWidth(Context.getCharWidth()), Class(Class) {
+ ComputeEmptySubobjectSizes();
+ }
+
+ /// CanPlaceBaseAtOffset - Return whether the given base class can be placed
+ /// at the given offset.
+ /// Returns false if placing the record will result in two components
+ /// (direct or indirect) of the same type having the same offset.
+ bool CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info,
+ CharUnits Offset);
+
+ /// CanPlaceFieldAtOffset - Return whether a field can be placed at the given
+ /// offset.
+ bool CanPlaceFieldAtOffset(const FieldDecl *FD, CharUnits Offset);
+};
+
+void EmptySubobjectMap::ComputeEmptySubobjectSizes() {
+ // Check the bases.
+ for (const CXXBaseSpecifier &Base : Class->bases()) {
+ const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+
+ CharUnits EmptySize;
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
+ if (BaseDecl->isEmpty()) {
+ // If the class decl is empty, get its size.
+ EmptySize = Layout.getSize();
+ } else {
+ // Otherwise, we get the largest empty subobject for the decl.
+ EmptySize = Layout.getSizeOfLargestEmptySubobject();
+ }
+
+ if (EmptySize > SizeOfLargestEmptySubobject)
+ SizeOfLargestEmptySubobject = EmptySize;
+ }
+
+ // Check the fields.
+ for (const FieldDecl *FD : Class->fields()) {
+ const RecordType *RT =
+ Context.getBaseElementType(FD->getType())->getAs<RecordType>();
+
+ // We only care about record types.
+ if (!RT)
+ continue;
+
+ CharUnits EmptySize;
+ const CXXRecordDecl *MemberDecl = RT->getAsCXXRecordDecl();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(MemberDecl);
+ if (MemberDecl->isEmpty()) {
+ // If the class decl is empty, get its size.
+ EmptySize = Layout.getSize();
+ } else {
+ // Otherwise, we get the largest empty subobject for the decl.
+ EmptySize = Layout.getSizeOfLargestEmptySubobject();
+ }
+
+ if (EmptySize > SizeOfLargestEmptySubobject)
+ SizeOfLargestEmptySubobject = EmptySize;
+ }
+}
+
+bool
+EmptySubobjectMap::CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD,
+ CharUnits Offset) const {
+ // We only need to check empty bases.
+ if (!RD->isEmpty())
+ return true;
+
+ EmptyClassOffsetsMapTy::const_iterator I = EmptyClassOffsets.find(Offset);
+ if (I == EmptyClassOffsets.end())
+ return true;
+
+ const ClassVectorTy &Classes = I->second;
+ if (std::find(Classes.begin(), Classes.end(), RD) == Classes.end())
+ return true;
+
+ // There is already an empty class of the same type at this offset.
+ return false;
+}
+
+void EmptySubobjectMap::AddSubobjectAtOffset(const CXXRecordDecl *RD,
+ CharUnits Offset) {
+ // We only care about empty bases.
+ if (!RD->isEmpty())
+ return;
+
+ // If we have empty structures inside a union, they may all share the same
+ // offset; just avoid pushing the same class into the list twice.
+ ClassVectorTy &Classes = EmptyClassOffsets[Offset];
+ if (std::find(Classes.begin(), Classes.end(), RD) != Classes.end())
+ return;
+
+ Classes.push_back(RD);
+
+ // Update the empty class offset.
+ if (Offset > MaxEmptyClassOffset)
+ MaxEmptyClassOffset = Offset;
+}
+
+bool
+EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info,
+ CharUnits Offset) {
+ // We don't have to keep looking past the maximum offset that's known to
+ // contain an empty class.
+ if (!AnyEmptySubobjectsBeyondOffset(Offset))
+ return true;
+
+ if (!CanPlaceSubobjectAtOffset(Info->Class, Offset))
+ return false;
+
+ // Traverse all non-virtual bases.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
+ for (const BaseSubobjectInfo *Base : Info->Bases) {
+ if (Base->IsVirtual)
+ continue;
+
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
+
+ if (!CanPlaceBaseSubobjectAtOffset(Base, BaseOffset))
+ return false;
+ }
+
+ if (Info->PrimaryVirtualBaseInfo) {
+ BaseSubobjectInfo *PrimaryVirtualBaseInfo = Info->PrimaryVirtualBaseInfo;
+
+ if (Info == PrimaryVirtualBaseInfo->Derived) {
+ if (!CanPlaceBaseSubobjectAtOffset(PrimaryVirtualBaseInfo, Offset))
+ return false;
+ }
+ }
+
+ // Traverse all member variables.
+ unsigned FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(),
+ E = Info->Class->field_end(); I != E; ++I, ++FieldNo) {
+ if (I->isBitField())
+ continue;
+
+ CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
+ if (!CanPlaceFieldSubobjectAtOffset(*I, FieldOffset))
+ return false;
+ }
+
+ return true;
+}
+
+void EmptySubobjectMap::UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info,
+ CharUnits Offset,
+ bool PlacingEmptyBase) {
+ if (!PlacingEmptyBase && Offset >= SizeOfLargestEmptySubobject) {
+ // We know that the only empty subobjects that can conflict with the empty
+ // subobjects of non-empty bases are empty bases that can be placed at
+ // offset zero. Because of this, we only need to keep track of empty base
+ // subobjects with offsets less than the size of the largest empty
+ // subobject for our class.
+ return;
+ }
+
+ AddSubobjectAtOffset(Info->Class, Offset);
+
+ // Traverse all non-virtual bases.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
+ for (const BaseSubobjectInfo *Base : Info->Bases) {
+ if (Base->IsVirtual)
+ continue;
+
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
+ UpdateEmptyBaseSubobjects(Base, BaseOffset, PlacingEmptyBase);
+ }
+
+ if (Info->PrimaryVirtualBaseInfo) {
+ BaseSubobjectInfo *PrimaryVirtualBaseInfo = Info->PrimaryVirtualBaseInfo;
+
+ if (Info == PrimaryVirtualBaseInfo->Derived)
+ UpdateEmptyBaseSubobjects(PrimaryVirtualBaseInfo, Offset,
+ PlacingEmptyBase);
+ }
+
+ // Traverse all member variables.
+ unsigned FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(),
+ E = Info->Class->field_end(); I != E; ++I, ++FieldNo) {
+ if (I->isBitField())
+ continue;
+
+ CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
+ UpdateEmptyFieldSubobjects(*I, FieldOffset);
+ }
+}
+
+bool EmptySubobjectMap::CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info,
+ CharUnits Offset) {
+ // If we know this class doesn't have any empty subobjects we don't need to
+ // bother checking.
+ if (SizeOfLargestEmptySubobject.isZero())
+ return true;
+
+ if (!CanPlaceBaseSubobjectAtOffset(Info, Offset))
+ return false;
+
+ // We are able to place the base at this offset. Make sure to update the
+ // empty base subobject map.
+ UpdateEmptyBaseSubobjects(Info, Offset, Info->Class->isEmpty());
+ return true;
+}
+
+bool
+EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Class,
+ CharUnits Offset) const {
+ // We don't have to keep looking past the maximum offset that's known to
+ // contain an empty class.
+ if (!AnyEmptySubobjectsBeyondOffset(Offset))
+ return true;
+
+ if (!CanPlaceSubobjectAtOffset(RD, Offset))
+ return false;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Traverse all non-virtual bases.
+ for (const CXXBaseSpecifier &Base : RD->bases()) {
+ if (Base.isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
+ if (!CanPlaceFieldSubobjectAtOffset(BaseDecl, Class, BaseOffset))
+ return false;
+ }
+
+ if (RD == Class) {
+ // This is the most derived class, traverse virtual bases as well.
+ for (const CXXBaseSpecifier &Base : RD->vbases()) {
+ const CXXRecordDecl *VBaseDecl = Base.getType()->getAsCXXRecordDecl();
+
+ CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
+ if (!CanPlaceFieldSubobjectAtOffset(VBaseDecl, Class, VBaseOffset))
+ return false;
+ }
+ }
+
+ // Traverse all member variables.
+ unsigned FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I, ++FieldNo) {
+ if (I->isBitField())
+ continue;
+
+ CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
+
+ if (!CanPlaceFieldSubobjectAtOffset(*I, FieldOffset))
+ return false;
+ }
+
+ return true;
+}
+
+bool
+EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD,
+ CharUnits Offset) const {
+ // We don't have to keep looking past the maximum offset that's known to
+ // contain an empty class.
+ if (!AnyEmptySubobjectsBeyondOffset(Offset))
+ return true;
+
+ QualType T = FD->getType();
+ if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
+ return CanPlaceFieldSubobjectAtOffset(RD, RD, Offset);
+
+ // If we have an array type we need to look at every element.
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
+ QualType ElemTy = Context.getBaseElementType(AT);
+ const RecordType *RT = ElemTy->getAs<RecordType>();
+ if (!RT)
+ return true;
+
+ const CXXRecordDecl *RD = RT->getAsCXXRecordDecl();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ uint64_t NumElements = Context.getConstantArrayElementCount(AT);
+ CharUnits ElementOffset = Offset;
+ for (uint64_t I = 0; I != NumElements; ++I) {
+ // We don't have to keep looking past the maximum offset that's known to
+ // contain an empty class.
+ if (!AnyEmptySubobjectsBeyondOffset(ElementOffset))
+ return true;
+
+ if (!CanPlaceFieldSubobjectAtOffset(RD, RD, ElementOffset))
+ return false;
+
+ ElementOffset += Layout.getSize();
+ }
+ }
+
+ return true;
+}
+
+bool
+EmptySubobjectMap::CanPlaceFieldAtOffset(const FieldDecl *FD,
+ CharUnits Offset) {
+ if (!CanPlaceFieldSubobjectAtOffset(FD, Offset))
+ return false;
+
+ // We are able to place the member variable at this offset.
+ // Make sure to update the empty base subobject map.
+ UpdateEmptyFieldSubobjects(FD, Offset);
+ return true;
+}
+
+void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Class,
+ CharUnits Offset) {
+ // We know that the only empty subobjects that can conflict with empty
+ // field subobjects are subobjects of empty bases that can be placed at offset
+ // zero. Because of this, we only need to keep track of empty field
+ // subobjects with offsets less than the size of the largest empty
+ // subobject for our class.
+ if (Offset >= SizeOfLargestEmptySubobject)
+ return;
+
+ AddSubobjectAtOffset(RD, Offset);
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Traverse all non-virtual bases.
+ for (const CXXBaseSpecifier &Base : RD->bases()) {
+ if (Base.isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
+ UpdateEmptyFieldSubobjects(BaseDecl, Class, BaseOffset);
+ }
+
+ if (RD == Class) {
+ // This is the most derived class, traverse virtual bases as well.
+ for (const CXXBaseSpecifier &Base : RD->vbases()) {
+ const CXXRecordDecl *VBaseDecl = Base.getType()->getAsCXXRecordDecl();
+
+ CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
+ UpdateEmptyFieldSubobjects(VBaseDecl, Class, VBaseOffset);
+ }
+ }
+
+ // Traverse all member variables.
+ unsigned FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I, ++FieldNo) {
+ if (I->isBitField())
+ continue;
+
+ CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
+
+ UpdateEmptyFieldSubobjects(*I, FieldOffset);
+ }
+}
+
+void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const FieldDecl *FD,
+ CharUnits Offset) {
+ QualType T = FD->getType();
+ if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl()) {
+ UpdateEmptyFieldSubobjects(RD, RD, Offset);
+ return;
+ }
+
+ // If we have an array type we need to update every element.
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
+ QualType ElemTy = Context.getBaseElementType(AT);
+ const RecordType *RT = ElemTy->getAs<RecordType>();
+ if (!RT)
+ return;
+
+ const CXXRecordDecl *RD = RT->getAsCXXRecordDecl();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ uint64_t NumElements = Context.getConstantArrayElementCount(AT);
+ CharUnits ElementOffset = Offset;
+
+ for (uint64_t I = 0; I != NumElements; ++I) {
+ // We know that the only empty subobjects that can conflict with empty
+ // field subobjects are subobjects of empty bases that can be placed at
+ // offset zero. Because of this, we only need to keep track of empty field
+ // subobjects with offsets less than the size of the largest empty
+ // subobject for our class.
+ if (ElementOffset >= SizeOfLargestEmptySubobject)
+ return;
+
+ UpdateEmptyFieldSubobjects(RD, RD, ElementOffset);
+ ElementOffset += Layout.getSize();
+ }
+ }
+}
+
+typedef llvm::SmallPtrSet<const CXXRecordDecl*, 4> ClassSetTy;
+
+class ItaniumRecordLayoutBuilder {
+protected:
+ // FIXME: Remove this and make the appropriate fields public.
+ friend class clang::ASTContext;
+
+ const ASTContext &Context;
+
+ EmptySubobjectMap *EmptySubobjects;
+
+ /// Size - The current size of the record layout.
+ uint64_t Size;
+
+ /// Alignment - The current alignment of the record layout.
+ CharUnits Alignment;
+
+ /// \brief The alignment if attribute packed is not used.
+ CharUnits UnpackedAlignment;
+
+ SmallVector<uint64_t, 16> FieldOffsets;
+
+ /// \brief Whether the external AST source has provided a layout for this
+ /// record.
+ unsigned UseExternalLayout : 1;
+
+ /// \brief Whether we need to infer alignment, even when we have an
+ /// externally-provided layout.
+ unsigned InferAlignment : 1;
+
+ /// Packed - Whether the record is packed or not.
+ unsigned Packed : 1;
+
+ unsigned IsUnion : 1;
+
+ unsigned IsMac68kAlign : 1;
+
+ unsigned IsMsStruct : 1;
+
+ /// UnfilledBitsInLastUnit - If the last field laid out was a bitfield,
+ /// this contains the number of bits in the last unit that can be used for
+ /// an adjacent bitfield if necessary. The unit in question is usually
+ /// a byte, but larger units are used if IsMsStruct.
+ unsigned char UnfilledBitsInLastUnit;
+ /// LastBitfieldTypeSize - If IsMsStruct, represents the size of the type
+ /// of the previous field if it was a bitfield.
+ unsigned char LastBitfieldTypeSize;
+
+ /// MaxFieldAlignment - The maximum allowed field alignment. This is set by
+ /// #pragma pack.
+ CharUnits MaxFieldAlignment;
+
+ /// DataSize - The data size of the record being laid out.
+ uint64_t DataSize;
+
+ CharUnits NonVirtualSize;
+ CharUnits NonVirtualAlignment;
+
+ /// PrimaryBase - the primary base class (if one exists) of the class
+ /// we're laying out.
+ const CXXRecordDecl *PrimaryBase;
+
+ /// PrimaryBaseIsVirtual - Whether the primary base of the class we're laying
+ /// out is virtual.
+ bool PrimaryBaseIsVirtual;
+
+ /// HasOwnVFPtr - Whether the class provides its own vtable/vftbl
+ /// pointer, as opposed to inheriting one from a primary base class.
+ bool HasOwnVFPtr;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsetsMapTy;
+
+ /// Bases - base classes and their offsets in the record.
+ BaseOffsetsMapTy Bases;
+
+ // VBases - virtual base classes and their offsets in the record.
+ ASTRecordLayout::VBaseOffsetsMapTy VBases;
+
+ /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
+ /// primary base classes for some other direct or indirect base class.
+ CXXIndirectPrimaryBaseSet IndirectPrimaryBases;
+
+ /// FirstNearlyEmptyVBase - The first nearly empty virtual base class in
+ /// inheritance graph order. Used for determining the primary base class.
+ const CXXRecordDecl *FirstNearlyEmptyVBase;
+
+ /// VisitedVirtualBases - A set of all the visited virtual bases, used to
+ /// avoid visiting virtual bases more than once.
+ llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
+
+ /// Valid if UseExternalLayout is true.
+ ExternalLayout External;
+
+ ItaniumRecordLayoutBuilder(const ASTContext &Context,
+ EmptySubobjectMap *EmptySubobjects)
+ : Context(Context), EmptySubobjects(EmptySubobjects), Size(0),
+ Alignment(CharUnits::One()), UnpackedAlignment(CharUnits::One()),
+ UseExternalLayout(false), InferAlignment(false), Packed(false),
+ IsUnion(false), IsMac68kAlign(false), IsMsStruct(false),
+ UnfilledBitsInLastUnit(0), LastBitfieldTypeSize(0),
+ MaxFieldAlignment(CharUnits::Zero()), DataSize(0),
+ NonVirtualSize(CharUnits::Zero()),
+ NonVirtualAlignment(CharUnits::One()), PrimaryBase(nullptr),
+ PrimaryBaseIsVirtual(false), HasOwnVFPtr(false),
+ FirstNearlyEmptyVBase(nullptr) {}
+
+ void Layout(const RecordDecl *D);
+ void Layout(const CXXRecordDecl *D);
+ void Layout(const ObjCInterfaceDecl *D);
+
+ void LayoutFields(const RecordDecl *D);
+ void LayoutField(const FieldDecl *D, bool InsertExtraPadding);
+ void LayoutWideBitField(uint64_t FieldSize, uint64_t TypeSize,
+ bool FieldPacked, const FieldDecl *D);
+ void LayoutBitField(const FieldDecl *D);
+
+ TargetCXXABI getCXXABI() const {
+ return Context.getTargetInfo().getCXXABI();
+ }
+
+ /// BaseSubobjectInfoAllocator - Allocator for BaseSubobjectInfo objects.
+ llvm::SpecificBumpPtrAllocator<BaseSubobjectInfo> BaseSubobjectInfoAllocator;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, BaseSubobjectInfo *>
+ BaseSubobjectInfoMapTy;
+
+ /// VirtualBaseInfo - Map from all the (direct or indirect) virtual bases
+ /// of the class we're laying out to their base subobject info.
+ BaseSubobjectInfoMapTy VirtualBaseInfo;
+
+ /// NonVirtualBaseInfo - Map from all the direct non-virtual bases of the
+ /// class we're laying out to their base subobject info.
+ BaseSubobjectInfoMapTy NonVirtualBaseInfo;
+
+ /// ComputeBaseSubobjectInfo - Compute the base subobject information for the
+ /// bases of the given class.
+ void ComputeBaseSubobjectInfo(const CXXRecordDecl *RD);
+
+ /// ComputeBaseSubobjectInfo - Compute the base subobject information for a
+ /// single class and all of its base classes.
+ BaseSubobjectInfo *ComputeBaseSubobjectInfo(const CXXRecordDecl *RD,
+ bool IsVirtual,
+ BaseSubobjectInfo *Derived);
+
+ /// DeterminePrimaryBase - Determine the primary base of the given class.
+ void DeterminePrimaryBase(const CXXRecordDecl *RD);
+
+ void SelectPrimaryVBase(const CXXRecordDecl *RD);
+
+ void EnsureVTablePointerAlignment(CharUnits UnpackedBaseAlign);
+
+ /// LayoutNonVirtualBases - Determines the primary base class (if any) and
+ /// lays it out. Will then proceed to lay out all non-virtual base classes.
+ void LayoutNonVirtualBases(const CXXRecordDecl *RD);
+
+ /// LayoutNonVirtualBase - Lays out a single non-virtual base.
+ void LayoutNonVirtualBase(const BaseSubobjectInfo *Base);
+
+ void AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
+ CharUnits Offset);
+
+ /// LayoutVirtualBases - Lays out all the virtual bases.
+ void LayoutVirtualBases(const CXXRecordDecl *RD,
+ const CXXRecordDecl *MostDerivedClass);
+
+ /// LayoutVirtualBase - Lays out a single virtual base.
+ void LayoutVirtualBase(const BaseSubobjectInfo *Base);
+
+ /// LayoutBase - Will lay out a base and return the offset where it was
+ /// placed, in chars.
+ CharUnits LayoutBase(const BaseSubobjectInfo *Base);
+
+ /// InitializeLayout - Initialize record layout for the given record decl.
+ void InitializeLayout(const Decl *D);
+
+ /// FinishLayout - Finalize record layout. Adjust record size based on the
+ /// alignment.
+ void FinishLayout(const NamedDecl *D);
+
+ void UpdateAlignment(CharUnits NewAlignment, CharUnits UnpackedNewAlignment);
+ void UpdateAlignment(CharUnits NewAlignment) {
+ UpdateAlignment(NewAlignment, NewAlignment);
+ }
+
+ /// \brief Retrieve the externally-supplied field offset for the given
+ /// field.
+ ///
+ /// \param Field The field whose offset is being queried.
+ /// \param ComputedOffset The offset that we've computed for this field.
+ uint64_t updateExternalFieldOffset(const FieldDecl *Field,
+ uint64_t ComputedOffset);
+
+ void CheckFieldPadding(uint64_t Offset, uint64_t UnpaddedOffset,
+ uint64_t UnpackedOffset, unsigned UnpackedAlign,
+ bool isPacked, const FieldDecl *D);
+
+ DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
+
+ CharUnits getSize() const {
+ assert(Size % Context.getCharWidth() == 0);
+ return Context.toCharUnitsFromBits(Size);
+ }
+ uint64_t getSizeInBits() const { return Size; }
+
+ void setSize(CharUnits NewSize) { Size = Context.toBits(NewSize); }
+ void setSize(uint64_t NewSize) { Size = NewSize; }
+
+ CharUnits getAligment() const { return Alignment; }
+
+ CharUnits getDataSize() const {
+ assert(DataSize % Context.getCharWidth() == 0);
+ return Context.toCharUnitsFromBits(DataSize);
+ }
+ uint64_t getDataSizeInBits() const { return DataSize; }
+
+ void setDataSize(CharUnits NewSize) { DataSize = Context.toBits(NewSize); }
+ void setDataSize(uint64_t NewSize) { DataSize = NewSize; }
+
+ ItaniumRecordLayoutBuilder(const ItaniumRecordLayoutBuilder &) = delete;
+ void operator=(const ItaniumRecordLayoutBuilder &) = delete;
+};
+} // end anonymous namespace
+
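+/// SelectPrimaryVBase - Depth-first search over the bases of RD for the
+/// first nearly empty virtual base that is not already an indirect primary
+/// base; the first nearly empty virtual base seen is remembered in
+/// FirstNearlyEmptyVBase as a fallback for DeterminePrimaryBase.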
+void ItaniumRecordLayoutBuilder::SelectPrimaryVBase(const CXXRecordDecl *RD) {
+ for (const auto &I : RD->bases()) {
+ assert(!I.getType()->isDependentType() &&
+ "Cannot layout class with dependent bases.");
+
+ const CXXRecordDecl *Base = I.getType()->getAsCXXRecordDecl();
+
+ // Check if this is a nearly empty virtual base.
+ if (I.isVirtual() && Context.isNearlyEmpty(Base)) {
+ // If it's not an indirect primary base, then we've found our primary
+ // base.
+ if (!IndirectPrimaryBases.count(Base)) {
+ PrimaryBase = Base;
+ PrimaryBaseIsVirtual = true;
+ return;
+ }
+
+ // Is this the first nearly empty virtual base?
+ if (!FirstNearlyEmptyVBase)
+ FirstNearlyEmptyVBase = Base;
+ }
+
+ SelectPrimaryVBase(Base);
+ if (PrimaryBase)
+ return;
+ }
+}
+
+/// DeterminePrimaryBase - Determine the primary base of the given class.
+void ItaniumRecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) {
+ // If the class isn't dynamic, it won't have a primary base.
+ if (!RD->isDynamicClass())
+ return;
+
+ // Compute all the primary virtual bases for all of our direct and
+ // indirect bases, and record all their primary virtual base classes.
+ RD->getIndirectPrimaryBases(IndirectPrimaryBases);
+
+ // If the record has a dynamic base class, attempt to choose a primary base
+ // class. It is the first (in direct base class order) non-virtual dynamic
+ // base class, if one exists.
+ for (const auto &I : RD->bases()) {
+ // Ignore virtual bases.
+ if (I.isVirtual())
+ continue;
+
+ const CXXRecordDecl *Base = I.getType()->getAsCXXRecordDecl();
+
+ if (Base->isDynamicClass()) {
+ // We found it.
+ PrimaryBase = Base;
+ PrimaryBaseIsVirtual = false;
+ return;
+ }
+ }
+
+ // Under the Itanium ABI, if there is no non-virtual primary base class,
+ // try to compute the primary virtual base. The primary virtual base is
+ // the first nearly empty virtual base that is not an indirect primary
+ // virtual base class, if one exists.
+ if (RD->getNumVBases() != 0) {
+ SelectPrimaryVBase(RD);
+ if (PrimaryBase)
+ return;
+ }
+
+ // Otherwise, it is the first indirect primary base class, if one exists.
+ if (FirstNearlyEmptyVBase) {
+ PrimaryBase = FirstNearlyEmptyVBase;
+ PrimaryBaseIsVirtual = true;
+ return;
+ }
+
+ assert(!PrimaryBase && "Should not get here with a primary base!");
+}
+
+BaseSubobjectInfo *ItaniumRecordLayoutBuilder::ComputeBaseSubobjectInfo(
+ const CXXRecordDecl *RD, bool IsVirtual, BaseSubobjectInfo *Derived) {
+ BaseSubobjectInfo *Info;
+
+ if (IsVirtual) {
+ // Check if we already have info about this virtual base.
+ BaseSubobjectInfo *&InfoSlot = VirtualBaseInfo[RD];
+ if (InfoSlot) {
+ assert(InfoSlot->Class == RD && "Wrong class for virtual base info!");
+ return InfoSlot;
+ }
+
+ // We don't; create it.
+ InfoSlot = new (BaseSubobjectInfoAllocator.Allocate()) BaseSubobjectInfo;
+ Info = InfoSlot;
+ } else {
+ Info = new (BaseSubobjectInfoAllocator.Allocate()) BaseSubobjectInfo;
+ }
+
+ Info->Class = RD;
+ Info->IsVirtual = IsVirtual;
+ Info->Derived = nullptr;
+ Info->PrimaryVirtualBaseInfo = nullptr;
+
+ const CXXRecordDecl *PrimaryVirtualBase = nullptr;
+ BaseSubobjectInfo *PrimaryVirtualBaseInfo = nullptr;
+
+ // Check if this base has a primary virtual base.
+ if (RD->getNumVBases()) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ if (Layout.isPrimaryBaseVirtual()) {
+ // This base does have a primary virtual base.
+ PrimaryVirtualBase = Layout.getPrimaryBase();
+ assert(PrimaryVirtualBase && "Didn't have a primary virtual base!");
+
+ // Now check if we have base subobject info about this primary base.
+ PrimaryVirtualBaseInfo = VirtualBaseInfo.lookup(PrimaryVirtualBase);
+
+ if (PrimaryVirtualBaseInfo) {
+ if (PrimaryVirtualBaseInfo->Derived) {
+ // We did have info about this primary base, and it turns out that it
+ // has already been claimed as a primary virtual base for another
+ // base.
+ PrimaryVirtualBase = nullptr;
+ } else {
+ // We can claim this base as our primary base.
+ Info->PrimaryVirtualBaseInfo = PrimaryVirtualBaseInfo;
+ PrimaryVirtualBaseInfo->Derived = Info;
+ }
+ }
+ }
+ }
+
+ // Now go through all direct bases.
+ for (const auto &I : RD->bases()) {
+ bool IsVirtual = I.isVirtual();
+
+ const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl();
+
+ Info->Bases.push_back(ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, Info));
+ }
+
+ if (PrimaryVirtualBase && !PrimaryVirtualBaseInfo) {
+ // Traversing the bases must have created the base info for our primary
+ // virtual base.
+ PrimaryVirtualBaseInfo = VirtualBaseInfo.lookup(PrimaryVirtualBase);
+ assert(PrimaryVirtualBaseInfo &&
+ "Did not create a primary virtual base!");
+
+ // Claim the primary virtual base as our primary virtual base.
+ Info->PrimaryVirtualBaseInfo = PrimaryVirtualBaseInfo;
+ PrimaryVirtualBaseInfo->Derived = Info;
+ }
+
+ return Info;
+}
+
+void ItaniumRecordLayoutBuilder::ComputeBaseSubobjectInfo(
+ const CXXRecordDecl *RD) {
+ for (const auto &I : RD->bases()) {
+ bool IsVirtual = I.isVirtual();
+
+ const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl();
+
+ // Compute the base subobject info for this base.
+ BaseSubobjectInfo *Info = ComputeBaseSubobjectInfo(BaseDecl, IsVirtual,
+ nullptr);
+
+ if (IsVirtual) {
+ // ComputeBaseSubobjectInfo has already added this base for us.
+ assert(VirtualBaseInfo.count(BaseDecl) &&
+ "Did not add virtual base!");
+ } else {
+ // Add the base info to the map of non-virtual bases.
+ assert(!NonVirtualBaseInfo.count(BaseDecl) &&
+ "Non-virtual base already exists!");
+ NonVirtualBaseInfo.insert(std::make_pair(BaseDecl, Info));
+ }
+ }
+}
+
+void ItaniumRecordLayoutBuilder::EnsureVTablePointerAlignment(
+ CharUnits UnpackedBaseAlign) {
+ CharUnits BaseAlign = (Packed) ? CharUnits::One() : UnpackedBaseAlign;
+
+ // The maximum field alignment overrides base align.
+ if (!MaxFieldAlignment.isZero()) {
+ BaseAlign = std::min(BaseAlign, MaxFieldAlignment);
+ UnpackedBaseAlign = std::min(UnpackedBaseAlign, MaxFieldAlignment);
+ }
+
+ // Round up the current record size to pointer alignment.
+ setSize(getSize().RoundUpToAlignment(BaseAlign));
+ setDataSize(getSize());
+
+ // Update the alignment.
+ UpdateAlignment(BaseAlign, UnpackedBaseAlign);
+}
+
+void ItaniumRecordLayoutBuilder::LayoutNonVirtualBases(
+ const CXXRecordDecl *RD) {
+ // First, determine the primary base class.
+ DeterminePrimaryBase(RD);
+
+ // Compute base subobject info.
+ ComputeBaseSubobjectInfo(RD);
+
+ // If we have a primary base class, lay it out.
+ if (PrimaryBase) {
+ if (PrimaryBaseIsVirtual) {
+ // If the primary virtual base was a primary virtual base of some other
+ // base class we'll have to steal it.
+ BaseSubobjectInfo *PrimaryBaseInfo = VirtualBaseInfo.lookup(PrimaryBase);
+ PrimaryBaseInfo->Derived = nullptr;
+
+ // We have a virtual primary base, insert it as an indirect primary base.
+ IndirectPrimaryBases.insert(PrimaryBase);
+
+ assert(!VisitedVirtualBases.count(PrimaryBase) &&
+ "vbase already visited!");
+ VisitedVirtualBases.insert(PrimaryBase);
+
+ LayoutVirtualBase(PrimaryBaseInfo);
+ } else {
+ BaseSubobjectInfo *PrimaryBaseInfo =
+ NonVirtualBaseInfo.lookup(PrimaryBase);
+ assert(PrimaryBaseInfo &&
+ "Did not find base info for non-virtual primary base!");
+
+ LayoutNonVirtualBase(PrimaryBaseInfo);
+ }
+
+ // If this class needs a vtable/vf-table and didn't get one from a
+ // primary base, add it in now.
+ } else if (RD->isDynamicClass()) {
+ assert(DataSize == 0 && "Vtable pointer must be at offset zero!");
+ CharUnits PtrWidth =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ CharUnits PtrAlign =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0));
+ EnsureVTablePointerAlignment(PtrAlign);
+ HasOwnVFPtr = true;
+ setSize(getSize() + PtrWidth);
+ setDataSize(getSize());
+ }
+
+ // Now lay out the non-virtual bases.
+ for (const auto &I : RD->bases()) {
+
+ // Ignore virtual bases.
+ if (I.isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl();
+
+ // Skip the primary base, because we've already laid it out. The
+ // !PrimaryBaseIsVirtual check is required because we might have a
+ // non-virtual base of the same type as a primary virtual base.
+ if (BaseDecl == PrimaryBase && !PrimaryBaseIsVirtual)
+ continue;
+
+ // Lay out the base.
+ BaseSubobjectInfo *BaseInfo = NonVirtualBaseInfo.lookup(BaseDecl);
+ assert(BaseInfo && "Did not find base info for non-virtual base!");
+
+ LayoutNonVirtualBase(BaseInfo);
+ }
+}
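+
+// For illustration (hypothetical class; assuming a 64-bit target with 8-byte
+// pointers):
+//
+//   struct S { virtual void f(); int x; };
+//
+// S has no bases, so no primary base exists; the branch above allocates the
+// vtable pointer at offset 0, after which x is laid out at offset 8.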
+
+void ItaniumRecordLayoutBuilder::LayoutNonVirtualBase(
+ const BaseSubobjectInfo *Base) {
+ // Layout the base.
+ CharUnits Offset = LayoutBase(Base);
+
+ // Add its base class offset.
+ assert(!Bases.count(Base->Class) && "base offset already exists!");
+ Bases.insert(std::make_pair(Base->Class, Offset));
+
+ AddPrimaryVirtualBaseOffsets(Base, Offset);
+}
+
+void ItaniumRecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(
+ const BaseSubobjectInfo *Info, CharUnits Offset) {
+ // This base isn't interesting, it has no virtual bases.
+ if (!Info->Class->getNumVBases())
+ return;
+
+ // First, check if we have a virtual primary base to add offsets for.
+ if (Info->PrimaryVirtualBaseInfo) {
+ assert(Info->PrimaryVirtualBaseInfo->IsVirtual &&
+ "Primary virtual base is not virtual!");
+ if (Info->PrimaryVirtualBaseInfo->Derived == Info) {
+ // Add the offset.
+ assert(!VBases.count(Info->PrimaryVirtualBaseInfo->Class) &&
+ "primary vbase offset already exists!");
+ VBases.insert(std::make_pair(Info->PrimaryVirtualBaseInfo->Class,
+ ASTRecordLayout::VBaseInfo(Offset, false)));
+
+ // Traverse the primary virtual base.
+ AddPrimaryVirtualBaseOffsets(Info->PrimaryVirtualBaseInfo, Offset);
+ }
+ }
+
+ // Now go through all direct non-virtual bases.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
+ for (const BaseSubobjectInfo *Base : Info->Bases) {
+ if (Base->IsVirtual)
+ continue;
+
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
+ AddPrimaryVirtualBaseOffsets(Base, BaseOffset);
+ }
+}
+
+void ItaniumRecordLayoutBuilder::LayoutVirtualBases(
+ const CXXRecordDecl *RD, const CXXRecordDecl *MostDerivedClass) {
+ const CXXRecordDecl *PrimaryBase;
+ bool PrimaryBaseIsVirtual;
+
+ if (MostDerivedClass == RD) {
+ PrimaryBase = this->PrimaryBase;
+ PrimaryBaseIsVirtual = this->PrimaryBaseIsVirtual;
+ } else {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ PrimaryBase = Layout.getPrimaryBase();
+ PrimaryBaseIsVirtual = Layout.isPrimaryBaseVirtual();
+ }
+
+ for (const CXXBaseSpecifier &Base : RD->bases()) {
+ assert(!Base.getType()->isDependentType() &&
+ "Cannot layout class with dependent bases.");
+
+ const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+
+ if (Base.isVirtual()) {
+ if (PrimaryBase != BaseDecl || !PrimaryBaseIsVirtual) {
+ bool IndirectPrimaryBase = IndirectPrimaryBases.count(BaseDecl);
+
+ // Only lay out the virtual base if it's not an indirect primary base.
+ if (!IndirectPrimaryBase) {
+ // Only visit virtual bases once.
+ if (!VisitedVirtualBases.insert(BaseDecl).second)
+ continue;
+
+ const BaseSubobjectInfo *BaseInfo = VirtualBaseInfo.lookup(BaseDecl);
+ assert(BaseInfo && "Did not find virtual base info!");
+ LayoutVirtualBase(BaseInfo);
+ }
+ }
+ }
+
+ if (!BaseDecl->getNumVBases()) {
+ // This base isn't interesting since it doesn't have any virtual bases.
+ continue;
+ }
+
+ LayoutVirtualBases(BaseDecl, MostDerivedClass);
+ }
+}
+
+void ItaniumRecordLayoutBuilder::LayoutVirtualBase(
+ const BaseSubobjectInfo *Base) {
+ assert(!Base->Derived && "Trying to lay out a primary virtual base!");
+
+ // Layout the base.
+ CharUnits Offset = LayoutBase(Base);
+
+ // Add its base class offset.
+ assert(!VBases.count(Base->Class) && "vbase offset already exists!");
+ VBases.insert(std::make_pair(Base->Class,
+ ASTRecordLayout::VBaseInfo(Offset, false)));
+
+ AddPrimaryVirtualBaseOffsets(Base, Offset);
+}
+
+CharUnits
+ItaniumRecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base->Class);
+
+ CharUnits Offset;
+
+ // Query the external layout to see if it provides an offset.
+ bool HasExternalLayout = false;
+ if (UseExternalLayout) {
+ if (Base->IsVirtual)
+ HasExternalLayout = External.getExternalVBaseOffset(Base->Class, Offset);
+ else
+ HasExternalLayout = External.getExternalNVBaseOffset(Base->Class, Offset);
+ }
+
+ CharUnits UnpackedBaseAlign = Layout.getNonVirtualAlignment();
+ CharUnits BaseAlign = (Packed) ? CharUnits::One() : UnpackedBaseAlign;
+
+ // If we have an empty base class, try to place it at offset 0.
+ if (Base->Class->isEmpty() &&
+ (!HasExternalLayout || Offset == CharUnits::Zero()) &&
+ EmptySubobjects->CanPlaceBaseAtOffset(Base, CharUnits::Zero())) {
+ setSize(std::max(getSize(), Layout.getSize()));
+ UpdateAlignment(BaseAlign, UnpackedBaseAlign);
+
+ return CharUnits::Zero();
+ }
+
+ // The maximum field alignment overrides base align.
+ if (!MaxFieldAlignment.isZero()) {
+ BaseAlign = std::min(BaseAlign, MaxFieldAlignment);
+ UnpackedBaseAlign = std::min(UnpackedBaseAlign, MaxFieldAlignment);
+ }
+
+ if (!HasExternalLayout) {
+ // Round up the current record size to the base's alignment boundary.
+ Offset = getDataSize().RoundUpToAlignment(BaseAlign);
+
+ // Try to place the base.
+ while (!EmptySubobjects->CanPlaceBaseAtOffset(Base, Offset))
+ Offset += BaseAlign;
+ } else {
+ bool Allowed = EmptySubobjects->CanPlaceBaseAtOffset(Base, Offset);
+ (void)Allowed;
+ assert(Allowed && "Base subobject externally placed at overlapping offset");
+
+ if (InferAlignment && Offset < getDataSize().RoundUpToAlignment(BaseAlign)) {
+ // The externally-supplied base offset is before the base offset we
+ // computed. Assume that the structure is packed.
+ Alignment = CharUnits::One();
+ InferAlignment = false;
+ }
+ }
+
+ if (!Base->Class->isEmpty()) {
+ // Update the data size.
+ setDataSize(Offset + Layout.getNonVirtualSize());
+
+ setSize(std::max(getSize(), getDataSize()));
+ } else
+ setSize(std::max(getSize(), Offset + Layout.getSize()));
+
+ // Remember max struct/class alignment.
+ UpdateAlignment(BaseAlign, UnpackedBaseAlign);
+
+ return Offset;
+}
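+
+// For illustration (hypothetical classes): with
+//
+//   struct Empty { };
+//   struct S : Empty { int x; };
+//
+// the empty-base branch above places Empty at offset 0, so sizeof(S) ==
+// sizeof(int) instead of growing by a byte of base storage.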
+
+void ItaniumRecordLayoutBuilder::InitializeLayout(const Decl *D) {
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(D)) {
+ IsUnion = RD->isUnion();
+ IsMsStruct = RD->isMsStruct(Context);
+ }
+
+ Packed = D->hasAttr<PackedAttr>();
+
+ // Honor the default struct packing maximum alignment flag.
+ if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct) {
+ MaxFieldAlignment = CharUnits::fromQuantity(DefaultMaxFieldAlignment);
+ }
+
+ // mac68k alignment supersedes maximum field alignment and attribute aligned,
+ // and forces all structures to have 2-byte alignment. The IBM docs on it
+ // allude to additional (more complicated) semantics, especially with regard
+ // to bit-fields, but gcc appears not to follow that.
+ if (D->hasAttr<AlignMac68kAttr>()) {
+ IsMac68kAlign = true;
+ MaxFieldAlignment = CharUnits::fromQuantity(2);
+ Alignment = CharUnits::fromQuantity(2);
+ } else {
+ if (const MaxFieldAlignmentAttr *MFAA = D->getAttr<MaxFieldAlignmentAttr>())
+ MaxFieldAlignment = Context.toCharUnitsFromBits(MFAA->getAlignment());
+
+ if (unsigned MaxAlign = D->getMaxAlignment())
+ UpdateAlignment(Context.toCharUnitsFromBits(MaxAlign));
+ }
+
+ // If there is an external AST source, ask it for the various offsets.
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(D))
+ if (ExternalASTSource *Source = Context.getExternalSource()) {
+ UseExternalLayout = Source->layoutRecordType(
+ RD, External.Size, External.Align, External.FieldOffsets,
+ External.BaseOffsets, External.VirtualBaseOffsets);
+
+ // Update based on external alignment.
+ if (UseExternalLayout) {
+ if (External.Align > 0) {
+ Alignment = Context.toCharUnitsFromBits(External.Align);
+ } else {
+ // The external source didn't have alignment information; infer it.
+ InferAlignment = true;
+ }
+ }
+ }
+}
+
+void ItaniumRecordLayoutBuilder::Layout(const RecordDecl *D) {
+ InitializeLayout(D);
+ LayoutFields(D);
+
+ // Finally, round the size of the total struct up to the alignment of the
+ // struct itself.
+ FinishLayout(D);
+}
+
+void ItaniumRecordLayoutBuilder::Layout(const CXXRecordDecl *RD) {
+ InitializeLayout(RD);
+
+ // Lay out the vtable and the non-virtual bases.
+ LayoutNonVirtualBases(RD);
+
+ LayoutFields(RD);
+
+ NonVirtualSize = Context.toCharUnitsFromBits(
+ llvm::RoundUpToAlignment(getSizeInBits(),
+ Context.getTargetInfo().getCharAlign()));
+ NonVirtualAlignment = Alignment;
+
+ // Lay out the virtual bases and add the primary virtual base offsets.
+ LayoutVirtualBases(RD, RD);
+
+ // Finally, round the size of the total struct up to the alignment
+ // of the struct itself.
+ FinishLayout(RD);
+
+#ifndef NDEBUG
+ // Check that we have base offsets for all bases.
+ for (const CXXBaseSpecifier &Base : RD->bases()) {
+ if (Base.isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+
+ assert(Bases.count(BaseDecl) && "Did not find base offset!");
+ }
+
+ // And all virtual bases.
+ for (const CXXBaseSpecifier &Base : RD->vbases()) {
+ const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+
+ assert(VBases.count(BaseDecl) && "Did not find base offset!");
+ }
+#endif
+}
+
+void ItaniumRecordLayoutBuilder::Layout(const ObjCInterfaceDecl *D) {
+ if (ObjCInterfaceDecl *SD = D->getSuperClass()) {
+ const ASTRecordLayout &SL = Context.getASTObjCInterfaceLayout(SD);
+
+ UpdateAlignment(SL.getAlignment());
+
+ // We start laying out ivars not at the end of the superclass
+ // structure, but at the next byte following the last field.
+ setSize(SL.getDataSize());
+ setDataSize(getSize());
+ }
+
+ InitializeLayout(D);
+ // Layout each ivar sequentially.
+ for (const ObjCIvarDecl *IVD = D->all_declared_ivar_begin(); IVD;
+ IVD = IVD->getNextIvar())
+ LayoutField(IVD, false);
+
+ // Finally, round the size of the total struct up to the alignment of the
+ // struct itself.
+ FinishLayout(D);
+}
+
+void ItaniumRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
+ // Layout each field, for now, just sequentially, respecting alignment. In
+ // the future, this will need to be tweakable by targets.
+ bool InsertExtraPadding = D->mayInsertExtraPadding(/*EmitRemark=*/true);
+ bool HasFlexibleArrayMember = D->hasFlexibleArrayMember();
+ for (auto I = D->field_begin(), End = D->field_end(); I != End; ++I) {
+ auto Next(I);
+ ++Next;
+ LayoutField(*I,
+ InsertExtraPadding && (Next != End || !HasFlexibleArrayMember));
+ }
+}
+
+// Rounds the specified size up to a multiple of the char size.
+static uint64_t
+roundUpSizeToCharAlignment(uint64_t Size,
+ const ASTContext &Context) {
+ uint64_t CharAlignment = Context.getTargetInfo().getCharAlign();
+ return llvm::RoundUpToAlignment(Size, CharAlignment);
+}
+
+void ItaniumRecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
+ uint64_t TypeSize,
+ bool FieldPacked,
+ const FieldDecl *D) {
+ assert(Context.getLangOpts().CPlusPlus &&
+ "Can only have wide bit-fields in C++!");
+
+ // Itanium C++ ABI 2.4:
+ // If sizeof(T)*8 < n, let T' be the largest integral POD type with
+ // sizeof(T')*8 <= n.
+
+ QualType IntegralPODTypes[] = {
+ Context.UnsignedCharTy, Context.UnsignedShortTy, Context.UnsignedIntTy,
+ Context.UnsignedLongTy, Context.UnsignedLongLongTy
+ };
+
+ QualType Type;
+ for (const QualType &QT : IntegralPODTypes) {
+ uint64_t Size = Context.getTypeSize(QT);
+
+ if (Size > FieldSize)
+ break;
+
+ Type = QT;
+ }
+ assert(!Type.isNull() && "Did not find a type!");
+
+ CharUnits TypeAlign = Context.getTypeAlignInChars(Type);
+
+ // We're not going to use any of the unfilled bits in the last byte.
+ UnfilledBitsInLastUnit = 0;
+ LastBitfieldTypeSize = 0;
+
+ uint64_t FieldOffset;
+ uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastUnit;
+
+ if (IsUnion) {
+ uint64_t RoundedFieldSize = roundUpSizeToCharAlignment(FieldSize,
+ Context);
+ setDataSize(std::max(getDataSizeInBits(), RoundedFieldSize));
+ FieldOffset = 0;
+ } else {
+ // The bitfield is allocated starting at the next offset aligned
+ // appropriately for T', with length n bits.
+ FieldOffset = llvm::RoundUpToAlignment(getDataSizeInBits(),
+ Context.toBits(TypeAlign));
+
+ uint64_t NewSizeInBits = FieldOffset + FieldSize;
+
+ setDataSize(llvm::RoundUpToAlignment(NewSizeInBits,
+ Context.getTargetInfo().getCharAlign()));
+ UnfilledBitsInLastUnit = getDataSizeInBits() - NewSizeInBits;
+ }
+
+ // Place this field at the current location.
+ FieldOffsets.push_back(FieldOffset);
+
+ CheckFieldPadding(FieldOffset, UnpaddedFieldOffset, FieldOffset,
+ Context.toBits(TypeAlign), FieldPacked, D);
+
+ // Update the size.
+ setSize(std::max(getSizeInBits(), getDataSizeInBits()));
+
+ // Remember max struct/class alignment.
+ UpdateAlignment(TypeAlign);
+}
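+
+// For illustration (hypothetical struct; assuming 8-bit char and 16-bit
+// short): in
+//
+//   struct S { char c : 20; };
+//
+// the declared width (20) exceeds the type size (8), so T' is chosen as
+// unsigned short (the largest listed type whose size fits in 20 bits) and the
+// 20-bit field is allocated at the next offset aligned for unsigned short.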
+
+void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
+ bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
+ uint64_t FieldSize = D->getBitWidthValue(Context);
+ TypeInfo FieldInfo = Context.getTypeInfo(D->getType());
+ uint64_t TypeSize = FieldInfo.Width;
+ unsigned FieldAlign = FieldInfo.Align;
+
+ // UnfilledBitsInLastUnit is the difference between the end of the
+ // last allocated bitfield (i.e. the first bit offset available for
+ // bitfields) and the end of the current data size in bits (i.e. the
+ // first bit offset available for non-bitfields). The current data
+ // size in bits is always a multiple of the char size; additionally,
+ // for ms_struct records it's also a multiple of the
+ // LastBitfieldTypeSize (if set).
+
+ // The struct-layout algorithm is dictated by the platform ABI,
+ // which in principle could use almost any rules it likes. In
+ // practice, UNIXy targets tend to inherit the algorithm described
+ // in the System V generic ABI. The basic bitfield layout rule in
+ // System V is to place bitfields at the next available bit offset
+ // where the entire bitfield would fit in an aligned storage unit of
+ // the declared type; it's okay if an earlier or later non-bitfield
+ // is allocated in the same storage unit. However, some targets
+ // (those that !useBitFieldTypeAlignment(), e.g. ARM APCS) don't
+ // require this storage unit to be aligned, and therefore always put
+ // the bitfield at the next available bit offset.
+
+ // ms_struct basically requests a complete replacement of the
+ // platform ABI's struct-layout algorithm, with the high-level goal
+ // of duplicating MSVC's layout. For non-bitfields, this follows
+ // the standard algorithm. The basic bitfield layout rule is to
+ // allocate an entire unit of the bitfield's declared type
+ // (e.g. 'unsigned long'), then parcel it up among successive
+ // bitfields whose declared types have the same size, making a new
+ // unit as soon as the last can no longer store the whole value.
+ // Since it completely replaces the platform ABI's algorithm,
+ // settings like !useBitFieldTypeAlignment() do not apply.
+
+ // A zero-width bitfield forces the use of a new storage unit for
+ // later bitfields. In general, this occurs by rounding up the
+ // current size of the struct as if the algorithm were about to
+ // place a non-bitfield of the field's formal type. Usually this
+ // does not change the alignment of the struct itself, but it does
+ // on some targets (those that useZeroLengthBitfieldAlignment(),
+ // e.g. ARM). In ms_struct layout, zero-width bitfields are
+ // ignored unless they follow a non-zero-width bitfield.
+
+ // A field alignment restriction (e.g. from #pragma pack) or
+ // specification (e.g. from __attribute__((aligned))) changes the
+ // formal alignment of the field. For System V, this alters the
+ // required alignment of the notional storage unit that must contain
+ // the bitfield. For ms_struct, this only affects the placement of
+ // new storage units. In both cases, the effect of #pragma pack is
+ // ignored on zero-width bitfields.
+
+ // On System V, a packed field (e.g. from #pragma pack or
+ // __attribute__((packed))) always uses the next available bit
+ // offset.
+
+ // In an ms_struct struct, the alignment of a fundamental type is
+ // always equal to its size. This is necessary in order to mimic
+ // the i386 alignment rules on targets which might not fully align
+ // all types (e.g. Darwin PPC32, where alignof(long long) == 4).
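+
+ // For illustration (hypothetical struct; assuming 32-bit int and 8-bit
+ // char): in
+ //
+ //   struct S { int a : 24; int b : 16; char c : 4; };
+ //
+ // System V places b at bit offset 32 (the next offset at which a 16-bit
+ // field fits in an aligned 32-bit unit of its declared type) and packs c
+ // right after b at bit offset 48. Under ms_struct, a claims a whole 'int'
+ // unit with 8 bits left over; b does not fit there, so a second 'int' unit
+ // starts at bit offset 32, and c starts yet another unit because its
+ // storage type has a different size.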
+
+ // First, some simple bookkeeping to perform for ms_struct structs.
+ if (IsMsStruct) {
+ // The field alignment for integer types is always the size.
+ FieldAlign = TypeSize;
+
+ // If the previous field was not a bitfield, or was a bitfield
+ // with a different storage unit size, we're done with that
+ // storage unit.
+ if (LastBitfieldTypeSize != TypeSize) {
+ // Also, ignore zero-length bitfields after non-bitfields.
+ if (!LastBitfieldTypeSize && !FieldSize)
+ FieldAlign = 1;
+
+ UnfilledBitsInLastUnit = 0;
+ LastBitfieldTypeSize = 0;
+ }
+ }
+
+ // If the field is wider than its declared type, it follows
+ // different rules in all cases.
+ if (FieldSize > TypeSize) {
+ LayoutWideBitField(FieldSize, TypeSize, FieldPacked, D);
+ return;
+ }
+
+ // Compute the next available bit offset.
+ uint64_t FieldOffset =
+ IsUnion ? 0 : (getDataSizeInBits() - UnfilledBitsInLastUnit);
+
+ // Handle targets that don't honor bitfield type alignment.
+ if (!IsMsStruct && !Context.getTargetInfo().useBitFieldTypeAlignment()) {
+ // Some such targets do honor it on zero-width bitfields.
+ if (FieldSize == 0 &&
+ Context.getTargetInfo().useZeroLengthBitfieldAlignment()) {
+ // The alignment to round up to is the max of the field's natural
+ // alignment and a target-specific fixed value (sometimes zero).
+ unsigned ZeroLengthBitfieldBoundary =
+ Context.getTargetInfo().getZeroLengthBitfieldBoundary();
+ FieldAlign = std::max(FieldAlign, ZeroLengthBitfieldBoundary);
+
+ // If that doesn't apply, just ignore the field alignment.
+ } else {
+ FieldAlign = 1;
+ }
+ }
+
+ // Remember the alignment we would have used if the field were not packed.
+ unsigned UnpackedFieldAlign = FieldAlign;
+
+ // Ignore the field alignment if the field is packed unless it has zero-size.
+ if (!IsMsStruct && FieldPacked && FieldSize != 0)
+ FieldAlign = 1;
+
+ // But, if there's an 'aligned' attribute on the field, honor that.
+ if (unsigned ExplicitFieldAlign = D->getMaxAlignment()) {
+ FieldAlign = std::max(FieldAlign, ExplicitFieldAlign);
+ UnpackedFieldAlign = std::max(UnpackedFieldAlign, ExplicitFieldAlign);
+ }
+
+ // But, if there's a #pragma pack in play, that takes precedence over
+ // even the 'aligned' attribute, for non-zero-width bitfields.
+ if (!MaxFieldAlignment.isZero() && FieldSize) {
+ unsigned MaxFieldAlignmentInBits = Context.toBits(MaxFieldAlignment);
+ FieldAlign = std::min(FieldAlign, MaxFieldAlignmentInBits);
+ UnpackedFieldAlign = std::min(UnpackedFieldAlign, MaxFieldAlignmentInBits);
+ }
+
+ // But, ms_struct just ignores all of that in unions, even explicit
+ // alignment attributes.
+ if (IsMsStruct && IsUnion) {
+ FieldAlign = UnpackedFieldAlign = 1;
+ }
+
+ // For purposes of diagnostics, we're going to simultaneously
+ // compute the field offsets that we would have used if we weren't
+ // adding any alignment padding or if the field weren't packed.
+ uint64_t UnpaddedFieldOffset = FieldOffset;
+ uint64_t UnpackedFieldOffset = FieldOffset;
+
+ // Check if we need to add padding to fit the bitfield within an
+ // allocation unit with the right size and alignment. The rules are
+ // somewhat different here for ms_struct structs.
+ if (IsMsStruct) {
+ // If it's not a zero-width bitfield, and we can fit the bitfield
+ // into the active storage unit (and we haven't already decided to
+ // start a new storage unit), just do so, regardless of any other
+ // consideration. Otherwise, round up to the right alignment.
+ if (FieldSize == 0 || FieldSize > UnfilledBitsInLastUnit) {
+ FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
+ UnpackedFieldOffset = llvm::RoundUpToAlignment(UnpackedFieldOffset,
+ UnpackedFieldAlign);
+ UnfilledBitsInLastUnit = 0;
+ }
+
+ } else {
+ // #pragma pack, with any value, suppresses the insertion of padding.
+ bool AllowPadding = MaxFieldAlignment.isZero();
+
+ // Compute the real offset.
+ if (FieldSize == 0 ||
+ (AllowPadding &&
+ (FieldOffset & (FieldAlign-1)) + FieldSize > TypeSize)) {
+ FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
+ }
+
+ // Repeat the computation for diagnostic purposes.
+ if (FieldSize == 0 ||
+ (AllowPadding &&
+ (UnpackedFieldOffset & (UnpackedFieldAlign-1)) + FieldSize > TypeSize))
+ UnpackedFieldOffset = llvm::RoundUpToAlignment(UnpackedFieldOffset,
+ UnpackedFieldAlign);
+ }
+
+ // If we're using external layout, give the external layout a chance
+ // to override this information.
+ if (UseExternalLayout)
+ FieldOffset = updateExternalFieldOffset(D, FieldOffset);
+
+ // Okay, place the bitfield at the calculated offset.
+ FieldOffsets.push_back(FieldOffset);
+
+ // Bookkeeping:
+
+ // Anonymous members don't affect the overall record alignment,
+ // except on targets where they do.
+ if (!IsMsStruct &&
+ !Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
+ !D->getIdentifier())
+ FieldAlign = UnpackedFieldAlign = 1;
+
+ // Diagnose differences in layout due to padding or packing.
+ if (!UseExternalLayout)
+ CheckFieldPadding(FieldOffset, UnpaddedFieldOffset, UnpackedFieldOffset,
+ UnpackedFieldAlign, FieldPacked, D);
+
+ // Update DataSize to include the last byte containing (part of) the bitfield.
+
+ // For unions, this is just a max operation, as usual.
+ if (IsUnion) {
+ // For ms_struct, allocate the entire storage unit --- unless this
+ // is a zero-width bitfield, in which case just use a size of 1.
+ uint64_t RoundedFieldSize;
+ if (IsMsStruct) {
+ RoundedFieldSize =
+ (FieldSize ? TypeSize : Context.getTargetInfo().getCharWidth());
+
+ // Otherwise, allocate just the number of bytes required to store
+ // the bitfield.
+ } else {
+ RoundedFieldSize = roundUpSizeToCharAlignment(FieldSize, Context);
+ }
+ setDataSize(std::max(getDataSizeInBits(), RoundedFieldSize));
+
+ // For non-zero-width bitfields in ms_struct structs, allocate a new
+ // storage unit if necessary.
+ } else if (IsMsStruct && FieldSize) {
+ // We should have cleared UnfilledBitsInLastUnit in every case
+ // where we changed storage units.
+ if (!UnfilledBitsInLastUnit) {
+ setDataSize(FieldOffset + TypeSize);
+ UnfilledBitsInLastUnit = TypeSize;
+ }
+ UnfilledBitsInLastUnit -= FieldSize;
+ LastBitfieldTypeSize = TypeSize;
+
+ // Otherwise, bump the data size up to include the bitfield,
+ // including padding up to char alignment, and then remember how many
+ // bits we didn't use.
+ } else {
+ uint64_t NewSizeInBits = FieldOffset + FieldSize;
+ uint64_t CharAlignment = Context.getTargetInfo().getCharAlign();
+ setDataSize(llvm::RoundUpToAlignment(NewSizeInBits, CharAlignment));
+ UnfilledBitsInLastUnit = getDataSizeInBits() - NewSizeInBits;
+
+ // The only time we can get here for an ms_struct is if this is a
+ // zero-width bitfield, which doesn't count as anything for the
+ // purposes of unfilled bits.
+ LastBitfieldTypeSize = 0;
+ }
+
+ // Update the size.
+ setSize(std::max(getSizeInBits(), getDataSizeInBits()));
+
+ // Remember max struct/class alignment.
+ UpdateAlignment(Context.toCharUnitsFromBits(FieldAlign),
+ Context.toCharUnitsFromBits(UnpackedFieldAlign));
+}
+
+void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
+ bool InsertExtraPadding) {
+ if (D->isBitField()) {
+ LayoutBitField(D);
+ return;
+ }
+
+ uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastUnit;
+
+ // Reset the unfilled bits.
+ UnfilledBitsInLastUnit = 0;
+ LastBitfieldTypeSize = 0;
+
+ bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
+ CharUnits FieldOffset =
+ IsUnion ? CharUnits::Zero() : getDataSize();
+ CharUnits FieldSize;
+ CharUnits FieldAlign;
+
+ if (D->getType()->isIncompleteArrayType()) {
+ // This is a flexible array member; we can't directly
+ // query getTypeInfo about these, so we figure it out here.
+ // Flexible array members don't have any size, but they
+ // have to be aligned appropriately for their element type.
+ FieldSize = CharUnits::Zero();
+ const ArrayType* ATy = Context.getAsArrayType(D->getType());
+ FieldAlign = Context.getTypeAlignInChars(ATy->getElementType());
+ } else if (const ReferenceType *RT = D->getType()->getAs<ReferenceType>()) {
+ unsigned AS = RT->getPointeeType().getAddressSpace();
+ FieldSize =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(AS));
+ FieldAlign =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(AS));
+ } else {
+ std::pair<CharUnits, CharUnits> FieldInfo =
+ Context.getTypeInfoInChars(D->getType());
+ FieldSize = FieldInfo.first;
+ FieldAlign = FieldInfo.second;
+
+ if (IsMsStruct) {
+ // If MS bitfield layout is required, figure out what type is being
+ // laid out and align the field to the width of that type.
+
+ // Resolve all typedefs down to their base type and round up the field
+ // alignment if necessary.
+ QualType T = Context.getBaseElementType(D->getType());
+ if (const BuiltinType *BTy = T->getAs<BuiltinType>()) {
+ CharUnits TypeSize = Context.getTypeSizeInChars(BTy);
+ if (TypeSize > FieldAlign)
+ FieldAlign = TypeSize;
+ }
+ }
+ }
+
+ // The alignment the field would have if it were not packed. This is used
+ // to check whether the 'packed' attribute was unnecessary (-Wpacked).
+ CharUnits UnpackedFieldAlign = FieldAlign;
+ CharUnits UnpackedFieldOffset = FieldOffset;
+
+ if (FieldPacked)
+ FieldAlign = CharUnits::One();
+ CharUnits MaxAlignmentInChars =
+ Context.toCharUnitsFromBits(D->getMaxAlignment());
+ FieldAlign = std::max(FieldAlign, MaxAlignmentInChars);
+ UnpackedFieldAlign = std::max(UnpackedFieldAlign, MaxAlignmentInChars);
+
+ // The maximum field alignment overrides the aligned attribute.
+ if (!MaxFieldAlignment.isZero()) {
+ FieldAlign = std::min(FieldAlign, MaxFieldAlignment);
+ UnpackedFieldAlign = std::min(UnpackedFieldAlign, MaxFieldAlignment);
+ }
+
+ // Round up the current record size to the field's alignment boundary.
+ FieldOffset = FieldOffset.RoundUpToAlignment(FieldAlign);
+ UnpackedFieldOffset =
+ UnpackedFieldOffset.RoundUpToAlignment(UnpackedFieldAlign);
+
+ if (UseExternalLayout) {
+ FieldOffset = Context.toCharUnitsFromBits(
+ updateExternalFieldOffset(D, Context.toBits(FieldOffset)));
+
+ if (!IsUnion && EmptySubobjects) {
+ // Record the fact that we're placing a field at this offset.
+ bool Allowed = EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset);
+ (void)Allowed;
+ assert(Allowed && "Externally-placed field cannot be placed here");
+ }
+ } else {
+ if (!IsUnion && EmptySubobjects) {
+ // Check if we can place the field at this offset.
+ while (!EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset)) {
+ // We couldn't place the field at the offset. Try again at a new offset.
+ FieldOffset += FieldAlign;
+ }
+ }
+ }
+
+ // Place this field at the current location.
+ FieldOffsets.push_back(Context.toBits(FieldOffset));
+
+ if (!UseExternalLayout)
+ CheckFieldPadding(Context.toBits(FieldOffset), UnpaddedFieldOffset,
+ Context.toBits(UnpackedFieldOffset),
+ Context.toBits(UnpackedFieldAlign), FieldPacked, D);
+
+ if (InsertExtraPadding) {
+ CharUnits ASanAlignment = CharUnits::fromQuantity(8);
+ CharUnits ExtraSizeForAsan = ASanAlignment;
+ if (FieldSize % ASanAlignment)
+ ExtraSizeForAsan +=
+ ASanAlignment - CharUnits::fromQuantity(FieldSize % ASanAlignment);
+ FieldSize += ExtraSizeForAsan;
+ }
+
+ // Reserve space for this field.
+ uint64_t FieldSizeInBits = Context.toBits(FieldSize);
+ if (IsUnion)
+ setDataSize(std::max(getDataSizeInBits(), FieldSizeInBits));
+ else
+ setDataSize(FieldOffset + FieldSize);
+
+ // Update the size.
+ setSize(std::max(getSizeInBits(), getDataSizeInBits()));
+
+ // Remember max struct/class alignment.
+ UpdateAlignment(FieldAlign, UnpackedFieldAlign);
+}
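+
+// For illustration (hypothetical struct; assuming 4-byte int and 8-byte
+// double): in
+//
+//   struct S { int n; double data[]; };
+//
+// the flexible array member contributes no size, but the incomplete-array
+// branch above still aligns it for its element type, so data lands at offset
+// 8 and sizeof(S) == 8 once the record is finished.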
+
+void ItaniumRecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
+ // In C++, records cannot be of size 0.
+ if (Context.getLangOpts().CPlusPlus && getSizeInBits() == 0) {
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ // Compatibility with gcc requires a class (POD or non-POD) that is not
+ // empty but has size 0 (for example, because all of its fields are
+ // zero-length arrays) to remain of size 0.
+ if (RD->isEmpty())
+ setSize(CharUnits::One());
+ } else
+ setSize(CharUnits::One());
+ }
+
+ // Finally, round the size of the record up to the alignment of the
+ // record itself.
+ uint64_t UnpaddedSize = getSizeInBits() - UnfilledBitsInLastUnit;
+ uint64_t UnpackedSizeInBits =
+ llvm::RoundUpToAlignment(getSizeInBits(),
+ Context.toBits(UnpackedAlignment));
+ CharUnits UnpackedSize = Context.toCharUnitsFromBits(UnpackedSizeInBits);
+ uint64_t RoundedSize
+ = llvm::RoundUpToAlignment(getSizeInBits(), Context.toBits(Alignment));
+
+ if (UseExternalLayout) {
+ // If we're inferring alignment, and the external size is smaller than
+ // our size after we've rounded up to alignment, conservatively set the
+ // alignment to 1.
+ if (InferAlignment && External.Size < RoundedSize) {
+ Alignment = CharUnits::One();
+ InferAlignment = false;
+ }
+ setSize(External.Size);
+ return;
+ }
+
+ // Set the size to the final size.
+ setSize(RoundedSize);
+
+ unsigned CharBitNum = Context.getTargetInfo().getCharWidth();
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(D)) {
+ // Warn if padding was introduced to the struct/class/union.
+ if (getSizeInBits() > UnpaddedSize) {
+ unsigned PadSize = getSizeInBits() - UnpaddedSize;
+ bool InBits = true;
+ if (PadSize % CharBitNum == 0) {
+ PadSize = PadSize / CharBitNum;
+ InBits = false;
+ }
+ Diag(RD->getLocation(), diag::warn_padded_struct_size)
+ << Context.getTypeDeclType(RD)
+ << PadSize
+ << (InBits ? 1 : 0); // (byte|bit)
+ }
+
+ // Warn if we packed it unnecessarily. If the alignment is 1 byte don't
+ // bother since there won't be alignment issues.
+ if (Packed && UnpackedAlignment > CharUnits::One() &&
+ getSize() == UnpackedSize)
+ Diag(D->getLocation(), diag::warn_unnecessary_packed)
+ << Context.getTypeDeclType(RD);
+ }
+}
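+
+// For illustration (hypothetical struct; assuming 4-byte int with 4-byte
+// alignment): laying out
+//
+//   struct S { int i; char c; };
+//
+// leaves an unpadded size of 5 bytes; rounding up to the record's 4-byte
+// alignment yields sizeof(S) == 8, and the 3 bytes of tail padding are
+// reported through warn_padded_struct_size when -Wpadded is enabled.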
+
+void ItaniumRecordLayoutBuilder::UpdateAlignment(
+ CharUnits NewAlignment, CharUnits UnpackedNewAlignment) {
+ // The alignment is not modified when using 'mac68k' alignment or when
+ // we have an externally-supplied layout that also provides overall alignment.
+ if (IsMac68kAlign || (UseExternalLayout && !InferAlignment))
+ return;
+
+ if (NewAlignment > Alignment) {
+ assert(llvm::isPowerOf2_64(NewAlignment.getQuantity()) &&
+ "Alignment not a power of 2");
+ Alignment = NewAlignment;
+ }
+
+ if (UnpackedNewAlignment > UnpackedAlignment) {
+ assert(llvm::isPowerOf2_64(UnpackedNewAlignment.getQuantity()) &&
+ "Alignment not a power of 2");
+ UnpackedAlignment = UnpackedNewAlignment;
+ }
+}
+
+uint64_t
+ItaniumRecordLayoutBuilder::updateExternalFieldOffset(const FieldDecl *Field,
+ uint64_t ComputedOffset) {
+ uint64_t ExternalFieldOffset = External.getExternalFieldOffset(Field);
+
+ if (InferAlignment && ExternalFieldOffset < ComputedOffset) {
+ // The externally-supplied field offset is before the field offset we
+ // computed. Assume that the structure is packed.
+ Alignment = CharUnits::One();
+ InferAlignment = false;
+ }
+
+ // Use the externally-supplied field offset.
+ return ExternalFieldOffset;
+}
+
+/// \brief Get diagnostic %select index for tag kind for
+/// field padding diagnostic message.
+/// WARNING: Indexes apply to particular diagnostics only!
+///
+/// \returns diagnostic %select index.
+static unsigned getPaddingDiagFromTagKind(TagTypeKind Tag) {
+ switch (Tag) {
+ case TTK_Struct: return 0;
+ case TTK_Interface: return 1;
+ case TTK_Class: return 2;
+ default: llvm_unreachable("Invalid tag kind for field padding diagnostic!");
+ }
+}
+
+void ItaniumRecordLayoutBuilder::CheckFieldPadding(
+ uint64_t Offset, uint64_t UnpaddedOffset, uint64_t UnpackedOffset,
+ unsigned UnpackedAlign, bool isPacked, const FieldDecl *D) {
+ // Let ObjC ivars pass without warning; ObjC interfaces are generally not
+ // used for padding tricks.
+ if (isa<ObjCIvarDecl>(D))
+ return;
+
+ // Don't warn about structs created without a SourceLocation. This can
+ // be done by clients of the AST, such as codegen.
+ if (D->getLocation().isInvalid())
+ return;
+
+ unsigned CharBitNum = Context.getTargetInfo().getCharWidth();
+
+ // Warn if padding was introduced to the struct/class.
+ if (!IsUnion && Offset > UnpaddedOffset) {
+ unsigned PadSize = Offset - UnpaddedOffset;
+ bool InBits = true;
+ if (PadSize % CharBitNum == 0) {
+ PadSize = PadSize / CharBitNum;
+ InBits = false;
+ }
+ if (D->getIdentifier())
+ Diag(D->getLocation(), diag::warn_padded_struct_field)
+ << getPaddingDiagFromTagKind(D->getParent()->getTagKind())
+ << Context.getTypeDeclType(D->getParent())
+ << PadSize
+ << (InBits ? 1 : 0) // (byte|bit)
+ << D->getIdentifier();
+ else
+ Diag(D->getLocation(), diag::warn_padded_struct_anon_field)
+ << getPaddingDiagFromTagKind(D->getParent()->getTagKind())
+ << Context.getTypeDeclType(D->getParent())
+ << PadSize
+ << (InBits ? 1 : 0); // (byte|bit)
+ }
+
+ // Warn if we packed it unnecessarily. If the alignment is 1 byte don't
+ // bother since there won't be alignment issues.
+ if (isPacked && UnpackedAlign > CharBitNum && Offset == UnpackedOffset)
+ Diag(D->getLocation(), diag::warn_unnecessary_packed)
+ << D->getIdentifier();
+}
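+
+// For illustration (hypothetical struct; assuming 4-byte int with 4-byte
+// alignment): in
+//
+//   struct S { char c; int i; };
+//
+// the field i is placed at offset 4 rather than the unpadded offset 1, so the
+// 3 bytes of padding are reported through warn_padded_struct_field under
+// -Wpadded.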
+
+static const CXXMethodDecl *computeKeyFunction(ASTContext &Context,
+ const CXXRecordDecl *RD) {
+ // If a class isn't polymorphic it doesn't have a key function.
+ if (!RD->isPolymorphic())
+ return nullptr;
+
+ // A class that is not externally visible doesn't have a key function. (Or
+ // at least, there's no point to assigning a key function to such a class;
+ // this doesn't affect the ABI.)
+ if (!RD->isExternallyVisible())
+ return nullptr;
+
+ // Template instantiations don't have key functions per Itanium C++ ABI 5.2.6.
+ // Same behavior as GCC.
+ TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind();
+ if (TSK == TSK_ImplicitInstantiation ||
+ TSK == TSK_ExplicitInstantiationDeclaration ||
+ TSK == TSK_ExplicitInstantiationDefinition)
+ return nullptr;
+
+ bool allowInlineFunctions =
+ Context.getTargetInfo().getCXXABI().canKeyFunctionBeInline();
+
+ for (const CXXMethodDecl *MD : RD->methods()) {
+ if (!MD->isVirtual())
+ continue;
+
+ if (MD->isPure())
+ continue;
+
+ // Ignore implicit member functions, they are always marked as inline, but
+ // they don't have a body until they're defined.
+ if (MD->isImplicit())
+ continue;
+
+ if (MD->isInlineSpecified())
+ continue;
+
+ if (MD->hasInlineBody())
+ continue;
+
+ // Ignore inline deleted or defaulted functions.
+ if (!MD->isUserProvided())
+ continue;
+
+ // In certain ABIs, ignore functions with out-of-line inline definitions.
+ if (!allowInlineFunctions) {
+ const FunctionDecl *Def;
+ if (MD->hasBody(Def) && Def->isInlineSpecified())
+ continue;
+ }
+
+ if (Context.getLangOpts().CUDA) {
+ // While the compiler may see the key method in this TU, during CUDA
+ // compilation we should ignore methods that are not accessible on this
+ // side of the compilation.
+ if (Context.getLangOpts().CUDAIsDevice) {
+ // In device mode ignore methods without __device__ attribute.
+ if (!MD->hasAttr<CUDADeviceAttr>())
+ continue;
+ } else {
+ // In host mode ignore __device__-only methods.
+ if (!MD->hasAttr<CUDAHostAttr>() && MD->hasAttr<CUDADeviceAttr>())
+ continue;
+ }
+ }
+
+ // If the key function is dllimport but the class isn't, then the class has
+ // no key function. The DLL that exports the key function won't export the
+ // vtable in this case.
+ if (MD->hasAttr<DLLImportAttr>() && !RD->hasAttr<DLLImportAttr>())
+ return nullptr;
+
+ // We found it.
+ return MD;
+ }
+
+ return nullptr;
+}
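+
+// For illustration (hypothetical class): in
+//
+//   struct S {
+//     virtual void g() { }   // inline body: skipped
+//     virtual void f();      // non-inline, user-provided: the key function
+//   };
+//
+// f is S's key function, so under the Itanium ABI the vtable for S is emitted
+// only in the translation unit that defines S::f.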
+
+DiagnosticBuilder ItaniumRecordLayoutBuilder::Diag(SourceLocation Loc,
+ unsigned DiagID) {
+ return Context.getDiagnostics().Report(Loc, DiagID);
+}
+
+/// Does the target C++ ABI require us to skip over the tail-padding
+/// of the given class (considering it as a base class) when allocating
+/// objects?
+static bool mustSkipTailPadding(TargetCXXABI ABI, const CXXRecordDecl *RD) {
+ switch (ABI.getTailPaddingUseRules()) {
+ case TargetCXXABI::AlwaysUseTailPadding:
+ return false;
+
+ case TargetCXXABI::UseTailPaddingUnlessPOD03:
+ // FIXME: To the extent that this is meant to cover the Itanium ABI
+ // rules, we should implement the restrictions about over-sized
+ // bitfields:
+ //
+ // http://mentorembedded.github.com/cxx-abi/abi.html#POD :
+ // In general, a type is considered a POD for the purposes of
+ // layout if it is a POD type (in the sense of ISO C++
+ // [basic.types]). However, a POD-struct or POD-union (in the
+ // sense of ISO C++ [class]) with a bitfield member whose
+ // declared width is wider than the declared type of the
+ // bitfield is not a POD for the purpose of layout. Similarly,
+ // an array type is not a POD for the purpose of layout if the
+ // element type of the array is not a POD for the purpose of
+ // layout.
+ //
+ // Where references to the ISO C++ are made in this paragraph,
+ // the Technical Corrigendum 1 version of the standard is
+ // intended.
+ return RD->isPOD();
+
+ case TargetCXXABI::UseTailPaddingUnlessPOD11:
+ // This is equivalent to RD->getTypeForDecl().isCXX11PODType(),
+ // but with a lot of abstraction penalty stripped off. This does
+ // assume that these properties are set correctly even in C++98
+ // mode; fortunately, that is true because we want to assign
+ // consistently semantics to the type-traits intrinsics (or at
+ // least as many of them as possible).
+ return RD->isTrivial() && RD->isStandardLayout();
+ }
+
+ llvm_unreachable("bad tail-padding use kind");
+}
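+
+// For illustration (hypothetical classes; Itanium rules): with
+//
+//   struct A { A(); int i; char c; };   // non-POD: nvsize 5, size 8
+//   struct B : A { char d; };
+//
+// A is not a POD, so its tail padding may be reused and d is placed at offset
+// 5, giving sizeof(B) == 8. If A had no constructor (making it a POD), d
+// would be placed at offset 8 and sizeof(B) would be 12.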
+
+static bool isMsLayout(const ASTContext &Context) {
+ return Context.getTargetInfo().getCXXABI().isMicrosoft();
+}
+
+// This section contains an implementation of struct layout that is, up to the
+// included tests, compatible with cl.exe (2013). The layout produced is
+// significantly different from that produced by the Itanium ABI. Here we note
+// the most important differences.
+//
+// * The alignment of bitfields in unions is ignored when computing the
+// alignment of the union.
+// * A zero-width bitfield that occurs after anything other than a
+// non-zero-width bitfield is ignored.
+// * There is no explicit primary base for the purposes of layout. All bases
+// with vfptrs are laid out first, followed by all bases without vfptrs.
+// * The Itanium equivalent vtable pointers are split into a vfptr (virtual
+// function pointer) and a vbptr (virtual base pointer). They can each be
+// shared with a non-virtual base; these bases need not be the same. vfptrs
+// always occur at offset 0. vbptrs can occur at an arbitrary offset and are
+// placed after the lexicographically last non-virtual base. This placement
+// is always before fields but can be in the middle of the non-virtual bases
+// due to the two-pass layout scheme for non-virtual-bases.
+// * Virtual bases sometimes require a 'vtordisp' field that is laid out before
+// the virtual base and is used in conjunction with virtual overrides during
+// construction and destruction. This is always a 4 byte value and is used as
+// an alternative to constructor vtables.
+// * vtordisps are allocated in a block of memory with size and alignment equal
+// to the alignment of the completed structure (before applying __declspec(
+// align())). vtordisps always occur at the end of the allocation block,
+// immediately prior to the virtual base.
+// * vfptrs are injected after all bases and fields have been laid out. In
+// order to guarantee proper alignment of all fields, the vfptr injection
+// pushes all bases and fields back by the alignment imposed by those bases
+// and fields. This can potentially add a significant amount of padding.
+// vfptrs are always injected at offset 0.
+// * vbptrs are injected after all bases and fields have been laid out. In
+// order to guarantee proper alignment of all fields, the vfptr injection
+// pushes all bases and fields back by the alignment imposed by those bases
+// and fields. This can potentially add a significant amount of padding.
+// vbptrs are injected immediately after the last non-virtual base as
+// lexicographically ordered in the code. If this site isn't pointer aligned
+// the vbptr is placed at the next properly aligned location. Enough padding
+// is added to guarantee a fit.
+// * The last zero sized non-virtual base can be placed at the end of the
+// struct (potentially aliasing another object), or may alias with the first
+// field, even if they are of the same type.
+// * The last zero size virtual base may be placed at the end of the struct
+// potentially aliasing another object.
+// * The ABI attempts to avoid aliasing of zero sized bases by adding padding
+// between bases or vbases with specific properties. The criteria for
+// additional padding between two bases is that the first base is zero sized
+// or ends with a zero sized subobject and the second base is zero sized or
+// trails with a zero sized base or field (sharing of vfptrs can reorder the
+// layout of the bases, so the leading base is not always the first one
+// declared). This rule does take into account fields that are not records,
+// This rule does take into account fields that are not records, so padding
+// will occur even if the last field is, e.g. an int. The padding added for
+// bases is 1 byte. The padding added between vbases depends on the alignment
+// of the object but is at least 4 bytes (in both 32 and 64 bit modes).
+// * There is no concept of non-virtual alignment, non-virtual alignment and
+// alignment are always identical.
+// * There is a distinction between alignment and required alignment.
+// __declspec(align) changes the required alignment of a struct. This
+// alignment is _always_ obeyed, even in the presence of #pragma pack. A
+// record inherits required alignment from all of its fields and bases.
+// * __declspec(align) on bitfields has the effect of changing the bitfield's
+// alignment instead of its required alignment. This is the only known way
+// to make the alignment of a struct bigger than 8. Interestingly enough
+// this alignment is also immune to the effects of #pragma pack and can be
+// used to create structures with large alignment under #pragma pack.
+// However, because it does not impact required alignment, such a structure,
+// when used as a field or base, will not be aligned if #pragma pack is
+// still active at the time of use.
+//
+// Known incompatibilities:
+// * all: #pragma pack between fields in a record
+// * 2010 and back: If the last field in a record is a bitfield, every object
+// laid out after the record will have extra padding inserted before it. The
+// extra padding will have size equal to the size of the storage class of the
+// bitfield. 0 sized bitfields don't exhibit this behavior and the extra
+// padding can be avoided by adding a 0 sized bitfield after the non-zero-
+// sized bitfield.
+// * 2012 and back: In 64-bit mode, if the alignment of a record is 16 or
+// greater due to __declspec(align()) then a second layout phase occurs after
+// the locations of the vf and vb pointers are known. This layout phase
+// suffers from the "last field is a bitfield" bug in 2010 and results in
+// _every_ field getting padding put in front of it, potentially including the
+// vfptr, leaving the vfptr at a non-zero location, which results in a fault if
+// anything tries to read the vftbl. The second layout phase also treats
+// bitfields as separate entities and gives them each storage rather than
+// packing them. Additionally, because this phase appears to perform an
+// (unstable) sort on the members before laying them out and because merged
+// bitfields have the same address, the bitfields end up in whatever order
+// the sort left them in, a behavior we could never hope to replicate.
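+
+// For illustration (hypothetical classes; behavior as described in the notes
+// above, not independently verified against cl.exe): with
+//
+//   struct A { };
+//   struct B { };
+//   struct S : A, B { char c; };
+//
+// both bases are zero sized, so a byte of padding is inserted between them to
+// keep the A and B subobjects from aliasing, while the trailing zero-sized
+// base B may still alias the first field c.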
+
+namespace {
+struct MicrosoftRecordLayoutBuilder {
+ struct ElementInfo {
+ CharUnits Size;
+ CharUnits Alignment;
+ };
+ typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsetsMapTy;
+ MicrosoftRecordLayoutBuilder(const ASTContext &Context) : Context(Context) {}
+private:
+ MicrosoftRecordLayoutBuilder(const MicrosoftRecordLayoutBuilder &) = delete;
+ void operator=(const MicrosoftRecordLayoutBuilder &) = delete;
+public:
+ void layout(const RecordDecl *RD);
+ void cxxLayout(const CXXRecordDecl *RD);
+ /// \brief Initializes size and alignment and honors some flags.
+ void initializeLayout(const RecordDecl *RD);
+ /// \brief Initializes C++ layout, computes alignment and virtual alignment,
+ /// and determines the existence of vfptrs and vbptrs. Alignment is needed
+ /// before the vfptr is laid out.
+ void initializeCXXLayout(const CXXRecordDecl *RD);
+ void layoutNonVirtualBases(const CXXRecordDecl *RD);
+ void layoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
+ const ASTRecordLayout &BaseLayout,
+ const ASTRecordLayout *&PreviousBaseLayout);
+ void injectVFPtr(const CXXRecordDecl *RD);
+ void injectVBPtr(const CXXRecordDecl *RD);
+ /// \brief Lays out the fields of the record. Also rounds size up to
+ /// alignment.
+ void layoutFields(const RecordDecl *RD);
+ void layoutField(const FieldDecl *FD);
+ void layoutBitField(const FieldDecl *FD);
+ /// \brief Lays out a single zero-width bit-field in the record and handles
+ /// special cases associated with zero-width bit-fields.
+ void layoutZeroWidthBitField(const FieldDecl *FD);
+ void layoutVirtualBases(const CXXRecordDecl *RD);
+ void finalizeLayout(const RecordDecl *RD);
+ /// \brief Gets the size and alignment of a base taking pragma pack and
+ /// __declspec(align) into account.
+ ElementInfo getAdjustedElementInfo(const ASTRecordLayout &Layout);
+ /// \brief Gets the size and alignment of a field taking pragma pack and
+ /// __declspec(align) into account. It also updates RequiredAlignment as a
+ /// side effect because it is most convenient to do so here.
+ ElementInfo getAdjustedElementInfo(const FieldDecl *FD);
+ /// \brief Places a field at an offset in CharUnits.
+ void placeFieldAtOffset(CharUnits FieldOffset) {
+ FieldOffsets.push_back(Context.toBits(FieldOffset));
+ }
+ /// \brief Places a bitfield at a bit offset.
+ void placeFieldAtBitOffset(uint64_t FieldOffset) {
+ FieldOffsets.push_back(FieldOffset);
+ }
+ /// \brief Compute the set of virtual bases for which vtordisps are required.
+ void computeVtorDispSet(
+ llvm::SmallPtrSetImpl<const CXXRecordDecl *> &HasVtorDispSet,
+ const CXXRecordDecl *RD) const;
+ const ASTContext &Context;
+ /// \brief The size of the record being laid out.
+ CharUnits Size;
+ /// \brief The non-virtual size of the record layout.
+ CharUnits NonVirtualSize;
+ /// \brief The data size of the record layout.
+ CharUnits DataSize;
+ /// \brief The current alignment of the record layout.
+ CharUnits Alignment;
+ /// \brief The maximum allowed field alignment. This is set by #pragma pack.
+ CharUnits MaxFieldAlignment;
+ /// \brief The alignment that this record must obey. This is imposed by
+ /// __declspec(align()) on the record itself or one of its fields or bases.
+ CharUnits RequiredAlignment;
+ /// \brief The size of the allocation of the currently active bitfield.
+ /// This value isn't meaningful unless LastFieldIsNonZeroWidthBitfield
+ /// is true.
+ CharUnits CurrentBitfieldSize;
+ /// \brief Offset to the virtual base table pointer (if one exists).
+ CharUnits VBPtrOffset;
+ /// \brief Minimum record size possible.
+ CharUnits MinEmptyStructSize;
+ /// \brief The size and alignment info of a pointer.
+ ElementInfo PointerInfo;
+ /// \brief The primary base class (if one exists).
+ const CXXRecordDecl *PrimaryBase;
+ /// \brief The class we share our vb-pointer with.
+ const CXXRecordDecl *SharedVBPtrBase;
+ /// \brief The collection of field offsets.
+ SmallVector<uint64_t, 16> FieldOffsets;
+ /// \brief Base classes and their offsets in the record.
+ BaseOffsetsMapTy Bases;
+ /// \brief virtual base classes and their offsets in the record.
+ ASTRecordLayout::VBaseOffsetsMapTy VBases;
+ /// \brief The number of remaining bits in our last bitfield allocation.
+ /// This value isn't meaningful unless LastFieldIsNonZeroWidthBitfield is
+ /// true.
+ unsigned RemainingBitsInField;
+ bool IsUnion : 1;
+ /// \brief True if the last field laid out was a bitfield and was not 0
+ /// width.
+ bool LastFieldIsNonZeroWidthBitfield : 1;
+ /// \brief True if the class has its own vftable pointer.
+ bool HasOwnVFPtr : 1;
+ /// \brief True if the class has a vbtable pointer.
+ bool HasVBPtr : 1;
+ /// \brief True if the last sub-object within the type is zero sized or the
+ /// object itself is zero sized. This *does not* count members that are not
+ /// records. Only used for MS-ABI.
+ bool EndsWithZeroSizedObject : 1;
+ /// \brief True if this class is zero sized or first base is zero sized or
+ /// has this property. Only used for MS-ABI.
+ bool LeadsWithZeroSizedBase : 1;
+
+ /// \brief True if the external AST source provided a layout for this record.
+ bool UseExternalLayout : 1;
+
+ /// \brief The layout provided by the external AST source. Only active if
+ /// UseExternalLayout is true.
+ ExternalLayout External;
+};
+} // namespace
+
+MicrosoftRecordLayoutBuilder::ElementInfo
+MicrosoftRecordLayoutBuilder::getAdjustedElementInfo(
+ const ASTRecordLayout &Layout) {
+ ElementInfo Info;
+ Info.Alignment = Layout.getAlignment();
+ // Respect pragma pack.
+ if (!MaxFieldAlignment.isZero())
+ Info.Alignment = std::min(Info.Alignment, MaxFieldAlignment);
+ // Track zero-sized subobjects here where it's already available.
+ EndsWithZeroSizedObject = Layout.hasZeroSizedSubObject();
+ // Respect required alignment; this is necessary because we may have adjusted
+ // the alignment in the case of pragma pack. Note that the required alignment
+ // doesn't actually apply to the struct alignment at this point.
+ Alignment = std::max(Alignment, Info.Alignment);
+ RequiredAlignment = std::max(RequiredAlignment, Layout.getRequiredAlignment());
+ Info.Alignment = std::max(Info.Alignment, Layout.getRequiredAlignment());
+ Info.Size = Layout.getNonVirtualSize();
+ return Info;
+}
+
+MicrosoftRecordLayoutBuilder::ElementInfo
+MicrosoftRecordLayoutBuilder::getAdjustedElementInfo(
+ const FieldDecl *FD) {
+ // Get the field type's natural alignment, ignoring any alignment
+ // attributes.
+ ElementInfo Info;
+ std::tie(Info.Size, Info.Alignment) =
+ Context.getTypeInfoInChars(FD->getType()->getUnqualifiedDesugaredType());
+ // Respect align attributes on the field.
+ CharUnits FieldRequiredAlignment =
+ Context.toCharUnitsFromBits(FD->getMaxAlignment());
+ // Respect align attributes on the type.
+ if (Context.isAlignmentRequired(FD->getType()))
+ FieldRequiredAlignment = std::max(
+ Context.getTypeAlignInChars(FD->getType()), FieldRequiredAlignment);
+ // Respect attributes applied to subobjects of the field.
+ if (FD->isBitField())
+ // For some reason __declspec align impacts alignment rather than required
+ // alignment when it is applied to bitfields.
+ Info.Alignment = std::max(Info.Alignment, FieldRequiredAlignment);
+ else {
+ if (auto RT =
+ FD->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
+ auto const &Layout = Context.getASTRecordLayout(RT->getDecl());
+ EndsWithZeroSizedObject = Layout.hasZeroSizedSubObject();
+ FieldRequiredAlignment = std::max(FieldRequiredAlignment,
+ Layout.getRequiredAlignment());
+ }
+ // Capture required alignment as a side-effect.
+ RequiredAlignment = std::max(RequiredAlignment, FieldRequiredAlignment);
+ }
+ // Respect pragma pack, attribute pack and declspec align
+ if (!MaxFieldAlignment.isZero())
+ Info.Alignment = std::min(Info.Alignment, MaxFieldAlignment);
+ if (FD->hasAttr<PackedAttr>())
+ Info.Alignment = CharUnits::One();
+ Info.Alignment = std::max(Info.Alignment, FieldRequiredAlignment);
+ return Info;
+}
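+
+// Illustrative sketch of the adjustments above (assumed MSVC semantics): given
+//   #pragma pack(2)
+//   struct S { __declspec(align(8)) int x; };
+// pragma pack first clamps the natural alignment of 'x' from 4 to 2, but the
+// __declspec-required alignment is re-applied afterwards, so 'x' still lands
+// on an 8-byte boundary and the record's required alignment becomes 8.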
+
+void MicrosoftRecordLayoutBuilder::layout(const RecordDecl *RD) {
+ // For C record layout, zero-sized records always have size 4.
+ MinEmptyStructSize = CharUnits::fromQuantity(4);
+ initializeLayout(RD);
+ layoutFields(RD);
+ DataSize = Size = Size.RoundUpToAlignment(Alignment);
+ RequiredAlignment = std::max(
+ RequiredAlignment, Context.toCharUnitsFromBits(RD->getMaxAlignment()));
+ finalizeLayout(RD);
+}
+
+void MicrosoftRecordLayoutBuilder::cxxLayout(const CXXRecordDecl *RD) {
+ // The C++ standard says that empty structs have size 1.
+ MinEmptyStructSize = CharUnits::One();
+ initializeLayout(RD);
+ initializeCXXLayout(RD);
+ layoutNonVirtualBases(RD);
+ layoutFields(RD);
+ injectVBPtr(RD);
+ injectVFPtr(RD);
+ if (HasOwnVFPtr || (HasVBPtr && !SharedVBPtrBase))
+ Alignment = std::max(Alignment, PointerInfo.Alignment);
+ auto RoundingAlignment = Alignment;
+ if (!MaxFieldAlignment.isZero())
+ RoundingAlignment = std::min(RoundingAlignment, MaxFieldAlignment);
+ NonVirtualSize = Size = Size.RoundUpToAlignment(RoundingAlignment);
+ RequiredAlignment = std::max(
+ RequiredAlignment, Context.toCharUnitsFromBits(RD->getMaxAlignment()));
+ layoutVirtualBases(RD);
+ finalizeLayout(RD);
+}
+
+void MicrosoftRecordLayoutBuilder::initializeLayout(const RecordDecl *RD) {
+ IsUnion = RD->isUnion();
+ Size = CharUnits::Zero();
+ Alignment = CharUnits::One();
+  // In 64-bit mode we always perform an alignment step after laying out
+  // vbases; in 32-bit mode we do not. The check for whether alignment is
+  // needed inspects the RequiredAlignment field and aligns only if it is
+  // nonzero.
+ RequiredAlignment = Context.getTargetInfo().getTriple().isArch64Bit()
+ ? CharUnits::One()
+ : CharUnits::Zero();
+ // Compute the maximum field alignment.
+ MaxFieldAlignment = CharUnits::Zero();
+ // Honor the default struct packing maximum alignment flag.
+ if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct)
+ MaxFieldAlignment = CharUnits::fromQuantity(DefaultMaxFieldAlignment);
+  // Honor the packing attribute. The MS-ABI ignores pragma pack if it's larger
+  // than the pointer size.
+ if (const MaxFieldAlignmentAttr *MFAA = RD->getAttr<MaxFieldAlignmentAttr>()){
+ unsigned PackedAlignment = MFAA->getAlignment();
+ if (PackedAlignment <= Context.getTargetInfo().getPointerWidth(0))
+ MaxFieldAlignment = Context.toCharUnitsFromBits(PackedAlignment);
+ }
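+  // For example (as the check above implements): on a 32-bit target a
+  // '#pragma pack(8)' request is 64 bits, which exceeds the 32-bit pointer
+  // width, so MaxFieldAlignment is left untouched and the pragma is ignored.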
+ // Packed attribute forces max field alignment to be 1.
+ if (RD->hasAttr<PackedAttr>())
+ MaxFieldAlignment = CharUnits::One();
+
+ // Try to respect the external layout if present.
+ UseExternalLayout = false;
+ if (ExternalASTSource *Source = Context.getExternalSource())
+ UseExternalLayout = Source->layoutRecordType(
+ RD, External.Size, External.Align, External.FieldOffsets,
+ External.BaseOffsets, External.VirtualBaseOffsets);
+}
+
+void
+MicrosoftRecordLayoutBuilder::initializeCXXLayout(const CXXRecordDecl *RD) {
+ EndsWithZeroSizedObject = false;
+ LeadsWithZeroSizedBase = false;
+ HasOwnVFPtr = false;
+ HasVBPtr = false;
+ PrimaryBase = nullptr;
+ SharedVBPtrBase = nullptr;
+ // Calculate pointer size and alignment. These are used for vfptr and vbprt
+ // injection.
+ PointerInfo.Size =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ PointerInfo.Alignment =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0));
+ // Respect pragma pack.
+ if (!MaxFieldAlignment.isZero())
+ PointerInfo.Alignment = std::min(PointerInfo.Alignment, MaxFieldAlignment);
+}
+
+void
+MicrosoftRecordLayoutBuilder::layoutNonVirtualBases(const CXXRecordDecl *RD) {
+ // The MS-ABI lays out all bases that contain leading vfptrs before it lays
+ // out any bases that do not contain vfptrs. We implement this as two passes
+ // over the bases. This approach guarantees that the primary base is laid out
+ // first. We use these passes to calculate some additional aggregated
+  // information about the bases, such as required alignment and the presence of
+ // zero sized members.
+ const ASTRecordLayout *PreviousBaseLayout = nullptr;
+ // Iterate through the bases and lay out the non-virtual ones.
+ for (const CXXBaseSpecifier &Base : RD->bases()) {
+ const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+ const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl);
+ // Mark and skip virtual bases.
+ if (Base.isVirtual()) {
+ HasVBPtr = true;
+ continue;
+ }
+    // Check for a base to share a VBPtr with.
+ if (!SharedVBPtrBase && BaseLayout.hasVBPtr()) {
+ SharedVBPtrBase = BaseDecl;
+ HasVBPtr = true;
+ }
+ // Only lay out bases with extendable VFPtrs on the first pass.
+ if (!BaseLayout.hasExtendableVFPtr())
+ continue;
+ // If we don't have a primary base, this one qualifies.
+ if (!PrimaryBase) {
+ PrimaryBase = BaseDecl;
+ LeadsWithZeroSizedBase = BaseLayout.leadsWithZeroSizedBase();
+ }
+ // Lay out the base.
+ layoutNonVirtualBase(BaseDecl, BaseLayout, PreviousBaseLayout);
+ }
+ // Figure out if we need a fresh VFPtr for this class.
+ if (!PrimaryBase && RD->isDynamicClass())
+ for (CXXRecordDecl::method_iterator i = RD->method_begin(),
+ e = RD->method_end();
+ !HasOwnVFPtr && i != e; ++i)
+ HasOwnVFPtr = i->isVirtual() && i->size_overridden_methods() == 0;
+  // If we don't have a primary base, then the leading object may itself lead
+  // with a zero-sized object; we track that case here.
+ bool CheckLeadingLayout = !PrimaryBase;
+ // Iterate through the bases and lay out the non-virtual ones.
+ for (const CXXBaseSpecifier &Base : RD->bases()) {
+ if (Base.isVirtual())
+ continue;
+ const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+ const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl);
+ // Only lay out bases without extendable VFPtrs on the second pass.
+ if (BaseLayout.hasExtendableVFPtr()) {
+ VBPtrOffset = Bases[BaseDecl] + BaseLayout.getNonVirtualSize();
+ continue;
+ }
+ // If this is the first layout, check to see if it leads with a zero sized
+ // object. If it does, so do we.
+ if (CheckLeadingLayout) {
+ CheckLeadingLayout = false;
+ LeadsWithZeroSizedBase = BaseLayout.leadsWithZeroSizedBase();
+ }
+ // Lay out the base.
+ layoutNonVirtualBase(BaseDecl, BaseLayout, PreviousBaseLayout);
+ VBPtrOffset = Bases[BaseDecl] + BaseLayout.getNonVirtualSize();
+ }
+  // Set our VBPtrOffset if we know it at this point.
+ if (!HasVBPtr)
+ VBPtrOffset = CharUnits::fromQuantity(-1);
+ else if (SharedVBPtrBase) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(SharedVBPtrBase);
+ VBPtrOffset = Bases[SharedVBPtrBase] + Layout.getVBPtrOffset();
+ }
+}
+
+void MicrosoftRecordLayoutBuilder::layoutNonVirtualBase(
+ const CXXRecordDecl *BaseDecl,
+ const ASTRecordLayout &BaseLayout,
+ const ASTRecordLayout *&PreviousBaseLayout) {
+  // Insert padding between two bases if the first one is zero sized or
+  // contains a zero sized subobject and the second one is zero sized or leads
+  // with a zero sized base.
+ if (PreviousBaseLayout && PreviousBaseLayout->hasZeroSizedSubObject() &&
+ BaseLayout.leadsWithZeroSizedBase())
+ Size++;
+ ElementInfo Info = getAdjustedElementInfo(BaseLayout);
+ CharUnits BaseOffset;
+
+ // Respect the external AST source base offset, if present.
+ bool FoundBase = false;
+ if (UseExternalLayout) {
+ FoundBase = External.getExternalNVBaseOffset(BaseDecl, BaseOffset);
+ if (FoundBase)
+ assert(BaseOffset >= Size && "base offset already allocated");
+ }
+
+ if (!FoundBase)
+ BaseOffset = Size.RoundUpToAlignment(Info.Alignment);
+ Bases.insert(std::make_pair(BaseDecl, BaseOffset));
+ Size = BaseOffset + BaseLayout.getNonVirtualSize();
+ PreviousBaseLayout = &BaseLayout;
+}
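+
+// Sketch of the padding rule above (assumed MSVC behavior): given
+//   struct Empty {};
+//   struct A : Empty {};
+//   struct B : Empty {};
+//   struct C : A, B {};
+// base A ends with a zero-sized subobject and base B leads with one, so one
+// byte of padding is inserted between them while laying out C.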
+
+void MicrosoftRecordLayoutBuilder::layoutFields(const RecordDecl *RD) {
+ LastFieldIsNonZeroWidthBitfield = false;
+ for (const FieldDecl *Field : RD->fields())
+ layoutField(Field);
+}
+
+void MicrosoftRecordLayoutBuilder::layoutField(const FieldDecl *FD) {
+ if (FD->isBitField()) {
+ layoutBitField(FD);
+ return;
+ }
+ LastFieldIsNonZeroWidthBitfield = false;
+ ElementInfo Info = getAdjustedElementInfo(FD);
+ Alignment = std::max(Alignment, Info.Alignment);
+ if (IsUnion) {
+ placeFieldAtOffset(CharUnits::Zero());
+ Size = std::max(Size, Info.Size);
+ } else {
+ CharUnits FieldOffset;
+ if (UseExternalLayout) {
+ FieldOffset =
+ Context.toCharUnitsFromBits(External.getExternalFieldOffset(FD));
+ assert(FieldOffset >= Size && "field offset already allocated");
+ } else {
+ FieldOffset = Size.RoundUpToAlignment(Info.Alignment);
+ }
+ placeFieldAtOffset(FieldOffset);
+ Size = FieldOffset + Info.Size;
+ }
+}
+
+void MicrosoftRecordLayoutBuilder::layoutBitField(const FieldDecl *FD) {
+ unsigned Width = FD->getBitWidthValue(Context);
+ if (Width == 0) {
+ layoutZeroWidthBitField(FD);
+ return;
+ }
+ ElementInfo Info = getAdjustedElementInfo(FD);
+  // Clamp the bitfield to a containable size for the sake of being able
+  // to lay it out. Sema will report an error for the excessive width.
+ if (Width > Context.toBits(Info.Size))
+ Width = Context.toBits(Info.Size);
+ // Check to see if this bitfield fits into an existing allocation. Note:
+ // MSVC refuses to pack bitfields of formal types with different sizes
+ // into the same allocation.
+ if (!IsUnion && LastFieldIsNonZeroWidthBitfield &&
+ CurrentBitfieldSize == Info.Size && Width <= RemainingBitsInField) {
+ placeFieldAtBitOffset(Context.toBits(Size) - RemainingBitsInField);
+ RemainingBitsInField -= Width;
+ return;
+ }
+ LastFieldIsNonZeroWidthBitfield = true;
+ CurrentBitfieldSize = Info.Size;
+ if (IsUnion) {
+ placeFieldAtOffset(CharUnits::Zero());
+ Size = std::max(Size, Info.Size);
+ // TODO: Add a Sema warning that MS ignores bitfield alignment in unions.
+ } else {
+ // Allocate a new block of memory and place the bitfield in it.
+ CharUnits FieldOffset = Size.RoundUpToAlignment(Info.Alignment);
+ placeFieldAtOffset(FieldOffset);
+ Size = FieldOffset + Info.Size;
+ Alignment = std::max(Alignment, Info.Alignment);
+ RemainingBitsInField = Context.toBits(Info.Size) - Width;
+ }
+}
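+
+// Illustration of the allocation rule above (assumed MSVC semantics): in
+//   struct S { int a : 3; int b : 4; short c : 2; };
+// 'a' and 'b' share a single int-sized allocation, but 'c' starts a new
+// short-sized allocation because its formal type has a different size, even
+// though unused bits remain in the first allocation.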
+
+void
+MicrosoftRecordLayoutBuilder::layoutZeroWidthBitField(const FieldDecl *FD) {
+ // Zero-width bitfields are ignored unless they follow a non-zero-width
+ // bitfield.
+ if (!LastFieldIsNonZeroWidthBitfield) {
+ placeFieldAtOffset(IsUnion ? CharUnits::Zero() : Size);
+ // TODO: Add a Sema warning that MS ignores alignment for zero
+ // sized bitfields that occur after zero-size bitfields or non-bitfields.
+ return;
+ }
+ LastFieldIsNonZeroWidthBitfield = false;
+ ElementInfo Info = getAdjustedElementInfo(FD);
+ if (IsUnion) {
+ placeFieldAtOffset(CharUnits::Zero());
+ Size = std::max(Size, Info.Size);
+ // TODO: Add a Sema warning that MS ignores bitfield alignment in unions.
+ } else {
+ // Round up the current record size to the field's alignment boundary.
+ CharUnits FieldOffset = Size.RoundUpToAlignment(Info.Alignment);
+ placeFieldAtOffset(FieldOffset);
+ Size = FieldOffset;
+ Alignment = std::max(Alignment, Info.Alignment);
+ }
+}
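+
+// For example (assumed MSVC behavior): in
+//   struct S { int a : 3; int : 0; int b : 4; };
+// the zero-width bitfield follows a non-zero-width one, so it terminates the
+// current allocation and 'b' starts a new int-sized allocation; without it,
+// 'a' and 'b' would share a single allocation.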
+
+void MicrosoftRecordLayoutBuilder::injectVBPtr(const CXXRecordDecl *RD) {
+ if (!HasVBPtr || SharedVBPtrBase)
+ return;
+ // Inject the VBPointer at the injection site.
+ CharUnits InjectionSite = VBPtrOffset;
+ // But before we do, make sure it's properly aligned.
+ VBPtrOffset = VBPtrOffset.RoundUpToAlignment(PointerInfo.Alignment);
+ // Shift everything after the vbptr down, unless we're using an external
+ // layout.
+ if (UseExternalLayout)
+ return;
+ // Determine where the first field should be laid out after the vbptr.
+ CharUnits FieldStart = VBPtrOffset + PointerInfo.Size;
+ // Make sure that the amount we push the fields back by is a multiple of the
+ // alignment.
+ CharUnits Offset = (FieldStart - InjectionSite).RoundUpToAlignment(
+ std::max(RequiredAlignment, Alignment));
+ Size += Offset;
+ for (uint64_t &FieldOffset : FieldOffsets)
+ FieldOffset += Context.toBits(Offset);
+ for (BaseOffsetsMapTy::value_type &Base : Bases)
+ if (Base.second >= InjectionSite)
+ Base.second += Offset;
+}
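+
+// Sketch of the injection (assumed MSVC layout, 32-bit): for
+//   struct A { int a; };
+//   struct B : virtual A { int b; };
+// there is no base to share a vbptr with, so one is injected at offset 0 and
+// 'b' is shifted down by the pointer size, giving { vbptr, int b } as B's
+// non-virtual region with 'b' at offset 4.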
+
+void MicrosoftRecordLayoutBuilder::injectVFPtr(const CXXRecordDecl *RD) {
+ if (!HasOwnVFPtr)
+ return;
+ // Make sure that the amount we push the struct back by is a multiple of the
+ // alignment.
+ CharUnits Offset = PointerInfo.Size.RoundUpToAlignment(
+ std::max(RequiredAlignment, Alignment));
+ // Push back the vbptr, but increase the size of the object and push back
+ // regular fields by the offset only if not using external record layout.
+ if (HasVBPtr)
+ VBPtrOffset += Offset;
+
+ if (UseExternalLayout)
+ return;
+
+ Size += Offset;
+
+  // If we were using an external layout, the field offsets would already
+  // account for this adjustment.
+ for (uint64_t &FieldOffset : FieldOffsets)
+ FieldOffset += Context.toBits(Offset);
+ for (BaseOffsetsMapTy::value_type &Base : Bases)
+ Base.second += Offset;
+}
+
+void MicrosoftRecordLayoutBuilder::layoutVirtualBases(const CXXRecordDecl *RD) {
+ if (!HasVBPtr)
+ return;
+ // Vtordisps are always 4 bytes (even in 64-bit mode)
+ CharUnits VtorDispSize = CharUnits::fromQuantity(4);
+ CharUnits VtorDispAlignment = VtorDispSize;
+ // vtordisps respect pragma pack.
+ if (!MaxFieldAlignment.isZero())
+ VtorDispAlignment = std::min(VtorDispAlignment, MaxFieldAlignment);
+ // The alignment of the vtordisp is at least the required alignment of the
+ // entire record. This requirement may be present to support vtordisp
+ // injection.
+ for (const CXXBaseSpecifier &VBase : RD->vbases()) {
+ const CXXRecordDecl *BaseDecl = VBase.getType()->getAsCXXRecordDecl();
+ const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl);
+ RequiredAlignment =
+ std::max(RequiredAlignment, BaseLayout.getRequiredAlignment());
+ }
+ VtorDispAlignment = std::max(VtorDispAlignment, RequiredAlignment);
+ // Compute the vtordisp set.
+ llvm::SmallPtrSet<const CXXRecordDecl *, 2> HasVtorDispSet;
+ computeVtorDispSet(HasVtorDispSet, RD);
+ // Iterate through the virtual bases and lay them out.
+ const ASTRecordLayout *PreviousBaseLayout = nullptr;
+ for (const CXXBaseSpecifier &VBase : RD->vbases()) {
+ const CXXRecordDecl *BaseDecl = VBase.getType()->getAsCXXRecordDecl();
+ const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl);
+ bool HasVtordisp = HasVtorDispSet.count(BaseDecl) > 0;
+    // Insert padding between two bases if the first one is zero sized or
+    // contains a zero sized subobject and the second one is zero sized or
+    // leads with a zero sized base. The padding between virtual bases is 4
+    // bytes (in both 32-bit and 64-bit modes) and always involves rounding up
+    // to the required alignment; we don't know why.
+ if ((PreviousBaseLayout && PreviousBaseLayout->hasZeroSizedSubObject() &&
+ BaseLayout.leadsWithZeroSizedBase()) || HasVtordisp) {
+ Size = Size.RoundUpToAlignment(VtorDispAlignment) + VtorDispSize;
+ Alignment = std::max(VtorDispAlignment, Alignment);
+ }
+ // Insert the virtual base.
+ ElementInfo Info = getAdjustedElementInfo(BaseLayout);
+ CharUnits BaseOffset;
+
+ // Respect the external AST source base offset, if present.
+ bool FoundBase = false;
+ if (UseExternalLayout) {
+ FoundBase = External.getExternalVBaseOffset(BaseDecl, BaseOffset);
+ if (FoundBase)
+ assert(BaseOffset >= Size && "base offset already allocated");
+ }
+ if (!FoundBase)
+ BaseOffset = Size.RoundUpToAlignment(Info.Alignment);
+
+ VBases.insert(std::make_pair(BaseDecl,
+ ASTRecordLayout::VBaseInfo(BaseOffset, HasVtordisp)));
+ Size = BaseOffset + BaseLayout.getNonVirtualSize();
+ PreviousBaseLayout = &BaseLayout;
+ }
+}
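+
+// A note on the placement above (assumed MSVC semantics): when a virtual base
+// is in the vtordisp set, the 4 bytes are reserved immediately before that
+// base, so it is laid out as { ..., vtordisp, vbase } rather than
+// { ..., vbase }, and the record's alignment grows to at least the vtordisp
+// alignment.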
+
+void MicrosoftRecordLayoutBuilder::finalizeLayout(const RecordDecl *RD) {
+  // Respect required alignment. Note that in 32-bit mode RequiredAlignment
+  // may be zero, in which case the size is not updated.
+ DataSize = Size;
+ if (!RequiredAlignment.isZero()) {
+ Alignment = std::max(Alignment, RequiredAlignment);
+ auto RoundingAlignment = Alignment;
+ if (!MaxFieldAlignment.isZero())
+ RoundingAlignment = std::min(RoundingAlignment, MaxFieldAlignment);
+ RoundingAlignment = std::max(RoundingAlignment, RequiredAlignment);
+ Size = Size.RoundUpToAlignment(RoundingAlignment);
+ }
+ if (Size.isZero()) {
+ EndsWithZeroSizedObject = true;
+ LeadsWithZeroSizedBase = true;
+ // Zero-sized structures have size equal to their alignment if a
+ // __declspec(align) came into play.
+ if (RequiredAlignment >= MinEmptyStructSize)
+ Size = Alignment;
+ else
+ Size = MinEmptyStructSize;
+ }
+
+ if (UseExternalLayout) {
+ Size = Context.toCharUnitsFromBits(External.Size);
+ if (External.Align)
+ Alignment = Context.toCharUnitsFromBits(External.Align);
+ }
+}
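+
+// For instance (assumed MSVC behavior): a plain empty C++ class receives
+// MinEmptyStructSize, i.e. size 1, whereas
+//   struct __declspec(align(8)) E {};
+// has RequiredAlignment 8 >= MinEmptyStructSize, so its size becomes its
+// alignment, 8.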
+
+// Recursively walks the non-virtual bases of a class and determines if any of
+// them are in the bases with overridden methods set.
+static bool
+RequiresVtordisp(const llvm::SmallPtrSetImpl<const CXXRecordDecl *> &
+ BasesWithOverriddenMethods,
+ const CXXRecordDecl *RD) {
+ if (BasesWithOverriddenMethods.count(RD))
+ return true;
+  // If any of a virtual base's non-virtual bases (recursively) requires a
+  // vtordisp, then so does this virtual base.
+ for (const CXXBaseSpecifier &Base : RD->bases())
+ if (!Base.isVirtual() &&
+ RequiresVtordisp(BasesWithOverriddenMethods,
+ Base.getType()->getAsCXXRecordDecl()))
+ return true;
+ return false;
+}
+
+void MicrosoftRecordLayoutBuilder::computeVtorDispSet(
+ llvm::SmallPtrSetImpl<const CXXRecordDecl *> &HasVtordispSet,
+ const CXXRecordDecl *RD) const {
+ // /vd2 or #pragma vtordisp(2): Always use vtordisps for virtual bases with
+ // vftables.
+ if (RD->getMSVtorDispMode() == MSVtorDispAttr::ForVFTable) {
+ for (const CXXBaseSpecifier &Base : RD->vbases()) {
+ const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
+ if (Layout.hasExtendableVFPtr())
+ HasVtordispSet.insert(BaseDecl);
+ }
+ return;
+ }
+
+ // If any of our bases need a vtordisp for this type, so do we. Check our
+ // direct bases for vtordisp requirements.
+ for (const CXXBaseSpecifier &Base : RD->bases()) {
+ const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
+ for (const auto &bi : Layout.getVBaseOffsetsMap())
+ if (bi.second.hasVtorDisp())
+ HasVtordispSet.insert(bi.first);
+ }
+  // We don't introduce any additional vtordisps if either:
+  // * Neither a user-declared constructor nor a user-declared destructor is
+  //   present.
+  // * #pragma vtordisp(0) or the /vd0 flag is in use.
+ if ((!RD->hasUserDeclaredConstructor() && !RD->hasUserDeclaredDestructor()) ||
+ RD->getMSVtorDispMode() == MSVtorDispAttr::Never)
+ return;
+ // /vd1 or #pragma vtordisp(1): Try to guess based on whether we think it's
+ // possible for a partially constructed object with virtual base overrides to
+ // escape a non-trivial constructor.
+ assert(RD->getMSVtorDispMode() == MSVtorDispAttr::ForVBaseOverride);
+ // Compute a set of base classes which define methods we override. A virtual
+ // base in this set will require a vtordisp. A virtual base that transitively
+ // contains one of these bases as a non-virtual base will also require a
+ // vtordisp.
+ llvm::SmallPtrSet<const CXXMethodDecl *, 8> Work;
+ llvm::SmallPtrSet<const CXXRecordDecl *, 2> BasesWithOverriddenMethods;
+ // Seed the working set with our non-destructor, non-pure virtual methods.
+ for (const CXXMethodDecl *MD : RD->methods())
+ if (MD->isVirtual() && !isa<CXXDestructorDecl>(MD) && !MD->isPure())
+ Work.insert(MD);
+ while (!Work.empty()) {
+ const CXXMethodDecl *MD = *Work.begin();
+ CXXMethodDecl::method_iterator i = MD->begin_overridden_methods(),
+ e = MD->end_overridden_methods();
+    // If a virtual method overrides nothing, it lives in its parent's vtable.
+ if (i == e)
+ BasesWithOverriddenMethods.insert(MD->getParent());
+ else
+ Work.insert(i, e);
+ // We've finished processing this element, remove it from the working set.
+ Work.erase(MD);
+ }
+ // For each of our virtual bases, check if it is in the set of overridden
+ // bases or if it transitively contains a non-virtual base that is.
+ for (const CXXBaseSpecifier &Base : RD->vbases()) {
+ const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+ if (!HasVtordispSet.count(BaseDecl) &&
+ RequiresVtordisp(BasesWithOverriddenMethods, BaseDecl))
+ HasVtordispSet.insert(BaseDecl);
+ }
+}
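+
+// Worked example of the /vd1 heuristic above (assumed MSVC semantics):
+//   struct A { virtual void f(); };
+//   struct B : virtual A { B(); void f() override; };
+// B has a user-declared constructor and overrides f(), whose root definition
+// lives in the virtual base A, so A lands in HasVtordispSet; remove either
+// the constructor or the override and no vtordisp is introduced.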
+
+/// getASTRecordLayout - Get or compute information about the layout of the
+/// specified record (struct/union/class), which indicates its size and field
+/// position information.
+const ASTRecordLayout &
+ASTContext::getASTRecordLayout(const RecordDecl *D) const {
+  // These asserts test different things. A record has a definition
+  // as soon as we begin to parse the definition. That definition is
+  // not a complete definition (which is what isCompleteDefinition() tests)
+  // until we *finish* parsing the definition.
+
+ if (D->hasExternalLexicalStorage() && !D->getDefinition())
+ getExternalSource()->CompleteType(const_cast<RecordDecl*>(D));
+
+ D = D->getDefinition();
+ assert(D && "Cannot get layout of forward declarations!");
+ assert(!D->isInvalidDecl() && "Cannot get layout of invalid decl!");
+ assert(D->isCompleteDefinition() && "Cannot layout type before complete!");
+
+ // Look up this layout, if already laid out, return what we have.
+ // Note that we can't save a reference to the entry because this function
+ // is recursive.
+ const ASTRecordLayout *Entry = ASTRecordLayouts[D];
+ if (Entry) return *Entry;
+
+ const ASTRecordLayout *NewEntry = nullptr;
+
+ if (isMsLayout(*this)) {
+ MicrosoftRecordLayoutBuilder Builder(*this);
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
+ Builder.cxxLayout(RD);
+ NewEntry = new (*this) ASTRecordLayout(
+ *this, Builder.Size, Builder.Alignment, Builder.RequiredAlignment,
+ Builder.HasOwnVFPtr, Builder.HasOwnVFPtr || Builder.PrimaryBase,
+ Builder.VBPtrOffset, Builder.NonVirtualSize,
+ Builder.FieldOffsets.data(), Builder.FieldOffsets.size(),
+ Builder.NonVirtualSize, Builder.Alignment, CharUnits::Zero(),
+ Builder.PrimaryBase, false, Builder.SharedVBPtrBase,
+ Builder.EndsWithZeroSizedObject, Builder.LeadsWithZeroSizedBase,
+ Builder.Bases, Builder.VBases);
+ } else {
+ Builder.layout(D);
+ NewEntry = new (*this) ASTRecordLayout(
+ *this, Builder.Size, Builder.Alignment, Builder.RequiredAlignment,
+ Builder.Size, Builder.FieldOffsets.data(),
+ Builder.FieldOffsets.size());
+ }
+ } else {
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
+ EmptySubobjectMap EmptySubobjects(*this, RD);
+ ItaniumRecordLayoutBuilder Builder(*this, &EmptySubobjects);
+ Builder.Layout(RD);
+
+ // In certain situations, we are allowed to lay out objects in the
+ // tail-padding of base classes. This is ABI-dependent.
+ // FIXME: this should be stored in the record layout.
+ bool skipTailPadding =
+ mustSkipTailPadding(getTargetInfo().getCXXABI(), RD);
+
+ // FIXME: This should be done in FinalizeLayout.
+ CharUnits DataSize =
+ skipTailPadding ? Builder.getSize() : Builder.getDataSize();
+ CharUnits NonVirtualSize =
+ skipTailPadding ? DataSize : Builder.NonVirtualSize;
+ NewEntry = new (*this) ASTRecordLayout(
+ *this, Builder.getSize(), Builder.Alignment,
+          /*RequiredAlignment : used by MS-ABI*/
+ Builder.Alignment, Builder.HasOwnVFPtr, RD->isDynamicClass(),
+ CharUnits::fromQuantity(-1), DataSize, Builder.FieldOffsets.data(),
+ Builder.FieldOffsets.size(), NonVirtualSize,
+ Builder.NonVirtualAlignment,
+ EmptySubobjects.SizeOfLargestEmptySubobject, Builder.PrimaryBase,
+ Builder.PrimaryBaseIsVirtual, nullptr, false, false, Builder.Bases,
+ Builder.VBases);
+ } else {
+ ItaniumRecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/nullptr);
+ Builder.Layout(D);
+
+ NewEntry = new (*this) ASTRecordLayout(
+ *this, Builder.getSize(), Builder.Alignment,
+          /*RequiredAlignment : used by MS-ABI*/
+ Builder.Alignment, Builder.getSize(), Builder.FieldOffsets.data(),
+ Builder.FieldOffsets.size());
+ }
+ }
+
+ ASTRecordLayouts[D] = NewEntry;
+
+ if (getLangOpts().DumpRecordLayouts) {
+ llvm::outs() << "\n*** Dumping AST Record Layout\n";
+ DumpRecordLayout(D, llvm::outs(), getLangOpts().DumpRecordLayoutsSimple);
+ }
+
+ return *NewEntry;
+}
+
+const CXXMethodDecl *ASTContext::getCurrentKeyFunction(const CXXRecordDecl *RD) {
+ if (!getTargetInfo().getCXXABI().hasKeyFunctions())
+ return nullptr;
+
+ assert(RD->getDefinition() && "Cannot get key function for forward decl!");
+ RD = cast<CXXRecordDecl>(RD->getDefinition());
+
+ // Beware:
+ // 1) computing the key function might trigger deserialization, which might
+ // invalidate iterators into KeyFunctions
+ // 2) 'get' on the LazyDeclPtr might also trigger deserialization and
+ // invalidate the LazyDeclPtr within the map itself
+ LazyDeclPtr Entry = KeyFunctions[RD];
+ const Decl *Result =
+ Entry ? Entry.get(getExternalSource()) : computeKeyFunction(*this, RD);
+
+ // Store it back if it changed.
+ if (Entry.isOffset() || Entry.isValid() != bool(Result))
+ KeyFunctions[RD] = const_cast<Decl*>(Result);
+
+ return cast_or_null<CXXMethodDecl>(Result);
+}
+
+void ASTContext::setNonKeyFunction(const CXXMethodDecl *Method) {
+ assert(Method == Method->getFirstDecl() &&
+ "not working with method declaration from class definition");
+
+ // Look up the cache entry. Since we're working with the first
+ // declaration, its parent must be the class definition, which is
+ // the correct key for the KeyFunctions hash.
+ const auto &Map = KeyFunctions;
+ auto I = Map.find(Method->getParent());
+
+ // If it's not cached, there's nothing to do.
+ if (I == Map.end()) return;
+
+ // If it is cached, check whether it's the target method, and if so,
+ // remove it from the cache. Note, the call to 'get' might invalidate
+ // the iterator and the LazyDeclPtr object within the map.
+ LazyDeclPtr Ptr = I->second;
+ if (Ptr.get(getExternalSource()) == Method) {
+ // FIXME: remember that we did this for module / chained PCH state?
+ KeyFunctions.erase(Method->getParent());
+ }
+}
+
+static uint64_t getFieldOffset(const ASTContext &C, const FieldDecl *FD) {
+ const ASTRecordLayout &Layout = C.getASTRecordLayout(FD->getParent());
+ return Layout.getFieldOffset(FD->getFieldIndex());
+}
+
+uint64_t ASTContext::getFieldOffset(const ValueDecl *VD) const {
+ uint64_t OffsetInBits;
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(VD)) {
+ OffsetInBits = ::getFieldOffset(*this, FD);
+ } else {
+ const IndirectFieldDecl *IFD = cast<IndirectFieldDecl>(VD);
+
+ OffsetInBits = 0;
+ for (const NamedDecl *ND : IFD->chain())
+ OffsetInBits += ::getFieldOffset(*this, cast<FieldDecl>(ND));
+ }
+
+ return OffsetInBits;
+}
+
+/// getObjCLayout - Get or compute information about the layout of the
+/// given interface.
+///
+/// \param Impl - If given, also include the layout of the interface's
+/// implementation. This may differ by including synthesized ivars.
+const ASTRecordLayout &
+ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
+ const ObjCImplementationDecl *Impl) const {
+ // Retrieve the definition
+ if (D->hasExternalLexicalStorage() && !D->getDefinition())
+ getExternalSource()->CompleteType(const_cast<ObjCInterfaceDecl*>(D));
+ D = D->getDefinition();
+ assert(D && D->isThisDeclarationADefinition() && "Invalid interface decl!");
+
+ // Look up this layout, if already laid out, return what we have.
+ const ObjCContainerDecl *Key =
+ Impl ? (const ObjCContainerDecl*) Impl : (const ObjCContainerDecl*) D;
+ if (const ASTRecordLayout *Entry = ObjCLayouts[Key])
+ return *Entry;
+
+ // Add in synthesized ivar count if laying out an implementation.
+ if (Impl) {
+ unsigned SynthCount = CountNonClassIvars(D);
+    // If there aren't any synthesized ivars then reuse the interface
+ // entry. Note we can't cache this because we simply free all
+ // entries later; however we shouldn't look up implementations
+ // frequently.
+ if (SynthCount == 0)
+ return getObjCLayout(D, nullptr);
+ }
+
+ ItaniumRecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/nullptr);
+ Builder.Layout(D);
+
+ const ASTRecordLayout *NewEntry =
+ new (*this) ASTRecordLayout(*this, Builder.getSize(),
+ Builder.Alignment,
+                                  /*RequiredAlignment : used by MS-ABI*/
+ Builder.Alignment,
+ Builder.getDataSize(),
+ Builder.FieldOffsets.data(),
+ Builder.FieldOffsets.size());
+
+ ObjCLayouts[Key] = NewEntry;
+
+ return *NewEntry;
+}
+
+static void PrintOffset(raw_ostream &OS,
+ CharUnits Offset, unsigned IndentLevel) {
+ OS << llvm::format("%10" PRId64 " | ", (int64_t)Offset.getQuantity());
+ OS.indent(IndentLevel * 2);
+}
+
+static void PrintBitFieldOffset(raw_ostream &OS, CharUnits Offset,
+ unsigned Begin, unsigned Width,
+ unsigned IndentLevel) {
+ llvm::SmallString<10> Buffer;
+ {
+ llvm::raw_svector_ostream BufferOS(Buffer);
+ BufferOS << Offset.getQuantity() << ':';
+ if (Width == 0) {
+ BufferOS << '-';
+ } else {
+ BufferOS << Begin << '-' << (Begin + Width - 1);
+ }
+ }
+
+ OS << llvm::right_justify(Buffer, 10) << " | ";
+ OS.indent(IndentLevel * 2);
+}
+
+static void PrintIndentNoOffset(raw_ostream &OS, unsigned IndentLevel) {
+ OS << " | ";
+ OS.indent(IndentLevel * 2);
+}
+
+static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD,
+ const ASTContext &C,
+ CharUnits Offset,
+ unsigned IndentLevel,
+ const char* Description,
+ bool PrintSizeInfo,
+ bool IncludeVirtualBases) {
+ const ASTRecordLayout &Layout = C.getASTRecordLayout(RD);
+ auto CXXRD = dyn_cast<CXXRecordDecl>(RD);
+
+ PrintOffset(OS, Offset, IndentLevel);
+ OS << C.getTypeDeclType(const_cast<RecordDecl*>(RD)).getAsString();
+ if (Description)
+ OS << ' ' << Description;
+ if (CXXRD && CXXRD->isEmpty())
+ OS << " (empty)";
+ OS << '\n';
+
+ IndentLevel++;
+
+ // Dump bases.
+ if (CXXRD) {
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+ bool HasOwnVFPtr = Layout.hasOwnVFPtr();
+ bool HasOwnVBPtr = Layout.hasOwnVBPtr();
+
+ // Vtable pointer.
+ if (CXXRD->isDynamicClass() && !PrimaryBase && !isMsLayout(C)) {
+ PrintOffset(OS, Offset, IndentLevel);
+ OS << '(' << *RD << " vtable pointer)\n";
+ } else if (HasOwnVFPtr) {
+ PrintOffset(OS, Offset, IndentLevel);
+ // vfptr (for Microsoft C++ ABI)
+ OS << '(' << *RD << " vftable pointer)\n";
+ }
+
+ // Collect nvbases.
+ SmallVector<const CXXRecordDecl *, 4> Bases;
+ for (const CXXBaseSpecifier &Base : CXXRD->bases()) {
+ assert(!Base.getType()->isDependentType() &&
+ "Cannot layout class with dependent bases.");
+ if (!Base.isVirtual())
+ Bases.push_back(Base.getType()->getAsCXXRecordDecl());
+ }
+
+ // Sort nvbases by offset.
+ std::stable_sort(Bases.begin(), Bases.end(),
+ [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
+ return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R);
+ });
+
+ // Dump (non-virtual) bases
+ for (const CXXRecordDecl *Base : Bases) {
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base);
+ DumpRecordLayout(OS, Base, C, BaseOffset, IndentLevel,
+ Base == PrimaryBase ? "(primary base)" : "(base)",
+ /*PrintSizeInfo=*/false,
+ /*IncludeVirtualBases=*/false);
+ }
+
+ // vbptr (for Microsoft C++ ABI)
+ if (HasOwnVBPtr) {
+ PrintOffset(OS, Offset + Layout.getVBPtrOffset(), IndentLevel);
+ OS << '(' << *RD << " vbtable pointer)\n";
+ }
+ }
+
+ // Dump fields.
+ uint64_t FieldNo = 0;
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I != E; ++I, ++FieldNo) {
+ const FieldDecl &Field = **I;
+ uint64_t LocalFieldOffsetInBits = Layout.getFieldOffset(FieldNo);
+ CharUnits FieldOffset =
+ Offset + C.toCharUnitsFromBits(LocalFieldOffsetInBits);
+
+ // Recursively dump fields of record type.
+ if (auto RT = Field.getType()->getAs<RecordType>()) {
+ DumpRecordLayout(OS, RT->getDecl(), C, FieldOffset, IndentLevel,
+ Field.getName().data(),
+ /*PrintSizeInfo=*/false,
+ /*IncludeVirtualBases=*/true);
+ continue;
+ }
+
+ if (Field.isBitField()) {
+ uint64_t LocalFieldByteOffsetInBits = C.toBits(FieldOffset - Offset);
+ unsigned Begin = LocalFieldOffsetInBits - LocalFieldByteOffsetInBits;
+ unsigned Width = Field.getBitWidthValue(C);
+ PrintBitFieldOffset(OS, FieldOffset, Begin, Width, IndentLevel);
+ } else {
+ PrintOffset(OS, FieldOffset, IndentLevel);
+ }
+ OS << Field.getType().getAsString() << ' ' << Field << '\n';
+ }
+
+ // Dump virtual bases.
+ if (CXXRD && IncludeVirtualBases) {
+ const ASTRecordLayout::VBaseOffsetsMapTy &VtorDisps =
+ Layout.getVBaseOffsetsMap();
+
+ for (const CXXBaseSpecifier &Base : CXXRD->vbases()) {
+ assert(Base.isVirtual() && "Found non-virtual class!");
+ const CXXRecordDecl *VBase = Base.getType()->getAsCXXRecordDecl();
+
+ CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBase);
+
+ if (VtorDisps.find(VBase)->second.hasVtorDisp()) {
+ PrintOffset(OS, VBaseOffset - CharUnits::fromQuantity(4), IndentLevel);
+ OS << "(vtordisp for vbase " << *VBase << ")\n";
+ }
+
+ DumpRecordLayout(OS, VBase, C, VBaseOffset, IndentLevel,
+ VBase == Layout.getPrimaryBase() ?
+ "(primary virtual base)" : "(virtual base)",
+ /*PrintSizeInfo=*/false,
+ /*IncludeVirtualBases=*/false);
+ }
+ }
+
+ if (!PrintSizeInfo) return;
+
+ PrintIndentNoOffset(OS, IndentLevel - 1);
+ OS << "[sizeof=" << Layout.getSize().getQuantity();
+ if (CXXRD && !isMsLayout(C))
+ OS << ", dsize=" << Layout.getDataSize().getQuantity();
+ OS << ", align=" << Layout.getAlignment().getQuantity();
+
+ if (CXXRD) {
+ OS << ",\n";
+ PrintIndentNoOffset(OS, IndentLevel - 1);
+ OS << " nvsize=" << Layout.getNonVirtualSize().getQuantity();
+ OS << ", nvalign=" << Layout.getNonVirtualAlignment().getQuantity();
+ }
+ OS << "]\n";
+}
+
+void ASTContext::DumpRecordLayout(const RecordDecl *RD,
+ raw_ostream &OS,
+ bool Simple) const {
+ if (!Simple) {
+ ::DumpRecordLayout(OS, RD, *this, CharUnits(), 0, nullptr,
+ /*PrintSizeInfo*/true,
+ /*IncludeVirtualBases=*/true);
+ return;
+ }
+
+ // The "simple" format is designed to be parsed by the
+ // layout-override testing code. There shouldn't be any external
+ // uses of this format --- when LLDB overrides a layout, it sets up
+ // the data structures directly --- so feel free to adjust this as
+ // you like as long as you also update the rudimentary parser for it
+ // in libFrontend.
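+  //
+  // Sample output (illustrative) for a hypothetical
+  //   struct S { int a; int b; };
+  // with 32-bit ints:
+  //   Type: struct S
+  //
+  //   Layout: <ASTRecordLayout
+  //     Size:64
+  //     DataSize:64
+  //     Alignment:32
+  //     FieldOffsets: [0, 32]>
+  // (DataSize is printed only for non-MS layouts.)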
+
+ const ASTRecordLayout &Info = getASTRecordLayout(RD);
+ OS << "Type: " << getTypeDeclType(RD).getAsString() << "\n";
+ OS << "\nLayout: ";
+ OS << "<ASTRecordLayout\n";
+ OS << " Size:" << toBits(Info.getSize()) << "\n";
+ if (!isMsLayout(*this))
+ OS << " DataSize:" << toBits(Info.getDataSize()) << "\n";
+ OS << " Alignment:" << toBits(Info.getAlignment()) << "\n";
+ OS << " FieldOffsets: [";
+ for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i) {
+ if (i) OS << ", ";
+ OS << Info.getFieldOffset(i);
+ }
+ OS << "]>\n";
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/SelectorLocationsKind.cpp b/contrib/llvm/tools/clang/lib/AST/SelectorLocationsKind.cpp
new file mode 100644
index 0000000..671207a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/SelectorLocationsKind.cpp
@@ -0,0 +1,128 @@
+//===--- SelectorLocationsKind.cpp - Kind of selector locations -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Describes whether the identifier locations for a selector are "standard"
+// or not.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/SelectorLocationsKind.h"
+#include "clang/AST/Expr.h"
+
+using namespace clang;
+
+static SourceLocation getStandardSelLoc(unsigned Index,
+ Selector Sel,
+ bool WithArgSpace,
+ SourceLocation ArgLoc,
+ SourceLocation EndLoc) {
+ unsigned NumSelArgs = Sel.getNumArgs();
+ if (NumSelArgs == 0) {
+ assert(Index == 0);
+ if (EndLoc.isInvalid())
+ return SourceLocation();
+ IdentifierInfo *II = Sel.getIdentifierInfoForSlot(0);
+ unsigned Len = II ? II->getLength() : 0;
+ return EndLoc.getLocWithOffset(-Len);
+ }
+
+ assert(Index < NumSelArgs);
+ if (ArgLoc.isInvalid())
+ return SourceLocation();
+ IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Index);
+ unsigned Len = /* selector id */ (II ? II->getLength() : 0) + /* ':' */ 1;
+ if (WithArgSpace)
+ ++Len;
+ return ArgLoc.getLocWithOffset(-Len);
+}
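+
+// Illustration (hypothetical Objective-C source): for the message send
+//   [obj setX:1 y:2]
+// the standard location of the 'y:' piece with no argument space is the
+// argument location of '2' minus (strlen("y") + 1) for the trailing ':',
+// i.e. the location of 'y' itself.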
+
+namespace {
+
+template <typename T>
+SourceLocation getArgLoc(T* Arg);
+
+template <>
+SourceLocation getArgLoc<Expr>(Expr *Arg) {
+ return Arg->getLocStart();
+}
+
+template <>
+SourceLocation getArgLoc<ParmVarDecl>(ParmVarDecl *Arg) {
+ SourceLocation Loc = Arg->getLocStart();
+ if (Loc.isInvalid())
+ return Loc;
+ // -1 to point to left paren of the method parameter's type.
+ return Loc.getLocWithOffset(-1);
+}
+
+template <typename T>
+SourceLocation getArgLoc(unsigned Index, ArrayRef<T*> Args) {
+ return Index < Args.size() ? getArgLoc(Args[Index]) : SourceLocation();
+}
+
+template <typename T>
+SelectorLocationsKind hasStandardSelLocs(Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ArrayRef<T *> Args,
+ SourceLocation EndLoc) {
+ // Are selector locations in standard position with no space between args ?
+ unsigned i;
+ for (i = 0; i != SelLocs.size(); ++i) {
+ if (SelLocs[i] != getStandardSelectorLoc(i, Sel, /*WithArgSpace=*/false,
+ Args, EndLoc))
+ break;
+ }
+ if (i == SelLocs.size())
+ return SelLoc_StandardNoSpace;
+
+  // Are selector locations in standard position with space between args?
+ for (i = 0; i != SelLocs.size(); ++i) {
+ if (SelLocs[i] != getStandardSelectorLoc(i, Sel, /*WithArgSpace=*/true,
+ Args, EndLoc))
+ return SelLoc_NonStandard;
+ }
+
+ return SelLoc_StandardWithSpace;
+}
+
+} // anonymous namespace
+
+SelectorLocationsKind
+clang::hasStandardSelectorLocs(Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ArrayRef<Expr *> Args,
+ SourceLocation EndLoc) {
+ return hasStandardSelLocs(Sel, SelLocs, Args, EndLoc);
+}
+
+SourceLocation clang::getStandardSelectorLoc(unsigned Index,
+ Selector Sel,
+ bool WithArgSpace,
+ ArrayRef<Expr *> Args,
+ SourceLocation EndLoc) {
+ return getStandardSelLoc(Index, Sel, WithArgSpace,
+ getArgLoc(Index, Args), EndLoc);
+}
+
+SelectorLocationsKind
+clang::hasStandardSelectorLocs(Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ArrayRef<ParmVarDecl *> Args,
+ SourceLocation EndLoc) {
+ return hasStandardSelLocs(Sel, SelLocs, Args, EndLoc);
+}
+
+SourceLocation clang::getStandardSelectorLoc(unsigned Index,
+ Selector Sel,
+ bool WithArgSpace,
+ ArrayRef<ParmVarDecl *> Args,
+ SourceLocation EndLoc) {
+ return getStandardSelLoc(Index, Sel, WithArgSpace,
+ getArgLoc(Index, Args), EndLoc);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/Stmt.cpp b/contrib/llvm/tools/clang/lib/AST/Stmt.cpp
new file mode 100644
index 0000000..ca63d84
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/Stmt.cpp
@@ -0,0 +1,1110 @@
+//===--- Stmt.cpp - Statement AST Node Implementation ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Stmt class and statement subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprOpenMP.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtOpenMP.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Token.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+static struct StmtClassNameTable {
+ const char *Name;
+ unsigned Counter;
+ unsigned Size;
+} StmtClassInfo[Stmt::lastStmtConstant+1];
+
+static StmtClassNameTable &getStmtInfoTableEntry(Stmt::StmtClass E) {
+ static bool Initialized = false;
+ if (Initialized)
+ return StmtClassInfo[E];
+
+  // Initialize the table on the first use.
+ Initialized = true;
+#define ABSTRACT_STMT(STMT)
+#define STMT(CLASS, PARENT) \
+ StmtClassInfo[(unsigned)Stmt::CLASS##Class].Name = #CLASS; \
+ StmtClassInfo[(unsigned)Stmt::CLASS##Class].Size = sizeof(CLASS);
+#include "clang/AST/StmtNodes.inc"
+
+ return StmtClassInfo[E];
+}
+
+void *Stmt::operator new(size_t bytes, const ASTContext& C,
+ unsigned alignment) {
+ return ::operator new(bytes, C, alignment);
+}
+
+const char *Stmt::getStmtClassName() const {
+ return getStmtInfoTableEntry((StmtClass) StmtBits.sClass).Name;
+}
+
+void Stmt::PrintStats() {
+ // Ensure the table is primed.
+ getStmtInfoTableEntry(Stmt::NullStmtClass);
+
+ unsigned sum = 0;
+ llvm::errs() << "\n*** Stmt/Expr Stats:\n";
+ for (int i = 0; i != Stmt::lastStmtConstant+1; i++) {
+ if (StmtClassInfo[i].Name == nullptr) continue;
+ sum += StmtClassInfo[i].Counter;
+ }
+ llvm::errs() << " " << sum << " stmts/exprs total.\n";
+ sum = 0;
+ for (int i = 0; i != Stmt::lastStmtConstant+1; i++) {
+ if (StmtClassInfo[i].Name == nullptr) continue;
+ if (StmtClassInfo[i].Counter == 0) continue;
+ llvm::errs() << " " << StmtClassInfo[i].Counter << " "
+ << StmtClassInfo[i].Name << ", " << StmtClassInfo[i].Size
+ << " each (" << StmtClassInfo[i].Counter*StmtClassInfo[i].Size
+ << " bytes)\n";
+ sum += StmtClassInfo[i].Counter*StmtClassInfo[i].Size;
+ }
+
+ llvm::errs() << "Total bytes = " << sum << "\n";
+}
+
+void Stmt::addStmtClass(StmtClass s) {
+ ++getStmtInfoTableEntry(s).Counter;
+}
+
+bool Stmt::StatisticsEnabled = false;
+void Stmt::EnableStatistics() {
+ StatisticsEnabled = true;
+}
+
+Stmt *Stmt::IgnoreImplicit() {
+ Stmt *s = this;
+
+ if (auto *ewc = dyn_cast<ExprWithCleanups>(s))
+ s = ewc->getSubExpr();
+
+ if (auto *mte = dyn_cast<MaterializeTemporaryExpr>(s))
+ s = mte->GetTemporaryExpr();
+
+ if (auto *bte = dyn_cast<CXXBindTemporaryExpr>(s))
+ s = bte->getSubExpr();
+
+ while (auto *ice = dyn_cast<ImplicitCastExpr>(s))
+ s = ice->getSubExpr();
+
+ return s;
+}
+
+/// \brief Skip no-op (attributed, compound) container stmts and skip captured
+/// stmt at the top, if \a IgnoreCaptured is true.
+Stmt *Stmt::IgnoreContainers(bool IgnoreCaptured) {
+ Stmt *S = this;
+ if (IgnoreCaptured)
+ if (auto CapS = dyn_cast_or_null<CapturedStmt>(S))
+ S = CapS->getCapturedStmt();
+ while (true) {
+ if (auto AS = dyn_cast_or_null<AttributedStmt>(S))
+ S = AS->getSubStmt();
+ else if (auto CS = dyn_cast_or_null<CompoundStmt>(S)) {
+ if (CS->size() != 1)
+ break;
+ S = CS->body_back();
+ } else
+ break;
+ }
+ return S;
+}
+
+/// \brief Strip off all label-like statements.
+///
+/// This will strip off label statements, case statements, attributed
+/// statements and default statements recursively.
+const Stmt *Stmt::stripLabelLikeStatements() const {
+ const Stmt *S = this;
+ while (true) {
+ if (const LabelStmt *LS = dyn_cast<LabelStmt>(S))
+ S = LS->getSubStmt();
+ else if (const SwitchCase *SC = dyn_cast<SwitchCase>(S))
+ S = SC->getSubStmt();
+ else if (const AttributedStmt *AS = dyn_cast<AttributedStmt>(S))
+ S = AS->getSubStmt();
+ else
+ return S;
+ }
+}
+
+namespace {
+ struct good {};
+ struct bad {};
+
+ // These silly little functions have to be static inline to suppress
+ // unused warnings, and they have to be defined to suppress other
+ // warnings.
+ static inline good is_good(good) { return good(); }
+
+ typedef Stmt::child_range children_t();
+ template <class T> good implements_children(children_t T::*) {
+ return good();
+ }
+ LLVM_ATTRIBUTE_UNUSED
+ static inline bad implements_children(children_t Stmt::*) {
+ return bad();
+ }
+
+ typedef SourceLocation getLocStart_t() const;
+ template <class T> good implements_getLocStart(getLocStart_t T::*) {
+ return good();
+ }
+ LLVM_ATTRIBUTE_UNUSED
+ static inline bad implements_getLocStart(getLocStart_t Stmt::*) {
+ return bad();
+ }
+
+ typedef SourceLocation getLocEnd_t() const;
+ template <class T> good implements_getLocEnd(getLocEnd_t T::*) {
+ return good();
+ }
+ LLVM_ATTRIBUTE_UNUSED
+ static inline bad implements_getLocEnd(getLocEnd_t Stmt::*) {
+ return bad();
+ }
+
+#define ASSERT_IMPLEMENTS_children(type) \
+ (void) is_good(implements_children(&type::children))
+#define ASSERT_IMPLEMENTS_getLocStart(type) \
+ (void) is_good(implements_getLocStart(&type::getLocStart))
+#define ASSERT_IMPLEMENTS_getLocEnd(type) \
+ (void) is_good(implements_getLocEnd(&type::getLocEnd))
+}
+
+/// Check whether the various Stmt classes implement their member
+/// functions.
+LLVM_ATTRIBUTE_UNUSED
+static inline void check_implementations() {
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ ASSERT_IMPLEMENTS_children(type); \
+ ASSERT_IMPLEMENTS_getLocStart(type); \
+ ASSERT_IMPLEMENTS_getLocEnd(type);
+#include "clang/AST/StmtNodes.inc"
+}
+
+Stmt::child_range Stmt::children() {
+ switch (getStmtClass()) {
+ case Stmt::NoStmtClass: llvm_unreachable("statement without class");
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ case Stmt::type##Class: \
+ return static_cast<type*>(this)->children();
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("unknown statement kind!");
+}
+
+// Amusing macro metaprogramming hack: check whether a class provides
+// a more specific implementation of getSourceRange.
+//
+// See also Expr.cpp:getExprLoc().
+namespace {
+ /// This implementation is used when a class provides a custom
+ /// implementation of getSourceRange.
+ template <class S, class T>
+ SourceRange getSourceRangeImpl(const Stmt *stmt,
+ SourceRange (T::*v)() const) {
+ return static_cast<const S*>(stmt)->getSourceRange();
+ }
+
+ /// This implementation is used when a class doesn't provide a custom
+ /// implementation of getSourceRange. Overload resolution should pick it over
+ /// the implementation above because it's more specialized according to
+ /// function template partial ordering.
+ template <class S>
+ SourceRange getSourceRangeImpl(const Stmt *stmt,
+ SourceRange (Stmt::*v)() const) {
+ return SourceRange(static_cast<const S*>(stmt)->getLocStart(),
+ static_cast<const S*>(stmt)->getLocEnd());
+ }
+}
+
+SourceRange Stmt::getSourceRange() const {
+ switch (getStmtClass()) {
+ case Stmt::NoStmtClass: llvm_unreachable("statement without class");
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ case Stmt::type##Class: \
+ return getSourceRangeImpl<type>(this, &type::getSourceRange);
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("unknown statement kind!");
+}
+
+SourceLocation Stmt::getLocStart() const {
+ switch (getStmtClass()) {
+ case Stmt::NoStmtClass: llvm_unreachable("statement without class");
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ case Stmt::type##Class: \
+ return static_cast<const type*>(this)->getLocStart();
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("unknown statement kind");
+}
+
+SourceLocation Stmt::getLocEnd() const {
+ switch (getStmtClass()) {
+ case Stmt::NoStmtClass: llvm_unreachable("statement without class");
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ case Stmt::type##Class: \
+ return static_cast<const type*>(this)->getLocEnd();
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("unknown statement kind");
+}
+
+CompoundStmt::CompoundStmt(const ASTContext &C, ArrayRef<Stmt*> Stmts,
+ SourceLocation LB, SourceLocation RB)
+ : Stmt(CompoundStmtClass), LBraceLoc(LB), RBraceLoc(RB) {
+ CompoundStmtBits.NumStmts = Stmts.size();
+ assert(CompoundStmtBits.NumStmts == Stmts.size() &&
+ "NumStmts doesn't fit in bits of CompoundStmtBits.NumStmts!");
+
+ if (Stmts.size() == 0) {
+ Body = nullptr;
+ return;
+ }
+
+ Body = new (C) Stmt*[Stmts.size()];
+ std::copy(Stmts.begin(), Stmts.end(), Body);
+}
+
+void CompoundStmt::setStmts(const ASTContext &C, ArrayRef<Stmt *> Stmts) {
+ if (Body)
+ C.Deallocate(Body);
+ CompoundStmtBits.NumStmts = Stmts.size();
+ assert(CompoundStmtBits.NumStmts == Stmts.size() &&
+ "NumStmts doesn't fit in bits of CompoundStmtBits.NumStmts!");
+
+ Body = new (C) Stmt*[Stmts.size()];
+ std::copy(Stmts.begin(), Stmts.end(), Body);
+}
+
+const char *LabelStmt::getName() const {
+ return getDecl()->getIdentifier()->getNameStart();
+}
+
+AttributedStmt *AttributedStmt::Create(const ASTContext &C, SourceLocation Loc,
+ ArrayRef<const Attr*> Attrs,
+ Stmt *SubStmt) {
+ assert(!Attrs.empty() && "Attrs should not be empty");
+ void *Mem = C.Allocate(sizeof(AttributedStmt) + sizeof(Attr *) * Attrs.size(),
+ llvm::alignOf<AttributedStmt>());
+ return new (Mem) AttributedStmt(Loc, Attrs, SubStmt);
+}
+
+AttributedStmt *AttributedStmt::CreateEmpty(const ASTContext &C,
+ unsigned NumAttrs) {
+ assert(NumAttrs > 0 && "NumAttrs should be greater than zero");
+ void *Mem = C.Allocate(sizeof(AttributedStmt) + sizeof(Attr *) * NumAttrs,
+ llvm::alignOf<AttributedStmt>());
+ return new (Mem) AttributedStmt(EmptyShell(), NumAttrs);
+}
+
+std::string AsmStmt::generateAsmString(const ASTContext &C) const {
+ if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
+ return gccAsmStmt->generateAsmString(C);
+ if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
+ return msAsmStmt->generateAsmString(C);
+ llvm_unreachable("unknown asm statement kind!");
+}
+
+StringRef AsmStmt::getOutputConstraint(unsigned i) const {
+ if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
+ return gccAsmStmt->getOutputConstraint(i);
+ if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
+ return msAsmStmt->getOutputConstraint(i);
+ llvm_unreachable("unknown asm statement kind!");
+}
+
+const Expr *AsmStmt::getOutputExpr(unsigned i) const {
+ if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
+ return gccAsmStmt->getOutputExpr(i);
+ if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
+ return msAsmStmt->getOutputExpr(i);
+ llvm_unreachable("unknown asm statement kind!");
+}
+
+StringRef AsmStmt::getInputConstraint(unsigned i) const {
+ if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
+ return gccAsmStmt->getInputConstraint(i);
+ if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
+ return msAsmStmt->getInputConstraint(i);
+ llvm_unreachable("unknown asm statement kind!");
+}
+
+const Expr *AsmStmt::getInputExpr(unsigned i) const {
+ if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
+ return gccAsmStmt->getInputExpr(i);
+ if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
+ return msAsmStmt->getInputExpr(i);
+ llvm_unreachable("unknown asm statement kind!");
+}
+
+StringRef AsmStmt::getClobber(unsigned i) const {
+ if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
+ return gccAsmStmt->getClobber(i);
+ if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
+ return msAsmStmt->getClobber(i);
+ llvm_unreachable("unknown asm statement kind!");
+}
+
+/// getNumPlusOperands - Return the number of output operands that have a "+"
+/// constraint.
+unsigned AsmStmt::getNumPlusOperands() const {
+ unsigned Res = 0;
+ for (unsigned i = 0, e = getNumOutputs(); i != e; ++i)
+ if (isOutputPlusConstraint(i))
+ ++Res;
+ return Res;
+}
+
+char GCCAsmStmt::AsmStringPiece::getModifier() const {
+ assert(isOperand() && "Only Operands can have modifiers.");
+ return isLetter(Str[0]) ? Str[0] : '\0';
+}
+
+StringRef GCCAsmStmt::getClobber(unsigned i) const {
+ return getClobberStringLiteral(i)->getString();
+}
+
+Expr *GCCAsmStmt::getOutputExpr(unsigned i) {
+ return cast<Expr>(Exprs[i]);
+}
+
+/// getOutputConstraint - Return the constraint string for the specified
+/// output operand. All output constraints are known to be non-empty (either
+/// '=' or '+').
+StringRef GCCAsmStmt::getOutputConstraint(unsigned i) const {
+ return getOutputConstraintLiteral(i)->getString();
+}
+
+Expr *GCCAsmStmt::getInputExpr(unsigned i) {
+ return cast<Expr>(Exprs[i + NumOutputs]);
+}
+void GCCAsmStmt::setInputExpr(unsigned i, Expr *E) {
+ Exprs[i + NumOutputs] = E;
+}
+
+/// getInputConstraint - Return the specified input constraint. Unlike output
+/// constraints, these can be empty.
+StringRef GCCAsmStmt::getInputConstraint(unsigned i) const {
+ return getInputConstraintLiteral(i)->getString();
+}
+
+void GCCAsmStmt::setOutputsAndInputsAndClobbers(const ASTContext &C,
+ IdentifierInfo **Names,
+ StringLiteral **Constraints,
+ Stmt **Exprs,
+ unsigned NumOutputs,
+ unsigned NumInputs,
+ StringLiteral **Clobbers,
+ unsigned NumClobbers) {
+ this->NumOutputs = NumOutputs;
+ this->NumInputs = NumInputs;
+ this->NumClobbers = NumClobbers;
+
+ unsigned NumExprs = NumOutputs + NumInputs;
+
+ C.Deallocate(this->Names);
+ this->Names = new (C) IdentifierInfo*[NumExprs];
+ std::copy(Names, Names + NumExprs, this->Names);
+
+ C.Deallocate(this->Exprs);
+ this->Exprs = new (C) Stmt*[NumExprs];
+ std::copy(Exprs, Exprs + NumExprs, this->Exprs);
+
+ C.Deallocate(this->Constraints);
+ this->Constraints = new (C) StringLiteral*[NumExprs];
+ std::copy(Constraints, Constraints + NumExprs, this->Constraints);
+
+ C.Deallocate(this->Clobbers);
+ this->Clobbers = new (C) StringLiteral*[NumClobbers];
+ std::copy(Clobbers, Clobbers + NumClobbers, this->Clobbers);
+}
+
+/// getNamedOperand - Given a symbolic operand reference like %[foo],
+/// translate this into a numeric value needed to reference the same operand.
+/// This returns -1 if the operand name is invalid.
+int GCCAsmStmt::getNamedOperand(StringRef SymbolicName) const {
+ unsigned NumPlusOperands = 0;
+
+ // Check if this is an output operand.
+ for (unsigned i = 0, e = getNumOutputs(); i != e; ++i) {
+ if (getOutputName(i) == SymbolicName)
+ return i;
+ }
+
+ for (unsigned i = 0, e = getNumInputs(); i != e; ++i)
+ if (getInputName(i) == SymbolicName)
+ return getNumOutputs() + NumPlusOperands + i;
+
+ // Not found.
+ return -1;
+}
+
+/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
+/// it into pieces. If the asm string is erroneous, return the diagnostic ID
+/// to report and set DiagOffs to its offset; otherwise return 0.
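+/// For example (illustrative): "mov %1, %[dst]" with an output operand named
+/// 'dst' decomposes into the pieces { "mov ", operand 1, ", ",
+/// operand [dst] }.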
+unsigned GCCAsmStmt::AnalyzeAsmString(SmallVectorImpl<AsmStringPiece>&Pieces,
+ const ASTContext &C, unsigned &DiagOffs) const {
+ StringRef Str = getAsmString()->getString();
+ const char *StrStart = Str.begin();
+ const char *StrEnd = Str.end();
+ const char *CurPtr = StrStart;
+
+ // "Simple" inline asms have no constraints or operands, just convert the asm
+ // string to escape $'s.
+ if (isSimple()) {
+ std::string Result;
+ for (; CurPtr != StrEnd; ++CurPtr) {
+ switch (*CurPtr) {
+ case '$':
+ Result += "$$";
+ break;
+ default:
+ Result += *CurPtr;
+ break;
+ }
+ }
+ Pieces.push_back(AsmStringPiece(Result));
+ return 0;
+ }
+
+ // CurStringPiece - The current string that we are building up as we scan the
+ // asm string.
+ std::string CurStringPiece;
+
+ bool HasVariants = !C.getTargetInfo().hasNoAsmVariants();
+
+  while (true) {
+ // Done with the string?
+ if (CurPtr == StrEnd) {
+ if (!CurStringPiece.empty())
+ Pieces.push_back(AsmStringPiece(CurStringPiece));
+ return 0;
+ }
+
+ char CurChar = *CurPtr++;
+ switch (CurChar) {
+ case '$': CurStringPiece += "$$"; continue;
+ case '{': CurStringPiece += (HasVariants ? "$(" : "{"); continue;
+ case '|': CurStringPiece += (HasVariants ? "$|" : "|"); continue;
+ case '}': CurStringPiece += (HasVariants ? "$)" : "}"); continue;
+ case '%':
+ break;
+ default:
+ CurStringPiece += CurChar;
+ continue;
+ }
+
+ // Escaped "%" character in asm string.
+ if (CurPtr == StrEnd) {
+ // % at end of string is invalid (no escape).
+ DiagOffs = CurPtr-StrStart-1;
+ return diag::err_asm_invalid_escape;
+ }
+
+ char EscapedChar = *CurPtr++;
+ if (EscapedChar == '%') { // %% -> %
+ // Escaped percentage sign.
+ CurStringPiece += '%';
+ continue;
+ }
+
+    if (EscapedChar == '=') { // %= -> Generate a unique ID.
+ CurStringPiece += "${:uid}";
+ continue;
+ }
+
+ // Otherwise, we have an operand. If we have accumulated a string so far,
+ // add it to the Pieces list.
+ if (!CurStringPiece.empty()) {
+ Pieces.push_back(AsmStringPiece(CurStringPiece));
+ CurStringPiece.clear();
+ }
+
+ // Handle operands that have asmSymbolicName (e.g., %x[foo]) and those that
+ // don't (e.g., %x4). 'x' following the '%' is the constraint modifier.
+
+ const char *Begin = CurPtr - 1; // Points to the character following '%'.
+ const char *Percent = Begin - 1; // Points to '%'.
+
+ if (isLetter(EscapedChar)) {
+ if (CurPtr == StrEnd) { // Premature end.
+ DiagOffs = CurPtr-StrStart-1;
+ return diag::err_asm_invalid_escape;
+ }
+ EscapedChar = *CurPtr++;
+ }
+
+ const TargetInfo &TI = C.getTargetInfo();
+ const SourceManager &SM = C.getSourceManager();
+ const LangOptions &LO = C.getLangOpts();
+
+ // Handle operands that don't have asmSymbolicName (e.g., %x4).
+ if (isDigit(EscapedChar)) {
+ // %n - Assembler operand n
+ unsigned N = 0;
+
+ --CurPtr;
+ while (CurPtr != StrEnd && isDigit(*CurPtr))
+ N = N*10 + ((*CurPtr++)-'0');
+
+ unsigned NumOperands =
+ getNumOutputs() + getNumPlusOperands() + getNumInputs();
+ if (N >= NumOperands) {
+ DiagOffs = CurPtr-StrStart-1;
+ return diag::err_asm_invalid_operand_number;
+ }
+
+ // Str contains "x4" (Operand without the leading %).
+ std::string Str(Begin, CurPtr - Begin);
+
+ // (BeginLoc, EndLoc) represents the range of the operand we are currently
+ // processing. Unlike Str, the range includes the leading '%'.
+ SourceLocation BeginLoc =
+ getAsmString()->getLocationOfByte(Percent - StrStart, SM, LO, TI);
+ SourceLocation EndLoc =
+ getAsmString()->getLocationOfByte(CurPtr - StrStart, SM, LO, TI);
+
+ Pieces.emplace_back(N, std::move(Str), BeginLoc, EndLoc);
+ continue;
+ }
+
+ // Handle operands that have asmSymbolicName (e.g., %x[foo]).
+ if (EscapedChar == '[') {
+ DiagOffs = CurPtr-StrStart-1;
+
+ // Find the ']'.
+ const char *NameEnd = (const char*)memchr(CurPtr, ']', StrEnd-CurPtr);
+ if (NameEnd == nullptr)
+ return diag::err_asm_unterminated_symbolic_operand_name;
+ if (NameEnd == CurPtr)
+ return diag::err_asm_empty_symbolic_operand_name;
+
+ StringRef SymbolicName(CurPtr, NameEnd - CurPtr);
+
+ // Verify that an operand with that name exists.
+ int N = getNamedOperand(SymbolicName);
+ if (N == -1) {
+ DiagOffs = CurPtr-StrStart;
+ return diag::err_asm_unknown_symbolic_operand_name;
+ }
+
+ // Str contains "x[foo]" (Operand without the leading %).
+ std::string Str(Begin, NameEnd + 1 - Begin);
+
+ // (BeginLoc, EndLoc) represents the range of the operand we are currently
+ // processing. Unlike Str, the range includes the leading '%'.
+ SourceLocation BeginLoc =
+ getAsmString()->getLocationOfByte(Percent - StrStart, SM, LO, TI);
+ SourceLocation EndLoc =
+ getAsmString()->getLocationOfByte(NameEnd + 1 - StrStart, SM, LO, TI);
+
+ Pieces.emplace_back(N, std::move(Str), BeginLoc, EndLoc);
+
+ CurPtr = NameEnd+1;
+ continue;
+ }
+
+ DiagOffs = CurPtr-StrStart-1;
+ return diag::err_asm_invalid_escape;
+ }
+}
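+
+// Sketch of the decomposition (illustrative, not from the original file).
+// For a hypothetical asm such as
+//
+//   asm("mov %1, %0 ; inc %w0" : "=r"(Out) : "r"(In));
+//
+// the loop above yields, in order: string piece "mov ", operand piece #1,
+// string piece ", ", operand piece #0, string piece " ; inc ", and operand
+// piece #0 carrying modifier 'w'.  Literal '$', '{', '|' and '}' are
+// rewritten to their '$'-escaped IR spellings as string pieces accumulate.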
+
+/// Assemble final IR asm string (GCC-style).
+std::string GCCAsmStmt::generateAsmString(const ASTContext &C) const {
+ // Analyze the asm string to decompose it into its pieces. We know that Sema
+ // has already done this, so it is guaranteed to be successful.
+ SmallVector<GCCAsmStmt::AsmStringPiece, 4> Pieces;
+ unsigned DiagOffs;
+ AnalyzeAsmString(Pieces, C, DiagOffs);
+
+ std::string AsmString;
+ for (unsigned i = 0, e = Pieces.size(); i != e; ++i) {
+ if (Pieces[i].isString())
+ AsmString += Pieces[i].getString();
+ else if (Pieces[i].getModifier() == '\0')
+ AsmString += '$' + llvm::utostr(Pieces[i].getOperandNo());
+ else
+ AsmString += "${" + llvm::utostr(Pieces[i].getOperandNo()) + ':' +
+ Pieces[i].getModifier() + '}';
+ }
+ return AsmString;
+}
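+
+// Continuing the illustrative example above, the pieces reassemble into the
+// IR-level string
+//
+//   "mov $1, $0 ; inc ${0:w}"
+//
+// i.e. a bare operand N is emitted as "$N" and an operand with modifier M as
+// "${N:M}".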
+
+/// Assemble final IR asm string (MS-style).
+std::string MSAsmStmt::generateAsmString(const ASTContext &C) const {
+ // FIXME: This needs to be translated into the IR string representation.
+ return AsmStr;
+}
+
+Expr *MSAsmStmt::getOutputExpr(unsigned i) {
+ return cast<Expr>(Exprs[i]);
+}
+
+Expr *MSAsmStmt::getInputExpr(unsigned i) {
+ return cast<Expr>(Exprs[i + NumOutputs]);
+}
+
+void MSAsmStmt::setInputExpr(unsigned i, Expr *E) {
+ Exprs[i + NumOutputs] = E;
+}
+
+//===----------------------------------------------------------------------===//
+// Constructors
+//===----------------------------------------------------------------------===//
+
+GCCAsmStmt::GCCAsmStmt(const ASTContext &C, SourceLocation asmloc,
+ bool issimple, bool isvolatile, unsigned numoutputs,
+ unsigned numinputs, IdentifierInfo **names,
+ StringLiteral **constraints, Expr **exprs,
+ StringLiteral *asmstr, unsigned numclobbers,
+ StringLiteral **clobbers, SourceLocation rparenloc)
+ : AsmStmt(GCCAsmStmtClass, asmloc, issimple, isvolatile, numoutputs,
+ numinputs, numclobbers), RParenLoc(rparenloc), AsmStr(asmstr) {
+
+ unsigned NumExprs = NumOutputs + NumInputs;
+
+ Names = new (C) IdentifierInfo*[NumExprs];
+ std::copy(names, names + NumExprs, Names);
+
+ Exprs = new (C) Stmt*[NumExprs];
+ std::copy(exprs, exprs + NumExprs, Exprs);
+
+ Constraints = new (C) StringLiteral*[NumExprs];
+ std::copy(constraints, constraints + NumExprs, Constraints);
+
+ Clobbers = new (C) StringLiteral*[NumClobbers];
+ std::copy(clobbers, clobbers + NumClobbers, Clobbers);
+}
+
+MSAsmStmt::MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
+ SourceLocation lbraceloc, bool issimple, bool isvolatile,
+ ArrayRef<Token> asmtoks, unsigned numoutputs,
+ unsigned numinputs,
+ ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs,
+ StringRef asmstr, ArrayRef<StringRef> clobbers,
+ SourceLocation endloc)
+ : AsmStmt(MSAsmStmtClass, asmloc, issimple, isvolatile, numoutputs,
+ numinputs, clobbers.size()), LBraceLoc(lbraceloc),
+ EndLoc(endloc), NumAsmToks(asmtoks.size()) {
+
+ initialize(C, asmstr, asmtoks, constraints, exprs, clobbers);
+}
+
+static StringRef copyIntoContext(const ASTContext &C, StringRef str) {
+ return str.copy(C);
+}
+
+void MSAsmStmt::initialize(const ASTContext &C, StringRef asmstr,
+ ArrayRef<Token> asmtoks,
+ ArrayRef<StringRef> constraints,
+ ArrayRef<Expr*> exprs,
+ ArrayRef<StringRef> clobbers) {
+ assert(NumAsmToks == asmtoks.size());
+ assert(NumClobbers == clobbers.size());
+
+ assert(exprs.size() == NumOutputs + NumInputs);
+ assert(exprs.size() == constraints.size());
+
+ AsmStr = copyIntoContext(C, asmstr);
+
+ Exprs = new (C) Stmt*[exprs.size()];
+ std::copy(exprs.begin(), exprs.end(), Exprs);
+
+ AsmToks = new (C) Token[asmtoks.size()];
+ std::copy(asmtoks.begin(), asmtoks.end(), AsmToks);
+
+ Constraints = new (C) StringRef[exprs.size()];
+ std::transform(constraints.begin(), constraints.end(), Constraints,
+ [&](StringRef Constraint) {
+ return copyIntoContext(C, Constraint);
+ });
+
+ Clobbers = new (C) StringRef[NumClobbers];
+ // FIXME: Avoid the allocation/copy if at all possible.
+ std::transform(clobbers.begin(), clobbers.end(), Clobbers,
+ [&](StringRef Clobber) {
+ return copyIntoContext(C, Clobber);
+ });
+}
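+
+// Note on copyIntoContext (illustrative; 'buildConstraint' is a hypothetical
+// helper): the incoming StringRefs may reference short-lived buffers, so each
+// one is duplicated into ASTContext-owned memory:
+//
+//   StringRef Tmp = buildConstraint();  // backing buffer may die soon
+//   Constraints[I] = Tmp.copy(C);       // lives as long as the ASTContext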
+
+IfStmt::IfStmt(const ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
+ Stmt *then, SourceLocation EL, Stmt *elsev)
+ : Stmt(IfStmtClass), IfLoc(IL), ElseLoc(EL)
+{
+ setConditionVariable(C, var);
+ SubExprs[COND] = cond;
+ SubExprs[THEN] = then;
+ SubExprs[ELSE] = elsev;
+}
+
+VarDecl *IfStmt::getConditionVariable() const {
+ if (!SubExprs[VAR])
+ return nullptr;
+
+ DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
+ return cast<VarDecl>(DS->getSingleDecl());
+}
+
+void IfStmt::setConditionVariable(const ASTContext &C, VarDecl *V) {
+ if (!V) {
+ SubExprs[VAR] = nullptr;
+ return;
+ }
+
+ SourceRange VarRange = V->getSourceRange();
+ SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
+ VarRange.getEnd());
+}
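+
+// Illustrative example (not from the original file): the condition variable
+// is the C++ declaration-in-condition form, e.g.
+//
+//   if (int *P = lookup(Key))  // 'lookup' is hypothetical
+//     use(*P);
+//
+// 'P' is wrapped in a single-declaration DeclStmt stored in SubExprs[VAR];
+// ForStmt, SwitchStmt and WhileStmt below follow the same pattern.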
+
+ForStmt::ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
+ Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
+ SourceLocation RP)
+ : Stmt(ForStmtClass), ForLoc(FL), LParenLoc(LP), RParenLoc(RP)
+{
+ SubExprs[INIT] = Init;
+ setConditionVariable(C, condVar);
+ SubExprs[COND] = Cond;
+ SubExprs[INC] = Inc;
+ SubExprs[BODY] = Body;
+}
+
+VarDecl *ForStmt::getConditionVariable() const {
+ if (!SubExprs[CONDVAR])
+ return nullptr;
+
+ DeclStmt *DS = cast<DeclStmt>(SubExprs[CONDVAR]);
+ return cast<VarDecl>(DS->getSingleDecl());
+}
+
+void ForStmt::setConditionVariable(const ASTContext &C, VarDecl *V) {
+ if (!V) {
+ SubExprs[CONDVAR] = nullptr;
+ return;
+ }
+
+ SourceRange VarRange = V->getSourceRange();
+ SubExprs[CONDVAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
+ VarRange.getEnd());
+}
+
+SwitchStmt::SwitchStmt(const ASTContext &C, VarDecl *Var, Expr *cond)
+ : Stmt(SwitchStmtClass), FirstCase(nullptr, false) {
+ setConditionVariable(C, Var);
+ SubExprs[COND] = cond;
+ SubExprs[BODY] = nullptr;
+}
+
+VarDecl *SwitchStmt::getConditionVariable() const {
+ if (!SubExprs[VAR])
+ return nullptr;
+
+ DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
+ return cast<VarDecl>(DS->getSingleDecl());
+}
+
+void SwitchStmt::setConditionVariable(const ASTContext &C, VarDecl *V) {
+ if (!V) {
+ SubExprs[VAR] = nullptr;
+ return;
+ }
+
+ SourceRange VarRange = V->getSourceRange();
+ SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
+ VarRange.getEnd());
+}
+
+Stmt *SwitchCase::getSubStmt() {
+ if (isa<CaseStmt>(this))
+ return cast<CaseStmt>(this)->getSubStmt();
+ return cast<DefaultStmt>(this)->getSubStmt();
+}
+
+WhileStmt::WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
+ SourceLocation WL)
+ : Stmt(WhileStmtClass) {
+ setConditionVariable(C, Var);
+ SubExprs[COND] = cond;
+ SubExprs[BODY] = body;
+ WhileLoc = WL;
+}
+
+VarDecl *WhileStmt::getConditionVariable() const {
+ if (!SubExprs[VAR])
+ return nullptr;
+
+ DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
+ return cast<VarDecl>(DS->getSingleDecl());
+}
+
+void WhileStmt::setConditionVariable(const ASTContext &C, VarDecl *V) {
+ if (!V) {
+ SubExprs[VAR] = nullptr;
+ return;
+ }
+
+ SourceRange VarRange = V->getSourceRange();
+ SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
+ VarRange.getEnd());
+}
+
+// IndirectGotoStmt
+LabelDecl *IndirectGotoStmt::getConstantTarget() {
+ if (AddrLabelExpr *E =
+ dyn_cast<AddrLabelExpr>(getTarget()->IgnoreParenImpCasts()))
+ return E->getLabel();
+ return nullptr;
+}
+
+// ReturnStmt
+const Expr* ReturnStmt::getRetValue() const {
+ return cast_or_null<Expr>(RetExpr);
+}
+Expr* ReturnStmt::getRetValue() {
+ return cast_or_null<Expr>(RetExpr);
+}
+
+SEHTryStmt::SEHTryStmt(bool IsCXXTry,
+ SourceLocation TryLoc,
+ Stmt *TryBlock,
+ Stmt *Handler)
+ : Stmt(SEHTryStmtClass),
+ IsCXXTry(IsCXXTry),
+ TryLoc(TryLoc)
+{
+ Children[TRY] = TryBlock;
+ Children[HANDLER] = Handler;
+}
+
+SEHTryStmt* SEHTryStmt::Create(const ASTContext &C, bool IsCXXTry,
+ SourceLocation TryLoc, Stmt *TryBlock,
+ Stmt *Handler) {
+ return new (C) SEHTryStmt(IsCXXTry, TryLoc, TryBlock, Handler);
+}
+
+SEHExceptStmt* SEHTryStmt::getExceptHandler() const {
+ return dyn_cast<SEHExceptStmt>(getHandler());
+}
+
+SEHFinallyStmt* SEHTryStmt::getFinallyHandler() const {
+ return dyn_cast<SEHFinallyStmt>(getHandler());
+}
+
+SEHExceptStmt::SEHExceptStmt(SourceLocation Loc,
+ Expr *FilterExpr,
+ Stmt *Block)
+ : Stmt(SEHExceptStmtClass),
+ Loc(Loc)
+{
+ Children[FILTER_EXPR] = FilterExpr;
+ Children[BLOCK] = Block;
+}
+
+SEHExceptStmt* SEHExceptStmt::Create(const ASTContext &C, SourceLocation Loc,
+ Expr *FilterExpr, Stmt *Block) {
+ return new (C) SEHExceptStmt(Loc, FilterExpr, Block);
+}
+
+SEHFinallyStmt::SEHFinallyStmt(SourceLocation Loc,
+ Stmt *Block)
+ : Stmt(SEHFinallyStmtClass),
+ Loc(Loc),
+ Block(Block)
+{}
+
+SEHFinallyStmt* SEHFinallyStmt::Create(const ASTContext &C, SourceLocation Loc,
+ Stmt *Block) {
+ return new (C) SEHFinallyStmt(Loc, Block);
+}
+
+CapturedStmt::Capture::Capture(SourceLocation Loc, VariableCaptureKind Kind,
+ VarDecl *Var)
+ : VarAndKind(Var, Kind), Loc(Loc) {
+ switch (Kind) {
+ case VCK_This:
+ assert(!Var && "'this' capture cannot have a variable!");
+ break;
+ case VCK_ByRef:
+ assert(Var && "capturing by reference must have a variable!");
+ break;
+ case VCK_ByCopy:
+ assert(Var && "capturing by copy must have a variable!");
+ assert(
+ (Var->getType()->isScalarType() || (Var->getType()->isReferenceType() &&
+ Var->getType()
+ ->castAs<ReferenceType>()
+ ->getPointeeType()
+ ->isScalarType())) &&
+ "captures by copy are expected to have a scalar type!");
+ break;
+ case VCK_VLAType:
+ assert(!Var &&
+ "Variable-length array type capture cannot have a variable!");
+ break;
+ }
+}
+
+CapturedStmt::VariableCaptureKind
+CapturedStmt::Capture::getCaptureKind() const {
+ return VarAndKind.getInt();
+}
+
+VarDecl *CapturedStmt::Capture::getCapturedVar() const {
+ assert((capturesVariable() || capturesVariableByCopy()) &&
+ "No variable available for 'this' or VAT capture");
+ return VarAndKind.getPointer();
+}
+
+CapturedStmt::Capture *CapturedStmt::getStoredCaptures() const {
+ unsigned Size = sizeof(CapturedStmt) + sizeof(Stmt *) * (NumCaptures + 1);
+
+ // Offset of the first Capture object.
+ unsigned FirstCaptureOffset =
+ llvm::RoundUpToAlignment(Size, llvm::alignOf<Capture>());
+
+ return reinterpret_cast<Capture *>(
+ reinterpret_cast<char *>(const_cast<CapturedStmt *>(this))
+ + FirstCaptureOffset);
+}
+
+CapturedStmt::CapturedStmt(Stmt *S, CapturedRegionKind Kind,
+ ArrayRef<Capture> Captures,
+ ArrayRef<Expr *> CaptureInits,
+ CapturedDecl *CD,
+ RecordDecl *RD)
+ : Stmt(CapturedStmtClass), NumCaptures(Captures.size()),
+ CapDeclAndKind(CD, Kind), TheRecordDecl(RD) {
+ assert(S && "null captured statement");
+ assert(CD && "null captured declaration for captured statement");
+ assert(RD && "null record declaration for captured statement");
+
+ // Copy initialization expressions.
+ Stmt **Stored = getStoredStmts();
+ for (unsigned I = 0, N = NumCaptures; I != N; ++I)
+ *Stored++ = CaptureInits[I];
+
+ // Copy the statement being captured.
+ *Stored = S;
+
+ // Copy all Capture objects.
+ Capture *Buffer = getStoredCaptures();
+ std::copy(Captures.begin(), Captures.end(), Buffer);
+}
+
+CapturedStmt::CapturedStmt(EmptyShell Empty, unsigned NumCaptures)
+ : Stmt(CapturedStmtClass, Empty), NumCaptures(NumCaptures),
+ CapDeclAndKind(nullptr, CR_Default), TheRecordDecl(nullptr) {
+ getStoredStmts()[NumCaptures] = nullptr;
+}
+
+CapturedStmt *CapturedStmt::Create(const ASTContext &Context, Stmt *S,
+ CapturedRegionKind Kind,
+ ArrayRef<Capture> Captures,
+ ArrayRef<Expr *> CaptureInits,
+ CapturedDecl *CD,
+ RecordDecl *RD) {
+ // The layout is
+ //
+ // -----------------------------------------------------------
+ // | CapturedStmt, Init, ..., Init, S, Capture, ..., Capture |
+ // ----------------^-------------------^----------------------
+ // getStoredStmts() getStoredCaptures()
+ //
+ // where S is the statement being captured.
+ //
+ assert(CaptureInits.size() == Captures.size() && "wrong number of arguments");
+
+ unsigned Size = sizeof(CapturedStmt) + sizeof(Stmt *) * (Captures.size() + 1);
+ if (!Captures.empty()) {
+ // Realign for the following Capture array.
+ Size = llvm::RoundUpToAlignment(Size, llvm::alignOf<Capture>());
+ Size += sizeof(Capture) * Captures.size();
+ }
+
+ void *Mem = Context.Allocate(Size);
+ return new (Mem) CapturedStmt(S, Kind, Captures, CaptureInits, CD, RD);
+}
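+
+// Worked size computation for the layout above (illustrative; assumes a
+// 64-bit target with 8-byte pointers and an 8-byte-aligned Capture).  For
+// two captures:
+//
+//   Size  = sizeof(CapturedStmt) + 8 * (2 + 1);           // inits + S
+//   Size  = RoundUpToAlignment(Size, alignOf<Capture>());
+//   Size += sizeof(Capture) * 2;                          // Capture array
+//
+// getStoredStmts() points just past the CapturedStmt object and
+// getStoredCaptures() at the realigned offset.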
+
+CapturedStmt *CapturedStmt::CreateDeserialized(const ASTContext &Context,
+ unsigned NumCaptures) {
+ unsigned Size = sizeof(CapturedStmt) + sizeof(Stmt *) * (NumCaptures + 1);
+ if (NumCaptures > 0) {
+ // Realign for the following Capture array.
+ Size = llvm::RoundUpToAlignment(Size, llvm::alignOf<Capture>());
+ Size += sizeof(Capture) * NumCaptures;
+ }
+
+ void *Mem = Context.Allocate(Size);
+ return new (Mem) CapturedStmt(EmptyShell(), NumCaptures);
+}
+
+Stmt::child_range CapturedStmt::children() {
+ // Children are captured field initializers.
+ return child_range(getStoredStmts(), getStoredStmts() + NumCaptures);
+}
+
+CapturedDecl *CapturedStmt::getCapturedDecl() {
+ return CapDeclAndKind.getPointer();
+}
+const CapturedDecl *CapturedStmt::getCapturedDecl() const {
+ return CapDeclAndKind.getPointer();
+}
+
+/// \brief Set the outlined function declaration.
+void CapturedStmt::setCapturedDecl(CapturedDecl *D) {
+ assert(D && "null CapturedDecl");
+ CapDeclAndKind.setPointer(D);
+}
+
+/// \brief Retrieve the captured region kind.
+CapturedRegionKind CapturedStmt::getCapturedRegionKind() const {
+ return CapDeclAndKind.getInt();
+}
+
+/// \brief Set the captured region kind.
+void CapturedStmt::setCapturedRegionKind(CapturedRegionKind Kind) {
+ CapDeclAndKind.setInt(Kind);
+}
+
+bool CapturedStmt::capturesVariable(const VarDecl *Var) const {
+ for (const auto &I : captures()) {
+ if (!I.capturesVariable())
+ continue;
+
+ // This does not handle variable redeclarations. This should be
+ // extended to capture variables with redeclarations, for example
+ // a thread-private variable in OpenMP.
+ if (I.getCapturedVar() == Var)
+ return true;
+ }
+
+ return false;
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtCXX.cpp b/contrib/llvm/tools/clang/lib/AST/StmtCXX.cpp
new file mode 100644
index 0000000..e39a01d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/StmtCXX.cpp
@@ -0,0 +1,86 @@
+//===--- StmtCXX.cpp - Classes for representing C++ statements ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the subclasses of Stmt declared in StmtCXX.h.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtCXX.h"
+
+#include "clang/AST/ASTContext.h"
+
+using namespace clang;
+
+QualType CXXCatchStmt::getCaughtType() const {
+ if (ExceptionDecl)
+ return ExceptionDecl->getType();
+ return QualType();
+}
+
+CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, SourceLocation tryLoc,
+ Stmt *tryBlock, ArrayRef<Stmt *> handlers) {
+ std::size_t Size = sizeof(CXXTryStmt);
+ Size += ((handlers.size() + 1) * sizeof(Stmt *));
+
+ void *Mem = C.Allocate(Size, llvm::alignOf<CXXTryStmt>());
+ return new (Mem) CXXTryStmt(tryLoc, tryBlock, handlers);
+}
+
+CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, EmptyShell Empty,
+ unsigned numHandlers) {
+ std::size_t Size = sizeof(CXXTryStmt);
+ Size += ((numHandlers + 1) * sizeof(Stmt *));
+
+ void *Mem = C.Allocate(Size, llvm::alignOf<CXXTryStmt>());
+ return new (Mem) CXXTryStmt(Empty, numHandlers);
+}
+
+CXXTryStmt::CXXTryStmt(SourceLocation tryLoc, Stmt *tryBlock,
+ ArrayRef<Stmt *> handlers)
+ : Stmt(CXXTryStmtClass), TryLoc(tryLoc), NumHandlers(handlers.size()) {
+ Stmt **Stmts = reinterpret_cast<Stmt **>(this + 1);
+ Stmts[0] = tryBlock;
+ std::copy(handlers.begin(), handlers.end(), Stmts + 1);
+}
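+
+// Illustrative layout: for 'try { ... } catch (A &) {} catch (...) {}' the
+// trailing Stmt* array holds
+//
+//   Stmts[0] = the try block
+//   Stmts[1] = the CXXCatchStmt for 'A &'
+//   Stmts[2] = the CXXCatchStmt for the catch-all
+//
+// which is why Create allocates handlers.size() + 1 pointers.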
+
+CXXForRangeStmt::CXXForRangeStmt(DeclStmt *Range, DeclStmt *BeginEndStmt,
+ Expr *Cond, Expr *Inc, DeclStmt *LoopVar,
+ Stmt *Body, SourceLocation FL,
+ SourceLocation CAL, SourceLocation CL,
+ SourceLocation RPL)
+ : Stmt(CXXForRangeStmtClass), ForLoc(FL), CoawaitLoc(CAL), ColonLoc(CL),
+ RParenLoc(RPL) {
+ SubExprs[RANGE] = Range;
+ SubExprs[BEGINEND] = BeginEndStmt;
+ SubExprs[COND] = Cond;
+ SubExprs[INC] = Inc;
+ SubExprs[LOOPVAR] = LoopVar;
+ SubExprs[BODY] = Body;
+}
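+
+// Illustrative desugaring (not from the original file): for
+//
+//   for (auto &X : Vec) use(X);
+//
+// RANGE holds 'auto &&__range = Vec', BEGINEND is the single DeclStmt
+// declaring '__begin' and '__end', COND is '__begin != __end', INC is
+// '++__begin', LOOPVAR holds 'auto &X = *__begin', and BODY is 'use(X);'.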
+
+Expr *CXXForRangeStmt::getRangeInit() {
+ DeclStmt *RangeStmt = getRangeStmt();
+ VarDecl *RangeDecl = dyn_cast_or_null<VarDecl>(RangeStmt->getSingleDecl());
+ assert(RangeDecl && "for-range should have a single var decl");
+ return RangeDecl->getInit();
+}
+
+const Expr *CXXForRangeStmt::getRangeInit() const {
+ return const_cast<CXXForRangeStmt *>(this)->getRangeInit();
+}
+
+VarDecl *CXXForRangeStmt::getLoopVariable() {
+ Decl *LV = cast<DeclStmt>(getLoopVarStmt())->getSingleDecl();
+ assert(LV && "No loop variable in CXXForRangeStmt");
+ return cast<VarDecl>(LV);
+}
+
+const VarDecl *CXXForRangeStmt::getLoopVariable() const {
+ return const_cast<CXXForRangeStmt *>(this)->getLoopVariable();
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtIterator.cpp b/contrib/llvm/tools/clang/lib/AST/StmtIterator.cpp
new file mode 100644
index 0000000..861d090
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/StmtIterator.cpp
@@ -0,0 +1,114 @@
+//===--- StmtIterator.cpp - Iterators for Statements ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines internal methods for StmtIterator.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtIterator.h"
+#include "clang/AST/Decl.h"
+
+using namespace clang;
+
+// FIXME: Add support for dependent-sized array types in C++?
+// Does it even make sense to build a CFG for an uninstantiated template?
+static inline const VariableArrayType *FindVA(const Type* t) {
+ while (const ArrayType *vt = dyn_cast<ArrayType>(t)) {
+ if (const VariableArrayType *vat = dyn_cast<VariableArrayType>(vt))
+ if (vat->getSizeExpr())
+ return vat;
+
+ t = vt->getElementType().getTypePtr();
+ }
+
+ return nullptr;
+}
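+
+// Illustrative example: for 'void f(int n) { int A[n][n][3]; }' FindVA walks
+// the array type from the outside in and returns the outer VLA whose size
+// expression is 'n'; NextVA() below then restarts the walk on its element
+// type so the inner VLA is visited as well.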
+
+void StmtIteratorBase::NextVA() {
+ assert(getVAPtr());
+
+ const VariableArrayType *p = getVAPtr();
+ p = FindVA(p->getElementType().getTypePtr());
+ setVAPtr(p);
+
+ if (p)
+ return;
+
+ if (inDeclGroup()) {
+ if (VarDecl* VD = dyn_cast<VarDecl>(*DGI))
+ if (VD->hasInit())
+ return;
+
+ NextDecl();
+ }
+ else {
+ assert(inSizeOfTypeVA());
+ RawVAPtr = 0;
+ }
+}
+
+void StmtIteratorBase::NextDecl(bool ImmediateAdvance) {
+ assert(getVAPtr() == nullptr);
+ assert(inDeclGroup());
+
+ if (ImmediateAdvance)
+ ++DGI;
+
+ for ( ; DGI != DGE; ++DGI)
+ if (HandleDecl(*DGI))
+ return;
+
+ RawVAPtr = 0;
+}
+
+bool StmtIteratorBase::HandleDecl(Decl* D) {
+ if (VarDecl* VD = dyn_cast<VarDecl>(D)) {
+ if (const VariableArrayType* VAPtr = FindVA(VD->getType().getTypePtr())) {
+ setVAPtr(VAPtr);
+ return true;
+ }
+
+ if (VD->getInit())
+ return true;
+ }
+ else if (TypedefNameDecl* TD = dyn_cast<TypedefNameDecl>(D)) {
+ if (const VariableArrayType* VAPtr =
+ FindVA(TD->getUnderlyingType().getTypePtr())) {
+ setVAPtr(VAPtr);
+ return true;
+ }
+ }
+ else if (EnumConstantDecl* ECD = dyn_cast<EnumConstantDecl>(D)) {
+ if (ECD->getInitExpr())
+ return true;
+ }
+
+ return false;
+}
+
+StmtIteratorBase::StmtIteratorBase(Decl** dgi, Decl** dge)
+ : DGI(dgi), RawVAPtr(DeclGroupMode), DGE(dge) {
+ NextDecl(false);
+}
+
+StmtIteratorBase::StmtIteratorBase(const VariableArrayType* t)
+ : DGI(nullptr), RawVAPtr(SizeOfTypeVAMode) {
+ RawVAPtr |= reinterpret_cast<uintptr_t>(t);
+}
+
+Stmt*& StmtIteratorBase::GetDeclExpr() const {
+ if (const VariableArrayType* VAPtr = getVAPtr()) {
+ assert(VAPtr->SizeExpr);
+ return const_cast<Stmt*&>(VAPtr->SizeExpr);
+ }
+
+ assert(inDeclGroup());
+ VarDecl* VD = cast<VarDecl>(*DGI);
+ return *VD->getInitAddress();
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtObjC.cpp b/contrib/llvm/tools/clang/lib/AST/StmtObjC.cpp
new file mode 100644
index 0000000..a77550c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/StmtObjC.cpp
@@ -0,0 +1,73 @@
+//===--- StmtObjC.cpp - Classes for representing ObjC statements ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the subclasses of Stmt declared in StmtObjC.h.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtObjC.h"
+
+#include "clang/AST/Expr.h"
+#include "clang/AST/ASTContext.h"
+
+using namespace clang;
+
+ObjCForCollectionStmt::ObjCForCollectionStmt(Stmt *Elem, Expr *Collect,
+ Stmt *Body, SourceLocation FCL,
+ SourceLocation RPL)
+ : Stmt(ObjCForCollectionStmtClass) {
+ SubExprs[ELEM] = Elem;
+ SubExprs[COLLECTION] = Collect;
+ SubExprs[BODY] = Body;
+ ForLoc = FCL;
+ RParenLoc = RPL;
+}
+
+ObjCAtTryStmt::ObjCAtTryStmt(SourceLocation atTryLoc, Stmt *atTryStmt,
+ Stmt **CatchStmts, unsigned NumCatchStmts,
+ Stmt *atFinallyStmt)
+ : Stmt(ObjCAtTryStmtClass), AtTryLoc(atTryLoc),
+ NumCatchStmts(NumCatchStmts), HasFinally(atFinallyStmt != nullptr) {
+ Stmt **Stmts = getStmts();
+ Stmts[0] = atTryStmt;
+ for (unsigned I = 0; I != NumCatchStmts; ++I)
+ Stmts[I + 1] = CatchStmts[I];
+
+ if (HasFinally)
+ Stmts[NumCatchStmts + 1] = atFinallyStmt;
+}
+
+ObjCAtTryStmt *ObjCAtTryStmt::Create(const ASTContext &Context,
+ SourceLocation atTryLoc, Stmt *atTryStmt,
+ Stmt **CatchStmts, unsigned NumCatchStmts,
+ Stmt *atFinallyStmt) {
+ unsigned Size =
+ sizeof(ObjCAtTryStmt) +
+ (1 + NumCatchStmts + (atFinallyStmt != nullptr)) * sizeof(Stmt *);
+ void *Mem = Context.Allocate(Size, llvm::alignOf<ObjCAtTryStmt>());
+ return new (Mem) ObjCAtTryStmt(atTryLoc, atTryStmt, CatchStmts, NumCatchStmts,
+ atFinallyStmt);
+}
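+
+// Illustrative layout: for '@try {...} @catch (A *a) {...} @finally {...}'
+// the trailing Stmt* array is
+//
+//   Stmts[0] = the @try body
+//   Stmts[1] = the @catch clause
+//   Stmts[2] = the @finally statement
+//
+// matching the '1 + NumCatchStmts + (atFinallyStmt != nullptr)' pointers
+// allocated above.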
+
+ObjCAtTryStmt *ObjCAtTryStmt::CreateEmpty(const ASTContext &Context,
+ unsigned NumCatchStmts,
+ bool HasFinally) {
+ unsigned Size =
+ sizeof(ObjCAtTryStmt) + (1 + NumCatchStmts + HasFinally) * sizeof(Stmt *);
+ void *Mem = Context.Allocate(Size, llvm::alignOf<ObjCAtTryStmt>());
+ return new (Mem) ObjCAtTryStmt(EmptyShell(), NumCatchStmts, HasFinally);
+}
+
+SourceLocation ObjCAtTryStmt::getLocEnd() const {
+ if (HasFinally)
+ return getFinallyStmt()->getLocEnd();
+ if (NumCatchStmts)
+ return getCatchStmt(NumCatchStmts - 1)->getLocEnd();
+ return getTryBody()->getLocEnd();
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtOpenMP.cpp b/contrib/llvm/tools/clang/lib/AST/StmtOpenMP.cpp
new file mode 100644
index 0000000..7f923d8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/StmtOpenMP.cpp
@@ -0,0 +1,884 @@
+//===--- StmtOpenMP.cpp - Classes for OpenMP directives -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the subclasses of Stmt declared in StmtOpenMP.h.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtOpenMP.h"
+
+#include "clang/AST/ASTContext.h"
+
+using namespace clang;
+
+void OMPExecutableDirective::setClauses(ArrayRef<OMPClause *> Clauses) {
+ assert(Clauses.size() == getNumClauses() &&
+ "Number of clauses is not the same as the preallocated buffer");
+ std::copy(Clauses.begin(), Clauses.end(), getClauses().begin());
+}
+
+void OMPLoopDirective::setCounters(ArrayRef<Expr *> A) {
+ assert(A.size() == getCollapsedNumber() &&
+ "Number of loop counters is not the same as the collapsed number");
+ std::copy(A.begin(), A.end(), getCounters().begin());
+}
+
+void OMPLoopDirective::setPrivateCounters(ArrayRef<Expr *> A) {
+ assert(A.size() == getCollapsedNumber() && "Number of loop private counters "
+ "is not the same as the collapsed "
+ "number");
+ std::copy(A.begin(), A.end(), getPrivateCounters().begin());
+}
+
+void OMPLoopDirective::setInits(ArrayRef<Expr *> A) {
+ assert(A.size() == getCollapsedNumber() &&
+ "Number of counter inits is not the same as the collapsed number");
+ std::copy(A.begin(), A.end(), getInits().begin());
+}
+
+void OMPLoopDirective::setUpdates(ArrayRef<Expr *> A) {
+ assert(A.size() == getCollapsedNumber() &&
+ "Number of counter updates is not the same as the collapsed number");
+ std::copy(A.begin(), A.end(), getUpdates().begin());
+}
+
+void OMPLoopDirective::setFinals(ArrayRef<Expr *> A) {
+ assert(A.size() == getCollapsedNumber() &&
+ "Number of counter finals is not the same as the collapsed number");
+ std::copy(A.begin(), A.end(), getFinals().begin());
+}
+
+OMPParallelDirective *OMPParallelDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPParallelDirective *Dir =
+ new (Mem) OMPParallelDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setHasCancel(HasCancel);
+ return Dir;
+}
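+
+// Illustrative allocation pattern (shared by the Create methods below): the
+// directive object is followed by its OMPClause* array, then by the child
+// Stmt* slot(s).  For a hypothetical '#pragma omp parallel if(b)
+// num_threads(4)' the memory reads
+//
+//   [OMPParallelDirective][OMPClause* if][OMPClause* num_threads][Stmt*]
+//
+// where the single Stmt* slot holds the associated (captured) statement.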
+
+OMPParallelDirective *OMPParallelDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ return new (Mem) OMPParallelDirective(NumClauses);
+}
+
+OMPSimdDirective *
+OMPSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc, unsigned CollapsedNum,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_simd));
+ OMPSimdDirective *Dir = new (Mem)
+ OMPSimdDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ return Dir;
+}
+
+OMPSimdDirective *OMPSimdDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_simd));
+ return new (Mem) OMPSimdDirective(CollapsedNum, NumClauses);
+}
+
+OMPForDirective *
+OMPForDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc, unsigned CollapsedNum,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs, bool HasCancel) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPForDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for));
+ OMPForDirective *Dir =
+ new (Mem) OMPForDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setHasCancel(HasCancel);
+ return Dir;
+}
+
+OMPForDirective *OMPForDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPForDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for));
+ return new (Mem) OMPForDirective(CollapsedNum, NumClauses);
+}
+
+OMPForSimdDirective *
+OMPForSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc, unsigned CollapsedNum,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPForSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for_simd));
+ OMPForSimdDirective *Dir = new (Mem)
+ OMPForSimdDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ return Dir;
+}
+
+OMPForSimdDirective *OMPForSimdDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPForSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for_simd));
+ return new (Mem) OMPForSimdDirective(CollapsedNum, NumClauses);
+}
+
+OMPSectionsDirective *OMPSectionsDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSectionsDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPSectionsDirective *Dir =
+ new (Mem) OMPSectionsDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setHasCancel(HasCancel);
+ return Dir;
+}
+
+OMPSectionsDirective *OMPSectionsDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSectionsDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ return new (Mem) OMPSectionsDirective(NumClauses);
+}
+
+OMPSectionDirective *OMPSectionDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ Stmt *AssociatedStmt,
+ bool HasCancel) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSectionDirective),
+ llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size + sizeof(Stmt *));
+ OMPSectionDirective *Dir = new (Mem) OMPSectionDirective(StartLoc, EndLoc);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setHasCancel(HasCancel);
+ return Dir;
+}
+
+OMPSectionDirective *OMPSectionDirective::CreateEmpty(const ASTContext &C,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSectionDirective),
+ llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size + sizeof(Stmt *));
+ return new (Mem) OMPSectionDirective();
+}
+
+OMPSingleDirective *OMPSingleDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSingleDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPSingleDirective *Dir =
+ new (Mem) OMPSingleDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPSingleDirective *OMPSingleDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSingleDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ return new (Mem) OMPSingleDirective(NumClauses);
+}
+
+OMPMasterDirective *OMPMasterDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ Stmt *AssociatedStmt) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPMasterDirective),
+ llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size + sizeof(Stmt *));
+ OMPMasterDirective *Dir = new (Mem) OMPMasterDirective(StartLoc, EndLoc);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPMasterDirective *OMPMasterDirective::CreateEmpty(const ASTContext &C,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPMasterDirective),
+ llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size + sizeof(Stmt *));
+ return new (Mem) OMPMasterDirective();
+}
+
+OMPCriticalDirective *OMPCriticalDirective::Create(
+ const ASTContext &C, const DeclarationNameInfo &Name,
+ SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPCriticalDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPCriticalDirective *Dir =
+ new (Mem) OMPCriticalDirective(Name, StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPCriticalDirective *OMPCriticalDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPCriticalDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ return new (Mem) OMPCriticalDirective(NumClauses);
+}
+
+OMPParallelForDirective *OMPParallelForDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs, bool HasCancel) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelForDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_parallel_for));
+ OMPParallelForDirective *Dir = new (Mem)
+ OMPParallelForDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setHasCancel(HasCancel);
+ return Dir;
+}
+
+OMPParallelForDirective *
+OMPParallelForDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
+ unsigned CollapsedNum, EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelForDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_parallel_for));
+ return new (Mem) OMPParallelForDirective(CollapsedNum, NumClauses);
+}
+
+OMPParallelForSimdDirective *OMPParallelForSimdDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelForSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(
+ Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_parallel_for_simd));
+ OMPParallelForSimdDirective *Dir = new (Mem) OMPParallelForSimdDirective(
+ StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ return Dir;
+}
+
+OMPParallelForSimdDirective *
+OMPParallelForSimdDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum, EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelForSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(
+ Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_parallel_for_simd));
+ return new (Mem) OMPParallelForSimdDirective(CollapsedNum, NumClauses);
+}
+
+OMPParallelSectionsDirective *OMPParallelSectionsDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelSectionsDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPParallelSectionsDirective *Dir =
+ new (Mem) OMPParallelSectionsDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setHasCancel(HasCancel);
+ return Dir;
+}
+
+OMPParallelSectionsDirective *
+OMPParallelSectionsDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses, EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelSectionsDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ return new (Mem) OMPParallelSectionsDirective(NumClauses);
+}
+
+OMPTaskDirective *
+OMPTaskDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt, bool HasCancel) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPTaskDirective *Dir =
+ new (Mem) OMPTaskDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setHasCancel(HasCancel);
+ return Dir;
+}
+
+OMPTaskDirective *OMPTaskDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ return new (Mem) OMPTaskDirective(NumClauses);
+}
+
+OMPTaskyieldDirective *OMPTaskyieldDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(sizeof(OMPTaskyieldDirective));
+ OMPTaskyieldDirective *Dir =
+ new (Mem) OMPTaskyieldDirective(StartLoc, EndLoc);
+ return Dir;
+}
+
+OMPTaskyieldDirective *OMPTaskyieldDirective::CreateEmpty(const ASTContext &C,
+ EmptyShell) {
+ void *Mem = C.Allocate(sizeof(OMPTaskyieldDirective));
+ return new (Mem) OMPTaskyieldDirective();
+}
+
+OMPBarrierDirective *OMPBarrierDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(sizeof(OMPBarrierDirective));
+ OMPBarrierDirective *Dir = new (Mem) OMPBarrierDirective(StartLoc, EndLoc);
+ return Dir;
+}
+
+OMPBarrierDirective *OMPBarrierDirective::CreateEmpty(const ASTContext &C,
+ EmptyShell) {
+ void *Mem = C.Allocate(sizeof(OMPBarrierDirective));
+ return new (Mem) OMPBarrierDirective();
+}
+
+OMPTaskwaitDirective *OMPTaskwaitDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(sizeof(OMPTaskwaitDirective));
+ OMPTaskwaitDirective *Dir = new (Mem) OMPTaskwaitDirective(StartLoc, EndLoc);
+ return Dir;
+}
+
+OMPTaskwaitDirective *OMPTaskwaitDirective::CreateEmpty(const ASTContext &C,
+ EmptyShell) {
+ void *Mem = C.Allocate(sizeof(OMPTaskwaitDirective));
+ return new (Mem) OMPTaskwaitDirective();
+}
+
+OMPTaskgroupDirective *OMPTaskgroupDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ Stmt *AssociatedStmt) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskgroupDirective),
+ llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size + sizeof(Stmt *));
+ OMPTaskgroupDirective *Dir =
+ new (Mem) OMPTaskgroupDirective(StartLoc, EndLoc);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPTaskgroupDirective *OMPTaskgroupDirective::CreateEmpty(const ASTContext &C,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskgroupDirective),
+ llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size + sizeof(Stmt *));
+ return new (Mem) OMPTaskgroupDirective();
+}
+
+OMPCancellationPointDirective *OMPCancellationPointDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ OpenMPDirectiveKind CancelRegion) {
+ unsigned Size = llvm::RoundUpToAlignment(
+ sizeof(OMPCancellationPointDirective), llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size);
+ OMPCancellationPointDirective *Dir =
+ new (Mem) OMPCancellationPointDirective(StartLoc, EndLoc);
+ Dir->setCancelRegion(CancelRegion);
+ return Dir;
+}
+
+OMPCancellationPointDirective *
+OMPCancellationPointDirective::CreateEmpty(const ASTContext &C, EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(
+ sizeof(OMPCancellationPointDirective), llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size);
+ return new (Mem) OMPCancellationPointDirective();
+}
+
+OMPCancelDirective *
+OMPCancelDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses,
+ OpenMPDirectiveKind CancelRegion) {
+ unsigned Size = llvm::RoundUpToAlignment(
+ sizeof(OMPCancelDirective) + sizeof(OMPClause *) * Clauses.size(),
+ llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size);
+ OMPCancelDirective *Dir =
+ new (Mem) OMPCancelDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setCancelRegion(CancelRegion);
+ return Dir;
+}
+
+OMPCancelDirective *OMPCancelDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPCancelDirective) +
+ sizeof(OMPClause *) * NumClauses,
+ llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size);
+ return new (Mem) OMPCancelDirective(NumClauses);
+}
+
+OMPFlushDirective *OMPFlushDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPFlushDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size());
+ OMPFlushDirective *Dir =
+ new (Mem) OMPFlushDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ return Dir;
+}
+
+OMPFlushDirective *OMPFlushDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPFlushDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses);
+ return new (Mem) OMPFlushDirective(NumClauses);
+}
+
+OMPOrderedDirective *OMPOrderedDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPOrderedDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(Stmt *) + sizeof(OMPClause *) * Clauses.size());
+ OMPOrderedDirective *Dir =
+ new (Mem) OMPOrderedDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPOrderedDirective *OMPOrderedDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPOrderedDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(Stmt *) + sizeof(OMPClause *) * NumClauses);
+ return new (Mem) OMPOrderedDirective(NumClauses);
+}
+
+OMPAtomicDirective *OMPAtomicDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
+ Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPAtomicDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ 5 * sizeof(Stmt *));
+ OMPAtomicDirective *Dir =
+ new (Mem) OMPAtomicDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setX(X);
+ Dir->setV(V);
+ Dir->setExpr(E);
+ Dir->setUpdateExpr(UE);
+ Dir->IsXLHSInRHSPart = IsXLHSInRHSPart;
+ Dir->IsPostfixUpdate = IsPostfixUpdate;
+ return Dir;
+}
+
+OMPAtomicDirective *OMPAtomicDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPAtomicDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + 5 * sizeof(Stmt *));
+ return new (Mem) OMPAtomicDirective(NumClauses);
+}
+
+OMPTargetDirective *OMPTargetDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTargetDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPTargetDirective *Dir =
+ new (Mem) OMPTargetDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPTargetDirective *OMPTargetDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTargetDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ return new (Mem) OMPTargetDirective(NumClauses);
+}
+
+OMPTargetDataDirective *OMPTargetDataDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
+ void *Mem =
+ C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPTargetDataDirective),
+ llvm::alignOf<OMPClause *>()) +
+ sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPTargetDataDirective *Dir =
+ new (Mem) OMPTargetDataDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPTargetDataDirective *OMPTargetDataDirective::CreateEmpty(const ASTContext &C,
+ unsigned N,
+ EmptyShell) {
+ void *Mem =
+ C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPTargetDataDirective),
+ llvm::alignOf<OMPClause *>()) +
+ sizeof(OMPClause *) * N + sizeof(Stmt *));
+ return new (Mem) OMPTargetDataDirective(N);
+}
+
+OMPTeamsDirective *OMPTeamsDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTeamsDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPTeamsDirective *Dir =
+ new (Mem) OMPTeamsDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPTeamsDirective *OMPTeamsDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTeamsDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ return new (Mem) OMPTeamsDirective(NumClauses);
+}
+
+OMPTaskLoopDirective *OMPTaskLoopDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskLoopDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_taskloop));
+ OMPTaskLoopDirective *Dir = new (Mem)
+ OMPTaskLoopDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ return Dir;
+}
+
+OMPTaskLoopDirective *OMPTaskLoopDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskLoopDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_taskloop));
+ return new (Mem) OMPTaskLoopDirective(CollapsedNum, NumClauses);
+}
+
+OMPTaskLoopSimdDirective *OMPTaskLoopSimdDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskLoopSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_taskloop_simd));
+ OMPTaskLoopSimdDirective *Dir = new (Mem)
+ OMPTaskLoopSimdDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ return Dir;
+}
+
+OMPTaskLoopSimdDirective *
+OMPTaskLoopSimdDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
+ unsigned CollapsedNum, EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskLoopSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_taskloop_simd));
+ return new (Mem) OMPTaskLoopSimdDirective(CollapsedNum, NumClauses);
+}
+
+OMPDistributeDirective *OMPDistributeDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPDistributeDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_distribute));
+ OMPDistributeDirective *Dir = new (Mem)
+ OMPDistributeDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ return Dir;
+}
+
+OMPDistributeDirective *
+OMPDistributeDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
+ unsigned CollapsedNum, EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPDistributeDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_distribute));
+ return new (Mem) OMPDistributeDirective(CollapsedNum, NumClauses);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp b/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp
new file mode 100644
index 0000000..e55b2fc
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp
@@ -0,0 +1,2458 @@
+//===--- StmtPrinter.cpp - Printing implementation for Stmt ASTs ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Stmt::dumpPretty/Stmt::printPretty methods, which
+// pretty print the AST back out to C code.
+//
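+// A minimal usage sketch (illustrative; Buf, OS, S and Ctx are placeholder
+// names for any buffer, stream, Stmt* and ASTContext in scope):
+//
+//   std::string Buf;
+//   llvm::raw_string_ostream OS(Buf);
+//   S->printPretty(OS, /*Helper=*/nullptr, PrintingPolicy(Ctx.getLangOpts()));
+//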
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprOpenMP.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/CharInfo.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Format.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// StmtPrinter Visitor
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class StmtPrinter : public StmtVisitor<StmtPrinter> {
+ raw_ostream &OS;
+ unsigned IndentLevel;
+ clang::PrinterHelper* Helper;
+ PrintingPolicy Policy;
+
+ public:
+ StmtPrinter(raw_ostream &os, PrinterHelper* helper,
+ const PrintingPolicy &Policy,
+ unsigned Indentation = 0)
+ : OS(os), IndentLevel(Indentation), Helper(helper), Policy(Policy) {}
+
+ void PrintStmt(Stmt *S) {
+ PrintStmt(S, Policy.Indentation);
+ }
+
+ void PrintStmt(Stmt *S, int SubIndent) {
+ IndentLevel += SubIndent;
+ if (S && isa<Expr>(S)) {
+ // If this is an expr used in a stmt context, indent and newline it.
+ Indent();
+ Visit(S);
+ OS << ";\n";
+ } else if (S) {
+ Visit(S);
+ } else {
+ Indent() << "<<<NULL STATEMENT>>>\n";
+ }
+ IndentLevel -= SubIndent;
+ }
+
+ void PrintRawCompoundStmt(CompoundStmt *S);
+ void PrintRawDecl(Decl *D);
+ void PrintRawDeclStmt(const DeclStmt *S);
+ void PrintRawIfStmt(IfStmt *If);
+ void PrintRawCXXCatchStmt(CXXCatchStmt *Catch);
+ void PrintCallArgs(CallExpr *E);
+ void PrintRawSEHExceptHandler(SEHExceptStmt *S);
+ void PrintRawSEHFinallyStmt(SEHFinallyStmt *S);
+ void PrintOMPExecutableDirective(OMPExecutableDirective *S);
+
+ void PrintExpr(Expr *E) {
+ if (E)
+ Visit(E);
+ else
+ OS << "<null expr>";
+ }
+
+ raw_ostream &Indent(int Delta = 0) {
+ for (int i = 0, e = IndentLevel+Delta; i < e; ++i)
+ OS << " ";
+ return OS;
+ }
+
+ void Visit(Stmt* S) {
+ if (Helper && Helper->handledStmt(S, OS))
+ return;
+ StmtVisitor<StmtPrinter>::Visit(S);
+ }
+
+ void VisitStmt(Stmt *Node) LLVM_ATTRIBUTE_UNUSED {
+ Indent() << "<<unknown stmt type>>\n";
+ }
+ void VisitExpr(Expr *Node) LLVM_ATTRIBUTE_UNUSED {
+ OS << "<<unknown expr type>>";
+ }
+ void VisitCXXNamedCastExpr(CXXNamedCastExpr *Node);
+
+#define ABSTRACT_STMT(CLASS)
+#define STMT(CLASS, PARENT) \
+ void Visit##CLASS(CLASS *Node);
+#include "clang/AST/StmtNodes.inc"
+ };
+}
+
+//===----------------------------------------------------------------------===//
+// Stmt printing methods.
+//===----------------------------------------------------------------------===//
+
+/// PrintRawCompoundStmt - Print a compound stmt without indenting the {, and
+/// with no newline after the }.
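+/// For example, PrintRawIfStmt emits "if (c) " and then calls this, so the
+/// opening brace lands on the same line as the condition.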
+void StmtPrinter::PrintRawCompoundStmt(CompoundStmt *Node) {
+ OS << "{\n";
+ for (auto *I : Node->body())
+ PrintStmt(I);
+
+ Indent() << "}";
+}
+
+void StmtPrinter::PrintRawDecl(Decl *D) {
+ D->print(OS, Policy, IndentLevel);
+}
+
+void StmtPrinter::PrintRawDeclStmt(const DeclStmt *S) {
+ SmallVector<Decl*, 2> Decls(S->decls());
+ Decl::printGroup(Decls.data(), Decls.size(), OS, Policy, IndentLevel);
+}
+
+void StmtPrinter::VisitNullStmt(NullStmt *Node) {
+ Indent() << ";\n";
+}
+
+void StmtPrinter::VisitDeclStmt(DeclStmt *Node) {
+ Indent();
+ PrintRawDeclStmt(Node);
+ OS << ";\n";
+}
+
+void StmtPrinter::VisitCompoundStmt(CompoundStmt *Node) {
+ Indent();
+ PrintRawCompoundStmt(Node);
+ OS << "\n";
+}
+
+void StmtPrinter::VisitCaseStmt(CaseStmt *Node) {
+ Indent(-1) << "case ";
+ PrintExpr(Node->getLHS());
+ if (Node->getRHS()) {
+ OS << " ... ";
+ PrintExpr(Node->getRHS());
+ }
+ OS << ":\n";
+
+ PrintStmt(Node->getSubStmt(), 0);
+}
+
+void StmtPrinter::VisitDefaultStmt(DefaultStmt *Node) {
+ Indent(-1) << "default:\n";
+ PrintStmt(Node->getSubStmt(), 0);
+}
+
+void StmtPrinter::VisitLabelStmt(LabelStmt *Node) {
+ Indent(-1) << Node->getName() << ":\n";
+ PrintStmt(Node->getSubStmt(), 0);
+}
+
+void StmtPrinter::VisitAttributedStmt(AttributedStmt *Node) {
+ for (const auto *Attr : Node->getAttrs()) {
+ Attr->printPretty(OS, Policy);
+ }
+
+ PrintStmt(Node->getSubStmt(), 0);
+}
+
+void StmtPrinter::PrintRawIfStmt(IfStmt *If) {
+ OS << "if (";
+ if (const DeclStmt *DS = If->getConditionVariableDeclStmt())
+ PrintRawDeclStmt(DS);
+ else
+ PrintExpr(If->getCond());
+ OS << ')';
+
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(If->getThen())) {
+ OS << ' ';
+ PrintRawCompoundStmt(CS);
+ OS << (If->getElse() ? ' ' : '\n');
+ } else {
+ OS << '\n';
+ PrintStmt(If->getThen());
+ if (If->getElse()) Indent();
+ }
+
+ if (Stmt *Else = If->getElse()) {
+ OS << "else";
+
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Else)) {
+ OS << ' ';
+ PrintRawCompoundStmt(CS);
+ OS << '\n';
+ } else if (IfStmt *ElseIf = dyn_cast<IfStmt>(Else)) {
+ OS << ' ';
+ PrintRawIfStmt(ElseIf);
+ } else {
+ OS << '\n';
+ PrintStmt(If->getElse());
+ }
+ }
+}
+
+void StmtPrinter::VisitIfStmt(IfStmt *If) {
+ Indent();
+ PrintRawIfStmt(If);
+}
+
+void StmtPrinter::VisitSwitchStmt(SwitchStmt *Node) {
+ Indent() << "switch (";
+ if (const DeclStmt *DS = Node->getConditionVariableDeclStmt())
+ PrintRawDeclStmt(DS);
+ else
+ PrintExpr(Node->getCond());
+ OS << ")";
+
+ // Pretty-print CompoundStmt bodies (very common).
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
+ OS << " ";
+ PrintRawCompoundStmt(CS);
+ OS << "\n";
+ } else {
+ OS << "\n";
+ PrintStmt(Node->getBody());
+ }
+}
+
+void StmtPrinter::VisitWhileStmt(WhileStmt *Node) {
+ Indent() << "while (";
+ if (const DeclStmt *DS = Node->getConditionVariableDeclStmt())
+ PrintRawDeclStmt(DS);
+ else
+ PrintExpr(Node->getCond());
+ OS << ")\n";
+ PrintStmt(Node->getBody());
+}
+
+void StmtPrinter::VisitDoStmt(DoStmt *Node) {
+ Indent() << "do ";
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
+ PrintRawCompoundStmt(CS);
+ OS << " ";
+ } else {
+ OS << "\n";
+ PrintStmt(Node->getBody());
+ Indent();
+ }
+
+ OS << "while (";
+ PrintExpr(Node->getCond());
+ OS << ");\n";
+}
+
+void StmtPrinter::VisitForStmt(ForStmt *Node) {
+ Indent() << "for (";
+ if (Node->getInit()) {
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(Node->getInit()))
+ PrintRawDeclStmt(DS);
+ else
+ PrintExpr(cast<Expr>(Node->getInit()));
+ }
+ OS << ";";
+ if (Node->getCond()) {
+ OS << " ";
+ PrintExpr(Node->getCond());
+ }
+ OS << ";";
+ if (Node->getInc()) {
+ OS << " ";
+ PrintExpr(Node->getInc());
+ }
+ OS << ") ";
+
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
+ PrintRawCompoundStmt(CS);
+ OS << "\n";
+ } else {
+ OS << "\n";
+ PrintStmt(Node->getBody());
+ }
+}
+
+void StmtPrinter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *Node) {
+ Indent() << "for (";
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(Node->getElement()))
+ PrintRawDeclStmt(DS);
+ else
+ PrintExpr(cast<Expr>(Node->getElement()));
+ OS << " in ";
+ PrintExpr(Node->getCollection());
+ OS << ") ";
+
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
+ PrintRawCompoundStmt(CS);
+ OS << "\n";
+ } else {
+ OS << "\n";
+ PrintStmt(Node->getBody());
+ }
+}
+
+void StmtPrinter::VisitCXXForRangeStmt(CXXForRangeStmt *Node) {
+ Indent() << "for (";
+ PrintingPolicy SubPolicy(Policy);
+ SubPolicy.SuppressInitializers = true;
+ Node->getLoopVariable()->print(OS, SubPolicy, IndentLevel);
+ OS << " : ";
+ PrintExpr(Node->getRangeInit());
+ OS << ") {\n";
+ PrintStmt(Node->getBody());
+ Indent() << "}";
+ if (Policy.IncludeNewlines) OS << "\n";
+}
+
+void StmtPrinter::VisitMSDependentExistsStmt(MSDependentExistsStmt *Node) {
+ Indent();
+ if (Node->isIfExists())
+ OS << "__if_exists (";
+ else
+ OS << "__if_not_exists (";
+
+ if (NestedNameSpecifier *Qualifier
+ = Node->getQualifierLoc().getNestedNameSpecifier())
+ Qualifier->print(OS, Policy);
+
+ OS << Node->getNameInfo() << ") ";
+
+ PrintRawCompoundStmt(Node->getSubStmt());
+}
+
+void StmtPrinter::VisitGotoStmt(GotoStmt *Node) {
+ Indent() << "goto " << Node->getLabel()->getName() << ";";
+ if (Policy.IncludeNewlines) OS << "\n";
+}
+
+void StmtPrinter::VisitIndirectGotoStmt(IndirectGotoStmt *Node) {
+ Indent() << "goto *";
+ PrintExpr(Node->getTarget());
+ OS << ";";
+ if (Policy.IncludeNewlines) OS << "\n";
+}
+
+void StmtPrinter::VisitContinueStmt(ContinueStmt *Node) {
+ Indent() << "continue;";
+ if (Policy.IncludeNewlines) OS << "\n";
+}
+
+void StmtPrinter::VisitBreakStmt(BreakStmt *Node) {
+ Indent() << "break;";
+ if (Policy.IncludeNewlines) OS << "\n";
+}
+
+void StmtPrinter::VisitReturnStmt(ReturnStmt *Node) {
+ Indent() << "return";
+ if (Node->getRetValue()) {
+ OS << " ";
+ PrintExpr(Node->getRetValue());
+ }
+ OS << ";";
+ if (Policy.IncludeNewlines) OS << "\n";
+}
+
+void StmtPrinter::VisitGCCAsmStmt(GCCAsmStmt *Node) {
+ Indent() << "asm ";
+
+ if (Node->isVolatile())
+ OS << "volatile ";
+
+ OS << "(";
+ VisitStringLiteral(Node->getAsmString());
+
+ // Outputs
+ if (Node->getNumOutputs() != 0 || Node->getNumInputs() != 0 ||
+ Node->getNumClobbers() != 0)
+ OS << " : ";
+
+ for (unsigned i = 0, e = Node->getNumOutputs(); i != e; ++i) {
+ if (i != 0)
+ OS << ", ";
+
+ if (!Node->getOutputName(i).empty()) {
+ OS << '[';
+ OS << Node->getOutputName(i);
+ OS << "] ";
+ }
+
+ VisitStringLiteral(Node->getOutputConstraintLiteral(i));
+ OS << " (";
+ Visit(Node->getOutputExpr(i));
+ OS << ")";
+ }
+
+ // Inputs
+ if (Node->getNumInputs() != 0 || Node->getNumClobbers() != 0)
+ OS << " : ";
+
+ for (unsigned i = 0, e = Node->getNumInputs(); i != e; ++i) {
+ if (i != 0)
+ OS << ", ";
+
+ if (!Node->getInputName(i).empty()) {
+ OS << '[';
+ OS << Node->getInputName(i);
+ OS << "] ";
+ }
+
+ VisitStringLiteral(Node->getInputConstraintLiteral(i));
+ OS << " (";
+ Visit(Node->getInputExpr(i));
+ OS << ")";
+ }
+
+ // Clobbers
+ if (Node->getNumClobbers() != 0)
+ OS << " : ";
+
+ for (unsigned i = 0, e = Node->getNumClobbers(); i != e; ++i) {
+ if (i != 0)
+ OS << ", ";
+
+ VisitStringLiteral(Node->getClobberStringLiteral(i));
+ }
+
+ OS << ");";
+ if (Policy.IncludeNewlines) OS << "\n";
+}
+
+void StmtPrinter::VisitMSAsmStmt(MSAsmStmt *Node) {
+ // FIXME: Implement MS style inline asm statement printer.
+ Indent() << "__asm ";
+ if (Node->hasBraces())
+ OS << "{\n";
+ OS << Node->getAsmString() << "\n";
+ if (Node->hasBraces())
+ Indent() << "}\n";
+}
+
+void StmtPrinter::VisitCapturedStmt(CapturedStmt *Node) {
+ PrintStmt(Node->getCapturedDecl()->getBody());
+}
+
+void StmtPrinter::VisitObjCAtTryStmt(ObjCAtTryStmt *Node) {
+ Indent() << "@try";
+ if (CompoundStmt *TS = dyn_cast<CompoundStmt>(Node->getTryBody())) {
+ PrintRawCompoundStmt(TS);
+ OS << "\n";
+ }
+
+ for (unsigned I = 0, N = Node->getNumCatchStmts(); I != N; ++I) {
+ ObjCAtCatchStmt *catchStmt = Node->getCatchStmt(I);
+ Indent() << "@catch(";
+ if (Decl *DS = catchStmt->getCatchParamDecl())
+ PrintRawDecl(DS);
+ OS << ")";
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(catchStmt->getCatchBody())) {
+ PrintRawCompoundStmt(CS);
+ OS << "\n";
+ }
+ }
+
+ if (ObjCAtFinallyStmt *FS = static_cast<ObjCAtFinallyStmt *>(
+ Node->getFinallyStmt())) {
+ Indent() << "@finally";
+ PrintRawCompoundStmt(dyn_cast<CompoundStmt>(FS->getFinallyBody()));
+ OS << "\n";
+ }
+}
+
+void StmtPrinter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *Node) {
+}
+
+void StmtPrinter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *Node) {
+ Indent() << "@catch (...) { /* todo */ }\n";
+}
+
+void StmtPrinter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *Node) {
+ Indent() << "@throw";
+ if (Node->getThrowExpr()) {
+ OS << " ";
+ PrintExpr(Node->getThrowExpr());
+ }
+ OS << ";\n";
+}
+
+void StmtPrinter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *Node) {
+ Indent() << "@synchronized (";
+ PrintExpr(Node->getSynchExpr());
+ OS << ")";
+ PrintRawCompoundStmt(Node->getSynchBody());
+ OS << "\n";
+}
+
+void StmtPrinter::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *Node) {
+ Indent() << "@autoreleasepool";
+ PrintRawCompoundStmt(dyn_cast<CompoundStmt>(Node->getSubStmt()));
+ OS << "\n";
+}
+
+void StmtPrinter::PrintRawCXXCatchStmt(CXXCatchStmt *Node) {
+ OS << "catch (";
+ if (Decl *ExDecl = Node->getExceptionDecl())
+ PrintRawDecl(ExDecl);
+ else
+ OS << "...";
+ OS << ") ";
+ PrintRawCompoundStmt(cast<CompoundStmt>(Node->getHandlerBlock()));
+}
+
+void StmtPrinter::VisitCXXCatchStmt(CXXCatchStmt *Node) {
+ Indent();
+ PrintRawCXXCatchStmt(Node);
+ OS << "\n";
+}
+
+void StmtPrinter::VisitCXXTryStmt(CXXTryStmt *Node) {
+ Indent() << "try ";
+ PrintRawCompoundStmt(Node->getTryBlock());
+ for (unsigned i = 0, e = Node->getNumHandlers(); i < e; ++i) {
+ OS << " ";
+ PrintRawCXXCatchStmt(Node->getHandler(i));
+ }
+ OS << "\n";
+}
+
+void StmtPrinter::VisitSEHTryStmt(SEHTryStmt *Node) {
+ Indent() << (Node->getIsCXXTry() ? "try " : "__try ");
+ PrintRawCompoundStmt(Node->getTryBlock());
+ SEHExceptStmt *E = Node->getExceptHandler();
+ SEHFinallyStmt *F = Node->getFinallyHandler();
+ if (E)
+ PrintRawSEHExceptHandler(E);
+ else {
+ assert(F && "Must have a finally block...");
+ PrintRawSEHFinallyStmt(F);
+ }
+ OS << "\n";
+}
+
+void StmtPrinter::PrintRawSEHFinallyStmt(SEHFinallyStmt *Node) {
+ OS << "__finally ";
+ PrintRawCompoundStmt(Node->getBlock());
+ OS << "\n";
+}
+
+void StmtPrinter::PrintRawSEHExceptHandler(SEHExceptStmt *Node) {
+ OS << "__except (";
+ VisitExpr(Node->getFilterExpr());
+ OS << ")\n";
+ PrintRawCompoundStmt(Node->getBlock());
+ OS << "\n";
+}
+
+void StmtPrinter::VisitSEHExceptStmt(SEHExceptStmt *Node) {
+ Indent();
+ PrintRawSEHExceptHandler(Node);
+ OS << "\n";
+}
+
+void StmtPrinter::VisitSEHFinallyStmt(SEHFinallyStmt *Node) {
+ Indent();
+ PrintRawSEHFinallyStmt(Node);
+ OS << "\n";
+}
+
+void StmtPrinter::VisitSEHLeaveStmt(SEHLeaveStmt *Node) {
+ Indent() << "__leave;";
+ if (Policy.IncludeNewlines) OS << "\n";
+}
+
+//===----------------------------------------------------------------------===//
+// OpenMP clauses printing methods
+//===----------------------------------------------------------------------===//
+
+namespace {
+class OMPClausePrinter : public OMPClauseVisitor<OMPClausePrinter> {
+ raw_ostream &OS;
+ const PrintingPolicy &Policy;
+ /// \brief Process clauses with list of variables.
+ template <typename T>
+ void VisitOMPClauseList(T *Node, char StartSym);
+public:
+ OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy)
+ : OS(OS), Policy(Policy) { }
+#define OPENMP_CLAUSE(Name, Class) \
+ void Visit##Class(Class *S);
+#include "clang/Basic/OpenMPKinds.def"
+};
+
+void OMPClausePrinter::VisitOMPIfClause(OMPIfClause *Node) {
+ OS << "if(";
+ if (Node->getNameModifier() != OMPD_unknown)
+ OS << getOpenMPDirectiveName(Node->getNameModifier()) << ": ";
+ Node->getCondition()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPFinalClause(OMPFinalClause *Node) {
+ OS << "final(";
+ Node->getCondition()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPNumThreadsClause(OMPNumThreadsClause *Node) {
+ OS << "num_threads(";
+ Node->getNumThreads()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPSafelenClause(OMPSafelenClause *Node) {
+ OS << "safelen(";
+ Node->getSafelen()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPSimdlenClause(OMPSimdlenClause *Node) {
+ OS << "simdlen(";
+ Node->getSimdlen()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPCollapseClause(OMPCollapseClause *Node) {
+ OS << "collapse(";
+ Node->getNumForLoops()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPDefaultClause(OMPDefaultClause *Node) {
+ OS << "default("
+ << getOpenMPSimpleClauseTypeName(OMPC_default, Node->getDefaultKind())
+ << ")";
+}
+
+void OMPClausePrinter::VisitOMPProcBindClause(OMPProcBindClause *Node) {
+ OS << "proc_bind("
+ << getOpenMPSimpleClauseTypeName(OMPC_proc_bind, Node->getProcBindKind())
+ << ")";
+}
+
+void OMPClausePrinter::VisitOMPScheduleClause(OMPScheduleClause *Node) {
+ OS << "schedule(";
+ if (Node->getFirstScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown) {
+ OS << getOpenMPSimpleClauseTypeName(OMPC_schedule,
+ Node->getFirstScheduleModifier());
+ if (Node->getSecondScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown) {
+ OS << ", ";
+ OS << getOpenMPSimpleClauseTypeName(OMPC_schedule,
+ Node->getSecondScheduleModifier());
+ }
+ OS << ": ";
+ }
+ OS << getOpenMPSimpleClauseTypeName(OMPC_schedule, Node->getScheduleKind());
+ if (Node->getChunkSize()) {
+ OS << ", ";
+ Node->getChunkSize()->printPretty(OS, nullptr, Policy);
+ }
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPOrderedClause(OMPOrderedClause *Node) {
+ OS << "ordered";
+ if (auto *Num = Node->getNumForLoops()) {
+ OS << "(";
+ Num->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPNowaitClause(OMPNowaitClause *) {
+ OS << "nowait";
+}
+
+void OMPClausePrinter::VisitOMPUntiedClause(OMPUntiedClause *) {
+ OS << "untied";
+}
+
+void OMPClausePrinter::VisitOMPNogroupClause(OMPNogroupClause *) {
+ OS << "nogroup";
+}
+
+void OMPClausePrinter::VisitOMPMergeableClause(OMPMergeableClause *) {
+ OS << "mergeable";
+}
+
+void OMPClausePrinter::VisitOMPReadClause(OMPReadClause *) { OS << "read"; }
+
+void OMPClausePrinter::VisitOMPWriteClause(OMPWriteClause *) { OS << "write"; }
+
+void OMPClausePrinter::VisitOMPUpdateClause(OMPUpdateClause *) {
+ OS << "update";
+}
+
+void OMPClausePrinter::VisitOMPCaptureClause(OMPCaptureClause *) {
+ OS << "capture";
+}
+
+void OMPClausePrinter::VisitOMPSeqCstClause(OMPSeqCstClause *) {
+ OS << "seq_cst";
+}
+
+void OMPClausePrinter::VisitOMPThreadsClause(OMPThreadsClause *) {
+ OS << "threads";
+}
+
+void OMPClausePrinter::VisitOMPSIMDClause(OMPSIMDClause *) { OS << "simd"; }
+
+void OMPClausePrinter::VisitOMPDeviceClause(OMPDeviceClause *Node) {
+ OS << "device(";
+ Node->getDevice()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPNumTeamsClause(OMPNumTeamsClause *Node) {
+ OS << "num_teams(";
+ Node->getNumTeams()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPThreadLimitClause(OMPThreadLimitClause *Node) {
+ OS << "thread_limit(";
+ Node->getThreadLimit()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPPriorityClause(OMPPriorityClause *Node) {
+ OS << "priority(";
+ Node->getPriority()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPGrainsizeClause(OMPGrainsizeClause *Node) {
+ OS << "grainsize(";
+ Node->getGrainsize()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPNumTasksClause(OMPNumTasksClause *Node) {
+ OS << "num_tasks(";
+ Node->getNumTasks()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPHintClause(OMPHintClause *Node) {
+ OS << "hint(";
+ Node->getHint()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
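+// Callers print the clause name and pass the opening symbol as StartSym;
+// for variables a and b this emits "(a,b", and the caller appends the
+// closing ")", e.g. VisitOMPPrivateClause below yields "private(a,b)".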
+template<typename T>
+void OMPClausePrinter::VisitOMPClauseList(T *Node, char StartSym) {
+ for (typename T::varlist_iterator I = Node->varlist_begin(),
+ E = Node->varlist_end();
+ I != E; ++I) {
+ assert(*I && "Expected non-null Stmt");
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(*I)) {
+ OS << (I == Node->varlist_begin() ? StartSym : ',');
+ cast<NamedDecl>(DRE->getDecl())->printQualifiedName(OS);
+ } else {
+ OS << (I == Node->varlist_begin() ? StartSym : ',');
+ (*I)->printPretty(OS, nullptr, Policy, 0);
+ }
+ }
+}
+
+void OMPClausePrinter::VisitOMPPrivateClause(OMPPrivateClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "private";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPFirstprivateClause(OMPFirstprivateClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "firstprivate";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPLastprivateClause(OMPLastprivateClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "lastprivate";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPSharedClause(OMPSharedClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "shared";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPReductionClause(OMPReductionClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "reduction(";
+ NestedNameSpecifier *QualifierLoc =
+ Node->getQualifierLoc().getNestedNameSpecifier();
+ OverloadedOperatorKind OOK =
+ Node->getNameInfo().getName().getCXXOverloadedOperator();
+ if (QualifierLoc == nullptr && OOK != OO_None) {
+ // Print reduction identifier in C format
+ OS << getOperatorSpelling(OOK);
+ } else {
+ // Use C++ format
+ if (QualifierLoc != nullptr)
+ QualifierLoc->print(OS, Policy);
+ OS << Node->getNameInfo();
+ }
+ OS << ":";
+ VisitOMPClauseList(Node, ' ');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPLinearClause(OMPLinearClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "linear";
+ if (Node->getModifierLoc().isValid()) {
+ OS << '('
+ << getOpenMPSimpleClauseTypeName(OMPC_linear, Node->getModifier());
+ }
+ VisitOMPClauseList(Node, '(');
+ if (Node->getModifierLoc().isValid())
+ OS << ')';
+ if (Node->getStep() != nullptr) {
+ OS << ": ";
+ Node->getStep()->printPretty(OS, nullptr, Policy, 0);
+ }
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPAlignedClause(OMPAlignedClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "aligned";
+ VisitOMPClauseList(Node, '(');
+ if (Node->getAlignment() != nullptr) {
+ OS << ": ";
+ Node->getAlignment()->printPretty(OS, nullptr, Policy, 0);
+ }
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPCopyinClause(OMPCopyinClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "copyin";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPCopyprivateClause(OMPCopyprivateClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "copyprivate";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPFlushClause(OMPFlushClause *Node) {
+ if (!Node->varlist_empty()) {
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPDependClause(OMPDependClause *Node) {
+ OS << "depend(";
+ OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(),
+ Node->getDependencyKind());
+ if (!Node->varlist_empty()) {
+ OS << " :";
+ VisitOMPClauseList(Node, ' ');
+ }
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPMapClause(OMPMapClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "map(";
+ if (Node->getMapType() != OMPC_MAP_unknown) {
+ if (Node->getMapTypeModifier() != OMPC_MAP_unknown) {
+ OS << getOpenMPSimpleClauseTypeName(OMPC_map,
+ Node->getMapTypeModifier());
+ OS << ',';
+ }
+ OS << getOpenMPSimpleClauseTypeName(OMPC_map, Node->getMapType());
+ OS << ':';
+ }
+ VisitOMPClauseList(Node, ' ');
+ OS << ")";
+ }
+}
+}
+
+//===----------------------------------------------------------------------===//
+// OpenMP directives printing methods
+//===----------------------------------------------------------------------===//
+
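+// Each directive visitor prints its pragma text (e.g. "#pragma omp parallel ")
+// and then calls this helper, which appends the non-implicit clauses, a
+// newline, and the captured body, yielding e.g.:
+//   #pragma omp parallel private(a)
+//   {
+//     ...
+//   }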
+void StmtPrinter::PrintOMPExecutableDirective(OMPExecutableDirective *S) {
+ OMPClausePrinter Printer(OS, Policy);
+ ArrayRef<OMPClause *> Clauses = S->clauses();
+ for (ArrayRef<OMPClause *>::iterator I = Clauses.begin(), E = Clauses.end();
+ I != E; ++I)
+ if (*I && !(*I)->isImplicit()) {
+ Printer.Visit(*I);
+ OS << ' ';
+ }
+ OS << "\n";
+ if (S->hasAssociatedStmt() && S->getAssociatedStmt()) {
+ assert(isa<CapturedStmt>(S->getAssociatedStmt()) &&
+ "Expected captured statement!");
+ Stmt *CS = cast<CapturedStmt>(S->getAssociatedStmt())->getCapturedStmt();
+ PrintStmt(CS);
+ }
+}
+
+void StmtPrinter::VisitOMPParallelDirective(OMPParallelDirective *Node) {
+ Indent() << "#pragma omp parallel ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPSimdDirective(OMPSimdDirective *Node) {
+ Indent() << "#pragma omp simd ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPForDirective(OMPForDirective *Node) {
+ Indent() << "#pragma omp for ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPForSimdDirective(OMPForSimdDirective *Node) {
+ Indent() << "#pragma omp for simd ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPSectionsDirective(OMPSectionsDirective *Node) {
+ Indent() << "#pragma omp sections ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPSectionDirective(OMPSectionDirective *Node) {
+ Indent() << "#pragma omp section";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPSingleDirective(OMPSingleDirective *Node) {
+ Indent() << "#pragma omp single ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPMasterDirective(OMPMasterDirective *Node) {
+ Indent() << "#pragma omp master";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPCriticalDirective(OMPCriticalDirective *Node) {
+ Indent() << "#pragma omp critical";
+ if (Node->getDirectiveName().getName()) {
+ OS << " (";
+ Node->getDirectiveName().printName(OS);
+ OS << ")";
+ }
+ OS << " ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPParallelForDirective(OMPParallelForDirective *Node) {
+ Indent() << "#pragma omp parallel for ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPParallelForSimdDirective(
+ OMPParallelForSimdDirective *Node) {
+ Indent() << "#pragma omp parallel for simd ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPParallelSectionsDirective(
+ OMPParallelSectionsDirective *Node) {
+ Indent() << "#pragma omp parallel sections ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTaskDirective(OMPTaskDirective *Node) {
+ Indent() << "#pragma omp task ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTaskyieldDirective(OMPTaskyieldDirective *Node) {
+ Indent() << "#pragma omp taskyield";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPBarrierDirective(OMPBarrierDirective *Node) {
+ Indent() << "#pragma omp barrier";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTaskwaitDirective(OMPTaskwaitDirective *Node) {
+ Indent() << "#pragma omp taskwait";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTaskgroupDirective(OMPTaskgroupDirective *Node) {
+ Indent() << "#pragma omp taskgroup";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPFlushDirective(OMPFlushDirective *Node) {
+ Indent() << "#pragma omp flush ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPOrderedDirective(OMPOrderedDirective *Node) {
+ Indent() << "#pragma omp ordered ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPAtomicDirective(OMPAtomicDirective *Node) {
+ Indent() << "#pragma omp atomic ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTargetDirective(OMPTargetDirective *Node) {
+ Indent() << "#pragma omp target ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTargetDataDirective(OMPTargetDataDirective *Node) {
+ Indent() << "#pragma omp target data ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTeamsDirective(OMPTeamsDirective *Node) {
+ Indent() << "#pragma omp teams ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPCancellationPointDirective(
+ OMPCancellationPointDirective *Node) {
+ Indent() << "#pragma omp cancellation point "
+ << getOpenMPDirectiveName(Node->getCancelRegion());
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPCancelDirective(OMPCancelDirective *Node) {
+ Indent() << "#pragma omp cancel "
+ << getOpenMPDirectiveName(Node->getCancelRegion()) << " ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTaskLoopDirective(OMPTaskLoopDirective *Node) {
+ Indent() << "#pragma omp taskloop ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTaskLoopSimdDirective(
+ OMPTaskLoopSimdDirective *Node) {
+ Indent() << "#pragma omp taskloop simd ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPDistributeDirective(OMPDistributeDirective *Node) {
+ Indent() << "#pragma omp distribute ";
+ PrintOMPExecutableDirective(Node);
+}
+
+//===----------------------------------------------------------------------===//
+// Expr printing methods.
+//===----------------------------------------------------------------------===//
+
+void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) {
+ if (NestedNameSpecifier *Qualifier = Node->getQualifier())
+ Qualifier->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
+ OS << Node->getNameInfo();
+ if (Node->hasExplicitTemplateArgs())
+ TemplateSpecializationType::PrintTemplateArgumentList(
+ OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy);
+}
+
+void StmtPrinter::VisitDependentScopeDeclRefExpr(
+ DependentScopeDeclRefExpr *Node) {
+ if (NestedNameSpecifier *Qualifier = Node->getQualifier())
+ Qualifier->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
+ OS << Node->getNameInfo();
+ if (Node->hasExplicitTemplateArgs())
+ TemplateSpecializationType::PrintTemplateArgumentList(
+ OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy);
+}
+
+void StmtPrinter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *Node) {
+ if (Node->getQualifier())
+ Node->getQualifier()->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
+ OS << Node->getNameInfo();
+ if (Node->hasExplicitTemplateArgs())
+ TemplateSpecializationType::PrintTemplateArgumentList(
+ OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy);
+}
+
+void StmtPrinter::VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node) {
+ if (Node->getBase()) {
+ PrintExpr(Node->getBase());
+ OS << (Node->isArrow() ? "->" : ".");
+ }
+ OS << *Node->getDecl();
+}
+
+void StmtPrinter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node) {
+ if (Node->isSuperReceiver())
+ OS << "super.";
+ else if (Node->isObjectReceiver() && Node->getBase()) {
+ PrintExpr(Node->getBase());
+ OS << ".";
+ } else if (Node->isClassReceiver() && Node->getClassReceiver()) {
+ OS << Node->getClassReceiver()->getName() << ".";
+ }
+
+ if (Node->isImplicitProperty())
+ Node->getImplicitPropertyGetter()->getSelector().print(OS);
+ else
+ OS << Node->getExplicitProperty()->getName();
+}
+
+void StmtPrinter::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *Node) {

+ PrintExpr(Node->getBaseExpr());
+ OS << "[";
+ PrintExpr(Node->getKeyExpr());
+ OS << "]";
+}
+
+void StmtPrinter::VisitPredefinedExpr(PredefinedExpr *Node) {
+ OS << PredefinedExpr::getIdentTypeName(Node->getIdentType());
+}
+
+void StmtPrinter::VisitCharacterLiteral(CharacterLiteral *Node) {
+ unsigned value = Node->getValue();
+
+ switch (Node->getKind()) {
+ case CharacterLiteral::Ascii: break; // no prefix.
+ case CharacterLiteral::Wide: OS << 'L'; break;
+ case CharacterLiteral::UTF16: OS << 'u'; break;
+ case CharacterLiteral::UTF32: OS << 'U'; break;
+ }
+
+ switch (value) {
+ case '\\':
+ OS << "'\\\\'";
+ break;
+ case '\'':
+ OS << "'\\''";
+ break;
+ case '\a':
+ // TODO: K&R: the meaning of '\\a' is different in traditional C
+ OS << "'\\a'";
+ break;
+ case '\b':
+ OS << "'\\b'";
+ break;
+ // Nonstandard escape sequence.
+ /*case '\e':
+ OS << "'\\e'";
+ break;*/
+ case '\f':
+ OS << "'\\f'";
+ break;
+ case '\n':
+ OS << "'\\n'";
+ break;
+ case '\r':
+ OS << "'\\r'";
+ break;
+ case '\t':
+ OS << "'\\t'";
+ break;
+ case '\v':
+ OS << "'\\v'";
+ break;
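+ // Fall back to numeric escapes: e.g. 0x2603 prints as '\u2603' and
+ // 0x1F602 as '\U0001f602'.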
+ default:
+ if (value < 256 && isPrintable((unsigned char)value))
+ OS << "'" << (char)value << "'";
+ else if (value < 256)
+ OS << "'\\x" << llvm::format("%02x", value) << "'";
+ else if (value <= 0xFFFF)
+ OS << "'\\u" << llvm::format("%04x", value) << "'";
+ else
+ OS << "'\\U" << llvm::format("%08x", value) << "'";
+ }
+}
+
+void StmtPrinter::VisitIntegerLiteral(IntegerLiteral *Node) {
+ bool isSigned = Node->getType()->isSignedIntegerType();
+ OS << Node->getValue().toString(10, isSigned);
+
+ // Emit suffixes. Integer literals are always a builtin integer type.
+ switch (Node->getType()->getAs<BuiltinType>()->getKind()) {
+ default: llvm_unreachable("Unexpected type for integer literal!");
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U: OS << "i8"; break;
+ case BuiltinType::UChar: OS << "Ui8"; break;
+ case BuiltinType::Short: OS << "i16"; break;
+ case BuiltinType::UShort: OS << "Ui16"; break;
+ case BuiltinType::Int: break; // no suffix.
+ case BuiltinType::UInt: OS << 'U'; break;
+ case BuiltinType::Long: OS << 'L'; break;
+ case BuiltinType::ULong: OS << "UL"; break;
+ case BuiltinType::LongLong: OS << "LL"; break;
+ case BuiltinType::ULongLong: OS << "ULL"; break;
+ }
+}
+
+static void PrintFloatingLiteral(raw_ostream &OS, FloatingLiteral *Node,
+ bool PrintSuffix) {
+ SmallString<16> Str;
+ Node->getValue().toString(Str);
+ OS << Str;
+ if (Str.find_first_not_of("-0123456789") == StringRef::npos)
+ OS << '.'; // Trailing dot in order to separate from ints.
+
+ if (!PrintSuffix)
+ return;
+
+ // Emit suffixes. Float literals are always a builtin float type.
+ switch (Node->getType()->getAs<BuiltinType>()->getKind()) {
+ default: llvm_unreachable("Unexpected type for float literal!");
+ case BuiltinType::Half: break; // FIXME: suffix?
+ case BuiltinType::Double: break; // no suffix.
+ case BuiltinType::Float: OS << 'F'; break;
+ case BuiltinType::LongDouble: OS << 'L'; break;
+ }
+}
+
+void StmtPrinter::VisitFloatingLiteral(FloatingLiteral *Node) {
+ PrintFloatingLiteral(OS, Node, /*PrintSuffix=*/true);
+}
+
+void StmtPrinter::VisitImaginaryLiteral(ImaginaryLiteral *Node) {
+ PrintExpr(Node->getSubExpr());
+ OS << "i";
+}
+
+void StmtPrinter::VisitStringLiteral(StringLiteral *Str) {
+ Str->outputString(OS);
+}
+void StmtPrinter::VisitParenExpr(ParenExpr *Node) {
+ OS << "(";
+ PrintExpr(Node->getSubExpr());
+ OS << ")";
+}
+void StmtPrinter::VisitUnaryOperator(UnaryOperator *Node) {
+ if (!Node->isPostfix()) {
+ OS << UnaryOperator::getOpcodeStr(Node->getOpcode());
+
+ // Print a space if this is an "identifier operator" like __real, or if
+ // it might be concatenated incorrectly like '+'.
+ switch (Node->getOpcode()) {
+ default: break;
+ case UO_Real:
+ case UO_Imag:
+ case UO_Extension:
+ OS << ' ';
+ break;
+ case UO_Plus:
+ case UO_Minus:
+ if (isa<UnaryOperator>(Node->getSubExpr()))
+ OS << ' ';
+ break;
+ }
+ }
+ PrintExpr(Node->getSubExpr());
+
+ if (Node->isPostfix())
+ OS << UnaryOperator::getOpcodeStr(Node->getOpcode());
+}
+
+void StmtPrinter::VisitOffsetOfExpr(OffsetOfExpr *Node) {
+ OS << "__builtin_offsetof(";
+ Node->getTypeSourceInfo()->getType().print(OS, Policy);
+ OS << ", ";
+ bool PrintedSomething = false;
+ for (unsigned i = 0, n = Node->getNumComponents(); i < n; ++i) {
+ OffsetOfNode ON = Node->getComponent(i);
+ if (ON.getKind() == OffsetOfNode::Array) {
+ // Array node
+ OS << "[";
+ PrintExpr(Node->getIndexExpr(ON.getArrayExprIndex()));
+ OS << "]";
+ PrintedSomething = true;
+ continue;
+ }
+
+ // Skip implicit base indirections.
+ if (ON.getKind() == OffsetOfNode::Base)
+ continue;
+
+ // Field or identifier node.
+ IdentifierInfo *Id = ON.getFieldName();
+ if (!Id)
+ continue;
+
+ if (PrintedSomething)
+ OS << ".";
+ else
+ PrintedSomething = true;
+ OS << Id->getName();
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node){
+ switch(Node->getKind()) {
+ case UETT_SizeOf:
+ OS << "sizeof";
+ break;
+ case UETT_AlignOf:
+ if (Policy.LangOpts.CPlusPlus)
+ OS << "alignof";
+ else if (Policy.LangOpts.C11)
+ OS << "_Alignof";
+ else
+ OS << "__alignof";
+ break;
+ case UETT_VecStep:
+ OS << "vec_step";
+ break;
+ case UETT_OpenMPRequiredSimdAlign:
+ OS << "__builtin_omp_required_simd_align";
+ break;
+ }
+ if (Node->isArgumentType()) {
+ OS << '(';
+ Node->getArgumentType().print(OS, Policy);
+ OS << ')';
+ } else {
+ OS << " ";
+ PrintExpr(Node->getArgumentExpr());
+ }
+}
+
+void StmtPrinter::VisitGenericSelectionExpr(GenericSelectionExpr *Node) {
+ OS << "_Generic(";
+ PrintExpr(Node->getControllingExpr());
+ for (unsigned i = 0; i != Node->getNumAssocs(); ++i) {
+ OS << ", ";
+ QualType T = Node->getAssocType(i);
+ if (T.isNull())
+ OS << "default";
+ else
+ T.print(OS, Policy);
+ OS << ": ";
+ PrintExpr(Node->getAssocExpr(i));
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitArraySubscriptExpr(ArraySubscriptExpr *Node) {
+ PrintExpr(Node->getLHS());
+ OS << "[";
+ PrintExpr(Node->getRHS());
+ OS << "]";
+}
+
+void StmtPrinter::VisitOMPArraySectionExpr(OMPArraySectionExpr *Node) {
+ PrintExpr(Node->getBase());
+ OS << "[";
+ if (Node->getLowerBound())
+ PrintExpr(Node->getLowerBound());
+ if (Node->getColonLoc().isValid()) {
+ OS << ":";
+ if (Node->getLength())
+ PrintExpr(Node->getLength());
+ }
+ OS << "]";
+}
+
+void StmtPrinter::PrintCallArgs(CallExpr *Call) {
+ for (unsigned i = 0, e = Call->getNumArgs(); i != e; ++i) {
+ if (isa<CXXDefaultArgExpr>(Call->getArg(i))) {
+ // Don't print any defaulted arguments
+ break;
+ }
+
+ if (i) OS << ", ";
+ PrintExpr(Call->getArg(i));
+ }
+}
+
+void StmtPrinter::VisitCallExpr(CallExpr *Call) {
+ PrintExpr(Call->getCallee());
+ OS << "(";
+ PrintCallArgs(Call);
+ OS << ")";
+}
+void StmtPrinter::VisitMemberExpr(MemberExpr *Node) {
+ // FIXME: Suppress printing implicit bases (like "this")
+ PrintExpr(Node->getBase());
+
+ MemberExpr *ParentMember = dyn_cast<MemberExpr>(Node->getBase());
+ FieldDecl *ParentDecl = ParentMember
+ ? dyn_cast<FieldDecl>(ParentMember->getMemberDecl()) : nullptr;
+
+ if (!ParentDecl || !ParentDecl->isAnonymousStructOrUnion())
+ OS << (Node->isArrow() ? "->" : ".");
+
+ if (FieldDecl *FD = dyn_cast<FieldDecl>(Node->getMemberDecl()))
+ if (FD->isAnonymousStructOrUnion())
+ return;
+
+ if (NestedNameSpecifier *Qualifier = Node->getQualifier())
+ Qualifier->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
+ OS << Node->getMemberNameInfo();
+ if (Node->hasExplicitTemplateArgs())
+ TemplateSpecializationType::PrintTemplateArgumentList(
+ OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy);
+}
+void StmtPrinter::VisitObjCIsaExpr(ObjCIsaExpr *Node) {
+ PrintExpr(Node->getBase());
+ OS << (Node->isArrow() ? "->isa" : ".isa");
+}
+
+void StmtPrinter::VisitExtVectorElementExpr(ExtVectorElementExpr *Node) {
+ PrintExpr(Node->getBase());
+ OS << ".";
+ OS << Node->getAccessor().getName();
+}
+void StmtPrinter::VisitCStyleCastExpr(CStyleCastExpr *Node) {
+ OS << '(';
+ Node->getTypeAsWritten().print(OS, Policy);
+ OS << ')';
+ PrintExpr(Node->getSubExpr());
+}
+void StmtPrinter::VisitCompoundLiteralExpr(CompoundLiteralExpr *Node) {
+ OS << '(';
+ Node->getType().print(OS, Policy);
+ OS << ')';
+ PrintExpr(Node->getInitializer());
+}
+void StmtPrinter::VisitImplicitCastExpr(ImplicitCastExpr *Node) {
+ // No need to print anything, simply forward to the subexpression.
+ PrintExpr(Node->getSubExpr());
+}
+void StmtPrinter::VisitBinaryOperator(BinaryOperator *Node) {
+ PrintExpr(Node->getLHS());
+ OS << " " << BinaryOperator::getOpcodeStr(Node->getOpcode()) << " ";
+ PrintExpr(Node->getRHS());
+}
+void StmtPrinter::VisitCompoundAssignOperator(CompoundAssignOperator *Node) {
+ PrintExpr(Node->getLHS());
+ OS << " " << BinaryOperator::getOpcodeStr(Node->getOpcode()) << " ";
+ PrintExpr(Node->getRHS());
+}
+void StmtPrinter::VisitConditionalOperator(ConditionalOperator *Node) {
+ PrintExpr(Node->getCond());
+ OS << " ? ";
+ PrintExpr(Node->getLHS());
+ OS << " : ";
+ PrintExpr(Node->getRHS());
+}
+
+// GNU extensions.
+
+void
+StmtPrinter::VisitBinaryConditionalOperator(BinaryConditionalOperator *Node) {
+ PrintExpr(Node->getCommon());
+ OS << " ?: ";
+ PrintExpr(Node->getFalseExpr());
+}
+void StmtPrinter::VisitAddrLabelExpr(AddrLabelExpr *Node) {
+ OS << "&&" << Node->getLabel()->getName();
+}
+
+void StmtPrinter::VisitStmtExpr(StmtExpr *E) {
+ OS << "(";
+ PrintRawCompoundStmt(E->getSubStmt());
+ OS << ")";
+}
+
+void StmtPrinter::VisitChooseExpr(ChooseExpr *Node) {
+ OS << "__builtin_choose_expr(";
+ PrintExpr(Node->getCond());
+ OS << ", ";
+ PrintExpr(Node->getLHS());
+ OS << ", ";
+ PrintExpr(Node->getRHS());
+ OS << ")";
+}
+
+void StmtPrinter::VisitGNUNullExpr(GNUNullExpr *) {
+ OS << "__null";
+}
+
+void StmtPrinter::VisitShuffleVectorExpr(ShuffleVectorExpr *Node) {
+ OS << "__builtin_shufflevector(";
+ for (unsigned i = 0, e = Node->getNumSubExprs(); i != e; ++i) {
+ if (i) OS << ", ";
+ PrintExpr(Node->getExpr(i));
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitConvertVectorExpr(ConvertVectorExpr *Node) {
+ OS << "__builtin_convertvector(";
+ PrintExpr(Node->getSrcExpr());
+ OS << ", ";
+ Node->getType().print(OS, Policy);
+ OS << ")";
+}
+
+void StmtPrinter::VisitInitListExpr(InitListExpr* Node) {
+ if (Node->getSyntacticForm()) {
+ Visit(Node->getSyntacticForm());
+ return;
+ }
+
+ OS << "{";
+ for (unsigned i = 0, e = Node->getNumInits(); i != e; ++i) {
+ if (i) OS << ", ";
+ if (Node->getInit(i))
+ PrintExpr(Node->getInit(i));
+ else
+ OS << "{}";
+ }
+ OS << "}";
+}
+
+void StmtPrinter::VisitParenListExpr(ParenListExpr* Node) {
+ OS << "(";
+ for (unsigned i = 0, e = Node->getNumExprs(); i != e; ++i) {
+ if (i) OS << ", ";
+ PrintExpr(Node->getExpr(i));
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitDesignatedInitExpr(DesignatedInitExpr *Node) {
+ bool NeedsEquals = true;
+ for (DesignatedInitExpr::designators_iterator D = Node->designators_begin(),
+ DEnd = Node->designators_end();
+ D != DEnd; ++D) {
+ if (D->isFieldDesignator()) {
+ if (D->getDotLoc().isInvalid()) {
+ if (IdentifierInfo *II = D->getFieldName()) {
+ OS << II->getName() << ":";
+ NeedsEquals = false;
+ }
+ } else {
+ OS << "." << D->getFieldName()->getName();
+ }
+ } else {
+ OS << "[";
+ if (D->isArrayDesignator()) {
+ PrintExpr(Node->getArrayIndex(*D));
+ } else {
+ PrintExpr(Node->getArrayRangeStart(*D));
+ OS << " ... ";
+ PrintExpr(Node->getArrayRangeEnd(*D));
+ }
+ OS << "]";
+ }
+ }
+
+ if (NeedsEquals)
+ OS << " = ";
+ else
+ OS << " ";
+ PrintExpr(Node->getInit());
+}
+
+void StmtPrinter::VisitDesignatedInitUpdateExpr(
+ DesignatedInitUpdateExpr *Node) {
+ OS << "{";
+ OS << "/*base*/";
+ PrintExpr(Node->getBase());
+ OS << ", ";
+
+ OS << "/*updater*/";
+ PrintExpr(Node->getUpdater());
+ OS << "}";
+}
+
+void StmtPrinter::VisitNoInitExpr(NoInitExpr *Node) {
+ OS << "/*no init*/";
+}
+
+void StmtPrinter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *Node) {
+ if (Policy.LangOpts.CPlusPlus) {
+ OS << "/*implicit*/";
+ Node->getType().print(OS, Policy);
+ OS << "()";
+ } else {
+ OS << "/*implicit*/(";
+ Node->getType().print(OS, Policy);
+ OS << ')';
+ if (Node->getType()->isRecordType())
+ OS << "{}";
+ else
+ OS << 0;
+ }
+}
+
+void StmtPrinter::VisitVAArgExpr(VAArgExpr *Node) {
+ OS << "__builtin_va_arg(";
+ PrintExpr(Node->getSubExpr());
+ OS << ", ";
+ Node->getType().print(OS, Policy);
+ OS << ")";
+}
+
+void StmtPrinter::VisitPseudoObjectExpr(PseudoObjectExpr *Node) {
+ PrintExpr(Node->getSyntacticForm());
+}
+
+void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) {
+ const char *Name = nullptr;
+ switch (Node->getOp()) {
+#define BUILTIN(ID, TYPE, ATTRS)
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
+ case AtomicExpr::AO ## ID: \
+ Name = #ID "("; \
+ break;
+#include "clang/Basic/Builtins.def"
+ }
+ OS << Name;
+
+ // AtomicExpr stores its subexpressions in a permuted order.
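+ // (The getters below -- getPtr(), getVal1(), getOrder(), etc. -- hide the
+ // exact storage order; see AtomicExpr in clang/AST/Expr.h.)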
+ PrintExpr(Node->getPtr());
+ if (Node->getOp() != AtomicExpr::AO__c11_atomic_load &&
+ Node->getOp() != AtomicExpr::AO__atomic_load_n) {
+ OS << ", ";
+ PrintExpr(Node->getVal1());
+ }
+ if (Node->getOp() == AtomicExpr::AO__atomic_exchange ||
+ Node->isCmpXChg()) {
+ OS << ", ";
+ PrintExpr(Node->getVal2());
+ }
+ if (Node->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
+ Node->getOp() == AtomicExpr::AO__atomic_compare_exchange_n) {
+ OS << ", ";
+ PrintExpr(Node->getWeak());
+ }
+ if (Node->getOp() != AtomicExpr::AO__c11_atomic_init) {
+ OS << ", ";
+ PrintExpr(Node->getOrder());
+ }
+ if (Node->isCmpXChg()) {
+ OS << ", ";
+ PrintExpr(Node->getOrderFail());
+ }
+ OS << ")";
+}
+
+// C++
+void StmtPrinter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *Node) {
+ const char *OpStrings[NUM_OVERLOADED_OPERATORS] = {
+ "",
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ Spelling,
+#include "clang/Basic/OperatorKinds.def"
+ };
+
+ OverloadedOperatorKind Kind = Node->getOperator();
+ if (Kind == OO_PlusPlus || Kind == OO_MinusMinus) {
+ if (Node->getNumArgs() == 1) {
+ OS << OpStrings[Kind] << ' ';
+ PrintExpr(Node->getArg(0));
+ } else {
+ PrintExpr(Node->getArg(0));
+ OS << ' ' << OpStrings[Kind];
+ }
+ } else if (Kind == OO_Arrow) {
+ PrintExpr(Node->getArg(0));
+ } else if (Kind == OO_Call) {
+ PrintExpr(Node->getArg(0));
+ OS << '(';
+ for (unsigned ArgIdx = 1; ArgIdx < Node->getNumArgs(); ++ArgIdx) {
+ if (ArgIdx > 1)
+ OS << ", ";
+ if (!isa<CXXDefaultArgExpr>(Node->getArg(ArgIdx)))
+ PrintExpr(Node->getArg(ArgIdx));
+ }
+ OS << ')';
+ } else if (Kind == OO_Subscript) {
+ PrintExpr(Node->getArg(0));
+ OS << '[';
+ PrintExpr(Node->getArg(1));
+ OS << ']';
+ } else if (Node->getNumArgs() == 1) {
+ OS << OpStrings[Kind] << ' ';
+ PrintExpr(Node->getArg(0));
+ } else if (Node->getNumArgs() == 2) {
+ PrintExpr(Node->getArg(0));
+ OS << ' ' << OpStrings[Kind] << ' ';
+ PrintExpr(Node->getArg(1));
+ } else {
+ llvm_unreachable("unknown overloaded operator");
+ }
+}
+
+void StmtPrinter::VisitCXXMemberCallExpr(CXXMemberCallExpr *Node) {
+ // If we have a conversion operator call only print the argument.
+ CXXMethodDecl *MD = Node->getMethodDecl();
+ if (MD && isa<CXXConversionDecl>(MD)) {
+ PrintExpr(Node->getImplicitObjectArgument());
+ return;
+ }
+ VisitCallExpr(cast<CallExpr>(Node));
+}
+
+void StmtPrinter::VisitCUDAKernelCallExpr(CUDAKernelCallExpr *Node) {
+ PrintExpr(Node->getCallee());
+ OS << "<<<";
+ PrintCallArgs(Node->getConfig());
+ OS << ">>>(";
+ PrintCallArgs(Node);
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXNamedCastExpr(CXXNamedCastExpr *Node) {
+ OS << Node->getCastName() << '<';
+ Node->getTypeAsWritten().print(OS, Policy);
+ OS << ">(";
+ PrintExpr(Node->getSubExpr());
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXStaticCastExpr(CXXStaticCastExpr *Node) {
+ VisitCXXNamedCastExpr(Node);
+}
+
+void StmtPrinter::VisitCXXDynamicCastExpr(CXXDynamicCastExpr *Node) {
+ VisitCXXNamedCastExpr(Node);
+}
+
+void StmtPrinter::VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *Node) {
+ VisitCXXNamedCastExpr(Node);
+}
+
+void StmtPrinter::VisitCXXConstCastExpr(CXXConstCastExpr *Node) {
+ VisitCXXNamedCastExpr(Node);
+}
+
+void StmtPrinter::VisitCXXTypeidExpr(CXXTypeidExpr *Node) {
+ OS << "typeid(";
+ if (Node->isTypeOperand()) {
+ Node->getTypeOperandSourceInfo()->getType().print(OS, Policy);
+ } else {
+ PrintExpr(Node->getExprOperand());
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXUuidofExpr(CXXUuidofExpr *Node) {
+ OS << "__uuidof(";
+ if (Node->isTypeOperand()) {
+ Node->getTypeOperandSourceInfo()->getType().print(OS, Policy);
+ } else {
+ PrintExpr(Node->getExprOperand());
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitMSPropertyRefExpr(MSPropertyRefExpr *Node) {
+ PrintExpr(Node->getBaseExpr());
+ if (Node->isArrow())
+ OS << "->";
+ else
+ OS << ".";
+ if (NestedNameSpecifier *Qualifier =
+ Node->getQualifierLoc().getNestedNameSpecifier())
+ Qualifier->print(OS, Policy);
+ OS << Node->getPropertyDecl()->getDeclName();
+}
+
+void StmtPrinter::VisitMSPropertySubscriptExpr(MSPropertySubscriptExpr *Node) {
+ PrintExpr(Node->getBase());
+ OS << "[";
+ PrintExpr(Node->getIdx());
+ OS << "]";
+}
+
+void StmtPrinter::VisitUserDefinedLiteral(UserDefinedLiteral *Node) {
+ switch (Node->getLiteralOperatorKind()) {
+ case UserDefinedLiteral::LOK_Raw:
+ OS << cast<StringLiteral>(Node->getArg(0)->IgnoreImpCasts())->getString();
+ break;
+ case UserDefinedLiteral::LOK_Template: {
+ DeclRefExpr *DRE = cast<DeclRefExpr>(Node->getCallee()->IgnoreImpCasts());
+ const TemplateArgumentList *Args =
+ cast<FunctionDecl>(DRE->getDecl())->getTemplateSpecializationArgs();
+ assert(Args);
+
+ if (Args->size() != 1) {
+ OS << "operator\"\"" << Node->getUDSuffix()->getName();
+ TemplateSpecializationType::PrintTemplateArgumentList(
+ OS, Args->data(), Args->size(), Policy);
+ OS << "()";
+ return;
+ }
+
+ const TemplateArgument &Pack = Args->get(0);
+ for (const auto &P : Pack.pack_elements()) {
+ char C = (char)P.getAsIntegral().getZExtValue();
+ OS << C;
+ }
+ break;
+ }
+ case UserDefinedLiteral::LOK_Integer: {
+ // Print integer literal without suffix.
+ IntegerLiteral *Int = cast<IntegerLiteral>(Node->getCookedLiteral());
+ OS << Int->getValue().toString(10, /*isSigned*/false);
+ break;
+ }
+ case UserDefinedLiteral::LOK_Floating: {
+ // Print floating literal without suffix.
+ FloatingLiteral *Float = cast<FloatingLiteral>(Node->getCookedLiteral());
+ PrintFloatingLiteral(OS, Float, /*PrintSuffix=*/false);
+ break;
+ }
+ case UserDefinedLiteral::LOK_String:
+ case UserDefinedLiteral::LOK_Character:
+ PrintExpr(Node->getCookedLiteral());
+ break;
+ }
+ OS << Node->getUDSuffix()->getName();
+}
+
+void StmtPrinter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *Node) {
+ OS << (Node->getValue() ? "true" : "false");
+}
+
+void StmtPrinter::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *Node) {
+ OS << "nullptr";
+}
+
+void StmtPrinter::VisitCXXThisExpr(CXXThisExpr *Node) {
+ OS << "this";
+}
+
+void StmtPrinter::VisitCXXThrowExpr(CXXThrowExpr *Node) {
+ if (!Node->getSubExpr())
+ OS << "throw";
+ else {
+ OS << "throw ";
+ PrintExpr(Node->getSubExpr());
+ }
+}
+
+void StmtPrinter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *Node) {
+ // Nothing to print: we picked up the default argument.
+}
+
+void StmtPrinter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *Node) {
+ // Nothing to print: we picked up the default initializer.
+}
+
+void StmtPrinter::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node) {
+ Node->getType().print(OS, Policy);
+ // If there are no parens, this is list-initialization, and the braces are
+ // part of the syntax of the inner construct.
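+ // E.g. "T(42)" prints its own parentheses here, while for "T{42}" the
+ // braces come from the inner InitListExpr subexpression.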
+ if (Node->getLParenLoc().isValid())
+ OS << "(";
+ PrintExpr(Node->getSubExpr());
+ if (Node->getLParenLoc().isValid())
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *Node) {
+ PrintExpr(Node->getSubExpr());
+}
+
+void StmtPrinter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *Node) {
+ Node->getType().print(OS, Policy);
+ if (Node->isStdInitListInitialization())
+ /* Nothing to do; braces are part of creating the std::initializer_list. */;
+ else if (Node->isListInitialization())
+ OS << "{";
+ else
+ OS << "(";
+ for (CXXTemporaryObjectExpr::arg_iterator Arg = Node->arg_begin(),
+ ArgEnd = Node->arg_end();
+ Arg != ArgEnd; ++Arg) {
+ if ((*Arg)->isDefaultArgument())
+ break;
+ if (Arg != Node->arg_begin())
+ OS << ", ";
+ PrintExpr(*Arg);
+ }
+ if (Node->isStdInitListInitialization())
+ /* See above. */;
+ else if (Node->isListInitialization())
+ OS << "}";
+ else
+ OS << ")";
+}
+
+void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) {
+ OS << '[';
+ bool NeedComma = false;
+ switch (Node->getCaptureDefault()) {
+ case LCD_None:
+ break;
+
+ case LCD_ByCopy:
+ OS << '=';
+ NeedComma = true;
+ break;
+
+ case LCD_ByRef:
+ OS << '&';
+ NeedComma = true;
+ break;
+ }
+ for (LambdaExpr::capture_iterator C = Node->explicit_capture_begin(),
+ CEnd = Node->explicit_capture_end();
+ C != CEnd;
+ ++C) {
+ if (NeedComma)
+ OS << ", ";
+ NeedComma = true;
+
+ switch (C->getCaptureKind()) {
+ case LCK_This:
+ OS << "this";
+ break;
+
+ case LCK_ByRef:
+ if (Node->getCaptureDefault() != LCD_ByRef || Node->isInitCapture(C))
+ OS << '&';
+ OS << C->getCapturedVar()->getName();
+ break;
+
+ case LCK_ByCopy:
+ OS << C->getCapturedVar()->getName();
+ break;
+ case LCK_VLAType:
+ llvm_unreachable("VLA type in explicit captures.");
+ }
+
+ if (Node->isInitCapture(C))
+ PrintExpr(C->getCapturedVar()->getInit());
+ }
+ OS << ']';
+
+ if (Node->hasExplicitParameters()) {
+ OS << " (";
+ CXXMethodDecl *Method = Node->getCallOperator();
+ NeedComma = false;
+ for (auto P : Method->params()) {
+ if (NeedComma) {
+ OS << ", ";
+ } else {
+ NeedComma = true;
+ }
+ std::string ParamStr = P->getNameAsString();
+ P->getOriginalType().print(OS, Policy, ParamStr);
+ }
+ if (Method->isVariadic()) {
+ if (NeedComma)
+ OS << ", ";
+ OS << "...";
+ }
+ OS << ')';
+
+ if (Node->isMutable())
+ OS << " mutable";
+
+ const FunctionProtoType *Proto
+ = Method->getType()->getAs<FunctionProtoType>();
+ Proto->printExceptionSpecification(OS, Policy);
+
+ // FIXME: Attributes
+
+ // Print the trailing return type if it was specified in the source.
+ if (Node->hasExplicitResultType()) {
+ OS << " -> ";
+ Proto->getReturnType().print(OS, Policy);
+ }
+ }
+
+ // Print the body.
+ CompoundStmt *Body = Node->getBody();
+ OS << ' ';
+ PrintStmt(Body);
+}
+
+void StmtPrinter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *Node) {
+ if (TypeSourceInfo *TSInfo = Node->getTypeSourceInfo())
+ TSInfo->getType().print(OS, Policy);
+ else
+ Node->getType().print(OS, Policy);
+ OS << "()";
+}
+
+void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) {
+ if (E->isGlobalNew())
+ OS << "::";
+ OS << "new ";
+ unsigned NumPlace = E->getNumPlacementArgs();
+ if (NumPlace > 0 && !isa<CXXDefaultArgExpr>(E->getPlacementArg(0))) {
+ OS << "(";
+ PrintExpr(E->getPlacementArg(0));
+ for (unsigned i = 1; i < NumPlace; ++i) {
+ if (isa<CXXDefaultArgExpr>(E->getPlacementArg(i)))
+ break;
+ OS << ", ";
+ PrintExpr(E->getPlacementArg(i));
+ }
+ OS << ") ";
+ }
+ if (E->isParenTypeId())
+ OS << "(";
+ std::string TypeS;
+ if (Expr *Size = E->getArraySize()) {
+ llvm::raw_string_ostream s(TypeS);
+ s << '[';
+ Size->printPretty(s, Helper, Policy);
+ s << ']';
+ }
+ E->getAllocatedType().print(OS, Policy, TypeS);
+ if (E->isParenTypeId())
+ OS << ")";
+
+ CXXNewExpr::InitializationStyle InitStyle = E->getInitializationStyle();
+ if (InitStyle) {
+ if (InitStyle == CXXNewExpr::CallInit)
+ OS << "(";
+ PrintExpr(E->getInitializer());
+ if (InitStyle == CXXNewExpr::CallInit)
+ OS << ")";
+ }
+}
+
+void StmtPrinter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ if (E->isGlobalDelete())
+ OS << "::";
+ OS << "delete ";
+ if (E->isArrayForm())
+ OS << "[] ";
+ PrintExpr(E->getArgument());
+}
+
+void StmtPrinter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
+ PrintExpr(E->getBase());
+ if (E->isArrow())
+ OS << "->";
+ else
+ OS << '.';
+ if (E->getQualifier())
+ E->getQualifier()->print(OS, Policy);
+ OS << "~";
+
+ if (IdentifierInfo *II = E->getDestroyedTypeIdentifier())
+ OS << II->getName();
+ else
+ E->getDestroyedType().print(OS, Policy);
+}
+
+void StmtPrinter::VisitCXXConstructExpr(CXXConstructExpr *E) {
+ if (E->isListInitialization() && !E->isStdInitListInitialization())
+ OS << "{";
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
+ if (isa<CXXDefaultArgExpr>(E->getArg(i))) {
+ // Don't print any defaulted arguments
+ break;
+ }
+
+ if (i) OS << ", ";
+ PrintExpr(E->getArg(i));
+ }
+
+ if (E->isListInitialization() && !E->isStdInitListInitialization())
+ OS << "}";
+}
+
+void StmtPrinter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
+ PrintExpr(E->getSubExpr());
+}
+
+void StmtPrinter::VisitExprWithCleanups(ExprWithCleanups *E) {
+ // Just forward to the subexpression.
+ PrintExpr(E->getSubExpr());
+}
+
+void
+StmtPrinter::VisitCXXUnresolvedConstructExpr(
+ CXXUnresolvedConstructExpr *Node) {
+ Node->getTypeAsWritten().print(OS, Policy);
+ OS << "(";
+ for (CXXUnresolvedConstructExpr::arg_iterator Arg = Node->arg_begin(),
+ ArgEnd = Node->arg_end();
+ Arg != ArgEnd; ++Arg) {
+ if (Arg != Node->arg_begin())
+ OS << ", ";
+ PrintExpr(*Arg);
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXDependentScopeMemberExpr(
+ CXXDependentScopeMemberExpr *Node) {
+ if (!Node->isImplicitAccess()) {
+ PrintExpr(Node->getBase());
+ OS << (Node->isArrow() ? "->" : ".");
+ }
+ if (NestedNameSpecifier *Qualifier = Node->getQualifier())
+ Qualifier->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
+ OS << Node->getMemberNameInfo();
+ if (Node->hasExplicitTemplateArgs())
+ TemplateSpecializationType::PrintTemplateArgumentList(
+ OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy);
+}
+
+void StmtPrinter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *Node) {
+ if (!Node->isImplicitAccess()) {
+ PrintExpr(Node->getBase());
+ OS << (Node->isArrow() ? "->" : ".");
+ }
+ if (NestedNameSpecifier *Qualifier = Node->getQualifier())
+ Qualifier->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
+ OS << Node->getMemberNameInfo();
+ if (Node->hasExplicitTemplateArgs())
+ TemplateSpecializationType::PrintTemplateArgumentList(
+ OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy);
+}
+
+static const char *getTypeTraitName(TypeTrait TT) {
+ switch (TT) {
+#define TYPE_TRAIT_1(Spelling, Name, Key) \
+  case clang::UTT_##Name: return #Spelling;
+#define TYPE_TRAIT_2(Spelling, Name, Key) \
+  case clang::BTT_##Name: return #Spelling;
+#define TYPE_TRAIT_N(Spelling, Name, Key) \
+  case clang::TT_##Name: return #Spelling;
+#include "clang/Basic/TokenKinds.def"
+ }
+ llvm_unreachable("Type trait not covered by switch");
+}
+
+static const char *getTypeTraitName(ArrayTypeTrait ATT) {
+ switch (ATT) {
+ case ATT_ArrayRank: return "__array_rank";
+ case ATT_ArrayExtent: return "__array_extent";
+ }
+ llvm_unreachable("Array type trait not covered by switch");
+}
+
+static const char *getExpressionTraitName(ExpressionTrait ET) {
+ switch (ET) {
+ case ET_IsLValueExpr: return "__is_lvalue_expr";
+ case ET_IsRValueExpr: return "__is_rvalue_expr";
+ }
+ llvm_unreachable("Expression type trait not covered by switch");
+}
+
+void StmtPrinter::VisitTypeTraitExpr(TypeTraitExpr *E) {
+ OS << getTypeTraitName(E->getTrait()) << "(";
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) {
+ if (I > 0)
+ OS << ", ";
+ E->getArg(I)->getType().print(OS, Policy);
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
+ OS << getTypeTraitName(E->getTrait()) << '(';
+ E->getQueriedType().print(OS, Policy);
+ OS << ')';
+}
+
+void StmtPrinter::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
+ OS << getExpressionTraitName(E->getTrait()) << '(';
+ PrintExpr(E->getQueriedExpression());
+ OS << ')';
+}
+
+void StmtPrinter::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
+ OS << "noexcept(";
+ PrintExpr(E->getOperand());
+ OS << ")";
+}
+
+void StmtPrinter::VisitPackExpansionExpr(PackExpansionExpr *E) {
+ PrintExpr(E->getPattern());
+ OS << "...";
+}
+
+void StmtPrinter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
+ OS << "sizeof...(" << *E->getPack() << ")";
+}
+
+void StmtPrinter::VisitSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *Node) {
+ OS << *Node->getParameterPack();
+}
+
+void StmtPrinter::VisitSubstNonTypeTemplateParmExpr(
+ SubstNonTypeTemplateParmExpr *Node) {
+ Visit(Node->getReplacement());
+}
+
+void StmtPrinter::VisitFunctionParmPackExpr(FunctionParmPackExpr *E) {
+ OS << *E->getParameterPack();
+}
+
+void StmtPrinter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *Node){
+ PrintExpr(Node->GetTemporaryExpr());
+}
+
+void StmtPrinter::VisitCXXFoldExpr(CXXFoldExpr *E) {
+ OS << "(";
+ if (E->getLHS()) {
+ PrintExpr(E->getLHS());
+ OS << " " << BinaryOperator::getOpcodeStr(E->getOperator()) << " ";
+ }
+ OS << "...";
+ if (E->getRHS()) {
+ OS << " " << BinaryOperator::getOpcodeStr(E->getOperator()) << " ";
+ PrintExpr(E->getRHS());
+ }
+ OS << ")";
+}
+
+// C++ Coroutines TS
+
+void StmtPrinter::VisitCoroutineBodyStmt(CoroutineBodyStmt *S) {
+ Visit(S->getBody());
+}
+
+void StmtPrinter::VisitCoreturnStmt(CoreturnStmt *S) {
+ OS << "co_return";
+ if (S->getOperand()) {
+ OS << " ";
+ Visit(S->getOperand());
+ }
+ OS << ";";
+}
+
+void StmtPrinter::VisitCoawaitExpr(CoawaitExpr *S) {
+ OS << "co_await ";
+ PrintExpr(S->getOperand());
+}
+
+void StmtPrinter::VisitCoyieldExpr(CoyieldExpr *S) {
+ OS << "co_yield ";
+ PrintExpr(S->getOperand());
+}
+
+// Obj-C
+
+void StmtPrinter::VisitObjCStringLiteral(ObjCStringLiteral *Node) {
+ OS << "@";
+ VisitStringLiteral(Node->getString());
+}
+
+void StmtPrinter::VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
+ OS << "@";
+ Visit(E->getSubExpr());
+}
+
+void StmtPrinter::VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
+ OS << "@[ ";
+ ObjCArrayLiteral::child_range Ch = E->children();
+  for (auto I = Ch.begin(), End = Ch.end(); I != End; ++I) {
+ if (I != Ch.begin())
+ OS << ", ";
+ Visit(*I);
+ }
+ OS << " ]";
+}
+
+void StmtPrinter::VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
+ OS << "@{ ";
+ for (unsigned I = 0, N = E->getNumElements(); I != N; ++I) {
+ if (I > 0)
+ OS << ", ";
+
+ ObjCDictionaryElement Element = E->getKeyValueElement(I);
+ Visit(Element.Key);
+ OS << " : ";
+ Visit(Element.Value);
+ if (Element.isPackExpansion())
+ OS << "...";
+ }
+ OS << " }";
+}
+
+void StmtPrinter::VisitObjCEncodeExpr(ObjCEncodeExpr *Node) {
+ OS << "@encode(";
+ Node->getEncodedType().print(OS, Policy);
+ OS << ')';
+}
+
+void StmtPrinter::VisitObjCSelectorExpr(ObjCSelectorExpr *Node) {
+ OS << "@selector(";
+ Node->getSelector().print(OS);
+ OS << ')';
+}
+
+void StmtPrinter::VisitObjCProtocolExpr(ObjCProtocolExpr *Node) {
+ OS << "@protocol(" << *Node->getProtocol() << ')';
+}
+
+void StmtPrinter::VisitObjCMessageExpr(ObjCMessageExpr *Mess) {
+ OS << "[";
+ switch (Mess->getReceiverKind()) {
+ case ObjCMessageExpr::Instance:
+ PrintExpr(Mess->getInstanceReceiver());
+ break;
+
+ case ObjCMessageExpr::Class:
+ Mess->getClassReceiver().print(OS, Policy);
+ break;
+
+ case ObjCMessageExpr::SuperInstance:
+ case ObjCMessageExpr::SuperClass:
+ OS << "Super";
+ break;
+ }
+
+ OS << ' ';
+ Selector selector = Mess->getSelector();
+ if (selector.isUnarySelector()) {
+ OS << selector.getNameForSlot(0);
+ } else {
+ for (unsigned i = 0, e = Mess->getNumArgs(); i != e; ++i) {
+ if (i < selector.getNumArgs()) {
+ if (i > 0) OS << ' ';
+ if (selector.getIdentifierInfoForSlot(i))
+ OS << selector.getIdentifierInfoForSlot(i)->getName() << ':';
+ else
+ OS << ":";
+ }
+ else OS << ", "; // Handle variadic methods.
+
+ PrintExpr(Mess->getArg(i));
+ }
+ }
+ OS << "]";
+}
+
+void StmtPrinter::VisitObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Node) {
+ OS << (Node->getValue() ? "__objc_yes" : "__objc_no");
+}
+
+void
+StmtPrinter::VisitObjCIndirectCopyRestoreExpr(ObjCIndirectCopyRestoreExpr *E) {
+ PrintExpr(E->getSubExpr());
+}
+
+void
+StmtPrinter::VisitObjCBridgedCastExpr(ObjCBridgedCastExpr *E) {
+ OS << '(' << E->getBridgeKindName();
+ E->getType().print(OS, Policy);
+ OS << ')';
+ PrintExpr(E->getSubExpr());
+}
+
+void StmtPrinter::VisitBlockExpr(BlockExpr *Node) {
+ BlockDecl *BD = Node->getBlockDecl();
+ OS << "^";
+
+ const FunctionType *AFT = Node->getFunctionType();
+
+ if (isa<FunctionNoProtoType>(AFT)) {
+ OS << "()";
+ } else if (!BD->param_empty() || cast<FunctionProtoType>(AFT)->isVariadic()) {
+ OS << '(';
+ for (BlockDecl::param_iterator AI = BD->param_begin(),
+ E = BD->param_end(); AI != E; ++AI) {
+ if (AI != BD->param_begin()) OS << ", ";
+ std::string ParamStr = (*AI)->getNameAsString();
+ (*AI)->getType().print(OS, Policy, ParamStr);
+ }
+
+ const FunctionProtoType *FT = cast<FunctionProtoType>(AFT);
+ if (FT->isVariadic()) {
+ if (!BD->param_empty()) OS << ", ";
+ OS << "...";
+ }
+ OS << ')';
+ }
+ OS << "{ }";
+}
+
+void StmtPrinter::VisitOpaqueValueExpr(OpaqueValueExpr *Node) {
+ PrintExpr(Node->getSourceExpr());
+}
+
+void StmtPrinter::VisitTypoExpr(TypoExpr *Node) {
+ // TODO: Print something reasonable for a TypoExpr, if necessary.
+ assert(false && "Cannot print TypoExpr nodes");
+}
+
+void StmtPrinter::VisitAsTypeExpr(AsTypeExpr *Node) {
+ OS << "__builtin_astype(";
+ PrintExpr(Node->getSrcExpr());
+ OS << ", ";
+ Node->getType().print(OS, Policy);
+ OS << ")";
+}
+
+//===----------------------------------------------------------------------===//
+// Stmt method implementations
+//===----------------------------------------------------------------------===//
+
+void Stmt::dumpPretty(const ASTContext &Context) const {
+ printPretty(llvm::errs(), nullptr, PrintingPolicy(Context.getLangOpts()));
+}
+
+void Stmt::printPretty(raw_ostream &OS,
+ PrinterHelper *Helper,
+ const PrintingPolicy &Policy,
+ unsigned Indentation) const {
+ StmtPrinter P(OS, Helper, Policy, Indentation);
+ P.Visit(const_cast<Stmt*>(this));
+}
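+
+// A minimal usage sketch (illustrative; S and Ctx stand for an arbitrary
+// Stmt pointer and its ASTContext, not names defined in this file):
+//
+//   S->printPretty(llvm::outs(), /*Helper=*/nullptr,
+//                  PrintingPolicy(Ctx.getLangOpts()));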
+
+//===----------------------------------------------------------------------===//
+// PrinterHelper
+//===----------------------------------------------------------------------===//
+
+// Implement virtual destructor.
+PrinterHelper::~PrinterHelper() {}
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp b/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp
new file mode 100644
index 0000000..175a43a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp
@@ -0,0 +1,1636 @@
+//===---- StmtProfile.cpp - Profile implementation for Stmt ASTs ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Stmt::Profile method, which builds a unique bit
+// representation that identifies a statement/expression.
+//
+//===----------------------------------------------------------------------===//
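+//
+// A minimal usage sketch (illustrative; S1, S2, and Ctx stand for two Stmt
+// pointers and their ASTContext, not names defined in this file):
+//
+//   llvm::FoldingSetNodeID ID1, ID2;
+//   S1->Profile(ID1, Ctx, /*Canonical=*/true);
+//   S2->Profile(ID2, Ctx, /*Canonical=*/true);
+//   bool StructurallyEquivalent = (ID1 == ID2);
+//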
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprOpenMP.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/ADT/FoldingSet.h"
+using namespace clang;
+
+namespace {
+ class StmtProfiler : public ConstStmtVisitor<StmtProfiler> {
+ llvm::FoldingSetNodeID &ID;
+ const ASTContext &Context;
+ bool Canonical;
+
+ public:
+ StmtProfiler(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ bool Canonical)
+ : ID(ID), Context(Context), Canonical(Canonical) { }
+
+ void VisitStmt(const Stmt *S);
+
+#define STMT(Node, Base) void Visit##Node(const Node *S);
+#include "clang/AST/StmtNodes.inc"
+
+ /// \brief Visit a declaration that is referenced within an expression
+ /// or statement.
+ void VisitDecl(const Decl *D);
+
+ /// \brief Visit a type that is referenced within an expression or
+ /// statement.
+ void VisitType(QualType T);
+
+ /// \brief Visit a name that occurs within an expression or statement.
+ void VisitName(DeclarationName Name);
+
+ /// \brief Visit a nested-name-specifier that occurs within an expression
+ /// or statement.
+ void VisitNestedNameSpecifier(NestedNameSpecifier *NNS);
+
+ /// \brief Visit a template name that occurs within an expression or
+ /// statement.
+ void VisitTemplateName(TemplateName Name);
+
+ /// \brief Visit template arguments that occur within an expression or
+ /// statement.
+ void VisitTemplateArguments(const TemplateArgumentLoc *Args,
+ unsigned NumArgs);
+
+ /// \brief Visit a single template argument.
+ void VisitTemplateArgument(const TemplateArgument &Arg);
+ };
+}
+
+void StmtProfiler::VisitStmt(const Stmt *S) {
+ ID.AddInteger(S->getStmtClass());
+ for (const Stmt *SubStmt : S->children()) {
+ if (SubStmt)
+ Visit(SubStmt);
+ else
+ ID.AddInteger(0);
+ }
+}
+
+void StmtProfiler::VisitDeclStmt(const DeclStmt *S) {
+ VisitStmt(S);
+ for (const auto *D : S->decls())
+ VisitDecl(D);
+}
+
+void StmtProfiler::VisitNullStmt(const NullStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitCompoundStmt(const CompoundStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitSwitchCase(const SwitchCase *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitCaseStmt(const CaseStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitDefaultStmt(const DefaultStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitLabelStmt(const LabelStmt *S) {
+ VisitStmt(S);
+ VisitDecl(S->getDecl());
+}
+
+void StmtProfiler::VisitAttributedStmt(const AttributedStmt *S) {
+ VisitStmt(S);
+ // TODO: maybe visit attributes?
+}
+
+void StmtProfiler::VisitIfStmt(const IfStmt *S) {
+ VisitStmt(S);
+ VisitDecl(S->getConditionVariable());
+}
+
+void StmtProfiler::VisitSwitchStmt(const SwitchStmt *S) {
+ VisitStmt(S);
+ VisitDecl(S->getConditionVariable());
+}
+
+void StmtProfiler::VisitWhileStmt(const WhileStmt *S) {
+ VisitStmt(S);
+ VisitDecl(S->getConditionVariable());
+}
+
+void StmtProfiler::VisitDoStmt(const DoStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitForStmt(const ForStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitGotoStmt(const GotoStmt *S) {
+ VisitStmt(S);
+ VisitDecl(S->getLabel());
+}
+
+void StmtProfiler::VisitIndirectGotoStmt(const IndirectGotoStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitContinueStmt(const ContinueStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitBreakStmt(const BreakStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitReturnStmt(const ReturnStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitGCCAsmStmt(const GCCAsmStmt *S) {
+ VisitStmt(S);
+ ID.AddBoolean(S->isVolatile());
+ ID.AddBoolean(S->isSimple());
+ VisitStringLiteral(S->getAsmString());
+ ID.AddInteger(S->getNumOutputs());
+ for (unsigned I = 0, N = S->getNumOutputs(); I != N; ++I) {
+ ID.AddString(S->getOutputName(I));
+ VisitStringLiteral(S->getOutputConstraintLiteral(I));
+ }
+ ID.AddInteger(S->getNumInputs());
+ for (unsigned I = 0, N = S->getNumInputs(); I != N; ++I) {
+ ID.AddString(S->getInputName(I));
+ VisitStringLiteral(S->getInputConstraintLiteral(I));
+ }
+ ID.AddInteger(S->getNumClobbers());
+ for (unsigned I = 0, N = S->getNumClobbers(); I != N; ++I)
+ VisitStringLiteral(S->getClobberStringLiteral(I));
+}
+
+void StmtProfiler::VisitMSAsmStmt(const MSAsmStmt *S) {
+ // FIXME: Implement MS style inline asm statement profiler.
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitCXXCatchStmt(const CXXCatchStmt *S) {
+ VisitStmt(S);
+ VisitType(S->getCaughtType());
+}
+
+void StmtProfiler::VisitCXXTryStmt(const CXXTryStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitCXXForRangeStmt(const CXXForRangeStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitMSDependentExistsStmt(const MSDependentExistsStmt *S) {
+ VisitStmt(S);
+ ID.AddBoolean(S->isIfExists());
+ VisitNestedNameSpecifier(S->getQualifierLoc().getNestedNameSpecifier());
+ VisitName(S->getNameInfo().getName());
+}
+
+void StmtProfiler::VisitSEHTryStmt(const SEHTryStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitSEHFinallyStmt(const SEHFinallyStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitSEHExceptStmt(const SEHExceptStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitSEHLeaveStmt(const SEHLeaveStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitCapturedStmt(const CapturedStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitObjCAtCatchStmt(const ObjCAtCatchStmt *S) {
+ VisitStmt(S);
+ ID.AddBoolean(S->hasEllipsis());
+ if (S->getCatchParamDecl())
+ VisitType(S->getCatchParamDecl()->getType());
+}
+
+void StmtProfiler::VisitObjCAtFinallyStmt(const ObjCAtFinallyStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitObjCAtTryStmt(const ObjCAtTryStmt *S) {
+ VisitStmt(S);
+}
+
+void
+StmtProfiler::VisitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitObjCAtThrowStmt(const ObjCAtThrowStmt *S) {
+ VisitStmt(S);
+}
+
+void
+StmtProfiler::VisitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt *S) {
+ VisitStmt(S);
+}
+
+namespace {
+class OMPClauseProfiler : public ConstOMPClauseVisitor<OMPClauseProfiler> {
+ StmtProfiler *Profiler;
+ /// \brief Process clauses with list of variables.
+ template <typename T>
+ void VisitOMPClauseList(T *Node);
+
+public:
+ OMPClauseProfiler(StmtProfiler *P) : Profiler(P) { }
+#define OPENMP_CLAUSE(Name, Class) \
+ void Visit##Class(const Class *C);
+#include "clang/Basic/OpenMPKinds.def"
+};
+
+void OMPClauseProfiler::VisitOMPIfClause(const OMPIfClause *C) {
+ if (C->getCondition())
+ Profiler->VisitStmt(C->getCondition());
+}
+
+void OMPClauseProfiler::VisitOMPFinalClause(const OMPFinalClause *C) {
+ if (C->getCondition())
+ Profiler->VisitStmt(C->getCondition());
+}
+
+void OMPClauseProfiler::VisitOMPNumThreadsClause(const OMPNumThreadsClause *C) {
+ if (C->getNumThreads())
+ Profiler->VisitStmt(C->getNumThreads());
+}
+
+void OMPClauseProfiler::VisitOMPSafelenClause(const OMPSafelenClause *C) {
+ if (C->getSafelen())
+ Profiler->VisitStmt(C->getSafelen());
+}
+
+void OMPClauseProfiler::VisitOMPSimdlenClause(const OMPSimdlenClause *C) {
+ if (C->getSimdlen())
+ Profiler->VisitStmt(C->getSimdlen());
+}
+
+void OMPClauseProfiler::VisitOMPCollapseClause(const OMPCollapseClause *C) {
+ if (C->getNumForLoops())
+ Profiler->VisitStmt(C->getNumForLoops());
+}
+
+void OMPClauseProfiler::VisitOMPDefaultClause(const OMPDefaultClause *C) { }
+
+void OMPClauseProfiler::VisitOMPProcBindClause(const OMPProcBindClause *C) { }
+
+void OMPClauseProfiler::VisitOMPScheduleClause(const OMPScheduleClause *C) {
+ if (C->getChunkSize()) {
+ Profiler->VisitStmt(C->getChunkSize());
+ if (C->getHelperChunkSize()) {
+      Profiler->VisitStmt(C->getHelperChunkSize());
+ }
+ }
+}
+
+void OMPClauseProfiler::VisitOMPOrderedClause(const OMPOrderedClause *C) {
+ if (auto *Num = C->getNumForLoops())
+ Profiler->VisitStmt(Num);
+}
+
+void OMPClauseProfiler::VisitOMPNowaitClause(const OMPNowaitClause *) {}
+
+void OMPClauseProfiler::VisitOMPUntiedClause(const OMPUntiedClause *) {}
+
+void OMPClauseProfiler::VisitOMPMergeableClause(const OMPMergeableClause *) {}
+
+void OMPClauseProfiler::VisitOMPReadClause(const OMPReadClause *) {}
+
+void OMPClauseProfiler::VisitOMPWriteClause(const OMPWriteClause *) {}
+
+void OMPClauseProfiler::VisitOMPUpdateClause(const OMPUpdateClause *) {}
+
+void OMPClauseProfiler::VisitOMPCaptureClause(const OMPCaptureClause *) {}
+
+void OMPClauseProfiler::VisitOMPSeqCstClause(const OMPSeqCstClause *) {}
+
+void OMPClauseProfiler::VisitOMPThreadsClause(const OMPThreadsClause *) {}
+
+void OMPClauseProfiler::VisitOMPSIMDClause(const OMPSIMDClause *) {}
+
+void OMPClauseProfiler::VisitOMPNogroupClause(const OMPNogroupClause *) {}
+
+template<typename T>
+void OMPClauseProfiler::VisitOMPClauseList(T *Node) {
+ for (auto *E : Node->varlists()) {
+ Profiler->VisitStmt(E);
+ }
+}
+
+void OMPClauseProfiler::VisitOMPPrivateClause(const OMPPrivateClause *C) {
+ VisitOMPClauseList(C);
+ for (auto *E : C->private_copies()) {
+ Profiler->VisitStmt(E);
+ }
+}
+void
+OMPClauseProfiler::VisitOMPFirstprivateClause(const OMPFirstprivateClause *C) {
+ VisitOMPClauseList(C);
+ for (auto *E : C->private_copies()) {
+ Profiler->VisitStmt(E);
+ }
+ for (auto *E : C->inits()) {
+ Profiler->VisitStmt(E);
+ }
+}
+void
+OMPClauseProfiler::VisitOMPLastprivateClause(const OMPLastprivateClause *C) {
+ VisitOMPClauseList(C);
+ for (auto *E : C->source_exprs()) {
+ Profiler->VisitStmt(E);
+ }
+ for (auto *E : C->destination_exprs()) {
+ Profiler->VisitStmt(E);
+ }
+ for (auto *E : C->assignment_ops()) {
+ Profiler->VisitStmt(E);
+ }
+}
+void OMPClauseProfiler::VisitOMPSharedClause(const OMPSharedClause *C) {
+ VisitOMPClauseList(C);
+}
+void OMPClauseProfiler::VisitOMPReductionClause(
+ const OMPReductionClause *C) {
+ Profiler->VisitNestedNameSpecifier(
+ C->getQualifierLoc().getNestedNameSpecifier());
+ Profiler->VisitName(C->getNameInfo().getName());
+ VisitOMPClauseList(C);
+ for (auto *E : C->privates()) {
+ Profiler->VisitStmt(E);
+ }
+ for (auto *E : C->lhs_exprs()) {
+ Profiler->VisitStmt(E);
+ }
+ for (auto *E : C->rhs_exprs()) {
+ Profiler->VisitStmt(E);
+ }
+ for (auto *E : C->reduction_ops()) {
+ Profiler->VisitStmt(E);
+ }
+}
+void OMPClauseProfiler::VisitOMPLinearClause(const OMPLinearClause *C) {
+ VisitOMPClauseList(C);
+ for (auto *E : C->privates()) {
+ Profiler->VisitStmt(E);
+ }
+ for (auto *E : C->inits()) {
+ Profiler->VisitStmt(E);
+ }
+ for (auto *E : C->updates()) {
+ Profiler->VisitStmt(E);
+ }
+ for (auto *E : C->finals()) {
+ Profiler->VisitStmt(E);
+ }
+ Profiler->VisitStmt(C->getStep());
+ Profiler->VisitStmt(C->getCalcStep());
+}
+void OMPClauseProfiler::VisitOMPAlignedClause(const OMPAlignedClause *C) {
+ VisitOMPClauseList(C);
+ Profiler->VisitStmt(C->getAlignment());
+}
+void OMPClauseProfiler::VisitOMPCopyinClause(const OMPCopyinClause *C) {
+ VisitOMPClauseList(C);
+ for (auto *E : C->source_exprs()) {
+ Profiler->VisitStmt(E);
+ }
+ for (auto *E : C->destination_exprs()) {
+ Profiler->VisitStmt(E);
+ }
+ for (auto *E : C->assignment_ops()) {
+ Profiler->VisitStmt(E);
+ }
+}
+void
+OMPClauseProfiler::VisitOMPCopyprivateClause(const OMPCopyprivateClause *C) {
+ VisitOMPClauseList(C);
+ for (auto *E : C->source_exprs()) {
+ Profiler->VisitStmt(E);
+ }
+ for (auto *E : C->destination_exprs()) {
+ Profiler->VisitStmt(E);
+ }
+ for (auto *E : C->assignment_ops()) {
+ Profiler->VisitStmt(E);
+ }
+}
+void OMPClauseProfiler::VisitOMPFlushClause(const OMPFlushClause *C) {
+ VisitOMPClauseList(C);
+}
+void OMPClauseProfiler::VisitOMPDependClause(const OMPDependClause *C) {
+ VisitOMPClauseList(C);
+}
+void OMPClauseProfiler::VisitOMPDeviceClause(const OMPDeviceClause *C) {
+ Profiler->VisitStmt(C->getDevice());
+}
+void OMPClauseProfiler::VisitOMPMapClause(const OMPMapClause *C) {
+ VisitOMPClauseList(C);
+}
+void OMPClauseProfiler::VisitOMPNumTeamsClause(const OMPNumTeamsClause *C) {
+ Profiler->VisitStmt(C->getNumTeams());
+}
+void OMPClauseProfiler::VisitOMPThreadLimitClause(
+ const OMPThreadLimitClause *C) {
+ Profiler->VisitStmt(C->getThreadLimit());
+}
+void OMPClauseProfiler::VisitOMPPriorityClause(const OMPPriorityClause *C) {
+ Profiler->VisitStmt(C->getPriority());
+}
+void OMPClauseProfiler::VisitOMPGrainsizeClause(const OMPGrainsizeClause *C) {
+ Profiler->VisitStmt(C->getGrainsize());
+}
+void OMPClauseProfiler::VisitOMPNumTasksClause(const OMPNumTasksClause *C) {
+ Profiler->VisitStmt(C->getNumTasks());
+}
+void OMPClauseProfiler::VisitOMPHintClause(const OMPHintClause *C) {
+ Profiler->VisitStmt(C->getHint());
+}
+}
+
+void
+StmtProfiler::VisitOMPExecutableDirective(const OMPExecutableDirective *S) {
+ VisitStmt(S);
+ OMPClauseProfiler P(this);
+ ArrayRef<OMPClause *> Clauses = S->clauses();
+ for (ArrayRef<OMPClause *>::iterator I = Clauses.begin(), E = Clauses.end();
+ I != E; ++I)
+ if (*I)
+ P.Visit(*I);
+}
+
+void StmtProfiler::VisitOMPLoopDirective(const OMPLoopDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPParallelDirective(const OMPParallelDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPSimdDirective(const OMPSimdDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPForDirective(const OMPForDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPForSimdDirective(const OMPForSimdDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPSectionsDirective(const OMPSectionsDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPSectionDirective(const OMPSectionDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPSingleDirective(const OMPSingleDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPMasterDirective(const OMPMasterDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPCriticalDirective(const OMPCriticalDirective *S) {
+ VisitOMPExecutableDirective(S);
+ VisitName(S->getDirectiveName().getName());
+}
+
+void
+StmtProfiler::VisitOMPParallelForDirective(const OMPParallelForDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPParallelForSimdDirective(
+ const OMPParallelForSimdDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPParallelSectionsDirective(
+ const OMPParallelSectionsDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPTaskDirective(const OMPTaskDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPTaskyieldDirective(const OMPTaskyieldDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPBarrierDirective(const OMPBarrierDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPTaskwaitDirective(const OMPTaskwaitDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPTaskgroupDirective(const OMPTaskgroupDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPFlushDirective(const OMPFlushDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPOrderedDirective(const OMPOrderedDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPAtomicDirective(const OMPAtomicDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPTargetDirective(const OMPTargetDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPTargetDataDirective(const OMPTargetDataDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPTeamsDirective(const OMPTeamsDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPCancellationPointDirective(
+ const OMPCancellationPointDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPCancelDirective(const OMPCancelDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPTaskLoopDirective(const OMPTaskLoopDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPTaskLoopSimdDirective(
+ const OMPTaskLoopSimdDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPDistributeDirective(
+ const OMPDistributeDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitExpr(const Expr *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitDeclRefExpr(const DeclRefExpr *S) {
+ VisitExpr(S);
+ if (!Canonical)
+ VisitNestedNameSpecifier(S->getQualifier());
+ VisitDecl(S->getDecl());
+ if (!Canonical)
+ VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
+}
+
+void StmtProfiler::VisitPredefinedExpr(const PredefinedExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getIdentType());
+}
+
+void StmtProfiler::VisitIntegerLiteral(const IntegerLiteral *S) {
+ VisitExpr(S);
+ S->getValue().Profile(ID);
+ ID.AddInteger(S->getType()->castAs<BuiltinType>()->getKind());
+}
+
+void StmtProfiler::VisitCharacterLiteral(const CharacterLiteral *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getKind());
+ ID.AddInteger(S->getValue());
+}
+
+void StmtProfiler::VisitFloatingLiteral(const FloatingLiteral *S) {
+ VisitExpr(S);
+ S->getValue().Profile(ID);
+ ID.AddBoolean(S->isExact());
+ ID.AddInteger(S->getType()->castAs<BuiltinType>()->getKind());
+}
+
+void StmtProfiler::VisitImaginaryLiteral(const ImaginaryLiteral *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitStringLiteral(const StringLiteral *S) {
+ VisitExpr(S);
+ ID.AddString(S->getBytes());
+ ID.AddInteger(S->getKind());
+}
+
+void StmtProfiler::VisitParenExpr(const ParenExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitParenListExpr(const ParenListExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitUnaryOperator(const UnaryOperator *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getOpcode());
+}
+
+void StmtProfiler::VisitOffsetOfExpr(const OffsetOfExpr *S) {
+ VisitType(S->getTypeSourceInfo()->getType());
+ unsigned n = S->getNumComponents();
+ for (unsigned i = 0; i < n; ++i) {
+ const OffsetOfNode &ON = S->getComponent(i);
+ ID.AddInteger(ON.getKind());
+ switch (ON.getKind()) {
+ case OffsetOfNode::Array:
+ // Expressions handled below.
+ break;
+
+ case OffsetOfNode::Field:
+ VisitDecl(ON.getField());
+ break;
+
+ case OffsetOfNode::Identifier:
+ ID.AddPointer(ON.getFieldName());
+ break;
+
+ case OffsetOfNode::Base:
+ // These nodes are implicit, and therefore don't need profiling.
+ break;
+ }
+ }
+
+ VisitExpr(S);
+}
+
+void
+StmtProfiler::VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getKind());
+ if (S->isArgumentType())
+ VisitType(S->getArgumentType());
+}
+
+void StmtProfiler::VisitArraySubscriptExpr(const ArraySubscriptExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitOMPArraySectionExpr(const OMPArraySectionExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCallExpr(const CallExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitMemberExpr(const MemberExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getMemberDecl());
+ if (!Canonical)
+ VisitNestedNameSpecifier(S->getQualifier());
+ ID.AddBoolean(S->isArrow());
+}
+
+void StmtProfiler::VisitCompoundLiteralExpr(const CompoundLiteralExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->isFileScope());
+}
+
+void StmtProfiler::VisitCastExpr(const CastExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitImplicitCastExpr(const ImplicitCastExpr *S) {
+ VisitCastExpr(S);
+ ID.AddInteger(S->getValueKind());
+}
+
+void StmtProfiler::VisitExplicitCastExpr(const ExplicitCastExpr *S) {
+ VisitCastExpr(S);
+ VisitType(S->getTypeAsWritten());
+}
+
+void StmtProfiler::VisitCStyleCastExpr(const CStyleCastExpr *S) {
+ VisitExplicitCastExpr(S);
+}
+
+void StmtProfiler::VisitBinaryOperator(const BinaryOperator *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getOpcode());
+}
+
+void
+StmtProfiler::VisitCompoundAssignOperator(const CompoundAssignOperator *S) {
+ VisitBinaryOperator(S);
+}
+
+void StmtProfiler::VisitConditionalOperator(const ConditionalOperator *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitBinaryConditionalOperator(
+ const BinaryConditionalOperator *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitAddrLabelExpr(const AddrLabelExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getLabel());
+}
+
+void StmtProfiler::VisitStmtExpr(const StmtExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitShuffleVectorExpr(const ShuffleVectorExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitConvertVectorExpr(const ConvertVectorExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitChooseExpr(const ChooseExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitGNUNullExpr(const GNUNullExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitVAArgExpr(const VAArgExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitInitListExpr(const InitListExpr *S) {
+ if (S->getSyntacticForm()) {
+ VisitInitListExpr(S->getSyntacticForm());
+ return;
+ }
+
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitDesignatedInitExpr(const DesignatedInitExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->usesGNUSyntax());
+ for (DesignatedInitExpr::const_designators_iterator D =
+ S->designators_begin(), DEnd = S->designators_end();
+ D != DEnd; ++D) {
+ if (D->isFieldDesignator()) {
+ ID.AddInteger(0);
+ VisitName(D->getFieldName());
+ continue;
+ }
+
+ if (D->isArrayDesignator()) {
+ ID.AddInteger(1);
+ } else {
+ assert(D->isArrayRangeDesignator());
+ ID.AddInteger(2);
+ }
+ ID.AddInteger(D->getFirstExprIndex());
+ }
+}
+
+// Because VisitInitListExpr() profiles only the syntactic form of an
+// InitListExpr, a DesignatedInitUpdateExpr should never be encountered here.
+void StmtProfiler::VisitDesignatedInitUpdateExpr(
+ const DesignatedInitUpdateExpr *S) {
+ llvm_unreachable("Unexpected DesignatedInitUpdateExpr in syntactic form of "
+ "initializer");
+}
+
+void StmtProfiler::VisitNoInitExpr(const NoInitExpr *S) {
+ llvm_unreachable("Unexpected NoInitExpr in syntactic form of initializer");
+}
+
+void StmtProfiler::VisitImplicitValueInitExpr(const ImplicitValueInitExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitExtVectorElementExpr(const ExtVectorElementExpr *S) {
+ VisitExpr(S);
+ VisitName(&S->getAccessor());
+}
+
+void StmtProfiler::VisitBlockExpr(const BlockExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getBlockDecl());
+}
+
+void StmtProfiler::VisitGenericSelectionExpr(const GenericSelectionExpr *S) {
+ VisitExpr(S);
+ for (unsigned i = 0; i != S->getNumAssocs(); ++i) {
+ QualType T = S->getAssocType(i);
+ if (T.isNull())
+ ID.AddPointer(nullptr);
+ else
+ VisitType(T);
+ VisitExpr(S->getAssocExpr(i));
+ }
+}
+
+void StmtProfiler::VisitPseudoObjectExpr(const PseudoObjectExpr *S) {
+ VisitExpr(S);
+ for (PseudoObjectExpr::const_semantics_iterator
+ i = S->semantics_begin(), e = S->semantics_end(); i != e; ++i)
+ // Normally, we would not profile the source expressions of OVEs.
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(*i))
+ Visit(OVE->getSourceExpr());
+}
+
+void StmtProfiler::VisitAtomicExpr(const AtomicExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getOp());
+}
+
+static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
+ UnaryOperatorKind &UnaryOp,
+ BinaryOperatorKind &BinaryOp) {
+ switch (S->getOperator()) {
+ case OO_None:
+ case OO_New:
+ case OO_Delete:
+ case OO_Array_New:
+ case OO_Array_Delete:
+ case OO_Arrow:
+ case OO_Call:
+ case OO_Conditional:
+ case OO_Coawait:
+ case NUM_OVERLOADED_OPERATORS:
+ llvm_unreachable("Invalid operator call kind");
+
+ case OO_Plus:
+ if (S->getNumArgs() == 1) {
+ UnaryOp = UO_Plus;
+ return Stmt::UnaryOperatorClass;
+ }
+
+ BinaryOp = BO_Add;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Minus:
+ if (S->getNumArgs() == 1) {
+ UnaryOp = UO_Minus;
+ return Stmt::UnaryOperatorClass;
+ }
+
+ BinaryOp = BO_Sub;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Star:
+ if (S->getNumArgs() == 1) {
+ UnaryOp = UO_Deref;
+ return Stmt::UnaryOperatorClass;
+ }
+
+ BinaryOp = BO_Mul;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Slash:
+ BinaryOp = BO_Div;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Percent:
+ BinaryOp = BO_Rem;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Caret:
+ BinaryOp = BO_Xor;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Amp:
+ if (S->getNumArgs() == 1) {
+ UnaryOp = UO_AddrOf;
+ return Stmt::UnaryOperatorClass;
+ }
+
+ BinaryOp = BO_And;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Pipe:
+ BinaryOp = BO_Or;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Tilde:
+ UnaryOp = UO_Not;
+ return Stmt::UnaryOperatorClass;
+
+ case OO_Exclaim:
+ UnaryOp = UO_LNot;
+ return Stmt::UnaryOperatorClass;
+
+ case OO_Equal:
+ BinaryOp = BO_Assign;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Less:
+ BinaryOp = BO_LT;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Greater:
+ BinaryOp = BO_GT;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_PlusEqual:
+ BinaryOp = BO_AddAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_MinusEqual:
+ BinaryOp = BO_SubAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_StarEqual:
+ BinaryOp = BO_MulAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_SlashEqual:
+ BinaryOp = BO_DivAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_PercentEqual:
+ BinaryOp = BO_RemAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_CaretEqual:
+ BinaryOp = BO_XorAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_AmpEqual:
+ BinaryOp = BO_AndAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_PipeEqual:
+ BinaryOp = BO_OrAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_LessLess:
+ BinaryOp = BO_Shl;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_GreaterGreater:
+ BinaryOp = BO_Shr;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_LessLessEqual:
+ BinaryOp = BO_ShlAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_GreaterGreaterEqual:
+ BinaryOp = BO_ShrAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_EqualEqual:
+ BinaryOp = BO_EQ;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_ExclaimEqual:
+ BinaryOp = BO_NE;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_LessEqual:
+ BinaryOp = BO_LE;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_GreaterEqual:
+ BinaryOp = BO_GE;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_AmpAmp:
+ BinaryOp = BO_LAnd;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_PipePipe:
+ BinaryOp = BO_LOr;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_PlusPlus:
+ UnaryOp = S->getNumArgs() == 1? UO_PreInc
+ : UO_PostInc;
+ return Stmt::UnaryOperatorClass;
+
+ case OO_MinusMinus:
+ UnaryOp = S->getNumArgs() == 1? UO_PreDec
+ : UO_PostDec;
+ return Stmt::UnaryOperatorClass;
+
+ case OO_Comma:
+ BinaryOp = BO_Comma;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_ArrowStar:
+ BinaryOp = BO_PtrMemI;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Subscript:
+ return Stmt::ArraySubscriptExprClass;
+ }
+
+ llvm_unreachable("Invalid overloaded operator expression");
+}
+
+void StmtProfiler::VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *S) {
+ if (S->isTypeDependent()) {
+ // Type-dependent operator calls are profiled like their underlying
+ // syntactic operator.
+ UnaryOperatorKind UnaryOp = UO_Extension;
+ BinaryOperatorKind BinaryOp = BO_Comma;
+ Stmt::StmtClass SC = DecodeOperatorCall(S, UnaryOp, BinaryOp);
+
+ ID.AddInteger(SC);
+ for (unsigned I = 0, N = S->getNumArgs(); I != N; ++I)
+ Visit(S->getArg(I));
+ if (SC == Stmt::UnaryOperatorClass)
+ ID.AddInteger(UnaryOp);
+ else if (SC == Stmt::BinaryOperatorClass ||
+ SC == Stmt::CompoundAssignOperatorClass)
+ ID.AddInteger(BinaryOp);
+ else
+ assert(SC == Stmt::ArraySubscriptExprClass);
+
+ return;
+ }
+
+ VisitCallExpr(S);
+ ID.AddInteger(S->getOperator());
+}
+
+void StmtProfiler::VisitCXXMemberCallExpr(const CXXMemberCallExpr *S) {
+ VisitCallExpr(S);
+}
+
+void StmtProfiler::VisitCUDAKernelCallExpr(const CUDAKernelCallExpr *S) {
+ VisitCallExpr(S);
+}
+
+void StmtProfiler::VisitAsTypeExpr(const AsTypeExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCXXNamedCastExpr(const CXXNamedCastExpr *S) {
+ VisitExplicitCastExpr(S);
+}
+
+void StmtProfiler::VisitCXXStaticCastExpr(const CXXStaticCastExpr *S) {
+ VisitCXXNamedCastExpr(S);
+}
+
+void StmtProfiler::VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *S) {
+ VisitCXXNamedCastExpr(S);
+}
+
+void
+StmtProfiler::VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *S) {
+ VisitCXXNamedCastExpr(S);
+}
+
+void StmtProfiler::VisitCXXConstCastExpr(const CXXConstCastExpr *S) {
+ VisitCXXNamedCastExpr(S);
+}
+
+void StmtProfiler::VisitUserDefinedLiteral(const UserDefinedLiteral *S) {
+ VisitCallExpr(S);
+}
+
+void StmtProfiler::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->getValue());
+}
+
+void StmtProfiler::VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCXXStdInitializerListExpr(
+ const CXXStdInitializerListExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCXXTypeidExpr(const CXXTypeidExpr *S) {
+ VisitExpr(S);
+ if (S->isTypeOperand())
+ VisitType(S->getTypeOperandSourceInfo()->getType());
+}
+
+void StmtProfiler::VisitCXXUuidofExpr(const CXXUuidofExpr *S) {
+ VisitExpr(S);
+ if (S->isTypeOperand())
+ VisitType(S->getTypeOperandSourceInfo()->getType());
+}
+
+void StmtProfiler::VisitMSPropertyRefExpr(const MSPropertyRefExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getPropertyDecl());
+}
+
+void StmtProfiler::VisitMSPropertySubscriptExpr(
+ const MSPropertySubscriptExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCXXThisExpr(const CXXThisExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->isImplicit());
+}
+
+void StmtProfiler::VisitCXXThrowExpr(const CXXThrowExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getParam());
+}
+
+void StmtProfiler::VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getField());
+}
+
+void StmtProfiler::VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *S) {
+ VisitExpr(S);
+ VisitDecl(
+ const_cast<CXXDestructorDecl *>(S->getTemporary()->getDestructor()));
+}
+
+void StmtProfiler::VisitCXXConstructExpr(const CXXConstructExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getConstructor());
+ ID.AddBoolean(S->isElidable());
+}
+
+void StmtProfiler::VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *S) {
+ VisitExplicitCastExpr(S);
+}
+
+void
+StmtProfiler::VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *S) {
+ VisitCXXConstructExpr(S);
+}
+
+void
+StmtProfiler::VisitLambdaExpr(const LambdaExpr *S) {
+ VisitExpr(S);
+ for (LambdaExpr::capture_iterator C = S->explicit_capture_begin(),
+ CEnd = S->explicit_capture_end();
+ C != CEnd; ++C) {
+ ID.AddInteger(C->getCaptureKind());
+ switch (C->getCaptureKind()) {
+ case LCK_This:
+ break;
+ case LCK_ByRef:
+ case LCK_ByCopy:
+ VisitDecl(C->getCapturedVar());
+ ID.AddBoolean(C->isPackExpansion());
+ break;
+ case LCK_VLAType:
+ llvm_unreachable("VLA type in explicit captures.");
+ }
+ }
+ // Note: If we actually needed to be able to match lambda
+ // expressions, we would have to consider parameters and return type
+ // here, among other things.
+ VisitStmt(S->getBody());
+}
+
+void
+StmtProfiler::VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCXXDeleteExpr(const CXXDeleteExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->isGlobalDelete());
+ ID.AddBoolean(S->isArrayForm());
+ VisitDecl(S->getOperatorDelete());
+}
+
+void StmtProfiler::VisitCXXNewExpr(const CXXNewExpr *S) {
+ VisitExpr(S);
+ VisitType(S->getAllocatedType());
+ VisitDecl(S->getOperatorNew());
+ VisitDecl(S->getOperatorDelete());
+ ID.AddBoolean(S->isArray());
+ ID.AddInteger(S->getNumPlacementArgs());
+ ID.AddBoolean(S->isGlobalNew());
+ ID.AddBoolean(S->isParenTypeId());
+ ID.AddInteger(S->getInitializationStyle());
+}
+
+void
+StmtProfiler::VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->isArrow());
+ VisitNestedNameSpecifier(S->getQualifier());
+ ID.AddBoolean(S->getScopeTypeInfo() != nullptr);
+ if (S->getScopeTypeInfo())
+ VisitType(S->getScopeTypeInfo()->getType());
+ ID.AddBoolean(S->getDestroyedTypeInfo() != nullptr);
+ if (S->getDestroyedTypeInfo())
+ VisitType(S->getDestroyedType());
+ else
+ ID.AddPointer(S->getDestroyedTypeIdentifier());
+}
+
+void StmtProfiler::VisitOverloadExpr(const OverloadExpr *S) {
+ VisitExpr(S);
+ VisitNestedNameSpecifier(S->getQualifier());
+ VisitName(S->getName());
+ ID.AddBoolean(S->hasExplicitTemplateArgs());
+ if (S->hasExplicitTemplateArgs())
+ VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
+}
+
+void
+StmtProfiler::VisitUnresolvedLookupExpr(const UnresolvedLookupExpr *S) {
+ VisitOverloadExpr(S);
+}
+
+void StmtProfiler::VisitTypeTraitExpr(const TypeTraitExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getTrait());
+ ID.AddInteger(S->getNumArgs());
+ for (unsigned I = 0, N = S->getNumArgs(); I != N; ++I)
+ VisitType(S->getArg(I)->getType());
+}
+
+void StmtProfiler::VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getTrait());
+ VisitType(S->getQueriedType());
+}
+
+void StmtProfiler::VisitExpressionTraitExpr(const ExpressionTraitExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getTrait());
+ VisitExpr(S->getQueriedExpression());
+}
+
+void StmtProfiler::VisitDependentScopeDeclRefExpr(
+ const DependentScopeDeclRefExpr *S) {
+ VisitExpr(S);
+ VisitName(S->getDeclName());
+ VisitNestedNameSpecifier(S->getQualifier());
+ ID.AddBoolean(S->hasExplicitTemplateArgs());
+ if (S->hasExplicitTemplateArgs())
+ VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
+}
+
+void StmtProfiler::VisitExprWithCleanups(const ExprWithCleanups *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCXXUnresolvedConstructExpr(
+ const CXXUnresolvedConstructExpr *S) {
+ VisitExpr(S);
+ VisitType(S->getTypeAsWritten());
+}
+
+void StmtProfiler::VisitCXXDependentScopeMemberExpr(
+ const CXXDependentScopeMemberExpr *S) {
+ ID.AddBoolean(S->isImplicitAccess());
+ if (!S->isImplicitAccess()) {
+ VisitExpr(S);
+ ID.AddBoolean(S->isArrow());
+ }
+ VisitNestedNameSpecifier(S->getQualifier());
+ VisitName(S->getMember());
+ ID.AddBoolean(S->hasExplicitTemplateArgs());
+ if (S->hasExplicitTemplateArgs())
+ VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
+}
+
+void StmtProfiler::VisitUnresolvedMemberExpr(const UnresolvedMemberExpr *S) {
+ ID.AddBoolean(S->isImplicitAccess());
+ if (!S->isImplicitAccess()) {
+ VisitExpr(S);
+ ID.AddBoolean(S->isArrow());
+ }
+ VisitNestedNameSpecifier(S->getQualifier());
+ VisitName(S->getMemberName());
+ ID.AddBoolean(S->hasExplicitTemplateArgs());
+ if (S->hasExplicitTemplateArgs())
+ VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
+}
+
+void StmtProfiler::VisitCXXNoexceptExpr(const CXXNoexceptExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitPackExpansionExpr(const PackExpansionExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitSizeOfPackExpr(const SizeOfPackExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getPack());
+ if (S->isPartiallySubstituted()) {
+ auto Args = S->getPartialArguments();
+ ID.AddInteger(Args.size());
+ for (const auto &TA : Args)
+ VisitTemplateArgument(TA);
+ } else {
+ ID.AddInteger(0);
+ }
+}
+
+void StmtProfiler::VisitSubstNonTypeTemplateParmPackExpr(
+ const SubstNonTypeTemplateParmPackExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getParameterPack());
+ VisitTemplateArgument(S->getArgumentPack());
+}
+
+void StmtProfiler::VisitSubstNonTypeTemplateParmExpr(
+ const SubstNonTypeTemplateParmExpr *E) {
+ // Profile exactly as the replacement expression.
+ Visit(E->getReplacement());
+}
+
+void StmtProfiler::VisitFunctionParmPackExpr(const FunctionParmPackExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getParameterPack());
+ ID.AddInteger(S->getNumExpansions());
+ for (FunctionParmPackExpr::iterator I = S->begin(), E = S->end(); I != E; ++I)
+ VisitDecl(*I);
+}
+
+void StmtProfiler::VisitMaterializeTemporaryExpr(
+ const MaterializeTemporaryExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCXXFoldExpr(const CXXFoldExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getOperator());
+}
+
+void StmtProfiler::VisitCoroutineBodyStmt(const CoroutineBodyStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitCoreturnStmt(const CoreturnStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitCoawaitExpr(const CoawaitExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCoyieldExpr(const CoyieldExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
+ VisitExpr(E);
+}
+
+void StmtProfiler::VisitTypoExpr(const TypoExpr *E) {
+ VisitExpr(E);
+}
+
+void StmtProfiler::VisitObjCStringLiteral(const ObjCStringLiteral *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitObjCBoxedExpr(const ObjCBoxedExpr *E) {
+ VisitExpr(E);
+}
+
+void StmtProfiler::VisitObjCArrayLiteral(const ObjCArrayLiteral *E) {
+ VisitExpr(E);
+}
+
+void StmtProfiler::VisitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E) {
+ VisitExpr(E);
+}
+
+void StmtProfiler::VisitObjCEncodeExpr(const ObjCEncodeExpr *S) {
+ VisitExpr(S);
+ VisitType(S->getEncodedType());
+}
+
+void StmtProfiler::VisitObjCSelectorExpr(const ObjCSelectorExpr *S) {
+ VisitExpr(S);
+ VisitName(S->getSelector());
+}
+
+void StmtProfiler::VisitObjCProtocolExpr(const ObjCProtocolExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getProtocol());
+}
+
+void StmtProfiler::VisitObjCIvarRefExpr(const ObjCIvarRefExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getDecl());
+ ID.AddBoolean(S->isArrow());
+ ID.AddBoolean(S->isFreeIvar());
+}
+
+void StmtProfiler::VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *S) {
+ VisitExpr(S);
+ if (S->isImplicitProperty()) {
+ VisitDecl(S->getImplicitPropertyGetter());
+ VisitDecl(S->getImplicitPropertySetter());
+ } else {
+ VisitDecl(S->getExplicitProperty());
+ }
+ if (S->isSuperReceiver()) {
+ ID.AddBoolean(S->isSuperReceiver());
+ VisitType(S->getSuperReceiverType());
+ }
+}
+
+void StmtProfiler::VisitObjCSubscriptRefExpr(const ObjCSubscriptRefExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getAtIndexMethodDecl());
+ VisitDecl(S->setAtIndexMethodDecl());
+}
+
+void StmtProfiler::VisitObjCMessageExpr(const ObjCMessageExpr *S) {
+ VisitExpr(S);
+ VisitName(S->getSelector());
+ VisitDecl(S->getMethodDecl());
+}
+
+void StmtProfiler::VisitObjCIsaExpr(const ObjCIsaExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->isArrow());
+}
+
+void StmtProfiler::VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->getValue());
+}
+
+void StmtProfiler::VisitObjCIndirectCopyRestoreExpr(
+ const ObjCIndirectCopyRestoreExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->shouldCopy());
+}
+
+void StmtProfiler::VisitObjCBridgedCastExpr(const ObjCBridgedCastExpr *S) {
+ VisitExplicitCastExpr(S);
+  ID.AddInteger(S->getBridgeKind());
+}
+
+void StmtProfiler::VisitDecl(const Decl *D) {
+ ID.AddInteger(D? D->getKind() : 0);
+
+ if (Canonical && D) {
+ if (const NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(D)) {
+ ID.AddInteger(NTTP->getDepth());
+ ID.AddInteger(NTTP->getIndex());
+ ID.AddBoolean(NTTP->isParameterPack());
+ VisitType(NTTP->getType());
+ return;
+ }
+
+ if (const ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(D)) {
+ // The Itanium C++ ABI uses the type, scope depth, and scope
+ // index of a parameter when mangling expressions that involve
+ // function parameters, so we will use the parameter's type for
+ // establishing function parameter identity. That way, our
+ // definition of "equivalent" (per C++ [temp.over.link]) is at
+ // least as strong as the definition of "equivalent" used for
+ // name mangling.
+ VisitType(Parm->getType());
+ ID.AddInteger(Parm->getFunctionScopeDepth());
+ ID.AddInteger(Parm->getFunctionScopeIndex());
+ return;
+ }
+
+ if (const TemplateTypeParmDecl *TTP =
+ dyn_cast<TemplateTypeParmDecl>(D)) {
+ ID.AddInteger(TTP->getDepth());
+ ID.AddInteger(TTP->getIndex());
+ ID.AddBoolean(TTP->isParameterPack());
+ return;
+ }
+
+ if (const TemplateTemplateParmDecl *TTP =
+ dyn_cast<TemplateTemplateParmDecl>(D)) {
+ ID.AddInteger(TTP->getDepth());
+ ID.AddInteger(TTP->getIndex());
+ ID.AddBoolean(TTP->isParameterPack());
+ return;
+ }
+ }
+
+ ID.AddPointer(D? D->getCanonicalDecl() : nullptr);
+}
+
+void StmtProfiler::VisitType(QualType T) {
+ if (Canonical)
+ T = Context.getCanonicalType(T);
+
+ ID.AddPointer(T.getAsOpaquePtr());
+}
+
+void StmtProfiler::VisitName(DeclarationName Name) {
+ ID.AddPointer(Name.getAsOpaquePtr());
+}
+
+void StmtProfiler::VisitNestedNameSpecifier(NestedNameSpecifier *NNS) {
+ if (Canonical)
+ NNS = Context.getCanonicalNestedNameSpecifier(NNS);
+ ID.AddPointer(NNS);
+}
+
+void StmtProfiler::VisitTemplateName(TemplateName Name) {
+ if (Canonical)
+ Name = Context.getCanonicalTemplateName(Name);
+
+ Name.Profile(ID);
+}
+
+void StmtProfiler::VisitTemplateArguments(const TemplateArgumentLoc *Args,
+ unsigned NumArgs) {
+ ID.AddInteger(NumArgs);
+ for (unsigned I = 0; I != NumArgs; ++I)
+ VisitTemplateArgument(Args[I].getArgument());
+}
+
+void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) {
+ // Mostly repetitive with TemplateArgument::Profile!
+ ID.AddInteger(Arg.getKind());
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ break;
+
+ case TemplateArgument::Type:
+ VisitType(Arg.getAsType());
+ break;
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ VisitTemplateName(Arg.getAsTemplateOrTemplatePattern());
+ break;
+
+ case TemplateArgument::Declaration:
+ VisitDecl(Arg.getAsDecl());
+ break;
+
+ case TemplateArgument::NullPtr:
+ VisitType(Arg.getNullPtrType());
+ break;
+
+ case TemplateArgument::Integral:
+ Arg.getAsIntegral().Profile(ID);
+ VisitType(Arg.getIntegralType());
+ break;
+
+ case TemplateArgument::Expression:
+ Visit(Arg.getAsExpr());
+ break;
+
+ case TemplateArgument::Pack:
+ for (const auto &P : Arg.pack_elements())
+ VisitTemplateArgument(P);
+ break;
+ }
+}
+
+void Stmt::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ bool Canonical) const {
+ StmtProfiler Profiler(ID, Context, Canonical);
+ Profiler.Visit(this);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtViz.cpp b/contrib/llvm/tools/clang/lib/AST/StmtViz.cpp
new file mode 100644
index 0000000..8be287e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/StmtViz.cpp
@@ -0,0 +1,62 @@
+//===--- StmtViz.cpp - Graphviz visualization for Stmt ASTs -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements Stmt::viewAST, which generates a Graphviz DOT file
+// that depicts the AST and then calls Graphviz/dot+gv on it.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtGraphTraits.h"
+#include "clang/AST/Decl.h"
+#include "llvm/Support/GraphWriter.h"
+
+using namespace clang;
+
+void Stmt::viewAST() const {
+#ifndef NDEBUG
+ llvm::ViewGraph(this,"AST");
+#else
+ llvm::errs() << "Stmt::viewAST is only available in debug builds on "
+ << "systems with Graphviz or gv!\n";
+#endif
+}
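+
+// Illustrative use from a debugger (assumes a debug build with Graphviz/gv
+// installed; S stands for any Stmt pointer):
+//
+//   (gdb) p S->viewAST()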
+
+namespace llvm {
+template<>
+struct DOTGraphTraits<const Stmt*> : public DefaultDOTGraphTraits {
+ DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
+
+ static std::string getNodeLabel(const Stmt* Node, const Stmt* Graph) {
+
+#ifndef NDEBUG
+ std::string OutSStr;
+ llvm::raw_string_ostream Out(OutSStr);
+
+ if (Node)
+ Out << Node->getStmtClassName();
+ else
+ Out << "<NULL>";
+
+ std::string OutStr = Out.str();
+ if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
+
+ // Process string output to make it nicer...
+ for (unsigned i = 0; i != OutStr.length(); ++i)
+ if (OutStr[i] == '\n') { // Left justify
+ OutStr[i] = '\\';
+ OutStr.insert(OutStr.begin()+i+1, 'l');
+ }
+
+ return OutStr;
+#else
+ return "";
+#endif
+ }
+};
+} // end namespace llvm
diff --git a/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp b/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp
new file mode 100644
index 0000000..e9edb0d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp
@@ -0,0 +1,586 @@
+//===--- TemplateBase.cpp - Common template AST class implementation ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements common classes used throughout C++ template
+// representations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+
+using namespace clang;
+
+/// \brief Print a template integral argument value.
+///
+/// \param TemplArg the TemplateArgument instance to print.
+///
+/// \param Out the raw_ostream instance to use for printing.
+///
+/// \param Policy the printing policy for EnumConstantDecl printing.
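+///
+/// For example, a bool argument with value 1 prints as "true" (unless MSVC
+/// formatting is in effect), a char argument prints as a quoted, escaped
+/// character literal, and an enum argument whose value matches one of the
+/// enumerators prints that enumerator's qualified name.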
+static void printIntegral(const TemplateArgument &TemplArg,
+ raw_ostream &Out, const PrintingPolicy& Policy) {
+ const ::clang::Type *T = TemplArg.getIntegralType().getTypePtr();
+ const llvm::APSInt &Val = TemplArg.getAsIntegral();
+
+ if (const EnumType *ET = T->getAs<EnumType>()) {
+ for (const EnumConstantDecl* ECD : ET->getDecl()->enumerators()) {
+      // In Sema::CheckTemplateArgument, enum template argument values are
+      // extended to the size of the integer underlying the enum type. This
+ // may create a size difference between the enum value and template
+ // argument value, requiring isSameValue here instead of operator==.
+ if (llvm::APSInt::isSameValue(ECD->getInitVal(), Val)) {
+ ECD->printQualifiedName(Out, Policy);
+ return;
+ }
+ }
+ }
+
+ if (T->isBooleanType() && !Policy.MSVCFormatting) {
+ Out << (Val.getBoolValue() ? "true" : "false");
+ } else if (T->isCharType()) {
+ const char Ch = Val.getZExtValue();
+ Out << ((Ch == '\'') ? "'\\" : "'");
+ Out.write_escaped(StringRef(&Ch, 1), /*UseHexEscapes=*/ true);
+ Out << "'";
+ } else {
+ Out << Val;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// TemplateArgument Implementation
+//===----------------------------------------------------------------------===//
+
+TemplateArgument::TemplateArgument(ASTContext &Ctx, const llvm::APSInt &Value,
+ QualType Type) {
+ Integer.Kind = Integral;
+ // Copy the APSInt value into our decomposed form.
+ Integer.BitWidth = Value.getBitWidth();
+ Integer.IsUnsigned = Value.isUnsigned();
+ // If the value is large, we have to get additional memory from the ASTContext
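+  // (e.g. a 128-bit APSInt occupies two 64-bit words and cannot live in the
+  // inline VAL field).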
+ unsigned NumWords = Value.getNumWords();
+ if (NumWords > 1) {
+ void *Mem = Ctx.Allocate(NumWords * sizeof(uint64_t));
+ std::memcpy(Mem, Value.getRawData(), NumWords * sizeof(uint64_t));
+ Integer.pVal = static_cast<uint64_t *>(Mem);
+ } else {
+ Integer.VAL = Value.getZExtValue();
+ }
+
+ Integer.Type = Type.getAsOpaquePtr();
+}
+
+TemplateArgument
+TemplateArgument::CreatePackCopy(ASTContext &Context,
+ ArrayRef<TemplateArgument> Args) {
+ if (Args.empty())
+ return getEmptyPack();
+
+ return TemplateArgument(Args.copy(Context));
+}
+
+bool TemplateArgument::isDependent() const {
+ switch (getKind()) {
+ case Null:
+ llvm_unreachable("Should not have a NULL template argument");
+
+ case Type:
+ return getAsType()->isDependentType() ||
+ isa<PackExpansionType>(getAsType());
+
+ case Template:
+ return getAsTemplate().isDependent();
+
+ case TemplateExpansion:
+ return true;
+
+ case Declaration:
+ if (DeclContext *DC = dyn_cast<DeclContext>(getAsDecl()))
+ return DC->isDependentContext();
+ return getAsDecl()->getDeclContext()->isDependentContext();
+
+ case NullPtr:
+ return false;
+
+ case Integral:
+ // Never dependent
+ return false;
+
+ case Expression:
+ return (getAsExpr()->isTypeDependent() || getAsExpr()->isValueDependent() ||
+ isa<PackExpansionExpr>(getAsExpr()));
+
+ case Pack:
+ for (const auto &P : pack_elements())
+ if (P.isDependent())
+ return true;
+ return false;
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+bool TemplateArgument::isInstantiationDependent() const {
+ switch (getKind()) {
+ case Null:
+ llvm_unreachable("Should not have a NULL template argument");
+
+ case Type:
+ return getAsType()->isInstantiationDependentType();
+
+ case Template:
+ return getAsTemplate().isInstantiationDependent();
+
+ case TemplateExpansion:
+ return true;
+
+ case Declaration:
+ if (DeclContext *DC = dyn_cast<DeclContext>(getAsDecl()))
+ return DC->isDependentContext();
+ return getAsDecl()->getDeclContext()->isDependentContext();
+
+ case NullPtr:
+ return false;
+
+ case Integral:
+ // Never dependent
+ return false;
+
+ case Expression:
+ return getAsExpr()->isInstantiationDependent();
+
+ case Pack:
+ for (const auto &P : pack_elements())
+ if (P.isInstantiationDependent())
+ return true;
+ return false;
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+bool TemplateArgument::isPackExpansion() const {
+ switch (getKind()) {
+ case Null:
+ case Declaration:
+ case Integral:
+ case Pack:
+ case Template:
+ case NullPtr:
+ return false;
+
+ case TemplateExpansion:
+ return true;
+
+ case Type:
+ return isa<PackExpansionType>(getAsType());
+
+ case Expression:
+ return isa<PackExpansionExpr>(getAsExpr());
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+bool TemplateArgument::containsUnexpandedParameterPack() const {
+ switch (getKind()) {
+ case Null:
+ case Declaration:
+ case Integral:
+ case TemplateExpansion:
+ case NullPtr:
+ break;
+
+ case Type:
+ if (getAsType()->containsUnexpandedParameterPack())
+ return true;
+ break;
+
+ case Template:
+ if (getAsTemplate().containsUnexpandedParameterPack())
+ return true;
+ break;
+
+ case Expression:
+ if (getAsExpr()->containsUnexpandedParameterPack())
+ return true;
+ break;
+
+ case Pack:
+ for (const auto &P : pack_elements())
+ if (P.containsUnexpandedParameterPack())
+ return true;
+
+ break;
+ }
+
+ return false;
+}
+
+Optional<unsigned> TemplateArgument::getNumTemplateExpansions() const {
+ assert(getKind() == TemplateExpansion);
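+  // NumExpansions is stored with a +1 bias so that zero can encode "unknown".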
+ if (TemplateArg.NumExpansions)
+ return TemplateArg.NumExpansions - 1;
+
+ return None;
+}
+
+void TemplateArgument::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context) const {
+ ID.AddInteger(getKind());
+ switch (getKind()) {
+ case Null:
+ break;
+
+ case Type:
+ getAsType().Profile(ID);
+ break;
+
+ case NullPtr:
+ getNullPtrType().Profile(ID);
+ break;
+
+ case Declaration:
+    ID.AddPointer(getAsDecl() ? getAsDecl()->getCanonicalDecl() : nullptr);
+ break;
+
+ case Template:
+ case TemplateExpansion: {
+ TemplateName Template = getAsTemplateOrTemplatePattern();
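+    // Profile a template template parameter by its (depth, position)
+    // coordinates rather than by pointer, so that canonically equivalent
+    // parameters produce the same ID.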
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast_or_null<TemplateTemplateParmDecl>(
+ Template.getAsTemplateDecl())) {
+ ID.AddBoolean(true);
+ ID.AddInteger(TTP->getDepth());
+ ID.AddInteger(TTP->getPosition());
+ ID.AddBoolean(TTP->isParameterPack());
+ } else {
+ ID.AddBoolean(false);
+ ID.AddPointer(Context.getCanonicalTemplateName(Template)
+ .getAsVoidPointer());
+ }
+ break;
+ }
+
+ case Integral:
+ getAsIntegral().Profile(ID);
+ getIntegralType().Profile(ID);
+ break;
+
+ case Expression:
+ getAsExpr()->Profile(ID, Context, true);
+ break;
+
+ case Pack:
+ ID.AddInteger(Args.NumArgs);
+ for (unsigned I = 0; I != Args.NumArgs; ++I)
+ Args.Args[I].Profile(ID, Context);
+ }
+}
+
+bool TemplateArgument::structurallyEquals(const TemplateArgument &Other) const {
+ if (getKind() != Other.getKind()) return false;
+
+ switch (getKind()) {
+ case Null:
+ case Type:
+ case Expression:
+ case Template:
+ case TemplateExpansion:
+ case NullPtr:
+ return TypeOrValue.V == Other.TypeOrValue.V;
+
+ case Declaration:
+ return getAsDecl() == Other.getAsDecl();
+
+ case Integral:
+ return getIntegralType() == Other.getIntegralType() &&
+ getAsIntegral() == Other.getAsIntegral();
+
+ case Pack:
+ if (Args.NumArgs != Other.Args.NumArgs) return false;
+ for (unsigned I = 0, E = Args.NumArgs; I != E; ++I)
+ if (!Args.Args[I].structurallyEquals(Other.Args.Args[I]))
+ return false;
+ return true;
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+TemplateArgument TemplateArgument::getPackExpansionPattern() const {
+ assert(isPackExpansion());
+
+ switch (getKind()) {
+ case Type:
+ return getAsType()->getAs<PackExpansionType>()->getPattern();
+
+ case Expression:
+ return cast<PackExpansionExpr>(getAsExpr())->getPattern();
+
+ case TemplateExpansion:
+ return TemplateArgument(getAsTemplateOrTemplatePattern());
+
+ case Declaration:
+ case Integral:
+ case Pack:
+ case Null:
+ case Template:
+ case NullPtr:
+ return TemplateArgument();
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+void TemplateArgument::print(const PrintingPolicy &Policy,
+ raw_ostream &Out) const {
+ switch (getKind()) {
+ case Null:
+ Out << "(no value)";
+ break;
+
+ case Type: {
+ PrintingPolicy SubPolicy(Policy);
+ SubPolicy.SuppressStrongLifetime = true;
+ getAsType().print(Out, SubPolicy);
+ break;
+ }
+
+ case Declaration: {
+ NamedDecl *ND = cast<NamedDecl>(getAsDecl());
+ Out << '&';
+ if (ND->getDeclName()) {
+ // FIXME: distinguish between pointer and reference args?
+ ND->printQualifiedName(Out);
+ } else {
+ Out << "(anonymous)";
+ }
+ break;
+ }
+
+ case NullPtr:
+ Out << "nullptr";
+ break;
+
+ case Template:
+ getAsTemplate().print(Out, Policy);
+ break;
+
+ case TemplateExpansion:
+ getAsTemplateOrTemplatePattern().print(Out, Policy);
+ Out << "...";
+ break;
+
+ case Integral: {
+ printIntegral(*this, Out, Policy);
+ break;
+ }
+
+ case Expression:
+ getAsExpr()->printPretty(Out, nullptr, Policy);
+ break;
+
+ case Pack:
+ Out << "<";
+ bool First = true;
+ for (const auto &P : pack_elements()) {
+ if (First)
+ First = false;
+ else
+ Out << ", ";
+
+ P.print(Policy, Out);
+ }
+ Out << ">";
+ break;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// TemplateArgumentLoc Implementation
+//===----------------------------------------------------------------------===//
+
+TemplateArgumentLocInfo::TemplateArgumentLocInfo() {
+ memset((void*)this, 0, sizeof(TemplateArgumentLocInfo));
+}
+
+SourceRange TemplateArgumentLoc::getSourceRange() const {
+ switch (Argument.getKind()) {
+ case TemplateArgument::Expression:
+ return getSourceExpression()->getSourceRange();
+
+ case TemplateArgument::Declaration:
+ return getSourceDeclExpression()->getSourceRange();
+
+ case TemplateArgument::NullPtr:
+ return getSourceNullPtrExpression()->getSourceRange();
+
+ case TemplateArgument::Type:
+ if (TypeSourceInfo *TSI = getTypeSourceInfo())
+ return TSI->getTypeLoc().getSourceRange();
+ else
+ return SourceRange();
+
+ case TemplateArgument::Template:
+ if (getTemplateQualifierLoc())
+ return SourceRange(getTemplateQualifierLoc().getBeginLoc(),
+ getTemplateNameLoc());
+ return SourceRange(getTemplateNameLoc());
+
+ case TemplateArgument::TemplateExpansion:
+ if (getTemplateQualifierLoc())
+ return SourceRange(getTemplateQualifierLoc().getBeginLoc(),
+ getTemplateEllipsisLoc());
+ return SourceRange(getTemplateNameLoc(), getTemplateEllipsisLoc());
+
+ case TemplateArgument::Integral:
+ return getSourceIntegralExpression()->getSourceRange();
+
+ case TemplateArgument::Pack:
+ case TemplateArgument::Null:
+ return SourceRange();
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
+ const TemplateArgument &Arg) {
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ // This is bad, but not as bad as crashing because of argument
+ // count mismatches.
+ return DB << "(null template argument)";
+
+ case TemplateArgument::Type:
+ return DB << Arg.getAsType();
+
+ case TemplateArgument::Declaration:
+ return DB << Arg.getAsDecl();
+
+ case TemplateArgument::NullPtr:
+ return DB << "nullptr";
+
+ case TemplateArgument::Integral:
+ return DB << Arg.getAsIntegral().toString(10);
+
+ case TemplateArgument::Template:
+ return DB << Arg.getAsTemplate();
+
+ case TemplateArgument::TemplateExpansion:
+ return DB << Arg.getAsTemplateOrTemplatePattern() << "...";
+
+ case TemplateArgument::Expression: {
+ // This shouldn't actually ever happen, so it's okay that we're
+ // regurgitating an expression here.
+ // FIXME: We're guessing at LangOptions!
+ SmallString<32> Str;
+ llvm::raw_svector_ostream OS(Str);
+ LangOptions LangOpts;
+ LangOpts.CPlusPlus = true;
+ PrintingPolicy Policy(LangOpts);
+ Arg.getAsExpr()->printPretty(OS, nullptr, Policy);
+ return DB << OS.str();
+ }
+
+ case TemplateArgument::Pack: {
+ // FIXME: We're guessing at LangOptions!
+ SmallString<32> Str;
+ llvm::raw_svector_ostream OS(Str);
+ LangOptions LangOpts;
+ LangOpts.CPlusPlus = true;
+ PrintingPolicy Policy(LangOpts);
+ Arg.print(Policy, OS);
+ return DB << OS.str();
+ }
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+const ASTTemplateArgumentListInfo *
+ASTTemplateArgumentListInfo::Create(ASTContext &C,
+ const TemplateArgumentListInfo &List) {
+ std::size_t size = totalSizeToAlloc<TemplateArgumentLoc>(List.size());
+ void *Mem = C.Allocate(size, llvm::alignOf<ASTTemplateArgumentListInfo>());
+ return new (Mem) ASTTemplateArgumentListInfo(List);
+}
+
+ASTTemplateArgumentListInfo::ASTTemplateArgumentListInfo(
+ const TemplateArgumentListInfo &Info) {
+ LAngleLoc = Info.getLAngleLoc();
+ RAngleLoc = Info.getRAngleLoc();
+ NumTemplateArgs = Info.size();
+
+ TemplateArgumentLoc *ArgBuffer = getTrailingObjects<TemplateArgumentLoc>();
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ new (&ArgBuffer[i]) TemplateArgumentLoc(Info[i]);
+}
+
+void ASTTemplateKWAndArgsInfo::initializeFrom(
+ SourceLocation TemplateKWLoc, const TemplateArgumentListInfo &Info,
+ TemplateArgumentLoc *OutArgArray) {
+ this->TemplateKWLoc = TemplateKWLoc;
+ LAngleLoc = Info.getLAngleLoc();
+ RAngleLoc = Info.getRAngleLoc();
+ NumTemplateArgs = Info.size();
+
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ new (&OutArgArray[i]) TemplateArgumentLoc(Info[i]);
+}
+
+void ASTTemplateKWAndArgsInfo::initializeFrom(SourceLocation TemplateKWLoc) {
+ assert(TemplateKWLoc.isValid());
+ LAngleLoc = SourceLocation();
+ RAngleLoc = SourceLocation();
+ this->TemplateKWLoc = TemplateKWLoc;
+ NumTemplateArgs = 0;
+}
+
+void ASTTemplateKWAndArgsInfo::initializeFrom(
+ SourceLocation TemplateKWLoc, const TemplateArgumentListInfo &Info,
+ TemplateArgumentLoc *OutArgArray, bool &Dependent,
+ bool &InstantiationDependent, bool &ContainsUnexpandedParameterPack) {
+ this->TemplateKWLoc = TemplateKWLoc;
+ LAngleLoc = Info.getLAngleLoc();
+ RAngleLoc = Info.getRAngleLoc();
+ NumTemplateArgs = Info.size();
+
+ for (unsigned i = 0; i != NumTemplateArgs; ++i) {
+ Dependent = Dependent || Info[i].getArgument().isDependent();
+ InstantiationDependent = InstantiationDependent ||
+ Info[i].getArgument().isInstantiationDependent();
+ ContainsUnexpandedParameterPack =
+ ContainsUnexpandedParameterPack ||
+ Info[i].getArgument().containsUnexpandedParameterPack();
+
+ new (&OutArgArray[i]) TemplateArgumentLoc(Info[i]);
+ }
+}
+
+void ASTTemplateKWAndArgsInfo::copyInto(const TemplateArgumentLoc *ArgArray,
+ TemplateArgumentListInfo &Info) const {
+ Info.setLAngleLoc(LAngleLoc);
+ Info.setRAngleLoc(RAngleLoc);
+ for (unsigned I = 0; I != NumTemplateArgs; ++I)
+ Info.addArgument(ArgArray[I]);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp b/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp
new file mode 100644
index 0000000..47e0255
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp
@@ -0,0 +1,232 @@
+//===--- TemplateName.cpp - C++ Template Name Representation ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TemplateName interface and subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/TemplateName.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace llvm;
+
+TemplateArgument
+SubstTemplateTemplateParmPackStorage::getArgumentPack() const {
+ return TemplateArgument(llvm::makeArrayRef(Arguments, size()));
+}
+
+void SubstTemplateTemplateParmStorage::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, Parameter, Replacement);
+}
+
+void SubstTemplateTemplateParmStorage::Profile(llvm::FoldingSetNodeID &ID,
+ TemplateTemplateParmDecl *parameter,
+ TemplateName replacement) {
+ ID.AddPointer(parameter);
+ ID.AddPointer(replacement.getAsVoidPointer());
+}
+
+void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID,
+ ASTContext &Context) {
+ Profile(ID, Context, Parameter, getArgumentPack());
+}
+
+void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID,
+ ASTContext &Context,
+ TemplateTemplateParmDecl *Parameter,
+ const TemplateArgument &ArgPack) {
+ ID.AddPointer(Parameter);
+ ArgPack.Profile(ID, Context);
+}
+
+TemplateName::TemplateName(void *Ptr) {
+ Storage = StorageType::getFromOpaqueValue(Ptr);
+}
+
+TemplateName::TemplateName(TemplateDecl *Template) : Storage(Template) {}
+TemplateName::TemplateName(OverloadedTemplateStorage *Storage)
+ : Storage(Storage) {}
+TemplateName::TemplateName(SubstTemplateTemplateParmStorage *Storage)
+ : Storage(Storage) {}
+TemplateName::TemplateName(SubstTemplateTemplateParmPackStorage *Storage)
+ : Storage(Storage) {}
+TemplateName::TemplateName(QualifiedTemplateName *Qual) : Storage(Qual) {}
+TemplateName::TemplateName(DependentTemplateName *Dep) : Storage(Dep) {}
+
+bool TemplateName::isNull() const { return Storage.isNull(); }
+
+TemplateName::NameKind TemplateName::getKind() const {
+ if (Storage.is<TemplateDecl *>())
+ return Template;
+ if (Storage.is<DependentTemplateName *>())
+ return DependentTemplate;
+ if (Storage.is<QualifiedTemplateName *>())
+ return QualifiedTemplate;
+
+ UncommonTemplateNameStorage *uncommon
+ = Storage.get<UncommonTemplateNameStorage*>();
+ if (uncommon->getAsOverloadedStorage())
+ return OverloadedTemplate;
+ if (uncommon->getAsSubstTemplateTemplateParm())
+ return SubstTemplateTemplateParm;
+ return SubstTemplateTemplateParmPack;
+}
+
+TemplateDecl *TemplateName::getAsTemplateDecl() const {
+ if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>())
+ return Template;
+
+ if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName())
+ return QTN->getTemplateDecl();
+
+ if (SubstTemplateTemplateParmStorage *sub = getAsSubstTemplateTemplateParm())
+ return sub->getReplacement().getAsTemplateDecl();
+
+ return nullptr;
+}
+
+OverloadedTemplateStorage *TemplateName::getAsOverloadedTemplate() const {
+ if (UncommonTemplateNameStorage *Uncommon =
+ Storage.dyn_cast<UncommonTemplateNameStorage *>())
+ return Uncommon->getAsOverloadedStorage();
+
+ return nullptr;
+}
+
+SubstTemplateTemplateParmStorage *
+TemplateName::getAsSubstTemplateTemplateParm() const {
+ if (UncommonTemplateNameStorage *uncommon =
+ Storage.dyn_cast<UncommonTemplateNameStorage *>())
+ return uncommon->getAsSubstTemplateTemplateParm();
+
+ return nullptr;
+}
+
+SubstTemplateTemplateParmPackStorage *
+TemplateName::getAsSubstTemplateTemplateParmPack() const {
+ if (UncommonTemplateNameStorage *Uncommon =
+ Storage.dyn_cast<UncommonTemplateNameStorage *>())
+ return Uncommon->getAsSubstTemplateTemplateParmPack();
+
+ return nullptr;
+}
+
+QualifiedTemplateName *TemplateName::getAsQualifiedTemplateName() const {
+ return Storage.dyn_cast<QualifiedTemplateName *>();
+}
+
+DependentTemplateName *TemplateName::getAsDependentTemplateName() const {
+ return Storage.dyn_cast<DependentTemplateName *>();
+}
+
+bool TemplateName::isDependent() const {
+ if (TemplateDecl *Template = getAsTemplateDecl()) {
+ if (isa<TemplateTemplateParmDecl>(Template))
+ return true;
+ // FIXME: Hack, getDeclContext() can be null if Template is still
+ // initializing due to PCH reading, so we check it before using it.
+ // Should probably modify TemplateSpecializationType to allow constructing
+ // it without the isDependent() checking.
+ return Template->getDeclContext() &&
+ Template->getDeclContext()->isDependentContext();
+ }
+
+ assert(!getAsOverloadedTemplate() &&
+ "overloaded templates shouldn't survive to here");
+
+ return true;
+}
+
+bool TemplateName::isInstantiationDependent() const {
+ if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) {
+ if (QTN->getQualifier()->isInstantiationDependent())
+ return true;
+ }
+
+ return isDependent();
+}
+
+bool TemplateName::containsUnexpandedParameterPack() const {
+ if (TemplateDecl *Template = getAsTemplateDecl()) {
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(Template))
+ return TTP->isParameterPack();
+
+ return false;
+ }
+
+ if (DependentTemplateName *DTN = getAsDependentTemplateName())
+ return DTN->getQualifier() &&
+ DTN->getQualifier()->containsUnexpandedParameterPack();
+
+ return getAsSubstTemplateTemplateParmPack() != nullptr;
+}
+
+void
+TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy,
+ bool SuppressNNS) const {
+ if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>())
+ OS << *Template;
+ else if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) {
+ if (!SuppressNNS)
+ QTN->getQualifier()->print(OS, Policy);
+ if (QTN->hasTemplateKeyword())
+ OS << "template ";
+ OS << *QTN->getDecl();
+ } else if (DependentTemplateName *DTN = getAsDependentTemplateName()) {
+ if (!SuppressNNS && DTN->getQualifier())
+ DTN->getQualifier()->print(OS, Policy);
+ OS << "template ";
+
+ if (DTN->isIdentifier())
+ OS << DTN->getIdentifier()->getName();
+ else
+ OS << "operator " << getOperatorSpelling(DTN->getOperator());
+ } else if (SubstTemplateTemplateParmStorage *subst
+ = getAsSubstTemplateTemplateParm()) {
+ subst->getReplacement().print(OS, Policy, SuppressNNS);
+ } else if (SubstTemplateTemplateParmPackStorage *SubstPack
+ = getAsSubstTemplateTemplateParmPack())
+ OS << *SubstPack->getParameterPack();
+ else {
+ OverloadedTemplateStorage *OTS = getAsOverloadedTemplate();
+ (*OTS->begin())->printName(OS);
+ }
+}
+
+const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
+ TemplateName N) {
+ std::string NameStr;
+ raw_string_ostream OS(NameStr);
+ LangOptions LO;
+ LO.CPlusPlus = true;
+ LO.Bool = true;
+ OS << '\'';
+ N.print(OS, PrintingPolicy(LO));
+ OS << '\'';
+ OS.flush();
+ return DB << NameStr;
+}
+
+void TemplateName::dump(raw_ostream &OS) const {
+ LangOptions LO; // FIXME!
+ LO.CPlusPlus = true;
+ LO.Bool = true;
+ print(OS, PrintingPolicy(LO));
+}
+
+void TemplateName::dump() const {
+ dump(llvm::errs());
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/Type.cpp b/contrib/llvm/tools/clang/lib/AST/Type.cpp
new file mode 100644
index 0000000..7dd38cb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/Type.cpp
@@ -0,0 +1,3784 @@
+//===--- Type.cpp - Type representation and manipulation ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements type-related functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeVisitor.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+using namespace clang;
+
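+/// Determine whether this qualifier set strictly includes \p Other: every
+/// qualifier carried by \p Other must also be carried here, and the two sets
+/// must differ. For example, "const volatile" is a strict superset of
+/// "const", while "const" and "restrict" are incomparable.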
+bool Qualifiers::isStrictSupersetOf(Qualifiers Other) const {
+ return (*this != Other) &&
+ // CVR qualifiers superset
+ (((Mask & CVRMask) | (Other.Mask & CVRMask)) == (Mask & CVRMask)) &&
+ // ObjC GC qualifiers superset
+ ((getObjCGCAttr() == Other.getObjCGCAttr()) ||
+ (hasObjCGCAttr() && !Other.hasObjCGCAttr())) &&
+ // Address space superset.
+ ((getAddressSpace() == Other.getAddressSpace()) ||
+       (hasAddressSpace() && !Other.hasAddressSpace())) &&
+ // Lifetime qualifier superset.
+ ((getObjCLifetime() == Other.getObjCLifetime()) ||
+ (hasObjCLifetime() && !Other.hasObjCLifetime()));
+}
+
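+/// Return the identifier at the base of this type: for example, the base
+/// type identifier of both "const Foo **" and "Foo[4]" is the IdentifierInfo
+/// for "Foo".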
+const IdentifierInfo* QualType::getBaseTypeIdentifier() const {
+ const Type* ty = getTypePtr();
+ NamedDecl *ND = nullptr;
+ if (ty->isPointerType() || ty->isReferenceType())
+ return ty->getPointeeType().getBaseTypeIdentifier();
+ else if (ty->isRecordType())
+ ND = ty->getAs<RecordType>()->getDecl();
+ else if (ty->isEnumeralType())
+ ND = ty->getAs<EnumType>()->getDecl();
+ else if (ty->getTypeClass() == Type::Typedef)
+ ND = ty->getAs<TypedefType>()->getDecl();
+ else if (ty->isArrayType())
+ return ty->castAsArrayTypeUnsafe()->
+ getElementType().getBaseTypeIdentifier();
+
+ if (ND)
+ return ND->getIdentifier();
+ return nullptr;
+}
+
+bool QualType::isConstant(QualType T, ASTContext &Ctx) {
+ if (T.isConstQualified())
+ return true;
+
+ if (const ArrayType *AT = Ctx.getAsArrayType(T))
+ return AT->getElementType().isConstant(Ctx);
+
+ return T.getAddressSpace() == LangAS::opencl_constant;
+}
+
+unsigned ConstantArrayType::getNumAddressingBits(ASTContext &Context,
+ QualType ElementType,
+ const llvm::APInt &NumElements) {
+ uint64_t ElementSize = Context.getTypeSizeInChars(ElementType).getQuantity();
+
+ // Fast path the common cases so we can avoid the conservative computation
+ // below, which in common cases allocates "large" APSInt values, which are
+ // slow.
+
+ // If the element size is a power of 2, we can directly compute the additional
+ // number of addressing bits beyond those required for the element count.
+ if (llvm::isPowerOf2_64(ElementSize)) {
+ return NumElements.getActiveBits() + llvm::Log2_64(ElementSize);
+ }
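+  // For example, 1024 elements of size 8 need getActiveBits(1024) = 11 plus
+  // Log2_64(8) = 3, i.e. 14 addressing bits, matching the 14 bits needed to
+  // represent the total size of 8192.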
+
+ // If both the element count and element size fit in 32-bits, we can do the
+ // computation directly in 64-bits.
+ if ((ElementSize >> 32) == 0 && NumElements.getBitWidth() <= 64 &&
+ (NumElements.getZExtValue() >> 32) == 0) {
+ uint64_t TotalSize = NumElements.getZExtValue() * ElementSize;
+ return 64 - llvm::countLeadingZeros(TotalSize);
+ }
+
+ // Otherwise, use APSInt to handle arbitrary sized values.
+ llvm::APSInt SizeExtended(NumElements, true);
+ unsigned SizeTypeBits = Context.getTypeSize(Context.getSizeType());
+ SizeExtended = SizeExtended.extend(std::max(SizeTypeBits,
+ SizeExtended.getBitWidth()) * 2);
+
+ llvm::APSInt TotalSize(llvm::APInt(SizeExtended.getBitWidth(), ElementSize));
+ TotalSize *= SizeExtended;
+
+ return TotalSize.getActiveBits();
+}
+
+unsigned ConstantArrayType::getMaxSizeBits(ASTContext &Context) {
+ unsigned Bits = Context.getTypeSize(Context.getSizeType());
+
+  // Limit the number of bits in size_t so that the maximal bit size fits in a
+  // 64-bit integer (see PR8256). We can do this as there is currently no
+  // hardware that supports a full 64-bit virtual address space.
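+  // A 61-bit byte count leaves three spare bits, so a size expressed in bits
+  // still fits in a 64-bit integer.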
+ if (Bits > 61)
+ Bits = 61;
+
+ return Bits;
+}
+
+DependentSizedArrayType::DependentSizedArrayType(const ASTContext &Context,
+ QualType et, QualType can,
+ Expr *e, ArraySizeModifier sm,
+ unsigned tq,
+ SourceRange brackets)
+ : ArrayType(DependentSizedArray, et, can, sm, tq,
+ (et->containsUnexpandedParameterPack() ||
+ (e && e->containsUnexpandedParameterPack()))),
+ Context(Context), SizeExpr((Stmt*) e), Brackets(brackets)
+{
+}
+
+void DependentSizedArrayType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context,
+ QualType ET,
+ ArraySizeModifier SizeMod,
+ unsigned TypeQuals,
+ Expr *E) {
+ ID.AddPointer(ET.getAsOpaquePtr());
+ ID.AddInteger(SizeMod);
+ ID.AddInteger(TypeQuals);
+ E->Profile(ID, Context, true);
+}
+
+DependentSizedExtVectorType::DependentSizedExtVectorType(const
+ ASTContext &Context,
+ QualType ElementType,
+ QualType can,
+ Expr *SizeExpr,
+ SourceLocation loc)
+ : Type(DependentSizedExtVector, can, /*Dependent=*/true,
+ /*InstantiationDependent=*/true,
+ ElementType->isVariablyModifiedType(),
+ (ElementType->containsUnexpandedParameterPack() ||
+ (SizeExpr && SizeExpr->containsUnexpandedParameterPack()))),
+ Context(Context), SizeExpr(SizeExpr), ElementType(ElementType),
+ loc(loc)
+{
+}
+
+void
+DependentSizedExtVectorType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context,
+ QualType ElementType, Expr *SizeExpr) {
+ ID.AddPointer(ElementType.getAsOpaquePtr());
+ SizeExpr->Profile(ID, Context, true);
+}
+
+VectorType::VectorType(QualType vecType, unsigned nElements, QualType canonType,
+ VectorKind vecKind)
+ : VectorType(Vector, vecType, nElements, canonType, vecKind) {}
+
+VectorType::VectorType(TypeClass tc, QualType vecType, unsigned nElements,
+ QualType canonType, VectorKind vecKind)
+ : Type(tc, canonType, vecType->isDependentType(),
+ vecType->isInstantiationDependentType(),
+ vecType->isVariablyModifiedType(),
+ vecType->containsUnexpandedParameterPack()),
+ ElementType(vecType)
+{
+ VectorTypeBits.VecKind = vecKind;
+ VectorTypeBits.NumElements = nElements;
+}
+
+/// getArrayElementTypeNoTypeQual - If this is an array type, return the
+/// element type of the array, potentially with type qualifiers missing.
+/// This method should never be used when type qualifiers are meaningful.
+const Type *Type::getArrayElementTypeNoTypeQual() const {
+ // If this is directly an array type, return it.
+ if (const ArrayType *ATy = dyn_cast<ArrayType>(this))
+ return ATy->getElementType().getTypePtr();
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<ArrayType>(CanonicalType))
+ return nullptr;
+
+ // If this is a typedef for an array type, strip the typedef off without
+ // losing all typedef information.
+ return cast<ArrayType>(getUnqualifiedDesugaredType())
+ ->getElementType().getTypePtr();
+}
+
+/// getDesugaredType - Return the specified type with any "sugar" removed from
+/// the type. This takes off typedefs, typeof's etc. If the outer level of
+/// the type is already concrete, it returns it unmodified. This is similar
+/// to getting the canonical type, but it doesn't remove *all* typedefs. For
+/// example, it returns "T*" as "T*", (not as "int*"), because the pointer is
+/// concrete.
+QualType QualType::getDesugaredType(QualType T, const ASTContext &Context) {
+ SplitQualType split = getSplitDesugaredType(T);
+ return Context.getQualifiedType(split.Ty, split.Quals);
+}
+
+QualType QualType::getSingleStepDesugaredTypeImpl(QualType type,
+ const ASTContext &Context) {
+ SplitQualType split = type.split();
+ QualType desugar = split.Ty->getLocallyUnqualifiedSingleStepDesugaredType();
+ return Context.getQualifiedType(desugar, split.Quals);
+}
+
+QualType Type::getLocallyUnqualifiedSingleStepDesugaredType() const {
+ switch (getTypeClass()) {
+#define ABSTRACT_TYPE(Class, Parent)
+#define TYPE(Class, Parent) \
+ case Type::Class: { \
+ const Class##Type *ty = cast<Class##Type>(this); \
+ if (!ty->isSugared()) return QualType(ty, 0); \
+ return ty->desugar(); \
+ }
+#include "clang/AST/TypeNodes.def"
+ }
+ llvm_unreachable("bad type kind!");
+}
+
+SplitQualType QualType::getSplitDesugaredType(QualType T) {
+ QualifierCollector Qs;
+
+ QualType Cur = T;
+ while (true) {
+ const Type *CurTy = Qs.strip(Cur);
+ switch (CurTy->getTypeClass()) {
+#define ABSTRACT_TYPE(Class, Parent)
+#define TYPE(Class, Parent) \
+ case Type::Class: { \
+ const Class##Type *Ty = cast<Class##Type>(CurTy); \
+ if (!Ty->isSugared()) \
+ return SplitQualType(Ty, Qs); \
+ Cur = Ty->desugar(); \
+ break; \
+ }
+#include "clang/AST/TypeNodes.def"
+ }
+ }
+}
+
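+/// Accumulate the qualifiers hidden behind sugar: for example, given a
+/// hypothetical "typedef const int CInt", the type "volatile CInt" splits
+/// into the builtin "int" carrying the combined "const volatile" qualifiers.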
+SplitQualType QualType::getSplitUnqualifiedTypeImpl(QualType type) {
+ SplitQualType split = type.split();
+
+ // All the qualifiers we've seen so far.
+ Qualifiers quals = split.Quals;
+
+ // The last type node we saw with any nodes inside it.
+ const Type *lastTypeWithQuals = split.Ty;
+
+ while (true) {
+ QualType next;
+
+ // Do a single-step desugar, aborting the loop if the type isn't
+ // sugared.
+ switch (split.Ty->getTypeClass()) {
+#define ABSTRACT_TYPE(Class, Parent)
+#define TYPE(Class, Parent) \
+ case Type::Class: { \
+ const Class##Type *ty = cast<Class##Type>(split.Ty); \
+ if (!ty->isSugared()) goto done; \
+ next = ty->desugar(); \
+ break; \
+ }
+#include "clang/AST/TypeNodes.def"
+ }
+
+ // Otherwise, split the underlying type. If that yields qualifiers,
+ // update the information.
+ split = next.split();
+ if (!split.Quals.empty()) {
+ lastTypeWithQuals = split.Ty;
+ quals.addConsistentQualifiers(split.Quals);
+ }
+ }
+
+ done:
+ return SplitQualType(lastTypeWithQuals, quals);
+}
+
+QualType QualType::IgnoreParens(QualType T) {
+ // FIXME: this seems inherently un-qualifiers-safe.
+ while (const ParenType *PT = T->getAs<ParenType>())
+ T = PT->getInnerType();
+ return T;
+}
+
+/// \brief This will check for a T (which should be a Type that can act as
+/// sugar, such as a TypedefType) by removing any existing sugar until it
+/// reaches a T or a non-sugared type.
+template<typename T> static const T *getAsSugar(const Type *Cur) {
+ while (true) {
+ if (const T *Sugar = dyn_cast<T>(Cur))
+ return Sugar;
+ switch (Cur->getTypeClass()) {
+#define ABSTRACT_TYPE(Class, Parent)
+#define TYPE(Class, Parent) \
+ case Type::Class: { \
+ const Class##Type *Ty = cast<Class##Type>(Cur); \
+    if (!Ty->isSugared()) return nullptr; \
+ Cur = Ty->desugar().getTypePtr(); \
+ break; \
+ }
+#include "clang/AST/TypeNodes.def"
+ }
+ }
+}
+
+template <> const TypedefType *Type::getAs() const {
+ return getAsSugar<TypedefType>(this);
+}
+
+template <> const TemplateSpecializationType *Type::getAs() const {
+ return getAsSugar<TemplateSpecializationType>(this);
+}
+
+template <> const AttributedType *Type::getAs() const {
+ return getAsSugar<AttributedType>(this);
+}
+
+/// getUnqualifiedDesugaredType - Pull any qualifiers and syntactic
+/// sugar off the given type. This should produce an object of the
+/// same dynamic type as the canonical type.
+const Type *Type::getUnqualifiedDesugaredType() const {
+ const Type *Cur = this;
+
+ while (true) {
+ switch (Cur->getTypeClass()) {
+#define ABSTRACT_TYPE(Class, Parent)
+#define TYPE(Class, Parent) \
+ case Class: { \
+ const Class##Type *Ty = cast<Class##Type>(Cur); \
+ if (!Ty->isSugared()) return Cur; \
+ Cur = Ty->desugar().getTypePtr(); \
+ break; \
+ }
+#include "clang/AST/TypeNodes.def"
+ }
+ }
+}
+bool Type::isClassType() const {
+ if (const RecordType *RT = getAs<RecordType>())
+ return RT->getDecl()->isClass();
+ return false;
+}
+bool Type::isStructureType() const {
+ if (const RecordType *RT = getAs<RecordType>())
+ return RT->getDecl()->isStruct();
+ return false;
+}
+bool Type::isObjCBoxableRecordType() const {
+ if (const RecordType *RT = getAs<RecordType>())
+ return RT->getDecl()->hasAttr<ObjCBoxableAttr>();
+ return false;
+}
+bool Type::isInterfaceType() const {
+ if (const RecordType *RT = getAs<RecordType>())
+ return RT->getDecl()->isInterface();
+ return false;
+}
+bool Type::isStructureOrClassType() const {
+ if (const RecordType *RT = getAs<RecordType>()) {
+ RecordDecl *RD = RT->getDecl();
+ return RD->isStruct() || RD->isClass() || RD->isInterface();
+ }
+ return false;
+}
+bool Type::isVoidPointerType() const {
+ if (const PointerType *PT = getAs<PointerType>())
+ return PT->getPointeeType()->isVoidType();
+ return false;
+}
+
+bool Type::isUnionType() const {
+ if (const RecordType *RT = getAs<RecordType>())
+ return RT->getDecl()->isUnion();
+ return false;
+}
+
+bool Type::isComplexType() const {
+ if (const ComplexType *CT = dyn_cast<ComplexType>(CanonicalType))
+ return CT->getElementType()->isFloatingType();
+ return false;
+}
+
+bool Type::isComplexIntegerType() const {
+ // Check for GCC complex integer extension.
+ return getAsComplexIntegerType();
+}
+
+const ComplexType *Type::getAsComplexIntegerType() const {
+ if (const ComplexType *Complex = getAs<ComplexType>())
+ if (Complex->getElementType()->isIntegerType())
+ return Complex;
+ return nullptr;
+}
+
+QualType Type::getPointeeType() const {
+ if (const PointerType *PT = getAs<PointerType>())
+ return PT->getPointeeType();
+ if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>())
+ return OPT->getPointeeType();
+ if (const BlockPointerType *BPT = getAs<BlockPointerType>())
+ return BPT->getPointeeType();
+ if (const ReferenceType *RT = getAs<ReferenceType>())
+ return RT->getPointeeType();
+ if (const MemberPointerType *MPT = getAs<MemberPointerType>())
+ return MPT->getPointeeType();
+ if (const DecayedType *DT = getAs<DecayedType>())
+ return DT->getPointeeType();
+ return QualType();
+}
+
+const RecordType *Type::getAsStructureType() const {
+ // If this is directly a structure type, return it.
+ if (const RecordType *RT = dyn_cast<RecordType>(this)) {
+ if (RT->getDecl()->isStruct())
+ return RT;
+ }
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (const RecordType *RT = dyn_cast<RecordType>(CanonicalType)) {
+ if (!RT->getDecl()->isStruct())
+ return nullptr;
+
+ // If this is a typedef for a structure type, strip the typedef off without
+ // losing all typedef information.
+ return cast<RecordType>(getUnqualifiedDesugaredType());
+ }
+ return nullptr;
+}
+
+const RecordType *Type::getAsUnionType() const {
+ // If this is directly a union type, return it.
+ if (const RecordType *RT = dyn_cast<RecordType>(this)) {
+ if (RT->getDecl()->isUnion())
+ return RT;
+ }
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (const RecordType *RT = dyn_cast<RecordType>(CanonicalType)) {
+ if (!RT->getDecl()->isUnion())
+ return nullptr;
+
+ // If this is a typedef for a union type, strip the typedef off without
+ // losing all typedef information.
+ return cast<RecordType>(getUnqualifiedDesugaredType());
+ }
+
+ return nullptr;
+}
+
+bool Type::isObjCIdOrObjectKindOfType(const ASTContext &ctx,
+ const ObjCObjectType *&bound) const {
+ bound = nullptr;
+
+ const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>();
+ if (!OPT)
+ return false;
+
+ // Easy case: id.
+ if (OPT->isObjCIdType())
+ return true;
+
+ // If it's not a __kindof type, reject it now.
+ if (!OPT->isKindOfType())
+ return false;
+
+ // If it's Class or qualified Class, it's not an object type.
+ if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType())
+ return false;
+
+ // Figure out the type bound for the __kindof type.
+ bound = OPT->getObjectType()->stripObjCKindOfTypeAndQuals(ctx)
+ ->getAs<ObjCObjectType>();
+ return true;
+}
+
+bool Type::isObjCClassOrClassKindOfType() const {
+ const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>();
+ if (!OPT)
+ return false;
+
+ // Easy case: Class.
+ if (OPT->isObjCClassType())
+ return true;
+
+ // If it's not a __kindof type, reject it now.
+ if (!OPT->isKindOfType())
+ return false;
+
+ // If it's Class or qualified Class, it's a class __kindof type.
+ return OPT->isObjCClassType() || OPT->isObjCQualifiedClassType();
+}
+
+/// Was this type written with the special inert-in-MRC __unsafe_unretained
+/// qualifier?
+///
+/// This approximates the answer to the following question: if this
+/// translation unit were compiled in ARC, would this type be qualified
+/// with __unsafe_unretained?
+bool Type::isObjCInertUnsafeUnretainedType() const {
+ const Type *cur = this;
+ while (true) {
+ if (auto attributed = dyn_cast<AttributedType>(cur)) {
+ if (attributed->getAttrKind() ==
+ AttributedType::attr_objc_inert_unsafe_unretained)
+ return true;
+ }
+
+ // Single-step desugar until we run out of sugar.
+ QualType next = cur->getLocallyUnqualifiedSingleStepDesugaredType();
+ if (next.getTypePtr() == cur) return false;
+ cur = next.getTypePtr();
+ }
+}
+
+ObjCObjectType::ObjCObjectType(QualType Canonical, QualType Base,
+ ArrayRef<QualType> typeArgs,
+ ArrayRef<ObjCProtocolDecl *> protocols,
+ bool isKindOf)
+ : Type(ObjCObject, Canonical, Base->isDependentType(),
+ Base->isInstantiationDependentType(),
+ Base->isVariablyModifiedType(),
+ Base->containsUnexpandedParameterPack()),
+ BaseType(Base)
+{
+ ObjCObjectTypeBits.IsKindOf = isKindOf;
+
+ ObjCObjectTypeBits.NumTypeArgs = typeArgs.size();
+ assert(getTypeArgsAsWritten().size() == typeArgs.size() &&
+ "bitfield overflow in type argument count");
+ ObjCObjectTypeBits.NumProtocols = protocols.size();
+ assert(getNumProtocols() == protocols.size() &&
+ "bitfield overflow in protocol count");
+ if (!typeArgs.empty())
+ memcpy(getTypeArgStorage(), typeArgs.data(),
+ typeArgs.size() * sizeof(QualType));
+ if (!protocols.empty())
+ memcpy(getProtocolStorage(), protocols.data(),
+ protocols.size() * sizeof(ObjCProtocolDecl*));
+
+ for (auto typeArg : typeArgs) {
+ if (typeArg->isDependentType())
+ setDependent();
+ else if (typeArg->isInstantiationDependentType())
+ setInstantiationDependent();
+
+ if (typeArg->containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack();
+ }
+}
+
+bool ObjCObjectType::isSpecialized() const {
+ // If we have type arguments written here, the type is specialized.
+ if (ObjCObjectTypeBits.NumTypeArgs > 0)
+ return true;
+
+ // Otherwise, check whether the base type is specialized.
+ if (auto objcObject = getBaseType()->getAs<ObjCObjectType>()) {
+ // Terminate when we reach an interface type.
+ if (isa<ObjCInterfaceType>(objcObject))
+ return false;
+
+ return objcObject->isSpecialized();
+ }
+
+ // Not specialized.
+ return false;
+}
+
+ArrayRef<QualType> ObjCObjectType::getTypeArgs() const {
+ // We have type arguments written on this type.
+ if (isSpecializedAsWritten())
+ return getTypeArgsAsWritten();
+
+ // Look at the base type, which might have type arguments.
+ if (auto objcObject = getBaseType()->getAs<ObjCObjectType>()) {
+ // Terminate when we reach an interface type.
+ if (isa<ObjCInterfaceType>(objcObject))
+ return { };
+
+ return objcObject->getTypeArgs();
+ }
+
+ // No type arguments.
+ return { };
+}
+
+bool ObjCObjectType::isKindOfType() const {
+ if (isKindOfTypeAsWritten())
+ return true;
+
+ // Look at the base type, which might have type arguments.
+ if (auto objcObject = getBaseType()->getAs<ObjCObjectType>()) {
+ // Terminate when we reach an interface type.
+ if (isa<ObjCInterfaceType>(objcObject))
+ return false;
+
+ return objcObject->isKindOfType();
+ }
+
+ // Not a "__kindof" type.
+ return false;
+}
+
+QualType ObjCObjectType::stripObjCKindOfTypeAndQuals(
+ const ASTContext &ctx) const {
+ if (!isKindOfType() && qual_empty())
+ return QualType(this, 0);
+
+ // Recursively strip __kindof.
+ SplitQualType splitBaseType = getBaseType().split();
+ QualType baseType(splitBaseType.Ty, 0);
+ if (const ObjCObjectType *baseObj
+ = splitBaseType.Ty->getAs<ObjCObjectType>()) {
+ baseType = baseObj->stripObjCKindOfTypeAndQuals(ctx);
+ }
+
+ return ctx.getObjCObjectType(ctx.getQualifiedType(baseType,
+ splitBaseType.Quals),
+ getTypeArgsAsWritten(),
+ /*protocols=*/{ },
+ /*isKindOf=*/false);
+}
+
+const ObjCObjectPointerType *ObjCObjectPointerType::stripObjCKindOfTypeAndQuals(
+ const ASTContext &ctx) const {
+ if (!isKindOfType() && qual_empty())
+ return this;
+
+ QualType obj = getObjectType()->stripObjCKindOfTypeAndQuals(ctx);
+ return ctx.getObjCObjectPointerType(obj)->castAs<ObjCObjectPointerType>();
+}
+
+namespace {
+
+template<typename F>
+QualType simpleTransform(ASTContext &ctx, QualType type, F &&f);
+
+/// Visitor used by simpleTransform() to perform the transformation.
+template<typename F>
+struct SimpleTransformVisitor
+ : public TypeVisitor<SimpleTransformVisitor<F>, QualType> {
+ ASTContext &Ctx;
+ F &&TheFunc;
+
+ QualType recurse(QualType type) {
+ return simpleTransform(Ctx, type, std::move(TheFunc));
+ }
+
+public:
+  SimpleTransformVisitor(ASTContext &ctx, F &&f)
+      : Ctx(ctx), TheFunc(std::move(f)) { }
+
+ // None of the clients of this transformation can occur where
+ // there are dependent types, so skip dependent types.
+#define TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) \
+ QualType Visit##Class##Type(const Class##Type *T) { return QualType(T, 0); }
+#include "clang/AST/TypeNodes.def"
+
+#define TRIVIAL_TYPE_CLASS(Class) \
+ QualType Visit##Class##Type(const Class##Type *T) { return QualType(T, 0); }
+
+ TRIVIAL_TYPE_CLASS(Builtin)
+
+ QualType VisitComplexType(const ComplexType *T) {
+ QualType elementType = recurse(T->getElementType());
+ if (elementType.isNull())
+ return QualType();
+
+ if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getComplexType(elementType);
+ }
+
+ QualType VisitPointerType(const PointerType *T) {
+ QualType pointeeType = recurse(T->getPointeeType());
+ if (pointeeType.isNull())
+ return QualType();
+
+ if (pointeeType.getAsOpaquePtr() == T->getPointeeType().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getPointerType(pointeeType);
+ }
+
+ QualType VisitBlockPointerType(const BlockPointerType *T) {
+ QualType pointeeType = recurse(T->getPointeeType());
+ if (pointeeType.isNull())
+ return QualType();
+
+ if (pointeeType.getAsOpaquePtr() == T->getPointeeType().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getBlockPointerType(pointeeType);
+ }
+
+ QualType VisitLValueReferenceType(const LValueReferenceType *T) {
+ QualType pointeeType = recurse(T->getPointeeTypeAsWritten());
+ if (pointeeType.isNull())
+ return QualType();
+
+ if (pointeeType.getAsOpaquePtr()
+ == T->getPointeeTypeAsWritten().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getLValueReferenceType(pointeeType, T->isSpelledAsLValue());
+ }
+
+ QualType VisitRValueReferenceType(const RValueReferenceType *T) {
+ QualType pointeeType = recurse(T->getPointeeTypeAsWritten());
+ if (pointeeType.isNull())
+ return QualType();
+
+ if (pointeeType.getAsOpaquePtr()
+ == T->getPointeeTypeAsWritten().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getRValueReferenceType(pointeeType);
+ }
+
+ QualType VisitMemberPointerType(const MemberPointerType *T) {
+ QualType pointeeType = recurse(T->getPointeeType());
+ if (pointeeType.isNull())
+ return QualType();
+
+ if (pointeeType.getAsOpaquePtr() == T->getPointeeType().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getMemberPointerType(pointeeType, T->getClass());
+ }
+
+ QualType VisitConstantArrayType(const ConstantArrayType *T) {
+ QualType elementType = recurse(T->getElementType());
+ if (elementType.isNull())
+ return QualType();
+
+ if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getConstantArrayType(elementType, T->getSize(),
+ T->getSizeModifier(),
+ T->getIndexTypeCVRQualifiers());
+ }
+
+ QualType VisitVariableArrayType(const VariableArrayType *T) {
+ QualType elementType = recurse(T->getElementType());
+ if (elementType.isNull())
+ return QualType();
+
+ if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getVariableArrayType(elementType, T->getSizeExpr(),
+ T->getSizeModifier(),
+ T->getIndexTypeCVRQualifiers(),
+ T->getBracketsRange());
+ }
+
+ QualType VisitIncompleteArrayType(const IncompleteArrayType *T) {
+ QualType elementType = recurse(T->getElementType());
+ if (elementType.isNull())
+ return QualType();
+
+ if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getIncompleteArrayType(elementType, T->getSizeModifier(),
+ T->getIndexTypeCVRQualifiers());
+ }
+
+ QualType VisitVectorType(const VectorType *T) {
+ QualType elementType = recurse(T->getElementType());
+ if (elementType.isNull())
+ return QualType();
+
+ if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getVectorType(elementType, T->getNumElements(),
+ T->getVectorKind());
+ }
+
+ QualType VisitExtVectorType(const ExtVectorType *T) {
+ QualType elementType = recurse(T->getElementType());
+ if (elementType.isNull())
+ return QualType();
+
+ if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getExtVectorType(elementType, T->getNumElements());
+ }
+
+ QualType VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
+ QualType returnType = recurse(T->getReturnType());
+ if (returnType.isNull())
+ return QualType();
+
+ if (returnType.getAsOpaquePtr() == T->getReturnType().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getFunctionNoProtoType(returnType, T->getExtInfo());
+ }
+
+ QualType VisitFunctionProtoType(const FunctionProtoType *T) {
+ QualType returnType = recurse(T->getReturnType());
+ if (returnType.isNull())
+ return QualType();
+
+ // Transform parameter types.
+ SmallVector<QualType, 4> paramTypes;
+ bool paramChanged = false;
+ for (auto paramType : T->getParamTypes()) {
+ QualType newParamType = recurse(paramType);
+ if (newParamType.isNull())
+ return QualType();
+
+ if (newParamType.getAsOpaquePtr() != paramType.getAsOpaquePtr())
+ paramChanged = true;
+
+ paramTypes.push_back(newParamType);
+ }
+
+ // Transform extended info.
+ FunctionProtoType::ExtProtoInfo info = T->getExtProtoInfo();
+ bool exceptionChanged = false;
+ if (info.ExceptionSpec.Type == EST_Dynamic) {
+ SmallVector<QualType, 4> exceptionTypes;
+ for (auto exceptionType : info.ExceptionSpec.Exceptions) {
+ QualType newExceptionType = recurse(exceptionType);
+ if (newExceptionType.isNull())
+ return QualType();
+
+ if (newExceptionType.getAsOpaquePtr()
+ != exceptionType.getAsOpaquePtr())
+ exceptionChanged = true;
+
+ exceptionTypes.push_back(newExceptionType);
+ }
+
+ if (exceptionChanged) {
+ info.ExceptionSpec.Exceptions =
+ llvm::makeArrayRef(exceptionTypes).copy(Ctx);
+ }
+ }
+
+ if (returnType.getAsOpaquePtr() == T->getReturnType().getAsOpaquePtr() &&
+ !paramChanged && !exceptionChanged)
+ return QualType(T, 0);
+
+ return Ctx.getFunctionType(returnType, paramTypes, info);
+ }
+
+ QualType VisitParenType(const ParenType *T) {
+ QualType innerType = recurse(T->getInnerType());
+ if (innerType.isNull())
+ return QualType();
+
+ if (innerType.getAsOpaquePtr() == T->getInnerType().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getParenType(innerType);
+ }
+
+ TRIVIAL_TYPE_CLASS(Typedef)
+
+ QualType VisitAdjustedType(const AdjustedType *T) {
+ QualType originalType = recurse(T->getOriginalType());
+ if (originalType.isNull())
+ return QualType();
+
+ QualType adjustedType = recurse(T->getAdjustedType());
+ if (adjustedType.isNull())
+ return QualType();
+
+ if (originalType.getAsOpaquePtr()
+ == T->getOriginalType().getAsOpaquePtr() &&
+ adjustedType.getAsOpaquePtr() == T->getAdjustedType().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getAdjustedType(originalType, adjustedType);
+ }
+
+ QualType VisitDecayedType(const DecayedType *T) {
+ QualType originalType = recurse(T->getOriginalType());
+ if (originalType.isNull())
+ return QualType();
+
+ if (originalType.getAsOpaquePtr()
+ == T->getOriginalType().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getDecayedType(originalType);
+ }
+
+ TRIVIAL_TYPE_CLASS(TypeOfExpr)
+ TRIVIAL_TYPE_CLASS(TypeOf)
+ TRIVIAL_TYPE_CLASS(Decltype)
+ TRIVIAL_TYPE_CLASS(UnaryTransform)
+ TRIVIAL_TYPE_CLASS(Record)
+ TRIVIAL_TYPE_CLASS(Enum)
+
+ // FIXME: Non-trivial to implement, but important for C++
+ TRIVIAL_TYPE_CLASS(Elaborated)
+
+ QualType VisitAttributedType(const AttributedType *T) {
+ QualType modifiedType = recurse(T->getModifiedType());
+ if (modifiedType.isNull())
+ return QualType();
+
+ QualType equivalentType = recurse(T->getEquivalentType());
+ if (equivalentType.isNull())
+ return QualType();
+
+ if (modifiedType.getAsOpaquePtr()
+ == T->getModifiedType().getAsOpaquePtr() &&
+ equivalentType.getAsOpaquePtr()
+ == T->getEquivalentType().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getAttributedType(T->getAttrKind(), modifiedType,
+ equivalentType);
+ }
+
+ QualType VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) {
+ QualType replacementType = recurse(T->getReplacementType());
+ if (replacementType.isNull())
+ return QualType();
+
+ if (replacementType.getAsOpaquePtr()
+ == T->getReplacementType().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getSubstTemplateTypeParmType(T->getReplacedParameter(),
+ replacementType);
+ }
+
+ // FIXME: Non-trivial to implement, but important for C++
+ TRIVIAL_TYPE_CLASS(TemplateSpecialization)
+
+ QualType VisitAutoType(const AutoType *T) {
+ if (!T->isDeduced())
+ return QualType(T, 0);
+
+ QualType deducedType = recurse(T->getDeducedType());
+ if (deducedType.isNull())
+ return QualType();
+
+ if (deducedType.getAsOpaquePtr()
+ == T->getDeducedType().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getAutoType(deducedType, T->getKeyword(),
+ T->isDependentType());
+ }
+
+ // FIXME: Non-trivial to implement, but important for C++
+ TRIVIAL_TYPE_CLASS(PackExpansion)
+
+ QualType VisitObjCObjectType(const ObjCObjectType *T) {
+ QualType baseType = recurse(T->getBaseType());
+ if (baseType.isNull())
+ return QualType();
+
+ // Transform type arguments.
+ bool typeArgChanged = false;
+ SmallVector<QualType, 4> typeArgs;
+ for (auto typeArg : T->getTypeArgsAsWritten()) {
+ QualType newTypeArg = recurse(typeArg);
+ if (newTypeArg.isNull())
+ return QualType();
+
+ if (newTypeArg.getAsOpaquePtr() != typeArg.getAsOpaquePtr())
+ typeArgChanged = true;
+
+ typeArgs.push_back(newTypeArg);
+ }
+
+ if (baseType.getAsOpaquePtr() == T->getBaseType().getAsOpaquePtr() &&
+ !typeArgChanged)
+ return QualType(T, 0);
+
+ return Ctx.getObjCObjectType(baseType, typeArgs,
+ llvm::makeArrayRef(T->qual_begin(),
+ T->getNumProtocols()),
+ T->isKindOfTypeAsWritten());
+ }
+
+ TRIVIAL_TYPE_CLASS(ObjCInterface)
+
+ QualType VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
+ QualType pointeeType = recurse(T->getPointeeType());
+ if (pointeeType.isNull())
+ return QualType();
+
+ if (pointeeType.getAsOpaquePtr()
+ == T->getPointeeType().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getObjCObjectPointerType(pointeeType);
+ }
+
+ QualType VisitAtomicType(const AtomicType *T) {
+ QualType valueType = recurse(T->getValueType());
+ if (valueType.isNull())
+ return QualType();
+
+ if (valueType.getAsOpaquePtr()
+ == T->getValueType().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getAtomicType(valueType);
+ }
+
+#undef TRIVIAL_TYPE_CLASS
+};
+
+/// Perform a simple type transformation that does not change the
+/// semantics of the type.
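+///
+/// The function \p f is tried on the type first; if it leaves the type
+/// unchanged, SimpleTransformVisitor rebuilds the type structurally, applying
+/// the transformation recursively to the component types (substObjCTypeArgs()
+/// below is the primary client).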
+template<typename F>
+QualType simpleTransform(ASTContext &ctx, QualType type, F &&f) {
+ // Transform the type. If it changed, return the transformed result.
+ QualType transformed = f(type);
+ if (transformed.getAsOpaquePtr() != type.getAsOpaquePtr())
+ return transformed;
+
+ // Split out the qualifiers from the type.
+ SplitQualType splitType = type.split();
+
+ // Visit the type itself.
+ SimpleTransformVisitor<F> visitor(ctx, std::move(f));
+ QualType result = visitor.Visit(splitType.Ty);
+ if (result.isNull())
+ return result;
+
+ // Reconstruct the transformed type by applying the local qualifiers
+ // from the split type.
+ return ctx.getQualifiedType(result, splitType.Quals);
+}
+
+} // end anonymous namespace
+
+/// Substitute the given type arguments for Objective-C type
+/// parameters within the given type, recursively.
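+///
+/// For example, substituting the single type argument "NSString *" for a
+/// type parameter T rewrites a block type written as "T (^)(T)" into
+/// "NSString *(^)(NSString *)".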
+QualType QualType::substObjCTypeArgs(
+ ASTContext &ctx,
+ ArrayRef<QualType> typeArgs,
+ ObjCSubstitutionContext context) const {
+ return simpleTransform(ctx, *this,
+ [&](QualType type) -> QualType {
+ SplitQualType splitType = type.split();
+
+ // Replace an Objective-C type parameter reference with the corresponding
+ // type argument.
+ if (const auto *typedefTy = dyn_cast<TypedefType>(splitType.Ty)) {
+ if (auto *typeParam = dyn_cast<ObjCTypeParamDecl>(typedefTy->getDecl())) {
+ // If we have type arguments, use them.
+ if (!typeArgs.empty()) {
+ // FIXME: Introduce SubstObjCTypeParamType ?
+ QualType argType = typeArgs[typeParam->getIndex()];
+ return ctx.getQualifiedType(argType, splitType.Quals);
+ }
+
+ switch (context) {
+ case ObjCSubstitutionContext::Ordinary:
+ case ObjCSubstitutionContext::Parameter:
+ case ObjCSubstitutionContext::Superclass:
+ // Substitute the bound.
+ return ctx.getQualifiedType(typeParam->getUnderlyingType(),
+ splitType.Quals);
+
+ case ObjCSubstitutionContext::Result:
+ case ObjCSubstitutionContext::Property: {
+ // Substitute the __kindof form of the underlying type.
+ const auto *objPtr = typeParam->getUnderlyingType()
+ ->castAs<ObjCObjectPointerType>();
+
+ // __kindof types, id, and Class don't need an additional
+ // __kindof.
+ if (objPtr->isKindOfType() || objPtr->isObjCIdOrClassType())
+ return ctx.getQualifiedType(typeParam->getUnderlyingType(),
+ splitType.Quals);
+
+ // Add __kindof.
+ const auto *obj = objPtr->getObjectType();
+ QualType resultTy = ctx.getObjCObjectType(obj->getBaseType(),
+ obj->getTypeArgsAsWritten(),
+ obj->getProtocols(),
+ /*isKindOf=*/true);
+
+ // Rebuild object pointer type.
+ resultTy = ctx.getObjCObjectPointerType(resultTy);
+ return ctx.getQualifiedType(resultTy, splitType.Quals);
+ }
+ }
+ }
+ }
+
+ // If we have a function type, update the context appropriately.
+ if (const auto *funcType = dyn_cast<FunctionType>(splitType.Ty)) {
+ // Substitute result type.
+ QualType returnType = funcType->getReturnType().substObjCTypeArgs(
+ ctx,
+ typeArgs,
+ ObjCSubstitutionContext::Result);
+ if (returnType.isNull())
+ return QualType();
+
+ // Handle non-prototyped functions, which only substitute into the result
+ // type.
+ if (isa<FunctionNoProtoType>(funcType)) {
+ // If the return type was unchanged, do nothing.
+ if (returnType.getAsOpaquePtr()
+ == funcType->getReturnType().getAsOpaquePtr())
+ return type;
+
+ // Otherwise, build a new type.
+ return ctx.getFunctionNoProtoType(returnType, funcType->getExtInfo());
+ }
+
+ const auto *funcProtoType = cast<FunctionProtoType>(funcType);
+
+ // Transform parameter types.
+ SmallVector<QualType, 4> paramTypes;
+ bool paramChanged = false;
+ for (auto paramType : funcProtoType->getParamTypes()) {
+ QualType newParamType = paramType.substObjCTypeArgs(
+ ctx,
+ typeArgs,
+ ObjCSubstitutionContext::Parameter);
+ if (newParamType.isNull())
+ return QualType();
+
+ if (newParamType.getAsOpaquePtr() != paramType.getAsOpaquePtr())
+ paramChanged = true;
+
+ paramTypes.push_back(newParamType);
+ }
+
+ // Transform extended info.
+ FunctionProtoType::ExtProtoInfo info = funcProtoType->getExtProtoInfo();
+ bool exceptionChanged = false;
+ if (info.ExceptionSpec.Type == EST_Dynamic) {
+ SmallVector<QualType, 4> exceptionTypes;
+ for (auto exceptionType : info.ExceptionSpec.Exceptions) {
+ QualType newExceptionType = exceptionType.substObjCTypeArgs(
+ ctx,
+ typeArgs,
+ ObjCSubstitutionContext::Ordinary);
+ if (newExceptionType.isNull())
+ return QualType();
+
+ if (newExceptionType.getAsOpaquePtr()
+ != exceptionType.getAsOpaquePtr())
+ exceptionChanged = true;
+
+ exceptionTypes.push_back(newExceptionType);
+ }
+
+ if (exceptionChanged) {
+ info.ExceptionSpec.Exceptions =
+ llvm::makeArrayRef(exceptionTypes).copy(ctx);
+ }
+ }
+
+ if (returnType.getAsOpaquePtr()
+ == funcProtoType->getReturnType().getAsOpaquePtr() &&
+ !paramChanged && !exceptionChanged)
+ return type;
+
+ return ctx.getFunctionType(returnType, paramTypes, info);
+ }
+
+ // Substitute into the type arguments of a specialized Objective-C object
+ // type.
+ if (const auto *objcObjectType = dyn_cast<ObjCObjectType>(splitType.Ty)) {
+ if (objcObjectType->isSpecializedAsWritten()) {
+ SmallVector<QualType, 4> newTypeArgs;
+ bool anyChanged = false;
+ for (auto typeArg : objcObjectType->getTypeArgsAsWritten()) {
+ QualType newTypeArg = typeArg.substObjCTypeArgs(
+ ctx, typeArgs,
+ ObjCSubstitutionContext::Ordinary);
+ if (newTypeArg.isNull())
+ return QualType();
+
+ if (newTypeArg.getAsOpaquePtr() != typeArg.getAsOpaquePtr()) {
+ // If we're substituting based on an unspecialized context type,
+ // produce an unspecialized type.
+ ArrayRef<ObjCProtocolDecl *> protocols(
+ objcObjectType->qual_begin(),
+ objcObjectType->getNumProtocols());
+ if (typeArgs.empty() &&
+ context != ObjCSubstitutionContext::Superclass) {
+ return ctx.getObjCObjectType(
+ objcObjectType->getBaseType(), { },
+ protocols,
+ objcObjectType->isKindOfTypeAsWritten());
+ }
+
+ anyChanged = true;
+ }
+
+ newTypeArgs.push_back(newTypeArg);
+ }
+
+ if (anyChanged) {
+ ArrayRef<ObjCProtocolDecl *> protocols(
+ objcObjectType->qual_begin(),
+ objcObjectType->getNumProtocols());
+ return ctx.getObjCObjectType(objcObjectType->getBaseType(),
+ newTypeArgs, protocols,
+ objcObjectType->isKindOfTypeAsWritten());
+ }
+ }
+
+ return type;
+ }
+
+ return type;
+ });
+}
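+
+// An illustrative use of the substitution above, assuming a Foundation-style
+// declaration:
+//
+//   @interface NSArray<T : id> : NSObject
+//   - (T)firstObject;
+//   @end
+//
+// With the type argument list { NSString * }, the result type of
+// -firstObject, written 'T', becomes 'NSString *'. With no type arguments,
+// the type parameter's bound is substituted instead; in Result and Property
+// contexts the bound is wrapped in __kindof unless it is already a __kindof
+// type, 'id', or 'Class'.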
+
+QualType QualType::substObjCMemberType(QualType objectType,
+ const DeclContext *dc,
+ ObjCSubstitutionContext context) const {
+ if (auto subs = objectType->getObjCSubstitutions(dc))
+ return substObjCTypeArgs(dc->getParentASTContext(), *subs, context);
+
+ return *this;
+}
+
+QualType QualType::stripObjCKindOfType(const ASTContext &constCtx) const {
+ // FIXME: Because ASTContext::getAttributedType() is non-const.
+ auto &ctx = const_cast<ASTContext &>(constCtx);
+ return simpleTransform(ctx, *this,
+ [&](QualType type) -> QualType {
+ SplitQualType splitType = type.split();
+ if (auto *objType = splitType.Ty->getAs<ObjCObjectType>()) {
+ if (!objType->isKindOfType())
+ return type;
+
+ QualType baseType
+ = objType->getBaseType().stripObjCKindOfType(ctx);
+ return ctx.getQualifiedType(
+ ctx.getObjCObjectType(baseType,
+ objType->getTypeArgsAsWritten(),
+ objType->getProtocols(),
+ /*isKindOf=*/false),
+ splitType.Quals);
+ }
+
+ return type;
+ });
+}
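+
+// Illustrative effect of the transform above: applied to '__kindof NSView *',
+// it rebuilds the pointee object type without the __kindof marker, yielding
+// plain 'NSView *' (and recursively strips __kindof from the base type).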
+
+Optional<ArrayRef<QualType>> Type::getObjCSubstitutions(
+ const DeclContext *dc) const {
+ // Look through method scopes.
+ if (auto method = dyn_cast<ObjCMethodDecl>(dc))
+ dc = method->getDeclContext();
+
+ // Find the class or category in which the type we're substituting
+ // was declared.
+ const ObjCInterfaceDecl *dcClassDecl = dyn_cast<ObjCInterfaceDecl>(dc);
+ const ObjCCategoryDecl *dcCategoryDecl = nullptr;
+ ObjCTypeParamList *dcTypeParams = nullptr;
+ if (dcClassDecl) {
+ // If the class does not have any type parameters, there's no
+ // substitution to do.
+ dcTypeParams = dcClassDecl->getTypeParamList();
+ if (!dcTypeParams)
+ return None;
+ } else {
+ // If we are in neither a class nor a category, there's no
+ // substitution to perform.
+ dcCategoryDecl = dyn_cast<ObjCCategoryDecl>(dc);
+ if (!dcCategoryDecl)
+ return None;
+
+ // If the category does not have any type parameters, there's no
+ // substitution to do.
+ dcTypeParams = dcCategoryDecl->getTypeParamList();
+ if (!dcTypeParams)
+ return None;
+
+ dcClassDecl = dcCategoryDecl->getClassInterface();
+ if (!dcClassDecl)
+ return None;
+ }
+ assert(dcTypeParams && "No substitutions to perform");
+ assert(dcClassDecl && "No class context");
+
+ // Find the underlying object type.
+ const ObjCObjectType *objectType;
+ if (const auto *objectPointerType = getAs<ObjCObjectPointerType>()) {
+ objectType = objectPointerType->getObjectType();
+ } else if (getAs<BlockPointerType>()) {
+ ASTContext &ctx = dc->getParentASTContext();
+ objectType = ctx.getObjCObjectType(ctx.ObjCBuiltinIdTy, { }, { })
+ ->castAs<ObjCObjectType>();
+ } else {
+ objectType = getAs<ObjCObjectType>();
+ }
+
+ // Extract the class from the receiver object type.
+ ObjCInterfaceDecl *curClassDecl = objectType ? objectType->getInterface()
+ : nullptr;
+ if (!curClassDecl) {
+ // If we don't have a context type (e.g., this is "id" or some
+ // variant thereof), substitute the bounds.
+ return llvm::ArrayRef<QualType>();
+ }
+
+ // Follow the superclass chain until we've mapped the receiver type
+ // to the same class as the context.
+ while (curClassDecl != dcClassDecl) {
+ // Map to the superclass type.
+ QualType superType = objectType->getSuperClassType();
+ if (superType.isNull()) {
+ objectType = nullptr;
+ break;
+ }
+
+ objectType = superType->castAs<ObjCObjectType>();
+ curClassDecl = objectType->getInterface();
+ }
+
+ // If we don't have a receiver type, or the receiver type does not
+ // have type arguments, substitute in the defaults.
+ if (!objectType || objectType->isUnspecialized()) {
+ return llvm::ArrayRef<QualType>();
+ }
+
+ // The receiver type has the type arguments we want.
+ return objectType->getTypeArgs();
+}
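+
+// A sketch of the walk above, assuming Foundation-style parameterized
+// classes:
+//
+//   @interface NSArray<T> : NSObject @end
+//   @interface NSMutableArray<U> : NSArray<U> @end
+//
+// Asking for the substitutions of a member declared in NSArray, with a
+// receiver of type 'NSMutableArray<NSString *> *', maps the receiver up the
+// superclass chain to 'NSArray<NSString *>', so the returned type-argument
+// list is { NSString * }.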
+
+bool Type::acceptsObjCTypeParams() const {
+ if (auto *IfaceT = getAsObjCInterfaceType()) {
+ if (auto *ID = IfaceT->getInterface()) {
+ if (ID->getTypeParamList())
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void ObjCObjectType::computeSuperClassTypeSlow() const {
+ // Retrieve the class declaration for this type. If there isn't one
+ // (e.g., this is some variant of "id" or "Class"), then there is no
+ // superclass type.
+ ObjCInterfaceDecl *classDecl = getInterface();
+ if (!classDecl) {
+ CachedSuperClassType.setInt(true);
+ return;
+ }
+
+ // Extract the superclass type.
+ const ObjCObjectType *superClassObjTy = classDecl->getSuperClassType();
+ if (!superClassObjTy) {
+ CachedSuperClassType.setInt(true);
+ return;
+ }
+
+ ObjCInterfaceDecl *superClassDecl = superClassObjTy->getInterface();
+ if (!superClassDecl) {
+ CachedSuperClassType.setInt(true);
+ return;
+ }
+
+ // If the superclass doesn't have type parameters, then there is no
+ // substitution to perform.
+ QualType superClassType(superClassObjTy, 0);
+ ObjCTypeParamList *superClassTypeParams = superClassDecl->getTypeParamList();
+ if (!superClassTypeParams) {
+ CachedSuperClassType.setPointerAndInt(
+ superClassType->castAs<ObjCObjectType>(), true);
+ return;
+ }
+
+ // If the superclass reference is unspecialized, return it.
+ if (superClassObjTy->isUnspecialized()) {
+ CachedSuperClassType.setPointerAndInt(superClassObjTy, true);
+ return;
+ }
+
+ // If the subclass is not parameterized, there aren't any type
+ // parameters in the superclass reference to substitute.
+ ObjCTypeParamList *typeParams = classDecl->getTypeParamList();
+ if (!typeParams) {
+ CachedSuperClassType.setPointerAndInt(
+ superClassType->castAs<ObjCObjectType>(), true);
+ return;
+ }
+
+ // If the subclass type isn't specialized, return the unspecialized
+ // superclass.
+ if (isUnspecialized()) {
+ QualType unspecializedSuper
+ = classDecl->getASTContext().getObjCInterfaceType(
+ superClassObjTy->getInterface());
+ CachedSuperClassType.setPointerAndInt(
+ unspecializedSuper->castAs<ObjCObjectType>(),
+ true);
+ return;
+ }
+
+ // Substitute the provided type arguments into the superclass type.
+ ArrayRef<QualType> typeArgs = getTypeArgs();
+ assert(typeArgs.size() == typeParams->size());
+ CachedSuperClassType.setPointerAndInt(
+ superClassType.substObjCTypeArgs(classDecl->getASTContext(), typeArgs,
+ ObjCSubstitutionContext::Superclass)
+ ->castAs<ObjCObjectType>(),
+ true);
+}
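+
+// For example, with the NSArray/NSMutableArray declarations sketched above,
+// the superclass type of 'NSMutableArray<NSString *>' is computed by
+// substituting 'NSString *' for 'U' in the written superclass reference
+// 'NSArray<U>', yielding 'NSArray<NSString *>'.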
+
+const ObjCInterfaceType *ObjCObjectPointerType::getInterfaceType() const {
+ if (auto interfaceDecl = getObjectType()->getInterface()) {
+ return interfaceDecl->getASTContext().getObjCInterfaceType(interfaceDecl)
+ ->castAs<ObjCInterfaceType>();
+ }
+
+ return nullptr;
+}
+
+QualType ObjCObjectPointerType::getSuperClassType() const {
+ QualType superObjectType = getObjectType()->getSuperClassType();
+ if (superObjectType.isNull())
+ return superObjectType;
+
+ ASTContext &ctx = getInterfaceDecl()->getASTContext();
+ return ctx.getObjCObjectPointerType(superObjectType);
+}
+
+const ObjCObjectType *Type::getAsObjCQualifiedInterfaceType() const {
+ // There is no sugar for ObjCObjectTypes; just return the canonical
+ // type pointer if it is the right class. There is no typedef information to
+ // return, and these cannot be address-space qualified.
+ if (const ObjCObjectType *T = getAs<ObjCObjectType>())
+ if (T->getNumProtocols() && T->getInterface())
+ return T;
+ return nullptr;
+}
+
+bool Type::isObjCQualifiedInterfaceType() const {
+ return getAsObjCQualifiedInterfaceType() != nullptr;
+}
+
+const ObjCObjectPointerType *Type::getAsObjCQualifiedIdType() const {
+ // There is no sugar for ObjCQualifiedIdTypes; just return the canonical
+ // type pointer if it is the right class.
+ if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>()) {
+ if (OPT->isObjCQualifiedIdType())
+ return OPT;
+ }
+ return nullptr;
+}
+
+const ObjCObjectPointerType *Type::getAsObjCQualifiedClassType() const {
+ // There is no sugar for ObjCQualifiedClassTypes; just return the canonical
+ // type pointer if it is the right class.
+ if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>()) {
+ if (OPT->isObjCQualifiedClassType())
+ return OPT;
+ }
+ return nullptr;
+}
+
+const ObjCObjectType *Type::getAsObjCInterfaceType() const {
+ if (const ObjCObjectType *OT = getAs<ObjCObjectType>()) {
+ if (OT->getInterface())
+ return OT;
+ }
+ return nullptr;
+}
+const ObjCObjectPointerType *Type::getAsObjCInterfacePointerType() const {
+ if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>()) {
+ if (OPT->getInterfaceType())
+ return OPT;
+ }
+ return nullptr;
+}
+
+const CXXRecordDecl *Type::getPointeeCXXRecordDecl() const {
+ QualType PointeeType;
+ if (const PointerType *PT = getAs<PointerType>())
+ PointeeType = PT->getPointeeType();
+ else if (const ReferenceType *RT = getAs<ReferenceType>())
+ PointeeType = RT->getPointeeType();
+ else
+ return nullptr;
+
+ if (const RecordType *RT = PointeeType->getAs<RecordType>())
+ return dyn_cast<CXXRecordDecl>(RT->getDecl());
+
+ return nullptr;
+}
+
+CXXRecordDecl *Type::getAsCXXRecordDecl() const {
+ return dyn_cast_or_null<CXXRecordDecl>(getAsTagDecl());
+}
+
+TagDecl *Type::getAsTagDecl() const {
+ if (const auto *TT = getAs<TagType>())
+ return cast<TagDecl>(TT->getDecl());
+ if (const auto *Injected = getAs<InjectedClassNameType>())
+ return Injected->getDecl();
+
+ return nullptr;
+}
+
+namespace {
+ class GetContainedAutoVisitor :
+ public TypeVisitor<GetContainedAutoVisitor, AutoType*> {
+ public:
+ using TypeVisitor<GetContainedAutoVisitor, AutoType*>::Visit;
+ AutoType *Visit(QualType T) {
+ if (T.isNull())
+ return nullptr;
+ return Visit(T.getTypePtr());
+ }
+
+ // The 'auto' type itself.
+ AutoType *VisitAutoType(const AutoType *AT) {
+ return const_cast<AutoType*>(AT);
+ }
+
+ // Only these types can contain the desired 'auto' type.
+ AutoType *VisitPointerType(const PointerType *T) {
+ return Visit(T->getPointeeType());
+ }
+ AutoType *VisitBlockPointerType(const BlockPointerType *T) {
+ return Visit(T->getPointeeType());
+ }
+ AutoType *VisitReferenceType(const ReferenceType *T) {
+ return Visit(T->getPointeeTypeAsWritten());
+ }
+ AutoType *VisitMemberPointerType(const MemberPointerType *T) {
+ return Visit(T->getPointeeType());
+ }
+ AutoType *VisitArrayType(const ArrayType *T) {
+ return Visit(T->getElementType());
+ }
+ AutoType *VisitDependentSizedExtVectorType(
+ const DependentSizedExtVectorType *T) {
+ return Visit(T->getElementType());
+ }
+ AutoType *VisitVectorType(const VectorType *T) {
+ return Visit(T->getElementType());
+ }
+ AutoType *VisitFunctionType(const FunctionType *T) {
+ return Visit(T->getReturnType());
+ }
+ AutoType *VisitParenType(const ParenType *T) {
+ return Visit(T->getInnerType());
+ }
+ AutoType *VisitAttributedType(const AttributedType *T) {
+ return Visit(T->getModifiedType());
+ }
+ AutoType *VisitAdjustedType(const AdjustedType *T) {
+ return Visit(T->getOriginalType());
+ }
+ };
+}
+
+AutoType *Type::getContainedAutoType() const {
+ return GetContainedAutoVisitor().Visit(this);
+}
+
+bool Type::hasIntegerRepresentation() const {
+ if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ return VT->getElementType()->isIntegerType();
+ else
+ return isIntegerType();
+}
+
+/// \brief Determine whether this type is an integral type.
+///
+/// This routine determines whether the given type is an integral type per
+/// C++ [basic.fundamental]p7. Although the C standard does not define the
+/// term "integral type", it has a similar term "integer type", and in C++
+/// the two terms are equivalent. However, C's "integer type" includes
+/// enumeration types, while C++'s "integer type" does not. The \c ASTContext
+/// parameter is used to determine whether we should be following the C or
+/// C++ rules when determining whether this type is an integral/integer type.
+///
+/// For cases where C permits "an integer type" and C++ permits "an integral
+/// type", use this routine.
+///
+/// For cases where C permits "an integer type" and C++ permits "an integral
+/// or enumeration type", use \c isIntegralOrEnumerationType() instead.
+///
+/// \param Ctx The context in which this type occurs.
+///
+/// \returns true if the type is considered an integral type, false otherwise.
+bool Type::isIntegralType(ASTContext &Ctx) const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::Int128;
+
+ // Complete enum types are integral in C.
+ if (!Ctx.getLangOpts().CPlusPlus)
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ return ET->getDecl()->isComplete();
+
+ return false;
+}
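+
+// For example, a complete 'enum E { A };' is an integral type under C
+// language options but not under C++, where enumerations are not integral;
+// callers that want C++'s "integral or enumeration type" wording should use
+// isIntegralOrEnumerationType() instead.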
+
+bool Type::isIntegralOrUnscopedEnumerationType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::Int128;
+
+ // Check for a complete enum type; incomplete enum types are not properly an
+ // enumeration type in the sense required here.
+ // C++0x: However, if the underlying type of the enum is fixed, it is
+ // considered complete.
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
+
+ return false;
+}
+
+bool Type::isCharType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::Char_U ||
+ BT->getKind() == BuiltinType::UChar ||
+ BT->getKind() == BuiltinType::Char_S ||
+ BT->getKind() == BuiltinType::SChar;
+ return false;
+}
+
+bool Type::isWideCharType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::WChar_S ||
+ BT->getKind() == BuiltinType::WChar_U;
+ return false;
+}
+
+bool Type::isChar16Type() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::Char16;
+ return false;
+}
+
+bool Type::isChar32Type() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::Char32;
+ return false;
+}
+
+/// \brief Determine whether this type is any of the built-in character
+/// types.
+bool Type::isAnyCharacterType() const {
+ const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType);
+ if (!BT) return false;
+ switch (BT->getKind()) {
+ default: return false;
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ case BuiltinType::WChar_U:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ case BuiltinType::WChar_S:
+ return true;
+ }
+}
+
+/// isSignedIntegerType - Return true if this is an integer type that is
+/// signed, according to C99 6.2.5p4 [char, signed char, short, int, long..],
+/// or an enum decl which has a signed representation.
+bool Type::isSignedIntegerType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ return BT->getKind() >= BuiltinType::Char_S &&
+ BT->getKind() <= BuiltinType::Int128;
+ }
+
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ // Incomplete enum types are not treated as integer types.
+ // FIXME: In C++, enum types are never integer types.
+ if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
+ return ET->getDecl()->getIntegerType()->isSignedIntegerType();
+ }
+
+ return false;
+}
+
+bool Type::isSignedIntegerOrEnumerationType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ return BT->getKind() >= BuiltinType::Char_S &&
+ BT->getKind() <= BuiltinType::Int128;
+ }
+
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ if (ET->getDecl()->isComplete())
+ return ET->getDecl()->getIntegerType()->isSignedIntegerType();
+ }
+
+ return false;
+}
+
+bool Type::hasSignedIntegerRepresentation() const {
+ if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ return VT->getElementType()->isSignedIntegerOrEnumerationType();
+ else
+ return isSignedIntegerOrEnumerationType();
+}
+
+/// isUnsignedIntegerType - Return true if this is an integer type that is
+/// unsigned, according to C99 6.2.5p6 [which returns true for _Bool], or an
+/// enum decl which has an unsigned representation.
+bool Type::isUnsignedIntegerType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::UInt128;
+ }
+
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ // Incomplete enum types are not treated as integer types.
+ // FIXME: In C++, enum types are never integer types.
+ if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
+ return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
+ }
+
+ return false;
+}
+
+bool Type::isUnsignedIntegerOrEnumerationType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::UInt128;
+ }
+
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ if (ET->getDecl()->isComplete())
+ return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
+ }
+
+ return false;
+}
+
+bool Type::hasUnsignedIntegerRepresentation() const {
+ if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ return VT->getElementType()->isUnsignedIntegerOrEnumerationType();
+ else
+ return isUnsignedIntegerOrEnumerationType();
+}
+
+bool Type::isFloatingType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Half &&
+ BT->getKind() <= BuiltinType::LongDouble;
+ if (const ComplexType *CT = dyn_cast<ComplexType>(CanonicalType))
+ return CT->getElementType()->isFloatingType();
+ return false;
+}
+
+bool Type::hasFloatingRepresentation() const {
+ if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ return VT->getElementType()->isFloatingType();
+ else
+ return isFloatingType();
+}
+
+bool Type::isRealFloatingType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->isFloatingPoint();
+ return false;
+}
+
+bool Type::isRealType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::LongDouble;
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
+ return false;
+}
+
+bool Type::isArithmeticType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::LongDouble;
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ // GCC allows forward declaration of enum types (forbidden by C99 6.7.2.3p2).
+ // If a body isn't seen by the time we get here, return false.
+ //
+ // C++0x: Enumerations are not arithmetic types. For now, just return
+ // false for scoped enumerations since that will disable any
+ // unwanted implicit conversions.
+ return !ET->getDecl()->isScoped() && ET->getDecl()->isComplete();
+ return isa<ComplexType>(CanonicalType);
+}
+
+Type::ScalarTypeKind Type::getScalarTypeKind() const {
+ assert(isScalarType());
+
+ const Type *T = CanonicalType.getTypePtr();
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(T)) {
+ if (BT->getKind() == BuiltinType::Bool) return STK_Bool;
+ if (BT->getKind() == BuiltinType::NullPtr) return STK_CPointer;
+ if (BT->isInteger()) return STK_Integral;
+ if (BT->isFloatingPoint()) return STK_Floating;
+ llvm_unreachable("unknown scalar builtin type");
+ } else if (isa<PointerType>(T)) {
+ return STK_CPointer;
+ } else if (isa<BlockPointerType>(T)) {
+ return STK_BlockPointer;
+ } else if (isa<ObjCObjectPointerType>(T)) {
+ return STK_ObjCObjectPointer;
+ } else if (isa<MemberPointerType>(T)) {
+ return STK_MemberPointer;
+ } else if (isa<EnumType>(T)) {
+ assert(cast<EnumType>(T)->getDecl()->isComplete());
+ return STK_Integral;
+ } else if (const ComplexType *CT = dyn_cast<ComplexType>(T)) {
+ if (CT->getElementType()->isRealFloatingType())
+ return STK_FloatingComplex;
+ return STK_IntegralComplex;
+ }
+
+ llvm_unreachable("unknown scalar type");
+}
+
+/// \brief Determines whether the type is a C++ aggregate type or C
+/// aggregate or union type.
+///
+/// An aggregate type is an array or a class type (struct, union, or
+/// class) that has no user-declared constructors, no private or
+/// protected non-static data members, no base classes, and no virtual
+/// functions (C++ [dcl.init.aggr]p1). The notion of an aggregate type
+/// subsumes the notion of C aggregates (C99 6.2.5p21) because it also
+/// includes union types.
+bool Type::isAggregateType() const {
+ if (const RecordType *Record = dyn_cast<RecordType>(CanonicalType)) {
+ if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(Record->getDecl()))
+ return ClassDecl->isAggregate();
+
+ return true;
+ }
+
+ return isa<ArrayType>(CanonicalType);
+}
+
+/// isConstantSizeType - Return true if this is not a variable sized type,
+/// according to the rules of C99 6.7.5p3. It is not legal to call this on
+/// incomplete types or dependent types.
+bool Type::isConstantSizeType() const {
+ assert(!isIncompleteType() && "This doesn't make sense for incomplete types");
+ assert(!isDependentType() && "This doesn't make sense for dependent types");
+ // The VAT must have a size, as it is known to be complete.
+ return !isa<VariableArrayType>(CanonicalType);
+}
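+
+// For example, 'int[10]' has a constant size, while a C99 variable length
+// array such as 'int[n]' does not.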
+
+/// isIncompleteType - Return true if this is an incomplete type (C99 6.2.5p1)
+/// - a type that can describe objects, but which lacks information needed to
+/// determine its size.
+bool Type::isIncompleteType(NamedDecl **Def) const {
+ if (Def)
+ *Def = nullptr;
+
+ switch (CanonicalType->getTypeClass()) {
+ default: return false;
+ case Builtin:
+ // Void is the only incomplete builtin type. Per C99 6.2.5p19, it can never
+ // be completed.
+ return isVoidType();
+ case Enum: {
+ EnumDecl *EnumD = cast<EnumType>(CanonicalType)->getDecl();
+ if (Def)
+ *Def = EnumD;
+
+ // An enumeration with fixed underlying type is complete (C++0x 7.2p3).
+ if (EnumD->isFixed())
+ return false;
+
+ return !EnumD->isCompleteDefinition();
+ }
+ case Record: {
+ // A tagged type (struct/union/enum/class) is incomplete if the decl is a
+ // forward declaration, but not a full definition (C99 6.2.5p22).
+ RecordDecl *Rec = cast<RecordType>(CanonicalType)->getDecl();
+ if (Def)
+ *Def = Rec;
+ return !Rec->isCompleteDefinition();
+ }
+ case ConstantArray:
+ // An array is incomplete if its element type is incomplete
+ // (C++ [dcl.array]p1).
+ // We don't handle variable arrays (they're not allowed in C++) or
+ // dependent-sized arrays (dependent types are never treated as incomplete).
+ return cast<ArrayType>(CanonicalType)->getElementType()
+ ->isIncompleteType(Def);
+ case IncompleteArray:
+ // An array of unknown size is an incomplete type (C99 6.2.5p22).
+ return true;
+ case MemberPointer: {
+ // Member pointers in the MS ABI have special behavior in
+ // RequireCompleteType: they attach a MSInheritanceAttr to the CXXRecordDecl
+ // to indicate which inheritance model to use.
+ auto *MPTy = cast<MemberPointerType>(CanonicalType);
+ const Type *ClassTy = MPTy->getClass();
+ // Member pointers with dependent class types don't get special treatment.
+ if (ClassTy->isDependentType())
+ return false;
+ const CXXRecordDecl *RD = ClassTy->getAsCXXRecordDecl();
+ ASTContext &Context = RD->getASTContext();
+ // Member pointers not in the MS ABI don't get special treatment.
+ if (!Context.getTargetInfo().getCXXABI().isMicrosoft())
+ return false;
+ // The inheritance attribute might only be present on the most recent
+ // CXXRecordDecl; use that one.
+ RD = RD->getMostRecentDecl();
+ // Nothing interesting to do if the inheritance attribute is already set.
+ if (RD->hasAttr<MSInheritanceAttr>())
+ return false;
+ return true;
+ }
+ case ObjCObject:
+ return cast<ObjCObjectType>(CanonicalType)->getBaseType()
+ ->isIncompleteType(Def);
+ case ObjCInterface: {
+ // ObjC interfaces are incomplete if they are @class, not @interface.
+ ObjCInterfaceDecl *Interface
+ = cast<ObjCInterfaceType>(CanonicalType)->getDecl();
+ if (Def)
+ *Def = Interface;
+ return !Interface->hasDefinition();
+ }
+ }
+}
+
+bool QualType::isPODType(ASTContext &Context) const {
+ // C++11 has a more relaxed definition of POD.
+ if (Context.getLangOpts().CPlusPlus11)
+ return isCXX11PODType(Context);
+
+ return isCXX98PODType(Context);
+}
+
+bool QualType::isCXX98PODType(ASTContext &Context) const {
+ // The compiler shouldn't query this for incomplete types, but the user
+ // might. We return false for that case, except for incomplete arrays of
+ // PODs, which are PODs according to the standard.
+ if (isNull())
+ return false;
+
+ if ((*this)->isIncompleteArrayType())
+ return Context.getBaseElementType(*this).isCXX98PODType(Context);
+
+ if ((*this)->isIncompleteType())
+ return false;
+
+ if (Context.getLangOpts().ObjCAutoRefCount) {
+ switch (getObjCLifetime()) {
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_None:
+ break;
+ }
+ }
+
+ QualType CanonicalType = getTypePtr()->CanonicalType;
+ switch (CanonicalType->getTypeClass()) {
+ // Everything not explicitly mentioned is not POD.
+ default: return false;
+ case Type::VariableArray:
+ case Type::ConstantArray:
+ // IncompleteArray is handled above.
+ return Context.getBaseElementType(*this).isCXX98PODType(Context);
+
+ case Type::ObjCObjectPointer:
+ case Type::BlockPointer:
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Pointer:
+ case Type::MemberPointer:
+ case Type::Vector:
+ case Type::ExtVector:
+ return true;
+
+ case Type::Enum:
+ return true;
+
+ case Type::Record:
+ if (CXXRecordDecl *ClassDecl
+ = dyn_cast<CXXRecordDecl>(cast<RecordType>(CanonicalType)->getDecl()))
+ return ClassDecl->isPOD();
+
+ // C struct/union is POD.
+ return true;
+ }
+}
+
+bool QualType::isTrivialType(ASTContext &Context) const {
+ // The compiler shouldn't query this for incomplete types, but the user might.
+ // We return false for that case, except for incomplete arrays of PODs,
+ // which are PODs according to the standard.
+ if (isNull())
+ return false;
+
+ if ((*this)->isArrayType())
+ return Context.getBaseElementType(*this).isTrivialType(Context);
+
+ // Return false for incomplete types after skipping any incomplete array
+ // types which are expressly allowed by the standard and thus our API.
+ if ((*this)->isIncompleteType())
+ return false;
+
+ if (Context.getLangOpts().ObjCAutoRefCount) {
+ switch (getObjCLifetime()) {
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_None:
+ if ((*this)->isObjCLifetimeType())
+ return false;
+ break;
+ }
+ }
+
+ QualType CanonicalType = getTypePtr()->CanonicalType;
+ if (CanonicalType->isDependentType())
+ return false;
+
+ // C++0x [basic.types]p9:
+ // Scalar types, trivial class types, arrays of such types, and
+ // cv-qualified versions of these types are collectively called trivial
+ // types.
+
+ // As an extension, Clang treats vector types as Scalar types.
+ if (CanonicalType->isScalarType() || CanonicalType->isVectorType())
+ return true;
+ if (const RecordType *RT = CanonicalType->getAs<RecordType>()) {
+ if (const CXXRecordDecl *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ // C++11 [class]p6:
+ // A trivial class is a class that has a default constructor,
+ // has no non-trivial default constructors, and is trivially
+ // copyable.
+ return ClassDecl->hasDefaultConstructor() &&
+ !ClassDecl->hasNonTrivialDefaultConstructor() &&
+ ClassDecl->isTriviallyCopyable();
+ }
+
+ return true;
+ }
+
+ // No other types can match.
+ return false;
+}
+
+bool QualType::isTriviallyCopyableType(ASTContext &Context) const {
+ if ((*this)->isArrayType())
+ return Context.getBaseElementType(*this).isTriviallyCopyableType(Context);
+
+ if (Context.getLangOpts().ObjCAutoRefCount) {
+ switch (getObjCLifetime()) {
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_None:
+ if ((*this)->isObjCLifetimeType())
+ return false;
+ break;
+ }
+ }
+
+ // C++11 [basic.types]p9
+ // Scalar types, trivially copyable class types, arrays of such types, and
+ // non-volatile const-qualified versions of these types are collectively
+ // called trivially copyable types.
+
+ QualType CanonicalType = getCanonicalType();
+ if (CanonicalType->isDependentType())
+ return false;
+
+ if (CanonicalType.isVolatileQualified())
+ return false;
+
+ // Return false for incomplete types after skipping any incomplete array types
+ // which are expressly allowed by the standard and thus our API.
+ if (CanonicalType->isIncompleteType())
+ return false;
+
+ // As an extension, Clang treats vector types as Scalar types.
+ if (CanonicalType->isScalarType() || CanonicalType->isVectorType())
+ return true;
+
+ if (const RecordType *RT = CanonicalType->getAs<RecordType>()) {
+ if (const CXXRecordDecl *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (!ClassDecl->isTriviallyCopyable()) return false;
+ }
+
+ return true;
+ }
+
+ // No other types can match.
+ return false;
+}
+
+bool Type::isLiteralType(const ASTContext &Ctx) const {
+ if (isDependentType())
+ return false;
+
+ // C++1y [basic.types]p10:
+ // A type is a literal type if it is:
+ // -- cv void; or
+ if (Ctx.getLangOpts().CPlusPlus14 && isVoidType())
+ return true;
+
+ // C++11 [basic.types]p10:
+ // A type is a literal type if it is:
+ // [...]
+ // -- an array of literal type other than an array of runtime bound; or
+ if (isVariableArrayType())
+ return false;
+ const Type *BaseTy = getBaseElementTypeUnsafe();
+ assert(BaseTy && "NULL element type");
+
+ // Return false for incomplete types after skipping any incomplete array
+ // types; those are expressly allowed by the standard and thus our API.
+ if (BaseTy->isIncompleteType())
+ return false;
+
+ // C++11 [basic.types]p10:
+ // A type is a literal type if it is:
+ // -- a scalar type; or
+ // As an extension, Clang treats vector types and complex types as
+ // literal types.
+ if (BaseTy->isScalarType() || BaseTy->isVectorType() ||
+ BaseTy->isAnyComplexType())
+ return true;
+ // -- a reference type; or
+ if (BaseTy->isReferenceType())
+ return true;
+ // -- a class type that has all of the following properties:
+ if (const RecordType *RT = BaseTy->getAs<RecordType>()) {
+ // -- a trivial destructor,
+ // -- every constructor call and full-expression in the
+ // brace-or-equal-initializers for non-static data members (if any)
+ // is a constant expression,
+ // -- it is an aggregate type or has at least one constexpr
+ // constructor or constructor template that is not a copy or move
+ // constructor, and
+ // -- all non-static data members and base classes of literal types
+ //
+ // We resolve DR1361 by ignoring the second bullet.
+ if (const CXXRecordDecl *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ return ClassDecl->isLiteral();
+
+ return true;
+ }
+
+ // We treat _Atomic T as a literal type if T is a literal type.
+ if (const AtomicType *AT = BaseTy->getAs<AtomicType>())
+ return AT->getValueType()->isLiteralType(Ctx);
+
+ // If this type hasn't been deduced yet, then conservatively assume that
+ // it'll work out to be a literal type.
+ if (isa<AutoType>(BaseTy->getCanonicalTypeInternal()))
+ return true;
+
+ return false;
+}
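+
+// For example, 'struct S { int x; };' is a literal type (an aggregate with a
+// trivial destructor), while 'struct D { ~D(); };' is not, because its
+// destructor is non-trivial.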
+
+bool Type::isStandardLayoutType() const {
+ if (isDependentType())
+ return false;
+
+ // C++0x [basic.types]p9:
+ // Scalar types, standard-layout class types, arrays of such types, and
+ // cv-qualified versions of these types are collectively called
+ // standard-layout types.
+ const Type *BaseTy = getBaseElementTypeUnsafe();
+ assert(BaseTy && "NULL element type");
+
+ // Return false for incomplete types after skipping any incomplete array
+ // types which are expressly allowed by the standard and thus our API.
+ if (BaseTy->isIncompleteType())
+ return false;
+
+ // As an extension, Clang treats vector types as Scalar types.
+ if (BaseTy->isScalarType() || BaseTy->isVectorType()) return true;
+ if (const RecordType *RT = BaseTy->getAs<RecordType>()) {
+ if (const CXXRecordDecl *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (!ClassDecl->isStandardLayout())
+ return false;
+
+ // Default to 'true' for non-C++ class types.
+ // FIXME: This is a bit dubious, but plain C structs should trivially meet
+ // all the requirements of standard layout classes.
+ return true;
+ }
+
+ // No other types can match.
+ return false;
+}
+
+// This is effectively the intersection of isTrivialType and
+// isStandardLayoutType. We implement it directly to avoid redundant
+// conversions from a type to a CXXRecordDecl.
+bool QualType::isCXX11PODType(ASTContext &Context) const {
+ const Type *ty = getTypePtr();
+ if (ty->isDependentType())
+ return false;
+
+ if (Context.getLangOpts().ObjCAutoRefCount) {
+ switch (getObjCLifetime()) {
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_None:
+ break;
+ }
+ }
+
+ // C++11 [basic.types]p9:
+ // Scalar types, POD classes, arrays of such types, and cv-qualified
+ // versions of these types are collectively called trivial types.
+ const Type *BaseTy = ty->getBaseElementTypeUnsafe();
+ assert(BaseTy && "NULL element type");
+
+ // Return false for incomplete types after skipping any incomplete array
+ // types which are expressly allowed by the standard and thus our API.
+ if (BaseTy->isIncompleteType())
+ return false;
+
+ // As an extension, Clang treats vector types as Scalar types.
+ if (BaseTy->isScalarType() || BaseTy->isVectorType()) return true;
+ if (const RecordType *RT = BaseTy->getAs<RecordType>()) {
+ if (const CXXRecordDecl *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ // C++11 [class]p10:
+ // A POD struct is a non-union class that is both a trivial class [...]
+ if (!ClassDecl->isTrivial()) return false;
+
+ // C++11 [class]p10:
+ // A POD struct is a non-union class that is both a trivial class and
+ // a standard-layout class [...]
+ if (!ClassDecl->isStandardLayout()) return false;
+
+ // C++11 [class]p10:
+ // A POD struct is a non-union class that is both a trivial class and
+ // a standard-layout class, and has no non-static data members of type
+ // non-POD struct, non-POD union (or array of such types). [...]
+ //
+ // We don't directly query the recursive aspect as the requirements for
+ // both standard-layout classes and trivial classes apply recursively
+ // already.
+ }
+
+ return true;
+ }
+
+ // No other types can match.
+ return false;
+}
+
+bool Type::isPromotableIntegerType() const {
+ if (const BuiltinType *BT = getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Bool:
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ return true;
+ default:
+ return false;
+ }
+
+ // Enumerated types are promotable to their compatible integer types
+ // (C99 6.3.1.1), a.k.a. their underlying type (C++ [conv.prom]p2).
+ if (const EnumType *ET = getAs<EnumType>()) {
+ if (this->isDependentType() || ET->getDecl()->getPromotionType().isNull()
+ || ET->getDecl()->isScoped())
+ return false;
+
+ return true;
+ }
+
+ return false;
+}
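+
+// For example, 'short' and 'unsigned char' are promotable (the integer
+// promotions take them to 'int'), while 'int' itself, scoped enumerations,
+// and enumerations without a promotion type are not.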
+
+bool Type::isSpecifierType() const {
+ // Note that this intentionally does not use the canonical type.
+ switch (getTypeClass()) {
+ case Builtin:
+ case Record:
+ case Enum:
+ case Typedef:
+ case Complex:
+ case TypeOfExpr:
+ case TypeOf:
+ case TemplateTypeParm:
+ case SubstTemplateTypeParm:
+ case TemplateSpecialization:
+ case Elaborated:
+ case DependentName:
+ case DependentTemplateSpecialization:
+ case ObjCInterface:
+ case ObjCObject:
+ case ObjCObjectPointer: // FIXME: object pointers aren't really specifiers
+ return true;
+ default:
+ return false;
+ }
+}
+
+ElaboratedTypeKeyword
+TypeWithKeyword::getKeywordForTypeSpec(unsigned TypeSpec) {
+ switch (TypeSpec) {
+ default: return ETK_None;
+ case TST_typename: return ETK_Typename;
+ case TST_class: return ETK_Class;
+ case TST_struct: return ETK_Struct;
+ case TST_interface: return ETK_Interface;
+ case TST_union: return ETK_Union;
+ case TST_enum: return ETK_Enum;
+ }
+}
+
+TagTypeKind
+TypeWithKeyword::getTagTypeKindForTypeSpec(unsigned TypeSpec) {
+ switch (TypeSpec) {
+ case TST_class: return TTK_Class;
+ case TST_struct: return TTK_Struct;
+ case TST_interface: return TTK_Interface;
+ case TST_union: return TTK_Union;
+ case TST_enum: return TTK_Enum;
+ }
+
+ llvm_unreachable("Type specifier is not a tag type kind.");
+}
+
+ElaboratedTypeKeyword
+TypeWithKeyword::getKeywordForTagTypeKind(TagTypeKind Kind) {
+ switch (Kind) {
+ case TTK_Class: return ETK_Class;
+ case TTK_Struct: return ETK_Struct;
+ case TTK_Interface: return ETK_Interface;
+ case TTK_Union: return ETK_Union;
+ case TTK_Enum: return ETK_Enum;
+ }
+ llvm_unreachable("Unknown tag type kind.");
+}
+
+TagTypeKind
+TypeWithKeyword::getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword) {
+ switch (Keyword) {
+ case ETK_Class: return TTK_Class;
+ case ETK_Struct: return TTK_Struct;
+ case ETK_Interface: return TTK_Interface;
+ case ETK_Union: return TTK_Union;
+ case ETK_Enum: return TTK_Enum;
+ case ETK_None: // Fall through.
+ case ETK_Typename:
+ llvm_unreachable("Elaborated type keyword is not a tag type kind.");
+ }
+ llvm_unreachable("Unknown elaborated type keyword.");
+}
+
+bool
+TypeWithKeyword::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) {
+ switch (Keyword) {
+ case ETK_None:
+ case ETK_Typename:
+ return false;
+ case ETK_Class:
+ case ETK_Struct:
+ case ETK_Interface:
+ case ETK_Union:
+ case ETK_Enum:
+ return true;
+ }
+ llvm_unreachable("Unknown elaborated type keyword.");
+}
+
+StringRef TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) {
+ switch (Keyword) {
+ case ETK_None: return "";
+ case ETK_Typename: return "typename";
+ case ETK_Class: return "class";
+ case ETK_Struct: return "struct";
+ case ETK_Interface: return "__interface";
+ case ETK_Union: return "union";
+ case ETK_Enum: return "enum";
+ }
+
+ llvm_unreachable("Unknown elaborated type keyword.");
+}
+
+DependentTemplateSpecializationType::DependentTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS, const IdentifierInfo *Name,
+ unsigned NumArgs, const TemplateArgument *Args,
+ QualType Canon)
+ : TypeWithKeyword(Keyword, DependentTemplateSpecialization, Canon, true, true,
+ /*VariablyModified=*/false,
+ NNS && NNS->containsUnexpandedParameterPack()),
+ NNS(NNS), Name(Name), NumArgs(NumArgs) {
+ assert((!NNS || NNS->isDependent()) &&
+ "DependentTemplateSpecializationType requires dependent qualifier");
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ if (Args[I].containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack();
+
+ new (&getArgBuffer()[I]) TemplateArgument(Args[I]);
+ }
+}
+
+void
+DependentTemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context,
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *Qualifier,
+ const IdentifierInfo *Name,
+ unsigned NumArgs,
+ const TemplateArgument *Args) {
+ ID.AddInteger(Keyword);
+ ID.AddPointer(Qualifier);
+ ID.AddPointer(Name);
+ for (unsigned Idx = 0; Idx < NumArgs; ++Idx)
+ Args[Idx].Profile(ID, Context);
+}
+
+bool Type::isElaboratedTypeSpecifier() const {
+ ElaboratedTypeKeyword Keyword;
+ if (const ElaboratedType *Elab = dyn_cast<ElaboratedType>(this))
+ Keyword = Elab->getKeyword();
+ else if (const DependentNameType *DepName = dyn_cast<DependentNameType>(this))
+ Keyword = DepName->getKeyword();
+ else if (const DependentTemplateSpecializationType *DepTST =
+ dyn_cast<DependentTemplateSpecializationType>(this))
+ Keyword = DepTST->getKeyword();
+ else
+ return false;
+
+ return TypeWithKeyword::KeywordIsTagTypeKind(Keyword);
+}
+
+const char *Type::getTypeClassName() const {
+ switch (TypeBits.TC) {
+#define ABSTRACT_TYPE(Derived, Base)
+#define TYPE(Derived, Base) case Derived: return #Derived;
+#include "clang/AST/TypeNodes.def"
+ }
+
+ llvm_unreachable("Invalid type class.");
+}
+
+StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
+ switch (getKind()) {
+ case Void:
+ return "void";
+ case Bool:
+ return Policy.Bool ? "bool" : "_Bool";
+ case Char_S:
+ return "char";
+ case Char_U:
+ return "char";
+ case SChar:
+ return "signed char";
+ case Short:
+ return "short";
+ case Int:
+ return "int";
+ case Long:
+ return "long";
+ case LongLong:
+ return "long long";
+ case Int128:
+ return "__int128";
+ case UChar:
+ return "unsigned char";
+ case UShort:
+ return "unsigned short";
+ case UInt:
+ return "unsigned int";
+ case ULong:
+ return "unsigned long";
+ case ULongLong:
+ return "unsigned long long";
+ case UInt128:
+ return "unsigned __int128";
+ case Half:
+ return Policy.Half ? "half" : "__fp16";
+ case Float:
+ return "float";
+ case Double:
+ return "double";
+ case LongDouble:
+ return "long double";
+ case WChar_S:
+ case WChar_U:
+ return Policy.MSWChar ? "__wchar_t" : "wchar_t";
+ case Char16:
+ return "char16_t";
+ case Char32:
+ return "char32_t";
+ case NullPtr:
+ return "nullptr_t";
+ case Overload:
+ return "<overloaded function type>";
+ case BoundMember:
+ return "<bound member function type>";
+ case PseudoObject:
+ return "<pseudo-object type>";
+ case Dependent:
+ return "<dependent type>";
+ case UnknownAny:
+ return "<unknown type>";
+ case ARCUnbridgedCast:
+ return "<ARC unbridged cast type>";
+ case BuiltinFn:
+ return "<builtin fn type>";
+ case ObjCId:
+ return "id";
+ case ObjCClass:
+ return "Class";
+ case ObjCSel:
+ return "SEL";
+ case OCLImage1d:
+ return "image1d_t";
+ case OCLImage1dArray:
+ return "image1d_array_t";
+ case OCLImage1dBuffer:
+ return "image1d_buffer_t";
+ case OCLImage2d:
+ return "image2d_t";
+ case OCLImage2dArray:
+ return "image2d_array_t";
+ case OCLImage2dDepth:
+ return "image2d_depth_t";
+ case OCLImage2dArrayDepth:
+ return "image2d_array_depth_t";
+ case OCLImage2dMSAA:
+ return "image2d_msaa_t";
+ case OCLImage2dArrayMSAA:
+ return "image2d_array_msaa_t";
+ case OCLImage2dMSAADepth:
+ return "image2d_msaa_depth_t";
+ case OCLImage2dArrayMSAADepth:
+ return "image2d_array_msaa_depth_t";
+ case OCLImage3d:
+ return "image3d_t";
+ case OCLSampler:
+ return "sampler_t";
+ case OCLEvent:
+ return "event_t";
+ case OCLClkEvent:
+ return "clk_event_t";
+ case OCLQueue:
+ return "queue_t";
+ case OCLNDRange:
+ return "ndrange_t";
+ case OCLReserveID:
+ return "reserve_id_t";
+ case OMPArraySection:
+ return "<OpenMP array section type>";
+ }
+
+ llvm_unreachable("Invalid builtin type.");
+}
+
+QualType QualType::getNonLValueExprType(const ASTContext &Context) const {
+ if (const ReferenceType *RefType = getTypePtr()->getAs<ReferenceType>())
+ return RefType->getPointeeType();
+
+ // C++0x [basic.lval]:
+ // Class prvalues can have cv-qualified types; non-class prvalues always
+ // have cv-unqualified types.
+ //
+ // See also C99 6.3.2.1p2.
+ if (!Context.getLangOpts().CPlusPlus ||
+ (!getTypePtr()->isDependentType() && !getTypePtr()->isRecordType()))
+ return getUnqualifiedType();
+
+ return *this;
+}
+
+StringRef FunctionType::getNameForCallConv(CallingConv CC) {
+ switch (CC) {
+ case CC_C: return "cdecl";
+ case CC_X86StdCall: return "stdcall";
+ case CC_X86FastCall: return "fastcall";
+ case CC_X86ThisCall: return "thiscall";
+ case CC_X86Pascal: return "pascal";
+ case CC_X86VectorCall: return "vectorcall";
+ case CC_X86_64Win64: return "ms_abi";
+ case CC_X86_64SysV: return "sysv_abi";
+ case CC_AAPCS: return "aapcs";
+ case CC_AAPCS_VFP: return "aapcs-vfp";
+ case CC_IntelOclBicc: return "intel_ocl_bicc";
+ case CC_SpirFunction: return "spir_function";
+ case CC_SpirKernel: return "spir_kernel";
+ }
+
+ llvm_unreachable("Invalid calling convention.");
+}
+
+FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
+ QualType canonical,
+ const ExtProtoInfo &epi)
+ : FunctionType(FunctionProto, result, canonical,
+ result->isDependentType(),
+ result->isInstantiationDependentType(),
+ result->isVariablyModifiedType(),
+ result->containsUnexpandedParameterPack(), epi.ExtInfo),
+ NumParams(params.size()),
+ NumExceptions(epi.ExceptionSpec.Exceptions.size()),
+ ExceptionSpecType(epi.ExceptionSpec.Type),
+ HasAnyConsumedParams(epi.ConsumedParameters != nullptr),
+ Variadic(epi.Variadic), HasTrailingReturn(epi.HasTrailingReturn) {
+ assert(NumParams == params.size() && "function has too many parameters");
+
+ FunctionTypeBits.TypeQuals = epi.TypeQuals;
+ FunctionTypeBits.RefQualifier = epi.RefQualifier;
+
+ // Fill in the trailing argument array.
+ QualType *argSlot = reinterpret_cast<QualType*>(this+1);
+ for (unsigned i = 0; i != NumParams; ++i) {
+ if (params[i]->isDependentType())
+ setDependent();
+ else if (params[i]->isInstantiationDependentType())
+ setInstantiationDependent();
+
+ if (params[i]->containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack();
+
+ argSlot[i] = params[i];
+ }
+
+ if (getExceptionSpecType() == EST_Dynamic) {
+ // Fill in the exception array.
+ QualType *exnSlot = argSlot + NumParams;
+ unsigned I = 0;
+ for (QualType ExceptionType : epi.ExceptionSpec.Exceptions) {
+ // Note that a dependent exception specification does *not* make
+ // a type dependent; it's not even part of the C++ type system.
+ if (ExceptionType->isInstantiationDependentType())
+ setInstantiationDependent();
+
+ if (ExceptionType->containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack();
+
+ exnSlot[I++] = ExceptionType;
+ }
+ } else if (getExceptionSpecType() == EST_ComputedNoexcept) {
+ // Store the noexcept expression and context.
+ Expr **noexSlot = reinterpret_cast<Expr **>(argSlot + NumParams);
+ *noexSlot = epi.ExceptionSpec.NoexceptExpr;
+
+ if (epi.ExceptionSpec.NoexceptExpr) {
+ if (epi.ExceptionSpec.NoexceptExpr->isValueDependent() ||
+ epi.ExceptionSpec.NoexceptExpr->isInstantiationDependent())
+ setInstantiationDependent();
+
+ if (epi.ExceptionSpec.NoexceptExpr->containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack();
+ }
+ } else if (getExceptionSpecType() == EST_Uninstantiated) {
+ // Store the function decl from which we will resolve our
+ // exception specification.
+ FunctionDecl **slot =
+ reinterpret_cast<FunctionDecl **>(argSlot + NumParams);
+ slot[0] = epi.ExceptionSpec.SourceDecl;
+ slot[1] = epi.ExceptionSpec.SourceTemplate;
+ // This exception specification doesn't make the type dependent, because
+ // it's not instantiated as part of instantiating the type.
+ } else if (getExceptionSpecType() == EST_Unevaluated) {
+ // Store the function decl from which we will resolve our
+ // exception specification.
+ FunctionDecl **slot =
+ reinterpret_cast<FunctionDecl **>(argSlot + NumParams);
+ slot[0] = epi.ExceptionSpec.SourceDecl;
+ }
+
+ if (epi.ConsumedParameters) {
+ bool *consumedParams = const_cast<bool *>(getConsumedParamsBuffer());
+ for (unsigned i = 0; i != NumParams; ++i)
+ consumedParams[i] = epi.ConsumedParameters[i];
+ }
+}
+
+bool FunctionProtoType::hasDependentExceptionSpec() const {
+ if (Expr *NE = getNoexceptExpr())
+ return NE->isValueDependent();
+ for (QualType ET : exceptions())
+ // A pack expansion with a non-dependent pattern is still dependent,
+ // because we don't know whether the pattern is in the exception spec
+ // or not (that depends on whether the pack has 0 expansions).
+ if (ET->isDependentType() || ET->getAs<PackExpansionType>())
+ return true;
+ return false;
+}
+
+FunctionProtoType::NoexceptResult
+FunctionProtoType::getNoexceptSpec(const ASTContext &ctx) const {
+ ExceptionSpecificationType est = getExceptionSpecType();
+ if (est == EST_BasicNoexcept)
+ return NR_Nothrow;
+
+ if (est != EST_ComputedNoexcept)
+ return NR_NoNoexcept;
+
+ Expr *noexceptExpr = getNoexceptExpr();
+ if (!noexceptExpr)
+ return NR_BadNoexcept;
+ if (noexceptExpr->isValueDependent())
+ return NR_Dependent;
+
+ llvm::APSInt value;
+ bool isICE = noexceptExpr->isIntegerConstantExpr(value, ctx, nullptr,
+ /*evaluated*/false);
+ (void)isICE;
+ assert(isICE && "AST should not contain bad noexcept expressions.");
+
+ return value.getBoolValue() ? NR_Nothrow : NR_Throw;
+}
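+
+// For example, 'void f() noexcept(sizeof(int) == 4)' carries a computed
+// noexcept specification whose expression is an ICE, so the evaluation above
+// yields NR_Nothrow or NR_Throw; within a template, an expression such as
+// 'noexcept(sizeof(T) == 4)' is value-dependent and yields NR_Dependent.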
+
+bool FunctionProtoType::isNothrow(const ASTContext &Ctx,
+ bool ResultIfDependent) const {
+ ExceptionSpecificationType EST = getExceptionSpecType();
+ assert(EST != EST_Unevaluated && EST != EST_Uninstantiated);
+ if (EST == EST_DynamicNone || EST == EST_BasicNoexcept)
+ return true;
+
+ if (EST == EST_Dynamic && ResultIfDependent) {
+ // A dynamic exception specification is throwing unless every exception
+ // type is an (unexpanded) pack expansion type.
+ for (unsigned I = 0, N = NumExceptions; I != N; ++I)
+ if (!getExceptionType(I)->getAs<PackExpansionType>())
+ return false;
+ return ResultIfDependent;
+ }
+
+ if (EST != EST_ComputedNoexcept)
+ return false;
+
+ NoexceptResult NR = getNoexceptSpec(Ctx);
+ if (NR == NR_Dependent)
+ return ResultIfDependent;
+ return NR == NR_Nothrow;
+}
+
+bool FunctionProtoType::isTemplateVariadic() const {
+ for (unsigned ArgIdx = getNumParams(); ArgIdx; --ArgIdx)
+ if (isa<PackExpansionType>(getParamType(ArgIdx - 1)))
+ return true;
+
+ return false;
+}
+
+void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
+ const QualType *ArgTys, unsigned NumParams,
+ const ExtProtoInfo &epi,
+ const ASTContext &Context) {
+
+ // We have to be careful not to get ambiguous profile encodings.
+ // Note that valid type pointers are never ambiguous with anything else.
+ //
+ // The encoding grammar begins:
+ // type type* bool int bool
+ // If that final bool is true, then there is a section for the EH spec:
+ // bool type*
+ // This is followed by an optional "consumed argument" section of the
+ // same length as the first type sequence:
+ // bool*
+ // Finally, we have the ext info and trailing return type flag:
+ // int bool
+ //
+ // There is no ambiguity between the consumed arguments and an empty EH
+ // spec because of the leading 'bool' which unambiguously indicates
+ // whether the following bool is the EH spec or part of the arguments.
+
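+ // As a worked illustration (assumed signature): profiling
+ // 'void f(int) noexcept' adds the 'void' and 'int' opaque pointers and the
+ // packed variadic/quals/ref-qualifier/EH-kind integer; since the EH kind is
+ // neither dynamic nor computed and there are no consumed parameters, it
+ // then goes straight to the ext info and the trailing-return flag.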
+ ID.AddPointer(Result.getAsOpaquePtr());
+ for (unsigned i = 0; i != NumParams; ++i)
+ ID.AddPointer(ArgTys[i].getAsOpaquePtr());
+ // This method is relatively performance sensitive, so as a performance
+ // shortcut, use one AddInteger call instead of four for the next four
+ // fields.
+ assert(!(unsigned(epi.Variadic) & ~1) &&
+ !(unsigned(epi.TypeQuals) & ~255) &&
+ !(unsigned(epi.RefQualifier) & ~3) &&
+ !(unsigned(epi.ExceptionSpec.Type) & ~15) &&
+ "Values larger than expected.");
+ ID.AddInteger(unsigned(epi.Variadic) +
+ (epi.TypeQuals << 1) +
+ (epi.RefQualifier << 9) +
+ (epi.ExceptionSpec.Type << 11));
+ if (epi.ExceptionSpec.Type == EST_Dynamic) {
+ for (QualType Ex : epi.ExceptionSpec.Exceptions)
+ ID.AddPointer(Ex.getAsOpaquePtr());
+ } else if (epi.ExceptionSpec.Type == EST_ComputedNoexcept &&
+ epi.ExceptionSpec.NoexceptExpr) {
+ epi.ExceptionSpec.NoexceptExpr->Profile(ID, Context, false);
+ } else if (epi.ExceptionSpec.Type == EST_Uninstantiated ||
+ epi.ExceptionSpec.Type == EST_Unevaluated) {
+ ID.AddPointer(epi.ExceptionSpec.SourceDecl->getCanonicalDecl());
+ }
+ if (epi.ConsumedParameters) {
+ for (unsigned i = 0; i != NumParams; ++i)
+ ID.AddBoolean(epi.ConsumedParameters[i]);
+ }
+ epi.ExtInfo.Profile(ID);
+ ID.AddBoolean(epi.HasTrailingReturn);
+}
+
+void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Ctx) {
+ Profile(ID, getReturnType(), param_type_begin(), NumParams, getExtProtoInfo(),
+ Ctx);
+}
+
+QualType TypedefType::desugar() const {
+ return getDecl()->getUnderlyingType();
+}
+
+TypeOfExprType::TypeOfExprType(Expr *E, QualType can)
+ : Type(TypeOfExpr, can, E->isTypeDependent(),
+ E->isInstantiationDependent(),
+ E->getType()->isVariablyModifiedType(),
+ E->containsUnexpandedParameterPack()),
+ TOExpr(E) {
+}
+
+bool TypeOfExprType::isSugared() const {
+ return !TOExpr->isTypeDependent();
+}
+
+QualType TypeOfExprType::desugar() const {
+ if (isSugared())
+ return getUnderlyingExpr()->getType();
+
+ return QualType(this, 0);
+}
+
+void DependentTypeOfExprType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context, Expr *E) {
+ E->Profile(ID, Context, true);
+}
+
+DecltypeType::DecltypeType(Expr *E, QualType underlyingType, QualType can)
+ // C++11 [temp.type]p2: "If an expression e involves a template parameter,
+ // decltype(e) denotes a unique dependent type." Hence a decltype type is
+ // type-dependent even if its expression is only instantiation-dependent.
+ : Type(Decltype, can, E->isInstantiationDependent(),
+ E->isInstantiationDependent(),
+ E->getType()->isVariablyModifiedType(),
+ E->containsUnexpandedParameterPack()),
+ E(E),
+ UnderlyingType(underlyingType) {
+}
+
+bool DecltypeType::isSugared() const { return !E->isInstantiationDependent(); }
+
+QualType DecltypeType::desugar() const {
+ if (isSugared())
+ return getUnderlyingType();
+
+ return QualType(this, 0);
+}
+
+DependentDecltypeType::DependentDecltypeType(const ASTContext &Context, Expr *E)
+ : DecltypeType(E, Context.DependentTy), Context(Context) { }
+
+void DependentDecltypeType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context, Expr *E) {
+ E->Profile(ID, Context, true);
+}
+
+TagType::TagType(TypeClass TC, const TagDecl *D, QualType can)
+ : Type(TC, can, D->isDependentType(),
+ /*InstantiationDependent=*/D->isDependentType(),
+ /*VariablyModified=*/false,
+ /*ContainsUnexpandedParameterPack=*/false),
+ decl(const_cast<TagDecl*>(D)) {}
+
+static TagDecl *getInterestingTagDecl(TagDecl *decl) {
+ for (auto I : decl->redecls()) {
+ if (I->isCompleteDefinition() || I->isBeingDefined())
+ return I;
+ }
+ // If there's no definition (not even in progress), return what we have.
+ return decl;
+}
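+
+// Illustrative example (editor's note, not upstream code): given
+//   struct S;            // forward declaration
+//   struct S { int x; }; // definition
+// a TagType built from either redeclaration yields the definition from
+// getDecl(), because getInterestingTagDecl walks the redeclaration chain.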
+
+UnaryTransformType::UnaryTransformType(QualType BaseType,
+ QualType UnderlyingType,
+ UTTKind UKind,
+ QualType CanonicalType)
+ : Type(UnaryTransform, CanonicalType, UnderlyingType->isDependentType(),
+ UnderlyingType->isInstantiationDependentType(),
+ UnderlyingType->isVariablyModifiedType(),
+ BaseType->containsUnexpandedParameterPack())
+ , BaseType(BaseType), UnderlyingType(UnderlyingType), UKind(UKind)
+{}
+
+TagDecl *TagType::getDecl() const {
+ return getInterestingTagDecl(decl);
+}
+
+bool TagType::isBeingDefined() const {
+ return getDecl()->isBeingDefined();
+}
+
+bool AttributedType::isQualifier() const {
+ switch (getAttrKind()) {
+ // These are type qualifiers in the traditional C sense: they annotate
+ // something about a specific value/variable of a type. (They aren't
+ // always part of the canonical type, though.)
+ case AttributedType::attr_address_space:
+ case AttributedType::attr_objc_gc:
+ case AttributedType::attr_objc_ownership:
+ case AttributedType::attr_objc_inert_unsafe_unretained:
+ case AttributedType::attr_nonnull:
+ case AttributedType::attr_nullable:
+ case AttributedType::attr_null_unspecified:
+ return true;
+
+ // These aren't qualifiers; they rewrite the modified type to be a
+ // semantically different type.
+ case AttributedType::attr_regparm:
+ case AttributedType::attr_vector_size:
+ case AttributedType::attr_neon_vector_type:
+ case AttributedType::attr_neon_polyvector_type:
+ case AttributedType::attr_pcs:
+ case AttributedType::attr_pcs_vfp:
+ case AttributedType::attr_noreturn:
+ case AttributedType::attr_cdecl:
+ case AttributedType::attr_fastcall:
+ case AttributedType::attr_stdcall:
+ case AttributedType::attr_thiscall:
+ case AttributedType::attr_pascal:
+ case AttributedType::attr_vectorcall:
+ case AttributedType::attr_inteloclbicc:
+ case AttributedType::attr_ms_abi:
+ case AttributedType::attr_sysv_abi:
+ case AttributedType::attr_ptr32:
+ case AttributedType::attr_ptr64:
+ case AttributedType::attr_sptr:
+ case AttributedType::attr_uptr:
+ case AttributedType::attr_objc_kindof:
+ return false;
+ }
+ llvm_unreachable("bad attributed type kind");
+}
+
+bool AttributedType::isMSTypeSpec() const {
+ switch (getAttrKind()) {
+ default: return false;
+ case attr_ptr32:
+ case attr_ptr64:
+ case attr_sptr:
+ case attr_uptr:
+ return true;
+ }
+ llvm_unreachable("invalid attr kind");
+}
+
+bool AttributedType::isCallingConv() const {
+ switch (getAttrKind()) {
+ case attr_ptr32:
+ case attr_ptr64:
+ case attr_sptr:
+ case attr_uptr:
+ case attr_address_space:
+ case attr_regparm:
+ case attr_vector_size:
+ case attr_neon_vector_type:
+ case attr_neon_polyvector_type:
+ case attr_objc_gc:
+ case attr_objc_ownership:
+ case attr_objc_inert_unsafe_unretained:
+ case attr_noreturn:
+ case attr_nonnull:
+ case attr_nullable:
+ case attr_null_unspecified:
+ case attr_objc_kindof:
+ return false;
+
+ case attr_pcs:
+ case attr_pcs_vfp:
+ case attr_cdecl:
+ case attr_fastcall:
+ case attr_stdcall:
+ case attr_thiscall:
+ case attr_vectorcall:
+ case attr_pascal:
+ case attr_ms_abi:
+ case attr_sysv_abi:
+ case attr_inteloclbicc:
+ return true;
+ }
+ llvm_unreachable("invalid attr kind");
+}
+
+CXXRecordDecl *InjectedClassNameType::getDecl() const {
+ return cast<CXXRecordDecl>(getInterestingTagDecl(Decl));
+}
+
+IdentifierInfo *TemplateTypeParmType::getIdentifier() const {
+ return isCanonicalUnqualified() ? nullptr : getDecl()->getIdentifier();
+}
+
+SubstTemplateTypeParmPackType::
+SubstTemplateTypeParmPackType(const TemplateTypeParmType *Param,
+ QualType Canon,
+ const TemplateArgument &ArgPack)
+  : Type(SubstTemplateTypeParmPack, Canon, /*Dependent=*/true,
+         /*InstantiationDependent=*/true, /*VariablyModified=*/false,
+         /*ContainsUnexpandedParameterPack=*/true),
+ Replaced(Param),
+ Arguments(ArgPack.pack_begin()), NumArguments(ArgPack.pack_size())
+{
+}
+
+TemplateArgument SubstTemplateTypeParmPackType::getArgumentPack() const {
+ return TemplateArgument(llvm::makeArrayRef(Arguments, NumArguments));
+}
+
+void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getReplacedParameter(), getArgumentPack());
+}
+
+void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID,
+ const TemplateTypeParmType *Replaced,
+ const TemplateArgument &ArgPack) {
+ ID.AddPointer(Replaced);
+ ID.AddInteger(ArgPack.pack_size());
+ for (const auto &P : ArgPack.pack_elements())
+ ID.AddPointer(P.getAsType().getAsOpaquePtr());
+}
+
+bool TemplateSpecializationType::
+anyDependentTemplateArguments(const TemplateArgumentListInfo &Args,
+ bool &InstantiationDependent) {
+ return anyDependentTemplateArguments(Args.getArgumentArray(), Args.size(),
+ InstantiationDependent);
+}
+
+bool TemplateSpecializationType::
+anyDependentTemplateArguments(const TemplateArgumentLoc *Args, unsigned N,
+ bool &InstantiationDependent) {
+ for (unsigned i = 0; i != N; ++i) {
+ if (Args[i].getArgument().isDependent()) {
+ InstantiationDependent = true;
+ return true;
+ }
+
+ if (Args[i].getArgument().isInstantiationDependent())
+ InstantiationDependent = true;
+ }
+ return false;
+}
+
+TemplateSpecializationType::
+TemplateSpecializationType(TemplateName T,
+ const TemplateArgument *Args, unsigned NumArgs,
+ QualType Canon, QualType AliasedType)
+ : Type(TemplateSpecialization,
+ Canon.isNull()? QualType(this, 0) : Canon,
+ Canon.isNull()? true : Canon->isDependentType(),
+ Canon.isNull()? true : Canon->isInstantiationDependentType(),
+         /*VariablyModified=*/false,
+ T.containsUnexpandedParameterPack()),
+ Template(T), NumArgs(NumArgs), TypeAlias(!AliasedType.isNull()) {
+ assert(!T.getAsDependentTemplateName() &&
+ "Use DependentTemplateSpecializationType for dependent template-name");
+ assert((T.getKind() == TemplateName::Template ||
+ T.getKind() == TemplateName::SubstTemplateTemplateParm ||
+ T.getKind() == TemplateName::SubstTemplateTemplateParmPack) &&
+ "Unexpected template name for TemplateSpecializationType");
+
+ TemplateArgument *TemplateArgs
+ = reinterpret_cast<TemplateArgument *>(this + 1);
+ for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
+ // Update instantiation-dependent and variably-modified bits.
+ // If the canonical type exists and is non-dependent, the template
+ // specialization type can be non-dependent even if one of the type
+ // arguments is. Given:
+ // template<typename T> using U = int;
+ // U<T> is always non-dependent, irrespective of the type T.
+ // However, U<Ts> contains an unexpanded parameter pack, even though
+ // its expansion (and thus its desugared type) doesn't.
+ if (Args[Arg].isInstantiationDependent())
+ setInstantiationDependent();
+ if (Args[Arg].getKind() == TemplateArgument::Type &&
+ Args[Arg].getAsType()->isVariablyModifiedType())
+ setVariablyModified();
+ if (Args[Arg].containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack();
+ new (&TemplateArgs[Arg]) TemplateArgument(Args[Arg]);
+ }
+
+ // Store the aliased type if this is a type alias template specialization.
+ if (TypeAlias) {
+ TemplateArgument *Begin = reinterpret_cast<TemplateArgument *>(this + 1);
+ *reinterpret_cast<QualType*>(Begin + getNumArgs()) = AliasedType;
+ }
+}
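+
+// Editor's note on the layout assumed by the casts above (not upstream
+// text): the arguments live in memory trailing the object, i.e.
+//   [TemplateSpecializationType][TemplateArgument x NumArgs][QualType]
+// where the final QualType slot exists only when TypeAlias is true.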
+
+void
+TemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
+ TemplateName T,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ const ASTContext &Context) {
+ T.Profile(ID);
+ for (unsigned Idx = 0; Idx < NumArgs; ++Idx)
+ Args[Idx].Profile(ID, Context);
+}
+
+QualType
+QualifierCollector::apply(const ASTContext &Context, QualType QT) const {
+ if (!hasNonFastQualifiers())
+ return QT.withFastQualifiers(getFastQualifiers());
+
+ return Context.getQualifiedType(QT, *this);
+}
+
+QualType
+QualifierCollector::apply(const ASTContext &Context, const Type *T) const {
+ if (!hasNonFastQualifiers())
+ return QualType(T, getFastQualifiers());
+
+ return Context.getQualifiedType(T, *this);
+}
+
+void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID,
+ QualType BaseType,
+ ArrayRef<QualType> typeArgs,
+ ArrayRef<ObjCProtocolDecl *> protocols,
+ bool isKindOf) {
+ ID.AddPointer(BaseType.getAsOpaquePtr());
+ ID.AddInteger(typeArgs.size());
+ for (auto typeArg : typeArgs)
+ ID.AddPointer(typeArg.getAsOpaquePtr());
+ ID.AddInteger(protocols.size());
+ for (auto proto : protocols)
+ ID.AddPointer(proto);
+ ID.AddBoolean(isKindOf);
+}
+
+void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getBaseType(), getTypeArgsAsWritten(),
+ llvm::makeArrayRef(qual_begin(), getNumProtocols()),
+ isKindOfTypeAsWritten());
+}
+
+namespace {
+
+/// \brief The cached properties of a type.
+class CachedProperties {
+ Linkage L;
+ bool local;
+
+public:
+ CachedProperties(Linkage L, bool local) : L(L), local(local) {}
+
+ Linkage getLinkage() const { return L; }
+ bool hasLocalOrUnnamedType() const { return local; }
+
+ friend CachedProperties merge(CachedProperties L, CachedProperties R) {
+ Linkage MergedLinkage = minLinkage(L.L, R.L);
+ return CachedProperties(MergedLinkage,
+ L.hasLocalOrUnnamedType() | R.hasLocalOrUnnamedType());
+ }
+};
+}
+
+static CachedProperties computeCachedProperties(const Type *T);
+
+namespace clang {
+/// The type-property cache. This is templated so as to be
+/// instantiated at an internal type to prevent unnecessary symbol
+/// leakage.
+template <class Private> class TypePropertyCache {
+public:
+ static CachedProperties get(QualType T) {
+ return get(T.getTypePtr());
+ }
+
+ static CachedProperties get(const Type *T) {
+ ensure(T);
+ return CachedProperties(T->TypeBits.getLinkage(),
+ T->TypeBits.hasLocalOrUnnamedType());
+ }
+
+ static void ensure(const Type *T) {
+ // If the cache is valid, we're okay.
+ if (T->TypeBits.isCacheValid()) return;
+
+ // If this type is non-canonical, ask its canonical type for the
+ // relevant information.
+ if (!T->isCanonicalUnqualified()) {
+ const Type *CT = T->getCanonicalTypeInternal().getTypePtr();
+ ensure(CT);
+ T->TypeBits.CacheValid = true;
+ T->TypeBits.CachedLinkage = CT->TypeBits.CachedLinkage;
+ T->TypeBits.CachedLocalOrUnnamed = CT->TypeBits.CachedLocalOrUnnamed;
+ return;
+ }
+
+ // Compute the cached properties and then set the cache.
+ CachedProperties Result = computeCachedProperties(T);
+ T->TypeBits.CacheValid = true;
+ T->TypeBits.CachedLinkage = Result.getLinkage();
+ T->TypeBits.CachedLocalOrUnnamed = Result.hasLocalOrUnnamedType();
+ }
+};
+}
+
+// Instantiate the friend template at a private class. In a
+// reasonable implementation, these symbols will be internal.
+// It is terrible that this is the best way to accomplish this.
+namespace { class Private {}; }
+typedef TypePropertyCache<Private> Cache;
+
+static CachedProperties computeCachedProperties(const Type *T) {
+ switch (T->getTypeClass()) {
+#define TYPE(Class,Base)
+#define NON_CANONICAL_TYPE(Class,Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("didn't expect a non-canonical type here");
+
+#define TYPE(Class,Base)
+#define DEPENDENT_TYPE(Class,Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class,Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ // Treat instantiation-dependent types as external.
+ assert(T->isInstantiationDependentType());
+ return CachedProperties(ExternalLinkage, false);
+
+ case Type::Auto:
+ // Give non-deduced 'auto' types external linkage. We should only see them
+ // here in error recovery.
+ return CachedProperties(ExternalLinkage, false);
+
+ case Type::Builtin:
+ // C++ [basic.link]p8:
+ // A type is said to have linkage if and only if:
+ // - it is a fundamental type (3.9.1); or
+ return CachedProperties(ExternalLinkage, false);
+
+ case Type::Record:
+ case Type::Enum: {
+ const TagDecl *Tag = cast<TagType>(T)->getDecl();
+
+ // C++ [basic.link]p8:
+ // - it is a class or enumeration type that is named (or has a name
+ // for linkage purposes (7.1.3)) and the name has linkage; or
+ // - it is a specialization of a class template (14); or
+ Linkage L = Tag->getLinkageInternal();
+ bool IsLocalOrUnnamed =
+ Tag->getDeclContext()->isFunctionOrMethod() ||
+ !Tag->hasNameForLinkage();
+ return CachedProperties(L, IsLocalOrUnnamed);
+ }
+
+ // C++ [basic.link]p8:
+ // - it is a compound type (3.9.2) other than a class or enumeration,
+ // compounded exclusively from types that have linkage; or
+ case Type::Complex:
+ return Cache::get(cast<ComplexType>(T)->getElementType());
+ case Type::Pointer:
+ return Cache::get(cast<PointerType>(T)->getPointeeType());
+ case Type::BlockPointer:
+ return Cache::get(cast<BlockPointerType>(T)->getPointeeType());
+ case Type::LValueReference:
+ case Type::RValueReference:
+ return Cache::get(cast<ReferenceType>(T)->getPointeeType());
+ case Type::MemberPointer: {
+ const MemberPointerType *MPT = cast<MemberPointerType>(T);
+ return merge(Cache::get(MPT->getClass()),
+ Cache::get(MPT->getPointeeType()));
+ }
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ return Cache::get(cast<ArrayType>(T)->getElementType());
+ case Type::Vector:
+ case Type::ExtVector:
+ return Cache::get(cast<VectorType>(T)->getElementType());
+ case Type::FunctionNoProto:
+ return Cache::get(cast<FunctionType>(T)->getReturnType());
+ case Type::FunctionProto: {
+ const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
+ CachedProperties result = Cache::get(FPT->getReturnType());
+ for (const auto &ai : FPT->param_types())
+ result = merge(result, Cache::get(ai));
+ return result;
+ }
+ case Type::ObjCInterface: {
+ Linkage L = cast<ObjCInterfaceType>(T)->getDecl()->getLinkageInternal();
+ return CachedProperties(L, false);
+ }
+ case Type::ObjCObject:
+ return Cache::get(cast<ObjCObjectType>(T)->getBaseType());
+ case Type::ObjCObjectPointer:
+ return Cache::get(cast<ObjCObjectPointerType>(T)->getPointeeType());
+ case Type::Atomic:
+ return Cache::get(cast<AtomicType>(T)->getValueType());
+ }
+
+ llvm_unreachable("unhandled type class");
+}
+
+/// \brief Determine the linkage of this type.
+Linkage Type::getLinkage() const {
+ Cache::ensure(this);
+ return TypeBits.getLinkage();
+}
+
+bool Type::hasUnnamedOrLocalType() const {
+ Cache::ensure(this);
+ return TypeBits.hasLocalOrUnnamedType();
+}
+
+static LinkageInfo computeLinkageInfo(QualType T);
+
+static LinkageInfo computeLinkageInfo(const Type *T) {
+ switch (T->getTypeClass()) {
+#define TYPE(Class,Base)
+#define NON_CANONICAL_TYPE(Class,Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("didn't expect a non-canonical type here");
+
+#define TYPE(Class,Base)
+#define DEPENDENT_TYPE(Class,Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class,Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ // Treat instantiation-dependent types as external.
+ assert(T->isInstantiationDependentType());
+ return LinkageInfo::external();
+
+ case Type::Builtin:
+ return LinkageInfo::external();
+
+ case Type::Auto:
+ return LinkageInfo::external();
+
+ case Type::Record:
+ case Type::Enum:
+ return cast<TagType>(T)->getDecl()->getLinkageAndVisibility();
+
+ case Type::Complex:
+ return computeLinkageInfo(cast<ComplexType>(T)->getElementType());
+ case Type::Pointer:
+ return computeLinkageInfo(cast<PointerType>(T)->getPointeeType());
+ case Type::BlockPointer:
+ return computeLinkageInfo(cast<BlockPointerType>(T)->getPointeeType());
+ case Type::LValueReference:
+ case Type::RValueReference:
+ return computeLinkageInfo(cast<ReferenceType>(T)->getPointeeType());
+ case Type::MemberPointer: {
+ const MemberPointerType *MPT = cast<MemberPointerType>(T);
+ LinkageInfo LV = computeLinkageInfo(MPT->getClass());
+ LV.merge(computeLinkageInfo(MPT->getPointeeType()));
+ return LV;
+ }
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ return computeLinkageInfo(cast<ArrayType>(T)->getElementType());
+ case Type::Vector:
+ case Type::ExtVector:
+ return computeLinkageInfo(cast<VectorType>(T)->getElementType());
+ case Type::FunctionNoProto:
+ return computeLinkageInfo(cast<FunctionType>(T)->getReturnType());
+ case Type::FunctionProto: {
+ const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
+ LinkageInfo LV = computeLinkageInfo(FPT->getReturnType());
+ for (const auto &ai : FPT->param_types())
+ LV.merge(computeLinkageInfo(ai));
+ return LV;
+ }
+ case Type::ObjCInterface:
+ return cast<ObjCInterfaceType>(T)->getDecl()->getLinkageAndVisibility();
+ case Type::ObjCObject:
+ return computeLinkageInfo(cast<ObjCObjectType>(T)->getBaseType());
+ case Type::ObjCObjectPointer:
+ return computeLinkageInfo(cast<ObjCObjectPointerType>(T)->getPointeeType());
+ case Type::Atomic:
+ return computeLinkageInfo(cast<AtomicType>(T)->getValueType());
+ }
+
+ llvm_unreachable("unhandled type class");
+}
+
+static LinkageInfo computeLinkageInfo(QualType T) {
+ return computeLinkageInfo(T.getTypePtr());
+}
+
+bool Type::isLinkageValid() const {
+ if (!TypeBits.isCacheValid())
+ return true;
+
+ return computeLinkageInfo(getCanonicalTypeInternal()).getLinkage() ==
+ TypeBits.getLinkage();
+}
+
+LinkageInfo Type::getLinkageAndVisibility() const {
+ if (!isCanonicalUnqualified())
+ return computeLinkageInfo(getCanonicalTypeInternal());
+
+ LinkageInfo LV = computeLinkageInfo(this);
+ assert(LV.getLinkage() == getLinkage());
+ return LV;
+}
+
+Optional<NullabilityKind> Type::getNullability(const ASTContext &context) const {
+ QualType type(this, 0);
+ do {
+ // Check whether this is an attributed type with nullability
+ // information.
+ if (auto attributed = dyn_cast<AttributedType>(type.getTypePtr())) {
+ if (auto nullability = attributed->getImmediateNullability())
+ return nullability;
+ }
+
+ // Desugar the type. If desugaring does nothing, we're done.
+ QualType desugared = type.getSingleStepDesugaredType(context);
+ if (desugared.getTypePtr() == type.getTypePtr())
+ return None;
+
+ type = desugared;
+ } while (true);
+}
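+
+// Illustrative example (editor's note, not upstream code): given
+//   typedef NSString * _Nullable NullableString;
+// a use of NullableString desugars one step at a time until the
+// AttributedType sugar is reached, yielding NullabilityKind::Nullable.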
+
+bool Type::canHaveNullability() const {
+ QualType type = getCanonicalTypeInternal();
+
+ switch (type->getTypeClass()) {
+ // We'll only see canonical types here.
+#define NON_CANONICAL_TYPE(Class, Parent) \
+ case Type::Class: \
+ llvm_unreachable("non-canonical type");
+#define TYPE(Class, Parent)
+#include "clang/AST/TypeNodes.def"
+
+ // Pointer types.
+ case Type::Pointer:
+ case Type::BlockPointer:
+ case Type::MemberPointer:
+ case Type::ObjCObjectPointer:
+ return true;
+
+ // Dependent types that could instantiate to pointer types.
+ case Type::UnresolvedUsing:
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::Decltype:
+ case Type::UnaryTransform:
+ case Type::TemplateTypeParm:
+ case Type::SubstTemplateTypeParmPack:
+ case Type::DependentName:
+ case Type::DependentTemplateSpecialization:
+ return true;
+
+ // Dependent template specializations can instantiate to pointer
+ // types unless they're known to be specializations of a class
+ // template.
+ case Type::TemplateSpecialization:
+ if (TemplateDecl *templateDecl
+ = cast<TemplateSpecializationType>(type.getTypePtr())
+ ->getTemplateName().getAsTemplateDecl()) {
+ if (isa<ClassTemplateDecl>(templateDecl))
+ return false;
+ }
+ return true;
+
+ // auto is considered dependent when it isn't deduced.
+ case Type::Auto:
+ return !cast<AutoType>(type.getTypePtr())->isDeduced();
+
+ case Type::Builtin:
+ switch (cast<BuiltinType>(type.getTypePtr())->getKind()) {
+ // Signed, unsigned, and floating-point types cannot have nullability.
+#define SIGNED_TYPE(Id, SingletonId) case BuiltinType::Id:
+#define UNSIGNED_TYPE(Id, SingletonId) case BuiltinType::Id:
+#define FLOATING_TYPE(Id, SingletonId) case BuiltinType::Id:
+#define BUILTIN_TYPE(Id, SingletonId)
+#include "clang/AST/BuiltinTypes.def"
+ return false;
+
+ // Dependent types that could instantiate to a pointer type.
+ case BuiltinType::Dependent:
+ case BuiltinType::Overload:
+ case BuiltinType::BoundMember:
+ case BuiltinType::PseudoObject:
+ case BuiltinType::UnknownAny:
+ case BuiltinType::ARCUnbridgedCast:
+ return true;
+
+ case BuiltinType::Void:
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ case BuiltinType::OCLImage1d:
+ case BuiltinType::OCLImage1dArray:
+ case BuiltinType::OCLImage1dBuffer:
+ case BuiltinType::OCLImage2d:
+ case BuiltinType::OCLImage2dArray:
+ case BuiltinType::OCLImage2dDepth:
+ case BuiltinType::OCLImage2dArrayDepth:
+ case BuiltinType::OCLImage2dMSAA:
+ case BuiltinType::OCLImage2dArrayMSAA:
+ case BuiltinType::OCLImage2dMSAADepth:
+ case BuiltinType::OCLImage2dArrayMSAADepth:
+ case BuiltinType::OCLImage3d:
+ case BuiltinType::OCLSampler:
+ case BuiltinType::OCLEvent:
+ case BuiltinType::OCLClkEvent:
+ case BuiltinType::OCLQueue:
+ case BuiltinType::OCLNDRange:
+ case BuiltinType::OCLReserveID:
+ case BuiltinType::BuiltinFn:
+ case BuiltinType::NullPtr:
+ case BuiltinType::OMPArraySection:
+ return false;
+ }
+
+ // Non-pointer types.
+ case Type::Complex:
+ case Type::LValueReference:
+ case Type::RValueReference:
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::DependentSizedArray:
+ case Type::DependentSizedExtVector:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ case Type::Record:
+ case Type::Enum:
+ case Type::InjectedClassName:
+ case Type::PackExpansion:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::Atomic:
+ return false;
+ }
+ llvm_unreachable("bad type kind!");
+}
+
+llvm::Optional<NullabilityKind> AttributedType::getImmediateNullability() const {
+ if (getAttrKind() == AttributedType::attr_nonnull)
+ return NullabilityKind::NonNull;
+ if (getAttrKind() == AttributedType::attr_nullable)
+ return NullabilityKind::Nullable;
+ if (getAttrKind() == AttributedType::attr_null_unspecified)
+ return NullabilityKind::Unspecified;
+ return None;
+}
+
+Optional<NullabilityKind> AttributedType::stripOuterNullability(QualType &T) {
+ if (auto attributed = dyn_cast<AttributedType>(T.getTypePtr())) {
+ if (auto nullability = attributed->getImmediateNullability()) {
+ T = attributed->getModifiedType();
+ return nullability;
+ }
+ }
+
+ return None;
+}
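+
+// Illustrative usage (editor's note, not upstream code): if T is
+// 'NSString * _Nullable', stripOuterNullability(T) returns
+// NullabilityKind::Nullable and rewrites T to the modified type
+// 'NSString *'.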
+
+bool Type::isBlockCompatibleObjCPointerType(ASTContext &ctx) const {
+ const ObjCObjectPointerType *objcPtr = getAs<ObjCObjectPointerType>();
+ if (!objcPtr)
+ return false;
+
+ if (objcPtr->isObjCIdType()) {
+ // id is always okay.
+ return true;
+ }
+
+ // Blocks are NSObjects.
+ if (ObjCInterfaceDecl *iface = objcPtr->getInterfaceDecl()) {
+ if (iface->getIdentifier() != ctx.getNSObjectName())
+ return false;
+
+ // Continue to check qualifiers, below.
+ } else if (objcPtr->isObjCQualifiedIdType()) {
+ // Continue to check qualifiers, below.
+ } else {
+ return false;
+ }
+
+ // Check protocol qualifiers.
+ for (ObjCProtocolDecl *proto : objcPtr->quals()) {
+ // Blocks conform to NSObject and NSCopying.
+ if (proto->getIdentifier() != ctx.getNSObjectName() &&
+ proto->getIdentifier() != ctx.getNSCopyingName())
+ return false;
+ }
+
+ return true;
+}
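+
+// Illustrative examples (editor's note, not upstream code): by the rules
+// above, 'id', 'NSObject *', 'id<NSObject>', and 'id<NSCopying>' are
+// block-compatible, while 'NSString *' and 'id<MyProtocol>' are not.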
+
+Qualifiers::ObjCLifetime Type::getObjCARCImplicitLifetime() const {
+ if (isObjCARCImplicitlyUnretainedType())
+ return Qualifiers::OCL_ExplicitNone;
+ return Qualifiers::OCL_Strong;
+}
+
+bool Type::isObjCARCImplicitlyUnretainedType() const {
+ assert(isObjCLifetimeType() &&
+ "cannot query implicit lifetime for non-inferrable type");
+
+ const Type *canon = getCanonicalTypeInternal().getTypePtr();
+
+ // Walk down to the base type. We don't care about qualifiers for this.
+ while (const ArrayType *array = dyn_cast<ArrayType>(canon))
+ canon = array->getElementType().getTypePtr();
+
+ if (const ObjCObjectPointerType *opt
+ = dyn_cast<ObjCObjectPointerType>(canon)) {
+ // Class and Class<Protocol> don't require retention.
+ if (opt->getObjectType()->isObjCClass())
+ return true;
+ }
+
+ return false;
+}
+
+bool Type::isObjCNSObjectType() const {
+ if (const TypedefType *typedefType = dyn_cast<TypedefType>(this))
+ return typedefType->getDecl()->hasAttr<ObjCNSObjectAttr>();
+ return false;
+}
+bool Type::isObjCIndependentClassType() const {
+ if (const TypedefType *typedefType = dyn_cast<TypedefType>(this))
+ return typedefType->getDecl()->hasAttr<ObjCIndependentClassAttr>();
+ return false;
+}
+bool Type::isObjCRetainableType() const {
+ return isObjCObjectPointerType() ||
+ isBlockPointerType() ||
+ isObjCNSObjectType();
+}
+bool Type::isObjCIndirectLifetimeType() const {
+ if (isObjCLifetimeType())
+ return true;
+ if (const PointerType *OPT = getAs<PointerType>())
+ return OPT->getPointeeType()->isObjCIndirectLifetimeType();
+ if (const ReferenceType *Ref = getAs<ReferenceType>())
+ return Ref->getPointeeType()->isObjCIndirectLifetimeType();
+ if (const MemberPointerType *MemPtr = getAs<MemberPointerType>())
+ return MemPtr->getPointeeType()->isObjCIndirectLifetimeType();
+ return false;
+}
+
+/// Returns true if objects of this type have lifetime semantics under
+/// ARC.
+bool Type::isObjCLifetimeType() const {
+ const Type *type = this;
+ while (const ArrayType *array = type->getAsArrayTypeUnsafe())
+ type = array->getElementType().getTypePtr();
+ return type->isObjCRetainableType();
+}
+
+/// \brief Determine whether the given type T is a "bridgeable" Objective-C
+/// type, which is either an Objective-C object pointer type or a block
+/// pointer type.
+bool Type::isObjCARCBridgableType() const {
+ return isObjCObjectPointerType() || isBlockPointerType();
+}
+
+/// \brief Determine whether the given type T is a "bridgeable" C type.
+bool Type::isCARCBridgableType() const {
+ const PointerType *Pointer = getAs<PointerType>();
+ if (!Pointer)
+ return false;
+
+ QualType Pointee = Pointer->getPointeeType();
+ return Pointee->isVoidType() || Pointee->isRecordType();
+}
+
+bool Type::hasSizedVLAType() const {
+ if (!isVariablyModifiedType()) return false;
+
+ if (const PointerType *ptr = getAs<PointerType>())
+ return ptr->getPointeeType()->hasSizedVLAType();
+ if (const ReferenceType *ref = getAs<ReferenceType>())
+ return ref->getPointeeType()->hasSizedVLAType();
+ if (const ArrayType *arr = getAsArrayTypeUnsafe()) {
+ if (isa<VariableArrayType>(arr) &&
+ cast<VariableArrayType>(arr)->getSizeExpr())
+ return true;
+
+ return arr->getElementType()->hasSizedVLAType();
+ }
+
+ return false;
+}
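+
+// Illustrative example (editor's note, not upstream code): inside
+//   void f(int n) { int (*p)[n]; }
+// the type of 'p' has a sized VLA (its pointee carries a size expression),
+// so hasSizedVLAType() is true; a star-sized '[*]' array has no size
+// expression and does not count.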
+
+QualType::DestructionKind QualType::isDestructedTypeImpl(QualType type) {
+ switch (type.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ return DK_objc_strong_lifetime;
+ case Qualifiers::OCL_Weak:
+ return DK_objc_weak_lifetime;
+ }
+
+  // Currently, the only destruction kind we recognize is C++ objects
+  // with non-trivial destructors.
+ const CXXRecordDecl *record =
+ type->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ if (record && record->hasDefinition() && !record->hasTrivialDestructor())
+ return DK_cxx_destructor;
+
+ return DK_none;
+}
+
+CXXRecordDecl *MemberPointerType::getMostRecentCXXRecordDecl() const {
+ return getClass()->getAsCXXRecordDecl()->getMostRecentDecl();
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp b/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp
new file mode 100644
index 0000000..d08b07b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp
@@ -0,0 +1,508 @@
+//===--- TypeLoc.cpp - Type Source Info Wrapper -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the implementations of the TypeLoc subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/TypeLocVisitor.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+static const unsigned TypeLocMaxDataAlign = llvm::alignOf<void *>();
+
+//===----------------------------------------------------------------------===//
+// TypeLoc Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class TypeLocRanger : public TypeLocVisitor<TypeLocRanger, SourceRange> {
+ public:
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ SourceRange Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
+ return TyLoc.getLocalSourceRange(); \
+ }
+#include "clang/AST/TypeLocNodes.def"
+ };
+}
+
+SourceRange TypeLoc::getLocalSourceRangeImpl(TypeLoc TL) {
+ if (TL.isNull()) return SourceRange();
+ return TypeLocRanger().Visit(TL);
+}
+
+namespace {
+ class TypeAligner : public TypeLocVisitor<TypeAligner, unsigned> {
+ public:
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ unsigned Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
+ return TyLoc.getLocalDataAlignment(); \
+ }
+#include "clang/AST/TypeLocNodes.def"
+ };
+}
+
+/// \brief Returns the alignment of the type source info data block.
+unsigned TypeLoc::getLocalAlignmentForType(QualType Ty) {
+ if (Ty.isNull()) return 1;
+ return TypeAligner().Visit(TypeLoc(Ty, nullptr));
+}
+
+namespace {
+ class TypeSizer : public TypeLocVisitor<TypeSizer, unsigned> {
+ public:
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ unsigned Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
+ return TyLoc.getLocalDataSize(); \
+ }
+#include "clang/AST/TypeLocNodes.def"
+ };
+}
+
+/// \brief Returns the size of the type source info data block.
+unsigned TypeLoc::getFullDataSizeForType(QualType Ty) {
+ unsigned Total = 0;
+ TypeLoc TyLoc(Ty, nullptr);
+ unsigned MaxAlign = 1;
+ while (!TyLoc.isNull()) {
+ unsigned Align = getLocalAlignmentForType(TyLoc.getType());
+ MaxAlign = std::max(Align, MaxAlign);
+ Total = llvm::RoundUpToAlignment(Total, Align);
+ Total += TypeSizer().Visit(TyLoc);
+ TyLoc = TyLoc.getNextTypeLoc();
+ }
+ Total = llvm::RoundUpToAlignment(Total, MaxAlign);
+ return Total;
+}
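+
+// Illustrative walk (editor's note, not upstream text): for 'int *' the
+// loop visits the PointerTypeLoc and then the BuiltinTypeLoc, rounding
+// the running total up to each piece's alignment before adding its local
+// data size, then pads the total to the maximum alignment seen.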
+
+namespace {
+ class NextLoc : public TypeLocVisitor<NextLoc, TypeLoc> {
+ public:
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ TypeLoc Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
+ return TyLoc.getNextTypeLoc(); \
+ }
+#include "clang/AST/TypeLocNodes.def"
+ };
+}
+
+/// \brief Get the next TypeLoc pointed to by this TypeLoc, e.g. for "int*"
+/// the TypeLoc is a PointerTypeLoc and the next TypeLoc is for "int".
+TypeLoc TypeLoc::getNextTypeLocImpl(TypeLoc TL) {
+ return NextLoc().Visit(TL);
+}
+
+/// \brief Initializes a type location, and all of its children
+/// recursively, as if the entire tree had been written in the
+/// given location.
+void TypeLoc::initializeImpl(ASTContext &Context, TypeLoc TL,
+ SourceLocation Loc) {
+ while (true) {
+ switch (TL.getTypeLocClass()) {
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ case CLASS: { \
+ CLASS##TypeLoc TLCasted = TL.castAs<CLASS##TypeLoc>(); \
+ TLCasted.initializeLocal(Context, Loc); \
+ TL = TLCasted.getNextTypeLoc(); \
+ if (!TL) return; \
+ continue; \
+ }
+#include "clang/AST/TypeLocNodes.def"
+ }
+ }
+}
+
+namespace {
+ class TypeLocCopier : public TypeLocVisitor<TypeLocCopier> {
+ TypeLoc Source;
+ public:
+ TypeLocCopier(TypeLoc source) : Source(source) { }
+
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ void Visit##CLASS##TypeLoc(CLASS##TypeLoc dest) { \
+ dest.copyLocal(Source.castAs<CLASS##TypeLoc>()); \
+ }
+#include "clang/AST/TypeLocNodes.def"
+ };
+}
+
+void TypeLoc::copy(TypeLoc other) {
+ assert(getFullDataSize() == other.getFullDataSize());
+
+ // If both data pointers are aligned to the maximum alignment, we
+ // can memcpy because getFullDataSize() accurately reflects the
+ // layout of the data.
+ if (reinterpret_cast<uintptr_t>(Data)
+ == llvm::RoundUpToAlignment(reinterpret_cast<uintptr_t>(Data),
+ TypeLocMaxDataAlign) &&
+ reinterpret_cast<uintptr_t>(other.Data)
+ == llvm::RoundUpToAlignment(reinterpret_cast<uintptr_t>(other.Data),
+ TypeLocMaxDataAlign)) {
+ memcpy(Data, other.Data, getFullDataSize());
+ return;
+ }
+
+ // Copy each of the pieces.
+ TypeLoc TL(getType(), Data);
+ do {
+ TypeLocCopier(other).Visit(TL);
+ other = other.getNextTypeLoc();
+ } while ((TL = TL.getNextTypeLoc()));
+}
+
+SourceLocation TypeLoc::getBeginLoc() const {
+ TypeLoc Cur = *this;
+ TypeLoc LeftMost = Cur;
+ while (true) {
+ switch (Cur.getTypeLocClass()) {
+ case Elaborated:
+ LeftMost = Cur;
+ break;
+ case FunctionProto:
+ if (Cur.castAs<FunctionProtoTypeLoc>().getTypePtr()
+ ->hasTrailingReturn()) {
+ LeftMost = Cur;
+ break;
+ }
+ /* Fall through */
+ case FunctionNoProto:
+ case ConstantArray:
+ case DependentSizedArray:
+ case IncompleteArray:
+ case VariableArray:
+ // FIXME: Currently QualifiedTypeLoc does not have a source range
+ case Qualified:
+ Cur = Cur.getNextTypeLoc();
+ continue;
+ default:
+ if (Cur.getLocalSourceRange().getBegin().isValid())
+ LeftMost = Cur;
+ Cur = Cur.getNextTypeLoc();
+ if (Cur.isNull())
+ break;
+ continue;
+ } // switch
+ break;
+ } // while
+ return LeftMost.getLocalSourceRange().getBegin();
+}
+
+SourceLocation TypeLoc::getEndLoc() const {
+ TypeLoc Cur = *this;
+ TypeLoc Last;
+ while (true) {
+ switch (Cur.getTypeLocClass()) {
+ default:
+ if (!Last)
+ Last = Cur;
+ return Last.getLocalSourceRange().getEnd();
+ case Paren:
+ case ConstantArray:
+ case DependentSizedArray:
+ case IncompleteArray:
+ case VariableArray:
+ case FunctionNoProto:
+ Last = Cur;
+ break;
+ case FunctionProto:
+ if (Cur.castAs<FunctionProtoTypeLoc>().getTypePtr()->hasTrailingReturn())
+ Last = TypeLoc();
+ else
+ Last = Cur;
+ break;
+ case Pointer:
+ case BlockPointer:
+ case MemberPointer:
+ case LValueReference:
+ case RValueReference:
+ case PackExpansion:
+ if (!Last)
+ Last = Cur;
+ break;
+ case Qualified:
+ case Elaborated:
+ break;
+ }
+ Cur = Cur.getNextTypeLoc();
+ }
+}
+
+namespace {
+ struct TSTChecker : public TypeLocVisitor<TSTChecker, bool> {
+ // Overload resolution does the real work for us.
+ static bool isTypeSpec(TypeSpecTypeLoc _) { return true; }
+ static bool isTypeSpec(TypeLoc _) { return false; }
+
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ bool Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
+ return isTypeSpec(TyLoc); \
+ }
+#include "clang/AST/TypeLocNodes.def"
+ };
+}
+
+/// \brief Determines if the given type loc corresponds to a
+/// TypeSpecTypeLoc. Since there is not actually a TypeSpecType in
+/// the type hierarchy, this is made somewhat complicated.
+///
+/// There are a lot of types that currently use TypeSpecTypeLoc
+/// because it's a convenient base class. Ideally we would not accept
+/// those here, but ideally we would have better implementations for
+/// them.
+bool TypeSpecTypeLoc::isKind(const TypeLoc &TL) {
+ if (TL.getType().hasLocalQualifiers()) return false;
+ return TSTChecker().Visit(TL);
+}
+
+// Reimplemented to account for the GNU/C++ extension
+//     typeof unary-expression
+// where there are no parentheses.
+SourceRange TypeOfExprTypeLoc::getLocalSourceRange() const {
+ if (getRParenLoc().isValid())
+ return SourceRange(getTypeofLoc(), getRParenLoc());
+ else
+ return SourceRange(getTypeofLoc(),
+ getUnderlyingExpr()->getSourceRange().getEnd());
+}
+
+TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
+ if (needsExtraLocalData())
+ return static_cast<TypeSpecifierType>(getWrittenBuiltinSpecs().Type);
+ switch (getTypePtr()->getKind()) {
+ case BuiltinType::Void:
+ return TST_void;
+ case BuiltinType::Bool:
+ return TST_bool;
+ case BuiltinType::Char_U:
+ case BuiltinType::Char_S:
+ return TST_char;
+ case BuiltinType::Char16:
+ return TST_char16;
+ case BuiltinType::Char32:
+ return TST_char32;
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ return TST_wchar;
+ case BuiltinType::UChar:
+ case BuiltinType::UShort:
+ case BuiltinType::UInt:
+ case BuiltinType::ULong:
+ case BuiltinType::ULongLong:
+ case BuiltinType::UInt128:
+ case BuiltinType::SChar:
+ case BuiltinType::Short:
+ case BuiltinType::Int:
+ case BuiltinType::Long:
+ case BuiltinType::LongLong:
+ case BuiltinType::Int128:
+ case BuiltinType::Half:
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ case BuiltinType::LongDouble:
+ llvm_unreachable("Builtin type needs extra local data!");
+ // Fall through, if the impossible happens.
+
+ case BuiltinType::NullPtr:
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ case BuiltinType::BoundMember:
+ case BuiltinType::UnknownAny:
+ case BuiltinType::ARCUnbridgedCast:
+ case BuiltinType::PseudoObject:
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ case BuiltinType::OCLImage1d:
+ case BuiltinType::OCLImage1dArray:
+ case BuiltinType::OCLImage1dBuffer:
+ case BuiltinType::OCLImage2d:
+ case BuiltinType::OCLImage2dArray:
+ case BuiltinType::OCLImage2dDepth:
+ case BuiltinType::OCLImage2dArrayDepth:
+ case BuiltinType::OCLImage2dMSAA:
+ case BuiltinType::OCLImage2dArrayMSAA:
+ case BuiltinType::OCLImage2dMSAADepth:
+ case BuiltinType::OCLImage2dArrayMSAADepth:
+ case BuiltinType::OCLImage3d:
+ case BuiltinType::OCLSampler:
+ case BuiltinType::OCLEvent:
+ case BuiltinType::OCLClkEvent:
+ case BuiltinType::OCLQueue:
+ case BuiltinType::OCLNDRange:
+ case BuiltinType::OCLReserveID:
+ case BuiltinType::BuiltinFn:
+ case BuiltinType::OMPArraySection:
+ return TST_unspecified;
+ }
+
+ llvm_unreachable("Invalid BuiltinType Kind!");
+}
+
+TypeLoc TypeLoc::IgnoreParensImpl(TypeLoc TL) {
+ while (ParenTypeLoc PTL = TL.getAs<ParenTypeLoc>())
+ TL = PTL.getInnerLoc();
+ return TL;
+}
+
+SourceLocation TypeLoc::findNullabilityLoc() const {
+ if (auto attributedLoc = getAs<AttributedTypeLoc>()) {
+ if (attributedLoc.getAttrKind() == AttributedType::attr_nullable ||
+ attributedLoc.getAttrKind() == AttributedType::attr_nonnull ||
+ attributedLoc.getAttrKind() == AttributedType::attr_null_unspecified)
+ return attributedLoc.getAttrNameLoc();
+ }
+
+ return SourceLocation();
+}
+
+TypeLoc TypeLoc::findExplicitQualifierLoc() const {
+ // Qualified types.
+ if (auto qual = getAs<QualifiedTypeLoc>())
+ return qual;
+
+ TypeLoc loc = IgnoreParens();
+
+ // Attributed types.
+ if (auto attr = loc.getAs<AttributedTypeLoc>()) {
+ if (attr.isQualifier()) return attr;
+ return attr.getModifiedLoc().findExplicitQualifierLoc();
+ }
+
+ // C11 _Atomic types.
+ if (auto atomic = loc.getAs<AtomicTypeLoc>()) {
+ return atomic;
+ }
+
+ return TypeLoc();
+}
+
+void ObjCObjectTypeLoc::initializeLocal(ASTContext &Context,
+ SourceLocation Loc) {
+ setHasBaseTypeAsWritten(true);
+ setTypeArgsLAngleLoc(Loc);
+ setTypeArgsRAngleLoc(Loc);
+ for (unsigned i = 0, e = getNumTypeArgs(); i != e; ++i) {
+ setTypeArgTInfo(i,
+ Context.getTrivialTypeSourceInfo(
+ getTypePtr()->getTypeArgsAsWritten()[i], Loc));
+ }
+ setProtocolLAngleLoc(Loc);
+ setProtocolRAngleLoc(Loc);
+ for (unsigned i = 0, e = getNumProtocols(); i != e; ++i)
+ setProtocolLoc(i, Loc);
+}
+
+void TypeOfTypeLoc::initializeLocal(ASTContext &Context,
+ SourceLocation Loc) {
+ TypeofLikeTypeLoc<TypeOfTypeLoc, TypeOfType, TypeOfTypeLocInfo>
+ ::initializeLocal(Context, Loc);
+ this->getLocalData()->UnderlyingTInfo = Context.getTrivialTypeSourceInfo(
+ getUnderlyingType(), Loc);
+}
+
+void ElaboratedTypeLoc::initializeLocal(ASTContext &Context,
+ SourceLocation Loc) {
+ setElaboratedKeywordLoc(Loc);
+ NestedNameSpecifierLocBuilder Builder;
+ Builder.MakeTrivial(Context, getTypePtr()->getQualifier(), Loc);
+ setQualifierLoc(Builder.getWithLocInContext(Context));
+}
+
+void DependentNameTypeLoc::initializeLocal(ASTContext &Context,
+ SourceLocation Loc) {
+ setElaboratedKeywordLoc(Loc);
+ NestedNameSpecifierLocBuilder Builder;
+ Builder.MakeTrivial(Context, getTypePtr()->getQualifier(), Loc);
+ setQualifierLoc(Builder.getWithLocInContext(Context));
+ setNameLoc(Loc);
+}
+
+void
+DependentTemplateSpecializationTypeLoc::initializeLocal(ASTContext &Context,
+ SourceLocation Loc) {
+ setElaboratedKeywordLoc(Loc);
+ if (getTypePtr()->getQualifier()) {
+ NestedNameSpecifierLocBuilder Builder;
+ Builder.MakeTrivial(Context, getTypePtr()->getQualifier(), Loc);
+ setQualifierLoc(Builder.getWithLocInContext(Context));
+ } else {
+ setQualifierLoc(NestedNameSpecifierLoc());
+ }
+ setTemplateKeywordLoc(Loc);
+ setTemplateNameLoc(Loc);
+ setLAngleLoc(Loc);
+ setRAngleLoc(Loc);
+ TemplateSpecializationTypeLoc::initializeArgLocs(Context, getNumArgs(),
+ getTypePtr()->getArgs(),
+ getArgInfos(), Loc);
+}
+
+void TemplateSpecializationTypeLoc::initializeArgLocs(ASTContext &Context,
+ unsigned NumArgs,
+ const TemplateArgument *Args,
+ TemplateArgumentLocInfo *ArgInfos,
+ SourceLocation Loc) {
+ for (unsigned i = 0, e = NumArgs; i != e; ++i) {
+ switch (Args[i].getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("Impossible TemplateArgument");
+
+ case TemplateArgument::Integral:
+ case TemplateArgument::Declaration:
+ case TemplateArgument::NullPtr:
+ ArgInfos[i] = TemplateArgumentLocInfo();
+ break;
+
+ case TemplateArgument::Expression:
+ ArgInfos[i] = TemplateArgumentLocInfo(Args[i].getAsExpr());
+ break;
+
+ case TemplateArgument::Type:
+ ArgInfos[i] = TemplateArgumentLocInfo(
+ Context.getTrivialTypeSourceInfo(Args[i].getAsType(),
+ Loc));
+ break;
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion: {
+ NestedNameSpecifierLocBuilder Builder;
+ TemplateName Template = Args[i].getAsTemplateOrTemplatePattern();
+ if (DependentTemplateName *DTN = Template.getAsDependentTemplateName())
+ Builder.MakeTrivial(Context, DTN->getQualifier(), Loc);
+ else if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
+ Builder.MakeTrivial(Context, QTN->getQualifier(), Loc);
+
+ ArgInfos[i] = TemplateArgumentLocInfo(
+ Builder.getWithLocInContext(Context), Loc,
+ Args[i].getKind() == TemplateArgument::Template ? SourceLocation()
+ : Loc);
+ break;
+ }
+
+ case TemplateArgument::Pack:
+ ArgInfos[i] = TemplateArgumentLocInfo();
+ break;
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp b/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
new file mode 100644
index 0000000..4617e1d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
@@ -0,0 +1,1625 @@
+//===--- TypePrinter.cpp - Pretty-Print Clang Types -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to print types from Clang's type system.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/SaveAndRestore.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+namespace {
+ /// \brief RAII object that enables printing of the ARC __strong lifetime
+ /// qualifier.
+ class IncludeStrongLifetimeRAII {
+ PrintingPolicy &Policy;
+ bool Old;
+
+ public:
+ explicit IncludeStrongLifetimeRAII(PrintingPolicy &Policy)
+ : Policy(Policy), Old(Policy.SuppressStrongLifetime) {
+ if (!Policy.SuppressLifetimeQualifiers)
+ Policy.SuppressStrongLifetime = false;
+ }
+
+ ~IncludeStrongLifetimeRAII() {
+ Policy.SuppressStrongLifetime = Old;
+ }
+ };
+
+ class ParamPolicyRAII {
+ PrintingPolicy &Policy;
+ bool Old;
+
+ public:
+ explicit ParamPolicyRAII(PrintingPolicy &Policy)
+ : Policy(Policy), Old(Policy.SuppressSpecifiers) {
+ Policy.SuppressSpecifiers = false;
+ }
+
+ ~ParamPolicyRAII() {
+ Policy.SuppressSpecifiers = Old;
+ }
+ };
+
+ class ElaboratedTypePolicyRAII {
+ PrintingPolicy &Policy;
+ bool SuppressTagKeyword;
+ bool SuppressScope;
+
+ public:
+ explicit ElaboratedTypePolicyRAII(PrintingPolicy &Policy) : Policy(Policy) {
+ SuppressTagKeyword = Policy.SuppressTagKeyword;
+ SuppressScope = Policy.SuppressScope;
+ Policy.SuppressTagKeyword = true;
+ Policy.SuppressScope = true;
+ }
+
+ ~ElaboratedTypePolicyRAII() {
+ Policy.SuppressTagKeyword = SuppressTagKeyword;
+ Policy.SuppressScope = SuppressScope;
+ }
+ };
+
+ class TypePrinter {
+ PrintingPolicy Policy;
+ bool HasEmptyPlaceHolder;
+ bool InsideCCAttribute;
+
+ public:
+ explicit TypePrinter(const PrintingPolicy &Policy)
+ : Policy(Policy), HasEmptyPlaceHolder(false), InsideCCAttribute(false) { }
+
+ void print(const Type *ty, Qualifiers qs, raw_ostream &OS,
+ StringRef PlaceHolder);
+ void print(QualType T, raw_ostream &OS, StringRef PlaceHolder);
+
+ static bool canPrefixQualifiers(const Type *T, bool &NeedARCStrongQualifier);
+ void spaceBeforePlaceHolder(raw_ostream &OS);
+ void printTypeSpec(const NamedDecl *D, raw_ostream &OS);
+
+ void printBefore(const Type *ty, Qualifiers qs, raw_ostream &OS);
+ void printBefore(QualType T, raw_ostream &OS);
+ void printAfter(const Type *ty, Qualifiers qs, raw_ostream &OS);
+ void printAfter(QualType T, raw_ostream &OS);
+ void AppendScope(DeclContext *DC, raw_ostream &OS);
+ void printTag(TagDecl *T, raw_ostream &OS);
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) \
+ void print##CLASS##Before(const CLASS##Type *T, raw_ostream &OS); \
+ void print##CLASS##After(const CLASS##Type *T, raw_ostream &OS);
+#include "clang/AST/TypeNodes.def"
+ };
+}
+
+static void AppendTypeQualList(raw_ostream &OS, unsigned TypeQuals, bool C99) {
+ bool appendSpace = false;
+ if (TypeQuals & Qualifiers::Const) {
+ OS << "const";
+ appendSpace = true;
+ }
+ if (TypeQuals & Qualifiers::Volatile) {
+ if (appendSpace) OS << ' ';
+ OS << "volatile";
+ appendSpace = true;
+ }
+ if (TypeQuals & Qualifiers::Restrict) {
+ if (appendSpace) OS << ' ';
+ if (C99) {
+ OS << "restrict";
+ } else {
+ OS << "__restrict";
+ }
+ }
+}
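+
+// Illustrative output (editor's note, not upstream code): the qualifier
+// set Const|Volatile|Restrict prints as "const volatile restrict" when
+// C99 is true and as "const volatile __restrict" otherwise.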
+
+void TypePrinter::spaceBeforePlaceHolder(raw_ostream &OS) {
+ if (!HasEmptyPlaceHolder)
+ OS << ' ';
+}
+
+void TypePrinter::print(QualType t, raw_ostream &OS, StringRef PlaceHolder) {
+ SplitQualType split = t.split();
+ print(split.Ty, split.Quals, OS, PlaceHolder);
+}
+
+void TypePrinter::print(const Type *T, Qualifiers Quals, raw_ostream &OS,
+ StringRef PlaceHolder) {
+ if (!T) {
+ OS << "NULL TYPE";
+ return;
+ }
+
+ SaveAndRestore<bool> PHVal(HasEmptyPlaceHolder, PlaceHolder.empty());
+
+ printBefore(T, Quals, OS);
+ OS << PlaceHolder;
+ printAfter(T, Quals, OS);
+}
+
+bool TypePrinter::canPrefixQualifiers(const Type *T,
+ bool &NeedARCStrongQualifier) {
+  // CanPrefixQualifiers - We prefer to print type qualifiers before the type,
+  // so that we get "const int" instead of "int const". But we can't do this
+  // when the type is syntactically compound: for example, if the type is
+  // "int*", we *must* print "int * const", since "const int *" names a
+  // different type. Only prefix the qualifiers when the type expands to a
+  // simple string.
+ bool CanPrefixQualifiers = false;
+ NeedARCStrongQualifier = false;
+ Type::TypeClass TC = T->getTypeClass();
+ if (const AutoType *AT = dyn_cast<AutoType>(T))
+ TC = AT->desugar()->getTypeClass();
+ if (const SubstTemplateTypeParmType *Subst
+ = dyn_cast<SubstTemplateTypeParmType>(T))
+ TC = Subst->getReplacementType()->getTypeClass();
+
+ switch (TC) {
+ case Type::Auto:
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::UnresolvedUsing:
+ case Type::Typedef:
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::Decltype:
+ case Type::UnaryTransform:
+ case Type::Record:
+ case Type::Enum:
+ case Type::Elaborated:
+ case Type::TemplateTypeParm:
+ case Type::SubstTemplateTypeParmPack:
+ case Type::TemplateSpecialization:
+ case Type::InjectedClassName:
+ case Type::DependentName:
+ case Type::DependentTemplateSpecialization:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::Atomic:
+ CanPrefixQualifiers = true;
+ break;
+
+ case Type::ObjCObjectPointer:
+ CanPrefixQualifiers = T->isObjCIdType() || T->isObjCClassType() ||
+ T->isObjCQualifiedIdType() || T->isObjCQualifiedClassType();
+ break;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::DependentSizedArray:
+ NeedARCStrongQualifier = true;
+ // Fall through
+
+ case Type::Adjusted:
+ case Type::Decayed:
+ case Type::Pointer:
+ case Type::BlockPointer:
+ case Type::LValueReference:
+ case Type::RValueReference:
+ case Type::MemberPointer:
+ case Type::DependentSizedExtVector:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ case Type::Paren:
+ case Type::Attributed:
+ case Type::PackExpansion:
+ case Type::SubstTemplateTypeParm:
+ CanPrefixQualifiers = false;
+ break;
+ }
+
+ return CanPrefixQualifiers;
+}
+
+void TypePrinter::printBefore(QualType T, raw_ostream &OS) {
+ SplitQualType Split = T.split();
+
+ // If we have cv1 T, where T is substituted for cv2 U, only print cv1 - cv2
+ // at this level.
+ Qualifiers Quals = Split.Quals;
+ if (const SubstTemplateTypeParmType *Subst =
+ dyn_cast<SubstTemplateTypeParmType>(Split.Ty))
+ Quals -= QualType(Subst, 0).getQualifiers();
+
+ printBefore(Split.Ty, Quals, OS);
+}
+
+/// \brief Prints the part of the type string before an identifier, e.g. for
+/// "int foo[10]" it prints "int ".
+void TypePrinter::printBefore(const Type *T, Qualifiers Quals,
+                              raw_ostream &OS) {
+ if (Policy.SuppressSpecifiers && T->isSpecifierType())
+ return;
+
+ SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder);
+
+ // Print qualifiers as appropriate.
+
+ bool CanPrefixQualifiers = false;
+ bool NeedARCStrongQualifier = false;
+ CanPrefixQualifiers = canPrefixQualifiers(T, NeedARCStrongQualifier);
+
+ if (CanPrefixQualifiers && !Quals.empty()) {
+ if (NeedARCStrongQualifier) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ Quals.print(OS, Policy, /*appendSpaceIfNonEmpty=*/true);
+ } else {
+ Quals.print(OS, Policy, /*appendSpaceIfNonEmpty=*/true);
+ }
+ }
+
+ bool hasAfterQuals = false;
+ if (!CanPrefixQualifiers && !Quals.empty()) {
+ hasAfterQuals = !Quals.isEmptyWhenPrinted(Policy);
+ if (hasAfterQuals)
+ HasEmptyPlaceHolder = false;
+ }
+
+ switch (T->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) case Type::CLASS: \
+ print##CLASS##Before(cast<CLASS##Type>(T), OS); \
+ break;
+#include "clang/AST/TypeNodes.def"
+ }
+
+ if (hasAfterQuals) {
+ if (NeedARCStrongQualifier) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ Quals.print(OS, Policy, /*appendSpaceIfNonEmpty=*/!PrevPHIsEmpty.get());
+ } else {
+ Quals.print(OS, Policy, /*appendSpaceIfNonEmpty=*/!PrevPHIsEmpty.get());
+ }
+ }
+}
+
+void TypePrinter::printAfter(QualType t, raw_ostream &OS) {
+ SplitQualType split = t.split();
+ printAfter(split.Ty, split.Quals, OS);
+}
+
+/// \brief Prints the part of the type string after an identifier, e.g. for
+/// "int foo[10]" it prints "[10]".
+void TypePrinter::printAfter(const Type *T, Qualifiers Quals, raw_ostream &OS) {
+ switch (T->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) case Type::CLASS: \
+ print##CLASS##After(cast<CLASS##Type>(T), OS); \
+ break;
+#include "clang/AST/TypeNodes.def"
+ }
+}
+
+void TypePrinter::printBuiltinBefore(const BuiltinType *T, raw_ostream &OS) {
+ OS << T->getName(Policy);
+ spaceBeforePlaceHolder(OS);
+}
+void TypePrinter::printBuiltinAfter(const BuiltinType *T, raw_ostream &OS) { }
+
+void TypePrinter::printComplexBefore(const ComplexType *T, raw_ostream &OS) {
+ OS << "_Complex ";
+ printBefore(T->getElementType(), OS);
+}
+void TypePrinter::printComplexAfter(const ComplexType *T, raw_ostream &OS) {
+ printAfter(T->getElementType(), OS);
+}
+
+void TypePrinter::printPointerBefore(const PointerType *T, raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ printBefore(T->getPointeeType(), OS);
+ // Handle things like 'int (*A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(T->getPointeeType()))
+ OS << '(';
+ OS << '*';
+}
+void TypePrinter::printPointerAfter(const PointerType *T, raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ // Handle things like 'int (*A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(T->getPointeeType()))
+ OS << ')';
+ printAfter(T->getPointeeType(), OS);
+}
+
+void TypePrinter::printBlockPointerBefore(const BlockPointerType *T,
+ raw_ostream &OS) {
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ printBefore(T->getPointeeType(), OS);
+ OS << '^';
+}
+void TypePrinter::printBlockPointerAfter(const BlockPointerType *T,
+ raw_ostream &OS) {
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ printAfter(T->getPointeeType(), OS);
+}
+
+void TypePrinter::printLValueReferenceBefore(const LValueReferenceType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ printBefore(T->getPointeeTypeAsWritten(), OS);
+ // Handle things like 'int (&A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
+ OS << '(';
+ OS << '&';
+}
+void TypePrinter::printLValueReferenceAfter(const LValueReferenceType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ // Handle things like 'int (&A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
+ OS << ')';
+ printAfter(T->getPointeeTypeAsWritten(), OS);
+}
+
+void TypePrinter::printRValueReferenceBefore(const RValueReferenceType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ printBefore(T->getPointeeTypeAsWritten(), OS);
+ // Handle things like 'int (&&A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
+ OS << '(';
+ OS << "&&";
+}
+void TypePrinter::printRValueReferenceAfter(const RValueReferenceType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ // Handle things like 'int (&&A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
+ OS << ')';
+ printAfter(T->getPointeeTypeAsWritten(), OS);
+}
+
+void TypePrinter::printMemberPointerBefore(const MemberPointerType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ printBefore(T->getPointeeType(), OS);
+ // Handle things like 'int (Cls::*A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(T->getPointeeType()))
+ OS << '(';
+
+ PrintingPolicy InnerPolicy(Policy);
+ InnerPolicy.SuppressTag = false;
+ TypePrinter(InnerPolicy).print(QualType(T->getClass(), 0), OS, StringRef());
+
+ OS << "::*";
+}
+void TypePrinter::printMemberPointerAfter(const MemberPointerType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ // Handle things like 'int (Cls::*A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(T->getPointeeType()))
+ OS << ')';
+ printAfter(T->getPointeeType(), OS);
+}
+
+void TypePrinter::printConstantArrayBefore(const ConstantArrayType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ printBefore(T->getElementType(), OS);
+}
+void TypePrinter::printConstantArrayAfter(const ConstantArrayType *T,
+ raw_ostream &OS) {
+ OS << '[';
+ if (T->getIndexTypeQualifiers().hasQualifiers()) {
+ AppendTypeQualList(OS, T->getIndexTypeCVRQualifiers(), Policy.LangOpts.C99);
+ OS << ' ';
+ }
+
+ if (T->getSizeModifier() == ArrayType::Static)
+ OS << "static ";
+
+ OS << T->getSize().getZExtValue() << ']';
+ printAfter(T->getElementType(), OS);
+}
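+
+// Example (illustrative): for a hypothetical parameter declared as
+// 'int a[static const 4]', the two halves above print "int" followed
+// by "[const static 4]" -- index qualifiers first, then the 'static'
+// size modifier, then the constant bound.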
+
+void TypePrinter::printIncompleteArrayBefore(const IncompleteArrayType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ printBefore(T->getElementType(), OS);
+}
+void TypePrinter::printIncompleteArrayAfter(const IncompleteArrayType *T,
+ raw_ostream &OS) {
+ OS << "[]";
+ printAfter(T->getElementType(), OS);
+}
+
+void TypePrinter::printVariableArrayBefore(const VariableArrayType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ printBefore(T->getElementType(), OS);
+}
+void TypePrinter::printVariableArrayAfter(const VariableArrayType *T,
+ raw_ostream &OS) {
+ OS << '[';
+ if (T->getIndexTypeQualifiers().hasQualifiers()) {
+ AppendTypeQualList(OS, T->getIndexTypeCVRQualifiers(), Policy.LangOpts.C99);
+ OS << ' ';
+ }
+
+ if (T->getSizeModifier() == VariableArrayType::Static)
+ OS << "static ";
+ else if (T->getSizeModifier() == VariableArrayType::Star)
+ OS << '*';
+
+ if (T->getSizeExpr())
+ T->getSizeExpr()->printPretty(OS, nullptr, Policy);
+ OS << ']';
+
+ printAfter(T->getElementType(), OS);
+}
+
+void TypePrinter::printAdjustedBefore(const AdjustedType *T, raw_ostream &OS) {
+ // Print the adjusted representation, otherwise the adjustment will be
+ // invisible.
+ printBefore(T->getAdjustedType(), OS);
+}
+void TypePrinter::printAdjustedAfter(const AdjustedType *T, raw_ostream &OS) {
+ printAfter(T->getAdjustedType(), OS);
+}
+
+void TypePrinter::printDecayedBefore(const DecayedType *T, raw_ostream &OS) {
+ // Print as though it's a pointer.
+ printAdjustedBefore(T, OS);
+}
+void TypePrinter::printDecayedAfter(const DecayedType *T, raw_ostream &OS) {
+ printAdjustedAfter(T, OS);
+}
+
+void TypePrinter::printDependentSizedArrayBefore(
+ const DependentSizedArrayType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ printBefore(T->getElementType(), OS);
+}
+void TypePrinter::printDependentSizedArrayAfter(
+ const DependentSizedArrayType *T,
+ raw_ostream &OS) {
+ OS << '[';
+ if (T->getSizeExpr())
+ T->getSizeExpr()->printPretty(OS, nullptr, Policy);
+ OS << ']';
+ printAfter(T->getElementType(), OS);
+}
+
+void TypePrinter::printDependentSizedExtVectorBefore(
+ const DependentSizedExtVectorType *T,
+ raw_ostream &OS) {
+ printBefore(T->getElementType(), OS);
+}
+void TypePrinter::printDependentSizedExtVectorAfter(
+ const DependentSizedExtVectorType *T,
+ raw_ostream &OS) {
+ OS << " __attribute__((ext_vector_type(";
+ if (T->getSizeExpr())
+ T->getSizeExpr()->printPretty(OS, nullptr, Policy);
+ OS << ")))";
+ printAfter(T->getElementType(), OS);
+}
+
+void TypePrinter::printVectorBefore(const VectorType *T, raw_ostream &OS) {
+ switch (T->getVectorKind()) {
+ case VectorType::AltiVecPixel:
+ OS << "__vector __pixel ";
+ break;
+ case VectorType::AltiVecBool:
+ OS << "__vector __bool ";
+ printBefore(T->getElementType(), OS);
+ break;
+ case VectorType::AltiVecVector:
+ OS << "__vector ";
+ printBefore(T->getElementType(), OS);
+ break;
+ case VectorType::NeonVector:
+ OS << "__attribute__((neon_vector_type("
+ << T->getNumElements() << "))) ";
+ printBefore(T->getElementType(), OS);
+ break;
+ case VectorType::NeonPolyVector:
+ OS << "__attribute__((neon_polyvector_type(" <<
+ T->getNumElements() << "))) ";
+ printBefore(T->getElementType(), OS);
+ break;
+ case VectorType::GenericVector: {
+ // FIXME: We prefer to print the size directly here, but have no way
+ // to get the size of the type.
+ OS << "__attribute__((__vector_size__("
+ << T->getNumElements()
+ << " * sizeof(";
+ print(T->getElementType(), OS, StringRef());
+ OS << ")))) ";
+ printBefore(T->getElementType(), OS);
+ break;
+ }
+ }
+}
+void TypePrinter::printVectorAfter(const VectorType *T, raw_ostream &OS) {
+ printAfter(T->getElementType(), OS);
+}
+
+void TypePrinter::printExtVectorBefore(const ExtVectorType *T,
+ raw_ostream &OS) {
+ printBefore(T->getElementType(), OS);
+}
+void TypePrinter::printExtVectorAfter(const ExtVectorType *T, raw_ostream &OS) {
+ printAfter(T->getElementType(), OS);
+ OS << " __attribute__((ext_vector_type(";
+ OS << T->getNumElements();
+ OS << ")))";
+}
+
+void FunctionProtoType::printExceptionSpecification(
+    raw_ostream &OS, const PrintingPolicy &Policy) const {
+ if (hasDynamicExceptionSpec()) {
+ OS << " throw(";
+ if (getExceptionSpecType() == EST_MSAny)
+ OS << "...";
+ else
+ for (unsigned I = 0, N = getNumExceptions(); I != N; ++I) {
+ if (I)
+ OS << ", ";
+
+ OS << getExceptionType(I).stream(Policy);
+ }
+ OS << ')';
+ } else if (isNoexceptExceptionSpec(getExceptionSpecType())) {
+ OS << " noexcept";
+ if (getExceptionSpecType() == EST_ComputedNoexcept) {
+ OS << '(';
+ if (getNoexceptExpr())
+ getNoexceptExpr()->printPretty(OS, nullptr, Policy);
+ OS << ')';
+ }
+ }
+}
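+
+// Example (illustrative): a dynamic exception spec prints as
+// " throw(int, float)" (or " throw(...)" for EST_MSAny), while a
+// computed noexcept spec prints as, e.g., " noexcept(sizeof(int) == 4)".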
+
+void TypePrinter::printFunctionProtoBefore(const FunctionProtoType *T,
+ raw_ostream &OS) {
+ if (T->hasTrailingReturn()) {
+ OS << "auto ";
+ if (!HasEmptyPlaceHolder)
+ OS << '(';
+ } else {
+ // If needed for precedence reasons, wrap the inner part in grouping parens.
+ SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder, false);
+ printBefore(T->getReturnType(), OS);
+ if (!PrevPHIsEmpty.get())
+ OS << '(';
+ }
+}
+
+void TypePrinter::printFunctionProtoAfter(const FunctionProtoType *T,
+ raw_ostream &OS) {
+ // If needed for precedence reasons, wrap the inner part in grouping parens.
+ if (!HasEmptyPlaceHolder)
+ OS << ')';
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+
+ OS << '(';
+ {
+ ParamPolicyRAII ParamPolicy(Policy);
+ for (unsigned i = 0, e = T->getNumParams(); i != e; ++i) {
+ if (i) OS << ", ";
+ print(T->getParamType(i), OS, StringRef());
+ }
+ }
+
+ if (T->isVariadic()) {
+ if (T->getNumParams())
+ OS << ", ";
+ OS << "...";
+ } else if (T->getNumParams() == 0 && !Policy.LangOpts.CPlusPlus) {
+    // For a zero-parameter prototype in C, emit 'int(void)' rather than
+    // 'int()', which would declare a function without a prototype.
+ OS << "void";
+ }
+
+ OS << ')';
+
+ FunctionType::ExtInfo Info = T->getExtInfo();
+
+ if (!InsideCCAttribute) {
+ switch (Info.getCC()) {
+ case CC_C:
+ // The C calling convention is the default on the vast majority of platforms
+ // we support. If the user wrote it explicitly, it will usually be printed
+ // while traversing the AttributedType. If the type has been desugared, let
+ // the canonical spelling be the implicit calling convention.
+ // FIXME: It would be better to be explicit in certain contexts, such as a
+ // cdecl function typedef used to declare a member function with the
+ // Microsoft C++ ABI.
+ break;
+ case CC_X86StdCall:
+ OS << " __attribute__((stdcall))";
+ break;
+ case CC_X86FastCall:
+ OS << " __attribute__((fastcall))";
+ break;
+ case CC_X86ThisCall:
+ OS << " __attribute__((thiscall))";
+ break;
+ case CC_X86VectorCall:
+ OS << " __attribute__((vectorcall))";
+ break;
+ case CC_X86Pascal:
+ OS << " __attribute__((pascal))";
+ break;
+ case CC_AAPCS:
+ OS << " __attribute__((pcs(\"aapcs\")))";
+ break;
+ case CC_AAPCS_VFP:
+ OS << " __attribute__((pcs(\"aapcs-vfp\")))";
+ break;
+ case CC_IntelOclBicc:
+ OS << " __attribute__((intel_ocl_bicc))";
+ break;
+ case CC_X86_64Win64:
+ OS << " __attribute__((ms_abi))";
+ break;
+ case CC_X86_64SysV:
+ OS << " __attribute__((sysv_abi))";
+ break;
+ case CC_SpirFunction:
+ case CC_SpirKernel:
+ // Do nothing. These CCs are not available as attributes.
+ break;
+ }
+ }
+
+ if (Info.getNoReturn())
+ OS << " __attribute__((noreturn))";
+ if (Info.getRegParm())
+ OS << " __attribute__((regparm ("
+ << Info.getRegParm() << ")))";
+
+ if (unsigned quals = T->getTypeQuals()) {
+ OS << ' ';
+ AppendTypeQualList(OS, quals, Policy.LangOpts.C99);
+ }
+
+ switch (T->getRefQualifier()) {
+ case RQ_None:
+ break;
+
+ case RQ_LValue:
+ OS << " &";
+ break;
+
+ case RQ_RValue:
+ OS << " &&";
+ break;
+ }
+ T->printExceptionSpecification(OS, Policy);
+
+ if (T->hasTrailingReturn()) {
+ OS << " -> ";
+ print(T->getReturnType(), OS, StringRef());
+ } else
+ printAfter(T->getReturnType(), OS);
+}
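+
+// Example (illustrative): a const, rvalue-ref-qualified member function
+// type prints as 'void (int) const && noexcept' -- the parameter list,
+// cv-qualifiers, ref-qualifier, and exception specification are emitted
+// in exactly that order by printFunctionProtoAfter.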
+
+void TypePrinter::printFunctionNoProtoBefore(const FunctionNoProtoType *T,
+ raw_ostream &OS) {
+ // If needed for precedence reasons, wrap the inner part in grouping parens.
+ SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder, false);
+ printBefore(T->getReturnType(), OS);
+ if (!PrevPHIsEmpty.get())
+ OS << '(';
+}
+void TypePrinter::printFunctionNoProtoAfter(const FunctionNoProtoType *T,
+ raw_ostream &OS) {
+ // If needed for precedence reasons, wrap the inner part in grouping parens.
+ if (!HasEmptyPlaceHolder)
+ OS << ')';
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+
+ OS << "()";
+ if (T->getNoReturnAttr())
+ OS << " __attribute__((noreturn))";
+ printAfter(T->getReturnType(), OS);
+}
+
+void TypePrinter::printTypeSpec(const NamedDecl *D, raw_ostream &OS) {
+ IdentifierInfo *II = D->getIdentifier();
+ OS << II->getName();
+ spaceBeforePlaceHolder(OS);
+}
+
+void TypePrinter::printUnresolvedUsingBefore(const UnresolvedUsingType *T,
+ raw_ostream &OS) {
+ printTypeSpec(T->getDecl(), OS);
+}
+void TypePrinter::printUnresolvedUsingAfter(const UnresolvedUsingType *T,
+ raw_ostream &OS) { }
+
+void TypePrinter::printTypedefBefore(const TypedefType *T, raw_ostream &OS) {
+ printTypeSpec(T->getDecl(), OS);
+}
+void TypePrinter::printTypedefAfter(const TypedefType *T, raw_ostream &OS) { }
+
+void TypePrinter::printTypeOfExprBefore(const TypeOfExprType *T,
+ raw_ostream &OS) {
+ OS << "typeof ";
+ if (T->getUnderlyingExpr())
+ T->getUnderlyingExpr()->printPretty(OS, nullptr, Policy);
+ spaceBeforePlaceHolder(OS);
+}
+void TypePrinter::printTypeOfExprAfter(const TypeOfExprType *T,
+ raw_ostream &OS) { }
+
+void TypePrinter::printTypeOfBefore(const TypeOfType *T, raw_ostream &OS) {
+ OS << "typeof(";
+ print(T->getUnderlyingType(), OS, StringRef());
+ OS << ')';
+ spaceBeforePlaceHolder(OS);
+}
+void TypePrinter::printTypeOfAfter(const TypeOfType *T, raw_ostream &OS) { }
+
+void TypePrinter::printDecltypeBefore(const DecltypeType *T, raw_ostream &OS) {
+ OS << "decltype(";
+ if (T->getUnderlyingExpr())
+ T->getUnderlyingExpr()->printPretty(OS, nullptr, Policy);
+ OS << ')';
+ spaceBeforePlaceHolder(OS);
+}
+void TypePrinter::printDecltypeAfter(const DecltypeType *T, raw_ostream &OS) { }
+
+void TypePrinter::printUnaryTransformBefore(const UnaryTransformType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+
+ switch (T->getUTTKind()) {
+ case UnaryTransformType::EnumUnderlyingType:
+ OS << "__underlying_type(";
+ print(T->getBaseType(), OS, StringRef());
+ OS << ')';
+ spaceBeforePlaceHolder(OS);
+ return;
+ }
+
+ printBefore(T->getBaseType(), OS);
+}
+void TypePrinter::printUnaryTransformAfter(const UnaryTransformType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+
+ switch (T->getUTTKind()) {
+ case UnaryTransformType::EnumUnderlyingType:
+ return;
+ }
+
+ printAfter(T->getBaseType(), OS);
+}
+
+void TypePrinter::printAutoBefore(const AutoType *T, raw_ostream &OS) {
+ // If the type has been deduced, do not print 'auto'.
+ if (!T->getDeducedType().isNull()) {
+ printBefore(T->getDeducedType(), OS);
+ } else {
+ switch (T->getKeyword()) {
+ case AutoTypeKeyword::Auto: OS << "auto"; break;
+ case AutoTypeKeyword::DecltypeAuto: OS << "decltype(auto)"; break;
+ case AutoTypeKeyword::GNUAutoType: OS << "__auto_type"; break;
+ }
+ spaceBeforePlaceHolder(OS);
+ }
+}
+void TypePrinter::printAutoAfter(const AutoType *T, raw_ostream &OS) {
+ // If the type has been deduced, do not print 'auto'.
+ if (!T->getDeducedType().isNull())
+ printAfter(T->getDeducedType(), OS);
+}
+
+void TypePrinter::printAtomicBefore(const AtomicType *T, raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+
+ OS << "_Atomic(";
+ print(T->getValueType(), OS, StringRef());
+ OS << ')';
+ spaceBeforePlaceHolder(OS);
+}
+void TypePrinter::printAtomicAfter(const AtomicType *T, raw_ostream &OS) { }
+
+/// Appends the given scope to the given output stream.
+void TypePrinter::AppendScope(DeclContext *DC, raw_ostream &OS) {
+ if (DC->isTranslationUnit()) return;
+ if (DC->isFunctionOrMethod()) return;
+ AppendScope(DC->getParent(), OS);
+
+ if (NamespaceDecl *NS = dyn_cast<NamespaceDecl>(DC)) {
+ if (Policy.SuppressUnwrittenScope &&
+ (NS->isAnonymousNamespace() || NS->isInline()))
+ return;
+ if (NS->getIdentifier())
+ OS << NS->getName() << "::";
+ else
+ OS << "(anonymous namespace)::";
+ } else if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(DC)) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ OS << Spec->getIdentifier()->getName();
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ TemplateSpecializationType::PrintTemplateArgumentList(OS,
+ TemplateArgs.data(),
+ TemplateArgs.size(),
+ Policy);
+ OS << "::";
+ } else if (TagDecl *Tag = dyn_cast<TagDecl>(DC)) {
+ if (TypedefNameDecl *Typedef = Tag->getTypedefNameForAnonDecl())
+ OS << Typedef->getIdentifier()->getName() << "::";
+ else if (Tag->getIdentifier())
+ OS << Tag->getIdentifier()->getName() << "::";
+ else
+ return;
+ }
+}
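+
+// Example (illustrative): for a hypothetical class C declared inside
+// 'namespace N { namespace { ... } }', the recursion above prints the
+// outermost scope first, yielding "N::(anonymous namespace)::"; with
+// SuppressUnwrittenScope the anonymous component is omitted, leaving
+// just "N::".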
+
+void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) {
+ if (Policy.SuppressTag)
+ return;
+
+ bool HasKindDecoration = false;
+
+ // bool SuppressTagKeyword
+ // = Policy.LangOpts.CPlusPlus || Policy.SuppressTagKeyword;
+
+ // We don't print tags unless this is an elaborated type.
+ // In C, we just assume every RecordType is an elaborated type.
+ if (!(Policy.LangOpts.CPlusPlus || Policy.SuppressTagKeyword ||
+ D->getTypedefNameForAnonDecl())) {
+ HasKindDecoration = true;
+ OS << D->getKindName();
+ OS << ' ';
+ }
+
+ // Compute the full nested-name-specifier for this type.
+ // In C, this will always be empty except when the type
+  // being printed is anonymous within another record.
+ if (!Policy.SuppressScope)
+ AppendScope(D->getDeclContext(), OS);
+
+ if (const IdentifierInfo *II = D->getIdentifier())
+ OS << II->getName();
+ else if (TypedefNameDecl *Typedef = D->getTypedefNameForAnonDecl()) {
+ assert(Typedef->getIdentifier() && "Typedef without identifier?");
+ OS << Typedef->getIdentifier()->getName();
+ } else {
+ // Make an unambiguous representation for anonymous types, e.g.
+ // (anonymous enum at /usr/include/string.h:120:9)
+ OS << (Policy.MSVCFormatting ? '`' : '(');
+
+ if (isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda()) {
+ OS << "lambda";
+ HasKindDecoration = true;
+ } else {
+ OS << "anonymous";
+ }
+
+ if (Policy.AnonymousTagLocations) {
+ // Suppress the redundant tag keyword if we just printed one.
+ // We don't have to worry about ElaboratedTypes here because you can't
+ // refer to an anonymous type with one.
+ if (!HasKindDecoration)
+ OS << " " << D->getKindName();
+
+ PresumedLoc PLoc = D->getASTContext().getSourceManager().getPresumedLoc(
+ D->getLocation());
+ if (PLoc.isValid()) {
+ OS << " at " << PLoc.getFilename()
+ << ':' << PLoc.getLine()
+ << ':' << PLoc.getColumn();
+ }
+ }
+
+ OS << (Policy.MSVCFormatting ? '\'' : ')');
+ }
+
+ // If this is a class template specialization, print the template
+ // arguments.
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
+ const TemplateArgument *Args;
+ unsigned NumArgs;
+ if (TypeSourceInfo *TAW = Spec->getTypeAsWritten()) {
+ const TemplateSpecializationType *TST =
+ cast<TemplateSpecializationType>(TAW->getType());
+ Args = TST->getArgs();
+ NumArgs = TST->getNumArgs();
+ } else {
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ Args = TemplateArgs.data();
+ NumArgs = TemplateArgs.size();
+ }
+ IncludeStrongLifetimeRAII Strong(Policy);
+ TemplateSpecializationType::PrintTemplateArgumentList(OS,
+ Args, NumArgs,
+ Policy);
+ }
+
+ spaceBeforePlaceHolder(OS);
+}
+
+void TypePrinter::printRecordBefore(const RecordType *T, raw_ostream &OS) {
+ printTag(T->getDecl(), OS);
+}
+void TypePrinter::printRecordAfter(const RecordType *T, raw_ostream &OS) { }
+
+void TypePrinter::printEnumBefore(const EnumType *T, raw_ostream &OS) {
+ printTag(T->getDecl(), OS);
+}
+void TypePrinter::printEnumAfter(const EnumType *T, raw_ostream &OS) { }
+
+void TypePrinter::printTemplateTypeParmBefore(const TemplateTypeParmType *T,
+ raw_ostream &OS) {
+ if (IdentifierInfo *Id = T->getIdentifier())
+ OS << Id->getName();
+ else
+ OS << "type-parameter-" << T->getDepth() << '-' << T->getIndex();
+ spaceBeforePlaceHolder(OS);
+}
+void TypePrinter::printTemplateTypeParmAfter(const TemplateTypeParmType *T,
+ raw_ostream &OS) { }
+
+void TypePrinter::printSubstTemplateTypeParmBefore(
+ const SubstTemplateTypeParmType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ printBefore(T->getReplacementType(), OS);
+}
+void TypePrinter::printSubstTemplateTypeParmAfter(
+ const SubstTemplateTypeParmType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ printAfter(T->getReplacementType(), OS);
+}
+
+void TypePrinter::printSubstTemplateTypeParmPackBefore(
+ const SubstTemplateTypeParmPackType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ printTemplateTypeParmBefore(T->getReplacedParameter(), OS);
+}
+void TypePrinter::printSubstTemplateTypeParmPackAfter(
+ const SubstTemplateTypeParmPackType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ printTemplateTypeParmAfter(T->getReplacedParameter(), OS);
+}
+
+void TypePrinter::printTemplateSpecializationBefore(
+ const TemplateSpecializationType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ T->getTemplateName().print(OS, Policy);
+
+ TemplateSpecializationType::PrintTemplateArgumentList(OS,
+ T->getArgs(),
+ T->getNumArgs(),
+ Policy);
+ spaceBeforePlaceHolder(OS);
+}
+void TypePrinter::printTemplateSpecializationAfter(
+ const TemplateSpecializationType *T,
+ raw_ostream &OS) { }
+
+void TypePrinter::printInjectedClassNameBefore(const InjectedClassNameType *T,
+ raw_ostream &OS) {
+ printTemplateSpecializationBefore(T->getInjectedTST(), OS);
+}
+void TypePrinter::printInjectedClassNameAfter(const InjectedClassNameType *T,
+ raw_ostream &OS) { }
+
+void TypePrinter::printElaboratedBefore(const ElaboratedType *T,
+ raw_ostream &OS) {
+ if (Policy.SuppressTag && isa<TagType>(T->getNamedType()))
+ return;
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ETK_None)
+ OS << " ";
+ NestedNameSpecifier* Qualifier = T->getQualifier();
+ if (Qualifier)
+ Qualifier->print(OS, Policy);
+
+ ElaboratedTypePolicyRAII PolicyRAII(Policy);
+ printBefore(T->getNamedType(), OS);
+}
+void TypePrinter::printElaboratedAfter(const ElaboratedType *T,
+ raw_ostream &OS) {
+ ElaboratedTypePolicyRAII PolicyRAII(Policy);
+ printAfter(T->getNamedType(), OS);
+}
+
+void TypePrinter::printParenBefore(const ParenType *T, raw_ostream &OS) {
+  printBefore(T->getInnerType(), OS);
+  if (!HasEmptyPlaceHolder && !isa<FunctionType>(T->getInnerType()))
+    OS << '(';
+}
+void TypePrinter::printParenAfter(const ParenType *T, raw_ostream &OS) {
+  if (!HasEmptyPlaceHolder && !isa<FunctionType>(T->getInnerType()))
+    OS << ')';
+  printAfter(T->getInnerType(), OS);
+}
+
+void TypePrinter::printDependentNameBefore(const DependentNameType *T,
+ raw_ostream &OS) {
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ETK_None)
+ OS << " ";
+
+ T->getQualifier()->print(OS, Policy);
+
+ OS << T->getIdentifier()->getName();
+ spaceBeforePlaceHolder(OS);
+}
+void TypePrinter::printDependentNameAfter(const DependentNameType *T,
+ raw_ostream &OS) { }
+
+void TypePrinter::printDependentTemplateSpecializationBefore(
+ const DependentTemplateSpecializationType *T, raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ETK_None)
+ OS << " ";
+
+ if (T->getQualifier())
+ T->getQualifier()->print(OS, Policy);
+ OS << T->getIdentifier()->getName();
+ TemplateSpecializationType::PrintTemplateArgumentList(OS,
+ T->getArgs(),
+ T->getNumArgs(),
+ Policy);
+ spaceBeforePlaceHolder(OS);
+}
+void TypePrinter::printDependentTemplateSpecializationAfter(
+ const DependentTemplateSpecializationType *T, raw_ostream &OS) { }
+
+void TypePrinter::printPackExpansionBefore(const PackExpansionType *T,
+ raw_ostream &OS) {
+ printBefore(T->getPattern(), OS);
+}
+void TypePrinter::printPackExpansionAfter(const PackExpansionType *T,
+ raw_ostream &OS) {
+ printAfter(T->getPattern(), OS);
+ OS << "...";
+}
+
+void TypePrinter::printAttributedBefore(const AttributedType *T,
+ raw_ostream &OS) {
+ // Prefer the macro forms of the GC and ownership qualifiers.
+ if (T->getAttrKind() == AttributedType::attr_objc_gc ||
+ T->getAttrKind() == AttributedType::attr_objc_ownership)
+ return printBefore(T->getEquivalentType(), OS);
+
+ if (T->getAttrKind() == AttributedType::attr_objc_kindof)
+ OS << "__kindof ";
+
+ printBefore(T->getModifiedType(), OS);
+
+ if (T->isMSTypeSpec()) {
+ switch (T->getAttrKind()) {
+ default: return;
+ case AttributedType::attr_ptr32: OS << " __ptr32"; break;
+ case AttributedType::attr_ptr64: OS << " __ptr64"; break;
+ case AttributedType::attr_sptr: OS << " __sptr"; break;
+ case AttributedType::attr_uptr: OS << " __uptr"; break;
+ }
+ spaceBeforePlaceHolder(OS);
+ }
+
+ // Print nullability type specifiers.
+ if (T->getAttrKind() == AttributedType::attr_nonnull ||
+ T->getAttrKind() == AttributedType::attr_nullable ||
+ T->getAttrKind() == AttributedType::attr_null_unspecified) {
+ if (T->getAttrKind() == AttributedType::attr_nonnull)
+ OS << " _Nonnull";
+ else if (T->getAttrKind() == AttributedType::attr_nullable)
+ OS << " _Nullable";
+ else if (T->getAttrKind() == AttributedType::attr_null_unspecified)
+ OS << " _Null_unspecified";
+ else
+ llvm_unreachable("unhandled nullability");
+ spaceBeforePlaceHolder(OS);
+ }
+}
+
+void TypePrinter::printAttributedAfter(const AttributedType *T,
+ raw_ostream &OS) {
+ // Prefer the macro forms of the GC and ownership qualifiers.
+ if (T->getAttrKind() == AttributedType::attr_objc_gc ||
+ T->getAttrKind() == AttributedType::attr_objc_ownership)
+ return printAfter(T->getEquivalentType(), OS);
+
+ if (T->getAttrKind() == AttributedType::attr_objc_kindof)
+ return;
+
+ // TODO: not all attributes are GCC-style attributes.
+ if (T->isMSTypeSpec())
+ return;
+
+ // Nothing to print after.
+ if (T->getAttrKind() == AttributedType::attr_nonnull ||
+ T->getAttrKind() == AttributedType::attr_nullable ||
+ T->getAttrKind() == AttributedType::attr_null_unspecified)
+ return printAfter(T->getModifiedType(), OS);
+
+ // If this is a calling convention attribute, don't print the implicit CC from
+ // the modified type.
+ SaveAndRestore<bool> MaybeSuppressCC(InsideCCAttribute, T->isCallingConv());
+
+ printAfter(T->getModifiedType(), OS);
+
+ // Don't print the inert __unsafe_unretained attribute at all.
+ if (T->getAttrKind() == AttributedType::attr_objc_inert_unsafe_unretained)
+ return;
+
+  // Print nullability type specifiers that occur after the type.
+ if (T->getAttrKind() == AttributedType::attr_nonnull ||
+ T->getAttrKind() == AttributedType::attr_nullable ||
+ T->getAttrKind() == AttributedType::attr_null_unspecified) {
+ if (T->getAttrKind() == AttributedType::attr_nonnull)
+ OS << " _Nonnull";
+ else if (T->getAttrKind() == AttributedType::attr_nullable)
+ OS << " _Nullable";
+ else if (T->getAttrKind() == AttributedType::attr_null_unspecified)
+ OS << " _Null_unspecified";
+ else
+ llvm_unreachable("unhandled nullability");
+
+ return;
+ }
+
+ OS << " __attribute__((";
+ switch (T->getAttrKind()) {
+ default: llvm_unreachable("This attribute should have been handled already");
+ case AttributedType::attr_address_space:
+ OS << "address_space(";
+ OS << T->getEquivalentType().getAddressSpace();
+ OS << ')';
+ break;
+
+ case AttributedType::attr_vector_size: {
+ OS << "__vector_size__(";
+    if (const VectorType *vector =
+            T->getEquivalentType()->getAs<VectorType>()) {
+ OS << vector->getNumElements();
+ OS << " * sizeof(";
+ print(vector->getElementType(), OS, StringRef());
+ OS << ')';
+ }
+ OS << ')';
+ break;
+ }
+
+ case AttributedType::attr_neon_vector_type:
+ case AttributedType::attr_neon_polyvector_type: {
+ if (T->getAttrKind() == AttributedType::attr_neon_vector_type)
+ OS << "neon_vector_type(";
+ else
+ OS << "neon_polyvector_type(";
+ const VectorType *vector = T->getEquivalentType()->getAs<VectorType>();
+ OS << vector->getNumElements();
+ OS << ')';
+ break;
+ }
+
+ case AttributedType::attr_regparm: {
+ // FIXME: When Sema learns to form this AttributedType, avoid printing the
+ // attribute again in printFunctionProtoAfter.
+ OS << "regparm(";
+ QualType t = T->getEquivalentType();
+ while (!t->isFunctionType())
+ t = t->getPointeeType();
+ OS << t->getAs<FunctionType>()->getRegParmType();
+ OS << ')';
+ break;
+ }
+
+ case AttributedType::attr_objc_gc: {
+ OS << "objc_gc(";
+
+ QualType tmp = T->getEquivalentType();
+ while (tmp.getObjCGCAttr() == Qualifiers::GCNone) {
+ QualType next = tmp->getPointeeType();
+ if (next == tmp) break;
+ tmp = next;
+ }
+
+ if (tmp.isObjCGCWeak())
+ OS << "weak";
+ else
+ OS << "strong";
+ OS << ')';
+ break;
+ }
+
+ case AttributedType::attr_objc_ownership:
+ OS << "objc_ownership(";
+ switch (T->getEquivalentType().getObjCLifetime()) {
+ case Qualifiers::OCL_None: llvm_unreachable("no ownership!");
+ case Qualifiers::OCL_ExplicitNone: OS << "none"; break;
+ case Qualifiers::OCL_Strong: OS << "strong"; break;
+ case Qualifiers::OCL_Weak: OS << "weak"; break;
+ case Qualifiers::OCL_Autoreleasing: OS << "autoreleasing"; break;
+ }
+ OS << ')';
+ break;
+
+ // FIXME: When Sema learns to form this AttributedType, avoid printing the
+ // attribute again in printFunctionProtoAfter.
+ case AttributedType::attr_noreturn: OS << "noreturn"; break;
+
+ case AttributedType::attr_cdecl: OS << "cdecl"; break;
+ case AttributedType::attr_fastcall: OS << "fastcall"; break;
+ case AttributedType::attr_stdcall: OS << "stdcall"; break;
+ case AttributedType::attr_thiscall: OS << "thiscall"; break;
+ case AttributedType::attr_vectorcall: OS << "vectorcall"; break;
+ case AttributedType::attr_pascal: OS << "pascal"; break;
+ case AttributedType::attr_ms_abi: OS << "ms_abi"; break;
+ case AttributedType::attr_sysv_abi: OS << "sysv_abi"; break;
+ case AttributedType::attr_pcs:
+ case AttributedType::attr_pcs_vfp: {
+ OS << "pcs(";
+ QualType t = T->getEquivalentType();
+ while (!t->isFunctionType())
+ t = t->getPointeeType();
+ OS << (t->getAs<FunctionType>()->getCallConv() == CC_AAPCS ?
+ "\"aapcs\"" : "\"aapcs-vfp\"");
+ OS << ')';
+ break;
+ }
+ case AttributedType::attr_inteloclbicc: OS << "inteloclbicc"; break;
+ }
+ OS << "))";
+}
+
+void TypePrinter::printObjCInterfaceBefore(const ObjCInterfaceType *T,
+ raw_ostream &OS) {
+ OS << T->getDecl()->getName();
+ spaceBeforePlaceHolder(OS);
+}
+void TypePrinter::printObjCInterfaceAfter(const ObjCInterfaceType *T,
+ raw_ostream &OS) { }
+
+void TypePrinter::printObjCObjectBefore(const ObjCObjectType *T,
+ raw_ostream &OS) {
+ if (T->qual_empty() && T->isUnspecializedAsWritten() &&
+ !T->isKindOfTypeAsWritten())
+ return printBefore(T->getBaseType(), OS);
+
+ if (T->isKindOfTypeAsWritten())
+ OS << "__kindof ";
+
+ print(T->getBaseType(), OS, StringRef());
+
+ if (T->isSpecializedAsWritten()) {
+ bool isFirst = true;
+ OS << '<';
+ for (auto typeArg : T->getTypeArgsAsWritten()) {
+ if (isFirst)
+ isFirst = false;
+ else
+ OS << ",";
+
+ print(typeArg, OS, StringRef());
+ }
+ OS << '>';
+ }
+
+ if (!T->qual_empty()) {
+ bool isFirst = true;
+ OS << '<';
+ for (const auto *I : T->quals()) {
+ if (isFirst)
+ isFirst = false;
+ else
+ OS << ',';
+ OS << I->getName();
+ }
+ OS << '>';
+ }
+
+ spaceBeforePlaceHolder(OS);
+}
+void TypePrinter::printObjCObjectAfter(const ObjCObjectType *T,
+ raw_ostream &OS) {
+ if (T->qual_empty() && T->isUnspecializedAsWritten() &&
+ !T->isKindOfTypeAsWritten())
+ return printAfter(T->getBaseType(), OS);
+}
+
+void TypePrinter::printObjCObjectPointerBefore(const ObjCObjectPointerType *T,
+ raw_ostream &OS) {
+ printBefore(T->getPointeeType(), OS);
+
+ // If we need to print the pointer, print it now.
+ if (!T->isObjCIdType() && !T->isObjCQualifiedIdType() &&
+ !T->isObjCClassType() && !T->isObjCQualifiedClassType()) {
+ if (HasEmptyPlaceHolder)
+ OS << ' ';
+ OS << '*';
+ }
+}
+void TypePrinter::printObjCObjectPointerAfter(const ObjCObjectPointerType *T,
+ raw_ostream &OS) { }
+
+void TemplateSpecializationType::
+ PrintTemplateArgumentList(raw_ostream &OS,
+ const TemplateArgumentListInfo &Args,
+ const PrintingPolicy &Policy) {
+ return PrintTemplateArgumentList(OS,
+ Args.getArgumentArray(),
+ Args.size(),
+ Policy);
+}
+
+void
+TemplateSpecializationType::PrintTemplateArgumentList(
+ raw_ostream &OS,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ const PrintingPolicy &Policy,
+ bool SkipBrackets) {
+ const char *Comma = Policy.MSVCFormatting ? "," : ", ";
+ if (!SkipBrackets)
+ OS << '<';
+
+ bool needSpace = false;
+ for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
+ // Print the argument into a string.
+ SmallString<128> Buf;
+ llvm::raw_svector_ostream ArgOS(Buf);
+ if (Args[Arg].getKind() == TemplateArgument::Pack) {
+ if (Args[Arg].pack_size() && Arg > 0)
+ OS << Comma;
+ PrintTemplateArgumentList(ArgOS,
+ Args[Arg].pack_begin(),
+ Args[Arg].pack_size(),
+ Policy, true);
+ } else {
+ if (Arg > 0)
+ OS << Comma;
+ Args[Arg].print(Policy, ArgOS);
+ }
+ StringRef ArgString = ArgOS.str();
+
+ // If this is the first argument and its string representation
+ // begins with the global scope specifier ('::foo'), add a space
+    // to avoid printing the digraph '<:'.
+ if (!Arg && !ArgString.empty() && ArgString[0] == ':')
+ OS << ' ';
+
+ OS << ArgString;
+
+ needSpace = (!ArgString.empty() && ArgString.back() == '>');
+ }
+
+ // If the last character of our string is '>', add another space to
+  // keep the two '>'s separate tokens. We don't *have* to do this in
+ // C++0x, but it's still good hygiene.
+ if (needSpace)
+ OS << ' ';
+
+ if (!SkipBrackets)
+ OS << '>';
+}
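+
+// Example (illustrative): a first argument written '::N::T' prints as
+// "< ::N::T>" so the output never begins with the '<:' digraph, and a
+// trailing 'vector<int>' argument prints as "<vector<int> >" so the two
+// closing angle brackets remain separate tokens.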
+
+// Sadly, repeat all that with TemplateArgLoc.
+void TemplateSpecializationType::
+PrintTemplateArgumentList(raw_ostream &OS,
+ const TemplateArgumentLoc *Args, unsigned NumArgs,
+ const PrintingPolicy &Policy) {
+ OS << '<';
+ const char *Comma = Policy.MSVCFormatting ? "," : ", ";
+
+ bool needSpace = false;
+ for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
+ if (Arg > 0)
+ OS << Comma;
+
+ // Print the argument into a string.
+ SmallString<128> Buf;
+ llvm::raw_svector_ostream ArgOS(Buf);
+ if (Args[Arg].getArgument().getKind() == TemplateArgument::Pack) {
+ PrintTemplateArgumentList(ArgOS,
+ Args[Arg].getArgument().pack_begin(),
+ Args[Arg].getArgument().pack_size(),
+ Policy, true);
+ } else {
+ Args[Arg].getArgument().print(Policy, ArgOS);
+ }
+ StringRef ArgString = ArgOS.str();
+
+ // If this is the first argument and its string representation
+ // begins with the global scope specifier ('::foo'), add a space
+    // to avoid printing the digraph '<:'.
+ if (!Arg && !ArgString.empty() && ArgString[0] == ':')
+ OS << ' ';
+
+ OS << ArgString;
+
+ needSpace = (!ArgString.empty() && ArgString.back() == '>');
+ }
+
+ // If the last character of our string is '>', add another space to
+  // keep the two '>'s separate tokens. We don't *have* to do this in
+ // C++0x, but it's still good hygiene.
+ if (needSpace)
+ OS << ' ';
+
+ OS << '>';
+}
+
+std::string Qualifiers::getAsString() const {
+ LangOptions LO;
+ return getAsString(PrintingPolicy(LO));
+}
+
+// Returns the qualifiers as a string, separated by spaces. Does not
+// append a final space.
+std::string Qualifiers::getAsString(const PrintingPolicy &Policy) const {
+ SmallString<64> Buf;
+ llvm::raw_svector_ostream StrOS(Buf);
+ print(StrOS, Policy);
+ return StrOS.str();
+}
+
+bool Qualifiers::isEmptyWhenPrinted(const PrintingPolicy &Policy) const {
+ if (getCVRQualifiers())
+ return false;
+
+ if (getAddressSpace())
+ return false;
+
+ if (getObjCGCAttr())
+ return false;
+
+ if (Qualifiers::ObjCLifetime lifetime = getObjCLifetime())
+ if (!(lifetime == Qualifiers::OCL_Strong && Policy.SuppressStrongLifetime))
+ return false;
+
+ return true;
+}
+
+// Prints the qualifiers, separated by spaces, to the given stream.
+// Appends a trailing space only if requested and at least one
+// qualifier was printed.
+void Qualifiers::print(raw_ostream &OS, const PrintingPolicy& Policy,
+ bool appendSpaceIfNonEmpty) const {
+ bool addSpace = false;
+
+ unsigned quals = getCVRQualifiers();
+ if (quals) {
+ AppendTypeQualList(OS, quals, Policy.LangOpts.C99);
+ addSpace = true;
+ }
+ if (unsigned addrspace = getAddressSpace()) {
+ if (addSpace)
+ OS << ' ';
+ addSpace = true;
+ switch (addrspace) {
+ case LangAS::opencl_global:
+ OS << "__global";
+ break;
+ case LangAS::opencl_local:
+ OS << "__local";
+ break;
+ case LangAS::opencl_constant:
+ OS << "__constant";
+ break;
+ case LangAS::opencl_generic:
+ OS << "__generic";
+ break;
+ default:
+ OS << "__attribute__((address_space(";
+ OS << addrspace;
+ OS << ")))";
+ }
+ }
+ if (Qualifiers::GC gc = getObjCGCAttr()) {
+ if (addSpace)
+ OS << ' ';
+ addSpace = true;
+ if (gc == Qualifiers::Weak)
+ OS << "__weak";
+ else
+ OS << "__strong";
+ }
+ if (Qualifiers::ObjCLifetime lifetime = getObjCLifetime()) {
+ if (!(lifetime == Qualifiers::OCL_Strong && Policy.SuppressStrongLifetime)){
+ if (addSpace)
+ OS << ' ';
+ addSpace = true;
+ }
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None: llvm_unreachable("none but true");
+ case Qualifiers::OCL_ExplicitNone: OS << "__unsafe_unretained"; break;
+ case Qualifiers::OCL_Strong:
+ if (!Policy.SuppressStrongLifetime)
+ OS << "__strong";
+ break;
+
+ case Qualifiers::OCL_Weak: OS << "__weak"; break;
+ case Qualifiers::OCL_Autoreleasing: OS << "__autoreleasing"; break;
+ }
+ }
+
+ if (appendSpaceIfNonEmpty && addSpace)
+ OS << ' ';
+}
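+
+// Example (illustrative): the qualifier set {const, volatile, __weak}
+// prints as "const volatile __weak", and a target-specific address
+// space, say 2, prints as "__attribute__((address_space(2)))".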
+
+std::string QualType::getAsString(const PrintingPolicy &Policy) const {
+ std::string S;
+ getAsStringInternal(S, Policy);
+ return S;
+}
+
+std::string QualType::getAsString(const Type *ty, Qualifiers qs) {
+ std::string buffer;
+ LangOptions options;
+ getAsStringInternal(ty, qs, buffer, PrintingPolicy(options));
+ return buffer;
+}
+
+void QualType::print(const Type *ty, Qualifiers qs,
+ raw_ostream &OS, const PrintingPolicy &policy,
+ const Twine &PlaceHolder) {
+ SmallString<128> PHBuf;
+ StringRef PH = PlaceHolder.toStringRef(PHBuf);
+
+ TypePrinter(policy).print(ty, qs, OS, PH);
+}
+
+void QualType::getAsStringInternal(const Type *ty, Qualifiers qs,
+ std::string &buffer,
+ const PrintingPolicy &policy) {
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream StrOS(Buf);
+ TypePrinter(policy).print(ty, qs, StrOS, buffer);
+ std::string str = StrOS.str();
+ buffer.swap(str);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/VTTBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/VTTBuilder.cpp
new file mode 100644
index 0000000..53461eb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/VTTBuilder.cpp
@@ -0,0 +1,209 @@
+//===--- VTTBuilder.cpp - C++ VTT layout builder --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with generation of the layout of virtual table
+// tables (VTT).
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/VTTBuilder.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/Format.h"
+#include <algorithm>
+#include <cstdio>
+
+using namespace clang;
+
+#define DUMP_OVERRIDERS 0
+
+VTTBuilder::VTTBuilder(ASTContext &Ctx,
+ const CXXRecordDecl *MostDerivedClass,
+ bool GenerateDefinition)
+ : Ctx(Ctx), MostDerivedClass(MostDerivedClass),
+ MostDerivedClassLayout(Ctx.getASTRecordLayout(MostDerivedClass)),
+ GenerateDefinition(GenerateDefinition) {
+ // Lay out this VTT.
+ LayoutVTT(BaseSubobject(MostDerivedClass, CharUnits::Zero()),
+ /*BaseIsVirtual=*/false);
+}
+
+void VTTBuilder::AddVTablePointer(BaseSubobject Base, uint64_t VTableIndex,
+ const CXXRecordDecl *VTableClass) {
+ // Store the vtable pointer index if we're generating the primary VTT.
+ if (VTableClass == MostDerivedClass) {
+ assert(!SecondaryVirtualPointerIndices.count(Base) &&
+ "A virtual pointer index already exists for this base subobject!");
+ SecondaryVirtualPointerIndices[Base] = VTTComponents.size();
+ }
+
+ if (!GenerateDefinition) {
+ VTTComponents.push_back(VTTComponent());
+ return;
+ }
+
+ VTTComponents.push_back(VTTComponent(VTableIndex, Base));
+}
+
+void VTTBuilder::LayoutSecondaryVTTs(BaseSubobject Base) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ for (const auto &I : RD->bases()) {
+ // Don't layout virtual bases.
+ if (I.isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
+
+ const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
+ CharUnits BaseOffset = Base.getBaseOffset() +
+ Layout.getBaseClassOffset(BaseDecl);
+
+ // Layout the VTT for this base.
+ LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/false);
+ }
+}
+
+void
+VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ uint64_t VTableIndex,
+ const CXXRecordDecl *VTableClass,
+ VisitedVirtualBasesSetTy &VBases) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+  // We're not interested in bases that have no virtual bases and are
+  // not themselves morally virtual.
+ if (!RD->getNumVBases() && !BaseIsMorallyVirtual)
+ return;
+
+ for (const auto &I : RD->bases()) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
+
+ // Itanium C++ ABI 2.6.2:
+ // Secondary virtual pointers are present for all bases with either
+ // virtual bases or virtual function declarations overridden along a
+ // virtual path.
+ //
+ // If the base class is not dynamic, we don't want to add it, nor any
+ // of its base classes.
+ if (!BaseDecl->isDynamicClass())
+ continue;
+
+ bool BaseDeclIsMorallyVirtual = BaseIsMorallyVirtual;
+ bool BaseDeclIsNonVirtualPrimaryBase = false;
+ CharUnits BaseOffset;
+ if (I.isVirtual()) {
+ // Ignore virtual bases that we've already visited.
+ if (!VBases.insert(BaseDecl).second)
+ continue;
+
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ BaseDeclIsMorallyVirtual = true;
+ } else {
+ const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
+
+ BaseOffset = Base.getBaseOffset() +
+ Layout.getBaseClassOffset(BaseDecl);
+
+ if (!Layout.isPrimaryBaseVirtual() &&
+ Layout.getPrimaryBase() == BaseDecl)
+ BaseDeclIsNonVirtualPrimaryBase = true;
+ }
+
+ // Itanium C++ ABI 2.6.2:
+ // Secondary virtual pointers: for each base class X which (a) has virtual
+ // bases or is reachable along a virtual path from D, and (b) is not a
+ // non-virtual primary base, the address of the virtual table for X-in-D
+ // or an appropriate construction virtual table.
+ if (!BaseDeclIsNonVirtualPrimaryBase &&
+ (BaseDecl->getNumVBases() || BaseDeclIsMorallyVirtual)) {
+ // Add the vtable pointer.
+ AddVTablePointer(BaseSubobject(BaseDecl, BaseOffset), VTableIndex,
+ VTableClass);
+ }
+
+ // And lay out the secondary virtual pointers for the base class.
+ LayoutSecondaryVirtualPointers(BaseSubobject(BaseDecl, BaseOffset),
+ BaseDeclIsMorallyVirtual, VTableIndex,
+ VTableClass, VBases);
+ }
+}
+
+void
+VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
+ uint64_t VTableIndex) {
+ VisitedVirtualBasesSetTy VBases;
+ LayoutSecondaryVirtualPointers(Base, /*BaseIsMorallyVirtual=*/false,
+ VTableIndex, Base.getBase(), VBases);
+}
+
+void VTTBuilder::LayoutVirtualVTTs(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases) {
+ for (const auto &I : RD->bases()) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
+
+ // Check if this is a virtual base.
+ if (I.isVirtual()) {
+ // Check if we've seen this base before.
+ if (!VBases.insert(BaseDecl).second)
+ continue;
+
+ CharUnits BaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+
+ LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/true);
+ }
+
+ // We only need to layout virtual VTTs for this base if it actually has
+ // virtual bases.
+ if (BaseDecl->getNumVBases())
+ LayoutVirtualVTTs(BaseDecl, VBases);
+ }
+}
+
+void VTTBuilder::LayoutVTT(BaseSubobject Base, bool BaseIsVirtual) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ // Itanium C++ ABI 2.6.2:
+ // An array of virtual table addresses, called the VTT, is declared for
+ // each class type that has indirect or direct virtual base classes.
+ if (RD->getNumVBases() == 0)
+ return;
+
+ bool IsPrimaryVTT = Base.getBase() == MostDerivedClass;
+
+ if (!IsPrimaryVTT) {
+ // Remember the sub-VTT index.
+ SubVTTIndicies[Base] = VTTComponents.size();
+ }
+
+ uint64_t VTableIndex = VTTVTables.size();
+ VTTVTables.push_back(VTTVTable(Base, BaseIsVirtual));
+
+ // Add the primary vtable pointer.
+ AddVTablePointer(Base, VTableIndex, RD);
+
+ // Add the secondary VTTs.
+ LayoutSecondaryVTTs(Base);
+
+ // Add the secondary virtual pointers.
+ LayoutSecondaryVirtualPointers(Base, VTableIndex);
+
+ // If this is the primary VTT, we want to lay out virtual VTTs as well.
+ if (IsPrimaryVTT) {
+ VisitedVirtualBasesSetTy VBases;
+ LayoutVirtualVTTs(Base.getBase(), VBases);
+ }
+}
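+
+// Example (informal sketch): given the hypothetical hierarchy
+//   struct A { virtual void f(); };
+//   struct B : virtual A { };
+//   struct C : virtual A { };
+//   struct D : B, C { };
+// the primary VTT for D laid out above begins with D's primary vtable
+// pointer, followed by the sub-VTTs for B-in-D and C-in-D, then D's
+// secondary virtual pointers; A itself gets no VTT because it has no
+// virtual bases.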
diff --git a/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp
new file mode 100644
index 0000000..bae0186
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp
@@ -0,0 +1,3758 @@
+//===--- VTableBuilder.cpp - C++ vtable layout builder --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with generation of the layout of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/VTableBuilder.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cstdio>
+
+using namespace clang;
+
+#define DUMP_OVERRIDERS 0
+
+namespace {
+
+/// BaseOffset - Represents an offset from a derived class to a direct or
+/// indirect base class.
+struct BaseOffset {
+ /// DerivedClass - The derived class.
+ const CXXRecordDecl *DerivedClass;
+
+ /// VirtualBase - If the path from the derived class to the base class
+ /// involves virtual base classes, this holds the declaration of the last
+ /// virtual base in this path (i.e. closest to the base class).
+ const CXXRecordDecl *VirtualBase;
+
+ /// NonVirtualOffset - The offset from the derived class to the base class.
+ /// (Or the offset from the virtual base class to the base class, if the
+ /// path from the derived class to the base class involves a virtual base
+  /// class.)
+ CharUnits NonVirtualOffset;
+
+ BaseOffset() : DerivedClass(nullptr), VirtualBase(nullptr),
+ NonVirtualOffset(CharUnits::Zero()) { }
+ BaseOffset(const CXXRecordDecl *DerivedClass,
+ const CXXRecordDecl *VirtualBase, CharUnits NonVirtualOffset)
+ : DerivedClass(DerivedClass), VirtualBase(VirtualBase),
+ NonVirtualOffset(NonVirtualOffset) { }
+
+ bool isEmpty() const { return NonVirtualOffset.isZero() && !VirtualBase; }
+};
+
+/// FinalOverriders - Contains the final overrider member functions for all
+/// member functions in the base subobjects of a class.
+class FinalOverriders {
+public:
+ /// OverriderInfo - Information about a final overrider.
+ struct OverriderInfo {
+ /// Method - The method decl of the overrider.
+ const CXXMethodDecl *Method;
+
+ /// VirtualBase - The virtual base class subobject of this overrider.
+ /// Note that this records the closest derived virtual base class subobject.
+ const CXXRecordDecl *VirtualBase;
+
+ /// Offset - the base offset of the overrider's parent in the layout class.
+ CharUnits Offset;
+
+ OverriderInfo() : Method(nullptr), VirtualBase(nullptr),
+ Offset(CharUnits::Zero()) { }
+ };
+
+private:
+ /// MostDerivedClass - The most derived class for which the final overriders
+ /// are stored.
+ const CXXRecordDecl *MostDerivedClass;
+
+ /// MostDerivedClassOffset - If we're building final overriders for a
+ /// construction vtable, this holds the offset from the layout class to the
+ /// most derived class.
+ const CharUnits MostDerivedClassOffset;
+
+ /// LayoutClass - The class we're using for layout information. Will be
+ /// different than the most derived class if the final overriders are for a
+ /// construction vtable.
+ const CXXRecordDecl *LayoutClass;
+
+ ASTContext &Context;
+
+ /// MostDerivedClassLayout - the AST record layout of the most derived class.
+ const ASTRecordLayout &MostDerivedClassLayout;
+
+ /// MethodBaseOffsetPairTy - Uniquely identifies a member function
+ /// in a base subobject.
+ typedef std::pair<const CXXMethodDecl *, CharUnits> MethodBaseOffsetPairTy;
+
+ typedef llvm::DenseMap<MethodBaseOffsetPairTy,
+ OverriderInfo> OverridersMapTy;
+
+ /// OverridersMap - The final overriders for all virtual member functions of
+ /// all the base subobjects of the most derived class.
+ OverridersMapTy OverridersMap;
+
+  /// SubobjectOffsetMapTy - A mapping from a base subobject (represented
+  /// as a record decl and a subobject number) to its offsets in the most
+ /// derived class as well as the layout class.
+ typedef llvm::DenseMap<std::pair<const CXXRecordDecl *, unsigned>,
+ CharUnits> SubobjectOffsetMapTy;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, unsigned> SubobjectCountMapTy;
+
+ /// ComputeBaseOffsets - Compute the offsets for all base subobjects of the
+ /// given base.
+ void ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
+ CharUnits OffsetInLayoutClass,
+ SubobjectOffsetMapTy &SubobjectOffsets,
+ SubobjectOffsetMapTy &SubobjectLayoutClassOffsets,
+ SubobjectCountMapTy &SubobjectCounts);
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+
+ /// dump - dump the final overriders for a base subobject, and all its direct
+ /// and indirect base subobjects.
+ void dump(raw_ostream &Out, BaseSubobject Base,
+ VisitedVirtualBasesSetTy& VisitedVirtualBases);
+
+public:
+ FinalOverriders(const CXXRecordDecl *MostDerivedClass,
+ CharUnits MostDerivedClassOffset,
+ const CXXRecordDecl *LayoutClass);
+
+ /// getOverrider - Get the final overrider for the given method declaration in
+ /// the subobject with the given base offset.
+ OverriderInfo getOverrider(const CXXMethodDecl *MD,
+ CharUnits BaseOffset) const {
+ assert(OverridersMap.count(std::make_pair(MD, BaseOffset)) &&
+ "Did not find overrider!");
+
+ return OverridersMap.lookup(std::make_pair(MD, BaseOffset));
+ }
+
+ /// dump - dump the final overriders.
+ void dump() {
+ VisitedVirtualBasesSetTy VisitedVirtualBases;
+ dump(llvm::errs(), BaseSubobject(MostDerivedClass, CharUnits::Zero()),
+ VisitedVirtualBases);
+ }
+
+};
+
+FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass,
+ CharUnits MostDerivedClassOffset,
+ const CXXRecordDecl *LayoutClass)
+ : MostDerivedClass(MostDerivedClass),
+ MostDerivedClassOffset(MostDerivedClassOffset), LayoutClass(LayoutClass),
+ Context(MostDerivedClass->getASTContext()),
+ MostDerivedClassLayout(Context.getASTRecordLayout(MostDerivedClass)) {
+
+ // Compute base offsets.
+ SubobjectOffsetMapTy SubobjectOffsets;
+ SubobjectOffsetMapTy SubobjectLayoutClassOffsets;
+ SubobjectCountMapTy SubobjectCounts;
+ ComputeBaseOffsets(BaseSubobject(MostDerivedClass, CharUnits::Zero()),
+ /*IsVirtual=*/false,
+ MostDerivedClassOffset,
+ SubobjectOffsets, SubobjectLayoutClassOffsets,
+ SubobjectCounts);
+
+ // Get the final overriders.
+ CXXFinalOverriderMap FinalOverriders;
+ MostDerivedClass->getFinalOverriders(FinalOverriders);
+
+ for (const auto &Overrider : FinalOverriders) {
+ const CXXMethodDecl *MD = Overrider.first;
+ const OverridingMethods &Methods = Overrider.second;
+
+ for (const auto &M : Methods) {
+ unsigned SubobjectNumber = M.first;
+ assert(SubobjectOffsets.count(std::make_pair(MD->getParent(),
+ SubobjectNumber)) &&
+ "Did not find subobject offset!");
+
+ CharUnits BaseOffset = SubobjectOffsets[std::make_pair(MD->getParent(),
+ SubobjectNumber)];
+
+ assert(M.second.size() == 1 && "Final overrider is not unique!");
+ const UniqueVirtualMethod &Method = M.second.front();
+
+ const CXXRecordDecl *OverriderRD = Method.Method->getParent();
+ assert(SubobjectLayoutClassOffsets.count(
+ std::make_pair(OverriderRD, Method.Subobject))
+ && "Did not find subobject offset!");
+ CharUnits OverriderOffset =
+ SubobjectLayoutClassOffsets[std::make_pair(OverriderRD,
+ Method.Subobject)];
+
+ OverriderInfo& Overrider = OverridersMap[std::make_pair(MD, BaseOffset)];
+ assert(!Overrider.Method && "Overrider should not exist yet!");
+
+ Overrider.Offset = OverriderOffset;
+ Overrider.Method = Method.Method;
+ Overrider.VirtualBase = Method.InVirtualSubobject;
+ }
+ }
+
+#if DUMP_OVERRIDERS
+ // And dump them (for now).
+ dump();
+#endif
+}
+
+static BaseOffset ComputeBaseOffset(const ASTContext &Context,
+ const CXXRecordDecl *DerivedRD,
+ const CXXBasePath &Path) {
+ CharUnits NonVirtualOffset = CharUnits::Zero();
+
+ unsigned NonVirtualStart = 0;
+ const CXXRecordDecl *VirtualBase = nullptr;
+
+ // First, look for the virtual base class.
+ for (int I = Path.size(), E = 0; I != E; --I) {
+ const CXXBasePathElement &Element = Path[I - 1];
+
+ if (Element.Base->isVirtual()) {
+ NonVirtualStart = I;
+ QualType VBaseType = Element.Base->getType();
+ VirtualBase = VBaseType->getAsCXXRecordDecl();
+ break;
+ }
+ }
+
+ // Now compute the non-virtual offset.
+ for (unsigned I = NonVirtualStart, E = Path.size(); I != E; ++I) {
+ const CXXBasePathElement &Element = Path[I];
+
+ // Check the base class offset.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Element.Class);
+
+ const CXXRecordDecl *Base = Element.Base->getType()->getAsCXXRecordDecl();
+
+ NonVirtualOffset += Layout.getBaseClassOffset(Base);
+ }
+
+ // FIXME: This should probably use CharUnits or something. Maybe we should
+ // even change the base offsets in ASTRecordLayout to be specified in
+ // CharUnits.
+ return BaseOffset(DerivedRD, VirtualBase, NonVirtualOffset);
+}
+
+static BaseOffset ComputeBaseOffset(const ASTContext &Context,
+ const CXXRecordDecl *BaseRD,
+ const CXXRecordDecl *DerivedRD) {
+ CXXBasePaths Paths(/*FindAmbiguities=*/false,
+ /*RecordPaths=*/true, /*DetectVirtual=*/false);
+
+ if (!DerivedRD->isDerivedFrom(BaseRD, Paths))
+ llvm_unreachable("Class must be derived from the passed in base class!");
+
+ return ComputeBaseOffset(Context, DerivedRD, Paths.front());
+}
+
+static BaseOffset
+ComputeReturnAdjustmentBaseOffset(ASTContext &Context,
+ const CXXMethodDecl *DerivedMD,
+ const CXXMethodDecl *BaseMD) {
+ const FunctionType *BaseFT = BaseMD->getType()->getAs<FunctionType>();
+ const FunctionType *DerivedFT = DerivedMD->getType()->getAs<FunctionType>();
+
+ // Canonicalize the return types.
+ CanQualType CanDerivedReturnType =
+ Context.getCanonicalType(DerivedFT->getReturnType());
+ CanQualType CanBaseReturnType =
+ Context.getCanonicalType(BaseFT->getReturnType());
+
+ assert(CanDerivedReturnType->getTypeClass() ==
+ CanBaseReturnType->getTypeClass() &&
+ "Types must have same type class!");
+
+ if (CanDerivedReturnType == CanBaseReturnType) {
+ // No adjustment needed.
+ return BaseOffset();
+ }
+
+ if (isa<ReferenceType>(CanDerivedReturnType)) {
+ CanDerivedReturnType =
+ CanDerivedReturnType->getAs<ReferenceType>()->getPointeeType();
+ CanBaseReturnType =
+ CanBaseReturnType->getAs<ReferenceType>()->getPointeeType();
+ } else if (isa<PointerType>(CanDerivedReturnType)) {
+ CanDerivedReturnType =
+ CanDerivedReturnType->getAs<PointerType>()->getPointeeType();
+ CanBaseReturnType =
+ CanBaseReturnType->getAs<PointerType>()->getPointeeType();
+ } else {
+ llvm_unreachable("Unexpected return type!");
+ }
+
+ // We need to compare unqualified types here; consider
+ // const T *Base::foo();
+ // T *Derived::foo();
+ if (CanDerivedReturnType.getUnqualifiedType() ==
+ CanBaseReturnType.getUnqualifiedType()) {
+ // No adjustment needed.
+ return BaseOffset();
+ }
+
+ const CXXRecordDecl *DerivedRD =
+ cast<CXXRecordDecl>(cast<RecordType>(CanDerivedReturnType)->getDecl());
+
+ const CXXRecordDecl *BaseRD =
+ cast<CXXRecordDecl>(cast<RecordType>(CanBaseReturnType)->getDecl());
+
+ return ComputeBaseOffset(Context, BaseRD, DerivedRD);
+}
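+
+// Example (illustrative): with a hypothetical covariant override such as
+//   struct Base { virtual Base *clone(); };
+//   struct Derived : Base { Derived *clone() override; };
+// the thunk for Derived::clone in Base's vtable uses the Derived-to-Base
+// offset computed here to adjust the returned pointer.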
+
+void
+FinalOverriders::ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
+ CharUnits OffsetInLayoutClass,
+ SubobjectOffsetMapTy &SubobjectOffsets,
+ SubobjectOffsetMapTy &SubobjectLayoutClassOffsets,
+ SubobjectCountMapTy &SubobjectCounts) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ unsigned SubobjectNumber = 0;
+ if (!IsVirtual)
+ SubobjectNumber = ++SubobjectCounts[RD];
+
+ // Set up the subobject to offset mapping.
+ assert(!SubobjectOffsets.count(std::make_pair(RD, SubobjectNumber))
+ && "Subobject offset already exists!");
+ assert(!SubobjectLayoutClassOffsets.count(std::make_pair(RD, SubobjectNumber))
+ && "Subobject offset already exists!");
+
+ SubobjectOffsets[std::make_pair(RD, SubobjectNumber)] = Base.getBaseOffset();
+ SubobjectLayoutClassOffsets[std::make_pair(RD, SubobjectNumber)] =
+ OffsetInLayoutClass;
+
+ // Traverse our bases.
+ for (const auto &B : RD->bases()) {
+ const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
+
+ CharUnits BaseOffset;
+ CharUnits BaseOffsetInLayoutClass;
+ if (B.isVirtual()) {
+ // Check if we've visited this virtual base before.
+ if (SubobjectOffsets.count(std::make_pair(BaseDecl, 0)))
+ continue;
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ BaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+ } else {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ CharUnits Offset = Layout.getBaseClassOffset(BaseDecl);
+
+ BaseOffset = Base.getBaseOffset() + Offset;
+ BaseOffsetInLayoutClass = OffsetInLayoutClass + Offset;
+ }
+
+ ComputeBaseOffsets(BaseSubobject(BaseDecl, BaseOffset),
+ B.isVirtual(), BaseOffsetInLayoutClass,
+ SubobjectOffsets, SubobjectLayoutClassOffsets,
+ SubobjectCounts);
+ }
+}
+
+void FinalOverriders::dump(raw_ostream &Out, BaseSubobject Base,
+ VisitedVirtualBasesSetTy &VisitedVirtualBases) {
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ for (const auto &B : RD->bases()) {
+ const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
+
+ // Ignore bases that don't have any virtual member functions.
+ if (!BaseDecl->isPolymorphic())
+ continue;
+
+ CharUnits BaseOffset;
+ if (B.isVirtual()) {
+ if (!VisitedVirtualBases.insert(BaseDecl).second) {
+ // We've visited this base before.
+ continue;
+ }
+
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ } else {
+ BaseOffset = Layout.getBaseClassOffset(BaseDecl) + Base.getBaseOffset();
+ }
+
+ dump(Out, BaseSubobject(BaseDecl, BaseOffset), VisitedVirtualBases);
+ }
+
+ Out << "Final overriders for (";
+ RD->printQualifiedName(Out);
+ Out << ", ";
+ Out << Base.getBaseOffset().getQuantity() << ")\n";
+
+ // Now dump the overriders for this base subobject.
+ for (const auto *MD : RD->methods()) {
+ if (!MD->isVirtual())
+ continue;
+ MD = MD->getCanonicalDecl();
+
+ OverriderInfo Overrider = getOverrider(MD, Base.getBaseOffset());
+
+ Out << " ";
+ MD->printQualifiedName(Out);
+ Out << " - (";
+ Overrider.Method->printQualifiedName(Out);
+ Out << ", " << Overrider.Offset.getQuantity() << ')';
+
+ BaseOffset Offset;
+ if (!Overrider.Method->isPure())
+ Offset = ComputeReturnAdjustmentBaseOffset(Context, Overrider.Method, MD);
+
+ if (!Offset.isEmpty()) {
+ Out << " [ret-adj: ";
+ if (Offset.VirtualBase) {
+ Offset.VirtualBase->printQualifiedName(Out);
+ Out << " vbase, ";
+ }
+
+ Out << Offset.NonVirtualOffset.getQuantity() << " nv]";
+ }
+
+ Out << "\n";
+ }
+}
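+
+// Schematically, for a hypothetical `struct B : A` where B::f overrides
+// A::f, the code above prints lines of the form:
+//
+//   Final overriders for (A, 0)
+//     A::f - (B::f, 0)
+//
+// with an optional " [ret-adj: ...]" suffix when the overrider's covariant
+// return type requires an adjustment.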
+
+/// VCallOffsetMap - Keeps track of vcall offsets when building a vtable.
+struct VCallOffsetMap {
+
+ typedef std::pair<const CXXMethodDecl *, CharUnits> MethodAndOffsetPairTy;
+
+ /// Offsets - Keeps track of methods and their offsets.
+ // FIXME: This should be a real map and not a vector.
+ SmallVector<MethodAndOffsetPairTy, 16> Offsets;
+
+ /// MethodsCanShareVCallOffset - Returns whether two virtual member functions
+ /// can share the same vcall offset.
+ static bool MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
+ const CXXMethodDecl *RHS);
+
+public:
+ /// AddVCallOffset - Adds a vcall offset to the map. Returns true if the
+ /// add was successful, or false if there was already a member function with
+ /// the same signature in the map.
+ bool AddVCallOffset(const CXXMethodDecl *MD, CharUnits OffsetOffset);
+
+ /// getVCallOffsetOffset - Returns the vcall offset offset (relative to the
+ /// vtable address point) for the given virtual member function.
+ CharUnits getVCallOffsetOffset(const CXXMethodDecl *MD);
+
+ /// empty - Returns whether the offset map is empty.
+ bool empty() const { return Offsets.empty(); }
+};
+
+static bool HasSameVirtualSignature(const CXXMethodDecl *LHS,
+ const CXXMethodDecl *RHS) {
+ const FunctionProtoType *LT =
+ cast<FunctionProtoType>(LHS->getType().getCanonicalType());
+ const FunctionProtoType *RT =
+ cast<FunctionProtoType>(RHS->getType().getCanonicalType());
+
+ // Fast-path matches in the canonical types.
+ if (LT == RT) return true;
+
+ // Otherwise, compare the signatures directly. We can't rely on the
+ // overrides list here because there isn't necessarily an inheritance
+ // relationship between the two methods.
+ if (LT->getTypeQuals() != RT->getTypeQuals())
+ return false;
+ return LT->getParamTypes() == RT->getParamTypes();
+}
+
+bool VCallOffsetMap::MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
+ const CXXMethodDecl *RHS) {
+ assert(LHS->isVirtual() && "LHS must be virtual!");
+ assert(RHS->isVirtual() && "RHS must be virtual!");
+
+ // A destructor can share a vcall offset with another destructor.
+ if (isa<CXXDestructorDecl>(LHS))
+ return isa<CXXDestructorDecl>(RHS);
+
+ // FIXME: We need to check more things here.
+
+ // The methods must have the same name.
+ DeclarationName LHSName = LHS->getDeclName();
+ DeclarationName RHSName = RHS->getDeclName();
+ if (LHSName != RHSName)
+ return false;
+
+ // And the same signatures.
+ return HasSameVirtualSignature(LHS, RHS);
+}
+
+bool VCallOffsetMap::AddVCallOffset(const CXXMethodDecl *MD,
+ CharUnits OffsetOffset) {
+ // Check if we can reuse an offset.
+ for (const auto &OffsetPair : Offsets) {
+ if (MethodsCanShareVCallOffset(OffsetPair.first, MD))
+ return false;
+ }
+
+ // Add the offset.
+ Offsets.push_back(MethodAndOffsetPairTy(MD, OffsetOffset));
+ return true;
+}
+
+CharUnits VCallOffsetMap::getVCallOffsetOffset(const CXXMethodDecl *MD) {
+ // Look for an offset.
+ for (const auto &OffsetPair : Offsets) {
+ if (MethodsCanShareVCallOffset(OffsetPair.first, MD))
+ return OffsetPair.second;
+ }
+
+ llvm_unreachable("Should always find a vcall offset offset!");
+}
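+
+// A minimal sketch of vcall offset sharing (hypothetical classes):
+//
+//   struct A { virtual void f(); };
+//   struct B : A { void f() override; };
+//   struct C : virtual B { void f() override; };
+//
+// When vcall offsets are emitted for the virtual base B inside C, both A::f
+// and B::f are visited. They have the same name and signature, so
+// MethodsCanShareVCallOffset returns true, the second AddVCallOffset call
+// returns false, and getVCallOffsetOffset hands both methods the same slot.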
+
+/// VCallAndVBaseOffsetBuilder - Class for building vcall and vbase offsets.
+class VCallAndVBaseOffsetBuilder {
+public:
+ typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits>
+ VBaseOffsetOffsetsMapTy;
+
+private:
+ /// MostDerivedClass - The most derived class for which we're building vcall
+ /// and vbase offsets.
+ const CXXRecordDecl *MostDerivedClass;
+
+ /// LayoutClass - The class we're using for layout information. Will be
+ /// different than the most derived class if we're building a construction
+ /// vtable.
+ const CXXRecordDecl *LayoutClass;
+
+ /// Context - The ASTContext which we will use for layout information.
+ ASTContext &Context;
+
+ /// Components - The vcall and vbase offset components.
+ typedef SmallVector<VTableComponent, 64> VTableComponentVectorTy;
+ VTableComponentVectorTy Components;
+
+ /// VisitedVirtualBases - Visited virtual bases.
+ llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
+
+ /// VCallOffsets - Keeps track of vcall offsets.
+ VCallOffsetMap VCallOffsets;
+
+ /// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets,
+ /// relative to the address point.
+ VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;
+
+ /// FinalOverriders - The final overriders of the most derived class.
+ /// (Can be null when we're not building a vtable of the most derived class.)
+ const FinalOverriders *Overriders;
+
+ /// AddVCallAndVBaseOffsets - Add vcall offsets and vbase offsets for the
+ /// given base subobject.
+ void AddVCallAndVBaseOffsets(BaseSubobject Base, bool BaseIsVirtual,
+ CharUnits RealBaseOffset);
+
+ /// AddVCallOffsets - Add vcall offsets for the given base subobject.
+ void AddVCallOffsets(BaseSubobject Base, CharUnits VBaseOffset);
+
+ /// AddVBaseOffsets - Add vbase offsets for the given class.
+ void AddVBaseOffsets(const CXXRecordDecl *Base,
+ CharUnits OffsetInLayoutClass);
+
+ /// getCurrentOffsetOffset - Get the current vcall or vbase offset offset in
+ /// chars, relative to the vtable address point.
+ CharUnits getCurrentOffsetOffset() const;
+
+public:
+ VCallAndVBaseOffsetBuilder(const CXXRecordDecl *MostDerivedClass,
+ const CXXRecordDecl *LayoutClass,
+ const FinalOverriders *Overriders,
+ BaseSubobject Base, bool BaseIsVirtual,
+ CharUnits OffsetInLayoutClass)
+ : MostDerivedClass(MostDerivedClass), LayoutClass(LayoutClass),
+ Context(MostDerivedClass->getASTContext()), Overriders(Overriders) {
+
+ // Add vcall and vbase offsets.
+ AddVCallAndVBaseOffsets(Base, BaseIsVirtual, OffsetInLayoutClass);
+ }
+
+ /// Methods for iterating over the components.
+ typedef VTableComponentVectorTy::const_reverse_iterator const_iterator;
+ const_iterator components_begin() const { return Components.rbegin(); }
+ const_iterator components_end() const { return Components.rend(); }
+
+ const VCallOffsetMap &getVCallOffsets() const { return VCallOffsets; }
+ const VBaseOffsetOffsetsMapTy &getVBaseOffsetOffsets() const {
+ return VBaseOffsetOffsets;
+ }
+};
+
+void
+VCallAndVBaseOffsetBuilder::AddVCallAndVBaseOffsets(BaseSubobject Base,
+ bool BaseIsVirtual,
+ CharUnits RealBaseOffset) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base.getBase());
+
+ // Itanium C++ ABI 2.5.2:
+ // ...in classes sharing a virtual table with a primary base class, the vcall
+ // and vbase offsets added by the derived class all come before the vcall
+ // and vbase offsets required by the base class, so that the latter may be
+ // laid out as required by the base class without regard to additions from
+ // the derived class(es).
+
+ // (Since we're emitting the vcall and vbase offsets in reverse order, we'll
+ // emit them for the primary base first).
+ if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
+ bool PrimaryBaseIsVirtual = Layout.isPrimaryBaseVirtual();
+
+ CharUnits PrimaryBaseOffset;
+
+ // Get the base offset of the primary base.
+ if (PrimaryBaseIsVirtual) {
+ assert(Layout.getVBaseClassOffset(PrimaryBase).isZero() &&
+ "Primary vbase should have a zero offset!");
+
+ const ASTRecordLayout &MostDerivedClassLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+
+ PrimaryBaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
+ } else {
+ assert(Layout.getBaseClassOffset(PrimaryBase).isZero() &&
+ "Primary base should have a zero offset!");
+
+ PrimaryBaseOffset = Base.getBaseOffset();
+ }
+
+ AddVCallAndVBaseOffsets(
+ BaseSubobject(PrimaryBase,PrimaryBaseOffset),
+ PrimaryBaseIsVirtual, RealBaseOffset);
+ }
+
+ AddVBaseOffsets(Base.getBase(), RealBaseOffset);
+
+ // We only want to add vcall offsets for virtual bases.
+ if (BaseIsVirtual)
+ AddVCallOffsets(Base, RealBaseOffset);
+}
+
+CharUnits VCallAndVBaseOffsetBuilder::getCurrentOffsetOffset() const {
+ // OffsetIndex is the index of this vcall or vbase offset, relative to the
+ // vtable address point. (We subtract 3 to account for the information just
+ // above the address point: the RTTI info, the offset to top, and the
+ // vcall or vbase offset itself.)
+ int64_t OffsetIndex = -(int64_t)(3 + Components.size());
+
+ CharUnits PointerWidth =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ CharUnits OffsetOffset = PointerWidth * OffsetIndex;
+ return OffsetOffset;
+}
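+
+// A worked example, assuming 8-byte pointers: with two offset components
+// already in Components, OffsetIndex is -(3 + 2) = -5, so the next vcall or
+// vbase offset is placed at -5 * 8 = -40 bytes relative to the address
+// point.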
+
+void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
+ CharUnits VBaseOffset) {
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ // Handle the primary base first.
+ // We only want to add vcall offsets if the base is non-virtual; a virtual
+ // primary base will have its vcall and vbase offsets emitted already.
+ if (PrimaryBase && !Layout.isPrimaryBaseVirtual()) {
+ // Get the base offset of the primary base.
+ assert(Layout.getBaseClassOffset(PrimaryBase).isZero() &&
+ "Primary base should have a zero offset!");
+
+ AddVCallOffsets(BaseSubobject(PrimaryBase, Base.getBaseOffset()),
+ VBaseOffset);
+ }
+
+ // Add the vcall offsets.
+ for (const auto *MD : RD->methods()) {
+ if (!MD->isVirtual())
+ continue;
+ MD = MD->getCanonicalDecl();
+
+ CharUnits OffsetOffset = getCurrentOffsetOffset();
+
+ // Don't add a vcall offset if we already have one for this member function
+ // signature.
+ if (!VCallOffsets.AddVCallOffset(MD, OffsetOffset))
+ continue;
+
+ CharUnits Offset = CharUnits::Zero();
+
+ if (Overriders) {
+ // Get the final overrider.
+ FinalOverriders::OverriderInfo Overrider =
+ Overriders->getOverrider(MD, Base.getBaseOffset());
+
+ // The vcall offset is the offset from the virtual base to the object
+ // where the function was overridden.
+ Offset = Overrider.Offset - VBaseOffset;
+ }
+
+ Components.push_back(
+ VTableComponent::MakeVCallOffset(Offset));
+ }
+
+ // And iterate over all non-virtual bases (ignoring the primary base).
+ for (const auto &B : RD->bases()) {
+ if (B.isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
+ if (BaseDecl == PrimaryBase)
+ continue;
+
+ // Get the base offset of this base.
+ CharUnits BaseOffset = Base.getBaseOffset() +
+ Layout.getBaseClassOffset(BaseDecl);
+
+ AddVCallOffsets(BaseSubobject(BaseDecl, BaseOffset),
+ VBaseOffset);
+ }
+}
+
+void
+VCallAndVBaseOffsetBuilder::AddVBaseOffsets(const CXXRecordDecl *RD,
+ CharUnits OffsetInLayoutClass) {
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ // Add vbase offsets.
+ for (const auto &B : RD->bases()) {
+ const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
+
+ // Check if this is a virtual base that we haven't visited before.
+ if (B.isVirtual() && VisitedVirtualBases.insert(BaseDecl).second) {
+ CharUnits Offset =
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl) - OffsetInLayoutClass;
+
+ // Add the vbase offset offset.
+ assert(!VBaseOffsetOffsets.count(BaseDecl) &&
+ "vbase offset offset already exists!");
+
+ CharUnits VBaseOffsetOffset = getCurrentOffsetOffset();
+ VBaseOffsetOffsets.insert(
+ std::make_pair(BaseDecl, VBaseOffsetOffset));
+
+ Components.push_back(
+ VTableComponent::MakeVBaseOffset(Offset));
+ }
+
+ // Check the base class looking for more vbase offsets.
+ AddVBaseOffsets(BaseDecl, OffsetInLayoutClass);
+ }
+}
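+
+// A small worked example (hypothetical classes):
+//
+//   struct A { virtual void f(); };
+//   struct B : virtual A { virtual void g(); };
+//
+// A is B's primary (and virtual) base, so the builder first emits the vcall
+// offset for A::f at slot -3 relative to the address point, then the vbase
+// offset for A at slot -4 (getCurrentOffsetOffset scales slots by the
+// pointer width). Read back in reverse, B's vtable therefore begins:
+//
+//   vbase_offset (0), vcall_offset (0), offset_to_top (0), RTTI, ...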
+
+/// ItaniumVTableBuilder - Class for building vtable layout information.
+class ItaniumVTableBuilder {
+public:
+ /// PrimaryBasesSetVectorTy - A set vector of direct and indirect
+ /// primary bases.
+ typedef llvm::SmallSetVector<const CXXRecordDecl *, 8>
+ PrimaryBasesSetVectorTy;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits>
+ VBaseOffsetOffsetsMapTy;
+
+ typedef llvm::DenseMap<BaseSubobject, uint64_t>
+ AddressPointsMapTy;
+
+ typedef llvm::DenseMap<GlobalDecl, int64_t> MethodVTableIndicesTy;
+
+private:
+ /// VTables - Global vtable information.
+ ItaniumVTableContext &VTables;
+
+ /// MostDerivedClass - The most derived class for which we're building this
+ /// vtable.
+ const CXXRecordDecl *MostDerivedClass;
+
+ /// MostDerivedClassOffset - If we're building a construction vtable, this
+ /// holds the offset from the layout class to the most derived class.
+ const CharUnits MostDerivedClassOffset;
+
+ /// MostDerivedClassIsVirtual - Whether the most derived class is a virtual
+ /// base. (This only makes sense when building a construction vtable).
+ bool MostDerivedClassIsVirtual;
+
+ /// LayoutClass - The class we're using for layout information. Will be
+ /// different than the most derived class if we're building a construction
+ /// vtable.
+ const CXXRecordDecl *LayoutClass;
+
+ /// Context - The ASTContext which we will use for layout information.
+ ASTContext &Context;
+
+ /// FinalOverriders - The final overriders of the most derived class.
+ const FinalOverriders Overriders;
+
+ /// VCallOffsetsForVBases - Keeps track of vcall offsets for the virtual
+ /// bases in this vtable.
+ llvm::DenseMap<const CXXRecordDecl *, VCallOffsetMap> VCallOffsetsForVBases;
+
+ /// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets for
+ /// the most derived class.
+ VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;
+
+ /// Components - The components of the vtable being built.
+ SmallVector<VTableComponent, 64> Components;
+
+ /// AddressPoints - Address points for the vtable being built.
+ AddressPointsMapTy AddressPoints;
+
+ /// MethodInfo - Contains information about a method in a vtable.
+ /// (Used for computing 'this' pointer adjustment thunks.)
+ struct MethodInfo {
+ /// BaseOffset - The base offset of this method.
+ const CharUnits BaseOffset;
+
+ /// BaseOffsetInLayoutClass - The base offset in the layout class of this
+ /// method.
+ const CharUnits BaseOffsetInLayoutClass;
+
+ /// VTableIndex - The index in the vtable that this method has.
+ /// (For destructors, this is the index of the complete destructor).
+ const uint64_t VTableIndex;
+
+ MethodInfo(CharUnits BaseOffset, CharUnits BaseOffsetInLayoutClass,
+ uint64_t VTableIndex)
+ : BaseOffset(BaseOffset),
+ BaseOffsetInLayoutClass(BaseOffsetInLayoutClass),
+ VTableIndex(VTableIndex) { }
+
+ MethodInfo()
+ : BaseOffset(CharUnits::Zero()),
+ BaseOffsetInLayoutClass(CharUnits::Zero()),
+ VTableIndex(0) { }
+ };
+
+ typedef llvm::DenseMap<const CXXMethodDecl *, MethodInfo> MethodInfoMapTy;
+
+ /// MethodInfoMap - The information for all methods in the vtable we're
+ /// currently building.
+ MethodInfoMapTy MethodInfoMap;
+
+ /// MethodVTableIndices - Contains the index (relative to the vtable address
+ /// point) where the function pointer for a virtual function is stored.
+ MethodVTableIndicesTy MethodVTableIndices;
+
+ typedef llvm::DenseMap<uint64_t, ThunkInfo> VTableThunksMapTy;
+
+ /// VTableThunks - The thunks by vtable index in the vtable currently being
+ /// built.
+ VTableThunksMapTy VTableThunks;
+
+ typedef SmallVector<ThunkInfo, 1> ThunkInfoVectorTy;
+ typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy;
+
+ /// Thunks - A map that contains all the thunks needed for all methods in the
+ /// most derived class for which the vtable is currently being built.
+ ThunksMapTy Thunks;
+
+ /// AddThunk - Add a thunk for the given method.
+ void AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk);
+
+ /// ComputeThisAdjustments - Compute the 'this' pointer adjustments for the
+ /// part of the vtable we're currently building.
+ void ComputeThisAdjustments();
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+
+ /// PrimaryVirtualBases - All known virtual bases that are a primary base of
+ /// some other base.
+ VisitedVirtualBasesSetTy PrimaryVirtualBases;
+
+ /// ComputeReturnAdjustment - Compute the return adjustment given a return
+ /// adjustment base offset.
+ ReturnAdjustment ComputeReturnAdjustment(BaseOffset Offset);
+
+ /// ComputeThisAdjustmentBaseOffset - Compute the base offset for adjusting
+ /// the 'this' pointer from the base subobject to the derived subobject.
+ BaseOffset ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
+ BaseSubobject Derived) const;
+
+ /// ComputeThisAdjustment - Compute the 'this' pointer adjustment for the
+ /// given virtual member function, its offset in the layout class and its
+ /// final overrider.
+ ThisAdjustment
+ ComputeThisAdjustment(const CXXMethodDecl *MD,
+ CharUnits BaseOffsetInLayoutClass,
+ FinalOverriders::OverriderInfo Overrider);
+
+ /// AddMethod - Add a single virtual member function to the vtable
+ /// components vector.
+ void AddMethod(const CXXMethodDecl *MD, ReturnAdjustment ReturnAdjustment);
+
+ /// IsOverriderUsed - Returns whether the overrider will ever be used in this
+ /// part of the vtable.
+ ///
+ /// Itanium C++ ABI 2.5.2:
+ ///
+ /// struct A { virtual void f(); };
+ /// struct B : virtual public A { int i; };
+ /// struct C : virtual public A { int j; };
+ /// struct D : public B, public C {};
+ ///
+ /// When B and C are declared, A is a primary base in each case, so although
+ /// vcall offsets are allocated in the A-in-B and A-in-C vtables, no this
+ /// adjustment is required and no thunk is generated. However, inside D
+ /// objects, A is no longer a primary base of C, so if we allowed calls to
+ /// C::f() to use the copy of A's vtable in the C subobject, we would need
+ /// to adjust this from C* to B::A*, which would require a third-party
+ /// thunk. Since we require that a call to C::f() first convert to A*,
+ /// C-in-D's copy of A's vtable is never referenced, so this is not
+ /// necessary.
+ bool IsOverriderUsed(const CXXMethodDecl *Overrider,
+ CharUnits BaseOffsetInLayoutClass,
+ const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
+ CharUnits FirstBaseOffsetInLayoutClass) const;
+
+ /// AddMethods - Add the methods of this base subobject and all its
+ /// primary bases to the vtable components vector.
+ void AddMethods(BaseSubobject Base, CharUnits BaseOffsetInLayoutClass,
+ const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
+ CharUnits FirstBaseOffsetInLayoutClass,
+ PrimaryBasesSetVectorTy &PrimaryBases);
+
+ /// LayoutVTable - Layout the vtable for the given base class, including its
+ /// secondary vtables and any vtables for virtual bases.
+ void LayoutVTable();
+
+ /// LayoutPrimaryAndSecondaryVTables - Layout the primary vtable for the
+ /// given base subobject, as well as all its secondary vtables.
+ ///
+ /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
+ /// or a direct or indirect base of a virtual base.
+ ///
+ /// \param BaseIsVirtualInLayoutClass - Whether the base subobject is virtual
+ /// in the layout class.
+ void LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ bool BaseIsVirtualInLayoutClass,
+ CharUnits OffsetInLayoutClass);
+
+ /// LayoutSecondaryVTables - Layout the secondary vtables for the given base
+ /// subobject.
+ ///
+ /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
+ /// or a direct or indirect base of a virtual base.
+ void LayoutSecondaryVTables(BaseSubobject Base, bool BaseIsMorallyVirtual,
+ CharUnits OffsetInLayoutClass);
+
+ /// DeterminePrimaryVirtualBases - Determine the primary virtual bases in this
+ /// class hierarchy.
+ void DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
+ CharUnits OffsetInLayoutClass,
+ VisitedVirtualBasesSetTy &VBases);
+
+ /// LayoutVTablesForVirtualBases - Layout vtables for all virtual bases of the
+ /// given base (excluding any primary bases).
+ void LayoutVTablesForVirtualBases(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases);
+
+ /// isBuildingConstructorVTable - Return whether this vtable builder is
+ /// building a construction vtable.
+ bool isBuildingConstructorVTable() const {
+ return MostDerivedClass != LayoutClass;
+ }
+
+public:
+ ItaniumVTableBuilder(ItaniumVTableContext &VTables,
+ const CXXRecordDecl *MostDerivedClass,
+ CharUnits MostDerivedClassOffset,
+ bool MostDerivedClassIsVirtual,
+ const CXXRecordDecl *LayoutClass)
+ : VTables(VTables), MostDerivedClass(MostDerivedClass),
+ MostDerivedClassOffset(MostDerivedClassOffset),
+ MostDerivedClassIsVirtual(MostDerivedClassIsVirtual),
+ LayoutClass(LayoutClass), Context(MostDerivedClass->getASTContext()),
+ Overriders(MostDerivedClass, MostDerivedClassOffset, LayoutClass) {
+ assert(!Context.getTargetInfo().getCXXABI().isMicrosoft());
+
+ LayoutVTable();
+
+ if (Context.getLangOpts().DumpVTableLayouts)
+ dumpLayout(llvm::outs());
+ }
+
+ uint64_t getNumThunks() const {
+ return Thunks.size();
+ }
+
+ ThunksMapTy::const_iterator thunks_begin() const {
+ return Thunks.begin();
+ }
+
+ ThunksMapTy::const_iterator thunks_end() const {
+ return Thunks.end();
+ }
+
+ const VBaseOffsetOffsetsMapTy &getVBaseOffsetOffsets() const {
+ return VBaseOffsetOffsets;
+ }
+
+ const AddressPointsMapTy &getAddressPoints() const {
+ return AddressPoints;
+ }
+
+ MethodVTableIndicesTy::const_iterator vtable_indices_begin() const {
+ return MethodVTableIndices.begin();
+ }
+
+ MethodVTableIndicesTy::const_iterator vtable_indices_end() const {
+ return MethodVTableIndices.end();
+ }
+
+ /// getNumVTableComponents - Return the number of components in the vtable
+ /// currently built.
+ uint64_t getNumVTableComponents() const {
+ return Components.size();
+ }
+
+ const VTableComponent *vtable_component_begin() const {
+ return Components.begin();
+ }
+
+ const VTableComponent *vtable_component_end() const {
+ return Components.end();
+ }
+
+ AddressPointsMapTy::const_iterator address_points_begin() const {
+ return AddressPoints.begin();
+ }
+
+ AddressPointsMapTy::const_iterator address_points_end() const {
+ return AddressPoints.end();
+ }
+
+ VTableThunksMapTy::const_iterator vtable_thunks_begin() const {
+ return VTableThunks.begin();
+ }
+
+ VTableThunksMapTy::const_iterator vtable_thunks_end() const {
+ return VTableThunks.end();
+ }
+
+ /// dumpLayout - Dump the vtable layout.
+ void dumpLayout(raw_ostream&);
+};
+
+void ItaniumVTableBuilder::AddThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk) {
+ assert(!isBuildingConstructorVTable() &&
+ "Can't add thunks for construction vtable");
+
+ SmallVectorImpl<ThunkInfo> &ThunksVector = Thunks[MD];
+
+ // Check if we have this thunk already.
+ if (std::find(ThunksVector.begin(), ThunksVector.end(), Thunk) !=
+ ThunksVector.end())
+ return;
+
+ ThunksVector.push_back(Thunk);
+}
+
+typedef llvm::SmallPtrSet<const CXXMethodDecl *, 8> OverriddenMethodsSetTy;
+
+/// Visit all the methods overridden by the given method recursively,
+/// in a depth-first pre-order. The Visitor's visitor method returns a bool
+/// indicating whether to continue the recursion for the given overridden
+/// method (i.e. returning false stops the iteration).
+template <class VisitorTy>
+static void
+visitAllOverriddenMethods(const CXXMethodDecl *MD, VisitorTy &Visitor) {
+ assert(MD->isVirtual() && "Method is not virtual!");
+
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods(); I != E; ++I) {
+ const CXXMethodDecl *OverriddenMD = *I;
+ if (!Visitor(OverriddenMD))
+ continue;
+ visitAllOverriddenMethods(OverriddenMD, Visitor);
+ }
+}
+
+/// ComputeAllOverriddenMethods - Given a method decl, compute the set of all
+/// the methods it overrides, both directly and indirectly.
+static void
+ComputeAllOverriddenMethods(const CXXMethodDecl *MD,
+ OverriddenMethodsSetTy& OverriddenMethods) {
+ auto OverriddenMethodsCollector = [&](const CXXMethodDecl *MD) {
+ // Don't recurse on this method if we've already collected it.
+ return OverriddenMethods.insert(MD).second;
+ };
+ visitAllOverriddenMethods(MD, OverriddenMethodsCollector);
+}
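+
+// For example, given a hypothetical chain
+//
+//   struct A { virtual void f(); };
+//   struct B : A { void f() override; };
+//   struct C : B { void f() override; };
+//
+// ComputeAllOverriddenMethods(C::f, Set) leaves Set = { B::f, A::f }. The
+// collector returns false for a method that is already in the set, which
+// keeps visitAllOverriddenMethods from re-walking that subtree.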
+
+void ItaniumVTableBuilder::ComputeThisAdjustments() {
+ // Now go through the method info map and see if any of the methods need
+ // 'this' pointer adjustments.
+ for (const auto &MI : MethodInfoMap) {
+ const CXXMethodDecl *MD = MI.first;
+ const MethodInfo &MethodInfo = MI.second;
+
+ // Ignore adjustments for unused function pointers.
+ uint64_t VTableIndex = MethodInfo.VTableIndex;
+ if (Components[VTableIndex].getKind() ==
+ VTableComponent::CK_UnusedFunctionPointer)
+ continue;
+
+ // Get the final overrider for this method.
+ FinalOverriders::OverriderInfo Overrider =
+ Overriders.getOverrider(MD, MethodInfo.BaseOffset);
+
+ // Check if we need an adjustment at all.
+ if (MethodInfo.BaseOffsetInLayoutClass == Overrider.Offset) {
+ // When a return thunk is needed by a derived class that overrides a
+ // virtual base, gcc uses a virtual 'this' adjustment as well.
+ // While the thunk itself might be needed by vtables in subclasses or
+ // in construction vtables, there doesn't seem to be a reason for using
+ // the thunk in this vtable. Still, we do so to match gcc.
+ if (VTableThunks.lookup(VTableIndex).Return.isEmpty())
+ continue;
+ }
+
+ ThisAdjustment ThisAdjustment =
+ ComputeThisAdjustment(MD, MethodInfo.BaseOffsetInLayoutClass, Overrider);
+
+ if (ThisAdjustment.isEmpty())
+ continue;
+
+ // Add it.
+ VTableThunks[VTableIndex].This = ThisAdjustment;
+
+ if (isa<CXXDestructorDecl>(MD)) {
+ // Add an adjustment for the deleting destructor as well.
+ VTableThunks[VTableIndex + 1].This = ThisAdjustment;
+ }
+ }
+
+ // Clear the method info map.
+ MethodInfoMap.clear();
+
+ if (isBuildingConstructorVTable()) {
+ // We don't need to store thunk information for construction vtables.
+ return;
+ }
+
+ for (const auto &TI : VTableThunks) {
+ const VTableComponent &Component = Components[TI.first];
+ const ThunkInfo &Thunk = TI.second;
+ const CXXMethodDecl *MD;
+
+ switch (Component.getKind()) {
+ default:
+ llvm_unreachable("Unexpected vtable component kind!");
+ case VTableComponent::CK_FunctionPointer:
+ MD = Component.getFunctionDecl();
+ break;
+ case VTableComponent::CK_CompleteDtorPointer:
+ MD = Component.getDestructorDecl();
+ break;
+ case VTableComponent::CK_DeletingDtorPointer:
+ // We've already added the thunk when we saw the complete dtor pointer.
+ continue;
+ }
+
+ if (MD->getParent() == MostDerivedClass)
+ AddThunk(MD, Thunk);
+ }
+}
+
+ReturnAdjustment
+ItaniumVTableBuilder::ComputeReturnAdjustment(BaseOffset Offset) {
+ ReturnAdjustment Adjustment;
+
+ if (!Offset.isEmpty()) {
+ if (Offset.VirtualBase) {
+ // Get the virtual base offset offset.
+ if (Offset.DerivedClass == MostDerivedClass) {
+ // We can get the offset offset directly from our map.
+ Adjustment.Virtual.Itanium.VBaseOffsetOffset =
+ VBaseOffsetOffsets.lookup(Offset.VirtualBase).getQuantity();
+ } else {
+ Adjustment.Virtual.Itanium.VBaseOffsetOffset =
+ VTables.getVirtualBaseOffsetOffset(Offset.DerivedClass,
+ Offset.VirtualBase).getQuantity();
+ }
+ }
+
+ Adjustment.NonVirtual = Offset.NonVirtualOffset.getQuantity();
+ }
+
+ return Adjustment;
+}
+
+BaseOffset ItaniumVTableBuilder::ComputeThisAdjustmentBaseOffset(
+ BaseSubobject Base, BaseSubobject Derived) const {
+ const CXXRecordDecl *BaseRD = Base.getBase();
+ const CXXRecordDecl *DerivedRD = Derived.getBase();
+
+ CXXBasePaths Paths(/*FindAmbiguities=*/true,
+ /*RecordPaths=*/true, /*DetectVirtual=*/true);
+
+ if (!DerivedRD->isDerivedFrom(BaseRD, Paths))
+ llvm_unreachable("Class must be derived from the passed in base class!");
+
+ // We have to go through all the paths, and see which one leads us to the
+ // right base subobject.
+ for (const CXXBasePath &Path : Paths) {
+ BaseOffset Offset = ComputeBaseOffset(Context, DerivedRD, Path);
+
+ CharUnits OffsetToBaseSubobject = Offset.NonVirtualOffset;
+
+ if (Offset.VirtualBase) {
+ // If we have a virtual base class, the non-virtual offset is relative
+ // to the virtual base class offset.
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ // Get the virtual base offset, relative to the layout class.
+ OffsetToBaseSubobject +=
+ LayoutClassLayout.getVBaseClassOffset(Offset.VirtualBase);
+ } else {
+ // Otherwise, the non-virtual offset is relative to the derived class
+ // offset.
+ OffsetToBaseSubobject += Derived.getBaseOffset();
+ }
+
+ // Check if this path gives us the right base subobject.
+ if (OffsetToBaseSubobject == Base.getBaseOffset()) {
+ // Since we're going from the base class _to_ the derived class, we'll
+ // invert the non-virtual offset here.
+ Offset.NonVirtualOffset = -Offset.NonVirtualOffset;
+ return Offset;
+ }
+ }
+
+ return BaseOffset();
+}
+
+ThisAdjustment ItaniumVTableBuilder::ComputeThisAdjustment(
+ const CXXMethodDecl *MD, CharUnits BaseOffsetInLayoutClass,
+ FinalOverriders::OverriderInfo Overrider) {
+ // Ignore adjustments for pure virtual member functions.
+ if (Overrider.Method->isPure())
+ return ThisAdjustment();
+
+ BaseSubobject OverriddenBaseSubobject(MD->getParent(),
+ BaseOffsetInLayoutClass);
+
+ BaseSubobject OverriderBaseSubobject(Overrider.Method->getParent(),
+ Overrider.Offset);
+
+ // Compute the adjustment offset.
+ BaseOffset Offset = ComputeThisAdjustmentBaseOffset(OverriddenBaseSubobject,
+ OverriderBaseSubobject);
+ if (Offset.isEmpty())
+ return ThisAdjustment();
+
+ ThisAdjustment Adjustment;
+
+ if (Offset.VirtualBase) {
+ // Get the vcall offset map for this virtual base.
+ VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Offset.VirtualBase];
+
+ if (VCallOffsets.empty()) {
+ // We don't have vcall offsets for this virtual base, go ahead and
+ // build them.
+ VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, MostDerivedClass,
+ /*FinalOverriders=*/nullptr,
+ BaseSubobject(Offset.VirtualBase,
+ CharUnits::Zero()),
+ /*BaseIsVirtual=*/true,
+ /*OffsetInLayoutClass=*/
+ CharUnits::Zero());
+
+ VCallOffsets = Builder.getVCallOffsets();
+ }
+
+ Adjustment.Virtual.Itanium.VCallOffsetOffset =
+ VCallOffsets.getVCallOffsetOffset(MD).getQuantity();
+ }
+
+ // Set the non-virtual part of the adjustment.
+ Adjustment.NonVirtual = Offset.NonVirtualOffset.getQuantity();
+
+ return Adjustment;
+}
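+
+// A sketch of when the virtual part of the adjustment is needed
+// (hypothetical classes):
+//
+//   struct A { virtual void f(); };
+//   struct X { virtual void g(); };
+//   struct B : X, virtual A { void f() override; };
+//
+// The entry for f in the A-in-B vtable part must convert its A* to a B*.
+// Since A is a virtual base, that distance is not a compile-time constant
+// across further-derived classes, so the thunk loads the vcall offset
+// stored at VCallOffsetOffset in A's vtable part at run time and adds it,
+// along with the constant NonVirtual part.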
+
+void ItaniumVTableBuilder::AddMethod(const CXXMethodDecl *MD,
+ ReturnAdjustment ReturnAdjustment) {
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ assert(ReturnAdjustment.isEmpty() &&
+ "Destructor can't have return adjustment!");
+
+ // Add both the complete destructor and the deleting destructor.
+ Components.push_back(VTableComponent::MakeCompleteDtor(DD));
+ Components.push_back(VTableComponent::MakeDeletingDtor(DD));
+ } else {
+ // Add the return adjustment if necessary.
+ if (!ReturnAdjustment.isEmpty())
+ VTableThunks[Components.size()].Return = ReturnAdjustment;
+
+ // Add the function.
+ Components.push_back(VTableComponent::MakeFunction(MD));
+ }
+}
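+
+// Note that a virtual destructor therefore occupies two consecutive vtable
+// slots (complete-object and deleting), which is why ComputeThisAdjustments
+// above also copies a destructor's 'this' adjustment to VTableIndex + 1.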
+
+/// OverridesIndirectMethodInBases - Return whether the given member function
+/// overrides any methods in the set of given bases.
+/// Unlike OverridesMethodInBases, this checks "overriders of overriders".
+/// For example, if we have:
+///
+/// struct A { virtual void f(); };
+/// struct B : A { virtual void f(); };
+/// struct C : B { virtual void f(); };
+///
+/// OverridesIndirectMethodInBases will return true if given C::f as the
+/// method and { A } as the set of bases.
+static bool OverridesIndirectMethodInBases(
+ const CXXMethodDecl *MD,
+ ItaniumVTableBuilder::PrimaryBasesSetVectorTy &Bases) {
+ if (Bases.count(MD->getParent()))
+ return true;
+
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods(); I != E; ++I) {
+ const CXXMethodDecl *OverriddenMD = *I;
+
+ // Check "indirect overriders".
+ if (OverridesIndirectMethodInBases(OverriddenMD, Bases))
+ return true;
+ }
+
+ return false;
+}
+
+bool ItaniumVTableBuilder::IsOverriderUsed(
+ const CXXMethodDecl *Overrider, CharUnits BaseOffsetInLayoutClass,
+ const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
+ CharUnits FirstBaseOffsetInLayoutClass) const {
+ // If the base and the first base in the primary base chain have the same
+ // offsets, then this overrider will be used.
+ if (BaseOffsetInLayoutClass == FirstBaseOffsetInLayoutClass)
+ return true;
+
+ // We know now that Base (or a direct or indirect base of it) is a primary
+ // base in part of the class hierarchy, but not a primary base in the most
+ // derived class.
+
+ // If the overrider is the first base in the primary base chain, we know
+ // that the overrider will be used.
+ if (Overrider->getParent() == FirstBaseInPrimaryBaseChain)
+ return true;
+
+ ItaniumVTableBuilder::PrimaryBasesSetVectorTy PrimaryBases;
+
+ const CXXRecordDecl *RD = FirstBaseInPrimaryBaseChain;
+ PrimaryBases.insert(RD);
+
+ // Now traverse the base chain, starting with the first base, until we find
+ // the base that is no longer a primary base.
+ while (true) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ if (!PrimaryBase)
+ break;
+
+ if (Layout.isPrimaryBaseVirtual()) {
+ assert(Layout.getVBaseClassOffset(PrimaryBase).isZero() &&
+ "Primary base should always be at offset 0!");
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ // Now check whether this virtual primary base is still a primary base in
+ // the layout class; if the offsets differ, it is not.
+ if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
+ FirstBaseOffsetInLayoutClass) {
+ // We found it, stop walking the chain.
+ break;
+ }
+ } else {
+ assert(Layout.getBaseClassOffset(PrimaryBase).isZero() &&
+ "Primary base should always be at offset 0!");
+ }
+
+ if (!PrimaryBases.insert(PrimaryBase))
+ llvm_unreachable("Found a duplicate primary base!");
+
+ RD = PrimaryBase;
+ }
+
+ // If the final overrider overrides a method in one of the primary bases,
+ // then we know that it will be used.
+ return OverridesIndirectMethodInBases(Overrider, PrimaryBases);
+}
+
+typedef llvm::SmallSetVector<const CXXRecordDecl *, 8> BasesSetVectorTy;
+
+/// FindNearestOverriddenMethod - Given a method, returns the overridden method
+/// from the nearest base. Returns null if no method was found.
+/// The Bases are expected to be sorted in a base-to-derived order.
+static const CXXMethodDecl *
+FindNearestOverriddenMethod(const CXXMethodDecl *MD,
+ BasesSetVectorTy &Bases) {
+ OverriddenMethodsSetTy OverriddenMethods;
+ ComputeAllOverriddenMethods(MD, OverriddenMethods);
+
+ for (const CXXRecordDecl *PrimaryBase :
+ llvm::make_range(Bases.rbegin(), Bases.rend())) {
+ // Now check the overridden methods.
+ for (const CXXMethodDecl *OverriddenMD : OverriddenMethods) {
+ // We found our overridden method.
+ if (OverriddenMD->getParent() == PrimaryBase)
+ return OverriddenMD;
+ }
+ }
+
+ return nullptr;
+}
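+
+// For instance, with hypothetical bases A <- B (Bases = { A, B } in
+// base-to-derived order) and a method C::f that overrides both B::f and
+// A::f, the reverse walk over Bases checks B first, so the function returns
+// B::f, the override from the nearest base.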
+
+void ItaniumVTableBuilder::AddMethods(
+ BaseSubobject Base, CharUnits BaseOffsetInLayoutClass,
+ const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
+ CharUnits FirstBaseOffsetInLayoutClass,
+ PrimaryBasesSetVectorTy &PrimaryBases) {
+ // Itanium C++ ABI 2.5.2:
+ // The order of the virtual function pointers in a virtual table is the
+ // order of declaration of the corresponding member functions in the class.
+ //
+ // There is an entry for any virtual function declared in a class,
+ // whether it is a new function or overrides a base class function,
+ // unless it overrides a function from the primary base, and conversion
+ // between their return types does not require an adjustment.
+
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
+ CharUnits PrimaryBaseOffset;
+ CharUnits PrimaryBaseOffsetInLayoutClass;
+ if (Layout.isPrimaryBaseVirtual()) {
+ assert(Layout.getVBaseClassOffset(PrimaryBase).isZero() &&
+ "Primary vbase should have a zero offset!");
+
+ const ASTRecordLayout &MostDerivedClassLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+
+ PrimaryBaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ PrimaryBaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
+ } else {
+ assert(Layout.getBaseClassOffset(PrimaryBase).isZero() &&
+ "Primary base should have a zero offset!");
+
+ PrimaryBaseOffset = Base.getBaseOffset();
+ PrimaryBaseOffsetInLayoutClass = BaseOffsetInLayoutClass;
+ }
+
+ AddMethods(BaseSubobject(PrimaryBase, PrimaryBaseOffset),
+ PrimaryBaseOffsetInLayoutClass, FirstBaseInPrimaryBaseChain,
+ FirstBaseOffsetInLayoutClass, PrimaryBases);
+
+ if (!PrimaryBases.insert(PrimaryBase))
+ llvm_unreachable("Found a duplicate primary base!");
+ }
+
+ const CXXDestructorDecl *ImplicitVirtualDtor = nullptr;
+
+ typedef llvm::SmallVector<const CXXMethodDecl *, 8> NewVirtualFunctionsTy;
+ NewVirtualFunctionsTy NewVirtualFunctions;
+
+ // Now go through all virtual member functions and add them.
+ for (const auto *MD : RD->methods()) {
+ if (!MD->isVirtual())
+ continue;
+ MD = MD->getCanonicalDecl();
+
+ // Get the final overrider.
+ FinalOverriders::OverriderInfo Overrider =
+ Overriders.getOverrider(MD, Base.getBaseOffset());
+
+ // Check if this virtual member function overrides a method in a primary
+ // base. If this is the case, and the return type doesn't require adjustment,
+ // then we can just use the member function from the primary base.
+ if (const CXXMethodDecl *OverriddenMD =
+ FindNearestOverriddenMethod(MD, PrimaryBases)) {
+ if (ComputeReturnAdjustmentBaseOffset(Context, MD,
+ OverriddenMD).isEmpty()) {
+ // Replace the method info of the overridden method with our own
+ // method.
+ assert(MethodInfoMap.count(OverriddenMD) &&
+ "Did not find the overridden method!");
+ MethodInfo &OverriddenMethodInfo = MethodInfoMap[OverriddenMD];
+
+ MethodInfo MethodInfo(Base.getBaseOffset(), BaseOffsetInLayoutClass,
+ OverriddenMethodInfo.VTableIndex);
+
+ assert(!MethodInfoMap.count(MD) &&
+ "Should not have method info for this method yet!");
+
+ MethodInfoMap.insert(std::make_pair(MD, MethodInfo));
+ MethodInfoMap.erase(OverriddenMD);
+
+ // If the overridden method exists in a virtual base class or a direct
+ // or indirect base class of a virtual base class, we need to emit a
+ // thunk if we ever have a class hierarchy where the base class is not
+ // a primary base in the complete object.
+ if (!isBuildingConstructorVTable() && OverriddenMD != MD) {
+ // Compute the this adjustment.
+ ThisAdjustment ThisAdjustment =
+ ComputeThisAdjustment(OverriddenMD, BaseOffsetInLayoutClass,
+ Overrider);
+
+ if (ThisAdjustment.Virtual.Itanium.VCallOffsetOffset &&
+ Overrider.Method->getParent() == MostDerivedClass) {
+
+ // There's no return adjustment between OverriddenMD and MD,
+ // but that doesn't mean there isn't one between MD and
+ // the final overrider.
+ BaseOffset ReturnAdjustmentOffset =
+ ComputeReturnAdjustmentBaseOffset(Context, Overrider.Method, MD);
+ ReturnAdjustment ReturnAdjustment =
+ ComputeReturnAdjustment(ReturnAdjustmentOffset);
+
+ // This is a virtual thunk for the most derived class, add it.
+ AddThunk(Overrider.Method,
+ ThunkInfo(ThisAdjustment, ReturnAdjustment));
+ }
+ }
+
+ continue;
+ }
+ }
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ if (MD->isImplicit()) {
+ // Itanium C++ ABI 2.5.2:
+ // If a class has an implicitly-defined virtual destructor,
+ // its entries come after the declared virtual function pointers.
+
+ assert(!ImplicitVirtualDtor &&
+ "Did already see an implicit virtual dtor!");
+ ImplicitVirtualDtor = DD;
+ continue;
+ }
+ }
+
+ NewVirtualFunctions.push_back(MD);
+ }
+
+ if (ImplicitVirtualDtor)
+ NewVirtualFunctions.push_back(ImplicitVirtualDtor);
+
+ for (const CXXMethodDecl *MD : NewVirtualFunctions) {
+ // Get the final overrider.
+ FinalOverriders::OverriderInfo Overrider =
+ Overriders.getOverrider(MD, Base.getBaseOffset());
+
+ // Insert the method info for this method.
+ MethodInfo MethodInfo(Base.getBaseOffset(), BaseOffsetInLayoutClass,
+ Components.size());
+
+ assert(!MethodInfoMap.count(MD) &&
+ "Should not have method info for this method yet!");
+ MethodInfoMap.insert(std::make_pair(MD, MethodInfo));
+
+ // Check if this overrider is going to be used.
+ const CXXMethodDecl *OverriderMD = Overrider.Method;
+ if (!IsOverriderUsed(OverriderMD, BaseOffsetInLayoutClass,
+ FirstBaseInPrimaryBaseChain,
+ FirstBaseOffsetInLayoutClass)) {
+ Components.push_back(VTableComponent::MakeUnusedFunction(OverriderMD));
+ continue;
+ }
+
+ // Check if this overrider needs a return adjustment.
+ // We don't want to do this for pure virtual member functions.
+ BaseOffset ReturnAdjustmentOffset;
+ if (!OverriderMD->isPure()) {
+ ReturnAdjustmentOffset =
+ ComputeReturnAdjustmentBaseOffset(Context, OverriderMD, MD);
+ }
+
+ ReturnAdjustment ReturnAdjustment =
+ ComputeReturnAdjustment(ReturnAdjustmentOffset);
+
+ AddMethod(Overrider.Method, ReturnAdjustment);
+ }
+}
+
+void ItaniumVTableBuilder::LayoutVTable() {
+ LayoutPrimaryAndSecondaryVTables(BaseSubobject(MostDerivedClass,
+ CharUnits::Zero()),
+ /*BaseIsMorallyVirtual=*/false,
+ MostDerivedClassIsVirtual,
+ MostDerivedClassOffset);
+
+ VisitedVirtualBasesSetTy VBases;
+
+ // Determine the primary virtual bases.
+ DeterminePrimaryVirtualBases(MostDerivedClass, MostDerivedClassOffset,
+ VBases);
+ VBases.clear();
+
+ LayoutVTablesForVirtualBases(MostDerivedClass, VBases);
+
+ // -fapple-kext adds an extra entry at the end of the vtable.
+ bool IsAppleKext = Context.getLangOpts().AppleKext;
+ if (IsAppleKext)
+ Components.push_back(VTableComponent::MakeVCallOffset(CharUnits::Zero()));
+}
+
+void ItaniumVTableBuilder::LayoutPrimaryAndSecondaryVTables(
+ BaseSubobject Base, bool BaseIsMorallyVirtual,
+ bool BaseIsVirtualInLayoutClass, CharUnits OffsetInLayoutClass) {
+ assert(Base.getBase()->isDynamicClass() && "class does not have a vtable!");
+
+ // Add vcall and vbase offsets for this vtable.
+ VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, LayoutClass, &Overriders,
+ Base, BaseIsVirtualInLayoutClass,
+ OffsetInLayoutClass);
+ Components.append(Builder.components_begin(), Builder.components_end());
+
+ // Check if we need to add these vcall offsets.
+ if (BaseIsVirtualInLayoutClass && !Builder.getVCallOffsets().empty()) {
+ VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Base.getBase()];
+
+ if (VCallOffsets.empty())
+ VCallOffsets = Builder.getVCallOffsets();
+ }
+
+ // If we're laying out the most derived class, we want to keep track of the
+ // virtual base class offset offsets.
+ if (Base.getBase() == MostDerivedClass)
+ VBaseOffsetOffsets = Builder.getVBaseOffsetOffsets();
+
+ // Add the offset to top.
+ CharUnits OffsetToTop = MostDerivedClassOffset - OffsetInLayoutClass;
+ Components.push_back(VTableComponent::MakeOffsetToTop(OffsetToTop));
+
+ // Next, add the RTTI.
+ Components.push_back(VTableComponent::MakeRTTI(MostDerivedClass));
+
+ uint64_t AddressPoint = Components.size();
+
+ // Now go through all virtual member functions and add them.
+ PrimaryBasesSetVectorTy PrimaryBases;
+ AddMethods(Base, OffsetInLayoutClass,
+ Base.getBase(), OffsetInLayoutClass,
+ PrimaryBases);
+
+ const CXXRecordDecl *RD = Base.getBase();
+ if (RD == MostDerivedClass) {
+ assert(MethodVTableIndices.empty());
+ for (const auto &I : MethodInfoMap) {
+ const CXXMethodDecl *MD = I.first;
+ const MethodInfo &MI = I.second;
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Complete)]
+ = MI.VTableIndex - AddressPoint;
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Deleting)]
+ = MI.VTableIndex + 1 - AddressPoint;
+ } else {
+ MethodVTableIndices[MD] = MI.VTableIndex - AddressPoint;
+ }
+ }
+ }
+
+ // Compute 'this' pointer adjustments.
+ ComputeThisAdjustments();
+
+ // Add all address points.
+ while (true) {
+ AddressPoints.insert(std::make_pair(
+ BaseSubobject(RD, OffsetInLayoutClass),
+ AddressPoint));
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ if (!PrimaryBase)
+ break;
+
+ if (Layout.isPrimaryBaseVirtual()) {
+ // Check if this virtual primary base is a primary base in the layout
+ // class. If it's not, we don't want to add it.
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
+ OffsetInLayoutClass) {
+ // We don't want to add this class (or any of its primary bases).
+ break;
+ }
+ }
+
+ RD = PrimaryBase;
+ }
+
+ // Layout secondary vtables.
+ LayoutSecondaryVTables(Base, BaseIsMorallyVirtual, OffsetInLayoutClass);
+}
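+
+// For illustration: in a hypothetical `struct B : A { ... }` where A is B's
+// non-virtual primary base, the loop above records the same AddressPoint
+// index for both the B and the A subobject at the same offset, since they
+// share one vtable; dumpLayout later prints such shared address points as
+// consecutive "-- (...) vtable address --" lines.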
+
+void
+ItaniumVTableBuilder::LayoutSecondaryVTables(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ CharUnits OffsetInLayoutClass) {
+ // Itanium C++ ABI 2.5.2:
+ // Following the primary virtual table of a derived class are secondary
+ // virtual tables for each of its proper base classes, except any primary
+ // base(s) with which it shares its primary virtual table.
+
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ for (const auto &B : RD->bases()) {
+ // Ignore virtual bases, we'll emit them later.
+ if (B.isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
+
+ // Ignore bases that don't have a vtable.
+ if (!BaseDecl->isDynamicClass())
+ continue;
+
+ if (isBuildingConstructorVTable()) {
+ // Itanium C++ ABI 2.6.4:
+ // Some of the base class subobjects may not need construction virtual
+ // tables, which will therefore not be present in the construction
+ // virtual table group, even though the subobject virtual tables are
+ // present in the main virtual table group for the complete object.
+ if (!BaseIsMorallyVirtual && !BaseDecl->getNumVBases())
+ continue;
+ }
+
+ // Get the base offset of this base.
+ CharUnits RelativeBaseOffset = Layout.getBaseClassOffset(BaseDecl);
+ CharUnits BaseOffset = Base.getBaseOffset() + RelativeBaseOffset;
+
+ CharUnits BaseOffsetInLayoutClass =
+ OffsetInLayoutClass + RelativeBaseOffset;
+
+ // Don't emit a secondary vtable for a primary base. We might, however, want
+ // to emit secondary vtables for other bases of this base.
+ if (BaseDecl == PrimaryBase) {
+ LayoutSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset),
+ BaseIsMorallyVirtual, BaseOffsetInLayoutClass);
+ continue;
+ }
+
+ // Layout the primary vtable (and any secondary vtables) for this base.
+ LayoutPrimaryAndSecondaryVTables(
+ BaseSubobject(BaseDecl, BaseOffset),
+ BaseIsMorallyVirtual,
+ /*BaseIsVirtualInLayoutClass=*/false,
+ BaseOffsetInLayoutClass);
+ }
+}
+
+void ItaniumVTableBuilder::DeterminePrimaryVirtualBases(
+ const CXXRecordDecl *RD, CharUnits OffsetInLayoutClass,
+ VisitedVirtualBasesSetTy &VBases) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Check if this base has a primary base.
+ if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
+
+ // Check if it's virtual.
+ if (Layout.isPrimaryBaseVirtual()) {
+ bool IsPrimaryVirtualBase = true;
+
+ if (isBuildingConstructorVTable()) {
+ // Check if the base is actually a primary base in the class we use for
+ // layout.
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ CharUnits PrimaryBaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
+
+ // We know that the base is not a primary base in the layout class if
+ // the base offsets are different.
+ if (PrimaryBaseOffsetInLayoutClass != OffsetInLayoutClass)
+ IsPrimaryVirtualBase = false;
+ }
+
+ if (IsPrimaryVirtualBase)
+ PrimaryVirtualBases.insert(PrimaryBase);
+ }
+ }
+
+ // Traverse bases, looking for more primary virtual bases.
+ for (const auto &B : RD->bases()) {
+ const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
+
+ CharUnits BaseOffsetInLayoutClass;
+
+ if (B.isVirtual()) {
+ if (!VBases.insert(BaseDecl).second)
+ continue;
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ BaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+ } else {
+ BaseOffsetInLayoutClass =
+ OffsetInLayoutClass + Layout.getBaseClassOffset(BaseDecl);
+ }
+
+ DeterminePrimaryVirtualBases(BaseDecl, BaseOffsetInLayoutClass, VBases);
+ }
+}
+
+void ItaniumVTableBuilder::LayoutVTablesForVirtualBases(
+ const CXXRecordDecl *RD, VisitedVirtualBasesSetTy &VBases) {
+ // Itanium C++ ABI 2.5.2:
+ // Then come the virtual base virtual tables, also in inheritance graph
+ // order, and again excluding primary bases (which share virtual tables with
+ // the classes for which they are primary).
+ for (const auto &B : RD->bases()) {
+ const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
+
+ // Check if this base needs a vtable. (If it's virtual, not a primary base
+ // of some other class, and we haven't visited it before).
+ if (B.isVirtual() && BaseDecl->isDynamicClass() &&
+ !PrimaryVirtualBases.count(BaseDecl) &&
+ VBases.insert(BaseDecl).second) {
+ const ASTRecordLayout &MostDerivedClassLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+ CharUnits BaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+ CharUnits BaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+
+ LayoutPrimaryAndSecondaryVTables(
+ BaseSubobject(BaseDecl, BaseOffset),
+ /*BaseIsMorallyVirtual=*/true,
+ /*BaseIsVirtualInLayoutClass=*/true,
+ BaseOffsetInLayoutClass);
+ }
+
+ // We only need to check the base for virtual base vtables if it actually
+ // has virtual bases.
+ if (BaseDecl->getNumVBases())
+ LayoutVTablesForVirtualBases(BaseDecl, VBases);
+ }
+}
+
+/// dumpLayout - Dump the vtable layout.
+void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
+ // FIXME: write more tests that actually use the dumpLayout output to prevent
+ // ItaniumVTableBuilder regressions.
+
+ if (isBuildingConstructorVTable()) {
+ Out << "Construction vtable for ('";
+ MostDerivedClass->printQualifiedName(Out);
+ Out << "', ";
+ Out << MostDerivedClassOffset.getQuantity() << ") in '";
+ LayoutClass->printQualifiedName(Out);
+ } else {
+ Out << "Vtable for '";
+ MostDerivedClass->printQualifiedName(Out);
+ }
+ Out << "' (" << Components.size() << " entries).\n";
+
+ // Iterate through the address points and insert them into a new map where
+ // they are keyed by the index and not the base object.
+ // Since an address point can be shared by multiple subobjects, we use an
+ // STL multimap.
+ std::multimap<uint64_t, BaseSubobject> AddressPointsByIndex;
+ for (const auto &AP : AddressPoints) {
+ const BaseSubobject &Base = AP.first;
+ uint64_t Index = AP.second;
+
+ AddressPointsByIndex.insert(std::make_pair(Index, Base));
+ }
+
+ for (unsigned I = 0, E = Components.size(); I != E; ++I) {
+ uint64_t Index = I;
+
+ Out << llvm::format("%4d | ", I);
+
+ const VTableComponent &Component = Components[I];
+
+ // Dump the component.
+ switch (Component.getKind()) {
+
+ case VTableComponent::CK_VCallOffset:
+ Out << "vcall_offset ("
+ << Component.getVCallOffset().getQuantity()
+ << ")";
+ break;
+
+ case VTableComponent::CK_VBaseOffset:
+ Out << "vbase_offset ("
+ << Component.getVBaseOffset().getQuantity()
+ << ")";
+ break;
+
+ case VTableComponent::CK_OffsetToTop:
+ Out << "offset_to_top ("
+ << Component.getOffsetToTop().getQuantity()
+ << ")";
+ break;
+
+ case VTableComponent::CK_RTTI:
+ Component.getRTTIDecl()->printQualifiedName(Out);
+ Out << " RTTI";
+ break;
+
+ case VTableComponent::CK_FunctionPointer: {
+ const CXXMethodDecl *MD = Component.getFunctionDecl();
+
+ std::string Str =
+ PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
+ MD);
+ Out << Str;
+ if (MD->isPure())
+ Out << " [pure]";
+
+ if (MD->isDeleted())
+ Out << " [deleted]";
+
+ ThunkInfo Thunk = VTableThunks.lookup(I);
+ if (!Thunk.isEmpty()) {
+ // If this function pointer has a return adjustment, dump it.
+ if (!Thunk.Return.isEmpty()) {
+ Out << "\n [return adjustment: ";
+ Out << Thunk.Return.NonVirtual << " non-virtual";
+
+ if (Thunk.Return.Virtual.Itanium.VBaseOffsetOffset) {
+ Out << ", " << Thunk.Return.Virtual.Itanium.VBaseOffsetOffset;
+ Out << " vbase offset offset";
+ }
+
+ Out << ']';
+ }
+
+ // If this function pointer has a 'this' pointer adjustment, dump it.
+ if (!Thunk.This.isEmpty()) {
+ Out << "\n [this adjustment: ";
+ Out << Thunk.This.NonVirtual << " non-virtual";
+
+ if (Thunk.This.Virtual.Itanium.VCallOffsetOffset) {
+ Out << ", " << Thunk.This.Virtual.Itanium.VCallOffsetOffset;
+ Out << " vcall offset offset";
+ }
+
+ Out << ']';
+ }
+ }
+
+ break;
+ }
+
+ case VTableComponent::CK_CompleteDtorPointer:
+ case VTableComponent::CK_DeletingDtorPointer: {
+ bool IsComplete =
+ Component.getKind() == VTableComponent::CK_CompleteDtorPointer;
+
+ const CXXDestructorDecl *DD = Component.getDestructorDecl();
+
+ DD->printQualifiedName(Out);
+ if (IsComplete)
+ Out << "() [complete]";
+ else
+ Out << "() [deleting]";
+
+ if (DD->isPure())
+ Out << " [pure]";
+
+ ThunkInfo Thunk = VTableThunks.lookup(I);
+ if (!Thunk.isEmpty()) {
+ // If this destructor has a 'this' pointer adjustment, dump it.
+ if (!Thunk.This.isEmpty()) {
+ Out << "\n [this adjustment: ";
+ Out << Thunk.This.NonVirtual << " non-virtual";
+
+ if (Thunk.This.Virtual.Itanium.VCallOffsetOffset) {
+ Out << ", " << Thunk.This.Virtual.Itanium.VCallOffsetOffset;
+ Out << " vcall offset offset";
+ }
+
+ Out << ']';
+ }
+ }
+
+ break;
+ }
+
+ case VTableComponent::CK_UnusedFunctionPointer: {
+ const CXXMethodDecl *MD = Component.getUnusedFunctionDecl();
+
+ std::string Str =
+ PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
+ MD);
+ Out << "[unused] " << Str;
+ if (MD->isPure())
+ Out << " [pure]";
+ }
+
+ }
+
+ Out << '\n';
+
+ // Dump the next address point.
+ uint64_t NextIndex = Index + 1;
+ if (AddressPointsByIndex.count(NextIndex)) {
+ if (AddressPointsByIndex.count(NextIndex) == 1) {
+ const BaseSubobject &Base =
+ AddressPointsByIndex.find(NextIndex)->second;
+
+ Out << " -- (";
+ Base.getBase()->printQualifiedName(Out);
+ Out << ", " << Base.getBaseOffset().getQuantity();
+ Out << ") vtable address --\n";
+ } else {
+ CharUnits BaseOffset =
+ AddressPointsByIndex.lower_bound(NextIndex)->second.getBaseOffset();
+
+ // We store the class names in a set to get a stable order.
+ std::set<std::string> ClassNames;
+ for (const auto &I :
+ llvm::make_range(AddressPointsByIndex.equal_range(NextIndex))) {
+ assert(I.second.getBaseOffset() == BaseOffset &&
+ "Invalid base offset!");
+ const CXXRecordDecl *RD = I.second.getBase();
+ ClassNames.insert(RD->getQualifiedNameAsString());
+ }
+
+ for (const std::string &Name : ClassNames) {
+ Out << " -- (" << Name;
+ Out << ", " << BaseOffset.getQuantity() << ") vtable address --\n";
+ }
+ }
+ }
+ }
+
+ Out << '\n';
+
+ if (isBuildingConstructorVTable())
+ return;
+
+ if (MostDerivedClass->getNumVBases()) {
+ // We store the virtual base class names and their offsets in a map to get
+ // a stable order.
+
+ std::map<std::string, CharUnits> ClassNamesAndOffsets;
+ for (const auto &I : VBaseOffsetOffsets) {
+ std::string ClassName = I.first->getQualifiedNameAsString();
+ CharUnits OffsetOffset = I.second;
+ ClassNamesAndOffsets.insert(std::make_pair(ClassName, OffsetOffset));
+ }
+
+ Out << "Virtual base offset offsets for '";
+ MostDerivedClass->printQualifiedName(Out);
+ Out << "' (";
+ Out << ClassNamesAndOffsets.size();
+ Out << (ClassNamesAndOffsets.size() == 1 ? " entry" : " entries") << ").\n";
+
+ for (const auto &I : ClassNamesAndOffsets)
+ Out << " " << I.first << " | " << I.second.getQuantity() << '\n';
+
+ Out << "\n";
+ }
+
+ if (!Thunks.empty()) {
+ // We store the method names in a map to get a stable order.
+ std::map<std::string, const CXXMethodDecl *> MethodNamesAndDecls;
+
+ for (const auto &I : Thunks) {
+ const CXXMethodDecl *MD = I.first;
+ std::string MethodName =
+ PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
+ MD);
+
+ MethodNamesAndDecls.insert(std::make_pair(MethodName, MD));
+ }
+
+ for (const auto &I : MethodNamesAndDecls) {
+ const std::string &MethodName = I.first;
+ const CXXMethodDecl *MD = I.second;
+
+ ThunkInfoVectorTy ThunksVector = Thunks[MD];
+ std::sort(ThunksVector.begin(), ThunksVector.end(),
+ [](const ThunkInfo &LHS, const ThunkInfo &RHS) {
+ assert(LHS.Method == nullptr && RHS.Method == nullptr);
+ return std::tie(LHS.This, LHS.Return) < std::tie(RHS.This, RHS.Return);
+ });
+
+ Out << "Thunks for '" << MethodName << "' (" << ThunksVector.size();
+ Out << (ThunksVector.size() == 1 ? " entry" : " entries") << ").\n";
+
+ for (unsigned I = 0, E = ThunksVector.size(); I != E; ++I) {
+ const ThunkInfo &Thunk = ThunksVector[I];
+
+ Out << llvm::format("%4d | ", I);
+
+ // If this function pointer has a return adjustment, dump it.
+ if (!Thunk.Return.isEmpty()) {
+ Out << "return adjustment: " << Thunk.Return.NonVirtual;
+ Out << " non-virtual";
+ if (Thunk.Return.Virtual.Itanium.VBaseOffsetOffset) {
+ Out << ", " << Thunk.Return.Virtual.Itanium.VBaseOffsetOffset;
+ Out << " vbase offset offset";
+ }
+
+ if (!Thunk.This.isEmpty())
+ Out << "\n ";
+ }
+
+ // If this function pointer has a 'this' pointer adjustment, dump it.
+ if (!Thunk.This.isEmpty()) {
+ Out << "this adjustment: ";
+ Out << Thunk.This.NonVirtual << " non-virtual";
+
+ if (Thunk.This.Virtual.Itanium.VCallOffsetOffset) {
+ Out << ", " << Thunk.This.Virtual.Itanium.VCallOffsetOffset;
+ Out << " vcall offset offset";
+ }
+ }
+
+ Out << '\n';
+ }
+
+ Out << '\n';
+ }
+ }
+
+ // Compute the vtable indices for all the member functions.
+ // Store them in a map keyed by the index so we'll get a sorted table.
+ std::map<uint64_t, std::string> IndicesMap;
+
+ for (const auto *MD : MostDerivedClass->methods()) {
+ // We only want virtual member functions.
+ if (!MD->isVirtual())
+ continue;
+ MD = MD->getCanonicalDecl();
+
+ std::string MethodName =
+ PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
+ MD);
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ GlobalDecl GD(DD, Dtor_Complete);
+ assert(MethodVTableIndices.count(GD));
+ uint64_t VTableIndex = MethodVTableIndices[GD];
+ IndicesMap[VTableIndex] = MethodName + " [complete]";
+ IndicesMap[VTableIndex + 1] = MethodName + " [deleting]";
+ } else {
+ assert(MethodVTableIndices.count(MD));
+ IndicesMap[MethodVTableIndices[MD]] = MethodName;
+ }
+ }
+
+ // Print the vtable indices for all the member functions.
+ if (!IndicesMap.empty()) {
+ Out << "VTable indices for '";
+ MostDerivedClass->printQualifiedName(Out);
+ Out << "' (" << IndicesMap.size() << " entries).\n";
+
+ for (const auto &I : IndicesMap) {
+ uint64_t VTableIndex = I.first;
+ const std::string &MethodName = I.second;
+
+ Out << llvm::format("%4" PRIu64 " | ", VTableIndex) << MethodName
+ << '\n';
+ }
+ }
+
+ Out << '\n';
+}
+}
+
+VTableLayout::VTableLayout(uint64_t NumVTableComponents,
+ const VTableComponent *VTableComponents,
+ uint64_t NumVTableThunks,
+ const VTableThunkTy *VTableThunks,
+ const AddressPointsMapTy &AddressPoints,
+ bool IsMicrosoftABI)
+ : NumVTableComponents(NumVTableComponents),
+ VTableComponents(new VTableComponent[NumVTableComponents]),
+ NumVTableThunks(NumVTableThunks),
+ VTableThunks(new VTableThunkTy[NumVTableThunks]),
+ AddressPoints(AddressPoints),
+ IsMicrosoftABI(IsMicrosoftABI) {
+ std::copy(VTableComponents, VTableComponents+NumVTableComponents,
+ this->VTableComponents.get());
+ std::copy(VTableThunks, VTableThunks+NumVTableThunks,
+ this->VTableThunks.get());
+ std::sort(this->VTableThunks.get(),
+ this->VTableThunks.get() + NumVTableThunks,
+ [](const VTableLayout::VTableThunkTy &LHS,
+ const VTableLayout::VTableThunkTy &RHS) {
+ assert((LHS.first != RHS.first || LHS.second == RHS.second) &&
+ "Different thunks should have unique indices!");
+ return LHS.first < RHS.first;
+ });
+}
+
+VTableLayout::~VTableLayout() { }
+
+ItaniumVTableContext::ItaniumVTableContext(ASTContext &Context)
+ : VTableContextBase(/*MS=*/false) {}
+
+ItaniumVTableContext::~ItaniumVTableContext() {
+ llvm::DeleteContainerSeconds(VTableLayouts);
+}
+
+uint64_t ItaniumVTableContext::getMethodVTableIndex(GlobalDecl GD) {
+ MethodVTableIndicesTy::iterator I = MethodVTableIndices.find(GD);
+ if (I != MethodVTableIndices.end())
+ return I->second;
+
+ const CXXRecordDecl *RD = cast<CXXMethodDecl>(GD.getDecl())->getParent();
+
+ computeVTableRelatedInformation(RD);
+
+ I = MethodVTableIndices.find(GD);
+ assert(I != MethodVTableIndices.end() && "Did not find index!");
+ return I->second;
+}
+
+CharUnits
+ItaniumVTableContext::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *VBase) {
+ ClassPairTy ClassPair(RD, VBase);
+
+ VirtualBaseClassOffsetOffsetsMapTy::iterator I =
+ VirtualBaseClassOffsetOffsets.find(ClassPair);
+ if (I != VirtualBaseClassOffsetOffsets.end())
+ return I->second;
+
+ VCallAndVBaseOffsetBuilder Builder(RD, RD, /*FinalOverriders=*/nullptr,
+ BaseSubobject(RD, CharUnits::Zero()),
+ /*BaseIsVirtual=*/false,
+ /*OffsetInLayoutClass=*/CharUnits::Zero());
+
+ for (const auto &I : Builder.getVBaseOffsetOffsets()) {
+ // Insert all types.
+ ClassPairTy ClassPair(RD, I.first);
+
+ VirtualBaseClassOffsetOffsets.insert(std::make_pair(ClassPair, I.second));
+ }
+
+ I = VirtualBaseClassOffsetOffsets.find(ClassPair);
+ assert(I != VirtualBaseClassOffsetOffsets.end() && "Did not find index!");
+
+ return I->second;
+}
+
+static VTableLayout *CreateVTableLayout(const ItaniumVTableBuilder &Builder) {
+ SmallVector<VTableLayout::VTableThunkTy, 1>
+ VTableThunks(Builder.vtable_thunks_begin(), Builder.vtable_thunks_end());
+
+ return new VTableLayout(Builder.getNumVTableComponents(),
+ Builder.vtable_component_begin(),
+ VTableThunks.size(),
+ VTableThunks.data(),
+ Builder.getAddressPoints(),
+ /*IsMicrosoftABI=*/false);
+}
+
+void
+ItaniumVTableContext::computeVTableRelatedInformation(const CXXRecordDecl *RD) {
+ const VTableLayout *&Entry = VTableLayouts[RD];
+
+ // Check if we've computed this information before.
+ if (Entry)
+ return;
+
+ ItaniumVTableBuilder Builder(*this, RD, CharUnits::Zero(),
+ /*MostDerivedClassIsVirtual=*/0, RD);
+ Entry = CreateVTableLayout(Builder);
+
+ MethodVTableIndices.insert(Builder.vtable_indices_begin(),
+ Builder.vtable_indices_end());
+
+ // Add the known thunks.
+ Thunks.insert(Builder.thunks_begin(), Builder.thunks_end());
+
+ // If we don't have the vbase information for this class, insert it.
+ // getVirtualBaseOffsetOffset will compute it separately without computing
+ // the rest of the vtable related information.
+ if (!RD->getNumVBases())
+ return;
+
+ const CXXRecordDecl *VBase =
+ RD->vbases_begin()->getType()->getAsCXXRecordDecl();
+
+ if (VirtualBaseClassOffsetOffsets.count(std::make_pair(RD, VBase)))
+ return;
+
+ for (const auto &I : Builder.getVBaseOffsetOffsets()) {
+ // Insert all types.
+ ClassPairTy ClassPair(RD, I.first);
+
+ VirtualBaseClassOffsetOffsets.insert(std::make_pair(ClassPair, I.second));
+ }
+}
+
+VTableLayout *ItaniumVTableContext::createConstructionVTableLayout(
+ const CXXRecordDecl *MostDerivedClass, CharUnits MostDerivedClassOffset,
+ bool MostDerivedClassIsVirtual, const CXXRecordDecl *LayoutClass) {
+ ItaniumVTableBuilder Builder(*this, MostDerivedClass, MostDerivedClassOffset,
+ MostDerivedClassIsVirtual, LayoutClass);
+ return CreateVTableLayout(Builder);
+}
+
+namespace {
+
+// Vtables in the Microsoft ABI are different from those in the Itanium ABI.
+//
+// The main differences are:
+// 1. Separate vftable and vbtable.
+//
+// 2. Each subobject with a vfptr gets its own vftable rather than an address
+// point in a single vtable shared between all the subobjects.
+// Each vftable is represented by a separate section and virtual calls
+// must be done using the vftable which has a slot for the function to be
+// called.
+//
+// 3. Virtual method definitions expect their 'this' parameter to point to the
+// first vfptr whose table provides a compatible overridden method. In many
+// cases, this permits the original vf-table entry to directly call
+// the method instead of passing through a thunk.
+// See example before VFTableBuilder::ComputeThisOffset below.
+//
+// A compatible overridden method is one which does not have a non-trivial
+// covariant-return adjustment.
+//
+// The first vfptr is the one with the lowest offset in the complete-object
+// layout of the defining class, and the method definition will subtract
+// that constant offset from the parameter value to get the real 'this'
+// value. Therefore, if the offset isn't really constant (e.g. if a virtual
+// function defined in a virtual base is overridden in a more derived
+// virtual base and these bases have a reverse order in the complete
+// object), the vf-table may require a this-adjustment thunk.
+//
+// 4. vftables do not contain new entries for overrides that merely require
+// this-adjustment. Together with #3, this keeps vf-tables smaller and
+// eliminates the need for this-adjustment thunks in many cases, at the cost
+// of often requiring redundant work to adjust the "this" pointer.
+//
+// 5. Instead of VTT and constructor vtables, vbtables and vtordisps are used.
+// Vtordisps are emitted into the class layout if a class has
+// a) a user-defined ctor/dtor
+// and
+// b) a method overriding a method in a virtual base.
+//
+// To get a better understanding of this code,
+// you might want to see examples in test/CodeGenCXX/microsoft-abi-vtables-*.cpp
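+//
+// As an illustrative sketch of difference #2 (the names here are invented for
+// this comment, not taken from the test suite):
+// struct A { virtual void f(); };
+// struct B { virtual void g(); };
+// struct C : A, B {
+// virtual void f();
+// virtual void g();
+// };
+// In the Itanium ABI, C would get a single vtable with two address points;
+// in the Microsoft ABI, C gets two separate vftables: one for the A subobject
+// (with a slot for C::f) and one for the B subobject (with a slot for C::g).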
+
+class VFTableBuilder {
+public:
+ typedef MicrosoftVTableContext::MethodVFTableLocation MethodVFTableLocation;
+
+ typedef llvm::DenseMap<GlobalDecl, MethodVFTableLocation>
+ MethodVFTableLocationsTy;
+
+ typedef llvm::iterator_range<MethodVFTableLocationsTy::const_iterator>
+ method_locations_range;
+
+private:
+ /// VTables - Global vtable information.
+ MicrosoftVTableContext &VTables;
+
+ /// Context - The ASTContext which we will use for layout information.
+ ASTContext &Context;
+
+ /// MostDerivedClass - The most derived class for which we're building this
+ /// vtable.
+ const CXXRecordDecl *MostDerivedClass;
+
+ const ASTRecordLayout &MostDerivedClassLayout;
+
+ const VPtrInfo &WhichVFPtr;
+
+ /// FinalOverriders - The final overriders of the most derived class.
+ const FinalOverriders Overriders;
+
+ /// Components - The components of the vftable being built.
+ SmallVector<VTableComponent, 64> Components;
+
+ MethodVFTableLocationsTy MethodVFTableLocations;
+
+ /// \brief Does this class have an RTTI component?
+ bool HasRTTIComponent;
+
+ /// MethodInfo - Contains information about a method in a vtable.
+ /// (Used for computing 'this' pointer adjustment thunks.)
+ struct MethodInfo {
+ /// VBTableIndex - The nonzero index in the vbtable that
+ /// this method's base has, or zero.
+ const uint64_t VBTableIndex;
+
+ /// VFTableIndex - The index in the vftable that this method has.
+ const uint64_t VFTableIndex;
+
+ /// Shadowed - Indicates if this vftable slot is shadowed by
+ /// a slot for a covariant-return override. If so, it shouldn't be printed
+ /// or used for vcalls in the most derived class.
+ bool Shadowed;
+
+ /// UsesExtraSlot - Indicates if this vftable slot was created because
+ /// any of the overridden slots required a return adjusting thunk.
+ bool UsesExtraSlot;
+
+ MethodInfo(uint64_t VBTableIndex, uint64_t VFTableIndex,
+ bool UsesExtraSlot = false)
+ : VBTableIndex(VBTableIndex), VFTableIndex(VFTableIndex),
+ Shadowed(false), UsesExtraSlot(UsesExtraSlot) {}
+
+ MethodInfo()
+ : VBTableIndex(0), VFTableIndex(0), Shadowed(false),
+ UsesExtraSlot(false) {}
+ };
+
+ typedef llvm::DenseMap<const CXXMethodDecl *, MethodInfo> MethodInfoMapTy;
+
+ /// MethodInfoMap - The information for all methods in the vftable we're
+ /// currently building.
+ MethodInfoMapTy MethodInfoMap;
+
+ typedef llvm::DenseMap<uint64_t, ThunkInfo> VTableThunksMapTy;
+
+ /// VTableThunks - The thunks by vftable index in the vftable currently being
+ /// built.
+ VTableThunksMapTy VTableThunks;
+
+ typedef SmallVector<ThunkInfo, 1> ThunkInfoVectorTy;
+ typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy;
+
+ /// Thunks - A map that contains all the thunks needed for all methods in the
+ /// most derived class for which the vftable is currently being built.
+ ThunksMapTy Thunks;
+
+ /// AddThunk - Add a thunk for the given method.
+ void AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk) {
+ SmallVector<ThunkInfo, 1> &ThunksVector = Thunks[MD];
+
+ // Check if we have this thunk already.
+ if (std::find(ThunksVector.begin(), ThunksVector.end(), Thunk) !=
+ ThunksVector.end())
+ return;
+
+ ThunksVector.push_back(Thunk);
+ }
+
+ /// ComputeThisOffset - Returns the 'this' argument offset for the given
+ /// method, relative to the beginning of the MostDerivedClass.
+ CharUnits ComputeThisOffset(FinalOverriders::OverriderInfo Overrider);
+
+ void CalculateVtordispAdjustment(FinalOverriders::OverriderInfo Overrider,
+ CharUnits ThisOffset, ThisAdjustment &TA);
+
+ /// AddMethod - Add a single virtual member function to the vftable
+ /// components vector.
+ void AddMethod(const CXXMethodDecl *MD, ThunkInfo TI) {
+ if (!TI.isEmpty()) {
+ VTableThunks[Components.size()] = TI;
+ AddThunk(MD, TI);
+ }
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ assert(TI.Return.isEmpty() &&
+ "Destructor can't have return adjustment!");
+ Components.push_back(VTableComponent::MakeDeletingDtor(DD));
+ } else {
+ Components.push_back(VTableComponent::MakeFunction(MD));
+ }
+ }
+
+ /// AddMethods - Add the methods of this base subobject and the relevant
+ /// subbases to the vftable we're currently laying out.
+ void AddMethods(BaseSubobject Base, unsigned BaseDepth,
+ const CXXRecordDecl *LastVBase,
+ BasesSetVectorTy &VisitedBases);
+
+ void LayoutVFTable() {
+ // RTTI data goes before all other entries.
+ if (HasRTTIComponent)
+ Components.push_back(VTableComponent::MakeRTTI(MostDerivedClass));
+
+ BasesSetVectorTy VisitedBases;
+ AddMethods(BaseSubobject(MostDerivedClass, CharUnits::Zero()), 0, nullptr,
+ VisitedBases);
+ assert((HasRTTIComponent ? Components.size() - 1 : Components.size()) &&
+ "vftable can't be empty");
+
+ assert(MethodVFTableLocations.empty());
+ for (const auto &I : MethodInfoMap) {
+ const CXXMethodDecl *MD = I.first;
+ const MethodInfo &MI = I.second;
+ // Skip the methods that the MostDerivedClass didn't override
+ // and the entries shadowed by return adjusting thunks.
+ if (MD->getParent() != MostDerivedClass || MI.Shadowed)
+ continue;
+ MethodVFTableLocation Loc(MI.VBTableIndex, WhichVFPtr.getVBaseWithVPtr(),
+ WhichVFPtr.NonVirtualOffset, MI.VFTableIndex);
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ MethodVFTableLocations[GlobalDecl(DD, Dtor_Deleting)] = Loc;
+ } else {
+ MethodVFTableLocations[MD] = Loc;
+ }
+ }
+ }
+
+public:
+ VFTableBuilder(MicrosoftVTableContext &VTables,
+ const CXXRecordDecl *MostDerivedClass, const VPtrInfo *Which)
+ : VTables(VTables),
+ Context(MostDerivedClass->getASTContext()),
+ MostDerivedClass(MostDerivedClass),
+ MostDerivedClassLayout(Context.getASTRecordLayout(MostDerivedClass)),
+ WhichVFPtr(*Which),
+ Overriders(MostDerivedClass, CharUnits(), MostDerivedClass) {
+ // Only include the RTTI component if we know that we will provide a
+ // definition of the vftable.
+ HasRTTIComponent = Context.getLangOpts().RTTIData &&
+ !MostDerivedClass->hasAttr<DLLImportAttr>() &&
+ MostDerivedClass->getTemplateSpecializationKind() !=
+ TSK_ExplicitInstantiationDeclaration;
+
+ LayoutVFTable();
+
+ if (Context.getLangOpts().DumpVTableLayouts)
+ dumpLayout(llvm::outs());
+ }
+
+ uint64_t getNumThunks() const { return Thunks.size(); }
+
+ ThunksMapTy::const_iterator thunks_begin() const { return Thunks.begin(); }
+
+ ThunksMapTy::const_iterator thunks_end() const { return Thunks.end(); }
+
+ method_locations_range vtable_locations() const {
+ return method_locations_range(MethodVFTableLocations.begin(),
+ MethodVFTableLocations.end());
+ }
+
+ uint64_t getNumVTableComponents() const { return Components.size(); }
+
+ const VTableComponent *vtable_component_begin() const {
+ return Components.begin();
+ }
+
+ const VTableComponent *vtable_component_end() const {
+ return Components.end();
+ }
+
+ VTableThunksMapTy::const_iterator vtable_thunks_begin() const {
+ return VTableThunks.begin();
+ }
+
+ VTableThunksMapTy::const_iterator vtable_thunks_end() const {
+ return VTableThunks.end();
+ }
+
+ void dumpLayout(raw_ostream &);
+};
+
+} // end namespace
+
+// Let's study one class hierarchy as an example:
+// struct A {
+// virtual void f();
+// int x;
+// };
+//
+// struct B : virtual A {
+// virtual void f();
+// };
+//
+// Record layouts:
+// struct A:
+// 0 | (A vftable pointer)
+// 4 | int x
+//
+// struct B:
+// 0 | (B vbtable pointer)
+// 4 | struct A (virtual base)
+// 4 | (A vftable pointer)
+// 8 | int x
+//
+// Let's assume we have a pointer to the A part of an object of dynamic type B:
+// B b;
+// A *a = (A*)&b;
+// a->f();
+//
+// In this hierarchy, f() belongs to the vftable of A, so B::f() expects
+// "this" parameter to point at the A subobject, which is B+4.
+// In the B::f() prologue, it adjusts "this" back to B by subtracting 4,
+// performed as a *static* adjustment.
+//
+// An interesting thing happens when we alter the relative placement of A and B
+// subobjects in a class:
+// struct C : virtual B { };
+//
+// C c;
+// A *a = (A*)&c;
+// a->f();
+//
+// Respective record layout is:
+// 0 | (C vbtable pointer)
+// 4 | struct A (virtual base)
+// 4 | (A vftable pointer)
+// 8 | int x
+// 12 | struct B (virtual base)
+// 12 | (B vbtable pointer)
+//
+// The final overrider of f() in class C is still B::f(), so B+4 should be
+// passed as "this" to that code. However, "a" points at B-8, so the respective
+// vftable entry should hold a thunk that adds 12 to the "this" argument before
+// performing a tail call to B::f().
+//
+// With this example in mind, we can now calculate the 'this' argument offset
+// for the given method, relative to the beginning of the MostDerivedClass.
+CharUnits
+VFTableBuilder::ComputeThisOffset(FinalOverriders::OverriderInfo Overrider) {
+ BasesSetVectorTy Bases;
+
+ {
+ // Find the set of least derived bases that define the given method.
+ OverriddenMethodsSetTy VisitedOverriddenMethods;
+ auto InitialOverriddenDefinitionCollector = [&](
+ const CXXMethodDecl *OverriddenMD) {
+ if (OverriddenMD->size_overridden_methods() == 0)
+ Bases.insert(OverriddenMD->getParent());
+ // Don't recurse on this method if we've already collected it.
+ return VisitedOverriddenMethods.insert(OverriddenMD).second;
+ };
+ visitAllOverriddenMethods(Overrider.Method,
+ InitialOverriddenDefinitionCollector);
+ }
+
+ // If there are no overrides then 'this' is located
+ // in the base that defines the method.
+ if (Bases.size() == 0)
+ return Overrider.Offset;
+
+ CXXBasePaths Paths;
+ Overrider.Method->getParent()->lookupInBases(
+ [&Bases](const CXXBaseSpecifier *Specifier, CXXBasePath &) {
+ return Bases.count(Specifier->getType()->getAsCXXRecordDecl());
+ },
+ Paths);
+
+ // This will hold the smallest this offset among overridees of MD.
+ // This implies that an offset of a non-virtual base will dominate an offset
+ // of a virtual base to potentially reduce the number of thunks required
+ // in the derived classes that inherit this method.
+ CharUnits Ret;
+ bool First = true;
+
+ const ASTRecordLayout &OverriderRDLayout =
+ Context.getASTRecordLayout(Overrider.Method->getParent());
+ for (const CXXBasePath &Path : Paths) {
+ CharUnits ThisOffset = Overrider.Offset;
+ CharUnits LastVBaseOffset;
+
+ // For each path from the overrider to the parents of the overridden
+ // methods, traverse the path, calculating the this offset in the most
+ // derived class.
+ for (const CXXBasePathElement &Element : Path) {
+ QualType CurTy = Element.Base->getType();
+ const CXXRecordDecl *PrevRD = Element.Class,
+ *CurRD = CurTy->getAsCXXRecordDecl();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(PrevRD);
+
+ if (Element.Base->isVirtual()) {
+ // The interesting things begin when you have virtual inheritance.
+ // The final overrider will use a static adjustment equal to the offset
+ // of the vbase in the final overrider class.
+ // For example, if the final overrider is in a vbase B of the most
+ // derived class and it overrides a method of the B's own vbase A,
+ // it uses A* as "this". In its prologue, it can cast A* to B* with
+ // a static offset. This offset is used regardless of the actual
+ // offset of A from B in the most derived class, requiring a
+ // this-adjusting thunk in the vftable if A and B are laid out
+ // differently in the most derived class.
+ LastVBaseOffset = ThisOffset =
+ Overrider.Offset + OverriderRDLayout.getVBaseClassOffset(CurRD);
+ } else {
+ ThisOffset += Layout.getBaseClassOffset(CurRD);
+ }
+ }
+
+ if (isa<CXXDestructorDecl>(Overrider.Method)) {
+ if (LastVBaseOffset.isZero()) {
+ // If a "Base" class has at least one non-virtual base with a virtual
+ // destructor, the "Base" virtual destructor will take the address
+ // of the "Base" subobject as the "this" argument.
+ ThisOffset = Overrider.Offset;
+ } else {
+ // A virtual destructor of a virtual base takes the address of the
+ // virtual base subobject as the "this" argument.
+ ThisOffset = LastVBaseOffset;
+ }
+ }
+
+ if (Ret > ThisOffset || First) {
+ First = false;
+ Ret = ThisOffset;
+ }
+ }
+
+ assert(!First && "Method not found in the given subobject?");
+ return Ret;
+}
+
+// Things are getting even more complex when the "this" adjustment has to
+// use a dynamic offset instead of a static one, or even two dynamic offsets.
+// This is sometimes required when a virtual call happens in the middle of
+// a non-most-derived class construction or destruction.
+//
+// Let's take a look at the following example:
+// struct A {
+// virtual void f();
+// };
+//
+// void foo(A *a) { a->f(); } // Knows nothing about siblings of A.
+//
+// struct B : virtual A {
+// virtual void f();
+// B() {
+// foo(this);
+// }
+// };
+//
+// struct C : virtual B {
+// virtual void f();
+// };
+//
+// Record layouts for these classes are:
+// struct A
+// 0 | (A vftable pointer)
+//
+// struct B
+// 0 | (B vbtable pointer)
+// 4 | (vtordisp for vbase A)
+// 8 | struct A (virtual base)
+// 8 | (A vftable pointer)
+//
+// struct C
+// 0 | (C vbtable pointer)
+// 4 | (vtordisp for vbase A)
+// 8 | struct A (virtual base) // A precedes B!
+// 8 | (A vftable pointer)
+// 12 | struct B (virtual base)
+// 12 | (B vbtable pointer)
+//
+// When one creates an object of type C, the C constructor:
+// - initializes all the vbptrs, then
+// - calls the A subobject constructor
+// (initializes A's vfptr with an address of A vftable), then
+// - calls the B subobject constructor
+// (initializes A's vfptr with an address of B vftable and vtordisp for A),
+// that in turn calls foo(), then
+// - initializes A's vfptr with an address of C vftable and zeroes out the
+// vtordisp
+// FIXME: if a structor knows it belongs to MDC, why doesn't it use a vftable
+// without vtordisp thunks?
+// FIXME: how are vtordisps handled in the presence of nooverride/final?
+//
+// When foo() is called, an object with a layout of class C has a vftable
+// referencing B::f() that assumes a B layout, so the "this" adjustments are
+// incorrect, unless an extra adjustment is done. This adjustment is called
+// "vtordisp adjustment". Vtordisp basically holds the difference between the
+// actual location of a vbase in the layout class and the location assumed by
+// the vftable of the class being constructed/destructed. Vtordisp is only
+// needed if "this" escapes a
+// structor (or we can't prove otherwise).
+// [i.e. vtordisp is a dynamic adjustment for a static adjustment, which is an
+// estimation of a dynamic adjustment]
+//
+// foo() gets a pointer to the A vbase and doesn't know anything about B or C,
+// so it just passes that pointer as "this" in a virtual call.
+// If there was no vtordisp, that would just dispatch to B::f().
+// However, B::f() assumes B+8 is passed as "this",
+// yet the pointer foo() passes along is B-4 (i.e. C+8).
+// An extra adjustment is needed, so we emit a thunk into the B vftable.
+// This vtordisp thunk subtracts the value of vtordisp
+// from the "this" argument (-12) before making a tailcall to B::f().
+//
+// Let's consider an even more complex example:
+// struct D : virtual B, virtual C {
+// D() {
+// foo(this);
+// }
+// };
+//
+// struct D
+// 0 | (D vbtable pointer)
+// 4 | (vtordisp for vbase A)
+// 8 | struct A (virtual base) // A precedes both B and C!
+// 8 | (A vftable pointer)
+// 12 | struct B (virtual base) // B precedes C!
+// 12 | (B vbtable pointer)
+// 16 | struct C (virtual base)
+// 16 | (C vbtable pointer)
+//
+// When D::D() calls foo(), we find ourselves in a thunk that should tailcall
+// to C::f(), which assumes C+8 as its "this" parameter. This time, foo()
+// passes along A, which is C-8. The A vtordisp holds
+// "D.vbptr[index_of_A] - offset_of_A_in_D"
+// and we statically know offset_of_A_in_D, so can get a pointer to D.
+// When we know it, we can make an extra vbtable lookup to locate the C vbase
+// and one extra static adjustment to calculate the expected value of C+8.
+void VFTableBuilder::CalculateVtordispAdjustment(
+ FinalOverriders::OverriderInfo Overrider, CharUnits ThisOffset,
+ ThisAdjustment &TA) {
+ const ASTRecordLayout::VBaseOffsetsMapTy &VBaseMap =
+ MostDerivedClassLayout.getVBaseOffsetsMap();
+ const ASTRecordLayout::VBaseOffsetsMapTy::const_iterator &VBaseMapEntry =
+ VBaseMap.find(WhichVFPtr.getVBaseWithVPtr());
+ assert(VBaseMapEntry != VBaseMap.end());
+
+ // If there's no vtordisp or the final overrider is defined in the same vbase
+ // as the initial declaration, we don't need any vtordisp adjustment.
+ if (!VBaseMapEntry->second.hasVtorDisp() ||
+ Overrider.VirtualBase == WhichVFPtr.getVBaseWithVPtr())
+ return;
+
+ // OK, now we know we need to use a vtordisp thunk.
+ // The implicit vtordisp field is located right before the vbase.
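+ // (The vtordisp is a 4-byte int, as in the layout examples above, which
+ // accounts for the "- 4" below.)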
+ CharUnits OffsetOfVBaseWithVFPtr = VBaseMapEntry->second.VBaseOffset;
+ TA.Virtual.Microsoft.VtordispOffset =
+ (OffsetOfVBaseWithVFPtr - WhichVFPtr.FullOffsetInMDC).getQuantity() - 4;
+
+ // A simple vtordisp thunk will suffice if the final overrider is defined
+ // in either the most derived class or its non-virtual base.
+ if (Overrider.Method->getParent() == MostDerivedClass ||
+ !Overrider.VirtualBase)
+ return;
+
+ // Otherwise, we need to use the dynamic offset of the final overrider
+ // in order to get "this" adjustment right.
+ TA.Virtual.Microsoft.VBPtrOffset =
+ (OffsetOfVBaseWithVFPtr + WhichVFPtr.NonVirtualOffset -
+ MostDerivedClassLayout.getVBPtrOffset()).getQuantity();
+ TA.Virtual.Microsoft.VBOffsetOffset =
+ Context.getTypeSizeInChars(Context.IntTy).getQuantity() *
+ VTables.getVBTableIndex(MostDerivedClass, Overrider.VirtualBase);
+
+ TA.NonVirtual = (ThisOffset - Overrider.Offset).getQuantity();
+}
+
+static void GroupNewVirtualOverloads(
+ const CXXRecordDecl *RD,
+ SmallVector<const CXXMethodDecl *, 10> &VirtualMethods) {
+ // Put the virtual methods into VirtualMethods in the proper order:
+ // 1) Group overloads by declaration name. New groups are added to the
+ // vftable in the order of their first declarations in this class
+ // (including overrides, non-virtual methods and any other named decl that
+ // might be nested within the class).
+ // 2) In each group, new overloads appear in the reverse order of declaration.
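+ //
+ // For example (an illustrative class, not taken from the test suite):
+ // struct X {
+ // virtual void f();
+ // virtual void g();
+ // virtual void g(int);
+ // };
+ // produces the vftable order f, g(int), g(): the "f" group is added first,
+ // and within the "g" group the later overload g(int) precedes g().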
+ typedef SmallVector<const CXXMethodDecl *, 1> MethodGroup;
+ SmallVector<MethodGroup, 10> Groups;
+ typedef llvm::DenseMap<DeclarationName, unsigned> VisitedGroupIndicesTy;
+ VisitedGroupIndicesTy VisitedGroupIndices;
+ for (const auto *D : RD->decls()) {
+ const auto *ND = dyn_cast<NamedDecl>(D);
+ if (!ND)
+ continue;
+ VisitedGroupIndicesTy::iterator J;
+ bool Inserted;
+ std::tie(J, Inserted) = VisitedGroupIndices.insert(
+ std::make_pair(ND->getDeclName(), Groups.size()));
+ if (Inserted)
+ Groups.push_back(MethodGroup());
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(ND))
+ if (MD->isVirtual())
+ Groups[J->second].push_back(MD->getCanonicalDecl());
+ }
+
+ for (const MethodGroup &Group : Groups)
+ VirtualMethods.append(Group.rbegin(), Group.rend());
+}
+
+static bool isDirectVBase(const CXXRecordDecl *Base, const CXXRecordDecl *RD) {
+ for (const auto &B : RD->bases()) {
+ if (B.isVirtual() && B.getType()->getAsCXXRecordDecl() == Base)
+ return true;
+ }
+ return false;
+}
+
+void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth,
+ const CXXRecordDecl *LastVBase,
+ BasesSetVectorTy &VisitedBases) {
+ const CXXRecordDecl *RD = Base.getBase();
+ if (!RD->isPolymorphic())
+ return;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // See if this class expands a vftable of the base we look at, which is either
+ // the one defined by the vfptr base path or the primary base of the current
+ // class.
+ const CXXRecordDecl *NextBase = nullptr, *NextLastVBase = LastVBase;
+ CharUnits NextBaseOffset;
+ if (BaseDepth < WhichVFPtr.PathToBaseWithVPtr.size()) {
+ NextBase = WhichVFPtr.PathToBaseWithVPtr[BaseDepth];
+ if (isDirectVBase(NextBase, RD)) {
+ NextLastVBase = NextBase;
+ NextBaseOffset = MostDerivedClassLayout.getVBaseClassOffset(NextBase);
+ } else {
+ NextBaseOffset =
+ Base.getBaseOffset() + Layout.getBaseClassOffset(NextBase);
+ }
+ } else if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
+ assert(!Layout.isPrimaryBaseVirtual() &&
+ "No primary virtual bases in this ABI");
+ NextBase = PrimaryBase;
+ NextBaseOffset = Base.getBaseOffset();
+ }
+
+ if (NextBase) {
+ AddMethods(BaseSubobject(NextBase, NextBaseOffset), BaseDepth + 1,
+ NextLastVBase, VisitedBases);
+ if (!VisitedBases.insert(NextBase))
+ llvm_unreachable("Found a duplicate primary base!");
+ }
+
+ SmallVector<const CXXMethodDecl*, 10> VirtualMethods;
+ // Put virtual methods in the proper order.
+ GroupNewVirtualOverloads(RD, VirtualMethods);
+
+ // Now go through all virtual member functions and add them to the current
+ // vftable. This is done by
+ // - replacing overridden methods in their existing slots, as long as they
+ // don't require return adjustment; calculating This adjustment if needed.
+ // - adding new slots for methods of the current base not present in any
+ // sub-bases;
+ // - adding new slots for methods that require Return adjustment.
+ // We keep track of the methods visited in the sub-bases in MethodInfoMap.
+ for (const CXXMethodDecl *MD : VirtualMethods) {
+ FinalOverriders::OverriderInfo FinalOverrider =
+ Overriders.getOverrider(MD, Base.getBaseOffset());
+ const CXXMethodDecl *FinalOverriderMD = FinalOverrider.Method;
+ const CXXMethodDecl *OverriddenMD =
+ FindNearestOverriddenMethod(MD, VisitedBases);
+
+ ThisAdjustment ThisAdjustmentOffset;
+ bool ReturnAdjustingThunk = false, ForceReturnAdjustmentMangling = false;
+ CharUnits ThisOffset = ComputeThisOffset(FinalOverrider);
+ ThisAdjustmentOffset.NonVirtual =
+ (ThisOffset - WhichVFPtr.FullOffsetInMDC).getQuantity();
+ if ((OverriddenMD || FinalOverriderMD != MD) &&
+ WhichVFPtr.getVBaseWithVPtr())
+ CalculateVtordispAdjustment(FinalOverrider, ThisOffset,
+ ThisAdjustmentOffset);
+
+ if (OverriddenMD) {
+ // If MD overrides anything in this vftable, we need to update the
+ // entries.
+ MethodInfoMapTy::iterator OverriddenMDIterator =
+ MethodInfoMap.find(OverriddenMD);
+
+ // If the overridden method went to a different vftable, skip it.
+ if (OverriddenMDIterator == MethodInfoMap.end())
+ continue;
+
+ MethodInfo &OverriddenMethodInfo = OverriddenMDIterator->second;
+
+ // Let's check if the overrider requires any return adjustments.
+ // We must create a new slot if the MD's return type is not trivially
+ // convertible to the OverriddenMD's one.
+ // Once a chain of method overrides adds a return adjusting vftable slot,
+ // all subsequent overrides will also use an extra method slot.
+ ReturnAdjustingThunk = !ComputeReturnAdjustmentBaseOffset(
+ Context, MD, OverriddenMD).isEmpty() ||
+ OverriddenMethodInfo.UsesExtraSlot;
+
+ if (!ReturnAdjustingThunk) {
+ // No return adjustment needed - just replace the overridden method info
+ // with the current info.
+ MethodInfo MI(OverriddenMethodInfo.VBTableIndex,
+ OverriddenMethodInfo.VFTableIndex);
+ MethodInfoMap.erase(OverriddenMDIterator);
+
+ assert(!MethodInfoMap.count(MD) &&
+ "Should not have method info for this method yet!");
+ MethodInfoMap.insert(std::make_pair(MD, MI));
+ continue;
+ }
+
+ // In case we need a return adjustment, we'll add a new slot for
+ // the overrider. Mark the overridden method as shadowed by the new slot.
+ OverriddenMethodInfo.Shadowed = true;
+
+ // Force a special name mangling for a return-adjusting thunk
+ // unless the method is the final overrider without this adjustment.
+ ForceReturnAdjustmentMangling =
+ !(MD == FinalOverriderMD && ThisAdjustmentOffset.isEmpty());
+ } else if (Base.getBaseOffset() != WhichVFPtr.FullOffsetInMDC ||
+ MD->size_overridden_methods()) {
+ // Skip methods that don't belong to the vftable of the current class,
+ // e.g. each method that wasn't seen in any of the visited sub-bases
+ // but overrides multiple methods of other sub-bases.
+ continue;
+ }
+
+ // If we got here, MD is a method not seen in any of the sub-bases or
+ // it requires return adjustment. Insert the method info for this method.
+ unsigned VBIndex =
+ LastVBase ? VTables.getVBTableIndex(MostDerivedClass, LastVBase) : 0;
+ MethodInfo MI(VBIndex,
+ HasRTTIComponent ? Components.size() - 1 : Components.size(),
+ ReturnAdjustingThunk);
+
+ assert(!MethodInfoMap.count(MD) &&
+ "Should not have method info for this method yet!");
+ MethodInfoMap.insert(std::make_pair(MD, MI));
+
+ // Check if this overrider needs a return adjustment.
+ // We don't want to do this for pure virtual member functions.
+ BaseOffset ReturnAdjustmentOffset;
+ ReturnAdjustment ReturnAdjustment;
+ if (!FinalOverriderMD->isPure()) {
+ ReturnAdjustmentOffset =
+ ComputeReturnAdjustmentBaseOffset(Context, FinalOverriderMD, MD);
+ }
+ if (!ReturnAdjustmentOffset.isEmpty()) {
+ ForceReturnAdjustmentMangling = true;
+ ReturnAdjustment.NonVirtual =
+ ReturnAdjustmentOffset.NonVirtualOffset.getQuantity();
+ if (ReturnAdjustmentOffset.VirtualBase) {
+ const ASTRecordLayout &DerivedLayout =
+ Context.getASTRecordLayout(ReturnAdjustmentOffset.DerivedClass);
+ ReturnAdjustment.Virtual.Microsoft.VBPtrOffset =
+ DerivedLayout.getVBPtrOffset().getQuantity();
+ ReturnAdjustment.Virtual.Microsoft.VBIndex =
+ VTables.getVBTableIndex(ReturnAdjustmentOffset.DerivedClass,
+ ReturnAdjustmentOffset.VirtualBase);
+ }
+ }
+
+ AddMethod(FinalOverriderMD,
+ ThunkInfo(ThisAdjustmentOffset, ReturnAdjustment,
+ ForceReturnAdjustmentMangling ? MD : nullptr));
+ }
+}
+
+static void PrintBasePath(const VPtrInfo::BasePath &Path, raw_ostream &Out) {
+ for (const CXXRecordDecl *Elem :
+ llvm::make_range(Path.rbegin(), Path.rend())) {
+ Out << "'";
+ Elem->printQualifiedName(Out);
+ Out << "' in ";
+ }
+}
+
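+// Dumps a human-readable description of a thunk adjustment. Illustrative
+// samples of the output format (the values are made up for this comment):
+// [return adjustment (to type 'struct B *'): vbase #1, 0 non-virtual]
+// [this adjustment: vtordisp at -4, -8 non-virtual]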
+static void dumpMicrosoftThunkAdjustment(const ThunkInfo &TI, raw_ostream &Out,
+ bool ContinueFirstLine) {
+ const ReturnAdjustment &R = TI.Return;
+ bool Multiline = false;
+ const char *LinePrefix = "\n ";
+ if (!R.isEmpty() || TI.Method) {
+ if (!ContinueFirstLine)
+ Out << LinePrefix;
+ Out << "[return adjustment (to type '"
+ << TI.Method->getReturnType().getCanonicalType().getAsString()
+ << "'): ";
+ if (R.Virtual.Microsoft.VBPtrOffset)
+ Out << "vbptr at offset " << R.Virtual.Microsoft.VBPtrOffset << ", ";
+ if (R.Virtual.Microsoft.VBIndex)
+ Out << "vbase #" << R.Virtual.Microsoft.VBIndex << ", ";
+ Out << R.NonVirtual << " non-virtual]";
+ Multiline = true;
+ }
+
+ const ThisAdjustment &T = TI.This;
+ if (!T.isEmpty()) {
+ if (Multiline || !ContinueFirstLine)
+ Out << LinePrefix;
+ Out << "[this adjustment: ";
+ if (!TI.This.Virtual.isEmpty()) {
+ assert(T.Virtual.Microsoft.VtordispOffset < 0);
+ Out << "vtordisp at " << T.Virtual.Microsoft.VtordispOffset << ", ";
+ if (T.Virtual.Microsoft.VBPtrOffset) {
+ Out << "vbptr at " << T.Virtual.Microsoft.VBPtrOffset
+ << " to the left,";
+ assert(T.Virtual.Microsoft.VBOffsetOffset > 0);
+ Out << LinePrefix << " vboffset at "
+ << T.Virtual.Microsoft.VBOffsetOffset << " in the vbtable, ";
+ }
+ }
+ Out << T.NonVirtual << " non-virtual]";
+ }
+}
+
+void VFTableBuilder::dumpLayout(raw_ostream &Out) {
+ Out << "VFTable for ";
+ PrintBasePath(WhichVFPtr.PathToBaseWithVPtr, Out);
+ Out << "'";
+ MostDerivedClass->printQualifiedName(Out);
+ Out << "' (" << Components.size()
+ << (Components.size() == 1 ? " entry" : " entries") << ").\n";
+
+ for (unsigned I = 0, E = Components.size(); I != E; ++I) {
+ Out << llvm::format("%4d | ", I);
+
+ const VTableComponent &Component = Components[I];
+
+ // Dump the component.
+ switch (Component.getKind()) {
+ case VTableComponent::CK_RTTI:
+ Component.getRTTIDecl()->printQualifiedName(Out);
+ Out << " RTTI";
+ break;
+
+ case VTableComponent::CK_FunctionPointer: {
+ const CXXMethodDecl *MD = Component.getFunctionDecl();
+
+ // FIXME: Figure out how to print the real thunk type, since they can
+ // differ in the return type.
+ std::string Str = PredefinedExpr::ComputeName(
+ PredefinedExpr::PrettyFunctionNoVirtual, MD);
+ Out << Str;
+ if (MD->isPure())
+ Out << " [pure]";
+
+ if (MD->isDeleted())
+ Out << " [deleted]";
+
+ ThunkInfo Thunk = VTableThunks.lookup(I);
+ if (!Thunk.isEmpty())
+ dumpMicrosoftThunkAdjustment(Thunk, Out, /*ContinueFirstLine=*/false);
+
+ break;
+ }
+
+ case VTableComponent::CK_DeletingDtorPointer: {
+ const CXXDestructorDecl *DD = Component.getDestructorDecl();
+
+ DD->printQualifiedName(Out);
+ Out << "() [scalar deleting]";
+
+ if (DD->isPure())
+ Out << " [pure]";
+
+ ThunkInfo Thunk = VTableThunks.lookup(I);
+ if (!Thunk.isEmpty()) {
+ assert(Thunk.Return.isEmpty() &&
+ "No return adjustment needed for destructors!");
+ dumpMicrosoftThunkAdjustment(Thunk, Out, /*ContinueFirstLine=*/false);
+ }
+
+ break;
+ }
+
+ default:
+ DiagnosticsEngine &Diags = Context.getDiagnostics();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "Unexpected vftable component type %0 for component number %1");
+ Diags.Report(MostDerivedClass->getLocation(), DiagID)
+ << I << Component.getKind();
+ }
+
+ Out << '\n';
+ }
+
+ Out << '\n';
+
+ if (!Thunks.empty()) {
+ // We store the method names in a map to get a stable order.
+ std::map<std::string, const CXXMethodDecl *> MethodNamesAndDecls;
+
+ for (const auto &I : Thunks) {
+ const CXXMethodDecl *MD = I.first;
+ std::string MethodName = PredefinedExpr::ComputeName(
+ PredefinedExpr::PrettyFunctionNoVirtual, MD);
+
+ MethodNamesAndDecls.insert(std::make_pair(MethodName, MD));
+ }
+
+ for (const auto &MethodNameAndDecl : MethodNamesAndDecls) {
+ const std::string &MethodName = MethodNameAndDecl.first;
+ const CXXMethodDecl *MD = MethodNameAndDecl.second;
+
+ ThunkInfoVectorTy ThunksVector = Thunks[MD];
+ std::stable_sort(ThunksVector.begin(), ThunksVector.end(),
+ [](const ThunkInfo &LHS, const ThunkInfo &RHS) {
+ // Keep different thunks with the same adjustments in the order they
+ // were put into the vector.
+ return std::tie(LHS.This, LHS.Return) < std::tie(RHS.This, RHS.Return);
+ });
+
+ Out << "Thunks for '" << MethodName << "' (" << ThunksVector.size();
+ Out << (ThunksVector.size() == 1 ? " entry" : " entries") << ").\n";
+
+ for (unsigned I = 0, E = ThunksVector.size(); I != E; ++I) {
+ const ThunkInfo &Thunk = ThunksVector[I];
+
+ Out << llvm::format("%4d | ", I);
+ dumpMicrosoftThunkAdjustment(Thunk, Out, /*ContinueFirstLine=*/true);
+ Out << '\n';
+ }
+
+ Out << '\n';
+ }
+ }
+
+ Out.flush();
+}
+
+static bool setsIntersect(const llvm::SmallPtrSet<const CXXRecordDecl *, 4> &A,
+ ArrayRef<const CXXRecordDecl *> B) {
+ for (const CXXRecordDecl *Decl : B) {
+ if (A.count(Decl))
+ return true;
+ }
+ return false;
+}
+
+static bool rebucketPaths(VPtrInfoVector &Paths);
+
+/// Produces MSVC-compatible vbtable data. The symbols produced by this
+/// algorithm match those produced by MSVC 2012 and newer, which is different
+/// from MSVC 2010.
+///
+/// MSVC 2012 appears to minimize the vbtable names using the following
+/// algorithm. First, walk the class hierarchy in the usual order, depth first,
+/// left to right, to find all of the subobjects which contain a vbptr field.
+/// Visiting each class node yields a list of inheritance paths to vbptrs. Each
+/// record with a vbptr creates an initially empty path.
+///
+/// To combine paths from child nodes, the paths are compared to check for
+/// ambiguity. Paths are "ambiguous" if multiple paths have the same set of
+/// components in the same order. Each group of ambiguous paths is extended by
+/// appending the class of the base from which it came. If the current class
+/// node produced an ambiguous path, its path is extended with the current class.
+/// After extending paths, MSVC again checks for ambiguity, and extends any
+/// ambiguous path which wasn't already extended. Because each node yields an
+/// unambiguous set of paths, MSVC doesn't need to extend any path more than once
+/// to produce an unambiguous set of paths.
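+///
+/// For example (an illustrative hierarchy):
+/// struct A { int a; };
+/// struct B : virtual A { int b; };
+/// struct C : virtual A { int c; };
+/// struct D : B, C { int d; };
+/// B and C each contribute an initially empty path for their vbptr. Inside D
+/// the two paths compare equal, so each is extended with the class of the
+/// direct base it came from, yielding the distinct paths 'B' and 'C'.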
+///
+/// TODO: Presumably vftables use the same algorithm.
+void MicrosoftVTableContext::computeVTablePaths(bool ForVBTables,
+ const CXXRecordDecl *RD,
+ VPtrInfoVector &Paths) {
+ assert(Paths.empty());
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Base case: this subobject has its own vptr.
+ if (ForVBTables ? Layout.hasOwnVBPtr() : Layout.hasOwnVFPtr())
+ Paths.push_back(new VPtrInfo(RD));
+
+ // Recursive case: get all the vbtables from our bases and remove anything
+ // that shares a virtual base.
+ llvm::SmallPtrSet<const CXXRecordDecl*, 4> VBasesSeen;
+ for (const auto &B : RD->bases()) {
+ const CXXRecordDecl *Base = B.getType()->getAsCXXRecordDecl();
+ if (B.isVirtual() && VBasesSeen.count(Base))
+ continue;
+
+ if (!Base->isDynamicClass())
+ continue;
+
+ const VPtrInfoVector &BasePaths =
+ ForVBTables ? enumerateVBTables(Base) : getVFPtrOffsets(Base);
+
+ for (VPtrInfo *BaseInfo : BasePaths) {
+ // Don't include the path if it goes through a virtual base that we've
+ // already included.
+ if (setsIntersect(VBasesSeen, BaseInfo->ContainingVBases))
+ continue;
+
+ // Copy the path and adjust it as necessary.
+ VPtrInfo *P = new VPtrInfo(*BaseInfo);
+
+ // We mangle Base into the path if the path would've been ambiguous and it
+ // wasn't already extended with Base.
+ if (P->MangledPath.empty() || P->MangledPath.back() != Base)
+ P->NextBaseToMangle = Base;
+
+ // Keep track of which vtable the derived class is going to extend with
+ // new methods or bases. We append to either the vftable of our primary
+ // base, or the first non-virtual base that has a vbtable.
+ if (P->ReusingBase == Base &&
+ Base == (ForVBTables ? Layout.getBaseSharingVBPtr()
+ : Layout.getPrimaryBase()))
+ P->ReusingBase = RD;
+
+ // Keep track of the full adjustment from the MDC to this vtable. The
+ // adjustment is captured by an optional vbase and a non-virtual offset.
+ if (B.isVirtual())
+ P->ContainingVBases.push_back(Base);
+ else if (P->ContainingVBases.empty())
+ P->NonVirtualOffset += Layout.getBaseClassOffset(Base);
+
+ // Update the full offset in the MDC.
+ P->FullOffsetInMDC = P->NonVirtualOffset;
+ if (const CXXRecordDecl *VB = P->getVBaseWithVPtr())
+ P->FullOffsetInMDC += Layout.getVBaseClassOffset(VB);
+
+ Paths.push_back(P);
+ }
+
+ if (B.isVirtual())
+ VBasesSeen.insert(Base);
+
+ // After visiting any direct base, we've transitively visited all of its
+ // morally virtual bases.
+ for (const auto &VB : Base->vbases())
+ VBasesSeen.insert(VB.getType()->getAsCXXRecordDecl());
+ }
+
+ // Sort the paths into buckets, and if any of them are ambiguous, extend all
+ // paths in ambiguous buckets.
+ bool Changed = true;
+ while (Changed)
+ Changed = rebucketPaths(Paths);
+}
+
+static bool extendPath(VPtrInfo *P) {
+ if (P->NextBaseToMangle) {
+ P->MangledPath.push_back(P->NextBaseToMangle);
+ P->NextBaseToMangle = nullptr; // Prevent the path from being extended twice.
+ return true;
+ }
+ return false;
+}
+
+static bool rebucketPaths(VPtrInfoVector &Paths) {
+ // What we're essentially doing here is bucketing together ambiguous paths.
+ // Any bucket with more than one path in it gets extended by NextBase, which
+// is usually the direct base from which the vbptr was inherited. This code
+// uses a
+ // sorted vector to implement a multiset to form the buckets. Note that the
+ // ordering is based on pointers, but it doesn't change our output order. The
+ // current algorithm is designed to match MSVC 2012's names.
+ VPtrInfoVector PathsSorted(Paths);
+ std::sort(PathsSorted.begin(), PathsSorted.end(),
+ [](const VPtrInfo *LHS, const VPtrInfo *RHS) {
+ return LHS->MangledPath < RHS->MangledPath;
+ });
+ bool Changed = false;
+ for (size_t I = 0, E = PathsSorted.size(); I != E;) {
+ // Scan forward to find the end of the bucket.
+ size_t BucketStart = I;
+ do {
+ ++I;
+ } while (I != E && PathsSorted[BucketStart]->MangledPath ==
+ PathsSorted[I]->MangledPath);
+
+ // If this bucket has multiple paths, extend them all.
+ if (I - BucketStart > 1) {
+ for (size_t II = BucketStart; II != I; ++II)
+ Changed |= extendPath(PathsSorted[II]);
+ assert(Changed && "no paths were extended to fix ambiguity");
+ }
+ }
+ return Changed;
+}
+
+MicrosoftVTableContext::~MicrosoftVTableContext() {
+ for (auto &P : VFPtrLocations)
+ llvm::DeleteContainerPointers(*P.second);
+ llvm::DeleteContainerSeconds(VFPtrLocations);
+ llvm::DeleteContainerSeconds(VFTableLayouts);
+ llvm::DeleteContainerSeconds(VBaseInfo);
+}
+
+namespace {
+typedef llvm::SetVector<BaseSubobject, std::vector<BaseSubobject>,
+ llvm::DenseSet<BaseSubobject>> FullPathTy;
+}
+
+// This recursive function finds all paths from a subobject centered at
+// (RD, Offset) to the subobject located at BaseWithVPtr.
+static void findPathsToSubobject(ASTContext &Context,
+ const ASTRecordLayout &MostDerivedLayout,
+ const CXXRecordDecl *RD, CharUnits Offset,
+ BaseSubobject BaseWithVPtr,
+ FullPathTy &FullPath,
+ std::list<FullPathTy> &Paths) {
+ if (BaseSubobject(RD, Offset) == BaseWithVPtr) {
+ Paths.push_back(FullPath);
+ return;
+ }
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ for (const CXXBaseSpecifier &BS : RD->bases()) {
+ const CXXRecordDecl *Base = BS.getType()->getAsCXXRecordDecl();
+ CharUnits NewOffset = BS.isVirtual()
+ ? MostDerivedLayout.getVBaseClassOffset(Base)
+ : Offset + Layout.getBaseClassOffset(Base);
+ FullPath.insert(BaseSubobject(Base, NewOffset));
+ findPathsToSubobject(Context, MostDerivedLayout, Base, NewOffset,
+ BaseWithVPtr, FullPath, Paths);
+ FullPath.pop_back();
+ }
+}
+
+// Return the paths which are not subsets of other paths.
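+// For example (illustrative): given the two full paths {A} and {A, B}, the
+// path {A} is a subset of {A, B} and is removed.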
+static void removeRedundantPaths(std::list<FullPathTy> &FullPaths) {
+ FullPaths.remove_if([&](const FullPathTy &SpecificPath) {
+ for (const FullPathTy &OtherPath : FullPaths) {
+ if (&SpecificPath == &OtherPath)
+ continue;
+ if (std::all_of(SpecificPath.begin(), SpecificPath.end(),
+ [&](const BaseSubobject &BSO) {
+ return OtherPath.count(BSO) != 0;
+ })) {
+ return true;
+ }
+ }
+ return false;
+ });
+}
+
+static CharUnits getOffsetOfFullPath(ASTContext &Context,
+ const CXXRecordDecl *RD,
+ const FullPathTy &FullPath) {
+ const ASTRecordLayout &MostDerivedLayout =
+ Context.getASTRecordLayout(RD);
+ CharUnits Offset = CharUnits::fromQuantity(-1);
+ for (const BaseSubobject &BSO : FullPath) {
+ const CXXRecordDecl *Base = BSO.getBase();
+ // The first entry in the path is always the most derived record, skip it.
+ if (Base == RD) {
+ assert(Offset.getQuantity() == -1);
+ Offset = CharUnits::Zero();
+ continue;
+ }
+ assert(Offset.getQuantity() != -1);
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ // While we know which base has to be traversed, we don't know if that base
+ // was a virtual base.
+ const CXXBaseSpecifier *BaseBS = std::find_if(
+ RD->bases_begin(), RD->bases_end(), [&](const CXXBaseSpecifier &BS) {
+ return BS.getType()->getAsCXXRecordDecl() == Base;
+ });
+ Offset = BaseBS->isVirtual() ? MostDerivedLayout.getVBaseClassOffset(Base)
+ : Offset + Layout.getBaseClassOffset(Base);
+ RD = Base;
+ }
+ return Offset;
+}
+
+// We want to select the path which introduces the most covariant overrides. If
+// two paths introduce overrides which the other path doesn't contain, issue a
+// diagnostic.
+static const FullPathTy *selectBestPath(ASTContext &Context,
+ const CXXRecordDecl *RD, VPtrInfo *Info,
+ std::list<FullPathTy> &FullPaths) {
+ // Handle some easy cases first.
+ if (FullPaths.empty())
+ return nullptr;
+ if (FullPaths.size() == 1)
+ return &FullPaths.front();
+
+ const FullPathTy *BestPath = nullptr;
+ typedef std::set<const CXXMethodDecl *> OverriderSetTy;
+ OverriderSetTy LastOverrides;
+ for (const FullPathTy &SpecificPath : FullPaths) {
+ assert(!SpecificPath.empty());
+ OverriderSetTy CurrentOverrides;
+ const CXXRecordDecl *TopLevelRD = SpecificPath.begin()->getBase();
+ // Find the distance from the start of the path to the subobject with the
+ // VPtr.
+ CharUnits BaseOffset =
+ getOffsetOfFullPath(Context, TopLevelRD, SpecificPath);
+ FinalOverriders Overriders(TopLevelRD, CharUnits::Zero(), TopLevelRD);
+ for (const CXXMethodDecl *MD : Info->BaseWithVPtr->methods()) {
+ if (!MD->isVirtual())
+ continue;
+ FinalOverriders::OverriderInfo OI =
+ Overriders.getOverrider(MD->getCanonicalDecl(), BaseOffset);
+ const CXXMethodDecl *OverridingMethod = OI.Method;
+ // Only overriders which have a return adjustment introduce problematic
+ // thunks.
+ if (ComputeReturnAdjustmentBaseOffset(Context, OverridingMethod, MD)
+ .isEmpty())
+ continue;
+ // It's possible that the overrider isn't in this path. If so, skip it
+ // because this path didn't introduce it.
+ const CXXRecordDecl *OverridingParent = OverridingMethod->getParent();
+ if (std::none_of(SpecificPath.begin(), SpecificPath.end(),
+ [&](const BaseSubobject &BSO) {
+ return BSO.getBase() == OverridingParent;
+ }))
+ continue;
+ CurrentOverrides.insert(OverridingMethod);
+ }
+ OverriderSetTy NewOverrides =
+ llvm::set_difference(CurrentOverrides, LastOverrides);
+ if (NewOverrides.empty())
+ continue;
+ OverriderSetTy MissingOverrides =
+ llvm::set_difference(LastOverrides, CurrentOverrides);
+ if (MissingOverrides.empty()) {
+ // This path is a strict improvement over the last path, let's use it.
+ BestPath = &SpecificPath;
+ std::swap(CurrentOverrides, LastOverrides);
+ } else {
+ // This path introduces an overrider with a conflicting covariant thunk.
+ DiagnosticsEngine &Diags = Context.getDiagnostics();
+ const CXXMethodDecl *CovariantMD = *NewOverrides.begin();
+ const CXXMethodDecl *ConflictMD = *MissingOverrides.begin();
+ Diags.Report(RD->getLocation(), diag::err_vftable_ambiguous_component)
+ << RD;
+ Diags.Report(CovariantMD->getLocation(), diag::note_covariant_thunk)
+ << CovariantMD;
+ Diags.Report(ConflictMD->getLocation(), diag::note_covariant_thunk)
+ << ConflictMD;
+ }
+ }
+ // Go with the path that introduced the most covariant overrides. If there is
+ // no such path, pick the first path.
+ return BestPath ? BestPath : &FullPaths.front();
+}
+
+static void computeFullPathsForVFTables(ASTContext &Context,
+ const CXXRecordDecl *RD,
+ VPtrInfoVector &Paths) {
+ const ASTRecordLayout &MostDerivedLayout = Context.getASTRecordLayout(RD);
+ FullPathTy FullPath;
+ std::list<FullPathTy> FullPaths;
+ for (VPtrInfo *Info : Paths) {
+ findPathsToSubobject(
+ Context, MostDerivedLayout, RD, CharUnits::Zero(),
+ BaseSubobject(Info->BaseWithVPtr, Info->FullOffsetInMDC), FullPath,
+ FullPaths);
+ FullPath.clear();
+ removeRedundantPaths(FullPaths);
+ Info->PathToBaseWithVPtr.clear();
+ if (const FullPathTy *BestPath =
+ selectBestPath(Context, RD, Info, FullPaths))
+ for (const BaseSubobject &BSO : *BestPath)
+ Info->PathToBaseWithVPtr.push_back(BSO.getBase());
+ FullPaths.clear();
+ }
+}
+
+void MicrosoftVTableContext::computeVTableRelatedInformation(
+ const CXXRecordDecl *RD) {
+ assert(RD->isDynamicClass());
+
+ // Check if we've computed this information before.
+ if (VFPtrLocations.count(RD))
+ return;
+
+ const VTableLayout::AddressPointsMapTy EmptyAddressPointsMap;
+
+ VPtrInfoVector *VFPtrs = new VPtrInfoVector();
+ computeVTablePaths(/*ForVBTables=*/false, RD, *VFPtrs);
+ computeFullPathsForVFTables(Context, RD, *VFPtrs);
+ VFPtrLocations[RD] = VFPtrs;
+
+ MethodVFTableLocationsTy NewMethodLocations;
+ for (const VPtrInfo *VFPtr : *VFPtrs) {
+ VFTableBuilder Builder(*this, RD, VFPtr);
+
+ VFTableIdTy id(RD, VFPtr->FullOffsetInMDC);
+ assert(VFTableLayouts.count(id) == 0);
+ SmallVector<VTableLayout::VTableThunkTy, 1> VTableThunks(
+ Builder.vtable_thunks_begin(), Builder.vtable_thunks_end());
+ VFTableLayouts[id] = new VTableLayout(
+ Builder.getNumVTableComponents(), Builder.vtable_component_begin(),
+ VTableThunks.size(), VTableThunks.data(), EmptyAddressPointsMap, true);
+ Thunks.insert(Builder.thunks_begin(), Builder.thunks_end());
+
+ for (const auto &Loc : Builder.vtable_locations()) {
+ GlobalDecl GD = Loc.first;
+ MethodVFTableLocation NewLoc = Loc.second;
+ auto M = NewMethodLocations.find(GD);
+ if (M == NewMethodLocations.end() || NewLoc < M->second)
+ NewMethodLocations[GD] = NewLoc;
+ }
+ }
+
+ MethodVFTableLocations.insert(NewMethodLocations.begin(),
+ NewMethodLocations.end());
+ if (Context.getLangOpts().DumpVTableLayouts)
+ dumpMethodLocations(RD, NewMethodLocations, llvm::outs());
+}
+
+void MicrosoftVTableContext::dumpMethodLocations(
+ const CXXRecordDecl *RD, const MethodVFTableLocationsTy &NewMethods,
+ raw_ostream &Out) {
+ // Compute the vtable indices for all the member functions.
+ // Store them in a map keyed by the location so we'll get a sorted table.
+ std::map<MethodVFTableLocation, std::string> IndicesMap;
+ bool HasNonzeroOffset = false;
+
+ for (const auto &I : NewMethods) {
+ const CXXMethodDecl *MD = cast<const CXXMethodDecl>(I.first.getDecl());
+ assert(MD->isVirtual());
+
+ std::string MethodName = PredefinedExpr::ComputeName(
+ PredefinedExpr::PrettyFunctionNoVirtual, MD);
+
+ if (isa<CXXDestructorDecl>(MD)) {
+ IndicesMap[I.second] = MethodName + " [scalar deleting]";
+ } else {
+ IndicesMap[I.second] = MethodName;
+ }
+
+ if (!I.second.VFPtrOffset.isZero() || I.second.VBTableIndex != 0)
+ HasNonzeroOffset = true;
+ }
+
+ // Print the vtable indices for all the member functions.
+ if (!IndicesMap.empty()) {
+ Out << "VFTable indices for ";
+ Out << "'";
+ RD->printQualifiedName(Out);
+ Out << "' (" << IndicesMap.size()
+ << (IndicesMap.size() == 1 ? " entry" : " entries") << ").\n";
+
+ CharUnits LastVFPtrOffset = CharUnits::fromQuantity(-1);
+ uint64_t LastVBIndex = 0;
+ for (const auto &I : IndicesMap) {
+ CharUnits VFPtrOffset = I.first.VFPtrOffset;
+ uint64_t VBIndex = I.first.VBTableIndex;
+ if (HasNonzeroOffset &&
+ (VFPtrOffset != LastVFPtrOffset || VBIndex != LastVBIndex)) {
+ assert(VBIndex > LastVBIndex || VFPtrOffset > LastVFPtrOffset);
+ Out << " -- accessible via ";
+ if (VBIndex)
+ Out << "vbtable index " << VBIndex << ", ";
+ Out << "vfptr at offset " << VFPtrOffset.getQuantity() << " --\n";
+ LastVFPtrOffset = VFPtrOffset;
+ LastVBIndex = VBIndex;
+ }
+
+ uint64_t VTableIndex = I.first.Index;
+ const std::string &MethodName = I.second;
+ Out << llvm::format("%4" PRIu64 " | ", VTableIndex) << MethodName << '\n';
+ }
+ Out << '\n';
+ }
+
+ Out.flush();
+}
+
+const VirtualBaseInfo *MicrosoftVTableContext::computeVBTableRelatedInformation(
+ const CXXRecordDecl *RD) {
+ VirtualBaseInfo *VBI;
+
+ {
+ // Get or create a VBI for RD. Don't hold a reference to the DenseMap cell,
+ // as it may be modified and rehashed under us.
+ VirtualBaseInfo *&Entry = VBaseInfo[RD];
+ if (Entry)
+ return Entry;
+ Entry = VBI = new VirtualBaseInfo();
+ }
+
+ computeVTablePaths(/*ForVBTables=*/true, RD, VBI->VBPtrPaths);
+
+  // First, see if the Derived class shares the vbptr with a non-virtual base.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ if (const CXXRecordDecl *VBPtrBase = Layout.getBaseSharingVBPtr()) {
+ // If the Derived class shares the vbptr with a non-virtual base, the shared
+ // virtual bases come first so that the layout is the same.
+ const VirtualBaseInfo *BaseInfo =
+ computeVBTableRelatedInformation(VBPtrBase);
+ VBI->VBTableIndices.insert(BaseInfo->VBTableIndices.begin(),
+ BaseInfo->VBTableIndices.end());
+ }
+
+ // New vbases are added to the end of the vbtable.
+ // Skip the self entry and vbases visited in the non-virtual base, if any.
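+  // Illustrative example (not from the original change): for
+  //   struct A : virtual V1 {};
+  //   struct B : A, virtual V2 {};
+  // A's vbtable is { self = 0, V1 = 1 }; B shares A's vbptr, keeps V1 at
+  // index 1, and appends V2 at index 2.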
+ unsigned VBTableIndex = 1 + VBI->VBTableIndices.size();
+ for (const auto &VB : RD->vbases()) {
+ const CXXRecordDecl *CurVBase = VB.getType()->getAsCXXRecordDecl();
+ if (!VBI->VBTableIndices.count(CurVBase))
+ VBI->VBTableIndices[CurVBase] = VBTableIndex++;
+ }
+
+ return VBI;
+}
+
+unsigned MicrosoftVTableContext::getVBTableIndex(const CXXRecordDecl *Derived,
+ const CXXRecordDecl *VBase) {
+ const VirtualBaseInfo *VBInfo = computeVBTableRelatedInformation(Derived);
+ assert(VBInfo->VBTableIndices.count(VBase));
+ return VBInfo->VBTableIndices.find(VBase)->second;
+}
+
+const VPtrInfoVector &
+MicrosoftVTableContext::enumerateVBTables(const CXXRecordDecl *RD) {
+ return computeVBTableRelatedInformation(RD)->VBPtrPaths;
+}
+
+const VPtrInfoVector &
+MicrosoftVTableContext::getVFPtrOffsets(const CXXRecordDecl *RD) {
+ computeVTableRelatedInformation(RD);
+
+ assert(VFPtrLocations.count(RD) && "Couldn't find vfptr locations");
+ return *VFPtrLocations[RD];
+}
+
+const VTableLayout &
+MicrosoftVTableContext::getVFTableLayout(const CXXRecordDecl *RD,
+ CharUnits VFPtrOffset) {
+ computeVTableRelatedInformation(RD);
+
+ VFTableIdTy id(RD, VFPtrOffset);
+ assert(VFTableLayouts.count(id) && "Couldn't find a VFTable at this offset");
+ return *VFTableLayouts[id];
+}
+
+const MicrosoftVTableContext::MethodVFTableLocation &
+MicrosoftVTableContext::getMethodVFTableLocation(GlobalDecl GD) {
+ assert(cast<CXXMethodDecl>(GD.getDecl())->isVirtual() &&
+ "Only use this method for virtual methods or dtors");
+ if (isa<CXXDestructorDecl>(GD.getDecl()))
+ assert(GD.getDtorType() == Dtor_Deleting);
+
+ MethodVFTableLocationsTy::iterator I = MethodVFTableLocations.find(GD);
+ if (I != MethodVFTableLocations.end())
+ return I->second;
+
+ const CXXRecordDecl *RD = cast<CXXMethodDecl>(GD.getDecl())->getParent();
+
+ computeVTableRelatedInformation(RD);
+
+ I = MethodVFTableLocations.find(GD);
+ assert(I != MethodVFTableLocations.end() && "Did not find index!");
+ return I->second;
+}
diff --git a/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchFinder.cpp b/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchFinder.cpp
new file mode 100644
index 0000000..847398c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchFinder.cpp
@@ -0,0 +1,995 @@
+//===--- ASTMatchFinder.cpp - Structural query framework ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements an algorithm to efficiently search for matches on AST nodes.
+// Uses memoization to support recursive matches like HasDescendant.
+//
+// The general idea is to visit all AST nodes with a RecursiveASTVisitor,
+// calling the matches(...) method of each matcher we are running on each
+// AST node. The matcher can recurse via the ASTMatchFinder interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Timer.h"
+#include <deque>
+#include <memory>
+#include <set>
+
+namespace clang {
+namespace ast_matchers {
+namespace internal {
+namespace {
+
+typedef MatchFinder::MatchCallback MatchCallback;
+
+// The maximum number of memoization entries to store.
+// 10k has been experimentally found to give a good trade-off
+// of performance vs. memory consumption by running matchers
+// that match on every statement over a very large codebase.
+//
+// FIXME: Do some performance optimization in general and
+// revisit this number; also, put up micro-benchmarks that we can
+// optimize this on.
+static const unsigned MaxMemoizationEntries = 10000;
+
+// We use memoization to avoid running the same matcher on the same
+// AST node twice. This struct is the key for looking up match
+// results. It consists of an ID of the MatcherInterface (for
+// identifying the matcher), a pointer to the AST node and the
+// bound nodes before the matcher was executed.
+//
+// We currently only memoize on nodes whose pointers identify the
+// nodes (\c Stmt and \c Decl, but not \c QualType or \c TypeLoc).
+// For \c QualType and \c TypeLoc it is possible to implement
+// generation of keys for each type.
+// FIXME: Benchmark whether memoization of non-pointer typed nodes
+// provides enough benefit for the additional amount of code.
+struct MatchKey {
+ DynTypedMatcher::MatcherIDType MatcherID;
+ ast_type_traits::DynTypedNode Node;
+ BoundNodesTreeBuilder BoundNodes;
+
+ bool operator<(const MatchKey &Other) const {
+ return std::tie(MatcherID, Node, BoundNodes) <
+ std::tie(Other.MatcherID, Other.Node, Other.BoundNodes);
+ }
+};
+
+// Used to store the result of a match and possibly bound nodes.
+struct MemoizedMatchResult {
+ bool ResultOfMatch;
+ BoundNodesTreeBuilder Nodes;
+};
+
+// A RecursiveASTVisitor that traverses all children or all descendants of
+// a node.
+class MatchChildASTVisitor
+ : public RecursiveASTVisitor<MatchChildASTVisitor> {
+public:
+ typedef RecursiveASTVisitor<MatchChildASTVisitor> VisitorBase;
+
+  // Creates an AST visitor that matches 'Matcher' on all children or
+  // descendants of a traversed node. 'MaxDepth' is the maximum depth
+ // to traverse: use 1 for matching the children and INT_MAX for
+ // matching the descendants.
+ MatchChildASTVisitor(const DynTypedMatcher *Matcher,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ int MaxDepth,
+ ASTMatchFinder::TraversalKind Traversal,
+ ASTMatchFinder::BindKind Bind)
+ : Matcher(Matcher),
+ Finder(Finder),
+ Builder(Builder),
+ CurrentDepth(0),
+ MaxDepth(MaxDepth),
+ Traversal(Traversal),
+ Bind(Bind),
+ Matches(false) {}
+
+ // Returns true if a match is found in the subtree rooted at the
+ // given AST node. This is done via a set of mutually recursive
+  // functions. Here's how the recursion is done (the *wildcard can
+  // actually be Decl, Stmt, QualType, TypeLoc, or NestedNameSpecifier):
+ //
+ // - Traverse(node) calls BaseTraverse(node) when it needs
+ // to visit the descendants of node.
+ // - BaseTraverse(node) then calls (via VisitorBase::Traverse*(node))
+ // Traverse*(c) for each child c of 'node'.
+ // - Traverse*(c) in turn calls Traverse(c), completing the
+ // recursion.
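+  //
+  // Illustrative trace (not from the original change), with MaxDepth == 1 on
+  // an IfStmt node:
+  //   findMatch(If) -> traverse(If) -> baseTraverse(If)
+  //     -> TraverseStmt(Cond) -> traverse(Cond) -> match(Cond)  [depth 1]
+  //     -> TraverseStmt(Then) -> traverse(Then) -> match(Then)  [depth 1]
+  // match() only fires while 0 < CurrentDepth <= MaxDepth, so grandchildren
+  // are traversed but never matched.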
+ bool findMatch(const ast_type_traits::DynTypedNode &DynNode) {
+ reset();
+ if (const Decl *D = DynNode.get<Decl>())
+ traverse(*D);
+ else if (const Stmt *S = DynNode.get<Stmt>())
+ traverse(*S);
+ else if (const NestedNameSpecifier *NNS =
+ DynNode.get<NestedNameSpecifier>())
+ traverse(*NNS);
+ else if (const NestedNameSpecifierLoc *NNSLoc =
+ DynNode.get<NestedNameSpecifierLoc>())
+ traverse(*NNSLoc);
+ else if (const QualType *Q = DynNode.get<QualType>())
+ traverse(*Q);
+ else if (const TypeLoc *T = DynNode.get<TypeLoc>())
+ traverse(*T);
+ // FIXME: Add other base types after adding tests.
+
+ // It's OK to always overwrite the bound nodes, as if there was
+ // no match in this recursive branch, the result set is empty
+ // anyway.
+ *Builder = ResultBindings;
+
+ return Matches;
+ }
+
+ // The following are overriding methods from the base visitor class.
+  // They are public only to allow CRTP to work. They are *not* part
+ // of the public API of this class.
+ bool TraverseDecl(Decl *DeclNode) {
+ ScopedIncrement ScopedDepth(&CurrentDepth);
+ return (DeclNode == nullptr) || traverse(*DeclNode);
+ }
+ bool TraverseStmt(Stmt *StmtNode) {
+ ScopedIncrement ScopedDepth(&CurrentDepth);
+ const Stmt *StmtToTraverse = StmtNode;
+ if (Traversal ==
+ ASTMatchFinder::TK_IgnoreImplicitCastsAndParentheses) {
+ const Expr *ExprNode = dyn_cast_or_null<Expr>(StmtNode);
+ if (ExprNode) {
+ StmtToTraverse = ExprNode->IgnoreParenImpCasts();
+ }
+ }
+ return (StmtToTraverse == nullptr) || traverse(*StmtToTraverse);
+ }
+ // We assume that the QualType and the contained type are on the same
+ // hierarchy level. Thus, we try to match either of them.
+ bool TraverseType(QualType TypeNode) {
+ if (TypeNode.isNull())
+ return true;
+ ScopedIncrement ScopedDepth(&CurrentDepth);
+ // Match the Type.
+ if (!match(*TypeNode))
+ return false;
+ // The QualType is matched inside traverse.
+ return traverse(TypeNode);
+ }
+ // We assume that the TypeLoc, contained QualType and contained Type all are
+ // on the same hierarchy level. Thus, we try to match all of them.
+ bool TraverseTypeLoc(TypeLoc TypeLocNode) {
+ if (TypeLocNode.isNull())
+ return true;
+ ScopedIncrement ScopedDepth(&CurrentDepth);
+ // Match the Type.
+ if (!match(*TypeLocNode.getType()))
+ return false;
+ // Match the QualType.
+ if (!match(TypeLocNode.getType()))
+ return false;
+ // The TypeLoc is matched inside traverse.
+ return traverse(TypeLocNode);
+ }
+ bool TraverseNestedNameSpecifier(NestedNameSpecifier *NNS) {
+ ScopedIncrement ScopedDepth(&CurrentDepth);
+ return (NNS == nullptr) || traverse(*NNS);
+ }
+ bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS) {
+ if (!NNS)
+ return true;
+ ScopedIncrement ScopedDepth(&CurrentDepth);
+ if (!match(*NNS.getNestedNameSpecifier()))
+ return false;
+ return traverse(NNS);
+ }
+
+ bool shouldVisitTemplateInstantiations() const { return true; }
+ bool shouldVisitImplicitCode() const { return true; }
+
+private:
+ // Used for updating the depth during traversal.
+ struct ScopedIncrement {
+ explicit ScopedIncrement(int *Depth) : Depth(Depth) { ++(*Depth); }
+ ~ScopedIncrement() { --(*Depth); }
+
+ private:
+ int *Depth;
+ };
+
+ // Resets the state of this object.
+ void reset() {
+ Matches = false;
+ CurrentDepth = 0;
+ }
+
+ // Forwards the call to the corresponding Traverse*() method in the
+ // base visitor class.
+ bool baseTraverse(const Decl &DeclNode) {
+ return VisitorBase::TraverseDecl(const_cast<Decl*>(&DeclNode));
+ }
+ bool baseTraverse(const Stmt &StmtNode) {
+ return VisitorBase::TraverseStmt(const_cast<Stmt*>(&StmtNode));
+ }
+ bool baseTraverse(QualType TypeNode) {
+ return VisitorBase::TraverseType(TypeNode);
+ }
+ bool baseTraverse(TypeLoc TypeLocNode) {
+ return VisitorBase::TraverseTypeLoc(TypeLocNode);
+ }
+ bool baseTraverse(const NestedNameSpecifier &NNS) {
+ return VisitorBase::TraverseNestedNameSpecifier(
+ const_cast<NestedNameSpecifier*>(&NNS));
+ }
+ bool baseTraverse(NestedNameSpecifierLoc NNS) {
+ return VisitorBase::TraverseNestedNameSpecifierLoc(NNS);
+ }
+
+ // Sets 'Matched' to true if 'Matcher' matches 'Node' and:
+ // 0 < CurrentDepth <= MaxDepth.
+ //
+ // Returns 'true' if traversal should continue after this function
+ // returns, i.e. if no match is found or 'Bind' is 'BK_All'.
+ template <typename T>
+ bool match(const T &Node) {
+ if (CurrentDepth == 0 || CurrentDepth > MaxDepth) {
+ return true;
+ }
+ if (Bind != ASTMatchFinder::BK_All) {
+ BoundNodesTreeBuilder RecursiveBuilder(*Builder);
+ if (Matcher->matches(ast_type_traits::DynTypedNode::create(Node), Finder,
+ &RecursiveBuilder)) {
+ Matches = true;
+ ResultBindings.addMatch(RecursiveBuilder);
+ return false; // Abort as soon as a match is found.
+ }
+ } else {
+ BoundNodesTreeBuilder RecursiveBuilder(*Builder);
+ if (Matcher->matches(ast_type_traits::DynTypedNode::create(Node), Finder,
+ &RecursiveBuilder)) {
+ // After the first match the matcher succeeds.
+ Matches = true;
+ ResultBindings.addMatch(RecursiveBuilder);
+ }
+ }
+ return true;
+ }
+
+ // Traverses the subtree rooted at 'Node'; returns true if the
+ // traversal should continue after this function returns.
+ template <typename T>
+ bool traverse(const T &Node) {
+ static_assert(IsBaseType<T>::value,
+ "traverse can only be instantiated with base type");
+ if (!match(Node))
+ return false;
+ return baseTraverse(Node);
+ }
+
+ const DynTypedMatcher *const Matcher;
+ ASTMatchFinder *const Finder;
+ BoundNodesTreeBuilder *const Builder;
+ BoundNodesTreeBuilder ResultBindings;
+ int CurrentDepth;
+ const int MaxDepth;
+ const ASTMatchFinder::TraversalKind Traversal;
+ const ASTMatchFinder::BindKind Bind;
+ bool Matches;
+};
+
+// Controls the outermost traversal of the AST and allows matching multiple
+// matchers.
+class MatchASTVisitor : public RecursiveASTVisitor<MatchASTVisitor>,
+ public ASTMatchFinder {
+public:
+ MatchASTVisitor(const MatchFinder::MatchersByType *Matchers,
+ const MatchFinder::MatchFinderOptions &Options)
+ : Matchers(Matchers), Options(Options), ActiveASTContext(nullptr) {}
+
+ ~MatchASTVisitor() override {
+ if (Options.CheckProfiling) {
+ Options.CheckProfiling->Records = std::move(TimeByBucket);
+ }
+ }
+
+ void onStartOfTranslationUnit() {
+ const bool EnableCheckProfiling = Options.CheckProfiling.hasValue();
+ TimeBucketRegion Timer;
+ for (MatchCallback *MC : Matchers->AllCallbacks) {
+ if (EnableCheckProfiling)
+ Timer.setBucket(&TimeByBucket[MC->getID()]);
+ MC->onStartOfTranslationUnit();
+ }
+ }
+
+ void onEndOfTranslationUnit() {
+ const bool EnableCheckProfiling = Options.CheckProfiling.hasValue();
+ TimeBucketRegion Timer;
+ for (MatchCallback *MC : Matchers->AllCallbacks) {
+ if (EnableCheckProfiling)
+ Timer.setBucket(&TimeByBucket[MC->getID()]);
+ MC->onEndOfTranslationUnit();
+ }
+ }
+
+ void set_active_ast_context(ASTContext *NewActiveASTContext) {
+ ActiveASTContext = NewActiveASTContext;
+ }
+
+ // The following Visit*() and Traverse*() functions "override"
+ // methods in RecursiveASTVisitor.
+
+ bool VisitTypedefNameDecl(TypedefNameDecl *DeclNode) {
+ // When we see 'typedef A B', we add name 'B' to the set of names
+ // A's canonical type maps to. This is necessary for implementing
+ // isDerivedFrom(x) properly, where x can be the name of the base
+ // class or any of its aliases.
+ //
+ // In general, the is-alias-of (as defined by typedefs) relation
+ // is tree-shaped, as you can typedef a type more than once. For
+ // example,
+ //
+ // typedef A B;
+ // typedef A C;
+ // typedef C D;
+ // typedef C E;
+ //
+ // gives you
+ //
+ // A
+ // |- B
+ // `- C
+ // |- D
+ // `- E
+ //
+ // It is wrong to assume that the relation is a chain. A correct
+ // implementation of isDerivedFrom() needs to recognize that B and
+ // E are aliases, even though neither is a typedef of the other.
+ // Therefore, we cannot simply walk through one typedef chain to
+ // find out whether the type name matches.
+ const Type *TypeNode = DeclNode->getUnderlyingType().getTypePtr();
+ const Type *CanonicalType = // root of the typedef tree
+ ActiveASTContext->getCanonicalType(TypeNode);
+ TypeAliases[CanonicalType].insert(DeclNode);
+ return true;
+ }
+
+ bool TraverseDecl(Decl *DeclNode);
+ bool TraverseStmt(Stmt *StmtNode);
+ bool TraverseType(QualType TypeNode);
+ bool TraverseTypeLoc(TypeLoc TypeNode);
+ bool TraverseNestedNameSpecifier(NestedNameSpecifier *NNS);
+ bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS);
+
+  // Matches children or descendants of 'Node' with 'Matcher'.
+ bool memoizedMatchesRecursively(const ast_type_traits::DynTypedNode &Node,
+ const DynTypedMatcher &Matcher,
+ BoundNodesTreeBuilder *Builder, int MaxDepth,
+ TraversalKind Traversal, BindKind Bind) {
+ // For AST-nodes that don't have an identity, we can't memoize.
+ if (!Node.getMemoizationData() || !Builder->isComparable())
+ return matchesRecursively(Node, Matcher, Builder, MaxDepth, Traversal,
+ Bind);
+
+ MatchKey Key;
+ Key.MatcherID = Matcher.getID();
+ Key.Node = Node;
+ // Note that we key on the bindings *before* the match.
+ Key.BoundNodes = *Builder;
+
+ MemoizationMap::iterator I = ResultCache.find(Key);
+ if (I != ResultCache.end()) {
+ *Builder = I->second.Nodes;
+ return I->second.ResultOfMatch;
+ }
+
+ MemoizedMatchResult Result;
+ Result.Nodes = *Builder;
+ Result.ResultOfMatch = matchesRecursively(Node, Matcher, &Result.Nodes,
+ MaxDepth, Traversal, Bind);
+
+ MemoizedMatchResult &CachedResult = ResultCache[Key];
+ CachedResult = std::move(Result);
+
+ *Builder = CachedResult.Nodes;
+ return CachedResult.ResultOfMatch;
+ }
+
+  // Matches children or descendants of 'Node' with 'Matcher'.
+ bool matchesRecursively(const ast_type_traits::DynTypedNode &Node,
+ const DynTypedMatcher &Matcher,
+ BoundNodesTreeBuilder *Builder, int MaxDepth,
+ TraversalKind Traversal, BindKind Bind) {
+ MatchChildASTVisitor Visitor(
+ &Matcher, this, Builder, MaxDepth, Traversal, Bind);
+ return Visitor.findMatch(Node);
+ }
+
+ bool classIsDerivedFrom(const CXXRecordDecl *Declaration,
+ const Matcher<NamedDecl> &Base,
+ BoundNodesTreeBuilder *Builder) override;
+
+ // Implements ASTMatchFinder::matchesChildOf.
+ bool matchesChildOf(const ast_type_traits::DynTypedNode &Node,
+ const DynTypedMatcher &Matcher,
+ BoundNodesTreeBuilder *Builder,
+ TraversalKind Traversal,
+ BindKind Bind) override {
+ if (ResultCache.size() > MaxMemoizationEntries)
+ ResultCache.clear();
+ return memoizedMatchesRecursively(Node, Matcher, Builder, 1, Traversal,
+ Bind);
+ }
+ // Implements ASTMatchFinder::matchesDescendantOf.
+ bool matchesDescendantOf(const ast_type_traits::DynTypedNode &Node,
+ const DynTypedMatcher &Matcher,
+ BoundNodesTreeBuilder *Builder,
+ BindKind Bind) override {
+ if (ResultCache.size() > MaxMemoizationEntries)
+ ResultCache.clear();
+ return memoizedMatchesRecursively(Node, Matcher, Builder, INT_MAX,
+ TK_AsIs, Bind);
+ }
+ // Implements ASTMatchFinder::matchesAncestorOf.
+ bool matchesAncestorOf(const ast_type_traits::DynTypedNode &Node,
+ const DynTypedMatcher &Matcher,
+ BoundNodesTreeBuilder *Builder,
+ AncestorMatchMode MatchMode) override {
+ // Reset the cache outside of the recursive call to make sure we
+ // don't invalidate any iterators.
+ if (ResultCache.size() > MaxMemoizationEntries)
+ ResultCache.clear();
+ return memoizedMatchesAncestorOfRecursively(Node, Matcher, Builder,
+ MatchMode);
+ }
+
+ // Matches all registered matchers on the given node and calls the
+ // result callback for every node that matches.
+ void match(const ast_type_traits::DynTypedNode &Node) {
+ // FIXME: Improve this with a switch or a visitor pattern.
+ if (auto *N = Node.get<Decl>()) {
+ match(*N);
+ } else if (auto *N = Node.get<Stmt>()) {
+ match(*N);
+ } else if (auto *N = Node.get<Type>()) {
+ match(*N);
+ } else if (auto *N = Node.get<QualType>()) {
+ match(*N);
+ } else if (auto *N = Node.get<NestedNameSpecifier>()) {
+ match(*N);
+ } else if (auto *N = Node.get<NestedNameSpecifierLoc>()) {
+ match(*N);
+ } else if (auto *N = Node.get<TypeLoc>()) {
+ match(*N);
+ }
+ }
+
+ template <typename T> void match(const T &Node) {
+ matchDispatch(&Node);
+ }
+
+ // Implements ASTMatchFinder::getASTContext.
+ ASTContext &getASTContext() const override { return *ActiveASTContext; }
+
+ bool shouldVisitTemplateInstantiations() const { return true; }
+ bool shouldVisitImplicitCode() const { return true; }
+
+private:
+ class TimeBucketRegion {
+ public:
+ TimeBucketRegion() : Bucket(nullptr) {}
+ ~TimeBucketRegion() { setBucket(nullptr); }
+
+ /// \brief Start timing for \p NewBucket.
+ ///
+ /// If there was a bucket already set, it will finish the timing for that
+ /// other bucket.
+ /// \p NewBucket will be timed until the next call to \c setBucket() or
+ /// until the \c TimeBucketRegion is destroyed.
+ /// If \p NewBucket is the same as the currently timed bucket, this call
+ /// does nothing.
+ void setBucket(llvm::TimeRecord *NewBucket) {
+ if (Bucket != NewBucket) {
+ auto Now = llvm::TimeRecord::getCurrentTime(true);
+ if (Bucket)
+ *Bucket += Now;
+ if (NewBucket)
+ *NewBucket -= Now;
+ Bucket = NewBucket;
+ }
+ }
+
+ private:
+ llvm::TimeRecord *Bucket;
+ };
+
+ /// \brief Runs all the \p Matchers on \p Node.
+ ///
+ /// Used by \c matchDispatch() below.
+ template <typename T, typename MC>
+ void matchWithoutFilter(const T &Node, const MC &Matchers) {
+ const bool EnableCheckProfiling = Options.CheckProfiling.hasValue();
+ TimeBucketRegion Timer;
+ for (const auto &MP : Matchers) {
+ if (EnableCheckProfiling)
+ Timer.setBucket(&TimeByBucket[MP.second->getID()]);
+ BoundNodesTreeBuilder Builder;
+ if (MP.first.matches(Node, this, &Builder)) {
+ MatchVisitor Visitor(ActiveASTContext, MP.second);
+ Builder.visitMatches(&Visitor);
+ }
+ }
+ }
+
+ void matchWithFilter(const ast_type_traits::DynTypedNode &DynNode) {
+ auto Kind = DynNode.getNodeKind();
+ auto it = MatcherFiltersMap.find(Kind);
+ const auto &Filter =
+ it != MatcherFiltersMap.end() ? it->second : getFilterForKind(Kind);
+
+ if (Filter.empty())
+ return;
+
+ const bool EnableCheckProfiling = Options.CheckProfiling.hasValue();
+ TimeBucketRegion Timer;
+ auto &Matchers = this->Matchers->DeclOrStmt;
+ for (unsigned short I : Filter) {
+ auto &MP = Matchers[I];
+ if (EnableCheckProfiling)
+ Timer.setBucket(&TimeByBucket[MP.second->getID()]);
+ BoundNodesTreeBuilder Builder;
+ if (MP.first.matchesNoKindCheck(DynNode, this, &Builder)) {
+ MatchVisitor Visitor(ActiveASTContext, MP.second);
+ Builder.visitMatches(&Visitor);
+ }
+ }
+ }
+
+ const std::vector<unsigned short> &
+ getFilterForKind(ast_type_traits::ASTNodeKind Kind) {
+ auto &Filter = MatcherFiltersMap[Kind];
+ auto &Matchers = this->Matchers->DeclOrStmt;
+ assert((Matchers.size() < USHRT_MAX) && "Too many matchers.");
+ for (unsigned I = 0, E = Matchers.size(); I != E; ++I) {
+ if (Matchers[I].first.canMatchNodesOfKind(Kind)) {
+ Filter.push_back(I);
+ }
+ }
+ return Filter;
+ }
+
+ /// @{
+ /// \brief Overloads to pair the different node types to their matchers.
+ void matchDispatch(const Decl *Node) {
+ return matchWithFilter(ast_type_traits::DynTypedNode::create(*Node));
+ }
+ void matchDispatch(const Stmt *Node) {
+ return matchWithFilter(ast_type_traits::DynTypedNode::create(*Node));
+ }
+
+ void matchDispatch(const Type *Node) {
+ matchWithoutFilter(QualType(Node, 0), Matchers->Type);
+ }
+ void matchDispatch(const TypeLoc *Node) {
+ matchWithoutFilter(*Node, Matchers->TypeLoc);
+ }
+ void matchDispatch(const QualType *Node) {
+ matchWithoutFilter(*Node, Matchers->Type);
+ }
+ void matchDispatch(const NestedNameSpecifier *Node) {
+ matchWithoutFilter(*Node, Matchers->NestedNameSpecifier);
+ }
+ void matchDispatch(const NestedNameSpecifierLoc *Node) {
+ matchWithoutFilter(*Node, Matchers->NestedNameSpecifierLoc);
+ }
+ void matchDispatch(const void *) { /* Do nothing. */ }
+ /// @}
+
+ // Returns whether an ancestor of \p Node matches \p Matcher.
+ //
+  // The order of matching (which can lead to different nodes being bound
+  // when there are multiple matches) is breadth-first search.
+ //
+ // To allow memoization in the very common case of having deeply nested
+ // expressions inside a template function, we first walk up the AST, memoizing
+ // the result of the match along the way, as long as there is only a single
+ // parent.
+ //
+ // Once there are multiple parents, the breadth first search order does not
+ // allow simple memoization on the ancestors. Thus, we only memoize as long
+ // as there is a single parent.
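+  //
+  // Illustrative example (not from the original change): for a node deep
+  // inside a function body, every step up to the FunctionDecl has exactly
+  // one parent, so each step of the walk is memoized; only at a node with
+  // several parents (e.g. one shared between template instantiations) do we
+  // fall back to the BFS over the parent set below.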
+ bool memoizedMatchesAncestorOfRecursively(
+ const ast_type_traits::DynTypedNode &Node, const DynTypedMatcher &Matcher,
+ BoundNodesTreeBuilder *Builder, AncestorMatchMode MatchMode) {
+ if (Node.get<TranslationUnitDecl>() ==
+ ActiveASTContext->getTranslationUnitDecl())
+ return false;
+
+ MatchKey Key;
+ Key.MatcherID = Matcher.getID();
+ Key.Node = Node;
+ Key.BoundNodes = *Builder;
+
+ // Note that we cannot use insert and reuse the iterator, as recursive
+ // calls to match might invalidate the result cache iterators.
+ MemoizationMap::iterator I = ResultCache.find(Key);
+ if (I != ResultCache.end()) {
+ *Builder = I->second.Nodes;
+ return I->second.ResultOfMatch;
+ }
+
+ MemoizedMatchResult Result;
+ Result.ResultOfMatch = false;
+ Result.Nodes = *Builder;
+
+ const auto &Parents = ActiveASTContext->getParents(Node);
+ assert(!Parents.empty() && "Found node that is not in the parent map.");
+ if (Parents.size() == 1) {
+ // Only one parent - do recursive memoization.
+ const ast_type_traits::DynTypedNode Parent = Parents[0];
+ if (Matcher.matches(Parent, this, &Result.Nodes)) {
+ Result.ResultOfMatch = true;
+ } else if (MatchMode != ASTMatchFinder::AMM_ParentOnly) {
+ // Reset the results to not include the bound nodes from the failed
+ // match above.
+ Result.Nodes = *Builder;
+ Result.ResultOfMatch = memoizedMatchesAncestorOfRecursively(
+ Parent, Matcher, &Result.Nodes, MatchMode);
+ // Once we get back from the recursive call, the result will be the
+ // same as the parent's result.
+ }
+ } else {
+ // Multiple parents - BFS over the rest of the nodes.
+ llvm::DenseSet<const void *> Visited;
+ std::deque<ast_type_traits::DynTypedNode> Queue(Parents.begin(),
+ Parents.end());
+ while (!Queue.empty()) {
+ Result.Nodes = *Builder;
+ if (Matcher.matches(Queue.front(), this, &Result.Nodes)) {
+ Result.ResultOfMatch = true;
+ break;
+ }
+ if (MatchMode != ASTMatchFinder::AMM_ParentOnly) {
+ for (const auto &Parent :
+ ActiveASTContext->getParents(Queue.front())) {
+ // Make sure we do not visit the same node twice.
+ // Otherwise, we'll visit the common ancestors as often as there
+ // are splits on the way down.
+ if (Visited.insert(Parent.getMemoizationData()).second)
+ Queue.push_back(Parent);
+ }
+ }
+ Queue.pop_front();
+ }
+ }
+
+ MemoizedMatchResult &CachedResult = ResultCache[Key];
+ CachedResult = std::move(Result);
+
+ *Builder = CachedResult.Nodes;
+ return CachedResult.ResultOfMatch;
+ }
+
+ // Implements a BoundNodesTree::Visitor that calls a MatchCallback with
+ // the aggregated bound nodes for each match.
+ class MatchVisitor : public BoundNodesTreeBuilder::Visitor {
+ public:
+ MatchVisitor(ASTContext* Context,
+ MatchFinder::MatchCallback* Callback)
+ : Context(Context),
+ Callback(Callback) {}
+
+ void visitMatch(const BoundNodes& BoundNodesView) override {
+ Callback->run(MatchFinder::MatchResult(BoundNodesView, Context));
+ }
+
+ private:
+ ASTContext* Context;
+ MatchFinder::MatchCallback* Callback;
+ };
+
+ // Returns true if 'TypeNode' has an alias that matches the given matcher.
+ bool typeHasMatchingAlias(const Type *TypeNode,
+ const Matcher<NamedDecl> Matcher,
+ BoundNodesTreeBuilder *Builder) {
+ const Type *const CanonicalType =
+ ActiveASTContext->getCanonicalType(TypeNode);
+ for (const TypedefNameDecl *Alias : TypeAliases.lookup(CanonicalType)) {
+ BoundNodesTreeBuilder Result(*Builder);
+ if (Matcher.matches(*Alias, this, &Result)) {
+ *Builder = std::move(Result);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /// \brief Bucket to record map.
+ ///
+ /// Used to get the appropriate bucket for each matcher.
+ llvm::StringMap<llvm::TimeRecord> TimeByBucket;
+
+ const MatchFinder::MatchersByType *Matchers;
+
+ /// \brief Filtered list of matcher indices for each matcher kind.
+ ///
+ /// \c Decl and \c Stmt toplevel matchers usually apply to a specific node
+ /// kind (and derived kinds) so it is a waste to try every matcher on every
+ /// node.
+ /// We precalculate a list of matchers that pass the toplevel restrict check.
+  /// This also allows us to skip the restrict check at matching time. See
+  /// the use of \c matchesNoKindCheck() above.
+ llvm::DenseMap<ast_type_traits::ASTNodeKind, std::vector<unsigned short>>
+ MatcherFiltersMap;
+
+ const MatchFinder::MatchFinderOptions &Options;
+ ASTContext *ActiveASTContext;
+
+  // Maps a canonical type to its TypedefNameDecls.
+ llvm::DenseMap<const Type*, std::set<const TypedefNameDecl*> > TypeAliases;
+
+ // Maps (matcher, node) -> the match result for memoization.
+ typedef std::map<MatchKey, MemoizedMatchResult> MemoizationMap;
+ MemoizationMap ResultCache;
+};
+
+static CXXRecordDecl *getAsCXXRecordDecl(const Type *TypeNode) {
+ // Type::getAs<...>() drills through typedefs.
+ if (TypeNode->getAs<DependentNameType>() != nullptr ||
+ TypeNode->getAs<DependentTemplateSpecializationType>() != nullptr ||
+ TypeNode->getAs<TemplateTypeParmType>() != nullptr)
+ // Dependent names and template TypeNode parameters will be matched when
+ // the template is instantiated.
+ return nullptr;
+ TemplateSpecializationType const *TemplateType =
+ TypeNode->getAs<TemplateSpecializationType>();
+ if (!TemplateType) {
+ return TypeNode->getAsCXXRecordDecl();
+ }
+ if (TemplateType->getTemplateName().isDependent())
+ // Dependent template specializations will be matched when the
+ // template is instantiated.
+ return nullptr;
+
+ // For template specialization types which are specializing a template
+ // declaration which is an explicit or partial specialization of another
+ // template declaration, getAsCXXRecordDecl() returns the corresponding
+ // ClassTemplateSpecializationDecl.
+ //
+ // For template specialization types which are specializing a template
+ // declaration which is neither an explicit nor partial specialization of
+ // another template declaration, getAsCXXRecordDecl() returns NULL and
+ // we get the CXXRecordDecl of the templated declaration.
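+  //
+  // Illustrative example (not from the original change): given
+  //   template <typename T> struct S {};
+  //   template <> struct S<int> {};
+  // the type S<int> resolves to its ClassTemplateSpecializationDecl below,
+  // while a use of S whose specialization decl is absent falls through to
+  // the templated CXXRecordDecl of S.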
+ CXXRecordDecl *SpecializationDecl = TemplateType->getAsCXXRecordDecl();
+ if (SpecializationDecl) {
+ return SpecializationDecl;
+ }
+ NamedDecl *Templated =
+ TemplateType->getTemplateName().getAsTemplateDecl()->getTemplatedDecl();
+ if (CXXRecordDecl *TemplatedRecord = dyn_cast<CXXRecordDecl>(Templated)) {
+ return TemplatedRecord;
+ }
+ // Now it can still be that we have an alias template.
+ TypeAliasDecl *AliasDecl = dyn_cast<TypeAliasDecl>(Templated);
+ assert(AliasDecl);
+ return getAsCXXRecordDecl(AliasDecl->getUnderlyingType().getTypePtr());
+}
+
+// Returns true if the given class is directly or indirectly derived
+// from a base type with the given name. A class is not considered to be
+// derived from itself.
+bool MatchASTVisitor::classIsDerivedFrom(const CXXRecordDecl *Declaration,
+ const Matcher<NamedDecl> &Base,
+ BoundNodesTreeBuilder *Builder) {
+ if (!Declaration->hasDefinition())
+ return false;
+ for (const auto &It : Declaration->bases()) {
+ const Type *TypeNode = It.getType().getTypePtr();
+
+ if (typeHasMatchingAlias(TypeNode, Base, Builder))
+ return true;
+
+ CXXRecordDecl *ClassDecl = getAsCXXRecordDecl(TypeNode);
+ if (!ClassDecl)
+ continue;
+ if (ClassDecl == Declaration) {
+ // This can happen for recursive template definitions; if the
+ // current declaration did not match, we can safely return false.
+ return false;
+ }
+ BoundNodesTreeBuilder Result(*Builder);
+ if (Base.matches(*ClassDecl, this, &Result)) {
+ *Builder = std::move(Result);
+ return true;
+ }
+ if (classIsDerivedFrom(ClassDecl, Base, Builder))
+ return true;
+ }
+ return false;
+}
+
+bool MatchASTVisitor::TraverseDecl(Decl *DeclNode) {
+ if (!DeclNode) {
+ return true;
+ }
+ match(*DeclNode);
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseDecl(DeclNode);
+}
+
+bool MatchASTVisitor::TraverseStmt(Stmt *StmtNode) {
+ if (!StmtNode) {
+ return true;
+ }
+ match(*StmtNode);
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseStmt(StmtNode);
+}
+
+bool MatchASTVisitor::TraverseType(QualType TypeNode) {
+ match(TypeNode);
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseType(TypeNode);
+}
+
+bool MatchASTVisitor::TraverseTypeLoc(TypeLoc TypeLocNode) {
+ // The RecursiveASTVisitor only visits types if they're not within TypeLocs.
+ // We still want to find those types via matchers, so we match them here. Note
+ // that the TypeLocs are structurally a shadow-hierarchy to the expressed
+ // type, so we visit all involved parts of a compound type when matching on
+ // each TypeLoc.
+ match(TypeLocNode);
+ match(TypeLocNode.getType());
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseTypeLoc(TypeLocNode);
+}
+
+bool MatchASTVisitor::TraverseNestedNameSpecifier(NestedNameSpecifier *NNS) {
+ match(*NNS);
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseNestedNameSpecifier(NNS);
+}
+
+bool MatchASTVisitor::TraverseNestedNameSpecifierLoc(
+ NestedNameSpecifierLoc NNS) {
+ if (!NNS)
+ return true;
+
+ match(NNS);
+
+ // We only match the nested name specifier here (as opposed to traversing it)
+ // because the traversal is already done in the parallel "Loc"-hierarchy.
+ if (NNS.hasQualifier())
+ match(*NNS.getNestedNameSpecifier());
+ return
+ RecursiveASTVisitor<MatchASTVisitor>::TraverseNestedNameSpecifierLoc(NNS);
+}
+
+class MatchASTConsumer : public ASTConsumer {
+public:
+ MatchASTConsumer(MatchFinder *Finder,
+ MatchFinder::ParsingDoneTestCallback *ParsingDone)
+ : Finder(Finder), ParsingDone(ParsingDone) {}
+
+private:
+ void HandleTranslationUnit(ASTContext &Context) override {
+ if (ParsingDone != nullptr) {
+ ParsingDone->run();
+ }
+ Finder->matchAST(Context);
+ }
+
+ MatchFinder *Finder;
+ MatchFinder::ParsingDoneTestCallback *ParsingDone;
+};
+
+} // end namespace
+} // end namespace internal
+
+MatchFinder::MatchResult::MatchResult(const BoundNodes &Nodes,
+ ASTContext *Context)
+ : Nodes(Nodes), Context(Context),
+ SourceManager(&Context->getSourceManager()) {}
+
+MatchFinder::MatchCallback::~MatchCallback() {}
+MatchFinder::ParsingDoneTestCallback::~ParsingDoneTestCallback() {}
+
+MatchFinder::MatchFinder(MatchFinderOptions Options)
+ : Options(std::move(Options)), ParsingDone(nullptr) {}
+
+MatchFinder::~MatchFinder() {}
+
+void MatchFinder::addMatcher(const DeclarationMatcher &NodeMatch,
+ MatchCallback *Action) {
+ Matchers.DeclOrStmt.emplace_back(NodeMatch, Action);
+ Matchers.AllCallbacks.insert(Action);
+}
+
+void MatchFinder::addMatcher(const TypeMatcher &NodeMatch,
+ MatchCallback *Action) {
+ Matchers.Type.emplace_back(NodeMatch, Action);
+ Matchers.AllCallbacks.insert(Action);
+}
+
+void MatchFinder::addMatcher(const StatementMatcher &NodeMatch,
+ MatchCallback *Action) {
+ Matchers.DeclOrStmt.emplace_back(NodeMatch, Action);
+ Matchers.AllCallbacks.insert(Action);
+}
+
+void MatchFinder::addMatcher(const NestedNameSpecifierMatcher &NodeMatch,
+ MatchCallback *Action) {
+ Matchers.NestedNameSpecifier.emplace_back(NodeMatch, Action);
+ Matchers.AllCallbacks.insert(Action);
+}
+
+void MatchFinder::addMatcher(const NestedNameSpecifierLocMatcher &NodeMatch,
+ MatchCallback *Action) {
+ Matchers.NestedNameSpecifierLoc.emplace_back(NodeMatch, Action);
+ Matchers.AllCallbacks.insert(Action);
+}
+
+void MatchFinder::addMatcher(const TypeLocMatcher &NodeMatch,
+ MatchCallback *Action) {
+ Matchers.TypeLoc.emplace_back(NodeMatch, Action);
+ Matchers.AllCallbacks.insert(Action);
+}
+
+bool MatchFinder::addDynamicMatcher(const internal::DynTypedMatcher &NodeMatch,
+ MatchCallback *Action) {
+ if (NodeMatch.canConvertTo<Decl>()) {
+ addMatcher(NodeMatch.convertTo<Decl>(), Action);
+ return true;
+ } else if (NodeMatch.canConvertTo<QualType>()) {
+ addMatcher(NodeMatch.convertTo<QualType>(), Action);
+ return true;
+ } else if (NodeMatch.canConvertTo<Stmt>()) {
+ addMatcher(NodeMatch.convertTo<Stmt>(), Action);
+ return true;
+ } else if (NodeMatch.canConvertTo<NestedNameSpecifier>()) {
+ addMatcher(NodeMatch.convertTo<NestedNameSpecifier>(), Action);
+ return true;
+ } else if (NodeMatch.canConvertTo<NestedNameSpecifierLoc>()) {
+ addMatcher(NodeMatch.convertTo<NestedNameSpecifierLoc>(), Action);
+ return true;
+ } else if (NodeMatch.canConvertTo<TypeLoc>()) {
+ addMatcher(NodeMatch.convertTo<TypeLoc>(), Action);
+ return true;
+ }
+ return false;
+}
+
+std::unique_ptr<ASTConsumer> MatchFinder::newASTConsumer() {
+ return llvm::make_unique<internal::MatchASTConsumer>(this, ParsingDone);
+}
+
+void MatchFinder::match(const clang::ast_type_traits::DynTypedNode &Node,
+ ASTContext &Context) {
+ internal::MatchASTVisitor Visitor(&Matchers, Options);
+ Visitor.set_active_ast_context(&Context);
+ Visitor.match(Node);
+}
+
+void MatchFinder::matchAST(ASTContext &Context) {
+ internal::MatchASTVisitor Visitor(&Matchers, Options);
+ Visitor.set_active_ast_context(&Context);
+ Visitor.onStartOfTranslationUnit();
+ Visitor.TraverseDecl(Context.getTranslationUnitDecl());
+ Visitor.onEndOfTranslationUnit();
+}
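+// Illustrative usage sketch (not part of the original change; relies on the
+// public ASTMatchers API):
+//
+//   struct Report : MatchFinder::MatchCallback {
+//     void run(const MatchFinder::MatchResult &R) override {
+//       if (const auto *FD = R.Nodes.getNodeAs<FunctionDecl>("fn"))
+//         FD->dumpColor();
+//     }
+//   };
+//   MatchFinder Finder;
+//   Report CB;
+//   Finder.addMatcher(functionDecl(isDefinition()).bind("fn"), &CB);
+//   // Then run it via Finder.matchAST(Context) or newASTConsumer().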
+
+void MatchFinder::registerTestCallbackAfterParsing(
+ MatchFinder::ParsingDoneTestCallback *NewParsingDone) {
+ ParsingDone = NewParsingDone;
+}
+
+StringRef MatchFinder::MatchCallback::getID() const { return "<unknown>"; }
+
+} // end namespace ast_matchers
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchersInternal.cpp b/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
new file mode 100644
index 0000000..463cf0b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -0,0 +1,344 @@
+//===--- ASTMatchersInternal.cpp - Structural query framework -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements the base layer of the matcher framework.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/ASTMatchers/ASTMatchersInternal.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/ManagedStatic.h"
+
+namespace clang {
+namespace ast_matchers {
+namespace internal {
+
+bool NotUnaryOperator(const ast_type_traits::DynTypedNode &DynNode,
+ ASTMatchFinder *Finder, BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers);
+
+bool AllOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers);
+
+bool EachOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers);
+
+bool AnyOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers);
+
+
+void BoundNodesTreeBuilder::visitMatches(Visitor *ResultVisitor) {
+ if (Bindings.empty())
+ Bindings.push_back(BoundNodesMap());
+ for (BoundNodesMap &Binding : Bindings) {
+ ResultVisitor->visitMatch(BoundNodes(Binding));
+ }
+}
+
+namespace {
+
+typedef bool (*VariadicOperatorFunction)(
+ const ast_type_traits::DynTypedNode &DynNode, ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder, ArrayRef<DynTypedMatcher> InnerMatchers);
+
+template <VariadicOperatorFunction Func>
+class VariadicMatcher : public DynMatcherInterface {
+public:
+ VariadicMatcher(std::vector<DynTypedMatcher> InnerMatchers)
+ : InnerMatchers(std::move(InnerMatchers)) {}
+
+ bool dynMatches(const ast_type_traits::DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const override {
+ return Func(DynNode, Finder, Builder, InnerMatchers);
+ }
+
+private:
+ std::vector<DynTypedMatcher> InnerMatchers;
+};
+
+class IdDynMatcher : public DynMatcherInterface {
+ public:
+ IdDynMatcher(StringRef ID,
+ const IntrusiveRefCntPtr<DynMatcherInterface> &InnerMatcher)
+ : ID(ID), InnerMatcher(InnerMatcher) {}
+
+ bool dynMatches(const ast_type_traits::DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const override {
+ bool Result = InnerMatcher->dynMatches(DynNode, Finder, Builder);
+ if (Result) Builder->setBinding(ID, DynNode);
+ return Result;
+ }
+
+ private:
+ const std::string ID;
+ const IntrusiveRefCntPtr<DynMatcherInterface> InnerMatcher;
+};
+
+/// \brief A matcher that always returns true.
+///
+/// We only ever need one instance of this matcher, so we create a global one
+/// and reuse it to reduce the overhead of the matcher and increase the chance
+/// of cache hits.
+class TrueMatcherImpl : public DynMatcherInterface {
+public:
+ TrueMatcherImpl() {
+ Retain(); // Reference count will never become zero.
+ }
+ bool dynMatches(const ast_type_traits::DynTypedNode &, ASTMatchFinder *,
+ BoundNodesTreeBuilder *) const override {
+ return true;
+ }
+};
+static llvm::ManagedStatic<TrueMatcherImpl> TrueMatcherInstance;
+
+} // namespace
+
+DynTypedMatcher DynTypedMatcher::constructVariadic(
+ DynTypedMatcher::VariadicOperator Op,
+ ast_type_traits::ASTNodeKind SupportedKind,
+ std::vector<DynTypedMatcher> InnerMatchers) {
+ assert(InnerMatchers.size() > 0 && "Array must not be empty.");
+ assert(std::all_of(InnerMatchers.begin(), InnerMatchers.end(),
+ [SupportedKind](const DynTypedMatcher &M) {
+ return M.canConvertTo(SupportedKind);
+ }) &&
+ "InnerMatchers must be convertible to SupportedKind!");
+
+ // We must relax the restrict kind here.
+ // The different operators might deal differently with a mismatch.
+ // Make it the same as SupportedKind, since that is the broadest type we are
+ // allowed to accept.
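+  // For example (illustrative): anyOf(ifStmt(), forStmt()) supports Stmt;
+  // narrowing RestrictKind to IfStmt would make the outer kind check reject
+  // every ForStmt before the inner matchers could run.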
+ auto RestrictKind = SupportedKind;
+
+ switch (Op) {
+ case VO_AllOf:
+ // In the case of allOf() we must pass all the checks, so making
+ // RestrictKind the most restrictive can save us time. This way we reject
+ // invalid types earlier and we can elide the kind checks inside the
+ // matcher.
+ for (auto &IM : InnerMatchers) {
+ RestrictKind = ast_type_traits::ASTNodeKind::getMostDerivedType(
+ RestrictKind, IM.RestrictKind);
+ }
+ return DynTypedMatcher(
+ SupportedKind, RestrictKind,
+ new VariadicMatcher<AllOfVariadicOperator>(std::move(InnerMatchers)));
+
+ case VO_AnyOf:
+ return DynTypedMatcher(
+ SupportedKind, RestrictKind,
+ new VariadicMatcher<AnyOfVariadicOperator>(std::move(InnerMatchers)));
+
+ case VO_EachOf:
+ return DynTypedMatcher(
+ SupportedKind, RestrictKind,
+ new VariadicMatcher<EachOfVariadicOperator>(std::move(InnerMatchers)));
+
+ case VO_UnaryNot:
+ // FIXME: Implement the Not operator to take a single matcher instead of a
+ // vector.
+ return DynTypedMatcher(
+ SupportedKind, RestrictKind,
+ new VariadicMatcher<NotUnaryOperator>(std::move(InnerMatchers)));
+ }
+ llvm_unreachable("Invalid Op value.");
+}
+
+DynTypedMatcher DynTypedMatcher::trueMatcher(
+ ast_type_traits::ASTNodeKind NodeKind) {
+ return DynTypedMatcher(NodeKind, NodeKind, &*TrueMatcherInstance);
+}
+
+bool DynTypedMatcher::canMatchNodesOfKind(
+ ast_type_traits::ASTNodeKind Kind) const {
+ return RestrictKind.isBaseOf(Kind);
+}
+
+DynTypedMatcher DynTypedMatcher::dynCastTo(
+ const ast_type_traits::ASTNodeKind Kind) const {
+ auto Copy = *this;
+ Copy.SupportedKind = Kind;
+ Copy.RestrictKind =
+ ast_type_traits::ASTNodeKind::getMostDerivedType(Kind, RestrictKind);
+ return Copy;
+}
+
+bool DynTypedMatcher::matches(const ast_type_traits::DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ if (RestrictKind.isBaseOf(DynNode.getNodeKind()) &&
+ Implementation->dynMatches(DynNode, Finder, Builder)) {
+ return true;
+ }
+ // Delete all bindings when a matcher does not match.
+  // This prevents unexpected exposure of bound nodes in unmatched
+ // branches of the match tree.
+ Builder->removeBindings([](const BoundNodesMap &) { return true; });
+ return false;
+}
+
+bool DynTypedMatcher::matchesNoKindCheck(
+ const ast_type_traits::DynTypedNode &DynNode, ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ assert(RestrictKind.isBaseOf(DynNode.getNodeKind()));
+ if (Implementation->dynMatches(DynNode, Finder, Builder)) {
+ return true;
+ }
+ // Delete all bindings when a matcher does not match.
+  // This prevents unexpected exposure of bound nodes in unmatched
+ // branches of the match tree.
+ Builder->removeBindings([](const BoundNodesMap &) { return true; });
+ return false;
+}
+
+llvm::Optional<DynTypedMatcher> DynTypedMatcher::tryBind(StringRef ID) const {
+ if (!AllowBind) return llvm::None;
+ auto Result = *this;
+ Result.Implementation = new IdDynMatcher(ID, Result.Implementation);
+ return Result;
+}
+
+bool DynTypedMatcher::canConvertTo(ast_type_traits::ASTNodeKind To) const {
+ const auto From = getSupportedKind();
+ auto QualKind = ast_type_traits::ASTNodeKind::getFromNodeKind<QualType>();
+ auto TypeKind = ast_type_traits::ASTNodeKind::getFromNodeKind<Type>();
+ /// Mimic the implicit conversions of Matcher<>.
+ /// - From Matcher<Type> to Matcher<QualType>
+ if (From.isSame(TypeKind) && To.isSame(QualKind)) return true;
+ /// - From Matcher<Base> to Matcher<Derived>
+ return From.isBaseOf(To);
+}
+
+void BoundNodesTreeBuilder::addMatch(const BoundNodesTreeBuilder &Other) {
+ Bindings.append(Other.Bindings.begin(), Other.Bindings.end());
+}
+
+bool NotUnaryOperator(const ast_type_traits::DynTypedNode &DynNode,
+ ASTMatchFinder *Finder, BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers) {
+ if (InnerMatchers.size() != 1)
+ return false;
+
+ // The 'unless' matcher will always discard the result:
+ // If the inner matcher doesn't match, unless returns true,
+ // but the inner matcher cannot have bound anything.
+ // If the inner matcher matches, the result is false, and
+ // any possible binding will be discarded.
+ // We still need to hand in all the bound nodes up to this
+ // point so the inner matcher can depend on bound nodes,
+ // and we need to actively discard the bound nodes, otherwise
+ // the inner matcher will reset the bound nodes if it doesn't
+  // match, but this would be inverted by 'unless'.
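+  // For example (illustrative): unless(hasName("X")) applied to a class
+  // named "Y" returns true, and anything hasName("X") might have bound is
+  // dropped together with 'Discard'.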
+ BoundNodesTreeBuilder Discard(*Builder);
+ return !InnerMatchers[0].matches(DynNode, Finder, &Discard);
+}
+
+bool AllOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers) {
+ // allOf leads to one matcher for each alternative in the first
+ // matcher combined with each alternative in the second matcher.
+ // Thus, we can reuse the same Builder.
+ for (const DynTypedMatcher &InnerMatcher : InnerMatchers) {
+ if (!InnerMatcher.matchesNoKindCheck(DynNode, Finder, Builder))
+ return false;
+ }
+ return true;
+}
+
+bool EachOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers) {
+ BoundNodesTreeBuilder Result;
+ bool Matched = false;
+ for (const DynTypedMatcher &InnerMatcher : InnerMatchers) {
+ BoundNodesTreeBuilder BuilderInner(*Builder);
+ if (InnerMatcher.matches(DynNode, Finder, &BuilderInner)) {
+ Matched = true;
+ Result.addMatch(BuilderInner);
+ }
+ }
+ *Builder = std::move(Result);
+ return Matched;
+}
+
+bool AnyOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ ArrayRef<DynTypedMatcher> InnerMatchers) {
+ for (const DynTypedMatcher &InnerMatcher : InnerMatchers) {
+ BoundNodesTreeBuilder Result = *Builder;
+ if (InnerMatcher.matches(DynNode, Finder, &Result)) {
+ *Builder = std::move(Result);
+ return true;
+ }
+ }
+ return false;
+}
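+// Illustrative contrast (not from the original change): if both M1 and M2
+// match a node, eachOf(M1, M2) keeps one binding set per inner matcher,
+// while anyOf(M1, M2) stops after M1 and keeps only its bindings.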
+
+HasNameMatcher::HasNameMatcher(StringRef NameRef)
+ : UseUnqualifiedMatch(NameRef.find("::") == NameRef.npos), Name(NameRef) {
+ assert(!Name.empty());
+}
+
+bool HasNameMatcher::matchesNodeUnqualified(const NamedDecl &Node) const {
+ assert(UseUnqualifiedMatch);
+ if (Node.getIdentifier()) {
+ // Simple name.
+ return Name == Node.getName();
+ }
+ if (Node.getDeclName()) {
+ // Name needs to be constructed.
+ llvm::SmallString<128> NodeName;
+ llvm::raw_svector_ostream OS(NodeName);
+ Node.printName(OS);
+ return Name == OS.str();
+ }
+ return false;
+}
+
+bool HasNameMatcher::matchesNodeFull(const NamedDecl &Node) const {
+ llvm::SmallString<128> NodeName = StringRef("::");
+ llvm::raw_svector_ostream OS(NodeName);
+ Node.printQualifiedName(OS);
+ const StringRef FullName = OS.str();
+ const StringRef Pattern = Name;
+
+ if (Pattern.startswith("::"))
+ return FullName == Pattern;
+
+ return FullName.endswith(Pattern) &&
+ FullName.drop_back(Pattern.size()).endswith("::");
+}
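+// Illustrative examples (not from the original change), given
+//   namespace ns { class C; }
+// hasName("C") and hasName("ns::C") match ns::C via the suffix checks above,
+// while hasName("::C") requires an exact fully qualified match and does not.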
+
+bool HasNameMatcher::matchesNode(const NamedDecl &Node) const {
+ // FIXME: There is still room for improvement, but it would require copying a
+  // lot of the logic from NamedDecl::printQualifiedName(). The benchmarks do
+  // not show that the extra complexity is needed right now.
+ if (UseUnqualifiedMatch) {
+ assert(matchesNodeUnqualified(Node) == matchesNodeFull(Node));
+ return matchesNodeUnqualified(Node);
+ }
+ return matchesNodeFull(Node);
+}
+
+} // end namespace internal
+} // end namespace ast_matchers
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp b/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp
new file mode 100644
index 0000000..787b780
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp
@@ -0,0 +1,222 @@
+//===--- Diagnostics.cpp - Helper class for error diagnostics -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/ASTMatchers/Dynamic/Diagnostics.h"
+
+namespace clang {
+namespace ast_matchers {
+namespace dynamic {
+Diagnostics::ArgStream Diagnostics::pushContextFrame(ContextType Type,
+ SourceRange Range) {
+ ContextStack.emplace_back();
+ ContextFrame& data = ContextStack.back();
+ data.Type = Type;
+ data.Range = Range;
+ return ArgStream(&data.Args);
+}
+
+Diagnostics::Context::Context(ConstructMatcherEnum, Diagnostics *Error,
+ StringRef MatcherName,
+ SourceRange MatcherRange)
+ : Error(Error) {
+ Error->pushContextFrame(CT_MatcherConstruct, MatcherRange) << MatcherName;
+}
+
+Diagnostics::Context::Context(MatcherArgEnum, Diagnostics *Error,
+ StringRef MatcherName,
+ SourceRange MatcherRange,
+ unsigned ArgNumber)
+ : Error(Error) {
+ Error->pushContextFrame(CT_MatcherArg, MatcherRange) << ArgNumber
+ << MatcherName;
+}
+
+Diagnostics::Context::~Context() { Error->ContextStack.pop_back(); }
+
+Diagnostics::OverloadContext::OverloadContext(Diagnostics *Error)
+ : Error(Error), BeginIndex(Error->Errors.size()) {}
+
+Diagnostics::OverloadContext::~OverloadContext() {
+ // Merge all errors that happened while in this context.
+ if (BeginIndex < Error->Errors.size()) {
+ Diagnostics::ErrorContent &Dest = Error->Errors[BeginIndex];
+ for (size_t i = BeginIndex + 1, e = Error->Errors.size(); i < e; ++i) {
+ Dest.Messages.push_back(Error->Errors[i].Messages[0]);
+ }
+ Error->Errors.resize(BeginIndex + 1);
+ }
+}
+
+void Diagnostics::OverloadContext::revertErrors() {
+ // Revert the errors.
+ Error->Errors.resize(BeginIndex);
+}
+
+Diagnostics::ArgStream &Diagnostics::ArgStream::operator<<(const Twine &Arg) {
+ Out->push_back(Arg.str());
+ return *this;
+}
+
+Diagnostics::ArgStream Diagnostics::addError(SourceRange Range,
+ ErrorType Error) {
+ Errors.emplace_back();
+ ErrorContent &Last = Errors.back();
+ Last.ContextStack = ContextStack;
+ Last.Messages.emplace_back();
+ Last.Messages.back().Range = Range;
+ Last.Messages.back().Type = Error;
+ return ArgStream(&Last.Messages.back().Args);
+}
+
+static StringRef contextTypeToFormatString(Diagnostics::ContextType Type) {
+ switch (Type) {
+ case Diagnostics::CT_MatcherConstruct:
+ return "Error building matcher $0.";
+ case Diagnostics::CT_MatcherArg:
+ return "Error parsing argument $0 for matcher $1.";
+ }
+ llvm_unreachable("Unknown ContextType value.");
+}
+
+static StringRef errorTypeToFormatString(Diagnostics::ErrorType Type) {
+ switch (Type) {
+ case Diagnostics::ET_RegistryMatcherNotFound:
+ return "Matcher not found: $0";
+ case Diagnostics::ET_RegistryWrongArgCount:
+ return "Incorrect argument count. (Expected = $0) != (Actual = $1)";
+ case Diagnostics::ET_RegistryWrongArgType:
+ return "Incorrect type for arg $0. (Expected = $1) != (Actual = $2)";
+ case Diagnostics::ET_RegistryNotBindable:
+ return "Matcher does not support binding.";
+ case Diagnostics::ET_RegistryAmbiguousOverload:
+ // TODO: Add type info about the overload error.
+ return "Ambiguous matcher overload.";
+ case Diagnostics::ET_RegistryValueNotFound:
+ return "Value not found: $0";
+
+ case Diagnostics::ET_ParserStringError:
+ return "Error parsing string token: <$0>";
+ case Diagnostics::ET_ParserNoOpenParen:
+ return "Error parsing matcher. Found token <$0> while looking for '('.";
+ case Diagnostics::ET_ParserNoCloseParen:
+ return "Error parsing matcher. Found end-of-code while looking for ')'.";
+ case Diagnostics::ET_ParserNoComma:
+ return "Error parsing matcher. Found token <$0> while looking for ','.";
+ case Diagnostics::ET_ParserNoCode:
+ return "End of code found while looking for token.";
+ case Diagnostics::ET_ParserNotAMatcher:
+ return "Input value is not a matcher expression.";
+ case Diagnostics::ET_ParserInvalidToken:
+ return "Invalid token <$0> found when looking for a value.";
+ case Diagnostics::ET_ParserMalformedBindExpr:
+ return "Malformed bind() expression.";
+ case Diagnostics::ET_ParserTrailingCode:
+ return "Expected end of code.";
+ case Diagnostics::ET_ParserUnsignedError:
+ return "Error parsing unsigned token: <$0>";
+ case Diagnostics::ET_ParserOverloadedType:
+ return "Input value has unresolved overloaded type: $0";
+
+ case Diagnostics::ET_None:
+ return "<N/A>";
+ }
+ llvm_unreachable("Unknown ErrorType value.");
+}
+
+static void formatErrorString(StringRef FormatString,
+ ArrayRef<std::string> Args,
+ llvm::raw_ostream &OS) {
+ while (!FormatString.empty()) {
+ std::pair<StringRef, StringRef> Pieces = FormatString.split("$");
+ OS << Pieces.first.str();
+ if (Pieces.second.empty()) break;
+
+ const char Next = Pieces.second.front();
+ FormatString = Pieces.second.drop_front();
+ if (Next >= '0' && Next <= '9') {
+ const unsigned Index = Next - '0';
+ if (Index < Args.size()) {
+ OS << Args[Index];
+ } else {
+ OS << "<Argument_Not_Provided>";
+ }
+ }
+ }
+}
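+// For example (illustrative):
+//   formatErrorString("Matcher not found: $0", Args, OS)
+// with Args == {"decl"} writes "Matcher not found: decl"; a placeholder with
+// no matching argument prints "<Argument_Not_Provided>".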
+
+static void maybeAddLineAndColumn(SourceRange Range,
+ llvm::raw_ostream &OS) {
+ if (Range.Start.Line > 0 && Range.Start.Column > 0) {
+ OS << Range.Start.Line << ":" << Range.Start.Column << ": ";
+ }
+}
+
+static void printContextFrameToStream(const Diagnostics::ContextFrame &Frame,
+ llvm::raw_ostream &OS) {
+ maybeAddLineAndColumn(Frame.Range, OS);
+ formatErrorString(contextTypeToFormatString(Frame.Type), Frame.Args, OS);
+}
+
+static void
+printMessageToStream(const Diagnostics::ErrorContent::Message &Message,
+ const Twine Prefix, llvm::raw_ostream &OS) {
+ maybeAddLineAndColumn(Message.Range, OS);
+ OS << Prefix;
+ formatErrorString(errorTypeToFormatString(Message.Type), Message.Args, OS);
+}
+
+static void printErrorContentToStream(const Diagnostics::ErrorContent &Content,
+ llvm::raw_ostream &OS) {
+ if (Content.Messages.size() == 1) {
+ printMessageToStream(Content.Messages[0], "", OS);
+ } else {
+ for (size_t i = 0, e = Content.Messages.size(); i != e; ++i) {
+ if (i != 0) OS << "\n";
+ printMessageToStream(Content.Messages[i],
+ "Candidate " + Twine(i + 1) + ": ", OS);
+ }
+ }
+}
+
+void Diagnostics::printToStream(llvm::raw_ostream &OS) const {
+ for (size_t i = 0, e = Errors.size(); i != e; ++i) {
+ if (i != 0) OS << "\n";
+ printErrorContentToStream(Errors[i], OS);
+ }
+}
+
+std::string Diagnostics::toString() const {
+ std::string S;
+ llvm::raw_string_ostream OS(S);
+ printToStream(OS);
+ return OS.str();
+}
+
+void Diagnostics::printToStreamFull(llvm::raw_ostream &OS) const {
+ for (size_t i = 0, e = Errors.size(); i != e; ++i) {
+ if (i != 0) OS << "\n";
+ const ErrorContent &Error = Errors[i];
+ for (size_t j = 0, f = Error.ContextStack.size(); j != f; ++j) {
+ printContextFrameToStream(Error.ContextStack[j], OS);
+ OS << "\n";
+ }
+ printErrorContentToStream(Error, OS);
+ }
+}
+
+std::string Diagnostics::toStringFull() const {
+ std::string S;
+ llvm::raw_string_ostream OS(S);
+ printToStreamFull(OS);
+ return OS.str();
+}
+
+} // namespace dynamic
+} // namespace ast_matchers
+} // namespace clang
diff --git a/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Marshallers.h b/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Marshallers.h
new file mode 100644
index 0000000..64d6b78
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Marshallers.h
@@ -0,0 +1,716 @@
+//===--- Marshallers.h - Generic matcher function marshallers -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Function templates and classes to wrap matcher construct functions.
+///
+/// A collection of template functions and classes that provide a generic
+/// marshalling layer on top of matcher construct functions.
+/// These are used by the registry to export all marshalled matcher
+/// constructors with the same generic interface.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_ASTMATCHERS_DYNAMIC_MARSHALLERS_H
+#define LLVM_CLANG_LIB_ASTMATCHERS_DYNAMIC_MARSHALLERS_H
+
+#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/ASTMatchers/Dynamic/Diagnostics.h"
+#include "clang/ASTMatchers/Dynamic/VariantValue.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include <climits>
+#include <string>
+
+namespace clang {
+namespace ast_matchers {
+namespace dynamic {
+namespace internal {
+
+/// \brief Helper template class to map from an argument type to the right
+/// is/get functions in VariantValue.
+/// Used to verify and extract the matcher arguments below.
+template <class T> struct ArgTypeTraits;
+template <class T> struct ArgTypeTraits<const T &> : public ArgTypeTraits<T> {
+};
+
+template <> struct ArgTypeTraits<std::string> {
+ static bool is(const VariantValue &Value) { return Value.isString(); }
+ static const std::string &get(const VariantValue &Value) {
+ return Value.getString();
+ }
+ static ArgKind getKind() {
+ return ArgKind(ArgKind::AK_String);
+ }
+};
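+
+// For illustration, given a VariantValue V holding a string,
+// ArgTypeTraits<std::string>::is(V) returns true, get(V) yields the string,
+// and getKind() names the expected kind (AK_String) for error messages.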
+
+template <>
+struct ArgTypeTraits<StringRef> : public ArgTypeTraits<std::string> {
+};
+
+template <class T> struct ArgTypeTraits<ast_matchers::internal::Matcher<T> > {
+ static bool is(const VariantValue &Value) {
+ return Value.isMatcher() && Value.getMatcher().hasTypedMatcher<T>();
+ }
+ static ast_matchers::internal::Matcher<T> get(const VariantValue &Value) {
+ return Value.getMatcher().getTypedMatcher<T>();
+ }
+ static ArgKind getKind() {
+ return ArgKind(ast_type_traits::ASTNodeKind::getFromNodeKind<T>());
+ }
+};
+
+template <> struct ArgTypeTraits<unsigned> {
+ static bool is(const VariantValue &Value) { return Value.isUnsigned(); }
+ static unsigned get(const VariantValue &Value) {
+ return Value.getUnsigned();
+ }
+ static ArgKind getKind() {
+ return ArgKind(ArgKind::AK_Unsigned);
+ }
+};
+
+template <> struct ArgTypeTraits<attr::Kind> {
+private:
+ static attr::Kind getAttrKind(llvm::StringRef AttrKind) {
+ return llvm::StringSwitch<attr::Kind>(AttrKind)
+#define ATTR(X) .Case("attr::" #X, attr:: X)
+#include "clang/Basic/AttrList.inc"
+ .Default(attr::Kind(-1));
+ }
+public:
+ static bool is(const VariantValue &Value) {
+ return Value.isString() &&
+ getAttrKind(Value.getString()) != attr::Kind(-1);
+ }
+ static attr::Kind get(const VariantValue &Value) {
+ return getAttrKind(Value.getString());
+ }
+ static ArgKind getKind() {
+ return ArgKind(ArgKind::AK_String);
+ }
+};
+
+/// \brief Matcher descriptor interface.
+///
+/// Provides a \c create() method that constructs the matcher from the provided
+/// arguments, and various other methods for type introspection.
+class MatcherDescriptor {
+public:
+ virtual ~MatcherDescriptor() {}
+ virtual VariantMatcher create(SourceRange NameRange,
+ ArrayRef<ParserValue> Args,
+ Diagnostics *Error) const = 0;
+
+ /// Returns whether the matcher is variadic. Variadic matchers can take any
+ /// number of arguments, but they must be of the same type.
+ virtual bool isVariadic() const = 0;
+
+ /// Returns the number of arguments accepted by the matcher if not variadic.
+ virtual unsigned getNumArgs() const = 0;
+
+ /// Given that the matcher is being converted to type \p ThisKind, append the
+ /// set of argument types accepted for argument \p ArgNo to \p ArgKinds.
+ // FIXME: We should provide the ability to constrain the output of this
+ // function based on the types of other matcher arguments.
+ virtual void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
+ std::vector<ArgKind> &ArgKinds) const = 0;
+
+ /// Returns whether this matcher is convertible to the given type. If it is
+ /// so convertible, store in *Specificity a value corresponding to the
+ /// "specificity" of the converted matcher to the given context, and in
+ /// *LeastDerivedKind the least derived matcher kind which would result in the
+ /// same matcher overload. Zero specificity indicates that this conversion
+ /// would produce a trivial matcher that will either always or never match.
+ /// Such matchers are excluded from code completion results.
+ virtual bool isConvertibleTo(
+ ast_type_traits::ASTNodeKind Kind, unsigned *Specificity = nullptr,
+ ast_type_traits::ASTNodeKind *LeastDerivedKind = nullptr) const = 0;
+
+ /// Returns whether the matcher will, given a matcher of any type T, yield a
+ /// matcher of type T.
+ virtual bool isPolymorphic() const { return false; }
+};
+
+inline bool isRetKindConvertibleTo(
+ ArrayRef<ast_type_traits::ASTNodeKind> RetKinds,
+ ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
+ ast_type_traits::ASTNodeKind *LeastDerivedKind) {
+ for (const ast_type_traits::ASTNodeKind &NodeKind : RetKinds) {
+ if (ArgKind(NodeKind).isConvertibleTo(Kind, Specificity)) {
+ if (LeastDerivedKind)
+ *LeastDerivedKind = NodeKind;
+ return true;
+ }
+ }
+ return false;
+}
+
+/// \brief Simple callback implementation. Marshaller and function are provided.
+///
+/// This class wraps a function of arbitrary signature and a marshaller
+/// function into a MatcherDescriptor.
+/// The marshaller is in charge of taking the VariantValue arguments, checking
+/// their types, unpacking them and calling the underlying function.
+class FixedArgCountMatcherDescriptor : public MatcherDescriptor {
+public:
+ typedef VariantMatcher (*MarshallerType)(void (*Func)(),
+ StringRef MatcherName,
+ SourceRange NameRange,
+ ArrayRef<ParserValue> Args,
+ Diagnostics *Error);
+
+ /// \param Marshaller Function to unpack the arguments and call \c Func
+ /// \param Func Matcher construct function. This is the function that
+ /// compile-time matcher expressions would use to create the matcher.
+ /// \param RetKinds The list of matcher types to which the matcher is
+ /// convertible.
+ /// \param ArgKinds The types of the arguments this matcher takes.
+ FixedArgCountMatcherDescriptor(
+ MarshallerType Marshaller, void (*Func)(), StringRef MatcherName,
+ ArrayRef<ast_type_traits::ASTNodeKind> RetKinds,
+ ArrayRef<ArgKind> ArgKinds)
+ : Marshaller(Marshaller), Func(Func), MatcherName(MatcherName),
+ RetKinds(RetKinds.begin(), RetKinds.end()),
+ ArgKinds(ArgKinds.begin(), ArgKinds.end()) {}
+
+ VariantMatcher create(SourceRange NameRange,
+ ArrayRef<ParserValue> Args,
+ Diagnostics *Error) const override {
+ return Marshaller(Func, MatcherName, NameRange, Args, Error);
+ }
+
+ bool isVariadic() const override { return false; }
+ unsigned getNumArgs() const override { return ArgKinds.size(); }
+ void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
+ std::vector<ArgKind> &Kinds) const override {
+ Kinds.push_back(ArgKinds[ArgNo]);
+ }
+ bool isConvertibleTo(
+ ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
+ ast_type_traits::ASTNodeKind *LeastDerivedKind) const override {
+ return isRetKindConvertibleTo(RetKinds, Kind, Specificity,
+ LeastDerivedKind);
+ }
+
+private:
+ const MarshallerType Marshaller;
+ void (* const Func)();
+ const std::string MatcherName;
+ const std::vector<ast_type_traits::ASTNodeKind> RetKinds;
+ const std::vector<ArgKind> ArgKinds;
+};
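+
+// A sketch of how a construct function lands here (the function below is
+// hypothetical, not part of clang):
+//   Matcher<FunctionDecl> myParamCount(unsigned N);
+// makeMatcherAutoMarshall (further below) would wrap it with
+// Marshaller = matcherMarshall1<Matcher<FunctionDecl>, unsigned>,
+// Func = reinterpret_cast<void (*)()>(&myParamCount),
+// RetKinds = {FunctionDecl} and ArgKinds = {AK_Unsigned}.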
+
+/// \brief Helper methods to extract and merge all possible typed matchers
+/// out of the polymorphic object.
+template <class PolyMatcher>
+static void mergePolyMatchers(const PolyMatcher &Poly,
+ std::vector<DynTypedMatcher> &Out,
+ ast_matchers::internal::EmptyTypeList) {}
+
+template <class PolyMatcher, class TypeList>
+static void mergePolyMatchers(const PolyMatcher &Poly,
+ std::vector<DynTypedMatcher> &Out, TypeList) {
+ Out.push_back(ast_matchers::internal::Matcher<typename TypeList::head>(Poly));
+ mergePolyMatchers(Poly, Out, typename TypeList::tail());
+}
+
+/// \brief Convert the return values of the functions into a VariantMatcher.
+///
+/// There are two cases right now: the return value is a Matcher<T> or a
+/// polymorphic matcher. For the former, we just construct the VariantMatcher.
+/// For the latter, we instantiate all the possible Matcher<T> variants of the
+/// polymorphic matcher.
+static VariantMatcher outvalueToVariantMatcher(const DynTypedMatcher &Matcher) {
+ return VariantMatcher::SingleMatcher(Matcher);
+}
+
+template <typename T>
+static VariantMatcher outvalueToVariantMatcher(const T &PolyMatcher,
+ typename T::ReturnTypes * = nullptr) {
+ std::vector<DynTypedMatcher> Matchers;
+ mergePolyMatchers(PolyMatcher, Matchers, typename T::ReturnTypes());
+ VariantMatcher Out = VariantMatcher::PolymorphicMatcher(std::move(Matchers));
+ return Out;
+}
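+
+// For illustration, a polymorphic matcher whose ReturnTypes is
+// TypeList<Stmt, Decl> is expanded by mergePolyMatchers() into a
+// Matcher<Stmt> and a Matcher<Decl>, which the second overload bundles into
+// one polymorphic VariantMatcher.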
+
+template <typename T>
+inline void buildReturnTypeVectorFromTypeList(
+ std::vector<ast_type_traits::ASTNodeKind> &RetTypes) {
+ RetTypes.push_back(
+ ast_type_traits::ASTNodeKind::getFromNodeKind<typename T::head>());
+ buildReturnTypeVectorFromTypeList<typename T::tail>(RetTypes);
+}
+
+template <>
+inline void
+buildReturnTypeVectorFromTypeList<ast_matchers::internal::EmptyTypeList>(
+ std::vector<ast_type_traits::ASTNodeKind> &RetTypes) {}
+
+template <typename T>
+struct BuildReturnTypeVector {
+ static void build(std::vector<ast_type_traits::ASTNodeKind> &RetTypes) {
+ buildReturnTypeVectorFromTypeList<typename T::ReturnTypes>(RetTypes);
+ }
+};
+
+template <typename T>
+struct BuildReturnTypeVector<ast_matchers::internal::Matcher<T> > {
+ static void build(std::vector<ast_type_traits::ASTNodeKind> &RetTypes) {
+ RetTypes.push_back(ast_type_traits::ASTNodeKind::getFromNodeKind<T>());
+ }
+};
+
+template <typename T>
+struct BuildReturnTypeVector<ast_matchers::internal::BindableMatcher<T> > {
+ static void build(std::vector<ast_type_traits::ASTNodeKind> &RetTypes) {
+ RetTypes.push_back(ast_type_traits::ASTNodeKind::getFromNodeKind<T>());
+ }
+};
+
+/// \brief Variadic marshaller function.
+template <typename ResultT, typename ArgT,
+ ResultT (*Func)(ArrayRef<const ArgT *>)>
+VariantMatcher
+variadicMatcherDescriptor(StringRef MatcherName, SourceRange NameRange,
+ ArrayRef<ParserValue> Args, Diagnostics *Error) {
+ ArgT **InnerArgs = new ArgT *[Args.size()]();
+
+ bool HasError = false;
+ for (size_t i = 0, e = Args.size(); i != e; ++i) {
+ typedef ArgTypeTraits<ArgT> ArgTraits;
+ const ParserValue &Arg = Args[i];
+ const VariantValue &Value = Arg.Value;
+ if (!ArgTraits::is(Value)) {
+ Error->addError(Arg.Range, Error->ET_RegistryWrongArgType)
+ << (i + 1) << ArgTraits::getKind().asString() << Value.getTypeAsString();
+ HasError = true;
+ break;
+ }
+ InnerArgs[i] = new ArgT(ArgTraits::get(Value));
+ }
+
+ VariantMatcher Out;
+ if (!HasError) {
+ Out = outvalueToVariantMatcher(Func(llvm::makeArrayRef(InnerArgs,
+ Args.size())));
+ }
+
+ for (size_t i = 0, e = Args.size(); i != e; ++i) {
+ delete InnerArgs[i];
+ }
+ delete[] InnerArgs;
+ return Out;
+}
+
+/// \brief Matcher descriptor for variadic functions.
+///
+/// This class simply wraps a VariadicFunction with the right signature to export
+/// it as a MatcherDescriptor.
+/// This allows us to have one implementation of the interface for as many free
+/// functions as we want, reducing the number of symbols and the size of the
+/// object file.
+class VariadicFuncMatcherDescriptor : public MatcherDescriptor {
+public:
+ typedef VariantMatcher (*RunFunc)(StringRef MatcherName,
+ SourceRange NameRange,
+ ArrayRef<ParserValue> Args,
+ Diagnostics *Error);
+
+ template <typename ResultT, typename ArgT,
+ ResultT (*F)(ArrayRef<const ArgT *>)>
+ VariadicFuncMatcherDescriptor(llvm::VariadicFunction<ResultT, ArgT, F> Func,
+ StringRef MatcherName)
+ : Func(&variadicMatcherDescriptor<ResultT, ArgT, F>),
+ MatcherName(MatcherName.str()),
+ ArgsKind(ArgTypeTraits<ArgT>::getKind()) {
+ BuildReturnTypeVector<ResultT>::build(RetKinds);
+ }
+
+ VariantMatcher create(SourceRange NameRange,
+ ArrayRef<ParserValue> Args,
+ Diagnostics *Error) const override {
+ return Func(MatcherName, NameRange, Args, Error);
+ }
+
+ bool isVariadic() const override { return true; }
+ unsigned getNumArgs() const override { return 0; }
+ void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
+ std::vector<ArgKind> &Kinds) const override {
+ Kinds.push_back(ArgsKind);
+ }
+ bool isConvertibleTo(
+ ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
+ ast_type_traits::ASTNodeKind *LeastDerivedKind) const override {
+ return isRetKindConvertibleTo(RetKinds, Kind, Specificity,
+ LeastDerivedKind);
+ }
+
+private:
+ const RunFunc Func;
+ const std::string MatcherName;
+ std::vector<ast_type_traits::ASTNodeKind> RetKinds;
+ const ArgKind ArgsKind;
+};
+
+/// \brief Report a zero (trivial) specificity when appropriate for
+/// VariadicDynCastAllOfMatchers.
+class DynCastAllOfMatcherDescriptor : public VariadicFuncMatcherDescriptor {
+public:
+ template <typename BaseT, typename DerivedT>
+ DynCastAllOfMatcherDescriptor(
+ ast_matchers::internal::VariadicDynCastAllOfMatcher<BaseT, DerivedT> Func,
+ StringRef MatcherName)
+ : VariadicFuncMatcherDescriptor(Func, MatcherName),
+ DerivedKind(ast_type_traits::ASTNodeKind::getFromNodeKind<DerivedT>()) {
+ }
+
+ bool
+ isConvertibleTo(ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
+ ast_type_traits::ASTNodeKind *LeastDerivedKind) const override {
+ // If Kind is not a base of DerivedKind, either DerivedKind is a base of
+ // Kind (in which case the match will always succeed) or Kind and
+ // DerivedKind are unrelated (in which case it will always fail), so set
+ // Specificity to 0.
+ if (VariadicFuncMatcherDescriptor::isConvertibleTo(Kind, Specificity,
+ LeastDerivedKind)) {
+ if (Kind.isSame(DerivedKind) || !Kind.isBaseOf(DerivedKind)) {
+ if (Specificity)
+ *Specificity = 0;
+ }
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+private:
+ const ast_type_traits::ASTNodeKind DerivedKind;
+};
+
+/// \brief Helper macros to check the arguments of all marshaller functions.
+#define CHECK_ARG_COUNT(count) \
+ if (Args.size() != count) { \
+ Error->addError(NameRange, Error->ET_RegistryWrongArgCount) \
+ << count << Args.size(); \
+ return VariantMatcher(); \
+ }
+
+#define CHECK_ARG_TYPE(index, type) \
+ if (!ArgTypeTraits<type>::is(Args[index].Value)) { \
+ Error->addError(Args[index].Range, Error->ET_RegistryWrongArgType) \
+ << (index + 1) << ArgTypeTraits<type>::getKind().asString() \
+ << Args[index].Value.getTypeAsString(); \
+ return VariantMatcher(); \
+ }
+
+/// \brief 0-arg marshaller function.
+template <typename ReturnType>
+static VariantMatcher matcherMarshall0(void (*Func)(), StringRef MatcherName,
+ SourceRange NameRange,
+ ArrayRef<ParserValue> Args,
+ Diagnostics *Error) {
+ typedef ReturnType (*FuncType)();
+ CHECK_ARG_COUNT(0);
+ return outvalueToVariantMatcher(reinterpret_cast<FuncType>(Func)());
+}
+
+/// \brief 1-arg marshaller function.
+template <typename ReturnType, typename ArgType1>
+static VariantMatcher matcherMarshall1(void (*Func)(), StringRef MatcherName,
+ SourceRange NameRange,
+ ArrayRef<ParserValue> Args,
+ Diagnostics *Error) {
+ typedef ReturnType (*FuncType)(ArgType1);
+ CHECK_ARG_COUNT(1);
+ CHECK_ARG_TYPE(0, ArgType1);
+ return outvalueToVariantMatcher(reinterpret_cast<FuncType>(Func)(
+ ArgTypeTraits<ArgType1>::get(Args[0].Value)));
+}
+
+/// \brief 2-arg marshaller function.
+template <typename ReturnType, typename ArgType1, typename ArgType2>
+static VariantMatcher matcherMarshall2(void (*Func)(), StringRef MatcherName,
+ SourceRange NameRange,
+ ArrayRef<ParserValue> Args,
+ Diagnostics *Error) {
+ typedef ReturnType (*FuncType)(ArgType1, ArgType2);
+ CHECK_ARG_COUNT(2);
+ CHECK_ARG_TYPE(0, ArgType1);
+ CHECK_ARG_TYPE(1, ArgType2);
+ return outvalueToVariantMatcher(reinterpret_cast<FuncType>(Func)(
+ ArgTypeTraits<ArgType1>::get(Args[0].Value),
+ ArgTypeTraits<ArgType2>::get(Args[1].Value)));
+}
+
+#undef CHECK_ARG_COUNT
+#undef CHECK_ARG_TYPE
+
+/// \brief Helper class used to collect all the possible overloads of an
+/// argument-adapting matcher function.
+template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
+ typename FromTypes, typename ToTypes>
+class AdaptativeOverloadCollector {
+public:
+ AdaptativeOverloadCollector(StringRef Name,
+ std::vector<MatcherDescriptor *> &Out)
+ : Name(Name), Out(Out) {
+ collect(FromTypes());
+ }
+
+private:
+ typedef ast_matchers::internal::ArgumentAdaptingMatcherFunc<
+ ArgumentAdapterT, FromTypes, ToTypes> AdaptativeFunc;
+
+ /// \brief Base case for the recursion.
+ static void collect(ast_matchers::internal::EmptyTypeList) {}
+
+ /// \brief Recursive case. Get the overload for the head of the list, and
+ /// recurse to the tail.
+ template <typename FromTypeList>
+ inline void collect(FromTypeList);
+
+ StringRef Name;
+ std::vector<MatcherDescriptor *> &Out;
+};
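+
+// For illustration, instantiating the collector with FromTypes =
+// TypeList<Decl, Stmt> pushes one overload descriptor for Decl and one for
+// Stmt into Out; collect() peels one head type off the list per step (its
+// definition appears near the end of this file).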
+
+/// \brief MatcherDescriptor that wraps multiple "overloads" of the same
+/// matcher.
+///
+/// It will try every overload and generate appropriate errors when none, or
+/// more than one, of the overloads matches the arguments.
+class OverloadedMatcherDescriptor : public MatcherDescriptor {
+public:
+ OverloadedMatcherDescriptor(ArrayRef<MatcherDescriptor *> Callbacks)
+ : Overloads(Callbacks.begin(), Callbacks.end()) {}
+
+ ~OverloadedMatcherDescriptor() override {}
+
+ VariantMatcher create(SourceRange NameRange,
+ ArrayRef<ParserValue> Args,
+ Diagnostics *Error) const override {
+ std::vector<VariantMatcher> Constructed;
+ Diagnostics::OverloadContext Ctx(Error);
+ for (const auto &O : Overloads) {
+ VariantMatcher SubMatcher = O->create(NameRange, Args, Error);
+ if (!SubMatcher.isNull()) {
+ Constructed.push_back(SubMatcher);
+ }
+ }
+
+ if (Constructed.empty()) return VariantMatcher(); // No overload matched.
+ // We ignore the errors if any matcher succeeded.
+ Ctx.revertErrors();
+ if (Constructed.size() > 1) {
+ // More than one constructed. It is ambiguous.
+ Error->addError(NameRange, Error->ET_RegistryAmbiguousOverload);
+ return VariantMatcher();
+ }
+ return Constructed[0];
+ }
+
+ bool isVariadic() const override {
+ bool Overload0Variadic = Overloads[0]->isVariadic();
+#ifndef NDEBUG
+ for (const auto &O : Overloads) {
+ assert(Overload0Variadic == O->isVariadic());
+ }
+#endif
+ return Overload0Variadic;
+ }
+
+ unsigned getNumArgs() const override {
+ unsigned Overload0NumArgs = Overloads[0]->getNumArgs();
+#ifndef NDEBUG
+ for (const auto &O : Overloads) {
+ assert(Overload0NumArgs == O->getNumArgs());
+ }
+#endif
+ return Overload0NumArgs;
+ }
+
+ void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
+ std::vector<ArgKind> &Kinds) const override {
+ for (const auto &O : Overloads) {
+ if (O->isConvertibleTo(ThisKind))
+ O->getArgKinds(ThisKind, ArgNo, Kinds);
+ }
+ }
+
+ bool isConvertibleTo(
+ ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
+ ast_type_traits::ASTNodeKind *LeastDerivedKind) const override {
+ for (const auto &O : Overloads) {
+ if (O->isConvertibleTo(Kind, Specificity, LeastDerivedKind))
+ return true;
+ }
+ return false;
+ }
+
+private:
+ std::vector<std::unique_ptr<MatcherDescriptor>> Overloads;
+};
+
+/// \brief Variadic operator marshaller function.
+class VariadicOperatorMatcherDescriptor : public MatcherDescriptor {
+public:
+ typedef DynTypedMatcher::VariadicOperator VarOp;
+ VariadicOperatorMatcherDescriptor(unsigned MinCount, unsigned MaxCount,
+ VarOp Op, StringRef MatcherName)
+ : MinCount(MinCount), MaxCount(MaxCount), Op(Op),
+ MatcherName(MatcherName) {}
+
+ VariantMatcher create(SourceRange NameRange,
+ ArrayRef<ParserValue> Args,
+ Diagnostics *Error) const override {
+ if (Args.size() < MinCount || MaxCount < Args.size()) {
+ const std::string MaxStr =
+ (MaxCount == UINT_MAX ? "" : Twine(MaxCount)).str();
+ Error->addError(NameRange, Error->ET_RegistryWrongArgCount)
+ << ("(" + Twine(MinCount) + ", " + MaxStr + ")") << Args.size();
+ return VariantMatcher();
+ }
+
+ std::vector<VariantMatcher> InnerArgs;
+ for (size_t i = 0, e = Args.size(); i != e; ++i) {
+ const ParserValue &Arg = Args[i];
+ const VariantValue &Value = Arg.Value;
+ if (!Value.isMatcher()) {
+ Error->addError(Arg.Range, Error->ET_RegistryWrongArgType)
+ << (i + 1) << "Matcher<>" << Value.getTypeAsString();
+ return VariantMatcher();
+ }
+ InnerArgs.push_back(Value.getMatcher());
+ }
+ return VariantMatcher::VariadicOperatorMatcher(Op, std::move(InnerArgs));
+ }
+
+ bool isVariadic() const override { return true; }
+ unsigned getNumArgs() const override { return 0; }
+ void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
+ std::vector<ArgKind> &Kinds) const override {
+ Kinds.push_back(ThisKind);
+ }
+ bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
+ ast_type_traits::ASTNodeKind *LeastDerivedKind) const override {
+ if (Specificity)
+ *Specificity = 1;
+ if (LeastDerivedKind)
+ *LeastDerivedKind = Kind;
+ return true;
+ }
+ bool isPolymorphic() const override { return true; }
+
+private:
+ const unsigned MinCount;
+ const unsigned MaxCount;
+ const VarOp Op;
+ const StringRef MatcherName;
+};
+
+/// Helper functions to select the appropriate marshaller functions.
+/// They detect the number of arguments, the argument types and the return
+/// type.
+
+/// \brief 0-arg overload
+template <typename ReturnType>
+MatcherDescriptor *makeMatcherAutoMarshall(ReturnType (*Func)(),
+ StringRef MatcherName) {
+ std::vector<ast_type_traits::ASTNodeKind> RetTypes;
+ BuildReturnTypeVector<ReturnType>::build(RetTypes);
+ return new FixedArgCountMatcherDescriptor(
+ matcherMarshall0<ReturnType>, reinterpret_cast<void (*)()>(Func),
+ MatcherName, RetTypes, None);
+}
+
+/// \brief 1-arg overload
+template <typename ReturnType, typename ArgType1>
+MatcherDescriptor *makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1),
+ StringRef MatcherName) {
+ std::vector<ast_type_traits::ASTNodeKind> RetTypes;
+ BuildReturnTypeVector<ReturnType>::build(RetTypes);
+ ArgKind AK = ArgTypeTraits<ArgType1>::getKind();
+ return new FixedArgCountMatcherDescriptor(
+ matcherMarshall1<ReturnType, ArgType1>,
+ reinterpret_cast<void (*)()>(Func), MatcherName, RetTypes, AK);
+}
+
+/// \brief 2-arg overload
+template <typename ReturnType, typename ArgType1, typename ArgType2>
+MatcherDescriptor *makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1, ArgType2),
+ StringRef MatcherName) {
+ std::vector<ast_type_traits::ASTNodeKind> RetTypes;
+ BuildReturnTypeVector<ReturnType>::build(RetTypes);
+ ArgKind AKs[] = { ArgTypeTraits<ArgType1>::getKind(),
+ ArgTypeTraits<ArgType2>::getKind() };
+ return new FixedArgCountMatcherDescriptor(
+ matcherMarshall2<ReturnType, ArgType1, ArgType2>,
+ reinterpret_cast<void (*)()>(Func), MatcherName, RetTypes, AKs);
+}
+
+/// \brief Variadic overload.
+template <typename ResultT, typename ArgT,
+ ResultT (*Func)(ArrayRef<const ArgT *>)>
+MatcherDescriptor *
+makeMatcherAutoMarshall(llvm::VariadicFunction<ResultT, ArgT, Func> VarFunc,
+ StringRef MatcherName) {
+ return new VariadicFuncMatcherDescriptor(VarFunc, MatcherName);
+}
+
+/// \brief Overload for VariadicDynCastAllOfMatchers.
+///
+/// Not strictly necessary, but DynCastAllOfMatcherDescriptor gives us better
+/// completion results for that type of matcher.
+template <typename BaseT, typename DerivedT>
+MatcherDescriptor *
+makeMatcherAutoMarshall(ast_matchers::internal::VariadicDynCastAllOfMatcher<
+ BaseT, DerivedT> VarFunc,
+ StringRef MatcherName) {
+ return new DynCastAllOfMatcherDescriptor(VarFunc, MatcherName);
+}
+
+/// \brief Argument-adapting overload.
+template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
+ typename FromTypes, typename ToTypes>
+MatcherDescriptor *
+makeMatcherAutoMarshall(ast_matchers::internal::ArgumentAdaptingMatcherFunc<
+ ArgumentAdapterT, FromTypes, ToTypes>,
+ StringRef MatcherName) {
+ std::vector<MatcherDescriptor *> Overloads;
+ AdaptativeOverloadCollector<ArgumentAdapterT, FromTypes, ToTypes>(MatcherName,
+ Overloads);
+ return new OverloadedMatcherDescriptor(Overloads);
+}
+
+template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
+ typename FromTypes, typename ToTypes>
+template <typename FromTypeList>
+inline void AdaptativeOverloadCollector<ArgumentAdapterT, FromTypes,
+ ToTypes>::collect(FromTypeList) {
+ Out.push_back(makeMatcherAutoMarshall(
+ &AdaptativeFunc::template create<typename FromTypeList::head>, Name));
+ collect(typename FromTypeList::tail());
+}
+
+/// \brief Variadic operator overload.
+template <unsigned MinCount, unsigned MaxCount>
+MatcherDescriptor *
+makeMatcherAutoMarshall(ast_matchers::internal::VariadicOperatorMatcherFunc<
+ MinCount, MaxCount> Func,
+ StringRef MatcherName) {
+ return new VariadicOperatorMatcherDescriptor(MinCount, MaxCount, Func.Op,
+ MatcherName);
+}
+
+} // namespace internal
+} // namespace dynamic
+} // namespace ast_matchers
+} // namespace clang
+
+#endif // LLVM_CLANG_AST_MATCHERS_DYNAMIC_MARSHALLERS_H
diff --git a/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Parser.cpp b/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Parser.cpp
new file mode 100644
index 0000000..cf9dab6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Parser.cpp
@@ -0,0 +1,613 @@
+//===--- Parser.cpp - Matcher expression parser -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Recursive parser implementation for the matcher expression grammar.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/ASTMatchers/Dynamic/Parser.h"
+#include "clang/ASTMatchers/Dynamic/Registry.h"
+#include "clang/Basic/CharInfo.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/ManagedStatic.h"
+#include <algorithm>
+#include <string>
+#include <vector>
+
+namespace clang {
+namespace ast_matchers {
+namespace dynamic {
+
+/// \brief Simple structure to hold information for one token from the parser.
+struct Parser::TokenInfo {
+ /// \brief Different possible tokens.
+ enum TokenKind {
+ TK_Eof,
+ TK_OpenParen,
+ TK_CloseParen,
+ TK_Comma,
+ TK_Period,
+ TK_Literal,
+ TK_Ident,
+ TK_InvalidChar,
+ TK_Error,
+ TK_CodeCompletion
+ };
+
+ /// \brief Some known identifiers.
+ static const char* const ID_Bind;
+
+ TokenInfo() : Text(), Kind(TK_Eof), Range(), Value() {}
+
+ StringRef Text;
+ TokenKind Kind;
+ SourceRange Range;
+ VariantValue Value;
+};
+
+const char* const Parser::TokenInfo::ID_Bind = "bind";
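+
+// For illustration, tokenizing the input `hasName("x")` produces the token
+// stream: TK_Ident("hasName"), TK_OpenParen, TK_Literal(Text: "\"x\"",
+// Value: "x"), TK_CloseParen, followed by TK_Eof.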
+
+/// \brief Simple tokenizer for the parser.
+class Parser::CodeTokenizer {
+public:
+ explicit CodeTokenizer(StringRef MatcherCode, Diagnostics *Error)
+ : Code(MatcherCode), StartOfLine(MatcherCode), Line(1), Error(Error),
+ CodeCompletionLocation(nullptr) {
+ NextToken = getNextToken();
+ }
+
+ CodeTokenizer(StringRef MatcherCode, Diagnostics *Error,
+ unsigned CodeCompletionOffset)
+ : Code(MatcherCode), StartOfLine(MatcherCode), Line(1), Error(Error),
+ CodeCompletionLocation(MatcherCode.data() + CodeCompletionOffset) {
+ NextToken = getNextToken();
+ }
+
+ /// \brief Returns but doesn't consume the next token.
+ const TokenInfo &peekNextToken() const { return NextToken; }
+
+ /// \brief Consumes and returns the next token.
+ TokenInfo consumeNextToken() {
+ TokenInfo ThisToken = NextToken;
+ NextToken = getNextToken();
+ return ThisToken;
+ }
+
+ TokenInfo::TokenKind nextTokenKind() const { return NextToken.Kind; }
+
+private:
+ TokenInfo getNextToken() {
+ consumeWhitespace();
+ TokenInfo Result;
+ Result.Range.Start = currentLocation();
+
+ if (CodeCompletionLocation && CodeCompletionLocation <= Code.data()) {
+ Result.Kind = TokenInfo::TK_CodeCompletion;
+ Result.Text = StringRef(CodeCompletionLocation, 0);
+ CodeCompletionLocation = nullptr;
+ return Result;
+ }
+
+ if (Code.empty()) {
+ Result.Kind = TokenInfo::TK_Eof;
+ Result.Text = "";
+ return Result;
+ }
+
+ switch (Code[0]) {
+ case ',':
+ Result.Kind = TokenInfo::TK_Comma;
+ Result.Text = Code.substr(0, 1);
+ Code = Code.drop_front();
+ break;
+ case '.':
+ Result.Kind = TokenInfo::TK_Period;
+ Result.Text = Code.substr(0, 1);
+ Code = Code.drop_front();
+ break;
+ case '(':
+ Result.Kind = TokenInfo::TK_OpenParen;
+ Result.Text = Code.substr(0, 1);
+ Code = Code.drop_front();
+ break;
+ case ')':
+ Result.Kind = TokenInfo::TK_CloseParen;
+ Result.Text = Code.substr(0, 1);
+ Code = Code.drop_front();
+ break;
+
+ case '"':
+ case '\'':
+ // Parse a string literal.
+ consumeStringLiteral(&Result);
+ break;
+
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ // Parse an unsigned literal.
+ consumeUnsignedLiteral(&Result);
+ break;
+
+ default:
+ if (isAlphanumeric(Code[0])) {
+ // Parse an identifier
+ size_t TokenLength = 1;
+ while (true) {
+ // A code completion location in/immediately after an identifier will
+ // cause the portion of the identifier before the code completion
+ // location to become a code completion token.
+ if (CodeCompletionLocation == Code.data() + TokenLength) {
+ CodeCompletionLocation = nullptr;
+ Result.Kind = TokenInfo::TK_CodeCompletion;
+ Result.Text = Code.substr(0, TokenLength);
+ Code = Code.drop_front(TokenLength);
+ return Result;
+ }
+ if (TokenLength == Code.size() || !isAlphanumeric(Code[TokenLength]))
+ break;
+ ++TokenLength;
+ }
+ Result.Kind = TokenInfo::TK_Ident;
+ Result.Text = Code.substr(0, TokenLength);
+ Code = Code.drop_front(TokenLength);
+ } else {
+ Result.Kind = TokenInfo::TK_InvalidChar;
+ Result.Text = Code.substr(0, 1);
+ Code = Code.drop_front(1);
+ }
+ break;
+ }
+
+ Result.Range.End = currentLocation();
+ return Result;
+ }
+
+ /// \brief Consume an unsigned literal.
+ void consumeUnsignedLiteral(TokenInfo *Result) {
+ unsigned Length = 1;
+ if (Code.size() > 1) {
+ // Consume the 'x' or 'b' radix modifier, if present.
+ switch (toLowercase(Code[1])) {
+ case 'x': case 'b': Length = 2;
+ }
+ }
+ while (Length < Code.size() && isHexDigit(Code[Length]))
+ ++Length;
+
+ Result->Text = Code.substr(0, Length);
+ Code = Code.drop_front(Length);
+
+ unsigned Value;
+ if (!Result->Text.getAsInteger(0, Value)) {
+ Result->Kind = TokenInfo::TK_Literal;
+ Result->Value = Value;
+ } else {
+ SourceRange Range;
+ Range.Start = Result->Range.Start;
+ Range.End = currentLocation();
+ Error->addError(Range, Error->ET_ParserUnsignedError) << Result->Text;
+ Result->Kind = TokenInfo::TK_Error;
+ }
+ }
+
+ /// \brief Consume a string literal.
+ ///
+ /// \c Code must be positioned at the start of the literal (the opening
+ /// quote). Code is consumed until the matching closing quote character is
+ /// found.
+ void consumeStringLiteral(TokenInfo *Result) {
+ bool InEscape = false;
+ const char Marker = Code[0];
+ for (size_t Length = 1, Size = Code.size(); Length != Size; ++Length) {
+ if (InEscape) {
+ InEscape = false;
+ continue;
+ }
+ if (Code[Length] == '\\') {
+ InEscape = true;
+ continue;
+ }
+ if (Code[Length] == Marker) {
+ Result->Kind = TokenInfo::TK_Literal;
+ Result->Text = Code.substr(0, Length + 1);
+ Result->Value = Code.substr(1, Length - 1);
+ Code = Code.drop_front(Length + 1);
+ return;
+ }
+ }
+
+ StringRef ErrorText = Code;
+ Code = Code.drop_front(Code.size());
+ SourceRange Range;
+ Range.Start = Result->Range.Start;
+ Range.End = currentLocation();
+ Error->addError(Range, Error->ET_ParserStringError) << ErrorText;
+ Result->Kind = TokenInfo::TK_Error;
+ }
+
+ /// \brief Consume all leading whitespace from \c Code.
+ void consumeWhitespace() {
+ while (!Code.empty() && isWhitespace(Code[0])) {
+ if (Code[0] == '\n') {
+ ++Line;
+ StartOfLine = Code.drop_front();
+ }
+ Code = Code.drop_front();
+ }
+ }
+
+ SourceLocation currentLocation() {
+ SourceLocation Location;
+ Location.Line = Line;
+ Location.Column = Code.data() - StartOfLine.data() + 1;
+ return Location;
+ }
+
+ StringRef Code;
+ StringRef StartOfLine;
+ unsigned Line;
+ Diagnostics *Error;
+ TokenInfo NextToken;
+ const char *CodeCompletionLocation;
+};
+
+Parser::Sema::~Sema() {}
+
+std::vector<ArgKind> Parser::Sema::getAcceptedCompletionTypes(
+ llvm::ArrayRef<std::pair<MatcherCtor, unsigned>> Context) {
+ return std::vector<ArgKind>();
+}
+
+std::vector<MatcherCompletion>
+Parser::Sema::getMatcherCompletions(llvm::ArrayRef<ArgKind> AcceptedTypes) {
+ return std::vector<MatcherCompletion>();
+}
+
+struct Parser::ScopedContextEntry {
+ Parser *P;
+
+ ScopedContextEntry(Parser *P, MatcherCtor C) : P(P) {
+ P->ContextStack.push_back(std::make_pair(C, 0u));
+ }
+
+ ~ScopedContextEntry() {
+ P->ContextStack.pop_back();
+ }
+
+ void nextArg() {
+ ++P->ContextStack.back().second;
+ }
+};
+
+/// \brief Parse expressions that start with an identifier.
+///
+/// This function can parse named values and matchers.
+/// In case of failure, it will try to determine the user's intent and give
+/// an appropriate error message.
+bool Parser::parseIdentifierPrefixImpl(VariantValue *Value) {
+ const TokenInfo NameToken = Tokenizer->consumeNextToken();
+
+ if (Tokenizer->nextTokenKind() != TokenInfo::TK_OpenParen) {
+ // Parse as a named value.
+ if (const VariantValue NamedValue =
+ NamedValues ? NamedValues->lookup(NameToken.Text)
+ : VariantValue()) {
+ *Value = NamedValue;
+ return true;
+ }
+ // If the syntax is correct and the name is not a matcher either, report
+ // unknown named value.
+ if ((Tokenizer->nextTokenKind() == TokenInfo::TK_Comma ||
+ Tokenizer->nextTokenKind() == TokenInfo::TK_CloseParen ||
+ Tokenizer->nextTokenKind() == TokenInfo::TK_Eof) &&
+ !S->lookupMatcherCtor(NameToken.Text)) {
+ Error->addError(NameToken.Range, Error->ET_RegistryValueNotFound)
+ << NameToken.Text;
+ return false;
+ }
+ // Otherwise, fallback to the matcher parser.
+ }
+
+ // Parse as a matcher expression.
+ return parseMatcherExpressionImpl(NameToken, Value);
+}
+
+/// \brief Parse and validate a matcher expression.
+/// \return \c true on success, in which case \c Value holds the parsed
+/// matcher. If the input is malformed, or some argument has an error, it
+/// returns \c false.
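+///
+/// The grammar, informally, is:
+///   Identifier '(' ( Expression ( ',' Expression )* )? ')'
+///     ( '.' 'bind' '(' StringLiteral ')' )?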
+bool Parser::parseMatcherExpressionImpl(const TokenInfo &NameToken,
+ VariantValue *Value) {
+ assert(NameToken.Kind == TokenInfo::TK_Ident);
+ const TokenInfo OpenToken = Tokenizer->consumeNextToken();
+ if (OpenToken.Kind != TokenInfo::TK_OpenParen) {
+ Error->addError(OpenToken.Range, Error->ET_ParserNoOpenParen)
+ << OpenToken.Text;
+ return false;
+ }
+
+ llvm::Optional<MatcherCtor> Ctor = S->lookupMatcherCtor(NameToken.Text);
+
+ if (!Ctor) {
+ Error->addError(NameToken.Range, Error->ET_RegistryMatcherNotFound)
+ << NameToken.Text;
+ // Do not return here. We need to continue to give completion suggestions.
+ }
+
+ std::vector<ParserValue> Args;
+ TokenInfo EndToken;
+
+ {
+ ScopedContextEntry SCE(this, Ctor ? *Ctor : nullptr);
+
+ while (Tokenizer->nextTokenKind() != TokenInfo::TK_Eof) {
+ if (Tokenizer->nextTokenKind() == TokenInfo::TK_CloseParen) {
+ // End of args.
+ EndToken = Tokenizer->consumeNextToken();
+ break;
+ }
+ if (!Args.empty()) {
+ // We must find a ',' token to continue.
+ const TokenInfo CommaToken = Tokenizer->consumeNextToken();
+ if (CommaToken.Kind != TokenInfo::TK_Comma) {
+ Error->addError(CommaToken.Range, Error->ET_ParserNoComma)
+ << CommaToken.Text;
+ return false;
+ }
+ }
+
+ Diagnostics::Context Ctx(Diagnostics::Context::MatcherArg, Error,
+ NameToken.Text, NameToken.Range,
+ Args.size() + 1);
+ ParserValue ArgValue;
+ ArgValue.Text = Tokenizer->peekNextToken().Text;
+ ArgValue.Range = Tokenizer->peekNextToken().Range;
+ if (!parseExpressionImpl(&ArgValue.Value)) {
+ return false;
+ }
+
+ Args.push_back(ArgValue);
+ SCE.nextArg();
+ }
+ }
+
+ if (EndToken.Kind == TokenInfo::TK_Eof) {
+ Error->addError(OpenToken.Range, Error->ET_ParserNoCloseParen);
+ return false;
+ }
+
+ std::string BindID;
+ if (Tokenizer->peekNextToken().Kind == TokenInfo::TK_Period) {
+ // Parse .bind("foo")
+ Tokenizer->consumeNextToken(); // consume the period.
+ const TokenInfo BindToken = Tokenizer->consumeNextToken();
+ if (BindToken.Kind == TokenInfo::TK_CodeCompletion) {
+ addCompletion(BindToken, MatcherCompletion("bind(\"", "bind", 1));
+ return false;
+ }
+
+ const TokenInfo OpenToken = Tokenizer->consumeNextToken();
+ const TokenInfo IDToken = Tokenizer->consumeNextToken();
+ const TokenInfo CloseToken = Tokenizer->consumeNextToken();
+
+ // TODO: We could use different error codes for each/some of these checks to
+ // be more explicit about the syntax error.
+ if (BindToken.Kind != TokenInfo::TK_Ident ||
+ BindToken.Text != TokenInfo::ID_Bind) {
+ Error->addError(BindToken.Range, Error->ET_ParserMalformedBindExpr);
+ return false;
+ }
+ if (OpenToken.Kind != TokenInfo::TK_OpenParen) {
+ Error->addError(OpenToken.Range, Error->ET_ParserMalformedBindExpr);
+ return false;
+ }
+ if (IDToken.Kind != TokenInfo::TK_Literal || !IDToken.Value.isString()) {
+ Error->addError(IDToken.Range, Error->ET_ParserMalformedBindExpr);
+ return false;
+ }
+ if (CloseToken.Kind != TokenInfo::TK_CloseParen) {
+ Error->addError(CloseToken.Range, Error->ET_ParserMalformedBindExpr);
+ return false;
+ }
+ BindID = IDToken.Value.getString();
+ }
+
+ if (!Ctor)
+ return false;
+
+ // Merge the start and end infos.
+ Diagnostics::Context Ctx(Diagnostics::Context::ConstructMatcher, Error,
+ NameToken.Text, NameToken.Range);
+ SourceRange MatcherRange = NameToken.Range;
+ MatcherRange.End = EndToken.Range.End;
+ VariantMatcher Result = S->actOnMatcherExpression(
+ *Ctor, MatcherRange, BindID, Args, Error);
+ if (Result.isNull()) return false;
+
+ *Value = Result;
+ return true;
+}
+
+// If the prefix of this completion matches the completion token, add it to
+// Completions minus the prefix.
+void Parser::addCompletion(const TokenInfo &CompToken,
+ const MatcherCompletion& Completion) {
+ if (StringRef(Completion.TypedText).startswith(CompToken.Text) &&
+ Completion.Specificity > 0) {
+ Completions.emplace_back(Completion.TypedText.substr(CompToken.Text.size()),
+ Completion.MatcherDecl, Completion.Specificity);
+ }
+}
+
+std::vector<MatcherCompletion> Parser::getNamedValueCompletions(
+ ArrayRef<ArgKind> AcceptedTypes) {
+ if (!NamedValues) return std::vector<MatcherCompletion>();
+ std::vector<MatcherCompletion> Result;
+ for (const auto &Entry : *NamedValues) {
+ unsigned Specificity;
+ if (Entry.getValue().isConvertibleTo(AcceptedTypes, &Specificity)) {
+ std::string Decl =
+ (Entry.getValue().getTypeAsString() + " " + Entry.getKey()).str();
+ Result.emplace_back(Entry.getKey(), Decl, Specificity);
+ }
+ }
+ return Result;
+}
+
+void Parser::addExpressionCompletions() {
+ const TokenInfo CompToken = Tokenizer->consumeNextToken();
+ assert(CompToken.Kind == TokenInfo::TK_CodeCompletion);
+
+ // We cannot complete code if there is an invalid element on the context
+ // stack.
+ for (ContextStackTy::iterator I = ContextStack.begin(),
+ E = ContextStack.end();
+ I != E; ++I) {
+ if (!I->first)
+ return;
+ }
+
+ auto AcceptedTypes = S->getAcceptedCompletionTypes(ContextStack);
+ for (const auto &Completion : S->getMatcherCompletions(AcceptedTypes)) {
+ addCompletion(CompToken, Completion);
+ }
+
+ for (const auto &Completion : getNamedValueCompletions(AcceptedTypes)) {
+ addCompletion(CompToken, Completion);
+ }
+}
+
+/// \brief Parse an <Expression>.
+bool Parser::parseExpressionImpl(VariantValue *Value) {
+ switch (Tokenizer->nextTokenKind()) {
+ case TokenInfo::TK_Literal:
+ *Value = Tokenizer->consumeNextToken().Value;
+ return true;
+
+ case TokenInfo::TK_Ident:
+ return parseIdentifierPrefixImpl(Value);
+
+ case TokenInfo::TK_CodeCompletion:
+ addExpressionCompletions();
+ return false;
+
+ case TokenInfo::TK_Eof:
+ Error->addError(Tokenizer->consumeNextToken().Range,
+ Error->ET_ParserNoCode);
+ return false;
+
+ case TokenInfo::TK_Error:
+ // This error was already reported by the tokenizer.
+ return false;
+
+ case TokenInfo::TK_OpenParen:
+ case TokenInfo::TK_CloseParen:
+ case TokenInfo::TK_Comma:
+ case TokenInfo::TK_Period:
+ case TokenInfo::TK_InvalidChar:
+ const TokenInfo Token = Tokenizer->consumeNextToken();
+ Error->addError(Token.Range, Error->ET_ParserInvalidToken) << Token.Text;
+ return false;
+ }
+
+ llvm_unreachable("Unknown token kind.");
+}
+
+static llvm::ManagedStatic<Parser::RegistrySema> DefaultRegistrySema;
+
+Parser::Parser(CodeTokenizer *Tokenizer, Sema *S,
+ const NamedValueMap *NamedValues, Diagnostics *Error)
+ : Tokenizer(Tokenizer), S(S ? S : &*DefaultRegistrySema),
+ NamedValues(NamedValues), Error(Error) {}
+
+Parser::RegistrySema::~RegistrySema() {}
+
+llvm::Optional<MatcherCtor>
+Parser::RegistrySema::lookupMatcherCtor(StringRef MatcherName) {
+ return Registry::lookupMatcherCtor(MatcherName);
+}
+
+VariantMatcher Parser::RegistrySema::actOnMatcherExpression(
+ MatcherCtor Ctor, SourceRange NameRange, StringRef BindID,
+ ArrayRef<ParserValue> Args, Diagnostics *Error) {
+ if (BindID.empty()) {
+ return Registry::constructMatcher(Ctor, NameRange, Args, Error);
+ } else {
+ return Registry::constructBoundMatcher(Ctor, NameRange, BindID, Args,
+ Error);
+ }
+}
+
+std::vector<ArgKind> Parser::RegistrySema::getAcceptedCompletionTypes(
+ ArrayRef<std::pair<MatcherCtor, unsigned>> Context) {
+ return Registry::getAcceptedCompletionTypes(Context);
+}
+
+std::vector<MatcherCompletion> Parser::RegistrySema::getMatcherCompletions(
+ ArrayRef<ArgKind> AcceptedTypes) {
+ return Registry::getMatcherCompletions(AcceptedTypes);
+}
+
+bool Parser::parseExpression(StringRef Code, Sema *S,
+ const NamedValueMap *NamedValues,
+ VariantValue *Value, Diagnostics *Error) {
+ CodeTokenizer Tokenizer(Code, Error);
+ if (!Parser(&Tokenizer, S, NamedValues, Error).parseExpressionImpl(Value))
+ return false;
+ if (Tokenizer.peekNextToken().Kind != TokenInfo::TK_Eof) {
+ Error->addError(Tokenizer.peekNextToken().Range,
+ Error->ET_ParserTrailingCode);
+ return false;
+ }
+ return true;
+}
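+
+// A minimal usage sketch (the matcher text and variable names are ours, not
+// part of this file); a null Sema selects the default registry Sema:
+//   Diagnostics Err;
+//   VariantValue V;
+//   if (Parser::parseExpression("varDecl(hasName(\"X\"))", nullptr, nullptr,
+//                               &V, &Err))
+//     /* V.getMatcher() holds the parsed matcher */;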
+
+std::vector<MatcherCompletion>
+Parser::completeExpression(StringRef Code, unsigned CompletionOffset, Sema *S,
+ const NamedValueMap *NamedValues) {
+ Diagnostics Error;
+ CodeTokenizer Tokenizer(Code, &Error, CompletionOffset);
+ Parser P(&Tokenizer, S, NamedValues, &Error);
+ VariantValue Dummy;
+ P.parseExpressionImpl(&Dummy);
+
+ // Sort by specificity, then by name.
+ std::sort(P.Completions.begin(), P.Completions.end(),
+ [](const MatcherCompletion &A, const MatcherCompletion &B) {
+ if (A.Specificity != B.Specificity)
+ return A.Specificity > B.Specificity;
+ return A.TypedText < B.TypedText;
+ });
+
+ return P.Completions;
+}
+
+llvm::Optional<DynTypedMatcher>
+Parser::parseMatcherExpression(StringRef Code, Sema *S,
+ const NamedValueMap *NamedValues,
+ Diagnostics *Error) {
+ VariantValue Value;
+ if (!parseExpression(Code, S, NamedValues, &Value, Error))
+ return llvm::Optional<DynTypedMatcher>();
+ if (!Value.isMatcher()) {
+ Error->addError(SourceRange(), Error->ET_ParserNotAMatcher);
+ return llvm::Optional<DynTypedMatcher>();
+ }
+ llvm::Optional<DynTypedMatcher> Result =
+ Value.getMatcher().getSingleMatcher();
+ if (!Result.hasValue()) {
+ Error->addError(SourceRange(), Error->ET_ParserOverloadedType)
+ << Value.getTypeAsString();
+ }
+ return Result;
+}
+
+} // namespace dynamic
+} // namespace ast_matchers
+} // namespace clang
diff --git a/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Registry.cpp b/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Registry.cpp
new file mode 100644
index 0000000..5b1c552
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -0,0 +1,559 @@
+//===--- Registry.cpp - Matcher registry -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===------------------------------------------------------------===//
+///
+/// \file
+/// \brief Registry map populated at static initialization time.
+///
+//===------------------------------------------------------------===//
+
+#include "clang/ASTMatchers/Dynamic/Registry.h"
+#include "Marshallers.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/ManagedStatic.h"
+#include <set>
+#include <utility>
+
+using namespace clang::ast_type_traits;
+
+namespace clang {
+namespace ast_matchers {
+namespace dynamic {
+namespace {
+
+using internal::MatcherDescriptor;
+
+typedef llvm::StringMap<const MatcherDescriptor *> ConstructorMap;
+class RegistryMaps {
+public:
+ RegistryMaps();
+ ~RegistryMaps();
+
+ const ConstructorMap &constructors() const { return Constructors; }
+
+private:
+ void registerMatcher(StringRef MatcherName, MatcherDescriptor *Callback);
+ ConstructorMap Constructors;
+};
+
+void RegistryMaps::registerMatcher(StringRef MatcherName,
+ MatcherDescriptor *Callback) {
+ assert(Constructors.find(MatcherName) == Constructors.end());
+ Constructors[MatcherName] = Callback;
+}
+
+#define REGISTER_MATCHER(name) \
+ registerMatcher(#name, internal::makeMatcherAutoMarshall( \
+ ::clang::ast_matchers::name, #name));
+
+#define SPECIFIC_MATCHER_OVERLOAD(name, Id) \
+ static_cast< ::clang::ast_matchers::name##_Type##Id>( \
+ ::clang::ast_matchers::name)
+
+#define REGISTER_OVERLOADED_2(name) \
+ do { \
+ MatcherDescriptor *Callbacks[] = { \
+ internal::makeMatcherAutoMarshall(SPECIFIC_MATCHER_OVERLOAD(name, 0), \
+ #name), \
+ internal::makeMatcherAutoMarshall(SPECIFIC_MATCHER_OVERLOAD(name, 1), \
+ #name) \
+ }; \
+ registerMatcher(#name, \
+ new internal::OverloadedMatcherDescriptor(Callbacks)); \
+ } while (0)
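+
+// For illustration, REGISTER_MATCHER(ifStmt) expands to
+//   registerMatcher("ifStmt", internal::makeMatcherAutoMarshall(
+//       ::clang::ast_matchers::ifStmt, "ifStmt"));
+// so each name below becomes a ConstructorMap entry keyed by its spelling.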
+
+/// \brief Generate a registry map with all the known matchers.
+RegistryMaps::RegistryMaps() {
+ // TODO: Here is the list of the missing matchers, grouped by reason.
+ //
+ // Need Variant/Parser fixes:
+ // ofKind
+ //
+ // Polymorphic + argument overload:
+ // findAll
+ //
+ // Other:
+ // equals
+ // equalsNode
+
+ REGISTER_OVERLOADED_2(callee);
+ REGISTER_OVERLOADED_2(hasPrefix);
+ REGISTER_OVERLOADED_2(hasType);
+ REGISTER_OVERLOADED_2(isDerivedFrom);
+ REGISTER_OVERLOADED_2(isSameOrDerivedFrom);
+ REGISTER_OVERLOADED_2(loc);
+ REGISTER_OVERLOADED_2(pointsTo);
+ REGISTER_OVERLOADED_2(references);
+ REGISTER_OVERLOADED_2(thisPointerType);
+
+ REGISTER_MATCHER(accessSpecDecl);
+ REGISTER_MATCHER(alignOfExpr);
+ REGISTER_MATCHER(allOf);
+ REGISTER_MATCHER(anyOf);
+ REGISTER_MATCHER(anything);
+ REGISTER_MATCHER(argumentCountIs);
+ REGISTER_MATCHER(arraySubscriptExpr);
+ REGISTER_MATCHER(arrayType);
+ REGISTER_MATCHER(asmStmt);
+ REGISTER_MATCHER(asString);
+ REGISTER_MATCHER(atomicType);
+ REGISTER_MATCHER(autoType);
+ REGISTER_MATCHER(binaryOperator);
+ REGISTER_MATCHER(blockPointerType);
+ REGISTER_MATCHER(booleanType);
+ REGISTER_MATCHER(breakStmt);
+ REGISTER_MATCHER(builtinType);
+ REGISTER_MATCHER(callExpr);
+ REGISTER_MATCHER(caseStmt);
+ REGISTER_MATCHER(castExpr);
+ REGISTER_MATCHER(characterLiteral);
+ REGISTER_MATCHER(classTemplateDecl);
+ REGISTER_MATCHER(classTemplateSpecializationDecl);
+ REGISTER_MATCHER(complexType);
+ REGISTER_MATCHER(compoundLiteralExpr);
+ REGISTER_MATCHER(compoundStmt);
+ REGISTER_MATCHER(conditionalOperator);
+ REGISTER_MATCHER(constantArrayType);
+ REGISTER_MATCHER(containsDeclaration);
+ REGISTER_MATCHER(continueStmt);
+ REGISTER_MATCHER(cStyleCastExpr);
+ REGISTER_MATCHER(cudaKernelCallExpr);
+ REGISTER_MATCHER(cxxBindTemporaryExpr);
+ REGISTER_MATCHER(cxxBoolLiteral);
+ REGISTER_MATCHER(cxxCatchStmt);
+ REGISTER_MATCHER(cxxConstCastExpr);
+ REGISTER_MATCHER(cxxConstructExpr);
+ REGISTER_MATCHER(cxxConstructorDecl);
+ REGISTER_MATCHER(cxxConversionDecl);
+ REGISTER_MATCHER(cxxCtorInitializer);
+ REGISTER_MATCHER(cxxDefaultArgExpr);
+ REGISTER_MATCHER(cxxDeleteExpr);
+ REGISTER_MATCHER(cxxDestructorDecl);
+ REGISTER_MATCHER(cxxDynamicCastExpr);
+ REGISTER_MATCHER(cxxForRangeStmt);
+ REGISTER_MATCHER(cxxFunctionalCastExpr);
+ REGISTER_MATCHER(cxxMemberCallExpr);
+ REGISTER_MATCHER(cxxMethodDecl);
+ REGISTER_MATCHER(cxxNewExpr);
+ REGISTER_MATCHER(cxxNullPtrLiteralExpr);
+ REGISTER_MATCHER(cxxOperatorCallExpr);
+ REGISTER_MATCHER(cxxRecordDecl);
+ REGISTER_MATCHER(cxxReinterpretCastExpr);
+ REGISTER_MATCHER(cxxStaticCastExpr);
+ REGISTER_MATCHER(cxxTemporaryObjectExpr);
+ REGISTER_MATCHER(cxxThisExpr);
+ REGISTER_MATCHER(cxxThrowExpr);
+ REGISTER_MATCHER(cxxTryStmt);
+ REGISTER_MATCHER(cxxUnresolvedConstructExpr);
+ REGISTER_MATCHER(decayedType);
+ REGISTER_MATCHER(decl);
+ REGISTER_MATCHER(declaratorDecl);
+ REGISTER_MATCHER(declCountIs);
+ REGISTER_MATCHER(declRefExpr);
+ REGISTER_MATCHER(declStmt);
+ REGISTER_MATCHER(defaultStmt);
+ REGISTER_MATCHER(dependentSizedArrayType);
+ REGISTER_MATCHER(doStmt);
+ REGISTER_MATCHER(eachOf);
+ REGISTER_MATCHER(elaboratedType);
+ REGISTER_MATCHER(enumConstantDecl);
+ REGISTER_MATCHER(enumDecl);
+ REGISTER_MATCHER(equalsBoundNode);
+ REGISTER_MATCHER(equalsIntegralValue);
+ REGISTER_MATCHER(explicitCastExpr);
+ REGISTER_MATCHER(expr);
+ REGISTER_MATCHER(exprWithCleanups);
+ REGISTER_MATCHER(fieldDecl);
+ REGISTER_MATCHER(floatLiteral);
+ REGISTER_MATCHER(forEach);
+ REGISTER_MATCHER(forEachConstructorInitializer);
+ REGISTER_MATCHER(forEachDescendant);
+ REGISTER_MATCHER(forEachSwitchCase);
+ REGISTER_MATCHER(forField);
+ REGISTER_MATCHER(forStmt);
+ REGISTER_MATCHER(friendDecl);
+ REGISTER_MATCHER(functionDecl);
+ REGISTER_MATCHER(functionTemplateDecl);
+ REGISTER_MATCHER(functionType);
+ REGISTER_MATCHER(gotoStmt);
+ REGISTER_MATCHER(has);
+ REGISTER_MATCHER(hasAncestor);
+ REGISTER_MATCHER(hasAnyArgument);
+ REGISTER_MATCHER(hasAnyConstructorInitializer);
+ REGISTER_MATCHER(hasAnyParameter);
+ REGISTER_MATCHER(hasAnySubstatement);
+ REGISTER_MATCHER(hasAnyTemplateArgument);
+ REGISTER_MATCHER(hasAnyUsingShadowDecl);
+ REGISTER_MATCHER(hasArgument);
+ REGISTER_MATCHER(hasArgumentOfType);
+ REGISTER_MATCHER(hasAttr);
+ REGISTER_MATCHER(hasAutomaticStorageDuration);
+ REGISTER_MATCHER(hasBase);
+ REGISTER_MATCHER(hasBody);
+ REGISTER_MATCHER(hasCanonicalType);
+ REGISTER_MATCHER(hasCaseConstant);
+ REGISTER_MATCHER(hasCondition);
+ REGISTER_MATCHER(hasConditionVariableStatement);
+ REGISTER_MATCHER(hasDecayedType);
+ REGISTER_MATCHER(hasDeclaration);
+ REGISTER_MATCHER(hasDeclContext);
+ REGISTER_MATCHER(hasDeducedType);
+ REGISTER_MATCHER(hasDescendant);
+ REGISTER_MATCHER(hasDestinationType);
+ REGISTER_MATCHER(hasEitherOperand);
+ REGISTER_MATCHER(hasElementType);
+ REGISTER_MATCHER(hasElse);
+ REGISTER_MATCHER(hasFalseExpression);
+ REGISTER_MATCHER(hasGlobalStorage);
+ REGISTER_MATCHER(hasImplicitDestinationType);
+ REGISTER_MATCHER(hasIncrement);
+ REGISTER_MATCHER(hasIndex);
+ REGISTER_MATCHER(hasInitializer);
+ REGISTER_MATCHER(hasKeywordSelector);
+ REGISTER_MATCHER(hasLHS);
+ REGISTER_MATCHER(hasLocalQualifiers);
+ REGISTER_MATCHER(hasLocalStorage);
+ REGISTER_MATCHER(hasLoopInit);
+ REGISTER_MATCHER(hasLoopVariable);
+ REGISTER_MATCHER(hasMethod);
+ REGISTER_MATCHER(hasName);
+ REGISTER_MATCHER(hasNullSelector);
+ REGISTER_MATCHER(hasObjectExpression);
+ REGISTER_MATCHER(hasOperatorName);
+ REGISTER_MATCHER(hasOverloadedOperatorName);
+ REGISTER_MATCHER(hasParameter);
+ REGISTER_MATCHER(hasParent);
+ REGISTER_MATCHER(hasQualifier);
+ REGISTER_MATCHER(hasRangeInit);
+ REGISTER_MATCHER(hasReceiverType);
+ REGISTER_MATCHER(hasRHS);
+ REGISTER_MATCHER(hasSelector);
+ REGISTER_MATCHER(hasSingleDecl);
+ REGISTER_MATCHER(hasSize);
+ REGISTER_MATCHER(hasSizeExpr);
+ REGISTER_MATCHER(hasSourceExpression);
+ REGISTER_MATCHER(hasStaticStorageDuration);
+ REGISTER_MATCHER(hasTargetDecl);
+ REGISTER_MATCHER(hasTemplateArgument);
+ REGISTER_MATCHER(hasThen);
+ REGISTER_MATCHER(hasThreadStorageDuration);
+ REGISTER_MATCHER(hasTrueExpression);
+ REGISTER_MATCHER(hasTypeLoc);
+ REGISTER_MATCHER(hasUnaryOperand);
+ REGISTER_MATCHER(hasUnarySelector);
+ REGISTER_MATCHER(hasValueType);
+ REGISTER_MATCHER(ifStmt);
+ REGISTER_MATCHER(ignoringImpCasts);
+ REGISTER_MATCHER(ignoringParenCasts);
+ REGISTER_MATCHER(ignoringParenImpCasts);
+ REGISTER_MATCHER(implicitCastExpr);
+ REGISTER_MATCHER(incompleteArrayType);
+ REGISTER_MATCHER(initListExpr);
+ REGISTER_MATCHER(injectedClassNameType);
+ REGISTER_MATCHER(innerType);
+ REGISTER_MATCHER(integerLiteral);
+ REGISTER_MATCHER(isAnonymous);
+ REGISTER_MATCHER(isArrow);
+ REGISTER_MATCHER(isBaseInitializer);
+ REGISTER_MATCHER(isCatchAll);
+ REGISTER_MATCHER(isClass);
+ REGISTER_MATCHER(isConst);
+ REGISTER_MATCHER(isConstQualified);
+ REGISTER_MATCHER(isCopyConstructor);
+ REGISTER_MATCHER(isDefaultConstructor);
+ REGISTER_MATCHER(isDefinition);
+ REGISTER_MATCHER(isDeleted);
+ REGISTER_MATCHER(isExceptionVariable);
+ REGISTER_MATCHER(isExplicit);
+ REGISTER_MATCHER(isExplicitTemplateSpecialization);
+ REGISTER_MATCHER(isExpr);
+ REGISTER_MATCHER(isExternC);
+ REGISTER_MATCHER(isFinal);
+ REGISTER_MATCHER(isInline);
+ REGISTER_MATCHER(isImplicit);
+ REGISTER_MATCHER(isExpansionInFileMatching);
+ REGISTER_MATCHER(isExpansionInMainFile);
+ REGISTER_MATCHER(isInstantiated);
+ REGISTER_MATCHER(isExpansionInSystemHeader);
+ REGISTER_MATCHER(isInteger);
+ REGISTER_MATCHER(isIntegral);
+ REGISTER_MATCHER(isInTemplateInstantiation);
+ REGISTER_MATCHER(isListInitialization);
+ REGISTER_MATCHER(isMemberInitializer);
+ REGISTER_MATCHER(isMoveConstructor);
+ REGISTER_MATCHER(isNoThrow);
+ REGISTER_MATCHER(isOverride);
+ REGISTER_MATCHER(isPrivate);
+ REGISTER_MATCHER(isProtected);
+ REGISTER_MATCHER(isPublic);
+ REGISTER_MATCHER(isPure);
+ REGISTER_MATCHER(isStruct);
+ REGISTER_MATCHER(isTemplateInstantiation);
+ REGISTER_MATCHER(isUnion);
+ REGISTER_MATCHER(isVariadic);
+ REGISTER_MATCHER(isVirtual);
+ REGISTER_MATCHER(isVolatileQualified);
+ REGISTER_MATCHER(isWritten);
+ REGISTER_MATCHER(labelStmt);
+ REGISTER_MATCHER(lambdaExpr);
+ REGISTER_MATCHER(lValueReferenceType);
+ REGISTER_MATCHER(matchesName);
+ REGISTER_MATCHER(matchesSelector);
+ REGISTER_MATCHER(materializeTemporaryExpr);
+ REGISTER_MATCHER(member);
+ REGISTER_MATCHER(memberExpr);
+ REGISTER_MATCHER(memberPointerType);
+ REGISTER_MATCHER(namedDecl);
+ REGISTER_MATCHER(namespaceAliasDecl);
+ REGISTER_MATCHER(namespaceDecl);
+ REGISTER_MATCHER(namesType);
+ REGISTER_MATCHER(nestedNameSpecifier);
+ REGISTER_MATCHER(nestedNameSpecifierLoc);
+ REGISTER_MATCHER(nullStmt);
+ REGISTER_MATCHER(numSelectorArgs);
+ REGISTER_MATCHER(ofClass);
+ REGISTER_MATCHER(objcInterfaceDecl);
+ REGISTER_MATCHER(objcMessageExpr);
+ REGISTER_MATCHER(objcObjectPointerType);
+ REGISTER_MATCHER(on);
+ REGISTER_MATCHER(onImplicitObjectArgument);
+ REGISTER_MATCHER(parameterCountIs);
+ REGISTER_MATCHER(parenType);
+ REGISTER_MATCHER(parmVarDecl);
+ REGISTER_MATCHER(pointee);
+ REGISTER_MATCHER(pointerType);
+ REGISTER_MATCHER(qualType);
+ REGISTER_MATCHER(recordDecl);
+ REGISTER_MATCHER(recordType);
+ REGISTER_MATCHER(referenceType);
+ REGISTER_MATCHER(refersToDeclaration);
+ REGISTER_MATCHER(refersToIntegralType);
+ REGISTER_MATCHER(refersToType);
+ REGISTER_MATCHER(returns);
+ REGISTER_MATCHER(returnStmt);
+ REGISTER_MATCHER(rValueReferenceType);
+ REGISTER_MATCHER(sizeOfExpr);
+ REGISTER_MATCHER(specifiesNamespace);
+ REGISTER_MATCHER(specifiesType);
+ REGISTER_MATCHER(specifiesTypeLoc);
+ REGISTER_MATCHER(statementCountIs);
+ REGISTER_MATCHER(staticAssertDecl);
+ REGISTER_MATCHER(stmt);
+ REGISTER_MATCHER(stringLiteral);
+ REGISTER_MATCHER(substNonTypeTemplateParmExpr);
+ REGISTER_MATCHER(substTemplateTypeParmType);
+ REGISTER_MATCHER(switchCase);
+ REGISTER_MATCHER(switchStmt);
+ REGISTER_MATCHER(templateArgument);
+ REGISTER_MATCHER(templateArgumentCountIs);
+ REGISTER_MATCHER(templateSpecializationType);
+ REGISTER_MATCHER(templateTypeParmType);
+ REGISTER_MATCHER(throughUsingDecl);
+ REGISTER_MATCHER(to);
+ REGISTER_MATCHER(translationUnitDecl);
+ REGISTER_MATCHER(type);
+ REGISTER_MATCHER(typedefDecl);
+ REGISTER_MATCHER(typedefType);
+ REGISTER_MATCHER(typeLoc);
+ REGISTER_MATCHER(unaryExprOrTypeTraitExpr);
+ REGISTER_MATCHER(unaryOperator);
+ REGISTER_MATCHER(unaryTransformType);
+ REGISTER_MATCHER(unless);
+ REGISTER_MATCHER(unresolvedUsingTypenameDecl);
+ REGISTER_MATCHER(unresolvedUsingValueDecl);
+ REGISTER_MATCHER(userDefinedLiteral);
+ REGISTER_MATCHER(usingDecl);
+ REGISTER_MATCHER(usingDirectiveDecl);
+ REGISTER_MATCHER(valueDecl);
+ REGISTER_MATCHER(varDecl);
+ REGISTER_MATCHER(variableArrayType);
+ REGISTER_MATCHER(voidType);
+ REGISTER_MATCHER(whileStmt);
+ REGISTER_MATCHER(withInitializer);
+}
+
+RegistryMaps::~RegistryMaps() {
+ llvm::DeleteContainerSeconds(Constructors);
+}
+
+static llvm::ManagedStatic<RegistryMaps> RegistryData;
+
+} // anonymous namespace
+
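+// Looks up the constructor registered for MatcherName (e.g. "ifStmt") in the
+// table built above. Returns an empty Optional for unknown names, which
+// callers such as the dynamic expression parser can report as an
+// unknown-matcher error.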
+// static
+llvm::Optional<MatcherCtor> Registry::lookupMatcherCtor(StringRef MatcherName) {
+ ConstructorMap::const_iterator it =
+ RegistryData->constructors().find(MatcherName);
+ return it == RegistryData->constructors().end()
+ ? llvm::Optional<MatcherCtor>()
+ : it->second;
+}
+
+namespace {
+
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
+ const std::set<ASTNodeKind> &KS) {
+ unsigned Count = 0;
+ for (std::set<ASTNodeKind>::const_iterator I = KS.begin(), E = KS.end();
+ I != E; ++I) {
+ if (I != KS.begin())
+ OS << "|";
+ if (Count++ == 3) {
+ OS << "...";
+ break;
+ }
+ OS << *I;
+ }
+ return OS;
+}
+
+} // namespace
+
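+// Example: completing the argument of hasName() in
+// recordDecl(hasName(<cursor>)) passes Context = {(recordDecl, 0),
+// (hasName, 0)}; starting from the seed kinds below, the walk narrows the
+// result down to the string kind accepted by hasName(). (Illustrative; the
+// actual context is assembled by the expression parser.)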
+std::vector<ArgKind> Registry::getAcceptedCompletionTypes(
+ ArrayRef<std::pair<MatcherCtor, unsigned>> Context) {
+ ASTNodeKind InitialTypes[] = {
+ ASTNodeKind::getFromNodeKind<Decl>(),
+ ASTNodeKind::getFromNodeKind<QualType>(),
+ ASTNodeKind::getFromNodeKind<Type>(),
+ ASTNodeKind::getFromNodeKind<Stmt>(),
+ ASTNodeKind::getFromNodeKind<NestedNameSpecifier>(),
+ ASTNodeKind::getFromNodeKind<NestedNameSpecifierLoc>(),
+ ASTNodeKind::getFromNodeKind<TypeLoc>()};
+
+ // Starting with the above seed of acceptable top-level matcher types, compute
+ // the acceptable type set for the argument indicated by each context element.
+ std::set<ArgKind> TypeSet(std::begin(InitialTypes), std::end(InitialTypes));
+ for (const auto &CtxEntry : Context) {
+ MatcherCtor Ctor = CtxEntry.first;
+ unsigned ArgNumber = CtxEntry.second;
+ std::vector<ArgKind> NextTypeSet;
+ for (const ArgKind &Kind : TypeSet) {
+ if (Kind.getArgKind() == Kind.AK_Matcher &&
+ Ctor->isConvertibleTo(Kind.getMatcherKind()) &&
+ (Ctor->isVariadic() || ArgNumber < Ctor->getNumArgs()))
+ Ctor->getArgKinds(Kind.getMatcherKind(), ArgNumber, NextTypeSet);
+ }
+ TypeSet.clear();
+ TypeSet.insert(NextTypeSet.begin(), NextTypeSet.end());
+ }
+ return std::vector<ArgKind>(TypeSet.begin(), TypeSet.end());
+}
+
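+// Builds the completion strings shown to the user, e.g.
+// "Matcher<Stmt> ifStmt(Matcher<IfStmt>...)", or
+// "Matcher<T> anyOf(Matcher<T>...)" for a polymorphic matcher. Only matchers
+// convertible to one of AcceptedTypes are offered.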
+std::vector<MatcherCompletion>
+Registry::getMatcherCompletions(ArrayRef<ArgKind> AcceptedTypes) {
+ std::vector<MatcherCompletion> Completions;
+
+ // Search the registry for acceptable matchers.
+ for (const auto &M : RegistryData->constructors()) {
+ const auto *Matcher = M.getValue();
+ StringRef Name = M.getKey();
+
+ std::set<ASTNodeKind> RetKinds;
+ unsigned NumArgs = Matcher->isVariadic() ? 1 : Matcher->getNumArgs();
+ bool IsPolymorphic = Matcher->isPolymorphic();
+ std::vector<std::vector<ArgKind>> ArgsKinds(NumArgs);
+ unsigned MaxSpecificity = 0;
+ for (const ArgKind& Kind : AcceptedTypes) {
+ if (Kind.getArgKind() != Kind.AK_Matcher)
+ continue;
+ unsigned Specificity;
+ ASTNodeKind LeastDerivedKind;
+ if (Matcher->isConvertibleTo(Kind.getMatcherKind(), &Specificity,
+ &LeastDerivedKind)) {
+ if (MaxSpecificity < Specificity)
+ MaxSpecificity = Specificity;
+ RetKinds.insert(LeastDerivedKind);
+ for (unsigned Arg = 0; Arg != NumArgs; ++Arg)
+ Matcher->getArgKinds(Kind.getMatcherKind(), Arg, ArgsKinds[Arg]);
+ if (IsPolymorphic)
+ break;
+ }
+ }
+
+ if (!RetKinds.empty() && MaxSpecificity > 0) {
+ std::string Decl;
+ llvm::raw_string_ostream OS(Decl);
+
+ if (IsPolymorphic) {
+ OS << "Matcher<T> " << Name << "(Matcher<T>";
+ } else {
+ OS << "Matcher<" << RetKinds << "> " << Name << "(";
+ for (const std::vector<ArgKind> &Arg : ArgsKinds) {
+ if (&Arg != &ArgsKinds[0])
+ OS << ", ";
+
+ bool FirstArgKind = true;
+ std::set<ASTNodeKind> MatcherKinds;
+ // Two steps. First all non-matchers, then matchers only.
+ for (const ArgKind &AK : Arg) {
+ if (AK.getArgKind() == ArgKind::AK_Matcher) {
+ MatcherKinds.insert(AK.getMatcherKind());
+ } else {
+ if (!FirstArgKind) OS << "|";
+ FirstArgKind = false;
+ OS << AK.asString();
+ }
+ }
+ if (!MatcherKinds.empty()) {
+ if (!FirstArgKind) OS << "|";
+ OS << "Matcher<" << MatcherKinds << ">";
+ }
+ }
+ }
+ if (Matcher->isVariadic())
+ OS << "...";
+ OS << ")";
+
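+ // TypedText is what actually gets inserted at the cursor: the matcher
+ // name, an opening paren, and an opening quote when the first argument is
+ // a string.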
+ std::string TypedText = Name;
+ TypedText += "(";
+ if (ArgsKinds.empty())
+ TypedText += ")";
+ else if (ArgsKinds[0][0].getArgKind() == ArgKind::AK_String)
+ TypedText += "\"";
+
+ Completions.emplace_back(TypedText, OS.str(), MaxSpecificity);
+ }
+ }
+
+ return Completions;
+}
+
+// static
+VariantMatcher Registry::constructMatcher(MatcherCtor Ctor,
+ SourceRange NameRange,
+ ArrayRef<ParserValue> Args,
+ Diagnostics *Error) {
+ return Ctor->create(NameRange, Args, Error);
+}
+
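+// Illustrative use: for a clang-query line such as
+// match varDecl(hasInitializer(integerLiteral())).bind("v")
+// the parser resolves each name via lookupMatcherCtor() and then calls
+// constructMatcher()/constructBoundMatcher() with the parsed arguments.
+// Only values with a single underlying matcher are bindable; see below.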
+// static
+VariantMatcher Registry::constructBoundMatcher(MatcherCtor Ctor,
+ SourceRange NameRange,
+ StringRef BindID,
+ ArrayRef<ParserValue> Args,
+ Diagnostics *Error) {
+ VariantMatcher Out = constructMatcher(Ctor, NameRange, Args, Error);
+ if (Out.isNull()) return Out;
+
+ llvm::Optional<DynTypedMatcher> Result = Out.getSingleMatcher();
+ if (Result.hasValue()) {
+ llvm::Optional<DynTypedMatcher> Bound = Result->tryBind(BindID);
+ if (Bound.hasValue()) {
+ return VariantMatcher::SingleMatcher(*Bound);
+ }
+ }
+ Error->addError(NameRange, Error->ET_RegistryNotBindable);
+ return VariantMatcher();
+}
+
+} // namespace dynamic
+} // namespace ast_matchers
+} // namespace clang
diff --git a/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp b/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp
new file mode 100644
index 0000000..8f3c70c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp
@@ -0,0 +1,392 @@
+//===--- VariantValue.cpp - Polymorphic value type --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Polymorphic value type.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/ASTMatchers/Dynamic/VariantValue.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/STLExtras.h"
+
+namespace clang {
+namespace ast_matchers {
+namespace dynamic {
+
+std::string ArgKind::asString() const {
+ switch (getArgKind()) {
+ case AK_Matcher:
+ return (Twine("Matcher<") + MatcherKind.asStringRef() + ">").str();
+ case AK_Unsigned:
+ return "unsigned";
+ case AK_String:
+ return "string";
+ }
+ llvm_unreachable("unhandled ArgKind");
+}
+
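+// Specificity ranks candidate conversions for completion and overload
+// ordering: non-matcher kinds score a flat 1, while matcher kinds score
+// 100 minus the derivation distance, so an exact kind match beats a
+// conversion such as Matcher<Decl> used where Matcher<CXXRecordDecl> is
+// expected.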
+bool ArgKind::isConvertibleTo(ArgKind To, unsigned *Specificity) const {
+ if (K != To.K)
+ return false;
+ if (K != AK_Matcher) {
+ if (Specificity)
+ *Specificity = 1;
+ return true;
+ }
+ unsigned Distance;
+ if (!MatcherKind.isBaseOf(To.MatcherKind, &Distance))
+ return false;
+
+ if (Specificity)
+ *Specificity = 100 - Distance;
+ return true;
+}
+
+bool
+VariantMatcher::MatcherOps::canConstructFrom(const DynTypedMatcher &Matcher,
+ bool &IsExactMatch) const {
+ IsExactMatch = Matcher.getSupportedKind().isSame(NodeKind);
+ return Matcher.canConvertTo(NodeKind);
+}
+
+llvm::Optional<DynTypedMatcher>
+VariantMatcher::MatcherOps::constructVariadicOperator(
+ DynTypedMatcher::VariadicOperator Op,
+ ArrayRef<VariantMatcher> InnerMatchers) const {
+ std::vector<DynTypedMatcher> DynMatchers;
+ for (const auto &InnerMatcher : InnerMatchers) {
+ // Abort if any of the inner matchers can't be converted to
+ // Matcher<T>.
+ if (!InnerMatcher.Value)
+ return llvm::None;
+ llvm::Optional<DynTypedMatcher> Inner =
+ InnerMatcher.Value->getTypedMatcher(*this);
+ if (!Inner)
+ return llvm::None;
+ DynMatchers.push_back(*Inner);
+ }
+ return DynTypedMatcher::constructVariadic(Op, NodeKind, DynMatchers);
+}
+
+VariantMatcher::Payload::~Payload() {}
+
+class VariantMatcher::SinglePayload : public VariantMatcher::Payload {
+public:
+ SinglePayload(const DynTypedMatcher &Matcher) : Matcher(Matcher) {}
+
+ llvm::Optional<DynTypedMatcher> getSingleMatcher() const override {
+ return Matcher;
+ }
+
+ std::string getTypeAsString() const override {
+ return (Twine("Matcher<") + Matcher.getSupportedKind().asStringRef() + ">")
+ .str();
+ }
+
+ llvm::Optional<DynTypedMatcher>
+ getTypedMatcher(const MatcherOps &Ops) const override {
+ bool Ignore;
+ if (Ops.canConstructFrom(Matcher, Ignore))
+ return Matcher;
+ return llvm::None;
+ }
+
+ bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind,
+ unsigned *Specificity) const override {
+ return ArgKind(Matcher.getSupportedKind())
+ .isConvertibleTo(Kind, Specificity);
+ }
+
+private:
+ const DynTypedMatcher Matcher;
+};
+
+class VariantMatcher::PolymorphicPayload : public VariantMatcher::Payload {
+public:
+ PolymorphicPayload(std::vector<DynTypedMatcher> MatchersIn)
+ : Matchers(std::move(MatchersIn)) {}
+
+ ~PolymorphicPayload() override {}
+
+ llvm::Optional<DynTypedMatcher> getSingleMatcher() const override {
+ if (Matchers.size() != 1)
+ return llvm::Optional<DynTypedMatcher>();
+ return Matchers[0];
+ }
+
+ std::string getTypeAsString() const override {
+ std::string Inner;
+ for (size_t i = 0, e = Matchers.size(); i != e; ++i) {
+ if (i != 0)
+ Inner += "|";
+ Inner += Matchers[i].getSupportedKind().asStringRef();
+ }
+ return (Twine("Matcher<") + Inner + ">").str();
+ }
+
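+ // Picks which of the overload instantiations satisfies the requested node
+ // kind; e.g. a polymorphic hasBody() asked for Matcher<ForStmt> yields its
+ // ForStmt form. Several inexact candidates are ambiguous and yield None.
+ // (hasBody is only an illustrative polymorphic matcher.)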
+ llvm::Optional<DynTypedMatcher>
+ getTypedMatcher(const MatcherOps &Ops) const override {
+ bool FoundIsExact = false;
+ const DynTypedMatcher *Found = nullptr;
+ int NumFound = 0;
+ for (size_t i = 0, e = Matchers.size(); i != e; ++i) {
+ bool IsExactMatch;
+ if (Ops.canConstructFrom(Matchers[i], IsExactMatch)) {
+ if (Found) {
+ if (FoundIsExact) {
+ assert(!IsExactMatch && "We should not have two exact matches.");
+ continue;
+ }
+ }
+ Found = &Matchers[i];
+ FoundIsExact = IsExactMatch;
+ ++NumFound;
+ }
+ }
+ // We only succeed if we found exactly one, or if we found an exact match.
+ if (Found && (FoundIsExact || NumFound == 1))
+ return *Found;
+ return llvm::None;
+ }
+
+ bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind,
+ unsigned *Specificity) const override {
+ unsigned MaxSpecificity = 0;
+ for (const DynTypedMatcher &Matcher : Matchers) {
+ unsigned ThisSpecificity;
+ if (ArgKind(Matcher.getSupportedKind())
+ .isConvertibleTo(Kind, &ThisSpecificity)) {
+ MaxSpecificity = std::max(MaxSpecificity, ThisSpecificity);
+ }
+ }
+ if (Specificity)
+ *Specificity = MaxSpecificity;
+ return MaxSpecificity > 0;
+ }
+
+ const std::vector<DynTypedMatcher> Matchers;
+};
+
+class VariantMatcher::VariadicOpPayload : public VariantMatcher::Payload {
+public:
+ VariadicOpPayload(DynTypedMatcher::VariadicOperator Op,
+ std::vector<VariantMatcher> Args)
+ : Op(Op), Args(std::move(Args)) {}
+
+ llvm::Optional<DynTypedMatcher> getSingleMatcher() const override {
+ return llvm::Optional<DynTypedMatcher>();
+ }
+
+ std::string getTypeAsString() const override {
+ std::string Inner;
+ for (size_t i = 0, e = Args.size(); i != e; ++i) {
+ if (i != 0)
+ Inner += "&";
+ Inner += Args[i].getTypeAsString();
+ }
+ return Inner;
+ }
+
+ llvm::Optional<DynTypedMatcher>
+ getTypedMatcher(const MatcherOps &Ops) const override {
+ return Ops.constructVariadicOperator(Op, Args);
+ }
+
+ bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind,
+ unsigned *Specificity) const override {
+ for (const VariantMatcher &Matcher : Args) {
+ if (!Matcher.isConvertibleTo(Kind, Specificity))
+ return false;
+ }
+ return true;
+ }
+
+private:
+ const DynTypedMatcher::VariadicOperator Op;
+ const std::vector<VariantMatcher> Args;
+};
+
+VariantMatcher::VariantMatcher() {}
+
+VariantMatcher VariantMatcher::SingleMatcher(const DynTypedMatcher &Matcher) {
+ return VariantMatcher(new SinglePayload(Matcher));
+}
+
+VariantMatcher
+VariantMatcher::PolymorphicMatcher(std::vector<DynTypedMatcher> Matchers) {
+ return VariantMatcher(new PolymorphicPayload(std::move(Matchers)));
+}
+
+VariantMatcher VariantMatcher::VariadicOperatorMatcher(
+ DynTypedMatcher::VariadicOperator Op,
+ std::vector<VariantMatcher> Args) {
+ return VariantMatcher(new VariadicOpPayload(Op, std::move(Args)));
+}
+
+llvm::Optional<DynTypedMatcher> VariantMatcher::getSingleMatcher() const {
+ return Value ? Value->getSingleMatcher() : llvm::Optional<DynTypedMatcher>();
+}
+
+void VariantMatcher::reset() { Value.reset(); }
+
+std::string VariantMatcher::getTypeAsString() const {
+ if (Value) return Value->getTypeAsString();
+ return "<Nothing>";
+}
+
+VariantValue::VariantValue(const VariantValue &Other) : Type(VT_Nothing) {
+ *this = Other;
+}
+
+VariantValue::VariantValue(unsigned Unsigned) : Type(VT_Nothing) {
+ setUnsigned(Unsigned);
+}
+
+VariantValue::VariantValue(StringRef String) : Type(VT_Nothing) {
+ setString(String);
+}
+
+VariantValue::VariantValue(const VariantMatcher &Matcher) : Type(VT_Nothing) {
+ setMatcher(Matcher);
+}
+
+VariantValue::~VariantValue() { reset(); }
+
+VariantValue &VariantValue::operator=(const VariantValue &Other) {
+ if (this == &Other) return *this;
+ reset();
+ switch (Other.Type) {
+ case VT_Unsigned:
+ setUnsigned(Other.getUnsigned());
+ break;
+ case VT_String:
+ setString(Other.getString());
+ break;
+ case VT_Matcher:
+ setMatcher(Other.getMatcher());
+ break;
+ case VT_Nothing:
+ Type = VT_Nothing;
+ break;
+ }
+ return *this;
+}
+
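+// VariantValue manages a discriminated union by hand: every setter must
+// first call reset() so the previous String/Matcher payload is deleted
+// before Type and Value are overwritten.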
+void VariantValue::reset() {
+ switch (Type) {
+ case VT_String:
+ delete Value.String;
+ break;
+ case VT_Matcher:
+ delete Value.Matcher;
+ break;
+ // Cases that do nothing.
+ case VT_Unsigned:
+ case VT_Nothing:
+ break;
+ }
+ Type = VT_Nothing;
+}
+
+bool VariantValue::isUnsigned() const {
+ return Type == VT_Unsigned;
+}
+
+unsigned VariantValue::getUnsigned() const {
+ assert(isUnsigned());
+ return Value.Unsigned;
+}
+
+void VariantValue::setUnsigned(unsigned NewValue) {
+ reset();
+ Type = VT_Unsigned;
+ Value.Unsigned = NewValue;
+}
+
+bool VariantValue::isString() const {
+ return Type == VT_String;
+}
+
+const std::string &VariantValue::getString() const {
+ assert(isString());
+ return *Value.String;
+}
+
+void VariantValue::setString(StringRef NewValue) {
+ reset();
+ Type = VT_String;
+ Value.String = new std::string(NewValue);
+}
+
+bool VariantValue::isMatcher() const {
+ return Type == VT_Matcher;
+}
+
+const VariantMatcher &VariantValue::getMatcher() const {
+ assert(isMatcher());
+ return *Value.Matcher;
+}
+
+void VariantValue::setMatcher(const VariantMatcher &NewValue) {
+ reset();
+ Type = VT_Matcher;
+ Value.Matcher = new VariantMatcher(NewValue);
+}
+
+bool VariantValue::isConvertibleTo(ArgKind Kind, unsigned *Specificity) const {
+ switch (Kind.getArgKind()) {
+ case ArgKind::AK_Unsigned:
+ if (!isUnsigned())
+ return false;
+ *Specificity = 1;
+ return true;
+
+ case ArgKind::AK_String:
+ if (!isString())
+ return false;
+ *Specificity = 1;
+ return true;
+
+ case ArgKind::AK_Matcher:
+ if (!isMatcher())
+ return false;
+ return getMatcher().isConvertibleTo(Kind.getMatcherKind(), Specificity);
+ }
+ llvm_unreachable("Invalid Type");
+}
+
+bool VariantValue::isConvertibleTo(ArrayRef<ArgKind> Kinds,
+ unsigned *Specificity) const {
+ unsigned MaxSpecificity = 0;
+ for (const ArgKind& Kind : Kinds) {
+ unsigned ThisSpecificity;
+ if (!isConvertibleTo(Kind, &ThisSpecificity))
+ continue;
+ MaxSpecificity = std::max(MaxSpecificity, ThisSpecificity);
+ }
+ if (Specificity && MaxSpecificity > 0) {
+ *Specificity = MaxSpecificity;
+ }
+ return MaxSpecificity > 0;
+}
+
+std::string VariantValue::getTypeAsString() const {
+ switch (Type) {
+ case VT_String: return "String";
+ case VT_Matcher: return getMatcher().getTypeAsString();
+ case VT_Unsigned: return "Unsigned";
+ case VT_Nothing: return "Nothing";
+ }
+ llvm_unreachable("Invalid Type");
+}
+
+} // end namespace dynamic
+} // end namespace ast_matchers
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp b/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp
new file mode 100644
index 0000000..52c7f26
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp
@@ -0,0 +1,604 @@
+//== AnalysisDeclContext.cpp - Analysis context for path-sensitive analysis -*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines AnalysisDeclContext, a class that manages the analysis
+// context data for path-sensitive analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/AnalysisContext.h"
+#include "BodyFarm.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/Analyses/PseudoConstantAnalysis.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/CFGStmtMap.h"
+#include "clang/Analysis/Support/BumpVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SaveAndRestore.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+typedef llvm::DenseMap<const void *, ManagedAnalysis *> ManagedAnalysisMap;
+
+AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
+ const Decl *d,
+ const CFG::BuildOptions &buildOptions)
+ : Manager(Mgr),
+ D(d),
+ cfgBuildOptions(buildOptions),
+ forcedBlkExprs(nullptr),
+ builtCFG(false),
+ builtCompleteCFG(false),
+ ReferencedBlockVars(nullptr),
+ ManagedAnalyses(nullptr)
+{
+ cfgBuildOptions.forcedBlkExprs = &forcedBlkExprs;
+}
+
+AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
+ const Decl *d)
+: Manager(Mgr),
+ D(d),
+ forcedBlkExprs(nullptr),
+ builtCFG(false),
+ builtCompleteCFG(false),
+ ReferencedBlockVars(nullptr),
+ ManagedAnalyses(nullptr)
+{
+ cfgBuildOptions.forcedBlkExprs = &forcedBlkExprs;
+}
+
+AnalysisDeclContextManager::AnalysisDeclContextManager(bool useUnoptimizedCFG,
+ bool addImplicitDtors,
+ bool addInitializers,
+ bool addTemporaryDtors,
+ bool synthesizeBodies,
+ bool addStaticInitBranch,
+ bool addCXXNewAllocator,
+ CodeInjector *injector)
+ : Injector(injector), SynthesizeBodies(synthesizeBodies)
+{
+ cfgBuildOptions.PruneTriviallyFalseEdges = !useUnoptimizedCFG;
+ cfgBuildOptions.AddImplicitDtors = addImplicitDtors;
+ cfgBuildOptions.AddInitializers = addInitializers;
+ cfgBuildOptions.AddTemporaryDtors = addTemporaryDtors;
+ cfgBuildOptions.AddStaticInitBranches = addStaticInitBranch;
+ cfgBuildOptions.AddCXXNewAllocator = addCXXNewAllocator;
+}
+
+void AnalysisDeclContextManager::clear() {
+ llvm::DeleteContainerSeconds(Contexts);
+}
+
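+// The BodyFarm is constructed once on first use and never freed; it caches
+// synthesized bodies for the whole lifetime of the process.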
+static BodyFarm &getBodyFarm(ASTContext &C, CodeInjector *injector = nullptr) {
+ static BodyFarm *BF = new BodyFarm(C, injector);
+ return *BF;
+}
+
+Stmt *AnalysisDeclContext::getBody(bool &IsAutosynthesized) const {
+ IsAutosynthesized = false;
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ Stmt *Body = FD->getBody();
+ if (!Body && Manager && Manager->synthesizeBodies()) {
+ Body = getBodyFarm(getASTContext(), Manager->Injector.get()).getBody(FD);
+ if (Body)
+ IsAutosynthesized = true;
+ }
+ return Body;
+ }
+ else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ Stmt *Body = MD->getBody();
+ if (!Body && Manager && Manager->synthesizeBodies()) {
+ Body = getBodyFarm(getASTContext(), Manager->Injector.get()).getBody(MD);
+ if (Body)
+ IsAutosynthesized = true;
+ }
+ return Body;
+ } else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
+ return BD->getBody();
+ else if (const FunctionTemplateDecl *FunTmpl
+ = dyn_cast_or_null<FunctionTemplateDecl>(D))
+ return FunTmpl->getTemplatedDecl()->getBody();
+
+ llvm_unreachable("unknown code decl");
+}
+
+Stmt *AnalysisDeclContext::getBody() const {
+ bool Tmp;
+ return getBody(Tmp);
+}
+
+bool AnalysisDeclContext::isBodyAutosynthesized() const {
+ bool Tmp;
+ getBody(Tmp);
+ return Tmp;
+}
+
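+// Bodies conjured directly by BodyFarm carry invalid source locations, while
+// bodies injected from model files have real ones; a valid location on an
+// autosynthesized body therefore indicates a model file.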
+bool AnalysisDeclContext::isBodyAutosynthesizedFromModelFile() const {
+ bool Tmp;
+ Stmt *Body = getBody(Tmp);
+ return Tmp && Body->getLocStart().isValid();
+}
+
+
+const ImplicitParamDecl *AnalysisDeclContext::getSelfDecl() const {
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ return MD->getSelfDecl();
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ // See if 'self' was captured by the block.
+ for (const auto &I : BD->captures()) {
+ const VarDecl *VD = I.getVariable();
+ if (VD->getName() == "self")
+ return dyn_cast<ImplicitParamDecl>(VD);
+ }
+ }
+
+ auto *CXXMethod = dyn_cast<CXXMethodDecl>(D);
+ if (!CXXMethod)
+ return nullptr;
+
+ const CXXRecordDecl *parent = CXXMethod->getParent();
+ if (!parent->isLambda())
+ return nullptr;
+
+ for (const LambdaCapture &LC : parent->captures()) {
+ if (!LC.capturesVariable())
+ continue;
+
+ VarDecl *VD = LC.getCapturedVar();
+ if (VD->getName() == "self")
+ return dyn_cast<ImplicitParamDecl>(VD);
+ }
+
+ return nullptr;
+}
+
+void AnalysisDeclContext::registerForcedBlockExpression(const Stmt *stmt) {
+ if (!forcedBlkExprs)
+ forcedBlkExprs = new CFG::BuildOptions::ForcedBlkExprs();
+ // Default construct an entry for 'stmt'.
+ if (const Expr *e = dyn_cast<Expr>(stmt))
+ stmt = e->IgnoreParens();
+ (void) (*forcedBlkExprs)[stmt];
+}
+
+const CFGBlock *
+AnalysisDeclContext::getBlockForRegisteredExpression(const Stmt *stmt) {
+ assert(forcedBlkExprs);
+ if (const Expr *e = dyn_cast<Expr>(stmt))
+ stmt = e->IgnoreParens();
+ CFG::BuildOptions::ForcedBlkExprs::const_iterator itr =
+ forcedBlkExprs->find(stmt);
+ assert(itr != forcedBlkExprs->end());
+ return itr->second;
+}
+
+/// Add each synthetic statement in the CFG to the parent map, using the
+/// source statement's parent.
+static void addParentsForSyntheticStmts(const CFG *TheCFG, ParentMap &PM) {
+ if (!TheCFG)
+ return;
+
+ for (CFG::synthetic_stmt_iterator I = TheCFG->synthetic_stmt_begin(),
+ E = TheCFG->synthetic_stmt_end();
+ I != E; ++I) {
+ PM.setParent(I->first, PM.getParent(I->second));
+ }
+}
+
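+// Returns the pruned CFG (trivially false edges removed); when pruning is
+// disabled in the build options, this transparently falls back to the
+// complete, unoptimized CFG built below.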
+CFG *AnalysisDeclContext::getCFG() {
+ if (!cfgBuildOptions.PruneTriviallyFalseEdges)
+ return getUnoptimizedCFG();
+
+ if (!builtCFG) {
+ cfg = CFG::buildCFG(D, getBody(), &D->getASTContext(), cfgBuildOptions);
+ // Even when the cfg is not successfully built, we don't
+ // want to try building it again.
+ builtCFG = true;
+
+ if (PM)
+ addParentsForSyntheticStmts(cfg.get(), *PM);
+
+ // The Observer should only observe one build of the CFG.
+ getCFGBuildOptions().Observer = nullptr;
+ }
+ return cfg.get();
+}
+
+CFG *AnalysisDeclContext::getUnoptimizedCFG() {
+ if (!builtCompleteCFG) {
+ SaveAndRestore<bool> NotPrune(cfgBuildOptions.PruneTriviallyFalseEdges,
+ false);
+ completeCFG =
+ CFG::buildCFG(D, getBody(), &D->getASTContext(), cfgBuildOptions);
+ // Even when the cfg is not successfully built, we don't
+ // want to try building it again.
+ builtCompleteCFG = true;
+
+ if (PM)
+ addParentsForSyntheticStmts(completeCFG.get(), *PM);
+
+ // The Observer should only observe one build of the CFG.
+ getCFGBuildOptions().Observer = nullptr;
+ }
+ return completeCFG.get();
+}
+
+CFGStmtMap *AnalysisDeclContext::getCFGStmtMap() {
+ if (cfgStmtMap)
+ return cfgStmtMap.get();
+
+ if (CFG *c = getCFG()) {
+ cfgStmtMap.reset(CFGStmtMap::Build(c, &getParentMap()));
+ return cfgStmtMap.get();
+ }
+
+ return nullptr;
+}
+
+CFGReverseBlockReachabilityAnalysis *AnalysisDeclContext::getCFGReachablityAnalysis() {
+ if (CFA)
+ return CFA.get();
+
+ if (CFG *c = getCFG()) {
+ CFA.reset(new CFGReverseBlockReachabilityAnalysis(*c));
+ return CFA.get();
+ }
+
+ return nullptr;
+}
+
+void AnalysisDeclContext::dumpCFG(bool ShowColors) {
+ getCFG()->dump(getASTContext().getLangOpts(), ShowColors);
+}
+
+ParentMap &AnalysisDeclContext::getParentMap() {
+ if (!PM) {
+ PM.reset(new ParentMap(getBody()));
+ if (const CXXConstructorDecl *C = dyn_cast<CXXConstructorDecl>(getDecl())) {
+ for (const auto *I : C->inits()) {
+ PM->addStmt(I->getInit());
+ }
+ }
+ if (builtCFG)
+ addParentsForSyntheticStmts(getCFG(), *PM);
+ if (builtCompleteCFG)
+ addParentsForSyntheticStmts(getUnoptimizedCFG(), *PM);
+ }
+ return *PM;
+}
+
+PseudoConstantAnalysis *AnalysisDeclContext::getPseudoConstantAnalysis() {
+ if (!PCA)
+ PCA.reset(new PseudoConstantAnalysis(getBody()));
+ return PCA.get();
+}
+
+AnalysisDeclContext *AnalysisDeclContextManager::getContext(const Decl *D) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // Calling 'hasBody' replaces 'FD' in place with the FunctionDecl
+ // that has the body.
+ FD->hasBody(FD);
+ D = FD;
+ }
+
+ AnalysisDeclContext *&AC = Contexts[D];
+ if (!AC)
+ AC = new AnalysisDeclContext(this, D, cfgBuildOptions);
+ return AC;
+}
+
+const StackFrameContext *
+AnalysisDeclContext::getStackFrame(LocationContext const *Parent, const Stmt *S,
+ const CFGBlock *Blk, unsigned Idx) {
+ return getLocationContextManager().getStackFrame(this, Parent, S, Blk, Idx);
+}
+
+const BlockInvocationContext *
+AnalysisDeclContext::getBlockInvocationContext(const LocationContext *parent,
+ const clang::BlockDecl *BD,
+ const void *ContextData) {
+ return getLocationContextManager().getBlockInvocationContext(this, parent,
+ BD, ContextData);
+}
+
+LocationContextManager & AnalysisDeclContext::getLocationContextManager() {
+ assert(Manager &&
+ "Cannot create LocationContexts without an AnalysisDeclContextManager!");
+ return Manager->getLocationContextManager();
+}
+
+//===----------------------------------------------------------------------===//
+// FoldingSet profiling.
+//===----------------------------------------------------------------------===//
+
+void LocationContext::ProfileCommon(llvm::FoldingSetNodeID &ID,
+ ContextKind ck,
+ AnalysisDeclContext *ctx,
+ const LocationContext *parent,
+ const void *data) {
+ ID.AddInteger(ck);
+ ID.AddPointer(ctx);
+ ID.AddPointer(parent);
+ ID.AddPointer(data);
+}
+
+void StackFrameContext::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getAnalysisDeclContext(), getParent(), CallSite, Block, Index);
+}
+
+void ScopeContext::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getAnalysisDeclContext(), getParent(), Enter);
+}
+
+void BlockInvocationContext::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getAnalysisDeclContext(), getParent(), BD, ContextData);
+}
+
+//===----------------------------------------------------------------------===//
+// LocationContext creation.
+//===----------------------------------------------------------------------===//
+
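+// All LocationContexts are uniqued through a FoldingSet: profile the
+// prospective node first and only allocate when no identical context
+// already exists.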
+template <typename LOC, typename DATA>
+const LOC*
+LocationContextManager::getLocationContext(AnalysisDeclContext *ctx,
+ const LocationContext *parent,
+ const DATA *d) {
+ llvm::FoldingSetNodeID ID;
+ LOC::Profile(ID, ctx, parent, d);
+ void *InsertPos;
+
+ LOC *L = cast_or_null<LOC>(Contexts.FindNodeOrInsertPos(ID, InsertPos));
+
+ if (!L) {
+ L = new LOC(ctx, parent, d);
+ Contexts.InsertNode(L, InsertPos);
+ }
+ return L;
+}
+
+const StackFrameContext*
+LocationContextManager::getStackFrame(AnalysisDeclContext *ctx,
+ const LocationContext *parent,
+ const Stmt *s,
+ const CFGBlock *blk, unsigned idx) {
+ llvm::FoldingSetNodeID ID;
+ StackFrameContext::Profile(ID, ctx, parent, s, blk, idx);
+ void *InsertPos;
+ StackFrameContext *L =
+ cast_or_null<StackFrameContext>(Contexts.FindNodeOrInsertPos(ID, InsertPos));
+ if (!L) {
+ L = new StackFrameContext(ctx, parent, s, blk, idx);
+ Contexts.InsertNode(L, InsertPos);
+ }
+ return L;
+}
+
+const ScopeContext *
+LocationContextManager::getScope(AnalysisDeclContext *ctx,
+ const LocationContext *parent,
+ const Stmt *s) {
+ return getLocationContext<ScopeContext, Stmt>(ctx, parent, s);
+}
+
+const BlockInvocationContext *
+LocationContextManager::getBlockInvocationContext(AnalysisDeclContext *ctx,
+ const LocationContext *parent,
+ const BlockDecl *BD,
+ const void *ContextData) {
+ llvm::FoldingSetNodeID ID;
+ BlockInvocationContext::Profile(ID, ctx, parent, BD, ContextData);
+ void *InsertPos;
+ BlockInvocationContext *L =
+ cast_or_null<BlockInvocationContext>(Contexts.FindNodeOrInsertPos(ID,
+ InsertPos));
+ if (!L) {
+ L = new BlockInvocationContext(ctx, parent, BD, ContextData);
+ Contexts.InsertNode(L, InsertPos);
+ }
+ return L;
+}
+
+//===----------------------------------------------------------------------===//
+// LocationContext methods.
+//===----------------------------------------------------------------------===//
+
+const StackFrameContext *LocationContext::getCurrentStackFrame() const {
+ const LocationContext *LC = this;
+ while (LC) {
+ if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LC))
+ return SFC;
+ LC = LC->getParent();
+ }
+ return nullptr;
+}
+
+bool LocationContext::inTopFrame() const {
+ return getCurrentStackFrame()->inTopFrame();
+}
+
+bool LocationContext::isParentOf(const LocationContext *LC) const {
+ do {
+ const LocationContext *Parent = LC->getParent();
+ if (Parent == this)
+ return true;
+ else
+ LC = Parent;
+ } while (LC);
+
+ return false;
+}
+
+void LocationContext::dumpStack(raw_ostream &OS, StringRef Indent) const {
+ ASTContext &Ctx = getAnalysisDeclContext()->getASTContext();
+ PrintingPolicy PP(Ctx.getLangOpts());
+ PP.TerseOutput = 1;
+
+ unsigned Frame = 0;
+ for (const LocationContext *LCtx = this; LCtx; LCtx = LCtx->getParent()) {
+ switch (LCtx->getKind()) {
+ case StackFrame:
+ OS << Indent << '#' << Frame++ << ' ';
+ cast<StackFrameContext>(LCtx)->getDecl()->print(OS, PP);
+ OS << '\n';
+ break;
+ case Scope:
+ OS << Indent << " (scope)\n";
+ break;
+ case Block:
+ OS << Indent << " (block context: "
+ << cast<BlockInvocationContext>(LCtx)->getContextData()
+ << ")\n";
+ break;
+ }
+ }
+}
+
+LLVM_DUMP_METHOD void LocationContext::dumpStack() const {
+ dumpStack(llvm::errs());
+}
+
+//===----------------------------------------------------------------------===//
+// Lazily generated map to query the external variables referenced by a Block.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class FindBlockDeclRefExprsVals : public StmtVisitor<FindBlockDeclRefExprsVals>{
+ BumpVector<const VarDecl*> &BEVals;
+ BumpVectorContext &BC;
+ llvm::SmallPtrSet<const VarDecl*, 4> Visited;
+ llvm::SmallPtrSet<const DeclContext*, 4> IgnoredContexts;
+public:
+ FindBlockDeclRefExprsVals(BumpVector<const VarDecl*> &bevals,
+ BumpVectorContext &bc)
+ : BEVals(bevals), BC(bc) {}
+
+ void VisitStmt(Stmt *S) {
+ for (Stmt *Child : S->children())
+ if (Child)
+ Visit(Child);
+ }
+
+ void VisitDeclRefExpr(DeclRefExpr *DR) {
+ // Non-local variables are also directly modified.
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ if (!VD->hasLocalStorage()) {
+ if (Visited.insert(VD).second)
+ BEVals.push_back(VD, BC);
+ }
+ }
+ }
+
+ void VisitBlockExpr(BlockExpr *BR) {
+ // Blocks containing blocks can transitively capture more variables.
+ IgnoredContexts.insert(BR->getBlockDecl());
+ Visit(BR->getBlockDecl()->getBody());
+ }
+
+ void VisitPseudoObjectExpr(PseudoObjectExpr *PE) {
+ for (PseudoObjectExpr::semantics_iterator it = PE->semantics_begin(),
+ et = PE->semantics_end(); it != et; ++it) {
+ Expr *Semantic = *it;
+ if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Semantic))
+ Semantic = OVE->getSourceExpr();
+ Visit(Semantic);
+ }
+ }
+};
+} // end anonymous namespace
+
+typedef BumpVector<const VarDecl*> DeclVec;
+
+static DeclVec* LazyInitializeReferencedDecls(const BlockDecl *BD,
+ void *&Vec,
+ llvm::BumpPtrAllocator &A) {
+ if (Vec)
+ return (DeclVec*) Vec;
+
+ BumpVectorContext BC(A);
+ DeclVec *BV = (DeclVec*) A.Allocate<DeclVec>();
+ new (BV) DeclVec(BC, 10);
+
+ // Go through the capture list.
+ for (const auto &CI : BD->captures()) {
+ BV->push_back(CI.getVariable(), BC);
+ }
+
+ // Find the referenced global/static variables.
+ FindBlockDeclRefExprsVals F(*BV, BC);
+ F.Visit(BD->getBody());
+
+ Vec = BV;
+ return BV;
+}
+
+llvm::iterator_range<AnalysisDeclContext::referenced_decls_iterator>
+AnalysisDeclContext::getReferencedBlockVars(const BlockDecl *BD) {
+ if (!ReferencedBlockVars)
+ ReferencedBlockVars = new llvm::DenseMap<const BlockDecl*,void*>();
+
+ const DeclVec *V =
+ LazyInitializeReferencedDecls(BD, (*ReferencedBlockVars)[BD], A);
+ return llvm::make_range(V->begin(), V->end());
+}
+
+ManagedAnalysis *&AnalysisDeclContext::getAnalysisImpl(const void *tag) {
+ if (!ManagedAnalyses)
+ ManagedAnalyses = new ManagedAnalysisMap();
+ ManagedAnalysisMap *M = (ManagedAnalysisMap*) ManagedAnalyses;
+ return (*M)[tag];
+}
+
+//===----------------------------------------------------------------------===//
+// Cleanup.
+//===----------------------------------------------------------------------===//
+
+ManagedAnalysis::~ManagedAnalysis() {}
+
+AnalysisDeclContext::~AnalysisDeclContext() {
+ delete forcedBlkExprs;
+ delete ReferencedBlockVars;
+ // Release the managed analyses.
+ if (ManagedAnalyses) {
+ ManagedAnalysisMap *M = (ManagedAnalysisMap*) ManagedAnalyses;
+ llvm::DeleteContainerSeconds(*M);
+ delete M;
+ }
+}
+
+AnalysisDeclContextManager::~AnalysisDeclContextManager() {
+ llvm::DeleteContainerSeconds(Contexts);
+}
+
+LocationContext::~LocationContext() {}
+
+LocationContextManager::~LocationContextManager() {
+ clear();
+}
+
+void LocationContextManager::clear() {
+ for (llvm::FoldingSet<LocationContext>::iterator I = Contexts.begin(),
+ E = Contexts.end(); I != E; ) {
+ LocationContext *LC = &*I;
+ ++I;
+ delete LC;
+ }
+
+ Contexts.clear();
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Analysis/BodyFarm.cpp b/contrib/llvm/tools/clang/lib/Analysis/BodyFarm.cpp
new file mode 100644
index 0000000..0990436
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/BodyFarm.cpp
@@ -0,0 +1,469 @@
+//== BodyFarm.cpp - Factory for conjuring up fake bodies ----------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// BodyFarm is a factory for creating faux implementations for functions/methods
+// for analysis purposes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BodyFarm.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Analysis/CodeInjector.h"
+#include "llvm/ADT/StringSwitch.h"
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Helper creation functions for constructing faux ASTs.
+//===----------------------------------------------------------------------===//
+
+static bool isDispatchBlock(QualType Ty) {
+ // Is it a block pointer?
+ const BlockPointerType *BPT = Ty->getAs<BlockPointerType>();
+ if (!BPT)
+ return false;
+
+ // Check if the block pointer type takes no arguments and
+ // returns void.
+ const FunctionProtoType *FT =
+ BPT->getPointeeType()->getAs<FunctionProtoType>();
+ return FT && FT->getReturnType()->isVoidType() && FT->getNumParams() == 0;
+}
+
+namespace {
+class ASTMaker {
+public:
+ ASTMaker(ASTContext &C) : C(C) {}
+
+ /// Create a new BinaryOperator representing a simple assignment.
+ BinaryOperator *makeAssignment(const Expr *LHS, const Expr *RHS, QualType Ty);
+
+ /// Create a new BinaryOperator representing a comparison.
+ BinaryOperator *makeComparison(const Expr *LHS, const Expr *RHS,
+ BinaryOperator::Opcode Op);
+
+ /// Create a new compound stmt using the provided statements.
+ CompoundStmt *makeCompound(ArrayRef<Stmt*>);
+
+ /// Create a new DeclRefExpr for the referenced variable.
+ DeclRefExpr *makeDeclRefExpr(const VarDecl *D);
+
+ /// Create a new UnaryOperator representing a dereference.
+ UnaryOperator *makeDereference(const Expr *Arg, QualType Ty);
+
+ /// Create an implicit cast for an integer conversion.
+ Expr *makeIntegralCast(const Expr *Arg, QualType Ty);
+
+ /// Create an implicit cast to a builtin boolean type.
+ ImplicitCastExpr *makeIntegralCastToBoolean(const Expr *Arg);
+
+ /// Create an implicit cast for lvalue-to-rvalue conversions.
+ ImplicitCastExpr *makeLvalueToRvalue(const Expr *Arg, QualType Ty);
+
+ /// Create an Objective-C bool literal.
+ ObjCBoolLiteralExpr *makeObjCBool(bool Val);
+
+ /// Create an Objective-C ivar reference.
+ ObjCIvarRefExpr *makeObjCIvarRef(const Expr *Base, const ObjCIvarDecl *IVar);
+
+ /// Create a Return statement.
+ ReturnStmt *makeReturn(const Expr *RetVal);
+
+private:
+ ASTContext &C;
+};
+} // end anonymous namespace
+
+BinaryOperator *ASTMaker::makeAssignment(const Expr *LHS, const Expr *RHS,
+ QualType Ty) {
+ return new (C) BinaryOperator(const_cast<Expr*>(LHS), const_cast<Expr*>(RHS),
+ BO_Assign, Ty, VK_RValue,
+ OK_Ordinary, SourceLocation(), false);
+}
+
+BinaryOperator *ASTMaker::makeComparison(const Expr *LHS, const Expr *RHS,
+ BinaryOperator::Opcode Op) {
+ assert(BinaryOperator::isLogicalOp(Op) ||
+ BinaryOperator::isComparisonOp(Op));
+ return new (C) BinaryOperator(const_cast<Expr*>(LHS),
+ const_cast<Expr*>(RHS),
+ Op,
+ C.getLogicalOperationType(),
+ VK_RValue,
+ OK_Ordinary, SourceLocation(), false);
+}
+
+CompoundStmt *ASTMaker::makeCompound(ArrayRef<Stmt *> Stmts) {
+ return new (C) CompoundStmt(C, Stmts, SourceLocation(), SourceLocation());
+}
+
+DeclRefExpr *ASTMaker::makeDeclRefExpr(const VarDecl *D) {
+ DeclRefExpr *DR =
+ DeclRefExpr::Create(/* Ctx = */ C,
+ /* QualifierLoc = */ NestedNameSpecifierLoc(),
+ /* TemplateKWLoc = */ SourceLocation(),
+ /* D = */ const_cast<VarDecl*>(D),
+ /* RefersToEnclosingVariableOrCapture = */ false,
+ /* NameLoc = */ SourceLocation(),
+ /* T = */ D->getType(),
+ /* VK = */ VK_LValue);
+ return DR;
+}
+
+UnaryOperator *ASTMaker::makeDereference(const Expr *Arg, QualType Ty) {
+ return new (C) UnaryOperator(const_cast<Expr*>(Arg), UO_Deref, Ty,
+ VK_LValue, OK_Ordinary, SourceLocation());
+}
+
+ImplicitCastExpr *ASTMaker::makeLvalueToRvalue(const Expr *Arg, QualType Ty) {
+ return ImplicitCastExpr::Create(C, Ty, CK_LValueToRValue,
+ const_cast<Expr*>(Arg), nullptr, VK_RValue);
+}
+
+Expr *ASTMaker::makeIntegralCast(const Expr *Arg, QualType Ty) {
+ if (Arg->getType() == Ty)
+ return const_cast<Expr*>(Arg);
+
+ return ImplicitCastExpr::Create(C, Ty, CK_IntegralCast,
+ const_cast<Expr*>(Arg), nullptr, VK_RValue);
+}
+
+ImplicitCastExpr *ASTMaker::makeIntegralCastToBoolean(const Expr *Arg) {
+ return ImplicitCastExpr::Create(C, C.BoolTy, CK_IntegralToBoolean,
+ const_cast<Expr*>(Arg), nullptr, VK_RValue);
+}
+
+ObjCBoolLiteralExpr *ASTMaker::makeObjCBool(bool Val) {
+ QualType Ty = C.getBOOLDecl() ? C.getBOOLType() : C.ObjCBuiltinBoolTy;
+ return new (C) ObjCBoolLiteralExpr(Val, Ty, SourceLocation());
+}
+
+ObjCIvarRefExpr *ASTMaker::makeObjCIvarRef(const Expr *Base,
+ const ObjCIvarDecl *IVar) {
+ return new (C) ObjCIvarRefExpr(const_cast<ObjCIvarDecl*>(IVar),
+ IVar->getType(), SourceLocation(),
+ SourceLocation(), const_cast<Expr*>(Base),
+ /*arrow=*/true, /*free=*/false);
+}
+
+
+ReturnStmt *ASTMaker::makeReturn(const Expr *RetVal) {
+ return new (C) ReturnStmt(SourceLocation(), const_cast<Expr*>(RetVal),
+ nullptr);
+}
+
+//===----------------------------------------------------------------------===//
+// Creation functions for faux ASTs.
+//===----------------------------------------------------------------------===//
+
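+// A FunctionFarmer inspects a function declaration and either returns a
+// conjured body or nullptr to decline; BodyFarm::getBody() selects one by
+// function name.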
+typedef Stmt *(*FunctionFarmer)(ASTContext &C, const FunctionDecl *D);
+
+/// Create a fake body for dispatch_once.
+static Stmt *create_dispatch_once(ASTContext &C, const FunctionDecl *D) {
+ // Check that we have exactly two parameters.
+ if (D->param_size() != 2)
+ return nullptr;
+
+ // Check if the first parameter is a pointer to integer type.
+ const ParmVarDecl *Predicate = D->getParamDecl(0);
+ QualType PredicateQPtrTy = Predicate->getType();
+ const PointerType *PredicatePtrTy = PredicateQPtrTy->getAs<PointerType>();
+ if (!PredicatePtrTy)
+ return nullptr;
+ QualType PredicateTy = PredicatePtrTy->getPointeeType();
+ if (!PredicateTy->isIntegerType())
+ return nullptr;
+
+ // Check if the second parameter is the proper block type.
+ const ParmVarDecl *Block = D->getParamDecl(1);
+ QualType Ty = Block->getType();
+ if (!isDispatchBlock(Ty))
+ return nullptr;
+
+ // Everything checks out. Create a fake body that checks the predicate,
+ // sets it, and calls the block. Basically, an AST dump of:
+ //
+ // void dispatch_once(dispatch_once_t *predicate, dispatch_block_t block) {
+ // if (!*predicate) {
+ // *predicate = 1;
+ // block();
+ // }
+ // }
+
+ ASTMaker M(C);
+
+ // (1) Create the call.
+ DeclRefExpr *DR = M.makeDeclRefExpr(Block);
+ ImplicitCastExpr *ICE = M.makeLvalueToRvalue(DR, Ty);
+ CallExpr *CE = new (C) CallExpr(C, ICE, None, C.VoidTy, VK_RValue,
+ SourceLocation());
+
+ // (2) Create the assignment to the predicate.
+ IntegerLiteral *IL =
+ IntegerLiteral::Create(C, llvm::APInt(C.getTypeSize(C.IntTy), (uint64_t) 1),
+ C.IntTy, SourceLocation());
+ BinaryOperator *B =
+ M.makeAssignment(
+ M.makeDereference(
+ M.makeLvalueToRvalue(
+ M.makeDeclRefExpr(Predicate), PredicateQPtrTy),
+ PredicateTy),
+ M.makeIntegralCast(IL, PredicateTy),
+ PredicateTy);
+
+ // (3) Create the compound statement.
+ Stmt *Stmts[] = { B, CE };
+ CompoundStmt *CS = M.makeCompound(Stmts);
+
+ // (4) Create the 'if' condition.
+ ImplicitCastExpr *LValToRval =
+ M.makeLvalueToRvalue(
+ M.makeDereference(
+ M.makeLvalueToRvalue(
+ M.makeDeclRefExpr(Predicate),
+ PredicateQPtrTy),
+ PredicateTy),
+ PredicateTy);
+
+ UnaryOperator *UO = new (C) UnaryOperator(LValToRval, UO_LNot, C.IntTy,
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+
+ // (5) Create the 'if' statement.
+ IfStmt *If = new (C) IfStmt(C, SourceLocation(), nullptr, UO, CS);
+ return If;
+}
+
+/// Create a fake body for dispatch_sync.
+static Stmt *create_dispatch_sync(ASTContext &C, const FunctionDecl *D) {
+ // Check that we have exactly two parameters.
+ if (D->param_size() != 2)
+ return nullptr;
+
+ // Check if the second parameter is a block.
+ const ParmVarDecl *PV = D->getParamDecl(1);
+ QualType Ty = PV->getType();
+ if (!isDispatchBlock(Ty))
+ return nullptr;
+
+ // Everything checks out. Create a fake body that just calls the block.
+ // This is basically just an AST dump of:
+ //
+ // void dispatch_sync(dispatch_queue_t queue, void (^block)(void)) {
+ // block();
+ // }
+ //
+ ASTMaker M(C);
+ DeclRefExpr *DR = M.makeDeclRefExpr(PV);
+ ImplicitCastExpr *ICE = M.makeLvalueToRvalue(DR, Ty);
+ CallExpr *CE = new (C) CallExpr(C, ICE, None, C.VoidTy, VK_RValue,
+ SourceLocation());
+ return CE;
+}
+
+static Stmt *create_OSAtomicCompareAndSwap(ASTContext &C, const FunctionDecl *D)
+{
+ // There are exactly 3 arguments.
+ if (D->param_size() != 3)
+ return nullptr;
+
+ // Signature:
+ // _Bool OSAtomicCompareAndSwapPtr(void *__oldValue,
+ // void *__newValue,
+ // void * volatile *__theValue)
+ // Generate body:
+ // if (oldValue == *theValue) {
+ // *theValue = newValue;
+ // return YES;
+ // }
+ // else return NO;
+
+ QualType ResultTy = D->getReturnType();
+ bool isBoolean = ResultTy->isBooleanType();
+ if (!isBoolean && !ResultTy->isIntegralType(C))
+ return nullptr;
+
+ const ParmVarDecl *OldValue = D->getParamDecl(0);
+ QualType OldValueTy = OldValue->getType();
+
+ const ParmVarDecl *NewValue = D->getParamDecl(1);
+ QualType NewValueTy = NewValue->getType();
+
+ assert(OldValueTy == NewValueTy);
+
+ const ParmVarDecl *TheValue = D->getParamDecl(2);
+ QualType TheValueTy = TheValue->getType();
+ const PointerType *PT = TheValueTy->getAs<PointerType>();
+ if (!PT)
+ return nullptr;
+ QualType PointeeTy = PT->getPointeeType();
+
+ ASTMaker M(C);
+ // Construct the comparison.
+ Expr *Comparison =
+ M.makeComparison(
+ M.makeLvalueToRvalue(M.makeDeclRefExpr(OldValue), OldValueTy),
+ M.makeLvalueToRvalue(
+ M.makeDereference(
+ M.makeLvalueToRvalue(M.makeDeclRefExpr(TheValue), TheValueTy),
+ PointeeTy),
+ PointeeTy),
+ BO_EQ);
+
+ // Construct the body of the IfStmt.
+ Stmt *Stmts[2];
+ Stmts[0] =
+ M.makeAssignment(
+ M.makeDereference(
+ M.makeLvalueToRvalue(M.makeDeclRefExpr(TheValue), TheValueTy),
+ PointeeTy),
+ M.makeLvalueToRvalue(M.makeDeclRefExpr(NewValue), NewValueTy),
+ NewValueTy);
+
+ Expr *BoolVal = M.makeObjCBool(true);
+ Expr *RetVal = isBoolean ? M.makeIntegralCastToBoolean(BoolVal)
+ : M.makeIntegralCast(BoolVal, ResultTy);
+ Stmts[1] = M.makeReturn(RetVal);
+ CompoundStmt *Body = M.makeCompound(Stmts);
+
+ // Construct the else clause.
+ BoolVal = M.makeObjCBool(false);
+ RetVal = isBoolean ? M.makeIntegralCastToBoolean(BoolVal)
+ : M.makeIntegralCast(BoolVal, ResultTy);
+ Stmt *Else = M.makeReturn(RetVal);
+
+ // Construct the If.
+ Stmt *If =
+ new (C) IfStmt(C, SourceLocation(), nullptr, Comparison, Body,
+ SourceLocation(), Else);
+
+ return If;
+}
+
+Stmt *BodyFarm::getBody(const FunctionDecl *D) {
+ D = D->getCanonicalDecl();
+
+ Optional<Stmt *> &Val = Bodies[D];
+ if (Val.hasValue())
+ return Val.getValue();
+
+ Val = nullptr;
+
+ if (D->getIdentifier() == nullptr)
+ return nullptr;
+
+ StringRef Name = D->getName();
+ if (Name.empty())
+ return nullptr;
+
+ FunctionFarmer FF;
+
+ if (Name.startswith("OSAtomicCompareAndSwap") ||
+ Name.startswith("objc_atomicCompareAndSwap")) {
+ FF = create_OSAtomicCompareAndSwap;
+ }
+ else {
+ FF = llvm::StringSwitch<FunctionFarmer>(Name)
+ .Case("dispatch_sync", create_dispatch_sync)
+ .Case("dispatch_once", create_dispatch_once)
+ .Default(nullptr);
+ }
+
+ if (FF) { Val = FF(C, D); }
+ else if (Injector) { Val = Injector->getBody(D); }
+ return Val.getValue();
+}
+
+static Stmt *createObjCPropertyGetter(ASTContext &Ctx,
+ const ObjCPropertyDecl *Prop) {
+ // First, find the backing ivar.
+ const ObjCIvarDecl *IVar = Prop->getPropertyIvarDecl();
+ if (!IVar)
+ return nullptr;
+
+ // Ignore weak variables, which have special behavior.
+ if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)
+ return nullptr;
+
+ // Look to see if Sema has synthesized a body for us. This happens in
+ // Objective-C++ because the return value may be a C++ class type with a
+ // non-trivial copy constructor. We can only do this if we can find the
+ // @synthesize for this property, though (or if we know it's been auto-
+ // synthesized).
+ const ObjCImplementationDecl *ImplDecl =
+ IVar->getContainingInterface()->getImplementation();
+ if (ImplDecl) {
+ for (const auto *I : ImplDecl->property_impls()) {
+ if (I->getPropertyDecl() != Prop)
+ continue;
+
+ if (I->getGetterCXXConstructor()) {
+ ASTMaker M(Ctx);
+ return M.makeReturn(I->getGetterCXXConstructor());
+ }
+ }
+ }
+
+ // Sanity check that the property is the same type as the ivar, or a
+ // reference to it, and that it is either an object pointer or trivially
+ // copyable.
+ if (!Ctx.hasSameUnqualifiedType(IVar->getType(),
+ Prop->getType().getNonReferenceType()))
+ return nullptr;
+ if (!IVar->getType()->isObjCLifetimeType() &&
+ !IVar->getType().isTriviallyCopyableType(Ctx))
+ return nullptr;
+
+ // Generate our body:
+ // return self->_ivar;
+ ASTMaker M(Ctx);
+
+ const VarDecl *selfVar = Prop->getGetterMethodDecl()->getSelfDecl();
+
+ Expr *loadedIVar =
+ M.makeObjCIvarRef(
+ M.makeLvalueToRvalue(
+ M.makeDeclRefExpr(selfVar),
+ selfVar->getType()),
+ IVar);
+
+ if (!Prop->getType()->isReferenceType())
+ loadedIVar = M.makeLvalueToRvalue(loadedIVar, IVar->getType());
+
+ return M.makeReturn(loadedIVar);
+}
+
+Stmt *BodyFarm::getBody(const ObjCMethodDecl *D) {
+ // We currently only know how to synthesize property accessors.
+ if (!D->isPropertyAccessor())
+ return nullptr;
+
+ D = D->getCanonicalDecl();
+
+ Optional<Stmt *> &Val = Bodies[D];
+ if (Val.hasValue())
+ return Val.getValue();
+ Val = nullptr;
+
+ const ObjCPropertyDecl *Prop = D->findPropertyDecl();
+ if (!Prop)
+ return nullptr;
+
+ // For now, we only synthesize getters.
+ if (D->param_size() != 0)
+ return nullptr;
+
+ Val = createObjCPropertyGetter(C, Prop);
+
+ return Val.getValue();
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Analysis/BodyFarm.h b/contrib/llvm/tools/clang/lib/Analysis/BodyFarm.h
new file mode 100644
index 0000000..9137943
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/BodyFarm.h
@@ -0,0 +1,51 @@
+//== BodyFarm.h - Factory for conjuring up fake bodies -------------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// BodyFarm is a factory for creating faux implementations for functions/methods
+// for analysis purposes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_ANALYSIS_BODYFARM_H
+#define LLVM_CLANG_LIB_ANALYSIS_BODYFARM_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+
+namespace clang {
+
+class ASTContext;
+class Decl;
+class FunctionDecl;
+class ObjCMethodDecl;
+class ObjCPropertyDecl;
+class Stmt;
+class CodeInjector;
+
+class BodyFarm {
+public:
+ BodyFarm(ASTContext &C, CodeInjector *injector) : C(C), Injector(injector) {}
+
+ /// Factory method for creating bodies for ordinary functions.
+ Stmt *getBody(const FunctionDecl *D);
+
+ /// Factory method for creating bodies for Objective-C properties.
+ Stmt *getBody(const ObjCMethodDecl *D);
+
+private:
+ typedef llvm::DenseMap<const Decl *, Optional<Stmt *> > BodyMap;
+
+ ASTContext &C;
+ BodyMap Bodies;
+ CodeInjector *Injector;
+};
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
new file mode 100644
index 0000000..ed2239f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
@@ -0,0 +1,4649 @@
+//===--- CFG.cpp - Classes for representing and building CFGs ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CFG and CFGBuilder classes for representing and
+// building Control-Flow Graphs (CFGs) from ASTs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/CFG.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/Builtins.h"
+#include "llvm/ADT/DenseMap.h"
+#include <memory>
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/GraphWriter.h"
+#include "llvm/Support/SaveAndRestore.h"
+
+using namespace clang;
+
+namespace {
+
+static SourceLocation GetEndLoc(Decl *D) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(D))
+ if (Expr *Ex = VD->getInit())
+ return Ex->getSourceRange().getEnd();
+ return D->getLocation();
+}
+
+/// Helper for tryNormalizeBinaryOperator. Attempts to extract an IntegerLiteral
+/// or EnumConstantDecl from the given Expr. If it fails, returns nullptr.
+const Expr *tryTransformToIntOrEnumConstant(const Expr *E) {
+ E = E->IgnoreParens();
+ if (isa<IntegerLiteral>(E))
+ return E;
+ if (auto *DR = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
+ return isa<EnumConstantDecl>(DR->getDecl()) ? DR : nullptr;
+ return nullptr;
+}
+
+/// Tries to interpret a binary operator into `Decl Op Expr` form, if Expr is
+/// an integer literal or an enum constant.
+///
+/// If this fails, at least one of the returned DeclRefExpr or Expr will be
+/// null.
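+///
+/// For example, `0 == Foo` is normalized to `Foo == 0`, flipping relational
+/// operators (BO_LT becomes BO_GT, and so on) so callers can treat both
+/// spellings uniformly.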
+static std::tuple<const DeclRefExpr *, BinaryOperatorKind, const Expr *>
+tryNormalizeBinaryOperator(const BinaryOperator *B) {
+ BinaryOperatorKind Op = B->getOpcode();
+
+ const Expr *MaybeDecl = B->getLHS();
+ const Expr *Constant = tryTransformToIntOrEnumConstant(B->getRHS());
+ // Expr looked like `0 == Foo` instead of `Foo == 0`
+ if (Constant == nullptr) {
+ // Flip the operator
+ if (Op == BO_GT)
+ Op = BO_LT;
+ else if (Op == BO_GE)
+ Op = BO_LE;
+ else if (Op == BO_LT)
+ Op = BO_GT;
+ else if (Op == BO_LE)
+ Op = BO_GE;
+
+ MaybeDecl = B->getRHS();
+ Constant = tryTransformToIntOrEnumConstant(B->getLHS());
+ }
+
+ auto *D = dyn_cast<DeclRefExpr>(MaybeDecl->IgnoreParenImpCasts());
+ return std::make_tuple(D, Op, Constant);
+}
+
+/// For an expression `x == Foo && x == Bar`, this determines whether
+/// `Foo` and `Bar` are either of the same enumeration type, or both integer
+/// literals.
+///
+/// It's an error to pass arguments that are not either IntegerLiterals
+/// or DeclRefExprs (whose decls are of type EnumConstantDecl).
+static bool areExprTypesCompatible(const Expr *E1, const Expr *E2) {
+ // User intent isn't clear if they're mixing int literals with enum
+ // constants.
+ if (isa<IntegerLiteral>(E1) != isa<IntegerLiteral>(E2))
+ return false;
+
+ // Integer literal comparisons, regardless of literal type, are acceptable.
+ if (isa<IntegerLiteral>(E1))
+ return true;
+
+ // IntegerLiterals are handled above and only EnumConstantDecls are expected
+ // beyond this point
+ assert(isa<DeclRefExpr>(E1) && isa<DeclRefExpr>(E2));
+ auto *Decl1 = cast<DeclRefExpr>(E1)->getDecl();
+ auto *Decl2 = cast<DeclRefExpr>(E2)->getDecl();
+
+ assert(isa<EnumConstantDecl>(Decl1) && isa<EnumConstantDecl>(Decl2));
+ const DeclContext *DC1 = Decl1->getDeclContext();
+ const DeclContext *DC2 = Decl2->getDeclContext();
+
+ assert(isa<EnumDecl>(DC1) && isa<EnumDecl>(DC2));
+ return DC1 == DC2;
+}
+
+class CFGBuilder;
+
+/// The CFG builder uses a recursive algorithm to build the CFG. When
+/// we process an expression, sometimes we know that we must add the
+/// subexpressions as block-level expressions. For example:
+///
+/// exp1 || exp2
+///
+/// When processing the '||' expression, we know that exp1 and exp2
+/// need to be added as block-level expressions, even though they
+/// might not normally need to be. AddStmtChoice records this
+/// contextual information. If AddStmtChoice is 'NotAlwaysAdd', then
+/// the builder has an option not to add a subexpression as a
+/// block-level expression.
+///
+class AddStmtChoice {
+public:
+ enum Kind { NotAlwaysAdd = 0, AlwaysAdd = 1 };
+
+ AddStmtChoice(Kind a_kind = NotAlwaysAdd) : kind(a_kind) {}
+
+ bool alwaysAdd(CFGBuilder &builder,
+ const Stmt *stmt) const;
+
+ /// Return a copy of this object, except with the 'always-add' bit
+ /// set as specified.
+ AddStmtChoice withAlwaysAdd(bool alwaysAdd) const {
+ return AddStmtChoice(alwaysAdd ? AlwaysAdd : NotAlwaysAdd);
+ }
+
+private:
+ Kind kind;
+};
+
+/// LocalScope - Node in tree of local scopes created for C++ implicit
+/// destructor calls generation. It contains list of automatic variables
+/// declared in the scope and link to position in previous scope this scope
+/// began in.
+///
+/// The process of creating local scopes is as follows:
+/// - Init CFGBuilder::ScopePos with invalid position (equivalent to null),
+/// - Before processing statements in scope (e.g. CompoundStmt) create
+/// LocalScope object using CFGBuilder::ScopePos as link to previous scope
+/// and set CFGBuilder::ScopePos to the end of new scope,
+/// - On every occurrence of VarDecl increase CFGBuilder::ScopePos if it points
+/// at this VarDecl,
+/// - For every normal (without jump) end of scope add to CFGBlock destructors
+/// for objects in the current scope,
+/// - For every jump add to CFGBlock destructors for objects
+/// between CFGBuilder::ScopePos and local scope position saved for jump
+/// target. Thanks to C++ restrictions on goto jumps, we can be sure that
+/// the jump target position will be on the path to the root from
+/// CFGBuilder::ScopePos (adding to the LocalScope any variable that doesn't
+/// need its constructor called can break this assumption),
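+///
+/// For illustration (informal sketch): given a function body
+///   { A a; A b; { A c; } }
+/// (where A has a non-trivial destructor, so the variables are tracked), the
+/// inner compound statement gets its own LocalScope containing only 'c',
+/// whose Prev iterator points into the outer scope just after 'b'; iterating
+/// from the inner scope's begin() visits c, then b, then a.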
+///
+class LocalScope {
+public:
+ typedef BumpVector<VarDecl*> AutomaticVarsTy;
+
+ /// const_iterator - Iterates the local scope backwards and jumps to the
+ /// previous scope on reaching the beginning of the currently iterated scope.
+ class const_iterator {
+ const LocalScope* Scope;
+
+ /// VarIter is guaranteed to be greater than 0 for every valid iterator.
+ /// Invalid iterator (with null Scope) has VarIter equal to 0.
+ unsigned VarIter;
+
+ public:
+ /// Create invalid iterator. Dereferencing invalid iterator is not allowed.
+ /// Incrementing invalid iterator is allowed and will result in invalid
+ /// iterator.
+ const_iterator()
+ : Scope(nullptr), VarIter(0) {}
+
+ /// Create valid iterator. If S.Prev is an invalid iterator and I is equal
+ /// to 0, this will create an invalid iterator.
+ const_iterator(const LocalScope& S, unsigned I)
+ : Scope(&S), VarIter(I) {
+ // Iterator to "end" of scope is not allowed. Handle it by going up
+ // in scopes tree possibly up to invalid iterator in the root.
+ if (VarIter == 0 && Scope)
+ *this = Scope->Prev;
+ }
+
+ VarDecl *const* operator->() const {
+ assert (Scope && "Dereferencing invalid iterator is not allowed");
+ assert (VarIter != 0 && "Iterator has invalid value of VarIter member");
+ return &Scope->Vars[VarIter - 1];
+ }
+ VarDecl *operator*() const {
+ return *this->operator->();
+ }
+
+ const_iterator &operator++() {
+ if (!Scope)
+ return *this;
+
+ assert (VarIter != 0 && "Iterator has invalid value of VarIter member");
+ --VarIter;
+ if (VarIter == 0)
+ *this = Scope->Prev;
+ return *this;
+ }
+ const_iterator operator++(int) {
+ const_iterator P = *this;
+ ++*this;
+ return P;
+ }
+
+ bool operator==(const const_iterator &rhs) const {
+ return Scope == rhs.Scope && VarIter == rhs.VarIter;
+ }
+ bool operator!=(const const_iterator &rhs) const {
+ return !(*this == rhs);
+ }
+
+ explicit operator bool() const {
+ return *this != const_iterator();
+ }
+
+ int distance(const_iterator L);
+ };
+
+ friend class const_iterator;
+
+private:
+ BumpVectorContext ctx;
+
+ /// Automatic variables in order of declaration.
+ AutomaticVarsTy Vars;
+ /// Iterator to variable in previous scope that was declared just before
+ /// the beginning of this scope.
+ const_iterator Prev;
+
+public:
+ /// Constructs empty scope linked to previous scope in specified place.
+ LocalScope(BumpVectorContext ctx, const_iterator P)
+ : ctx(std::move(ctx)), Vars(this->ctx, 4), Prev(P) {}
+
+ /// Beginning of the scope in the direction of CFG building (backwards).
+ const_iterator begin() const { return const_iterator(*this, Vars.size()); }
+
+ void addVar(VarDecl *VD) {
+ Vars.push_back(VD, ctx);
+ }
+};
+
+/// distance - Calculates distance from this to L. L must be reachable from this
+/// (with use of ++ operator). Cost of calculating the distance is linear w.r.t.
+/// number of scopes between this and L.
+int LocalScope::const_iterator::distance(LocalScope::const_iterator L) {
+ int D = 0;
+ const_iterator F = *this;
+ while (F.Scope != L.Scope) {
+ assert (F != const_iterator()
+ && "L iterator is not reachable from F iterator.");
+ D += F.VarIter;
+ F = F.Scope->Prev;
+ }
+ D += F.VarIter - L.VarIter;
+ return D;
+}
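+// For illustration: with the scope sketch above ({ A a; A b; { A c; } }),
+// the distance from the inner scope's begin() to the invalid root iterator
+// is 3, counting c, b, and a.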
+
+/// Structure for specifying position in CFG during its build process. It
+/// consists of CFGBlock that specifies position in CFG and
+/// LocalScope::const_iterator that specifies position in LocalScope graph.
+struct BlockScopePosPair {
+ BlockScopePosPair() : block(nullptr) {}
+ BlockScopePosPair(CFGBlock *b, LocalScope::const_iterator scopePos)
+ : block(b), scopePosition(scopePos) {}
+
+ CFGBlock *block;
+ LocalScope::const_iterator scopePosition;
+};
+
+/// TryResult - a class representing a variant over the values
+/// 'true', 'false', or 'unknown'. This is returned by tryEvaluateBool,
+/// and is used by the CFGBuilder to determine whether a branch condition
+/// can be decided up front during CFG construction.
+class TryResult {
+ int X;
+public:
+ TryResult(bool b) : X(b ? 1 : 0) {}
+ TryResult() : X(-1) {}
+
+ bool isTrue() const { return X == 1; }
+ bool isFalse() const { return X == 0; }
+ bool isKnown() const { return X >= 0; }
+ void negate() {
+ assert(isKnown());
+ X ^= 0x1;
+ }
+};
+
+TryResult bothKnownTrue(TryResult R1, TryResult R2) {
+ if (!R1.isKnown() || !R2.isKnown())
+ return TryResult();
+ return TryResult(R1.isTrue() && R2.isTrue());
+}
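+// For illustration: TryResult(true) and TryResult(false) are known values,
+// while a default-constructed TryResult() is unknown, so e.g.
+// bothKnownTrue(TryResult(true), TryResult()).isKnown() evaluates to false.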
+
+class reverse_children {
+ llvm::SmallVector<Stmt *, 12> childrenBuf;
+ ArrayRef<Stmt*> children;
+public:
+ reverse_children(Stmt *S);
+
+ typedef ArrayRef<Stmt*>::reverse_iterator iterator;
+ iterator begin() const { return children.rbegin(); }
+ iterator end() const { return children.rend(); }
+};
+
+
+reverse_children::reverse_children(Stmt *S) {
+ if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ children = CE->getRawSubExprs();
+ return;
+ }
+ switch (S->getStmtClass()) {
+ // Note: Fill in this switch with more cases we want to optimize.
+ case Stmt::InitListExprClass: {
+ InitListExpr *IE = cast<InitListExpr>(S);
+ children = llvm::makeArrayRef(reinterpret_cast<Stmt**>(IE->getInits()),
+ IE->getNumInits());
+ return;
+ }
+ default:
+ break;
+ }
+
+ // Default case for all other statements.
+ for (Stmt *SubStmt : S->children())
+ childrenBuf.push_back(SubStmt);
+
+ // This needs to be done *after* childrenBuf has been populated.
+ children = childrenBuf;
+}
+
+/// CFGBuilder - This class implements CFG construction from an AST.
+/// The builder is stateful: an instance of the builder should be used to only
+/// construct a single CFG.
+///
+/// Example usage:
+///
+/// CFGBuilder builder;
+/// std::unique_ptr<CFG> cfg = builder.buildCFG(decl, stmt1);
+///
+/// CFG construction is done via a recursive walk of an AST. We actually parse
+/// the AST in reverse order so that the successor of a basic block is
+/// constructed prior to its predecessor. This allows us to nicely capture
+/// implicit fall-throughs without extra basic blocks.
+///
+class CFGBuilder {
+ typedef BlockScopePosPair JumpTarget;
+ typedef BlockScopePosPair JumpSource;
+
+ ASTContext *Context;
+ std::unique_ptr<CFG> cfg;
+
+ CFGBlock *Block;
+ CFGBlock *Succ;
+ JumpTarget ContinueJumpTarget;
+ JumpTarget BreakJumpTarget;
+ CFGBlock *SwitchTerminatedBlock;
+ CFGBlock *DefaultCaseBlock;
+ CFGBlock *TryTerminatedBlock;
+
+ // Current position in local scope.
+ LocalScope::const_iterator ScopePos;
+
+ // LabelMap records the mapping from Label expressions to their jump targets.
+ typedef llvm::DenseMap<LabelDecl*, JumpTarget> LabelMapTy;
+ LabelMapTy LabelMap;
+
+ // A list of blocks that end with a "goto" that must be backpatched to their
+ // resolved targets upon completion of CFG construction.
+ typedef std::vector<JumpSource> BackpatchBlocksTy;
+ BackpatchBlocksTy BackpatchBlocks;
+
+ // A list of labels whose address has been taken (for indirect gotos).
+ typedef llvm::SmallPtrSet<LabelDecl*, 5> LabelSetTy;
+ LabelSetTy AddressTakenLabels;
+
+ bool badCFG;
+ const CFG::BuildOptions &BuildOpts;
+
+ // State to track for building switch statements.
+ bool switchExclusivelyCovered;
+ Expr::EvalResult *switchCond;
+
+ CFG::BuildOptions::ForcedBlkExprs::value_type *cachedEntry;
+ const Stmt *lastLookup;
+
+ // Caches boolean evaluations of expressions to avoid multiple re-evaluations
+ // during construction of branches for chained logical operators.
+ typedef llvm::DenseMap<Expr *, TryResult> CachedBoolEvalsTy;
+ CachedBoolEvalsTy CachedBoolEvals;
+
+public:
+ explicit CFGBuilder(ASTContext *astContext,
+ const CFG::BuildOptions &buildOpts)
+ : Context(astContext), cfg(new CFG()), // create a new CFG
+ Block(nullptr), Succ(nullptr),
+ SwitchTerminatedBlock(nullptr), DefaultCaseBlock(nullptr),
+ TryTerminatedBlock(nullptr), badCFG(false), BuildOpts(buildOpts),
+ switchExclusivelyCovered(false), switchCond(nullptr),
+ cachedEntry(nullptr), lastLookup(nullptr) {}
+
+ // buildCFG - Used by external clients to construct the CFG.
+ std::unique_ptr<CFG> buildCFG(const Decl *D, Stmt *Statement);
+
+ bool alwaysAdd(const Stmt *stmt);
+
+private:
+ // Visitors to walk an AST and construct the CFG.
+ CFGBlock *VisitAddrLabelExpr(AddrLabelExpr *A, AddStmtChoice asc);
+ CFGBlock *VisitBinaryOperator(BinaryOperator *B, AddStmtChoice asc);
+ CFGBlock *VisitBreakStmt(BreakStmt *B);
+ CFGBlock *VisitCallExpr(CallExpr *C, AddStmtChoice asc);
+ CFGBlock *VisitCaseStmt(CaseStmt *C);
+ CFGBlock *VisitChooseExpr(ChooseExpr *C, AddStmtChoice asc);
+ CFGBlock *VisitCompoundStmt(CompoundStmt *C);
+ CFGBlock *VisitConditionalOperator(AbstractConditionalOperator *C,
+ AddStmtChoice asc);
+ CFGBlock *VisitContinueStmt(ContinueStmt *C);
+ CFGBlock *VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E,
+ AddStmtChoice asc);
+ CFGBlock *VisitCXXCatchStmt(CXXCatchStmt *S);
+ CFGBlock *VisitCXXConstructExpr(CXXConstructExpr *C, AddStmtChoice asc);
+ CFGBlock *VisitCXXNewExpr(CXXNewExpr *DE, AddStmtChoice asc);
+ CFGBlock *VisitCXXDeleteExpr(CXXDeleteExpr *DE, AddStmtChoice asc);
+ CFGBlock *VisitCXXForRangeStmt(CXXForRangeStmt *S);
+ CFGBlock *VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E,
+ AddStmtChoice asc);
+ CFGBlock *VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *C,
+ AddStmtChoice asc);
+ CFGBlock *VisitCXXThrowExpr(CXXThrowExpr *T);
+ CFGBlock *VisitCXXTryStmt(CXXTryStmt *S);
+ CFGBlock *VisitDeclStmt(DeclStmt *DS);
+ CFGBlock *VisitDeclSubExpr(DeclStmt *DS);
+ CFGBlock *VisitDefaultStmt(DefaultStmt *D);
+ CFGBlock *VisitDoStmt(DoStmt *D);
+ CFGBlock *VisitExprWithCleanups(ExprWithCleanups *E, AddStmtChoice asc);
+ CFGBlock *VisitForStmt(ForStmt *F);
+ CFGBlock *VisitGotoStmt(GotoStmt *G);
+ CFGBlock *VisitIfStmt(IfStmt *I);
+ CFGBlock *VisitImplicitCastExpr(ImplicitCastExpr *E, AddStmtChoice asc);
+ CFGBlock *VisitIndirectGotoStmt(IndirectGotoStmt *I);
+ CFGBlock *VisitLabelStmt(LabelStmt *L);
+ CFGBlock *VisitBlockExpr(BlockExpr *E, AddStmtChoice asc);
+ CFGBlock *VisitLambdaExpr(LambdaExpr *E, AddStmtChoice asc);
+ CFGBlock *VisitLogicalOperator(BinaryOperator *B);
+ std::pair<CFGBlock *, CFGBlock *> VisitLogicalOperator(BinaryOperator *B,
+ Stmt *Term,
+ CFGBlock *TrueBlock,
+ CFGBlock *FalseBlock);
+ CFGBlock *VisitMemberExpr(MemberExpr *M, AddStmtChoice asc);
+ CFGBlock *VisitObjCAtCatchStmt(ObjCAtCatchStmt *S);
+ CFGBlock *VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S);
+ CFGBlock *VisitObjCAtThrowStmt(ObjCAtThrowStmt *S);
+ CFGBlock *VisitObjCAtTryStmt(ObjCAtTryStmt *S);
+ CFGBlock *VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S);
+ CFGBlock *VisitObjCForCollectionStmt(ObjCForCollectionStmt *S);
+ CFGBlock *VisitPseudoObjectExpr(PseudoObjectExpr *E);
+ CFGBlock *VisitReturnStmt(ReturnStmt *R);
+ CFGBlock *VisitStmtExpr(StmtExpr *S, AddStmtChoice asc);
+ CFGBlock *VisitSwitchStmt(SwitchStmt *S);
+ CFGBlock *VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E,
+ AddStmtChoice asc);
+ CFGBlock *VisitUnaryOperator(UnaryOperator *U, AddStmtChoice asc);
+ CFGBlock *VisitWhileStmt(WhileStmt *W);
+
+ CFGBlock *Visit(Stmt *S, AddStmtChoice asc = AddStmtChoice::NotAlwaysAdd);
+ CFGBlock *VisitStmt(Stmt *S, AddStmtChoice asc);
+ CFGBlock *VisitChildren(Stmt *S);
+ CFGBlock *VisitNoRecurse(Expr *E, AddStmtChoice asc);
+
+ /// When creating the CFG for temporary destructors, we want to mirror the
+ /// branch structure of the corresponding constructor calls.
+ /// Thus, while visiting a statement for temporary destructors, we keep a
+ /// context to keep track of the following information:
+ /// - whether a subexpression is executed unconditionally
+ /// - if a subexpression is executed conditionally, the first
+ /// CXXBindTemporaryExpr we encounter in that subexpression (which
+ /// corresponds to the last temporary destructor we have to call for this
+ /// subexpression) and the CFG block at that point (which will become the
+ /// successor block when inserting the decision point).
+ ///
+ /// That way, we can build the branch structure for temporary destructors as
+ /// follows:
+ /// 1. If a subexpression is executed unconditionally, we add the temporary
+ /// destructor calls to the current block.
+ /// 2. If a subexpression is executed conditionally, when we encounter a
+ /// CXXBindTemporaryExpr:
+ /// a) If it is the first temporary destructor call in the subexpression,
+ /// we remember the CXXBindTemporaryExpr and the current block in the
+ /// TempDtorContext; we start a new block, and insert the temporary
+ /// destructor call.
+ /// b) Otherwise, add the temporary destructor call to the current block.
+ /// 3. When we have finished visiting a conditionally executed
+ /// subexpression and found at least one temporary constructor during the
+ /// visitation (2.a has executed), we insert a decision block that uses the
+ /// CXXBindTemporaryExpr as terminator, and branches to the current block
+ /// if the CXXBindTemporaryExpr was marked executed, and otherwise
+ /// branches to the stored successor.
+ struct TempDtorContext {
+ TempDtorContext()
+ : IsConditional(false), KnownExecuted(true), Succ(nullptr),
+ TerminatorExpr(nullptr) {}
+
+ TempDtorContext(TryResult KnownExecuted)
+ : IsConditional(true), KnownExecuted(KnownExecuted), Succ(nullptr),
+ TerminatorExpr(nullptr) {}
+
+ /// Returns whether we need to start a new branch for a temporary destructor
+ /// call. This is the case when the temporary destructor is
+ /// conditionally executed, and it is the first one we encounter while
+ /// visiting a subexpression - other temporary destructors at the same level
+ /// will be added to the same block and are executed under the same
+ /// condition.
+ bool needsTempDtorBranch() const {
+ return IsConditional && !TerminatorExpr;
+ }
+
+ /// Remember the successor S of a temporary destructor decision branch for
+ /// the corresponding CXXBindTemporaryExpr E.
+ void setDecisionPoint(CFGBlock *S, CXXBindTemporaryExpr *E) {
+ Succ = S;
+ TerminatorExpr = E;
+ }
+
+ const bool IsConditional;
+ const TryResult KnownExecuted;
+ CFGBlock *Succ;
+ CXXBindTemporaryExpr *TerminatorExpr;
+ };
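+ // For illustration (informal sketch): in `b && A()` the temporary A is
+ // created only when 'b' is true, so its destructor is conditional. The
+ // RHS is visited with a conditional TempDtorContext; the first
+ // CXXBindTemporaryExpr encountered records the decision point, and
+ // InsertTempDtorDecisionBlock later emits a block terminated by that
+ // CXXBindTemporaryExpr, branching to the destructor block only if the
+ // temporary was actually constructed.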
+
+ // Visitors to walk an AST and generate destructors of temporaries in
+ // a full expression.
+ CFGBlock *VisitForTemporaryDtors(Stmt *E, bool BindToTemporary,
+ TempDtorContext &Context);
+ CFGBlock *VisitChildrenForTemporaryDtors(Stmt *E, TempDtorContext &Context);
+ CFGBlock *VisitBinaryOperatorForTemporaryDtors(BinaryOperator *E,
+ TempDtorContext &Context);
+ CFGBlock *VisitCXXBindTemporaryExprForTemporaryDtors(
+ CXXBindTemporaryExpr *E, bool BindToTemporary, TempDtorContext &Context);
+ CFGBlock *VisitConditionalOperatorForTemporaryDtors(
+ AbstractConditionalOperator *E, bool BindToTemporary,
+ TempDtorContext &Context);
+ void InsertTempDtorDecisionBlock(const TempDtorContext &Context,
+ CFGBlock *FalseSucc = nullptr);
+
+ // NYS == Not Yet Supported
+ CFGBlock *NYS() {
+ badCFG = true;
+ return Block;
+ }
+
+ void autoCreateBlock() { if (!Block) Block = createBlock(); }
+ CFGBlock *createBlock(bool add_successor = true);
+ CFGBlock *createNoReturnBlock();
+
+ CFGBlock *addStmt(Stmt *S) {
+ return Visit(S, AddStmtChoice::AlwaysAdd);
+ }
+ CFGBlock *addInitializer(CXXCtorInitializer *I);
+ void addAutomaticObjDtors(LocalScope::const_iterator B,
+ LocalScope::const_iterator E, Stmt *S);
+ void addImplicitDtorsForDestructor(const CXXDestructorDecl *DD);
+
+ // Local scopes creation.
+ LocalScope* createOrReuseLocalScope(LocalScope* Scope);
+
+ void addLocalScopeForStmt(Stmt *S);
+ LocalScope* addLocalScopeForDeclStmt(DeclStmt *DS,
+ LocalScope* Scope = nullptr);
+ LocalScope* addLocalScopeForVarDecl(VarDecl *VD, LocalScope* Scope = nullptr);
+
+ void addLocalScopeAndDtors(Stmt *S);
+
+ // Interface to CFGBlock - adding CFGElements.
+ void appendStmt(CFGBlock *B, const Stmt *S) {
+ if (alwaysAdd(S) && cachedEntry)
+ cachedEntry->second = B;
+
+ // All block-level expressions should have already been IgnoreParens()ed.
+ assert(!isa<Expr>(S) || cast<Expr>(S)->IgnoreParens() == S);
+ B->appendStmt(const_cast<Stmt*>(S), cfg->getBumpVectorContext());
+ }
+ void appendInitializer(CFGBlock *B, CXXCtorInitializer *I) {
+ B->appendInitializer(I, cfg->getBumpVectorContext());
+ }
+ void appendNewAllocator(CFGBlock *B, CXXNewExpr *NE) {
+ B->appendNewAllocator(NE, cfg->getBumpVectorContext());
+ }
+ void appendBaseDtor(CFGBlock *B, const CXXBaseSpecifier *BS) {
+ B->appendBaseDtor(BS, cfg->getBumpVectorContext());
+ }
+ void appendMemberDtor(CFGBlock *B, FieldDecl *FD) {
+ B->appendMemberDtor(FD, cfg->getBumpVectorContext());
+ }
+ void appendTemporaryDtor(CFGBlock *B, CXXBindTemporaryExpr *E) {
+ B->appendTemporaryDtor(E, cfg->getBumpVectorContext());
+ }
+ void appendAutomaticObjDtor(CFGBlock *B, VarDecl *VD, Stmt *S) {
+ B->appendAutomaticObjDtor(VD, S, cfg->getBumpVectorContext());
+ }
+
+ void appendDeleteDtor(CFGBlock *B, CXXRecordDecl *RD, CXXDeleteExpr *DE) {
+ B->appendDeleteDtor(RD, DE, cfg->getBumpVectorContext());
+ }
+
+ void prependAutomaticObjDtorsWithTerminator(CFGBlock *Blk,
+ LocalScope::const_iterator B, LocalScope::const_iterator E);
+
+ void addSuccessor(CFGBlock *B, CFGBlock *S, bool IsReachable = true) {
+ B->addSuccessor(CFGBlock::AdjacentBlock(S, IsReachable),
+ cfg->getBumpVectorContext());
+ }
+
+ /// Add a reachable successor to a block, with the alternate variant that is
+ /// unreachable.
+ void addSuccessor(CFGBlock *B, CFGBlock *ReachableBlock, CFGBlock *AltBlock) {
+ B->addSuccessor(CFGBlock::AdjacentBlock(ReachableBlock, AltBlock),
+ cfg->getBumpVectorContext());
+ }
+
+ /// \brief Find a relational comparison with an expression evaluating to a
+ /// boolean and a constant other than 0 and 1.
+ /// e.g. if ((x < y) > 10)
+ TryResult checkIncorrectRelationalOperator(const BinaryOperator *B) {
+ const Expr *LHSExpr = B->getLHS()->IgnoreParens();
+ const Expr *RHSExpr = B->getRHS()->IgnoreParens();
+
+ const IntegerLiteral *IntLiteral = dyn_cast<IntegerLiteral>(LHSExpr);
+ const Expr *BoolExpr = RHSExpr;
+ bool IntFirst = true;
+ if (!IntLiteral) {
+ IntLiteral = dyn_cast<IntegerLiteral>(RHSExpr);
+ BoolExpr = LHSExpr;
+ IntFirst = false;
+ }
+
+ if (!IntLiteral || !BoolExpr->isKnownToHaveBooleanValue())
+ return TryResult();
+
+ llvm::APInt IntValue = IntLiteral->getValue();
+ if ((IntValue == 1) || (IntValue == 0))
+ return TryResult();
+
+ bool IntLarger = IntLiteral->getType()->isUnsignedIntegerType() ||
+ !IntValue.isNegative();
+
+ BinaryOperatorKind Bok = B->getOpcode();
+ if (Bok == BO_GT || Bok == BO_GE) {
+ // Always true for 10 > bool and bool > -1
+ // Always false for -1 > bool and bool > 10
+ return TryResult(IntFirst == IntLarger);
+ } else {
+ // Always true for -1 < bool and bool < 10
+ // Always false for 10 < bool and bool < -1
+ return TryResult(IntFirst != IntLarger);
+ }
+ }
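+ // For illustration: `(x < y) > 10` is always false, since the boolean
+ // subexpression is only ever 0 or 1, while `10 > (x < y)` is always true;
+ // both produce a known TryResult here.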
+
+ /// Find an incorrect equality comparison: either an expression evaluating
+ /// to a boolean compared against a constant other than 0 and 1,
+ /// e.g. if (!x == 10), or a bitwise and/or operation that always evaluates
+ /// to true/false, e.g. (x & 8) == 4.
+ TryResult checkIncorrectEqualityOperator(const BinaryOperator *B) {
+ const Expr *LHSExpr = B->getLHS()->IgnoreParens();
+ const Expr *RHSExpr = B->getRHS()->IgnoreParens();
+
+ const IntegerLiteral *IntLiteral = dyn_cast<IntegerLiteral>(LHSExpr);
+ const Expr *BoolExpr = RHSExpr;
+
+ if (!IntLiteral) {
+ IntLiteral = dyn_cast<IntegerLiteral>(RHSExpr);
+ BoolExpr = LHSExpr;
+ }
+
+ if (!IntLiteral)
+ return TryResult();
+
+ const BinaryOperator *BitOp = dyn_cast<BinaryOperator>(BoolExpr);
+ if (BitOp && (BitOp->getOpcode() == BO_And ||
+ BitOp->getOpcode() == BO_Or)) {
+ const Expr *LHSExpr2 = BitOp->getLHS()->IgnoreParens();
+ const Expr *RHSExpr2 = BitOp->getRHS()->IgnoreParens();
+
+ const IntegerLiteral *IntLiteral2 = dyn_cast<IntegerLiteral>(LHSExpr2);
+
+ if (!IntLiteral2)
+ IntLiteral2 = dyn_cast<IntegerLiteral>(RHSExpr2);
+
+ if (!IntLiteral2)
+ return TryResult();
+
+ llvm::APInt L1 = IntLiteral->getValue();
+ llvm::APInt L2 = IntLiteral2->getValue();
+ if ((BitOp->getOpcode() == BO_And && (L2 & L1) != L1) ||
+ (BitOp->getOpcode() == BO_Or && (L2 | L1) != L1)) {
+ if (BuildOpts.Observer)
+ BuildOpts.Observer->compareBitwiseEquality(B,
+ B->getOpcode() != BO_EQ);
+ // The two literals are incompatible under the bitwise operation, so
+ // the comparison has a known result.
+ return TryResult(B->getOpcode() != BO_EQ);
+ }
+ } else if (BoolExpr->isKnownToHaveBooleanValue()) {
+ llvm::APInt IntValue = IntLiteral->getValue();
+ if ((IntValue == 1) || (IntValue == 0)) {
+ return TryResult();
+ }
+ return TryResult(B->getOpcode() != BO_EQ);
+ }
+
+ return TryResult();
+ }
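+ // For illustration: `(x & 8) == 4` can never hold because `x & 8` is
+ // either 0 or 8; the bitwise check above detects this since (8 & 4) != 4
+ // and reports the comparison as always false.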
+
+ TryResult analyzeLogicOperatorCondition(BinaryOperatorKind Relation,
+ const llvm::APSInt &Value1,
+ const llvm::APSInt &Value2) {
+ assert(Value1.isSigned() == Value2.isSigned());
+ switch (Relation) {
+ default:
+ return TryResult();
+ case BO_EQ:
+ return TryResult(Value1 == Value2);
+ case BO_NE:
+ return TryResult(Value1 != Value2);
+ case BO_LT:
+ return TryResult(Value1 < Value2);
+ case BO_LE:
+ return TryResult(Value1 <= Value2);
+ case BO_GT:
+ return TryResult(Value1 > Value2);
+ case BO_GE:
+ return TryResult(Value1 >= Value2);
+ }
+ }
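+ // For illustration: analyzeLogicOperatorCondition(BO_LT, 2, 3) yields a
+ // known 'true', whereas any opcode outside these six comparison kinds
+ // falls into the default case and yields an unknown TryResult.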
+
+ /// \brief Find a pair of comparison expressions with or without parentheses
+ /// with a shared variable and constants and a logical operator between them
+ /// that always evaluates to either true or false.
+ /// e.g. if (x != 3 || x != 4)
+ TryResult checkIncorrectLogicOperator(const BinaryOperator *B) {
+ assert(B->isLogicalOp());
+ const BinaryOperator *LHS =
+ dyn_cast<BinaryOperator>(B->getLHS()->IgnoreParens());
+ const BinaryOperator *RHS =
+ dyn_cast<BinaryOperator>(B->getRHS()->IgnoreParens());
+ if (!LHS || !RHS)
+ return TryResult();
+
+ if (!LHS->isComparisonOp() || !RHS->isComparisonOp())
+ return TryResult();
+
+ const DeclRefExpr *Decl1;
+ const Expr *Expr1;
+ BinaryOperatorKind BO1;
+ std::tie(Decl1, BO1, Expr1) = tryNormalizeBinaryOperator(LHS);
+
+ if (!Decl1 || !Expr1)
+ return TryResult();
+
+ const DeclRefExpr *Decl2;
+ const Expr *Expr2;
+ BinaryOperatorKind BO2;
+ std::tie(Decl2, BO2, Expr2) = tryNormalizeBinaryOperator(RHS);
+
+ if (!Decl2 || !Expr2)
+ return TryResult();
+
+ // Check that it is the same variable on both sides.
+ if (Decl1->getDecl() != Decl2->getDecl())
+ return TryResult();
+
+ // Make sure the user's intent is clear (e.g. they're comparing against two
+ // int literals, or two things from the same enum)
+ if (!areExprTypesCompatible(Expr1, Expr2))
+ return TryResult();
+
+ llvm::APSInt L1, L2;
+
+ if (!Expr1->EvaluateAsInt(L1, *Context) ||
+ !Expr2->EvaluateAsInt(L2, *Context))
+ return TryResult();
+
+ // Can't compare signed with unsigned or with different bit width.
+ if (L1.isSigned() != L2.isSigned() || L1.getBitWidth() != L2.getBitWidth())
+ return TryResult();
+
+ // Values that will be used to determine whether the result of the
+ // logical operator is always true/false.
+ const llvm::APSInt Values[] = {
+ // Value less than both L1 and L2
+ llvm::APSInt::getMinValue(L1.getBitWidth(), L1.isUnsigned()),
+ // L1
+ L1,
+ // Value between L1 and L2
+ ((L1 < L2) ? L1 : L2) + llvm::APSInt(llvm::APInt(L1.getBitWidth(), 1),
+ L1.isUnsigned()),
+ // L2
+ L2,
+ // Value greater than both L1 and L2
+ llvm::APSInt::getMaxValue(L1.getBitWidth(), L1.isUnsigned()),
+ };
+
+ // Check whether the expression is always true/false by evaluating the
+ // following cases:
+ // * variable x is less than the smallest literal.
+ // * variable x is equal to the smallest literal.
+ // * variable x is between the smallest and largest literal.
+ // * variable x is equal to the largest literal.
+ // * variable x is greater than the largest literal.
+ bool AlwaysTrue = true, AlwaysFalse = true;
+ for (llvm::APSInt Value : Values) {
+ TryResult Res1, Res2;
+ Res1 = analyzeLogicOperatorCondition(BO1, Value, L1);
+ Res2 = analyzeLogicOperatorCondition(BO2, Value, L2);
+
+ if (!Res1.isKnown() || !Res2.isKnown())
+ return TryResult();
+
+ if (B->getOpcode() == BO_LAnd) {
+ AlwaysTrue &= (Res1.isTrue() && Res2.isTrue());
+ AlwaysFalse &= !(Res1.isTrue() && Res2.isTrue());
+ } else {
+ AlwaysTrue &= (Res1.isTrue() || Res2.isTrue());
+ AlwaysFalse &= !(Res1.isTrue() || Res2.isTrue());
+ }
+ }
+
+ if (AlwaysTrue || AlwaysFalse) {
+ if (BuildOpts.Observer)
+ BuildOpts.Observer->compareAlwaysTrue(B, AlwaysTrue);
+ return TryResult(AlwaysTrue);
+ }
+ return TryResult();
+ }
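+ // For illustration: `x != 3 || x != 4` probes the candidate values
+ // (min, 3, 4, 4, max); at least one operand is true for each candidate,
+ // so the condition is reported as always true. Conversely,
+ // `x == 3 && x == 4` is false for every candidate and is reported as
+ // always false.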
+
+ /// Try and evaluate an expression to an integer constant.
+ bool tryEvaluate(Expr *S, Expr::EvalResult &outResult) {
+ if (!BuildOpts.PruneTriviallyFalseEdges)
+ return false;
+ return !S->isTypeDependent() &&
+ !S->isValueDependent() &&
+ S->EvaluateAsRValue(outResult, *Context);
+ }
+
+ /// tryEvaluateBool - Try to evaluate the Stmt and return a known
+ /// true/false TryResult if possible, and an unknown TryResult otherwise.
+ TryResult tryEvaluateBool(Expr *S) {
+ if (!BuildOpts.PruneTriviallyFalseEdges ||
+ S->isTypeDependent() || S->isValueDependent())
+ return TryResult();
+
+ if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(S)) {
+ if (Bop->isLogicalOp()) {
+ // Check the cache first.
+ CachedBoolEvalsTy::iterator I = CachedBoolEvals.find(S);
+ if (I != CachedBoolEvals.end())
+ return I->second; // already in map;
+
+ // Compute the result first: the recursive evaluation may update the
+ // map and invalidate the insertion point.
+ TryResult Result = evaluateAsBooleanConditionNoCache(S);
+ CachedBoolEvals[S] = Result; // update or insert
+ return Result;
+ }
+ else {
+ switch (Bop->getOpcode()) {
+ default: break;
+ // For 'x & 0' and 'x * 0', we can determine that
+ // the value is always false.
+ case BO_Mul:
+ case BO_And: {
+ // If either operand is zero, we know the value
+ // must be false.
+ llvm::APSInt IntVal;
+ if (Bop->getLHS()->EvaluateAsInt(IntVal, *Context)) {
+ if (!IntVal.getBoolValue()) {
+ return TryResult(false);
+ }
+ }
+ if (Bop->getRHS()->EvaluateAsInt(IntVal, *Context)) {
+ if (!IntVal.getBoolValue()) {
+ return TryResult(false);
+ }
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ return evaluateAsBooleanConditionNoCache(S);
+ }
+
+ /// \brief Evaluate as boolean \p E without using the cache.
+ TryResult evaluateAsBooleanConditionNoCache(Expr *E) {
+ if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(E)) {
+ if (Bop->isLogicalOp()) {
+ TryResult LHS = tryEvaluateBool(Bop->getLHS());
+ if (LHS.isKnown()) {
+ // We were able to evaluate the LHS, see if we can get away with not
+ // evaluating the RHS: 0 && X -> 0, 1 || X -> 1
+ if (LHS.isTrue() == (Bop->getOpcode() == BO_LOr))
+ return LHS.isTrue();
+
+ TryResult RHS = tryEvaluateBool(Bop->getRHS());
+ if (RHS.isKnown()) {
+ if (Bop->getOpcode() == BO_LOr)
+ return LHS.isTrue() || RHS.isTrue();
+ else
+ return LHS.isTrue() && RHS.isTrue();
+ }
+ } else {
+ TryResult RHS = tryEvaluateBool(Bop->getRHS());
+ if (RHS.isKnown()) {
+ // We can't evaluate the LHS; however, sometimes the result
+ // is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
+ if (RHS.isTrue() == (Bop->getOpcode() == BO_LOr))
+ return RHS.isTrue();
+ } else {
+ TryResult BopRes = checkIncorrectLogicOperator(Bop);
+ if (BopRes.isKnown())
+ return BopRes.isTrue();
+ }
+ }
+
+ return TryResult();
+ } else if (Bop->isEqualityOp()) {
+ TryResult BopRes = checkIncorrectEqualityOperator(Bop);
+ if (BopRes.isKnown())
+ return BopRes.isTrue();
+ } else if (Bop->isRelationalOp()) {
+ TryResult BopRes = checkIncorrectRelationalOperator(Bop);
+ if (BopRes.isKnown())
+ return BopRes.isTrue();
+ }
+ }
+
+ bool Result;
+ if (E->EvaluateAsBooleanCondition(Result, *Context))
+ return Result;
+
+ return TryResult();
+ }
+
+};
+
+inline bool AddStmtChoice::alwaysAdd(CFGBuilder &builder,
+ const Stmt *stmt) const {
+ return builder.alwaysAdd(stmt) || kind == AlwaysAdd;
+}
+
+bool CFGBuilder::alwaysAdd(const Stmt *stmt) {
+ bool shouldAdd = BuildOpts.alwaysAdd(stmt);
+
+ if (!BuildOpts.forcedBlkExprs)
+ return shouldAdd;
+
+ if (lastLookup == stmt) {
+ if (cachedEntry) {
+ assert(cachedEntry->first == stmt);
+ return true;
+ }
+ return shouldAdd;
+ }
+
+ lastLookup = stmt;
+
+ // Perform the lookup!
+ CFG::BuildOptions::ForcedBlkExprs *fb = *BuildOpts.forcedBlkExprs;
+
+ if (!fb) {
+ // No need to update 'cachedEntry', since it will always be null.
+ assert(!cachedEntry);
+ return shouldAdd;
+ }
+
+ CFG::BuildOptions::ForcedBlkExprs::iterator itr = fb->find(stmt);
+ if (itr == fb->end()) {
+ cachedEntry = nullptr;
+ return shouldAdd;
+ }
+
+ cachedEntry = &*itr;
+ return true;
+}
+
+// FIXME: Add support for dependent-sized array types in C++?
+// Does it even make sense to build a CFG for an uninstantiated template?
+static const VariableArrayType *FindVA(const Type *t) {
+ while (const ArrayType *vt = dyn_cast<ArrayType>(t)) {
+ if (const VariableArrayType *vat = dyn_cast<VariableArrayType>(vt))
+ if (vat->getSizeExpr())
+ return vat;
+
+ t = vt->getElementType().getTypePtr();
+ }
+
+ return nullptr;
+}
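+// For illustration: for `int vla[n][10]` (with non-constant n) FindVA
+// returns the outer VariableArrayType directly; for `int arr[10][n]` it
+// walks through the constant outer dimension to the nested VLA; for a plain
+// `int a[10]` it returns nullptr.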
+
+/// BuildCFG - Constructs a CFG from an AST (a Stmt*). The AST can represent an
+/// arbitrary statement. Examples include a single expression or a function
+/// body (compound statement). The ownership of the returned CFG is
+/// transferred to the caller. If CFG construction fails, this method returns
+/// NULL.
+std::unique_ptr<CFG> CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) {
+ assert(cfg.get());
+ if (!Statement)
+ return nullptr;
+
+ // Create an empty block that will serve as the exit block for the CFG. Since
+ // this is the first block added to the CFG, it will be implicitly registered
+ // as the exit block.
+ Succ = createBlock();
+ assert(Succ == &cfg->getExit());
+ Block = nullptr; // the EXIT block is empty. Create all other blocks lazily.
+
+ if (BuildOpts.AddImplicitDtors)
+ if (const CXXDestructorDecl *DD = dyn_cast_or_null<CXXDestructorDecl>(D))
+ addImplicitDtorsForDestructor(DD);
+
+ // Visit the statements and create the CFG.
+ CFGBlock *B = addStmt(Statement);
+
+ if (badCFG)
+ return nullptr;
+
+ // For C++ constructor add initializers to CFG.
+ if (const CXXConstructorDecl *CD = dyn_cast_or_null<CXXConstructorDecl>(D)) {
+ for (auto *I : llvm::reverse(CD->inits())) {
+ B = addInitializer(I);
+ if (badCFG)
+ return nullptr;
+ }
+ }
+
+ if (B)
+ Succ = B;
+
+ // Backpatch the gotos whose label -> block mappings we didn't know when we
+ // encountered them.
+ for (BackpatchBlocksTy::iterator I = BackpatchBlocks.begin(),
+ E = BackpatchBlocks.end(); I != E; ++I ) {
+
+ CFGBlock *B = I->block;
+ const GotoStmt *G = cast<GotoStmt>(B->getTerminator());
+ LabelMapTy::iterator LI = LabelMap.find(G->getLabel());
+
+ // If there is no target for the goto, then we are looking at an
+ // incomplete AST. Handle this by not registering a successor.
+ if (LI == LabelMap.end()) continue;
+
+ JumpTarget JT = LI->second;
+ prependAutomaticObjDtorsWithTerminator(B, I->scopePosition,
+ JT.scopePosition);
+ addSuccessor(B, JT.block);
+ }
+
+ // Add successors to the Indirect Goto Dispatch block (if we have one).
+ if (CFGBlock *B = cfg->getIndirectGotoBlock())
+ for (LabelSetTy::iterator I = AddressTakenLabels.begin(),
+ E = AddressTakenLabels.end(); I != E; ++I ) {
+
+ // Lookup the target block.
+ LabelMapTy::iterator LI = LabelMap.find(*I);
+
+ // If there is no target block that contains label, then we are looking
+ // at an incomplete AST. Handle this by not registering a successor.
+ if (LI == LabelMap.end()) continue;
+
+ addSuccessor(B, LI->second.block);
+ }
+
+ // Create an empty entry block that has no predecessors.
+ cfg->setEntry(createBlock());
+
+ return std::move(cfg);
+}
+
+/// createBlock - Used to lazily create blocks that are connected
+/// to the current (global) successor.
+CFGBlock *CFGBuilder::createBlock(bool add_successor) {
+ CFGBlock *B = cfg->createBlock();
+ if (add_successor && Succ)
+ addSuccessor(B, Succ);
+ return B;
+}
+
+/// createNoReturnBlock - Used to create a block that is a 'noreturn' point in the
+/// CFG. It is *not* connected to the current (global) successor, and instead
+/// directly tied to the exit block in order to be reachable.
+CFGBlock *CFGBuilder::createNoReturnBlock() {
+ CFGBlock *B = createBlock(false);
+ B->setHasNoReturnElement();
+ addSuccessor(B, &cfg->getExit(), Succ);
+ return B;
+}
+
+/// addInitializer - Add C++ base or member initializer element to CFG.
+CFGBlock *CFGBuilder::addInitializer(CXXCtorInitializer *I) {
+ if (!BuildOpts.AddInitializers)
+ return Block;
+
+ bool HasTemporaries = false;
+
+ // Destructors of temporaries in initialization expression should be called
+ // after initialization finishes.
+ Expr *Init = I->getInit();
+ if (Init) {
+ HasTemporaries = isa<ExprWithCleanups>(Init);
+
+ if (BuildOpts.AddTemporaryDtors && HasTemporaries) {
+ // Generate destructors for temporaries in initialization expression.
+ TempDtorContext Context;
+ VisitForTemporaryDtors(cast<ExprWithCleanups>(Init)->getSubExpr(),
+ /*BindToTemporary=*/false, Context);
+ }
+ }
+
+ autoCreateBlock();
+ appendInitializer(Block, I);
+
+ if (Init) {
+ if (HasTemporaries) {
+ // For an expression with temporaries, go directly to the subexpression
+ // to avoid generating destructors a second time.
+ return Visit(cast<ExprWithCleanups>(Init)->getSubExpr());
+ }
+ if (BuildOpts.AddCXXDefaultInitExprInCtors) {
+ if (CXXDefaultInitExpr *Default = dyn_cast<CXXDefaultInitExpr>(Init)) {
+ // In general, appending the expression wrapped by a CXXDefaultInitExpr
+ // may cause the same Expr to appear more than once in the CFG. Doing it
+ // here is safe because there's only one initializer per field.
+ autoCreateBlock();
+ appendStmt(Block, Default);
+ if (Stmt *Child = Default->getExpr())
+ if (CFGBlock *R = Visit(Child))
+ Block = R;
+ return Block;
+ }
+ }
+ return Visit(Init);
+ }
+
+ return Block;
+}
+
+/// \brief Retrieve the type of the temporary object whose lifetime was
+/// extended by a local reference with the given initializer.
+static QualType getReferenceInitTemporaryType(ASTContext &Context,
+ const Expr *Init) {
+ while (true) {
+ // Skip parentheses.
+ Init = Init->IgnoreParens();
+
+ // Skip through cleanups.
+ if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Init)) {
+ Init = EWC->getSubExpr();
+ continue;
+ }
+
+ // Skip through the temporary-materialization expression.
+ if (const MaterializeTemporaryExpr *MTE
+ = dyn_cast<MaterializeTemporaryExpr>(Init)) {
+ Init = MTE->GetTemporaryExpr();
+ continue;
+ }
+
+ // Skip derived-to-base and no-op casts.
+ if (const CastExpr *CE = dyn_cast<CastExpr>(Init)) {
+ if ((CE->getCastKind() == CK_DerivedToBase ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase ||
+ CE->getCastKind() == CK_NoOp) &&
+ Init->getType()->isRecordType()) {
+ Init = CE->getSubExpr();
+ continue;
+ }
+ }
+
+ // Skip member accesses into rvalues.
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(Init)) {
+ if (!ME->isArrow() && ME->getBase()->isRValue()) {
+ Init = ME->getBase();
+ continue;
+ }
+ }
+
+ break;
+ }
+
+ return Init->getType();
+}
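+// For illustration (hypothetical type, not from this file): given
+//   struct S { int m; };  S make();
+// the initializer of `const int &r = make().m;` is unwrapped through the
+// materialization and the member access into the rvalue, yielding the
+// temporary's type S, whose lifetime the reference extends.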
+
+/// addAutomaticObjDtors - Add to the current block the destructors of
+/// automatic objects in the given range of local scope positions. Use S as
+/// the trigger statement for the destructors.
+void CFGBuilder::addAutomaticObjDtors(LocalScope::const_iterator B,
+ LocalScope::const_iterator E, Stmt *S) {
+ if (!BuildOpts.AddImplicitDtors)
+ return;
+
+ if (B == E)
+ return;
+
+ // We need to append the destructors in reverse order, but any one of them
+ // may be a no-return destructor which changes the CFG. As a result, buffer
+ // this sequence up and replay them in reverse order when appending onto the
+ // CFGBlock(s).
+ SmallVector<VarDecl*, 10> Decls;
+ Decls.reserve(B.distance(E));
+ for (LocalScope::const_iterator I = B; I != E; ++I)
+ Decls.push_back(*I);
+
+ for (SmallVectorImpl<VarDecl*>::reverse_iterator I = Decls.rbegin(),
+ E = Decls.rend();
+ I != E; ++I) {
+ // If this destructor is marked as a no-return destructor, we need to
+ // create a new block for the destructor which does not have as a successor
+ // anything built thus far: control won't flow out of this block.
+ QualType Ty = (*I)->getType();
+ if (Ty->isReferenceType()) {
+ Ty = getReferenceInitTemporaryType(*Context, (*I)->getInit());
+ }
+ Ty = Context->getBaseElementType(Ty);
+
+ if (Ty->getAsCXXRecordDecl()->isAnyDestructorNoReturn())
+ Block = createNoReturnBlock();
+ else
+ autoCreateBlock();
+
+ appendAutomaticObjDtor(Block, *I, S);
+ }
+}
+
+/// addImplicitDtorsForDestructor - Add implicit destructors generated for
+/// base and member objects in destructor.
+void CFGBuilder::addImplicitDtorsForDestructor(const CXXDestructorDecl *DD) {
+ assert (BuildOpts.AddImplicitDtors
+ && "Can be called only when dtors should be added");
+ const CXXRecordDecl *RD = DD->getParent();
+
+ // At the end destroy virtual base objects.
+ for (const auto &VI : RD->vbases()) {
+ const CXXRecordDecl *CD = VI.getType()->getAsCXXRecordDecl();
+ if (!CD->hasTrivialDestructor()) {
+ autoCreateBlock();
+ appendBaseDtor(Block, &VI);
+ }
+ }
+
+ // Before virtual bases destroy direct base objects.
+ for (const auto &BI : RD->bases()) {
+ if (!BI.isVirtual()) {
+ const CXXRecordDecl *CD = BI.getType()->getAsCXXRecordDecl();
+ if (!CD->hasTrivialDestructor()) {
+ autoCreateBlock();
+ appendBaseDtor(Block, &BI);
+ }
+ }
+ }
+
+ // First destroy member objects.
+ for (auto *FI : RD->fields()) {
+ // Check for constant size array. Set type to array element type.
+ QualType QT = FI->getType();
+ if (const ConstantArrayType *AT = Context->getAsConstantArrayType(QT)) {
+ if (AT->getSize() == 0)
+ continue;
+ QT = AT->getElementType();
+ }
+
+ if (const CXXRecordDecl *CD = QT->getAsCXXRecordDecl())
+ if (!CD->hasTrivialDestructor()) {
+ autoCreateBlock();
+ appendMemberDtor(Block, FI);
+ }
+ }
+}
+
+/// createOrReuseLocalScope - If Scope is NULL, create a new LocalScope.
+/// Either way, return a valid LocalScope object.
+LocalScope* CFGBuilder::createOrReuseLocalScope(LocalScope* Scope) {
+ if (Scope)
+ return Scope;
+ llvm::BumpPtrAllocator &alloc = cfg->getAllocator();
+ return new (alloc.Allocate<LocalScope>())
+ LocalScope(BumpVectorContext(alloc), ScopePos);
+}
+
+/// addLocalScopeForStmt - Add LocalScope to the local scopes tree for a
+/// statement that should create an implicit scope (e.g. if/else substatements).
+void CFGBuilder::addLocalScopeForStmt(Stmt *S) {
+ if (!BuildOpts.AddImplicitDtors)
+ return;
+
+ LocalScope *Scope = nullptr;
+
+ // For a compound statement we will be creating an explicit scope.
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
+ for (auto *BI : CS->body()) {
+ Stmt *SI = BI->stripLabelLikeStatements();
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(SI))
+ Scope = addLocalScopeForDeclStmt(DS, Scope);
+ }
+ return;
+ }
+
+ // For any other statement the scope will be implicit and as such will be
+ // interesting only for a DeclStmt.
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(S->stripLabelLikeStatements()))
+ addLocalScopeForDeclStmt(DS);
+}
+
+/// addLocalScopeForDeclStmt - Add LocalScope for declaration statement. Will
+/// reuse Scope if not NULL.
+LocalScope* CFGBuilder::addLocalScopeForDeclStmt(DeclStmt *DS,
+ LocalScope* Scope) {
+ if (!BuildOpts.AddImplicitDtors)
+ return Scope;
+
+ for (auto *DI : DS->decls())
+ if (VarDecl *VD = dyn_cast<VarDecl>(DI))
+ Scope = addLocalScopeForVarDecl(VD, Scope);
+ return Scope;
+}
+
+/// addLocalScopeForVarDecl - Add LocalScope for variable declaration. It will
+/// create a scope for automatic objects and temporary objects bound to a
+/// const reference. Will reuse Scope if not NULL.
+LocalScope* CFGBuilder::addLocalScopeForVarDecl(VarDecl *VD,
+ LocalScope* Scope) {
+ if (!BuildOpts.AddImplicitDtors)
+ return Scope;
+
+ // Check if variable is local.
+ switch (VD->getStorageClass()) {
+ case SC_None:
+ case SC_Auto:
+ case SC_Register:
+ break;
+ default: return Scope;
+ }
+
+ // Check for const references bound to temporary. Set type to pointee.
+ QualType QT = VD->getType();
+ if (QT.getTypePtr()->isReferenceType()) {
+ // Attempt to determine whether this declaration lifetime-extends a
+ // temporary.
+ //
+ // FIXME: This is incorrect. Non-reference declarations can lifetime-extend
+ // temporaries, and a single declaration can extend multiple temporaries.
+ // We should look at the storage duration on each nested
+ // MaterializeTemporaryExpr instead.
+ const Expr *Init = VD->getInit();
+ if (!Init)
+ return Scope;
+ if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Init))
+ Init = EWC->getSubExpr();
+ if (!isa<MaterializeTemporaryExpr>(Init))
+ return Scope;
+
+ // Lifetime-extending a temporary.
+ QT = getReferenceInitTemporaryType(*Context, Init);
+ }
+
+ // Check for constant size array. Set type to array element type.
+ while (const ConstantArrayType *AT = Context->getAsConstantArrayType(QT)) {
+ if (AT->getSize() == 0)
+ return Scope;
+ QT = AT->getElementType();
+ }
+
+ // Check if type is a C++ class with non-trivial destructor.
+ if (const CXXRecordDecl *CD = QT->getAsCXXRecordDecl())
+ if (!CD->hasTrivialDestructor()) {
+ // Add the variable to scope
+ Scope = createOrReuseLocalScope(Scope);
+ Scope->addVar(VD);
+ ScopePos = Scope->begin();
+ }
+ return Scope;
+}
+
+/// addLocalScopeAndDtors - For a given statement, add a local scope for it
+/// and add destructors that will clean up the scope.
+void CFGBuilder::addLocalScopeAndDtors(Stmt *S) {
+ if (!BuildOpts.AddImplicitDtors)
+ return;
+
+ LocalScope::const_iterator scopeBeginPos = ScopePos;
+ addLocalScopeForStmt(S);
+ addAutomaticObjDtors(ScopePos, scopeBeginPos, S);
+}
+
+/// prependAutomaticObjDtorsWithTerminator - Prepend destructor CFGElements for
+/// variables with automatic storage duration to CFGBlock's elements vector.
+/// Elements will be prepended to the physical beginning of the vector, which
+/// happens to be the logical end. The block's terminator is used as the
+/// statement that specifies the destructors' call site.
+/// FIXME: This mechanism for adding automatic destructors doesn't handle
+/// no-return destructors properly.
+void CFGBuilder::prependAutomaticObjDtorsWithTerminator(CFGBlock *Blk,
+ LocalScope::const_iterator B, LocalScope::const_iterator E) {
+ BumpVectorContext &C = cfg->getBumpVectorContext();
+ CFGBlock::iterator InsertPos
+ = Blk->beginAutomaticObjDtorsInsert(Blk->end(), B.distance(E), C);
+ for (LocalScope::const_iterator I = B; I != E; ++I)
+ InsertPos = Blk->insertAutomaticObjDtor(InsertPos, *I,
+ Blk->getTerminator());
+}
+
+/// Visit - Walk the subtree of a statement and add extra
+/// blocks for ternary operators, &&, and ||. We also process "," and
+/// DeclStmts (which may contain nested control-flow).
+CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) {
+ if (!S) {
+ badCFG = true;
+ return nullptr;
+ }
+
+ if (Expr *E = dyn_cast<Expr>(S))
+ S = E->IgnoreParens();
+
+ switch (S->getStmtClass()) {
+ default:
+ return VisitStmt(S, asc);
+
+ case Stmt::AddrLabelExprClass:
+ return VisitAddrLabelExpr(cast<AddrLabelExpr>(S), asc);
+
+ case Stmt::BinaryConditionalOperatorClass:
+ return VisitConditionalOperator(cast<BinaryConditionalOperator>(S), asc);
+
+ case Stmt::BinaryOperatorClass:
+ return VisitBinaryOperator(cast<BinaryOperator>(S), asc);
+
+ case Stmt::BlockExprClass:
+ return VisitBlockExpr(cast<BlockExpr>(S), asc);
+
+ case Stmt::BreakStmtClass:
+ return VisitBreakStmt(cast<BreakStmt>(S));
+
+ case Stmt::CallExprClass:
+ case Stmt::CXXOperatorCallExprClass:
+ case Stmt::CXXMemberCallExprClass:
+ case Stmt::UserDefinedLiteralClass:
+ return VisitCallExpr(cast<CallExpr>(S), asc);
+
+ case Stmt::CaseStmtClass:
+ return VisitCaseStmt(cast<CaseStmt>(S));
+
+ case Stmt::ChooseExprClass:
+ return VisitChooseExpr(cast<ChooseExpr>(S), asc);
+
+ case Stmt::CompoundStmtClass:
+ return VisitCompoundStmt(cast<CompoundStmt>(S));
+
+ case Stmt::ConditionalOperatorClass:
+ return VisitConditionalOperator(cast<ConditionalOperator>(S), asc);
+
+ case Stmt::ContinueStmtClass:
+ return VisitContinueStmt(cast<ContinueStmt>(S));
+
+ case Stmt::CXXCatchStmtClass:
+ return VisitCXXCatchStmt(cast<CXXCatchStmt>(S));
+
+ case Stmt::ExprWithCleanupsClass:
+ return VisitExprWithCleanups(cast<ExprWithCleanups>(S), asc);
+
+ case Stmt::CXXDefaultArgExprClass:
+ case Stmt::CXXDefaultInitExprClass:
+ // FIXME: The expression inside a CXXDefaultArgExpr is owned by the
+ // called function's declaration, not by the caller. If we simply add
+ // this expression to the CFG, we could end up with the same Expr
+ // appearing multiple times.
+ // PR13385 / <rdar://problem/12156507>
+ //
+ // It's likewise possible for multiple CXXDefaultInitExprs for the same
+ // expression to be used in the same function (through aggregate
+ // initialization).
+ return VisitStmt(S, asc);
+
+ case Stmt::CXXBindTemporaryExprClass:
+ return VisitCXXBindTemporaryExpr(cast<CXXBindTemporaryExpr>(S), asc);
+
+ case Stmt::CXXConstructExprClass:
+ return VisitCXXConstructExpr(cast<CXXConstructExpr>(S), asc);
+
+ case Stmt::CXXNewExprClass:
+ return VisitCXXNewExpr(cast<CXXNewExpr>(S), asc);
+
+ case Stmt::CXXDeleteExprClass:
+ return VisitCXXDeleteExpr(cast<CXXDeleteExpr>(S), asc);
+
+ case Stmt::CXXFunctionalCastExprClass:
+ return VisitCXXFunctionalCastExpr(cast<CXXFunctionalCastExpr>(S), asc);
+
+ case Stmt::CXXTemporaryObjectExprClass:
+ return VisitCXXTemporaryObjectExpr(cast<CXXTemporaryObjectExpr>(S), asc);
+
+ case Stmt::CXXThrowExprClass:
+ return VisitCXXThrowExpr(cast<CXXThrowExpr>(S));
+
+ case Stmt::CXXTryStmtClass:
+ return VisitCXXTryStmt(cast<CXXTryStmt>(S));
+
+ case Stmt::CXXForRangeStmtClass:
+ return VisitCXXForRangeStmt(cast<CXXForRangeStmt>(S));
+
+ case Stmt::DeclStmtClass:
+ return VisitDeclStmt(cast<DeclStmt>(S));
+
+ case Stmt::DefaultStmtClass:
+ return VisitDefaultStmt(cast<DefaultStmt>(S));
+
+ case Stmt::DoStmtClass:
+ return VisitDoStmt(cast<DoStmt>(S));
+
+ case Stmt::ForStmtClass:
+ return VisitForStmt(cast<ForStmt>(S));
+
+ case Stmt::GotoStmtClass:
+ return VisitGotoStmt(cast<GotoStmt>(S));
+
+ case Stmt::IfStmtClass:
+ return VisitIfStmt(cast<IfStmt>(S));
+
+ case Stmt::ImplicitCastExprClass:
+ return VisitImplicitCastExpr(cast<ImplicitCastExpr>(S), asc);
+
+ case Stmt::IndirectGotoStmtClass:
+ return VisitIndirectGotoStmt(cast<IndirectGotoStmt>(S));
+
+ case Stmt::LabelStmtClass:
+ return VisitLabelStmt(cast<LabelStmt>(S));
+
+ case Stmt::LambdaExprClass:
+ return VisitLambdaExpr(cast<LambdaExpr>(S), asc);
+
+ case Stmt::MemberExprClass:
+ return VisitMemberExpr(cast<MemberExpr>(S), asc);
+
+ case Stmt::NullStmtClass:
+ return Block;
+
+ case Stmt::ObjCAtCatchStmtClass:
+ return VisitObjCAtCatchStmt(cast<ObjCAtCatchStmt>(S));
+
+ case Stmt::ObjCAutoreleasePoolStmtClass:
+ return VisitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(S));
+
+ case Stmt::ObjCAtSynchronizedStmtClass:
+ return VisitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(S));
+
+ case Stmt::ObjCAtThrowStmtClass:
+ return VisitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(S));
+
+ case Stmt::ObjCAtTryStmtClass:
+ return VisitObjCAtTryStmt(cast<ObjCAtTryStmt>(S));
+
+ case Stmt::ObjCForCollectionStmtClass:
+ return VisitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(S));
+
+ case Stmt::OpaqueValueExprClass:
+ return Block;
+
+ case Stmt::PseudoObjectExprClass:
+ return VisitPseudoObjectExpr(cast<PseudoObjectExpr>(S));
+
+ case Stmt::ReturnStmtClass:
+ return VisitReturnStmt(cast<ReturnStmt>(S));
+
+ case Stmt::UnaryExprOrTypeTraitExprClass:
+ return VisitUnaryExprOrTypeTraitExpr(cast<UnaryExprOrTypeTraitExpr>(S),
+ asc);
+
+ case Stmt::StmtExprClass:
+ return VisitStmtExpr(cast<StmtExpr>(S), asc);
+
+ case Stmt::SwitchStmtClass:
+ return VisitSwitchStmt(cast<SwitchStmt>(S));
+
+ case Stmt::UnaryOperatorClass:
+ return VisitUnaryOperator(cast<UnaryOperator>(S), asc);
+
+ case Stmt::WhileStmtClass:
+ return VisitWhileStmt(cast<WhileStmt>(S));
+ }
+}
+
+CFGBlock *CFGBuilder::VisitStmt(Stmt *S, AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, S)) {
+ autoCreateBlock();
+ appendStmt(Block, S);
+ }
+
+ return VisitChildren(S);
+}
+
+/// VisitChildren - Visit the children of a Stmt.
+CFGBlock *CFGBuilder::VisitChildren(Stmt *S) {
+ CFGBlock *B = Block;
+
+ // Visit the children in their reverse order so that they appear in
+ // left-to-right (natural) order in the CFG.
+ reverse_children RChildren(S);
+ for (reverse_children::iterator I = RChildren.begin(), E = RChildren.end();
+ I != E; ++I) {
+ if (Stmt *Child = *I)
+ if (CFGBlock *R = Visit(Child))
+ B = R;
+ }
+ return B;
+}
+
+CFGBlock *CFGBuilder::VisitAddrLabelExpr(AddrLabelExpr *A,
+ AddStmtChoice asc) {
+ AddressTakenLabels.insert(A->getLabel());
+
+ if (asc.alwaysAdd(*this, A)) {
+ autoCreateBlock();
+ appendStmt(Block, A);
+ }
+
+ return Block;
+}
+
+CFGBlock *CFGBuilder::VisitUnaryOperator(UnaryOperator *U,
+ AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, U)) {
+ autoCreateBlock();
+ appendStmt(Block, U);
+ }
+
+ return Visit(U->getSubExpr(), AddStmtChoice());
+}
+
+CFGBlock *CFGBuilder::VisitLogicalOperator(BinaryOperator *B) {
+ CFGBlock *ConfluenceBlock = Block ? Block : createBlock();
+ appendStmt(ConfluenceBlock, B);
+
+ if (badCFG)
+ return nullptr;
+
+ return VisitLogicalOperator(B, nullptr, ConfluenceBlock,
+ ConfluenceBlock).first;
+}
+
+std::pair<CFGBlock*, CFGBlock*>
+CFGBuilder::VisitLogicalOperator(BinaryOperator *B,
+ Stmt *Term,
+ CFGBlock *TrueBlock,
+ CFGBlock *FalseBlock) {
+
+ // Introspect the RHS. If it is a nested logical operation, we recursively
+ // build the CFG using this function. Otherwise, resort to default
+ // CFG construction behavior.
+ Expr *RHS = B->getRHS()->IgnoreParens();
+ CFGBlock *RHSBlock, *ExitBlock;
+
+ do {
+ if (BinaryOperator *B_RHS = dyn_cast<BinaryOperator>(RHS))
+ if (B_RHS->isLogicalOp()) {
+ std::tie(RHSBlock, ExitBlock) =
+ VisitLogicalOperator(B_RHS, Term, TrueBlock, FalseBlock);
+ break;
+ }
+
+ // The RHS is not a nested logical operation. Don't push the terminator
+ // down further, but instead visit RHS and construct the respective
+ // pieces of the CFG, and link up the RHSBlock with the terminator
+ // we have been provided.
+ ExitBlock = RHSBlock = createBlock(false);
+
+ if (!Term) {
+ assert(TrueBlock == FalseBlock);
+ addSuccessor(RHSBlock, TrueBlock);
+ }
+ else {
+ RHSBlock->setTerminator(Term);
+ TryResult KnownVal = tryEvaluateBool(RHS);
+ if (!KnownVal.isKnown())
+ KnownVal = tryEvaluateBool(B);
+ addSuccessor(RHSBlock, TrueBlock, !KnownVal.isFalse());
+ addSuccessor(RHSBlock, FalseBlock, !KnownVal.isTrue());
+ }
+
+ Block = RHSBlock;
+ RHSBlock = addStmt(RHS);
+ }
+ while (false);
+
+ if (badCFG)
+ return std::make_pair(nullptr, nullptr);
+
+ // Generate the blocks for evaluating the LHS.
+ Expr *LHS = B->getLHS()->IgnoreParens();
+
+ if (BinaryOperator *B_LHS = dyn_cast<BinaryOperator>(LHS))
+ if (B_LHS->isLogicalOp()) {
+ if (B->getOpcode() == BO_LOr)
+ FalseBlock = RHSBlock;
+ else
+ TrueBlock = RHSBlock;
+
+ // For the LHS, treat 'B' as the terminator that we want to sink
+ // into the nested branch. The RHS always gets the top-most
+ // terminator.
+ return VisitLogicalOperator(B_LHS, B, TrueBlock, FalseBlock);
+ }
+
+ // Create the block evaluating the LHS.
+ // This contains the '&&' or '||' as the terminator.
+ CFGBlock *LHSBlock = createBlock(false);
+ LHSBlock->setTerminator(B);
+
+ Block = LHSBlock;
+ CFGBlock *EntryLHSBlock = addStmt(LHS);
+
+ if (badCFG)
+ return std::make_pair(nullptr, nullptr);
+
+ // See if this is a known constant.
+ TryResult KnownVal = tryEvaluateBool(LHS);
+
+ // Now link the LHSBlock with RHSBlock.
+ if (B->getOpcode() == BO_LOr) {
+ addSuccessor(LHSBlock, TrueBlock, !KnownVal.isFalse());
+ addSuccessor(LHSBlock, RHSBlock, !KnownVal.isTrue());
+ } else {
+ assert(B->getOpcode() == BO_LAnd);
+ addSuccessor(LHSBlock, RHSBlock, !KnownVal.isFalse());
+ addSuccessor(LHSBlock, FalseBlock, !KnownVal.isTrue());
+ }
+
+ return std::make_pair(EntryLHSBlock, ExitBlock);
+}
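+// For illustration: building `if (a && b)` produces an LHS block containing
+// 'a' and terminated by the '&&', whose true edge leads to the RHS block
+// containing 'b' and whose false edge leads directly to the branch's false
+// destination; since construction runs backwards, the RHS block is created
+// before the LHS block.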
+
+
+CFGBlock *CFGBuilder::VisitBinaryOperator(BinaryOperator *B,
+ AddStmtChoice asc) {
+ // && or ||
+ if (B->isLogicalOp())
+ return VisitLogicalOperator(B);
+
+ if (B->getOpcode() == BO_Comma) { // ,
+ autoCreateBlock();
+ appendStmt(Block, B);
+ addStmt(B->getRHS());
+ return addStmt(B->getLHS());
+ }
+
+ if (B->isAssignmentOp()) {
+ if (asc.alwaysAdd(*this, B)) {
+ autoCreateBlock();
+ appendStmt(Block, B);
+ }
+ Visit(B->getLHS());
+ return Visit(B->getRHS());
+ }
+
+ if (asc.alwaysAdd(*this, B)) {
+ autoCreateBlock();
+ appendStmt(Block, B);
+ }
+
+ CFGBlock *RBlock = Visit(B->getRHS());
+ CFGBlock *LBlock = Visit(B->getLHS());
+ // If visiting RHS causes us to finish 'Block', e.g. the RHS is a StmtExpr
+ // containing a DoStmt, and the LHS doesn't create a new block, then we should
+ // return RBlock. Otherwise we'll incorrectly return NULL.
+ return (LBlock ? LBlock : RBlock);
+}
+
+CFGBlock *CFGBuilder::VisitNoRecurse(Expr *E, AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, E)) {
+ autoCreateBlock();
+ appendStmt(Block, E);
+ }
+ return Block;
+}
+
+CFGBlock *CFGBuilder::VisitBreakStmt(BreakStmt *B) {
+ // "break" is a control-flow statement. Thus we stop processing the current
+ // block.
+ if (badCFG)
+ return nullptr;
+
+ // Now create a new block that ends with the break statement.
+ Block = createBlock(false);
+ Block->setTerminator(B);
+
+ // If there is no target for the break, then we are looking at an incomplete
+ // AST. This means that the CFG cannot be constructed.
+ if (BreakJumpTarget.block) {
+ addAutomaticObjDtors(ScopePos, BreakJumpTarget.scopePosition, B);
+ addSuccessor(Block, BreakJumpTarget.block);
+ } else
+ badCFG = true;
+
+
+ return Block;
+}
+
+static bool CanThrow(Expr *E, ASTContext &Ctx) {
+ QualType Ty = E->getType();
+ if (Ty->isFunctionPointerType())
+ Ty = Ty->getAs<PointerType>()->getPointeeType();
+ else if (Ty->isBlockPointerType())
+ Ty = Ty->getAs<BlockPointerType>()->getPointeeType();
+
+ const FunctionType *FT = Ty->getAs<FunctionType>();
+ if (FT) {
+ if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FT))
+ if (!isUnresolvedExceptionSpec(Proto->getExceptionSpecType()) &&
+ Proto->isNothrow(Ctx))
+ return false;
+ }
+ return true;
+}
+
+CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
+ // Compute the callee type.
+ QualType calleeType = C->getCallee()->getType();
+ if (calleeType == Context->BoundMemberTy) {
+ QualType boundType = Expr::findBoundMemberType(C->getCallee());
+
+ // We should only get a null bound type if processing a dependent
+ // CFG. Recover by assuming nothing.
+ if (!boundType.isNull()) calleeType = boundType;
+ }
+
+ // If this is a call to a no-return function, this stops the block here.
+ bool NoReturn = getFunctionExtInfo(*calleeType).getNoReturn();
+
+ bool AddEHEdge = false;
+
+ // Languages without exceptions are assumed to not throw.
+ if (Context->getLangOpts().Exceptions) {
+ if (BuildOpts.AddEHEdges)
+ AddEHEdge = true;
+ }
+
+ // If this is a call to a builtin function, it might not actually evaluate
+ // its arguments. Don't add them to the CFG if this is the case.
+ bool OmitArguments = false;
+
+ if (FunctionDecl *FD = C->getDirectCallee()) {
+ if (FD->isNoReturn())
+ NoReturn = true;
+ if (FD->hasAttr<NoThrowAttr>())
+ AddEHEdge = false;
+ if (FD->getBuiltinID() == Builtin::BI__builtin_object_size)
+ OmitArguments = true;
+ }
+
+ if (!CanThrow(C->getCallee(), *Context))
+ AddEHEdge = false;
+
+ if (OmitArguments) {
+ assert(!NoReturn && "noreturn calls with unevaluated args not implemented");
+ assert(!AddEHEdge && "EH calls with unevaluated args not implemented");
+ autoCreateBlock();
+ appendStmt(Block, C);
+ return Visit(C->getCallee());
+ }
+
+ if (!NoReturn && !AddEHEdge) {
+ return VisitStmt(C, asc.withAlwaysAdd(true));
+ }
+
+ if (Block) {
+ Succ = Block;
+ if (badCFG)
+ return nullptr;
+ }
+
+ if (NoReturn)
+ Block = createNoReturnBlock();
+ else
+ Block = createBlock();
+
+ appendStmt(Block, C);
+
+ if (AddEHEdge) {
+ // Add exceptional edges.
+ if (TryTerminatedBlock)
+ addSuccessor(Block, TryTerminatedBlock);
+ else
+ addSuccessor(Block, &cfg->getExit());
+ }
+
+ return VisitChildren(C);
+}
+
+CFGBlock *CFGBuilder::VisitChooseExpr(ChooseExpr *C,
+ AddStmtChoice asc) {
+ CFGBlock *ConfluenceBlock = Block ? Block : createBlock();
+ appendStmt(ConfluenceBlock, C);
+ if (badCFG)
+ return nullptr;
+
+ AddStmtChoice alwaysAdd = asc.withAlwaysAdd(true);
+ Succ = ConfluenceBlock;
+ Block = nullptr;
+ CFGBlock *LHSBlock = Visit(C->getLHS(), alwaysAdd);
+ if (badCFG)
+ return nullptr;
+
+ Succ = ConfluenceBlock;
+ Block = nullptr;
+ CFGBlock *RHSBlock = Visit(C->getRHS(), alwaysAdd);
+ if (badCFG)
+ return nullptr;
+
+ Block = createBlock(false);
+ // See if this is a known constant.
+ const TryResult& KnownVal = tryEvaluateBool(C->getCond());
+ addSuccessor(Block, KnownVal.isFalse() ? nullptr : LHSBlock);
+ addSuccessor(Block, KnownVal.isTrue() ? nullptr : RHSBlock);
+ Block->setTerminator(C);
+ return addStmt(C->getCond());
+}
+
+
+CFGBlock *CFGBuilder::VisitCompoundStmt(CompoundStmt *C) {
+ LocalScope::const_iterator scopeBeginPos = ScopePos;
+ if (BuildOpts.AddImplicitDtors) {
+ addLocalScopeForStmt(C);
+ }
+ if (!C->body_empty() && !isa<ReturnStmt>(*C->body_rbegin())) {
+    // If the body ends with a ReturnStmt, the dtors will be added in
+    // VisitReturnStmt.
+ addAutomaticObjDtors(ScopePos, scopeBeginPos, C);
+ }
+
+ CFGBlock *LastBlock = Block;
+
+  for (CompoundStmt::reverse_body_iterator I = C->body_rbegin(),
+       E = C->body_rend(); I != E; ++I) {
+ // If we hit a segment of code just containing ';' (NullStmts), we can
+    // get a null block back. In such cases, just use the LastBlock.
+ if (CFGBlock *newBlock = addStmt(*I))
+ LastBlock = newBlock;
+
+ if (badCFG)
+ return nullptr;
+ }
+
+ return LastBlock;
+}
+
+CFGBlock *CFGBuilder::VisitConditionalOperator(AbstractConditionalOperator *C,
+ AddStmtChoice asc) {
+ const BinaryConditionalOperator *BCO = dyn_cast<BinaryConditionalOperator>(C);
+ const OpaqueValueExpr *opaqueValue = (BCO ? BCO->getOpaqueValue() : nullptr);
+
+ // Create the confluence block that will "merge" the results of the ternary
+ // expression.
+ CFGBlock *ConfluenceBlock = Block ? Block : createBlock();
+ appendStmt(ConfluenceBlock, C);
+ if (badCFG)
+ return nullptr;
+
+ AddStmtChoice alwaysAdd = asc.withAlwaysAdd(true);
+
+ // Create a block for the LHS expression if there is an LHS expression. A
+ // GCC extension allows LHS to be NULL, causing the condition to be the
+ // value that is returned instead.
+ // e.g: x ?: y is shorthand for: x ? x : y;
+ Succ = ConfluenceBlock;
+ Block = nullptr;
+ CFGBlock *LHSBlock = nullptr;
+ const Expr *trueExpr = C->getTrueExpr();
+ if (trueExpr != opaqueValue) {
+ LHSBlock = Visit(C->getTrueExpr(), alwaysAdd);
+ if (badCFG)
+ return nullptr;
+ Block = nullptr;
+ }
+ else
+ LHSBlock = ConfluenceBlock;
+
+ // Create the block for the RHS expression.
+ Succ = ConfluenceBlock;
+ CFGBlock *RHSBlock = Visit(C->getFalseExpr(), alwaysAdd);
+ if (badCFG)
+ return nullptr;
+
+ // If the condition is a logical '&&' or '||', build a more accurate CFG.
+ if (BinaryOperator *Cond =
+ dyn_cast<BinaryOperator>(C->getCond()->IgnoreParens()))
+ if (Cond->isLogicalOp())
+ return VisitLogicalOperator(Cond, C, LHSBlock, RHSBlock).first;
+
+ // Create the block that will contain the condition.
+ Block = createBlock(false);
+
+ // See if this is a known constant.
+ const TryResult& KnownVal = tryEvaluateBool(C->getCond());
+ addSuccessor(Block, LHSBlock, !KnownVal.isFalse());
+ addSuccessor(Block, RHSBlock, !KnownVal.isTrue());
+ Block->setTerminator(C);
+ Expr *condExpr = C->getCond();
+
+ if (opaqueValue) {
+ // Run the condition expression if it's not trivially expressed in
+ // terms of the opaque value (or if there is no opaque value).
+ if (condExpr != opaqueValue)
+ addStmt(condExpr);
+
+ // Before that, run the common subexpression if there was one.
+ // At least one of this or the above will be run.
+ return addStmt(BCO->getCommon());
+ }
+
+ return addStmt(condExpr);
+}
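+
+// For example, in "x ?: y" (the GCC binary conditional) the condition and
+// the true branch share the opaque value, so the confluence block doubles
+// as the LHS block and the common subexpression is emitted only once; a
+// plain "c ? a : b" gets distinct LHS and RHS blocks branching out of the
+// condition block and merging at the confluence block.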
+
+CFGBlock *CFGBuilder::VisitDeclStmt(DeclStmt *DS) {
+ // Check if the Decl is for an __label__. If so, elide it from the
+ // CFG entirely.
+ if (isa<LabelDecl>(*DS->decl_begin()))
+ return Block;
+
+ // This case also handles static_asserts.
+ if (DS->isSingleDecl())
+ return VisitDeclSubExpr(DS);
+
+ CFGBlock *B = nullptr;
+
+ // Build an individual DeclStmt for each decl.
+ for (DeclStmt::reverse_decl_iterator I = DS->decl_rbegin(),
+ E = DS->decl_rend();
+ I != E; ++I) {
+ // Get the alignment of the new DeclStmt, padding out to >=8 bytes.
+ unsigned A = llvm::AlignOf<DeclStmt>::Alignment < 8
+ ? 8 : llvm::AlignOf<DeclStmt>::Alignment;
+
+ // Allocate the DeclStmt using the BumpPtrAllocator. It will get
+ // automatically freed with the CFG.
+ DeclGroupRef DG(*I);
+ Decl *D = *I;
+ void *Mem = cfg->getAllocator().Allocate(sizeof(DeclStmt), A);
+ DeclStmt *DSNew = new (Mem) DeclStmt(DG, D->getLocation(), GetEndLoc(D));
+ cfg->addSyntheticDeclStmt(DSNew, DS);
+
+ // Append the fake DeclStmt to block.
+ B = VisitDeclSubExpr(DSNew);
+ }
+
+ return B;
+}
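+
+// For example, "int a = f(), b = g();" is rewritten into two synthetic
+// single-declaration DeclStmts so that each initializer becomes its own CFG
+// element; addSyntheticDeclStmt records the mapping back to the original
+// DeclStmt for clients that need it.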
+
+/// VisitDeclSubExpr - Utility method to add block-level expressions for
+/// DeclStmts and initializers in them.
+CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt *DS) {
+ assert(DS->isSingleDecl() && "Can handle single declarations only.");
+ VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
+
+ if (!VD) {
+ // Of everything that can be declared in a DeclStmt, only VarDecls impact
+ // runtime semantics.
+ return Block;
+ }
+
+ bool HasTemporaries = false;
+
+ // Guard static initializers under a branch.
+ CFGBlock *blockAfterStaticInit = nullptr;
+
+ if (BuildOpts.AddStaticInitBranches && VD->isStaticLocal()) {
+ // For static variables, we need to create a branch to track
+ // whether or not they are initialized.
+ if (Block) {
+ Succ = Block;
+ Block = nullptr;
+ if (badCFG)
+ return nullptr;
+ }
+ blockAfterStaticInit = Succ;
+ }
+
+ // Destructors of temporaries in initialization expression should be called
+ // after initialization finishes.
+ Expr *Init = VD->getInit();
+ if (Init) {
+ HasTemporaries = isa<ExprWithCleanups>(Init);
+
+ if (BuildOpts.AddTemporaryDtors && HasTemporaries) {
+ // Generate destructors for temporaries in initialization expression.
+ TempDtorContext Context;
+ VisitForTemporaryDtors(cast<ExprWithCleanups>(Init)->getSubExpr(),
+ /*BindToTemporary=*/false, Context);
+ }
+ }
+
+ autoCreateBlock();
+ appendStmt(Block, DS);
+
+ // Keep track of the last non-null block, as 'Block' can be nulled out
+ // if the initializer expression is something like a 'while' in a
+ // statement-expression.
+ CFGBlock *LastBlock = Block;
+
+ if (Init) {
+ if (HasTemporaries) {
+      // For an expression with temporaries, go directly to the subexpression
+      // to avoid generating destructors for them a second time.
+ ExprWithCleanups *EC = cast<ExprWithCleanups>(Init);
+ if (CFGBlock *newBlock = Visit(EC->getSubExpr()))
+ LastBlock = newBlock;
+ }
+ else {
+ if (CFGBlock *newBlock = Visit(Init))
+ LastBlock = newBlock;
+ }
+ }
+
+ // If the type of VD is a VLA, then we must process its size expressions.
+ for (const VariableArrayType* VA = FindVA(VD->getType().getTypePtr());
+ VA != nullptr; VA = FindVA(VA->getElementType().getTypePtr())) {
+ if (CFGBlock *newBlock = addStmt(VA->getSizeExpr()))
+ LastBlock = newBlock;
+ }
+
+ // Remove variable from local scope.
+ if (ScopePos && VD == *ScopePos)
+ ++ScopePos;
+
+ CFGBlock *B = LastBlock;
+ if (blockAfterStaticInit) {
+ Succ = B;
+ Block = createBlock(false);
+ Block->setTerminator(DS);
+ addSuccessor(Block, blockAfterStaticInit);
+ addSuccessor(Block, B);
+ B = Block;
+ }
+
+ return B;
+}
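+
+// With the AddStaticInitBranches option, a declaration like
+//
+//   static Widget w = makeWidget();   // illustrative names
+//
+// yields a block terminated by the DeclStmt with two successors: one that
+// runs the initializer and one that skips it, mirroring the run-once
+// semantics of local statics.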
+
+CFGBlock *CFGBuilder::VisitIfStmt(IfStmt *I) {
+ // We may see an if statement in the middle of a basic block, or it may be the
+ // first statement we are processing. In either case, we create a new basic
+ // block. First, we create the blocks for the then...else statements, and
+ // then we create the block containing the if statement. If we were in the
+ // middle of a block, we stop processing that block. That block is then the
+ // implicit successor for the "then" and "else" clauses.
+
+ // Save local scope position because in case of condition variable ScopePos
+ // won't be restored when traversing AST.
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+
+ // Create local scope for possible condition variable.
+ // Store scope position. Add implicit destructor.
+ if (VarDecl *VD = I->getConditionVariable()) {
+ LocalScope::const_iterator BeginScopePos = ScopePos;
+ addLocalScopeForVarDecl(VD);
+ addAutomaticObjDtors(ScopePos, BeginScopePos, I);
+ }
+
+ // The block we were processing is now finished. Make it the successor
+ // block.
+ if (Block) {
+ Succ = Block;
+ if (badCFG)
+ return nullptr;
+ }
+
+ // Process the false branch.
+ CFGBlock *ElseBlock = Succ;
+
+ if (Stmt *Else = I->getElse()) {
+ SaveAndRestore<CFGBlock*> sv(Succ);
+
+ // NULL out Block so that the recursive call to Visit will
+ // create a new basic block.
+ Block = nullptr;
+
+ // If branch is not a compound statement create implicit scope
+ // and add destructors.
+ if (!isa<CompoundStmt>(Else))
+ addLocalScopeAndDtors(Else);
+
+ ElseBlock = addStmt(Else);
+
+ if (!ElseBlock) // Can occur when the Else body has all NullStmts.
+ ElseBlock = sv.get();
+ else if (Block) {
+ if (badCFG)
+ return nullptr;
+ }
+ }
+
+ // Process the true branch.
+ CFGBlock *ThenBlock;
+ {
+ Stmt *Then = I->getThen();
+ assert(Then);
+ SaveAndRestore<CFGBlock*> sv(Succ);
+ Block = nullptr;
+
+ // If branch is not a compound statement create implicit scope
+ // and add destructors.
+ if (!isa<CompoundStmt>(Then))
+ addLocalScopeAndDtors(Then);
+
+ ThenBlock = addStmt(Then);
+
+ if (!ThenBlock) {
+ // We can reach here if the "then" body has all NullStmts.
+ // Create an empty block so we can distinguish between true and false
+ // branches in path-sensitive analyses.
+ ThenBlock = createBlock(false);
+ addSuccessor(ThenBlock, sv.get());
+ } else if (Block) {
+ if (badCFG)
+ return nullptr;
+ }
+ }
+
+ // Specially handle "if (expr1 || ...)" and "if (expr1 && ...)" by
+ // having these handle the actual control-flow jump. Note that
+ // if we introduce a condition variable, e.g. "if (int x = exp1 || exp2)"
+ // we resort to the old control-flow behavior. This special handling
+ // removes infeasible paths from the control-flow graph by having the
+ // control-flow transfer of '&&' or '||' go directly into the then/else
+  // blocks.
+ if (!I->getConditionVariable())
+ if (BinaryOperator *Cond =
+ dyn_cast<BinaryOperator>(I->getCond()->IgnoreParens()))
+ if (Cond->isLogicalOp())
+ return VisitLogicalOperator(Cond, I, ThenBlock, ElseBlock).first;
+
+ // Now create a new block containing the if statement.
+ Block = createBlock(false);
+
+ // Set the terminator of the new block to the If statement.
+ Block->setTerminator(I);
+
+ // See if this is a known constant.
+ const TryResult &KnownVal = tryEvaluateBool(I->getCond());
+
+ // Add the successors. If we know that specific branches are
+ // unreachable, inform addSuccessor() of that knowledge.
+ addSuccessor(Block, ThenBlock, /* isReachable = */ !KnownVal.isFalse());
+ addSuccessor(Block, ElseBlock, /* isReachable = */ !KnownVal.isTrue());
+
+ // Add the condition as the last statement in the new block. This may create
+ // new blocks as the condition may contain control-flow. Any newly created
+  // blocks will be pointed to by "Block".
+ CFGBlock *LastBlock = addStmt(I->getCond());
+
+ // Finally, if the IfStmt contains a condition variable, add it and its
+ // initializer to the CFG.
+ if (const DeclStmt* DS = I->getConditionVariableDeclStmt()) {
+ autoCreateBlock();
+ LastBlock = addStmt(const_cast<DeclStmt *>(DS));
+ }
+
+ return LastBlock;
+}
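+
+// As a sketch, "if (a && b) t(); else e();" gets no single condition block:
+// VisitLogicalOperator wires 'a' so that a false value jumps straight to
+// the else block, keeping the infeasible path where 'a' is false but 'b' is
+// evaluated out of the graph.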
+
+CFGBlock *CFGBuilder::VisitReturnStmt(ReturnStmt *R) {
+ // If we were in the middle of a block we stop processing that block.
+ //
+ // NOTE: If a "return" appears in the middle of a block, this means that the
+ // code afterwards is DEAD (unreachable). We still keep a basic block
+ // for that code; a simple "mark-and-sweep" from the entry block will be
+ // able to report such dead blocks.
+
+ // Create the new block.
+ Block = createBlock(false);
+
+ addAutomaticObjDtors(ScopePos, LocalScope::const_iterator(), R);
+
+  // If one of the destructors does not return, we already have the Exit
+ // block as a successor.
+ if (!Block->hasNoReturnElement())
+ addSuccessor(Block, &cfg->getExit());
+
+ // Add the return statement to the block. This may create new blocks if R
+ // contains control-flow (short-circuit operations).
+ return VisitStmt(R, AddStmtChoice::AlwaysAdd);
+}
+
+CFGBlock *CFGBuilder::VisitLabelStmt(LabelStmt *L) {
+ // Get the block of the labeled statement. Add it to our map.
+ addStmt(L->getSubStmt());
+ CFGBlock *LabelBlock = Block;
+
+ if (!LabelBlock) // This can happen when the body is empty, i.e.
+    LabelBlock = createBlock(); // scopes that only contain NullStmts.
+
+ assert(LabelMap.find(L->getDecl()) == LabelMap.end() &&
+ "label already in map");
+ LabelMap[L->getDecl()] = JumpTarget(LabelBlock, ScopePos);
+
+ // Labels partition blocks, so this is the end of the basic block we were
+  // processing (L is the block's label). Because this is a label (and we
+  // have already processed the substatement), there is no extra
+  // control-flow to worry about.
+ LabelBlock->setLabel(L);
+ if (badCFG)
+ return nullptr;
+
+  // We set Block to NULL to allow lazy creation of a new block (if necessary).
+ Block = nullptr;
+
+ // This block is now the implicit successor of other blocks.
+ Succ = LabelBlock;
+
+ return LabelBlock;
+}
+
+CFGBlock *CFGBuilder::VisitBlockExpr(BlockExpr *E, AddStmtChoice asc) {
+ CFGBlock *LastBlock = VisitNoRecurse(E, asc);
+ for (const BlockDecl::Capture &CI : E->getBlockDecl()->captures()) {
+ if (Expr *CopyExpr = CI.getCopyExpr()) {
+ CFGBlock *Tmp = Visit(CopyExpr);
+ if (Tmp)
+ LastBlock = Tmp;
+ }
+ }
+ return LastBlock;
+}
+
+CFGBlock *CFGBuilder::VisitLambdaExpr(LambdaExpr *E, AddStmtChoice asc) {
+ CFGBlock *LastBlock = VisitNoRecurse(E, asc);
+ for (LambdaExpr::capture_init_iterator it = E->capture_init_begin(),
+ et = E->capture_init_end(); it != et; ++it) {
+ if (Expr *Init = *it) {
+ CFGBlock *Tmp = Visit(Init);
+ if (Tmp)
+ LastBlock = Tmp;
+ }
+ }
+ return LastBlock;
+}
+
+CFGBlock *CFGBuilder::VisitGotoStmt(GotoStmt *G) {
+ // Goto is a control-flow statement. Thus we stop processing the current
+ // block and create a new one.
+
+ Block = createBlock(false);
+ Block->setTerminator(G);
+
+ // If we already know the mapping to the label block add the successor now.
+ LabelMapTy::iterator I = LabelMap.find(G->getLabel());
+
+ if (I == LabelMap.end())
+ // We will need to backpatch this block later.
+ BackpatchBlocks.push_back(JumpSource(Block, ScopePos));
+ else {
+ JumpTarget JT = I->second;
+ addAutomaticObjDtors(ScopePos, JT.scopePosition, G);
+ addSuccessor(Block, JT.block);
+ }
+
+ return Block;
+}
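+
+// For a forward jump, e.g. "goto done; ... done: ;", this runs before the
+// label has been visited, so the goto block is queued in BackpatchBlocks
+// and its successor edge is filled in once the label's block is known.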
+
+CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
+ CFGBlock *LoopSuccessor = nullptr;
+
+ // Save local scope position because in case of condition variable ScopePos
+ // won't be restored when traversing AST.
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+
+ // Create local scope for init statement and possible condition variable.
+ // Add destructor for init statement and condition variable.
+ // Store scope position for continue statement.
+ if (Stmt *Init = F->getInit())
+ addLocalScopeForStmt(Init);
+ LocalScope::const_iterator LoopBeginScopePos = ScopePos;
+
+ if (VarDecl *VD = F->getConditionVariable())
+ addLocalScopeForVarDecl(VD);
+ LocalScope::const_iterator ContinueScopePos = ScopePos;
+
+ addAutomaticObjDtors(ScopePos, save_scope_pos.get(), F);
+
+ // "for" is a control-flow statement. Thus we stop processing the current
+ // block.
+ if (Block) {
+ if (badCFG)
+ return nullptr;
+ LoopSuccessor = Block;
+ } else
+ LoopSuccessor = Succ;
+
+ // Save the current value for the break targets.
+ // All breaks should go to the code following the loop.
+ SaveAndRestore<JumpTarget> save_break(BreakJumpTarget);
+ BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
+
+ CFGBlock *BodyBlock = nullptr, *TransitionBlock = nullptr;
+
+ // Now create the loop body.
+ {
+ assert(F->getBody());
+
+ // Save the current values for Block, Succ, continue and break targets.
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
+ SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget);
+
+ // Create an empty block to represent the transition block for looping back
+ // to the head of the loop. If we have increment code, it will
+ // go in this block as well.
+ Block = Succ = TransitionBlock = createBlock(false);
+ TransitionBlock->setLoopTarget(F);
+
+ if (Stmt *I = F->getInc()) {
+ // Generate increment code in its own basic block. This is the target of
+ // continue statements.
+ Succ = addStmt(I);
+ }
+
+ // Finish up the increment (or empty) block if it hasn't been already.
+ if (Block) {
+ assert(Block == Succ);
+ if (badCFG)
+ return nullptr;
+ Block = nullptr;
+ }
+
+ // The starting block for the loop increment is the block that should
+ // represent the 'loop target' for looping back to the start of the loop.
+ ContinueJumpTarget = JumpTarget(Succ, ContinueScopePos);
+ ContinueJumpTarget.block->setLoopTarget(F);
+
+ // Loop body should end with destructor of Condition variable (if any).
+ addAutomaticObjDtors(ScopePos, LoopBeginScopePos, F);
+
+ // If body is not a compound statement create implicit scope
+ // and add destructors.
+ if (!isa<CompoundStmt>(F->getBody()))
+ addLocalScopeAndDtors(F->getBody());
+
+ // Now populate the body block, and in the process create new blocks as we
+ // walk the body of the loop.
+ BodyBlock = addStmt(F->getBody());
+
+ if (!BodyBlock) {
+ // In the case of "for (...;...;...);" we can have a null BodyBlock.
+ // Use the continue jump target as the proxy for the body.
+ BodyBlock = ContinueJumpTarget.block;
+ }
+ else if (badCFG)
+ return nullptr;
+ }
+
+ // Because of short-circuit evaluation, the condition of the loop can span
+ // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that
+ // evaluate the condition.
+ CFGBlock *EntryConditionBlock = nullptr, *ExitConditionBlock = nullptr;
+
+ do {
+ Expr *C = F->getCond();
+
+ // Specially handle logical operators, which have a slightly
+ // more optimal CFG representation.
+ if (BinaryOperator *Cond =
+ dyn_cast_or_null<BinaryOperator>(C ? C->IgnoreParens() : nullptr))
+ if (Cond->isLogicalOp()) {
+ std::tie(EntryConditionBlock, ExitConditionBlock) =
+ VisitLogicalOperator(Cond, F, BodyBlock, LoopSuccessor);
+ break;
+ }
+
+ // The default case when not handling logical operators.
+ EntryConditionBlock = ExitConditionBlock = createBlock(false);
+ ExitConditionBlock->setTerminator(F);
+
+ // See if this is a known constant.
+ TryResult KnownVal(true);
+
+ if (C) {
+ // Now add the actual condition to the condition block.
+ // Because the condition itself may contain control-flow, new blocks may
+ // be created. Thus we update "Succ" after adding the condition.
+ Block = ExitConditionBlock;
+ EntryConditionBlock = addStmt(C);
+
+ // If this block contains a condition variable, add both the condition
+ // variable and initializer to the CFG.
+ if (VarDecl *VD = F->getConditionVariable()) {
+ if (Expr *Init = VD->getInit()) {
+ autoCreateBlock();
+ appendStmt(Block, F->getConditionVariableDeclStmt());
+ EntryConditionBlock = addStmt(Init);
+ assert(Block == EntryConditionBlock);
+ }
+ }
+
+ if (Block && badCFG)
+ return nullptr;
+
+ KnownVal = tryEvaluateBool(C);
+ }
+
+ // Add the loop body entry as a successor to the condition.
+ addSuccessor(ExitConditionBlock, KnownVal.isFalse() ? nullptr : BodyBlock);
+    // Link up the condition block with the code that follows the loop (the
+    // false branch).
+ addSuccessor(ExitConditionBlock,
+ KnownVal.isTrue() ? nullptr : LoopSuccessor);
+
+ } while (false);
+
+ // Link up the loop-back block to the entry condition block.
+ addSuccessor(TransitionBlock, EntryConditionBlock);
+
+ // The condition block is the implicit successor for any code above the loop.
+ Succ = EntryConditionBlock;
+
+ // If the loop contains initialization, create a new block for those
+ // statements. This block can also contain statements that precede the loop.
+ if (Stmt *I = F->getInit()) {
+ Block = createBlock();
+ return addStmt(I);
+ }
+
+ // There is no loop initialization. We are thus basically a while loop.
+ // NULL out Block to force lazy block construction.
+ Block = nullptr;
+ Succ = EntryConditionBlock;
+ return EntryConditionBlock;
+}
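+
+// Putting the pieces together, the shape built (bottom-up) for
+// "for (init; cond; inc) body" is roughly:
+//
+//   [init] -> [cond] --true--> [body] -> [inc/transition] -> [cond]
+//                  \--false--> [code after the loop]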
+
+CFGBlock *CFGBuilder::VisitMemberExpr(MemberExpr *M, AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, M)) {
+ autoCreateBlock();
+ appendStmt(Block, M);
+ }
+ return Visit(M->getBase());
+}
+
+CFGBlock *CFGBuilder::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
+ // Objective-C fast enumeration 'for' statements:
+ // http://developer.apple.com/documentation/Cocoa/Conceptual/ObjectiveC
+ //
+ // for ( Type newVariable in collection_expression ) { statements }
+ //
+ // becomes:
+ //
+ // prologue:
+ // 1. collection_expression
+ // T. jump to loop_entry
+ // loop_entry:
+ // 1. side-effects of element expression
+ // 1. ObjCForCollectionStmt [performs binding to newVariable]
+ // T. ObjCForCollectionStmt TB, FB [jumps to TB if newVariable != nil]
+ // TB:
+ // statements
+ // T. jump to loop_entry
+ // FB:
+ // what comes after
+ //
+ // and
+ //
+ // Type existingItem;
+ // for ( existingItem in expression ) { statements }
+ //
+ // becomes:
+ //
+ // the same with newVariable replaced with existingItem; the binding works
+ // the same except that for one ObjCForCollectionStmt::getElement() returns
+ // a DeclStmt and the other returns a DeclRefExpr.
+ //
+
+ CFGBlock *LoopSuccessor = nullptr;
+
+ if (Block) {
+ if (badCFG)
+ return nullptr;
+ LoopSuccessor = Block;
+ Block = nullptr;
+ } else
+ LoopSuccessor = Succ;
+
+ // Build the condition blocks.
+ CFGBlock *ExitConditionBlock = createBlock(false);
+
+ // Set the terminator for the "exit" condition block.
+ ExitConditionBlock->setTerminator(S);
+
+ // The last statement in the block should be the ObjCForCollectionStmt, which
+ // performs the actual binding to 'element' and determines if there are any
+ // more items in the collection.
+ appendStmt(ExitConditionBlock, S);
+ Block = ExitConditionBlock;
+
+ // Walk the 'element' expression to see if there are any side-effects. We
+ // generate new blocks as necessary. We DON'T add the statement by default to
+ // the CFG unless it contains control-flow.
+ CFGBlock *EntryConditionBlock = Visit(S->getElement(),
+ AddStmtChoice::NotAlwaysAdd);
+ if (Block) {
+ if (badCFG)
+ return nullptr;
+ Block = nullptr;
+ }
+
+ // The condition block is the implicit successor for the loop body as well as
+ // any code above the loop.
+ Succ = EntryConditionBlock;
+
+ // Now create the true branch.
+ {
+ // Save the current values for Succ, continue and break targets.
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
+ SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget),
+ save_break(BreakJumpTarget);
+
+ // Add an intermediate block between the BodyBlock and the
+ // EntryConditionBlock to represent the "loop back" transition, for looping
+ // back to the head of the loop.
+ CFGBlock *LoopBackBlock = nullptr;
+ Succ = LoopBackBlock = createBlock();
+ LoopBackBlock->setLoopTarget(S);
+
+ BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
+ ContinueJumpTarget = JumpTarget(Succ, ScopePos);
+
+ CFGBlock *BodyBlock = addStmt(S->getBody());
+
+ if (!BodyBlock)
+ BodyBlock = ContinueJumpTarget.block; // can happen for "for (X in Y) ;"
+ else if (Block) {
+ if (badCFG)
+ return nullptr;
+ }
+
+ // This new body block is a successor to our "exit" condition block.
+ addSuccessor(ExitConditionBlock, BodyBlock);
+ }
+
+  // Link up the condition block with the code that follows the loop (the
+  // false branch).
+ addSuccessor(ExitConditionBlock, LoopSuccessor);
+
+ // Now create a prologue block to contain the collection expression.
+ Block = createBlock();
+ return addStmt(S->getCollection());
+}
+
+CFGBlock *CFGBuilder::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
+ // Inline the body.
+ return addStmt(S->getSubStmt());
+ // TODO: consider adding cleanups for the end of @autoreleasepool scope.
+}
+
+CFGBlock *CFGBuilder::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
+ // FIXME: Add locking 'primitives' to CFG for @synchronized.
+
+ // Inline the body.
+ CFGBlock *SyncBlock = addStmt(S->getSynchBody());
+
+ // The sync body starts its own basic block. This makes it a little easier
+ // for diagnostic clients.
+ if (SyncBlock) {
+ if (badCFG)
+ return nullptr;
+
+ Block = nullptr;
+ Succ = SyncBlock;
+ }
+
+ // Add the @synchronized to the CFG.
+ autoCreateBlock();
+ appendStmt(Block, S);
+
+ // Inline the sync expression.
+ return addStmt(S->getSynchExpr());
+}
+
+CFGBlock *CFGBuilder::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
+ // FIXME
+ return NYS();
+}
+
+CFGBlock *CFGBuilder::VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ autoCreateBlock();
+
+ // Add the PseudoObject as the last thing.
+ appendStmt(Block, E);
+
+ CFGBlock *lastBlock = Block;
+
+ // Before that, evaluate all of the semantics in order. In
+ // CFG-land, that means appending them in reverse order.
+ for (unsigned i = E->getNumSemanticExprs(); i != 0; ) {
+ Expr *Semantic = E->getSemanticExpr(--i);
+
+ // If the semantic is an opaque value, we're being asked to bind
+ // it to its source expression.
+ if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Semantic))
+ Semantic = OVE->getSourceExpr();
+
+ if (CFGBlock *B = Visit(Semantic))
+ lastBlock = B;
+ }
+
+ return lastBlock;
+}
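+
+// For an Objective-C property access such as "obj.prop", the semantic form
+// (receiver evaluation, opaque value bindings, the getter message send) is
+// what the CFG records; appending the semantic expressions in reverse
+// source order here produces their natural evaluation order in the
+// bottom-up graph.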
+
+CFGBlock *CFGBuilder::VisitWhileStmt(WhileStmt *W) {
+ CFGBlock *LoopSuccessor = nullptr;
+
+ // Save local scope position because in case of condition variable ScopePos
+ // won't be restored when traversing AST.
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+
+ // Create local scope for possible condition variable.
+ // Store scope position for continue statement.
+ LocalScope::const_iterator LoopBeginScopePos = ScopePos;
+ if (VarDecl *VD = W->getConditionVariable()) {
+ addLocalScopeForVarDecl(VD);
+ addAutomaticObjDtors(ScopePos, LoopBeginScopePos, W);
+ }
+
+ // "while" is a control-flow statement. Thus we stop processing the current
+ // block.
+ if (Block) {
+ if (badCFG)
+ return nullptr;
+ LoopSuccessor = Block;
+ Block = nullptr;
+ } else {
+ LoopSuccessor = Succ;
+ }
+
+ CFGBlock *BodyBlock = nullptr, *TransitionBlock = nullptr;
+
+ // Process the loop body.
+ {
+ assert(W->getBody());
+
+ // Save the current values for Block, Succ, continue and break targets.
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
+ SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget),
+ save_break(BreakJumpTarget);
+
+ // Create an empty block to represent the transition block for looping back
+ // to the head of the loop.
+ Succ = TransitionBlock = createBlock(false);
+ TransitionBlock->setLoopTarget(W);
+ ContinueJumpTarget = JumpTarget(Succ, LoopBeginScopePos);
+
+ // All breaks should go to the code following the loop.
+ BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
+
+ // Loop body should end with destructor of Condition variable (if any).
+ addAutomaticObjDtors(ScopePos, LoopBeginScopePos, W);
+
+ // If body is not a compound statement create implicit scope
+ // and add destructors.
+ if (!isa<CompoundStmt>(W->getBody()))
+ addLocalScopeAndDtors(W->getBody());
+
+ // Create the body. The returned block is the entry to the loop body.
+ BodyBlock = addStmt(W->getBody());
+
+ if (!BodyBlock)
+ BodyBlock = ContinueJumpTarget.block; // can happen for "while(...) ;"
+ else if (Block && badCFG)
+ return nullptr;
+ }
+
+ // Because of short-circuit evaluation, the condition of the loop can span
+ // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that
+ // evaluate the condition.
+ CFGBlock *EntryConditionBlock = nullptr, *ExitConditionBlock = nullptr;
+
+ do {
+ Expr *C = W->getCond();
+
+ // Specially handle logical operators, which have a slightly
+ // more optimal CFG representation.
+ if (BinaryOperator *Cond = dyn_cast<BinaryOperator>(C->IgnoreParens()))
+ if (Cond->isLogicalOp()) {
+ std::tie(EntryConditionBlock, ExitConditionBlock) =
+ VisitLogicalOperator(Cond, W, BodyBlock, LoopSuccessor);
+ break;
+ }
+
+ // The default case when not handling logical operators.
+ ExitConditionBlock = createBlock(false);
+ ExitConditionBlock->setTerminator(W);
+
+ // Now add the actual condition to the condition block.
+ // Because the condition itself may contain control-flow, new blocks may
+ // be created. Thus we update "Succ" after adding the condition.
+ Block = ExitConditionBlock;
+ Block = EntryConditionBlock = addStmt(C);
+
+ // If this block contains a condition variable, add both the condition
+ // variable and initializer to the CFG.
+ if (VarDecl *VD = W->getConditionVariable()) {
+ if (Expr *Init = VD->getInit()) {
+ autoCreateBlock();
+ appendStmt(Block, W->getConditionVariableDeclStmt());
+ EntryConditionBlock = addStmt(Init);
+ assert(Block == EntryConditionBlock);
+ }
+ }
+
+ if (Block && badCFG)
+ return nullptr;
+
+ // See if this is a known constant.
+ const TryResult& KnownVal = tryEvaluateBool(C);
+
+ // Add the loop body entry as a successor to the condition.
+ addSuccessor(ExitConditionBlock, KnownVal.isFalse() ? nullptr : BodyBlock);
+    // Link up the condition block with the code that follows the loop (the
+    // false branch).
+ addSuccessor(ExitConditionBlock,
+ KnownVal.isTrue() ? nullptr : LoopSuccessor);
+
+  } while (false);
+
+ // Link up the loop-back block to the entry condition block.
+ addSuccessor(TransitionBlock, EntryConditionBlock);
+
+ // There can be no more statements in the condition block since we loop back
+ // to this block. NULL out Block to force lazy creation of another block.
+ Block = nullptr;
+
+ // Return the condition block, which is the dominating block for the loop.
+ Succ = EntryConditionBlock;
+ return EntryConditionBlock;
+}
+
+CFGBlock *CFGBuilder::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+  // FIXME: For now we pretend that @catch and the code it contains do not
+  // exist.
+ return Block;
+}
+
+CFGBlock *CFGBuilder::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
+ // FIXME: This isn't complete. We basically treat @throw like a return
+ // statement.
+
+ // If we were in the middle of a block we stop processing that block.
+ if (badCFG)
+ return nullptr;
+
+ // Create the new block.
+ Block = createBlock(false);
+
+ // The Exit block is the only successor.
+ addSuccessor(Block, &cfg->getExit());
+
+ // Add the statement to the block. This may create new blocks if S contains
+ // control-flow (short-circuit operations).
+ return VisitStmt(S, AddStmtChoice::AlwaysAdd);
+}
+
+CFGBlock *CFGBuilder::VisitCXXThrowExpr(CXXThrowExpr *T) {
+ // If we were in the middle of a block we stop processing that block.
+ if (badCFG)
+ return nullptr;
+
+ // Create the new block.
+ Block = createBlock(false);
+
+ if (TryTerminatedBlock)
+ // The current try statement is the only successor.
+ addSuccessor(Block, TryTerminatedBlock);
+ else
+ // otherwise the Exit block is the only successor.
+ addSuccessor(Block, &cfg->getExit());
+
+ // Add the statement to the block. This may create new blocks if S contains
+ // control-flow (short-circuit operations).
+ return VisitStmt(T, AddStmtChoice::AlwaysAdd);
+}
+
+CFGBlock *CFGBuilder::VisitDoStmt(DoStmt *D) {
+ CFGBlock *LoopSuccessor = nullptr;
+
+ // "do...while" is a control-flow statement. Thus we stop processing the
+ // current block.
+ if (Block) {
+ if (badCFG)
+ return nullptr;
+ LoopSuccessor = Block;
+ } else
+ LoopSuccessor = Succ;
+
+ // Because of short-circuit evaluation, the condition of the loop can span
+ // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that
+ // evaluate the condition.
+ CFGBlock *ExitConditionBlock = createBlock(false);
+ CFGBlock *EntryConditionBlock = ExitConditionBlock;
+
+ // Set the terminator for the "exit" condition block.
+ ExitConditionBlock->setTerminator(D);
+
+ // Now add the actual condition to the condition block. Because the condition
+ // itself may contain control-flow, new blocks may be created.
+ if (Stmt *C = D->getCond()) {
+ Block = ExitConditionBlock;
+ EntryConditionBlock = addStmt(C);
+ if (Block) {
+ if (badCFG)
+ return nullptr;
+ }
+ }
+
+ // The condition block is the implicit successor for the loop body.
+ Succ = EntryConditionBlock;
+
+ // See if this is a known constant.
+ const TryResult &KnownVal = tryEvaluateBool(D->getCond());
+
+ // Process the loop body.
+ CFGBlock *BodyBlock = nullptr;
+ {
+ assert(D->getBody());
+
+ // Save the current values for Block, Succ, and continue and break targets
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
+ SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget),
+ save_break(BreakJumpTarget);
+
+ // All continues within this loop should go to the condition block
+ ContinueJumpTarget = JumpTarget(EntryConditionBlock, ScopePos);
+
+ // All breaks should go to the code following the loop.
+ BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
+
+ // NULL out Block to force lazy instantiation of blocks for the body.
+ Block = nullptr;
+
+ // If body is not a compound statement create implicit scope
+ // and add destructors.
+ if (!isa<CompoundStmt>(D->getBody()))
+ addLocalScopeAndDtors(D->getBody());
+
+ // Create the body. The returned block is the entry to the loop body.
+ BodyBlock = addStmt(D->getBody());
+
+ if (!BodyBlock)
+ BodyBlock = EntryConditionBlock; // can happen for "do ; while(...)"
+ else if (Block) {
+ if (badCFG)
+ return nullptr;
+ }
+
+ if (!KnownVal.isFalse()) {
+ // Add an intermediate block between the BodyBlock and the
+ // ExitConditionBlock to represent the "loop back" transition. Create an
+ // empty block to represent the transition block for looping back to the
+ // head of the loop.
+ // FIXME: Can we do this more efficiently without adding another block?
+ Block = nullptr;
+ Succ = BodyBlock;
+ CFGBlock *LoopBackBlock = createBlock();
+ LoopBackBlock->setLoopTarget(D);
+
+ // Add the loop body entry as a successor to the condition.
+ addSuccessor(ExitConditionBlock, LoopBackBlock);
+ }
+ else
+ addSuccessor(ExitConditionBlock, nullptr);
+ }
+
+  // Link up the condition block with the code that follows the loop (the
+  // false branch).
+ addSuccessor(ExitConditionBlock, KnownVal.isTrue() ? nullptr : LoopSuccessor);
+
+ // There can be no more statements in the body block(s) since we loop back to
+ // the body. NULL out Block to force lazy creation of another block.
+ Block = nullptr;
+
+ // Return the loop body, which is the dominating block for the loop.
+ Succ = BodyBlock;
+ return BodyBlock;
+}
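+
+// Sketch for "do body while (cond)": entry flows straight into [body],
+// which falls through to [cond]; a true condition loops back to [body]
+// through a dedicated loop-back block, a false one exits to the code after
+// the loop, and a condition known to be false drops the loop-back edge
+// entirely.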
+
+CFGBlock *CFGBuilder::VisitContinueStmt(ContinueStmt *C) {
+ // "continue" is a control-flow statement. Thus we stop processing the
+ // current block.
+ if (badCFG)
+ return nullptr;
+
+ // Now create a new block that ends with the continue statement.
+ Block = createBlock(false);
+ Block->setTerminator(C);
+
+ // If there is no target for the continue, then we are looking at an
+ // incomplete AST. This means the CFG cannot be constructed.
+ if (ContinueJumpTarget.block) {
+ addAutomaticObjDtors(ScopePos, ContinueJumpTarget.scopePosition, C);
+ addSuccessor(Block, ContinueJumpTarget.block);
+ } else
+ badCFG = true;
+
+ return Block;
+}
+
+CFGBlock *CFGBuilder::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E,
+ AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, E)) {
+ autoCreateBlock();
+ appendStmt(Block, E);
+ }
+
+ // VLA types have expressions that must be evaluated.
+ CFGBlock *lastBlock = Block;
+
+ if (E->isArgumentType()) {
+    for (const VariableArrayType *VA =
+             FindVA(E->getArgumentType().getTypePtr());
+         VA != nullptr; VA = FindVA(VA->getElementType().getTypePtr()))
+ lastBlock = addStmt(VA->getSizeExpr());
+ }
+ return lastBlock;
+}
+
+/// VisitStmtExpr - Utility method to handle (nested) statement
+/// expressions (a GCC extension).
+CFGBlock *CFGBuilder::VisitStmtExpr(StmtExpr *SE, AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, SE)) {
+ autoCreateBlock();
+ appendStmt(Block, SE);
+ }
+ return VisitCompoundStmt(SE->getSubStmt());
+}
+
+CFGBlock *CFGBuilder::VisitSwitchStmt(SwitchStmt *Terminator) {
+ // "switch" is a control-flow statement. Thus we stop processing the current
+ // block.
+ CFGBlock *SwitchSuccessor = nullptr;
+
+ // Save local scope position because in case of condition variable ScopePos
+ // won't be restored when traversing AST.
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+
+ // Create local scope for possible condition variable.
+ // Store scope position. Add implicit destructor.
+ if (VarDecl *VD = Terminator->getConditionVariable()) {
+ LocalScope::const_iterator SwitchBeginScopePos = ScopePos;
+ addLocalScopeForVarDecl(VD);
+ addAutomaticObjDtors(ScopePos, SwitchBeginScopePos, Terminator);
+ }
+
+ if (Block) {
+ if (badCFG)
+ return nullptr;
+ SwitchSuccessor = Block;
+ } else SwitchSuccessor = Succ;
+
+ // Save the current "switch" context.
+ SaveAndRestore<CFGBlock*> save_switch(SwitchTerminatedBlock),
+ save_default(DefaultCaseBlock);
+ SaveAndRestore<JumpTarget> save_break(BreakJumpTarget);
+
+ // Set the "default" case to be the block after the switch statement. If the
+ // switch statement contains a "default:", this value will be overwritten with
+ // the block for that code.
+ DefaultCaseBlock = SwitchSuccessor;
+
+ // Create a new block that will contain the switch statement.
+ SwitchTerminatedBlock = createBlock(false);
+
+ // Now process the switch body. The code after the switch is the implicit
+ // successor.
+ Succ = SwitchSuccessor;
+ BreakJumpTarget = JumpTarget(SwitchSuccessor, ScopePos);
+
+ // When visiting the body, the case statements should automatically get linked
+ // up to the switch. We also don't keep a pointer to the body, since all
+ // control-flow from the switch goes to case/default statements.
+ assert(Terminator->getBody() && "switch must contain a non-NULL body");
+ Block = nullptr;
+
+ // For pruning unreachable case statements, save the current state
+ // for tracking the condition value.
+ SaveAndRestore<bool> save_switchExclusivelyCovered(switchExclusivelyCovered,
+ false);
+
+ // Determine if the switch condition can be explicitly evaluated.
+ assert(Terminator->getCond() && "switch condition must be non-NULL");
+ Expr::EvalResult result;
+ bool b = tryEvaluate(Terminator->getCond(), result);
+ SaveAndRestore<Expr::EvalResult*> save_switchCond(switchCond,
+ b ? &result : nullptr);
+
+ // If body is not a compound statement create implicit scope
+ // and add destructors.
+ if (!isa<CompoundStmt>(Terminator->getBody()))
+ addLocalScopeAndDtors(Terminator->getBody());
+
+ addStmt(Terminator->getBody());
+ if (Block) {
+ if (badCFG)
+ return nullptr;
+ }
+
+ // If we have no "default:" case, the default transition is to the code
+ // following the switch body. Moreover, take into account if all the
+ // cases of a switch are covered (e.g., switching on an enum value).
+ //
+ // Note: We add a successor to a switch that is considered covered yet has no
+ // case statements if the enumeration has no enumerators.
+ bool SwitchAlwaysHasSuccessor = false;
+ SwitchAlwaysHasSuccessor |= switchExclusivelyCovered;
+ SwitchAlwaysHasSuccessor |= Terminator->isAllEnumCasesCovered() &&
+ Terminator->getSwitchCaseList();
+ addSuccessor(SwitchTerminatedBlock, DefaultCaseBlock,
+ !SwitchAlwaysHasSuccessor);
+
+ // Add the terminator and condition in the switch block.
+ SwitchTerminatedBlock->setTerminator(Terminator);
+ Block = SwitchTerminatedBlock;
+ CFGBlock *LastBlock = addStmt(Terminator->getCond());
+
+ // Finally, if the SwitchStmt contains a condition variable, add both the
+ // SwitchStmt and the condition variable initialization to the CFG.
+ if (VarDecl *VD = Terminator->getConditionVariable()) {
+ if (Expr *Init = VD->getInit()) {
+ autoCreateBlock();
+ appendStmt(Block, Terminator->getConditionVariableDeclStmt());
+ LastBlock = addStmt(Init);
+ }
+ }
+
+ return LastBlock;
+}
+
+static bool shouldAddCase(bool &switchExclusivelyCovered,
+ const Expr::EvalResult *switchCond,
+ const CaseStmt *CS,
+ ASTContext &Ctx) {
+ if (!switchCond)
+ return true;
+
+ bool addCase = false;
+
+ if (!switchExclusivelyCovered) {
+ if (switchCond->Val.isInt()) {
+ // Evaluate the LHS of the case value.
+ const llvm::APSInt &lhsInt = CS->getLHS()->EvaluateKnownConstInt(Ctx);
+ const llvm::APSInt &condInt = switchCond->Val.getInt();
+
+ if (condInt == lhsInt) {
+ addCase = true;
+ switchExclusivelyCovered = true;
+ }
+ else if (condInt > lhsInt) {
+ if (const Expr *RHS = CS->getRHS()) {
+ // Evaluate the RHS of the case value.
+ const llvm::APSInt &V2 = RHS->EvaluateKnownConstInt(Ctx);
+ if (V2 >= condInt) {
+ addCase = true;
+ switchExclusivelyCovered = true;
+ }
+ }
+ }
+ }
+ else
+ addCase = true;
+ }
+ return addCase;
+}
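+
+// For instance, with "switch (2)" the edge to "case 1:" is pruned, while
+// "case 2:" (or a GNU case range such as "case 1 ... 3:") matches the
+// constant, keeps its edge, and marks the switch as exclusively covered so
+// that every later case is pruned as well.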
+
+CFGBlock *CFGBuilder::VisitCaseStmt(CaseStmt *CS) {
+ // CaseStmts are essentially labels, so they are the first statement in a
+ // block.
+ CFGBlock *TopBlock = nullptr, *LastBlock = nullptr;
+
+ if (Stmt *Sub = CS->getSubStmt()) {
+ // For deeply nested chains of CaseStmts, instead of doing a recursion
+ // (which can blow out the stack), manually unroll and create blocks
+ // along the way.
+ while (isa<CaseStmt>(Sub)) {
+ CFGBlock *currentBlock = createBlock(false);
+ currentBlock->setLabel(CS);
+
+ if (TopBlock)
+ addSuccessor(LastBlock, currentBlock);
+ else
+ TopBlock = currentBlock;
+
+ addSuccessor(SwitchTerminatedBlock,
+ shouldAddCase(switchExclusivelyCovered, switchCond,
+ CS, *Context)
+ ? currentBlock : nullptr);
+
+ LastBlock = currentBlock;
+ CS = cast<CaseStmt>(Sub);
+ Sub = CS->getSubStmt();
+ }
+
+ addStmt(Sub);
+ }
+
+ CFGBlock *CaseBlock = Block;
+ if (!CaseBlock)
+ CaseBlock = createBlock();
+
+  // Case statements partition blocks, so this is the top of the basic block
+  // we were processing (the "case XXX:" is the label).
+ CaseBlock->setLabel(CS);
+
+ if (badCFG)
+ return nullptr;
+
+ // Add this block to the list of successors for the block with the switch
+ // statement.
+ assert(SwitchTerminatedBlock);
+ addSuccessor(SwitchTerminatedBlock, CaseBlock,
+ shouldAddCase(switchExclusivelyCovered, switchCond,
+ CS, *Context));
+
+ // We set Block to NULL to allow lazy creation of a new block (if necessary)
+ Block = nullptr;
+
+ if (TopBlock) {
+ addSuccessor(LastBlock, CaseBlock);
+ Succ = TopBlock;
+ } else {
+ // This block is now the implicit successor of other blocks.
+ Succ = CaseBlock;
+ }
+
+ return Succ;
+}
+
+CFGBlock *CFGBuilder::VisitDefaultStmt(DefaultStmt *Terminator) {
+ if (Terminator->getSubStmt())
+ addStmt(Terminator->getSubStmt());
+
+ DefaultCaseBlock = Block;
+
+ if (!DefaultCaseBlock)
+ DefaultCaseBlock = createBlock();
+
+ // Default statements partition blocks, so this is the top of the basic block
+ // we were processing (the "default:" is the label).
+ DefaultCaseBlock->setLabel(Terminator);
+
+ if (badCFG)
+ return nullptr;
+
+ // Unlike case statements, we don't add the default block to the successors
+ // for the switch statement immediately. This is done when we finish
+ // processing the switch statement. This allows for the default case
+ // (including a fall-through to the code after the switch statement) to always
+ // be the last successor of a switch-terminated block.
+
+ // We set Block to NULL to allow lazy creation of a new block (if necessary)
+ Block = nullptr;
+
+ // This block is now the implicit successor of other blocks.
+ Succ = DefaultCaseBlock;
+
+ return DefaultCaseBlock;
+}
+
+CFGBlock *CFGBuilder::VisitCXXTryStmt(CXXTryStmt *Terminator) {
+ // "try"/"catch" is a control-flow statement. Thus we stop processing the
+ // current block.
+ CFGBlock *TrySuccessor = nullptr;
+
+ if (Block) {
+ if (badCFG)
+ return nullptr;
+ TrySuccessor = Block;
+ } else TrySuccessor = Succ;
+
+ CFGBlock *PrevTryTerminatedBlock = TryTerminatedBlock;
+
+ // Create a new block that will contain the try statement.
+ CFGBlock *NewTryTerminatedBlock = createBlock(false);
+ // Add the terminator in the try block.
+ NewTryTerminatedBlock->setTerminator(Terminator);
+
+ bool HasCatchAll = false;
+  for (unsigned h = 0; h < Terminator->getNumHandlers(); ++h) {
+ // The code after the try is the implicit successor.
+ Succ = TrySuccessor;
+ CXXCatchStmt *CS = Terminator->getHandler(h);
+ if (CS->getExceptionDecl() == nullptr) {
+ HasCatchAll = true;
+ }
+ Block = nullptr;
+ CFGBlock *CatchBlock = VisitCXXCatchStmt(CS);
+ if (!CatchBlock)
+ return nullptr;
+ // Add this block to the list of successors for the block with the try
+ // statement.
+ addSuccessor(NewTryTerminatedBlock, CatchBlock);
+ }
+ if (!HasCatchAll) {
+ if (PrevTryTerminatedBlock)
+ addSuccessor(NewTryTerminatedBlock, PrevTryTerminatedBlock);
+ else
+ addSuccessor(NewTryTerminatedBlock, &cfg->getExit());
+ }
+
+ // The code after the try is the implicit successor.
+ Succ = TrySuccessor;
+
+ // Save the current "try" context.
+ SaveAndRestore<CFGBlock*> save_try(TryTerminatedBlock, NewTryTerminatedBlock);
+ cfg->addTryDispatchBlock(TryTerminatedBlock);
+
+ assert(Terminator->getTryBlock() && "try must contain a non-NULL body");
+ Block = nullptr;
+ return addStmt(Terminator->getTryBlock());
+}
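+
+// For "try { f(); } catch (int) { }" the try-terminated block gets one
+// successor per handler and, since there is no catch-all, an extra edge to
+// the enclosing try dispatch block (or to the exit block) for exceptions
+// that no handler matches.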
+
+CFGBlock *CFGBuilder::VisitCXXCatchStmt(CXXCatchStmt *CS) {
+  // CXXCatchStmts are treated like labels, so they are the first statement
+  // in a block.
+
+ // Save local scope position because in case of exception variable ScopePos
+ // won't be restored when traversing AST.
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+
+ // Create local scope for possible exception variable.
+ // Store scope position. Add implicit destructor.
+ if (VarDecl *VD = CS->getExceptionDecl()) {
+ LocalScope::const_iterator BeginScopePos = ScopePos;
+ addLocalScopeForVarDecl(VD);
+ addAutomaticObjDtors(ScopePos, BeginScopePos, CS);
+ }
+
+ if (CS->getHandlerBlock())
+ addStmt(CS->getHandlerBlock());
+
+ CFGBlock *CatchBlock = Block;
+ if (!CatchBlock)
+ CatchBlock = createBlock();
+
+  // A CXXCatchStmt is more than just a label. It has semantic meaning as
+  // well, as it implicitly "initializes" the catch variable. Add
+ // it to the CFG as a CFGElement so that the control-flow of these
+ // semantics gets captured.
+ appendStmt(CatchBlock, CS);
+
+ // Also add the CXXCatchStmt as a label, to mirror handling of regular
+ // labels.
+ CatchBlock->setLabel(CS);
+
+ // Bail out if the CFG is bad.
+ if (badCFG)
+ return nullptr;
+
+ // We set Block to NULL to allow lazy creation of a new block (if necessary)
+ Block = nullptr;
+
+ return CatchBlock;
+}
+
+CFGBlock *CFGBuilder::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
+ // C++0x for-range statements are specified as [stmt.ranged]:
+ //
+ // {
+ // auto && __range = range-init;
+ // for ( auto __begin = begin-expr,
+ // __end = end-expr;
+ // __begin != __end;
+ // ++__begin ) {
+ // for-range-declaration = *__begin;
+ // statement
+ // }
+ // }
+
+ // Save local scope position before the addition of the implicit variables.
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+
+ // Create local scopes and destructors for range, begin and end variables.
+ if (Stmt *Range = S->getRangeStmt())
+ addLocalScopeForStmt(Range);
+ if (Stmt *BeginEnd = S->getBeginEndStmt())
+ addLocalScopeForStmt(BeginEnd);
+ addAutomaticObjDtors(ScopePos, save_scope_pos.get(), S);
+
+ LocalScope::const_iterator ContinueScopePos = ScopePos;
+
+ // "for" is a control-flow statement. Thus we stop processing the current
+ // block.
+ CFGBlock *LoopSuccessor = nullptr;
+ if (Block) {
+ if (badCFG)
+ return nullptr;
+ LoopSuccessor = Block;
+ } else
+ LoopSuccessor = Succ;
+
+ // Save the current value for the break targets.
+ // All breaks should go to the code following the loop.
+ SaveAndRestore<JumpTarget> save_break(BreakJumpTarget);
+ BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
+
+ // The block for the __begin != __end expression.
+ CFGBlock *ConditionBlock = createBlock(false);
+ ConditionBlock->setTerminator(S);
+
+ // Now add the actual condition to the condition block.
+ if (Expr *C = S->getCond()) {
+ Block = ConditionBlock;
+ CFGBlock *BeginConditionBlock = addStmt(C);
+ if (badCFG)
+ return nullptr;
+ assert(BeginConditionBlock == ConditionBlock &&
+ "condition block in for-range was unexpectedly complex");
+ (void)BeginConditionBlock;
+ }
+
+ // The condition block is the implicit successor for the loop body as well as
+ // any code above the loop.
+ Succ = ConditionBlock;
+
+ // See if this is a known constant.
+ TryResult KnownVal(true);
+
+ if (S->getCond())
+ KnownVal = tryEvaluateBool(S->getCond());
+
+ // Now create the loop body.
+ {
+ assert(S->getBody());
+
+ // Save the current values for Block, Succ, and continue targets.
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
+ SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget);
+
+ // Generate increment code in its own basic block. This is the target of
+ // continue statements.
+ Block = nullptr;
+ Succ = addStmt(S->getInc());
+ ContinueJumpTarget = JumpTarget(Succ, ContinueScopePos);
+
+ // The starting block for the loop increment is the block that should
+ // represent the 'loop target' for looping back to the start of the loop.
+ ContinueJumpTarget.block->setLoopTarget(S);
+
+ // Finish up the increment block and prepare to start the loop body.
+ assert(Block);
+ if (badCFG)
+ return nullptr;
+ Block = nullptr;
+
+ // Add implicit scope and dtors for loop variable.
+ addLocalScopeAndDtors(S->getLoopVarStmt());
+
+ // Populate a new block to contain the loop body and loop variable.
+ addStmt(S->getBody());
+ if (badCFG)
+ return nullptr;
+ CFGBlock *LoopVarStmtBlock = addStmt(S->getLoopVarStmt());
+ if (badCFG)
+ return nullptr;
+
+ // This new body block is a successor to our condition block.
+ addSuccessor(ConditionBlock,
+ KnownVal.isFalse() ? nullptr : LoopVarStmtBlock);
+ }
+
+ // Link up the condition block with the code that follows the loop (the
+ // false branch).
+ addSuccessor(ConditionBlock, KnownVal.isTrue() ? nullptr : LoopSuccessor);
+
+ // Add the initialization statements.
+ Block = createBlock();
+ addStmt(S->getBeginEndStmt());
+ return addStmt(S->getRangeStmt());
+}
+
+CFGBlock *CFGBuilder::VisitExprWithCleanups(ExprWithCleanups *E,
+ AddStmtChoice asc) {
+ if (BuildOpts.AddTemporaryDtors) {
+    // When adding implicit destructors, visit the full expression to add
+    // destructors for its temporaries.
+ TempDtorContext Context;
+ VisitForTemporaryDtors(E->getSubExpr(), false, Context);
+
+    // The full expression has to be added as a CFGStmt so it will be
+    // sequenced before the destructors of its temporaries.
+ asc = asc.withAlwaysAdd(true);
+ }
+ return Visit(E->getSubExpr(), asc);
+}
+
+CFGBlock *CFGBuilder::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E,
+ AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, E)) {
+ autoCreateBlock();
+ appendStmt(Block, E);
+
+ // We do not want to propagate the AlwaysAdd property.
+ asc = asc.withAlwaysAdd(false);
+ }
+ return Visit(E->getSubExpr(), asc);
+}
+
+CFGBlock *CFGBuilder::VisitCXXConstructExpr(CXXConstructExpr *C,
+ AddStmtChoice asc) {
+ autoCreateBlock();
+ appendStmt(Block, C);
+
+ return VisitChildren(C);
+}
+
+CFGBlock *CFGBuilder::VisitCXXNewExpr(CXXNewExpr *NE,
+ AddStmtChoice asc) {
+ autoCreateBlock();
+ appendStmt(Block, NE);
+
+ if (NE->getInitializer())
+ Block = Visit(NE->getInitializer());
+ if (BuildOpts.AddCXXNewAllocator)
+ appendNewAllocator(Block, NE);
+ if (NE->isArray())
+ Block = Visit(NE->getArraySize());
+ for (CXXNewExpr::arg_iterator I = NE->placement_arg_begin(),
+ E = NE->placement_arg_end(); I != E; ++I)
+ Block = Visit(*I);
+ return Block;
+}
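+
+// Because blocks are populated backwards, visiting the initializer first
+// and the placement arguments last here yields the expected evaluation
+// order in the finished CFG: placement arguments, array size, the allocator
+// call (when AddCXXNewAllocator is set), the initializer, and finally the
+// new-expression itself.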
+
+CFGBlock *CFGBuilder::VisitCXXDeleteExpr(CXXDeleteExpr *DE,
+ AddStmtChoice asc) {
+ autoCreateBlock();
+ appendStmt(Block, DE);
+ QualType DTy = DE->getDestroyedType();
+ DTy = DTy.getNonReferenceType();
+ CXXRecordDecl *RD = Context->getBaseElementType(DTy)->getAsCXXRecordDecl();
+ if (RD) {
+ if (RD->isCompleteDefinition() && !RD->hasTrivialDestructor())
+ appendDeleteDtor(Block, RD, DE);
+ }
+
+ return VisitChildren(DE);
+}
+
+CFGBlock *CFGBuilder::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E,
+ AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, E)) {
+ autoCreateBlock();
+ appendStmt(Block, E);
+ // We do not want to propagate the AlwaysAdd property.
+ asc = asc.withAlwaysAdd(false);
+ }
+ return Visit(E->getSubExpr(), asc);
+}
+
+CFGBlock *CFGBuilder::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *C,
+ AddStmtChoice asc) {
+ autoCreateBlock();
+ appendStmt(Block, C);
+ return VisitChildren(C);
+}
+
+CFGBlock *CFGBuilder::VisitImplicitCastExpr(ImplicitCastExpr *E,
+ AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, E)) {
+ autoCreateBlock();
+ appendStmt(Block, E);
+ }
+ return Visit(E->getSubExpr(), AddStmtChoice());
+}
+
+CFGBlock *CFGBuilder::VisitIndirectGotoStmt(IndirectGotoStmt *I) {
+ // Lazily create the indirect-goto dispatch block if there isn't one already.
+ CFGBlock *IBlock = cfg->getIndirectGotoBlock();
+
+ if (!IBlock) {
+ IBlock = createBlock(false);
+ cfg->setIndirectGotoBlock(IBlock);
+ }
+
+ // IndirectGoto is a control-flow statement. Thus we stop processing the
+ // current block and create a new one.
+ if (badCFG)
+ return nullptr;
+
+ Block = createBlock(false);
+ Block->setTerminator(I);
+ addSuccessor(Block, IBlock);
+ return addStmt(I->getTarget());
+}
+
+CFGBlock *CFGBuilder::VisitForTemporaryDtors(Stmt *E, bool BindToTemporary,
+ TempDtorContext &Context) {
+ assert(BuildOpts.AddImplicitDtors && BuildOpts.AddTemporaryDtors);
+
+tryAgain:
+ if (!E) {
+ badCFG = true;
+ return nullptr;
+ }
+ switch (E->getStmtClass()) {
+ default:
+ return VisitChildrenForTemporaryDtors(E, Context);
+
+ case Stmt::BinaryOperatorClass:
+ return VisitBinaryOperatorForTemporaryDtors(cast<BinaryOperator>(E),
+ Context);
+
+ case Stmt::CXXBindTemporaryExprClass:
+ return VisitCXXBindTemporaryExprForTemporaryDtors(
+ cast<CXXBindTemporaryExpr>(E), BindToTemporary, Context);
+
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass:
+ return VisitConditionalOperatorForTemporaryDtors(
+ cast<AbstractConditionalOperator>(E), BindToTemporary, Context);
+
+ case Stmt::ImplicitCastExprClass:
+ // For implicit cast we want BindToTemporary to be passed further.
+ E = cast<CastExpr>(E)->getSubExpr();
+ goto tryAgain;
+
+ case Stmt::CXXFunctionalCastExprClass:
+ // For functional cast we want BindToTemporary to be passed further.
+ E = cast<CXXFunctionalCastExpr>(E)->getSubExpr();
+ goto tryAgain;
+
+ case Stmt::ParenExprClass:
+ E = cast<ParenExpr>(E)->getSubExpr();
+ goto tryAgain;
+
+ case Stmt::MaterializeTemporaryExprClass: {
+ const MaterializeTemporaryExpr* MTE = cast<MaterializeTemporaryExpr>(E);
+ BindToTemporary = (MTE->getStorageDuration() != SD_FullExpression);
+ SmallVector<const Expr *, 2> CommaLHSs;
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
+ // Find the expression whose lifetime needs to be extended.
+ E = const_cast<Expr *>(
+ cast<MaterializeTemporaryExpr>(E)
+ ->GetTemporaryExpr()
+ ->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments));
+ // Visit the skipped comma operator left-hand sides for other temporaries.
+ for (const Expr *CommaLHS : CommaLHSs) {
+ VisitForTemporaryDtors(const_cast<Expr *>(CommaLHS),
+ /*BindToTemporary=*/false, Context);
+ }
+ goto tryAgain;
+ }
+
+ case Stmt::BlockExprClass:
+ // Don't recurse into blocks; their subexpressions don't get evaluated
+ // here.
+ return Block;
+
+ case Stmt::LambdaExprClass: {
+ // For lambda expressions, only recurse into the capture initializers,
+ // and not the body.
+ auto *LE = cast<LambdaExpr>(E);
+ CFGBlock *B = Block;
+ for (Expr *Init : LE->capture_inits()) {
+ if (CFGBlock *R = VisitForTemporaryDtors(
+ Init, /*BindToTemporary=*/false, Context))
+ B = R;
+ }
+ return B;
+ }
+
+ case Stmt::CXXDefaultArgExprClass:
+ E = cast<CXXDefaultArgExpr>(E)->getExpr();
+ goto tryAgain;
+
+ case Stmt::CXXDefaultInitExprClass:
+ E = cast<CXXDefaultInitExpr>(E)->getExpr();
+ goto tryAgain;
+ }
+}
+
+CFGBlock *CFGBuilder::VisitChildrenForTemporaryDtors(Stmt *E,
+ TempDtorContext &Context) {
+ if (isa<LambdaExpr>(E)) {
+ // Do not visit the children of lambdas; they have their own CFGs.
+ return Block;
+ }
+
+  // When visiting children for destructors, we want to visit them in the
+  // reverse of the order in which they will appear in the CFG. Because the
+  // CFG is built bottom-up, this means we visit them in their natural
+  // order, which reverses them in the CFG.
+ CFGBlock *B = Block;
+ for (Stmt *Child : E->children())
+ if (Child)
+ if (CFGBlock *R = VisitForTemporaryDtors(Child, false, Context))
+ B = R;
+
+ return B;
+}
+
+CFGBlock *CFGBuilder::VisitBinaryOperatorForTemporaryDtors(
+ BinaryOperator *E, TempDtorContext &Context) {
+ if (E->isLogicalOp()) {
+ VisitForTemporaryDtors(E->getLHS(), false, Context);
+ TryResult RHSExecuted = tryEvaluateBool(E->getLHS());
+ if (RHSExecuted.isKnown() && E->getOpcode() == BO_LOr)
+ RHSExecuted.negate();
+
+ // We do not know at CFG-construction time whether the right-hand-side was
+ // executed, thus we add a branch node that depends on the temporary
+ // constructor call.
+ TempDtorContext RHSContext(
+ bothKnownTrue(Context.KnownExecuted, RHSExecuted));
+ VisitForTemporaryDtors(E->getRHS(), false, RHSContext);
+ InsertTempDtorDecisionBlock(RHSContext);
+
+ return Block;
+ }
+
+ if (E->isAssignmentOp()) {
+    // For the assignment operator (=), the LHS expression is visited before
+    // the RHS expression. For destructors, visit them in reverse order.
+ CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS(), false, Context);
+ CFGBlock *LHSBlock = VisitForTemporaryDtors(E->getLHS(), false, Context);
+ return LHSBlock ? LHSBlock : RHSBlock;
+ }
+
+  // For any other binary operator, the RHS expression is visited before the
+  // LHS expression (the order of the children). For destructors, visit them
+  // in reverse order.
+ CFGBlock *LHSBlock = VisitForTemporaryDtors(E->getLHS(), false, Context);
+ CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS(), false, Context);
+ return RHSBlock ? RHSBlock : LHSBlock;
+}
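+
+// Illustrative example (editorial sketch, not part of the original source):
+// the decision block built above models code like the following, where the
+// temporary S() is created and destroyed only on paths where the RHS ran:
+//
+//   struct S { S(); ~S(); };
+//   bool f(S);
+//   bool g(bool a) { return a && f(S()); }  // ~S() runs only when 'a' holds.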
+
+CFGBlock *CFGBuilder::VisitCXXBindTemporaryExprForTemporaryDtors(
+ CXXBindTemporaryExpr *E, bool BindToTemporary, TempDtorContext &Context) {
+ // First add destructors for temporaries in subexpression.
+ CFGBlock *B = VisitForTemporaryDtors(E->getSubExpr(), false, Context);
+ if (!BindToTemporary) {
+    // If the lifetime of the temporary is not prolonged (by binding it to a
+    // constant reference), add a destructor for it.
+
+ const CXXDestructorDecl *Dtor = E->getTemporary()->getDestructor();
+
+ if (Dtor->getParent()->isAnyDestructorNoReturn()) {
+ // If the destructor is marked as a no-return destructor, we need to
+ // create a new block for the destructor which does not have as a
+ // successor anything built thus far. Control won't flow out of this
+ // block.
+ if (B) Succ = B;
+ Block = createNoReturnBlock();
+ } else if (Context.needsTempDtorBranch()) {
+ // If we need to introduce a branch, we add a new block that we will hook
+ // up to a decision block later.
+ if (B) Succ = B;
+ Block = createBlock();
+ } else {
+ autoCreateBlock();
+ }
+ if (Context.needsTempDtorBranch()) {
+ Context.setDecisionPoint(Succ, E);
+ }
+ appendTemporaryDtor(Block, E);
+
+ B = Block;
+ }
+ return B;
+}
+
+void CFGBuilder::InsertTempDtorDecisionBlock(const TempDtorContext &Context,
+ CFGBlock *FalseSucc) {
+ if (!Context.TerminatorExpr) {
+ // If no temporary was found, we do not need to insert a decision point.
+ return;
+ }
+ assert(Context.TerminatorExpr);
+ CFGBlock *Decision = createBlock(false);
+ Decision->setTerminator(CFGTerminator(Context.TerminatorExpr, true));
+ addSuccessor(Decision, Block, !Context.KnownExecuted.isFalse());
+ addSuccessor(Decision, FalseSucc ? FalseSucc : Context.Succ,
+ !Context.KnownExecuted.isTrue());
+ Block = Decision;
+}
+
+CFGBlock *CFGBuilder::VisitConditionalOperatorForTemporaryDtors(
+ AbstractConditionalOperator *E, bool BindToTemporary,
+ TempDtorContext &Context) {
+ VisitForTemporaryDtors(E->getCond(), false, Context);
+ CFGBlock *ConditionBlock = Block;
+ CFGBlock *ConditionSucc = Succ;
+ TryResult ConditionVal = tryEvaluateBool(E->getCond());
+ TryResult NegatedVal = ConditionVal;
+ if (NegatedVal.isKnown()) NegatedVal.negate();
+
+ TempDtorContext TrueContext(
+ bothKnownTrue(Context.KnownExecuted, ConditionVal));
+ VisitForTemporaryDtors(E->getTrueExpr(), BindToTemporary, TrueContext);
+ CFGBlock *TrueBlock = Block;
+
+ Block = ConditionBlock;
+ Succ = ConditionSucc;
+ TempDtorContext FalseContext(
+ bothKnownTrue(Context.KnownExecuted, NegatedVal));
+ VisitForTemporaryDtors(E->getFalseExpr(), BindToTemporary, FalseContext);
+
+ if (TrueContext.TerminatorExpr && FalseContext.TerminatorExpr) {
+ InsertTempDtorDecisionBlock(FalseContext, TrueBlock);
+ } else if (TrueContext.TerminatorExpr) {
+ Block = TrueBlock;
+ InsertTempDtorDecisionBlock(TrueContext);
+ } else {
+ InsertTempDtorDecisionBlock(FalseContext);
+ }
+ return Block;
+}
+
+} // end anonymous namespace
+
+/// createBlock - Constructs and adds a new CFGBlock to the CFG. The block has
+/// no successors or predecessors. If this is the first block created in the
+/// CFG, it is automatically set to be the Entry and Exit of the CFG.
+CFGBlock *CFG::createBlock() {
+ bool first_block = begin() == end();
+
+ // Create the block.
+ CFGBlock *Mem = getAllocator().Allocate<CFGBlock>();
+ new (Mem) CFGBlock(NumBlockIDs++, BlkBVC, this);
+ Blocks.push_back(Mem, BlkBVC);
+
+ // If this is the first block, set it as the Entry and Exit.
+ if (first_block)
+ Entry = Exit = &back();
+
+ // Return the block.
+ return &back();
+}
+
+/// buildCFG - Constructs a CFG from an AST.
+std::unique_ptr<CFG> CFG::buildCFG(const Decl *D, Stmt *Statement,
+ ASTContext *C, const BuildOptions &BO) {
+ CFGBuilder Builder(C, BO);
+ return Builder.buildCFG(D, Statement);
+}
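+
+// Usage sketch (editorial addition; names such as 'FD' and 'Ctx' are assumed
+// to come from the caller): building and printing a CFG for a function body.
+//
+//   CFG::BuildOptions Opts;
+//   if (std::unique_ptr<CFG> Cfg =
+//           CFG::buildCFG(FD, FD->getBody(), &Ctx, Opts))
+//     Cfg->print(llvm::errs(), Ctx.getLangOpts(), /*ShowColors=*/false);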
+
+const CXXDestructorDecl *
+CFGImplicitDtor::getDestructorDecl(ASTContext &astContext) const {
+ switch (getKind()) {
+ case CFGElement::Statement:
+ case CFGElement::Initializer:
+ case CFGElement::NewAllocator:
+ llvm_unreachable("getDestructorDecl should only be used with "
+ "ImplicitDtors");
+ case CFGElement::AutomaticObjectDtor: {
+ const VarDecl *var = castAs<CFGAutomaticObjDtor>().getVarDecl();
+ QualType ty = var->getType();
+ ty = ty.getNonReferenceType();
+ while (const ArrayType *arrayType = astContext.getAsArrayType(ty)) {
+ ty = arrayType->getElementType();
+ }
+ const RecordType *recordType = ty->getAs<RecordType>();
+ const CXXRecordDecl *classDecl =
+ cast<CXXRecordDecl>(recordType->getDecl());
+ return classDecl->getDestructor();
+ }
+ case CFGElement::DeleteDtor: {
+ const CXXDeleteExpr *DE = castAs<CFGDeleteDtor>().getDeleteExpr();
+ QualType DTy = DE->getDestroyedType();
+ DTy = DTy.getNonReferenceType();
+ const CXXRecordDecl *classDecl =
+ astContext.getBaseElementType(DTy)->getAsCXXRecordDecl();
+ return classDecl->getDestructor();
+ }
+ case CFGElement::TemporaryDtor: {
+ const CXXBindTemporaryExpr *bindExpr =
+ castAs<CFGTemporaryDtor>().getBindTemporaryExpr();
+ const CXXTemporary *temp = bindExpr->getTemporary();
+ return temp->getDestructor();
+ }
+  case CFGElement::BaseDtor:
+  case CFGElement::MemberDtor:
+    // Not yet supported.
+    return nullptr;
+ }
+ llvm_unreachable("getKind() returned bogus value");
+}
+
+bool CFGImplicitDtor::isNoReturn(ASTContext &astContext) const {
+ if (const CXXDestructorDecl *DD = getDestructorDecl(astContext))
+ return DD->isNoReturn();
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// CFGBlock operations.
+//===----------------------------------------------------------------------===//
+
+CFGBlock::AdjacentBlock::AdjacentBlock(CFGBlock *B, bool IsReachable)
+ : ReachableBlock(IsReachable ? B : nullptr),
+ UnreachableBlock(!IsReachable ? B : nullptr,
+ B && IsReachable ? AB_Normal : AB_Unreachable) {}
+
+CFGBlock::AdjacentBlock::AdjacentBlock(CFGBlock *B, CFGBlock *AlternateBlock)
+ : ReachableBlock(B),
+ UnreachableBlock(B == AlternateBlock ? nullptr : AlternateBlock,
+ B == AlternateBlock ? AB_Alternate : AB_Normal) {}
+
+void CFGBlock::addSuccessor(AdjacentBlock Succ,
+ BumpVectorContext &C) {
+ if (CFGBlock *B = Succ.getReachableBlock())
+ B->Preds.push_back(AdjacentBlock(this, Succ.isReachable()), C);
+
+ if (CFGBlock *UnreachableB = Succ.getPossiblyUnreachableBlock())
+ UnreachableB->Preds.push_back(AdjacentBlock(this, false), C);
+
+ Succs.push_back(Succ, C);
+}
+
+bool CFGBlock::FilterEdge(const CFGBlock::FilterOptions &F,
+ const CFGBlock *From, const CFGBlock *To) {
+
+ if (F.IgnoreNullPredecessors && !From)
+ return true;
+
+ if (To && From && F.IgnoreDefaultsWithCoveredEnums) {
+    // If 'To' has no label, or has a label that isn't a CaseStmt, filter
+    // this edge.
+ if (const SwitchStmt *S =
+ dyn_cast_or_null<SwitchStmt>(From->getTerminator().getStmt())) {
+ if (S->isAllEnumCasesCovered()) {
+ const Stmt *L = To->getLabel();
+ if (!L || !isa<CaseStmt>(L))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
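+
+// Example (editorial sketch): with IgnoreDefaultsWithCoveredEnums set, the
+// edge into the default block below is filtered, since every enumerator of
+// E has its own case label:
+//
+//   enum E { A, B };
+//   int f(E e) {
+//     switch (e) {
+//     case A: return 0;
+//     case B: return 1;
+//     default: return -1;  // edge to this block gets filtered
+//     }
+//   }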
+
+//===----------------------------------------------------------------------===//
+// CFG pretty printing
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class StmtPrinterHelper : public PrinterHelper {
+ typedef llvm::DenseMap<const Stmt*,std::pair<unsigned,unsigned> > StmtMapTy;
+ typedef llvm::DenseMap<const Decl*,std::pair<unsigned,unsigned> > DeclMapTy;
+ StmtMapTy StmtMap;
+ DeclMapTy DeclMap;
+ signed currentBlock;
+ unsigned currStmt;
+ const LangOptions &LangOpts;
+public:
+
+ StmtPrinterHelper(const CFG* cfg, const LangOptions &LO)
+ : currentBlock(0), currStmt(0), LangOpts(LO)
+ {
+ for (CFG::const_iterator I = cfg->begin(), E = cfg->end(); I != E; ++I ) {
+ unsigned j = 1;
+ for (CFGBlock::const_iterator BI = (*I)->begin(), BEnd = (*I)->end() ;
+ BI != BEnd; ++BI, ++j ) {
+ if (Optional<CFGStmt> SE = BI->getAs<CFGStmt>()) {
+ const Stmt *stmt= SE->getStmt();
+ std::pair<unsigned, unsigned> P((*I)->getBlockID(), j);
+ StmtMap[stmt] = P;
+
+ switch (stmt->getStmtClass()) {
+ case Stmt::DeclStmtClass:
+ DeclMap[cast<DeclStmt>(stmt)->getSingleDecl()] = P;
+ break;
+ case Stmt::IfStmtClass: {
+ const VarDecl *var = cast<IfStmt>(stmt)->getConditionVariable();
+ if (var)
+ DeclMap[var] = P;
+ break;
+ }
+ case Stmt::ForStmtClass: {
+ const VarDecl *var = cast<ForStmt>(stmt)->getConditionVariable();
+ if (var)
+ DeclMap[var] = P;
+ break;
+ }
+ case Stmt::WhileStmtClass: {
+ const VarDecl *var =
+ cast<WhileStmt>(stmt)->getConditionVariable();
+ if (var)
+ DeclMap[var] = P;
+ break;
+ }
+ case Stmt::SwitchStmtClass: {
+ const VarDecl *var =
+ cast<SwitchStmt>(stmt)->getConditionVariable();
+ if (var)
+ DeclMap[var] = P;
+ break;
+ }
+ case Stmt::CXXCatchStmtClass: {
+ const VarDecl *var =
+ cast<CXXCatchStmt>(stmt)->getExceptionDecl();
+ if (var)
+ DeclMap[var] = P;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ ~StmtPrinterHelper() override {}
+
+ const LangOptions &getLangOpts() const { return LangOpts; }
+ void setBlockID(signed i) { currentBlock = i; }
+ void setStmtID(unsigned i) { currStmt = i; }
+
+ bool handledStmt(Stmt *S, raw_ostream &OS) override {
+ StmtMapTy::iterator I = StmtMap.find(S);
+
+ if (I == StmtMap.end())
+ return false;
+
+ if (currentBlock >= 0 && I->second.first == (unsigned) currentBlock
+ && I->second.second == currStmt) {
+ return false;
+ }
+
+ OS << "[B" << I->second.first << "." << I->second.second << "]";
+ return true;
+ }
+
+ bool handleDecl(const Decl *D, raw_ostream &OS) {
+ DeclMapTy::iterator I = DeclMap.find(D);
+
+ if (I == DeclMap.end())
+ return false;
+
+ if (currentBlock >= 0 && I->second.first == (unsigned) currentBlock
+ && I->second.second == currStmt) {
+ return false;
+ }
+
+ OS << "[B" << I->second.first << "." << I->second.second << "]";
+ return true;
+ }
+};
+} // end anonymous namespace
+
+
+namespace {
+class CFGBlockTerminatorPrint
+ : public StmtVisitor<CFGBlockTerminatorPrint,void> {
+
+ raw_ostream &OS;
+ StmtPrinterHelper* Helper;
+ PrintingPolicy Policy;
+public:
+ CFGBlockTerminatorPrint(raw_ostream &os, StmtPrinterHelper* helper,
+ const PrintingPolicy &Policy)
+ : OS(os), Helper(helper), Policy(Policy) {
+ this->Policy.IncludeNewlines = false;
+ }
+
+ void VisitIfStmt(IfStmt *I) {
+ OS << "if ";
+ if (Stmt *C = I->getCond())
+ C->printPretty(OS, Helper, Policy);
+ }
+
+ // Default case.
+ void VisitStmt(Stmt *Terminator) {
+ Terminator->printPretty(OS, Helper, Policy);
+ }
+
+ void VisitDeclStmt(DeclStmt *DS) {
+ VarDecl *VD = cast<VarDecl>(DS->getSingleDecl());
+ OS << "static init " << VD->getName();
+ }
+
+ void VisitForStmt(ForStmt *F) {
+ OS << "for (" ;
+ if (F->getInit())
+ OS << "...";
+ OS << "; ";
+ if (Stmt *C = F->getCond())
+ C->printPretty(OS, Helper, Policy);
+ OS << "; ";
+ if (F->getInc())
+ OS << "...";
+ OS << ")";
+ }
+
+ void VisitWhileStmt(WhileStmt *W) {
+ OS << "while " ;
+ if (Stmt *C = W->getCond())
+ C->printPretty(OS, Helper, Policy);
+ }
+
+ void VisitDoStmt(DoStmt *D) {
+ OS << "do ... while ";
+ if (Stmt *C = D->getCond())
+ C->printPretty(OS, Helper, Policy);
+ }
+
+ void VisitSwitchStmt(SwitchStmt *Terminator) {
+ OS << "switch ";
+ Terminator->getCond()->printPretty(OS, Helper, Policy);
+ }
+
+ void VisitCXXTryStmt(CXXTryStmt *CS) {
+ OS << "try ...";
+ }
+
+ void VisitAbstractConditionalOperator(AbstractConditionalOperator* C) {
+ if (Stmt *Cond = C->getCond())
+ Cond->printPretty(OS, Helper, Policy);
+ OS << " ? ... : ...";
+ }
+
+ void VisitChooseExpr(ChooseExpr *C) {
+ OS << "__builtin_choose_expr( ";
+ if (Stmt *Cond = C->getCond())
+ Cond->printPretty(OS, Helper, Policy);
+ OS << " )";
+ }
+
+ void VisitIndirectGotoStmt(IndirectGotoStmt *I) {
+ OS << "goto *";
+ if (Stmt *T = I->getTarget())
+ T->printPretty(OS, Helper, Policy);
+ }
+
+ void VisitBinaryOperator(BinaryOperator* B) {
+ if (!B->isLogicalOp()) {
+ VisitExpr(B);
+ return;
+ }
+
+ if (B->getLHS())
+ B->getLHS()->printPretty(OS, Helper, Policy);
+
+ switch (B->getOpcode()) {
+ case BO_LOr:
+ OS << " || ...";
+ return;
+ case BO_LAnd:
+ OS << " && ...";
+ return;
+ default:
+ llvm_unreachable("Invalid logical operator.");
+ }
+ }
+
+ void VisitExpr(Expr *E) {
+ E->printPretty(OS, Helper, Policy);
+ }
+
+public:
+ void print(CFGTerminator T) {
+ if (T.isTemporaryDtorsBranch())
+ OS << "(Temp Dtor) ";
+ Visit(T.getStmt());
+ }
+};
+} // end anonymous namespace
+
+static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
+ const CFGElement &E) {
+ if (Optional<CFGStmt> CS = E.getAs<CFGStmt>()) {
+ const Stmt *S = CS->getStmt();
+ assert(S != nullptr && "Expecting non-null Stmt");
+
+    // Special printing for statement-expressions.
+ if (const StmtExpr *SE = dyn_cast<StmtExpr>(S)) {
+ const CompoundStmt *Sub = SE->getSubStmt();
+
+ auto Children = Sub->children();
+ if (Children.begin() != Children.end()) {
+ OS << "({ ... ; ";
+ Helper.handledStmt(*SE->getSubStmt()->body_rbegin(),OS);
+ OS << " })\n";
+ return;
+ }
+ }
+    // Special printing for comma expressions.
+ if (const BinaryOperator* B = dyn_cast<BinaryOperator>(S)) {
+ if (B->getOpcode() == BO_Comma) {
+ OS << "... , ";
+ Helper.handledStmt(B->getRHS(),OS);
+ OS << '\n';
+ return;
+ }
+ }
+ S->printPretty(OS, &Helper, PrintingPolicy(Helper.getLangOpts()));
+
+ if (isa<CXXOperatorCallExpr>(S)) {
+ OS << " (OperatorCall)";
+ }
+ else if (isa<CXXBindTemporaryExpr>(S)) {
+ OS << " (BindTemporary)";
+ }
+ else if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(S)) {
+ OS << " (CXXConstructExpr, " << CCE->getType().getAsString() << ")";
+ }
+ else if (const CastExpr *CE = dyn_cast<CastExpr>(S)) {
+ OS << " (" << CE->getStmtClassName() << ", "
+ << CE->getCastKindName()
+ << ", " << CE->getType().getAsString()
+ << ")";
+ }
+
+ // Expressions need a newline.
+ if (isa<Expr>(S))
+ OS << '\n';
+
+ } else if (Optional<CFGInitializer> IE = E.getAs<CFGInitializer>()) {
+ const CXXCtorInitializer *I = IE->getInitializer();
+ if (I->isBaseInitializer())
+ OS << I->getBaseClass()->getAsCXXRecordDecl()->getName();
+ else if (I->isDelegatingInitializer())
+ OS << I->getTypeSourceInfo()->getType()->getAsCXXRecordDecl()->getName();
+ else OS << I->getAnyMember()->getName();
+
+ OS << "(";
+ if (Expr *IE = I->getInit())
+ IE->printPretty(OS, &Helper, PrintingPolicy(Helper.getLangOpts()));
+ OS << ")";
+
+ if (I->isBaseInitializer())
+ OS << " (Base initializer)\n";
+ else if (I->isDelegatingInitializer())
+ OS << " (Delegating initializer)\n";
+ else OS << " (Member initializer)\n";
+
+ } else if (Optional<CFGAutomaticObjDtor> DE =
+ E.getAs<CFGAutomaticObjDtor>()) {
+ const VarDecl *VD = DE->getVarDecl();
+ Helper.handleDecl(VD, OS);
+
+ const Type* T = VD->getType().getTypePtr();
+ if (const ReferenceType* RT = T->getAs<ReferenceType>())
+ T = RT->getPointeeType().getTypePtr();
+ T = T->getBaseElementTypeUnsafe();
+
+ OS << ".~" << T->getAsCXXRecordDecl()->getName().str() << "()";
+ OS << " (Implicit destructor)\n";
+
+ } else if (Optional<CFGNewAllocator> NE = E.getAs<CFGNewAllocator>()) {
+ OS << "CFGNewAllocator(";
+ if (const CXXNewExpr *AllocExpr = NE->getAllocatorExpr())
+ AllocExpr->getType().print(OS, PrintingPolicy(Helper.getLangOpts()));
+ OS << ")\n";
+ } else if (Optional<CFGDeleteDtor> DE = E.getAs<CFGDeleteDtor>()) {
+ const CXXRecordDecl *RD = DE->getCXXRecordDecl();
+ if (!RD)
+ return;
+ CXXDeleteExpr *DelExpr =
+ const_cast<CXXDeleteExpr*>(DE->getDeleteExpr());
+ Helper.handledStmt(cast<Stmt>(DelExpr->getArgument()), OS);
+ OS << "->~" << RD->getName().str() << "()";
+ OS << " (Implicit destructor)\n";
+ } else if (Optional<CFGBaseDtor> BE = E.getAs<CFGBaseDtor>()) {
+ const CXXBaseSpecifier *BS = BE->getBaseSpecifier();
+ OS << "~" << BS->getType()->getAsCXXRecordDecl()->getName() << "()";
+ OS << " (Base object destructor)\n";
+
+ } else if (Optional<CFGMemberDtor> ME = E.getAs<CFGMemberDtor>()) {
+ const FieldDecl *FD = ME->getFieldDecl();
+ const Type *T = FD->getType()->getBaseElementTypeUnsafe();
+ OS << "this->" << FD->getName();
+ OS << ".~" << T->getAsCXXRecordDecl()->getName() << "()";
+ OS << " (Member object destructor)\n";
+
+ } else if (Optional<CFGTemporaryDtor> TE = E.getAs<CFGTemporaryDtor>()) {
+ const CXXBindTemporaryExpr *BT = TE->getBindTemporaryExpr();
+ OS << "~";
+ BT->getType().print(OS, PrintingPolicy(Helper.getLangOpts()));
+ OS << "() (Temporary object destructor)\n";
+ }
+}
+
+static void print_block(raw_ostream &OS, const CFG* cfg,
+ const CFGBlock &B,
+ StmtPrinterHelper &Helper, bool print_edges,
+ bool ShowColors) {
+
+ Helper.setBlockID(B.getBlockID());
+
+ // Print the header.
+ if (ShowColors)
+ OS.changeColor(raw_ostream::YELLOW, true);
+
+ OS << "\n [B" << B.getBlockID();
+
+ if (&B == &cfg->getEntry())
+ OS << " (ENTRY)]\n";
+ else if (&B == &cfg->getExit())
+ OS << " (EXIT)]\n";
+ else if (&B == cfg->getIndirectGotoBlock())
+ OS << " (INDIRECT GOTO DISPATCH)]\n";
+ else if (B.hasNoReturnElement())
+ OS << " (NORETURN)]\n";
+ else
+ OS << "]\n";
+
+ if (ShowColors)
+ OS.resetColor();
+
+ // Print the label of this block.
+ if (Stmt *Label = const_cast<Stmt*>(B.getLabel())) {
+
+ if (print_edges)
+ OS << " ";
+
+ if (LabelStmt *L = dyn_cast<LabelStmt>(Label))
+ OS << L->getName();
+ else if (CaseStmt *C = dyn_cast<CaseStmt>(Label)) {
+ OS << "case ";
+ if (C->getLHS())
+ C->getLHS()->printPretty(OS, &Helper,
+ PrintingPolicy(Helper.getLangOpts()));
+ if (C->getRHS()) {
+ OS << " ... ";
+ C->getRHS()->printPretty(OS, &Helper,
+ PrintingPolicy(Helper.getLangOpts()));
+ }
+ } else if (isa<DefaultStmt>(Label))
+ OS << "default";
+ else if (CXXCatchStmt *CS = dyn_cast<CXXCatchStmt>(Label)) {
+ OS << "catch (";
+ if (CS->getExceptionDecl())
+ CS->getExceptionDecl()->print(OS, PrintingPolicy(Helper.getLangOpts()),
+ 0);
+ else
+ OS << "...";
+ OS << ")";
+
+ } else
+ llvm_unreachable("Invalid label statement in CFGBlock.");
+
+ OS << ":\n";
+ }
+
+ // Iterate through the statements in the block and print them.
+ unsigned j = 1;
+
+ for (CFGBlock::const_iterator I = B.begin(), E = B.end() ;
+ I != E ; ++I, ++j ) {
+
+ // Print the statement # in the basic block and the statement itself.
+ if (print_edges)
+ OS << " ";
+
+ OS << llvm::format("%3d", j) << ": ";
+
+ Helper.setStmtID(j);
+
+ print_elem(OS, Helper, *I);
+ }
+
+ // Print the terminator of this block.
+ if (B.getTerminator()) {
+ if (ShowColors)
+ OS.changeColor(raw_ostream::GREEN);
+
+ OS << " T: ";
+
+ Helper.setBlockID(-1);
+
+ PrintingPolicy PP(Helper.getLangOpts());
+ CFGBlockTerminatorPrint TPrinter(OS, &Helper, PP);
+ TPrinter.print(B.getTerminator());
+ OS << '\n';
+
+ if (ShowColors)
+ OS.resetColor();
+ }
+
+ if (print_edges) {
+ // Print the predecessors of this block.
+ if (!B.pred_empty()) {
+ const raw_ostream::Colors Color = raw_ostream::BLUE;
+ if (ShowColors)
+ OS.changeColor(Color);
+ OS << " Preds " ;
+ if (ShowColors)
+ OS.resetColor();
+ OS << '(' << B.pred_size() << "):";
+ unsigned i = 0;
+
+ if (ShowColors)
+ OS.changeColor(Color);
+
+ for (CFGBlock::const_pred_iterator I = B.pred_begin(), E = B.pred_end();
+ I != E; ++I, ++i) {
+
+ if (i % 10 == 8)
+ OS << "\n ";
+
+ CFGBlock *B = *I;
+ bool Reachable = true;
+ if (!B) {
+ Reachable = false;
+ B = I->getPossiblyUnreachableBlock();
+ }
+
+ OS << " B" << B->getBlockID();
+ if (!Reachable)
+ OS << "(Unreachable)";
+ }
+
+ if (ShowColors)
+ OS.resetColor();
+
+ OS << '\n';
+ }
+
+ // Print the successors of this block.
+ if (!B.succ_empty()) {
+ const raw_ostream::Colors Color = raw_ostream::MAGENTA;
+ if (ShowColors)
+ OS.changeColor(Color);
+ OS << " Succs ";
+ if (ShowColors)
+ OS.resetColor();
+ OS << '(' << B.succ_size() << "):";
+ unsigned i = 0;
+
+ if (ShowColors)
+ OS.changeColor(Color);
+
+ for (CFGBlock::const_succ_iterator I = B.succ_begin(), E = B.succ_end();
+ I != E; ++I, ++i) {
+
+ if (i % 10 == 8)
+ OS << "\n ";
+
+ CFGBlock *B = *I;
+
+ bool Reachable = true;
+ if (!B) {
+ Reachable = false;
+ B = I->getPossiblyUnreachableBlock();
+ }
+
+ if (B) {
+ OS << " B" << B->getBlockID();
+ if (!Reachable)
+ OS << "(Unreachable)";
+ }
+ else {
+ OS << " NULL";
+ }
+ }
+
+ if (ShowColors)
+ OS.resetColor();
+ OS << '\n';
+ }
+ }
+}
+
+
+/// dump - A simple pretty printer of a CFG that outputs to stderr.
+void CFG::dump(const LangOptions &LO, bool ShowColors) const {
+ print(llvm::errs(), LO, ShowColors);
+}
+
+/// print - A simple pretty printer of a CFG that outputs to an ostream.
+void CFG::print(raw_ostream &OS, const LangOptions &LO, bool ShowColors) const {
+ StmtPrinterHelper Helper(this, LO);
+
+ // Print the entry block.
+ print_block(OS, this, getEntry(), Helper, true, ShowColors);
+
+ // Iterate through the CFGBlocks and print them one by one.
+ for (const_iterator I = Blocks.begin(), E = Blocks.end() ; I != E ; ++I) {
+ // Skip the entry block, because we already printed it.
+ if (&(**I) == &getEntry() || &(**I) == &getExit())
+ continue;
+
+ print_block(OS, this, **I, Helper, true, ShowColors);
+ }
+
+ // Print the exit block.
+ print_block(OS, this, getExit(), Helper, true, ShowColors);
+ OS << '\n';
+ OS.flush();
+}
+
+/// dump - A simple pretty printer of a CFGBlock that outputs to stderr.
+void CFGBlock::dump(const CFG* cfg, const LangOptions &LO,
+ bool ShowColors) const {
+ print(llvm::errs(), cfg, LO, ShowColors);
+}
+
+void CFGBlock::dump() const {
+ dump(getParent(), LangOptions(), false);
+}
+
+/// print - A simple pretty printer of a CFGBlock that outputs to an ostream.
+/// Generally this will only be called from CFG::print.
+void CFGBlock::print(raw_ostream &OS, const CFG* cfg,
+ const LangOptions &LO, bool ShowColors) const {
+ StmtPrinterHelper Helper(cfg, LO);
+ print_block(OS, cfg, *this, Helper, true, ShowColors);
+ OS << '\n';
+}
+
+/// printTerminator - A simple pretty printer of the terminator of a CFGBlock.
+void CFGBlock::printTerminator(raw_ostream &OS,
+ const LangOptions &LO) const {
+ CFGBlockTerminatorPrint TPrinter(OS, nullptr, PrintingPolicy(LO));
+ TPrinter.print(getTerminator());
+}
+
+Stmt *CFGBlock::getTerminatorCondition(bool StripParens) {
+ Stmt *Terminator = this->Terminator;
+ if (!Terminator)
+ return nullptr;
+
+ Expr *E = nullptr;
+
+ switch (Terminator->getStmtClass()) {
+ default:
+ break;
+
+ case Stmt::CXXForRangeStmtClass:
+ E = cast<CXXForRangeStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::ForStmtClass:
+ E = cast<ForStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::WhileStmtClass:
+ E = cast<WhileStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::DoStmtClass:
+ E = cast<DoStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::IfStmtClass:
+ E = cast<IfStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::ChooseExprClass:
+ E = cast<ChooseExpr>(Terminator)->getCond();
+ break;
+
+ case Stmt::IndirectGotoStmtClass:
+ E = cast<IndirectGotoStmt>(Terminator)->getTarget();
+ break;
+
+ case Stmt::SwitchStmtClass:
+ E = cast<SwitchStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::BinaryConditionalOperatorClass:
+ E = cast<BinaryConditionalOperator>(Terminator)->getCond();
+ break;
+
+ case Stmt::ConditionalOperatorClass:
+ E = cast<ConditionalOperator>(Terminator)->getCond();
+ break;
+
+ case Stmt::BinaryOperatorClass: // '&&' and '||'
+ E = cast<BinaryOperator>(Terminator)->getLHS();
+ break;
+
+ case Stmt::ObjCForCollectionStmtClass:
+ return Terminator;
+ }
+
+ if (!StripParens)
+ return E;
+
+ return E ? E->IgnoreParens() : nullptr;
+}
+
+//===----------------------------------------------------------------------===//
+// CFG Graphviz Visualization
+//===----------------------------------------------------------------------===//
+
+
+#ifndef NDEBUG
+static StmtPrinterHelper* GraphHelper;
+#endif
+
+void CFG::viewCFG(const LangOptions &LO) const {
+#ifndef NDEBUG
+ StmtPrinterHelper H(this, LO);
+ GraphHelper = &H;
+ llvm::ViewGraph(this,"CFG");
+ GraphHelper = nullptr;
+#endif
+}
+
+namespace llvm {
+template<>
+struct DOTGraphTraits<const CFG*> : public DefaultDOTGraphTraits {
+
+ DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
+
+ static std::string getNodeLabel(const CFGBlock *Node, const CFG* Graph) {
+
+#ifndef NDEBUG
+ std::string OutSStr;
+ llvm::raw_string_ostream Out(OutSStr);
+ print_block(Out,Graph, *Node, *GraphHelper, false, false);
+ std::string& OutStr = Out.str();
+
+ if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
+
+ // Process string output to make it nicer...
+ for (unsigned i = 0; i != OutStr.length(); ++i)
+ if (OutStr[i] == '\n') { // Left justify
+ OutStr[i] = '\\';
+ OutStr.insert(OutStr.begin()+i+1, 'l');
+ }
+
+ return OutStr;
+#else
+ return "";
+#endif
+ }
+};
+} // end namespace llvm
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CFGReachabilityAnalysis.cpp b/contrib/llvm/tools/clang/lib/Analysis/CFGReachabilityAnalysis.cpp
new file mode 100644
index 0000000..4ae135f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/CFGReachabilityAnalysis.cpp
@@ -0,0 +1,76 @@
+//==- CFGReachabilityAnalysis.cpp - Basic reachability analysis --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a flow-sensitive, (mostly) path-insensitive reachability
+// analysis based on Clang's CFGs. Clients can query if a given basic block
+// is reachable within the CFG.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/SmallVector.h"
+#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
+#include "clang/Analysis/CFG.h"
+
+using namespace clang;
+
+CFGReverseBlockReachabilityAnalysis::CFGReverseBlockReachabilityAnalysis(const CFG &cfg)
+ : analyzed(cfg.getNumBlockIDs(), false) {}
+
+bool CFGReverseBlockReachabilityAnalysis::isReachable(const CFGBlock *Src,
+ const CFGBlock *Dst) {
+
+ const unsigned DstBlockID = Dst->getBlockID();
+
+ // If we haven't analyzed the destination node, run the analysis now
+ if (!analyzed[DstBlockID]) {
+ mapReachability(Dst);
+ analyzed[DstBlockID] = true;
+ }
+
+ // Return the cached result
+ return reachable[DstBlockID][Src->getBlockID()];
+}
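+
+// Usage sketch (editorial addition): 'Src' and 'Dst' are assumed to be
+// blocks of the same CFG 'C'. Results for a destination are computed once
+// and cached.
+//
+//   CFGReverseBlockReachabilityAnalysis Reach(*C);
+//   if (Reach.isReachable(Src, Dst))
+//     ; // some path from Src to Dst exists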
+
+// Maps reachability to a common node by walking the predecessors of the
+// destination node.
+void CFGReverseBlockReachabilityAnalysis::mapReachability(const CFGBlock *Dst) {
+ SmallVector<const CFGBlock *, 11> worklist;
+ llvm::BitVector visited(analyzed.size());
+
+ ReachableSet &DstReachability = reachable[Dst->getBlockID()];
+ DstReachability.resize(analyzed.size(), false);
+
+ // Start searching from the destination node, since we commonly will perform
+ // multiple queries relating to a destination node.
+ worklist.push_back(Dst);
+ bool firstRun = true;
+
+ while (!worklist.empty()) {
+ const CFGBlock *block = worklist.pop_back_val();
+
+ if (visited[block->getBlockID()])
+ continue;
+ visited[block->getBlockID()] = true;
+
+ // Update reachability information for this node -> Dst
+ if (!firstRun) {
+ // Don't insert Dst -> Dst unless it was a predecessor of itself
+ DstReachability[block->getBlockID()] = true;
+ }
+ else
+ firstRun = false;
+
+ // Add the predecessors to the worklist.
+ for (CFGBlock::const_pred_iterator i = block->pred_begin(),
+ e = block->pred_end(); i != e; ++i) {
+ if (*i)
+ worklist.push_back(*i);
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CFGStmtMap.cpp b/contrib/llvm/tools/clang/lib/Analysis/CFGStmtMap.cpp
new file mode 100644
index 0000000..19b8019
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/CFGStmtMap.cpp
@@ -0,0 +1,91 @@
+//===--- CFGStmtMap.cpp - Map from Stmt* to CFGBlock* ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CFGStmtMap class, which provides a mapping from
+// Stmt* to CFGBlock*.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/DenseMap.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/CFGStmtMap.h"
+
+using namespace clang;
+
+typedef llvm::DenseMap<const Stmt*, CFGBlock*> SMap;
+static SMap *AsMap(void *m) { return (SMap*) m; }
+
+CFGStmtMap::~CFGStmtMap() { delete AsMap(M); }
+
+CFGBlock *CFGStmtMap::getBlock(Stmt *S) {
+ SMap *SM = AsMap(M);
+ Stmt *X = S;
+
+ // If 'S' isn't in the map, walk the ParentMap to see if one of its ancestors
+ // is in the map.
+ while (X) {
+ SMap::iterator I = SM->find(X);
+ if (I != SM->end()) {
+ CFGBlock *B = I->second;
+ // Memoize this lookup.
+ if (X != S)
+ (*SM)[X] = B;
+ return B;
+ }
+
+ X = PM->getParentIgnoreParens(X);
+ }
+
+ return nullptr;
+}
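+
+// Example (editorial sketch): because getBlock() walks the ParentMap,
+// looking up a subexpression that is not itself block-level still resolves
+// to the block of its nearest mapped ancestor. 'Map' and 'SubExpr' are
+// assumed to come from the caller:
+//
+//   if (CFGBlock *B = Map->getBlock(SubExpr))
+//     llvm::errs() << "B" << B->getBlockID() << '\n';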
+
+static void Accumulate(SMap &SM, CFGBlock *B) {
+ // First walk the block-level expressions.
+ for (CFGBlock::iterator I = B->begin(), E = B->end(); I != E; ++I) {
+ const CFGElement &CE = *I;
+ Optional<CFGStmt> CS = CE.getAs<CFGStmt>();
+ if (!CS)
+ continue;
+
+ CFGBlock *&Entry = SM[CS->getStmt()];
+    // If 'Entry' is already initialized (e.g., a terminator was already
+    // added), skip.
+ if (Entry)
+ continue;
+
+    Entry = B;
+  }
+
+ // Look at the label of the block.
+ if (Stmt *Label = B->getLabel())
+ SM[Label] = B;
+
+ // Finally, look at the terminator. If the terminator was already added
+ // because it is a block-level expression in another block, overwrite
+ // that mapping.
+ if (Stmt *Term = B->getTerminator())
+ SM[Term] = B;
+}
+
+CFGStmtMap *CFGStmtMap::Build(CFG *C, ParentMap *PM) {
+ if (!C || !PM)
+ return nullptr;
+
+ SMap *SM = new SMap();
+
+ // Walk all blocks, accumulating the block-level expressions, labels,
+ // and terminators.
+ for (CFG::iterator I = C->begin(), E = C->end(); I != E; ++I)
+ Accumulate(*SM, *I);
+
+ return new CFGStmtMap(PM, SM);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp b/contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp
new file mode 100644
index 0000000..d0660346
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp
@@ -0,0 +1,228 @@
+//== CallGraph.cpp - AST-based Call graph ----------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the AST-based CallGraph.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Analysis/CallGraph.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/GraphWriter.h"
+
+using namespace clang;
+
+#define DEBUG_TYPE "CallGraph"
+
+STATISTIC(NumObjCCallEdges, "Number of Objective-C method call edges");
+STATISTIC(NumBlockCallEdges, "Number of block call edges");
+
+namespace {
+/// A helper class, which walks the AST and locates all the call sites in the
+/// given function body.
+class CGBuilder : public StmtVisitor<CGBuilder> {
+ CallGraph *G;
+ CallGraphNode *CallerNode;
+
+public:
+ CGBuilder(CallGraph *g, CallGraphNode *N)
+ : G(g), CallerNode(N) {}
+
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+
+ Decl *getDeclFromCall(CallExpr *CE) {
+ if (FunctionDecl *CalleeDecl = CE->getDirectCallee())
+ return CalleeDecl;
+
+ // Simple detection of a call through a block.
+ Expr *CEE = CE->getCallee()->IgnoreParenImpCasts();
+ if (BlockExpr *Block = dyn_cast<BlockExpr>(CEE)) {
+ NumBlockCallEdges++;
+ return Block->getBlockDecl();
+ }
+
+ return nullptr;
+ }
+
+ void addCalledDecl(Decl *D) {
+ if (G->includeInGraph(D)) {
+ CallGraphNode *CalleeNode = G->getOrInsertNode(D);
+ CallerNode->addCallee(CalleeNode, G);
+ }
+ }
+
+ void VisitCallExpr(CallExpr *CE) {
+ if (Decl *D = getDeclFromCall(CE))
+ addCalledDecl(D);
+ }
+
+ // Adds may-call edges for the ObjC message sends.
+ void VisitObjCMessageExpr(ObjCMessageExpr *ME) {
+ if (ObjCInterfaceDecl *IDecl = ME->getReceiverInterface()) {
+ Selector Sel = ME->getSelector();
+
+ // Find the callee definition within the same translation unit.
+ Decl *D = nullptr;
+ if (ME->isInstanceMessage())
+ D = IDecl->lookupPrivateMethod(Sel);
+ else
+ D = IDecl->lookupPrivateClassMethod(Sel);
+ if (D) {
+ addCalledDecl(D);
+ NumObjCCallEdges++;
+ }
+ }
+ }
+
+ void VisitChildren(Stmt *S) {
+ for (Stmt *SubStmt : S->children())
+ if (SubStmt)
+ this->Visit(SubStmt);
+ }
+};
+
+} // end anonymous namespace
+
+void CallGraph::addNodesForBlocks(DeclContext *D) {
+ if (BlockDecl *BD = dyn_cast<BlockDecl>(D))
+ addNodeForDecl(BD, true);
+
+ for (auto *I : D->decls())
+ if (auto *DC = dyn_cast<DeclContext>(I))
+ addNodesForBlocks(DC);
+}
+
+CallGraph::CallGraph() {
+ Root = getOrInsertNode(nullptr);
+}
+
+CallGraph::~CallGraph() {
+ llvm::DeleteContainerSeconds(FunctionMap);
+}
+
+bool CallGraph::includeInGraph(const Decl *D) {
+ assert(D);
+ if (!D->hasBody())
+ return false;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+    // We skip function template definitions, as their semantics are only
+    // determined when they are instantiated.
+ if (FD->isDependentContext())
+ return false;
+
+ IdentifierInfo *II = FD->getIdentifier();
+ if (II && II->getName().startswith("__inline"))
+ return false;
+ }
+
+ return true;
+}
+
+void CallGraph::addNodeForDecl(Decl* D, bool IsGlobal) {
+ assert(D);
+
+  // Allocate a new node, mark it as root, and process its calls.
+ CallGraphNode *Node = getOrInsertNode(D);
+
+ // Process all the calls by this function as well.
+ CGBuilder builder(this, Node);
+ if (Stmt *Body = D->getBody())
+ builder.Visit(Body);
+}
+
+CallGraphNode *CallGraph::getNode(const Decl *F) const {
+ FunctionMapTy::const_iterator I = FunctionMap.find(F);
+ if (I == FunctionMap.end()) return nullptr;
+ return I->second;
+}
+
+CallGraphNode *CallGraph::getOrInsertNode(Decl *F) {
+ if (F && !isa<ObjCMethodDecl>(F))
+ F = F->getCanonicalDecl();
+
+ CallGraphNode *&Node = FunctionMap[F];
+ if (Node)
+ return Node;
+
+ Node = new CallGraphNode(F);
+ // Make Root node a parent of all functions to make sure all are reachable.
+ if (F)
+ Root->addCallee(Node, this);
+ return Node;
+}
+
+void CallGraph::print(raw_ostream &OS) const {
+ OS << " --- Call graph Dump --- \n";
+
+  // We print the graph in reverse post order, in part to make sure the
+  // output is deterministic.
+ llvm::ReversePostOrderTraversal<const clang::CallGraph*> RPOT(this);
+ for (llvm::ReversePostOrderTraversal<const clang::CallGraph*>::rpo_iterator
+ I = RPOT.begin(), E = RPOT.end(); I != E; ++I) {
+ const CallGraphNode *N = *I;
+
+ OS << " Function: ";
+ if (N == Root)
+ OS << "< root >";
+ else
+ N->print(OS);
+
+ OS << " calls: ";
+ for (CallGraphNode::const_iterator CI = N->begin(),
+ CE = N->end(); CI != CE; ++CI) {
+ assert(*CI != Root && "No one can call the root node.");
+ (*CI)->print(OS);
+ OS << " ";
+ }
+ OS << '\n';
+ }
+ OS.flush();
+}
+
+void CallGraph::dump() const {
+ print(llvm::errs());
+}
+
+void CallGraph::viewGraph() const {
+ llvm::ViewGraph(this, "CallGraph");
+}
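+
+// Usage sketch (editorial addition): a typical client populates the graph
+// from a translation unit and dumps it. addToCallGraph() is assumed to be
+// the TraverseDecl wrapper declared in the public CallGraph.h header; 'Ctx'
+// is the caller's ASTContext.
+//
+//   CallGraph CG;
+//   CG.addToCallGraph(Ctx.getTranslationUnitDecl());
+//   CG.dump();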
+
+void CallGraphNode::print(raw_ostream &os) const {
+ if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(FD))
+ return ND->printName(os);
+ os << "< >";
+}
+
+void CallGraphNode::dump() const {
+ print(llvm::errs());
+}
+
+namespace llvm {
+
+template <>
+struct DOTGraphTraits<const CallGraph*> : public DefaultDOTGraphTraits {
+
+ DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
+
+ static std::string getNodeLabel(const CallGraphNode *Node,
+ const CallGraph *CG) {
+ if (CG->getRoot() == Node) {
+ return "< root >";
+ }
+ if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(Node->getDecl()))
+ return ND->getNameAsString();
+ else
+ return "< >";
+ }
+
+};
+}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp b/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp
new file mode 100644
index 0000000..be1262d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp
@@ -0,0 +1,140 @@
+//===- CocoaConventions.cpp - Special handling of Cocoa conventions -*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements Cocoa naming convention analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/CharInfo.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace clang;
+using namespace ento;
+
+bool cocoa::isRefType(QualType RetTy, StringRef Prefix,
+ StringRef Name) {
+ // Recursively walk the typedef stack, allowing typedefs of reference types.
+ while (const TypedefType *TD = RetTy->getAs<TypedefType>()) {
+ StringRef TDName = TD->getDecl()->getIdentifier()->getName();
+ if (TDName.startswith(Prefix) && TDName.endswith("Ref"))
+ return true;
+    // XPC unfortunately uses CF-style function names, but its types aren't
+    // CF types.
+ if (TDName.startswith("xpc_"))
+ return false;
+ RetTy = TD->getDecl()->getUnderlyingType();
+ }
+
+ if (Name.empty())
+ return false;
+
+  // Is the type void*? Guard against non-pointer types before dereferencing.
+  const PointerType* PT = RetTy->getAs<PointerType>();
+  if (!PT || !PT->getPointeeType().getUnqualifiedType()->isVoidType())
+    return false;
+
+ // Does the name start with the prefix?
+ return Name.startswith(Prefix);
+}
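+
+// Examples (editorial sketch, hypothetical typedefs):
+//
+//   typedef const struct __CFString *CFStringRef;  // matches: "CF" + "Ref"
+//   typedef void *xpc_object_t;                    // rejected: "xpc_" prefix
+//
+// A plain 'void *' return type is also accepted when the function Name
+// itself starts with the given Prefix.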
+
+bool coreFoundation::isCFObjectRef(QualType T) {
+ return cocoa::isRefType(T, "CF") || // Core Foundation.
+ cocoa::isRefType(T, "CG") || // Core Graphics.
+ cocoa::isRefType(T, "DADisk") || // Disk Arbitration API.
+ cocoa::isRefType(T, "DADissenter") ||
+ cocoa::isRefType(T, "DASessionRef");
+}
+
+
+bool cocoa::isCocoaObjectRef(QualType Ty) {
+ if (!Ty->isObjCObjectPointerType())
+ return false;
+
+ const ObjCObjectPointerType *PT = Ty->getAs<ObjCObjectPointerType>();
+
+ // Can be true for objects with the 'NSObject' attribute.
+ if (!PT)
+ return true;
+
+ // We assume that id<..>, id, Class, and Class<..> all represent tracked
+ // objects.
+ if (PT->isObjCIdType() || PT->isObjCQualifiedIdType() ||
+ PT->isObjCClassType() || PT->isObjCQualifiedClassType())
+ return true;
+
+ // Does the interface subclass NSObject?
+ // FIXME: We can memoize here if this gets too expensive.
+ const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
+
+ // Assume that anything declared with a forward declaration and no
+ // @interface subclasses NSObject.
+ if (!ID->hasDefinition())
+ return true;
+
+ for ( ; ID ; ID = ID->getSuperClass())
+ if (ID->getIdentifier()->getName() == "NSObject")
+ return true;
+
+ return false;
+}
+
+bool coreFoundation::followsCreateRule(const FunctionDecl *fn) {
+ // For now, *just* base this on the function name, not on anything else.
+
+ const IdentifierInfo *ident = fn->getIdentifier();
+ if (!ident) return false;
+ StringRef functionName = ident->getName();
+
+ StringRef::iterator it = functionName.begin();
+ StringRef::iterator start = it;
+ StringRef::iterator endI = functionName.end();
+
+ while (true) {
+ // Scan for the start of 'create' or 'copy'.
+ for ( ; it != endI ; ++it) {
+ // Search for the first character. It can either be 'C' or 'c'.
+ char ch = *it;
+ if (ch == 'C' || ch == 'c') {
+ // Make sure this isn't something like 'recreate' or 'Scopy'.
+ if (ch == 'c' && it != start && isLetter(*(it - 1)))
+ continue;
+
+ ++it;
+ break;
+ }
+ }
+
+ // Did we hit the end of the string? If so, we didn't find a match.
+ if (it == endI)
+ return false;
+
+ // Scan for *lowercase* 'reate' or 'opy', followed by no lowercase
+ // character.
+ StringRef suffix = functionName.substr(it - start);
+ if (suffix.startswith("reate")) {
+ it += 5;
+ }
+ else if (suffix.startswith("opy")) {
+ it += 3;
+ } else {
+ // Keep scanning.
+ continue;
+ }
+
+ if (it == endI || !isLowercase(*it))
+ return true;
+
+ // If we matched a lowercase character, it isn't the end of the
+ // word. Keep scanning.
+ }
+}
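+
+// Examples (editorial sketch) of names run through the scan above:
+//
+//   CFStringCreateCopy    -> true   ("Create" not followed by lowercase)
+//   CFDataCreateMutable   -> true   ("Create" followed by uppercase 'M')
+//   recreateContext       -> false  ('c' of "create" follows a letter)
+//   CFStringCreatedThing  -> false  (lowercase 'd' continues the word)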
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CodeInjector.cpp b/contrib/llvm/tools/clang/lib/Analysis/CodeInjector.cpp
new file mode 100644
index 0000000..76bf364
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/CodeInjector.cpp
@@ -0,0 +1,15 @@
+//===-- CodeInjector.cpp ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/CodeInjector.h"
+
+using namespace clang;
+
+CodeInjector::CodeInjector() {}
+CodeInjector::~CodeInjector() {}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/Consumed.cpp b/contrib/llvm/tools/clang/lib/Analysis/Consumed.cpp
new file mode 100644
index 0000000..9df2392
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/Consumed.cpp
@@ -0,0 +1,1453 @@
+//===- Consumed.cpp ---------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// An intra-procedural analysis for checking consumed properties. This is
+// based, in part, on research on linear types.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/Type.h"
+#include "clang/Analysis/Analyses/Consumed.h"
+#include "clang/Analysis/Analyses/PostOrderCFGView.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Basic/OperatorKinds.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
+#include <memory>
+
+// TODO: Adjust states of args to constructors in the same way that arguments to
+// function calls are handled.
+// TODO: Use information from tests in for- and while-loop conditional.
+// TODO: Add notes about the actual and expected state for
+// TODO: Correctly identify unreachable blocks when chaining boolean operators.
+// TODO: Adjust the parser and AttributesList class to support lists of
+// identifiers.
+// TODO: Warn about unreachable code.
+// TODO: Switch to using a bitmap to track unreachable blocks.
+// TODO: Handle variable definitions, e.g. bool valid = x.isValid();
+// if (valid) ...; (Deferred)
+// TODO: Take notes on state transitions to provide better warning messages.
+// (Deferred)
+// TODO: Test nested conditionals: 1) Checking the same value multiple times,
+//       and 2) Checking different values. (Deferred)
+
+using namespace clang;
+using namespace consumed;
+
+// Key method definition
+ConsumedWarningsHandlerBase::~ConsumedWarningsHandlerBase() {}
+
+static SourceLocation getFirstStmtLoc(const CFGBlock *Block) {
+ // Find the source location of the first statement in the block, if the block
+ // is not empty.
+ for (const auto &B : *Block)
+ if (Optional<CFGStmt> CS = B.getAs<CFGStmt>())
+ return CS->getStmt()->getLocStart();
+
+ // Block is empty.
+ // If we have one successor, return the first statement in that block
+ if (Block->succ_size() == 1 && *Block->succ_begin())
+ return getFirstStmtLoc(*Block->succ_begin());
+
+ return SourceLocation();
+}
+
+static SourceLocation getLastStmtLoc(const CFGBlock *Block) {
+ // Find the source location of the last statement in the block, if the block
+ // is not empty.
+ if (const Stmt *StmtNode = Block->getTerminator()) {
+ return StmtNode->getLocStart();
+ } else {
+ for (CFGBlock::const_reverse_iterator BI = Block->rbegin(),
+ BE = Block->rend(); BI != BE; ++BI) {
+ if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>())
+ return CS->getStmt()->getLocStart();
+ }
+ }
+
+ // If we have one successor, return the first statement in that block
+ SourceLocation Loc;
+ if (Block->succ_size() == 1 && *Block->succ_begin())
+ Loc = getFirstStmtLoc(*Block->succ_begin());
+ if (Loc.isValid())
+ return Loc;
+
+ // If we have one predecessor, return the last statement in that block
+ if (Block->pred_size() == 1 && *Block->pred_begin())
+ return getLastStmtLoc(*Block->pred_begin());
+
+ return Loc;
+}
+
+static ConsumedState invertConsumedUnconsumed(ConsumedState State) {
+ switch (State) {
+ case CS_Unconsumed:
+ return CS_Consumed;
+ case CS_Consumed:
+ return CS_Unconsumed;
+ case CS_None:
+ return CS_None;
+ case CS_Unknown:
+ return CS_Unknown;
+ }
+ llvm_unreachable("invalid enum");
+}
+
+static bool isCallableInState(const CallableWhenAttr *CWAttr,
+ ConsumedState State) {
+
+ for (const auto &S : CWAttr->callableStates()) {
+ ConsumedState MappedAttrState = CS_None;
+
+ switch (S) {
+ case CallableWhenAttr::Unknown:
+ MappedAttrState = CS_Unknown;
+ break;
+
+ case CallableWhenAttr::Unconsumed:
+ MappedAttrState = CS_Unconsumed;
+ break;
+
+ case CallableWhenAttr::Consumed:
+ MappedAttrState = CS_Consumed;
+ break;
+ }
+
+ if (MappedAttrState == State)
+ return true;
+ }
+
+ return false;
+}
+
+
+static bool isConsumableType(const QualType &QT) {
+ if (QT->isPointerType() || QT->isReferenceType())
+ return false;
+
+ if (const CXXRecordDecl *RD = QT->getAsCXXRecordDecl())
+ return RD->hasAttr<ConsumableAttr>();
+
+ return false;
+}
+
+static bool isAutoCastType(const QualType &QT) {
+ if (QT->isPointerType() || QT->isReferenceType())
+ return false;
+
+ if (const CXXRecordDecl *RD = QT->getAsCXXRecordDecl())
+ return RD->hasAttr<ConsumableAutoCastAttr>();
+
+ return false;
+}
+
+static bool isSetOnReadPtrType(const QualType &QT) {
+ if (const CXXRecordDecl *RD = QT->getPointeeCXXRecordDecl())
+ return RD->hasAttr<ConsumableSetOnReadAttr>();
+ return false;
+}
+
+
+static bool isKnownState(ConsumedState State) {
+ switch (State) {
+ case CS_Unconsumed:
+ case CS_Consumed:
+ return true;
+ case CS_None:
+ case CS_Unknown:
+ return false;
+ }
+ llvm_unreachable("invalid enum");
+}
+
+static bool isRValueRef(QualType ParamType) {
+ return ParamType->isRValueReferenceType();
+}
+
+static bool isTestingFunction(const FunctionDecl *FunDecl) {
+ return FunDecl->hasAttr<TestTypestateAttr>();
+}
+
+static bool isPointerOrRef(QualType ParamType) {
+ return ParamType->isPointerType() || ParamType->isReferenceType();
+}
+
+static ConsumedState mapConsumableAttrState(const QualType QT) {
+ assert(isConsumableType(QT));
+
+ const ConsumableAttr *CAttr =
+ QT->getAsCXXRecordDecl()->getAttr<ConsumableAttr>();
+
+ switch (CAttr->getDefaultState()) {
+ case ConsumableAttr::Unknown:
+ return CS_Unknown;
+ case ConsumableAttr::Unconsumed:
+ return CS_Unconsumed;
+ case ConsumableAttr::Consumed:
+ return CS_Consumed;
+ }
+ llvm_unreachable("invalid enum");
+}
+
+static ConsumedState
+mapParamTypestateAttrState(const ParamTypestateAttr *PTAttr) {
+ switch (PTAttr->getParamState()) {
+ case ParamTypestateAttr::Unknown:
+ return CS_Unknown;
+ case ParamTypestateAttr::Unconsumed:
+ return CS_Unconsumed;
+ case ParamTypestateAttr::Consumed:
+ return CS_Consumed;
+ }
+ llvm_unreachable("invalid_enum");
+}
+
+static ConsumedState
+mapReturnTypestateAttrState(const ReturnTypestateAttr *RTSAttr) {
+ switch (RTSAttr->getState()) {
+ case ReturnTypestateAttr::Unknown:
+ return CS_Unknown;
+ case ReturnTypestateAttr::Unconsumed:
+ return CS_Unconsumed;
+ case ReturnTypestateAttr::Consumed:
+ return CS_Consumed;
+ }
+ llvm_unreachable("invalid enum");
+}
+
+static ConsumedState mapSetTypestateAttrState(const SetTypestateAttr *STAttr) {
+ switch (STAttr->getNewState()) {
+ case SetTypestateAttr::Unknown:
+ return CS_Unknown;
+ case SetTypestateAttr::Unconsumed:
+ return CS_Unconsumed;
+ case SetTypestateAttr::Consumed:
+ return CS_Consumed;
+ }
+ llvm_unreachable("invalid_enum");
+}
+
+static StringRef stateToString(ConsumedState State) {
+ switch (State) {
+ case consumed::CS_None:
+ return "none";
+
+ case consumed::CS_Unknown:
+ return "unknown";
+
+ case consumed::CS_Unconsumed:
+ return "unconsumed";
+
+ case consumed::CS_Consumed:
+ return "consumed";
+ }
+ llvm_unreachable("invalid enum");
+}
+
+static ConsumedState testsFor(const FunctionDecl *FunDecl) {
+ assert(isTestingFunction(FunDecl));
+ switch (FunDecl->getAttr<TestTypestateAttr>()->getTestState()) {
+ case TestTypestateAttr::Unconsumed:
+ return CS_Unconsumed;
+ case TestTypestateAttr::Consumed:
+ return CS_Consumed;
+ }
+ llvm_unreachable("invalid enum");
+}
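+
+// Context (editorial sketch): the attribute mappings above correspond to
+// user-level annotations on a hypothetical class; attribute spellings follow
+// the documented consumed-analysis attributes and are assumptions here.
+//
+//   class __attribute__((consumable(unconsumed))) File {
+//   public:
+//     __attribute__((return_typestate(unconsumed))) File();
+//     __attribute__((callable_when("unconsumed"))) void write(const char *);
+//     __attribute__((set_typestate(consumed))) void close();
+//     __attribute__((test_typestate(unconsumed))) bool isOpen() const;
+//   };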
+
+namespace {
+struct VarTestResult {
+ const VarDecl *Var;
+ ConsumedState TestsFor;
+};
+} // end anonymous namespace
+
+namespace clang {
+namespace consumed {
+
+enum EffectiveOp {
+ EO_And,
+ EO_Or
+};
+
+class PropagationInfo {
+ enum {
+ IT_None,
+ IT_State,
+ IT_VarTest,
+ IT_BinTest,
+ IT_Var,
+ IT_Tmp
+ } InfoType;
+
+ struct BinTestTy {
+ const BinaryOperator *Source;
+ EffectiveOp EOp;
+ VarTestResult LTest;
+ VarTestResult RTest;
+ };
+
+ union {
+ ConsumedState State;
+ VarTestResult VarTest;
+ const VarDecl *Var;
+ const CXXBindTemporaryExpr *Tmp;
+ BinTestTy BinTest;
+ };
+
+public:
+ PropagationInfo() : InfoType(IT_None) {}
+
+ PropagationInfo(const VarTestResult &VarTest)
+ : InfoType(IT_VarTest), VarTest(VarTest) {}
+
+ PropagationInfo(const VarDecl *Var, ConsumedState TestsFor)
+ : InfoType(IT_VarTest) {
+
+ VarTest.Var = Var;
+ VarTest.TestsFor = TestsFor;
+ }
+
+ PropagationInfo(const BinaryOperator *Source, EffectiveOp EOp,
+ const VarTestResult &LTest, const VarTestResult &RTest)
+ : InfoType(IT_BinTest) {
+
+ BinTest.Source = Source;
+ BinTest.EOp = EOp;
+ BinTest.LTest = LTest;
+ BinTest.RTest = RTest;
+ }
+
+ PropagationInfo(const BinaryOperator *Source, EffectiveOp EOp,
+ const VarDecl *LVar, ConsumedState LTestsFor,
+ const VarDecl *RVar, ConsumedState RTestsFor)
+ : InfoType(IT_BinTest) {
+
+ BinTest.Source = Source;
+ BinTest.EOp = EOp;
+ BinTest.LTest.Var = LVar;
+ BinTest.LTest.TestsFor = LTestsFor;
+ BinTest.RTest.Var = RVar;
+ BinTest.RTest.TestsFor = RTestsFor;
+ }
+
+ PropagationInfo(ConsumedState State)
+ : InfoType(IT_State), State(State) {}
+
+ PropagationInfo(const VarDecl *Var) : InfoType(IT_Var), Var(Var) {}
+ PropagationInfo(const CXXBindTemporaryExpr *Tmp)
+ : InfoType(IT_Tmp), Tmp(Tmp) {}
+
+ const ConsumedState & getState() const {
+ assert(InfoType == IT_State);
+ return State;
+ }
+
+ const VarTestResult & getVarTest() const {
+ assert(InfoType == IT_VarTest);
+ return VarTest;
+ }
+
+ const VarTestResult & getLTest() const {
+ assert(InfoType == IT_BinTest);
+ return BinTest.LTest;
+ }
+
+ const VarTestResult & getRTest() const {
+ assert(InfoType == IT_BinTest);
+ return BinTest.RTest;
+ }
+
+ const VarDecl * getVar() const {
+ assert(InfoType == IT_Var);
+ return Var;
+ }
+
+ const CXXBindTemporaryExpr * getTmp() const {
+ assert(InfoType == IT_Tmp);
+ return Tmp;
+ }
+
+ ConsumedState getAsState(const ConsumedStateMap *StateMap) const {
+ assert(isVar() || isTmp() || isState());
+
+ if (isVar())
+ return StateMap->getState(Var);
+ else if (isTmp())
+ return StateMap->getState(Tmp);
+ else if (isState())
+ return State;
+ else
+ return CS_None;
+ }
+
+ EffectiveOp testEffectiveOp() const {
+ assert(InfoType == IT_BinTest);
+ return BinTest.EOp;
+ }
+
+ const BinaryOperator * testSourceNode() const {
+ assert(InfoType == IT_BinTest);
+ return BinTest.Source;
+ }
+
+ inline bool isValid() const { return InfoType != IT_None; }
+ inline bool isState() const { return InfoType == IT_State; }
+ inline bool isVarTest() const { return InfoType == IT_VarTest; }
+ inline bool isBinTest() const { return InfoType == IT_BinTest; }
+ inline bool isVar() const { return InfoType == IT_Var; }
+ inline bool isTmp() const { return InfoType == IT_Tmp; }
+
+ bool isTest() const {
+ return InfoType == IT_VarTest || InfoType == IT_BinTest;
+ }
+
+ bool isPointerToValue() const {
+ return InfoType == IT_Var || InfoType == IT_Tmp;
+ }
+
+ PropagationInfo invertTest() const {
+ assert(InfoType == IT_VarTest || InfoType == IT_BinTest);
+
+ if (InfoType == IT_VarTest) {
+ return PropagationInfo(VarTest.Var,
+ invertConsumedUnconsumed(VarTest.TestsFor));
+
+ } else if (InfoType == IT_BinTest) {
+ return PropagationInfo(BinTest.Source,
+ BinTest.EOp == EO_And ? EO_Or : EO_And,
+ BinTest.LTest.Var, invertConsumedUnconsumed(BinTest.LTest.TestsFor),
+ BinTest.RTest.Var, invertConsumedUnconsumed(BinTest.RTest.TestsFor));
+ } else {
+ return PropagationInfo();
+ }
+ }
+};
+
+static inline void
+setStateForVarOrTmp(ConsumedStateMap *StateMap, const PropagationInfo &PInfo,
+ ConsumedState State) {
+
+ assert(PInfo.isVar() || PInfo.isTmp());
+
+ if (PInfo.isVar())
+ StateMap->setState(PInfo.getVar(), State);
+ else
+ StateMap->setState(PInfo.getTmp(), State);
+}
+
+class ConsumedStmtVisitor : public ConstStmtVisitor<ConsumedStmtVisitor> {
+
+ typedef llvm::DenseMap<const Stmt *, PropagationInfo> MapType;
+ typedef std::pair<const Stmt *, PropagationInfo> PairType;
+ typedef MapType::iterator InfoEntry;
+ typedef MapType::const_iterator ConstInfoEntry;
+
+ AnalysisDeclContext &AC;
+ ConsumedAnalyzer &Analyzer;
+ ConsumedStateMap *StateMap;
+ MapType PropagationMap;
+
+ InfoEntry findInfo(const Expr *E) {
+ return PropagationMap.find(E->IgnoreParens());
+ }
+ ConstInfoEntry findInfo(const Expr *E) const {
+ return PropagationMap.find(E->IgnoreParens());
+ }
+ void insertInfo(const Expr *E, const PropagationInfo &PI) {
+ PropagationMap.insert(PairType(E->IgnoreParens(), PI));
+ }
+
+ void forwardInfo(const Expr *From, const Expr *To);
+ void copyInfo(const Expr *From, const Expr *To, ConsumedState CS);
+ ConsumedState getInfo(const Expr *From);
+ void setInfo(const Expr *To, ConsumedState NS);
+ void propagateReturnType(const Expr *Call, const FunctionDecl *Fun);
+
+public:
+ void checkCallability(const PropagationInfo &PInfo,
+ const FunctionDecl *FunDecl,
+ SourceLocation BlameLoc);
+ bool handleCall(const CallExpr *Call, const Expr *ObjArg,
+ const FunctionDecl *FunD);
+
+ void VisitBinaryOperator(const BinaryOperator *BinOp);
+ void VisitCallExpr(const CallExpr *Call);
+ void VisitCastExpr(const CastExpr *Cast);
+ void VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *Temp);
+ void VisitCXXConstructExpr(const CXXConstructExpr *Call);
+ void VisitCXXMemberCallExpr(const CXXMemberCallExpr *Call);
+ void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *Call);
+ void VisitDeclRefExpr(const DeclRefExpr *DeclRef);
+  void VisitDeclStmt(const DeclStmt *DeclS);
+ void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Temp);
+ void VisitMemberExpr(const MemberExpr *MExpr);
+ void VisitParmVarDecl(const ParmVarDecl *Param);
+ void VisitReturnStmt(const ReturnStmt *Ret);
+ void VisitUnaryOperator(const UnaryOperator *UOp);
+ void VisitVarDecl(const VarDecl *Var);
+
+ ConsumedStmtVisitor(AnalysisDeclContext &AC, ConsumedAnalyzer &Analyzer,
+ ConsumedStateMap *StateMap)
+ : AC(AC), Analyzer(Analyzer), StateMap(StateMap) {}
+
+ PropagationInfo getInfo(const Expr *StmtNode) const {
+ ConstInfoEntry Entry = findInfo(StmtNode);
+
+ if (Entry != PropagationMap.end())
+ return Entry->second;
+ else
+ return PropagationInfo();
+ }
+
+ void reset(ConsumedStateMap *NewStateMap) {
+ StateMap = NewStateMap;
+ }
+};
+
+
+void ConsumedStmtVisitor::forwardInfo(const Expr *From, const Expr *To) {
+ InfoEntry Entry = findInfo(From);
+ if (Entry != PropagationMap.end())
+ insertInfo(To, Entry->second);
+}
+
+
+// Create a new entry for To, initialized to the current state of From. If NS
+// is not CS_None and From refers to a variable or temporary, also set that
+// object's state to NS.
+void ConsumedStmtVisitor::copyInfo(const Expr *From, const Expr *To,
+ ConsumedState NS) {
+ InfoEntry Entry = findInfo(From);
+ if (Entry != PropagationMap.end()) {
+ PropagationInfo& PInfo = Entry->second;
+ ConsumedState CS = PInfo.getAsState(StateMap);
+ if (CS != CS_None)
+ insertInfo(To, PropagationInfo(CS));
+ if (NS != CS_None && PInfo.isPointerToValue())
+ setStateForVarOrTmp(StateMap, PInfo, NS);
+ }
+}
+
+
+// Return the ConsumedState for From, or CS_None if nothing is recorded.
+ConsumedState ConsumedStmtVisitor::getInfo(const Expr *From) {
+ InfoEntry Entry = findInfo(From);
+ if (Entry != PropagationMap.end()) {
+ PropagationInfo& PInfo = Entry->second;
+ return PInfo.getAsState(StateMap);
+ }
+ return CS_None;
+}
+
+
+// If we already have info for To then update it, otherwise create a new entry.
+void ConsumedStmtVisitor::setInfo(const Expr *To, ConsumedState NS) {
+ InfoEntry Entry = findInfo(To);
+ if (Entry != PropagationMap.end()) {
+ PropagationInfo& PInfo = Entry->second;
+ if (PInfo.isPointerToValue())
+ setStateForVarOrTmp(StateMap, PInfo, NS);
+ } else if (NS != CS_None) {
+ insertInfo(To, PropagationInfo(NS));
+ }
+}
+
+
+void ConsumedStmtVisitor::checkCallability(const PropagationInfo &PInfo,
+ const FunctionDecl *FunDecl,
+ SourceLocation BlameLoc) {
+ assert(!PInfo.isTest());
+
+ const CallableWhenAttr *CWAttr = FunDecl->getAttr<CallableWhenAttr>();
+ if (!CWAttr)
+ return;
+
+ if (PInfo.isVar()) {
+ ConsumedState VarState = StateMap->getState(PInfo.getVar());
+
+ if (VarState == CS_None || isCallableInState(CWAttr, VarState))
+ return;
+
+ Analyzer.WarningsHandler.warnUseInInvalidState(
+ FunDecl->getNameAsString(), PInfo.getVar()->getNameAsString(),
+ stateToString(VarState), BlameLoc);
+
+ } else {
+ ConsumedState TmpState = PInfo.getAsState(StateMap);
+
+ if (TmpState == CS_None || isCallableInState(CWAttr, TmpState))
+ return;
+
+ Analyzer.WarningsHandler.warnUseOfTempInInvalidState(
+ FunDecl->getNameAsString(), stateToString(TmpState), BlameLoc);
+ }
+}
+
+
+// Factors out common behavior for function, method, and operator calls:
+// checks argument states against parameter annotations and adjusts
+// caller-side state where necessary. Returns true if the state of ObjArg
+// was set, false otherwise.
+bool ConsumedStmtVisitor::handleCall(const CallExpr *Call, const Expr *ObjArg,
+ const FunctionDecl *FunD) {
+ unsigned Offset = 0;
+ if (isa<CXXOperatorCallExpr>(Call) && isa<CXXMethodDecl>(FunD))
+ Offset = 1; // first argument is 'this'
+
+ // check explicit parameters
+ for (unsigned Index = Offset; Index < Call->getNumArgs(); ++Index) {
+ // Skip variable argument lists.
+ if (Index - Offset >= FunD->getNumParams())
+ break;
+
+ const ParmVarDecl *Param = FunD->getParamDecl(Index - Offset);
+ QualType ParamType = Param->getType();
+
+ InfoEntry Entry = findInfo(Call->getArg(Index));
+
+ if (Entry == PropagationMap.end() || Entry->second.isTest())
+ continue;
+ PropagationInfo PInfo = Entry->second;
+
+ // Check that the parameter is in the correct state.
+ if (ParamTypestateAttr *PTA = Param->getAttr<ParamTypestateAttr>()) {
+ ConsumedState ParamState = PInfo.getAsState(StateMap);
+ ConsumedState ExpectedState = mapParamTypestateAttrState(PTA);
+
+ if (ParamState != ExpectedState)
+ Analyzer.WarningsHandler.warnParamTypestateMismatch(
+ Call->getArg(Index)->getExprLoc(),
+ stateToString(ExpectedState), stateToString(ParamState));
+ }
+
+ if (!(Entry->second.isVar() || Entry->second.isTmp()))
+ continue;
+
+ // Adjust state on the caller side.
+ if (isRValueRef(ParamType))
+ setStateForVarOrTmp(StateMap, PInfo, consumed::CS_Consumed);
+ else if (ReturnTypestateAttr *RT = Param->getAttr<ReturnTypestateAttr>())
+ setStateForVarOrTmp(StateMap, PInfo, mapReturnTypestateAttrState(RT));
+ else if (isPointerOrRef(ParamType) &&
+ (!ParamType->getPointeeType().isConstQualified() ||
+ isSetOnReadPtrType(ParamType)))
+ setStateForVarOrTmp(StateMap, PInfo, consumed::CS_Unknown);
+ }
+
+ if (!ObjArg)
+ return false;
+
+  // Check the implicit object argument ('this'), if present.
+ InfoEntry Entry = findInfo(ObjArg);
+ if (Entry != PropagationMap.end()) {
+ PropagationInfo PInfo = Entry->second;
+ checkCallability(PInfo, FunD, Call->getExprLoc());
+
+ if (SetTypestateAttr *STA = FunD->getAttr<SetTypestateAttr>()) {
+ if (PInfo.isVar()) {
+ StateMap->setState(PInfo.getVar(), mapSetTypestateAttrState(STA));
+ return true;
+ }
+ else if (PInfo.isTmp()) {
+ StateMap->setState(PInfo.getTmp(), mapSetTypestateAttrState(STA));
+ return true;
+ }
+ }
+ else if (isTestingFunction(FunD) && PInfo.isVar()) {
+ PropagationMap.insert(PairType(Call,
+ PropagationInfo(PInfo.getVar(), testsFor(FunD))));
+ }
+ }
+ return false;
+}
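+// For illustration (names invented): given a consumable class C and a sink
+// 'void sink(C &&Val);', the call 'sink(std::move(Obj));' takes an rvalue
+// reference, so the loop above marks 'Obj' CS_Consumed on the caller side; a
+// 'const C &' parameter leaves the caller's state for 'Obj' untouched.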
+
+
+void ConsumedStmtVisitor::propagateReturnType(const Expr *Call,
+ const FunctionDecl *Fun) {
+ QualType RetType = Fun->getCallResultType();
+ if (RetType->isReferenceType())
+ RetType = RetType->getPointeeType();
+
+ if (isConsumableType(RetType)) {
+ ConsumedState ReturnState;
+ if (ReturnTypestateAttr *RTA = Fun->getAttr<ReturnTypestateAttr>())
+ ReturnState = mapReturnTypestateAttrState(RTA);
+ else
+ ReturnState = mapConsumableAttrState(RetType);
+
+ PropagationMap.insert(PairType(Call, PropagationInfo(ReturnState)));
+ }
+}
+
+
+void ConsumedStmtVisitor::VisitBinaryOperator(const BinaryOperator *BinOp) {
+ switch (BinOp->getOpcode()) {
+ case BO_LAnd:
+ case BO_LOr : {
+ InfoEntry LEntry = findInfo(BinOp->getLHS()),
+ REntry = findInfo(BinOp->getRHS());
+
+ VarTestResult LTest, RTest;
+
+ if (LEntry != PropagationMap.end() && LEntry->second.isVarTest()) {
+ LTest = LEntry->second.getVarTest();
+
+ } else {
+ LTest.Var = nullptr;
+ LTest.TestsFor = CS_None;
+ }
+
+ if (REntry != PropagationMap.end() && REntry->second.isVarTest()) {
+ RTest = REntry->second.getVarTest();
+
+ } else {
+ RTest.Var = nullptr;
+ RTest.TestsFor = CS_None;
+ }
+
+      if (LTest.Var != nullptr || RTest.Var != nullptr)
+ PropagationMap.insert(PairType(BinOp, PropagationInfo(BinOp,
+ static_cast<EffectiveOp>(BinOp->getOpcode() == BO_LOr), LTest, RTest)));
+
+ break;
+ }
+
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ forwardInfo(BinOp->getLHS(), BinOp);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void ConsumedStmtVisitor::VisitCallExpr(const CallExpr *Call) {
+ const FunctionDecl *FunDecl = Call->getDirectCallee();
+ if (!FunDecl)
+ return;
+
+ // Special case for the std::move function.
+ // TODO: Make this more specific. (Deferred)
+ if (Call->getNumArgs() == 1 && FunDecl->getNameAsString() == "move" &&
+ FunDecl->isInStdNamespace()) {
+ copyInfo(Call->getArg(0), Call, CS_Consumed);
+ return;
+ }
+
+ handleCall(Call, nullptr, FunDecl);
+ propagateReturnType(Call, FunDecl);
+}
+
+void ConsumedStmtVisitor::VisitCastExpr(const CastExpr *Cast) {
+ forwardInfo(Cast->getSubExpr(), Cast);
+}
+
+void ConsumedStmtVisitor::VisitCXXBindTemporaryExpr(
+ const CXXBindTemporaryExpr *Temp) {
+
+ InfoEntry Entry = findInfo(Temp->getSubExpr());
+
+ if (Entry != PropagationMap.end() && !Entry->second.isTest()) {
+ StateMap->setState(Temp, Entry->second.getAsState(StateMap));
+ PropagationMap.insert(PairType(Temp, PropagationInfo(Temp)));
+ }
+}
+
+void ConsumedStmtVisitor::VisitCXXConstructExpr(const CXXConstructExpr *Call) {
+ CXXConstructorDecl *Constructor = Call->getConstructor();
+
+ ASTContext &CurrContext = AC.getASTContext();
+ QualType ThisType = Constructor->getThisType(CurrContext)->getPointeeType();
+
+ if (!isConsumableType(ThisType))
+ return;
+
+ // FIXME: What should happen if someone annotates the move constructor?
+ if (ReturnTypestateAttr *RTA = Constructor->getAttr<ReturnTypestateAttr>()) {
+ // TODO: Adjust state of args appropriately.
+ ConsumedState RetState = mapReturnTypestateAttrState(RTA);
+ PropagationMap.insert(PairType(Call, PropagationInfo(RetState)));
+ } else if (Constructor->isDefaultConstructor()) {
+ PropagationMap.insert(PairType(Call,
+ PropagationInfo(consumed::CS_Consumed)));
+ } else if (Constructor->isMoveConstructor()) {
+ copyInfo(Call->getArg(0), Call, CS_Consumed);
+ } else if (Constructor->isCopyConstructor()) {
+    // Copy state from the arg; if the type is set-on-read, reset the arg to
+    // CS_Unknown.
+ ConsumedState NS =
+ isSetOnReadPtrType(Constructor->getThisType(CurrContext)) ?
+ CS_Unknown : CS_None;
+ copyInfo(Call->getArg(0), Call, NS);
+ } else {
+ // TODO: Adjust state of args appropriately.
+ ConsumedState RetState = mapConsumableAttrState(ThisType);
+ PropagationMap.insert(PairType(Call, PropagationInfo(RetState)));
+ }
+}
+
+
+void ConsumedStmtVisitor::VisitCXXMemberCallExpr(
+ const CXXMemberCallExpr *Call) {
+ CXXMethodDecl* MD = Call->getMethodDecl();
+ if (!MD)
+ return;
+
+ handleCall(Call, Call->getImplicitObjectArgument(), MD);
+ propagateReturnType(Call, MD);
+}
+
+
+void ConsumedStmtVisitor::VisitCXXOperatorCallExpr(
+ const CXXOperatorCallExpr *Call) {
+
+ const FunctionDecl *FunDecl =
+ dyn_cast_or_null<FunctionDecl>(Call->getDirectCallee());
+ if (!FunDecl) return;
+
+ if (Call->getOperator() == OO_Equal) {
+ ConsumedState CS = getInfo(Call->getArg(1));
+ if (!handleCall(Call, Call->getArg(0), FunDecl))
+ setInfo(Call->getArg(0), CS);
+ return;
+ }
+
+ if (const CXXMemberCallExpr *MCall = dyn_cast<CXXMemberCallExpr>(Call))
+ handleCall(MCall, MCall->getImplicitObjectArgument(), FunDecl);
+ else
+ handleCall(Call, Call->getArg(0), FunDecl);
+
+ propagateReturnType(Call, FunDecl);
+}
+
+void ConsumedStmtVisitor::VisitDeclRefExpr(const DeclRefExpr *DeclRef) {
+ if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(DeclRef->getDecl()))
+ if (StateMap->getState(Var) != consumed::CS_None)
+ PropagationMap.insert(PairType(DeclRef, PropagationInfo(Var)));
+}
+
+void ConsumedStmtVisitor::VisitDeclStmt(const DeclStmt *DeclS) {
+ for (const auto *DI : DeclS->decls())
+ if (isa<VarDecl>(DI))
+ VisitVarDecl(cast<VarDecl>(DI));
+
+ if (DeclS->isSingleDecl())
+ if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(DeclS->getSingleDecl()))
+ PropagationMap.insert(PairType(DeclS, PropagationInfo(Var)));
+}
+
+void ConsumedStmtVisitor::VisitMaterializeTemporaryExpr(
+ const MaterializeTemporaryExpr *Temp) {
+
+ forwardInfo(Temp->GetTemporaryExpr(), Temp);
+}
+
+void ConsumedStmtVisitor::VisitMemberExpr(const MemberExpr *MExpr) {
+ forwardInfo(MExpr->getBase(), MExpr);
+}
+
+
+void ConsumedStmtVisitor::VisitParmVarDecl(const ParmVarDecl *Param) {
+ QualType ParamType = Param->getType();
+ ConsumedState ParamState = consumed::CS_None;
+
+ if (const ParamTypestateAttr *PTA = Param->getAttr<ParamTypestateAttr>())
+ ParamState = mapParamTypestateAttrState(PTA);
+ else if (isConsumableType(ParamType))
+ ParamState = mapConsumableAttrState(ParamType);
+ else if (isRValueRef(ParamType) &&
+ isConsumableType(ParamType->getPointeeType()))
+ ParamState = mapConsumableAttrState(ParamType->getPointeeType());
+ else if (ParamType->isReferenceType() &&
+ isConsumableType(ParamType->getPointeeType()))
+ ParamState = consumed::CS_Unknown;
+
+ if (ParamState != CS_None)
+ StateMap->setState(Param, ParamState);
+}
+
+void ConsumedStmtVisitor::VisitReturnStmt(const ReturnStmt *Ret) {
+ ConsumedState ExpectedState = Analyzer.getExpectedReturnState();
+
+ if (ExpectedState != CS_None) {
+ InfoEntry Entry = findInfo(Ret->getRetValue());
+
+ if (Entry != PropagationMap.end()) {
+ ConsumedState RetState = Entry->second.getAsState(StateMap);
+
+ if (RetState != ExpectedState)
+ Analyzer.WarningsHandler.warnReturnTypestateMismatch(
+ Ret->getReturnLoc(), stateToString(ExpectedState),
+ stateToString(RetState));
+ }
+ }
+
+ StateMap->checkParamsForReturnTypestate(Ret->getLocStart(),
+ Analyzer.WarningsHandler);
+}
+
+void ConsumedStmtVisitor::VisitUnaryOperator(const UnaryOperator *UOp) {
+ InfoEntry Entry = findInfo(UOp->getSubExpr());
+ if (Entry == PropagationMap.end()) return;
+
+ switch (UOp->getOpcode()) {
+ case UO_AddrOf:
+ PropagationMap.insert(PairType(UOp, Entry->second));
+ break;
+
+ case UO_LNot:
+ if (Entry->second.isTest())
+ PropagationMap.insert(PairType(UOp, Entry->second.invertTest()));
+ break;
+
+ default:
+ break;
+ }
+}
+
+// TODO: See if I need to check for reference types here.
+void ConsumedStmtVisitor::VisitVarDecl(const VarDecl *Var) {
+ if (isConsumableType(Var->getType())) {
+ if (Var->hasInit()) {
+ MapType::iterator VIT = findInfo(Var->getInit()->IgnoreImplicit());
+ if (VIT != PropagationMap.end()) {
+ PropagationInfo PInfo = VIT->second;
+ ConsumedState St = PInfo.getAsState(StateMap);
+
+ if (St != consumed::CS_None) {
+ StateMap->setState(Var, St);
+ return;
+ }
+ }
+ }
+    // Otherwise, the variable's initial state is unknown.
+ StateMap->setState(Var, consumed::CS_Unknown);
+ }
+}
+}} // end namespaces clang::consumed
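+// Illustrative sketch (not part of the original file) of the annotated code
+// this visitor interprets; the 'File' type and its members are invented, but
+// the attribute spellings are the ones the consumed analysis reads:
+//
+//   class [[clang::consumable(unconsumed)]] File {
+//   public:
+//     [[clang::return_typestate(unconsumed)]] File();
+//     [[clang::callable_when("unconsumed")]] void write(int Byte);
+//     [[clang::set_typestate(consumed)]] void close();
+//     [[clang::test_typestate(unconsumed)]] bool isOpen() const;
+//   };
+//
+//   void demo() {
+//     File F;          // return_typestate: F starts in CS_Unconsumed.
+//     F.write(0);      // OK: callable_when("unconsumed") is satisfied.
+//     F.close();       // set_typestate: F moves to CS_Consumed.
+//     F.write(0);      // Warning: 'write' called in the 'consumed' state.
+//     if (F.isOpen())  // test_typestate: produces an IT_VarTest for F,
+//       F.write(0);    // which splitVarStateForIf uses to refine branches.
+//   }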
+
+namespace clang {
+namespace consumed {
+
+static void splitVarStateForIf(const IfStmt *IfNode, const VarTestResult &Test,
+ ConsumedStateMap *ThenStates,
+ ConsumedStateMap *ElseStates) {
+ ConsumedState VarState = ThenStates->getState(Test.Var);
+
+ if (VarState == CS_Unknown) {
+ ThenStates->setState(Test.Var, Test.TestsFor);
+ ElseStates->setState(Test.Var, invertConsumedUnconsumed(Test.TestsFor));
+
+ } else if (VarState == invertConsumedUnconsumed(Test.TestsFor)) {
+ ThenStates->markUnreachable();
+
+ } else if (VarState == Test.TestsFor) {
+ ElseStates->markUnreachable();
+ }
+}
+
+static void splitVarStateForIfBinOp(const PropagationInfo &PInfo,
+ ConsumedStateMap *ThenStates,
+ ConsumedStateMap *ElseStates) {
+ const VarTestResult &LTest = PInfo.getLTest(),
+ &RTest = PInfo.getRTest();
+
+ ConsumedState LState = LTest.Var ? ThenStates->getState(LTest.Var) : CS_None,
+ RState = RTest.Var ? ThenStates->getState(RTest.Var) : CS_None;
+
+ if (LTest.Var) {
+ if (PInfo.testEffectiveOp() == EO_And) {
+ if (LState == CS_Unknown) {
+ ThenStates->setState(LTest.Var, LTest.TestsFor);
+
+ } else if (LState == invertConsumedUnconsumed(LTest.TestsFor)) {
+ ThenStates->markUnreachable();
+
+ } else if (LState == LTest.TestsFor && isKnownState(RState)) {
+ if (RState == RTest.TestsFor)
+ ElseStates->markUnreachable();
+ else
+ ThenStates->markUnreachable();
+ }
+
+ } else {
+ if (LState == CS_Unknown) {
+ ElseStates->setState(LTest.Var,
+ invertConsumedUnconsumed(LTest.TestsFor));
+
+ } else if (LState == LTest.TestsFor) {
+ ElseStates->markUnreachable();
+
+ } else if (LState == invertConsumedUnconsumed(LTest.TestsFor) &&
+ isKnownState(RState)) {
+
+ if (RState == RTest.TestsFor)
+ ElseStates->markUnreachable();
+ else
+ ThenStates->markUnreachable();
+ }
+ }
+ }
+
+ if (RTest.Var) {
+ if (PInfo.testEffectiveOp() == EO_And) {
+ if (RState == CS_Unknown)
+ ThenStates->setState(RTest.Var, RTest.TestsFor);
+ else if (RState == invertConsumedUnconsumed(RTest.TestsFor))
+ ThenStates->markUnreachable();
+
+ } else {
+ if (RState == CS_Unknown)
+ ElseStates->setState(RTest.Var,
+ invertConsumedUnconsumed(RTest.TestsFor));
+ else if (RState == RTest.TestsFor)
+ ElseStates->markUnreachable();
+ }
+ }
+}
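+// For example, for 'if (isOpen(A) && isOpen(B))' (EO_And), both A and B can
+// be refined to their tested state on the then-branch, while for
+// 'if (isOpen(A) || isOpen(B))' (EO_Or) only the else-branch can be refined,
+// to the inverted state of each test.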
+
+bool ConsumedBlockInfo::allBackEdgesVisited(const CFGBlock *CurrBlock,
+ const CFGBlock *TargetBlock) {
+
+ assert(CurrBlock && "Block pointer must not be NULL");
+ assert(TargetBlock && "TargetBlock pointer must not be NULL");
+
+ unsigned int CurrBlockOrder = VisitOrder[CurrBlock->getBlockID()];
+ for (CFGBlock::const_pred_iterator PI = TargetBlock->pred_begin(),
+ PE = TargetBlock->pred_end(); PI != PE; ++PI) {
+ if (*PI && CurrBlockOrder < VisitOrder[(*PI)->getBlockID()] )
+ return false;
+ }
+ return true;
+}
+
+void ConsumedBlockInfo::addInfo(
+ const CFGBlock *Block, ConsumedStateMap *StateMap,
+ std::unique_ptr<ConsumedStateMap> &OwnedStateMap) {
+
+ assert(Block && "Block pointer must not be NULL");
+
+ auto &Entry = StateMapsArray[Block->getBlockID()];
+
+ if (Entry) {
+ Entry->intersect(*StateMap);
+ } else if (OwnedStateMap)
+ Entry = std::move(OwnedStateMap);
+ else
+ Entry = llvm::make_unique<ConsumedStateMap>(*StateMap);
+}
+
+void ConsumedBlockInfo::addInfo(const CFGBlock *Block,
+ std::unique_ptr<ConsumedStateMap> StateMap) {
+
+ assert(Block && "Block pointer must not be NULL");
+
+ auto &Entry = StateMapsArray[Block->getBlockID()];
+
+ if (Entry) {
+ Entry->intersect(*StateMap);
+ } else {
+ Entry = std::move(StateMap);
+ }
+}
+
+ConsumedStateMap* ConsumedBlockInfo::borrowInfo(const CFGBlock *Block) {
+ assert(Block && "Block pointer must not be NULL");
+ assert(StateMapsArray[Block->getBlockID()] && "Block has no block info");
+
+ return StateMapsArray[Block->getBlockID()].get();
+}
+
+void ConsumedBlockInfo::discardInfo(const CFGBlock *Block) {
+ StateMapsArray[Block->getBlockID()] = nullptr;
+}
+
+std::unique_ptr<ConsumedStateMap>
+ConsumedBlockInfo::getInfo(const CFGBlock *Block) {
+ assert(Block && "Block pointer must not be NULL");
+
+ auto &Entry = StateMapsArray[Block->getBlockID()];
+ return isBackEdgeTarget(Block) ? llvm::make_unique<ConsumedStateMap>(*Entry)
+ : std::move(Entry);
+}
+
+bool ConsumedBlockInfo::isBackEdge(const CFGBlock *From, const CFGBlock *To) {
+ assert(From && "From block must not be NULL");
+  assert(To && "To block must not be NULL");
+
+ return VisitOrder[From->getBlockID()] > VisitOrder[To->getBlockID()];
+}
+
+bool ConsumedBlockInfo::isBackEdgeTarget(const CFGBlock *Block) {
+ assert(Block && "Block pointer must not be NULL");
+
+  // Anything with fewer than two predecessors can't be the target of a back
+  // edge.
+ if (Block->pred_size() < 2)
+ return false;
+
+ unsigned int BlockVisitOrder = VisitOrder[Block->getBlockID()];
+ for (CFGBlock::const_pred_iterator PI = Block->pred_begin(),
+ PE = Block->pred_end(); PI != PE; ++PI) {
+ if (*PI && BlockVisitOrder < VisitOrder[(*PI)->getBlockID()])
+ return true;
+ }
+ return false;
+}
+
+void ConsumedStateMap::checkParamsForReturnTypestate(SourceLocation BlameLoc,
+ ConsumedWarningsHandlerBase &WarningsHandler) const {
+
+ for (const auto &DM : VarMap) {
+ if (isa<ParmVarDecl>(DM.first)) {
+ const ParmVarDecl *Param = cast<ParmVarDecl>(DM.first);
+ const ReturnTypestateAttr *RTA = Param->getAttr<ReturnTypestateAttr>();
+
+ if (!RTA)
+ continue;
+
+ ConsumedState ExpectedState = mapReturnTypestateAttrState(RTA);
+ if (DM.second != ExpectedState)
+ WarningsHandler.warnParamReturnTypestateMismatch(BlameLoc,
+ Param->getNameAsString(), stateToString(ExpectedState),
+ stateToString(DM.second));
+ }
+ }
+}
+
+void ConsumedStateMap::clearTemporaries() {
+ TmpMap.clear();
+}
+
+ConsumedState ConsumedStateMap::getState(const VarDecl *Var) const {
+ VarMapType::const_iterator Entry = VarMap.find(Var);
+
+ if (Entry != VarMap.end())
+ return Entry->second;
+
+ return CS_None;
+}
+
+ConsumedState
+ConsumedStateMap::getState(const CXXBindTemporaryExpr *Tmp) const {
+ TmpMapType::const_iterator Entry = TmpMap.find(Tmp);
+
+ if (Entry != TmpMap.end())
+ return Entry->second;
+
+ return CS_None;
+}
+
+void ConsumedStateMap::intersect(const ConsumedStateMap &Other) {
+ ConsumedState LocalState;
+
+ if (this->From && this->From == Other.From && !Other.Reachable) {
+ this->markUnreachable();
+ return;
+ }
+
+ for (const auto &DM : Other.VarMap) {
+ LocalState = this->getState(DM.first);
+
+ if (LocalState == CS_None)
+ continue;
+
+ if (LocalState != DM.second)
+ VarMap[DM.first] = CS_Unknown;
+ }
+}
+
+void ConsumedStateMap::intersectAtLoopHead(const CFGBlock *LoopHead,
+ const CFGBlock *LoopBack, const ConsumedStateMap *LoopBackStates,
+ ConsumedWarningsHandlerBase &WarningsHandler) {
+
+ ConsumedState LocalState;
+ SourceLocation BlameLoc = getLastStmtLoc(LoopBack);
+
+ for (const auto &DM : LoopBackStates->VarMap) {
+ LocalState = this->getState(DM.first);
+
+ if (LocalState == CS_None)
+ continue;
+
+ if (LocalState != DM.second) {
+ VarMap[DM.first] = CS_Unknown;
+ WarningsHandler.warnLoopStateMismatch(BlameLoc,
+ DM.first->getNameAsString());
+ }
+ }
+}
+
+void ConsumedStateMap::markUnreachable() {
+ this->Reachable = false;
+ VarMap.clear();
+ TmpMap.clear();
+}
+
+void ConsumedStateMap::setState(const VarDecl *Var, ConsumedState State) {
+ VarMap[Var] = State;
+}
+
+void ConsumedStateMap::setState(const CXXBindTemporaryExpr *Tmp,
+ ConsumedState State) {
+ TmpMap[Tmp] = State;
+}
+
+void ConsumedStateMap::remove(const CXXBindTemporaryExpr *Tmp) {
+ TmpMap.erase(Tmp);
+}
+
+bool ConsumedStateMap::operator!=(const ConsumedStateMap *Other) const {
+ for (const auto &DM : Other->VarMap)
+ if (this->getState(DM.first) != DM.second)
+ return true;
+ return false;
+}
+
+void ConsumedAnalyzer::determineExpectedReturnState(AnalysisDeclContext &AC,
+ const FunctionDecl *D) {
+ QualType ReturnType;
+ if (const CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
+ ASTContext &CurrContext = AC.getASTContext();
+ ReturnType = Constructor->getThisType(CurrContext)->getPointeeType();
+ } else
+ ReturnType = D->getCallResultType();
+
+ if (const ReturnTypestateAttr *RTSAttr = D->getAttr<ReturnTypestateAttr>()) {
+ const CXXRecordDecl *RD = ReturnType->getAsCXXRecordDecl();
+ if (!RD || !RD->hasAttr<ConsumableAttr>()) {
+ // FIXME: This should be removed when template instantiation propagates
+ // attributes at template specialization definition, not
+ // declaration. When it is removed the test needs to be enabled
+ // in SemaDeclAttr.cpp.
+ WarningsHandler.warnReturnTypestateForUnconsumableType(
+ RTSAttr->getLocation(), ReturnType.getAsString());
+ ExpectedReturnState = CS_None;
+ } else
+ ExpectedReturnState = mapReturnTypestateAttrState(RTSAttr);
+ } else if (isConsumableType(ReturnType)) {
+    // We can auto-cast the state to the expected state.
+    if (isAutoCastType(ReturnType))
+      ExpectedReturnState = CS_None;
+ else
+ ExpectedReturnState = mapConsumableAttrState(ReturnType);
+ }
+ else
+ ExpectedReturnState = CS_None;
+}
+
+bool ConsumedAnalyzer::splitState(const CFGBlock *CurrBlock,
+ const ConsumedStmtVisitor &Visitor) {
+
+ std::unique_ptr<ConsumedStateMap> FalseStates(
+ new ConsumedStateMap(*CurrStates));
+ PropagationInfo PInfo;
+
+ if (const IfStmt *IfNode =
+ dyn_cast_or_null<IfStmt>(CurrBlock->getTerminator().getStmt())) {
+
+ const Expr *Cond = IfNode->getCond();
+
+ PInfo = Visitor.getInfo(Cond);
+ if (!PInfo.isValid() && isa<BinaryOperator>(Cond))
+ PInfo = Visitor.getInfo(cast<BinaryOperator>(Cond)->getRHS());
+
+ if (PInfo.isVarTest()) {
+ CurrStates->setSource(Cond);
+ FalseStates->setSource(Cond);
+ splitVarStateForIf(IfNode, PInfo.getVarTest(), CurrStates.get(),
+ FalseStates.get());
+
+ } else if (PInfo.isBinTest()) {
+ CurrStates->setSource(PInfo.testSourceNode());
+ FalseStates->setSource(PInfo.testSourceNode());
+ splitVarStateForIfBinOp(PInfo, CurrStates.get(), FalseStates.get());
+
+ } else {
+ return false;
+ }
+
+ } else if (const BinaryOperator *BinOp =
+ dyn_cast_or_null<BinaryOperator>(CurrBlock->getTerminator().getStmt())) {
+
+ PInfo = Visitor.getInfo(BinOp->getLHS());
+ if (!PInfo.isVarTest()) {
+ if ((BinOp = dyn_cast_or_null<BinaryOperator>(BinOp->getLHS()))) {
+ PInfo = Visitor.getInfo(BinOp->getRHS());
+
+ if (!PInfo.isVarTest())
+ return false;
+
+ } else {
+ return false;
+ }
+ }
+
+ CurrStates->setSource(BinOp);
+ FalseStates->setSource(BinOp);
+
+ const VarTestResult &Test = PInfo.getVarTest();
+ ConsumedState VarState = CurrStates->getState(Test.Var);
+
+ if (BinOp->getOpcode() == BO_LAnd) {
+ if (VarState == CS_Unknown)
+ CurrStates->setState(Test.Var, Test.TestsFor);
+ else if (VarState == invertConsumedUnconsumed(Test.TestsFor))
+ CurrStates->markUnreachable();
+
+ } else if (BinOp->getOpcode() == BO_LOr) {
+ if (VarState == CS_Unknown)
+ FalseStates->setState(Test.Var,
+ invertConsumedUnconsumed(Test.TestsFor));
+ else if (VarState == Test.TestsFor)
+ FalseStates->markUnreachable();
+ }
+
+ } else {
+ return false;
+ }
+
+ CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin();
+
+ if (*SI)
+ BlockInfo.addInfo(*SI, std::move(CurrStates));
+ else
+ CurrStates = nullptr;
+
+ if (*++SI)
+ BlockInfo.addInfo(*SI, std::move(FalseStates));
+
+ return true;
+}
+
+void ConsumedAnalyzer::run(AnalysisDeclContext &AC) {
+ const FunctionDecl *D = dyn_cast_or_null<FunctionDecl>(AC.getDecl());
+ if (!D)
+ return;
+
+ CFG *CFGraph = AC.getCFG();
+ if (!CFGraph)
+ return;
+
+ determineExpectedReturnState(AC, D);
+
+ PostOrderCFGView *SortedGraph = AC.getAnalysis<PostOrderCFGView>();
+ // AC.getCFG()->viewCFG(LangOptions());
+
+ BlockInfo = ConsumedBlockInfo(CFGraph->getNumBlockIDs(), SortedGraph);
+
+ CurrStates = llvm::make_unique<ConsumedStateMap>();
+ ConsumedStmtVisitor Visitor(AC, *this, CurrStates.get());
+
+ // Add all trackable parameters to the state map.
+ for (const auto *PI : D->params())
+ Visitor.VisitParmVarDecl(PI);
+
+ // Visit all of the function's basic blocks.
+ for (const auto *CurrBlock : *SortedGraph) {
+ if (!CurrStates)
+ CurrStates = BlockInfo.getInfo(CurrBlock);
+
+ if (!CurrStates) {
+ continue;
+
+ } else if (!CurrStates->isReachable()) {
+ CurrStates = nullptr;
+ continue;
+ }
+
+ Visitor.reset(CurrStates.get());
+
+ // Visit all of the basic block's statements.
+ for (const auto &B : *CurrBlock) {
+ switch (B.getKind()) {
+ case CFGElement::Statement:
+ Visitor.Visit(B.castAs<CFGStmt>().getStmt());
+ break;
+
+ case CFGElement::TemporaryDtor: {
+ const CFGTemporaryDtor &DTor = B.castAs<CFGTemporaryDtor>();
+ const CXXBindTemporaryExpr *BTE = DTor.getBindTemporaryExpr();
+
+ Visitor.checkCallability(PropagationInfo(BTE),
+ DTor.getDestructorDecl(AC.getASTContext()),
+ BTE->getExprLoc());
+ CurrStates->remove(BTE);
+ break;
+ }
+
+ case CFGElement::AutomaticObjectDtor: {
+ const CFGAutomaticObjDtor &DTor = B.castAs<CFGAutomaticObjDtor>();
+ SourceLocation Loc = DTor.getTriggerStmt()->getLocEnd();
+ const VarDecl *Var = DTor.getVarDecl();
+
+ Visitor.checkCallability(PropagationInfo(Var),
+ DTor.getDestructorDecl(AC.getASTContext()),
+ Loc);
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ // TODO: Handle other forms of branching with precision, including while-
+ // and for-loops. (Deferred)
+ if (!splitState(CurrBlock, Visitor)) {
+ CurrStates->setSource(nullptr);
+
+ if (CurrBlock->succ_size() > 1 ||
+ (CurrBlock->succ_size() == 1 &&
+ (*CurrBlock->succ_begin())->pred_size() > 1)) {
+
+ auto *RawState = CurrStates.get();
+
+ for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
+ SE = CurrBlock->succ_end(); SI != SE; ++SI) {
+
+ if (*SI == nullptr) continue;
+
+ if (BlockInfo.isBackEdge(CurrBlock, *SI)) {
+ BlockInfo.borrowInfo(*SI)->intersectAtLoopHead(
+ *SI, CurrBlock, RawState, WarningsHandler);
+
+ if (BlockInfo.allBackEdgesVisited(CurrBlock, *SI))
+ BlockInfo.discardInfo(*SI);
+ } else {
+ BlockInfo.addInfo(*SI, RawState, CurrStates);
+ }
+ }
+
+ CurrStates = nullptr;
+ }
+ }
+
+ if (CurrBlock == &AC.getCFG()->getExit() &&
+ D->getCallResultType()->isVoidType())
+ CurrStates->checkParamsForReturnTypestate(D->getLocation(),
+ WarningsHandler);
+ } // End of block iterator.
+
+ // Delete the last existing state map.
+ CurrStates = nullptr;
+
+ WarningsHandler.emitDiagnostics();
+}
+}} // end namespace clang::consumed
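+// A minimal sketch of driving this analysis, assuming only the interfaces
+// declared in Consumed.h; 'MyHandler' and 'checkFunction' are invented:
+//
+//   class MyHandler : public ConsumedWarningsHandlerBase {
+//     void warnUseInInvalidState(StringRef MethodName, StringRef VariableName,
+//                                StringRef State, SourceLocation Loc) override {
+//       llvm::errs() << MethodName << " used on " << VariableName
+//                    << " in the " << State << " state\n";
+//     }
+//   };
+//
+//   void checkFunction(AnalysisDeclContext &AC) {
+//     MyHandler Handler;
+//     ConsumedAnalyzer Analyzer(Handler);
+//     Analyzer.run(AC);  // Walks the CFG and emits the collected warnings.
+//   }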
diff --git a/contrib/llvm/tools/clang/lib/Analysis/Dominators.cpp b/contrib/llvm/tools/clang/lib/Analysis/Dominators.cpp
new file mode 100644
index 0000000..0e02c6d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/Dominators.cpp
@@ -0,0 +1,14 @@
+//=- Dominators.cpp - Implementation of dominator tree for Clang CFG C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/Dominators.h"
+
+using namespace clang;
+
+void DominatorTree::anchor() { }
diff --git a/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp
new file mode 100644
index 0000000..0948bc0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp
@@ -0,0 +1,907 @@
+// FormatString.cpp - Common stuff for handling printf/scanf formats -*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Shared details for processing format strings of printf and scanf
+// (and friends).
+//
+//===----------------------------------------------------------------------===//
+
+#include "FormatStringParsing.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/TargetInfo.h"
+
+using clang::analyze_format_string::ArgType;
+using clang::analyze_format_string::FormatStringHandler;
+using clang::analyze_format_string::FormatSpecifier;
+using clang::analyze_format_string::LengthModifier;
+using clang::analyze_format_string::OptionalAmount;
+using clang::analyze_format_string::PositionContext;
+using clang::analyze_format_string::ConversionSpecifier;
+using namespace clang;
+
+// Key function to FormatStringHandler.
+FormatStringHandler::~FormatStringHandler() {}
+
+//===----------------------------------------------------------------------===//
+// Functions for parsing format string components shared by printf and
+// scanf format strings.
+//===----------------------------------------------------------------------===//
+
+OptionalAmount
+clang::analyze_format_string::ParseAmount(const char *&Beg, const char *E) {
+ const char *I = Beg;
+  UpdateOnReturn<const char *> UpdateBeg(Beg, I);
+
+ unsigned accumulator = 0;
+ bool hasDigits = false;
+
+ for ( ; I != E; ++I) {
+ char c = *I;
+ if (c >= '0' && c <= '9') {
+ hasDigits = true;
+ accumulator = (accumulator * 10) + (c - '0');
+ continue;
+ }
+
+ if (hasDigits)
+ return OptionalAmount(OptionalAmount::Constant, accumulator, Beg, I - Beg,
+ false);
+
+ break;
+ }
+
+ return OptionalAmount();
+}
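+// For example, with Beg pointing at "12d", ParseAmount() returns a Constant
+// amount of 12 and (via UpdateOnReturn) leaves Beg pointing at 'd'; with no
+// leading digits it returns a NotSpecified amount and Beg is unchanged.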
+
+OptionalAmount
+clang::analyze_format_string::ParseNonPositionAmount(const char *&Beg,
+ const char *E,
+ unsigned &argIndex) {
+ if (*Beg == '*') {
+ ++Beg;
+ return OptionalAmount(OptionalAmount::Arg, argIndex++, Beg, 0, false);
+ }
+
+ return ParseAmount(Beg, E);
+}
+
+OptionalAmount
+clang::analyze_format_string::ParsePositionAmount(FormatStringHandler &H,
+ const char *Start,
+ const char *&Beg,
+ const char *E,
+ PositionContext p) {
+ if (*Beg == '*') {
+ const char *I = Beg + 1;
+ const OptionalAmount &Amt = ParseAmount(I, E);
+
+ if (Amt.getHowSpecified() == OptionalAmount::NotSpecified) {
+ H.HandleInvalidPosition(Beg, I - Beg, p);
+ return OptionalAmount(false);
+ }
+
+ if (I == E) {
+ // No more characters left?
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return OptionalAmount(false);
+ }
+
+ assert(Amt.getHowSpecified() == OptionalAmount::Constant);
+
+ if (*I == '$') {
+ // Handle positional arguments
+
+ // Special case: '*0$', since this is an easy mistake.
+ if (Amt.getConstantAmount() == 0) {
+ H.HandleZeroPosition(Beg, I - Beg + 1);
+ return OptionalAmount(false);
+ }
+
+ const char *Tmp = Beg;
+ Beg = ++I;
+
+ return OptionalAmount(OptionalAmount::Arg, Amt.getConstantAmount() - 1,
+ Tmp, 0, true);
+ }
+
+ H.HandleInvalidPosition(Beg, I - Beg, p);
+ return OptionalAmount(false);
+ }
+
+ return ParseAmount(Beg, E);
+}
+
+
+bool
+clang::analyze_format_string::ParseFieldWidth(FormatStringHandler &H,
+ FormatSpecifier &CS,
+ const char *Start,
+ const char *&Beg, const char *E,
+ unsigned *argIndex) {
+ // FIXME: Support negative field widths.
+ if (argIndex) {
+ CS.setFieldWidth(ParseNonPositionAmount(Beg, E, *argIndex));
+ }
+ else {
+ const OptionalAmount Amt =
+ ParsePositionAmount(H, Start, Beg, E,
+ analyze_format_string::FieldWidthPos);
+
+ if (Amt.isInvalid())
+ return true;
+ CS.setFieldWidth(Amt);
+ }
+ return false;
+}
+
+bool
+clang::analyze_format_string::ParseArgPosition(FormatStringHandler &H,
+ FormatSpecifier &FS,
+ const char *Start,
+ const char *&Beg,
+ const char *E) {
+ const char *I = Beg;
+
+ const OptionalAmount &Amt = ParseAmount(I, E);
+
+ if (I == E) {
+ // No more characters left?
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ if (Amt.getHowSpecified() == OptionalAmount::Constant && *(I++) == '$') {
+ // Warn that positional arguments are non-standard.
+ H.HandlePosition(Start, I - Start);
+
+ // Special case: '%0$', since this is an easy mistake.
+ if (Amt.getConstantAmount() == 0) {
+ H.HandleZeroPosition(Start, I - Start);
+ return true;
+ }
+
+ FS.setArgIndex(Amt.getConstantAmount() - 1);
+ FS.setUsesPositionalArg();
+ // Update the caller's pointer if we decided to consume
+ // these characters.
+ Beg = I;
+ return false;
+ }
+
+ return false;
+}
+
+bool
+clang::analyze_format_string::ParseLengthModifier(FormatSpecifier &FS,
+ const char *&I,
+ const char *E,
+ const LangOptions &LO,
+ bool IsScanf) {
+ LengthModifier::Kind lmKind = LengthModifier::None;
+ const char *lmPosition = I;
+ switch (*I) {
+ default:
+ return false;
+ case 'h':
+ ++I;
+ lmKind = (I != E && *I == 'h') ? (++I, LengthModifier::AsChar)
+ : LengthModifier::AsShort;
+ break;
+ case 'l':
+ ++I;
+ lmKind = (I != E && *I == 'l') ? (++I, LengthModifier::AsLongLong)
+ : LengthModifier::AsLong;
+ break;
+ case 'j': lmKind = LengthModifier::AsIntMax; ++I; break;
+ case 'z': lmKind = LengthModifier::AsSizeT; ++I; break;
+ case 't': lmKind = LengthModifier::AsPtrDiff; ++I; break;
+ case 'L': lmKind = LengthModifier::AsLongDouble; ++I; break;
+ case 'q': lmKind = LengthModifier::AsQuad; ++I; break;
+ case 'a':
+ if (IsScanf && !LO.C99 && !LO.CPlusPlus11) {
+ // For scanf in C90, look at the next character to see if this should
+ // be parsed as the GNU extension 'a' length modifier. If not, this
+ // will be parsed as a conversion specifier.
+ ++I;
+ if (I != E && (*I == 's' || *I == 'S' || *I == '[')) {
+ lmKind = LengthModifier::AsAllocate;
+ break;
+ }
+ --I;
+ }
+ return false;
+ case 'm':
+ if (IsScanf) {
+ lmKind = LengthModifier::AsMAllocate;
+ ++I;
+ break;
+ }
+ return false;
+ // printf: AsInt64, AsInt32, AsInt3264
+ // scanf: AsInt64
+ case 'I':
+ if (I + 1 != E && I + 2 != E) {
+ if (I[1] == '6' && I[2] == '4') {
+ I += 3;
+ lmKind = LengthModifier::AsInt64;
+ break;
+ }
+ if (IsScanf)
+ return false;
+
+ if (I[1] == '3' && I[2] == '2') {
+ I += 3;
+ lmKind = LengthModifier::AsInt32;
+ break;
+ }
+ }
+ ++I;
+ lmKind = LengthModifier::AsInt3264;
+ break;
+ case 'w':
+ lmKind = LengthModifier::AsWide; ++I; break;
+ }
+ LengthModifier lm(lmPosition, lmKind);
+ FS.setLengthModifier(lm);
+ return true;
+}
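+// For example, "hhd" consumes "hh" (AsChar), "lld" consumes "ll"
+// (AsLongLong), and "I64d" consumes "I64" (AsInt64, an MS extension); for
+// scanf in C90 mode, "as" consumes 'a' as the GNU AsAllocate modifier.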
+
+//===----------------------------------------------------------------------===//
+// Methods on ArgType.
+//===----------------------------------------------------------------------===//
+
+clang::analyze_format_string::ArgType::MatchKind
+ArgType::matchesType(ASTContext &C, QualType argTy) const {
+ if (Ptr) {
+ // It has to be a pointer.
+ const PointerType *PT = argTy->getAs<PointerType>();
+ if (!PT)
+ return NoMatch;
+
+ // We cannot write through a const qualified pointer.
+ if (PT->getPointeeType().isConstQualified())
+ return NoMatch;
+
+ argTy = PT->getPointeeType();
+ }
+
+ switch (K) {
+ case InvalidTy:
+ llvm_unreachable("ArgType must be valid");
+
+ case UnknownTy:
+ return Match;
+
+ case AnyCharTy: {
+ if (const EnumType *ETy = argTy->getAs<EnumType>())
+ argTy = ETy->getDecl()->getIntegerType();
+
+ if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ default:
+ break;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Char_U:
+ return Match;
+ }
+ return NoMatch;
+ }
+
+ case SpecificTy: {
+ if (const EnumType *ETy = argTy->getAs<EnumType>())
+ argTy = ETy->getDecl()->getIntegerType();
+ argTy = C.getCanonicalType(argTy).getUnqualifiedType();
+
+ if (T == argTy)
+ return Match;
+ // Check for "compatible types".
+ if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ default:
+ break;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ return T == C.UnsignedCharTy || T == C.SignedCharTy ? Match
+ : NoMatch;
+ case BuiltinType::Short:
+ return T == C.UnsignedShortTy ? Match : NoMatch;
+ case BuiltinType::UShort:
+ return T == C.ShortTy ? Match : NoMatch;
+ case BuiltinType::Int:
+ return T == C.UnsignedIntTy ? Match : NoMatch;
+ case BuiltinType::UInt:
+ return T == C.IntTy ? Match : NoMatch;
+ case BuiltinType::Long:
+ return T == C.UnsignedLongTy ? Match : NoMatch;
+ case BuiltinType::ULong:
+ return T == C.LongTy ? Match : NoMatch;
+ case BuiltinType::LongLong:
+ return T == C.UnsignedLongLongTy ? Match : NoMatch;
+ case BuiltinType::ULongLong:
+ return T == C.LongLongTy ? Match : NoMatch;
+ }
+ return NoMatch;
+ }
+
+ case CStrTy: {
+ const PointerType *PT = argTy->getAs<PointerType>();
+ if (!PT)
+ return NoMatch;
+ QualType pointeeTy = PT->getPointeeType();
+ if (const BuiltinType *BT = pointeeTy->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Void:
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ return Match;
+ default:
+ break;
+ }
+
+ return NoMatch;
+ }
+
+ case WCStrTy: {
+ const PointerType *PT = argTy->getAs<PointerType>();
+ if (!PT)
+ return NoMatch;
+ QualType pointeeTy =
+ C.getCanonicalType(PT->getPointeeType()).getUnqualifiedType();
+ return pointeeTy == C.getWideCharType() ? Match : NoMatch;
+ }
+
+ case WIntTy: {
+
+ QualType PromoArg =
+ argTy->isPromotableIntegerType()
+ ? C.getPromotedIntegerType(argTy) : argTy;
+
+ QualType WInt = C.getCanonicalType(C.getWIntType()).getUnqualifiedType();
+ PromoArg = C.getCanonicalType(PromoArg).getUnqualifiedType();
+
+ // If the promoted argument is the corresponding signed type of the
+ // wint_t type, then it should match.
+ if (PromoArg->hasSignedIntegerRepresentation() &&
+ C.getCorrespondingUnsignedType(PromoArg) == WInt)
+ return Match;
+
+ return WInt == PromoArg ? Match : NoMatch;
+ }
+
+ case CPointerTy:
+ if (argTy->isVoidPointerType()) {
+ return Match;
+    } else if (argTy->isPointerType() || argTy->isObjCObjectPointerType() ||
+ argTy->isBlockPointerType() || argTy->isNullPtrType()) {
+ return NoMatchPedantic;
+ } else {
+ return NoMatch;
+ }
+
+ case ObjCPointerTy: {
+ if (argTy->getAs<ObjCObjectPointerType>() ||
+ argTy->getAs<BlockPointerType>())
+ return Match;
+
+ // Handle implicit toll-free bridging.
+ if (const PointerType *PT = argTy->getAs<PointerType>()) {
+ // Things such as CFTypeRef are really just opaque pointers
+ // to C structs representing CF types that can often be bridged
+ // to Objective-C objects. Since the compiler doesn't know which
+ // structs can be toll-free bridged, we just accept them all.
+ QualType pointee = PT->getPointeeType();
+ if (pointee->getAsStructureType() || pointee->isVoidType())
+ return Match;
+ }
+ return NoMatch;
+ }
+ }
+
+ llvm_unreachable("Invalid ArgType Kind!");
+}
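+// For example, ArgType(C.IntTy) matches an 'unsigned int' argument through
+// the "compatible types" table above, while any Ptr-wrapped ArgType (the
+// scanf case) rejects a pointer-to-const argument, since it must be written
+// through.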
+
+QualType ArgType::getRepresentativeType(ASTContext &C) const {
+ QualType Res;
+ switch (K) {
+ case InvalidTy:
+ llvm_unreachable("No representative type for Invalid ArgType");
+ case UnknownTy:
+ llvm_unreachable("No representative type for Unknown ArgType");
+ case AnyCharTy:
+ Res = C.CharTy;
+ break;
+ case SpecificTy:
+ Res = T;
+ break;
+ case CStrTy:
+ Res = C.getPointerType(C.CharTy);
+ break;
+ case WCStrTy:
+ Res = C.getPointerType(C.getWideCharType());
+ break;
+ case ObjCPointerTy:
+ Res = C.ObjCBuiltinIdTy;
+ break;
+ case CPointerTy:
+ Res = C.VoidPtrTy;
+ break;
+ case WIntTy: {
+ Res = C.getWIntType();
+ break;
+ }
+ }
+
+ if (Ptr)
+ Res = C.getPointerType(Res);
+ return Res;
+}
+
+std::string ArgType::getRepresentativeTypeName(ASTContext &C) const {
+ std::string S = getRepresentativeType(C).getAsString();
+
+ std::string Alias;
+ if (Name) {
+ // Use a specific name for this type, e.g. "size_t".
+ Alias = Name;
+ if (Ptr) {
+ // If ArgType is actually a pointer to T, append an asterisk.
+ Alias += (Alias[Alias.size()-1] == '*') ? "*" : " *";
+ }
+ // If Alias is the same as the underlying type, e.g. wchar_t, then drop it.
+ if (S == Alias)
+ Alias.clear();
+ }
+
+ if (!Alias.empty())
+ return std::string("'") + Alias + "' (aka '" + S + "')";
+ return std::string("'") + S + "'";
+}
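+// For example, an ArgType named "size_t" whose representative type prints as
+// "unsigned long" is rendered as "'size_t' (aka 'unsigned long')"; an
+// unnamed ArgType is rendered as just "'unsigned long'".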
+
+
+//===----------------------------------------------------------------------===//
+// Methods on OptionalAmount.
+//===----------------------------------------------------------------------===//
+
+ArgType
+analyze_format_string::OptionalAmount::getArgType(ASTContext &Ctx) const {
+ return Ctx.IntTy;
+}
+
+//===----------------------------------------------------------------------===//
+// Methods on LengthModifier.
+//===----------------------------------------------------------------------===//
+
+const char *
+analyze_format_string::LengthModifier::toString() const {
+ switch (kind) {
+ case AsChar:
+ return "hh";
+ case AsShort:
+ return "h";
+ case AsLong: // or AsWideChar
+ return "l";
+ case AsLongLong:
+ return "ll";
+ case AsQuad:
+ return "q";
+ case AsIntMax:
+ return "j";
+ case AsSizeT:
+ return "z";
+ case AsPtrDiff:
+ return "t";
+ case AsInt32:
+ return "I32";
+ case AsInt3264:
+ return "I";
+ case AsInt64:
+ return "I64";
+ case AsLongDouble:
+ return "L";
+ case AsAllocate:
+ return "a";
+ case AsMAllocate:
+ return "m";
+ case AsWide:
+ return "w";
+ case None:
+ return "";
+ }
+ return nullptr;
+}
+
+//===----------------------------------------------------------------------===//
+// Methods on ConversionSpecifier.
+//===----------------------------------------------------------------------===//
+
+const char *ConversionSpecifier::toString() const {
+ switch (kind) {
+ case dArg: return "d";
+ case DArg: return "D";
+ case iArg: return "i";
+ case oArg: return "o";
+ case OArg: return "O";
+ case uArg: return "u";
+ case UArg: return "U";
+ case xArg: return "x";
+ case XArg: return "X";
+ case fArg: return "f";
+ case FArg: return "F";
+ case eArg: return "e";
+ case EArg: return "E";
+ case gArg: return "g";
+ case GArg: return "G";
+ case aArg: return "a";
+ case AArg: return "A";
+ case cArg: return "c";
+ case sArg: return "s";
+ case pArg: return "p";
+ case nArg: return "n";
+ case PercentArg: return "%";
+ case ScanListArg: return "[";
+ case InvalidSpecifier: return nullptr;
+
+ // POSIX unicode extensions.
+ case CArg: return "C";
+ case SArg: return "S";
+
+ // Objective-C specific specifiers.
+ case ObjCObjArg: return "@";
+
+ // FreeBSD kernel specific specifiers.
+ case FreeBSDbArg: return "b";
+ case FreeBSDDArg: return "D";
+ case FreeBSDrArg: return "r";
+ case FreeBSDyArg: return "y";
+
+ // GlibC specific specifiers.
+ case PrintErrno: return "m";
+
+ // MS specific specifiers.
+ case ZArg: return "Z";
+ }
+ return nullptr;
+}
+
+Optional<ConversionSpecifier>
+ConversionSpecifier::getStandardSpecifier() const {
+ ConversionSpecifier::Kind NewKind;
+
+ switch (getKind()) {
+ default:
+ return None;
+ case DArg:
+ NewKind = dArg;
+ break;
+ case UArg:
+ NewKind = uArg;
+ break;
+ case OArg:
+ NewKind = oArg;
+ break;
+ }
+
+ ConversionSpecifier FixedCS(*this);
+ FixedCS.setKind(NewKind);
+ return FixedCS;
+}
+
+//===----------------------------------------------------------------------===//
+// Methods on OptionalAmount.
+//===----------------------------------------------------------------------===//
+
+void OptionalAmount::toString(raw_ostream &os) const {
+ switch (hs) {
+ case Invalid:
+ case NotSpecified:
+ return;
+ case Arg:
+ if (UsesDotPrefix)
+ os << ".";
+ if (usesPositionalArg())
+ os << "*" << getPositionalArgIndex() << "$";
+ else
+ os << "*";
+ break;
+ case Constant:
+ if (UsesDotPrefix)
+ os << ".";
+ os << amt;
+ break;
+ }
+}
+
+bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target) const {
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return true;
+
+ // Handle most integer flags
+ case LengthModifier::AsShort:
+ if (Target.getTriple().isOSMSVCRT()) {
+ switch (CS.getKind()) {
+ case ConversionSpecifier::cArg:
+ case ConversionSpecifier::CArg:
+ case ConversionSpecifier::sArg:
+ case ConversionSpecifier::SArg:
+ case ConversionSpecifier::ZArg:
+ return true;
+ default:
+ break;
+ }
+ }
+ // Fall through.
+ case LengthModifier::AsChar:
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
+ case LengthModifier::AsIntMax:
+ case LengthModifier::AsSizeT:
+ case LengthModifier::AsPtrDiff:
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::DArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::OArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::UArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::nArg:
+ return true;
+ case ConversionSpecifier::FreeBSDrArg:
+ case ConversionSpecifier::FreeBSDyArg:
+ return Target.getTriple().isOSFreeBSD();
+ default:
+ return false;
+ }
+
+ // Handle 'l' flag
+ case LengthModifier::AsLong: // or AsWideChar
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::DArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::OArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::UArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::nArg:
+ case ConversionSpecifier::cArg:
+ case ConversionSpecifier::sArg:
+ case ConversionSpecifier::ScanListArg:
+ case ConversionSpecifier::ZArg:
+ return true;
+ case ConversionSpecifier::FreeBSDrArg:
+ case ConversionSpecifier::FreeBSDyArg:
+ return Target.getTriple().isOSFreeBSD();
+ default:
+ return false;
+ }
+
+ case LengthModifier::AsLongDouble:
+ switch (CS.getKind()) {
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ return true;
+ // GNU libc extension.
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ return !Target.getTriple().isOSDarwin() &&
+ !Target.getTriple().isOSWindows();
+ default:
+ return false;
+ }
+
+ case LengthModifier::AsAllocate:
+ switch (CS.getKind()) {
+ case ConversionSpecifier::sArg:
+ case ConversionSpecifier::SArg:
+ case ConversionSpecifier::ScanListArg:
+ return true;
+ default:
+ return false;
+ }
+
+ case LengthModifier::AsMAllocate:
+ switch (CS.getKind()) {
+ case ConversionSpecifier::cArg:
+ case ConversionSpecifier::CArg:
+ case ConversionSpecifier::sArg:
+ case ConversionSpecifier::SArg:
+ case ConversionSpecifier::ScanListArg:
+ return true;
+ default:
+ return false;
+ }
+ case LengthModifier::AsInt32:
+ case LengthModifier::AsInt3264:
+ case LengthModifier::AsInt64:
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ return Target.getTriple().isOSMSVCRT();
+ default:
+ return false;
+ }
+ case LengthModifier::AsWide:
+ switch (CS.getKind()) {
+ case ConversionSpecifier::cArg:
+ case ConversionSpecifier::CArg:
+ case ConversionSpecifier::sArg:
+ case ConversionSpecifier::SArg:
+ case ConversionSpecifier::ZArg:
+ return Target.getTriple().isOSMSVCRT();
+ default:
+ return false;
+ }
+ }
+ llvm_unreachable("Invalid LengthModifier Kind!");
+}
+
+bool FormatSpecifier::hasStandardLengthModifier() const {
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ case LengthModifier::AsChar:
+ case LengthModifier::AsShort:
+ case LengthModifier::AsLong:
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsIntMax:
+ case LengthModifier::AsSizeT:
+ case LengthModifier::AsPtrDiff:
+ case LengthModifier::AsLongDouble:
+ return true;
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ case LengthModifier::AsQuad:
+ case LengthModifier::AsInt32:
+ case LengthModifier::AsInt3264:
+ case LengthModifier::AsInt64:
+ case LengthModifier::AsWide:
+ return false;
+ }
+ llvm_unreachable("Invalid LengthModifier Kind!");
+}
+
+bool FormatSpecifier::hasStandardConversionSpecifier(
+ const LangOptions &LangOpt) const {
+ switch (CS.getKind()) {
+ case ConversionSpecifier::cArg:
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::sArg:
+ case ConversionSpecifier::pArg:
+ case ConversionSpecifier::nArg:
+ case ConversionSpecifier::ObjCObjArg:
+ case ConversionSpecifier::ScanListArg:
+ case ConversionSpecifier::PercentArg:
+ return true;
+ case ConversionSpecifier::CArg:
+ case ConversionSpecifier::SArg:
+ return LangOpt.ObjC1 || LangOpt.ObjC2;
+ case ConversionSpecifier::InvalidSpecifier:
+ case ConversionSpecifier::FreeBSDbArg:
+ case ConversionSpecifier::FreeBSDDArg:
+ case ConversionSpecifier::FreeBSDrArg:
+ case ConversionSpecifier::FreeBSDyArg:
+ case ConversionSpecifier::PrintErrno:
+ case ConversionSpecifier::DArg:
+ case ConversionSpecifier::OArg:
+ case ConversionSpecifier::UArg:
+ case ConversionSpecifier::ZArg:
+ return false;
+ }
+ llvm_unreachable("Invalid ConversionSpecifier Kind!");
+}
+
+bool FormatSpecifier::hasStandardLengthConversionCombination() const {
+ if (LM.getKind() == LengthModifier::AsLongDouble) {
+ switch(CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ return false;
+ default:
+ return true;
+ }
+ }
+ return true;
+}
+
+Optional<LengthModifier> FormatSpecifier::getCorrectedLengthModifier() const {
+ if (CS.isAnyIntArg() || CS.getKind() == ConversionSpecifier::nArg) {
+ if (LM.getKind() == LengthModifier::AsLongDouble ||
+ LM.getKind() == LengthModifier::AsQuad) {
+ LengthModifier FixedLM(LM);
+ FixedLM.setKind(LengthModifier::AsLongLong);
+ return FixedLM;
+ }
+ }
+
+ return None;
+}
+
+bool FormatSpecifier::namedTypeToLengthModifier(QualType QT,
+ LengthModifier &LM) {
+ assert(isa<TypedefType>(QT) && "Expected a TypedefType");
+ const TypedefNameDecl *Typedef = cast<TypedefType>(QT)->getDecl();
+
+ for (;;) {
+ const IdentifierInfo *Identifier = Typedef->getIdentifier();
+ if (Identifier->getName() == "size_t") {
+ LM.setKind(LengthModifier::AsSizeT);
+ return true;
+ } else if (Identifier->getName() == "ssize_t") {
+ // Not C99, but common in Unix.
+ LM.setKind(LengthModifier::AsSizeT);
+ return true;
+ } else if (Identifier->getName() == "intmax_t") {
+ LM.setKind(LengthModifier::AsIntMax);
+ return true;
+ } else if (Identifier->getName() == "uintmax_t") {
+ LM.setKind(LengthModifier::AsIntMax);
+ return true;
+ } else if (Identifier->getName() == "ptrdiff_t") {
+ LM.setKind(LengthModifier::AsPtrDiff);
+ return true;
+ }
+
+ QualType T = Typedef->getUnderlyingType();
+ if (!isa<TypedefType>(T))
+ break;
+
+ Typedef = cast<TypedefType>(T)->getDecl();
+ }
+ return false;
+}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/FormatStringParsing.h b/contrib/llvm/tools/clang/lib/Analysis/FormatStringParsing.h
new file mode 100644
index 0000000..e165296
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/FormatStringParsing.h
@@ -0,0 +1,74 @@
+#ifndef LLVM_CLANG_LIB_ANALYSIS_FORMATSTRINGPARSING_H
+#define LLVM_CLANG_LIB_ANALYSIS_FORMATSTRINGPARSING_H
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Type.h"
+#include "clang/Analysis/Analyses/FormatString.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+
+class LangOptions;
+
+template <typename T>
+class UpdateOnReturn {
+ T &ValueToUpdate;
+ const T &ValueToCopy;
+public:
+ UpdateOnReturn(T &valueToUpdate, const T &valueToCopy)
+ : ValueToUpdate(valueToUpdate), ValueToCopy(valueToCopy) {}
+
+ ~UpdateOnReturn() {
+ ValueToUpdate = ValueToCopy;
+ }
+};
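+// Usage sketch: ParseAmount() in FormatString.cpp declares
+//   UpdateOnReturn<const char *> UpdateBeg(Beg, I);
+// so that every return path copies the local cursor I back into the caller's
+// Beg when the guard is destroyed.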
+
+namespace analyze_format_string {
+
+OptionalAmount ParseAmount(const char *&Beg, const char *E);
+OptionalAmount ParseNonPositionAmount(const char *&Beg, const char *E,
+ unsigned &argIndex);
+
+OptionalAmount ParsePositionAmount(FormatStringHandler &H,
+ const char *Start, const char *&Beg,
+ const char *E, PositionContext p);
+
+bool ParseFieldWidth(FormatStringHandler &H,
+ FormatSpecifier &CS,
+ const char *Start, const char *&Beg, const char *E,
+ unsigned *argIndex);
+
+bool ParseArgPosition(FormatStringHandler &H,
+ FormatSpecifier &CS, const char *Start,
+ const char *&Beg, const char *E);
+
+/// Returns true if a LengthModifier was parsed and installed in the
+/// FormatSpecifier& argument, and false otherwise.
+bool ParseLengthModifier(FormatSpecifier &FS, const char *&Beg, const char *E,
+ const LangOptions &LO, bool IsScanf = false);
+
+template <typename T> class SpecifierResult {
+ T FS;
+ const char *Start;
+ bool Stop;
+public:
+ SpecifierResult(bool stop = false)
+ : Start(nullptr), Stop(stop) {}
+ SpecifierResult(const char *start,
+ const T &fs)
+ : FS(fs), Start(start), Stop(false) {}
+
+ const char *getStart() const { return Start; }
+ bool shouldStop() const { return Stop; }
+ bool hasValue() const { return Start != nullptr; }
+ const T &getValue() const {
+ assert(hasValue());
+ return FS;
+ }
+ const T &getValue() { return FS; }
+};
+
+} // end analyze_format_string namespace
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp b/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp
new file mode 100644
index 0000000..5e0a9a0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp
@@ -0,0 +1,611 @@
+//=- LiveVariables.cpp - Live Variable Analysis for Source CFGs ----------*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements Live Variables analysis for source-level CFGs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Analysis/Analyses/PostOrderCFGView.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/CFG.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <vector>
+
+using namespace clang;
+
+namespace {
+
+class DataflowWorklist {
+ SmallVector<const CFGBlock *, 20> worklist;
+ llvm::BitVector enqueuedBlocks;
+ PostOrderCFGView *POV;
+public:
+ DataflowWorklist(const CFG &cfg, AnalysisDeclContext &Ctx)
+ : enqueuedBlocks(cfg.getNumBlockIDs()),
+ POV(Ctx.getAnalysis<PostOrderCFGView>()) {}
+
+ void enqueueBlock(const CFGBlock *block);
+ void enqueuePredecessors(const CFGBlock *block);
+
+ const CFGBlock *dequeue();
+
+ void sortWorklist();
+};
+
+}
+
+void DataflowWorklist::enqueueBlock(const clang::CFGBlock *block) {
+ if (block && !enqueuedBlocks[block->getBlockID()]) {
+ enqueuedBlocks[block->getBlockID()] = true;
+ worklist.push_back(block);
+ }
+}
+
+void DataflowWorklist::enqueuePredecessors(const clang::CFGBlock *block) {
+ const unsigned OldWorklistSize = worklist.size();
+ for (CFGBlock::const_pred_iterator I = block->pred_begin(),
+ E = block->pred_end(); I != E; ++I) {
+ enqueueBlock(*I);
+ }
+
+ if (OldWorklistSize == 0 || OldWorklistSize == worklist.size())
+ return;
+
+ sortWorklist();
+}
+
+void DataflowWorklist::sortWorklist() {
+ std::sort(worklist.begin(), worklist.end(), POV->getComparator());
+}
+
+const CFGBlock *DataflowWorklist::dequeue() {
+ if (worklist.empty())
+ return nullptr;
+ const CFGBlock *b = worklist.pop_back_val();
+ enqueuedBlocks[b->getBlockID()] = false;
+ return b;
+}
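+// Illustrative driver loop for this worklist (the real one appears later in
+// this file); liveness is a backward analysis, so changes propagate to
+// predecessors:
+//
+//   while (const CFGBlock *B = Worklist.dequeue()) {
+//     // ... recompute liveness at the entry of B from its successors ...
+//     if (/* the recomputed values changed */)
+//       Worklist.enqueuePredecessors(B);
+//   }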
+
+namespace {
+class LiveVariablesImpl {
+public:
+ AnalysisDeclContext &analysisContext;
+ llvm::ImmutableSet<const Stmt *>::Factory SSetFact;
+ llvm::ImmutableSet<const VarDecl *>::Factory DSetFact;
+ llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues> blocksEndToLiveness;
+ llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues> blocksBeginToLiveness;
+ llvm::DenseMap<const Stmt *, LiveVariables::LivenessValues> stmtsToLiveness;
+ llvm::DenseMap<const DeclRefExpr *, unsigned> inAssignment;
+ const bool killAtAssign;
+
+ LiveVariables::LivenessValues
+ merge(LiveVariables::LivenessValues valsA,
+ LiveVariables::LivenessValues valsB);
+
+ LiveVariables::LivenessValues
+ runOnBlock(const CFGBlock *block, LiveVariables::LivenessValues val,
+ LiveVariables::Observer *obs = nullptr);
+
+ void dumpBlockLiveness(const SourceManager& M);
+
+ LiveVariablesImpl(AnalysisDeclContext &ac, bool KillAtAssign)
+ : analysisContext(ac),
+ SSetFact(false), // Do not canonicalize ImmutableSets by default.
+ DSetFact(false), // This is a *major* performance win.
+ killAtAssign(KillAtAssign) {}
+};
+}
+
+static LiveVariablesImpl &getImpl(void *x) {
+ return *((LiveVariablesImpl *) x);
+}
+
+//===----------------------------------------------------------------------===//
+// Operations and queries on LivenessValues.
+//===----------------------------------------------------------------------===//
+
+bool LiveVariables::LivenessValues::isLive(const Stmt *S) const {
+ return liveStmts.contains(S);
+}
+
+bool LiveVariables::LivenessValues::isLive(const VarDecl *D) const {
+ return liveDecls.contains(D);
+}
+
+namespace {
+ template <typename SET>
+ SET mergeSets(SET A, SET B) {
+ if (A.isEmpty())
+ return B;
+
+ for (typename SET::iterator it = B.begin(), ei = B.end(); it != ei; ++it) {
+ A = A.add(*it);
+ }
+ return A;
+ }
+}
+
+void LiveVariables::Observer::anchor() { }
+
+LiveVariables::LivenessValues
+LiveVariablesImpl::merge(LiveVariables::LivenessValues valsA,
+ LiveVariables::LivenessValues valsB) {
+
+ llvm::ImmutableSetRef<const Stmt *>
+ SSetRefA(valsA.liveStmts.getRootWithoutRetain(), SSetFact.getTreeFactory()),
+ SSetRefB(valsB.liveStmts.getRootWithoutRetain(), SSetFact.getTreeFactory());
+
+
+ llvm::ImmutableSetRef<const VarDecl *>
+ DSetRefA(valsA.liveDecls.getRootWithoutRetain(), DSetFact.getTreeFactory()),
+ DSetRefB(valsB.liveDecls.getRootWithoutRetain(), DSetFact.getTreeFactory());
+
+
+ SSetRefA = mergeSets(SSetRefA, SSetRefB);
+ DSetRefA = mergeSets(DSetRefA, DSetRefB);
+
+ // asImmutableSet() canonicalizes the tree, allowing us to do an easy
+ // comparison afterwards.
+ return LiveVariables::LivenessValues(SSetRefA.asImmutableSet(),
+ DSetRefA.asImmutableSet());
+}
+
+bool LiveVariables::LivenessValues::equals(const LivenessValues &V) const {
+ return liveStmts == V.liveStmts && liveDecls == V.liveDecls;
+}
+
+//===----------------------------------------------------------------------===//
+// Query methods.
+//===----------------------------------------------------------------------===//
+
+static bool isAlwaysAlive(const VarDecl *D) {
+ return D->hasGlobalStorage();
+}
+
+bool LiveVariables::isLive(const CFGBlock *B, const VarDecl *D) {
+ return isAlwaysAlive(D) || getImpl(impl).blocksEndToLiveness[B].isLive(D);
+}
+
+bool LiveVariables::isLive(const Stmt *S, const VarDecl *D) {
+ return isAlwaysAlive(D) || getImpl(impl).stmtsToLiveness[S].isLive(D);
+}
+
+bool LiveVariables::isLive(const Stmt *Loc, const Stmt *S) {
+ return getImpl(impl).stmtsToLiveness[Loc].isLive(S);
+}
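+
+// Editor's usage sketch (not part of the original file): computeLiveness(),
+// defined later in this file, returns a heap-allocated analysis owned by the
+// caller; 'AC', 'B', and 'VD' are assumed to be supplied by the client.
+// \code
+//   if (LiveVariables *LV = LiveVariables::computeLiveness(AC, true)) {
+//     bool LiveAtExit = LV->isLive(B, VD); // live at the end of block B?
+//     delete LV;
+//   }
+// \endcode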
+
+//===----------------------------------------------------------------------===//
+// Dataflow computation.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class TransferFunctions : public StmtVisitor<TransferFunctions> {
+ LiveVariablesImpl &LV;
+ LiveVariables::LivenessValues &val;
+ LiveVariables::Observer *observer;
+ const CFGBlock *currentBlock;
+public:
+ TransferFunctions(LiveVariablesImpl &im,
+ LiveVariables::LivenessValues &Val,
+ LiveVariables::Observer *Observer,
+ const CFGBlock *CurrentBlock)
+ : LV(im), val(Val), observer(Observer), currentBlock(CurrentBlock) {}
+
+ void VisitBinaryOperator(BinaryOperator *BO);
+ void VisitBlockExpr(BlockExpr *BE);
+ void VisitDeclRefExpr(DeclRefExpr *DR);
+ void VisitDeclStmt(DeclStmt *DS);
+ void VisitObjCForCollectionStmt(ObjCForCollectionStmt *OS);
+ void VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *UE);
+ void VisitUnaryOperator(UnaryOperator *UO);
+ void Visit(Stmt *S);
+};
+}
+
+static const VariableArrayType *FindVA(QualType Ty) {
+ const Type *ty = Ty.getTypePtr();
+ while (const ArrayType *VT = dyn_cast<ArrayType>(ty)) {
+ if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(VT))
+ if (VAT->getSizeExpr())
+ return VAT;
+
+ ty = VT->getElementType().getTypePtr();
+ }
+
+ return nullptr;
+}
+
+static const Stmt *LookThroughStmt(const Stmt *S) {
+ while (S) {
+ if (const Expr *Ex = dyn_cast<Expr>(S))
+ S = Ex->IgnoreParens();
+ if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(S)) {
+ S = EWC->getSubExpr();
+ continue;
+ }
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(S)) {
+ S = OVE->getSourceExpr();
+ continue;
+ }
+ break;
+ }
+ return S;
+}
+
+static void AddLiveStmt(llvm::ImmutableSet<const Stmt *> &Set,
+ llvm::ImmutableSet<const Stmt *>::Factory &F,
+ const Stmt *S) {
+ Set = F.add(Set, LookThroughStmt(S));
+}
+
+void TransferFunctions::Visit(Stmt *S) {
+ if (observer)
+ observer->observeStmt(S, currentBlock, val);
+
+ StmtVisitor<TransferFunctions>::Visit(S);
+
+ if (isa<Expr>(S)) {
+ val.liveStmts = LV.SSetFact.remove(val.liveStmts, S);
+ }
+
+  // Mark all child expressions live.
+
+ switch (S->getStmtClass()) {
+ default:
+ break;
+ case Stmt::StmtExprClass: {
+ // For statement expressions, look through the compound statement.
+ S = cast<StmtExpr>(S)->getSubStmt();
+ break;
+ }
+ case Stmt::CXXMemberCallExprClass: {
+ // Include the implicit "this" pointer as being live.
+ CXXMemberCallExpr *CE = cast<CXXMemberCallExpr>(S);
+ if (Expr *ImplicitObj = CE->getImplicitObjectArgument()) {
+ AddLiveStmt(val.liveStmts, LV.SSetFact, ImplicitObj);
+ }
+ break;
+ }
+ case Stmt::ObjCMessageExprClass: {
+ // In calls to super, include the implicit "self" pointer as being live.
+ ObjCMessageExpr *CE = cast<ObjCMessageExpr>(S);
+ if (CE->getReceiverKind() == ObjCMessageExpr::SuperInstance)
+ val.liveDecls = LV.DSetFact.add(val.liveDecls,
+ LV.analysisContext.getSelfDecl());
+ break;
+ }
+ case Stmt::DeclStmtClass: {
+ const DeclStmt *DS = cast<DeclStmt>(S);
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl())) {
+ for (const VariableArrayType* VA = FindVA(VD->getType());
+ VA != nullptr; VA = FindVA(VA->getElementType())) {
+ AddLiveStmt(val.liveStmts, LV.SSetFact, VA->getSizeExpr());
+ }
+ }
+ break;
+ }
+ case Stmt::PseudoObjectExprClass: {
+ // A pseudo-object operation only directly consumes its result
+ // expression.
+ Expr *child = cast<PseudoObjectExpr>(S)->getResultExpr();
+ if (!child) return;
+ if (OpaqueValueExpr *OV = dyn_cast<OpaqueValueExpr>(child))
+ child = OV->getSourceExpr();
+ child = child->IgnoreParens();
+ val.liveStmts = LV.SSetFact.add(val.liveStmts, child);
+ return;
+ }
+
+ // FIXME: These cases eventually shouldn't be needed.
+ case Stmt::ExprWithCleanupsClass: {
+ S = cast<ExprWithCleanups>(S)->getSubExpr();
+ break;
+ }
+ case Stmt::CXXBindTemporaryExprClass: {
+ S = cast<CXXBindTemporaryExpr>(S)->getSubExpr();
+ break;
+ }
+ case Stmt::UnaryExprOrTypeTraitExprClass: {
+ // No need to unconditionally visit subexpressions.
+ return;
+ }
+ }
+
+ for (Stmt *Child : S->children()) {
+ if (Child)
+ AddLiveStmt(val.liveStmts, LV.SSetFact, Child);
+ }
+}
+
+void TransferFunctions::VisitBinaryOperator(BinaryOperator *B) {
+ if (B->isAssignmentOp()) {
+ if (!LV.killAtAssign)
+ return;
+
+ // Assigning to a variable?
+ Expr *LHS = B->getLHS()->IgnoreParens();
+
+ if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(LHS))
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ // Assignments to references don't kill the ref's address
+ if (VD->getType()->isReferenceType())
+ return;
+
+ if (!isAlwaysAlive(VD)) {
+ // The variable is now dead.
+ val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD);
+ }
+
+ if (observer)
+ observer->observerKill(DR);
+ }
+ }
+}
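+
+// Worked example (editor's note): liveness flows backwards, so with
+// killAtAssign enabled a plain assignment removes the variable from the
+// live set, which is what exposes dead stores above it:
+// \code
+//   int x;
+//   x = 0;  // 'x' is not live here: the store below kills it (dead store)
+//   x = 1;  // kill point under killAtAssign
+//   f(x);   // this use makes 'x' live on the backward walk
+// \endcode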
+
+void TransferFunctions::VisitBlockExpr(BlockExpr *BE) {
+ for (const VarDecl *VD :
+ LV.analysisContext.getReferencedBlockVars(BE->getBlockDecl())) {
+ if (isAlwaysAlive(VD))
+ continue;
+ val.liveDecls = LV.DSetFact.add(val.liveDecls, VD);
+ }
+}
+
+void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *DR) {
+ if (const VarDecl *D = dyn_cast<VarDecl>(DR->getDecl()))
+ if (!isAlwaysAlive(D) && LV.inAssignment.find(DR) == LV.inAssignment.end())
+ val.liveDecls = LV.DSetFact.add(val.liveDecls, D);
+}
+
+void TransferFunctions::VisitDeclStmt(DeclStmt *DS) {
+ for (const auto *DI : DS->decls())
+ if (const auto *VD = dyn_cast<VarDecl>(DI)) {
+ if (!isAlwaysAlive(VD))
+ val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD);
+ }
+}
+
+void TransferFunctions::VisitObjCForCollectionStmt(ObjCForCollectionStmt *OS) {
+ // Kill the iteration variable.
+ DeclRefExpr *DR = nullptr;
+ const VarDecl *VD = nullptr;
+
+ Stmt *element = OS->getElement();
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(element)) {
+ VD = cast<VarDecl>(DS->getSingleDecl());
+ }
+ else if ((DR = dyn_cast<DeclRefExpr>(cast<Expr>(element)->IgnoreParens()))) {
+ VD = cast<VarDecl>(DR->getDecl());
+ }
+
+ if (VD) {
+ val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD);
+ if (observer && DR)
+ observer->observerKill(DR);
+ }
+}
+
+void TransferFunctions::
+VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *UE)
+{
+  // While sizeof(var) doesn't technically extend the liveness of 'var', it
+  // does extend the liveness of the metadata if 'var' is a VariableArrayType.
+  // We handle that special case here.
+ if (UE->getKind() != UETT_SizeOf || UE->isArgumentType())
+ return;
+
+ const Expr *subEx = UE->getArgumentExpr();
+ if (subEx->getType()->isVariableArrayType()) {
+ assert(subEx->isLValue());
+ val.liveStmts = LV.SSetFact.add(val.liveStmts, subEx->IgnoreParens());
+ }
+}
+
+void TransferFunctions::VisitUnaryOperator(UnaryOperator *UO) {
+ // Treat ++/-- as a kill.
+ // Note we don't actually have to do anything if we don't have an observer,
+ // since a ++/-- acts as both a kill and a "use".
+ if (!observer)
+ return;
+
+ switch (UO->getOpcode()) {
+ default:
+ return;
+ case UO_PostInc:
+ case UO_PostDec:
+ case UO_PreInc:
+ case UO_PreDec:
+ break;
+ }
+
+ if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(UO->getSubExpr()->IgnoreParens()))
+ if (isa<VarDecl>(DR->getDecl())) {
+ // Treat ++/-- as a kill.
+ observer->observerKill(DR);
+ }
+}
+
+LiveVariables::LivenessValues
+LiveVariablesImpl::runOnBlock(const CFGBlock *block,
+ LiveVariables::LivenessValues val,
+ LiveVariables::Observer *obs) {
+
+ TransferFunctions TF(*this, val, obs, block);
+
+ // Visit the terminator (if any).
+ if (const Stmt *term = block->getTerminator())
+ TF.Visit(const_cast<Stmt*>(term));
+
+ // Apply the transfer function for all Stmts in the block.
+ for (CFGBlock::const_reverse_iterator it = block->rbegin(),
+ ei = block->rend(); it != ei; ++it) {
+ const CFGElement &elem = *it;
+
+ if (Optional<CFGAutomaticObjDtor> Dtor =
+ elem.getAs<CFGAutomaticObjDtor>()) {
+ val.liveDecls = DSetFact.add(val.liveDecls, Dtor->getVarDecl());
+ continue;
+ }
+
+ if (!elem.getAs<CFGStmt>())
+ continue;
+
+ const Stmt *S = elem.castAs<CFGStmt>().getStmt();
+ TF.Visit(const_cast<Stmt*>(S));
+ stmtsToLiveness[S] = val;
+ }
+ return val;
+}
+
+void LiveVariables::runOnAllBlocks(LiveVariables::Observer &obs) {
+ const CFG *cfg = getImpl(impl).analysisContext.getCFG();
+ for (CFG::const_iterator it = cfg->begin(), ei = cfg->end(); it != ei; ++it)
+ getImpl(impl).runOnBlock(*it, getImpl(impl).blocksEndToLiveness[*it], &obs);
+}
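+
+// Editor's sketch of a hypothetical Observer client; the overridden hook
+// mirrors the observerKill() calls made from TransferFunctions above:
+// \code
+//   struct KillLogger : public LiveVariables::Observer {
+//     void observerKill(const DeclRefExpr *DR) override {
+//       llvm::errs() << "killed: " << DR->getDecl()->getName() << "\n";
+//     }
+//   };
+//   KillLogger Obs;
+//   LV->runOnAllBlocks(Obs); // LV: result of computeLiveness()
+// \endcode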
+
+LiveVariables::LiveVariables(void *im) : impl(im) {}
+
+LiveVariables::~LiveVariables() {
+ delete (LiveVariablesImpl*) impl;
+}
+
+LiveVariables *
+LiveVariables::computeLiveness(AnalysisDeclContext &AC,
+ bool killAtAssign) {
+
+ // No CFG? Bail out.
+ CFG *cfg = AC.getCFG();
+ if (!cfg)
+ return nullptr;
+
+ // The analysis currently has scalability issues for very large CFGs.
+ // Bail out if it looks too large.
+ if (cfg->getNumBlockIDs() > 300000)
+ return nullptr;
+
+ LiveVariablesImpl *LV = new LiveVariablesImpl(AC, killAtAssign);
+
+ // Construct the dataflow worklist. Enqueue the exit block as the
+ // start of the analysis.
+ DataflowWorklist worklist(*cfg, AC);
+ llvm::BitVector everAnalyzedBlock(cfg->getNumBlockIDs());
+
+ // FIXME: we should enqueue using post order.
+ for (CFG::const_iterator it = cfg->begin(), ei = cfg->end(); it != ei; ++it) {
+ const CFGBlock *block = *it;
+ worklist.enqueueBlock(block);
+
+    // FIXME: Scan for DeclRefExprs used in the LHS of an assignment.
+ // We need to do this because we lack context in the reverse analysis
+ // to determine if a DeclRefExpr appears in such a context, and thus
+ // doesn't constitute a "use".
+ if (killAtAssign)
+ for (CFGBlock::const_iterator bi = block->begin(), be = block->end();
+ bi != be; ++bi) {
+ if (Optional<CFGStmt> cs = bi->getAs<CFGStmt>()) {
+ if (const BinaryOperator *BO =
+ dyn_cast<BinaryOperator>(cs->getStmt())) {
+ if (BO->getOpcode() == BO_Assign) {
+ if (const DeclRefExpr *DR =
+ dyn_cast<DeclRefExpr>(BO->getLHS()->IgnoreParens())) {
+ LV->inAssignment[DR] = 1;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ worklist.sortWorklist();
+
+ while (const CFGBlock *block = worklist.dequeue()) {
+ // Determine if the block's end value has changed. If not, we
+ // have nothing left to do for this block.
+ LivenessValues &prevVal = LV->blocksEndToLiveness[block];
+
+ // Merge the values of all successor blocks.
+ LivenessValues val;
+ for (CFGBlock::const_succ_iterator it = block->succ_begin(),
+ ei = block->succ_end(); it != ei; ++it) {
+ if (const CFGBlock *succ = *it) {
+ val = LV->merge(val, LV->blocksBeginToLiveness[succ]);
+ }
+ }
+
+ if (!everAnalyzedBlock[block->getBlockID()])
+ everAnalyzedBlock[block->getBlockID()] = true;
+ else if (prevVal.equals(val))
+ continue;
+
+ prevVal = val;
+
+ // Update the dataflow value for the start of this block.
+ LV->blocksBeginToLiveness[block] = LV->runOnBlock(block, val);
+
+ // Enqueue the value to the predecessors.
+ worklist.enqueuePredecessors(block);
+ }
+
+ return new LiveVariables(LV);
+}
+
+void LiveVariables::dumpBlockLiveness(const SourceManager &M) {
+ getImpl(impl).dumpBlockLiveness(M);
+}
+
+void LiveVariablesImpl::dumpBlockLiveness(const SourceManager &M) {
+ std::vector<const CFGBlock *> vec;
+ for (llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues>::iterator
+ it = blocksEndToLiveness.begin(), ei = blocksEndToLiveness.end();
+ it != ei; ++it) {
+ vec.push_back(it->first);
+ }
+ std::sort(vec.begin(), vec.end(), [](const CFGBlock *A, const CFGBlock *B) {
+ return A->getBlockID() < B->getBlockID();
+ });
+
+ std::vector<const VarDecl*> declVec;
+
+ for (std::vector<const CFGBlock *>::iterator
+ it = vec.begin(), ei = vec.end(); it != ei; ++it) {
+ llvm::errs() << "\n[ B" << (*it)->getBlockID()
+ << " (live variables at block exit) ]\n";
+
+ LiveVariables::LivenessValues vals = blocksEndToLiveness[*it];
+ declVec.clear();
+
+ for (llvm::ImmutableSet<const VarDecl *>::iterator si =
+ vals.liveDecls.begin(),
+ se = vals.liveDecls.end(); si != se; ++si) {
+ declVec.push_back(*si);
+ }
+
+ std::sort(declVec.begin(), declVec.end(), [](const Decl *A, const Decl *B) {
+ return A->getLocStart() < B->getLocStart();
+ });
+
+ for (std::vector<const VarDecl*>::iterator di = declVec.begin(),
+ de = declVec.end(); di != de; ++di) {
+ llvm::errs() << " " << (*di)->getDeclName().getAsString()
+ << " <";
+ (*di)->getLocation().dump(M);
+ llvm::errs() << ">\n";
+ }
+ }
+ llvm::errs() << "\n";
+}
+
+const void *LiveVariables::getTag() { static int x; return &x; }
+const void *RelaxedLiveVariables::getTag() { static int x; return &x; }
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ObjCNoReturn.cpp b/contrib/llvm/tools/clang/lib/Analysis/ObjCNoReturn.cpp
new file mode 100644
index 0000000..52d844b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/ObjCNoReturn.cpp
@@ -0,0 +1,67 @@
+//= ObjCNoReturn.cpp - Handling of Cocoa APIs known not to return --*- C++ -*---
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements special handling for recognizing ObjC APIs that are
+// known not to return but aren't marked as such in their API headers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Analysis/DomainSpecific/ObjCNoReturn.h"
+
+using namespace clang;
+
+static bool isSubclass(const ObjCInterfaceDecl *Class, IdentifierInfo *II) {
+ if (!Class)
+ return false;
+ if (Class->getIdentifier() == II)
+ return true;
+ return isSubclass(Class->getSuperClass(), II);
+}
+
+ObjCNoReturn::ObjCNoReturn(ASTContext &C)
+ : RaiseSel(GetNullarySelector("raise", C)),
+ NSExceptionII(&C.Idents.get("NSException"))
+{
+ // Generate selectors.
+ SmallVector<IdentifierInfo*, 3> II;
+
+ // raise:format:
+ II.push_back(&C.Idents.get("raise"));
+ II.push_back(&C.Idents.get("format"));
+ NSExceptionInstanceRaiseSelectors[0] =
+ C.Selectors.getSelector(II.size(), &II[0]);
+
+ // raise:format:arguments:
+ II.push_back(&C.Idents.get("arguments"));
+ NSExceptionInstanceRaiseSelectors[1] =
+ C.Selectors.getSelector(II.size(), &II[0]);
+}
+
+
+bool ObjCNoReturn::isImplicitNoReturn(const ObjCMessageExpr *ME) {
+ Selector S = ME->getSelector();
+
+ if (ME->isInstanceMessage()) {
+ // Check for the "raise" message.
+ return S == RaiseSel;
+ }
+
+ if (const ObjCInterfaceDecl *ID = ME->getReceiverInterface()) {
+ if (isSubclass(ID, NSExceptionII)) {
+ for (unsigned i = 0; i < NUM_RAISE_SELECTORS; ++i) {
+ if (S == NSExceptionInstanceRaiseSelectors[i])
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
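+
+// Editor's examples of the Objective-C messages recognized above as
+// implicitly noreturn:
+// \code
+//   [NSException raise:@"Oops" format:@"%@", reason]; // NSException subclass
+//   [exn raise];                                      // any instance 'raise'
+// \endcode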
diff --git a/contrib/llvm/tools/clang/lib/Analysis/PostOrderCFGView.cpp b/contrib/llvm/tools/clang/lib/Analysis/PostOrderCFGView.cpp
new file mode 100644
index 0000000..5a3c818
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/PostOrderCFGView.cpp
@@ -0,0 +1,49 @@
+//===- PostOrderCFGView.cpp - Post order view of CFG blocks -------*- C++ --*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a post-order view of the blocks in a CFG.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/PostOrderCFGView.h"
+
+using namespace clang;
+
+void PostOrderCFGView::anchor() { }
+
+PostOrderCFGView::PostOrderCFGView(const CFG *cfg) {
+ Blocks.reserve(cfg->getNumBlockIDs());
+ CFGBlockSet BSet(cfg);
+
+ for (po_iterator I = po_iterator::begin(cfg, BSet),
+ E = po_iterator::end(cfg, BSet); I != E; ++I) {
+ BlockOrder[*I] = Blocks.size() + 1;
+ Blocks.push_back(*I);
+ }
+}
+
+PostOrderCFGView *PostOrderCFGView::create(AnalysisDeclContext &ctx) {
+ const CFG *cfg = ctx.getCFG();
+ if (!cfg)
+ return nullptr;
+ return new PostOrderCFGView(cfg);
+}
+
+const void *PostOrderCFGView::getTag() { static int x; return &x; }
+
+bool PostOrderCFGView::BlockOrderCompare::operator()(const CFGBlock *b1,
+ const CFGBlock *b2) const {
+ PostOrderCFGView::BlockOrderTy::const_iterator b1It = POV.BlockOrder.find(b1);
+ PostOrderCFGView::BlockOrderTy::const_iterator b2It = POV.BlockOrder.find(b2);
+
+ unsigned b1V = (b1It == POV.BlockOrder.end()) ? 0 : b1It->second;
+ unsigned b2V = (b2It == POV.BlockOrder.end()) ? 0 : b2It->second;
+ return b1V > b2V;
+}
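+
+// Editor's sketch of how clients use this comparator; LiveVariables.cpp in
+// this same change sorts its dataflow worklist with it:
+// \code
+//   PostOrderCFGView *POV = AC.getAnalysis<PostOrderCFGView>();
+//   std::sort(Worklist.begin(), Worklist.end(), POV->getComparator());
+// \endcode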
+
diff --git a/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
new file mode 100644
index 0000000..f0976bc
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
@@ -0,0 +1,934 @@
+//== PrintfFormatString.cpp - Analysis of printf format strings --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Handling of format strings in printf and friends. The structure of format
+// strings for fprintf() is described in C99 7.19.6.1.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/FormatString.h"
+#include "FormatStringParsing.h"
+#include "clang/Basic/TargetInfo.h"
+
+using clang::analyze_format_string::ArgType;
+using clang::analyze_format_string::FormatStringHandler;
+using clang::analyze_format_string::LengthModifier;
+using clang::analyze_format_string::OptionalAmount;
+using clang::analyze_format_string::ConversionSpecifier;
+using clang::analyze_printf::PrintfSpecifier;
+
+using namespace clang;
+
+typedef clang::analyze_format_string::SpecifierResult<PrintfSpecifier>
+ PrintfSpecifierResult;
+
+//===----------------------------------------------------------------------===//
+// Methods for parsing format strings.
+//===----------------------------------------------------------------------===//
+
+using analyze_format_string::ParseNonPositionAmount;
+
+static bool ParsePrecision(FormatStringHandler &H, PrintfSpecifier &FS,
+ const char *Start, const char *&Beg, const char *E,
+ unsigned *argIndex) {
+ if (argIndex) {
+ FS.setPrecision(ParseNonPositionAmount(Beg, E, *argIndex));
+ } else {
+ const OptionalAmount Amt = ParsePositionAmount(H, Start, Beg, E,
+ analyze_format_string::PrecisionPos);
+ if (Amt.isInvalid())
+ return true;
+ FS.setPrecision(Amt);
+ }
+ return false;
+}
+
+static bool ParseObjCFlags(FormatStringHandler &H, PrintfSpecifier &FS,
+ const char *FlagBeg, const char *E, bool Warn) {
+ StringRef Flag(FlagBeg, E - FlagBeg);
+ // Currently there is only one flag.
+ if (Flag == "tt") {
+ FS.setHasObjCTechnicalTerm(FlagBeg);
+ return false;
+ }
+ // Handle either the case of no flag or an invalid flag.
+ if (Warn) {
+ if (Flag == "")
+ H.HandleEmptyObjCModifierFlag(FlagBeg, E - FlagBeg);
+ else
+ H.HandleInvalidObjCModifierFlag(FlagBeg, E - FlagBeg);
+ }
+ return true;
+}
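+
+// Editor's example: the Objective-C modifier flags handled by this helper
+// appear between '[' and ']' ahead of the conversion character:
+// \code
+//   NSLog(@"%[tt]@", str); // 'tt' is currently the only recognized flag
+//   NSLog(@"%[zz]@", str); // unknown flag: HandleInvalidObjCModifierFlag
+// \endcode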
+
+static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
+ const char *&Beg,
+ const char *E,
+ unsigned &argIndex,
+ const LangOptions &LO,
+ const TargetInfo &Target,
+ bool Warn,
+ bool isFreeBSDKPrintf) {
+
+ using namespace clang::analyze_format_string;
+ using namespace clang::analyze_printf;
+
+ const char *I = Beg;
+ const char *Start = nullptr;
+ UpdateOnReturn <const char*> UpdateBeg(Beg, I);
+
+ // Look for a '%' character that indicates the start of a format specifier.
+ for ( ; I != E ; ++I) {
+ char c = *I;
+ if (c == '\0') {
+ // Detect spurious null characters, which are likely errors.
+ H.HandleNullChar(I);
+ return true;
+ }
+ if (c == '%') {
+ Start = I++; // Record the start of the format specifier.
+ break;
+ }
+ }
+
+ // No format specifier found?
+ if (!Start)
+ return false;
+
+ if (I == E) {
+ // No more characters left?
+ if (Warn)
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ PrintfSpecifier FS;
+ if (ParseArgPosition(H, FS, Start, I, E))
+ return true;
+
+ if (I == E) {
+ // No more characters left?
+ if (Warn)
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ // Look for flags (if any).
+ bool hasMore = true;
+ for ( ; I != E; ++I) {
+ switch (*I) {
+ default: hasMore = false; break;
+ case '\'':
+ // FIXME: POSIX specific. Always accept?
+ FS.setHasThousandsGrouping(I);
+ break;
+ case '-': FS.setIsLeftJustified(I); break;
+ case '+': FS.setHasPlusPrefix(I); break;
+ case ' ': FS.setHasSpacePrefix(I); break;
+ case '#': FS.setHasAlternativeForm(I); break;
+ case '0': FS.setHasLeadingZeros(I); break;
+ }
+ if (!hasMore)
+ break;
+ }
+
+ if (I == E) {
+ // No more characters left?
+ if (Warn)
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ // Look for the field width (if any).
+ if (ParseFieldWidth(H, FS, Start, I, E,
+ FS.usesPositionalArg() ? nullptr : &argIndex))
+ return true;
+
+ if (I == E) {
+ // No more characters left?
+ if (Warn)
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ // Look for the precision (if any).
+ if (*I == '.') {
+ ++I;
+ if (I == E) {
+ if (Warn)
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ if (ParsePrecision(H, FS, Start, I, E,
+ FS.usesPositionalArg() ? nullptr : &argIndex))
+ return true;
+
+ if (I == E) {
+ // No more characters left?
+ if (Warn)
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+ }
+
+ // Look for the length modifier.
+ if (ParseLengthModifier(FS, I, E, LO) && I == E) {
+ // No more characters left?
+ if (Warn)
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ // Look for the Objective-C modifier flags, if any.
+ // We parse these here, even if they don't apply to
+ // the conversion specifier, and then emit an error
+ // later if the conversion specifier isn't '@'. This
+ // enables better recovery, and we don't know if
+ // these flags are applicable until later.
+ const char *ObjCModifierFlagsStart = nullptr,
+ *ObjCModifierFlagsEnd = nullptr;
+ if (*I == '[') {
+ ObjCModifierFlagsStart = I;
+ ++I;
+ auto flagStart = I;
+ for (;; ++I) {
+ ObjCModifierFlagsEnd = I;
+ if (I == E) {
+ if (Warn)
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+ // Did we find the closing ']'?
+ if (*I == ']') {
+ if (ParseObjCFlags(H, FS, flagStart, I, Warn))
+ return true;
+ ++I;
+ break;
+ }
+ // There are no separators defined yet for multiple
+ // Objective-C modifier flags. When those are
+ // defined, this is the place to check.
+ }
+ }
+
+ if (*I == '\0') {
+ // Detect spurious null characters, which are likely errors.
+ H.HandleNullChar(I);
+ return true;
+ }
+
+ // Finally, look for the conversion specifier.
+ const char *conversionPosition = I++;
+ ConversionSpecifier::Kind k = ConversionSpecifier::InvalidSpecifier;
+ switch (*conversionPosition) {
+ default:
+ break;
+ // C99: 7.19.6.1 (section 8).
+ case '%': k = ConversionSpecifier::PercentArg; break;
+ case 'A': k = ConversionSpecifier::AArg; break;
+ case 'E': k = ConversionSpecifier::EArg; break;
+ case 'F': k = ConversionSpecifier::FArg; break;
+ case 'G': k = ConversionSpecifier::GArg; break;
+ case 'X': k = ConversionSpecifier::XArg; break;
+ case 'a': k = ConversionSpecifier::aArg; break;
+ case 'c': k = ConversionSpecifier::cArg; break;
+ case 'd': k = ConversionSpecifier::dArg; break;
+ case 'e': k = ConversionSpecifier::eArg; break;
+ case 'f': k = ConversionSpecifier::fArg; break;
+ case 'g': k = ConversionSpecifier::gArg; break;
+ case 'i': k = ConversionSpecifier::iArg; break;
+ case 'n': k = ConversionSpecifier::nArg; break;
+ case 'o': k = ConversionSpecifier::oArg; break;
+ case 'p': k = ConversionSpecifier::pArg; break;
+ case 's': k = ConversionSpecifier::sArg; break;
+ case 'u': k = ConversionSpecifier::uArg; break;
+ case 'x': k = ConversionSpecifier::xArg; break;
+ // POSIX specific.
+ case 'C': k = ConversionSpecifier::CArg; break;
+ case 'S': k = ConversionSpecifier::SArg; break;
+ // Objective-C.
+ case '@': k = ConversionSpecifier::ObjCObjArg; break;
+ // Glibc specific.
+ case 'm': k = ConversionSpecifier::PrintErrno; break;
+ // FreeBSD kernel specific.
+ case 'b':
+ if (isFreeBSDKPrintf)
+ k = ConversionSpecifier::FreeBSDbArg; // int followed by char *
+ break;
+ case 'r':
+ if (isFreeBSDKPrintf)
+ k = ConversionSpecifier::FreeBSDrArg; // int
+ break;
+ case 'y':
+ if (isFreeBSDKPrintf)
+ k = ConversionSpecifier::FreeBSDyArg; // int
+ break;
+ // Apple-specific.
+ case 'D':
+ if (isFreeBSDKPrintf)
+ k = ConversionSpecifier::FreeBSDDArg; // void * followed by char *
+ else if (Target.getTriple().isOSDarwin())
+ k = ConversionSpecifier::DArg;
+ break;
+ case 'O':
+ if (Target.getTriple().isOSDarwin())
+ k = ConversionSpecifier::OArg;
+ break;
+ case 'U':
+ if (Target.getTriple().isOSDarwin())
+ k = ConversionSpecifier::UArg;
+ break;
+ // MS specific.
+ case 'Z':
+ if (Target.getTriple().isOSMSVCRT())
+ k = ConversionSpecifier::ZArg;
+ }
+
+ // Check to see if we used the Objective-C modifier flags with
+ // a conversion specifier other than '@'.
+ if (k != ConversionSpecifier::ObjCObjArg &&
+ k != ConversionSpecifier::InvalidSpecifier &&
+ ObjCModifierFlagsStart) {
+ H.HandleObjCFlagsWithNonObjCConversion(ObjCModifierFlagsStart,
+ ObjCModifierFlagsEnd + 1,
+ conversionPosition);
+ return true;
+ }
+
+ PrintfConversionSpecifier CS(conversionPosition, k);
+ FS.setConversionSpecifier(CS);
+ if (CS.consumesDataArgument() && !FS.usesPositionalArg())
+ FS.setArgIndex(argIndex++);
+ // FreeBSD kernel specific.
+ if (k == ConversionSpecifier::FreeBSDbArg ||
+ k == ConversionSpecifier::FreeBSDDArg)
+ argIndex++;
+
+ if (k == ConversionSpecifier::InvalidSpecifier) {
+ // Assume the conversion takes one argument.
+ return !H.HandleInvalidPrintfConversionSpecifier(FS, Start, I - Start);
+ }
+ return PrintfSpecifierResult(Start, FS);
+}
+
+bool clang::analyze_format_string::ParsePrintfString(FormatStringHandler &H,
+ const char *I,
+ const char *E,
+ const LangOptions &LO,
+ const TargetInfo &Target,
+ bool isFreeBSDKPrintf) {
+
+ unsigned argIndex = 0;
+
+ // Keep looking for a format specifier until we have exhausted the string.
+ while (I != E) {
+ const PrintfSpecifierResult &FSR = ParsePrintfSpecifier(H, I, E, argIndex,
+ LO, Target, true,
+ isFreeBSDKPrintf);
+ // Did a fail-stop error of any kind occur when parsing the specifier?
+ // If so, don't do any more processing.
+ if (FSR.shouldStop())
+ return true;
+ // Did we exhaust the string or encounter an error that
+ // we can recover from?
+ if (!FSR.hasValue())
+ continue;
+ // We have a format specifier. Pass it to the callback.
+ if (!H.HandlePrintfSpecifier(FSR.getValue(), FSR.getStart(),
+ I - FSR.getStart()))
+ return true;
+ }
+ assert(I == E && "Format string not exhausted");
+ return false;
+}
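+
+// Editor's sketch of a minimal client: a FormatStringHandler subclass whose
+// HandlePrintfSpecifier override matches the callback invoked above (return
+// true to keep parsing); 'Str', 'Len', 'LO', and 'Target' are assumed inputs.
+// \code
+//   struct CountSpecifiers : analyze_format_string::FormatStringHandler {
+//     unsigned N = 0;
+//     bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
+//                                const char *Start,
+//                                unsigned SpecifierLen) override {
+//       ++N;
+//       return true;
+//     }
+//   };
+//   CountSpecifiers H;
+//   analyze_format_string::ParsePrintfString(H, Str, Str + Len, LO, Target,
+//                                            /*isFreeBSDKPrintf=*/false);
+// \endcode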
+
+bool clang::analyze_format_string::ParseFormatStringHasSArg(const char *I,
+ const char *E,
+ const LangOptions &LO,
+ const TargetInfo &Target) {
+
+ unsigned argIndex = 0;
+
+ // Keep looking for a %s format specifier until we have exhausted the string.
+ FormatStringHandler H;
+ while (I != E) {
+ const PrintfSpecifierResult &FSR = ParsePrintfSpecifier(H, I, E, argIndex,
+ LO, Target, false,
+ false);
+ // Did a fail-stop error of any kind occur when parsing the specifier?
+ // If so, don't do any more processing.
+ if (FSR.shouldStop())
+ return false;
+ // Did we exhaust the string or encounter an error that
+ // we can recover from?
+ if (!FSR.hasValue())
+ continue;
+ const analyze_printf::PrintfSpecifier &FS = FSR.getValue();
+    // Return true if this is a %s format specifier.
+ if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::Kind::sArg)
+ return true;
+ }
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Methods on PrintfSpecifier.
+//===----------------------------------------------------------------------===//
+
+ArgType PrintfSpecifier::getArgType(ASTContext &Ctx,
+ bool IsObjCLiteral) const {
+ const PrintfConversionSpecifier &CS = getConversionSpecifier();
+
+ if (!CS.consumesDataArgument())
+ return ArgType::Invalid();
+
+ if (CS.getKind() == ConversionSpecifier::cArg)
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return Ctx.IntTy;
+ case LengthModifier::AsLong:
+ case LengthModifier::AsWide:
+ return ArgType(ArgType::WIntTy, "wint_t");
+ case LengthModifier::AsShort:
+ if (Ctx.getTargetInfo().getTriple().isOSMSVCRT())
+ return Ctx.IntTy;
+ default:
+ return ArgType::Invalid();
+ }
+
+ if (CS.isIntArg())
+ switch (LM.getKind()) {
+ case LengthModifier::AsLongDouble:
+ // GNU extension.
+ return Ctx.LongLongTy;
+ case LengthModifier::None:
+ return Ctx.IntTy;
+ case LengthModifier::AsInt32:
+ return ArgType(Ctx.IntTy, "__int32");
+ case LengthModifier::AsChar: return ArgType::AnyCharTy;
+ case LengthModifier::AsShort: return Ctx.ShortTy;
+ case LengthModifier::AsLong: return Ctx.LongTy;
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
+ return Ctx.LongLongTy;
+ case LengthModifier::AsInt64:
+ return ArgType(Ctx.LongLongTy, "__int64");
+ case LengthModifier::AsIntMax:
+ return ArgType(Ctx.getIntMaxType(), "intmax_t");
+ case LengthModifier::AsSizeT:
+ // FIXME: How to get the corresponding signed version of size_t?
+ return ArgType();
+ case LengthModifier::AsInt3264:
+ return Ctx.getTargetInfo().getTriple().isArch64Bit()
+ ? ArgType(Ctx.LongLongTy, "__int64")
+ : ArgType(Ctx.IntTy, "__int32");
+ case LengthModifier::AsPtrDiff:
+ return ArgType(Ctx.getPointerDiffType(), "ptrdiff_t");
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ case LengthModifier::AsWide:
+ return ArgType::Invalid();
+ }
+
+ if (CS.isUIntArg())
+ switch (LM.getKind()) {
+ case LengthModifier::AsLongDouble:
+ // GNU extension.
+ return Ctx.UnsignedLongLongTy;
+ case LengthModifier::None:
+ return Ctx.UnsignedIntTy;
+ case LengthModifier::AsInt32:
+ return ArgType(Ctx.UnsignedIntTy, "unsigned __int32");
+ case LengthModifier::AsChar: return Ctx.UnsignedCharTy;
+ case LengthModifier::AsShort: return Ctx.UnsignedShortTy;
+ case LengthModifier::AsLong: return Ctx.UnsignedLongTy;
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
+ return Ctx.UnsignedLongLongTy;
+ case LengthModifier::AsInt64:
+ return ArgType(Ctx.UnsignedLongLongTy, "unsigned __int64");
+ case LengthModifier::AsIntMax:
+ return ArgType(Ctx.getUIntMaxType(), "uintmax_t");
+ case LengthModifier::AsSizeT:
+ return ArgType(Ctx.getSizeType(), "size_t");
+ case LengthModifier::AsInt3264:
+ return Ctx.getTargetInfo().getTriple().isArch64Bit()
+ ? ArgType(Ctx.UnsignedLongLongTy, "unsigned __int64")
+ : ArgType(Ctx.UnsignedIntTy, "unsigned __int32");
+ case LengthModifier::AsPtrDiff:
+ // FIXME: How to get the corresponding unsigned
+ // version of ptrdiff_t?
+ return ArgType();
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ case LengthModifier::AsWide:
+ return ArgType::Invalid();
+ }
+
+ if (CS.isDoubleArg()) {
+ if (LM.getKind() == LengthModifier::AsLongDouble)
+ return Ctx.LongDoubleTy;
+ return Ctx.DoubleTy;
+ }
+
+ if (CS.getKind() == ConversionSpecifier::nArg) {
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return ArgType::PtrTo(Ctx.IntTy);
+ case LengthModifier::AsChar:
+ return ArgType::PtrTo(Ctx.SignedCharTy);
+ case LengthModifier::AsShort:
+ return ArgType::PtrTo(Ctx.ShortTy);
+ case LengthModifier::AsLong:
+ return ArgType::PtrTo(Ctx.LongTy);
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
+ return ArgType::PtrTo(Ctx.LongLongTy);
+ case LengthModifier::AsIntMax:
+ return ArgType::PtrTo(ArgType(Ctx.getIntMaxType(), "intmax_t"));
+ case LengthModifier::AsSizeT:
+ return ArgType(); // FIXME: ssize_t
+ case LengthModifier::AsPtrDiff:
+ return ArgType::PtrTo(ArgType(Ctx.getPointerDiffType(), "ptrdiff_t"));
+ case LengthModifier::AsLongDouble:
+ return ArgType(); // FIXME: Is this a known extension?
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ case LengthModifier::AsInt32:
+ case LengthModifier::AsInt3264:
+ case LengthModifier::AsInt64:
+ case LengthModifier::AsWide:
+ return ArgType::Invalid();
+ }
+ }
+
+ switch (CS.getKind()) {
+ case ConversionSpecifier::sArg:
+ if (LM.getKind() == LengthModifier::AsWideChar) {
+ if (IsObjCLiteral)
+ return ArgType(Ctx.getPointerType(Ctx.UnsignedShortTy.withConst()),
+ "const unichar *");
+ return ArgType(ArgType::WCStrTy, "wchar_t *");
+ }
+ if (LM.getKind() == LengthModifier::AsWide)
+ return ArgType(ArgType::WCStrTy, "wchar_t *");
+ return ArgType::CStrTy;
+ case ConversionSpecifier::SArg:
+ if (IsObjCLiteral)
+ return ArgType(Ctx.getPointerType(Ctx.UnsignedShortTy.withConst()),
+ "const unichar *");
+ if (Ctx.getTargetInfo().getTriple().isOSMSVCRT() &&
+ LM.getKind() == LengthModifier::AsShort)
+ return ArgType::CStrTy;
+ return ArgType(ArgType::WCStrTy, "wchar_t *");
+ case ConversionSpecifier::CArg:
+ if (IsObjCLiteral)
+ return ArgType(Ctx.UnsignedShortTy, "unichar");
+ if (Ctx.getTargetInfo().getTriple().isOSMSVCRT() &&
+ LM.getKind() == LengthModifier::AsShort)
+ return Ctx.IntTy;
+ return ArgType(Ctx.WideCharTy, "wchar_t");
+ case ConversionSpecifier::pArg:
+ return ArgType::CPointerTy;
+ case ConversionSpecifier::ObjCObjArg:
+ return ArgType::ObjCPointerTy;
+ default:
+ break;
+ }
+
+ // FIXME: Handle other cases.
+ return ArgType();
+}
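+
+// Editor's examples of the mapping implemented above (C99 cases):
+// \code
+//   printf("%hhd", c); // AsChar  + dArg -> ArgType::AnyCharTy
+//   printf("%zu",  n); // AsSizeT + uArg -> ArgType(Ctx.getSizeType(), "size_t")
+//   printf("%Lf",  d); // AsLongDouble + fArg -> Ctx.LongDoubleTy
+// \endcode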
+
+bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
+ ASTContext &Ctx, bool IsObjCLiteral) {
+ // %n is different from other conversion specifiers; don't try to fix it.
+ if (CS.getKind() == ConversionSpecifier::nArg)
+ return false;
+
+ // Handle Objective-C objects first. Note that while the '%@' specifier will
+ // not warn for structure pointer or void pointer arguments (because that's
+ // how CoreFoundation objects are implemented), we only show a fixit for '%@'
+ // if we know it's an object (block, id, class, or __attribute__((NSObject))).
+ if (QT->isObjCRetainableType()) {
+ if (!IsObjCLiteral)
+ return false;
+
+ CS.setKind(ConversionSpecifier::ObjCObjArg);
+
+ // Disable irrelevant flags
+ HasThousandsGrouping = false;
+ HasPlusPrefix = false;
+ HasSpacePrefix = false;
+ HasAlternativeForm = false;
+ HasLeadingZeroes = false;
+ Precision.setHowSpecified(OptionalAmount::NotSpecified);
+ LM.setKind(LengthModifier::None);
+
+ return true;
+ }
+
+ // Handle strings next (char *, wchar_t *)
+ if (QT->isPointerType() && (QT->getPointeeType()->isAnyCharacterType())) {
+ CS.setKind(ConversionSpecifier::sArg);
+
+ // Disable irrelevant flags
+ HasAlternativeForm = 0;
+ HasLeadingZeroes = 0;
+
+ // Set the long length modifier for wide characters
+ if (QT->getPointeeType()->isWideCharType())
+ LM.setKind(LengthModifier::AsWideChar);
+ else
+ LM.setKind(LengthModifier::None);
+
+ return true;
+ }
+
+ // If it's an enum, get its underlying type.
+ if (const EnumType *ETy = QT->getAs<EnumType>())
+ QT = ETy->getDecl()->getIntegerType();
+
+ // We can only work with builtin types.
+ const BuiltinType *BT = QT->getAs<BuiltinType>();
+ if (!BT)
+ return false;
+
+ // Set length modifier
+ switch (BT->getKind()) {
+ case BuiltinType::Bool:
+ case BuiltinType::WChar_U:
+ case BuiltinType::WChar_S:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::UInt128:
+ case BuiltinType::Int128:
+ case BuiltinType::Half:
+ // Various types which are non-trivial to correct.
+ return false;
+
+#define SIGNED_TYPE(Id, SingletonId)
+#define UNSIGNED_TYPE(Id, SingletonId)
+#define FLOATING_TYPE(Id, SingletonId)
+#define BUILTIN_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+ // Misc other stuff which doesn't make sense here.
+ return false;
+
+ case BuiltinType::UInt:
+ case BuiltinType::Int:
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ LM.setKind(LengthModifier::None);
+ break;
+
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ LM.setKind(LengthModifier::AsChar);
+ break;
+
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ LM.setKind(LengthModifier::AsShort);
+ break;
+
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ LM.setKind(LengthModifier::AsLong);
+ break;
+
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ LM.setKind(LengthModifier::AsLongLong);
+ break;
+
+ case BuiltinType::LongDouble:
+ LM.setKind(LengthModifier::AsLongDouble);
+ break;
+ }
+
+ // Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99.
+ if (isa<TypedefType>(QT) && (LangOpt.C99 || LangOpt.CPlusPlus11))
+ namedTypeToLengthModifier(QT, LM);
+
+ // If fixing the length modifier was enough, we might be done.
+ if (hasValidLengthModifier(Ctx.getTargetInfo())) {
+ // If we're going to offer a fix anyway, make sure the sign matches.
+ switch (CS.getKind()) {
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::UArg:
+ if (QT->isSignedIntegerType())
+ CS.setKind(clang::analyze_format_string::ConversionSpecifier::dArg);
+ break;
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::DArg:
+ case ConversionSpecifier::iArg:
+ if (QT->isUnsignedIntegerType() && !HasPlusPrefix)
+ CS.setKind(clang::analyze_format_string::ConversionSpecifier::uArg);
+ break;
+ default:
+ // Other specifiers do not have signed/unsigned variants.
+ break;
+ }
+
+ const analyze_printf::ArgType &ATR = getArgType(Ctx, IsObjCLiteral);
+ if (ATR.isValid() && ATR.matchesType(Ctx, QT))
+ return true;
+ }
+
+ // Set conversion specifier and disable any flags which do not apply to it.
+ // Let typedefs to char fall through to int, as %c is silly for uint8_t.
+ if (!isa<TypedefType>(QT) && QT->isCharType()) {
+ CS.setKind(ConversionSpecifier::cArg);
+ LM.setKind(LengthModifier::None);
+ Precision.setHowSpecified(OptionalAmount::NotSpecified);
+ HasAlternativeForm = 0;
+ HasLeadingZeroes = 0;
+ HasPlusPrefix = 0;
+ }
+ // Test for Floating type first as LongDouble can pass isUnsignedIntegerType
+ else if (QT->isRealFloatingType()) {
+ CS.setKind(ConversionSpecifier::fArg);
+ }
+ else if (QT->isSignedIntegerType()) {
+ CS.setKind(ConversionSpecifier::dArg);
+ HasAlternativeForm = 0;
+ }
+ else if (QT->isUnsignedIntegerType()) {
+ CS.setKind(ConversionSpecifier::uArg);
+ HasAlternativeForm = 0;
+ HasPlusPrefix = 0;
+ } else {
+ llvm_unreachable("Unexpected type");
+ }
+
+ return true;
+}
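+
+// Editor's illustration of the corrections fixType() produces (assuming C99
+// mode for the 'z' length modifier):
+// \code
+//   long   l = 0; printf("%d", l); // specifier corrected to "%ld"
+//   size_t n = 0; printf("%d", n); // corrected to "%zu" via AsSizeT
+//   int    i = 0; printf("%u", i); // sign mismatch corrected to "%d"
+// \endcode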
+
+void PrintfSpecifier::toString(raw_ostream &os) const {
+ // Whilst some features have no defined order, we are using the order
+ // appearing in the C99 standard (ISO/IEC 9899:1999 (E) 7.19.6.1)
+ os << "%";
+
+ // Positional args
+ if (usesPositionalArg()) {
+ os << getPositionalArgIndex() << "$";
+ }
+
+ // Conversion flags
+ if (IsLeftJustified) os << "-";
+ if (HasPlusPrefix) os << "+";
+ if (HasSpacePrefix) os << " ";
+ if (HasAlternativeForm) os << "#";
+ if (HasLeadingZeroes) os << "0";
+
+ // Minimum field width
+ FieldWidth.toString(os);
+ // Precision
+ Precision.toString(os);
+ // Length modifier
+ os << LM.toString();
+ // Conversion specifier
+ os << CS.toString();
+}
+
+bool PrintfSpecifier::hasValidPlusPrefix() const {
+ if (!HasPlusPrefix)
+ return true;
+
+ // The plus prefix only makes sense for signed conversions
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::DArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::FreeBSDrArg:
+ case ConversionSpecifier::FreeBSDyArg:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool PrintfSpecifier::hasValidAlternativeForm() const {
+ if (!HasAlternativeForm)
+ return true;
+
+ // Alternate form flag only valid with the oxXaAeEfFgG conversions
+ switch (CS.getKind()) {
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::OArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::FreeBSDrArg:
+ case ConversionSpecifier::FreeBSDyArg:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool PrintfSpecifier::hasValidLeadingZeros() const {
+ if (!HasLeadingZeroes)
+ return true;
+
+ // Leading zeroes flag only valid with the diouxXaAeEfFgG conversions
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::DArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::OArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::UArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::FreeBSDrArg:
+ case ConversionSpecifier::FreeBSDyArg:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool PrintfSpecifier::hasValidSpacePrefix() const {
+ if (!HasSpacePrefix)
+ return true;
+
+ // The space prefix only makes sense for signed conversions
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::DArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::FreeBSDrArg:
+ case ConversionSpecifier::FreeBSDyArg:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool PrintfSpecifier::hasValidLeftJustified() const {
+ if (!IsLeftJustified)
+ return true;
+
+ // The left justified flag is valid for all conversions except n
+ switch (CS.getKind()) {
+ case ConversionSpecifier::nArg:
+ return false;
+
+ default:
+ return true;
+ }
+}
+
+bool PrintfSpecifier::hasValidThousandsGroupingPrefix() const {
+ if (!HasThousandsGrouping)
+ return true;
+
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::DArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::UArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool PrintfSpecifier::hasValidPrecision() const {
+ if (Precision.getHowSpecified() == OptionalAmount::NotSpecified)
+ return true;
+
+ // Precision is only valid with the diouxXaAeEfFgGs conversions
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::DArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::OArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::UArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::sArg:
+ case ConversionSpecifier::FreeBSDrArg:
+ case ConversionSpecifier::FreeBSDyArg:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool PrintfSpecifier::hasValidFieldWidth() const {
+ if (FieldWidth.getHowSpecified() == OptionalAmount::NotSpecified)
+ return true;
+
+ // The field width is valid for all conversions except n
+ switch (CS.getKind()) {
+ case ConversionSpecifier::nArg:
+ return false;
+
+ default:
+ return true;
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp b/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp
new file mode 100644
index 0000000..26b59bb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp
@@ -0,0 +1,52 @@
+//==- ProgramPoint.cpp - Program Points for Path-Sensitive Analysis -*- C++ -*-/
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface ProgramPoint, which identifies a
+// distinct location in a function.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/ProgramPoint.h"
+
+using namespace clang;
+
+ProgramPointTag::~ProgramPointTag() {}
+
+ProgramPoint ProgramPoint::getProgramPoint(const Stmt *S, ProgramPoint::Kind K,
+ const LocationContext *LC,
+ const ProgramPointTag *tag){
+ switch (K) {
+ default:
+ llvm_unreachable("Unhandled ProgramPoint kind");
+ case ProgramPoint::PreStmtKind:
+ return PreStmt(S, LC, tag);
+ case ProgramPoint::PostStmtKind:
+ return PostStmt(S, LC, tag);
+ case ProgramPoint::PreLoadKind:
+ return PreLoad(S, LC, tag);
+ case ProgramPoint::PostLoadKind:
+ return PostLoad(S, LC, tag);
+ case ProgramPoint::PreStoreKind:
+ return PreStore(S, LC, tag);
+ case ProgramPoint::PostLValueKind:
+ return PostLValue(S, LC, tag);
+ case ProgramPoint::PostStmtPurgeDeadSymbolsKind:
+ return PostStmtPurgeDeadSymbols(S, LC, tag);
+ case ProgramPoint::PreStmtPurgeDeadSymbolsKind:
+ return PreStmtPurgeDeadSymbols(S, LC, tag);
+ }
+}
+
+SimpleProgramPointTag::SimpleProgramPointTag(StringRef MsgProvider,
+ StringRef Msg)
+ : Desc((MsgProvider + " : " + Msg).str()) {}
+
+StringRef SimpleProgramPointTag::getTagDescription() const {
+ return Desc;
+}
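+
+// Editor's example of the description format built by the constructor:
+// \code
+//   SimpleProgramPointTag Tag("MyChecker", "post-call");
+//   // Tag.getTagDescription() == "MyChecker : post-call"
+// \endcode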
diff --git a/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp b/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp
new file mode 100644
index 0000000..5b917a7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp
@@ -0,0 +1,227 @@
+//== PseudoConstantAnalysis.cpp - Find Pseudoconstants in the AST-*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file tracks the usage of variables in a Decl body to see if they are
+// never written to, implying that they are constant. This is useful in static
+// analysis to see if a developer might have intended a variable to be const.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/PseudoConstantAnalysis.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Stmt.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include <deque>
+
+using namespace clang;
+
+// The number of ValueDecls we want to keep track of by default (per-function)
+#define VARDECL_SET_SIZE 256
+typedef llvm::SmallPtrSet<const VarDecl*, VARDECL_SET_SIZE> VarDeclSet;
+
+PseudoConstantAnalysis::PseudoConstantAnalysis(const Stmt *DeclBody) :
+ DeclBody(DeclBody), Analyzed(false) {
+ NonConstantsImpl = new VarDeclSet;
+ UsedVarsImpl = new VarDeclSet;
+}
+
+PseudoConstantAnalysis::~PseudoConstantAnalysis() {
+ delete (VarDeclSet*)NonConstantsImpl;
+ delete (VarDeclSet*)UsedVarsImpl;
+}
+
+// Returns true if the given VarDecl is never written to in the given DeclBody
+bool PseudoConstantAnalysis::isPseudoConstant(const VarDecl *VD) {
+ // Only local and static variables can be pseudoconstants
+ if (!VD->hasLocalStorage() && !VD->isStaticLocal())
+ return false;
+
+ if (!Analyzed) {
+ RunAnalysis();
+ Analyzed = true;
+ }
+
+ VarDeclSet *NonConstants = (VarDeclSet*)NonConstantsImpl;
+
+ return !NonConstants->count(VD);
+}
+
+// Returns true if the variable was used (self assignments don't count)
+bool PseudoConstantAnalysis::wasReferenced(const VarDecl *VD) {
+ if (!Analyzed) {
+ RunAnalysis();
+ Analyzed = true;
+ }
+
+ VarDeclSet *UsedVars = (VarDeclSet*)UsedVarsImpl;
+
+ return UsedVars->count(VD);
+}
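+
+// Editor's usage sketch; 'Body' is assumed to be the Stmt body passed to the
+// constructor and 'VD' one of the local variables it declares:
+// \code
+//   PseudoConstantAnalysis PCA(Body);
+//   if (PCA.isPseudoConstant(VD) && PCA.wasReferenced(VD)) {
+//     // 'VD' is read but never written: it could have been declared const.
+//   }
+// \endcode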
+
+// Returns the Decl referenced by a DeclRefExpr (if any)
+const Decl *PseudoConstantAnalysis::getDecl(const Expr *E) {
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E))
+ return DR->getDecl();
+ else
+ return nullptr;
+}
+
+void PseudoConstantAnalysis::RunAnalysis() {
+ std::deque<const Stmt *> WorkList;
+ VarDeclSet *NonConstants = (VarDeclSet*)NonConstantsImpl;
+ VarDeclSet *UsedVars = (VarDeclSet*)UsedVarsImpl;
+
+ // Start with the top level statement of the function
+ WorkList.push_back(DeclBody);
+
+ while (!WorkList.empty()) {
+ const Stmt *Head = WorkList.front();
+ WorkList.pop_front();
+
+ if (const Expr *Ex = dyn_cast<Expr>(Head))
+ Head = Ex->IgnoreParenCasts();
+
+ switch (Head->getStmtClass()) {
+ // Case 1: Assignment operators modifying VarDecls
+ case Stmt::BinaryOperatorClass: {
+ const BinaryOperator *BO = cast<BinaryOperator>(Head);
+ // Look for a Decl on the LHS
+ const Decl *LHSDecl = getDecl(BO->getLHS()->IgnoreParenCasts());
+ if (!LHSDecl)
+ break;
+
+ // We found a binary operator with a DeclRefExpr on the LHS. We now check
+ // for any of the assignment operators, implying that this Decl is being
+ // written to.
+ switch (BO->getOpcode()) {
+ // Self-assignments don't count as use of a variable
+ case BO_Assign: {
+ // Look for a DeclRef on the RHS
+ const Decl *RHSDecl = getDecl(BO->getRHS()->IgnoreParenCasts());
+
+ // If the Decls match, we have self-assignment
+ if (LHSDecl == RHSDecl)
+ // Do not visit the children
+ continue;
+
+ }
+ case BO_AddAssign:
+ case BO_SubAssign:
+ case BO_MulAssign:
+ case BO_DivAssign:
+ case BO_AndAssign:
+ case BO_OrAssign:
+ case BO_XorAssign:
+ case BO_ShlAssign:
+ case BO_ShrAssign: {
+ const VarDecl *VD = dyn_cast<VarDecl>(LHSDecl);
+ // The DeclRefExpr is being assigned to - mark it as non-constant
+ if (VD)
+ NonConstants->insert(VD);
+ break;
+ }
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ // Case 2: Pre/post increment/decrement and address of
+ case Stmt::UnaryOperatorClass: {
+ const UnaryOperator *UO = cast<UnaryOperator>(Head);
+
+ // Look for a DeclRef in the subexpression
+ const Decl *D = getDecl(UO->getSubExpr()->IgnoreParenCasts());
+ if (!D)
+ break;
+
+ // We found a unary operator with a DeclRef as a subexpression. We now
+ // check for any of the increment/decrement operators, as well as
+ // addressOf.
+ switch (UO->getOpcode()) {
+ case UO_PostDec:
+ case UO_PostInc:
+ case UO_PreDec:
+ case UO_PreInc:
+ // The DeclRef is being changed - mark it as non-constant
+ case UO_AddrOf: {
+ // If we are taking the address of the DeclRefExpr, assume it is
+ // non-constant.
+ const VarDecl *VD = dyn_cast<VarDecl>(D);
+ if (VD)
+ NonConstants->insert(VD);
+ break;
+ }
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ // Case 3: Reference Declarations
+ case Stmt::DeclStmtClass: {
+ const DeclStmt *DS = cast<DeclStmt>(Head);
+ // Iterate over each decl and see if any of them contain reference decls
+ for (const auto *I : DS->decls()) {
+ // We only care about VarDecls
+ const VarDecl *VD = dyn_cast<VarDecl>(I);
+ if (!VD)
+ continue;
+
+ // We found a VarDecl; make sure it is a reference type
+ if (!VD->getType().getTypePtr()->isReferenceType())
+ continue;
+
+ // Try to find a Decl in the initializer
+ const Decl *D = getDecl(VD->getInit()->IgnoreParenCasts());
+ if (!D)
+ break;
+
+ // If the reference is to another var, add the var to the non-constant
+ // list
+ if (const VarDecl *RefVD = dyn_cast<VarDecl>(D)) {
+ NonConstants->insert(RefVD);
+ continue;
+ }
+ }
+ break;
+ }
+
+ // Case 4: Variable references
+ case Stmt::DeclRefExprClass: {
+ const DeclRefExpr *DR = cast<DeclRefExpr>(Head);
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ // Add the Decl to the used list
+ UsedVars->insert(VD);
+ continue;
+ }
+ break;
+ }
+
+ // Case 5: Block expressions
+ case Stmt::BlockExprClass: {
+ const BlockExpr *B = cast<BlockExpr>(Head);
+ // Add the body of the block to the list
+ WorkList.push_back(B->getBody());
+ continue;
+ }
+
+ default:
+ break;
+ } // switch (head->getStmtClass())
+
+ // Add all substatements to the worklist
+ for (const Stmt *SubStmt : Head->children())
+ if (SubStmt)
+ WorkList.push_back(SubStmt);
+ } // while (!WorkList.empty())
+}
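+
+// Editor's worked example of the cases handled above; only a variable hit by
+// none of Cases 1-3 remains a pseudoconstant:
+// \code
+//   void f() {
+//     int a = 1, b = 2, c = 3;
+//     b = a;       // Case 1: assignment marks 'b' non-constant ('a' is used)
+//     int *p = &c; // Case 2: address-of marks 'c' non-constant
+//     int &r = a;  // Case 3: reference binding marks 'a' non-constant
+//   }
+// \endcode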
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp b/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp
new file mode 100644
index 0000000..8165b09
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp
@@ -0,0 +1,679 @@
+//=- ReachableCode.cpp - Reachable code analysis ----------------*- C++ --*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a flow-sensitive, path-insensitive analysis that
+// determines the reachable blocks within a CFG.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/ReachableCode.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallVector.h"
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Core Reachability Analysis routines.
+//===----------------------------------------------------------------------===//
+
+static bool isEnumConstant(const Expr *Ex) {
+ const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Ex);
+ if (!DR)
+ return false;
+ return isa<EnumConstantDecl>(DR->getDecl());
+}
+
+static bool isTrivialExpression(const Expr *Ex) {
+ Ex = Ex->IgnoreParenCasts();
+ return isa<IntegerLiteral>(Ex) || isa<StringLiteral>(Ex) ||
+ isa<CXXBoolLiteralExpr>(Ex) || isa<ObjCBoolLiteralExpr>(Ex) ||
+ isa<CharacterLiteral>(Ex) ||
+ isEnumConstant(Ex);
+}
+
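+// Returns true if 'S' is the condition of a do...while loop with a trivial
+// constant condition, e.g. the '0' in "do { ... } while (0)". Such idiomatic
+// constructs should not be reported as unreachable.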
+static bool isTrivialDoWhile(const CFGBlock *B, const Stmt *S) {
+ // Check if the block ends with a do...while() and see if 'S' is the
+ // condition.
+ if (const Stmt *Term = B->getTerminator()) {
+ if (const DoStmt *DS = dyn_cast<DoStmt>(Term)) {
+ const Expr *Cond = DS->getCond()->IgnoreParenCasts();
+ return Cond == S && isTrivialExpression(Cond);
+ }
+ }
+ return false;
+}
+
+static bool isDeadReturn(const CFGBlock *B, const Stmt *S) {
+ // Look to see if the current control flow ends with a 'return', and see if
+ // 'S' is a substatement. The 'return' may not be the last element in the
+ // block, or may be in a subsequent block because of destructors.
+ const CFGBlock *Current = B;
+ while (true) {
+ for (CFGBlock::const_reverse_iterator I = Current->rbegin(),
+ E = Current->rend();
+ I != E; ++I) {
+ if (Optional<CFGStmt> CS = I->getAs<CFGStmt>()) {
+ if (const ReturnStmt *RS = dyn_cast<ReturnStmt>(CS->getStmt())) {
+ if (RS == S)
+ return true;
+ if (const Expr *RE = RS->getRetValue()) {
+ RE = RE->IgnoreParenCasts();
+ if (RE == S)
+ return true;
+ ParentMap PM(const_cast<Expr *>(RE));
+ // If 'S' is in the ParentMap, it is a subexpression of
+ // the return statement.
+ return PM.getParent(S);
+ }
+ }
+ break;
+ }
+ }
+ // Note also that we are restricting the search for the return statement
+ // to stop at control-flow; only part of a return statement may be dead,
+ // without the whole return statement being dead.
+ if (Current->getTerminator().isTemporaryDtorsBranch()) {
+ // Temporary destructors have a predictable control flow, thus we want to
+ // look into the next block for the return statement.
+ // We look into the false branch, as we know the true branch only contains
+ // the call to the destructor.
+ assert(Current->succ_size() == 2);
+ Current = *(Current->succ_begin() + 1);
+ } else if (!Current->getTerminator() && Current->succ_size() == 1) {
+ // If there is only one successor, we're not dealing with outgoing control
+ // flow. Thus, look into the next block.
+ Current = *Current->succ_begin();
+ if (Current->pred_size() > 1) {
+ // If there is more than one predecessor, we're dealing with incoming
+ // control flow - if the return statement is in that block, it might
+ // well be reachable via a different control flow, thus it's not dead.
+ return false;
+ }
+ } else {
+ // We hit control flow or a dead end. Stop searching.
+ return false;
+ }
+ }
+ llvm_unreachable("Broke out of infinite loop.");
+}
+
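+/// Walk up the chain of macro expansions and return the location of the
+/// outermost macro in the chain.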
+static SourceLocation getTopMostMacro(SourceLocation Loc, SourceManager &SM) {
+ assert(Loc.isMacroID());
+ SourceLocation Last;
+ while (Loc.isMacroID()) {
+ Last = Loc;
+ Loc = SM.getImmediateMacroCallerLoc(Loc);
+ }
+ return Last;
+}
+
+/// Returns true if the statement is expanded from a configuration macro.
+static bool isExpandedFromConfigurationMacro(const Stmt *S,
+ Preprocessor &PP,
+ bool IgnoreYES_NO = false) {
+ // FIXME: This is not very precise. Here we just check to see if the
+ // value comes from a macro, but we can do much better. This is likely
+ // to be overly conservative. This logic is factored into a separate function
+ // so that we can refine it later.
+ SourceLocation L = S->getLocStart();
+ if (L.isMacroID()) {
+ if (IgnoreYES_NO) {
+ // The Objective-C constant 'YES' and 'NO'
+ // are defined as macros. Do not treat them
+ // as configuration values.
+ SourceManager &SM = PP.getSourceManager();
+ SourceLocation TopL = getTopMostMacro(L, SM);
+ StringRef MacroName = PP.getImmediateMacroName(TopL);
+ if (MacroName == "YES" || MacroName == "NO")
+ return false;
+ }
+ return true;
+ }
+ return false;
+}
+
+static bool isConfigurationValue(const ValueDecl *D, Preprocessor &PP);
+
+/// Returns true if the statement represents a configuration value.
+///
+/// A configuration value is something usually determined at compile-time
+/// that causes a given branch to always (or never) be taken. Such guards
+/// are for "sometimes unreachable" code, which is usually not interesting
+/// to report as unreachable, and may mask truly unreachable code within
+/// those blocks.
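+///
+/// For example, given:
+///   #define CONFIG_FOO 0
+///   if (CONFIG_FOO) { ... }
+/// the guarded block is only "sometimes unreachable" and is not reported.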
+static bool isConfigurationValue(const Stmt *S,
+ Preprocessor &PP,
+ SourceRange *SilenceableCondVal = nullptr,
+ bool IncludeIntegers = true,
+ bool WrappedInParens = false) {
+ if (!S)
+ return false;
+
+ if (const Expr *Ex = dyn_cast<Expr>(S))
+ S = Ex->IgnoreCasts();
+
+ // Special case looking for the sigil '()' around an integer literal.
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(S))
+ if (!PE->getLocStart().isMacroID())
+ return isConfigurationValue(PE->getSubExpr(), PP, SilenceableCondVal,
+ IncludeIntegers, true);
+
+ if (const Expr *Ex = dyn_cast<Expr>(S))
+ S = Ex->IgnoreCasts();
+
+ bool IgnoreYES_NO = false;
+
+ switch (S->getStmtClass()) {
+ case Stmt::CallExprClass: {
+ const FunctionDecl *Callee =
+ dyn_cast_or_null<FunctionDecl>(cast<CallExpr>(S)->getCalleeDecl());
+ return Callee ? Callee->isConstexpr() : false;
+ }
+ case Stmt::DeclRefExprClass:
+ return isConfigurationValue(cast<DeclRefExpr>(S)->getDecl(), PP);
+ case Stmt::ObjCBoolLiteralExprClass:
+ IgnoreYES_NO = true;
+ // Fallthrough.
+ case Stmt::CXXBoolLiteralExprClass:
+ case Stmt::IntegerLiteralClass: {
+ const Expr *E = cast<Expr>(S);
+ if (IncludeIntegers) {
+ if (SilenceableCondVal && !SilenceableCondVal->getBegin().isValid())
+ *SilenceableCondVal = E->getSourceRange();
+ return WrappedInParens || isExpandedFromConfigurationMacro(E, PP, IgnoreYES_NO);
+ }
+ return false;
+ }
+ case Stmt::MemberExprClass:
+ return isConfigurationValue(cast<MemberExpr>(S)->getMemberDecl(), PP);
+ case Stmt::UnaryExprOrTypeTraitExprClass:
+ return true;
+ case Stmt::BinaryOperatorClass: {
+ const BinaryOperator *B = cast<BinaryOperator>(S);
+ // Only include raw integers (not enums) as configuration
+ // values if they are used in a logical or comparison operator
+ // (not arithmetic).
+ IncludeIntegers &= (B->isLogicalOp() || B->isComparisonOp());
+ return isConfigurationValue(B->getLHS(), PP, SilenceableCondVal,
+ IncludeIntegers) ||
+ isConfigurationValue(B->getRHS(), PP, SilenceableCondVal,
+ IncludeIntegers);
+ }
+ case Stmt::UnaryOperatorClass: {
+ const UnaryOperator *UO = cast<UnaryOperator>(S);
+ if (SilenceableCondVal)
+ *SilenceableCondVal = UO->getSourceRange();
+ return UO->getOpcode() == UO_LNot &&
+ isConfigurationValue(UO->getSubExpr(), PP, SilenceableCondVal,
+ IncludeIntegers, WrappedInParens);
+ }
+ default:
+ return false;
+ }
+}
+
+static bool isConfigurationValue(const ValueDecl *D, Preprocessor &PP) {
+ if (const EnumConstantDecl *ED = dyn_cast<EnumConstantDecl>(D))
+ return isConfigurationValue(ED->getInitExpr(), PP);
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ // As a heuristic, treat globals as configuration values. Note
+ // that we only will get here if Sema evaluated this
+ // condition to a constant expression, which means the global
+ // had to be declared in a way to be a truly constant value.
+ // We could generalize this to local variables, but it isn't
+ // clear if those truly represent configuration values that
+ // gate unreachable code.
+ if (!VD->hasLocalStorage())
+ return true;
+
+ // As a heuristic, locals that have been marked 'const' explicitly
+ // can be treated as configuration values as well.
+ return VD->getType().isLocalConstQualified();
+ }
+ return false;
+}
+
+/// Returns true if we should always explore all successors of a block.
+static bool shouldTreatSuccessorsAsReachable(const CFGBlock *B,
+ Preprocessor &PP) {
+ if (const Stmt *Term = B->getTerminator()) {
+ if (isa<SwitchStmt>(Term))
+ return true;
+ // Specially handle '||' and '&&'.
+ if (isa<BinaryOperator>(Term)) {
+ return isConfigurationValue(Term, PP);
+ }
+ }
+
+ const Stmt *Cond = B->getTerminatorCondition(/* stripParens */ false);
+ return isConfigurationValue(Cond, PP);
+}
+
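+// Performs a worklist-driven search over the CFG, marking every block
+// reachable from 'Start'. Returns the number of blocks newly marked
+// reachable.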
+static unsigned scanFromBlock(const CFGBlock *Start,
+ llvm::BitVector &Reachable,
+ Preprocessor *PP,
+ bool IncludeSometimesUnreachableEdges) {
+ unsigned count = 0;
+
+ // Prep work queue
+ SmallVector<const CFGBlock*, 32> WL;
+
+ // The entry block may have already been marked reachable
+ // by the caller.
+ if (!Reachable[Start->getBlockID()]) {
+ ++count;
+ Reachable[Start->getBlockID()] = true;
+ }
+
+ WL.push_back(Start);
+
+ // Find the reachable blocks from 'Start'.
+ while (!WL.empty()) {
+ const CFGBlock *item = WL.pop_back_val();
+
+ // There are cases where we want to treat all successors as reachable.
+ // The idea is that some "sometimes unreachable" code is not interesting,
+ // and that we should forge ahead and explore those branches anyway.
+ // This allows us to potentially uncover some "always unreachable" code
+ // within the "sometimes unreachable" code.
+ // Look at the successors and mark them reachable.
+ Optional<bool> TreatAllSuccessorsAsReachable;
+ if (!IncludeSometimesUnreachableEdges)
+ TreatAllSuccessorsAsReachable = false;
+
+ for (CFGBlock::const_succ_iterator I = item->succ_begin(),
+ E = item->succ_end(); I != E; ++I) {
+ const CFGBlock *B = *I;
+ if (!B) do {
+ const CFGBlock *UB = I->getPossiblyUnreachableBlock();
+ if (!UB)
+ break;
+
+ if (!TreatAllSuccessorsAsReachable.hasValue()) {
+ assert(PP);
+ TreatAllSuccessorsAsReachable =
+ shouldTreatSuccessorsAsReachable(item, *PP);
+ }
+
+ if (TreatAllSuccessorsAsReachable.getValue()) {
+ B = UB;
+ break;
+ }
+ } while (false);
+
+ if (B) {
+ unsigned blockID = B->getBlockID();
+ if (!Reachable[blockID]) {
+ Reachable.set(blockID);
+ WL.push_back(B);
+ ++count;
+ }
+ }
+ }
+ }
+ return count;
+}
+
+static unsigned scanMaybeReachableFromBlock(const CFGBlock *Start,
+ Preprocessor &PP,
+ llvm::BitVector &Reachable) {
+ return scanFromBlock(Start, Reachable, &PP, true);
+}
+
+//===----------------------------------------------------------------------===//
+// Dead Code Scanner.
+//===----------------------------------------------------------------------===//
+
+namespace {
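+// The dead-code scan walks backwards from an unreachable block, looking for
+// a "dead root": an unreachable block whose predecessors are all reachable.
+// Reporting at dead roots lets a single diagnostic cover an entire region of
+// dead code.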
+ class DeadCodeScan {
+ llvm::BitVector Visited;
+ llvm::BitVector &Reachable;
+ SmallVector<const CFGBlock *, 10> WorkList;
+ Preprocessor &PP;
+
+ typedef SmallVector<std::pair<const CFGBlock *, const Stmt *>, 12>
+ DeferredLocsTy;
+
+ DeferredLocsTy DeferredLocs;
+
+ public:
+ DeadCodeScan(llvm::BitVector &reachable, Preprocessor &PP)
+ : Visited(reachable.size()),
+ Reachable(reachable),
+ PP(PP) {}
+
+ void enqueue(const CFGBlock *block);
+ unsigned scanBackwards(const CFGBlock *Start,
+ clang::reachable_code::Callback &CB);
+
+ bool isDeadCodeRoot(const CFGBlock *Block);
+
+ const Stmt *findDeadCode(const CFGBlock *Block);
+
+ void reportDeadCode(const CFGBlock *B,
+ const Stmt *S,
+ clang::reachable_code::Callback &CB);
+ };
+}
+
+void DeadCodeScan::enqueue(const CFGBlock *block) {
+ unsigned blockID = block->getBlockID();
+ if (Reachable[blockID] || Visited[blockID])
+ return;
+ Visited[blockID] = true;
+ WorkList.push_back(block);
+}
+
+bool DeadCodeScan::isDeadCodeRoot(const clang::CFGBlock *Block) {
+ bool isDeadRoot = true;
+
+ for (CFGBlock::const_pred_iterator I = Block->pred_begin(),
+ E = Block->pred_end(); I != E; ++I) {
+ if (const CFGBlock *PredBlock = *I) {
+ unsigned blockID = PredBlock->getBlockID();
+ if (Visited[blockID]) {
+ isDeadRoot = false;
+ continue;
+ }
+ if (!Reachable[blockID]) {
+ isDeadRoot = false;
+ Visited[blockID] = true;
+ WorkList.push_back(PredBlock);
+ continue;
+ }
+ }
+ }
+
+ return isDeadRoot;
+}
+
+static bool isValidDeadStmt(const Stmt *S) {
+ if (S->getLocStart().isInvalid())
+ return false;
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(S))
+ return BO->getOpcode() != BO_Comma;
+ return true;
+}
+
+const Stmt *DeadCodeScan::findDeadCode(const clang::CFGBlock *Block) {
+ for (CFGBlock::const_iterator I = Block->begin(), E = Block->end(); I!=E; ++I)
+ if (Optional<CFGStmt> CS = I->getAs<CFGStmt>()) {
+ const Stmt *S = CS->getStmt();
+ if (isValidDeadStmt(S))
+ return S;
+ }
+
+ if (CFGTerminator T = Block->getTerminator()) {
+ if (!T.isTemporaryDtorsBranch()) {
+ const Stmt *S = T.getStmt();
+ if (isValidDeadStmt(S))
+ return S;
+ }
+ }
+
+ return nullptr;
+}
+
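+// Comparator for array_pod_sort: order deferred dead statements by source
+// location, so the earliest statement in a dead region is reported first.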
+static int SrcCmp(const std::pair<const CFGBlock *, const Stmt *> *p1,
+ const std::pair<const CFGBlock *, const Stmt *> *p2) {
+ if (p1->second->getLocStart() < p2->second->getLocStart())
+ return -1;
+ if (p2->second->getLocStart() < p1->second->getLocStart())
+ return 1;
+ return 0;
+}
+
+unsigned DeadCodeScan::scanBackwards(const clang::CFGBlock *Start,
+ clang::reachable_code::Callback &CB) {
+
+ unsigned count = 0;
+ enqueue(Start);
+
+ while (!WorkList.empty()) {
+ const CFGBlock *Block = WorkList.pop_back_val();
+
+ // It is possible that this block has been marked reachable after
+ // it was enqueued.
+ if (Reachable[Block->getBlockID()])
+ continue;
+
+ // Look for any dead code within the block.
+ const Stmt *S = findDeadCode(Block);
+
+ if (!S) {
+ // No dead code. Possibly an empty block. Look at dead predecessors.
+ for (CFGBlock::const_pred_iterator I = Block->pred_begin(),
+ E = Block->pred_end(); I != E; ++I) {
+ if (const CFGBlock *predBlock = *I)
+ enqueue(predBlock);
+ }
+ continue;
+ }
+
+ // Specially handle macro-expanded code.
+ if (S->getLocStart().isMacroID()) {
+ count += scanMaybeReachableFromBlock(Block, PP, Reachable);
+ continue;
+ }
+
+ if (isDeadCodeRoot(Block)) {
+ reportDeadCode(Block, S, CB);
+ count += scanMaybeReachableFromBlock(Block, PP, Reachable);
+ }
+ else {
+ // Record this statement as the possibly best location in a
+ // strongly-connected component of dead code for emitting a
+ // warning.
+ DeferredLocs.push_back(std::make_pair(Block, S));
+ }
+ }
+
+ // If we didn't find a dead root, then report the dead code with the
+ // earliest location.
+ if (!DeferredLocs.empty()) {
+ llvm::array_pod_sort(DeferredLocs.begin(), DeferredLocs.end(), SrcCmp);
+ for (DeferredLocsTy::iterator I = DeferredLocs.begin(),
+ E = DeferredLocs.end(); I != E; ++I) {
+ const CFGBlock *Block = I->first;
+ if (Reachable[Block->getBlockID()])
+ continue;
+ reportDeadCode(Block, I->second, CB);
+ count += scanMaybeReachableFromBlock(Block, PP, Reachable);
+ }
+ }
+
+ return count;
+}
+
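+// Compute the location at which to report an unreachable statement, along
+// with up to two source ranges to highlight, based on the statement class.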
+static SourceLocation GetUnreachableLoc(const Stmt *S,
+ SourceRange &R1,
+ SourceRange &R2) {
+ R1 = R2 = SourceRange();
+
+ if (const Expr *Ex = dyn_cast<Expr>(S))
+ S = Ex->IgnoreParenImpCasts();
+
+ switch (S->getStmtClass()) {
+ case Expr::BinaryOperatorClass: {
+ const BinaryOperator *BO = cast<BinaryOperator>(S);
+ return BO->getOperatorLoc();
+ }
+ case Expr::UnaryOperatorClass: {
+ const UnaryOperator *UO = cast<UnaryOperator>(S);
+ R1 = UO->getSubExpr()->getSourceRange();
+ return UO->getOperatorLoc();
+ }
+ case Expr::CompoundAssignOperatorClass: {
+ const CompoundAssignOperator *CAO = cast<CompoundAssignOperator>(S);
+ R1 = CAO->getLHS()->getSourceRange();
+ R2 = CAO->getRHS()->getSourceRange();
+ return CAO->getOperatorLoc();
+ }
+ case Expr::BinaryConditionalOperatorClass:
+ case Expr::ConditionalOperatorClass: {
+ const AbstractConditionalOperator *CO =
+ cast<AbstractConditionalOperator>(S);
+ return CO->getQuestionLoc();
+ }
+ case Expr::MemberExprClass: {
+ const MemberExpr *ME = cast<MemberExpr>(S);
+ R1 = ME->getSourceRange();
+ return ME->getMemberLoc();
+ }
+ case Expr::ArraySubscriptExprClass: {
+ const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(S);
+ R1 = ASE->getLHS()->getSourceRange();
+ R2 = ASE->getRHS()->getSourceRange();
+ return ASE->getRBracketLoc();
+ }
+ case Expr::CStyleCastExprClass: {
+ const CStyleCastExpr *CSC = cast<CStyleCastExpr>(S);
+ R1 = CSC->getSubExpr()->getSourceRange();
+ return CSC->getLParenLoc();
+ }
+ case Expr::CXXFunctionalCastExprClass: {
+ const CXXFunctionalCastExpr *CE = cast <CXXFunctionalCastExpr>(S);
+ R1 = CE->getSubExpr()->getSourceRange();
+ return CE->getLocStart();
+ }
+ case Stmt::CXXTryStmtClass: {
+ return cast<CXXTryStmt>(S)->getHandler(0)->getCatchLoc();
+ }
+ case Expr::ObjCBridgedCastExprClass: {
+ const ObjCBridgedCastExpr *CSC = cast<ObjCBridgedCastExpr>(S);
+ R1 = CSC->getSubExpr()->getSourceRange();
+ return CSC->getLParenLoc();
+ }
+ default: ;
+ }
+ R1 = S->getSourceRange();
+ return S->getLocStart();
+}
+
+void DeadCodeScan::reportDeadCode(const CFGBlock *B,
+ const Stmt *S,
+ clang::reachable_code::Callback &CB) {
+ // Classify the unreachable code found, or suppress it in some cases.
+ reachable_code::UnreachableKind UK = reachable_code::UK_Other;
+
+ if (isa<BreakStmt>(S)) {
+ UK = reachable_code::UK_Break;
+ }
+ else if (isTrivialDoWhile(B, S)) {
+ return;
+ }
+ else if (isDeadReturn(B, S)) {
+ UK = reachable_code::UK_Return;
+ }
+
+ SourceRange SilenceableCondVal;
+
+ if (UK == reachable_code::UK_Other) {
+ // Check if the dead code is part of the "loop target" of
+ // a for/for-range loop. This is the block that contains
+ // the increment code.
+ if (const Stmt *LoopTarget = B->getLoopTarget()) {
+ SourceLocation Loc = LoopTarget->getLocStart();
+ SourceRange R1(Loc, Loc), R2;
+
+ if (const ForStmt *FS = dyn_cast<ForStmt>(LoopTarget)) {
+ const Expr *Inc = FS->getInc();
+ Loc = Inc->getLocStart();
+ R2 = Inc->getSourceRange();
+ }
+
+ CB.HandleUnreachable(reachable_code::UK_Loop_Increment,
+ Loc, SourceRange(), SourceRange(Loc, Loc), R2);
+ return;
+ }
+
+ // Check if the dead block has a predecessor whose branch has
+ // a configuration value that *could* be modified to
+ // silence the warning.
+ CFGBlock::const_pred_iterator PI = B->pred_begin();
+ if (PI != B->pred_end()) {
+ if (const CFGBlock *PredBlock = PI->getPossiblyUnreachableBlock()) {
+ const Stmt *TermCond =
+ PredBlock->getTerminatorCondition(/* strip parens */ false);
+ isConfigurationValue(TermCond, PP, &SilenceableCondVal);
+ }
+ }
+ }
+
+ SourceRange R1, R2;
+ SourceLocation Loc = GetUnreachableLoc(S, R1, R2);
+ CB.HandleUnreachable(UK, Loc, SilenceableCondVal, R1, R2);
+}
+
+//===----------------------------------------------------------------------===//
+// Reachability APIs.
+//===----------------------------------------------------------------------===//
+
+namespace clang { namespace reachable_code {
+
+void Callback::anchor() { }
+
+unsigned ScanReachableFromBlock(const CFGBlock *Start,
+ llvm::BitVector &Reachable) {
+ return scanFromBlock(Start, Reachable, /* SourceManager* */ nullptr, false);
+}
+
+void FindUnreachableCode(AnalysisDeclContext &AC, Preprocessor &PP,
+ Callback &CB) {
+
+ CFG *cfg = AC.getCFG();
+ if (!cfg)
+ return;
+
+ // Scan for reachable blocks from the entrance of the CFG.
+ // If there are no unreachable blocks, we're done.
+ llvm::BitVector reachable(cfg->getNumBlockIDs());
+ unsigned numReachable =
+ scanMaybeReachableFromBlock(&cfg->getEntry(), PP, reachable);
+ if (numReachable == cfg->getNumBlockIDs())
+ return;
+
+ // If there aren't explicit EH edges, we should include the 'try' dispatch
+ // blocks as roots.
+ if (!AC.getCFGBuildOptions().AddEHEdges) {
+ for (CFG::try_block_iterator I = cfg->try_blocks_begin(),
+ E = cfg->try_blocks_end() ; I != E; ++I) {
+ numReachable += scanMaybeReachableFromBlock(*I, PP, reachable);
+ }
+ if (numReachable == cfg->getNumBlockIDs())
+ return;
+ }
+
+ // There are some unreachable blocks. We need to find the root blocks that
+ // contain code that should be considered unreachable.
+ for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
+ const CFGBlock *block = *I;
+ // A block may have been marked reachable during this loop.
+ if (reachable[block->getBlockID()])
+ continue;
+
+ DeadCodeScan DS(reachable, PP);
+ numReachable += DS.scanBackwards(block, CB);
+
+ if (numReachable == cfg->getNumBlockIDs())
+ return;
+ }
+}
+
+}} // end namespace clang::reachable_code
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp
new file mode 100644
index 0000000..d484d8e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp
@@ -0,0 +1,553 @@
+//= ScanfFormatString.cpp - Analysis of scanf format strings ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Handling of format strings in scanf and friends. The structure of format
+// strings for fscanf() is described in C99 7.19.6.2.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/FormatString.h"
+#include "FormatStringParsing.h"
+#include "clang/Basic/TargetInfo.h"
+
+using clang::analyze_format_string::ArgType;
+using clang::analyze_format_string::FormatStringHandler;
+using clang::analyze_format_string::LengthModifier;
+using clang::analyze_format_string::OptionalAmount;
+using clang::analyze_format_string::ConversionSpecifier;
+using clang::analyze_scanf::ScanfConversionSpecifier;
+using clang::analyze_scanf::ScanfSpecifier;
+using clang::UpdateOnReturn;
+using namespace clang;
+
+typedef clang::analyze_format_string::SpecifierResult<ScanfSpecifier>
+ ScanfSpecifierResult;
+
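+// Parse a scanlist such as "%[abc]" or "%[^]0-9]". Note that a ']' appearing
+// first (or first after '^') is a literal member of the set rather than the
+// terminator (see C99 7.19.6.2).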
+static bool ParseScanList(FormatStringHandler &H,
+ ScanfConversionSpecifier &CS,
+ const char *&Beg, const char *E) {
+ const char *I = Beg;
+ const char *start = I - 1;
+ UpdateOnReturn <const char*> UpdateBeg(Beg, I);
+
+ // No more characters?
+ if (I == E) {
+ H.HandleIncompleteScanList(start, I);
+ return true;
+ }
+
+ // Special case: ']' is the first character.
+ if (*I == ']') {
+ if (++I == E) {
+ H.HandleIncompleteScanList(start, I - 1);
+ return true;
+ }
+ }
+
+ // Special case: "^]" are the first characters.
+ if (I + 1 != E && I[0] == '^' && I[1] == ']') {
+ I += 2;
+ if (I == E) {
+ H.HandleIncompleteScanList(start, I - 1);
+ return true;
+ }
+ }
+
+ // Look for a ']' character which denotes the end of the scan list.
+ while (*I != ']') {
+ if (++I == E) {
+ H.HandleIncompleteScanList(start, I - 1);
+ return true;
+ }
+ }
+
+ CS.setEndScanList(I);
+ return false;
+}
+
+// FIXME: Much of this is copy-paste from ParsePrintfSpecifier.
+// We can possibly refactor.
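+// A scanf conversion specification is parsed in the following order:
+//   '%' [argument-position '$'] ['*'] [field-width] [length-modifier]
+//   conversion-specifier
+// e.g. "%*5ld" suppresses assignment of a width-5 long decimal value.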
+static ScanfSpecifierResult ParseScanfSpecifier(FormatStringHandler &H,
+ const char *&Beg,
+ const char *E,
+ unsigned &argIndex,
+ const LangOptions &LO,
+ const TargetInfo &Target) {
+
+ using namespace clang::analyze_scanf;
+ const char *I = Beg;
+ const char *Start = nullptr;
+ UpdateOnReturn <const char*> UpdateBeg(Beg, I);
+
+ // Look for a '%' character that indicates the start of a format specifier.
+ for ( ; I != E ; ++I) {
+ char c = *I;
+ if (c == '\0') {
+ // Detect spurious null characters, which are likely errors.
+ H.HandleNullChar(I);
+ return true;
+ }
+ if (c == '%') {
+ Start = I++; // Record the start of the format specifier.
+ break;
+ }
+ }
+
+ // No format specifier found?
+ if (!Start)
+ return false;
+
+ if (I == E) {
+ // No more characters left?
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ ScanfSpecifier FS;
+ if (ParseArgPosition(H, FS, Start, I, E))
+ return true;
+
+ if (I == E) {
+ // No more characters left?
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ // Look for '*' flag if it is present.
+ if (*I == '*') {
+ FS.setSuppressAssignment(I);
+ if (++I == E) {
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+ }
+
+ // Look for the field width (if any). Unlike printf, this is either
+ // a fixed integer or isn't present.
+ const OptionalAmount &Amt = clang::analyze_format_string::ParseAmount(I, E);
+ if (Amt.getHowSpecified() != OptionalAmount::NotSpecified) {
+ assert(Amt.getHowSpecified() == OptionalAmount::Constant);
+ FS.setFieldWidth(Amt);
+
+ if (I == E) {
+ // No more characters left?
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+ }
+
+ // Look for the length modifier.
+ if (ParseLengthModifier(FS, I, E, LO, /*scanf=*/true) && I == E) {
+ // No more characters left?
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ // Detect spurious null characters, which are likely errors.
+ if (*I == '\0') {
+ H.HandleNullChar(I);
+ return true;
+ }
+
+ // Finally, look for the conversion specifier.
+ const char *conversionPosition = I++;
+ ScanfConversionSpecifier::Kind k = ScanfConversionSpecifier::InvalidSpecifier;
+ switch (*conversionPosition) {
+ default:
+ break;
+ case '%': k = ConversionSpecifier::PercentArg; break;
+ case 'A': k = ConversionSpecifier::AArg; break;
+ case 'E': k = ConversionSpecifier::EArg; break;
+ case 'F': k = ConversionSpecifier::FArg; break;
+ case 'G': k = ConversionSpecifier::GArg; break;
+ case 'X': k = ConversionSpecifier::XArg; break;
+ case 'a': k = ConversionSpecifier::aArg; break;
+ case 'd': k = ConversionSpecifier::dArg; break;
+ case 'e': k = ConversionSpecifier::eArg; break;
+ case 'f': k = ConversionSpecifier::fArg; break;
+ case 'g': k = ConversionSpecifier::gArg; break;
+ case 'i': k = ConversionSpecifier::iArg; break;
+ case 'n': k = ConversionSpecifier::nArg; break;
+ case 'c': k = ConversionSpecifier::cArg; break;
+ case 'C': k = ConversionSpecifier::CArg; break;
+ case 'S': k = ConversionSpecifier::SArg; break;
+ case '[': k = ConversionSpecifier::ScanListArg; break;
+ case 'u': k = ConversionSpecifier::uArg; break;
+ case 'x': k = ConversionSpecifier::xArg; break;
+ case 'o': k = ConversionSpecifier::oArg; break;
+ case 's': k = ConversionSpecifier::sArg; break;
+ case 'p': k = ConversionSpecifier::pArg; break;
+ // Apple extensions
+ case 'D':
+ if (Target.getTriple().isOSDarwin())
+ k = ConversionSpecifier::DArg;
+ break;
+ case 'O':
+ if (Target.getTriple().isOSDarwin())
+ k = ConversionSpecifier::OArg;
+ break;
+ case 'U':
+ if (Target.getTriple().isOSDarwin())
+ k = ConversionSpecifier::UArg;
+ break;
+ }
+ ScanfConversionSpecifier CS(conversionPosition, k);
+ if (k == ScanfConversionSpecifier::ScanListArg) {
+ if (ParseScanList(H, CS, I, E))
+ return true;
+ }
+ FS.setConversionSpecifier(CS);
+ if (CS.consumesDataArgument() && !FS.getSuppressAssignment()
+ && !FS.usesPositionalArg())
+ FS.setArgIndex(argIndex++);
+
+ // FIXME: '%' and '*' don't make sense together. Issue a warning.
+ // FIXME: 'ConsumedSoFar' and '*' don't make sense together.
+
+ if (k == ScanfConversionSpecifier::InvalidSpecifier) {
+ // Assume the conversion takes one argument.
+ return !H.HandleInvalidScanfConversionSpecifier(FS, Beg, I - Beg);
+ }
+ return ScanfSpecifierResult(Start, FS);
+}
+
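+// Map a conversion specifier plus length modifier to the expected argument
+// type. Unlike printf, scanf arguments are always pointers to the object
+// being written; e.g. "%ld" expects a 'long *'.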
+ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const {
+ const ScanfConversionSpecifier &CS = getConversionSpecifier();
+
+ if (!CS.consumesDataArgument())
+ return ArgType::Invalid();
+
+ switch(CS.getKind()) {
+ // Signed int.
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::DArg:
+ case ConversionSpecifier::iArg:
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return ArgType::PtrTo(Ctx.IntTy);
+ case LengthModifier::AsChar:
+ return ArgType::PtrTo(ArgType::AnyCharTy);
+ case LengthModifier::AsShort:
+ return ArgType::PtrTo(Ctx.ShortTy);
+ case LengthModifier::AsLong:
+ return ArgType::PtrTo(Ctx.LongTy);
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
+ return ArgType::PtrTo(Ctx.LongLongTy);
+ case LengthModifier::AsInt64:
+ return ArgType::PtrTo(ArgType(Ctx.LongLongTy, "__int64"));
+ case LengthModifier::AsIntMax:
+ return ArgType::PtrTo(ArgType(Ctx.getIntMaxType(), "intmax_t"));
+ case LengthModifier::AsSizeT:
+ // FIXME: ssize_t.
+ return ArgType();
+ case LengthModifier::AsPtrDiff:
+ return ArgType::PtrTo(ArgType(Ctx.getPointerDiffType(), "ptrdiff_t"));
+ case LengthModifier::AsLongDouble:
+ // GNU extension.
+ return ArgType::PtrTo(Ctx.LongLongTy);
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ case LengthModifier::AsInt32:
+ case LengthModifier::AsInt3264:
+ case LengthModifier::AsWide:
+ return ArgType::Invalid();
+ }
+
+ // Unsigned int.
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::OArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::UArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return ArgType::PtrTo(Ctx.UnsignedIntTy);
+ case LengthModifier::AsChar:
+ return ArgType::PtrTo(Ctx.UnsignedCharTy);
+ case LengthModifier::AsShort:
+ return ArgType::PtrTo(Ctx.UnsignedShortTy);
+ case LengthModifier::AsLong:
+ return ArgType::PtrTo(Ctx.UnsignedLongTy);
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
+ return ArgType::PtrTo(Ctx.UnsignedLongLongTy);
+ case LengthModifier::AsInt64:
+ return ArgType::PtrTo(ArgType(Ctx.UnsignedLongLongTy, "unsigned __int64"));
+ case LengthModifier::AsIntMax:
+ return ArgType::PtrTo(ArgType(Ctx.getUIntMaxType(), "uintmax_t"));
+ case LengthModifier::AsSizeT:
+ return ArgType::PtrTo(ArgType(Ctx.getSizeType(), "size_t"));
+ case LengthModifier::AsPtrDiff:
+ // FIXME: Unsigned version of ptrdiff_t?
+ return ArgType();
+ case LengthModifier::AsLongDouble:
+ // GNU extension.
+ return ArgType::PtrTo(Ctx.UnsignedLongLongTy);
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ case LengthModifier::AsInt32:
+ case LengthModifier::AsInt3264:
+ case LengthModifier::AsWide:
+ return ArgType::Invalid();
+ }
+
+ // Float.
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return ArgType::PtrTo(Ctx.FloatTy);
+ case LengthModifier::AsLong:
+ return ArgType::PtrTo(Ctx.DoubleTy);
+ case LengthModifier::AsLongDouble:
+ return ArgType::PtrTo(Ctx.LongDoubleTy);
+ default:
+ return ArgType::Invalid();
+ }
+
+ // Char, string and scanlist.
+ case ConversionSpecifier::cArg:
+ case ConversionSpecifier::sArg:
+ case ConversionSpecifier::ScanListArg:
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return ArgType::PtrTo(ArgType::AnyCharTy);
+ case LengthModifier::AsLong:
+ case LengthModifier::AsWide:
+ return ArgType::PtrTo(ArgType(Ctx.getWideCharType(), "wchar_t"));
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ return ArgType::PtrTo(ArgType::CStrTy);
+ case LengthModifier::AsShort:
+ if (Ctx.getTargetInfo().getTriple().isOSMSVCRT())
+ return ArgType::PtrTo(ArgType::AnyCharTy);
+ default:
+ return ArgType::Invalid();
+ }
+ case ConversionSpecifier::CArg:
+ case ConversionSpecifier::SArg:
+ // FIXME: Mac OS X specific?
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ case LengthModifier::AsWide:
+ return ArgType::PtrTo(ArgType(Ctx.getWideCharType(), "wchar_t"));
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ return ArgType::PtrTo(ArgType(ArgType::WCStrTy, "wchar_t *"));
+ case LengthModifier::AsShort:
+ if (Ctx.getTargetInfo().getTriple().isOSMSVCRT())
+ return ArgType::PtrTo(ArgType::AnyCharTy);
+ default:
+ return ArgType::Invalid();
+ }
+
+ // Pointer.
+ case ConversionSpecifier::pArg:
+ return ArgType::PtrTo(ArgType::CPointerTy);
+
+ // Write-back.
+ case ConversionSpecifier::nArg:
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return ArgType::PtrTo(Ctx.IntTy);
+ case LengthModifier::AsChar:
+ return ArgType::PtrTo(Ctx.SignedCharTy);
+ case LengthModifier::AsShort:
+ return ArgType::PtrTo(Ctx.ShortTy);
+ case LengthModifier::AsLong:
+ return ArgType::PtrTo(Ctx.LongTy);
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
+ return ArgType::PtrTo(Ctx.LongLongTy);
+ case LengthModifier::AsInt64:
+ return ArgType::PtrTo(ArgType(Ctx.LongLongTy, "__int64"));
+ case LengthModifier::AsIntMax:
+ return ArgType::PtrTo(ArgType(Ctx.getIntMaxType(), "intmax_t"));
+ case LengthModifier::AsSizeT:
+ return ArgType(); // FIXME: ssize_t
+ case LengthModifier::AsPtrDiff:
+ return ArgType::PtrTo(ArgType(Ctx.getPointerDiffType(), "ptrdiff_t"));
+ case LengthModifier::AsLongDouble:
+ return ArgType(); // FIXME: Is this a known extension?
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ case LengthModifier::AsInt32:
+ case LengthModifier::AsInt3264:
+ case LengthModifier::AsWide:
+ return ArgType::Invalid();
+ }
+
+ default:
+ break;
+ }
+
+ return ArgType();
+}
+
+bool ScanfSpecifier::fixType(QualType QT, QualType RawQT,
+ const LangOptions &LangOpt,
+ ASTContext &Ctx) {
+
+ // %n is different from other conversion specifiers; don't try to fix it.
+ if (CS.getKind() == ConversionSpecifier::nArg)
+ return false;
+
+ if (!QT->isPointerType())
+ return false;
+
+ QualType PT = QT->getPointeeType();
+
+ // If it's an enum, get its underlying type.
+ if (const EnumType *ETy = PT->getAs<EnumType>())
+ PT = ETy->getDecl()->getIntegerType();
+
+ const BuiltinType *BT = PT->getAs<BuiltinType>();
+ if (!BT)
+ return false;
+
+ // Pointer to a character.
+ if (PT->isAnyCharacterType()) {
+ CS.setKind(ConversionSpecifier::sArg);
+ if (PT->isWideCharType())
+ LM.setKind(LengthModifier::AsWideChar);
+ else
+ LM.setKind(LengthModifier::None);
+
+ // If we know the target array length, we can use it as a field width.
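+ // For example, "char buf[16]" yields a field width of 15, leaving room
+ // for the null terminator.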
+ if (const ConstantArrayType *CAT = Ctx.getAsConstantArrayType(RawQT)) {
+ if (CAT->getSizeModifier() == ArrayType::Normal)
+ FieldWidth = OptionalAmount(OptionalAmount::Constant,
+ CAT->getSize().getZExtValue() - 1,
+ "", 0, false);
+
+ }
+ return true;
+ }
+
+ // Figure out the length modifier.
+ switch (BT->getKind()) {
+ // no modifier
+ case BuiltinType::UInt:
+ case BuiltinType::Int:
+ case BuiltinType::Float:
+ LM.setKind(LengthModifier::None);
+ break;
+
+ // hh
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ LM.setKind(LengthModifier::AsChar);
+ break;
+
+ // h
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ LM.setKind(LengthModifier::AsShort);
+ break;
+
+ // l
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ case BuiltinType::Double:
+ LM.setKind(LengthModifier::AsLong);
+ break;
+
+ // ll
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ LM.setKind(LengthModifier::AsLongLong);
+ break;
+
+ // L
+ case BuiltinType::LongDouble:
+ LM.setKind(LengthModifier::AsLongDouble);
+ break;
+
+ // Don't know.
+ default:
+ return false;
+ }
+
+ // Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99.
+ if (isa<TypedefType>(PT) && (LangOpt.C99 || LangOpt.CPlusPlus11))
+ namedTypeToLengthModifier(PT, LM);
+
+ // If fixing the length modifier was enough, we are done.
+ if (hasValidLengthModifier(Ctx.getTargetInfo())) {
+ const analyze_scanf::ArgType &AT = getArgType(Ctx);
+ if (AT.isValid() && AT.matchesType(Ctx, QT))
+ return true;
+ }
+
+ // Figure out the conversion specifier.
+ if (PT->isRealFloatingType())
+ CS.setKind(ConversionSpecifier::fArg);
+ else if (PT->isSignedIntegerType())
+ CS.setKind(ConversionSpecifier::dArg);
+ else if (PT->isUnsignedIntegerType())
+ CS.setKind(ConversionSpecifier::uArg);
+ else
+ llvm_unreachable("Unexpected type");
+
+ return true;
+}
+
+void ScanfSpecifier::toString(raw_ostream &os) const {
+ os << "%";
+
+ if (usesPositionalArg())
+ os << getPositionalArgIndex() << "$";
+ if (SuppressAssignment)
+ os << "*";
+
+ FieldWidth.toString(os);
+ os << LM.toString();
+ os << CS.toString();
+}
+
+bool clang::analyze_format_string::ParseScanfString(FormatStringHandler &H,
+ const char *I,
+ const char *E,
+ const LangOptions &LO,
+ const TargetInfo &Target) {
+
+ unsigned argIndex = 0;
+
+ // Keep looking for a format specifier until we have exhausted the string.
+ while (I != E) {
+ const ScanfSpecifierResult &FSR = ParseScanfSpecifier(H, I, E, argIndex,
+ LO, Target);
+ // Did a fail-stop error of any kind occur when parsing the specifier?
+ // If so, don't do any more processing.
+ if (FSR.shouldStop())
+ return true;
+ // Did we exhaust the string or encounter an error that
+ // we can recover from?
+ if (!FSR.hasValue())
+ continue;
+ // We have a format specifier. Pass it to the callback.
+ if (!H.HandleScanfSpecifier(FSR.getValue(), FSR.getStart(),
+ I - FSR.getStart())) {
+ return true;
+ }
+ }
+ assert(I == E && "Format string not exhausted");
+ return false;
+}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp
new file mode 100644
index 0000000..b282a5b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp
@@ -0,0 +1,2406 @@
+//===- ThreadSafety.cpp ----------------------------------------*- C++ --*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// An intra-procedural analysis for thread safety (e.g. deadlocks and race
+// conditions), based on an annotation system.
+//
+// See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+// for more information.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Analysis/Analyses/PostOrderCFGView.h"
+#include "clang/Analysis/Analyses/ThreadSafety.h"
+#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
+#include "clang/Analysis/Analyses/ThreadSafetyLogical.h"
+#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
+#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/CFGStmtMap.h"
+#include "clang/Basic/OperatorKinds.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <ostream>
+#include <sstream>
+#include <utility>
+#include <vector>
+using namespace clang;
+using namespace threadSafety;
+
+// Key method definition
+ThreadSafetyHandler::~ThreadSafetyHandler() {}
+
+namespace {
+class TILPrinter :
+ public til::PrettyPrinter<TILPrinter, llvm::raw_ostream> {};
+
+
+/// Issue a warning about an invalid lock expression
+static void warnInvalidLock(ThreadSafetyHandler &Handler,
+ const Expr *MutexExp, const NamedDecl *D,
+ const Expr *DeclExp, StringRef Kind) {
+ SourceLocation Loc;
+ if (DeclExp)
+ Loc = DeclExp->getExprLoc();
+
+ // FIXME: add a note about the attribute location in MutexExp or D
+ if (Loc.isValid())
+ Handler.handleInvalidLockExp(Kind, Loc);
+}
+
+/// \brief A set of CapabilityExpr objects, which are compiled from the
+/// requires attributes on a function.
+class CapExprSet : public SmallVector<CapabilityExpr, 4> {
+public:
+ /// \brief Push M onto list, but discard duplicates.
+ void push_back_nodup(const CapabilityExpr &CapE) {
+ iterator It = std::find_if(begin(), end(),
+ [=](const CapabilityExpr &CapE2) {
+ return CapE.equals(CapE2);
+ });
+ if (It == end())
+ push_back(CapE);
+ }
+};
+
+class FactManager;
+class FactSet;
+
+/// \brief This is a helper class that stores a fact that is known at a
+/// particular point in program execution. Currently, a fact is a capability,
+/// along with additional information, such as where it was acquired, whether
+/// it is exclusive or shared, etc.
+///
+/// FIXME: this analysis does not currently support either re-entrant
+/// locking or lock "upgrading" and "downgrading" between exclusive and
+/// shared.
+class FactEntry : public CapabilityExpr {
+private:
+ LockKind LKind; ///< exclusive or shared
+ SourceLocation AcquireLoc; ///< where it was acquired.
+ bool Asserted; ///< true if the lock was asserted
+ bool Declared; ///< true if the lock was declared
+
+public:
+ FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
+ bool Asrt, bool Declrd = false)
+ : CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Asserted(Asrt),
+ Declared(Declrd) {}
+
+ virtual ~FactEntry() {}
+
+ LockKind kind() const { return LKind; }
+ SourceLocation loc() const { return AcquireLoc; }
+ bool asserted() const { return Asserted; }
+ bool declared() const { return Declared; }
+
+ void setDeclared(bool D) { Declared = D; }
+
+ virtual void
+ handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
+ SourceLocation JoinLoc, LockErrorKind LEK,
+ ThreadSafetyHandler &Handler) const = 0;
+ virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
+ const CapabilityExpr &Cp, SourceLocation UnlockLoc,
+ bool FullyRemove, ThreadSafetyHandler &Handler,
+ StringRef DiagKind) const = 0;
+
+ // Return true if LKind >= LK, where exclusive > shared
+ bool isAtLeast(LockKind LK) {
+ return (LKind == LK_Exclusive) || (LK == LK_Shared);
+ }
+};
+
+
+typedef unsigned short FactID;
+
+/// \brief FactManager manages the memory for all facts that are created during
+/// the analysis of a single routine.
+class FactManager {
+private:
+ std::vector<std::unique_ptr<FactEntry>> Facts;
+
+public:
+ FactID newFact(std::unique_ptr<FactEntry> Entry) {
+ Facts.push_back(std::move(Entry));
+ return static_cast<unsigned short>(Facts.size() - 1);
+ }
+
+ const FactEntry &operator[](FactID F) const { return *Facts[F]; }
+ FactEntry &operator[](FactID F) { return *Facts[F]; }
+};
+
+
+/// \brief A FactSet is the set of facts that are known to be true at a
+/// particular program point. FactSets must be small, because they are
+/// frequently copied, and are thus implemented as a set of indices into a
+/// table maintained by a FactManager. A typical FactSet only holds 1 or 2
+/// locks, so we can get away with doing a linear search for lookup. Note
+/// that a hashtable or map is inappropriate in this case, because lookups
+/// may involve partial pattern matches, rather than exact matches.
+class FactSet {
+private:
+ typedef SmallVector<FactID, 4> FactVec;
+
+ FactVec FactIDs;
+
+public:
+ typedef FactVec::iterator iterator;
+ typedef FactVec::const_iterator const_iterator;
+
+ iterator begin() { return FactIDs.begin(); }
+ const_iterator begin() const { return FactIDs.begin(); }
+
+ iterator end() { return FactIDs.end(); }
+ const_iterator end() const { return FactIDs.end(); }
+
+ bool isEmpty() const { return FactIDs.size() == 0; }
+
+ // Return true if the set contains only negative facts
+ bool isEmpty(FactManager &FactMan) const {
+ for (FactID FID : *this) {
+ if (!FactMan[FID].negative())
+ return false;
+ }
+ return true;
+ }
+
+ void addLockByID(FactID ID) { FactIDs.push_back(ID); }
+
+ FactID addLock(FactManager &FM, std::unique_ptr<FactEntry> Entry) {
+ FactID F = FM.newFact(std::move(Entry));
+ FactIDs.push_back(F);
+ return F;
+ }
+
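+ // Remove a fact by overwriting it with the last element of the vector
+ // (order is not significant in a FactSet), then popping the tail.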
+ bool removeLock(FactManager& FM, const CapabilityExpr &CapE) {
+ unsigned n = FactIDs.size();
+ if (n == 0)
+ return false;
+
+ for (unsigned i = 0; i < n-1; ++i) {
+ if (FM[FactIDs[i]].matches(CapE)) {
+ FactIDs[i] = FactIDs[n-1];
+ FactIDs.pop_back();
+ return true;
+ }
+ }
+ if (FM[FactIDs[n-1]].matches(CapE)) {
+ FactIDs.pop_back();
+ return true;
+ }
+ return false;
+ }
+
+ iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
+ return std::find_if(begin(), end(), [&](FactID ID) {
+ return FM[ID].matches(CapE);
+ });
+ }
+
+ FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
+ auto I = std::find_if(begin(), end(), [&](FactID ID) {
+ return FM[ID].matches(CapE);
+ });
+ return I != end() ? &FM[*I] : nullptr;
+ }
+
+ FactEntry *findLockUniv(FactManager &FM, const CapabilityExpr &CapE) const {
+ auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
+ return FM[ID].matchesUniv(CapE);
+ });
+ return I != end() ? &FM[*I] : nullptr;
+ }
+
+ FactEntry *findPartialMatch(FactManager &FM,
+ const CapabilityExpr &CapE) const {
+ auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
+ return FM[ID].partiallyMatches(CapE);
+ });
+ return I != end() ? &FM[*I] : nullptr;
+ }
+
+ bool containsMutexDecl(FactManager &FM, const ValueDecl* Vd) const {
+ auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
+ return FM[ID].valueDecl() == Vd;
+ });
+ return I != end();
+ }
+};
+
+class ThreadSafetyAnalyzer;
+} // namespace
+
+namespace clang {
+namespace threadSafety {
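+/// BeforeSet maintains, for each synchronization capability, the set of
+/// capabilities declared to be acquired before it, and checks acquisition
+/// order against that set (see checkBeforeAfter). CycMap is used to detect
+/// cycles in the declared ordering.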
+class BeforeSet {
+private:
+ typedef SmallVector<const ValueDecl*, 4> BeforeVect;
+
+ struct BeforeInfo {
+ BeforeInfo() : Visited(0) {}
+ BeforeInfo(BeforeInfo &&O) : Vect(std::move(O.Vect)), Visited(O.Visited) {}
+
+ BeforeVect Vect;
+ int Visited;
+ };
+
+ typedef llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>
+ BeforeMap;
+ typedef llvm::DenseMap<const ValueDecl*, bool> CycleMap;
+
+public:
+ BeforeSet() { }
+
+ BeforeInfo* insertAttrExprs(const ValueDecl* Vd,
+ ThreadSafetyAnalyzer& Analyzer);
+
+ BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd,
+ ThreadSafetyAnalyzer &Analyzer);
+
+ void checkBeforeAfter(const ValueDecl* Vd,
+ const FactSet& FSet,
+ ThreadSafetyAnalyzer& Analyzer,
+ SourceLocation Loc, StringRef CapKind);
+
+private:
+ BeforeMap BMap;
+ CycleMap CycMap;
+};
+} // end namespace threadSafety
+} // end namespace clang
+
+namespace {
+typedef llvm::ImmutableMap<const NamedDecl*, unsigned> LocalVarContext;
+class LocalVariableMap;
+
+/// A side (entry or exit) of a CFG node.
+enum CFGBlockSide { CBS_Entry, CBS_Exit };
+
+/// CFGBlockInfo is a struct which contains all the information that is
+/// maintained for each block in the CFG. See LocalVariableMap for more
+/// information about the contexts.
+struct CFGBlockInfo {
+ FactSet EntrySet; // Lockset held at entry to block
+ FactSet ExitSet; // Lockset held at exit from block
+ LocalVarContext EntryContext; // Context held at entry to block
+ LocalVarContext ExitContext; // Context held at exit from block
+ SourceLocation EntryLoc; // Location of first statement in block
+ SourceLocation ExitLoc; // Location of last statement in block.
+ unsigned EntryIndex; // Used to replay contexts later
+ bool Reachable; // Is this block reachable?
+
+ const FactSet &getSet(CFGBlockSide Side) const {
+ return Side == CBS_Entry ? EntrySet : ExitSet;
+ }
+ SourceLocation getLocation(CFGBlockSide Side) const {
+ return Side == CBS_Entry ? EntryLoc : ExitLoc;
+ }
+
+private:
+ CFGBlockInfo(LocalVarContext EmptyCtx)
+ : EntryContext(EmptyCtx), ExitContext(EmptyCtx), Reachable(false)
+ { }
+
+public:
+ static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
+};
+
+
+
+// A LocalVariableMap maintains a map from local variables to their currently
+// valid definitions. It provides SSA-like functionality when traversing the
+// CFG. Like SSA, each definition or assignment to a variable is assigned a
+// unique name (an integer), which acts as the SSA name for that definition.
+// The total set of names is shared among all CFG basic blocks.
+// Unlike SSA, we do not rewrite expressions to replace local variables declrefs
+// with their SSA-names. Instead, we compute a Context for each point in the
+// code, which maps local variables to the appropriate SSA-name. This map
+// changes with each assignment.
+//
+// The map is computed in a single pass over the CFG. Subsequent analyses can
+// then query the map to find the appropriate Context for a statement, and use
+// that Context to look up the definitions of variables.
+class LocalVariableMap {
+public:
+ typedef LocalVarContext Context;
+
+ /// A VarDefinition consists of an expression, representing the value of the
+ /// variable, along with the context in which that expression should be
+ /// interpreted. A reference VarDefinition does not itself contain this
+ /// information, but instead contains a pointer to a previous VarDefinition.
+ struct VarDefinition {
+ public:
+ friend class LocalVariableMap;
+
+ const NamedDecl *Dec; // The original declaration for this variable.
+ const Expr *Exp; // The expression for this variable, OR
+ unsigned Ref; // Reference to another VarDefinition
+ Context Ctx; // The map with which Exp should be interpreted.
+
+ bool isReference() { return !Exp; }
+
+ private:
+ // Create ordinary variable definition
+ VarDefinition(const NamedDecl *D, const Expr *E, Context C)
+ : Dec(D), Exp(E), Ref(0), Ctx(C)
+ { }
+
+ // Create reference to previous definition
+ VarDefinition(const NamedDecl *D, unsigned R, Context C)
+ : Dec(D), Exp(nullptr), Ref(R), Ctx(C)
+ { }
+ };
+
+private:
+ Context::Factory ContextFactory;
+ std::vector<VarDefinition> VarDefinitions;
+ std::vector<unsigned> CtxIndices;
+ std::vector<std::pair<Stmt*, Context> > SavedContexts;
+
+public:
+ LocalVariableMap() {
+ // index 0 is a placeholder for undefined variables (aka phi-nodes).
+ VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext()));
+ }
+
+ /// Look up a definition, within the given context.
+ const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
+ const unsigned *i = Ctx.lookup(D);
+ if (!i)
+ return nullptr;
+ assert(*i < VarDefinitions.size());
+ return &VarDefinitions[*i];
+ }
+
+ /// Look up the definition for D within the given context. Returns
+ /// NULL if the expression is not statically known. If successful, also
+ /// modifies Ctx to hold the context of the return Expr.
+ const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
+ const unsigned *P = Ctx.lookup(D);
+ if (!P)
+ return nullptr;
+
+ unsigned i = *P;
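+ // Follow the chain of references until we reach a definition that stores
+ // an actual expression; index 0 (undefined) terminates the chain.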
+ while (i > 0) {
+ if (VarDefinitions[i].Exp) {
+ Ctx = VarDefinitions[i].Ctx;
+ return VarDefinitions[i].Exp;
+ }
+ i = VarDefinitions[i].Ref;
+ }
+ return nullptr;
+ }
+
+ Context getEmptyContext() { return ContextFactory.getEmptyMap(); }
+
+ /// Return the next context after processing S. This function is used by
+ /// clients of the class to get the appropriate context when traversing the
+ /// CFG. It must be called for every assignment or DeclStmt.
+ Context getNextContext(unsigned &CtxIndex, Stmt *S, Context C) {
+ if (SavedContexts[CtxIndex+1].first == S) {
+ CtxIndex++;
+ Context Result = SavedContexts[CtxIndex].second;
+ return Result;
+ }
+ return C;
+ }
+
+ void dumpVarDefinitionName(unsigned i) {
+ if (i == 0) {
+ llvm::errs() << "Undefined";
+ return;
+ }
+ const NamedDecl *Dec = VarDefinitions[i].Dec;
+ if (!Dec) {
+ llvm::errs() << "<<NULL>>";
+ return;
+ }
+ Dec->printName(llvm::errs());
+ llvm::errs() << "." << i << " " << ((const void*) Dec);
+ }
+
+ /// Dumps an ASCII representation of the variable map to llvm::errs()
+ void dump() {
+ for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
+ const Expr *Exp = VarDefinitions[i].Exp;
+ unsigned Ref = VarDefinitions[i].Ref;
+
+ dumpVarDefinitionName(i);
+ llvm::errs() << " = ";
+ if (Exp) Exp->dump();
+ else {
+ dumpVarDefinitionName(Ref);
+ llvm::errs() << "\n";
+ }
+ }
+ }
+
+ /// Dumps an ASCII representation of a Context to llvm::errs()
+ void dumpContext(Context C) {
+ for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
+ const NamedDecl *D = I.getKey();
+ D->printName(llvm::errs());
+ const unsigned *i = C.lookup(D);
+ llvm::errs() << " -> ";
+ dumpVarDefinitionName(*i);
+ llvm::errs() << "\n";
+ }
+ }
+
+ /// Builds the variable map.
+ void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
+ std::vector<CFGBlockInfo> &BlockInfo);
+
+protected:
+ // Get the current context index
+ unsigned getContextIndex() { return SavedContexts.size()-1; }
+
+ // Save the current context for later replay
+ void saveContext(Stmt *S, Context C) {
+ SavedContexts.push_back(std::make_pair(S,C));
+ }
+
+ // Adds a new definition to the given context, and returns a new context.
+ // This method should be called when declaring a new variable.
+ Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
+ assert(!Ctx.contains(D));
+ unsigned newID = VarDefinitions.size();
+ Context NewCtx = ContextFactory.add(Ctx, D, newID);
+ VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
+ return NewCtx;
+ }
+
+ // Add a new reference to an existing definition.
+ Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
+ unsigned newID = VarDefinitions.size();
+ Context NewCtx = ContextFactory.add(Ctx, D, newID);
+ VarDefinitions.push_back(VarDefinition(D, i, Ctx));
+ return NewCtx;
+ }
+
+ // Updates a definition only if that definition is already in the map.
+ // This method should be called when assigning to an existing variable.
+ Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
+ if (Ctx.contains(D)) {
+ unsigned newID = VarDefinitions.size();
+ Context NewCtx = ContextFactory.remove(Ctx, D);
+ NewCtx = ContextFactory.add(NewCtx, D, newID);
+ VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
+ return NewCtx;
+ }
+ return Ctx;
+ }
+
+ // Removes a definition from the context, but keeps the variable name
+ // as a valid variable. The index 0 is a placeholder for cleared definitions.
+ Context clearDefinition(const NamedDecl *D, Context Ctx) {
+ Context NewCtx = Ctx;
+ if (NewCtx.contains(D)) {
+ NewCtx = ContextFactory.remove(NewCtx, D);
+ NewCtx = ContextFactory.add(NewCtx, D, 0);
+ }
+ return NewCtx;
+ }
+
+ // Remove a definition entirely from the context.
+ Context removeDefinition(const NamedDecl *D, Context Ctx) {
+ Context NewCtx = Ctx;
+ if (NewCtx.contains(D)) {
+ NewCtx = ContextFactory.remove(NewCtx, D);
+ }
+ return NewCtx;
+ }
+
+ Context intersectContexts(Context C1, Context C2);
+ Context createReferenceContext(Context C);
+ void intersectBackEdge(Context C1, Context C2);
+
+ friend class VarMapBuilder;
+};
+
+
+// This has to be defined after LocalVariableMap.
+CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
+ return CFGBlockInfo(M.getEmptyContext());
+}
+
+
+/// Visitor which builds a LocalVariableMap
+class VarMapBuilder : public StmtVisitor<VarMapBuilder> {
+public:
+ LocalVariableMap* VMap;
+ LocalVariableMap::Context Ctx;
+
+ VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
+ : VMap(VM), Ctx(C) {}
+
+ void VisitDeclStmt(DeclStmt *S);
+ void VisitBinaryOperator(BinaryOperator *BO);
+};
+
+
+// Add new local variables to the variable map
+void VarMapBuilder::VisitDeclStmt(DeclStmt *S) {
+ bool modifiedCtx = false;
+ DeclGroupRef DGrp = S->getDeclGroup();
+ for (const auto *D : DGrp) {
+ if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) {
+ const Expr *E = VD->getInit();
+
+ // Add local variables with trivial type to the variable map
+ QualType T = VD->getType();
+ if (T.isTrivialType(VD->getASTContext())) {
+ Ctx = VMap->addDefinition(VD, E, Ctx);
+ modifiedCtx = true;
+ }
+ }
+ }
+ if (modifiedCtx)
+ VMap->saveContext(S, Ctx);
+}
+
+// Update local variable definitions in variable map
+void VarMapBuilder::VisitBinaryOperator(BinaryOperator *BO) {
+ if (!BO->isAssignmentOp())
+ return;
+
+ Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
+
+ // Update the variable map and current context.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
+ ValueDecl *VDec = DRE->getDecl();
+ if (Ctx.lookup(VDec)) {
+ if (BO->getOpcode() == BO_Assign)
+ Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
+ else
+ // FIXME -- handle compound assignment operators
+ Ctx = VMap->clearDefinition(VDec, Ctx);
+ VMap->saveContext(BO, Ctx);
+ }
+ }
+}
+
+
+// Computes the intersection of two contexts. The intersection is the
+// set of variables which have the same definition in both contexts;
+// variables with different definitions are discarded.
+LocalVariableMap::Context
+LocalVariableMap::intersectContexts(Context C1, Context C2) {
+ Context Result = C1;
+ for (const auto &P : C1) {
+ const NamedDecl *Dec = P.first;
+ const unsigned *i2 = C2.lookup(Dec);
+ if (!i2) // variable doesn't exist on second path
+ Result = removeDefinition(Dec, Result);
+ else if (*i2 != P.second) // variable exists, but has different definition
+ Result = clearDefinition(Dec, Result);
+ }
+ return Result;
+}
+
+// For every variable in C, create a new variable that refers to the
+// definition in C. Return a new context that contains these new variables.
+// (We use this for a naive implementation of SSA on loop back-edges.)
+LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
+ Context Result = getEmptyContext();
+ for (const auto &P : C)
+ Result = addReference(P.first, P.second, Result);
+ return Result;
+}
+
+// This routine also takes the intersection of C1 and C2, but it does so by
+// altering the VarDefinitions. C1 must be the result of an earlier call to
+// createReferenceContext.
+void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
+ for (const auto &P : C1) {
+ unsigned i1 = P.second;
+ VarDefinition *VDef = &VarDefinitions[i1];
+ assert(VDef->isReference());
+
+ const unsigned *i2 = C2.lookup(P.first);
+ if (!i2 || (*i2 != i1))
+ VDef->Ref = 0; // Mark this variable as undefined
+ }
+}
+
+
+// Traverse the CFG in topological order, so all predecessors of a block
+// (excluding back-edges) are visited before the block itself. At
+// each point in the code, we calculate a Context, which holds the set of
+// variable definitions which are visible at that point in execution.
+// Visible variables are mapped to their definitions using an array that
+// contains all definitions.
+//
+// At join points in the CFG, the set is computed as the intersection of
+// the incoming sets along each edge, E.g.
+//
+// { Context | VarDefinitions }
+// int x = 0; { x -> x1 | x1 = 0 }
+// int y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
+// if (b) x = 1; { x -> x2, y -> y1 | x2 = 1, y1 = 0, ... }
+// else x = 2; { x -> x3, y -> y1 | x3 = 2, x2 = 1, ... }
+// ... { y -> y1 (x is unknown) | x3 = 2, x2 = 1, ... }
+//
+// This is essentially a simpler and more naive version of the standard SSA
+// algorithm. Those definitions that remain in the intersection are from blocks
+// that strictly dominate the current block. We do not bother to insert proper
+// phi nodes, because they are not used in our analysis; instead, wherever
+// a phi node would be required, we simply remove that definition from the
+// context (E.g. x above).
+//
+// The initial traversal does not capture back-edges, so those need to be
+// handled on a separate pass. Whenever the first pass encounters an
+// incoming back edge, it duplicates the context, creating new definitions
+// that refer back to the originals. (These correspond to places where SSA
+// might have to insert a phi node.) On the second pass, these definitions are
+// set to NULL if the variable has changed on the back-edge (i.e. a phi
+// node was actually required.) E.g.
+//
+// { Context | VarDefinitions }
+// int x = 0, y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
+// while (b) { x -> x2, y -> y1 | [1st:] x2=x1; [2nd:] x2=NULL; }
+// x = x+1; { x -> x3, y -> y1 | x3 = x2 + 1, ... }
+// ...                   { y -> y1           | x3 = x2 + 1, ... }
+//
+void LocalVariableMap::traverseCFG(CFG *CFGraph,
+ const PostOrderCFGView *SortedGraph,
+ std::vector<CFGBlockInfo> &BlockInfo) {
+ PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
+
+ CtxIndices.resize(CFGraph->getNumBlockIDs());
+
+ for (const auto *CurrBlock : *SortedGraph) {
+ int CurrBlockID = CurrBlock->getBlockID();
+ CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
+
+ VisitedBlocks.insert(CurrBlock);
+
+ // Calculate the entry context for the current block
+ bool HasBackEdges = false;
+ bool CtxInit = true;
+ for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
+ PE = CurrBlock->pred_end(); PI != PE; ++PI) {
+      // If *PI -> CurrBlock is a back edge, note it and skip it.
+ if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) {
+ HasBackEdges = true;
+ continue;
+ }
+
+ int PrevBlockID = (*PI)->getBlockID();
+ CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
+
+ if (CtxInit) {
+ CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
+ CtxInit = false;
+ }
+ else {
+ CurrBlockInfo->EntryContext =
+ intersectContexts(CurrBlockInfo->EntryContext,
+ PrevBlockInfo->ExitContext);
+ }
+ }
+
+ // Duplicate the context if we have back-edges, so we can call
+    // intersectBackEdge later.
+ if (HasBackEdges)
+ CurrBlockInfo->EntryContext =
+ createReferenceContext(CurrBlockInfo->EntryContext);
+
+ // Create a starting context index for the current block
+ saveContext(nullptr, CurrBlockInfo->EntryContext);
+ CurrBlockInfo->EntryIndex = getContextIndex();
+
+ // Visit all the statements in the basic block.
+ VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
+ for (CFGBlock::const_iterator BI = CurrBlock->begin(),
+ BE = CurrBlock->end(); BI != BE; ++BI) {
+ switch (BI->getKind()) {
+ case CFGElement::Statement: {
+ CFGStmt CS = BI->castAs<CFGStmt>();
+ VMapBuilder.Visit(const_cast<Stmt*>(CS.getStmt()));
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ CurrBlockInfo->ExitContext = VMapBuilder.Ctx;
+
+ // Mark variables on back edges as "unknown" if they've been changed.
+ for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
+ SE = CurrBlock->succ_end(); SI != SE; ++SI) {
+ // if CurrBlock -> *SI is *not* a back edge
+ if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
+ continue;
+
+ CFGBlock *FirstLoopBlock = *SI;
+ Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
+ Context LoopEnd = CurrBlockInfo->ExitContext;
+ intersectBackEdge(LoopBegin, LoopEnd);
+ }
+ }
+
+ // Put an extra entry at the end of the indexed context array
+ unsigned exitID = CFGraph->getExit().getBlockID();
+ saveContext(nullptr, BlockInfo[exitID].ExitContext);
+}
+
+/// Find the appropriate source locations to use when producing diagnostics for
+/// each block in the CFG.
+static void findBlockLocations(CFG *CFGraph,
+ const PostOrderCFGView *SortedGraph,
+ std::vector<CFGBlockInfo> &BlockInfo) {
+ for (const auto *CurrBlock : *SortedGraph) {
+ CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];
+
+ // Find the source location of the last statement in the block, if the
+ // block is not empty.
+ if (const Stmt *S = CurrBlock->getTerminator()) {
+ CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getLocStart();
+ } else {
+ for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
+ BE = CurrBlock->rend(); BI != BE; ++BI) {
+ // FIXME: Handle other CFGElement kinds.
+ if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
+ CurrBlockInfo->ExitLoc = CS->getStmt()->getLocStart();
+ break;
+ }
+ }
+ }
+
+ if (CurrBlockInfo->ExitLoc.isValid()) {
+ // This block contains at least one statement. Find the source location
+ // of the first statement in the block.
+ for (CFGBlock::const_iterator BI = CurrBlock->begin(),
+ BE = CurrBlock->end(); BI != BE; ++BI) {
+ // FIXME: Handle other CFGElement kinds.
+ if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
+ CurrBlockInfo->EntryLoc = CS->getStmt()->getLocStart();
+ break;
+ }
+ }
+ } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
+ CurrBlock != &CFGraph->getExit()) {
+ // The block is empty, and has a single predecessor. Use its exit
+ // location.
+ CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
+ BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
+ }
+ }
+}
+
+class LockableFactEntry : public FactEntry {
+private:
+ bool Managed; ///< managed by ScopedLockable object
+
+public:
+ LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
+ bool Mng = false, bool Asrt = false)
+ : FactEntry(CE, LK, Loc, Asrt), Managed(Mng) {}
+
+ void
+ handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
+ SourceLocation JoinLoc, LockErrorKind LEK,
+ ThreadSafetyHandler &Handler) const override {
+ if (!Managed && !asserted() && !negative() && !isUniversal()) {
+ Handler.handleMutexHeldEndOfScope("mutex", toString(), loc(), JoinLoc,
+ LEK);
+ }
+ }
+
+ void handleUnlock(FactSet &FSet, FactManager &FactMan,
+ const CapabilityExpr &Cp, SourceLocation UnlockLoc,
+ bool FullyRemove, ThreadSafetyHandler &Handler,
+ StringRef DiagKind) const override {
+ FSet.removeLock(FactMan, Cp);
+ if (!Cp.negative()) {
+ FSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+ !Cp, LK_Exclusive, UnlockLoc));
+ }
+ }
+};
+
+class ScopedLockableFactEntry : public FactEntry {
+private:
+ SmallVector<const til::SExpr *, 4> UnderlyingMutexes;
+
+public:
+ ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
+ const CapExprSet &Excl, const CapExprSet &Shrd)
+ : FactEntry(CE, LK_Exclusive, Loc, false) {
+ for (const auto &M : Excl)
+ UnderlyingMutexes.push_back(M.sexpr());
+ for (const auto &M : Shrd)
+ UnderlyingMutexes.push_back(M.sexpr());
+ }
+
+ void
+ handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
+ SourceLocation JoinLoc, LockErrorKind LEK,
+ ThreadSafetyHandler &Handler) const override {
+ for (const til::SExpr *UnderlyingMutex : UnderlyingMutexes) {
+ if (FSet.findLock(FactMan, CapabilityExpr(UnderlyingMutex, false))) {
+ // If this scoped lock manages another mutex, and if the underlying
+ // mutex is still held, then warn about the underlying mutex.
+ Handler.handleMutexHeldEndOfScope(
+ "mutex", sx::toString(UnderlyingMutex), loc(), JoinLoc, LEK);
+ }
+ }
+ }
+
+ void handleUnlock(FactSet &FSet, FactManager &FactMan,
+ const CapabilityExpr &Cp, SourceLocation UnlockLoc,
+ bool FullyRemove, ThreadSafetyHandler &Handler,
+ StringRef DiagKind) const override {
+ assert(!Cp.negative() && "Managing object cannot be negative.");
+ for (const til::SExpr *UnderlyingMutex : UnderlyingMutexes) {
+ CapabilityExpr UnderCp(UnderlyingMutex, false);
+ auto UnderEntry = llvm::make_unique<LockableFactEntry>(
+ !UnderCp, LK_Exclusive, UnlockLoc);
+
+ if (FullyRemove) {
+ // We're destroying the managing object.
+ // Remove the underlying mutex if it exists; but don't warn.
+ if (FSet.findLock(FactMan, UnderCp)) {
+ FSet.removeLock(FactMan, UnderCp);
+ FSet.addLock(FactMan, std::move(UnderEntry));
+ }
+ } else {
+ // We're releasing the underlying mutex, but not destroying the
+          // managing object. Warn on double release.
+ if (!FSet.findLock(FactMan, UnderCp)) {
+ Handler.handleUnmatchedUnlock(DiagKind, UnderCp.toString(),
+ UnlockLoc);
+ }
+ FSet.removeLock(FactMan, UnderCp);
+ FSet.addLock(FactMan, std::move(UnderEntry));
+ }
+ }
+ if (FullyRemove)
+ FSet.removeLock(FactMan, Cp);
+ }
+};
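+
+// E.g. (illustrative): for 'ReleasableMutexLock l(&mu);', calling l.Release()
+// takes the FullyRemove == false path above, so a second Release() warns of an
+// unmatched unlock, while destroying 'l' takes the FullyRemove == true path
+// and silently removes 'mu' if it is still held.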
+
+/// \brief Class which implements the core thread safety analysis routines.
+class ThreadSafetyAnalyzer {
+ friend class BuildLockset;
+ friend class threadSafety::BeforeSet;
+
+ llvm::BumpPtrAllocator Bpa;
+ threadSafety::til::MemRegionRef Arena;
+ threadSafety::SExprBuilder SxBuilder;
+
+ ThreadSafetyHandler &Handler;
+ const CXXMethodDecl *CurrentMethod;
+ LocalVariableMap LocalVarMap;
+ FactManager FactMan;
+ std::vector<CFGBlockInfo> BlockInfo;
+
+ BeforeSet* GlobalBeforeSet;
+
+public:
+ ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet* Bset)
+ : Arena(&Bpa), SxBuilder(Arena), Handler(H), GlobalBeforeSet(Bset) {}
+
+ bool inCurrentScope(const CapabilityExpr &CapE);
+
+ void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry,
+ StringRef DiagKind, bool ReqAttr = false);
+ void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
+ SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind,
+ StringRef DiagKind);
+
+ template <typename AttrType>
+ void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, Expr *Exp,
+ const NamedDecl *D, VarDecl *SelfDecl = nullptr);
+
+ template <class AttrType>
+ void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, Expr *Exp,
+ const NamedDecl *D,
+ const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
+ Expr *BrE, bool Neg);
+
+ const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
+ bool &Negate);
+
+ void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
+ const CFGBlock* PredBlock,
+ const CFGBlock *CurrBlock);
+
+ void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
+ SourceLocation JoinLoc,
+ LockErrorKind LEK1, LockErrorKind LEK2,
+ bool Modify=true);
+
+ void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
+ SourceLocation JoinLoc, LockErrorKind LEK1,
+ bool Modify=true) {
+ intersectAndWarn(FSet1, FSet2, JoinLoc, LEK1, LEK1, Modify);
+ }
+
+ void runAnalysis(AnalysisDeclContext &AC);
+};
+} // namespace
+
+/// Process acquired_before and acquired_after attributes on Vd.
+BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
+ ThreadSafetyAnalyzer& Analyzer) {
+ // Create a new entry for Vd.
+ BeforeInfo *Info = nullptr;
+ {
+ // Keep InfoPtr in its own scope in case BMap is modified later and the
+ // reference becomes invalid.
+ std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd];
+ if (!InfoPtr)
+ InfoPtr.reset(new BeforeInfo());
+ Info = InfoPtr.get();
+ }
+
+ for (Attr* At : Vd->attrs()) {
+ switch (At->getKind()) {
+ case attr::AcquiredBefore: {
+ auto *A = cast<AcquiredBeforeAttr>(At);
+
+        // Read exprs from the attribute, and add them to Vect.
+ for (const auto *Arg : A->args()) {
+ CapabilityExpr Cp =
+ Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
+ if (const ValueDecl *Cpvd = Cp.valueDecl()) {
+ Info->Vect.push_back(Cpvd);
+ auto It = BMap.find(Cpvd);
+ if (It == BMap.end())
+ insertAttrExprs(Cpvd, Analyzer);
+ }
+ }
+ break;
+ }
+ case attr::AcquiredAfter: {
+ auto *A = cast<AcquiredAfterAttr>(At);
+
+        // Read exprs from the attribute, and add Vd to the Vect of each.
+ for (const auto *Arg : A->args()) {
+ CapabilityExpr Cp =
+ Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
+ if (const ValueDecl *ArgVd = Cp.valueDecl()) {
+ // Get entry for mutex listed in attribute
+ BeforeInfo *ArgInfo = getBeforeInfoForDecl(ArgVd, Analyzer);
+ ArgInfo->Vect.push_back(Vd);
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ return Info;
+}
+
+BeforeSet::BeforeInfo *
+BeforeSet::getBeforeInfoForDecl(const ValueDecl *Vd,
+ ThreadSafetyAnalyzer &Analyzer) {
+ auto It = BMap.find(Vd);
+ BeforeInfo *Info = nullptr;
+ if (It == BMap.end())
+ Info = insertAttrExprs(Vd, Analyzer);
+ else
+ Info = It->second.get();
+ assert(Info && "BMap contained nullptr?");
+ return Info;
+}
+
+/// Warn if any mutexes in FSet are in the acquired_before set of Vd.
+void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
+ const FactSet& FSet,
+ ThreadSafetyAnalyzer& Analyzer,
+ SourceLocation Loc, StringRef CapKind) {
+ SmallVector<BeforeInfo*, 8> InfoVect;
+
+ // Do a depth-first traversal of Vd.
+ // Return true if there are cycles.
+ std::function<bool (const ValueDecl*)> traverse = [&](const ValueDecl* Vd) {
+ if (!Vd)
+ return false;
+
+ BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer);
+
+ if (Info->Visited == 1)
+ return true;
+
+ if (Info->Visited == 2)
+ return false;
+
+ if (Info->Vect.empty())
+ return false;
+
+ InfoVect.push_back(Info);
+ Info->Visited = 1;
+ for (auto *Vdb : Info->Vect) {
+      // Warn if a mutex in our immediate before set is already held.
+ if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) {
+ StringRef L1 = StartVd->getName();
+ StringRef L2 = Vdb->getName();
+ Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc);
+ }
+ // Transitively search other before sets, and warn on cycles.
+ if (traverse(Vdb)) {
+ if (CycMap.find(Vd) == CycMap.end()) {
+ CycMap.insert(std::make_pair(Vd, true));
+ StringRef L1 = Vd->getName();
+ Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation());
+ }
+ }
+ }
+ Info->Visited = 2;
+ return false;
+ };
+
+ traverse(StartVd);
+
+ for (auto* Info : InfoVect)
+ Info->Visited = 0;
+}
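+
+// E.g. (illustrative, using the conventional annotation macros): given
+//   Mutex mu1 ACQUIRED_BEFORE(mu2);
+//   Mutex mu2;
+// acquiring mu1 while mu2 is already held triggers handleLockAcquiredBefore,
+// and a cycle such as mu1 before mu2 before mu1 triggers
+// handleBeforeAfterCycle.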
+
+
+
+/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs.
+static const ValueDecl *getValueDecl(const Expr *Exp) {
+ if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp))
+ return getValueDecl(CE->getSubExpr());
+
+ if (const auto *DR = dyn_cast<DeclRefExpr>(Exp))
+ return DR->getDecl();
+
+ if (const auto *ME = dyn_cast<MemberExpr>(Exp))
+ return ME->getMemberDecl();
+
+ return nullptr;
+}
+
+namespace {
+template <typename Ty>
+class has_arg_iterator_range {
+ typedef char yes[1];
+ typedef char no[2];
+
+ template <typename Inner>
+ static yes& test(Inner *I, decltype(I->args()) * = nullptr);
+
+ template <typename>
+ static no& test(...);
+
+public:
+ static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
+};
+} // namespace
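+
+// E.g. RequiresCapabilityAttr exposes args(), so
+// has_arg_iterator_range<RequiresCapabilityAttr>::value is true and it selects
+// the iterating ClassifyDiagnostic overload below; GuardedByAttr has only
+// getArg(), so it selects the single-argument overload.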
+
+static StringRef ClassifyDiagnostic(const CapabilityAttr *A) {
+ return A->getName();
+}
+
+static StringRef ClassifyDiagnostic(QualType VDT) {
+ // We need to look at the declaration of the type of the value to determine
+ // which it is. The type should either be a record or a typedef, or a pointer
+ // or reference thereof.
+ if (const auto *RT = VDT->getAs<RecordType>()) {
+ if (const auto *RD = RT->getDecl())
+ if (const auto *CA = RD->getAttr<CapabilityAttr>())
+ return ClassifyDiagnostic(CA);
+ } else if (const auto *TT = VDT->getAs<TypedefType>()) {
+ if (const auto *TD = TT->getDecl())
+ if (const auto *CA = TD->getAttr<CapabilityAttr>())
+ return ClassifyDiagnostic(CA);
+ } else if (VDT->isPointerType() || VDT->isReferenceType())
+ return ClassifyDiagnostic(VDT->getPointeeType());
+
+ return "mutex";
+}
+
+static StringRef ClassifyDiagnostic(const ValueDecl *VD) {
+ assert(VD && "No ValueDecl passed");
+
+ // The ValueDecl is the declaration of a mutex or role (hopefully).
+ return ClassifyDiagnostic(VD->getType());
+}
+
+template <typename AttrTy>
+static typename std::enable_if<!has_arg_iterator_range<AttrTy>::value,
+ StringRef>::type
+ClassifyDiagnostic(const AttrTy *A) {
+ if (const ValueDecl *VD = getValueDecl(A->getArg()))
+ return ClassifyDiagnostic(VD);
+ return "mutex";
+}
+
+template <typename AttrTy>
+static typename std::enable_if<has_arg_iterator_range<AttrTy>::value,
+ StringRef>::type
+ClassifyDiagnostic(const AttrTy *A) {
+ for (const auto *Arg : A->args()) {
+ if (const ValueDecl *VD = getValueDecl(Arg))
+ return ClassifyDiagnostic(VD);
+ }
+ return "mutex";
+}
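+
+// E.g. (illustrative): for a type declared as
+//   class CAPABILITY("role") Role { ... };
+// the overloads above classify diagnostics on Role objects as "role" rather
+// than the default "mutex".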
+
+
+inline bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
+ if (!CurrentMethod)
+ return false;
+ if (auto *P = dyn_cast_or_null<til::Project>(CapE.sexpr())) {
+ auto *VD = P->clangDecl();
+ if (VD)
+ return VD->getDeclContext() == CurrentMethod->getDeclContext();
+ }
+ return false;
+}
+
+
+/// \brief Add a new lock to the lockset, warning if the lock is already there.
+/// \param ReqAttr -- true if this is part of an initial Requires attribute.
+void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
+ std::unique_ptr<FactEntry> Entry,
+ StringRef DiagKind, bool ReqAttr) {
+ if (Entry->shouldIgnore())
+ return;
+
+ if (!ReqAttr && !Entry->negative()) {
+ // look for the negative capability, and remove it from the fact set.
+ CapabilityExpr NegC = !*Entry;
+ FactEntry *Nen = FSet.findLock(FactMan, NegC);
+ if (Nen) {
+ FSet.removeLock(FactMan, NegC);
+ }
+ else {
+ if (inCurrentScope(*Entry) && !Entry->asserted())
+ Handler.handleNegativeNotHeld(DiagKind, Entry->toString(),
+ NegC.toString(), Entry->loc());
+ }
+ }
+
+ // Check before/after constraints
+ if (Handler.issueBetaWarnings() &&
+ !Entry->asserted() && !Entry->declared()) {
+ GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this,
+ Entry->loc(), DiagKind);
+ }
+
+ // FIXME: Don't always warn when we have support for reentrant locks.
+ if (FSet.findLock(FactMan, *Entry)) {
+ if (!Entry->asserted())
+ Handler.handleDoubleLock(DiagKind, Entry->toString(), Entry->loc());
+ } else {
+ FSet.addLock(FactMan, std::move(Entry));
+ }
+}
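+
+// E.g. (sketch of the negative-capability logic above): acquiring 'mu' removes
+// a '!mu' fact if one is held; if '!mu' was not held and 'mu' is in the
+// current scope, handleNegativeNotHeld fires, which the caller can satisfy by
+// declaring something like REQUIRES(!mu).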
+
+
+/// \brief Remove a lock from the lockset, warning if the lock is not there.
+/// \param UnlockLoc The source location of the unlock (only used in error msg)
+void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
+ SourceLocation UnlockLoc,
+ bool FullyRemove, LockKind ReceivedKind,
+ StringRef DiagKind) {
+ if (Cp.shouldIgnore())
+ return;
+
+ const FactEntry *LDat = FSet.findLock(FactMan, Cp);
+ if (!LDat) {
+ Handler.handleUnmatchedUnlock(DiagKind, Cp.toString(), UnlockLoc);
+ return;
+ }
+
+ // Generic lock removal doesn't care about lock kind mismatches, but
+ // otherwise diagnose when the lock kinds are mismatched.
+ if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
+ Handler.handleIncorrectUnlockKind(DiagKind, Cp.toString(),
+ LDat->kind(), ReceivedKind, UnlockLoc);
+ }
+
+ LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler,
+ DiagKind);
+}
+
+
+/// \brief Extract the list of mutexIDs from the attribute on an expression,
+/// and push them onto Mtxs, discarding any duplicates.
+template <typename AttrType>
+void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
+ Expr *Exp, const NamedDecl *D,
+ VarDecl *SelfDecl) {
+ if (Attr->args_size() == 0) {
+ // The mutex held is the "this" object.
+ CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl);
+ if (Cp.isInvalid()) {
+ warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
+ return;
+ }
+ //else
+ if (!Cp.shouldIgnore())
+ Mtxs.push_back_nodup(Cp);
+ return;
+ }
+
+ for (const auto *Arg : Attr->args()) {
+ CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
+ if (Cp.isInvalid()) {
+ warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
+ continue;
+ }
+ //else
+ if (!Cp.shouldIgnore())
+ Mtxs.push_back_nodup(Cp);
+ }
+}
+
+
+/// \brief Extract the list of mutexIDs from a trylock attribute. If the
+/// trylock applies to the given edge, then push them onto Mtxs, discarding
+/// any duplicates.
+template <class AttrType>
+void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
+ Expr *Exp, const NamedDecl *D,
+ const CFGBlock *PredBlock,
+ const CFGBlock *CurrBlock,
+ Expr *BrE, bool Neg) {
+ // Find out which branch has the lock
+ bool branch = false;
+ if (CXXBoolLiteralExpr *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
+ branch = BLE->getValue();
+ else if (IntegerLiteral *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
+ branch = ILE->getValue().getBoolValue();
+
+ int branchnum = branch ? 0 : 1;
+ if (Neg)
+ branchnum = !branchnum;
+
+ // If we've taken the trylock branch, then add the lock
+ int i = 0;
+ for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
+ SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
+ if (*SI == CurrBlock && i == branchnum)
+ getMutexIDs(Mtxs, Attr, Exp, D);
+ }
+}
+
+static bool getStaticBooleanValue(Expr *E, bool &TCond) {
+ if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
+ TCond = false;
+ return true;
+ } else if (CXXBoolLiteralExpr *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
+ TCond = BLE->getValue();
+ return true;
+ } else if (IntegerLiteral *ILE = dyn_cast<IntegerLiteral>(E)) {
+ TCond = ILE->getValue().getBoolValue();
+ return true;
+ } else if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E)) {
+ return getStaticBooleanValue(CE->getSubExpr(), TCond);
+ }
+ return false;
+}
+
+
+// If Cond can be traced back to a function call, return the call expression.
+// Negate should be passed in as false; it is set to true if the function
+// call is negated, e.g. if (!mu.tryLock(...)).
+const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
+ LocalVarContext C,
+ bool &Negate) {
+ if (!Cond)
+ return nullptr;
+
+ if (const CallExpr *CallExp = dyn_cast<CallExpr>(Cond)) {
+ return CallExp;
+ }
+ else if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond)) {
+ return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
+ }
+ else if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Cond)) {
+ return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
+ }
+ else if (const ExprWithCleanups* EWC = dyn_cast<ExprWithCleanups>(Cond)) {
+ return getTrylockCallExpr(EWC->getSubExpr(), C, Negate);
+ }
+ else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Cond)) {
+ const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
+ return getTrylockCallExpr(E, C, Negate);
+ }
+ else if (const UnaryOperator *UOP = dyn_cast<UnaryOperator>(Cond)) {
+ if (UOP->getOpcode() == UO_LNot) {
+ Negate = !Negate;
+ return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
+ }
+ return nullptr;
+ }
+ else if (const BinaryOperator *BOP = dyn_cast<BinaryOperator>(Cond)) {
+ if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
+ if (BOP->getOpcode() == BO_NE)
+ Negate = !Negate;
+
+ bool TCond = false;
+ if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
+ if (!TCond) Negate = !Negate;
+ return getTrylockCallExpr(BOP->getLHS(), C, Negate);
+ }
+ TCond = false;
+ if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
+ if (!TCond) Negate = !Negate;
+ return getTrylockCallExpr(BOP->getRHS(), C, Negate);
+ }
+ return nullptr;
+ }
+ if (BOP->getOpcode() == BO_LAnd) {
+ // LHS must have been evaluated in a different block.
+ return getTrylockCallExpr(BOP->getRHS(), C, Negate);
+ }
+ if (BOP->getOpcode() == BO_LOr) {
+ return getTrylockCallExpr(BOP->getRHS(), C, Negate);
+ }
+ return nullptr;
+ }
+ return nullptr;
+}
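+
+// E.g. (illustrative conditions): each of these traces back to the same
+// trylock call, with Negate toggled as noted:
+//   if (mu.TryLock()) ...           // Negate stays false
+//   if (!mu.TryLock()) ...          // UO_LNot: Negate becomes true
+//   if (mu.TryLock() == false) ...  // folded via getStaticBooleanValue
+//   bool b = mu.TryLock(); if (b)   // DeclRefExpr resolved via LocalVarMap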
+
+
+/// \brief Find the lockset that holds on the edge between PredBlock
+/// and CurrBlock. The edge set is the exit set of PredBlock (passed
+/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
+void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
+ const FactSet &ExitSet,
+ const CFGBlock *PredBlock,
+ const CFGBlock *CurrBlock) {
+ Result = ExitSet;
+
+ const Stmt *Cond = PredBlock->getTerminatorCondition();
+ if (!Cond)
+ return;
+
+ bool Negate = false;
+ const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
+ const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
+ StringRef CapDiagKind = "mutex";
+
+ CallExpr *Exp =
+ const_cast<CallExpr*>(getTrylockCallExpr(Cond, LVarCtx, Negate));
+ if (!Exp)
+ return;
+
+ NamedDecl *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
+  if (!FunDecl || !FunDecl->hasAttrs())
+ return;
+
+ CapExprSet ExclusiveLocksToAdd;
+ CapExprSet SharedLocksToAdd;
+
+ // If the condition is a call to a Trylock function, then grab the attributes
+ for (auto *Attr : FunDecl->attrs()) {
+ switch (Attr->getKind()) {
+ case attr::ExclusiveTrylockFunction: {
+ ExclusiveTrylockFunctionAttr *A =
+ cast<ExclusiveTrylockFunctionAttr>(Attr);
+ getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl,
+ PredBlock, CurrBlock, A->getSuccessValue(), Negate);
+ CapDiagKind = ClassifyDiagnostic(A);
+ break;
+ }
+ case attr::SharedTrylockFunction: {
+ SharedTrylockFunctionAttr *A =
+ cast<SharedTrylockFunctionAttr>(Attr);
+ getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl,
+ PredBlock, CurrBlock, A->getSuccessValue(), Negate);
+ CapDiagKind = ClassifyDiagnostic(A);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ // Add and remove locks.
+ SourceLocation Loc = Exp->getExprLoc();
+ for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
+ addLock(Result, llvm::make_unique<LockableFactEntry>(ExclusiveLockToAdd,
+ LK_Exclusive, Loc),
+ CapDiagKind);
+ for (const auto &SharedLockToAdd : SharedLocksToAdd)
+ addLock(Result, llvm::make_unique<LockableFactEntry>(SharedLockToAdd,
+ LK_Shared, Loc),
+ CapDiagKind);
+}
+
+namespace {
+/// \brief We use this class to visit different types of expressions in
+/// CFGBlocks, and build up the lockset.
+/// An expression may cause us to add or remove locks from the lockset, or else
+/// output error messages related to missing locks.
+/// FIXME: In future, we may be able to not inherit from a visitor.
+class BuildLockset : public StmtVisitor<BuildLockset> {
+ friend class ThreadSafetyAnalyzer;
+
+ ThreadSafetyAnalyzer *Analyzer;
+ FactSet FSet;
+ LocalVariableMap::Context LVarCtx;
+ unsigned CtxIndex;
+
+ // helper functions
+ void warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp, AccessKind AK,
+ Expr *MutexExp, ProtectedOperationKind POK,
+ StringRef DiagKind, SourceLocation Loc);
+ void warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, Expr *MutexExp,
+ StringRef DiagKind);
+
+ void checkAccess(const Expr *Exp, AccessKind AK,
+ ProtectedOperationKind POK = POK_VarAccess);
+ void checkPtAccess(const Expr *Exp, AccessKind AK,
+ ProtectedOperationKind POK = POK_VarAccess);
+
+ void handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD = nullptr);
+
+public:
+ BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
+ : StmtVisitor<BuildLockset>(),
+ Analyzer(Anlzr),
+ FSet(Info.EntrySet),
+ LVarCtx(Info.EntryContext),
+ CtxIndex(Info.EntryIndex)
+ {}
+
+ void VisitUnaryOperator(UnaryOperator *UO);
+ void VisitBinaryOperator(BinaryOperator *BO);
+ void VisitCastExpr(CastExpr *CE);
+ void VisitCallExpr(CallExpr *Exp);
+ void VisitCXXConstructExpr(CXXConstructExpr *Exp);
+ void VisitDeclStmt(DeclStmt *S);
+};
+} // namespace
+
+/// \brief Warn if the LSet does not contain a lock sufficient to protect access
+/// of at least the passed in AccessKind.
+void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
+ AccessKind AK, Expr *MutexExp,
+ ProtectedOperationKind POK,
+ StringRef DiagKind, SourceLocation Loc) {
+ LockKind LK = getLockKindFromAccessKind(AK);
+
+ CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
+ if (Cp.isInvalid()) {
+ warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
+ return;
+ } else if (Cp.shouldIgnore()) {
+ return;
+ }
+
+ if (Cp.negative()) {
+    // Negative capabilities act like locks that are excluded.
+ FactEntry *LDat = FSet.findLock(Analyzer->FactMan, !Cp);
+ if (LDat) {
+ Analyzer->Handler.handleFunExcludesLock(
+ DiagKind, D->getNameAsString(), (!Cp).toString(), Loc);
+ return;
+ }
+
+ // If this does not refer to a negative capability in the same class,
+ // then stop here.
+ if (!Analyzer->inCurrentScope(Cp))
+ return;
+
+ // Otherwise the negative requirement must be propagated to the caller.
+ LDat = FSet.findLock(Analyzer->FactMan, Cp);
+ if (!LDat) {
+ Analyzer->Handler.handleMutexNotHeld("", D, POK, Cp.toString(),
+ LK_Shared, Loc);
+ }
+ return;
+ }
+
+ FactEntry* LDat = FSet.findLockUniv(Analyzer->FactMan, Cp);
+ bool NoError = true;
+ if (!LDat) {
+ // No exact match found. Look for a partial match.
+ LDat = FSet.findPartialMatch(Analyzer->FactMan, Cp);
+ if (LDat) {
+ // Warn that there's no precise match.
+ std::string PartMatchStr = LDat->toString();
+ StringRef PartMatchName(PartMatchStr);
+ Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
+ LK, Loc, &PartMatchName);
+ } else {
+ // Warn that there's no match at all.
+ Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
+ LK, Loc);
+ }
+ NoError = false;
+ }
+ // Make sure the mutex we found is the right kind.
+ if (NoError && LDat && !LDat->isAtLeast(LK)) {
+ Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
+ LK, Loc);
+ }
+}
+
+/// \brief Warn if the LSet contains the given lock.
+void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
+ Expr *MutexExp, StringRef DiagKind) {
+ CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
+ if (Cp.isInvalid()) {
+ warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
+ return;
+ } else if (Cp.shouldIgnore()) {
+ return;
+ }
+
+ FactEntry* LDat = FSet.findLock(Analyzer->FactMan, Cp);
+ if (LDat) {
+ Analyzer->Handler.handleFunExcludesLock(
+ DiagKind, D->getNameAsString(), Cp.toString(), Exp->getExprLoc());
+ }
+}
+
+/// \brief Checks guarded_by and pt_guarded_by attributes.
+/// Whenever we identify an access (read or write) to a DeclRefExpr that is
+/// marked with guarded_by, we must ensure the appropriate mutexes are held.
+/// Similarly, we check if the access is to an expression that dereferences
+/// a pointer marked with pt_guarded_by.
+void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK,
+ ProtectedOperationKind POK) {
+ Exp = Exp->IgnoreParenCasts();
+
+ SourceLocation Loc = Exp->getExprLoc();
+
+ // Local variables of reference type cannot be re-assigned;
+ // map them to their initializer.
+ while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) {
+ const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl());
+ if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) {
+ if (const auto *E = VD->getInit()) {
+ Exp = E;
+ continue;
+ }
+ }
+ break;
+ }
+
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp)) {
+ // For dereferences
+ if (UO->getOpcode() == clang::UO_Deref)
+ checkPtAccess(UO->getSubExpr(), AK, POK);
+ return;
+ }
+
+ if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
+ checkPtAccess(AE->getLHS(), AK, POK);
+ return;
+ }
+
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
+ if (ME->isArrow())
+ checkPtAccess(ME->getBase(), AK, POK);
+ else
+ checkAccess(ME->getBase(), AK, POK);
+ }
+
+ const ValueDecl *D = getValueDecl(Exp);
+ if (!D || !D->hasAttrs())
+ return;
+
+ if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan)) {
+ Analyzer->Handler.handleNoMutexHeld("mutex", D, POK, AK, Loc);
+ }
+
+ for (const auto *I : D->specific_attrs<GuardedByAttr>())
+ warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK,
+ ClassifyDiagnostic(I), Loc);
+}
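+
+// E.g. (illustrative): with 'Mutex mu; int a GUARDED_BY(mu);', any read or
+// write of 'a' while 'mu' is not in the lockset is reported via
+// warnIfMutexNotHeld; a variable marked GUARDED_VAR instead reports through
+// handleNoMutexHeld when the lockset is empty.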
+
+
+/// \brief Checks pt_guarded_by and pt_guarded_var attributes.
+/// POK is the same operationKind that was passed to checkAccess.
+void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK,
+ ProtectedOperationKind POK) {
+ while (true) {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) {
+ Exp = PE->getSubExpr();
+ continue;
+ }
+ if (const CastExpr *CE = dyn_cast<CastExpr>(Exp)) {
+ if (CE->getCastKind() == CK_ArrayToPointerDecay) {
+        // If it's an actual array, and not a pointer, then its elements
+        // are protected by GUARDED_BY, not PT_GUARDED_BY.
+ checkAccess(CE->getSubExpr(), AK, POK);
+ return;
+ }
+ Exp = CE->getSubExpr();
+ continue;
+ }
+ break;
+ }
+
+ // Pass by reference warnings are under a different flag.
+ ProtectedOperationKind PtPOK = POK_VarDereference;
+ if (POK == POK_PassByRef) PtPOK = POK_PtPassByRef;
+
+ const ValueDecl *D = getValueDecl(Exp);
+ if (!D || !D->hasAttrs())
+ return;
+
+ if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan))
+ Analyzer->Handler.handleNoMutexHeld("mutex", D, PtPOK, AK,
+ Exp->getExprLoc());
+
+ for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
+ warnIfMutexNotHeld(D, Exp, AK, I->getArg(), PtPOK,
+ ClassifyDiagnostic(I), Exp->getExprLoc());
+}
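+
+// E.g. (illustrative): with 'int *p PT_GUARDED_BY(mu);', dereferences such as
+// '*p = 1' are checked here against 'mu', while writes to the pointer itself
+// ('p = q') go through checkAccess. An actual array 'int a[2] GUARDED_BY(mu);'
+// decays ArrayToPointer and is routed back to checkAccess, as handled above.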
+
+/// \brief Process a function call, method call, constructor call,
+/// or destructor call. This involves looking at the attributes on the
+/// corresponding function/method/constructor/destructor, issuing warnings,
+/// and updating the locksets accordingly.
+///
+/// FIXME: For classes annotated with one of the guarded annotations, we need
+/// to treat const method calls as reads and non-const method calls as writes,
+/// and check that the appropriate locks are held. Non-const method calls with
+/// the same signature as const method calls can be also treated as reads.
+///
+void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
+ SourceLocation Loc = Exp->getExprLoc();
+ CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
+ CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
+ CapExprSet ScopedExclusiveReqs, ScopedSharedReqs;
+ StringRef CapDiagKind = "mutex";
+
+  // Figure out if we're calling the constructor of a scoped lockable class.
+ bool isScopedVar = false;
+ if (VD) {
+ if (const CXXConstructorDecl *CD = dyn_cast<const CXXConstructorDecl>(D)) {
+ const CXXRecordDecl* PD = CD->getParent();
+ if (PD && PD->hasAttr<ScopedLockableAttr>())
+ isScopedVar = true;
+ }
+ }
+
+  for (Attr *Atconst : D->attrs()) {
+ Attr* At = const_cast<Attr*>(Atconst);
+ switch (At->getKind()) {
+ // When we encounter a lock function, we need to add the lock to our
+ // lockset.
+ case attr::AcquireCapability: {
+ auto *A = cast<AcquireCapabilityAttr>(At);
+ Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
+ : ExclusiveLocksToAdd,
+ A, Exp, D, VD);
+
+ CapDiagKind = ClassifyDiagnostic(A);
+ break;
+ }
+
+ // An assert will add a lock to the lockset, but will not generate
+ // a warning if it is already there, and will not generate a warning
+ // if it is not removed.
+ case attr::AssertExclusiveLock: {
+ AssertExclusiveLockAttr *A = cast<AssertExclusiveLockAttr>(At);
+
+ CapExprSet AssertLocks;
+ Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
+ for (const auto &AssertLock : AssertLocks)
+ Analyzer->addLock(FSet,
+ llvm::make_unique<LockableFactEntry>(
+ AssertLock, LK_Exclusive, Loc, false, true),
+ ClassifyDiagnostic(A));
+ break;
+ }
+ case attr::AssertSharedLock: {
+ AssertSharedLockAttr *A = cast<AssertSharedLockAttr>(At);
+
+ CapExprSet AssertLocks;
+ Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
+ for (const auto &AssertLock : AssertLocks)
+ Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
+ AssertLock, LK_Shared, Loc, false, true),
+ ClassifyDiagnostic(A));
+ break;
+ }
+
+ // When we encounter an unlock function, we need to remove unlocked
+ // mutexes from the lockset, and flag a warning if they are not there.
+ case attr::ReleaseCapability: {
+ auto *A = cast<ReleaseCapabilityAttr>(At);
+ if (A->isGeneric())
+ Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, VD);
+ else if (A->isShared())
+ Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, VD);
+ else
+ Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, VD);
+
+ CapDiagKind = ClassifyDiagnostic(A);
+ break;
+ }
+
+ case attr::RequiresCapability: {
+ RequiresCapabilityAttr *A = cast<RequiresCapabilityAttr>(At);
+ for (auto *Arg : A->args()) {
+ warnIfMutexNotHeld(D, Exp, A->isShared() ? AK_Read : AK_Written, Arg,
+ POK_FunctionCall, ClassifyDiagnostic(A),
+ Exp->getExprLoc());
+ // use for adopting a lock
+ if (isScopedVar) {
+ Analyzer->getMutexIDs(A->isShared() ? ScopedSharedReqs
+ : ScopedExclusiveReqs,
+ A, Exp, D, VD);
+ }
+ }
+ break;
+ }
+
+ case attr::LocksExcluded: {
+ LocksExcludedAttr *A = cast<LocksExcludedAttr>(At);
+ for (auto *Arg : A->args())
+ warnIfMutexHeld(D, Exp, Arg, ClassifyDiagnostic(A));
+ break;
+ }
+
+ // Ignore attributes unrelated to thread-safety
+ default:
+ break;
+ }
+ }
+
+ // Add locks.
+ for (const auto &M : ExclusiveLocksToAdd)
+ Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
+ M, LK_Exclusive, Loc, isScopedVar),
+ CapDiagKind);
+ for (const auto &M : SharedLocksToAdd)
+ Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
+ M, LK_Shared, Loc, isScopedVar),
+ CapDiagKind);
+
+ if (isScopedVar) {
+ // Add the managing object as a dummy mutex, mapped to the underlying mutex.
+ SourceLocation MLoc = VD->getLocation();
+ DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
+ // FIXME: does this store a pointer to DRE?
+ CapabilityExpr Scp = Analyzer->SxBuilder.translateAttrExpr(&DRE, nullptr);
+
+ std::copy(ScopedExclusiveReqs.begin(), ScopedExclusiveReqs.end(),
+ std::back_inserter(ExclusiveLocksToAdd));
+ std::copy(ScopedSharedReqs.begin(), ScopedSharedReqs.end(),
+ std::back_inserter(SharedLocksToAdd));
+ Analyzer->addLock(FSet,
+ llvm::make_unique<ScopedLockableFactEntry>(
+ Scp, MLoc, ExclusiveLocksToAdd, SharedLocksToAdd),
+ CapDiagKind);
+ }
+
+ // Remove locks.
+ // FIXME -- should only fully remove if the attribute refers to 'this'.
+ bool Dtor = isa<CXXDestructorDecl>(D);
+ for (const auto &M : ExclusiveLocksToRemove)
+ Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive, CapDiagKind);
+ for (const auto &M : SharedLocksToRemove)
+ Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared, CapDiagKind);
+ for (const auto &M : GenericLocksToRemove)
+ Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic, CapDiagKind);
+}
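+
+// E.g. (illustrative): for 'MutexLock l(&mu);' where MutexLock is marked
+// SCOPED_LOCKABLE and its constructor acquires 'mu', handleCall adds 'mu' to
+// the lockset and records 'l' as a ScopedLockableFactEntry, so the destructor
+// can release 'mu' at the end of the scope.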
+
+
+/// \brief For unary operations which read and write a variable, we need to
+/// check whether we hold any required mutexes. Reads are checked in
+/// VisitCastExpr.
+void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
+ switch (UO->getOpcode()) {
+ case clang::UO_PostDec:
+ case clang::UO_PostInc:
+ case clang::UO_PreDec:
+ case clang::UO_PreInc: {
+ checkAccess(UO->getSubExpr(), AK_Written);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+/// For binary operations which assign to a variable (writes), we need to check
+/// whether we hold any required mutexes.
+/// FIXME: Deal with non-primitive types.
+void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
+ if (!BO->isAssignmentOp())
+ return;
+
+ // adjust the context
+ LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);
+
+ checkAccess(BO->getLHS(), AK_Written);
+}
+
+
+/// Whenever we do an LValue to RValue cast, we are reading a variable and
+/// need to ensure we hold any required mutexes.
+/// FIXME: Deal with non-primitive types.
+void BuildLockset::VisitCastExpr(CastExpr *CE) {
+ if (CE->getCastKind() != CK_LValueToRValue)
+ return;
+ checkAccess(CE->getSubExpr(), AK_Read);
+}
+
+
+void BuildLockset::VisitCallExpr(CallExpr *Exp) {
+ bool ExamineArgs = true;
+ bool OperatorFun = false;
+
+ if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
+ MemberExpr *ME = dyn_cast<MemberExpr>(CE->getCallee());
+ // ME can be null when calling a method pointer
+ CXXMethodDecl *MD = CE->getMethodDecl();
+
+ if (ME && MD) {
+ if (ME->isArrow()) {
+ if (MD->isConst()) {
+ checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
+ } else { // FIXME -- should be AK_Written
+ checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
+ }
+ } else {
+ if (MD->isConst())
+ checkAccess(CE->getImplicitObjectArgument(), AK_Read);
+ else // FIXME -- should be AK_Written
+ checkAccess(CE->getImplicitObjectArgument(), AK_Read);
+ }
+ }
+ } else if (CXXOperatorCallExpr *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
+ OperatorFun = true;
+
+ auto OEop = OE->getOperator();
+ switch (OEop) {
+ case OO_Equal: {
+ ExamineArgs = false;
+ const Expr *Target = OE->getArg(0);
+ const Expr *Source = OE->getArg(1);
+ checkAccess(Target, AK_Written);
+ checkAccess(Source, AK_Read);
+ break;
+ }
+ case OO_Star:
+ case OO_Arrow:
+ case OO_Subscript: {
+ const Expr *Obj = OE->getArg(0);
+ checkAccess(Obj, AK_Read);
+ if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
+ // Grrr. operator* can be multiplication...
+ checkPtAccess(Obj, AK_Read);
+ }
+ break;
+ }
+ default: {
+ // TODO: get rid of this, and rely on pass-by-ref instead.
+ const Expr *Obj = OE->getArg(0);
+ checkAccess(Obj, AK_Read);
+ break;
+ }
+ }
+ }
+
+ if (ExamineArgs) {
+ if (FunctionDecl *FD = Exp->getDirectCallee()) {
+
+ // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
+ // only turns off checking within the body of a function, but we also
+ // use it to turn off checking in arguments to the function. This
+ // could result in some false negatives, but the alternative is to
+ // create yet another attribute.
+ //
+ if (!FD->hasAttr<NoThreadSafetyAnalysisAttr>()) {
+ unsigned Fn = FD->getNumParams();
+ unsigned Cn = Exp->getNumArgs();
+ unsigned Skip = 0;
+
+ unsigned i = 0;
+ if (OperatorFun) {
+ if (isa<CXXMethodDecl>(FD)) {
+ // First arg in operator call is implicit self argument,
+ // and doesn't appear in the FunctionDecl.
+ Skip = 1;
+ Cn--;
+ } else {
+ // Ignore the first argument of operators; it's been checked above.
+ i = 1;
+ }
+ }
+ // Ignore default arguments
+ unsigned n = (Fn < Cn) ? Fn : Cn;
+
+ for (; i < n; ++i) {
+ ParmVarDecl* Pvd = FD->getParamDecl(i);
+ Expr* Arg = Exp->getArg(i+Skip);
+ QualType Qt = Pvd->getType();
+ if (Qt->isReferenceType())
+ checkAccess(Arg, AK_Read, POK_PassByRef);
+ }
+ }
+ }
+ }
+
+ NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
+  if (!D || !D->hasAttrs())
+ return;
+ handleCall(Exp, D);
+}
+
+void BuildLockset::VisitCXXConstructExpr(CXXConstructExpr *Exp) {
+ const CXXConstructorDecl *D = Exp->getConstructor();
+ if (D && D->isCopyConstructor()) {
+ const Expr* Source = Exp->getArg(0);
+ checkAccess(Source, AK_Read);
+ }
+ // FIXME -- only handles constructors in DeclStmt below.
+}
+
+void BuildLockset::VisitDeclStmt(DeclStmt *S) {
+ // adjust the context
+ LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
+
+ for (auto *D : S->getDeclGroup()) {
+ if (VarDecl *VD = dyn_cast_or_null<VarDecl>(D)) {
+ Expr *E = VD->getInit();
+ // handle constructors that involve temporaries
+ if (ExprWithCleanups *EWC = dyn_cast_or_null<ExprWithCleanups>(E))
+ E = EWC->getSubExpr();
+
+ if (CXXConstructExpr *CE = dyn_cast_or_null<CXXConstructExpr>(E)) {
+ NamedDecl *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
+ if (!CtorD || !CtorD->hasAttrs())
+ return;
+ handleCall(CE, CtorD, VD);
+ }
+ }
+ }
+}
+
+
+
+/// \brief Compute the intersection of two locksets and issue warnings for any
+/// locks in the symmetric difference.
+///
+/// This function is used at a merge point in the CFG when comparing the lockset
+/// of each branch being merged. For example, given the following sequence:
+/// A; if () then B; else C; D; we need to check that the locksets after B and
+/// C are the same. In the event of a difference, we use the intersection of
+/// these two locksets at the start of D.
+///
+/// \param FSet1 The first lockset.
+/// \param FSet2 The second lockset.
+/// \param JoinLoc The location of the join point for error reporting
+/// \param LEK1 The error message to report if a mutex is missing from FSet1
+/// \param LEK2 The error message to report if a mutex is missing from FSet2
+void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1,
+ const FactSet &FSet2,
+ SourceLocation JoinLoc,
+ LockErrorKind LEK1,
+ LockErrorKind LEK2,
+ bool Modify) {
+ FactSet FSet1Orig = FSet1;
+
+ // Find locks in FSet2 that conflict or are not in FSet1, and warn.
+ for (const auto &Fact : FSet2) {
+ const FactEntry *LDat1 = nullptr;
+ const FactEntry *LDat2 = &FactMan[Fact];
+ FactSet::iterator Iter1 = FSet1.findLockIter(FactMan, *LDat2);
+ if (Iter1 != FSet1.end()) LDat1 = &FactMan[*Iter1];
+
+ if (LDat1) {
+ if (LDat1->kind() != LDat2->kind()) {
+ Handler.handleExclusiveAndShared("mutex", LDat2->toString(),
+ LDat2->loc(), LDat1->loc());
+ if (Modify && LDat1->kind() != LK_Exclusive) {
+ // Take the exclusive lock, which is the one in FSet2.
+ *Iter1 = Fact;
+ }
+ }
+ else if (Modify && LDat1->asserted() && !LDat2->asserted()) {
+ // The non-asserted lock in FSet2 is the one we want to track.
+ *Iter1 = Fact;
+ }
+ } else {
+ LDat2->handleRemovalFromIntersection(FSet2, FactMan, JoinLoc, LEK1,
+ Handler);
+ }
+ }
+
+ // Find locks in FSet1 that are not in FSet2, and remove them.
+ for (const auto &Fact : FSet1Orig) {
+ const FactEntry *LDat1 = &FactMan[Fact];
+ const FactEntry *LDat2 = FSet2.findLock(FactMan, *LDat1);
+
+ if (!LDat2) {
+ LDat1->handleRemovalFromIntersection(FSet1Orig, FactMan, JoinLoc, LEK2,
+ Handler);
+ if (Modify)
+ FSet1.removeLock(FactMan, *LDat1);
+ }
+ }
+}
+
+
+// Return true if block B never continues to its successors.
+static bool neverReturns(const CFGBlock *B) {
+ if (B->hasNoReturnElement())
+ return true;
+ if (B->empty())
+ return false;
+
+ CFGElement Last = B->back();
+ if (Optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
+ if (isa<CXXThrowExpr>(S->getStmt()))
+ return true;
+ }
+ return false;
+}
+
+
+/// \brief Check a function's CFG for thread-safety violations.
+///
+/// We traverse the blocks in the CFG, compute the set of mutexes that are held
+/// at the end of each block, and issue warnings for thread safety violations.
+/// Each block in the CFG is traversed exactly once.
+void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
+  // TODO: this whole function needs to be rewritten as a CFGWalker visitor.
+ // For now, we just use the walker to set things up.
+ threadSafety::CFGWalker walker;
+ if (!walker.init(AC))
+ return;
+
+ // AC.dumpCFG(true);
+ // threadSafety::printSCFG(walker);
+
+ CFG *CFGraph = walker.getGraph();
+ const NamedDecl *D = walker.getDecl();
+ const FunctionDecl *CurrentFunction = dyn_cast<FunctionDecl>(D);
+ CurrentMethod = dyn_cast<CXXMethodDecl>(D);
+
+ if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
+ return;
+
+ // FIXME: Do something a bit more intelligent inside constructor and
+ // destructor code. Constructors and destructors must assume unique access
+  // to 'this', so checks on member variable access are disabled, but we should
+ // still enable checks on other objects.
+ if (isa<CXXConstructorDecl>(D))
+ return; // Don't check inside constructors.
+ if (isa<CXXDestructorDecl>(D))
+ return; // Don't check inside destructors.
+
+ Handler.enterFunction(CurrentFunction);
+
+ BlockInfo.resize(CFGraph->getNumBlockIDs(),
+ CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));
+
+ // We need to explore the CFG via a "topological" ordering.
+ // That way, we will be guaranteed to have information about required
+ // predecessor locksets when exploring a new block.
+ const PostOrderCFGView *SortedGraph = walker.getSortedGraph();
+ PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
+
+ // Mark entry block as reachable
+ BlockInfo[CFGraph->getEntry().getBlockID()].Reachable = true;
+
+ // Compute SSA names for local variables
+ LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);
+
+ // Fill in source locations for all CFGBlocks.
+ findBlockLocations(CFGraph, SortedGraph, BlockInfo);
+
+ CapExprSet ExclusiveLocksAcquired;
+ CapExprSet SharedLocksAcquired;
+ CapExprSet LocksReleased;
+
+ // Add locks from exclusive_locks_required and shared_locks_required
+ // to initial lockset. Also turn off checking for lock and unlock functions.
+ // FIXME: is there a more intelligent way to check lock/unlock functions?
+ if (!SortedGraph->empty() && D->hasAttrs()) {
+ const CFGBlock *FirstBlock = *SortedGraph->begin();
+ FactSet &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;
+
+ CapExprSet ExclusiveLocksToAdd;
+ CapExprSet SharedLocksToAdd;
+ StringRef CapDiagKind = "mutex";
+
+ SourceLocation Loc = D->getLocation();
+ for (const auto *Attr : D->attrs()) {
+ Loc = Attr->getLocation();
+ if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
+ getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
+ nullptr, D);
+ CapDiagKind = ClassifyDiagnostic(A);
+ } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
+ // UNLOCK_FUNCTION() is used to hide the underlying lock implementation.
+ // We must ignore such methods.
+ if (A->args_size() == 0)
+ return;
+ // FIXME -- deal with exclusive vs. shared unlock functions?
+ getMutexIDs(ExclusiveLocksToAdd, A, nullptr, D);
+ getMutexIDs(LocksReleased, A, nullptr, D);
+ CapDiagKind = ClassifyDiagnostic(A);
+ } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
+ if (A->args_size() == 0)
+ return;
+ getMutexIDs(A->isShared() ? SharedLocksAcquired
+ : ExclusiveLocksAcquired,
+ A, nullptr, D);
+ CapDiagKind = ClassifyDiagnostic(A);
+ } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) {
+ // Don't try to check trylock functions for now
+ return;
+ } else if (isa<SharedTrylockFunctionAttr>(Attr)) {
+ // Don't try to check trylock functions for now
+ return;
+ }
+ }
+
+ // FIXME -- Loc can be wrong here.
+ for (const auto &Mu : ExclusiveLocksToAdd) {
+ auto Entry = llvm::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc);
+ Entry->setDeclared(true);
+ addLock(InitialLockset, std::move(Entry), CapDiagKind, true);
+ }
+ for (const auto &Mu : SharedLocksToAdd) {
+ auto Entry = llvm::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc);
+ Entry->setDeclared(true);
+ addLock(InitialLockset, std::move(Entry), CapDiagKind, true);
+ }
+ }
+
+ for (const auto *CurrBlock : *SortedGraph) {
+ int CurrBlockID = CurrBlock->getBlockID();
+ CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
+
+ // Use the default initial lockset in case there are no predecessors.
+ VisitedBlocks.insert(CurrBlock);
+
+ // Iterate through the predecessor blocks and warn if the lockset for all
+ // predecessors is not the same. We take the entry lockset of the current
+ // block to be the intersection of all previous locksets.
+ // FIXME: By keeping the intersection, we may output more errors in future
+ // for a lock which is not in the intersection, but was in the union. We
+ // may want to also keep the union in future. As an example, let's say
+ // the intersection contains Mutex L, and the union contains L and M.
+ // Later we unlock M. At this point, we would output an error because we
+ // never locked M; although the real error is probably that we forgot to
+ // lock M on all code paths. Conversely, let's say that later we lock M.
+ // In this case, we should compare against the intersection instead of the
+ // union because the real error is probably that we forgot to unlock M on
+ // all code paths.
+ bool LocksetInitialized = false;
+ SmallVector<CFGBlock *, 8> SpecialBlocks;
+ for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
+ PE = CurrBlock->pred_end(); PI != PE; ++PI) {
+
+ // if *PI -> CurrBlock is a back edge
+ if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
+ continue;
+
+ int PrevBlockID = (*PI)->getBlockID();
+ CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
+
+ // Ignore edges from blocks that can't return.
+ if (neverReturns(*PI) || !PrevBlockInfo->Reachable)
+ continue;
+
+ // Okay, we can reach this block from the entry.
+ CurrBlockInfo->Reachable = true;
+
+ // If the previous block ended in a 'continue' or 'break' statement, then
+ // a difference in locksets is probably due to a bug in that block, rather
+ // than in some other predecessor. In that case, keep the other
+ // predecessor's lockset.
+ if (const Stmt *Terminator = (*PI)->getTerminator()) {
+ if (isa<ContinueStmt>(Terminator) || isa<BreakStmt>(Terminator)) {
+ SpecialBlocks.push_back(*PI);
+ continue;
+ }
+ }
+
+ FactSet PrevLockset;
+ getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);
+
+ if (!LocksetInitialized) {
+ CurrBlockInfo->EntrySet = PrevLockset;
+ LocksetInitialized = true;
+ } else {
+ intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
+ CurrBlockInfo->EntryLoc,
+ LEK_LockedSomePredecessors);
+ }
+ }
+
+ // Skip rest of block if it's not reachable.
+ if (!CurrBlockInfo->Reachable)
+ continue;
+
+ // Process continue and break blocks. Assume that the lockset for the
+ // resulting block is unaffected by any discrepancies in them.
+ for (const auto *PrevBlock : SpecialBlocks) {
+ int PrevBlockID = PrevBlock->getBlockID();
+ CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
+
+ if (!LocksetInitialized) {
+ CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
+ LocksetInitialized = true;
+ } else {
+ // Determine whether this edge is a loop terminator for diagnostic
+ // purposes. FIXME: A 'break' statement might be a loop terminator, but
+ // it might also be part of a switch. Also, a subsequent destructor
+ // might add to the lockset, in which case the real issue might be a
+ // double lock on the other path.
+ const Stmt *Terminator = PrevBlock->getTerminator();
+ bool IsLoop = Terminator && isa<ContinueStmt>(Terminator);
+
+ FactSet PrevLockset;
+ getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet,
+ PrevBlock, CurrBlock);
+
+ // Do not update EntrySet.
+ intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
+ PrevBlockInfo->ExitLoc,
+ IsLoop ? LEK_LockedSomeLoopIterations
+ : LEK_LockedSomePredecessors,
+ false);
+ }
+ }
+
+ BuildLockset LocksetBuilder(this, *CurrBlockInfo);
+
+ // Visit all the statements in the basic block.
+ for (CFGBlock::const_iterator BI = CurrBlock->begin(),
+ BE = CurrBlock->end(); BI != BE; ++BI) {
+ switch (BI->getKind()) {
+ case CFGElement::Statement: {
+ CFGStmt CS = BI->castAs<CFGStmt>();
+ LocksetBuilder.Visit(const_cast<Stmt*>(CS.getStmt()));
+ break;
+ }
+ // Ignore BaseDtor, MemberDtor, and TemporaryDtor for now.
+ case CFGElement::AutomaticObjectDtor: {
+ CFGAutomaticObjDtor AD = BI->castAs<CFGAutomaticObjDtor>();
+ CXXDestructorDecl *DD = const_cast<CXXDestructorDecl *>(
+ AD.getDestructorDecl(AC.getASTContext()));
+ if (!DD->hasAttrs())
+ break;
+
+        // Create a dummy expression referring to the destroyed variable.
+ VarDecl *VD = const_cast<VarDecl*>(AD.getVarDecl());
+ DeclRefExpr DRE(VD, false, VD->getType().getNonReferenceType(),
+ VK_LValue, AD.getTriggerStmt()->getLocEnd());
+ LocksetBuilder.handleCall(&DRE, DD);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ CurrBlockInfo->ExitSet = LocksetBuilder.FSet;
+
+ // For every back edge from CurrBlock (the end of the loop) to another
+ // block (FirstLoopBlock), we need to check that the lockset held at the
+ // end of CurrBlock matches the one held at the beginning of
+ // FirstLoopBlock, which is recorded in that block's EntrySet.
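+ // For example (illustrative):
+ //   while (c) { mu.Lock(); }
+ // Here 'mu' is held on the back edge but not at loop entry, so the
+ // locksets differ and LEK_LockedSomeLoopIterations is reported.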
+ for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
+ SE = CurrBlock->succ_end(); SI != SE; ++SI) {
+
+ // If CurrBlock -> *SI is *not* a back edge, skip it.
+ if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
+ continue;
+
+ CFGBlock *FirstLoopBlock = *SI;
+ CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
+ CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
+ intersectAndWarn(LoopEnd->ExitSet, PreLoop->EntrySet,
+ PreLoop->EntryLoc,
+ LEK_LockedSomeLoopIterations,
+ false);
+ }
+ }
+
+ CFGBlockInfo *Initial = &BlockInfo[CFGraph->getEntry().getBlockID()];
+ CFGBlockInfo *Final = &BlockInfo[CFGraph->getExit().getBlockID()];
+
+ // Skip the final check if the exit block is unreachable.
+ if (!Final->Reachable)
+ return;
+
+ // By default, we expect all locks held on entry to be held on exit.
+ FactSet ExpectedExitSet = Initial->EntrySet;
+
+ // Adjust the expected exit set by adding or removing locks, as declared
+ // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then
+ // issue the appropriate warning.
+ // FIXME: the location here is not quite right.
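+ // For example, a function declared as (hypothetical annotation)
+ //   void release(Foo *p) UNLOCK_FUNCTION(p->mu);
+ // is expected to exit with 'p->mu' removed from the lockset.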
+ for (const auto &Lock : ExclusiveLocksAcquired)
+ ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+ Lock, LK_Exclusive, D->getLocation()));
+ for (const auto &Lock : SharedLocksAcquired)
+ ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+ Lock, LK_Shared, D->getLocation()));
+ for (const auto &Lock : LocksReleased)
+ ExpectedExitSet.removeLock(FactMan, Lock);
+
+ // FIXME: Should we call this function for all blocks which exit the function?
+ intersectAndWarn(ExpectedExitSet, Final->ExitSet,
+ Final->ExitLoc,
+ LEK_LockedAtEndOfFunction,
+ LEK_NotLockedAtEndOfFunction,
+ false);
+
+ Handler.leaveFunction(CurrentFunction);
+}
+
+
+/// \brief Check a function's CFG for thread-safety violations.
+///
+/// We traverse the blocks in the CFG, compute the set of mutexes that are held
+/// at the end of each block, and issue warnings for thread safety violations.
+/// Each block in the CFG is traversed exactly once.
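+///
+/// Typical call sequence (a sketch; the BeforeSet cache is owned by the
+/// caller and freed with threadSafetyCleanup):
+/// \code
+///   BeforeSet *BSet = nullptr;
+///   threadSafety::runThreadSafetyAnalysis(AC, Handler, &BSet);
+///   threadSafety::threadSafetyCleanup(BSet);
+/// \endcode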
+void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC,
+ ThreadSafetyHandler &Handler,
+ BeforeSet **BSet) {
+ if (!*BSet)
+ *BSet = new BeforeSet;
+ ThreadSafetyAnalyzer Analyzer(Handler, *BSet);
+ Analyzer.runAnalysis(AC);
+}
+
+void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; }
+
+/// \brief Helper function that returns a LockKind required for the given level
+/// of access.
+LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) {
+ switch (AK) {
+ case AK_Read :
+ return LK_Shared;
+ case AK_Written :
+ return LK_Exclusive;
+ }
+ llvm_unreachable("Unknown AccessKind");
+}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ThreadSafetyCommon.cpp b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafetyCommon.cpp
new file mode 100644
index 0000000..ffe95ea
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafetyCommon.cpp
@@ -0,0 +1,938 @@
+//===- ThreadSafetyCommon.cpp -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the interfaces declared in ThreadSafetyCommon.h
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Analysis/Analyses/PostOrderCFGView.h"
+#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
+#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Basic/OperatorKinds.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include <algorithm>
+#include <climits>
+#include <vector>
+
+using namespace clang;
+using namespace threadSafety;
+
+// From ThreadSafetyUtil.h
+std::string threadSafety::getSourceLiteralString(const clang::Expr *CE) {
+ switch (CE->getStmtClass()) {
+ case Stmt::IntegerLiteralClass:
+ return cast<IntegerLiteral>(CE)->getValue().toString(10, true);
+ case Stmt::StringLiteralClass: {
+ std::string ret("\"");
+ ret += cast<StringLiteral>(CE)->getString();
+ ret += "\"";
+ return ret;
+ }
+ case Stmt::CharacterLiteralClass:
+ case Stmt::CXXNullPtrLiteralExprClass:
+ case Stmt::GNUNullExprClass:
+ case Stmt::CXXBoolLiteralExprClass:
+ case Stmt::FloatingLiteralClass:
+ case Stmt::ImaginaryLiteralClass:
+ case Stmt::ObjCStringLiteralClass:
+ default:
+ return "#lit";
+ }
+}
+
+// Return true if E is an incomplete Phi node.
+static bool isIncompletePhi(const til::SExpr *E) {
+ if (const auto *Ph = dyn_cast<til::Phi>(E))
+ return Ph->status() == til::Phi::PH_Incomplete;
+ return false;
+}
+
+typedef SExprBuilder::CallingContext CallingContext;
+
+til::SExpr *SExprBuilder::lookupStmt(const Stmt *S) {
+ auto It = SMap.find(S);
+ if (It != SMap.end())
+ return It->second;
+ return nullptr;
+}
+
+til::SCFG *SExprBuilder::buildCFG(CFGWalker &Walker) {
+ Walker.walk(*this);
+ return Scfg;
+}
+
+static bool isCalleeArrow(const Expr *E) {
+ const MemberExpr *ME = dyn_cast<MemberExpr>(E->IgnoreParenCasts());
+ return ME ? ME->isArrow() : false;
+}
+
+/// \brief Translate a clang expression in an attribute to a til::SExpr.
+/// Constructs the context from D, DeclExp, and SelfDecl.
+///
+/// \param AttrExp The expression to translate.
+/// \param D The declaration to which the attribute is attached.
+/// \param DeclExp An expression involving the Decl to which the attribute
+/// is attached. E.g. the call to a function.
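+///
+/// For example (illustrative), given a method annotated with
+/// EXCLUSIVE_LOCKS_REQUIRED(mu) and a call 'obj->foo()', AttrExp is 'mu',
+/// D is 'foo', and DeclExp is the member call expression; 'mu' is then
+/// resolved relative to 'obj'.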
+CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
+ const NamedDecl *D,
+ const Expr *DeclExp,
+ VarDecl *SelfDecl) {
+ // If there is no DeclExp, we are processing a raw attribute expression
+ // with no substitutions.
+ if (!DeclExp)
+ return translateAttrExpr(AttrExp, nullptr);
+
+ CallingContext Ctx(nullptr, D);
+
+ // Examine DeclExp to find SelfArg and FunArgs, which are used to substitute
+ // for formal parameters when we call buildMutexID later.
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(DeclExp)) {
+ Ctx.SelfArg = ME->getBase();
+ Ctx.SelfArrow = ME->isArrow();
+ } else if (const CXXMemberCallExpr *CE =
+ dyn_cast<CXXMemberCallExpr>(DeclExp)) {
+ Ctx.SelfArg = CE->getImplicitObjectArgument();
+ Ctx.SelfArrow = isCalleeArrow(CE->getCallee());
+ Ctx.NumArgs = CE->getNumArgs();
+ Ctx.FunArgs = CE->getArgs();
+ } else if (const CallExpr *CE = dyn_cast<CallExpr>(DeclExp)) {
+ Ctx.NumArgs = CE->getNumArgs();
+ Ctx.FunArgs = CE->getArgs();
+ } else if (const CXXConstructExpr *CE =
+ dyn_cast<CXXConstructExpr>(DeclExp)) {
+ Ctx.SelfArg = nullptr; // Will be set below
+ Ctx.NumArgs = CE->getNumArgs();
+ Ctx.FunArgs = CE->getArgs();
+ } else if (D && isa<CXXDestructorDecl>(D)) {
+ // There's no such thing as a "destructor call" in the AST.
+ Ctx.SelfArg = DeclExp;
+ }
+
+ // Hack to handle constructors, where self cannot be recovered from
+ // the expression.
+ if (SelfDecl && !Ctx.SelfArg) {
+ DeclRefExpr SelfDRE(SelfDecl, false, SelfDecl->getType(), VK_LValue,
+ SelfDecl->getLocation());
+ Ctx.SelfArg = &SelfDRE;
+
+ // If the attribute has no arguments, then assume the argument is "this".
+ if (!AttrExp)
+ return translateAttrExpr(Ctx.SelfArg, nullptr);
+ else // For most attributes.
+ return translateAttrExpr(AttrExp, &Ctx);
+ }
+
+ // If the attribute has no arguments, then assume the argument is "this".
+ if (!AttrExp)
+ return translateAttrExpr(Ctx.SelfArg, nullptr);
+ else // For most attributes.
+ return translateAttrExpr(AttrExp, &Ctx);
+}
+
+/// \brief Translate a clang expression in an attribute to a til::SExpr.
+// This assumes a CallingContext has already been created.
+CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
+ CallingContext *Ctx) {
+ if (!AttrExp)
+ return CapabilityExpr(nullptr, false);
+
+ if (auto* SLit = dyn_cast<StringLiteral>(AttrExp)) {
+ if (SLit->getString() == StringRef("*"))
+ // The "*" expr is a universal lock, which essentially turns off
+ // checks until it is removed from the lockset.
+ return CapabilityExpr(new (Arena) til::Wildcard(), false);
+ else
+ // Ignore other string literals for now.
+ return CapabilityExpr(nullptr, false);
+ }
+
+ bool Neg = false;
+ if (auto *OE = dyn_cast<CXXOperatorCallExpr>(AttrExp)) {
+ if (OE->getOperator() == OO_Exclaim) {
+ Neg = true;
+ AttrExp = OE->getArg(0);
+ }
+ }
+ else if (auto *UO = dyn_cast<UnaryOperator>(AttrExp)) {
+ if (UO->getOpcode() == UO_LNot) {
+ Neg = true;
+ AttrExp = UO->getSubExpr();
+ }
+ }
+
+ til::SExpr *E = translate(AttrExp, Ctx);
+
+ // Trap mutex expressions like nullptr or 0.
+ // Any literal value is nonsense as a mutex.
+ if (!E || isa<til::Literal>(E))
+ return CapabilityExpr(nullptr, false);
+
+ // Hack to deal with smart pointers -- strip off top-level pointer casts.
+ if (auto *CE = dyn_cast_or_null<til::Cast>(E)) {
+ if (CE->castOpcode() == til::CAST_objToPtr)
+ return CapabilityExpr(CE->expr(), Neg);
+ }
+ return CapabilityExpr(E, Neg);
+}
+
+// Translate a clang statement or expression to a TIL expression.
+// Also performs substitution of variables; Ctx provides the context.
+// Dispatches on the type of S.
+til::SExpr *SExprBuilder::translate(const Stmt *S, CallingContext *Ctx) {
+ if (!S)
+ return nullptr;
+
+ // Check if S has already been translated and cached.
+ // This handles the lookup of SSA names for DeclRefExprs.
+ if (til::SExpr *E = lookupStmt(S))
+ return E;
+
+ switch (S->getStmtClass()) {
+ case Stmt::DeclRefExprClass:
+ return translateDeclRefExpr(cast<DeclRefExpr>(S), Ctx);
+ case Stmt::CXXThisExprClass:
+ return translateCXXThisExpr(cast<CXXThisExpr>(S), Ctx);
+ case Stmt::MemberExprClass:
+ return translateMemberExpr(cast<MemberExpr>(S), Ctx);
+ case Stmt::CallExprClass:
+ return translateCallExpr(cast<CallExpr>(S), Ctx);
+ case Stmt::CXXMemberCallExprClass:
+ return translateCXXMemberCallExpr(cast<CXXMemberCallExpr>(S), Ctx);
+ case Stmt::CXXOperatorCallExprClass:
+ return translateCXXOperatorCallExpr(cast<CXXOperatorCallExpr>(S), Ctx);
+ case Stmt::UnaryOperatorClass:
+ return translateUnaryOperator(cast<UnaryOperator>(S), Ctx);
+ case Stmt::BinaryOperatorClass:
+ case Stmt::CompoundAssignOperatorClass:
+ return translateBinaryOperator(cast<BinaryOperator>(S), Ctx);
+
+ case Stmt::ArraySubscriptExprClass:
+ return translateArraySubscriptExpr(cast<ArraySubscriptExpr>(S), Ctx);
+ case Stmt::ConditionalOperatorClass:
+ return translateAbstractConditionalOperator(
+ cast<ConditionalOperator>(S), Ctx);
+ case Stmt::BinaryConditionalOperatorClass:
+ return translateAbstractConditionalOperator(
+ cast<BinaryConditionalOperator>(S), Ctx);
+
+ // We treat these as no-ops
+ case Stmt::ParenExprClass:
+ return translate(cast<ParenExpr>(S)->getSubExpr(), Ctx);
+ case Stmt::ExprWithCleanupsClass:
+ return translate(cast<ExprWithCleanups>(S)->getSubExpr(), Ctx);
+ case Stmt::CXXBindTemporaryExprClass:
+ return translate(cast<CXXBindTemporaryExpr>(S)->getSubExpr(), Ctx);
+
+ // Collect all literals
+ case Stmt::CharacterLiteralClass:
+ case Stmt::CXXNullPtrLiteralExprClass:
+ case Stmt::GNUNullExprClass:
+ case Stmt::CXXBoolLiteralExprClass:
+ case Stmt::FloatingLiteralClass:
+ case Stmt::ImaginaryLiteralClass:
+ case Stmt::IntegerLiteralClass:
+ case Stmt::StringLiteralClass:
+ case Stmt::ObjCStringLiteralClass:
+ return new (Arena) til::Literal(cast<Expr>(S));
+
+ case Stmt::DeclStmtClass:
+ return translateDeclStmt(cast<DeclStmt>(S), Ctx);
+ default:
+ break;
+ }
+ if (const CastExpr *CE = dyn_cast<CastExpr>(S))
+ return translateCastExpr(CE, Ctx);
+
+ return new (Arena) til::Undefined(S);
+}
+
+til::SExpr *SExprBuilder::translateDeclRefExpr(const DeclRefExpr *DRE,
+ CallingContext *Ctx) {
+ const ValueDecl *VD = cast<ValueDecl>(DRE->getDecl()->getCanonicalDecl());
+
+ // Function parameters require substitution and/or renaming.
+ if (const ParmVarDecl *PV = dyn_cast_or_null<ParmVarDecl>(VD)) {
+ const FunctionDecl *FD =
+ cast<FunctionDecl>(PV->getDeclContext())->getCanonicalDecl();
+ unsigned I = PV->getFunctionScopeIndex();
+
+ if (Ctx && Ctx->FunArgs && FD == Ctx->AttrDecl->getCanonicalDecl()) {
+ // Substitute call arguments for references to function parameters
+ assert(I < Ctx->NumArgs);
+ return translate(Ctx->FunArgs[I], Ctx->Prev);
+ }
+ // Map the param back to the param of the original function declaration
+ // for consistent comparisons.
+ VD = FD->getParamDecl(I);
+ }
+
+ // Treat non-local variables as references to named objects.
+ return new (Arena) til::LiteralPtr(VD);
+}
+
+til::SExpr *SExprBuilder::translateCXXThisExpr(const CXXThisExpr *TE,
+ CallingContext *Ctx) {
+ // Substitute for 'this'
+ if (Ctx && Ctx->SelfArg)
+ return translate(Ctx->SelfArg, Ctx->Prev);
+ assert(SelfVar && "We have no variable for 'this'!");
+ return SelfVar;
+}
+
+static const ValueDecl *getValueDeclFromSExpr(const til::SExpr *E) {
+ if (auto *V = dyn_cast<til::Variable>(E))
+ return V->clangDecl();
+ if (auto *Ph = dyn_cast<til::Phi>(E))
+ return Ph->clangDecl();
+ if (auto *P = dyn_cast<til::Project>(E))
+ return P->clangDecl();
+ if (auto *L = dyn_cast<til::LiteralPtr>(E))
+ return L->clangDecl();
+ return nullptr;
+}
+
+static bool hasCppPointerType(const til::SExpr *E) {
+ auto *VD = getValueDeclFromSExpr(E);
+ if (VD && VD->getType()->isPointerType())
+ return true;
+ if (auto *C = dyn_cast<til::Cast>(E))
+ return C->castOpcode() == til::CAST_objToPtr;
+
+ return false;
+}
+
+// Grab the very first declaration of virtual method D
+static const CXXMethodDecl *getFirstVirtualDecl(const CXXMethodDecl *D) {
+ while (true) {
+ D = D->getCanonicalDecl();
+ CXXMethodDecl::method_iterator I = D->begin_overridden_methods(),
+ E = D->end_overridden_methods();
+ if (I == E)
+ return D; // Method does not override anything
+ D = *I; // FIXME: this does not work with multiple inheritance.
+ }
+ return nullptr;
+}
+
+til::SExpr *SExprBuilder::translateMemberExpr(const MemberExpr *ME,
+ CallingContext *Ctx) {
+ til::SExpr *BE = translate(ME->getBase(), Ctx);
+ til::SExpr *E = new (Arena) til::SApply(BE);
+
+ const ValueDecl *D =
+ cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
+ if (auto *VD = dyn_cast<CXXMethodDecl>(D))
+ D = getFirstVirtualDecl(VD);
+
+ til::Project *P = new (Arena) til::Project(E, D);
+ if (hasCppPointerType(BE))
+ P->setArrow(true);
+ return P;
+}
+
+til::SExpr *SExprBuilder::translateCallExpr(const CallExpr *CE,
+ CallingContext *Ctx,
+ const Expr *SelfE) {
+ if (CapabilityExprMode) {
+ // Handle LOCK_RETURNED
+ const FunctionDecl *FD = CE->getDirectCallee()->getMostRecentDecl();
+ if (LockReturnedAttr* At = FD->getAttr<LockReturnedAttr>()) {
+ CallingContext LRCallCtx(Ctx);
+ LRCallCtx.AttrDecl = CE->getDirectCallee();
+ LRCallCtx.SelfArg = SelfE;
+ LRCallCtx.NumArgs = CE->getNumArgs();
+ LRCallCtx.FunArgs = CE->getArgs();
+ return const_cast<til::SExpr*>(
+ translateAttrExpr(At->getArg(), &LRCallCtx).sexpr());
+ }
+ }
+
+ til::SExpr *E = translate(CE->getCallee(), Ctx);
+ for (const auto *Arg : CE->arguments()) {
+ til::SExpr *A = translate(Arg, Ctx);
+ E = new (Arena) til::Apply(E, A);
+ }
+ return new (Arena) til::Call(E, CE);
+}
+
+til::SExpr *SExprBuilder::translateCXXMemberCallExpr(
+ const CXXMemberCallExpr *ME, CallingContext *Ctx) {
+ if (CapabilityExprMode) {
+ // Ignore calls to get() on smart pointers.
+ if (ME->getMethodDecl()->getNameAsString() == "get" &&
+ ME->getNumArgs() == 0) {
+ auto *E = translate(ME->getImplicitObjectArgument(), Ctx);
+ return new (Arena) til::Cast(til::CAST_objToPtr, E);
+ // return E;
+ }
+ }
+ return translateCallExpr(cast<CallExpr>(ME), Ctx,
+ ME->getImplicitObjectArgument());
+}
+
+til::SExpr *SExprBuilder::translateCXXOperatorCallExpr(
+ const CXXOperatorCallExpr *OCE, CallingContext *Ctx) {
+ if (CapabilityExprMode) {
+ // Ignore operator * and operator -> on smart pointers.
+ OverloadedOperatorKind k = OCE->getOperator();
+ if (k == OO_Star || k == OO_Arrow) {
+ auto *E = translate(OCE->getArg(0), Ctx);
+ return new (Arena) til::Cast(til::CAST_objToPtr, E);
+ // return E;
+ }
+ }
+ return translateCallExpr(cast<CallExpr>(OCE), Ctx);
+}
+
+til::SExpr *SExprBuilder::translateUnaryOperator(const UnaryOperator *UO,
+ CallingContext *Ctx) {
+ switch (UO->getOpcode()) {
+ case UO_PostInc:
+ case UO_PostDec:
+ case UO_PreInc:
+ case UO_PreDec:
+ return new (Arena) til::Undefined(UO);
+
+ case UO_AddrOf: {
+ if (CapabilityExprMode) {
+ // interpret &Graph::mu_ as an existential.
+ if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(UO->getSubExpr())) {
+ if (DRE->getDecl()->isCXXInstanceMember()) {
+ // This is a pointer-to-member expression, e.g. &MyClass::mu_.
+ // We interpret this syntax specially, as a wildcard.
+ auto *W = new (Arena) til::Wildcard();
+ return new (Arena) til::Project(W, DRE->getDecl());
+ }
+ }
+ }
+ // otherwise, & is a no-op
+ return translate(UO->getSubExpr(), Ctx);
+ }
+
+ // We treat these as no-ops
+ case UO_Deref:
+ case UO_Plus:
+ return translate(UO->getSubExpr(), Ctx);
+
+ case UO_Minus:
+ return new (Arena)
+ til::UnaryOp(til::UOP_Minus, translate(UO->getSubExpr(), Ctx));
+ case UO_Not:
+ return new (Arena)
+ til::UnaryOp(til::UOP_BitNot, translate(UO->getSubExpr(), Ctx));
+ case UO_LNot:
+ return new (Arena)
+ til::UnaryOp(til::UOP_LogicNot, translate(UO->getSubExpr(), Ctx));
+
+ // Currently unsupported
+ case UO_Real:
+ case UO_Imag:
+ case UO_Extension:
+ case UO_Coawait:
+ return new (Arena) til::Undefined(UO);
+ }
+ return new (Arena) til::Undefined(UO);
+}
+
+til::SExpr *SExprBuilder::translateBinOp(til::TIL_BinaryOpcode Op,
+ const BinaryOperator *BO,
+ CallingContext *Ctx, bool Reverse) {
+ til::SExpr *E0 = translate(BO->getLHS(), Ctx);
+ til::SExpr *E1 = translate(BO->getRHS(), Ctx);
+ if (Reverse)
+ return new (Arena) til::BinaryOp(Op, E1, E0);
+ else
+ return new (Arena) til::BinaryOp(Op, E0, E1);
+}
+
+til::SExpr *SExprBuilder::translateBinAssign(til::TIL_BinaryOpcode Op,
+ const BinaryOperator *BO,
+ CallingContext *Ctx,
+ bool Assign) {
+ const Expr *LHS = BO->getLHS();
+ const Expr *RHS = BO->getRHS();
+ til::SExpr *E0 = translate(LHS, Ctx);
+ til::SExpr *E1 = translate(RHS, Ctx);
+
+ const ValueDecl *VD = nullptr;
+ til::SExpr *CV = nullptr;
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(LHS)) {
+ VD = DRE->getDecl();
+ CV = lookupVarDecl(VD);
+ }
+
+ if (!Assign) {
+ til::SExpr *Arg = CV ? CV : new (Arena) til::Load(E0);
+ E1 = new (Arena) til::BinaryOp(Op, Arg, E1);
+ E1 = addStatement(E1, nullptr, VD);
+ }
+ if (VD && CV)
+ return updateVarDecl(VD, E1);
+ return new (Arena) til::Store(E0, E1);
+}
+
+til::SExpr *SExprBuilder::translateBinaryOperator(const BinaryOperator *BO,
+ CallingContext *Ctx) {
+ switch (BO->getOpcode()) {
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ return new (Arena) til::Undefined(BO);
+
+ case BO_Mul: return translateBinOp(til::BOP_Mul, BO, Ctx);
+ case BO_Div: return translateBinOp(til::BOP_Div, BO, Ctx);
+ case BO_Rem: return translateBinOp(til::BOP_Rem, BO, Ctx);
+ case BO_Add: return translateBinOp(til::BOP_Add, BO, Ctx);
+ case BO_Sub: return translateBinOp(til::BOP_Sub, BO, Ctx);
+ case BO_Shl: return translateBinOp(til::BOP_Shl, BO, Ctx);
+ case BO_Shr: return translateBinOp(til::BOP_Shr, BO, Ctx);
+ case BO_LT: return translateBinOp(til::BOP_Lt, BO, Ctx);
+ case BO_GT: return translateBinOp(til::BOP_Lt, BO, Ctx, true);
+ case BO_LE: return translateBinOp(til::BOP_Leq, BO, Ctx);
+ case BO_GE: return translateBinOp(til::BOP_Leq, BO, Ctx, true);
+ case BO_EQ: return translateBinOp(til::BOP_Eq, BO, Ctx);
+ case BO_NE: return translateBinOp(til::BOP_Neq, BO, Ctx);
+ case BO_And: return translateBinOp(til::BOP_BitAnd, BO, Ctx);
+ case BO_Xor: return translateBinOp(til::BOP_BitXor, BO, Ctx);
+ case BO_Or: return translateBinOp(til::BOP_BitOr, BO, Ctx);
+ case BO_LAnd: return translateBinOp(til::BOP_LogicAnd, BO, Ctx);
+ case BO_LOr: return translateBinOp(til::BOP_LogicOr, BO, Ctx);
+
+ case BO_Assign: return translateBinAssign(til::BOP_Eq, BO, Ctx, true);
+ case BO_MulAssign: return translateBinAssign(til::BOP_Mul, BO, Ctx);
+ case BO_DivAssign: return translateBinAssign(til::BOP_Div, BO, Ctx);
+ case BO_RemAssign: return translateBinAssign(til::BOP_Rem, BO, Ctx);
+ case BO_AddAssign: return translateBinAssign(til::BOP_Add, BO, Ctx);
+ case BO_SubAssign: return translateBinAssign(til::BOP_Sub, BO, Ctx);
+ case BO_ShlAssign: return translateBinAssign(til::BOP_Shl, BO, Ctx);
+ case BO_ShrAssign: return translateBinAssign(til::BOP_Shr, BO, Ctx);
+ case BO_AndAssign: return translateBinAssign(til::BOP_BitAnd, BO, Ctx);
+ case BO_XorAssign: return translateBinAssign(til::BOP_BitXor, BO, Ctx);
+ case BO_OrAssign: return translateBinAssign(til::BOP_BitOr, BO, Ctx);
+
+ case BO_Comma:
+ // The clang CFG should have already processed both sides.
+ return translate(BO->getRHS(), Ctx);
+ }
+ return new (Arena) til::Undefined(BO);
+}
+
+til::SExpr *SExprBuilder::translateCastExpr(const CastExpr *CE,
+ CallingContext *Ctx) {
+ clang::CastKind K = CE->getCastKind();
+ switch (K) {
+ case CK_LValueToRValue: {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
+ til::SExpr *E0 = lookupVarDecl(DRE->getDecl());
+ if (E0)
+ return E0;
+ }
+ til::SExpr *E0 = translate(CE->getSubExpr(), Ctx);
+ return E0;
+ // FIXME!! -- get Load working properly
+ // return new (Arena) til::Load(E0);
+ }
+ case CK_NoOp:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay: {
+ til::SExpr *E0 = translate(CE->getSubExpr(), Ctx);
+ return E0;
+ }
+ default: {
+ // FIXME: handle different kinds of casts.
+ til::SExpr *E0 = translate(CE->getSubExpr(), Ctx);
+ if (CapabilityExprMode)
+ return E0;
+ return new (Arena) til::Cast(til::CAST_none, E0);
+ }
+ }
+}
+
+til::SExpr *
+SExprBuilder::translateArraySubscriptExpr(const ArraySubscriptExpr *E,
+ CallingContext *Ctx) {
+ til::SExpr *E0 = translate(E->getBase(), Ctx);
+ til::SExpr *E1 = translate(E->getIdx(), Ctx);
+ return new (Arena) til::ArrayIndex(E0, E1);
+}
+
+til::SExpr *
+SExprBuilder::translateAbstractConditionalOperator(
+ const AbstractConditionalOperator *CO, CallingContext *Ctx) {
+ auto *C = translate(CO->getCond(), Ctx);
+ auto *T = translate(CO->getTrueExpr(), Ctx);
+ auto *E = translate(CO->getFalseExpr(), Ctx);
+ return new (Arena) til::IfThenElse(C, T, E);
+}
+
+til::SExpr *
+SExprBuilder::translateDeclStmt(const DeclStmt *S, CallingContext *Ctx) {
+ DeclGroupRef DGrp = S->getDeclGroup();
+ for (DeclGroupRef::iterator I = DGrp.begin(), E = DGrp.end(); I != E; ++I) {
+ if (VarDecl *VD = dyn_cast_or_null<VarDecl>(*I)) {
+ Expr *E = VD->getInit();
+ til::SExpr* SE = translate(E, Ctx);
+
+ // Add local variables with trivial type to the variable map
+ QualType T = VD->getType();
+ if (T.isTrivialType(VD->getASTContext())) {
+ return addVarDecl(VD, SE);
+ }
+ else {
+ // TODO: add alloca
+ }
+ }
+ }
+ return nullptr;
+}
+
+// If E is non-trivial, add it to the current basic block and update the
+// statement map so that S refers to E; returns a new variable that refers
+// to E.
+// If E is trivial, returns E unchanged.
+til::SExpr *SExprBuilder::addStatement(til::SExpr* E, const Stmt *S,
+ const ValueDecl *VD) {
+ if (!E || !CurrentBB || E->block() || til::ThreadSafetyTIL::isTrivial(E))
+ return E;
+ if (VD)
+ E = new (Arena) til::Variable(E, VD);
+ CurrentInstructions.push_back(E);
+ if (S)
+ insertStmt(S, E);
+ return E;
+}
+
+// Returns the current value of VD, if known, and nullptr otherwise.
+til::SExpr *SExprBuilder::lookupVarDecl(const ValueDecl *VD) {
+ auto It = LVarIdxMap.find(VD);
+ if (It != LVarIdxMap.end()) {
+ assert(CurrentLVarMap[It->second].first == VD);
+ return CurrentLVarMap[It->second].second;
+ }
+ return nullptr;
+}
+
+// If E is a til::Variable, update its clangDecl.
+static void maybeUpdateVD(til::SExpr *E, const ValueDecl *VD) {
+ if (!E)
+ return;
+ if (til::Variable *V = dyn_cast<til::Variable>(E)) {
+ if (!V->clangDecl())
+ V->setClangDecl(VD);
+ }
+}
+
+// Adds a new variable declaration.
+til::SExpr *SExprBuilder::addVarDecl(const ValueDecl *VD, til::SExpr *E) {
+ maybeUpdateVD(E, VD);
+ LVarIdxMap.insert(std::make_pair(VD, CurrentLVarMap.size()));
+ CurrentLVarMap.makeWritable();
+ CurrentLVarMap.push_back(std::make_pair(VD, E));
+ return E;
+}
+
+// Updates a current variable declaration. (E.g. by assignment)
+til::SExpr *SExprBuilder::updateVarDecl(const ValueDecl *VD, til::SExpr *E) {
+ maybeUpdateVD(E, VD);
+ auto It = LVarIdxMap.find(VD);
+ if (It == LVarIdxMap.end()) {
+ til::SExpr *Ptr = new (Arena) til::LiteralPtr(VD);
+ til::SExpr *St = new (Arena) til::Store(Ptr, E);
+ return St;
+ }
+ CurrentLVarMap.makeWritable();
+ CurrentLVarMap.elem(It->second).second = E;
+ return E;
+}
+
+// Make a Phi node in the current block for the i^th variable in CurrentLVarMap.
+// If E != null, sets Phi[CurrentBlockInfo->ArgIndex] = E.
+// If E == null, this is a backedge and will be set later.
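+// For example (illustrative), with NPreds == 3 and E arriving on edge 1, the
+// node built here is phi(CurrE, E, _); the remaining slot is filled in when
+// the last predecessor is processed.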
+void SExprBuilder::makePhiNodeVar(unsigned i, unsigned NPreds, til::SExpr *E) {
+ unsigned ArgIndex = CurrentBlockInfo->ProcessedPredecessors;
+ assert(ArgIndex > 0 && ArgIndex < NPreds);
+
+ til::SExpr *CurrE = CurrentLVarMap[i].second;
+ if (CurrE->block() == CurrentBB) {
+ // We already have a Phi node in the current block,
+ // so just add the new variable to the Phi node.
+ til::Phi *Ph = dyn_cast<til::Phi>(CurrE);
+ assert(Ph && "Expecting Phi node.");
+ if (E)
+ Ph->values()[ArgIndex] = E;
+ return;
+ }
+
+ // Make a new phi node: phi(..., E)
+ // All phi args up to the current index are set to the current value.
+ til::Phi *Ph = new (Arena) til::Phi(Arena, NPreds);
+ Ph->values().setValues(NPreds, nullptr);
+ for (unsigned PIdx = 0; PIdx < ArgIndex; ++PIdx)
+ Ph->values()[PIdx] = CurrE;
+ if (E)
+ Ph->values()[ArgIndex] = E;
+ Ph->setClangDecl(CurrentLVarMap[i].first);
+ // If E is from a back-edge, or either E or CurrE are incomplete, then
+ // mark this node as incomplete; we may need to remove it later.
+ if (!E || isIncompletePhi(E) || isIncompletePhi(CurrE)) {
+ Ph->setStatus(til::Phi::PH_Incomplete);
+ }
+
+ // Add Phi node to current block, and update CurrentLVarMap[i]
+ CurrentArguments.push_back(Ph);
+ if (Ph->status() == til::Phi::PH_Incomplete)
+ IncompleteArgs.push_back(Ph);
+
+ CurrentLVarMap.makeWritable();
+ CurrentLVarMap.elem(i).second = Ph;
+}
+
+// Merge values from Map into the current variable map.
+// This will construct Phi nodes in the current basic block as necessary.
+void SExprBuilder::mergeEntryMap(LVarDefinitionMap Map) {
+ assert(CurrentBlockInfo && "Not processing a block!");
+
+ if (!CurrentLVarMap.valid()) {
+ // Steal Map, using copy-on-write.
+ CurrentLVarMap = std::move(Map);
+ return;
+ }
+ if (CurrentLVarMap.sameAs(Map))
+ return; // Easy merge: maps from different predecessors are unchanged.
+
+ unsigned NPreds = CurrentBB->numPredecessors();
+ unsigned ESz = CurrentLVarMap.size();
+ unsigned MSz = Map.size();
+ unsigned Sz = std::min(ESz, MSz);
+
+ for (unsigned i=0; i<Sz; ++i) {
+ if (CurrentLVarMap[i].first != Map[i].first) {
+ // We've reached the end of variables in common.
+ CurrentLVarMap.makeWritable();
+ CurrentLVarMap.downsize(i);
+ break;
+ }
+ if (CurrentLVarMap[i].second != Map[i].second)
+ makePhiNodeVar(i, NPreds, Map[i].second);
+ }
+ if (ESz > MSz) {
+ CurrentLVarMap.makeWritable();
+ CurrentLVarMap.downsize(Map.size());
+ }
+}
+
+// Merge a back edge into the current variable map.
+// This will create phi nodes for all variables in the variable map.
+void SExprBuilder::mergeEntryMapBackEdge() {
+ // We don't have definitions for variables on the backedge, because we
+ // haven't gotten that far in the CFG. Thus, when encountering a back edge,
+ // we conservatively create Phi nodes for all variables. Unnecessary Phi
+ // nodes will be marked as incomplete, and stripped out at the end.
+ //
+ // A Phi node is unnecessary if it only refers to itself and one other
+ // variable, e.g. x = Phi(y, y, x) can be reduced to x = y.
+
+ assert(CurrentBlockInfo && "Not processing a block!");
+
+ if (CurrentBlockInfo->HasBackEdges)
+ return;
+ CurrentBlockInfo->HasBackEdges = true;
+
+ CurrentLVarMap.makeWritable();
+ unsigned Sz = CurrentLVarMap.size();
+ unsigned NPreds = CurrentBB->numPredecessors();
+
+ for (unsigned i=0; i < Sz; ++i) {
+ makePhiNodeVar(i, NPreds, nullptr);
+ }
+}
+
+// Update the phi nodes that were initially created for a back edge
+// once the variable definitions have been computed.
+// I.e., merge the current variable map into the phi nodes for Blk.
+void SExprBuilder::mergePhiNodesBackEdge(const CFGBlock *Blk) {
+ til::BasicBlock *BB = lookupBlock(Blk);
+ unsigned ArgIndex = BBInfo[Blk->getBlockID()].ProcessedPredecessors;
+ assert(ArgIndex > 0 && ArgIndex < BB->numPredecessors());
+
+ for (til::SExpr *PE : BB->arguments()) {
+ til::Phi *Ph = dyn_cast_or_null<til::Phi>(PE);
+ assert(Ph && "Expecting Phi Node.");
+ assert(Ph->values()[ArgIndex] == nullptr && "Wrong index for back edge.");
+
+ til::SExpr *E = lookupVarDecl(Ph->clangDecl());
+ assert(E && "Couldn't find local variable for Phi node.");
+ Ph->values()[ArgIndex] = E;
+ }
+}
+
+void SExprBuilder::enterCFG(CFG *Cfg, const NamedDecl *D,
+ const CFGBlock *First) {
+ // Perform initial setup operations.
+ unsigned NBlocks = Cfg->getNumBlockIDs();
+ Scfg = new (Arena) til::SCFG(Arena, NBlocks);
+
+ // allocate all basic blocks immediately, to handle forward references.
+ BBInfo.resize(NBlocks);
+ BlockMap.resize(NBlocks, nullptr);
+ // create map from clang blockID to til::BasicBlocks
+ for (auto *B : *Cfg) {
+ auto *BB = new (Arena) til::BasicBlock(Arena);
+ BB->reserveInstructions(B->size());
+ BlockMap[B->getBlockID()] = BB;
+ }
+
+ CurrentBB = lookupBlock(&Cfg->getEntry());
+ auto Parms = isa<ObjCMethodDecl>(D) ? cast<ObjCMethodDecl>(D)->parameters()
+ : cast<FunctionDecl>(D)->parameters();
+ for (auto *Pm : Parms) {
+ QualType T = Pm->getType();
+ if (!T.isTrivialType(Pm->getASTContext()))
+ continue;
+
+ // Add parameters to local variable map.
+ // FIXME: right now we emulate params with loads; that should be fixed.
+ til::SExpr *Lp = new (Arena) til::LiteralPtr(Pm);
+ til::SExpr *Ld = new (Arena) til::Load(Lp);
+ til::SExpr *V = addStatement(Ld, nullptr, Pm);
+ addVarDecl(Pm, V);
+ }
+}
+
+void SExprBuilder::enterCFGBlock(const CFGBlock *B) {
+ // Initialize the TIL basic block and add it to the CFG.
+ CurrentBB = lookupBlock(B);
+ CurrentBB->reservePredecessors(B->pred_size());
+ Scfg->add(CurrentBB);
+
+ CurrentBlockInfo = &BBInfo[B->getBlockID()];
+
+ // CurrentLVarMap is moved to ExitMap on block exit.
+ // FIXME: the entry block will hold function parameters.
+ // assert(!CurrentLVarMap.valid() && "CurrentLVarMap already initialized.");
+}
+
+void SExprBuilder::handlePredecessor(const CFGBlock *Pred) {
+ // Compute CurrentLVarMap on entry from ExitMaps of predecessors
+
+ CurrentBB->addPredecessor(BlockMap[Pred->getBlockID()]);
+ BlockInfo *PredInfo = &BBInfo[Pred->getBlockID()];
+ assert(PredInfo->UnprocessedSuccessors > 0);
+
+ if (--PredInfo->UnprocessedSuccessors == 0)
+ mergeEntryMap(std::move(PredInfo->ExitMap));
+ else
+ mergeEntryMap(PredInfo->ExitMap.clone());
+
+ ++CurrentBlockInfo->ProcessedPredecessors;
+}
+
+void SExprBuilder::handlePredecessorBackEdge(const CFGBlock *Pred) {
+ mergeEntryMapBackEdge();
+}
+
+void SExprBuilder::enterCFGBlockBody(const CFGBlock *B) {
+ // The merge*() methods have created arguments.
+ // Push those arguments onto the basic block.
+ CurrentBB->arguments().reserve(
+ static_cast<unsigned>(CurrentArguments.size()), Arena);
+ for (auto *A : CurrentArguments)
+ CurrentBB->addArgument(A);
+}
+
+void SExprBuilder::handleStatement(const Stmt *S) {
+ til::SExpr *E = translate(S, nullptr);
+ addStatement(E, S);
+}
+
+void SExprBuilder::handleDestructorCall(const VarDecl *VD,
+ const CXXDestructorDecl *DD) {
+ til::SExpr *Sf = new (Arena) til::LiteralPtr(VD);
+ til::SExpr *Dr = new (Arena) til::LiteralPtr(DD);
+ til::SExpr *Ap = new (Arena) til::Apply(Dr, Sf);
+ til::SExpr *E = new (Arena) til::Call(Ap);
+ addStatement(E, nullptr);
+}
+
+void SExprBuilder::exitCFGBlockBody(const CFGBlock *B) {
+ CurrentBB->instructions().reserve(
+ static_cast<unsigned>(CurrentInstructions.size()), Arena);
+ for (auto *V : CurrentInstructions)
+ CurrentBB->addInstruction(V);
+
+ // Create an appropriate terminator
+ unsigned N = B->succ_size();
+ auto It = B->succ_begin();
+ if (N == 1) {
+ til::BasicBlock *BB = *It ? lookupBlock(*It) : nullptr;
+ // TODO: set index
+ unsigned Idx = BB ? BB->findPredecessorIndex(CurrentBB) : 0;
+ auto *Tm = new (Arena) til::Goto(BB, Idx);
+ CurrentBB->setTerminator(Tm);
+ }
+ else if (N == 2) {
+ til::SExpr *C = translate(B->getTerminatorCondition(true), nullptr);
+ til::BasicBlock *BB1 = *It ? lookupBlock(*It) : nullptr;
+ ++It;
+ til::BasicBlock *BB2 = *It ? lookupBlock(*It) : nullptr;
+ // FIXME: make sure these aren't critical edges.
+ auto *Tm = new (Arena) til::Branch(C, BB1, BB2);
+ CurrentBB->setTerminator(Tm);
+ }
+}
+
+void SExprBuilder::handleSuccessor(const CFGBlock *Succ) {
+ ++CurrentBlockInfo->UnprocessedSuccessors;
+}
+
+void SExprBuilder::handleSuccessorBackEdge(const CFGBlock *Succ) {
+ mergePhiNodesBackEdge(Succ);
+ ++BBInfo[Succ->getBlockID()].ProcessedPredecessors;
+}
+
+void SExprBuilder::exitCFGBlock(const CFGBlock *B) {
+ CurrentArguments.clear();
+ CurrentInstructions.clear();
+ CurrentBlockInfo->ExitMap = std::move(CurrentLVarMap);
+ CurrentBB = nullptr;
+ CurrentBlockInfo = nullptr;
+}
+
+void SExprBuilder::exitCFG(const CFGBlock *Last) {
+ for (auto *Ph : IncompleteArgs) {
+ if (Ph->status() == til::Phi::PH_Incomplete)
+ simplifyIncompleteArg(Ph);
+ }
+
+ CurrentArguments.clear();
+ CurrentInstructions.clear();
+ IncompleteArgs.clear();
+}
+
+/*
+void printSCFG(CFGWalker &Walker) {
+ llvm::BumpPtrAllocator Bpa;
+ til::MemRegionRef Arena(&Bpa);
+ SExprBuilder SxBuilder(Arena);
+ til::SCFG *Scfg = SxBuilder.buildCFG(Walker);
+ TILPrinter::print(Scfg, llvm::errs());
+}
+*/
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ThreadSafetyLogical.cpp b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafetyLogical.cpp
new file mode 100644
index 0000000..facfa11
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafetyLogical.cpp
@@ -0,0 +1,112 @@
+//===- ThreadSafetyLogical.cpp ---------------------------------*- C++ --*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file defines a representation for logical expressions with SExpr leaves
+// that are used as part of fact-checking capability expressions.
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/ThreadSafetyLogical.h"
+
+using namespace llvm;
+using namespace clang::threadSafety::lexpr;
+
+// Implication. We implement De Morgan's Laws by maintaining LNeg and RNeg
+// to keep track of whether LHS and RHS are negated.
+static bool implies(const LExpr *LHS, bool LNeg, const LExpr *RHS, bool RNeg) {
+ // In comments below, we write => for implication.
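+ // For example, (A & B) => A holds, but A => (A & B) does not.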
+
+ // Calculates the logical AND implication operator.
+ const auto LeftAndOperator = [=](const BinOp *A) {
+ return implies(A->left(), LNeg, RHS, RNeg) &&
+ implies(A->right(), LNeg, RHS, RNeg);
+ };
+ const auto RightAndOperator = [=](const BinOp *A) {
+ return implies(LHS, LNeg, A->left(), RNeg) &&
+ implies(LHS, LNeg, A->right(), RNeg);
+ };
+
+ // Calculates the logical OR implication operator.
+ const auto LeftOrOperator = [=](const BinOp *A) {
+ return implies(A->left(), LNeg, RHS, RNeg) ||
+ implies(A->right(), LNeg, RHS, RNeg);
+ };
+ const auto RightOrOperator = [=](const BinOp *A) {
+ return implies(LHS, LNeg, A->left(), RNeg) ||
+ implies(LHS, LNeg, A->right(), RNeg);
+ };
+
+ // Recurse on right.
+ switch (RHS->kind()) {
+ case LExpr::And:
+ // When performing right recursion:
+ // C => A & B [if] C => A and C => B
+ // When performing right recursion (negated):
+ // C => !(A & B) [if] C => !A | !B [===] C => !A or C => !B
+ return RNeg ? RightOrOperator(cast<And>(RHS))
+ : RightAndOperator(cast<And>(RHS));
+ case LExpr::Or:
+ // When performing right recursion:
+ // C => (A | B) [if] C => A or C => B
+ // When performing right recursion (negated):
+ // C => !(A | B) [if] C => !A & !B [===] C => !A and C => !B
+ return RNeg ? RightAndOperator(cast<Or>(RHS))
+ : RightOrOperator(cast<Or>(RHS));
+ case LExpr::Not:
+ // Note that C => !A is very different from !(C => A). It would be incorrect
+ // to return !implies(LHS, RHS).
+ return implies(LHS, LNeg, cast<Not>(RHS)->exp(), !RNeg);
+ case LExpr::Terminal:
+ // After reaching the terminal, it's time to recurse on the left.
+ break;
+ }
+
+ // RHS is now a terminal. Recurse on Left.
+ switch (LHS->kind()) {
+ case LExpr::And:
+ // When performing left recursion:
+ // A & B => C [if] A => C or B => C
+ // When performing left recursion (negated):
+ // !(A & B) => C [if] !A | !B => C [===] !A => C and !B => C
+ return LNeg ? LeftAndOperator(cast<And>(LHS))
+ : LeftOrOperator(cast<And>(LHS));
+ case LExpr::Or:
+ // When performing left recursion:
+ // A | B => C [if] A => C and B => C
+ // When performing left recursion (negated):
+ // !(A | B) => C [if] !A & !B => C [===] !A => C or !B => C
+ return LNeg ? LeftOrOperator(cast<Or>(LHS))
+ : LeftAndOperator(cast<Or>(LHS));
+ case LExpr::Not:
+ // Note that A => !C is very different from !(A => C). It would be incorrect
+ // to return !implies(LHS, RHS).
+ return implies(cast<Not>(LHS)->exp(), !LNeg, RHS, RNeg);
+ case LExpr::Terminal:
+ // After reaching the terminal, it's time to perform identity comparisons.
+ break;
+ }
+
+ // A => A
+ // !A => !A
+ if (LNeg != RNeg)
+ return false;
+
+ // FIXME -- this should compare SExprs for equality, not pointer equality.
+ return cast<Terminal>(LHS)->expr() == cast<Terminal>(RHS)->expr();
+}
+
+namespace clang {
+namespace threadSafety {
+namespace lexpr {
+
+bool implies(const LExpr *LHS, const LExpr *RHS) {
+ // Start out by assuming that LHS and RHS are not negated.
+ return ::implies(LHS, false, RHS, false);
+}
+}
+}
+}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ThreadSafetyTIL.cpp b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafetyTIL.cpp
new file mode 100644
index 0000000..2923f7e6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafetyTIL.cpp
@@ -0,0 +1,336 @@
+//===- ThreadSafetyTIL.cpp -------------------------------------*- C++ --*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT in the llvm repository for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
+#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
+using namespace clang;
+using namespace threadSafety;
+using namespace til;
+
+StringRef til::getUnaryOpcodeString(TIL_UnaryOpcode Op) {
+ switch (Op) {
+ case UOP_Minus: return "-";
+ case UOP_BitNot: return "~";
+ case UOP_LogicNot: return "!";
+ }
+ return "";
+}
+
+StringRef til::getBinaryOpcodeString(TIL_BinaryOpcode Op) {
+ switch (Op) {
+ case BOP_Mul: return "*";
+ case BOP_Div: return "/";
+ case BOP_Rem: return "%";
+ case BOP_Add: return "+";
+ case BOP_Sub: return "-";
+ case BOP_Shl: return "<<";
+ case BOP_Shr: return ">>";
+ case BOP_BitAnd: return "&";
+ case BOP_BitXor: return "^";
+ case BOP_BitOr: return "|";
+ case BOP_Eq: return "==";
+ case BOP_Neq: return "!=";
+ case BOP_Lt: return "<";
+ case BOP_Leq: return "<=";
+ case BOP_LogicAnd: return "&&";
+ case BOP_LogicOr: return "||";
+ }
+ return "";
+}
+
+
+SExpr* Future::force() {
+ Status = FS_evaluating;
+ Result = compute();
+ Status = FS_done;
+ return Result;
+}
+
+
+unsigned BasicBlock::addPredecessor(BasicBlock *Pred) {
+ unsigned Idx = Predecessors.size();
+ Predecessors.reserveCheck(1, Arena);
+ Predecessors.push_back(Pred);
+ for (SExpr *E : Args) {
+ if (Phi* Ph = dyn_cast<Phi>(E)) {
+ Ph->values().reserveCheck(1, Arena);
+ Ph->values().push_back(nullptr);
+ }
+ }
+ return Idx;
+}
+
+
+void BasicBlock::reservePredecessors(unsigned NumPreds) {
+ Predecessors.reserve(NumPreds, Arena);
+ for (SExpr *E : Args) {
+ if (Phi* Ph = dyn_cast<Phi>(E)) {
+ Ph->values().reserve(NumPreds, Arena);
+ }
+ }
+}
+
+
+// If E is a variable, then trace back through any aliases or redundant
+// Phi nodes to find the canonical definition.
+const SExpr *til::getCanonicalVal(const SExpr *E) {
+ while (true) {
+ if (auto *V = dyn_cast<Variable>(E)) {
+ if (V->kind() == Variable::VK_Let) {
+ E = V->definition();
+ continue;
+ }
+ }
+ if (const Phi *Ph = dyn_cast<Phi>(E)) {
+ if (Ph->status() == Phi::PH_SingleVal) {
+ E = Ph->values()[0];
+ continue;
+ }
+ }
+ break;
+ }
+ return E;
+}
+
+
+// If E is a variable, then trace back through any aliases or redundant
+// Phi nodes to find the canonical definition.
+// The non-const version will simplify incomplete Phi nodes.
+SExpr *til::simplifyToCanonicalVal(SExpr *E) {
+ while (true) {
+ if (auto *V = dyn_cast<Variable>(E)) {
+ if (V->kind() != Variable::VK_Let)
+ return V;
+ // Eliminate redundant variables, e.g. x = y, or x = 5,
+ // but keep anything more complicated.
+ if (til::ThreadSafetyTIL::isTrivial(V->definition())) {
+ E = V->definition();
+ continue;
+ }
+ return V;
+ }
+ if (auto *Ph = dyn_cast<Phi>(E)) {
+ if (Ph->status() == Phi::PH_Incomplete)
+ simplifyIncompleteArg(Ph);
+ // Eliminate redundant Phi nodes.
+ if (Ph->status() == Phi::PH_SingleVal) {
+ E = Ph->values()[0];
+ continue;
+ }
+ }
+ return E;
+ }
+}
+
+
+// Trace the arguments of an incomplete Phi node to see if they have the same
+// canonical definition. If so, mark the Phi node as redundant.
+// simplifyToCanonicalVal() will recursively call simplifyIncompleteArg().
+void til::simplifyIncompleteArg(til::Phi *Ph) {
+ assert(Ph && Ph->status() == Phi::PH_Incomplete);
+
+ // eliminate infinite recursion -- assume that this node is not redundant.
+ Ph->setStatus(Phi::PH_MultiVal);
+
+ SExpr *E0 = simplifyToCanonicalVal(Ph->values()[0]);
+ for (unsigned i=1, n=Ph->values().size(); i<n; ++i) {
+ SExpr *Ei = simplifyToCanonicalVal(Ph->values()[i]);
+ if (Ei == Ph)
+ continue; // Recursive reference to itself. Don't count.
+ if (Ei != E0) {
+ return; // Status is already set to MultiVal.
+ }
+ }
+ Ph->setStatus(Phi::PH_SingleVal);
+}
+
+
+// Renumbers the arguments and instructions to have unique, sequential IDs.
+int BasicBlock::renumberInstrs(int ID) {
+ for (auto *Arg : Args)
+ Arg->setID(this, ID++);
+ for (auto *Instr : Instrs)
+ Instr->setID(this, ID++);
+ TermInstr->setID(this, ID++);
+ return ID;
+}
+
+// Sorts the CFG's blocks using a reverse post-order depth-first traversal.
+// Each block will be written into the Blocks array in order, and its BlockID
+// will be set to the index in the array. Sorting should start from the entry
+// block, and ID should be the total number of blocks.
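+//
+// For example, computeNormalForm() below starts the sort with:
+//   Entry->topologicalSort(Blocks, Blocks.size());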
+int BasicBlock::topologicalSort(SimpleArray<BasicBlock*>& Blocks, int ID) {
+ if (Visited) return ID;
+ Visited = true;
+ for (auto *Block : successors())
+ ID = Block->topologicalSort(Blocks, ID);
+ // set ID and update block array in place.
+ // We may lose pointers to unreachable blocks.
+ assert(ID > 0);
+ BlockID = --ID;
+ Blocks[BlockID] = this;
+ return ID;
+}
+
+// Performs a reverse topological traversal, starting from the exit block and
+// following back-edges. The dominator is serialized before any predecessors,
+// which guarantees that all blocks are serialized after their dominator and
+// before their post-dominator (because it's a reverse topological traversal).
+// ID should be initially set to 0.
+//
+// This sort assumes that (1) dominators have been computed, (2) there are no
+// critical edges, and (3) the entry block is reachable from the exit block
+// and no blocks are accessible via traversal of back-edges from the exit that
+// weren't accessible via forward edges from the entry.
+int BasicBlock::topologicalFinalSort(SimpleArray<BasicBlock*>& Blocks, int ID) {
+ // Visited is assumed to have been set to true by topologicalSort. This
+ // pass clears the flag as it goes, so !Visited means the node has already
+ // been processed here.
+ if (!Visited) return ID;
+ Visited = false;
+ if (DominatorNode.Parent)
+ ID = DominatorNode.Parent->topologicalFinalSort(Blocks, ID);
+ for (auto *Pred : Predecessors)
+ ID = Pred->topologicalFinalSort(Blocks, ID);
+ assert(static_cast<size_t>(ID) < Blocks.size());
+ BlockID = ID++;
+ Blocks[BlockID] = this;
+ return ID;
+}
+
+// Computes the immediate dominator of the current block. Assumes that all of
+// its predecessors have already computed their dominators. This is achieved
+// by visiting the nodes in topological order.
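+//
+// The candidate walk below is the usual two-pointer intersection step:
+// repeatedly move whichever candidate has the larger BlockID up to its
+// dominator until the two meet.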
+void BasicBlock::computeDominator() {
+ BasicBlock *Candidate = nullptr;
+ // Walk backwards from each predecessor to find the common dominator node.
+ for (auto *Pred : Predecessors) {
+ // Skip back-edges
+ if (Pred->BlockID >= BlockID) continue;
+ // If we don't have a candidate for the dominator yet, take this one.
+ if (Candidate == nullptr) {
+ Candidate = Pred;
+ continue;
+ }
+ // Walk the alternate and current candidate back to find a common ancestor.
+ auto *Alternate = Pred;
+ while (Alternate != Candidate) {
+ if (Candidate->BlockID > Alternate->BlockID)
+ Candidate = Candidate->DominatorNode.Parent;
+ else
+ Alternate = Alternate->DominatorNode.Parent;
+ }
+ }
+ DominatorNode.Parent = Candidate;
+ DominatorNode.SizeOfSubTree = 1;
+}
+
+// Computes the immediate post-dominator of the current block. Assumes that all
+// of its successors have already computed their post-dominators. This is
+// achieved by visiting the nodes in reverse topological order.
+void BasicBlock::computePostDominator() {
+ BasicBlock *Candidate = nullptr;
+ // Walk back from each successor to find the common post-dominator node.
+ for (auto *Succ : successors()) {
+ // Skip back-edges
+ if (Succ->BlockID <= BlockID) continue;
+ // If we don't have a candidate for the post-dominator yet, take this one.
+ if (Candidate == nullptr) {
+ Candidate = Succ;
+ continue;
+ }
+ // Walk the alternate and current candidate back to find a common ancestor.
+ auto *Alternate = Succ;
+ while (Alternate != Candidate) {
+ if (Candidate->BlockID < Alternate->BlockID)
+ Candidate = Candidate->PostDominatorNode.Parent;
+ else
+ Alternate = Alternate->PostDominatorNode.Parent;
+ }
+ }
+ PostDominatorNode.Parent = Candidate;
+ PostDominatorNode.SizeOfSubTree = 1;
+}
+
+
+// Renumber instructions in all blocks
+void SCFG::renumberInstrs() {
+ int InstrID = 0;
+ for (auto *Block : Blocks)
+ InstrID = Block->renumberInstrs(InstrID);
+}
+
+
+static inline void computeNodeSize(BasicBlock *B,
+ BasicBlock::TopologyNode BasicBlock::*TN) {
+ BasicBlock::TopologyNode *N = &(B->*TN);
+ if (N->Parent) {
+ BasicBlock::TopologyNode *P = &(N->Parent->*TN);
+ // Initially set ID relative to the (as yet uncomputed) parent ID
+ N->NodeID = P->SizeOfSubTree;
+ P->SizeOfSubTree += N->SizeOfSubTree;
+ }
+}
+
+static inline void computeNodeID(BasicBlock *B,
+ BasicBlock::TopologyNode BasicBlock::*TN) {
+ BasicBlock::TopologyNode *N = &(B->*TN);
+ if (N->Parent) {
+ BasicBlock::TopologyNode *P = &(N->Parent->*TN);
+ N->NodeID += P->NodeID; // Fix NodeIDs relative to starting node.
+ }
+}
+
+
+// Normalizes a CFG. Normalization has a few major components:
+// 1) Removing unreachable blocks.
+//   2) Computing dominators and post-dominators.
+// 3) Topologically sorting the blocks into the "Blocks" array.
+void SCFG::computeNormalForm() {
+ // Topologically sort the blocks starting from the entry block.
+ int NumUnreachableBlocks = Entry->topologicalSort(Blocks, Blocks.size());
+ if (NumUnreachableBlocks > 0) {
+ // If there were unreachable blocks, shift everything down and delete them.
+ for (size_t I = NumUnreachableBlocks, E = Blocks.size(); I < E; ++I) {
+ size_t NI = I - NumUnreachableBlocks;
+ Blocks[NI] = Blocks[I];
+ Blocks[NI]->BlockID = NI;
+ // FIXME: clean up predecessor pointers to unreachable blocks?
+ }
+ Blocks.drop(NumUnreachableBlocks);
+ }
+
+ // Compute dominators.
+ for (auto *Block : Blocks)
+ Block->computeDominator();
+
+ // Once dominators have been computed, the final sort may be performed.
+ int NumBlocks = Exit->topologicalFinalSort(Blocks, 0);
+ assert(static_cast<size_t>(NumBlocks) == Blocks.size());
+ (void) NumBlocks;
+
+ // Renumber the instructions now that we have a final sort.
+ renumberInstrs();
+
+ // Compute post-dominators and compute the sizes of each node in the
+ // dominator tree.
+ for (auto *Block : Blocks.reverse()) {
+ Block->computePostDominator();
+ computeNodeSize(Block, &BasicBlock::DominatorNode);
+ }
+ // Compute the sizes of each node in the post-dominator tree and assign IDs in
+ // the dominator tree.
+ for (auto *Block : Blocks) {
+ computeNodeID(Block, &BasicBlock::DominatorNode);
+ computeNodeSize(Block, &BasicBlock::PostDominatorNode);
+ }
+ // Assign IDs in the post-dominator tree.
+ for (auto *Block : Blocks.reverse()) {
+ computeNodeID(Block, &BasicBlock::PostDominatorNode);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp b/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp
new file mode 100644
index 0000000..f2f7919
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp
@@ -0,0 +1,925 @@
+//==- UninitializedValues.cpp - Find Uninitialized Values -------*- C++ --*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements uninitialized values analysis for source-level CFGs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Analysis/Analyses/PostOrderCFGView.h"
+#include "clang/Analysis/Analyses/UninitializedValues.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/DomainSpecific/ObjCNoReturn.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/PackedVector.h"
+#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/SaveAndRestore.h"
+#include <utility>
+
+using namespace clang;
+
+#define DEBUG_LOGGING 0
+
+static bool isTrackedVar(const VarDecl *vd, const DeclContext *dc) {
+ if (vd->isLocalVarDecl() && !vd->hasGlobalStorage() &&
+ !vd->isExceptionVariable() && !vd->isInitCapture() &&
+ !vd->isImplicit() && vd->getDeclContext() == dc) {
+ QualType ty = vd->getType();
+ return ty->isScalarType() || ty->isVectorType() || ty->isRecordType();
+ }
+ return false;
+}
+
+//------------------------------------------------------------------------====//
+// DeclToIndex: a mapping from Decls we track to value indices.
+//====------------------------------------------------------------------------//
+
+namespace {
+class DeclToIndex {
+ llvm::DenseMap<const VarDecl *, unsigned> map;
+public:
+ DeclToIndex() {}
+
+ /// Compute the actual mapping from declarations to bits.
+ void computeMap(const DeclContext &dc);
+
+ /// Return the number of declarations in the map.
+ unsigned size() const { return map.size(); }
+
+ /// Returns the bit vector index for a given declaration.
+ Optional<unsigned> getValueIndex(const VarDecl *d) const;
+};
+}
+
+void DeclToIndex::computeMap(const DeclContext &dc) {
+ unsigned count = 0;
+ DeclContext::specific_decl_iterator<VarDecl> I(dc.decls_begin()),
+ E(dc.decls_end());
+ for ( ; I != E; ++I) {
+ const VarDecl *vd = *I;
+ if (isTrackedVar(vd, &dc))
+ map[vd] = count++;
+ }
+}
+
+Optional<unsigned> DeclToIndex::getValueIndex(const VarDecl *d) const {
+ llvm::DenseMap<const VarDecl *, unsigned>::const_iterator I = map.find(d);
+ if (I == map.end())
+ return None;
+ return I->second;
+}
+
+//------------------------------------------------------------------------====//
+// CFGBlockValues: dataflow values for CFG blocks.
+//====------------------------------------------------------------------------//
+
+// These values are defined in such a way that a merge can be done using
+// a bitwise OR.
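+//
+// For example, if a variable is Initialized (01) on one incoming edge and
+// Uninitialized (10) on another, OR-ing the two values yields
+// MayUninitialized (11).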
+enum Value { Unknown = 0x0, /* 00 */
+ Initialized = 0x1, /* 01 */
+ Uninitialized = 0x2, /* 10 */
+ MayUninitialized = 0x3 /* 11 */ };
+
+static bool isUninitialized(const Value v) {
+ return v >= Uninitialized;
+}
+static bool isAlwaysUninit(const Value v) {
+ return v == Uninitialized;
+}
+
+namespace {
+
+typedef llvm::PackedVector<Value, 2, llvm::SmallBitVector> ValueVector;
+
+class CFGBlockValues {
+ const CFG &cfg;
+ SmallVector<ValueVector, 8> vals;
+ ValueVector scratch;
+ DeclToIndex declToIndex;
+public:
+ CFGBlockValues(const CFG &cfg);
+
+ unsigned getNumEntries() const { return declToIndex.size(); }
+
+ void computeSetOfDeclarations(const DeclContext &dc);
+ ValueVector &getValueVector(const CFGBlock *block) {
+ return vals[block->getBlockID()];
+ }
+
+ void setAllScratchValues(Value V);
+ void mergeIntoScratch(ValueVector const &source, bool isFirst);
+ bool updateValueVectorWithScratch(const CFGBlock *block);
+
+ bool hasNoDeclarations() const {
+ return declToIndex.size() == 0;
+ }
+
+ void resetScratch();
+
+ ValueVector::reference operator[](const VarDecl *vd);
+
+ Value getValue(const CFGBlock *block, const CFGBlock *dstBlock,
+ const VarDecl *vd) {
+ const Optional<unsigned> &idx = declToIndex.getValueIndex(vd);
+ assert(idx.hasValue());
+ return getValueVector(block)[idx.getValue()];
+ }
+};
+} // end anonymous namespace
+
+CFGBlockValues::CFGBlockValues(const CFG &c) : cfg(c), vals(0) {}
+
+void CFGBlockValues::computeSetOfDeclarations(const DeclContext &dc) {
+ declToIndex.computeMap(dc);
+ unsigned decls = declToIndex.size();
+ scratch.resize(decls);
+ unsigned n = cfg.getNumBlockIDs();
+ if (!n)
+ return;
+ vals.resize(n);
+ for (unsigned i = 0; i < n; ++i)
+ vals[i].resize(decls);
+}
+
+#if DEBUG_LOGGING
+static void printVector(const CFGBlock *block, ValueVector &bv,
+ unsigned num) {
+ llvm::errs() << block->getBlockID() << " :";
+ for (unsigned i = 0; i < bv.size(); ++i) {
+ llvm::errs() << ' ' << bv[i];
+ }
+ llvm::errs() << " : " << num << '\n';
+}
+#endif
+
+void CFGBlockValues::setAllScratchValues(Value V) {
+ for (unsigned I = 0, E = scratch.size(); I != E; ++I)
+ scratch[I] = V;
+}
+
+void CFGBlockValues::mergeIntoScratch(ValueVector const &source,
+ bool isFirst) {
+ if (isFirst)
+ scratch = source;
+ else
+ scratch |= source;
+}
+
+bool CFGBlockValues::updateValueVectorWithScratch(const CFGBlock *block) {
+ ValueVector &dst = getValueVector(block);
+ bool changed = (dst != scratch);
+ if (changed)
+ dst = scratch;
+#if DEBUG_LOGGING
+ printVector(block, scratch, 0);
+#endif
+ return changed;
+}
+
+void CFGBlockValues::resetScratch() {
+ scratch.reset();
+}
+
+ValueVector::reference CFGBlockValues::operator[](const VarDecl *vd) {
+ const Optional<unsigned> &idx = declToIndex.getValueIndex(vd);
+ assert(idx.hasValue());
+ return scratch[idx.getValue()];
+}
+
+//------------------------------------------------------------------------====//
+// Worklist: worklist for dataflow analysis.
+//====------------------------------------------------------------------------//
+
+namespace {
+class DataflowWorklist {
+ PostOrderCFGView::iterator PO_I, PO_E;
+ SmallVector<const CFGBlock *, 20> worklist;
+ llvm::BitVector enqueuedBlocks;
+public:
+ DataflowWorklist(const CFG &cfg, PostOrderCFGView &view)
+ : PO_I(view.begin()), PO_E(view.end()),
+ enqueuedBlocks(cfg.getNumBlockIDs(), true) {
+ // Treat the first block as already analyzed.
+ if (PO_I != PO_E) {
+ assert(*PO_I == &cfg.getEntry());
+ enqueuedBlocks[(*PO_I)->getBlockID()] = false;
+ ++PO_I;
+ }
+ }
+
+ void enqueueSuccessors(const CFGBlock *block);
+ const CFGBlock *dequeue();
+};
+} // end anonymous namespace
+
+void DataflowWorklist::enqueueSuccessors(const clang::CFGBlock *block) {
+ for (CFGBlock::const_succ_iterator I = block->succ_begin(),
+ E = block->succ_end(); I != E; ++I) {
+ const CFGBlock *Successor = *I;
+ if (!Successor || enqueuedBlocks[Successor->getBlockID()])
+ continue;
+ worklist.push_back(Successor);
+ enqueuedBlocks[Successor->getBlockID()] = true;
+ }
+}
+
+const CFGBlock *DataflowWorklist::dequeue() {
+ const CFGBlock *B = nullptr;
+
+ // First dequeue from the worklist. This can represent
+ // updates along backedges that we want propagated as quickly as possible.
+ if (!worklist.empty())
+ B = worklist.pop_back_val();
+
+ // Next dequeue from the initial reverse post order. This is the
+ // theoretical ideal ordering when there are no back edges.
+ else if (PO_I != PO_E) {
+ B = *PO_I;
+ ++PO_I;
+ }
+ else {
+ return nullptr;
+ }
+
+ assert(enqueuedBlocks[B->getBlockID()]);
+ enqueuedBlocks[B->getBlockID()] = false;
+ return B;
+}
+
+//===----------------------------------------------------------------------===//
+// Classification of DeclRefExprs as use or initialization.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class FindVarResult {
+ const VarDecl *vd;
+ const DeclRefExpr *dr;
+public:
+ FindVarResult(const VarDecl *vd, const DeclRefExpr *dr) : vd(vd), dr(dr) {}
+
+ const DeclRefExpr *getDeclRefExpr() const { return dr; }
+ const VarDecl *getDecl() const { return vd; }
+};
+
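+// stripCasts reduces e.g. '((x))' or a no-op cast of 'x' back to the
+// underlying DeclRefExpr; lvalue bitcasts are stepped through explicitly
+// because the plain IgnoreParenNoopCasts walk stops at them.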
+static const Expr *stripCasts(ASTContext &C, const Expr *Ex) {
+ while (Ex) {
+ Ex = Ex->IgnoreParenNoopCasts(C);
+ if (const CastExpr *CE = dyn_cast<CastExpr>(Ex)) {
+ if (CE->getCastKind() == CK_LValueBitCast) {
+ Ex = CE->getSubExpr();
+ continue;
+ }
+ }
+ break;
+ }
+ return Ex;
+}
+
+/// If E is an expression comprising a reference to a single variable, find that
+/// variable.
+static FindVarResult findVar(const Expr *E, const DeclContext *DC) {
+ if (const DeclRefExpr *DRE =
+ dyn_cast<DeclRefExpr>(stripCasts(DC->getParentASTContext(), E)))
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (isTrackedVar(VD, DC))
+ return FindVarResult(VD, DRE);
+ return FindVarResult(nullptr, nullptr);
+}
+
+/// \brief Classify each DeclRefExpr as an initialization or a use. Any
+/// DeclRefExpr which isn't explicitly classified will be assumed to have
+/// escaped the analysis and will be treated as an initialization.
+class ClassifyRefs : public StmtVisitor<ClassifyRefs> {
+public:
+ enum Class {
+ Init,
+ Use,
+ SelfInit,
+ Ignore
+ };
+
+private:
+ const DeclContext *DC;
+ llvm::DenseMap<const DeclRefExpr*, Class> Classification;
+
+ bool isTrackedVar(const VarDecl *VD) const {
+ return ::isTrackedVar(VD, DC);
+ }
+
+ void classify(const Expr *E, Class C);
+
+public:
+ ClassifyRefs(AnalysisDeclContext &AC) : DC(cast<DeclContext>(AC.getDecl())) {}
+
+ void VisitDeclStmt(DeclStmt *DS);
+ void VisitUnaryOperator(UnaryOperator *UO);
+ void VisitBinaryOperator(BinaryOperator *BO);
+ void VisitCallExpr(CallExpr *CE);
+ void VisitCastExpr(CastExpr *CE);
+
+ void operator()(Stmt *S) { Visit(S); }
+
+ Class get(const DeclRefExpr *DRE) const {
+ llvm::DenseMap<const DeclRefExpr*, Class>::const_iterator I
+ = Classification.find(DRE);
+ if (I != Classification.end())
+ return I->second;
+
+ const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl());
+ if (!VD || !isTrackedVar(VD))
+ return Ignore;
+
+ return Init;
+ }
+};
+} // end anonymous namespace
+
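+// getSelfInitExpr recognizes the self-init idiom 'int x = x;', i.e. an
+// initializer that, after stripping casts, refers to the variable itself.
+// Record types are skipped; their self-initialization is handled elsewhere.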
+static const DeclRefExpr *getSelfInitExpr(VarDecl *VD) {
+ if (VD->getType()->isRecordType()) return nullptr;
+ if (Expr *Init = VD->getInit()) {
+ const DeclRefExpr *DRE
+ = dyn_cast<DeclRefExpr>(stripCasts(VD->getASTContext(), Init));
+ if (DRE && DRE->getDecl() == VD)
+ return DRE;
+ }
+ return nullptr;
+}
+
+void ClassifyRefs::classify(const Expr *E, Class C) {
+ // The result of a ?: could also be an lvalue, e.g. '(b ? x : y) = 1',
+ // so classify both of its arms.
+ E = E->IgnoreParens();
+ if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
+ classify(CO->getTrueExpr(), C);
+ classify(CO->getFalseExpr(), C);
+ return;
+ }
+
+ if (const BinaryConditionalOperator *BCO =
+ dyn_cast<BinaryConditionalOperator>(E)) {
+ classify(BCO->getFalseExpr(), C);
+ return;
+ }
+
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) {
+ classify(OVE->getSourceExpr(), C);
+ return;
+ }
+
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
+ if (!VD->isStaticDataMember())
+ classify(ME->getBase(), C);
+ }
+ return;
+ }
+
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ switch (BO->getOpcode()) {
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ classify(BO->getLHS(), C);
+ return;
+ case BO_Comma:
+ classify(BO->getRHS(), C);
+ return;
+ default:
+ return;
+ }
+ }
+
+ FindVarResult Var = findVar(E, DC);
+ if (const DeclRefExpr *DRE = Var.getDeclRefExpr())
+ Classification[DRE] = std::max(Classification[DRE], C);
+}
+
+void ClassifyRefs::VisitDeclStmt(DeclStmt *DS) {
+ for (auto *DI : DS->decls()) {
+ VarDecl *VD = dyn_cast<VarDecl>(DI);
+ if (VD && isTrackedVar(VD))
+ if (const DeclRefExpr *DRE = getSelfInitExpr(VD))
+ Classification[DRE] = SelfInit;
+ }
+}
+
+void ClassifyRefs::VisitBinaryOperator(BinaryOperator *BO) {
+ // Ignore the evaluation of a DeclRefExpr on the LHS of an assignment. If this
+ // is not a compound-assignment, we will treat it as initializing the variable
+ // when TransferFunctions visits it. A compound-assignment does not affect
+ // whether a variable is uninitialized, and there's no point counting it as a
+ // use.
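+ // e.g. 'x = 1' leaves the LHS reference Ignored, whereas 'x += 1'
+ // classifies it as a Use because the old value of 'x' is read.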
+ if (BO->isCompoundAssignmentOp())
+ classify(BO->getLHS(), Use);
+ else if (BO->getOpcode() == BO_Assign || BO->getOpcode() == BO_Comma)
+ classify(BO->getLHS(), Ignore);
+}
+
+void ClassifyRefs::VisitUnaryOperator(UnaryOperator *UO) {
+ // Increment and decrement are uses despite there being no lvalue-to-rvalue
+ // conversion.
+ if (UO->isIncrementDecrementOp())
+ classify(UO->getSubExpr(), Use);
+}
+
+static bool isPointerToConst(const QualType &QT) {
+ return QT->isAnyPointerType() && QT->getPointeeType().isConstQualified();
+}
+
+void ClassifyRefs::VisitCallExpr(CallExpr *CE) {
+ // Classify arguments to std::move as used.
+ if (CE->getNumArgs() == 1) {
+ if (FunctionDecl *FD = CE->getDirectCallee()) {
+ if (FD->isInStdNamespace() && FD->getIdentifier() &&
+ FD->getIdentifier()->isStr("move")) {
+ // RecordTypes are handled in SemaDeclCXX.cpp.
+ if (!CE->getArg(0)->getType()->isRecordType())
+ classify(CE->getArg(0), Use);
+ return;
+ }
+ }
+ }
+
+ // If a value is passed by const pointer or by const reference to a function,
+ // we should not assume that it is initialized by the call, and we
+ // conservatively do not assume that it is used.
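+ // e.g. for 'void f(const int *); int x; f(&x);' the argument is a
+ // pointer to const: 'x' is neither marked initialized nor reported
+ // as a use.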
+ for (CallExpr::arg_iterator I = CE->arg_begin(), E = CE->arg_end();
+ I != E; ++I) {
+ if ((*I)->isGLValue()) {
+ if ((*I)->getType().isConstQualified())
+ classify((*I), Ignore);
+ } else if (isPointerToConst((*I)->getType())) {
+ const Expr *Ex = stripCasts(DC->getParentASTContext(), *I);
+ const UnaryOperator *UO = dyn_cast<UnaryOperator>(Ex);
+ if (UO && UO->getOpcode() == UO_AddrOf)
+ Ex = UO->getSubExpr();
+ classify(Ex, Ignore);
+ }
+ }
+}
+
+void ClassifyRefs::VisitCastExpr(CastExpr *CE) {
+ if (CE->getCastKind() == CK_LValueToRValue)
+ classify(CE->getSubExpr(), Use);
+ else if (CStyleCastExpr *CSE = dyn_cast<CStyleCastExpr>(CE)) {
+ if (CSE->getType()->isVoidType()) {
+ // Squelch any detected load of an uninitialized value if
+ // we cast it to void.
+ // e.g. (void) x;
+ classify(CSE->getSubExpr(), Ignore);
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function for uninitialized values analysis.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class TransferFunctions : public StmtVisitor<TransferFunctions> {
+ CFGBlockValues &vals;
+ const CFG &cfg;
+ const CFGBlock *block;
+ AnalysisDeclContext &ac;
+ const ClassifyRefs &classification;
+ ObjCNoReturn objCNoRet;
+ UninitVariablesHandler &handler;
+
+public:
+ TransferFunctions(CFGBlockValues &vals, const CFG &cfg,
+ const CFGBlock *block, AnalysisDeclContext &ac,
+ const ClassifyRefs &classification,
+ UninitVariablesHandler &handler)
+ : vals(vals), cfg(cfg), block(block), ac(ac),
+ classification(classification), objCNoRet(ac.getASTContext()),
+ handler(handler) {}
+
+ void reportUse(const Expr *ex, const VarDecl *vd);
+
+ void VisitBinaryOperator(BinaryOperator *bo);
+ void VisitBlockExpr(BlockExpr *be);
+ void VisitCallExpr(CallExpr *ce);
+ void VisitDeclRefExpr(DeclRefExpr *dr);
+ void VisitDeclStmt(DeclStmt *ds);
+ void VisitObjCForCollectionStmt(ObjCForCollectionStmt *FS);
+ void VisitObjCMessageExpr(ObjCMessageExpr *ME);
+
+ bool isTrackedVar(const VarDecl *vd) {
+ return ::isTrackedVar(vd, cast<DeclContext>(ac.getDecl()));
+ }
+
+ FindVarResult findVar(const Expr *ex) {
+ return ::findVar(ex, cast<DeclContext>(ac.getDecl()));
+ }
+
+ UninitUse getUninitUse(const Expr *ex, const VarDecl *vd, Value v) {
+ UninitUse Use(ex, isAlwaysUninit(v));
+
+ assert(isUninitialized(v));
+ if (Use.getKind() == UninitUse::Always)
+ return Use;
+
+ // If an edge which leads unconditionally to this use did not initialize
+ // the variable, we can say something stronger than 'may be uninitialized':
+ // we can say 'either it's used uninitialized or you have dead code'.
+ //
+ // We track the number of successors of a node which have been visited, and
+ // visit a node once we have visited all of its successors. Only edges where
+ // the variable might still be uninitialized are followed. Since a variable
+ // can't transfer from being initialized to being uninitialized, this will
+ // trace out the subgraph which inevitably leads to the use and does not
+ // initialize the variable. We do not want to skip past loops, since their
+ // non-termination might be correlated with the initialization condition.
+ //
+ // For example:
+ //
+ // void f(bool a, bool b) {
+ // block1: int n;
+ // if (a) {
+ // block2: if (b)
+ // block3: n = 1;
+ // block4: } else if (b) {
+ // block5: while (!a) {
+ // block6: do_work(&a);
+ // n = 2;
+ // }
+ // }
+ // block7: if (a)
+ // block8: g();
+ // block9: return n;
+ // }
+ //
+ // Starting from the maybe-uninitialized use in block 9:
+ // * Block 7 is not visited because we have only visited one of its two
+ // successors.
+ // * Block 8 is visited because we've visited its only successor.
+ // From block 8:
+ // * Block 7 is visited because we've now visited both of its successors.
+ // From block 7:
+ // * Blocks 1, 2, 4, 5, and 6 are not visited because we didn't visit all
+ // of their successors (we didn't visit 4, 3, 5, 6, and 5, respectively).
+ // * Block 3 is not visited because it initializes 'n'.
+ // Now the algorithm terminates, having visited blocks 7 and 8, and having
+ // found the frontier is blocks 2, 4, and 5.
+ //
+ // 'n' is definitely uninitialized for two edges into block 7 (from blocks 2
+ // and 4), so we report that any time either of those edges is taken (in
+ // each case when 'b == false'), 'n' is used uninitialized.
+ SmallVector<const CFGBlock*, 32> Queue;
+ SmallVector<unsigned, 32> SuccsVisited(cfg.getNumBlockIDs(), 0);
+ Queue.push_back(block);
+ // Specify that we've already visited all successors of the starting block.
+ // This has the dual purpose of ensuring we never add it to the queue, and
+ // of marking it as not being a candidate element of the frontier.
+ SuccsVisited[block->getBlockID()] = block->succ_size();
+ while (!Queue.empty()) {
+ const CFGBlock *B = Queue.pop_back_val();
+
+ // If the use is always reached from the entry block, make a note of that.
+ if (B == &cfg.getEntry())
+ Use.setUninitAfterCall();
+
+ for (CFGBlock::const_pred_iterator I = B->pred_begin(), E = B->pred_end();
+ I != E; ++I) {
+ const CFGBlock *Pred = *I;
+ if (!Pred)
+ continue;
+
+ Value AtPredExit = vals.getValue(Pred, B, vd);
+ if (AtPredExit == Initialized)
+ // This block initializes the variable.
+ continue;
+ if (AtPredExit == MayUninitialized &&
+ vals.getValue(B, nullptr, vd) == Uninitialized) {
+ // This block declares the variable (uninitialized), and is reachable
+ // from a block that initializes the variable. We can't guarantee to
+ // give an earlier location for the diagnostic (and it appears that
+ // this code is intended to be reachable) so give a diagnostic here
+ // and go no further down this path.
+ Use.setUninitAfterDecl();
+ continue;
+ }
+
+ unsigned &SV = SuccsVisited[Pred->getBlockID()];
+ if (!SV) {
+ // When visiting the first successor of a block, mark all NULL
+ // successors as having been visited.
+ for (CFGBlock::const_succ_iterator SI = Pred->succ_begin(),
+ SE = Pred->succ_end();
+ SI != SE; ++SI)
+ if (!*SI)
+ ++SV;
+ }
+
+ if (++SV == Pred->succ_size())
+ // All paths from this block lead to the use and don't initialize the
+ // variable.
+ Queue.push_back(Pred);
+ }
+ }
+
+ // Scan the frontier, looking for blocks where the variable was
+ // uninitialized.
+ for (CFG::const_iterator BI = cfg.begin(), BE = cfg.end(); BI != BE; ++BI) {
+ const CFGBlock *Block = *BI;
+ unsigned BlockID = Block->getBlockID();
+ const Stmt *Term = Block->getTerminator();
+ if (SuccsVisited[BlockID] && SuccsVisited[BlockID] < Block->succ_size() &&
+ Term) {
+ // This block inevitably leads to the use. If we have an edge from here
+ // to a post-dominator block, and the variable is uninitialized on that
+ // edge, we have found a bug.
+ for (CFGBlock::const_succ_iterator I = Block->succ_begin(),
+ E = Block->succ_end(); I != E; ++I) {
+ const CFGBlock *Succ = *I;
+ if (Succ && SuccsVisited[Succ->getBlockID()] >= Succ->succ_size() &&
+ vals.getValue(Block, Succ, vd) == Uninitialized) {
+ // Switch cases are a special case: report the label to the caller
+ // as the 'terminator', not the switch statement itself. Suppress
+ // situations where no label matched: we can't be sure that's
+ // possible.
+ if (isa<SwitchStmt>(Term)) {
+ const Stmt *Label = Succ->getLabel();
+ if (!Label || !isa<SwitchCase>(Label))
+ // Might not be possible.
+ continue;
+ UninitUse::Branch Branch;
+ Branch.Terminator = Label;
+ Branch.Output = 0; // Ignored.
+ Use.addUninitBranch(Branch);
+ } else {
+ UninitUse::Branch Branch;
+ Branch.Terminator = Term;
+ Branch.Output = I - Block->succ_begin();
+ Use.addUninitBranch(Branch);
+ }
+ }
+ }
+ }
+ }
+
+ return Use;
+ }
+};
+} // end anonymous namespace
+
+void TransferFunctions::reportUse(const Expr *ex, const VarDecl *vd) {
+ Value v = vals[vd];
+ if (isUninitialized(v))
+ handler.handleUseOfUninitVariable(vd, getUninitUse(ex, vd, v));
+}
+
+void TransferFunctions::VisitObjCForCollectionStmt(ObjCForCollectionStmt *FS) {
+ // This represents an initialization of the 'element' value.
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(FS->getElement())) {
+ const VarDecl *VD = cast<VarDecl>(DS->getSingleDecl());
+ if (isTrackedVar(VD))
+ vals[VD] = Initialized;
+ }
+}
+
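+// For block captures: a __block (by-ref) variable is treated as initialized
+// once captured, since the block may write it before any read; capturing an
+// uninitialized variable by copy is reported as a use at the BlockExpr.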
+void TransferFunctions::VisitBlockExpr(BlockExpr *be) {
+ const BlockDecl *bd = be->getBlockDecl();
+ for (const auto &I : bd->captures()) {
+ const VarDecl *vd = I.getVariable();
+ if (!isTrackedVar(vd))
+ continue;
+ if (I.isByRef()) {
+ vals[vd] = Initialized;
+ continue;
+ }
+ reportUse(be, vd);
+ }
+}
+
+void TransferFunctions::VisitCallExpr(CallExpr *ce) {
+ if (Decl *Callee = ce->getCalleeDecl()) {
+ if (Callee->hasAttr<ReturnsTwiceAttr>()) {
+ // After a call to a function like setjmp or vfork, any variable which is
+ // initialized anywhere within this function may now be initialized. For
+ // now, just assume such a call initializes all variables. FIXME: Only
+ // mark variables as initialized if they have an initializer which is
+ // reachable from here.
+ vals.setAllScratchValues(Initialized);
+ }
+ else if (Callee->hasAttr<AnalyzerNoReturnAttr>()) {
+ // Functions labeled like "analyzer_noreturn" are often used to denote
+ // "panic" functions that in special debug situations can still return,
+ // but for the most part should not be treated as returning. This is a
+ // useful annotation borrowed from the static analyzer that is useful for
+ // suppressing branch-specific false positives when we call one of these
+ // functions but keep pretending the path continues (when in reality the
+ // user doesn't care).
+ vals.setAllScratchValues(Unknown);
+ }
+ }
+}
+
+void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *dr) {
+ switch (classification.get(dr)) {
+ case ClassifyRefs::Ignore:
+ break;
+ case ClassifyRefs::Use:
+ reportUse(dr, cast<VarDecl>(dr->getDecl()));
+ break;
+ case ClassifyRefs::Init:
+ vals[cast<VarDecl>(dr->getDecl())] = Initialized;
+ break;
+ case ClassifyRefs::SelfInit:
+ handler.handleSelfInit(cast<VarDecl>(dr->getDecl()));
+ break;
+ }
+}
+
+void TransferFunctions::VisitBinaryOperator(BinaryOperator *BO) {
+ if (BO->getOpcode() == BO_Assign) {
+ FindVarResult Var = findVar(BO->getLHS());
+ if (const VarDecl *VD = Var.getDecl())
+ vals[VD] = Initialized;
+ }
+}
+
+void TransferFunctions::VisitDeclStmt(DeclStmt *DS) {
+ for (auto *DI : DS->decls()) {
+ VarDecl *VD = dyn_cast<VarDecl>(DI);
+ if (VD && isTrackedVar(VD)) {
+ if (getSelfInitExpr(VD)) {
+ // If the initializer consists solely of a reference to itself, we
+ // explicitly mark the variable as uninitialized. This allows code
+ // like the following:
+ //
+ // int x = x;
+ //
+ // to deliberately leave a variable uninitialized. Different analysis
+ // clients can detect this pattern and adjust their reporting
+ // appropriately, but we need to continue to analyze subsequent uses
+ // of the variable.
+ vals[VD] = Uninitialized;
+ } else if (VD->getInit()) {
+ // Treat the new variable as initialized.
+ vals[VD] = Initialized;
+ } else {
+ // No initializer: the variable is now uninitialized. This matters
+ // for cases like:
+ // while (...) {
+ // int n;
+ // use(n);
+ // n = 0;
+ // }
+ // FIXME: Mark the variable as uninitialized whenever its scope is
+ // left, since its scope could be re-entered by a jump over the
+ // declaration.
+ vals[VD] = Uninitialized;
+ }
+ }
+ }
+}
+
+void TransferFunctions::VisitObjCMessageExpr(ObjCMessageExpr *ME) {
+ // If the Objective-C message expression is an implicit no-return that
+ // is not modeled in the CFG, set the tracked dataflow values to Unknown.
+ if (objCNoRet.isImplicitNoReturn(ME)) {
+ vals.setAllScratchValues(Unknown);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// High-level "driver" logic for uninitialized values analysis.
+//===----------------------------------------------------------------------===//
+
+static bool runOnBlock(const CFGBlock *block, const CFG &cfg,
+ AnalysisDeclContext &ac, CFGBlockValues &vals,
+ const ClassifyRefs &classification,
+ llvm::BitVector &wasAnalyzed,
+ UninitVariablesHandler &handler) {
+ wasAnalyzed[block->getBlockID()] = true;
+ vals.resetScratch();
+ // Merge in values of predecessor blocks.
+ bool isFirst = true;
+ for (CFGBlock::const_pred_iterator I = block->pred_begin(),
+ E = block->pred_end(); I != E; ++I) {
+ const CFGBlock *pred = *I;
+ if (!pred)
+ continue;
+ if (wasAnalyzed[pred->getBlockID()]) {
+ vals.mergeIntoScratch(vals.getValueVector(pred), isFirst);
+ isFirst = false;
+ }
+ }
+ // Apply the transfer function.
+ TransferFunctions tf(vals, cfg, block, ac, classification, handler);
+ for (CFGBlock::const_iterator I = block->begin(), E = block->end();
+ I != E; ++I) {
+ if (Optional<CFGStmt> cs = I->getAs<CFGStmt>())
+ tf.Visit(const_cast<Stmt*>(cs->getStmt()));
+ }
+ return vals.updateValueVectorWithScratch(block);
+}
+
+/// PruneBlocksHandler is a special UninitVariablesHandler that is used
+/// to detect when a CFGBlock has any *potential* use of an uninitialized
+/// variable. It is mainly used to prune out work during the final
+/// reporting pass.
+namespace {
+struct PruneBlocksHandler : public UninitVariablesHandler {
+ PruneBlocksHandler(unsigned numBlocks)
+ : hadUse(numBlocks, false), hadAnyUse(false),
+ currentBlock(0) {}
+
+ ~PruneBlocksHandler() override {}
+
+ /// Records if a CFGBlock had a potential use of an uninitialized variable.
+ llvm::BitVector hadUse;
+
+ /// Records if any CFGBlock had a potential use of an uninitialized variable.
+ bool hadAnyUse;
+
+ /// The current block in which use information is recorded.
+ unsigned currentBlock;
+
+ void handleUseOfUninitVariable(const VarDecl *vd,
+ const UninitUse &use) override {
+ hadUse[currentBlock] = true;
+ hadAnyUse = true;
+ }
+
+ /// Called when the uninitialized variable analysis detects the
+ /// idiom 'int x = x'. All other uses of 'x' within the initializer
+ /// are handled by handleUseOfUninitVariable.
+ void handleSelfInit(const VarDecl *vd) override {
+ hadUse[currentBlock] = true;
+ hadAnyUse = true;
+ }
+};
+} // end anonymous namespace
+
+void clang::runUninitializedVariablesAnalysis(
+ const DeclContext &dc,
+ const CFG &cfg,
+ AnalysisDeclContext &ac,
+ UninitVariablesHandler &handler,
+ UninitVariablesAnalysisStats &stats) {
+ CFGBlockValues vals(cfg);
+ vals.computeSetOfDeclarations(dc);
+ if (vals.hasNoDeclarations())
+ return;
+
+ stats.NumVariablesAnalyzed = vals.getNumEntries();
+
+ // Precompute which expressions are uses and which are initializations.
+ ClassifyRefs classification(ac);
+ cfg.VisitBlockStmts(classification);
+
+ // Mark all variables uninitialized at the entry.
+ const CFGBlock &entry = cfg.getEntry();
+ ValueVector &vec = vals.getValueVector(&entry);
+ const unsigned n = vals.getNumEntries();
+ for (unsigned j = 0; j < n ; ++j) {
+ vec[j] = Uninitialized;
+ }
+
+ // Proceed with the worklist.
+ DataflowWorklist worklist(cfg, *ac.getAnalysis<PostOrderCFGView>());
+ llvm::BitVector previouslyVisited(cfg.getNumBlockIDs());
+ worklist.enqueueSuccessors(&cfg.getEntry());
+ llvm::BitVector wasAnalyzed(cfg.getNumBlockIDs(), false);
+ wasAnalyzed[cfg.getEntry().getBlockID()] = true;
+ PruneBlocksHandler PBH(cfg.getNumBlockIDs());
+
+ while (const CFGBlock *block = worklist.dequeue()) {
+ PBH.currentBlock = block->getBlockID();
+
+ // Did the block change?
+ bool changed = runOnBlock(block, cfg, ac, vals,
+ classification, wasAnalyzed, PBH);
+ ++stats.NumBlockVisits;
+ if (changed || !previouslyVisited[block->getBlockID()])
+ worklist.enqueueSuccessors(block);
+ previouslyVisited[block->getBlockID()] = true;
+ }
+
+ if (!PBH.hadAnyUse)
+ return;
+
+ // Run through the blocks one more time, and report uninitialized variables.
+ for (CFG::const_iterator BI = cfg.begin(), BE = cfg.end(); BI != BE; ++BI) {
+ const CFGBlock *block = *BI;
+ if (PBH.hadUse[block->getBlockID()]) {
+ runOnBlock(block, cfg, ac, vals, classification, wasAnalyzed, handler);
+ ++stats.NumBlockVisits;
+ }
+ }
+}
+
+UninitVariablesHandler::~UninitVariablesHandler() {}
diff --git a/contrib/llvm/tools/clang/lib/Basic/Attributes.cpp b/contrib/llvm/tools/clang/lib/Basic/Attributes.cpp
new file mode 100644
index 0000000..c215366
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/Attributes.cpp
@@ -0,0 +1,17 @@
+#include "clang/Basic/Attributes.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/StringSwitch.h"
+using namespace clang;
+
+int clang::hasAttribute(AttrSyntax Syntax, const IdentifierInfo *Scope,
+ const IdentifierInfo *Attr, const TargetInfo &Target,
+ const LangOptions &LangOpts) {
+ StringRef Name = Attr->getName();
+ // Normalize the attribute name, __foo__ becomes foo.
+ if (Name.size() >= 4 && Name.startswith("__") && Name.endswith("__"))
+ Name = Name.substr(2, Name.size() - 4);
+
+#include "clang/Basic/AttrHasAttributeImpl.inc"
+
+ return 0;
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/Builtins.cpp b/contrib/llvm/tools/clang/lib/Basic/Builtins.cpp
new file mode 100644
index 0000000..fb6a645
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/Builtins.cpp
@@ -0,0 +1,133 @@
+//===--- Builtins.cpp - Builtin function implementation -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements various things for builtin functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/StringRef.h"
+using namespace clang;
+
+static const Builtin::Info BuiltinInfo[] = {
+ { "not a builtin function", nullptr, nullptr, nullptr, ALL_LANGUAGES,nullptr},
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define LANGBUILTIN(ID, TYPE, ATTRS, LANGS) \
+ { #ID, TYPE, ATTRS, nullptr, LANGS, nullptr },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER, LANGS) \
+ { #ID, TYPE, ATTRS, HEADER, LANGS, nullptr },
+#include "clang/Basic/Builtins.def"
+};
+
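+// Builtin IDs are laid out in three consecutive ranges: the shared builtins
+// below FirstTSBuiltin, then the primary target's records, then the aux
+// target's (e.g. host builtins during offload compilation); getRecord below
+// decodes that layout.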
+const Builtin::Info &Builtin::Context::getRecord(unsigned ID) const {
+ if (ID < Builtin::FirstTSBuiltin)
+ return BuiltinInfo[ID];
+ assert(((ID - Builtin::FirstTSBuiltin) <
+ (TSRecords.size() + AuxTSRecords.size())) &&
+ "Invalid builtin ID!");
+ if (isAuxBuiltinID(ID))
+ return AuxTSRecords[getAuxBuiltinID(ID) - Builtin::FirstTSBuiltin];
+ return TSRecords[ID - Builtin::FirstTSBuiltin];
+}
+
+void Builtin::Context::InitializeTarget(const TargetInfo &Target,
+ const TargetInfo *AuxTarget) {
+ assert(TSRecords.empty() && "Already initialized target?");
+ TSRecords = Target.getTargetBuiltins();
+ if (AuxTarget)
+ AuxTSRecords = AuxTarget->getTargetBuiltins();
+}
+
+bool Builtin::Context::isBuiltinFunc(const char *Name) {
+ StringRef FuncName(Name);
+ for (unsigned i = Builtin::NotBuiltin + 1; i != Builtin::FirstTSBuiltin; ++i)
+ if (FuncName.equals(BuiltinInfo[i].Name))
+ return strchr(BuiltinInfo[i].Attributes, 'f') != nullptr;
+
+ return false;
+}
+
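+// builtinIsSupported filters builtins by the current language mode: e.g.
+// under -fno-builtin anything carrying the 'f' (library function) attribute
+// is rejected, "math.h" builtins go away when math builtins are disabled,
+// and GNU-, MS-, or ObjC-only builtins drop out of non-matching modes.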
+bool Builtin::Context::builtinIsSupported(const Builtin::Info &BuiltinInfo,
+ const LangOptions &LangOpts) {
+ bool BuiltinsUnsupported =
+ (LangOpts.NoBuiltin || LangOpts.isNoBuiltinFunc(BuiltinInfo.Name)) &&
+ strchr(BuiltinInfo.Attributes, 'f');
+ bool MathBuiltinsUnsupported =
+ LangOpts.NoMathBuiltin && BuiltinInfo.HeaderName &&
+ llvm::StringRef(BuiltinInfo.HeaderName).equals("math.h");
+ bool GnuModeUnsupported = !LangOpts.GNUMode && (BuiltinInfo.Langs & GNU_LANG);
+ bool MSModeUnsupported =
+ !LangOpts.MicrosoftExt && (BuiltinInfo.Langs & MS_LANG);
+ bool ObjCUnsupported = !LangOpts.ObjC1 && BuiltinInfo.Langs == OBJC_LANG;
+ return !BuiltinsUnsupported && !MathBuiltinsUnsupported &&
+ !GnuModeUnsupported && !MSModeUnsupported && !ObjCUnsupported;
+}
+
+/// initializeBuiltins - Mark the identifiers for all the builtins with their
+/// appropriate builtin ID # and mark any non-portable builtin identifiers as
+/// such.
+void Builtin::Context::initializeBuiltins(IdentifierTable &Table,
+ const LangOptions& LangOpts) {
+ // Step #1: mark all target-independent builtins with their ID's.
+ for (unsigned i = Builtin::NotBuiltin+1; i != Builtin::FirstTSBuiltin; ++i)
+ if (builtinIsSupported(BuiltinInfo[i], LangOpts)) {
+ Table.get(BuiltinInfo[i].Name).setBuiltinID(i);
+ }
+
+ // Step #2: Register target-specific builtins.
+ for (unsigned i = 0, e = TSRecords.size(); i != e; ++i)
+ if (builtinIsSupported(TSRecords[i], LangOpts))
+ Table.get(TSRecords[i].Name).setBuiltinID(i + Builtin::FirstTSBuiltin);
+
+ // Step #3: Register target-specific builtins for AuxTarget.
+ for (unsigned i = 0, e = AuxTSRecords.size(); i != e; ++i)
+ Table.get(AuxTSRecords[i].Name)
+ .setBuiltinID(i + Builtin::FirstTSBuiltin + TSRecords.size());
+}
+
+void Builtin::Context::forgetBuiltin(unsigned ID, IdentifierTable &Table) {
+ Table.get(getRecord(ID).Name).setBuiltinID(0);
+}
+
+bool Builtin::Context::isLike(unsigned ID, unsigned &FormatIdx,
+ bool &HasVAListArg, const char *Fmt) const {
+ assert(Fmt && "Not passed a format string");
+ assert(::strlen(Fmt) == 2 &&
+ "Format string needs to be two characters long");
+ assert(::toupper(Fmt[0]) == Fmt[1] &&
+ "Format string is not in the form \"xX\"");
+
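+ // Attribute strings encode likeness as "<flag>:<argno>:", e.g. "p:1:"
+ // marks a printf-like function whose format string is argument 1, and
+ // "P:1:" its va_list variant; hence the "pP"/"sS" pairs passed as Fmt.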
+ const char *Like = ::strpbrk(getRecord(ID).Attributes, Fmt);
+ if (!Like)
+ return false;
+
+ HasVAListArg = (*Like == Fmt[1]);
+
+ ++Like;
+ assert(*Like == ':' && "Format specifier must be followed by a ':'");
+ ++Like;
+
+ assert(::strchr(Like, ':') && "Format specifier must end with a ':'");
+ FormatIdx = ::strtol(Like, nullptr, 10);
+ return true;
+}
+
+bool Builtin::Context::isPrintfLike(unsigned ID, unsigned &FormatIdx,
+ bool &HasVAListArg) {
+ return isLike(ID, FormatIdx, HasVAListArg, "pP");
+}
+
+bool Builtin::Context::isScanfLike(unsigned ID, unsigned &FormatIdx,
+ bool &HasVAListArg) {
+ return isLike(ID, FormatIdx, HasVAListArg, "sS");
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/CharInfo.cpp b/contrib/llvm/tools/clang/lib/Basic/CharInfo.cpp
new file mode 100644
index 0000000..32b3277
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/CharInfo.cpp
@@ -0,0 +1,81 @@
+//===--- CharInfo.cpp - Static Data for Classifying ASCII Characters ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/CharInfo.h"
+
+using namespace clang::charinfo;
+
+// Statically initialize CharInfo table based on ASCII character set
+// Reference: FreeBSD 7.2 /usr/share/misc/ascii
+const uint16_t clang::charinfo::InfoTable[256] = {
+ // 0 NUL 1 SOH 2 STX 3 ETX
+ // 4 EOT 5 ENQ 6 ACK 7 BEL
+ 0 , 0 , 0 , 0 ,
+ 0 , 0 , 0 , 0 ,
+ // 8 BS 9 HT 10 NL 11 VT
+ //12 NP 13 CR 14 SO 15 SI
+ 0 , CHAR_HORZ_WS, CHAR_VERT_WS, CHAR_HORZ_WS,
+ CHAR_HORZ_WS, CHAR_VERT_WS, 0 , 0 ,
+ //16 DLE 17 DC1 18 DC2 19 DC3
+ //20 DC4 21 NAK 22 SYN 23 ETB
+ 0 , 0 , 0 , 0 ,
+ 0 , 0 , 0 , 0 ,
+ //24 CAN 25 EM 26 SUB 27 ESC
+ //28 FS 29 GS 30 RS 31 US
+ 0 , 0 , 0 , 0 ,
+ 0 , 0 , 0 , 0 ,
+ //32 SP 33 ! 34 " 35 #
+ //36 $ 37 % 38 & 39 '
+ CHAR_SPACE , CHAR_RAWDEL , CHAR_RAWDEL , CHAR_RAWDEL ,
+ CHAR_PUNCT , CHAR_RAWDEL , CHAR_RAWDEL , CHAR_RAWDEL ,
+ //40 ( 41 ) 42 * 43 +
+ //44 , 45 - 46 . 47 /
+ CHAR_PUNCT , CHAR_PUNCT , CHAR_RAWDEL , CHAR_RAWDEL ,
+ CHAR_RAWDEL , CHAR_RAWDEL , CHAR_PERIOD , CHAR_RAWDEL ,
+ //48 0 49 1 50 2 51 3
+ //52 4 53 5 54 6 55 7
+ CHAR_DIGIT , CHAR_DIGIT , CHAR_DIGIT , CHAR_DIGIT ,
+ CHAR_DIGIT , CHAR_DIGIT , CHAR_DIGIT , CHAR_DIGIT ,
+ //56 8 57 9 58 : 59 ;
+ //60 < 61 = 62 > 63 ?
+ CHAR_DIGIT , CHAR_DIGIT , CHAR_RAWDEL , CHAR_RAWDEL ,
+ CHAR_RAWDEL , CHAR_RAWDEL , CHAR_RAWDEL , CHAR_RAWDEL ,
+ //64 @ 65 A 66 B 67 C
+ //68 D 69 E 70 F 71 G
+ CHAR_PUNCT , CHAR_XUPPER , CHAR_XUPPER , CHAR_XUPPER ,
+ CHAR_XUPPER , CHAR_XUPPER , CHAR_XUPPER , CHAR_UPPER ,
+ //72 H 73 I 74 J 75 K
+ //76 L 77 M 78 N 79 O
+ CHAR_UPPER , CHAR_UPPER , CHAR_UPPER , CHAR_UPPER ,
+ CHAR_UPPER , CHAR_UPPER , CHAR_UPPER , CHAR_UPPER ,
+ //80 P 81 Q 82 R 83 S
+ //84 T 85 U 86 V 87 W
+ CHAR_UPPER , CHAR_UPPER , CHAR_UPPER , CHAR_UPPER ,
+ CHAR_UPPER , CHAR_UPPER , CHAR_UPPER , CHAR_UPPER ,
+ //88 X 89 Y 90 Z 91 [
+ //92 \ 93 ] 94 ^ 95 _
+ CHAR_UPPER , CHAR_UPPER , CHAR_UPPER , CHAR_RAWDEL ,
+ CHAR_PUNCT , CHAR_RAWDEL , CHAR_RAWDEL , CHAR_UNDER ,
+ //96 ` 97 a 98 b 99 c
+ //100 d 101 e 102 f 103 g
+ CHAR_PUNCT , CHAR_XLOWER , CHAR_XLOWER , CHAR_XLOWER ,
+ CHAR_XLOWER , CHAR_XLOWER , CHAR_XLOWER , CHAR_LOWER ,
+ //104 h 105 i 106 j 107 k
+ //108 l 109 m 110 n 111 o
+ CHAR_LOWER , CHAR_LOWER , CHAR_LOWER , CHAR_LOWER ,
+ CHAR_LOWER , CHAR_LOWER , CHAR_LOWER , CHAR_LOWER ,
+ //112 p 113 q 114 r 115 s
+ //116 t 117 u 118 v 119 w
+ CHAR_LOWER , CHAR_LOWER , CHAR_LOWER , CHAR_LOWER ,
+ CHAR_LOWER , CHAR_LOWER , CHAR_LOWER , CHAR_LOWER ,
+ //120 x 121 y 122 z 123 {
+ //124 | 125 } 126 ~ 127 DEL
+ CHAR_LOWER , CHAR_LOWER , CHAR_LOWER , CHAR_RAWDEL ,
+ CHAR_RAWDEL , CHAR_RAWDEL , CHAR_RAWDEL , 0
+};
diff --git a/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp b/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
new file mode 100644
index 0000000..7cf7305
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
@@ -0,0 +1,1013 @@
+//===--- Diagnostic.cpp - C Language Family Diagnostic Handling -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Diagnostic-related interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/CrashRecoveryContext.h"
+#include "llvm/Support/Locale.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
+ DiagNullabilityKind nullability) {
+ StringRef string;
+ switch (nullability.first) {
+ case NullabilityKind::NonNull:
+ string = nullability.second ? "'nonnull'" : "'_Nonnull'";
+ break;
+
+ case NullabilityKind::Nullable:
+ string = nullability.second ? "'nullable'" : "'_Nullable'";
+ break;
+
+ case NullabilityKind::Unspecified:
+ string = nullability.second ? "'null_unspecified'" : "'_Null_unspecified'";
+ break;
+ }
+
+ DB.AddString(string);
+ return DB;
+}
+
+static void DummyArgToStringFn(DiagnosticsEngine::ArgumentKind AK, intptr_t QT,
+ StringRef Modifier, StringRef Argument,
+ ArrayRef<DiagnosticsEngine::ArgumentValue> PrevArgs,
+ SmallVectorImpl<char> &Output,
+ void *Cookie,
+ ArrayRef<intptr_t> QualTypeVals) {
+ StringRef Str = "<can't format argument>";
+ Output.append(Str.begin(), Str.end());
+}
+
+DiagnosticsEngine::DiagnosticsEngine(
+ const IntrusiveRefCntPtr<DiagnosticIDs> &diags, DiagnosticOptions *DiagOpts,
+ DiagnosticConsumer *client, bool ShouldOwnClient)
+ : Diags(diags), DiagOpts(DiagOpts), Client(nullptr), SourceMgr(nullptr) {
+ setClient(client, ShouldOwnClient);
+ ArgToStringFn = DummyArgToStringFn;
+ ArgToStringCookie = nullptr;
+
+ AllExtensionsSilenced = 0;
+ IgnoreAllWarnings = false;
+ WarningsAsErrors = false;
+ EnableAllWarnings = false;
+ ErrorsAsFatal = false;
+ SuppressSystemWarnings = false;
+ SuppressAllDiagnostics = false;
+ ElideType = true;
+ PrintTemplateTree = false;
+ ShowColors = false;
+ ShowOverloads = Ovl_All;
+ ExtBehavior = diag::Severity::Ignored;
+
+ ErrorLimit = 0;
+ TemplateBacktraceLimit = 0;
+ ConstexprBacktraceLimit = 0;
+
+ Reset();
+}
+
+DiagnosticsEngine::~DiagnosticsEngine() {
+ // If we own the diagnostic client, destroy it first so that it can access the
+ // engine from its destructor.
+ setClient(nullptr);
+}
+
+void DiagnosticsEngine::setClient(DiagnosticConsumer *client,
+ bool ShouldOwnClient) {
+ Owner.reset(ShouldOwnClient ? client : nullptr);
+ Client = client;
+}
+
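+// pushMappings/popMappings back the '#pragma clang diagnostic push'/'pop'
+// state stack: push snapshots the current DiagState, and pop re-activates
+// that snapshot if any mapping changed in between.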
+void DiagnosticsEngine::pushMappings(SourceLocation Loc) {
+ DiagStateOnPushStack.push_back(GetCurDiagState());
+}
+
+bool DiagnosticsEngine::popMappings(SourceLocation Loc) {
+ if (DiagStateOnPushStack.empty())
+ return false;
+
+ if (DiagStateOnPushStack.back() != GetCurDiagState()) {
+ // State changed at some point between push/pop.
+ PushDiagStatePoint(DiagStateOnPushStack.back(), Loc);
+ }
+ DiagStateOnPushStack.pop_back();
+ return true;
+}
+
+void DiagnosticsEngine::Reset() {
+ ErrorOccurred = false;
+ UncompilableErrorOccurred = false;
+ FatalErrorOccurred = false;
+ UnrecoverableErrorOccurred = false;
+
+ NumWarnings = 0;
+ NumErrors = 0;
+ TrapNumErrorsOccurred = 0;
+ TrapNumUnrecoverableErrorsOccurred = 0;
+
+ CurDiagID = ~0U;
+ LastDiagLevel = DiagnosticIDs::Ignored;
+ DelayedDiagID = 0;
+
+ // Clear state related to #pragma diagnostic.
+ DiagStates.clear();
+ DiagStatePoints.clear();
+ DiagStateOnPushStack.clear();
+
+ // Create a DiagState and DiagStatePoint representing diagnostic changes
+ // through command-line.
+ DiagStates.emplace_back();
+ DiagStatePoints.push_back(DiagStatePoint(&DiagStates.back(), FullSourceLoc()));
+}
+
+void DiagnosticsEngine::SetDelayedDiagnostic(unsigned DiagID, StringRef Arg1,
+ StringRef Arg2) {
+ if (DelayedDiagID)
+ return;
+
+ DelayedDiagID = DiagID;
+ DelayedDiagArg1 = Arg1.str();
+ DelayedDiagArg2 = Arg2.str();
+}
+
+void DiagnosticsEngine::ReportDelayed() {
+ Report(DelayedDiagID) << DelayedDiagArg1 << DelayedDiagArg2;
+ DelayedDiagID = 0;
+ DelayedDiagArg1.clear();
+ DelayedDiagArg2.clear();
+}
+
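+// DiagStatePoints is kept sorted in translation-unit order, so the state
+// active at a given location can be found with a binary search (upper_bound)
+// followed by one step back, as done below.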
+DiagnosticsEngine::DiagStatePointsTy::iterator
+DiagnosticsEngine::GetDiagStatePointForLoc(SourceLocation L) const {
+ assert(!DiagStatePoints.empty());
+ assert(DiagStatePoints.front().Loc.isInvalid() &&
+ "Should have created a DiagStatePoint for command-line");
+
+ if (!SourceMgr)
+ return DiagStatePoints.end() - 1;
+
+ FullSourceLoc Loc(L, *SourceMgr);
+ if (Loc.isInvalid())
+ return DiagStatePoints.end() - 1;
+
+ DiagStatePointsTy::iterator Pos = DiagStatePoints.end();
+ FullSourceLoc LastStateChangePos = DiagStatePoints.back().Loc;
+ if (LastStateChangePos.isValid() &&
+ Loc.isBeforeInTranslationUnitThan(LastStateChangePos))
+ Pos = std::upper_bound(DiagStatePoints.begin(), DiagStatePoints.end(),
+ DiagStatePoint(nullptr, Loc));
+ --Pos;
+ return Pos;
+}
+
+void DiagnosticsEngine::setSeverity(diag::kind Diag, diag::Severity Map,
+ SourceLocation L) {
+ assert(Diag < diag::DIAG_UPPER_LIMIT &&
+ "Can only map builtin diagnostics");
+ assert((Diags->isBuiltinWarningOrExtension(Diag) ||
+ (Map == diag::Severity::Fatal || Map == diag::Severity::Error)) &&
+ "Cannot map errors into warnings!");
+ assert(!DiagStatePoints.empty());
+ assert((L.isInvalid() || SourceMgr) && "No SourceMgr for valid location");
+
+ FullSourceLoc Loc = SourceMgr? FullSourceLoc(L, *SourceMgr) : FullSourceLoc();
+ FullSourceLoc LastStateChangePos = DiagStatePoints.back().Loc;
+ // Don't allow a mapping to a warning to override an error/fatal mapping.
+ if (Map == diag::Severity::Warning) {
+ DiagnosticMapping &Info = GetCurDiagState()->getOrAddMapping(Diag);
+ if (Info.getSeverity() == diag::Severity::Error ||
+ Info.getSeverity() == diag::Severity::Fatal)
+ Map = Info.getSeverity();
+ }
+ DiagnosticMapping Mapping = makeUserMapping(Map, L);
+
+ // Common case; setting all the diagnostics of a group in one place.
+ if (Loc.isInvalid() || Loc == LastStateChangePos) {
+ GetCurDiagState()->setMapping(Diag, Mapping);
+ return;
+ }
+
+ // Another common case; modifying diagnostic state in a source location
+ // after the previous one.
+ if ((Loc.isValid() && LastStateChangePos.isInvalid()) ||
+ LastStateChangePos.isBeforeInTranslationUnitThan(Loc)) {
+ // A diagnostic pragma occurred, create a new DiagState initialized with
+ // the current one and a new DiagStatePoint to record at which location
+ // the new state became active.
+ DiagStates.push_back(*GetCurDiagState());
+ PushDiagStatePoint(&DiagStates.back(), Loc);
+ GetCurDiagState()->setMapping(Diag, Mapping);
+ return;
+ }
+
+ // We allow setting the diagnostic state in random source order for
+ // completeness, but it should not actually happen in normal practice.
+
+ DiagStatePointsTy::iterator Pos = GetDiagStatePointForLoc(Loc);
+ assert(Pos != DiagStatePoints.end());
+
+ // Update all diagnostic states that are active after the given location.
+ for (DiagStatePointsTy::iterator
+ I = Pos+1, E = DiagStatePoints.end(); I != E; ++I) {
+ I->State->setMapping(Diag, Mapping);
+ }
+
+ // If the location corresponds to an existing point, just update its state.
+ if (Pos->Loc == Loc) {
+ Pos->State->setMapping(Diag, Mapping);
+ return;
+ }
+
+ // Create a new state/point and fit it into the vector of DiagStatePoints
+ // so that the vector is always ordered according to location.
+ assert(Pos->Loc.isBeforeInTranslationUnitThan(Loc));
+ DiagStates.push_back(*Pos->State);
+ DiagState *NewState = &DiagStates.back();
+ NewState->setMapping(Diag, Mapping);
+ DiagStatePoints.insert(Pos+1, DiagStatePoint(NewState,
+ FullSourceLoc(Loc, *SourceMgr)));
+}
+
+bool DiagnosticsEngine::setSeverityForGroup(diag::Flavor Flavor,
+ StringRef Group, diag::Severity Map,
+ SourceLocation Loc) {
+ // Get the diagnostics in this group.
+ SmallVector<diag::kind, 256> GroupDiags;
+ if (Diags->getDiagnosticsInGroup(Flavor, Group, GroupDiags))
+ return true;
+
+ // Set the mapping.
+ for (diag::kind Diag : GroupDiags)
+ setSeverity(Diag, Map, Loc);
+
+ return false;
+}
+
+bool DiagnosticsEngine::setDiagnosticGroupWarningAsError(StringRef Group,
+ bool Enabled) {
+ // If we are enabling this feature, just set the diagnostic mappings to map to
+ // errors.
+ if (Enabled)
+ return setSeverityForGroup(diag::Flavor::WarningOrError, Group,
+ diag::Severity::Error);
+
+ // Otherwise, we want to set the diagnostic mapping's "no Werror" bit, and
+ // potentially downgrade anything already mapped to be a warning.
+
+ // Get the diagnostics in this group.
+ SmallVector<diag::kind, 8> GroupDiags;
+ if (Diags->getDiagnosticsInGroup(diag::Flavor::WarningOrError, Group,
+ GroupDiags))
+ return true;
+
+ // Perform the mapping change.
+ for (diag::kind Diag : GroupDiags) {
+ DiagnosticMapping &Info = GetCurDiagState()->getOrAddMapping(Diag);
+
+ if (Info.getSeverity() == diag::Severity::Error ||
+ Info.getSeverity() == diag::Severity::Fatal)
+ Info.setSeverity(diag::Severity::Warning);
+
+ Info.setNoWarningAsError(true);
+ }
+
+ return false;
+}
+
+bool DiagnosticsEngine::setDiagnosticGroupErrorAsFatal(StringRef Group,
+ bool Enabled) {
+ // If we are enabling this feature, just set the diagnostic mappings to map to
+ // fatal errors.
+ if (Enabled)
+ return setSeverityForGroup(diag::Flavor::WarningOrError, Group,
+ diag::Severity::Fatal);
+
+ // Otherwise, we want to set the diagnostic mapping's "no Werror" bit, and
+ // potentially downgrade anything already mapped to be an error.
+
+ // Get the diagnostics in this group.
+ SmallVector<diag::kind, 8> GroupDiags;
+ if (Diags->getDiagnosticsInGroup(diag::Flavor::WarningOrError, Group,
+ GroupDiags))
+ return true;
+
+ // Perform the mapping change.
+ for (diag::kind Diag : GroupDiags) {
+ DiagnosticMapping &Info = GetCurDiagState()->getOrAddMapping(Diag);
+
+ if (Info.getSeverity() == diag::Severity::Fatal)
+ Info.setSeverity(diag::Severity::Error);
+
+ Info.setNoErrorAsFatal(true);
+ }
+
+ return false;
+}
+
+void DiagnosticsEngine::setSeverityForAll(diag::Flavor Flavor,
+ diag::Severity Map,
+ SourceLocation Loc) {
+ // Get all the diagnostics.
+ SmallVector<diag::kind, 64> AllDiags;
+ Diags->getAllDiagnostics(Flavor, AllDiags);
+
+ // Set the mapping.
+ for (diag::kind Diag : AllDiags)
+ if (Diags->isBuiltinWarningOrExtension(Diag))
+ setSeverity(Diag, Map, Loc);
+}
+
+void DiagnosticsEngine::Report(const StoredDiagnostic &storedDiag) {
+ assert(CurDiagID == ~0U && "Multiple diagnostics in flight at once!");
+
+ CurDiagLoc = storedDiag.getLocation();
+ CurDiagID = storedDiag.getID();
+ NumDiagArgs = 0;
+
+ DiagRanges.clear();
+ DiagRanges.append(storedDiag.range_begin(), storedDiag.range_end());
+
+ DiagFixItHints.clear();
+ DiagFixItHints.append(storedDiag.fixit_begin(), storedDiag.fixit_end());
+
+ assert(Client && "DiagnosticConsumer not set!");
+ Level DiagLevel = storedDiag.getLevel();
+ Diagnostic Info(this, storedDiag.getMessage());
+ Client->HandleDiagnostic(DiagLevel, Info);
+ if (Client->IncludeInDiagnosticCounts()) {
+ if (DiagLevel == DiagnosticsEngine::Warning)
+ ++NumWarnings;
+ }
+
+ CurDiagID = ~0U;
+}
+
+bool DiagnosticsEngine::EmitCurrentDiagnostic(bool Force) {
+ assert(getClient() && "DiagnosticClient not set!");
+
+ bool Emitted;
+ if (Force) {
+ Diagnostic Info(this);
+
+ // Figure out the diagnostic level of this message.
+ DiagnosticIDs::Level DiagLevel
+ = Diags->getDiagnosticLevel(Info.getID(), Info.getLocation(), *this);
+
+ Emitted = (DiagLevel != DiagnosticIDs::Ignored);
+ if (Emitted) {
+ // Emit the diagnostic regardless of suppression level.
+ Diags->EmitDiag(*this, DiagLevel);
+ }
+ } else {
+ // Process the diagnostic, sending the accumulated information to the
+ // DiagnosticConsumer.
+ Emitted = ProcessDiag();
+ }
+
+ // Clear out the current diagnostic object.
+ unsigned DiagID = CurDiagID;
+ Clear();
+
+ // If there was a delayed diagnostic, emit it now.
+ if (!Force && DelayedDiagID && DelayedDiagID != DiagID)
+ ReportDelayed();
+
+ return Emitted;
+}
+
+DiagnosticConsumer::~DiagnosticConsumer() {}
+
+void DiagnosticConsumer::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info) {
+ if (!IncludeInDiagnosticCounts())
+ return;
+
+ if (DiagLevel == DiagnosticsEngine::Warning)
+ ++NumWarnings;
+ else if (DiagLevel >= DiagnosticsEngine::Error)
+ ++NumErrors;
+}
+
+/// ModifierIs - Return true if the specified modifier matches the given string.
+template <std::size_t StrLen>
+static bool ModifierIs(const char *Modifier, unsigned ModifierLen,
+ const char (&Str)[StrLen]) {
+ return StrLen-1 == ModifierLen && !memcmp(Modifier, Str, StrLen-1);
+}
+
+/// ScanFormat - Scans forward, looking for the given character, skipping
+/// nested clauses and escaped characters.
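+/// e.g. scanning "%select{a|b}|c" for '|' skips the '|' nested inside the
+/// select clause (the "%select{" sequence increments Depth) and returns the
+/// position of the top-level '|' before 'c'.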
+static const char *ScanFormat(const char *I, const char *E, char Target) {
+ unsigned Depth = 0;
+
+ for ( ; I != E; ++I) {
+ if (Depth == 0 && *I == Target) return I;
+ if (Depth != 0 && *I == '}') Depth--;
+
+ if (*I == '%') {
+ I++;
+ if (I == E) break;
+
+ // Escaped characters get implicitly skipped here.
+
+ // Format specifier.
+ if (!isDigit(*I) && !isPunctuation(*I)) {
+ for (I++; I != E && !isDigit(*I) && *I != '{'; I++) ;
+ if (I == E) break;
+ if (*I == '{')
+ Depth++;
+ }
+ }
+ }
+ return E;
+}
+
+/// HandleSelectModifier - Handle the integer 'select' modifier. This is used
+/// like this: %select{foo|bar|baz}2. This means that the integer argument
+/// "%2" has a value from 0-2. If the value is 0, the diagnostic prints 'foo'.
+/// If the value is 1, it prints 'bar'; if it is 2, it prints 'baz'.
+/// This is very useful for certain classes of variant diagnostics.
+static void HandleSelectModifier(const Diagnostic &DInfo, unsigned ValNo,
+ const char *Argument, unsigned ArgumentLen,
+ SmallVectorImpl<char> &OutStr) {
+ const char *ArgumentEnd = Argument+ArgumentLen;
+
+ // Skip over 'ValNo' |'s.
+ while (ValNo) {
+ const char *NextVal = ScanFormat(Argument, ArgumentEnd, '|');
+ assert(NextVal != ArgumentEnd && "Value for integer select modifier was"
+ " larger than the number of options in the diagnostic string!");
+ Argument = NextVal+1; // Skip this string.
+ --ValNo;
+ }
+
+ // Get the end of the value. This is either the } or the |.
+ const char *EndPtr = ScanFormat(Argument, ArgumentEnd, '|');
+
+ // Recursively format the result of the select clause into the output string.
+ DInfo.FormatDiagnostic(Argument, EndPtr, OutStr);
+}
+
+/// HandleIntegerSModifier - Handle the integer 's' modifier. This adds the
+/// letter 's' to the string if the value is not 1. This is used in cases like
+/// this: "you idiot, you have %4 parameter%s4!".
+static void HandleIntegerSModifier(unsigned ValNo,
+ SmallVectorImpl<char> &OutStr) {
+ if (ValNo != 1)
+ OutStr.push_back('s');
+}
+
+/// HandleOrdinalModifier - Handle the integer 'ord' modifier. This
+/// prints the ordinal form of the given integer, with 1 corresponding
+/// to the first ordinal. Currently this is hard-coded to use the
+/// English form.
+static void HandleOrdinalModifier(unsigned ValNo,
+ SmallVectorImpl<char> &OutStr) {
+ assert(ValNo != 0 && "ValNo must be strictly positive!");
+
+ llvm::raw_svector_ostream Out(OutStr);
+
+ // We could use text forms for the first N ordinals, but the numeric
+ // forms are actually nicer in diagnostics because they stand out.
+ Out << ValNo << llvm::getOrdinalSuffix(ValNo);
+}
+
+/// PluralNumber - Parse an unsigned integer and advance Start.
+static unsigned PluralNumber(const char *&Start, const char *End) {
+ // Programming 101: Parse a decimal number :-)
+ unsigned Val = 0;
+ while (Start != End && *Start >= '0' && *Start <= '9') {
+ Val *= 10;
+ Val += *Start - '0';
+ ++Start;
+ }
+ return Val;
+}
+
+/// TestPluralRange - Test if Val is in the parsed range. Modifies Start.
+static bool TestPluralRange(unsigned Val, const char *&Start, const char *End) {
+ if (*Start != '[') {
+ unsigned Ref = PluralNumber(Start, End);
+ return Ref == Val;
+ }
+
+ ++Start;
+ unsigned Low = PluralNumber(Start, End);
+ assert(*Start == ',' && "Bad plural expression syntax: expected ,");
+ ++Start;
+ unsigned High = PluralNumber(Start, End);
+ assert(*Start == ']' && "Bad plural expression syntax: expected ]");
+ ++Start;
+ return Low <= Val && Val <= High;
+}
+
+/// EvalPluralExpr - Actual expression evaluator for HandlePluralModifier.
+static bool EvalPluralExpr(unsigned ValNo, const char *Start, const char *End) {
+ // Empty condition?
+ if (*Start == ':')
+ return true;
+
+ while (1) {
+ char C = *Start;
+ if (C == '%') {
+ // Modulo expression
+ ++Start;
+ unsigned Arg = PluralNumber(Start, End);
+ assert(*Start == '=' && "Bad plural expression syntax: expected =");
+ ++Start;
+ unsigned ValMod = ValNo % Arg;
+ if (TestPluralRange(ValMod, Start, End))
+ return true;
+ } else {
+ assert((C == '[' || (C >= '0' && C <= '9')) &&
+ "Bad plural expression syntax: unexpected character");
+ // Range expression
+ if (TestPluralRange(ValNo, Start, End))
+ return true;
+ }
+
+ // Scan for next or-expr part.
+ Start = std::find(Start, End, ',');
+ if (Start == End)
+ break;
+ ++Start;
+ }
+ return false;
+}
+
+/// HandlePluralModifier - Handle the integer 'plural' modifier. This is used
+/// for complex plural forms, or in languages where all plurals are complex.
+/// The syntax is: %plural{cond1:form1|cond2:form2|:form3}, where condn are
+/// conditions that are tested in order; the form corresponding to the first
+/// condition that applies is emitted. The empty condition is always true,
+/// making the last form a default case.
+/// Conditions are simple boolean expressions, where n is the number argument.
+/// Here are the rules.
+/// condition := expression | empty
+/// empty := -> always true
+/// expression := numeric [',' expression] -> logical or
+/// numeric := range -> true if n in range
+/// | '%' number '=' range -> true if n % number in range
+/// range := number
+/// | '[' number ',' number ']' -> ranges are inclusive both ends
+///
+/// Here are some examples from the GNU gettext manual written in this form:
+/// English:
+/// {1:form0|:form1}
+/// Latvian:
+/// {0:form2|%100=11,%10=0,%10=[2,9]:form1|:form0}
+/// Gaeilge:
+/// {1:form0|2:form1|:form2}
+/// Romanian:
+/// {1:form0|0,%100=[1,19]:form1|:form2}
+/// Lithuanian:
+/// {%10=0,%100=[10,19]:form2|%10=1:form0|:form1}
+/// Russian (requires repeated form):
+/// {%100=[11,14]:form2|%10=1:form0|%10=[2,4]:form1|:form2}
+/// Slovak
+/// {1:form0|[2,4]:form1|:form2}
+/// Polish (requires repeated form):
+/// {1:form0|%100=[10,20]:form2|%10=[2,4]:form1|:form2}
+static void HandlePluralModifier(const Diagnostic &DInfo, unsigned ValNo,
+ const char *Argument, unsigned ArgumentLen,
+ SmallVectorImpl<char> &OutStr) {
+ const char *ArgumentEnd = Argument + ArgumentLen;
+ while (1) {
+ assert(Argument < ArgumentEnd && "Plural expression didn't match.");
+ const char *ExprEnd = Argument;
+ while (*ExprEnd != ':') {
+ assert(ExprEnd != ArgumentEnd && "Plural missing expression end");
+ ++ExprEnd;
+ }
+ if (EvalPluralExpr(ValNo, Argument, ExprEnd)) {
+ Argument = ExprEnd + 1;
+ ExprEnd = ScanFormat(Argument, ArgumentEnd, '|');
+
+ // Recursively format the result of the plural clause into the
+ // output string.
+ DInfo.FormatDiagnostic(Argument, ExprEnd, OutStr);
+ return;
+ }
+ Argument = ScanFormat(Argument, ArgumentEnd - 1, '|') + 1;
+ }
+}
+
+/// \brief Returns the friendly description for a token kind that will appear
+/// without quotes in diagnostic messages. These strings may be translatable in
+/// future.
+static const char *getTokenDescForDiagnostic(tok::TokenKind Kind) {
+ switch (Kind) {
+ case tok::identifier:
+ return "identifier";
+ default:
+ return nullptr;
+ }
+}
+
+/// FormatDiagnostic - Format this diagnostic into a string, substituting the
+/// formal arguments into the %0 slots. The result is appended onto the Str
+/// array.
+void Diagnostic::
+FormatDiagnostic(SmallVectorImpl<char> &OutStr) const {
+ if (!StoredDiagMessage.empty()) {
+ OutStr.append(StoredDiagMessage.begin(), StoredDiagMessage.end());
+ return;
+ }
+
+ StringRef Diag =
+ getDiags()->getDiagnosticIDs()->getDescription(getID());
+
+ FormatDiagnostic(Diag.begin(), Diag.end(), OutStr);
+}
+
+void Diagnostic::
+FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
+ SmallVectorImpl<char> &OutStr) const {
+
+ // When the diagnostic string is only "%0", the entire string is being given
+ // by an outside source. Remove unprintable characters from this string
+ // and skip all the other string processing.
+ if (DiagEnd - DiagStr == 2 &&
+ StringRef(DiagStr, DiagEnd - DiagStr).equals("%0") &&
+ getArgKind(0) == DiagnosticsEngine::ak_std_string) {
+ const std::string &S = getArgStdStr(0);
+ for (char c : S) {
+ if (llvm::sys::locale::isPrint(c) || c == '\t') {
+ OutStr.push_back(c);
+ }
+ }
+ return;
+ }
+
+ /// FormattedArgs - Keep track of all of the arguments formatted by
+ /// ConvertArgToString and pass them into subsequent calls to
+ /// ConvertArgToString, allowing the implementation to avoid redundancies in
+ /// obvious cases.
+ SmallVector<DiagnosticsEngine::ArgumentValue, 8> FormattedArgs;
+
+ /// QualTypeVals - Pass a vector of arrays so that QualType names can be
+ /// compared to see if more information is needed to be printed.
+ SmallVector<intptr_t, 2> QualTypeVals;
+ SmallVector<char, 64> Tree;
+
+ for (unsigned i = 0, e = getNumArgs(); i < e; ++i)
+ if (getArgKind(i) == DiagnosticsEngine::ak_qualtype)
+ QualTypeVals.push_back(getRawArg(i));
+
+ while (DiagStr != DiagEnd) {
+ if (DiagStr[0] != '%') {
+ // Append non-%0 substrings to Str if we have one.
+ const char *StrEnd = std::find(DiagStr, DiagEnd, '%');
+ OutStr.append(DiagStr, StrEnd);
+ DiagStr = StrEnd;
+ continue;
+ } else if (isPunctuation(DiagStr[1])) {
+ OutStr.push_back(DiagStr[1]); // %% -> %.
+ DiagStr += 2;
+ continue;
+ }
+
+ // Skip the %.
+ ++DiagStr;
+
+ // This must be a placeholder for a diagnostic argument. The format for a
+ // placeholder is one of "%0", "%modifier0", or "%modifier{arguments}0".
+ // The digit is a number from 0-9 indicating which argument this comes from.
+ // The modifier is a string from the set [-a-z]+, and the argument is a
+ // brace-enclosed string.
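+ // e.g. "%0" (plain), "%ordinal0", or "%select{a|b|c}1": the trailing
+ // digit always selects which diagnostic argument is formatted.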
+ const char *Modifier = nullptr, *Argument = nullptr;
+ unsigned ModifierLen = 0, ArgumentLen = 0;
+
+ // Check to see if we have a modifier. If so eat it.
+ if (!isDigit(DiagStr[0])) {
+ Modifier = DiagStr;
+ while (DiagStr[0] == '-' ||
+ (DiagStr[0] >= 'a' && DiagStr[0] <= 'z'))
+ ++DiagStr;
+ ModifierLen = DiagStr-Modifier;
+
+ // If we have an argument, get it next.
+ if (DiagStr[0] == '{') {
+ ++DiagStr; // Skip {.
+ Argument = DiagStr;
+
+ DiagStr = ScanFormat(DiagStr, DiagEnd, '}');
+ assert(DiagStr != DiagEnd && "Mismatched {}'s in diagnostic string!");
+ ArgumentLen = DiagStr-Argument;
+ ++DiagStr; // Skip }.
+ }
+ }
+
+ assert(isDigit(*DiagStr) && "Invalid format for argument in diagnostic");
+ unsigned ArgNo = *DiagStr++ - '0';
+
+ // Only used for type diffing.
+ unsigned ArgNo2 = ArgNo;
+
+ DiagnosticsEngine::ArgumentKind Kind = getArgKind(ArgNo);
+ if (ModifierIs(Modifier, ModifierLen, "diff")) {
+ assert(*DiagStr == ',' && isDigit(*(DiagStr + 1)) &&
+ "Invalid format for diff modifier");
+ ++DiagStr; // Comma.
+ ArgNo2 = *DiagStr++ - '0';
+ DiagnosticsEngine::ArgumentKind Kind2 = getArgKind(ArgNo2);
+ if (Kind == DiagnosticsEngine::ak_qualtype &&
+ Kind2 == DiagnosticsEngine::ak_qualtype)
+ Kind = DiagnosticsEngine::ak_qualtype_pair;
+ else {
+ // %diff only supports QualTypes. For other kinds of arguments,
+ // use the default printing. For example, if the modifier is:
+ // "%diff{compare $ to $|other text}1,2"
+ // treat it as:
+ // "compare %1 to %2"
+ const char *Pipe = ScanFormat(Argument, Argument + ArgumentLen, '|');
+ const char *FirstDollar = ScanFormat(Argument, Pipe, '$');
+ const char *SecondDollar = ScanFormat(FirstDollar + 1, Pipe, '$');
+ const char ArgStr1[] = { '%', static_cast<char>('0' + ArgNo) };
+ const char ArgStr2[] = { '%', static_cast<char>('0' + ArgNo2) };
+ FormatDiagnostic(Argument, FirstDollar, OutStr);
+ FormatDiagnostic(ArgStr1, ArgStr1 + 2, OutStr);
+ FormatDiagnostic(FirstDollar + 1, SecondDollar, OutStr);
+ FormatDiagnostic(ArgStr2, ArgStr2 + 2, OutStr);
+ FormatDiagnostic(SecondDollar + 1, Pipe, OutStr);
+ continue;
+ }
+ }
+
+ switch (Kind) {
+ // ---- STRINGS ----
+ case DiagnosticsEngine::ak_std_string: {
+ const std::string &S = getArgStdStr(ArgNo);
+ assert(ModifierLen == 0 && "No modifiers for strings yet");
+ OutStr.append(S.begin(), S.end());
+ break;
+ }
+ case DiagnosticsEngine::ak_c_string: {
+ const char *S = getArgCStr(ArgNo);
+ assert(ModifierLen == 0 && "No modifiers for strings yet");
+
+      // Don't crash if we get passed a null pointer by accident.
+ if (!S)
+ S = "(null)";
+
+ OutStr.append(S, S + strlen(S));
+ break;
+ }
+ // ---- INTEGERS ----
+ case DiagnosticsEngine::ak_sint: {
+ int Val = getArgSInt(ArgNo);
+
+ if (ModifierIs(Modifier, ModifierLen, "select")) {
+ HandleSelectModifier(*this, (unsigned)Val, Argument, ArgumentLen,
+ OutStr);
+ } else if (ModifierIs(Modifier, ModifierLen, "s")) {
+ HandleIntegerSModifier(Val, OutStr);
+ } else if (ModifierIs(Modifier, ModifierLen, "plural")) {
+ HandlePluralModifier(*this, (unsigned)Val, Argument, ArgumentLen,
+ OutStr);
+ } else if (ModifierIs(Modifier, ModifierLen, "ordinal")) {
+ HandleOrdinalModifier((unsigned)Val, OutStr);
+ } else {
+ assert(ModifierLen == 0 && "Unknown integer modifier");
+ llvm::raw_svector_ostream(OutStr) << Val;
+ }
+ break;
+ }
+ case DiagnosticsEngine::ak_uint: {
+ unsigned Val = getArgUInt(ArgNo);
+
+ if (ModifierIs(Modifier, ModifierLen, "select")) {
+ HandleSelectModifier(*this, Val, Argument, ArgumentLen, OutStr);
+ } else if (ModifierIs(Modifier, ModifierLen, "s")) {
+ HandleIntegerSModifier(Val, OutStr);
+ } else if (ModifierIs(Modifier, ModifierLen, "plural")) {
+ HandlePluralModifier(*this, (unsigned)Val, Argument, ArgumentLen,
+ OutStr);
+ } else if (ModifierIs(Modifier, ModifierLen, "ordinal")) {
+ HandleOrdinalModifier(Val, OutStr);
+ } else {
+ assert(ModifierLen == 0 && "Unknown integer modifier");
+ llvm::raw_svector_ostream(OutStr) << Val;
+ }
+ break;
+ }
+ // ---- TOKEN SPELLINGS ----
+ case DiagnosticsEngine::ak_tokenkind: {
+ tok::TokenKind Kind = static_cast<tok::TokenKind>(getRawArg(ArgNo));
+ assert(ModifierLen == 0 && "No modifiers for token kinds yet");
+
+ llvm::raw_svector_ostream Out(OutStr);
+ if (const char *S = tok::getPunctuatorSpelling(Kind))
+ // Quoted token spelling for punctuators.
+ Out << '\'' << S << '\'';
+ else if (const char *S = tok::getKeywordSpelling(Kind))
+ // Unquoted token spelling for keywords.
+ Out << S;
+ else if (const char *S = getTokenDescForDiagnostic(Kind))
+ // Unquoted translatable token name.
+ Out << S;
+ else if (const char *S = tok::getTokenName(Kind))
+ // Debug name, shouldn't appear in user-facing diagnostics.
+ Out << '<' << S << '>';
+ else
+ Out << "(null)";
+ break;
+ }
+ // ---- NAMES and TYPES ----
+ case DiagnosticsEngine::ak_identifierinfo: {
+ const IdentifierInfo *II = getArgIdentifier(ArgNo);
+ assert(ModifierLen == 0 && "No modifiers for strings yet");
+
+      // Don't crash if we get passed a null pointer by accident.
+ if (!II) {
+ const char *S = "(null)";
+ OutStr.append(S, S + strlen(S));
+ continue;
+ }
+
+ llvm::raw_svector_ostream(OutStr) << '\'' << II->getName() << '\'';
+ break;
+ }
+ case DiagnosticsEngine::ak_qualtype:
+ case DiagnosticsEngine::ak_declarationname:
+ case DiagnosticsEngine::ak_nameddecl:
+ case DiagnosticsEngine::ak_nestednamespec:
+ case DiagnosticsEngine::ak_declcontext:
+ case DiagnosticsEngine::ak_attr:
+ getDiags()->ConvertArgToString(Kind, getRawArg(ArgNo),
+ StringRef(Modifier, ModifierLen),
+ StringRef(Argument, ArgumentLen),
+ FormattedArgs,
+ OutStr, QualTypeVals);
+ break;
+ case DiagnosticsEngine::ak_qualtype_pair:
+ // Create a struct with all the info needed for printing.
+ TemplateDiffTypes TDT;
+ TDT.FromType = getRawArg(ArgNo);
+ TDT.ToType = getRawArg(ArgNo2);
+ TDT.ElideType = getDiags()->ElideType;
+ TDT.ShowColors = getDiags()->ShowColors;
+ TDT.TemplateDiffUsed = false;
+ intptr_t val = reinterpret_cast<intptr_t>(&TDT);
+
+ const char *ArgumentEnd = Argument + ArgumentLen;
+ const char *Pipe = ScanFormat(Argument, ArgumentEnd, '|');
+
+ // Print the tree. If this diagnostic already has a tree, skip the
+ // second tree.
+ if (getDiags()->PrintTemplateTree && Tree.empty()) {
+ TDT.PrintFromType = true;
+ TDT.PrintTree = true;
+ getDiags()->ConvertArgToString(Kind, val,
+ StringRef(Modifier, ModifierLen),
+ StringRef(Argument, ArgumentLen),
+ FormattedArgs,
+ Tree, QualTypeVals);
+        // If tree printing produced output, emit the text after the '|' and
+        // stop; if no tree information was produced, fall through to the
+        // regular printing below.
+ if (!Tree.empty()) {
+ FormatDiagnostic(Pipe + 1, ArgumentEnd, OutStr);
+ break;
+ }
+ }
+
+ // Non-tree printing, also the fall-back when tree printing fails.
+ // The fall-back is triggered when the types compared are not templates.
+ const char *FirstDollar = ScanFormat(Argument, ArgumentEnd, '$');
+ const char *SecondDollar = ScanFormat(FirstDollar + 1, ArgumentEnd, '$');
+
+ // Append before text
+ FormatDiagnostic(Argument, FirstDollar, OutStr);
+
+ // Append first type
+ TDT.PrintTree = false;
+ TDT.PrintFromType = true;
+ getDiags()->ConvertArgToString(Kind, val,
+ StringRef(Modifier, ModifierLen),
+ StringRef(Argument, ArgumentLen),
+ FormattedArgs,
+ OutStr, QualTypeVals);
+ if (!TDT.TemplateDiffUsed)
+ FormattedArgs.push_back(std::make_pair(DiagnosticsEngine::ak_qualtype,
+ TDT.FromType));
+
+ // Append middle text
+ FormatDiagnostic(FirstDollar + 1, SecondDollar, OutStr);
+
+ // Append second type
+ TDT.PrintFromType = false;
+ getDiags()->ConvertArgToString(Kind, val,
+ StringRef(Modifier, ModifierLen),
+ StringRef(Argument, ArgumentLen),
+ FormattedArgs,
+ OutStr, QualTypeVals);
+ if (!TDT.TemplateDiffUsed)
+ FormattedArgs.push_back(std::make_pair(DiagnosticsEngine::ak_qualtype,
+ TDT.ToType));
+
+ // Append end text
+ FormatDiagnostic(SecondDollar + 1, Pipe, OutStr);
+ break;
+ }
+
+    // Remember this argument info for subsequent formatting operations. Turn
+    // std::string arguments into null-terminated C strings so they are
+    // handled the same way as the other argument kinds.
+ if (Kind == DiagnosticsEngine::ak_qualtype_pair)
+ continue;
+ else if (Kind != DiagnosticsEngine::ak_std_string)
+ FormattedArgs.push_back(std::make_pair(Kind, getRawArg(ArgNo)));
+ else
+ FormattedArgs.push_back(std::make_pair(DiagnosticsEngine::ak_c_string,
+ (intptr_t)getArgStdStr(ArgNo).c_str()));
+
+ }
+
+ // Append the type tree to the end of the diagnostics.
+ OutStr.append(Tree.begin(), Tree.end());
+}
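+
+// Illustrative example (editorial note, not part of the original source):
+// with the format string "cannot convert %0 to %1" and two QualType
+// arguments, the loop above copies the literal text and routes each %N
+// through ConvertArgToString, yielding e.g. "cannot convert 'int' to 'float'".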
+
+StoredDiagnostic::StoredDiagnostic(DiagnosticsEngine::Level Level, unsigned ID,
+ StringRef Message)
+ : ID(ID), Level(Level), Loc(), Message(Message) { }
+
+StoredDiagnostic::StoredDiagnostic(DiagnosticsEngine::Level Level,
+ const Diagnostic &Info)
+ : ID(Info.getID()), Level(Level)
+{
+ assert((Info.getLocation().isInvalid() || Info.hasSourceManager()) &&
+ "Valid source location without setting a source manager for diagnostic");
+ if (Info.getLocation().isValid())
+ Loc = FullSourceLoc(Info.getLocation(), Info.getSourceManager());
+ SmallString<64> Message;
+ Info.FormatDiagnostic(Message);
+ this->Message.assign(Message.begin(), Message.end());
+ this->Ranges.assign(Info.getRanges().begin(), Info.getRanges().end());
+ this->FixIts.assign(Info.getFixItHints().begin(), Info.getFixItHints().end());
+}
+
+StoredDiagnostic::StoredDiagnostic(DiagnosticsEngine::Level Level, unsigned ID,
+ StringRef Message, FullSourceLoc Loc,
+ ArrayRef<CharSourceRange> Ranges,
+ ArrayRef<FixItHint> FixIts)
+ : ID(ID), Level(Level), Loc(Loc), Message(Message),
+ Ranges(Ranges.begin(), Ranges.end()), FixIts(FixIts.begin(), FixIts.end())
+{
+}
+
+/// IncludeInDiagnosticCounts - This method (whose default implementation
+/// returns true) indicates whether the diagnostics handled by this
+/// DiagnosticConsumer should be included in the number of diagnostics
+/// reported by DiagnosticsEngine.
+bool DiagnosticConsumer::IncludeInDiagnosticCounts() const { return true; }
+
+void IgnoringDiagConsumer::anchor() { }
+
+ForwardingDiagnosticConsumer::~ForwardingDiagnosticConsumer() {}
+
+void ForwardingDiagnosticConsumer::HandleDiagnostic(
+ DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info) {
+ Target.HandleDiagnostic(DiagLevel, Info);
+}
+
+void ForwardingDiagnosticConsumer::clear() {
+ DiagnosticConsumer::clear();
+ Target.clear();
+}
+
+bool ForwardingDiagnosticConsumer::IncludeInDiagnosticCounts() const {
+ return Target.IncludeInDiagnosticCounts();
+}
+
+PartialDiagnostic::StorageAllocator::StorageAllocator() {
+ for (unsigned I = 0; I != NumCached; ++I)
+ FreeList[I] = Cached + I;
+ NumFreeListEntries = NumCached;
+}
+
+PartialDiagnostic::StorageAllocator::~StorageAllocator() {
+ // Don't assert if we are in a CrashRecovery context, as this invariant may
+ // be invalidated during a crash.
+ assert((NumFreeListEntries == NumCached ||
+ llvm::CrashRecoveryContext::isRecoveringFromCrash()) &&
+ "A partial is on the lamb");
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp b/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp
new file mode 100644
index 0000000..a34c7fec
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp
@@ -0,0 +1,713 @@
+//===--- DiagnosticIDs.cpp - Diagnostic IDs Handling ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Diagnostic IDs-related interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/DiagnosticIDs.h"
+#include "clang/Basic/AllDiagnostics.h"
+#include "clang/Basic/DiagnosticCategories.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <map>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Builtin Diagnostic information
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+// Diagnostic classes.
+enum {
+ CLASS_NOTE = 0x01,
+ CLASS_REMARK = 0x02,
+ CLASS_WARNING = 0x03,
+ CLASS_EXTENSION = 0x04,
+ CLASS_ERROR = 0x05
+};
+
+struct StaticDiagInfoRec {
+ uint16_t DiagID;
+ unsigned DefaultSeverity : 3;
+ unsigned Class : 3;
+ unsigned SFINAE : 2;
+ unsigned WarnNoWerror : 1;
+ unsigned WarnShowInSystemHeader : 1;
+ unsigned Category : 5;
+
+ uint16_t OptionGroupIndex;
+
+ uint16_t DescriptionLen;
+ const char *DescriptionStr;
+
+ unsigned getOptionGroupIndex() const {
+ return OptionGroupIndex;
+ }
+
+ StringRef getDescription() const {
+ return StringRef(DescriptionStr, DescriptionLen);
+ }
+
+ diag::Flavor getFlavor() const {
+ return Class == CLASS_REMARK ? diag::Flavor::Remark
+ : diag::Flavor::WarningOrError;
+ }
+
+ bool operator<(const StaticDiagInfoRec &RHS) const {
+ return DiagID < RHS.DiagID;
+ }
+};
+
+} // end anonymous namespace
+
+static const StaticDiagInfoRec StaticDiagInfo[] = {
+#define DIAG(ENUM, CLASS, DEFAULT_SEVERITY, DESC, GROUP, SFINAE, NOWERROR, \
+ SHOWINSYSHEADER, CATEGORY) \
+ { \
+ diag::ENUM, DEFAULT_SEVERITY, CLASS, DiagnosticIDs::SFINAE, NOWERROR, \
+ SHOWINSYSHEADER, CATEGORY, GROUP, STR_SIZE(DESC, uint16_t), DESC \
+ } \
+ ,
+#include "clang/Basic/DiagnosticCommonKinds.inc"
+#include "clang/Basic/DiagnosticDriverKinds.inc"
+#include "clang/Basic/DiagnosticFrontendKinds.inc"
+#include "clang/Basic/DiagnosticSerializationKinds.inc"
+#include "clang/Basic/DiagnosticLexKinds.inc"
+#include "clang/Basic/DiagnosticParseKinds.inc"
+#include "clang/Basic/DiagnosticASTKinds.inc"
+#include "clang/Basic/DiagnosticCommentKinds.inc"
+#include "clang/Basic/DiagnosticSemaKinds.inc"
+#include "clang/Basic/DiagnosticAnalysisKinds.inc"
+#undef DIAG
+};
+
+static const unsigned StaticDiagInfoSize = llvm::array_lengthof(StaticDiagInfo);
+
+/// GetDiagInfo - Return the StaticDiagInfoRec entry for the specified DiagID,
+/// or null if the ID is invalid.
+static const StaticDiagInfoRec *GetDiagInfo(unsigned DiagID) {
+ // If assertions are enabled, verify that the StaticDiagInfo array is sorted.
+#ifndef NDEBUG
+ static bool IsFirst = true; // So the check is only performed on first call.
+ if (IsFirst) {
+ assert(std::is_sorted(std::begin(StaticDiagInfo),
+ std::end(StaticDiagInfo)) &&
+ "Diag ID conflict, the enums at the start of clang::diag (in "
+ "DiagnosticIDs.h) probably need to be increased");
+ IsFirst = false;
+ }
+#endif
+
+ // Out of bounds diag. Can't be in the table.
+ using namespace diag;
+ if (DiagID >= DIAG_UPPER_LIMIT || DiagID <= DIAG_START_COMMON)
+ return nullptr;
+
+ // Compute the index of the requested diagnostic in the static table.
+ // 1. Add the number of diagnostics in each category preceding the
+ // diagnostic and of the category the diagnostic is in. This gives us
+ // the offset of the category in the table.
+ // 2. Subtract the number of IDs in each category from our ID. This gives us
+ // the offset of the diagnostic in the category.
+ // This is cheaper than a binary search on the table as it doesn't touch
+ // memory at all.
+ unsigned Offset = 0;
+ unsigned ID = DiagID - DIAG_START_COMMON - 1;
+#define CATEGORY(NAME, PREV) \
+ if (DiagID > DIAG_START_##NAME) { \
+ Offset += NUM_BUILTIN_##PREV##_DIAGNOSTICS - DIAG_START_##PREV - 1; \
+ ID -= DIAG_START_##NAME - DIAG_START_##PREV; \
+ }
+CATEGORY(DRIVER, COMMON)
+CATEGORY(FRONTEND, DRIVER)
+CATEGORY(SERIALIZATION, FRONTEND)
+CATEGORY(LEX, SERIALIZATION)
+CATEGORY(PARSE, LEX)
+CATEGORY(AST, PARSE)
+CATEGORY(COMMENT, AST)
+CATEGORY(SEMA, COMMENT)
+CATEGORY(ANALYSIS, SEMA)
+#undef CATEGORY
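+
+  // Worked example: for a DiagID in the PARSE range, the entries above fire
+  // for DRIVER through PARSE, adding the diagnostic counts of COMMON through
+  // LEX to Offset and subtracting their ID-range widths from ID, so ID ends
+  // up as the index of the diagnostic within the PARSE slice of the table.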
+
+ // Avoid out of bounds reads.
+ if (ID + Offset >= StaticDiagInfoSize)
+ return nullptr;
+
+ assert(ID < StaticDiagInfoSize && Offset < StaticDiagInfoSize);
+
+ const StaticDiagInfoRec *Found = &StaticDiagInfo[ID + Offset];
+ // If the diag id doesn't match we found a different diag, abort. This can
+ // happen when this function is called with an ID that points into a hole in
+ // the diagID space.
+ if (Found->DiagID != DiagID)
+ return nullptr;
+ return Found;
+}
+
+static DiagnosticMapping GetDefaultDiagMapping(unsigned DiagID) {
+ DiagnosticMapping Info = DiagnosticMapping::Make(
+ diag::Severity::Fatal, /*IsUser=*/false, /*IsPragma=*/false);
+
+ if (const StaticDiagInfoRec *StaticInfo = GetDiagInfo(DiagID)) {
+ Info.setSeverity((diag::Severity)StaticInfo->DefaultSeverity);
+
+ if (StaticInfo->WarnNoWerror) {
+ assert(Info.getSeverity() == diag::Severity::Warning &&
+ "Unexpected mapping with no-Werror bit!");
+ Info.setNoWarningAsError(true);
+ }
+ }
+
+ return Info;
+}
+
+/// getCategoryNumberForDiag - Return the category number that a specified
+/// DiagID belongs to, or 0 if no category.
+unsigned DiagnosticIDs::getCategoryNumberForDiag(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return Info->Category;
+ return 0;
+}
+
+namespace {
+ // The diagnostic category names.
+ struct StaticDiagCategoryRec {
+ const char *NameStr;
+ uint8_t NameLen;
+
+ StringRef getName() const {
+ return StringRef(NameStr, NameLen);
+ }
+ };
+}
+
+// Unfortunately, the split between DiagnosticIDs and Diagnostic is not
+// particularly clean, but for now we just implement this method here so we can
+// access GetDefaultDiagMapping.
+DiagnosticMapping &
+DiagnosticsEngine::DiagState::getOrAddMapping(diag::kind Diag) {
+ std::pair<iterator, bool> Result =
+ DiagMap.insert(std::make_pair(Diag, DiagnosticMapping()));
+
+ // Initialize the entry if we added it.
+ if (Result.second)
+ Result.first->second = GetDefaultDiagMapping(Diag);
+
+ return Result.first->second;
+}
+
+static const StaticDiagCategoryRec CategoryNameTable[] = {
+#define GET_CATEGORY_TABLE
+#define CATEGORY(X, ENUM) { X, STR_SIZE(X, uint8_t) },
+#include "clang/Basic/DiagnosticGroups.inc"
+#undef GET_CATEGORY_TABLE
+ { nullptr, 0 }
+};
+
+/// getNumberOfCategories - Return the number of categories
+unsigned DiagnosticIDs::getNumberOfCategories() {
+ return llvm::array_lengthof(CategoryNameTable) - 1;
+}
+
+/// getCategoryNameFromID - Given a category ID, return the name of the
+/// category; returns an empty string if CategoryID is zero (no category) or
+/// out of range.
+StringRef DiagnosticIDs::getCategoryNameFromID(unsigned CategoryID) {
+ if (CategoryID >= getNumberOfCategories())
+ return StringRef();
+ return CategoryNameTable[CategoryID].getName();
+}
+
+DiagnosticIDs::SFINAEResponse
+DiagnosticIDs::getDiagnosticSFINAEResponse(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return static_cast<DiagnosticIDs::SFINAEResponse>(Info->SFINAE);
+ return SFINAE_Report;
+}
+
+/// getBuiltinDiagClass - Return the class field of the diagnostic.
+///
+static unsigned getBuiltinDiagClass(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return Info->Class;
+ return ~0U;
+}
+
+//===----------------------------------------------------------------------===//
+// Custom Diagnostic information
+//===----------------------------------------------------------------------===//
+
+namespace clang {
+ namespace diag {
+ class CustomDiagInfo {
+ typedef std::pair<DiagnosticIDs::Level, std::string> DiagDesc;
+ std::vector<DiagDesc> DiagInfo;
+ std::map<DiagDesc, unsigned> DiagIDs;
+ public:
+
+ /// getDescription - Return the description of the specified custom
+ /// diagnostic.
+ StringRef getDescription(unsigned DiagID) const {
+ assert(DiagID - DIAG_UPPER_LIMIT < DiagInfo.size() &&
+ "Invalid diagnostic ID");
+ return DiagInfo[DiagID-DIAG_UPPER_LIMIT].second;
+ }
+
+ /// getLevel - Return the level of the specified custom diagnostic.
+ DiagnosticIDs::Level getLevel(unsigned DiagID) const {
+ assert(DiagID - DIAG_UPPER_LIMIT < DiagInfo.size() &&
+ "Invalid diagnostic ID");
+ return DiagInfo[DiagID-DIAG_UPPER_LIMIT].first;
+ }
+
+ unsigned getOrCreateDiagID(DiagnosticIDs::Level L, StringRef Message,
+ DiagnosticIDs &Diags) {
+ DiagDesc D(L, Message);
+ // Check to see if it already exists.
+ std::map<DiagDesc, unsigned>::iterator I = DiagIDs.lower_bound(D);
+ if (I != DiagIDs.end() && I->first == D)
+ return I->second;
+
+ // If not, assign a new ID.
+ unsigned ID = DiagInfo.size()+DIAG_UPPER_LIMIT;
+ DiagIDs.insert(std::make_pair(D, ID));
+ DiagInfo.push_back(D);
+ return ID;
+ }
+ };
+
+ } // end diag namespace
+} // end clang namespace
+
+
+//===----------------------------------------------------------------------===//
+// Common Diagnostic implementation
+//===----------------------------------------------------------------------===//
+
+DiagnosticIDs::DiagnosticIDs() { CustomDiagInfo = nullptr; }
+
+DiagnosticIDs::~DiagnosticIDs() {
+ delete CustomDiagInfo;
+}
+
+/// getCustomDiagID - Return an ID for a diagnostic with the specified message
+/// and level. If this is the first request for this diagnostic, it is
+/// registered and created, otherwise the existing ID is returned.
+///
+/// \param FormatString A fixed diagnostic format string that will be hashed and
+/// mapped to a unique DiagID.
+unsigned DiagnosticIDs::getCustomDiagID(Level L, StringRef FormatString) {
+ if (!CustomDiagInfo)
+ CustomDiagInfo = new diag::CustomDiagInfo();
+ return CustomDiagInfo->getOrCreateDiagID(L, FormatString, *this);
+}
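+
+// For instance (illustrative; IDs is a hypothetical DiagnosticIDs object):
+//   unsigned ID = IDs.getCustomDiagID(DiagnosticIDs::Warning, "oops");
+// Repeated calls with the same level and message return the same ID, since
+// getOrCreateDiagID reuses the entry in its map.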
+
+
+/// isBuiltinWarningOrExtension - Return true if the unmapped diagnostic
+/// level of the specified diagnostic ID is a Warning or Extension.
+/// This only works on builtin diagnostics, not custom ones, and is not legal to
+/// call on NOTEs.
+bool DiagnosticIDs::isBuiltinWarningOrExtension(unsigned DiagID) {
+ return DiagID < diag::DIAG_UPPER_LIMIT &&
+ getBuiltinDiagClass(DiagID) != CLASS_ERROR;
+}
+
+/// \brief Determine whether the given built-in diagnostic ID is a
+/// Note.
+bool DiagnosticIDs::isBuiltinNote(unsigned DiagID) {
+ return DiagID < diag::DIAG_UPPER_LIMIT &&
+ getBuiltinDiagClass(DiagID) == CLASS_NOTE;
+}
+
+/// isBuiltinExtensionDiag - Determine whether the given built-in diagnostic
+/// ID is for an extension of some sort. This also returns EnabledByDefault,
+/// which is set to indicate whether the diagnostic is ignored by default (in
+/// which case -pedantic enables it) or treated as a warning/error by default.
+///
+bool DiagnosticIDs::isBuiltinExtensionDiag(unsigned DiagID,
+ bool &EnabledByDefault) {
+ if (DiagID >= diag::DIAG_UPPER_LIMIT ||
+ getBuiltinDiagClass(DiagID) != CLASS_EXTENSION)
+ return false;
+
+ EnabledByDefault =
+ GetDefaultDiagMapping(DiagID).getSeverity() != diag::Severity::Ignored;
+ return true;
+}
+
+bool DiagnosticIDs::isDefaultMappingAsError(unsigned DiagID) {
+ if (DiagID >= diag::DIAG_UPPER_LIMIT)
+ return false;
+
+ return GetDefaultDiagMapping(DiagID).getSeverity() == diag::Severity::Error;
+}
+
+/// getDescription - Given a diagnostic ID, return a description of the
+/// issue.
+StringRef DiagnosticIDs::getDescription(unsigned DiagID) const {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return Info->getDescription();
+ assert(CustomDiagInfo && "Invalid CustomDiagInfo");
+ return CustomDiagInfo->getDescription(DiagID);
+}
+
+static DiagnosticIDs::Level toLevel(diag::Severity SV) {
+ switch (SV) {
+ case diag::Severity::Ignored:
+ return DiagnosticIDs::Ignored;
+ case diag::Severity::Remark:
+ return DiagnosticIDs::Remark;
+ case diag::Severity::Warning:
+ return DiagnosticIDs::Warning;
+ case diag::Severity::Error:
+ return DiagnosticIDs::Error;
+ case diag::Severity::Fatal:
+ return DiagnosticIDs::Fatal;
+ }
+ llvm_unreachable("unexpected severity");
+}
+
+/// getDiagnosticLevel - Based on the way the client configured the
+/// DiagnosticsEngine object, classify the specified diagnostic ID into a
+/// Level consumable by the DiagnosticClient.
+DiagnosticIDs::Level
+DiagnosticIDs::getDiagnosticLevel(unsigned DiagID, SourceLocation Loc,
+ const DiagnosticsEngine &Diag) const {
+ // Handle custom diagnostics, which cannot be mapped.
+ if (DiagID >= diag::DIAG_UPPER_LIMIT) {
+ assert(CustomDiagInfo && "Invalid CustomDiagInfo");
+ return CustomDiagInfo->getLevel(DiagID);
+ }
+
+ unsigned DiagClass = getBuiltinDiagClass(DiagID);
+ if (DiagClass == CLASS_NOTE) return DiagnosticIDs::Note;
+ return toLevel(getDiagnosticSeverity(DiagID, Loc, Diag));
+}
+
+/// \brief Based on the way the client configured the Diagnostic
+/// object, classify the specified diagnostic ID into a Level, consumable by
+/// the DiagnosticClient.
+///
+/// \param Loc The source location we are interested in finding out the
+/// diagnostic state. Can be null in order to query the latest state.
+diag::Severity
+DiagnosticIDs::getDiagnosticSeverity(unsigned DiagID, SourceLocation Loc,
+ const DiagnosticsEngine &Diag) const {
+ assert(getBuiltinDiagClass(DiagID) != CLASS_NOTE);
+
+ // Specific non-error diagnostics may be mapped to various levels from ignored
+ // to error. Errors can only be mapped to fatal.
+ diag::Severity Result = diag::Severity::Fatal;
+
+ DiagnosticsEngine::DiagStatePointsTy::iterator
+ Pos = Diag.GetDiagStatePointForLoc(Loc);
+ DiagnosticsEngine::DiagState *State = Pos->State;
+
+ // Get the mapping information, or compute it lazily.
+ DiagnosticMapping &Mapping = State->getOrAddMapping((diag::kind)DiagID);
+
+ // TODO: Can a null severity really get here?
+ if (Mapping.getSeverity() != diag::Severity())
+ Result = Mapping.getSeverity();
+
+ // Upgrade ignored diagnostics if -Weverything is enabled.
+ if (Diag.EnableAllWarnings && Result == diag::Severity::Ignored &&
+ !Mapping.isUser() && getBuiltinDiagClass(DiagID) != CLASS_REMARK)
+ Result = diag::Severity::Warning;
+
+ // Ignore -pedantic diagnostics inside __extension__ blocks.
+ // (The diagnostics controlled by -pedantic are the extension diagnostics
+ // that are not enabled by default.)
+ bool EnabledByDefault = false;
+ bool IsExtensionDiag = isBuiltinExtensionDiag(DiagID, EnabledByDefault);
+ if (Diag.AllExtensionsSilenced && IsExtensionDiag && !EnabledByDefault)
+ return diag::Severity::Ignored;
+
+ // For extension diagnostics that haven't been explicitly mapped, check if we
+ // should upgrade the diagnostic.
+ if (IsExtensionDiag && !Mapping.isUser())
+ Result = std::max(Result, Diag.ExtBehavior);
+
+ // At this point, ignored errors can no longer be upgraded.
+ if (Result == diag::Severity::Ignored)
+ return Result;
+
+ // Honor -w, which is lower in priority than pedantic-errors, but higher than
+ // -Werror.
+ if (Result == diag::Severity::Warning && Diag.IgnoreAllWarnings)
+ return diag::Severity::Ignored;
+
+ // If -Werror is enabled, map warnings to errors unless explicitly disabled.
+ if (Result == diag::Severity::Warning) {
+ if (Diag.WarningsAsErrors && !Mapping.hasNoWarningAsError())
+ Result = diag::Severity::Error;
+ }
+
+  // If -Wfatal-errors is enabled, map errors to fatal unless explicitly
+  // disabled.
+ if (Result == diag::Severity::Error) {
+ if (Diag.ErrorsAsFatal && !Mapping.hasNoErrorAsFatal())
+ Result = diag::Severity::Fatal;
+ }
+
+  // Custom diagnostics are always emitted in system headers.
+ bool ShowInSystemHeader =
+ !GetDiagInfo(DiagID) || GetDiagInfo(DiagID)->WarnShowInSystemHeader;
+
+ // If we are in a system header, we ignore it. We look at the diagnostic class
+ // because we also want to ignore extensions and warnings in -Werror and
+ // -pedantic-errors modes, which *map* warnings/extensions to errors.
+ if (Diag.SuppressSystemWarnings && !ShowInSystemHeader && Loc.isValid() &&
+ Diag.getSourceManager().isInSystemHeader(
+ Diag.getSourceManager().getExpansionLoc(Loc)))
+ return diag::Severity::Ignored;
+
+ return Result;
+}
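+
+// Editorial summary of the precedence above: explicit mappings first, then
+// -Weverything upgrades, __extension__ silencing, -pedantic upgrades, -w,
+// -Werror, -Wfatal-errors, and finally system-header suppression.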
+
+#define GET_DIAG_ARRAYS
+#include "clang/Basic/DiagnosticGroups.inc"
+#undef GET_DIAG_ARRAYS
+
+namespace {
+ struct WarningOption {
+ uint16_t NameOffset;
+ uint16_t Members;
+ uint16_t SubGroups;
+
+ // String is stored with a pascal-style length byte.
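+    // e.g. a table slice "\x04bool" (hypothetical contents) yields
+    // StringRef("bool", 4): the length byte precedes the characters.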
+ StringRef getName() const {
+ return StringRef(DiagGroupNames + NameOffset + 1,
+ DiagGroupNames[NameOffset]);
+ }
+ };
+}
+
+// Second, the table of options, sorted by name for fast binary lookup.
+static const WarningOption OptionTable[] = {
+#define GET_DIAG_TABLE
+#include "clang/Basic/DiagnosticGroups.inc"
+#undef GET_DIAG_TABLE
+};
+
+/// getWarningOptionForDiag - Return the lowest-level warning option that
+/// enables the specified diagnostic. If there is no -Wfoo flag that controls
+/// the diagnostic, this returns an empty StringRef.
+StringRef DiagnosticIDs::getWarningOptionForDiag(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return OptionTable[Info->getOptionGroupIndex()].getName();
+ return StringRef();
+}
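+
+// For example (assuming the generated group tables), a diagnostic that is a
+// member of the -Wunused-variable group yields "unused-variable" here.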
+
+/// Return \c true if any diagnostics were found in this group, even if they
+/// were filtered out due to having the wrong flavor.
+static bool getDiagnosticsInGroup(diag::Flavor Flavor,
+ const WarningOption *Group,
+ SmallVectorImpl<diag::kind> &Diags) {
+ // An empty group is considered to be a warning group: we have empty groups
+ // for GCC compatibility, and GCC does not have remarks.
+ if (!Group->Members && !Group->SubGroups)
+ return Flavor == diag::Flavor::Remark;
+
+ bool NotFound = true;
+
+ // Add the members of the option diagnostic set.
+ const int16_t *Member = DiagArrays + Group->Members;
+ for (; *Member != -1; ++Member) {
+ if (GetDiagInfo(*Member)->getFlavor() == Flavor) {
+ NotFound = false;
+ Diags.push_back(*Member);
+ }
+ }
+
+ // Add the members of the subgroups.
+ const int16_t *SubGroups = DiagSubGroups + Group->SubGroups;
+ for (; *SubGroups != (int16_t)-1; ++SubGroups)
+ NotFound &= getDiagnosticsInGroup(Flavor, &OptionTable[(short)*SubGroups],
+ Diags);
+
+ return NotFound;
+}
+
+bool
+DiagnosticIDs::getDiagnosticsInGroup(diag::Flavor Flavor, StringRef Group,
+ SmallVectorImpl<diag::kind> &Diags) const {
+ auto Found = std::lower_bound(std::begin(OptionTable), std::end(OptionTable),
+ Group,
+ [](const WarningOption &LHS, StringRef RHS) {
+ return LHS.getName() < RHS;
+ });
+ if (Found == std::end(OptionTable) || Found->getName() != Group)
+ return true; // Option not found.
+
+ return ::getDiagnosticsInGroup(Flavor, Found, Diags);
+}
+
+void DiagnosticIDs::getAllDiagnostics(diag::Flavor Flavor,
+ SmallVectorImpl<diag::kind> &Diags) const {
+ for (unsigned i = 0; i != StaticDiagInfoSize; ++i)
+ if (StaticDiagInfo[i].getFlavor() == Flavor)
+ Diags.push_back(StaticDiagInfo[i].DiagID);
+}
+
+StringRef DiagnosticIDs::getNearestOption(diag::Flavor Flavor,
+ StringRef Group) {
+ StringRef Best;
+ unsigned BestDistance = Group.size() + 1; // Sanity threshold.
+ for (const WarningOption &O : OptionTable) {
+ // Don't suggest ignored warning flags.
+ if (!O.Members && !O.SubGroups)
+ continue;
+
+ unsigned Distance = O.getName().edit_distance(Group, true, BestDistance);
+ if (Distance > BestDistance)
+ continue;
+
+ // Don't suggest groups that are not of this kind.
+ llvm::SmallVector<diag::kind, 8> Diags;
+ if (::getDiagnosticsInGroup(Flavor, &O, Diags) || Diags.empty())
+ continue;
+
+ if (Distance == BestDistance) {
+ // Two matches with the same distance, don't prefer one over the other.
+ Best = "";
+ } else if (Distance < BestDistance) {
+ // This is a better match.
+ Best = O.getName();
+ BestDistance = Distance;
+ }
+ }
+
+ return Best;
+}
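+
+// Illustrative example: given the misspelled group "unintialized", the loop
+// above would typically suggest "uninitialized", the unique group within
+// edit distance of the input.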
+
+/// ProcessDiag - This is the method used to report a diagnostic that is
+/// finally fully formed.
+bool DiagnosticIDs::ProcessDiag(DiagnosticsEngine &Diag) const {
+ Diagnostic Info(&Diag);
+
+ assert(Diag.getClient() && "DiagnosticClient not set!");
+
+ // Figure out the diagnostic level of this message.
+ unsigned DiagID = Info.getID();
+ DiagnosticIDs::Level DiagLevel
+ = getDiagnosticLevel(DiagID, Info.getLocation(), Diag);
+
+ // Update counts for DiagnosticErrorTrap even if a fatal error occurred
+ // or diagnostics are suppressed.
+ if (DiagLevel >= DiagnosticIDs::Error) {
+ ++Diag.TrapNumErrorsOccurred;
+ if (isUnrecoverable(DiagID))
+ ++Diag.TrapNumUnrecoverableErrorsOccurred;
+ }
+
+ if (Diag.SuppressAllDiagnostics)
+ return false;
+
+ if (DiagLevel != DiagnosticIDs::Note) {
+ // Record that a fatal error occurred only when we see a second
+ // non-note diagnostic. This allows notes to be attached to the
+ // fatal error, but suppresses any diagnostics that follow those
+ // notes.
+ if (Diag.LastDiagLevel == DiagnosticIDs::Fatal)
+ Diag.FatalErrorOccurred = true;
+
+ Diag.LastDiagLevel = DiagLevel;
+ }
+
+ // If a fatal error has already been emitted, silence all subsequent
+ // diagnostics.
+ if (Diag.FatalErrorOccurred) {
+ if (DiagLevel >= DiagnosticIDs::Error &&
+ Diag.Client->IncludeInDiagnosticCounts()) {
+ ++Diag.NumErrors;
+ }
+
+ return false;
+ }
+
+ // If the client doesn't care about this message, don't issue it. If this is
+ // a note and the last real diagnostic was ignored, ignore it too.
+ if (DiagLevel == DiagnosticIDs::Ignored ||
+ (DiagLevel == DiagnosticIDs::Note &&
+ Diag.LastDiagLevel == DiagnosticIDs::Ignored))
+ return false;
+
+ if (DiagLevel >= DiagnosticIDs::Error) {
+ if (isUnrecoverable(DiagID))
+ Diag.UnrecoverableErrorOccurred = true;
+
+ // Warnings which have been upgraded to errors do not prevent compilation.
+ if (isDefaultMappingAsError(DiagID))
+ Diag.UncompilableErrorOccurred = true;
+
+ Diag.ErrorOccurred = true;
+ if (Diag.Client->IncludeInDiagnosticCounts()) {
+ ++Diag.NumErrors;
+ }
+
+    // If we've emitted a lot of errors, emit a fatal error instead to stop
+    // a flood of bogus errors.
+ if (Diag.ErrorLimit && Diag.NumErrors > Diag.ErrorLimit &&
+ DiagLevel == DiagnosticIDs::Error) {
+ Diag.SetDelayedDiagnostic(diag::fatal_too_many_errors);
+ return false;
+ }
+ }
+
+ // Finally, report it.
+ EmitDiag(Diag, DiagLevel);
+ return true;
+}
+
+void DiagnosticIDs::EmitDiag(DiagnosticsEngine &Diag, Level DiagLevel) const {
+ Diagnostic Info(&Diag);
+  assert(DiagLevel != DiagnosticIDs::Ignored &&
+         "Cannot emit ignored diagnostics!");
+
+ Diag.Client->HandleDiagnostic((DiagnosticsEngine::Level)DiagLevel, Info);
+ if (Diag.Client->IncludeInDiagnosticCounts()) {
+ if (DiagLevel == DiagnosticIDs::Warning)
+ ++Diag.NumWarnings;
+ }
+
+ Diag.CurDiagID = ~0U;
+}
+
+bool DiagnosticIDs::isUnrecoverable(unsigned DiagID) const {
+ if (DiagID >= diag::DIAG_UPPER_LIMIT) {
+ assert(CustomDiagInfo && "Invalid CustomDiagInfo");
+ // Custom diagnostics.
+ return CustomDiagInfo->getLevel(DiagID) >= DiagnosticIDs::Error;
+ }
+
+ // Only errors may be unrecoverable.
+ if (getBuiltinDiagClass(DiagID) < CLASS_ERROR)
+ return false;
+
+ if (DiagID == diag::err_unavailable ||
+ DiagID == diag::err_unavailable_message)
+ return false;
+
+ // Currently we consider all ARC errors as recoverable.
+ if (isARCDiagnostic(DiagID))
+ return false;
+
+ return true;
+}
+
+bool DiagnosticIDs::isARCDiagnostic(unsigned DiagID) {
+ unsigned cat = getCategoryNumberForDiag(DiagID);
+ return DiagnosticIDs::getCategoryNameFromID(cat).startswith("ARC ");
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/DiagnosticOptions.cpp b/contrib/llvm/tools/clang/lib/Basic/DiagnosticOptions.cpp
new file mode 100644
index 0000000..f54a0ef
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/DiagnosticOptions.cpp
@@ -0,0 +1,24 @@
+//===--- DiagnosticOptions.cpp - C Language Family Diagnostic Handling ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the DiagnosticOptions related interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/DiagnosticOptions.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+
+raw_ostream& operator<<(raw_ostream& Out, DiagnosticLevelMask M) {
+ using UT = std::underlying_type<DiagnosticLevelMask>::type;
+ return Out << static_cast<UT>(M);
+}
+
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp b/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp
new file mode 100644
index 0000000..cb3f75c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp
@@ -0,0 +1,570 @@
+//===--- FileManager.cpp - File System Probing and Caching ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the FileManager interface.
+//
+//===----------------------------------------------------------------------===//
+//
+// TODO: This should index all interesting directories with dirent calls.
+// getdirentries ?
+// opendir/readdir_r/closedir ?
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemStatCache.h"
+#include "clang/Frontend/PCHContainerOperations.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include <map>
+#include <set>
+#include <string>
+#include <system_error>
+
+using namespace clang;
+
+/// NON_EXISTENT_DIR - A special value distinct from null that is used to
+/// represent a dir name that doesn't exist on the disk.
+#define NON_EXISTENT_DIR reinterpret_cast<DirectoryEntry*>((intptr_t)-1)
+
+/// NON_EXISTENT_FILE - A special value distinct from null that is used to
+/// represent a filename that doesn't exist on the disk.
+#define NON_EXISTENT_FILE reinterpret_cast<FileEntry*>((intptr_t)-1)
+
+//===----------------------------------------------------------------------===//
+// Common logic.
+//===----------------------------------------------------------------------===//
+
+FileManager::FileManager(const FileSystemOptions &FSO,
+ IntrusiveRefCntPtr<vfs::FileSystem> FS)
+ : FS(FS), FileSystemOpts(FSO),
+ SeenDirEntries(64), SeenFileEntries(64), NextFileUID(0) {
+ NumDirLookups = NumFileLookups = 0;
+ NumDirCacheMisses = NumFileCacheMisses = 0;
+
+ // If the caller doesn't provide a virtual file system, just grab the real
+ // file system.
+ if (!FS)
+ this->FS = vfs::getRealFileSystem();
+}
+
+FileManager::~FileManager() = default;
+
+void FileManager::addStatCache(std::unique_ptr<FileSystemStatCache> statCache,
+ bool AtBeginning) {
+ assert(statCache && "No stat cache provided?");
+ if (AtBeginning || !StatCache.get()) {
+ statCache->setNextStatCache(std::move(StatCache));
+ StatCache = std::move(statCache);
+ return;
+ }
+
+ FileSystemStatCache *LastCache = StatCache.get();
+ while (LastCache->getNextStatCache())
+ LastCache = LastCache->getNextStatCache();
+
+ LastCache->setNextStatCache(std::move(statCache));
+}
+
+void FileManager::removeStatCache(FileSystemStatCache *statCache) {
+ if (!statCache)
+ return;
+
+ if (StatCache.get() == statCache) {
+ // This is the first stat cache.
+ StatCache = StatCache->takeNextStatCache();
+ return;
+ }
+
+ // Find the stat cache in the list.
+ FileSystemStatCache *PrevCache = StatCache.get();
+ while (PrevCache && PrevCache->getNextStatCache() != statCache)
+ PrevCache = PrevCache->getNextStatCache();
+
+ assert(PrevCache && "Stat cache not found for removal");
+ PrevCache->setNextStatCache(statCache->takeNextStatCache());
+}
+
+void FileManager::clearStatCaches() {
+ StatCache.reset();
+}
+
+/// \brief Retrieve the directory that the given file name resides in.
+/// Filename can point to either a real file or a virtual file.
+static const DirectoryEntry *getDirectoryFromFile(FileManager &FileMgr,
+ StringRef Filename,
+ bool CacheFailure) {
+ if (Filename.empty())
+ return nullptr;
+
+ if (llvm::sys::path::is_separator(Filename[Filename.size() - 1]))
+ return nullptr; // If Filename is a directory.
+
+ StringRef DirName = llvm::sys::path::parent_path(Filename);
+ // Use the current directory if file has no path component.
+ if (DirName.empty())
+ DirName = ".";
+
+ return FileMgr.getDirectory(DirName, CacheFailure);
+}
+
+/// Add all ancestors of the given path (pointing to either a file or
+/// a directory) as virtual directories.
+void FileManager::addAncestorsAsVirtualDirs(StringRef Path) {
+ StringRef DirName = llvm::sys::path::parent_path(Path);
+ if (DirName.empty())
+ return;
+
+ auto &NamedDirEnt =
+ *SeenDirEntries.insert(std::make_pair(DirName, nullptr)).first;
+
+ // When caching a virtual directory, we always cache its ancestors
+ // at the same time. Therefore, if DirName is already in the cache,
+ // we don't need to recurse as its ancestors must also already be in
+ // the cache.
+ if (NamedDirEnt.second && NamedDirEnt.second != NON_EXISTENT_DIR)
+ return;
+
+ // Add the virtual directory to the cache.
+ auto UDE = llvm::make_unique<DirectoryEntry>();
+ UDE->Name = NamedDirEnt.first().data();
+ NamedDirEnt.second = UDE.get();
+ VirtualDirectoryEntries.push_back(std::move(UDE));
+
+ // Recursively add the other ancestors.
+ addAncestorsAsVirtualDirs(DirName);
+}
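+
+// For example, adding the virtual file "a/b/c.h" caches "a/b" and then "a"
+// as virtual directories; the recursion stops once parent_path() is empty or
+// an ancestor is already cached.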
+
+const DirectoryEntry *FileManager::getDirectory(StringRef DirName,
+ bool CacheFailure) {
+ // stat doesn't like trailing separators except for root directory.
+ // At least, on Win32 MSVCRT, stat() cannot strip trailing '/'.
+ // (though it can strip '\\')
+ if (DirName.size() > 1 &&
+ DirName != llvm::sys::path::root_path(DirName) &&
+ llvm::sys::path::is_separator(DirName.back()))
+ DirName = DirName.substr(0, DirName.size()-1);
+#ifdef LLVM_ON_WIN32
+ // Fixing a problem with "clang C:test.c" on Windows.
+ // Stat("C:") does not recognize "C:" as a valid directory
+ std::string DirNameStr;
+ if (DirName.size() > 1 && DirName.back() == ':' &&
+ DirName.equals_lower(llvm::sys::path::root_name(DirName))) {
+ DirNameStr = DirName.str() + '.';
+ DirName = DirNameStr;
+ }
+#endif
+
+ ++NumDirLookups;
+ auto &NamedDirEnt =
+ *SeenDirEntries.insert(std::make_pair(DirName, nullptr)).first;
+
+ // See if there was already an entry in the map. Note that the map
+ // contains both virtual and real directories.
+ if (NamedDirEnt.second)
+ return NamedDirEnt.second == NON_EXISTENT_DIR ? nullptr
+ : NamedDirEnt.second;
+
+ ++NumDirCacheMisses;
+
+ // By default, initialize it to invalid.
+ NamedDirEnt.second = NON_EXISTENT_DIR;
+
+ // Get the null-terminated directory name as stored as the key of the
+ // SeenDirEntries map.
+ const char *InterndDirName = NamedDirEnt.first().data();
+
+ // Check to see if the directory exists.
+ FileData Data;
+ if (getStatValue(InterndDirName, Data, false, nullptr /*directory lookup*/)) {
+ // There's no real directory at the given path.
+ if (!CacheFailure)
+ SeenDirEntries.erase(DirName);
+ return nullptr;
+ }
+
+ // It exists. See if we have already opened a directory with the
+ // same inode (this occurs on Unix-like systems when one dir is
+ // symlinked to another, for example) or the same path (on
+ // Windows).
+ DirectoryEntry &UDE = UniqueRealDirs[Data.UniqueID];
+
+ NamedDirEnt.second = &UDE;
+ if (!UDE.getName()) {
+ // We don't have this directory yet, add it. We use the string
+ // key from the SeenDirEntries map as the string.
+ UDE.Name = InterndDirName;
+ }
+
+ return &UDE;
+}
+
+const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
+ bool CacheFailure) {
+ ++NumFileLookups;
+
+ // See if there is already an entry in the map.
+ auto &NamedFileEnt =
+ *SeenFileEntries.insert(std::make_pair(Filename, nullptr)).first;
+
+ // See if there is already an entry in the map.
+ if (NamedFileEnt.second)
+ return NamedFileEnt.second == NON_EXISTENT_FILE ? nullptr
+ : NamedFileEnt.second;
+
+ ++NumFileCacheMisses;
+
+ // By default, initialize it to invalid.
+ NamedFileEnt.second = NON_EXISTENT_FILE;
+
+ // Get the null-terminated file name as stored as the key of the
+ // SeenFileEntries map.
+ const char *InterndFileName = NamedFileEnt.first().data();
+
+ // Look up the directory for the file. When looking up something like
+ // sys/foo.h we'll discover all of the search directories that have a 'sys'
+ // subdirectory. This will let us avoid having to waste time on known-to-fail
+ // searches when we go to find sys/bar.h, because all the search directories
+ // without a 'sys' subdir will get a cached failure result.
+ const DirectoryEntry *DirInfo = getDirectoryFromFile(*this, Filename,
+ CacheFailure);
+ if (DirInfo == nullptr) { // Directory doesn't exist, file can't exist.
+ if (!CacheFailure)
+ SeenFileEntries.erase(Filename);
+
+ return nullptr;
+ }
+
+ // FIXME: Use the directory info to prune this, before doing the stat syscall.
+ // FIXME: This will reduce the # syscalls.
+
+ // Nope, there isn't. Check to see if the file exists.
+ std::unique_ptr<vfs::File> F;
+ FileData Data;
+ if (getStatValue(InterndFileName, Data, true, openFile ? &F : nullptr)) {
+ // There's no real file at the given path.
+ if (!CacheFailure)
+ SeenFileEntries.erase(Filename);
+
+ return nullptr;
+ }
+
+ assert((openFile || !F) && "undesired open file");
+
+ // It exists. See if we have already opened a file with the same inode.
+ // This occurs when one dir is symlinked to another, for example.
+ FileEntry &UFE = UniqueRealFiles[Data.UniqueID];
+
+ NamedFileEnt.second = &UFE;
+
+ // If the name returned by getStatValue is different than Filename, re-intern
+ // the name.
+ if (Data.Name != Filename) {
+ auto &NamedFileEnt =
+ *SeenFileEntries.insert(std::make_pair(Data.Name, nullptr)).first;
+ if (!NamedFileEnt.second)
+ NamedFileEnt.second = &UFE;
+ else
+ assert(NamedFileEnt.second == &UFE &&
+ "filename from getStatValue() refers to wrong file");
+ InterndFileName = NamedFileEnt.first().data();
+ }
+
+ if (UFE.isValid()) { // Already have an entry with this inode, return it.
+
+ // FIXME: this hack ensures that if we look up a file by a virtual path in
+ // the VFS that the getDir() will have the virtual path, even if we found
+ // the file by a 'real' path first. This is required in order to find a
+ // module's structure when its headers/module map are mapped in the VFS.
+ // We should remove this as soon as we can properly support a file having
+ // multiple names.
+ if (DirInfo != UFE.Dir && Data.IsVFSMapped)
+ UFE.Dir = DirInfo;
+
+ // Always update the name to use the last name by which a file was accessed.
+ // FIXME: Neither this nor always using the first name is correct; we want
+ // to switch towards a design where we return a FileName object that
+ // encapsulates both the name by which the file was accessed and the
+ // corresponding FileEntry.
+ UFE.Name = InterndFileName;
+
+ return &UFE;
+ }
+
+ // Otherwise, we don't have this file yet, add it.
+ UFE.Name = InterndFileName;
+ UFE.Size = Data.Size;
+ UFE.ModTime = Data.ModTime;
+ UFE.Dir = DirInfo;
+ UFE.UID = NextFileUID++;
+ UFE.UniqueID = Data.UniqueID;
+ UFE.IsNamedPipe = Data.IsNamedPipe;
+ UFE.InPCH = Data.InPCH;
+ UFE.File = std::move(F);
+ UFE.IsValid = true;
+ return &UFE;
+}
+
+const FileEntry *
+FileManager::getVirtualFile(StringRef Filename, off_t Size,
+ time_t ModificationTime) {
+ ++NumFileLookups;
+
+ // See if there is already an entry in the map.
+ auto &NamedFileEnt =
+ *SeenFileEntries.insert(std::make_pair(Filename, nullptr)).first;
+
+ // See if there is already an entry in the map.
+ if (NamedFileEnt.second && NamedFileEnt.second != NON_EXISTENT_FILE)
+ return NamedFileEnt.second;
+
+ ++NumFileCacheMisses;
+
+ // By default, initialize it to invalid.
+ NamedFileEnt.second = NON_EXISTENT_FILE;
+
+ addAncestorsAsVirtualDirs(Filename);
+ FileEntry *UFE = nullptr;
+
+ // Now that all ancestors of Filename are in the cache, the
+ // following call is guaranteed to find the DirectoryEntry from the
+ // cache.
+ const DirectoryEntry *DirInfo = getDirectoryFromFile(*this, Filename,
+ /*CacheFailure=*/true);
+ assert(DirInfo &&
+ "The directory of a virtual file should already be in the cache.");
+
+  // Check to see if the file exists. If so, drop the virtual file entry.
+ FileData Data;
+ const char *InterndFileName = NamedFileEnt.first().data();
+ if (getStatValue(InterndFileName, Data, true, nullptr) == 0) {
+ Data.Size = Size;
+ Data.ModTime = ModificationTime;
+ UFE = &UniqueRealFiles[Data.UniqueID];
+
+ NamedFileEnt.second = UFE;
+
+ // If we had already opened this file, close it now so we don't
+ // leak the descriptor. We're not going to use the file
+ // descriptor anyway, since this is a virtual file.
+ if (UFE->File)
+ UFE->closeFile();
+
+ // If we already have an entry with this inode, return it.
+ if (UFE->isValid())
+ return UFE;
+
+ UFE->UniqueID = Data.UniqueID;
+ UFE->IsNamedPipe = Data.IsNamedPipe;
+ UFE->InPCH = Data.InPCH;
+ }
+
+ if (!UFE) {
+ VirtualFileEntries.push_back(llvm::make_unique<FileEntry>());
+ UFE = VirtualFileEntries.back().get();
+ NamedFileEnt.second = UFE;
+ }
+
+ UFE->Name = InterndFileName;
+ UFE->Size = Size;
+ UFE->ModTime = ModificationTime;
+ UFE->Dir = DirInfo;
+ UFE->UID = NextFileUID++;
+ UFE->File.reset();
+ return UFE;
+}
+
+bool FileManager::FixupRelativePath(SmallVectorImpl<char> &path) const {
+ StringRef pathRef(path.data(), path.size());
+
+ if (FileSystemOpts.WorkingDir.empty()
+ || llvm::sys::path::is_absolute(pathRef))
+ return false;
+
+ SmallString<128> NewPath(FileSystemOpts.WorkingDir);
+ llvm::sys::path::append(NewPath, pathRef);
+ path = NewPath;
+ return true;
+}
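+
+// For example, with FileSystemOpts.WorkingDir set to "/build" (hypothetical),
+// FixupRelativePath rewrites "foo/bar.c" to "/build/foo/bar.c" and returns
+// true; absolute paths are returned unchanged with a false result.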
+
+bool FileManager::makeAbsolutePath(SmallVectorImpl<char> &Path) const {
+ bool Changed = FixupRelativePath(Path);
+
+ if (!llvm::sys::path::is_absolute(StringRef(Path.data(), Path.size()))) {
+ llvm::sys::fs::make_absolute(Path);
+ Changed = true;
+ }
+
+ return Changed;
+}
+
+llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
+FileManager::getBufferForFile(const FileEntry *Entry, bool isVolatile,
+ bool ShouldCloseOpenFile) {
+ uint64_t FileSize = Entry->getSize();
+  // If there's a high enough chance that the file has changed since we
+  // got its size, force a stat before opening it.
+ if (isVolatile)
+ FileSize = -1;
+
+ const char *Filename = Entry->getName();
+ // If the file is already open, use the open file descriptor.
+ if (Entry->File) {
+ auto Result =
+ Entry->File->getBuffer(Filename, FileSize,
+ /*RequiresNullTerminator=*/true, isVolatile);
+ // FIXME: we need a set of APIs that can make guarantees about whether a
+ // FileEntry is open or not.
+ if (ShouldCloseOpenFile)
+ Entry->closeFile();
+ return Result;
+ }
+
+ // Otherwise, open the file.
+
+ if (FileSystemOpts.WorkingDir.empty())
+ return FS->getBufferForFile(Filename, FileSize,
+ /*RequiresNullTerminator=*/true, isVolatile);
+
+ SmallString<128> FilePath(Entry->getName());
+ FixupRelativePath(FilePath);
+ return FS->getBufferForFile(FilePath, FileSize,
+ /*RequiresNullTerminator=*/true, isVolatile);
+}
+
+llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
+FileManager::getBufferForFile(StringRef Filename) {
+ if (FileSystemOpts.WorkingDir.empty())
+ return FS->getBufferForFile(Filename);
+
+ SmallString<128> FilePath(Filename);
+ FixupRelativePath(FilePath);
+ return FS->getBufferForFile(FilePath.c_str());
+}
+
+/// getStatValue - Get the 'stat' information for the specified path,
+/// using the cache to accelerate it if possible. This returns true
+/// if the path points to a virtual file or does not exist, or returns
+/// false if it is an existing real file. If FileDescriptor is NULL,
+/// do directory look-up instead of file look-up.
+bool FileManager::getStatValue(const char *Path, FileData &Data, bool isFile,
+ std::unique_ptr<vfs::File> *F) {
+ // FIXME: FileSystemOpts shouldn't be passed in here, all paths should be
+ // absolute!
+ if (FileSystemOpts.WorkingDir.empty())
+    return FileSystemStatCache::get(Path, Data, isFile, F, StatCache.get(),
+                                    *FS);
+
+ SmallString<128> FilePath(Path);
+ FixupRelativePath(FilePath);
+
+ return FileSystemStatCache::get(FilePath.c_str(), Data, isFile, F,
+ StatCache.get(), *FS);
+}
+
+bool FileManager::getNoncachedStatValue(StringRef Path,
+ vfs::Status &Result) {
+ SmallString<128> FilePath(Path);
+ FixupRelativePath(FilePath);
+
+ llvm::ErrorOr<vfs::Status> S = FS->status(FilePath.c_str());
+ if (!S)
+ return true;
+ Result = *S;
+ return false;
+}
+
+void FileManager::invalidateCache(const FileEntry *Entry) {
+ assert(Entry && "Cannot invalidate a NULL FileEntry");
+
+ SeenFileEntries.erase(Entry->getName());
+
+ // FileEntry invalidation should not block future optimizations in the file
+ // caches. Possible alternatives are cache truncation (invalidate last N) or
+ // invalidation of the whole cache.
+ UniqueRealFiles.erase(Entry->getUniqueID());
+}
+
+
+void FileManager::GetUniqueIDMapping(
+ SmallVectorImpl<const FileEntry *> &UIDToFiles) const {
+ UIDToFiles.clear();
+ UIDToFiles.resize(NextFileUID);
+
+ // Map file entries
+ for (llvm::StringMap<FileEntry*, llvm::BumpPtrAllocator>::const_iterator
+ FE = SeenFileEntries.begin(), FEEnd = SeenFileEntries.end();
+ FE != FEEnd; ++FE)
+ if (FE->getValue() && FE->getValue() != NON_EXISTENT_FILE)
+ UIDToFiles[FE->getValue()->getUID()] = FE->getValue();
+
+ // Map virtual file entries
+ for (const auto &VFE : VirtualFileEntries)
+ if (VFE && VFE.get() != NON_EXISTENT_FILE)
+ UIDToFiles[VFE->getUID()] = VFE.get();
+}
+
+void FileManager::modifyFileEntry(FileEntry *File,
+ off_t Size, time_t ModificationTime) {
+ File->Size = Size;
+ File->ModTime = ModificationTime;
+}
+
+StringRef FileManager::getCanonicalName(const DirectoryEntry *Dir) {
+ // FIXME: use llvm::sys::fs::canonical() when it gets implemented
+ llvm::DenseMap<const DirectoryEntry *, llvm::StringRef>::iterator Known
+ = CanonicalDirNames.find(Dir);
+ if (Known != CanonicalDirNames.end())
+ return Known->second;
+
+ StringRef CanonicalName(Dir->getName());
+
+#ifdef LLVM_ON_UNIX
+ char CanonicalNameBuf[PATH_MAX];
+ if (realpath(Dir->getName(), CanonicalNameBuf))
+ CanonicalName = StringRef(CanonicalNameBuf).copy(CanonicalNameStorage);
+#else
+ SmallString<256> CanonicalNameBuf(CanonicalName);
+ llvm::sys::fs::make_absolute(CanonicalNameBuf);
+ llvm::sys::path::native(CanonicalNameBuf);
+  // We've run into cases in the wild where '..' needs to be removed here,
+  // so remove it.
+ // On Windows, symlinks are significantly less prevalent, so removing
+ // '..' is pretty safe.
+ // Ideally we'd have an equivalent of `realpath` and could implement
+ // sys::fs::canonical across all the platforms.
+ llvm::sys::path::remove_dots(CanonicalNameBuf, /* remove_dot_dot */ true);
+ CanonicalName = StringRef(CanonicalNameBuf).copy(CanonicalNameStorage);
+#endif
+
+ CanonicalDirNames.insert(std::make_pair(Dir, CanonicalName));
+ return CanonicalName;
+}
+
+void FileManager::PrintStats() const {
+ llvm::errs() << "\n*** File Manager Stats:\n";
+ llvm::errs() << UniqueRealFiles.size() << " real files found, "
+ << UniqueRealDirs.size() << " real dirs found.\n";
+ llvm::errs() << VirtualFileEntries.size() << " virtual files found, "
+ << VirtualDirectoryEntries.size() << " virtual dirs found.\n";
+ llvm::errs() << NumDirLookups << " dir lookups, "
+ << NumDirCacheMisses << " dir cache misses.\n";
+ llvm::errs() << NumFileLookups << " file lookups, "
+ << NumFileCacheMisses << " file cache misses.\n";
+
+ //llvm::errs() << PagesMapped << BytesOfPagesMapped << FSLookups;
+}
+
+// Virtual destructors for abstract base classes that need to live in Basic.
+PCHContainerWriter::~PCHContainerWriter() {}
+PCHContainerReader::~PCHContainerReader() {}
diff --git a/contrib/llvm/tools/clang/lib/Basic/FileSystemStatCache.cpp b/contrib/llvm/tools/clang/lib/Basic/FileSystemStatCache.cpp
new file mode 100644
index 0000000..187ea37
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/FileSystemStatCache.cpp
@@ -0,0 +1,126 @@
+//===--- FileSystemStatCache.cpp - Caching for 'stat' calls ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the FileSystemStatCache interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/FileSystemStatCache.h"
+#include "clang/Basic/VirtualFileSystem.h"
+#include "llvm/Support/Path.h"
+
+using namespace clang;
+
+void FileSystemStatCache::anchor() { }
+
+static void copyStatusToFileData(const vfs::Status &Status,
+ FileData &Data) {
+ Data.Name = Status.getName();
+ Data.Size = Status.getSize();
+ Data.ModTime = Status.getLastModificationTime().toEpochTime();
+ Data.UniqueID = Status.getUniqueID();
+ Data.IsDirectory = Status.isDirectory();
+ Data.IsNamedPipe = Status.getType() == llvm::sys::fs::file_type::fifo_file;
+ Data.InPCH = false;
+ Data.IsVFSMapped = Status.IsVFSMapped;
+}
+
+/// FileSystemStatCache::get - Get the 'stat' information for the specified
+/// path, using the cache to accelerate it if possible. This returns true if
+/// the path does not exist or false if it exists.
+///
+/// If isFile is true, then this lookup should only return success for files
+/// (not directories). If it is false this lookup should only return
+/// success for directories (not files). On a successful file lookup, the
+/// implementation can optionally fill in FileDescriptor with a valid
+/// descriptor and the client guarantees that it will close it.
+bool FileSystemStatCache::get(const char *Path, FileData &Data, bool isFile,
+ std::unique_ptr<vfs::File> *F,
+ FileSystemStatCache *Cache, vfs::FileSystem &FS) {
+ LookupResult R;
+ bool isForDir = !isFile;
+
+ // If we have a cache, use it to resolve the stat query.
+ if (Cache)
+ R = Cache->getStat(Path, Data, isFile, F, FS);
+ else if (isForDir || !F) {
+ // If this is a directory or a file descriptor is not needed and we have
+ // no cache, just go to the file system.
+ llvm::ErrorOr<vfs::Status> Status = FS.status(Path);
+ if (!Status) {
+ R = CacheMissing;
+ } else {
+ R = CacheExists;
+ copyStatusToFileData(*Status, Data);
+ }
+ } else {
+ // Otherwise, we have to go to the filesystem. We can always just use
+ // 'stat' here, but (for files) the client is asking whether the file exists
+ // because it wants to turn around and *open* it. It is more efficient to
+ // do "open+fstat" on success than it is to do "stat+open".
+ //
+ // Because of this, check to see if the file exists with 'open'. If the
+ // open succeeds, use fstat to get the stat info.
+ auto OwnedFile = FS.openFileForRead(Path);
+
+ if (!OwnedFile) {
+ // If the open fails, our "stat" fails.
+ R = CacheMissing;
+ } else {
+ // Otherwise, the open succeeded. Do an fstat to get the information
+ // about the file. We'll end up returning the open file descriptor to the
+ // client to do what they please with it.
+ llvm::ErrorOr<vfs::Status> Status = (*OwnedFile)->status();
+ if (Status) {
+ R = CacheExists;
+ copyStatusToFileData(*Status, Data);
+ *F = std::move(*OwnedFile);
+ } else {
+ // fstat rarely fails. If it does, claim the initial open didn't
+ // succeed.
+ R = CacheMissing;
+ *F = nullptr;
+ }
+ }
+ }
+
+ // If the path doesn't exist, return failure.
+ if (R == CacheMissing) return true;
+
+  // If the path exists, make sure that its "directoryness" matches the
+  // client's demands.
+ if (Data.IsDirectory != isForDir) {
+ // If not, close the file if opened.
+ if (F)
+ *F = nullptr;
+
+ return true;
+ }
+
+ return false;
+}
+
+MemorizeStatCalls::LookupResult
+MemorizeStatCalls::getStat(const char *Path, FileData &Data, bool isFile,
+ std::unique_ptr<vfs::File> *F, vfs::FileSystem &FS) {
+ LookupResult Result = statChained(Path, Data, isFile, F, FS);
+
+  // Do not cache failed stats; it is easy to construct common inconsistent
+ // situations if we do, and they are not important for PCH performance (which
+ // currently only needs the stats to construct the initial FileManager
+ // entries).
+ if (Result == CacheMissing)
+ return Result;
+
+  // Cache file 'stat' results and directories with absolute paths.
+ if (!Data.IsDirectory || llvm::sys::path::is_absolute(Path))
+ StatCalls[Path] = Data;
+
+ return Result;
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp b/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp
new file mode 100644
index 0000000..67de1cb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp
@@ -0,0 +1,663 @@
+//===--- IdentifierTable.cpp - Hash table for identifier lookup -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the IdentifierInfo, IdentifierVisitor, and
+// IdentifierTable interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/OperatorKinds.h"
+#include "clang/Basic/Specifiers.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdio>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// IdentifierInfo Implementation
+//===----------------------------------------------------------------------===//
+
+IdentifierInfo::IdentifierInfo() {
+ TokenID = tok::identifier;
+ ObjCOrBuiltinID = 0;
+ HasMacro = false;
+ HadMacro = false;
+ IsExtension = false;
+ IsFutureCompatKeyword = false;
+ IsPoisoned = false;
+ IsCPPOperatorKeyword = false;
+ NeedsHandleIdentifier = false;
+ IsFromAST = false;
+ ChangedAfterLoad = false;
+ RevertedTokenID = false;
+ OutOfDate = false;
+ IsModulesImport = false;
+ FETokenInfo = nullptr;
+ Entry = nullptr;
+}
+
+//===----------------------------------------------------------------------===//
+// IdentifierTable Implementation
+//===----------------------------------------------------------------------===//
+
+IdentifierIterator::~IdentifierIterator() { }
+
+IdentifierInfoLookup::~IdentifierInfoLookup() {}
+
+namespace {
+ /// \brief A simple identifier lookup iterator that represents an
+ /// empty sequence of identifiers.
+ class EmptyLookupIterator : public IdentifierIterator
+ {
+ public:
+ StringRef Next() override { return StringRef(); }
+ };
+}
+
+IdentifierIterator *IdentifierInfoLookup::getIdentifiers() {
+ return new EmptyLookupIterator();
+}
+
+IdentifierTable::IdentifierTable(const LangOptions &LangOpts,
+ IdentifierInfoLookup* externalLookup)
+ : HashTable(8192), // Start with space for 8K identifiers.
+ ExternalLookup(externalLookup) {
+
+ // Populate the identifier table with info about keywords for the current
+ // language.
+ AddKeywords(LangOpts);
+
+  // Add the 'import' contextual keyword.
+ get("import").setModulesImport(true);
+}
+
+//===----------------------------------------------------------------------===//
+// Language Keyword Implementation
+//===----------------------------------------------------------------------===//
+
+// Constants for TokenKinds.def
+namespace {
+ enum {
+ KEYC99 = 0x1,
+ KEYCXX = 0x2,
+ KEYCXX11 = 0x4,
+ KEYGNU = 0x8,
+ KEYMS = 0x10,
+ BOOLSUPPORT = 0x20,
+ KEYALTIVEC = 0x40,
+ KEYNOCXX = 0x80,
+ KEYBORLAND = 0x100,
+ KEYOPENCL = 0x200,
+ KEYC11 = 0x400,
+ KEYARC = 0x800,
+ KEYNOMS18 = 0x01000,
+ KEYNOOPENCL = 0x02000,
+ WCHARSUPPORT = 0x04000,
+ HALFSUPPORT = 0x08000,
+ KEYCONCEPTS = 0x10000,
+ KEYOBJC2 = 0x20000,
+ KEYZVECTOR = 0x40000,
+ KEYCOROUTINES = 0x80000,
+ KEYALL = (0xfffff & ~KEYNOMS18 &
+ ~KEYNOOPENCL) // KEYNOMS18 and KEYNOOPENCL are used to exclude.
+ };
+
+ /// \brief How a keyword is treated in the selected standard.
+ enum KeywordStatus {
+ KS_Disabled, // Disabled
+ KS_Extension, // Is an extension
+ KS_Enabled, // Enabled
+ KS_Future // Is a keyword in future standard
+ };
+}
+
+/// \brief Translates flags as specified in TokenKinds.def into keyword status
+/// in the given language standard.
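+///
+/// For example (illustrative flag combination), an entry carrying
+/// KEYCXX|KEYC99 yields KS_Enabled under C++ or C99 and KS_Disabled in C89.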
+static KeywordStatus getKeywordStatus(const LangOptions &LangOpts,
+ unsigned Flags) {
+ if (Flags == KEYALL) return KS_Enabled;
+ if (LangOpts.CPlusPlus && (Flags & KEYCXX)) return KS_Enabled;
+ if (LangOpts.CPlusPlus11 && (Flags & KEYCXX11)) return KS_Enabled;
+ if (LangOpts.C99 && (Flags & KEYC99)) return KS_Enabled;
+ if (LangOpts.GNUKeywords && (Flags & KEYGNU)) return KS_Extension;
+ if (LangOpts.MicrosoftExt && (Flags & KEYMS)) return KS_Extension;
+ if (LangOpts.Borland && (Flags & KEYBORLAND)) return KS_Extension;
+ if (LangOpts.Bool && (Flags & BOOLSUPPORT)) return KS_Enabled;
+ if (LangOpts.Half && (Flags & HALFSUPPORT)) return KS_Enabled;
+ if (LangOpts.WChar && (Flags & WCHARSUPPORT)) return KS_Enabled;
+ if (LangOpts.AltiVec && (Flags & KEYALTIVEC)) return KS_Enabled;
+ if (LangOpts.OpenCL && (Flags & KEYOPENCL)) return KS_Enabled;
+ if (!LangOpts.CPlusPlus && (Flags & KEYNOCXX)) return KS_Enabled;
+ if (LangOpts.C11 && (Flags & KEYC11)) return KS_Enabled;
+ // We treat bridge casts as objective-C keywords so we can warn on them
+ // in non-arc mode.
+ if (LangOpts.ObjC2 && (Flags & KEYARC)) return KS_Enabled;
+ if (LangOpts.ConceptsTS && (Flags & KEYCONCEPTS)) return KS_Enabled;
+ if (LangOpts.ObjC2 && (Flags & KEYOBJC2)) return KS_Enabled;
+ if (LangOpts.Coroutines && (Flags & KEYCOROUTINES)) return KS_Enabled;
+ if (LangOpts.CPlusPlus && (Flags & KEYCXX11)) return KS_Future;
+ return KS_Disabled;
+}
+
+/// AddKeyword - This method is used to associate a token ID with specific
+/// identifiers because they are language keywords. This causes the lexer to
+/// automatically map matching identifiers to specialized token codes.
+static void AddKeyword(StringRef Keyword,
+ tok::TokenKind TokenCode, unsigned Flags,
+ const LangOptions &LangOpts, IdentifierTable &Table) {
+ KeywordStatus AddResult = getKeywordStatus(LangOpts, Flags);
+
+ // Don't add this keyword under MSVCCompat.
+ if (LangOpts.MSVCCompat && (Flags & KEYNOMS18) &&
+ !LangOpts.isCompatibleWithMSVC(LangOptions::MSVC2015))
+ return;
+
+ // Don't add this keyword under OpenCL.
+ if (LangOpts.OpenCL && (Flags & KEYNOOPENCL))
+ return;
+
+ // Don't add this keyword if disabled in this language.
+ if (AddResult == KS_Disabled) return;
+
+ IdentifierInfo &Info =
+ Table.get(Keyword, AddResult == KS_Future ? tok::identifier : TokenCode);
+ Info.setIsExtensionToken(AddResult == KS_Extension);
+ Info.setIsFutureCompatKeyword(AddResult == KS_Future);
+}
+
+/// AddCXXOperatorKeyword - Register an alternative representation of a C++
+/// operator keyword.
+static void AddCXXOperatorKeyword(StringRef Keyword,
+ tok::TokenKind TokenCode,
+ IdentifierTable &Table) {
+ IdentifierInfo &Info = Table.get(Keyword, TokenCode);
+ Info.setIsCPlusPlusOperatorKeyword();
+}
+
+/// AddObjCKeyword - Register an Objective-C \@keyword like "class",
+/// "selector", or "property".
+static void AddObjCKeyword(StringRef Name,
+ tok::ObjCKeywordKind ObjCID,
+ IdentifierTable &Table) {
+ Table.get(Name).setObjCKeywordID(ObjCID);
+}
+
+/// AddKeywords - Add all keywords to the symbol table.
+///
+void IdentifierTable::AddKeywords(const LangOptions &LangOpts) {
+ // Add keywords and tokens for the current language.
+#define KEYWORD(NAME, FLAGS) \
+ AddKeyword(StringRef(#NAME), tok::kw_ ## NAME, \
+ FLAGS, LangOpts, *this);
+#define ALIAS(NAME, TOK, FLAGS) \
+ AddKeyword(StringRef(NAME), tok::kw_ ## TOK, \
+ FLAGS, LangOpts, *this);
+#define CXX_KEYWORD_OPERATOR(NAME, ALIAS) \
+ if (LangOpts.CXXOperatorNames) \
+ AddCXXOperatorKeyword(StringRef(#NAME), tok::ALIAS, *this);
+#define OBJC1_AT_KEYWORD(NAME) \
+ if (LangOpts.ObjC1) \
+ AddObjCKeyword(StringRef(#NAME), tok::objc_##NAME, *this);
+#define OBJC2_AT_KEYWORD(NAME) \
+ if (LangOpts.ObjC2) \
+ AddObjCKeyword(StringRef(#NAME), tok::objc_##NAME, *this);
+#define TESTING_KEYWORD(NAME, FLAGS)
+#include "clang/Basic/TokenKinds.def"
+
+ if (LangOpts.ParseUnknownAnytype)
+ AddKeyword("__unknown_anytype", tok::kw___unknown_anytype, KEYALL,
+ LangOpts, *this);
+
+ if (LangOpts.DeclSpecKeyword)
+ AddKeyword("__declspec", tok::kw___declspec, KEYALL, LangOpts, *this);
+}
+
+/// \brief Checks if the specified token kind represents a keyword in the
+/// specified language.
+/// \returns Status of the keyword in the language.
+static KeywordStatus getTokenKwStatus(const LangOptions &LangOpts,
+ tok::TokenKind K) {
+ switch (K) {
+#define KEYWORD(NAME, FLAGS) \
+ case tok::kw_##NAME: return getKeywordStatus(LangOpts, FLAGS);
+#include "clang/Basic/TokenKinds.def"
+ default: return KS_Disabled;
+ }
+}
+
+/// \brief Returns true if the identifier represents a keyword in the
+/// specified language.
+bool IdentifierInfo::isKeyword(const LangOptions &LangOpts) {
+ switch (getTokenKwStatus(LangOpts, getTokenID())) {
+ case KS_Enabled:
+ case KS_Extension:
+ return true;
+ default:
+ return false;
+ }
+}
+
+tok::PPKeywordKind IdentifierInfo::getPPKeywordID() const {
+ // We use a perfect hash function here involving the length of the keyword,
+  // the first and third characters. For preprocessor IDs there are no
+ // collisions (if there were, the switch below would complain about duplicate
+ // case values). Note that this depends on 'if' being null terminated.
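+  //
+  // For example, "elif" hashes to HASH(4, 'e', 'i')
+  //   = (4 << 5) + ((('e' - 'a') + ('i' - 'a')) & 31) = 128 + 12 = 140.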
+
+#define HASH(LEN, FIRST, THIRD) \
+ (LEN << 5) + (((FIRST-'a') + (THIRD-'a')) & 31)
+#define CASE(LEN, FIRST, THIRD, NAME) \
+ case HASH(LEN, FIRST, THIRD): \
+ return memcmp(Name, #NAME, LEN) ? tok::pp_not_keyword : tok::pp_ ## NAME
+
+ unsigned Len = getLength();
+ if (Len < 2) return tok::pp_not_keyword;
+ const char *Name = getNameStart();
+ switch (HASH(Len, Name[0], Name[2])) {
+ default: return tok::pp_not_keyword;
+ CASE( 2, 'i', '\0', if);
+ CASE( 4, 'e', 'i', elif);
+ CASE( 4, 'e', 's', else);
+ CASE( 4, 'l', 'n', line);
+ CASE( 4, 's', 'c', sccs);
+ CASE( 5, 'e', 'd', endif);
+ CASE( 5, 'e', 'r', error);
+ CASE( 5, 'i', 'e', ident);
+ CASE( 5, 'i', 'd', ifdef);
+ CASE( 5, 'u', 'd', undef);
+
+ CASE( 6, 'a', 's', assert);
+ CASE( 6, 'd', 'f', define);
+ CASE( 6, 'i', 'n', ifndef);
+ CASE( 6, 'i', 'p', import);
+ CASE( 6, 'p', 'a', pragma);
+
+ CASE( 7, 'd', 'f', defined);
+ CASE( 7, 'i', 'c', include);
+ CASE( 7, 'w', 'r', warning);
+
+ CASE( 8, 'u', 'a', unassert);
+ CASE(12, 'i', 'c', include_next);
+
+ CASE(14, '_', 'p', __public_macro);
+
+ CASE(15, '_', 'p', __private_macro);
+
+ CASE(16, '_', 'i', __include_macros);
+#undef CASE
+#undef HASH
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Stats Implementation
+//===----------------------------------------------------------------------===//
+
+/// PrintStats - Print statistics about how well the identifier table is doing
+/// at hashing identifiers.
+void IdentifierTable::PrintStats() const {
+ unsigned NumBuckets = HashTable.getNumBuckets();
+ unsigned NumIdentifiers = HashTable.getNumItems();
+ unsigned NumEmptyBuckets = NumBuckets-NumIdentifiers;
+ unsigned AverageIdentifierSize = 0;
+ unsigned MaxIdentifierLength = 0;
+
+ // TODO: Figure out maximum times an identifier had to probe for -stats.
+ for (llvm::StringMap<IdentifierInfo*, llvm::BumpPtrAllocator>::const_iterator
+ I = HashTable.begin(), E = HashTable.end(); I != E; ++I) {
+ unsigned IdLen = I->getKeyLength();
+ AverageIdentifierSize += IdLen;
+ if (MaxIdentifierLength < IdLen)
+ MaxIdentifierLength = IdLen;
+ }
+
+ fprintf(stderr, "\n*** Identifier Table Stats:\n");
+ fprintf(stderr, "# Identifiers: %d\n", NumIdentifiers);
+ fprintf(stderr, "# Empty Buckets: %d\n", NumEmptyBuckets);
+ fprintf(stderr, "Hash density (#identifiers per bucket): %f\n",
+ NumIdentifiers/(double)NumBuckets);
+ fprintf(stderr, "Ave identifier length: %f\n",
+ (AverageIdentifierSize/(double)NumIdentifiers));
+ fprintf(stderr, "Max identifier length: %d\n", MaxIdentifierLength);
+
+ // Compute statistics about the memory allocated for identifiers.
+ HashTable.getAllocator().PrintStats();
+}
+
+//===----------------------------------------------------------------------===//
+// SelectorTable Implementation
+//===----------------------------------------------------------------------===//
+
+unsigned llvm::DenseMapInfo<clang::Selector>::getHashValue(clang::Selector S) {
+ return DenseMapInfo<void*>::getHashValue(S.getAsOpaquePtr());
+}
+
+namespace clang {
+/// MultiKeywordSelector - One of these variable-length records is kept for each
+/// selector containing more than one keyword. We use a folding set
+/// to unique aggregate names (keyword selectors in ObjC parlance). Access to
+/// this class is provided strictly through Selector.
+class MultiKeywordSelector
+ : public DeclarationNameExtra, public llvm::FoldingSetNode {
+ MultiKeywordSelector(unsigned nKeys) {
+ ExtraKindOrNumArgs = NUM_EXTRA_KINDS + nKeys;
+ }
+public:
+ // Constructor for keyword selectors.
+ MultiKeywordSelector(unsigned nKeys, IdentifierInfo **IIV) {
+ assert((nKeys > 1) && "not a multi-keyword selector");
+ ExtraKindOrNumArgs = NUM_EXTRA_KINDS + nKeys;
+
+ // Fill in the trailing keyword array.
+ IdentifierInfo **KeyInfo = reinterpret_cast<IdentifierInfo **>(this+1);
+ for (unsigned i = 0; i != nKeys; ++i)
+ KeyInfo[i] = IIV[i];
+ }
+
+ // getName - Derive the full selector name and return it.
+ std::string getName() const;
+
+ unsigned getNumArgs() const { return ExtraKindOrNumArgs - NUM_EXTRA_KINDS; }
+
+ typedef IdentifierInfo *const *keyword_iterator;
+ keyword_iterator keyword_begin() const {
+ return reinterpret_cast<keyword_iterator>(this+1);
+ }
+ keyword_iterator keyword_end() const {
+ return keyword_begin()+getNumArgs();
+ }
+ IdentifierInfo *getIdentifierInfoForSlot(unsigned i) const {
+ assert(i < getNumArgs() && "getIdentifierInfoForSlot(): illegal index");
+ return keyword_begin()[i];
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ keyword_iterator ArgTys, unsigned NumArgs) {
+ ID.AddInteger(NumArgs);
+ for (unsigned i = 0; i != NumArgs; ++i)
+ ID.AddPointer(ArgTys[i]);
+ }
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, keyword_begin(), getNumArgs());
+ }
+};
+} // end namespace clang.
+
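+// A Selector packs either an IdentifierInfo* (zero- and one-argument
+// selectors) or a MultiKeywordSelector* into InfoPtr, using its low two tag
+// bits to record which; getNumArgs() decodes that tag.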
+unsigned Selector::getNumArgs() const {
+ unsigned IIF = getIdentifierInfoFlag();
+ if (IIF <= ZeroArg)
+ return 0;
+ if (IIF == OneArg)
+ return 1;
+ // We point to a MultiKeywordSelector.
+ MultiKeywordSelector *SI = getMultiKeywordSelector();
+ return SI->getNumArgs();
+}
+
+IdentifierInfo *Selector::getIdentifierInfoForSlot(unsigned argIndex) const {
+ if (getIdentifierInfoFlag() < MultiArg) {
+ assert(argIndex == 0 && "illegal keyword index");
+ return getAsIdentifierInfo();
+ }
+ // We point to a MultiKeywordSelector.
+ MultiKeywordSelector *SI = getMultiKeywordSelector();
+ return SI->getIdentifierInfoForSlot(argIndex);
+}
+
+StringRef Selector::getNameForSlot(unsigned int argIndex) const {
+ IdentifierInfo *II = getIdentifierInfoForSlot(argIndex);
+ return II? II->getName() : StringRef();
+}
+
+std::string MultiKeywordSelector::getName() const {
+ SmallString<256> Str;
+ llvm::raw_svector_ostream OS(Str);
+ for (keyword_iterator I = keyword_begin(), E = keyword_end(); I != E; ++I) {
+ if (*I)
+ OS << (*I)->getName();
+ OS << ':';
+ }
+
+ return OS.str();
+}
+
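+/// For example, a selector built from the keywords "insertObject" and
+/// "atIndex" prints as "insertObject:atIndex:"; a zero-argument selector
+/// prints as its bare name.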
+std::string Selector::getAsString() const {
+ if (InfoPtr == 0)
+ return "<null selector>";
+
+ if (getIdentifierInfoFlag() < MultiArg) {
+ IdentifierInfo *II = getAsIdentifierInfo();
+
+ // If the number of arguments is 0 then II is guaranteed to not be null.
+ if (getNumArgs() == 0)
+ return II->getName();
+
+ if (!II)
+ return ":";
+
+ return II->getName().str() + ":";
+ }
+
+ // We have a multiple keyword selector.
+ return getMultiKeywordSelector()->getName();
+}
+
+void Selector::print(llvm::raw_ostream &OS) const {
+ OS << getAsString();
+}
+
+/// Interpreting the given string using the normal CamelCase
+/// conventions, determine whether the given string starts with the
+/// given "word", which is assumed to end in a lowercase letter.
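+///
+/// For example, startsWithWord("initWithFrame", "init") is true, while
+/// startsWithWord("initialize", "init") is false because a lowercase letter
+/// follows the matched word.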
+static bool startsWithWord(StringRef name, StringRef word) {
+ if (name.size() < word.size()) return false;
+ return ((name.size() == word.size() || !isLowercase(name[word.size()])) &&
+ name.startswith(word));
+}
+
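+// Classify a selector by its first slot; e.g. (illustrative) the selector
+// "_initWithName:" has its leading underscores stripped and then matches the
+// "init" word, yielding OMF_init.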
+ObjCMethodFamily Selector::getMethodFamilyImpl(Selector sel) {
+ IdentifierInfo *first = sel.getIdentifierInfoForSlot(0);
+ if (!first) return OMF_None;
+
+ StringRef name = first->getName();
+ if (sel.isUnarySelector()) {
+ if (name == "autorelease") return OMF_autorelease;
+ if (name == "dealloc") return OMF_dealloc;
+ if (name == "finalize") return OMF_finalize;
+ if (name == "release") return OMF_release;
+ if (name == "retain") return OMF_retain;
+ if (name == "retainCount") return OMF_retainCount;
+ if (name == "self") return OMF_self;
+ if (name == "initialize") return OMF_initialize;
+ }
+
+ if (name == "performSelector") return OMF_performSelector;
+
+ // The other method families may begin with a prefix of underscores.
+ while (!name.empty() && name.front() == '_')
+ name = name.substr(1);
+
+ if (name.empty()) return OMF_None;
+ switch (name.front()) {
+ case 'a':
+ if (startsWithWord(name, "alloc")) return OMF_alloc;
+ break;
+ case 'c':
+ if (startsWithWord(name, "copy")) return OMF_copy;
+ break;
+ case 'i':
+ if (startsWithWord(name, "init")) return OMF_init;
+ break;
+ case 'm':
+ if (startsWithWord(name, "mutableCopy")) return OMF_mutableCopy;
+ break;
+ case 'n':
+ if (startsWithWord(name, "new")) return OMF_new;
+ break;
+ default:
+ break;
+ }
+
+ return OMF_None;
+}
+
+ObjCInstanceTypeFamily Selector::getInstTypeMethodFamily(Selector sel) {
+ IdentifierInfo *first = sel.getIdentifierInfoForSlot(0);
+ if (!first) return OIT_None;
+
+ StringRef name = first->getName();
+
+ if (name.empty()) return OIT_None;
+ switch (name.front()) {
+ case 'a':
+ if (startsWithWord(name, "array")) return OIT_Array;
+ break;
+ case 'd':
+ if (startsWithWord(name, "default")) return OIT_ReturnsSelf;
+ if (startsWithWord(name, "dictionary")) return OIT_Dictionary;
+ break;
+ case 's':
+ if (startsWithWord(name, "shared")) return OIT_ReturnsSelf;
+    if (startsWithWord(name, "standard")) return OIT_Singleton;
+    break;
+  case 'i':
+    if (startsWithWord(name, "init")) return OIT_Init;
+    break;
+  default:
+ break;
+ }
+ return OIT_None;
+}
+
+ObjCStringFormatFamily Selector::getStringFormatFamilyImpl(Selector sel) {
+ IdentifierInfo *first = sel.getIdentifierInfoForSlot(0);
+ if (!first) return SFF_None;
+
+ StringRef name = first->getName();
+
+  // Mirror the empty-name guard used in getInstTypeMethodFamily above.
+  if (name.empty()) return SFF_None;
+  switch (name.front()) {
+ case 'a':
+ if (name == "appendFormat") return SFF_NSString;
+ break;
+
+ case 'i':
+ if (name == "initWithFormat") return SFF_NSString;
+ break;
+
+ case 'l':
+ if (name == "localizedStringWithFormat") return SFF_NSString;
+ break;
+
+ case 's':
+ if (name == "stringByAppendingFormat" ||
+ name == "stringWithFormat") return SFF_NSString;
+ break;
+ }
+ return SFF_None;
+}
+
+namespace {
+ struct SelectorTableImpl {
+ llvm::FoldingSet<MultiKeywordSelector> Table;
+ llvm::BumpPtrAllocator Allocator;
+ };
+} // end anonymous namespace.
+
+static SelectorTableImpl &getSelectorTableImpl(void *P) {
+ return *static_cast<SelectorTableImpl*>(P);
+}
+
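+/// Build the conventional Objective-C setter name for a property; for
+/// example, constructSetterName("frame") yields "setFrame".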
+SmallString<64>
+SelectorTable::constructSetterName(StringRef Name) {
+ SmallString<64> SetterName("set");
+ SetterName += Name;
+ SetterName[3] = toUppercase(SetterName[3]);
+ return SetterName;
+}
+
+Selector
+SelectorTable::constructSetterSelector(IdentifierTable &Idents,
+ SelectorTable &SelTable,
+ const IdentifierInfo *Name) {
+ IdentifierInfo *SetterName =
+ &Idents.get(constructSetterName(Name->getName()));
+ return SelTable.getUnarySelector(SetterName);
+}
+
+size_t SelectorTable::getTotalMemory() const {
+ SelectorTableImpl &SelTabImpl = getSelectorTableImpl(Impl);
+ return SelTabImpl.Allocator.getTotalMemory();
+}
+
+Selector SelectorTable::getSelector(unsigned nKeys, IdentifierInfo **IIV) {
+ if (nKeys < 2)
+ return Selector(IIV[0], nKeys);
+
+ SelectorTableImpl &SelTabImpl = getSelectorTableImpl(Impl);
+
+  // Unique the selector, to guarantee there is one per name.
+ llvm::FoldingSetNodeID ID;
+ MultiKeywordSelector::Profile(ID, IIV, nKeys);
+
+ void *InsertPos = nullptr;
+ if (MultiKeywordSelector *SI =
+ SelTabImpl.Table.FindNodeOrInsertPos(ID, InsertPos))
+ return Selector(SI);
+
+  // MultiKeywordSelector objects are not allocated with new because they have
+  // a variable-size array (for parameter types) at the end of them.
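+  //
+  // The resulting allocation layout is (illustrative):
+  //   [ MultiKeywordSelector | IdentifierInfo*[nKeys] ]
+  // keyword_begin() recovers the trailing array via 'this + 1'.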
+ unsigned Size = sizeof(MultiKeywordSelector) + nKeys*sizeof(IdentifierInfo *);
+ MultiKeywordSelector *SI =
+ (MultiKeywordSelector*)SelTabImpl.Allocator.Allocate(Size,
+ llvm::alignOf<MultiKeywordSelector>());
+ new (SI) MultiKeywordSelector(nKeys, IIV);
+ SelTabImpl.Table.InsertNode(SI, InsertPos);
+ return Selector(SI);
+}
+
+SelectorTable::SelectorTable() {
+ Impl = new SelectorTableImpl();
+}
+
+SelectorTable::~SelectorTable() {
+ delete &getSelectorTableImpl(Impl);
+}
+
+const char *clang::getOperatorSpelling(OverloadedOperatorKind Operator) {
+ switch (Operator) {
+ case OO_None:
+ case NUM_OVERLOADED_OPERATORS:
+ return nullptr;
+
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ case OO_##Name: return Spelling;
+#include "clang/Basic/OperatorKinds.def"
+ }
+
+ llvm_unreachable("Invalid OverloadedOperatorKind!");
+}
+
+StringRef clang::getNullabilitySpelling(NullabilityKind kind,
+ bool isContextSensitive) {
+ switch (kind) {
+ case NullabilityKind::NonNull:
+ return isContextSensitive ? "nonnull" : "_Nonnull";
+
+ case NullabilityKind::Nullable:
+ return isContextSensitive ? "nullable" : "_Nullable";
+
+ case NullabilityKind::Unspecified:
+ return isContextSensitive ? "null_unspecified" : "_Null_unspecified";
+ }
+ llvm_unreachable("Unknown nullability kind.");
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/LangOptions.cpp b/contrib/llvm/tools/clang/lib/Basic/LangOptions.cpp
new file mode 100644
index 0000000..1b08b06
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/LangOptions.cpp
@@ -0,0 +1,46 @@
+//===--- LangOptions.cpp - C Language Family Language Options ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LangOptions class.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Basic/LangOptions.h"
+#include "llvm/ADT/StringRef.h"
+
+using namespace clang;
+
+LangOptions::LangOptions() {
+#define LANGOPT(Name, Bits, Default, Description) Name = Default;
+#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) set##Name(Default);
+#include "clang/Basic/LangOptions.def"
+}
+
+void LangOptions::resetNonModularOptions() {
+#define LANGOPT(Name, Bits, Default, Description)
+#define BENIGN_LANGOPT(Name, Bits, Default, Description) Name = Default;
+#define BENIGN_ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
+ Name = Default;
+#include "clang/Basic/LangOptions.def"
+
+ // FIXME: This should not be reset; modules can be different with different
+ // sanitizer options (this affects __has_feature(address_sanitizer) etc).
+ Sanitize.clear();
+ SanitizerBlacklistFiles.clear();
+
+ CurrentModule.clear();
+ ImplementationOfModule.clear();
+}
+
+bool LangOptions::isNoBuiltinFunc(const char *Name) const {
+ StringRef FuncName(Name);
+ for (unsigned i = 0, e = NoBuiltinFuncs.size(); i != e; ++i)
+ if (FuncName.equals(NoBuiltinFuncs[i]))
+ return true;
+ return false;
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/Module.cpp b/contrib/llvm/tools/clang/lib/Basic/Module.cpp
new file mode 100644
index 0000000..0b78326
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/Module.cpp
@@ -0,0 +1,536 @@
+//===--- Module.cpp - Describe a module -----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Module class, which describes a module in the source
+// code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Module.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+Module::Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent,
+ bool IsFramework, bool IsExplicit, unsigned VisibilityID)
+ : Name(Name), DefinitionLoc(DefinitionLoc), Parent(Parent), Directory(),
+ Umbrella(), Signature(0), ASTFile(nullptr), VisibilityID(VisibilityID),
+ IsMissingRequirement(false), HasIncompatibleModuleFile(false),
+ IsAvailable(true), IsFromModuleFile(false), IsFramework(IsFramework),
+ IsExplicit(IsExplicit), IsSystem(false), IsExternC(false),
+ IsInferred(false), InferSubmodules(false), InferExplicitSubmodules(false),
+ InferExportWildcard(false), ConfigMacrosExhaustive(false),
+ NameVisibility(Hidden) {
+ if (Parent) {
+ if (!Parent->isAvailable())
+ IsAvailable = false;
+ if (Parent->IsSystem)
+ IsSystem = true;
+ if (Parent->IsExternC)
+ IsExternC = true;
+ IsMissingRequirement = Parent->IsMissingRequirement;
+
+ Parent->SubModuleIndex[Name] = Parent->SubModules.size();
+ Parent->SubModules.push_back(this);
+ }
+}
+
+Module::~Module() {
+ for (submodule_iterator I = submodule_begin(), IEnd = submodule_end();
+ I != IEnd; ++I) {
+ delete *I;
+ }
+}
+
+/// \brief Determine whether a translation unit built using the current
+/// language options has the given feature.
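+///
+/// For example, a module map entry 'requires cplusplus' is satisfied exactly
+/// when LangOpts.CPlusPlus is set; unknown feature names fall through to the
+/// target and then to the -fmodule-feature list.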
+static bool hasFeature(StringRef Feature, const LangOptions &LangOpts,
+ const TargetInfo &Target) {
+ bool HasFeature = llvm::StringSwitch<bool>(Feature)
+ .Case("altivec", LangOpts.AltiVec)
+ .Case("blocks", LangOpts.Blocks)
+ .Case("cplusplus", LangOpts.CPlusPlus)
+ .Case("cplusplus11", LangOpts.CPlusPlus11)
+ .Case("objc", LangOpts.ObjC1)
+ .Case("objc_arc", LangOpts.ObjCAutoRefCount)
+ .Case("opencl", LangOpts.OpenCL)
+ .Case("tls", Target.isTLSSupported())
+ .Case("zvector", LangOpts.ZVector)
+ .Default(Target.hasFeature(Feature));
+ if (!HasFeature)
+ HasFeature = std::find(LangOpts.ModuleFeatures.begin(),
+ LangOpts.ModuleFeatures.end(),
+ Feature) != LangOpts.ModuleFeatures.end();
+ return HasFeature;
+}
+
+bool Module::isAvailable(const LangOptions &LangOpts, const TargetInfo &Target,
+ Requirement &Req,
+ UnresolvedHeaderDirective &MissingHeader) const {
+ if (IsAvailable)
+ return true;
+
+ for (const Module *Current = this; Current; Current = Current->Parent) {
+ for (unsigned I = 0, N = Current->Requirements.size(); I != N; ++I) {
+ if (hasFeature(Current->Requirements[I].first, LangOpts, Target) !=
+ Current->Requirements[I].second) {
+ Req = Current->Requirements[I];
+ return false;
+ }
+ }
+ if (!Current->MissingHeaders.empty()) {
+ MissingHeader = Current->MissingHeaders.front();
+ return false;
+ }
+ }
+
+ llvm_unreachable("could not find a reason why module is unavailable");
+}
+
+bool Module::isSubModuleOf(const Module *Other) const {
+ const Module *This = this;
+ do {
+ if (This == Other)
+ return true;
+
+ This = This->Parent;
+ } while (This);
+
+ return false;
+}
+
+const Module *Module::getTopLevelModule() const {
+ const Module *Result = this;
+ while (Result->Parent)
+ Result = Result->Parent;
+
+ return Result;
+}
+
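+// Walk from the innermost module to the root and join the names with '.';
+// e.g. a submodule Sub of top-level module Top yields "Top.Sub".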
+std::string Module::getFullModuleName() const {
+ SmallVector<StringRef, 2> Names;
+
+ // Build up the set of module names (from innermost to outermost).
+ for (const Module *M = this; M; M = M->Parent)
+ Names.push_back(M->Name);
+
+ std::string Result;
+ for (SmallVectorImpl<StringRef>::reverse_iterator I = Names.rbegin(),
+ IEnd = Names.rend();
+ I != IEnd; ++I) {
+ if (!Result.empty())
+ Result += '.';
+
+ Result += *I;
+ }
+
+ return Result;
+}
+
+bool Module::fullModuleNameIs(ArrayRef<StringRef> nameParts) const {
+ for (const Module *M = this; M; M = M->Parent) {
+ if (nameParts.empty() || M->Name != nameParts.back())
+ return false;
+ nameParts = nameParts.drop_back();
+ }
+ return nameParts.empty();
+}
+
+Module::DirectoryName Module::getUmbrellaDir() const {
+ if (Header U = getUmbrellaHeader())
+ return {"", U.Entry->getDir()};
+
+ return {UmbrellaAsWritten, Umbrella.dyn_cast<const DirectoryEntry *>()};
+}
+
+ArrayRef<const FileEntry *> Module::getTopHeaders(FileManager &FileMgr) {
+ if (!TopHeaderNames.empty()) {
+ for (std::vector<std::string>::iterator
+ I = TopHeaderNames.begin(), E = TopHeaderNames.end(); I != E; ++I) {
+ if (const FileEntry *FE = FileMgr.getFile(*I))
+ TopHeaders.insert(FE);
+ }
+ TopHeaderNames.clear();
+ }
+
+ return llvm::makeArrayRef(TopHeaders.begin(), TopHeaders.end());
+}
+
+bool Module::directlyUses(const Module *Requested) const {
+ auto *Top = getTopLevelModule();
+
+ // A top-level module implicitly uses itself.
+ if (Requested->isSubModuleOf(Top))
+ return true;
+
+ for (auto *Use : Top->DirectUses)
+ if (Requested->isSubModuleOf(Use))
+ return true;
+ return false;
+}
+
+void Module::addRequirement(StringRef Feature, bool RequiredState,
+ const LangOptions &LangOpts,
+ const TargetInfo &Target) {
+ Requirements.push_back(Requirement(Feature, RequiredState));
+
+ // If this feature is currently available, we're done.
+ if (hasFeature(Feature, LangOpts, Target) == RequiredState)
+ return;
+
+ markUnavailable(/*MissingRequirement*/true);
+}
+
+void Module::markUnavailable(bool MissingRequirement) {
+ auto needUpdate = [MissingRequirement](Module *M) {
+ return M->IsAvailable || (!M->IsMissingRequirement && MissingRequirement);
+ };
+
+ if (!needUpdate(this))
+ return;
+
+ SmallVector<Module *, 2> Stack;
+ Stack.push_back(this);
+ while (!Stack.empty()) {
+ Module *Current = Stack.back();
+ Stack.pop_back();
+
+ if (!needUpdate(Current))
+ continue;
+
+ Current->IsAvailable = false;
+ Current->IsMissingRequirement |= MissingRequirement;
+ for (submodule_iterator Sub = Current->submodule_begin(),
+ SubEnd = Current->submodule_end();
+ Sub != SubEnd; ++Sub) {
+ if (needUpdate(*Sub))
+ Stack.push_back(*Sub);
+ }
+ }
+}
+
+Module *Module::findSubmodule(StringRef Name) const {
+ llvm::StringMap<unsigned>::const_iterator Pos = SubModuleIndex.find(Name);
+ if (Pos == SubModuleIndex.end())
+ return nullptr;
+
+ return SubModules[Pos->getValue()];
+}
+
+static void printModuleId(raw_ostream &OS, const ModuleId &Id) {
+ for (unsigned I = 0, N = Id.size(); I != N; ++I) {
+ if (I)
+ OS << ".";
+ OS << Id[I].first;
+ }
+}
+
+void Module::getExportedModules(SmallVectorImpl<Module *> &Exported) const {
+ // All non-explicit submodules are exported.
+ for (std::vector<Module *>::const_iterator I = SubModules.begin(),
+ E = SubModules.end();
+ I != E; ++I) {
+ Module *Mod = *I;
+ if (!Mod->IsExplicit)
+ Exported.push_back(Mod);
+ }
+
+ // Find re-exported modules by filtering the list of imported modules.
+ bool AnyWildcard = false;
+ bool UnrestrictedWildcard = false;
+ SmallVector<Module *, 4> WildcardRestrictions;
+ for (unsigned I = 0, N = Exports.size(); I != N; ++I) {
+ Module *Mod = Exports[I].getPointer();
+ if (!Exports[I].getInt()) {
+ // Export a named module directly; no wildcards involved.
+ Exported.push_back(Mod);
+
+ continue;
+ }
+
+ // Wildcard export: export all of the imported modules that match
+ // the given pattern.
+ AnyWildcard = true;
+ if (UnrestrictedWildcard)
+ continue;
+
+ if (Module *Restriction = Exports[I].getPointer())
+ WildcardRestrictions.push_back(Restriction);
+ else {
+ WildcardRestrictions.clear();
+ UnrestrictedWildcard = true;
+ }
+ }
+
+ // If there were any wildcards, push any imported modules that were
+ // re-exported by the wildcard restriction.
+ if (!AnyWildcard)
+ return;
+
+ for (unsigned I = 0, N = Imports.size(); I != N; ++I) {
+ Module *Mod = Imports[I];
+ bool Acceptable = UnrestrictedWildcard;
+ if (!Acceptable) {
+ // Check whether this module meets one of the restrictions.
+ for (unsigned R = 0, NR = WildcardRestrictions.size(); R != NR; ++R) {
+ Module *Restriction = WildcardRestrictions[R];
+ if (Mod == Restriction || Mod->isSubModuleOf(Restriction)) {
+ Acceptable = true;
+ break;
+ }
+ }
+ }
+
+ if (!Acceptable)
+ continue;
+
+ Exported.push_back(Mod);
+ }
+}
+
+void Module::buildVisibleModulesCache() const {
+ assert(VisibleModulesCache.empty() && "cache does not need building");
+
+ // This module is visible to itself.
+ VisibleModulesCache.insert(this);
+
+ // Every imported module is visible.
+ SmallVector<Module *, 16> Stack(Imports.begin(), Imports.end());
+ while (!Stack.empty()) {
+ Module *CurrModule = Stack.pop_back_val();
+
+ // Every module transitively exported by an imported module is visible.
+ if (VisibleModulesCache.insert(CurrModule).second)
+ CurrModule->getExportedModules(Stack);
+ }
+}
+
+void Module::print(raw_ostream &OS, unsigned Indent) const {
+ OS.indent(Indent);
+ if (IsFramework)
+ OS << "framework ";
+ if (IsExplicit)
+ OS << "explicit ";
+ OS << "module " << Name;
+
+ if (IsSystem || IsExternC) {
+ OS.indent(Indent + 2);
+ if (IsSystem)
+ OS << " [system]";
+ if (IsExternC)
+ OS << " [extern_c]";
+ }
+
+ OS << " {\n";
+
+ if (!Requirements.empty()) {
+ OS.indent(Indent + 2);
+ OS << "requires ";
+ for (unsigned I = 0, N = Requirements.size(); I != N; ++I) {
+ if (I)
+ OS << ", ";
+ if (!Requirements[I].second)
+ OS << "!";
+ OS << Requirements[I].first;
+ }
+ OS << "\n";
+ }
+
+ if (Header H = getUmbrellaHeader()) {
+ OS.indent(Indent + 2);
+ OS << "umbrella header \"";
+ OS.write_escaped(H.NameAsWritten);
+ OS << "\"\n";
+ } else if (DirectoryName D = getUmbrellaDir()) {
+ OS.indent(Indent + 2);
+ OS << "umbrella \"";
+ OS.write_escaped(D.NameAsWritten);
+ OS << "\"\n";
+ }
+
+ if (!ConfigMacros.empty() || ConfigMacrosExhaustive) {
+ OS.indent(Indent + 2);
+ OS << "config_macros ";
+ if (ConfigMacrosExhaustive)
+ OS << "[exhaustive]";
+ for (unsigned I = 0, N = ConfigMacros.size(); I != N; ++I) {
+ if (I)
+ OS << ", ";
+ OS << ConfigMacros[I];
+ }
+ OS << "\n";
+ }
+
+ struct {
+ StringRef Prefix;
+ HeaderKind Kind;
+ } Kinds[] = {{"", HK_Normal},
+ {"textual ", HK_Textual},
+ {"private ", HK_Private},
+ {"private textual ", HK_PrivateTextual},
+ {"exclude ", HK_Excluded}};
+
+ for (auto &K : Kinds) {
+ for (auto &H : Headers[K.Kind]) {
+ OS.indent(Indent + 2);
+ OS << K.Prefix << "header \"";
+ OS.write_escaped(H.NameAsWritten);
+ OS << "\"\n";
+ }
+ }
+
+ for (submodule_const_iterator MI = submodule_begin(), MIEnd = submodule_end();
+ MI != MIEnd; ++MI)
+ // Print inferred subframework modules so that we don't need to re-infer
+ // them (requires expensive directory iteration + stat calls) when we build
+ // the module. Regular inferred submodules are OK, as we need to look at all
+ // those header files anyway.
+ if (!(*MI)->IsInferred || (*MI)->IsFramework)
+ (*MI)->print(OS, Indent + 2);
+
+ for (unsigned I = 0, N = Exports.size(); I != N; ++I) {
+ OS.indent(Indent + 2);
+ OS << "export ";
+ if (Module *Restriction = Exports[I].getPointer()) {
+ OS << Restriction->getFullModuleName();
+ if (Exports[I].getInt())
+ OS << ".*";
+ } else {
+ OS << "*";
+ }
+ OS << "\n";
+ }
+
+ for (unsigned I = 0, N = UnresolvedExports.size(); I != N; ++I) {
+ OS.indent(Indent + 2);
+ OS << "export ";
+ printModuleId(OS, UnresolvedExports[I].Id);
+ if (UnresolvedExports[I].Wildcard) {
+ if (UnresolvedExports[I].Id.empty())
+ OS << "*";
+ else
+ OS << ".*";
+ }
+ OS << "\n";
+ }
+
+ for (unsigned I = 0, N = DirectUses.size(); I != N; ++I) {
+ OS.indent(Indent + 2);
+ OS << "use ";
+ OS << DirectUses[I]->getFullModuleName();
+ OS << "\n";
+ }
+
+ for (unsigned I = 0, N = UnresolvedDirectUses.size(); I != N; ++I) {
+ OS.indent(Indent + 2);
+ OS << "use ";
+ printModuleId(OS, UnresolvedDirectUses[I]);
+ OS << "\n";
+ }
+
+ for (unsigned I = 0, N = LinkLibraries.size(); I != N; ++I) {
+ OS.indent(Indent + 2);
+ OS << "link ";
+ if (LinkLibraries[I].IsFramework)
+ OS << "framework ";
+ OS << "\"";
+ OS.write_escaped(LinkLibraries[I].Library);
+    OS << "\"\n";
+ }
+
+ for (unsigned I = 0, N = UnresolvedConflicts.size(); I != N; ++I) {
+ OS.indent(Indent + 2);
+ OS << "conflict ";
+ printModuleId(OS, UnresolvedConflicts[I].Id);
+ OS << ", \"";
+ OS.write_escaped(UnresolvedConflicts[I].Message);
+ OS << "\"\n";
+ }
+
+ for (unsigned I = 0, N = Conflicts.size(); I != N; ++I) {
+ OS.indent(Indent + 2);
+ OS << "conflict ";
+ OS << Conflicts[I].Other->getFullModuleName();
+ OS << ", \"";
+ OS.write_escaped(Conflicts[I].Message);
+ OS << "\"\n";
+ }
+
+ if (InferSubmodules) {
+ OS.indent(Indent + 2);
+ if (InferExplicitSubmodules)
+ OS << "explicit ";
+ OS << "module * {\n";
+ if (InferExportWildcard) {
+ OS.indent(Indent + 4);
+ OS << "export *\n";
+ }
+ OS.indent(Indent + 2);
+ OS << "}\n";
+ }
+
+ OS.indent(Indent);
+ OS << "}\n";
+}
+
+void Module::dump() const {
+ print(llvm::errs());
+}
+
+void VisibleModuleSet::setVisible(Module *M, SourceLocation Loc,
+ VisibleCallback Vis, ConflictCallback Cb) {
+ if (isVisible(M))
+ return;
+
+ ++Generation;
+
+ struct Visiting {
+ Module *M;
+ Visiting *ExportedBy;
+ };
+
+ std::function<void(Visiting)> VisitModule = [&](Visiting V) {
+ // Modules that aren't available cannot be made visible.
+ if (!V.M->isAvailable())
+ return;
+
+ // Nothing to do for a module that's already visible.
+ unsigned ID = V.M->getVisibilityID();
+ if (ImportLocs.size() <= ID)
+ ImportLocs.resize(ID + 1);
+ else if (ImportLocs[ID].isValid())
+ return;
+
+ ImportLocs[ID] = Loc;
+    Vis(V.M);
+
+ // Make any exported modules visible.
+ SmallVector<Module *, 16> Exports;
+ V.M->getExportedModules(Exports);
+ for (Module *E : Exports)
+ VisitModule({E, &V});
+
+ for (auto &C : V.M->Conflicts) {
+ if (isVisible(C.Other)) {
+ llvm::SmallVector<Module*, 8> Path;
+ for (Visiting *I = &V; I; I = I->ExportedBy)
+ Path.push_back(I->M);
+ Cb(Path, C.Other, C.Message);
+ }
+ }
+ };
+ VisitModule({M, nullptr});
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/ObjCRuntime.cpp b/contrib/llvm/tools/clang/lib/Basic/ObjCRuntime.cpp
new file mode 100644
index 0000000..133c669
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/ObjCRuntime.cpp
@@ -0,0 +1,93 @@
+//===- ObjCRuntime.cpp - Objective-C Runtime Handling -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ObjCRuntime class, which represents the
+// target Objective-C runtime.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Basic/ObjCRuntime.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+std::string ObjCRuntime::getAsString() const {
+ std::string Result;
+ {
+ llvm::raw_string_ostream Out(Result);
+ Out << *this;
+ }
+ return Result;
+}
+
+raw_ostream &clang::operator<<(raw_ostream &out, const ObjCRuntime &value) {
+ switch (value.getKind()) {
+ case ObjCRuntime::MacOSX: out << "macosx"; break;
+ case ObjCRuntime::FragileMacOSX: out << "macosx-fragile"; break;
+ case ObjCRuntime::iOS: out << "ios"; break;
+ case ObjCRuntime::WatchOS: out << "watchos"; break;
+ case ObjCRuntime::GNUstep: out << "gnustep"; break;
+ case ObjCRuntime::GCC: out << "gcc"; break;
+ case ObjCRuntime::ObjFW: out << "objfw"; break;
+ }
+ if (value.getVersion() > VersionTuple(0)) {
+ out << '-' << value.getVersion();
+ }
+ return out;
+}
+
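+// Accepts strings such as "macosx-10.9", "macosx-fragile", or "gnustep"
+// (examples illustrative; a missing version falls back to a per-runtime
+// default). Returns true on parse failure.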
+bool ObjCRuntime::tryParse(StringRef input) {
+ // Look for the last dash.
+ std::size_t dash = input.rfind('-');
+
+ // We permit dashes in the runtime name, and we also permit the
+ // version to be omitted, so if we see a dash not followed by a
+ // digit then we need to ignore it.
+ if (dash != StringRef::npos && dash + 1 != input.size() &&
+ (input[dash+1] < '0' || input[dash+1] > '9')) {
+ dash = StringRef::npos;
+ }
+
+ // Everything prior to that must be a valid string name.
+ Kind kind;
+ StringRef runtimeName = input.substr(0, dash);
+ Version = VersionTuple(0);
+ if (runtimeName == "macosx") {
+ kind = ObjCRuntime::MacOSX;
+ } else if (runtimeName == "macosx-fragile") {
+ kind = ObjCRuntime::FragileMacOSX;
+ } else if (runtimeName == "ios") {
+ kind = ObjCRuntime::iOS;
+ } else if (runtimeName == "watchos") {
+ kind = ObjCRuntime::WatchOS;
+ } else if (runtimeName == "gnustep") {
+ // If no version is specified then default to the most recent one that we
+ // know about.
+ Version = VersionTuple(1, 6);
+ kind = ObjCRuntime::GNUstep;
+ } else if (runtimeName == "gcc") {
+ kind = ObjCRuntime::GCC;
+ } else if (runtimeName == "objfw") {
+ kind = ObjCRuntime::ObjFW;
+ Version = VersionTuple(0, 8);
+ } else {
+ return true;
+ }
+ TheKind = kind;
+
+ if (dash != StringRef::npos) {
+ StringRef verString = input.substr(dash + 1);
+ if (Version.tryParse(verString))
+ return true;
+ }
+
+ if (kind == ObjCRuntime::ObjFW && Version > VersionTuple(0, 8))
+ Version = VersionTuple(0, 8);
+
+ return false;
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/OpenMPKinds.cpp b/contrib/llvm/tools/clang/lib/Basic/OpenMPKinds.cpp
new file mode 100644
index 0000000..577132d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/OpenMPKinds.cpp
@@ -0,0 +1,538 @@
+//===--- OpenMPKinds.cpp - OpenMP Kinds Support ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// \brief This file implements the OpenMP enum and support functions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/OpenMPKinds.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+
+using namespace clang;
+
+OpenMPDirectiveKind clang::getOpenMPDirectiveKind(StringRef Str) {
+ return llvm::StringSwitch<OpenMPDirectiveKind>(Str)
+#define OPENMP_DIRECTIVE(Name) .Case(#Name, OMPD_##Name)
+#define OPENMP_DIRECTIVE_EXT(Name, Str) .Case(Str, OMPD_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPD_unknown);
+}
+
+const char *clang::getOpenMPDirectiveName(OpenMPDirectiveKind Kind) {
+ assert(Kind <= OMPD_unknown);
+ switch (Kind) {
+ case OMPD_unknown:
+ return "unknown";
+#define OPENMP_DIRECTIVE(Name) \
+ case OMPD_##Name: \
+ return #Name;
+#define OPENMP_DIRECTIVE_EXT(Name, Str) \
+ case OMPD_##Name: \
+ return Str;
+#include "clang/Basic/OpenMPKinds.def"
+ break;
+ }
+ llvm_unreachable("Invalid OpenMP directive kind");
+}
+
+OpenMPClauseKind clang::getOpenMPClauseKind(StringRef Str) {
+  // The 'flush' clause cannot be specified explicitly, because it is an
+  // implicit clause of the 'flush' directive. If the 'flush' clause is
+  // explicitly specified, the parser should generate a warning about extra
+  // tokens at the end of the directive.
+ if (Str == "flush")
+ return OMPC_unknown;
+ return llvm::StringSwitch<OpenMPClauseKind>(Str)
+#define OPENMP_CLAUSE(Name, Class) .Case(#Name, OMPC_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_unknown);
+}
+
+const char *clang::getOpenMPClauseName(OpenMPClauseKind Kind) {
+ assert(Kind <= OMPC_unknown);
+ switch (Kind) {
+ case OMPC_unknown:
+ return "unknown";
+#define OPENMP_CLAUSE(Name, Class) \
+ case OMPC_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ case OMPC_threadprivate:
+ return "threadprivate or thread local";
+ }
+ llvm_unreachable("Invalid OpenMP clause kind");
+}
+
+unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
+ StringRef Str) {
+ switch (Kind) {
+ case OMPC_default:
+ return llvm::StringSwitch<OpenMPDefaultClauseKind>(Str)
+#define OPENMP_DEFAULT_KIND(Name) .Case(#Name, OMPC_DEFAULT_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_DEFAULT_unknown);
+ case OMPC_proc_bind:
+ return llvm::StringSwitch<OpenMPProcBindClauseKind>(Str)
+#define OPENMP_PROC_BIND_KIND(Name) .Case(#Name, OMPC_PROC_BIND_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_PROC_BIND_unknown);
+ case OMPC_schedule:
+ return llvm::StringSwitch<unsigned>(Str)
+#define OPENMP_SCHEDULE_KIND(Name) \
+ .Case(#Name, static_cast<unsigned>(OMPC_SCHEDULE_##Name))
+#define OPENMP_SCHEDULE_MODIFIER(Name) \
+ .Case(#Name, static_cast<unsigned>(OMPC_SCHEDULE_MODIFIER_##Name))
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_SCHEDULE_unknown);
+ case OMPC_depend:
+ return llvm::StringSwitch<OpenMPDependClauseKind>(Str)
+#define OPENMP_DEPEND_KIND(Name) .Case(#Name, OMPC_DEPEND_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_DEPEND_unknown);
+ case OMPC_linear:
+ return llvm::StringSwitch<OpenMPLinearClauseKind>(Str)
+#define OPENMP_LINEAR_KIND(Name) .Case(#Name, OMPC_LINEAR_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_LINEAR_unknown);
+ case OMPC_map:
+ return llvm::StringSwitch<OpenMPMapClauseKind>(Str)
+#define OPENMP_MAP_KIND(Name) .Case(#Name, OMPC_MAP_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_MAP_unknown);
+ case OMPC_unknown:
+ case OMPC_threadprivate:
+ case OMPC_if:
+ case OMPC_final:
+ case OMPC_num_threads:
+ case OMPC_safelen:
+ case OMPC_simdlen:
+ case OMPC_collapse:
+ case OMPC_private:
+ case OMPC_firstprivate:
+ case OMPC_lastprivate:
+ case OMPC_shared:
+ case OMPC_reduction:
+ case OMPC_aligned:
+ case OMPC_copyin:
+ case OMPC_copyprivate:
+ case OMPC_ordered:
+ case OMPC_nowait:
+ case OMPC_untied:
+ case OMPC_mergeable:
+ case OMPC_flush:
+ case OMPC_read:
+ case OMPC_write:
+ case OMPC_update:
+ case OMPC_capture:
+ case OMPC_seq_cst:
+ case OMPC_device:
+ case OMPC_threads:
+ case OMPC_simd:
+ case OMPC_num_teams:
+ case OMPC_thread_limit:
+ case OMPC_priority:
+ case OMPC_grainsize:
+ case OMPC_nogroup:
+ case OMPC_num_tasks:
+ case OMPC_hint:
+ break;
+ }
+ llvm_unreachable("Invalid OpenMP simple clause kind");
+}
+
+const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
+ unsigned Type) {
+ switch (Kind) {
+ case OMPC_default:
+ switch (Type) {
+ case OMPC_DEFAULT_unknown:
+ return "unknown";
+#define OPENMP_DEFAULT_KIND(Name) \
+ case OMPC_DEFAULT_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'default' clause type");
+ case OMPC_proc_bind:
+ switch (Type) {
+ case OMPC_PROC_BIND_unknown:
+ return "unknown";
+#define OPENMP_PROC_BIND_KIND(Name) \
+ case OMPC_PROC_BIND_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'proc_bind' clause type");
+ case OMPC_schedule:
+ switch (Type) {
+ case OMPC_SCHEDULE_unknown:
+ case OMPC_SCHEDULE_MODIFIER_last:
+ return "unknown";
+#define OPENMP_SCHEDULE_KIND(Name) \
+ case OMPC_SCHEDULE_##Name: \
+ return #Name;
+#define OPENMP_SCHEDULE_MODIFIER(Name) \
+ case OMPC_SCHEDULE_MODIFIER_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'schedule' clause type");
+ case OMPC_depend:
+ switch (Type) {
+ case OMPC_DEPEND_unknown:
+ return "unknown";
+#define OPENMP_DEPEND_KIND(Name) \
+ case OMPC_DEPEND_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'depend' clause type");
+ case OMPC_linear:
+ switch (Type) {
+ case OMPC_LINEAR_unknown:
+ return "unknown";
+#define OPENMP_LINEAR_KIND(Name) \
+ case OMPC_LINEAR_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'linear' clause type");
+ case OMPC_map:
+ switch (Type) {
+ case OMPC_MAP_unknown:
+ return "unknown";
+#define OPENMP_MAP_KIND(Name) \
+ case OMPC_MAP_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ llvm_unreachable("Invalid OpenMP 'map' clause type");
+ case OMPC_unknown:
+ case OMPC_threadprivate:
+ case OMPC_if:
+ case OMPC_final:
+ case OMPC_num_threads:
+ case OMPC_safelen:
+ case OMPC_simdlen:
+ case OMPC_collapse:
+ case OMPC_private:
+ case OMPC_firstprivate:
+ case OMPC_lastprivate:
+ case OMPC_shared:
+ case OMPC_reduction:
+ case OMPC_aligned:
+ case OMPC_copyin:
+ case OMPC_copyprivate:
+ case OMPC_ordered:
+ case OMPC_nowait:
+ case OMPC_untied:
+ case OMPC_mergeable:
+ case OMPC_flush:
+ case OMPC_read:
+ case OMPC_write:
+ case OMPC_update:
+ case OMPC_capture:
+ case OMPC_seq_cst:
+ case OMPC_device:
+ case OMPC_threads:
+ case OMPC_simd:
+ case OMPC_num_teams:
+ case OMPC_thread_limit:
+ case OMPC_priority:
+ case OMPC_grainsize:
+ case OMPC_nogroup:
+ case OMPC_num_tasks:
+ case OMPC_hint:
+ break;
+ }
+ llvm_unreachable("Invalid OpenMP simple clause kind");
+}
+
+bool clang::isAllowedClauseForDirective(OpenMPDirectiveKind DKind,
+ OpenMPClauseKind CKind) {
+ assert(DKind <= OMPD_unknown);
+ assert(CKind <= OMPC_unknown);
+ switch (DKind) {
+ case OMPD_parallel:
+ switch (CKind) {
+#define OPENMP_PARALLEL_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_simd:
+ switch (CKind) {
+#define OPENMP_SIMD_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_for:
+ switch (CKind) {
+#define OPENMP_FOR_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_for_simd:
+ switch (CKind) {
+#define OPENMP_FOR_SIMD_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_sections:
+ switch (CKind) {
+#define OPENMP_SECTIONS_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_single:
+ switch (CKind) {
+#define OPENMP_SINGLE_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_parallel_for:
+ switch (CKind) {
+#define OPENMP_PARALLEL_FOR_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_parallel_for_simd:
+ switch (CKind) {
+#define OPENMP_PARALLEL_FOR_SIMD_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_parallel_sections:
+ switch (CKind) {
+#define OPENMP_PARALLEL_SECTIONS_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_task:
+ switch (CKind) {
+#define OPENMP_TASK_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_flush:
+    return CKind == OMPC_flush;
+ case OMPD_atomic:
+ switch (CKind) {
+#define OPENMP_ATOMIC_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_target:
+ switch (CKind) {
+#define OPENMP_TARGET_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_target_data:
+ switch (CKind) {
+#define OPENMP_TARGET_DATA_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_teams:
+ switch (CKind) {
+#define OPENMP_TEAMS_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_cancel:
+ switch (CKind) {
+#define OPENMP_CANCEL_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_ordered:
+ switch (CKind) {
+#define OPENMP_ORDERED_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_taskloop:
+ switch (CKind) {
+#define OPENMP_TASKLOOP_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_taskloop_simd:
+ switch (CKind) {
+#define OPENMP_TASKLOOP_SIMD_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_critical:
+ switch (CKind) {
+#define OPENMP_CRITICAL_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_distribute:
+ switch (CKind) {
+#define OPENMP_DISTRIBUTE_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_unknown:
+ case OMPD_threadprivate:
+ case OMPD_section:
+ case OMPD_master:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_taskgroup:
+ case OMPD_cancellation_point:
+ break;
+ }
+ return false;
+}
+
+bool clang::isOpenMPLoopDirective(OpenMPDirectiveKind DKind) {
+ return DKind == OMPD_simd || DKind == OMPD_for || DKind == OMPD_for_simd ||
+ DKind == OMPD_parallel_for || DKind == OMPD_parallel_for_simd ||
+ DKind == OMPD_taskloop ||
+ DKind == OMPD_taskloop_simd ||
+ DKind == OMPD_distribute; // TODO add next directives.
+}
+
+bool clang::isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind) {
+ return DKind == OMPD_for || DKind == OMPD_for_simd ||
+ DKind == OMPD_sections || DKind == OMPD_section ||
+ DKind == OMPD_single || DKind == OMPD_parallel_for ||
+ DKind == OMPD_parallel_for_simd ||
+ DKind == OMPD_parallel_sections; // TODO add next directives.
+}
+
+bool clang::isOpenMPTaskLoopDirective(OpenMPDirectiveKind DKind) {
+ return DKind == OMPD_taskloop || DKind == OMPD_taskloop_simd;
+}
+
+bool clang::isOpenMPParallelDirective(OpenMPDirectiveKind DKind) {
+ return DKind == OMPD_parallel || DKind == OMPD_parallel_for ||
+ DKind == OMPD_parallel_for_simd ||
+ DKind == OMPD_parallel_sections; // TODO add next directives.
+}
+
+bool clang::isOpenMPTargetDirective(OpenMPDirectiveKind DKind) {
+ return DKind == OMPD_target; // TODO add next directives.
+}
+
+bool clang::isOpenMPTeamsDirective(OpenMPDirectiveKind DKind) {
+ return DKind == OMPD_teams; // TODO add next directives.
+}
+
+bool clang::isOpenMPSimdDirective(OpenMPDirectiveKind DKind) {
+ return DKind == OMPD_simd || DKind == OMPD_for_simd ||
+ DKind == OMPD_parallel_for_simd ||
+ DKind == OMPD_taskloop_simd; // TODO add next directives.
+}
+
+bool clang::isOpenMPDistributeDirective(OpenMPDirectiveKind Kind) {
+ return Kind == OMPD_distribute; // TODO add next directives.
+}
+
+bool clang::isOpenMPPrivate(OpenMPClauseKind Kind) {
+ return Kind == OMPC_private || Kind == OMPC_firstprivate ||
+ Kind == OMPC_lastprivate || Kind == OMPC_linear ||
+ Kind == OMPC_reduction; // TODO add next clauses like 'reduction'.
+}
+
+bool clang::isOpenMPThreadPrivate(OpenMPClauseKind Kind) {
+ return Kind == OMPC_threadprivate || Kind == OMPC_copyin;
+}
+
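
The switch-over-macro pattern in the directive/clause checks above is the usual X-macro idiom: OpenMPKinds.def defines every pairing once, and each OPENMP_*_CLAUSE(Name) expansion stamps out the case labels. A minimal self-contained sketch of the same pattern (editorial illustration; the inline COLOR_LIST stands in for a .def file):

// Minimal sketch of the X-macro pattern used with OpenMPKinds.def above; the
// COLOR_LIST macro stands in for the .def file, which in clang is #included
// once per expansion site.
#include <cassert>

#define COLOR_LIST(X) X(Red) X(Green) X(Blue)

enum ColorKind {
#define COLOR(Name) CK_##Name,
  COLOR_LIST(COLOR)
#undef COLOR
  CK_Unknown
};

static bool isPrimaryColor(ColorKind K) {
  switch (K) {
#define COLOR(Name)                                                            \
  case CK_##Name:                                                              \
    return true;
    COLOR_LIST(COLOR)
#undef COLOR
  default:
    return false;
  }
}

int main() {
  assert(isPrimaryColor(CK_Blue));
  assert(!isPrimaryColor(CK_Unknown));
}
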
diff --git a/contrib/llvm/tools/clang/lib/Basic/OperatorPrecedence.cpp b/contrib/llvm/tools/clang/lib/Basic/OperatorPrecedence.cpp
new file mode 100644
index 0000000..ade8d6d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/OperatorPrecedence.cpp
@@ -0,0 +1,76 @@
+//===--- OperatorPrecedence.cpp ---------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Defines and computes precedence levels for binary/ternary operators.
+///
+//===----------------------------------------------------------------------===//
+#include "clang/Basic/OperatorPrecedence.h"
+
+namespace clang {
+
+prec::Level getBinOpPrecedence(tok::TokenKind Kind, bool GreaterThanIsOperator,
+ bool CPlusPlus11) {
+ switch (Kind) {
+ case tok::greater:
+ // C++ [temp.names]p3:
+ // [...] When parsing a template-argument-list, the first
+ // non-nested > is taken as the ending delimiter rather than a
+ // greater-than operator. [...]
+ if (GreaterThanIsOperator)
+ return prec::Relational;
+ return prec::Unknown;
+
+ case tok::greatergreater:
+ // C++11 [temp.names]p3:
+ //
+ // [...] Similarly, the first non-nested >> is treated as two
+ // consecutive but distinct > tokens, the first of which is
+ // taken as the end of the template-argument-list and completes
+ // the template-id. [...]
+ if (GreaterThanIsOperator || !CPlusPlus11)
+ return prec::Shift;
+ return prec::Unknown;
+
+ default: return prec::Unknown;
+ case tok::comma: return prec::Comma;
+ case tok::equal:
+ case tok::starequal:
+ case tok::slashequal:
+ case tok::percentequal:
+ case tok::plusequal:
+ case tok::minusequal:
+ case tok::lesslessequal:
+ case tok::greatergreaterequal:
+ case tok::ampequal:
+ case tok::caretequal:
+ case tok::pipeequal: return prec::Assignment;
+ case tok::question: return prec::Conditional;
+ case tok::pipepipe: return prec::LogicalOr;
+ case tok::ampamp: return prec::LogicalAnd;
+ case tok::pipe: return prec::InclusiveOr;
+ case tok::caret: return prec::ExclusiveOr;
+ case tok::amp: return prec::And;
+ case tok::exclaimequal:
+ case tok::equalequal: return prec::Equality;
+ case tok::lessequal:
+ case tok::less:
+ case tok::greaterequal: return prec::Relational;
+ case tok::lessless: return prec::Shift;
+ case tok::plus:
+ case tok::minus: return prec::Additive;
+ case tok::percent:
+ case tok::slash:
+ case tok::star: return prec::Multiplicative;
+ case tok::periodstar:
+ case tok::arrowstar: return prec::PointerToMember;
+ }
+}
+
+} // namespace clang
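
A short usage sketch for getBinOpPrecedence (illustrative; assumes the clang headers are available on the include path). Returning prec::Unknown is what makes the expression parser stop so the template-argument-list parser can consume the '>':

// Usage sketch for getBinOpPrecedence (assumes clang headers are available).
#include "clang/Basic/OperatorPrecedence.h"
#include <cassert>

int main() {
  using namespace clang;
  // Ordinary expression context: '>' is the relational operator.
  assert(getBinOpPrecedence(tok::greater, /*GreaterThanIsOperator=*/true,
                            /*CPlusPlus11=*/true) == prec::Relational);
  // Inside a template-argument-list the parser clears GreaterThanIsOperator;
  // prec::Unknown tells it to stop binary-operator parsing at the '>'.
  assert(getBinOpPrecedence(tok::greater, /*GreaterThanIsOperator=*/false,
                            /*CPlusPlus11=*/true) == prec::Unknown);
  // In C++11 the same treatment applies to '>>' closing nested template-ids.
  assert(getBinOpPrecedence(tok::greatergreater, false, true) == prec::Unknown);
  assert(getBinOpPrecedence(tok::greatergreater, false, false) == prec::Shift);
}
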
diff --git a/contrib/llvm/tools/clang/lib/Basic/SanitizerBlacklist.cpp b/contrib/llvm/tools/clang/lib/Basic/SanitizerBlacklist.cpp
new file mode 100644
index 0000000..de78c94
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/SanitizerBlacklist.cpp
@@ -0,0 +1,46 @@
+//===--- SanitizerBlacklist.cpp - Blacklist for sanitizers ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// User-provided blacklist used to disable/alter instrumentation done in
+// sanitizers.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Basic/SanitizerBlacklist.h"
+
+using namespace clang;
+
+SanitizerBlacklist::SanitizerBlacklist(
+ const std::vector<std::string> &BlacklistPaths, SourceManager &SM)
+ : SCL(llvm::SpecialCaseList::createOrDie(BlacklistPaths)), SM(SM) {}
+
+bool SanitizerBlacklist::isBlacklistedGlobal(StringRef GlobalName,
+ StringRef Category) const {
+ return SCL->inSection("global", GlobalName, Category);
+}
+
+bool SanitizerBlacklist::isBlacklistedType(StringRef MangledTypeName,
+ StringRef Category) const {
+ return SCL->inSection("type", MangledTypeName, Category);
+}
+
+bool SanitizerBlacklist::isBlacklistedFunction(StringRef FunctionName) const {
+ return SCL->inSection("fun", FunctionName);
+}
+
+bool SanitizerBlacklist::isBlacklistedFile(StringRef FileName,
+ StringRef Category) const {
+ return SCL->inSection("src", FileName, Category);
+}
+
+bool SanitizerBlacklist::isBlacklistedLocation(SourceLocation Loc,
+ StringRef Category) const {
+ return Loc.isValid() &&
+ isBlacklistedFile(SM.getFilename(SM.getFileLoc(Loc)), Category);
+}
+
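
The section names queried above ("global", "type", "fun", "src") correspond to entry prefixes in the user-supplied special-case list. A hypothetical blacklist file (names and paths invented for illustration):

# Hypothetical file passed to clang via -fsanitize-blacklist=blacklist.txt
# Skip instrumenting one function:
fun:crashes_under_asan
# Skip everything compiled from third-party sources:
src:third_party/*
# Category-qualified entry: relax only "init" checking for this global:
global:global_with_unusual_init=init
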
diff --git a/contrib/llvm/tools/clang/lib/Basic/Sanitizers.cpp b/contrib/llvm/tools/clang/lib/Basic/Sanitizers.cpp
new file mode 100644
index 0000000..91b6b2d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/Sanitizers.cpp
@@ -0,0 +1,37 @@
+//===--- Sanitizers.cpp - C Language Family Language Options ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the classes from Sanitizers.h
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Basic/Sanitizers.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+
+using namespace clang;
+
+SanitizerMask clang::parseSanitizerValue(StringRef Value, bool AllowGroups) {
+ SanitizerMask ParsedKind = llvm::StringSwitch<SanitizerMask>(Value)
+#define SANITIZER(NAME, ID) .Case(NAME, SanitizerKind::ID)
+#define SANITIZER_GROUP(NAME, ID, ALIAS) \
+ .Case(NAME, AllowGroups ? SanitizerKind::ID##Group : 0)
+#include "clang/Basic/Sanitizers.def"
+ .Default(0);
+ return ParsedKind;
+}
+
+SanitizerMask clang::expandSanitizerGroups(SanitizerMask Kinds) {
+#define SANITIZER(NAME, ID)
+#define SANITIZER_GROUP(NAME, ID, ALIAS) \
+ if (Kinds & SanitizerKind::ID##Group) \
+ Kinds |= SanitizerKind::ID;
+#include "clang/Basic/Sanitizers.def"
+ return Kinds;
+}
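
A usage sketch for the two functions above (illustrative; assumes clang headers): plain sanitizer names always parse, group names such as "undefined" parse only when AllowGroups is set, and expandSanitizerGroups then folds a group bit into its member kinds:

// Usage sketch for parseSanitizerValue / expandSanitizerGroups
// (assumes clang headers are available).
#include "clang/Basic/Sanitizers.h"
#include <cassert>

int main() {
  using namespace clang;
  // Plain sanitizer names parse regardless of group support.
  assert(parseSanitizerValue("address", /*AllowGroups=*/false) ==
         SanitizerKind::Address);
  // Group names only parse when groups are allowed.
  assert(parseSanitizerValue("undefined", /*AllowGroups=*/false) == 0);
  SanitizerMask Group = parseSanitizerValue("undefined", /*AllowGroups=*/true);
  assert(Group != 0);
  // Expanding the group bit pulls in its member checks.
  SanitizerMask Expanded = expandSanitizerGroups(Group);
  assert(Expanded & SanitizerKind::SignedIntegerOverflow);
}
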
diff --git a/contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp b/contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp
new file mode 100644
index 0000000..d254e86
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp
@@ -0,0 +1,142 @@
+//==--- SourceLocation.cpp - Compact identifier for Source Files -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines accessor methods for the FullSourceLoc class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdio>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// PrettyStackTraceLoc
+//===----------------------------------------------------------------------===//
+
+void PrettyStackTraceLoc::print(raw_ostream &OS) const {
+ if (Loc.isValid()) {
+ Loc.print(OS, SM);
+ OS << ": ";
+ }
+ OS << Message << '\n';
+}
+
+//===----------------------------------------------------------------------===//
+// SourceLocation
+//===----------------------------------------------------------------------===//
+
+void SourceLocation::print(raw_ostream &OS, const SourceManager &SM) const {
+ if (!isValid()) {
+ OS << "<invalid loc>";
+ return;
+ }
+
+ if (isFileID()) {
+ PresumedLoc PLoc = SM.getPresumedLoc(*this);
+
+ if (PLoc.isInvalid()) {
+ OS << "<invalid>";
+ return;
+ }
+    // The macro expansion and spelling positions are identical for file locs.
+ OS << PLoc.getFilename() << ':' << PLoc.getLine()
+ << ':' << PLoc.getColumn();
+ return;
+ }
+
+ SM.getExpansionLoc(*this).print(OS, SM);
+
+ OS << " <Spelling=";
+ SM.getSpellingLoc(*this).print(OS, SM);
+ OS << '>';
+}
+
+LLVM_DUMP_METHOD std::string
+SourceLocation::printToString(const SourceManager &SM) const {
+ std::string S;
+ llvm::raw_string_ostream OS(S);
+ print(OS, SM);
+ return OS.str();
+}
+
+LLVM_DUMP_METHOD void SourceLocation::dump(const SourceManager &SM) const {
+ print(llvm::errs(), SM);
+}
+
+//===----------------------------------------------------------------------===//
+// FullSourceLoc
+//===----------------------------------------------------------------------===//
+
+FileID FullSourceLoc::getFileID() const {
+ assert(isValid());
+ return SrcMgr->getFileID(*this);
+}
+
+
+FullSourceLoc FullSourceLoc::getExpansionLoc() const {
+ assert(isValid());
+ return FullSourceLoc(SrcMgr->getExpansionLoc(*this), *SrcMgr);
+}
+
+FullSourceLoc FullSourceLoc::getSpellingLoc() const {
+ assert(isValid());
+ return FullSourceLoc(SrcMgr->getSpellingLoc(*this), *SrcMgr);
+}
+
+unsigned FullSourceLoc::getExpansionLineNumber(bool *Invalid) const {
+ assert(isValid());
+ return SrcMgr->getExpansionLineNumber(*this, Invalid);
+}
+
+unsigned FullSourceLoc::getExpansionColumnNumber(bool *Invalid) const {
+ assert(isValid());
+ return SrcMgr->getExpansionColumnNumber(*this, Invalid);
+}
+
+unsigned FullSourceLoc::getSpellingLineNumber(bool *Invalid) const {
+ assert(isValid());
+ return SrcMgr->getSpellingLineNumber(*this, Invalid);
+}
+
+unsigned FullSourceLoc::getSpellingColumnNumber(bool *Invalid) const {
+ assert(isValid());
+ return SrcMgr->getSpellingColumnNumber(*this, Invalid);
+}
+
+bool FullSourceLoc::isInSystemHeader() const {
+ assert(isValid());
+ return SrcMgr->isInSystemHeader(*this);
+}
+
+bool FullSourceLoc::isBeforeInTranslationUnitThan(SourceLocation Loc) const {
+ assert(isValid());
+ return SrcMgr->isBeforeInTranslationUnit(*this, Loc);
+}
+
+LLVM_DUMP_METHOD void FullSourceLoc::dump() const {
+ SourceLocation::dump(*SrcMgr);
+}
+
+const char *FullSourceLoc::getCharacterData(bool *Invalid) const {
+ assert(isValid());
+ return SrcMgr->getCharacterData(*this, Invalid);
+}
+
+StringRef FullSourceLoc::getBufferData(bool *Invalid) const {
+ assert(isValid());
+ return SrcMgr->getBuffer(SrcMgr->getFileID(*this), Invalid)->getBuffer();
+}
+
+std::pair<FileID, unsigned> FullSourceLoc::getDecomposedLoc() const {
+ return SrcMgr->getDecomposedLoc(*this);
+}
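
A small sketch of how the accessors above are typically used (SM and Loc are assumed to come from a live compiler instance; reportLoc is a hypothetical helper):

// Sketch: FullSourceLoc bundles a SourceLocation with its SourceManager, so a
// valid pair gives direct access to line/column info.
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/Support/raw_ostream.h"

void reportLoc(clang::SourceManager &SM, clang::SourceLocation Loc) {
  clang::FullSourceLoc Full(Loc, SM);
  if (!Full.isValid())
    return; // the accessors above assert on invalid locations
  llvm::errs() << Full.getSpellingLineNumber() << ':'
               << Full.getSpellingColumnNumber();
  if (Full.isInSystemHeader())
    llvm::errs() << " (system header)";
  llvm::errs() << '\n';
}
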
diff --git a/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp b/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp
new file mode 100644
index 0000000..4c50161
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp
@@ -0,0 +1,2234 @@
+//===--- SourceManager.cpp - Track and cache source files -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SourceManager interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManagerInternals.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/Capacity.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cstring>
+#include <string>
+
+using namespace clang;
+using namespace SrcMgr;
+using llvm::MemoryBuffer;
+
+//===----------------------------------------------------------------------===//
+// SourceManager Helper Classes
+//===----------------------------------------------------------------------===//
+
+ContentCache::~ContentCache() {
+ if (shouldFreeBuffer())
+ delete Buffer.getPointer();
+}
+
+/// getSizeBytesMapped - Returns the number of bytes actually mapped for this
+/// ContentCache. This can be 0 if the MemBuffer was not actually expanded.
+unsigned ContentCache::getSizeBytesMapped() const {
+ return Buffer.getPointer() ? Buffer.getPointer()->getBufferSize() : 0;
+}
+
+/// Returns the kind of memory used to back the memory buffer for
+/// this content cache. This is used for performance analysis.
+llvm::MemoryBuffer::BufferKind ContentCache::getMemoryBufferKind() const {
+ assert(Buffer.getPointer());
+
+ // Should be unreachable, but keep for sanity.
+ if (!Buffer.getPointer())
+ return llvm::MemoryBuffer::MemoryBuffer_Malloc;
+
+ llvm::MemoryBuffer *buf = Buffer.getPointer();
+ return buf->getBufferKind();
+}
+
+/// getSize - Returns the size of the content encapsulated by this ContentCache.
+/// This can be the size of the source file or the size of an arbitrary
+/// scratch buffer. If the ContentCache encapsulates a source file, that
+/// file is not lazily brought in from disk to satisfy this query.
+unsigned ContentCache::getSize() const {
+ return Buffer.getPointer() ? (unsigned) Buffer.getPointer()->getBufferSize()
+ : (unsigned) ContentsEntry->getSize();
+}
+
+void ContentCache::replaceBuffer(llvm::MemoryBuffer *B, bool DoNotFree) {
+ if (B && B == Buffer.getPointer()) {
+ assert(0 && "Replacing with the same buffer");
+ Buffer.setInt(DoNotFree? DoNotFreeFlag : 0);
+ return;
+ }
+
+ if (shouldFreeBuffer())
+ delete Buffer.getPointer();
+ Buffer.setPointer(B);
+ Buffer.setInt(DoNotFree? DoNotFreeFlag : 0);
+}
+
+llvm::MemoryBuffer *ContentCache::getBuffer(DiagnosticsEngine &Diag,
+ const SourceManager &SM,
+ SourceLocation Loc,
+ bool *Invalid) const {
+ // Lazily create the Buffer for ContentCaches that wrap files. If we already
+ // computed it, just return what we have.
+ if (Buffer.getPointer() || !ContentsEntry) {
+ if (Invalid)
+ *Invalid = isBufferInvalid();
+
+ return Buffer.getPointer();
+ }
+
+ bool isVolatile = SM.userFilesAreVolatile() && !IsSystemFile;
+ auto BufferOrError =
+ SM.getFileManager().getBufferForFile(ContentsEntry, isVolatile);
+
+ // If we were unable to open the file, then we are in an inconsistent
+ // situation where the content cache referenced a file which no longer
+ // exists. Most likely, we were using a stat cache with an invalid entry but
+ // the file could also have been removed during processing. Since we can't
+ // really deal with this situation, just create an empty buffer.
+ //
+ // FIXME: This is definitely not ideal, but our immediate clients can't
+ // currently handle returning a null entry here. Ideally we should detect
+ // that we are in an inconsistent situation and error out as quickly as
+ // possible.
+ if (!BufferOrError) {
+ StringRef FillStr("<<<MISSING SOURCE FILE>>>\n");
+ Buffer.setPointer(MemoryBuffer::getNewUninitMemBuffer(
+ ContentsEntry->getSize(), "<invalid>").release());
+ char *Ptr = const_cast<char*>(Buffer.getPointer()->getBufferStart());
+ for (unsigned i = 0, e = ContentsEntry->getSize(); i != e; ++i)
+ Ptr[i] = FillStr[i % FillStr.size()];
+
+ if (Diag.isDiagnosticInFlight())
+ Diag.SetDelayedDiagnostic(diag::err_cannot_open_file,
+ ContentsEntry->getName(),
+ BufferOrError.getError().message());
+ else
+ Diag.Report(Loc, diag::err_cannot_open_file)
+ << ContentsEntry->getName() << BufferOrError.getError().message();
+
+ Buffer.setInt(Buffer.getInt() | InvalidFlag);
+
+ if (Invalid) *Invalid = true;
+ return Buffer.getPointer();
+ }
+
+ Buffer.setPointer(BufferOrError->release());
+
+ // Check that the file's size is the same as in the file entry (which may
+ // have come from a stat cache).
+ if (getRawBuffer()->getBufferSize() != (size_t)ContentsEntry->getSize()) {
+ if (Diag.isDiagnosticInFlight())
+ Diag.SetDelayedDiagnostic(diag::err_file_modified,
+ ContentsEntry->getName());
+ else
+ Diag.Report(Loc, diag::err_file_modified)
+ << ContentsEntry->getName();
+
+ Buffer.setInt(Buffer.getInt() | InvalidFlag);
+ if (Invalid) *Invalid = true;
+ return Buffer.getPointer();
+ }
+
+ // If the buffer is valid, check to see if it has a UTF Byte Order Mark
+ // (BOM). We only support UTF-8 with and without a BOM right now. See
+ // http://en.wikipedia.org/wiki/Byte_order_mark for more information.
+ StringRef BufStr = Buffer.getPointer()->getBuffer();
+ const char *InvalidBOM = llvm::StringSwitch<const char *>(BufStr)
+ .StartsWith("\xFE\xFF", "UTF-16 (BE)")
+ .StartsWith("\xFF\xFE", "UTF-16 (LE)")
+ .StartsWith("\x00\x00\xFE\xFF", "UTF-32 (BE)")
+ .StartsWith("\xFF\xFE\x00\x00", "UTF-32 (LE)")
+ .StartsWith("\x2B\x2F\x76", "UTF-7")
+ .StartsWith("\xF7\x64\x4C", "UTF-1")
+ .StartsWith("\xDD\x73\x66\x73", "UTF-EBCDIC")
+ .StartsWith("\x0E\xFE\xFF", "SDSU")
+ .StartsWith("\xFB\xEE\x28", "BOCU-1")
+ .StartsWith("\x84\x31\x95\x33", "GB-18030")
+ .Default(nullptr);
+
+ if (InvalidBOM) {
+ Diag.Report(Loc, diag::err_unsupported_bom)
+ << InvalidBOM << ContentsEntry->getName();
+ Buffer.setInt(Buffer.getInt() | InvalidFlag);
+ }
+
+ if (Invalid)
+ *Invalid = isBufferInvalid();
+
+ return Buffer.getPointer();
+}
+
+unsigned LineTableInfo::getLineTableFilenameID(StringRef Name) {
+ auto IterBool =
+ FilenameIDs.insert(std::make_pair(Name, FilenamesByID.size()));
+ if (IterBool.second)
+ FilenamesByID.push_back(&*IterBool.first);
+ return IterBool.first->second;
+}
+
+/// AddLineNote - Add a line note to the line table that indicates that there
+/// is a \#line at the specified FID/Offset location which changes the presumed
+/// location to LineNo/FilenameID.
+void LineTableInfo::AddLineNote(FileID FID, unsigned Offset,
+ unsigned LineNo, int FilenameID) {
+ std::vector<LineEntry> &Entries = LineEntries[FID];
+
+ assert((Entries.empty() || Entries.back().FileOffset < Offset) &&
+ "Adding line entries out of order!");
+
+ SrcMgr::CharacteristicKind Kind = SrcMgr::C_User;
+ unsigned IncludeOffset = 0;
+
+ if (!Entries.empty()) {
+ // If this is a '#line 4' after '#line 42 "foo.h"', make sure to remember
+ // that we are still in "foo.h".
+ if (FilenameID == -1)
+ FilenameID = Entries.back().FilenameID;
+
+ // If we are after a line marker that switched us to system header mode, or
+ // that set #include information, preserve it.
+ Kind = Entries.back().FileKind;
+ IncludeOffset = Entries.back().IncludeOffset;
+ }
+
+ Entries.push_back(LineEntry::get(Offset, LineNo, FilenameID, Kind,
+ IncludeOffset));
+}
+
+/// AddLineNote - This is the same as the previous version of AddLineNote, but
+/// is used for GNU line markers. If EntryExit is 0, this does not change the
+/// presumed \#include stack. If it is 1, this is a file entry; if it is 2,
+/// this is a file exit. FileKind specifies whether this is a system header or
+/// an extern C system header.
+void LineTableInfo::AddLineNote(FileID FID, unsigned Offset,
+ unsigned LineNo, int FilenameID,
+ unsigned EntryExit,
+ SrcMgr::CharacteristicKind FileKind) {
+ assert(FilenameID != -1 && "Unspecified filename should use other accessor");
+
+ std::vector<LineEntry> &Entries = LineEntries[FID];
+
+ assert((Entries.empty() || Entries.back().FileOffset < Offset) &&
+ "Adding line entries out of order!");
+
+ unsigned IncludeOffset = 0;
+ if (EntryExit == 0) { // No #include stack change.
+ IncludeOffset = Entries.empty() ? 0 : Entries.back().IncludeOffset;
+ } else if (EntryExit == 1) {
+ IncludeOffset = Offset-1;
+ } else if (EntryExit == 2) {
+ assert(!Entries.empty() && Entries.back().IncludeOffset &&
+ "PPDirectives should have caught case when popping empty include stack");
+
+    // Popping the include stack: use the include loc of the entry we return to.
+ IncludeOffset = 0;
+ if (const LineEntry *PrevEntry =
+ FindNearestLineEntry(FID, Entries.back().IncludeOffset))
+ IncludeOffset = PrevEntry->IncludeOffset;
+ }
+
+ Entries.push_back(LineEntry::get(Offset, LineNo, FilenameID, FileKind,
+ IncludeOffset));
+}
+
+
+/// FindNearestLineEntry - Find the line entry nearest to FID that is before
+/// it. If there is no line entry before Offset in FID, return null.
+const LineEntry *LineTableInfo::FindNearestLineEntry(FileID FID,
+ unsigned Offset) {
+ const std::vector<LineEntry> &Entries = LineEntries[FID];
+ assert(!Entries.empty() && "No #line entries for this FID after all!");
+
+  // It is very common for the query to be after the last #line; check this
+  // first.
+ if (Entries.back().FileOffset <= Offset)
+ return &Entries.back();
+
+ // Do a binary search to find the maximal element that is still before Offset.
+ std::vector<LineEntry>::const_iterator I =
+ std::upper_bound(Entries.begin(), Entries.end(), Offset);
+ if (I == Entries.begin()) return nullptr;
+ return &*--I;
+}
+
+/// \brief Add a new line entry that has already been encoded into
+/// the internal representation of the line table.
+void LineTableInfo::AddEntry(FileID FID,
+ const std::vector<LineEntry> &Entries) {
+ LineEntries[FID] = Entries;
+}
+
+/// getLineTableFilenameID - Return the uniqued ID for the specified filename.
+///
+unsigned SourceManager::getLineTableFilenameID(StringRef Name) {
+ return getLineTable().getLineTableFilenameID(Name);
+}
+
+
+/// AddLineNote - Add a line note to the line table for the FileID and offset
+/// specified by Loc. If FilenameID is -1, it is considered to be
+/// unspecified.
+void SourceManager::AddLineNote(SourceLocation Loc, unsigned LineNo,
+ int FilenameID) {
+ std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
+
+ bool Invalid = false;
+ const SLocEntry &Entry = getSLocEntry(LocInfo.first, &Invalid);
+ if (!Entry.isFile() || Invalid)
+ return;
+
+ const SrcMgr::FileInfo &FileInfo = Entry.getFile();
+
+ // Remember that this file has #line directives now if it doesn't already.
+ const_cast<SrcMgr::FileInfo&>(FileInfo).setHasLineDirectives();
+
+ getLineTable().AddLineNote(LocInfo.first, LocInfo.second, LineNo, FilenameID);
+}
+
+/// AddLineNote - Add a GNU line marker to the line table.
+void SourceManager::AddLineNote(SourceLocation Loc, unsigned LineNo,
+ int FilenameID, bool IsFileEntry,
+ bool IsFileExit, bool IsSystemHeader,
+ bool IsExternCHeader) {
+ // If there is no filename and no flags, this is treated just like a #line,
+ // which does not change the flags of the previous line marker.
+ if (FilenameID == -1) {
+ assert(!IsFileEntry && !IsFileExit && !IsSystemHeader && !IsExternCHeader &&
+ "Can't set flags without setting the filename!");
+ return AddLineNote(Loc, LineNo, FilenameID);
+ }
+
+ std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
+
+ bool Invalid = false;
+ const SLocEntry &Entry = getSLocEntry(LocInfo.first, &Invalid);
+ if (!Entry.isFile() || Invalid)
+ return;
+
+ const SrcMgr::FileInfo &FileInfo = Entry.getFile();
+
+ // Remember that this file has #line directives now if it doesn't already.
+ const_cast<SrcMgr::FileInfo&>(FileInfo).setHasLineDirectives();
+
+ (void) getLineTable();
+
+ SrcMgr::CharacteristicKind FileKind;
+ if (IsExternCHeader)
+ FileKind = SrcMgr::C_ExternCSystem;
+ else if (IsSystemHeader)
+ FileKind = SrcMgr::C_System;
+ else
+ FileKind = SrcMgr::C_User;
+
+ unsigned EntryExit = 0;
+ if (IsFileEntry)
+ EntryExit = 1;
+ else if (IsFileExit)
+ EntryExit = 2;
+
+ LineTable->AddLineNote(LocInfo.first, LocInfo.second, LineNo, FilenameID,
+ EntryExit, FileKind);
+}
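
For reference, the booleans taken by this overload encode the GNU linemarker flags: in `# <line> "<file>" <flags>`, flag 1 marks a file entry, 2 a file exit, 3 a system header, and 4 an implicit extern "C" region. A hypothetical decoding sketch:

// Sketch: decoding GNU linemarker flags into the booleans taken by the
// AddLineNote overload above (flag meanings per the GNU cpp manual; the
// decode helper is hypothetical).
#include <cassert>
#include <set>

struct LineMarkerFlags {
  bool IsFileEntry, IsFileExit, IsSystemHeader, IsExternCHeader;
};

static LineMarkerFlags decode(const std::set<int> &Flags) {
  return {Flags.count(1) != 0, Flags.count(2) != 0,
          Flags.count(3) != 0, Flags.count(4) != 0};
}

int main() {
  // '# 4 "foo.h" 3' is neither an entry nor an exit, but a system header.
  LineMarkerFlags F = decode({3});
  assert(!F.IsFileEntry && !F.IsFileExit && F.IsSystemHeader &&
         !F.IsExternCHeader);
}
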
+
+LineTableInfo &SourceManager::getLineTable() {
+ if (!LineTable)
+ LineTable = new LineTableInfo();
+ return *LineTable;
+}
+
+//===----------------------------------------------------------------------===//
+// Private 'Create' methods.
+//===----------------------------------------------------------------------===//
+
+SourceManager::SourceManager(DiagnosticsEngine &Diag, FileManager &FileMgr,
+ bool UserFilesAreVolatile)
+ : Diag(Diag), FileMgr(FileMgr), OverridenFilesKeepOriginalName(true),
+ UserFilesAreVolatile(UserFilesAreVolatile), FilesAreTransient(false),
+ ExternalSLocEntries(nullptr), LineTable(nullptr), NumLinearScans(0),
+ NumBinaryProbes(0) {
+ clearIDTables();
+ Diag.setSourceManager(this);
+}
+
+SourceManager::~SourceManager() {
+ delete LineTable;
+
+ // Delete FileEntry objects corresponding to content caches. Since the actual
+ // content cache objects are bump pointer allocated, we just have to run the
+ // dtors, but we call the deallocate method for completeness.
+ for (unsigned i = 0, e = MemBufferInfos.size(); i != e; ++i) {
+ if (MemBufferInfos[i]) {
+ MemBufferInfos[i]->~ContentCache();
+ ContentCacheAlloc.Deallocate(MemBufferInfos[i]);
+ }
+ }
+ for (llvm::DenseMap<const FileEntry*, SrcMgr::ContentCache*>::iterator
+ I = FileInfos.begin(), E = FileInfos.end(); I != E; ++I) {
+ if (I->second) {
+ I->second->~ContentCache();
+ ContentCacheAlloc.Deallocate(I->second);
+ }
+ }
+
+ llvm::DeleteContainerSeconds(MacroArgsCacheMap);
+}
+
+void SourceManager::clearIDTables() {
+ MainFileID = FileID();
+ LocalSLocEntryTable.clear();
+ LoadedSLocEntryTable.clear();
+ SLocEntryLoaded.clear();
+ LastLineNoFileIDQuery = FileID();
+ LastLineNoContentCache = nullptr;
+ LastFileIDLookup = FileID();
+
+ if (LineTable)
+ LineTable->clear();
+
+ // Use up FileID #0 as an invalid expansion.
+ NextLocalOffset = 0;
+ CurrentLoadedOffset = MaxLoadedOffset;
+ createExpansionLoc(SourceLocation(),SourceLocation(),SourceLocation(), 1);
+}
+
+/// getOrCreateContentCache - Create or return a cached ContentCache for the
+/// specified file.
+const ContentCache *
+SourceManager::getOrCreateContentCache(const FileEntry *FileEnt,
+ bool isSystemFile) {
+ assert(FileEnt && "Didn't specify a file entry to use?");
+
+ // Do we already have information about this file?
+ ContentCache *&Entry = FileInfos[FileEnt];
+ if (Entry) return Entry;
+
+ // Nope, create a new Cache entry.
+ Entry = ContentCacheAlloc.Allocate<ContentCache>();
+
+ if (OverriddenFilesInfo) {
+ // If the file contents are overridden with contents from another file,
+ // pass that file to ContentCache.
+ llvm::DenseMap<const FileEntry *, const FileEntry *>::iterator
+ overI = OverriddenFilesInfo->OverriddenFiles.find(FileEnt);
+ if (overI == OverriddenFilesInfo->OverriddenFiles.end())
+ new (Entry) ContentCache(FileEnt);
+ else
+ new (Entry) ContentCache(OverridenFilesKeepOriginalName ? FileEnt
+ : overI->second,
+ overI->second);
+ } else {
+ new (Entry) ContentCache(FileEnt);
+ }
+
+ Entry->IsSystemFile = isSystemFile;
+ Entry->IsTransient = FilesAreTransient;
+
+ return Entry;
+}
+
+
+/// createMemBufferContentCache - Create a new ContentCache for the specified
+/// memory buffer. This does no caching.
+const ContentCache *SourceManager::createMemBufferContentCache(
+ std::unique_ptr<llvm::MemoryBuffer> Buffer) {
+ // Add a new ContentCache to the MemBufferInfos list and return it.
+ ContentCache *Entry = ContentCacheAlloc.Allocate<ContentCache>();
+ new (Entry) ContentCache();
+ MemBufferInfos.push_back(Entry);
+ Entry->setBuffer(std::move(Buffer));
+ return Entry;
+}
+
+const SrcMgr::SLocEntry &SourceManager::loadSLocEntry(unsigned Index,
+ bool *Invalid) const {
+ assert(!SLocEntryLoaded[Index]);
+ if (ExternalSLocEntries->ReadSLocEntry(-(static_cast<int>(Index) + 2))) {
+ if (Invalid)
+ *Invalid = true;
+ // If the file of the SLocEntry changed we could still have loaded it.
+ if (!SLocEntryLoaded[Index]) {
+ // Try to recover; create a SLocEntry so the rest of clang can handle it.
+ LoadedSLocEntryTable[Index] = SLocEntry::get(0,
+ FileInfo::get(SourceLocation(),
+ getFakeContentCacheForRecovery(),
+ SrcMgr::C_User));
+ }
+ }
+
+ return LoadedSLocEntryTable[Index];
+}
+
+std::pair<int, unsigned>
+SourceManager::AllocateLoadedSLocEntries(unsigned NumSLocEntries,
+ unsigned TotalSize) {
+ assert(ExternalSLocEntries && "Don't have an external sloc source");
+ // Make sure we're not about to run out of source locations.
+ if (CurrentLoadedOffset - TotalSize < NextLocalOffset)
+ return std::make_pair(0, 0);
+ LoadedSLocEntryTable.resize(LoadedSLocEntryTable.size() + NumSLocEntries);
+ SLocEntryLoaded.resize(LoadedSLocEntryTable.size());
+ CurrentLoadedOffset -= TotalSize;
+ int ID = LoadedSLocEntryTable.size();
+ return std::make_pair(-ID - 1, CurrentLoadedOffset);
+}
+
+/// \brief As part of recovering from missing or changed content, produce a
+/// fake, non-empty buffer.
+llvm::MemoryBuffer *SourceManager::getFakeBufferForRecovery() const {
+ if (!FakeBufferForRecovery)
+ FakeBufferForRecovery =
+        llvm::MemoryBuffer::getMemBuffer("<<<INVALID BUFFER>>>");
+
+ return FakeBufferForRecovery.get();
+}
+
+/// \brief As part of recovering from missing or changed content, produce a
+/// fake content cache.
+const SrcMgr::ContentCache *
+SourceManager::getFakeContentCacheForRecovery() const {
+ if (!FakeContentCacheForRecovery) {
+ FakeContentCacheForRecovery = llvm::make_unique<SrcMgr::ContentCache>();
+ FakeContentCacheForRecovery->replaceBuffer(getFakeBufferForRecovery(),
+ /*DoNotFree=*/true);
+ }
+ return FakeContentCacheForRecovery.get();
+}
+
+/// \brief Returns the previous in-order FileID or an invalid FileID if there
+/// is no previous one.
+FileID SourceManager::getPreviousFileID(FileID FID) const {
+ if (FID.isInvalid())
+ return FileID();
+
+ int ID = FID.ID;
+ if (ID == -1)
+ return FileID();
+
+ if (ID > 0) {
+ if (ID-1 == 0)
+ return FileID();
+ } else if (unsigned(-(ID-1) - 2) >= LoadedSLocEntryTable.size()) {
+ return FileID();
+ }
+
+ return FileID::get(ID-1);
+}
+
+/// \brief Returns the next in-order FileID or an invalid FileID if there is
+/// no next one.
+FileID SourceManager::getNextFileID(FileID FID) const {
+ if (FID.isInvalid())
+ return FileID();
+
+ int ID = FID.ID;
+ if (ID > 0) {
+ if (unsigned(ID+1) >= local_sloc_entry_size())
+ return FileID();
+ } else if (ID+1 >= -1) {
+ return FileID();
+ }
+
+ return FileID::get(ID+1);
+}
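
Both walkers above lean on the FileID encoding: local entries use positive IDs that index LocalSLocEntryTable directly, ID 0 is invalid, -1 is a sentinel, and loaded (AST-file) entries use IDs <= -2, with ID -K stored at LoadedSLocEntryTable index K - 2. A standalone sketch of the round trip (helper names are hypothetical):

// Sketch of the loaded-FileID encoding assumed by getPreviousFileID and
// getNextFileID above: loaded entries get IDs -2, -3, ... that map to
// LoadedSLocEntryTable indices 0, 1, ...
#include <cassert>

static unsigned loadedIndexForID(int ID) {
  assert(ID <= -2 && "not a loaded FileID");
  return unsigned(-ID) - 2;
}
static int idForLoadedIndex(unsigned Index) { return -int(Index) - 2; }

int main() {
  assert(loadedIndexForID(-2) == 0);
  assert(loadedIndexForID(-5) == 3);
  for (unsigned I = 0; I != 16; ++I)
    assert(loadedIndexForID(idForLoadedIndex(I)) == I);
}
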
+
+//===----------------------------------------------------------------------===//
+// Methods to create new FileID's and macro expansions.
+//===----------------------------------------------------------------------===//
+
+/// createFileID - Create a new FileID for the specified ContentCache and
+/// include position. This works regardless of whether the ContentCache
+/// corresponds to a file or some other input source.
+FileID SourceManager::createFileID(const ContentCache *File,
+ SourceLocation IncludePos,
+ SrcMgr::CharacteristicKind FileCharacter,
+ int LoadedID, unsigned LoadedOffset) {
+ if (LoadedID < 0) {
+ assert(LoadedID != -1 && "Loading sentinel FileID");
+ unsigned Index = unsigned(-LoadedID) - 2;
+ assert(Index < LoadedSLocEntryTable.size() && "FileID out of range");
+ assert(!SLocEntryLoaded[Index] && "FileID already loaded");
+ LoadedSLocEntryTable[Index] = SLocEntry::get(LoadedOffset,
+ FileInfo::get(IncludePos, File, FileCharacter));
+ SLocEntryLoaded[Index] = true;
+ return FileID::get(LoadedID);
+ }
+ LocalSLocEntryTable.push_back(SLocEntry::get(NextLocalOffset,
+ FileInfo::get(IncludePos, File,
+ FileCharacter)));
+ unsigned FileSize = File->getSize();
+ assert(NextLocalOffset + FileSize + 1 > NextLocalOffset &&
+ NextLocalOffset + FileSize + 1 <= CurrentLoadedOffset &&
+ "Ran out of source locations!");
+ // We do a +1 here because we want a SourceLocation that means "the end of the
+ // file", e.g. for the "no newline at the end of the file" diagnostic.
+ NextLocalOffset += FileSize + 1;
+
+ // Set LastFileIDLookup to the newly created file. The next getFileID call is
+ // almost guaranteed to be from that file.
+ FileID FID = FileID::get(LocalSLocEntryTable.size()-1);
+ return LastFileIDLookup = FID;
+}
+
+SourceLocation
+SourceManager::createMacroArgExpansionLoc(SourceLocation SpellingLoc,
+ SourceLocation ExpansionLoc,
+ unsigned TokLength) {
+ ExpansionInfo Info = ExpansionInfo::createForMacroArg(SpellingLoc,
+ ExpansionLoc);
+ return createExpansionLocImpl(Info, TokLength);
+}
+
+SourceLocation
+SourceManager::createExpansionLoc(SourceLocation SpellingLoc,
+ SourceLocation ExpansionLocStart,
+ SourceLocation ExpansionLocEnd,
+ unsigned TokLength,
+ int LoadedID,
+ unsigned LoadedOffset) {
+ ExpansionInfo Info = ExpansionInfo::create(SpellingLoc, ExpansionLocStart,
+ ExpansionLocEnd);
+ return createExpansionLocImpl(Info, TokLength, LoadedID, LoadedOffset);
+}
+
+SourceLocation
+SourceManager::createExpansionLocImpl(const ExpansionInfo &Info,
+ unsigned TokLength,
+ int LoadedID,
+ unsigned LoadedOffset) {
+ if (LoadedID < 0) {
+ assert(LoadedID != -1 && "Loading sentinel FileID");
+ unsigned Index = unsigned(-LoadedID) - 2;
+ assert(Index < LoadedSLocEntryTable.size() && "FileID out of range");
+ assert(!SLocEntryLoaded[Index] && "FileID already loaded");
+ LoadedSLocEntryTable[Index] = SLocEntry::get(LoadedOffset, Info);
+ SLocEntryLoaded[Index] = true;
+ return SourceLocation::getMacroLoc(LoadedOffset);
+ }
+ LocalSLocEntryTable.push_back(SLocEntry::get(NextLocalOffset, Info));
+ assert(NextLocalOffset + TokLength + 1 > NextLocalOffset &&
+ NextLocalOffset + TokLength + 1 <= CurrentLoadedOffset &&
+ "Ran out of source locations!");
+ // See createFileID for that +1.
+ NextLocalOffset += TokLength + 1;
+ return SourceLocation::getMacroLoc(NextLocalOffset - (TokLength + 1));
+}
+
+llvm::MemoryBuffer *SourceManager::getMemoryBufferForFile(const FileEntry *File,
+ bool *Invalid) {
+ const SrcMgr::ContentCache *IR = getOrCreateContentCache(File);
+ assert(IR && "getOrCreateContentCache() cannot return NULL");
+ return IR->getBuffer(Diag, *this, SourceLocation(), Invalid);
+}
+
+void SourceManager::overrideFileContents(const FileEntry *SourceFile,
+ llvm::MemoryBuffer *Buffer,
+ bool DoNotFree) {
+ const SrcMgr::ContentCache *IR = getOrCreateContentCache(SourceFile);
+ assert(IR && "getOrCreateContentCache() cannot return NULL");
+
+ const_cast<SrcMgr::ContentCache *>(IR)->replaceBuffer(Buffer, DoNotFree);
+ const_cast<SrcMgr::ContentCache *>(IR)->BufferOverridden = true;
+
+ getOverriddenFilesInfo().OverriddenFilesWithBuffer.insert(SourceFile);
+}
+
+void SourceManager::overrideFileContents(const FileEntry *SourceFile,
+ const FileEntry *NewFile) {
+ assert(SourceFile->getSize() == NewFile->getSize() &&
+ "Different sizes, use the FileManager to create a virtual file with "
+ "the correct size");
+ assert(FileInfos.count(SourceFile) == 0 &&
+ "This function should be called at the initialization stage, before "
+ "any parsing occurs.");
+ getOverriddenFilesInfo().OverriddenFiles[SourceFile] = NewFile;
+}
+
+void SourceManager::disableFileContentsOverride(const FileEntry *File) {
+ if (!isFileOverridden(File))
+ return;
+
+ const SrcMgr::ContentCache *IR = getOrCreateContentCache(File);
+ const_cast<SrcMgr::ContentCache *>(IR)->replaceBuffer(nullptr);
+ const_cast<SrcMgr::ContentCache *>(IR)->ContentsEntry = IR->OrigEntry;
+
+ assert(OverriddenFilesInfo);
+ OverriddenFilesInfo->OverriddenFiles.erase(File);
+ OverriddenFilesInfo->OverriddenFilesWithBuffer.erase(File);
+}
+
+void SourceManager::setFileIsTransient(const FileEntry *File) {
+ const SrcMgr::ContentCache *CC = getOrCreateContentCache(File);
+ const_cast<SrcMgr::ContentCache *>(CC)->IsTransient = true;
+}
+
+StringRef SourceManager::getBufferData(FileID FID, bool *Invalid) const {
+ bool MyInvalid = false;
+ const SLocEntry &SLoc = getSLocEntry(FID, &MyInvalid);
+ if (!SLoc.isFile() || MyInvalid) {
+ if (Invalid)
+ *Invalid = true;
+ return "<<<<<INVALID SOURCE LOCATION>>>>>";
+ }
+
+ llvm::MemoryBuffer *Buf = SLoc.getFile().getContentCache()->getBuffer(
+ Diag, *this, SourceLocation(), &MyInvalid);
+ if (Invalid)
+ *Invalid = MyInvalid;
+
+ if (MyInvalid)
+ return "<<<<<INVALID SOURCE LOCATION>>>>>";
+
+ return Buf->getBuffer();
+}
+
+//===----------------------------------------------------------------------===//
+// SourceLocation manipulation methods.
+//===----------------------------------------------------------------------===//
+
+/// \brief Return the FileID for a SourceLocation.
+///
+/// This is the cache-miss path of getFileID. Not as hot as that function, but
+/// still very important. It is responsible for finding the entry in the
+/// SLocEntry tables that contains the specified location.
+FileID SourceManager::getFileIDSlow(unsigned SLocOffset) const {
+ if (!SLocOffset)
+ return FileID::get(0);
+
+ // Now it is time to search for the correct file. See where the SLocOffset
+ // sits in the global view and consult local or loaded buffers for it.
+ if (SLocOffset < NextLocalOffset)
+ return getFileIDLocal(SLocOffset);
+ return getFileIDLoaded(SLocOffset);
+}
+
+/// \brief Return the FileID for a SourceLocation with a low offset.
+///
+/// This function knows that the SourceLocation is in a local buffer, not a
+/// loaded one.
+FileID SourceManager::getFileIDLocal(unsigned SLocOffset) const {
+ assert(SLocOffset < NextLocalOffset && "Bad function choice");
+
+ // After the first and second level caches, I see two common sorts of
+ // behavior: 1) a lot of searched FileID's are "near" the cached file
+ // location or are "near" the cached expansion location. 2) others are just
+ // completely random and may be a very long way away.
+ //
+ // To handle this, we do a linear search for up to 8 steps to catch #1 quickly
+ // then we fall back to a less cache efficient, but more scalable, binary
+ // search to find the location.
+
+ // See if this is near the file point - worst case we start scanning from the
+ // most newly created FileID.
+ const SrcMgr::SLocEntry *I;
+
+ if (LastFileIDLookup.ID < 0 ||
+ LocalSLocEntryTable[LastFileIDLookup.ID].getOffset() < SLocOffset) {
+ // Neither loc prunes our search.
+ I = LocalSLocEntryTable.end();
+ } else {
+ // Perhaps it is near the file point.
+ I = LocalSLocEntryTable.begin()+LastFileIDLookup.ID;
+ }
+
+ // Find the FileID that contains this. "I" is an iterator that points to a
+ // FileID whose offset is known to be larger than SLocOffset.
+ unsigned NumProbes = 0;
+ while (1) {
+ --I;
+ if (I->getOffset() <= SLocOffset) {
+ FileID Res = FileID::get(int(I - LocalSLocEntryTable.begin()));
+
+ // If this isn't an expansion, remember it. We have good locality across
+ // FileID lookups.
+ if (!I->isExpansion())
+ LastFileIDLookup = Res;
+ NumLinearScans += NumProbes+1;
+ return Res;
+ }
+ if (++NumProbes == 8)
+ break;
+ }
+
+ // Convert "I" back into an index. We know that it is an entry whose index is
+ // larger than the offset we are looking for.
+ unsigned GreaterIndex = I - LocalSLocEntryTable.begin();
+ // LessIndex - This is the lower bound of the range that we're searching.
+  // We know that the offset corresponding to the FileID is less than
+ // SLocOffset.
+ unsigned LessIndex = 0;
+ NumProbes = 0;
+ while (1) {
+ bool Invalid = false;
+ unsigned MiddleIndex = (GreaterIndex-LessIndex)/2+LessIndex;
+ unsigned MidOffset = getLocalSLocEntry(MiddleIndex, &Invalid).getOffset();
+ if (Invalid)
+ return FileID::get(0);
+
+ ++NumProbes;
+
+ // If the offset of the midpoint is too large, chop the high side of the
+ // range to the midpoint.
+ if (MidOffset > SLocOffset) {
+ GreaterIndex = MiddleIndex;
+ continue;
+ }
+
+ // If the middle index contains the value, succeed and return.
+ // FIXME: This could be made faster by using a function that's aware of
+ // being in the local area.
+ if (isOffsetInFileID(FileID::get(MiddleIndex), SLocOffset)) {
+ FileID Res = FileID::get(MiddleIndex);
+
+ // If this isn't a macro expansion, remember it. We have good locality
+ // across FileID lookups.
+ if (!LocalSLocEntryTable[MiddleIndex].isExpansion())
+ LastFileIDLookup = Res;
+ NumBinaryProbes += NumProbes;
+ return Res;
+ }
+
+ // Otherwise, move the low-side up to the middle index.
+ LessIndex = MiddleIndex;
+ }
+}
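
Stripped of the SourceManager details, the lookup above is a short backwards linear probe from a cached position, with a binary-search fallback over the sorted start offsets. A minimal sketch of the same strategy (hypothetical findEntry helper, invented data):

// Sketch of the probe-then-bisect lookup in getFileIDLocal: Starts holds the
// sorted starting offsets of consecutive entries, and the entry containing
// Offset is the last one whose start is <= Offset.
#include <algorithm>
#include <cassert>
#include <vector>

static size_t findEntry(const std::vector<unsigned> &Starts, unsigned Offset,
                        size_t Hint) {
  // Begin just above a position known to be >= Offset; if the hint does not
  // prune the search, start from the end (mirroring getFileIDLocal).
  size_t I = (Hint < Starts.size() && Starts[Hint] >= Offset) ? Hint + 1
                                                              : Starts.size();
  // Short backwards linear probe: the first start <= Offset is the answer.
  for (unsigned Probes = 0; I != 0 && Probes != 8; ++Probes) {
    --I;
    if (Starts[I] <= Offset)
      return I;
  }
  // Fallback binary search for the last element <= Offset.
  auto It = std::upper_bound(Starts.begin(), Starts.end(), Offset);
  assert(It != Starts.begin() && "Offset precedes the first entry");
  return size_t(It - Starts.begin()) - 1;
}

int main() {
  std::vector<unsigned> Starts = {0, 10, 25, 40, 100};
  assert(findEntry(Starts, 12, /*Hint=*/2) == 1); // hint prunes the scan
  assert(findEntry(Starts, 99, /*Hint=*/0) == 3); // hint does not help
}
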
+
+/// \brief Return the FileID for a SourceLocation with a high offset.
+///
+/// This function knows that the SourceLocation is in a loaded buffer, not a
+/// local one.
+FileID SourceManager::getFileIDLoaded(unsigned SLocOffset) const {
+  // Sanity check; otherwise a bug may lead to hanging in a release build.
+ if (SLocOffset < CurrentLoadedOffset) {
+ assert(0 && "Invalid SLocOffset or bad function choice");
+ return FileID();
+ }
+
+ // Essentially the same as the local case, but the loaded array is sorted
+ // in the other direction.
+
+ // First do a linear scan from the last lookup position, if possible.
+ unsigned I;
+ int LastID = LastFileIDLookup.ID;
+ if (LastID >= 0 || getLoadedSLocEntryByID(LastID).getOffset() < SLocOffset)
+ I = 0;
+ else
+ I = (-LastID - 2) + 1;
+
+ unsigned NumProbes;
+ for (NumProbes = 0; NumProbes < 8; ++NumProbes, ++I) {
+ // Make sure the entry is loaded!
+ const SrcMgr::SLocEntry &E = getLoadedSLocEntry(I);
+ if (E.getOffset() <= SLocOffset) {
+ FileID Res = FileID::get(-int(I) - 2);
+
+ if (!E.isExpansion())
+ LastFileIDLookup = Res;
+ NumLinearScans += NumProbes + 1;
+ return Res;
+ }
+ }
+
+ // Linear scan failed. Do the binary search. Note the reverse sorting of the
+ // table: GreaterIndex is the one where the offset is greater, which is
+ // actually a lower index!
+ unsigned GreaterIndex = I;
+ unsigned LessIndex = LoadedSLocEntryTable.size();
+ NumProbes = 0;
+ while (1) {
+ ++NumProbes;
+ unsigned MiddleIndex = (LessIndex - GreaterIndex) / 2 + GreaterIndex;
+ const SrcMgr::SLocEntry &E = getLoadedSLocEntry(MiddleIndex);
+ if (E.getOffset() == 0)
+ return FileID(); // invalid entry.
+
+ ++NumProbes;
+
+ if (E.getOffset() > SLocOffset) {
+      // Sanity check; otherwise a bug may lead to hanging in a release build.
+ if (GreaterIndex == MiddleIndex) {
+ assert(0 && "binary search missed the entry");
+ return FileID();
+ }
+ GreaterIndex = MiddleIndex;
+ continue;
+ }
+
+ if (isOffsetInFileID(FileID::get(-int(MiddleIndex) - 2), SLocOffset)) {
+ FileID Res = FileID::get(-int(MiddleIndex) - 2);
+ if (!E.isExpansion())
+ LastFileIDLookup = Res;
+ NumBinaryProbes += NumProbes;
+ return Res;
+ }
+
+    // Sanity check; otherwise a bug may lead to hanging in a release build.
+ if (LessIndex == MiddleIndex) {
+ assert(0 && "binary search missed the entry");
+ return FileID();
+ }
+ LessIndex = MiddleIndex;
+ }
+}
+
+SourceLocation SourceManager::
+getExpansionLocSlowCase(SourceLocation Loc) const {
+ do {
+ // Note: If Loc indicates an offset into a token that came from a macro
+ // expansion (e.g. the 5th character of the token) we do not want to add
+ // this offset when going to the expansion location. The expansion
+ // location is the macro invocation, which the offset has nothing to do
+ // with. This is unlike when we get the spelling loc, because the offset
+    // directly corresponds to the token whose spelling we're inspecting.
+ Loc = getSLocEntry(getFileID(Loc)).getExpansion().getExpansionLocStart();
+ } while (!Loc.isFileID());
+
+ return Loc;
+}
+
+SourceLocation SourceManager::getSpellingLocSlowCase(SourceLocation Loc) const {
+ do {
+ std::pair<FileID, unsigned> LocInfo = getDecomposedLoc(Loc);
+ Loc = getSLocEntry(LocInfo.first).getExpansion().getSpellingLoc();
+ Loc = Loc.getLocWithOffset(LocInfo.second);
+ } while (!Loc.isFileID());
+ return Loc;
+}
+
+SourceLocation SourceManager::getFileLocSlowCase(SourceLocation Loc) const {
+ do {
+ if (isMacroArgExpansion(Loc))
+ Loc = getImmediateSpellingLoc(Loc);
+ else
+ Loc = getImmediateExpansionRange(Loc).first;
+ } while (!Loc.isFileID());
+ return Loc;
+}
+
+
+std::pair<FileID, unsigned>
+SourceManager::getDecomposedExpansionLocSlowCase(
+ const SrcMgr::SLocEntry *E) const {
+ // If this is an expansion record, walk through all the expansion points.
+ FileID FID;
+ SourceLocation Loc;
+ unsigned Offset;
+ do {
+ Loc = E->getExpansion().getExpansionLocStart();
+
+ FID = getFileID(Loc);
+ E = &getSLocEntry(FID);
+ Offset = Loc.getOffset()-E->getOffset();
+ } while (!Loc.isFileID());
+
+ return std::make_pair(FID, Offset);
+}
+
+std::pair<FileID, unsigned>
+SourceManager::getDecomposedSpellingLocSlowCase(const SrcMgr::SLocEntry *E,
+ unsigned Offset) const {
+ // If this is an expansion record, walk through all the expansion points.
+ FileID FID;
+ SourceLocation Loc;
+ do {
+ Loc = E->getExpansion().getSpellingLoc();
+ Loc = Loc.getLocWithOffset(Offset);
+
+ FID = getFileID(Loc);
+ E = &getSLocEntry(FID);
+ Offset = Loc.getOffset()-E->getOffset();
+ } while (!Loc.isFileID());
+
+ return std::make_pair(FID, Offset);
+}
+
+/// getImmediateSpellingLoc - Given a SourceLocation object, return the
+/// spelling location referenced by the ID. This is the first level down
+/// towards the place where the characters that make up the lexed token can be
+/// found. This should not generally be used by clients.
+SourceLocation SourceManager::getImmediateSpellingLoc(SourceLocation Loc) const{
+ if (Loc.isFileID()) return Loc;
+ std::pair<FileID, unsigned> LocInfo = getDecomposedLoc(Loc);
+ Loc = getSLocEntry(LocInfo.first).getExpansion().getSpellingLoc();
+ return Loc.getLocWithOffset(LocInfo.second);
+}
+
+
+/// getImmediateExpansionRange - Loc is required to be an expansion location.
+/// Return the start/end of the expansion information.
+std::pair<SourceLocation,SourceLocation>
+SourceManager::getImmediateExpansionRange(SourceLocation Loc) const {
+ assert(Loc.isMacroID() && "Not a macro expansion loc!");
+ const ExpansionInfo &Expansion = getSLocEntry(getFileID(Loc)).getExpansion();
+ return Expansion.getExpansionLocRange();
+}
+
+/// getExpansionRange - Given a SourceLocation object, return the range of
+/// tokens covered by the expansion in the ultimate file.
+std::pair<SourceLocation,SourceLocation>
+SourceManager::getExpansionRange(SourceLocation Loc) const {
+ if (Loc.isFileID()) return std::make_pair(Loc, Loc);
+
+ std::pair<SourceLocation,SourceLocation> Res =
+ getImmediateExpansionRange(Loc);
+
+ // Fully resolve the start and end locations to their ultimate expansion
+ // points.
+ while (!Res.first.isFileID())
+ Res.first = getImmediateExpansionRange(Res.first).first;
+ while (!Res.second.isFileID())
+ Res.second = getImmediateExpansionRange(Res.second).second;
+ return Res;
+}
+
+bool SourceManager::isMacroArgExpansion(SourceLocation Loc,
+ SourceLocation *StartLoc) const {
+ if (!Loc.isMacroID()) return false;
+
+ FileID FID = getFileID(Loc);
+ const SrcMgr::ExpansionInfo &Expansion = getSLocEntry(FID).getExpansion();
+ if (!Expansion.isMacroArgExpansion()) return false;
+
+ if (StartLoc)
+ *StartLoc = Expansion.getExpansionLocStart();
+ return true;
+}
+
+bool SourceManager::isMacroBodyExpansion(SourceLocation Loc) const {
+ if (!Loc.isMacroID()) return false;
+
+ FileID FID = getFileID(Loc);
+ const SrcMgr::ExpansionInfo &Expansion = getSLocEntry(FID).getExpansion();
+ return Expansion.isMacroBodyExpansion();
+}
+
+bool SourceManager::isAtStartOfImmediateMacroExpansion(SourceLocation Loc,
+ SourceLocation *MacroBegin) const {
+ assert(Loc.isValid() && Loc.isMacroID() && "Expected a valid macro loc");
+
+ std::pair<FileID, unsigned> DecompLoc = getDecomposedLoc(Loc);
+ if (DecompLoc.second > 0)
+ return false; // Does not point at the start of expansion range.
+
+ bool Invalid = false;
+ const SrcMgr::ExpansionInfo &ExpInfo =
+ getSLocEntry(DecompLoc.first, &Invalid).getExpansion();
+ if (Invalid)
+ return false;
+ SourceLocation ExpLoc = ExpInfo.getExpansionLocStart();
+
+ if (ExpInfo.isMacroArgExpansion()) {
+ // For macro argument expansions, check if the previous FileID is part of
+ // the same argument expansion, in which case this Loc is not at the
+ // beginning of the expansion.
+ FileID PrevFID = getPreviousFileID(DecompLoc.first);
+ if (!PrevFID.isInvalid()) {
+ const SrcMgr::SLocEntry &PrevEntry = getSLocEntry(PrevFID, &Invalid);
+ if (Invalid)
+ return false;
+ if (PrevEntry.isExpansion() &&
+ PrevEntry.getExpansion().getExpansionLocStart() == ExpLoc)
+ return false;
+ }
+ }
+
+ if (MacroBegin)
+ *MacroBegin = ExpLoc;
+ return true;
+}
+
+bool SourceManager::isAtEndOfImmediateMacroExpansion(SourceLocation Loc,
+ SourceLocation *MacroEnd) const {
+ assert(Loc.isValid() && Loc.isMacroID() && "Expected a valid macro loc");
+
+ FileID FID = getFileID(Loc);
+ SourceLocation NextLoc = Loc.getLocWithOffset(1);
+ if (isInFileID(NextLoc, FID))
+ return false; // Does not point at the end of expansion range.
+
+ bool Invalid = false;
+ const SrcMgr::ExpansionInfo &ExpInfo =
+ getSLocEntry(FID, &Invalid).getExpansion();
+ if (Invalid)
+ return false;
+
+ if (ExpInfo.isMacroArgExpansion()) {
+ // For macro argument expansions, check if the next FileID is part of the
+ // same argument expansion, in which case this Loc is not at the end of the
+ // expansion.
+ FileID NextFID = getNextFileID(FID);
+ if (!NextFID.isInvalid()) {
+ const SrcMgr::SLocEntry &NextEntry = getSLocEntry(NextFID, &Invalid);
+ if (Invalid)
+ return false;
+ if (NextEntry.isExpansion() &&
+ NextEntry.getExpansion().getExpansionLocStart() ==
+ ExpInfo.getExpansionLocStart())
+ return false;
+ }
+ }
+
+ if (MacroEnd)
+ *MacroEnd = ExpInfo.getExpansionLocEnd();
+ return true;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Queries about the code at a SourceLocation.
+//===----------------------------------------------------------------------===//
+
+/// getCharacterData - Return a pointer to the start of the specified location
+/// in the appropriate MemoryBuffer.
+const char *SourceManager::getCharacterData(SourceLocation SL,
+ bool *Invalid) const {
+ // Note that this is a hot function in the getSpelling() path, which is
+ // heavily used by -E mode.
+ std::pair<FileID, unsigned> LocInfo = getDecomposedSpellingLoc(SL);
+
+ // Note that calling 'getBuffer()' may lazily page in a source file.
+ bool CharDataInvalid = false;
+ const SLocEntry &Entry = getSLocEntry(LocInfo.first, &CharDataInvalid);
+ if (CharDataInvalid || !Entry.isFile()) {
+ if (Invalid)
+ *Invalid = true;
+
+ return "<<<<INVALID BUFFER>>>>";
+ }
+ llvm::MemoryBuffer *Buffer = Entry.getFile().getContentCache()->getBuffer(
+ Diag, *this, SourceLocation(), &CharDataInvalid);
+ if (Invalid)
+ *Invalid = CharDataInvalid;
+ return Buffer->getBufferStart() + (CharDataInvalid? 0 : LocInfo.second);
+}
+
+
+/// getColumnNumber - Return the column # for the specified file position.
+/// this is significantly cheaper to compute than the line number.
+unsigned SourceManager::getColumnNumber(FileID FID, unsigned FilePos,
+ bool *Invalid) const {
+ bool MyInvalid = false;
+ llvm::MemoryBuffer *MemBuf = getBuffer(FID, &MyInvalid);
+ if (Invalid)
+ *Invalid = MyInvalid;
+
+ if (MyInvalid)
+ return 1;
+
+ // It is okay to request a position just past the end of the buffer.
+ if (FilePos > MemBuf->getBufferSize()) {
+ if (Invalid)
+ *Invalid = true;
+ return 1;
+ }
+
+ // See if we just calculated the line number for this FilePos and can use
+  // that to look up the start of the line instead of searching for it.
+ if (LastLineNoFileIDQuery == FID &&
+ LastLineNoContentCache->SourceLineCache != nullptr &&
+ LastLineNoResult < LastLineNoContentCache->NumLines) {
+ unsigned *SourceLineCache = LastLineNoContentCache->SourceLineCache;
+ unsigned LineStart = SourceLineCache[LastLineNoResult - 1];
+ unsigned LineEnd = SourceLineCache[LastLineNoResult];
+ if (FilePos >= LineStart && FilePos < LineEnd)
+ return FilePos - LineStart + 1;
+ }
+
+ const char *Buf = MemBuf->getBufferStart();
+ unsigned LineStart = FilePos;
+ while (LineStart && Buf[LineStart-1] != '\n' && Buf[LineStart-1] != '\r')
+ --LineStart;
+ return FilePos-LineStart+1;
+}
+
+// isInvalid - Return the result of calling loc.isInvalid(), and
+// if Invalid is not null, set its value to same.
+static bool isInvalid(SourceLocation Loc, bool *Invalid) {
+ bool MyInvalid = Loc.isInvalid();
+ if (Invalid)
+ *Invalid = MyInvalid;
+ return MyInvalid;
+}
+
+unsigned SourceManager::getSpellingColumnNumber(SourceLocation Loc,
+ bool *Invalid) const {
+ if (isInvalid(Loc, Invalid)) return 0;
+ std::pair<FileID, unsigned> LocInfo = getDecomposedSpellingLoc(Loc);
+ return getColumnNumber(LocInfo.first, LocInfo.second, Invalid);
+}
+
+unsigned SourceManager::getExpansionColumnNumber(SourceLocation Loc,
+ bool *Invalid) const {
+ if (isInvalid(Loc, Invalid)) return 0;
+ std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
+ return getColumnNumber(LocInfo.first, LocInfo.second, Invalid);
+}
+
+unsigned SourceManager::getPresumedColumnNumber(SourceLocation Loc,
+ bool *Invalid) const {
+ if (isInvalid(Loc, Invalid)) return 0;
+ return getPresumedLoc(Loc).getColumn();
+}
+
+#ifdef __SSE2__
+#include <emmintrin.h>
+#endif
+
+static LLVM_ATTRIBUTE_NOINLINE void
+ComputeLineNumbers(DiagnosticsEngine &Diag, ContentCache *FI,
+ llvm::BumpPtrAllocator &Alloc,
+ const SourceManager &SM, bool &Invalid);
+static void ComputeLineNumbers(DiagnosticsEngine &Diag, ContentCache *FI,
+ llvm::BumpPtrAllocator &Alloc,
+ const SourceManager &SM, bool &Invalid) {
+ // Note that calling 'getBuffer()' may lazily page in the file.
+ MemoryBuffer *Buffer = FI->getBuffer(Diag, SM, SourceLocation(), &Invalid);
+ if (Invalid)
+ return;
+
+ // Find the file offsets of all of the *physical* source lines. This does
+ // not look at trigraphs, escaped newlines, or anything else tricky.
+ SmallVector<unsigned, 256> LineOffsets;
+
+ // Line #1 starts at char 0.
+ LineOffsets.push_back(0);
+
+ const unsigned char *Buf = (const unsigned char *)Buffer->getBufferStart();
+ const unsigned char *End = (const unsigned char *)Buffer->getBufferEnd();
+ unsigned Offs = 0;
+ while (1) {
+ // Skip over the contents of the line.
+ const unsigned char *NextBuf = (const unsigned char *)Buf;
+
+#ifdef __SSE2__
+ // Try to skip to the next newline using SSE instructions. This is very
+ // performance sensitive for programs with lots of diagnostics and in -E
+ // mode.
+ __m128i CRs = _mm_set1_epi8('\r');
+ __m128i LFs = _mm_set1_epi8('\n');
+
+ // First fix up the alignment to 16 bytes.
+ while (((uintptr_t)NextBuf & 0xF) != 0) {
+ if (*NextBuf == '\n' || *NextBuf == '\r' || *NextBuf == '\0')
+ goto FoundSpecialChar;
+ ++NextBuf;
+ }
+
+ // Scan 16 byte chunks for '\r' and '\n'. Ignore '\0'.
+ while (NextBuf+16 <= End) {
+ const __m128i Chunk = *(const __m128i*)NextBuf;
+ __m128i Cmp = _mm_or_si128(_mm_cmpeq_epi8(Chunk, CRs),
+ _mm_cmpeq_epi8(Chunk, LFs));
+ unsigned Mask = _mm_movemask_epi8(Cmp);
+
+ // If we found a newline, adjust the pointer and jump to the handling code.
+ if (Mask != 0) {
+ NextBuf += llvm::countTrailingZeros(Mask);
+ goto FoundSpecialChar;
+ }
+ NextBuf += 16;
+ }
+#endif
+
+ while (*NextBuf != '\n' && *NextBuf != '\r' && *NextBuf != '\0')
+ ++NextBuf;
+
+#ifdef __SSE2__
+FoundSpecialChar:
+#endif
+ Offs += NextBuf-Buf;
+ Buf = NextBuf;
+
+ if (Buf[0] == '\n' || Buf[0] == '\r') {
+ // If this is \n\r or \r\n, skip both characters.
+ if ((Buf[1] == '\n' || Buf[1] == '\r') && Buf[0] != Buf[1])
+ ++Offs, ++Buf;
+ ++Offs, ++Buf;
+ LineOffsets.push_back(Offs);
+ } else {
+ // Otherwise, this is a null. If end of file, exit.
+ if (Buf == End) break;
+ // Otherwise, skip the null.
+ ++Offs, ++Buf;
+ }
+ }
+
+ // Copy the offsets into the FileInfo structure.
+ FI->NumLines = LineOffsets.size();
+ FI->SourceLineCache = Alloc.Allocate<unsigned>(LineOffsets.size());
+ std::copy(LineOffsets.begin(), LineOffsets.end(), FI->SourceLineCache);
+}
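
The SSE2 fast path above uses a standard idiom: compare 16 bytes at a time against each delimiter, OR the comparison masks, and use movemask plus count-trailing-zeros to locate the first hit. An isolated sketch of the scan (illustrative; it uses unaligned loads and a GCC/Clang builtin instead of the aligned-load/goto structure above):

// Isolated sketch of the SSE2 newline scan: return the offset of the first
// '\n' or '\r' in Buf[0..Len), or Len if none.
#include <cstddef>
#include <emmintrin.h>

size_t findNewline(const unsigned char *Buf, size_t Len) {
  const __m128i CRs = _mm_set1_epi8('\r');
  const __m128i LFs = _mm_set1_epi8('\n');
  size_t I = 0;
  for (; I + 16 <= Len; I += 16) {
    __m128i Chunk = _mm_loadu_si128((const __m128i *)(Buf + I));
    // Match either delimiter, then turn the byte mask into a bit mask.
    __m128i Cmp =
        _mm_or_si128(_mm_cmpeq_epi8(Chunk, CRs), _mm_cmpeq_epi8(Chunk, LFs));
    if (unsigned Mask = (unsigned)_mm_movemask_epi8(Cmp))
      return I + (size_t)__builtin_ctz(Mask); // lowest set bit = first match
  }
  for (; I != Len; ++I) // scalar tail for the last < 16 bytes
    if (Buf[I] == '\n' || Buf[I] == '\r')
      return I;
  return Len;
}
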
+
+/// getLineNumber - Given a SourceLocation, return the spelling line number
+/// for the position indicated. This requires building and caching a table of
+/// line offsets for the MemoryBuffer, so this is not cheap: use only when
+/// about to emit a diagnostic.
+unsigned SourceManager::getLineNumber(FileID FID, unsigned FilePos,
+ bool *Invalid) const {
+ if (FID.isInvalid()) {
+ if (Invalid)
+ *Invalid = true;
+ return 1;
+ }
+
+ ContentCache *Content;
+ if (LastLineNoFileIDQuery == FID)
+ Content = LastLineNoContentCache;
+ else {
+ bool MyInvalid = false;
+ const SLocEntry &Entry = getSLocEntry(FID, &MyInvalid);
+ if (MyInvalid || !Entry.isFile()) {
+ if (Invalid)
+ *Invalid = true;
+ return 1;
+ }
+
+ Content = const_cast<ContentCache*>(Entry.getFile().getContentCache());
+ }
+
+ // If this is the first use of line information for this buffer, compute the
+  // SourceLineCache for it on demand.
+ if (!Content->SourceLineCache) {
+ bool MyInvalid = false;
+ ComputeLineNumbers(Diag, Content, ContentCacheAlloc, *this, MyInvalid);
+ if (Invalid)
+ *Invalid = MyInvalid;
+ if (MyInvalid)
+ return 1;
+ } else if (Invalid)
+ *Invalid = false;
+
+ // Okay, we know we have a line number table. Do a binary search to find the
+ // line number that this character position lands on.
+ unsigned *SourceLineCache = Content->SourceLineCache;
+ unsigned *SourceLineCacheStart = SourceLineCache;
+ unsigned *SourceLineCacheEnd = SourceLineCache + Content->NumLines;
+
+ unsigned QueriedFilePos = FilePos+1;
+
+ // FIXME: I would like to be convinced that this code is worth being as
+  // complicated as it is; binary search isn't that slow.
+ //
+ // If it is worth being optimized, then in my opinion it could be more
+ // performant, simpler, and more obviously correct by just "galloping" outward
+ // from the queried file position. In fact, this could be incorporated into a
+ // generic algorithm such as lower_bound_with_hint.
+ //
+ // If someone gives me a test case where this matters, I will do it! - DWD
+
+ // If the previous query was to the same file, we know both the file pos from
+ // that query and the line number returned. This allows us to narrow the
+ // search space from the entire file to something near the match.
+ if (LastLineNoFileIDQuery == FID) {
+ if (QueriedFilePos >= LastLineNoFilePos) {
+ // FIXME: Potential overflow?
+ SourceLineCache = SourceLineCache+LastLineNoResult-1;
+
+ // The query is likely to be near the previous one. Here we check to
+ // see if it is within 5, 10 or 20 lines. It can be far away in cases
+ // where big comment blocks and vertical whitespace eat up lines but
+ // contribute no tokens.
+ if (SourceLineCache+5 < SourceLineCacheEnd) {
+ if (SourceLineCache[5] > QueriedFilePos)
+ SourceLineCacheEnd = SourceLineCache+5;
+ else if (SourceLineCache+10 < SourceLineCacheEnd) {
+ if (SourceLineCache[10] > QueriedFilePos)
+ SourceLineCacheEnd = SourceLineCache+10;
+ else if (SourceLineCache+20 < SourceLineCacheEnd) {
+ if (SourceLineCache[20] > QueriedFilePos)
+ SourceLineCacheEnd = SourceLineCache+20;
+ }
+ }
+ }
+ } else {
+ if (LastLineNoResult < Content->NumLines)
+ SourceLineCacheEnd = SourceLineCache+LastLineNoResult+1;
+ }
+ }
+
+ unsigned *Pos
+ = std::lower_bound(SourceLineCache, SourceLineCacheEnd, QueriedFilePos);
+ unsigned LineNo = Pos-SourceLineCacheStart;
+
+ LastLineNoFileIDQuery = FID;
+ LastLineNoContentCache = Content;
+ LastLineNoFilePos = QueriedFilePos;
+ LastLineNoResult = LineNo;
+ return LineNo;
+}
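+
+// The windowed probe above in concrete numbers: if the previous query in this
+// file resolved to line 40 and the new position lies further on, lower_bound
+// is first confined to at most 5 entries past that answer, then 10, then 20,
+// before falling back to the rest of the table. Consecutive diagnostics are
+// usually only a few lines apart, so the binary search almost always runs
+// over one of these tiny ranges instead of the whole file.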
+
+unsigned SourceManager::getSpellingLineNumber(SourceLocation Loc,
+ bool *Invalid) const {
+ if (isInvalid(Loc, Invalid)) return 0;
+ std::pair<FileID, unsigned> LocInfo = getDecomposedSpellingLoc(Loc);
+ return getLineNumber(LocInfo.first, LocInfo.second);
+}
+unsigned SourceManager::getExpansionLineNumber(SourceLocation Loc,
+ bool *Invalid) const {
+ if (isInvalid(Loc, Invalid)) return 0;
+ std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
+ return getLineNumber(LocInfo.first, LocInfo.second);
+}
+unsigned SourceManager::getPresumedLineNumber(SourceLocation Loc,
+ bool *Invalid) const {
+ if (isInvalid(Loc, Invalid)) return 0;
+ return getPresumedLoc(Loc).getLine();
+}
+
+/// getFileCharacteristic - return the file characteristic of the specified
+/// source location, indicating whether this is a normal file, a system
+/// header, or an "implicit extern C" system header.
+///
+/// This state can be modified with flags on GNU linemarker directives like:
+/// # 4 "foo.h" 3
+/// which changes all source locations in the current file after that to be
+/// considered to be from a system header.
+SrcMgr::CharacteristicKind
+SourceManager::getFileCharacteristic(SourceLocation Loc) const {
+ assert(Loc.isValid() && "Can't get file characteristic of invalid loc!");
+ std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
+ bool Invalid = false;
+ const SLocEntry &SEntry = getSLocEntry(LocInfo.first, &Invalid);
+ if (Invalid || !SEntry.isFile())
+ return C_User;
+
+ const SrcMgr::FileInfo &FI = SEntry.getFile();
+
+ // If there are no #line directives in this file, just return the whole-file
+ // state.
+ if (!FI.hasLineDirectives())
+ return FI.getFileCharacteristic();
+
+ assert(LineTable && "Can't have linetable entries without a LineTable!");
+ // See if there is a #line directive before the location.
+ const LineEntry *Entry =
+ LineTable->FindNearestLineEntry(LocInfo.first, LocInfo.second);
+
+ // If this is before the first line marker, use the file characteristic.
+ if (!Entry)
+ return FI.getFileCharacteristic();
+
+ return Entry->FileKind;
+}
+
+/// Return the filename or buffer identifier of the buffer the location is in.
+/// Note that this name does not respect \#line directives. Use getPresumedLoc
+/// for normal clients.
+const char *SourceManager::getBufferName(SourceLocation Loc,
+ bool *Invalid) const {
+ if (isInvalid(Loc, Invalid)) return "<invalid loc>";
+
+ return getBuffer(getFileID(Loc), Invalid)->getBufferIdentifier();
+}
+
+
+/// getPresumedLoc - This method returns the "presumed" location that a
+/// SourceLocation specifies. A "presumed location" can be modified by \#line
+/// or GNU line marker directives. This provides a view on the data that a
+/// user should see in diagnostics, for example.
+///
+/// Note that a presumed location is always given as the expansion point of an
+/// expansion location, not at the spelling location.
+PresumedLoc SourceManager::getPresumedLoc(SourceLocation Loc,
+ bool UseLineDirectives) const {
+ if (Loc.isInvalid()) return PresumedLoc();
+
+ // Presumed locations are always for expansion points.
+ std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
+
+ bool Invalid = false;
+ const SLocEntry &Entry = getSLocEntry(LocInfo.first, &Invalid);
+ if (Invalid || !Entry.isFile())
+ return PresumedLoc();
+
+ const SrcMgr::FileInfo &FI = Entry.getFile();
+ const SrcMgr::ContentCache *C = FI.getContentCache();
+
+ // To get the source name, first consult the FileEntry (if one exists)
+ // before the MemBuffer as this will avoid unnecessarily paging in the
+ // MemBuffer.
+ const char *Filename;
+ if (C->OrigEntry)
+ Filename = C->OrigEntry->getName();
+ else
+ Filename = C->getBuffer(Diag, *this)->getBufferIdentifier();
+
+ unsigned LineNo = getLineNumber(LocInfo.first, LocInfo.second, &Invalid);
+ if (Invalid)
+ return PresumedLoc();
+ unsigned ColNo = getColumnNumber(LocInfo.first, LocInfo.second, &Invalid);
+ if (Invalid)
+ return PresumedLoc();
+
+ SourceLocation IncludeLoc = FI.getIncludeLoc();
+
+ // If we have #line directives in this file, update and overwrite the physical
+ // location info if appropriate.
+ if (UseLineDirectives && FI.hasLineDirectives()) {
+ assert(LineTable && "Can't have linetable entries without a LineTable!");
+ // See if there is a #line directive before this. If so, get it.
+ if (const LineEntry *Entry =
+ LineTable->FindNearestLineEntry(LocInfo.first, LocInfo.second)) {
+ // If the LineEntry indicates a filename, use it.
+ if (Entry->FilenameID != -1)
+ Filename = LineTable->getFilename(Entry->FilenameID);
+
+ // Use the line number specified by the LineEntry. This line number may
+ // be multiple lines down from the line entry. Add the difference in
+ // physical line numbers from the query point and the line marker to the
+ // total.
+ unsigned MarkerLineNo = getLineNumber(LocInfo.first, Entry->FileOffset);
+ LineNo = Entry->LineNo + (LineNo-MarkerLineNo-1);
+
+ // Note that column numbers are not molested by line markers.
+
+ // Handle virtual #include manipulation.
+ if (Entry->IncludeOffset) {
+ IncludeLoc = getLocForStartOfFile(LocInfo.first);
+ IncludeLoc = IncludeLoc.getLocWithOffset(Entry->IncludeOffset);
+ }
+ }
+ }
+
+ return PresumedLoc(Filename, LineNo, ColNo, IncludeLoc);
+}
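+
+// Worked example of the line-marker arithmetic above, assuming this input:
+//
+//   # 100 "foo.h"   <- line marker on physical line 5
+//   int x;          <- physical line 6
+//   int y;          <- physical line 7
+//
+// The marker names the line that follows it, so a query on 'int x;' has
+// LineNo == 6 and MarkerLineNo == 5 and yields 100 + (6 - 5 - 1) == 100,
+// while 'int y;' yields 100 + (7 - 5 - 1) == 101.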
+
+/// \brief Returns whether the PresumedLoc for a given SourceLocation is
+/// in the main file.
+///
+/// This computes the "presumed" location for a SourceLocation, then checks
+/// whether it came from a file other than the main file. This is different
+/// from isWrittenInMainFile() because it takes line marker directives into
+/// account.
+bool SourceManager::isInMainFile(SourceLocation Loc) const {
+ if (Loc.isInvalid()) return false;
+
+ // Presumed locations are always for expansion points.
+ std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
+
+ bool Invalid = false;
+ const SLocEntry &Entry = getSLocEntry(LocInfo.first, &Invalid);
+ if (Invalid || !Entry.isFile())
+ return false;
+
+ const SrcMgr::FileInfo &FI = Entry.getFile();
+
+ // Check if there is a line directive for this location.
+ if (FI.hasLineDirectives())
+ if (const LineEntry *Entry =
+ LineTable->FindNearestLineEntry(LocInfo.first, LocInfo.second))
+ if (Entry->IncludeOffset)
+ return false;
+
+ return FI.getIncludeLoc().isInvalid();
+}
+
+/// \brief The size of the SLocEntry that \p FID represents.
+unsigned SourceManager::getFileIDSize(FileID FID) const {
+ bool Invalid = false;
+ const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &Invalid);
+ if (Invalid)
+ return 0;
+
+ int ID = FID.ID;
+ unsigned NextOffset;
+ if (ID > 0 && unsigned(ID+1) == local_sloc_entry_size())
+ NextOffset = getNextLocalOffset();
+ else if (ID+1 == -1)
+ NextOffset = MaxLoadedOffset;
+ else
+ NextOffset = getSLocEntry(FileID::get(ID+1)).getOffset();
+
+ return NextOffset - Entry.getOffset() - 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Other miscellaneous methods.
+//===----------------------------------------------------------------------===//
+
+/// \brief Retrieve the inode for the given file entry, if possible.
+///
+/// This routine involves a system call, and therefore should only be used
+/// in non-performance-critical code.
+static Optional<llvm::sys::fs::UniqueID>
+getActualFileUID(const FileEntry *File) {
+ if (!File)
+ return None;
+
+ llvm::sys::fs::UniqueID ID;
+ if (llvm::sys::fs::getUniqueID(File->getName(), ID))
+ return None;
+
+ return ID;
+}
+
+/// \brief Get the source location for the given file:line:col triplet.
+///
+/// If the source file is included multiple times, the source location will
+/// be based upon an arbitrary inclusion.
+SourceLocation SourceManager::translateFileLineCol(const FileEntry *SourceFile,
+ unsigned Line,
+ unsigned Col) const {
+ assert(SourceFile && "Null source file!");
+ assert(Line && Col && "Line and column should start from 1!");
+
+ FileID FirstFID = translateFile(SourceFile);
+ return translateLineCol(FirstFID, Line, Col);
+}
+
+/// \brief Get the FileID for the given file.
+///
+/// If the source file is included multiple times, the FileID will be the
+/// first inclusion.
+FileID SourceManager::translateFile(const FileEntry *SourceFile) const {
+ assert(SourceFile && "Null source file!");
+
+ // Find the first file ID that corresponds to the given file.
+ FileID FirstFID;
+
+ // First, check the main file ID, since it is common to look for a
+ // location in the main file.
+ Optional<llvm::sys::fs::UniqueID> SourceFileUID;
+ Optional<StringRef> SourceFileName;
+ if (MainFileID.isValid()) {
+ bool Invalid = false;
+ const SLocEntry &MainSLoc = getSLocEntry(MainFileID, &Invalid);
+ if (Invalid)
+ return FileID();
+
+ if (MainSLoc.isFile()) {
+ const ContentCache *MainContentCache
+ = MainSLoc.getFile().getContentCache();
+ if (!MainContentCache) {
+ // Can't do anything
+ } else if (MainContentCache->OrigEntry == SourceFile) {
+ FirstFID = MainFileID;
+ } else {
+ // Fall back: check whether we have the same base name and inode
+ // as the main file.
+ const FileEntry *MainFile = MainContentCache->OrigEntry;
+ SourceFileName = llvm::sys::path::filename(SourceFile->getName());
+ if (*SourceFileName == llvm::sys::path::filename(MainFile->getName())) {
+ SourceFileUID = getActualFileUID(SourceFile);
+ if (SourceFileUID) {
+ if (Optional<llvm::sys::fs::UniqueID> MainFileUID =
+ getActualFileUID(MainFile)) {
+ if (*SourceFileUID == *MainFileUID) {
+ FirstFID = MainFileID;
+ SourceFile = MainFile;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (FirstFID.isInvalid()) {
+ // The location we're looking for isn't in the main file; look
+ // through all of the local source locations.
+ for (unsigned I = 0, N = local_sloc_entry_size(); I != N; ++I) {
+ bool Invalid = false;
+ const SLocEntry &SLoc = getLocalSLocEntry(I, &Invalid);
+ if (Invalid)
+ return FileID();
+
+ if (SLoc.isFile() &&
+ SLoc.getFile().getContentCache() &&
+ SLoc.getFile().getContentCache()->OrigEntry == SourceFile) {
+ FirstFID = FileID::get(I);
+ break;
+ }
+ }
+ // If that still didn't help, try the modules.
+ if (FirstFID.isInvalid()) {
+ for (unsigned I = 0, N = loaded_sloc_entry_size(); I != N; ++I) {
+ const SLocEntry &SLoc = getLoadedSLocEntry(I);
+ if (SLoc.isFile() &&
+ SLoc.getFile().getContentCache() &&
+ SLoc.getFile().getContentCache()->OrigEntry == SourceFile) {
+ FirstFID = FileID::get(-int(I) - 2);
+ break;
+ }
+ }
+ }
+ }
+
+ // If we haven't found what we want yet, try again, but this time stat()
+ // each of the files in case the files have changed since we originally
+ // parsed the file.
+ if (FirstFID.isInvalid() &&
+ (SourceFileName ||
+ (SourceFileName = llvm::sys::path::filename(SourceFile->getName()))) &&
+ (SourceFileUID || (SourceFileUID = getActualFileUID(SourceFile)))) {
+ bool Invalid = false;
+ for (unsigned I = 0, N = local_sloc_entry_size(); I != N; ++I) {
+ FileID IFileID;
+ IFileID.ID = I;
+ const SLocEntry &SLoc = getSLocEntry(IFileID, &Invalid);
+ if (Invalid)
+ return FileID();
+
+ if (SLoc.isFile()) {
+ const ContentCache *FileContentCache
+ = SLoc.getFile().getContentCache();
+ const FileEntry *Entry = FileContentCache ? FileContentCache->OrigEntry
+ : nullptr;
+ if (Entry &&
+ *SourceFileName == llvm::sys::path::filename(Entry->getName())) {
+ if (Optional<llvm::sys::fs::UniqueID> EntryUID =
+ getActualFileUID(Entry)) {
+ if (*SourceFileUID == *EntryUID) {
+ FirstFID = FileID::get(I);
+ SourceFile = Entry;
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ (void) SourceFile;
+ return FirstFID;
+}
+
+/// \brief Get the source location in \arg FID for the given line:col.
+/// Returns null location if \arg FID is not a file SLocEntry.
+SourceLocation SourceManager::translateLineCol(FileID FID,
+ unsigned Line,
+ unsigned Col) const {
+ // Lines are used as a one-based index into a zero-based array. This assert
+ // checks for possible buffer underruns.
+ assert(Line && Col && "Line and column should start from 1!");
+
+ if (FID.isInvalid())
+ return SourceLocation();
+
+ bool Invalid = false;
+ const SLocEntry &Entry = getSLocEntry(FID, &Invalid);
+ if (Invalid)
+ return SourceLocation();
+
+ if (!Entry.isFile())
+ return SourceLocation();
+
+ SourceLocation FileLoc = SourceLocation::getFileLoc(Entry.getOffset());
+
+ if (Line == 1 && Col == 1)
+ return FileLoc;
+
+ ContentCache *Content
+ = const_cast<ContentCache *>(Entry.getFile().getContentCache());
+ if (!Content)
+ return SourceLocation();
+
+ // If this is the first use of line information for this buffer, compute the
+ // SourceLineCache for it on demand.
+ if (!Content->SourceLineCache) {
+ bool MyInvalid = false;
+ ComputeLineNumbers(Diag, Content, ContentCacheAlloc, *this, MyInvalid);
+ if (MyInvalid)
+ return SourceLocation();
+ }
+
+ if (Line > Content->NumLines) {
+ unsigned Size = Content->getBuffer(Diag, *this)->getBufferSize();
+ if (Size > 0)
+ --Size;
+ return FileLoc.getLocWithOffset(Size);
+ }
+
+ llvm::MemoryBuffer *Buffer = Content->getBuffer(Diag, *this);
+ unsigned FilePos = Content->SourceLineCache[Line - 1];
+ const char *Buf = Buffer->getBufferStart() + FilePos;
+ unsigned BufLength = Buffer->getBufferSize() - FilePos;
+ if (BufLength == 0)
+ return FileLoc.getLocWithOffset(FilePos);
+
+ unsigned i = 0;
+
+ // Advance toward the requested column, stopping at the end of the line.
+ while (i < BufLength-1 && i < Col-1 && Buf[i] != '\n' && Buf[i] != '\r')
+ ++i;
+ return FileLoc.getLocWithOffset(FilePos + i);
+}
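+
+// Usage sketch tying the two translate functions together ('SM' and 'FE' are
+// assumed to be a SourceManager and a FileEntry already in hand):
+//
+//   FileID FID = SM.translateFile(FE);
+//   SourceLocation Loc = SM.translateLineCol(FID, /*Line=*/12, /*Col=*/3);
+//
+// which is what translateFileLineCol() above performs in one step.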
+
+/// \brief Compute a map of macro argument chunks to their expanded source
+/// location. Chunks that are not part of a macro argument will map to an
+/// invalid source location. e.g. if a file contains one macro argument at
+/// offset 100 with length 10, this is how the map will be formed:
+/// 0 -> SourceLocation()
+/// 100 -> Expanded macro arg location
+/// 110 -> SourceLocation()
+void SourceManager::computeMacroArgsCache(MacroArgsMap *&CachePtr,
+ FileID FID) const {
+ assert(FID.isValid());
+ assert(!CachePtr);
+
+ CachePtr = new MacroArgsMap();
+ MacroArgsMap &MacroArgsCache = *CachePtr;
+ // Initially no macro argument chunk is present.
+ MacroArgsCache.insert(std::make_pair(0, SourceLocation()));
+
+ int ID = FID.ID;
+ while (1) {
+ ++ID;
+ // Stop if there are no more FileIDs to check.
+ if (ID > 0) {
+ if (unsigned(ID) >= local_sloc_entry_size())
+ return;
+ } else if (ID == -1) {
+ return;
+ }
+
+ bool Invalid = false;
+ const SrcMgr::SLocEntry &Entry = getSLocEntryByID(ID, &Invalid);
+ if (Invalid)
+ return;
+ if (Entry.isFile()) {
+ SourceLocation IncludeLoc = Entry.getFile().getIncludeLoc();
+ if (IncludeLoc.isInvalid())
+ continue;
+ if (!isInFileID(IncludeLoc, FID))
+ return; // No more files/macros that may be "contained" in this file.
+
+ // Skip the files/macros of the #include'd file; we only care about macros
+ // that lexed macro arguments from our file.
+ if (Entry.getFile().NumCreatedFIDs)
+ ID += Entry.getFile().NumCreatedFIDs - 1/*because of next ++ID*/;
+ continue;
+ }
+
+ const ExpansionInfo &ExpInfo = Entry.getExpansion();
+
+ if (ExpInfo.getExpansionLocStart().isFileID()) {
+ if (!isInFileID(ExpInfo.getExpansionLocStart(), FID))
+ return; // No more files/macros that may be "contained" in this file.
+ }
+
+ if (!ExpInfo.isMacroArgExpansion())
+ continue;
+
+ associateFileChunkWithMacroArgExp(MacroArgsCache, FID,
+ ExpInfo.getSpellingLoc(),
+ SourceLocation::getMacroLoc(Entry.getOffset()),
+ getFileIDSize(FileID::get(ID)));
+ }
+}
+
+void SourceManager::associateFileChunkWithMacroArgExp(
+ MacroArgsMap &MacroArgsCache,
+ FileID FID,
+ SourceLocation SpellLoc,
+ SourceLocation ExpansionLoc,
+ unsigned ExpansionLength) const {
+ if (!SpellLoc.isFileID()) {
+ unsigned SpellBeginOffs = SpellLoc.getOffset();
+ unsigned SpellEndOffs = SpellBeginOffs + ExpansionLength;
+
+ // The spelling range for this macro argument expansion can span multiple
+ // consecutive FileID entries. Go through each entry contained in the
+ // spelling range and if one is itself a macro argument expansion, recurse
+ // and associate the file chunk that it represents.
+
+ FileID SpellFID; // Current FileID in the spelling range.
+ unsigned SpellRelativeOffs;
+ std::tie(SpellFID, SpellRelativeOffs) = getDecomposedLoc(SpellLoc);
+ while (1) {
+ const SLocEntry &Entry = getSLocEntry(SpellFID);
+ unsigned SpellFIDBeginOffs = Entry.getOffset();
+ unsigned SpellFIDSize = getFileIDSize(SpellFID);
+ unsigned SpellFIDEndOffs = SpellFIDBeginOffs + SpellFIDSize;
+ const ExpansionInfo &Info = Entry.getExpansion();
+ if (Info.isMacroArgExpansion()) {
+ unsigned CurrSpellLength;
+ if (SpellFIDEndOffs < SpellEndOffs)
+ CurrSpellLength = SpellFIDSize - SpellRelativeOffs;
+ else
+ CurrSpellLength = ExpansionLength;
+ associateFileChunkWithMacroArgExp(MacroArgsCache, FID,
+ Info.getSpellingLoc().getLocWithOffset(SpellRelativeOffs),
+ ExpansionLoc, CurrSpellLength);
+ }
+
+ if (SpellFIDEndOffs >= SpellEndOffs)
+ return; // we covered all FileID entries in the spelling range.
+
+ // Move to the next FileID entry in the spelling range.
+ unsigned advance = SpellFIDSize - SpellRelativeOffs + 1;
+ ExpansionLoc = ExpansionLoc.getLocWithOffset(advance);
+ ExpansionLength -= advance;
+ ++SpellFID.ID;
+ SpellRelativeOffs = 0;
+ }
+
+ }
+
+ assert(SpellLoc.isFileID());
+
+ unsigned BeginOffs;
+ if (!isInFileID(SpellLoc, FID, &BeginOffs))
+ return;
+
+ unsigned EndOffs = BeginOffs + ExpansionLength;
+
+ // Add a new chunk for this macro argument. A previous macro argument chunk
+ // may have been lexed again, so e.g. if the map is
+ // 0 -> SourceLocation()
+ // 100 -> Expanded loc #1
+ // 110 -> SourceLocation()
+ // and we found a new macro FileID that lexed from offset 105 with length 3,
+ // the new map will be:
+ // 0 -> SourceLocation()
+ // 100 -> Expanded loc #1
+ // 105 -> Expanded loc #2
+ // 108 -> Expanded loc #1
+ // 110 -> SourceLocation()
+ //
+ // Since re-lexed macro chunks will always be the same size as, or smaller
+ // than, previous chunks, we only need to find where the ending of the new
+ // macro chunk is mapped to and update the map with new begin/end mappings.
+
+ MacroArgsMap::iterator I = MacroArgsCache.upper_bound(EndOffs);
+ --I;
+ SourceLocation EndOffsMappedLoc = I->second;
+ MacroArgsCache[BeginOffs] = ExpansionLoc;
+ MacroArgsCache[EndOffs] = EndOffsMappedLoc;
+}
+
+/// \brief If \arg Loc points inside a function macro argument, the returned
+/// location will be the macro location in which the argument was expanded.
+/// If a macro argument is used multiple times, the expanded location will
+/// be at the first expansion of the argument.
+/// e.g.
+/// MY_MACRO(foo);
+/// ^
+/// Passing a file location pointing at 'foo' will yield the macro location
+/// into which 'foo' was expanded.
+SourceLocation
+SourceManager::getMacroArgExpandedLocation(SourceLocation Loc) const {
+ if (Loc.isInvalid() || !Loc.isFileID())
+ return Loc;
+
+ FileID FID;
+ unsigned Offset;
+ std::tie(FID, Offset) = getDecomposedLoc(Loc);
+ if (FID.isInvalid())
+ return Loc;
+
+ MacroArgsMap *&MacroArgsCache = MacroArgsCacheMap[FID];
+ if (!MacroArgsCache)
+ computeMacroArgsCache(MacroArgsCache, FID);
+
+ assert(!MacroArgsCache->empty());
+ MacroArgsMap::iterator I = MacroArgsCache->upper_bound(Offset);
+ --I;
+
+ unsigned MacroArgBeginOffs = I->first;
+ SourceLocation MacroArgExpandedLoc = I->second;
+ if (MacroArgExpandedLoc.isValid())
+ return MacroArgExpandedLoc.getLocWithOffset(Offset - MacroArgBeginOffs);
+
+ return Loc;
+}
+
+std::pair<FileID, unsigned>
+SourceManager::getDecomposedIncludedLoc(FileID FID) const {
+ if (FID.isInvalid())
+ return std::make_pair(FileID(), 0);
+
+ // Uses IncludedLocMap to retrieve/cache the decomposed loc.
+
+ typedef std::pair<FileID, unsigned> DecompTy;
+ typedef llvm::DenseMap<FileID, DecompTy> MapTy;
+ std::pair<MapTy::iterator, bool>
+ InsertOp = IncludedLocMap.insert(std::make_pair(FID, DecompTy()));
+ DecompTy &DecompLoc = InsertOp.first->second;
+ if (!InsertOp.second)
+ return DecompLoc; // already in map.
+
+ SourceLocation UpperLoc;
+ bool Invalid = false;
+ const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &Invalid);
+ if (!Invalid) {
+ if (Entry.isExpansion())
+ UpperLoc = Entry.getExpansion().getExpansionLocStart();
+ else
+ UpperLoc = Entry.getFile().getIncludeLoc();
+ }
+
+ if (UpperLoc.isValid())
+ DecompLoc = getDecomposedLoc(UpperLoc);
+
+ return DecompLoc;
+}
+
+/// Given a decomposed source location, move it up the include/expansion stack
+/// to the parent source location. If this is possible, return the decomposed
+/// version of the parent in Loc and return false. If Loc is the top-level
+/// entry, return true and don't modify it.
+static bool MoveUpIncludeHierarchy(std::pair<FileID, unsigned> &Loc,
+ const SourceManager &SM) {
+ std::pair<FileID, unsigned> UpperLoc = SM.getDecomposedIncludedLoc(Loc.first);
+ if (UpperLoc.first.isInvalid())
+ return true; // We reached the top.
+
+ Loc = UpperLoc;
+ return false;
+}
+
+/// Return the cache entry for comparing the given file IDs
+/// for isBeforeInTranslationUnit.
+InBeforeInTUCacheEntry &SourceManager::getInBeforeInTUCache(FileID LFID,
+ FileID RFID) const {
+ // This is a magic number for limiting the cache size. It was experimentally
+ // derived from a small Objective-C project (where the cache filled
+ // out to ~250 items). We can make it larger if necessary.
+ enum { MagicCacheSize = 300 };
+ IsBeforeInTUCacheKey Key(LFID, RFID);
+
+ // If the cache size isn't too large, do a lookup and if necessary default
+ // construct an entry. We can then return it to the caller for direct
+ // use. When they update the value, the cache will get automatically
+ // updated as well.
+ if (IBTUCache.size() < MagicCacheSize)
+ return IBTUCache[Key];
+
+ // Otherwise, do a lookup that will not construct a new value.
+ InBeforeInTUCache::iterator I = IBTUCache.find(Key);
+ if (I != IBTUCache.end())
+ return I->second;
+
+ // Fall back to the overflow value.
+ return IBTUCacheOverflow;
+}
+
+/// \brief Determines the order of 2 source locations in the translation unit.
+///
+/// \returns true if LHS source location comes before RHS, false otherwise.
+bool SourceManager::isBeforeInTranslationUnit(SourceLocation LHS,
+ SourceLocation RHS) const {
+ assert(LHS.isValid() && RHS.isValid() && "Passed invalid source location!");
+ if (LHS == RHS)
+ return false;
+
+ std::pair<FileID, unsigned> LOffs = getDecomposedLoc(LHS);
+ std::pair<FileID, unsigned> ROffs = getDecomposedLoc(RHS);
+
+ // getDecomposedLoc may have failed to return a valid FileID because, e.g. it
+ // is a serialized one referring to a file that was removed after we loaded
+ // the PCH.
+ if (LOffs.first.isInvalid() || ROffs.first.isInvalid())
+ return LOffs.first.isInvalid() && !ROffs.first.isInvalid();
+
+ // If the source locations are in the same file, just compare offsets.
+ if (LOffs.first == ROffs.first)
+ return LOffs.second < ROffs.second;
+
+ // If we are comparing a source location with multiple locations in the same
+ // file, we get a big win by caching the result.
+ InBeforeInTUCacheEntry &IsBeforeInTUCache =
+ getInBeforeInTUCache(LOffs.first, ROffs.first);
+
+ if (IsBeforeInTUCache.isCacheValid(LOffs.first, ROffs.first))
+ return IsBeforeInTUCache.getCachedResult(LOffs.second, ROffs.second);
+
+ // Okay, we missed in the cache, start updating the cache for this query.
+ IsBeforeInTUCache.setQueryFIDs(LOffs.first, ROffs.first,
+ /*isLFIDBeforeRFID=*/LOffs.first.ID < ROffs.first.ID);
+
+ // We need to find the common ancestor. The only way of doing this is to
+ // build the complete include chain for one and then walk up the chain
+ // of the other looking for a match.
+ // We use a map from FileID to Offset to store the chain. Easier than writing
+ // a custom set hash info that only depends on the first part of a pair.
+ typedef llvm::SmallDenseMap<FileID, unsigned, 16> LocSet;
+ LocSet LChain;
+ do {
+ LChain.insert(LOffs);
+ // We catch the case where LOffs is in a file included by ROffs and
+ // quit early. The other way round unfortunately remains suboptimal.
+ } while (LOffs.first != ROffs.first && !MoveUpIncludeHierarchy(LOffs, *this));
+ LocSet::iterator I;
+ while((I = LChain.find(ROffs.first)) == LChain.end()) {
+ if (MoveUpIncludeHierarchy(ROffs, *this))
+ break; // Met at topmost file.
+ }
+ if (I != LChain.end())
+ LOffs = *I;
+
+ // If we exited because we found a nearest common ancestor, compare the
+ // locations within the common file and cache them.
+ if (LOffs.first == ROffs.first) {
+ IsBeforeInTUCache.setCommonLoc(LOffs.first, LOffs.second, ROffs.second);
+ return IsBeforeInTUCache.getCachedResult(LOffs.second, ROffs.second);
+ }
+
+ // If we arrived here, the location is either in a built-ins buffer or
+ // associated with global inline asm. PR5662 and PR22576 are examples.
+
+ // Clear the lookup cache, it depends on a common location.
+ IsBeforeInTUCache.clear();
+ llvm::MemoryBuffer *LBuf = getBuffer(LOffs.first);
+ llvm::MemoryBuffer *RBuf = getBuffer(ROffs.first);
+ bool LIsBuiltins = strcmp("<built-in>", LBuf->getBufferIdentifier()) == 0;
+ bool RIsBuiltins = strcmp("<built-in>", RBuf->getBufferIdentifier()) == 0;
+ // Sort built-in before non-built-in.
+ if (LIsBuiltins || RIsBuiltins) {
+ if (LIsBuiltins != RIsBuiltins)
+ return LIsBuiltins;
+ // Both are in built-in buffers, but from different files. We just claim that
+ // lower IDs come first.
+ return LOffs.first < ROffs.first;
+ }
+ bool LIsAsm = strcmp("<inline asm>", LBuf->getBufferIdentifier()) == 0;
+ bool RIsAsm = strcmp("<inline asm>", RBuf->getBufferIdentifier()) == 0;
+ // Sort assembler after built-ins, but before the rest.
+ if (LIsAsm || RIsAsm) {
+ if (LIsAsm != RIsAsm)
+ return RIsAsm;
+ assert(LOffs.first == ROffs.first);
+ return false;
+ }
+ llvm_unreachable("Unsortable locations found");
+}
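+
+// Illustration of the common-ancestor walk above: with main.c including both
+// a.h and b.h, comparing a location in a.h against one in b.h first records
+// LChain = {a.h, main.c}, then walks the b.h side up to main.c, which is
+// found in LChain. Both sides are then compared by the offsets of their
+// respective #include points within main.c.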
+
+void SourceManager::PrintStats() const {
+ llvm::errs() << "\n*** Source Manager Stats:\n";
+ llvm::errs() << FileInfos.size() << " files mapped, " << MemBufferInfos.size()
+ << " mem buffers mapped.\n";
+ llvm::errs() << LocalSLocEntryTable.size() << " local SLocEntries allocated ("
+ << llvm::capacity_in_bytes(LocalSLocEntryTable)
+ << " bytes of capacity), "
+ << NextLocalOffset << "B of Sloc address space used.\n";
+ llvm::errs() << LoadedSLocEntryTable.size()
+ << " loaded SLocEntries allocated, "
+ << MaxLoadedOffset - CurrentLoadedOffset
+ << "B of Sloc address space used.\n";
+
+ unsigned NumLineNumsComputed = 0;
+ unsigned NumFileBytesMapped = 0;
+ for (fileinfo_iterator I = fileinfo_begin(), E = fileinfo_end(); I != E; ++I){
+ NumLineNumsComputed += I->second->SourceLineCache != nullptr;
+ NumFileBytesMapped += I->second->getSizeBytesMapped();
+ }
+ unsigned NumMacroArgsComputed = MacroArgsCacheMap.size();
+
+ llvm::errs() << NumFileBytesMapped << " bytes of files mapped, "
+ << NumLineNumsComputed << " files with line #'s computed, "
+ << NumMacroArgsComputed << " files with macro args computed.\n";
+ llvm::errs() << "FileID scans: " << NumLinearScans << " linear, "
+ << NumBinaryProbes << " binary.\n";
+}
+
+LLVM_DUMP_METHOD void SourceManager::dump() const {
+ llvm::raw_ostream &out = llvm::errs();
+
+ auto DumpSLocEntry = [&](int ID, const SrcMgr::SLocEntry &Entry,
+ llvm::Optional<unsigned> NextStart) {
+ out << "SLocEntry <FileID " << ID << "> " << (Entry.isFile() ? "file" : "expansion")
+ << " <SourceLocation " << Entry.getOffset() << ":";
+ if (NextStart)
+ out << *NextStart << ">\n";
+ else
+ out << "???\?>\n";
+ if (Entry.isFile()) {
+ auto &FI = Entry.getFile();
+ if (FI.NumCreatedFIDs)
+ out << " covers <FileID " << ID << ":" << int(ID + FI.NumCreatedFIDs)
+ << ">\n";
+ if (FI.getIncludeLoc().isValid())
+ out << " included from " << FI.getIncludeLoc().getOffset() << "\n";
+ if (auto *CC = FI.getContentCache()) {
+ out << " for " << (CC->OrigEntry ? CC->OrigEntry->getName() : "<none>")
+ << "\n";
+ if (CC->BufferOverridden)
+ out << " contents overridden\n";
+ if (CC->ContentsEntry != CC->OrigEntry) {
+ out << " contents from "
+ << (CC->ContentsEntry ? CC->ContentsEntry->getName() : "<none>")
+ << "\n";
+ }
+ }
+ } else {
+ auto &EI = Entry.getExpansion();
+ out << " spelling from " << EI.getSpellingLoc().getOffset() << "\n";
+ out << " macro " << (EI.isMacroArgExpansion() ? "arg" : "body")
+ << " range <" << EI.getExpansionLocStart().getOffset() << ":"
+ << EI.getExpansionLocEnd().getOffset() << ">\n";
+ }
+ };
+
+ // Dump local SLocEntries.
+ for (unsigned ID = 0, NumIDs = LocalSLocEntryTable.size(); ID != NumIDs; ++ID) {
+ DumpSLocEntry(ID, LocalSLocEntryTable[ID],
+ ID == NumIDs - 1 ? NextLocalOffset
+ : LocalSLocEntryTable[ID + 1].getOffset());
+ }
+ // Dump loaded SLocEntries.
+ llvm::Optional<unsigned> NextStart;
+ for (unsigned Index = 0; Index != LoadedSLocEntryTable.size(); ++Index) {
+ int ID = -(int)Index - 2;
+ if (SLocEntryLoaded[Index]) {
+ DumpSLocEntry(ID, LoadedSLocEntryTable[Index], NextStart);
+ NextStart = LoadedSLocEntryTable[Index].getOffset();
+ } else {
+ NextStart = None;
+ }
+ }
+}
+
+ExternalSLocEntrySource::~ExternalSLocEntrySource() { }
+
+/// Return the amount of memory used by memory buffers, breaking down
+/// by heap-backed versus mmap'ed memory.
+SourceManager::MemoryBufferSizes SourceManager::getMemoryBufferSizes() const {
+ size_t malloc_bytes = 0;
+ size_t mmap_bytes = 0;
+
+ for (unsigned i = 0, e = MemBufferInfos.size(); i != e; ++i)
+ if (size_t sized_mapped = MemBufferInfos[i]->getSizeBytesMapped())
+ switch (MemBufferInfos[i]->getMemoryBufferKind()) {
+ case llvm::MemoryBuffer::MemoryBuffer_MMap:
+ mmap_bytes += sized_mapped;
+ break;
+ case llvm::MemoryBuffer::MemoryBuffer_Malloc:
+ malloc_bytes += sized_mapped;
+ break;
+ }
+
+ return MemoryBufferSizes(malloc_bytes, mmap_bytes);
+}
+
+size_t SourceManager::getDataStructureSizes() const {
+ size_t size = llvm::capacity_in_bytes(MemBufferInfos)
+ + llvm::capacity_in_bytes(LocalSLocEntryTable)
+ + llvm::capacity_in_bytes(LoadedSLocEntryTable)
+ + llvm::capacity_in_bytes(SLocEntryLoaded)
+ + llvm::capacity_in_bytes(FileInfos);
+
+ if (OverriddenFilesInfo)
+ size += llvm::capacity_in_bytes(OverriddenFilesInfo->OverriddenFiles);
+
+ return size;
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp
new file mode 100644
index 0000000..1648a27
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp
@@ -0,0 +1,639 @@
+//===--- TargetInfo.cpp - Information about Target machine ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TargetInfo and TargetInfoImpl interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/AddressSpaces.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cstdlib>
+using namespace clang;
+
+static const LangAS::Map DefaultAddrSpaceMap = { 0 };
+
+// TargetInfo Constructor.
+TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
+ // Set defaults. Defaults are set for a 32-bit RISC platform, like PPC or
+ // SPARC. These should be overridden by concrete targets as needed.
+ BigEndian = true;
+ TLSSupported = true;
+ NoAsmVariants = false;
+ PointerWidth = PointerAlign = 32;
+ BoolWidth = BoolAlign = 8;
+ IntWidth = IntAlign = 32;
+ LongWidth = LongAlign = 32;
+ LongLongWidth = LongLongAlign = 64;
+ SuitableAlign = 64;
+ DefaultAlignForAttributeAligned = 128;
+ MinGlobalAlign = 0;
+ HalfWidth = 16;
+ HalfAlign = 16;
+ FloatWidth = 32;
+ FloatAlign = 32;
+ DoubleWidth = 64;
+ DoubleAlign = 64;
+ LongDoubleWidth = 64;
+ LongDoubleAlign = 64;
+ LargeArrayMinWidth = 0;
+ LargeArrayAlign = 0;
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 0;
+ MaxVectorAlign = 0;
+ MaxTLSAlign = 0;
+ SimdDefaultAlign = 0;
+ SizeType = UnsignedLong;
+ PtrDiffType = SignedLong;
+ IntMaxType = SignedLongLong;
+ IntPtrType = SignedLong;
+ WCharType = SignedInt;
+ WIntType = SignedInt;
+ Char16Type = UnsignedShort;
+ Char32Type = UnsignedInt;
+ Int64Type = SignedLongLong;
+ SigAtomicType = SignedInt;
+ ProcessIDType = SignedInt;
+ UseSignedCharForObjCBool = true;
+ UseBitFieldTypeAlignment = true;
+ UseZeroLengthBitfieldAlignment = false;
+ ZeroLengthBitfieldBoundary = 0;
+ HalfFormat = &llvm::APFloat::IEEEhalf;
+ FloatFormat = &llvm::APFloat::IEEEsingle;
+ DoubleFormat = &llvm::APFloat::IEEEdouble;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ DataLayoutString = nullptr;
+ UserLabelPrefix = "_";
+ MCountName = "mcount";
+ RegParmMax = 0;
+ SSERegParmMax = 0;
+ HasAlignMac68kSupport = false;
+ HasBuiltinMSVaList = false;
+
+ // Default to no types using fpret.
+ RealTypeUsesObjCFPRet = 0;
+
+ // Default to not using fp2ret for __Complex long double
+ ComplexLongDoubleUsesFP2Ret = false;
+
+ // Set the C++ ABI based on the triple.
+ TheCXXABI.set(Triple.isKnownWindowsMSVCEnvironment()
+ ? TargetCXXABI::Microsoft
+ : TargetCXXABI::GenericItanium);
+
+ // Default to an empty address space map.
+ AddrSpaceMap = &DefaultAddrSpaceMap;
+ UseAddrSpaceMapMangling = false;
+
+ // Default to an unknown platform name.
+ PlatformName = "unknown";
+ PlatformMinVersion = VersionTuple();
+}
+
+// Out of line virtual dtor for TargetInfo.
+TargetInfo::~TargetInfo() {}
+
+/// getTypeName - Return the user string for the specified integer type enum.
+/// For example, SignedShort -> "short".
+const char *TargetInfo::getTypeName(IntType T) {
+ switch (T) {
+ default: llvm_unreachable("not an integer!");
+ case SignedChar: return "signed char";
+ case UnsignedChar: return "unsigned char";
+ case SignedShort: return "short";
+ case UnsignedShort: return "unsigned short";
+ case SignedInt: return "int";
+ case UnsignedInt: return "unsigned int";
+ case SignedLong: return "long int";
+ case UnsignedLong: return "long unsigned int";
+ case SignedLongLong: return "long long int";
+ case UnsignedLongLong: return "long long unsigned int";
+ }
+}
+
+/// getTypeConstantSuffix - Return the constant suffix for the specified
+/// integer type enum. For example, SignedLong -> "L".
+const char *TargetInfo::getTypeConstantSuffix(IntType T) const {
+ switch (T) {
+ default: llvm_unreachable("not an integer!");
+ case SignedChar:
+ case SignedShort:
+ case SignedInt: return "";
+ case SignedLong: return "L";
+ case SignedLongLong: return "LL";
+ case UnsignedChar:
+ if (getCharWidth() < getIntWidth())
+ return "";
+ case UnsignedShort:
+ if (getShortWidth() < getIntWidth())
+ return "";
+ case UnsignedInt: return "U";
+ case UnsignedLong: return "UL";
+ case UnsignedLongLong: return "ULL";
+ }
+}
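+
+// The fall-through in the unsigned cases above is deliberate. Worked example:
+// on a typical target (8-bit char, 16-bit short, 32-bit int), 'unsigned char'
+// and 'unsigned short' constants promote to int and return "" before reaching
+// the "U" suffix; only on a target where their width reaches the int width do
+// they fall through to UnsignedInt and pick up "U".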
+
+/// getTypeFormatModifier - Return the printf format modifier for the
+/// specified integer type enum. For example, SignedLong -> "l".
+const char *TargetInfo::getTypeFormatModifier(IntType T) {
+ switch (T) {
+ default: llvm_unreachable("not an integer!");
+ case SignedChar:
+ case UnsignedChar: return "hh";
+ case SignedShort:
+ case UnsignedShort: return "h";
+ case SignedInt:
+ case UnsignedInt: return "";
+ case SignedLong:
+ case UnsignedLong: return "l";
+ case SignedLongLong:
+ case UnsignedLongLong: return "ll";
+ }
+}
+
+/// getTypeWidth - Return the width (in bits) of the specified integer type
+/// enum. For example, SignedInt -> getIntWidth().
+unsigned TargetInfo::getTypeWidth(IntType T) const {
+ switch (T) {
+ default: llvm_unreachable("not an integer!");
+ case SignedChar:
+ case UnsignedChar: return getCharWidth();
+ case SignedShort:
+ case UnsignedShort: return getShortWidth();
+ case SignedInt:
+ case UnsignedInt: return getIntWidth();
+ case SignedLong:
+ case UnsignedLong: return getLongWidth();
+ case SignedLongLong:
+ case UnsignedLongLong: return getLongLongWidth();
+ }
+}
+
+TargetInfo::IntType TargetInfo::getIntTypeByWidth(
+ unsigned BitWidth, bool IsSigned) const {
+ if (getCharWidth() == BitWidth)
+ return IsSigned ? SignedChar : UnsignedChar;
+ if (getShortWidth() == BitWidth)
+ return IsSigned ? SignedShort : UnsignedShort;
+ if (getIntWidth() == BitWidth)
+ return IsSigned ? SignedInt : UnsignedInt;
+ if (getLongWidth() == BitWidth)
+ return IsSigned ? SignedLong : UnsignedLong;
+ if (getLongLongWidth() == BitWidth)
+ return IsSigned ? SignedLongLong : UnsignedLongLong;
+ return NoInt;
+}
+
+TargetInfo::IntType TargetInfo::getLeastIntTypeByWidth(unsigned BitWidth,
+ bool IsSigned) const {
+ if (getCharWidth() >= BitWidth)
+ return IsSigned ? SignedChar : UnsignedChar;
+ if (getShortWidth() >= BitWidth)
+ return IsSigned ? SignedShort : UnsignedShort;
+ if (getIntWidth() >= BitWidth)
+ return IsSigned ? SignedInt : UnsignedInt;
+ if (getLongWidth() >= BitWidth)
+ return IsSigned ? SignedLong : UnsignedLong;
+ if (getLongLongWidth() >= BitWidth)
+ return IsSigned ? SignedLongLong : UnsignedLongLong;
+ return NoInt;
+}
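+
+// Sketch of the difference between the two lookups above, for a typical
+// target with 8/16/32/64-bit char/short/int/long:
+//
+//   getIntTypeByWidth(32, /*IsSigned=*/true);       // SignedInt (exact match)
+//   getIntTypeByWidth(24, /*IsSigned=*/true);       // NoInt (no exact match)
+//   getLeastIntTypeByWidth(24, /*IsSigned=*/false); // UnsignedInt (smallest
+//                                                   // type >= 24 bits wide)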
+
+TargetInfo::RealType TargetInfo::getRealTypeByWidth(unsigned BitWidth) const {
+ if (getFloatWidth() == BitWidth)
+ return Float;
+ if (getDoubleWidth() == BitWidth)
+ return Double;
+
+ switch (BitWidth) {
+ case 96:
+ if (&getLongDoubleFormat() == &llvm::APFloat::x87DoubleExtended)
+ return LongDouble;
+ break;
+ case 128:
+ if (&getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble ||
+ &getLongDoubleFormat() == &llvm::APFloat::IEEEquad)
+ return LongDouble;
+ break;
+ }
+
+ return NoFloat;
+}
+
+/// getTypeAlign - Return the alignment (in bits) of the specified integer type
+/// enum. For example, SignedInt -> getIntAlign().
+unsigned TargetInfo::getTypeAlign(IntType T) const {
+ switch (T) {
+ default: llvm_unreachable("not an integer!");
+ case SignedChar:
+ case UnsignedChar: return getCharAlign();
+ case SignedShort:
+ case UnsignedShort: return getShortAlign();
+ case SignedInt:
+ case UnsignedInt: return getIntAlign();
+ case SignedLong:
+ case UnsignedLong: return getLongAlign();
+ case SignedLongLong:
+ case UnsignedLongLong: return getLongLongAlign();
+ }
+}
+
+/// isTypeSigned - Return whether an integer type is signed. Returns true if
+/// the type is signed; false otherwise.
+bool TargetInfo::isTypeSigned(IntType T) {
+ switch (T) {
+ default: llvm_unreachable("not an integer!");
+ case SignedChar:
+ case SignedShort:
+ case SignedInt:
+ case SignedLong:
+ case SignedLongLong:
+ return true;
+ case UnsignedChar:
+ case UnsignedShort:
+ case UnsignedInt:
+ case UnsignedLong:
+ case UnsignedLongLong:
+ return false;
+ }
+}
+
+/// adjust - Set forced language options.
+/// Apply changes to the target information with respect to certain
+/// language options which change the target configuration.
+void TargetInfo::adjust(const LangOptions &Opts) {
+ if (Opts.NoBitFieldTypeAlign)
+ UseBitFieldTypeAlignment = false;
+ if (Opts.ShortWChar)
+ WCharType = UnsignedShort;
+
+ if (Opts.OpenCL) {
+ // OpenCL C requires specific widths for types, irrespective of
+ // what these normally are for the target.
+ // We also define long long and long double here, although the
+ // OpenCL standard only mentions these as "reserved".
+ IntWidth = IntAlign = 32;
+ LongWidth = LongAlign = 64;
+ LongLongWidth = LongLongAlign = 128;
+ HalfWidth = HalfAlign = 16;
+ FloatWidth = FloatAlign = 32;
+
+ // Embedded 32-bit targets (OpenCL EP) might have double C type
+ // defined as float. Let's not override this as it might lead
+ // to generating illegal code that uses 64-bit doubles.
+ if (DoubleWidth != FloatWidth) {
+ DoubleWidth = DoubleAlign = 64;
+ DoubleFormat = &llvm::APFloat::IEEEdouble;
+ }
+ LongDoubleWidth = LongDoubleAlign = 128;
+
+ assert(PointerWidth == 32 || PointerWidth == 64);
+ bool Is32BitArch = PointerWidth == 32;
+ SizeType = Is32BitArch ? UnsignedInt : UnsignedLong;
+ PtrDiffType = Is32BitArch ? SignedInt : SignedLong;
+ IntPtrType = Is32BitArch ? SignedInt : SignedLong;
+
+ IntMaxType = SignedLongLong;
+ Int64Type = SignedLong;
+
+ HalfFormat = &llvm::APFloat::IEEEhalf;
+ FloatFormat = &llvm::APFloat::IEEEsingle;
+ LongDoubleFormat = &llvm::APFloat::IEEEquad;
+ }
+}
+
+bool TargetInfo::initFeatureMap(
+ llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
+ const std::vector<std::string> &FeatureVec) const {
+ for (const auto &F : FeatureVec) {
+ StringRef Name = F;
+ // Apply the feature via the target.
+ bool Enabled = Name[0] == '+';
+ setFeatureEnabled(Features, Name.substr(1), Enabled);
+ }
+ return true;
+}
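+
+// Each feature vector entry carries a leading '+' or '-'; e.g. a vector of
+// {"+sse2", "-mmx"} enables sse2 and disables mmx. The first character picks
+// the polarity and the remainder is the feature name handed to
+// setFeatureEnabled() above.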
+
+//===----------------------------------------------------------------------===//
+
+static StringRef removeGCCRegisterPrefix(StringRef Name) {
+ if (Name[0] == '%' || Name[0] == '#')
+ Name = Name.substr(1);
+
+ return Name;
+}
+
+/// isValidClobber - Returns whether the passed in string is
+/// a valid clobber in an inline asm statement. This is used by
+/// Sema.
+bool TargetInfo::isValidClobber(StringRef Name) const {
+ return (isValidGCCRegisterName(Name) ||
+ Name == "memory" || Name == "cc");
+}
+
+/// isValidGCCRegisterName - Returns whether the passed in string
+/// is a valid register name according to GCC. This is used by Sema for
+/// inline asm statements.
+bool TargetInfo::isValidGCCRegisterName(StringRef Name) const {
+ if (Name.empty())
+ return false;
+
+ // Get rid of any register prefix.
+ Name = removeGCCRegisterPrefix(Name);
+ if (Name.empty())
+ return false;
+
+ ArrayRef<const char *> Names = getGCCRegNames();
+
+ // If we have a number it maps to an entry in the register name array.
+ if (isDigit(Name[0])) {
+ unsigned n;
+ if (!Name.getAsInteger(0, n))
+ return n < Names.size();
+ }
+
+ // Check register names.
+ if (std::find(Names.begin(), Names.end(), Name) != Names.end())
+ return true;
+
+ // Check any additional names that we have.
+ for (const AddlRegName &ARN : getGCCAddlRegNames())
+ for (const char *AN : ARN.Names) {
+ if (!AN)
+ break;
+ // Make sure the register that the additional name is for is within
+ // the bounds of the register names from above.
+ if (AN == Name && ARN.RegNum < Names.size())
+ return true;
+ }
+
+ // Now check aliases.
+ for (const GCCRegAlias &GRA : getGCCRegAliases())
+ for (const char *A : GRA.Aliases) {
+ if (!A)
+ break;
+ if (A == Name)
+ return true;
+ }
+
+ return false;
+}
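+
+// Examples of what the checks above accept: "%eax" and "eax" are equivalent,
+// since the '%' (or '#') prefix is stripped first; a pure number such as "4"
+// is valid only if it indexes into the target's register-name table; and
+// names are also matched against the target's additional names and aliases.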
+
+StringRef
+TargetInfo::getNormalizedGCCRegisterName(StringRef Name) const {
+ assert(isValidGCCRegisterName(Name) && "Invalid register passed in");
+
+ // Get rid of any register prefix.
+ Name = removeGCCRegisterPrefix(Name);
+
+ ArrayRef<const char *> Names = getGCCRegNames();
+
+ // First, check if we have a number.
+ if (isDigit(Name[0])) {
+ unsigned n;
+ if (!Name.getAsInteger(0, n)) {
+ assert(n < Names.size() && "Out of bounds register number!");
+ return Names[n];
+ }
+ }
+
+ // Check any additional names that we have.
+ for (const AddlRegName &ARN : getGCCAddlRegNames())
+ for (const char *AN : ARN.Names) {
+ if (!AN)
+ break;
+ // Make sure the register that the additional name is for is within
+ // the bounds of the register names from above.
+ if (AN == Name && ARN.RegNum < Names.size())
+ return Name;
+ }
+
+ // Now check aliases.
+ for (const GCCRegAlias &RA : getGCCRegAliases())
+ for (const char *A : RA.Aliases) {
+ if (!A)
+ break;
+ if (A == Name)
+ return RA.Register;
+ }
+
+ return Name;
+}
+
+bool TargetInfo::validateOutputConstraint(ConstraintInfo &Info) const {
+ const char *Name = Info.getConstraintStr().c_str();
+ // An output constraint must start with '=' or '+'
+ if (*Name != '=' && *Name != '+')
+ return false;
+
+ if (*Name == '+')
+ Info.setIsReadWrite();
+
+ Name++;
+ while (*Name) {
+ switch (*Name) {
+ default:
+ if (!validateAsmConstraint(Name, Info)) {
+ // FIXME: We temporarily return false
+ // so we can add more constraints as we hit it.
+ // Eventually, an unknown constraint should just be treated as 'g'.
+ return false;
+ }
+ break;
+ case '&': // early clobber.
+ Info.setEarlyClobber();
+ break;
+ case '%': // commutative.
+ // FIXME: Check that there is another register after this one.
+ break;
+ case 'r': // general register.
+ Info.setAllowsRegister();
+ break;
+ case 'm': // memory operand.
+ case 'o': // offsettable memory operand.
+ case 'V': // non-offsettable memory operand.
+ case '<': // autodecrement memory operand.
+ case '>': // autoincrement memory operand.
+ Info.setAllowsMemory();
+ break;
+ case 'g': // general register, memory operand or immediate integer.
+ case 'X': // any operand.
+ Info.setAllowsRegister();
+ Info.setAllowsMemory();
+ break;
+ case ',': // multiple alternative constraint. Pass it.
+ // Handle additional optional '=' or '+' modifiers.
+ if (Name[1] == '=' || Name[1] == '+')
+ Name++;
+ break;
+ case '#': // Ignore as constraint.
+ while (Name[1] && Name[1] != ',')
+ Name++;
+ break;
+ case '?': // Slightly disparage this alternative.
+ case '!': // Severely disparage this alternative.
+ case '*': // Ignore for choosing register preferences.
+ break; // Pass them.
+ }
+
+ Name++;
+ }
+
+ // Early clobber with a read-write constraint which doesn't permit registers
+ // is invalid.
+ if (Info.earlyClobber() && Info.isReadWrite() && !Info.allowsRegister())
+ return false;
+
+ // If a constraint allows neither memory nor register operands it contains
+ // only modifiers. Reject it.
+ return Info.allowsMemory() || Info.allowsRegister();
+}
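+
+// Worked example of the walk above: for "=&r", '=' is consumed up front, '&'
+// sets the early-clobber flag, and 'r' sets allowsRegister(), so the
+// constraint is accepted. "+&m" is rejected: '+' makes it read-write, 'm'
+// only allows memory, and an early-clobber read-write constraint that cannot
+// use a register is caught by the final check.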
+
+bool TargetInfo::resolveSymbolicName(const char *&Name,
+ ArrayRef<ConstraintInfo> OutputConstraints,
+ unsigned &Index) const {
+ assert(*Name == '[' && "Symbolic name did not start with '['");
+ Name++;
+ const char *Start = Name;
+ while (*Name && *Name != ']')
+ Name++;
+
+ if (!*Name) {
+ // Missing ']'
+ return false;
+ }
+
+ std::string SymbolicName(Start, Name - Start);
+
+ for (Index = 0; Index != OutputConstraints.size(); ++Index)
+ if (SymbolicName == OutputConstraints[Index].getName())
+ return true;
+
+ return false;
+}
+
+bool TargetInfo::validateInputConstraint(
+ MutableArrayRef<ConstraintInfo> OutputConstraints,
+ ConstraintInfo &Info) const {
+ const char *Name = Info.ConstraintStr.c_str();
+
+ if (!*Name)
+ return false;
+
+ while (*Name) {
+ switch (*Name) {
+ default:
+ // Check if we have a matching constraint
+ if (*Name >= '0' && *Name <= '9') {
+ const char *DigitStart = Name;
+ while (Name[1] >= '0' && Name[1] <= '9')
+ Name++;
+ const char *DigitEnd = Name;
+ unsigned i;
+ if (StringRef(DigitStart, DigitEnd - DigitStart + 1)
+ .getAsInteger(10, i))
+ return false;
+
+ // Check if matching constraint is out of bounds.
+ if (i >= OutputConstraints.size()) return false;
+
+ // A number must refer to an output-only operand.
+ if (OutputConstraints[i].isReadWrite())
+ return false;
+
+ // If the constraint is already tied, it must be tied to the
+ // same operand referred to by the number.
+ if (Info.hasTiedOperand() && Info.getTiedOperand() != i)
+ return false;
+
+ // The constraint should have the same info as the respective
+ // output constraint.
+ Info.setTiedOperand(i, OutputConstraints[i]);
+ } else if (!validateAsmConstraint(Name, Info)) {
+ // FIXME: This error return is in place temporarily so we can
+ // add more constraints as we hit it. Eventually, an unknown
+ // constraint should just be treated as 'g'.
+ return false;
+ }
+ break;
+ case '[': {
+ unsigned Index = 0;
+ if (!resolveSymbolicName(Name, OutputConstraints, Index))
+ return false;
+
+ // If the constraint is already tied, it must be tied to the
+ // same operand referred to by the number.
+ if (Info.hasTiedOperand() && Info.getTiedOperand() != Index)
+ return false;
+
+ // A number must refer to an output-only operand.
+ if (OutputConstraints[Index].isReadWrite())
+ return false;
+
+ Info.setTiedOperand(Index, OutputConstraints[Index]);
+ break;
+ }
+ case '%': // commutative
+ // FIXME: Fail if % is used with the last operand.
+ break;
+ case 'i': // immediate integer.
+ case 'n': // immediate integer with a known value.
+ break;
+ case 'I': // Various constant constraints with target-specific meanings.
+ case 'J':
+ case 'K':
+ case 'L':
+ case 'M':
+ case 'N':
+ case 'O':
+ case 'P':
+ if (!validateAsmConstraint(Name, Info))
+ return false;
+ break;
+ case 'r': // general register.
+ Info.setAllowsRegister();
+ break;
+ case 'm': // memory operand.
+ case 'o': // offsettable memory operand.
+ case 'V': // non-offsettable memory operand.
+ case '<': // autodecrement memory operand.
+ case '>': // autoincrement memory operand.
+ Info.setAllowsMemory();
+ break;
+ case 'g': // general register, memory operand or immediate integer.
+ case 'X': // any operand.
+ Info.setAllowsRegister();
+ Info.setAllowsMemory();
+ break;
+ case 'E': // immediate floating point.
+ case 'F': // immediate floating point.
+ case 'p': // address operand.
+ break;
+ case ',': // multiple alternative constraint. Ignore comma.
+ break;
+ case '#': // Ignore as constraint.
+ while (Name[1] && Name[1] != ',')
+ Name++;
+ break;
+ case '?': // Slightly disparage this alternative.
+ case '!': // Severely disparage this alternative.
+ case '*': // Ignore for choosing register preferences.
+ break; // Pass them.
+ }
+
+ Name++;
+ }
+
+ return true;
+}
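+
+// Worked example for matching constraints: in
+//
+//   asm("..." : "=r"(x) : "0"(y));
+//
+// the input constraint "0" ties the input to output operand 0. The digit
+// parsing above resolves the index, rejects out-of-range or read-write
+// targets, and setTiedOperand(0, ...) gives the input the same
+// register/memory info as the corresponding output constraint.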
diff --git a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
new file mode 100644
index 0000000..9ce5257
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
@@ -0,0 +1,7963 @@
+//===--- Targets.cpp - Implement target feature support -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements construction of a TargetInfo object from a
+// target triple.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/MacroBuilder.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Basic/TargetOptions.h"
+#include "clang/Basic/Version.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetParser.h"
+#include <algorithm>
+#include <memory>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Common code shared among targets.
+//===----------------------------------------------------------------------===//
+
+/// DefineStd - Define a macro name and standard variants. For example if
+/// MacroName is "unix", then this will always define "__unix" and "__unix__",
+/// and will also define "unix" when in GNU mode.
+static void DefineStd(MacroBuilder &Builder, StringRef MacroName,
+ const LangOptions &Opts) {
+ assert(MacroName[0] != '_' && "Identifier should be in the user's namespace");
+
+ // If in GNU mode (e.g. -std=gnu99 but not -std=c99) define the raw identifier
+ // in the user's namespace.
+ if (Opts.GNUMode)
+ Builder.defineMacro(MacroName);
+
+ // Define __unix.
+ Builder.defineMacro("__" + MacroName);
+
+ // Define __unix__.
+ Builder.defineMacro("__" + MacroName + "__");
+}
+
+static void defineCPUMacros(MacroBuilder &Builder, StringRef CPUName,
+ bool Tuning = true) {
+ Builder.defineMacro("__" + CPUName);
+ Builder.defineMacro("__" + CPUName + "__");
+ if (Tuning)
+ Builder.defineMacro("__tune_" + CPUName + "__");
+}
+
+//===----------------------------------------------------------------------===//
+// Defines specific to certain operating systems.
+//===----------------------------------------------------------------------===//
+
+namespace {
+template<typename TgtInfo>
+class OSTargetInfo : public TgtInfo {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const = 0;
+public:
+ OSTargetInfo(const llvm::Triple &Triple) : TgtInfo(Triple) {}
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ TgtInfo::getTargetDefines(Opts, Builder);
+ getOSDefines(Opts, TgtInfo::getTriple(), Builder);
+ }
+
+};
+
+// CloudABI Target
+template <typename Target>
+class CloudABITargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ Builder.defineMacro("__CloudABI__");
+ Builder.defineMacro("__ELF__");
+
+ // CloudABI uses ISO/IEC 10646:2012 for wchar_t, char16_t and char32_t.
+ Builder.defineMacro("__STDC_ISO_10646__", "201206L");
+ Builder.defineMacro("__STDC_UTF_16__");
+ Builder.defineMacro("__STDC_UTF_32__");
+ }
+
+public:
+ CloudABITargetInfo(const llvm::Triple &Triple)
+ : OSTargetInfo<Target>(Triple) {
+ this->UserLabelPrefix = "";
+ }
+};
+
+static void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
+ const llvm::Triple &Triple,
+ StringRef &PlatformName,
+ VersionTuple &PlatformMinVersion) {
+ Builder.defineMacro("__APPLE_CC__", "6000");
+ Builder.defineMacro("__APPLE__");
+ Builder.defineMacro("OBJC_NEW_PROPERTIES");
+ // AddressSanitizer doesn't play well with source fortification, which is on
+ // by default on Darwin.
+ if (Opts.Sanitize.has(SanitizerKind::Address))
+ Builder.defineMacro("_FORTIFY_SOURCE", "0");
+
+ // Darwin defines __weak, __strong, and __unsafe_unretained even in C mode.
+ if (!Opts.ObjC1) {
+ // __weak is always defined, for use in blocks and with objc pointers.
+ Builder.defineMacro("__weak", "__attribute__((objc_gc(weak)))");
+ Builder.defineMacro("__strong", "");
+ Builder.defineMacro("__unsafe_unretained", "");
+ }
+
+ if (Opts.Static)
+ Builder.defineMacro("__STATIC__");
+ else
+ Builder.defineMacro("__DYNAMIC__");
+
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_REENTRANT");
+
+ // Get the platform type and version number from the triple.
+ unsigned Maj, Min, Rev;
+ if (Triple.isMacOSX()) {
+ Triple.getMacOSXVersion(Maj, Min, Rev);
+ PlatformName = "macosx";
+ } else {
+ Triple.getOSVersion(Maj, Min, Rev);
+ PlatformName = llvm::Triple::getOSTypeName(Triple.getOS());
+ }
+
+ // If the -target arch-pc-win32-macho option is specified, we're
+ // generating code for the Win32 ABI. No need to emit
+ // __ENVIRONMENT_XX_OS_VERSION_MIN_REQUIRED__.
+ if (PlatformName == "win32") {
+ PlatformMinVersion = VersionTuple(Maj, Min, Rev);
+ return;
+ }
+
+ // Set the appropriate OS version define.
+ if (Triple.isiOS()) {
+ assert(Maj < 10 && Min < 100 && Rev < 100 && "Invalid version!");
+ char Str[6];
+ Str[0] = '0' + Maj;
+ Str[1] = '0' + (Min / 10);
+ Str[2] = '0' + (Min % 10);
+ Str[3] = '0' + (Rev / 10);
+ Str[4] = '0' + (Rev % 10);
+ Str[5] = '\0';
+ if (Triple.isTvOS())
+ Builder.defineMacro("__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__", Str);
+ else
+ Builder.defineMacro("__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__",
+ Str);
+
+ } else if (Triple.isWatchOS()) {
+ assert(Maj < 10 && Min < 100 && Rev < 100 && "Invalid version!");
+ char Str[6];
+ Str[0] = '0' + Maj;
+ Str[1] = '0' + (Min / 10);
+ Str[2] = '0' + (Min % 10);
+ Str[3] = '0' + (Rev / 10);
+ Str[4] = '0' + (Rev % 10);
+ Str[5] = '\0';
+ Builder.defineMacro("__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__", Str);
+ } else if (Triple.isMacOSX()) {
+ // Note that the Driver allows versions which aren't representable in the
+ // define (because we only get a single digit for the minor and micro
+ // revision numbers). So, we limit them to the maximum representable
+ // version.
+ assert(Maj < 100 && Min < 100 && Rev < 100 && "Invalid version!");
+ char Str[7];
+ if (Maj < 10 || (Maj == 10 && Min < 10)) {
+ Str[0] = '0' + (Maj / 10);
+ Str[1] = '0' + (Maj % 10);
+ Str[2] = '0' + std::min(Min, 9U);
+ Str[3] = '0' + std::min(Rev, 9U);
+ Str[4] = '\0';
+ } else {
+ // Handle versions > 10.9.
+ Str[0] = '0' + (Maj / 10);
+ Str[1] = '0' + (Maj % 10);
+ Str[2] = '0' + (Min / 10);
+ Str[3] = '0' + (Min % 10);
+ Str[4] = '0' + (Rev / 10);
+ Str[5] = '0' + (Rev % 10);
+ Str[6] = '\0';
+ }
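+ // e.g. 10.9.5 yields "1095" and 10.11.3 yields "101103".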
+ Builder.defineMacro("__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__", Str);
+ }
+
+ // Tell users about the kernel if there is one.
+ if (Triple.isOSDarwin())
+ Builder.defineMacro("__MACH__");
+
+ PlatformMinVersion = VersionTuple(Maj, Min, Rev);
+}
+
+template<typename Target>
+class DarwinTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ getDarwinDefines(Builder, Opts, Triple, this->PlatformName,
+ this->PlatformMinVersion);
+ }
+
+public:
+ DarwinTargetInfo(const llvm::Triple &Triple) : OSTargetInfo<Target>(Triple) {
+ this->TLSSupported = Triple.isMacOSX() && !Triple.isMacOSXVersionLT(10, 7);
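+ // The leading \01 tells LLVM to emit the name verbatim, bypassing the
+ // platform's usual user-label prefixing.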
+ this->MCountName = "\01mcount";
+ }
+
+ std::string isValidSectionSpecifier(StringRef SR) const override {
+ // Let MCSectionMachO validate this.
+ StringRef Segment, Section;
+ unsigned TAA, StubSize;
+ bool HasTAA;
+ return llvm::MCSectionMachO::ParseSectionSpecifier(SR, Segment, Section,
+ TAA, HasTAA, StubSize);
+ }
+
+ const char *getStaticInitSectionSpecifier() const override {
+ // FIXME: We should return 0 when building kexts.
+ return "__TEXT,__StaticInit,regular,pure_instructions";
+ }
+
+ /// Darwin does not support protected visibility. Darwin's "default"
+ /// is very similar to ELF's "protected"; Darwin requires a "weak"
+ /// attribute on declarations that can be dynamically replaced.
+ bool hasProtectedVisibility() const override {
+ return false;
+ }
+};
+
+// DragonFlyBSD Target
+template<typename Target>
+class DragonFlyBSDTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // DragonFly defines; list based off of gcc output
+ Builder.defineMacro("__DragonFly__");
+ Builder.defineMacro("__DragonFly_cc_version", "100001");
+ Builder.defineMacro("__ELF__");
+ Builder.defineMacro("__KPRINTF_ATTRIBUTE__");
+ Builder.defineMacro("__tune_i386__");
+ DefineStd(Builder, "unix", Opts);
+ }
+public:
+ DragonFlyBSDTargetInfo(const llvm::Triple &Triple)
+ : OSTargetInfo<Target>(Triple) {
+ this->UserLabelPrefix = "";
+
+ switch (Triple.getArch()) {
+ default:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ this->MCountName = ".mcount";
+ break;
+ }
+ }
+};
+
+// FreeBSD Target
+template<typename Target>
+class FreeBSDTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // FreeBSD defines; list based off of gcc output
+
+ unsigned Release = Triple.getOSMajorVersion();
+ if (Release == 0U)
+ Release = 8;
+
+ Builder.defineMacro("__FreeBSD__", Twine(Release));
+ Builder.defineMacro("__FreeBSD_cc_version", Twine(Release * 100000U + 1U));
+ Builder.defineMacro("__KPRINTF_ATTRIBUTE__");
+ DefineStd(Builder, "unix", Opts);
+ Builder.defineMacro("__ELF__");
+
+ // On FreeBSD, wchar_t contains the number of the code point as
+ // used by the character set of the locale. These character sets are
+ // not necessarily a superset of ASCII.
+ //
+ // FIXME: This is wrong; the macro refers to the numerical values
+ // of wchar_t *literals*, which are not locale-dependent. However,
+ // FreeBSD systems apparently depend on us getting this wrong, and
+ // setting this to 1 is conforming even if all the basic source
+ // character literals have the same encoding as char and wchar_t.
+ Builder.defineMacro("__STDC_MB_MIGHT_NEQ_WC__", "1");
+ }
+public:
+ FreeBSDTargetInfo(const llvm::Triple &Triple) : OSTargetInfo<Target>(Triple) {
+ this->UserLabelPrefix = "";
+
+ switch (Triple.getArch()) {
+ default:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ this->MCountName = ".mcount";
+ break;
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ this->MCountName = "_mcount";
+ break;
+ case llvm::Triple::arm:
+ this->MCountName = "__mcount";
+ break;
+ }
+ }
+};
+
+// GNU/kFreeBSD Target
+template<typename Target>
+class KFreeBSDTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // GNU/kFreeBSD defines; list based off of gcc output
+
+ DefineStd(Builder, "unix", Opts);
+ Builder.defineMacro("__FreeBSD_kernel__");
+ Builder.defineMacro("__GLIBC__");
+ Builder.defineMacro("__ELF__");
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_REENTRANT");
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("_GNU_SOURCE");
+ }
+public:
+ KFreeBSDTargetInfo(const llvm::Triple &Triple)
+ : OSTargetInfo<Target>(Triple) {
+ this->UserLabelPrefix = "";
+ }
+};
+
+// Minix Target
+template<typename Target>
+class MinixTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // Minix defines
+
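+ // The _EM_* macros publish the Minix/ACK machine model: word, pointer,
+ // short, long, float, and double sizes in bytes.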
+ Builder.defineMacro("__minix", "3");
+ Builder.defineMacro("_EM_WSIZE", "4");
+ Builder.defineMacro("_EM_PSIZE", "4");
+ Builder.defineMacro("_EM_SSIZE", "2");
+ Builder.defineMacro("_EM_LSIZE", "4");
+ Builder.defineMacro("_EM_FSIZE", "4");
+ Builder.defineMacro("_EM_DSIZE", "8");
+ Builder.defineMacro("__ELF__");
+ DefineStd(Builder, "unix", Opts);
+ }
+public:
+ MinixTargetInfo(const llvm::Triple &Triple) : OSTargetInfo<Target>(Triple) {
+ this->UserLabelPrefix = "";
+ }
+};
+
+// Linux target
+template<typename Target>
+class LinuxTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // Linux defines; list based off of gcc output
+ DefineStd(Builder, "unix", Opts);
+ DefineStd(Builder, "linux", Opts);
+ Builder.defineMacro("__gnu_linux__");
+ Builder.defineMacro("__ELF__");
+ if (Triple.isAndroid()) {
+ Builder.defineMacro("__ANDROID__", "1");
+ unsigned Maj, Min, Rev;
+ Triple.getEnvironmentVersion(Maj, Min, Rev);
+ this->PlatformName = "android";
+ this->PlatformMinVersion = VersionTuple(Maj, Min, Rev);
+ }
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_REENTRANT");
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("_GNU_SOURCE");
+ }
+public:
+ LinuxTargetInfo(const llvm::Triple &Triple) : OSTargetInfo<Target>(Triple) {
+ this->UserLabelPrefix = "";
+ this->WIntType = TargetInfo::UnsignedInt;
+
+ switch (Triple.getArch()) {
+ default:
+ break;
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ this->MCountName = "_mcount";
+ break;
+ }
+ }
+
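+ // .text.startup matches the section GCC uses for one-time initialization
+ // code, so static init code from both compilers is grouped together.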
+ const char *getStaticInitSectionSpecifier() const override {
+ return ".text.startup";
+ }
+};
+
+// NetBSD Target
+template<typename Target>
+class NetBSDTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // NetBSD defines; list based off of gcc output
+ Builder.defineMacro("__NetBSD__");
+ Builder.defineMacro("__unix__");
+ Builder.defineMacro("__ELF__");
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_POSIX_THREADS");
+
+ switch (Triple.getArch()) {
+ default:
+ break;
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ Builder.defineMacro("__ARM_DWARF_EH__");
+ break;
+ }
+ }
+public:
+ NetBSDTargetInfo(const llvm::Triple &Triple) : OSTargetInfo<Target>(Triple) {
+ this->UserLabelPrefix = "";
+ this->MCountName = "_mcount";
+ }
+};
+
+// OpenBSD Target
+template<typename Target>
+class OpenBSDTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // OpenBSD defines; list based off of gcc output
+
+ Builder.defineMacro("__OpenBSD__");
+ DefineStd(Builder, "unix", Opts);
+ Builder.defineMacro("__ELF__");
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_REENTRANT");
+ }
+public:
+ OpenBSDTargetInfo(const llvm::Triple &Triple) : OSTargetInfo<Target>(Triple) {
+ this->UserLabelPrefix = "";
+ this->TLSSupported = false;
+
+ switch (Triple.getArch()) {
+ default:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ case llvm::Triple::arm:
+ case llvm::Triple::sparc:
+ this->MCountName = "__mcount";
+ break;
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ case llvm::Triple::ppc:
+ case llvm::Triple::sparcv9:
+ this->MCountName = "_mcount";
+ break;
+ }
+ }
+};
+
+// Bitrig Target
+template<typename Target>
+class BitrigTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // Bitrig defines; list based off of gcc output
+
+ Builder.defineMacro("__Bitrig__");
+ DefineStd(Builder, "unix", Opts);
+ Builder.defineMacro("__ELF__");
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_REENTRANT");
+
+ switch (Triple.getArch()) {
+ default:
+ break;
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ Builder.defineMacro("__ARM_DWARF_EH__");
+ break;
+ }
+ }
+public:
+ BitrigTargetInfo(const llvm::Triple &Triple) : OSTargetInfo<Target>(Triple) {
+ this->UserLabelPrefix = "";
+ this->MCountName = "__mcount";
+ }
+};
+
+// PSP Target
+template<typename Target>
+class PSPTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // PSP defines; list based on the output of the pspdev gcc toolchain.
+ Builder.defineMacro("PSP");
+ Builder.defineMacro("_PSP");
+ Builder.defineMacro("__psp__");
+ Builder.defineMacro("__ELF__");
+ }
+public:
+ PSPTargetInfo(const llvm::Triple &Triple) : OSTargetInfo<Target>(Triple) {
+ this->UserLabelPrefix = "";
+ }
+};
+
+// PS3 PPU Target
+template<typename Target>
+class PS3PPUTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // PS3 PPU defines.
+ Builder.defineMacro("__PPC__");
+ Builder.defineMacro("__PPU__");
+ Builder.defineMacro("__CELLOS_LV2__");
+ Builder.defineMacro("__ELF__");
+ Builder.defineMacro("__LP32__");
+ Builder.defineMacro("_ARCH_PPC64");
+ Builder.defineMacro("__powerpc64__");
+ }
+public:
+ PS3PPUTargetInfo(const llvm::Triple &Triple) : OSTargetInfo<Target>(Triple) {
+ this->UserLabelPrefix = "";
+ this->LongWidth = this->LongAlign = 32;
+ this->PointerWidth = this->PointerAlign = 32;
+ this->IntMaxType = TargetInfo::SignedLongLong;
+ this->Int64Type = TargetInfo::SignedLongLong;
+ this->SizeType = TargetInfo::UnsignedInt;
+ this->DataLayoutString = "E-m:e-p:32:32-i64:64-n32:64";
+ }
+};
+
+template <typename Target>
+class PS4OSTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ Builder.defineMacro("__FreeBSD__", "9");
+ Builder.defineMacro("__FreeBSD_cc_version", "900001");
+ Builder.defineMacro("__KPRINTF_ATTRIBUTE__");
+ DefineStd(Builder, "unix", Opts);
+ Builder.defineMacro("__ELF__");
+ Builder.defineMacro("__PS4__");
+ }
+public:
+ PS4OSTargetInfo(const llvm::Triple &Triple) : OSTargetInfo<Target>(Triple) {
+ this->WCharType = this->UnsignedShort;
+
+ // On the PS4, TLS variables cannot be aligned to more than 32 bytes (256 bits).
+ this->MaxTLSAlign = 256;
+ this->UserLabelPrefix = "";
+
+ switch (Triple.getArch()) {
+ default:
+ case llvm::Triple::x86_64:
+ this->MCountName = ".mcount";
+ break;
+ }
+ }
+};
+
+// Solaris target
+template<typename Target>
+class SolarisTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ DefineStd(Builder, "sun", Opts);
+ DefineStd(Builder, "unix", Opts);
+ Builder.defineMacro("__ELF__");
+ Builder.defineMacro("__svr4__");
+ Builder.defineMacro("__SVR4");
+ // Solaris headers require _XOPEN_SOURCE to be set to 600 for C99 and
+ // newer, but to 500 for everything else. feature_test.h has a check to
+ // ensure that you are not using C99 with an old version of X/Open or C89
+ // with a new version.
+ if (Opts.C99)
+ Builder.defineMacro("_XOPEN_SOURCE", "600");
+ else
+ Builder.defineMacro("_XOPEN_SOURCE", "500");
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("__C99FEATURES__");
+ Builder.defineMacro("_LARGEFILE_SOURCE");
+ Builder.defineMacro("_LARGEFILE64_SOURCE");
+ Builder.defineMacro("__EXTENSIONS__");
+ Builder.defineMacro("_REENTRANT");
+ }
+public:
+ SolarisTargetInfo(const llvm::Triple &Triple) : OSTargetInfo<Target>(Triple) {
+ this->UserLabelPrefix = "";
+ this->WCharType = this->SignedInt;
+ // FIXME: WIntType should be SignedLong
+ }
+};
+
+// Windows target
+template<typename Target>
+class WindowsTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ Builder.defineMacro("_WIN32");
+ }
+ void getVisualStudioDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ if (Opts.CPlusPlus) {
+ if (Opts.RTTIData)
+ Builder.defineMacro("_CPPRTTI");
+
+ if (Opts.CXXExceptions)
+ Builder.defineMacro("_CPPUNWIND");
+ }
+
+ if (Opts.Bool)
+ Builder.defineMacro("__BOOL_DEFINED");
+
+ if (!Opts.CharIsSigned)
+ Builder.defineMacro("_CHAR_UNSIGNED");
+
+ // FIXME: POSIXThreads isn't exactly the option this should be defined for,
+ // but it works for now.
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_MT");
+
+ if (Opts.MSCompatibilityVersion) {
+ Builder.defineMacro("_MSC_VER",
+ Twine(Opts.MSCompatibilityVersion / 100000));
+ Builder.defineMacro("_MSC_FULL_VER", Twine(Opts.MSCompatibilityVersion));
+ // FIXME: We cannot encode the revision information into 32 bits.
+ Builder.defineMacro("_MSC_BUILD", Twine(1));
+
+ if (Opts.CPlusPlus11 && Opts.isCompatibleWithMSVC(LangOptions::MSVC2015))
+ Builder.defineMacro("_HAS_CHAR16_T_LANGUAGE_SUPPORT", Twine(1));
+ }
+
+ if (Opts.MicrosoftExt) {
+ Builder.defineMacro("_MSC_EXTENSIONS");
+
+ if (Opts.CPlusPlus11) {
+ Builder.defineMacro("_RVALUE_REFERENCES_V2_SUPPORTED");
+ Builder.defineMacro("_RVALUE_REFERENCES_SUPPORTED");
+ Builder.defineMacro("_NATIVE_NULLPTR_SUPPORTED");
+ }
+ }
+
+ Builder.defineMacro("_INTEGRAL_MAX_BITS", "64");
+ }
+
+public:
+ WindowsTargetInfo(const llvm::Triple &Triple)
+ : OSTargetInfo<Target>(Triple) {}
+};
+
+template <typename Target>
+class NaClTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_REENTRANT");
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("_GNU_SOURCE");
+
+ DefineStd(Builder, "unix", Opts);
+ Builder.defineMacro("__ELF__");
+ Builder.defineMacro("__native_client__");
+ }
+
+public:
+ NaClTargetInfo(const llvm::Triple &Triple) : OSTargetInfo<Target>(Triple) {
+ this->UserLabelPrefix = "";
+ this->LongAlign = 32;
+ this->LongWidth = 32;
+ this->PointerAlign = 32;
+ this->PointerWidth = 32;
+ this->IntMaxType = TargetInfo::SignedLongLong;
+ this->Int64Type = TargetInfo::SignedLongLong;
+ this->DoubleAlign = 64;
+ this->LongDoubleWidth = 64;
+ this->LongDoubleAlign = 64;
+ this->LongLongWidth = 64;
+ this->LongLongAlign = 64;
+ this->SizeType = TargetInfo::UnsignedInt;
+ this->PtrDiffType = TargetInfo::SignedInt;
+ this->IntPtrType = TargetInfo::SignedInt;
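+ // NaCl is ILP32 on every architecture: longs and pointers are 32 bits even
+ // on x86-64.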
+ // RegParmMax is inherited from the underlying architecture
+ this->LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ if (Triple.getArch() == llvm::Triple::arm) {
+ // Handled in ARM's setABI().
+ } else if (Triple.getArch() == llvm::Triple::x86) {
+ this->DataLayoutString = "e-m:e-p:32:32-i64:64-n8:16:32-S128";
+ } else if (Triple.getArch() == llvm::Triple::x86_64) {
+ this->DataLayoutString = "e-m:e-p:32:32-i64:64-n8:16:32:64-S128";
+ } else if (Triple.getArch() == llvm::Triple::mipsel) {
+ // Handled on mips' setDataLayoutString.
+ } else {
+ assert(Triple.getArch() == llvm::Triple::le32);
+ this->DataLayoutString = "e-p:32:32-i64:64";
+ }
+ }
+};
+
+// WebAssembly target
+template <typename Target>
+class WebAssemblyOSTargetInfo : public OSTargetInfo<Target> {
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const final {
+ // A common platform macro.
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_REENTRANT");
+ // Follow g++ convention and predefine _GNU_SOURCE for C++.
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("_GNU_SOURCE");
+ }
+
+ // As an optimization, group static init code together in a section.
+ const char *getStaticInitSectionSpecifier() const final {
+ return ".text.__startup";
+ }
+
+public:
+ explicit WebAssemblyOSTargetInfo(const llvm::Triple &Triple)
+ : OSTargetInfo<Target>(Triple) {
+ this->MCountName = "__mcount";
+ this->UserLabelPrefix = "";
+ this->TheCXXABI.set(TargetCXXABI::WebAssembly);
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Specific target implementations.
+//===----------------------------------------------------------------------===//
+
+// PPC abstract base class
+class PPCTargetInfo : public TargetInfo {
+ static const Builtin::Info BuiltinInfo[];
+ static const char * const GCCRegNames[];
+ static const TargetInfo::GCCRegAlias GCCRegAliases[];
+ std::string CPU;
+
+ // Target cpu features.
+ bool HasVSX;
+ bool HasP8Vector;
+ bool HasP8Crypto;
+ bool HasDirectMove;
+ bool HasQPX;
+ bool HasHTM;
+ bool HasBPERMD;
+ bool HasExtDiv;
+
+protected:
+ std::string ABI;
+
+public:
+ PPCTargetInfo(const llvm::Triple &Triple)
+ : TargetInfo(Triple), HasVSX(false), HasP8Vector(false),
+ HasP8Crypto(false), HasDirectMove(false), HasQPX(false), HasHTM(false),
+ HasBPERMD(false), HasExtDiv(false) {
+ BigEndian = (Triple.getArch() != llvm::Triple::ppc64le);
+ SimdDefaultAlign = 128;
+ LongDoubleWidth = LongDoubleAlign = 128;
+ LongDoubleFormat = &llvm::APFloat::PPCDoubleDouble;
+ }
+
+ /// \brief Flags for architecture-specific defines.
+ typedef enum {
+ ArchDefineNone = 0,
+ ArchDefineName = 1 << 0, // <name> is substituted for arch name.
+ ArchDefinePpcgr = 1 << 1,
+ ArchDefinePpcsq = 1 << 2,
+ ArchDefine440 = 1 << 3,
+ ArchDefine603 = 1 << 4,
+ ArchDefine604 = 1 << 5,
+ ArchDefinePwr4 = 1 << 6,
+ ArchDefinePwr5 = 1 << 7,
+ ArchDefinePwr5x = 1 << 8,
+ ArchDefinePwr6 = 1 << 9,
+ ArchDefinePwr6x = 1 << 10,
+ ArchDefinePwr7 = 1 << 11,
+ ArchDefinePwr8 = 1 << 12,
+ ArchDefineA2 = 1 << 13,
+ ArchDefineA2q = 1 << 14
+ } ArchDefineTypes;
+
+ // Note: GCC recognizes the following additional cpus:
+ // 401, 403, 405, 405fp, 440fp, 464, 464fp, 476, 476fp, 505, 740, 801,
+ // 821, 823, 8540, 8548, e300c2, e300c3, e500mc64, e6500, 860, cell,
+ // titan, rs64.
+ bool setCPU(const std::string &Name) override {
+ bool CPUKnown = llvm::StringSwitch<bool>(Name)
+ .Case("generic", true)
+ .Case("440", true)
+ .Case("450", true)
+ .Case("601", true)
+ .Case("602", true)
+ .Case("603", true)
+ .Case("603e", true)
+ .Case("603ev", true)
+ .Case("604", true)
+ .Case("604e", true)
+ .Case("620", true)
+ .Case("630", true)
+ .Case("g3", true)
+ .Case("7400", true)
+ .Case("g4", true)
+ .Case("7450", true)
+ .Case("g4+", true)
+ .Case("750", true)
+ .Case("970", true)
+ .Case("g5", true)
+ .Case("a2", true)
+ .Case("a2q", true)
+ .Case("e500mc", true)
+ .Case("e5500", true)
+ .Case("power3", true)
+ .Case("pwr3", true)
+ .Case("power4", true)
+ .Case("pwr4", true)
+ .Case("power5", true)
+ .Case("pwr5", true)
+ .Case("power5x", true)
+ .Case("pwr5x", true)
+ .Case("power6", true)
+ .Case("pwr6", true)
+ .Case("power6x", true)
+ .Case("pwr6x", true)
+ .Case("power7", true)
+ .Case("pwr7", true)
+ .Case("power8", true)
+ .Case("pwr8", true)
+ .Case("powerpc", true)
+ .Case("ppc", true)
+ .Case("powerpc64", true)
+ .Case("ppc64", true)
+ .Case("powerpc64le", true)
+ .Case("ppc64le", true)
+ .Default(false);
+
+ if (CPUKnown)
+ CPU = Name;
+
+ return CPUKnown;
+ }
+
+ StringRef getABI() const override { return ABI; }
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::PPC::LastTSBuiltin-Builtin::FirstTSBuiltin);
+ }
+
+ bool isCLZForZeroUndef() const override { return false; }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override;
+
+ bool
+ initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
+ StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const override;
+
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) override;
+ bool hasFeature(StringRef Feature) const override;
+ void setFeatureEnabled(llvm::StringMap<bool> &Features, StringRef Name,
+ bool Enabled) const override;
+
+ ArrayRef<const char *> getGCCRegNames() const override;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override {
+ switch (*Name) {
+ default: return false;
+ case 'O': // Zero
+ break;
+ case 'b': // Base register
+ case 'f': // Floating point register
+ Info.setAllowsRegister();
+ break;
+ // FIXME: The following are added to allow parsing.
+ // I just took a guess at what the actions should be.
+ // Also, is more specific checking needed? I.e. specific registers?
+ case 'd': // Floating point register (containing 64-bit value)
+ case 'v': // Altivec vector register
+ Info.setAllowsRegister();
+ break;
+ case 'w':
+ switch (Name[1]) {
+ case 'd': // VSX vector register to hold vector double data
+ case 'f': // VSX vector register to hold vector float data
+ case 's': // VSX vector register to hold scalar float data
+ case 'a': // Any VSX register
+ case 'c': // An individual CR bit
+ break;
+ default:
+ return false;
+ }
+ Info.setAllowsRegister();
+ Name++; // Skip over 'w'.
+ break;
+ case 'h': // `MQ', `CTR', or `LINK' register
+ case 'q': // `MQ' register
+ case 'c': // `CTR' register
+ case 'l': // `LINK' register
+ case 'x': // `CR' register (condition register) number 0
+ case 'y': // `CR' register (condition register)
+ case 'z': // `XER[CA]' carry bit (part of the XER register)
+ Info.setAllowsRegister();
+ break;
+ case 'I': // Signed 16-bit constant
+ case 'J': // Unsigned 16-bit constant shifted left 16 bits
+ // (use `L' instead for SImode constants)
+ case 'K': // Unsigned 16-bit constant
+ case 'L': // Signed 16-bit constant shifted left 16 bits
+ case 'M': // Constant larger than 31
+ case 'N': // Exact power of 2
+ case 'P': // Constant whose negation is a signed 16-bit constant
+ case 'G': // Floating point constant that can be loaded into a
+ // register with one instruction per word
+ case 'H': // Integer/Floating point constant that can be loaded
+ // into a register using three instructions
+ break;
+ case 'm': // Memory operand. Note that on PowerPC targets, m can
+ // include addresses that update the base register. It
+ // is therefore only safe to use `m' in an asm statement
+ // if that asm statement accesses the operand exactly once.
+ // The asm statement must also use `%U<opno>' as a
+ // placeholder for the "update" flag in the corresponding
+ // load or store instruction. For example:
+ // asm ("st%U0 %1,%0" : "=m" (mem) : "r" (val));
+ // is correct but:
+ // asm ("st %1,%0" : "=m" (mem) : "r" (val));
+ // is not. Use es rather than m if you don't want the base
+ // register to be updated.
+ case 'e':
+ if (Name[1] != 's')
+ return false;
+ // es: A "stable" memory operand; that is, one which does not
+ // include any automodification of the base register. Unlike
+ // `m', this constraint can be used in asm statements that
+ // might access the operand several times, or that might not
+ // access it at all.
+ Info.setAllowsMemory();
+ Name++; // Skip over 'e'.
+ break;
+ case 'Q': // Memory operand that is an offset from a register (it is
+ // usually better to use `m' or `es' in asm statements)
+ case 'Z': // Memory operand that is an indexed or indirect from a
+ // register (it is usually better to use `m' or `es' in
+ // asm statements)
+ Info.setAllowsMemory();
+ Info.setAllowsRegister();
+ break;
+ case 'R': // AIX TOC entry
+ case 'a': // Address operand that is an indexed or indirect from a
+ // register (`p' is preferable for asm statements)
+ case 'S': // Constant suitable as a 64-bit mask operand
+ case 'T': // Constant suitable as a 32-bit mask operand
+ case 'U': // System V Release 4 small data area reference
+ case 't': // AND masks that can be performed by two rldic{l, r}
+ // instructions
+ case 'W': // Vector constant that does not require memory
+ case 'j': // Vector constant that is all zeros.
+ break;
+ // End FIXME.
+ }
+ return true;
+ }
+ std::string convertConstraint(const char *&Constraint) const override {
+ std::string R;
+ switch (*Constraint) {
+ case 'e':
+ case 'w':
+ // Two-character constraint; add "^" hint for later parsing.
+ R = std::string("^") + std::string(Constraint, 2);
+ Constraint++;
+ break;
+ default:
+ return TargetInfo::convertConstraint(Constraint);
+ }
+ return R;
+ }
+ const char *getClobbers() const override {
+ return "";
+ }
+ int getEHDataRegisterNumber(unsigned RegNo) const override {
+ if (RegNo == 0) return 3;
+ if (RegNo == 1) return 4;
+ return -1;
+ }
+
+ bool hasSjLjLowering() const override {
+ return true;
+ }
+
+ bool useFloat128ManglingForLongDouble() const override {
+ return LongDoubleWidth == 128 &&
+ LongDoubleFormat == &llvm::APFloat::PPCDoubleDouble &&
+ getTriple().isOSBinFormatELF();
+ }
+};
+
+const Builtin::Info PPCTargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
+ { #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
+#include "clang/Basic/BuiltinsPPC.def"
+};
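+
+// The #include above expands each BUILTIN/LIBBUILTIN record in
+// BuiltinsPPC.def into one table entry via the X-macro definitions.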
+
+/// handleTargetFeatures - Perform initialization based on the user-configured
+/// set of features.
+bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) {
+ for (const auto &Feature : Features) {
+ if (Feature == "+vsx") {
+ HasVSX = true;
+ } else if (Feature == "+bpermd") {
+ HasBPERMD = true;
+ } else if (Feature == "+extdiv") {
+ HasExtDiv = true;
+ } else if (Feature == "+power8-vector") {
+ HasP8Vector = true;
+ } else if (Feature == "+crypto") {
+ HasP8Crypto = true;
+ } else if (Feature == "+direct-move") {
+ HasDirectMove = true;
+ } else if (Feature == "+qpx") {
+ HasQPX = true;
+ } else if (Feature == "+htm") {
+ HasHTM = true;
+ }
+ // TODO: Finish this list and add an assert that we've handled them
+ // all.
+ }
+
+ return true;
+}
+
+/// PPCTargetInfo::getTargetDefines - Return a set of the PowerPC-specific
+/// #defines that are not tied to a specific subtarget.
+void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Target identification.
+ Builder.defineMacro("__ppc__");
+ Builder.defineMacro("__PPC__");
+ Builder.defineMacro("_ARCH_PPC");
+ Builder.defineMacro("__powerpc__");
+ Builder.defineMacro("__POWERPC__");
+ if (PointerWidth == 64) {
+ Builder.defineMacro("_ARCH_PPC64");
+ Builder.defineMacro("__powerpc64__");
+ Builder.defineMacro("__ppc64__");
+ Builder.defineMacro("__PPC64__");
+ }
+
+ // Target properties.
+ if (getTriple().getArch() == llvm::Triple::ppc64le) {
+ Builder.defineMacro("_LITTLE_ENDIAN");
+ } else {
+ if (getTriple().getOS() != llvm::Triple::NetBSD &&
+ getTriple().getOS() != llvm::Triple::OpenBSD)
+ Builder.defineMacro("_BIG_ENDIAN");
+ }
+
+ // ABI options.
+ if (ABI == "elfv1" || ABI == "elfv1-qpx")
+ Builder.defineMacro("_CALL_ELF", "1");
+ if (ABI == "elfv2")
+ Builder.defineMacro("_CALL_ELF", "2");
+
+ // Subtarget options.
+ Builder.defineMacro("__NATURAL_ALIGNMENT__");
+ Builder.defineMacro("__REGISTER_PREFIX__", "");
+
+ // FIXME: Should be controlled by command line option.
+ if (LongDoubleWidth == 128)
+ Builder.defineMacro("__LONG_DOUBLE_128__");
+
+ if (Opts.AltiVec) {
+ Builder.defineMacro("__VEC__", "10206");
+ Builder.defineMacro("__ALTIVEC__");
+ }
+
+ // CPU identification.
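+ // Each newer CPU case also sets the flags of its predecessors, e.g. "pwr8"
+ // picks up the pwr4 through pwr7 defines as well.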
+ ArchDefineTypes defs = (ArchDefineTypes)llvm::StringSwitch<int>(CPU)
+ .Case("440", ArchDefineName)
+ .Case("450", ArchDefineName | ArchDefine440)
+ .Case("601", ArchDefineName)
+ .Case("602", ArchDefineName | ArchDefinePpcgr)
+ .Case("603", ArchDefineName | ArchDefinePpcgr)
+ .Case("603e", ArchDefineName | ArchDefine603 | ArchDefinePpcgr)
+ .Case("603ev", ArchDefineName | ArchDefine603 | ArchDefinePpcgr)
+ .Case("604", ArchDefineName | ArchDefinePpcgr)
+ .Case("604e", ArchDefineName | ArchDefine604 | ArchDefinePpcgr)
+ .Case("620", ArchDefineName | ArchDefinePpcgr)
+ .Case("630", ArchDefineName | ArchDefinePpcgr)
+ .Case("7400", ArchDefineName | ArchDefinePpcgr)
+ .Case("7450", ArchDefineName | ArchDefinePpcgr)
+ .Case("750", ArchDefineName | ArchDefinePpcgr)
+ .Case("970", ArchDefineName | ArchDefinePwr4 | ArchDefinePpcgr
+ | ArchDefinePpcsq)
+ .Case("a2", ArchDefineA2)
+ .Case("a2q", ArchDefineName | ArchDefineA2 | ArchDefineA2q)
+ .Case("pwr3", ArchDefinePpcgr)
+ .Case("pwr4", ArchDefineName | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("pwr5", ArchDefineName | ArchDefinePwr4 | ArchDefinePpcgr
+ | ArchDefinePpcsq)
+ .Case("pwr5x", ArchDefineName | ArchDefinePwr5 | ArchDefinePwr4
+ | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("pwr6", ArchDefineName | ArchDefinePwr5x | ArchDefinePwr5
+ | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("pwr6x", ArchDefineName | ArchDefinePwr6 | ArchDefinePwr5x
+ | ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr
+ | ArchDefinePpcsq)
+ .Case("pwr7", ArchDefineName | ArchDefinePwr6x | ArchDefinePwr6
+ | ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4
+ | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("pwr8", ArchDefineName | ArchDefinePwr7 | ArchDefinePwr6x
+ | ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5
+ | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("power3", ArchDefinePpcgr)
+ .Case("power4", ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("power5", ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr
+ | ArchDefinePpcsq)
+ .Case("power5x", ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4
+ | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("power6", ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5
+ | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("power6x", ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x
+ | ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr
+ | ArchDefinePpcsq)
+ .Case("power7", ArchDefinePwr7 | ArchDefinePwr6x | ArchDefinePwr6
+ | ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4
+ | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("power8", ArchDefinePwr8 | ArchDefinePwr7 | ArchDefinePwr6x
+ | ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5
+ | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Default(ArchDefineNone);
+
+ if (defs & ArchDefineName)
+ Builder.defineMacro(Twine("_ARCH_", StringRef(CPU).upper()));
+ if (defs & ArchDefinePpcgr)
+ Builder.defineMacro("_ARCH_PPCGR");
+ if (defs & ArchDefinePpcsq)
+ Builder.defineMacro("_ARCH_PPCSQ");
+ if (defs & ArchDefine440)
+ Builder.defineMacro("_ARCH_440");
+ if (defs & ArchDefine603)
+ Builder.defineMacro("_ARCH_603");
+ if (defs & ArchDefine604)
+ Builder.defineMacro("_ARCH_604");
+ if (defs & ArchDefinePwr4)
+ Builder.defineMacro("_ARCH_PWR4");
+ if (defs & ArchDefinePwr5)
+ Builder.defineMacro("_ARCH_PWR5");
+ if (defs & ArchDefinePwr5x)
+ Builder.defineMacro("_ARCH_PWR5X");
+ if (defs & ArchDefinePwr6)
+ Builder.defineMacro("_ARCH_PWR6");
+ if (defs & ArchDefinePwr6x)
+ Builder.defineMacro("_ARCH_PWR6X");
+ if (defs & ArchDefinePwr7)
+ Builder.defineMacro("_ARCH_PWR7");
+ if (defs & ArchDefinePwr8)
+ Builder.defineMacro("_ARCH_PWR8");
+ if (defs & ArchDefineA2)
+ Builder.defineMacro("_ARCH_A2");
+ if (defs & ArchDefineA2q) {
+ Builder.defineMacro("_ARCH_A2Q");
+ Builder.defineMacro("_ARCH_QP");
+ }
+
+ if (getTriple().getVendor() == llvm::Triple::BGQ) {
+ Builder.defineMacro("__bg__");
+ Builder.defineMacro("__THW_BLUEGENE__");
+ Builder.defineMacro("__bgq__");
+ Builder.defineMacro("__TOS_BGQ__");
+ }
+
+ if (HasVSX)
+ Builder.defineMacro("__VSX__");
+ if (HasP8Vector)
+ Builder.defineMacro("__POWER8_VECTOR__");
+ if (HasP8Crypto)
+ Builder.defineMacro("__CRYPTO__");
+ if (HasHTM)
+ Builder.defineMacro("__HTM__");
+
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
+ if (PointerWidth == 64)
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
+
+ // FIXME: The following are not yet generated here by Clang, but are
+ // generated by GCC:
+ //
+ // _SOFT_FLOAT_
+ // __RECIP_PRECISION__
+ // __APPLE_ALTIVEC__
+ // __RECIP__
+ // __RECIPF__
+ // __RSQRTE__
+ // __RSQRTEF__
+ // _SOFT_DOUBLE_
+ // __NO_LWSYNC__
+ // __HAVE_BSWAP__
+ // __LONGDOUBLE128
+ // __CMODEL_MEDIUM__
+ // __CMODEL_LARGE__
+ // _CALL_SYSV
+ // _CALL_DARWIN
+ // __NO_FPRS__
+}
+
+// Handle explicit options being passed to the compiler here: if we've
+// explicitly turned off vsx and turned on power8-vector or direct-move then
+// report an error, since the user has requested an incompatible set of
+// options.
+static bool ppcUserFeaturesCheck(DiagnosticsEngine &Diags,
+ const std::vector<std::string> &FeaturesVec) {
+ if (std::find(FeaturesVec.begin(), FeaturesVec.end(), "-vsx") !=
+ FeaturesVec.end()) {
+ if (std::find(FeaturesVec.begin(), FeaturesVec.end(), "+power8-vector") !=
+ FeaturesVec.end()) {
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-mpower8-vector"
+ << "-mno-vsx";
+ return false;
+ }
+
+ if (std::find(FeaturesVec.begin(), FeaturesVec.end(), "+direct-move") !=
+ FeaturesVec.end()) {
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-mdirect-move"
+ << "-mno-vsx";
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool PPCTargetInfo::initFeatureMap(
+ llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const {
+ Features["altivec"] = llvm::StringSwitch<bool>(CPU)
+ .Case("7400", true)
+ .Case("g4", true)
+ .Case("7450", true)
+ .Case("g4+", true)
+ .Case("970", true)
+ .Case("g5", true)
+ .Case("pwr6", true)
+ .Case("pwr7", true)
+ .Case("pwr8", true)
+ .Case("ppc64", true)
+ .Case("ppc64le", true)
+ .Default(false);
+
+ Features["qpx"] = (CPU == "a2q");
+ Features["crypto"] = llvm::StringSwitch<bool>(CPU)
+ .Case("ppc64le", true)
+ .Case("pwr8", true)
+ .Default(false);
+ Features["power8-vector"] = llvm::StringSwitch<bool>(CPU)
+ .Case("ppc64le", true)
+ .Case("pwr8", true)
+ .Default(false);
+ Features["bpermd"] = llvm::StringSwitch<bool>(CPU)
+ .Case("ppc64le", true)
+ .Case("pwr8", true)
+ .Case("pwr7", true)
+ .Default(false);
+ Features["extdiv"] = llvm::StringSwitch<bool>(CPU)
+ .Case("ppc64le", true)
+ .Case("pwr8", true)
+ .Case("pwr7", true)
+ .Default(false);
+ Features["direct-move"] = llvm::StringSwitch<bool>(CPU)
+ .Case("ppc64le", true)
+ .Case("pwr8", true)
+ .Default(false);
+ Features["vsx"] = llvm::StringSwitch<bool>(CPU)
+ .Case("ppc64le", true)
+ .Case("pwr8", true)
+ .Case("pwr7", true)
+ .Default(false);
+
+ if (!ppcUserFeaturesCheck(Diags, FeaturesVec))
+ return false;
+
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
+}
+
+bool PPCTargetInfo::hasFeature(StringRef Feature) const {
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("powerpc", true)
+ .Case("vsx", HasVSX)
+ .Case("power8-vector", HasP8Vector)
+ .Case("crypto", HasP8Crypto)
+ .Case("direct-move", HasDirectMove)
+ .Case("qpx", HasQPX)
+ .Case("htm", HasHTM)
+ .Case("bpermd", HasBPERMD)
+ .Case("extdiv", HasExtDiv)
+ .Default(false);
+}
+
+void PPCTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
+ StringRef Name, bool Enabled) const {
+ // If we're enabling direct-move or power8-vector, go ahead and enable vsx
+ // as well. Do the inverse if we're disabling vsx. Any incompatible
+ // user-specified options are diagnosed in ppcUserFeaturesCheck().
+ if (Enabled) {
+ if (Name == "vsx") {
+ Features[Name] = true;
+ } else if (Name == "direct-move") {
+ Features[Name] = Features["vsx"] = true;
+ } else if (Name == "power8-vector") {
+ Features[Name] = Features["vsx"] = true;
+ } else {
+ Features[Name] = true;
+ }
+ } else {
+ if (Name == "vsx") {
+ Features[Name] = Features["direct-move"] = Features["power8-vector"] =
+ false;
+ } else {
+ Features[Name] = false;
+ }
+ }
+}
+
+const char * const PPCTargetInfo::GCCRegNames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
+ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
+ "mq", "lr", "ctr", "ap",
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",
+ "xer",
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+ "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
+ "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
+ "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
+ "vrsave", "vscr",
+ "spe_acc", "spefscr",
+ "sfp"
+};
+
+ArrayRef<const char*> PPCTargetInfo::getGCCRegNames() const {
+ return llvm::makeArrayRef(GCCRegNames);
+}
+
+const TargetInfo::GCCRegAlias PPCTargetInfo::GCCRegAliases[] = {
+ // While some of these aliases do map to different registers,
+ // they still share the same register name.
+ { { "0" }, "r0" },
+ { { "1" }, "r1" },
+ { { "2" }, "r2" },
+ { { "3" }, "r3" },
+ { { "4" }, "r4" },
+ { { "5" }, "r5" },
+ { { "6" }, "r6" },
+ { { "7" }, "r7" },
+ { { "8" }, "r8" },
+ { { "9" }, "r9" },
+ { { "10" }, "r10" },
+ { { "11" }, "r11" },
+ { { "12" }, "r12" },
+ { { "13" }, "r13" },
+ { { "14" }, "r14" },
+ { { "15" }, "r15" },
+ { { "16" }, "r16" },
+ { { "17" }, "r17" },
+ { { "18" }, "r18" },
+ { { "19" }, "r19" },
+ { { "20" }, "r20" },
+ { { "21" }, "r21" },
+ { { "22" }, "r22" },
+ { { "23" }, "r23" },
+ { { "24" }, "r24" },
+ { { "25" }, "r25" },
+ { { "26" }, "r26" },
+ { { "27" }, "r27" },
+ { { "28" }, "r28" },
+ { { "29" }, "r29" },
+ { { "30" }, "r30" },
+ { { "31" }, "r31" },
+ { { "fr0" }, "f0" },
+ { { "fr1" }, "f1" },
+ { { "fr2" }, "f2" },
+ { { "fr3" }, "f3" },
+ { { "fr4" }, "f4" },
+ { { "fr5" }, "f5" },
+ { { "fr6" }, "f6" },
+ { { "fr7" }, "f7" },
+ { { "fr8" }, "f8" },
+ { { "fr9" }, "f9" },
+ { { "fr10" }, "f10" },
+ { { "fr11" }, "f11" },
+ { { "fr12" }, "f12" },
+ { { "fr13" }, "f13" },
+ { { "fr14" }, "f14" },
+ { { "fr15" }, "f15" },
+ { { "fr16" }, "f16" },
+ { { "fr17" }, "f17" },
+ { { "fr18" }, "f18" },
+ { { "fr19" }, "f19" },
+ { { "fr20" }, "f20" },
+ { { "fr21" }, "f21" },
+ { { "fr22" }, "f22" },
+ { { "fr23" }, "f23" },
+ { { "fr24" }, "f24" },
+ { { "fr25" }, "f25" },
+ { { "fr26" }, "f26" },
+ { { "fr27" }, "f27" },
+ { { "fr28" }, "f28" },
+ { { "fr29" }, "f29" },
+ { { "fr30" }, "f30" },
+ { { "fr31" }, "f31" },
+ { { "cc" }, "cr0" },
+};
+
+ArrayRef<TargetInfo::GCCRegAlias> PPCTargetInfo::getGCCRegAliases() const {
+ return llvm::makeArrayRef(GCCRegAliases);
+}
+
+class PPC32TargetInfo : public PPCTargetInfo {
+public:
+ PPC32TargetInfo(const llvm::Triple &Triple) : PPCTargetInfo(Triple) {
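+ // Layout: big-endian (E), ELF-style mangling (m:e), 32-bit pointers,
+ // 64-bit-aligned i64, and a native integer width of 32 bits (n32).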
+ DataLayoutString = "E-m:e-p:32:32-i64:64-n32";
+
+ switch (getTriple().getOS()) {
+ case llvm::Triple::Linux:
+ case llvm::Triple::FreeBSD:
+ case llvm::Triple::NetBSD:
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ IntPtrType = SignedInt;
+ break;
+ default:
+ break;
+ }
+
+ if (getTriple().getOS() == llvm::Triple::FreeBSD) {
+ LongDoubleWidth = LongDoubleAlign = 64;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ }
+
+ // PPC32 supports atomics up to 4 bytes.
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32;
+ }
+
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ // This is the ELF definition, and is overridden by the Darwin sub-target
+ return TargetInfo::PowerABIBuiltinVaList;
+ }
+};
+
+// Note: ABI differences may eventually require us to have a separate
+// TargetInfo for little endian.
+class PPC64TargetInfo : public PPCTargetInfo {
+public:
+ PPC64TargetInfo(const llvm::Triple &Triple) : PPCTargetInfo(Triple) {
+ LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
+ IntMaxType = SignedLong;
+ Int64Type = SignedLong;
+
+ if (Triple.getArch() == llvm::Triple::ppc64le) {
+ DataLayoutString = "e-m:e-i64:64-n32:64";
+ ABI = "elfv2";
+ } else {
+ DataLayoutString = "E-m:e-i64:64-n32:64";
+ ABI = "elfv1";
+ }
+
+ switch (getTriple().getOS()) {
+ case llvm::Triple::FreeBSD:
+ LongDoubleWidth = LongDoubleAlign = 64;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ break;
+ case llvm::Triple::NetBSD:
+ IntMaxType = SignedLongLong;
+ Int64Type = SignedLongLong;
+ break;
+ default:
+ break;
+ }
+
+ // PPC64 supports atomics up to 8 bytes.
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::CharPtrBuiltinVaList;
+ }
+ // PPC64 Linux-specific ABI options.
+ bool setABI(const std::string &Name) override {
+ if (Name == "elfv1" || Name == "elfv1-qpx" || Name == "elfv2") {
+ ABI = Name;
+ return true;
+ }
+ return false;
+ }
+};
+
+class DarwinPPC32TargetInfo :
+ public DarwinTargetInfo<PPC32TargetInfo> {
+public:
+ DarwinPPC32TargetInfo(const llvm::Triple &Triple)
+ : DarwinTargetInfo<PPC32TargetInfo>(Triple) {
+ HasAlignMac68kSupport = true;
+ BoolWidth = BoolAlign = 32; // XXX support -mone-byte-bool?
+ PtrDiffType = SignedInt; // for http://llvm.org/bugs/show_bug.cgi?id=15726
+ LongLongAlign = 32;
+ SuitableAlign = 128;
+ DataLayoutString = "E-m:o-p:32:32-f64:32:64-n32";
+ }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::CharPtrBuiltinVaList;
+ }
+};
+
+class DarwinPPC64TargetInfo :
+ public DarwinTargetInfo<PPC64TargetInfo> {
+public:
+ DarwinPPC64TargetInfo(const llvm::Triple &Triple)
+ : DarwinTargetInfo<PPC64TargetInfo>(Triple) {
+ HasAlignMac68kSupport = true;
+ SuitableAlign = 128;
+ DataLayoutString = "E-m:o-i64:64-n32:64";
+ }
+};
+
+static const unsigned NVPTXAddrSpaceMap[] = {
+ 1, // opencl_global
+ 3, // opencl_local
+ 4, // opencl_constant
+ // FIXME: generic has to be added to the target
+ 0, // opencl_generic
+ 1, // cuda_device
+ 4, // cuda_constant
+ 3, // cuda_shared
+};
+
+class NVPTXTargetInfo : public TargetInfo {
+ static const char *const GCCRegNames[];
+ static const Builtin::Info BuiltinInfo[];
+
+ // The GPU profiles supported by the NVPTX backend
+ enum GPUKind {
+ GK_NONE,
+ GK_SM20,
+ GK_SM21,
+ GK_SM30,
+ GK_SM35,
+ GK_SM37,
+ } GPU;
+
+public:
+ NVPTXTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
+ BigEndian = false;
+ TLSSupported = false;
+ LongWidth = LongAlign = 64;
+ AddrSpaceMap = &NVPTXAddrSpaceMap;
+ UseAddrSpaceMapMangling = true;
+ // Define available target features
+ // These must be defined in sorted order!
+ NoAsmVariants = true;
+ // Set the default GPU to sm20
+ GPU = GK_SM20;
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ Builder.defineMacro("__PTX__");
+ Builder.defineMacro("__NVPTX__");
+ if (Opts.CUDAIsDevice) {
+ // Set __CUDA_ARCH__ for the GPU specified.
+ std::string CUDAArchCode;
+ switch (GPU) {
+ case GK_SM20:
+ CUDAArchCode = "200";
+ break;
+ case GK_SM21:
+ CUDAArchCode = "210";
+ break;
+ case GK_SM30:
+ CUDAArchCode = "300";
+ break;
+ case GK_SM35:
+ CUDAArchCode = "350";
+ break;
+ case GK_SM37:
+ CUDAArchCode = "370";
+ break;
+ default:
+ llvm_unreachable("Unhandled target CPU");
+ }
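+ // __CUDA_ARCH__ is the SM version scaled by ten, e.g. sm_35 -> "350".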
+ Builder.defineMacro("__CUDA_ARCH__", CUDAArchCode);
+ }
+ }
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::NVPTX::LastTSBuiltin - Builtin::FirstTSBuiltin);
+ }
+ bool hasFeature(StringRef Feature) const override {
+ return Feature == "ptx" || Feature == "nvptx";
+ }
+
+ ArrayRef<const char *> getGCCRegNames() const override;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ // No aliases.
+ return None;
+ }
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override {
+ switch (*Name) {
+ default:
+ return false;
+ case 'c':
+ case 'h':
+ case 'r':
+ case 'l':
+ case 'f':
+ case 'd':
+ Info.setAllowsRegister();
+ return true;
+ }
+ }
+ const char *getClobbers() const override {
+ // FIXME: Is this really right?
+ return "";
+ }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ // FIXME: implement
+ return TargetInfo::CharPtrBuiltinVaList;
+ }
+ bool setCPU(const std::string &Name) override {
+ GPU = llvm::StringSwitch<GPUKind>(Name)
+ .Case("sm_20", GK_SM20)
+ .Case("sm_21", GK_SM21)
+ .Case("sm_30", GK_SM30)
+ .Case("sm_35", GK_SM35)
+ .Case("sm_37", GK_SM37)
+ .Default(GK_NONE);
+
+ return GPU != GK_NONE;
+ }
+};
+
+const Builtin::Info NVPTXTargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
+ { #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
+#include "clang/Basic/BuiltinsNVPTX.def"
+};
+
+const char *const NVPTXTargetInfo::GCCRegNames[] = {"r0"};
+
+ArrayRef<const char *> NVPTXTargetInfo::getGCCRegNames() const {
+ return llvm::makeArrayRef(GCCRegNames);
+}
+
+class NVPTX32TargetInfo : public NVPTXTargetInfo {
+public:
+ NVPTX32TargetInfo(const llvm::Triple &Triple) : NVPTXTargetInfo(Triple) {
+ LongWidth = LongAlign = 32;
+ PointerWidth = PointerAlign = 32;
+ SizeType = TargetInfo::UnsignedInt;
+ PtrDiffType = TargetInfo::SignedInt;
+ IntPtrType = TargetInfo::SignedInt;
+ DataLayoutString = "e-p:32:32-i64:64-v16:16-v32:32-n16:32:64";
+ }
+};
+
+class NVPTX64TargetInfo : public NVPTXTargetInfo {
+public:
+ NVPTX64TargetInfo(const llvm::Triple &Triple) : NVPTXTargetInfo(Triple) {
+ PointerWidth = PointerAlign = 64;
+ SizeType = TargetInfo::UnsignedLong;
+ PtrDiffType = TargetInfo::SignedLong;
+ IntPtrType = TargetInfo::SignedLong;
+ DataLayoutString = "e-i64:64-v16:16-v32:32-n16:32:64";
+ }
+};
+
+static const unsigned AMDGPUAddrSpaceMap[] = {
+ 1, // opencl_global
+ 3, // opencl_local
+ 2, // opencl_constant
+ 4, // opencl_generic
+ 1, // cuda_device
+ 2, // cuda_constant
+ 3 // cuda_shared
+};
+
+// If you edit the description strings, make sure you update
+// getPointerWidthV().
+
+static const char *const DataLayoutStringR600 =
+ "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
+ "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
+
+static const char *const DataLayoutStringR600DoubleOps =
+ "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
+ "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
+
+static const char *const DataLayoutStringSI =
+ "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64"
+ "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
+ "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
+
+class AMDGPUTargetInfo : public TargetInfo {
+ static const Builtin::Info BuiltinInfo[];
+ static const char * const GCCRegNames[];
+
+ /// \brief The GPU profiles supported by the AMDGPU target.
+ enum GPUKind {
+ GK_NONE,
+ GK_R600,
+ GK_R600_DOUBLE_OPS,
+ GK_R700,
+ GK_R700_DOUBLE_OPS,
+ GK_EVERGREEN,
+ GK_EVERGREEN_DOUBLE_OPS,
+ GK_NORTHERN_ISLANDS,
+ GK_CAYMAN,
+ GK_SOUTHERN_ISLANDS,
+ GK_SEA_ISLANDS,
+ GK_VOLCANIC_ISLANDS
+ } GPU;
+
+ bool hasFP64:1;
+ bool hasFMAF:1;
+ bool hasLDEXPF:1;
+
+public:
+ AMDGPUTargetInfo(const llvm::Triple &Triple)
+ : TargetInfo(Triple) {
+ if (Triple.getArch() == llvm::Triple::amdgcn) {
+ DataLayoutString = DataLayoutStringSI;
+ GPU = GK_SOUTHERN_ISLANDS;
+ hasFP64 = true;
+ hasFMAF = true;
+ hasLDEXPF = true;
+ } else {
+ DataLayoutString = DataLayoutStringR600;
+ GPU = GK_R600;
+ hasFP64 = false;
+ hasFMAF = false;
+ hasLDEXPF = false;
+ }
+ AddrSpaceMap = &AMDGPUAddrSpaceMap;
+ UseAddrSpaceMapMangling = true;
+ }
+
+ uint64_t getPointerWidthV(unsigned AddrSpace) const override {
+ if (GPU <= GK_CAYMAN)
+ return 32;
+
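+ // Per the SI data layout string above, address spaces 0, 3, and 5 use
+ // 32-bit pointers on GCN; all others are 64-bit.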
+ switch (AddrSpace) {
+ default:
+ return 64;
+ case 0:
+ case 3:
+ case 5:
+ return 32;
+ }
+ }
+
+ const char * getClobbers() const override {
+ return "";
+ }
+
+ ArrayRef<const char *> getGCCRegNames() const override;
+
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ return None;
+ }
+
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override {
+ switch (*Name) {
+ default: break;
+ case 'v': // vgpr
+ case 's': // sgpr
+ Info.setAllowsRegister();
+ return true;
+ }
+ return false;
+ }
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::AMDGPU::LastTSBuiltin - Builtin::FirstTSBuiltin);
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ Builder.defineMacro("__R600__");
+ if (hasFMAF)
+ Builder.defineMacro("__HAS_FMAF__");
+ if (hasLDEXPF)
+ Builder.defineMacro("__HAS_LDEXPF__");
+ if (hasFP64 && Opts.OpenCL)
+ Builder.defineMacro("cl_khr_fp64");
+ if (Opts.OpenCL) {
+ if (GPU >= GK_NORTHERN_ISLANDS) {
+ Builder.defineMacro("cl_khr_byte_addressable_store");
+ Builder.defineMacro("cl_khr_global_int32_base_atomics");
+ Builder.defineMacro("cl_khr_global_int32_extended_atomics");
+ Builder.defineMacro("cl_khr_local_int32_base_atomics");
+ Builder.defineMacro("cl_khr_local_int32_extended_atomics");
+ }
+ }
+ }
+
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::CharPtrBuiltinVaList;
+ }
+
+ bool setCPU(const std::string &Name) override {
+ GPU = llvm::StringSwitch<GPUKind>(Name)
+ .Case("r600" , GK_R600)
+ .Case("rv610", GK_R600)
+ .Case("rv620", GK_R600)
+ .Case("rv630", GK_R600)
+ .Case("rv635", GK_R600)
+ .Case("rs780", GK_R600)
+ .Case("rs880", GK_R600)
+ .Case("rv670", GK_R600_DOUBLE_OPS)
+ .Case("rv710", GK_R700)
+ .Case("rv730", GK_R700)
+ .Case("rv740", GK_R700_DOUBLE_OPS)
+ .Case("rv770", GK_R700_DOUBLE_OPS)
+ .Case("palm", GK_EVERGREEN)
+ .Case("cedar", GK_EVERGREEN)
+ .Case("sumo", GK_EVERGREEN)
+ .Case("sumo2", GK_EVERGREEN)
+ .Case("redwood", GK_EVERGREEN)
+ .Case("juniper", GK_EVERGREEN)
+ .Case("hemlock", GK_EVERGREEN_DOUBLE_OPS)
+ .Case("cypress", GK_EVERGREEN_DOUBLE_OPS)
+ .Case("barts", GK_NORTHERN_ISLANDS)
+ .Case("turks", GK_NORTHERN_ISLANDS)
+ .Case("caicos", GK_NORTHERN_ISLANDS)
+ .Case("cayman", GK_CAYMAN)
+ .Case("aruba", GK_CAYMAN)
+ .Case("tahiti", GK_SOUTHERN_ISLANDS)
+ .Case("pitcairn", GK_SOUTHERN_ISLANDS)
+ .Case("verde", GK_SOUTHERN_ISLANDS)
+ .Case("oland", GK_SOUTHERN_ISLANDS)
+ .Case("hainan", GK_SOUTHERN_ISLANDS)
+ .Case("bonaire", GK_SEA_ISLANDS)
+ .Case("kabini", GK_SEA_ISLANDS)
+ .Case("kaveri", GK_SEA_ISLANDS)
+ .Case("hawaii", GK_SEA_ISLANDS)
+ .Case("mullins", GK_SEA_ISLANDS)
+ .Case("tonga", GK_VOLCANIC_ISLANDS)
+ .Case("iceland", GK_VOLCANIC_ISLANDS)
+ .Case("carrizo", GK_VOLCANIC_ISLANDS)
+ .Default(GK_NONE);
+
+ if (GPU == GK_NONE) {
+ return false;
+ }
+
+ // Set the correct data layout
+ switch (GPU) {
+ case GK_NONE:
+ case GK_R600:
+ case GK_R700:
+ case GK_EVERGREEN:
+ case GK_NORTHERN_ISLANDS:
+ DataLayoutString = DataLayoutStringR600;
+ hasFP64 = false;
+ hasFMAF = false;
+ hasLDEXPF = false;
+ break;
+ case GK_R600_DOUBLE_OPS:
+ case GK_R700_DOUBLE_OPS:
+ case GK_EVERGREEN_DOUBLE_OPS:
+ case GK_CAYMAN:
+ DataLayoutString = DataLayoutStringR600DoubleOps;
+ hasFP64 = true;
+ hasFMAF = true;
+ hasLDEXPF = false;
+ break;
+ case GK_SOUTHERN_ISLANDS:
+ case GK_SEA_ISLANDS:
+ case GK_VOLCANIC_ISLANDS:
+ DataLayoutString = DataLayoutStringSI;
+ hasFP64 = true;
+ hasFMAF = true;
+ hasLDEXPF = true;
+ break;
+ }
+
+ return true;
+ }
+};
+
+const Builtin::Info AMDGPUTargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#include "clang/Basic/BuiltinsAMDGPU.def"
+};
+const char * const AMDGPUTargetInfo::GCCRegNames[] = {
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+ "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
+ "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
+ "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
+ "v32", "v33", "v34", "v35", "v36", "v37", "v38", "v39",
+ "v40", "v41", "v42", "v43", "v44", "v45", "v46", "v47",
+ "v48", "v49", "v50", "v51", "v52", "v53", "v54", "v55",
+ "v56", "v57", "v58", "v59", "v60", "v61", "v62", "v63",
+ "v64", "v65", "v66", "v67", "v68", "v69", "v70", "v71",
+ "v72", "v73", "v74", "v75", "v76", "v77", "v78", "v79",
+ "v80", "v81", "v82", "v83", "v84", "v85", "v86", "v87",
+ "v88", "v89", "v90", "v91", "v92", "v93", "v94", "v95",
+ "v96", "v97", "v98", "v99", "v100", "v101", "v102", "v103",
+ "v104", "v105", "v106", "v107", "v108", "v109", "v110", "v111",
+ "v112", "v113", "v114", "v115", "v116", "v117", "v118", "v119",
+ "v120", "v121", "v122", "v123", "v124", "v125", "v126", "v127",
+ "v128", "v129", "v130", "v131", "v132", "v133", "v134", "v135",
+ "v136", "v137", "v138", "v139", "v140", "v141", "v142", "v143",
+ "v144", "v145", "v146", "v147", "v148", "v149", "v150", "v151",
+ "v152", "v153", "v154", "v155", "v156", "v157", "v158", "v159",
+ "v160", "v161", "v162", "v163", "v164", "v165", "v166", "v167",
+ "v168", "v169", "v170", "v171", "v172", "v173", "v174", "v175",
+ "v176", "v177", "v178", "v179", "v180", "v181", "v182", "v183",
+ "v184", "v185", "v186", "v187", "v188", "v189", "v190", "v191",
+ "v192", "v193", "v194", "v195", "v196", "v197", "v198", "v199",
+ "v200", "v201", "v202", "v203", "v204", "v205", "v206", "v207",
+ "v208", "v209", "v210", "v211", "v212", "v213", "v214", "v215",
+ "v216", "v217", "v218", "v219", "v220", "v221", "v222", "v223",
+ "v224", "v225", "v226", "v227", "v228", "v229", "v230", "v231",
+ "v232", "v233", "v234", "v235", "v236", "v237", "v238", "v239",
+ "v240", "v241", "v242", "v243", "v244", "v245", "v246", "v247",
+ "v248", "v249", "v250", "v251", "v252", "v253", "v254", "v255",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
+ "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
+ "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
+ "s32", "s33", "s34", "s35", "s36", "s37", "s38", "s39",
+ "s40", "s41", "s42", "s43", "s44", "s45", "s46", "s47",
+ "s48", "s49", "s50", "s51", "s52", "s53", "s54", "s55",
+ "s56", "s57", "s58", "s59", "s60", "s61", "s62", "s63",
+ "s64", "s65", "s66", "s67", "s68", "s69", "s70", "s71",
+ "s72", "s73", "s74", "s75", "s76", "s77", "s78", "s79",
+ "s80", "s81", "s82", "s83", "s84", "s85", "s86", "s87",
+ "s88", "s89", "s90", "s91", "s92", "s93", "s94", "s95",
+ "s96", "s97", "s98", "s99", "s100", "s101", "s102", "s103",
+ "s104", "s105", "s106", "s107", "s108", "s109", "s110", "s111",
+ "s112", "s113", "s114", "s115", "s116", "s117", "s118", "s119",
+ "s120", "s121", "s122", "s123", "s124", "s125", "s126", "s127"
+ "exec", "vcc", "scc", "m0", "flat_scr", "exec_lo", "exec_hi",
+ "vcc_lo", "vcc_hi", "flat_scr_lo", "flat_scr_hi"
+};
+
+ArrayRef<const char *> AMDGPUTargetInfo::getGCCRegNames() const {
+ return llvm::makeArrayRef(GCCRegNames);
+}
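+
+// These names back validation of GCC-style inline-asm operands: a
+// (hypothetical) __asm__ clobber list naming "v0" or "scc" is checked
+// against this table.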
+
+// Builtin info and register name tables for the x86 abstract base class.
+const Builtin::Info BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
+ { #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE },
+#include "clang/Basic/BuiltinsX86.def"
+};
+
+static const char* const GCCRegNames[] = {
+ "ax", "dx", "cx", "bx", "si", "di", "bp", "sp",
+ "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
+ "argp", "flags", "fpcr", "fpsr", "dirflag", "frame",
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
+ "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
+ "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7",
+ "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15",
+};
+
+const TargetInfo::AddlRegName AddlRegNames[] = {
+ { { "al", "ah", "eax", "rax" }, 0 },
+ { { "bl", "bh", "ebx", "rbx" }, 3 },
+ { { "cl", "ch", "ecx", "rcx" }, 2 },
+ { { "dl", "dh", "edx", "rdx" }, 1 },
+ { { "esi", "rsi" }, 4 },
+ { { "edi", "rdi" }, 5 },
+ { { "esp", "rsp" }, 7 },
+ { { "ebp", "rbp" }, 6 },
+ { { "r8d", "r8w", "r8b" }, 38 },
+ { { "r9d", "r9w", "r9b" }, 39 },
+ { { "r10d", "r10w", "r10b" }, 40 },
+ { { "r11d", "r11w", "r11b" }, 41 },
+ { { "r12d", "r12w", "r12b" }, 42 },
+ { { "r13d", "r13w", "r13b" }, 43 },
+ { { "r14d", "r14w", "r14b" }, 44 },
+ { { "r15d", "r15w", "r15b" }, 45 },
+};
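+
+// The trailing number in each entry above is the index into GCCRegNames that
+// the aliases resolve to; e.g. "al"/"ah"/"eax"/"rax" all name register 0
+// ("ax"), and "r8d"/"r8w"/"r8b" name register 38 ("r8").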
+
+// X86 target abstract base class; x86-32 and x86-64 are very close, so
+// most of the implementation can be shared.
+class X86TargetInfo : public TargetInfo {
+ enum X86SSEEnum {
+ NoSSE, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, AVX, AVX2, AVX512F
+ } SSELevel = NoSSE;
+ enum MMX3DNowEnum {
+ NoMMX3DNow, MMX, AMD3DNow, AMD3DNowAthlon
+ } MMX3DNowLevel = NoMMX3DNow;
+ enum XOPEnum {
+ NoXOP,
+ SSE4A,
+ FMA4,
+ XOP
+ } XOPLevel = NoXOP;
+
+ bool HasAES = false;
+ bool HasPCLMUL = false;
+ bool HasLZCNT = false;
+ bool HasRDRND = false;
+ bool HasFSGSBASE = false;
+ bool HasBMI = false;
+ bool HasBMI2 = false;
+ bool HasPOPCNT = false;
+ bool HasRTM = false;
+ bool HasPRFCHW = false;
+ bool HasRDSEED = false;
+ bool HasADX = false;
+ bool HasTBM = false;
+ bool HasFMA = false;
+ bool HasF16C = false;
+ bool HasAVX512CD = false;
+ bool HasAVX512ER = false;
+ bool HasAVX512PF = false;
+ bool HasAVX512DQ = false;
+ bool HasAVX512BW = false;
+ bool HasAVX512VL = false;
+ bool HasSHA = false;
+ bool HasCX16 = false;
+ bool HasFXSR = false;
+ bool HasXSAVE = false;
+ bool HasXSAVEOPT = false;
+ bool HasXSAVEC = false;
+ bool HasXSAVES = false;
+ bool HasPKU = false;
+
+ /// \brief Enumeration of all of the X86 CPUs supported by Clang.
+ ///
+ /// Each enumerator represents a particular CPU supported by Clang. These
+ /// loosely correspond to the values accepted by the '-march' and '-mtune'
+ /// flags.
+ enum CPUKind {
+ CK_Generic,
+
+ /// \name i386
+ /// i386-generation processors.
+ //@{
+ CK_i386,
+ //@}
+
+ /// \name i486
+ /// i486-generation processors.
+ //@{
+ CK_i486,
+ CK_WinChipC6,
+ CK_WinChip2,
+ CK_C3,
+ //@}
+
+ /// \name i586
+ /// i586-generation processors, P5 microarchitecture based.
+ //@{
+ CK_i586,
+ CK_Pentium,
+ CK_PentiumMMX,
+ //@}
+
+ /// \name i686
+ /// i686-generation processors, P6 / Pentium M microarchitecture based.
+ //@{
+ CK_i686,
+ CK_PentiumPro,
+ CK_Pentium2,
+ CK_Pentium3,
+ CK_Pentium3M,
+ CK_PentiumM,
+ CK_C3_2,
+
+ /// This enumerator is a bit odd, as GCC no longer accepts -march=yonah.
+ /// Clang however has some logic to support this.
+ // FIXME: Warn, deprecate, and potentially remove this.
+ CK_Yonah,
+ //@}
+
+ /// \name Netburst
+ /// Netburst microarchitecture based processors.
+ //@{
+ CK_Pentium4,
+ CK_Pentium4M,
+ CK_Prescott,
+ CK_Nocona,
+ //@}
+
+ /// \name Core
+ /// Core microarchitecture based processors.
+ //@{
+ CK_Core2,
+
+ /// This enumerator, like \see CK_Yonah, is a bit odd. It is another
+ /// codename which GCC no longer accepts as an option to -march, but Clang
+ /// has some logic for recognizing it.
+ // FIXME: Warn, deprecate, and potentially remove this.
+ CK_Penryn,
+ //@}
+
+ /// \name Atom
+ /// Atom processors
+ //@{
+ CK_Bonnell,
+ CK_Silvermont,
+ //@}
+
+ /// \name Nehalem
+ /// Nehalem microarchitecture based processors.
+ CK_Nehalem,
+
+ /// \name Westmere
+ /// Westmere microarchitecture based processors.
+ CK_Westmere,
+
+ /// \name Sandy Bridge
+ /// Sandy Bridge microarchitecture based processors.
+ CK_SandyBridge,
+
+ /// \name Ivy Bridge
+ /// Ivy Bridge microarchitecture based processors.
+ CK_IvyBridge,
+
+ /// \name Haswell
+ /// Haswell microarchitecture based processors.
+ CK_Haswell,
+
+ /// \name Broadwell
+ /// Broadwell microarchitecture based processors.
+ CK_Broadwell,
+
+ /// \name Skylake
+ /// Skylake microarchitecture based processors.
+ CK_Skylake,
+
+ /// \name Knights Landing
+ /// Knights Landing processor.
+ CK_KNL,
+
+ /// \name K6
+ /// K6 architecture processors.
+ //@{
+ CK_K6,
+ CK_K6_2,
+ CK_K6_3,
+ //@}
+
+ /// \name K7
+ /// K7 architecture processors.
+ //@{
+ CK_Athlon,
+ CK_AthlonThunderbird,
+ CK_Athlon4,
+ CK_AthlonXP,
+ CK_AthlonMP,
+ //@}
+
+ /// \name K8
+ /// K8 architecture processors.
+ //@{
+ CK_Athlon64,
+ CK_Athlon64SSE3,
+ CK_AthlonFX,
+ CK_K8,
+ CK_K8SSE3,
+ CK_Opteron,
+ CK_OpteronSSE3,
+ CK_AMDFAM10,
+ //@}
+
+ /// \name Bobcat
+ /// Bobcat architecture processors.
+ //@{
+ CK_BTVER1,
+ CK_BTVER2,
+ //@}
+
+ /// \name Bulldozer
+ /// Bulldozer architecture processors.
+ //@{
+ CK_BDVER1,
+ CK_BDVER2,
+ CK_BDVER3,
+ CK_BDVER4,
+ //@}
+
+ /// This specification is deprecated and will be removed in the future.
+ /// Users should prefer \see CK_K8.
+ // FIXME: Warn on this when the CPU is set to it.
+ //@{
+ CK_x86_64,
+ //@}
+
+ /// \name Geode
+ /// Geode processors.
+ //@{
+ CK_Geode
+ //@}
+ } CPU = CK_Generic;
+
+ CPUKind getCPUKind(StringRef CPU) const {
+ return llvm::StringSwitch<CPUKind>(CPU)
+ .Case("i386", CK_i386)
+ .Case("i486", CK_i486)
+ .Case("winchip-c6", CK_WinChipC6)
+ .Case("winchip2", CK_WinChip2)
+ .Case("c3", CK_C3)
+ .Case("i586", CK_i586)
+ .Case("pentium", CK_Pentium)
+ .Case("pentium-mmx", CK_PentiumMMX)
+ .Case("i686", CK_i686)
+ .Case("pentiumpro", CK_PentiumPro)
+ .Case("pentium2", CK_Pentium2)
+ .Case("pentium3", CK_Pentium3)
+ .Case("pentium3m", CK_Pentium3M)
+ .Case("pentium-m", CK_PentiumM)
+ .Case("c3-2", CK_C3_2)
+ .Case("yonah", CK_Yonah)
+ .Case("pentium4", CK_Pentium4)
+ .Case("pentium4m", CK_Pentium4M)
+ .Case("prescott", CK_Prescott)
+ .Case("nocona", CK_Nocona)
+ .Case("core2", CK_Core2)
+ .Case("penryn", CK_Penryn)
+ .Case("bonnell", CK_Bonnell)
+ .Case("atom", CK_Bonnell) // Legacy name.
+ .Case("silvermont", CK_Silvermont)
+ .Case("slm", CK_Silvermont) // Legacy name.
+ .Case("nehalem", CK_Nehalem)
+ .Case("corei7", CK_Nehalem) // Legacy name.
+ .Case("westmere", CK_Westmere)
+ .Case("sandybridge", CK_SandyBridge)
+ .Case("corei7-avx", CK_SandyBridge) // Legacy name.
+ .Case("ivybridge", CK_IvyBridge)
+ .Case("core-avx-i", CK_IvyBridge) // Legacy name.
+ .Case("haswell", CK_Haswell)
+ .Case("core-avx2", CK_Haswell) // Legacy name.
+ .Case("broadwell", CK_Broadwell)
+ .Case("skylake", CK_Skylake)
+ .Case("skx", CK_Skylake) // Legacy name.
+ .Case("knl", CK_KNL)
+ .Case("k6", CK_K6)
+ .Case("k6-2", CK_K6_2)
+ .Case("k6-3", CK_K6_3)
+ .Case("athlon", CK_Athlon)
+ .Case("athlon-tbird", CK_AthlonThunderbird)
+ .Case("athlon-4", CK_Athlon4)
+ .Case("athlon-xp", CK_AthlonXP)
+ .Case("athlon-mp", CK_AthlonMP)
+ .Case("athlon64", CK_Athlon64)
+ .Case("athlon64-sse3", CK_Athlon64SSE3)
+ .Case("athlon-fx", CK_AthlonFX)
+ .Case("k8", CK_K8)
+ .Case("k8-sse3", CK_K8SSE3)
+ .Case("opteron", CK_Opteron)
+ .Case("opteron-sse3", CK_OpteronSSE3)
+ .Case("barcelona", CK_AMDFAM10)
+ .Case("amdfam10", CK_AMDFAM10)
+ .Case("btver1", CK_BTVER1)
+ .Case("btver2", CK_BTVER2)
+ .Case("bdver1", CK_BDVER1)
+ .Case("bdver2", CK_BDVER2)
+ .Case("bdver3", CK_BDVER3)
+ .Case("bdver4", CK_BDVER4)
+ .Case("x86-64", CK_x86_64)
+ .Case("geode", CK_Geode)
+ .Default(CK_Generic);
+ }
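+
+ // For example, getCPUKind("corei7-avx") and getCPUKind("sandybridge") both
+ // map to CK_SandyBridge; an unrecognized string falls back to CK_Generic,
+ // which setCPU below rejects.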
+
+ enum FPMathKind {
+ FP_Default,
+ FP_SSE,
+ FP_387
+ } FPMath = FP_Default;
+
+public:
+ X86TargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
+ BigEndian = false;
+ LongDoubleFormat = &llvm::APFloat::x87DoubleExtended;
+ }
+ unsigned getFloatEvalMethod() const override {
+ // X87 evaluates with 80-bit "long double" precision.
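+ // The value returned here becomes __FLT_EVAL_METHOD__: 2 means evaluate in
+ // long double precision, 0 means evaluate each operation in its own type.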
+ return SSELevel == NoSSE ? 2 : 0;
+ }
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::X86::LastTSBuiltin-Builtin::FirstTSBuiltin);
+ }
+ ArrayRef<const char *> getGCCRegNames() const override {
+ return llvm::makeArrayRef(GCCRegNames);
+ }
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ return None;
+ }
+ ArrayRef<TargetInfo::AddlRegName> getGCCAddlRegNames() const override {
+ return llvm::makeArrayRef(AddlRegNames);
+ }
+ bool validateCpuSupports(StringRef Name) const override;
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const override;
+
+ bool validateGlobalRegisterVariable(StringRef RegName,
+ unsigned RegSize,
+ bool &HasSizeMismatch) const override {
+ // esp and ebp are the only 32-bit registers the x86 backend can currently
+ // handle.
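+ // This accepts, e.g., the (hypothetical) user declaration
+ //   register unsigned StackPtr __asm__("esp");
+ // and reports a size mismatch when the variable's type is not 32 bits wide.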
+ if (RegName.equals("esp") || RegName.equals("ebp")) {
+ // Check that the register size is 32-bit.
+ HasSizeMismatch = RegSize != 32;
+ return true;
+ }
+
+ return false;
+ }
+
+ bool validateOutputSize(StringRef Constraint, unsigned Size) const override;
+
+ bool validateInputSize(StringRef Constraint, unsigned Size) const override;
+
+ virtual bool validateOperandSize(StringRef Constraint, unsigned Size) const;
+
+ std::string convertConstraint(const char *&Constraint) const override;
+ const char *getClobbers() const override {
+ return "~{dirflag},~{fpsr},~{flags}";
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override;
+ static void setSSELevel(llvm::StringMap<bool> &Features, X86SSEEnum Level,
+ bool Enabled);
+ static void setMMXLevel(llvm::StringMap<bool> &Features, MMX3DNowEnum Level,
+ bool Enabled);
+ static void setXOPLevel(llvm::StringMap<bool> &Features, XOPEnum Level,
+ bool Enabled);
+ void setFeatureEnabled(llvm::StringMap<bool> &Features,
+ StringRef Name, bool Enabled) const override {
+ setFeatureEnabledImpl(Features, Name, Enabled);
+ }
+ // This exists purely to cut down on the number of virtual calls in
+ // initFeatureMap, which calls this repeatedly.
+ static void setFeatureEnabledImpl(llvm::StringMap<bool> &Features,
+ StringRef Name, bool Enabled);
+ bool
+ initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
+ StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const override;
+ bool hasFeature(StringRef Feature) const override;
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) override;
+ StringRef getABI() const override {
+ if (getTriple().getArch() == llvm::Triple::x86_64 && SSELevel >= AVX512F)
+ return "avx512";
+ if (getTriple().getArch() == llvm::Triple::x86_64 && SSELevel >= AVX)
+ return "avx";
+ if (getTriple().getArch() == llvm::Triple::x86 &&
+ MMX3DNowLevel == NoMMX3DNow)
+ return "no-mmx";
+ return "";
+ }
+ bool setCPU(const std::string &Name) override {
+ CPU = getCPUKind(Name);
+
+ // Perform any per-CPU checks necessary to determine if this CPU is
+ // acceptable.
+ // FIXME: This results in terrible diagnostics. Clang just says the CPU is
+ // invalid without explaining *why*.
+ switch (CPU) {
+ case CK_Generic:
+ // No processor selected!
+ return false;
+
+ case CK_i386:
+ case CK_i486:
+ case CK_WinChipC6:
+ case CK_WinChip2:
+ case CK_C3:
+ case CK_i586:
+ case CK_Pentium:
+ case CK_PentiumMMX:
+ case CK_i686:
+ case CK_PentiumPro:
+ case CK_Pentium2:
+ case CK_Pentium3:
+ case CK_Pentium3M:
+ case CK_PentiumM:
+ case CK_Yonah:
+ case CK_C3_2:
+ case CK_Pentium4:
+ case CK_Pentium4M:
+ case CK_Prescott:
+ case CK_K6:
+ case CK_K6_2:
+ case CK_K6_3:
+ case CK_Athlon:
+ case CK_AthlonThunderbird:
+ case CK_Athlon4:
+ case CK_AthlonXP:
+ case CK_AthlonMP:
+ case CK_Geode:
+ // Only accept certain architectures when compiling in 32-bit mode.
+ if (getTriple().getArch() != llvm::Triple::x86)
+ return false;
+
+ // Fallthrough
+ case CK_Nocona:
+ case CK_Core2:
+ case CK_Penryn:
+ case CK_Bonnell:
+ case CK_Silvermont:
+ case CK_Nehalem:
+ case CK_Westmere:
+ case CK_SandyBridge:
+ case CK_IvyBridge:
+ case CK_Haswell:
+ case CK_Broadwell:
+ case CK_Skylake:
+ case CK_KNL:
+ case CK_Athlon64:
+ case CK_Athlon64SSE3:
+ case CK_AthlonFX:
+ case CK_K8:
+ case CK_K8SSE3:
+ case CK_Opteron:
+ case CK_OpteronSSE3:
+ case CK_AMDFAM10:
+ case CK_BTVER1:
+ case CK_BTVER2:
+ case CK_BDVER1:
+ case CK_BDVER2:
+ case CK_BDVER3:
+ case CK_BDVER4:
+ case CK_x86_64:
+ return true;
+ }
+ llvm_unreachable("Unhandled CPU kind");
+ }
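+
+ // For example, setCPU("haswell") is accepted for both 32- and 64-bit x86
+ // triples, while setCPU("geode") is accepted only when the triple is
+ // 32-bit x86.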
+
+ bool setFPMath(StringRef Name) override;
+
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
+ // Accept the x86-specific calling conventions (and the C default); warn
+ // on anything else.
+ return (CC == CC_X86ThisCall ||
+ CC == CC_X86FastCall ||
+ CC == CC_X86StdCall ||
+ CC == CC_X86VectorCall ||
+ CC == CC_C ||
+ CC == CC_X86Pascal ||
+ CC == CC_IntelOclBicc) ? CCCR_OK : CCCR_Warning;
+ }
+
+ CallingConv getDefaultCallingConv(CallingConvMethodType MT) const override {
+ return MT == CCMT_Member ? CC_X86ThisCall : CC_C;
+ }
+
+ bool hasSjLjLowering() const override {
+ return true;
+ }
+};
+
+bool X86TargetInfo::setFPMath(StringRef Name) {
+ if (Name == "387") {
+ FPMath = FP_387;
+ return true;
+ }
+ if (Name == "sse") {
+ FPMath = FP_SSE;
+ return true;
+ }
+ return false;
+}
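+
+// This is driven by -mfpmath: "-mfpmath=sse" selects FP_SSE and
+// "-mfpmath=387" selects FP_387. handleTargetFeatures below then checks the
+// choice against the active SSE level.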
+
+bool X86TargetInfo::initFeatureMap(
+ llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const {
+ // FIXME: This *really* should not be here.
+ // X86_64 always has SSE2.
+ if (getTriple().getArch() == llvm::Triple::x86_64)
+ setFeatureEnabledImpl(Features, "sse2", true);
+
+ switch (getCPUKind(CPU)) {
+ case CK_Generic:
+ case CK_i386:
+ case CK_i486:
+ case CK_i586:
+ case CK_Pentium:
+ case CK_i686:
+ case CK_PentiumPro:
+ break;
+ case CK_PentiumMMX:
+ case CK_Pentium2:
+ case CK_K6:
+ case CK_WinChipC6:
+ setFeatureEnabledImpl(Features, "mmx", true);
+ break;
+ case CK_Pentium3:
+ case CK_Pentium3M:
+ case CK_C3_2:
+ setFeatureEnabledImpl(Features, "sse", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
+ break;
+ case CK_PentiumM:
+ case CK_Pentium4:
+ case CK_Pentium4M:
+ case CK_x86_64:
+ setFeatureEnabledImpl(Features, "sse2", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
+ break;
+ case CK_Yonah:
+ case CK_Prescott:
+ case CK_Nocona:
+ setFeatureEnabledImpl(Features, "sse3", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
+ setFeatureEnabledImpl(Features, "cx16", true);
+ break;
+ case CK_Core2:
+ case CK_Bonnell:
+ setFeatureEnabledImpl(Features, "ssse3", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
+ setFeatureEnabledImpl(Features, "cx16", true);
+ break;
+ case CK_Penryn:
+ setFeatureEnabledImpl(Features, "sse4.1", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
+ setFeatureEnabledImpl(Features, "cx16", true);
+ break;
+ case CK_Skylake:
+ setFeatureEnabledImpl(Features, "avx512f", true);
+ setFeatureEnabledImpl(Features, "avx512cd", true);
+ setFeatureEnabledImpl(Features, "avx512dq", true);
+ setFeatureEnabledImpl(Features, "avx512bw", true);
+ setFeatureEnabledImpl(Features, "avx512vl", true);
+ setFeatureEnabledImpl(Features, "xsavec", true);
+ setFeatureEnabledImpl(Features, "xsaves", true);
+ setFeatureEnabledImpl(Features, "pku", true);
+ // FALLTHROUGH
+ case CK_Broadwell:
+ setFeatureEnabledImpl(Features, "rdseed", true);
+ setFeatureEnabledImpl(Features, "adx", true);
+ // FALLTHROUGH
+ case CK_Haswell:
+ setFeatureEnabledImpl(Features, "avx2", true);
+ setFeatureEnabledImpl(Features, "lzcnt", true);
+ setFeatureEnabledImpl(Features, "bmi", true);
+ setFeatureEnabledImpl(Features, "bmi2", true);
+ setFeatureEnabledImpl(Features, "rtm", true);
+ setFeatureEnabledImpl(Features, "fma", true);
+ // FALLTHROUGH
+ case CK_IvyBridge:
+ setFeatureEnabledImpl(Features, "rdrnd", true);
+ setFeatureEnabledImpl(Features, "f16c", true);
+ setFeatureEnabledImpl(Features, "fsgsbase", true);
+ // FALLTHROUGH
+ case CK_SandyBridge:
+ setFeatureEnabledImpl(Features, "avx", true);
+ setFeatureEnabledImpl(Features, "xsave", true);
+ setFeatureEnabledImpl(Features, "xsaveopt", true);
+ // FALLTHROUGH
+ case CK_Westmere:
+ case CK_Silvermont:
+ setFeatureEnabledImpl(Features, "aes", true);
+ setFeatureEnabledImpl(Features, "pclmul", true);
+ // FALLTHROUGH
+ case CK_Nehalem:
+ setFeatureEnabledImpl(Features, "sse4.2", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
+ setFeatureEnabledImpl(Features, "cx16", true);
+ break;
+ case CK_KNL:
+ setFeatureEnabledImpl(Features, "avx512f", true);
+ setFeatureEnabledImpl(Features, "avx512cd", true);
+ setFeatureEnabledImpl(Features, "avx512er", true);
+ setFeatureEnabledImpl(Features, "avx512pf", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
+ setFeatureEnabledImpl(Features, "rdseed", true);
+ setFeatureEnabledImpl(Features, "adx", true);
+ setFeatureEnabledImpl(Features, "lzcnt", true);
+ setFeatureEnabledImpl(Features, "bmi", true);
+ setFeatureEnabledImpl(Features, "bmi2", true);
+ setFeatureEnabledImpl(Features, "rtm", true);
+ setFeatureEnabledImpl(Features, "fma", true);
+ setFeatureEnabledImpl(Features, "rdrnd", true);
+ setFeatureEnabledImpl(Features, "f16c", true);
+ setFeatureEnabledImpl(Features, "fsgsbase", true);
+ setFeatureEnabledImpl(Features, "aes", true);
+ setFeatureEnabledImpl(Features, "pclmul", true);
+ setFeatureEnabledImpl(Features, "cx16", true);
+ setFeatureEnabledImpl(Features, "xsaveopt", true);
+ setFeatureEnabledImpl(Features, "xsave", true);
+ break;
+ case CK_K6_2:
+ case CK_K6_3:
+ case CK_WinChip2:
+ case CK_C3:
+ setFeatureEnabledImpl(Features, "3dnow", true);
+ break;
+ case CK_Athlon:
+ case CK_AthlonThunderbird:
+ case CK_Geode:
+ setFeatureEnabledImpl(Features, "3dnowa", true);
+ break;
+ case CK_Athlon4:
+ case CK_AthlonXP:
+ case CK_AthlonMP:
+ setFeatureEnabledImpl(Features, "sse", true);
+ setFeatureEnabledImpl(Features, "3dnowa", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
+ break;
+ case CK_K8:
+ case CK_Opteron:
+ case CK_Athlon64:
+ case CK_AthlonFX:
+ setFeatureEnabledImpl(Features, "sse2", true);
+ setFeatureEnabledImpl(Features, "3dnowa", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
+ break;
+ case CK_AMDFAM10:
+ setFeatureEnabledImpl(Features, "sse4a", true);
+ setFeatureEnabledImpl(Features, "lzcnt", true);
+ setFeatureEnabledImpl(Features, "popcnt", true);
+ // FALLTHROUGH
+ case CK_K8SSE3:
+ case CK_OpteronSSE3:
+ case CK_Athlon64SSE3:
+ setFeatureEnabledImpl(Features, "sse3", true);
+ setFeatureEnabledImpl(Features, "3dnowa", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
+ break;
+ case CK_BTVER2:
+ setFeatureEnabledImpl(Features, "avx", true);
+ setFeatureEnabledImpl(Features, "aes", true);
+ setFeatureEnabledImpl(Features, "pclmul", true);
+ setFeatureEnabledImpl(Features, "bmi", true);
+ setFeatureEnabledImpl(Features, "f16c", true);
+ setFeatureEnabledImpl(Features, "xsaveopt", true);
+ // FALLTHROUGH
+ case CK_BTVER1:
+ setFeatureEnabledImpl(Features, "ssse3", true);
+ setFeatureEnabledImpl(Features, "sse4a", true);
+ setFeatureEnabledImpl(Features, "lzcnt", true);
+ setFeatureEnabledImpl(Features, "popcnt", true);
+ setFeatureEnabledImpl(Features, "prfchw", true);
+ setFeatureEnabledImpl(Features, "cx16", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
+ setFeatureEnabledImpl(Features, "xsave", true);
+ break;
+ case CK_BDVER4:
+ setFeatureEnabledImpl(Features, "avx2", true);
+ setFeatureEnabledImpl(Features, "bmi2", true);
+ // FALLTHROUGH
+ case CK_BDVER3:
+ setFeatureEnabledImpl(Features, "fsgsbase", true);
+ setFeatureEnabledImpl(Features, "xsaveopt", true);
+ // FALLTHROUGH
+ case CK_BDVER2:
+ setFeatureEnabledImpl(Features, "bmi", true);
+ setFeatureEnabledImpl(Features, "fma", true);
+ setFeatureEnabledImpl(Features, "f16c", true);
+ setFeatureEnabledImpl(Features, "tbm", true);
+ // FALLTHROUGH
+ case CK_BDVER1:
+ // xop implies avx, sse4a and fma4.
+ setFeatureEnabledImpl(Features, "xop", true);
+ setFeatureEnabledImpl(Features, "lzcnt", true);
+ setFeatureEnabledImpl(Features, "aes", true);
+ setFeatureEnabledImpl(Features, "pclmul", true);
+ setFeatureEnabledImpl(Features, "prfchw", true);
+ setFeatureEnabledImpl(Features, "cx16", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
+ setFeatureEnabledImpl(Features, "xsave", true);
+ break;
+ }
+ if (!TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec))
+ return false;
+
+ // Can't do this earlier because we need to be able to explicitly enable
+ // or disable these features and the things that they depend upon.
+
+ // Enable popcnt if sse4.2 is enabled and popcnt is not explicitly disabled.
+ auto I = Features.find("sse4.2");
+ if (I != Features.end() && I->getValue() &&
+ std::find(FeaturesVec.begin(), FeaturesVec.end(), "-popcnt") ==
+ FeaturesVec.end())
+ Features["popcnt"] = true;
+
+ // Enable prfchw if 3DNow! is enabled and prfchw is not explicitly disabled.
+ I = Features.find("3dnow");
+ if (I != Features.end() && I->getValue() &&
+ std::find(FeaturesVec.begin(), FeaturesVec.end(), "-prfchw") ==
+ FeaturesVec.end())
+ Features["prfchw"] = true;
+
+ // Additionally, if SSE is enabled and mmx is not explicitly disabled,
+ // then enable MMX.
+ I = Features.find("sse");
+ if (I != Features.end() && I->getValue() &&
+ std::find(FeaturesVec.begin(), FeaturesVec.end(), "-mmx") ==
+ FeaturesVec.end())
+ Features["mmx"] = true;
+
+ return true;
+}
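+
+// As an example of the implicit enables above: "-march=nehalem" turns on
+// sse4.2, so popcnt becomes enabled as well unless "-popcnt" appears in
+// FeaturesVec (i.e. -mno-popcnt was given on the command line).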
+
+void X86TargetInfo::setSSELevel(llvm::StringMap<bool> &Features,
+ X86SSEEnum Level, bool Enabled) {
+ if (Enabled) {
+ switch (Level) {
+ case AVX512F:
+ Features["avx512f"] = true;
+ case AVX2:
+ Features["avx2"] = true;
+ case AVX:
+ Features["avx"] = true;
+ Features["xsave"] = true;
+ case SSE42:
+ Features["sse4.2"] = true;
+ case SSE41:
+ Features["sse4.1"] = true;
+ case SSSE3:
+ Features["ssse3"] = true;
+ case SSE3:
+ Features["sse3"] = true;
+ case SSE2:
+ Features["sse2"] = true;
+ case SSE1:
+ Features["sse"] = true;
+ case NoSSE:
+ break;
+ }
+ return;
+ }
+
+ switch (Level) {
+ case NoSSE:
+ case SSE1:
+ Features["sse"] = false;
+ case SSE2:
+ Features["sse2"] = Features["pclmul"] = Features["aes"] =
+ Features["sha"] = false;
+ case SSE3:
+ Features["sse3"] = false;
+ setXOPLevel(Features, NoXOP, false);
+ case SSSE3:
+ Features["ssse3"] = false;
+ case SSE41:
+ Features["sse4.1"] = false;
+ case SSE42:
+ Features["sse4.2"] = false;
+ case AVX:
+ Features["fma"] = Features["avx"] = Features["f16c"] = Features["xsave"] =
+ Features["xsaveopt"] = false;
+ setXOPLevel(Features, FMA4, false);
+ case AVX2:
+ Features["avx2"] = false;
+ case AVX512F:
+ Features["avx512f"] = Features["avx512cd"] = Features["avx512er"] =
+ Features["avx512pf"] = Features["avx512dq"] = Features["avx512bw"] =
+ Features["avx512vl"] = false;
+ }
+}
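+
+// The omitted breaks above are deliberate: enabling a level cascades down,
+// enabling every lower level (setSSELevel(Features, AVX, true) also sets
+// sse4.2 through sse), while disabling a level cascades up, clearing it and
+// everything built on top of it.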
+
+void X86TargetInfo::setMMXLevel(llvm::StringMap<bool> &Features,
+ MMX3DNowEnum Level, bool Enabled) {
+ if (Enabled) {
+ switch (Level) {
+ case AMD3DNowAthlon:
+ Features["3dnowa"] = true;
+ case AMD3DNow:
+ Features["3dnow"] = true;
+ case MMX:
+ Features["mmx"] = true;
+ case NoMMX3DNow:
+ break;
+ }
+ return;
+ }
+
+ switch (Level) {
+ case NoMMX3DNow:
+ case MMX:
+ Features["mmx"] = false;
+ case AMD3DNow:
+ Features["3dnow"] = false;
+ case AMD3DNowAthlon:
+ Features["3dnowa"] = false;
+ }
+}
+
+void X86TargetInfo::setXOPLevel(llvm::StringMap<bool> &Features, XOPEnum Level,
+ bool Enabled) {
+ if (Enabled) {
+ switch (Level) {
+ case XOP:
+ Features["xop"] = true;
+ case FMA4:
+ Features["fma4"] = true;
+ setSSELevel(Features, AVX, true);
+ case SSE4A:
+ Features["sse4a"] = true;
+ setSSELevel(Features, SSE3, true);
+ case NoXOP:
+ break;
+ }
+ return;
+ }
+
+ switch (Level) {
+ case NoXOP:
+ case SSE4A:
+ Features["sse4a"] = false;
+ case FMA4:
+ Features["fma4"] = false;
+ case XOP:
+ Features["xop"] = false;
+ }
+}
+
+void X86TargetInfo::setFeatureEnabledImpl(llvm::StringMap<bool> &Features,
+ StringRef Name, bool Enabled) {
+ // This is a bit of a hack to deal with the sse4 target feature when used
+ // as part of the target attribute. We handle sse4 correctly everywhere
+ // else. See below for more information on how we handle the sse4 options.
+ if (Name != "sse4")
+ Features[Name] = Enabled;
+
+ if (Name == "mmx") {
+ setMMXLevel(Features, MMX, Enabled);
+ } else if (Name == "sse") {
+ setSSELevel(Features, SSE1, Enabled);
+ } else if (Name == "sse2") {
+ setSSELevel(Features, SSE2, Enabled);
+ } else if (Name == "sse3") {
+ setSSELevel(Features, SSE3, Enabled);
+ } else if (Name == "ssse3") {
+ setSSELevel(Features, SSSE3, Enabled);
+ } else if (Name == "sse4.2") {
+ setSSELevel(Features, SSE42, Enabled);
+ } else if (Name == "sse4.1") {
+ setSSELevel(Features, SSE41, Enabled);
+ } else if (Name == "3dnow") {
+ setMMXLevel(Features, AMD3DNow, Enabled);
+ } else if (Name == "3dnowa") {
+ setMMXLevel(Features, AMD3DNowAthlon, Enabled);
+ } else if (Name == "aes") {
+ if (Enabled)
+ setSSELevel(Features, SSE2, Enabled);
+ } else if (Name == "pclmul") {
+ if (Enabled)
+ setSSELevel(Features, SSE2, Enabled);
+ } else if (Name == "avx") {
+ setSSELevel(Features, AVX, Enabled);
+ } else if (Name == "avx2") {
+ setSSELevel(Features, AVX2, Enabled);
+ } else if (Name == "avx512f") {
+ setSSELevel(Features, AVX512F, Enabled);
+ } else if (Name == "avx512cd" || Name == "avx512er" || Name == "avx512pf"
+ || Name == "avx512dq" || Name == "avx512bw" || Name == "avx512vl") {
+ if (Enabled)
+ setSSELevel(Features, AVX512F, Enabled);
+ } else if (Name == "fma") {
+ if (Enabled)
+ setSSELevel(Features, AVX, Enabled);
+ } else if (Name == "fma4") {
+ setXOPLevel(Features, FMA4, Enabled);
+ } else if (Name == "xop") {
+ setXOPLevel(Features, XOP, Enabled);
+ } else if (Name == "sse4a") {
+ setXOPLevel(Features, SSE4A, Enabled);
+ } else if (Name == "f16c") {
+ if (Enabled)
+ setSSELevel(Features, AVX, Enabled);
+ } else if (Name == "sha") {
+ if (Enabled)
+ setSSELevel(Features, SSE2, Enabled);
+ } else if (Name == "sse4") {
+ // We can get here via the __target__ attribute since that's not controlled
+ // via the -msse4/-mno-sse4 command-line aliases. Handle it the same way
+ // here: turn on the sse4.2 level if enabled, turn off the sse4.1 level if
+ // disabled.
+ if (Enabled)
+ setSSELevel(Features, SSE42, Enabled);
+ else
+ setSSELevel(Features, SSE41, Enabled);
+ } else if (Name == "xsave") {
+ if (Enabled)
+ setSSELevel(Features, AVX, Enabled);
+ else
+ Features["xsaveopt"] = false;
+ } else if (Name == "xsaveopt" || Name == "xsavec" || Name == "xsaves") {
+ if (Enabled) {
+ Features["xsave"] = true;
+ setSSELevel(Features, AVX, Enabled);
+ }
+ }
+}
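+
+// For example, setFeatureEnabledImpl(Features, "avx2", true) sets "avx2"
+// directly and then cascades through setSSELevel, enabling "avx", "xsave",
+// "sse4.2", and everything below them as well.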
+
+/// handleTargetFeatures - Perform initialization based on the user-configured
+/// set of features.
+bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) {
+ for (const auto &Feature : Features) {
+ if (Feature[0] != '+')
+ continue;
+
+ if (Feature == "+aes") {
+ HasAES = true;
+ } else if (Feature == "+pclmul") {
+ HasPCLMUL = true;
+ } else if (Feature == "+lzcnt") {
+ HasLZCNT = true;
+ } else if (Feature == "+rdrnd") {
+ HasRDRND = true;
+ } else if (Feature == "+fsgsbase") {
+ HasFSGSBASE = true;
+ } else if (Feature == "+bmi") {
+ HasBMI = true;
+ } else if (Feature == "+bmi2") {
+ HasBMI2 = true;
+ } else if (Feature == "+popcnt") {
+ HasPOPCNT = true;
+ } else if (Feature == "+rtm") {
+ HasRTM = true;
+ } else if (Feature == "+prfchw") {
+ HasPRFCHW = true;
+ } else if (Feature == "+rdseed") {
+ HasRDSEED = true;
+ } else if (Feature == "+adx") {
+ HasADX = true;
+ } else if (Feature == "+tbm") {
+ HasTBM = true;
+ } else if (Feature == "+fma") {
+ HasFMA = true;
+ } else if (Feature == "+f16c") {
+ HasF16C = true;
+ } else if (Feature == "+avx512cd") {
+ HasAVX512CD = true;
+ } else if (Feature == "+avx512er") {
+ HasAVX512ER = true;
+ } else if (Feature == "+avx512pf") {
+ HasAVX512PF = true;
+ } else if (Feature == "+avx512dq") {
+ HasAVX512DQ = true;
+ } else if (Feature == "+avx512bw") {
+ HasAVX512BW = true;
+ } else if (Feature == "+avx512vl") {
+ HasAVX512VL = true;
+ } else if (Feature == "+sha") {
+ HasSHA = true;
+ } else if (Feature == "+cx16") {
+ HasCX16 = true;
+ } else if (Feature == "+fxsr") {
+ HasFXSR = true;
+ } else if (Feature == "+xsave") {
+ HasXSAVE = true;
+ } else if (Feature == "+xsaveopt") {
+ HasXSAVEOPT = true;
+ } else if (Feature == "+xsavec") {
+ HasXSAVEC = true;
+ } else if (Feature == "+xsaves") {
+ HasXSAVES = true;
+ } else if (Feature == "+pku") {
+ HasPKU = true;
+ }
+
+ X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Feature)
+ .Case("+avx512f", AVX512F)
+ .Case("+avx2", AVX2)
+ .Case("+avx", AVX)
+ .Case("+sse4.2", SSE42)
+ .Case("+sse4.1", SSE41)
+ .Case("+ssse3", SSSE3)
+ .Case("+sse3", SSE3)
+ .Case("+sse2", SSE2)
+ .Case("+sse", SSE1)
+ .Default(NoSSE);
+ SSELevel = std::max(SSELevel, Level);
+
+ MMX3DNowEnum ThreeDNowLevel =
+ llvm::StringSwitch<MMX3DNowEnum>(Feature)
+ .Case("+3dnowa", AMD3DNowAthlon)
+ .Case("+3dnow", AMD3DNow)
+ .Case("+mmx", MMX)
+ .Default(NoMMX3DNow);
+ MMX3DNowLevel = std::max(MMX3DNowLevel, ThreeDNowLevel);
+
+ XOPEnum XLevel = llvm::StringSwitch<XOPEnum>(Feature)
+ .Case("+xop", XOP)
+ .Case("+fma4", FMA4)
+ .Case("+sse4a", SSE4A)
+ .Default(NoXOP);
+ XOPLevel = std::max(XOPLevel, XLevel);
+ }
+
+ // LLVM doesn't have a separate switch for fpmath, so only accept it if it
+ // matches the selected sse level.
+ if ((FPMath == FP_SSE && SSELevel < SSE1) ||
+ (FPMath == FP_387 && SSELevel >= SSE1)) {
+ Diags.Report(diag::err_target_unsupported_fpmath) <<
+ (FPMath == FP_SSE ? "sse" : "387");
+ return false;
+ }
+
+ SimdDefaultAlign =
+ hasFeature("avx512f") ? 512 : hasFeature("avx") ? 256 : 128;
+ return true;
+}
+
+/// X86TargetInfo::getTargetDefines - Return the set of the X86-specific macro
+/// definitions for this particular subtarget.
+void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Target identification.
+ if (getTriple().getArch() == llvm::Triple::x86_64) {
+ Builder.defineMacro("__amd64__");
+ Builder.defineMacro("__amd64");
+ Builder.defineMacro("__x86_64");
+ Builder.defineMacro("__x86_64__");
+ if (getTriple().getArchName() == "x86_64h") {
+ Builder.defineMacro("__x86_64h");
+ Builder.defineMacro("__x86_64h__");
+ }
+ } else {
+ DefineStd(Builder, "i386", Opts);
+ }
+
+ // Subtarget options.
+ // FIXME: We are hard-coding the tune parameters based on the CPU, but they
+ // truly should be based on -mtune options.
+ switch (CPU) {
+ case CK_Generic:
+ break;
+ case CK_i386:
+ // The rest come from the i386 define above.
+ Builder.defineMacro("__tune_i386__");
+ break;
+ case CK_i486:
+ case CK_WinChipC6:
+ case CK_WinChip2:
+ case CK_C3:
+ defineCPUMacros(Builder, "i486");
+ break;
+ case CK_PentiumMMX:
+ Builder.defineMacro("__pentium_mmx__");
+ Builder.defineMacro("__tune_pentium_mmx__");
+ // Fallthrough
+ case CK_i586:
+ case CK_Pentium:
+ defineCPUMacros(Builder, "i586");
+ defineCPUMacros(Builder, "pentium");
+ break;
+ case CK_Pentium3:
+ case CK_Pentium3M:
+ case CK_PentiumM:
+ Builder.defineMacro("__tune_pentium3__");
+ // Fallthrough
+ case CK_Pentium2:
+ case CK_C3_2:
+ Builder.defineMacro("__tune_pentium2__");
+ // Fallthrough
+ case CK_PentiumPro:
+ Builder.defineMacro("__tune_i686__");
+ Builder.defineMacro("__tune_pentiumpro__");
+ // Fallthrough
+ case CK_i686:
+ Builder.defineMacro("__i686");
+ Builder.defineMacro("__i686__");
+ // Strangely, __tune_i686__ isn't defined by GCC when CPU == i686.
+ Builder.defineMacro("__pentiumpro");
+ Builder.defineMacro("__pentiumpro__");
+ break;
+ case CK_Pentium4:
+ case CK_Pentium4M:
+ defineCPUMacros(Builder, "pentium4");
+ break;
+ case CK_Yonah:
+ case CK_Prescott:
+ case CK_Nocona:
+ defineCPUMacros(Builder, "nocona");
+ break;
+ case CK_Core2:
+ case CK_Penryn:
+ defineCPUMacros(Builder, "core2");
+ break;
+ case CK_Bonnell:
+ defineCPUMacros(Builder, "atom");
+ break;
+ case CK_Silvermont:
+ defineCPUMacros(Builder, "slm");
+ break;
+ case CK_Nehalem:
+ case CK_Westmere:
+ case CK_SandyBridge:
+ case CK_IvyBridge:
+ case CK_Haswell:
+ case CK_Broadwell:
+ // FIXME: Historically, we defined this legacy name; it would be nice to
+ // remove it at some point. We've never exposed fine-grained names for
+ // recent primary x86 CPUs, and we should keep it that way.
+ defineCPUMacros(Builder, "corei7");
+ break;
+ case CK_Skylake:
+ // FIXME: Historically, we defined this legacy name; it would be nice to
+ // remove it at some point. This is the only fine-grained CPU macro in the
+ // main Intel CPU line, and it would be better not to have these and force
+ // people to use ISA macros.
+ defineCPUMacros(Builder, "skx");
+ break;
+ case CK_KNL:
+ defineCPUMacros(Builder, "knl");
+ break;
+ case CK_K6_2:
+ Builder.defineMacro("__k6_2__");
+ Builder.defineMacro("__tune_k6_2__");
+ // Fallthrough
+ case CK_K6_3:
+ if (CPU != CK_K6_2) { // In case of fallthrough
+ // FIXME: GCC may be enabling these in cases where some other k6
+ // architecture is specified but -m3dnow is explicitly provided. The
+ // exact semantics need to be determined and emulated here.
+ Builder.defineMacro("__k6_3__");
+ Builder.defineMacro("__tune_k6_3__");
+ }
+ // Fallthrough
+ case CK_K6:
+ defineCPUMacros(Builder, "k6");
+ break;
+ case CK_Athlon:
+ case CK_AthlonThunderbird:
+ case CK_Athlon4:
+ case CK_AthlonXP:
+ case CK_AthlonMP:
+ defineCPUMacros(Builder, "athlon");
+ if (SSELevel != NoSSE) {
+ Builder.defineMacro("__athlon_sse__");
+ Builder.defineMacro("__tune_athlon_sse__");
+ }
+ break;
+ case CK_K8:
+ case CK_K8SSE3:
+ case CK_x86_64:
+ case CK_Opteron:
+ case CK_OpteronSSE3:
+ case CK_Athlon64:
+ case CK_Athlon64SSE3:
+ case CK_AthlonFX:
+ defineCPUMacros(Builder, "k8");
+ break;
+ case CK_AMDFAM10:
+ defineCPUMacros(Builder, "amdfam10");
+ break;
+ case CK_BTVER1:
+ defineCPUMacros(Builder, "btver1");
+ break;
+ case CK_BTVER2:
+ defineCPUMacros(Builder, "btver2");
+ break;
+ case CK_BDVER1:
+ defineCPUMacros(Builder, "bdver1");
+ break;
+ case CK_BDVER2:
+ defineCPUMacros(Builder, "bdver2");
+ break;
+ case CK_BDVER3:
+ defineCPUMacros(Builder, "bdver3");
+ break;
+ case CK_BDVER4:
+ defineCPUMacros(Builder, "bdver4");
+ break;
+ case CK_Geode:
+ defineCPUMacros(Builder, "geode");
+ break;
+ }
+
+ // Target properties.
+ Builder.defineMacro("__REGISTER_PREFIX__", "");
+
+ // Define __NO_MATH_INLINES on linux/x86 so that we don't get inline
+ // functions in glibc header files that use FP Stack inline asm which the
+ // backend can't deal with (PR879).
+ Builder.defineMacro("__NO_MATH_INLINES");
+
+ if (HasAES)
+ Builder.defineMacro("__AES__");
+
+ if (HasPCLMUL)
+ Builder.defineMacro("__PCLMUL__");
+
+ if (HasLZCNT)
+ Builder.defineMacro("__LZCNT__");
+
+ if (HasRDRND)
+ Builder.defineMacro("__RDRND__");
+
+ if (HasFSGSBASE)
+ Builder.defineMacro("__FSGSBASE__");
+
+ if (HasBMI)
+ Builder.defineMacro("__BMI__");
+
+ if (HasBMI2)
+ Builder.defineMacro("__BMI2__");
+
+ if (HasPOPCNT)
+ Builder.defineMacro("__POPCNT__");
+
+ if (HasRTM)
+ Builder.defineMacro("__RTM__");
+
+ if (HasPRFCHW)
+ Builder.defineMacro("__PRFCHW__");
+
+ if (HasRDSEED)
+ Builder.defineMacro("__RDSEED__");
+
+ if (HasADX)
+ Builder.defineMacro("__ADX__");
+
+ if (HasTBM)
+ Builder.defineMacro("__TBM__");
+
+ switch (XOPLevel) {
+ case XOP:
+ Builder.defineMacro("__XOP__");
+ case FMA4:
+ Builder.defineMacro("__FMA4__");
+ case SSE4A:
+ Builder.defineMacro("__SSE4A__");
+ case NoXOP:
+ break;
+ }
+
+ if (HasFMA)
+ Builder.defineMacro("__FMA__");
+
+ if (HasF16C)
+ Builder.defineMacro("__F16C__");
+
+ if (HasAVX512CD)
+ Builder.defineMacro("__AVX512CD__");
+ if (HasAVX512ER)
+ Builder.defineMacro("__AVX512ER__");
+ if (HasAVX512PF)
+ Builder.defineMacro("__AVX512PF__");
+ if (HasAVX512DQ)
+ Builder.defineMacro("__AVX512DQ__");
+ if (HasAVX512BW)
+ Builder.defineMacro("__AVX512BW__");
+ if (HasAVX512VL)
+ Builder.defineMacro("__AVX512VL__");
+
+ if (HasSHA)
+ Builder.defineMacro("__SHA__");
+
+ if (HasFXSR)
+ Builder.defineMacro("__FXSR__");
+ if (HasXSAVE)
+ Builder.defineMacro("__XSAVE__");
+ if (HasXSAVEOPT)
+ Builder.defineMacro("__XSAVEOPT__");
+ if (HasXSAVEC)
+ Builder.defineMacro("__XSAVEC__");
+ if (HasXSAVES)
+ Builder.defineMacro("__XSAVES__");
+ if (HasPKU)
+ Builder.defineMacro("__PKU__");
+ if (HasCX16)
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
+
+ // Each case falls through to the next, so each level implies all lower ones.
+ switch (SSELevel) {
+ case AVX512F:
+ Builder.defineMacro("__AVX512F__");
+ case AVX2:
+ Builder.defineMacro("__AVX2__");
+ case AVX:
+ Builder.defineMacro("__AVX__");
+ case SSE42:
+ Builder.defineMacro("__SSE4_2__");
+ case SSE41:
+ Builder.defineMacro("__SSE4_1__");
+ case SSSE3:
+ Builder.defineMacro("__SSSE3__");
+ case SSE3:
+ Builder.defineMacro("__SSE3__");
+ case SSE2:
+ Builder.defineMacro("__SSE2__");
+ Builder.defineMacro("__SSE2_MATH__"); // -mfp-math=sse always implied.
+ case SSE1:
+ Builder.defineMacro("__SSE__");
+ Builder.defineMacro("__SSE_MATH__"); // -mfp-math=sse always implied.
+ case NoSSE:
+ break;
+ }
+
+ if (Opts.MicrosoftExt && getTriple().getArch() == llvm::Triple::x86) {
+ switch (SSELevel) {
+ case AVX512F:
+ case AVX2:
+ case AVX:
+ case SSE42:
+ case SSE41:
+ case SSSE3:
+ case SSE3:
+ case SSE2:
+ Builder.defineMacro("_M_IX86_FP", Twine(2));
+ break;
+ case SSE1:
+ Builder.defineMacro("_M_IX86_FP", Twine(1));
+ break;
+ default:
+ Builder.defineMacro("_M_IX86_FP", Twine(0));
+ }
+ }
+
+ // Each case falls through to the next, so each level implies all lower ones.
+ switch (MMX3DNowLevel) {
+ case AMD3DNowAthlon:
+ Builder.defineMacro("__3dNOW_A__");
+ case AMD3DNow:
+ Builder.defineMacro("__3dNOW__");
+ case MMX:
+ Builder.defineMacro("__MMX__");
+ case NoMMX3DNow:
+ break;
+ }
+
+ if (CPU >= CK_i486) {
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
+ }
+ if (CPU >= CK_i586)
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
+}
+
+bool X86TargetInfo::hasFeature(StringRef Feature) const {
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("aes", HasAES)
+ .Case("avx", SSELevel >= AVX)
+ .Case("avx2", SSELevel >= AVX2)
+ .Case("avx512f", SSELevel >= AVX512F)
+ .Case("avx512cd", HasAVX512CD)
+ .Case("avx512er", HasAVX512ER)
+ .Case("avx512pf", HasAVX512PF)
+ .Case("avx512dq", HasAVX512DQ)
+ .Case("avx512bw", HasAVX512BW)
+ .Case("avx512vl", HasAVX512VL)
+ .Case("bmi", HasBMI)
+ .Case("bmi2", HasBMI2)
+ .Case("cx16", HasCX16)
+ .Case("f16c", HasF16C)
+ .Case("fma", HasFMA)
+ .Case("fma4", XOPLevel >= FMA4)
+ .Case("fsgsbase", HasFSGSBASE)
+ .Case("fxsr", HasFXSR)
+ .Case("lzcnt", HasLZCNT)
+ .Case("mm3dnow", MMX3DNowLevel >= AMD3DNow)
+ .Case("mm3dnowa", MMX3DNowLevel >= AMD3DNowAthlon)
+ .Case("mmx", MMX3DNowLevel >= MMX)
+ .Case("pclmul", HasPCLMUL)
+ .Case("popcnt", HasPOPCNT)
+ .Case("prfchw", HasPRFCHW)
+ .Case("rdrnd", HasRDRND)
+ .Case("rdseed", HasRDSEED)
+ .Case("rtm", HasRTM)
+ .Case("sha", HasSHA)
+ .Case("sse", SSELevel >= SSE1)
+ .Case("sse2", SSELevel >= SSE2)
+ .Case("sse3", SSELevel >= SSE3)
+ .Case("ssse3", SSELevel >= SSSE3)
+ .Case("sse4.1", SSELevel >= SSE41)
+ .Case("sse4.2", SSELevel >= SSE42)
+ .Case("sse4a", XOPLevel >= SSE4A)
+ .Case("tbm", HasTBM)
+ .Case("x86", true)
+ .Case("x86_32", getTriple().getArch() == llvm::Triple::x86)
+ .Case("x86_64", getTriple().getArch() == llvm::Triple::x86_64)
+ .Case("xop", XOPLevel >= XOP)
+ .Case("xsave", HasXSAVE)
+ .Case("xsavec", HasXSAVEC)
+ .Case("xsaves", HasXSAVES)
+ .Case("xsaveopt", HasXSAVEOPT)
+ .Case("pku", HasPKU)
+ .Default(false);
+}
+
+// We can't use a generic validation scheme for the features accepted here
+// versus the subtarget features accepted via the target attribute, because
+// the bitfield structure initialized in the runtime currently supports only
+// the features below rather than the full range of subtarget features. (See
+// X86TargetInfo::hasFeature for a somewhat comprehensive list.)
+bool X86TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
+ return llvm::StringSwitch<bool>(FeatureStr)
+ .Case("cmov", true)
+ .Case("mmx", true)
+ .Case("popcnt", true)
+ .Case("sse", true)
+ .Case("sse2", true)
+ .Case("sse3", true)
+ .Case("sse4.1", true)
+ .Case("sse4.2", true)
+ .Case("avx", true)
+ .Case("avx2", true)
+ .Case("sse4a", true)
+ .Case("fma4", true)
+ .Case("xop", true)
+ .Case("fma", true)
+ .Case("avx512f", true)
+ .Case("bmi", true)
+ .Case("bmi2", true)
+ .Default(false);
+}
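+
+// This guards the argument of __builtin_cpu_supports; e.g.
+//   if (__builtin_cpu_supports("sse4.2")) { ... }
+// is accepted, while a feature string not listed above is diagnosed at
+// compile time.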
+
+bool
+X86TargetInfo::validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const {
+ switch (*Name) {
+ default: return false;
+ // Constant constraints.
+ case 'e': // 32-bit signed integer constant for use with sign-extending x86_64
+ // instructions.
+ case 'Z': // 32-bit unsigned integer constant for use with zero-extending
+ // x86_64 instructions.
+ case 's':
+ Info.setRequiresImmediate();
+ return true;
+ case 'I':
+ Info.setRequiresImmediate(0, 31);
+ return true;
+ case 'J':
+ Info.setRequiresImmediate(0, 63);
+ return true;
+ case 'K':
+ Info.setRequiresImmediate(-128, 127);
+ return true;
+ case 'L':
+ Info.setRequiresImmediate({ int(0xff), int(0xffff), int(0xffffffff) });
+ return true;
+ case 'M':
+ Info.setRequiresImmediate(0, 3);
+ return true;
+ case 'N':
+ Info.setRequiresImmediate(0, 255);
+ return true;
+ case 'O':
+ Info.setRequiresImmediate(0, 127);
+ return true;
+ // Register constraints.
+ case 'Y': // 'Y' is the first character for several 2-character constraints.
+ // Shift the pointer to the second character of the constraint.
+ Name++;
+ switch (*Name) {
+ default:
+ return false;
+ case '0': // First SSE register.
+ case 't': // Any SSE register, when SSE2 is enabled.
+ case 'i': // Any SSE register, when SSE2 and inter-unit moves enabled.
+ case 'm': // Any MMX register, when inter-unit moves enabled.
+ Info.setAllowsRegister();
+ return true;
+ }
+ case 'f': // Any x87 floating point stack register.
+ // Constraint 'f' cannot be used for output operands.
+ if (Info.ConstraintStr[0] == '=')
+ return false;
+ Info.setAllowsRegister();
+ return true;
+ case 'a': // eax.
+ case 'b': // ebx.
+ case 'c': // ecx.
+ case 'd': // edx.
+ case 'S': // esi.
+ case 'D': // edi.
+ case 'A': // edx:eax.
+ case 't': // Top of floating point stack.
+ case 'u': // Second from top of floating point stack.
+ case 'q': // Any register accessible as [r]l: a, b, c, and d.
+ case 'y': // Any MMX register.
+ case 'x': // Any SSE register.
+ case 'Q': // Any register accessible as [r]h: a, b, c, and d.
+ case 'R': // "Legacy" registers: ax, bx, cx, dx, di, si, sp, bp.
+ case 'l': // "Index" registers: any general register that can be used as an
+ // index in a base+index memory access.
+ Info.setAllowsRegister();
+ return true;
+ // Floating point constant constraints.
+ case 'C': // SSE floating point constant.
+ case 'G': // x87 floating point constant.
+ return true;
+ }
+}
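+
+// The immediate ranges recorded above are what inline-asm operands are
+// checked against; e.g. the (hypothetical) user statement
+//   __asm__("shll %1, %0" : "+r"(Val) : "I"(3));
+// is accepted because 3 lies in [0, 31], whereas "I"(40) would be diagnosed.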
+
+bool X86TargetInfo::validateOutputSize(StringRef Constraint,
+ unsigned Size) const {
+ // Strip off constraint modifiers.
+ while (Constraint[0] == '=' ||
+ Constraint[0] == '+' ||
+ Constraint[0] == '&')
+ Constraint = Constraint.substr(1);
+
+ return validateOperandSize(Constraint, Size);
+}
+
+bool X86TargetInfo::validateInputSize(StringRef Constraint,
+ unsigned Size) const {
+ return validateOperandSize(Constraint, Size);
+}
+
+bool X86TargetInfo::validateOperandSize(StringRef Constraint,
+ unsigned Size) const {
+ switch (Constraint[0]) {
+ default: break;
+ case 'y':
+ return Size <= 64;
+ case 'f':
+ case 't':
+ case 'u':
+ return Size <= 128;
+ case 'x':
+ if (SSELevel >= AVX512F)
+ // 512-bit zmm registers can be used if target supports AVX512F.
+ return Size <= 512U;
+ else if (SSELevel >= AVX)
+ // 256-bit ymm registers can be used if target supports AVX.
+ return Size <= 256U;
+ return Size <= 128U;
+ case 'Y':
+ // 'Y' is the first character for several 2-character constraints.
+ switch (Constraint[1]) {
+ default: break;
+ case 'm':
+ // 'Ym' is synonymous with 'y'.
+ return Size <= 64;
+ case 'i':
+ case 't':
+ // 'Yi' and 'Yt' are synonymous with 'x' when SSE2 is enabled.
+ if (SSELevel >= AVX512F)
+ return Size <= 512U;
+ else if (SSELevel >= AVX)
+ return Size <= 256U;
+ return SSELevel >= SSE2 && Size <= 128U;
+ }
+
+ }
+
+ return true;
+}
+
+std::string
+X86TargetInfo::convertConstraint(const char *&Constraint) const {
+ switch (*Constraint) {
+ case 'a': return std::string("{ax}");
+ case 'b': return std::string("{bx}");
+ case 'c': return std::string("{cx}");
+ case 'd': return std::string("{dx}");
+ case 'S': return std::string("{si}");
+ case 'D': return std::string("{di}");
+ case 'p': // Address.
+ return std::string("im");
+ case 't': // Top of floating point stack.
+ return std::string("{st}");
+ case 'u': // Second from top of floating point stack.
+ return std::string("{st(1)}");
+ default:
+ return std::string(1, *Constraint);
+ }
+}
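+
+// For example, constraint 'a' is rewritten to the LLVM register reference
+// "{ax}", which the backend resolves to al/ax/eax/rax as the operand width
+// requires.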
+
+// X86-32 generic target
+class X86_32TargetInfo : public X86TargetInfo {
+public:
+ X86_32TargetInfo(const llvm::Triple &Triple) : X86TargetInfo(Triple) {
+ DoubleAlign = LongLongAlign = 32;
+ LongDoubleWidth = 96;
+ LongDoubleAlign = 32;
+ SuitableAlign = 128;
+ DataLayoutString = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128";
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ IntPtrType = SignedInt;
+ RegParmMax = 3;
+
+ // Use fpret for all types.
+ RealTypeUsesObjCFPRet = ((1 << TargetInfo::Float) |
+ (1 << TargetInfo::Double) |
+ (1 << TargetInfo::LongDouble));
+
+ // x86-32 has atomics up to 8 bytes
+ // FIXME: Check that we actually have cmpxchg8b before setting
+ // MaxAtomicInlineWidth. (cmpxchg8b is an i586 instruction.)
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::CharPtrBuiltinVaList;
+ }
+
+ int getEHDataRegisterNumber(unsigned RegNo) const override {
+ if (RegNo == 0) return 0;
+ if (RegNo == 1) return 2;
+ return -1;
+ }
+ bool validateOperandSize(StringRef Constraint,
+ unsigned Size) const override {
+ switch (Constraint[0]) {
+ default: break;
+ case 'R':
+ case 'q':
+ case 'Q':
+ case 'a':
+ case 'b':
+ case 'c':
+ case 'd':
+ case 'S':
+ case 'D':
+ return Size <= 32;
+ case 'A':
+ return Size <= 64;
+ }
+
+ return X86TargetInfo::validateOperandSize(Constraint, Size);
+ }
+};
+
+class NetBSDI386TargetInfo : public NetBSDTargetInfo<X86_32TargetInfo> {
+public:
+ NetBSDI386TargetInfo(const llvm::Triple &Triple)
+ : NetBSDTargetInfo<X86_32TargetInfo>(Triple) {}
+
+ unsigned getFloatEvalMethod() const override {
+ unsigned Major, Minor, Micro;
+ getTriple().getOSVersion(Major, Minor, Micro);
+ // New NetBSD uses the default rounding mode.
+ if (Major >= 7 || (Major == 6 && Minor == 99 && Micro >= 26) || Major == 0)
+ return X86_32TargetInfo::getFloatEvalMethod();
+ // NetBSD before 6.99.26 defaults to "double" rounding.
+ return 1;
+ }
+};
+
+class OpenBSDI386TargetInfo : public OpenBSDTargetInfo<X86_32TargetInfo> {
+public:
+ OpenBSDI386TargetInfo(const llvm::Triple &Triple)
+ : OpenBSDTargetInfo<X86_32TargetInfo>(Triple) {
+ SizeType = UnsignedLong;
+ IntPtrType = SignedLong;
+ PtrDiffType = SignedLong;
+ }
+};
+
+class BitrigI386TargetInfo : public BitrigTargetInfo<X86_32TargetInfo> {
+public:
+ BitrigI386TargetInfo(const llvm::Triple &Triple)
+ : BitrigTargetInfo<X86_32TargetInfo>(Triple) {
+ SizeType = UnsignedLong;
+ IntPtrType = SignedLong;
+ PtrDiffType = SignedLong;
+ }
+};
+
+class DarwinI386TargetInfo : public DarwinTargetInfo<X86_32TargetInfo> {
+public:
+ DarwinI386TargetInfo(const llvm::Triple &Triple)
+ : DarwinTargetInfo<X86_32TargetInfo>(Triple) {
+ LongDoubleWidth = 128;
+ LongDoubleAlign = 128;
+ SuitableAlign = 128;
+ MaxVectorAlign = 256;
+ // The watchOS simulator uses the builtin bool type for Objective-C.
+ llvm::Triple T = llvm::Triple(Triple);
+ if (T.isWatchOS())
+ UseSignedCharForObjCBool = false;
+ SizeType = UnsignedLong;
+ IntPtrType = SignedLong;
+ DataLayoutString = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128";
+ HasAlignMac68kSupport = true;
+ }
+
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) override {
+ if (!DarwinTargetInfo<X86_32TargetInfo>::handleTargetFeatures(Features,
+ Diags))
+ return false;
+ // We now know the features we have: we can decide how to align vectors.
+ MaxVectorAlign =
+ hasFeature("avx512f") ? 512 : hasFeature("avx") ? 256 : 128;
+ return true;
+ }
+};
+
+// x86-32 Windows target
+class WindowsX86_32TargetInfo : public WindowsTargetInfo<X86_32TargetInfo> {
+public:
+ WindowsX86_32TargetInfo(const llvm::Triple &Triple)
+ : WindowsTargetInfo<X86_32TargetInfo>(Triple) {
+ WCharType = UnsignedShort;
+ DoubleAlign = LongLongAlign = 64;
+ bool IsWinCOFF =
+ getTriple().isOSWindows() && getTriple().isOSBinFormatCOFF();
+ DataLayoutString = IsWinCOFF
+ ? "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"
+ : "e-m:e-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32";
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ WindowsTargetInfo<X86_32TargetInfo>::getTargetDefines(Opts, Builder);
+ }
+};
+
+// x86-32 Windows Visual Studio target
+class MicrosoftX86_32TargetInfo : public WindowsX86_32TargetInfo {
+public:
+ MicrosoftX86_32TargetInfo(const llvm::Triple &Triple)
+ : WindowsX86_32TargetInfo(Triple) {
+ LongDoubleWidth = LongDoubleAlign = 64;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ WindowsX86_32TargetInfo::getTargetDefines(Opts, Builder);
+ WindowsX86_32TargetInfo::getVisualStudioDefines(Opts, Builder);
+ // The value of the following reflects processor type.
+ // 300=386, 400=486, 500=Pentium, 600=Blend (default)
+ // We lost the original triple, so we use the default.
+ Builder.defineMacro("_M_IX86", "600");
+ }
+};
+
+static void addCygMingDefines(const LangOptions &Opts, MacroBuilder &Builder) {
+ // MinGW and Cygwin define __declspec(a) to __attribute__((a)). Clang
+ // supports __declspec natively under -fms-extensions, but we define a no-op
+ // __declspec macro anyway for preprocessor compatibility.
+ if (Opts.MicrosoftExt)
+ Builder.defineMacro("__declspec", "__declspec");
+ else
+ Builder.defineMacro("__declspec(a)", "__attribute__((a))");
+
+ if (!Opts.MicrosoftExt) {
+ // Provide macros for all the calling convention keywords. Provide both
+ // single and double underscore prefixed variants. These are available on
+ // x64 as well as x86, even though they have no effect.
+ const char *CCs[] = {"cdecl", "stdcall", "fastcall", "thiscall", "pascal"};
+ for (const char *CC : CCs) {
+ std::string GCCSpelling = "__attribute__((__";
+ GCCSpelling += CC;
+ GCCSpelling += "__))";
+ Builder.defineMacro(Twine("_") + CC, GCCSpelling);
+ Builder.defineMacro(Twine("__") + CC, GCCSpelling);
+ }
+ }
+}
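+
+// With the non-Microsoft branch above, a (hypothetical) use of __stdcall in
+// user code expands to __attribute__((__stdcall__)), so headers written
+// against the MSVC keywords still parse under MinGW and Cygwin.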
+
+static void addMinGWDefines(const LangOptions &Opts, MacroBuilder &Builder) {
+ Builder.defineMacro("__MSVCRT__");
+ Builder.defineMacro("__MINGW32__");
+ addCygMingDefines(Opts, Builder);
+}
+
+// x86-32 MinGW target
+class MinGWX86_32TargetInfo : public WindowsX86_32TargetInfo {
+public:
+ MinGWX86_32TargetInfo(const llvm::Triple &Triple)
+ : WindowsX86_32TargetInfo(Triple) {}
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ WindowsX86_32TargetInfo::getTargetDefines(Opts, Builder);
+ DefineStd(Builder, "WIN32", Opts);
+ DefineStd(Builder, "WINNT", Opts);
+ Builder.defineMacro("_X86_");
+ addMinGWDefines(Opts, Builder);
+ }
+};
+
+// x86-32 Cygwin target
+class CygwinX86_32TargetInfo : public X86_32TargetInfo {
+public:
+ CygwinX86_32TargetInfo(const llvm::Triple &Triple)
+ : X86_32TargetInfo(Triple) {
+ WCharType = UnsignedShort;
+ DoubleAlign = LongLongAlign = 64;
+ DataLayoutString = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32";
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ X86_32TargetInfo::getTargetDefines(Opts, Builder);
+ Builder.defineMacro("_X86_");
+ Builder.defineMacro("__CYGWIN__");
+ Builder.defineMacro("__CYGWIN32__");
+ addCygMingDefines(Opts, Builder);
+ DefineStd(Builder, "unix", Opts);
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("_GNU_SOURCE");
+ }
+};
+
+// x86-32 Haiku target
+class HaikuX86_32TargetInfo : public X86_32TargetInfo {
+public:
+ HaikuX86_32TargetInfo(const llvm::Triple &Triple) : X86_32TargetInfo(Triple) {
+ SizeType = UnsignedLong;
+ IntPtrType = SignedLong;
+ PtrDiffType = SignedLong;
+ ProcessIDType = SignedLong;
+ this->UserLabelPrefix = "";
+ this->TLSSupported = false;
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ X86_32TargetInfo::getTargetDefines(Opts, Builder);
+ Builder.defineMacro("__INTEL__");
+ Builder.defineMacro("__HAIKU__");
+ }
+};
+
+// X86-32 MCU target
+class MCUX86_32TargetInfo : public X86_32TargetInfo {
+public:
+ MCUX86_32TargetInfo(const llvm::Triple &Triple) : X86_32TargetInfo(Triple) {
+ LongDoubleWidth = 64;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ }
+
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
+ // On MCU we support only C calling convention.
+ return CC == CC_C ? CCCR_OK : CCCR_Warning;
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ X86_32TargetInfo::getTargetDefines(Opts, Builder);
+ Builder.defineMacro("__iamcu");
+ Builder.defineMacro("__iamcu__");
+ }
+};
+
+// RTEMS Target
+template<typename Target>
+class RTEMSTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // RTEMS defines; list based off of gcc output
+
+ Builder.defineMacro("__rtems__");
+ Builder.defineMacro("__ELF__");
+ }
+
+public:
+ RTEMSTargetInfo(const llvm::Triple &Triple) : OSTargetInfo<Target>(Triple) {
+ this->UserLabelPrefix = "";
+
+ switch (Triple.getArch()) {
+ default:
+ case llvm::Triple::x86:
+ // this->MCountName = ".mcount";
+ break;
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ // this->MCountName = "_mcount";
+ break;
+ case llvm::Triple::arm:
+ // this->MCountName = "__mcount";
+ break;
+ }
+ }
+};
+
+// x86-32 RTEMS target
+class RTEMSX86_32TargetInfo : public X86_32TargetInfo {
+public:
+ RTEMSX86_32TargetInfo(const llvm::Triple &Triple) : X86_32TargetInfo(Triple) {
+ SizeType = UnsignedLong;
+ IntPtrType = SignedLong;
+ PtrDiffType = SignedLong;
+ this->UserLabelPrefix = "";
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ X86_32TargetInfo::getTargetDefines(Opts, Builder);
+ Builder.defineMacro("__INTEL__");
+ Builder.defineMacro("__rtems__");
+ }
+};
+
+// x86-64 generic target
+class X86_64TargetInfo : public X86TargetInfo {
+public:
+ X86_64TargetInfo(const llvm::Triple &Triple) : X86TargetInfo(Triple) {
+ const bool IsX32 = getTriple().getEnvironment() == llvm::Triple::GNUX32;
+ bool IsWinCOFF =
+ getTriple().isOSWindows() && getTriple().isOSBinFormatCOFF();
+ LongWidth = LongAlign = PointerWidth = PointerAlign = IsX32 ? 32 : 64;
+ LongDoubleWidth = 128;
+ LongDoubleAlign = 128;
+ LargeArrayMinWidth = 128;
+ LargeArrayAlign = 128;
+ SuitableAlign = 128;
+ SizeType = IsX32 ? UnsignedInt : UnsignedLong;
+ PtrDiffType = IsX32 ? SignedInt : SignedLong;
+ IntPtrType = IsX32 ? SignedInt : SignedLong;
+ IntMaxType = IsX32 ? SignedLongLong : SignedLong;
+ Int64Type = IsX32 ? SignedLongLong : SignedLong;
+ RegParmMax = 6;
+
+ // Pointers are 32-bit in x32.
+ DataLayoutString = IsX32 ? "e-m:e-p:32:32-i64:64-f80:128-n8:16:32:64-S128"
+ : IsWinCOFF
+ ? "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
+ : "e-m:e-i64:64-f80:128-n8:16:32:64-S128";
+
+ // Use fpret only for long double.
+ RealTypeUsesObjCFPRet = (1 << TargetInfo::LongDouble);
+
+ // Use fp2ret for _Complex long double.
+ ComplexLongDoubleUsesFP2Ret = true;
+
+ // Make __builtin_ms_va_list available.
+ HasBuiltinMSVaList = true;
+
+ // x86-64 has atomics up to 16 bytes.
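+ // (16-byte atomic operations are typically lowered via cmpxchg16b.)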
+ MaxAtomicPromoteWidth = 128;
+ MaxAtomicInlineWidth = 128;
+ }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::X86_64ABIBuiltinVaList;
+ }
+
+ int getEHDataRegisterNumber(unsigned RegNo) const override {
+ if (RegNo == 0) return 0;
+ if (RegNo == 1) return 1;
+ return -1;
+ }
+
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
+ return (CC == CC_C ||
+ CC == CC_X86VectorCall ||
+ CC == CC_IntelOclBicc ||
+ CC == CC_X86_64Win64) ? CCCR_OK : CCCR_Warning;
+ }
+
+ CallingConv getDefaultCallingConv(CallingConvMethodType MT) const override {
+ return CC_C;
+ }
+
+ // For x32, 128-bit integer support must be enabled explicitly here.
+ bool hasInt128Type() const override { return true; }
+
+ bool validateGlobalRegisterVariable(StringRef RegName,
+ unsigned RegSize,
+ bool &HasSizeMismatch) const override {
+ // rsp and rbp are the only 64-bit registers the x86 backend can currently
+ // handle.
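+ // e.g. this accepts declarations like: register long sp asm("rsp");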
+ if (RegName.equals("rsp") || RegName.equals("rbp")) {
+ // Check that the register size is 64-bit.
+ HasSizeMismatch = RegSize != 64;
+ return true;
+ }
+
+ // Check if the register is a 32-bit register the backend can handle.
+ return X86TargetInfo::validateGlobalRegisterVariable(RegName, RegSize,
+ HasSizeMismatch);
+ }
+};
+
+// x86-64 Windows target
+class WindowsX86_64TargetInfo : public WindowsTargetInfo<X86_64TargetInfo> {
+public:
+ WindowsX86_64TargetInfo(const llvm::Triple &Triple)
+ : WindowsTargetInfo<X86_64TargetInfo>(Triple) {
+ WCharType = UnsignedShort;
+ LongWidth = LongAlign = 32;
+ DoubleAlign = LongLongAlign = 64;
+ IntMaxType = SignedLongLong;
+ Int64Type = SignedLongLong;
+ SizeType = UnsignedLongLong;
+ PtrDiffType = SignedLongLong;
+ IntPtrType = SignedLongLong;
+ this->UserLabelPrefix = "";
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ WindowsTargetInfo<X86_64TargetInfo>::getTargetDefines(Opts, Builder);
+ Builder.defineMacro("_WIN64");
+ }
+
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::CharPtrBuiltinVaList;
+ }
+
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
+ switch (CC) {
+ case CC_X86StdCall:
+ case CC_X86ThisCall:
+ case CC_X86FastCall:
+ return CCCR_Ignore;
+ case CC_C:
+ case CC_X86VectorCall:
+ case CC_IntelOclBicc:
+ case CC_X86_64SysV:
+ return CCCR_OK;
+ default:
+ return CCCR_Warning;
+ }
+ }
+};
+
+// x86-64 Windows Visual Studio target
+class MicrosoftX86_64TargetInfo : public WindowsX86_64TargetInfo {
+public:
+ MicrosoftX86_64TargetInfo(const llvm::Triple &Triple)
+ : WindowsX86_64TargetInfo(Triple) {
+ LongDoubleWidth = LongDoubleAlign = 64;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ WindowsX86_64TargetInfo::getTargetDefines(Opts, Builder);
+ WindowsX86_64TargetInfo::getVisualStudioDefines(Opts, Builder);
+ Builder.defineMacro("_M_X64", "100");
+ Builder.defineMacro("_M_AMD64", "100");
+ }
+};
+
+// x86-64 MinGW target
+class MinGWX86_64TargetInfo : public WindowsX86_64TargetInfo {
+public:
+ MinGWX86_64TargetInfo(const llvm::Triple &Triple)
+ : WindowsX86_64TargetInfo(Triple) {
+ // Mingw64 rounds long double size and alignment up to 16 bytes, but sticks
+ // with x86 FP ops. Weird.
+ LongDoubleWidth = LongDoubleAlign = 128;
+ LongDoubleFormat = &llvm::APFloat::x87DoubleExtended;
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ WindowsX86_64TargetInfo::getTargetDefines(Opts, Builder);
+ DefineStd(Builder, "WIN64", Opts);
+ Builder.defineMacro("__MINGW64__");
+ addMinGWDefines(Opts, Builder);
+
+ // GCC defines this macro when it is using __gxx_personality_seh0.
+ if (!Opts.SjLjExceptions)
+ Builder.defineMacro("__SEH__");
+ }
+};
+
+// x86-64 Cygwin target
+class CygwinX86_64TargetInfo : public X86_64TargetInfo {
+public:
+ CygwinX86_64TargetInfo(const llvm::Triple &Triple)
+ : X86_64TargetInfo(Triple) {
+ TLSSupported = false;
+ WCharType = UnsignedShort;
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ X86_64TargetInfo::getTargetDefines(Opts, Builder);
+ Builder.defineMacro("__x86_64__");
+ Builder.defineMacro("__CYGWIN__");
+ Builder.defineMacro("__CYGWIN64__");
+ addCygMingDefines(Opts, Builder);
+ DefineStd(Builder, "unix", Opts);
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("_GNU_SOURCE");
+
+ // GCC defines this macro when it is using __gxx_personality_seh0.
+ if (!Opts.SjLjExceptions)
+ Builder.defineMacro("__SEH__");
+ }
+};
+
+class DarwinX86_64TargetInfo : public DarwinTargetInfo<X86_64TargetInfo> {
+public:
+ DarwinX86_64TargetInfo(const llvm::Triple &Triple)
+ : DarwinTargetInfo<X86_64TargetInfo>(Triple) {
+ Int64Type = SignedLongLong;
+ // The 64-bit iOS simulator uses the builtin bool type for Objective-C.
+ llvm::Triple T = llvm::Triple(Triple);
+ if (T.isiOS())
+ UseSignedCharForObjCBool = false;
+ DataLayoutString = "e-m:o-i64:64-f80:128-n8:16:32:64-S128";
+ }
+
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) override {
+ if (!DarwinTargetInfo<X86_64TargetInfo>::handleTargetFeatures(Features,
+ Diags))
+ return false;
+ // We now know the features we have: we can decide how to align vectors.
+ MaxVectorAlign =
+ hasFeature("avx512f") ? 512 : hasFeature("avx") ? 256 : 128;
+ return true;
+ }
+};
+
+class OpenBSDX86_64TargetInfo : public OpenBSDTargetInfo<X86_64TargetInfo> {
+public:
+ OpenBSDX86_64TargetInfo(const llvm::Triple &Triple)
+ : OpenBSDTargetInfo<X86_64TargetInfo>(Triple) {
+ IntMaxType = SignedLongLong;
+ Int64Type = SignedLongLong;
+ }
+};
+
+class BitrigX86_64TargetInfo : public BitrigTargetInfo<X86_64TargetInfo> {
+public:
+ BitrigX86_64TargetInfo(const llvm::Triple &Triple)
+ : BitrigTargetInfo<X86_64TargetInfo>(Triple) {
+ IntMaxType = SignedLongLong;
+ Int64Type = SignedLongLong;
+ }
+};
+
+class ARMTargetInfo : public TargetInfo {
+ // Possible FPU choices.
+ enum FPUMode {
+ VFP2FPU = (1 << 0),
+ VFP3FPU = (1 << 1),
+ VFP4FPU = (1 << 2),
+ NeonFPU = (1 << 3),
+ FPARMV8 = (1 << 4)
+ };
+
+ // Possible HWDiv features.
+ enum HWDivMode {
+ HWDivThumb = (1 << 0),
+ HWDivARM = (1 << 1)
+ };
+
+ static bool FPUModeIsVFP(FPUMode Mode) {
+ return Mode & (VFP2FPU | VFP3FPU | VFP4FPU | NeonFPU | FPARMV8);
+ }
+
+ static const TargetInfo::GCCRegAlias GCCRegAliases[];
+ static const char * const GCCRegNames[];
+
+ std::string ABI, CPU;
+
+ StringRef CPUProfile;
+ StringRef CPUAttr;
+
+ enum {
+ FP_Default,
+ FP_VFP,
+ FP_Neon
+ } FPMath;
+
+ unsigned ArchISA;
+ unsigned ArchKind = llvm::ARM::AK_ARMV4T;
+ unsigned ArchProfile;
+ unsigned ArchVersion;
+
+ unsigned FPU : 5;
+
+ unsigned IsAAPCS : 1;
+ unsigned HWDiv : 2;
+
+ // Initialized via features.
+ unsigned SoftFloat : 1;
+ unsigned SoftFloatABI : 1;
+
+ unsigned CRC : 1;
+ unsigned Crypto : 1;
+ unsigned DSP : 1;
+ unsigned Unaligned : 1;
+
+ enum {
+ LDREX_B = (1 << 0), // byte (8-bit)
+ LDREX_H = (1 << 1), // half (16-bit)
+ LDREX_W = (1 << 2), // word (32-bit)
+ LDREX_D = (1 << 3), // double (64-bit)
+ };
+
+ uint32_t LDREX;
+
+ // ACLE 6.5.1 Hardware floating point
+ enum {
+ HW_FP_HP = (1 << 1), // half (16-bit)
+ HW_FP_SP = (1 << 2), // single (32-bit)
+ HW_FP_DP = (1 << 3), // double (64-bit)
+ };
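+ // e.g. HW_FP_SP | HW_FP_DP == 0xC, the value later exported via __ARM_FP.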
+ uint32_t HW_FP;
+
+ static const Builtin::Info BuiltinInfo[];
+
+ void setABIAAPCS() {
+ IsAAPCS = true;
+
+ DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 64;
+ const llvm::Triple &T = getTriple();
+
+ // size_t is unsigned long on MachO-derived environments, NetBSD and Bitrig.
+ if (T.isOSBinFormatMachO() || T.getOS() == llvm::Triple::NetBSD ||
+ T.getOS() == llvm::Triple::Bitrig)
+ SizeType = UnsignedLong;
+ else
+ SizeType = UnsignedInt;
+
+ switch (T.getOS()) {
+ case llvm::Triple::NetBSD:
+ WCharType = SignedInt;
+ break;
+ case llvm::Triple::Win32:
+ WCharType = UnsignedShort;
+ break;
+ case llvm::Triple::Linux:
+ default:
+ // AAPCS 7.1.1, ARM-Linux ABI 2.4: type of wchar_t is unsigned int.
+ WCharType = UnsignedInt;
+ break;
+ }
+
+ UseBitFieldTypeAlignment = true;
+
+ ZeroLengthBitfieldBoundary = 0;
+
+ // Thumb1 add sp, #imm requires the immediate value be multiple of 4,
+ // so set preferred for small types to 32.
+ if (T.isOSBinFormatMachO()) {
+ DataLayoutString =
+ BigEndian ? "E-m:o-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+ : "e-m:o-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64";
+ } else if (T.isOSWindows()) {
+ assert(!BigEndian && "Windows on ARM does not support big endian");
+ DataLayoutString = "e"
+ "-m:w"
+ "-p:32:32"
+ "-i64:64"
+ "-v128:64:128"
+ "-a:0:32"
+ "-n32"
+ "-S64";
+ } else if (T.isOSNaCl()) {
+ assert(!BigEndian && "NaCl on ARM does not support big endian");
+ DataLayoutString = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S128";
+ } else {
+ DataLayoutString =
+ BigEndian ? "E-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+ : "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64";
+ }
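+
+ // In these strings "v128:64:128" gives 128-bit vectors an ABI alignment
+ // of 64 bits and a preferred alignment of 128 bits; "S64"/"S128" is the
+ // natural stack alignment in bits.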
+
+ // FIXME: Enumerated types are variable width in straight AAPCS.
+ }
+
+ void setABIAPCS(bool IsAAPCS16) {
+ const llvm::Triple &T = getTriple();
+
+ IsAAPCS = false;
+
+ if (IsAAPCS16)
+ DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 64;
+ else
+ DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 32;
+
+ // size_t is unsigned int on FreeBSD.
+ if (T.getOS() == llvm::Triple::FreeBSD)
+ SizeType = UnsignedInt;
+ else
+ SizeType = UnsignedLong;
+
+ // Revert to using SignedInt on apcs-gnu to comply with existing behaviour.
+ WCharType = SignedInt;
+
+ // Do not respect the alignment of bit-field types when laying out
+ // structures. This corresponds to PCC_BITFIELD_TYPE_MATTERS in gcc.
+ UseBitFieldTypeAlignment = false;
+
+ // gcc forces the alignment to 4 bytes, regardless of the type of the
+ // zero length bitfield. This corresponds to EMPTY_FIELD_BOUNDARY in
+ // gcc.
+ ZeroLengthBitfieldBoundary = 32;
+
+ if (T.isOSBinFormatMachO() && IsAAPCS16) {
+ assert(!BigEndian && "AAPCS16 does not support big-endian");
+ DataLayoutString = "e-m:o-p:32:32-i64:64-a:0:32-n32-S128";
+ } else if (T.isOSBinFormatMachO())
+ DataLayoutString =
+ BigEndian
+ ? "E-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
+ : "e-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32";
+ else
+ DataLayoutString =
+ BigEndian
+ ? "E-m:e-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
+ : "e-m:e-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32";
+
+ // FIXME: Override "preferred align" for double and long long.
+ }
+
+ void setArchInfo() {
+ StringRef ArchName = getTriple().getArchName();
+
+ ArchISA = llvm::ARM::parseArchISA(ArchName);
+ CPU = llvm::ARM::getDefaultCPU(ArchName);
+ unsigned AK = llvm::ARM::parseArch(ArchName);
+ if (AK != llvm::ARM::AK_INVALID)
+ ArchKind = AK;
+ setArchInfo(ArchKind);
+ }
+
+ void setArchInfo(unsigned Kind) {
+ StringRef SubArch;
+
+ // Cache TargetParser info.
+ ArchKind = Kind;
+ SubArch = llvm::ARM::getSubArch(ArchKind);
+ ArchProfile = llvm::ARM::parseArchProfile(SubArch);
+ ArchVersion = llvm::ARM::parseArchVersion(SubArch);
+
+ // Cache CPU-related strings.
+ CPUAttr = getCPUAttr();
+ CPUProfile = getCPUProfile();
+ }
+
+ void setAtomic() {
+ // When the triple does not specify a sub-arch, we do not use inline
+ // atomics.
+ bool ShouldUseInlineAtomic =
+ (ArchISA == llvm::ARM::IK_ARM && ArchVersion >= 6) ||
+ (ArchISA == llvm::ARM::IK_THUMB && ArchVersion >= 7);
+ // Cortex M does not support 8 byte atomics, while general Thumb2 does.
+ if (ArchProfile == llvm::ARM::PK_M) {
+ MaxAtomicPromoteWidth = 32;
+ if (ShouldUseInlineAtomic)
+ MaxAtomicInlineWidth = 32;
+ } else {
+ MaxAtomicPromoteWidth = 64;
+ if (ShouldUseInlineAtomic)
+ MaxAtomicInlineWidth = 64;
+ }
+ }
+
+ bool isThumb() const {
+ return (ArchISA == llvm::ARM::IK_THUMB);
+ }
+
+ bool supportsThumb() const {
+ return CPUAttr.count('T') || ArchVersion >= 6;
+ }
+
+ bool supportsThumb2() const {
+ return CPUAttr.equals("6T2") || ArchVersion >= 7;
+ }
+
+ StringRef getCPUAttr() const {
+ // For most sub-arches, the build attribute CPU name is enough.
+ // For Cortex variants, it's slightly different.
+ switch(ArchKind) {
+ default:
+ return llvm::ARM::getCPUAttr(ArchKind);
+ case llvm::ARM::AK_ARMV6M:
+ return "6M";
+ case llvm::ARM::AK_ARMV7S:
+ return "7S";
+ case llvm::ARM::AK_ARMV7A:
+ return "7A";
+ case llvm::ARM::AK_ARMV7R:
+ return "7R";
+ case llvm::ARM::AK_ARMV7M:
+ return "7M";
+ case llvm::ARM::AK_ARMV7EM:
+ return "7EM";
+ case llvm::ARM::AK_ARMV8A:
+ return "8A";
+ case llvm::ARM::AK_ARMV8_1A:
+ return "8_1A";
+ }
+ }
+
+ StringRef getCPUProfile() const {
+ switch(ArchProfile) {
+ case llvm::ARM::PK_A:
+ return "A";
+ case llvm::ARM::PK_R:
+ return "R";
+ case llvm::ARM::PK_M:
+ return "M";
+ default:
+ return "";
+ }
+ }
+
+public:
+ ARMTargetInfo(const llvm::Triple &Triple, bool IsBigEndian)
+ : TargetInfo(Triple), FPMath(FP_Default),
+ IsAAPCS(true), LDREX(0), HW_FP(0) {
+ BigEndian = IsBigEndian;
+
+ switch (getTriple().getOS()) {
+ case llvm::Triple::NetBSD:
+ PtrDiffType = SignedLong;
+ break;
+ default:
+ PtrDiffType = SignedInt;
+ break;
+ }
+
+ // Cache arch related info.
+ setArchInfo();
+
+ // {} in inline assembly are neon specifiers, not assembly variant
+ // specifiers.
+ NoAsmVariants = true;
+
+ // FIXME: This duplicates code from the driver that sets the -target-abi
+ // option - this code is used if -target-abi isn't passed and should
+ // be unified in some way.
+ if (Triple.isOSBinFormatMachO()) {
+ // The backend is hardwired to assume AAPCS for M-class processors, ensure
+ // the frontend matches that.
+ if (Triple.getEnvironment() == llvm::Triple::EABI ||
+ Triple.getOS() == llvm::Triple::UnknownOS ||
+ StringRef(CPU).startswith("cortex-m")) {
+ setABI("aapcs");
+ } else if (Triple.isWatchOS()) {
+ setABI("aapcs16");
+ } else {
+ setABI("apcs-gnu");
+ }
+ } else if (Triple.isOSWindows()) {
+ // FIXME: this is invalid for WindowsCE
+ setABI("aapcs");
+ } else {
+ // Select the default based on the platform.
+ switch (Triple.getEnvironment()) {
+ case llvm::Triple::Android:
+ case llvm::Triple::GNUEABI:
+ case llvm::Triple::GNUEABIHF:
+ setABI("aapcs-linux");
+ break;
+ case llvm::Triple::EABIHF:
+ case llvm::Triple::EABI:
+ setABI("aapcs");
+ break;
+ case llvm::Triple::GNU:
+ setABI("apcs-gnu");
+ break;
+ default:
+ if (Triple.getOS() == llvm::Triple::NetBSD)
+ setABI("apcs-gnu");
+ else
+ setABI("aapcs");
+ break;
+ }
+ }
+
+ // ARM targets default to using the ARM C++ ABI.
+ TheCXXABI.set(TargetCXXABI::GenericARM);
+
+ // ARM has atomics up to 8 bytes
+ setAtomic();
+
+ // Force alignment of members that follow zero-length bitfields. If the
+ // alignment of the zero-length bitfield is greater than that of the
+ // member `bar' that follows it, `bar' will be aligned to the type of
+ // the zero-length bitfield.
+ UseZeroLengthBitfieldAlignment = true;
+ }
+
+ StringRef getABI() const override { return ABI; }
+
+ bool setABI(const std::string &Name) override {
+ ABI = Name;
+
+ // The defaults (above) are for AAPCS, check if we need to change them.
+ //
+ // FIXME: We need support for -meabi... we could just mangle it into the
+ // name.
+ if (Name == "apcs-gnu" || Name == "aapcs16") {
+ setABIAPCS(Name == "aapcs16");
+ return true;
+ }
+ if (Name == "aapcs" || Name == "aapcs-vfp" || Name == "aapcs-linux") {
+ setABIAAPCS();
+ return true;
+ }
+ return false;
+ }
+
+ // FIXME: This should be based on Arch attributes, not CPU names.
+ bool
+ initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
+ StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const override {
+
+ std::vector<const char*> TargetFeatures;
+ unsigned Arch = llvm::ARM::parseArch(getTriple().getArchName());
+
+ // Get the default FPU features.
+ unsigned FPUKind = llvm::ARM::getDefaultFPU(CPU, Arch);
+ llvm::ARM::getFPUFeatures(FPUKind, TargetFeatures);
+
+ // Get the default extension features.
+ unsigned Extensions = llvm::ARM::getDefaultExtensions(CPU, Arch);
+ llvm::ARM::getExtensionFeatures(Extensions, TargetFeatures);
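+ // For example, a CPU such as "cortex-a7" typically contributes "+neon",
+ // "+vfp4", "+hwdiv" and "+hwdiv-arm" here, before user overrides apply.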
+
+ for (const char *Feature : TargetFeatures)
+ if (Feature[0] == '+')
+ Features[Feature+1] = true;
+
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
+ }
+
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) override {
+ FPU = 0;
+ CRC = 0;
+ Crypto = 0;
+ DSP = 0;
+ Unaligned = 1;
+ SoftFloat = SoftFloatABI = false;
+ HWDiv = 0;
+
+ // This does not diagnose illegal cases like having both
+ // "+vfpv2" and "+vfpv3" or having "+neon" and "+fp-only-sp".
+ uint32_t HW_FP_remove = 0;
+ for (const auto &Feature : Features) {
+ if (Feature == "+soft-float") {
+ SoftFloat = true;
+ } else if (Feature == "+soft-float-abi") {
+ SoftFloatABI = true;
+ } else if (Feature == "+vfp2") {
+ FPU |= VFP2FPU;
+ HW_FP |= HW_FP_SP | HW_FP_DP;
+ } else if (Feature == "+vfp3") {
+ FPU |= VFP3FPU;
+ HW_FP |= HW_FP_SP | HW_FP_DP;
+ } else if (Feature == "+vfp4") {
+ FPU |= VFP4FPU;
+ HW_FP |= HW_FP_SP | HW_FP_DP | HW_FP_HP;
+ } else if (Feature == "+fp-armv8") {
+ FPU |= FPARMV8;
+ HW_FP |= HW_FP_SP | HW_FP_DP | HW_FP_HP;
+ } else if (Feature == "+neon") {
+ FPU |= NeonFPU;
+ HW_FP |= HW_FP_SP | HW_FP_DP;
+ } else if (Feature == "+hwdiv") {
+ HWDiv |= HWDivThumb;
+ } else if (Feature == "+hwdiv-arm") {
+ HWDiv |= HWDivARM;
+ } else if (Feature == "+crc") {
+ CRC = 1;
+ } else if (Feature == "+crypto") {
+ Crypto = 1;
+ } else if (Feature == "+dsp") {
+ DSP = 1;
+ } else if (Feature == "+fp-only-sp") {
+ HW_FP_remove |= HW_FP_DP;
+ } else if (Feature == "+strict-align") {
+ Unaligned = 0;
+ } else if (Feature == "+fp16") {
+ HW_FP |= HW_FP_HP;
+ }
+ }
+ HW_FP &= ~HW_FP_remove;
+
+ switch (ArchVersion) {
+ case 6:
+ if (ArchProfile == llvm::ARM::PK_M)
+ LDREX = 0;
+ else if (ArchKind == llvm::ARM::AK_ARMV6K)
+ LDREX = LDREX_D | LDREX_W | LDREX_H | LDREX_B;
+ else
+ LDREX = LDREX_W;
+ break;
+ case 7:
+ if (ArchProfile == llvm::ARM::PK_M)
+ LDREX = LDREX_W | LDREX_H | LDREX_B;
+ else
+ LDREX = LDREX_D | LDREX_W | LDREX_H | LDREX_B;
+ break;
+ case 8:
+ LDREX = LDREX_D | LDREX_W | LDREX_H | LDREX_B;
+ }
+
+ if (!(FPU & NeonFPU) && FPMath == FP_Neon) {
+ Diags.Report(diag::err_target_unsupported_fpmath) << "neon";
+ return false;
+ }
+
+ if (FPMath == FP_Neon)
+ Features.push_back("+neonfp");
+ else if (FPMath == FP_VFP)
+ Features.push_back("-neonfp");
+
+ // Remove front-end specific options which the backend handles differently.
+ auto Feature =
+ std::find(Features.begin(), Features.end(), "+soft-float-abi");
+ if (Feature != Features.end())
+ Features.erase(Feature);
+
+ return true;
+ }
+
+ bool hasFeature(StringRef Feature) const override {
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("arm", true)
+ .Case("aarch32", true)
+ .Case("softfloat", SoftFloat)
+ .Case("thumb", isThumb())
+ .Case("neon", (FPU & NeonFPU) && !SoftFloat)
+ .Case("hwdiv", HWDiv & HWDivThumb)
+ .Case("hwdiv-arm", HWDiv & HWDivARM)
+ .Default(false);
+ }
+
+ bool setCPU(const std::string &Name) override {
+ if (Name != "generic")
+ setArchInfo(llvm::ARM::parseCPUArch(Name));
+
+ if (ArchKind == llvm::ARM::AK_INVALID)
+ return false;
+ setAtomic();
+ CPU = Name;
+ return true;
+ }
+
+ bool setFPMath(StringRef Name) override;
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ // Target identification.
+ Builder.defineMacro("__arm");
+ Builder.defineMacro("__arm__");
+
+ // Target properties.
+ Builder.defineMacro("__REGISTER_PREFIX__", "");
+
+ // Unfortunately, __ARM_ARCH_7K__ is now more of an ABI descriptor. The CPU
+ // happens to be Cortex-A7 though, so it should still get __ARM_ARCH_7A__.
+ if (getTriple().isWatchOS())
+ Builder.defineMacro("__ARM_ARCH_7K__", "2");
+
+ if (!CPUAttr.empty())
+ Builder.defineMacro("__ARM_ARCH_" + CPUAttr + "__");
+
+ // ACLE 6.4.1 ARM/Thumb instruction set architecture
+ // __ARM_ARCH is defined as an integer value indicating the current ARM ISA
+ Builder.defineMacro("__ARM_ARCH", Twine(ArchVersion));
+
+ if (ArchVersion >= 8) {
+ // ACLE 6.5.7 Crypto Extension
+ if (Crypto)
+ Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");
+ // ACLE 6.5.8 CRC32 Extension
+ if (CRC)
+ Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
+ // ACLE 6.5.10 Numeric Maximum and Minimum
+ Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
+ // ACLE 6.5.9 Directed Rounding
+ Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");
+ }
+
+ // __ARM_ARCH_ISA_ARM is defined to 1 if the core supports the ARM ISA. It
+ // is not defined for the M-profile.
+ // Note that the default profile is assumed to be 'A'.
+ if (CPUProfile.empty() || CPUProfile != "M")
+ Builder.defineMacro("__ARM_ARCH_ISA_ARM", "1");
+
+ // __ARM_ARCH_ISA_THUMB is defined to 1 if the core supports the original
+ // Thumb ISA (including v6-M). It is set to 2 if the core supports the
+ // Thumb-2 ISA as found in the v6T2 architecture and all v7 architectures.
+ if (supportsThumb2())
+ Builder.defineMacro("__ARM_ARCH_ISA_THUMB", "2");
+ else if (supportsThumb())
+ Builder.defineMacro("__ARM_ARCH_ISA_THUMB", "1");
+
+ // __ARM_32BIT_STATE is defined to 1 if code is being generated for a 32-bit
+ // instruction set such as ARM or Thumb.
+ Builder.defineMacro("__ARM_32BIT_STATE", "1");
+
+ // ACLE 6.4.2 Architectural Profile (A, R, M or pre-Cortex)
+
+ // __ARM_ARCH_PROFILE is defined as 'A', 'R', 'M' or 'S', or unset.
+ if (!CPUProfile.empty())
+ Builder.defineMacro("__ARM_ARCH_PROFILE", "'" + CPUProfile + "'");
+
+ // ACLE 6.4.3 Unaligned access supported in hardware
+ if (Unaligned)
+ Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
+
+ // ACLE 6.4.4 LDREX/STREX
+ if (LDREX)
+ Builder.defineMacro("__ARM_FEATURE_LDREX", "0x" + llvm::utohexstr(LDREX));
+
+ // ACLE 6.4.5 CLZ
+ if (ArchVersion == 5 ||
+ (ArchVersion == 6 && CPUProfile != "M") ||
+ ArchVersion > 6)
+ Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
+
+ // ACLE 6.5.1 Hardware Floating Point
+ if (HW_FP)
+ Builder.defineMacro("__ARM_FP", "0x" + llvm::utohexstr(HW_FP));
+
+ // ACLE predefines.
+ Builder.defineMacro("__ARM_ACLE", "200");
+
+ // FP16 support (we currently only support IEEE format).
+ Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
+ Builder.defineMacro("__ARM_FP16_ARGS", "1");
+
+ // ACLE 6.5.3 Fused multiply-accumulate (FMA)
+ if (ArchVersion >= 7 && (CPUProfile != "M" || CPUAttr == "7EM"))
+ Builder.defineMacro("__ARM_FEATURE_FMA", "1");
+
+ // Subtarget options.
+
+ // FIXME: It's more complicated than this and we don't really support
+ // interworking.
+ // Windows on ARM does not "support" interworking
+ if (5 <= ArchVersion && ArchVersion <= 8 && !getTriple().isOSWindows())
+ Builder.defineMacro("__THUMB_INTERWORK__");
+
+ if (ABI == "aapcs" || ABI == "aapcs-linux" || ABI == "aapcs-vfp") {
+ // Embedded targets on Darwin follow AAPCS, but not EABI.
+ // Windows on ARM follows AAPCS VFP, but does not conform to EABI.
+ if (!getTriple().isOSDarwin() && !getTriple().isOSWindows())
+ Builder.defineMacro("__ARM_EABI__");
+ Builder.defineMacro("__ARM_PCS", "1");
+
+ if ((!SoftFloat && !SoftFloatABI) || ABI == "aapcs-vfp")
+ Builder.defineMacro("__ARM_PCS_VFP", "1");
+ }
+
+ if (SoftFloat)
+ Builder.defineMacro("__SOFTFP__");
+
+ if (CPU == "xscale")
+ Builder.defineMacro("__XSCALE__");
+
+ if (isThumb()) {
+ Builder.defineMacro("__THUMBEL__");
+ Builder.defineMacro("__thumb__");
+ if (supportsThumb2())
+ Builder.defineMacro("__thumb2__");
+ }
+
+ // ACLE 6.4.9 32-bit SIMD instructions
+ if (ArchVersion >= 6 && (CPUProfile != "M" || CPUAttr == "7EM"))
+ Builder.defineMacro("__ARM_FEATURE_SIMD32", "1");
+
+ // ACLE 6.4.10 Hardware Integer Divide
+ if (((HWDiv & HWDivThumb) && isThumb()) ||
+ ((HWDiv & HWDivARM) && !isThumb())) {
+ Builder.defineMacro("__ARM_FEATURE_IDIV", "1");
+ Builder.defineMacro("__ARM_ARCH_EXT_IDIV__", "1");
+ }
+
+ // Note: this is always on in gcc, even though it doesn't make sense.
+ Builder.defineMacro("__APCS_32__");
+
+ if (FPUModeIsVFP((FPUMode) FPU)) {
+ Builder.defineMacro("__VFP_FP__");
+ if (FPU & VFP2FPU)
+ Builder.defineMacro("__ARM_VFPV2__");
+ if (FPU & VFP3FPU)
+ Builder.defineMacro("__ARM_VFPV3__");
+ if (FPU & VFP4FPU)
+ Builder.defineMacro("__ARM_VFPV4__");
+ }
+
+ // This only gets set when Neon instructions are actually available, unlike
+ // the VFP define, hence the soft-float and arch check. This is subtly
+ // different from gcc; we follow the intent, which was that it should be
+ // set when Neon instructions are actually available.
+ if ((FPU & NeonFPU) && !SoftFloat && ArchVersion >= 7) {
+ Builder.defineMacro("__ARM_NEON", "1");
+ Builder.defineMacro("__ARM_NEON__");
+ // Current AArch32 NEON implementations do not support double-precision
+ // floating-point even when it is present in VFP.
+ Builder.defineMacro("__ARM_NEON_FP",
+ "0x" + llvm::utohexstr(HW_FP & ~HW_FP_DP));
+ }
+
+ Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
+ Opts.ShortWChar ? "2" : "4");
+
+ Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM",
+ Opts.ShortEnums ? "1" : "4");
+
+ if (ArchVersion >= 6 && CPUAttr != "6M") {
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
+ }
+
+ // ACLE 6.4.7 DSP instructions
+ if (DSP) {
+ Builder.defineMacro("__ARM_FEATURE_DSP", "1");
+ }
+
+ // ACLE 6.4.8 Saturation instructions
+ bool SAT = false;
+ if ((ArchVersion == 6 && CPUProfile != "M") || ArchVersion > 6 ) {
+ Builder.defineMacro("__ARM_FEATURE_SAT", "1");
+ SAT = true;
+ }
+
+ // ACLE 6.4.6 Q (saturation) flag
+ if (DSP || SAT)
+ Builder.defineMacro("__ARM_FEATURE_QBIT", "1");
+
+ if (Opts.UnsafeFPMath)
+ Builder.defineMacro("__ARM_FP_FAST", "1");
+
+ if (ArchKind == llvm::ARM::AK_ARMV8_1A)
+ Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
+ }
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::ARM::LastTSBuiltin-Builtin::FirstTSBuiltin);
+ }
+ bool isCLZForZeroUndef() const override { return false; }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return IsAAPCS
+ ? AAPCSABIBuiltinVaList
+ : (getTriple().isWatchOS() ? TargetInfo::CharPtrBuiltinVaList
+ : TargetInfo::VoidPtrBuiltinVaList);
+ }
+ ArrayRef<const char *> getGCCRegNames() const override;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override {
+ switch (*Name) {
+ default: break;
+ case 'l': // r0-r7
+ case 'h': // r8-r15
+ case 'w': // VFP Floating point register single precision
+ case 'P': // VFP Floating point register double precision
+ Info.setAllowsRegister();
+ return true;
+ case 'I':
+ case 'J':
+ case 'K':
+ case 'L':
+ case 'M':
+ // FIXME
+ return true;
+ case 'Q': // A memory address that is a single base register.
+ Info.setAllowsMemory();
+ return true;
+ case 'U': // a memory reference...
+ switch (Name[1]) {
+ case 'q': // ...ARMV4 ldrsb
+ case 'v': // ...VFP load/store (reg+constant offset)
+ case 'y': // ...iWMMXt load/store
+ case 't': // address valid for load/store opaque types wider
+ // than 128-bits
+ case 'n': // valid address for Neon doubleword vector load/store
+ case 'm': // valid address for Neon element and structure load/store
+ case 's': // valid address for non-offset loads/stores of quad-word
+ // values in four ARM registers
+ Info.setAllowsMemory();
+ Name++;
+ return true;
+ }
+ }
+ return false;
+ }
+ std::string convertConstraint(const char *&Constraint) const override {
+ std::string R;
+ switch (*Constraint) {
+ case 'U': // Two-character constraint; add "^" hint for later parsing.
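+ // e.g. "Uv" is rewritten to "^Uv".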
+ R = std::string("^") + std::string(Constraint, 2);
+ Constraint++;
+ break;
+ case 'p': // 'p' should be translated to 'r' by default.
+ R = std::string("r");
+ break;
+ default:
+ return std::string(1, *Constraint);
+ }
+ return R;
+ }
+ bool
+ validateConstraintModifier(StringRef Constraint, char Modifier, unsigned Size,
+ std::string &SuggestedModifier) const override {
+ bool isOutput = (Constraint[0] == '=');
+ bool isInOut = (Constraint[0] == '+');
+
+ // Strip off constraint modifiers.
+ while (Constraint[0] == '=' ||
+ Constraint[0] == '+' ||
+ Constraint[0] == '&')
+ Constraint = Constraint.substr(1);
+
+ switch (Constraint[0]) {
+ default: break;
+ case 'r': {
+ switch (Modifier) {
+ default:
+ return (isInOut || isOutput || Size <= 64);
+ case 'q':
+ // A register of size 32 cannot fit a vector type.
+ return false;
+ }
+ }
+ }
+
+ return true;
+ }
+ const char *getClobbers() const override {
+ // FIXME: Is this really right?
+ return "";
+ }
+
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
+ return (CC == CC_AAPCS || CC == CC_AAPCS_VFP) ? CCCR_OK : CCCR_Warning;
+ }
+
+ int getEHDataRegisterNumber(unsigned RegNo) const override {
+ if (RegNo == 0) return 0;
+ if (RegNo == 1) return 1;
+ return -1;
+ }
+
+ bool hasSjLjLowering() const override {
+ return true;
+ }
+};
+
+bool ARMTargetInfo::setFPMath(StringRef Name) {
+ if (Name == "neon") {
+ FPMath = FP_Neon;
+ return true;
+ } else if (Name == "vfp" || Name == "vfp2" || Name == "vfp3" ||
+ Name == "vfp4") {
+ FPMath = FP_VFP;
+ return true;
+ }
+ return false;
+}
+
+const char * const ARMTargetInfo::GCCRegNames[] = {
+ // Integer registers
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "sp", "lr", "pc",
+
+ // Float registers
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
+ "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
+ "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
+
+ // Double registers
+ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+ "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+ "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+ "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
+
+ // Quad registers
+ "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+};
+
+ArrayRef<const char *> ARMTargetInfo::getGCCRegNames() const {
+ return llvm::makeArrayRef(GCCRegNames);
+}
+
+const TargetInfo::GCCRegAlias ARMTargetInfo::GCCRegAliases[] = {
+ { { "a1" }, "r0" },
+ { { "a2" }, "r1" },
+ { { "a3" }, "r2" },
+ { { "a4" }, "r3" },
+ { { "v1" }, "r4" },
+ { { "v2" }, "r5" },
+ { { "v3" }, "r6" },
+ { { "v4" }, "r7" },
+ { { "v5" }, "r8" },
+ { { "v6", "rfp" }, "r9" },
+ { { "sl" }, "r10" },
+ { { "fp" }, "r11" },
+ { { "ip" }, "r12" },
+ { { "r13" }, "sp" },
+ { { "r14" }, "lr" },
+ { { "r15" }, "pc" },
+ // The S, D and Q registers overlap, but aren't really aliases; we
+ // don't want to substitute one of these for a different-sized one.
+};
+
+ArrayRef<TargetInfo::GCCRegAlias> ARMTargetInfo::getGCCRegAliases() const {
+ return llvm::makeArrayRef(GCCRegAliases);
+}
+
+const Builtin::Info ARMTargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
+ { #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
+#include "clang/Basic/BuiltinsNEON.def"
+
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define LANGBUILTIN(ID, TYPE, ATTRS, LANG) \
+ { #ID, TYPE, ATTRS, nullptr, LANG, nullptr },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
+ { #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
+#include "clang/Basic/BuiltinsARM.def"
+};
+
+class ARMleTargetInfo : public ARMTargetInfo {
+public:
+ ARMleTargetInfo(const llvm::Triple &Triple)
+ : ARMTargetInfo(Triple, false) { }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ Builder.defineMacro("__ARMEL__");
+ ARMTargetInfo::getTargetDefines(Opts, Builder);
+ }
+};
+
+class ARMbeTargetInfo : public ARMTargetInfo {
+public:
+ ARMbeTargetInfo(const llvm::Triple &Triple)
+ : ARMTargetInfo(Triple, true) { }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ Builder.defineMacro("__ARMEB__");
+ Builder.defineMacro("__ARM_BIG_ENDIAN");
+ ARMTargetInfo::getTargetDefines(Opts, Builder);
+ }
+};
+
+class WindowsARMTargetInfo : public WindowsTargetInfo<ARMleTargetInfo> {
+ const llvm::Triple Triple;
+public:
+ WindowsARMTargetInfo(const llvm::Triple &Triple)
+ : WindowsTargetInfo<ARMleTargetInfo>(Triple), Triple(Triple) {
+ TLSSupported = false;
+ WCharType = UnsignedShort;
+ SizeType = UnsignedInt;
+ UserLabelPrefix = "";
+ }
+ void getVisualStudioDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ WindowsTargetInfo<ARMleTargetInfo>::getVisualStudioDefines(Opts, Builder);
+
+ // FIXME: this is invalid for WindowsCE
+ Builder.defineMacro("_M_ARM_NT", "1");
+ Builder.defineMacro("_M_ARMT", "_M_ARM");
+ Builder.defineMacro("_M_THUMB", "_M_ARM");
+
+ assert((Triple.getArch() == llvm::Triple::arm ||
+ Triple.getArch() == llvm::Triple::thumb) &&
+ "invalid architecture for Windows ARM target info");
+ unsigned Offset = Triple.getArch() == llvm::Triple::arm ? 4 : 6;
+ Builder.defineMacro("_M_ARM", Triple.getArchName().substr(Offset));
+
+ // TODO: map the complete set of values
+ // 31: VFPv3, 40: VFPv4
+ Builder.defineMacro("_M_ARM_FP", "31");
+ }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::CharPtrBuiltinVaList;
+ }
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
+ switch (CC) {
+ case CC_X86StdCall:
+ case CC_X86ThisCall:
+ case CC_X86FastCall:
+ case CC_X86VectorCall:
+ return CCCR_Ignore;
+ case CC_C:
+ return CCCR_OK;
+ default:
+ return CCCR_Warning;
+ }
+ }
+};
+
+// Windows ARM + Itanium C++ ABI Target
+class ItaniumWindowsARMleTargetInfo : public WindowsARMTargetInfo {
+public:
+ ItaniumWindowsARMleTargetInfo(const llvm::Triple &Triple)
+ : WindowsARMTargetInfo(Triple) {
+ TheCXXABI.set(TargetCXXABI::GenericARM);
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ WindowsARMTargetInfo::getTargetDefines(Opts, Builder);
+
+ if (Opts.MSVCCompat)
+ WindowsARMTargetInfo::getVisualStudioDefines(Opts, Builder);
+ }
+};
+
+// Windows ARM, MS (C++) ABI
+class MicrosoftARMleTargetInfo : public WindowsARMTargetInfo {
+public:
+ MicrosoftARMleTargetInfo(const llvm::Triple &Triple)
+ : WindowsARMTargetInfo(Triple) {
+ TheCXXABI.set(TargetCXXABI::Microsoft);
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ WindowsARMTargetInfo::getTargetDefines(Opts, Builder);
+ WindowsARMTargetInfo::getVisualStudioDefines(Opts, Builder);
+ }
+};
+
+// ARM MinGW target
+class MinGWARMTargetInfo : public WindowsARMTargetInfo {
+public:
+ MinGWARMTargetInfo(const llvm::Triple &Triple)
+ : WindowsARMTargetInfo(Triple) {
+ TheCXXABI.set(TargetCXXABI::GenericARM);
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ WindowsARMTargetInfo::getTargetDefines(Opts, Builder);
+ DefineStd(Builder, "WIN32", Opts);
+ DefineStd(Builder, "WINNT", Opts);
+ Builder.defineMacro("_ARM_");
+ addMinGWDefines(Opts, Builder);
+ }
+};
+
+// ARM Cygwin target
+class CygwinARMTargetInfo : public ARMleTargetInfo {
+public:
+ CygwinARMTargetInfo(const llvm::Triple &Triple) : ARMleTargetInfo(Triple) {
+ TLSSupported = false;
+ WCharType = UnsignedShort;
+ DoubleAlign = LongLongAlign = 64;
+ DataLayoutString = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64";
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ ARMleTargetInfo::getTargetDefines(Opts, Builder);
+ Builder.defineMacro("_ARM_");
+ Builder.defineMacro("__CYGWIN__");
+ Builder.defineMacro("__CYGWIN32__");
+ DefineStd(Builder, "unix", Opts);
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("_GNU_SOURCE");
+ }
+};
+
+class DarwinARMTargetInfo :
+ public DarwinTargetInfo<ARMleTargetInfo> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
+ }
+
+public:
+ DarwinARMTargetInfo(const llvm::Triple &Triple)
+ : DarwinTargetInfo<ARMleTargetInfo>(Triple) {
+ HasAlignMac68kSupport = true;
+ // iOS always has 64-bit atomic instructions.
+ // FIXME: This should be based off of the target features in
+ // ARMleTargetInfo.
+ MaxAtomicInlineWidth = 64;
+
+ if (Triple.isWatchOS()) {
+ // Darwin on iOS uses a variant of the ARM C++ ABI.
+ TheCXXABI.set(TargetCXXABI::WatchOS);
+
+ // The 32-bit ABI is silent on what ptrdiff_t should be, but given that
+ // size_t is long, it's a bit weird for it to be int.
+ PtrDiffType = SignedLong;
+
+ // BOOL should be a real boolean on the new ABI
+ UseSignedCharForObjCBool = false;
+ } else
+ TheCXXABI.set(TargetCXXABI::iOS);
+ }
+};
+
+class AArch64TargetInfo : public TargetInfo {
+ virtual void setDataLayoutString() = 0;
+ static const TargetInfo::GCCRegAlias GCCRegAliases[];
+ static const char *const GCCRegNames[];
+
+ enum FPUModeEnum {
+ FPUMode,
+ NeonMode
+ };
+
+ unsigned FPU;
+ unsigned CRC;
+ unsigned Crypto;
+ unsigned Unaligned;
+ unsigned V8_1A;
+
+ static const Builtin::Info BuiltinInfo[];
+
+ std::string ABI;
+
+public:
+ AArch64TargetInfo(const llvm::Triple &Triple)
+ : TargetInfo(Triple), ABI("aapcs") {
+
+ if (getTriple().getOS() == llvm::Triple::NetBSD) {
+ WCharType = SignedInt;
+
+ // NetBSD apparently prefers consistency across ARM targets to consistency
+ // across 64-bit targets.
+ Int64Type = SignedLongLong;
+ IntMaxType = SignedLongLong;
+ } else {
+ WCharType = UnsignedInt;
+ Int64Type = SignedLong;
+ IntMaxType = SignedLong;
+ }
+
+ LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
+ MaxVectorAlign = 128;
+ MaxAtomicInlineWidth = 128;
+ MaxAtomicPromoteWidth = 128;
+
+ LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
+ LongDoubleFormat = &llvm::APFloat::IEEEquad;
+
+ // {} in inline assembly are neon specifiers, not assembly variant
+ // specifiers.
+ NoAsmVariants = true;
+
+ // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
+ // contributes to the alignment of the containing aggregate in the same way
+ // a plain (non bit-field) member of that type would, without exception for
+ // zero-sized or anonymous bit-fields."
+ assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
+ UseZeroLengthBitfieldAlignment = true;
+
+ // AArch64 targets default to using the ARM C++ ABI.
+ TheCXXABI.set(TargetCXXABI::GenericAArch64);
+ }
+
+ StringRef getABI() const override { return ABI; }
+ bool setABI(const std::string &Name) override {
+ if (Name != "aapcs" && Name != "darwinpcs")
+ return false;
+
+ ABI = Name;
+ return true;
+ }
+
+ bool setCPU(const std::string &Name) override {
+ bool CPUKnown = llvm::StringSwitch<bool>(Name)
+ .Case("generic", true)
+ .Cases("cortex-a53", "cortex-a57", "cortex-a72",
+ "cortex-a35", "exynos-m1", true)
+ .Case("cyclone", true)
+ .Default(false);
+ return CPUKnown;
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ // Target identification.
+ Builder.defineMacro("__aarch64__");
+
+ // Target properties.
+ Builder.defineMacro("_LP64");
+ Builder.defineMacro("__LP64__");
+
+ // ACLE predefines. Many can only have one possible value on v8 AArch64.
+ Builder.defineMacro("__ARM_ACLE", "200");
+ Builder.defineMacro("__ARM_ARCH", "8");
+ Builder.defineMacro("__ARM_ARCH_PROFILE", "'A'");
+
+ Builder.defineMacro("__ARM_64BIT_STATE", "1");
+ Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
+ Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");
+
+ Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
+ Builder.defineMacro("__ARM_FEATURE_FMA", "1");
+ Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
+ Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
+ Builder.defineMacro("__ARM_FEATURE_DIV"); // For backwards compatibility
+ Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
+ Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");
+
+ Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");
+
+ // 0xe implies support for half, single and double precision operations.
+ Builder.defineMacro("__ARM_FP", "0xE");
+
+ // PCS specifies this for SysV variants, which is all we support. Other ABIs
+ // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
+ Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
+ Builder.defineMacro("__ARM_FP16_ARGS", "1");
+
+ if (Opts.UnsafeFPMath)
+ Builder.defineMacro("__ARM_FP_FAST", "1");
+
+ Builder.defineMacro("__ARM_SIZEOF_WCHAR_T", Opts.ShortWChar ? "2" : "4");
+
+ Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM",
+ Opts.ShortEnums ? "1" : "4");
+
+ if (FPU == NeonMode) {
+ Builder.defineMacro("__ARM_NEON", "1");
+ // 64-bit NEON supports half, single and double precision operations.
+ Builder.defineMacro("__ARM_NEON_FP", "0xE");
+ }
+
+ if (CRC)
+ Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
+
+ if (Crypto)
+ Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");
+
+ if (Unaligned)
+ Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
+
+ if (V8_1A)
+ Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
+
+ // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
+ }
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::AArch64::LastTSBuiltin - Builtin::FirstTSBuiltin);
+ }
+
+ bool hasFeature(StringRef Feature) const override {
+ return Feature == "aarch64" ||
+ Feature == "arm64" ||
+ Feature == "arm" ||
+ (Feature == "neon" && FPU == NeonMode);
+ }
+
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) override {
+ FPU = FPUMode;
+ CRC = 0;
+ Crypto = 0;
+ Unaligned = 1;
+ V8_1A = 0;
+
+ for (const auto &Feature : Features) {
+ if (Feature == "+neon")
+ FPU = NeonMode;
+ if (Feature == "+crc")
+ CRC = 1;
+ if (Feature == "+crypto")
+ Crypto = 1;
+ if (Feature == "+strict-align")
+ Unaligned = 0;
+ if (Feature == "+v8.1a")
+ V8_1A = 1;
+ }
+
+ setDataLayoutString();
+
+ return true;
+ }
+
+ bool isCLZForZeroUndef() const override { return false; }
+
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::AArch64ABIBuiltinVaList;
+ }
+
+ ArrayRef<const char *> getGCCRegNames() const override;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
+
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override {
+ switch (*Name) {
+ default:
+ return false;
+ case 'w': // Floating point and SIMD registers (V0-V31)
+ Info.setAllowsRegister();
+ return true;
+ case 'I': // Constant that can be used with an ADD instruction
+ case 'J': // Constant that can be used with a SUB instruction
+ case 'K': // Constant that can be used with a 32-bit logical instruction
+ case 'L': // Constant that can be used with a 64-bit logical instruction
+ case 'M': // Constant that can be used as a 32-bit MOV immediate
+ case 'N': // Constant that can be used as a 64-bit MOV immediate
+ case 'Y': // Floating point constant zero
+ case 'Z': // Integer constant zero
+ return true;
+ case 'Q': // A memory reference with base register and no offset
+ Info.setAllowsMemory();
+ return true;
+ case 'S': // A symbolic address
+ Info.setAllowsRegister();
+ return true;
+ case 'U':
+ // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
+ // Utf: A memory address suitable for ldp/stp in TF mode.
+ // Usa: An absolute symbolic address.
+ // Ush: The high part (bits 32:12) of a pc-relative symbolic address.
+ llvm_unreachable("FIXME: Unimplemented support for U* constraints.");
+ case 'z': // Zero register, wzr or xzr
+ Info.setAllowsRegister();
+ return true;
+ case 'x': // Floating point and SIMD registers (V0-V15)
+ Info.setAllowsRegister();
+ return true;
+ }
+ return false;
+ }
+
+ bool
+ validateConstraintModifier(StringRef Constraint, char Modifier, unsigned Size,
+ std::string &SuggestedModifier) const override {
+ // Strip off constraint modifiers.
+ while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
+ Constraint = Constraint.substr(1);
+
+ switch (Constraint[0]) {
+ default:
+ return true;
+ case 'z':
+ case 'r': {
+ switch (Modifier) {
+ case 'x':
+ case 'w':
+ // For now assume that the person knows what they're
+ // doing with the modifier.
+ return true;
+ default:
+ // By default an 'r' constraint will be in the 'x'
+ // registers.
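+ // (The "%wN" asm modifier prints the 32-bit w-form of operand N.)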
+ if (Size == 64)
+ return true;
+
+ SuggestedModifier = "w";
+ return false;
+ }
+ }
+ }
+ }
+
+ const char *getClobbers() const override { return ""; }
+
+ int getEHDataRegisterNumber(unsigned RegNo) const override {
+ if (RegNo == 0)
+ return 0;
+ if (RegNo == 1)
+ return 1;
+ return -1;
+ }
+};
+
+const char *const AArch64TargetInfo::GCCRegNames[] = {
+ // 32-bit Integer registers
+ "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10",
+ "w11", "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21",
+ "w22", "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",
+
+ // 64-bit Integer registers
+ "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10",
+ "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21",
+ "x22", "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",
+
+ // 32-bit floating point registers
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10",
+ "s11", "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21",
+ "s22", "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
+
+ // 64-bit floating point registers
+ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10",
+ "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21",
+ "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
+
+ // Vector registers
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
+ "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21",
+ "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
+};
+
+ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
+ return llvm::makeArrayRef(GCCRegNames);
+}
+
+const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
+ { { "w31" }, "wsp" },
+ { { "x29" }, "fp" },
+ { { "x30" }, "lr" },
+ { { "x31" }, "sp" },
+ // The S/D/Q and W/X registers overlap, but aren't really aliases; we
+ // don't want to substitute one of these for a different-sized one.
+};
+
+ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
+ return llvm::makeArrayRef(GCCRegAliases);
+}
+
+const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#include "clang/Basic/BuiltinsNEON.def"
+
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#include "clang/Basic/BuiltinsAArch64.def"
+};
+
+class AArch64leTargetInfo : public AArch64TargetInfo {
+ void setDataLayoutString() override {
+ if (getTriple().isOSBinFormatMachO())
+ DataLayoutString = "e-m:o-i64:64-i128:128-n32:64-S128";
+ else
+ DataLayoutString = "e-m:e-i64:64-i128:128-n32:64-S128";
+ }
+
+public:
+ AArch64leTargetInfo(const llvm::Triple &Triple)
+ : AArch64TargetInfo(Triple) {
+ BigEndian = false;
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ Builder.defineMacro("__AARCH64EL__");
+ AArch64TargetInfo::getTargetDefines(Opts, Builder);
+ }
+};
+
+class AArch64beTargetInfo : public AArch64TargetInfo {
+ void setDataLayoutString() override {
+ assert(!getTriple().isOSBinFormatMachO());
+ DataLayoutString = "E-m:e-i64:64-i128:128-n32:64-S128";
+ }
+
+public:
+ AArch64beTargetInfo(const llvm::Triple &Triple)
+ : AArch64TargetInfo(Triple) { }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ Builder.defineMacro("__AARCH64EB__");
+ Builder.defineMacro("__AARCH_BIG_ENDIAN");
+ Builder.defineMacro("__ARM_BIG_ENDIAN");
+ AArch64TargetInfo::getTargetDefines(Opts, Builder);
+ }
+};
+
+class DarwinAArch64TargetInfo : public DarwinTargetInfo<AArch64leTargetInfo> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ Builder.defineMacro("__AARCH64_SIMD__");
+ Builder.defineMacro("__ARM64_ARCH_8__");
+ Builder.defineMacro("__ARM_NEON__");
+ Builder.defineMacro("__LITTLE_ENDIAN__");
+ Builder.defineMacro("__REGISTER_PREFIX__", "");
+ Builder.defineMacro("__arm64", "1");
+ Builder.defineMacro("__arm64__", "1");
+
+ getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
+ }
+
+public:
+ DarwinAArch64TargetInfo(const llvm::Triple &Triple)
+ : DarwinTargetInfo<AArch64leTargetInfo>(Triple) {
+ Int64Type = SignedLongLong;
+ WCharType = SignedInt;
+ UseSignedCharForObjCBool = false;
+
+ LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+
+ TheCXXABI.set(TargetCXXABI::iOS64);
+ }
+
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::CharPtrBuiltinVaList;
+ }
+};
+
+// Hexagon abstract base class
+class HexagonTargetInfo : public TargetInfo {
+ static const Builtin::Info BuiltinInfo[];
+ static const char * const GCCRegNames[];
+ static const TargetInfo::GCCRegAlias GCCRegAliases[];
+ std::string CPU;
+ bool HasHVX, HasHVXDouble;
+
+public:
+ HexagonTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
+ BigEndian = false;
+ DataLayoutString = "e-m:e-p:32:32:32-"
+ "i64:64:64-i32:32:32-i16:16:16-i1:8:8-"
+ "f64:64:64-f32:32:32-v64:64:64-v32:32:32-a:0-n16:32";
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ IntPtrType = SignedInt;
+
+ // {} in inline assembly are packet specifiers, not assembly variant
+ // specifiers.
+ NoAsmVariants = true;
+
+ LargeArrayMinWidth = 64;
+ LargeArrayAlign = 64;
+ UseBitFieldTypeAlignment = true;
+ ZeroLengthBitfieldBoundary = 32;
+ HasHVX = HasHVXDouble = false;
+ }
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::Hexagon::LastTSBuiltin-Builtin::FirstTSBuiltin);
+ }
+
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override {
+ return true;
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override;
+
+ bool isCLZForZeroUndef() const override { return false; }
+
+ bool hasFeature(StringRef Feature) const override {
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("hexagon", true)
+ .Case("hvx", HasHVX)
+ .Case("hvx-double", HasHVXDouble)
+ .Default(false);
+ }
+
+ bool initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
+ StringRef CPU, const std::vector<std::string> &FeaturesVec)
+ const override;
+
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) override;
+
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::CharPtrBuiltinVaList;
+ }
+ ArrayRef<const char *> getGCCRegNames() const override;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
+ const char *getClobbers() const override {
+ return "";
+ }
+
+ static const char *getHexagonCPUSuffix(StringRef Name) {
+ return llvm::StringSwitch<const char*>(Name)
+ .Case("hexagonv4", "4")
+ .Case("hexagonv5", "5")
+ .Case("hexagonv55", "55")
+ .Case("hexagonv60", "60")
+ .Default(nullptr);
+ }
+
+ bool setCPU(const std::string &Name) override {
+ if (!getHexagonCPUSuffix(Name))
+ return false;
+ CPU = Name;
+ return true;
+ }
+
+ int getEHDataRegisterNumber(unsigned RegNo) const override {
+ return RegNo < 2 ? RegNo : -1;
+ }
+};
+
+void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("__qdsp6__", "1");
+ Builder.defineMacro("__hexagon__", "1");
+
+ if (CPU == "hexagonv4") {
+ Builder.defineMacro("__HEXAGON_V4__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "4");
+ if (Opts.HexagonQdsp6Compat) {
+ Builder.defineMacro("__QDSP6_V4__");
+ Builder.defineMacro("__QDSP6_ARCH__", "4");
+ }
+ } else if (CPU == "hexagonv5") {
+ Builder.defineMacro("__HEXAGON_V5__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "5");
+ if (Opts.HexagonQdsp6Compat) {
+ Builder.defineMacro("__QDSP6_V5__");
+ Builder.defineMacro("__QDSP6_ARCH__", "5");
+ }
+ } else if (CPU == "hexagonv60") {
+ Builder.defineMacro("__HEXAGON_V60__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "60");
+ Builder.defineMacro("__QDSP6_V60__");
+ Builder.defineMacro("__QDSP6_ARCH__", "60");
+ }
+}
+
+bool HexagonTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) {
+ for (auto &F : Features) {
+ if (F == "+hvx")
+ HasHVX = true;
+ else if (F == "-hvx")
+ HasHVX = HasHVXDouble = false;
+ else if (F == "+hvx-double")
+ HasHVX = HasHVXDouble = true;
+ else if (F == "-hvx-double")
+ HasHVXDouble = false;
+ }
+ return true;
+}
+
+bool HexagonTargetInfo::initFeatureMap(llvm::StringMap<bool> &Features,
+ DiagnosticsEngine &Diags, StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const {
+ // Default for v60: -hvx, -hvx-double.
+ Features["hvx"] = false;
+ Features["hvx-double"] = false;
+
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
+}
+
+const char *const HexagonTargetInfo::GCCRegNames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+ "p0", "p1", "p2", "p3",
+ "sa0", "lc0", "sa1", "lc1", "m0", "m1", "usr", "ugp"
+};
+
+ArrayRef<const char*> HexagonTargetInfo::getGCCRegNames() const {
+ return llvm::makeArrayRef(GCCRegNames);
+}
+
+const TargetInfo::GCCRegAlias HexagonTargetInfo::GCCRegAliases[] = {
+ { { "sp" }, "r29" },
+ { { "fp" }, "r30" },
+ { { "lr" }, "r31" },
+};
+
+ArrayRef<TargetInfo::GCCRegAlias> HexagonTargetInfo::getGCCRegAliases() const {
+ return llvm::makeArrayRef(GCCRegAliases);
+}
+
+const Builtin::Info HexagonTargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
+ { #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
+#include "clang/Basic/BuiltinsHexagon.def"
+};
+
+// Shared base class for SPARC v8 (32-bit) and SPARC v9 (64-bit).
+class SparcTargetInfo : public TargetInfo {
+ static const TargetInfo::GCCRegAlias GCCRegAliases[];
+ static const char * const GCCRegNames[];
+ bool SoftFloat;
+public:
+ SparcTargetInfo(const llvm::Triple &Triple)
+ : TargetInfo(Triple), SoftFloat(false) {}
+
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) override {
+ // The backend doesn't actually handle soft float yet, but keep accepting
+ // it in the front end in case someone is relying on it.
+ auto Feature = std::find(Features.begin(), Features.end(), "+soft-float");
+ if (Feature != Features.end()) {
+ SoftFloat = true;
+ Features.erase(Feature);
+ }
+ return true;
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ DefineStd(Builder, "sparc", Opts);
+ Builder.defineMacro("__REGISTER_PREFIX__", "");
+
+ if (SoftFloat)
+ Builder.defineMacro("SOFT_FLOAT", "1");
+ }
+
+ bool hasFeature(StringRef Feature) const override {
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("softfloat", SoftFloat)
+ .Case("sparc", true)
+ .Default(false);
+ }
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ // FIXME: Implement!
+ return None;
+ }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::VoidPtrBuiltinVaList;
+ }
+ ArrayRef<const char *> getGCCRegNames() const override;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const override {
+ // FIXME: Implement!
+ switch (*Name) {
+ case 'I': // Signed 13-bit constant
+ case 'J': // Zero
+ case 'K': // 32-bit constant with the low 12 bits clear
+ case 'L': // A constant in the range supported by movcc (11-bit signed imm)
+ case 'M': // A constant in the range supported by movrcc (19-bit signed imm)
+ case 'N': // Same as 'K' but zext (required for SIMode)
+ case 'O': // The constant 4096
+ return true;
+ }
+ return false;
+ }
+ const char *getClobbers() const override {
+ // FIXME: Implement!
+ return "";
+ }
+
+ // No Sparc V7 for now, the backend doesn't support it anyway.
+ enum CPUKind {
+ CK_GENERIC,
+ CK_V8,
+ CK_SUPERSPARC,
+ CK_SPARCLITE,
+ CK_F934,
+ CK_HYPERSPARC,
+ CK_SPARCLITE86X,
+ CK_SPARCLET,
+ CK_TSC701,
+ CK_V9,
+ CK_ULTRASPARC,
+ CK_ULTRASPARC3,
+ CK_NIAGARA,
+ CK_NIAGARA2,
+ CK_NIAGARA3,
+ CK_NIAGARA4
+ } CPU = CK_GENERIC;
+
+ enum CPUGeneration {
+ CG_V8,
+ CG_V9,
+ };
+
+ CPUGeneration getCPUGeneration(CPUKind Kind) const {
+ switch (Kind) {
+ case CK_GENERIC:
+ case CK_V8:
+ case CK_SUPERSPARC:
+ case CK_SPARCLITE:
+ case CK_F934:
+ case CK_HYPERSPARC:
+ case CK_SPARCLITE86X:
+ case CK_SPARCLET:
+ case CK_TSC701:
+ return CG_V8;
+ case CK_V9:
+ case CK_ULTRASPARC:
+ case CK_ULTRASPARC3:
+ case CK_NIAGARA:
+ case CK_NIAGARA2:
+ case CK_NIAGARA3:
+ case CK_NIAGARA4:
+ return CG_V9;
+ }
+ llvm_unreachable("Unexpected CPU kind");
+ }
+
+ CPUKind getCPUKind(StringRef Name) const {
+ return llvm::StringSwitch<CPUKind>(Name)
+ .Case("v8", CK_V8)
+ .Case("supersparc", CK_SUPERSPARC)
+ .Case("sparclite", CK_SPARCLITE)
+ .Case("f934", CK_F934)
+ .Case("hypersparc", CK_HYPERSPARC)
+ .Case("sparclite86x", CK_SPARCLITE86X)
+ .Case("sparclet", CK_SPARCLET)
+ .Case("tsc701", CK_TSC701)
+ .Case("v9", CK_V9)
+ .Case("ultrasparc", CK_ULTRASPARC)
+ .Case("ultrasparc3", CK_ULTRASPARC3)
+ .Case("niagara", CK_NIAGARA)
+ .Case("niagara2", CK_NIAGARA2)
+ .Case("niagara3", CK_NIAGARA3)
+ .Case("niagara4", CK_NIAGARA4)
+ .Default(CK_GENERIC);
+ }
+
+ bool setCPU(const std::string &Name) override {
+ CPU = getCPUKind(Name);
+ return CPU != CK_GENERIC;
+ }
+};
+
+const char * const SparcTargetInfo::GCCRegNames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
+};
+
+ArrayRef<const char *> SparcTargetInfo::getGCCRegNames() const {
+ return llvm::makeArrayRef(GCCRegNames);
+}
+
+const TargetInfo::GCCRegAlias SparcTargetInfo::GCCRegAliases[] = {
+ { { "g0" }, "r0" },
+ { { "g1" }, "r1" },
+ { { "g2" }, "r2" },
+ { { "g3" }, "r3" },
+ { { "g4" }, "r4" },
+ { { "g5" }, "r5" },
+ { { "g6" }, "r6" },
+ { { "g7" }, "r7" },
+ { { "o0" }, "r8" },
+ { { "o1" }, "r9" },
+ { { "o2" }, "r10" },
+ { { "o3" }, "r11" },
+ { { "o4" }, "r12" },
+ { { "o5" }, "r13" },
+ { { "o6", "sp" }, "r14" },
+ { { "o7" }, "r15" },
+ { { "l0" }, "r16" },
+ { { "l1" }, "r17" },
+ { { "l2" }, "r18" },
+ { { "l3" }, "r19" },
+ { { "l4" }, "r20" },
+ { { "l5" }, "r21" },
+ { { "l6" }, "r22" },
+ { { "l7" }, "r23" },
+ { { "i0" }, "r24" },
+ { { "i1" }, "r25" },
+ { { "i2" }, "r26" },
+ { { "i3" }, "r27" },
+ { { "i4" }, "r28" },
+ { { "i5" }, "r29" },
+ { { "i6", "fp" }, "r30" },
+ { { "i7" }, "r31" },
+};
+
+ArrayRef<TargetInfo::GCCRegAlias> SparcTargetInfo::getGCCRegAliases() const {
+ return llvm::makeArrayRef(GCCRegAliases);
+}
+
+// SPARC v8 is the 32-bit mode selected by Triple::sparc.
+class SparcV8TargetInfo : public SparcTargetInfo {
+public:
+ SparcV8TargetInfo(const llvm::Triple &Triple) : SparcTargetInfo(Triple) {
+ DataLayoutString = "E-m:e-p:32:32-i64:64-f128:64-n32-S64";
+ // NetBSD / OpenBSD use long (same as llvm default); everyone else uses int.
+ switch (getTriple().getOS()) {
+ default:
+ SizeType = UnsignedInt;
+ IntPtrType = SignedInt;
+ PtrDiffType = SignedInt;
+ break;
+ case llvm::Triple::NetBSD:
+ case llvm::Triple::OpenBSD:
+ SizeType = UnsignedLong;
+ IntPtrType = SignedLong;
+ PtrDiffType = SignedLong;
+ break;
+ }
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ SparcTargetInfo::getTargetDefines(Opts, Builder);
+ switch (getCPUGeneration(CPU)) {
+ case CG_V8:
+ Builder.defineMacro("__sparcv8");
+ if (getTriple().getOS() != llvm::Triple::Solaris)
+ Builder.defineMacro("__sparcv8__");
+ break;
+ case CG_V9:
+ Builder.defineMacro("__sparcv9");
+ if (getTriple().getOS() != llvm::Triple::Solaris) {
+ Builder.defineMacro("__sparcv9__");
+ Builder.defineMacro("__sparc_v9__");
+ }
+ break;
+ }
+ }
+};
+
+// SPARC v8el is the 32-bit little-endian mode selected by Triple::sparcel.
+class SparcV8elTargetInfo : public SparcV8TargetInfo {
+public:
+ SparcV8elTargetInfo(const llvm::Triple &Triple) : SparcV8TargetInfo(Triple) {
+ DataLayoutString = "e-m:e-p:32:32-i64:64-f128:64-n32-S64";
+ BigEndian = false;
+ }
+};
+
+// SPARC v9 is the 64-bit mode selected by Triple::sparcv9.
+class SparcV9TargetInfo : public SparcTargetInfo {
+public:
+ SparcV9TargetInfo(const llvm::Triple &Triple) : SparcTargetInfo(Triple) {
+ // FIXME: Support Sparc quad-precision long double?
+ DataLayoutString = "E-m:e-i64:64-n32:64-S128";
+ // This is an LP64 platform.
+ LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
+
+ // OpenBSD uses long long for int64_t and intmax_t.
+ if (getTriple().getOS() == llvm::Triple::OpenBSD)
+ IntMaxType = SignedLongLong;
+ else
+ IntMaxType = SignedLong;
+ Int64Type = IntMaxType;
+
+ // The SPARCv8 System V ABI has a 128-bit long double that is only 64-bit
+ // aligned; the SPARCv9 SCD 2.4.1 requires 16-byte alignment.
+ LongDoubleWidth = 128;
+ LongDoubleAlign = 128;
+ LongDoubleFormat = &llvm::APFloat::IEEEquad;
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ SparcTargetInfo::getTargetDefines(Opts, Builder);
+ Builder.defineMacro("__sparcv9");
+ Builder.defineMacro("__arch64__");
+ // Solaris doesn't need these variants, but the BSDs do.
+ if (getTriple().getOS() != llvm::Triple::Solaris) {
+ Builder.defineMacro("__sparc64__");
+ Builder.defineMacro("__sparc_v9__");
+ Builder.defineMacro("__sparcv9__");
+ }
+ }
+
+ bool setCPU(const std::string &Name) override {
+ if (!SparcTargetInfo::setCPU(Name))
+ return false;
+ return getCPUGeneration(CPU) == CG_V9;
+ }
+};
+
+class SystemZTargetInfo : public TargetInfo {
+ static const Builtin::Info BuiltinInfo[];
+ static const char *const GCCRegNames[];
+ std::string CPU;
+ bool HasTransactionalExecution;
+ bool HasVector;
+
+public:
+ SystemZTargetInfo(const llvm::Triple &Triple)
+ : TargetInfo(Triple), CPU("z10"), HasTransactionalExecution(false),
+ HasVector(false) {
+ IntMaxType = SignedLong;
+ Int64Type = SignedLong;
+ TLSSupported = true;
+ IntWidth = IntAlign = 32;
+ LongWidth = LongLongWidth = LongAlign = LongLongAlign = 64;
+ PointerWidth = PointerAlign = 64;
+ LongDoubleWidth = 128;
+ LongDoubleAlign = 64;
+ LongDoubleFormat = &llvm::APFloat::IEEEquad;
+ DefaultAlignForAttributeAligned = 64;
+ MinGlobalAlign = 16;
+ DataLayoutString = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64";
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ Builder.defineMacro("__s390__");
+ Builder.defineMacro("__s390x__");
+ Builder.defineMacro("__zarch__");
+ Builder.defineMacro("__LONG_DOUBLE_128__");
+ if (HasTransactionalExecution)
+ Builder.defineMacro("__HTM__");
+ if (Opts.ZVector)
+ Builder.defineMacro("__VEC__", "10301");
+ }
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::SystemZ::LastTSBuiltin-Builtin::FirstTSBuiltin);
+ }
+
+ ArrayRef<const char *> getGCCRegNames() const override;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ // No aliases.
+ return None;
+ }
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const override;
+ const char *getClobbers() const override {
+ // FIXME: Is this really right?
+ return "";
+ }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::SystemZBuiltinVaList;
+ }
+ bool setCPU(const std::string &Name) override {
+ CPU = Name;
+ bool CPUKnown = llvm::StringSwitch<bool>(Name)
+ .Case("z10", true)
+ .Case("z196", true)
+ .Case("zEC12", true)
+ .Case("z13", true)
+ .Default(false);
+
+ return CPUKnown;
+ }
+ bool
+ initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
+ StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const override {
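+ // E.g. CPU "z13" implies both transactional-execution and vector; any
+ // explicit entries in FeaturesVec are layered on top by the base class.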
+ if (CPU == "zEC12")
+ Features["transactional-execution"] = true;
+ if (CPU == "z13") {
+ Features["transactional-execution"] = true;
+ Features["vector"] = true;
+ }
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
+ }
+
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) override {
+ HasTransactionalExecution = false;
+ for (const auto &Feature : Features) {
+ if (Feature == "+transactional-execution")
+ HasTransactionalExecution = true;
+ else if (Feature == "+vector")
+ HasVector = true;
+ }
+ // If we use the vector ABI, vector types are 64-bit aligned.
+ if (HasVector) {
+ MaxVectorAlign = 64;
+ DataLayoutString = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64"
+ "-v128:64-a:8:16-n32:64";
+ }
+ return true;
+ }
+
+ bool hasFeature(StringRef Feature) const override {
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("systemz", true)
+ .Case("htm", HasTransactionalExecution)
+ .Case("vx", HasVector)
+ .Default(false);
+ }
+
+ StringRef getABI() const override {
+ if (HasVector)
+ return "vector";
+ return "";
+ }
+
+ bool useFloat128ManglingForLongDouble() const override {
+ return true;
+ }
+};
+
+const Builtin::Info SystemZTargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#include "clang/Basic/BuiltinsSystemZ.def"
+};
+
+const char *const SystemZTargetInfo::GCCRegNames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "f0", "f2", "f4", "f6", "f1", "f3", "f5", "f7",
+ "f8", "f10", "f12", "f14", "f9", "f11", "f13", "f15"
+};
+
+ArrayRef<const char *> SystemZTargetInfo::getGCCRegNames() const {
+ return llvm::makeArrayRef(GCCRegNames);
+}
+
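+// For illustration (hypothetical user code): "d" and "f" accept register
+// operands, e.g. __asm__("lr %0,%1" : "=d"(dst) : "d"(src)); while "K"
+// accepts a signed 16-bit constant operand.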
+bool SystemZTargetInfo::
+validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const {
+ switch (*Name) {
+ default:
+ return false;
+
+ case 'a': // Address register
+ case 'd': // Data register (equivalent to 'r')
+ case 'f': // Floating-point register
+ Info.setAllowsRegister();
+ return true;
+
+ case 'I': // Unsigned 8-bit constant
+ case 'J': // Unsigned 12-bit constant
+ case 'K': // Signed 16-bit constant
+ case 'L': // Signed 20-bit displacement (on all targets we support)
+ case 'M': // 0x7fffffff
+ return true;
+
+ case 'Q': // Memory with base and unsigned 12-bit displacement
+ case 'R': // Likewise, plus an index
+ case 'S': // Memory with base and signed 20-bit displacement
+ case 'T': // Likewise, plus an index
+ Info.setAllowsMemory();
+ return true;
+ }
+}
+
+class MSP430TargetInfo : public TargetInfo {
+ static const char *const GCCRegNames[];
+
+public:
+ MSP430TargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
+ BigEndian = false;
+ TLSSupported = false;
+ IntWidth = 16;
+ IntAlign = 16;
+ LongWidth = 32;
+ LongLongWidth = 64;
+ LongAlign = LongLongAlign = 16;
+ PointerWidth = 16;
+ PointerAlign = 16;
+ SuitableAlign = 16;
+ SizeType = UnsignedInt;
+ IntMaxType = SignedLongLong;
+ IntPtrType = SignedInt;
+ PtrDiffType = SignedInt;
+ SigAtomicType = SignedLong;
+ DataLayoutString = "e-m:e-p:16:16-i32:16:32-a:16-n8:16";
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ Builder.defineMacro("MSP430");
+ Builder.defineMacro("__MSP430__");
+ // FIXME: defines for different 'flavours' of MCU
+ }
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ // FIXME: Implement.
+ return None;
+ }
+ bool hasFeature(StringRef Feature) const override {
+ return Feature == "msp430";
+ }
+ ArrayRef<const char *> getGCCRegNames() const override;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ // No aliases.
+ return None;
+ }
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const override {
+ // FIXME: implement
+ switch (*Name) {
+ case 'K': // the constant 1
+ case 'L': // constant -(2^20) .. 2^19
+ case 'M': // constant 1-4
+ return true;
+ }
+ // All other constraints are unsupported for now.
+ return false;
+ }
+ const char *getClobbers() const override {
+ // FIXME: Is this really right?
+ return "";
+ }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ // FIXME: implement
+ return TargetInfo::CharPtrBuiltinVaList;
+ }
+};
+
+const char *const MSP430TargetInfo::GCCRegNames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"};
+
+ArrayRef<const char *> MSP430TargetInfo::getGCCRegNames() const {
+ return llvm::makeArrayRef(GCCRegNames);
+}
+
+// LLVM and Clang cannot be used directly to emit native binaries for the
+// TCE target, but they can compile C code to LLVM bitcode with the correct
+// type and alignment information.
+//
+// TCE takes the LLVM bitcode as input and uses it to generate a customized
+// target processor and program binary. The TCE co-design environment is
+// publicly available at http://tce.cs.tut.fi
+
+static const unsigned TCEOpenCLAddrSpaceMap[] = {
+ 3, // opencl_global
+ 4, // opencl_local
+ 5, // opencl_constant
+ // FIXME: generic has to be added to the target
+ 0, // opencl_generic
+ 0, // cuda_device
+ 0, // cuda_constant
+ 0 // cuda_shared
+};
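+// For example, an OpenCL "__global int *" is lowered to LLVM addrspace(3)
+// on TCE, per the opencl_global entry above.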
+
+class TCETargetInfo : public TargetInfo {
+public:
+ TCETargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
+ TLSSupported = false;
+ IntWidth = 32;
+ LongWidth = LongLongWidth = 32;
+ PointerWidth = 32;
+ IntAlign = 32;
+ LongAlign = LongLongAlign = 32;
+ PointerAlign = 32;
+ SuitableAlign = 32;
+ SizeType = UnsignedInt;
+ IntMaxType = SignedLong;
+ IntPtrType = SignedInt;
+ PtrDiffType = SignedInt;
+ FloatWidth = 32;
+ FloatAlign = 32;
+ DoubleWidth = 32;
+ DoubleAlign = 32;
+ LongDoubleWidth = 32;
+ LongDoubleAlign = 32;
+ FloatFormat = &llvm::APFloat::IEEEsingle;
+ DoubleFormat = &llvm::APFloat::IEEEsingle;
+ LongDoubleFormat = &llvm::APFloat::IEEEsingle;
+ DataLayoutString = "E-p:32:32-i8:8:32-i16:16:32-i64:32"
+ "-f64:32-v64:32-v128:32-a:0:32-n32";
+ AddrSpaceMap = &TCEOpenCLAddrSpaceMap;
+ UseAddrSpaceMapMangling = true;
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ DefineStd(Builder, "tce", Opts);
+ Builder.defineMacro("__TCE__");
+ Builder.defineMacro("__TCE_V1__");
+ }
+ bool hasFeature(StringRef Feature) const override { return Feature == "tce"; }
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
+ const char *getClobbers() const override { return ""; }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::VoidPtrBuiltinVaList;
+ }
+ ArrayRef<const char *> getGCCRegNames() const override { return None; }
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const override {
+ return true;
+ }
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ return None;
+ }
+};
+
+class BPFTargetInfo : public TargetInfo {
+public:
+ BPFTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
+ LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
+ SizeType = UnsignedLong;
+ PtrDiffType = SignedLong;
+ IntPtrType = SignedLong;
+ IntMaxType = SignedLong;
+ Int64Type = SignedLong;
+ RegParmMax = 5;
+ if (Triple.getArch() == llvm::Triple::bpfeb) {
+ BigEndian = true;
+ DataLayoutString = "E-m:e-p:64:64-i64:64-n32:64-S128";
+ } else {
+ BigEndian = false;
+ DataLayoutString = "e-m:e-p:64:64-i64:64-n32:64-S128";
+ }
+ MaxAtomicPromoteWidth = 64;
+ MaxAtomicInlineWidth = 64;
+ TLSSupported = false;
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ DefineStd(Builder, "bpf", Opts);
+ Builder.defineMacro("__BPF__");
+ }
+ bool hasFeature(StringRef Feature) const override {
+ return Feature == "bpf";
+ }
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
+ const char *getClobbers() const override {
+ return "";
+ }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::VoidPtrBuiltinVaList;
+ }
+ ArrayRef<const char *> getGCCRegNames() const override {
+ return None;
+ }
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const override {
+ return true;
+ }
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ return None;
+ }
+};
+
+class MipsTargetInfoBase : public TargetInfo {
+ virtual void setDataLayoutString() = 0;
+
+ static const Builtin::Info BuiltinInfo[];
+ std::string CPU;
+ bool IsMips16;
+ bool IsMicromips;
+ bool IsNan2008;
+ bool IsSingleFloat;
+ enum MipsFloatABI {
+ HardFloat, SoftFloat
+ } FloatABI;
+ enum DspRevEnum {
+ NoDSP, DSP1, DSP2
+ } DspRev;
+ bool HasMSA;
+
+protected:
+ bool HasFP64;
+ std::string ABI;
+
+public:
+ MipsTargetInfoBase(const llvm::Triple &Triple, const std::string &ABIStr,
+ const std::string &CPUStr)
+ : TargetInfo(Triple), CPU(CPUStr), IsMips16(false), IsMicromips(false),
+ IsNan2008(false), IsSingleFloat(false), FloatABI(HardFloat),
+ DspRev(NoDSP), HasMSA(false), HasFP64(false), ABI(ABIStr) {
+ TheCXXABI.set(TargetCXXABI::GenericMIPS);
+ }
+
+ bool isNaN2008Default() const {
+ return CPU == "mips32r6" || CPU == "mips64r6";
+ }
+
+ bool isFP64Default() const {
+ return CPU == "mips32r6" || ABI == "n32" || ABI == "n64" || ABI == "64";
+ }
+
+ bool isNan2008() const override {
+ return IsNan2008;
+ }
+
+ StringRef getABI() const override { return ABI; }
+ bool setCPU(const std::string &Name) override {
+ bool IsMips32 = getTriple().getArch() == llvm::Triple::mips ||
+ getTriple().getArch() == llvm::Triple::mipsel;
+ CPU = Name;
+ return llvm::StringSwitch<bool>(Name)
+ .Case("mips1", IsMips32)
+ .Case("mips2", IsMips32)
+ .Case("mips3", true)
+ .Case("mips4", true)
+ .Case("mips5", true)
+ .Case("mips32", IsMips32)
+ .Case("mips32r2", IsMips32)
+ .Case("mips32r3", IsMips32)
+ .Case("mips32r5", IsMips32)
+ .Case("mips32r6", IsMips32)
+ .Case("mips64", true)
+ .Case("mips64r2", true)
+ .Case("mips64r3", true)
+ .Case("mips64r5", true)
+ .Case("mips64r6", true)
+ .Case("octeon", true)
+ .Case("p5600", true)
+ .Default(false);
+ }
+ const std::string& getCPU() const { return CPU; }
+ bool
+ initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
+ StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const override {
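+ // E.g. CPU "octeon" expands to both mips64r2 and cnmips; the explicit
+ // features in FeaturesVec are then layered on top by the base class.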
+ if (CPU == "octeon")
+ Features["mips64r2"] = Features["cnmips"] = true;
+ else
+ Features[CPU] = true;
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ Builder.defineMacro("__mips__");
+ Builder.defineMacro("_mips");
+ if (Opts.GNUMode)
+ Builder.defineMacro("mips");
+
+ Builder.defineMacro("__REGISTER_PREFIX__", "");
+
+ switch (FloatABI) {
+ case HardFloat:
+ Builder.defineMacro("__mips_hard_float", Twine(1));
+ break;
+ case SoftFloat:
+ Builder.defineMacro("__mips_soft_float", Twine(1));
+ break;
+ }
+
+ if (IsSingleFloat)
+ Builder.defineMacro("__mips_single_float", Twine(1));
+
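+ // With 32-bit FPRs, doubles occupy even/odd register pairs, so only 16
+ // register sets are available; with 64-bit FPRs or single-float, all 32
+ // are, hence _MIPS_FPSET is 32/2 or 32/1 below.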
+ Builder.defineMacro("__mips_fpr", HasFP64 ? Twine(64) : Twine(32));
+ Builder.defineMacro("_MIPS_FPSET",
+ Twine(32 / (HasFP64 || IsSingleFloat ? 1 : 2)));
+
+ if (IsMips16)
+ Builder.defineMacro("__mips16", Twine(1));
+
+ if (IsMicromips)
+ Builder.defineMacro("__mips_micromips", Twine(1));
+
+ if (IsNan2008)
+ Builder.defineMacro("__mips_nan2008", Twine(1));
+
+ switch (DspRev) {
+ default:
+ break;
+ case DSP1:
+ Builder.defineMacro("__mips_dsp_rev", Twine(1));
+ Builder.defineMacro("__mips_dsp", Twine(1));
+ break;
+ case DSP2:
+ Builder.defineMacro("__mips_dsp_rev", Twine(2));
+ Builder.defineMacro("__mips_dspr2", Twine(1));
+ Builder.defineMacro("__mips_dsp", Twine(1));
+ break;
+ }
+
+ if (HasMSA)
+ Builder.defineMacro("__mips_msa", Twine(1));
+
+ Builder.defineMacro("_MIPS_SZPTR", Twine(getPointerWidth(0)));
+ Builder.defineMacro("_MIPS_SZINT", Twine(getIntWidth()));
+ Builder.defineMacro("_MIPS_SZLONG", Twine(getLongWidth()));
+
+ Builder.defineMacro("_MIPS_ARCH", "\"" + CPU + "\"");
+ Builder.defineMacro("_MIPS_ARCH_" + StringRef(CPU).upper());
+
+ // These shouldn't be defined for MIPS-I but there's no need to check
+ // for that since MIPS-I isn't supported.
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
+ }
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::Mips::LastTSBuiltin - Builtin::FirstTSBuiltin);
+ }
+ bool hasFeature(StringRef Feature) const override {
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("mips", true)
+ .Case("fp64", HasFP64)
+ .Default(false);
+ }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::VoidPtrBuiltinVaList;
+ }
+ ArrayRef<const char *> getGCCRegNames() const override {
+ static const char *const GCCRegNames[] = {
+ // CPU register names
+ // Must match second column of GCCRegAliases
+ "$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7",
+ "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15",
+ "$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23",
+ "$24", "$25", "$26", "$27", "$28", "$29", "$30", "$31",
+ // Floating point register names
+ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7",
+ "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15",
+ "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",
+ "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
+ // Hi/lo and condition register names
+ "hi", "lo", "", "$fcc0","$fcc1","$fcc2","$fcc3","$fcc4",
+ "$fcc5","$fcc6","$fcc7",
+ // MSA register names
+ "$w0", "$w1", "$w2", "$w3", "$w4", "$w5", "$w6", "$w7",
+ "$w8", "$w9", "$w10", "$w11", "$w12", "$w13", "$w14", "$w15",
+ "$w16", "$w17", "$w18", "$w19", "$w20", "$w21", "$w22", "$w23",
+ "$w24", "$w25", "$w26", "$w27", "$w28", "$w29", "$w30", "$w31",
+ // MSA control register names
+ "$msair", "$msacsr", "$msaaccess", "$msasave", "$msamodify",
+ "$msarequest", "$msamap", "$msaunmap"
+ };
+ return llvm::makeArrayRef(GCCRegNames);
+ }
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override = 0;
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override {
+ switch (*Name) {
+ default:
+ return false;
+ case 'r': // CPU registers.
+ case 'd': // Equivalent to "r" unless generating MIPS16 code.
+ case 'y': // Equivalent to "r", backward compatibility only.
+ case 'f': // floating-point registers.
+ case 'c': // $25 for indirect jumps
+ case 'l': // lo register
+ case 'x': // hilo register pair
+ Info.setAllowsRegister();
+ return true;
+ case 'I': // Signed 16-bit constant
+ case 'J': // Integer 0
+ case 'K': // Unsigned 16-bit constant
+ case 'L': // Signed 32-bit constant, lower 16-bit zeros (for lui)
+ case 'M': // Constants not loadable via lui, addiu, or ori
+ case 'N': // Constant -1 to -65535
+ case 'O': // A signed 15-bit constant
+ case 'P': // A constant between 1 and 65535
+ return true;
+ case 'R': // An address that can be used in a non-macro load or store
+ Info.setAllowsMemory();
+ return true;
+ case 'Z':
+ if (Name[1] == 'C') { // An address usable by ll and sc.
+ Info.setAllowsMemory();
+ Name++; // Skip over 'Z'.
+ return true;
+ }
+ return false;
+ }
+ }
+
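+ // E.g. the two-character constraint "ZC" is rewritten to "^ZC" so later
+ // parsing can recognize it as a single multi-letter constraint.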
+ std::string convertConstraint(const char *&Constraint) const override {
+ std::string R;
+ switch (*Constraint) {
+ case 'Z': // Two-character constraint; add "^" hint for later parsing.
+ if (Constraint[1] == 'C') {
+ R = std::string("^") + std::string(Constraint, 2);
+ Constraint++;
+ return R;
+ }
+ break;
+ }
+ return TargetInfo::convertConstraint(Constraint);
+ }
+
+ const char *getClobbers() const override {
+ // In GCC, $1 is not widely used in generated code (it's used only in a few
+ // specific situations), so there is no real need for users to add it to
+ // the clobbers list if they want to use it in their inline assembly code.
+ //
+ // In LLVM, $1 is treated as a normal GPR and is always allocatable during
+ // code generation, so using it in inline assembly without adding it to the
+ // clobbers list can cause conflicts between the inline assembly code and
+ // the surrounding generated code.
+ //
+ // Another problem is that LLVM is allowed to choose $1 for inline assembly
+ // operands, which will conflict with the ".set at" assembler option (which
+ // we use only for inline assembly, in order to maintain compatibility with
+ // GCC) and will also conflict with the user's usage of $1.
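+ //
+ // For illustration (hypothetical user code): an asm statement such as
+ //   __asm__("addiu $1, %0, 1" : : "r"(x));
+ // could silently corrupt a value the register allocator had kept live
+ // in $1, were $1 not implicitly clobbered here.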
+ //
+ // The easiest way to avoid these conflicts and keep $1 as an allocatable
+ // register for generated code is to automatically clobber $1 for all inline
+ // assembly code.
+ //
+ // FIXME: We should automatically clobber $1 only for inline assembly code
+ // which actually uses it. This would allow LLVM to use $1 for inline
+ // assembly operands if the user's assembly code doesn't use it.
+ return "~{$1}";
+ }
+
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) override {
+ IsMips16 = false;
+ IsMicromips = false;
+ IsNan2008 = isNaN2008Default();
+ IsSingleFloat = false;
+ FloatABI = HardFloat;
+ DspRev = NoDSP;
+ HasFP64 = isFP64Default();
+
+ for (const auto &Feature : Features) {
+ if (Feature == "+single-float")
+ IsSingleFloat = true;
+ else if (Feature == "+soft-float")
+ FloatABI = SoftFloat;
+ else if (Feature == "+mips16")
+ IsMips16 = true;
+ else if (Feature == "+micromips")
+ IsMicromips = true;
+ else if (Feature == "+dsp")
+ DspRev = std::max(DspRev, DSP1);
+ else if (Feature == "+dspr2")
+ DspRev = std::max(DspRev, DSP2);
+ else if (Feature == "+msa")
+ HasMSA = true;
+ else if (Feature == "+fp64")
+ HasFP64 = true;
+ else if (Feature == "-fp64")
+ HasFP64 = false;
+ else if (Feature == "+nan2008")
+ IsNan2008 = true;
+ else if (Feature == "-nan2008")
+ IsNan2008 = false;
+ }
+
+ setDataLayoutString();
+
+ return true;
+ }
+
+ int getEHDataRegisterNumber(unsigned RegNo) const override {
+ if (RegNo == 0) return 4;
+ if (RegNo == 1) return 5;
+ return -1;
+ }
+
+ bool isCLZForZeroUndef() const override { return false; }
+};
+
+const Builtin::Info MipsTargetInfoBase::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
+ { #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
+#include "clang/Basic/BuiltinsMips.def"
+};
+
+class Mips32TargetInfoBase : public MipsTargetInfoBase {
+public:
+ Mips32TargetInfoBase(const llvm::Triple &Triple)
+ : MipsTargetInfoBase(Triple, "o32", "mips32r2") {
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ Int64Type = SignedLongLong;
+ IntMaxType = Int64Type;
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32;
+ }
+ bool setABI(const std::string &Name) override {
+ if (Name == "o32" || Name == "eabi") {
+ ABI = Name;
+ return true;
+ }
+ return false;
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ MipsTargetInfoBase::getTargetDefines(Opts, Builder);
+
+ Builder.defineMacro("__mips", "32");
+ Builder.defineMacro("_MIPS_ISA", "_MIPS_ISA_MIPS32");
+
+ const std::string& CPUStr = getCPU();
+ if (CPUStr == "mips32")
+ Builder.defineMacro("__mips_isa_rev", "1");
+ else if (CPUStr == "mips32r2")
+ Builder.defineMacro("__mips_isa_rev", "2");
+ else if (CPUStr == "mips32r3")
+ Builder.defineMacro("__mips_isa_rev", "3");
+ else if (CPUStr == "mips32r5")
+ Builder.defineMacro("__mips_isa_rev", "5");
+ else if (CPUStr == "mips32r6")
+ Builder.defineMacro("__mips_isa_rev", "6");
+
+ if (ABI == "o32") {
+ Builder.defineMacro("__mips_o32");
+ Builder.defineMacro("_ABIO32", "1");
+ Builder.defineMacro("_MIPS_SIM", "_ABIO32");
+ }
+ else if (ABI == "eabi")
+ Builder.defineMacro("__mips_eabi");
+ else
+ llvm_unreachable("Invalid ABI for Mips32.");
+ }
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ static const TargetInfo::GCCRegAlias GCCRegAliases[] = {
+ { { "at" }, "$1" },
+ { { "v0" }, "$2" },
+ { { "v1" }, "$3" },
+ { { "a0" }, "$4" },
+ { { "a1" }, "$5" },
+ { { "a2" }, "$6" },
+ { { "a3" }, "$7" },
+ { { "t0" }, "$8" },
+ { { "t1" }, "$9" },
+ { { "t2" }, "$10" },
+ { { "t3" }, "$11" },
+ { { "t4" }, "$12" },
+ { { "t5" }, "$13" },
+ { { "t6" }, "$14" },
+ { { "t7" }, "$15" },
+ { { "s0" }, "$16" },
+ { { "s1" }, "$17" },
+ { { "s2" }, "$18" },
+ { { "s3" }, "$19" },
+ { { "s4" }, "$20" },
+ { { "s5" }, "$21" },
+ { { "s6" }, "$22" },
+ { { "s7" }, "$23" },
+ { { "t8" }, "$24" },
+ { { "t9" }, "$25" },
+ { { "k0" }, "$26" },
+ { { "k1" }, "$27" },
+ { { "gp" }, "$28" },
+ { { "sp","$sp" }, "$29" },
+ { { "fp","$fp" }, "$30" },
+ { { "ra" }, "$31" }
+ };
+ return llvm::makeArrayRef(GCCRegAliases);
+ }
+};
+
+class Mips32EBTargetInfo : public Mips32TargetInfoBase {
+ void setDataLayoutString() override {
+ DataLayoutString = "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64";
+ }
+
+public:
+ Mips32EBTargetInfo(const llvm::Triple &Triple)
+ : Mips32TargetInfoBase(Triple) {
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ DefineStd(Builder, "MIPSEB", Opts);
+ Builder.defineMacro("_MIPSEB");
+ Mips32TargetInfoBase::getTargetDefines(Opts, Builder);
+ }
+};
+
+class Mips32ELTargetInfo : public Mips32TargetInfoBase {
+ void setDataLayoutString() override {
+ DataLayoutString = "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64";
+ }
+
+public:
+ Mips32ELTargetInfo(const llvm::Triple &Triple)
+ : Mips32TargetInfoBase(Triple) {
+ BigEndian = false;
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ DefineStd(Builder, "MIPSEL", Opts);
+ Builder.defineMacro("_MIPSEL");
+ Mips32TargetInfoBase::getTargetDefines(Opts, Builder);
+ }
+};
+
+class Mips64TargetInfoBase : public MipsTargetInfoBase {
+public:
+ Mips64TargetInfoBase(const llvm::Triple &Triple)
+ : MipsTargetInfoBase(Triple, "n64", "mips64r2") {
+ LongDoubleWidth = LongDoubleAlign = 128;
+ LongDoubleFormat = &llvm::APFloat::IEEEquad;
+ if (getTriple().getOS() == llvm::Triple::FreeBSD) {
+ LongDoubleWidth = LongDoubleAlign = 64;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ }
+ setN64ABITypes();
+ SuitableAlign = 128;
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ }
+
+ void setN64ABITypes() {
+ LongWidth = LongAlign = 64;
+ PointerWidth = PointerAlign = 64;
+ SizeType = UnsignedLong;
+ PtrDiffType = SignedLong;
+ Int64Type = SignedLong;
+ IntMaxType = Int64Type;
+ }
+
+ void setN32ABITypes() {
+ LongWidth = LongAlign = 32;
+ PointerWidth = PointerAlign = 32;
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ Int64Type = SignedLongLong;
+ IntMaxType = Int64Type;
+ }
+
+ bool setABI(const std::string &Name) override {
+ if (Name == "n32") {
+ setN32ABITypes();
+ ABI = Name;
+ return true;
+ }
+ if (Name == "n64") {
+ setN64ABITypes();
+ ABI = Name;
+ return true;
+ }
+ return false;
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ MipsTargetInfoBase::getTargetDefines(Opts, Builder);
+
+ Builder.defineMacro("__mips", "64");
+ Builder.defineMacro("__mips64");
+ Builder.defineMacro("__mips64__");
+ Builder.defineMacro("_MIPS_ISA", "_MIPS_ISA_MIPS64");
+
+ const std::string& CPUStr = getCPU();
+ if (CPUStr == "mips64")
+ Builder.defineMacro("__mips_isa_rev", "1");
+ else if (CPUStr == "mips64r2")
+ Builder.defineMacro("__mips_isa_rev", "2");
+ else if (CPUStr == "mips64r3")
+ Builder.defineMacro("__mips_isa_rev", "3");
+ else if (CPUStr == "mips64r5")
+ Builder.defineMacro("__mips_isa_rev", "5");
+ else if (CPUStr == "mips64r6")
+ Builder.defineMacro("__mips_isa_rev", "6");
+
+ if (ABI == "n32") {
+ Builder.defineMacro("__mips_n32");
+ Builder.defineMacro("_ABIN32", "2");
+ Builder.defineMacro("_MIPS_SIM", "_ABIN32");
+ }
+ else if (ABI == "n64") {
+ Builder.defineMacro("__mips_n64");
+ Builder.defineMacro("_ABI64", "3");
+ Builder.defineMacro("_MIPS_SIM", "_ABI64");
+ }
+ else
+ llvm_unreachable("Invalid ABI for Mips64.");
+
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
+ }
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ static const TargetInfo::GCCRegAlias GCCRegAliases[] = {
+ { { "at" }, "$1" },
+ { { "v0" }, "$2" },
+ { { "v1" }, "$3" },
+ { { "a0" }, "$4" },
+ { { "a1" }, "$5" },
+ { { "a2" }, "$6" },
+ { { "a3" }, "$7" },
+ { { "a4" }, "$8" },
+ { { "a5" }, "$9" },
+ { { "a6" }, "$10" },
+ { { "a7" }, "$11" },
+ { { "t0" }, "$12" },
+ { { "t1" }, "$13" },
+ { { "t2" }, "$14" },
+ { { "t3" }, "$15" },
+ { { "s0" }, "$16" },
+ { { "s1" }, "$17" },
+ { { "s2" }, "$18" },
+ { { "s3" }, "$19" },
+ { { "s4" }, "$20" },
+ { { "s5" }, "$21" },
+ { { "s6" }, "$22" },
+ { { "s7" }, "$23" },
+ { { "t8" }, "$24" },
+ { { "t9" }, "$25" },
+ { { "k0" }, "$26" },
+ { { "k1" }, "$27" },
+ { { "gp" }, "$28" },
+ { { "sp","$sp" }, "$29" },
+ { { "fp","$fp" }, "$30" },
+ { { "ra" }, "$31" }
+ };
+ return llvm::makeArrayRef(GCCRegAliases);
+ }
+
+ bool hasInt128Type() const override { return true; }
+};
+
+class Mips64EBTargetInfo : public Mips64TargetInfoBase {
+ void setDataLayoutString() override {
+ if (ABI == "n32")
+ DataLayoutString = "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32:64-S128";
+ else
+ DataLayoutString = "E-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128";
+
+ }
+
+public:
+ Mips64EBTargetInfo(const llvm::Triple &Triple)
+ : Mips64TargetInfoBase(Triple) {}
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ DefineStd(Builder, "MIPSEB", Opts);
+ Builder.defineMacro("_MIPSEB");
+ Mips64TargetInfoBase::getTargetDefines(Opts, Builder);
+ }
+};
+
+class Mips64ELTargetInfo : public Mips64TargetInfoBase {
+ void setDataLayoutString() override {
+ if (ABI == "n32")
+ DataLayoutString = "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32:64-S128";
+ else
+ DataLayoutString = "e-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128";
+ }
+public:
+ Mips64ELTargetInfo(const llvm::Triple &Triple)
+ : Mips64TargetInfoBase(Triple) {
+ // Default ABI is n64.
+ BigEndian = false;
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ DefineStd(Builder, "MIPSEL", Opts);
+ Builder.defineMacro("_MIPSEL");
+ Mips64TargetInfoBase::getTargetDefines(Opts, Builder);
+ }
+};
+
+class PNaClTargetInfo : public TargetInfo {
+public:
+ PNaClTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
+ BigEndian = false;
+ this->UserLabelPrefix = "";
+ this->LongAlign = 32;
+ this->LongWidth = 32;
+ this->PointerAlign = 32;
+ this->PointerWidth = 32;
+ this->IntMaxType = TargetInfo::SignedLongLong;
+ this->Int64Type = TargetInfo::SignedLongLong;
+ this->DoubleAlign = 64;
+ this->LongDoubleWidth = 64;
+ this->LongDoubleAlign = 64;
+ this->SizeType = TargetInfo::UnsignedInt;
+ this->PtrDiffType = TargetInfo::SignedInt;
+ this->IntPtrType = TargetInfo::SignedInt;
+ this->RegParmMax = 0; // Disallow regparm
+ }
+
+ void getArchDefines(const LangOptions &Opts, MacroBuilder &Builder) const {
+ Builder.defineMacro("__le32__");
+ Builder.defineMacro("__pnacl__");
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ getArchDefines(Opts, Builder);
+ }
+ bool hasFeature(StringRef Feature) const override {
+ return Feature == "pnacl";
+ }
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::PNaClABIBuiltinVaList;
+ }
+ ArrayRef<const char *> getGCCRegNames() const override;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override {
+ return false;
+ }
+
+ const char *getClobbers() const override {
+ return "";
+ }
+};
+
+ArrayRef<const char *> PNaClTargetInfo::getGCCRegNames() const {
+ return None;
+}
+
+ArrayRef<TargetInfo::GCCRegAlias> PNaClTargetInfo::getGCCRegAliases() const {
+ return None;
+}
+
+// We attempt to use the PNaCl (le32) frontend and the Mips32EL backend.
+class NaClMips32ELTargetInfo : public Mips32ELTargetInfo {
+public:
+ NaClMips32ELTargetInfo(const llvm::Triple &Triple) :
+ Mips32ELTargetInfo(Triple) {
+ }
+
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::PNaClABIBuiltinVaList;
+ }
+};
+
+class Le64TargetInfo : public TargetInfo {
+ static const Builtin::Info BuiltinInfo[];
+
+public:
+ Le64TargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
+ BigEndian = false;
+ NoAsmVariants = true;
+ LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ DataLayoutString = "e-m:e-v128:32-v16:16-v32:32-v96:32-n8:16:32:64-S128";
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ DefineStd(Builder, "unix", Opts);
+ defineCPUMacros(Builder, "le64", /*Tuning=*/false);
+ Builder.defineMacro("__ELF__");
+ }
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::Le64::LastTSBuiltin - Builtin::FirstTSBuiltin);
+ }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::PNaClABIBuiltinVaList;
+ }
+ const char *getClobbers() const override { return ""; }
+ ArrayRef<const char *> getGCCRegNames() const override {
+ return None;
+ }
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ return None;
+ }
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override {
+ return false;
+ }
+
+ bool hasProtectedVisibility() const override { return false; }
+};
+
+class WebAssemblyTargetInfo : public TargetInfo {
+ static const Builtin::Info BuiltinInfo[];
+
+ enum SIMDEnum {
+ NoSIMD,
+ SIMD128,
+ } SIMDLevel;
+
+public:
+ explicit WebAssemblyTargetInfo(const llvm::Triple &T)
+ : TargetInfo(T), SIMDLevel(NoSIMD) {
+ BigEndian = false;
+ NoAsmVariants = true;
+ SuitableAlign = 128;
+ LargeArrayMinWidth = 128;
+ LargeArrayAlign = 128;
+ SimdDefaultAlign = 128;
+ SigAtomicType = SignedLong;
+ LongDoubleWidth = LongDoubleAlign = 128;
+ LongDoubleFormat = &llvm::APFloat::IEEEquad;
+ }
+
+protected:
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ defineCPUMacros(Builder, "wasm", /*Tuning=*/false);
+ if (SIMDLevel >= SIMD128)
+ Builder.defineMacro("__wasm_simd128__");
+ }
+
+private:
+ bool
+ initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
+ StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const override {
+ if (CPU == "bleeding-edge")
+ Features["simd128"] = true;
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
+ }
+ bool hasFeature(StringRef Feature) const final {
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("simd128", SIMDLevel >= SIMD128)
+ .Default(false);
+ }
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) final {
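+ // "+simd128" raises SIMDLevel to SIMD128 and "-simd128" lowers it back
+ // to NoSIMD; any other feature string is rejected with a diagnostic.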
+ for (const auto &Feature : Features) {
+ if (Feature == "+simd128") {
+ SIMDLevel = std::max(SIMDLevel, SIMD128);
+ continue;
+ }
+ if (Feature == "-simd128") {
+ SIMDLevel = std::min(SIMDLevel, SIMDEnum(SIMD128 - 1));
+ continue;
+ }
+
+ Diags.Report(diag::err_opt_not_valid_with_opt) << Feature
+ << "-target-feature";
+ return false;
+ }
+ return true;
+ }
+ bool setCPU(const std::string &Name) final {
+ return llvm::StringSwitch<bool>(Name)
+ .Case("mvp", true)
+ .Case("bleeding-edge", true)
+ .Case("generic", true)
+ .Default(false);
+ }
+ ArrayRef<Builtin::Info> getTargetBuiltins() const final {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::WebAssembly::LastTSBuiltin - Builtin::FirstTSBuiltin);
+ }
+ BuiltinVaListKind getBuiltinVaListKind() const final {
+ return VoidPtrBuiltinVaList;
+ }
+ ArrayRef<const char *> getGCCRegNames() const final {
+ return None;
+ }
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const final {
+ return None;
+ }
+ bool
+ validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const final {
+ return false;
+ }
+ const char *getClobbers() const final { return ""; }
+ bool isCLZForZeroUndef() const final { return false; }
+ bool hasInt128Type() const final { return true; }
+ IntType getIntTypeByWidth(unsigned BitWidth,
+ bool IsSigned) const final {
+ // WebAssembly prefers long long for explicitly 64-bit integers.
+ return BitWidth == 64 ? (IsSigned ? SignedLongLong : UnsignedLongLong)
+ : TargetInfo::getIntTypeByWidth(BitWidth, IsSigned);
+ }
+ IntType getLeastIntTypeByWidth(unsigned BitWidth,
+ bool IsSigned) const final {
+ // WebAssembly uses long long for int_least64_t and int_fast64_t.
+ return BitWidth == 64
+ ? (IsSigned ? SignedLongLong : UnsignedLongLong)
+ : TargetInfo::getLeastIntTypeByWidth(BitWidth, IsSigned);
+ }
+};
+
+const Builtin::Info WebAssemblyTargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
+ { #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
+#include "clang/Basic/BuiltinsWebAssembly.def"
+};
+
+class WebAssembly32TargetInfo : public WebAssemblyTargetInfo {
+public:
+ explicit WebAssembly32TargetInfo(const llvm::Triple &T)
+ : WebAssemblyTargetInfo(T) {
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32;
+ DataLayoutString = "e-p:32:32-i64:64-n32:64-S128";
+ }
+
+protected:
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ WebAssemblyTargetInfo::getTargetDefines(Opts, Builder);
+ defineCPUMacros(Builder, "wasm32", /*Tuning=*/false);
+ }
+};
+
+class WebAssembly64TargetInfo : public WebAssemblyTargetInfo {
+public:
+ explicit WebAssembly64TargetInfo(const llvm::Triple &T)
+ : WebAssemblyTargetInfo(T) {
+ LongAlign = LongWidth = 64;
+ PointerAlign = PointerWidth = 64;
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ DataLayoutString = "e-p:64:64-i64:64-n32:64-S128";
+ }
+
+protected:
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ WebAssemblyTargetInfo::getTargetDefines(Opts, Builder);
+ defineCPUMacros(Builder, "wasm64", /*Tuning=*/false);
+ }
+};
+
+const Builtin::Info Le64TargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#include "clang/Basic/BuiltinsLe64.def"
+};
+
+static const unsigned SPIRAddrSpaceMap[] = {
+ 1, // opencl_global
+ 3, // opencl_local
+ 2, // opencl_constant
+ 4, // opencl_generic
+ 0, // cuda_device
+ 0, // cuda_constant
+ 0 // cuda_shared
+};
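+// For example, an OpenCL "__constant" pointer on SPIR is lowered to LLVM
+// addrspace(2), per the opencl_constant entry above.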
+class SPIRTargetInfo : public TargetInfo {
+public:
+ SPIRTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
+ assert(getTriple().getOS() == llvm::Triple::UnknownOS &&
+ "SPIR target must use unknown OS");
+ assert(getTriple().getEnvironment() == llvm::Triple::UnknownEnvironment &&
+ "SPIR target must use unknown environment type");
+ BigEndian = false;
+ TLSSupported = false;
+ LongWidth = LongAlign = 64;
+ AddrSpaceMap = &SPIRAddrSpaceMap;
+ UseAddrSpaceMapMangling = true;
+ // Define available target features
+ // These must be defined in sorted order!
+ NoAsmVariants = true;
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ DefineStd(Builder, "SPIR", Opts);
+ }
+ bool hasFeature(StringRef Feature) const override {
+ return Feature == "spir";
+ }
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
+ const char *getClobbers() const override { return ""; }
+ ArrayRef<const char *> getGCCRegNames() const override { return None; }
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const override {
+ return true;
+ }
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ return None;
+ }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::VoidPtrBuiltinVaList;
+ }
+
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
+ return (CC == CC_SpirFunction || CC == CC_SpirKernel) ? CCCR_OK
+ : CCCR_Warning;
+ }
+
+ CallingConv getDefaultCallingConv(CallingConvMethodType MT) const override {
+ return CC_SpirFunction;
+ }
+};
+
+class SPIR32TargetInfo : public SPIRTargetInfo {
+public:
+ SPIR32TargetInfo(const llvm::Triple &Triple) : SPIRTargetInfo(Triple) {
+ PointerWidth = PointerAlign = 32;
+ SizeType = TargetInfo::UnsignedInt;
+ PtrDiffType = IntPtrType = TargetInfo::SignedInt;
+ DataLayoutString = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-"
+ "v96:128-v192:256-v256:256-v512:512-v1024:1024";
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ DefineStd(Builder, "SPIR32", Opts);
+ }
+};
+
+class SPIR64TargetInfo : public SPIRTargetInfo {
+public:
+ SPIR64TargetInfo(const llvm::Triple &Triple) : SPIRTargetInfo(Triple) {
+ PointerWidth = PointerAlign = 64;
+ SizeType = TargetInfo::UnsignedLong;
+ PtrDiffType = IntPtrType = TargetInfo::SignedLong;
+ DataLayoutString = "e-i64:64-v16:16-v24:32-v32:32-v48:64-"
+ "v96:128-v192:256-v256:256-v512:512-v1024:1024";
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ DefineStd(Builder, "SPIR64", Opts);
+ }
+};
+
+class XCoreTargetInfo : public TargetInfo {
+ static const Builtin::Info BuiltinInfo[];
+public:
+ XCoreTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
+ BigEndian = false;
+ NoAsmVariants = true;
+ LongLongAlign = 32;
+ SuitableAlign = 32;
+ DoubleAlign = LongDoubleAlign = 32;
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ IntPtrType = SignedInt;
+ WCharType = UnsignedChar;
+ WIntType = UnsignedInt;
+ UseZeroLengthBitfieldAlignment = true;
+ DataLayoutString = "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:32"
+ "-f64:32-a:0:32-n32";
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ Builder.defineMacro("__XS1B__");
+ }
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::XCore::LastTSBuiltin-Builtin::FirstTSBuiltin);
+ }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::VoidPtrBuiltinVaList;
+ }
+ const char *getClobbers() const override {
+ return "";
+ }
+ ArrayRef<const char *> getGCCRegNames() const override {
+ static const char * const GCCRegNames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "cp", "dp", "sp", "lr"
+ };
+ return llvm::makeArrayRef(GCCRegNames);
+ }
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ return None;
+ }
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override {
+ return false;
+ }
+ int getEHDataRegisterNumber(unsigned RegNo) const override {
+ // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
+ return (RegNo < 2)? RegNo : -1;
+ }
+};
+
+const Builtin::Info XCoreTargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
+ { #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
+#include "clang/Basic/BuiltinsXCore.def"
+};
+
+// x86_32 Android target
+class AndroidX86_32TargetInfo : public LinuxTargetInfo<X86_32TargetInfo> {
+public:
+ AndroidX86_32TargetInfo(const llvm::Triple &Triple)
+ : LinuxTargetInfo<X86_32TargetInfo>(Triple) {
+ SuitableAlign = 32;
+ LongDoubleWidth = 64;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ }
+};
+
+// x86_64 Android target
+class AndroidX86_64TargetInfo : public LinuxTargetInfo<X86_64TargetInfo> {
+public:
+ AndroidX86_64TargetInfo(const llvm::Triple &Triple)
+ : LinuxTargetInfo<X86_64TargetInfo>(Triple) {
+ LongDoubleFormat = &llvm::APFloat::IEEEquad;
+ }
+
+ bool useFloat128ManglingForLongDouble() const override {
+ return true;
+ }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Driver code
+//===----------------------------------------------------------------------===//
+
+static TargetInfo *AllocateTarget(const llvm::Triple &Triple) {
+ llvm::Triple::OSType os = Triple.getOS();
+
+ switch (Triple.getArch()) {
+ default:
+ return nullptr;
+
+ case llvm::Triple::xcore:
+ return new XCoreTargetInfo(Triple);
+
+ case llvm::Triple::hexagon:
+ return new HexagonTargetInfo(Triple);
+
+ case llvm::Triple::aarch64:
+ if (Triple.isOSDarwin())
+ return new DarwinAArch64TargetInfo(Triple);
+
+ switch (os) {
+ case llvm::Triple::CloudABI:
+ return new CloudABITargetInfo<AArch64leTargetInfo>(Triple);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<AArch64leTargetInfo>(Triple);
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<AArch64leTargetInfo>(Triple);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<AArch64leTargetInfo>(Triple);
+ default:
+ return new AArch64leTargetInfo(Triple);
+ }
+
+ case llvm::Triple::aarch64_be:
+ switch (os) {
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<AArch64beTargetInfo>(Triple);
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<AArch64beTargetInfo>(Triple);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<AArch64beTargetInfo>(Triple);
+ default:
+ return new AArch64beTargetInfo(Triple);
+ }
+
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ if (Triple.isOSBinFormatMachO())
+ return new DarwinARMTargetInfo(Triple);
+
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<ARMleTargetInfo>(Triple);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<ARMleTargetInfo>(Triple);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<ARMleTargetInfo>(Triple);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDTargetInfo<ARMleTargetInfo>(Triple);
+ case llvm::Triple::Bitrig:
+ return new BitrigTargetInfo<ARMleTargetInfo>(Triple);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<ARMleTargetInfo>(Triple);
+ case llvm::Triple::NaCl:
+ return new NaClTargetInfo<ARMleTargetInfo>(Triple);
+ case llvm::Triple::Win32:
+ switch (Triple.getEnvironment()) {
+ case llvm::Triple::Cygnus:
+ return new CygwinARMTargetInfo(Triple);
+ case llvm::Triple::GNU:
+ return new MinGWARMTargetInfo(Triple);
+ case llvm::Triple::Itanium:
+ return new ItaniumWindowsARMleTargetInfo(Triple);
+ case llvm::Triple::MSVC:
+ default: // Assume MSVC for unknown environments
+ return new MicrosoftARMleTargetInfo(Triple);
+ }
+ default:
+ return new ARMleTargetInfo(Triple);
+ }
+
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumbeb:
+ if (Triple.isOSDarwin())
+ return new DarwinARMTargetInfo(Triple);
+
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<ARMbeTargetInfo>(Triple);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<ARMbeTargetInfo>(Triple);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<ARMbeTargetInfo>(Triple);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDTargetInfo<ARMbeTargetInfo>(Triple);
+ case llvm::Triple::Bitrig:
+ return new BitrigTargetInfo<ARMbeTargetInfo>(Triple);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<ARMbeTargetInfo>(Triple);
+ case llvm::Triple::NaCl:
+ return new NaClTargetInfo<ARMbeTargetInfo>(Triple);
+ default:
+ return new ARMbeTargetInfo(Triple);
+ }
+
+ case llvm::Triple::bpfeb:
+ case llvm::Triple::bpfel:
+ return new BPFTargetInfo(Triple);
+
+ case llvm::Triple::msp430:
+ return new MSP430TargetInfo(Triple);
+
+ case llvm::Triple::mips:
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<Mips32EBTargetInfo>(Triple);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<Mips32EBTargetInfo>(Triple);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<Mips32EBTargetInfo>(Triple);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<Mips32EBTargetInfo>(Triple);
+ default:
+ return new Mips32EBTargetInfo(Triple);
+ }
+
+ case llvm::Triple::mipsel:
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<Mips32ELTargetInfo>(Triple);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<Mips32ELTargetInfo>(Triple);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<Mips32ELTargetInfo>(Triple);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<Mips32ELTargetInfo>(Triple);
+ case llvm::Triple::NaCl:
+ return new NaClTargetInfo<NaClMips32ELTargetInfo>(Triple);
+ default:
+ return new Mips32ELTargetInfo(Triple);
+ }
+
+ case llvm::Triple::mips64:
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<Mips64EBTargetInfo>(Triple);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<Mips64EBTargetInfo>(Triple);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<Mips64EBTargetInfo>(Triple);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<Mips64EBTargetInfo>(Triple);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDTargetInfo<Mips64EBTargetInfo>(Triple);
+ default:
+ return new Mips64EBTargetInfo(Triple);
+ }
+
+ case llvm::Triple::mips64el:
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<Mips64ELTargetInfo>(Triple);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<Mips64ELTargetInfo>(Triple);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<Mips64ELTargetInfo>(Triple);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<Mips64ELTargetInfo>(Triple);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDTargetInfo<Mips64ELTargetInfo>(Triple);
+ default:
+ return new Mips64ELTargetInfo(Triple);
+ }
+
+ case llvm::Triple::le32:
+ switch (os) {
+ case llvm::Triple::NaCl:
+ return new NaClTargetInfo<PNaClTargetInfo>(Triple);
+ default:
+ return nullptr;
+ }
+
+ case llvm::Triple::le64:
+ return new Le64TargetInfo(Triple);
+
+ case llvm::Triple::ppc:
+ if (Triple.isOSDarwin())
+ return new DarwinPPC32TargetInfo(Triple);
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<PPC32TargetInfo>(Triple);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<PPC32TargetInfo>(Triple);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<PPC32TargetInfo>(Triple);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDTargetInfo<PPC32TargetInfo>(Triple);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<PPC32TargetInfo>(Triple);
+ default:
+ return new PPC32TargetInfo(Triple);
+ }
+
+ case llvm::Triple::ppc64:
+ if (Triple.isOSDarwin())
+ return new DarwinPPC64TargetInfo(Triple);
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<PPC64TargetInfo>(Triple);
+ case llvm::Triple::Lv2:
+ return new PS3PPUTargetInfo<PPC64TargetInfo>(Triple);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<PPC64TargetInfo>(Triple);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<PPC64TargetInfo>(Triple);
+ default:
+ return new PPC64TargetInfo(Triple);
+ }
+
+ case llvm::Triple::ppc64le:
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<PPC64TargetInfo>(Triple);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<PPC64TargetInfo>(Triple);
+ default:
+ return new PPC64TargetInfo(Triple);
+ }
+
+ case llvm::Triple::nvptx:
+ return new NVPTX32TargetInfo(Triple);
+ case llvm::Triple::nvptx64:
+ return new NVPTX64TargetInfo(Triple);
+
+ case llvm::Triple::amdgcn:
+ case llvm::Triple::r600:
+ return new AMDGPUTargetInfo(Triple);
+
+ case llvm::Triple::sparc:
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<SparcV8TargetInfo>(Triple);
+ case llvm::Triple::Solaris:
+ return new SolarisTargetInfo<SparcV8TargetInfo>(Triple);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<SparcV8TargetInfo>(Triple);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDTargetInfo<SparcV8TargetInfo>(Triple);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<SparcV8TargetInfo>(Triple);
+ default:
+ return new SparcV8TargetInfo(Triple);
+ }
+
+ // The 'sparcel' architecture copies all the above cases except for Solaris.
+ case llvm::Triple::sparcel:
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<SparcV8elTargetInfo>(Triple);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<SparcV8elTargetInfo>(Triple);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDTargetInfo<SparcV8elTargetInfo>(Triple);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<SparcV8elTargetInfo>(Triple);
+ default:
+ return new SparcV8elTargetInfo(Triple);
+ }
+
+ case llvm::Triple::sparcv9:
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<SparcV9TargetInfo>(Triple);
+ case llvm::Triple::Solaris:
+ return new SolarisTargetInfo<SparcV9TargetInfo>(Triple);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<SparcV9TargetInfo>(Triple);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDTargetInfo<SparcV9TargetInfo>(Triple);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<SparcV9TargetInfo>(Triple);
+ default:
+ return new SparcV9TargetInfo(Triple);
+ }
+
+ case llvm::Triple::systemz:
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<SystemZTargetInfo>(Triple);
+ default:
+ return new SystemZTargetInfo(Triple);
+ }
+
+ case llvm::Triple::tce:
+ return new TCETargetInfo(Triple);
+
+ case llvm::Triple::x86:
+ if (Triple.isOSDarwin())
+ return new DarwinI386TargetInfo(Triple);
+
+ switch (os) {
+ case llvm::Triple::CloudABI:
+ return new CloudABITargetInfo<X86_32TargetInfo>(Triple);
+ case llvm::Triple::Linux: {
+ switch (Triple.getEnvironment()) {
+ default:
+ return new LinuxTargetInfo<X86_32TargetInfo>(Triple);
+ case llvm::Triple::Android:
+ return new AndroidX86_32TargetInfo(Triple);
+ }
+ }
+ case llvm::Triple::DragonFly:
+ return new DragonFlyBSDTargetInfo<X86_32TargetInfo>(Triple);
+ case llvm::Triple::NetBSD:
+ return new NetBSDI386TargetInfo(Triple);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDI386TargetInfo(Triple);
+ case llvm::Triple::Bitrig:
+ return new BitrigI386TargetInfo(Triple);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<X86_32TargetInfo>(Triple);
+ case llvm::Triple::KFreeBSD:
+ return new KFreeBSDTargetInfo<X86_32TargetInfo>(Triple);
+ case llvm::Triple::Minix:
+ return new MinixTargetInfo<X86_32TargetInfo>(Triple);
+ case llvm::Triple::Solaris:
+ return new SolarisTargetInfo<X86_32TargetInfo>(Triple);
+ case llvm::Triple::Win32: {
+ switch (Triple.getEnvironment()) {
+ case llvm::Triple::Cygnus:
+ return new CygwinX86_32TargetInfo(Triple);
+ case llvm::Triple::GNU:
+ return new MinGWX86_32TargetInfo(Triple);
+ case llvm::Triple::Itanium:
+ case llvm::Triple::MSVC:
+ default: // Assume MSVC for unknown environments
+ return new MicrosoftX86_32TargetInfo(Triple);
+ }
+ }
+ case llvm::Triple::Haiku:
+ return new HaikuX86_32TargetInfo(Triple);
+ case llvm::Triple::RTEMS:
+ return new RTEMSX86_32TargetInfo(Triple);
+ case llvm::Triple::NaCl:
+ return new NaClTargetInfo<X86_32TargetInfo>(Triple);
+ case llvm::Triple::ELFIAMCU:
+ return new MCUX86_32TargetInfo(Triple);
+ default:
+ return new X86_32TargetInfo(Triple);
+ }
+
+ case llvm::Triple::x86_64:
+ if (Triple.isOSDarwin() || Triple.isOSBinFormatMachO())
+ return new DarwinX86_64TargetInfo(Triple);
+
+ switch (os) {
+ case llvm::Triple::CloudABI:
+ return new CloudABITargetInfo<X86_64TargetInfo>(Triple);
+ case llvm::Triple::Linux: {
+ switch (Triple.getEnvironment()) {
+ default:
+ return new LinuxTargetInfo<X86_64TargetInfo>(Triple);
+ case llvm::Triple::Android:
+ return new AndroidX86_64TargetInfo(Triple);
+ }
+ }
+ case llvm::Triple::DragonFly:
+ return new DragonFlyBSDTargetInfo<X86_64TargetInfo>(Triple);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<X86_64TargetInfo>(Triple);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDX86_64TargetInfo(Triple);
+ case llvm::Triple::Bitrig:
+ return new BitrigX86_64TargetInfo(Triple);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<X86_64TargetInfo>(Triple);
+ case llvm::Triple::KFreeBSD:
+ return new KFreeBSDTargetInfo<X86_64TargetInfo>(Triple);
+ case llvm::Triple::Solaris:
+ return new SolarisTargetInfo<X86_64TargetInfo>(Triple);
+ case llvm::Triple::Win32: {
+ switch (Triple.getEnvironment()) {
+ case llvm::Triple::Cygnus:
+ return new CygwinX86_64TargetInfo(Triple);
+ case llvm::Triple::GNU:
+ return new MinGWX86_64TargetInfo(Triple);
+ case llvm::Triple::MSVC:
+ default: // Assume MSVC for unknown environments
+ return new MicrosoftX86_64TargetInfo(Triple);
+ }
+ }
+ case llvm::Triple::NaCl:
+ return new NaClTargetInfo<X86_64TargetInfo>(Triple);
+ case llvm::Triple::PS4:
+ return new PS4OSTargetInfo<X86_64TargetInfo>(Triple);
+ default:
+ return new X86_64TargetInfo(Triple);
+ }
+
+ case llvm::Triple::spir: {
+ if (Triple.getOS() != llvm::Triple::UnknownOS ||
+ Triple.getEnvironment() != llvm::Triple::UnknownEnvironment)
+ return nullptr;
+ return new SPIR32TargetInfo(Triple);
+ }
+ case llvm::Triple::spir64: {
+ if (Triple.getOS() != llvm::Triple::UnknownOS ||
+ Triple.getEnvironment() != llvm::Triple::UnknownEnvironment)
+ return nullptr;
+ return new SPIR64TargetInfo(Triple);
+ }
+ case llvm::Triple::wasm32:
+ if (!(Triple == llvm::Triple("wasm32-unknown-unknown")))
+ return nullptr;
+ return new WebAssemblyOSTargetInfo<WebAssembly32TargetInfo>(Triple);
+ case llvm::Triple::wasm64:
+ if (!(Triple == llvm::Triple("wasm64-unknown-unknown")))
+ return nullptr;
+ return new WebAssemblyOSTargetInfo<WebAssembly64TargetInfo>(Triple);
+ }
+}
+
+/// CreateTargetInfo - Return the target info object for the specified target
+/// options.
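+///
+/// A minimal usage sketch (assumes a configured DiagnosticsEngine \c Diags):
+/// \code
+///   auto Opts = std::make_shared<TargetOptions>();
+///   Opts->Triple = "x86_64-unknown-linux-gnu";
+///   std::unique_ptr<TargetInfo> TI(
+///       TargetInfo::CreateTargetInfo(Diags, Opts));
+/// \endcode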
+TargetInfo *
+TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
+ const std::shared_ptr<TargetOptions> &Opts) {
+ llvm::Triple Triple(Opts->Triple);
+
+ // Construct the target
+ std::unique_ptr<TargetInfo> Target(AllocateTarget(Triple));
+ if (!Target) {
+ Diags.Report(diag::err_target_unknown_triple) << Triple.str();
+ return nullptr;
+ }
+ Target->TargetOpts = Opts;
+
+ // Set the target CPU if specified.
+ if (!Opts->CPU.empty() && !Target->setCPU(Opts->CPU)) {
+ Diags.Report(diag::err_target_unknown_cpu) << Opts->CPU;
+ return nullptr;
+ }
+
+ // Set the target ABI if specified.
+ if (!Opts->ABI.empty() && !Target->setABI(Opts->ABI)) {
+ Diags.Report(diag::err_target_unknown_abi) << Opts->ABI;
+ return nullptr;
+ }
+
+ // Set the fp math unit.
+ if (!Opts->FPMath.empty() && !Target->setFPMath(Opts->FPMath)) {
+ Diags.Report(diag::err_target_unknown_fpmath) << Opts->FPMath;
+ return nullptr;
+ }
+
+  // Compute the default target features; we need the target to handle this
+  // because features may have dependencies on one another.
+ llvm::StringMap<bool> Features;
+ if (!Target->initFeatureMap(Features, Diags, Opts->CPU,
+ Opts->FeaturesAsWritten))
+ return nullptr;
+
+ // Add the features to the compile options.
+ Opts->Features.clear();
+ for (const auto &F : Features)
+ Opts->Features.push_back((F.getValue() ? "+" : "-") + F.getKey().str());
+
+ if (!Target->handleTargetFeatures(Opts->Features, Diags))
+ return nullptr;
+
+ return Target.release();
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/TokenKinds.cpp b/contrib/llvm/tools/clang/lib/Basic/TokenKinds.cpp
new file mode 100644
index 0000000..3b1f8fe
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/TokenKinds.cpp
@@ -0,0 +1,48 @@
+//===--- TokenKinds.cpp - Token Kinds Support -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TokenKind enum and support functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/TokenKinds.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+
+static const char * const TokNames[] = {
+#define TOK(X) #X,
+#define KEYWORD(X,Y) #X,
+#include "clang/Basic/TokenKinds.def"
+ nullptr
+};
+
+const char *tok::getTokenName(TokenKind Kind) {
+ if (Kind < tok::NUM_TOKENS)
+ return TokNames[Kind];
+ llvm_unreachable("unknown TokenKind");
+ return nullptr;
+}
+
+const char *tok::getPunctuatorSpelling(TokenKind Kind) {
+ switch (Kind) {
+#define PUNCTUATOR(X,Y) case X: return Y;
+#include "clang/Basic/TokenKinds.def"
+ default: break;
+ }
+ return nullptr;
+}
+
+const char *tok::getKeywordSpelling(TokenKind Kind) {
+ switch (Kind) {
+#define KEYWORD(X,Y) case kw_ ## X: return #X;
+#include "clang/Basic/TokenKinds.def"
+ default: break;
+ }
+ return nullptr;
+}
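+
+// Example (a sketch): getPunctuatorSpelling(tok::plus) yields "+", and
+// getKeywordSpelling(tok::kw_int) yields "int"; each returns nullptr for
+// token kinds outside its category.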
diff --git a/contrib/llvm/tools/clang/lib/Basic/Version.cpp b/contrib/llvm/tools/clang/lib/Basic/Version.cpp
new file mode 100644
index 0000000..a1a67c2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/Version.cpp
@@ -0,0 +1,151 @@
+//===- Version.cpp - Clang Version Number -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines several version-related utility functions for Clang.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Version.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Config/config.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdlib>
+#include <cstring>
+
+#ifdef HAVE_SVN_VERSION_INC
+# include "SVNVersion.inc"
+#endif
+
+namespace clang {
+
+std::string getClangRepositoryPath() {
+#if defined(CLANG_REPOSITORY_STRING)
+ return CLANG_REPOSITORY_STRING;
+#else
+#ifdef SVN_REPOSITORY
+ StringRef URL(SVN_REPOSITORY);
+#else
+ StringRef URL("");
+#endif
+
+  // If SVN_REPOSITORY is empty, try to use the SVN keyword. This helps us
+  // pick up a tag in an SVN export, for example.
+ StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/trunk/lib/Basic/Version.cpp $");
+ if (URL.empty()) {
+ URL = SVNRepository.slice(SVNRepository.find(':'),
+ SVNRepository.find("/lib/Basic"));
+ }
+
+ // Strip off version from a build from an integration branch.
+ URL = URL.slice(0, URL.find("/src/tools/clang"));
+
+ // Trim path prefix off, assuming path came from standard cfe path.
+ size_t Start = URL.find("cfe/");
+ if (Start != StringRef::npos)
+ URL = URL.substr(Start + 4);
+
+ return URL;
+#endif
+}
+
+std::string getLLVMRepositoryPath() {
+#ifdef LLVM_REPOSITORY
+ StringRef URL(LLVM_REPOSITORY);
+#else
+ StringRef URL("");
+#endif
+
+ // Trim path prefix off, assuming path came from standard llvm path.
+ // Leave "llvm/" prefix to distinguish the following llvm revision from the
+ // clang revision.
+ size_t Start = URL.find("llvm/");
+ if (Start != StringRef::npos)
+ URL = URL.substr(Start);
+
+ return URL;
+}
+
+std::string getClangRevision() {
+#ifdef SVN_REVISION
+ return SVN_REVISION;
+#else
+ return "";
+#endif
+}
+
+std::string getLLVMRevision() {
+#ifdef LLVM_REVISION
+ return LLVM_REVISION;
+#else
+ return "";
+#endif
+}
+
+std::string getClangFullRepositoryVersion() {
+ std::string buf;
+ llvm::raw_string_ostream OS(buf);
+ std::string Path = getClangRepositoryPath();
+ std::string Revision = getClangRevision();
+ if (!Path.empty() || !Revision.empty()) {
+ OS << '(';
+ if (!Path.empty())
+ OS << Path;
+ if (!Revision.empty()) {
+ if (!Path.empty())
+ OS << ' ';
+ OS << Revision;
+ }
+ OS << ')';
+ }
+ // Support LLVM in a separate repository.
+ std::string LLVMRev = getLLVMRevision();
+ if (!LLVMRev.empty() && LLVMRev != Revision) {
+ OS << " (";
+ std::string LLVMRepo = getLLVMRepositoryPath();
+ if (!LLVMRepo.empty())
+ OS << LLVMRepo << ' ';
+ OS << LLVMRev << ')';
+ }
+ return OS.str();
+}
+
+std::string getClangFullVersion() {
+ return getClangToolFullVersion("clang");
+}
+
+std::string getClangToolFullVersion(StringRef ToolName) {
+ std::string buf;
+ llvm::raw_string_ostream OS(buf);
+#ifdef CLANG_VENDOR
+ OS << CLANG_VENDOR;
+#endif
+ OS << ToolName << " version " CLANG_VERSION_STRING " "
+ << getClangFullRepositoryVersion();
+
+ // If vendor supplied, include the base LLVM version as well.
+#ifdef CLANG_VENDOR
+ OS << " (based on " << BACKEND_PACKAGE_STRING << ")";
+#endif
+
+ return OS.str();
+}
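+
+// Example output (a sketch; the exact fields depend on how the compiler was
+// built): clang version 3.8.0 (trunk 123456) (llvm/trunk 123455)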
+
+std::string getClangFullCPPVersion() {
+ // The version string we report in __VERSION__ is just a compacted version of
+ // the one we report on the command line.
+ std::string buf;
+ llvm::raw_string_ostream OS(buf);
+#ifdef CLANG_VENDOR
+ OS << CLANG_VENDOR;
+#endif
+ OS << "Clang " CLANG_VERSION_STRING " " << getClangFullRepositoryVersion();
+ return OS.str();
+}
+
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Basic/VersionTuple.cpp b/contrib/llvm/tools/clang/lib/Basic/VersionTuple.cpp
new file mode 100644
index 0000000..9c73fd9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/VersionTuple.cpp
@@ -0,0 +1,100 @@
+//===- VersionTuple.cpp - Version Number Handling ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the VersionTuple class, which represents a version in
+// the form major[.minor[.subminor]].
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Basic/VersionTuple.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+std::string VersionTuple::getAsString() const {
+ std::string Result;
+ {
+ llvm::raw_string_ostream Out(Result);
+ Out << *this;
+ }
+ return Result;
+}
+
+raw_ostream &clang::operator<<(raw_ostream &Out, const VersionTuple &V) {
+ Out << V.getMajor();
+ if (Optional<unsigned> Minor = V.getMinor())
+ Out << (V.usesUnderscores() ? '_' : '.') << *Minor;
+ if (Optional<unsigned> Subminor = V.getSubminor())
+ Out << (V.usesUnderscores() ? '_' : '.') << *Subminor;
+ if (Optional<unsigned> Build = V.getBuild())
+ Out << (V.usesUnderscores() ? '_' : '.') << *Build;
+ return Out;
+}
+
+static bool parseInt(StringRef &input, unsigned &value) {
+ assert(value == 0);
+ if (input.empty()) return true;
+
+ char next = input[0];
+ input = input.substr(1);
+ if (next < '0' || next > '9') return true;
+ value = (unsigned) (next - '0');
+
+ while (!input.empty()) {
+ next = input[0];
+ if (next < '0' || next > '9') return false;
+ input = input.substr(1);
+ value = value * 10 + (unsigned) (next - '0');
+ }
+
+ return false;
+}
+
+bool VersionTuple::tryParse(StringRef input) {
+ unsigned major = 0, minor = 0, micro = 0, build = 0;
+
+ // Parse the major version, [0-9]+
+ if (parseInt(input, major)) return true;
+
+ if (input.empty()) {
+ *this = VersionTuple(major);
+ return false;
+ }
+
+ // If we're not done, parse the minor version, \.[0-9]+
+ if (input[0] != '.') return true;
+ input = input.substr(1);
+ if (parseInt(input, minor)) return true;
+
+ if (input.empty()) {
+ *this = VersionTuple(major, minor);
+ return false;
+ }
+
+ // If we're not done, parse the micro version, \.[0-9]+
+ if (input[0] != '.') return true;
+ input = input.substr(1);
+ if (parseInt(input, micro)) return true;
+
+ if (input.empty()) {
+ *this = VersionTuple(major, minor, micro);
+ return false;
+ }
+
+  // If we're not done, parse the build version, \.[0-9]+
+ if (input[0] != '.') return true;
+ input = input.substr(1);
+ if (parseInt(input, build)) return true;
+
+ // If we have characters left over, it's an error.
+ if (!input.empty()) return true;
+
+ *this = VersionTuple(major, minor, micro, build);
+ return false;
+}
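+
+// Example (a sketch of the error-on-true convention used above):
+//   VersionTuple VT;
+//   bool Failed = VT.tryParse("10.3.1"); // false; VT is 10.3.1
+//   Failed = VT.tryParse("10.");         // true; a trailing '.' is an error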
diff --git a/contrib/llvm/tools/clang/lib/Basic/VirtualFileSystem.cpp b/contrib/llvm/tools/clang/lib/Basic/VirtualFileSystem.cpp
new file mode 100644
index 0000000..cf5a8d6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/VirtualFileSystem.cpp
@@ -0,0 +1,1560 @@
+//===- VirtualFileSystem.cpp - Virtual File System Layer --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file implements the VirtualFileSystem interface.
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/VirtualFileSystem.h"
+#include "clang/Basic/FileManager.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/YAMLParser.h"
+#include "llvm/Config/llvm-config.h"
+#include <atomic>
+#include <memory>
+
+// For chdir.
+#ifdef LLVM_ON_WIN32
+# include <direct.h>
+#else
+# include <unistd.h>
+#endif
+
+using namespace clang;
+using namespace clang::vfs;
+using namespace llvm;
+using llvm::sys::fs::file_status;
+using llvm::sys::fs::file_type;
+using llvm::sys::fs::perms;
+using llvm::sys::fs::UniqueID;
+
+Status::Status(const file_status &Status)
+ : UID(Status.getUniqueID()), MTime(Status.getLastModificationTime()),
+ User(Status.getUser()), Group(Status.getGroup()), Size(Status.getSize()),
+ Type(Status.type()), Perms(Status.permissions()), IsVFSMapped(false) {}
+
+Status::Status(StringRef Name, UniqueID UID, sys::TimeValue MTime,
+ uint32_t User, uint32_t Group, uint64_t Size, file_type Type,
+ perms Perms)
+ : Name(Name), UID(UID), MTime(MTime), User(User), Group(Group), Size(Size),
+ Type(Type), Perms(Perms), IsVFSMapped(false) {}
+
+Status Status::copyWithNewName(const Status &In, StringRef NewName) {
+ return Status(NewName, In.getUniqueID(), In.getLastModificationTime(),
+ In.getUser(), In.getGroup(), In.getSize(), In.getType(),
+ In.getPermissions());
+}
+
+Status Status::copyWithNewName(const file_status &In, StringRef NewName) {
+ return Status(NewName, In.getUniqueID(), In.getLastModificationTime(),
+ In.getUser(), In.getGroup(), In.getSize(), In.type(),
+ In.permissions());
+}
+
+bool Status::equivalent(const Status &Other) const {
+ return getUniqueID() == Other.getUniqueID();
+}
+bool Status::isDirectory() const {
+ return Type == file_type::directory_file;
+}
+bool Status::isRegularFile() const {
+ return Type == file_type::regular_file;
+}
+bool Status::isOther() const {
+ return exists() && !isRegularFile() && !isDirectory() && !isSymlink();
+}
+bool Status::isSymlink() const {
+ return Type == file_type::symlink_file;
+}
+bool Status::isStatusKnown() const {
+ return Type != file_type::status_error;
+}
+bool Status::exists() const {
+ return isStatusKnown() && Type != file_type::file_not_found;
+}
+
+File::~File() {}
+
+FileSystem::~FileSystem() {}
+
+ErrorOr<std::unique_ptr<MemoryBuffer>>
+FileSystem::getBufferForFile(const llvm::Twine &Name, int64_t FileSize,
+ bool RequiresNullTerminator, bool IsVolatile) {
+ auto F = openFileForRead(Name);
+ if (!F)
+ return F.getError();
+
+ return (*F)->getBuffer(Name, FileSize, RequiresNullTerminator, IsVolatile);
+}
+
+std::error_code FileSystem::makeAbsolute(SmallVectorImpl<char> &Path) const {
+ auto WorkingDir = getCurrentWorkingDirectory();
+ if (!WorkingDir)
+ return WorkingDir.getError();
+
+ return llvm::sys::fs::make_absolute(WorkingDir.get(), Path);
+}
+
+bool FileSystem::exists(const Twine &Path) {
+ auto Status = status(Path);
+ return Status && Status->exists();
+}
+
+//===----------------------------------------------------------------------===//
+// RealFileSystem implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// \brief Wrapper around a raw file descriptor.
+class RealFile : public File {
+ int FD;
+ Status S;
+ friend class RealFileSystem;
+ RealFile(int FD, StringRef NewName)
+ : FD(FD), S(NewName, {}, {}, {}, {}, {},
+ llvm::sys::fs::file_type::status_error, {}) {
+ assert(FD >= 0 && "Invalid or inactive file descriptor");
+ }
+
+public:
+ ~RealFile() override;
+ ErrorOr<Status> status() override;
+ ErrorOr<std::unique_ptr<MemoryBuffer>> getBuffer(const Twine &Name,
+ int64_t FileSize,
+ bool RequiresNullTerminator,
+ bool IsVolatile) override;
+ std::error_code close() override;
+};
+} // end anonymous namespace
+RealFile::~RealFile() { close(); }
+
+ErrorOr<Status> RealFile::status() {
+ assert(FD != -1 && "cannot stat closed file");
+ if (!S.isStatusKnown()) {
+ file_status RealStatus;
+ if (std::error_code EC = sys::fs::status(FD, RealStatus))
+ return EC;
+ S = Status::copyWithNewName(RealStatus, S.getName());
+ }
+ return S;
+}
+
+ErrorOr<std::unique_ptr<MemoryBuffer>>
+RealFile::getBuffer(const Twine &Name, int64_t FileSize,
+ bool RequiresNullTerminator, bool IsVolatile) {
+ assert(FD != -1 && "cannot get buffer for closed file");
+ return MemoryBuffer::getOpenFile(FD, Name, FileSize, RequiresNullTerminator,
+ IsVolatile);
+}
+
+// FIXME: This is terrible; we need these includes for ::close.
+#if !defined(_MSC_VER) && !defined(__MINGW32__)
+#include <unistd.h>
+#include <sys/uio.h>
+#else
+#include <io.h>
+#ifndef S_ISFIFO
+#define S_ISFIFO(x) (0)
+#endif
+#endif
+std::error_code RealFile::close() {
+ if (::close(FD))
+ return std::error_code(errno, std::generic_category());
+ FD = -1;
+ return std::error_code();
+}
+
+namespace {
+/// \brief The file system according to your operating system.
+class RealFileSystem : public FileSystem {
+public:
+ ErrorOr<Status> status(const Twine &Path) override;
+ ErrorOr<std::unique_ptr<File>> openFileForRead(const Twine &Path) override;
+ directory_iterator dir_begin(const Twine &Dir, std::error_code &EC) override;
+
+ llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override;
+ std::error_code setCurrentWorkingDirectory(const Twine &Path) override;
+};
+} // end anonymous namespace
+
+ErrorOr<Status> RealFileSystem::status(const Twine &Path) {
+ sys::fs::file_status RealStatus;
+ if (std::error_code EC = sys::fs::status(Path, RealStatus))
+ return EC;
+ return Status::copyWithNewName(RealStatus, Path.str());
+}
+
+ErrorOr<std::unique_ptr<File>>
+RealFileSystem::openFileForRead(const Twine &Name) {
+ int FD;
+ if (std::error_code EC = sys::fs::openFileForRead(Name, FD))
+ return EC;
+ return std::unique_ptr<File>(new RealFile(FD, Name.str()));
+}
+
+llvm::ErrorOr<std::string> RealFileSystem::getCurrentWorkingDirectory() const {
+ SmallString<256> Dir;
+ if (std::error_code EC = llvm::sys::fs::current_path(Dir))
+ return EC;
+ return Dir.str().str();
+}
+
+std::error_code RealFileSystem::setCurrentWorkingDirectory(const Twine &Path) {
+ // FIXME: chdir is thread hostile; on the other hand, creating the same
+ // behavior as chdir is complex: chdir resolves the path once, thus
+ // guaranteeing that all subsequent relative path operations work
+ // on the same path the original chdir resulted in. This makes a
+ // difference for example on network filesystems, where symlinks might be
+ // switched during runtime of the tool. Fixing this depends on having a
+ // file system abstraction that allows openat() style interactions.
+ SmallString<256> Storage;
+ StringRef Dir = Path.toNullTerminatedStringRef(Storage);
+ if (int Err = ::chdir(Dir.data()))
+ return std::error_code(Err, std::generic_category());
+ return std::error_code();
+}
+
+IntrusiveRefCntPtr<FileSystem> vfs::getRealFileSystem() {
+ static IntrusiveRefCntPtr<FileSystem> FS = new RealFileSystem();
+ return FS;
+}
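+
+// Usage sketch: the returned instance is shared process-wide.
+//   auto FS = vfs::getRealFileSystem();
+//   auto Buffer = FS->getBufferForFile("/etc/hosts"); // hypothetical path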
+
+namespace {
+class RealFSDirIter : public clang::vfs::detail::DirIterImpl {
+ std::string Path;
+ llvm::sys::fs::directory_iterator Iter;
+public:
+ RealFSDirIter(const Twine &_Path, std::error_code &EC)
+ : Path(_Path.str()), Iter(Path, EC) {
+ if (!EC && Iter != llvm::sys::fs::directory_iterator()) {
+ llvm::sys::fs::file_status S;
+ EC = Iter->status(S);
+ if (!EC)
+ CurrentEntry = Status::copyWithNewName(S, Iter->path());
+ }
+ }
+
+ std::error_code increment() override {
+ std::error_code EC;
+ Iter.increment(EC);
+ if (EC) {
+ return EC;
+ } else if (Iter == llvm::sys::fs::directory_iterator()) {
+ CurrentEntry = Status();
+ } else {
+ llvm::sys::fs::file_status S;
+ EC = Iter->status(S);
+ CurrentEntry = Status::copyWithNewName(S, Iter->path());
+ }
+ return EC;
+ }
+};
+}
+
+directory_iterator RealFileSystem::dir_begin(const Twine &Dir,
+ std::error_code &EC) {
+ return directory_iterator(std::make_shared<RealFSDirIter>(Dir, EC));
+}
+
+//===----------------------------------------------------------------------===//
+// OverlayFileSystem implementation
+//===----------------------------------------------------------------------===//
+OverlayFileSystem::OverlayFileSystem(IntrusiveRefCntPtr<FileSystem> BaseFS) {
+ FSList.push_back(BaseFS);
+}
+
+void OverlayFileSystem::pushOverlay(IntrusiveRefCntPtr<FileSystem> FS) {
+ FSList.push_back(FS);
+ // Synchronize added file systems by duplicating the working directory from
+ // the first one in the list.
+ FS->setCurrentWorkingDirectory(getCurrentWorkingDirectory().get());
+}
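+
+// Usage sketch: overlays_begin() walks from the top of the stack, so the most
+// recently pushed file system is consulted first on lookups.
+//   IntrusiveRefCntPtr<OverlayFileSystem> OFS(
+//       new OverlayFileSystem(getRealFileSystem()));
+//   OFS->pushOverlay(SomeFS); // SomeFS (hypothetical) now shadows the base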
+
+ErrorOr<Status> OverlayFileSystem::status(const Twine &Path) {
+ // FIXME: handle symlinks that cross file systems
+ for (iterator I = overlays_begin(), E = overlays_end(); I != E; ++I) {
+ ErrorOr<Status> Status = (*I)->status(Path);
+ if (Status || Status.getError() != llvm::errc::no_such_file_or_directory)
+ return Status;
+ }
+ return make_error_code(llvm::errc::no_such_file_or_directory);
+}
+
+ErrorOr<std::unique_ptr<File>>
+OverlayFileSystem::openFileForRead(const llvm::Twine &Path) {
+ // FIXME: handle symlinks that cross file systems
+ for (iterator I = overlays_begin(), E = overlays_end(); I != E; ++I) {
+ auto Result = (*I)->openFileForRead(Path);
+ if (Result || Result.getError() != llvm::errc::no_such_file_or_directory)
+ return Result;
+ }
+ return make_error_code(llvm::errc::no_such_file_or_directory);
+}
+
+llvm::ErrorOr<std::string>
+OverlayFileSystem::getCurrentWorkingDirectory() const {
+ // All file systems are synchronized, just take the first working directory.
+ return FSList.front()->getCurrentWorkingDirectory();
+}
+std::error_code
+OverlayFileSystem::setCurrentWorkingDirectory(const Twine &Path) {
+ for (auto &FS : FSList)
+ if (std::error_code EC = FS->setCurrentWorkingDirectory(Path))
+ return EC;
+ return std::error_code();
+}
+
+clang::vfs::detail::DirIterImpl::~DirIterImpl() { }
+
+namespace {
+class OverlayFSDirIterImpl : public clang::vfs::detail::DirIterImpl {
+ OverlayFileSystem &Overlays;
+ std::string Path;
+ OverlayFileSystem::iterator CurrentFS;
+ directory_iterator CurrentDirIter;
+ llvm::StringSet<> SeenNames;
+
+ std::error_code incrementFS() {
+ assert(CurrentFS != Overlays.overlays_end() && "incrementing past end");
+ ++CurrentFS;
+ for (auto E = Overlays.overlays_end(); CurrentFS != E; ++CurrentFS) {
+ std::error_code EC;
+ CurrentDirIter = (*CurrentFS)->dir_begin(Path, EC);
+ if (EC && EC != errc::no_such_file_or_directory)
+ return EC;
+ if (CurrentDirIter != directory_iterator())
+ break; // found
+ }
+ return std::error_code();
+ }
+
+ std::error_code incrementDirIter(bool IsFirstTime) {
+ assert((IsFirstTime || CurrentDirIter != directory_iterator()) &&
+ "incrementing past end");
+ std::error_code EC;
+ if (!IsFirstTime)
+ CurrentDirIter.increment(EC);
+ if (!EC && CurrentDirIter == directory_iterator())
+ EC = incrementFS();
+ return EC;
+ }
+
+ std::error_code incrementImpl(bool IsFirstTime) {
+ while (true) {
+ std::error_code EC = incrementDirIter(IsFirstTime);
+ if (EC || CurrentDirIter == directory_iterator()) {
+ CurrentEntry = Status();
+ return EC;
+ }
+ CurrentEntry = *CurrentDirIter;
+ StringRef Name = llvm::sys::path::filename(CurrentEntry.getName());
+ if (SeenNames.insert(Name).second)
+ return EC; // name not seen before
+ }
+ llvm_unreachable("returned above");
+ }
+
+public:
+ OverlayFSDirIterImpl(const Twine &Path, OverlayFileSystem &FS,
+ std::error_code &EC)
+ : Overlays(FS), Path(Path.str()), CurrentFS(Overlays.overlays_begin()) {
+ CurrentDirIter = (*CurrentFS)->dir_begin(Path, EC);
+ EC = incrementImpl(true);
+ }
+
+ std::error_code increment() override { return incrementImpl(false); }
+};
+} // end anonymous namespace
+
+directory_iterator OverlayFileSystem::dir_begin(const Twine &Dir,
+ std::error_code &EC) {
+ return directory_iterator(
+ std::make_shared<OverlayFSDirIterImpl>(Dir, *this, EC));
+}
+
+namespace clang {
+namespace vfs {
+namespace detail {
+
+enum InMemoryNodeKind { IME_File, IME_Directory };
+
+/// The in-memory file system is a tree of Nodes. Every node is either a file
+/// or a directory.
+class InMemoryNode {
+ Status Stat;
+ InMemoryNodeKind Kind;
+
+public:
+ InMemoryNode(Status Stat, InMemoryNodeKind Kind)
+ : Stat(std::move(Stat)), Kind(Kind) {}
+ virtual ~InMemoryNode() {}
+ const Status &getStatus() const { return Stat; }
+ InMemoryNodeKind getKind() const { return Kind; }
+ virtual std::string toString(unsigned Indent) const = 0;
+};
+
+namespace {
+class InMemoryFile : public InMemoryNode {
+ std::unique_ptr<llvm::MemoryBuffer> Buffer;
+
+public:
+ InMemoryFile(Status Stat, std::unique_ptr<llvm::MemoryBuffer> Buffer)
+ : InMemoryNode(std::move(Stat), IME_File), Buffer(std::move(Buffer)) {}
+
+ llvm::MemoryBuffer *getBuffer() { return Buffer.get(); }
+ std::string toString(unsigned Indent) const override {
+ return (std::string(Indent, ' ') + getStatus().getName() + "\n").str();
+ }
+ static bool classof(const InMemoryNode *N) {
+ return N->getKind() == IME_File;
+ }
+};
+
+/// Adapt an InMemoryFile to the VFS File interface.
+class InMemoryFileAdaptor : public File {
+ InMemoryFile &Node;
+
+public:
+ explicit InMemoryFileAdaptor(InMemoryFile &Node) : Node(Node) {}
+
+ llvm::ErrorOr<Status> status() override { return Node.getStatus(); }
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
+ getBuffer(const Twine &Name, int64_t FileSize, bool RequiresNullTerminator,
+ bool IsVolatile) override {
+ llvm::MemoryBuffer *Buf = Node.getBuffer();
+ return llvm::MemoryBuffer::getMemBuffer(
+ Buf->getBuffer(), Buf->getBufferIdentifier(), RequiresNullTerminator);
+ }
+ std::error_code close() override { return std::error_code(); }
+};
+} // end anonymous namespace
+
+class InMemoryDirectory : public InMemoryNode {
+ std::map<std::string, std::unique_ptr<InMemoryNode>> Entries;
+
+public:
+ InMemoryDirectory(Status Stat)
+ : InMemoryNode(std::move(Stat), IME_Directory) {}
+ InMemoryNode *getChild(StringRef Name) {
+ auto I = Entries.find(Name);
+ if (I != Entries.end())
+ return I->second.get();
+ return nullptr;
+ }
+ InMemoryNode *addChild(StringRef Name, std::unique_ptr<InMemoryNode> Child) {
+ return Entries.insert(make_pair(Name, std::move(Child)))
+ .first->second.get();
+ }
+
+ typedef decltype(Entries)::const_iterator const_iterator;
+ const_iterator begin() const { return Entries.begin(); }
+ const_iterator end() const { return Entries.end(); }
+
+ std::string toString(unsigned Indent) const override {
+ std::string Result =
+ (std::string(Indent, ' ') + getStatus().getName() + "\n").str();
+ for (const auto &Entry : Entries) {
+ Result += Entry.second->toString(Indent + 2);
+ }
+ return Result;
+ }
+ static bool classof(const InMemoryNode *N) {
+ return N->getKind() == IME_Directory;
+ }
+};
+}
+
+InMemoryFileSystem::InMemoryFileSystem(bool UseNormalizedPaths)
+ : Root(new detail::InMemoryDirectory(
+ Status("", getNextVirtualUniqueID(), llvm::sys::TimeValue::MinTime(),
+ 0, 0, 0, llvm::sys::fs::file_type::directory_file,
+ llvm::sys::fs::perms::all_all))),
+ UseNormalizedPaths(UseNormalizedPaths) {}
+
+InMemoryFileSystem::~InMemoryFileSystem() {}
+
+std::string InMemoryFileSystem::toString() const {
+ return Root->toString(/*Indent=*/0);
+}
+
+bool InMemoryFileSystem::addFile(const Twine &P, time_t ModificationTime,
+ std::unique_ptr<llvm::MemoryBuffer> Buffer) {
+ SmallString<128> Path;
+ P.toVector(Path);
+
+ // Fix up relative paths. This just prepends the current working directory.
+ std::error_code EC = makeAbsolute(Path);
+ assert(!EC);
+ (void)EC;
+
+ if (useNormalizedPaths())
+ llvm::sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
+
+ if (Path.empty())
+ return false;
+
+ detail::InMemoryDirectory *Dir = Root.get();
+ auto I = llvm::sys::path::begin(Path), E = llvm::sys::path::end(Path);
+ while (true) {
+ StringRef Name = *I;
+ detail::InMemoryNode *Node = Dir->getChild(Name);
+ ++I;
+ if (!Node) {
+ if (I == E) {
+ // End of the path, create a new file.
+ // FIXME: expose the status details in the interface.
+ Status Stat(P.str(), getNextVirtualUniqueID(),
+ llvm::sys::TimeValue(ModificationTime, 0), 0, 0,
+ Buffer->getBufferSize(),
+ llvm::sys::fs::file_type::regular_file,
+ llvm::sys::fs::all_all);
+ Dir->addChild(Name, llvm::make_unique<detail::InMemoryFile>(
+ std::move(Stat), std::move(Buffer)));
+ return true;
+ }
+
+ // Create a new directory. Use the path up to here.
+ // FIXME: expose the status details in the interface.
+ Status Stat(
+ StringRef(Path.str().begin(), Name.end() - Path.str().begin()),
+ getNextVirtualUniqueID(), llvm::sys::TimeValue(ModificationTime, 0),
+ 0, 0, Buffer->getBufferSize(),
+ llvm::sys::fs::file_type::directory_file, llvm::sys::fs::all_all);
+ Dir = cast<detail::InMemoryDirectory>(Dir->addChild(
+ Name, llvm::make_unique<detail::InMemoryDirectory>(std::move(Stat))));
+ continue;
+ }
+
+ if (auto *NewDir = dyn_cast<detail::InMemoryDirectory>(Node)) {
+ Dir = NewDir;
+ } else {
+ assert(isa<detail::InMemoryFile>(Node) &&
+ "Must be either file or directory!");
+
+ // Trying to insert a directory in place of a file.
+ if (I != E)
+ return false;
+
+ // Return false only if the new file is different from the existing one.
+ return cast<detail::InMemoryFile>(Node)->getBuffer()->getBuffer() ==
+ Buffer->getBuffer();
+ }
+ }
+}
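+
+// Example (a sketch): paths are made absolute (and normalized when enabled)
+// before insertion, and intermediate directories are created implicitly.
+//   InMemoryFileSystem FS;
+//   FS.addFile("/src/a.c", /*ModificationTime=*/0,
+//              llvm::MemoryBuffer::getMemBuffer("int x;"));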
+
+bool InMemoryFileSystem::addFileNoOwn(const Twine &P, time_t ModificationTime,
+ llvm::MemoryBuffer *Buffer) {
+ return addFile(P, ModificationTime,
+ llvm::MemoryBuffer::getMemBuffer(
+ Buffer->getBuffer(), Buffer->getBufferIdentifier()));
+}
+
+static ErrorOr<detail::InMemoryNode *>
+lookupInMemoryNode(const InMemoryFileSystem &FS, detail::InMemoryDirectory *Dir,
+ const Twine &P) {
+ SmallString<128> Path;
+ P.toVector(Path);
+
+ // Fix up relative paths. This just prepends the current working directory.
+ std::error_code EC = FS.makeAbsolute(Path);
+ assert(!EC);
+ (void)EC;
+
+ if (FS.useNormalizedPaths())
+ llvm::sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
+
+ if (Path.empty())
+ return Dir;
+
+ auto I = llvm::sys::path::begin(Path), E = llvm::sys::path::end(Path);
+ while (true) {
+ detail::InMemoryNode *Node = Dir->getChild(*I);
+ ++I;
+ if (!Node)
+ return errc::no_such_file_or_directory;
+
+ // Return the file if it's at the end of the path.
+ if (auto File = dyn_cast<detail::InMemoryFile>(Node)) {
+ if (I == E)
+ return File;
+ return errc::no_such_file_or_directory;
+ }
+
+ // Traverse directories.
+ Dir = cast<detail::InMemoryDirectory>(Node);
+ if (I == E)
+ return Dir;
+ }
+}
+
+llvm::ErrorOr<Status> InMemoryFileSystem::status(const Twine &Path) {
+ auto Node = lookupInMemoryNode(*this, Root.get(), Path);
+ if (Node)
+ return (*Node)->getStatus();
+ return Node.getError();
+}
+
+llvm::ErrorOr<std::unique_ptr<File>>
+InMemoryFileSystem::openFileForRead(const Twine &Path) {
+ auto Node = lookupInMemoryNode(*this, Root.get(), Path);
+ if (!Node)
+ return Node.getError();
+
+  // When we have a file, provide a heap-allocated wrapper for the memory
+  // buffer to match the ownership semantics for File.
+ if (auto *F = dyn_cast<detail::InMemoryFile>(*Node))
+ return std::unique_ptr<File>(new detail::InMemoryFileAdaptor(*F));
+
+ // FIXME: errc::not_a_file?
+ return make_error_code(llvm::errc::invalid_argument);
+}
+
+namespace {
+/// Adaptor from InMemoryDirectory::const_iterator to directory_iterator.
+class InMemoryDirIterator : public clang::vfs::detail::DirIterImpl {
+ detail::InMemoryDirectory::const_iterator I;
+ detail::InMemoryDirectory::const_iterator E;
+
+public:
+ InMemoryDirIterator() {}
+ explicit InMemoryDirIterator(detail::InMemoryDirectory &Dir)
+ : I(Dir.begin()), E(Dir.end()) {
+ if (I != E)
+ CurrentEntry = I->second->getStatus();
+ }
+
+ std::error_code increment() override {
+ ++I;
+ // When we're at the end, make CurrentEntry invalid and DirIterImpl will do
+ // the rest.
+ CurrentEntry = I != E ? I->second->getStatus() : Status();
+ return std::error_code();
+ }
+};
+} // end anonymous namespace
+
+directory_iterator InMemoryFileSystem::dir_begin(const Twine &Dir,
+ std::error_code &EC) {
+ auto Node = lookupInMemoryNode(*this, Root.get(), Dir);
+ if (!Node) {
+ EC = Node.getError();
+ return directory_iterator(std::make_shared<InMemoryDirIterator>());
+ }
+
+ if (auto *DirNode = dyn_cast<detail::InMemoryDirectory>(*Node))
+ return directory_iterator(std::make_shared<InMemoryDirIterator>(*DirNode));
+
+ EC = make_error_code(llvm::errc::not_a_directory);
+ return directory_iterator(std::make_shared<InMemoryDirIterator>());
+}
+}
+}
+
+//===----------------------------------------------------------------------===//
+// RedirectingFileSystem implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+enum EntryKind {
+ EK_Directory,
+ EK_File
+};
+
+/// \brief A single file or directory in the VFS.
+class Entry {
+ EntryKind Kind;
+ std::string Name;
+
+public:
+ virtual ~Entry();
+ Entry(EntryKind K, StringRef Name) : Kind(K), Name(Name) {}
+ StringRef getName() const { return Name; }
+ EntryKind getKind() const { return Kind; }
+};
+
+class RedirectingDirectoryEntry : public Entry {
+ std::vector<std::unique_ptr<Entry>> Contents;
+ Status S;
+
+public:
+ RedirectingDirectoryEntry(StringRef Name,
+ std::vector<std::unique_ptr<Entry>> Contents,
+ Status S)
+ : Entry(EK_Directory, Name), Contents(std::move(Contents)),
+ S(std::move(S)) {}
+ Status getStatus() { return S; }
+ typedef decltype(Contents)::iterator iterator;
+ iterator contents_begin() { return Contents.begin(); }
+ iterator contents_end() { return Contents.end(); }
+ static bool classof(const Entry *E) { return E->getKind() == EK_Directory; }
+};
+
+class RedirectingFileEntry : public Entry {
+public:
+ enum NameKind {
+ NK_NotSet,
+ NK_External,
+ NK_Virtual
+ };
+private:
+ std::string ExternalContentsPath;
+ NameKind UseName;
+public:
+ RedirectingFileEntry(StringRef Name, StringRef ExternalContentsPath,
+ NameKind UseName)
+ : Entry(EK_File, Name), ExternalContentsPath(ExternalContentsPath),
+ UseName(UseName) {}
+ StringRef getExternalContentsPath() const { return ExternalContentsPath; }
+ /// \brief whether to use the external path as the name for this file.
+ bool useExternalName(bool GlobalUseExternalName) const {
+ return UseName == NK_NotSet ? GlobalUseExternalName
+ : (UseName == NK_External);
+ }
+ static bool classof(const Entry *E) { return E->getKind() == EK_File; }
+};
+
+class RedirectingFileSystem;
+
+class VFSFromYamlDirIterImpl : public clang::vfs::detail::DirIterImpl {
+ std::string Dir;
+ RedirectingFileSystem &FS;
+ RedirectingDirectoryEntry::iterator Current, End;
+
+public:
+ VFSFromYamlDirIterImpl(const Twine &Path, RedirectingFileSystem &FS,
+ RedirectingDirectoryEntry::iterator Begin,
+ RedirectingDirectoryEntry::iterator End,
+ std::error_code &EC);
+ std::error_code increment() override;
+};
+
+/// \brief A virtual file system parsed from a YAML file.
+///
+/// Currently, this class allows creating virtual directories and mapping
+/// virtual file paths to existing external files, available in \c ExternalFS.
+///
+/// The basic structure of the parsed file is:
+/// \verbatim
+/// {
+/// 'version': <version number>,
+/// <optional configuration>
+/// 'roots': [
+/// <directory entries>
+/// ]
+/// }
+/// \endverbatim
+///
+/// All configuration options are optional.
+/// 'case-sensitive': <boolean, default=true>
+/// 'use-external-names': <boolean, default=true>
+///
+/// Virtual directories are represented as
+/// \verbatim
+/// {
+/// 'type': 'directory',
+/// 'name': <string>,
+/// 'contents': [ <file or directory entries> ]
+/// }
+/// \endverbatim
+///
+/// The default attributes for virtual directories are:
+/// \verbatim
+/// MTime = now() when created
+/// Perms = 0777
+/// User = Group = 0
+/// Size = 0
+/// UniqueID = unspecified unique value
+/// \endverbatim
+///
+/// Re-mapped files are represented as
+/// \verbatim
+/// {
+/// 'type': 'file',
+/// 'name': <string>,
+/// 'use-external-name': <boolean> # Optional
+/// 'external-contents': <path to external file>)
+/// }
+/// \endverbatim
+///
+/// and inherit their attributes from the external contents.
+///
+/// In both cases, the 'name' field may contain multiple path components (e.g.
+/// /path/to/file). However, any directory that contains more than one child
+/// must be uniquely represented by a directory entry.
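+///
+/// A complete minimal example (all paths hypothetical):
+/// \verbatim
+/// {
+///   'version': 0,
+///   'roots': [
+///     { 'type': 'directory', 'name': "/virtual",
+///       'contents': [
+///         { 'type': 'file', 'name': "a.h",
+///           'external-contents': "/real/include/a.h" }
+///       ]
+///     }
+///   ]
+/// }
+/// \endverbatim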
+class RedirectingFileSystem : public vfs::FileSystem {
+ /// The root(s) of the virtual file system.
+ std::vector<std::unique_ptr<Entry>> Roots;
+ /// \brief The file system to use for external references.
+ IntrusiveRefCntPtr<FileSystem> ExternalFS;
+
+ /// @name Configuration
+ /// @{
+
+ /// \brief Whether to perform case-sensitive comparisons.
+ ///
+ /// Currently, case-insensitive matching only works correctly with ASCII.
+ bool CaseSensitive;
+
+  /// \brief Whether to use the value of 'external-contents' for the names of
+  /// files. This global value is overridable on a per-file basis.
+ bool UseExternalNames;
+ /// @}
+
+ friend class RedirectingFileSystemParser;
+
+private:
+ RedirectingFileSystem(IntrusiveRefCntPtr<FileSystem> ExternalFS)
+ : ExternalFS(ExternalFS), CaseSensitive(true), UseExternalNames(true) {}
+
+ /// \brief Looks up \p Path in \c Roots.
+ ErrorOr<Entry *> lookupPath(const Twine &Path);
+
+ /// \brief Looks up the path <tt>[Start, End)</tt> in \p From, possibly
+ /// recursing into the contents of \p From if it is a directory.
+ ErrorOr<Entry *> lookupPath(sys::path::const_iterator Start,
+ sys::path::const_iterator End, Entry *From);
+
+  /// \brief Get the status of a given \c Entry.
+ ErrorOr<Status> status(const Twine &Path, Entry *E);
+
+public:
+  /// \brief Parses \p Buffer, which is expected to be in YAML format, and
+  /// returns a virtual file system representing its contents.
+ static RedirectingFileSystem *
+ create(std::unique_ptr<MemoryBuffer> Buffer,
+ SourceMgr::DiagHandlerTy DiagHandler, void *DiagContext,
+ IntrusiveRefCntPtr<FileSystem> ExternalFS);
+
+ ErrorOr<Status> status(const Twine &Path) override;
+ ErrorOr<std::unique_ptr<File>> openFileForRead(const Twine &Path) override;
+
+ llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override {
+ return ExternalFS->getCurrentWorkingDirectory();
+ }
+ std::error_code setCurrentWorkingDirectory(const Twine &Path) override {
+ return ExternalFS->setCurrentWorkingDirectory(Path);
+ }
+
+  directory_iterator dir_begin(const Twine &Dir,
+                               std::error_code &EC) override {
+ ErrorOr<Entry *> E = lookupPath(Dir);
+ if (!E) {
+ EC = E.getError();
+ return directory_iterator();
+ }
+ ErrorOr<Status> S = status(Dir, *E);
+ if (!S) {
+ EC = S.getError();
+ return directory_iterator();
+ }
+ if (!S->isDirectory()) {
+ EC = std::error_code(static_cast<int>(errc::not_a_directory),
+ std::system_category());
+ return directory_iterator();
+ }
+
+ auto *D = cast<RedirectingDirectoryEntry>(*E);
+ return directory_iterator(std::make_shared<VFSFromYamlDirIterImpl>(Dir,
+ *this, D->contents_begin(), D->contents_end(), EC));
+ }
+};
+
+/// \brief A helper class to hold the common YAML parsing state.
+class RedirectingFileSystemParser {
+ yaml::Stream &Stream;
+
+ void error(yaml::Node *N, const Twine &Msg) {
+ Stream.printError(N, Msg);
+ }
+
+ // false on error
+ bool parseScalarString(yaml::Node *N, StringRef &Result,
+ SmallVectorImpl<char> &Storage) {
+ yaml::ScalarNode *S = dyn_cast<yaml::ScalarNode>(N);
+ if (!S) {
+ error(N, "expected string");
+ return false;
+ }
+ Result = S->getValue(Storage);
+ return true;
+ }
+
+ // false on error
+ bool parseScalarBool(yaml::Node *N, bool &Result) {
+ SmallString<5> Storage;
+ StringRef Value;
+ if (!parseScalarString(N, Value, Storage))
+ return false;
+
+ if (Value.equals_lower("true") || Value.equals_lower("on") ||
+ Value.equals_lower("yes") || Value == "1") {
+ Result = true;
+ return true;
+ } else if (Value.equals_lower("false") || Value.equals_lower("off") ||
+ Value.equals_lower("no") || Value == "0") {
+ Result = false;
+ return true;
+ }
+
+ error(N, "expected boolean value");
+ return false;
+ }
+
+ struct KeyStatus {
+ KeyStatus(bool Required=false) : Required(Required), Seen(false) {}
+ bool Required;
+ bool Seen;
+ };
+ typedef std::pair<StringRef, KeyStatus> KeyStatusPair;
+
+ // false on error
+ bool checkDuplicateOrUnknownKey(yaml::Node *KeyNode, StringRef Key,
+ DenseMap<StringRef, KeyStatus> &Keys) {
+ if (!Keys.count(Key)) {
+ error(KeyNode, "unknown key");
+ return false;
+ }
+ KeyStatus &S = Keys[Key];
+ if (S.Seen) {
+ error(KeyNode, Twine("duplicate key '") + Key + "'");
+ return false;
+ }
+ S.Seen = true;
+ return true;
+ }
+
+ // false on error
+ bool checkMissingKeys(yaml::Node *Obj, DenseMap<StringRef, KeyStatus> &Keys) {
+ for (DenseMap<StringRef, KeyStatus>::iterator I = Keys.begin(),
+ E = Keys.end();
+ I != E; ++I) {
+ if (I->second.Required && !I->second.Seen) {
+ error(Obj, Twine("missing key '") + I->first + "'");
+ return false;
+ }
+ }
+ return true;
+ }
+
+ std::unique_ptr<Entry> parseEntry(yaml::Node *N) {
+ yaml::MappingNode *M = dyn_cast<yaml::MappingNode>(N);
+ if (!M) {
+ error(N, "expected mapping node for file or directory entry");
+ return nullptr;
+ }
+
+ KeyStatusPair Fields[] = {
+ KeyStatusPair("name", true),
+ KeyStatusPair("type", true),
+ KeyStatusPair("contents", false),
+ KeyStatusPair("external-contents", false),
+ KeyStatusPair("use-external-name", false),
+ };
+
+ DenseMap<StringRef, KeyStatus> Keys(std::begin(Fields), std::end(Fields));
+
+ bool HasContents = false; // external or otherwise
+ std::vector<std::unique_ptr<Entry>> EntryArrayContents;
+ std::string ExternalContentsPath;
+ std::string Name;
+ auto UseExternalName = RedirectingFileEntry::NK_NotSet;
+ EntryKind Kind;
+
+ for (yaml::MappingNode::iterator I = M->begin(), E = M->end(); I != E;
+ ++I) {
+ StringRef Key;
+ // Reuse the buffer for key and value, since we don't look at key after
+ // parsing value.
+ SmallString<256> Buffer;
+ if (!parseScalarString(I->getKey(), Key, Buffer))
+ return nullptr;
+
+ if (!checkDuplicateOrUnknownKey(I->getKey(), Key, Keys))
+ return nullptr;
+
+ StringRef Value;
+ if (Key == "name") {
+ if (!parseScalarString(I->getValue(), Value, Buffer))
+ return nullptr;
+ Name = Value;
+ } else if (Key == "type") {
+ if (!parseScalarString(I->getValue(), Value, Buffer))
+ return nullptr;
+ if (Value == "file")
+ Kind = EK_File;
+ else if (Value == "directory")
+ Kind = EK_Directory;
+ else {
+ error(I->getValue(), "unknown value for 'type'");
+ return nullptr;
+ }
+ } else if (Key == "contents") {
+ if (HasContents) {
+ error(I->getKey(),
+ "entry already has 'contents' or 'external-contents'");
+ return nullptr;
+ }
+ HasContents = true;
+ yaml::SequenceNode *Contents =
+ dyn_cast<yaml::SequenceNode>(I->getValue());
+ if (!Contents) {
+        // FIXME: this is only for directories; what about files?
+ error(I->getValue(), "expected array");
+ return nullptr;
+ }
+
+ for (yaml::SequenceNode::iterator I = Contents->begin(),
+ E = Contents->end();
+ I != E; ++I) {
+ if (std::unique_ptr<Entry> E = parseEntry(&*I))
+ EntryArrayContents.push_back(std::move(E));
+ else
+ return nullptr;
+ }
+ } else if (Key == "external-contents") {
+ if (HasContents) {
+ error(I->getKey(),
+ "entry already has 'contents' or 'external-contents'");
+ return nullptr;
+ }
+ HasContents = true;
+ if (!parseScalarString(I->getValue(), Value, Buffer))
+ return nullptr;
+ ExternalContentsPath = Value;
+ } else if (Key == "use-external-name") {
+ bool Val;
+ if (!parseScalarBool(I->getValue(), Val))
+ return nullptr;
+ UseExternalName = Val ? RedirectingFileEntry::NK_External
+ : RedirectingFileEntry::NK_Virtual;
+ } else {
+ llvm_unreachable("key missing from Keys");
+ }
+ }
+
+ if (Stream.failed())
+ return nullptr;
+
+ // check for missing keys
+ if (!HasContents) {
+ error(N, "missing key 'contents' or 'external-contents'");
+ return nullptr;
+ }
+ if (!checkMissingKeys(N, Keys))
+ return nullptr;
+
+ // check invalid configuration
+ if (Kind == EK_Directory &&
+ UseExternalName != RedirectingFileEntry::NK_NotSet) {
+ error(N, "'use-external-name' is not supported for directories");
+ return nullptr;
+ }
+
+ // Remove trailing slash(es), being careful not to remove the root path
+ StringRef Trimmed(Name);
+ size_t RootPathLen = sys::path::root_path(Trimmed).size();
+ while (Trimmed.size() > RootPathLen &&
+ sys::path::is_separator(Trimmed.back()))
+ Trimmed = Trimmed.slice(0, Trimmed.size()-1);
+ // Get the last component
+ StringRef LastComponent = sys::path::filename(Trimmed);
+
+ std::unique_ptr<Entry> Result;
+ switch (Kind) {
+ case EK_File:
+ Result = llvm::make_unique<RedirectingFileEntry>(
+ LastComponent, std::move(ExternalContentsPath), UseExternalName);
+ break;
+ case EK_Directory:
+ Result = llvm::make_unique<RedirectingDirectoryEntry>(
+ LastComponent, std::move(EntryArrayContents),
+ Status("", getNextVirtualUniqueID(), sys::TimeValue::now(), 0, 0, 0,
+ file_type::directory_file, sys::fs::all_all));
+ break;
+ }
+
+ StringRef Parent = sys::path::parent_path(Trimmed);
+ if (Parent.empty())
+ return Result;
+
+    // If 'name' contains multiple components, create implicit directory
+    // entries.
+ for (sys::path::reverse_iterator I = sys::path::rbegin(Parent),
+ E = sys::path::rend(Parent);
+ I != E; ++I) {
+ std::vector<std::unique_ptr<Entry>> Entries;
+ Entries.push_back(std::move(Result));
+ Result = llvm::make_unique<RedirectingDirectoryEntry>(
+ *I, std::move(Entries),
+ Status("", getNextVirtualUniqueID(), sys::TimeValue::now(), 0, 0, 0,
+ file_type::directory_file, sys::fs::all_all));
+ }
+ return Result;
+ }
+
+public:
+ RedirectingFileSystemParser(yaml::Stream &S) : Stream(S) {}
+
+ // false on error
+ bool parse(yaml::Node *Root, RedirectingFileSystem *FS) {
+ yaml::MappingNode *Top = dyn_cast<yaml::MappingNode>(Root);
+ if (!Top) {
+ error(Root, "expected mapping node");
+ return false;
+ }
+
+ KeyStatusPair Fields[] = {
+ KeyStatusPair("version", true),
+ KeyStatusPair("case-sensitive", false),
+ KeyStatusPair("use-external-names", false),
+ KeyStatusPair("roots", true),
+ };
+
+ DenseMap<StringRef, KeyStatus> Keys(std::begin(Fields), std::end(Fields));
+
+ // Parse configuration and 'roots'
+ for (yaml::MappingNode::iterator I = Top->begin(), E = Top->end(); I != E;
+ ++I) {
+ SmallString<10> KeyBuffer;
+ StringRef Key;
+ if (!parseScalarString(I->getKey(), Key, KeyBuffer))
+ return false;
+
+ if (!checkDuplicateOrUnknownKey(I->getKey(), Key, Keys))
+ return false;
+
+ if (Key == "roots") {
+ yaml::SequenceNode *Roots = dyn_cast<yaml::SequenceNode>(I->getValue());
+ if (!Roots) {
+ error(I->getValue(), "expected array");
+ return false;
+ }
+
+ for (yaml::SequenceNode::iterator I = Roots->begin(), E = Roots->end();
+ I != E; ++I) {
+ if (std::unique_ptr<Entry> E = parseEntry(&*I))
+ FS->Roots.push_back(std::move(E));
+ else
+ return false;
+ }
+ } else if (Key == "version") {
+ StringRef VersionString;
+ SmallString<4> Storage;
+ if (!parseScalarString(I->getValue(), VersionString, Storage))
+ return false;
+ int Version;
+ if (VersionString.getAsInteger<int>(10, Version)) {
+ error(I->getValue(), "expected integer");
+ return false;
+ }
+ if (Version < 0) {
+ error(I->getValue(), "invalid version number");
+ return false;
+ }
+ if (Version != 0) {
+ error(I->getValue(), "version mismatch, expected 0");
+ return false;
+ }
+ } else if (Key == "case-sensitive") {
+ if (!parseScalarBool(I->getValue(), FS->CaseSensitive))
+ return false;
+ } else if (Key == "use-external-names") {
+ if (!parseScalarBool(I->getValue(), FS->UseExternalNames))
+ return false;
+ } else {
+ llvm_unreachable("key missing from Keys");
+ }
+ }
+
+ if (Stream.failed())
+ return false;
+
+ if (!checkMissingKeys(Top, Keys))
+ return false;
+ return true;
+ }
+};
+} // end of anonymous namespace
+
+Entry::~Entry() = default;
+
+RedirectingFileSystem *RedirectingFileSystem::create(
+ std::unique_ptr<MemoryBuffer> Buffer, SourceMgr::DiagHandlerTy DiagHandler,
+ void *DiagContext, IntrusiveRefCntPtr<FileSystem> ExternalFS) {
+
+ SourceMgr SM;
+ yaml::Stream Stream(Buffer->getMemBufferRef(), SM);
+
+ SM.setDiagHandler(DiagHandler, DiagContext);
+ yaml::document_iterator DI = Stream.begin();
+  // Guard against an empty stream before dereferencing the iterator.
+  yaml::Node *Root = DI == Stream.end() ? nullptr : DI->getRoot();
+  if (!Root) {
+ SM.PrintMessage(SMLoc(), SourceMgr::DK_Error, "expected root node");
+ return nullptr;
+ }
+
+ RedirectingFileSystemParser P(Stream);
+
+ std::unique_ptr<RedirectingFileSystem> FS(
+ new RedirectingFileSystem(ExternalFS));
+ if (!P.parse(Root, FS.get()))
+ return nullptr;
+
+ return FS.release();
+}
+
+ErrorOr<Entry *> RedirectingFileSystem::lookupPath(const Twine &Path_) {
+ SmallString<256> Path;
+ Path_.toVector(Path);
+
+ // Handle relative paths
+ if (std::error_code EC = makeAbsolute(Path))
+ return EC;
+
+ if (Path.empty())
+ return make_error_code(llvm::errc::invalid_argument);
+
+ sys::path::const_iterator Start = sys::path::begin(Path);
+ sys::path::const_iterator End = sys::path::end(Path);
+ for (const std::unique_ptr<Entry> &Root : Roots) {
+ ErrorOr<Entry *> Result = lookupPath(Start, End, Root.get());
+ if (Result || Result.getError() != llvm::errc::no_such_file_or_directory)
+ return Result;
+ }
+ return make_error_code(llvm::errc::no_such_file_or_directory);
+}
+
+ErrorOr<Entry *>
+RedirectingFileSystem::lookupPath(sys::path::const_iterator Start,
+ sys::path::const_iterator End, Entry *From) {
+ if (Start->equals("."))
+ ++Start;
+
+ // FIXME: handle ..
+ if (CaseSensitive ? !Start->equals(From->getName())
+ : !Start->equals_lower(From->getName()))
+ // failure to match
+ return make_error_code(llvm::errc::no_such_file_or_directory);
+
+ ++Start;
+
+ if (Start == End) {
+ // Match!
+ return From;
+ }
+
+ auto *DE = dyn_cast<RedirectingDirectoryEntry>(From);
+ if (!DE)
+ return make_error_code(llvm::errc::not_a_directory);
+
+ for (const std::unique_ptr<Entry> &DirEntry :
+ llvm::make_range(DE->contents_begin(), DE->contents_end())) {
+ ErrorOr<Entry *> Result = lookupPath(Start, End, DirEntry.get());
+ if (Result || Result.getError() != llvm::errc::no_such_file_or_directory)
+ return Result;
+ }
+ return make_error_code(llvm::errc::no_such_file_or_directory);
+}
+
+static Status getRedirectedFileStatus(const Twine &Path, bool UseExternalNames,
+ Status ExternalStatus) {
+ Status S = ExternalStatus;
+ if (!UseExternalNames)
+ S = Status::copyWithNewName(S, Path.str());
+ S.IsVFSMapped = true;
+ return S;
+}
+
+ErrorOr<Status> RedirectingFileSystem::status(const Twine &Path, Entry *E) {
+ assert(E != nullptr);
+ if (auto *F = dyn_cast<RedirectingFileEntry>(E)) {
+ ErrorOr<Status> S = ExternalFS->status(F->getExternalContentsPath());
+ assert(!S || S->getName() == F->getExternalContentsPath());
+ if (S)
+ return getRedirectedFileStatus(Path, F->useExternalName(UseExternalNames),
+ *S);
+ return S;
+ } else { // directory
+ auto *DE = cast<RedirectingDirectoryEntry>(E);
+ return Status::copyWithNewName(DE->getStatus(), Path.str());
+ }
+}
+
+ErrorOr<Status> RedirectingFileSystem::status(const Twine &Path) {
+ ErrorOr<Entry *> Result = lookupPath(Path);
+ if (!Result)
+ return Result.getError();
+ return status(Path, *Result);
+}
+
+namespace {
+/// Provide a file wrapper with an overridden status.
+class FileWithFixedStatus : public File {
+ std::unique_ptr<File> InnerFile;
+ Status S;
+
+public:
+ FileWithFixedStatus(std::unique_ptr<File> InnerFile, Status S)
+ : InnerFile(std::move(InnerFile)), S(S) {}
+
+ ErrorOr<Status> status() override { return S; }
+ ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
+ getBuffer(const Twine &Name, int64_t FileSize, bool RequiresNullTerminator,
+ bool IsVolatile) override {
+ return InnerFile->getBuffer(Name, FileSize, RequiresNullTerminator,
+ IsVolatile);
+ }
+ std::error_code close() override { return InnerFile->close(); }
+};
+} // end anonymous namespace
+
+ErrorOr<std::unique_ptr<File>>
+RedirectingFileSystem::openFileForRead(const Twine &Path) {
+ ErrorOr<Entry *> E = lookupPath(Path);
+ if (!E)
+ return E.getError();
+
+ auto *F = dyn_cast<RedirectingFileEntry>(*E);
+ if (!F) // FIXME: errc::not_a_file?
+ return make_error_code(llvm::errc::invalid_argument);
+
+ auto Result = ExternalFS->openFileForRead(F->getExternalContentsPath());
+ if (!Result)
+ return Result;
+
+ auto ExternalStatus = (*Result)->status();
+ if (!ExternalStatus)
+ return ExternalStatus.getError();
+
+ // FIXME: Update the status with the name and VFSMapped.
+ Status S = getRedirectedFileStatus(Path, F->useExternalName(UseExternalNames),
+ *ExternalStatus);
+ return std::unique_ptr<File>(
+ llvm::make_unique<FileWithFixedStatus>(std::move(*Result), S));
+}
+
+IntrusiveRefCntPtr<FileSystem>
+vfs::getVFSFromYAML(std::unique_ptr<MemoryBuffer> Buffer,
+ SourceMgr::DiagHandlerTy DiagHandler, void *DiagContext,
+ IntrusiveRefCntPtr<FileSystem> ExternalFS) {
+ return RedirectingFileSystem::create(std::move(Buffer), DiagHandler,
+ DiagContext, ExternalFS);
+}
+
+UniqueID vfs::getNextVirtualUniqueID() {
+ static std::atomic<unsigned> UID;
+ unsigned ID = ++UID;
+ // The following assumes that uint64_t max will never collide with a real
+ // dev_t value from the OS.
+ return UniqueID(std::numeric_limits<uint64_t>::max(), ID);
+}
+
+#ifndef NDEBUG
+static bool pathHasTraversal(StringRef Path) {
+ using namespace llvm::sys;
+ for (StringRef Comp : llvm::make_range(path::begin(Path), path::end(Path)))
+ if (Comp == "." || Comp == "..")
+ return true;
+ return false;
+}
+#endif
+
+void YAMLVFSWriter::addFileMapping(StringRef VirtualPath, StringRef RealPath) {
+ assert(sys::path::is_absolute(VirtualPath) && "virtual path not absolute");
+ assert(sys::path::is_absolute(RealPath) && "real path not absolute");
+ assert(!pathHasTraversal(VirtualPath) && "path traversal is not supported");
+ Mappings.emplace_back(VirtualPath, RealPath);
+}
+
+namespace {
+class JSONWriter {
+ llvm::raw_ostream &OS;
+ SmallVector<StringRef, 16> DirStack;
+ inline unsigned getDirIndent() { return 4 * DirStack.size(); }
+ inline unsigned getFileIndent() { return 4 * (DirStack.size() + 1); }
+ bool containedIn(StringRef Parent, StringRef Path);
+ StringRef containedPart(StringRef Parent, StringRef Path);
+ void startDirectory(StringRef Path);
+ void endDirectory();
+ void writeEntry(StringRef VPath, StringRef RPath);
+
+public:
+ JSONWriter(llvm::raw_ostream &OS) : OS(OS) {}
+ void write(ArrayRef<YAMLVFSEntry> Entries, Optional<bool> IsCaseSensitive);
+};
+} // end anonymous namespace
+
+bool JSONWriter::containedIn(StringRef Parent, StringRef Path) {
+ using namespace llvm::sys;
+ // Compare each path component.
+ auto IParent = path::begin(Parent), EParent = path::end(Parent);
+ for (auto IChild = path::begin(Path), EChild = path::end(Path);
+ IParent != EParent && IChild != EChild; ++IParent, ++IChild) {
+ if (*IParent != *IChild)
+ return false;
+ }
+ // Have we exhausted the parent path?
+ return IParent == EParent;
+}
+
+StringRef JSONWriter::containedPart(StringRef Parent, StringRef Path) {
+ assert(!Parent.empty());
+ assert(containedIn(Parent, Path));
+ return Path.slice(Parent.size() + 1, StringRef::npos);
+}
+
+void JSONWriter::startDirectory(StringRef Path) {
+ StringRef Name =
+ DirStack.empty() ? Path : containedPart(DirStack.back(), Path);
+ DirStack.push_back(Path);
+ unsigned Indent = getDirIndent();
+ OS.indent(Indent) << "{\n";
+ OS.indent(Indent + 2) << "'type': 'directory',\n";
+ OS.indent(Indent + 2) << "'name': \"" << llvm::yaml::escape(Name) << "\",\n";
+ OS.indent(Indent + 2) << "'contents': [\n";
+}
+
+void JSONWriter::endDirectory() {
+ unsigned Indent = getDirIndent();
+ OS.indent(Indent + 2) << "]\n";
+ OS.indent(Indent) << "}";
+
+ DirStack.pop_back();
+}
+
+void JSONWriter::writeEntry(StringRef VPath, StringRef RPath) {
+ unsigned Indent = getFileIndent();
+ OS.indent(Indent) << "{\n";
+ OS.indent(Indent + 2) << "'type': 'file',\n";
+ OS.indent(Indent + 2) << "'name': \"" << llvm::yaml::escape(VPath) << "\",\n";
+ OS.indent(Indent + 2) << "'external-contents': \""
+ << llvm::yaml::escape(RPath) << "\"\n";
+ OS.indent(Indent) << "}";
+}
+
+void JSONWriter::write(ArrayRef<YAMLVFSEntry> Entries,
+ Optional<bool> IsCaseSensitive) {
+ using namespace llvm::sys;
+
+ OS << "{\n"
+ " 'version': 0,\n";
+ if (IsCaseSensitive.hasValue())
+ OS << " 'case-sensitive': '"
+ << (IsCaseSensitive.getValue() ? "true" : "false") << "',\n";
+ OS << " 'roots': [\n";
+
+ if (!Entries.empty()) {
+ const YAMLVFSEntry &Entry = Entries.front();
+ startDirectory(path::parent_path(Entry.VPath));
+ writeEntry(path::filename(Entry.VPath), Entry.RPath);
+
+ for (const auto &Entry : Entries.slice(1)) {
+ StringRef Dir = path::parent_path(Entry.VPath);
+ if (Dir == DirStack.back())
+ OS << ",\n";
+ else {
+ while (!DirStack.empty() && !containedIn(DirStack.back(), Dir)) {
+ OS << "\n";
+ endDirectory();
+ }
+ OS << ",\n";
+ startDirectory(Dir);
+ }
+ writeEntry(path::filename(Entry.VPath), Entry.RPath);
+ }
+
+ while (!DirStack.empty()) {
+ OS << "\n";
+ endDirectory();
+ }
+ OS << "\n";
+ }
+
+ OS << " ]\n"
+ << "}\n";
+}
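+
+// Sketch of the output for a single mapping /v/a.h -> /real/a.h (layout is
+// illustrative; the exact indentation comes from getDirIndent/getFileIndent):
+//   {
+//     'version': 0,
+//     'roots': [
+//       {
+//         'type': 'directory',
+//         'name': "/v",
+//         'contents': [
+//           {
+//             'type': 'file',
+//             'name': "a.h",
+//             'external-contents': "/real/a.h"
+//           }
+//         ]
+//       }
+//     ]
+//   }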
+
+void YAMLVFSWriter::write(llvm::raw_ostream &OS) {
+ std::sort(Mappings.begin(), Mappings.end(),
+ [](const YAMLVFSEntry &LHS, const YAMLVFSEntry &RHS) {
+ return LHS.VPath < RHS.VPath;
+ });
+
+ JSONWriter(OS).write(Mappings, IsCaseSensitive);
+}
+
+VFSFromYamlDirIterImpl::VFSFromYamlDirIterImpl(
+ const Twine &_Path, RedirectingFileSystem &FS,
+ RedirectingDirectoryEntry::iterator Begin,
+ RedirectingDirectoryEntry::iterator End, std::error_code &EC)
+ : Dir(_Path.str()), FS(FS), Current(Begin), End(End) {
+ if (Current != End) {
+ SmallString<128> PathStr(Dir);
+ llvm::sys::path::append(PathStr, (*Current)->getName());
+ llvm::ErrorOr<vfs::Status> S = FS.status(PathStr);
+ if (S)
+ CurrentEntry = *S;
+ else
+ EC = S.getError();
+ }
+}
+
+std::error_code VFSFromYamlDirIterImpl::increment() {
+ assert(Current != End && "cannot iterate past end");
+ if (++Current != End) {
+ SmallString<128> PathStr(Dir);
+ llvm::sys::path::append(PathStr, (*Current)->getName());
+ llvm::ErrorOr<vfs::Status> S = FS.status(PathStr);
+ if (!S)
+ return S.getError();
+ CurrentEntry = *S;
+ } else {
+ CurrentEntry = Status();
+ }
+ return std::error_code();
+}
+
+vfs::recursive_directory_iterator::recursive_directory_iterator(FileSystem &FS_,
+ const Twine &Path,
+ std::error_code &EC)
+ : FS(&FS_) {
+ directory_iterator I = FS->dir_begin(Path, EC);
+ if (!EC && I != directory_iterator()) {
+ State = std::make_shared<IterState>();
+ State->push(I);
+ }
+}
+
+vfs::recursive_directory_iterator &
+recursive_directory_iterator::increment(std::error_code &EC) {
+ assert(FS && State && !State->empty() && "incrementing past end");
+ assert(State->top()->isStatusKnown() && "non-canonical end iterator");
+ vfs::directory_iterator End;
+ if (State->top()->isDirectory()) {
+ vfs::directory_iterator I = FS->dir_begin(State->top()->getName(), EC);
+ if (EC)
+ return *this;
+ if (I != End) {
+ State->push(I);
+ return *this;
+ }
+ }
+
+ while (!State->empty() && State->top().increment(EC) == End)
+ State->pop();
+
+ if (State->empty())
+ State.reset(); // end iterator
+
+ return *this;
+}
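+
+// Typical use of the recursive iterator (sketch; error checks elided):
+//   std::error_code EC;
+//   for (vfs::recursive_directory_iterator I(*FS, "/", EC), E; I != E;
+//        I.increment(EC))
+//     llvm::errs() << I->getName() << "\n";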
diff --git a/contrib/llvm/tools/clang/lib/Basic/Warnings.cpp b/contrib/llvm/tools/clang/lib/Basic/Warnings.cpp
new file mode 100644
index 0000000..6306cea
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/Warnings.cpp
@@ -0,0 +1,230 @@
+//===--- Warnings.cpp - C-Language Front-end ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Command line warning options handler.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is responsible for handling all warning options. This includes
+// a number of -Wfoo options and their variants, which are driven by TableGen-
+// generated data, and the special cases -pedantic, -pedantic-errors, -w,
+// -Werror and -Wfatal-errors.
+//
+// Each warning option controls any number of actual warnings.
+// Given a warning option 'foo', the following are valid:
+// -Wfoo, -Wno-foo, -Werror=foo, -Wfatal-errors=foo
+//
+// Remark options are also handled here, analogously, except that they are much
+// simpler because a remark can't be promoted to an error.
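+//
+// A hypothetical use of this entry point:
+//   DiagnosticOptions Opts;
+//   Opts.Warnings.push_back("unused");       // as if -Wunused were given
+//   Opts.Warnings.push_back("error=format"); // as if -Werror=format
+//   ProcessWarningOptions(Diags, Opts);      // set state, then report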
+#include "clang/Basic/AllDiagnostics.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include <algorithm>
+#include <cstring>
+#include <utility>
+using namespace clang;
+
+// EmitUnknownDiagWarning - Emit a warning and a typo-correction hint for an
+// unknown warning option.
+static void EmitUnknownDiagWarning(DiagnosticsEngine &Diags,
+ diag::Flavor Flavor, StringRef Prefix,
+ StringRef Opt) {
+ StringRef Suggestion = DiagnosticIDs::getNearestOption(Flavor, Opt);
+ Diags.Report(diag::warn_unknown_diag_option)
+      << (Flavor == diag::Flavor::WarningOrError ? 0 : 1)
+      << (Prefix.str() += Opt) << !Suggestion.empty()
+      << (Prefix.str() += Suggestion);
+}
+
+void clang::ProcessWarningOptions(DiagnosticsEngine &Diags,
+ const DiagnosticOptions &Opts,
+ bool ReportDiags) {
+ Diags.setSuppressSystemWarnings(true); // Default to -Wno-system-headers
+ Diags.setIgnoreAllWarnings(Opts.IgnoreWarnings);
+ Diags.setShowOverloads(Opts.getShowOverloads());
+
+ Diags.setElideType(Opts.ElideType);
+ Diags.setPrintTemplateTree(Opts.ShowTemplateTree);
+ Diags.setShowColors(Opts.ShowColors);
+
+ // Handle -ferror-limit
+ if (Opts.ErrorLimit)
+ Diags.setErrorLimit(Opts.ErrorLimit);
+ if (Opts.TemplateBacktraceLimit)
+ Diags.setTemplateBacktraceLimit(Opts.TemplateBacktraceLimit);
+ if (Opts.ConstexprBacktraceLimit)
+ Diags.setConstexprBacktraceLimit(Opts.ConstexprBacktraceLimit);
+
+ // If -pedantic or -pedantic-errors was specified, then we want to map all
+ // extension diagnostics onto WARNING or ERROR unless the user has futz'd
+ // around with them explicitly.
+ if (Opts.PedanticErrors)
+ Diags.setExtensionHandlingBehavior(diag::Severity::Error);
+ else if (Opts.Pedantic)
+ Diags.setExtensionHandlingBehavior(diag::Severity::Warning);
+ else
+ Diags.setExtensionHandlingBehavior(diag::Severity::Ignored);
+
+ SmallVector<diag::kind, 10> _Diags;
+  const IntrusiveRefCntPtr<DiagnosticIDs> DiagIDs = Diags.getDiagnosticIDs();
+ // We parse the warning options twice. The first pass sets diagnostic state,
+ // while the second pass reports warnings/errors. This has the effect that
+ // we follow the more canonical "last option wins" paradigm when there are
+ // conflicting options.
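+  // For example, if Opts.Warnings is {"unused", "error=unused", "no-unused"},
+  // the first pass leaves the "unused" group ignored (the last option wins)
+  // and the second pass reports nothing, since every group name is known.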
+ for (unsigned Report = 0, ReportEnd = 2; Report != ReportEnd; ++Report) {
+ bool SetDiagnostic = (Report == 0);
+
+ // If we've set the diagnostic state and are not reporting diagnostics then
+ // we're done.
+ if (!SetDiagnostic && !ReportDiags)
+ break;
+
+ for (unsigned i = 0, e = Opts.Warnings.size(); i != e; ++i) {
+ const auto Flavor = diag::Flavor::WarningOrError;
+ StringRef Opt = Opts.Warnings[i];
+ StringRef OrigOpt = Opts.Warnings[i];
+
+ // Treat -Wformat=0 as an alias for -Wno-format.
+ if (Opt == "format=0")
+ Opt = "no-format";
+
+      // Check to see if this warning starts with "no-"; if so, this is the
+      // negative form of the option.
+ bool isPositive = true;
+ if (Opt.startswith("no-")) {
+ isPositive = false;
+ Opt = Opt.substr(3);
+ }
+
+ // Figure out how this option affects the warning. If -Wfoo, map the
+ // diagnostic to a warning, if -Wno-foo, map it to ignore.
+ diag::Severity Mapping =
+ isPositive ? diag::Severity::Warning : diag::Severity::Ignored;
+
+ // -Wsystem-headers is a special case, not driven by the option table. It
+ // cannot be controlled with -Werror.
+ if (Opt == "system-headers") {
+ if (SetDiagnostic)
+ Diags.setSuppressSystemWarnings(!isPositive);
+ continue;
+ }
+
+ // -Weverything is a special case as well. It implicitly enables all
+ // warnings, including ones not explicitly in a warning group.
+ if (Opt == "everything") {
+ if (SetDiagnostic) {
+ if (isPositive) {
+ Diags.setEnableAllWarnings(true);
+ } else {
+ Diags.setEnableAllWarnings(false);
+ Diags.setSeverityForAll(Flavor, diag::Severity::Ignored);
+ }
+ }
+ continue;
+ }
+
+ // -Werror/-Wno-error is a special case, not controlled by the option
+ // table. It also has the "specifier" form of -Werror=foo and -Werror-foo.
+ if (Opt.startswith("error")) {
+ StringRef Specifier;
+ if (Opt.size() > 5) { // Specifier must be present.
+ if ((Opt[5] != '=' && Opt[5] != '-') || Opt.size() == 6) {
+ if (Report)
+ Diags.Report(diag::warn_unknown_warning_specifier)
+ << "-Werror" << ("-W" + OrigOpt.str());
+ continue;
+ }
+ Specifier = Opt.substr(6);
+ }
+
+ if (Specifier.empty()) {
+ if (SetDiagnostic)
+ Diags.setWarningsAsErrors(isPositive);
+ continue;
+ }
+
+ if (SetDiagnostic) {
+ // Set the warning as error flag for this specifier.
+ Diags.setDiagnosticGroupWarningAsError(Specifier, isPositive);
+ } else if (DiagIDs->getDiagnosticsInGroup(Flavor, Specifier, _Diags)) {
+ EmitUnknownDiagWarning(Diags, Flavor, "-Werror=", Specifier);
+ }
+ continue;
+ }
+
+ // -Wfatal-errors is yet another special case.
+ if (Opt.startswith("fatal-errors")) {
+ StringRef Specifier;
+ if (Opt.size() != 12) {
+ if ((Opt[12] != '=' && Opt[12] != '-') || Opt.size() == 13) {
+ if (Report)
+ Diags.Report(diag::warn_unknown_warning_specifier)
+ << "-Wfatal-errors" << ("-W" + OrigOpt.str());
+ continue;
+ }
+ Specifier = Opt.substr(13);
+ }
+
+ if (Specifier.empty()) {
+ if (SetDiagnostic)
+ Diags.setErrorsAsFatal(isPositive);
+ continue;
+ }
+
+ if (SetDiagnostic) {
+ // Set the error as fatal flag for this specifier.
+ Diags.setDiagnosticGroupErrorAsFatal(Specifier, isPositive);
+ } else if (DiagIDs->getDiagnosticsInGroup(Flavor, Specifier, _Diags)) {
+ EmitUnknownDiagWarning(Diags, Flavor, "-Wfatal-errors=", Specifier);
+ }
+ continue;
+ }
+
+ if (Report) {
+ if (DiagIDs->getDiagnosticsInGroup(Flavor, Opt, _Diags))
+ EmitUnknownDiagWarning(Diags, Flavor, isPositive ? "-W" : "-Wno-",
+ Opt);
+ } else {
+ Diags.setSeverityForGroup(Flavor, Opt, Mapping);
+ }
+ }
+
+ for (unsigned i = 0, e = Opts.Remarks.size(); i != e; ++i) {
+ StringRef Opt = Opts.Remarks[i];
+ const auto Flavor = diag::Flavor::Remark;
+
+      // Check to see if this warning starts with "no-"; if so, this is the
+      // negative form of the option.
+ bool IsPositive = !Opt.startswith("no-");
+ if (!IsPositive) Opt = Opt.substr(3);
+
+ auto Severity = IsPositive ? diag::Severity::Remark
+ : diag::Severity::Ignored;
+
+ // -Reverything sets the state of all remarks. Note that all remarks are
+ // in remark groups, so we don't need a separate 'all remarks enabled'
+ // flag.
+ if (Opt == "everything") {
+ if (SetDiagnostic)
+ Diags.setSeverityForAll(Flavor, Severity);
+ continue;
+ }
+
+ if (Report) {
+ if (DiagIDs->getDiagnosticsInGroup(Flavor, Opt, _Diags))
+ EmitUnknownDiagWarning(Diags, Flavor, IsPositive ? "-R" : "-Rno-",
+ Opt);
+ } else {
+ Diags.setSeverityForGroup(Flavor, Opt,
+ IsPositive ? diag::Severity::Remark
+ : diag::Severity::Ignored);
+ }
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
new file mode 100644
index 0000000..a65f270
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
@@ -0,0 +1,116 @@
+//===----- ABIInfo.h - ABI information access & encapsulation ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_ABIINFO_H
+#define LLVM_CLANG_LIB_CODEGEN_ABIINFO_H
+
+#include "clang/AST/Type.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Type.h"
+
+namespace llvm {
+ class Value;
+ class LLVMContext;
+ class DataLayout;
+}
+
+namespace clang {
+ class ASTContext;
+ class TargetInfo;
+
+ namespace CodeGen {
+ class ABIArgInfo;
+ class Address;
+ class CGCXXABI;
+ class CGFunctionInfo;
+ class CodeGenFunction;
+ class CodeGenTypes;
+ }
+
+ // FIXME: All of this stuff should be part of the target interface
+ // somehow. It is currently here because it is not clear how to factor
+ // the targets to support this, since the Targets currently live in a
+ // layer below types n'stuff.
+
+ /// ABIInfo - Target specific hooks for defining how a type should be
+ /// passed or returned from functions.
+ class ABIInfo {
+ public:
+ CodeGen::CodeGenTypes &CGT;
+ protected:
+ llvm::CallingConv::ID RuntimeCC;
+ llvm::CallingConv::ID BuiltinCC;
+ public:
+ ABIInfo(CodeGen::CodeGenTypes &cgt)
+ : CGT(cgt),
+ RuntimeCC(llvm::CallingConv::C),
+ BuiltinCC(llvm::CallingConv::C) {}
+
+ virtual ~ABIInfo();
+
+ CodeGen::CGCXXABI &getCXXABI() const;
+ ASTContext &getContext() const;
+ llvm::LLVMContext &getVMContext() const;
+ const llvm::DataLayout &getDataLayout() const;
+ const TargetInfo &getTarget() const;
+
+ /// Return the calling convention to use for system runtime
+ /// functions.
+ llvm::CallingConv::ID getRuntimeCC() const {
+ return RuntimeCC;
+ }
+
+ /// Return the calling convention to use for compiler builtins
+ llvm::CallingConv::ID getBuiltinCC() const {
+ return BuiltinCC;
+ }
+
+ virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const = 0;
+
+ /// EmitVAArg - Emit the target dependent code to load a value of
+ /// \arg Ty from the va_list pointed to by \arg VAListAddr.
+
+ // FIXME: This is a gaping layering violation if we wanted to drop
+ // the ABI information any lower than CodeGen. Of course, for
+ // VAArg handling it has to be at this level; there is no way to
+ // abstract this out.
+ virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF,
+ CodeGen::Address VAListAddr,
+ QualType Ty) const = 0;
+
+ /// Emit the target dependent code to load a value of
+ /// \arg Ty from the \c __builtin_ms_va_list pointed to by \arg VAListAddr.
+ virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF,
+ CodeGen::Address VAListAddr,
+ QualType Ty) const;
+
+ virtual bool isHomogeneousAggregateBaseType(QualType Ty) const;
+
+ virtual bool isHomogeneousAggregateSmallEnough(const Type *Base,
+ uint64_t Members) const;
+
+ virtual bool shouldSignExtUnsignedType(QualType Ty) const;
+
+ bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
+ uint64_t &Members) const;
+
+ /// A convenience method to return an indirect ABIArgInfo with an
+ /// expected alignment equal to the ABI alignment of the given type.
+ CodeGen::ABIArgInfo
+ getNaturalAlignIndirect(QualType Ty, bool ByRef = true,
+ bool Realign = false,
+ llvm::Type *Padding = nullptr) const;
+
+ CodeGen::ABIArgInfo
+ getNaturalAlignIndirectInReg(QualType Ty, bool Realign = false) const;
+ };
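+
+  // A hypothetical subclass would override at least the pure virtual hooks
+  // (sketch only; not a real target):
+  //   class NaiveABIInfo : public ABIInfo {
+  //   public:
+  //     NaiveABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+  //     void computeInfo(CodeGen::CGFunctionInfo &FI) const override;
+  //     CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF,
+  //                                CodeGen::Address VAListAddr,
+  //                                QualType Ty) const override;
+  //   };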
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/Address.h b/contrib/llvm/tools/clang/lib/CodeGen/Address.h
new file mode 100644
index 0000000..9d145fa
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/Address.h
@@ -0,0 +1,126 @@
+//===-- Address.h - An aligned address -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class provides a simple wrapper for a pair of a pointer and an
+// alignment.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_ADDRESS_H
+#define LLVM_CLANG_LIB_CODEGEN_ADDRESS_H
+
+#include "llvm/IR/Constants.h"
+#include "clang/AST/CharUnits.h"
+
+namespace clang {
+namespace CodeGen {
+
+/// An aligned address.
+class Address {
+ llvm::Value *Pointer;
+ CharUnits Alignment;
+public:
+ Address(llvm::Value *pointer, CharUnits alignment)
+ : Pointer(pointer), Alignment(alignment) {
+ assert((!alignment.isZero() || pointer == nullptr) &&
+ "creating valid address with invalid alignment");
+ }
+
+ static Address invalid() { return Address(nullptr, CharUnits()); }
+ bool isValid() const { return Pointer != nullptr; }
+
+ llvm::Value *getPointer() const {
+ assert(isValid());
+ return Pointer;
+ }
+
+ /// Return the type of the pointer value.
+ llvm::PointerType *getType() const {
+ return llvm::cast<llvm::PointerType>(getPointer()->getType());
+ }
+
+ /// Return the type of the values stored in this address.
+ ///
+ /// When IR pointer types lose their element type, we should simply
+ /// store it in Address instead for the convenience of writing code.
+ llvm::Type *getElementType() const {
+ return getType()->getElementType();
+ }
+
+ /// Return the address space that this address resides in.
+ unsigned getAddressSpace() const {
+ return getType()->getAddressSpace();
+ }
+
+ /// Return the IR name of the pointer value.
+ llvm::StringRef getName() const {
+ return getPointer()->getName();
+ }
+
+ /// Return the alignment of this pointer.
+ CharUnits getAlignment() const {
+ assert(isValid());
+ return Alignment;
+ }
+};
+
+/// A specialization of Address that requires the address to be an
+/// LLVM Constant.
+class ConstantAddress : public Address {
+public:
+ ConstantAddress(llvm::Constant *pointer, CharUnits alignment)
+ : Address(pointer, alignment) {}
+
+ static ConstantAddress invalid() {
+ return ConstantAddress(nullptr, CharUnits());
+ }
+
+ llvm::Constant *getPointer() const {
+ return llvm::cast<llvm::Constant>(Address::getPointer());
+ }
+
+ ConstantAddress getBitCast(llvm::Type *ty) const {
+ return ConstantAddress(llvm::ConstantExpr::getBitCast(getPointer(), ty),
+ getAlignment());
+ }
+
+ ConstantAddress getElementBitCast(llvm::Type *ty) const {
+ return getBitCast(ty->getPointerTo(getAddressSpace()));
+ }
+
+ static bool isaImpl(Address addr) {
+ return llvm::isa<llvm::Constant>(addr.getPointer());
+ }
+ static ConstantAddress castImpl(Address addr) {
+ return ConstantAddress(llvm::cast<llvm::Constant>(addr.getPointer()),
+ addr.getAlignment());
+ }
+};
+
+}
+}
+
+namespace llvm {
+ // Present a minimal LLVM-like casting interface.
+ template <class U> inline U cast(clang::CodeGen::Address addr) {
+ return U::castImpl(addr);
+ }
+ template <class U> inline bool isa(clang::CodeGen::Address addr) {
+ return U::isaImpl(addr);
+ }
+}
+
+namespace clang {
+ // Make our custom isa and cast available in namespace clang, to mirror
+ // what we do for LLVM's versions in Basic/LLVM.h.
+ using llvm::isa;
+ using llvm::cast;
+}
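+
+// Example (illustrative; assumes some llvm::Constant *G with known 8-byte
+// alignment, written from inside namespace clang):
+//   CodeGen::Address A(G, CharUnits::fromQuantity(8));
+//   if (isa<CodeGen::ConstantAddress>(A))
+//     CodeGen::ConstantAddress CA = cast<CodeGen::ConstantAddress>(A);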
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
new file mode 100644
index 0000000..7032d00
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
@@ -0,0 +1,716 @@
+//===--- BackendUtil.cpp - LLVM Backend Utilities -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/BackendUtil.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/TargetOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/Utils.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Bitcode/BitcodeWriterPass.h"
+#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/FunctionInfo.h"
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/Transforms/IPO.h"
+#include "llvm/Transforms/IPO/PassManagerBuilder.h"
+#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/Transforms/ObjCARC.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/SymbolRewriter.h"
+#include <memory>
+using namespace clang;
+using namespace llvm;
+
+namespace {
+
+class EmitAssemblyHelper {
+ DiagnosticsEngine &Diags;
+ const CodeGenOptions &CodeGenOpts;
+ const clang::TargetOptions &TargetOpts;
+ const LangOptions &LangOpts;
+ Module *TheModule;
+ std::unique_ptr<FunctionInfoIndex> FunctionIndex;
+
+ Timer CodeGenerationTime;
+
+ mutable legacy::PassManager *CodeGenPasses;
+ mutable legacy::PassManager *PerModulePasses;
+ mutable legacy::FunctionPassManager *PerFunctionPasses;
+
+private:
+ TargetIRAnalysis getTargetIRAnalysis() const {
+ if (TM)
+ return TM->getTargetIRAnalysis();
+
+ return TargetIRAnalysis();
+ }
+
+ legacy::PassManager *getCodeGenPasses() const {
+ if (!CodeGenPasses) {
+ CodeGenPasses = new legacy::PassManager();
+ CodeGenPasses->add(
+ createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
+ }
+ return CodeGenPasses;
+ }
+
+ legacy::PassManager *getPerModulePasses() const {
+ if (!PerModulePasses) {
+ PerModulePasses = new legacy::PassManager();
+ PerModulePasses->add(
+ createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
+ }
+ return PerModulePasses;
+ }
+
+ legacy::FunctionPassManager *getPerFunctionPasses() const {
+ if (!PerFunctionPasses) {
+ PerFunctionPasses = new legacy::FunctionPassManager(TheModule);
+ PerFunctionPasses->add(
+ createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
+ }
+ return PerFunctionPasses;
+ }
+
+ void CreatePasses();
+
+ /// Generates the TargetMachine.
+  /// Returns null if it is unable to create the target machine.
+ /// Some of our clang tests specify triples which are not built
+ /// into clang. This is okay because these tests check the generated
+ /// IR, and they require DataLayout which depends on the triple.
+ /// In this case, we allow this method to fail and not report an error.
+ /// When MustCreateTM is used, we print an error if we are unable to load
+ /// the requested target.
+ TargetMachine *CreateTargetMachine(bool MustCreateTM);
+
+ /// Add passes necessary to emit assembly or LLVM IR.
+ ///
+ /// \return True on success.
+ bool AddEmitPasses(BackendAction Action, raw_pwrite_stream &OS);
+
+public:
+ EmitAssemblyHelper(DiagnosticsEngine &_Diags, const CodeGenOptions &CGOpts,
+ const clang::TargetOptions &TOpts,
+ const LangOptions &LOpts, Module *M,
+ std::unique_ptr<FunctionInfoIndex> Index)
+ : Diags(_Diags), CodeGenOpts(CGOpts), TargetOpts(TOpts), LangOpts(LOpts),
+ TheModule(M), FunctionIndex(std::move(Index)),
+ CodeGenerationTime("Code Generation Time"), CodeGenPasses(nullptr),
+ PerModulePasses(nullptr), PerFunctionPasses(nullptr) {}
+
+ ~EmitAssemblyHelper() {
+ delete CodeGenPasses;
+ delete PerModulePasses;
+ delete PerFunctionPasses;
+ if (CodeGenOpts.DisableFree)
+ BuryPointer(std::move(TM));
+ }
+
+ std::unique_ptr<TargetMachine> TM;
+
+ void EmitAssembly(BackendAction Action, raw_pwrite_stream *OS);
+};
+
+// We need this wrapper to access LangOpts and CGOpts from extension functions
+// that we add to the PassManagerBuilder.
+class PassManagerBuilderWrapper : public PassManagerBuilder {
+public:
+ PassManagerBuilderWrapper(const CodeGenOptions &CGOpts,
+ const LangOptions &LangOpts)
+ : PassManagerBuilder(), CGOpts(CGOpts), LangOpts(LangOpts) {}
+ const CodeGenOptions &getCGOpts() const { return CGOpts; }
+ const LangOptions &getLangOpts() const { return LangOpts; }
+private:
+ const CodeGenOptions &CGOpts;
+ const LangOptions &LangOpts;
+};
+
+}
+
+static void addObjCARCAPElimPass(const PassManagerBuilder &Builder,
+                                 PassManagerBase &PM) {
+  if (Builder.OptLevel > 0)
+    PM.add(createObjCARCAPElimPass());
+}
+
+static void addObjCARCExpandPass(const PassManagerBuilder &Builder,
+                                 PassManagerBase &PM) {
+  if (Builder.OptLevel > 0)
+    PM.add(createObjCARCExpandPass());
+}
+
+static void addObjCARCOptPass(const PassManagerBuilder &Builder,
+                              PassManagerBase &PM) {
+  if (Builder.OptLevel > 0)
+    PM.add(createObjCARCOptPass());
+}
+
+static void addAddDiscriminatorsPass(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM) {
+ PM.add(createAddDiscriminatorsPass());
+}
+
+static void addBoundsCheckingPass(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM) {
+ PM.add(createBoundsCheckingPass());
+}
+
+static void addSanitizerCoveragePass(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM) {
+ const PassManagerBuilderWrapper &BuilderWrapper =
+ static_cast<const PassManagerBuilderWrapper&>(Builder);
+ const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
+ SanitizerCoverageOptions Opts;
+ Opts.CoverageType =
+ static_cast<SanitizerCoverageOptions::Type>(CGOpts.SanitizeCoverageType);
+ Opts.IndirectCalls = CGOpts.SanitizeCoverageIndirectCalls;
+ Opts.TraceBB = CGOpts.SanitizeCoverageTraceBB;
+ Opts.TraceCmp = CGOpts.SanitizeCoverageTraceCmp;
+ Opts.Use8bitCounters = CGOpts.SanitizeCoverage8bitCounters;
+ PM.add(createSanitizerCoverageModulePass(Opts));
+}
+
+static void addAddressSanitizerPasses(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM) {
+ const PassManagerBuilderWrapper &BuilderWrapper =
+ static_cast<const PassManagerBuilderWrapper&>(Builder);
+ const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
+ bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::Address);
+ PM.add(createAddressSanitizerFunctionPass(/*CompileKernel*/false, Recover));
+ PM.add(createAddressSanitizerModulePass(/*CompileKernel*/false, Recover));
+}
+
+static void addKernelAddressSanitizerPasses(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM) {
+ PM.add(createAddressSanitizerFunctionPass(/*CompileKernel*/true,
+ /*Recover*/true));
+ PM.add(createAddressSanitizerModulePass(/*CompileKernel*/true,
+ /*Recover*/true));
+}
+
+static void addMemorySanitizerPass(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM) {
+ const PassManagerBuilderWrapper &BuilderWrapper =
+ static_cast<const PassManagerBuilderWrapper&>(Builder);
+ const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
+ PM.add(createMemorySanitizerPass(CGOpts.SanitizeMemoryTrackOrigins));
+
+ // MemorySanitizer inserts complex instrumentation that mostly follows
+ // the logic of the original code, but operates on "shadow" values.
+ // It can benefit from re-running some general purpose optimization passes.
+ if (Builder.OptLevel > 0) {
+ PM.add(createEarlyCSEPass());
+ PM.add(createReassociatePass());
+ PM.add(createLICMPass());
+ PM.add(createGVNPass());
+ PM.add(createInstructionCombiningPass());
+ PM.add(createDeadStoreEliminationPass());
+ }
+}
+
+static void addThreadSanitizerPass(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM) {
+ PM.add(createThreadSanitizerPass());
+}
+
+static void addDataFlowSanitizerPass(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM) {
+ const PassManagerBuilderWrapper &BuilderWrapper =
+ static_cast<const PassManagerBuilderWrapper&>(Builder);
+ const LangOptions &LangOpts = BuilderWrapper.getLangOpts();
+ PM.add(createDataFlowSanitizerPass(LangOpts.SanitizerBlacklistFiles));
+}
+
+static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
+ const CodeGenOptions &CodeGenOpts) {
+ TargetLibraryInfoImpl *TLII = new TargetLibraryInfoImpl(TargetTriple);
+ if (!CodeGenOpts.SimplifyLibCalls)
+ TLII->disableAllFunctions();
+ else {
+ // Disable individual libc/libm calls in TargetLibraryInfo.
+ LibFunc::Func F;
+ for (auto &FuncName : CodeGenOpts.getNoBuiltinFuncs())
+ if (TLII->getLibFunc(FuncName, F))
+ TLII->setUnavailable(F);
+ }
+
+ switch (CodeGenOpts.getVecLib()) {
+ case CodeGenOptions::Accelerate:
+ TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::Accelerate);
+ break;
+ default:
+ break;
+ }
+ return TLII;
+}
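+
+// For example, -fno-builtin-memcpy puts "memcpy" into getNoBuiltinFuncs(),
+// so the matching LibFunc entry is marked unavailable and the optimizer will
+// not synthesize calls to it for this module.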
+
+static void addSymbolRewriterPass(const CodeGenOptions &Opts,
+ legacy::PassManager *MPM) {
+ llvm::SymbolRewriter::RewriteDescriptorList DL;
+
+ llvm::SymbolRewriter::RewriteMapParser MapParser;
+ for (const auto &MapFile : Opts.RewriteMapFiles)
+ MapParser.parse(MapFile, &DL);
+
+ MPM->add(createRewriteSymbolsPass(DL));
+}
+
+void EmitAssemblyHelper::CreatePasses() {
+ if (CodeGenOpts.DisableLLVMPasses)
+ return;
+
+ unsigned OptLevel = CodeGenOpts.OptimizationLevel;
+ CodeGenOptions::InliningMethod Inlining = CodeGenOpts.getInlining();
+
+ // Handle disabling of LLVM optimization, where we want to preserve the
+ // internal module before any optimization.
+ if (CodeGenOpts.DisableLLVMOpts) {
+ OptLevel = 0;
+ Inlining = CodeGenOpts.NoInlining;
+ }
+
+ PassManagerBuilderWrapper PMBuilder(CodeGenOpts, LangOpts);
+
+ // Figure out TargetLibraryInfo.
+ Triple TargetTriple(TheModule->getTargetTriple());
+ PMBuilder.LibraryInfo = createTLII(TargetTriple, CodeGenOpts);
+
+ switch (Inlining) {
+ case CodeGenOptions::NoInlining:
+ break;
+ case CodeGenOptions::NormalInlining: {
+ PMBuilder.Inliner =
+ createFunctionInliningPass(OptLevel, CodeGenOpts.OptimizeSize);
+ break;
+ }
+ case CodeGenOptions::OnlyAlwaysInlining:
+ // Respect always_inline.
+ if (OptLevel == 0)
+ // Do not insert lifetime intrinsics at -O0.
+ PMBuilder.Inliner = createAlwaysInlinerPass(false);
+ else
+ PMBuilder.Inliner = createAlwaysInlinerPass();
+ break;
+ }
+
+ PMBuilder.OptLevel = OptLevel;
+ PMBuilder.SizeLevel = CodeGenOpts.OptimizeSize;
+ PMBuilder.BBVectorize = CodeGenOpts.VectorizeBB;
+ PMBuilder.SLPVectorize = CodeGenOpts.VectorizeSLP;
+ PMBuilder.LoopVectorize = CodeGenOpts.VectorizeLoop;
+
+ PMBuilder.DisableUnitAtATime = !CodeGenOpts.UnitAtATime;
+ PMBuilder.DisableUnrollLoops = !CodeGenOpts.UnrollLoops;
+ PMBuilder.MergeFunctions = CodeGenOpts.MergeFunctions;
+ PMBuilder.PrepareForLTO = CodeGenOpts.PrepareForLTO;
+ PMBuilder.RerollLoops = CodeGenOpts.RerollLoops;
+
+ legacy::PassManager *MPM = getPerModulePasses();
+
+ // If we are performing a ThinLTO importing compile, invoke the LTO
+ // pipeline and pass down the in-memory function index.
+ if (!CodeGenOpts.ThinLTOIndexFile.empty()) {
+ assert(FunctionIndex && "Expected non-empty function index");
+ PMBuilder.FunctionIndex = FunctionIndex.get();
+ PMBuilder.populateLTOPassManager(*MPM);
+ return;
+ }
+
+ PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
+ addAddDiscriminatorsPass);
+
+ // In ObjC ARC mode, add the main ARC optimization passes.
+ if (LangOpts.ObjCAutoRefCount) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
+ addObjCARCExpandPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_ModuleOptimizerEarly,
+ addObjCARCAPElimPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
+ addObjCARCOptPass);
+ }
+
+ if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds)) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
+ addBoundsCheckingPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addBoundsCheckingPass);
+ }
+
+ if (CodeGenOpts.SanitizeCoverageType ||
+ CodeGenOpts.SanitizeCoverageIndirectCalls ||
+ CodeGenOpts.SanitizeCoverageTraceCmp) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
+ addSanitizerCoveragePass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addSanitizerCoveragePass);
+ }
+
+ if (LangOpts.Sanitize.has(SanitizerKind::Address)) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
+ addAddressSanitizerPasses);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addAddressSanitizerPasses);
+ }
+
+ if (LangOpts.Sanitize.has(SanitizerKind::KernelAddress)) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
+ addKernelAddressSanitizerPasses);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addKernelAddressSanitizerPasses);
+ }
+
+ if (LangOpts.Sanitize.has(SanitizerKind::Memory)) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
+ addMemorySanitizerPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addMemorySanitizerPass);
+ }
+
+ if (LangOpts.Sanitize.has(SanitizerKind::Thread)) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
+ addThreadSanitizerPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addThreadSanitizerPass);
+ }
+
+ if (LangOpts.Sanitize.has(SanitizerKind::DataFlow)) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
+ addDataFlowSanitizerPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addDataFlowSanitizerPass);
+ }
+
+ // Set up the per-function pass manager.
+ legacy::FunctionPassManager *FPM = getPerFunctionPasses();
+ if (CodeGenOpts.VerifyModule)
+ FPM->add(createVerifierPass());
+ PMBuilder.populateFunctionPassManager(*FPM);
+
+ // Set up the per-module pass manager.
+ if (!CodeGenOpts.RewriteMapFiles.empty())
+ addSymbolRewriterPass(CodeGenOpts, MPM);
+
+ if (!CodeGenOpts.DisableGCov &&
+ (CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes)) {
+ // Not using 'GCOVOptions::getDefault' allows us to avoid exiting if
+ // LLVM's -default-gcov-version flag is set to something invalid.
+ GCOVOptions Options;
+ Options.EmitNotes = CodeGenOpts.EmitGcovNotes;
+ Options.EmitData = CodeGenOpts.EmitGcovArcs;
+ memcpy(Options.Version, CodeGenOpts.CoverageVersion, 4);
+ Options.UseCfgChecksum = CodeGenOpts.CoverageExtraChecksum;
+ Options.NoRedZone = CodeGenOpts.DisableRedZone;
+ Options.FunctionNamesInData =
+ !CodeGenOpts.CoverageNoFunctionNamesInData;
+ Options.ExitBlockBeforeBody = CodeGenOpts.CoverageExitBlockBeforeBody;
+ MPM->add(createGCOVProfilerPass(Options));
+ if (CodeGenOpts.getDebugInfo() == CodeGenOptions::NoDebugInfo)
+ MPM->add(createStripSymbolsPass(true));
+ }
+
+ if (CodeGenOpts.ProfileInstrGenerate) {
+ InstrProfOptions Options;
+ Options.NoRedZone = CodeGenOpts.DisableRedZone;
+ Options.InstrProfileOutput = CodeGenOpts.InstrProfileOutput;
+ MPM->add(createInstrProfilingPass(Options));
+ }
+
+ if (!CodeGenOpts.SampleProfileFile.empty())
+ MPM->add(createSampleProfileLoaderPass(CodeGenOpts.SampleProfileFile));
+
+ PMBuilder.populateModulePassManager(*MPM);
+}
+
+TargetMachine *EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
+ // Create the TargetMachine for generating code.
+ std::string Error;
+ std::string Triple = TheModule->getTargetTriple();
+ const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error);
+ if (!TheTarget) {
+ if (MustCreateTM)
+ Diags.Report(diag::err_fe_unable_to_create_target) << Error;
+ return nullptr;
+ }
+
+ unsigned CodeModel =
+ llvm::StringSwitch<unsigned>(CodeGenOpts.CodeModel)
+ .Case("small", llvm::CodeModel::Small)
+ .Case("kernel", llvm::CodeModel::Kernel)
+ .Case("medium", llvm::CodeModel::Medium)
+ .Case("large", llvm::CodeModel::Large)
+ .Case("default", llvm::CodeModel::Default)
+ .Default(~0u);
+ assert(CodeModel != ~0u && "invalid code model!");
+ llvm::CodeModel::Model CM = static_cast<llvm::CodeModel::Model>(CodeModel);
+
+ SmallVector<const char *, 16> BackendArgs;
+ BackendArgs.push_back("clang"); // Fake program name.
+ if (!CodeGenOpts.DebugPass.empty()) {
+ BackendArgs.push_back("-debug-pass");
+ BackendArgs.push_back(CodeGenOpts.DebugPass.c_str());
+ }
+ if (!CodeGenOpts.LimitFloatPrecision.empty()) {
+ BackendArgs.push_back("-limit-float-precision");
+ BackendArgs.push_back(CodeGenOpts.LimitFloatPrecision.c_str());
+ }
+ for (const std::string &BackendOption : CodeGenOpts.BackendOptions)
+ BackendArgs.push_back(BackendOption.c_str());
+ BackendArgs.push_back(nullptr);
+ llvm::cl::ParseCommandLineOptions(BackendArgs.size() - 1,
+ BackendArgs.data());
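+
+  // E.g., a -debug-pass value of "Structure" reaches LLVM here as the argv
+  // {"clang", "-debug-pass", "Structure"} (illustrative).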
+
+ std::string FeaturesStr =
+ llvm::join(TargetOpts.Features.begin(), TargetOpts.Features.end(), ",");
+
+ // Keep this synced with the equivalent code in tools/driver/cc1as_main.cpp.
+ llvm::Reloc::Model RM = llvm::Reloc::Default;
+ if (CodeGenOpts.RelocationModel == "static") {
+ RM = llvm::Reloc::Static;
+ } else if (CodeGenOpts.RelocationModel == "pic") {
+ RM = llvm::Reloc::PIC_;
+ } else {
+ assert(CodeGenOpts.RelocationModel == "dynamic-no-pic" &&
+ "Invalid PIC model!");
+ RM = llvm::Reloc::DynamicNoPIC;
+ }
+
+ CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
+ switch (CodeGenOpts.OptimizationLevel) {
+ default: break;
+ case 0: OptLevel = CodeGenOpt::None; break;
+ case 3: OptLevel = CodeGenOpt::Aggressive; break;
+ }
+
+ llvm::TargetOptions Options;
+
+ if (!TargetOpts.Reciprocals.empty())
+ Options.Reciprocals = TargetRecip(TargetOpts.Reciprocals);
+
+ Options.ThreadModel =
+ llvm::StringSwitch<llvm::ThreadModel::Model>(CodeGenOpts.ThreadModel)
+ .Case("posix", llvm::ThreadModel::POSIX)
+ .Case("single", llvm::ThreadModel::Single);
+
+ // Set float ABI type.
+ assert((CodeGenOpts.FloatABI == "soft" || CodeGenOpts.FloatABI == "softfp" ||
+ CodeGenOpts.FloatABI == "hard" || CodeGenOpts.FloatABI.empty()) &&
+ "Invalid Floating Point ABI!");
+ Options.FloatABIType =
+ llvm::StringSwitch<llvm::FloatABI::ABIType>(CodeGenOpts.FloatABI)
+ .Case("soft", llvm::FloatABI::Soft)
+ .Case("softfp", llvm::FloatABI::Soft)
+ .Case("hard", llvm::FloatABI::Hard)
+ .Default(llvm::FloatABI::Default);
+
+ // Set FP fusion mode.
+ switch (CodeGenOpts.getFPContractMode()) {
+ case CodeGenOptions::FPC_Off:
+ Options.AllowFPOpFusion = llvm::FPOpFusion::Strict;
+ break;
+ case CodeGenOptions::FPC_On:
+ Options.AllowFPOpFusion = llvm::FPOpFusion::Standard;
+ break;
+ case CodeGenOptions::FPC_Fast:
+ Options.AllowFPOpFusion = llvm::FPOpFusion::Fast;
+ break;
+ }
+
+ Options.UseInitArray = CodeGenOpts.UseInitArray;
+ Options.DisableIntegratedAS = CodeGenOpts.DisableIntegratedAS;
+ Options.CompressDebugSections = CodeGenOpts.CompressDebugSections;
+
+ // Set EABI version.
+ Options.EABIVersion = llvm::StringSwitch<llvm::EABI>(CodeGenOpts.EABIVersion)
+ .Case("4", llvm::EABI::EABI4)
+ .Case("5", llvm::EABI::EABI5)
+ .Case("gnu", llvm::EABI::GNU)
+ .Default(llvm::EABI::Default);
+
+ Options.LessPreciseFPMADOption = CodeGenOpts.LessPreciseFPMAD;
+ Options.NoInfsFPMath = CodeGenOpts.NoInfsFPMath;
+ Options.NoNaNsFPMath = CodeGenOpts.NoNaNsFPMath;
+ Options.NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
+ Options.UnsafeFPMath = CodeGenOpts.UnsafeFPMath;
+ Options.StackAlignmentOverride = CodeGenOpts.StackAlignment;
+ Options.PositionIndependentExecutable = LangOpts.PIELevel != 0;
+ Options.FunctionSections = CodeGenOpts.FunctionSections;
+ Options.DataSections = CodeGenOpts.DataSections;
+ Options.UniqueSectionNames = CodeGenOpts.UniqueSectionNames;
+ Options.EmulatedTLS = CodeGenOpts.EmulatedTLS;
+ switch (CodeGenOpts.getDebuggerTuning()) {
+ case CodeGenOptions::DebuggerKindGDB:
+ Options.DebuggerTuning = llvm::DebuggerKind::GDB;
+ break;
+ case CodeGenOptions::DebuggerKindLLDB:
+ Options.DebuggerTuning = llvm::DebuggerKind::LLDB;
+ break;
+ case CodeGenOptions::DebuggerKindSCE:
+ Options.DebuggerTuning = llvm::DebuggerKind::SCE;
+ break;
+ default:
+ break;
+ }
+
+ Options.MCOptions.MCRelaxAll = CodeGenOpts.RelaxAll;
+ Options.MCOptions.MCSaveTempLabels = CodeGenOpts.SaveTempLabels;
+ Options.MCOptions.MCUseDwarfDirectory = !CodeGenOpts.NoDwarfDirectoryAsm;
+ Options.MCOptions.MCNoExecStack = CodeGenOpts.NoExecStack;
+ Options.MCOptions.MCIncrementalLinkerCompatible =
+ CodeGenOpts.IncrementalLinkerCompatible;
+ Options.MCOptions.MCFatalWarnings = CodeGenOpts.FatalWarnings;
+ Options.MCOptions.AsmVerbose = CodeGenOpts.AsmVerbose;
+ Options.MCOptions.ABIName = TargetOpts.ABI;
+
+ TargetMachine *TM = TheTarget->createTargetMachine(Triple, TargetOpts.CPU,
+ FeaturesStr, Options,
+ RM, CM, OptLevel);
+
+ return TM;
+}
+
+bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
+ raw_pwrite_stream &OS) {
+
+ // Create the code generator passes.
+ legacy::PassManager *PM = getCodeGenPasses();
+
+ // Add LibraryInfo.
+ llvm::Triple TargetTriple(TheModule->getTargetTriple());
+ std::unique_ptr<TargetLibraryInfoImpl> TLII(
+ createTLII(TargetTriple, CodeGenOpts));
+ PM->add(new TargetLibraryInfoWrapperPass(*TLII));
+
+ // Normal mode, emit a .s or .o file by running the code generator. Note,
+ // this also adds codegenerator level optimization passes.
+ TargetMachine::CodeGenFileType CGFT = TargetMachine::CGFT_AssemblyFile;
+ if (Action == Backend_EmitObj)
+ CGFT = TargetMachine::CGFT_ObjectFile;
+ else if (Action == Backend_EmitMCNull)
+ CGFT = TargetMachine::CGFT_Null;
+ else
+ assert(Action == Backend_EmitAssembly && "Invalid action!");
+
+ // Add ObjC ARC final-cleanup optimizations. This is done as part of the
+ // "codegen" passes so that it isn't run multiple times when there is
+ // inlining happening.
+ if (CodeGenOpts.OptimizationLevel > 0)
+ PM->add(createObjCARCContractPass());
+
+ if (TM->addPassesToEmitFile(*PM, OS, CGFT,
+ /*DisableVerify=*/!CodeGenOpts.VerifyModule)) {
+ Diags.Report(diag::err_fe_unable_to_interface_with_target);
+ return false;
+ }
+
+ return true;
+}
+
+void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
+ raw_pwrite_stream *OS) {
+ TimeRegion Region(llvm::TimePassesIsEnabled ? &CodeGenerationTime : nullptr);
+
+ bool UsesCodeGen = (Action != Backend_EmitNothing &&
+ Action != Backend_EmitBC &&
+ Action != Backend_EmitLL);
+ if (!TM)
+ TM.reset(CreateTargetMachine(UsesCodeGen));
+
+ if (UsesCodeGen && !TM)
+ return;
+ if (TM)
+ TheModule->setDataLayout(TM->createDataLayout());
+ CreatePasses();
+
+ switch (Action) {
+ case Backend_EmitNothing:
+ break;
+
+ case Backend_EmitBC:
+ getPerModulePasses()->add(createBitcodeWriterPass(
+ *OS, CodeGenOpts.EmitLLVMUseLists, CodeGenOpts.EmitFunctionSummary));
+ break;
+
+ case Backend_EmitLL:
+ getPerModulePasses()->add(
+ createPrintModulePass(*OS, "", CodeGenOpts.EmitLLVMUseLists));
+ break;
+
+ default:
+ if (!AddEmitPasses(Action, *OS))
+ return;
+ }
+
+ // Before executing passes, print the final values of the LLVM options.
+ cl::PrintOptionValues();
+
+ // Run passes. For now we do all passes at once, but eventually we
+ // would like to have the option of streaming code generation.
+
+ if (PerFunctionPasses) {
+ PrettyStackTraceString CrashInfo("Per-function optimization");
+
+ PerFunctionPasses->doInitialization();
+ for (Function &F : *TheModule)
+ if (!F.isDeclaration())
+ PerFunctionPasses->run(F);
+ PerFunctionPasses->doFinalization();
+ }
+
+ if (PerModulePasses) {
+ PrettyStackTraceString CrashInfo("Per-module optimization passes");
+ PerModulePasses->run(*TheModule);
+ }
+
+ if (CodeGenPasses) {
+ PrettyStackTraceString CrashInfo("Code generation");
+ CodeGenPasses->run(*TheModule);
+ }
+}
+
+void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
+ const CodeGenOptions &CGOpts,
+ const clang::TargetOptions &TOpts,
+ const LangOptions &LOpts, StringRef TDesc,
+ Module *M, BackendAction Action,
+ raw_pwrite_stream *OS,
+ std::unique_ptr<FunctionInfoIndex> Index) {
+ EmitAssemblyHelper AsmHelper(Diags, CGOpts, TOpts, LOpts, M,
+ std::move(Index));
+
+ AsmHelper.EmitAssembly(Action, OS);
+
+ // If an optional clang TargetInfo description string was passed in, use it to
+ // verify the LLVM TargetMachine's DataLayout.
+ if (AsmHelper.TM && !TDesc.empty()) {
+ std::string DLDesc = M->getDataLayout().getStringRepresentation();
+ if (DLDesc != TDesc) {
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error, "backend data layout '%0' does not match "
+ "expected target description '%1'");
+ Diags.Report(DiagID) << DLDesc << TDesc;
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGAtomic.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGAtomic.cpp
new file mode 100644
index 0000000..24de30b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGAtomic.cpp
@@ -0,0 +1,1873 @@
+//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the code for emitting atomic operations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGCall.h"
+#include "CGRecordLayout.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Operator.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+ class AtomicInfo {
+ CodeGenFunction &CGF;
+ QualType AtomicTy;
+ QualType ValueTy;
+ uint64_t AtomicSizeInBits;
+ uint64_t ValueSizeInBits;
+ CharUnits AtomicAlign;
+ CharUnits ValueAlign;
+ CharUnits LValueAlign;
+ TypeEvaluationKind EvaluationKind;
+ bool UseLibcall;
+ LValue LVal;
+ CGBitFieldInfo BFI;
+ public:
+ AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
+ : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
+ EvaluationKind(TEK_Scalar), UseLibcall(true) {
+ assert(!lvalue.isGlobalReg());
+ ASTContext &C = CGF.getContext();
+ if (lvalue.isSimple()) {
+ AtomicTy = lvalue.getType();
+ if (auto *ATy = AtomicTy->getAs<AtomicType>())
+ ValueTy = ATy->getValueType();
+ else
+ ValueTy = AtomicTy;
+ EvaluationKind = CGF.getEvaluationKind(ValueTy);
+
+ uint64_t ValueAlignInBits;
+ uint64_t AtomicAlignInBits;
+ TypeInfo ValueTI = C.getTypeInfo(ValueTy);
+ ValueSizeInBits = ValueTI.Width;
+ ValueAlignInBits = ValueTI.Align;
+
+ TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
+ AtomicSizeInBits = AtomicTI.Width;
+ AtomicAlignInBits = AtomicTI.Align;
+
+ assert(ValueSizeInBits <= AtomicSizeInBits);
+ assert(ValueAlignInBits <= AtomicAlignInBits);
+
+ AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
+ ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
+ if (lvalue.getAlignment().isZero())
+ lvalue.setAlignment(AtomicAlign);
+
+ LVal = lvalue;
+ } else if (lvalue.isBitField()) {
+ ValueTy = lvalue.getType();
+ ValueSizeInBits = C.getTypeSize(ValueTy);
+ auto &OrigBFI = lvalue.getBitFieldInfo();
+ auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
+ AtomicSizeInBits = C.toBits(
+ C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
+ .RoundUpToAlignment(lvalue.getAlignment()));
+ auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
+ auto OffsetInChars =
+ (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
+ lvalue.getAlignment();
+ VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
+ VoidPtrAddr, OffsetInChars.getQuantity());
+ auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ VoidPtrAddr,
+ CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
+ "atomic_bitfield_base");
+ BFI = OrigBFI;
+ BFI.Offset = Offset;
+ BFI.StorageSize = AtomicSizeInBits;
+ BFI.StorageOffset += OffsetInChars;
+ LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
+ BFI, lvalue.getType(),
+ lvalue.getAlignmentSource());
+ LVal.setTBAAInfo(lvalue.getTBAAInfo());
+ AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
+ if (AtomicTy.isNull()) {
+ llvm::APInt Size(
+ /*numBits=*/32,
+ C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
+ AtomicTy = C.getConstantArrayType(C.CharTy, Size, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
+ }
+ AtomicAlign = ValueAlign = lvalue.getAlignment();
+ } else if (lvalue.isVectorElt()) {
+ ValueTy = lvalue.getType()->getAs<VectorType>()->getElementType();
+ ValueSizeInBits = C.getTypeSize(ValueTy);
+ AtomicTy = lvalue.getType();
+ AtomicSizeInBits = C.getTypeSize(AtomicTy);
+ AtomicAlign = ValueAlign = lvalue.getAlignment();
+ LVal = lvalue;
+ } else {
+ assert(lvalue.isExtVectorElt());
+ ValueTy = lvalue.getType();
+ ValueSizeInBits = C.getTypeSize(ValueTy);
+ AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
+ lvalue.getType(), lvalue.getExtVectorAddress()
+ .getElementType()->getVectorNumElements());
+ AtomicSizeInBits = C.getTypeSize(AtomicTy);
+ AtomicAlign = ValueAlign = lvalue.getAlignment();
+ LVal = lvalue;
+ }
+ UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
+ AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
+ }
+
+ QualType getAtomicType() const { return AtomicTy; }
+ QualType getValueType() const { return ValueTy; }
+ CharUnits getAtomicAlignment() const { return AtomicAlign; }
+ CharUnits getValueAlignment() const { return ValueAlign; }
+ uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
+ uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
+ TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
+ bool shouldUseLibcall() const { return UseLibcall; }
+ const LValue &getAtomicLValue() const { return LVal; }
+ llvm::Value *getAtomicPointer() const {
+ if (LVal.isSimple())
+ return LVal.getPointer();
+ else if (LVal.isBitField())
+ return LVal.getBitFieldPointer();
+ else if (LVal.isVectorElt())
+ return LVal.getVectorPointer();
+ assert(LVal.isExtVectorElt());
+ return LVal.getExtVectorPointer();
+ }
+ Address getAtomicAddress() const {
+ return Address(getAtomicPointer(), getAtomicAlignment());
+ }
+
+ Address getAtomicAddressAsAtomicIntPointer() const {
+ return emitCastToAtomicIntPointer(getAtomicAddress());
+ }
+
+ /// Is the atomic size larger than the underlying value type?
+ ///
+ /// Note that the absence of padding does not mean that atomic
+ /// objects are completely interchangeable with non-atomic
+ /// objects: we might have promoted the alignment of a type
+ /// without making it bigger.
+ bool hasPadding() const {
+ return (ValueSizeInBits != AtomicSizeInBits);
+ }
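+
+    // For example, on a typical target an _Atomic structure holding three
+    // chars is widened to four bytes, giving ValueSizeInBits == 24 but
+    // AtomicSizeInBits == 32 (illustrative; widths are target-dependent).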
+
+ bool emitMemSetZeroIfNecessary() const;
+
+ llvm::Value *getAtomicSizeValue() const {
+ CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
+ return CGF.CGM.getSize(size);
+ }
+
+ /// Cast the given pointer to an integer pointer suitable for atomic
+    /// operations.
+ Address emitCastToAtomicIntPointer(Address Addr) const;
+
+ /// If Addr is compatible with the iN that will be used for an atomic
+ /// operation, bitcast it. Otherwise, create a temporary that is suitable
+ /// and copy the value across.
+ Address convertToAtomicIntPointer(Address Addr) const;
+
+ /// Turn an atomic-layout object into an r-value.
+ RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
+ SourceLocation loc, bool AsValue) const;
+
+    /// \brief Converts an r-value to an integer value.
+ llvm::Value *convertRValueToInt(RValue RVal) const;
+
+ RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
+ AggValueSlot ResultSlot,
+ SourceLocation Loc, bool AsValue) const;
+
+ /// Copy an atomic r-value into atomic-layout memory.
+ void emitCopyIntoMemory(RValue rvalue) const;
+
+ /// Project an l-value down to the value field.
+ LValue projectValue() const {
+ assert(LVal.isSimple());
+ Address addr = getAtomicAddress();
+ if (hasPadding())
+ addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());
+
+ return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
+ LVal.getAlignmentSource(), LVal.getTBAAInfo());
+ }
+
+ /// \brief Emits atomic load.
+ /// \returns Loaded value.
+ RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
+ bool AsValue, llvm::AtomicOrdering AO,
+ bool IsVolatile);
+
+ /// \brief Emits atomic compare-and-exchange sequence.
+ /// \param Expected Expected value.
+ /// \param Desired Desired value.
+ /// \param Success Atomic ordering for success operation.
+ /// \param Failure Atomic ordering for failed operation.
+ /// \param IsWeak true if atomic operation is weak, false otherwise.
+    /// \returns Pair of values: the previous value from storage (value type)
+    /// and a boolean flag (i1 type) that is true on success and false
+    /// otherwise.
+ std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
+ RValue Expected, RValue Desired,
+ llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
+ llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
+ bool IsWeak = false);
+
+ /// \brief Emits atomic update.
+ /// \param AO Atomic ordering.
+ /// \param UpdateOp Update operation for the current lvalue.
+ void EmitAtomicUpdate(llvm::AtomicOrdering AO,
+ const llvm::function_ref<RValue(RValue)> &UpdateOp,
+ bool IsVolatile);
+ /// \brief Emits atomic update.
+ /// \param AO Atomic ordering.
+ void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
+ bool IsVolatile);
+
+ /// Materialize an atomic r-value in atomic-layout memory.
+ Address materializeRValue(RValue rvalue) const;
+
+ /// \brief Translates LLVM atomic ordering to GNU atomic ordering for
+ /// libcalls.
+ static AtomicExpr::AtomicOrderingKind
+ translateAtomicOrdering(const llvm::AtomicOrdering AO);
+
+ /// \brief Creates temp alloca for intermediate operations on atomic value.
+ Address CreateTempAlloca() const;
+ private:
+ bool requiresMemSetZero(llvm::Type *type) const;
+
+
+    void EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
+ llvm::AtomicOrdering AO, bool IsVolatile);
+ /// \brief Emits atomic load as LLVM instruction.
+ llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
+ /// \brief Emits atomic compare-and-exchange op as a libcall.
+ llvm::Value *EmitAtomicCompareExchangeLibcall(
+ llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
+ llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
+ llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent);
+ /// \brief Emits atomic compare-and-exchange op as LLVM instruction.
+ std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
+ llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
+ llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
+ llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
+ bool IsWeak = false);
+ /// \brief Emit atomic update as libcalls.
+ void
+ EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
+ const llvm::function_ref<RValue(RValue)> &UpdateOp,
+ bool IsVolatile);
+ /// \brief Emit atomic update as LLVM instructions.
+ void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
+ const llvm::function_ref<RValue(RValue)> &UpdateOp,
+ bool IsVolatile);
+ /// \brief Emit atomic update as libcalls.
+ void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
+ bool IsVolatile);
+ /// \brief Emit atomic update as LLVM instructions.
+  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
+ bool IsVolatile);
+ };
+}
+
+AtomicExpr::AtomicOrderingKind
+AtomicInfo::translateAtomicOrdering(const llvm::AtomicOrdering AO) {
+ switch (AO) {
+ case llvm::Unordered:
+ case llvm::NotAtomic:
+ case llvm::Monotonic:
+ return AtomicExpr::AO_ABI_memory_order_relaxed;
+ case llvm::Acquire:
+ return AtomicExpr::AO_ABI_memory_order_acquire;
+ case llvm::Release:
+ return AtomicExpr::AO_ABI_memory_order_release;
+ case llvm::AcquireRelease:
+ return AtomicExpr::AO_ABI_memory_order_acq_rel;
+ case llvm::SequentiallyConsistent:
+ return AtomicExpr::AO_ABI_memory_order_seq_cst;
+ }
+ llvm_unreachable("Unhandled AtomicOrdering");
+}
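+
+// Editor's note (informal, not from the original source): the AO_ABI_* values
+// used above follow the C11/C++11 ABI encoding of memory orderings:
+//   memory_order_relaxed = 0, memory_order_consume = 1,
+//   memory_order_acquire = 2, memory_order_release = 3,
+//   memory_order_acq_rel = 4, memory_order_seq_cst = 5.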
+
+Address AtomicInfo::CreateTempAlloca() const {
+ Address TempAlloca = CGF.CreateMemTemp(
+ (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
+ : AtomicTy,
+ getAtomicAlignment(),
+ "atomic-temp");
+  // For bit-fields, cast the temporary to a pointer to the atomic's storage
+  // type.
+ if (LVal.isBitField())
+ return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ TempAlloca, getAtomicAddress().getType());
+ return TempAlloca;
+}
+
+static RValue emitAtomicLibcall(CodeGenFunction &CGF,
+ StringRef fnName,
+ QualType resultType,
+ CallArgList &args) {
+ const CGFunctionInfo &fnInfo =
+ CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
+ FunctionType::ExtInfo(), RequiredArgs::All);
+ llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
+ llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
+ return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
+}
+
+/// Does a store of the given IR type modify the full expected width?
+static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
+ uint64_t expectedSize) {
+ return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
+}
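+
+// Illustrative example (editor's addition): on a typical x86-64 target,
+// x86_fp80 (long double) has an allocation size of 128 bits but a store size
+// of only 80 bits, so for _Atomic(long double) with AtomicSizeInBits == 128
+// this predicate is false and the padding must be cleared separately.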
+
+/// Does the atomic type require memsetting to zero before initialization?
+///
+/// The IR type is provided as a way of making certain queries faster.
+bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
+ // If the atomic type has size padding, we definitely need a memset.
+ if (hasPadding()) return true;
+
+ // Otherwise, do some simple heuristics to try to avoid it:
+ switch (getEvaluationKind()) {
+ // For scalars and complexes, check whether the store size of the
+ // type uses the full size.
+ case TEK_Scalar:
+ return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
+ case TEK_Complex:
+ return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
+ AtomicSizeInBits / 2);
+
+ // Padding in structs has an undefined bit pattern. User beware.
+ case TEK_Aggregate:
+ return false;
+ }
+ llvm_unreachable("bad evaluation kind");
+}
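+
+// Illustrative examples (editor's addition): _Atomic(_Complex float) lowers to
+// { float, float }, and each 32-bit element fully covers its half of the
+// 64-bit atomic, so no memset is needed; an _Atomic(struct { char c[3]; }) is
+// padded from 24 to 32 bits and is caught by the hasPadding() check above.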
+
+bool AtomicInfo::emitMemSetZeroIfNecessary() const {
+ assert(LVal.isSimple());
+ llvm::Value *addr = LVal.getPointer();
+ if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
+ return false;
+
+ CGF.Builder.CreateMemSet(
+ addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
+ CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
+ LVal.getAlignment().getQuantity());
+ return true;
+}
+
+static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
+ Address Dest, Address Ptr,
+ Address Val1, Address Val2,
+ uint64_t Size,
+ llvm::AtomicOrdering SuccessOrder,
+ llvm::AtomicOrdering FailureOrder) {
+  // Load the expected and desired values that feed the cmpxchg instruction.
+ llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
+ llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
+
+ llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
+ Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder);
+ Pair->setVolatile(E->isVolatile());
+ Pair->setWeak(IsWeak);
+
+ // Cmp holds the result of the compare-exchange operation: true on success,
+ // false on failure.
+ llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
+ llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
+
+ // This basic block is used to hold the store instruction if the operation
+ // failed.
+ llvm::BasicBlock *StoreExpectedBB =
+ CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
+
+ // This basic block is the exit point of the operation, we should end up
+ // here regardless of whether or not the operation succeeded.
+ llvm::BasicBlock *ContinueBB =
+ CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
+
+ // Update Expected if Expected isn't equal to Old, otherwise branch to the
+ // exit point.
+ CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
+
+ CGF.Builder.SetInsertPoint(StoreExpectedBB);
+ // Update the memory at Expected with Old's value.
+ CGF.Builder.CreateStore(Old, Val1);
+ // Finally, branch to the exit point.
+ CGF.Builder.CreateBr(ContinueBB);
+
+ CGF.Builder.SetInsertPoint(ContinueBB);
+ // Update the memory at Dest with Cmp's value.
+ CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
+}
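+
+// Rough shape of the IR emitted above (editor's sketch; value names invented):
+//   %pair = cmpxchg iN* %ptr, iN %expected, iN %desired success_ord failure_ord
+//   %old  = extractvalue { iN, i1 } %pair, 0
+//   %cmp  = extractvalue { iN, i1 } %pair, 1
+//   br i1 %cmp, label %cmpxchg.continue, label %cmpxchg.store_expected
+// cmpxchg.store_expected:            ; write the observed value back to Val1
+//   store iN %old, iN* %val1
+//   br label %cmpxchg.continue
+// cmpxchg.continue:                  ; widen %cmp to bool's memory
+//   ...                              ; representation and store it to Dest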
+
+/// Given an ordering required on success, emit all possible cmpxchg
+/// instructions to cope with the provided (but possibly only dynamically known)
+/// FailureOrder.
+static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
+ bool IsWeak, Address Dest,
+ Address Ptr, Address Val1,
+ Address Val2,
+ llvm::Value *FailureOrderVal,
+ uint64_t Size,
+ llvm::AtomicOrdering SuccessOrder) {
+ llvm::AtomicOrdering FailureOrder;
+ if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
+ switch (FO->getSExtValue()) {
+ default:
+ FailureOrder = llvm::Monotonic;
+ break;
+ case AtomicExpr::AO_ABI_memory_order_consume:
+ case AtomicExpr::AO_ABI_memory_order_acquire:
+ FailureOrder = llvm::Acquire;
+ break;
+ case AtomicExpr::AO_ABI_memory_order_seq_cst:
+ FailureOrder = llvm::SequentiallyConsistent;
+ break;
+ }
+ if (FailureOrder >= SuccessOrder) {
+ // Don't assert on undefined behaviour.
+ FailureOrder =
+ llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
+ }
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size,
+ SuccessOrder, FailureOrder);
+ return;
+ }
+
+ // Create all the relevant BB's
+ llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
+ *SeqCstBB = nullptr;
+ MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
+ if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
+ AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
+ if (SuccessOrder == llvm::SequentiallyConsistent)
+ SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
+
+ llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
+
+ llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
+
+ // Emit all the different atomics
+
+ // MonotonicBB is arbitrarily chosen as the default case; in practice, this
+ // doesn't matter unless someone is crazy enough to use something that
+ // doesn't fold to a constant for the ordering.
+ CGF.Builder.SetInsertPoint(MonotonicBB);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
+ Size, SuccessOrder, llvm::Monotonic);
+ CGF.Builder.CreateBr(ContBB);
+
+ if (AcquireBB) {
+ CGF.Builder.SetInsertPoint(AcquireBB);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
+ Size, SuccessOrder, llvm::Acquire);
+ CGF.Builder.CreateBr(ContBB);
+ SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
+ AcquireBB);
+ SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
+ AcquireBB);
+ }
+ if (SeqCstBB) {
+ CGF.Builder.SetInsertPoint(SeqCstBB);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
+ Size, SuccessOrder, llvm::SequentiallyConsistent);
+ CGF.Builder.CreateBr(ContBB);
+ SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
+ SeqCstBB);
+ }
+
+ CGF.Builder.SetInsertPoint(ContBB);
+}
+
+static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
+ Address Ptr, Address Val1, Address Val2,
+ llvm::Value *IsWeak, llvm::Value *FailureOrder,
+ uint64_t Size, llvm::AtomicOrdering Order) {
+ llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
+ llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
+
+ switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("Already handled!");
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
+ FailureOrder, Size, Order);
+ return;
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
+ FailureOrder, Size, Order);
+ return;
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n: {
+ if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
+ emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
+ Val1, Val2, FailureOrder, Size, Order);
+ } else {
+ // Create all the relevant BB's
+ llvm::BasicBlock *StrongBB =
+ CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
+      llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
+ llvm::BasicBlock *ContBB =
+ CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
+
+ llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
+ SI->addCase(CGF.Builder.getInt1(false), StrongBB);
+
+ CGF.Builder.SetInsertPoint(StrongBB);
+ emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
+ FailureOrder, Size, Order);
+ CGF.Builder.CreateBr(ContBB);
+
+ CGF.Builder.SetInsertPoint(WeakBB);
+ emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
+ FailureOrder, Size, Order);
+ CGF.Builder.CreateBr(ContBB);
+
+ CGF.Builder.SetInsertPoint(ContBB);
+ }
+ return;
+ }
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__atomic_load: {
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
+ Load->setAtomic(Order);
+ Load->setVolatile(E->isVolatile());
+ CGF.Builder.CreateStore(Load, Dest);
+ return;
+ }
+
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_store_n: {
+ llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
+ llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
+ Store->setAtomic(Order);
+ Store->setVolatile(E->isVolatile());
+ return;
+ }
+
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__atomic_exchange:
+ Op = llvm::AtomicRMWInst::Xchg;
+ break;
+
+ case AtomicExpr::AO__atomic_add_fetch:
+ PostOp = llvm::Instruction::Add;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_add:
+ Op = llvm::AtomicRMWInst::Add;
+ break;
+
+ case AtomicExpr::AO__atomic_sub_fetch:
+ PostOp = llvm::Instruction::Sub;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ Op = llvm::AtomicRMWInst::Sub;
+ break;
+
+ case AtomicExpr::AO__atomic_and_fetch:
+ PostOp = llvm::Instruction::And;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_and:
+ Op = llvm::AtomicRMWInst::And;
+ break;
+
+ case AtomicExpr::AO__atomic_or_fetch:
+ PostOp = llvm::Instruction::Or;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_or:
+ Op = llvm::AtomicRMWInst::Or;
+ break;
+
+ case AtomicExpr::AO__atomic_xor_fetch:
+ PostOp = llvm::Instruction::Xor;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ Op = llvm::AtomicRMWInst::Xor;
+ break;
+
+ case AtomicExpr::AO__atomic_nand_fetch:
+ PostOp = llvm::Instruction::And; // the NOT is special cased below
+ // Fall through.
+ case AtomicExpr::AO__atomic_fetch_nand:
+ Op = llvm::AtomicRMWInst::Nand;
+ break;
+ }
+
+ llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
+ llvm::AtomicRMWInst *RMWI =
+ CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order);
+ RMWI->setVolatile(E->isVolatile());
+
+ // For __atomic_*_fetch operations, perform the operation again to
+ // determine the value which was written.
+ llvm::Value *Result = RMWI;
+ if (PostOp)
+ Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
+ if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
+ Result = CGF.Builder.CreateNot(Result);
+ CGF.Builder.CreateStore(Result, Dest);
+}
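+
+// Editor's sketch of the *_fetch recomputation above, e.g. for
+// __atomic_nand_fetch(p, v, order) (value names invented):
+//   %old = atomicrmw nand iN* %p, iN %v <order>  ; returns the old value
+//   %tmp = and iN %old, iN %v                    ; PostOp recomputation
+//   %new = xor iN %tmp, -1                       ; the 'not' special case
+// For the other __atomic_*_fetch forms only the PostOp binop is applied.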
+
+// This function emits any expression (scalar, complex, or aggregate)
+// into a temporary alloca.
+static Address
+EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
+ Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
+ CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
+ /*Init*/ true);
+ return DeclPtr;
+}
+
+static void
+AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
+ bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
+ SourceLocation Loc, CharUnits SizeInChars) {
+ if (UseOptimizedLibcall) {
+ // Load value and pass it to the function directly.
+ CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
+ int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
+ ValTy =
+ CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
+ llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
+ SizeInBits)->getPointerTo();
+ Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
+ Val = CGF.EmitLoadOfScalar(Ptr, false,
+ CGF.getContext().getPointerType(ValTy),
+ Loc);
+ // Coerce the value into an appropriately sized integer type.
+ Args.add(RValue::get(Val), ValTy);
+ } else {
+ // Non-optimized functions always take a reference.
+ Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
+ CGF.getContext().VoidPtrTy);
+ }
+}
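+
+// Editor's note contrasting the two calling conventions (signatures follow the
+// GNU atomic library convention referenced below):
+//   unsigned __atomic_fetch_add_4(void *mem, unsigned val, int order); // direct
+//   void __atomic_exchange(size_t size, void *mem, void *val,
+//                          void *ret, int order);                // by reference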
+
+RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
+ QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
+ QualType MemTy = AtomicTy;
+ if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
+ MemTy = AT->getValueType();
+ CharUnits sizeChars, alignChars;
+ std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
+ uint64_t Size = sizeChars.getQuantity();
+ unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
+ bool UseLibcall = (sizeChars != alignChars ||
+ getContext().toBits(sizeChars) > MaxInlineWidthInBits);
+
+ llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
+
+ Address Val1 = Address::invalid();
+ Address Val2 = Address::invalid();
+ Address Dest = Address::invalid();
+ Address Ptr(EmitScalarExpr(E->getPtr()), alignChars);
+
+ if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
+ LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
+ EmitAtomicInit(E->getVal1(), lvalue);
+ return RValue::get(nullptr);
+ }
+
+ llvm::Value *Order = EmitScalarExpr(E->getOrder());
+
+ switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("Already handled above with EmitAtomicInit!");
+
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ break;
+
+ case AtomicExpr::AO__atomic_load:
+ Dest = EmitPointerWithAlignment(E->getVal1());
+ break;
+
+ case AtomicExpr::AO__atomic_store:
+ Val1 = EmitPointerWithAlignment(E->getVal1());
+ break;
+
+ case AtomicExpr::AO__atomic_exchange:
+ Val1 = EmitPointerWithAlignment(E->getVal1());
+ Dest = EmitPointerWithAlignment(E->getVal2());
+ break;
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ Val1 = EmitPointerWithAlignment(E->getVal1());
+ if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
+ Val2 = EmitPointerWithAlignment(E->getVal2());
+ else
+ Val2 = EmitValToTemp(*this, E->getVal2());
+ OrderFail = EmitScalarExpr(E->getOrderFail());
+ if (E->getNumSubExprs() == 6)
+ IsWeak = EmitScalarExpr(E->getWeak());
+ break;
+
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ if (MemTy->isPointerType()) {
+ // For pointer arithmetic, we're required to do a bit of math:
+ // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
+ // ... but only for the C11 builtins. The GNU builtins expect the
+ // user to multiply by sizeof(T).
+ QualType Val1Ty = E->getVal1()->getType();
+ llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
+ CharUnits PointeeIncAmt =
+ getContext().getTypeSizeInChars(MemTy->getPointeeType());
+ Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
+ auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
+ Val1 = Temp;
+ EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
+ break;
+ }
+ // Fall through.
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__atomic_xor_fetch:
+ case AtomicExpr::AO__atomic_nand_fetch:
+ Val1 = EmitValToTemp(*this, E->getVal1());
+ break;
+ }
+
+ QualType RValTy = E->getType().getUnqualifiedType();
+
+  // The inlined atomics only operate on iN types, where N is a power of 2. We
+ // need to make sure (via temporaries if necessary) that all incoming values
+ // are compatible.
+ LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
+ AtomicInfo Atomics(*this, AtomicVal);
+
+ Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
+ if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
+ if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
+ if (Dest.isValid())
+ Dest = Atomics.emitCastToAtomicIntPointer(Dest);
+ else if (E->isCmpXChg())
+ Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
+ else if (!RValTy->isVoidType())
+ Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
+
+ // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
+ if (UseLibcall) {
+ bool UseOptimizedLibcall = false;
+ switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("Already handled above with EmitAtomicInit!");
+
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_nand_fetch:
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ case AtomicExpr::AO__atomic_xor_fetch:
+ // For these, only library calls for certain sizes exist.
+ UseOptimizedLibcall = true;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__atomic_load:
+ case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__atomic_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ // Only use optimized library calls for sizes for which they exist.
+ if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
+ UseOptimizedLibcall = true;
+ break;
+ }
+
+ CallArgList Args;
+ if (!UseOptimizedLibcall) {
+ // For non-optimized library calls, the size is the first parameter
+ Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
+ getContext().getSizeType());
+ }
+ // Atomic address is the first or second parameter
+ Args.add(RValue::get(EmitCastToVoidPtr(Ptr.getPointer())),
+ getContext().VoidPtrTy);
+
+ std::string LibCallName;
+ QualType LoweredMemTy =
+ MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
+ QualType RetTy;
+ bool HaveRetTy = false;
+ llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
+ switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("Already handled!");
+
+  // There is only one libcall for compare and exchange, because there is no
+ // optimisation benefit possible from a libcall version of a weak compare
+ // and exchange.
+ // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
+ // void *desired, int success, int failure)
+ // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
+ // int success, int failure)
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ LibCallName = "__atomic_compare_exchange";
+ RetTy = getContext().BoolTy;
+ HaveRetTy = true;
+ Args.add(RValue::get(EmitCastToVoidPtr(Val1.getPointer())),
+ getContext().VoidPtrTy);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
+ Args.add(RValue::get(Order), getContext().IntTy);
+ Order = OrderFail;
+ break;
+ // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
+ // int order)
+ // T __atomic_exchange_N(T *mem, T val, int order)
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__atomic_exchange:
+ LibCallName = "__atomic_exchange";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
+ break;
+ // void __atomic_store(size_t size, void *mem, void *val, int order)
+ // void __atomic_store_N(T *mem, T val, int order)
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_store_n:
+ LibCallName = "__atomic_store";
+ RetTy = getContext().VoidTy;
+ HaveRetTy = true;
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
+ break;
+ // void __atomic_load(size_t size, void *mem, void *return, int order)
+ // T __atomic_load_N(T *mem, int order)
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ LibCallName = "__atomic_load";
+ break;
+ // T __atomic_add_fetch_N(T *mem, T val, int order)
+ // T __atomic_fetch_add_N(T *mem, T val, int order)
+ case AtomicExpr::AO__atomic_add_fetch:
+ PostOp = llvm::Instruction::Add;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_add:
+ LibCallName = "__atomic_fetch_add";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ LoweredMemTy, E->getExprLoc(), sizeChars);
+ break;
+ // T __atomic_and_fetch_N(T *mem, T val, int order)
+ // T __atomic_fetch_and_N(T *mem, T val, int order)
+ case AtomicExpr::AO__atomic_and_fetch:
+ PostOp = llvm::Instruction::And;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_and:
+ LibCallName = "__atomic_fetch_and";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
+ break;
+ // T __atomic_or_fetch_N(T *mem, T val, int order)
+ // T __atomic_fetch_or_N(T *mem, T val, int order)
+ case AtomicExpr::AO__atomic_or_fetch:
+ PostOp = llvm::Instruction::Or;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_or:
+ LibCallName = "__atomic_fetch_or";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
+ break;
+ // T __atomic_sub_fetch_N(T *mem, T val, int order)
+ // T __atomic_fetch_sub_N(T *mem, T val, int order)
+ case AtomicExpr::AO__atomic_sub_fetch:
+ PostOp = llvm::Instruction::Sub;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ LibCallName = "__atomic_fetch_sub";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ LoweredMemTy, E->getExprLoc(), sizeChars);
+ break;
+ // T __atomic_xor_fetch_N(T *mem, T val, int order)
+ // T __atomic_fetch_xor_N(T *mem, T val, int order)
+ case AtomicExpr::AO__atomic_xor_fetch:
+ PostOp = llvm::Instruction::Xor;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ LibCallName = "__atomic_fetch_xor";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
+ break;
+ // T __atomic_nand_fetch_N(T *mem, T val, int order)
+ // T __atomic_fetch_nand_N(T *mem, T val, int order)
+ case AtomicExpr::AO__atomic_nand_fetch:
+ PostOp = llvm::Instruction::And; // the NOT is special cased below
+ // Fall through.
+ case AtomicExpr::AO__atomic_fetch_nand:
+ LibCallName = "__atomic_fetch_nand";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
+ break;
+ }
+
+ // Optimized functions have the size in their name.
+ if (UseOptimizedLibcall)
+ LibCallName += "_" + llvm::utostr(Size);
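+  // e.g. "__atomic_fetch_add" with Size == 4 becomes "__atomic_fetch_add_4".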
+ // By default, assume we return a value of the atomic type.
+ if (!HaveRetTy) {
+ if (UseOptimizedLibcall) {
+ // Value is returned directly.
+ // The function returns an appropriately sized integer type.
+ RetTy = getContext().getIntTypeForBitwidth(
+ getContext().toBits(sizeChars), /*Signed=*/false);
+ } else {
+      // Value is returned through an out-parameter that precedes the order.
+ RetTy = getContext().VoidTy;
+ Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
+ getContext().VoidPtrTy);
+ }
+ }
+ // order is always the last parameter
+ Args.add(RValue::get(Order),
+ getContext().IntTy);
+
+    // PostOp is only needed for the atomic_*_fetch operations, and is
+    // therefore only handled in the UseOptimizedLibcall codepath.
+ assert(UseOptimizedLibcall || !PostOp);
+
+ RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
+ // The value is returned directly from the libcall.
+ if (E->isCmpXChg())
+ return Res;
+
+ // The value is returned directly for optimized libcalls but the expr
+ // provided an out-param.
+ if (UseOptimizedLibcall && Res.getScalarVal()) {
+ llvm::Value *ResVal = Res.getScalarVal();
+ if (PostOp) {
+ llvm::Value *LoadVal1 = Args[1].RV.getScalarVal();
+ ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
+ }
+ if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
+ ResVal = Builder.CreateNot(ResVal);
+
+ Builder.CreateStore(
+ ResVal,
+ Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
+ }
+
+ if (RValTy->isVoidType())
+ return RValue::get(nullptr);
+
+ return convertTempToRValue(
+ Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
+ RValTy, E->getExprLoc());
+ }
+
+ bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
+ E->getOp() == AtomicExpr::AO__atomic_store ||
+ E->getOp() == AtomicExpr::AO__atomic_store_n;
+ bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
+ E->getOp() == AtomicExpr::AO__atomic_load ||
+ E->getOp() == AtomicExpr::AO__atomic_load_n;
+
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ switch (ord) {
+ case AtomicExpr::AO_ABI_memory_order_relaxed:
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+ Size, llvm::Monotonic);
+ break;
+ case AtomicExpr::AO_ABI_memory_order_consume:
+ case AtomicExpr::AO_ABI_memory_order_acquire:
+ if (IsStore)
+ break; // Avoid crashing on code with undefined behavior
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+ Size, llvm::Acquire);
+ break;
+ case AtomicExpr::AO_ABI_memory_order_release:
+ if (IsLoad)
+ break; // Avoid crashing on code with undefined behavior
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+ Size, llvm::Release);
+ break;
+ case AtomicExpr::AO_ABI_memory_order_acq_rel:
+ if (IsLoad || IsStore)
+ break; // Avoid crashing on code with undefined behavior
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+ Size, llvm::AcquireRelease);
+ break;
+ case AtomicExpr::AO_ABI_memory_order_seq_cst:
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+ Size, llvm::SequentiallyConsistent);
+ break;
+ default: // invalid order
+ // We should not ever get here normally, but it's hard to
+ // enforce that in general.
+ break;
+ }
+ if (RValTy->isVoidType())
+ return RValue::get(nullptr);
+
+ return convertTempToRValue(
+ Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
+ RValTy, E->getExprLoc());
+ }
+
+ // Long case, when Order isn't obviously constant.
+
+ // Create all the relevant BB's
+ llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
+ *ReleaseBB = nullptr, *AcqRelBB = nullptr,
+ *SeqCstBB = nullptr;
+ MonotonicBB = createBasicBlock("monotonic", CurFn);
+ if (!IsStore)
+ AcquireBB = createBasicBlock("acquire", CurFn);
+ if (!IsLoad)
+ ReleaseBB = createBasicBlock("release", CurFn);
+ if (!IsLoad && !IsStore)
+ AcqRelBB = createBasicBlock("acqrel", CurFn);
+ SeqCstBB = createBasicBlock("seqcst", CurFn);
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ // Create the switch for the split
+ // MonotonicBB is arbitrarily chosen as the default case; in practice, this
+ // doesn't matter unless someone is crazy enough to use something that
+ // doesn't fold to a constant for the ordering.
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
+
+ // Emit all the different atomics
+ Builder.SetInsertPoint(MonotonicBB);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+ Size, llvm::Monotonic);
+ Builder.CreateBr(ContBB);
+ if (!IsStore) {
+ Builder.SetInsertPoint(AcquireBB);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+ Size, llvm::Acquire);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
+ AcquireBB);
+ SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
+ AcquireBB);
+ }
+ if (!IsLoad) {
+ Builder.SetInsertPoint(ReleaseBB);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+ Size, llvm::Release);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
+ ReleaseBB);
+ }
+ if (!IsLoad && !IsStore) {
+ Builder.SetInsertPoint(AcqRelBB);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+ Size, llvm::AcquireRelease);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
+ AcqRelBB);
+ }
+ Builder.SetInsertPoint(SeqCstBB);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+ Size, llvm::SequentiallyConsistent);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
+ SeqCstBB);
+
+ // Cleanup and return
+ Builder.SetInsertPoint(ContBB);
+ if (RValTy->isVoidType())
+ return RValue::get(nullptr);
+
+ assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
+ return convertTempToRValue(
+ Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
+ RValTy, E->getExprLoc());
+}
+
+Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
+ unsigned addrspace =
+ cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
+ llvm::IntegerType *ty =
+ llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
+ return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
+}
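+
+// Editor's example: for _Atomic(float), this turns a float* into an i32* in
+// the same address space, so subsequent atomic loads, stores, and cmpxchg
+// operations work on an integer of exactly AtomicSizeInBits bits.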
+
+Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
+ llvm::Type *Ty = Addr.getElementType();
+ uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
+ if (SourceSizeInBits != AtomicSizeInBits) {
+ Address Tmp = CreateTempAlloca();
+ CGF.Builder.CreateMemCpy(Tmp, Addr,
+ std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
+ Addr = Tmp;
+ }
+
+ return emitCastToAtomicIntPointer(Addr);
+}
+
+RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
+ AggValueSlot resultSlot,
+ SourceLocation loc,
+ bool asValue) const {
+ if (LVal.isSimple()) {
+ if (EvaluationKind == TEK_Aggregate)
+ return resultSlot.asRValue();
+
+ // Drill into the padding structure if we have one.
+ if (hasPadding())
+ addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());
+
+ // Otherwise, just convert the temporary to an r-value using the
+ // normal conversion routine.
+ return CGF.convertTempToRValue(addr, getValueType(), loc);
+ }
+ if (!asValue)
+ // Get RValue from temp memory as atomic for non-simple lvalues
+ return RValue::get(CGF.Builder.CreateLoad(addr));
+ if (LVal.isBitField())
+ return CGF.EmitLoadOfBitfieldLValue(
+ LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
+ LVal.getAlignmentSource()));
+ if (LVal.isVectorElt())
+ return CGF.EmitLoadOfLValue(
+ LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
+ LVal.getAlignmentSource()), loc);
+ assert(LVal.isExtVectorElt());
+ return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
+ addr, LVal.getExtVectorElts(), LVal.getType(),
+ LVal.getAlignmentSource()));
+}
+
+RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
+ AggValueSlot ResultSlot,
+ SourceLocation Loc,
+ bool AsValue) const {
+  // Try to avoid going through memory in some easy cases.
+ assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
+ if (getEvaluationKind() == TEK_Scalar &&
+ (((!LVal.isBitField() ||
+ LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
+ !hasPadding()) ||
+ !AsValue)) {
+ auto *ValTy = AsValue
+ ? CGF.ConvertTypeForMem(ValueTy)
+ : getAtomicAddress().getType()->getPointerElementType();
+ if (ValTy->isIntegerTy()) {
+ assert(IntVal->getType() == ValTy && "Different integer types.");
+ return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
+ } else if (ValTy->isPointerTy())
+ return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
+ else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
+ return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
+ }
+
+ // Create a temporary. This needs to be big enough to hold the
+ // atomic integer.
+ Address Temp = Address::invalid();
+ bool TempIsVolatile = false;
+ if (AsValue && getEvaluationKind() == TEK_Aggregate) {
+ assert(!ResultSlot.isIgnored());
+ Temp = ResultSlot.getAddress();
+ TempIsVolatile = ResultSlot.isVolatile();
+ } else {
+ Temp = CreateTempAlloca();
+ }
+
+ // Slam the integer into the temporary.
+ Address CastTemp = emitCastToAtomicIntPointer(Temp);
+ CGF.Builder.CreateStore(IntVal, CastTemp)
+ ->setVolatile(TempIsVolatile);
+
+ return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
+}
+
+void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
+ llvm::AtomicOrdering AO, bool) {
+ // void __atomic_load(size_t size, void *mem, void *return, int order);
+ CallArgList Args;
+ Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
+ Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
+ CGF.getContext().VoidPtrTy);
+  Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddrForLoaded)),
+ CGF.getContext().VoidPtrTy);
+ Args.add(RValue::get(
+ llvm::ConstantInt::get(CGF.IntTy, translateAtomicOrdering(AO))),
+ CGF.getContext().IntTy);
+ emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
+}
+
+llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
+ bool IsVolatile) {
+ // Okay, we're doing this natively.
+ Address Addr = getAtomicAddressAsAtomicIntPointer();
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
+ Load->setAtomic(AO);
+
+ // Other decoration.
+ if (IsVolatile)
+ Load->setVolatile(true);
+ if (LVal.getTBAAInfo())
+ CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
+ return Load;
+}
+
+/// An LValue is a candidate for having its loads and stores be made atomic if
+/// we are operating under /volatile:ms *and* the LValue itself is volatile and
+/// the operation can be performed without a libcall.
+bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
+ if (!CGM.getCodeGenOpts().MSVolatile) return false;
+ AtomicInfo AI(*this, LV);
+ bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
+ // An atomic is inline if we don't need to use a libcall.
+ bool AtomicIsInline = !AI.shouldUseLibcall();
+ return IsVolatile && AtomicIsInline;
+}
+
+/// A type is a candidate for having its loads and stores be made atomic if
+/// we are operating under /volatile:ms *and* we know the access is volatile
+/// and the operation can be performed without a libcall.
+bool CodeGenFunction::typeIsSuitableForInlineAtomic(QualType Ty,
+ bool IsVolatile) const {
+ // An atomic is inline if we don't need to use a libcall (e.g. it is builtin).
+ bool AtomicIsInline = getContext().getTargetInfo().hasBuiltinAtomic(
+ getContext().getTypeSize(Ty), getContext().getTypeAlign(Ty));
+ return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
+}
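+
+// Editor's background note: under MSVC's /volatile:ms model, volatile loads
+// carry acquire semantics and volatile stores carry release semantics, which
+// is why both helpers above are gated on the MSVolatile codegen option.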
+
+RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
+ AggValueSlot Slot) {
+ llvm::AtomicOrdering AO;
+ bool IsVolatile = LV.isVolatileQualified();
+ if (LV.getType()->isAtomicType()) {
+ AO = llvm::SequentiallyConsistent;
+ } else {
+ AO = llvm::Acquire;
+ IsVolatile = true;
+ }
+ return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
+}
+
+RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
+ bool AsValue, llvm::AtomicOrdering AO,
+ bool IsVolatile) {
+ // Check whether we should use a library call.
+ if (shouldUseLibcall()) {
+ Address TempAddr = Address::invalid();
+ if (LVal.isSimple() && !ResultSlot.isIgnored()) {
+ assert(getEvaluationKind() == TEK_Aggregate);
+ TempAddr = ResultSlot.getAddress();
+ } else
+ TempAddr = CreateTempAlloca();
+
+ EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
+
+ // Okay, turn that back into the original value or whole atomic (for
+ // non-simple lvalues) type.
+ return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
+ }
+
+ // Okay, we're doing this natively.
+ auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
+
+ // If we're ignoring an aggregate return, don't do anything.
+ if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
+ return RValue::getAggregate(Address::invalid(), false);
+
+ // Okay, turn that back into the original value or atomic (for non-simple
+ // lvalues) type.
+ return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
+}
+
+/// Emit a load from an l-value of atomic type. Note that the r-value
+/// we produce is an r-value of the atomic *value* type.
+RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
+ llvm::AtomicOrdering AO, bool IsVolatile,
+ AggValueSlot resultSlot) {
+ AtomicInfo Atomics(*this, src);
+ return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
+ IsVolatile);
+}
+
+/// Copy an r-value into memory as part of storing to an atomic type.
+/// This needs to create a bit-pattern suitable for atomic operations.
+void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
+ assert(LVal.isSimple());
+ // If we have an r-value, the rvalue should be of the atomic type,
+ // which means that the caller is responsible for having zeroed
+ // any padding. Just do an aggregate copy of that type.
+ if (rvalue.isAggregate()) {
+ CGF.EmitAggregateCopy(getAtomicAddress(),
+ rvalue.getAggregateAddress(),
+ getAtomicType(),
+ (rvalue.isVolatileQualified()
+ || LVal.isVolatileQualified()));
+ return;
+ }
+
+ // Okay, otherwise we're copying stuff.
+
+ // Zero out the buffer if necessary.
+ emitMemSetZeroIfNecessary();
+
+ // Drill past the padding if present.
+ LValue TempLVal = projectValue();
+
+ // Okay, store the rvalue in.
+ if (rvalue.isScalar()) {
+ CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
+ } else {
+ CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
+ }
+}
+
+
+/// Materialize an r-value into memory for the purposes of storing it
+/// to an atomic type.
+Address AtomicInfo::materializeRValue(RValue rvalue) const {
+ // Aggregate r-values are already in memory, and EmitAtomicStore
+ // requires them to be values of the atomic type.
+ if (rvalue.isAggregate())
+ return rvalue.getAggregateAddress();
+
+ // Otherwise, make a temporary and materialize into it.
+ LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
+ AtomicInfo Atomics(CGF, TempLV);
+ Atomics.emitCopyIntoMemory(rvalue);
+ return TempLV.getAddress();
+}
+
+llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
+ // If we've got a scalar value of the right size, try to avoid going
+ // through memory.
+ if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
+ llvm::Value *Value = RVal.getScalarVal();
+ if (isa<llvm::IntegerType>(Value->getType()))
+ return CGF.EmitToMemory(Value, ValueTy);
+ else {
+ llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
+ CGF.getLLVMContext(),
+ LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
+ if (isa<llvm::PointerType>(Value->getType()))
+ return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
+ else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
+ return CGF.Builder.CreateBitCast(Value, InputIntTy);
+ }
+ }
+ // Otherwise, we need to go through memory.
+ // Put the r-value in memory.
+ Address Addr = materializeRValue(RVal);
+
+ // Cast the temporary to the atomic int type and pull a value out.
+ Addr = emitCastToAtomicIntPointer(Addr);
+ return CGF.Builder.CreateLoad(Addr);
+}
+
+std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
+ llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
+ llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
+ // Do the atomic store.
+ Address Addr = getAtomicAddressAsAtomicIntPointer();
+ auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
+ ExpectedVal, DesiredVal,
+ Success, Failure);
+ // Other decoration.
+ Inst->setVolatile(LVal.isVolatileQualified());
+ Inst->setWeak(IsWeak);
+
+ // Okay, turn that back into the original value type.
+ auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
+ auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
+ return std::make_pair(PreviousVal, SuccessFailureVal);
+}
+
+llvm::Value *
+AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
+ llvm::Value *DesiredAddr,
+ llvm::AtomicOrdering Success,
+ llvm::AtomicOrdering Failure) {
+ // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
+ // void *desired, int success, int failure);
+ CallArgList Args;
+ Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
+ Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
+ CGF.getContext().VoidPtrTy);
+ Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
+ CGF.getContext().VoidPtrTy);
+ Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
+ CGF.getContext().VoidPtrTy);
+ Args.add(RValue::get(llvm::ConstantInt::get(
+ CGF.IntTy, translateAtomicOrdering(Success))),
+ CGF.getContext().IntTy);
+ Args.add(RValue::get(llvm::ConstantInt::get(
+ CGF.IntTy, translateAtomicOrdering(Failure))),
+ CGF.getContext().IntTy);
+ auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
+ CGF.getContext().BoolTy, Args);
+
+ return SuccessFailureRVal.getScalarVal();
+}
+
+std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
+ RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
+ llvm::AtomicOrdering Failure, bool IsWeak) {
+ if (Failure >= Success)
+ // Don't assert on undefined behavior.
+ Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
+
+ // Check whether we should use a library call.
+ if (shouldUseLibcall()) {
+ // Produce a source address.
+ Address ExpectedAddr = materializeRValue(Expected);
+ Address DesiredAddr = materializeRValue(Desired);
+ auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
+ DesiredAddr.getPointer(),
+ Success, Failure);
+ return std::make_pair(
+ convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
+ SourceLocation(), /*AsValue=*/false),
+ Res);
+ }
+
+ // If we've got a scalar value of the right size, try to avoid going
+ // through memory.
+ auto *ExpectedVal = convertRValueToInt(Expected);
+ auto *DesiredVal = convertRValueToInt(Desired);
+ auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
+ Failure, IsWeak);
+ return std::make_pair(
+ ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
+ SourceLocation(), /*AsValue=*/false),
+ Res.second);
+}
+
+static void
+EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
+ const llvm::function_ref<RValue(RValue)> &UpdateOp,
+ Address DesiredAddr) {
+ RValue UpRVal;
+ LValue AtomicLVal = Atomics.getAtomicLValue();
+ LValue DesiredLVal;
+ if (AtomicLVal.isSimple()) {
+ UpRVal = OldRVal;
+ DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
+ } else {
+ // Build new lvalue for temp address
+ Address Ptr = Atomics.materializeRValue(OldRVal);
+ LValue UpdateLVal;
+ if (AtomicLVal.isBitField()) {
+ UpdateLVal =
+ LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
+ AtomicLVal.getType(),
+ AtomicLVal.getAlignmentSource());
+ DesiredLVal =
+ LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
+ AtomicLVal.getType(),
+ AtomicLVal.getAlignmentSource());
+ } else if (AtomicLVal.isVectorElt()) {
+ UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
+ AtomicLVal.getType(),
+ AtomicLVal.getAlignmentSource());
+ DesiredLVal = LValue::MakeVectorElt(
+ DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
+ AtomicLVal.getAlignmentSource());
+ } else {
+ assert(AtomicLVal.isExtVectorElt());
+ UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
+ AtomicLVal.getType(),
+ AtomicLVal.getAlignmentSource());
+ DesiredLVal = LValue::MakeExtVectorElt(
+ DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
+ AtomicLVal.getAlignmentSource());
+ }
+ UpdateLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
+ DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
+ UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
+ }
+ // Store new value in the corresponding memory area
+ RValue NewRVal = UpdateOp(UpRVal);
+ if (NewRVal.isScalar()) {
+ CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
+ } else {
+ assert(NewRVal.isComplex());
+ CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
+ /*isInit=*/false);
+ }
+}
+
+void AtomicInfo::EmitAtomicUpdateLibcall(
+ llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
+ bool IsVolatile) {
+ auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
+
+ Address ExpectedAddr = CreateTempAlloca();
+
+ EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
+ auto *ContBB = CGF.createBasicBlock("atomic_cont");
+ auto *ExitBB = CGF.createBasicBlock("atomic_exit");
+ CGF.EmitBlock(ContBB);
+ Address DesiredAddr = CreateTempAlloca();
+ if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
+ requiresMemSetZero(getAtomicAddress().getElementType())) {
+ auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
+ CGF.Builder.CreateStore(OldVal, DesiredAddr);
+ }
+ auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
+ AggValueSlot::ignored(),
+ SourceLocation(), /*AsValue=*/false);
+ EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
+ auto *Res =
+ EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
+ DesiredAddr.getPointer(),
+ AO, Failure);
+ CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
+ CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
+}
+
+void AtomicInfo::EmitAtomicUpdateOp(
+ llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
+ bool IsVolatile) {
+ auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
+
+ // Do the atomic load.
+ auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
+ // For non-simple lvalues perform compare-and-swap procedure.
+ auto *ContBB = CGF.createBasicBlock("atomic_cont");
+ auto *ExitBB = CGF.createBasicBlock("atomic_exit");
+ auto *CurBB = CGF.Builder.GetInsertBlock();
+ CGF.EmitBlock(ContBB);
+ llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
+ /*NumReservedValues=*/2);
+ PHI->addIncoming(OldVal, CurBB);
+ Address NewAtomicAddr = CreateTempAlloca();
+ Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
+ if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
+ requiresMemSetZero(getAtomicAddress().getElementType())) {
+ CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
+ }
+ auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
+ SourceLocation(), /*AsValue=*/false);
+ EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
+ auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
+ // Try to write new value using cmpxchg operation
+ auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
+ PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
+ CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
+ CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
+}
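+
+// Rough shape of the compare-and-swap loop emitted above (editor's sketch;
+// value names invented):
+//   entry:
+//     %old0 = load atomic iN, iN* %addr <AO>
+//     br label %atomic_cont
+//   atomic_cont:
+//     %old = phi iN [ %old0, %entry ], [ %prev, %atomic_cont ]
+//     ...apply UpdateOp to %old and store the result to %new.addr...
+//     %desired = load iN, iN* %new.addr
+//     %pair = cmpxchg iN* %addr, iN %old, iN %desired <AO> <Failure>
+//     %prev = extractvalue { iN, i1 } %pair, 0
+//     %ok   = extractvalue { iN, i1 } %pair, 1
+//     br i1 %ok, label %atomic_exit, label %atomic_cont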
+
+static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
+ RValue UpdateRVal, Address DesiredAddr) {
+ LValue AtomicLVal = Atomics.getAtomicLValue();
+ LValue DesiredLVal;
+ // Build new lvalue for temp address
+ if (AtomicLVal.isBitField()) {
+ DesiredLVal =
+ LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
+ AtomicLVal.getType(),
+ AtomicLVal.getAlignmentSource());
+ } else if (AtomicLVal.isVectorElt()) {
+ DesiredLVal =
+ LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
+ AtomicLVal.getType(),
+ AtomicLVal.getAlignmentSource());
+ } else {
+ assert(AtomicLVal.isExtVectorElt());
+ DesiredLVal = LValue::MakeExtVectorElt(
+ DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
+ AtomicLVal.getAlignmentSource());
+ }
+ DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
+ // Store new value in the corresponding memory area
+ assert(UpdateRVal.isScalar());
+ CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
+}
+
+void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
+ RValue UpdateRVal, bool IsVolatile) {
+ auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
+
+ Address ExpectedAddr = CreateTempAlloca();
+
+ EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
+ auto *ContBB = CGF.createBasicBlock("atomic_cont");
+ auto *ExitBB = CGF.createBasicBlock("atomic_exit");
+ CGF.EmitBlock(ContBB);
+ Address DesiredAddr = CreateTempAlloca();
+ if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
+ requiresMemSetZero(getAtomicAddress().getElementType())) {
+ auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
+ CGF.Builder.CreateStore(OldVal, DesiredAddr);
+ }
+ EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
+ auto *Res =
+ EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
+ DesiredAddr.getPointer(),
+ AO, Failure);
+ CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
+ CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
+}
+
+void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
+ bool IsVolatile) {
+ auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
+
+ // Do the atomic load.
+ auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
+ // For non-simple lvalues perform compare-and-swap procedure.
+ auto *ContBB = CGF.createBasicBlock("atomic_cont");
+ auto *ExitBB = CGF.createBasicBlock("atomic_exit");
+ auto *CurBB = CGF.Builder.GetInsertBlock();
+ CGF.EmitBlock(ContBB);
+ llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
+ /*NumReservedValues=*/2);
+ PHI->addIncoming(OldVal, CurBB);
+ Address NewAtomicAddr = CreateTempAlloca();
+ Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
+ if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
+ requiresMemSetZero(getAtomicAddress().getElementType())) {
+ CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
+ }
+ EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
+ auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
+ // Try to write new value using cmpxchg operation
+ auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
+ PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
+ CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
+ CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
+}
+
+void AtomicInfo::EmitAtomicUpdate(
+ llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
+ bool IsVolatile) {
+ if (shouldUseLibcall()) {
+ EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
+ } else {
+ EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
+ }
+}
+
+void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
+ bool IsVolatile) {
+ if (shouldUseLibcall()) {
+ EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
+ } else {
+ EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
+ }
+}
+
+void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
+ bool isInit) {
+ bool IsVolatile = lvalue.isVolatileQualified();
+ llvm::AtomicOrdering AO;
+ if (lvalue.getType()->isAtomicType()) {
+ AO = llvm::SequentiallyConsistent;
+ } else {
+ AO = llvm::Release;
+ IsVolatile = true;
+ }
+ return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
+}
+
+/// Emit a store to an l-value of atomic type.
+///
+/// Note that the r-value is expected to be an r-value *of the atomic
+/// type*; this means that for aggregate r-values, it should include
+/// storage for any padding that was necessary.
+void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
+ llvm::AtomicOrdering AO, bool IsVolatile,
+ bool isInit) {
+ // If this is an aggregate r-value, it should agree in type except
+ // maybe for address-space qualification.
+ assert(!rvalue.isAggregate() ||
+ rvalue.getAggregateAddress().getElementType()
+ == dest.getAddress().getElementType());
+
+ AtomicInfo atomics(*this, dest);
+ LValue LVal = atomics.getAtomicLValue();
+
+ // If this is an initialization, just put the value there normally.
+ if (LVal.isSimple()) {
+ if (isInit) {
+ atomics.emitCopyIntoMemory(rvalue);
+ return;
+ }
+
+ // Check whether we should use a library call.
+ if (atomics.shouldUseLibcall()) {
+ // Produce a source address.
+ Address srcAddr = atomics.materializeRValue(rvalue);
+
+ // void __atomic_store(size_t size, void *mem, void *val, int order)
+ CallArgList args;
+ args.add(RValue::get(atomics.getAtomicSizeValue()),
+ getContext().getSizeType());
+ args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
+ getContext().VoidPtrTy);
+ args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
+ getContext().VoidPtrTy);
+ args.add(RValue::get(llvm::ConstantInt::get(
+ IntTy, AtomicInfo::translateAtomicOrdering(AO))),
+ getContext().IntTy);
+ emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
+ return;
+ }
+
+ // Okay, we're doing this natively.
+ llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
+
+ // Do the atomic store.
+ Address addr =
+ atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
+ intValue = Builder.CreateIntCast(
+ intValue, addr.getElementType(), /*isSigned=*/false);
+ llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
+
+ // Initializations don't need to be atomic.
+ if (!isInit)
+ store->setAtomic(AO);
+
+ // Other decoration.
+ if (IsVolatile)
+ store->setVolatile(true);
+ if (dest.getTBAAInfo())
+ CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
+ return;
+ }
+
+ // Emit simple atomic update operation.
+ atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
+}
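+
+// When shouldUseLibcall() is true, the store above is emitted as a call to
+// the generic libatomic entry point whose signature is quoted in the code:
+//   void __atomic_store(size_t size, void *mem, void *val, int order)
+// so, notionally, a seq_cst store of a 32-byte aggregate becomes
+//   __atomic_store(32, &obj, &tmp, 5);
+// where 'tmp' is the materialized r-value and 5 is the C11
+// memory_order_seq_cst value produced by translateAtomicOrdering.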
+
+/// Emit a compare-and-exchange op for atomic type.
+///
+std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
+ LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
+ llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
+ AggValueSlot Slot) {
+ // If this is an aggregate r-value, it should agree in type except
+ // maybe for address-space qualification.
+ assert(!Expected.isAggregate() ||
+ Expected.getAggregateAddress().getElementType() ==
+ Obj.getAddress().getElementType());
+ assert(!Desired.isAggregate() ||
+ Desired.getAggregateAddress().getElementType() ==
+ Obj.getAddress().getElementType());
+ AtomicInfo Atomics(*this, Obj);
+
+ return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
+ IsWeak);
+}
+
+void CodeGenFunction::EmitAtomicUpdate(
+ LValue LVal, llvm::AtomicOrdering AO,
+ const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
+ AtomicInfo Atomics(*this, LVal);
+ Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
+}
+
+void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
+ AtomicInfo atomics(*this, dest);
+
+ switch (atomics.getEvaluationKind()) {
+ case TEK_Scalar: {
+ llvm::Value *value = EmitScalarExpr(init);
+ atomics.emitCopyIntoMemory(RValue::get(value));
+ return;
+ }
+
+ case TEK_Complex: {
+ ComplexPairTy value = EmitComplexExpr(init);
+ atomics.emitCopyIntoMemory(RValue::getComplex(value));
+ return;
+ }
+
+ case TEK_Aggregate: {
+ // Fix up the destination if the initializer isn't an expression
+ // of atomic type.
+ bool Zeroed = false;
+ if (!init->getType()->isAtomicType()) {
+ Zeroed = atomics.emitMemSetZeroIfNecessary();
+ dest = atomics.projectValue();
+ }
+
+ // Evaluate the expression directly into the destination.
+ AggValueSlot slot = AggValueSlot::forLValue(dest,
+ AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased,
+ Zeroed ? AggValueSlot::IsZeroed :
+ AggValueSlot::IsNotZeroed);
+
+ EmitAggExpr(init, slot);
+ return;
+ }
+ }
+ llvm_unreachable("bad evaluation kind");
+}
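+
+// Notionally, the three evaluation kinds handled above correspond to
+// initializations like (C11 syntax, which Clang also accepts in C++):
+//
+//   _Atomic int i = 42;                        // TEK_Scalar
+//   _Atomic _Complex float c = 1.0f;           // TEK_Complex
+//   struct S { int a; char b; };
+//   _Atomic struct S s = (struct S){1, 'x'};   // TEK_Aggregate: zero any
+//                                              // padding, then EmitAggExpr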
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
new file mode 100644
index 0000000..ba2941e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
@@ -0,0 +1,2339 @@
+//===--- CGBlocks.cpp - Emit LLVM Code for blocks ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGBlocks.h"
+#include "CGDebugInfo.h"
+#include "CGObjCRuntime.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/DeclObjC.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Module.h"
+#include <algorithm>
+#include <cstdio>
+
+using namespace clang;
+using namespace CodeGen;
+
+CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
+ : Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
+ HasCXXObject(false), UsesStret(false), HasCapturedVariableLayout(false),
+ LocalAddress(Address::invalid()), StructureType(nullptr), Block(block),
+ DominatingIP(nullptr) {
+
+ // Skip asm prefix, if any. 'name' is usually taken directly from
+ // the mangled name of the enclosing function.
+ if (!name.empty() && name[0] == '\01')
+ name = name.substr(1);
+}
+
+// Anchor the vtable to this translation unit.
+BlockByrefHelpers::~BlockByrefHelpers() {}
+
+/// Build the given block as a global block.
+static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo,
+ llvm::Constant *blockFn);
+
+/// Build the helper function to copy a block.
+static llvm::Constant *buildCopyHelper(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ return CodeGenFunction(CGM).GenerateCopyHelperFunction(blockInfo);
+}
+
+/// Build the helper function to dispose of a block.
+static llvm::Constant *buildDisposeHelper(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ return CodeGenFunction(CGM).GenerateDestroyHelperFunction(blockInfo);
+}
+
+/// buildBlockDescriptor - Build the block descriptor meta-data for a block.
+/// buildBlockDescriptor is accessed from the 5th field of the Block_literal
+/// meta-data and contains stationary information about the block literal.
+/// Its definition will have 4 (or optionally 6) words.
+/// \code
+/// struct Block_descriptor {
+/// unsigned long reserved;
+/// unsigned long size; // size of Block_literal metadata in bytes.
+/// void *copy_func_helper_decl; // optional copy helper.
+/// void *destroy_func_decl; // optional destroy helper.
+/// void *block_method_encoding_address; // @encode for block literal signature.
+/// void *block_layout_info; // encoding of captured block variables.
+/// };
+/// \endcode
+static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ ASTContext &C = CGM.getContext();
+
+ llvm::Type *ulong = CGM.getTypes().ConvertType(C.UnsignedLongTy);
+ llvm::Type *i8p = nullptr;
+ if (CGM.getLangOpts().OpenCL)
+ i8p =
+ llvm::Type::getInt8PtrTy(
+ CGM.getLLVMContext(), C.getTargetAddressSpace(LangAS::opencl_constant));
+ else
+ i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
+
+ SmallVector<llvm::Constant*, 6> elements;
+
+ // reserved
+ elements.push_back(llvm::ConstantInt::get(ulong, 0));
+
+ // Size
+ // FIXME: What is the right way to say this doesn't fit? We should give
+ // a user diagnostic in that case. Better fix would be to change the
+ // API to size_t.
+ elements.push_back(llvm::ConstantInt::get(ulong,
+ blockInfo.BlockSize.getQuantity()));
+
+ // Optional copy/dispose helpers.
+ if (blockInfo.NeedsCopyDispose) {
+ // copy_func_helper_decl
+ elements.push_back(buildCopyHelper(CGM, blockInfo));
+
+ // destroy_func_decl
+ elements.push_back(buildDisposeHelper(CGM, blockInfo));
+ }
+
+ // Signature. Mandatory ObjC-style method descriptor @encode sequence.
+ std::string typeAtEncoding =
+ CGM.getContext().getObjCEncodingForBlock(blockInfo.getBlockExpr());
+ elements.push_back(llvm::ConstantExpr::getBitCast(
+ CGM.GetAddrOfConstantCString(typeAtEncoding).getPointer(), i8p));
+
+ // GC layout.
+ if (C.getLangOpts().ObjC1) {
+ if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
+ elements.push_back(CGM.getObjCRuntime().BuildGCBlockLayout(CGM, blockInfo));
+ else
+ elements.push_back(CGM.getObjCRuntime().BuildRCBlockLayout(CGM, blockInfo));
+ }
+ else
+ elements.push_back(llvm::Constant::getNullValue(i8p));
+
+ llvm::Constant *init = llvm::ConstantStruct::getAnon(elements);
+
+ llvm::GlobalVariable *global =
+ new llvm::GlobalVariable(CGM.getModule(), init->getType(), true,
+ llvm::GlobalValue::InternalLinkage,
+ init, "__block_descriptor_tmp");
+
+ return llvm::ConstantExpr::getBitCast(global, CGM.getBlockDescriptorType());
+}
+
+/*
+ Purely notional variadic template describing the layout of a block.
+
+ template <class _ResultType, class... _ParamTypes, class... _CaptureTypes>
+ struct Block_literal {
+ /// Initialized to one of:
+ /// extern void *_NSConcreteStackBlock[];
+ /// extern void *_NSConcreteGlobalBlock[];
+ ///
+ /// In theory, we could start one off malloc'ed by setting
+ /// BLOCK_NEEDS_FREE, giving it a refcount of 1, and using
+ /// this isa:
+ /// extern void *_NSConcreteMallocBlock[];
+ struct objc_class *isa;
+
+ /// These are the flags (with corresponding bit number) that the
+ /// compiler is actually supposed to know about.
+ /// 25. BLOCK_HAS_COPY_DISPOSE - indicates that the block
+ /// descriptor provides copy and dispose helper functions
+ /// 26. BLOCK_HAS_CXX_OBJ - indicates that there's a captured
+ /// object with a nontrivial destructor or copy constructor
+ /// 28. BLOCK_IS_GLOBAL - indicates that the block is allocated
+ /// as global memory
+ /// 29. BLOCK_USE_STRET - indicates that the block function
+ /// uses stret, which objc_msgSend needs to know about
+ /// 30. BLOCK_HAS_SIGNATURE - indicates that the block has an
+ /// @encoded signature string
+ /// And we're not supposed to manipulate these:
+ /// 24. BLOCK_NEEDS_FREE - indicates that the block has been moved
+ /// to malloc'ed memory
+ /// 27. BLOCK_IS_GC - indicates that the block has been moved
+ /// to GC-allocated memory
+ /// Additionally, the bottom 16 bits are a reference count which
+ /// should be zero on the stack.
+ int flags;
+
+ /// Reserved; should be zero-initialized.
+ int reserved;
+
+ /// Function pointer generated from block literal.
+ _ResultType (*invoke)(Block_literal *, _ParamTypes...);
+
+ /// Block description metadata generated from block literal.
+ struct Block_descriptor *block_descriptor;
+
+ /// Captured values follow.
+ _CaptureTypes captures...;
+ };
+ */
+
+/// The number of fields in a block header.
+const unsigned BlockHeaderSize = 5;
+
+namespace {
+ /// A chunk of data that we actually have to capture in the block.
+ struct BlockLayoutChunk {
+ CharUnits Alignment;
+ CharUnits Size;
+ Qualifiers::ObjCLifetime Lifetime;
+ const BlockDecl::Capture *Capture; // null for 'this'
+ llvm::Type *Type;
+
+ BlockLayoutChunk(CharUnits align, CharUnits size,
+ Qualifiers::ObjCLifetime lifetime,
+ const BlockDecl::Capture *capture,
+ llvm::Type *type)
+ : Alignment(align), Size(size), Lifetime(lifetime),
+ Capture(capture), Type(type) {}
+
+ /// Tell the block info that this chunk has the given field index.
+ void setIndex(CGBlockInfo &info, unsigned index, CharUnits offset) {
+ if (!Capture) {
+ info.CXXThisIndex = index;
+ info.CXXThisOffset = offset;
+ } else {
+ info.Captures.insert({Capture->getVariable(),
+ CGBlockInfo::Capture::makeIndex(index, offset)});
+ }
+ }
+ };
+
+ /// Order by 1) all __strong together, 2) next, all byref together, 3) next,
+ /// all __weak together. Preserve descending alignment in all situations.
+ bool operator<(const BlockLayoutChunk &left, const BlockLayoutChunk &right) {
+ if (left.Alignment != right.Alignment)
+ return left.Alignment > right.Alignment;
+
+ auto getPrefOrder = [](const BlockLayoutChunk &chunk) {
+ if (chunk.Capture && chunk.Capture->isByRef())
+ return 1;
+ if (chunk.Lifetime == Qualifiers::OCL_Strong)
+ return 0;
+ if (chunk.Lifetime == Qualifiers::OCL_Weak)
+ return 2;
+ return 3;
+ };
+
+ return getPrefOrder(left) < getPrefOrder(right);
+ }
+} // end anonymous namespace
+
+/// Determines if the given type is safe for constant capture in C++.
+static bool isSafeForCXXConstantCapture(QualType type) {
+ const RecordType *recordType =
+ type->getBaseElementTypeUnsafe()->getAs<RecordType>();
+
+ // Only records can be unsafe.
+ if (!recordType) return true;
+
+ const auto *record = cast<CXXRecordDecl>(recordType->getDecl());
+
+ // Maintain semantics for classes with non-trivial dtors or copy ctors.
+ if (!record->hasTrivialDestructor()) return false;
+ if (record->hasNonTrivialCopyConstructor()) return false;
+
+ // Otherwise, we just have to make sure there aren't any mutable
+ // fields that might have changed since initialization.
+ return !record->hasMutableFields();
+}
+
+/// It is illegal to modify a const object after initialization.
+/// Therefore, if a const object has a constant initializer, we don't
+/// actually need to keep storage for it in the block; we'll just
+/// rematerialize it at the start of the block function. This is
+/// acceptable because we make no promises about address stability of
+/// captured variables.
+static llvm::Constant *tryCaptureAsConstant(CodeGenModule &CGM,
+ CodeGenFunction *CGF,
+ const VarDecl *var) {
+ QualType type = var->getType();
+
+ // We can only do this if the variable is const.
+ if (!type.isConstQualified()) return nullptr;
+
+ // Furthermore, in C++ we have to worry about mutable fields:
+ // C++ [dcl.type.cv]p4:
+ // Except that any class member declared mutable can be
+ // modified, any attempt to modify a const object during its
+ // lifetime results in undefined behavior.
+ if (CGM.getLangOpts().CPlusPlus && !isSafeForCXXConstantCapture(type))
+ return nullptr;
+
+ // If the variable doesn't have any initializer (shouldn't this be
+ // invalid?), it's not clear what we should do. Maybe capture as
+ // zero?
+ const Expr *init = var->getInit();
+ if (!init) return nullptr;
+
+ return CGM.EmitConstantInit(*var, CGF);
+}
+
+/// Get the low bit of a nonzero character count. This is the
+/// alignment of the nth byte if the 0th byte is universally aligned.
+static CharUnits getLowBit(CharUnits v) {
+ return CharUnits::fromQuantity(v.getQuantity() & (~v.getQuantity() + 1));
+}
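+
+// Worked examples of the bit trick above (~v + 1 is -v in two's
+// complement, so the AND isolates the lowest set bit):
+//   getLowBit(CharUnits::fromQuantity(12)) == 4   // 0b1100 -> 0b0100
+//   getLowBit(CharUnits::fromQuantity(8))  == 8   // power of 2: unchanged
+//   getLowBit(CharUnits::fromQuantity(5))  == 1   // odd offset: align 1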
+
+static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
+ SmallVectorImpl<llvm::Type*> &elementTypes) {
+ // The header is basically 'struct { void *; int; int; void *; void *; }'.
+ // Assert that that struct is packed.
+ assert(CGM.getIntSize() <= CGM.getPointerSize());
+ assert(CGM.getIntAlign() <= CGM.getPointerAlign());
+ assert((2 * CGM.getIntSize()).isMultipleOf(CGM.getPointerAlign()));
+
+ info.BlockAlign = CGM.getPointerAlign();
+ info.BlockSize = 3 * CGM.getPointerSize() + 2 * CGM.getIntSize();
+
+ assert(elementTypes.empty());
+ elementTypes.push_back(CGM.VoidPtrTy);
+ elementTypes.push_back(CGM.IntTy);
+ elementTypes.push_back(CGM.IntTy);
+ elementTypes.push_back(CGM.VoidPtrTy);
+ elementTypes.push_back(CGM.getBlockDescriptorType());
+
+ assert(elementTypes.size() == BlockHeaderSize);
+}
+
+/// Compute the layout of the given block. Attempts to lay the block
+/// out with minimal space requirements.
+static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
+ CGBlockInfo &info) {
+ ASTContext &C = CGM.getContext();
+ const BlockDecl *block = info.getBlockDecl();
+
+ SmallVector<llvm::Type*, 8> elementTypes;
+ initializeForBlockHeader(CGM, info, elementTypes);
+
+ if (!block->hasCaptures()) {
+ info.StructureType =
+ llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
+ info.CanBeGlobal = true;
+ return;
+ }
+ else if (C.getLangOpts().ObjC1 &&
+ CGM.getLangOpts().getGC() == LangOptions::NonGC)
+ info.HasCapturedVariableLayout = true;
+
+ // Collect the layout chunks.
+ SmallVector<BlockLayoutChunk, 16> layout;
+ layout.reserve(block->capturesCXXThis() +
+ (block->capture_end() - block->capture_begin()));
+
+ CharUnits maxFieldAlign;
+
+ // First, 'this'.
+ if (block->capturesCXXThis()) {
+ assert(CGF && CGF->CurFuncDecl && isa<CXXMethodDecl>(CGF->CurFuncDecl) &&
+ "Can't capture 'this' outside a method");
+ QualType thisType = cast<CXXMethodDecl>(CGF->CurFuncDecl)->getThisType(C);
+
+ // Theoretically, this could be in a different address space, so
+ // don't assume standard pointer size/align.
+ llvm::Type *llvmType = CGM.getTypes().ConvertType(thisType);
+ std::pair<CharUnits,CharUnits> tinfo
+ = CGM.getContext().getTypeInfoInChars(thisType);
+ maxFieldAlign = std::max(maxFieldAlign, tinfo.second);
+
+ layout.push_back(BlockLayoutChunk(tinfo.second, tinfo.first,
+ Qualifiers::OCL_None,
+ nullptr, llvmType));
+ }
+
+ // Next, all the block captures.
+ for (const auto &CI : block->captures()) {
+ const VarDecl *variable = CI.getVariable();
+
+ if (CI.isByRef()) {
+ // We have to copy/dispose of the __block reference.
+ info.NeedsCopyDispose = true;
+
+ // Just use void* instead of a pointer to the byref type.
+ CharUnits align = CGM.getPointerAlign();
+ maxFieldAlign = std::max(maxFieldAlign, align);
+
+ layout.push_back(BlockLayoutChunk(align, CGM.getPointerSize(),
+ Qualifiers::OCL_None, &CI,
+ CGM.VoidPtrTy));
+ continue;
+ }
+
+ // Otherwise, build a layout chunk with the size and alignment of
+ // the declaration.
+ if (llvm::Constant *constant = tryCaptureAsConstant(CGM, CGF, variable)) {
+ info.Captures[variable] = CGBlockInfo::Capture::makeConstant(constant);
+ continue;
+ }
+
+ // If we have a lifetime qualifier, honor it for capture purposes.
+ // That includes *not* copying it if it's __unsafe_unretained.
+ Qualifiers::ObjCLifetime lifetime =
+ variable->getType().getObjCLifetime();
+ if (lifetime) {
+ switch (lifetime) {
+ case Qualifiers::OCL_None: llvm_unreachable("impossible");
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ info.NeedsCopyDispose = true;
+ }
+
+ // Block pointers require copy/dispose. So do Objective-C pointers.
+ } else if (variable->getType()->isObjCRetainableType()) {
+ // But honor the inert __unsafe_unretained qualifier, which doesn't
+ // actually make it into the type system.
+ if (variable->getType()->isObjCInertUnsafeUnretainedType()) {
+ lifetime = Qualifiers::OCL_ExplicitNone;
+ } else {
+ info.NeedsCopyDispose = true;
+ // Used for MRR (manual retain/release) below.
+ lifetime = Qualifiers::OCL_Strong;
+ }
+
+ // So do types that require non-trivial copy construction.
+ } else if (CI.hasCopyExpr()) {
+ info.NeedsCopyDispose = true;
+ info.HasCXXObject = true;
+
+ // And so do types with destructors.
+ } else if (CGM.getLangOpts().CPlusPlus) {
+ if (const CXXRecordDecl *record =
+ variable->getType()->getAsCXXRecordDecl()) {
+ if (!record->hasTrivialDestructor()) {
+ info.HasCXXObject = true;
+ info.NeedsCopyDispose = true;
+ }
+ }
+ }
+
+ QualType VT = variable->getType();
+ CharUnits size = C.getTypeSizeInChars(VT);
+ CharUnits align = C.getDeclAlign(variable);
+
+ maxFieldAlign = std::max(maxFieldAlign, align);
+
+ llvm::Type *llvmType =
+ CGM.getTypes().ConvertTypeForMem(VT);
+
+ layout.push_back(BlockLayoutChunk(align, size, lifetime, &CI, llvmType));
+ }
+
+ // If that was everything, we're done here.
+ if (layout.empty()) {
+ info.StructureType =
+ llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
+ info.CanBeGlobal = true;
+ return;
+ }
+
+ // Sort the layout by alignment. We have to use a stable sort here
+ // to get reproducible results. There should probably be an
+ // llvm::array_pod_stable_sort.
+ std::stable_sort(layout.begin(), layout.end());
+
+ // Needed for blocks layout info.
+ info.BlockHeaderForcedGapOffset = info.BlockSize;
+ info.BlockHeaderForcedGapSize = CharUnits::Zero();
+
+ CharUnits &blockSize = info.BlockSize;
+ info.BlockAlign = std::max(maxFieldAlign, info.BlockAlign);
+
+ // Assuming that the first byte in the header is maximally aligned,
+ // get the alignment of the first byte following the header.
+ CharUnits endAlign = getLowBit(blockSize);
+
+ // If the end of the header isn't satisfactorily aligned for the
+ // maximum thing, look for things that are okay with the header-end
+ // alignment, and keep appending them until we get something that's
+ // aligned right. This algorithm is only guaranteed optimal if
+ // that condition is satisfied at some point; otherwise we can get
+ // things like:
+ // header // next byte has alignment 4
+ // something_with_size_5; // next byte has alignment 1
+ // something_with_alignment_8;
+ // which has 7 bytes of padding, as opposed to the naive solution
+ // which might have less padding.
+ if (endAlign < maxFieldAlign) {
+ SmallVectorImpl<BlockLayoutChunk>::iterator
+ li = layout.begin() + 1, le = layout.end();
+
+ // Look for something that the header end is already
+ // satisfactorily aligned for.
+ for (; li != le && endAlign < li->Alignment; ++li)
+ ;
+
+ // If we found something that's naturally aligned for the end of
+ // the header, keep adding things...
+ if (li != le) {
+ SmallVectorImpl<BlockLayoutChunk>::iterator first = li;
+ for (; li != le; ++li) {
+ assert(endAlign >= li->Alignment);
+
+ li->setIndex(info, elementTypes.size(), blockSize);
+ elementTypes.push_back(li->Type);
+ blockSize += li->Size;
+ endAlign = getLowBit(blockSize);
+
+ // ...until we get to the alignment of the maximum field.
+ if (endAlign >= maxFieldAlign) {
+ break;
+ }
+ }
+ // Don't re-append everything we just appended.
+ layout.erase(first, li);
+ }
+ }
+
+ assert(endAlign == getLowBit(blockSize));
+
+ // At this point, we just have to add padding if the end align still
+ // isn't aligned right.
+ if (endAlign < maxFieldAlign) {
+ CharUnits newBlockSize = blockSize.RoundUpToAlignment(maxFieldAlign);
+ CharUnits padding = newBlockSize - blockSize;
+
+ // If we haven't yet added any fields, remember that there was an
+ // initial gap; this needs to go into the block layout bitmap.
+ if (blockSize == info.BlockHeaderForcedGapOffset) {
+ info.BlockHeaderForcedGapSize = padding;
+ }
+
+ elementTypes.push_back(llvm::ArrayType::get(CGM.Int8Ty,
+ padding.getQuantity()));
+ blockSize = newBlockSize;
+ endAlign = getLowBit(blockSize); // might be > maxFieldAlign
+ }
+
+ assert(endAlign >= maxFieldAlign);
+ assert(endAlign == getLowBit(blockSize));
+ // Slam everything else on now. This works because they have
+ // strictly decreasing alignment and we expect that size is always a
+ // multiple of alignment.
+ for (SmallVectorImpl<BlockLayoutChunk>::iterator
+ li = layout.begin(), le = layout.end(); li != le; ++li) {
+ if (endAlign < li->Alignment) {
+ // The size may not be a multiple of the alignment. This can only
+ // happen with an over-aligned variable, so we add a padding field
+ // to make the size a multiple of the alignment.
+ CharUnits padding = li->Alignment - endAlign;
+ elementTypes.push_back(llvm::ArrayType::get(CGM.Int8Ty,
+ padding.getQuantity()));
+ blockSize += padding;
+ endAlign = getLowBit(blockSize);
+ }
+ assert(endAlign >= li->Alignment);
+ li->setIndex(info, elementTypes.size(), blockSize);
+ elementTypes.push_back(li->Type);
+ blockSize += li->Size;
+ endAlign = getLowBit(blockSize);
+ }
+
+ info.StructureType =
+ llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
+}
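+
+// A notional example of the layout above on a 64-bit target: the header
+// occupies 32 bytes (3 pointers + 2 ints), so endAlign starts at 32.
+// Capturing {long double ld (align 16), double d (8), char c (1)}, the
+// stable sort yields ld, d, c; maxFieldAlign is 16 and 32 is already
+// 16-aligned, so every chunk is appended in order with no padding:
+//   offset 32: ld (16 bytes), offset 48: d (8), offset 56: c (1)
+// leaving BlockSize == 57 and no forced gap in the layout bitmap.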
+
+/// Enter the scope of a block. This should be run at the entrance to
+/// a full-expression so that the block's cleanups are pushed at the
+/// right place in the stack.
+static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) {
+ assert(CGF.HaveInsertPoint());
+
+ // Allocate the block info and place it at the head of the list.
+ CGBlockInfo &blockInfo =
+ *new CGBlockInfo(block, CGF.CurFn->getName());
+ blockInfo.NextBlockInfo = CGF.FirstBlockInfo;
+ CGF.FirstBlockInfo = &blockInfo;
+
+ // Compute information about the layout, etc., of this block,
+ // pushing cleanups as necessary.
+ computeBlockInfo(CGF.CGM, &CGF, blockInfo);
+
+ // Nothing else to do if it can be global.
+ if (blockInfo.CanBeGlobal) return;
+
+ // Make the allocation for the block.
+ blockInfo.LocalAddress = CGF.CreateTempAlloca(blockInfo.StructureType,
+ blockInfo.BlockAlign, "block");
+
+ // If there are cleanups to emit, enter them (but inactive).
+ if (!blockInfo.NeedsCopyDispose) return;
+
+ // Walk through the captures (in order) and find the ones not
+ // captured by constant.
+ for (const auto &CI : block->captures()) {
+ // Ignore __block captures; there's nothing special in the
+ // on-stack block that we need to do for them.
+ if (CI.isByRef()) continue;
+
+ // Ignore variables that are constant-captured.
+ const VarDecl *variable = CI.getVariable();
+ CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) continue;
+
+ // Ignore objects that aren't destructed.
+ QualType::DestructionKind dtorKind =
+ variable->getType().isDestructedType();
+ if (dtorKind == QualType::DK_none) continue;
+
+ CodeGenFunction::Destroyer *destroyer;
+
+ // Block captures count as local values and have imprecise semantics.
+ // They also can't be arrays, so we don't need to worry about that.
+ if (dtorKind == QualType::DK_objc_strong_lifetime) {
+ destroyer = CodeGenFunction::destroyARCStrongImprecise;
+ } else {
+ destroyer = CGF.getDestroyer(dtorKind);
+ }
+
+ // GEP down to the address.
+ Address addr = CGF.Builder.CreateStructGEP(blockInfo.LocalAddress,
+ capture.getIndex(),
+ capture.getOffset());
+
+ // We can use that GEP as the dominating IP.
+ if (!blockInfo.DominatingIP)
+ blockInfo.DominatingIP = cast<llvm::Instruction>(addr.getPointer());
+
+ CleanupKind cleanupKind = InactiveNormalCleanup;
+ bool useArrayEHCleanup = CGF.needsEHCleanup(dtorKind);
+ if (useArrayEHCleanup)
+ cleanupKind = InactiveNormalAndEHCleanup;
+
+ CGF.pushDestroy(cleanupKind, addr, variable->getType(),
+ destroyer, useArrayEHCleanup);
+
+ // Remember where that cleanup was.
+ capture.setCleanup(CGF.EHStack.stable_begin());
+ }
+}
+
+/// Enter a full-expression with a non-trivial number of objects to
+/// clean up. This is in this file because, at the moment, the only
+/// kind of cleanup object is a BlockDecl*.
+void CodeGenFunction::enterNonTrivialFullExpression(const ExprWithCleanups *E) {
+ assert(E->getNumObjects() != 0);
+ ArrayRef<ExprWithCleanups::CleanupObject> cleanups = E->getObjects();
+ for (ArrayRef<ExprWithCleanups::CleanupObject>::iterator
+ i = cleanups.begin(), e = cleanups.end(); i != e; ++i) {
+ enterBlockScope(*this, *i);
+ }
+}
+
+/// Find the layout for the given block in a linked list and remove it.
+static CGBlockInfo *findAndRemoveBlockInfo(CGBlockInfo **head,
+ const BlockDecl *block) {
+ while (true) {
+ assert(head && *head);
+ CGBlockInfo *cur = *head;
+
+ // If this is the block we're looking for, splice it out of the list.
+ if (cur->getBlockDecl() == block) {
+ *head = cur->NextBlockInfo;
+ return cur;
+ }
+
+ head = &cur->NextBlockInfo;
+ }
+}
+
+/// Destroy a chain of block layouts.
+void CodeGenFunction::destroyBlockInfos(CGBlockInfo *head) {
+ assert(head && "destroying an empty chain");
+ do {
+ CGBlockInfo *cur = head;
+ head = cur->NextBlockInfo;
+ delete cur;
+ } while (head != nullptr);
+}
+
+/// Emit a block literal expression in the current function.
+llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
+ // If the block has no captures, we won't have a pre-computed
+ // layout for it.
+ if (!blockExpr->getBlockDecl()->hasCaptures()) {
+ CGBlockInfo blockInfo(blockExpr->getBlockDecl(), CurFn->getName());
+ computeBlockInfo(CGM, this, blockInfo);
+ blockInfo.BlockExpression = blockExpr;
+ return EmitBlockLiteral(blockInfo);
+ }
+
+ // Find the block info for this block and take ownership of it.
+ std::unique_ptr<CGBlockInfo> blockInfo;
+ blockInfo.reset(findAndRemoveBlockInfo(&FirstBlockInfo,
+ blockExpr->getBlockDecl()));
+
+ blockInfo->BlockExpression = blockExpr;
+ return EmitBlockLiteral(*blockInfo);
+}
+
+llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
+ // Using the computed layout, generate the actual block function.
+ bool isLambdaConv = blockInfo.getBlockDecl()->isConversionFromLambda();
+ llvm::Constant *blockFn
+ = CodeGenFunction(CGM, true).GenerateBlockFunction(CurGD, blockInfo,
+ LocalDeclMap,
+ isLambdaConv);
+ blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy);
+
+ // If there is nothing to capture, we can emit this as a global block.
+ if (blockInfo.CanBeGlobal)
+ return buildGlobalBlock(CGM, blockInfo, blockFn);
+
+ // Otherwise, we have to emit this as a local block.
+
+ llvm::Constant *isa = CGM.getNSConcreteStackBlock();
+ isa = llvm::ConstantExpr::getBitCast(isa, VoidPtrTy);
+
+ // Build the block descriptor.
+ llvm::Constant *descriptor = buildBlockDescriptor(CGM, blockInfo);
+
+ Address blockAddr = blockInfo.LocalAddress;
+ assert(blockAddr.isValid() && "block has no address!");
+
+ // Compute the initial on-stack block flags.
+ BlockFlags flags = BLOCK_HAS_SIGNATURE;
+ if (blockInfo.HasCapturedVariableLayout) flags |= BLOCK_HAS_EXTENDED_LAYOUT;
+ if (blockInfo.NeedsCopyDispose) flags |= BLOCK_HAS_COPY_DISPOSE;
+ if (blockInfo.HasCXXObject) flags |= BLOCK_HAS_CXX_OBJ;
+ if (blockInfo.UsesStret) flags |= BLOCK_USE_STRET;
+
+ auto projectField =
+ [&](unsigned index, CharUnits offset, const Twine &name) -> Address {
+ return Builder.CreateStructGEP(blockAddr, index, offset, name);
+ };
+ auto storeField =
+ [&](llvm::Value *value, unsigned index, CharUnits offset,
+ const Twine &name) {
+ Builder.CreateStore(value, projectField(index, offset, name));
+ };
+
+ // Initialize the block header.
+ {
+ // We assume all the header fields are densely packed.
+ unsigned index = 0;
+ CharUnits offset;
+ auto addHeaderField =
+ [&](llvm::Value *value, CharUnits size, const Twine &name) {
+ storeField(value, index, offset, name);
+ offset += size;
+ index++;
+ };
+
+ addHeaderField(isa, getPointerSize(), "block.isa");
+ addHeaderField(llvm::ConstantInt::get(IntTy, flags.getBitMask()),
+ getIntSize(), "block.flags");
+ addHeaderField(llvm::ConstantInt::get(IntTy, 0),
+ getIntSize(), "block.reserved");
+ addHeaderField(blockFn, getPointerSize(), "block.invoke");
+ addHeaderField(descriptor, getPointerSize(), "block.descriptor");
+ }
+
+ // Finally, capture all the values into the block.
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ // First, 'this'.
+ if (blockDecl->capturesCXXThis()) {
+ Address addr = projectField(blockInfo.CXXThisIndex, blockInfo.CXXThisOffset,
+ "block.captured-this.addr");
+ Builder.CreateStore(LoadCXXThis(), addr);
+ }
+
+ // Next, captured variables.
+ for (const auto &CI : blockDecl->captures()) {
+ const VarDecl *variable = CI.getVariable();
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+
+ // Ignore constant captures.
+ if (capture.isConstant()) continue;
+
+ QualType type = variable->getType();
+
+ // This will be a [[type]]*, except that a byref entry will just be
+ // an i8**.
+ Address blockField =
+ projectField(capture.getIndex(), capture.getOffset(), "block.captured");
+
+ // Compute the address of the thing we're going to move into the
+ // block literal.
+ Address src = Address::invalid();
+ if (BlockInfo && CI.isNested()) {
+ // We need to use the capture from the enclosing block.
+ const CGBlockInfo::Capture &enclosingCapture =
+ BlockInfo->getCapture(variable);
+
+ // This is a [[type]]*, except that a byref entry will just be an i8**.
+ src = Builder.CreateStructGEP(LoadBlockStruct(),
+ enclosingCapture.getIndex(),
+ enclosingCapture.getOffset(),
+ "block.capture.addr");
+ } else if (blockDecl->isConversionFromLambda()) {
+ // The lambda capture in a lambda's conversion-to-block-pointer is
+ // special; we'll simply emit it directly.
+ src = Address::invalid();
+ } else {
+ // Just look it up in the locals map, which will give us back a
+ // [[type]]*. If that doesn't work, do the more elaborate DRE
+ // emission.
+ auto it = LocalDeclMap.find(variable);
+ if (it != LocalDeclMap.end()) {
+ src = it->second;
+ } else {
+ DeclRefExpr declRef(
+ const_cast<VarDecl *>(variable),
+ /*RefersToEnclosingVariableOrCapture*/ CI.isNested(), type,
+ VK_LValue, SourceLocation());
+ src = EmitDeclRefLValue(&declRef).getAddress();
+ }
+ }
+
+ // For byrefs, we just write the pointer to the byref struct into
+ // the block field. There's no need to chase the forwarding
+ // pointer at this point, since we're building something that will
+ // live a shorter life than the stack byref anyway.
+ if (CI.isByRef()) {
+ // Get a void* that points to the byref struct.
+ llvm::Value *byrefPointer;
+ if (CI.isNested())
+ byrefPointer = Builder.CreateLoad(src, "byref.capture");
+ else
+ byrefPointer = Builder.CreateBitCast(src.getPointer(), VoidPtrTy);
+
+ // Write that void* into the capture field.
+ Builder.CreateStore(byrefPointer, blockField);
+
+ // If we have a copy constructor, evaluate that into the block field.
+ } else if (const Expr *copyExpr = CI.getCopyExpr()) {
+ if (blockDecl->isConversionFromLambda()) {
+ // If we have a lambda conversion, emit the expression
+ // directly into the block instead.
+ AggValueSlot Slot =
+ AggValueSlot::forAddr(blockField, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ EmitAggExpr(copyExpr, Slot);
+ } else {
+ EmitSynthesizedCXXCopyCtor(blockField, src, copyExpr);
+ }
+
+ // If it's a reference variable, copy the reference into the block field.
+ } else if (type->isReferenceType()) {
+ llvm::Value *ref = Builder.CreateLoad(src, "ref.val");
+ Builder.CreateStore(ref, blockField);
+
+ // If this is an ARC __strong block-pointer variable, don't do a
+ // block copy.
+ //
+ // TODO: this can be generalized into the normal initialization logic:
+ // we should never need to do a block-copy when initializing a local
+ // variable, because the local variable's lifetime should be strictly
+ // contained within the stack block's.
+ } else if (type.getObjCLifetime() == Qualifiers::OCL_Strong &&
+ type->isBlockPointerType()) {
+ // Load the block and do a simple retain.
+ llvm::Value *value = Builder.CreateLoad(src, "block.captured_block");
+ value = EmitARCRetainNonBlock(value);
+
+ // Do a primitive store to the block field.
+ Builder.CreateStore(value, blockField);
+
+ // Otherwise, fake up a POD copy into the block field.
+ } else {
+ // Fake up a new variable so that EmitScalarInit doesn't think
+ // we're referring to the variable in its own initializer.
+ ImplicitParamDecl blockFieldPseudoVar(getContext(), /*DC*/ nullptr,
+ SourceLocation(), /*name*/ nullptr,
+ type);
+
+ // We use one of these or the other depending on whether the
+ // reference is nested.
+ DeclRefExpr declRef(const_cast<VarDecl *>(variable),
+ /*RefersToEnclosingVariableOrCapture*/ CI.isNested(),
+ type, VK_LValue, SourceLocation());
+
+ ImplicitCastExpr l2r(ImplicitCastExpr::OnStack, type, CK_LValueToRValue,
+ &declRef, VK_RValue);
+ // FIXME: Pass a specific location for the expr init so that the store is
+ // attributed to a reasonable location - otherwise it may be attributed to
+ // locations of subexpressions in the initialization.
+ EmitExprAsInit(&l2r, &blockFieldPseudoVar,
+ MakeAddrLValue(blockField, type, AlignmentSource::Decl),
+ /*captured by init*/ false);
+ }
+
+ // Activate the cleanup if layout pushed one.
+ if (!CI.isByRef()) {
+ EHScopeStack::stable_iterator cleanup = capture.getCleanup();
+ if (cleanup.isValid())
+ ActivateCleanupBlock(cleanup, blockInfo.DominatingIP);
+ }
+ }
+
+ // Cast to the converted block-pointer type, which happens (somewhat
+ // unfortunately) to be a pointer to function type.
+ llvm::Value *result =
+ Builder.CreateBitCast(blockAddr.getPointer(),
+ ConvertType(blockInfo.getBlockExpr()->getType()));
+
+ return result;
+}
+
+
+llvm::Type *CodeGenModule::getBlockDescriptorType() {
+ if (BlockDescriptorType)
+ return BlockDescriptorType;
+
+ llvm::Type *UnsignedLongTy =
+ getTypes().ConvertType(getContext().UnsignedLongTy);
+
+ // struct __block_descriptor {
+ // unsigned long reserved;
+ // unsigned long block_size;
+ //
+ // // later, the following will be added
+ //
+ // struct {
+ // void (*copyHelper)();
+ // void (*disposeHelper)();
+ // } helpers; // !!! optional
+ //
+ // const char *signature; // the block signature
+ // const char *layout; // reserved
+ // };
+ BlockDescriptorType =
+ llvm::StructType::create("struct.__block_descriptor",
+ UnsignedLongTy, UnsignedLongTy, nullptr);
+
+ // Now form a pointer to that.
+ BlockDescriptorType = llvm::PointerType::getUnqual(BlockDescriptorType);
+ return BlockDescriptorType;
+}
+
+llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
+ if (GenericBlockLiteralType)
+ return GenericBlockLiteralType;
+
+ llvm::Type *BlockDescPtrTy = getBlockDescriptorType();
+
+ // struct __block_literal_generic {
+ // void *__isa;
+ // int __flags;
+ // int __reserved;
+ // void (*__invoke)(void *);
+ // struct __block_descriptor *__descriptor;
+ // };
+ GenericBlockLiteralType =
+ llvm::StructType::create("struct.__block_literal_generic",
+ VoidPtrTy, IntTy, IntTy, VoidPtrTy,
+ BlockDescPtrTy, nullptr);
+
+ return GenericBlockLiteralType;
+}
+
+RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ const BlockPointerType *BPT =
+ E->getCallee()->getType()->getAs<BlockPointerType>();
+
+ llvm::Value *Callee = EmitScalarExpr(E->getCallee());
+
+ // Get a pointer to the generic block literal.
+ llvm::Type *BlockLiteralTy =
+ llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());
+
+ // Bitcast the callee to a block literal.
+ llvm::Value *BlockLiteral =
+ Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal");
+
+ // Get the function pointer from the literal.
+ llvm::Value *FuncPtr =
+ Builder.CreateStructGEP(CGM.getGenericBlockLiteralType(), BlockLiteral, 3);
+
+ BlockLiteral = Builder.CreateBitCast(BlockLiteral, VoidPtrTy);
+
+ // Add the block literal.
+ CallArgList Args;
+ Args.add(RValue::get(BlockLiteral), getContext().VoidPtrTy);
+
+ QualType FnType = BPT->getPointeeType();
+
+ // And the rest of the arguments.
+ EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(), E->arguments());
+
+ // Load the function.
+ llvm::Value *Func = Builder.CreateAlignedLoad(FuncPtr, getPointerAlign());
+
+ const FunctionType *FuncTy = FnType->castAs<FunctionType>();
+ const CGFunctionInfo &FnInfo =
+ CGM.getTypes().arrangeBlockFunctionCall(Args, FuncTy);
+
+ // Cast the function pointer to the right type.
+ llvm::Type *BlockFTy = CGM.getTypes().GetFunctionType(FnInfo);
+
+ llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
+ Func = Builder.CreateBitCast(Func, BlockFTyPtr);
+
+ // And call the block.
+ return EmitCall(FnInfo, Func, ReturnValue, Args);
+}
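+
+// In source terms, the sequence above lowers a call such as
+//
+//   int (^blk)(int) = ...;
+//   int r = blk(7);
+//
+// to, in effect,
+//
+//   struct __block_literal_generic *lit =
+//       (struct __block_literal_generic *)blk;
+//   r = ((int (*)(void *, int))lit->__invoke)(lit, 7);
+//
+// i.e. the literal itself is always the hidden first argument, and
+// __invoke is the field at index 3 GEP'd out above.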
+
+Address CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
+ bool isByRef) {
+ assert(BlockInfo && "evaluating block ref without block information?");
+ const CGBlockInfo::Capture &capture = BlockInfo->getCapture(variable);
+
+ // Handle constant captures.
+ if (capture.isConstant()) return LocalDeclMap.find(variable)->second;
+
+ Address addr =
+ Builder.CreateStructGEP(LoadBlockStruct(), capture.getIndex(),
+ capture.getOffset(), "block.capture.addr");
+
+ if (isByRef) {
+ // addr should be a void** right now. Load, then cast the result
+ // to byref*.
+
+ auto &byrefInfo = getBlockByrefInfo(variable);
+ addr = Address(Builder.CreateLoad(addr), byrefInfo.ByrefAlignment);
+
+ auto byrefPointerType = llvm::PointerType::get(byrefInfo.Type, 0);
+ addr = Builder.CreateBitCast(addr, byrefPointerType, "byref.addr");
+
+ addr = emitBlockByrefAddress(addr, byrefInfo, /*follow*/ true,
+ variable->getName());
+ }
+
+ if (auto refType = variable->getType()->getAs<ReferenceType>()) {
+ addr = EmitLoadOfReference(addr, refType);
+ }
+
+ return addr;
+}
+
+llvm::Constant *
+CodeGenModule::GetAddrOfGlobalBlock(const BlockExpr *blockExpr,
+ const char *name) {
+ CGBlockInfo blockInfo(blockExpr->getBlockDecl(), name);
+ blockInfo.BlockExpression = blockExpr;
+
+ // Compute information about the layout, etc., of this block.
+ computeBlockInfo(*this, nullptr, blockInfo);
+
+ // Using that metadata, generate the actual block function.
+ llvm::Constant *blockFn;
+ {
+ CodeGenFunction::DeclMapTy LocalDeclMap;
+ blockFn = CodeGenFunction(*this).GenerateBlockFunction(GlobalDecl(),
+ blockInfo,
+ LocalDeclMap,
+ false);
+ }
+ blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy);
+
+ return buildGlobalBlock(*this, blockInfo, blockFn);
+}
+
+static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo,
+ llvm::Constant *blockFn) {
+ assert(blockInfo.CanBeGlobal);
+
+ // Generate the constants for the block literal initializer.
+ llvm::Constant *fields[BlockHeaderSize];
+
+ // isa
+ fields[0] = CGM.getNSConcreteGlobalBlock();
+
+ // __flags
+ BlockFlags flags = BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE;
+ if (blockInfo.UsesStret) flags |= BLOCK_USE_STRET;
+
+ fields[1] = llvm::ConstantInt::get(CGM.IntTy, flags.getBitMask());
+
+ // Reserved
+ fields[2] = llvm::Constant::getNullValue(CGM.IntTy);
+
+ // Function
+ fields[3] = blockFn;
+
+ // Descriptor
+ fields[4] = buildBlockDescriptor(CGM, blockInfo);
+
+ llvm::Constant *init = llvm::ConstantStruct::getAnon(fields);
+
+ llvm::GlobalVariable *literal =
+ new llvm::GlobalVariable(CGM.getModule(),
+ init->getType(),
+ /*constant*/ true,
+ llvm::GlobalVariable::InternalLinkage,
+ init,
+ "__block_literal_global");
+ literal->setAlignment(blockInfo.BlockAlign.getQuantity());
+
+ // Return a constant of the appropriately-casted type.
+ llvm::Type *requiredType =
+ CGM.getTypes().ConvertType(blockInfo.getBlockExpr()->getType());
+ return llvm::ConstantExpr::getBitCast(literal, requiredType);
+}
+
+void CodeGenFunction::setBlockContextParameter(const ImplicitParamDecl *D,
+ unsigned argNum,
+ llvm::Value *arg) {
+ assert(BlockInfo && "not emitting prologue of block invocation function?!");
+
+ llvm::Value *localAddr = nullptr;
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ // Allocate a stack slot to let the debug info survive the RA.
+ Address alloc = CreateMemTemp(D->getType(), D->getName() + ".addr");
+ Builder.CreateStore(arg, alloc);
+ localAddr = Builder.CreateLoad(alloc);
+ }
+
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ if (CGM.getCodeGenOpts().getDebugInfo()
+ >= CodeGenOptions::LimitedDebugInfo) {
+ DI->setLocation(D->getLocation());
+ DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, arg, argNum,
+ localAddr, Builder);
+ }
+ }
+
+ SourceLocation StartLoc = BlockInfo->getBlockExpr()->getBody()->getLocStart();
+ ApplyDebugLocation Scope(*this, StartLoc);
+
+ // Instead of messing around with LocalDeclMap, just set the value
+ // directly as BlockPointer.
+ BlockPointer = Builder.CreateBitCast(arg,
+ BlockInfo->StructureType->getPointerTo(),
+ "block");
+}
+
+Address CodeGenFunction::LoadBlockStruct() {
+ assert(BlockInfo && "not in a block invocation function!");
+ assert(BlockPointer && "no block pointer set!");
+ return Address(BlockPointer, BlockInfo->BlockAlign);
+}
+
+llvm::Function *
+CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
+ const CGBlockInfo &blockInfo,
+ const DeclMapTy &ldm,
+ bool IsLambdaConversionToBlock) {
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ CurGD = GD;
+
+ CurEHLocation = blockInfo.getBlockExpr()->getLocEnd();
+
+ BlockInfo = &blockInfo;
+
+ // Arrange for local static and local extern declarations to appear
+ // to be local to this function as well, in case they're directly
+ // referenced in a block.
+ for (DeclMapTy::const_iterator i = ldm.begin(), e = ldm.end(); i != e; ++i) {
+ const auto *var = dyn_cast<VarDecl>(i->first);
+ if (var && !var->hasLocalStorage())
+ setAddrOfLocalVar(var, i->second);
+ }
+
+ // Begin building the function declaration.
+
+ // Build the argument list.
+ FunctionArgList args;
+
+ // The first argument is the block pointer. Just take it as a void*
+ // and cast it later.
+ QualType selfTy = getContext().VoidPtrTy;
+ IdentifierInfo *II = &CGM.getContext().Idents.get(".block_descriptor");
+
+ ImplicitParamDecl selfDecl(getContext(), const_cast<BlockDecl*>(blockDecl),
+ SourceLocation(), II, selfTy);
+ args.push_back(&selfDecl);
+
+ // Now add the rest of the parameters.
+ args.append(blockDecl->param_begin(), blockDecl->param_end());
+
+ // Create the function declaration.
+ const FunctionProtoType *fnType = blockInfo.getBlockExpr()->getFunctionType();
+ const CGFunctionInfo &fnInfo = CGM.getTypes().arrangeFreeFunctionDeclaration(
+ fnType->getReturnType(), args, fnType->getExtInfo(),
+ fnType->isVariadic());
+ if (CGM.ReturnSlotInterferesWithArgs(fnInfo))
+ blockInfo.UsesStret = true;
+
+ llvm::FunctionType *fnLLVMType = CGM.getTypes().GetFunctionType(fnInfo);
+
+ StringRef name = CGM.getBlockMangledName(GD, blockDecl);
+ llvm::Function *fn = llvm::Function::Create(
+ fnLLVMType, llvm::GlobalValue::InternalLinkage, name, &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(blockDecl, fn, fnInfo);
+
+ // Begin generating the function.
+ StartFunction(blockDecl, fnType->getReturnType(), fn, fnInfo, args,
+ blockDecl->getLocation(),
+ blockInfo.getBlockExpr()->getBody()->getLocStart());
+
+ // Okay. Undo some of what StartFunction did.
+
+ // At -O0 we generate an explicit alloca for the BlockPointer, so the RA
+ // won't delete the dbg.declare intrinsics for captured variables.
+ llvm::Value *BlockPointerDbgLoc = BlockPointer;
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ // Allocate a stack slot for it, so we can point the debugger to it
+ Address Alloca = CreateTempAlloca(BlockPointer->getType(),
+ getPointerAlign(),
+ "block.addr");
+ // Set the DebugLocation to empty, so the store is recognized as a
+ // frame setup instruction by llvm::DwarfDebug::beginFunction().
+ auto NL = ApplyDebugLocation::CreateEmpty(*this);
+ Builder.CreateStore(BlockPointer, Alloca);
+ BlockPointerDbgLoc = Alloca.getPointer();
+ }
+
+ // If we have a C++ 'this' reference, go ahead and force it into
+ // existence now.
+ if (blockDecl->capturesCXXThis()) {
+ Address addr =
+ Builder.CreateStructGEP(LoadBlockStruct(), blockInfo.CXXThisIndex,
+ blockInfo.CXXThisOffset, "block.captured-this");
+ CXXThisValue = Builder.CreateLoad(addr, "this");
+ }
+
+ // Also force all the constant captures.
+ for (const auto &CI : blockDecl->captures()) {
+ const VarDecl *variable = CI.getVariable();
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (!capture.isConstant()) continue;
+
+ CharUnits align = getContext().getDeclAlign(variable);
+ Address alloca =
+ CreateMemTemp(variable->getType(), align, "block.captured-const");
+
+ Builder.CreateStore(capture.getConstant(), alloca);
+
+ setAddrOfLocalVar(variable, alloca);
+ }
+
+ // Save a spot to insert the debug information for all the DeclRefExprs.
+ llvm::BasicBlock *entry = Builder.GetInsertBlock();
+ llvm::BasicBlock::iterator entry_ptr = Builder.GetInsertPoint();
+ --entry_ptr;
+
+ if (IsLambdaConversionToBlock)
+ EmitLambdaBlockInvokeBody();
+ else {
+ PGO.assignRegionCounters(GlobalDecl(blockDecl), fn);
+ incrementProfileCounter(blockDecl->getBody());
+ EmitStmt(blockDecl->getBody());
+ }
+
+ // Remember where we were...
+ llvm::BasicBlock *resume = Builder.GetInsertBlock();
+
+ // Go back to the entry.
+ ++entry_ptr;
+ Builder.SetInsertPoint(entry, entry_ptr);
+
+ // Emit debug information for all the DeclRefExprs.
+ // FIXME: also for 'this'
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ for (const auto &CI : blockDecl->captures()) {
+ const VarDecl *variable = CI.getVariable();
+ DI->EmitLocation(Builder, variable->getLocation());
+
+ if (CGM.getCodeGenOpts().getDebugInfo()
+ >= CodeGenOptions::LimitedDebugInfo) {
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) {
+ auto addr = LocalDeclMap.find(variable)->second;
+ DI->EmitDeclareOfAutoVariable(variable, addr.getPointer(),
+ Builder);
+ continue;
+ }
+
+ DI->EmitDeclareOfBlockDeclRefVariable(
+ variable, BlockPointerDbgLoc, Builder, blockInfo,
+ entry_ptr == entry->end() ? nullptr : &*entry_ptr);
+ }
+ }
+ // Recover location if it was changed in the above loop.
+ DI->EmitLocation(Builder,
+ cast<CompoundStmt>(blockDecl->getBody())->getRBracLoc());
+ }
+
+ // And resume where we left off.
+ if (resume == nullptr)
+ Builder.ClearInsertionPoint();
+ else
+ Builder.SetInsertPoint(resume);
+
+ FinishFunction(cast<CompoundStmt>(blockDecl->getBody())->getRBracLoc());
+
+ return fn;
+}
+
+/*
+ notes.push_back(HelperInfo());
+ HelperInfo &note = notes.back();
+ note.index = capture.getIndex();
+ note.RequiresCopying = (ci->hasCopyExpr() || BlockRequiresCopying(type));
+ note.cxxbar_import = ci->getCopyExpr();
+
+ if (ci->isByRef()) {
+ note.flag = BLOCK_FIELD_IS_BYREF;
+ if (type.isObjCGCWeak())
+ note.flag |= BLOCK_FIELD_IS_WEAK;
+ } else if (type->isBlockPointerType()) {
+ note.flag = BLOCK_FIELD_IS_BLOCK;
+ } else {
+ note.flag = BLOCK_FIELD_IS_OBJECT;
+ }
+ */
+
+/// Generate the copy-helper function for a block closure object:
+/// static void block_copy_helper(block_t *dst, block_t *src);
+/// The runtime will have previously initialized 'dst' by doing a
+/// bit-copy of 'src'.
+///
+/// Note that this copies an entire block closure object to the heap;
+/// it should not be confused with a 'byref copy helper', which moves
+/// the contents of an individual __block variable to the heap.
+llvm::Constant *
+CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
+ ASTContext &C = getContext();
+
+ FunctionArgList args;
+ ImplicitParamDecl dstDecl(getContext(), nullptr, SourceLocation(), nullptr,
+ C.VoidPtrTy);
+ args.push_back(&dstDecl);
+ ImplicitParamDecl srcDecl(getContext(), nullptr, SourceLocation(), nullptr,
+ C.VoidPtrTy);
+ args.push_back(&srcDecl);
+
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
+ C.VoidTy, args, FunctionType::ExtInfo(), /*variadic=*/false);
+
+ // FIXME: it would be nice if these were mergeable with things with
+ // identical semantics.
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__copy_helper_block_", &CGM.getModule());
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__copy_helper_block_");
+
+ FunctionDecl *FD = FunctionDecl::Create(C,
+ C.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, C.VoidTy,
+ nullptr, SC_Static,
+ false,
+ false);
+
+ CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);
+
+ auto NL = ApplyDebugLocation::CreateEmpty(*this);
+ StartFunction(FD, C.VoidTy, Fn, FI, args);
+ // Create a scope with an artificial location for the body of this function.
+ auto AL = ApplyDebugLocation::CreateArtificial(*this);
+ llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
+
+ Address src = GetAddrOfLocalVar(&srcDecl);
+ src = Address(Builder.CreateLoad(src), blockInfo.BlockAlign);
+ src = Builder.CreateBitCast(src, structPtrTy, "block.source");
+
+ Address dst = GetAddrOfLocalVar(&dstDecl);
+ dst = Address(Builder.CreateLoad(dst), blockInfo.BlockAlign);
+ dst = Builder.CreateBitCast(dst, structPtrTy, "block.dest");
+
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ for (const auto &CI : blockDecl->captures()) {
+ const VarDecl *variable = CI.getVariable();
+ QualType type = variable->getType();
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) continue;
+
+ const Expr *copyExpr = CI.getCopyExpr();
+ BlockFieldFlags flags;
+
+ bool useARCWeakCopy = false;
+ bool useARCStrongCopy = false;
+
+ if (copyExpr) {
+ assert(!CI.isByRef());
+ // don't bother computing flags
+
+ } else if (CI.isByRef()) {
+ flags = BLOCK_FIELD_IS_BYREF;
+ if (type.isObjCGCWeak())
+ flags |= BLOCK_FIELD_IS_WEAK;
+
+ } else if (type->isObjCRetainableType()) {
+ flags = BLOCK_FIELD_IS_OBJECT;
+ bool isBlockPointer = type->isBlockPointerType();
+ if (isBlockPointer)
+ flags = BLOCK_FIELD_IS_BLOCK;
+
+ // Special rules for ARC captures:
+ Qualifiers qs = type.getQualifiers();
+
+ // We need to register __weak direct captures with the runtime.
+ if (qs.getObjCLifetime() == Qualifiers::OCL_Weak) {
+ useARCWeakCopy = true;
+
+ // We need to retain the copied value for __strong direct captures.
+ } else if (qs.getObjCLifetime() == Qualifiers::OCL_Strong) {
+ // If it's a block pointer, we have to copy the block and
+ // assign that to the destination pointer, so we might as
+ // well use _Block_object_assign. Otherwise we can avoid that.
+ if (!isBlockPointer)
+ useARCStrongCopy = true;
+
+ // Non-ARC captures of retainable pointers are strong and
+ // therefore require a call to _Block_object_assign.
+ } else if (!qs.getObjCLifetime() && !getLangOpts().ObjCAutoRefCount) {
+ // fall through
+
+ // Otherwise the memcpy is fine.
+ } else {
+ continue;
+ }
+
+ // For all other types, the memcpy is fine.
+ } else {
+ continue;
+ }
+
+ unsigned index = capture.getIndex();
+ Address srcField = Builder.CreateStructGEP(src, index, capture.getOffset());
+ Address dstField = Builder.CreateStructGEP(dst, index, capture.getOffset());
+
+ // If there's an explicit copy expression, we do that.
+ if (copyExpr) {
+ EmitSynthesizedCXXCopyCtor(dstField, srcField, copyExpr);
+ } else if (useARCWeakCopy) {
+ EmitARCCopyWeak(dstField, srcField);
+ } else {
+ llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src");
+ if (useARCStrongCopy) {
+ // At -O0, store null into the destination field (so that the
+ // storeStrong doesn't over-release) and then call storeStrong.
+ // This is a workaround to not having an initStrong call.
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ auto *ty = cast<llvm::PointerType>(srcValue->getType());
+ llvm::Value *null = llvm::ConstantPointerNull::get(ty);
+ Builder.CreateStore(null, dstField);
+ EmitARCStoreStrongCall(dstField, srcValue, true);
+
+ // With optimization enabled, take advantage of the fact that
+ // the blocks runtime guarantees a memcpy of the block data, and
+ // just emit a retain of the src field.
+ } else {
+ EmitARCRetainNonBlock(srcValue);
+
+ // We don't need this anymore, so kill it. It's not quite
+ // worth the annoyance to avoid creating it in the first place.
+ cast<llvm::Instruction>(dstField.getPointer())->eraseFromParent();
+ }
+ } else {
+ srcValue = Builder.CreateBitCast(srcValue, VoidPtrTy);
+ llvm::Value *dstAddr =
+ Builder.CreateBitCast(dstField.getPointer(), VoidPtrTy);
+ llvm::Value *args[] = {
+ dstAddr, srcValue, llvm::ConstantInt::get(Int32Ty, flags.getBitMask())
+ };
+
+ bool copyCanThrow = false;
+ if (CI.isByRef() && variable->getType()->getAsCXXRecordDecl()) {
+ const Expr *copyExpr =
+ CGM.getContext().getBlockVarCopyInits(variable);
+ if (copyExpr) {
+ copyCanThrow = true; // FIXME: reuse the noexcept logic
+ }
+ }
+
+ if (copyCanThrow) {
+ EmitRuntimeCallOrInvoke(CGM.getBlockObjectAssign(), args);
+ } else {
+ EmitNounwindRuntimeCall(CGM.getBlockObjectAssign(), args);
+ }
+ }
+ }
+ }
+
+ FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
+}
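+
+// As a notional example, an ARC __strong capture of an object pointer
+//
+//   NSObject *obj = ...;
+//   void (^b)(void) = ^{ use(obj); };   // use() is a placeholder
+//
+// sets NeedsCopyDispose, and the helper built above retains obj when the
+// block moves to the heap: via objc_storeStrong at -O0 (after storing
+// null so it doesn't over-release), or a plain objc_retain otherwise.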
+
+/// Generate the destroy-helper function for a block closure object:
+/// static void block_destroy_helper(block_t *theBlock);
+///
+/// Note that this destroys a heap-allocated block closure object;
+/// it should not be confused with a 'byref destroy helper', which
+/// destroys the heap-allocated contents of an individual __block
+/// variable.
+llvm::Constant *
+CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
+ ASTContext &C = getContext();
+
+ FunctionArgList args;
+ ImplicitParamDecl srcDecl(getContext(), nullptr, SourceLocation(), nullptr,
+ C.VoidPtrTy);
+ args.push_back(&srcDecl);
+
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
+ C.VoidTy, args, FunctionType::ExtInfo(), /*variadic=*/false);
+
+ // FIXME: We'd like to make these mergeable by content, with
+ // internal linkage.
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__destroy_helper_block_", &CGM.getModule());
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__destroy_helper_block_");
+
+ FunctionDecl *FD = FunctionDecl::Create(C, C.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, C.VoidTy,
+ nullptr, SC_Static,
+ false, false);
+
+ CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);
+
+ // Create a scope with an artificial location for the body of this function.
+ auto NL = ApplyDebugLocation::CreateEmpty(*this);
+ StartFunction(FD, C.VoidTy, Fn, FI, args);
+ auto AL = ApplyDebugLocation::CreateArtificial(*this);
+
+ llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
+
+ Address src = GetAddrOfLocalVar(&srcDecl);
+ src = Address(Builder.CreateLoad(src), blockInfo.BlockAlign);
+ src = Builder.CreateBitCast(src, structPtrTy, "block");
+
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ CodeGenFunction::RunCleanupsScope cleanups(*this);
+
+ for (const auto &CI : blockDecl->captures()) {
+ const VarDecl *variable = CI.getVariable();
+ QualType type = variable->getType();
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) continue;
+
+ BlockFieldFlags flags;
+ const CXXDestructorDecl *dtor = nullptr;
+
+ bool useARCWeakDestroy = false;
+ bool useARCStrongDestroy = false;
+
+ if (CI.isByRef()) {
+ flags = BLOCK_FIELD_IS_BYREF;
+ if (type.isObjCGCWeak())
+ flags |= BLOCK_FIELD_IS_WEAK;
+ } else if (const CXXRecordDecl *record = type->getAsCXXRecordDecl()) {
+ if (record->hasTrivialDestructor())
+ continue;
+ dtor = record->getDestructor();
+ } else if (type->isObjCRetainableType()) {
+ flags = BLOCK_FIELD_IS_OBJECT;
+ if (type->isBlockPointerType())
+ flags = BLOCK_FIELD_IS_BLOCK;
+
+ // Special rules for ARC captures.
+ Qualifiers qs = type.getQualifiers();
+
+ // Use objc_storeStrong for __strong direct captures; the
+ // dynamic tools really like it when we do this.
+ if (qs.getObjCLifetime() == Qualifiers::OCL_Strong) {
+ useARCStrongDestroy = true;
+
+ // Support __weak direct captures.
+ } else if (qs.getObjCLifetime() == Qualifiers::OCL_Weak) {
+ useARCWeakDestroy = true;
+
+ // Non-ARC captures are strong, and we need to use _Block_object_dispose.
+ } else if (!qs.hasObjCLifetime() && !getLangOpts().ObjCAutoRefCount) {
+ // fall through
+
+ // Otherwise, we have nothing to do.
+ } else {
+ continue;
+ }
+ } else {
+ continue;
+ }
+
+ Address srcField =
+ Builder.CreateStructGEP(src, capture.getIndex(), capture.getOffset());
+
+ // If the capture is a C++ record with a nontrivial destructor, destroy it.
+ if (dtor) {
+ PushDestructorCleanup(dtor, srcField);
+
+ // If this is a __weak capture, emit the release directly.
+ } else if (useARCWeakDestroy) {
+ EmitARCDestroyWeak(srcField);
+
+ // Destroy strong objects with a call if requested.
+ } else if (useARCStrongDestroy) {
+ EmitARCDestroyStrong(srcField, ARCImpreciseLifetime);
+
+ // Otherwise we call _Block_object_dispose. It wouldn't be too
+ // hard to just emit this as a cleanup if we wanted to make sure
+ // that things were done in reverse.
+ } else {
+ llvm::Value *value = Builder.CreateLoad(srcField);
+ value = Builder.CreateBitCast(value, VoidPtrTy);
+ BuildBlockRelease(value, flags);
+ }
+ }
+
+ cleanups.ForceCleanup();
+
+ FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
+}
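
For orientation, a rough source-level sketch of the helper this function emits for a block capturing one __strong object and one __block variable; the struct and field names are illustrative stand-ins for blockInfo.StructureType, not anything the compiler actually defines:

    // hypothetical shape of an emitted __destroy_helper_block_
    void __destroy_helper_block_(void *blockArg) {
      auto *block = static_cast<BlockLayoutExample *>(blockArg);
      objc_release(block->capturedObject);          // __strong capture
      _Block_object_dispose(block->capturedByref,   // __block capture
                            BLOCK_FIELD_IS_BYREF);
    }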
+
+namespace {
+
+/// Emits the copy/dispose helper functions for a __block object of id type.
+class ObjectByrefHelpers final : public BlockByrefHelpers {
+ BlockFieldFlags Flags;
+
+public:
+ ObjectByrefHelpers(CharUnits alignment, BlockFieldFlags flags)
+ : BlockByrefHelpers(alignment), Flags(flags) {}
+
+ void emitCopy(CodeGenFunction &CGF, Address destField,
+ Address srcField) override {
+ destField = CGF.Builder.CreateBitCast(destField, CGF.VoidPtrTy);
+
+ srcField = CGF.Builder.CreateBitCast(srcField, CGF.VoidPtrPtrTy);
+ llvm::Value *srcValue = CGF.Builder.CreateLoad(srcField);
+
+ unsigned flags = (Flags | BLOCK_BYREF_CALLER).getBitMask();
+
+ llvm::Value *flagsVal = llvm::ConstantInt::get(CGF.Int32Ty, flags);
+ llvm::Value *fn = CGF.CGM.getBlockObjectAssign();
+
+ llvm::Value *args[] = { destField.getPointer(), srcValue, flagsVal };
+ CGF.EmitNounwindRuntimeCall(fn, args);
+ }
+
+ void emitDispose(CodeGenFunction &CGF, Address field) override {
+ field = CGF.Builder.CreateBitCast(field, CGF.Int8PtrTy->getPointerTo(0));
+ llvm::Value *value = CGF.Builder.CreateLoad(field);
+
+ CGF.BuildBlockRelease(value, Flags | BLOCK_BYREF_CALLER);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const override {
+ id.AddInteger(Flags.getBitMask());
+ }
+};
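
In effect, ObjectByrefHelpers reduces both operations to the blocks runtime's entry points; schematically:

    _Block_object_assign(dst, *src, flags | BLOCK_BYREF_CALLER);  // emitCopy
    _Block_object_dispose(*field, flags | BLOCK_BYREF_CALLER);    // emitDispose

BLOCK_BYREF_CALLER tells the runtime the call comes from a byref copy/dispose helper rather than from a block copy.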
+
+/// Emits the copy/dispose helpers for an ARC __block __weak variable.
+class ARCWeakByrefHelpers final : public BlockByrefHelpers {
+public:
+ ARCWeakByrefHelpers(CharUnits alignment) : BlockByrefHelpers(alignment) {}
+
+ void emitCopy(CodeGenFunction &CGF, Address destField,
+ Address srcField) override {
+ CGF.EmitARCMoveWeak(destField, srcField);
+ }
+
+ void emitDispose(CodeGenFunction &CGF, Address field) override {
+ CGF.EmitARCDestroyWeak(field);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const override {
+ // 0 is distinguishable from all pointers and byref flags
+ id.AddInteger(0);
+ }
+};
+
+/// Emits the copy/dispose helpers for an ARC __block __strong variable
+/// that's not of block-pointer type.
+class ARCStrongByrefHelpers final : public BlockByrefHelpers {
+public:
+ ARCStrongByrefHelpers(CharUnits alignment) : BlockByrefHelpers(alignment) {}
+
+ void emitCopy(CodeGenFunction &CGF, Address destField,
+ Address srcField) override {
+ // Do a "move" by copying the value and then zeroing out the old
+ // variable.
+
+ llvm::Value *value = CGF.Builder.CreateLoad(srcField);
+
+ llvm::Value *null =
+ llvm::ConstantPointerNull::get(cast<llvm::PointerType>(value->getType()));
+
+ if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ CGF.Builder.CreateStore(null, destField);
+ CGF.EmitARCStoreStrongCall(destField, value, /*ignored*/ true);
+ CGF.EmitARCStoreStrongCall(srcField, null, /*ignored*/ true);
+ return;
+ }
+ CGF.Builder.CreateStore(value, destField);
+ CGF.Builder.CreateStore(null, srcField);
+ }
+
+ void emitDispose(CodeGenFunction &CGF, Address field) override {
+ CGF.EmitARCDestroyStrong(field, ARCImpreciseLifetime);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const override {
+ // 1 is distinguishable from all pointers and byref flags
+ id.AddInteger(1);
+ }
+};
+
+/// Emits the copy/dispose helpers for an ARC __block __strong
+/// variable that's of block-pointer type.
+class ARCStrongBlockByrefHelpers final : public BlockByrefHelpers {
+public:
+ ARCStrongBlockByrefHelpers(CharUnits alignment)
+ : BlockByrefHelpers(alignment) {}
+
+ void emitCopy(CodeGenFunction &CGF, Address destField,
+ Address srcField) override {
+ // Do the copy with objc_retainBlock; that's all that
+ // _Block_object_assign would do anyway, and we'd have to pass the
+ // right arguments to make sure it doesn't get no-op'ed.
+ llvm::Value *oldValue = CGF.Builder.CreateLoad(srcField);
+ llvm::Value *copy = CGF.EmitARCRetainBlock(oldValue, /*mandatory*/ true);
+ CGF.Builder.CreateStore(copy, destField);
+ }
+
+ void emitDispose(CodeGenFunction &CGF, Address field) override {
+ CGF.EmitARCDestroyStrong(field, ARCImpreciseLifetime);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const override {
+ // 2 is distinguishable from all pointers and byref flags
+ id.AddInteger(2);
+ }
+};
+
+/// Emits the copy/dispose helpers for a __block variable with a
+/// nontrivial copy constructor or destructor.
+class CXXByrefHelpers final : public BlockByrefHelpers {
+ QualType VarType;
+ const Expr *CopyExpr;
+
+public:
+ CXXByrefHelpers(CharUnits alignment, QualType type,
+ const Expr *copyExpr)
+ : BlockByrefHelpers(alignment), VarType(type), CopyExpr(copyExpr) {}
+
+ bool needsCopy() const override { return CopyExpr != nullptr; }
+ void emitCopy(CodeGenFunction &CGF, Address destField,
+ Address srcField) override {
+ if (!CopyExpr) return;
+ CGF.EmitSynthesizedCXXCopyCtor(destField, srcField, CopyExpr);
+ }
+
+ void emitDispose(CodeGenFunction &CGF, Address field) override {
+ EHScopeStack::stable_iterator cleanupDepth = CGF.EHStack.stable_begin();
+ CGF.PushDestructorCleanup(VarType, field);
+ CGF.PopCleanupBlocks(cleanupDepth);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const override {
+ id.AddPointer(VarType.getCanonicalType().getAsOpaquePtr());
+ }
+};
+} // end anonymous namespace
+
+static llvm::Constant *
+generateByrefCopyHelper(CodeGenFunction &CGF, const BlockByrefInfo &byrefInfo,
+ BlockByrefHelpers &generator) {
+ ASTContext &Context = CGF.getContext();
+
+ QualType R = Context.VoidTy;
+
+ FunctionArgList args;
+ ImplicitParamDecl dst(CGF.getContext(), nullptr, SourceLocation(), nullptr,
+ Context.VoidPtrTy);
+ args.push_back(&dst);
+
+ ImplicitParamDecl src(CGF.getContext(), nullptr, SourceLocation(), nullptr,
+ Context.VoidPtrTy);
+ args.push_back(&src);
+
+ const CGFunctionInfo &FI = CGF.CGM.getTypes().arrangeFreeFunctionDeclaration(
+ R, args, FunctionType::ExtInfo(), /*variadic=*/false);
+
+ llvm::FunctionType *LTy = CGF.CGM.getTypes().GetFunctionType(FI);
+
+ // FIXME: We'd like to put these into a section that is mergeable by
+ // content, with internal linkage.
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__Block_byref_object_copy_", &CGF.CGM.getModule());
+
+ IdentifierInfo *II
+ = &Context.Idents.get("__Block_byref_object_copy_");
+
+ FunctionDecl *FD = FunctionDecl::Create(Context,
+ Context.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, R, nullptr,
+ SC_Static,
+ false, false);
+
+ CGF.CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);
+
+ CGF.StartFunction(FD, R, Fn, FI, args);
+
+ if (generator.needsCopy()) {
+ llvm::Type *byrefPtrType = byrefInfo.Type->getPointerTo(0);
+
+ // dst->x
+ Address destField = CGF.GetAddrOfLocalVar(&dst);
+ destField = Address(CGF.Builder.CreateLoad(destField),
+ byrefInfo.ByrefAlignment);
+ destField = CGF.Builder.CreateBitCast(destField, byrefPtrType);
+ destField = CGF.emitBlockByrefAddress(destField, byrefInfo, false,
+ "dest-object");
+
+ // src->x
+ Address srcField = CGF.GetAddrOfLocalVar(&src);
+ srcField = Address(CGF.Builder.CreateLoad(srcField),
+ byrefInfo.ByrefAlignment);
+ srcField = CGF.Builder.CreateBitCast(srcField, byrefPtrType);
+ srcField = CGF.emitBlockByrefAddress(srcField, byrefInfo, false,
+ "src-object");
+
+ generator.emitCopy(CGF, destField, srcField);
+ }
+
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, CGF.Int8PtrTy);
+}
+
+/// Build the copy helper for a __block variable.
+static llvm::Constant *buildByrefCopyHelper(CodeGenModule &CGM,
+ const BlockByrefInfo &byrefInfo,
+ BlockByrefHelpers &generator) {
+ CodeGenFunction CGF(CGM);
+ return generateByrefCopyHelper(CGF, byrefInfo, generator);
+}
+
+/// Generate code for a __block variable's dispose helper.
+static llvm::Constant *
+generateByrefDisposeHelper(CodeGenFunction &CGF,
+ const BlockByrefInfo &byrefInfo,
+ BlockByrefHelpers &generator) {
+ ASTContext &Context = CGF.getContext();
+ QualType R = Context.VoidTy;
+
+ FunctionArgList args;
+ ImplicitParamDecl src(CGF.getContext(), nullptr, SourceLocation(), nullptr,
+ Context.VoidPtrTy);
+ args.push_back(&src);
+
+ const CGFunctionInfo &FI = CGF.CGM.getTypes().arrangeFreeFunctionDeclaration(
+ R, args, FunctionType::ExtInfo(), /*variadic=*/false);
+
+ llvm::FunctionType *LTy = CGF.CGM.getTypes().GetFunctionType(FI);
+
+ // FIXME: We'd like to put these into a section that is mergeable by
+ // content, with internal linkage.
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__Block_byref_object_dispose_",
+ &CGF.CGM.getModule());
+
+ IdentifierInfo *II
+ = &Context.Idents.get("__Block_byref_object_dispose_");
+
+ FunctionDecl *FD = FunctionDecl::Create(Context,
+ Context.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, R, nullptr,
+ SC_Static,
+ false, false);
+
+ CGF.CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);
+
+ CGF.StartFunction(FD, R, Fn, FI, args);
+
+ if (generator.needsDispose()) {
+ Address addr = CGF.GetAddrOfLocalVar(&src);
+ addr = Address(CGF.Builder.CreateLoad(addr), byrefInfo.ByrefAlignment);
+ auto byrefPtrType = byrefInfo.Type->getPointerTo(0);
+ addr = CGF.Builder.CreateBitCast(addr, byrefPtrType);
+ addr = CGF.emitBlockByrefAddress(addr, byrefInfo, false, "object");
+
+ generator.emitDispose(CGF, addr);
+ }
+
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, CGF.Int8PtrTy);
+}
+
+/// Build the dispose helper for a __block variable.
+static llvm::Constant *buildByrefDisposeHelper(CodeGenModule &CGM,
+ const BlockByrefInfo &byrefInfo,
+ BlockByrefHelpers &generator) {
+ CodeGenFunction CGF(CGM);
+ return generateByrefDisposeHelper(CGF, byrefInfo, generator);
+}
+
+/// Lazily build the copy and dispose helpers for a __block variable
+/// with the given information.
+template <class T>
+static T *buildByrefHelpers(CodeGenModule &CGM, const BlockByrefInfo &byrefInfo,
+ T &&generator) {
+ llvm::FoldingSetNodeID id;
+ generator.Profile(id);
+
+ void *insertPos;
+ BlockByrefHelpers *node
+ = CGM.ByrefHelpersCache.FindNodeOrInsertPos(id, insertPos);
+ if (node) return static_cast<T*>(node);
+
+ generator.CopyHelper = buildByrefCopyHelper(CGM, byrefInfo, generator);
+ generator.DisposeHelper = buildByrefDisposeHelper(CGM, byrefInfo, generator);
+
+ T *copy = new (CGM.getContext()) T(std::move(generator));
+ CGM.ByrefHelpersCache.InsertNode(copy, insertPos);
+ return copy;
+}
+
+/// Build the copy and dispose helpers for the given __block variable
+/// emission. Places the helpers in the global cache. Returns null
+/// if no helpers are required.
+BlockByrefHelpers *
+CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType,
+ const AutoVarEmission &emission) {
+ const VarDecl &var = *emission.Variable;
+ QualType type = var.getType();
+
+ auto &byrefInfo = getBlockByrefInfo(&var);
+
+ // The alignment we care about for the purposes of uniquing byref
+ // helpers is the alignment of the actual byref value field.
+ CharUnits valueAlignment =
+ byrefInfo.ByrefAlignment.alignmentAtOffset(byrefInfo.FieldOffset);
+
+ if (const CXXRecordDecl *record = type->getAsCXXRecordDecl()) {
+ const Expr *copyExpr = CGM.getContext().getBlockVarCopyInits(&var);
+ if (!copyExpr && record->hasTrivialDestructor()) return nullptr;
+
+ return ::buildByrefHelpers(
+ CGM, byrefInfo, CXXByrefHelpers(valueAlignment, type, copyExpr));
+ }
+
+ // Otherwise, if we don't have a retainable type, there's nothing to do.
+ if (!type->isObjCRetainableType()) return nullptr;
+
+ Qualifiers qs = type.getQualifiers();
+
+ // If we have lifetime, that dominates.
+ if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
+ switch (lifetime) {
+ case Qualifiers::OCL_None: llvm_unreachable("impossible");
+
+ // These are just bits as far as the runtime is concerned.
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ return nullptr;
+
+ // Tell the runtime that this is ARC __weak, called by the
+ // byref routines.
+ case Qualifiers::OCL_Weak:
+ return ::buildByrefHelpers(CGM, byrefInfo,
+ ARCWeakByrefHelpers(valueAlignment));
+
+ // ARC __strong __block variables need to be retained.
+ case Qualifiers::OCL_Strong:
+ // Block pointers need to be copied, and there's no direct
+ // transfer possible.
+ if (type->isBlockPointerType()) {
+ return ::buildByrefHelpers(CGM, byrefInfo,
+ ARCStrongBlockByrefHelpers(valueAlignment));
+
+ // Otherwise, we transfer ownership of the retain from the stack
+ // to the heap.
+ } else {
+ return ::buildByrefHelpers(CGM, byrefInfo,
+ ARCStrongByrefHelpers(valueAlignment));
+ }
+ }
+ llvm_unreachable("fell out of lifetime switch!");
+ }
+
+ BlockFieldFlags flags;
+ if (type->isBlockPointerType()) {
+ flags |= BLOCK_FIELD_IS_BLOCK;
+ } else if (CGM.getContext().isObjCNSObjectType(type) ||
+ type->isObjCObjectPointerType()) {
+ flags |= BLOCK_FIELD_IS_OBJECT;
+ } else {
+ return nullptr;
+ }
+
+ if (type.isObjCGCWeak())
+ flags |= BLOCK_FIELD_IS_WEAK;
+
+ return ::buildByrefHelpers(CGM, byrefInfo,
+ ObjectByrefHelpers(valueAlignment, flags));
+}
+
+Address CodeGenFunction::emitBlockByrefAddress(Address baseAddr,
+ const VarDecl *var,
+ bool followForward) {
+ auto &info = getBlockByrefInfo(var);
+ return emitBlockByrefAddress(baseAddr, info, followForward, var->getName());
+}
+
+Address CodeGenFunction::emitBlockByrefAddress(Address baseAddr,
+ const BlockByrefInfo &info,
+ bool followForward,
+ const llvm::Twine &name) {
+ // Chase the forwarding address if requested.
+ if (followForward) {
+ Address forwardingAddr =
+ Builder.CreateStructGEP(baseAddr, 1, getPointerSize(), "forwarding");
+ baseAddr = Address(Builder.CreateLoad(forwardingAddr), info.ByrefAlignment);
+ }
+
+ return Builder.CreateStructGEP(baseAddr, info.FieldIndex,
+ info.FieldOffset, name);
+}
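
The forwarding chase implements the byref aliasing contract: once the runtime moves a __block variable to the heap, the stale stack copy's __forwarding field points at the heap copy, so any access that follows forwarding sees the live data. In source terms (using the struct sketched in the comment below), an assignment to a __block int x is, roughly:

    x.__forwarding->x = 10;   // what a plain 'x = 10' lowers to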
+
+/// getBlockByrefInfo - This routine changes a __block variable declared as T x
+/// into:
+///
+/// struct {
+/// void *__isa;
+/// void *__forwarding;
+/// int32_t __flags;
+/// int32_t __size;
+/// void *__copy_helper; // only if needed
+/// void *__destroy_helper; // only if needed
+/// void *__byref_variable_layout;// only if needed
+/// char padding[X]; // only if needed
+/// T x;
+/// } x
+///
+const BlockByrefInfo &CodeGenFunction::getBlockByrefInfo(const VarDecl *D) {
+ auto it = BlockByrefInfos.find(D);
+ if (it != BlockByrefInfos.end())
+ return it->second;
+
+ llvm::StructType *byrefType =
+ llvm::StructType::create(getLLVMContext(),
+ "struct.__block_byref_" + D->getNameAsString());
+
+ QualType Ty = D->getType();
+
+ CharUnits size;
+ SmallVector<llvm::Type *, 8> types;
+
+ // void *__isa;
+ types.push_back(Int8PtrTy);
+ size += getPointerSize();
+
+ // void *__forwarding;
+ types.push_back(llvm::PointerType::getUnqual(byrefType));
+ size += getPointerSize();
+
+ // int32_t __flags;
+ types.push_back(Int32Ty);
+ size += CharUnits::fromQuantity(4);
+
+ // int32_t __size;
+ types.push_back(Int32Ty);
+ size += CharUnits::fromQuantity(4);
+
+ // Note that this must match *exactly* the logic in buildByrefHelpers.
+ bool hasCopyAndDispose = getContext().BlockRequiresCopying(Ty, D);
+ if (hasCopyAndDispose) {
+ /// void *__copy_helper;
+ types.push_back(Int8PtrTy);
+ size += getPointerSize();
+
+ /// void *__destroy_helper;
+ types.push_back(Int8PtrTy);
+ size += getPointerSize();
+ }
+
+ bool HasByrefExtendedLayout = false;
+ Qualifiers::ObjCLifetime Lifetime;
+ if (getContext().getByrefLifetime(Ty, Lifetime, HasByrefExtendedLayout) &&
+ HasByrefExtendedLayout) {
+ /// void *__byref_variable_layout;
+ types.push_back(Int8PtrTy);
+ size += CharUnits::fromQuantity(PointerSizeInBytes);
+ }
+
+ // T x;
+ llvm::Type *varTy = ConvertTypeForMem(Ty);
+
+ bool packed = false;
+ CharUnits varAlign = getContext().getDeclAlign(D);
+ CharUnits varOffset = size.RoundUpToAlignment(varAlign);
+
+ // We may have to insert padding.
+ if (varOffset != size) {
+ llvm::Type *paddingTy =
+ llvm::ArrayType::get(Int8Ty, (varOffset - size).getQuantity());
+
+ types.push_back(paddingTy);
+ size = varOffset;
+
+ // Conversely, we might have to prevent LLVM from inserting padding.
+ } else if (CGM.getDataLayout().getABITypeAlignment(varTy)
+ > varAlign.getQuantity()) {
+ packed = true;
+ }
+ types.push_back(varTy);
+
+ byrefType->setBody(types, packed);
+
+ BlockByrefInfo info;
+ info.Type = byrefType;
+ info.FieldIndex = types.size() - 1;
+ info.FieldOffset = varOffset;
+ info.ByrefAlignment = std::max(varAlign, getPointerAlign());
+
+ auto pair = BlockByrefInfos.insert({D, info});
+ assert(pair.second && "info was inserted recursively?");
+ return pair.first->second;
+}
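
A concrete instance of this layout, assuming a typical 64-bit target, no copy/dispose helpers, and no extended layout, for '__block double x':

    struct __block_byref_x {                  // hypothetical lowering
      void *__isa;                            // offset  0
      struct __block_byref_x *__forwarding;   // offset  8
      int32_t __flags;                        // offset 16
      int32_t __size;                         // offset 20
      double x;                               // offset 24; already 8-aligned,
    };                                        // so no padding, packed = false

Here FieldIndex is 4, FieldOffset is 24 bytes, and ByrefAlignment is max(8, pointer alignment) = 8.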
+
+/// Initialize the structural components of a __block variable, i.e.
+/// everything but the actual object.
+void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
+ // Find the address of the local.
+ Address addr = emission.Addr;
+
+ // That's an alloca of the byref structure type.
+ llvm::StructType *byrefType = cast<llvm::StructType>(
+ cast<llvm::PointerType>(addr.getPointer()->getType())->getElementType());
+
+ unsigned nextHeaderIndex = 0;
+ CharUnits nextHeaderOffset;
+ auto storeHeaderField = [&](llvm::Value *value, CharUnits fieldSize,
+ const Twine &name) {
+ auto fieldAddr = Builder.CreateStructGEP(addr, nextHeaderIndex,
+ nextHeaderOffset, name);
+ Builder.CreateStore(value, fieldAddr);
+
+ nextHeaderIndex++;
+ nextHeaderOffset += fieldSize;
+ };
+
+ // Build the byref helpers if necessary. This is null if we don't need any.
+ BlockByrefHelpers *helpers = buildByrefHelpers(*byrefType, emission);
+
+ const VarDecl &D = *emission.Variable;
+ QualType type = D.getType();
+
+ bool HasByrefExtendedLayout;
+ Qualifiers::ObjCLifetime ByrefLifetime;
+ bool ByRefHasLifetime =
+ getContext().getByrefLifetime(type, ByrefLifetime, HasByrefExtendedLayout);
+
+ llvm::Value *V;
+
+ // Initialize the 'isa', which is just 0 or 1.
+ int isa = 0;
+ if (type.isObjCGCWeak())
+ isa = 1;
+ V = Builder.CreateIntToPtr(Builder.getInt32(isa), Int8PtrTy, "isa");
+ storeHeaderField(V, getPointerSize(), "byref.isa");
+
+ // Store the address of the variable into its own forwarding pointer.
+ storeHeaderField(addr.getPointer(), getPointerSize(), "byref.forwarding");
+
+ // Blocks ABI:
+ // c) the flags field is set to either 0 if no helper functions are
+ // needed or BLOCK_BYREF_HAS_COPY_DISPOSE if they are,
+ BlockFlags flags;
+ if (helpers) flags |= BLOCK_BYREF_HAS_COPY_DISPOSE;
+ if (ByRefHasLifetime) {
+ if (HasByrefExtendedLayout) flags |= BLOCK_BYREF_LAYOUT_EXTENDED;
+ else switch (ByrefLifetime) {
+ case Qualifiers::OCL_Strong:
+ flags |= BLOCK_BYREF_LAYOUT_STRONG;
+ break;
+ case Qualifiers::OCL_Weak:
+ flags |= BLOCK_BYREF_LAYOUT_WEAK;
+ break;
+ case Qualifiers::OCL_ExplicitNone:
+ flags |= BLOCK_BYREF_LAYOUT_UNRETAINED;
+ break;
+ case Qualifiers::OCL_None:
+ if (!type->isObjCObjectPointerType() && !type->isBlockPointerType())
+ flags |= BLOCK_BYREF_LAYOUT_NON_OBJECT;
+ break;
+ default:
+ break;
+ }
+ if (CGM.getLangOpts().ObjCGCBitmapPrint) {
+ printf("\n Inline flag for BYREF variable layout (%d):", flags.getBitMask());
+ if (flags & BLOCK_BYREF_HAS_COPY_DISPOSE)
+ printf(" BLOCK_BYREF_HAS_COPY_DISPOSE");
+ if (flags & BLOCK_BYREF_LAYOUT_MASK) {
+ BlockFlags ThisFlag(flags.getBitMask() & BLOCK_BYREF_LAYOUT_MASK);
+ if (ThisFlag == BLOCK_BYREF_LAYOUT_EXTENDED)
+ printf(" BLOCK_BYREF_LAYOUT_EXTENDED");
+ if (ThisFlag == BLOCK_BYREF_LAYOUT_STRONG)
+ printf(" BLOCK_BYREF_LAYOUT_STRONG");
+ if (ThisFlag == BLOCK_BYREF_LAYOUT_WEAK)
+ printf(" BLOCK_BYREF_LAYOUT_WEAK");
+ if (ThisFlag == BLOCK_BYREF_LAYOUT_UNRETAINED)
+ printf(" BLOCK_BYREF_LAYOUT_UNRETAINED");
+ if (ThisFlag == BLOCK_BYREF_LAYOUT_NON_OBJECT)
+ printf(" BLOCK_BYREF_LAYOUT_NON_OBJECT");
+ }
+ printf("\n");
+ }
+ }
+ storeHeaderField(llvm::ConstantInt::get(IntTy, flags.getBitMask()),
+ getIntSize(), "byref.flags");
+
+ CharUnits byrefSize = CGM.GetTargetTypeStoreSize(byrefType);
+ V = llvm::ConstantInt::get(IntTy, byrefSize.getQuantity());
+ storeHeaderField(V, getIntSize(), "byref.size");
+
+ if (helpers) {
+ storeHeaderField(helpers->CopyHelper, getPointerSize(),
+ "byref.copyHelper");
+ storeHeaderField(helpers->DisposeHelper, getPointerSize(),
+ "byref.disposeHelper");
+ }
+
+ if (ByRefHasLifetime && HasByrefExtendedLayout) {
+ auto layoutInfo = CGM.getObjCRuntime().BuildByrefLayout(CGM, type);
+ storeHeaderField(layoutInfo, getPointerSize(), "byref.layout");
+ }
+}
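
As an illustration (hedged, not verbatim output): for an ARC '__block __weak id w', buildByrefHelpers returns ARCWeakByrefHelpers, getByrefLifetime reports OCL_Weak without extended layout, and the header is initialized to roughly:

    isa          = 0
    forwarding   = address of the byref alloca itself
    flags        = BLOCK_BYREF_HAS_COPY_DISPOSE | BLOCK_BYREF_LAYOUT_WEAK
    size         = sizeof(struct __block_byref_w)
    copy/dispose = the two generated helper functions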
+
+void CodeGenFunction::BuildBlockRelease(llvm::Value *V, BlockFieldFlags flags) {
+ llvm::Value *F = CGM.getBlockObjectDispose();
+ llvm::Value *args[] = {
+ Builder.CreateBitCast(V, Int8PtrTy),
+ llvm::ConstantInt::get(Int32Ty, flags.getBitMask())
+ };
+ EmitNounwindRuntimeCall(F, args); // FIXME: throwing destructors?
+}
+
+namespace {
+ /// Release a __block variable.
+ struct CallBlockRelease final : EHScopeStack::Cleanup {
+ llvm::Value *Addr;
+ CallBlockRelease(llvm::Value *Addr) : Addr(Addr) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ // Should we be passing FIELD_IS_WEAK here?
+ CGF.BuildBlockRelease(Addr, BLOCK_FIELD_IS_BYREF);
+ }
+ };
+} // end anonymous namespace
+
+/// Enter a cleanup to destroy a __block variable. Note that this
+/// cleanup should be a no-op if the variable hasn't left the stack
+/// yet; if a cleanup is required for the variable itself, that needs
+/// to be done externally.
+void CodeGenFunction::enterByrefCleanup(const AutoVarEmission &emission) {
+ // We don't enter this cleanup if we're in pure-GC mode.
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly)
+ return;
+
+ EHStack.pushCleanup<CallBlockRelease>(NormalAndEHCleanup,
+ emission.Addr.getPointer());
+}
+
+/// Adjust the declaration of something from the blocks API.
+static void configureBlocksRuntimeObject(CodeGenModule &CGM,
+ llvm::Constant *C) {
+ if (!CGM.getLangOpts().BlocksRuntimeOptional) return;
+
+ auto *GV = cast<llvm::GlobalValue>(C->stripPointerCasts());
+ if (GV->isDeclaration() && GV->hasExternalLinkage())
+ GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+}
+
+llvm::Constant *CodeGenModule::getBlockObjectDispose() {
+ if (BlockObjectDispose)
+ return BlockObjectDispose;
+
+ llvm::Type *args[] = { Int8PtrTy, Int32Ty };
+ llvm::FunctionType *fty
+ = llvm::FunctionType::get(VoidTy, args, false);
+ BlockObjectDispose = CreateRuntimeFunction(fty, "_Block_object_dispose");
+ configureBlocksRuntimeObject(*this, BlockObjectDispose);
+ return BlockObjectDispose;
+}
+
+llvm::Constant *CodeGenModule::getBlockObjectAssign() {
+ if (BlockObjectAssign)
+ return BlockObjectAssign;
+
+ llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, Int32Ty };
+ llvm::FunctionType *fty
+ = llvm::FunctionType::get(VoidTy, args, false);
+ BlockObjectAssign = CreateRuntimeFunction(fty, "_Block_object_assign");
+ configureBlocksRuntimeObject(*this, BlockObjectAssign);
+ return BlockObjectAssign;
+}
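
Both lazily created declarations correspond to the blocks runtime's C prototypes:

    void _Block_object_assign(void *dst, const void *src, int flags);
    void _Block_object_dispose(const void *object, int flags);

configureBlocksRuntimeObject then weakens their linkage when the runtime is optional, so a missing runtime resolves to null rather than a link error.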
+
+llvm::Constant *CodeGenModule::getNSConcreteGlobalBlock() {
+ if (NSConcreteGlobalBlock)
+ return NSConcreteGlobalBlock;
+
+ NSConcreteGlobalBlock = GetOrCreateLLVMGlobal("_NSConcreteGlobalBlock",
+ Int8PtrTy->getPointerTo(),
+ nullptr);
+ configureBlocksRuntimeObject(*this, NSConcreteGlobalBlock);
+ return NSConcreteGlobalBlock;
+}
+
+llvm::Constant *CodeGenModule::getNSConcreteStackBlock() {
+ if (NSConcreteStackBlock)
+ return NSConcreteStackBlock;
+
+ NSConcreteStackBlock = GetOrCreateLLVMGlobal("_NSConcreteStackBlock",
+ Int8PtrTy->getPointerTo(),
+ nullptr);
+ configureBlocksRuntimeObject(*this, NSConcreteStackBlock);
+ return NSConcreteStackBlock;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
new file mode 100644
index 0000000..1edabef
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
@@ -0,0 +1,277 @@
+//===-- CGBlocks.h - state for LLVM CodeGen for blocks ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal state used for LLVM translation of block literals.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGBLOCKS_H
+#define LLVM_CLANG_LIB_CODEGEN_CGBLOCKS_H
+
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGValue.h"
+#include "CodeGenFunction.h"
+#include "CodeGenTypes.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/IR/Module.h"
+
+namespace llvm {
+class Module;
+class Constant;
+class Function;
+class GlobalValue;
+class DataLayout;
+class FunctionType;
+class PointerType;
+class Value;
+class LLVMContext;
+}
+
+namespace clang {
+
+namespace CodeGen {
+
+class CodeGenModule;
+class CGBlockInfo;
+
+// Flags stored in __block variables.
+enum BlockByrefFlags {
+ BLOCK_BYREF_HAS_COPY_DISPOSE = (1 << 25), // compiler
+ BLOCK_BYREF_LAYOUT_MASK = (0xF << 28), // compiler
+ BLOCK_BYREF_LAYOUT_EXTENDED = (1 << 28),
+ BLOCK_BYREF_LAYOUT_NON_OBJECT = (2 << 28),
+ BLOCK_BYREF_LAYOUT_STRONG = (3 << 28),
+ BLOCK_BYREF_LAYOUT_WEAK = (4 << 28),
+ BLOCK_BYREF_LAYOUT_UNRETAINED = (5 << 28)
+};
+
+enum BlockLiteralFlags {
+ BLOCK_HAS_COPY_DISPOSE = (1 << 25),
+ BLOCK_HAS_CXX_OBJ = (1 << 26),
+ BLOCK_IS_GLOBAL = (1 << 28),
+ BLOCK_USE_STRET = (1 << 29),
+ BLOCK_HAS_SIGNATURE = (1 << 30),
+ BLOCK_HAS_EXTENDED_LAYOUT = (1 << 31)
+};
+class BlockFlags {
+ uint32_t flags;
+
+public:
+ BlockFlags(uint32_t flags) : flags(flags) {}
+ BlockFlags() : flags(0) {}
+ BlockFlags(BlockLiteralFlags flag) : flags(flag) {}
+ BlockFlags(BlockByrefFlags flag) : flags(flag) {}
+
+ uint32_t getBitMask() const { return flags; }
+ bool empty() const { return flags == 0; }
+
+ friend BlockFlags operator|(BlockFlags l, BlockFlags r) {
+ return BlockFlags(l.flags | r.flags);
+ }
+ friend BlockFlags &operator|=(BlockFlags &l, BlockFlags r) {
+ l.flags |= r.flags;
+ return l;
+ }
+ friend bool operator&(BlockFlags l, BlockFlags r) {
+ return (l.flags & r.flags);
+ }
+ bool operator==(BlockFlags r) {
+ return (flags == r.flags);
+ }
+};
+inline BlockFlags operator|(BlockLiteralFlags l, BlockLiteralFlags r) {
+ return BlockFlags(l) | BlockFlags(r);
+}
+
+enum BlockFieldFlag_t {
+ BLOCK_FIELD_IS_OBJECT = 0x03, /* id, NSObject, __attribute__((NSObject)),
+ block, ... */
+ BLOCK_FIELD_IS_BLOCK = 0x07, /* a block variable */
+
+ BLOCK_FIELD_IS_BYREF = 0x08, /* the on stack structure holding the __block
+ variable */
+ BLOCK_FIELD_IS_WEAK = 0x10, /* declared __weak, only used in byref copy
+ helpers */
+ BLOCK_FIELD_IS_ARC = 0x40, /* field has ARC-specific semantics */
+ BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
+ support routines */
+ BLOCK_BYREF_CURRENT_MAX = 256
+};
+
+class BlockFieldFlags {
+ uint32_t flags;
+
+ BlockFieldFlags(uint32_t flags) : flags(flags) {}
+public:
+ BlockFieldFlags() : flags(0) {}
+ BlockFieldFlags(BlockFieldFlag_t flag) : flags(flag) {}
+
+ uint32_t getBitMask() const { return flags; }
+ bool empty() const { return flags == 0; }
+
+ /// Answers whether the flags indicate that this field is an object
+ /// or block pointer that requires _Block_object_assign/dispose.
+ bool isSpecialPointer() const { return flags & BLOCK_FIELD_IS_OBJECT; }
+
+ friend BlockFieldFlags operator|(BlockFieldFlags l, BlockFieldFlags r) {
+ return BlockFieldFlags(l.flags | r.flags);
+ }
+ friend BlockFieldFlags &operator|=(BlockFieldFlags &l, BlockFieldFlags r) {
+ l.flags |= r.flags;
+ return l;
+ }
+ friend bool operator&(BlockFieldFlags l, BlockFieldFlags r) {
+ return (l.flags & r.flags);
+ }
+};
+inline BlockFieldFlags operator|(BlockFieldFlag_t l, BlockFieldFlag_t r) {
+ return BlockFieldFlags(l) | BlockFieldFlags(r);
+}
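
A quick worked use of these flag types (values follow the enum above):

    BlockFieldFlags f = BLOCK_FIELD_IS_OBJECT;              // 0x03
    f |= BLOCK_FIELD_IS_WEAK;                               // 0x13
    unsigned mask = (f | BLOCK_BYREF_CALLER).getBitMask();  // 0x93

which is exactly the shape of mask the byref helpers above pass to _Block_object_assign.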
+
+/// Information about the layout of a __block variable.
+class BlockByrefInfo {
+public:
+ llvm::StructType *Type;
+ unsigned FieldIndex;
+ CharUnits ByrefAlignment;
+ CharUnits FieldOffset;
+};
+
+/// CGBlockInfo - Information to generate a block literal.
+class CGBlockInfo {
+public:
+ /// Name - The name of the block, kind of.
+ StringRef Name;
+
+ /// The field index of 'this' within the block, if there is one.
+ unsigned CXXThisIndex;
+
+ class Capture {
+ uintptr_t Data;
+ EHScopeStack::stable_iterator Cleanup;
+ CharUnits::QuantityType Offset;
+
+ public:
+ bool isIndex() const { return (Data & 1) != 0; }
+ bool isConstant() const { return !isIndex(); }
+
+ unsigned getIndex() const {
+ assert(isIndex());
+ return Data >> 1;
+ }
+ CharUnits getOffset() const {
+ assert(isIndex());
+ return CharUnits::fromQuantity(Offset);
+ }
+ EHScopeStack::stable_iterator getCleanup() const {
+ assert(isIndex());
+ return Cleanup;
+ }
+ void setCleanup(EHScopeStack::stable_iterator cleanup) {
+ assert(isIndex());
+ Cleanup = cleanup;
+ }
+
+ llvm::Value *getConstant() const {
+ assert(isConstant());
+ return reinterpret_cast<llvm::Value*>(Data);
+ }
+
+ static Capture makeIndex(unsigned index, CharUnits offset) {
+ Capture v;
+ v.Data = (index << 1) | 1;
+ v.Offset = offset.getQuantity();
+ return v;
+ }
+
+ static Capture makeConstant(llvm::Value *value) {
+ Capture v;
+ v.Data = reinterpret_cast<uintptr_t>(value);
+ return v;
+ }
+ };
+
+ /// CanBeGlobal - True if the block can be global, i.e. it has
+ /// no non-constant captures.
+ bool CanBeGlobal : 1;
+
+ /// True if the block needs a custom copy or dispose function.
+ bool NeedsCopyDispose : 1;
+
+ /// HasCXXObject - True if the block's custom copy/dispose functions
+ /// need to be run even in GC mode.
+ bool HasCXXObject : 1;
+
+ /// UsesStret : True if the block uses an stret return. Mutable
+ /// because it gets set later in the block-creation process.
+ mutable bool UsesStret : 1;
+
+ /// HasCapturedVariableLayout : True if block has captured variables
+ /// and their layout meta-data has been generated.
+ bool HasCapturedVariableLayout : 1;
+
+ /// The mapping of allocated indexes within the block.
+ llvm::DenseMap<const VarDecl*, Capture> Captures;
+
+ Address LocalAddress;
+ llvm::StructType *StructureType;
+ const BlockDecl *Block;
+ const BlockExpr *BlockExpression;
+ CharUnits BlockSize;
+ CharUnits BlockAlign;
+ CharUnits CXXThisOffset;
+
+ // Offset of the gap caused by the block header having a smaller
+ // alignment than the alignment of the block descriptor. This
+ // is the gap offset before the first captured field.
+ CharUnits BlockHeaderForcedGapOffset;
+ // Gap size caused by aligning first field after block header.
+ // This could be zero if no forced alignment is required.
+ CharUnits BlockHeaderForcedGapSize;
+
+ /// An instruction which dominates the full-expression that the
+ /// block is inside.
+ llvm::Instruction *DominatingIP;
+
+ /// The next block in the block-info chain. Invalid if this block
+ /// info is not part of the CGF's block-info chain, which is true
+ /// if it corresponds to a global block or a block whose expression
+ /// has been encountered.
+ CGBlockInfo *NextBlockInfo;
+
+ const Capture &getCapture(const VarDecl *var) const {
+ return const_cast<CGBlockInfo*>(this)->getCapture(var);
+ }
+ Capture &getCapture(const VarDecl *var) {
+ llvm::DenseMap<const VarDecl*, Capture>::iterator
+ it = Captures.find(var);
+ assert(it != Captures.end() && "no entry for variable!");
+ return it->second;
+ }
+
+ const BlockDecl *getBlockDecl() const { return Block; }
+ const BlockExpr *getBlockExpr() const {
+ assert(BlockExpression);
+ assert(BlockExpression->getBlockDecl() == Block);
+ return BlockExpression;
+ }
+
+ CGBlockInfo(const BlockDecl *blockDecl, StringRef Name);
+};
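
Capture packs its two states into one word: makeIndex stores (index << 1) | 1, so isIndex() is a low-bit test, while makeConstant stores the llvm::Value* directly, whose low bit is zero thanks to pointer alignment. A quick check with hypothetical values:

    auto c = CGBlockInfo::Capture::makeIndex(5, CharUnits::fromQuantity(32));
    assert(c.isIndex() && c.getIndex() == 5);
    assert(c.getOffset() == CharUnits::fromQuantity(32));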
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h b/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h
new file mode 100644
index 0000000..489f341
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h
@@ -0,0 +1,306 @@
+//===-- CGBuilder.h - Choose IRBuilder implementation ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGBUILDER_H
+#define LLVM_CLANG_LIB_CODEGEN_CGBUILDER_H
+
+#include "llvm/IR/IRBuilder.h"
+#include "Address.h"
+#include "CodeGenTypeCache.h"
+
+namespace clang {
+namespace CodeGen {
+
+class CodeGenFunction;
+
+/// \brief This is an IRBuilder insertion helper that forwards to
+/// CodeGenFunction::InsertHelper, which adds necessary metadata to
+/// instructions.
+template <bool PreserveNames>
+class CGBuilderInserter
+ : protected llvm::IRBuilderDefaultInserter<PreserveNames> {
+public:
+ CGBuilderInserter() = default;
+ explicit CGBuilderInserter(CodeGenFunction *CGF) : CGF(CGF) {}
+
+protected:
+ /// \brief This forwards to CodeGenFunction::InsertHelper.
+ void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
+ llvm::BasicBlock *BB,
+ llvm::BasicBlock::iterator InsertPt) const;
+private:
+ CodeGenFunction *CGF = nullptr;
+};
+
+// Don't preserve names on values in an optimized build.
+#ifdef NDEBUG
+#define PreserveNames false
+#else
+#define PreserveNames true
+#endif
+
+typedef CGBuilderInserter<PreserveNames> CGBuilderInserterTy;
+
+typedef llvm::IRBuilder<PreserveNames, llvm::ConstantFolder,
+ CGBuilderInserterTy> CGBuilderBaseTy;
+
+class CGBuilderTy : public CGBuilderBaseTy {
+ /// Storing a reference to the type cache here makes it a lot easier
+ /// to build natural-feeling, target-specific IR.
+ const CodeGenTypeCache &TypeCache;
+public:
+ CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::LLVMContext &C)
+ : CGBuilderBaseTy(C), TypeCache(TypeCache) {}
+ CGBuilderTy(const CodeGenTypeCache &TypeCache,
+ llvm::LLVMContext &C, const llvm::ConstantFolder &F,
+ const CGBuilderInserterTy &Inserter)
+ : CGBuilderBaseTy(C, F, Inserter), TypeCache(TypeCache) {}
+ CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::Instruction *I)
+ : CGBuilderBaseTy(I), TypeCache(TypeCache) {}
+ CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::BasicBlock *BB)
+ : CGBuilderBaseTy(BB), TypeCache(TypeCache) {}
+
+ llvm::ConstantInt *getSize(CharUnits N) {
+ return llvm::ConstantInt::get(TypeCache.SizeTy, N.getQuantity());
+ }
+ llvm::ConstantInt *getSize(uint64_t N) {
+ return llvm::ConstantInt::get(TypeCache.SizeTy, N);
+ }
+
+ // Note that we intentionally hide the CreateLoad APIs that don't
+ // take an alignment.
+ llvm::LoadInst *CreateLoad(Address Addr, const llvm::Twine &Name = "") {
+ return CreateAlignedLoad(Addr.getPointer(),
+ Addr.getAlignment().getQuantity(),
+ Name);
+ }
+ llvm::LoadInst *CreateLoad(Address Addr, const char *Name) {
+ // This overload is required to prevent string literals from
+ // ending up in the IsVolatile overload.
+ return CreateAlignedLoad(Addr.getPointer(),
+ Addr.getAlignment().getQuantity(),
+ Name);
+ }
+ llvm::LoadInst *CreateLoad(Address Addr, bool IsVolatile,
+ const llvm::Twine &Name = "") {
+ return CreateAlignedLoad(Addr.getPointer(),
+ Addr.getAlignment().getQuantity(),
+ IsVolatile,
+ Name);
+ }
+
+ using CGBuilderBaseTy::CreateAlignedLoad;
+ llvm::LoadInst *CreateAlignedLoad(llvm::Value *Addr, CharUnits Align,
+ const llvm::Twine &Name = "") {
+ return CreateAlignedLoad(Addr, Align.getQuantity(), Name);
+ }
+ llvm::LoadInst *CreateAlignedLoad(llvm::Value *Addr, CharUnits Align,
+ const char *Name) {
+ return CreateAlignedLoad(Addr, Align.getQuantity(), Name);
+ }
+ llvm::LoadInst *CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr,
+ CharUnits Align,
+ const llvm::Twine &Name = "") {
+ assert(Addr->getType()->getPointerElementType() == Ty);
+ return CreateAlignedLoad(Addr, Align.getQuantity(), Name);
+ }
+ llvm::LoadInst *CreateAlignedLoad(llvm::Value *Addr, CharUnits Align,
+ bool IsVolatile,
+ const llvm::Twine &Name = "") {
+ return CreateAlignedLoad(Addr, Align.getQuantity(), IsVolatile, Name);
+ }
+
+ // Note that we intentionally hide the CreateStore APIs that don't
+ // take an alignment.
+ llvm::StoreInst *CreateStore(llvm::Value *Val, Address Addr,
+ bool IsVolatile = false) {
+ return CreateAlignedStore(Val, Addr.getPointer(),
+ Addr.getAlignment().getQuantity(), IsVolatile);
+ }
+
+ using CGBuilderBaseTy::CreateAlignedStore;
+ llvm::StoreInst *CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr,
+ CharUnits Align, bool IsVolatile = false) {
+ return CreateAlignedStore(Val, Addr, Align.getQuantity(), IsVolatile);
+ }
+
+ // FIXME: these "default-aligned" APIs should be removed,
+ // but I don't feel like fixing all the builtin code right now.
+ llvm::LoadInst *CreateDefaultAlignedLoad(llvm::Value *Addr,
+ const llvm::Twine &Name = "") {
+ return CGBuilderBaseTy::CreateLoad(Addr, false, Name);
+ }
+ llvm::LoadInst *CreateDefaultAlignedLoad(llvm::Value *Addr,
+ const char *Name) {
+ return CGBuilderBaseTy::CreateLoad(Addr, false, Name);
+ }
+ llvm::LoadInst *CreateDefaultAlignedLoad(llvm::Value *Addr, bool IsVolatile,
+ const llvm::Twine &Name = "") {
+ return CGBuilderBaseTy::CreateLoad(Addr, IsVolatile, Name);
+ }
+
+ llvm::StoreInst *CreateDefaultAlignedStore(llvm::Value *Val,
+ llvm::Value *Addr,
+ bool IsVolatile = false) {
+ return CGBuilderBaseTy::CreateStore(Val, Addr, IsVolatile);
+ }
+
+ /// Emit a load from an i1 flag variable.
+ llvm::LoadInst *CreateFlagLoad(llvm::Value *Addr,
+ const llvm::Twine &Name = "") {
+ assert(Addr->getType()->getPointerElementType() == getInt1Ty());
+ return CreateAlignedLoad(getInt1Ty(), Addr, CharUnits::One(), Name);
+ }
+
+ /// Emit a store to an i1 flag variable.
+ llvm::StoreInst *CreateFlagStore(bool Value, llvm::Value *Addr) {
+ assert(Addr->getType()->getPointerElementType() == getInt1Ty());
+ return CreateAlignedStore(getInt1(Value), Addr, CharUnits::One());
+ }
+
+ using CGBuilderBaseTy::CreateBitCast;
+ Address CreateBitCast(Address Addr, llvm::Type *Ty,
+ const llvm::Twine &Name = "") {
+ return Address(CreateBitCast(Addr.getPointer(), Ty, Name),
+ Addr.getAlignment());
+ }
+
+ /// Cast the element type of the given address to a different type,
+ /// preserving information like the alignment and address space.
+ Address CreateElementBitCast(Address Addr, llvm::Type *Ty,
+ const llvm::Twine &Name = "") {
+ auto PtrTy = Ty->getPointerTo(Addr.getAddressSpace());
+ return CreateBitCast(Addr, PtrTy, Name);
+ }
+
+ using CGBuilderBaseTy::CreatePointerBitCastOrAddrSpaceCast;
+ Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty,
+ const llvm::Twine &Name = "") {
+ llvm::Value *Ptr =
+ CreatePointerBitCastOrAddrSpaceCast(Addr.getPointer(), Ty, Name);
+ return Address(Ptr, Addr.getAlignment());
+ }
+
+ using CGBuilderBaseTy::CreateStructGEP;
+ Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset,
+ const llvm::Twine &Name = "") {
+ return Address(CreateStructGEP(Addr.getElementType(),
+ Addr.getPointer(), Index, Name),
+ Addr.getAlignment().alignmentAtOffset(Offset));
+ }
+
+ /// Given
+ /// %addr = [n x T]* ...
+ /// produce
+ /// %name = getelementptr inbounds %addr, i64 0, i64 index
+ /// where i64 is actually the target word size.
+ ///
+ /// This API assumes that drilling into an array like this is always
+ /// an inbounds operation.
+ ///
+ /// \param EltSize - the size of the type T in bytes
+ Address CreateConstArrayGEP(Address Addr, uint64_t Index, CharUnits EltSize,
+ const llvm::Twine &Name = "") {
+ return Address(CreateInBoundsGEP(Addr.getPointer(),
+ {getSize(CharUnits::Zero()),
+ getSize(Index)},
+ Name),
+ Addr.getAlignment().alignmentAtOffset(Index * EltSize));
+ }
+
+ /// Given
+ /// %addr = T* ...
+ /// produce
+ /// %name = getelementptr inbounds %addr, i64 index
+ /// where i64 is actually the target word size.
+ ///
+ /// \param EltSize - the size of the type T in bytes
+ Address CreateConstInBoundsGEP(Address Addr, uint64_t Index,
+ CharUnits EltSize,
+ const llvm::Twine &Name = "") {
+ return Address(CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
+ getSize(Index), Name),
+ Addr.getAlignment().alignmentAtOffset(Index * EltSize));
+ }
+
+ /// Given
+ /// %addr = T* ...
+ /// produce
+ /// %name = getelementptr inbounds %addr, i64 index
+ /// where i64 is actually the target word size.
+ ///
+ /// \param EltSize - the size of the type T in bytes
+ Address CreateConstGEP(Address Addr, uint64_t Index, CharUnits EltSize,
+ const llvm::Twine &Name = "") {
+ return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(),
+ getSize(Index), Name),
+ Addr.getAlignment().alignmentAtOffset(Index * EltSize));
+ }
+
+ /// Given a pointer to i8, adjust it by a given constant offset.
+ Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset,
+ const llvm::Twine &Name = "") {
+ assert(Addr.getElementType() == TypeCache.Int8Ty);
+ return Address(CreateInBoundsGEP(Addr.getPointer(), getSize(Offset), Name),
+ Addr.getAlignment().alignmentAtOffset(Offset));
+ }
+ Address CreateConstByteGEP(Address Addr, CharUnits Offset,
+ const llvm::Twine &Name = "") {
+ assert(Addr.getElementType() == TypeCache.Int8Ty);
+ return Address(CreateGEP(Addr.getPointer(), getSize(Offset), Name),
+ Addr.getAlignment().alignmentAtOffset(Offset));
+ }
+
+ llvm::Value *CreateConstInBoundsByteGEP(llvm::Value *Ptr, CharUnits Offset,
+ const llvm::Twine &Name = "") {
+ assert(Ptr->getType()->getPointerElementType() == TypeCache.Int8Ty);
+ return CreateInBoundsGEP(Ptr, getSize(Offset), Name);
+ }
+ llvm::Value *CreateConstByteGEP(llvm::Value *Ptr, CharUnits Offset,
+ const llvm::Twine &Name = "") {
+ assert(Ptr->getType()->getPointerElementType() == TypeCache.Int8Ty);
+ return CreateGEP(Ptr, getSize(Offset), Name);
+ }
+
+ using CGBuilderBaseTy::CreateMemCpy;
+ llvm::CallInst *CreateMemCpy(Address Dest, Address Src, llvm::Value *Size,
+ bool IsVolatile = false) {
+ auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
+ return CreateMemCpy(Dest.getPointer(), Src.getPointer(), Size,
+ Align.getQuantity(), IsVolatile);
+ }
+ llvm::CallInst *CreateMemCpy(Address Dest, Address Src, uint64_t Size,
+ bool IsVolatile = false) {
+ auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
+ return CreateMemCpy(Dest.getPointer(), Src.getPointer(), Size,
+ Align.getQuantity(), IsVolatile);
+ }
+
+ using CGBuilderBaseTy::CreateMemMove;
+ llvm::CallInst *CreateMemMove(Address Dest, Address Src, llvm::Value *Size,
+ bool IsVolatile = false) {
+ auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
+ return CreateMemMove(Dest.getPointer(), Src.getPointer(), Size,
+ Align.getQuantity(), IsVolatile);
+ }
+
+ using CGBuilderBaseTy::CreateMemSet;
+ llvm::CallInst *CreateMemSet(Address Dest, llvm::Value *Value,
+ llvm::Value *Size, bool IsVolatile = false) {
+ return CreateMemSet(Dest.getPointer(), Value, Size,
+ Dest.getAlignment().getQuantity(), IsVolatile);
+ }
+};
+
+#undef PreserveNames
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
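
The payoff of routing everything through Address shows at use sites: alignment travels with the pointer, so callers never pass a bare integer alignment. A minimal sketch, assuming an initialized CGBuilderTy named Builder and an llvm::Value *ptr to an 8-byte-aligned slot:

    Address addr(ptr, CharUnits::fromQuantity(8));
    llvm::LoadInst *load = Builder.CreateLoad(addr);  // emitted with align 8
    Builder.CreateStore(load, addr);                  // likewise align 8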
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
new file mode 100644
index 0000000..787ac53
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
@@ -0,0 +1,7289 @@
+//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Builtin calls as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
+#include "CGObjCRuntime.h"
+#include "CodeGenModule.h"
+#include "TargetInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Intrinsics.h"
+#include <sstream>
+
+using namespace clang;
+using namespace CodeGen;
+using namespace llvm;
+
+/// getBuiltinLibFunction - Given a builtin id for a function like
+/// "__builtin_fabsf", return a Function* for "fabsf".
+llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
+ unsigned BuiltinID) {
+ assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
+
+ // Get the name, skip over the __builtin_ prefix (if necessary).
+ StringRef Name;
+ GlobalDecl D(FD);
+
+ // If the builtin has been declared explicitly with an assembler label,
+ // use the mangled name. This differs from the plain label on platforms
+ // that prefix labels.
+ if (FD->hasAttr<AsmLabelAttr>())
+ Name = getMangledName(D);
+ else
+ Name = Context.BuiltinInfo.getName(BuiltinID) + 10;
+
+ llvm::FunctionType *Ty =
+ cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
+
+ return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
+}
+
+/// Emit the conversions required to turn the given value into an
+/// integer of the given size.
+static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
+ QualType T, llvm::IntegerType *IntType) {
+ V = CGF.EmitToMemory(V, T);
+
+ if (V->getType()->isPointerTy())
+ return CGF.Builder.CreatePtrToInt(V, IntType);
+
+ assert(V->getType() == IntType);
+ return V;
+}
+
+static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
+ QualType T, llvm::Type *ResultType) {
+ V = CGF.EmitFromMemory(V, T);
+
+ if (ResultType->isPointerTy())
+ return CGF.Builder.CreateIntToPtr(V, ResultType);
+
+ assert(V->getType() == ResultType);
+ return V;
+}
+
+/// Utility to insert an atomic instruction based on Intrinsic::ID
+/// and the expression node.
+static Value *MakeBinaryAtomicValue(CodeGenFunction &CGF,
+ llvm::AtomicRMWInst::BinOp Kind,
+ const CallExpr *E) {
+ QualType T = E->getType();
+ assert(E->getArg(0)->getType()->isPointerType());
+ assert(CGF.getContext().hasSameUnqualifiedType(T,
+ E->getArg(0)->getType()->getPointeeType()));
+ assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
+
+ llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
+
+ llvm::IntegerType *IntType =
+ llvm::IntegerType::get(CGF.getLLVMContext(),
+ CGF.getContext().getTypeSize(T));
+ llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+
+ llvm::Value *Args[2];
+ Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
+ Args[1] = CGF.EmitScalarExpr(E->getArg(1));
+ llvm::Type *ValueType = Args[1]->getType();
+ Args[1] = EmitToInt(CGF, Args[1], T, IntType);
+
+ llvm::Value *Result =
+ CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
+ llvm::SequentiallyConsistent);
+ return EmitFromInt(CGF, Result, T, ValueType);
+}
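
For instance, a call such as

    int old = __sync_fetch_and_add(&counter, 1);

arrives here with Kind == llvm::AtomicRMWInst::Add: the destination pointer is bitcast to the matching integer-pointer type, the operand goes through EmitToInt, and the sequentially consistent atomicrmw result comes back through EmitFromInt in the original value type.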
+
+static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
+ Value *Val = CGF.EmitScalarExpr(E->getArg(0));
+ Value *Address = CGF.EmitScalarExpr(E->getArg(1));
+
+ // Convert the type of the pointer to a pointer to the stored type.
+ Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
+ Value *BC = CGF.Builder.CreateBitCast(
+ Address, llvm::PointerType::getUnqual(Val->getType()), "cast");
+ LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType());
+ LV.setNontemporal(true);
+ CGF.EmitStoreOfScalar(Val, LV, false);
+ return nullptr;
+}
+
+static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
+ Value *Address = CGF.EmitScalarExpr(E->getArg(0));
+
+ LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
+ LV.setNontemporal(true);
+ return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
+}
+
+static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
+ llvm::AtomicRMWInst::BinOp Kind,
+ const CallExpr *E) {
+ return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
+}
+
+/// Utility to insert an atomic instruction based on Intrinsic::ID and
+/// the expression node, where the return value is the result of the
+/// operation.
+static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
+ llvm::AtomicRMWInst::BinOp Kind,
+ const CallExpr *E,
+ Instruction::BinaryOps Op,
+ bool Invert = false) {
+ QualType T = E->getType();
+ assert(E->getArg(0)->getType()->isPointerType());
+ assert(CGF.getContext().hasSameUnqualifiedType(T,
+ E->getArg(0)->getType()->getPointeeType()));
+ assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
+
+ llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
+
+ llvm::IntegerType *IntType =
+ llvm::IntegerType::get(CGF.getLLVMContext(),
+ CGF.getContext().getTypeSize(T));
+ llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+
+ llvm::Value *Args[2];
+ Args[1] = CGF.EmitScalarExpr(E->getArg(1));
+ llvm::Type *ValueType = Args[1]->getType();
+ Args[1] = EmitToInt(CGF, Args[1], T, IntType);
+ Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
+
+ llvm::Value *Result =
+ CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
+ llvm::SequentiallyConsistent);
+ Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
+ if (Invert)
+ Result = CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
+ llvm::ConstantInt::get(IntType, -1));
+ Result = EmitFromInt(CGF, Result, T, ValueType);
+ return RValue::get(Result);
+}
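
This serves the *_and_fetch family: the atomicrmw still yields the old value, so the helper re-applies Op to recover the new one, and Invert handles the one odd case:

    __sync_add_and_fetch(&x, n)   // atomicrmw add, then Result = old + n
    __sync_nand_and_fetch(&x, n)  // atomicrmw nand, Op = And, Invert = true:
                                  // Result = ~(old & n)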
+
+/// @brief Utility to insert an atomic cmpxchg instruction.
+///
+/// @param CGF The current codegen function.
+/// @param E Builtin call expression to convert to cmpxchg.
+/// arg0 - address to operate on
+/// arg1 - value to compare with
+/// arg2 - new value
+/// @param ReturnBool Specifies whether to return success flag of
+/// cmpxchg result or the old value.
+///
+/// @returns result of cmpxchg, according to ReturnBool
+static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
+ bool ReturnBool) {
+ QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
+ llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
+
+ llvm::IntegerType *IntType = llvm::IntegerType::get(
+ CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
+ llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+
+ Value *Args[3];
+ Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
+ Args[1] = CGF.EmitScalarExpr(E->getArg(1));
+ llvm::Type *ValueType = Args[1]->getType();
+ Args[1] = EmitToInt(CGF, Args[1], T, IntType);
+ Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
+
+ Value *Pair = CGF.Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
+ llvm::SequentiallyConsistent,
+ llvm::SequentiallyConsistent);
+ if (ReturnBool)
+ // Extract boolean success flag and zext it to int.
+ return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
+ CGF.ConvertType(E->getType()));
+ else
+ // Extract old value and emit it using the same type as compare value.
+ return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
+ ValueType);
+}
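
The two __sync compare-and-swap builtins share this one helper and differ only in ReturnBool:

    bool ok = __sync_bool_compare_and_swap(&v, expected, desired); // true
    int old = __sync_val_compare_and_swap(&v, expected, desired);  // false

ReturnBool selects element 1 of the cmpxchg pair (the success flag, zext'ed to int); otherwise element 0 (the prior value) is converted back via EmitFromInt.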
+
+/// EmitFAbs - Emit a call to @llvm.fabs().
+static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
+ Value *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
+ llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
+ Call->setDoesNotAccessMemory();
+ return Call;
+}
+
+/// Emit the computation of the sign bit for a floating point value. Returns
+/// the i1 sign bit value.
+static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
+ LLVMContext &C = CGF.CGM.getLLVMContext();
+
+ llvm::Type *Ty = V->getType();
+ int Width = Ty->getPrimitiveSizeInBits();
+ llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
+ V = CGF.Builder.CreateBitCast(V, IntTy);
+ if (Ty->isPPC_FP128Ty()) {
+ // We want the sign bit of the higher-order double. The bitcast we just
+ // did works as if the double-double was stored to memory and then
+ // read as an i128. The "store" will put the higher-order double in the
+ // lower address in both little- and big-Endian modes, but the "load"
+ // will treat those bits as a different part of the i128: the low bits in
+ // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
+ // we need to shift the high bits down to the low before truncating.
+ Width >>= 1;
+ if (CGF.getTarget().isBigEndian()) {
+ Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
+ V = CGF.Builder.CreateLShr(V, ShiftCst);
+ }
+ // We are truncating value in order to extract the higher-order
+ // double, which we will be using to extract the sign from.
+ IntTy = llvm::IntegerType::get(C, Width);
+ V = CGF.Builder.CreateTrunc(V, IntTy);
+ }
+ Value *Zero = llvm::Constant::getNullValue(IntTy);
+ return CGF.Builder.CreateICmpSLT(V, Zero);
+}
+
+static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
+ const CallExpr *E, llvm::Value *calleeValue) {
+ return CGF.EmitCall(E->getCallee()->getType(), calleeValue, E,
+ ReturnValueSlot(), Fn);
+}
+
+/// \brief Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
+/// depending on IntrinsicID.
+///
+/// \arg CGF The current codegen function.
+/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
+/// \arg X The first argument to the llvm.*.with.overflow.*.
+/// \arg Y The second argument to the llvm.*.with.overflow.*.
+/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
+/// \returns The result (i.e. sum/product) returned by the intrinsic.
+static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
+ const llvm::Intrinsic::ID IntrinsicID,
+ llvm::Value *X, llvm::Value *Y,
+ llvm::Value *&Carry) {
+ // Make sure we have integers of the same width.
+ assert(X->getType() == Y->getType() &&
+ "Arguments must be the same type. (Did you forget to make sure both "
+ "arguments have the same integer width?)");
+
+ llvm::Value *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
+ llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
+ Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
+ return CGF.Builder.CreateExtractValue(Tmp, 0);
+}
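
A typical client is the checked-arithmetic builtin family; for example:

    unsigned sum;
    bool overflowed = __builtin_uadd_overflow(a, b, &sum);

maps onto llvm.uadd.with.overflow.i32, whose aggregate result this helper splits into the sum (returned) and the carry bit (out-parameter).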
+
+namespace {
+ struct WidthAndSignedness {
+ unsigned Width;
+ bool Signed;
+ };
+}
+
+static WidthAndSignedness
+getIntegerWidthAndSignedness(const clang::ASTContext &context,
+ const clang::QualType Type) {
+ assert(Type->isIntegerType() && "Given type is not an integer.");
+ unsigned Width = Type->isBooleanType() ? 1 : context.getTypeInfo(Type).Width;
+ bool Signed = Type->isSignedIntegerType();
+ return {Width, Signed};
+}
+
+// Given one or more integer types, this function produces an integer type that
+// encompasses them: any value in one of the given types could be expressed in
+// the encompassing type.
+static struct WidthAndSignedness
+EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
+ assert(Types.size() > 0 && "Empty list of types.");
+
+ // If any of the given types is signed, we must return a signed type.
+ bool Signed = false;
+ for (const auto &Type : Types) {
+ Signed |= Type.Signed;
+ }
+
+ // The encompassing type must have a width greater than or equal to the width
+ // of the specified types. Additionally, if the encompassing type is signed,
+ // its width must be strictly greater than the width of any unsigned types
+ // given.
+ unsigned Width = 0;
+ for (const auto &Type : Types) {
+ unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
+ if (Width < MinWidth) {
+ Width = MinWidth;
+ }
+ }
+
+ return {Width, Signed};
+}
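+
+// Worked example for EncompassingIntegerType (illustrative): given
+// {unsigned 32-bit, signed 32-bit}, Signed becomes true, the unsigned member
+// forces MinWidth = 32 + 1, and the result is a signed 33-bit type; every
+// value of either input type is representable in it.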
+
+Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
+ llvm::Type *DestType = Int8PtrTy;
+ if (ArgValue->getType() != DestType)
+ ArgValue =
+ Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data());
+
+ Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
+ return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
+}
+
+/// Checks if using the result of __builtin_object_size(p, @p From) in place of
+/// __builtin_object_size(p, @p To) is correct.
+static bool areBOSTypesCompatible(int From, int To) {
+ // Note: Our __builtin_object_size implementation currently treats Type=0 and
+ // Type=2 identically. Encoding this implementation detail here may make
+ // improving __builtin_object_size difficult in the future, so it's omitted.
+ return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
+}
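+
+// Roughly: types 0/1 request upper bounds and types 2/3 lower bounds, so a
+// whole-object upper bound (From=0) is still a valid upper bound for the
+// enclosing-subobject query (To=1), and a subobject lower bound (From=3) is
+// still a valid lower bound for the whole object (To=2).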
+
+static llvm::Value *
+getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
+ return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
+}
+
+llvm::Value *
+CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
+ llvm::IntegerType *ResType) {
+ uint64_t ObjectSize;
+ if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
+ return emitBuiltinObjectSize(E, Type, ResType);
+ return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
+}
+
+/// Returns a Value corresponding to the size of the given expression.
+/// This Value may be either of the following:
+/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
+/// it)
+/// - A call to the @llvm.objectsize intrinsic
+llvm::Value *
+CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
+ llvm::IntegerType *ResType) {
+ // We need to reference an argument if the pointer is a parameter with the
+ // pass_object_size attribute.
+ if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
+ auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
+ auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
+ if (Param != nullptr && PS != nullptr &&
+ areBOSTypesCompatible(PS->getType(), Type)) {
+ auto Iter = SizeArguments.find(Param);
+ assert(Iter != SizeArguments.end());
+
+ const ImplicitParamDecl *D = Iter->second;
+ auto DIter = LocalDeclMap.find(D);
+ assert(DIter != LocalDeclMap.end());
+
+ return EmitLoadOfScalar(DIter->second, /*volatile=*/false,
+ getContext().getSizeType(), E->getLocStart());
+ }
+ }
+
+ // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
+ // evaluate E for side-effects. In either case, we shouldn't lower to
+ // @llvm.objectsize.
+ if (Type == 3 || E->HasSideEffects(getContext()))
+ return getDefaultBuiltinObjectSizeResult(Type, ResType);
+
+ // LLVM only supports types 0 and 2, so make sure we pass the
+ // distinguishing bit along as a boolean.
+ auto *CI = ConstantInt::get(Builder.getInt1Ty(), (Type & 2) >> 1);
+ // FIXME: Get right address space.
+ llvm::Type *Tys[] = {ResType, Builder.getInt8PtrTy(0)};
+ Value *F = CGM.getIntrinsic(Intrinsic::objectsize, Tys);
+ return Builder.CreateCall(F, {EmitScalarExpr(E), CI});
+}
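+
+// Hypothetical usage (names invented for illustration):
+//   void fill(char *buf __attribute__((pass_object_size(0))), char c);
+// Inside fill, __builtin_object_size(buf, 0) loads the implicit size value
+// the caller passed alongside buf, rather than emitting @llvm.objectsize.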
+
+RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
+ unsigned BuiltinID, const CallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ // See if we can constant fold this builtin. If so, don't emit it at all.
+ Expr::EvalResult Result;
+ if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
+ !Result.hasSideEffects()) {
+ if (Result.Val.isInt())
+ return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
+ Result.Val.getInt()));
+ if (Result.Val.isFloat())
+ return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
+ Result.Val.getFloat()));
+ }
+
+ switch (BuiltinID) {
+ default: break; // Handle intrinsics and libm functions below.
+ case Builtin::BI__builtin___CFStringMakeConstantString:
+ case Builtin::BI__builtin___NSStringMakeConstantString:
+ return RValue::get(CGM.EmitConstantExpr(E, E->getType(), nullptr));
+ case Builtin::BI__builtin_stdarg_start:
+ case Builtin::BI__builtin_va_start:
+ case Builtin::BI__va_start:
+ case Builtin::BI__builtin_va_end:
+ return RValue::get(
+ EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
+ ? EmitScalarExpr(E->getArg(0))
+ : EmitVAListRef(E->getArg(0)).getPointer(),
+ BuiltinID != Builtin::BI__builtin_va_end));
+ case Builtin::BI__builtin_va_copy: {
+ Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
+ Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();
+
+ llvm::Type *Type = Int8PtrTy;
+
+ DstPtr = Builder.CreateBitCast(DstPtr, Type);
+ SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
+ return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy),
+ {DstPtr, SrcPtr}));
+ }
+ case Builtin::BI__builtin_abs:
+ case Builtin::BI__builtin_labs:
+ case Builtin::BI__builtin_llabs: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
+ Value *CmpResult =
+ Builder.CreateICmpSGE(ArgValue,
+ llvm::Constant::getNullValue(ArgValue->getType()),
+ "abscond");
+ Value *Result =
+ Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");
+
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_fabs:
+ case Builtin::BI__builtin_fabsf:
+ case Builtin::BI__builtin_fabsl: {
+ Value *Arg1 = EmitScalarExpr(E->getArg(0));
+ Value *Result = EmitFAbs(*this, Arg1);
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_fmod:
+ case Builtin::BI__builtin_fmodf:
+ case Builtin::BI__builtin_fmodl: {
+ Value *Arg1 = EmitScalarExpr(E->getArg(0));
+ Value *Arg2 = EmitScalarExpr(E->getArg(1));
+ Value *Result = Builder.CreateFRem(Arg1, Arg2, "fmod");
+ return RValue::get(Result);
+ }
+
+ case Builtin::BI__builtin_conj:
+ case Builtin::BI__builtin_conjf:
+ case Builtin::BI__builtin_conjl: {
+ ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
+ Value *Real = ComplexVal.first;
+ Value *Imag = ComplexVal.second;
+ Value *Zero =
+ Imag->getType()->isFPOrFPVectorTy()
+ ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType())
+ : llvm::Constant::getNullValue(Imag->getType());
+
+ Imag = Builder.CreateFSub(Zero, Imag, "sub");
+ return RValue::getComplex(std::make_pair(Real, Imag));
+ }
+ case Builtin::BI__builtin_creal:
+ case Builtin::BI__builtin_crealf:
+ case Builtin::BI__builtin_creall:
+ case Builtin::BIcreal:
+ case Builtin::BIcrealf:
+ case Builtin::BIcreall: {
+ ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
+ return RValue::get(ComplexVal.first);
+ }
+
+ case Builtin::BI__builtin_cimag:
+ case Builtin::BI__builtin_cimagf:
+ case Builtin::BI__builtin_cimagl:
+ case Builtin::BIcimag:
+ case Builtin::BIcimagf:
+ case Builtin::BIcimagl: {
+ ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
+ return RValue::get(ComplexVal.second);
+ }
+
+ case Builtin::BI__builtin_ctzs:
+ case Builtin::BI__builtin_ctz:
+ case Builtin::BI__builtin_ctzl:
+ case Builtin::BI__builtin_ctzll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
+ Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_clzs:
+ case Builtin::BI__builtin_clz:
+ case Builtin::BI__builtin_clzl:
+ case Builtin::BI__builtin_clzll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
+ Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_ffs:
+ case Builtin::BI__builtin_ffsl:
+ case Builtin::BI__builtin_ffsll: {
+ // ffs(x) -> x ? cttz(x) + 1 : 0
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Tmp =
+ Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
+ llvm::ConstantInt::get(ArgType, 1));
+ Value *Zero = llvm::Constant::getNullValue(ArgType);
+ Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
+ Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_parity:
+ case Builtin::BI__builtin_parityl:
+ case Builtin::BI__builtin_parityll: {
+ // parity(x) -> ctpop(x) & 1
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Tmp = Builder.CreateCall(F, ArgValue);
+ Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_popcount:
+ case Builtin::BI__builtin_popcountl:
+ case Builtin::BI__builtin_popcountll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue);
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_unpredictable: {
+ // Always return the argument of __builtin_unpredictable. LLVM does not
+ // handle this builtin. Metadata for this builtin should be added directly
+ // to instructions such as branches or switches that use it.
+ return RValue::get(EmitScalarExpr(E->getArg(0)));
+ }
+ case Builtin::BI__builtin_expect: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgType = ArgValue->getType();
+
+ Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
+ // Don't generate llvm.expect on -O0 as the backend won't use it for
+ // anything.
+ // Note that we still IRGen ExpectedValue because it could have side
+ // effects.
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0)
+ return RValue::get(ArgValue);
+
+ Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
+ Value *Result =
+ Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
+ return RValue::get(Result);
+ }
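+ // For the case above, a guard like "if (__builtin_expect(err, 0))" on a
+ // 64-bit target becomes (sketch) a branch on
+ //   %expval = call i64 @llvm.expect.i64(i64 %err, i64 0)
+ // which later passes fold into branch-weight metadata; at -O0 the
+ // intrinsic is skipped and %err is used directly.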
+ case Builtin::BI__builtin_assume_aligned: {
+ Value *PtrValue = EmitScalarExpr(E->getArg(0));
+ Value *OffsetValue =
+ (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
+
+ Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
+ ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
+ unsigned Alignment = (unsigned) AlignmentCI->getZExtValue();
+
+ EmitAlignmentAssumption(PtrValue, Alignment, OffsetValue);
+ return RValue::get(PtrValue);
+ }
+ case Builtin::BI__assume:
+ case Builtin::BI__builtin_assume: {
+ if (E->getArg(0)->HasSideEffects(getContext()))
+ return RValue::get(nullptr);
+
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+ Value *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
+ return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
+ }
+ case Builtin::BI__builtin_bswap16:
+ case Builtin::BI__builtin_bswap32:
+ case Builtin::BI__builtin_bswap64: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::bswap, ArgType);
+ return RValue::get(Builder.CreateCall(F, ArgValue));
+ }
+ case Builtin::BI__builtin_object_size: {
+ unsigned Type =
+ E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
+ auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
+
+ // We pass this builtin onto the optimizer so that it can figure out the
+ // object size in more complex cases.
+ return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType));
+ }
+ case Builtin::BI__builtin_prefetch: {
+ Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
+ // FIXME: Technically these constants should be of type 'int', yes?
+ RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
+ llvm::ConstantInt::get(Int32Ty, 0);
+ Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
+ llvm::ConstantInt::get(Int32Ty, 3);
+ Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
+ Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
+ return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data}));
+ }
+ case Builtin::BI__builtin_readcyclecounter: {
+ Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
+ return RValue::get(Builder.CreateCall(F));
+ }
+ case Builtin::BI__builtin___clear_cache: {
+ Value *Begin = EmitScalarExpr(E->getArg(0));
+ Value *End = EmitScalarExpr(E->getArg(1));
+ Value *F = CGM.getIntrinsic(Intrinsic::clear_cache);
+ return RValue::get(Builder.CreateCall(F, {Begin, End}));
+ }
+ case Builtin::BI__builtin_trap:
+ return RValue::get(EmitTrapCall(Intrinsic::trap));
+ case Builtin::BI__debugbreak:
+ return RValue::get(EmitTrapCall(Intrinsic::debugtrap));
+ case Builtin::BI__builtin_unreachable: {
+ if (SanOpts.has(SanitizerKind::Unreachable)) {
+ SanitizerScope SanScope(this);
+ EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
+ SanitizerKind::Unreachable),
+ "builtin_unreachable", EmitCheckSourceLocation(E->getExprLoc()),
+ None);
+ } else
+ Builder.CreateUnreachable();
+
+ // We do need to preserve an insertion point.
+ EmitBlock(createBasicBlock("unreachable.cont"));
+
+ return RValue::get(nullptr);
+ }
+
+ case Builtin::BI__builtin_powi:
+ case Builtin::BI__builtin_powif:
+ case Builtin::BI__builtin_powil: {
+ Value *Base = EmitScalarExpr(E->getArg(0));
+ Value *Exponent = EmitScalarExpr(E->getArg(1));
+ llvm::Type *ArgType = Base->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
+ return RValue::get(Builder.CreateCall(F, {Base, Exponent}));
+ }
+
+ case Builtin::BI__builtin_isgreater:
+ case Builtin::BI__builtin_isgreaterequal:
+ case Builtin::BI__builtin_isless:
+ case Builtin::BI__builtin_islessequal:
+ case Builtin::BI__builtin_islessgreater:
+ case Builtin::BI__builtin_isunordered: {
+ // Ordered comparisons: we know the arguments to these are matching scalar
+ // floating point values.
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unknown ordered comparison");
+ case Builtin::BI__builtin_isgreater:
+ LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isgreaterequal:
+ LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isless:
+ LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_islessequal:
+ LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_islessgreater:
+ LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isunordered:
+ LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
+ break;
+ }
+ // ZExt bool to int type.
+ return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
+ }
+ case Builtin::BI__builtin_isnan: {
+ Value *V = EmitScalarExpr(E->getArg(0));
+ V = Builder.CreateFCmpUNO(V, V, "cmp");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_isinf: {
+ // isinf(x) --> fabs(x) == infinity
+ Value *V = EmitScalarExpr(E->getArg(0));
+ V = EmitFAbs(*this, V);
+
+ V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_isinf_sign: {
+ // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
+ Value *Arg = EmitScalarExpr(E->getArg(0));
+ Value *AbsArg = EmitFAbs(*this, Arg);
+ Value *IsInf = Builder.CreateFCmpOEQ(
+ AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
+ Value *IsNeg = EmitSignBit(*this, Arg);
+
+ llvm::Type *IntTy = ConvertType(E->getType());
+ Value *Zero = Constant::getNullValue(IntTy);
+ Value *One = ConstantInt::get(IntTy, 1);
+ Value *NegativeOne = ConstantInt::get(IntTy, -1);
+ Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
+ Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
+ return RValue::get(Result);
+ }
+
+ case Builtin::BI__builtin_isnormal: {
+ // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
+ Value *V = EmitScalarExpr(E->getArg(0));
+ Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
+
+ Value *Abs = EmitFAbs(*this, V);
+ Value *IsLessThanInf =
+ Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
+ APFloat Smallest = APFloat::getSmallestNormalized(
+ getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
+ Value *IsNormal =
+ Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
+ "isnormal");
+ V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
+ V = Builder.CreateAnd(V, IsNormal, "and");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_isfinite: {
+ // isfinite(x) --> x == x && fabs(x) != infinity;
+ Value *V = EmitScalarExpr(E->getArg(0));
+ Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
+
+ Value *Abs = EmitFAbs(*this, V);
+ Value *IsNotInf =
+ Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
+
+ V = Builder.CreateAnd(Eq, IsNotInf, "and");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_fpclassify: {
+ Value *V = EmitScalarExpr(E->getArg(5));
+ llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
+
+ // Create Result
+ BasicBlock *Begin = Builder.GetInsertBlock();
+ BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
+ Builder.SetInsertPoint(End);
+ PHINode *Result =
+ Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
+ "fpclassify_result");
+
+ // if (V==0) return FP_ZERO
+ Builder.SetInsertPoint(Begin);
+ Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
+ "iszero");
+ Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
+ BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
+ Builder.CreateCondBr(IsZero, End, NotZero);
+ Result->addIncoming(ZeroLiteral, Begin);
+
+ // if (V != V) return FP_NAN
+ Builder.SetInsertPoint(NotZero);
+ Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
+ Value *NanLiteral = EmitScalarExpr(E->getArg(0));
+ BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
+ Builder.CreateCondBr(IsNan, End, NotNan);
+ Result->addIncoming(NanLiteral, NotZero);
+
+ // if (fabs(V) == infinity) return FP_INFINITY
+ Builder.SetInsertPoint(NotNan);
+ Value *VAbs = EmitFAbs(*this, V);
+ Value *IsInf =
+ Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
+ "isinf");
+ Value *InfLiteral = EmitScalarExpr(E->getArg(1));
+ BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
+ Builder.CreateCondBr(IsInf, End, NotInf);
+ Result->addIncoming(InfLiteral, NotNan);
+
+ // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
+ Builder.SetInsertPoint(NotInf);
+ APFloat Smallest = APFloat::getSmallestNormalized(
+ getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
+ Value *IsNormal =
+ Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
+ "isnormal");
+ Value *NormalResult =
+ Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
+ EmitScalarExpr(E->getArg(3)));
+ Builder.CreateBr(End);
+ Result->addIncoming(NormalResult, NotInf);
+
+ // return Result
+ Builder.SetInsertPoint(End);
+ return RValue::get(Result);
+ }
+
+ case Builtin::BIalloca:
+ case Builtin::BI_alloca:
+ case Builtin::BI__builtin_alloca: {
+ Value *Size = EmitScalarExpr(E->getArg(0));
+ return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size));
+ }
+ case Builtin::BIbzero:
+ case Builtin::BI__builtin_bzero: {
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ Value *SizeVal = EmitScalarExpr(E->getArg(1));
+ EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
+ E->getArg(0)->getExprLoc(), FD, 0);
+ Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
+ return RValue::get(Dest.getPointer());
+ }
+ case Builtin::BImemcpy:
+ case Builtin::BI__builtin_memcpy: {
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ Address Src = EmitPointerWithAlignment(E->getArg(1));
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
+ E->getArg(0)->getExprLoc(), FD, 0);
+ EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
+ E->getArg(1)->getExprLoc(), FD, 1);
+ Builder.CreateMemCpy(Dest, Src, SizeVal, false);
+ return RValue::get(Dest.getPointer());
+ }
+
+ case Builtin::BI__builtin___memcpy_chk: {
+ // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
+ llvm::APSInt Size, DstSize;
+ if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
+ break;
+ if (Size.ugt(DstSize))
+ break;
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ Address Src = EmitPointerWithAlignment(E->getArg(1));
+ Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
+ Builder.CreateMemCpy(Dest, Src, SizeVal, false);
+ return RValue::get(Dest.getPointer());
+ }
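+ // E.g. __builtin___memcpy_chk(d, s, 16, 32) satisfies 16 <= 32, so the
+ // case above emits a plain 16-byte memcpy; a non-constant or oversized
+ // count breaks out and is handled as an ordinary library call below.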
+
+ case Builtin::BI__builtin_objc_memmove_collectable: {
+ Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
+ Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
+ DestAddr, SrcAddr, SizeVal);
+ return RValue::get(DestAddr.getPointer());
+ }
+
+ case Builtin::BI__builtin___memmove_chk: {
+ // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
+ llvm::APSInt Size, DstSize;
+ if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
+ break;
+ if (Size.ugt(DstSize))
+ break;
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ Address Src = EmitPointerWithAlignment(E->getArg(1));
+ Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
+ Builder.CreateMemMove(Dest, Src, SizeVal, false);
+ return RValue::get(Dest.getPointer());
+ }
+
+ case Builtin::BImemmove:
+ case Builtin::BI__builtin_memmove: {
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ Address Src = EmitPointerWithAlignment(E->getArg(1));
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
+ E->getArg(0)->getExprLoc(), FD, 0);
+ EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
+ E->getArg(1)->getExprLoc(), FD, 1);
+ Builder.CreateMemMove(Dest, Src, SizeVal, false);
+ return RValue::get(Dest.getPointer());
+ }
+ case Builtin::BImemset:
+ case Builtin::BI__builtin_memset: {
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
+ Builder.getInt8Ty());
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
+ E->getArg(0)->getExprLoc(), FD, 0);
+ Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
+ return RValue::get(Dest.getPointer());
+ }
+ case Builtin::BI__builtin___memset_chk: {
+ // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
+ llvm::APSInt Size, DstSize;
+ if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
+ break;
+ if (Size.ugt(DstSize))
+ break;
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
+ Builder.getInt8Ty());
+ Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
+ Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
+ return RValue::get(Dest.getPointer());
+ }
+ case Builtin::BI__builtin_dwarf_cfa: {
+ // The offset in bytes from the first argument to the CFA.
+ //
+ // Why on earth is this in the frontend? Is there any reason at
+ // all that the backend can't reasonably determine this while
+ // lowering llvm.eh.dwarf.cfa()?
+ //
+ // TODO: If there's a satisfactory reason, add a target hook for
+ // this instead of hard-coding 0, which is correct for most targets.
+ int32_t Offset = 0;
+
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
+ return RValue::get(Builder.CreateCall(F,
+ llvm::ConstantInt::get(Int32Ty, Offset)));
+ }
+ case Builtin::BI__builtin_return_address: {
+ Value *Depth =
+ CGM.EmitConstantExpr(E->getArg(0), getContext().UnsignedIntTy, this);
+ Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
+ return RValue::get(Builder.CreateCall(F, Depth));
+ }
+ case Builtin::BI__builtin_frame_address: {
+ Value *Depth =
+ CGM.EmitConstantExpr(E->getArg(0), getContext().UnsignedIntTy, this);
+ Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
+ return RValue::get(Builder.CreateCall(F, Depth));
+ }
+ case Builtin::BI__builtin_extract_return_addr: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_frob_return_addr: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_dwarf_sp_column: {
+ llvm::IntegerType *Ty
+ = cast<llvm::IntegerType>(ConvertType(E->getType()));
+ int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
+ if (Column == -1) {
+ CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
+ return RValue::get(llvm::UndefValue::get(Ty));
+ }
+ return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
+ }
+ case Builtin::BI__builtin_init_dwarf_reg_size_table: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
+ CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
+ return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
+ }
+ case Builtin::BI__builtin_eh_return: {
+ Value *Int = EmitScalarExpr(E->getArg(0));
+ Value *Ptr = EmitScalarExpr(E->getArg(1));
+
+ llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
+ assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
+ "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
+ Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
+ ? Intrinsic::eh_return_i32
+ : Intrinsic::eh_return_i64);
+ Builder.CreateCall(F, {Int, Ptr});
+ Builder.CreateUnreachable();
+
+ // We do need to preserve an insertion point.
+ EmitBlock(createBasicBlock("builtin_eh_return.cont"));
+
+ return RValue::get(nullptr);
+ }
+ case Builtin::BI__builtin_unwind_init: {
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
+ return RValue::get(Builder.CreateCall(F));
+ }
+ case Builtin::BI__builtin_extend_pointer: {
+ // Extends a pointer to the size of an _Unwind_Word, which is
+ // uint64_t on all platforms. Generally this gets poked into a
+ // register and eventually used as an address, so if the
+ // addressing registers are wider than pointers and the platform
+ // doesn't implicitly ignore high-order bits when doing
+ // addressing, we need to make sure we zext / sext based on
+ // the platform's expectations.
+ //
+ // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
+
+ // Cast the pointer to intptr_t.
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
+
+ // If that's 64 bits, we're done.
+ if (IntPtrTy->getBitWidth() == 64)
+ return RValue::get(Result);
+
+ // Otherwise, ask the codegen data what to do.
+ if (getTargetHooks().extendPointerWithSExt())
+ return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
+ else
+ return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
+ }
+ case Builtin::BI__builtin_setjmp: {
+ // Buffer is a void**.
+ Address Buf = EmitPointerWithAlignment(E->getArg(0));
+
+ // Store the frame pointer to the setjmp buffer.
+ Value *FrameAddr =
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
+ ConstantInt::get(Int32Ty, 0));
+ Builder.CreateStore(FrameAddr, Buf);
+
+ // Store the stack pointer to the setjmp buffer.
+ Value *StackAddr =
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
+ Address StackSaveSlot =
+ Builder.CreateConstInBoundsGEP(Buf, 2, getPointerSize());
+ Builder.CreateStore(StackAddr, StackSaveSlot);
+
+ // Call LLVM's EH setjmp, which is lightweight.
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
+ Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
+ return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
+ }
+ case Builtin::BI__builtin_longjmp: {
+ Value *Buf = EmitScalarExpr(E->getArg(0));
+ Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
+
+ // Call LLVM's EH longjmp, which is lightweight.
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
+
+ // longjmp doesn't return; mark this as unreachable.
+ Builder.CreateUnreachable();
+
+ // We do need to preserve an insertion point.
+ EmitBlock(createBasicBlock("longjmp.cont"));
+
+ return RValue::get(nullptr);
+ }
+ case Builtin::BI__sync_fetch_and_add:
+ case Builtin::BI__sync_fetch_and_sub:
+ case Builtin::BI__sync_fetch_and_or:
+ case Builtin::BI__sync_fetch_and_and:
+ case Builtin::BI__sync_fetch_and_xor:
+ case Builtin::BI__sync_fetch_and_nand:
+ case Builtin::BI__sync_add_and_fetch:
+ case Builtin::BI__sync_sub_and_fetch:
+ case Builtin::BI__sync_and_and_fetch:
+ case Builtin::BI__sync_or_and_fetch:
+ case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_nand_and_fetch:
+ case Builtin::BI__sync_val_compare_and_swap:
+ case Builtin::BI__sync_bool_compare_and_swap:
+ case Builtin::BI__sync_lock_test_and_set:
+ case Builtin::BI__sync_lock_release:
+ case Builtin::BI__sync_swap:
+ llvm_unreachable("Shouldn't make it through sema");
+ case Builtin::BI__sync_fetch_and_add_1:
+ case Builtin::BI__sync_fetch_and_add_2:
+ case Builtin::BI__sync_fetch_and_add_4:
+ case Builtin::BI__sync_fetch_and_add_8:
+ case Builtin::BI__sync_fetch_and_add_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
+ case Builtin::BI__sync_fetch_and_sub_1:
+ case Builtin::BI__sync_fetch_and_sub_2:
+ case Builtin::BI__sync_fetch_and_sub_4:
+ case Builtin::BI__sync_fetch_and_sub_8:
+ case Builtin::BI__sync_fetch_and_sub_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
+ case Builtin::BI__sync_fetch_and_or_1:
+ case Builtin::BI__sync_fetch_and_or_2:
+ case Builtin::BI__sync_fetch_and_or_4:
+ case Builtin::BI__sync_fetch_and_or_8:
+ case Builtin::BI__sync_fetch_and_or_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
+ case Builtin::BI__sync_fetch_and_and_1:
+ case Builtin::BI__sync_fetch_and_and_2:
+ case Builtin::BI__sync_fetch_and_and_4:
+ case Builtin::BI__sync_fetch_and_and_8:
+ case Builtin::BI__sync_fetch_and_and_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
+ case Builtin::BI__sync_fetch_and_xor_1:
+ case Builtin::BI__sync_fetch_and_xor_2:
+ case Builtin::BI__sync_fetch_and_xor_4:
+ case Builtin::BI__sync_fetch_and_xor_8:
+ case Builtin::BI__sync_fetch_and_xor_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
+ case Builtin::BI__sync_fetch_and_nand_1:
+ case Builtin::BI__sync_fetch_and_nand_2:
+ case Builtin::BI__sync_fetch_and_nand_4:
+ case Builtin::BI__sync_fetch_and_nand_8:
+ case Builtin::BI__sync_fetch_and_nand_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
+
+ // Clang extensions: not overloaded yet.
+ case Builtin::BI__sync_fetch_and_min:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
+ case Builtin::BI__sync_fetch_and_max:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
+ case Builtin::BI__sync_fetch_and_umin:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
+ case Builtin::BI__sync_fetch_and_umax:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
+
+ case Builtin::BI__sync_add_and_fetch_1:
+ case Builtin::BI__sync_add_and_fetch_2:
+ case Builtin::BI__sync_add_and_fetch_4:
+ case Builtin::BI__sync_add_and_fetch_8:
+ case Builtin::BI__sync_add_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
+ llvm::Instruction::Add);
+ case Builtin::BI__sync_sub_and_fetch_1:
+ case Builtin::BI__sync_sub_and_fetch_2:
+ case Builtin::BI__sync_sub_and_fetch_4:
+ case Builtin::BI__sync_sub_and_fetch_8:
+ case Builtin::BI__sync_sub_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
+ llvm::Instruction::Sub);
+ case Builtin::BI__sync_and_and_fetch_1:
+ case Builtin::BI__sync_and_and_fetch_2:
+ case Builtin::BI__sync_and_and_fetch_4:
+ case Builtin::BI__sync_and_and_fetch_8:
+ case Builtin::BI__sync_and_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
+ llvm::Instruction::And);
+ case Builtin::BI__sync_or_and_fetch_1:
+ case Builtin::BI__sync_or_and_fetch_2:
+ case Builtin::BI__sync_or_and_fetch_4:
+ case Builtin::BI__sync_or_and_fetch_8:
+ case Builtin::BI__sync_or_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
+ llvm::Instruction::Or);
+ case Builtin::BI__sync_xor_and_fetch_1:
+ case Builtin::BI__sync_xor_and_fetch_2:
+ case Builtin::BI__sync_xor_and_fetch_4:
+ case Builtin::BI__sync_xor_and_fetch_8:
+ case Builtin::BI__sync_xor_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
+ llvm::Instruction::Xor);
+ case Builtin::BI__sync_nand_and_fetch_1:
+ case Builtin::BI__sync_nand_and_fetch_2:
+ case Builtin::BI__sync_nand_and_fetch_4:
+ case Builtin::BI__sync_nand_and_fetch_8:
+ case Builtin::BI__sync_nand_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
+ llvm::Instruction::And, true);
+
+ case Builtin::BI__sync_val_compare_and_swap_1:
+ case Builtin::BI__sync_val_compare_and_swap_2:
+ case Builtin::BI__sync_val_compare_and_swap_4:
+ case Builtin::BI__sync_val_compare_and_swap_8:
+ case Builtin::BI__sync_val_compare_and_swap_16:
+ return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
+
+ case Builtin::BI__sync_bool_compare_and_swap_1:
+ case Builtin::BI__sync_bool_compare_and_swap_2:
+ case Builtin::BI__sync_bool_compare_and_swap_4:
+ case Builtin::BI__sync_bool_compare_and_swap_8:
+ case Builtin::BI__sync_bool_compare_and_swap_16:
+ return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
+
+ case Builtin::BI__sync_swap_1:
+ case Builtin::BI__sync_swap_2:
+ case Builtin::BI__sync_swap_4:
+ case Builtin::BI__sync_swap_8:
+ case Builtin::BI__sync_swap_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
+
+ case Builtin::BI__sync_lock_test_and_set_1:
+ case Builtin::BI__sync_lock_test_and_set_2:
+ case Builtin::BI__sync_lock_test_and_set_4:
+ case Builtin::BI__sync_lock_test_and_set_8:
+ case Builtin::BI__sync_lock_test_and_set_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
+
+ case Builtin::BI__sync_lock_release_1:
+ case Builtin::BI__sync_lock_release_2:
+ case Builtin::BI__sync_lock_release_4:
+ case Builtin::BI__sync_lock_release_8:
+ case Builtin::BI__sync_lock_release_16: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ QualType ElTy = E->getArg(0)->getType()->getPointeeType();
+ CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
+ llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
+ StoreSize.getQuantity() * 8);
+ Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
+ llvm::StoreInst *Store =
+ Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
+ StoreSize);
+ Store->setAtomic(llvm::Release);
+ return RValue::get(nullptr);
+ }
+
+ case Builtin::BI__sync_synchronize: {
+ // We assume this is supposed to correspond to a C++0x-style
+ // sequentially-consistent fence (i.e. this is only usable for
+ // synchronization, not device I/O or anything like that). This intrinsic
+ // is really badly designed in the sense that in theory, there isn't
+ // any way to safely use it... but in practice, it mostly works
+ // to use it with non-atomic loads and stores to get acquire/release
+ // semantics.
+ Builder.CreateFence(llvm::SequentiallyConsistent);
+ return RValue::get(nullptr);
+ }
+
+ case Builtin::BI__builtin_nontemporal_load:
+ return RValue::get(EmitNontemporalLoad(*this, E));
+ case Builtin::BI__builtin_nontemporal_store:
+ return RValue::get(EmitNontemporalStore(*this, E));
+ case Builtin::BI__c11_atomic_is_lock_free:
+ case Builtin::BI__atomic_is_lock_free: {
+ // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
+ // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
+ // _Atomic(T) is always properly-aligned.
+ const char *LibCallName = "__atomic_is_lock_free";
+ CallArgList Args;
+ Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
+ getContext().getSizeType());
+ if (BuiltinID == Builtin::BI__atomic_is_lock_free)
+ Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
+ getContext().VoidPtrTy);
+ else
+ Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
+ getContext().VoidPtrTy);
+ const CGFunctionInfo &FuncInfo =
+ CGM.getTypes().arrangeFreeFunctionCall(E->getType(), Args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
+ llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
+ return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
+ }
+
+ case Builtin::BI__atomic_test_and_set: {
+ // Look at the argument type to determine whether this is a volatile
+ // operation. The parameter type is always volatile.
+ QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
+ bool Volatile =
+ PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
+ Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
+ Value *NewVal = Builder.getInt8(1);
+ Value *Order = EmitScalarExpr(E->getArg(1));
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ AtomicRMWInst *Result = nullptr;
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ default: // invalid order
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Monotonic);
+ break;
+ case 1: // memory_order_consume
+ case 2: // memory_order_acquire
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Acquire);
+ break;
+ case 3: // memory_order_release
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Release);
+ break;
+ case 4: // memory_order_acq_rel
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::AcquireRelease);
+ break;
+ case 5: // memory_order_seq_cst
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::SequentiallyConsistent);
+ break;
+ }
+ Result->setVolatile(Volatile);
+ return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
+ }
+
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ llvm::BasicBlock *BBs[5] = {
+ createBasicBlock("monotonic", CurFn),
+ createBasicBlock("acquire", CurFn),
+ createBasicBlock("release", CurFn),
+ createBasicBlock("acqrel", CurFn),
+ createBasicBlock("seqcst", CurFn)
+ };
+ llvm::AtomicOrdering Orders[5] = {
+ llvm::Monotonic, llvm::Acquire, llvm::Release,
+ llvm::AcquireRelease, llvm::SequentiallyConsistent
+ };
+
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
+
+ Builder.SetInsertPoint(ContBB);
+ PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
+
+ for (unsigned i = 0; i < 5; ++i) {
+ Builder.SetInsertPoint(BBs[i]);
+ AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal, Orders[i]);
+ RMW->setVolatile(Volatile);
+ Result->addIncoming(RMW, BBs[i]);
+ Builder.CreateBr(ContBB);
+ }
+
+ SI->addCase(Builder.getInt32(0), BBs[0]);
+ SI->addCase(Builder.getInt32(1), BBs[1]);
+ SI->addCase(Builder.getInt32(2), BBs[1]);
+ SI->addCase(Builder.getInt32(3), BBs[2]);
+ SI->addCase(Builder.getInt32(4), BBs[3]);
+ SI->addCase(Builder.getInt32(5), BBs[4]);
+
+ Builder.SetInsertPoint(ContBB);
+ return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
+ }
+
+ case Builtin::BI__atomic_clear: {
+ QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
+ bool Volatile =
+ PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+
+ Address Ptr = EmitPointerWithAlignment(E->getArg(0));
+ unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace();
+ Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
+ Value *NewVal = Builder.getInt8(0);
+ Value *Order = EmitScalarExpr(E->getArg(1));
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ default: // invalid order
+ Store->setOrdering(llvm::Monotonic);
+ break;
+ case 3: // memory_order_release
+ Store->setOrdering(llvm::Release);
+ break;
+ case 5: // memory_order_seq_cst
+ Store->setOrdering(llvm::SequentiallyConsistent);
+ break;
+ }
+ return RValue::get(nullptr);
+ }
+
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ llvm::BasicBlock *BBs[3] = {
+ createBasicBlock("monotonic", CurFn),
+ createBasicBlock("release", CurFn),
+ createBasicBlock("seqcst", CurFn)
+ };
+ llvm::AtomicOrdering Orders[3] = {
+ llvm::Monotonic, llvm::Release, llvm::SequentiallyConsistent
+ };
+
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
+
+ for (unsigned i = 0; i < 3; ++i) {
+ Builder.SetInsertPoint(BBs[i]);
+ StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
+ Store->setOrdering(Orders[i]);
+ Builder.CreateBr(ContBB);
+ }
+
+ SI->addCase(Builder.getInt32(0), BBs[0]);
+ SI->addCase(Builder.getInt32(3), BBs[1]);
+ SI->addCase(Builder.getInt32(5), BBs[2]);
+
+ Builder.SetInsertPoint(ContBB);
+ return RValue::get(nullptr);
+ }
+
+ case Builtin::BI__atomic_thread_fence:
+ case Builtin::BI__atomic_signal_fence:
+ case Builtin::BI__c11_atomic_thread_fence:
+ case Builtin::BI__c11_atomic_signal_fence: {
+ llvm::SynchronizationScope Scope;
+ if (BuiltinID == Builtin::BI__atomic_signal_fence ||
+ BuiltinID == Builtin::BI__c11_atomic_signal_fence)
+ Scope = llvm::SingleThread;
+ else
+ Scope = llvm::CrossThread;
+ Value *Order = EmitScalarExpr(E->getArg(0));
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ default: // invalid order
+ break;
+ case 1: // memory_order_consume
+ case 2: // memory_order_acquire
+ Builder.CreateFence(llvm::Acquire, Scope);
+ break;
+ case 3: // memory_order_release
+ Builder.CreateFence(llvm::Release, Scope);
+ break;
+ case 4: // memory_order_acq_rel
+ Builder.CreateFence(llvm::AcquireRelease, Scope);
+ break;
+ case 5: // memory_order_seq_cst
+ Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
+ break;
+ }
+ return RValue::get(nullptr);
+ }
+
+ llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
+ AcquireBB = createBasicBlock("acquire", CurFn);
+ ReleaseBB = createBasicBlock("release", CurFn);
+ AcqRelBB = createBasicBlock("acqrel", CurFn);
+ SeqCstBB = createBasicBlock("seqcst", CurFn);
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
+
+ Builder.SetInsertPoint(AcquireBB);
+ Builder.CreateFence(llvm::Acquire, Scope);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(1), AcquireBB);
+ SI->addCase(Builder.getInt32(2), AcquireBB);
+
+ Builder.SetInsertPoint(ReleaseBB);
+ Builder.CreateFence(llvm::Release, Scope);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(3), ReleaseBB);
+
+ Builder.SetInsertPoint(AcqRelBB);
+ Builder.CreateFence(llvm::AcquireRelease, Scope);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(4), AcqRelBB);
+
+ Builder.SetInsertPoint(SeqCstBB);
+ Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(5), SeqCstBB);
+
+ Builder.SetInsertPoint(ContBB);
+ return RValue::get(nullptr);
+ }
+
+ // Library functions with special handling.
+ case Builtin::BIsqrt:
+ case Builtin::BIsqrtf:
+ case Builtin::BIsqrtl: {
+ // Transform a call to sqrt* into a @llvm.sqrt.* intrinsic call, but only
+ // in finite- or unsafe-math mode (the intrinsic has different semantics
+ // for handling negative numbers compared to the library function, so
+ // -fmath-errno=0 is not enough).
+ if (!FD->hasAttr<ConstAttr>())
+ break;
+ if (!(CGM.getCodeGenOpts().UnsafeFPMath ||
+ CGM.getCodeGenOpts().NoNaNsFPMath))
+ break;
+ Value *Arg0 = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgType = Arg0->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::sqrt, ArgType);
+ return RValue::get(Builder.CreateCall(F, Arg0));
+ }
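+ // Illustration for the case above: with -ffast-math (which implies both
+ // options checked here), sqrt(x) on a double lowers to
+ // @llvm.sqrt.f64(double %x); otherwise the call is left for the library
+ // so sqrt's errno/negative-argument behavior is preserved.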
+
+ case Builtin::BI__builtin_pow:
+ case Builtin::BI__builtin_powf:
+ case Builtin::BI__builtin_powl:
+ case Builtin::BIpow:
+ case Builtin::BIpowf:
+ case Builtin::BIpowl: {
+ // Transform a call to pow* into a @llvm.pow.* intrinsic call.
+ if (!FD->hasAttr<ConstAttr>())
+ break;
+ Value *Base = EmitScalarExpr(E->getArg(0));
+ Value *Exponent = EmitScalarExpr(E->getArg(1));
+ llvm::Type *ArgType = Base->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
+ return RValue::get(Builder.CreateCall(F, {Base, Exponent}));
+ }
+
+ case Builtin::BIfma:
+ case Builtin::BIfmaf:
+ case Builtin::BIfmal:
+ case Builtin::BI__builtin_fma:
+ case Builtin::BI__builtin_fmaf:
+ case Builtin::BI__builtin_fmal: {
+ // Rewrite fma to intrinsic.
+ Value *FirstArg = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgType = FirstArg->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType);
+ return RValue::get(
+ Builder.CreateCall(F, {FirstArg, EmitScalarExpr(E->getArg(1)),
+ EmitScalarExpr(E->getArg(2))}));
+ }
+
+ case Builtin::BI__builtin_signbit:
+ case Builtin::BI__builtin_signbitf:
+ case Builtin::BI__builtin_signbitl: {
+ return RValue::get(
+ Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
+ ConvertType(E->getType())));
+ }
+ case Builtin::BI__builtin_annotation: {
+ llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
+ AnnVal->getType());
+
+ // Get the annotation string, go through casts. Sema requires this to be a
+ // non-wide string literal, potentially casted, so the cast<> is safe.
+ const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
+ StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
+ return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
+ }
+ case Builtin::BI__builtin_addcb:
+ case Builtin::BI__builtin_addcs:
+ case Builtin::BI__builtin_addc:
+ case Builtin::BI__builtin_addcl:
+ case Builtin::BI__builtin_addcll:
+ case Builtin::BI__builtin_subcb:
+ case Builtin::BI__builtin_subcs:
+ case Builtin::BI__builtin_subc:
+ case Builtin::BI__builtin_subcl:
+ case Builtin::BI__builtin_subcll: {
+
+ // We translate all of these builtins from expressions of the form:
+ // int x = ..., y = ..., carryin = ..., carryout, result;
+ // result = __builtin_addc(x, y, carryin, &carryout);
+ //
+ // to LLVM IR of the form:
+ //
+ // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
+ // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
+ // %carry1 = extractvalue {i32, i1} %tmp1, 1
+ // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
+ // i32 %carryin)
+ // %result = extractvalue {i32, i1} %tmp2, 0
+ // %carry2 = extractvalue {i32, i1} %tmp2, 1
+ // %tmp3 = or i1 %carry1, %carry2
+ // %tmp4 = zext i1 %tmp3 to i32
+ // store i32 %tmp4, i32* %carryout
+
+ // Scalarize our inputs.
+ llvm::Value *X = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Y = EmitScalarExpr(E->getArg(1));
+ llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
+ Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
+
+ // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
+ llvm::Intrinsic::ID IntrinsicId;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unknown multiprecision builtin id.");
+ case Builtin::BI__builtin_addcb:
+ case Builtin::BI__builtin_addcs:
+ case Builtin::BI__builtin_addc:
+ case Builtin::BI__builtin_addcl:
+ case Builtin::BI__builtin_addcll:
+ IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
+ break;
+ case Builtin::BI__builtin_subcb:
+ case Builtin::BI__builtin_subcs:
+ case Builtin::BI__builtin_subc:
+ case Builtin::BI__builtin_subcl:
+ case Builtin::BI__builtin_subcll:
+ IntrinsicId = llvm::Intrinsic::usub_with_overflow;
+ break;
+ }
+
+ // Construct our resulting LLVM IR expression.
+ llvm::Value *Carry1;
+ llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
+ X, Y, Carry1);
+ llvm::Value *Carry2;
+ llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
+ Sum1, Carryin, Carry2);
+ llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
+ X->getType());
+ Builder.CreateStore(CarryOut, CarryOutPtr);
+ return RValue::get(Sum2);
+ }
+
+ case Builtin::BI__builtin_add_overflow:
+ case Builtin::BI__builtin_sub_overflow:
+ case Builtin::BI__builtin_mul_overflow: {
+ const clang::Expr *LeftArg = E->getArg(0);
+ const clang::Expr *RightArg = E->getArg(1);
+ const clang::Expr *ResultArg = E->getArg(2);
+
+ clang::QualType ResultQTy =
+ ResultArg->getType()->castAs<PointerType>()->getPointeeType();
+
+ WidthAndSignedness LeftInfo =
+ getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
+ WidthAndSignedness RightInfo =
+ getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
+ WidthAndSignedness ResultInfo =
+ getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
+ WidthAndSignedness EncompassingInfo =
+ EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
+
+ llvm::Type *EncompassingLLVMTy =
+ llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
+
+ llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
+
+ llvm::Intrinsic::ID IntrinsicId;
+ switch (BuiltinID) {
+ default:
+ llvm_unreachable("Unknown overflow builtin id.");
+ case Builtin::BI__builtin_add_overflow:
+ IntrinsicId = EncompassingInfo.Signed
+ ? llvm::Intrinsic::sadd_with_overflow
+ : llvm::Intrinsic::uadd_with_overflow;
+ break;
+ case Builtin::BI__builtin_sub_overflow:
+ IntrinsicId = EncompassingInfo.Signed
+ ? llvm::Intrinsic::ssub_with_overflow
+ : llvm::Intrinsic::usub_with_overflow;
+ break;
+ case Builtin::BI__builtin_mul_overflow:
+ IntrinsicId = EncompassingInfo.Signed
+ ? llvm::Intrinsic::smul_with_overflow
+ : llvm::Intrinsic::umul_with_overflow;
+ break;
+ }
+
+ llvm::Value *Left = EmitScalarExpr(LeftArg);
+ llvm::Value *Right = EmitScalarExpr(RightArg);
+ Address ResultPtr = EmitPointerWithAlignment(ResultArg);
+
+ // Extend each operand to the encompassing type.
+ Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
+ Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
+
+ // Perform the operation on the extended values.
+ llvm::Value *Overflow, *Result;
+ Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
+
+ if (EncompassingInfo.Width > ResultInfo.Width) {
+ // The encompassing type is wider than the result type, so we need to
+ // truncate it.
+ llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
+
+ // To see if the truncation caused an overflow, we will extend
+ // the result and then compare it to the original result.
+ llvm::Value *ResultTruncExt = Builder.CreateIntCast(
+ ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
+ llvm::Value *TruncationOverflow =
+ Builder.CreateICmpNE(Result, ResultTruncExt);
+
+ Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
+ Result = ResultTrunc;
+ }
+
+ // Finally, store the result using the pointer.
+ bool isVolatile =
+ ResultArg->getType()->getPointeeType().isVolatileQualified();
+ Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
+
+ return RValue::get(Overflow);
+ }
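+ // Worked example (sketch): __builtin_add_overflow(unsigned a, int b,
+ // int *r) yields an encompassing type of signed 33 bits, so both operands
+ // are extended to i33, @llvm.sadd.with.overflow.i33 is emitted, and the
+ // sum is truncated back to i32 with the truncation check OR'ed into the
+ // returned overflow flag.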
+
+ case Builtin::BI__builtin_uadd_overflow:
+ case Builtin::BI__builtin_uaddl_overflow:
+ case Builtin::BI__builtin_uaddll_overflow:
+ case Builtin::BI__builtin_usub_overflow:
+ case Builtin::BI__builtin_usubl_overflow:
+ case Builtin::BI__builtin_usubll_overflow:
+ case Builtin::BI__builtin_umul_overflow:
+ case Builtin::BI__builtin_umull_overflow:
+ case Builtin::BI__builtin_umulll_overflow:
+ case Builtin::BI__builtin_sadd_overflow:
+ case Builtin::BI__builtin_saddl_overflow:
+ case Builtin::BI__builtin_saddll_overflow:
+ case Builtin::BI__builtin_ssub_overflow:
+ case Builtin::BI__builtin_ssubl_overflow:
+ case Builtin::BI__builtin_ssubll_overflow:
+ case Builtin::BI__builtin_smul_overflow:
+ case Builtin::BI__builtin_smull_overflow:
+ case Builtin::BI__builtin_smulll_overflow: {
+
+ // We translate all of these builtins directly to the relevant LLVM IR node.
+
+ // Scalarize our inputs.
+ llvm::Value *X = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Y = EmitScalarExpr(E->getArg(1));
+ Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
+
+ // Decide which of the overflow intrinsics we are lowering to:
+ llvm::Intrinsic::ID IntrinsicId;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unknown overflow builtin id.");
+ case Builtin::BI__builtin_uadd_overflow:
+ case Builtin::BI__builtin_uaddl_overflow:
+ case Builtin::BI__builtin_uaddll_overflow:
+ IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
+ break;
+ case Builtin::BI__builtin_usub_overflow:
+ case Builtin::BI__builtin_usubl_overflow:
+ case Builtin::BI__builtin_usubll_overflow:
+ IntrinsicId = llvm::Intrinsic::usub_with_overflow;
+ break;
+ case Builtin::BI__builtin_umul_overflow:
+ case Builtin::BI__builtin_umull_overflow:
+ case Builtin::BI__builtin_umulll_overflow:
+ IntrinsicId = llvm::Intrinsic::umul_with_overflow;
+ break;
+ case Builtin::BI__builtin_sadd_overflow:
+ case Builtin::BI__builtin_saddl_overflow:
+ case Builtin::BI__builtin_saddll_overflow:
+ IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
+ break;
+ case Builtin::BI__builtin_ssub_overflow:
+ case Builtin::BI__builtin_ssubl_overflow:
+ case Builtin::BI__builtin_ssubll_overflow:
+ IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
+ break;
+ case Builtin::BI__builtin_smul_overflow:
+ case Builtin::BI__builtin_smull_overflow:
+ case Builtin::BI__builtin_smulll_overflow:
+ IntrinsicId = llvm::Intrinsic::smul_with_overflow;
+ break;
+ }
+
+ llvm::Value *Carry;
+ llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
+ Builder.CreateStore(Sum, SumOutPtr);
+
+ return RValue::get(Carry);
+ }
+ case Builtin::BI__builtin_addressof:
+ return RValue::get(EmitLValue(E->getArg(0)).getPointer());
+ case Builtin::BI__builtin_operator_new:
+ return EmitBuiltinNewDeleteCall(FD->getType()->castAs<FunctionProtoType>(),
+ E->getArg(0), false);
+ case Builtin::BI__builtin_operator_delete:
+ return EmitBuiltinNewDeleteCall(FD->getType()->castAs<FunctionProtoType>(),
+ E->getArg(0), true);
+ case Builtin::BI__noop:
+ // __noop always evaluates to an integer literal zero.
+ return RValue::get(ConstantInt::get(IntTy, 0));
+ case Builtin::BI__builtin_call_with_static_chain: {
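+ // Emit the wrapped call, forwarding the second operand as the callee's
+ // static chain pointer.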
+ const CallExpr *Call = cast<CallExpr>(E->getArg(0));
+ const Expr *Chain = E->getArg(1);
+ return EmitCall(Call->getCallee()->getType(),
+ EmitScalarExpr(Call->getCallee()), Call, ReturnValue,
+ Call->getCalleeDecl(), EmitScalarExpr(Chain));
+ }
+ case Builtin::BI_InterlockedExchange:
+ case Builtin::BI_InterlockedExchangePointer:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
+ case Builtin::BI_InterlockedCompareExchangePointer: {
+ llvm::Type *RTy;
+ llvm::IntegerType *IntType =
+ IntegerType::get(getLLVMContext(),
+ getContext().getTypeSize(E->getType()));
+ llvm::Type *IntPtrType = IntType->getPointerTo();
+
+ llvm::Value *Destination =
+ Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);
+
+ llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
+ RTy = Exchange->getType();
+ Exchange = Builder.CreatePtrToInt(Exchange, IntType);
+
+ llvm::Value *Comparand =
+ Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
+
+ auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
+ SequentiallyConsistent,
+ SequentiallyConsistent);
+ Result->setVolatile(true);
+
+ return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
+ 0),
+ RTy));
+ }
+ case Builtin::BI_InterlockedCompareExchange: {
+ AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg(
+ EmitScalarExpr(E->getArg(0)),
+ EmitScalarExpr(E->getArg(2)),
+ EmitScalarExpr(E->getArg(1)),
+ SequentiallyConsistent,
+ SequentiallyConsistent);
+ CXI->setVolatile(true);
+ return RValue::get(Builder.CreateExtractValue(CXI, 0));
+ }
+ case Builtin::BI_InterlockedIncrement: {
+ AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+ AtomicRMWInst::Add,
+ EmitScalarExpr(E->getArg(0)),
+ ConstantInt::get(Int32Ty, 1),
+ llvm::SequentiallyConsistent);
+ RMWI->setVolatile(true);
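+ // atomicrmw yields the value loaded before the operation, while
+ // _InterlockedIncrement returns the new value, so add 1 to the result
+ // (likewise subtract 1 in _InterlockedDecrement below).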
+ return RValue::get(Builder.CreateAdd(RMWI, ConstantInt::get(Int32Ty, 1)));
+ }
+ case Builtin::BI_InterlockedDecrement: {
+ AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+ AtomicRMWInst::Sub,
+ EmitScalarExpr(E->getArg(0)),
+ ConstantInt::get(Int32Ty, 1),
+ llvm::SequentiallyConsistent);
+ RMWI->setVolatile(true);
+ return RValue::get(Builder.CreateSub(RMWI, ConstantInt::get(Int32Ty, 1)));
+ }
+ case Builtin::BI_InterlockedExchangeAdd: {
+ AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+ AtomicRMWInst::Add,
+ EmitScalarExpr(E->getArg(0)),
+ EmitScalarExpr(E->getArg(1)),
+ llvm::SequentiallyConsistent);
+ RMWI->setVolatile(true);
+ return RValue::get(RMWI);
+ }
+ case Builtin::BI__readfsdword: {
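+ // On x86, LLVM address space 257 denotes FS-relative addressing, so this
+ // emits a volatile 4-byte load at the given offset from the FS base.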
+ Value *IntToPtr =
+ Builder.CreateIntToPtr(EmitScalarExpr(E->getArg(0)),
+ llvm::PointerType::get(CGM.Int32Ty, 257));
+ LoadInst *Load =
+ Builder.CreateAlignedLoad(IntToPtr, /*Align=*/4, /*isVolatile=*/true);
+ return RValue::get(Load);
+ }
+
+ case Builtin::BI__exception_code:
+ case Builtin::BI_exception_code:
+ return RValue::get(EmitSEHExceptionCode());
+ case Builtin::BI__exception_info:
+ case Builtin::BI_exception_info:
+ return RValue::get(EmitSEHExceptionInfo());
+ case Builtin::BI__abnormal_termination:
+ case Builtin::BI_abnormal_termination:
+ return RValue::get(EmitSEHAbnormalTermination());
+ case Builtin::BI_setjmpex: {
+ if (getTarget().getTriple().isOSMSVCRT()) {
+ llvm::Type *ArgTypes[] = {Int8PtrTy, Int8PtrTy};
+ llvm::AttributeSet ReturnsTwiceAttr =
+ AttributeSet::get(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::ReturnsTwice);
+ llvm::Constant *SetJmpEx = CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false),
+ "_setjmpex", ReturnsTwiceAttr);
+ llvm::Value *Buf = Builder.CreateBitOrPointerCast(
+ EmitScalarExpr(E->getArg(0)), Int8PtrTy);
+ llvm::Value *FrameAddr =
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
+ ConstantInt::get(Int32Ty, 0));
+ llvm::Value *Args[] = {Buf, FrameAddr};
+ llvm::CallSite CS = EmitRuntimeCallOrInvoke(SetJmpEx, Args);
+ CS.setAttributes(ReturnsTwiceAttr);
+ return RValue::get(CS.getInstruction());
+ }
+ break;
+ }
+ case Builtin::BI_setjmp: {
+ if (getTarget().getTriple().isOSMSVCRT()) {
+ llvm::AttributeSet ReturnsTwiceAttr =
+ AttributeSet::get(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::ReturnsTwice);
+ llvm::Value *Buf = Builder.CreateBitOrPointerCast(
+ EmitScalarExpr(E->getArg(0)), Int8PtrTy);
+ llvm::CallSite CS;
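+ // On x86, MSVCRT exposes _setjmp3(buf, count, ...); we pass a count of
+ // zero (no extra arguments). Other architectures use _setjmp with the
+ // frame address as the second argument.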
+ if (getTarget().getTriple().getArch() == llvm::Triple::x86) {
+ llvm::Type *ArgTypes[] = {Int8PtrTy, IntTy};
+ llvm::Constant *SetJmp3 = CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/true),
+ "_setjmp3", ReturnsTwiceAttr);
+ llvm::Value *Count = ConstantInt::get(IntTy, 0);
+ llvm::Value *Args[] = {Buf, Count};
+ CS = EmitRuntimeCallOrInvoke(SetJmp3, Args);
+ } else {
+ llvm::Type *ArgTypes[] = {Int8PtrTy, Int8PtrTy};
+ llvm::Constant *SetJmp = CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false),
+ "_setjmp", ReturnsTwiceAttr);
+ llvm::Value *FrameAddr =
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
+ ConstantInt::get(Int32Ty, 0));
+ llvm::Value *Args[] = {Buf, FrameAddr};
+ CS = EmitRuntimeCallOrInvoke(SetJmp, Args);
+ }
+ CS.setAttributes(ReturnsTwiceAttr);
+ return RValue::get(CS.getInstruction());
+ }
+ break;
+ }
+
+ case Builtin::BI__GetExceptionInfo: {
+ if (llvm::GlobalVariable *GV =
+ CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
+ return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy));
+ break;
+ }
+ }
+
+ // If this is an alias for a lib function (e.g. __builtin_sin), emit
+ // the call using the normal call path, but using the unmangled
+ // version of the function name.
+ if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
+ return emitLibraryCall(*this, FD, E,
+ CGM.getBuiltinLibFunction(FD, BuiltinID));
+
+ // If this is a predefined lib function (e.g. malloc), emit the call
+ // using exactly the normal call path.
+ if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+ return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee()));
+
+ // Check that a call to a target-specific builtin has the correct target
+ // features.
+ // This check is done down here so that it only applies to target-specific
+ // builtins; if generic builtins ever start to require target features, it
+ // can move up to the beginning of the function.
+ checkTargetFeatures(E, FD);
+
+ // See if we have a target-specific intrinsic.
+ const char *Name = getContext().BuiltinInfo.getName(BuiltinID);
+ Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
+ if (const char *Prefix =
+ llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch())) {
+ IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);
+ // NOTE: we don't need to perform a compatibility flag check here since
+ // these intrinsics are declared in Builtins*.def via LANGBUILTIN, which
+ // tags the MS builtins with ALL_MS_LANGUAGES so they are filtered out
+ // earlier.
+ if (IntrinsicID == Intrinsic::not_intrinsic)
+ IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix, Name);
+ }
+
+ if (IntrinsicID != Intrinsic::not_intrinsic) {
+ SmallVector<Value*, 16> Args;
+
+ // Find out if any arguments are required to be integer constant
+ // expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ assert(Error == ASTContext::GE_None && "Should not codegen an error");
+
+ Function *F = CGM.getIntrinsic(IntrinsicID);
+ llvm::FunctionType *FTy = F->getFunctionType();
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
+ Value *ArgValue;
+ // If this is a normal argument, just emit it as a scalar.
+ if ((ICEArguments & (1 << i)) == 0) {
+ ArgValue = EmitScalarExpr(E->getArg(i));
+ } else {
+ // If this is required to be a constant, constant fold it so that we
+ // know that the generated intrinsic gets a ConstantInt.
+ llvm::APSInt Result;
+ bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
+ assert(IsConst && "Constant arg isn't actually constant?");
+ (void)IsConst;
+ ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
+ }
+
+ // If the intrinsic arg type is different from the builtin arg type,
+ // we need to do a bit cast.
+ llvm::Type *PTy = FTy->getParamType(i);
+ if (PTy != ArgValue->getType()) {
+ assert(ArgValue->getType()->canLosslesslyBitCastTo(PTy) &&
+ "Must be able to losslessly bit cast to param");
+ ArgValue = Builder.CreateBitCast(ArgValue, PTy);
+ }
+
+ Args.push_back(ArgValue);
+ }
+
+ Value *V = Builder.CreateCall(F, Args);
+ QualType BuiltinRetType = E->getType();
+
+ llvm::Type *RetTy = VoidTy;
+ if (!BuiltinRetType->isVoidType())
+ RetTy = ConvertType(BuiltinRetType);
+
+ if (RetTy != V->getType()) {
+ assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
+ "Must be able to losslessly bit cast result type");
+ V = Builder.CreateBitCast(V, RetTy);
+ }
+
+ return RValue::get(V);
+ }
+
+ // See if we have a target-specific builtin that needs to be lowered.
+ if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
+ return RValue::get(V);
+
+ ErrorUnsupported(E, "builtin function");
+
+ // Unknown builtin, for now just dump it out and return undef.
+ return GetUndefRValue(E->getType());
+}
+
+static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
+ unsigned BuiltinID, const CallExpr *E,
+ llvm::Triple::ArchType Arch) {
+ switch (Arch) {
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ return CGF->EmitARMBuiltinExpr(BuiltinID, E);
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be:
+ return CGF->EmitAArch64BuiltinExpr(BuiltinID, E);
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ return CGF->EmitX86BuiltinExpr(BuiltinID, E);
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
+ case llvm::Triple::r600:
+ case llvm::Triple::amdgcn:
+ return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
+ case llvm::Triple::systemz:
+ return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
+ case llvm::Triple::nvptx:
+ case llvm::Triple::nvptx64:
+ return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
+ default:
+ return nullptr;
+ }
+}
+
+Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
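+ // Under offloading compilation (e.g. CUDA), a builtin may belong to the
+ // auxiliary target rather than the primary one; if so, translate its ID
+ // and dispatch on the aux triple instead.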
+ if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
+ assert(getContext().getAuxTargetInfo() && "Missing aux target info");
+ return EmitTargetArchBuiltinExpr(
+ this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
+ getContext().getAuxTargetInfo()->getTriple().getArch());
+ }
+
+ return EmitTargetArchBuiltinExpr(this, BuiltinID, E,
+ getTarget().getTriple().getArch());
+}
+
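+// Map a NeonTypeFlags value onto the corresponding LLVM vector type, e.g.
+// Int32 with the quad bit set becomes <4 x i32> and without it <2 x i32>;
+// V1Ty forces a single-element vector for scalar intrinsics.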
+static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
+ NeonTypeFlags TypeFlags,
+ bool V1Ty = false) {
+ int IsQuad = TypeFlags.isQuad();
+ switch (TypeFlags.getEltType()) {
+ case NeonTypeFlags::Int8:
+ case NeonTypeFlags::Poly8:
+ return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
+ case NeonTypeFlags::Int16:
+ case NeonTypeFlags::Poly16:
+ case NeonTypeFlags::Float16:
+ return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
+ case NeonTypeFlags::Int32:
+ return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
+ case NeonTypeFlags::Int64:
+ case NeonTypeFlags::Poly64:
+ return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
+ case NeonTypeFlags::Poly128:
+ // FIXME: i128 and f128 are not fully supported in Clang and LLVM; much
+ // of the i128 and f128 API is missing, so we represent poly128 as v16i8
+ // and rely on pattern matching.
+ return llvm::VectorType::get(CGF->Int8Ty, 16);
+ case NeonTypeFlags::Float32:
+ return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
+ case NeonTypeFlags::Float64:
+ return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
+ }
+ llvm_unreachable("Unknown vector element type!");
+}
+
+static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
+ NeonTypeFlags IntTypeFlags) {
+ int IsQuad = IntTypeFlags.isQuad();
+ switch (IntTypeFlags.getEltType()) {
+ case NeonTypeFlags::Int32:
+ return llvm::VectorType::get(CGF->FloatTy, (2 << IsQuad));
+ case NeonTypeFlags::Int64:
+ return llvm::VectorType::get(CGF->DoubleTy, (1 << IsQuad));
+ default:
+ llvm_unreachable("Type can't be converted to floating-point!");
+ }
+}
+
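+// Broadcast lane C of V to every lane with a shufflevector whose mask is a
+// constant splat of C, e.g. lane 1 of a <4 x i16> vector uses the mask
+// <1, 1, 1, 1>.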
+Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
+ unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
+ Value* SV = llvm::ConstantVector::getSplat(nElts, C);
+ return Builder.CreateShuffleVector(V, V, SV, "lane");
+}
+
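+// Bitcast each operand to the matching intrinsic parameter type; the
+// shift-amount operand (index 'shift', when nonzero) instead becomes a
+// constant splat vector, negated for right shifts. Then emit the call.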
+Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
+ const char *name,
+ unsigned shift, bool rightshift) {
+ unsigned j = 0;
+ for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
+ ai != ae; ++ai, ++j)
+ if (shift > 0 && shift == j)
+ Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
+ else
+ Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
+
+ return Builder.CreateCall(F, Ops, name);
+}
+
+Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
+ bool neg) {
+ int SV = cast<ConstantInt>(V)->getSExtValue();
+ return ConstantInt::get(Ty, neg ? -SV : SV);
+}
+
+/// \brief Right-shift a vector by a constant.
+Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
+ llvm::Type *Ty, bool usgn,
+ const char *name) {
+ llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+
+ int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
+ int EltSize = VTy->getScalarSizeInBits();
+
+ Vec = Builder.CreateBitCast(Vec, Ty);
+
+ // lshr/ashr are undefined when the shift amount is equal to the vector
+ // element size.
+ if (ShiftAmt == EltSize) {
+ if (usgn) {
+ // Right-shifting an unsigned value by its size yields 0.
+ return llvm::ConstantAggregateZero::get(VTy);
+ } else {
+ // Right-shifting a signed value by its size is equivalent
+ // to a shift of size-1.
+ --ShiftAmt;
+ Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
+ }
+ }
+
+ Shift = EmitNeonShiftVector(Shift, Ty, false);
+ if (usgn)
+ return Builder.CreateLShr(Vec, Shift, name);
+ else
+ return Builder.CreateAShr(Vec, Shift, name);
+}
+
+enum {
+ AddRetType = (1 << 0),
+ Add1ArgType = (1 << 1),
+ Add2ArgTypes = (1 << 2),
+
+ VectorizeRetType = (1 << 3),
+ VectorizeArgTypes = (1 << 4),
+
+ InventFloatType = (1 << 5),
+ UnsignedAlts = (1 << 6),
+
+ Use64BitVectors = (1 << 7),
+ Use128BitVectors = (1 << 8),
+
+ Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
+ VectorRet = AddRetType | VectorizeRetType,
+ VectorRetGetArgs01 =
+ AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
+ FpCmpzModifiers =
+ AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
+};
+
+namespace {
+struct NeonIntrinsicInfo {
+ const char *NameHint;
+ unsigned BuiltinID;
+ unsigned LLVMIntrinsic;
+ unsigned AltLLVMIntrinsic;
+ unsigned TypeModifier;
+
+ bool operator<(unsigned RHSBuiltinID) const {
+ return BuiltinID < RHSBuiltinID;
+ }
+ bool operator<(const NeonIntrinsicInfo &TE) const {
+ return BuiltinID < TE.BuiltinID;
+ }
+};
+} // end anonymous namespace
+
+#define NEONMAP0(NameBase) \
+ { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 }
+
+#define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
+ { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
+ Intrinsic::LLVMIntrinsic, 0, TypeModifier }
+
+#define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
+ { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
+ Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
+ TypeModifier }
+
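+// For example, NEONMAP1(vabs_v, arm_neon_vabs, 0) expands to the table entry
+//   { "vabs_v", NEON::BI__builtin_neon_vabs_v, Intrinsic::arm_neon_vabs, 0, 0 }.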
+ static const NeonIntrinsicInfo ARMSIMDIntrinsicMap[] = {
+ NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vabs_v, arm_neon_vabs, 0),
+ NEONMAP1(vabsq_v, arm_neon_vabs, 0),
+ NEONMAP0(vaddhn_v),
+ NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
+ NEONMAP1(vaeseq_v, arm_neon_aese, 0),
+ NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
+ NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
+ NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
+ NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
+ NEONMAP1(vcage_v, arm_neon_vacge, 0),
+ NEONMAP1(vcageq_v, arm_neon_vacge, 0),
+ NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
+ NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
+ NEONMAP1(vcale_v, arm_neon_vacge, 0),
+ NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
+ NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
+ NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
+ NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
+ NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
+ NEONMAP1(vclz_v, ctlz, Add1ArgType),
+ NEONMAP1(vclzq_v, ctlz, Add1ArgType),
+ NEONMAP1(vcnt_v, ctpop, Add1ArgType),
+ NEONMAP1(vcntq_v, ctpop, Add1ArgType),
+ NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
+ NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
+ NEONMAP0(vcvt_f32_v),
+ NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
+ NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
+ NEONMAP0(vcvt_s32_v),
+ NEONMAP0(vcvt_s64_v),
+ NEONMAP0(vcvt_u32_v),
+ NEONMAP0(vcvt_u64_v),
+ NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
+ NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
+ NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
+ NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
+ NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
+ NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
+ NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
+ NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
+ NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
+ NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
+ NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
+ NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
+ NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
+ NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
+ NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
+ NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
+ NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
+ NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
+ NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
+ NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
+ NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
+ NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
+ NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
+ NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
+ NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
+ NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
+ NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
+ NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
+ NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
+ NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
+ NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
+ NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
+ NEONMAP0(vcvtq_f32_v),
+ NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
+ NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
+ NEONMAP0(vcvtq_s32_v),
+ NEONMAP0(vcvtq_s64_v),
+ NEONMAP0(vcvtq_u32_v),
+ NEONMAP0(vcvtq_u64_v),
+ NEONMAP0(vext_v),
+ NEONMAP0(vextq_v),
+ NEONMAP0(vfma_v),
+ NEONMAP0(vfmaq_v),
+ NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
+ NEONMAP0(vld1_dup_v),
+ NEONMAP1(vld1_v, arm_neon_vld1, 0),
+ NEONMAP0(vld1q_dup_v),
+ NEONMAP1(vld1q_v, arm_neon_vld1, 0),
+ NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
+ NEONMAP1(vld2_v, arm_neon_vld2, 0),
+ NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
+ NEONMAP1(vld2q_v, arm_neon_vld2, 0),
+ NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
+ NEONMAP1(vld3_v, arm_neon_vld3, 0),
+ NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
+ NEONMAP1(vld3q_v, arm_neon_vld3, 0),
+ NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
+ NEONMAP1(vld4_v, arm_neon_vld4, 0),
+ NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
+ NEONMAP1(vld4q_v, arm_neon_vld4, 0),
+ NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
+ NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
+ NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
+ NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
+ NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
+ NEONMAP0(vmovl_v),
+ NEONMAP0(vmovn_v),
+ NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
+ NEONMAP0(vmull_v),
+ NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
+ NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
+ NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
+ NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
+ NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
+ NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
+ NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
+ NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
+ NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
+ NEONMAP2(vqadd_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqaddq_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqdmlal_v, arm_neon_vqdmull, arm_neon_vqadds, 0),
+ NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, arm_neon_vqsubs, 0),
+ NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
+ NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
+ NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
+ NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
+ NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
+ NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
+ NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
+ NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
+ NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
+ NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
+ NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
+ NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
+ NEONMAP2(vqsub_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqsubq_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
+ NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
+ NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
+ NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
+ NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
+ NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
+ NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
+ NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
+ NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
+ NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
+ NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
+ NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
+ NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
+ NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
+ NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
+ NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
+ NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
+ NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
+ NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
+ NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
+ NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
+ NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
+ NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
+ NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
+ NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
+ NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
+ NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
+ NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
+ NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
+ NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
+ NEONMAP0(vshl_n_v),
+ NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
+ NEONMAP0(vshll_n_v),
+ NEONMAP0(vshlq_n_v),
+ NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
+ NEONMAP0(vshr_n_v),
+ NEONMAP0(vshrn_n_v),
+ NEONMAP0(vshrq_n_v),
+ NEONMAP1(vst1_v, arm_neon_vst1, 0),
+ NEONMAP1(vst1q_v, arm_neon_vst1, 0),
+ NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
+ NEONMAP1(vst2_v, arm_neon_vst2, 0),
+ NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
+ NEONMAP1(vst2q_v, arm_neon_vst2, 0),
+ NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
+ NEONMAP1(vst3_v, arm_neon_vst3, 0),
+ NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
+ NEONMAP1(vst3q_v, arm_neon_vst3, 0),
+ NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
+ NEONMAP1(vst4_v, arm_neon_vst4, 0),
+ NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
+ NEONMAP1(vst4q_v, arm_neon_vst4, 0),
+ NEONMAP0(vsubhn_v),
+ NEONMAP0(vtrn_v),
+ NEONMAP0(vtrnq_v),
+ NEONMAP0(vtst_v),
+ NEONMAP0(vtstq_v),
+ NEONMAP0(vuzp_v),
+ NEONMAP0(vuzpq_v),
+ NEONMAP0(vzip_v),
+ NEONMAP0(vzipq_v)
+};
+
+static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
+ NEONMAP1(vabs_v, aarch64_neon_abs, 0),
+ NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
+ NEONMAP0(vaddhn_v),
+ NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
+ NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
+ NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
+ NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
+ NEONMAP1(vcage_v, aarch64_neon_facge, 0),
+ NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
+ NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
+ NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
+ NEONMAP1(vcale_v, aarch64_neon_facge, 0),
+ NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
+ NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
+ NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
+ NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
+ NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
+ NEONMAP1(vclz_v, ctlz, Add1ArgType),
+ NEONMAP1(vclzq_v, ctlz, Add1ArgType),
+ NEONMAP1(vcnt_v, ctpop, Add1ArgType),
+ NEONMAP1(vcntq_v, ctpop, Add1ArgType),
+ NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
+ NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
+ NEONMAP0(vcvt_f32_v),
+ NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
+ NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
+ NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
+ NEONMAP0(vcvtq_f32_v),
+ NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
+ NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
+ NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
+ NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
+ NEONMAP0(vext_v),
+ NEONMAP0(vextq_v),
+ NEONMAP0(vfma_v),
+ NEONMAP0(vfmaq_v),
+ NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
+ NEONMAP0(vmovl_v),
+ NEONMAP0(vmovn_v),
+ NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
+ NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
+ NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
+ NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
+ NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
+ NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
+ NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
+ NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
+ NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
+ NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
+ NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
+ NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
+ NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
+ NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
+ NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
+ NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
+ NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
+ NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
+ NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
+ NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
+ NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
+ NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
+ NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
+ NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
+ NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
+ NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
+ NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
+ NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
+ NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
+ NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
+ NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
+ NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
+ NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
+ NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
+ NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
+ NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
+ NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
+ NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
+ NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
+ NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
+ NEONMAP0(vshl_n_v),
+ NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
+ NEONMAP0(vshll_n_v),
+ NEONMAP0(vshlq_n_v),
+ NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
+ NEONMAP0(vshr_n_v),
+ NEONMAP0(vshrn_n_v),
+ NEONMAP0(vshrq_n_v),
+ NEONMAP0(vsubhn_v),
+ NEONMAP0(vtst_v),
+ NEONMAP0(vtstq_v),
+};
+
+static const NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
+ NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
+ NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
+ NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
+ NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+ NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
+ NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
+ NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
+ NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
+ NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
+ NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
+ NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
+ NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
+ NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
+ NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
+ NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
+ NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
+ NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
+ NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
+ NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
+ NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
+ NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
+ NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
+ NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
+ NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
+ NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
+ NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
+ NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
+ NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
+ NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+ NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+ NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
+ NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
+ NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
+ NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
+ NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
+ NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
+ NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
+ NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
+ NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
+ NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
+ NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
+ NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
+ NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
+ NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
+ NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
+ NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
+ NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
+ NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
+ NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
+ NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
+ NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
+ NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
+ NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
+ NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
+ NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
+ NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
+ NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
+ NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
+ NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
+ NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
+ NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
+ NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
+ NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
+ NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
+ NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
+ NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
+ NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
+ NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
+ NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
+ NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
+ NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
+ NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
+ NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
+ NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
+ NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
+ NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
+ NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
+ NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
+ NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
+ NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
+ NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
+ NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
+ NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
+ NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
+ NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
+ NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
+ NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
+ NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
+ NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
+ NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
+ NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
+ NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
+ NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
+ NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
+ NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
+ NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
+ NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
+ NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
+ NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
+ NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
+ NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
+ NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
+};
+
+#undef NEONMAP0
+#undef NEONMAP1
+#undef NEONMAP2
+
+static bool NEONSIMDIntrinsicsProvenSorted = false;
+
+static bool AArch64SIMDIntrinsicsProvenSorted = false;
+static bool AArch64SISDIntrinsicsProvenSorted = false;
+
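+// Binary-search a table sorted by BuiltinID. In asserts builds, the first
+// lookup verifies that the table really is sorted (std::lower_bound's
+// precondition) and caches that fact in MapProvenSorted.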
+static const NeonIntrinsicInfo *
+findNeonIntrinsicInMap(ArrayRef<NeonIntrinsicInfo> IntrinsicMap,
+ unsigned BuiltinID, bool &MapProvenSorted) {
+
+#ifndef NDEBUG
+ if (!MapProvenSorted) {
+ assert(std::is_sorted(std::begin(IntrinsicMap), std::end(IntrinsicMap)));
+ MapProvenSorted = true;
+ }
+#endif
+
+ const NeonIntrinsicInfo *Builtin =
+ std::lower_bound(IntrinsicMap.begin(), IntrinsicMap.end(), BuiltinID);
+
+ if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
+ return Builtin;
+
+ return nullptr;
+}
+
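+// Assemble the overloaded type list for a NEON LLVM intrinsic from the
+// TypeModifier flags: AddRetType contributes the call's return type,
+// Add1ArgType/Add2ArgTypes contribute the argument type once or twice, the
+// Vectorize*/Use*BitVectors flags widen those types into 64- or 128-bit
+// vectors, and InventFloatType appends a float type.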
+Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
+ unsigned Modifier,
+ llvm::Type *ArgType,
+ const CallExpr *E) {
+ int VectorSize = 0;
+ if (Modifier & Use64BitVectors)
+ VectorSize = 64;
+ else if (Modifier & Use128BitVectors)
+ VectorSize = 128;
+
+ // Return type.
+ SmallVector<llvm::Type *, 3> Tys;
+ if (Modifier & AddRetType) {
+ llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
+ if (Modifier & VectorizeRetType)
+ Ty = llvm::VectorType::get(
+ Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
+
+ Tys.push_back(Ty);
+ }
+
+ // Arguments.
+ if (Modifier & VectorizeArgTypes) {
+ int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
+ ArgType = llvm::VectorType::get(ArgType, Elts);
+ }
+
+ if (Modifier & (Add1ArgType | Add2ArgTypes))
+ Tys.push_back(ArgType);
+
+ if (Modifier & Add2ArgTypes)
+ Tys.push_back(ArgType);
+
+ if (Modifier & InventFloatType)
+ Tys.push_back(FloatTy);
+
+ return CGM.getIntrinsic(IntrinsicID, Tys);
+}
+
+static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF,
+ const NeonIntrinsicInfo &SISDInfo,
+ SmallVectorImpl<Value *> &Ops,
+ const CallExpr *E) {
+ unsigned BuiltinID = SISDInfo.BuiltinID;
+ unsigned Int = SISDInfo.LLVMIntrinsic;
+ unsigned Modifier = SISDInfo.TypeModifier;
+ const char *s = SISDInfo.NameHint;
+
+ switch (BuiltinID) {
+ case NEON::BI__builtin_neon_vcled_s64:
+ case NEON::BI__builtin_neon_vcled_u64:
+ case NEON::BI__builtin_neon_vcles_f32:
+ case NEON::BI__builtin_neon_vcled_f64:
+ case NEON::BI__builtin_neon_vcltd_s64:
+ case NEON::BI__builtin_neon_vcltd_u64:
+ case NEON::BI__builtin_neon_vclts_f32:
+ case NEON::BI__builtin_neon_vcltd_f64:
+ case NEON::BI__builtin_neon_vcales_f32:
+ case NEON::BI__builtin_neon_vcaled_f64:
+ case NEON::BI__builtin_neon_vcalts_f32:
+ case NEON::BI__builtin_neon_vcaltd_f64:
+ // Only one direction of these comparisons actually exists: cmle is cmge
+ // with swapped operands. The table gives us the right intrinsic, but we
+ // still need to do the swap.
+ std::swap(Ops[0], Ops[1]);
+ break;
+ }
+
+ assert(Int && "Generic code assumes a valid intrinsic");
+
+ // Determine the type(s) of this overloaded AArch64 intrinsic.
+ const Expr *Arg = E->getArg(0);
+ llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
+ Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);
+
+ int j = 0;
+ ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
+ for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
+ ai != ae; ++ai, ++j) {
+ llvm::Type *ArgTy = ai->getType();
+ if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
+ ArgTy->getPrimitiveSizeInBits())
+ continue;
+
+ assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
+ // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
+ // it before inserting.
+ Ops[j] =
+ CGF.Builder.CreateTruncOrBitCast(Ops[j], ArgTy->getVectorElementType());
+ Ops[j] =
+ CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
+ }
+
+ Value *Result = CGF.EmitNeonCall(F, Ops, s);
+ llvm::Type *ResultType = CGF.ConvertType(E->getType());
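+ // If the builtin expects a scalar narrower than the (vector) value the
+ // intrinsic produced, extract lane 0; otherwise bitcast to the expected
+ // type.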
+ if (ResultType->getPrimitiveSizeInBits() <
+ Result->getType()->getPrimitiveSizeInBits())
+ return CGF.Builder.CreateExtractElement(Result, C0);
+
+ return CGF.Builder.CreateBitCast(Result, ResultType, s);
+}
+
+Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
+ unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
+ const char *NameHint, unsigned Modifier, const CallExpr *E,
+ SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1) {
+ // Get the last argument, which specifies the vector type.
+ llvm::APSInt NeonTypeConst;
+ const Expr *Arg = E->getArg(E->getNumArgs() - 1);
+ if (!Arg->isIntegerConstantExpr(NeonTypeConst, getContext()))
+ return nullptr;
+
+ // Determine the type of this overloaded NEON intrinsic.
+ NeonTypeFlags Type(NeonTypeConst.getZExtValue());
+ bool Usgn = Type.isUnsigned();
+ bool Quad = Type.isQuad();
+
+ llvm::VectorType *VTy = GetNeonType(this, Type);
+ llvm::Type *Ty = VTy;
+ if (!Ty)
+ return nullptr;
+
+ auto getAlignmentValue32 = [&](Address addr) -> Value* {
+ return Builder.getInt32(addr.getAlignment().getQuantity());
+ };
+
+ unsigned Int = LLVMIntrinsic;
+ if ((Modifier & UnsignedAlts) && !Usgn)
+ Int = AltLLVMIntrinsic;
+
+ switch (BuiltinID) {
+ default: break;
+ case NEON::BI__builtin_neon_vabs_v:
+ case NEON::BI__builtin_neon_vabsq_v:
+ if (VTy->getElementType()->isFloatingPointTy())
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
+ return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
+ case NEON::BI__builtin_neon_vaddhn_v: {
+ llvm::VectorType *SrcTy =
+ llvm::VectorType::getExtendedElementVectorType(VTy);
+
+ // %sum = add <4 x i32> %lhs, %rhs
+ Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
+ Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
+
+ // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
+ Constant *ShiftAmt =
+ ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
+ Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
+
+ // %res = trunc <4 x i32> %high to <4 x i16>
+ return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
+ }
+ case NEON::BI__builtin_neon_vcale_v:
+ case NEON::BI__builtin_neon_vcaleq_v:
+ case NEON::BI__builtin_neon_vcalt_v:
+ case NEON::BI__builtin_neon_vcaltq_v:
+ std::swap(Ops[0], Ops[1]);
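+ // Fall through: the "absolute less-than/less-equal" forms are the
+ // greater-than/greater-equal forms with operands swapped.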
+ case NEON::BI__builtin_neon_vcage_v:
+ case NEON::BI__builtin_neon_vcageq_v:
+ case NEON::BI__builtin_neon_vcagt_v:
+ case NEON::BI__builtin_neon_vcagtq_v: {
+ llvm::Type *VecFlt = llvm::VectorType::get(
+ VTy->getScalarSizeInBits() == 32 ? FloatTy : DoubleTy,
+ VTy->getNumElements());
+ llvm::Type *Tys[] = { VTy, VecFlt };
+ Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
+ return EmitNeonCall(F, Ops, NameHint);
+ }
+ case NEON::BI__builtin_neon_vclz_v:
+ case NEON::BI__builtin_neon_vclzq_v:
+ // We generate a target-independent intrinsic, which needs a second
+ // argument saying whether or not clz of zero is undefined; on ARM it
+ // isn't.
+ Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
+ break;
+ case NEON::BI__builtin_neon_vcvt_f32_v:
+ case NEON::BI__builtin_neon_vcvtq_f32_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad));
+ return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
+ : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
+ case NEON::BI__builtin_neon_vcvt_n_f32_v:
+ case NEON::BI__builtin_neon_vcvt_n_f64_v:
+ case NEON::BI__builtin_neon_vcvtq_n_f32_v:
+ case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
+ llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
+ Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
+ Function *F = CGM.getIntrinsic(Int, Tys);
+ return EmitNeonCall(F, Ops, "vcvt_n");
+ }
+ case NEON::BI__builtin_neon_vcvt_n_s32_v:
+ case NEON::BI__builtin_neon_vcvt_n_u32_v:
+ case NEON::BI__builtin_neon_vcvt_n_s64_v:
+ case NEON::BI__builtin_neon_vcvt_n_u64_v:
+ case NEON::BI__builtin_neon_vcvtq_n_s32_v:
+ case NEON::BI__builtin_neon_vcvtq_n_u32_v:
+ case NEON::BI__builtin_neon_vcvtq_n_s64_v:
+ case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
+ llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
+ Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
+ return EmitNeonCall(F, Ops, "vcvt_n");
+ }
+ case NEON::BI__builtin_neon_vcvt_s32_v:
+ case NEON::BI__builtin_neon_vcvt_u32_v:
+ case NEON::BI__builtin_neon_vcvt_s64_v:
+ case NEON::BI__builtin_neon_vcvt_u64_v:
+ case NEON::BI__builtin_neon_vcvtq_s32_v:
+ case NEON::BI__builtin_neon_vcvtq_u32_v:
+ case NEON::BI__builtin_neon_vcvtq_s64_v:
+ case NEON::BI__builtin_neon_vcvtq_u64_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
+ return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
+ : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
+ }
+ case NEON::BI__builtin_neon_vcvta_s32_v:
+ case NEON::BI__builtin_neon_vcvta_s64_v:
+ case NEON::BI__builtin_neon_vcvta_u32_v:
+ case NEON::BI__builtin_neon_vcvta_u64_v:
+ case NEON::BI__builtin_neon_vcvtaq_s32_v:
+ case NEON::BI__builtin_neon_vcvtaq_s64_v:
+ case NEON::BI__builtin_neon_vcvtaq_u32_v:
+ case NEON::BI__builtin_neon_vcvtaq_u64_v:
+ case NEON::BI__builtin_neon_vcvtn_s32_v:
+ case NEON::BI__builtin_neon_vcvtn_s64_v:
+ case NEON::BI__builtin_neon_vcvtn_u32_v:
+ case NEON::BI__builtin_neon_vcvtn_u64_v:
+ case NEON::BI__builtin_neon_vcvtnq_s32_v:
+ case NEON::BI__builtin_neon_vcvtnq_s64_v:
+ case NEON::BI__builtin_neon_vcvtnq_u32_v:
+ case NEON::BI__builtin_neon_vcvtnq_u64_v:
+ case NEON::BI__builtin_neon_vcvtp_s32_v:
+ case NEON::BI__builtin_neon_vcvtp_s64_v:
+ case NEON::BI__builtin_neon_vcvtp_u32_v:
+ case NEON::BI__builtin_neon_vcvtp_u64_v:
+ case NEON::BI__builtin_neon_vcvtpq_s32_v:
+ case NEON::BI__builtin_neon_vcvtpq_s64_v:
+ case NEON::BI__builtin_neon_vcvtpq_u32_v:
+ case NEON::BI__builtin_neon_vcvtpq_u64_v:
+ case NEON::BI__builtin_neon_vcvtm_s32_v:
+ case NEON::BI__builtin_neon_vcvtm_s64_v:
+ case NEON::BI__builtin_neon_vcvtm_u32_v:
+ case NEON::BI__builtin_neon_vcvtm_u64_v:
+ case NEON::BI__builtin_neon_vcvtmq_s32_v:
+ case NEON::BI__builtin_neon_vcvtmq_s64_v:
+ case NEON::BI__builtin_neon_vcvtmq_u32_v:
+ case NEON::BI__builtin_neon_vcvtmq_u64_v: {
+ llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
+ return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
+ }
+ case NEON::BI__builtin_neon_vext_v:
+ case NEON::BI__builtin_neon_vextq_v: {
+ int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+ Indices.push_back(ConstantInt::get(Int32Ty, i+CV));
+
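+ // Illustrative: for v4i32 with CV == 2 the mask is <2, 3, 4, 5>, selecting
+ // <a2, a3, b0, b1> from the concatenation of the two inputs.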
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Value *SV = llvm::ConstantVector::get(Indices);
+ return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
+ }
+ case NEON::BI__builtin_neon_vfma_v:
+ case NEON::BI__builtin_neon_vfmaq_v: {
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+
+ // NEON intrinsic puts accumulator first, unlike the LLVM fma.
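+ // E.g. vfmaq_f32(acc, a, b) becomes llvm.fma(a, b, acc).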
+ return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
+ }
+ case NEON::BI__builtin_neon_vld1_v:
+ case NEON::BI__builtin_neon_vld1q_v: {
+ llvm::Type *Tys[] = {Ty, Int8PtrTy};
+ Ops.push_back(getAlignmentValue32(PtrOp0));
+ return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
+ }
+ case NEON::BI__builtin_neon_vld2_v:
+ case NEON::BI__builtin_neon_vld2q_v:
+ case NEON::BI__builtin_neon_vld3_v:
+ case NEON::BI__builtin_neon_vld3q_v:
+ case NEON::BI__builtin_neon_vld4_v:
+ case NEON::BI__builtin_neon_vld4q_v: {
+ llvm::Type *Tys[] = {Ty, Int8PtrTy};
+ Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
+ Value *Align = getAlignmentValue32(PtrOp1);
+ Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ }
+ case NEON::BI__builtin_neon_vld1_dup_v:
+ case NEON::BI__builtin_neon_vld1q_dup_v: {
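+ // Load a single element, insert it into lane 0 of an undef vector, then
+ // splat that lane to every lane of the result.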
+ Value *V = UndefValue::get(Ty);
+ Ty = llvm::PointerType::getUnqual(VTy->getElementType());
+ PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty);
+ LoadInst *Ld = Builder.CreateLoad(PtrOp0);
+ llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
+ Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
+ return EmitNeonSplat(Ops[0], CI);
+ }
+ case NEON::BI__builtin_neon_vld2_lane_v:
+ case NEON::BI__builtin_neon_vld2q_lane_v:
+ case NEON::BI__builtin_neon_vld3_lane_v:
+ case NEON::BI__builtin_neon_vld3q_lane_v:
+ case NEON::BI__builtin_neon_vld4_lane_v:
+ case NEON::BI__builtin_neon_vld4q_lane_v: {
+ llvm::Type *Tys[] = {Ty, Int8PtrTy};
+ Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
+ for (unsigned I = 2; I < Ops.size() - 1; ++I)
+ Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
+ Ops.push_back(getAlignmentValue32(PtrOp1));
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ }
+ case NEON::BI__builtin_neon_vmovl_v: {
+ llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
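+ // E.g. vmovl_u8 reinterprets the source as v8i8 and zero-extends to v8i16.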
+ Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
+ if (Usgn)
+ return Builder.CreateZExt(Ops[0], Ty, "vmovl");
+ return Builder.CreateSExt(Ops[0], Ty, "vmovl");
+ }
+ case NEON::BI__builtin_neon_vmovn_v: {
+ llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
+ return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
+ }
+ case NEON::BI__builtin_neon_vmull_v:
+ // FIXME: the integer vmull operations could be emitted in terms of pure
+ // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
+ // hoisting the exts outside loops. Until GlobalISel comes along and can
+ // see through such movement, this leads to bad CodeGen, so we need an
+ // intrinsic for now.
+ Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
+ Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
+ case NEON::BI__builtin_neon_vpadal_v:
+ case NEON::BI__builtin_neon_vpadalq_v: {
+ // The source operand type has twice as many elements of half the size.
+ unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
+ llvm::Type *EltTy =
+ llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
+ llvm::Type *NarrowTy =
+ llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
+ llvm::Type *Tys[2] = { Ty, NarrowTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
+ }
+ case NEON::BI__builtin_neon_vpaddl_v:
+ case NEON::BI__builtin_neon_vpaddlq_v: {
+ // The source operand type has twice as many elements of half the size.
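+ // E.g. vpaddl_s8 pairwise-adds a v8i8 source into a v4i16 result.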
+ unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
+ llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
+ llvm::Type *NarrowTy =
+ llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
+ llvm::Type *Tys[2] = { Ty, NarrowTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
+ }
+ case NEON::BI__builtin_neon_vqdmlal_v:
+ case NEON::BI__builtin_neon_vqdmlsl_v: {
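+ // These decompose into a saturating doubling multiply-long followed by a
+ // saturating accumulate: vqdmlal(a, b, c) == vqadd(a, vqdmull(b, c)).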
+ SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
+ Ops[1] =
+ EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
+ Ops.resize(2);
+ return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
+ }
+ case NEON::BI__builtin_neon_vqshl_n_v:
+ case NEON::BI__builtin_neon_vqshlq_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
+ 1, false);
+ case NEON::BI__builtin_neon_vqshlu_n_v:
+ case NEON::BI__builtin_neon_vqshluq_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
+ 1, false);
+ case NEON::BI__builtin_neon_vrecpe_v:
+ case NEON::BI__builtin_neon_vrecpeq_v:
+ case NEON::BI__builtin_neon_vrsqrte_v:
+ case NEON::BI__builtin_neon_vrsqrteq_v:
+ Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
+
+ case NEON::BI__builtin_neon_vrshr_n_v:
+ case NEON::BI__builtin_neon_vrshrq_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
+ 1, true);
+ case NEON::BI__builtin_neon_vshl_n_v:
+ case NEON::BI__builtin_neon_vshlq_n_v:
+ Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
+ return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1],
+ "vshl_n");
+ case NEON::BI__builtin_neon_vshll_n_v: {
+ llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
+ if (Usgn)
+ Ops[0] = Builder.CreateZExt(Ops[0], VTy);
+ else
+ Ops[0] = Builder.CreateSExt(Ops[0], VTy);
+ Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
+ return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
+ }
+ case NEON::BI__builtin_neon_vshrn_n_v: {
+ llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
+ Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
+ if (Usgn)
+ Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
+ else
+ Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
+ return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
+ }
+ case NEON::BI__builtin_neon_vshr_n_v:
+ case NEON::BI__builtin_neon_vshrq_n_v:
+ return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
+ case NEON::BI__builtin_neon_vst1_v:
+ case NEON::BI__builtin_neon_vst1q_v:
+ case NEON::BI__builtin_neon_vst2_v:
+ case NEON::BI__builtin_neon_vst2q_v:
+ case NEON::BI__builtin_neon_vst3_v:
+ case NEON::BI__builtin_neon_vst3q_v:
+ case NEON::BI__builtin_neon_vst4_v:
+ case NEON::BI__builtin_neon_vst4q_v:
+ case NEON::BI__builtin_neon_vst2_lane_v:
+ case NEON::BI__builtin_neon_vst2q_lane_v:
+ case NEON::BI__builtin_neon_vst3_lane_v:
+ case NEON::BI__builtin_neon_vst3q_lane_v:
+ case NEON::BI__builtin_neon_vst4_lane_v:
+ case NEON::BI__builtin_neon_vst4q_lane_v: {
+ llvm::Type *Tys[] = {Int8PtrTy, Ty};
+ Ops.push_back(getAlignmentValue32(PtrOp0));
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
+ }
+ case NEON::BI__builtin_neon_vsubhn_v: {
+ llvm::VectorType *SrcTy =
+ llvm::VectorType::getExtendedElementVectorType(VTy);
+
+ // %diff = sub <4 x i32> %lhs, %rhs
+ Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
+ Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
+
+ // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
+ Constant *ShiftAmt =
+ ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
+ Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
+
+ // %res = trunc <4 x i32> %high to <4 x i16>
+ return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
+ }
+ case NEON::BI__builtin_neon_vtrn_v:
+ case NEON::BI__builtin_neon_vtrnq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV = nullptr;
+
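+ // The two masks transpose matching lanes; for v4i16 they are <0, 4, 2, 6>
+ // and <1, 5, 3, 7>, giving {a0, b0, a2, b2} and {a1, b1, a3, b3}.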
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+ Indices.push_back(Builder.getInt32(i+vi));
+ Indices.push_back(Builder.getInt32(i+e+vi));
+ }
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices);
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
+ SV = Builder.CreateDefaultAlignedStore(SV, Addr);
+ }
+ return SV;
+ }
+ case NEON::BI__builtin_neon_vtst_v:
+ case NEON::BI__builtin_neon_vtstq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
+ Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
+ ConstantAggregateZero::get(Ty));
+ return Builder.CreateSExt(Ops[0], Ty, "vtst");
+ }
+ case NEON::BI__builtin_neon_vuzp_v:
+ case NEON::BI__builtin_neon_vuzpq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV = nullptr;
+
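+ // The masks select the even then the odd lanes of the concatenated inputs;
+ // for v4i16: <0, 2, 4, 6> and <1, 3, 5, 7>.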
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+ Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));
+
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices);
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
+ SV = Builder.CreateDefaultAlignedStore(SV, Addr);
+ }
+ return SV;
+ }
+ case NEON::BI__builtin_neon_vzip_v:
+ case NEON::BI__builtin_neon_vzipq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV = nullptr;
+
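+ // The masks interleave the low then the high halves of the two inputs;
+ // for v4i16: <0, 4, 1, 5> and <2, 6, 3, 7>.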
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+ Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
+ Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
+ }
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices);
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
+ SV = Builder.CreateDefaultAlignedStore(SV, Addr);
+ }
+ return SV;
+ }
+ }
+
+ assert(Int && "Expected valid intrinsic number");
+
+ // Determine the type(s) of this overloaded NEON intrinsic.
+ Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
+
+ Value *Result = EmitNeonCall(F, Ops, NameHint);
+ llvm::Type *ResultType = ConvertType(E->getType());
+ // Cast the intrinsic's one-element vector result back to the scalar type
+ // expected by the builtin.
+ return Builder.CreateBitCast(Result, ResultType, NameHint);
+}
+
+Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
+ Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
+ const CmpInst::Predicate Ip, const Twine &Name) {
+ llvm::Type *OTy = Op->getType();
+
+ // FIXME: this is utterly horrific. We should not be looking at previous
+ // codegen context to find out what needs doing. Unfortunately TableGen
+ // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
+ // (etc).
+ if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
+ OTy = BI->getOperand(0)->getType();
+
+ Op = Builder.CreateBitCast(Op, OTy);
+ if (OTy->getScalarType()->isFloatingPointTy()) {
+ Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
+ } else {
+ Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
+ }
+ return Builder.CreateSExt(Op, Ty, Name);
+}
+
+static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
+ Value *ExtOp, Value *IndexOp,
+ llvm::Type *ResTy, unsigned IntID,
+ const char *Name) {
+ SmallVector<Value *, 2> TblOps;
+ if (ExtOp)
+ TblOps.push_back(ExtOp);
+
+ // Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
+ SmallVector<Constant*, 16> Indices;
+ llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType());
+ for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
+ Indices.push_back(ConstantInt::get(CGF.Int32Ty, 2*i));
+ Indices.push_back(ConstantInt::get(CGF.Int32Ty, 2*i+1));
+ }
+ Value *SV = llvm::ConstantVector::get(Indices);
+
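+ // For v8i8 tables this mask is <0, 1, ..., 15>, so each shuffle below
+ // concatenates a pair of 64-bit tables into one 128-bit table.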
+ int PairPos = 0, End = Ops.size() - 1;
+ while (PairPos < End) {
+ TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
+ Ops[PairPos+1], SV, Name));
+ PairPos += 2;
+ }
+
+ // If there's an odd number of 64-bit lookup tables, fill the high 64 bits
+ // of the last 128-bit lookup table with zero.
+ if (PairPos == End) {
+ Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
+ TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
+ ZeroTbl, SV, Name));
+ }
+
+ Function *TblF;
+ TblOps.push_back(IndexOp);
+ TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
+
+ return CGF.EmitNeonCall(TblF, TblOps, Name);
+}
+
+Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
+ unsigned Value;
+ switch (BuiltinID) {
+ default:
+ return nullptr;
+ case ARM::BI__builtin_arm_nop:
+ Value = 0;
+ break;
+ case ARM::BI__builtin_arm_yield:
+ case ARM::BI__yield:
+ Value = 1;
+ break;
+ case ARM::BI__builtin_arm_wfe:
+ case ARM::BI__wfe:
+ Value = 2;
+ break;
+ case ARM::BI__builtin_arm_wfi:
+ case ARM::BI__wfi:
+ Value = 3;
+ break;
+ case ARM::BI__builtin_arm_sev:
+ case ARM::BI__sev:
+ Value = 4;
+ break;
+ case ARM::BI__builtin_arm_sevl:
+ case ARM::BI__sevl:
+ Value = 5;
+ break;
+ }
+
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
+ llvm::ConstantInt::get(Int32Ty, Value));
+}
+
+// Generates the IR for the read/write special register builtin.
+// ValueType is the type of the value that is to be written or read;
+// RegisterType is the type of the register being written to or read from.
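+// A read, for example, emits IR along the lines of
+//   %0 = call i64 @llvm.read_register.i64(metadata !0)
+// where !0 names the register, e.g. !{!"sp"}.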
+static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
+ const CallExpr *E,
+ llvm::Type *RegisterType,
+ llvm::Type *ValueType, bool IsRead) {
+ // The read/write register intrinsics only support 32- and 64-bit operations.
+ assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
+ && "Unsupported size for register.");
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+ CodeGen::CodeGenModule &CGM = CGF.CGM;
+ LLVMContext &Context = CGM.getLLVMContext();
+
+ const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
+ StringRef SysReg = cast<StringLiteral>(SysRegStrExpr)->getString();
+
+ llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
+ llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
+ llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
+
+ llvm::Type *Types[] = { RegisterType };
+
+ bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
+ assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
+ && "Can't fit 64-bit value in 32-bit register");
+
+ if (IsRead) {
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
+ llvm::Value *Call = Builder.CreateCall(F, Metadata);
+
+ if (MixedTypes)
+ // Read into 64 bit register and then truncate result to 32 bit.
+ return Builder.CreateTrunc(Call, ValueType);
+
+ if (ValueType->isPointerTy())
+ // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
+ return Builder.CreateIntToPtr(Call, ValueType);
+
+ return Call;
+ }
+
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
+ llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
+ if (MixedTypes) {
+ // Extend 32 bit write value to 64 bit to pass to write.
+ ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
+ return Builder.CreateCall(F, { Metadata, ArgValue });
+ }
+
+ if (ValueType->isPointerTy()) {
+ // Have VoidPtrTy ArgValue but want to return an i32/i64.
+ ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
+ return Builder.CreateCall(F, { Metadata, ArgValue });
+ }
+
+ return Builder.CreateCall(F, { Metadata, ArgValue });
+}
+
+/// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
+/// argument that specifies the vector type.
+static bool HasExtraNeonArgument(unsigned BuiltinID) {
+ switch (BuiltinID) {
+ default: break;
+ case NEON::BI__builtin_neon_vget_lane_i8:
+ case NEON::BI__builtin_neon_vget_lane_i16:
+ case NEON::BI__builtin_neon_vget_lane_i32:
+ case NEON::BI__builtin_neon_vget_lane_i64:
+ case NEON::BI__builtin_neon_vget_lane_f32:
+ case NEON::BI__builtin_neon_vgetq_lane_i8:
+ case NEON::BI__builtin_neon_vgetq_lane_i16:
+ case NEON::BI__builtin_neon_vgetq_lane_i32:
+ case NEON::BI__builtin_neon_vgetq_lane_i64:
+ case NEON::BI__builtin_neon_vgetq_lane_f32:
+ case NEON::BI__builtin_neon_vset_lane_i8:
+ case NEON::BI__builtin_neon_vset_lane_i16:
+ case NEON::BI__builtin_neon_vset_lane_i32:
+ case NEON::BI__builtin_neon_vset_lane_i64:
+ case NEON::BI__builtin_neon_vset_lane_f32:
+ case NEON::BI__builtin_neon_vsetq_lane_i8:
+ case NEON::BI__builtin_neon_vsetq_lane_i16:
+ case NEON::BI__builtin_neon_vsetq_lane_i32:
+ case NEON::BI__builtin_neon_vsetq_lane_i64:
+ case NEON::BI__builtin_neon_vsetq_lane_f32:
+ case NEON::BI__builtin_neon_vsha1h_u32:
+ case NEON::BI__builtin_neon_vsha1cq_u32:
+ case NEON::BI__builtin_neon_vsha1pq_u32:
+ case NEON::BI__builtin_neon_vsha1mq_u32:
+ case ARM::BI_MoveToCoprocessor:
+ case ARM::BI_MoveToCoprocessor2:
+ return false;
+ }
+ return true;
+}
+
+Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ if (auto Hint = GetValueForARMHint(BuiltinID))
+ return Hint;
+
+ if (BuiltinID == ARM::BI__emit) {
+ bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
+
+ APSInt Value;
+ if (!E->getArg(0)->EvaluateAsInt(Value, CGM.getContext()))
+ llvm_unreachable("Sema will ensure that the parameter is constant");
+
+ uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
+
+ llvm::InlineAsm *Emit =
+ IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
+ /*SideEffects=*/true)
+ : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
+ /*SideEffects=*/true);
+
+ return Builder.CreateCall(Emit);
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_dbg) {
+ Value *Option = EmitScalarExpr(E->getArg(0));
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *RW = EmitScalarExpr(E->getArg(1));
+ Value *IsData = EmitScalarExpr(E->getArg(2));
+
+ // Locality is not supported on the ARM target.
+ Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
+
+ Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
+ return Builder.CreateCall(F, {Address, RW, Locality, IsData});
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_rbit) {
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_rbit),
+ EmitScalarExpr(E->getArg(0)),
+ "rbit");
+ }
+
+ if (BuiltinID == ARM::BI__clear_cache) {
+ assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
+ const FunctionDecl *FD = E->getDirectCallee();
+ Value *Ops[2];
+ for (unsigned i = 0; i < 2; i++)
+ Ops[i] = EmitScalarExpr(E->getArg(i));
+ llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
+ llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
+ StringRef Name = FD->getName();
+ return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
+ ((BuiltinID == ARM::BI__builtin_arm_ldrex ||
+ BuiltinID == ARM::BI__builtin_arm_ldaex) &&
+ getContext().getTypeSize(E->getType()) == 64) ||
+ BuiltinID == ARM::BI__ldrexd) {
+ Function *F;
+
+ switch (BuiltinID) {
+ default: llvm_unreachable("unexpected builtin");
+ case ARM::BI__builtin_arm_ldaex:
+ F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
+ break;
+ case ARM::BI__builtin_arm_ldrexd:
+ case ARM::BI__builtin_arm_ldrex:
+ case ARM::BI__ldrexd:
+ F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
+ break;
+ }
+
+ Value *LdPtr = EmitScalarExpr(E->getArg(0));
+ Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
+ "ldrexd");
+
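+ // Reassemble the 64-bit result from the two 32-bit halves:
+ // (Val0 << 32) | Val1.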
+ Value *Val0 = Builder.CreateExtractValue(Val, 1);
+ Value *Val1 = Builder.CreateExtractValue(Val, 0);
+ Val0 = Builder.CreateZExt(Val0, Int64Ty);
+ Val1 = Builder.CreateZExt(Val1, Int64Ty);
+
+ Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
+ Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
+ Val = Builder.CreateOr(Val, Val1);
+ return Builder.CreateBitCast(Val, ConvertType(E->getType()));
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
+ BuiltinID == ARM::BI__builtin_arm_ldaex) {
+ Value *LoadAddr = EmitScalarExpr(E->getArg(0));
+
+ QualType Ty = E->getType();
+ llvm::Type *RealResTy = ConvertType(Ty);
+ llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
+ getContext().getTypeSize(Ty));
+ LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
+
+ Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
+ ? Intrinsic::arm_ldaex
+ : Intrinsic::arm_ldrex,
+ LoadAddr->getType());
+ Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
+
+ if (RealResTy->isPointerTy())
+ return Builder.CreateIntToPtr(Val, RealResTy);
+ else {
+ Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
+ return Builder.CreateBitCast(Val, RealResTy);
+ }
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_strexd ||
+ ((BuiltinID == ARM::BI__builtin_arm_stlex ||
+ BuiltinID == ARM::BI__builtin_arm_strex) &&
+ getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
+ Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
+ ? Intrinsic::arm_stlexd
+ : Intrinsic::arm_strexd);
+ llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, nullptr);
+
+ Address Tmp = CreateMemTemp(E->getArg(0)->getType());
+ Value *Val = EmitScalarExpr(E->getArg(0));
+ Builder.CreateStore(Val, Tmp);
+
+ Address LdPtr = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
+ Val = Builder.CreateLoad(LdPtr);
+
+ Value *Arg0 = Builder.CreateExtractValue(Val, 0);
+ Value *Arg1 = Builder.CreateExtractValue(Val, 1);
+ Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
+ return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_strex ||
+ BuiltinID == ARM::BI__builtin_arm_stlex) {
+ Value *StoreVal = EmitScalarExpr(E->getArg(0));
+ Value *StoreAddr = EmitScalarExpr(E->getArg(1));
+
+ QualType Ty = E->getArg(0)->getType();
+ llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
+ getContext().getTypeSize(Ty));
+ StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
+
+ if (StoreVal->getType()->isPointerTy())
+ StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
+ else {
+ StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
+ StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
+ }
+
+ Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
+ ? Intrinsic::arm_stlex
+ : Intrinsic::arm_strex,
+ StoreAddr->getType());
+ return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_clrex) {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
+ return Builder.CreateCall(F);
+ }
+
+ // CRC32
+ Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
+ switch (BuiltinID) {
+ case ARM::BI__builtin_arm_crc32b:
+ CRCIntrinsicID = Intrinsic::arm_crc32b; break;
+ case ARM::BI__builtin_arm_crc32cb:
+ CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
+ case ARM::BI__builtin_arm_crc32h:
+ CRCIntrinsicID = Intrinsic::arm_crc32h; break;
+ case ARM::BI__builtin_arm_crc32ch:
+ CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
+ case ARM::BI__builtin_arm_crc32w:
+ case ARM::BI__builtin_arm_crc32d:
+ CRCIntrinsicID = Intrinsic::arm_crc32w; break;
+ case ARM::BI__builtin_arm_crc32cw:
+ case ARM::BI__builtin_arm_crc32cd:
+ CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
+ }
+
+ if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
+ Value *Arg0 = EmitScalarExpr(E->getArg(0));
+ Value *Arg1 = EmitScalarExpr(E->getArg(1));
+
+ // The crc32{c,}d intrinsics are implemented as two calls to the crc32{c,}w
+ // intrinsics, hence we need different codegen for these cases.
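+ // E.g. crc32d(crc, x) == crc32w(crc32w(crc, lo32(x)), hi32(x)).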
+ if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
+ BuiltinID == ARM::BI__builtin_arm_crc32cd) {
+ Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
+ Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
+ Value *Arg1b = Builder.CreateLShr(Arg1, C1);
+ Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
+
+ Function *F = CGM.getIntrinsic(CRCIntrinsicID);
+ Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
+ return Builder.CreateCall(F, {Res, Arg1b});
+ } else {
+ Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
+
+ Function *F = CGM.getIntrinsic(CRCIntrinsicID);
+ return Builder.CreateCall(F, {Arg0, Arg1});
+ }
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_rsr ||
+ BuiltinID == ARM::BI__builtin_arm_rsr64 ||
+ BuiltinID == ARM::BI__builtin_arm_rsrp ||
+ BuiltinID == ARM::BI__builtin_arm_wsr ||
+ BuiltinID == ARM::BI__builtin_arm_wsr64 ||
+ BuiltinID == ARM::BI__builtin_arm_wsrp) {
+
+ bool IsRead = BuiltinID == ARM::BI__builtin_arm_rsr ||
+ BuiltinID == ARM::BI__builtin_arm_rsr64 ||
+ BuiltinID == ARM::BI__builtin_arm_rsrp;
+
+ bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
+ BuiltinID == ARM::BI__builtin_arm_wsrp;
+
+ bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
+ BuiltinID == ARM::BI__builtin_arm_wsr64;
+
+ llvm::Type *ValueType;
+ llvm::Type *RegisterType;
+ if (IsPointerBuiltin) {
+ ValueType = VoidPtrTy;
+ RegisterType = Int32Ty;
+ } else if (Is64Bit) {
+ ValueType = RegisterType = Int64Ty;
+ } else {
+ ValueType = RegisterType = Int32Ty;
+ }
+
+ return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
+ }
+
+ // Find out if any arguments are required to be integer constant
+ // expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ assert(Error == ASTContext::GE_None && "Should not codegen an error");
+
+ auto getAlignmentValue32 = [&](Address addr) -> Value* {
+ return Builder.getInt32(addr.getAlignment().getQuantity());
+ };
+
+ Address PtrOp0 = Address::invalid();
+ Address PtrOp1 = Address::invalid();
+ SmallVector<Value*, 4> Ops;
+ bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
+ unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
+ for (unsigned i = 0, e = NumArgs; i != e; i++) {
+ if (i == 0) {
+ switch (BuiltinID) {
+ case NEON::BI__builtin_neon_vld1_v:
+ case NEON::BI__builtin_neon_vld1q_v:
+ case NEON::BI__builtin_neon_vld1q_lane_v:
+ case NEON::BI__builtin_neon_vld1_lane_v:
+ case NEON::BI__builtin_neon_vld1_dup_v:
+ case NEON::BI__builtin_neon_vld1q_dup_v:
+ case NEON::BI__builtin_neon_vst1_v:
+ case NEON::BI__builtin_neon_vst1q_v:
+ case NEON::BI__builtin_neon_vst1q_lane_v:
+ case NEON::BI__builtin_neon_vst1_lane_v:
+ case NEON::BI__builtin_neon_vst2_v:
+ case NEON::BI__builtin_neon_vst2q_v:
+ case NEON::BI__builtin_neon_vst2_lane_v:
+ case NEON::BI__builtin_neon_vst2q_lane_v:
+ case NEON::BI__builtin_neon_vst3_v:
+ case NEON::BI__builtin_neon_vst3q_v:
+ case NEON::BI__builtin_neon_vst3_lane_v:
+ case NEON::BI__builtin_neon_vst3q_lane_v:
+ case NEON::BI__builtin_neon_vst4_v:
+ case NEON::BI__builtin_neon_vst4q_v:
+ case NEON::BI__builtin_neon_vst4_lane_v:
+ case NEON::BI__builtin_neon_vst4q_lane_v:
+ // Get the alignment for the argument in addition to the value;
+ // we'll use it later.
+ PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
+ Ops.push_back(PtrOp0.getPointer());
+ continue;
+ }
+ }
+ if (i == 1) {
+ switch (BuiltinID) {
+ case NEON::BI__builtin_neon_vld2_v:
+ case NEON::BI__builtin_neon_vld2q_v:
+ case NEON::BI__builtin_neon_vld3_v:
+ case NEON::BI__builtin_neon_vld3q_v:
+ case NEON::BI__builtin_neon_vld4_v:
+ case NEON::BI__builtin_neon_vld4q_v:
+ case NEON::BI__builtin_neon_vld2_lane_v:
+ case NEON::BI__builtin_neon_vld2q_lane_v:
+ case NEON::BI__builtin_neon_vld3_lane_v:
+ case NEON::BI__builtin_neon_vld3q_lane_v:
+ case NEON::BI__builtin_neon_vld4_lane_v:
+ case NEON::BI__builtin_neon_vld4q_lane_v:
+ case NEON::BI__builtin_neon_vld2_dup_v:
+ case NEON::BI__builtin_neon_vld3_dup_v:
+ case NEON::BI__builtin_neon_vld4_dup_v:
+ // Get the alignment for the argument in addition to the value;
+ // we'll use it later.
+ PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
+ Ops.push_back(PtrOp1.getPointer());
+ continue;
+ }
+ }
+
+ if ((ICEArguments & (1 << i)) == 0) {
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ } else {
+ // If this is required to be a constant, constant fold it so that we know
+ // that the generated intrinsic gets a ConstantInt.
+ llvm::APSInt Result;
+ bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
+ assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
+ Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
+ }
+ }
+
+ switch (BuiltinID) {
+ default: break;
+
+ case NEON::BI__builtin_neon_vget_lane_i8:
+ case NEON::BI__builtin_neon_vget_lane_i16:
+ case NEON::BI__builtin_neon_vget_lane_i32:
+ case NEON::BI__builtin_neon_vget_lane_i64:
+ case NEON::BI__builtin_neon_vget_lane_f32:
+ case NEON::BI__builtin_neon_vgetq_lane_i8:
+ case NEON::BI__builtin_neon_vgetq_lane_i16:
+ case NEON::BI__builtin_neon_vgetq_lane_i32:
+ case NEON::BI__builtin_neon_vgetq_lane_i64:
+ case NEON::BI__builtin_neon_vgetq_lane_f32:
+ return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
+
+ case NEON::BI__builtin_neon_vset_lane_i8:
+ case NEON::BI__builtin_neon_vset_lane_i16:
+ case NEON::BI__builtin_neon_vset_lane_i32:
+ case NEON::BI__builtin_neon_vset_lane_i64:
+ case NEON::BI__builtin_neon_vset_lane_f32:
+ case NEON::BI__builtin_neon_vsetq_lane_i8:
+ case NEON::BI__builtin_neon_vsetq_lane_i16:
+ case NEON::BI__builtin_neon_vsetq_lane_i32:
+ case NEON::BI__builtin_neon_vsetq_lane_i64:
+ case NEON::BI__builtin_neon_vsetq_lane_f32:
+ return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
+
+ case NEON::BI__builtin_neon_vsha1h_u32:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
+ "vsha1h");
+ case NEON::BI__builtin_neon_vsha1cq_u32:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
+ "vsha1h");
+ case NEON::BI__builtin_neon_vsha1pq_u32:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
+ "vsha1h");
+ case NEON::BI__builtin_neon_vsha1mq_u32:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
+ "vsha1h");
+
+ // The ARM _MoveToCoprocessor builtins put the input register value as
+ // the first argument, but the LLVM intrinsic expects it as the third one.
+ case ARM::BI_MoveToCoprocessor:
+ case ARM::BI_MoveToCoprocessor2: {
+ Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ?
+ Intrinsic::arm_mcr : Intrinsic::arm_mcr2);
+ return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
+ Ops[3], Ops[4], Ops[5]});
+ }
+ }
+
+ // Get the last argument, which specifies the vector type.
+ assert(HasExtraArg);
+ llvm::APSInt Result;
+ const Expr *Arg = E->getArg(E->getNumArgs()-1);
+ if (!Arg->isIntegerConstantExpr(Result, getContext()))
+ return nullptr;
+
+ if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
+ BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
+ // Determine the overloaded type of this builtin.
+ llvm::Type *Ty;
+ if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
+ Ty = FloatTy;
+ else
+ Ty = DoubleTy;
+
+ // Determine whether this is an unsigned conversion or not.
+ bool usgn = Result.getZExtValue() == 1;
+ unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
+
+ // Call the appropriate intrinsic.
+ Function *F = CGM.getIntrinsic(Int, Ty);
+ return Builder.CreateCall(F, Ops, "vcvtr");
+ }
+
+ // Determine the type of this overloaded NEON intrinsic.
+ NeonTypeFlags Type(Result.getZExtValue());
+ bool usgn = Type.isUnsigned();
+ bool rightShift = false;
+
+ llvm::VectorType *VTy = GetNeonType(this, Type);
+ llvm::Type *Ty = VTy;
+ if (!Ty)
+ return nullptr;
+
+ // Many NEON builtins have identical semantics and uses in ARM and
+ // AArch64. Emit these in a single function.
+ auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
+ const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
+ IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
+ if (Builtin)
+ return EmitCommonNeonBuiltinExpr(
+ Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
+ Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1);
+
+ unsigned Int;
+ switch (BuiltinID) {
+ default: return nullptr;
+ case NEON::BI__builtin_neon_vld1q_lane_v:
+ // Handle 64-bit integer elements as a special case. Use shuffles of
+ // one-element vectors to avoid poor code for i64 in the backend.
+ if (VTy->getElementType()->isIntegerTy(64)) {
+ // Extract the other lane.
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ uint32_t Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
+ Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
+ Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
+ // Load the value as a one-element vector.
+ Ty = llvm::VectorType::get(VTy->getElementType(), 1);
+ llvm::Type *Tys[] = {Ty, Int8PtrTy};
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
+ Value *Align = getAlignmentValue32(PtrOp0);
+ Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
+ // Combine them.
+ uint32_t Indices[] = {1 - Lane, Lane};
+ SV = llvm::ConstantDataVector::get(getLLVMContext(), Indices);
+ return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
+ }
+ // fall through
+ case NEON::BI__builtin_neon_vld1_lane_v: {
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
+ Value *Ld = Builder.CreateLoad(PtrOp0);
+ return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
+ }
+ case NEON::BI__builtin_neon_vld2_dup_v:
+ case NEON::BI__builtin_neon_vld3_dup_v:
+ case NEON::BI__builtin_neon_vld4_dup_v: {
+ // Handle 64-bit elements as a special-case. There is no "dup" needed.
+ if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
+ switch (BuiltinID) {
+ case NEON::BI__builtin_neon_vld2_dup_v:
+ Int = Intrinsic::arm_neon_vld2;
+ break;
+ case NEON::BI__builtin_neon_vld3_dup_v:
+ Int = Intrinsic::arm_neon_vld3;
+ break;
+ case NEON::BI__builtin_neon_vld4_dup_v:
+ Int = Intrinsic::arm_neon_vld4;
+ break;
+ default: llvm_unreachable("unknown vld_dup intrinsic?");
+ }
+ llvm::Type *Tys[] = {Ty, Int8PtrTy};
+ Function *F = CGM.getIntrinsic(Int, Tys);
+ llvm::Value *Align = getAlignmentValue32(PtrOp1);
+ Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, "vld_dup");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ }
+ switch (BuiltinID) {
+ case NEON::BI__builtin_neon_vld2_dup_v:
+ Int = Intrinsic::arm_neon_vld2lane;
+ break;
+ case NEON::BI__builtin_neon_vld3_dup_v:
+ Int = Intrinsic::arm_neon_vld3lane;
+ break;
+ case NEON::BI__builtin_neon_vld4_dup_v:
+ Int = Intrinsic::arm_neon_vld4lane;
+ break;
+ default: llvm_unreachable("unknown vld_dup intrinsic?");
+ }
+ llvm::Type *Tys[] = {Ty, Int8PtrTy};
+ Function *F = CGM.getIntrinsic(Int, Tys);
+ llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
+
+ SmallVector<Value*, 6> Args;
+ Args.push_back(Ops[1]);
+ Args.append(STy->getNumElements(), UndefValue::get(Ty));
+
+ llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
+ Args.push_back(CI);
+ Args.push_back(getAlignmentValue32(PtrOp1));
+
+ Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
+ // splat lane 0 to all elts in each vector of the result.
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ Value *Val = Builder.CreateExtractValue(Ops[1], i);
+ Value *Elt = Builder.CreateBitCast(Val, Ty);
+ Elt = EmitNeonSplat(Elt, CI);
+ Elt = Builder.CreateBitCast(Elt, Val->getType());
+ Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
+ }
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ }
+ case NEON::BI__builtin_neon_vqrshrn_n_v:
+ Int =
+ usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
+ 1, true);
+ case NEON::BI__builtin_neon_vqrshrun_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
+ Ops, "vqrshrun_n", 1, true);
+ case NEON::BI__builtin_neon_vqshrn_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
+ 1, true);
+ case NEON::BI__builtin_neon_vqshrun_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
+ Ops, "vqshrun_n", 1, true);
+ case NEON::BI__builtin_neon_vrecpe_v:
+ case NEON::BI__builtin_neon_vrecpeq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
+ Ops, "vrecpe");
+ case NEON::BI__builtin_neon_vrshrn_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
+ Ops, "vrshrn_n", 1, true);
+ case NEON::BI__builtin_neon_vrsra_n_v:
+ case NEON::BI__builtin_neon_vrsraq_n_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
+ Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
+ Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
+ return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
+ case NEON::BI__builtin_neon_vsri_n_v:
+ case NEON::BI__builtin_neon_vsriq_n_v:
+ rightShift = true;
+ case NEON::BI__builtin_neon_vsli_n_v:
+ case NEON::BI__builtin_neon_vsliq_n_v:
+ Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
+ Ops, "vsli_n");
+ case NEON::BI__builtin_neon_vsra_n_v:
+ case NEON::BI__builtin_neon_vsraq_n_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
+ return Builder.CreateAdd(Ops[0], Ops[1]);
+ case NEON::BI__builtin_neon_vst1q_lane_v:
+ // Handle 64-bit integer elements as a special case. Use a shuffle to get
+ // a one-element vector and avoid poor code for i64 in the backend.
+ if (VTy->getElementType()->isIntegerTy(64)) {
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
+ Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
+ Ops[2] = getAlignmentValue32(PtrOp0);
+ llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
+ Tys), Ops);
+ }
+ // fall through
+ case NEON::BI__builtin_neon_vst1_lane_v: {
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty));
+ return St;
+ }
+ case NEON::BI__builtin_neon_vtbl1_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
+ Ops, "vtbl1");
+ case NEON::BI__builtin_neon_vtbl2_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
+ Ops, "vtbl2");
+ case NEON::BI__builtin_neon_vtbl3_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
+ Ops, "vtbl3");
+ case NEON::BI__builtin_neon_vtbl4_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
+ Ops, "vtbl4");
+ case NEON::BI__builtin_neon_vtbx1_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
+ Ops, "vtbx1");
+ case NEON::BI__builtin_neon_vtbx2_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
+ Ops, "vtbx2");
+ case NEON::BI__builtin_neon_vtbx3_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
+ Ops, "vtbx3");
+ case NEON::BI__builtin_neon_vtbx4_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
+ Ops, "vtbx4");
+ }
+}
+
+static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
+ const CallExpr *E,
+ SmallVectorImpl<Value *> &Ops) {
+ unsigned int Int = 0;
+ const char *s = nullptr;
+
+ switch (BuiltinID) {
+ default:
+ return nullptr;
+ case NEON::BI__builtin_neon_vtbl1_v:
+ case NEON::BI__builtin_neon_vqtbl1_v:
+ case NEON::BI__builtin_neon_vqtbl1q_v:
+ case NEON::BI__builtin_neon_vtbl2_v:
+ case NEON::BI__builtin_neon_vqtbl2_v:
+ case NEON::BI__builtin_neon_vqtbl2q_v:
+ case NEON::BI__builtin_neon_vtbl3_v:
+ case NEON::BI__builtin_neon_vqtbl3_v:
+ case NEON::BI__builtin_neon_vqtbl3q_v:
+ case NEON::BI__builtin_neon_vtbl4_v:
+ case NEON::BI__builtin_neon_vqtbl4_v:
+ case NEON::BI__builtin_neon_vqtbl4q_v:
+ break;
+ case NEON::BI__builtin_neon_vtbx1_v:
+ case NEON::BI__builtin_neon_vqtbx1_v:
+ case NEON::BI__builtin_neon_vqtbx1q_v:
+ case NEON::BI__builtin_neon_vtbx2_v:
+ case NEON::BI__builtin_neon_vqtbx2_v:
+ case NEON::BI__builtin_neon_vqtbx2q_v:
+ case NEON::BI__builtin_neon_vtbx3_v:
+ case NEON::BI__builtin_neon_vqtbx3_v:
+ case NEON::BI__builtin_neon_vqtbx3q_v:
+ case NEON::BI__builtin_neon_vtbx4_v:
+ case NEON::BI__builtin_neon_vqtbx4_v:
+ case NEON::BI__builtin_neon_vqtbx4q_v:
+ break;
+ }
+
+ assert(E->getNumArgs() >= 3);
+
+ // Get the last argument, which specifies the vector type.
+ llvm::APSInt Result;
+ const Expr *Arg = E->getArg(E->getNumArgs() - 1);
+ if (!Arg->isIntegerConstantExpr(Result, CGF.getContext()))
+ return nullptr;
+
+ // Determine the type of this overloaded NEON intrinsic.
+ NeonTypeFlags Type(Result.getZExtValue());
+ llvm::VectorType *Ty = GetNeonType(&CGF, Type);
+ if (!Ty)
+ return nullptr;
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+ // AArch64 table-lookup builtins are not overloaded; they do not have an
+ // extra argument that specifies the vector type, so each case must be
+ // handled individually.
+ switch (BuiltinID) {
+ case NEON::BI__builtin_neon_vtbl1_v: {
+ return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr,
+ Ops[1], Ty, Intrinsic::aarch64_neon_tbl1,
+ "vtbl1");
+ }
+ case NEON::BI__builtin_neon_vtbl2_v: {
+ return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr,
+ Ops[2], Ty, Intrinsic::aarch64_neon_tbl1,
+ "vtbl1");
+ }
+ case NEON::BI__builtin_neon_vtbl3_v: {
+ return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr,
+ Ops[3], Ty, Intrinsic::aarch64_neon_tbl2,
+ "vtbl2");
+ }
+ case NEON::BI__builtin_neon_vtbl4_v: {
+ return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr,
+ Ops[4], Ty, Intrinsic::aarch64_neon_tbl2,
+ "vtbl2");
+ }
+ case NEON::BI__builtin_neon_vtbx1_v: {
+ Value *TblRes =
+ packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2],
+ Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
+
+ llvm::Constant *EightV = ConstantInt::get(Ty, 8);
+ Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
+ CmpRes = Builder.CreateSExt(CmpRes, Ty);
+
+ Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
+ Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
+ return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
+ }
+ case NEON::BI__builtin_neon_vtbx2_v: {
+ return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0],
+ Ops[3], Ty, Intrinsic::aarch64_neon_tbx1,
+ "vtbx1");
+ }
+ case NEON::BI__builtin_neon_vtbx3_v: {
+ Value *TblRes =
+ packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4],
+ Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
+
+ llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
+ Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
+ TwentyFourV);
+ CmpRes = Builder.CreateSExt(CmpRes, Ty);
+
+ Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
+ Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
+ return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
+ }
+ case NEON::BI__builtin_neon_vtbx4_v: {
+ return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0],
+ Ops[5], Ty, Intrinsic::aarch64_neon_tbx2,
+ "vtbx2");
+ }
+ case NEON::BI__builtin_neon_vqtbl1_v:
+ case NEON::BI__builtin_neon_vqtbl1q_v:
+ Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
+ case NEON::BI__builtin_neon_vqtbl2_v:
+ case NEON::BI__builtin_neon_vqtbl2q_v:
+ Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
+ case NEON::BI__builtin_neon_vqtbl3_v:
+ case NEON::BI__builtin_neon_vqtbl3q_v:
+ Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
+ case NEON::BI__builtin_neon_vqtbl4_v:
+ case NEON::BI__builtin_neon_vqtbl4q_v:
+ Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
+ case NEON::BI__builtin_neon_vqtbx1_v:
+ case NEON::BI__builtin_neon_vqtbx1q_v:
+ Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
+ case NEON::BI__builtin_neon_vqtbx2_v:
+ case NEON::BI__builtin_neon_vqtbx2q_v:
+ Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
+ case NEON::BI__builtin_neon_vqtbx3_v:
+ case NEON::BI__builtin_neon_vqtbx3q_v:
+ Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
+ case NEON::BI__builtin_neon_vqtbx4_v:
+ case NEON::BI__builtin_neon_vqtbx4q_v:
+ Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
+ }
+
+ if (!Int)
+ return nullptr;
+
+ Function *F = CGF.CGM.getIntrinsic(Int, Ty);
+ return CGF.EmitNeonCall(F, Ops, s);
+}
+
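+// Widen a scalar i16 into lane 0 of an undef v4i16 so that scalar builtins
+// can be emitted via the corresponding vector intrinsics.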
+Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
+ llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4);
+ Op = Builder.CreateBitCast(Op, Int16Ty);
+ Value *V = UndefValue::get(VTy);
+ llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
+ Op = Builder.CreateInsertElement(V, Op, CI);
+ return Op;
+}
+
+Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ unsigned HintID = static_cast<unsigned>(-1);
+ switch (BuiltinID) {
+ default: break;
+ case AArch64::BI__builtin_arm_nop:
+ HintID = 0;
+ break;
+ case AArch64::BI__builtin_arm_yield:
+ HintID = 1;
+ break;
+ case AArch64::BI__builtin_arm_wfe:
+ HintID = 2;
+ break;
+ case AArch64::BI__builtin_arm_wfi:
+ HintID = 3;
+ break;
+ case AArch64::BI__builtin_arm_sev:
+ HintID = 4;
+ break;
+ case AArch64::BI__builtin_arm_sevl:
+ HintID = 5;
+ break;
+ }
+
+ if (HintID != static_cast<unsigned>(-1)) {
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
+ return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *RW = EmitScalarExpr(E->getArg(1));
+ Value *CacheLevel = EmitScalarExpr(E->getArg(2));
+ Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
+ Value *IsData = EmitScalarExpr(E->getArg(4));
+
+ Value *Locality = nullptr;
+ if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
+ // A temporal fetch: convert the cache level to locality.
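+ // Locality = 3 - CacheLevel, e.g. cache level 0 maps to locality 3.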
+ Locality = llvm::ConstantInt::get(Int32Ty,
+ -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
+ } else {
+ // Streaming fetch.
+ Locality = llvm::ConstantInt::get(Int32Ty, 0);
+ }
+
+ // FIXME: We need an AArch64-specific LLVM intrinsic if we want to specify
+ // PLDL3STRM or PLDL2STRM.
+ Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
+ return Builder.CreateCall(F, {Address, RW, Locality, IsData});
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
+ assert((getContext().getTypeSize(E->getType()) == 32) &&
+ "rbit of unusual size!");
+ llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::aarch64_rbit, Arg->getType()), Arg, "rbit");
+ }
+ if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
+ assert((getContext().getTypeSize(E->getType()) == 64) &&
+ "rbit of unusual size!");
+ llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::aarch64_rbit, Arg->getType()), Arg, "rbit");
+ }
+
+ if (BuiltinID == AArch64::BI__clear_cache) {
+ assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
+ const FunctionDecl *FD = E->getDirectCallee();
+ Value *Ops[2];
+ for (unsigned i = 0; i < 2; i++)
+ Ops[i] = EmitScalarExpr(E->getArg(i));
+ llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
+ llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
+ StringRef Name = FD->getName();
+ return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
+ }
+
+ if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
+ BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
+ getContext().getTypeSize(E->getType()) == 128) {
+ Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
+ ? Intrinsic::aarch64_ldaxp
+ : Intrinsic::aarch64_ldxp);
+
+ Value *LdPtr = EmitScalarExpr(E->getArg(0));
+ Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
+ "ldxp");
+
+ Value *Val0 = Builder.CreateExtractValue(Val, 1);
+ Value *Val1 = Builder.CreateExtractValue(Val, 0);
+ llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
+ Val0 = Builder.CreateZExt(Val0, Int128Ty);
+ Val1 = Builder.CreateZExt(Val1, Int128Ty);
+
+ Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
+ Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
+ Val = Builder.CreateOr(Val, Val1);
+ return Builder.CreateBitCast(Val, ConvertType(E->getType()));
+ } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
+ BuiltinID == AArch64::BI__builtin_arm_ldaex) {
+ Value *LoadAddr = EmitScalarExpr(E->getArg(0));
+
+ QualType Ty = E->getType();
+ llvm::Type *RealResTy = ConvertType(Ty);
+ llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
+ getContext().getTypeSize(Ty));
+ LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
+
+ Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
+ ? Intrinsic::aarch64_ldaxr
+ : Intrinsic::aarch64_ldxr,
+ LoadAddr->getType());
+ Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
+
+ if (RealResTy->isPointerTy())
+ return Builder.CreateIntToPtr(Val, RealResTy);
+
+ Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
+ return Builder.CreateBitCast(Val, RealResTy);
+ }
+
+ if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
+ BuiltinID == AArch64::BI__builtin_arm_stlex) &&
+ getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
+ Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
+ ? Intrinsic::aarch64_stlxp
+ : Intrinsic::aarch64_stxp);
+ llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty, nullptr);
+
+ Address Tmp = CreateMemTemp(E->getArg(0)->getType());
+ EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
+
+ Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
+ llvm::Value *Val = Builder.CreateLoad(Tmp);
+
+ Value *Arg0 = Builder.CreateExtractValue(Val, 0);
+ Value *Arg1 = Builder.CreateExtractValue(Val, 1);
+ Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
+ Int8PtrTy);
+ return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_strex ||
+ BuiltinID == AArch64::BI__builtin_arm_stlex) {
+ Value *StoreVal = EmitScalarExpr(E->getArg(0));
+ Value *StoreAddr = EmitScalarExpr(E->getArg(1));
+
+ QualType Ty = E->getArg(0)->getType();
+ llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
+ getContext().getTypeSize(Ty));
+ StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
+
+ if (StoreVal->getType()->isPointerTy())
+ StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
+ else {
+ StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
+ StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
+ }
+
+ Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
+ ? Intrinsic::aarch64_stlxr
+ : Intrinsic::aarch64_stxr,
+ StoreAddr->getType());
+ return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
+ return Builder.CreateCall(F);
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_thread_pointer) {
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_thread_pointer);
+ return Builder.CreateCall(F);
+ }
+
+ // CRC32
+ Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
+ switch (BuiltinID) {
+ case AArch64::BI__builtin_arm_crc32b:
+ CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
+ case AArch64::BI__builtin_arm_crc32cb:
+ CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
+ case AArch64::BI__builtin_arm_crc32h:
+ CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
+ case AArch64::BI__builtin_arm_crc32ch:
+ CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
+ case AArch64::BI__builtin_arm_crc32w:
+ CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
+ case AArch64::BI__builtin_arm_crc32cw:
+ CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
+ case AArch64::BI__builtin_arm_crc32d:
+ CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
+ case AArch64::BI__builtin_arm_crc32cd:
+ CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
+ }
+
+ if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
+ Value *Arg0 = EmitScalarExpr(E->getArg(0));
+ Value *Arg1 = EmitScalarExpr(E->getArg(1));
+ Function *F = CGM.getIntrinsic(CRCIntrinsicID);
+
+ llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
+ Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
+
+ return Builder.CreateCall(F, {Arg0, Arg1});
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
+ BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
+ BuiltinID == AArch64::BI__builtin_arm_rsrp ||
+ BuiltinID == AArch64::BI__builtin_arm_wsr ||
+ BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
+ BuiltinID == AArch64::BI__builtin_arm_wsrp) {
+
+ bool IsRead = BuiltinID == AArch64::BI__builtin_arm_rsr ||
+ BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
+ BuiltinID == AArch64::BI__builtin_arm_rsrp;
+
+ bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
+ BuiltinID == AArch64::BI__builtin_arm_wsrp;
+
+ bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
+ BuiltinID != AArch64::BI__builtin_arm_wsr;
+
+ llvm::Type *ValueType;
+ llvm::Type *RegisterType = Int64Ty;
+ if (IsPointerBuiltin) {
+ ValueType = VoidPtrTy;
+ } else if (Is64Bit) {
+ ValueType = Int64Ty;
+ } else {
+ ValueType = Int32Ty;
+ }
+
+ return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
+ }
+
+ // Find out if any arguments are required to be integer constant
+ // expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ assert(Error == ASTContext::GE_None && "Should not codegen an error");
+
+ llvm::SmallVector<Value*, 4> Ops;
+ for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
+ if ((ICEArguments & (1 << i)) == 0) {
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ } else {
+ // If this is required to be a constant, constant fold it so that we know
+ // that the generated intrinsic gets a ConstantInt.
+ llvm::APSInt Result;
+ bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
+ assert(IsConst && "Constant arg isn't actually constant?");
+ (void)IsConst;
+ Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
+ }
+ }
+
+ auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
+ const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
+ SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
+
+ if (Builtin) {
+ Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
+ Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
+ assert(Result && "SISD intrinsic should have been handled");
+ return Result;
+ }
+
+ llvm::APSInt Result;
+ const Expr *Arg = E->getArg(E->getNumArgs()-1);
+ NeonTypeFlags Type(0);
+ if (Arg->isIntegerConstantExpr(Result, getContext()))
+ // Determine the type of this overloaded NEON intrinsic.
+ Type = NeonTypeFlags(Result.getZExtValue());
+
+ bool usgn = Type.isUnsigned();
+ bool quad = Type.isQuad();
+
+ // Handle non-overloaded intrinsics first.
+ switch (BuiltinID) {
+ default: break;
+ case NEON::BI__builtin_neon_vldrq_p128: {
+ llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
+ Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
+ return Builder.CreateDefaultAlignedLoad(Ptr);
+ }
+ case NEON::BI__builtin_neon_vstrq_p128: {
+ llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
+ Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
+ return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
+ }
+ case NEON::BI__builtin_neon_vcvts_u32_f32:
+ case NEON::BI__builtin_neon_vcvtd_u64_f64:
+ usgn = true;
+ // FALL THROUGH
+ case NEON::BI__builtin_neon_vcvts_s32_f32:
+ case NEON::BI__builtin_neon_vcvtd_s64_f64: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
+ llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
+ llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
+ Ops[0] = Builder.CreateBitCast(Ops[0], FTy);
+ if (usgn)
+ return Builder.CreateFPToUI(Ops[0], InTy);
+ return Builder.CreateFPToSI(Ops[0], InTy);
+ }
+ case NEON::BI__builtin_neon_vcvts_f32_u32:
+ case NEON::BI__builtin_neon_vcvtd_f64_u64:
+ usgn = true;
+ // FALL THROUGH
+ case NEON::BI__builtin_neon_vcvts_f32_s32:
+ case NEON::BI__builtin_neon_vcvtd_f64_s64: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
+ llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
+ llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
+ Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
+ if (usgn)
+ return Builder.CreateUIToFP(Ops[0], FTy);
+ return Builder.CreateSIToFP(Ops[0], FTy);
+ }
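+ // The scalar vcvt cases above need no target intrinsic: FP->int lowers to
+ // plain fptosi/fptoui and int->FP to sitofp/uitofp, with a bitcast first
+ // so the operand carries the expected scalar type.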
+ case NEON::BI__builtin_neon_vpaddd_s64: {
+ llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ // The vector is v2i64, so make sure it's bitcast to that.
+ Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
+ llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
+ llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
+ Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
+ Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
+ // Pairwise addition of a v2i64 into a scalar i64.
+ return Builder.CreateAdd(Op0, Op1, "vpaddd");
+ }
+ case NEON::BI__builtin_neon_vpaddd_f64: {
+ llvm::Type *Ty = llvm::VectorType::get(DoubleTy, 2);
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ // The vector is v2f64, so make sure it's bitcast to that.
+ Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
+ llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
+ llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
+ Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
+ Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
+ // Pairwise addition of a v2f64 into a scalar f64.
+ return Builder.CreateFAdd(Op0, Op1, "vpaddd");
+ }
+ case NEON::BI__builtin_neon_vpadds_f32: {
+ llvm::Type *Ty = llvm::VectorType::get(FloatTy, 2);
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ // The vector is v2f32, so make sure it's bitcast to that.
+ Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
+ llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
+ llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
+ Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
+ Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
+ // Pairwise addition of a v2f32 into a scalar f32.
+ return Builder.CreateFAdd(Op0, Op1, "vpadds");
+ }
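+ // The three pairwise cases above are likewise open-coded: extract lanes 0
+ // and 1 of the two-element vector and combine them with a single add/fadd,
+ // e.g. vpadds_f32(v) is two extractelements followed by one fadd.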
+ case NEON::BI__builtin_neon_vceqzd_s64:
+ case NEON::BI__builtin_neon_vceqzd_f64:
+ case NEON::BI__builtin_neon_vceqzs_f32:
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ return EmitAArch64CompareBuiltinExpr(
+ Ops[0], ConvertType(E->getCallReturnType(getContext())),
+ ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz");
+ case NEON::BI__builtin_neon_vcgezd_s64:
+ case NEON::BI__builtin_neon_vcgezd_f64:
+ case NEON::BI__builtin_neon_vcgezs_f32:
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ return EmitAArch64CompareBuiltinExpr(
+ Ops[0], ConvertType(E->getCallReturnType(getContext())),
+ ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez");
+ case NEON::BI__builtin_neon_vclezd_s64:
+ case NEON::BI__builtin_neon_vclezd_f64:
+ case NEON::BI__builtin_neon_vclezs_f32:
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ return EmitAArch64CompareBuiltinExpr(
+ Ops[0], ConvertType(E->getCallReturnType(getContext())),
+ ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez");
+ case NEON::BI__builtin_neon_vcgtzd_s64:
+ case NEON::BI__builtin_neon_vcgtzd_f64:
+ case NEON::BI__builtin_neon_vcgtzs_f32:
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ return EmitAArch64CompareBuiltinExpr(
+ Ops[0], ConvertType(E->getCallReturnType(getContext())),
+ ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz");
+ case NEON::BI__builtin_neon_vcltzd_s64:
+ case NEON::BI__builtin_neon_vcltzd_f64:
+ case NEON::BI__builtin_neon_vcltzs_f32:
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ return EmitAArch64CompareBuiltinExpr(
+ Ops[0], ConvertType(E->getCallReturnType(getContext())),
+ ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");
+
+ case NEON::BI__builtin_neon_vceqzd_u64: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
+ Ops[0] =
+ Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
+ return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
+ }
+ case NEON::BI__builtin_neon_vceqd_f64:
+ case NEON::BI__builtin_neon_vcled_f64:
+ case NEON::BI__builtin_neon_vcltd_f64:
+ case NEON::BI__builtin_neon_vcged_f64:
+ case NEON::BI__builtin_neon_vcgtd_f64: {
+ llvm::CmpInst::Predicate P;
+ switch (BuiltinID) {
+ default: llvm_unreachable("missing builtin ID in switch!");
+ case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
+ case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
+ case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
+ case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
+ case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
+ }
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
+ Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
+ return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
+ }
+ case NEON::BI__builtin_neon_vceqs_f32:
+ case NEON::BI__builtin_neon_vcles_f32:
+ case NEON::BI__builtin_neon_vclts_f32:
+ case NEON::BI__builtin_neon_vcges_f32:
+ case NEON::BI__builtin_neon_vcgts_f32: {
+ llvm::CmpInst::Predicate P;
+ switch (BuiltinID) {
+ default: llvm_unreachable("missing builtin ID in switch!");
+ case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
+ case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
+ case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
+ case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
+ case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
+ }
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
+ Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
+ return Builder.CreateSExt(Ops[0], Int32Ty, "vcmps");
+ }
+ case NEON::BI__builtin_neon_vceqd_s64:
+ case NEON::BI__builtin_neon_vceqd_u64:
+ case NEON::BI__builtin_neon_vcgtd_s64:
+ case NEON::BI__builtin_neon_vcgtd_u64:
+ case NEON::BI__builtin_neon_vcltd_s64:
+ case NEON::BI__builtin_neon_vcltd_u64:
+ case NEON::BI__builtin_neon_vcged_u64:
+ case NEON::BI__builtin_neon_vcged_s64:
+ case NEON::BI__builtin_neon_vcled_u64:
+ case NEON::BI__builtin_neon_vcled_s64: {
+ llvm::CmpInst::Predicate P;
+ switch (BuiltinID) {
+ default: llvm_unreachable("missing builtin ID in switch!");
+ case NEON::BI__builtin_neon_vceqd_s64:
+ case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break;
+ case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break;
+ case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break;
+ case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break;
+ case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break;
+ case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break;
+ case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break;
+ case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break;
+ case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break;
+ }
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
+ Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
+ return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
+ }
+ case NEON::BI__builtin_neon_vtstd_s64:
+ case NEON::BI__builtin_neon_vtstd_u64: {
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
+ Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
+ Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
+ llvm::Constant::getNullValue(Int64Ty));
+ return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
+ }
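+ // The vtstd cases above implement a bit test: all-ones when (a & b) != 0,
+ // zero otherwise. E.g. vtstd_u64(0x0f, 0xf0) yields 0, while
+ // vtstd_u64(0x0f, 0x01) yields ~0ULL.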
+ case NEON::BI__builtin_neon_vset_lane_i8:
+ case NEON::BI__builtin_neon_vset_lane_i16:
+ case NEON::BI__builtin_neon_vset_lane_i32:
+ case NEON::BI__builtin_neon_vset_lane_i64:
+ case NEON::BI__builtin_neon_vset_lane_f32:
+ case NEON::BI__builtin_neon_vsetq_lane_i8:
+ case NEON::BI__builtin_neon_vsetq_lane_i16:
+ case NEON::BI__builtin_neon_vsetq_lane_i32:
+ case NEON::BI__builtin_neon_vsetq_lane_i64:
+ case NEON::BI__builtin_neon_vsetq_lane_f32:
+ Ops.push_back(EmitScalarExpr(E->getArg(2)));
+ return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
+ case NEON::BI__builtin_neon_vset_lane_f64:
+ // The vector type needs a cast for the v1f64 variant.
+ Ops[1] = Builder.CreateBitCast(Ops[1],
+ llvm::VectorType::get(DoubleTy, 1));
+ Ops.push_back(EmitScalarExpr(E->getArg(2)));
+ return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
+ case NEON::BI__builtin_neon_vsetq_lane_f64:
+ // The vector type needs a cast for the v2f64 variant.
+ Ops[1] = Builder.CreateBitCast(Ops[1],
+ llvm::VectorType::get(DoubleTy, 2));
+ Ops.push_back(EmitScalarExpr(E->getArg(2)));
+ return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
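+ // Each vset_lane variant is a single insertelement, e.g.
+ // vsetq_lane_f64(x, v, 1) becomes, roughly:
+ //   %r = insertelement <2 x double> %v, double %x, i32 1
+ // Only the f64 variants need the explicit bitcast on the vector operand.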
+
+ case NEON::BI__builtin_neon_vget_lane_i8:
+ case NEON::BI__builtin_neon_vdupb_lane_i8:
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 8));
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vget_lane");
+ case NEON::BI__builtin_neon_vgetq_lane_i8:
+ case NEON::BI__builtin_neon_vdupb_laneq_i8:
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 16));
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vgetq_lane");
+ case NEON::BI__builtin_neon_vget_lane_i16:
+ case NEON::BI__builtin_neon_vduph_lane_i16:
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 4));
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vget_lane");
+ case NEON::BI__builtin_neon_vgetq_lane_i16:
+ case NEON::BI__builtin_neon_vduph_laneq_i16:
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 8));
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vgetq_lane");
+ case NEON::BI__builtin_neon_vget_lane_i32:
+ case NEON::BI__builtin_neon_vdups_lane_i32:
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 2));
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vget_lane");
+ case NEON::BI__builtin_neon_vdups_lane_f32:
+ Ops[0] = Builder.CreateBitCast(Ops[0],
+ llvm::VectorType::get(FloatTy, 2));
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vdups_lane");
+ case NEON::BI__builtin_neon_vgetq_lane_i32:
+ case NEON::BI__builtin_neon_vdups_laneq_i32:
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vgetq_lane");
+ case NEON::BI__builtin_neon_vget_lane_i64:
+ case NEON::BI__builtin_neon_vdupd_lane_i64:
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 1));
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vget_lane");
+ case NEON::BI__builtin_neon_vdupd_lane_f64:
+ Ops[0] = Builder.CreateBitCast(Ops[0],
+ llvm::VectorType::get(DoubleTy, 1));
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vdupd_lane");
+ case NEON::BI__builtin_neon_vgetq_lane_i64:
+ case NEON::BI__builtin_neon_vdupd_laneq_i64:
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vgetq_lane");
+ case NEON::BI__builtin_neon_vget_lane_f32:
+ Ops[0] = Builder.CreateBitCast(Ops[0],
+ llvm::VectorType::get(FloatTy, 2));
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vget_lane");
+ case NEON::BI__builtin_neon_vget_lane_f64:
+ Ops[0] = Builder.CreateBitCast(Ops[0],
+ llvm::VectorType::get(DoubleTy, 1));
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vget_lane");
+ case NEON::BI__builtin_neon_vgetq_lane_f32:
+ case NEON::BI__builtin_neon_vdups_laneq_f32:
+ Ops[0] = Builder.CreateBitCast(Ops[0],
+ llvm::VectorType::get(FloatTy, 4));
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vgetq_lane");
+ case NEON::BI__builtin_neon_vgetq_lane_f64:
+ case NEON::BI__builtin_neon_vdupd_laneq_f64:
+ Ops[0] = Builder.CreateBitCast(Ops[0],
+ llvm::VectorType::get(DoubleTy, 2));
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vgetq_lane");
+ case NEON::BI__builtin_neon_vaddd_s64:
+ case NEON::BI__builtin_neon_vaddd_u64:
+ return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
+ case NEON::BI__builtin_neon_vsubd_s64:
+ case NEON::BI__builtin_neon_vsubd_u64:
+ return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
+ case NEON::BI__builtin_neon_vqdmlalh_s16:
+ case NEON::BI__builtin_neon_vqdmlslh_s16: {
+ SmallVector<Value *, 2> ProductOps;
+ ProductOps.push_back(vectorWrapScalar16(Ops[1]));
+ ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
+ llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
+ Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
+ ProductOps, "vqdmlXl");
+ Constant *CI = ConstantInt::get(SizeTy, 0);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
+
+ unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
+ ? Intrinsic::aarch64_neon_sqadd
+ : Intrinsic::aarch64_neon_sqsub;
+ return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
+ }
+ case NEON::BI__builtin_neon_vqshlud_n_s64: {
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
+ Ops, "vqshlu_n");
+ }
+ case NEON::BI__builtin_neon_vqshld_n_u64:
+ case NEON::BI__builtin_neon_vqshld_n_s64: {
+ unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
+ ? Intrinsic::aarch64_neon_uqshl
+ : Intrinsic::aarch64_neon_sqshl;
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
+ return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
+ }
+ case NEON::BI__builtin_neon_vrshrd_n_u64:
+ case NEON::BI__builtin_neon_vrshrd_n_s64: {
+ unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
+ ? Intrinsic::aarch64_neon_urshl
+ : Intrinsic::aarch64_neon_srshl;
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
+ Ops[1] = ConstantInt::get(Int64Ty, -SV);
+ return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
+ }
+ case NEON::BI__builtin_neon_vrsrad_n_u64:
+ case NEON::BI__builtin_neon_vrsrad_n_s64: {
+ unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
+ ? Intrinsic::aarch64_neon_urshl
+ : Intrinsic::aarch64_neon_srshl;
+ Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
+ Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
+ Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
+ {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
+ return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
+ }
+ case NEON::BI__builtin_neon_vshld_n_s64:
+ case NEON::BI__builtin_neon_vshld_n_u64: {
+ llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
+ return Builder.CreateShl(
+ Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
+ }
+ case NEON::BI__builtin_neon_vshrd_n_s64: {
+ llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
+ return Builder.CreateAShr(
+ Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
+ Amt->getZExtValue())),
+ "shrd_n");
+ }
+ case NEON::BI__builtin_neon_vshrd_n_u64: {
+ llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
+ uint64_t ShiftAmt = Amt->getZExtValue();
+ // Right-shifting an unsigned value by its size yields 0.
+ if (ShiftAmt == 64)
+ return ConstantInt::get(Int64Ty, 0);
+ return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
+ "shrd_n");
+ }
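+ // The explicit shift-by-64 handling matters because an LLVM lshr by the
+ // full bit width is undefined, whereas the builtin is well-defined and
+ // yields 0; the signed variant above instead clamps the shift to 63.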
+ case NEON::BI__builtin_neon_vsrad_n_s64: {
+ llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
+ Ops[1] = Builder.CreateAShr(
+ Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
+ Amt->getZExtValue())),
+ "shrd_n");
+ return Builder.CreateAdd(Ops[0], Ops[1]);
+ }
+ case NEON::BI__builtin_neon_vsrad_n_u64: {
+ llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
+ uint64_t ShiftAmt = Amt->getZExtValue();
+ // Right-shifting an unsigned value by its size yields 0.
+ // As Op + 0 = Op, return Ops[0] directly.
+ if (ShiftAmt == 64)
+ return Ops[0];
+ Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
+ "shrd_n");
+ return Builder.CreateAdd(Ops[0], Ops[1]);
+ }
+ case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
+ case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
+ case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
+ case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
+ Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
+ "lane");
+ SmallVector<Value *, 2> ProductOps;
+ ProductOps.push_back(vectorWrapScalar16(Ops[1]));
+ ProductOps.push_back(vectorWrapScalar16(Ops[2]));
+ llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
+ Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
+ ProductOps, "vqdmlXl");
+ Constant *CI = ConstantInt::get(SizeTy, 0);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
+ Ops.pop_back();
+
+ unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
+ BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
+ ? Intrinsic::aarch64_neon_sqadd
+ : Intrinsic::aarch64_neon_sqsub;
+ return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
+ }
+ case NEON::BI__builtin_neon_vqdmlals_s32:
+ case NEON::BI__builtin_neon_vqdmlsls_s32: {
+ SmallVector<Value *, 2> ProductOps;
+ ProductOps.push_back(Ops[1]);
+ ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
+ Ops[1] =
+ EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
+ ProductOps, "vqdmlXl");
+
+ unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
+ ? Intrinsic::aarch64_neon_sqadd
+ : Intrinsic::aarch64_neon_sqsub;
+ return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
+ }
+ case NEON::BI__builtin_neon_vqdmlals_lane_s32:
+ case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
+ case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
+ case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
+ Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
+ "lane");
+ SmallVector<Value *, 2> ProductOps;
+ ProductOps.push_back(Ops[1]);
+ ProductOps.push_back(Ops[2]);
+ Ops[1] =
+ EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
+ ProductOps, "vqdmlXl");
+ Ops.pop_back();
+
+ unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
+ BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
+ ? Intrinsic::aarch64_neon_sqadd
+ : Intrinsic::aarch64_neon_sqsub;
+ return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
+ }
+ }
+
+ llvm::VectorType *VTy = GetNeonType(this, Type);
+ llvm::Type *Ty = VTy;
+ if (!Ty)
+ return nullptr;
+
+ // Not all intrinsics handled by the common case work for AArch64 yet, so
+ // only defer to common code if the builtin has been added to our special map.
+ Builtin = findNeonIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
+ AArch64SIMDIntrinsicsProvenSorted);
+
+ if (Builtin)
+ return EmitCommonNeonBuiltinExpr(
+ Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
+ Builtin->NameHint, Builtin->TypeModifier, E, Ops,
+ /*never use addresses*/ Address::invalid(), Address::invalid());
+
+ if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops))
+ return V;
+
+ unsigned Int;
+ switch (BuiltinID) {
+ default: return nullptr;
+ case NEON::BI__builtin_neon_vbsl_v:
+ case NEON::BI__builtin_neon_vbslq_v: {
+ llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
+ Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
+ Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
+
+ Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
+ Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
+ Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
+ return Builder.CreateBitCast(Ops[0], Ty);
+ }
+ case NEON::BI__builtin_neon_vfma_lane_v:
+ case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
+ // The ARM builtins (and instructions) have the addend as the first
+ // operand, but the 'fma' intrinsics have it last. Swap it around here.
+ Value *Addend = Ops[0];
+ Value *Multiplicand = Ops[1];
+ Value *LaneSource = Ops[2];
+ Ops[0] = Multiplicand;
+ Ops[1] = LaneSource;
+ Ops[2] = Addend;
+
+ // Now adjust things to handle the lane access.
+ llvm::Type *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v ?
+ llvm::VectorType::get(VTy->getElementType(), VTy->getNumElements() / 2) :
+ VTy;
+ llvm::Constant *cst = cast<Constant>(Ops[3]);
+ Value *SV = llvm::ConstantVector::getSplat(VTy->getNumElements(), cst);
+ Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
+ Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
+
+ Ops.pop_back();
+ Int = Intrinsic::fma;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
+ }
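+ // E.g. vfma_lane_f32(a, b, v, 1) becomes, roughly:
+ //   %lane = shufflevector <2 x float> %v, <2 x float> %v,
+ //                         <2 x i32> <i32 1, i32 1>
+ //   %r = call <2 x float> @llvm.fma.v2f32(<2 x float> %b,
+ //                                         <2 x float> %lane, <2 x float> %a)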
+ case NEON::BI__builtin_neon_vfma_laneq_v: {
+ llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+ // v1f64 fma should be mapped to Neon scalar f64 fma
+ if (VTy && VTy->getElementType() == DoubleTy) {
+ Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
+ llvm::Type *VTy = GetNeonType(this,
+ NeonTypeFlags(NeonTypeFlags::Float64, false, true));
+ Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
+ Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
+ Value *Result = Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
+ return Builder.CreateBitCast(Result, Ty);
+ }
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+
+ llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
+ VTy->getNumElements() * 2);
+ Ops[2] = Builder.CreateBitCast(Ops[2], STy);
+ Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
+ cast<ConstantInt>(Ops[3]));
+ Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
+
+ return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
+ }
+ case NEON::BI__builtin_neon_vfmaq_laneq_v: {
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
+ return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
+ }
+ case NEON::BI__builtin_neon_vfmas_lane_f32:
+ case NEON::BI__builtin_neon_vfmas_laneq_f32:
+ case NEON::BI__builtin_neon_vfmad_lane_f64:
+ case NEON::BI__builtin_neon_vfmad_laneq_f64: {
+ Ops.push_back(EmitScalarExpr(E->getArg(3)));
+ llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
+ Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
+ return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
+ }
+ case NEON::BI__builtin_neon_vfms_v:
+ case NEON::BI__builtin_neon_vfmsq_v: { // Only used for FP types
+ // FIXME: probably remove when we no longer support aarch64_simd.h
+ // (arm_neon.h delegates to vfma).
+
+ // The ARM builtins (and instructions) have the addend as the first
+ // operand, but the 'fma' intrinsics have it last. Swap it around here.
+ Value *Subtrahend = Ops[0];
+ Value *Multiplicand = Ops[2];
+ Ops[0] = Multiplicand;
+ Ops[2] = Subtrahend;
+ Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
+ Ops[1] = Builder.CreateFNeg(Ops[1]);
+ Int = Intrinsic::fma;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmls");
+ }
+ case NEON::BI__builtin_neon_vmull_v:
+ // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
+ Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
+ if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
+ case NEON::BI__builtin_neon_vmax_v:
+ case NEON::BI__builtin_neon_vmaxq_v:
+ // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
+ Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
+ if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
+ case NEON::BI__builtin_neon_vmin_v:
+ case NEON::BI__builtin_neon_vminq_v:
+ // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
+ Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
+ if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
+ case NEON::BI__builtin_neon_vabd_v:
+ case NEON::BI__builtin_neon_vabdq_v:
+ // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
+ Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
+ if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
+ case NEON::BI__builtin_neon_vpadal_v:
+ case NEON::BI__builtin_neon_vpadalq_v: {
+ unsigned ArgElts = VTy->getNumElements();
+ llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
+ unsigned BitWidth = EltTy->getBitWidth();
+ llvm::Type *ArgTy = llvm::VectorType::get(
+ llvm::IntegerType::get(getLLVMContext(), BitWidth/2), 2*ArgElts);
+ llvm::Type* Tys[2] = { VTy, ArgTy };
+ Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
+ SmallVector<llvm::Value*, 1> TmpOps;
+ TmpOps.push_back(Ops[1]);
+ Function *F = CGM.getIntrinsic(Int, Tys);
+ llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
+ llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
+ return Builder.CreateAdd(tmp, addend);
+ }
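+ // vpadal (pairwise add and accumulate long) is split in two here: the
+ // widening pairwise add uses @llvm.aarch64.neon.[us]addlp and the
+ // accumulation is an ordinary vector add.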
+ case NEON::BI__builtin_neon_vpmin_v:
+ case NEON::BI__builtin_neon_vpminq_v:
+ // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
+ Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
+ if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
+ case NEON::BI__builtin_neon_vpmax_v:
+ case NEON::BI__builtin_neon_vpmaxq_v:
+ // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
+ Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
+ if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
+ case NEON::BI__builtin_neon_vminnm_v:
+ case NEON::BI__builtin_neon_vminnmq_v:
+ Int = Intrinsic::aarch64_neon_fminnm;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
+ case NEON::BI__builtin_neon_vmaxnm_v:
+ case NEON::BI__builtin_neon_vmaxnmq_v:
+ Int = Intrinsic::aarch64_neon_fmaxnm;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
+ case NEON::BI__builtin_neon_vrecpss_f32: {
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
+ Ops, "vrecps");
+ }
+ case NEON::BI__builtin_neon_vrecpsd_f64: {
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
+ Ops, "vrecps");
+ }
+ case NEON::BI__builtin_neon_vqshrun_n_v:
+ Int = Intrinsic::aarch64_neon_sqshrun;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
+ case NEON::BI__builtin_neon_vqrshrun_n_v:
+ Int = Intrinsic::aarch64_neon_sqrshrun;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
+ case NEON::BI__builtin_neon_vqshrn_n_v:
+ Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
+ case NEON::BI__builtin_neon_vrshrn_n_v:
+ Int = Intrinsic::aarch64_neon_rshrn;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
+ case NEON::BI__builtin_neon_vqrshrn_n_v:
+ Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
+ case NEON::BI__builtin_neon_vrnda_v:
+ case NEON::BI__builtin_neon_vrndaq_v: {
+ Int = Intrinsic::round;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
+ }
+ case NEON::BI__builtin_neon_vrndi_v:
+ case NEON::BI__builtin_neon_vrndiq_v: {
+ Int = Intrinsic::nearbyint;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndi");
+ }
+ case NEON::BI__builtin_neon_vrndm_v:
+ case NEON::BI__builtin_neon_vrndmq_v: {
+ Int = Intrinsic::floor;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
+ }
+ case NEON::BI__builtin_neon_vrndn_v:
+ case NEON::BI__builtin_neon_vrndnq_v: {
+ Int = Intrinsic::aarch64_neon_frintn;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
+ }
+ case NEON::BI__builtin_neon_vrndp_v:
+ case NEON::BI__builtin_neon_vrndpq_v: {
+ Int = Intrinsic::ceil;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
+ }
+ case NEON::BI__builtin_neon_vrndx_v:
+ case NEON::BI__builtin_neon_vrndxq_v: {
+ Int = Intrinsic::rint;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
+ }
+ case NEON::BI__builtin_neon_vrnd_v:
+ case NEON::BI__builtin_neon_vrndq_v: {
+ Int = Intrinsic::trunc;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
+ }
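+ // The rounding family above maps mostly onto generic LLVM intrinsics:
+ // vrnda -> llvm.round, vrndi -> llvm.nearbyint, vrndm -> llvm.floor,
+ // vrndp -> llvm.ceil, vrndx -> llvm.rint, vrnd -> llvm.trunc. Only vrndn
+ // (round to nearest, ties to even) needs the target-specific frintn.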
+ case NEON::BI__builtin_neon_vceqz_v:
+ case NEON::BI__builtin_neon_vceqzq_v:
+ return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
+ ICmpInst::ICMP_EQ, "vceqz");
+ case NEON::BI__builtin_neon_vcgez_v:
+ case NEON::BI__builtin_neon_vcgezq_v:
+ return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
+ ICmpInst::ICMP_SGE, "vcgez");
+ case NEON::BI__builtin_neon_vclez_v:
+ case NEON::BI__builtin_neon_vclezq_v:
+ return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
+ ICmpInst::ICMP_SLE, "vclez");
+ case NEON::BI__builtin_neon_vcgtz_v:
+ case NEON::BI__builtin_neon_vcgtzq_v:
+ return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
+ ICmpInst::ICMP_SGT, "vcgtz");
+ case NEON::BI__builtin_neon_vcltz_v:
+ case NEON::BI__builtin_neon_vcltzq_v:
+ return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
+ ICmpInst::ICMP_SLT, "vcltz");
+ case NEON::BI__builtin_neon_vcvt_f64_v:
+ case NEON::BI__builtin_neon_vcvtq_f64_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
+ return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
+ : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
+ case NEON::BI__builtin_neon_vcvt_f64_f32: {
+ assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
+ "unexpected vcvt_f64_f32 builtin");
+ NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
+ Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
+
+ return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
+ }
+ case NEON::BI__builtin_neon_vcvt_f32_f64: {
+ assert(Type.getEltType() == NeonTypeFlags::Float32 &&
+ "unexpected vcvt_f32_f64 builtin");
+ NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
+ Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
+
+ return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
+ }
+ case NEON::BI__builtin_neon_vcvt_s32_v:
+ case NEON::BI__builtin_neon_vcvt_u32_v:
+ case NEON::BI__builtin_neon_vcvt_s64_v:
+ case NEON::BI__builtin_neon_vcvt_u64_v:
+ case NEON::BI__builtin_neon_vcvtq_s32_v:
+ case NEON::BI__builtin_neon_vcvtq_u32_v:
+ case NEON::BI__builtin_neon_vcvtq_s64_v:
+ case NEON::BI__builtin_neon_vcvtq_u64_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
+ if (usgn)
+ return Builder.CreateFPToUI(Ops[0], Ty);
+ return Builder.CreateFPToSI(Ops[0], Ty);
+ }
+ case NEON::BI__builtin_neon_vcvta_s32_v:
+ case NEON::BI__builtin_neon_vcvtaq_s32_v:
+ case NEON::BI__builtin_neon_vcvta_u32_v:
+ case NEON::BI__builtin_neon_vcvtaq_u32_v:
+ case NEON::BI__builtin_neon_vcvta_s64_v:
+ case NEON::BI__builtin_neon_vcvtaq_s64_v:
+ case NEON::BI__builtin_neon_vcvta_u64_v:
+ case NEON::BI__builtin_neon_vcvtaq_u64_v: {
+ Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
+ llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
+ }
+ case NEON::BI__builtin_neon_vcvtm_s32_v:
+ case NEON::BI__builtin_neon_vcvtmq_s32_v:
+ case NEON::BI__builtin_neon_vcvtm_u32_v:
+ case NEON::BI__builtin_neon_vcvtmq_u32_v:
+ case NEON::BI__builtin_neon_vcvtm_s64_v:
+ case NEON::BI__builtin_neon_vcvtmq_s64_v:
+ case NEON::BI__builtin_neon_vcvtm_u64_v:
+ case NEON::BI__builtin_neon_vcvtmq_u64_v: {
+ Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
+ llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
+ }
+ case NEON::BI__builtin_neon_vcvtn_s32_v:
+ case NEON::BI__builtin_neon_vcvtnq_s32_v:
+ case NEON::BI__builtin_neon_vcvtn_u32_v:
+ case NEON::BI__builtin_neon_vcvtnq_u32_v:
+ case NEON::BI__builtin_neon_vcvtn_s64_v:
+ case NEON::BI__builtin_neon_vcvtnq_s64_v:
+ case NEON::BI__builtin_neon_vcvtn_u64_v:
+ case NEON::BI__builtin_neon_vcvtnq_u64_v: {
+ Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
+ llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
+ }
+ case NEON::BI__builtin_neon_vcvtp_s32_v:
+ case NEON::BI__builtin_neon_vcvtpq_s32_v:
+ case NEON::BI__builtin_neon_vcvtp_u32_v:
+ case NEON::BI__builtin_neon_vcvtpq_u32_v:
+ case NEON::BI__builtin_neon_vcvtp_s64_v:
+ case NEON::BI__builtin_neon_vcvtpq_s64_v:
+ case NEON::BI__builtin_neon_vcvtp_u64_v:
+ case NEON::BI__builtin_neon_vcvtpq_u64_v: {
+ Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
+ llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
+ }
+ case NEON::BI__builtin_neon_vmulx_v:
+ case NEON::BI__builtin_neon_vmulxq_v: {
+ Int = Intrinsic::aarch64_neon_fmulx;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
+ }
+ case NEON::BI__builtin_neon_vmul_lane_v:
+ case NEON::BI__builtin_neon_vmul_laneq_v: {
+ // v1f64 vmul_lane should be mapped to Neon scalar mul lane
+ bool Quad = BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v;
+ Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
+ llvm::Type *VTy = GetNeonType(this,
+ NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
+ Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
+ Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
+ return Builder.CreateBitCast(Result, Ty);
+ }
+ case NEON::BI__builtin_neon_vnegd_s64:
+ return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
+ case NEON::BI__builtin_neon_vpmaxnm_v:
+ case NEON::BI__builtin_neon_vpmaxnmq_v: {
+ Int = Intrinsic::aarch64_neon_fmaxnmp;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
+ }
+ case NEON::BI__builtin_neon_vpminnm_v:
+ case NEON::BI__builtin_neon_vpminnmq_v: {
+ Int = Intrinsic::aarch64_neon_fminnmp;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
+ }
+ case NEON::BI__builtin_neon_vsqrt_v:
+ case NEON::BI__builtin_neon_vsqrtq_v: {
+ Int = Intrinsic::sqrt;
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
+ }
+ case NEON::BI__builtin_neon_vrbit_v:
+ case NEON::BI__builtin_neon_vrbitq_v: {
+ Int = Intrinsic::aarch64_neon_rbit;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
+ }
+ case NEON::BI__builtin_neon_vaddv_u8:
+ // FIXME: These are handled by the AArch64 scalar code.
+ usgn = true;
+ // FALLTHROUGH
+ case NEON::BI__builtin_neon_vaddv_s8: {
+ Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 8);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
+ return Builder.CreateTrunc(Ops[0], Int8Ty);
+ }
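+ // These across-vector reductions (here and below) return their result in
+ // a 32-bit register, so e.g. vaddv_u8 calls
+ // @llvm.aarch64.neon.uaddv.i32.v8i8 and truncates the i32 result to i8.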
+ case NEON::BI__builtin_neon_vaddv_u16:
+ usgn = true;
+ // FALLTHROUGH
+ case NEON::BI__builtin_neon_vaddv_s16: {
+ Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 4);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
+ }
+ case NEON::BI__builtin_neon_vaddvq_u8:
+ usgn = true;
+ // FALLTHROUGH
+ case NEON::BI__builtin_neon_vaddvq_s8: {
+ Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 16);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
+ return Builder.CreateTrunc(Ops[0], Int8Ty);
+ }
+ case NEON::BI__builtin_neon_vaddvq_u16:
+ usgn = true;
+ // FALLTHROUGH
+ case NEON::BI__builtin_neon_vaddvq_s16: {
+ Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 8);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
+ }
+ case NEON::BI__builtin_neon_vmaxv_u8: {
+ Int = Intrinsic::aarch64_neon_umaxv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 8);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
+ return Builder.CreateTrunc(Ops[0], Int8Ty);
+ }
+ case NEON::BI__builtin_neon_vmaxv_u16: {
+ Int = Intrinsic::aarch64_neon_umaxv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 4);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
+ }
+ case NEON::BI__builtin_neon_vmaxvq_u8: {
+ Int = Intrinsic::aarch64_neon_umaxv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 16);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
+ return Builder.CreateTrunc(Ops[0], Int8Ty);
+ }
+ case NEON::BI__builtin_neon_vmaxvq_u16: {
+ Int = Intrinsic::aarch64_neon_umaxv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 8);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
+ }
+ case NEON::BI__builtin_neon_vmaxv_s8: {
+ Int = Intrinsic::aarch64_neon_smaxv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 8);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
+ return Builder.CreateTrunc(Ops[0], Int8Ty);
+ }
+ case NEON::BI__builtin_neon_vmaxv_s16: {
+ Int = Intrinsic::aarch64_neon_smaxv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 4);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
+ }
+ case NEON::BI__builtin_neon_vmaxvq_s8: {
+ Int = Intrinsic::aarch64_neon_smaxv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 16);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
+ return Builder.CreateTrunc(Ops[0], Int8Ty);
+ }
+ case NEON::BI__builtin_neon_vmaxvq_s16: {
+ Int = Intrinsic::aarch64_neon_smaxv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 8);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
+ }
+ case NEON::BI__builtin_neon_vminv_u8: {
+ Int = Intrinsic::aarch64_neon_uminv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 8);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
+ return Builder.CreateTrunc(Ops[0], Int8Ty);
+ }
+ case NEON::BI__builtin_neon_vminv_u16: {
+ Int = Intrinsic::aarch64_neon_uminv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 4);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
+ }
+ case NEON::BI__builtin_neon_vminvq_u8: {
+ Int = Intrinsic::aarch64_neon_uminv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 16);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
+ return Builder.CreateTrunc(Ops[0], Int8Ty);
+ }
+ case NEON::BI__builtin_neon_vminvq_u16: {
+ Int = Intrinsic::aarch64_neon_uminv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 8);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
+ }
+ case NEON::BI__builtin_neon_vminv_s8: {
+ Int = Intrinsic::aarch64_neon_sminv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 8);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
+ return Builder.CreateTrunc(Ops[0], Int8Ty);
+ }
+ case NEON::BI__builtin_neon_vminv_s16: {
+ Int = Intrinsic::aarch64_neon_sminv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 4);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
+ }
+ case NEON::BI__builtin_neon_vminvq_s8: {
+ Int = Intrinsic::aarch64_neon_sminv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 16);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
+ return Builder.CreateTrunc(Ops[0], Int8Ty);
+ }
+ case NEON::BI__builtin_neon_vminvq_s16: {
+ Int = Intrinsic::aarch64_neon_sminv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 8);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
+ }
+ case NEON::BI__builtin_neon_vmul_n_f64: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
+ Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
+ return Builder.CreateFMul(Ops[0], RHS);
+ }
+ case NEON::BI__builtin_neon_vaddlv_u8: {
+ Int = Intrinsic::aarch64_neon_uaddlv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 8);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
+ }
+ case NEON::BI__builtin_neon_vaddlv_u16: {
+ Int = Intrinsic::aarch64_neon_uaddlv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 4);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
+ }
+ case NEON::BI__builtin_neon_vaddlvq_u8: {
+ Int = Intrinsic::aarch64_neon_uaddlv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 16);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
+ }
+ case NEON::BI__builtin_neon_vaddlvq_u16: {
+ Int = Intrinsic::aarch64_neon_uaddlv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 8);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
+ }
+ case NEON::BI__builtin_neon_vaddlv_s8: {
+ Int = Intrinsic::aarch64_neon_saddlv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 8);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
+ }
+ case NEON::BI__builtin_neon_vaddlv_s16: {
+ Int = Intrinsic::aarch64_neon_saddlv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 4);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
+ }
+ case NEON::BI__builtin_neon_vaddlvq_s8: {
+ Int = Intrinsic::aarch64_neon_saddlv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 16);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
+ }
+ case NEON::BI__builtin_neon_vaddlvq_s16: {
+ Int = Intrinsic::aarch64_neon_saddlv;
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 8);
+ llvm::Type *Tys[2] = { Ty, VTy };
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
+ }
+ case NEON::BI__builtin_neon_vsri_n_v:
+ case NEON::BI__builtin_neon_vsriq_n_v: {
+ Int = Intrinsic::aarch64_neon_vsri;
+ llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
+ return EmitNeonCall(Intrin, Ops, "vsri_n");
+ }
+ case NEON::BI__builtin_neon_vsli_n_v:
+ case NEON::BI__builtin_neon_vsliq_n_v: {
+ Int = Intrinsic::aarch64_neon_vsli;
+ llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
+ return EmitNeonCall(Intrin, Ops, "vsli_n");
+ }
+ case NEON::BI__builtin_neon_vsra_n_v:
+ case NEON::BI__builtin_neon_vsraq_n_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
+ return Builder.CreateAdd(Ops[0], Ops[1]);
+ case NEON::BI__builtin_neon_vrsra_n_v:
+ case NEON::BI__builtin_neon_vrsraq_n_v: {
+ Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
+ SmallVector<llvm::Value*,2> TmpOps;
+ TmpOps.push_back(Ops[1]);
+ TmpOps.push_back(Ops[2]);
+ Function* F = CGM.getIntrinsic(Int, Ty);
+ llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
+ Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
+ return Builder.CreateAdd(Ops[0], tmp);
+ }
+ // FIXME: Sharing loads & stores with 32-bit is complicated by the absence
+ // of an Align parameter here.
+ case NEON::BI__builtin_neon_vld1_x2_v:
+ case NEON::BI__builtin_neon_vld1q_x2_v:
+ case NEON::BI__builtin_neon_vld1_x3_v:
+ case NEON::BI__builtin_neon_vld1q_x3_v:
+ case NEON::BI__builtin_neon_vld1_x4_v:
+ case NEON::BI__builtin_neon_vld1q_x4_v: {
+ llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
+ Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
+ llvm::Type *Tys[2] = { VTy, PTy };
+ unsigned Int;
+ switch (BuiltinID) {
+ case NEON::BI__builtin_neon_vld1_x2_v:
+ case NEON::BI__builtin_neon_vld1q_x2_v:
+ Int = Intrinsic::aarch64_neon_ld1x2;
+ break;
+ case NEON::BI__builtin_neon_vld1_x3_v:
+ case NEON::BI__builtin_neon_vld1q_x3_v:
+ Int = Intrinsic::aarch64_neon_ld1x3;
+ break;
+ case NEON::BI__builtin_neon_vld1_x4_v:
+ case NEON::BI__builtin_neon_vld1q_x4_v:
+ Int = Intrinsic::aarch64_neon_ld1x4;
+ break;
+ }
+ Function *F = CGM.getIntrinsic(Int, Tys);
+ Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ }
+ case NEON::BI__builtin_neon_vst1_x2_v:
+ case NEON::BI__builtin_neon_vst1q_x2_v:
+ case NEON::BI__builtin_neon_vst1_x3_v:
+ case NEON::BI__builtin_neon_vst1q_x3_v:
+ case NEON::BI__builtin_neon_vst1_x4_v:
+ case NEON::BI__builtin_neon_vst1q_x4_v: {
+ llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
+ llvm::Type *Tys[2] = { VTy, PTy };
+ unsigned Int;
+ switch (BuiltinID) {
+ case NEON::BI__builtin_neon_vst1_x2_v:
+ case NEON::BI__builtin_neon_vst1q_x2_v:
+ Int = Intrinsic::aarch64_neon_st1x2;
+ break;
+ case NEON::BI__builtin_neon_vst1_x3_v:
+ case NEON::BI__builtin_neon_vst1q_x3_v:
+ Int = Intrinsic::aarch64_neon_st1x3;
+ break;
+ case NEON::BI__builtin_neon_vst1_x4_v:
+ case NEON::BI__builtin_neon_vst1q_x4_v:
+ Int = Intrinsic::aarch64_neon_st1x4;
+ break;
+ }
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
+ }
+ case NEON::BI__builtin_neon_vld1_v:
+ case NEON::BI__builtin_neon_vld1q_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
+ return Builder.CreateDefaultAlignedLoad(Ops[0]);
+ case NEON::BI__builtin_neon_vst1_v:
+ case NEON::BI__builtin_neon_vst1q_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
+ Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ case NEON::BI__builtin_neon_vld1_lane_v:
+ case NEON::BI__builtin_neon_vld1q_lane_v:
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ty = llvm::PointerType::getUnqual(VTy->getElementType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[0] = Builder.CreateDefaultAlignedLoad(Ops[0]);
+ return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
+ case NEON::BI__builtin_neon_vld1_dup_v:
+ case NEON::BI__builtin_neon_vld1q_dup_v: {
+ Value *V = UndefValue::get(Ty);
+ Ty = llvm::PointerType::getUnqual(VTy->getElementType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[0] = Builder.CreateDefaultAlignedLoad(Ops[0]);
+ llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
+ Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
+ return EmitNeonSplat(Ops[0], CI);
+ }
+ case NEON::BI__builtin_neon_vst1_lane_v:
+ case NEON::BI__builtin_neon_vst1q_lane_v:
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ return Builder.CreateDefaultAlignedStore(Ops[1],
+ Builder.CreateBitCast(Ops[0], Ty));
+ case NEON::BI__builtin_neon_vld2_v:
+ case NEON::BI__builtin_neon_vld2q_v: {
+ llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
+ llvm::Type *Tys[2] = { VTy, PTy };
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
+ Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
+ Ops[0] = Builder.CreateBitCast(Ops[0],
+ llvm::PointerType::getUnqual(Ops[1]->getType()));
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ }
+ case NEON::BI__builtin_neon_vld3_v:
+ case NEON::BI__builtin_neon_vld3q_v: {
+ llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
+ llvm::Type *Tys[2] = { VTy, PTy };
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
+ Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
+ Ops[0] = Builder.CreateBitCast(Ops[0],
+ llvm::PointerType::getUnqual(Ops[1]->getType()));
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ }
+ case NEON::BI__builtin_neon_vld4_v:
+ case NEON::BI__builtin_neon_vld4q_v: {
+ llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
+ llvm::Type *Tys[2] = { VTy, PTy };
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
+ Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
+ Ops[0] = Builder.CreateBitCast(Ops[0],
+ llvm::PointerType::getUnqual(Ops[1]->getType()));
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ }
+ case NEON::BI__builtin_neon_vld2_dup_v:
+ case NEON::BI__builtin_neon_vld2q_dup_v: {
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(VTy->getElementType());
+ Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
+ llvm::Type *Tys[2] = { VTy, PTy };
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
+ Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
+ Ops[0] = Builder.CreateBitCast(Ops[0],
+ llvm::PointerType::getUnqual(Ops[1]->getType()));
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ }
+ case NEON::BI__builtin_neon_vld3_dup_v:
+ case NEON::BI__builtin_neon_vld3q_dup_v: {
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(VTy->getElementType());
+ Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
+ llvm::Type *Tys[2] = { VTy, PTy };
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
+ Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
+ Ops[0] = Builder.CreateBitCast(Ops[0],
+ llvm::PointerType::getUnqual(Ops[1]->getType()));
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ }
+ case NEON::BI__builtin_neon_vld4_dup_v:
+ case NEON::BI__builtin_neon_vld4q_dup_v: {
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(VTy->getElementType());
+ Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
+ llvm::Type *Tys[2] = { VTy, PTy };
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
+ Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
+ Ops[0] = Builder.CreateBitCast(Ops[0],
+ llvm::PointerType::getUnqual(Ops[1]->getType()));
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ }
+ case NEON::BI__builtin_neon_vld2_lane_v:
+ case NEON::BI__builtin_neon_vld2q_lane_v: {
+ llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
+ Ops.push_back(Ops[1]);
+ Ops.erase(Ops.begin()+1);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ }
+ case NEON::BI__builtin_neon_vld3_lane_v:
+ case NEON::BI__builtin_neon_vld3q_lane_v: {
+ llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
+ Ops.push_back(Ops[1]);
+ Ops.erase(Ops.begin()+1);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
+ Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ }
+ case NEON::BI__builtin_neon_vld4_lane_v:
+ case NEON::BI__builtin_neon_vld4q_lane_v: {
+ llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
+ Ops.push_back(Ops[1]);
+ Ops.erase(Ops.begin()+1);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
+ Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
+ Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ }
+ case NEON::BI__builtin_neon_vst2_v:
+ case NEON::BI__builtin_neon_vst2q_v: {
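+ // Rotate the destination address (Ops[0]) to the end: the stN intrinsics
+ // take the data vectors first and the address last.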
+ Ops.push_back(Ops[0]);
+ Ops.erase(Ops.begin());
+ llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
+ Ops, "");
+ }
+ case NEON::BI__builtin_neon_vst2_lane_v:
+ case NEON::BI__builtin_neon_vst2q_lane_v: {
+ Ops.push_back(Ops[0]);
+ Ops.erase(Ops.begin());
+ Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
+ llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
+ Ops, "");
+ }
+ case NEON::BI__builtin_neon_vst3_v:
+ case NEON::BI__builtin_neon_vst3q_v: {
+ Ops.push_back(Ops[0]);
+ Ops.erase(Ops.begin());
+ llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
+ Ops, "");
+ }
+ case NEON::BI__builtin_neon_vst3_lane_v:
+ case NEON::BI__builtin_neon_vst3q_lane_v: {
+ Ops.push_back(Ops[0]);
+ Ops.erase(Ops.begin());
+ Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
+ llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
+ Ops, "");
+ }
+ case NEON::BI__builtin_neon_vst4_v:
+ case NEON::BI__builtin_neon_vst4q_v: {
+ Ops.push_back(Ops[0]);
+ Ops.erase(Ops.begin());
+ llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
+ Ops, "");
+ }
+ case NEON::BI__builtin_neon_vst4_lane_v:
+ case NEON::BI__builtin_neon_vst4q_lane_v: {
+ Ops.push_back(Ops[0]);
+ Ops.erase(Ops.begin());
+ Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
+ llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
+ Ops, "");
+ }
+ case NEON::BI__builtin_neon_vtrn_v:
+ case NEON::BI__builtin_neon_vtrnq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV = nullptr;
+
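+ // Each pass vi emits one result: a shuffle interleaving the even (vi == 0)
+ // or odd (vi == 1) elements of the two inputs, stored to consecutive
+ // vectors at Ops[0].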
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+ Indices.push_back(ConstantInt::get(Int32Ty, i+vi));
+ Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi));
+ }
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices);
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
+ SV = Builder.CreateDefaultAlignedStore(SV, Addr);
+ }
+ return SV;
+ }
+ case NEON::BI__builtin_neon_vuzp_v:
+ case NEON::BI__builtin_neon_vuzpq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV = nullptr;
+
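+ // vuzp de-interleaves: pass vi gathers elements 2*i+vi, i.e. all
+ // even-indexed elements first, then all odd-indexed ones.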
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+ Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));
+
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices);
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
+ SV = Builder.CreateDefaultAlignedStore(SV, Addr);
+ }
+ return SV;
+ }
+ case NEON::BI__builtin_neon_vzip_v:
+ case NEON::BI__builtin_neon_vzipq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV = nullptr;
+
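+ // vzip interleaves the low halves of the inputs on the first pass and the
+ // high halves on the second.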
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+ Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
+ Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
+ }
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices);
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
+ SV = Builder.CreateDefaultAlignedStore(SV, Addr);
+ }
+ return SV;
+ }
+ case NEON::BI__builtin_neon_vqtbl1q_v: {
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
+ Ops, "vtbl1");
+ }
+ case NEON::BI__builtin_neon_vqtbl2q_v: {
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
+ Ops, "vtbl2");
+ }
+ case NEON::BI__builtin_neon_vqtbl3q_v: {
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
+ Ops, "vtbl3");
+ }
+ case NEON::BI__builtin_neon_vqtbl4q_v: {
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
+ Ops, "vtbl4");
+ }
+ case NEON::BI__builtin_neon_vqtbx1q_v: {
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
+ Ops, "vtbx1");
+ }
+ case NEON::BI__builtin_neon_vqtbx2q_v: {
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
+ Ops, "vtbx2");
+ }
+ case NEON::BI__builtin_neon_vqtbx3q_v: {
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
+ Ops, "vtbx3");
+ }
+ case NEON::BI__builtin_neon_vqtbx4q_v: {
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
+ Ops, "vtbx4");
+ }
+ case NEON::BI__builtin_neon_vsqadd_v:
+ case NEON::BI__builtin_neon_vsqaddq_v: {
+ Int = Intrinsic::aarch64_neon_usqadd;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
+ }
+ case NEON::BI__builtin_neon_vuqadd_v:
+ case NEON::BI__builtin_neon_vuqaddq_v: {
+ Int = Intrinsic::aarch64_neon_suqadd;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
+ }
+ }
+}
+
+llvm::Value *CodeGenFunction::
+BuildVector(ArrayRef<llvm::Value*> Ops) {
+ assert((Ops.size() & (Ops.size() - 1)) == 0 &&
+ "Not a power-of-two sized vector!");
+ bool AllConstants = true;
+ for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
+ AllConstants &= isa<Constant>(Ops[i]);
+
+ // If this is a constant vector, create a ConstantVector.
+ if (AllConstants) {
+ SmallVector<llvm::Constant*, 16> CstOps;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ CstOps.push_back(cast<Constant>(Ops[i]));
+ return llvm::ConstantVector::get(CstOps);
+ }
+
+ // Otherwise, insertelement the values to build the vector.
+ Value *Result =
+ llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));
+
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
+
+ return Result;
+}
+
+Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ if (BuiltinID == X86::BI__builtin_ms_va_start ||
+ BuiltinID == X86::BI__builtin_ms_va_end)
+ return EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
+ BuiltinID == X86::BI__builtin_ms_va_start);
+ if (BuiltinID == X86::BI__builtin_ms_va_copy) {
+ // Lower this manually. We can't reliably determine whether or not any
+ // given va_copy() is for a Win64 va_list from the calling convention
+ // alone, because it's legal to do this from a System V ABI function.
+ // With opaque pointer types, we won't have enough information in LLVM
+ // IR to determine this from the argument types, either. Best to do it
+ // now, while we have enough information.
+ Address DestAddr = EmitMSVAListRef(E->getArg(0));
+ Address SrcAddr = EmitMSVAListRef(E->getArg(1));
+
+ llvm::Type *BPP = Int8PtrPtrTy;
+
+ DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
+ DestAddr.getAlignment());
+ SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
+ SrcAddr.getAlignment());
+
+ Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
+ return Builder.CreateStore(ArgPtr, DestAddr);
+ }
+
+ SmallVector<Value*, 4> Ops;
+
+ // Find out if any arguments are required to be integer constant expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ assert(Error == ASTContext::GE_None && "Should not codegen an error");
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
+ // If this is a normal argument, just emit it as a scalar.
+ if ((ICEArguments & (1 << i)) == 0) {
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ continue;
+ }
+
+ // If this is required to be a constant, constant fold it so that we know
+ // that the generated intrinsic gets a ConstantInt.
+ llvm::APSInt Result;
+ bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
+ assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
+ Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
+ }
+
+ switch (BuiltinID) {
+ default: return nullptr;
+ case X86::BI__builtin_cpu_supports: {
+ const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
+ StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
+
+ // TODO: When/if this becomes more than x86-specific, use a TargetInfo-based
+ // mapping.
+ // Processor features and their bit positions in the feature word.
+ enum X86Features {
+ CMOV = 0,
+ MMX,
+ POPCNT,
+ SSE,
+ SSE2,
+ SSE3,
+ SSSE3,
+ SSE4_1,
+ SSE4_2,
+ AVX,
+ AVX2,
+ SSE4_A,
+ FMA4,
+ XOP,
+ FMA,
+ AVX512F,
+ BMI,
+ BMI2,
+ MAX
+ };
+
+ X86Features Feature = StringSwitch<X86Features>(FeatureStr)
+ .Case("cmov", X86Features::CMOV)
+ .Case("mmx", X86Features::MMX)
+ .Case("popcnt", X86Features::POPCNT)
+ .Case("sse", X86Features::SSE)
+ .Case("sse2", X86Features::SSE2)
+ .Case("sse3", X86Features::SSE3)
+ .Case("sse4.1", X86Features::SSE4_1)
+ .Case("sse4.2", X86Features::SSE4_2)
+ .Case("avx", X86Features::AVX)
+ .Case("avx2", X86Features::AVX2)
+ .Case("sse4a", X86Features::SSE4_A)
+ .Case("fma4", X86Features::FMA4)
+ .Case("xop", X86Features::XOP)
+ .Case("fma", X86Features::FMA)
+ .Case("avx512f", X86Features::AVX512F)
+ .Case("bmi", X86Features::BMI)
+ .Case("bmi2", X86Features::BMI2)
+ .Default(X86Features::MAX);
+ assert(Feature != X86Features::MAX && "Invalid feature!");
+
+ // Matching the struct layout from the compiler-rt/libgcc structure that is
+ // filled in:
+ // unsigned int __cpu_vendor;
+ // unsigned int __cpu_type;
+ // unsigned int __cpu_subtype;
+ // unsigned int __cpu_features[1];
+ llvm::Type *STy = llvm::StructType::get(
+ Int32Ty, Int32Ty, Int32Ty, llvm::ArrayType::get(Int32Ty, 1), nullptr);
+
+ // Grab the global __cpu_model.
+ llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
+
+ // Grab the first (0th) element from the field __cpu_features off of the
+ // global in the struct STy.
+ Value *Idxs[] = {
+ ConstantInt::get(Int32Ty, 0),
+ ConstantInt::get(Int32Ty, 3),
+ ConstantInt::get(Int32Ty, 0)
+ };
+ Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
+ Value *Features = Builder.CreateAlignedLoad(CpuFeatures,
+ CharUnits::fromQuantity(4));
+
+ // Check the value of the bit corresponding to the feature requested.
+ Value *Bitset = Builder.CreateAnd(
+ Features, llvm::ConstantInt::get(Int32Ty, 1 << Feature));
+ return Builder.CreateICmpNE(Bitset, llvm::ConstantInt::get(Int32Ty, 0));
+ }
+ case X86::BI_mm_prefetch: {
+ Value *Address = Ops[0];
+ Value *RW = ConstantInt::get(Int32Ty, 0);
+ Value *Locality = Ops[1];
+ Value *Data = ConstantInt::get(Int32Ty, 1);
+ Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
+ return Builder.CreateCall(F, {Address, RW, Locality, Data});
+ }
+ case X86::BI__builtin_ia32_undef128:
+ case X86::BI__builtin_ia32_undef256:
+ case X86::BI__builtin_ia32_undef512:
+ return UndefValue::get(ConvertType(E->getType()));
+ case X86::BI__builtin_ia32_vec_init_v8qi:
+ case X86::BI__builtin_ia32_vec_init_v4hi:
+ case X86::BI__builtin_ia32_vec_init_v2si:
+ return Builder.CreateBitCast(BuildVector(Ops),
+ llvm::Type::getX86_MMXTy(getLLVMContext()));
+ case X86::BI__builtin_ia32_vec_ext_v2si:
+ return Builder.CreateExtractElement(Ops[0],
+ llvm::ConstantInt::get(Ops[1]->getType(), 0));
+ case X86::BI__builtin_ia32_ldmxcsr: {
+ Address Tmp = CreateMemTemp(E->getArg(0)->getType());
+ Builder.CreateStore(Ops[0], Tmp);
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
+ Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
+ }
+ case X86::BI__builtin_ia32_stmxcsr: {
+ Address Tmp = CreateMemTemp(E->getType());
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
+ Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
+ return Builder.CreateLoad(Tmp, "stmxcsr");
+ }
+ case X86::BI__builtin_ia32_xsave:
+ case X86::BI__builtin_ia32_xsave64:
+ case X86::BI__builtin_ia32_xrstor:
+ case X86::BI__builtin_ia32_xrstor64:
+ case X86::BI__builtin_ia32_xsaveopt:
+ case X86::BI__builtin_ia32_xsaveopt64:
+ case X86::BI__builtin_ia32_xrstors:
+ case X86::BI__builtin_ia32_xrstors64:
+ case X86::BI__builtin_ia32_xsavec:
+ case X86::BI__builtin_ia32_xsavec64:
+ case X86::BI__builtin_ia32_xsaves:
+ case X86::BI__builtin_ia32_xsaves64: {
+ Intrinsic::ID ID;
+#define INTRINSIC_X86_XSAVE_ID(NAME) \
+ case X86::BI__builtin_ia32_##NAME: \
+ ID = Intrinsic::x86_##NAME; \
+ break
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ INTRINSIC_X86_XSAVE_ID(xsave);
+ INTRINSIC_X86_XSAVE_ID(xsave64);
+ INTRINSIC_X86_XSAVE_ID(xrstor);
+ INTRINSIC_X86_XSAVE_ID(xrstor64);
+ INTRINSIC_X86_XSAVE_ID(xsaveopt);
+ INTRINSIC_X86_XSAVE_ID(xsaveopt64);
+ INTRINSIC_X86_XSAVE_ID(xrstors);
+ INTRINSIC_X86_XSAVE_ID(xrstors64);
+ INTRINSIC_X86_XSAVE_ID(xsavec);
+ INTRINSIC_X86_XSAVE_ID(xsavec64);
+ INTRINSIC_X86_XSAVE_ID(xsaves);
+ INTRINSIC_X86_XSAVE_ID(xsaves64);
+ }
+#undef INTRINSIC_X86_XSAVE_ID
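+ // The builtin takes a single 64-bit mask, but the x86 intrinsics expect it
+ // split into two i32 halves (high word first, matching EDX:EAX).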
+ Value *Mhi = Builder.CreateTrunc(
+ Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
+ Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
+ Ops[1] = Mhi;
+ Ops.push_back(Mlo);
+ return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
+ }
+ case X86::BI__builtin_ia32_storehps:
+ case X86::BI__builtin_ia32_storelps: {
+ llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
+ llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
+
+ // Cast val to a v2i64 vector.
+ Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
+
+ // Extract element 0 (storelps) or 1 (storehps).
+ unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
+ llvm::Value *Idx = llvm::ConstantInt::get(SizeTy, Index);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");
+
+ // cast pointer to i64 & store
+ Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ }
+ case X86::BI__builtin_ia32_palignr128:
+ case X86::BI__builtin_ia32_palignr256: {
+ unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+ assert(NumElts % 16 == 0);
+ unsigned NumLanes = NumElts / 16;
+ unsigned NumLaneElts = NumElts / NumLanes;
+
+ // If palignr is shifting the pair of vectors more than the size of two
+ // lanes, emit zero.
+ if (ShiftVal >= (2 * NumLaneElts))
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+
+ // If palignr is shifting the pair of input vectors more than one lane,
+ // but less than two lanes, convert to shifting in zeroes.
+ if (ShiftVal > NumLaneElts) {
+ ShiftVal -= NumLaneElts;
+ Ops[1] = Ops[0];
+ Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
+ }
+
+ uint32_t Indices[32];
+ // 256-bit palignr operates on 128-bit lanes so we need to handle that
+ for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
+ for (unsigned i = 0; i != NumLaneElts; ++i) {
+ unsigned Idx = ShiftVal + i;
+ if (Idx >= NumLaneElts)
+ Idx += NumElts - NumLaneElts; // End of lane, switch operand.
+ Indices[l + i] = Idx + l;
+ }
+ }
+
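+ // Note the operand order: Ops[1] supplies the low half of the concatenated
+ // pair, so shuffle indices >= NumElts select bytes from Ops[0].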
+ Value *SV = llvm::ConstantDataVector::get(getLLVMContext(),
+ makeArrayRef(Indices, NumElts));
+ return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+ }
+ case X86::BI__builtin_ia32_pslldqi256: {
+ // Shift value is in bits so divide by 8.
+ unsigned shiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() >> 3;
+
+ // If pslldq is shifting the vector more than 15 bytes, emit zero.
+ if (shiftVal >= 16)
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+
+ uint32_t Indices[32];
+ // 256-bit pslldq operates on 128-bit lanes so we need to handle that
+ for (unsigned l = 0; l != 32; l += 16) {
+ for (unsigned i = 0; i != 16; ++i) {
+ unsigned Idx = 32 + i - shiftVal;
+ if (Idx < 32) Idx -= 16; // end of lane, switch operand.
+ Indices[l + i] = Idx + l;
+ }
+ }
+
+ llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, 32);
+ Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+ Value *Zero = llvm::Constant::getNullValue(VecTy);
+
+ Value *SV = llvm::ConstantDataVector::get(getLLVMContext(), Indices);
+ SV = Builder.CreateShuffleVector(Zero, Ops[0], SV, "pslldq");
+ llvm::Type *ResultType = ConvertType(E->getType());
+ return Builder.CreateBitCast(SV, ResultType, "cast");
+ }
+ case X86::BI__builtin_ia32_psrldqi256: {
+ // Shift value is in bits so divide by 8.
+ unsigned shiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() >> 3;
+
+ // If psrldq is shifting the vector more than 15 bytes, emit zero.
+ if (shiftVal >= 16)
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+
+ uint32_t Indices[32];
+ // 256-bit psrldq operates on 128-bit lanes so we need to handle that
+ for (unsigned l = 0; l != 32; l += 16) {
+ for (unsigned i = 0; i != 16; ++i) {
+ unsigned Idx = i + shiftVal;
+ if (Idx >= 16) Idx += 16; // end of lane, switch operand.
+ Indices[l + i] = Idx + l;
+ }
+ }
+
+ llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, 32);
+ Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+ Value *Zero = llvm::Constant::getNullValue(VecTy);
+
+ Value *SV = llvm::ConstantDataVector::get(getLLVMContext(), Indices);
+ SV = Builder.CreateShuffleVector(Ops[0], Zero, SV, "psrldq");
+ llvm::Type *ResultType = ConvertType(E->getType());
+ return Builder.CreateBitCast(SV, ResultType, "cast");
+ }
+ case X86::BI__builtin_ia32_movntps:
+ case X86::BI__builtin_ia32_movntps256:
+ case X86::BI__builtin_ia32_movntpd:
+ case X86::BI__builtin_ia32_movntpd256:
+ case X86::BI__builtin_ia32_movntdq:
+ case X86::BI__builtin_ia32_movntdq256:
+ case X86::BI__builtin_ia32_movnti:
+ case X86::BI__builtin_ia32_movnti64: {
+ llvm::MDNode *Node = llvm::MDNode::get(
+ getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
+
+ // Convert the type of the pointer to a pointer to the stored type.
+ Value *BC = Builder.CreateBitCast(Ops[0],
+ llvm::PointerType::getUnqual(Ops[1]->getType()),
+ "cast");
+ StoreInst *SI = Builder.CreateDefaultAlignedStore(Ops[1], BC);
+ SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
+
+ // If the operand is an integer, we can't assume alignment. Otherwise,
+ // assume natural alignment.
+ QualType ArgTy = E->getArg(1)->getType();
+ unsigned Align;
+ if (ArgTy->isIntegerType())
+ Align = 1;
+ else
+ Align = getContext().getTypeSizeInChars(ArgTy).getQuantity();
+ SI->setAlignment(Align);
+ return SI;
+ }
+ // 3DNow!
+ case X86::BI__builtin_ia32_pswapdsf:
+ case X86::BI__builtin_ia32_pswapdsi: {
+ llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
+ Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
+ return Builder.CreateCall(F, Ops, "pswapd");
+ }
+ case X86::BI__builtin_ia32_rdrand16_step:
+ case X86::BI__builtin_ia32_rdrand32_step:
+ case X86::BI__builtin_ia32_rdrand64_step:
+ case X86::BI__builtin_ia32_rdseed16_step:
+ case X86::BI__builtin_ia32_rdseed32_step:
+ case X86::BI__builtin_ia32_rdseed64_step: {
+ Intrinsic::ID ID;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_rdrand16_step:
+ ID = Intrinsic::x86_rdrand_16;
+ break;
+ case X86::BI__builtin_ia32_rdrand32_step:
+ ID = Intrinsic::x86_rdrand_32;
+ break;
+ case X86::BI__builtin_ia32_rdrand64_step:
+ ID = Intrinsic::x86_rdrand_64;
+ break;
+ case X86::BI__builtin_ia32_rdseed16_step:
+ ID = Intrinsic::x86_rdseed_16;
+ break;
+ case X86::BI__builtin_ia32_rdseed32_step:
+ ID = Intrinsic::x86_rdseed_32;
+ break;
+ case X86::BI__builtin_ia32_rdseed64_step:
+ ID = Intrinsic::x86_rdseed_64;
+ break;
+ }
+
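+ // The rdrand/rdseed intrinsics return a { value, success flag } pair;
+ // store the value through the out pointer and return the flag.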
+ Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
+ Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0),
+ Ops[0]);
+ return Builder.CreateExtractValue(Call, 1);
+ }
+ // SSE comparison intrinsics
+ case X86::BI__builtin_ia32_cmpeqps:
+ case X86::BI__builtin_ia32_cmpltps:
+ case X86::BI__builtin_ia32_cmpleps:
+ case X86::BI__builtin_ia32_cmpunordps:
+ case X86::BI__builtin_ia32_cmpneqps:
+ case X86::BI__builtin_ia32_cmpnltps:
+ case X86::BI__builtin_ia32_cmpnleps:
+ case X86::BI__builtin_ia32_cmpordps:
+ case X86::BI__builtin_ia32_cmpeqss:
+ case X86::BI__builtin_ia32_cmpltss:
+ case X86::BI__builtin_ia32_cmpless:
+ case X86::BI__builtin_ia32_cmpunordss:
+ case X86::BI__builtin_ia32_cmpneqss:
+ case X86::BI__builtin_ia32_cmpnltss:
+ case X86::BI__builtin_ia32_cmpnless:
+ case X86::BI__builtin_ia32_cmpordss:
+ case X86::BI__builtin_ia32_cmpeqpd:
+ case X86::BI__builtin_ia32_cmpltpd:
+ case X86::BI__builtin_ia32_cmplepd:
+ case X86::BI__builtin_ia32_cmpunordpd:
+ case X86::BI__builtin_ia32_cmpneqpd:
+ case X86::BI__builtin_ia32_cmpnltpd:
+ case X86::BI__builtin_ia32_cmpnlepd:
+ case X86::BI__builtin_ia32_cmpordpd:
+ case X86::BI__builtin_ia32_cmpeqsd:
+ case X86::BI__builtin_ia32_cmpltsd:
+ case X86::BI__builtin_ia32_cmplesd:
+ case X86::BI__builtin_ia32_cmpunordsd:
+ case X86::BI__builtin_ia32_cmpneqsd:
+ case X86::BI__builtin_ia32_cmpnltsd:
+ case X86::BI__builtin_ia32_cmpnlesd:
+ case X86::BI__builtin_ia32_cmpordsd:
+ // These exist so that the builtin that takes an immediate can be bounds
+ // checked by clang to avoid passing bad immediates to the backend. Since
+ // AVX has a larger immediate than SSE we would need separate builtins to
+ // do the different bounds checking. Rather than create a clang-specific
+ // SSE-only builtin, this implements eight separate builtins to match the
+ // gcc implementation.
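+ // For example, __builtin_ia32_cmpltps(a, b) lowers to a call to
+ // @llvm.x86.sse.cmp.ps(a, b, i8 1).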
+
+ // Choose the immediate.
+ unsigned Imm;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_cmpeqps:
+ case X86::BI__builtin_ia32_cmpeqss:
+ case X86::BI__builtin_ia32_cmpeqpd:
+ case X86::BI__builtin_ia32_cmpeqsd:
+ Imm = 0;
+ break;
+ case X86::BI__builtin_ia32_cmpltps:
+ case X86::BI__builtin_ia32_cmpltss:
+ case X86::BI__builtin_ia32_cmpltpd:
+ case X86::BI__builtin_ia32_cmpltsd:
+ Imm = 1;
+ break;
+ case X86::BI__builtin_ia32_cmpleps:
+ case X86::BI__builtin_ia32_cmpless:
+ case X86::BI__builtin_ia32_cmplepd:
+ case X86::BI__builtin_ia32_cmplesd:
+ Imm = 2;
+ break;
+ case X86::BI__builtin_ia32_cmpunordps:
+ case X86::BI__builtin_ia32_cmpunordss:
+ case X86::BI__builtin_ia32_cmpunordpd:
+ case X86::BI__builtin_ia32_cmpunordsd:
+ Imm = 3;
+ break;
+ case X86::BI__builtin_ia32_cmpneqps:
+ case X86::BI__builtin_ia32_cmpneqss:
+ case X86::BI__builtin_ia32_cmpneqpd:
+ case X86::BI__builtin_ia32_cmpneqsd:
+ Imm = 4;
+ break;
+ case X86::BI__builtin_ia32_cmpnltps:
+ case X86::BI__builtin_ia32_cmpnltss:
+ case X86::BI__builtin_ia32_cmpnltpd:
+ case X86::BI__builtin_ia32_cmpnltsd:
+ Imm = 5;
+ break;
+ case X86::BI__builtin_ia32_cmpnleps:
+ case X86::BI__builtin_ia32_cmpnless:
+ case X86::BI__builtin_ia32_cmpnlepd:
+ case X86::BI__builtin_ia32_cmpnlesd:
+ Imm = 6;
+ break;
+ case X86::BI__builtin_ia32_cmpordps:
+ case X86::BI__builtin_ia32_cmpordss:
+ case X86::BI__builtin_ia32_cmpordpd:
+ case X86::BI__builtin_ia32_cmpordsd:
+ Imm = 7;
+ break;
+ }
+
+ // Choose the intrinsic ID.
+ const char *name;
+ Intrinsic::ID ID;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_cmpeqps:
+ case X86::BI__builtin_ia32_cmpltps:
+ case X86::BI__builtin_ia32_cmpleps:
+ case X86::BI__builtin_ia32_cmpunordps:
+ case X86::BI__builtin_ia32_cmpneqps:
+ case X86::BI__builtin_ia32_cmpnltps:
+ case X86::BI__builtin_ia32_cmpnleps:
+ case X86::BI__builtin_ia32_cmpordps:
+ name = "cmpps";
+ ID = Intrinsic::x86_sse_cmp_ps;
+ break;
+ case X86::BI__builtin_ia32_cmpeqss:
+ case X86::BI__builtin_ia32_cmpltss:
+ case X86::BI__builtin_ia32_cmpless:
+ case X86::BI__builtin_ia32_cmpunordss:
+ case X86::BI__builtin_ia32_cmpneqss:
+ case X86::BI__builtin_ia32_cmpnltss:
+ case X86::BI__builtin_ia32_cmpnless:
+ case X86::BI__builtin_ia32_cmpordss:
+ name = "cmpss";
+ ID = Intrinsic::x86_sse_cmp_ss;
+ break;
+ case X86::BI__builtin_ia32_cmpeqpd:
+ case X86::BI__builtin_ia32_cmpltpd:
+ case X86::BI__builtin_ia32_cmplepd:
+ case X86::BI__builtin_ia32_cmpunordpd:
+ case X86::BI__builtin_ia32_cmpneqpd:
+ case X86::BI__builtin_ia32_cmpnltpd:
+ case X86::BI__builtin_ia32_cmpnlepd:
+ case X86::BI__builtin_ia32_cmpordpd:
+ name = "cmppd";
+ ID = Intrinsic::x86_sse2_cmp_pd;
+ break;
+ case X86::BI__builtin_ia32_cmpeqsd:
+ case X86::BI__builtin_ia32_cmpltsd:
+ case X86::BI__builtin_ia32_cmplesd:
+ case X86::BI__builtin_ia32_cmpunordsd:
+ case X86::BI__builtin_ia32_cmpneqsd:
+ case X86::BI__builtin_ia32_cmpnltsd:
+ case X86::BI__builtin_ia32_cmpnlesd:
+ case X86::BI__builtin_ia32_cmpordsd:
+ name = "cmpsd";
+ ID = Intrinsic::x86_sse2_cmp_sd;
+ break;
+ }
+
+ Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, Ops, name);
+ }
+}
+
+Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ SmallVector<Value*, 4> Ops;
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+ switch (BuiltinID) {
+ default: return nullptr;
+
+ // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we
+ // call __builtin_readcyclecounter.
+ case PPC::BI__builtin_ppc_get_timebase:
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter));
+
+ // vec_ld, vec_lvsl, vec_lvsr
+ case PPC::BI__builtin_altivec_lvx:
+ case PPC::BI__builtin_altivec_lvxl:
+ case PPC::BI__builtin_altivec_lvebx:
+ case PPC::BI__builtin_altivec_lvehx:
+ case PPC::BI__builtin_altivec_lvewx:
+ case PPC::BI__builtin_altivec_lvsl:
+ case PPC::BI__builtin_altivec_lvsr:
+ case PPC::BI__builtin_vsx_lxvd2x:
+ case PPC::BI__builtin_vsx_lxvw4x:
+ {
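+ // These builtins take (offset, base); form the effective address with a
+ // byte GEP and drop the now-redundant pointer operand.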
+ Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
+
+ Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
+ Ops.pop_back();
+
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
+ case PPC::BI__builtin_altivec_lvx:
+ ID = Intrinsic::ppc_altivec_lvx;
+ break;
+ case PPC::BI__builtin_altivec_lvxl:
+ ID = Intrinsic::ppc_altivec_lvxl;
+ break;
+ case PPC::BI__builtin_altivec_lvebx:
+ ID = Intrinsic::ppc_altivec_lvebx;
+ break;
+ case PPC::BI__builtin_altivec_lvehx:
+ ID = Intrinsic::ppc_altivec_lvehx;
+ break;
+ case PPC::BI__builtin_altivec_lvewx:
+ ID = Intrinsic::ppc_altivec_lvewx;
+ break;
+ case PPC::BI__builtin_altivec_lvsl:
+ ID = Intrinsic::ppc_altivec_lvsl;
+ break;
+ case PPC::BI__builtin_altivec_lvsr:
+ ID = Intrinsic::ppc_altivec_lvsr;
+ break;
+ case PPC::BI__builtin_vsx_lxvd2x:
+ ID = Intrinsic::ppc_vsx_lxvd2x;
+ break;
+ case PPC::BI__builtin_vsx_lxvw4x:
+ ID = Intrinsic::ppc_vsx_lxvw4x;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, Ops, "");
+ }
+
+ // vec_st
+ case PPC::BI__builtin_altivec_stvx:
+ case PPC::BI__builtin_altivec_stvxl:
+ case PPC::BI__builtin_altivec_stvebx:
+ case PPC::BI__builtin_altivec_stvehx:
+ case PPC::BI__builtin_altivec_stvewx:
+ case PPC::BI__builtin_vsx_stxvd2x:
+ case PPC::BI__builtin_vsx_stxvw4x:
+ {
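+ // Likewise for (data, offset, base): compute the effective address and
+ // drop the trailing pointer operand.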
+ Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
+ Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
+ Ops.pop_back();
+
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported st intrinsic!");
+ case PPC::BI__builtin_altivec_stvx:
+ ID = Intrinsic::ppc_altivec_stvx;
+ break;
+ case PPC::BI__builtin_altivec_stvxl:
+ ID = Intrinsic::ppc_altivec_stvxl;
+ break;
+ case PPC::BI__builtin_altivec_stvebx:
+ ID = Intrinsic::ppc_altivec_stvebx;
+ break;
+ case PPC::BI__builtin_altivec_stvehx:
+ ID = Intrinsic::ppc_altivec_stvehx;
+ break;
+ case PPC::BI__builtin_altivec_stvewx:
+ ID = Intrinsic::ppc_altivec_stvewx;
+ break;
+ case PPC::BI__builtin_vsx_stxvd2x:
+ ID = Intrinsic::ppc_vsx_stxvd2x;
+ break;
+ case PPC::BI__builtin_vsx_stxvw4x:
+ ID = Intrinsic::ppc_vsx_stxvw4x;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, Ops, "");
+ }
+ // Square root
+ case PPC::BI__builtin_vsx_xvsqrtsp:
+ case PPC::BI__builtin_vsx_xvsqrtdp: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ ID = Intrinsic::sqrt;
+ llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
+ return Builder.CreateCall(F, X);
+ }
+ // Count leading zeros
+ case PPC::BI__builtin_altivec_vclzb:
+ case PPC::BI__builtin_altivec_vclzh:
+ case PPC::BI__builtin_altivec_vclzw:
+ case PPC::BI__builtin_altivec_vclzd: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
+ Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
+ return Builder.CreateCall(F, {X, Undef});
+ }
+ // Copy sign
+ case PPC::BI__builtin_vsx_xvcpsgnsp:
+ case PPC::BI__builtin_vsx_xvcpsgndp: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ Value *Y = EmitScalarExpr(E->getArg(1));
+ ID = Intrinsic::copysign;
+ llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
+ return Builder.CreateCall(F, {X, Y});
+ }
+ // Rounding/truncation
+ case PPC::BI__builtin_vsx_xvrspip:
+ case PPC::BI__builtin_vsx_xvrdpip:
+ case PPC::BI__builtin_vsx_xvrdpim:
+ case PPC::BI__builtin_vsx_xvrspim:
+ case PPC::BI__builtin_vsx_xvrdpi:
+ case PPC::BI__builtin_vsx_xvrspi:
+ case PPC::BI__builtin_vsx_xvrdpic:
+ case PPC::BI__builtin_vsx_xvrspic:
+ case PPC::BI__builtin_vsx_xvrdpiz:
+ case PPC::BI__builtin_vsx_xvrspiz: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
+ BuiltinID == PPC::BI__builtin_vsx_xvrspim)
+ ID = Intrinsic::floor;
+ else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
+ BuiltinID == PPC::BI__builtin_vsx_xvrspi)
+ ID = Intrinsic::round;
+ else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
+ BuiltinID == PPC::BI__builtin_vsx_xvrspic)
+ ID = Intrinsic::nearbyint;
+ else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
+ BuiltinID == PPC::BI__builtin_vsx_xvrspip)
+ ID = Intrinsic::ceil;
+ else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
+ BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
+ ID = Intrinsic::trunc;
+ llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
+ return Builder.CreateCall(F, X);
+ }
+ // FMA variations
+ case PPC::BI__builtin_vsx_xvmaddadp:
+ case PPC::BI__builtin_vsx_xvmaddasp:
+ case PPC::BI__builtin_vsx_xvnmaddadp:
+ case PPC::BI__builtin_vsx_xvnmaddasp:
+ case PPC::BI__builtin_vsx_xvmsubadp:
+ case PPC::BI__builtin_vsx_xvmsubasp:
+ case PPC::BI__builtin_vsx_xvnmsubadp:
+ case PPC::BI__builtin_vsx_xvnmsubasp: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ Value *Y = EmitScalarExpr(E->getArg(1));
+ Value *Z = EmitScalarExpr(E->getArg(2));
+ Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
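+ // getZeroValueForNegation yields -0.0, so the FSubs below are the
+ // canonical IR spelling of floating-point negation.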
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
+ switch (BuiltinID) {
+ case PPC::BI__builtin_vsx_xvmaddadp:
+ case PPC::BI__builtin_vsx_xvmaddasp:
+ return Builder.CreateCall(F, {X, Y, Z});
+ case PPC::BI__builtin_vsx_xvnmaddadp:
+ case PPC::BI__builtin_vsx_xvnmaddasp:
+ return Builder.CreateFSub(Zero,
+ Builder.CreateCall(F, {X, Y, Z}), "sub");
+ case PPC::BI__builtin_vsx_xvmsubadp:
+ case PPC::BI__builtin_vsx_xvmsubasp:
+ return Builder.CreateCall(F,
+ {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
+ case PPC::BI__builtin_vsx_xvnmsubadp:
+ case PPC::BI__builtin_vsx_xvnmsubasp:
+ Value *FsubRes =
+ Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
+ return Builder.CreateFSub(Zero, FsubRes, "sub");
+ }
+ llvm_unreachable("Unknown FMA operation");
+ return nullptr; // Suppress no-return warning
+ }
+ }
+}
+
+// Emit an intrinsic that has 1 float or double operand.
+static Value *emitUnaryFPBuiltin(CodeGenFunction &CGF,
+ const CallExpr *E,
+ unsigned IntrinsicID) {
+ llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
+
+ Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
+ return CGF.Builder.CreateCall(F, Src0);
+}
+
+// Emit an intrinsic that has 3 float or double operands.
+static Value *emitTernaryFPBuiltin(CodeGenFunction &CGF,
+ const CallExpr *E,
+ unsigned IntrinsicID) {
+ llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
+ llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
+ llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
+
+ Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
+ return CGF.Builder.CreateCall(F, {Src0, Src1, Src2});
+}
+
+// Emit an intrinsic that has 1 float or double operand, and 1 integer.
+static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
+ const CallExpr *E,
+ unsigned IntrinsicID) {
+ llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
+ llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
+
+ Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
+ return CGF.Builder.CreateCall(F, {Src0, Src1});
+}
+
+Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ switch (BuiltinID) {
+ case AMDGPU::BI__builtin_amdgpu_div_scale:
+ case AMDGPU::BI__builtin_amdgpu_div_scalef: {
+ // Translate from the intrinsic's struct return to the builtin's out
+ // argument.
+
+ Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));
+
+ llvm::Value *X = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Y = EmitScalarExpr(E->getArg(1));
+ llvm::Value *Z = EmitScalarExpr(E->getArg(2));
+
+ llvm::Value *Callee = CGM.getIntrinsic(Intrinsic::AMDGPU_div_scale,
+ X->getType());
+
+ llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});
+
+ llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
+ llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
+
+ llvm::Type *RealFlagType
+ = FlagOutPtr.getPointer()->getType()->getPointerElementType();
+
+ llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
+ Builder.CreateStore(FlagExt, FlagOutPtr);
+ return Result;
+ }
+ case AMDGPU::BI__builtin_amdgpu_div_fmas:
+ case AMDGPU::BI__builtin_amdgpu_div_fmasf: {
+ llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
+ llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
+ llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
+
+ llvm::Value *F = CGM.getIntrinsic(Intrinsic::AMDGPU_div_fmas,
+ Src0->getType());
+ llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
+ return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
+ }
+ case AMDGPU::BI__builtin_amdgpu_div_fixup:
+ case AMDGPU::BI__builtin_amdgpu_div_fixupf:
+ return emitTernaryFPBuiltin(*this, E, Intrinsic::AMDGPU_div_fixup);
+ case AMDGPU::BI__builtin_amdgpu_trig_preop:
+ case AMDGPU::BI__builtin_amdgpu_trig_preopf:
+ return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_trig_preop);
+ case AMDGPU::BI__builtin_amdgpu_rcp:
+ case AMDGPU::BI__builtin_amdgpu_rcpf:
+ return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rcp);
+ case AMDGPU::BI__builtin_amdgpu_rsq:
+ case AMDGPU::BI__builtin_amdgpu_rsqf:
+ return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rsq);
+ case AMDGPU::BI__builtin_amdgpu_rsq_clamped:
+ case AMDGPU::BI__builtin_amdgpu_rsq_clampedf:
+ return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rsq_clamped);
+ case AMDGPU::BI__builtin_amdgpu_ldexp:
+ case AMDGPU::BI__builtin_amdgpu_ldexpf:
+ return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_ldexp);
+ case AMDGPU::BI__builtin_amdgpu_class:
+ case AMDGPU::BI__builtin_amdgpu_classf:
+ return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_class);
+ default:
+ return nullptr;
+ }
+}
+
+/// Handle a SystemZ function in which the final argument is a pointer
+/// to an int that receives the post-instruction CC value. At the LLVM level
+/// this is represented as a function that returns a {result, cc} pair.
+static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
+ unsigned IntrinsicID,
+ const CallExpr *E) {
+ unsigned NumArgs = E->getNumArgs() - 1;
+ SmallVector<Value *, 8> Args(NumArgs);
+ for (unsigned I = 0; I < NumArgs; ++I)
+ Args[I] = CGF.EmitScalarExpr(E->getArg(I));
+ Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
+ Value *F = CGF.CGM.getIntrinsic(IntrinsicID);
+ Value *Call = CGF.Builder.CreateCall(F, Args);
+ Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
+ CGF.Builder.CreateStore(CC, CCPtr);
+ return CGF.Builder.CreateExtractValue(Call, 0);
+}
+
+Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ switch (BuiltinID) {
+ case SystemZ::BI__builtin_tbegin: {
+ Value *TDB = EmitScalarExpr(E->getArg(0));
+ Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
+ Value *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
+ return Builder.CreateCall(F, {TDB, Control});
+ }
+ case SystemZ::BI__builtin_tbegin_nofloat: {
+ Value *TDB = EmitScalarExpr(E->getArg(0));
+ Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
+ Value *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
+ return Builder.CreateCall(F, {TDB, Control});
+ }
+ case SystemZ::BI__builtin_tbeginc: {
+ Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
+ Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
+ Value *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
+ return Builder.CreateCall(F, {TDB, Control});
+ }
+ case SystemZ::BI__builtin_tabort: {
+ Value *Data = EmitScalarExpr(E->getArg(0));
+ Value *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
+ return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
+ }
+ case SystemZ::BI__builtin_non_tx_store: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *Data = EmitScalarExpr(E->getArg(1));
+ Value *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
+ return Builder.CreateCall(F, {Data, Address});
+ }
+
+ // Vector builtins. Note that most vector builtins are mapped automatically
+ // to target-specific LLVM intrinsics. The ones handled specially here can
+ // be represented via standard LLVM IR, which is preferable since it enables
+ // common LLVM optimizations.
+
+ case SystemZ::BI__builtin_s390_vpopctb:
+ case SystemZ::BI__builtin_s390_vpopcth:
+ case SystemZ::BI__builtin_s390_vpopctf:
+ case SystemZ::BI__builtin_s390_vpopctg: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
+ return Builder.CreateCall(F, X);
+ }
+
+ case SystemZ::BI__builtin_s390_vclzb:
+ case SystemZ::BI__builtin_s390_vclzh:
+ case SystemZ::BI__builtin_s390_vclzf:
+ case SystemZ::BI__builtin_s390_vclzg: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
+ Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
+ return Builder.CreateCall(F, {X, Undef});
+ }
+
+ case SystemZ::BI__builtin_s390_vctzb:
+ case SystemZ::BI__builtin_s390_vctzh:
+ case SystemZ::BI__builtin_s390_vctzf:
+ case SystemZ::BI__builtin_s390_vctzg: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
+ Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
+ return Builder.CreateCall(F, {X, Undef});
+ }
+
+ case SystemZ::BI__builtin_s390_vfsqdb: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
+ return Builder.CreateCall(F, X);
+ }
+ case SystemZ::BI__builtin_s390_vfmadb: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ Value *Y = EmitScalarExpr(E->getArg(1));
+ Value *Z = EmitScalarExpr(E->getArg(2));
+ Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
+ return Builder.CreateCall(F, {X, Y, Z});
+ }
+ case SystemZ::BI__builtin_s390_vfmsdb: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ Value *Y = EmitScalarExpr(E->getArg(1));
+ Value *Z = EmitScalarExpr(E->getArg(2));
+ Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
+ Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
+ return Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
+ }
+ case SystemZ::BI__builtin_s390_vflpdb: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
+ return Builder.CreateCall(F, X);
+ }
+ case SystemZ::BI__builtin_s390_vflndb: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
+ Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
+ return Builder.CreateFSub(Zero, Builder.CreateCall(F, X), "sub");
+ }
+ case SystemZ::BI__builtin_s390_vfidb: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ // Constant-fold the M4 and M5 mask arguments.
+ llvm::APSInt M4, M5;
+ bool IsConstM4 = E->getArg(1)->isIntegerConstantExpr(M4, getContext());
+ bool IsConstM5 = E->getArg(2)->isIntegerConstantExpr(M5, getContext());
+ assert(IsConstM4 && IsConstM5 && "Constant arg isn't actually constant?");
+ (void)IsConstM4; (void)IsConstM5;
+ // Check whether this instance of vfidb can be represented via an LLVM
+ // standard intrinsic. We only support some combinations of M4 and M5.
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+ switch (M4.getZExtValue()) {
+ default: break;
+ case 0: // IEEE-inexact exception allowed
+ switch (M5.getZExtValue()) {
+ default: break;
+ case 0: ID = Intrinsic::rint; break;
+ }
+ break;
+ case 4: // IEEE-inexact exception suppressed
+ switch (M5.getZExtValue()) {
+ default: break;
+ case 0: ID = Intrinsic::nearbyint; break;
+ case 1: ID = Intrinsic::round; break;
+ case 5: ID = Intrinsic::trunc; break;
+ case 6: ID = Intrinsic::ceil; break;
+ case 7: ID = Intrinsic::floor; break;
+ }
+ break;
+ }
+ if (ID != Intrinsic::not_intrinsic) {
+ Function *F = CGM.getIntrinsic(ID, ResultType);
+ return Builder.CreateCall(F, X);
+ }
+ Function *F = CGM.getIntrinsic(Intrinsic::s390_vfidb);
+ Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
+ Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
+ return Builder.CreateCall(F, {X, M4Value, M5Value});
+ }
+
+ // Vector intrinsics that output the post-instruction CC value.
+
+#define INTRINSIC_WITH_CC(NAME) \
+ case SystemZ::BI__builtin_##NAME: \
+ return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
+
+ INTRINSIC_WITH_CC(s390_vpkshs);
+ INTRINSIC_WITH_CC(s390_vpksfs);
+ INTRINSIC_WITH_CC(s390_vpksgs);
+
+ INTRINSIC_WITH_CC(s390_vpklshs);
+ INTRINSIC_WITH_CC(s390_vpklsfs);
+ INTRINSIC_WITH_CC(s390_vpklsgs);
+
+ INTRINSIC_WITH_CC(s390_vceqbs);
+ INTRINSIC_WITH_CC(s390_vceqhs);
+ INTRINSIC_WITH_CC(s390_vceqfs);
+ INTRINSIC_WITH_CC(s390_vceqgs);
+
+ INTRINSIC_WITH_CC(s390_vchbs);
+ INTRINSIC_WITH_CC(s390_vchhs);
+ INTRINSIC_WITH_CC(s390_vchfs);
+ INTRINSIC_WITH_CC(s390_vchgs);
+
+ INTRINSIC_WITH_CC(s390_vchlbs);
+ INTRINSIC_WITH_CC(s390_vchlhs);
+ INTRINSIC_WITH_CC(s390_vchlfs);
+ INTRINSIC_WITH_CC(s390_vchlgs);
+
+ INTRINSIC_WITH_CC(s390_vfaebs);
+ INTRINSIC_WITH_CC(s390_vfaehs);
+ INTRINSIC_WITH_CC(s390_vfaefs);
+
+ INTRINSIC_WITH_CC(s390_vfaezbs);
+ INTRINSIC_WITH_CC(s390_vfaezhs);
+ INTRINSIC_WITH_CC(s390_vfaezfs);
+
+ INTRINSIC_WITH_CC(s390_vfeebs);
+ INTRINSIC_WITH_CC(s390_vfeehs);
+ INTRINSIC_WITH_CC(s390_vfeefs);
+
+ INTRINSIC_WITH_CC(s390_vfeezbs);
+ INTRINSIC_WITH_CC(s390_vfeezhs);
+ INTRINSIC_WITH_CC(s390_vfeezfs);
+
+ INTRINSIC_WITH_CC(s390_vfenebs);
+ INTRINSIC_WITH_CC(s390_vfenehs);
+ INTRINSIC_WITH_CC(s390_vfenefs);
+
+ INTRINSIC_WITH_CC(s390_vfenezbs);
+ INTRINSIC_WITH_CC(s390_vfenezhs);
+ INTRINSIC_WITH_CC(s390_vfenezfs);
+
+ INTRINSIC_WITH_CC(s390_vistrbs);
+ INTRINSIC_WITH_CC(s390_vistrhs);
+ INTRINSIC_WITH_CC(s390_vistrfs);
+
+ INTRINSIC_WITH_CC(s390_vstrcbs);
+ INTRINSIC_WITH_CC(s390_vstrchs);
+ INTRINSIC_WITH_CC(s390_vstrcfs);
+
+ INTRINSIC_WITH_CC(s390_vstrczbs);
+ INTRINSIC_WITH_CC(s390_vstrczhs);
+ INTRINSIC_WITH_CC(s390_vstrczfs);
+
+ INTRINSIC_WITH_CC(s390_vfcedbs);
+ INTRINSIC_WITH_CC(s390_vfchdbs);
+ INTRINSIC_WITH_CC(s390_vfchedbs);
+
+ INTRINSIC_WITH_CC(s390_vftcidb);
+
+#undef INTRINSIC_WITH_CC
+
+ default:
+ return nullptr;
+ }
+}
+
+Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ switch (BuiltinID) {
+ case NVPTX::BI__nvvm_atom_add_gen_i:
+ case NVPTX::BI__nvvm_atom_add_gen_l:
+ case NVPTX::BI__nvvm_atom_add_gen_ll:
+ return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);
+
+ case NVPTX::BI__nvvm_atom_sub_gen_i:
+ case NVPTX::BI__nvvm_atom_sub_gen_l:
+ case NVPTX::BI__nvvm_atom_sub_gen_ll:
+ return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);
+
+ case NVPTX::BI__nvvm_atom_and_gen_i:
+ case NVPTX::BI__nvvm_atom_and_gen_l:
+ case NVPTX::BI__nvvm_atom_and_gen_ll:
+ return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);
+
+ case NVPTX::BI__nvvm_atom_or_gen_i:
+ case NVPTX::BI__nvvm_atom_or_gen_l:
+ case NVPTX::BI__nvvm_atom_or_gen_ll:
+ return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);
+
+ case NVPTX::BI__nvvm_atom_xor_gen_i:
+ case NVPTX::BI__nvvm_atom_xor_gen_l:
+ case NVPTX::BI__nvvm_atom_xor_gen_ll:
+ return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);
+
+ case NVPTX::BI__nvvm_atom_xchg_gen_i:
+ case NVPTX::BI__nvvm_atom_xchg_gen_l:
+ case NVPTX::BI__nvvm_atom_xchg_gen_ll:
+ return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
+
+ case NVPTX::BI__nvvm_atom_max_gen_i:
+ case NVPTX::BI__nvvm_atom_max_gen_l:
+ case NVPTX::BI__nvvm_atom_max_gen_ll:
+ return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
+
+ case NVPTX::BI__nvvm_atom_max_gen_ui:
+ case NVPTX::BI__nvvm_atom_max_gen_ul:
+ case NVPTX::BI__nvvm_atom_max_gen_ull:
+ return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);
+
+ case NVPTX::BI__nvvm_atom_min_gen_i:
+ case NVPTX::BI__nvvm_atom_min_gen_l:
+ case NVPTX::BI__nvvm_atom_min_gen_ll:
+ return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
+
+ case NVPTX::BI__nvvm_atom_min_gen_ui:
+ case NVPTX::BI__nvvm_atom_min_gen_ul:
+ case NVPTX::BI__nvvm_atom_min_gen_ull:
+ return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);
+
+ case NVPTX::BI__nvvm_atom_cas_gen_i:
+ case NVPTX::BI__nvvm_atom_cas_gen_l:
+ case NVPTX::BI__nvvm_atom_cas_gen_ll:
+ // __nvvm_atom_cas_gen_* should return the old value rather than the
+ // success flag.
+ return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
+
+ case NVPTX::BI__nvvm_atom_add_gen_f: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ Value *Val = EmitScalarExpr(E->getArg(1));
+ // atomicrmw only deals with integer arguments, so we need to use LLVM's
+ // nvvm_atomic_load_add_f32 intrinsic instead.
+ Value *FnALAF32 =
+ CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_add_f32, Ptr->getType());
+ return Builder.CreateCall(FnALAF32, {Ptr, Val});
+ }
+
+ default:
+ return nullptr;
+ }
+}
+
+Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_memory_size: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
+ return Builder.CreateCall(Callee);
+ }
+ case WebAssembly::BI__builtin_wasm_grow_memory: {
+ Value *X = EmitScalarExpr(E->getArg(0));
+ Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_grow_memory, X->getType());
+ return Builder.CreateCall(Callee, X);
+ }
+
+ default:
+ return nullptr;
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCUDANV.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCUDANV.cpp
new file mode 100644
index 0000000..045e19b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCUDANV.cpp
@@ -0,0 +1,319 @@
+//===----- CGCUDANV.cpp - Interface to NVIDIA CUDA Runtime ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides a class for CUDA code generation targeting the NVIDIA CUDA
+// runtime library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCUDARuntime.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/Decl.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+
+class CGNVCUDARuntime : public CGCUDARuntime {
+
+private:
+ llvm::Type *IntTy, *SizeTy, *VoidTy;
+ llvm::PointerType *CharPtrTy, *VoidPtrTy, *VoidPtrPtrTy;
+
+ /// Convenience reference to LLVM Context
+ llvm::LLVMContext &Context;
+ /// Convenience reference to the current module
+ llvm::Module &TheModule;
+ /// Keeps track of kernel launch stubs emitted in this module
+ llvm::SmallVector<llvm::Function *, 16> EmittedKernels;
+ /// Keeps track of variables containing handles of GPU binaries. Populated by
+ /// ModuleCtorFunction() and used to create corresponding cleanup calls in
+ /// ModuleDtorFunction()
+ llvm::SmallVector<llvm::GlobalVariable *, 16> GpuBinaryHandles;
+
+ llvm::Constant *getSetupArgumentFn() const;
+ llvm::Constant *getLaunchFn() const;
+
+ /// Creates a function to register all kernel stubs generated in this module.
+ llvm::Function *makeRegisterKernelsFn();
+
+ /// Helper function that generates a constant string and returns a pointer to
+ /// the start of the string. The result of this function can be used anywhere
+ /// the C code specifies a const char*.
+ llvm::Constant *makeConstantString(const std::string &Str,
+ const std::string &Name = "",
+ unsigned Alignment = 0) {
+ llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0),
+ llvm::ConstantInt::get(SizeTy, 0)};
+ auto ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
+ return llvm::ConstantExpr::getGetElementPtr(ConstStr.getElementType(),
+ ConstStr.getPointer(), Zeros);
+ }
+
+ void emitDeviceStubBody(CodeGenFunction &CGF, FunctionArgList &Args);
+
+public:
+ CGNVCUDARuntime(CodeGenModule &CGM);
+
+ void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) override;
+ /// Creates module constructor function
+ llvm::Function *makeModuleCtorFunction() override;
+ /// Creates module destructor function
+ llvm::Function *makeModuleDtorFunction() override;
+};
+
+}
+
+CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
+ : CGCUDARuntime(CGM), Context(CGM.getLLVMContext()),
+ TheModule(CGM.getModule()) {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+
+ IntTy = Types.ConvertType(Ctx.IntTy);
+ SizeTy = Types.ConvertType(Ctx.getSizeType());
+ VoidTy = llvm::Type::getVoidTy(Context);
+
+ CharPtrTy = llvm::PointerType::getUnqual(Types.ConvertType(Ctx.CharTy));
+ VoidPtrTy = cast<llvm::PointerType>(Types.ConvertType(Ctx.VoidPtrTy));
+ VoidPtrPtrTy = VoidPtrTy->getPointerTo();
+}
+
+llvm::Constant *CGNVCUDARuntime::getSetupArgumentFn() const {
+ // cudaError_t cudaSetupArgument(void *, size_t, size_t)
+ std::vector<llvm::Type*> Params;
+ Params.push_back(VoidPtrTy);
+ Params.push_back(SizeTy);
+ Params.push_back(SizeTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(IntTy,
+ Params, false),
+ "cudaSetupArgument");
+}
+
+llvm::Constant *CGNVCUDARuntime::getLaunchFn() const {
+ // cudaError_t cudaLaunch(char *)
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(IntTy, CharPtrTy, false), "cudaLaunch");
+}
+
+void CGNVCUDARuntime::emitDeviceStub(CodeGenFunction &CGF,
+ FunctionArgList &Args) {
+ EmittedKernels.push_back(CGF.CurFn);
+ emitDeviceStubBody(CGF, Args);
+}
+
+void CGNVCUDARuntime::emitDeviceStubBody(CodeGenFunction &CGF,
+ FunctionArgList &Args) {
+ // Build the argument value list and the argument stack struct type.
+ SmallVector<llvm::Value *, 16> ArgValues;
+ std::vector<llvm::Type *> ArgTypes;
+ for (FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
+ I != E; ++I) {
+ llvm::Value *V = CGF.GetAddrOfLocalVar(*I).getPointer();
+ ArgValues.push_back(V);
+ assert(isa<llvm::PointerType>(V->getType()) && "Arg type not PointerType");
+ ArgTypes.push_back(cast<llvm::PointerType>(V->getType())->getElementType());
+ }
+ llvm::StructType *ArgStackTy = llvm::StructType::get(Context, ArgTypes);
+
+ llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
+
+ // Emit the calls to cudaSetupArgument
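+ // Each call copies one argument onto the CUDA argument stack; a non-zero
+ // return value aborts the remaining setup calls and skips the launch.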
+ llvm::Constant *cudaSetupArgFn = getSetupArgumentFn();
+ for (unsigned I = 0, E = Args.size(); I != E; ++I) {
+ llvm::Value *Args[3];
+ llvm::BasicBlock *NextBlock = CGF.createBasicBlock("setup.next");
+ Args[0] = CGF.Builder.CreatePointerCast(ArgValues[I], VoidPtrTy);
+ Args[1] = CGF.Builder.CreateIntCast(
+ llvm::ConstantExpr::getSizeOf(ArgTypes[I]),
+ SizeTy, false);
+ Args[2] = CGF.Builder.CreateIntCast(
+ llvm::ConstantExpr::getOffsetOf(ArgStackTy, I),
+ SizeTy, false);
+ llvm::CallSite CS = CGF.EmitRuntimeCallOrInvoke(cudaSetupArgFn, Args);
+ llvm::Constant *Zero = llvm::ConstantInt::get(IntTy, 0);
+ llvm::Value *CSZero = CGF.Builder.CreateICmpEQ(CS.getInstruction(), Zero);
+ CGF.Builder.CreateCondBr(CSZero, NextBlock, EndBlock);
+ CGF.EmitBlock(NextBlock);
+ }
+
+ // Emit the call to cudaLaunch
+ llvm::Constant *cudaLaunchFn = getLaunchFn();
+ llvm::Value *Arg = CGF.Builder.CreatePointerCast(CGF.CurFn, CharPtrTy);
+ CGF.EmitRuntimeCallOrInvoke(cudaLaunchFn, Arg);
+ CGF.EmitBranch(EndBlock);
+
+ CGF.EmitBlock(EndBlock);
+}
+
+/// Creates internal function to register all kernel stubs generated in this
+/// module with the CUDA runtime.
+/// \code
+/// void __cuda_register_kernels(void** GpuBinaryHandle) {
+/// __cudaRegisterFunction(GpuBinaryHandle,Kernel0,...);
+/// ...
+/// __cudaRegisterFunction(GpuBinaryHandle,KernelM,...);
+/// }
+/// \endcode
+llvm::Function *CGNVCUDARuntime::makeRegisterKernelsFn() {
+ llvm::Function *RegisterKernelsFunc = llvm::Function::Create(
+ llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
+ llvm::GlobalValue::InternalLinkage, "__cuda_register_kernels", &TheModule);
+ llvm::BasicBlock *EntryBB =
+ llvm::BasicBlock::Create(Context, "entry", RegisterKernelsFunc);
+ CGBuilderTy Builder(CGM, Context);
+ Builder.SetInsertPoint(EntryBB);
+
+ // void __cudaRegisterFunction(void **, const char *, char *, const char *,
+ // int, uint3*, uint3*, dim3*, dim3*, int*)
+ std::vector<llvm::Type *> RegisterFuncParams = {
+ VoidPtrPtrTy, CharPtrTy, CharPtrTy, CharPtrTy, IntTy,
+ VoidPtrTy, VoidPtrTy, VoidPtrTy, VoidPtrTy, IntTy->getPointerTo()};
+ llvm::Constant *RegisterFunc = CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(IntTy, RegisterFuncParams, false),
+ "__cudaRegisterFunction");
+
+ // Extract GpuBinaryHandle, the first argument passed to
+ // __cuda_register_kernels(), and generate a __cudaRegisterFunction() call
+ // for each emitted kernel.
+ llvm::Argument &GpuBinaryHandlePtr = *RegisterKernelsFunc->arg_begin();
+ for (llvm::Function *Kernel : EmittedKernels) {
+ llvm::Constant *KernelName = makeConstantString(Kernel->getName());
+ llvm::Constant *NullPtr = llvm::ConstantPointerNull::get(VoidPtrTy);
+ llvm::Value *args[] = {
+ &GpuBinaryHandlePtr, Builder.CreateBitCast(Kernel, VoidPtrTy),
+ KernelName, KernelName, llvm::ConstantInt::get(IntTy, -1), NullPtr,
+ NullPtr, NullPtr, NullPtr,
+ llvm::ConstantPointerNull::get(IntTy->getPointerTo())};
+ Builder.CreateCall(RegisterFunc, args);
+ }
+
+ Builder.CreateRetVoid();
+ return RegisterKernelsFunc;
+}
+
+/// Creates a global constructor function for the module:
+/// \code
+/// void __cuda_module_ctor(void*) {
+/// Handle0 = __cudaRegisterFatBinary(GpuBinaryBlob0);
+/// __cuda_register_kernels(Handle0);
+/// ...
+/// HandleN = __cudaRegisterFatBinary(GpuBinaryBlobN);
+/// __cuda_register_kernels(HandleN);
+/// }
+/// \endcode
+llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
+ // void __cuda_register_kernels(void ** handle);
+ llvm::Function *RegisterKernelsFunc = makeRegisterKernelsFn();
+ // void ** __cudaRegisterFatBinary(void *);
+ llvm::Constant *RegisterFatbinFunc = CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(VoidPtrPtrTy, VoidPtrTy, false),
+ "__cudaRegisterFatBinary");
+ // struct { int magic; int version; void *gpu_binary; void *dont_care; };
+ llvm::StructType *FatbinWrapperTy =
+ llvm::StructType::get(IntTy, IntTy, VoidPtrTy, VoidPtrTy, nullptr);
+
+ llvm::Function *ModuleCtorFunc = llvm::Function::Create(
+ llvm::FunctionType::get(VoidTy, VoidPtrTy, false),
+ llvm::GlobalValue::InternalLinkage, "__cuda_module_ctor", &TheModule);
+ llvm::BasicBlock *CtorEntryBB =
+ llvm::BasicBlock::Create(Context, "entry", ModuleCtorFunc);
+ CGBuilderTy CtorBuilder(CGM, Context);
+
+ CtorBuilder.SetInsertPoint(CtorEntryBB);
+
+ // For each GPU binary, register it with the CUDA runtime, store the
+ // returned handle in a global variable, and save that handle in the
+ // GpuBinaryHandles vector so the destructor can clean it up on exit. Then
+ // associate all known kernels with the GPU binary handle so the CUDA
+ // runtime can figure out what to call on the GPU side.
+ for (const std::string &GpuBinaryFileName :
+ CGM.getCodeGenOpts().CudaGpuBinaryFileNames) {
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> GpuBinaryOrErr =
+ llvm::MemoryBuffer::getFileOrSTDIN(GpuBinaryFileName);
+ if (std::error_code EC = GpuBinaryOrErr.getError()) {
+ CGM.getDiags().Report(diag::err_cannot_open_file) << GpuBinaryFileName
+ << EC.message();
+ continue;
+ }
+
+ // Create an initialized wrapper structure that points to the loaded GPU binary.
+ llvm::Constant *Values[] = {
+ llvm::ConstantInt::get(IntTy, 0x466243b1), // Fatbin wrapper magic.
+ llvm::ConstantInt::get(IntTy, 1), // Fatbin version.
+ makeConstantString(GpuBinaryOrErr.get()->getBuffer(), "", 16), // Data.
+ llvm::ConstantPointerNull::get(VoidPtrTy)}; // Unused in fatbin v1.
+ llvm::GlobalVariable *FatbinWrapper = new llvm::GlobalVariable(
+ TheModule, FatbinWrapperTy, true, llvm::GlobalValue::InternalLinkage,
+ llvm::ConstantStruct::get(FatbinWrapperTy, Values),
+ "__cuda_fatbin_wrapper");
+
+ // GpuBinaryHandle = __cudaRegisterFatBinary(&FatbinWrapper);
+ llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall(
+ RegisterFatbinFunc,
+ CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy));
+ llvm::GlobalVariable *GpuBinaryHandle = new llvm::GlobalVariable(
+ TheModule, VoidPtrPtrTy, false, llvm::GlobalValue::InternalLinkage,
+ llvm::ConstantPointerNull::get(VoidPtrPtrTy), "__cuda_gpubin_handle");
+ CtorBuilder.CreateAlignedStore(RegisterFatbinCall, GpuBinaryHandle,
+ CGM.getPointerAlign());
+
+ // Call __cuda_register_kernels(GpuBinaryHandle);
+ CtorBuilder.CreateCall(RegisterKernelsFunc, RegisterFatbinCall);
+
+ // Save GpuBinaryHandle so we can unregister it in the destructor.
+ GpuBinaryHandles.push_back(GpuBinaryHandle);
+ }
+
+ CtorBuilder.CreateRetVoid();
+ return ModuleCtorFunc;
+}
+
+/// Creates a global destructor function that unregisters all GPU code blobs
+/// registered by the constructor.
+/// \code
+/// void __cuda_module_dtor(void*) {
+/// __cudaUnregisterFatBinary(Handle0);
+/// ...
+/// __cudaUnregisterFatBinary(HandleN);
+/// }
+/// \endcode
+llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() {
+ // void __cudaUnregisterFatBinary(void ** handle);
+ llvm::Constant *UnregisterFatbinFunc = CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
+ "__cudaUnregisterFatBinary");
+
+ llvm::Function *ModuleDtorFunc = llvm::Function::Create(
+ llvm::FunctionType::get(VoidTy, VoidPtrTy, false),
+ llvm::GlobalValue::InternalLinkage, "__cuda_module_dtor", &TheModule);
+ llvm::BasicBlock *DtorEntryBB =
+ llvm::BasicBlock::Create(Context, "entry", ModuleDtorFunc);
+ CGBuilderTy DtorBuilder(CGM, Context);
+ DtorBuilder.SetInsertPoint(DtorEntryBB);
+
+ for (llvm::GlobalVariable *GpuBinaryHandle : GpuBinaryHandles) {
+ auto HandleValue =
+ DtorBuilder.CreateAlignedLoad(GpuBinaryHandle, CGM.getPointerAlign());
+ DtorBuilder.CreateCall(UnregisterFatbinFunc, HandleValue);
+ }
+
+ DtorBuilder.CreateRetVoid();
+ return ModuleDtorFunc;
+}
+
+CGCUDARuntime *CodeGen::CreateNVCUDARuntime(CodeGenModule &CGM) {
+ return new CGNVCUDARuntime(CGM);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCUDARuntime.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCUDARuntime.cpp
new file mode 100644
index 0000000..014a5db
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCUDARuntime.cpp
@@ -0,0 +1,55 @@
+//===----- CGCUDARuntime.cpp - Interface to CUDA Runtimes -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for CUDA code generation. Concrete
+// subclasses of this implement code generation for specific CUDA
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCUDARuntime.h"
+#include "CGCall.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/ExprCXX.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+CGCUDARuntime::~CGCUDARuntime() {}
+
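+// A kernel call `kernel<<<grid, block>>>(args)` is emitted as a guarded
+// launch: the config expression is evaluated first, and the stub is invoked
+// only if configuration succeeds, roughly (illustrative host code):
+//
+//   if (cudaConfigureCall(grid, block, shmem, stream) == 0)
+//     kernel_stub(args);
+//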
+RValue CGCUDARuntime::EmitCUDAKernelCallExpr(CodeGenFunction &CGF,
+ const CUDAKernelCallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ llvm::BasicBlock *ConfigOKBlock = CGF.createBasicBlock("kcall.configok");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("kcall.end");
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+ CGF.EmitBranchOnBoolExpr(E->getConfig(), ContBlock, ConfigOKBlock,
+ /*TrueCount=*/0);
+
+ eval.begin(CGF);
+ CGF.EmitBlock(ConfigOKBlock);
+
+ const Decl *TargetDecl = nullptr;
+ if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
+ TargetDecl = DRE->getDecl();
+ }
+ }
+
+ llvm::Value *Callee = CGF.EmitScalarExpr(E->getCallee());
+ CGF.EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue, TargetDecl);
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(ContBlock);
+ eval.end(CGF);
+
+ return RValue::get(nullptr);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCUDARuntime.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCUDARuntime.h
new file mode 100644
index 0000000..dcacf97
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCUDARuntime.h
@@ -0,0 +1,65 @@
+//===----- CGCUDARuntime.h - Interface to CUDA Runtimes ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for CUDA code generation. Concrete
+// subclasses of this implement code generation for specific CUDA
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGCUDARUNTIME_H
+#define LLVM_CLANG_LIB_CODEGEN_CGCUDARUNTIME_H
+
+namespace llvm {
+class Function;
+}
+
+namespace clang {
+
+class CUDAKernelCallExpr;
+
+namespace CodeGen {
+
+class CodeGenFunction;
+class CodeGenModule;
+class FunctionArgList;
+class ReturnValueSlot;
+class RValue;
+
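+/// Abstract interface to a CUDA runtime. A sketch of how CodeGenModule
+/// drives it (the call sites are illustrative; the ordering contract is
+/// stated on the members below):
+/// \code
+///   // for every __global__ function:
+///   CudaRuntime->emitDeviceStub(CGF, Args);
+///   // once, after all kernels have been emitted:
+///   if (llvm::Function *Ctor = CudaRuntime->makeModuleCtorFunction())
+///     AddGlobalCtor(Ctor);
+///   if (llvm::Function *Dtor = CudaRuntime->makeModuleDtorFunction())
+///     AddGlobalDtor(Dtor);
+/// \endcode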
+class CGCUDARuntime {
+protected:
+ CodeGenModule &CGM;
+
+public:
+ CGCUDARuntime(CodeGenModule &CGM) : CGM(CGM) {}
+ virtual ~CGCUDARuntime();
+
+ virtual RValue EmitCUDAKernelCallExpr(CodeGenFunction &CGF,
+ const CUDAKernelCallExpr *E,
+ ReturnValueSlot ReturnValue);
+
+ /// Emits a kernel launch stub.
+ virtual void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) = 0;
+
+ /// Constructs and returns a module initialization function or nullptr if it's
+ /// not needed. Must be called after all kernels have been emitted.
+ virtual llvm::Function *makeModuleCtorFunction() = 0;
+
+ /// Returns a module cleanup function or nullptr if it's not needed.
+ /// Must be called after makeModuleCtorFunction().
+ virtual llvm::Function *makeModuleDtorFunction() = 0;
+};
+
+/// Creates an instance of a CUDA runtime class.
+CGCUDARuntime *CreateNVCUDARuntime(CodeGenModule &CGM);
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
new file mode 100644
index 0000000..6847df9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
@@ -0,0 +1,326 @@
+//===--- CGCXX.cpp - Emit LLVM Code for declarations ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation.
+//
+//===----------------------------------------------------------------------===//
+
+// We might split this into multiple files if it gets too unwieldy
+
+#include "CodeGenModule.h"
+#include "CGCXXABI.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Mangle.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/StringExtras.h"
+using namespace clang;
+using namespace CodeGen;
+
+
+/// Try to emit a base destructor as an alias to its primary base-class
+/// destructor. Returns true if the destructor must still be emitted as a
+/// separate definition; returns false if the alias (or a use-replacement)
+/// was successfully produced.
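+///
+/// A minimal case where this applies (illustrative):
+/// \code
+/// struct Base { ~Base(); int x; };
+/// struct Derived : Base {};  // ~Derived (base variant) can alias ~Base
+/// \endcode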
+bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
+ if (!getCodeGenOpts().CXXCtorDtorAliases)
+ return true;
+
+ // Producing an alias to a base class ctor/dtor can degrade debug quality
+ // as the debugger cannot tell them apart.
+ if (getCodeGenOpts().OptimizationLevel == 0)
+ return true;
+
+ // If sanitizing memory to check for use-after-dtor, do not emit as
+ // an alias, unless this class owns no members.
+ if (getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
+ !D->getParent()->field_empty())
+ return true;
+
+ // If the destructor doesn't have a trivial body, we have to emit it
+ // separately.
+ if (!D->hasTrivialBody())
+ return true;
+
+ const CXXRecordDecl *Class = D->getParent();
+
+ // We are going to instrument this destructor, so give up even if it is
+ // currently empty.
+ if (Class->mayInsertExtraPadding())
+ return true;
+
+ // If we need to manipulate a VTT parameter, give up.
+ if (Class->getNumVBases()) {
+ // Extra Credit: passing extra parameters is perfectly safe
+ // in many calling conventions, so only bail out if the ctor's
+ // calling convention is nonstandard.
+ return true;
+ }
+
+ // If any field has a non-trivial destructor, we have to emit the
+ // destructor separately.
+ for (const auto *I : Class->fields())
+ if (I->getType().isDestructedType())
+ return true;
+
+ // Try to find a unique base class with a non-trivial destructor.
+ const CXXRecordDecl *UniqueBase = nullptr;
+ for (const auto &I : Class->bases()) {
+
+ // We're in the base destructor, so skip virtual bases.
+ if (I.isVirtual()) continue;
+
+ // Skip base classes with trivial destructors.
+ const auto *Base =
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
+ if (Base->hasTrivialDestructor()) continue;
+
+ // If we've already found a base class with a non-trivial
+ // destructor, give up.
+ if (UniqueBase) return true;
+ UniqueBase = Base;
+ }
+
+ // If we didn't find any bases with a non-trivial destructor, then
+ // the base destructor is actually effectively trivial, which can
+ // happen if it was needlessly user-defined or if there are virtual
+ // bases with non-trivial destructors.
+ if (!UniqueBase)
+ return true;
+
+ // If the base is at a non-zero offset, give up.
+ const ASTRecordLayout &ClassLayout = Context.getASTRecordLayout(Class);
+ if (!ClassLayout.getBaseClassOffset(UniqueBase).isZero())
+ return true;
+
+ // Give up if the calling conventions don't match. We could update the call,
+ // but it is probably not worth it.
+ const CXXDestructorDecl *BaseD = UniqueBase->getDestructor();
+ if (BaseD->getType()->getAs<FunctionType>()->getCallConv() !=
+ D->getType()->getAs<FunctionType>()->getCallConv())
+ return true;
+
+ return TryEmitDefinitionAsAlias(GlobalDecl(D, Dtor_Base),
+ GlobalDecl(BaseD, Dtor_Base),
+ false);
+}
+
+/// Try to emit a definition as a global alias for another definition.
+/// If \p InEveryTU is true, we know that an equivalent alias can be produced
+/// in every translation unit.
+bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
+ GlobalDecl TargetDecl,
+ bool InEveryTU) {
+ if (!getCodeGenOpts().CXXCtorDtorAliases)
+ return true;
+
+ // The alias will use the linkage of the referent. If we can't
+ // support aliases with that linkage, fail.
+ llvm::GlobalValue::LinkageTypes Linkage = getFunctionLinkage(AliasDecl);
+
+ // We can't use an alias if the linkage is not valid for one.
+ if (!llvm::GlobalAlias::isValidLinkage(Linkage))
+ return true;
+
+ llvm::GlobalValue::LinkageTypes TargetLinkage =
+ getFunctionLinkage(TargetDecl);
+
+ // Check if we have it already.
+ StringRef MangledName = getMangledName(AliasDecl);
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry && !Entry->isDeclaration())
+ return false;
+ if (Replacements.count(MangledName))
+ return false;
+
+ // Derive the type for the alias.
+ llvm::Type *AliasValueType = getTypes().GetFunctionType(AliasDecl);
+ llvm::PointerType *AliasType = AliasValueType->getPointerTo();
+
+ // Find the referent. Some aliases might require a bitcast, in
+ // which case the caller is responsible for ensuring the soundness
+ // of these semantics.
+ auto *Ref = cast<llvm::GlobalValue>(GetAddrOfGlobal(TargetDecl));
+ llvm::Constant *Aliasee = Ref;
+ if (Ref->getType() != AliasType)
+ Aliasee = llvm::ConstantExpr::getBitCast(Ref, AliasType);
+
+ // Instead of creating an alias to a linkonce_odr function, replace all of
+ // the uses of the aliasee.
+ if (llvm::GlobalValue::isDiscardableIfUnused(Linkage) &&
+ (TargetLinkage != llvm::GlobalValue::AvailableExternallyLinkage ||
+ !TargetDecl.getDecl()->hasAttr<AlwaysInlineAttr>())) {
+ // FIXME: An extern template instantiation will create functions with
+ // linkage "AvailableExternally". In libc++, some classes also define
+ // members with attribute "AlwaysInline" and expect no reference to
+ // be generated. It is desirable to re-enable this optimization after
+ // corresponding LLVM changes.
+ Replacements[MangledName] = Aliasee;
+ return false;
+ }
+
+ // If we have a weak, non-discardable alias (weak, weak_odr), like an extern
+ // template instantiation or a dllexported class, avoid forming it on COFF.
+ // A COFF weak external alias cannot satisfy a normal undefined symbol
+ // reference from another TU. The other TU must also mark the referenced
+ // symbol as weak, which we cannot rely on.
+ if (llvm::GlobalValue::isWeakForLinker(Linkage) &&
+ getTriple().isOSBinFormatCOFF()) {
+ return true;
+ }
+
+ if (!InEveryTU) {
+ // If we don't have a definition for the destructor yet, don't
+ // emit. We can't emit aliases to declarations; that's just not
+ // how aliases work.
+ if (Ref->isDeclaration())
+ return true;
+ }
+
+ // Don't create an alias to a linker weak symbol. This avoids producing
+ // different COMDATs in different TUs. Another option would be to
+ // output the alias both for weak_odr and linkonce_odr, but that
+ // requires explicit comdat support in the IR.
+ if (llvm::GlobalValue::isWeakForLinker(TargetLinkage))
+ return true;
+
+ // Create the alias with no name.
+ auto *Alias = llvm::GlobalAlias::create(AliasValueType, 0, Linkage, "",
+ Aliasee, &getModule());
+
+ // Switch any previous uses to the alias.
+ if (Entry) {
+ assert(Entry->getType() == AliasType &&
+ "declaration exists with different type");
+ Alias->takeName(Entry);
+ Entry->replaceAllUsesWith(Alias);
+ Entry->eraseFromParent();
+ } else {
+ Alias->setName(MangledName);
+ }
+
+ // Finally, set up the alias with its proper name and attributes.
+ setAliasAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
+
+ return false;
+}
+
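+// Itanium-style ABIs emit several variants per structor (illustrative
+// mangling suffixes): C1/C2 for the complete/base constructor, and D0/D1/D2
+// for the deleting/complete/base destructor; `Type` selects the variant.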
+llvm::Function *CodeGenModule::codegenCXXStructor(const CXXMethodDecl *MD,
+ StructorType Type) {
+ const CGFunctionInfo &FnInfo =
+ getTypes().arrangeCXXStructorDeclaration(MD, Type);
+ auto *Fn = cast<llvm::Function>(
+ getAddrOfCXXStructor(MD, Type, &FnInfo, /*FnType=*/nullptr,
+ /*DontDefer=*/true, /*IsForDefinition=*/true));
+
+ GlobalDecl GD;
+ if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ GD = GlobalDecl(DD, toCXXDtorType(Type));
+ } else {
+ const auto *CD = cast<CXXConstructorDecl>(MD);
+ GD = GlobalDecl(CD, toCXXCtorType(Type));
+ }
+
+ setFunctionLinkage(GD, Fn);
+ setFunctionDLLStorageClass(GD, Fn);
+
+ CodeGenFunction(*this).GenerateCode(GD, Fn, FnInfo);
+ setFunctionDefinitionAttributes(MD, Fn);
+ SetLLVMFunctionAttributesForDefinition(MD, Fn);
+ return Fn;
+}
+
+llvm::Constant *CodeGenModule::getAddrOfCXXStructor(
+ const CXXMethodDecl *MD, StructorType Type, const CGFunctionInfo *FnInfo,
+ llvm::FunctionType *FnType, bool DontDefer, bool IsForDefinition) {
+ GlobalDecl GD;
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
+ GD = GlobalDecl(CD, toCXXCtorType(Type));
+ } else {
+ GD = GlobalDecl(cast<CXXDestructorDecl>(MD), toCXXDtorType(Type));
+ }
+
+ if (!FnType) {
+ if (!FnInfo)
+ FnInfo = &getTypes().arrangeCXXStructorDeclaration(MD, Type);
+ FnType = getTypes().GetFunctionType(*FnInfo);
+ }
+
+ return GetOrCreateLLVMFunction(
+ getMangledName(GD), FnType, GD, /*ForVTable=*/false, DontDefer,
+ /*isThunk=*/false, /*ExtraAttrs=*/llvm::AttributeSet(), IsForDefinition);
+}
+
+static llvm::Value *BuildAppleKextVirtualCall(CodeGenFunction &CGF,
+ GlobalDecl GD,
+ llvm::Type *Ty,
+ const CXXRecordDecl *RD) {
+ assert(!CGF.CGM.getTarget().getCXXABI().isMicrosoft() &&
+ "No kext in Microsoft ABI");
+ GD = GD.getCanonicalDecl();
+ CodeGenModule &CGM = CGF.CGM;
+ llvm::Value *VTable = CGM.getCXXABI().getAddrOfVTable(RD, CharUnits());
+ Ty = Ty->getPointerTo()->getPointerTo();
+ VTable = CGF.Builder.CreateBitCast(VTable, Ty);
+ assert(VTable && "BuildAppleKextVirtualCall: kext vtable pointer is null");
+ uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
+ uint64_t AddressPoint =
+ CGM.getItaniumVTableContext().getVTableLayout(RD)
+ .getAddressPoint(BaseSubobject(RD, CharUnits::Zero()));
+ VTableIndex += AddressPoint;
+ llvm::Value *VFuncPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
+ return CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.PointerAlignInBytes);
+}
+
+/// BuildAppleKextVirtualCall - This routine supports gcc's kext ABI by making
+/// an indirect call to a virtual function. It makes the call through indexing
+/// into the vtable.
+llvm::Value *
+CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
+ NestedNameSpecifier *Qual,
+ llvm::Type *Ty) {
+ assert((Qual->getKind() == NestedNameSpecifier::TypeSpec) &&
+ "BuildAppleKextVirtualCall - bad Qual kind");
+
+ const Type *QTy = Qual->getAsType();
+ QualType T = QualType(QTy, 0);
+ const RecordType *RT = T->getAs<RecordType>();
+ assert(RT && "BuildAppleKextVirtualCall - Qual type must be record");
+ const auto *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD))
+ return BuildAppleKextVirtualDestructorCall(DD, Dtor_Complete, RD);
+
+ return ::BuildAppleKextVirtualCall(*this, MD, Ty, RD);
+}
+
+/// BuildAppleKextVirtualDestructorCall - This routine makes an indirect vtable
+/// call for a call to a virtual destructor. It returns nullptr if it could
+/// not do it.
+llvm::Value *
+CodeGenFunction::BuildAppleKextVirtualDestructorCall(
+ const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const CXXRecordDecl *RD) {
+ const auto *MD = cast<CXXMethodDecl>(DD);
+ // FIXME: The Dtor_Base destructor is always called directly; it needs to
+ // be inline-expanded into the caller somehow. -O does that, but we need to
+ // support -O0 as well.
+ if (MD->isVirtual() && Type != Dtor_Base) {
+ // Compute the function type we're calling.
+ const CGFunctionInfo &FInfo = CGM.getTypes().arrangeCXXStructorDeclaration(
+ DD, StructorType::Complete);
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(FInfo);
+ return ::BuildAppleKextVirtualCall(*this, GlobalDecl(DD, Type), Ty, RD);
+ }
+ return nullptr;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp
new file mode 100644
index 0000000..e4da447
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp
@@ -0,0 +1,332 @@
+//===----- CGCXXABI.cpp - Interface to C++ ABIs ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for C++ code generation. Concrete subclasses
+// of this implement code generation for specific C++ ABIs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCXXABI.h"
+#include "CGCleanup.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+CGCXXABI::~CGCXXABI() { }
+
+void CGCXXABI::ErrorUnsupportedABI(CodeGenFunction &CGF, StringRef S) {
+ DiagnosticsEngine &Diags = CGF.CGM.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot yet compile %0 in this ABI");
+ Diags.Report(CGF.getContext().getFullLoc(CGF.CurCodeDecl->getLocation()),
+ DiagID)
+ << S;
+}
+
+bool CGCXXABI::canCopyArgument(const CXXRecordDecl *RD) const {
+ // If RD has a non-trivial move or copy constructor, we cannot copy the
+ // argument.
+ if (RD->hasNonTrivialCopyConstructor() || RD->hasNonTrivialMoveConstructor())
+ return false;
+
+ // If RD has a non-trivial destructor, we cannot copy the argument.
+ if (RD->hasNonTrivialDestructor())
+ return false;
+
+ // We can only copy the argument if there exists at least one trivial,
+ // non-deleted copy or move constructor.
+ // FIXME: This assumes that all lazily declared copy and move constructors are
+ // not deleted. This assumption might not be true in some corner cases.
+ bool CopyDeleted = false;
+ bool MoveDeleted = false;
+ for (const CXXConstructorDecl *CD : RD->ctors()) {
+ if (CD->isCopyConstructor() || CD->isMoveConstructor()) {
+ assert(CD->isTrivial());
+ // We had at least one undeleted trivial copy or move ctor. Return
+ // directly.
+ if (!CD->isDeleted())
+ return true;
+ if (CD->isCopyConstructor())
+ CopyDeleted = true;
+ else
+ MoveDeleted = true;
+ }
+ }
+
+ // If all trivial copy and move constructors are deleted, we cannot copy the
+ // argument.
+ return !(CopyDeleted && MoveDeleted);
+}
+
+llvm::Constant *CGCXXABI::GetBogusMemberPointer(QualType T) {
+ return llvm::Constant::getNullValue(CGM.getTypes().ConvertType(T));
+}
+
+llvm::Type *
+CGCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
+ return CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+}
+
+llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(
+ CodeGenFunction &CGF, const Expr *E, Address This,
+ llvm::Value *&ThisPtrForCall,
+ llvm::Value *MemPtr, const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "calls through member pointers");
+
+ ThisPtrForCall = This.getPointer();
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->getAs<FunctionProtoType>();
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
+ CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
+ return llvm::Constant::getNullValue(FTy->getPointerTo());
+}
+
+llvm::Value *
+CGCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
+ Address Base, llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "loads of member pointers");
+ llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())
+ ->getPointerTo(Base.getAddressSpace());
+ return llvm::Constant::getNullValue(Ty);
+}
+
+llvm::Value *CGCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src) {
+ ErrorUnsupportedABI(CGF, "member function pointer conversions");
+ return GetBogusMemberPointer(E->getType());
+}
+
+llvm::Constant *CGCXXABI::EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *Src) {
+ return GetBogusMemberPointer(E->getType());
+}
+
+llvm::Value *
+CGCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality) {
+ ErrorUnsupportedABI(CGF, "member function pointer comparison");
+ return CGF.Builder.getFalse();
+}
+
+llvm::Value *
+CGCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "member function pointer null testing");
+ return CGF.Builder.getFalse();
+}
+
+llvm::Constant *
+CGCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
+ return GetBogusMemberPointer(QualType(MPT, 0));
+}
+
+llvm::Constant *CGCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
+ return GetBogusMemberPointer(CGM.getContext().getMemberPointerType(
+ MD->getType(), MD->getParent()->getTypeForDecl()));
+}
+
+llvm::Constant *CGCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset) {
+ return GetBogusMemberPointer(QualType(MPT, 0));
+}
+
+llvm::Constant *CGCXXABI::EmitMemberPointer(const APValue &MP, QualType MPT) {
+ return GetBogusMemberPointer(MPT);
+}
+
+bool CGCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
+ // Fake answer.
+ return true;
+}
+
+void CGCXXABI::buildThisParam(CodeGenFunction &CGF, FunctionArgList &params) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
+
+ // FIXME: I'm not entirely sure I like using a fake decl just for code
+ // generation. Maybe we can come up with a better way?
+ ImplicitParamDecl *ThisDecl
+ = ImplicitParamDecl::Create(CGM.getContext(), nullptr, MD->getLocation(),
+ &CGM.getContext().Idents.get("this"),
+ MD->getThisType(CGM.getContext()));
+ params.push_back(ThisDecl);
+ CGF.CXXABIThisDecl = ThisDecl;
+
+ // Compute the presumed alignment of 'this', which basically comes
+ // down to whether we know it's a complete object or not.
+ auto &Layout = CGF.getContext().getASTRecordLayout(MD->getParent());
+ if (MD->getParent()->getNumVBases() == 0 || // avoid vcall in common case
+ MD->getParent()->hasAttr<FinalAttr>() ||
+ !isThisCompleteObject(CGF.CurGD)) {
+ CGF.CXXABIThisAlignment = Layout.getAlignment();
+ } else {
+ CGF.CXXABIThisAlignment = Layout.getNonVirtualAlignment();
+ }
+}
+
+void CGCXXABI::EmitThisParam(CodeGenFunction &CGF) {
+ // Initialize the 'this' slot.
+ assert(getThisDecl(CGF) && "no 'this' variable for function");
+ CGF.CXXABIThisValue
+ = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getThisDecl(CGF)),
+ "this");
+}
+
+void CGCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
+ RValue RV, QualType ResultType) {
+ CGF.EmitReturnOfRValue(RV, ResultType);
+}
+
+CharUnits CGCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
+ if (!requiresArrayCookie(expr))
+ return CharUnits::Zero();
+ return getArrayCookieSizeImpl(expr->getAllocatedType());
+}
+
+CharUnits CGCXXABI::getArrayCookieSizeImpl(QualType elementType) {
+ // BOGUS: ABIs that actually use array cookies must override this.
+ return CharUnits::Zero();
+}
+
+Address CGCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ Address NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) {
+ // Should never be called.
+ ErrorUnsupportedABI(CGF, "array cookie initialization");
+ return Address::invalid();
+}
+
+bool CGCXXABI::requiresArrayCookie(const CXXDeleteExpr *expr,
+ QualType elementType) {
+ // If the class's usual deallocation function takes two arguments,
+ // it needs a cookie.
+ if (expr->doesUsualArrayDeleteWantSize())
+ return true;
+
+ return elementType.isDestructedType();
+}
+
+bool CGCXXABI::requiresArrayCookie(const CXXNewExpr *expr) {
+ // If the class's usual deallocation function takes two arguments,
+ // it needs a cookie.
+ if (expr->doesUsualArrayDeleteWantSize())
+ return true;
+
+ return expr->getAllocatedType().isDestructedType();
+}
+
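+// For ABIs that use them, an array cookie sits immediately before the array
+// elements (an illustrative layout; the exact contents are ABI-specific):
+//
+//   allocPtr -> [ cookie (e.g. element count) ][ elt 0 ][ elt 1 ] ...
+//                                              ^ ptr passed to delete[]
+//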
+void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, Address ptr,
+ const CXXDeleteExpr *expr, QualType eltTy,
+ llvm::Value *&numElements,
+ llvm::Value *&allocPtr, CharUnits &cookieSize) {
+ // Derive a char* in the same address space as the pointer.
+ ptr = CGF.Builder.CreateElementBitCast(ptr, CGF.Int8Ty);
+
+ // If we don't need an array cookie, bail out early.
+ if (!requiresArrayCookie(expr, eltTy)) {
+ allocPtr = ptr.getPointer();
+ numElements = nullptr;
+ cookieSize = CharUnits::Zero();
+ return;
+ }
+
+ cookieSize = getArrayCookieSizeImpl(eltTy);
+ Address allocAddr =
+ CGF.Builder.CreateConstInBoundsByteGEP(ptr, -cookieSize);
+ allocPtr = allocAddr.getPointer();
+ numElements = readArrayCookieImpl(CGF, allocAddr, cookieSize);
+}
+
+llvm::Value *CGCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
+ Address ptr,
+ CharUnits cookieSize) {
+ ErrorUnsupportedABI(CGF, "reading a new[] cookie");
+ return llvm::ConstantInt::get(CGF.SizeTy, 0);
+}
+
+/// Returns the adjustment, in bytes, required for the given
+/// member-pointer operation. Returns null if no adjustment is
+/// required.
+llvm::Constant *CGCXXABI::getMemberPointerAdjustment(const CastExpr *E) {
+ assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
+ E->getCastKind() == CK_BaseToDerivedMemberPointer);
+
+ QualType derivedType;
+ if (E->getCastKind() == CK_DerivedToBaseMemberPointer)
+ derivedType = E->getSubExpr()->getType();
+ else
+ derivedType = E->getType();
+
+ const CXXRecordDecl *derivedClass =
+ derivedType->castAs<MemberPointerType>()->getClass()->getAsCXXRecordDecl();
+
+ return CGM.GetNonVirtualBaseClassOffset(derivedClass,
+ E->path_begin(),
+ E->path_end());
+}
+
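+// Illustrative example: converting `int Base::*` to `int Derived::*` where
+// Base lies at offset 8 within Derived yields a path adjustment of 8 bytes;
+// the derived-member direction negates the accumulated offset.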
+CharUnits CGCXXABI::getMemberPointerPathAdjustment(const APValue &MP) {
+ // TODO: Store base specifiers in APValue member pointer paths so we can
+ // easily reuse CGM.GetNonVirtualBaseClassOffset().
+ const ValueDecl *MPD = MP.getMemberPointerDecl();
+ CharUnits ThisAdjustment = CharUnits::Zero();
+ ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
+ bool DerivedMember = MP.isMemberPointerToDerivedMember();
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ const CXXRecordDecl *Base = RD;
+ const CXXRecordDecl *Derived = Path[I];
+ if (DerivedMember)
+ std::swap(Base, Derived);
+ ThisAdjustment +=
+ getContext().getASTRecordLayout(Derived).getBaseClassOffset(Base);
+ RD = Path[I];
+ }
+ if (DerivedMember)
+ ThisAdjustment = -ThisAdjustment;
+ return ThisAdjustment;
+}
+
+llvm::BasicBlock *
+CGCXXABI::EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
+ const CXXRecordDecl *RD) {
+ if (CGM.getTarget().getCXXABI().hasConstructorVariants())
+ llvm_unreachable("shouldn't be called in this ABI");
+
+ ErrorUnsupportedABI(CGF, "complete object detection in ctor");
+ return nullptr;
+}
+
+bool CGCXXABI::NeedsVTTParameter(GlobalDecl GD) {
+ return false;
+}
+
+llvm::CallInst *
+CGCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
+ llvm::Value *Exn) {
+ // Just call std::terminate and ignore the violating exception.
+ return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
+}
+
+CatchTypeInfo CGCXXABI::getCatchAllTypeInfo() {
+ return CatchTypeInfo{nullptr, 0};
+}
+
+std::vector<CharUnits> CGCXXABI::getVBPtrOffsets(const CXXRecordDecl *RD) {
+ return std::vector<CharUnits>();
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
new file mode 100644
index 0000000..3f240b1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
@@ -0,0 +1,572 @@
+//===----- CGCXXABI.h - Interface to C++ ABIs -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for C++ code generation. Concrete subclasses
+// of this implement code generation for specific C++ ABIs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGCXXABI_H
+#define LLVM_CLANG_LIB_CODEGEN_CGCXXABI_H
+
+#include "CodeGenFunction.h"
+#include "clang/Basic/LLVM.h"
+
+namespace llvm {
+class Constant;
+class Type;
+class Value;
+class CallInst;
+}
+
+namespace clang {
+class CastExpr;
+class CXXConstructorDecl;
+class CXXDestructorDecl;
+class CXXMethodDecl;
+class CXXRecordDecl;
+class FieldDecl;
+class MangleContext;
+
+namespace CodeGen {
+class CodeGenFunction;
+class CodeGenModule;
+struct CatchTypeInfo;
+
+/// \brief Implements C++ ABI-specific code generation functions.
+class CGCXXABI {
+protected:
+ CodeGenModule &CGM;
+ std::unique_ptr<MangleContext> MangleCtx;
+
+ CGCXXABI(CodeGenModule &CGM)
+ : CGM(CGM), MangleCtx(CGM.getContext().createMangleContext()) {}
+
+protected:
+ ImplicitParamDecl *getThisDecl(CodeGenFunction &CGF) {
+ return CGF.CXXABIThisDecl;
+ }
+ llvm::Value *getThisValue(CodeGenFunction &CGF) {
+ return CGF.CXXABIThisValue;
+ }
+ Address getThisAddress(CodeGenFunction &CGF) {
+ return Address(CGF.CXXABIThisValue, CGF.CXXABIThisAlignment);
+ }
+
+ /// Issue a diagnostic about unsupported features in the ABI.
+ void ErrorUnsupportedABI(CodeGenFunction &CGF, StringRef S);
+
+ /// Get a null value for unsupported member pointers.
+ llvm::Constant *GetBogusMemberPointer(QualType T);
+
+ ImplicitParamDecl *&getStructorImplicitParamDecl(CodeGenFunction &CGF) {
+ return CGF.CXXStructorImplicitParamDecl;
+ }
+ llvm::Value *&getStructorImplicitParamValue(CodeGenFunction &CGF) {
+ return CGF.CXXStructorImplicitParamValue;
+ }
+
+ /// Perform prolog initialization of the parameter variable suitable
+ /// for 'this' emitted by buildThisParam.
+ void EmitThisParam(CodeGenFunction &CGF);
+
+ ASTContext &getContext() const { return CGM.getContext(); }
+
+ virtual bool requiresArrayCookie(const CXXDeleteExpr *E, QualType eltType);
+ virtual bool requiresArrayCookie(const CXXNewExpr *E);
+
+ /// Determine whether there's something special about the rules of the ABI
+ /// that tells us 'this' is a complete object within the given function.
+ /// Obvious common logic, like being defined on a final class, will have
+ /// been taken care of by the caller.
+ virtual bool isThisCompleteObject(GlobalDecl GD) const = 0;
+
+public:
+
+ virtual ~CGCXXABI();
+
+ /// Gets the mangle context.
+ MangleContext &getMangleContext() {
+ return *MangleCtx;
+ }
+
+ /// Returns true if the given constructor or destructor is one of the
+ /// kinds that the ABI says returns 'this' (only applies when called
+ /// non-virtually for destructors).
+ ///
+ /// There currently is no way to indicate if a destructor returns 'this'
+ /// when called virtually, and code generation does not support the case.
+ virtual bool HasThisReturn(GlobalDecl GD) const { return false; }
+
+ virtual bool hasMostDerivedReturn(GlobalDecl GD) const { return false; }
+
+ /// If the C++ ABI requires the given type be returned in a particular way,
+ /// this method sets RetAI and returns true.
+ virtual bool classifyReturnType(CGFunctionInfo &FI) const = 0;
+
+ /// Specify how one should pass an argument of a record type.
+ enum RecordArgABI {
+ /// Pass it using the normal C aggregate rules for the ABI, potentially
+ /// introducing extra copies and passing some or all of it in registers.
+ RAA_Default = 0,
+
+ /// Pass it on the stack using its defined layout. The argument must be
+ /// evaluated directly into the correct stack position in the arguments area,
+ /// and the call machinery must not move it or introduce extra copies.
+ RAA_DirectInMemory,
+
+ /// Pass it as a pointer to temporary memory.
+ RAA_Indirect
+ };
+
+ /// Returns true if C++ allows us to copy the memory of an object of type RD
+ /// when it is passed as an argument.
+ bool canCopyArgument(const CXXRecordDecl *RD) const;
+
+ /// Returns how an argument of the given record type should be passed.
+ virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const = 0;
+
+ /// Returns true if the implicit 'sret' parameter comes after the implicit
+ /// 'this' parameter of C++ instance methods.
+ virtual bool isSRetParameterAfterThis() const { return false; }
+
+ /// Find the LLVM type used to represent the given member pointer
+ /// type.
+ virtual llvm::Type *
+ ConvertMemberPointerType(const MemberPointerType *MPT);
+
+ /// Load a member function from an object and a member function
+ /// pointer. Apply the this-adjustment and set 'This' to the
+ /// adjusted value.
+ virtual llvm::Value *EmitLoadOfMemberFunctionPointer(
+ CodeGenFunction &CGF, const Expr *E, Address This,
+ llvm::Value *&ThisPtrForCall, llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
+ /// Calculate an l-value from an object and a data member pointer.
+ virtual llvm::Value *
+ EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
+ Address Base, llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
+ /// Perform a derived-to-base, base-to-derived, or bitcast member
+ /// pointer conversion.
+ virtual llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src);
+
+ /// Perform a derived-to-base, base-to-derived, or bitcast member
+ /// pointer conversion on a constant value.
+ virtual llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *Src);
+
+ /// Return true if the given member pointer can be zero-initialized
+ /// (in the C++ sense) with an LLVM zeroinitializer.
+ virtual bool isZeroInitializable(const MemberPointerType *MPT);
+
+ /// Return whether or not a member pointer type is convertible to an IR type.
+ virtual bool isMemberPointerConvertible(const MemberPointerType *MPT) const {
+ return true;
+ }
+
+ /// Create a null member pointer of the given type.
+ virtual llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
+
+ /// Create a member pointer for the given method.
+ virtual llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD);
+
+ /// Create a member pointer for the given field.
+ virtual llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset);
+
+ /// Create a member pointer for the given member pointer constant.
+ virtual llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT);
+
+ /// Emit a comparison between two member pointers. Returns an i1.
+ virtual llvm::Value *
+ EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality);
+
+ /// Determine if a member pointer is non-null. Returns an i1.
+ virtual llvm::Value *
+ EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
+protected:
+ /// A utility method for computing the offset required for the given
+ /// base-to-derived or derived-to-base member-pointer conversion.
+ /// Does not handle virtual conversions (in case we ever fully
+ /// support an ABI that allows this). Returns null if no adjustment
+ /// is required.
+ llvm::Constant *getMemberPointerAdjustment(const CastExpr *E);
+
+ /// \brief Computes the non-virtual adjustment needed for a member pointer
+ /// conversion along an inheritance path stored in an APValue. Unlike
+ /// getMemberPointerAdjustment(), the adjustment can be negative if the path
+ /// is from a derived type to a base type.
+ CharUnits getMemberPointerPathAdjustment(const APValue &MP);
+
+public:
+ virtual void emitVirtualObjectDelete(CodeGenFunction &CGF,
+ const CXXDeleteExpr *DE,
+ Address Ptr, QualType ElementType,
+ const CXXDestructorDecl *Dtor) = 0;
+ virtual void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) = 0;
+ virtual void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) = 0;
+ virtual llvm::GlobalVariable *getThrowInfo(QualType T) { return nullptr; }
+
+ /// \brief Determine whether it's possible to emit a vtable for \p RD, even
+ /// though we do not know that the vtable has been marked as used by semantic
+ /// analysis.
+ virtual bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const = 0;
+
+ virtual void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) = 0;
+
+ virtual llvm::CallInst *
+ emitTerminateForUnexpectedException(CodeGenFunction &CGF,
+ llvm::Value *Exn);
+
+ virtual llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) = 0;
+ virtual CatchTypeInfo
+ getAddrOfCXXCatchHandlerType(QualType Ty, QualType CatchHandlerType) = 0;
+ virtual CatchTypeInfo getCatchAllTypeInfo();
+
+ virtual bool shouldTypeidBeNullChecked(bool IsDeref,
+ QualType SrcRecordTy) = 0;
+ virtual void EmitBadTypeidCall(CodeGenFunction &CGF) = 0;
+ virtual llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
+ Address ThisPtr,
+ llvm::Type *StdTypeInfoPtrTy) = 0;
+
+ virtual bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
+ QualType SrcRecordTy) = 0;
+
+ virtual llvm::Value *
+ EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
+ QualType SrcRecordTy, QualType DestTy,
+ QualType DestRecordTy, llvm::BasicBlock *CastEnd) = 0;
+
+ virtual llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF,
+ Address Value,
+ QualType SrcRecordTy,
+ QualType DestTy) = 0;
+
+ virtual bool EmitBadCastCall(CodeGenFunction &CGF) = 0;
+
+ virtual llvm::Value *GetVirtualBaseClassOffset(CodeGenFunction &CGF,
+ Address This,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl) = 0;
+
+ virtual llvm::BasicBlock *EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
+ const CXXRecordDecl *RD);
+
+ /// Emit the code to initialize hidden members required
+ /// to handle virtual inheritance, if needed by the ABI.
+ virtual void
+ initializeHiddenVirtualInheritanceMembers(CodeGenFunction &CGF,
+ const CXXRecordDecl *RD) {}
+
+ /// Emit constructor variants required by this ABI.
+ virtual void EmitCXXConstructors(const CXXConstructorDecl *D) = 0;
+
+ /// Build the signature of the given constructor or destructor variant by
+ /// adding any required parameters. For convenience, ArgTys has been
+ /// initialized with the type of 'this'.
+ virtual void buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
+ SmallVectorImpl<CanQualType> &ArgTys) = 0;
+
+ /// Returns true if the given destructor type should be emitted as a linkonce
+ /// delegating thunk, regardless of whether the dtor is defined in this TU or
+ /// not.
+ virtual bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
+ CXXDtorType DT) const = 0;
+
+ /// Emit destructor variants required by this ABI.
+ virtual void EmitCXXDestructors(const CXXDestructorDecl *D) = 0;
+
+ /// Get the type of the implicit "this" parameter used by a method. May return
+ /// null if no specific type is applicable, e.g. if the ABI expects the "this"
+ /// parameter to point to some artificial offset in a complete object due to
+ /// vbases being reordered.
+ virtual const CXXRecordDecl *
+ getThisArgumentTypeForMethod(const CXXMethodDecl *MD) {
+ return MD->getParent();
+ }
+
+ /// Perform ABI-specific "this" argument adjustment required prior to
+ /// a call of a virtual function.
+ /// The "VirtualCall" argument is true iff the call itself is virtual.
+ virtual Address
+ adjustThisArgumentForVirtualFunctionCall(CodeGenFunction &CGF, GlobalDecl GD,
+ Address This, bool VirtualCall) {
+ return This;
+ }
+
+ /// Build a parameter variable suitable for 'this'.
+ void buildThisParam(CodeGenFunction &CGF, FunctionArgList &Params);
+
+ /// Insert any ABI-specific implicit parameters into the parameter list for a
+ /// function. This generally involves extra data for constructors and
+ /// destructors.
+ ///
+ /// ABIs may also choose to override the return type, which has been
+ /// initialized with the type of 'this' if HasThisReturn(CGF.CurGD) is true or
+ /// the formal return type of the function otherwise.
+ virtual void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
+ FunctionArgList &Params) = 0;
+
+ /// Perform ABI-specific "this" parameter adjustment in a virtual function
+ /// prologue.
+ virtual llvm::Value *adjustThisParameterInVirtualFunctionPrologue(
+ CodeGenFunction &CGF, GlobalDecl GD, llvm::Value *This) {
+ return This;
+ }
+
+ /// Emit the ABI-specific prolog for the function.
+ virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF) = 0;
+
+ /// Add any ABI-specific implicit arguments needed to call a constructor.
+ ///
+ /// \return The number of args added to the call, which is typically zero or
+ /// one.
+ virtual unsigned
+ addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
+ CXXCtorType Type, bool ForVirtualBase,
+ bool Delegating, CallArgList &Args) = 0;
+
+ /// Emit the destructor call.
+ virtual void EmitDestructorCall(CodeGenFunction &CGF,
+ const CXXDestructorDecl *DD, CXXDtorType Type,
+ bool ForVirtualBase, bool Delegating,
+ Address This) = 0;
+
+ /// Emits the VTable definitions required for the given record type.
+ virtual void emitVTableDefinitions(CodeGenVTables &CGVT,
+ const CXXRecordDecl *RD) = 0;
+
+ /// Checks if the ABI requires an extra virtual offset for the vtable field.
+ virtual bool
+ isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
+ CodeGenFunction::VPtr Vptr) = 0;
+
+ /// Checks if the ABI requires vptrs to be initialized for the given dynamic class.
+ virtual bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) = 0;
+
+ /// Get the address point of the vtable for the given base subobject.
+ virtual llvm::Constant *
+ getVTableAddressPoint(BaseSubobject Base,
+ const CXXRecordDecl *VTableClass) = 0;
+
+ /// Get the address point of the vtable for the given base subobject while
+ /// building a constructor or a destructor.
+ virtual llvm::Value *
+ getVTableAddressPointInStructor(CodeGenFunction &CGF, const CXXRecordDecl *RD,
+ BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase) = 0;
+
+ /// Get the address point of the vtable for the given base subobject while
+ /// building a constexpr.
+ virtual llvm::Constant *
+ getVTableAddressPointForConstExpr(BaseSubobject Base,
+ const CXXRecordDecl *VTableClass) = 0;
+
+ /// Get the address of the vtable for the given record decl which should be
+ /// used for the vptr at the given offset in RD.
+ virtual llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
+ CharUnits VPtrOffset) = 0;
+
+ /// Build a virtual function pointer in the ABI-specific way.
+ virtual llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF,
+ GlobalDecl GD,
+ Address This,
+ llvm::Type *Ty,
+ SourceLocation Loc) = 0;
+
+ /// Emit the ABI-specific virtual destructor call.
+ virtual llvm::Value *
+ EmitVirtualDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *Dtor,
+ CXXDtorType DtorType, Address This,
+ const CXXMemberCallExpr *CE) = 0;
+
+ virtual void adjustCallArgsForDestructorThunk(CodeGenFunction &CGF,
+ GlobalDecl GD,
+ CallArgList &CallArgs) {}
+
+ /// Emit any tables needed to implement virtual inheritance. For Itanium,
+ /// this emits virtual table tables. For the MSVC++ ABI, this emits virtual
+ /// base tables.
+ virtual void emitVirtualInheritanceTables(const CXXRecordDecl *RD) = 0;
+
+ virtual void setThunkLinkage(llvm::Function *Thunk, bool ForVTable,
+ GlobalDecl GD, bool ReturnAdjustment) = 0;
+
+ virtual llvm::Value *performThisAdjustment(CodeGenFunction &CGF,
+ Address This,
+ const ThisAdjustment &TA) = 0;
+
+ virtual llvm::Value *performReturnAdjustment(CodeGenFunction &CGF,
+ Address Ret,
+ const ReturnAdjustment &RA) = 0;
+
+ virtual void EmitReturnFromThunk(CodeGenFunction &CGF,
+ RValue RV, QualType ResultType);
+
+ virtual size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
+ FunctionArgList &Args) const = 0;
+
+ /// Gets the offsets of all the virtual base pointers in a given class.
+ virtual std::vector<CharUnits> getVBPtrOffsets(const CXXRecordDecl *RD);
+
+ /// Gets the pure virtual member call function.
+ virtual StringRef GetPureVirtualCallName() = 0;
+
+ /// Gets the deleted virtual member call name.
+ virtual StringRef GetDeletedVirtualCallName() = 0;
+
+ /**************************** Array cookies ******************************/
+
+ /// Returns the extra size required in order to store the array
+ /// cookie for the given new-expression. May return 0 to indicate that no
+ /// array cookie is required.
+ ///
+ /// Several cases are filtered out before this method is called:
+ /// - non-array allocations never need a cookie
+ /// - calls to \::operator new(size_t, void*) never need a cookie
+ ///
+ /// \param expr - the new-expression being allocated.
+ virtual CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
+
+ /// Initialize the array cookie for the given allocation.
+ ///
+ /// \param NewPtr - a char* which is the presumed-non-null
+ /// return value of the allocation function
+ /// \param NumElements - the computed number of elements,
+ /// potentially collapsed from the multidimensional array case;
+ /// always a size_t
+ /// \param ElementType - the base element allocated type,
+ /// i.e. the allocated type after stripping all array types
+ virtual Address InitializeArrayCookie(CodeGenFunction &CGF,
+ Address NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType);
+
+ /// Reads the array cookie associated with the given pointer,
+ /// if it has one.
+ ///
+ /// \param Ptr - a pointer to the first element in the array
+ /// \param ElementType - the base element type of elements of the array
+ /// \param NumElements - an out parameter which will be initialized
+ /// with the number of elements allocated, or zero if there is no
+ /// cookie
+ /// \param AllocPtr - an out parameter which will be initialized
+ /// with a char* pointing to the address returned by the allocation
+ /// function
+ /// \param CookieSize - an out parameter which will be initialized
+ /// with the size of the cookie, or zero if there is no cookie
+ virtual void ReadArrayCookie(CodeGenFunction &CGF, Address Ptr,
+ const CXXDeleteExpr *expr,
+ QualType ElementType, llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize);
+
+ /// Return whether the given global decl needs a VTT parameter.
+ virtual bool NeedsVTTParameter(GlobalDecl GD);
+
+protected:
+ /// Returns the extra size required in order to store the array
+ /// cookie for the given type. Assumes that an array cookie is
+ /// required.
+ virtual CharUnits getArrayCookieSizeImpl(QualType elementType);
+
+ /// Reads the array cookie for an allocation which is known to have one.
+ /// This is called by the standard implementation of ReadArrayCookie.
+ ///
+ /// \param ptr - a pointer to the allocation made for an array, as a char*
+ /// \param cookieSize - the computed cookie size of an array
+ ///
+ /// Other parameters are as above.
+ ///
+ /// \return a size_t
+ virtual llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address ptr,
+ CharUnits cookieSize);
+
+public:
+
+ /*************************** Static local guards ****************************/
+
+ /// Emits the guarded initializer and destructor setup for the given
+ /// variable, given that it couldn't be emitted as a constant.
+ /// If \p PerformInit is false, the initialization has been folded to a
+ /// constant and should not be performed.
+ ///
+ /// The variable may be:
+ /// - a static local variable
+ /// - a static data member of a class template instantiation
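+ ///
+ /// For example, the Itanium ABI lowers a guarded static local roughly as
+ /// (a sketch; the exact emission is ABI-specific):
+ /// \code
+ ///   if (__cxa_guard_acquire(&guard)) {
+ ///     /* run the initializer, register the dtor via __cxa_atexit */
+ ///     __cxa_guard_release(&guard);
+ ///   }
+ /// \endcode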
+ virtual void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::GlobalVariable *DeclPtr,
+ bool PerformInit) = 0;
+
+ /// Emit code to force the execution of a destructor during global
+ /// teardown. The default implementation of this uses atexit.
+ ///
+ /// \param Dtor - a function taking a single pointer argument
+ /// \param Addr - a pointer to pass to the destructor function.
+ virtual void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *Dtor,
+ llvm::Constant *Addr) = 0;
+
+ /*************************** thread_local initialization ********************/
+
+ /// Emits ABI-required functions necessary to initialize thread_local
+ /// variables in this translation unit.
+ ///
+ /// \param CXXThreadLocals - The thread_local declarations in this translation
+ /// unit.
+ /// \param CXXThreadLocalInits - If this translation unit contains any
+ /// non-constant initialization or non-trivial destruction for
+ /// thread_local variables, a list of functions to perform the
+ /// initialization.
+ virtual void EmitThreadLocalInitFuncs(
+ CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
+ ArrayRef<llvm::Function *> CXXThreadLocalInits,
+ ArrayRef<const VarDecl *> CXXThreadLocalInitVars) = 0;
+
+ // Determine if references to thread_local global variables can be made
+ // directly or require access through a thread wrapper function.
+ virtual bool usesThreadWrapperFunction() const = 0;
+
+ /// Emit a reference to a non-local thread_local variable (including
+ /// triggering the initialization of all thread_local variables in its
+ /// translation unit).
+ virtual LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
+ const VarDecl *VD,
+ QualType LValType) = 0;
+
+ /// Emit a single constructor/destructor with the given type from a C++
+ /// constructor Decl.
+ virtual void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) = 0;
+};
+
+// Create an instance of a C++ ABI class:
+
+/// Creates an Itanium-family ABI.
+CGCXXABI *CreateItaniumCXXABI(CodeGenModule &CGM);
+
+/// Creates a Microsoft-family ABI.
+CGCXXABI *CreateMicrosoftCXXABI(CodeGenModule &CGM);
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
new file mode 100644
index 0000000..9359850
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
@@ -0,0 +1,3687 @@
+//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliance.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCall.h"
+#include "ABIInfo.h"
+#include "CGCXXABI.h"
+#include "CGCleanup.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "TargetInfo.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/Transforms/Utils/Local.h"
+using namespace clang;
+using namespace CodeGen;
+
+/***/
+
+static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
+ switch (CC) {
+ default: return llvm::CallingConv::C;
+ case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
+ case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
+ case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
+ case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
+ case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
+ case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
+ case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
+ case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
+ // TODO: Add support for __pascal to LLVM.
+ case CC_X86Pascal: return llvm::CallingConv::C;
+ // TODO: Add support for __vectorcall to LLVM.
+ case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
+ case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
+ case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL;
+ }
+}
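+
+// For illustration (hypothetical declaration, not from this file): a function
+// declared as
+//   void f(int) __attribute__((stdcall));
+// carries CC_X86StdCall in its FunctionType's ExtInfo, which the switch above
+// lowers to llvm::CallingConv::X86_StdCall on the emitted IR function.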
+
+/// Derives the 'this' type for codegen purposes, i.e. ignoring method
+/// qualification.
+/// FIXME: address space qualification?
+static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
+ QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
+ return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
+}
+
+/// Returns the canonical formal type of the given C++ method.
+static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
+ return MD->getType()->getCanonicalTypeUnqualified()
+ .getAs<FunctionProtoType>();
+}
+
+/// Returns the "extra-canonicalized" return type, which discards
+/// qualifiers on the return type. Codegen doesn't care about them,
+/// and it makes ABI code a little easier to be able to assume that
+/// all parameter and return types are top-level unqualified.
+static CanQualType GetReturnType(QualType RetTy) {
+ return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
+}
+
+/// Arrange the argument and result information for a value of the given
+/// unprototyped freestanding function type.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
+ // When translating an unprototyped function type, always use a
+ // variadic type.
+ return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
+ /*instanceMethod=*/false,
+ /*chainCall=*/false, None,
+ FTNP->getExtInfo(), RequiredArgs(0));
+}
+
+/// Adds the formal parameters in FPT to the given prefix. If any parameter in
+/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
+static void appendParameterTypes(const CodeGenTypes &CGT,
+ SmallVectorImpl<CanQualType> &prefix,
+ const CanQual<FunctionProtoType> &FPT,
+ const FunctionDecl *FD) {
+ // Fast path: unknown target.
+ if (FD == nullptr) {
+ prefix.append(FPT->param_type_begin(), FPT->param_type_end());
+ return;
+ }
+
+  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
+ // parameters; the only thing that can change this is the presence of
+ // pass_object_size. So, we preallocate for the common case.
+ prefix.reserve(prefix.size() + FPT->getNumParams());
+
+ assert(FD->getNumParams() == FPT->getNumParams());
+ for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
+ prefix.push_back(FPT->getParamType(I));
+ if (FD->getParamDecl(I)->hasAttr<PassObjectSizeAttr>())
+ prefix.push_back(CGT.getContext().getSizeType());
+ }
+}
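+
+// For illustration, assuming a hypothetical declaration such as
+//   void fill(char *const buf __attribute__((pass_object_size(0))));
+// the loop above appends the declared parameter type (char *const) followed
+// by an extra size_t parameter that carries the object size computed at each
+// call site.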
+
+/// Arrange the LLVM function layout for a value of the given function
+/// type, on top of any implicit parameters already stored.
+static const CGFunctionInfo &
+arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
+ SmallVectorImpl<CanQualType> &prefix,
+ CanQual<FunctionProtoType> FTP,
+ const FunctionDecl *FD) {
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
+ // FIXME: Kill copy.
+ appendParameterTypes(CGT, prefix, FTP, FD);
+ CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
+ return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
+ /*chainCall=*/false, prefix,
+ FTP->getExtInfo(), required);
+}
+
+/// Arrange the argument and result information for a value of the
+/// given freestanding function type.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
+ const FunctionDecl *FD) {
+ SmallVector<CanQualType, 16> argTypes;
+ return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
+ FTP, FD);
+}
+
+static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
+ // Set the appropriate calling convention for the Function.
+ if (D->hasAttr<StdCallAttr>())
+ return CC_X86StdCall;
+
+ if (D->hasAttr<FastCallAttr>())
+ return CC_X86FastCall;
+
+ if (D->hasAttr<ThisCallAttr>())
+ return CC_X86ThisCall;
+
+ if (D->hasAttr<VectorCallAttr>())
+ return CC_X86VectorCall;
+
+ if (D->hasAttr<PascalAttr>())
+ return CC_X86Pascal;
+
+ if (PcsAttr *PCS = D->getAttr<PcsAttr>())
+ return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
+
+ if (D->hasAttr<IntelOclBiccAttr>())
+ return CC_IntelOclBicc;
+
+ if (D->hasAttr<MSABIAttr>())
+ return IsWindows ? CC_C : CC_X86_64Win64;
+
+ if (D->hasAttr<SysVABIAttr>())
+ return IsWindows ? CC_X86_64SysV : CC_C;
+
+ return CC_C;
+}
+
+/// Arrange the argument and result information for a call to an
+/// unknown C++ non-static member function of the given abstract type.
+/// (If RD is null, we don't have any meaningful "this" argument type, so we
+/// fall back to a generic pointer type.)
+/// The member function must be an ordinary function, i.e. not a
+/// constructor or destructor.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
+ const FunctionProtoType *FTP,
+ const CXXMethodDecl *MD) {
+ SmallVector<CanQualType, 16> argTypes;
+
+ // Add the 'this' pointer.
+ if (RD)
+ argTypes.push_back(GetThisType(Context, RD));
+ else
+ argTypes.push_back(Context.VoidPtrTy);
+
+ return ::arrangeLLVMFunctionInfo(
+ *this, true, argTypes,
+ FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
+}
+
+/// Arrange the argument and result information for a declaration or
+/// definition of the given C++ non-static member function. The
+/// member function must be an ordinary function, i.e. not a
+/// constructor or destructor.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
+ assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
+ assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
+
+ CanQual<FunctionProtoType> prototype = GetFormalType(MD);
+
+ if (MD->isInstance()) {
+ // The abstract case is perfectly fine.
+ const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
+ return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
+ }
+
+ return arrangeFreeFunctionType(prototype, MD);
+}
+
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
+ StructorType Type) {
+
+ SmallVector<CanQualType, 16> argTypes;
+ argTypes.push_back(GetThisType(Context, MD->getParent()));
+
+ GlobalDecl GD;
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
+ GD = GlobalDecl(CD, toCXXCtorType(Type));
+ } else {
+ auto *DD = dyn_cast<CXXDestructorDecl>(MD);
+ GD = GlobalDecl(DD, toCXXDtorType(Type));
+ }
+
+ CanQual<FunctionProtoType> FTP = GetFormalType(MD);
+
+ // Add the formal parameters.
+ appendParameterTypes(*this, argTypes, FTP, MD);
+
+ TheCXXABI.buildStructorSignature(MD, Type, argTypes);
+
+ RequiredArgs required =
+ (MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);
+
+ FunctionType::ExtInfo extInfo = FTP->getExtInfo();
+ CanQualType resultType = TheCXXABI.HasThisReturn(GD)
+ ? argTypes.front()
+ : TheCXXABI.hasMostDerivedReturn(GD)
+ ? CGM.getContext().VoidPtrTy
+ : Context.VoidTy;
+ return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
+ /*chainCall=*/false, argTypes, extInfo,
+ required);
+}
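+
+// Sketch of the result-type logic above, taking the Microsoft C++ ABI as an
+// example: constructors there return 'this', so for a hypothetical
+//   struct S { S(); };
+// HasThisReturn is true and the structor is arranged to return S*, whereas
+// under the generic Itanium ABI the same structor would return void.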
+
+/// Arrange a call to a C++ method, passing the given arguments.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
+ const CXXConstructorDecl *D,
+ CXXCtorType CtorKind,
+ unsigned ExtraArgs) {
+ // FIXME: Kill copy.
+ SmallVector<CanQualType, 16> ArgTypes;
+ for (const auto &Arg : args)
+ ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
+
+ CanQual<FunctionProtoType> FPT = GetFormalType(D);
+ RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
+ GlobalDecl GD(D, CtorKind);
+ CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
+ ? ArgTypes.front()
+ : TheCXXABI.hasMostDerivedReturn(GD)
+ ? CGM.getContext().VoidPtrTy
+ : Context.VoidTy;
+
+ FunctionType::ExtInfo Info = FPT->getExtInfo();
+ return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
+ /*chainCall=*/false, ArgTypes, Info,
+ Required);
+}
+
+/// Arrange the argument and result information for the declaration or
+/// definition of the given function.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
+ if (MD->isInstance())
+ return arrangeCXXMethodDeclaration(MD);
+
+ CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
+
+ assert(isa<FunctionType>(FTy));
+
+ // When declaring a function without a prototype, always use a
+ // non-variadic type.
+ if (isa<FunctionNoProtoType>(FTy)) {
+ CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
+ return arrangeLLVMFunctionInfo(
+ noProto->getReturnType(), /*instanceMethod=*/false,
+ /*chainCall=*/false, None, noProto->getExtInfo(), RequiredArgs::All);
+ }
+
+ assert(isa<FunctionProtoType>(FTy));
+ return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>(), FD);
+}
+
+/// Arrange the argument and result information for the declaration or
+/// definition of an Objective-C method.
+const CGFunctionInfo &
+CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
+ // It happens that this is the same as a call with no optional
+ // arguments, except also using the formal 'self' type.
+ return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
+}
+
+/// Arrange the argument and result information for the function type
+/// through which to perform a send to the given Objective-C method,
+/// using the given receiver type. The receiver type is not always
+/// the 'self' type of the method or even an Objective-C pointer type.
+/// This is *not* the right method for actually performing such a
+/// message send, due to the possibility of optional arguments.
+const CGFunctionInfo &
+CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
+ QualType receiverType) {
+ SmallVector<CanQualType, 16> argTys;
+ argTys.push_back(Context.getCanonicalParamType(receiverType));
+ argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
+ // FIXME: Kill copy?
+ for (const auto *I : MD->params()) {
+ argTys.push_back(Context.getCanonicalParamType(I->getType()));
+ }
+
+ FunctionType::ExtInfo einfo;
+ bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
+ einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
+
+ if (getContext().getLangOpts().ObjCAutoRefCount &&
+ MD->hasAttr<NSReturnsRetainedAttr>())
+ einfo = einfo.withProducesResult(true);
+
+ RequiredArgs required =
+ (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
+
+ return arrangeLLVMFunctionInfo(
+ GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
+ /*chainCall=*/false, argTys, einfo, required);
+}
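+
+// For illustration with a hypothetical method
+//   - (int)add:(int)x to:(int)y;
+// the signature arranged above takes (self, SEL _cmd, int x, int y) and
+// returns int; the two implicit arguments come first, followed by the formal
+// parameters collected from MD->params().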
+
+const CGFunctionInfo &
+CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
+ // FIXME: Do we need to handle ObjCMethodDecl?
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
+ return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
+ return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));
+
+ return arrangeFunctionDeclaration(FD);
+}
+
+/// Arrange a thunk that takes 'this' as the first parameter followed by
+/// varargs. Return a void pointer, regardless of the actual return type.
+/// The body of the thunk will end in a musttail call to a function of the
+/// correct type, and the caller will bitcast the function to the correct
+/// prototype.
+const CGFunctionInfo &
+CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
+ assert(MD->isVirtual() && "only virtual memptrs have thunks");
+ CanQual<FunctionProtoType> FTP = GetFormalType(MD);
+ CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
+ return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
+ /*chainCall=*/false, ArgTys,
+ FTP->getExtInfo(), RequiredArgs(1));
+}
+
+const CGFunctionInfo &
+CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
+ CXXCtorType CT) {
+ assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
+
+ CanQual<FunctionProtoType> FTP = GetFormalType(CD);
+ SmallVector<CanQualType, 2> ArgTys;
+ const CXXRecordDecl *RD = CD->getParent();
+ ArgTys.push_back(GetThisType(Context, RD));
+ if (CT == Ctor_CopyingClosure)
+ ArgTys.push_back(*FTP->param_type_begin());
+ if (RD->getNumVBases() > 0)
+ ArgTys.push_back(Context.IntTy);
+ CallingConv CC = Context.getDefaultCallingConvention(
+ /*IsVariadic=*/false, /*IsCXXMethod=*/true);
+ return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
+ /*chainCall=*/false, ArgTys,
+ FunctionType::ExtInfo(CC), RequiredArgs::All);
+}
+
+/// Arrange a call as unto a free function, except possibly with an
+/// additional number of formal parameters considered required.
+static const CGFunctionInfo &
+arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
+ CodeGenModule &CGM,
+ const CallArgList &args,
+ const FunctionType *fnType,
+ unsigned numExtraRequiredArgs,
+ bool chainCall) {
+ assert(args.size() >= numExtraRequiredArgs);
+
+ // In most cases, there are no optional arguments.
+ RequiredArgs required = RequiredArgs::All;
+
+ // If we have a variadic prototype, the required arguments are the
+ // extra prefix plus the arguments in the prototype.
+ if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
+ if (proto->isVariadic())
+ required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);
+
+ // If we don't have a prototype at all, but we're supposed to
+ // explicitly use the variadic convention for unprototyped calls,
+ // treat all of the arguments as required but preserve the nominal
+ // possibility of variadics.
+ } else if (CGM.getTargetCodeGenInfo()
+ .isNoProtoCallVariadic(args,
+ cast<FunctionNoProtoType>(fnType))) {
+ required = RequiredArgs(args.size());
+ }
+
+ // FIXME: Kill copy.
+ SmallVector<CanQualType, 16> argTypes;
+ for (const auto &arg : args)
+ argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
+ return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
+ /*instanceMethod=*/false, chainCall,
+ argTypes, fnType->getExtInfo(), required);
+}
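+
+// For illustration: given a hypothetical variadic prototype
+//   int log_msg(const char *fmt, ...);
+// required becomes RequiredArgs(1 + numExtraRequiredArgs), so trailing
+// arguments stay variadic; a call through a no-prototype type may instead
+// mark every argument required on targets whose unprototyped calls use the
+// variadic convention.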
+
+/// Figure out the rules for calling a function with the given formal
+/// type using the given arguments. The arguments are necessary
+/// because the function might be unprototyped, in which case it's
+/// target-dependent in crazy ways.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
+ const FunctionType *fnType,
+ bool chainCall) {
+ return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
+ chainCall ? 1 : 0, chainCall);
+}
+
+/// A block function call is essentially a free-function call with an
+/// extra implicit argument.
+const CGFunctionInfo &
+CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
+ const FunctionType *fnType) {
+ return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
+ /*chainCall=*/false);
+}
+
+const CGFunctionInfo &
+CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
+ const CallArgList &args,
+ FunctionType::ExtInfo info,
+ RequiredArgs required) {
+ // FIXME: Kill copy.
+ SmallVector<CanQualType, 16> argTypes;
+ for (const auto &Arg : args)
+ argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
+ return arrangeLLVMFunctionInfo(
+ GetReturnType(resultType), /*instanceMethod=*/false,
+ /*chainCall=*/false, argTypes, info, required);
+}
+
+/// Arrange a call to a C++ method, passing the given arguments.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
+ const FunctionProtoType *FPT,
+ RequiredArgs required) {
+ // FIXME: Kill copy.
+ SmallVector<CanQualType, 16> argTypes;
+ for (const auto &Arg : args)
+ argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
+
+ FunctionType::ExtInfo info = FPT->getExtInfo();
+ return arrangeLLVMFunctionInfo(
+ GetReturnType(FPT->getReturnType()), /*instanceMethod=*/true,
+ /*chainCall=*/false, argTypes, info, required);
+}
+
+const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
+ QualType resultType, const FunctionArgList &args,
+ const FunctionType::ExtInfo &info, bool isVariadic) {
+ // FIXME: Kill copy.
+ SmallVector<CanQualType, 16> argTypes;
+ for (auto Arg : args)
+ argTypes.push_back(Context.getCanonicalParamType(Arg->getType()));
+
+ RequiredArgs required =
+ (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(
+ GetReturnType(resultType), /*instanceMethod=*/false,
+ /*chainCall=*/false, argTypes, info, required);
+}
+
+const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
+ return arrangeLLVMFunctionInfo(
+ getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
+ None, FunctionType::ExtInfo(), RequiredArgs::All);
+}
+
+/// Arrange the argument and result information for an abstract value
+/// of a given function type. This is the method which all of the
+/// above functions ultimately defer to.
+const CGFunctionInfo &
+CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
+ bool instanceMethod,
+ bool chainCall,
+ ArrayRef<CanQualType> argTypes,
+ FunctionType::ExtInfo info,
+ RequiredArgs required) {
+ assert(std::all_of(argTypes.begin(), argTypes.end(),
+ std::mem_fun_ref(&CanQualType::isCanonicalAsParam)));
+
+ unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
+
+ // Lookup or create unique function info.
+ llvm::FoldingSetNodeID ID;
+ CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, required,
+ resultType, argTypes);
+
+ void *insertPos = nullptr;
+ CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
+ if (FI)
+ return *FI;
+
+ // Construct the function info. We co-allocate the ArgInfos.
+ FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
+ resultType, argTypes, required);
+ FunctionInfos.InsertNode(FI, insertPos);
+
+ bool inserted = FunctionsBeingProcessed.insert(FI).second;
+ (void)inserted;
+ assert(inserted && "Recursively being processed?");
+
+ // Compute ABI information.
+ getABIInfo().computeInfo(*FI);
+
+ // Loop over all of the computed argument and return value info. If any of
+ // them are direct or extend without a specified coerce type, specify the
+ // default now.
+ ABIArgInfo &retInfo = FI->getReturnInfo();
+ if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
+ retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
+
+ for (auto &I : FI->arguments())
+ if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
+ I.info.setCoerceToType(ConvertType(I.type));
+
+ bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
+ assert(erased && "Not in set?");
+
+ return *FI;
+}
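+
+// Note on the caching above: CGFunctionInfo nodes are interned in a
+// FoldingSet keyed on (instanceMethod, chainCall, ExtInfo, required args,
+// result type, argument types), so arranging the same signature twice
+// returns the single ABI-lowered record instead of recomputing it.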
+
+CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
+ bool instanceMethod,
+ bool chainCall,
+ const FunctionType::ExtInfo &info,
+ CanQualType resultType,
+ ArrayRef<CanQualType> argTypes,
+ RequiredArgs required) {
+ void *buffer = operator new(sizeof(CGFunctionInfo) +
+ sizeof(ArgInfo) * (argTypes.size() + 1));
+ CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
+ FI->CallingConvention = llvmCC;
+ FI->EffectiveCallingConvention = llvmCC;
+ FI->ASTCallingConvention = info.getCC();
+ FI->InstanceMethod = instanceMethod;
+ FI->ChainCall = chainCall;
+ FI->NoReturn = info.getNoReturn();
+ FI->ReturnsRetained = info.getProducesResult();
+ FI->Required = required;
+ FI->HasRegParm = info.getHasRegParm();
+ FI->RegParm = info.getRegParm();
+ FI->ArgStruct = nullptr;
+ FI->ArgStructAlign = 0;
+ FI->NumArgs = argTypes.size();
+ FI->getArgsBuffer()[0].type = resultType;
+ for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
+ FI->getArgsBuffer()[i + 1].type = argTypes[i];
+ return FI;
+}
+
+/***/
+
+namespace {
+// ABIArgInfo::Expand implementation.
+
+// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
+struct TypeExpansion {
+ enum TypeExpansionKind {
+ // Elements of constant arrays are expanded recursively.
+ TEK_ConstantArray,
+ // Record fields are expanded recursively (but if record is a union, only
+ // the field with the largest size is expanded).
+ TEK_Record,
+ // For complex types, real and imaginary parts are expanded recursively.
+ TEK_Complex,
+ // All other types are not expandable.
+ TEK_None
+ };
+
+ const TypeExpansionKind Kind;
+
+ TypeExpansion(TypeExpansionKind K) : Kind(K) {}
+ virtual ~TypeExpansion() {}
+};
+
+struct ConstantArrayExpansion : TypeExpansion {
+ QualType EltTy;
+ uint64_t NumElts;
+
+ ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
+ : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
+ static bool classof(const TypeExpansion *TE) {
+ return TE->Kind == TEK_ConstantArray;
+ }
+};
+
+struct RecordExpansion : TypeExpansion {
+ SmallVector<const CXXBaseSpecifier *, 1> Bases;
+
+ SmallVector<const FieldDecl *, 1> Fields;
+
+ RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
+ SmallVector<const FieldDecl *, 1> &&Fields)
+ : TypeExpansion(TEK_Record), Bases(Bases), Fields(Fields) {}
+ static bool classof(const TypeExpansion *TE) {
+ return TE->Kind == TEK_Record;
+ }
+};
+
+struct ComplexExpansion : TypeExpansion {
+ QualType EltTy;
+
+ ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
+ static bool classof(const TypeExpansion *TE) {
+ return TE->Kind == TEK_Complex;
+ }
+};
+
+struct NoExpansion : TypeExpansion {
+ NoExpansion() : TypeExpansion(TEK_None) {}
+ static bool classof(const TypeExpansion *TE) {
+ return TE->Kind == TEK_None;
+ }
+};
+} // namespace
+
+static std::unique_ptr<TypeExpansion>
+getTypeExpansion(QualType Ty, const ASTContext &Context) {
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+ return llvm::make_unique<ConstantArrayExpansion>(
+ AT->getElementType(), AT->getSize().getZExtValue());
+ }
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ SmallVector<const CXXBaseSpecifier *, 1> Bases;
+ SmallVector<const FieldDecl *, 1> Fields;
+ const RecordDecl *RD = RT->getDecl();
+ assert(!RD->hasFlexibleArrayMember() &&
+ "Cannot expand structure with flexible array.");
+ if (RD->isUnion()) {
+      // Unions can appear here only in degenerate cases - all the fields are
+      // the same after flattening. Thus we have to use the "largest" field.
+ const FieldDecl *LargestFD = nullptr;
+ CharUnits UnionSize = CharUnits::Zero();
+
+ for (const auto *FD : RD->fields()) {
+ // Skip zero length bitfields.
+ if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
+ continue;
+ assert(!FD->isBitField() &&
+ "Cannot expand structure with bit-field members.");
+ CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
+ if (UnionSize < FieldSize) {
+ UnionSize = FieldSize;
+ LargestFD = FD;
+ }
+ }
+ if (LargestFD)
+ Fields.push_back(LargestFD);
+ } else {
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ assert(!CXXRD->isDynamicClass() &&
+ "cannot expand vtable pointers in dynamic classes");
+ for (const CXXBaseSpecifier &BS : CXXRD->bases())
+ Bases.push_back(&BS);
+ }
+
+ for (const auto *FD : RD->fields()) {
+ // Skip zero length bitfields.
+ if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
+ continue;
+ assert(!FD->isBitField() &&
+ "Cannot expand structure with bit-field members.");
+ Fields.push_back(FD);
+ }
+ }
+ return llvm::make_unique<RecordExpansion>(std::move(Bases),
+ std::move(Fields));
+ }
+ if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ return llvm::make_unique<ComplexExpansion>(CT->getElementType());
+ }
+ return llvm::make_unique<NoExpansion>();
+}
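+
+// For illustration, a hypothetical record such as
+//   struct P { int xy[2]; _Complex float w; };
+// expands as TEK_Record over its fields; the array field expands as
+// TEK_ConstantArray over two ints, the complex field as TEK_Complex over its
+// real and imaginary parts, and a plain int bottoms out at TEK_None.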
+
+static int getExpansionSize(QualType Ty, const ASTContext &Context) {
+ auto Exp = getTypeExpansion(Ty, Context);
+ if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
+ return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
+ }
+ if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
+ int Res = 0;
+ for (auto BS : RExp->Bases)
+ Res += getExpansionSize(BS->getType(), Context);
+ for (auto FD : RExp->Fields)
+ Res += getExpansionSize(FD->getType(), Context);
+ return Res;
+ }
+ if (isa<ComplexExpansion>(Exp.get()))
+ return 2;
+ assert(isa<NoExpansion>(Exp.get()));
+ return 1;
+}
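+
+// Worked example, continuing the hypothetical struct P above: the int array
+// contributes 2 and the _Complex float contributes 2, so getExpansionSize
+// returns 4 expanded IR arguments in total.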
+
+void
+CodeGenTypes::getExpandedTypes(QualType Ty,
+ SmallVectorImpl<llvm::Type *>::iterator &TI) {
+ auto Exp = getTypeExpansion(Ty, Context);
+ if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
+ for (int i = 0, n = CAExp->NumElts; i < n; i++) {
+ getExpandedTypes(CAExp->EltTy, TI);
+ }
+ } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
+ for (auto BS : RExp->Bases)
+ getExpandedTypes(BS->getType(), TI);
+ for (auto FD : RExp->Fields)
+ getExpandedTypes(FD->getType(), TI);
+ } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
+ llvm::Type *EltTy = ConvertType(CExp->EltTy);
+ *TI++ = EltTy;
+ *TI++ = EltTy;
+ } else {
+ assert(isa<NoExpansion>(Exp.get()));
+ *TI++ = ConvertType(Ty);
+ }
+}
+
+static void forConstantArrayExpansion(CodeGenFunction &CGF,
+ ConstantArrayExpansion *CAE,
+ Address BaseAddr,
+ llvm::function_ref<void(Address)> Fn) {
+ CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
+ CharUnits EltAlign =
+ BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
+
+ for (int i = 0, n = CAE->NumElts; i < n; i++) {
+ llvm::Value *EltAddr =
+ CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
+ Fn(Address(EltAddr, EltAlign));
+ }
+}
+
+void CodeGenFunction::ExpandTypeFromArgs(
+ QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) {
+ assert(LV.isSimple() &&
+ "Unexpected non-simple lvalue during struct expansion.");
+
+ auto Exp = getTypeExpansion(Ty, getContext());
+ if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
+ forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
+ [&](Address EltAddr) {
+ LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
+ ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
+ });
+ } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
+ Address This = LV.getAddress();
+ for (const CXXBaseSpecifier *BS : RExp->Bases) {
+ // Perform a single step derived-to-base conversion.
+ Address Base =
+ GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
+ /*NullCheckValue=*/false, SourceLocation());
+ LValue SubLV = MakeAddrLValue(Base, BS->getType());
+
+ // Recurse onto bases.
+ ExpandTypeFromArgs(BS->getType(), SubLV, AI);
+ }
+ for (auto FD : RExp->Fields) {
+ // FIXME: What are the right qualifiers here?
+ LValue SubLV = EmitLValueForField(LV, FD);
+ ExpandTypeFromArgs(FD->getType(), SubLV, AI);
+ }
+ } else if (isa<ComplexExpansion>(Exp.get())) {
+ auto realValue = *AI++;
+ auto imagValue = *AI++;
+ EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
+ } else {
+ assert(isa<NoExpansion>(Exp.get()));
+ EmitStoreThroughLValue(RValue::get(*AI++), LV);
+ }
+}
+
+void CodeGenFunction::ExpandTypeToArgs(
+ QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
+ SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
+ auto Exp = getTypeExpansion(Ty, getContext());
+ if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
+ forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
+ [&](Address EltAddr) {
+ RValue EltRV =
+ convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
+ ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
+ });
+ } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
+ Address This = RV.getAggregateAddress();
+ for (const CXXBaseSpecifier *BS : RExp->Bases) {
+ // Perform a single step derived-to-base conversion.
+ Address Base =
+ GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
+ /*NullCheckValue=*/false, SourceLocation());
+ RValue BaseRV = RValue::getAggregate(Base);
+
+ // Recurse onto bases.
+ ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
+ IRCallArgPos);
+ }
+
+ LValue LV = MakeAddrLValue(This, Ty);
+ for (auto FD : RExp->Fields) {
+ RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
+ ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
+ IRCallArgPos);
+ }
+ } else if (isa<ComplexExpansion>(Exp.get())) {
+ ComplexPairTy CV = RV.getComplexVal();
+ IRCallArgs[IRCallArgPos++] = CV.first;
+ IRCallArgs[IRCallArgPos++] = CV.second;
+ } else {
+ assert(isa<NoExpansion>(Exp.get()));
+ assert(RV.isScalar() &&
+ "Unexpected non-scalar rvalue during struct expansion.");
+
+ // Insert a bitcast as needed.
+ llvm::Value *V = RV.getScalarVal();
+ if (IRCallArgPos < IRFuncTy->getNumParams() &&
+ V->getType() != IRFuncTy->getParamType(IRCallArgPos))
+ V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
+
+ IRCallArgs[IRCallArgPos++] = V;
+ }
+}
+
+/// Create a temporary allocation for the purposes of coercion.
+static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
+ CharUnits MinAlign) {
+ // Don't use an alignment that's worse than what LLVM would prefer.
+ auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
+ CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
+
+ return CGF.CreateTempAlloca(Ty, Align);
+}
+
+/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
+/// accessing some number of bytes out of it, try to gep into the struct to get
+/// at its inner goodness. Dive as deep as possible without entering an element
+/// with an in-memory size smaller than DstSize.
+static Address
+EnterStructPointerForCoercedAccess(Address SrcPtr,
+ llvm::StructType *SrcSTy,
+ uint64_t DstSize, CodeGenFunction &CGF) {
+ // We can't dive into a zero-element struct.
+ if (SrcSTy->getNumElements() == 0) return SrcPtr;
+
+ llvm::Type *FirstElt = SrcSTy->getElementType(0);
+
+ // If the first elt is at least as large as what we're looking for, or if the
+ // first element is the same size as the whole struct, we can enter it. The
+ // comparison must be made on the store size and not the alloca size. Using
+ // the alloca size may overstate the size of the load.
+ uint64_t FirstEltSize =
+ CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
+ if (FirstEltSize < DstSize &&
+ FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
+ return SrcPtr;
+
+ // GEP into the first element.
+ SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");
+
+ // If the first element is a struct, recurse.
+ llvm::Type *SrcTy = SrcPtr.getElementType();
+ if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
+ return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
+
+ return SrcPtr;
+}
+
+/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
+/// are either integers or pointers. This does a truncation of the value if it
+/// is too large or a zero extension if it is too small.
+///
+/// This behaves as if the value were coerced through memory, so on big-endian
+/// targets the high bits are preserved in a truncation, while little-endian
+/// targets preserve the low bits.
+static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
+ llvm::Type *Ty,
+ CodeGenFunction &CGF) {
+ if (Val->getType() == Ty)
+ return Val;
+
+ if (isa<llvm::PointerType>(Val->getType())) {
+ // If this is Pointer->Pointer avoid conversion to and from int.
+ if (isa<llvm::PointerType>(Ty))
+ return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
+
+ // Convert the pointer to an integer so we can play with its width.
+ Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
+ }
+
+ llvm::Type *DestIntTy = Ty;
+ if (isa<llvm::PointerType>(DestIntTy))
+ DestIntTy = CGF.IntPtrTy;
+
+ if (Val->getType() != DestIntTy) {
+ const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
+ if (DL.isBigEndian()) {
+ // Preserve the high bits on big-endian targets.
+ // That is what memory coercion does.
+ uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
+ uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
+
+ if (SrcSize > DstSize) {
+ Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
+ Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
+ } else {
+ Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
+ Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
+ }
+ } else {
+ // Little-endian targets preserve the low bits. No shifts required.
+ Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
+ }
+ }
+
+ if (isa<llvm::PointerType>(Ty))
+ Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
+ return Val;
+}
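+
+// For illustration: coercing the 64-bit value 0xAABBCCDD11223344 to i32 as
+// if through memory keeps 0xAABBCCDD on a big-endian target (the high bits,
+// via the lshr+trunc path above) but 0x11223344 on a little-endian target
+// (the low bits, via the plain integer cast).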
+
+
+
+/// CreateCoercedLoad - Create a load from \arg Src interpreted as a pointer
+/// to an object of type \arg Ty, using the alignment carried by the source
+/// address.
+///
+/// This safely handles the case when the src type is smaller than the
+/// destination type; in this situation the values of bits not present in
+/// the src are undefined.
+static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
+ CodeGenFunction &CGF) {
+ llvm::Type *SrcTy = Src.getElementType();
+
+ // If SrcTy and Ty are the same, just do a load.
+ if (SrcTy == Ty)
+ return CGF.Builder.CreateLoad(Src);
+
+ uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
+
+ if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
+ Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
+ SrcTy = Src.getType()->getElementType();
+ }
+
+ uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
+
+ // If the source and destination are integer or pointer types, just do an
+ // extension or truncation to the desired type.
+ if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
+ (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
+ llvm::Value *Load = CGF.Builder.CreateLoad(Src);
+ return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
+ }
+
+ // If load is legal, just bitcast the src pointer.
+ if (SrcSize >= DstSize) {
+ // Generally SrcSize is never greater than DstSize, since this means we are
+ // losing bits. However, this can happen in cases where the structure has
+ // additional padding, for example due to a user specified alignment.
+ //
+    // FIXME: Assert that we aren't truncating non-padding bits when we have
+    // access to that information.
+ Src = CGF.Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(Ty));
+ return CGF.Builder.CreateLoad(Src);
+ }
+
+ // Otherwise do coercion through memory. This is stupid, but simple.
+ Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
+ Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
+ Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.Int8PtrTy);
+ CGF.Builder.CreateMemCpy(Casted, SrcCasted,
+ llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
+ false);
+ return CGF.Builder.CreateLoad(Tmp);
+}
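+
+// For illustration: loading a struct { i32, i32 } through an i64 coercion
+// type takes the SrcSize >= DstSize path above (both are 8 bytes), so the
+// source pointer is simply bitcast to i64* and loaded; only when the source
+// is smaller does the memcpy-through-temporary fallback run.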
+
+// Function to store a first-class aggregate into memory. We prefer to
+// store the elements rather than the aggregate to be more friendly to
+// fast-isel.
+// FIXME: Do we need to recurse here?
+static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
+ Address Dest, bool DestIsVolatile) {
+ // Prefer scalar stores to first-class aggregate stores.
+ if (llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(Val->getType())) {
+ const llvm::StructLayout *Layout =
+ CGF.CGM.getDataLayout().getStructLayout(STy);
+
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
+ Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
+ llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
+ CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
+ }
+ } else {
+ CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
+ }
+}
+
+/// CreateCoercedStore - Create a store to \arg Dst from \arg Src, where the
+/// source and destination may have different types, using the alignment
+/// carried by the destination address.
+///
+/// This safely handles the case when the src type is larger than the
+/// destination type; the upper bits of the src will be lost.
+static void CreateCoercedStore(llvm::Value *Src,
+ Address Dst,
+ bool DstIsVolatile,
+ CodeGenFunction &CGF) {
+ llvm::Type *SrcTy = Src->getType();
+ llvm::Type *DstTy = Dst.getType()->getElementType();
+ if (SrcTy == DstTy) {
+ CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
+ return;
+ }
+
+ uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
+
+ if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
+ Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
+ DstTy = Dst.getType()->getElementType();
+ }
+
+ // If the source and destination are integer or pointer types, just do an
+ // extension or truncation to the desired type.
+ if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
+ (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
+ Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
+ CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
+ return;
+ }
+
+ uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
+
+ // If store is legal, just bitcast the src pointer.
+ if (SrcSize <= DstSize) {
+ Dst = CGF.Builder.CreateBitCast(Dst, llvm::PointerType::getUnqual(SrcTy));
+ BuildAggStore(CGF, Src, Dst, DstIsVolatile);
+ } else {
+ // Otherwise do coercion through memory. This is stupid, but
+ // simple.
+
+ // Generally SrcSize is never greater than DstSize, since this means we are
+ // losing bits. However, this can happen in cases where the structure has
+ // additional padding, for example due to a user specified alignment.
+ //
+    // FIXME: Assert that we aren't truncating non-padding bits when we have
+    // access to that information.
+ Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
+ CGF.Builder.CreateStore(Src, Tmp);
+ Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
+ Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.Int8PtrTy);
+ CGF.Builder.CreateMemCpy(DstCasted, Casted,
+ llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
+ false);
+ }
+}
+
+static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
+ const ABIArgInfo &info) {
+ if (unsigned offset = info.getDirectOffset()) {
+ addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
+ addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
+ CharUnits::fromQuantity(offset));
+ addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
+ }
+ return addr;
+}
+
+namespace {
+
+/// Encapsulates information about the way function arguments from
+/// CGFunctionInfo should be passed to the actual LLVM IR function.
+class ClangToLLVMArgMapping {
+ static const unsigned InvalidIndex = ~0U;
+ unsigned InallocaArgNo;
+ unsigned SRetArgNo;
+ unsigned TotalIRArgs;
+
+  /// The LLVM IR arguments that correspond to a single Clang argument.
+ struct IRArgs {
+ unsigned PaddingArgIndex;
+ // Argument is expanded to IR arguments at positions
+ // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
+ unsigned FirstArgIndex;
+ unsigned NumberOfArgs;
+
+ IRArgs()
+ : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
+ NumberOfArgs(0) {}
+ };
+
+ SmallVector<IRArgs, 8> ArgInfo;
+
+public:
+ ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
+ bool OnlyRequiredArgs = false)
+ : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
+ ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
+ construct(Context, FI, OnlyRequiredArgs);
+ }
+
+ bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
+ unsigned getInallocaArgNo() const {
+ assert(hasInallocaArg());
+ return InallocaArgNo;
+ }
+
+ bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
+ unsigned getSRetArgNo() const {
+ assert(hasSRetArg());
+ return SRetArgNo;
+ }
+
+ unsigned totalIRArgs() const { return TotalIRArgs; }
+
+ bool hasPaddingArg(unsigned ArgNo) const {
+ assert(ArgNo < ArgInfo.size());
+ return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
+ }
+ unsigned getPaddingArgNo(unsigned ArgNo) const {
+ assert(hasPaddingArg(ArgNo));
+ return ArgInfo[ArgNo].PaddingArgIndex;
+ }
+
+  /// Returns the index of the first IR argument corresponding to ArgNo,
+  /// together with the number of IR arguments it occupies.
+ std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
+ assert(ArgNo < ArgInfo.size());
+ return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
+ ArgInfo[ArgNo].NumberOfArgs);
+ }
+
+private:
+ void construct(const ASTContext &Context, const CGFunctionInfo &FI,
+ bool OnlyRequiredArgs);
+};
+
+void ClangToLLVMArgMapping::construct(const ASTContext &Context,
+ const CGFunctionInfo &FI,
+ bool OnlyRequiredArgs) {
+ unsigned IRArgNo = 0;
+ bool SwapThisWithSRet = false;
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+
+ if (RetAI.getKind() == ABIArgInfo::Indirect) {
+ SwapThisWithSRet = RetAI.isSRetAfterThis();
+ SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
+ }
+
+ unsigned ArgNo = 0;
+ unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
+ for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
+ ++I, ++ArgNo) {
+ assert(I != FI.arg_end());
+ QualType ArgType = I->type;
+ const ABIArgInfo &AI = I->info;
+ // Collect data about IR arguments corresponding to Clang argument ArgNo.
+ auto &IRArgs = ArgInfo[ArgNo];
+
+ if (AI.getPaddingType())
+ IRArgs.PaddingArgIndex = IRArgNo++;
+
+ switch (AI.getKind()) {
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ // FIXME: handle sseregparm someday...
+ llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
+ if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
+ IRArgs.NumberOfArgs = STy->getNumElements();
+ } else {
+ IRArgs.NumberOfArgs = 1;
+ }
+ break;
+ }
+ case ABIArgInfo::Indirect:
+ IRArgs.NumberOfArgs = 1;
+ break;
+ case ABIArgInfo::Ignore:
+ case ABIArgInfo::InAlloca:
+      // ignore and inalloca don't have matching LLVM parameters.
+ IRArgs.NumberOfArgs = 0;
+ break;
+ case ABIArgInfo::Expand: {
+ IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
+ break;
+ }
+ }
+
+ if (IRArgs.NumberOfArgs > 0) {
+ IRArgs.FirstArgIndex = IRArgNo;
+ IRArgNo += IRArgs.NumberOfArgs;
+ }
+
+ // Skip over the sret parameter when it comes second. We already handled it
+ // above.
+ if (IRArgNo == 1 && SwapThisWithSRet)
+ IRArgNo++;
+ }
+ assert(ArgNo == ArgInfo.size());
+
+ if (FI.usesInAlloca())
+ InallocaArgNo = IRArgNo++;
+
+ TotalIRArgs = IRArgNo;
+}
+} // namespace
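+
+// For illustration, assuming a hypothetical signature whose return is
+// lowered indirectly:
+//   struct Big f(int a, double b);
+// the mapping could assign IR slot 0 to the sret pointer, slot 1 to 'a', and
+// slot 2 to 'b', so getIRArgs(0) yields {1, 1} and totalIRArgs() is 3;
+// expanded or padded arguments simply occupy additional slots.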
+
+/***/
+
+bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
+ return FI.getReturnInfo().isIndirect();
+}
+
+bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
+ return ReturnTypeUsesSRet(FI) &&
+ getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
+}
+
+bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
+ if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+ default:
+ return false;
+ case BuiltinType::Float:
+ return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
+ case BuiltinType::Double:
+ return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
+ case BuiltinType::LongDouble:
+ return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
+ }
+ }
+
+ return false;
+}
+
+bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
+ if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
+ if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
+ if (BT->getKind() == BuiltinType::LongDouble)
+ return getTarget().useObjCFP2RetForComplexLongDouble();
+ }
+ }
+
+ return false;
+}
+
+llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
+ const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
+ return GetFunctionType(FI);
+}
+
+llvm::FunctionType *
+CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
+
+ bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
+ (void)Inserted;
+ assert(Inserted && "Recursively being processed?");
+
+ llvm::Type *resultType = nullptr;
+ const ABIArgInfo &retAI = FI.getReturnInfo();
+ switch (retAI.getKind()) {
+ case ABIArgInfo::Expand:
+ llvm_unreachable("Invalid ABI kind for return argument");
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct:
+ resultType = retAI.getCoerceToType();
+ break;
+
+ case ABIArgInfo::InAlloca:
+ if (retAI.getInAllocaSRet()) {
+      // sret things on win32 aren't void; they return the sret pointer.
+ QualType ret = FI.getReturnType();
+ llvm::Type *ty = ConvertType(ret);
+ unsigned addressSpace = Context.getTargetAddressSpace(ret);
+ resultType = llvm::PointerType::get(ty, addressSpace);
+ } else {
+ resultType = llvm::Type::getVoidTy(getLLVMContext());
+ }
+ break;
+
+ case ABIArgInfo::Indirect:
+ case ABIArgInfo::Ignore:
+ resultType = llvm::Type::getVoidTy(getLLVMContext());
+ break;
+ }
+
+ ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
+ SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
+
+ // Add type for sret argument.
+ if (IRFunctionArgs.hasSRetArg()) {
+ QualType Ret = FI.getReturnType();
+ llvm::Type *Ty = ConvertType(Ret);
+ unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
+ ArgTypes[IRFunctionArgs.getSRetArgNo()] =
+ llvm::PointerType::get(Ty, AddressSpace);
+ }
+
+ // Add type for inalloca argument.
+ if (IRFunctionArgs.hasInallocaArg()) {
+ auto ArgStruct = FI.getArgStruct();
+ assert(ArgStruct);
+ ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
+ }
+
+ // Add in all of the required arguments.
+ unsigned ArgNo = 0;
+ CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+ ie = it + FI.getNumRequiredArgs();
+ for (; it != ie; ++it, ++ArgNo) {
+ const ABIArgInfo &ArgInfo = it->info;
+
+ // Insert a padding type to ensure proper alignment.
+ if (IRFunctionArgs.hasPaddingArg(ArgNo))
+ ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
+ ArgInfo.getPaddingType();
+
+ unsigned FirstIRArg, NumIRArgs;
+ std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
+
+ switch (ArgInfo.getKind()) {
+ case ABIArgInfo::Ignore:
+ case ABIArgInfo::InAlloca:
+ assert(NumIRArgs == 0);
+ break;
+
+ case ABIArgInfo::Indirect: {
+ assert(NumIRArgs == 1);
+ // indirect arguments are always on the stack, which is addr space #0.
+ llvm::Type *LTy = ConvertTypeForMem(it->type);
+ ArgTypes[FirstIRArg] = LTy->getPointerTo();
+ break;
+ }
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ // Fast-isel and the optimizer generally like scalar values better than
+ // FCAs, so we flatten them if this is safe to do for this argument.
+ llvm::Type *argType = ArgInfo.getCoerceToType();
+ llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
+ if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
+ assert(NumIRArgs == st->getNumElements());
+ for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
+ ArgTypes[FirstIRArg + i] = st->getElementType(i);
+ } else {
+ assert(NumIRArgs == 1);
+ ArgTypes[FirstIRArg] = argType;
+ }
+ break;
+ }
+
+ case ABIArgInfo::Expand:
+ auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
+ getExpandedTypes(it->type, ArgTypesIter);
+ assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
+ break;
+ }
+ }
+
+ bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
+ assert(Erased && "Not in set?");
+
+ return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
+}
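+
+// For illustration, continuing the hypothetical 'struct Big f(int, double)'
+// example above: with an indirect (sret) return the type built here is
+//   void (%struct.Big*, i32, double)
+// since the sret slot becomes a leading pointer parameter, the aggregate
+// return becomes void, and FI.isVariadic() decides whether '...' is added.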
+
+llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+
+ if (!isFuncTypeConvertible(FPT))
+ return llvm::StructType::get(getLLVMContext());
+
+ const CGFunctionInfo *Info;
+ if (isa<CXXDestructorDecl>(MD))
+ Info =
+ &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
+ else
+ Info = &arrangeCXXMethodDeclaration(MD);
+ return GetFunctionType(*Info);
+}
+
+static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
+ llvm::AttrBuilder &FuncAttrs,
+ const FunctionProtoType *FPT) {
+ if (!FPT)
+ return;
+
+ if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
+ FPT->isNothrow(Ctx))
+ FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
+}
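+
+// For illustration: a hypothetical prototype such as
+//   void g() noexcept;
+// has a resolved exception specification and satisfies isNothrow(), so the
+// helper above adds llvm::Attribute::NoUnwind; a potentially-throwing
+// prototype leaves the attribute set untouched.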
+
+void CodeGenModule::ConstructAttributeList(
+ StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
+ AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite) {
+ llvm::AttrBuilder FuncAttrs;
+ llvm::AttrBuilder RetAttrs;
+ bool HasOptnone = false;
+
+ CallingConv = FI.getEffectiveCallingConvention();
+
+ if (FI.isNoReturn())
+ FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
+
+ // If we have information about the function prototype, we can learn
+  // attributes from there.
+ AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
+ CalleeInfo.getCalleeFunctionProtoType());
+
+ const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
+
+ // FIXME: handle sseregparm someday...
+ if (TargetDecl) {
+ if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
+ FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
+ if (TargetDecl->hasAttr<NoThrowAttr>())
+ FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
+ if (TargetDecl->hasAttr<NoReturnAttr>())
+ FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
+ if (TargetDecl->hasAttr<NoDuplicateAttr>())
+ FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
+
+ if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
+ AddAttributesFromFunctionProtoType(
+ getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
+ // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
+      // These attributes are not inherited by overriders.
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
+ if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
+ FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
+ }
+
+ // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
+ if (TargetDecl->hasAttr<ConstAttr>()) {
+ FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
+ FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
+ } else if (TargetDecl->hasAttr<PureAttr>()) {
+ FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
+ FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
+ } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
+ FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
+ FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
+ }
+ if (TargetDecl->hasAttr<RestrictAttr>())
+ RetAttrs.addAttribute(llvm::Attribute::NoAlias);
+ if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
+ RetAttrs.addAttribute(llvm::Attribute::NonNull);
+
+ HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
+ }
+
+ // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
+ if (!HasOptnone) {
+ if (CodeGenOpts.OptimizeSize)
+ FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
+ if (CodeGenOpts.OptimizeSize == 2)
+ FuncAttrs.addAttribute(llvm::Attribute::MinSize);
+ }
+
+ if (CodeGenOpts.DisableRedZone)
+ FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
+ if (CodeGenOpts.NoImplicitFloat)
+ FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
+ if (CodeGenOpts.EnableSegmentedStacks &&
+ !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
+ FuncAttrs.addAttribute("split-stack");
+
+ if (AttrOnCallSite) {
+ // Attributes that should go on the call site only.
+ if (!CodeGenOpts.SimplifyLibCalls ||
+ CodeGenOpts.isNoBuiltinFunc(Name.data()))
+ FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
+ if (!CodeGenOpts.TrapFuncName.empty())
+ FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
+ } else {
+ // Attributes that should go on the function, but not the call site.
+ if (!CodeGenOpts.DisableFPElim) {
+ FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
+ } else if (CodeGenOpts.OmitLeafFramePointer) {
+ FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
+ FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
+ } else {
+ FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
+ FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
+ }
+
+ bool DisableTailCalls =
+ CodeGenOpts.DisableTailCalls ||
+ (TargetDecl && TargetDecl->hasAttr<DisableTailCallsAttr>());
+ FuncAttrs.addAttribute("disable-tail-calls",
+ llvm::toStringRef(DisableTailCalls));
+
+ FuncAttrs.addAttribute("less-precise-fpmad",
+ llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
+ FuncAttrs.addAttribute("no-infs-fp-math",
+ llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
+ FuncAttrs.addAttribute("no-nans-fp-math",
+ llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
+ FuncAttrs.addAttribute("unsafe-fp-math",
+ llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
+ FuncAttrs.addAttribute("use-soft-float",
+ llvm::toStringRef(CodeGenOpts.SoftFloat));
+ FuncAttrs.addAttribute("stack-protector-buffer-size",
+ llvm::utostr(CodeGenOpts.SSPBufferSize));
+
+ if (CodeGenOpts.StackRealignment)
+ FuncAttrs.addAttribute("stackrealign");
+
+ // Add target-cpu and target-features attributes to functions. If
+ // we have a decl for the function and it has a target attribute then
+ // parse that and add it to the feature set.
+ StringRef TargetCPU = getTarget().getTargetOpts().CPU;
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
+ if (FD && FD->hasAttr<TargetAttr>()) {
+ llvm::StringMap<bool> FeatureMap;
+ getFunctionFeatureMap(FeatureMap, FD);
+
+ // Produce the canonical string for this set of features.
+ std::vector<std::string> Features;
+ for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
+ ie = FeatureMap.end();
+ it != ie; ++it)
+ Features.push_back((it->second ? "+" : "-") + it->first().str());
+
+ // Now add the target-cpu and target-features to the function.
+ // While we populated the feature map above, we still need to
+ // get and parse the target attribute so we can get the cpu for
+ // the function.
+ const auto *TD = FD->getAttr<TargetAttr>();
+ TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
+ if (ParsedAttr.second != "")
+ TargetCPU = ParsedAttr.second;
+ if (TargetCPU != "")
+ FuncAttrs.addAttribute("target-cpu", TargetCPU);
+ if (!Features.empty()) {
+ std::sort(Features.begin(), Features.end());
+ FuncAttrs.addAttribute(
+ "target-features",
+ llvm::join(Features.begin(), Features.end(), ","));
+ }
+ } else {
+ // Otherwise just add the existing target cpu and target features to the
+ // function.
+ std::vector<std::string> &Features = getTarget().getTargetOpts().Features;
+ if (TargetCPU != "")
+ FuncAttrs.addAttribute("target-cpu", TargetCPU);
+ if (!Features.empty()) {
+ std::sort(Features.begin(), Features.end());
+ FuncAttrs.addAttribute(
+ "target-features",
+ llvm::join(Features.begin(), Features.end(), ","));
+ }
+ }
+ }
+
+ ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
+
+ QualType RetTy = FI.getReturnType();
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Extend:
+ if (RetTy->hasSignedIntegerRepresentation())
+ RetAttrs.addAttribute(llvm::Attribute::SExt);
+ else if (RetTy->hasUnsignedIntegerRepresentation())
+ RetAttrs.addAttribute(llvm::Attribute::ZExt);
+ // FALL THROUGH
+ case ABIArgInfo::Direct:
+ if (RetAI.getInReg())
+ RetAttrs.addAttribute(llvm::Attribute::InReg);
+ break;
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::InAlloca:
+ case ABIArgInfo::Indirect: {
+ // inalloca and sret disable readnone and readonly
+ FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
+ .removeAttribute(llvm::Attribute::ReadNone);
+ break;
+ }
+
+ case ABIArgInfo::Expand:
+ llvm_unreachable("Invalid ABI kind for return argument");
+ }
+
+ if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
+ QualType PTy = RefTy->getPointeeType();
+ if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
+ RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
+ .getQuantity());
+ else if (getContext().getTargetAddressSpace(PTy) == 0)
+ RetAttrs.addAttribute(llvm::Attribute::NonNull);
+ }
+
+ // Attach return attributes.
+ if (RetAttrs.hasAttributes()) {
+ PAL.push_back(llvm::AttributeSet::get(
+ getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
+ }
+
+ // Attach attributes to sret.
+ if (IRFunctionArgs.hasSRetArg()) {
+ llvm::AttrBuilder SRETAttrs;
+ SRETAttrs.addAttribute(llvm::Attribute::StructRet);
+ if (RetAI.getInReg())
+ SRETAttrs.addAttribute(llvm::Attribute::InReg);
+ PAL.push_back(llvm::AttributeSet::get(
+ getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
+ }
+
+ // Attach attributes to inalloca argument.
+ if (IRFunctionArgs.hasInallocaArg()) {
+ llvm::AttrBuilder Attrs;
+ Attrs.addAttribute(llvm::Attribute::InAlloca);
+ PAL.push_back(llvm::AttributeSet::get(
+ getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
+ }
+
+ unsigned ArgNo = 0;
+ for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
+ E = FI.arg_end();
+ I != E; ++I, ++ArgNo) {
+ QualType ParamType = I->type;
+ const ABIArgInfo &AI = I->info;
+ llvm::AttrBuilder Attrs;
+
+ // Add attribute for padding argument, if necessary.
+ if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
+ if (AI.getPaddingInReg())
+ PAL.push_back(llvm::AttributeSet::get(
+ getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
+ llvm::Attribute::InReg));
+ }
+
+ // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
+ // have the corresponding parameter variable. It doesn't make
+ // sense to do it here because parameters are so messed up.
+ switch (AI.getKind()) {
+ case ABIArgInfo::Extend:
+ if (ParamType->isSignedIntegerOrEnumerationType())
+ Attrs.addAttribute(llvm::Attribute::SExt);
+ else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
+ if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
+ Attrs.addAttribute(llvm::Attribute::SExt);
+ else
+ Attrs.addAttribute(llvm::Attribute::ZExt);
+ }
+ // FALL THROUGH
+ case ABIArgInfo::Direct:
+ if (ArgNo == 0 && FI.isChainCall())
+ Attrs.addAttribute(llvm::Attribute::Nest);
+ else if (AI.getInReg())
+ Attrs.addAttribute(llvm::Attribute::InReg);
+ break;
+
+ case ABIArgInfo::Indirect: {
+ if (AI.getInReg())
+ Attrs.addAttribute(llvm::Attribute::InReg);
+
+ if (AI.getIndirectByVal())
+ Attrs.addAttribute(llvm::Attribute::ByVal);
+
+ CharUnits Align = AI.getIndirectAlign();
+
+ // In a byval argument, it is important that the required
+ // alignment of the type is honored, as LLVM might be creating a
+ // *new* stack object, and needs to know what alignment to give
+ // it. (Sometimes it can deduce a sensible alignment on its own,
+ // but not if clang decides it must emit a packed struct, or the
+ // user specifies increased alignment requirements.)
+ //
+ // This is different from indirect *not* byval, where the object
+ // exists already, and the align attribute is purely
+ // informative.
+ assert(!Align.isZero());
+
+ // For now, only add this when we have a byval argument.
+ // TODO: be less lazy about updating test cases.
+ if (AI.getIndirectByVal())
+ Attrs.addAlignmentAttr(Align.getQuantity());
+
+ // byval disables readnone and readonly.
+ FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
+ .removeAttribute(llvm::Attribute::ReadNone);
+ break;
+ }
+ case ABIArgInfo::Ignore:
+ case ABIArgInfo::Expand:
+ continue;
+
+ case ABIArgInfo::InAlloca:
+ // inalloca disables readnone and readonly.
+ FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
+ .removeAttribute(llvm::Attribute::ReadNone);
+ continue;
+ }
+
+ if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
+ QualType PTy = RefTy->getPointeeType();
+ if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
+ Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
+ .getQuantity());
+ else if (getContext().getTargetAddressSpace(PTy) == 0)
+ Attrs.addAttribute(llvm::Attribute::NonNull);
+ }
+
+ if (Attrs.hasAttributes()) {
+ unsigned FirstIRArg, NumIRArgs;
+ std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
+ for (unsigned i = 0; i < NumIRArgs; i++)
+ PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
+ FirstIRArg + i + 1, Attrs));
+ }
+ }
+ assert(ArgNo == FI.arg_size());
+
+ if (FuncAttrs.hasAttributes())
+    PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
+                                          llvm::AttributeSet::FunctionIndex,
+                                          FuncAttrs));
+}
+
+/// An argument came in as a promoted argument; demote it back to its
+/// declared type.
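+///
+/// For example, a K&R definition 'void f(c) char c; {}' receives the
+/// promoted 'int' argument and must truncate it back to 'char' here.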
+static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
+ const VarDecl *var,
+ llvm::Value *value) {
+ llvm::Type *varType = CGF.ConvertType(var->getType());
+
+ // This can happen with promotions that actually don't change the
+ // underlying type, like the enum promotions.
+ if (value->getType() == varType) return value;
+
+ assert((varType->isIntegerTy() || varType->isFloatingPointTy())
+ && "unexpected promotion type");
+
+ if (isa<llvm::IntegerType>(varType))
+ return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
+
+ return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
+}
+
+/// Returns the attribute (either parameter attribute, or function
+/// attribute), which declares argument ArgNo to be non-null.
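+///
+/// Both spellings are found: 'void g(int *p __attribute__((nonnull)))' via
+/// the parameter attribute, and '__attribute__((nonnull(1)))' on the
+/// function itself (attribute indices are 1-based).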
+static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
+ QualType ArgType, unsigned ArgNo) {
+ // FIXME: __attribute__((nonnull)) can also be applied to:
+ // - references to pointers, where the pointee is known to be
+ // nonnull (apparently a Clang extension)
+ // - transparent unions containing pointers
+ // In the former case, LLVM IR cannot represent the constraint. In
+ // the latter case, we have no guarantee that the transparent union
+ // is in fact passed as a pointer.
+ if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
+ return nullptr;
+ // First, check attribute on parameter itself.
+ if (PVD) {
+ if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
+ return ParmNNAttr;
+ }
+ // Check function attributes.
+ if (!FD)
+ return nullptr;
+ for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
+ if (NNAttr->isNonNull(ArgNo))
+ return NNAttr;
+ }
+ return nullptr;
+}
+
+void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
+ llvm::Function *Fn,
+ const FunctionArgList &Args) {
+ if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
+ // Naked functions don't have prologues.
+ return;
+
+ // If this is an implicit-return-zero function, go ahead and
+ // initialize the return value. TODO: it might be nice to have
+ // a more general mechanism for this that didn't require synthesized
+ // return statements.
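+  // (In C and C++, 'main' is the canonical implicit-return-zero function.)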
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
+ if (FD->hasImplicitReturnZero()) {
+ QualType RetTy = FD->getReturnType().getUnqualifiedType();
+ llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
+ llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
+ Builder.CreateStore(Zero, ReturnValue);
+ }
+ }
+
+ // FIXME: We no longer need the types from FunctionArgList; lift up and
+ // simplify.
+
+ ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
+ // Flattened function arguments.
+ SmallVector<llvm::Argument *, 16> FnArgs;
+ FnArgs.reserve(IRFunctionArgs.totalIRArgs());
+ for (auto &Arg : Fn->args()) {
+ FnArgs.push_back(&Arg);
+ }
+ assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
+
+ // If we're using inalloca, all the memory arguments are GEPs off of the last
+ // parameter, which is a pointer to the complete memory area.
+ Address ArgStruct = Address::invalid();
+ const llvm::StructLayout *ArgStructLayout = nullptr;
+ if (IRFunctionArgs.hasInallocaArg()) {
+ ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
+ ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
+ FI.getArgStructAlignment());
+
+ assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
+ }
+
+ // Name the struct return parameter.
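+  // The sret slot is a private temporary in the caller, so it cannot alias
+  // anything else visible here; mark it NoAlias.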
+ if (IRFunctionArgs.hasSRetArg()) {
+ auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()];
+ AI->setName("agg.result");
+ AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
+ llvm::Attribute::NoAlias));
+ }
+
+ // Track if we received the parameter as a pointer (indirect, byval, or
+  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to
+  // copy it into a local alloca for us.
+ SmallVector<ParamValue, 16> ArgVals;
+ ArgVals.reserve(Args.size());
+
+ // Create a pointer value for every parameter declaration. This usually
+ // entails copying one or more LLVM IR arguments into an alloca. Don't push
+ // any cleanups or do anything that might unwind. We do that separately, so
+ // we can push the cleanups in the correct order for the ABI.
+ assert(FI.arg_size() == Args.size() &&
+ "Mismatch between function signature & arguments.");
+ unsigned ArgNo = 0;
+ CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i, ++info_it, ++ArgNo) {
+ const VarDecl *Arg = *i;
+ QualType Ty = info_it->type;
+ const ABIArgInfo &ArgI = info_it->info;
+
+ bool isPromoted =
+ isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
+
+ unsigned FirstIRArg, NumIRArgs;
+ std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
+
+ switch (ArgI.getKind()) {
+ case ABIArgInfo::InAlloca: {
+ assert(NumIRArgs == 0);
+ auto FieldIndex = ArgI.getInAllocaFieldIndex();
+ CharUnits FieldOffset =
+ CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
+ Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
+ Arg->getName());
+ ArgVals.push_back(ParamValue::forIndirect(V));
+ break;
+ }
+
+ case ABIArgInfo::Indirect: {
+ assert(NumIRArgs == 1);
+ Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
+
+ if (!hasScalarEvaluationKind(Ty)) {
+ // Aggregates and complex variables are accessed by reference. All we
+ // need to do is realign the value, if requested.
+ Address V = ParamAddr;
+ if (ArgI.getIndirectRealign()) {
+ Address AlignedTemp = CreateMemTemp(Ty, "coerce");
+
+ // Copy from the incoming argument pointer to the temporary with the
+ // appropriate alignment.
+ //
+ // FIXME: We should have a common utility for generating an aggregate
+ // copy.
+ CharUnits Size = getContext().getTypeSizeInChars(Ty);
+ auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
+ Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
+ Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
+ Builder.CreateMemCpy(Dst, Src, SizeVal, false);
+ V = AlignedTemp;
+ }
+ ArgVals.push_back(ParamValue::forIndirect(V));
+ } else {
+ // Load scalar value from indirect argument.
+ llvm::Value *V =
+ EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());
+
+ if (isPromoted)
+ V = emitArgumentDemotion(*this, Arg, V);
+ ArgVals.push_back(ParamValue::forDirect(V));
+ }
+ break;
+ }
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+
+ // If we have the trivial case, handle it with no muss and fuss.
+ if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
+ ArgI.getCoerceToType() == ConvertType(Ty) &&
+ ArgI.getDirectOffset() == 0) {
+ assert(NumIRArgs == 1);
+ auto AI = FnArgs[FirstIRArg];
+ llvm::Value *V = AI;
+
+ if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
+ if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
+ PVD->getFunctionScopeIndex()))
+ AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
+ AI->getArgNo() + 1,
+ llvm::Attribute::NonNull));
+
+ QualType OTy = PVD->getOriginalType();
+ if (const auto *ArrTy =
+ getContext().getAsConstantArrayType(OTy)) {
+ // A C99 array parameter declaration with the static keyword also
+ // indicates dereferenceability, and if the size is constant we can
+ // use the dereferenceable attribute (which requires the size in
+ // bytes).
+ if (ArrTy->getSizeModifier() == ArrayType::Static) {
+ QualType ETy = ArrTy->getElementType();
+ uint64_t ArrSize = ArrTy->getSize().getZExtValue();
+ if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
+ ArrSize) {
+ llvm::AttrBuilder Attrs;
+ Attrs.addDereferenceableAttr(
+ getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
+ AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
+ AI->getArgNo() + 1, Attrs));
+ } else if (getContext().getTargetAddressSpace(ETy) == 0) {
+ AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
+ AI->getArgNo() + 1,
+ llvm::Attribute::NonNull));
+ }
+ }
+ } else if (const auto *ArrTy =
+ getContext().getAsVariableArrayType(OTy)) {
+ // For C99 VLAs with the static keyword, we don't know the size so
+ // we can't use the dereferenceable attribute, but in addrspace(0)
+ // we know that it must be nonnull.
+ if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
+ !getContext().getTargetAddressSpace(ArrTy->getElementType()))
+ AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
+ AI->getArgNo() + 1,
+ llvm::Attribute::NonNull));
+ }
+
+ const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
+ if (!AVAttr)
+ if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
+ AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
+ if (AVAttr) {
+ llvm::Value *AlignmentValue =
+ EmitScalarExpr(AVAttr->getAlignment());
+ llvm::ConstantInt *AlignmentCI =
+ cast<llvm::ConstantInt>(AlignmentValue);
+ unsigned Alignment =
+ std::min((unsigned) AlignmentCI->getZExtValue(),
+ +llvm::Value::MaximumAlignment);
+
+ llvm::AttrBuilder Attrs;
+ Attrs.addAlignmentAttr(Alignment);
+ AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
+ AI->getArgNo() + 1, Attrs));
+ }
+ }
+
+ if (Arg->getType().isRestrictQualified())
+ AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
+ AI->getArgNo() + 1,
+ llvm::Attribute::NoAlias));
+
+ // Ensure the argument is the correct type.
+ if (V->getType() != ArgI.getCoerceToType())
+ V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
+
+ if (isPromoted)
+ V = emitArgumentDemotion(*this, Arg, V);
+
+ if (const CXXMethodDecl *MD =
+ dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
+ if (MD->isVirtual() && Arg == CXXABIThisDecl)
+ V = CGM.getCXXABI().
+ adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
+ }
+
+ // Because of merging of function types from multiple decls it is
+ // possible for the type of an argument to not match the corresponding
+ // type in the function type. Since we are codegening the callee
+ // in here, add a cast to the argument type.
+ llvm::Type *LTy = ConvertType(Arg->getType());
+ if (V->getType() != LTy)
+ V = Builder.CreateBitCast(V, LTy);
+
+ ArgVals.push_back(ParamValue::forDirect(V));
+ break;
+ }
+
+ Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
+ Arg->getName());
+
+ // Pointer to store into.
+ Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
+
+ // Fast-isel and the optimizer generally like scalar values better than
+ // FCAs, so we flatten them if this is safe to do for this argument.
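+    // For example, a { i32, i32 } coercion arrives as two scalar i32
+    // arguments and is stored field-by-field below instead of as a
+    // first-class aggregate value.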
+ llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
+ if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
+ STy->getNumElements() > 1) {
+ auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
+ uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
+ llvm::Type *DstTy = Ptr.getElementType();
+ uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
+
+ Address AddrToStoreInto = Address::invalid();
+ if (SrcSize <= DstSize) {
+ AddrToStoreInto =
+ Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
+ } else {
+ AddrToStoreInto =
+ CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
+ }
+
+ assert(STy->getNumElements() == NumIRArgs);
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ auto AI = FnArgs[FirstIRArg + i];
+ AI->setName(Arg->getName() + ".coerce" + Twine(i));
+ auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
+ Address EltPtr =
+ Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
+ Builder.CreateStore(AI, EltPtr);
+ }
+
+ if (SrcSize > DstSize) {
+ Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
+ }
+
+ } else {
+ // Simple case, just do a coerced store of the argument into the alloca.
+ assert(NumIRArgs == 1);
+ auto AI = FnArgs[FirstIRArg];
+ AI->setName(Arg->getName() + ".coerce");
+ CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
+ }
+
+ // Match to what EmitParmDecl is expecting for this type.
+ if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
+ llvm::Value *V =
+ EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
+ if (isPromoted)
+ V = emitArgumentDemotion(*this, Arg, V);
+ ArgVals.push_back(ParamValue::forDirect(V));
+ } else {
+ ArgVals.push_back(ParamValue::forIndirect(Alloca));
+ }
+ break;
+ }
+
+ case ABIArgInfo::Expand: {
+ // If this structure was expanded into multiple arguments then
+ // we need to create a temporary and reconstruct it from the
+ // arguments.
+ Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
+ LValue LV = MakeAddrLValue(Alloca, Ty);
+ ArgVals.push_back(ParamValue::forIndirect(Alloca));
+
+ auto FnArgIter = FnArgs.begin() + FirstIRArg;
+ ExpandTypeFromArgs(Ty, LV, FnArgIter);
+ assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
+ for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
+ auto AI = FnArgs[FirstIRArg + i];
+ AI->setName(Arg->getName() + "." + Twine(i));
+ }
+ break;
+ }
+
+ case ABIArgInfo::Ignore:
+ assert(NumIRArgs == 0);
+ // Initialize the local variable appropriately.
+ if (!hasScalarEvaluationKind(Ty)) {
+ ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
+ } else {
+ llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
+ ArgVals.push_back(ParamValue::forDirect(U));
+ }
+ break;
+ }
+ }
+
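+  // When the callee destroys its arguments left-to-right (the MS C++ ABI),
+  // declare the parameters in reverse so their cleanups are pushed in the
+  // order that yields left-to-right destruction.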
+ if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
+ for (int I = Args.size() - 1; I >= 0; --I)
+ EmitParmDecl(*Args[I], ArgVals[I], I + 1);
+ } else {
+ for (unsigned I = 0, E = Args.size(); I != E; ++I)
+ EmitParmDecl(*Args[I], ArgVals[I], I + 1);
+ }
+}
+
+static void eraseUnusedBitCasts(llvm::Instruction *insn) {
+ while (insn->use_empty()) {
+ llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
+ if (!bitcast) return;
+
+ // This is "safe" because we would have used a ConstantExpr otherwise.
+ insn = cast<llvm::Instruction>(bitcast->getOperand(0));
+ bitcast->eraseFromParent();
+ }
+}
+
+/// Try to emit a fused autorelease of a return result.
+static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
+ llvm::Value *result) {
+  // We must be immediately following the cast.
+ llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
+ if (BB->empty()) return nullptr;
+ if (&BB->back() != result) return nullptr;
+
+ llvm::Type *resultType = result->getType();
+
+ // result is in a BasicBlock and is therefore an Instruction.
+ llvm::Instruction *generator = cast<llvm::Instruction>(result);
+
+ SmallVector<llvm::Instruction*,4> insnsToKill;
+
+ // Look for:
+ // %generator = bitcast %type1* %generator2 to %type2*
+ while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
+ // We would have emitted this as a constant if the operand weren't
+ // an Instruction.
+ generator = cast<llvm::Instruction>(bitcast->getOperand(0));
+
+ // Require the generator to be immediately followed by the cast.
+ if (generator->getNextNode() != bitcast)
+ return nullptr;
+
+ insnsToKill.push_back(bitcast);
+ }
+
+ // Look for:
+ // %generator = call i8* @objc_retain(i8* %originalResult)
+ // or
+ // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
+ llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
+ if (!call) return nullptr;
+
+ bool doRetainAutorelease;
+
+ if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
+ doRetainAutorelease = true;
+ } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
+ .objc_retainAutoreleasedReturnValue) {
+ doRetainAutorelease = false;
+
+ // If we emitted an assembly marker for this call (and the
+ // ARCEntrypoints field should have been set if so), go looking
+ // for that call. If we can't find it, we can't do this
+ // optimization. But it should always be the immediately previous
+ // instruction, unless we needed bitcasts around the call.
+ if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
+ llvm::Instruction *prev = call->getPrevNode();
+ assert(prev);
+ if (isa<llvm::BitCastInst>(prev)) {
+ prev = prev->getPrevNode();
+ assert(prev);
+ }
+ assert(isa<llvm::CallInst>(prev));
+ assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
+ CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
+ insnsToKill.push_back(prev);
+ }
+ } else {
+ return nullptr;
+ }
+
+ result = call->getArgOperand(0);
+ insnsToKill.push_back(call);
+
+ // Keep killing bitcasts, for sanity. Note that we no longer care
+ // about precise ordering as long as there's exactly one use.
+ while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
+ if (!bitcast->hasOneUse()) break;
+ insnsToKill.push_back(bitcast);
+ result = bitcast->getOperand(0);
+ }
+
+ // Delete all the unnecessary instructions, from latest to earliest.
+ for (SmallVectorImpl<llvm::Instruction*>::iterator
+ i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
+ (*i)->eraseFromParent();
+
+ // Do the fused retain/autorelease if we were asked to.
+ if (doRetainAutorelease)
+ result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
+
+ // Cast back to the result type.
+ return CGF.Builder.CreateBitCast(result, resultType);
+}
+
+/// If this is a +1 of the value of an immutable 'self', remove it.
+static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
+ llvm::Value *result) {
+ // This is only applicable to a method with an immutable 'self'.
+ const ObjCMethodDecl *method =
+ dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
+ if (!method) return nullptr;
+ const VarDecl *self = method->getSelfDecl();
+ if (!self->getType().isConstQualified()) return nullptr;
+
+ // Look for a retain call.
+ llvm::CallInst *retainCall =
+ dyn_cast<llvm::CallInst>(result->stripPointerCasts());
+ if (!retainCall ||
+ retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
+ return nullptr;
+
+ // Look for an ordinary load of 'self'.
+ llvm::Value *retainedValue = retainCall->getArgOperand(0);
+ llvm::LoadInst *load =
+ dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
+ if (!load || load->isAtomic() || load->isVolatile() ||
+ load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
+ return nullptr;
+
+ // Okay! Burn it all down. This relies for correctness on the
+ // assumption that the retain is emitted as part of the return and
+ // that thereafter everything is used "linearly".
+ llvm::Type *resultType = result->getType();
+ eraseUnusedBitCasts(cast<llvm::Instruction>(result));
+ assert(retainCall->use_empty());
+ retainCall->eraseFromParent();
+ eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
+
+ return CGF.Builder.CreateBitCast(load, resultType);
+}
+
+/// Emit an ARC autorelease of the result of a function.
+///
+/// \return the value to actually return from the function
+static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
+ llvm::Value *result) {
+ // If we're returning 'self', kill the initial retain. This is a
+ // heuristic attempt to "encourage correctness" in the really unfortunate
+ // case where we have a return of self during a dealloc and we desperately
+ // need to avoid the possible autorelease.
+ if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
+ return self;
+
+ // At -O0, try to emit a fused retain/autorelease.
+ if (CGF.shouldUseFusedARCCalls())
+ if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
+ return fused;
+
+ return CGF.EmitARCAutoreleaseReturnValue(result);
+}
+
+/// Heuristically search for a dominating store to the return-value slot.
+static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
+  // Check whether a User is a store whose pointer operand is the ReturnValue.
+ // We are looking for stores to the ReturnValue, not for stores of the
+ // ReturnValue to some other location.
+ auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
+ auto *SI = dyn_cast<llvm::StoreInst>(U);
+ if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
+ return nullptr;
+ // These aren't actually possible for non-coerced returns, and we
+ // only care about non-coerced returns on this code path.
+ assert(!SI->isAtomic() && !SI->isVolatile());
+ return SI;
+ };
+ // If there are multiple uses of the return-value slot, just check
+ // for something immediately preceding the IP. Sometimes this can
+ // happen with how we generate implicit-returns; it can also happen
+ // with noreturn cleanups.
+ if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
+ llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
+ if (IP->empty()) return nullptr;
+ llvm::Instruction *I = &IP->back();
+
+ // Skip lifetime markers
+ for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
+ IE = IP->rend();
+ II != IE; ++II) {
+ if (llvm::IntrinsicInst *Intrinsic =
+ dyn_cast<llvm::IntrinsicInst>(&*II)) {
+ if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
+ const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
+ ++II;
+ if (II == IE)
+ break;
+ if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
+ continue;
+ }
+ }
+ I = &*II;
+ break;
+ }
+
+ return GetStoreIfValid(I);
+ }
+
+ llvm::StoreInst *store =
+ GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
+ if (!store) return nullptr;
+
+  // Now do a quick-and-dirty dominance check: just walk up the
+ // single-predecessors chain from the current insertion point.
+ llvm::BasicBlock *StoreBB = store->getParent();
+ llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
+ while (IP != StoreBB) {
+ if (!(IP = IP->getSinglePredecessor()))
+ return nullptr;
+ }
+
+ // Okay, the store's basic block dominates the insertion point; we
+ // can do our thing.
+ return store;
+}
+
+void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
+ bool EmitRetDbgLoc,
+ SourceLocation EndLoc) {
+ if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
+ // Naked functions don't have epilogues.
+ Builder.CreateUnreachable();
+ return;
+ }
+
+ // Functions with no result always return void.
+ if (!ReturnValue.isValid()) {
+ Builder.CreateRetVoid();
+ return;
+ }
+
+ llvm::DebugLoc RetDbgLoc;
+ llvm::Value *RV = nullptr;
+ QualType RetTy = FI.getReturnType();
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::InAlloca:
+    // Aggregates get evaluated directly into the destination. Sometimes we
+ // need to return the sret value in a register, though.
+ assert(hasAggregateEvaluationKind(RetTy));
+ if (RetAI.getInAllocaSRet()) {
+ llvm::Function::arg_iterator EI = CurFn->arg_end();
+ --EI;
+ llvm::Value *ArgStruct = &*EI;
+ llvm::Value *SRet = Builder.CreateStructGEP(
+ nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
+ RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
+ }
+ break;
+
+ case ABIArgInfo::Indirect: {
+ auto AI = CurFn->arg_begin();
+ if (RetAI.isSRetAfterThis())
+ ++AI;
+ switch (getEvaluationKind(RetTy)) {
+ case TEK_Complex: {
+ ComplexPairTy RT =
+ EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
+ EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
+ /*isInit*/ true);
+ break;
+ }
+ case TEK_Aggregate:
+      // Do nothing; aggregates get evaluated directly into the destination.
+ break;
+ case TEK_Scalar:
+ EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
+ MakeNaturalAlignAddrLValue(&*AI, RetTy),
+ /*isInit*/ true);
+ break;
+ }
+ break;
+ }
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct:
+ if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
+ RetAI.getDirectOffset() == 0) {
+ // The internal return value temp always will have pointer-to-return-type
+ // type, just do a load.
+
+ // If there is a dominating store to ReturnValue, we can elide
+ // the load, zap the store, and usually zap the alloca.
+ if (llvm::StoreInst *SI =
+ findDominatingStoreToReturnValue(*this)) {
+ // Reuse the debug location from the store unless there is
+ // cleanup code to be emitted between the store and return
+ // instruction.
+ if (EmitRetDbgLoc && !AutoreleaseResult)
+ RetDbgLoc = SI->getDebugLoc();
+ // Get the stored value and nuke the now-dead store.
+ RV = SI->getValueOperand();
+ SI->eraseFromParent();
+
+ // If that was the only use of the return value, nuke it as well now.
+ auto returnValueInst = ReturnValue.getPointer();
+ if (returnValueInst->use_empty()) {
+ if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
+ alloca->eraseFromParent();
+ ReturnValue = Address::invalid();
+ }
+ }
+
+ // Otherwise, we have to do a simple load.
+ } else {
+ RV = Builder.CreateLoad(ReturnValue);
+ }
+ } else {
+ // If the value is offset in memory, apply the offset now.
+ Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
+
+ RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
+ }
+
+ // In ARC, end functions that return a retainable type with a call
+ // to objc_autoreleaseReturnValue.
+ if (AutoreleaseResult) {
+ assert(getLangOpts().ObjCAutoRefCount &&
+ !FI.isReturnsRetained() &&
+ RetTy->isObjCRetainableType());
+ RV = emitAutoreleaseOfResult(*this, RV);
+ }
+
+ break;
+
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Expand:
+ llvm_unreachable("Invalid ABI kind for return argument");
+ }
+
+ llvm::Instruction *Ret;
+ if (RV) {
+ if (CurCodeDecl && SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
+ if (auto RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>()) {
+ SanitizerScope SanScope(this);
+ llvm::Value *Cond = Builder.CreateICmpNE(
+ RV, llvm::Constant::getNullValue(RV->getType()));
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(EndLoc),
+ EmitCheckSourceLocation(RetNNAttr->getLocation()),
+ };
+ EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
+ "nonnull_return", StaticData, None);
+ }
+ }
+ Ret = Builder.CreateRet(RV);
+ } else {
+ Ret = Builder.CreateRetVoid();
+ }
+
+ if (RetDbgLoc)
+ Ret->setDebugLoc(std::move(RetDbgLoc));
+}
+
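+// In the MS C++ ABI, records with a non-trivial destructor or copy
+// constructor are passed directly in the argument memory area
+// (RAA_DirectInMemory); inalloca is how that area is modeled.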
+static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
+ const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
+ return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
+}
+
+static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
+ QualType Ty) {
+ // FIXME: Generate IR in one pass, rather than going back and fixing up these
+ // placeholders.
+ llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *Placeholder =
+ llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
+ Placeholder = CGF.Builder.CreateDefaultAlignedLoad(Placeholder);
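+  // This load of an undef pointer is only a placeholder; EmitCall rewrites
+  // it to the real inalloca slot later via deferPlaceholderReplacement.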
+
+ // FIXME: When we generate this IR in one pass, we shouldn't need
+ // this win32-specific alignment hack.
+ CharUnits Align = CharUnits::fromQuantity(4);
+
+ return AggValueSlot::forAddr(Address(Placeholder, Align),
+ Ty.getQualifiers(),
+ AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+}
+
+void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
+ const VarDecl *param,
+ SourceLocation loc) {
+ // StartFunction converted the ABI-lowered parameter(s) into a
+ // local alloca. We need to turn that into an r-value suitable
+ // for EmitCall.
+ Address local = GetAddrOfLocalVar(param);
+
+ QualType type = param->getType();
+
+ // For the most part, we just need to load the alloca, except:
+ // 1) aggregate r-values are actually pointers to temporaries, and
+ // 2) references to non-scalars are pointers directly to the aggregate.
+ // I don't know why references to scalars are different here.
+ if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
+ if (!hasScalarEvaluationKind(ref->getPointeeType()))
+ return args.add(RValue::getAggregate(local), type);
+
+ // Locals which are references to scalars are represented
+ // with allocas holding the pointer.
+ return args.add(RValue::get(Builder.CreateLoad(local)), type);
+ }
+
+ assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
+ "cannot emit delegate call arguments for inalloca arguments!");
+
+ args.add(convertTempToRValue(local, type, loc), type);
+}
+
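+// Cheap syntactic checks: a literal null pointer is provably null, and an
+// alloca (a stack address) is provably non-null; anything else is unknown.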
+static bool isProvablyNull(llvm::Value *addr) {
+ return isa<llvm::ConstantPointerNull>(addr);
+}
+
+static bool isProvablyNonNull(llvm::Value *addr) {
+ return isa<llvm::AllocaInst>(addr);
+}
+
+/// Emit the actual writing-back of a writeback.
+static void emitWriteback(CodeGenFunction &CGF,
+ const CallArgList::Writeback &writeback) {
+ const LValue &srcLV = writeback.Source;
+ Address srcAddr = srcLV.getAddress();
+ assert(!isProvablyNull(srcAddr.getPointer()) &&
+ "shouldn't have writeback for provably null argument");
+
+ llvm::BasicBlock *contBB = nullptr;
+
+ // If the argument wasn't provably non-null, we need to null check
+ // before doing the store.
+ bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
+ if (!provablyNonNull) {
+ llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
+ contBB = CGF.createBasicBlock("icr.done");
+
+ llvm::Value *isNull =
+ CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
+ CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
+ CGF.EmitBlock(writebackBB);
+ }
+
+ // Load the value to writeback.
+ llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
+
+ // Cast it back, in case we're writing an id to a Foo* or something.
+ value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
+ "icr.writeback-cast");
+
+ // Perform the writeback.
+
+ // If we have a "to use" value, it's something we need to emit a use
+ // of. This has to be carefully threaded in: if it's done after the
+ // release it's potentially undefined behavior (and the optimizer
+ // will ignore it), and if it happens before the retain then the
+ // optimizer could move the release there.
+ if (writeback.ToUse) {
+ assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
+
+ // Retain the new value. No need to block-copy here: the block's
+ // being passed up the stack.
+ value = CGF.EmitARCRetainNonBlock(value);
+
+ // Emit the intrinsic use here.
+ CGF.EmitARCIntrinsicUse(writeback.ToUse);
+
+ // Load the old value (primitively).
+ llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
+
+ // Put the new value in place (primitively).
+ CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
+
+ // Release the old value.
+ CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
+
+ // Otherwise, we can just do a normal lvalue store.
+ } else {
+ CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
+ }
+
+ // Jump to the continuation block.
+ if (!provablyNonNull)
+ CGF.EmitBlock(contBB);
+}
+
+static void emitWritebacks(CodeGenFunction &CGF,
+ const CallArgList &args) {
+ for (const auto &I : args.writebacks())
+ emitWriteback(CGF, I);
+}
+
+static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
+ const CallArgList &CallArgs) {
+ assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
+ ArrayRef<CallArgList::CallArgCleanup> Cleanups =
+ CallArgs.getCleanupsToDeactivate();
+ // Iterate in reverse to increase the likelihood of popping the cleanup.
+ for (const auto &I : llvm::reverse(Cleanups)) {
+ CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
+ I.IsActiveIP->eraseFromParent();
+ }
+}
+
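+/// If E is (ignoring parens) a unary '&', return its operand; e.g. for the
+/// argument '&x' this yields the expression 'x'.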
+static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
+ if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
+ if (uop->getOpcode() == UO_AddrOf)
+ return uop->getSubExpr();
+ return nullptr;
+}
+
+/// Emit an argument that's being passed call-by-writeback. That is,
+/// we are passing the address of an __autoreleased temporary; it
+/// might be copy-initialized with the current value of the given
+/// address, but it will definitely be copied out of after the call.
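+///
+/// The classic case is passing '&strongVar' to an
+/// 'NSError * __autoreleasing *' parameter under ARC: a temporary is passed
+/// instead and its value is copied back into 'strongVar' after the call.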
+static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
+ const ObjCIndirectCopyRestoreExpr *CRE) {
+ LValue srcLV;
+
+ // Make an optimistic effort to emit the address as an l-value.
+ // This can fail if the argument expression is more complicated.
+ if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
+ srcLV = CGF.EmitLValue(lvExpr);
+
+ // Otherwise, just emit it as a scalar.
+ } else {
+ Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
+
+ QualType srcAddrType =
+ CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
+ srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
+ }
+ Address srcAddr = srcLV.getAddress();
+
+ // The dest and src types don't necessarily match in LLVM terms
+ // because of the crazy ObjC compatibility rules.
+
+ llvm::PointerType *destType =
+ cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
+
+ // If the address is a constant null, just pass the appropriate null.
+ if (isProvablyNull(srcAddr.getPointer())) {
+ args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
+ CRE->getType());
+ return;
+ }
+
+ // Create the temporary.
+ Address temp = CGF.CreateTempAlloca(destType->getElementType(),
+ CGF.getPointerAlign(),
+ "icr.temp");
+ // Loading an l-value can introduce a cleanup if the l-value is __weak,
+ // and that cleanup will be conditional if we can't prove that the l-value
+ // isn't null, so we need to register a dominating point so that the cleanups
+ // system will make valid IR.
+ CodeGenFunction::ConditionalEvaluation condEval(CGF);
+
+ // Zero-initialize it if we're not doing a copy-initialization.
+ bool shouldCopy = CRE->shouldCopy();
+ if (!shouldCopy) {
+ llvm::Value *null =
+ llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(destType->getElementType()));
+ CGF.Builder.CreateStore(null, temp);
+ }
+
+ llvm::BasicBlock *contBB = nullptr;
+ llvm::BasicBlock *originBB = nullptr;
+
+ // If the address is *not* known to be non-null, we need to switch.
+ llvm::Value *finalArgument;
+
+ bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
+ if (provablyNonNull) {
+ finalArgument = temp.getPointer();
+ } else {
+ llvm::Value *isNull =
+ CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
+
+ finalArgument = CGF.Builder.CreateSelect(isNull,
+ llvm::ConstantPointerNull::get(destType),
+ temp.getPointer(), "icr.argument");
+
+ // If we need to copy, then the load has to be conditional, which
+ // means we need control flow.
+ if (shouldCopy) {
+ originBB = CGF.Builder.GetInsertBlock();
+ contBB = CGF.createBasicBlock("icr.cont");
+ llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
+ CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
+ CGF.EmitBlock(copyBB);
+ condEval.begin(CGF);
+ }
+ }
+
+ llvm::Value *valueToUse = nullptr;
+
+ // Perform a copy if necessary.
+ if (shouldCopy) {
+ RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
+ assert(srcRV.isScalar());
+
+ llvm::Value *src = srcRV.getScalarVal();
+ src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
+ "icr.cast");
+
+ // Use an ordinary store, not a store-to-lvalue.
+ CGF.Builder.CreateStore(src, temp);
+
+ // If optimization is enabled, and the value was held in a
+ // __strong variable, we need to tell the optimizer that this
+ // value has to stay alive until we're doing the store back.
+ // This is because the temporary is effectively unretained,
+ // and so otherwise we can violate the high-level semantics.
+ if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
+ srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
+ valueToUse = src;
+ }
+ }
+
+ // Finish the control flow if we needed it.
+ if (shouldCopy && !provablyNonNull) {
+ llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
+ CGF.EmitBlock(contBB);
+
+ // Make a phi for the value to intrinsically use.
+ if (valueToUse) {
+ llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
+ "icr.to-use");
+ phiToUse->addIncoming(valueToUse, copyBB);
+ phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
+ originBB);
+ valueToUse = phiToUse;
+ }
+
+ condEval.end(CGF);
+ }
+
+ args.addWriteback(srcLV, temp, valueToUse);
+ args.add(RValue::get(finalArgument), CRE->getType());
+}
+
+void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
+ assert(!StackBase && !StackCleanup.isValid());
+
+ // Save the stack.
+ llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
+ StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
+}
+
+void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
+ if (StackBase) {
+ // Restore the stack after the call.
+ llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
+ CGF.Builder.CreateCall(F, StackBase);
+ }
+}
+
+void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
+ SourceLocation ArgLoc,
+ const FunctionDecl *FD,
+ unsigned ParmNum) {
+ if (!SanOpts.has(SanitizerKind::NonnullAttribute) || !FD)
+ return;
+ auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr;
+ unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
+ auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo);
+ if (!NNAttr)
+ return;
+ SanitizerScope SanScope(this);
+ assert(RV.isScalar());
+ llvm::Value *V = RV.getScalarVal();
+ llvm::Value *Cond =
+ Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(ArgLoc),
+ EmitCheckSourceLocation(NNAttr->getLocation()),
+ llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
+ };
+ EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
+ "nonnull_arg", StaticData, None);
+}
+
+void CodeGenFunction::EmitCallArgs(
+ CallArgList &Args, ArrayRef<QualType> ArgTypes,
+ llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
+ const FunctionDecl *CalleeDecl, unsigned ParamsToSkip) {
+ assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
+
+ auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg) {
+ if (CalleeDecl == nullptr || I >= CalleeDecl->getNumParams())
+ return;
+ auto *PS = CalleeDecl->getParamDecl(I)->getAttr<PassObjectSizeAttr>();
+ if (PS == nullptr)
+ return;
+
+ const auto &Context = getContext();
+ auto SizeTy = Context.getSizeType();
+ auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
+ llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T);
+ Args.add(RValue::get(V), SizeTy);
+ };
+
+ // We *have* to evaluate arguments from right to left in the MS C++ ABI,
+ // because arguments are destroyed left to right in the callee.
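+  // So for 'f(g(), h())' in the MS C++ ABI, h() is emitted before g(), and
+  // the CallArgList is un-reversed afterwards to match the IR signature.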
+ if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
+ // Insert a stack save if we're going to need any inalloca args.
+ bool HasInAllocaArgs = false;
+ for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
+ I != E && !HasInAllocaArgs; ++I)
+ HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
+ if (HasInAllocaArgs) {
+ assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
+ Args.allocateArgumentMemory(*this);
+ }
+
+ // Evaluate each argument.
+ size_t CallArgsStart = Args.size();
+ for (int I = ArgTypes.size() - 1; I >= 0; --I) {
+ CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
+ EmitCallArg(Args, *Arg, ArgTypes[I]);
+ EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
+ CalleeDecl, ParamsToSkip + I);
+ MaybeEmitImplicitObjectSize(I, *Arg);
+ }
+
+ // Un-reverse the arguments we just evaluated so they match up with the LLVM
+ // IR function.
+ std::reverse(Args.begin() + CallArgsStart, Args.end());
+ return;
+ }
+
+ for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
+ CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
+ assert(Arg != ArgRange.end());
+ EmitCallArg(Args, *Arg, ArgTypes[I]);
+ EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
+ CalleeDecl, ParamsToSkip + I);
+ MaybeEmitImplicitObjectSize(I, *Arg);
+ }
+}
+
+namespace {
+
+struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
+ DestroyUnpassedArg(Address Addr, QualType Ty)
+ : Addr(Addr), Ty(Ty) {}
+
+ Address Addr;
+ QualType Ty;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
+ assert(!Dtor->isTrivial());
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
+ /*Delegating=*/false, Addr);
+ }
+};
+
+struct DisableDebugLocationUpdates {
+ CodeGenFunction &CGF;
+ bool disabledDebugInfo;
+ DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
+ if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
+ CGF.disableDebugInfo();
+ }
+ ~DisableDebugLocationUpdates() {
+ if (disabledDebugInfo)
+ CGF.enableDebugInfo();
+ }
+};
+
+} // end anonymous namespace
+
+void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
+ QualType type) {
+ DisableDebugLocationUpdates Dis(*this, E);
+ if (const ObjCIndirectCopyRestoreExpr *CRE
+ = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
+ assert(getLangOpts().ObjCAutoRefCount);
+ assert(getContext().hasSameType(E->getType(), type));
+ return emitWritebackArg(*this, args, CRE);
+ }
+
+ assert(type->isReferenceType() == E->isGLValue() &&
+ "reference binding to unmaterialized r-value!");
+
+ if (E->isGLValue()) {
+ assert(E->getObjectKind() == OK_Ordinary);
+ return args.add(EmitReferenceBindingToExpr(E), type);
+ }
+
+ bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
+
+ // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
+ // However, we still have to push an EH-only cleanup in case we unwind before
+ // we make it to the call.
+ if (HasAggregateEvalKind &&
+ CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
+ // If we're using inalloca, use the argument memory. Otherwise, use a
+ // temporary.
+ AggValueSlot Slot;
+ if (args.isUsingInAlloca())
+ Slot = createPlaceholderSlot(*this, type);
+ else
+ Slot = CreateAggTemp(type, "agg.tmp");
+
+ const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
+ bool DestroyedInCallee =
+ RD && RD->hasNonTrivialDestructor() &&
+ CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
+ if (DestroyedInCallee)
+ Slot.setExternallyDestructed();
+
+ EmitAggExpr(E, Slot);
+ RValue RV = Slot.asRValue();
+ args.add(RV, type);
+
+ if (DestroyedInCallee) {
+ // Create a no-op GEP between the placeholder and the cleanup so we can
+ // RAUW it successfully. It also serves as a marker of the first
+ // instruction where the cleanup is active.
+ pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
+ type);
+ // This unreachable is a temporary marker which will be removed later.
+ llvm::Instruction *IsActive = Builder.CreateUnreachable();
+ args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
+ }
+ return;
+ }
+
+ if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
+ cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
+ LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
+ assert(L.isSimple());
+ if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
+ args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
+ } else {
+ // We can't represent a misaligned lvalue in the CallArgList, so copy
+ // to an aligned temporary now.
+ Address tmp = CreateMemTemp(type);
+ EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile());
+ args.add(RValue::getAggregate(tmp), type);
+ }
+ return;
+ }
+
+ args.add(EmitAnyExprToTemp(E), type);
+}
+
+QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
+ // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
+ // implicitly widens null pointer constants that are arguments to varargs
+ // functions to pointer-sized ints.
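+  // For example, in 'printf("%p", NULL)' on Win64 the 32-bit literal 0 is
+  // widened to the pointer-sized intptr type so it fills a full vararg slot.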
+ if (!getTarget().getTriple().isOSWindows())
+ return Arg->getType();
+
+ if (Arg->getType()->isIntegerType() &&
+ getContext().getTypeSize(Arg->getType()) <
+ getContext().getTargetInfo().getPointerWidth(0) &&
+ Arg->isNullPointerConstant(getContext(),
+ Expr::NPC_ValueDependentIsNotNull)) {
+ return getContext().getIntPtrType();
+ }
+
+ return Arg->getType();
+}
+
+// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
+// optimizer it can aggressively ignore unwind edges.
+void
+CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
+ if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
+ !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
+ Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
+ CGM.getNoObjCARCExceptionsMetadata());
+}
+
+/// Emits a call to the given no-arguments nounwind runtime function.
+llvm::CallInst *
+CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
+ const llvm::Twine &name) {
+ return EmitNounwindRuntimeCall(callee, None, name);
+}
+
+/// Emits a call to the given nounwind runtime function.
+llvm::CallInst *
+CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
+ ArrayRef<llvm::Value*> args,
+ const llvm::Twine &name) {
+ llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
+ call->setDoesNotThrow();
+ return call;
+}
+
+/// Emits a simple call (never an invoke) to the given no-arguments
+/// runtime function.
+llvm::CallInst *
+CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
+ const llvm::Twine &name) {
+ return EmitRuntimeCall(callee, None, name);
+}
+
+/// Emits a simple call (never an invoke) to the given runtime
+/// function.
+llvm::CallInst *
+CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
+ ArrayRef<llvm::Value*> args,
+ const llvm::Twine &name) {
+ llvm::CallInst *call = Builder.CreateCall(callee, args, name);
+ call->setCallingConv(getRuntimeCC());
+ return call;
+}
+
+// Calls which may throw must have operand bundles indicating which funclet
+// they are nested within.
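+// For example, a call emitted inside a catchpad carries
+//   [ "funclet"(token %catch.pad) ]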
+static void
+getBundlesForFunclet(llvm::Value *Callee,
+ llvm::Instruction *CurrentFuncletPad,
+ SmallVectorImpl<llvm::OperandBundleDef> &BundleList) {
+ // There is no need for a funclet operand bundle if we aren't inside a funclet.
+ if (!CurrentFuncletPad)
+ return;
+
+ // Skip intrinsics which cannot throw.
+ auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
+ if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
+ return;
+
+ BundleList.emplace_back("funclet", CurrentFuncletPad);
+}
+
+/// Emits a call or invoke to the given noreturn runtime function.
+void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
+ ArrayRef<llvm::Value*> args) {
+ SmallVector<llvm::OperandBundleDef, 1> BundleList;
+ getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
+
+ if (getInvokeDest()) {
+ llvm::InvokeInst *invoke =
+ Builder.CreateInvoke(callee,
+ getUnreachableBlock(),
+ getInvokeDest(),
+ args,
+ BundleList);
+ invoke->setDoesNotReturn();
+ invoke->setCallingConv(getRuntimeCC());
+ } else {
+ llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
+ call->setDoesNotReturn();
+ call->setCallingConv(getRuntimeCC());
+ Builder.CreateUnreachable();
+ }
+}
+
+/// Emits a call or invoke instruction to the given nullary runtime
+/// function.
+llvm::CallSite
+CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
+ const Twine &name) {
+ return EmitRuntimeCallOrInvoke(callee, None, name);
+}
+
+/// Emits a call or invoke instruction to the given runtime function.
+llvm::CallSite
+CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
+ ArrayRef<llvm::Value*> args,
+ const Twine &name) {
+ llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
+ callSite.setCallingConv(getRuntimeCC());
+ return callSite;
+}
+
+/// Emits a call or invoke instruction to the given function, depending
+/// on the current state of the EH stack.
+llvm::CallSite
+CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
+ ArrayRef<llvm::Value *> Args,
+ const Twine &Name) {
+ llvm::BasicBlock *InvokeDest = getInvokeDest();
+
+ llvm::Instruction *Inst;
+ if (!InvokeDest)
+ Inst = Builder.CreateCall(Callee, Args, Name);
+ else {
+ llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
+ Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
+ EmitBlock(ContBB);
+ }
+
+ // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
+ // optimizer it can aggressively ignore unwind edges.
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ AddObjCARCExceptionMetadata(Inst);
+
+ return llvm::CallSite(Inst);
+}
+
+/// \brief Store a non-aggregate value to an address to initialize it. For
+/// initialization, a non-atomic store will be used.
+static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
+ LValue Dst) {
+ if (Src.isScalar())
+ CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
+ else
+ CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
+}
+
+void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
+ llvm::Value *New) {
+ DeferredReplacements.push_back(std::make_pair(Old, New));
+}
+
+RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
+ llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ const CallArgList &CallArgs,
+ CGCalleeInfo CalleeInfo,
+ llvm::Instruction **callOrInvoke) {
+ // FIXME: We no longer need the types from CallArgs; lift up and simplify.
+
+ // Handle struct-return functions by passing a pointer to the
+ // location that we would like to return into.
+ QualType RetTy = CallInfo.getReturnType();
+ const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
+
+ llvm::FunctionType *IRFuncTy =
+ cast<llvm::FunctionType>(
+ cast<llvm::PointerType>(Callee->getType())->getElementType());
+
+ // If we're using inalloca, insert the allocation after the stack save.
+ // FIXME: Do this earlier rather than hacking it in here!
+ Address ArgMemory = Address::invalid();
+ const llvm::StructLayout *ArgMemoryLayout = nullptr;
+ if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
+ ArgMemoryLayout = CGM.getDataLayout().getStructLayout(ArgStruct);
+ llvm::Instruction *IP = CallArgs.getStackBase();
+ llvm::AllocaInst *AI;
+ if (IP) {
+ IP = IP->getNextNode();
+ AI = new llvm::AllocaInst(ArgStruct, "argmem", IP);
+ } else {
+ AI = CreateTempAlloca(ArgStruct, "argmem");
+ }
+ auto Align = CallInfo.getArgStructAlignment();
+ AI->setAlignment(Align.getQuantity());
+ AI->setUsedWithInAlloca(true);
+ assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
+ ArgMemory = Address(AI, Align);
+ }
+
+ // Helper function to drill into the inalloca allocation.
+ auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
+ auto FieldOffset =
+ CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
+ return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
+ };
+
+ ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
+ SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
+
+ // If the call returns a temporary with struct return, create a temporary
+ // alloca to hold the result, unless one is given to us.
+ Address SRetPtr = Address::invalid();
+ size_t UnusedReturnSize = 0;
+ if (RetAI.isIndirect() || RetAI.isInAlloca()) {
+ if (!ReturnValue.isNull()) {
+ SRetPtr = ReturnValue.getValue();
+ } else {
+ SRetPtr = CreateMemTemp(RetTy);
+ if (HaveInsertPoint() && ReturnValue.isUnused()) {
+ uint64_t size =
+ CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
+ if (EmitLifetimeStart(size, SRetPtr.getPointer()))
+ UnusedReturnSize = size;
+ }
+ }
+ if (IRFunctionArgs.hasSRetArg()) {
+ IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
+ } else {
+ Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
+ Builder.CreateStore(SRetPtr.getPointer(), Addr);
+ }
+ }
+
+ assert(CallInfo.arg_size() == CallArgs.size() &&
+ "Mismatch between function signature & arguments.");
+ unsigned ArgNo = 0;
+ CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
+ for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
+ I != E; ++I, ++info_it, ++ArgNo) {
+ const ABIArgInfo &ArgInfo = info_it->info;
+ RValue RV = I->RV;
+
+ // Insert a padding argument to ensure proper alignment.
+ if (IRFunctionArgs.hasPaddingArg(ArgNo))
+ IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
+ llvm::UndefValue::get(ArgInfo.getPaddingType());
+
+ unsigned FirstIRArg, NumIRArgs;
+ std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
+
+ switch (ArgInfo.getKind()) {
+ case ABIArgInfo::InAlloca: {
+ assert(NumIRArgs == 0);
+ assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
+ if (RV.isAggregate()) {
+ // Replace the placeholder with the appropriate argument slot GEP.
+ llvm::Instruction *Placeholder =
+ cast<llvm::Instruction>(RV.getAggregatePointer());
+ CGBuilderTy::InsertPoint IP = Builder.saveIP();
+ Builder.SetInsertPoint(Placeholder);
+ Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
+ Builder.restoreIP(IP);
+ deferPlaceholderReplacement(Placeholder, Addr.getPointer());
+ } else {
+ // Store the RValue into the argument struct.
+ Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
+ unsigned AS = Addr.getType()->getPointerAddressSpace();
+ llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
+      // There are some cases where a trivial bitcast is unavoidable. The
+      // definition of a type later in a translation unit may change its type
+      // from {}* to (%struct.foo*)*.
+ if (Addr.getType() != MemType)
+ Addr = Builder.CreateBitCast(Addr, MemType);
+ LValue argLV = MakeAddrLValue(Addr, I->Ty);
+ EmitInitStoreOfNonAggregate(*this, RV, argLV);
+ }
+ break;
+ }
+
+ case ABIArgInfo::Indirect: {
+ assert(NumIRArgs == 1);
+ if (RV.isScalar() || RV.isComplex()) {
+ // Make a temporary alloca to pass the argument.
+ Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
+ IRCallArgs[FirstIRArg] = Addr.getPointer();
+
+ LValue argLV = MakeAddrLValue(Addr, I->Ty);
+ EmitInitStoreOfNonAggregate(*this, RV, argLV);
+ } else {
+ // We want to avoid creating an unnecessary temporary+copy here;
+ // however, we need one in three cases:
+ // 1. If the argument is not byval, and we are required to copy the
+ // source. (This case doesn't occur on any common architecture.)
+ // 2. If the argument is byval, RV is not sufficiently aligned, and
+ // we cannot force it to be sufficiently aligned.
+ // 3. If the argument is byval, but RV is located in an address space
+ // different than that of the argument (0).
+ Address Addr = RV.getAggregateAddress();
+ CharUnits Align = ArgInfo.getIndirectAlign();
+ const llvm::DataLayout *TD = &CGM.getDataLayout();
+ const unsigned RVAddrSpace = Addr.getType()->getAddressSpace();
+ const unsigned ArgAddrSpace =
+ (FirstIRArg < IRFuncTy->getNumParams()
+ ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
+ : 0);
+ if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
+ (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align &&
+ llvm::getOrEnforceKnownAlignment(Addr.getPointer(),
+ Align.getQuantity(), *TD)
+ < Align.getQuantity()) ||
+ (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
+ // Create an aligned temporary, and copy to it.
+ Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
+ IRCallArgs[FirstIRArg] = AI.getPointer();
+ EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
+ } else {
+ // Skip the extra memcpy call.
+ IRCallArgs[FirstIRArg] = Addr.getPointer();
+ }
+ }
+ break;
+ }
+
+ case ABIArgInfo::Ignore:
+ assert(NumIRArgs == 0);
+ break;
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
+ ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
+ ArgInfo.getDirectOffset() == 0) {
+ assert(NumIRArgs == 1);
+ llvm::Value *V;
+ if (RV.isScalar())
+ V = RV.getScalarVal();
+ else
+ V = Builder.CreateLoad(RV.getAggregateAddress());
+
+ // We might have to widen integers, but we should never truncate.
+ if (ArgInfo.getCoerceToType() != V->getType() &&
+ V->getType()->isIntegerTy())
+ V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
+
+ // If the argument doesn't match, perform a bitcast to coerce it. This
+ // can happen due to trivial type mismatches.
+ if (FirstIRArg < IRFuncTy->getNumParams() &&
+ V->getType() != IRFuncTy->getParamType(FirstIRArg))
+ V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
+ IRCallArgs[FirstIRArg] = V;
+ break;
+ }
+
+ // FIXME: Avoid the conversion through memory if possible.
+ Address Src = Address::invalid();
+ if (RV.isScalar() || RV.isComplex()) {
+ Src = CreateMemTemp(I->Ty, "coerce");
+ LValue SrcLV = MakeAddrLValue(Src, I->Ty);
+ EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
+ } else {
+ Src = RV.getAggregateAddress();
+ }
+
+ // If the value is offset in memory, apply the offset now.
+ Src = emitAddressAtOffset(*this, Src, ArgInfo);
+
+ // Fast-isel and the optimizer generally like scalar values better than
+ // FCAs, so we flatten them if this is safe to do for this argument.
+ llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
+ if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
+ llvm::Type *SrcTy = Src.getType()->getElementType();
+ uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
+ uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
+
+ // If the source type is smaller than the destination type of the
+ // coerce-to logic, copy the source value into a temp alloca the size
+ // of the destination type to allow loading all of it. The bits past
+ // the source value are left undef.
+ if (SrcSize < DstSize) {
+ Address TempAlloca
+ = CreateTempAlloca(STy, Src.getAlignment(),
+ Src.getName() + ".coerce");
+ Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
+ Src = TempAlloca;
+ } else {
+ Src = Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(STy));
+ }
+
+ auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
+ assert(NumIRArgs == STy->getNumElements());
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
+ Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
+ llvm::Value *LI = Builder.CreateLoad(EltPtr);
+ IRCallArgs[FirstIRArg + i] = LI;
+ }
+ } else {
+ // In the simple case, just pass the coerced loaded value.
+ assert(NumIRArgs == 1);
+ IRCallArgs[FirstIRArg] =
+ CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
+ }
+
+ break;
+ }
+
+ case ABIArgInfo::Expand:
+ unsigned IRArgPos = FirstIRArg;
+ ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
+ assert(IRArgPos == FirstIRArg + NumIRArgs);
+ break;
+ }
+ }
+
+ if (ArgMemory.isValid()) {
+ llvm::Value *Arg = ArgMemory.getPointer();
+ if (CallInfo.isVariadic()) {
+ // When passing non-POD arguments by value to variadic functions, we will
+ // end up with a variadic prototype and an inalloca call site. In such
+ // cases, we can't do any parameter mismatch checks. Give up and bitcast
+ // the callee.
+ unsigned CalleeAS =
+ cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
+ Callee = Builder.CreateBitCast(
+ Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
+ } else {
+ llvm::Type *LastParamTy =
+ IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
+ if (Arg->getType() != LastParamTy) {
+#ifndef NDEBUG
+ // Assert that these structs have equivalent element types.
+ llvm::StructType *FullTy = CallInfo.getArgStruct();
+ llvm::StructType *DeclaredTy = cast<llvm::StructType>(
+ cast<llvm::PointerType>(LastParamTy)->getElementType());
+ assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
+ for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
+ DE = DeclaredTy->element_end(),
+ FI = FullTy->element_begin();
+ DI != DE; ++DI, ++FI)
+ assert(*DI == *FI);
+#endif
+ Arg = Builder.CreateBitCast(Arg, LastParamTy);
+ }
+ }
+ assert(IRFunctionArgs.hasInallocaArg());
+ IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
+ }
+
+ if (!CallArgs.getCleanupsToDeactivate().empty())
+ deactivateArgCleanupsBeforeCall(*this, CallArgs);
+
+ // If the callee is a bitcast of a function to a varargs pointer to function
+ // type, check to see if we can remove the bitcast. This handles some cases
+ // with unprototyped functions.
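+  // For illustration, a C-level sketch of the case this strips:
+  //
+  //   void f();     // unprototyped declaration
+  //   f(1, 2);      // call site bitcasts @f to void (i32, i32)*
+  //
+  // When the IR argument types line up as checked below, the bitcast can be
+  // dropped and @f called directly.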
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
+ if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
+      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
+ llvm::FunctionType *CurFT =
+ cast<llvm::FunctionType>(CurPT->getElementType());
+ llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
+
+ if (CE->getOpcode() == llvm::Instruction::BitCast &&
+ ActualFT->getReturnType() == CurFT->getReturnType() &&
+ ActualFT->getNumParams() == CurFT->getNumParams() &&
+ ActualFT->getNumParams() == IRCallArgs.size() &&
+ (CurFT->isVarArg() || !ActualFT->isVarArg())) {
+ bool ArgsMatch = true;
+ for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
+ if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
+ ArgsMatch = false;
+ break;
+ }
+
+ // Strip the cast if we can get away with it. This is a nice cleanup,
+ // but also allows us to inline the function at -O0 if it is marked
+ // always_inline.
+ if (ArgsMatch)
+ Callee = CalleeF;
+ }
+ }
+
+ assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
+ for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
+    // The inalloca argument can have a different type.
+ if (IRFunctionArgs.hasInallocaArg() &&
+ i == IRFunctionArgs.getInallocaArgNo())
+ continue;
+ if (i < IRFuncTy->getNumParams())
+ assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
+ }
+
+ unsigned CallingConv;
+ CodeGen::AttributeListType AttributeList;
+ CGM.ConstructAttributeList(Callee->getName(), CallInfo, CalleeInfo,
+ AttributeList, CallingConv,
+ /*AttrOnCallSite=*/true);
+ llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
+ AttributeList);
+
+ bool CannotThrow;
+ if (currentFunctionUsesSEHTry()) {
+    // SEH cares about asynchronous exceptions, so everything can "throw."
+ CannotThrow = false;
+ } else if (isCleanupPadScope() &&
+ EHPersonality::get(*this).isMSVCXXPersonality()) {
+ // The MSVC++ personality will implicitly terminate the program if an
+ // exception is thrown. An unwind edge cannot be reached.
+ CannotThrow = true;
+ } else {
+    // Otherwise, nounwind call sites will never throw.
+ CannotThrow = Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::NoUnwind);
+ }
+ llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
+
+ SmallVector<llvm::OperandBundleDef, 1> BundleList;
+ getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);
+
+ llvm::CallSite CS;
+ if (!InvokeDest) {
+ CS = Builder.CreateCall(Callee, IRCallArgs, BundleList);
+ } else {
+ llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+ CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs,
+ BundleList);
+ EmitBlock(Cont);
+ }
+ if (callOrInvoke)
+ *callOrInvoke = CS.getInstruction();
+
+ if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
+ !CS.hasFnAttr(llvm::Attribute::NoInline))
+ Attrs =
+ Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::AlwaysInline);
+
+ // Disable inlining inside SEH __try blocks.
+ if (isSEHTryScope())
+ Attrs =
+ Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::NoInline);
+
+ CS.setAttributes(Attrs);
+ CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
+
+ // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
+ // optimizer it can aggressively ignore unwind edges.
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ AddObjCARCExceptionMetadata(CS.getInstruction());
+
+ // If the call doesn't return, finish the basic block and clear the
+ // insertion point; this allows the rest of IRgen to discard
+ // unreachable code.
+ if (CS.doesNotReturn()) {
+ if (UnusedReturnSize)
+ EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
+ SRetPtr.getPointer());
+
+ Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+
+    // FIXME: For now, emit a dummy basic block because expr emitters
+    // generally are not ready to handle emitting expressions at unreachable
+    // points.
+ EnsureInsertPoint();
+
+ // Return a reasonable RValue.
+ return GetUndefRValue(RetTy);
+ }
+
+ llvm::Instruction *CI = CS.getInstruction();
+ if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
+ CI->setName("call");
+
+ // Emit any writebacks immediately. Arguably this should happen
+ // after any return-value munging.
+ if (CallArgs.hasWritebacks())
+ emitWritebacks(*this, CallArgs);
+
+ // The stack cleanup for inalloca arguments has to run out of the normal
+ // lexical order, so deactivate it and run it manually here.
+ CallArgs.freeArgumentMemory(*this);
+
+ if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
+ const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
+ if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
+ Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
+ }
+
+ RValue Ret = [&] {
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::InAlloca:
+ case ABIArgInfo::Indirect: {
+ RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
+ if (UnusedReturnSize)
+ EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
+ SRetPtr.getPointer());
+ return ret;
+ }
+
+ case ABIArgInfo::Ignore:
+      // The return value is ignored at the ABI level, but our caller still
+      // expects an RValue, so construct an appropriate undef return value.
+ return GetUndefRValue(RetTy);
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ llvm::Type *RetIRTy = ConvertType(RetTy);
+ if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
+ switch (getEvaluationKind(RetTy)) {
+ case TEK_Complex: {
+ llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
+ llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
+ return RValue::getComplex(std::make_pair(Real, Imag));
+ }
+ case TEK_Aggregate: {
+ Address DestPtr = ReturnValue.getValue();
+ bool DestIsVolatile = ReturnValue.isVolatile();
+
+ if (!DestPtr.isValid()) {
+ DestPtr = CreateMemTemp(RetTy, "agg.tmp");
+ DestIsVolatile = false;
+ }
+ BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
+ return RValue::getAggregate(DestPtr);
+ }
+ case TEK_Scalar: {
+        // If the call result doesn't match the expected return type, perform
+        // a bitcast to coerce it. This can happen due to trivial type
+        // mismatches.
+ llvm::Value *V = CI;
+ if (V->getType() != RetIRTy)
+ V = Builder.CreateBitCast(V, RetIRTy);
+ return RValue::get(V);
+ }
+ }
+ llvm_unreachable("bad evaluation kind");
+ }
+
+ Address DestPtr = ReturnValue.getValue();
+ bool DestIsVolatile = ReturnValue.isVolatile();
+
+ if (!DestPtr.isValid()) {
+ DestPtr = CreateMemTemp(RetTy, "coerce");
+ DestIsVolatile = false;
+ }
+
+ // If the value is offset in memory, apply the offset now.
+ Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
+ CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
+
+ return convertTempToRValue(DestPtr, RetTy, SourceLocation());
+ }
+
+ case ABIArgInfo::Expand:
+ llvm_unreachable("Invalid ABI kind for return argument");
+ }
+
+ llvm_unreachable("Unhandled ABIArgInfo::Kind");
+  }();
+
+ const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
+
+ if (Ret.isScalar() && TargetDecl) {
+ if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
+ llvm::Value *OffsetValue = nullptr;
+ if (const auto *Offset = AA->getOffset())
+ OffsetValue = EmitScalarExpr(Offset);
+
+ llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
+ llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
+ EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
+ OffsetValue);
+ }
+ }
+
+ return Ret;
+}
+
+/* VarArg handling */
+
+Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
+ VAListAddr = VE->isMicrosoftABI()
+ ? EmitMSVAListRef(VE->getSubExpr())
+ : EmitVAListRef(VE->getSubExpr());
+ QualType Ty = VE->getType();
+ if (VE->isMicrosoftABI())
+ return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
+ return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
+}
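+
+// For illustration, a minimal sketch of the source pattern lowered above:
+//
+//   int first(int n, ...) {
+//     __builtin_va_list ap;
+//     __builtin_va_start(ap, n);
+//     int v = __builtin_va_arg(ap, int);  // reaches EmitVAArg
+//     __builtin_va_end(ap);
+//     return v;
+//   }
+//
+// EmitVAArg only selects between the MS and Itanium va_list forms; the
+// target's ABIInfo performs the actual lowering.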
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h
new file mode 100644
index 0000000..2ebd09b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h
@@ -0,0 +1,178 @@
+//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliance.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGCALL_H
+#define LLVM_CLANG_LIB_CODEGEN_CGCALL_H
+
+#include "CGValue.h"
+#include "EHScopeStack.h"
+#include "clang/AST/CanonicalType.h"
+#include "clang/AST/Type.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/IR/Value.h"
+
+// FIXME: Restructure so we don't have to expose so much stuff.
+#include "ABIInfo.h"
+
+namespace llvm {
+ class AttributeSet;
+ class Function;
+ class Type;
+ class Value;
+}
+
+namespace clang {
+ class ASTContext;
+ class Decl;
+ class FunctionDecl;
+ class ObjCMethodDecl;
+ class VarDecl;
+
+namespace CodeGen {
+ typedef SmallVector<llvm::AttributeSet, 8> AttributeListType;
+
+ struct CallArg {
+ RValue RV;
+ QualType Ty;
+ bool NeedsCopy;
+ CallArg(RValue rv, QualType ty, bool needscopy)
+ : RV(rv), Ty(ty), NeedsCopy(needscopy)
+ { }
+ };
+
+ /// CallArgList - Type for representing both the value and type of
+ /// arguments in a call.
+ class CallArgList :
+ public SmallVector<CallArg, 16> {
+ public:
+ CallArgList() : StackBase(nullptr) {}
+
+ struct Writeback {
+ /// The original argument. Note that the argument l-value
+ /// is potentially null.
+ LValue Source;
+
+ /// The temporary alloca.
+ Address Temporary;
+
+ /// A value to "use" after the writeback, or null.
+ llvm::Value *ToUse;
+ };
+
+ struct CallArgCleanup {
+ EHScopeStack::stable_iterator Cleanup;
+
+ /// The "is active" insertion point. This instruction is temporary and
+ /// will be removed after insertion.
+ llvm::Instruction *IsActiveIP;
+ };
+
+ void add(RValue rvalue, QualType type, bool needscopy = false) {
+ push_back(CallArg(rvalue, type, needscopy));
+ }
+
+ void addFrom(const CallArgList &other) {
+ insert(end(), other.begin(), other.end());
+ Writebacks.insert(Writebacks.end(),
+ other.Writebacks.begin(), other.Writebacks.end());
+ }
+
+ void addWriteback(LValue srcLV, Address temporary,
+ llvm::Value *toUse) {
+ Writeback writeback = { srcLV, temporary, toUse };
+ Writebacks.push_back(writeback);
+ }
+
+ bool hasWritebacks() const { return !Writebacks.empty(); }
+
+ typedef llvm::iterator_range<SmallVectorImpl<Writeback>::const_iterator>
+ writeback_const_range;
+
+ writeback_const_range writebacks() const {
+ return writeback_const_range(Writebacks.begin(), Writebacks.end());
+ }
+
+ void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup,
+ llvm::Instruction *IsActiveIP) {
+ CallArgCleanup ArgCleanup;
+ ArgCleanup.Cleanup = Cleanup;
+ ArgCleanup.IsActiveIP = IsActiveIP;
+ CleanupsToDeactivate.push_back(ArgCleanup);
+ }
+
+ ArrayRef<CallArgCleanup> getCleanupsToDeactivate() const {
+ return CleanupsToDeactivate;
+ }
+
+ void allocateArgumentMemory(CodeGenFunction &CGF);
+ llvm::Instruction *getStackBase() const { return StackBase; }
+ void freeArgumentMemory(CodeGenFunction &CGF) const;
+
+    /// \brief Returns true if we're using an inalloca struct to pass
+    /// arguments in memory.
+ bool isUsingInAlloca() const { return StackBase; }
+
+ private:
+ SmallVector<Writeback, 1> Writebacks;
+
+    /// Deactivate these cleanups immediately before making the call. This
+    /// is used to clean up objects that are owned by the callee once the
+    /// call occurs.
+ SmallVector<CallArgCleanup, 1> CleanupsToDeactivate;
+
+ /// The stacksave call. It dominates all of the argument evaluation.
+ llvm::CallInst *StackBase;
+
+ /// The iterator pointing to the stack restore cleanup. We manually run and
+ /// deactivate this cleanup after the call in the unexceptional case because
+ /// it doesn't run in the normal order.
+ EHScopeStack::stable_iterator StackCleanup;
+ };
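+
+  // For illustration, a minimal sketch of typical use (ScalarV, AggAddr,
+  // Ctx and RecordTy are placeholders, not names from this file):
+  //
+  //   CallArgList Args;
+  //   Args.add(RValue::get(ScalarV), Ctx.IntTy);
+  //   Args.add(RValue::getAggregate(AggAddr), RecordTy, /*needscopy=*/true);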
+
+ /// FunctionArgList - Type for representing both the decl and type
+ /// of parameters to a function. The decl must be either a
+ /// ParmVarDecl or ImplicitParamDecl.
+ class FunctionArgList : public SmallVector<const VarDecl*, 16> {
+ };
+
+ /// ReturnValueSlot - Contains the address where the return value of a
+ /// function can be stored, and whether the address is volatile or not.
+ class ReturnValueSlot {
+ llvm::PointerIntPair<llvm::Value *, 2, unsigned int> Value;
+ CharUnits Alignment;
+
+ // Return value slot flags
+ enum Flags {
+ IS_VOLATILE = 0x1,
+ IS_UNUSED = 0x2,
+ };
+
+ public:
+ ReturnValueSlot() {}
+ ReturnValueSlot(Address Addr, bool IsVolatile, bool IsUnused = false)
+ : Value(Addr.isValid() ? Addr.getPointer() : nullptr,
+ (IsVolatile ? IS_VOLATILE : 0) | (IsUnused ? IS_UNUSED : 0)),
+ Alignment(Addr.isValid() ? Addr.getAlignment() : CharUnits::Zero()) {}
+
+ bool isNull() const { return !getValue().isValid(); }
+
+ bool isVolatile() const { return Value.getInt() & IS_VOLATILE; }
+ Address getValue() const { return Address(Value.getPointer(), Alignment); }
+ bool isUnused() const { return Value.getInt() & IS_UNUSED; }
+ };
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
new file mode 100644
index 0000000..2e566de
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
@@ -0,0 +1,2792 @@
+//===--- CGClass.cpp - Emit LLVM Code for C++ classes -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of classes
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGBlocks.h"
+#include "CGCXXABI.h"
+#include "CGDebugInfo.h"
+#include "CGRecordLayout.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Metadata.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+/// Return the best known alignment for an unknown pointer to a
+/// particular class.
+CharUnits CodeGenModule::getClassPointerAlignment(const CXXRecordDecl *RD) {
+ if (!RD->isCompleteDefinition())
+ return CharUnits::One(); // Hopefully won't be used anywhere.
+
+ auto &layout = getContext().getASTRecordLayout(RD);
+
+ // If the class is final, then we know that the pointer points to an
+ // object of that type and can use the full alignment.
+ if (RD->hasAttr<FinalAttr>()) {
+ return layout.getAlignment();
+
+ // Otherwise, we have to assume it could be a subclass.
+ } else {
+ return layout.getNonVirtualAlignment();
+ }
+}
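+
+// For illustration, a sketch with assumed alignments:
+//
+//   struct B { double d; };          // alignment 8
+//   struct D final : B { char c; };
+//
+// A 'D *' may assume D's full alignment, while a 'B *' must fall back to
+// B's non-virtual alignment, since it might point into a derived object.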
+
+/// Return the best known alignment for a pointer to a virtual base,
+/// given the alignment of a pointer to the derived class.
+CharUnits CodeGenModule::getVBaseAlignment(CharUnits actualDerivedAlign,
+ const CXXRecordDecl *derivedClass,
+ const CXXRecordDecl *vbaseClass) {
+ // The basic idea here is that an underaligned derived pointer might
+ // indicate an underaligned base pointer.
+
+ assert(vbaseClass->isCompleteDefinition());
+ auto &baseLayout = getContext().getASTRecordLayout(vbaseClass);
+ CharUnits expectedVBaseAlign = baseLayout.getNonVirtualAlignment();
+
+ return getDynamicOffsetAlignment(actualDerivedAlign, derivedClass,
+ expectedVBaseAlign);
+}
+
+CharUnits
+CodeGenModule::getDynamicOffsetAlignment(CharUnits actualBaseAlign,
+ const CXXRecordDecl *baseDecl,
+ CharUnits expectedTargetAlign) {
+ // If the base is an incomplete type (which is, alas, possible with
+ // member pointers), be pessimistic.
+ if (!baseDecl->isCompleteDefinition())
+ return std::min(actualBaseAlign, expectedTargetAlign);
+
+ auto &baseLayout = getContext().getASTRecordLayout(baseDecl);
+ CharUnits expectedBaseAlign = baseLayout.getNonVirtualAlignment();
+
+ // If the class is properly aligned, assume the target offset is, too.
+ //
+ // This actually isn't necessarily the right thing to do --- if the
+ // class is a complete object, but it's only properly aligned for a
+ // base subobject, then the alignments of things relative to it are
+ // probably off as well. (Note that this requires the alignment of
+ // the target to be greater than the NV alignment of the derived
+ // class.)
+ //
+ // However, our approach to this kind of under-alignment can only
+ // ever be best effort; after all, we're never going to propagate
+ // alignments through variables or parameters. Note, in particular,
+ // that constructing a polymorphic type in an address that's less
+ // than pointer-aligned will generally trap in the constructor,
+ // unless we someday add some sort of attribute to change the
+ // assumed alignment of 'this'. So our goal here is pretty much
+ // just to allow the user to explicitly say that a pointer is
+ // under-aligned and then safely access its fields and v-tables.
+ if (actualBaseAlign >= expectedBaseAlign) {
+ return expectedTargetAlign;
+ }
+
+ // Otherwise, we might be offset by an arbitrary multiple of the
+ // actual alignment. The correct adjustment is to take the min of
+ // the two alignments.
+ return std::min(actualBaseAlign, expectedTargetAlign);
+}
+
+Address CodeGenFunction::LoadCXXThisAddress() {
+ assert(CurFuncDecl && "loading 'this' without a func declaration?");
+ assert(isa<CXXMethodDecl>(CurFuncDecl));
+
+ // Lazily compute CXXThisAlignment.
+ if (CXXThisAlignment.isZero()) {
+ // Just use the best known alignment for the parent.
+ // TODO: if we're currently emitting a complete-object ctor/dtor,
+ // we can always use the complete-object alignment.
+ auto RD = cast<CXXMethodDecl>(CurFuncDecl)->getParent();
+ CXXThisAlignment = CGM.getClassPointerAlignment(RD);
+ }
+
+ return Address(LoadCXXThis(), CXXThisAlignment);
+}
+
+/// Emit the address of a field using a member data pointer.
+///
+/// \param E Only used for emergency diagnostics
+Address
+CodeGenFunction::EmitCXXMemberDataPointerAddress(const Expr *E, Address base,
+ llvm::Value *memberPtr,
+ const MemberPointerType *memberPtrType,
+ AlignmentSource *alignSource) {
+ // Ask the ABI to compute the actual address.
+ llvm::Value *ptr =
+ CGM.getCXXABI().EmitMemberDataPointerAddress(*this, E, base,
+ memberPtr, memberPtrType);
+
+ QualType memberType = memberPtrType->getPointeeType();
+ CharUnits memberAlign = getNaturalTypeAlignment(memberType, alignSource);
+ memberAlign =
+ CGM.getDynamicOffsetAlignment(base.getAlignment(),
+ memberPtrType->getClass()->getAsCXXRecordDecl(),
+ memberAlign);
+ return Address(ptr, memberAlign);
+}
+
+CharUnits CodeGenModule::computeNonVirtualBaseClassOffset(
+ const CXXRecordDecl *DerivedClass, CastExpr::path_const_iterator Start,
+ CastExpr::path_const_iterator End) {
+ CharUnits Offset = CharUnits::Zero();
+
+ const ASTContext &Context = getContext();
+ const CXXRecordDecl *RD = DerivedClass;
+
+ for (CastExpr::path_const_iterator I = Start; I != End; ++I) {
+ const CXXBaseSpecifier *Base = *I;
+ assert(!Base->isVirtual() && "Should not see virtual bases here!");
+
+ // Get the layout.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ // Add the offset.
+ Offset += Layout.getBaseClassOffset(BaseDecl);
+
+ RD = BaseDecl;
+ }
+
+ return Offset;
+}
+
+llvm::Constant *
+CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd) {
+ assert(PathBegin != PathEnd && "Base path should not be empty!");
+
+ CharUnits Offset =
+ computeNonVirtualBaseClassOffset(ClassDecl, PathBegin, PathEnd);
+ if (Offset.isZero())
+ return nullptr;
+
+ llvm::Type *PtrDiffTy =
+ Types.ConvertType(getContext().getPointerDiffType());
+
+ return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
+}
+
+/// Gets the address of a direct base class within a complete object.
+/// This should only be used for (1) non-virtual bases or (2) virtual bases
+/// when the type is known to be complete (e.g. in complete destructors).
+///
+/// The object pointed to by 'This' is assumed to be non-null.
+Address
+CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(Address This,
+ const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base,
+ bool BaseIsVirtual) {
+ // 'this' must be a pointer (in some address space) to Derived.
+ assert(This.getElementType() == ConvertType(Derived));
+
+ // Compute the offset of the virtual base.
+ CharUnits Offset;
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
+ if (BaseIsVirtual)
+ Offset = Layout.getVBaseClassOffset(Base);
+ else
+ Offset = Layout.getBaseClassOffset(Base);
+
+ // Shift and cast down to the base type.
+ // TODO: for complete types, this should be possible with a GEP.
+ Address V = This;
+ if (!Offset.isZero()) {
+ V = Builder.CreateElementBitCast(V, Int8Ty);
+ V = Builder.CreateConstInBoundsByteGEP(V, Offset);
+ }
+ V = Builder.CreateElementBitCast(V, ConvertType(Base));
+
+ return V;
+}
+
+static Address
+ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, Address addr,
+ CharUnits nonVirtualOffset,
+ llvm::Value *virtualOffset,
+ const CXXRecordDecl *derivedClass,
+ const CXXRecordDecl *nearestVBase) {
+ // Assert that we have something to do.
+ assert(!nonVirtualOffset.isZero() || virtualOffset != nullptr);
+
+ // Compute the offset from the static and dynamic components.
+ llvm::Value *baseOffset;
+ if (!nonVirtualOffset.isZero()) {
+ baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy,
+ nonVirtualOffset.getQuantity());
+ if (virtualOffset) {
+ baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset);
+ }
+ } else {
+ baseOffset = virtualOffset;
+ }
+
+ // Apply the base offset.
+ llvm::Value *ptr = addr.getPointer();
+ ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
+ ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
+
+ // If we have a virtual component, the alignment of the result will
+ // be relative only to the known alignment of that vbase.
+ CharUnits alignment;
+ if (virtualOffset) {
+ assert(nearestVBase && "virtual offset without vbase?");
+ alignment = CGF.CGM.getVBaseAlignment(addr.getAlignment(),
+ derivedClass, nearestVBase);
+ } else {
+ alignment = addr.getAlignment();
+ }
+ alignment = alignment.alignmentAtOffset(nonVirtualOffset);
+
+ return Address(ptr, alignment);
+}
+
+Address CodeGenFunction::GetAddressOfBaseClass(
+ Address Value, const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd, bool NullCheckValue,
+ SourceLocation Loc) {
+ assert(PathBegin != PathEnd && "Base path should not be empty!");
+
+ CastExpr::path_const_iterator Start = PathBegin;
+ const CXXRecordDecl *VBase = nullptr;
+
+ // Sema has done some convenient canonicalization here: if the
+ // access path involved any virtual steps, the conversion path will
+ // *start* with a step down to the correct virtual base subobject,
+ // and hence will not require any further steps.
+ if ((*Start)->isVirtual()) {
+ VBase =
+ cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
+ ++Start;
+ }
+
+ // Compute the static offset of the ultimate destination within its
+ // allocating subobject (the virtual base, if there is one, or else
+ // the "complete" object that we see).
+ CharUnits NonVirtualOffset = CGM.computeNonVirtualBaseClassOffset(
+ VBase ? VBase : Derived, Start, PathEnd);
+
+ // If there's a virtual step, we can sometimes "devirtualize" it.
+ // For now, that's limited to when the derived type is final.
+ // TODO: "devirtualize" this for accesses to known-complete objects.
+ if (VBase && Derived->hasAttr<FinalAttr>()) {
+ const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived);
+ CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase);
+ NonVirtualOffset += vBaseOffset;
+ VBase = nullptr; // we no longer have a virtual step
+ }
+
+ // Get the base pointer type.
+ llvm::Type *BasePtrTy =
+ ConvertType((PathEnd[-1])->getType())->getPointerTo();
+
+ QualType DerivedTy = getContext().getRecordType(Derived);
+ CharUnits DerivedAlign = CGM.getClassPointerAlignment(Derived);
+
+ // If the static offset is zero and we don't have a virtual step,
+ // just do a bitcast; null checks are unnecessary.
+ if (NonVirtualOffset.isZero() && !VBase) {
+ if (sanitizePerformTypeCheck()) {
+ EmitTypeCheck(TCK_Upcast, Loc, Value.getPointer(),
+ DerivedTy, DerivedAlign, !NullCheckValue);
+ }
+ return Builder.CreateBitCast(Value, BasePtrTy);
+ }
+
+ llvm::BasicBlock *origBB = nullptr;
+ llvm::BasicBlock *endBB = nullptr;
+
+ // Skip over the offset (and the vtable load) if we're supposed to
+ // null-check the pointer.
+ if (NullCheckValue) {
+ origBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
+ endBB = createBasicBlock("cast.end");
+
+ llvm::Value *isNull = Builder.CreateIsNull(Value.getPointer());
+ Builder.CreateCondBr(isNull, endBB, notNullBB);
+ EmitBlock(notNullBB);
+ }
+
+ if (sanitizePerformTypeCheck()) {
+ EmitTypeCheck(VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc,
+ Value.getPointer(), DerivedTy, DerivedAlign, true);
+ }
+
+ // Compute the virtual offset.
+ llvm::Value *VirtualOffset = nullptr;
+ if (VBase) {
+ VirtualOffset =
+ CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase);
+ }
+
+ // Apply both offsets.
+ Value = ApplyNonVirtualAndVirtualOffset(*this, Value, NonVirtualOffset,
+ VirtualOffset, Derived, VBase);
+
+ // Cast to the destination type.
+ Value = Builder.CreateBitCast(Value, BasePtrTy);
+
+ // Build a phi if we needed a null check.
+ if (NullCheckValue) {
+ llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
+ Builder.CreateBr(endBB);
+ EmitBlock(endBB);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
+ PHI->addIncoming(Value.getPointer(), notNullBB);
+ PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
+ Value = Address(PHI, Value.getAlignment());
+ }
+
+ return Value;
+}
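+
+// For illustration, a sketch of a conversion handled above:
+//
+//   struct A { int a; }; struct B { int b; };
+//   struct C : A, B {};
+//   B *up(C *c) { return c; }  // non-virtual step: offset of B within C
+//
+// A virtual base step would additionally load the offset dynamically via
+// GetVirtualBaseClassOffset.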
+
+Address
+CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
+ const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
+ bool NullCheckValue) {
+ assert(PathBegin != PathEnd && "Base path should not be empty!");
+
+ QualType DerivedTy =
+ getContext().getCanonicalType(getContext().getTagDeclType(Derived));
+ llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();
+
+ llvm::Value *NonVirtualOffset =
+ CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);
+
+ if (!NonVirtualOffset) {
+ // No offset, we can just cast back.
+ return Builder.CreateBitCast(BaseAddr, DerivedPtrTy);
+ }
+
+ llvm::BasicBlock *CastNull = nullptr;
+ llvm::BasicBlock *CastNotNull = nullptr;
+ llvm::BasicBlock *CastEnd = nullptr;
+
+ if (NullCheckValue) {
+ CastNull = createBasicBlock("cast.null");
+ CastNotNull = createBasicBlock("cast.notnull");
+ CastEnd = createBasicBlock("cast.end");
+
+ llvm::Value *IsNull = Builder.CreateIsNull(BaseAddr.getPointer());
+ Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
+ EmitBlock(CastNotNull);
+ }
+
+ // Apply the offset.
+ llvm::Value *Value = Builder.CreateBitCast(BaseAddr.getPointer(), Int8PtrTy);
+ Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
+ "sub.ptr");
+
+ // Just cast.
+ Value = Builder.CreateBitCast(Value, DerivedPtrTy);
+
+ // Produce a PHI if we had a null-check.
+ if (NullCheckValue) {
+ Builder.CreateBr(CastEnd);
+ EmitBlock(CastNull);
+ Builder.CreateBr(CastEnd);
+ EmitBlock(CastEnd);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
+ PHI->addIncoming(Value, CastNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
+ Value = PHI;
+ }
+
+ return Address(Value, CGM.getClassPointerAlignment(Derived));
+}
+
+llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
+ bool ForVirtualBase,
+ bool Delegating) {
+ if (!CGM.getCXXABI().NeedsVTTParameter(GD)) {
+ // This constructor/destructor does not need a VTT parameter.
+ return nullptr;
+ }
+
+ const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent();
+ const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();
+
+ llvm::Value *VTT;
+
+ uint64_t SubVTTIndex;
+
+ if (Delegating) {
+ // If this is a delegating constructor call, just load the VTT.
+ return LoadCXXVTT();
+ } else if (RD == Base) {
+ // If the record matches the base, this is the complete ctor/dtor
+ // variant calling the base variant in a class with virtual bases.
+ assert(!CGM.getCXXABI().NeedsVTTParameter(CurGD) &&
+ "doing no-op VTT offset in base dtor/ctor?");
+ assert(!ForVirtualBase && "Can't have same class as virtual base!");
+ SubVTTIndex = 0;
+ } else {
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ CharUnits BaseOffset = ForVirtualBase ?
+ Layout.getVBaseClassOffset(Base) :
+ Layout.getBaseClassOffset(Base);
+
+ SubVTTIndex =
+ CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
+ assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
+ }
+
+ if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
+ // A VTT parameter was passed to the constructor, use it.
+ VTT = LoadCXXVTT();
+ VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
+ } else {
+ // We're the complete constructor, so get the VTT by name.
+ VTT = CGM.getVTables().GetAddrOfVTT(RD);
+ VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
+ }
+
+ return VTT;
+}
+
+namespace {
+ /// Call the destructor for a direct base class.
+ struct CallBaseDtor final : EHScopeStack::Cleanup {
+ const CXXRecordDecl *BaseClass;
+ bool BaseIsVirtual;
+ CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
+ : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ const CXXRecordDecl *DerivedClass =
+ cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();
+
+ const CXXDestructorDecl *D = BaseClass->getDestructor();
+ Address Addr =
+ CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThisAddress(),
+ DerivedClass, BaseClass,
+ BaseIsVirtual);
+ CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual,
+ /*Delegating=*/false, Addr);
+ }
+ };
+
+ /// A visitor which checks whether an initializer uses 'this' in a
+ /// way which requires the vtable to be properly set.
+ struct DynamicThisUseChecker : ConstEvaluatedExprVisitor<DynamicThisUseChecker> {
+ typedef ConstEvaluatedExprVisitor<DynamicThisUseChecker> super;
+
+ bool UsesThis;
+
+ DynamicThisUseChecker(const ASTContext &C) : super(C), UsesThis(false) {}
+
+ // Black-list all explicit and implicit references to 'this'.
+ //
+ // Do we need to worry about external references to 'this' derived
+ // from arbitrary code? If so, then anything which runs arbitrary
+ // external code might potentially access the vtable.
+ void VisitCXXThisExpr(const CXXThisExpr *E) { UsesThis = true; }
+ };
+} // end anonymous namespace
+
+static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
+ DynamicThisUseChecker Checker(C);
+ Checker.Visit(Init);
+ return Checker.UsesThis;
+}
+
+static void EmitBaseInitializer(CodeGenFunction &CGF,
+ const CXXRecordDecl *ClassDecl,
+ CXXCtorInitializer *BaseInit,
+ CXXCtorType CtorType) {
+ assert(BaseInit->isBaseInitializer() &&
+ "Must have base initializer!");
+
+ Address ThisPtr = CGF.LoadCXXThisAddress();
+
+ const Type *BaseType = BaseInit->getBaseClass();
+ CXXRecordDecl *BaseClassDecl =
+ cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
+
+ bool isBaseVirtual = BaseInit->isBaseVirtual();
+
+ // The base constructor doesn't construct virtual bases.
+ if (CtorType == Ctor_Base && isBaseVirtual)
+ return;
+
+ // If the initializer for the base (other than the constructor
+ // itself) accesses 'this' in any way, we need to initialize the
+ // vtables.
+ if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit()))
+ CGF.InitializeVTablePointers(ClassDecl);
+
+ // We can pretend to be a complete class because it only matters for
+ // virtual bases, and we only do virtual bases for complete ctors.
+ Address V =
+ CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
+ BaseClassDecl,
+ isBaseVirtual);
+ AggValueSlot AggSlot =
+ AggValueSlot::forAddr(V, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+
+ CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);
+
+ if (CGF.CGM.getLangOpts().Exceptions &&
+ !BaseClassDecl->hasTrivialDestructor())
+ CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
+ isBaseVirtual);
+}
+
+static void EmitAggMemberInitializer(CodeGenFunction &CGF,
+ LValue LHS,
+ Expr *Init,
+ Address ArrayIndexVar,
+ QualType T,
+ ArrayRef<VarDecl *> ArrayIndexes,
+ unsigned Index) {
+ if (Index == ArrayIndexes.size()) {
+ LValue LV = LHS;
+
+ if (ArrayIndexVar.isValid()) {
+ // If we have an array index variable, load it and use it as an offset.
+ // Then, increment the value.
+ llvm::Value *Dest = LHS.getPointer();
+ llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
+ Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
+ llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
+ Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
+ CGF.Builder.CreateStore(Next, ArrayIndexVar);
+
+ // Update the LValue.
+ CharUnits EltSize = CGF.getContext().getTypeSizeInChars(T);
+ CharUnits Align = LV.getAlignment().alignmentOfArrayElement(EltSize);
+ LV.setAddress(Address(Dest, Align));
+ }
+
+ switch (CGF.getEvaluationKind(T)) {
+ case TEK_Scalar:
+ CGF.EmitScalarInit(Init, /*decl*/ nullptr, LV, false);
+ break;
+ case TEK_Complex:
+ CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true);
+ break;
+ case TEK_Aggregate: {
+ AggValueSlot Slot =
+ AggValueSlot::forLValue(LV,
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+
+ CGF.EmitAggExpr(Init, Slot);
+ break;
+ }
+ }
+
+ return;
+ }
+
+ const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
+ assert(Array && "Array initialization without the array type?");
+ Address IndexVar = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
+
+ // Initialize this index variable to zero.
+ llvm::Value* Zero
+ = llvm::Constant::getNullValue(IndexVar.getElementType());
+ CGF.Builder.CreateStore(Zero, IndexVar);
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");
+
+ CGF.EmitBlock(CondBlock);
+
+ llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
+  // Generate: if (loop-index < number-of-elements) fall through to the loop
+  // body; otherwise, go to the block after the for-loop.
+ uint64_t NumElements = Array->getSize().getZExtValue();
+ llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
+ llvm::Value *NumElementsPtr =
+ llvm::ConstantInt::get(Counter->getType(), NumElements);
+ llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
+ "isless");
+
+ // If the condition is true, execute the body.
+ CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);
+
+ CGF.EmitBlock(ForBody);
+ llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");
+
+ // Inside the loop body recurse to emit the inner loop or, eventually, the
+ // constructor call.
+ EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
+ Array->getElementType(), ArrayIndexes, Index + 1);
+
+ CGF.EmitBlock(ContinueBlock);
+
+ // Emit the increment of the loop counter.
+ llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
+ Counter = CGF.Builder.CreateLoad(IndexVar);
+ NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
+ CGF.Builder.CreateStore(NextVal, IndexVar);
+
+ // Finally, branch back up to the condition for the next iteration.
+ CGF.EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ CGF.EmitBlock(AfterFor, true);
+}
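+
+// For illustration (a sketch; one common source of array-index variables is
+// the implicit copy of a by-value array capture):
+//
+//   int a[2][3];
+//   auto l = [a] { /* ... */ };  // copying the capture walks both dimensions
+//
+// One induction variable and one loop are emitted per array dimension, with
+// the element initialization at the innermost level.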
+
+static bool isMemcpyEquivalentSpecialMember(const CXXMethodDecl *D) {
+ auto *CD = dyn_cast<CXXConstructorDecl>(D);
+ if (!(CD && CD->isCopyOrMoveConstructor()) &&
+ !D->isCopyAssignmentOperator() && !D->isMoveAssignmentOperator())
+ return false;
+
+ // We can emit a memcpy for a trivial copy or move constructor/assignment.
+ if (D->isTrivial() && !D->getParent()->mayInsertExtraPadding())
+ return true;
+
+ // We *must* emit a memcpy for a defaulted union copy or move op.
+ if (D->getParent()->isUnion() && D->isDefaulted())
+ return true;
+
+ return false;
+}
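+
+// For illustration, both of these qualify under the checks above:
+//
+//   struct Pt { int x, y; };           // trivial copy ctor -> memcpy
+//   union U {
+//     int i; float f;
+//     U(const U &) = default;          // defaulted union copy -> memcpy
+//   };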
+
+static void EmitLValueForAnyFieldInitialization(CodeGenFunction &CGF,
+ CXXCtorInitializer *MemberInit,
+ LValue &LHS) {
+ FieldDecl *Field = MemberInit->getAnyMember();
+ if (MemberInit->isIndirectMemberInitializer()) {
+ // If we are initializing an anonymous union field, drill down to the field.
+ IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember();
+ for (const auto *I : IndirectField->chain())
+ LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(I));
+ } else {
+ LHS = CGF.EmitLValueForFieldInitialization(LHS, Field);
+ }
+}
+
+static void EmitMemberInitializer(CodeGenFunction &CGF,
+ const CXXRecordDecl *ClassDecl,
+ CXXCtorInitializer *MemberInit,
+ const CXXConstructorDecl *Constructor,
+ FunctionArgList &Args) {
+ ApplyDebugLocation Loc(CGF, MemberInit->getSourceLocation());
+ assert(MemberInit->isAnyMemberInitializer() &&
+ "Must have member initializer!");
+ assert(MemberInit->getInit() && "Must have initializer!");
+
+  // Handle non-static data member initializers.
+ FieldDecl *Field = MemberInit->getAnyMember();
+ QualType FieldType = Field->getType();
+
+ llvm::Value *ThisPtr = CGF.LoadCXXThis();
+ QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
+ LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
+
+ EmitLValueForAnyFieldInitialization(CGF, MemberInit, LHS);
+
+ // Special case: if we are in a copy or move constructor, and we are copying
+ // an array of PODs or classes with trivial copy constructors, ignore the
+ // AST and perform the copy we know is equivalent.
+ // FIXME: This is hacky at best... if we had a bit more explicit information
+ // in the AST, we could generalize it more easily.
+ const ConstantArrayType *Array
+ = CGF.getContext().getAsConstantArrayType(FieldType);
+ if (Array && Constructor->isDefaulted() &&
+ Constructor->isCopyOrMoveConstructor()) {
+ QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
+ CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
+ if (BaseElementTy.isPODType(CGF.getContext()) ||
+ (CE && isMemcpyEquivalentSpecialMember(CE->getConstructor()))) {
+ unsigned SrcArgIndex =
+ CGF.CGM.getCXXABI().getSrcArgforCopyCtor(Constructor, Args);
+ llvm::Value *SrcPtr
+ = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
+ LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
+ LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field);
+
+ // Copy the aggregate.
+ CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
+ LHS.isVolatileQualified());
+ // Ensure that we destroy the objects if an exception is thrown later in
+ // the constructor.
+ QualType::DestructionKind dtorKind = FieldType.isDestructedType();
+ if (CGF.needsEHCleanup(dtorKind))
+ CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
+ return;
+ }
+ }
+
+ ArrayRef<VarDecl *> ArrayIndexes;
+ if (MemberInit->getNumArrayIndices())
+ ArrayIndexes = MemberInit->getArrayIndexes();
+ CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
+}
+
+void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS,
+ Expr *Init, ArrayRef<VarDecl *> ArrayIndexes) {
+ QualType FieldType = Field->getType();
+ switch (getEvaluationKind(FieldType)) {
+ case TEK_Scalar:
+ if (LHS.isSimple()) {
+ EmitExprAsInit(Init, Field, LHS, false);
+ } else {
+ RValue RHS = RValue::get(EmitScalarExpr(Init));
+ EmitStoreThroughLValue(RHS, LHS);
+ }
+ break;
+ case TEK_Complex:
+ EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true);
+ break;
+ case TEK_Aggregate: {
+ Address ArrayIndexVar = Address::invalid();
+ if (ArrayIndexes.size()) {
+ // The LHS is a pointer to the first object we'll be constructing, as
+ // a flat array.
+ QualType BaseElementTy = getContext().getBaseElementType(FieldType);
+ llvm::Type *BasePtr = ConvertType(BaseElementTy);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ Address BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(), BasePtr);
+ LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);
+
+ // Create an array index that will be used to walk over all of the
+ // objects we're constructing.
+ ArrayIndexVar = CreateMemTemp(getContext().getSizeType(), "object.index");
+ llvm::Value *Zero =
+ llvm::Constant::getNullValue(ArrayIndexVar.getElementType());
+ Builder.CreateStore(Zero, ArrayIndexVar);
+
+ // Emit the block variables for the array indices, if any.
+ for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
+ EmitAutoVarDecl(*ArrayIndexes[I]);
+ }
+
+ EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
+ ArrayIndexes, 0);
+ }
+ }
+
+ // Ensure that we destroy this object if an exception is thrown
+ // later in the constructor.
+ QualType::DestructionKind dtorKind = FieldType.isDestructedType();
+ if (needsEHCleanup(dtorKind))
+ pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
+}
+
+/// Checks whether the given constructor is a valid subject for the
+/// complete-to-base constructor delegation optimization, i.e.
+/// emitting the complete constructor as a simple call to the base
+/// constructor.
+static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {
+ // Currently we disable the optimization for classes with virtual
+ // bases because (1) the addresses of parameter variables need to be
+ // consistent across all initializers but (2) the delegate function
+ // call necessarily creates a second copy of the parameter variable.
+ //
+ // The limiting example (purely theoretical AFAIK):
+ // struct A { A(int &c) { c++; } };
+ // struct B : virtual A {
+ // B(int count) : A(count) { printf("%d\n", count); }
+ // };
+ // ...although even this example could in principle be emitted as a
+ // delegation since the address of the parameter doesn't escape.
+ if (Ctor->getParent()->getNumVBases()) {
+ // TODO: white-list trivial vbase initializers. This case wouldn't
+ // be subject to the restrictions below.
+
+ // TODO: white-list cases where:
+ // - there are no non-reference parameters to the constructor
+ // - the initializers don't access any non-reference parameters
+ // - the initializers don't take the address of non-reference
+ // parameters
+ // - etc.
+ // If we ever add any of the above cases, remember that:
+ // - function-try-blocks will always blacklist this optimization
+ // - we need to perform the constructor prologue and cleanup in
+ // EmitConstructorBody.
+
+ return false;
+ }
+
+ // We also disable the optimization for variadic functions because
+ // it's impossible to "re-pass" varargs.
+ if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
+ return false;
+
+ // FIXME: Decide if we can do a delegation of a delegating constructor.
+ if (Ctor->isDelegatingConstructor())
+ return false;
+
+ return true;
+}
+
+// Emit code in ctor (Prologue==true) or dtor (Prologue==false)
+// to poison the extra field paddings inserted under
+// -fsanitize-address-field-padding=1|2.
+void CodeGenFunction::EmitAsanPrologueOrEpilogue(bool Prologue) {
+ ASTContext &Context = getContext();
+ const CXXRecordDecl *ClassDecl =
+ Prologue ? cast<CXXConstructorDecl>(CurGD.getDecl())->getParent()
+ : cast<CXXDestructorDecl>(CurGD.getDecl())->getParent();
+ if (!ClassDecl->mayInsertExtraPadding()) return;
+
+ struct SizeAndOffset {
+ uint64_t Size;
+ uint64_t Offset;
+ };
+
+ unsigned PtrSize = CGM.getDataLayout().getPointerSizeInBits();
+ const ASTRecordLayout &Info = Context.getASTRecordLayout(ClassDecl);
+
+ // Populate sizes and offsets of fields.
+ SmallVector<SizeAndOffset, 16> SSV(Info.getFieldCount());
+ for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i)
+ SSV[i].Offset =
+ Context.toCharUnitsFromBits(Info.getFieldOffset(i)).getQuantity();
+
+ size_t NumFields = 0;
+ for (const auto *Field : ClassDecl->fields()) {
+ const FieldDecl *D = Field;
+ std::pair<CharUnits, CharUnits> FieldInfo =
+ Context.getTypeInfoInChars(D->getType());
+ CharUnits FieldSize = FieldInfo.first;
+ assert(NumFields < SSV.size());
+ SSV[NumFields].Size = D->isBitField() ? 0 : FieldSize.getQuantity();
+ NumFields++;
+ }
+ assert(NumFields == SSV.size());
+ if (SSV.size() <= 1) return;
+
+ // We will insert calls to __asan_* run-time functions.
+ // LLVM AddressSanitizer pass may decide to inline them later.
+ llvm::Type *Args[2] = {IntPtrTy, IntPtrTy};
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, Args, false);
+ llvm::Constant *F = CGM.CreateRuntimeFunction(
+ FTy, Prologue ? "__asan_poison_intra_object_redzone"
+ : "__asan_unpoison_intra_object_redzone");
+
+ llvm::Value *ThisPtr = LoadCXXThis();
+ ThisPtr = Builder.CreatePtrToInt(ThisPtr, IntPtrTy);
+ uint64_t TypeSize = Info.getNonVirtualSize().getQuantity();
+  // For each field, check whether it has sufficient padding and,
+  // if so, (un)poison it with a call.
+ for (size_t i = 0; i < SSV.size(); i++) {
+ uint64_t AsanAlignment = 8;
+ uint64_t NextField = i == SSV.size() - 1 ? TypeSize : SSV[i + 1].Offset;
+ uint64_t PoisonSize = NextField - SSV[i].Offset - SSV[i].Size;
+ uint64_t EndOffset = SSV[i].Offset + SSV[i].Size;
+ if (PoisonSize < AsanAlignment || !SSV[i].Size ||
+ (NextField % AsanAlignment) != 0)
+ continue;
+ Builder.CreateCall(
+ F, {Builder.CreateAdd(ThisPtr, Builder.getIntN(PtrSize, EndOffset)),
+ Builder.getIntN(PtrSize, PoisonSize)});
+ }
+}
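+
+// For illustration: under -fsanitize-address-field-padding=1|2, a class that
+// may insert extra field padding gets paired calls like the above, i.e.
+// __asan_poison_intra_object_redzone in the constructor and
+// __asan_unpoison_intra_object_redzone in the destructor, one per eligible
+// padding region.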
+
+/// EmitConstructorBody - Emits the body of the current constructor.
+void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
+ EmitAsanPrologueOrEpilogue(true);
+ const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
+ CXXCtorType CtorType = CurGD.getCtorType();
+
+ assert((CGM.getTarget().getCXXABI().hasConstructorVariants() ||
+ CtorType == Ctor_Complete) &&
+ "can only generate complete ctor for this ABI");
+
+ // Before we go any further, try the complete->base constructor
+ // delegation optimization.
+ if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
+ CGM.getTarget().getCXXABI().hasConstructorVariants()) {
+ EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getLocEnd());
+ return;
+ }
+
+ const FunctionDecl *Definition = nullptr;
+ Stmt *Body = Ctor->getBody(Definition);
+ assert(Definition == Ctor && "emitting wrong constructor body");
+
+ // Enter the function-try-block before the constructor prologue if
+ // applicable.
+ bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
+ if (IsTryBody)
+ EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
+
+ incrementProfileCounter(Body);
+
+ RunCleanupsScope RunCleanups(*this);
+
+ // TODO: in restricted cases, we can emit the vbase initializers of
+ // a complete ctor and then delegate to the base ctor.
+
+ // Emit the constructor prologue, i.e. the base and member
+ // initializers.
+ EmitCtorPrologue(Ctor, CtorType, Args);
+
+ // Emit the body of the statement.
+ if (IsTryBody)
+ EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
+ else if (Body)
+ EmitStmt(Body);
+
+ // Emit any cleanup blocks associated with the member or base
+ // initializers, which includes (along the exceptional path) the
+ // destructors for those members and bases that were fully
+ // constructed.
+ RunCleanups.ForceCleanup();
+
+ if (IsTryBody)
+ ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
+}
+
+namespace {
+ /// RAII object to indicate that codegen is copying the value representation
+ /// instead of the object representation. Useful when copying a struct or
+ /// class which has uninitialized members and we're only performing
+ /// lvalue-to-rvalue conversion on the object but not its members.
+ class CopyingValueRepresentation {
+ public:
+ explicit CopyingValueRepresentation(CodeGenFunction &CGF)
+ : CGF(CGF), OldSanOpts(CGF.SanOpts) {
+ CGF.SanOpts.set(SanitizerKind::Bool, false);
+ CGF.SanOpts.set(SanitizerKind::Enum, false);
+ }
+ ~CopyingValueRepresentation() {
+ CGF.SanOpts = OldSanOpts;
+ }
+ private:
+ CodeGenFunction &CGF;
+ SanitizerSet OldSanOpts;
+ };
+}
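+
+// Why the RAII guard above matters (hypothetical type): a defaulted copy may
+// be lowered to a memcpy of the value representation, which reads the object
+// but not each member individually. For
+//
+//   struct S { bool B; };   // S s1; S s2(s1); B may be uninitialized
+//
+// the load feeding the copy would otherwise trip -fsanitize=bool (or
+// -fsanitize=enum for enum members) on an indeterminate byte, so those two
+// checks are suspended for the duration of the copy.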
+
+namespace {
+ class FieldMemcpyizer {
+ public:
+ FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl,
+ const VarDecl *SrcRec)
+ : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec),
+ RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)),
+ FirstField(nullptr), LastField(nullptr), FirstFieldOffset(0),
+ LastFieldOffset(0), LastAddedFieldIndex(0) {}
+
+ bool isMemcpyableField(FieldDecl *F) const {
+ // Never memcpy fields when we are adding poisoned paddings.
+ if (CGF.getContext().getLangOpts().SanitizeAddressFieldPadding)
+ return false;
+ Qualifiers Qual = F->getType().getQualifiers();
+ if (Qual.hasVolatile() || Qual.hasObjCLifetime())
+ return false;
+ return true;
+ }
+
+ void addMemcpyableField(FieldDecl *F) {
+ if (!FirstField)
+ addInitialField(F);
+ else
+ addNextField(F);
+ }
+
+ CharUnits getMemcpySize(uint64_t FirstByteOffset) const {
+ unsigned LastFieldSize =
+ LastField->isBitField() ?
+ LastField->getBitWidthValue(CGF.getContext()) :
+ CGF.getContext().getTypeSize(LastField->getType());
+ uint64_t MemcpySizeBits =
+ LastFieldOffset + LastFieldSize - FirstByteOffset +
+ CGF.getContext().getCharWidth() - 1;
+ CharUnits MemcpySize =
+ CGF.getContext().toCharUnitsFromBits(MemcpySizeBits);
+ return MemcpySize;
+ }
+
+ void emitMemcpy() {
+ // Give the subclass a chance to bail out if it feels the memcpy isn't
+ // worth it (e.g. it hasn't aggregated enough data).
+ if (!FirstField) {
+ return;
+ }
+
+ uint64_t FirstByteOffset;
+ if (FirstField->isBitField()) {
+ const CGRecordLayout &RL =
+ CGF.getTypes().getCGRecordLayout(FirstField->getParent());
+ const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField);
+ // FirstFieldOffset is not appropriate for bitfields;
+ // we need to use the storage offset instead.
+ FirstByteOffset = CGF.getContext().toBits(BFInfo.StorageOffset);
+ } else {
+ FirstByteOffset = FirstFieldOffset;
+ }
+
+ CharUnits MemcpySize = getMemcpySize(FirstByteOffset);
+ QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
+ Address ThisPtr = CGF.LoadCXXThisAddress();
+ LValue DestLV = CGF.MakeAddrLValue(ThisPtr, RecordTy);
+ LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
+ llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec));
+ LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
+ LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);
+
+ emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddress() : Dest.getAddress(),
+ Src.isBitField() ? Src.getBitFieldAddress() : Src.getAddress(),
+ MemcpySize);
+ reset();
+ }
+
+ void reset() {
+ FirstField = nullptr;
+ }
+
+ protected:
+ CodeGenFunction &CGF;
+ const CXXRecordDecl *ClassDecl;
+
+ private:
+
+ void emitMemcpyIR(Address DestPtr, Address SrcPtr, CharUnits Size) {
+ llvm::PointerType *DPT = DestPtr.getType();
+ llvm::Type *DBP =
+ llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
+ DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);
+
+ llvm::PointerType *SPT = SrcPtr.getType();
+ llvm::Type *SBP =
+ llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
+ SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);
+
+ CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity());
+ }
+
+ void addInitialField(FieldDecl *F) {
+ FirstField = F;
+ LastField = F;
+ FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex());
+ LastFieldOffset = FirstFieldOffset;
+ LastAddedFieldIndex = F->getFieldIndex();
+ return;
+ }
+
+ void addNextField(FieldDecl *F) {
+ // For the most part, the following invariant will hold:
+ // F->getFieldIndex() == LastAddedFieldIndex + 1
+ // The one exception is that Sema won't add a copy-initializer for an
+ // unnamed bitfield, which will show up here as a gap in the sequence.
+ assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 &&
+ "Cannot aggregate fields out of order.");
+ LastAddedFieldIndex = F->getFieldIndex();
+
+ // The 'first' and 'last' fields are chosen by offset, rather than field
+ // index. This allows the code to support bitfields, as well as regular
+ // fields.
+ uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex());
+ if (FOffset < FirstFieldOffset) {
+ FirstField = F;
+ FirstFieldOffset = FOffset;
+ } else if (FOffset > LastFieldOffset) {
+ LastField = F;
+ LastFieldOffset = FOffset;
+ }
+ }
+
+ const VarDecl *SrcRec;
+ const ASTRecordLayout &RecLayout;
+ FieldDecl *FirstField;
+ FieldDecl *LastField;
+ uint64_t FirstFieldOffset, LastFieldOffset;
+ unsigned LastAddedFieldIndex;
+ };
+
+ class ConstructorMemcpyizer : public FieldMemcpyizer {
+ private:
+
+ /// Get source argument for copy constructor. Returns null if not a copy
+ /// constructor.
+ static const VarDecl *getTrivialCopySource(CodeGenFunction &CGF,
+ const CXXConstructorDecl *CD,
+ FunctionArgList &Args) {
+ if (CD->isCopyOrMoveConstructor() && CD->isDefaulted())
+ return Args[CGF.CGM.getCXXABI().getSrcArgforCopyCtor(CD, Args)];
+ return nullptr;
+ }
+
+ // Returns true if a CXXCtorInitializer represents a member initialization
+ // that can be rolled into a memcpy.
+ bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const {
+ if (!MemcpyableCtor)
+ return false;
+ FieldDecl *Field = MemberInit->getMember();
+ assert(Field && "No field for member init.");
+ QualType FieldType = Field->getType();
+ CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
+
+ // Bail out on members that are neither memcpy-equivalent nor
+ // trivially copyable.
+ if (!(CE && isMemcpyEquivalentSpecialMember(CE->getConstructor())) &&
+ !(FieldType.isTriviallyCopyableType(CGF.getContext()) ||
+ FieldType->isReferenceType()))
+ return false;
+
+ // Bail out on volatile fields.
+ if (!isMemcpyableField(Field))
+ return false;
+
+ // Otherwise we're good.
+ return true;
+ }
+
+ public:
+ ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
+ FunctionArgList &Args)
+ : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CGF, CD, Args)),
+ ConstructorDecl(CD),
+ MemcpyableCtor(CD->isDefaulted() &&
+ CD->isCopyOrMoveConstructor() &&
+ CGF.getLangOpts().getGC() == LangOptions::NonGC),
+ Args(Args) { }
+
+ void addMemberInitializer(CXXCtorInitializer *MemberInit) {
+ if (isMemberInitMemcpyable(MemberInit)) {
+ AggregatedInits.push_back(MemberInit);
+ addMemcpyableField(MemberInit->getMember());
+ } else {
+ emitAggregatedInits();
+ EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
+ ConstructorDecl, Args);
+ }
+ }
+
+ void emitAggregatedInits() {
+ if (AggregatedInits.size() <= 1) {
+ // This memcpy is too small to be worthwhile. Fall back on default
+ // codegen.
+ if (!AggregatedInits.empty()) {
+ CopyingValueRepresentation CVR(CGF);
+ EmitMemberInitializer(CGF, ConstructorDecl->getParent(),
+ AggregatedInits[0], ConstructorDecl, Args);
+ AggregatedInits.clear();
+ }
+ reset();
+ return;
+ }
+
+ pushEHDestructors();
+ emitMemcpy();
+ AggregatedInits.clear();
+ }
+
+ void pushEHDestructors() {
+ Address ThisPtr = CGF.LoadCXXThisAddress();
+ QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
+ LValue LHS = CGF.MakeAddrLValue(ThisPtr, RecordTy);
+
+ for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
+ CXXCtorInitializer *MemberInit = AggregatedInits[i];
+ QualType FieldType = MemberInit->getAnyMember()->getType();
+ QualType::DestructionKind dtorKind = FieldType.isDestructedType();
+ if (!CGF.needsEHCleanup(dtorKind))
+ continue;
+ LValue FieldLHS = LHS;
+ EmitLValueForAnyFieldInitialization(CGF, MemberInit, FieldLHS);
+ CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(), FieldType);
+ }
+ }
+
+ void finish() {
+ emitAggregatedInits();
+ }
+
+ private:
+ const CXXConstructorDecl *ConstructorDecl;
+ bool MemcpyableCtor;
+ FunctionArgList &Args;
+ SmallVector<CXXCtorInitializer*, 16> AggregatedInits;
+ };
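+
+ // Illustrative example for ConstructorMemcpyizer (hypothetical class): in a
+ // defaulted copy constructor of
+ //
+ //   struct P { int A; int B; std::string S; int C; };
+ //
+ // the member inits of A and B aggregate into one memcpy; S has a
+ // non-trivial copy constructor, so it flushes the run via
+ // emitAggregatedInits and is emitted normally; C then starts a new run that
+ // is too small to be worth a memcpy and falls back on default codegen.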
+
+ class AssignmentMemcpyizer : public FieldMemcpyizer {
+ private:
+
+ // Returns the memcpyable field copied by the given statement, if one
+ // exists. Otherwise returns null.
+ FieldDecl *getMemcpyableField(Stmt *S) {
+ if (!AssignmentsMemcpyable)
+ return nullptr;
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
+ // Recognize trivial assignments.
+ if (BO->getOpcode() != BO_Assign)
+ return nullptr;
+ MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS());
+ if (!ME)
+ return nullptr;
+ FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
+ if (!Field || !isMemcpyableField(Field))
+ return nullptr;
+ Stmt *RHS = BO->getRHS();
+ if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS))
+ RHS = EC->getSubExpr();
+ if (!RHS)
+ return nullptr;
+ MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS);
+ if (!ME2 || dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field)
+ return nullptr;
+ return Field;
+ } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) {
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl());
+ if (!(MD && isMemcpyEquivalentSpecialMember(MD)))
+ return nullptr;
+ MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument());
+ if (!IOA)
+ return nullptr;
+ FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl());
+ if (!Field || !isMemcpyableField(Field))
+ return nullptr;
+ MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0));
+ if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl()))
+ return nullptr;
+ return Field;
+ } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
+ if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy)
+ return nullptr;
+ Expr *DstPtr = CE->getArg(0);
+ if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr))
+ DstPtr = DC->getSubExpr();
+ UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr);
+ if (!DUO || DUO->getOpcode() != UO_AddrOf)
+ return nullptr;
+ MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr());
+ if (!ME)
+ return nullptr;
+ FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
+ if (!Field || !isMemcpyableField(Field))
+ return nullptr;
+ Expr *SrcPtr = CE->getArg(1);
+ if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr))
+ SrcPtr = SC->getSubExpr();
+ UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr);
+ if (!SUO || SUO->getOpcode() != UO_AddrOf)
+ return nullptr;
+ MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr());
+ if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl()))
+ return nullptr;
+ return Field;
+ }
+
+ return nullptr;
+ }
+
+ bool AssignmentsMemcpyable;
+ SmallVector<Stmt*, 16> AggregatedStmts;
+
+ public:
+
+ AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD,
+ FunctionArgList &Args)
+ : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]),
+ AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) {
+ assert(Args.size() == 2);
+ }
+
+ void emitAssignment(Stmt *S) {
+ FieldDecl *F = getMemcpyableField(S);
+ if (F) {
+ addMemcpyableField(F);
+ AggregatedStmts.push_back(S);
+ } else {
+ emitAggregatedStmts();
+ CGF.EmitStmt(S);
+ }
+ }
+
+ void emitAggregatedStmts() {
+ if (AggregatedStmts.size() <= 1) {
+ if (!AggregatedStmts.empty()) {
+ CopyingValueRepresentation CVR(CGF);
+ CGF.EmitStmt(AggregatedStmts[0]);
+ }
+ reset();
+ }
+
+ emitMemcpy();
+ AggregatedStmts.clear();
+ }
+
+ void finish() {
+ emitAggregatedStmts();
+ }
+ };
+} // end anonymous namespace
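+
+// Illustrative note on AssignmentMemcpyizer (hypothetical class): in an
+// implicitly-defined 'P &P::operator=(const P &)' for
+//
+//   struct P { int A; int B; };
+//
+// Sema produces one per-field assignment statement; getMemcpyableField
+// recognizes the run of trivial 'this->X = other.X' assignments (as well as
+// calls to a trivial member operator= and direct __builtin_memcpy of a
+// field) and collapses the run into a single memcpy.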
+
+static bool isInitializerOfDynamicClass(const CXXCtorInitializer *BaseInit) {
+ const Type *BaseType = BaseInit->getBaseClass();
+ const auto *BaseClassDecl =
+ cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
+ return BaseClassDecl->isDynamicClass();
+}
+
+/// EmitCtorPrologue - This routine generates necessary code to initialize
+/// base classes and non-static data members belonging to this constructor.
+void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
+ CXXCtorType CtorType,
+ FunctionArgList &Args) {
+ if (CD->isDelegatingConstructor())
+ return EmitDelegatingCXXConstructorCall(CD, Args);
+
+ const CXXRecordDecl *ClassDecl = CD->getParent();
+
+ CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
+ E = CD->init_end();
+
+ llvm::BasicBlock *BaseCtorContinueBB = nullptr;
+ if (ClassDecl->getNumVBases() &&
+ !CGM.getTarget().getCXXABI().hasConstructorVariants()) {
+ // The ABIs that don't have constructor variants need to put a branch
+ // before the virtual base initialization code.
+ BaseCtorContinueBB =
+ CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this, ClassDecl);
+ assert(BaseCtorContinueBB);
+ }
+
+ llvm::Value *const OldThis = CXXThisValue;
+ // Virtual base initializers first.
+ for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) {
+ if (CGM.getCodeGenOpts().StrictVTablePointers &&
+ CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+ isInitializerOfDynamicClass(*B))
+ CXXThisValue = Builder.CreateInvariantGroupBarrier(LoadCXXThis());
+ EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
+ }
+
+ if (BaseCtorContinueBB) {
+ // Complete object handler should continue to the remaining initializers.
+ Builder.CreateBr(BaseCtorContinueBB);
+ EmitBlock(BaseCtorContinueBB);
+ }
+
+ // Then, non-virtual base initializers.
+ for (; B != E && (*B)->isBaseInitializer(); B++) {
+ assert(!(*B)->isBaseVirtual());
+
+ if (CGM.getCodeGenOpts().StrictVTablePointers &&
+ CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+ isInitializerOfDynamicClass(*B))
+ CXXThisValue = Builder.CreateInvariantGroupBarrier(LoadCXXThis());
+ EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
+ }
+
+ CXXThisValue = OldThis;
+
+ InitializeVTablePointers(ClassDecl);
+
+ // And finally, initialize class members.
+ FieldConstructionScope FCS(*this, LoadCXXThisAddress());
+ ConstructorMemcpyizer CM(*this, CD, Args);
+ for (; B != E; B++) {
+ CXXCtorInitializer *Member = (*B);
+ assert(!Member->isBaseInitializer());
+ assert(Member->isAnyMemberInitializer() &&
+ "Delegating initializer on non-delegating constructor");
+ CM.addMemberInitializer(Member);
+ }
+ CM.finish();
+}
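+
+// Prologue ordering on a hypothetical hierarchy:
+//
+//   struct V {};
+//   struct A : virtual V {};
+//   struct B : A { int I; B() : I(0) {} };
+//
+// A complete B() runs the virtual base initializer for V first, then the
+// non-virtual base A, then stores B's vtable pointers (B is dynamic because
+// of its virtual base), and only then hands the member init of I to the
+// ConstructorMemcpyizer.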
+
+static bool
+FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field);
+
+static bool
+HasTrivialDestructorBody(ASTContext &Context,
+ const CXXRecordDecl *BaseClassDecl,
+ const CXXRecordDecl *MostDerivedClassDecl)
+{
+ // If the destructor is trivial we don't have to check anything else.
+ if (BaseClassDecl->hasTrivialDestructor())
+ return true;
+
+ if (!BaseClassDecl->getDestructor()->hasTrivialBody())
+ return false;
+
+ // Check fields.
+ for (const auto *Field : BaseClassDecl->fields())
+ if (!FieldHasTrivialDestructorBody(Context, Field))
+ return false;
+
+ // Check non-virtual bases.
+ for (const auto &I : BaseClassDecl->bases()) {
+ if (I.isVirtual())
+ continue;
+
+ const CXXRecordDecl *NonVirtualBase =
+ cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ if (!HasTrivialDestructorBody(Context, NonVirtualBase,
+ MostDerivedClassDecl))
+ return false;
+ }
+
+ if (BaseClassDecl == MostDerivedClassDecl) {
+ // Check virtual bases.
+ for (const auto &I : BaseClassDecl->vbases()) {
+ const CXXRecordDecl *VirtualBase =
+ cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ if (!HasTrivialDestructorBody(Context, VirtualBase,
+ MostDerivedClassDecl))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool
+FieldHasTrivialDestructorBody(ASTContext &Context,
+ const FieldDecl *Field)
+{
+ QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());
+
+ const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
+ if (!RT)
+ return true;
+
+ CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+
+ // The destructor for an implicit anonymous union member is never invoked.
+ if (FieldClassDecl->isUnion() && FieldClassDecl->isAnonymousStructOrUnion())
+ return false;
+
+ return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
+}
+
+/// CanSkipVTablePointerInitialization - Check whether we need to initialize
+/// any vtable pointers before calling this destructor.
+static bool CanSkipVTablePointerInitialization(CodeGenFunction &CGF,
+ const CXXDestructorDecl *Dtor) {
+ const CXXRecordDecl *ClassDecl = Dtor->getParent();
+ if (!ClassDecl->isDynamicClass())
+ return true;
+
+ if (!Dtor->hasTrivialBody())
+ return false;
+
+ // Check the fields.
+ for (const auto *Field : ClassDecl->fields())
+ if (!FieldHasTrivialDestructorBody(CGF.getContext(), Field))
+ return false;
+
+ return true;
+}
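+
+// Example of the predicate above (hypothetical classes): for
+//
+//   struct Poly { virtual void F(); int I; };
+//
+// the implicit ~Poly has a trivial body and no field needs destruction, so no
+// destructor code runs that could make a virtual call on the object and the
+// vptr stores in the destructor can be skipped; adding a member with a
+// non-trivial destructor (e.g. a std::string) would require them again.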
+
+/// EmitDestructorBody - Emits the body of the current destructor.
+void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
+ const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
+ CXXDtorType DtorType = CurGD.getDtorType();
+
+ Stmt *Body = Dtor->getBody();
+ if (Body)
+ incrementProfileCounter(Body);
+
+ // The call to operator delete in a deleting destructor happens
+ // outside of the function-try-block, which means it's always
+ // possible to delegate the destructor body to the complete
+ // destructor. Do so.
+ if (DtorType == Dtor_Deleting) {
+ EnterDtorCleanups(Dtor, Dtor_Deleting);
+ EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
+ /*Delegating=*/false, LoadCXXThisAddress());
+ PopCleanupBlock();
+ return;
+ }
+
+ // If the body is a function-try-block, enter the try before
+ // anything else.
+ bool isTryBody = (Body && isa<CXXTryStmt>(Body));
+ if (isTryBody)
+ EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
+ EmitAsanPrologueOrEpilogue(false);
+
+ // Enter the epilogue cleanups.
+ RunCleanupsScope DtorEpilogue(*this);
+
+ // If this is the complete variant, just invoke the base variant;
+ // the epilogue will destruct the virtual bases. But we can't do
+ // this optimization if the body is a function-try-block, because
+ // we'd introduce *two* handler blocks. In the Microsoft ABI, we
+ // always delegate because we might not have a definition in this TU.
+ switch (DtorType) {
+ case Dtor_Comdat:
+ llvm_unreachable("not expecting a COMDAT");
+
+ case Dtor_Deleting: llvm_unreachable("already handled deleting case");
+
+ case Dtor_Complete:
+ assert((Body || getTarget().getCXXABI().isMicrosoft()) &&
+ "can't emit a dtor without a body for non-Microsoft ABIs");
+
+ // Enter the cleanup scopes for virtual bases.
+ EnterDtorCleanups(Dtor, Dtor_Complete);
+
+ if (!isTryBody) {
+ EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
+ /*Delegating=*/false, LoadCXXThisAddress());
+ break;
+ }
+ // Fallthrough: act like we're in the base variant.
+
+ case Dtor_Base:
+ assert(Body);
+
+ // Enter the cleanup scopes for fields and non-virtual bases.
+ EnterDtorCleanups(Dtor, Dtor_Base);
+
+ // Initialize the vtable pointers before entering the body.
+ if (!CanSkipVTablePointerInitialization(*this, Dtor)) {
+ // Insert the llvm.invariant.group.barrier intrinsic before initializing
+ // the vptrs to cancel any previous assumptions we might have made.
+ if (CGM.getCodeGenOpts().StrictVTablePointers &&
+ CGM.getCodeGenOpts().OptimizationLevel > 0)
+ CXXThisValue = Builder.CreateInvariantGroupBarrier(LoadCXXThis());
+ InitializeVTablePointers(Dtor->getParent());
+ }
+
+ if (isTryBody)
+ EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
+ else if (Body)
+ EmitStmt(Body);
+ else {
+ assert(Dtor->isImplicit() && "bodyless dtor not implicit");
+ // nothing to do besides what's in the epilogue
+ }
+ // -fapple-kext must inline any call to this dtor into
+ // the caller's body.
+ if (getLangOpts().AppleKext)
+ CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
+
+ break;
+ }
+
+ // Jump out through the epilogue cleanups.
+ DtorEpilogue.ForceCleanup();
+
+ // Exit the try if applicable.
+ if (isTryBody)
+ ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
+}
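+
+// Rough shape of the variant handling above (Itanium ABI naming):
+//
+//   deleting (D0):  call complete (D1), then operator delete(this)
+//   complete (D1):  call base (D2), then destroy virtual bases
+//   base (D2):      run the body, then destroy fields and non-virtual bases
+//
+// which is why Dtor_Deleting can always delegate to Dtor_Complete, and
+// Dtor_Complete delegates to Dtor_Base unless the body is a
+// function-try-block (that would introduce two handler blocks).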
+
+void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) {
+ const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl());
+ const Stmt *RootS = AssignOp->getBody();
+ assert(isa<CompoundStmt>(RootS) &&
+ "Body of an implicit assignment operator should be compound stmt.");
+ const CompoundStmt *RootCS = cast<CompoundStmt>(RootS);
+
+ LexicalScope Scope(*this, RootCS->getSourceRange());
+
+ AssignmentMemcpyizer AM(*this, AssignOp, Args);
+ for (auto *I : RootCS->body())
+ AM.emitAssignment(I);
+ AM.finish();
+}
+
+namespace {
+ /// Call the operator delete associated with the current destructor.
+ struct CallDtorDelete final : EHScopeStack::Cleanup {
+ CallDtorDelete() {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
+ const CXXRecordDecl *ClassDecl = Dtor->getParent();
+ CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
+ CGF.getContext().getTagDeclType(ClassDecl));
+ }
+ };
+
+ struct CallDtorDeleteConditional final : EHScopeStack::Cleanup {
+ llvm::Value *ShouldDeleteCondition;
+ public:
+ CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition)
+ : ShouldDeleteCondition(ShouldDeleteCondition) {
+ assert(ShouldDeleteCondition != nullptr);
+ }
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock("dtor.call_delete");
+ llvm::BasicBlock *continueBB = CGF.createBasicBlock("dtor.continue");
+ llvm::Value *ShouldCallDelete
+ = CGF.Builder.CreateIsNull(ShouldDeleteCondition);
+ CGF.Builder.CreateCondBr(ShouldCallDelete, continueBB, callDeleteBB);
+
+ CGF.EmitBlock(callDeleteBB);
+ const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
+ const CXXRecordDecl *ClassDecl = Dtor->getParent();
+ CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
+ CGF.getContext().getTagDeclType(ClassDecl));
+ CGF.Builder.CreateBr(continueBB);
+
+ CGF.EmitBlock(continueBB);
+ }
+ };
+
+ class DestroyField final : public EHScopeStack::Cleanup {
+ const FieldDecl *field;
+ CodeGenFunction::Destroyer *destroyer;
+ bool useEHCleanupForArray;
+
+ public:
+ DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
+ bool useEHCleanupForArray)
+ : field(field), destroyer(destroyer),
+ useEHCleanupForArray(useEHCleanupForArray) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ // Find the address of the field.
+ Address thisValue = CGF.LoadCXXThisAddress();
+ QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
+ LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
+ LValue LV = CGF.EmitLValueForField(ThisLV, field);
+ assert(LV.isSimple());
+
+ CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
+ flags.isForNormalCleanup() && useEHCleanupForArray);
+ }
+ };
+
+ static void EmitSanitizerDtorCallback(CodeGenFunction &CGF, llvm::Value *Ptr,
+ CharUnits::QuantityType PoisonSize) {
+ // Pass the void pointer and the size of the region as arguments to the
+ // runtime function.
+ llvm::Value *Args[] = {CGF.Builder.CreateBitCast(Ptr, CGF.VoidPtrTy),
+ llvm::ConstantInt::get(CGF.SizeTy, PoisonSize)};
+
+ llvm::Type *ArgTypes[] = {CGF.VoidPtrTy, CGF.SizeTy};
+
+ llvm::FunctionType *FnType =
+ llvm::FunctionType::get(CGF.VoidTy, ArgTypes, false);
+ llvm::Value *Fn =
+ CGF.CGM.CreateRuntimeFunction(FnType, "__sanitizer_dtor_callback");
+ CGF.EmitNounwindRuntimeCall(Fn, Args);
+ }
+
+ class SanitizeDtorMembers final : public EHScopeStack::Cleanup {
+ const CXXDestructorDecl *Dtor;
+
+ public:
+ SanitizeDtorMembers(const CXXDestructorDecl *Dtor) : Dtor(Dtor) {}
+
+ // Generate function call for handling object poisoning.
+ // Disables tail call elimination, to prevent the current stack frame
+ // from disappearing from the stack trace.
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ const ASTRecordLayout &Layout =
+ CGF.getContext().getASTRecordLayout(Dtor->getParent());
+
+ // Nothing to poison.
+ if (Layout.getFieldCount() == 0)
+ return;
+
+ // Prevent the current stack frame from disappearing from the stack trace.
+ CGF.CurFn->addFnAttr("disable-tail-calls", "true");
+
+ // Construct pointer to region to begin poisoning, and calculate poison
+ // size, so that only members declared in this class are poisoned.
+ ASTContext &Context = CGF.getContext();
+ unsigned fieldIndex = 0;
+ int startIndex = -1;
+ for (const FieldDecl *Field : Dtor->getParent()->fields()) {
+ // Poison the field if its destructor body is trivial.
+ if (FieldHasTrivialDestructorBody(Context, Field)) {
+ // Start sanitizing at this field
+ if (startIndex < 0)
+ startIndex = fieldIndex;
+
+ // If this is the last field, it must be poisoned together with the
+ // current block.
+ if (fieldIndex == Layout.getFieldCount() - 1) {
+ PoisonMembers(CGF, startIndex, Layout.getFieldCount());
+ }
+ } else if (startIndex >= 0) {
+ // We have reached the end of a run of poisonable memory, so poison it.
+ PoisonMembers(CGF, startIndex, fieldIndex);
+ // Reset the start index.
+ startIndex = -1;
+ }
+ fieldIndex += 1;
+ }
+ }
+
+ private:
+ /// \param layoutStartOffset index of the ASTRecordLayout field to
+ /// start poisoning (inclusive)
+ /// \param layoutEndOffset index of the ASTRecordLayout field to
+ /// end poisoning (exclusive)
+ void PoisonMembers(CodeGenFunction &CGF, unsigned layoutStartOffset,
+ unsigned layoutEndOffset) {
+ ASTContext &Context = CGF.getContext();
+ const ASTRecordLayout &Layout =
+ Context.getASTRecordLayout(Dtor->getParent());
+
+ llvm::ConstantInt *OffsetSizePtr = llvm::ConstantInt::get(
+ CGF.SizeTy,
+ Context.toCharUnitsFromBits(Layout.getFieldOffset(layoutStartOffset))
+ .getQuantity());
+
+ llvm::Value *OffsetPtr = CGF.Builder.CreateGEP(
+ CGF.Builder.CreateBitCast(CGF.LoadCXXThis(), CGF.Int8PtrTy),
+ OffsetSizePtr);
+
+ CharUnits::QuantityType PoisonSize;
+ if (layoutEndOffset >= Layout.getFieldCount()) {
+ PoisonSize = Layout.getNonVirtualSize().getQuantity() -
+ Context.toCharUnitsFromBits(
+ Layout.getFieldOffset(layoutStartOffset))
+ .getQuantity();
+ } else {
+ PoisonSize = Context.toCharUnitsFromBits(
+ Layout.getFieldOffset(layoutEndOffset) -
+ Layout.getFieldOffset(layoutStartOffset))
+ .getQuantity();
+ }
+
+ if (PoisonSize == 0)
+ return;
+
+ EmitSanitizerDtorCallback(CGF, OffsetPtr, PoisonSize);
+ }
+ };
+
+ class SanitizeDtorVTable final : public EHScopeStack::Cleanup {
+ const CXXDestructorDecl *Dtor;
+
+ public:
+ SanitizeDtorVTable(const CXXDestructorDecl *Dtor) : Dtor(Dtor) {}
+
+ // Generate function call for handling vtable pointer poisoning.
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ assert(Dtor->getParent()->isDynamicClass());
+ (void)Dtor;
+ ASTContext &Context = CGF.getContext();
+ // Poison vtable and vtable ptr if they exist for this class.
+ llvm::Value *VTablePtr = CGF.LoadCXXThis();
+
+ CharUnits::QuantityType PoisonSize =
+ Context.toCharUnitsFromBits(CGF.PointerWidthInBits).getQuantity();
+ // Pass the void pointer and the size of the region as arguments to the
+ // runtime function.
+ EmitSanitizerDtorCallback(CGF, VTablePtr, PoisonSize);
+ }
+ };
+} // end anonymous namespace
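+
+// Sketch of the poisoning above (hypothetical type): destroying
+//
+//   struct S { int A; std::string Str; int B; };
+//
+// with -fsanitize=memory and -fsanitize-memory-use-after-dtor emits
+// __sanitizer_dtor_callback over each contiguous run of fields with trivial
+// destructor bodies (here A, then B); Str is left to its own destructor.
+// SanitizeDtorVTable additionally poisons the vtable pointer slot so that a
+// virtual call through a destroyed object is flagged.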
+
+/// \brief Emit all code that comes at the end of the class's
+/// destructor. This calls destructors on members and base classes
+/// in reverse order of their construction.
+void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
+ CXXDtorType DtorType) {
+ assert((!DD->isTrivial() || DD->hasAttr<DLLExportAttr>()) &&
+ "Should not emit dtor epilogue for non-exported trivial dtor!");
+
+ // The deleting-destructor phase just needs to call the appropriate
+ // operator delete that Sema picked up.
+ if (DtorType == Dtor_Deleting) {
+ assert(DD->getOperatorDelete() &&
+ "operator delete missing - EnterDtorCleanups");
+ if (CXXStructorImplicitParamValue) {
+ // If there is an implicit param to the deleting dtor, it's a boolean
+ // telling whether we should call delete at the end of the dtor.
+ EHStack.pushCleanup<CallDtorDeleteConditional>(
+ NormalAndEHCleanup, CXXStructorImplicitParamValue);
+ } else {
+ EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
+ }
+ return;
+ }
+
+ const CXXRecordDecl *ClassDecl = DD->getParent();
+
+ // Unions have no bases and do not call field destructors.
+ if (ClassDecl->isUnion())
+ return;
+
+ // The complete-destructor phase just destructs all the virtual bases.
+ if (DtorType == Dtor_Complete) {
+ // Poison the vtable pointer such that access after the base
+ // and member destructors are invoked is invalid.
+ if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
+ SanOpts.has(SanitizerKind::Memory) && ClassDecl->getNumVBases() &&
+ ClassDecl->isPolymorphic())
+ EHStack.pushCleanup<SanitizeDtorVTable>(NormalAndEHCleanup, DD);
+
+ // We push them in the forward order so that they'll be popped in
+ // the reverse order.
+ for (const auto &Base : ClassDecl->vbases()) {
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore trivial destructors.
+ if (BaseClassDecl->hasTrivialDestructor())
+ continue;
+
+ EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
+ BaseClassDecl,
+ /*BaseIsVirtual*/ true);
+ }
+
+ return;
+ }
+
+ assert(DtorType == Dtor_Base);
+ // Poison the vtable pointer if the class has no virtual bases but is
+ // polymorphic, i.e. declares or inherits virtual functions.
+ if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
+ SanOpts.has(SanitizerKind::Memory) && !ClassDecl->getNumVBases() &&
+ ClassDecl->isPolymorphic())
+ EHStack.pushCleanup<SanitizeDtorVTable>(NormalAndEHCleanup, DD);
+
+ // Destroy non-virtual bases.
+ for (const auto &Base : ClassDecl->bases()) {
+ // Ignore virtual bases.
+ if (Base.isVirtual())
+ continue;
+
+ CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();
+
+ // Ignore trivial destructors.
+ if (BaseClassDecl->hasTrivialDestructor())
+ continue;
+
+ EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
+ BaseClassDecl,
+ /*BaseIsVirtual*/ false);
+ }
+
+ // Poison fields such that access after their destructors are
+ // invoked, and before the base class destructor runs, is invalid.
+ if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
+ SanOpts.has(SanitizerKind::Memory))
+ EHStack.pushCleanup<SanitizeDtorMembers>(NormalAndEHCleanup, DD);
+
+ // Destroy direct fields.
+ for (const auto *Field : ClassDecl->fields()) {
+ QualType type = Field->getType();
+ QualType::DestructionKind dtorKind = type.isDestructedType();
+ if (!dtorKind) continue;
+
+ // Anonymous union members do not have their destructors called.
+ const RecordType *RT = type->getAsUnionType();
+ if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;
+
+ CleanupKind cleanupKind = getCleanupKind(dtorKind);
+ EHStack.pushCleanup<DestroyField>(cleanupKind, Field,
+ getDestroyer(dtorKind),
+ cleanupKind & EHCleanup);
+ }
+}
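+
+// Cleanup ordering note: cleanups are pushed in forward order and popped in
+// reverse, so for a hypothetical
+//
+//   struct T : Base1, Base2 { F1 A; F2 B; ~T(); };
+//
+// the base variant runs ~T's body first, then ~F2 on B, ~F1 on A, then
+// ~Base2 and ~Base1, mirroring construction order in reverse.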
+
+/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
+/// constructor for each of several members of an array.
+///
+/// \param ctor the constructor to call for each element
+/// \param arrayType the type of the array to initialize
+/// \param arrayBegin an arrayType*
+/// \param zeroInitialize true if each element should be
+/// zero-initialized before it is constructed
+void CodeGenFunction::EmitCXXAggrConstructorCall(
+ const CXXConstructorDecl *ctor, const ConstantArrayType *arrayType,
+ Address arrayBegin, const CXXConstructExpr *E, bool zeroInitialize) {
+ QualType elementType;
+ llvm::Value *numElements =
+ emitArrayLength(arrayType, elementType, arrayBegin);
+
+ EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin, E, zeroInitialize);
+}
+
+/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
+/// constructor for each of several members of an array.
+///
+/// \param ctor the constructor to call for each element
+/// \param numElements the number of elements in the array;
+/// may be zero
+/// \param arrayBase a T*, where T is the type constructed by ctor
+/// \param zeroInitialize true if each element should be
+/// zero-initialized before it is constructed
+void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
+ llvm::Value *numElements,
+ Address arrayBase,
+ const CXXConstructExpr *E,
+ bool zeroInitialize) {
+ // It's legal for numElements to be zero. This can happen both
+ // dynamically, because x can be zero in 'new A[x]', and statically,
+ // because of GCC extensions that permit zero-length arrays. There
+ // are probably legitimate places where we could assume that this
+ // doesn't happen, but it's not clear that it's worth it.
+ llvm::BranchInst *zeroCheckBranch = nullptr;
+
+ // Optimize for a constant count.
+ llvm::ConstantInt *constantCount
+ = dyn_cast<llvm::ConstantInt>(numElements);
+ if (constantCount) {
+ // Just skip out if the constant count is zero.
+ if (constantCount->isZero()) return;
+
+ // Otherwise, emit the check.
+ } else {
+ llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
+ llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
+ zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
+ EmitBlock(loopBB);
+ }
+
+ // Find the end of the array.
+ llvm::Value *arrayBegin = arrayBase.getPointer();
+ llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
+ "arrayctor.end");
+
+ // Enter the loop, setting up a phi for the current location to initialize.
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
+ EmitBlock(loopBB);
+ llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
+ "arrayctor.cur");
+ cur->addIncoming(arrayBegin, entryBB);
+
+ // Inside the loop body, emit the constructor call on the array element.
+
+ // The alignment of the base, adjusted by the size of a single element,
+ // provides a conservative estimate of the alignment of every element.
+ // (This assumes we never start tracking offsetted alignments.)
+ //
+ // Note that these are complete objects and so we don't need to
+ // use the non-virtual size or alignment.
+ QualType type = getContext().getTypeDeclType(ctor->getParent());
+ CharUnits eltAlignment =
+ arrayBase.getAlignment()
+ .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
+ Address curAddr = Address(cur, eltAlignment);
+
+ // Zero initialize the storage, if requested.
+ if (zeroInitialize)
+ EmitNullInitialization(curAddr, type);
+
+ // C++ [class.temporary]p4:
+ // There are two contexts in which temporaries are destroyed at a different
+ // point than the end of the full-expression. The first context is when a
+ // default constructor is called to initialize an element of an array.
+ // If the constructor has one or more default arguments, the destruction of
+ // every temporary created in a default argument expression is sequenced
+ // before the construction of the next array element, if any.
+
+ {
+ RunCleanupsScope Scope(*this);
+
+ // Evaluate the constructor and its arguments in a regular
+ // partial-destroy cleanup.
+ if (getLangOpts().Exceptions &&
+ !ctor->getParent()->hasTrivialDestructor()) {
+ Destroyer *destroyer = destroyCXXObject;
+ pushRegularPartialArrayCleanup(arrayBegin, cur, type, eltAlignment,
+ *destroyer);
+ }
+
+ EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false,
+ /*Delegating=*/false, curAddr, E);
+ }
+
+ // Go to the next element.
+ llvm::Value *next =
+ Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
+ "arrayctor.next");
+ cur->addIncoming(next, Builder.GetInsertBlock());
+
+ // Check whether that's the end of the loop.
+ llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done");
+ llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont");
+ Builder.CreateCondBr(done, contBB, loopBB);
+
+ // Patch the earlier check to skip over the loop.
+ if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB);
+
+ EmitBlock(contBB);
+}
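+
+// Shape of the loop emitted above (illustrative LLVM IR; names approximate):
+//
+//   arrayctor.loop:
+//     %cur = phi %T* [ %begin, %entry ], [ %next, ... ]
+//     ; construct the element at %cur
+//     %next = getelementptr inbounds %T, %T* %cur, i64 1
+//     %done = icmp eq %T* %next, %end
+//     br i1 %done, label %arrayctor.cont, label %arrayctor.loop
+//
+// For a non-constant count, an 'isempty' check branches around the loop and
+// is patched afterwards to target arrayctor.cont.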
+
+void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
+ Address addr,
+ QualType type) {
+ const RecordType *rtype = type->castAs<RecordType>();
+ const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
+ const CXXDestructorDecl *dtor = record->getDestructor();
+ assert(!dtor->isTrivial());
+ CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
+ /*Delegating=*/false, addr);
+}
+
+void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ bool ForVirtualBase,
+ bool Delegating, Address This,
+ const CXXConstructExpr *E) {
+ const CXXRecordDecl *ClassDecl = D->getParent();
+
+ // C++11 [class.mfct.non-static]p2:
+ // If a non-static member function of a class X is called for an object that
+ // is not of type X, or of a type derived from X, the behavior is undefined.
+ // FIXME: Provide a source location here.
+ EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, SourceLocation(),
+ This.getPointer(), getContext().getRecordType(ClassDecl));
+
+ if (D->isTrivial() && D->isDefaultConstructor()) {
+ assert(E->getNumArgs() == 0 && "trivial default ctor with args");
+ return;
+ }
+
+ // If this is a trivial constructor, just emit what's needed. If this is a
+ // union copy constructor, we must emit a memcpy, because the AST does not
+ // model that copy.
+ if (isMemcpyEquivalentSpecialMember(D)) {
+ assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
+
+ const Expr *Arg = E->getArg(0);
+ QualType SrcTy = Arg->getType();
+ Address Src = EmitLValue(Arg).getAddress();
+ QualType DestTy = getContext().getTypeDeclType(ClassDecl);
+ EmitAggregateCopyCtor(This, Src, DestTy, SrcTy);
+ return;
+ }
+
+ CallArgList Args;
+
+ // Push the this ptr.
+ Args.add(RValue::get(This.getPointer()), D->getThisType(getContext()));
+
+ // Add the rest of the user-supplied arguments.
+ const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
+ EmitCallArgs(Args, FPT, E->arguments(), E->getConstructor());
+
+ // Insert any ABI-specific implicit constructor arguments.
+ unsigned ExtraArgs = CGM.getCXXABI().addImplicitConstructorArgs(
+ *this, D, Type, ForVirtualBase, Delegating, Args);
+
+ // Emit the call.
+ llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, getFromCtorType(Type));
+ const CGFunctionInfo &Info =
+ CGM.getTypes().arrangeCXXConstructorCall(Args, D, Type, ExtraArgs);
+ EmitCall(Info, Callee, ReturnValueSlot(), Args, D);
+
+ // Generate vtable assumptions if we're constructing a complete object
+ // with a vtable. We don't do this for base subobjects for two reasons:
+ // first, it's incorrect for classes with virtual bases, and second, we're
+ // about to overwrite the vptrs anyway.
+ // We also have to make sure we can refer to the vtable:
+ // - we may refer to the vtable if it's safe to speculatively emit it.
+ // FIXME: If the vtable is used by the ctor/dtor, or if it is external and we
+ // are sure its definition is not hidden, then we are always safe to refer
+ // to it.
+ // FIXME: It looks like InstCombine is very inefficient at dealing with
+ // assumes. Make assumption loads require -fstrict-vtable-pointers temporarily.
+ if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+ ClassDecl->isDynamicClass() && Type != Ctor_Base &&
+ CGM.getCXXABI().canSpeculativelyEmitVTable(ClassDecl) &&
+ CGM.getCodeGenOpts().StrictVTablePointers)
+ EmitVTableAssumptionLoads(ClassDecl, This);
+}
+
+void CodeGenFunction::EmitVTableAssumptionLoad(const VPtr &Vptr, Address This) {
+ llvm::Value *VTableGlobal =
+ CGM.getCXXABI().getVTableAddressPoint(Vptr.Base, Vptr.VTableClass);
+ if (!VTableGlobal)
+ return;
+
+ // We can just use the base offset in the complete class.
+ CharUnits NonVirtualOffset = Vptr.Base.getBaseOffset();
+
+ if (!NonVirtualOffset.isZero())
+ This =
+ ApplyNonVirtualAndVirtualOffset(*this, This, NonVirtualOffset, nullptr,
+ Vptr.VTableClass, Vptr.NearestVBase);
+
+ llvm::Value *VPtrValue =
+ GetVTablePtr(This, VTableGlobal->getType(), Vptr.VTableClass);
+ llvm::Value *Cmp =
+ Builder.CreateICmpEQ(VPtrValue, VTableGlobal, "cmp.vtables");
+ Builder.CreateAssumption(Cmp);
+}
+
+void CodeGenFunction::EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl,
+ Address This) {
+ if (CGM.getCXXABI().doStructorsInitializeVPtrs(ClassDecl))
+ for (const VPtr &Vptr : getVTablePointers(ClassDecl))
+ EmitVTableAssumptionLoad(Vptr, This);
+}
+
+void
+CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
+ Address This, Address Src,
+ const CXXConstructExpr *E) {
+ if (isMemcpyEquivalentSpecialMember(D)) {
+ assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
+ assert(D->isCopyOrMoveConstructor() &&
+ "trivial 1-arg ctor not a copy/move ctor");
+ EmitAggregateCopyCtor(This, Src,
+ getContext().getTypeDeclType(D->getParent()),
+ (*E->arg_begin())->getType());
+ return;
+ }
+ llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, StructorType::Complete);
+ assert(D->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+
+ const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
+
+ CallArgList Args;
+
+ // Push the this ptr.
+ Args.add(RValue::get(This.getPointer()), D->getThisType(getContext()));
+
+ // Push the src ptr.
+ QualType QT = *(FPT->param_type_begin());
+ llvm::Type *t = CGM.getTypes().ConvertType(QT);
+ Src = Builder.CreateBitCast(Src, t);
+ Args.add(RValue::get(Src.getPointer()), QT);
+
+ // Skip over first argument (Src).
+ EmitCallArgs(Args, FPT, drop_begin(E->arguments(), 1), E->getConstructor(),
+ /*ParamsToSkip*/ 1);
+
+ EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
+ Callee, ReturnValueSlot(), Args, D);
+}
+
+void
+CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
+ CXXCtorType CtorType,
+ const FunctionArgList &Args,
+ SourceLocation Loc) {
+ CallArgList DelegateArgs;
+
+ FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
+ assert(I != E && "no parameters to constructor");
+
+ // this
+ DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType());
+ ++I;
+
+ // vtt
+ if (llvm::Value *VTT = GetVTTParameter(GlobalDecl(Ctor, CtorType),
+ /*ForVirtualBase=*/false,
+ /*Delegating=*/true)) {
+ QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
+ DelegateArgs.add(RValue::get(VTT), VoidPP);
+
+ if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
+ assert(I != E && "cannot skip vtt parameter, already done with args");
+ assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type");
+ ++I;
+ }
+ }
+
+ // Explicit arguments.
+ for (; I != E; ++I) {
+ const VarDecl *param = *I;
+ // FIXME: per-argument source location
+ EmitDelegateCallArg(DelegateArgs, param, Loc);
+ }
+
+ llvm::Value *Callee =
+ CGM.getAddrOfCXXStructor(Ctor, getFromCtorType(CtorType));
+ EmitCall(CGM.getTypes()
+ .arrangeCXXStructorDeclaration(Ctor, getFromCtorType(CtorType)),
+ Callee, ReturnValueSlot(), DelegateArgs, Ctor);
+}
+
+namespace {
+ struct CallDelegatingCtorDtor final : EHScopeStack::Cleanup {
+ const CXXDestructorDecl *Dtor;
+ Address Addr;
+ CXXDtorType Type;
+
+ CallDelegatingCtorDtor(const CXXDestructorDecl *D, Address Addr,
+ CXXDtorType Type)
+ : Dtor(D), Addr(Addr), Type(Type) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
+ /*Delegating=*/true, Addr);
+ }
+ };
+} // end anonymous namespace
+
+void
+CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
+ const FunctionArgList &Args) {
+ assert(Ctor->isDelegatingConstructor());
+
+ Address ThisPtr = LoadCXXThisAddress();
+
+ AggValueSlot AggSlot =
+ AggValueSlot::forAddr(ThisPtr, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+
+ EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);
+
+ const CXXRecordDecl *ClassDecl = Ctor->getParent();
+ if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
+ CXXDtorType Type =
+ CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;
+
+ EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup,
+ ClassDecl->getDestructor(),
+ ThisPtr, Type);
+ }
+}
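+
+// Context for the cleanup above (hypothetical class): in
+//
+//   struct W { W(); W(int); };
+//   W::W() : W(42) {}   // delegating constructor
+//
+// W(42) fully constructs *this, so if an exception escapes the rest of
+// W()'s body the matching destructor variant must run on the already
+// constructed object; CallDelegatingCtorDtor provides exactly that.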
+
+void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ bool ForVirtualBase,
+ bool Delegating,
+ Address This) {
+ CGM.getCXXABI().EmitDestructorCall(*this, DD, Type, ForVirtualBase,
+ Delegating, This);
+}
+
+namespace {
+ struct CallLocalDtor final : EHScopeStack::Cleanup {
+ const CXXDestructorDecl *Dtor;
+ Address Addr;
+
+ CallLocalDtor(const CXXDestructorDecl *D, Address Addr)
+ : Dtor(D), Addr(Addr) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false,
+ /*Delegating=*/false, Addr);
+ }
+ };
+}
+
+void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
+ Address Addr) {
+ EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
+}
+
+void CodeGenFunction::PushDestructorCleanup(QualType T, Address Addr) {
+ CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
+ if (!ClassDecl) return;
+ if (ClassDecl->hasTrivialDestructor()) return;
+
+ const CXXDestructorDecl *D = ClassDecl->getDestructor();
+ assert(D && D->isUsed() && "destructor not marked as used!");
+ PushDestructorCleanup(D, Addr);
+}
+
+void CodeGenFunction::InitializeVTablePointer(const VPtr &Vptr) {
+ // Compute the address point.
+ llvm::Value *VTableAddressPoint =
+ CGM.getCXXABI().getVTableAddressPointInStructor(
+ *this, Vptr.VTableClass, Vptr.Base, Vptr.NearestVBase);
+
+ if (!VTableAddressPoint)
+ return;
+
+ // Compute where to store the address point.
+ llvm::Value *VirtualOffset = nullptr;
+ CharUnits NonVirtualOffset = CharUnits::Zero();
+
+ if (CGM.getCXXABI().isVirtualOffsetNeededForVTableField(*this, Vptr)) {
+ // We need to use the virtual base offset offset because the virtual base
+ // might have a different offset in the most derived class.
+
+ VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(
+ *this, LoadCXXThisAddress(), Vptr.VTableClass, Vptr.NearestVBase);
+ NonVirtualOffset = Vptr.OffsetFromNearestVBase;
+ } else {
+ // We can just use the base offset in the complete class.
+ NonVirtualOffset = Vptr.Base.getBaseOffset();
+ }
+
+ // Apply the offsets.
+ Address VTableField = LoadCXXThisAddress();
+
+ if (!NonVirtualOffset.isZero() || VirtualOffset)
+ VTableField = ApplyNonVirtualAndVirtualOffset(
+ *this, VTableField, NonVirtualOffset, VirtualOffset, Vptr.VTableClass,
+ Vptr.NearestVBase);
+
+ // Finally, store the address point. Use the same LLVM types as the field to
+ // support optimization.
+ llvm::Type *VTablePtrTy =
+ llvm::FunctionType::get(CGM.Int32Ty, /*isVarArg=*/true)
+ ->getPointerTo()
+ ->getPointerTo();
+ VTableField = Builder.CreateBitCast(VTableField, VTablePtrTy->getPointerTo());
+ VTableAddressPoint = Builder.CreateBitCast(VTableAddressPoint, VTablePtrTy);
+
+ llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
+ CGM.DecorateInstructionWithTBAA(Store, CGM.getTBAAInfoForVTablePtr());
+ if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+ CGM.getCodeGenOpts().StrictVTablePointers)
+ CGM.DecorateInstructionWithInvariantGroup(Store, Vptr.VTableClass);
+}
+
+CodeGenFunction::VPtrsVector
+CodeGenFunction::getVTablePointers(const CXXRecordDecl *VTableClass) {
+ CodeGenFunction::VPtrsVector VPtrsResult;
+ VisitedVirtualBasesSetTy VBases;
+ getVTablePointers(BaseSubobject(VTableClass, CharUnits::Zero()),
+ /*NearestVBase=*/nullptr,
+ /*OffsetFromNearestVBase=*/CharUnits::Zero(),
+ /*BaseIsNonVirtualPrimaryBase=*/false, VTableClass, VBases,
+ VPtrsResult);
+ return VPtrsResult;
+}
+
+void CodeGenFunction::getVTablePointers(BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase,
+ CharUnits OffsetFromNearestVBase,
+ bool BaseIsNonVirtualPrimaryBase,
+ const CXXRecordDecl *VTableClass,
+ VisitedVirtualBasesSetTy &VBases,
+ VPtrsVector &Vptrs) {
+ // If this base is a non-virtual primary base the address point has already
+ // been set.
+ if (!BaseIsNonVirtualPrimaryBase) {
+ // Initialize the vtable pointer for this base.
+ VPtr Vptr = {Base, NearestVBase, OffsetFromNearestVBase, VTableClass};
+ Vptrs.push_back(Vptr);
+ }
+
+ const CXXRecordDecl *RD = Base.getBase();
+
+ // Traverse bases.
+ for (const auto &I : RD->bases()) {
+ CXXRecordDecl *BaseDecl
+ = cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore classes without a vtable.
+ if (!BaseDecl->isDynamicClass())
+ continue;
+
+ CharUnits BaseOffset;
+ CharUnits BaseOffsetFromNearestVBase;
+ bool BaseDeclIsNonVirtualPrimaryBase;
+
+ if (I.isVirtual()) {
+ // Check if we've visited this virtual base before.
+ if (!VBases.insert(BaseDecl).second)
+ continue;
+
+ const ASTRecordLayout &Layout =
+ getContext().getASTRecordLayout(VTableClass);
+
+ BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
+ BaseOffsetFromNearestVBase = CharUnits::Zero();
+ BaseDeclIsNonVirtualPrimaryBase = false;
+ } else {
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+
+ BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
+ BaseOffsetFromNearestVBase =
+ OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
+ BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
+ }
+
+ getVTablePointers(
+ BaseSubobject(BaseDecl, BaseOffset),
+ I.isVirtual() ? BaseDecl : NearestVBase, BaseOffsetFromNearestVBase,
+ BaseDeclIsNonVirtualPrimaryBase, VTableClass, VBases, Vptrs);
+ }
+}
+
+void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
+ // Ignore classes without a vtable.
+ if (!RD->isDynamicClass())
+ return;
+
+ // Initialize the vtable pointers for this class and all of its bases.
+ if (CGM.getCXXABI().doStructorsInitializeVPtrs(RD))
+ for (const VPtr &Vptr : getVTablePointers(RD))
+ InitializeVTablePointer(Vptr);
+
+ if (RD->getNumVBases())
+ CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, RD);
+}
+
+llvm::Value *CodeGenFunction::GetVTablePtr(Address This,
+ llvm::Type *VTableTy,
+ const CXXRecordDecl *RD) {
+ Address VTablePtrSrc = Builder.CreateElementBitCast(This, VTableTy);
+ llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
+ CGM.DecorateInstructionWithTBAA(VTable, CGM.getTBAAInfoForVTablePtr());
+
+ if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+ CGM.getCodeGenOpts().StrictVTablePointers)
+ CGM.DecorateInstructionWithInvariantGroup(VTable, RD);
+
+ return VTable;
+}
+
+// If a class has a single non-virtual base and does not introduce or override
+// virtual member functions or fields, it will have the same layout as its base.
+// This function returns the least derived such class.
+//
+// Casting an instance of a base class to such a derived class is technically
+// undefined behavior, but it is a relatively common hack for introducing member
+// functions on class instances with specific properties (e.g. llvm::Operator)
+// that works under most compilers and should not have security implications, so
+// we allow it by default. It can be disabled with -fsanitize=cfi-cast-strict.
+static const CXXRecordDecl *
+LeastDerivedClassWithSameLayout(const CXXRecordDecl *RD) {
+ if (!RD->field_empty())
+ return RD;
+
+ if (RD->getNumVBases() != 0)
+ return RD;
+
+ if (RD->getNumBases() != 1)
+ return RD;
+
+ for (const CXXMethodDecl *MD : RD->methods()) {
+ if (MD->isVirtual()) {
+ // Virtual member functions are only ok if they are implicit destructors
+ // because the implicit destructor will have the same semantics as the
+ // base class's destructor if no fields are added.
+ if (isa<CXXDestructorDecl>(MD) && MD->isImplicit())
+ continue;
+ return RD;
+ }
+ }
+
+ return LeastDerivedClassWithSameLayout(
+ RD->bases_begin()->getType()->getAsCXXRecordDecl());
+}
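+
+// Example of the walk above (hypothetical hierarchy): given
+//
+//   struct Base { virtual void F(); };
+//   struct Wrapper : Base { void Helper(); };  // no fields, no new virtuals
+//
+// LeastDerivedClassWithSameLayout(Wrapper) returns Base, so a CFI check
+// against a Wrapper* accepts any object whose vptr is valid for Base --
+// the llvm::Operator-style cast pattern described above.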
+
+void CodeGenFunction::EmitVTablePtrCheckForCall(const CXXMethodDecl *MD,
+ llvm::Value *VTable,
+ CFITypeCheckKind TCK,
+ SourceLocation Loc) {
+ const CXXRecordDecl *ClassDecl = MD->getParent();
+ if (!SanOpts.has(SanitizerKind::CFICastStrict))
+ ClassDecl = LeastDerivedClassWithSameLayout(ClassDecl);
+
+ EmitVTablePtrCheck(ClassDecl, VTable, TCK, Loc);
+}
+
+void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T,
+ llvm::Value *Derived,
+ bool MayBeNull,
+ CFITypeCheckKind TCK,
+ SourceLocation Loc) {
+ if (!getLangOpts().CPlusPlus)
+ return;
+
+ auto *ClassTy = T->getAs<RecordType>();
+ if (!ClassTy)
+ return;
+
+ const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(ClassTy->getDecl());
+
+ if (!ClassDecl->isCompleteDefinition() || !ClassDecl->isDynamicClass())
+ return;
+
+ if (!SanOpts.has(SanitizerKind::CFICastStrict))
+ ClassDecl = LeastDerivedClassWithSameLayout(ClassDecl);
+
+ llvm::BasicBlock *ContBlock = nullptr;
+
+ if (MayBeNull) {
+ llvm::Value *DerivedNotNull =
+ Builder.CreateIsNotNull(Derived, "cast.nonnull");
+
+ llvm::BasicBlock *CheckBlock = createBasicBlock("cast.check");
+ ContBlock = createBasicBlock("cast.cont");
+
+ Builder.CreateCondBr(DerivedNotNull, CheckBlock, ContBlock);
+
+ EmitBlock(CheckBlock);
+ }
+
+ llvm::Value *VTable =
+ GetVTablePtr(Address(Derived, getPointerAlign()), Int8PtrTy, ClassDecl);
+
+ EmitVTablePtrCheck(ClassDecl, VTable, TCK, Loc);
+
+ if (MayBeNull) {
+ Builder.CreateBr(ContBlock);
+ EmitBlock(ContBlock);
+ }
+}
+
+void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
+ llvm::Value *VTable,
+ CFITypeCheckKind TCK,
+ SourceLocation Loc) {
+ if (CGM.IsCFIBlacklistedRecord(RD))
+ return;
+
+ SanitizerScope SanScope(this);
+
+ llvm::Metadata *MD =
+ CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
+ llvm::Value *BitSetName = llvm::MetadataAsValue::get(getLLVMContext(), MD);
+
+ llvm::Value *CastedVTable = Builder.CreateBitCast(VTable, Int8PtrTy);
+ llvm::Value *BitSetTest =
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::bitset_test),
+ {CastedVTable, BitSetName});
+
+ if (CGM.getCodeGenOpts().SanitizeCfiCrossDso) {
+ if (auto TypeId = CGM.CreateCfiIdForTypeMetadata(MD)) {
+ EmitCfiSlowPathCheck(BitSetTest, TypeId, CastedVTable);
+ return;
+ }
+ }
+
+ SanitizerMask M;
+ switch (TCK) {
+ case CFITCK_VCall:
+ M = SanitizerKind::CFIVCall;
+ break;
+ case CFITCK_NVCall:
+ M = SanitizerKind::CFINVCall;
+ break;
+ case CFITCK_DerivedCast:
+ M = SanitizerKind::CFIDerivedCast;
+ break;
+ case CFITCK_UnrelatedCast:
+ M = SanitizerKind::CFIUnrelatedCast;
+ break;
+ }
+
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(Loc),
+ EmitCheckTypeDescriptor(QualType(RD->getTypeForDecl(), 0)),
+ llvm::ConstantInt::get(Int8Ty, TCK),
+ };
+ EmitCheck(std::make_pair(BitSetTest, M), "cfi_bad_type", StaticData,
+ CastedVTable);
+}
+
+// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
+// quite what we want.
+static const Expr *skipNoOpCastsAndParens(const Expr *E) {
+ while (true) {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
+ E = PE->getSubExpr();
+ continue;
+ }
+
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if (CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ }
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ if (UO->getOpcode() == UO_Extension) {
+ E = UO->getSubExpr();
+ continue;
+ }
+ }
+ return E;
+ }
+}
+
+bool
+CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
+ const CXXMethodDecl *MD) {
+ // When building with -fapple-kext, all calls must go through the vtable since
+ // the kernel linker can do runtime patching of vtables.
+ if (getLangOpts().AppleKext)
+ return false;
+
+ // If the most derived class is marked final, we know that no subclass can
+ // override this member function and so we can devirtualize it. For example:
+ //
+ // struct A { virtual void f(); }
+ // struct B final : A { };
+ //
+ // void f(B *b) {
+ // b->f();
+ // }
+ //
+ const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
+ if (MostDerivedClassDecl->hasAttr<FinalAttr>())
+ return true;
+
+ // If the member function is marked 'final', we know that it can't be
+ // overridden and can therefore devirtualize it.
+ if (MD->hasAttr<FinalAttr>())
+ return true;
+
+ // Similarly, if the class itself is marked 'final', it can't be derived
+ // from, so its member functions can't be overridden and we can therefore
+ // devirtualize the member function call.
+ if (MD->getParent()->hasAttr<FinalAttr>())
+ return true;
+
+ Base = skipNoOpCastsAndParens(Base);
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ // The base is a local variable or parameter; if it has record
+ // (non-reference) type, we know its dynamic type and can devirtualize.
+ return VD->getType()->isRecordType();
+ }
+
+ return false;
+ }
+
+ // We can devirtualize calls on an object accessed by a class member access
+ // expression, since by C++11 [basic.life]p6 we know that it can't refer to
+ // a derived class object constructed in the same location.
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
+ if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
+ return VD->getType()->isRecordType();
+
+ // We can always devirtualize calls on temporary object expressions.
+ if (isa<CXXConstructExpr>(Base))
+ return true;
+
+ // And calls on bound temporaries.
+ if (isa<CXXBindTemporaryExpr>(Base))
+ return true;
+
+ // Check if this is a call expr that returns a record type.
+ if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
+ return CE->getCallReturnType(getContext())->isRecordType();
+
+ // We can't devirtualize the call.
+ return false;
+}
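+
+// Illustrative inputs this predicate accepts (an editor's sketch, not part
+// of the original change):
+//
+//   B b; b.f();        // DeclRefExpr of record type
+//   s.member.f();      // MemberExpr of record type
+//   B().f();           // temporary (CXXConstructExpr)
+//   make_b().f();      // call returning a record by value
+//
+// A call through a plain 'B *' is rejected unless the method, its class, or
+// the best dynamic class type of the base is marked 'final'.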
+
+void CodeGenFunction::EmitForwardingCallToLambda(
+ const CXXMethodDecl *callOperator,
+ CallArgList &callArgs) {
+ // Get the address of the call operator.
+ const CGFunctionInfo &calleeFnInfo =
+ CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
+ llvm::Value *callee =
+ CGM.GetAddrOfFunction(GlobalDecl(callOperator),
+ CGM.getTypes().GetFunctionType(calleeFnInfo));
+
+ // Prepare the return slot.
+ const FunctionProtoType *FPT =
+ callOperator->getType()->castAs<FunctionProtoType>();
+ QualType resultType = FPT->getReturnType();
+ ReturnValueSlot returnSlot;
+ if (!resultType->isVoidType() &&
+ calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ !hasScalarEvaluationKind(calleeFnInfo.getReturnType()))
+ returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());
+
+ // We don't need to separately arrange the call arguments because
+ // the call can't be variadic anyway --- it's impossible to forward
+ // variadic arguments.
+
+ // Now emit our call.
+ RValue RV = EmitCall(calleeFnInfo, callee, returnSlot,
+ callArgs, callOperator);
+
+ // If necessary, copy the returned value into the slot.
+ if (!resultType->isVoidType() && returnSlot.isNull())
+ EmitReturnOfRValue(RV, resultType);
+ else
+ EmitBranchThroughCleanup(ReturnBlock);
+}
+
+void CodeGenFunction::EmitLambdaBlockInvokeBody() {
+ const BlockDecl *BD = BlockInfo->getBlockDecl();
+ const VarDecl *variable = BD->capture_begin()->getVariable();
+ const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();
+
+ // Start building arguments for forwarding call
+ CallArgList CallArgs;
+
+  QualType ThisType =
+      getContext().getPointerType(getContext().getRecordType(Lambda));
+ Address ThisPtr = GetAddrOfBlockDecl(variable, false);
+ CallArgs.add(RValue::get(ThisPtr.getPointer()), ThisType);
+
+ // Add the rest of the parameters.
+ for (auto param : BD->params())
+ EmitDelegateCallArg(CallArgs, param, param->getLocStart());
+
+ assert(!Lambda->isGenericLambda() &&
+ "generic lambda interconversion to block not implemented");
+ EmitForwardingCallToLambda(Lambda->getLambdaCallOperator(), CallArgs);
+}
+
+void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
+ if (cast<CXXMethodDecl>(CurCodeDecl)->isVariadic()) {
+ // FIXME: Making this work correctly is nasty because it requires either
+ // cloning the body of the call operator or making the call operator forward.
+ CGM.ErrorUnsupported(CurCodeDecl, "lambda conversion to variadic function");
+ return;
+ }
+
+ EmitFunctionBody(Args, cast<FunctionDecl>(CurGD.getDecl())->getBody());
+}
+
+void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
+ const CXXRecordDecl *Lambda = MD->getParent();
+
+ // Start building arguments for forwarding call
+ CallArgList CallArgs;
+
+  QualType ThisType =
+      getContext().getPointerType(getContext().getRecordType(Lambda));
+ llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType));
+ CallArgs.add(RValue::get(ThisPtr), ThisType);
+
+ // Add the rest of the parameters.
+ for (auto Param : MD->params())
+ EmitDelegateCallArg(CallArgs, Param, Param->getLocStart());
+
+ const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
+ // For a generic lambda, find the corresponding call operator specialization
+ // to which the call to the static-invoker shall be forwarded.
+ if (Lambda->isGenericLambda()) {
+ assert(MD->isFunctionTemplateSpecialization());
+ const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
+    FunctionTemplateDecl *CallOpTemplate =
+        CallOp->getDescribedFunctionTemplate();
+ void *InsertPos = nullptr;
+ FunctionDecl *CorrespondingCallOpSpecialization =
+ CallOpTemplate->findSpecialization(TAL->asArray(), InsertPos);
+ assert(CorrespondingCallOpSpecialization);
+ CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
+ }
+ EmitForwardingCallToLambda(CallOp, CallArgs);
+}
+
+void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
+ if (MD->isVariadic()) {
+ // FIXME: Making this work correctly is nasty because it requires either
+ // cloning the body of the call operator or making the call operator forward.
+ CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
+ return;
+ }
+
+ EmitLambdaDelegatingInvokeBody(MD);
+}
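+
+// Editor's illustration: the static invokers emitted here back conversions
+// such as
+//
+//   int (*fp)(int) = [](int x) { return x + 1; };
+//
+// where the synthesized invoker forwards to the lambda's call operator; the
+// 'this' argument is undef, which is harmless because the lambda is
+// captureless.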
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp
new file mode 100644
index 0000000..ba7dcf7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp
@@ -0,0 +1,1211 @@
+//===--- CGCleanup.cpp - Bookkeeping and code emission for cleanups -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code dealing with the IR generation for cleanups
+// and related information.
+//
+// A "cleanup" is a piece of code which needs to be executed whenever
+// control transfers out of a particular scope. This can be
+// conditionalized to occur only on exceptional control flow, only on
+// normal control flow, or both.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCleanup.h"
+#include "CodeGenFunction.h"
+#include "llvm/Support/SaveAndRestore.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) {
+ if (rv.isScalar())
+ return DominatingLLVMValue::needsSaving(rv.getScalarVal());
+ if (rv.isAggregate())
+ return DominatingLLVMValue::needsSaving(rv.getAggregatePointer());
+ return true;
+}
+
+DominatingValue<RValue>::saved_type
+DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
+ if (rv.isScalar()) {
+ llvm::Value *V = rv.getScalarVal();
+
+ // These automatically dominate and don't need to be saved.
+ if (!DominatingLLVMValue::needsSaving(V))
+ return saved_type(V, ScalarLiteral);
+
+ // Everything else needs an alloca.
+ Address addr =
+ CGF.CreateDefaultAlignTempAlloca(V->getType(), "saved-rvalue");
+ CGF.Builder.CreateStore(V, addr);
+ return saved_type(addr.getPointer(), ScalarAddress);
+ }
+
+ if (rv.isComplex()) {
+ CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
+ llvm::Type *ComplexTy =
+ llvm::StructType::get(V.first->getType(), V.second->getType(),
+ (void*) nullptr);
+ Address addr = CGF.CreateDefaultAlignTempAlloca(ComplexTy, "saved-complex");
+ CGF.Builder.CreateStore(V.first,
+ CGF.Builder.CreateStructGEP(addr, 0, CharUnits()));
+ CharUnits offset = CharUnits::fromQuantity(
+ CGF.CGM.getDataLayout().getTypeAllocSize(V.first->getType()));
+ CGF.Builder.CreateStore(V.second,
+ CGF.Builder.CreateStructGEP(addr, 1, offset));
+ return saved_type(addr.getPointer(), ComplexAddress);
+ }
+
+ assert(rv.isAggregate());
+ Address V = rv.getAggregateAddress(); // TODO: volatile?
+ if (!DominatingLLVMValue::needsSaving(V.getPointer()))
+ return saved_type(V.getPointer(), AggregateLiteral,
+ V.getAlignment().getQuantity());
+
+ Address addr =
+ CGF.CreateTempAlloca(V.getType(), CGF.getPointerAlign(), "saved-rvalue");
+ CGF.Builder.CreateStore(V.getPointer(), addr);
+ return saved_type(addr.getPointer(), AggregateAddress,
+ V.getAlignment().getQuantity());
+}
+
+/// Given a saved r-value produced by save(), emit the code necessary
+/// to restore it to usability at the current insertion point.
+RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
+ auto getSavingAddress = [&](llvm::Value *value) {
+ auto alignment = cast<llvm::AllocaInst>(value)->getAlignment();
+ return Address(value, CharUnits::fromQuantity(alignment));
+ };
+ switch (K) {
+ case ScalarLiteral:
+ return RValue::get(Value);
+ case ScalarAddress:
+ return RValue::get(CGF.Builder.CreateLoad(getSavingAddress(Value)));
+ case AggregateLiteral:
+ return RValue::getAggregate(Address(Value, CharUnits::fromQuantity(Align)));
+ case AggregateAddress: {
+ auto addr = CGF.Builder.CreateLoad(getSavingAddress(Value));
+ return RValue::getAggregate(Address(addr, CharUnits::fromQuantity(Align)));
+ }
+ case ComplexAddress: {
+ Address address = getSavingAddress(Value);
+ llvm::Value *real = CGF.Builder.CreateLoad(
+ CGF.Builder.CreateStructGEP(address, 0, CharUnits()));
+ CharUnits offset = CharUnits::fromQuantity(
+ CGF.CGM.getDataLayout().getTypeAllocSize(real->getType()));
+ llvm::Value *imag = CGF.Builder.CreateLoad(
+ CGF.Builder.CreateStructGEP(address, 1, offset));
+ return RValue::getComplex(real, imag);
+ }
+ }
+
+ llvm_unreachable("bad saved r-value kind");
+}
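+
+// Editor's sketch of why this is needed: a cleanup pushed inside one arm of
+// 'cond ? f(T()) : g()' may refer to values computed only on that arm; such
+// values are spilled via save() so that restore() can rematerialize them
+// when the cleanup is eventually emitted on a dominating path.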
+
+/// Push an entry of the given size onto this protected-scope stack.
+char *EHScopeStack::allocate(size_t Size) {
+ Size = llvm::RoundUpToAlignment(Size, ScopeStackAlignment);
+ if (!StartOfBuffer) {
+ unsigned Capacity = 1024;
+ while (Capacity < Size) Capacity *= 2;
+ StartOfBuffer = new char[Capacity];
+ StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
+ } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
+ unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
+ unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);
+
+ unsigned NewCapacity = CurrentCapacity;
+ do {
+ NewCapacity *= 2;
+ } while (NewCapacity < UsedCapacity + Size);
+
+ char *NewStartOfBuffer = new char[NewCapacity];
+ char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
+ char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
+ memcpy(NewStartOfData, StartOfData, UsedCapacity);
+ delete [] StartOfBuffer;
+ StartOfBuffer = NewStartOfBuffer;
+ EndOfBuffer = NewEndOfBuffer;
+ StartOfData = NewStartOfData;
+ }
+
+ assert(StartOfBuffer + Size <= StartOfData);
+ StartOfData -= Size;
+ return StartOfData;
+}
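+
+// Note that the buffer is filled from the end toward the start: StartOfData
+// moves downward as scopes are pushed, so begin() always denotes the
+// innermost (most recently pushed) scope.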
+
+void EHScopeStack::deallocate(size_t Size) {
+ StartOfData += llvm::RoundUpToAlignment(Size, ScopeStackAlignment);
+}
+
+bool EHScopeStack::containsOnlyLifetimeMarkers(
+ EHScopeStack::stable_iterator Old) const {
+ for (EHScopeStack::iterator it = begin(); stabilize(it) != Old; it++) {
+ EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*it);
+ if (!cleanup || !cleanup->isLifetimeMarker())
+ return false;
+ }
+
+ return true;
+}
+
+EHScopeStack::stable_iterator
+EHScopeStack::getInnermostActiveNormalCleanup() const {
+ for (stable_iterator si = getInnermostNormalCleanup(), se = stable_end();
+ si != se; ) {
+ EHCleanupScope &cleanup = cast<EHCleanupScope>(*find(si));
+ if (cleanup.isActive()) return si;
+ si = cleanup.getEnclosingNormalCleanup();
+ }
+ return stable_end();
+}
+
+
+void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
+ char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
+ bool IsNormalCleanup = Kind & NormalCleanup;
+ bool IsEHCleanup = Kind & EHCleanup;
+ bool IsActive = !(Kind & InactiveCleanup);
+ EHCleanupScope *Scope =
+ new (Buffer) EHCleanupScope(IsNormalCleanup,
+ IsEHCleanup,
+ IsActive,
+ Size,
+ BranchFixups.size(),
+ InnermostNormalCleanup,
+ InnermostEHScope);
+ if (IsNormalCleanup)
+ InnermostNormalCleanup = stable_begin();
+ if (IsEHCleanup)
+ InnermostEHScope = stable_begin();
+
+ return Scope->getCleanupBuffer();
+}
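+
+// Callers normally reach this through the templated wrapper, roughly
+// (illustrative; 'MyCleanup' stands for some EHScopeStack::Cleanup
+// subclass):
+//
+//   CGF.EHStack.pushCleanup<MyCleanup>(NormalAndEHCleanup, args...);
+//
+// which placement-news the cleanup object into the buffer returned here.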
+
+void EHScopeStack::popCleanup() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ assert(isa<EHCleanupScope>(*begin()));
+ EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
+ InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
+ InnermostEHScope = Cleanup.getEnclosingEHScope();
+ deallocate(Cleanup.getAllocatedSize());
+
+ // Destroy the cleanup.
+ Cleanup.Destroy();
+
+ // Check whether we can shrink the branch-fixups stack.
+ if (!BranchFixups.empty()) {
+ // If we no longer have any normal cleanups, all the fixups are
+ // complete.
+ if (!hasNormalCleanups())
+ BranchFixups.clear();
+
+ // Otherwise we can still trim out unnecessary nulls.
+ else
+ popNullFixups();
+ }
+}
+
+EHFilterScope *EHScopeStack::pushFilter(unsigned numFilters) {
+ assert(getInnermostEHScope() == stable_end());
+ char *buffer = allocate(EHFilterScope::getSizeForNumFilters(numFilters));
+ EHFilterScope *filter = new (buffer) EHFilterScope(numFilters);
+ InnermostEHScope = stable_begin();
+ return filter;
+}
+
+void EHScopeStack::popFilter() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ EHFilterScope &filter = cast<EHFilterScope>(*begin());
+ deallocate(EHFilterScope::getSizeForNumFilters(filter.getNumFilters()));
+
+ InnermostEHScope = filter.getEnclosingEHScope();
+}
+
+EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) {
+ char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers));
+ EHCatchScope *scope =
+ new (buffer) EHCatchScope(numHandlers, InnermostEHScope);
+ InnermostEHScope = stable_begin();
+ return scope;
+}
+
+void EHScopeStack::pushTerminate() {
+ char *Buffer = allocate(EHTerminateScope::getSize());
+ new (Buffer) EHTerminateScope(InnermostEHScope);
+ InnermostEHScope = stable_begin();
+}
+
+/// Remove any 'null' fixups on the stack. However, we can't pop more
+/// fixups than the fixup depth on the innermost normal cleanup, or
+/// else fixups that we try to add to that cleanup will end up in the
+/// wrong place. We *could* try to shrink fixup depths, but that's
+/// actually a lot of work for little benefit.
+void EHScopeStack::popNullFixups() {
+ // We expect this to only be called when there's still an innermost
+ // normal cleanup; otherwise there really shouldn't be any fixups.
+ assert(hasNormalCleanups());
+
+ EHScopeStack::iterator it = find(InnermostNormalCleanup);
+ unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
+ assert(BranchFixups.size() >= MinSize && "fixup stack out of order");
+
+ while (BranchFixups.size() > MinSize &&
+ BranchFixups.back().Destination == nullptr)
+ BranchFixups.pop_back();
+}
+
+void CodeGenFunction::initFullExprCleanup() {
+ // Create a variable to decide whether the cleanup needs to be run.
+ Address active = CreateTempAlloca(Builder.getInt1Ty(), CharUnits::One(),
+ "cleanup.cond");
+
+ // Initialize it to false at a site that's guaranteed to be run
+ // before each evaluation.
+ setBeforeOutermostConditional(Builder.getFalse(), active);
+
+ // Initialize it to true at the current location.
+ Builder.CreateStore(Builder.getTrue(), active);
+
+ // Set that as the active flag in the cleanup.
+ EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
+ assert(!cleanup.hasActiveFlag() && "cleanup already has active flag?");
+ cleanup.setActiveFlag(active);
+
+ if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup();
+ if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup();
+}
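+
+// Example trigger (editor's sketch): in 'b ? S("x") : t' the temporary's
+// cleanup is pushed from inside a conditional branch, so the i1
+// "cleanup.cond" flag set up above decides at scope exit whether the
+// destructor actually runs.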
+
+void EHScopeStack::Cleanup::anchor() {}
+
+static void createStoreInstBefore(llvm::Value *value, Address addr,
+ llvm::Instruction *beforeInst) {
+ auto store = new llvm::StoreInst(value, addr.getPointer(), beforeInst);
+ store->setAlignment(addr.getAlignment().getQuantity());
+}
+
+static llvm::LoadInst *createLoadInstBefore(Address addr, const Twine &name,
+ llvm::Instruction *beforeInst) {
+ auto load = new llvm::LoadInst(addr.getPointer(), name, beforeInst);
+ load->setAlignment(addr.getAlignment().getQuantity());
+ return load;
+}
+
+/// All the branch fixups on the EH stack have propagated out past the
+/// outermost normal cleanup; resolve them all by adding cases to the
+/// given switch instruction.
+static void ResolveAllBranchFixups(CodeGenFunction &CGF,
+ llvm::SwitchInst *Switch,
+ llvm::BasicBlock *CleanupEntry) {
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;
+
+ for (unsigned I = 0, E = CGF.EHStack.getNumBranchFixups(); I != E; ++I) {
+ // Skip this fixup if its destination isn't set.
+ BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I);
+ if (Fixup.Destination == nullptr) continue;
+
+ // If there isn't an OptimisticBranchBlock, then InitialBranch is
+ // still pointing directly to its destination; forward it to the
+ // appropriate cleanup entry. This is required in the specific
+ // case of
+ // { std::string s; goto lbl; }
+ // lbl:
+ // i.e. where there's an unresolved fixup inside a single cleanup
+ // entry which we're currently popping.
+ if (Fixup.OptimisticBranchBlock == nullptr) {
+ createStoreInstBefore(CGF.Builder.getInt32(Fixup.DestinationIndex),
+ CGF.getNormalCleanupDestSlot(),
+ Fixup.InitialBranch);
+ Fixup.InitialBranch->setSuccessor(0, CleanupEntry);
+ }
+
+ // Don't add this case to the switch statement twice.
+ if (!CasesAdded.insert(Fixup.Destination).second)
+ continue;
+
+ Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex),
+ Fixup.Destination);
+ }
+
+ CGF.EHStack.clearFixups();
+}
+
+/// Transitions the terminator of the given exit-block of a cleanup to
+/// be a cleanup switch.
+static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
+ llvm::BasicBlock *Block) {
+ // If it's a branch, turn it into a switch whose default
+ // destination is its original target.
+ llvm::TerminatorInst *Term = Block->getTerminator();
+ assert(Term && "can't transition block without terminator");
+
+ if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
+ assert(Br->isUnconditional());
+ auto Load = createLoadInstBefore(CGF.getNormalCleanupDestSlot(),
+ "cleanup.dest", Term);
+ llvm::SwitchInst *Switch =
+ llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
+ Br->eraseFromParent();
+ return Switch;
+ } else {
+ return cast<llvm::SwitchInst>(Term);
+ }
+}
+
+void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
+ assert(Block && "resolving a null target block");
+ if (!EHStack.getNumBranchFixups()) return;
+
+ assert(EHStack.hasNormalCleanups() &&
+ "branch fixups exist with no normal cleanups on stack");
+
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
+ bool ResolvedAny = false;
+
+ for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
+ // Skip this fixup if its destination doesn't match.
+ BranchFixup &Fixup = EHStack.getBranchFixup(I);
+ if (Fixup.Destination != Block) continue;
+
+ Fixup.Destination = nullptr;
+ ResolvedAny = true;
+
+    // If it doesn't have an optimistic branch block, InitialBranch is
+    // already pointing to the right place.
+ llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
+ if (!BranchBB)
+ continue;
+
+ // Don't process the same optimistic branch block twice.
+ if (!ModifiedOptimisticBlocks.insert(BranchBB).second)
+ continue;
+
+ llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);
+
+ // Add a case to the switch.
+ Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
+ }
+
+ if (ResolvedAny)
+ EHStack.popNullFixups();
+}
+
+/// Pops cleanup blocks until the given savepoint is reached.
+void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
+ assert(Old.isValid());
+
+ while (EHStack.stable_begin() != Old) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
+
+ // As long as Old strictly encloses the scope's enclosing normal
+ // cleanup, we're going to emit another normal cleanup which
+ // fallthrough can propagate through.
+ bool FallThroughIsBranchThrough =
+ Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());
+
+ PopCleanupBlock(FallThroughIsBranchThrough);
+ }
+}
+
+/// Pops cleanup blocks until the given savepoint is reached, then adds the
+/// cleanups from the given savepoint in the lifetime-extended cleanups stack.
+void
+CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old,
+ size_t OldLifetimeExtendedSize) {
+ PopCleanupBlocks(Old);
+
+ // Move our deferred cleanups onto the EH stack.
+ for (size_t I = OldLifetimeExtendedSize,
+ E = LifetimeExtendedCleanupStack.size(); I != E; /**/) {
+ // Alignment should be guaranteed by the vptrs in the individual cleanups.
+ assert((I % llvm::alignOf<LifetimeExtendedCleanupHeader>() == 0) &&
+ "misaligned cleanup stack entry");
+
+ LifetimeExtendedCleanupHeader &Header =
+ reinterpret_cast<LifetimeExtendedCleanupHeader&>(
+ LifetimeExtendedCleanupStack[I]);
+ I += sizeof(Header);
+
+ EHStack.pushCopyOfCleanup(Header.getKind(),
+ &LifetimeExtendedCleanupStack[I],
+ Header.getSize());
+ I += Header.getSize();
+ }
+ LifetimeExtendedCleanupStack.resize(OldLifetimeExtendedSize);
+}
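+
+// Example (editor's note): lifetime extension such as
+//
+//   const T &r = T();
+//
+// defers the temporary's destructor onto the lifetime-extended stack; the
+// deferred entries are replayed onto the EH stack here so the destructor
+// runs at the end of the reference's scope.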
+
+static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
+ EHCleanupScope &Scope) {
+ assert(Scope.isNormalCleanup());
+ llvm::BasicBlock *Entry = Scope.getNormalBlock();
+ if (!Entry) {
+ Entry = CGF.createBasicBlock("cleanup");
+ Scope.setNormalBlock(Entry);
+ }
+ return Entry;
+}
+
+/// Attempts to reduce a cleanup's entry block to a fallthrough. This
+/// is basically llvm::MergeBlockIntoPredecessor, except
+/// simplified/optimized for the tighter constraints on cleanup blocks.
+///
+/// Returns the new block, whatever it is.
+static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
+ llvm::BasicBlock *Entry) {
+ llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
+ if (!Pred) return Entry;
+
+ llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
+ if (!Br || Br->isConditional()) return Entry;
+ assert(Br->getSuccessor(0) == Entry);
+
+ // If we were previously inserting at the end of the cleanup entry
+ // block, we'll need to continue inserting at the end of the
+ // predecessor.
+ bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
+ assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());
+
+ // Kill the branch.
+ Br->eraseFromParent();
+
+ // Replace all uses of the entry with the predecessor, in case there
+ // are phis in the cleanup.
+ Entry->replaceAllUsesWith(Pred);
+
+ // Merge the blocks.
+ Pred->getInstList().splice(Pred->end(), Entry->getInstList());
+
+ // Kill the entry block.
+ Entry->eraseFromParent();
+
+ if (WasInsertBlock)
+ CGF.Builder.SetInsertPoint(Pred);
+
+ return Pred;
+}
+
+static void EmitCleanup(CodeGenFunction &CGF,
+ EHScopeStack::Cleanup *Fn,
+ EHScopeStack::Cleanup::Flags flags,
+ Address ActiveFlag) {
+ // If there's an active flag, load it and skip the cleanup if it's
+ // false.
+ llvm::BasicBlock *ContBB = nullptr;
+ if (ActiveFlag.isValid()) {
+ ContBB = CGF.createBasicBlock("cleanup.done");
+ llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action");
+ llvm::Value *IsActive
+ = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active");
+ CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB);
+ CGF.EmitBlock(CleanupBB);
+ }
+
+ // Ask the cleanup to emit itself.
+ Fn->Emit(CGF, flags);
+ assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
+
+ // Emit the continuation block if there was an active flag.
+ if (ActiveFlag.isValid())
+ CGF.EmitBlock(ContBB);
+}
+
+static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
+ llvm::BasicBlock *From,
+ llvm::BasicBlock *To) {
+ // Exit is the exit block of a cleanup, so it always terminates in
+ // an unconditional branch or a switch.
+ llvm::TerminatorInst *Term = Exit->getTerminator();
+
+ if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
+ assert(Br->isUnconditional() && Br->getSuccessor(0) == From);
+ Br->setSuccessor(0, To);
+ } else {
+ llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term);
+ for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I)
+ if (Switch->getSuccessor(I) == From)
+ Switch->setSuccessor(I, To);
+ }
+}
+
+/// We don't need a normal entry block for the given cleanup.
+/// Optimistic fixup branches can cause such a block to come into
+/// existence anyway; if so, destroy it.
+///
+/// The validity of this transformation is very much specific to the
+/// exact ways in which we form branches to cleanup entries.
+static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
+ EHCleanupScope &scope) {
+ llvm::BasicBlock *entry = scope.getNormalBlock();
+ if (!entry) return;
+
+ // Replace all the uses with unreachable.
+ llvm::BasicBlock *unreachableBB = CGF.getUnreachableBlock();
+ for (llvm::BasicBlock::use_iterator
+ i = entry->use_begin(), e = entry->use_end(); i != e; ) {
+ llvm::Use &use = *i;
+ ++i;
+
+ use.set(unreachableBB);
+
+ // The only uses should be fixup switches.
+ llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser());
+ if (si->getNumCases() == 1 && si->getDefaultDest() == unreachableBB) {
+ // Replace the switch with a branch.
+ llvm::BranchInst::Create(si->case_begin().getCaseSuccessor(), si);
+
+ // The switch operand is a load from the cleanup-dest alloca.
+ llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition());
+
+ // Destroy the switch.
+ si->eraseFromParent();
+
+ // Destroy the load.
+ assert(condition->getOperand(0) == CGF.NormalCleanupDest);
+ assert(condition->use_empty());
+ condition->eraseFromParent();
+ }
+ }
+
+ assert(entry->use_empty());
+ delete entry;
+}
+
+/// Pops a cleanup block. If the block includes a normal cleanup, the
+/// current insertion point is threaded through the cleanup, as are
+/// any branch fixups on the cleanup.
+void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
+ assert(!EHStack.empty() && "cleanup stack is empty!");
+ assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
+ assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
+
+ // Remember activation information.
+ bool IsActive = Scope.isActive();
+ Address NormalActiveFlag =
+ Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag()
+ : Address::invalid();
+ Address EHActiveFlag =
+ Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag()
+ : Address::invalid();
+
+ // Check whether we need an EH cleanup. This is only true if we've
+ // generated a lazy EH cleanup block.
+ llvm::BasicBlock *EHEntry = Scope.getCachedEHDispatchBlock();
+ assert(Scope.hasEHBranches() == (EHEntry != nullptr));
+ bool RequiresEHCleanup = (EHEntry != nullptr);
+ EHScopeStack::stable_iterator EHParent = Scope.getEnclosingEHScope();
+
+ // Check the three conditions which might require a normal cleanup:
+
+ // - whether there are branch fix-ups through this cleanup
+ unsigned FixupDepth = Scope.getFixupDepth();
+ bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;
+
+ // - whether there are branch-throughs or branch-afters
+ bool HasExistingBranches = Scope.hasBranches();
+
+ // - whether there's a fallthrough
+ llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
+ bool HasFallthrough = (FallthroughSource != nullptr && IsActive);
+
+ // Branch-through fall-throughs leave the insertion point set to the
+ // end of the last cleanup, which points to the current scope. The
+ // rest of IR gen doesn't need to worry about this; it only happens
+ // during the execution of PopCleanupBlocks().
+ bool HasPrebranchedFallthrough =
+ (FallthroughSource && FallthroughSource->getTerminator());
+
+ // If this is a normal cleanup, then having a prebranched
+ // fallthrough implies that the fallthrough source unconditionally
+ // jumps here.
+ assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough ||
+ (Scope.getNormalBlock() &&
+ FallthroughSource->getTerminator()->getSuccessor(0)
+ == Scope.getNormalBlock()));
+
+ bool RequiresNormalCleanup = false;
+ if (Scope.isNormalCleanup() &&
+ (HasFixups || HasExistingBranches || HasFallthrough)) {
+ RequiresNormalCleanup = true;
+ }
+
+ // If we have a prebranched fallthrough into an inactive normal
+ // cleanup, rewrite it so that it leads to the appropriate place.
+ if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) {
+ llvm::BasicBlock *prebranchDest;
+
+ // If the prebranch is semantically branching through the next
+ // cleanup, just forward it to the next block, leaving the
+ // insertion point in the prebranched block.
+ if (FallthroughIsBranchThrough) {
+ EHScope &enclosing = *EHStack.find(Scope.getEnclosingNormalCleanup());
+ prebranchDest = CreateNormalEntry(*this, cast<EHCleanupScope>(enclosing));
+
+ // Otherwise, we need to make a new block. If the normal cleanup
+ // isn't being used at all, we could actually reuse the normal
+ // entry block, but this is simpler, and it avoids conflicts with
+ // dead optimistic fixup branches.
+ } else {
+ prebranchDest = createBasicBlock("forwarded-prebranch");
+ EmitBlock(prebranchDest);
+ }
+
+ llvm::BasicBlock *normalEntry = Scope.getNormalBlock();
+ assert(normalEntry && !normalEntry->use_empty());
+
+ ForwardPrebranchedFallthrough(FallthroughSource,
+ normalEntry, prebranchDest);
+ }
+
+ // If we don't need the cleanup at all, we're done.
+ if (!RequiresNormalCleanup && !RequiresEHCleanup) {
+ destroyOptimisticNormalEntry(*this, Scope);
+ EHStack.popCleanup(); // safe because there are no fixups
+ assert(EHStack.getNumBranchFixups() == 0 ||
+ EHStack.hasNormalCleanups());
+ return;
+ }
+
+ // Copy the cleanup emission data out. This uses either a stack
+ // array or malloc'd memory, depending on the size, which is
+ // behavior that SmallVector would provide, if we could use it
+ // here. Unfortunately, if you ask for a SmallVector<char>, the
+ // alignment isn't sufficient.
+ auto *CleanupSource = reinterpret_cast<char *>(Scope.getCleanupBuffer());
+  llvm::AlignedCharArray<EHScopeStack::ScopeStackAlignment,
+                         8 * sizeof(void *)> CleanupBufferStack;
+ std::unique_ptr<char[]> CleanupBufferHeap;
+ size_t CleanupSize = Scope.getCleanupSize();
+ EHScopeStack::Cleanup *Fn;
+
+ if (CleanupSize <= sizeof(CleanupBufferStack)) {
+ memcpy(CleanupBufferStack.buffer, CleanupSource, CleanupSize);
+ Fn = reinterpret_cast<EHScopeStack::Cleanup *>(CleanupBufferStack.buffer);
+ } else {
+ CleanupBufferHeap.reset(new char[CleanupSize]);
+ memcpy(CleanupBufferHeap.get(), CleanupSource, CleanupSize);
+ Fn = reinterpret_cast<EHScopeStack::Cleanup *>(CleanupBufferHeap.get());
+ }
+
+ EHScopeStack::Cleanup::Flags cleanupFlags;
+ if (Scope.isNormalCleanup())
+ cleanupFlags.setIsNormalCleanupKind();
+ if (Scope.isEHCleanup())
+ cleanupFlags.setIsEHCleanupKind();
+
+ if (!RequiresNormalCleanup) {
+ destroyOptimisticNormalEntry(*this, Scope);
+ EHStack.popCleanup();
+ } else {
+ // If we have a fallthrough and no other need for the cleanup,
+ // emit it directly.
+ if (HasFallthrough && !HasPrebranchedFallthrough &&
+ !HasFixups && !HasExistingBranches) {
+
+ destroyOptimisticNormalEntry(*this, Scope);
+ EHStack.popCleanup();
+
+ EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);
+
+ // Otherwise, the best approach is to thread everything through
+ // the cleanup block and then try to clean up after ourselves.
+ } else {
+ // Force the entry block to exist.
+ llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);
+
+ // I. Set up the fallthrough edge in.
+
+ CGBuilderTy::InsertPoint savedInactiveFallthroughIP;
+
+ // If there's a fallthrough, we need to store the cleanup
+ // destination index. For fall-throughs this is always zero.
+ if (HasFallthrough) {
+ if (!HasPrebranchedFallthrough)
+ Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());
+
+ // Otherwise, save and clear the IP if we don't have fallthrough
+ // because the cleanup is inactive.
+ } else if (FallthroughSource) {
+ assert(!IsActive && "source without fallthrough for active cleanup");
+ savedInactiveFallthroughIP = Builder.saveAndClearIP();
+ }
+
+ // II. Emit the entry block. This implicitly branches to it if
+ // we have fallthrough. All the fixups and existing branches
+ // should already be branched to it.
+ EmitBlock(NormalEntry);
+
+ // III. Figure out where we're going and build the cleanup
+ // epilogue.
+
+ bool HasEnclosingCleanups =
+ (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());
+
+ // Compute the branch-through dest if we need it:
+ // - if there are branch-throughs threaded through the scope
+ // - if fall-through is a branch-through
+ // - if there are fixups that will be optimistically forwarded
+ // to the enclosing cleanup
+ llvm::BasicBlock *BranchThroughDest = nullptr;
+ if (Scope.hasBranchThroughs() ||
+ (FallthroughSource && FallthroughIsBranchThrough) ||
+ (HasFixups && HasEnclosingCleanups)) {
+ assert(HasEnclosingCleanups);
+ EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
+ BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
+ }
+
+ llvm::BasicBlock *FallthroughDest = nullptr;
+ SmallVector<llvm::Instruction*, 2> InstsToAppend;
+
+ // If there's exactly one branch-after and no other threads,
+ // we can route it without a switch.
+ if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
+ Scope.getNumBranchAfters() == 1) {
+ assert(!BranchThroughDest || !IsActive);
+
+ // Clean up the possibly dead store to the cleanup dest slot.
+ llvm::Instruction *NormalCleanupDestSlot =
+ cast<llvm::Instruction>(getNormalCleanupDestSlot().getPointer());
+ if (NormalCleanupDestSlot->hasOneUse()) {
+ NormalCleanupDestSlot->user_back()->eraseFromParent();
+ NormalCleanupDestSlot->eraseFromParent();
+ NormalCleanupDest = nullptr;
+ }
+
+ llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
+ InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));
+
+ // Build a switch-out if we need it:
+ // - if there are branch-afters threaded through the scope
+ // - if fall-through is a branch-after
+ // - if there are fixups that have nowhere left to go and
+ // so must be immediately resolved
+ } else if (Scope.getNumBranchAfters() ||
+ (HasFallthrough && !FallthroughIsBranchThrough) ||
+ (HasFixups && !HasEnclosingCleanups)) {
+
+ llvm::BasicBlock *Default =
+ (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());
+
+ // TODO: base this on the number of branch-afters and fixups
+ const unsigned SwitchCapacity = 10;
+
+ llvm::LoadInst *Load =
+ createLoadInstBefore(getNormalCleanupDestSlot(), "cleanup.dest",
+ nullptr);
+ llvm::SwitchInst *Switch =
+ llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
+
+ InstsToAppend.push_back(Load);
+ InstsToAppend.push_back(Switch);
+
+ // Branch-after fallthrough.
+ if (FallthroughSource && !FallthroughIsBranchThrough) {
+ FallthroughDest = createBasicBlock("cleanup.cont");
+ if (HasFallthrough)
+ Switch->addCase(Builder.getInt32(0), FallthroughDest);
+ }
+
+ for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
+ Switch->addCase(Scope.getBranchAfterIndex(I),
+ Scope.getBranchAfterBlock(I));
+ }
+
+ // If there aren't any enclosing cleanups, we can resolve all
+ // the fixups now.
+ if (HasFixups && !HasEnclosingCleanups)
+ ResolveAllBranchFixups(*this, Switch, NormalEntry);
+ } else {
+ // We should always have a branch-through destination in this case.
+ assert(BranchThroughDest);
+ InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
+ }
+
+ // IV. Pop the cleanup and emit it.
+ EHStack.popCleanup();
+ assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);
+
+ EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);
+
+ // Append the prepared cleanup prologue from above.
+ llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
+ for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
+ NormalExit->getInstList().push_back(InstsToAppend[I]);
+
+ // Optimistically hope that any fixups will continue falling through.
+ for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
+ I < E; ++I) {
+ BranchFixup &Fixup = EHStack.getBranchFixup(I);
+ if (!Fixup.Destination) continue;
+ if (!Fixup.OptimisticBranchBlock) {
+ createStoreInstBefore(Builder.getInt32(Fixup.DestinationIndex),
+ getNormalCleanupDestSlot(),
+ Fixup.InitialBranch);
+ Fixup.InitialBranch->setSuccessor(0, NormalEntry);
+ }
+ Fixup.OptimisticBranchBlock = NormalExit;
+ }
+
+ // V. Set up the fallthrough edge out.
+
+ // Case 1: a fallthrough source exists but doesn't branch to the
+ // cleanup because the cleanup is inactive.
+ if (!HasFallthrough && FallthroughSource) {
+ // Prebranched fallthrough was forwarded earlier.
+ // Non-prebranched fallthrough doesn't need to be forwarded.
+ // Either way, all we need to do is restore the IP we cleared before.
+ assert(!IsActive);
+ Builder.restoreIP(savedInactiveFallthroughIP);
+
+ // Case 2: a fallthrough source exists and should branch to the
+ // cleanup, but we're not supposed to branch through to the next
+ // cleanup.
+ } else if (HasFallthrough && FallthroughDest) {
+ assert(!FallthroughIsBranchThrough);
+ EmitBlock(FallthroughDest);
+
+ // Case 3: a fallthrough source exists and should branch to the
+ // cleanup and then through to the next.
+ } else if (HasFallthrough) {
+ // Everything is already set up for this.
+
+ // Case 4: no fallthrough source exists.
+ } else {
+ Builder.ClearInsertionPoint();
+ }
+
+ // VI. Assorted cleaning.
+
+ // Check whether we can merge NormalEntry into a single predecessor.
+ // This might invalidate (non-IR) pointers to NormalEntry.
+ llvm::BasicBlock *NewNormalEntry =
+ SimplifyCleanupEntry(*this, NormalEntry);
+
+ // If it did invalidate those pointers, and NormalEntry was the same
+ // as NormalExit, go back and patch up the fixups.
+ if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
+ for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
+ I < E; ++I)
+ EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
+ }
+ }
+
+ assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);
+
+ // Emit the EH cleanup if required.
+ if (RequiresEHCleanup) {
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ EmitBlock(EHEntry);
+
+ llvm::BasicBlock *NextAction = getEHDispatchBlock(EHParent);
+
+ // Push a terminate scope or cleanupendpad scope around the potentially
+ // throwing cleanups. For funclet EH personalities, the cleanupendpad models
+ // program termination when cleanups throw.
+ bool PushedTerminate = false;
+ SaveAndRestore<llvm::Instruction *> RestoreCurrentFuncletPad(
+ CurrentFuncletPad);
+ llvm::CleanupPadInst *CPI = nullptr;
+ if (!EHPersonality::get(*this).usesFuncletPads()) {
+ EHStack.pushTerminate();
+ PushedTerminate = true;
+ } else {
+ llvm::Value *ParentPad = CurrentFuncletPad;
+ if (!ParentPad)
+ ParentPad = llvm::ConstantTokenNone::get(CGM.getLLVMContext());
+ CurrentFuncletPad = CPI = Builder.CreateCleanupPad(ParentPad);
+ }
+
+ // We only actually emit the cleanup code if the cleanup is either
+ // active or was used before it was deactivated.
+ if (EHActiveFlag.isValid() || IsActive) {
+ cleanupFlags.setIsForEHCleanup();
+ EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
+ }
+
+ if (CPI)
+ Builder.CreateCleanupRet(CPI, NextAction);
+ else
+ Builder.CreateBr(NextAction);
+
+ // Leave the terminate scope.
+ if (PushedTerminate)
+ EHStack.popTerminate();
+
+ Builder.restoreIP(SavedIP);
+
+ SimplifyCleanupEntry(*this, EHEntry);
+ }
+}
+
+/// isObviouslyBranchWithoutCleanups - Return true if a branch to the
+/// specified destination obviously has no cleanups to run. 'false' is always
+/// a conservatively correct answer for this method.
+bool CodeGenFunction::isObviouslyBranchWithoutCleanups(JumpDest Dest) const {
+ assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
+ && "stale jump destination");
+
+ // Calculate the innermost active normal cleanup.
+ EHScopeStack::stable_iterator TopCleanup =
+ EHStack.getInnermostActiveNormalCleanup();
+
+ // If we're not in an active normal cleanup scope, or if the
+ // destination scope is within the innermost active normal cleanup
+ // scope, we don't need to worry about fixups.
+ if (TopCleanup == EHStack.stable_end() ||
+ TopCleanup.encloses(Dest.getScopeDepth())) // works for invalid
+ return true;
+
+ // Otherwise, we might need some cleanups.
+ return false;
+}
+
+
+/// Terminate the current block by emitting a branch which might leave
+/// the current cleanup-protected scope. The target scope may not yet
+/// be known, in which case this will require a fixup.
+///
+/// As a side-effect, this method clears the insertion point.
+void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
+ assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
+ && "stale jump destination");
+
+ if (!HaveInsertPoint())
+ return;
+
+ // Create the branch.
+ llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
+
+ // Calculate the innermost active normal cleanup.
+ EHScopeStack::stable_iterator
+ TopCleanup = EHStack.getInnermostActiveNormalCleanup();
+
+ // If we're not in an active normal cleanup scope, or if the
+ // destination scope is within the innermost active normal cleanup
+ // scope, we don't need to worry about fixups.
+ if (TopCleanup == EHStack.stable_end() ||
+ TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
+ Builder.ClearInsertionPoint();
+ return;
+ }
+
+ // If we can't resolve the destination cleanup scope, just add this
+ // to the current cleanup scope as a branch fixup.
+ if (!Dest.getScopeDepth().isValid()) {
+ BranchFixup &Fixup = EHStack.addBranchFixup();
+ Fixup.Destination = Dest.getBlock();
+ Fixup.DestinationIndex = Dest.getDestIndex();
+ Fixup.InitialBranch = BI;
+ Fixup.OptimisticBranchBlock = nullptr;
+
+ Builder.ClearInsertionPoint();
+ return;
+ }
+
+ // Otherwise, thread through all the normal cleanups in scope.
+
+ // Store the index at the start.
+ llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
+ createStoreInstBefore(Index, getNormalCleanupDestSlot(), BI);
+
+ // Adjust BI to point to the first cleanup block.
+ {
+ EHCleanupScope &Scope =
+ cast<EHCleanupScope>(*EHStack.find(TopCleanup));
+ BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
+ }
+
+ // Add this destination to all the scopes involved.
+ EHScopeStack::stable_iterator I = TopCleanup;
+ EHScopeStack::stable_iterator E = Dest.getScopeDepth();
+ if (E.strictlyEncloses(I)) {
+ while (true) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
+ assert(Scope.isNormalCleanup());
+ I = Scope.getEnclosingNormalCleanup();
+
+ // If this is the last cleanup we're propagating through, tell it
+ // that there's a resolved jump moving through it.
+ if (!E.strictlyEncloses(I)) {
+ Scope.addBranchAfter(Index, Dest.getBlock());
+ break;
+ }
+
+      // Otherwise, tell the scope that there's a jump propagating
+ // through it. If this isn't new information, all the rest of
+ // the work has been done before.
+ if (!Scope.addBranchThrough(Dest.getBlock()))
+ break;
+ }
+ }
+
+ Builder.ClearInsertionPoint();
+}
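+
+// Example: for
+//
+//   { std::string s; goto done; }
+//   done: ;
+//
+// the branch is emitted before 'done' has a resolved scope depth, so it is
+// recorded above as a BranchFixup and patched later by ResolveBranchFixups.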
+
+static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack,
+ EHScopeStack::stable_iterator C) {
+ // If we needed a normal block for any reason, that counts.
+ if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock())
+ return true;
+
+ // Check whether any enclosed cleanups were needed.
+ for (EHScopeStack::stable_iterator
+ I = EHStack.getInnermostNormalCleanup();
+ I != C; ) {
+ assert(C.strictlyEncloses(I));
+ EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
+ if (S.getNormalBlock()) return true;
+ I = S.getEnclosingNormalCleanup();
+ }
+
+ return false;
+}
+
+static bool IsUsedAsEHCleanup(EHScopeStack &EHStack,
+ EHScopeStack::stable_iterator cleanup) {
+ // If we needed an EH block for any reason, that counts.
+ if (EHStack.find(cleanup)->hasEHBranches())
+ return true;
+
+ // Check whether any enclosed cleanups were needed.
+ for (EHScopeStack::stable_iterator
+ i = EHStack.getInnermostEHScope(); i != cleanup; ) {
+ assert(cleanup.strictlyEncloses(i));
+
+ EHScope &scope = *EHStack.find(i);
+ if (scope.hasEHBranches())
+ return true;
+
+ i = scope.getEnclosingEHScope();
+ }
+
+ return false;
+}
+
+enum ForActivation_t {
+ ForActivation,
+ ForDeactivation
+};
+
+/// The given cleanup block is changing activation state. Configure a
+/// cleanup variable if necessary.
+///
+/// It would be good if we had some way of determining if there were
+/// extra uses *after* the change-over point.
+static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
+ EHScopeStack::stable_iterator C,
+ ForActivation_t kind,
+ llvm::Instruction *dominatingIP) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C));
+
+ // We always need the flag if we're activating the cleanup in a
+ // conditional context, because we have to assume that the current
+ // location doesn't necessarily dominate the cleanup's code.
+ bool isActivatedInConditional =
+ (kind == ForActivation && CGF.isInConditionalBranch());
+
+ bool needFlag = false;
+
+ // Calculate whether the cleanup was used:
+
+ // - as a normal cleanup
+ if (Scope.isNormalCleanup() &&
+ (isActivatedInConditional || IsUsedAsNormalCleanup(CGF.EHStack, C))) {
+ Scope.setTestFlagInNormalCleanup();
+ needFlag = true;
+ }
+
+ // - as an EH cleanup
+ if (Scope.isEHCleanup() &&
+ (isActivatedInConditional || IsUsedAsEHCleanup(CGF.EHStack, C))) {
+ Scope.setTestFlagInEHCleanup();
+ needFlag = true;
+ }
+
+ // If it hasn't yet been used as either, we're done.
+ if (!needFlag) return;
+
+ Address var = Scope.getActiveFlag();
+ if (!var.isValid()) {
+ var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), CharUnits::One(),
+ "cleanup.isactive");
+ Scope.setActiveFlag(var);
+
+ assert(dominatingIP && "no existing variable and no dominating IP!");
+
+ // Initialize to true or false depending on whether it was
+ // active up to this point.
+ llvm::Constant *value = CGF.Builder.getInt1(kind == ForDeactivation);
+
+ // If we're in a conditional block, ignore the dominating IP and
+ // use the outermost conditional branch.
+ if (CGF.isInConditionalBranch()) {
+ CGF.setBeforeOutermostConditional(value, var);
+ } else {
+ createStoreInstBefore(value, var, dominatingIP);
+ }
+ }
+
+ CGF.Builder.CreateStore(CGF.Builder.getInt1(kind == ForActivation), var);
+}
+
+/// Activate a cleanup that was created in an inactivated state.
+void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C,
+ llvm::Instruction *dominatingIP) {
+ assert(C != EHStack.stable_end() && "activating bottom of stack?");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
+ assert(!Scope.isActive() && "double activation");
+
+ SetupCleanupBlockActivation(*this, C, ForActivation, dominatingIP);
+
+ Scope.setActive(true);
+}
+
+/// Deactivate a cleanup that was created in an active state.
+void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
+ llvm::Instruction *dominatingIP) {
+ assert(C != EHStack.stable_end() && "deactivating bottom of stack?");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
+ assert(Scope.isActive() && "double deactivation");
+
+ // If it's the top of the stack, just pop it.
+ if (C == EHStack.stable_begin()) {
+ // If it's a normal cleanup, we need to pretend that the
+ // fallthrough is unreachable.
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+ PopCleanupBlock();
+ Builder.restoreIP(SavedIP);
+ return;
+ }
+
+ // Otherwise, follow the general case.
+ SetupCleanupBlockActivation(*this, C, ForDeactivation, dominatingIP);
+
+ Scope.setActive(false);
+}
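+
+// Typical use (editor's note, not from the original change): a cleanup for a
+// partially-constructed object or a by-value argument is pushed active in
+// case something later throws, and deactivated here once the object is fully
+// constructed or ownership has passed to the callee.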
+
+Address CodeGenFunction::getNormalCleanupDestSlot() {
+ if (!NormalCleanupDest)
+ NormalCleanupDest =
+ CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
+ return Address(NormalCleanupDest, CharUnits::fromQuantity(4));
+}
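+
+// Note: the i32 stored in this slot is the index that the cleanup exit
+// switch (see PopCleanupBlock) dispatches on; index 0 is reserved for plain
+// fallthrough out of the cleanup.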
+
+/// Emits all the code to cause the given temporary to be cleaned up.
+void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
+ QualType TempType,
+ Address Ptr) {
+ pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject,
+ /*useEHCleanup*/ true);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h
new file mode 100644
index 0000000..909f00b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h
@@ -0,0 +1,642 @@
+//===-- CGCleanup.h - Classes for cleanups IR generation --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes support the generation of LLVM IR for cleanups.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGCLEANUP_H
+#define LLVM_CLANG_LIB_CODEGEN_CGCLEANUP_H
+
+#include "EHScopeStack.h"
+
+#include "Address.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+class BasicBlock;
+class Value;
+class ConstantInt;
+class AllocaInst;
+}
+
+namespace clang {
+class FunctionDecl;
+namespace CodeGen {
+class CodeGenModule;
+class CodeGenFunction;
+
+/// The MS C++ ABI needs a pointer to RTTI data plus some flags to describe the
+/// type of a catch handler, so we use this wrapper.
+struct CatchTypeInfo {
+ llvm::Constant *RTTI;
+ unsigned Flags;
+};
+
+/// A protected scope for zero-cost EH handling.
+class EHScope {
+ llvm::BasicBlock *CachedLandingPad;
+ llvm::BasicBlock *CachedEHDispatchBlock;
+
+ EHScopeStack::stable_iterator EnclosingEHScope;
+
+ class CommonBitFields {
+ friend class EHScope;
+ unsigned Kind : 3;
+ };
+ enum { NumCommonBits = 3 };
+
+protected:
+ class CatchBitFields {
+ friend class EHCatchScope;
+ unsigned : NumCommonBits;
+
+ unsigned NumHandlers : 32 - NumCommonBits;
+ };
+
+ class CleanupBitFields {
+ friend class EHCleanupScope;
+ unsigned : NumCommonBits;
+
+ /// Whether this cleanup needs to be run along normal edges.
+ unsigned IsNormalCleanup : 1;
+
+ /// Whether this cleanup needs to be run along exception edges.
+ unsigned IsEHCleanup : 1;
+
+ /// Whether this cleanup is currently active.
+ unsigned IsActive : 1;
+
+ /// Whether this cleanup is a lifetime marker
+ unsigned IsLifetimeMarker : 1;
+
+ /// Whether the normal cleanup should test the activation flag.
+ unsigned TestFlagInNormalCleanup : 1;
+
+ /// Whether the EH cleanup should test the activation flag.
+ unsigned TestFlagInEHCleanup : 1;
+
+ /// The amount of extra storage needed by the Cleanup.
+ /// Always a multiple of the scope-stack alignment.
+ unsigned CleanupSize : 12;
+
+ /// The number of fixups required by enclosing scopes (not including
+ /// this one). If this is the top cleanup scope, all the fixups
+ /// from this index onwards belong to this scope.
+    unsigned FixupDepth : 32 - 18 - NumCommonBits; // currently 11
+ };
+
+ class FilterBitFields {
+ friend class EHFilterScope;
+ unsigned : NumCommonBits;
+
+ unsigned NumFilters : 32 - NumCommonBits;
+ };
+
+ union {
+ CommonBitFields CommonBits;
+ CatchBitFields CatchBits;
+ CleanupBitFields CleanupBits;
+ FilterBitFields FilterBits;
+ };
+
+public:
+ enum Kind { Cleanup, Catch, Terminate, Filter, PadEnd };
+
+ EHScope(Kind kind, EHScopeStack::stable_iterator enclosingEHScope)
+ : CachedLandingPad(nullptr), CachedEHDispatchBlock(nullptr),
+ EnclosingEHScope(enclosingEHScope) {
+ CommonBits.Kind = kind;
+ }
+
+ Kind getKind() const { return static_cast<Kind>(CommonBits.Kind); }
+
+ llvm::BasicBlock *getCachedLandingPad() const {
+ return CachedLandingPad;
+ }
+
+ void setCachedLandingPad(llvm::BasicBlock *block) {
+ CachedLandingPad = block;
+ }
+
+ llvm::BasicBlock *getCachedEHDispatchBlock() const {
+ return CachedEHDispatchBlock;
+ }
+
+ void setCachedEHDispatchBlock(llvm::BasicBlock *block) {
+ CachedEHDispatchBlock = block;
+ }
+
+ bool hasEHBranches() const {
+ if (llvm::BasicBlock *block = getCachedEHDispatchBlock())
+ return !block->use_empty();
+ return false;
+ }
+
+ EHScopeStack::stable_iterator getEnclosingEHScope() const {
+ return EnclosingEHScope;
+ }
+};
+
+/// A scope which attempts to handle some, possibly all, types of
+/// exceptions.
+///
+/// Objective C \@finally blocks are represented using a cleanup scope
+/// after the catch scope.
+class EHCatchScope : public EHScope {
+ // In effect, we have a flexible array member
+ // Handler Handlers[0];
+ // But that's only standard in C99, not C++, so we have to do
+ // annoying pointer arithmetic instead.
+
+public:
+ struct Handler {
+ /// A type info value, or null (C++ null, not an LLVM null pointer)
+ /// for a catch-all.
+ CatchTypeInfo Type;
+
+ /// The catch handler for this type.
+ llvm::BasicBlock *Block;
+
+ bool isCatchAll() const { return Type.RTTI == nullptr; }
+ };
+
+private:
+ friend class EHScopeStack;
+
+ Handler *getHandlers() {
+ return reinterpret_cast<Handler*>(this+1);
+ }
+
+ const Handler *getHandlers() const {
+ return reinterpret_cast<const Handler*>(this+1);
+ }
+
+public:
+ static size_t getSizeForNumHandlers(unsigned N) {
+ return sizeof(EHCatchScope) + N * sizeof(Handler);
+ }
+
+ EHCatchScope(unsigned numHandlers,
+ EHScopeStack::stable_iterator enclosingEHScope)
+ : EHScope(Catch, enclosingEHScope) {
+ CatchBits.NumHandlers = numHandlers;
+ }
+
+ unsigned getNumHandlers() const {
+ return CatchBits.NumHandlers;
+ }
+
+ void setCatchAllHandler(unsigned I, llvm::BasicBlock *Block) {
+ setHandler(I, CatchTypeInfo{nullptr, 0}, Block);
+ }
+
+ void setHandler(unsigned I, llvm::Constant *Type, llvm::BasicBlock *Block) {
+ assert(I < getNumHandlers());
+ getHandlers()[I].Type = CatchTypeInfo{Type, 0};
+ getHandlers()[I].Block = Block;
+ }
+
+ void setHandler(unsigned I, CatchTypeInfo Type, llvm::BasicBlock *Block) {
+ assert(I < getNumHandlers());
+ getHandlers()[I].Type = Type;
+ getHandlers()[I].Block = Block;
+ }
+
+ const Handler &getHandler(unsigned I) const {
+ assert(I < getNumHandlers());
+ return getHandlers()[I];
+ }
+
+ // Clear all handler blocks.
+ // FIXME: it's better to always call clearHandlerBlocks in DTOR and have a
+ // 'takeHandler' or some such function which removes ownership from the
+ // EHCatchScope object if the handlers should live longer than EHCatchScope.
+ void clearHandlerBlocks() {
+ for (unsigned I = 0, N = getNumHandlers(); I != N; ++I)
+ delete getHandler(I).Block;
+ }
+
+ typedef const Handler *iterator;
+ iterator begin() const { return getHandlers(); }
+ iterator end() const { return getHandlers() + getNumHandlers(); }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Catch;
+ }
+};
+
+/// A cleanup scope which generates the cleanup blocks lazily.
+class LLVM_ALIGNAS(/*alignof(uint64_t)*/ 8) EHCleanupScope : public EHScope {
+ /// The nearest normal cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingNormal;
+
+ /// The nearest EH scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingEH;
+
+ /// The dual entry/exit block along the normal edge. This is lazily
+ /// created if needed before the cleanup is popped.
+ llvm::BasicBlock *NormalBlock;
+
+ /// An optional i1 variable indicating whether this cleanup has been
+ /// activated yet.
+ llvm::AllocaInst *ActiveFlag;
+
+ /// Extra information required for cleanups that have resolved
+ /// branches through them. This has to be allocated on the side
+  /// because everything on the cleanup stack has to be trivially
+ /// movable.
+ struct ExtInfo {
+ /// The destinations of normal branch-afters and branch-throughs.
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> Branches;
+
+ /// Normal branch-afters.
+ SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4>
+ BranchAfters;
+ };
+ mutable struct ExtInfo *ExtInfo;
+
+ struct ExtInfo &getExtInfo() {
+ if (!ExtInfo) ExtInfo = new struct ExtInfo();
+ return *ExtInfo;
+ }
+
+ const struct ExtInfo &getExtInfo() const {
+ if (!ExtInfo) ExtInfo = new struct ExtInfo();
+ return *ExtInfo;
+ }
+
+public:
+ /// Gets the size required for a lazy cleanup scope with the given
+ /// cleanup-data requirements.
+ static size_t getSizeForCleanupSize(size_t Size) {
+ return sizeof(EHCleanupScope) + Size;
+ }
+
+ size_t getAllocatedSize() const {
+ return sizeof(EHCleanupScope) + CleanupBits.CleanupSize;
+ }
+
+ EHCleanupScope(bool isNormal, bool isEH, bool isActive,
+ unsigned cleanupSize, unsigned fixupDepth,
+ EHScopeStack::stable_iterator enclosingNormal,
+ EHScopeStack::stable_iterator enclosingEH)
+ : EHScope(EHScope::Cleanup, enclosingEH), EnclosingNormal(enclosingNormal),
+ NormalBlock(nullptr), ActiveFlag(nullptr), ExtInfo(nullptr) {
+ CleanupBits.IsNormalCleanup = isNormal;
+ CleanupBits.IsEHCleanup = isEH;
+ CleanupBits.IsActive = isActive;
+ CleanupBits.IsLifetimeMarker = false;
+ CleanupBits.TestFlagInNormalCleanup = false;
+ CleanupBits.TestFlagInEHCleanup = false;
+ CleanupBits.CleanupSize = cleanupSize;
+ CleanupBits.FixupDepth = fixupDepth;
+
+ assert(CleanupBits.CleanupSize == cleanupSize && "cleanup size overflow");
+ }
+
+ void Destroy() {
+ delete ExtInfo;
+ }
+ // Objects of EHCleanupScope are not destructed. Use Destroy().
+ ~EHCleanupScope() = delete;
+
+ bool isNormalCleanup() const { return CleanupBits.IsNormalCleanup; }
+ llvm::BasicBlock *getNormalBlock() const { return NormalBlock; }
+ void setNormalBlock(llvm::BasicBlock *BB) { NormalBlock = BB; }
+
+ bool isEHCleanup() const { return CleanupBits.IsEHCleanup; }
+
+ bool isActive() const { return CleanupBits.IsActive; }
+ void setActive(bool A) { CleanupBits.IsActive = A; }
+
+ bool isLifetimeMarker() const { return CleanupBits.IsLifetimeMarker; }
+ void setLifetimeMarker() { CleanupBits.IsLifetimeMarker = true; }
+
+ bool hasActiveFlag() const { return ActiveFlag != nullptr; }
+ Address getActiveFlag() const {
+ return Address(ActiveFlag, CharUnits::One());
+ }
+ void setActiveFlag(Address Var) {
+ assert(Var.getAlignment().isOne());
+ ActiveFlag = cast<llvm::AllocaInst>(Var.getPointer());
+ }
+
+ void setTestFlagInNormalCleanup() {
+ CleanupBits.TestFlagInNormalCleanup = true;
+ }
+ bool shouldTestFlagInNormalCleanup() const {
+ return CleanupBits.TestFlagInNormalCleanup;
+ }
+
+ void setTestFlagInEHCleanup() {
+ CleanupBits.TestFlagInEHCleanup = true;
+ }
+ bool shouldTestFlagInEHCleanup() const {
+ return CleanupBits.TestFlagInEHCleanup;
+ }
+
+ unsigned getFixupDepth() const { return CleanupBits.FixupDepth; }
+ EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
+ return EnclosingNormal;
+ }
+
+ size_t getCleanupSize() const { return CleanupBits.CleanupSize; }
+ void *getCleanupBuffer() { return this + 1; }
+
+ EHScopeStack::Cleanup *getCleanup() {
+ return reinterpret_cast<EHScopeStack::Cleanup*>(getCleanupBuffer());
+ }
+
+ /// True if this cleanup scope has any branch-afters or branch-throughs.
+ bool hasBranches() const { return ExtInfo && !ExtInfo->Branches.empty(); }
+
+ /// Add a branch-after to this cleanup scope. A branch-after is a
+ /// branch from a point protected by this (normal) cleanup to a
+ /// point in the normal cleanup scope immediately containing it.
+ /// For example,
+ /// for (;;) { A a; break; }
+ /// contains a branch-after.
+ ///
+ /// Branch-afters each have their own destination out of the
+ /// cleanup, guaranteed distinct from anything else threaded through
+ /// it. Therefore branch-afters usually force a switch after the
+ /// cleanup.
+ void addBranchAfter(llvm::ConstantInt *Index,
+ llvm::BasicBlock *Block) {
+ struct ExtInfo &ExtInfo = getExtInfo();
+ if (ExtInfo.Branches.insert(Block).second)
+ ExtInfo.BranchAfters.push_back(std::make_pair(Block, Index));
+ }
+
+ /// Return the number of unique branch-afters on this scope.
+ unsigned getNumBranchAfters() const {
+ return ExtInfo ? ExtInfo->BranchAfters.size() : 0;
+ }
+
+ llvm::BasicBlock *getBranchAfterBlock(unsigned I) const {
+ assert(I < getNumBranchAfters());
+ return ExtInfo->BranchAfters[I].first;
+ }
+
+ llvm::ConstantInt *getBranchAfterIndex(unsigned I) const {
+ assert(I < getNumBranchAfters());
+ return ExtInfo->BranchAfters[I].second;
+ }
+
+ /// Add a branch-through to this cleanup scope. A branch-through is
+ /// a branch from a scope protected by this (normal) cleanup to an
+ /// enclosing scope other than the immediately-enclosing normal
+ /// cleanup scope.
+ ///
+ /// In the following example, the branch through B's scope is a
+ /// branch-through, while the branch through A's scope is a
+ /// branch-after:
+ /// for (;;) { A a; B b; break; }
+ ///
+ /// All branch-throughs have a common destination out of the
+ /// cleanup, one possibly shared with the fall-through. Therefore
+ /// branch-throughs usually don't force a switch after the cleanup.
+ ///
+ /// \return true if the branch-through was new to this scope
+ bool addBranchThrough(llvm::BasicBlock *Block) {
+ return getExtInfo().Branches.insert(Block).second;
+ }
+
+ /// Determines if this cleanup scope has any branch throughs.
+ bool hasBranchThroughs() const {
+ if (!ExtInfo) return false;
+ return (ExtInfo->BranchAfters.size() != ExtInfo->Branches.size());
+ }
+
+ static bool classof(const EHScope *Scope) {
+ return (Scope->getKind() == Cleanup);
+ }
+};
+// NOTE: there's a bunch of different data classes tacked on after an
+// EHCleanupScope. It is asserted (in EHScopeStack::pushCleanup*) that
+// they don't require greater alignment than ScopeStackAlignment. So,
+// EHCleanupScope ought to have alignment equal to that -- not more
+// (would be misaligned by the stack allocator), and not less (would
+// break the appended classes).
+static_assert(llvm::AlignOf<EHCleanupScope>::Alignment ==
+ EHScopeStack::ScopeStackAlignment,
+ "EHCleanupScope expected alignment");
+
+/// An exceptions scope which filters exceptions thrown through it.
+/// Only exceptions matching the filter types will be permitted to be
+/// thrown.
+///
+/// This is used to implement C++ exception specifications.
+class EHFilterScope : public EHScope {
+ // Essentially ends in a flexible array member:
+ // llvm::Value *FilterTypes[0];
+
+ llvm::Value **getFilters() {
+ return reinterpret_cast<llvm::Value**>(this+1);
+ }
+
+ llvm::Value * const *getFilters() const {
+ return reinterpret_cast<llvm::Value* const *>(this+1);
+ }
+
+public:
+ EHFilterScope(unsigned numFilters)
+ : EHScope(Filter, EHScopeStack::stable_end()) {
+ FilterBits.NumFilters = numFilters;
+ }
+
+ static size_t getSizeForNumFilters(unsigned numFilters) {
+ return sizeof(EHFilterScope) + numFilters * sizeof(llvm::Value*);
+ }
+
+ unsigned getNumFilters() const { return FilterBits.NumFilters; }
+
+ void setFilter(unsigned i, llvm::Value *filterValue) {
+ assert(i < getNumFilters());
+ getFilters()[i] = filterValue;
+ }
+
+ llvm::Value *getFilter(unsigned i) const {
+ assert(i < getNumFilters());
+ return getFilters()[i];
+ }
+
+ static bool classof(const EHScope *scope) {
+ return scope->getKind() == Filter;
+ }
+};
+
+/// An exceptions scope which calls std::terminate if any exception
+/// reaches it.
+class EHTerminateScope : public EHScope {
+public:
+ EHTerminateScope(EHScopeStack::stable_iterator enclosingEHScope)
+ : EHScope(Terminate, enclosingEHScope) {}
+ static size_t getSize() { return sizeof(EHTerminateScope); }
+
+ static bool classof(const EHScope *scope) {
+ return scope->getKind() == Terminate;
+ }
+};
+
+class EHPadEndScope : public EHScope {
+public:
+ EHPadEndScope(EHScopeStack::stable_iterator enclosingEHScope)
+ : EHScope(PadEnd, enclosingEHScope) {}
+ static size_t getSize() { return sizeof(EHPadEndScope); }
+
+ static bool classof(const EHScope *scope) {
+ return scope->getKind() == PadEnd;
+ }
+};
+
+/// A non-stable pointer into the scope stack.
+class EHScopeStack::iterator {
+ char *Ptr;
+
+ friend class EHScopeStack;
+ explicit iterator(char *Ptr) : Ptr(Ptr) {}
+
+public:
+ iterator() : Ptr(nullptr) {}
+
+ EHScope *get() const {
+ return reinterpret_cast<EHScope*>(Ptr);
+ }
+
+ EHScope *operator->() const { return get(); }
+ EHScope &operator*() const { return *get(); }
+
+ iterator &operator++() {
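+    // Scopes are variable-length records, so determine the size of the
+    // current scope from its kind, then advance by that many bytes rounded
+    // up to the scope-stack alignment.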
+ size_t Size;
+ switch (get()->getKind()) {
+ case EHScope::Catch:
+ Size = EHCatchScope::getSizeForNumHandlers(
+ static_cast<const EHCatchScope *>(get())->getNumHandlers());
+ break;
+
+ case EHScope::Filter:
+ Size = EHFilterScope::getSizeForNumFilters(
+ static_cast<const EHFilterScope *>(get())->getNumFilters());
+ break;
+
+ case EHScope::Cleanup:
+ Size = static_cast<const EHCleanupScope *>(get())->getAllocatedSize();
+ break;
+
+ case EHScope::Terminate:
+ Size = EHTerminateScope::getSize();
+ break;
+
+ case EHScope::PadEnd:
+ Size = EHPadEndScope::getSize();
+ break;
+ }
+ Ptr += llvm::RoundUpToAlignment(Size, ScopeStackAlignment);
+ return *this;
+ }
+
+ iterator next() {
+ iterator copy = *this;
+ ++copy;
+ return copy;
+ }
+
+ iterator operator++(int) {
+ iterator copy = *this;
+ operator++();
+ return copy;
+ }
+
+ bool encloses(iterator other) const { return Ptr >= other.Ptr; }
+ bool strictlyEncloses(iterator other) const { return Ptr > other.Ptr; }
+
+ bool operator==(iterator other) const { return Ptr == other.Ptr; }
+ bool operator!=(iterator other) const { return Ptr != other.Ptr; }
+};
+
+inline EHScopeStack::iterator EHScopeStack::begin() const {
+ return iterator(StartOfData);
+}
+
+inline EHScopeStack::iterator EHScopeStack::end() const {
+ return iterator(EndOfBuffer);
+}
+
+inline void EHScopeStack::popCatch() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ EHCatchScope &scope = cast<EHCatchScope>(*begin());
+ InnermostEHScope = scope.getEnclosingEHScope();
+ deallocate(EHCatchScope::getSizeForNumHandlers(scope.getNumHandlers()));
+}
+
+inline void EHScopeStack::popTerminate() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ EHTerminateScope &scope = cast<EHTerminateScope>(*begin());
+ InnermostEHScope = scope.getEnclosingEHScope();
+ deallocate(EHTerminateScope::getSize());
+}
+
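+// A stable_iterator is stored as an offset from the end of the buffer, so it
+// remains valid across reallocation and across pushes above the saved point.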
+inline EHScopeStack::iterator EHScopeStack::find(stable_iterator sp) const {
+ assert(sp.isValid() && "finding invalid savepoint");
+ assert(sp.Size <= stable_begin().Size && "finding savepoint after pop");
+ return iterator(EndOfBuffer - sp.Size);
+}
+
+inline EHScopeStack::stable_iterator
+EHScopeStack::stabilize(iterator ir) const {
+ assert(StartOfData <= ir.Ptr && ir.Ptr <= EndOfBuffer);
+ return stable_iterator(EndOfBuffer - ir.Ptr);
+}
+
+/// The exceptions personality for a function.
+struct EHPersonality {
+ const char *PersonalityFn;
+
+ // If this is non-null, this personality requires a non-standard
+ // function for rethrowing an exception after a catchall cleanup.
+ // This function must have prototype void(void*).
+ const char *CatchallRethrowFn;
+
+ static const EHPersonality &get(CodeGenModule &CGM, const FunctionDecl *FD);
+ static const EHPersonality &get(CodeGenFunction &CGF);
+
+ static const EHPersonality GNU_C;
+ static const EHPersonality GNU_C_SJLJ;
+ static const EHPersonality GNU_C_SEH;
+ static const EHPersonality GNU_ObjC;
+ static const EHPersonality GNUstep_ObjC;
+ static const EHPersonality GNU_ObjCXX;
+ static const EHPersonality NeXT_ObjC;
+ static const EHPersonality GNU_CPlusPlus;
+ static const EHPersonality GNU_CPlusPlus_SJLJ;
+ static const EHPersonality GNU_CPlusPlus_SEH;
+ static const EHPersonality MSVC_except_handler;
+ static const EHPersonality MSVC_C_specific_handler;
+ static const EHPersonality MSVC_CxxFrameHandler3;
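+  // For example, GNU_CPlusPlus names "__gxx_personality_v0" and
+  // MSVC_CxxFrameHandler3 names "__CxxFrameHandler3"; the string constants
+  // themselves are defined in CGException.cpp.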
+
+ /// Does this personality use landingpads or the family of pad instructions
+ /// designed to form funclets?
+ bool usesFuncletPads() const { return isMSVCPersonality(); }
+
+ bool isMSVCPersonality() const {
+ return this == &MSVC_except_handler || this == &MSVC_C_specific_handler ||
+ this == &MSVC_CxxFrameHandler3;
+ }
+
+ bool isMSVCXXPersonality() const { return this == &MSVC_CxxFrameHandler3; }
+};
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
new file mode 100644
index 0000000..78e3978
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -0,0 +1,3558 @@
+//===--- CGDebugInfo.cpp - Emit Debug Information for a Module ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the debug information generation while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CGBlocks.h"
+#include "CGCXXABI.h"
+#include "CGObjCRuntime.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Version.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Lex/HeaderSearchOptions.h"
+#include "clang/Lex/ModuleMap.h"
+#include "clang/Lex/PreprocessorOptions.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+using namespace clang;
+using namespace clang::CodeGen;
+
+CGDebugInfo::CGDebugInfo(CodeGenModule &CGM)
+ : CGM(CGM), DebugKind(CGM.getCodeGenOpts().getDebugInfo()),
+ DebugTypeExtRefs(CGM.getCodeGenOpts().DebugTypeExtRefs),
+ DBuilder(CGM.getModule()) {
+ for (const auto &KV : CGM.getCodeGenOpts().DebugPrefixMap)
+ DebugPrefixMap[KV.first] = KV.second;
+ CreateCompileUnit();
+}
+
+CGDebugInfo::~CGDebugInfo() {
+ assert(LexicalBlockStack.empty() &&
+ "Region stack mismatch, stack not empty!");
+}
+
+ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF,
+ SourceLocation TemporaryLocation)
+ : CGF(&CGF) {
+ init(TemporaryLocation);
+}
+
+ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF,
+ bool DefaultToEmpty,
+ SourceLocation TemporaryLocation)
+ : CGF(&CGF) {
+ init(TemporaryLocation, DefaultToEmpty);
+}
+
+void ApplyDebugLocation::init(SourceLocation TemporaryLocation,
+ bool DefaultToEmpty) {
+ auto *DI = CGF->getDebugInfo();
+ if (!DI) {
+ CGF = nullptr;
+ return;
+ }
+
+ OriginalLocation = CGF->Builder.getCurrentDebugLocation();
+ if (TemporaryLocation.isValid()) {
+ DI->EmitLocation(CGF->Builder, TemporaryLocation);
+ return;
+ }
+
+ if (DefaultToEmpty) {
+ CGF->Builder.SetCurrentDebugLocation(llvm::DebugLoc());
+ return;
+ }
+
+ // Construct a location that has a valid scope, but no line info.
+ assert(!DI->LexicalBlockStack.empty());
+ CGF->Builder.SetCurrentDebugLocation(
+ llvm::DebugLoc::get(0, 0, DI->LexicalBlockStack.back()));
+}
+
+ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, const Expr *E)
+ : CGF(&CGF) {
+ init(E->getExprLoc());
+}
+
+ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, llvm::DebugLoc Loc)
+ : CGF(&CGF) {
+ if (!CGF.getDebugInfo()) {
+ this->CGF = nullptr;
+ return;
+ }
+ OriginalLocation = CGF.Builder.getCurrentDebugLocation();
+ if (Loc)
+ CGF.Builder.SetCurrentDebugLocation(std::move(Loc));
+}
+
+ApplyDebugLocation::~ApplyDebugLocation() {
+ // Query CGF so the location isn't overwritten when location updates are
+  // temporarily disabled (for C++ default function arguments).
+ if (CGF)
+ CGF->Builder.SetCurrentDebugLocation(std::move(OriginalLocation));
+}
+
+void CGDebugInfo::setLocation(SourceLocation Loc) {
+  // If the new location isn't valid, return.
+ if (Loc.isInvalid())
+ return;
+
+ CurLoc = CGM.getContext().getSourceManager().getExpansionLoc(Loc);
+
+  // If we've changed files in the middle of a lexical scope, go ahead
+  // and create a new lexical scope with a file node if it's different
+  // from the one in the current scope.
+ if (LexicalBlockStack.empty())
+ return;
+
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ auto *Scope = cast<llvm::DIScope>(LexicalBlockStack.back());
+ PresumedLoc PCLoc = SM.getPresumedLoc(CurLoc);
+
+ if (PCLoc.isInvalid() || Scope->getFilename() == PCLoc.getFilename())
+ return;
+
+ if (auto *LBF = dyn_cast<llvm::DILexicalBlockFile>(Scope)) {
+ LexicalBlockStack.pop_back();
+ LexicalBlockStack.emplace_back(DBuilder.createLexicalBlockFile(
+ LBF->getScope(), getOrCreateFile(CurLoc)));
+ } else if (isa<llvm::DILexicalBlock>(Scope) ||
+ isa<llvm::DISubprogram>(Scope)) {
+ LexicalBlockStack.pop_back();
+ LexicalBlockStack.emplace_back(
+ DBuilder.createLexicalBlockFile(Scope, getOrCreateFile(CurLoc)));
+ }
+}
+
+llvm::DIScope *CGDebugInfo::getDeclContextDescriptor(const Decl *D) {
+ llvm::DIScope *Mod = getParentModuleOrNull(D);
+ return getContextDescriptor(cast<Decl>(D->getDeclContext()),
+ Mod ? Mod : TheCU);
+}
+
+llvm::DIScope *CGDebugInfo::getContextDescriptor(const Decl *Context,
+ llvm::DIScope *Default) {
+ if (!Context)
+ return Default;
+
+ auto I = RegionMap.find(Context);
+ if (I != RegionMap.end()) {
+ llvm::Metadata *V = I->second;
+ return dyn_cast_or_null<llvm::DIScope>(V);
+ }
+
+ // Check namespace.
+ if (const NamespaceDecl *NSDecl = dyn_cast<NamespaceDecl>(Context))
+ return getOrCreateNameSpace(NSDecl);
+
+ if (const RecordDecl *RDecl = dyn_cast<RecordDecl>(Context))
+ if (!RDecl->isDependentType())
+ return getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
+ getOrCreateMainFile());
+ return Default;
+}
+
+StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
+ assert(FD && "Invalid FunctionDecl!");
+ IdentifierInfo *FII = FD->getIdentifier();
+ FunctionTemplateSpecializationInfo *Info =
+ FD->getTemplateSpecializationInfo();
+
+ if (!Info && FII && !CGM.getCodeGenOpts().EmitCodeView)
+ return FII->getName();
+
+  // Otherwise, construct a human-readable name for the debug info.
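+  // (E.g. a specialization of 'template <class T> void f()' is emitted as
+  //  "f<int>" for DWARF consumers, but as a fully qualified, MSVC-style name
+  //  for CodeView.)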
+ SmallString<128> NS;
+ llvm::raw_svector_ostream OS(NS);
+ PrintingPolicy Policy(CGM.getLangOpts());
+
+ if (CGM.getCodeGenOpts().EmitCodeView) {
+ // Print a fully qualified name like MSVC would.
+ Policy.MSVCFormatting = true;
+ FD->printQualifiedName(OS, Policy);
+ } else {
+ // Print the unqualified name with some template arguments. This is what
+ // DWARF-based debuggers expect.
+ FD->printName(OS);
+ // Add any template specialization args.
+ if (Info) {
+ const TemplateArgumentList *TArgs = Info->TemplateArguments;
+ const TemplateArgument *Args = TArgs->data();
+ unsigned NumArgs = TArgs->size();
+ TemplateSpecializationType::PrintTemplateArgumentList(OS, Args, NumArgs,
+ Policy);
+ }
+ }
+
+ // Copy this name on the side and use its reference.
+ return internString(OS.str());
+}
+
+StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
+ SmallString<256> MethodName;
+ llvm::raw_svector_ostream OS(MethodName);
+ OS << (OMD->isInstanceMethod() ? '-' : '+') << '[';
+ const DeclContext *DC = OMD->getDeclContext();
+ if (const ObjCImplementationDecl *OID =
+ dyn_cast<const ObjCImplementationDecl>(DC)) {
+ OS << OID->getName();
+ } else if (const ObjCInterfaceDecl *OID =
+ dyn_cast<const ObjCInterfaceDecl>(DC)) {
+ OS << OID->getName();
+ } else if (const ObjCCategoryDecl *OC = dyn_cast<ObjCCategoryDecl>(DC)) {
+ if (OC->IsClassExtension()) {
+ OS << OC->getClassInterface()->getName();
+ } else {
+ OS << ((const NamedDecl *)OC)->getIdentifier()->getNameStart() << '('
+ << OC->getIdentifier()->getNameStart() << ')';
+ }
+ } else if (const ObjCCategoryImplDecl *OCD =
+ dyn_cast<const ObjCCategoryImplDecl>(DC)) {
+ OS << ((const NamedDecl *)OCD)->getIdentifier()->getNameStart() << '('
+ << OCD->getIdentifier()->getNameStart() << ')';
+ } else if (isa<ObjCProtocolDecl>(DC)) {
+ // We can extract the type of the class from the self pointer.
+ if (ImplicitParamDecl *SelfDecl = OMD->getSelfDecl()) {
+ QualType ClassTy =
+ cast<ObjCObjectPointerType>(SelfDecl->getType())->getPointeeType();
+ ClassTy.print(OS, PrintingPolicy(LangOptions()));
+ }
+ }
+ OS << ' ' << OMD->getSelector().getAsString() << ']';
+
+ return internString(OS.str());
+}
+
+StringRef CGDebugInfo::getSelectorName(Selector S) {
+ return internString(S.getAsString());
+}
+
+StringRef CGDebugInfo::getClassName(const RecordDecl *RD) {
+  // Quick optimization: avoid interning strings that are already stored
+  // reliably elsewhere.
+ if (!isa<ClassTemplateSpecializationDecl>(RD))
+ return RD->getName();
+
+ SmallString<128> Name;
+ {
+ llvm::raw_svector_ostream OS(Name);
+ RD->getNameForDiagnostic(OS, CGM.getContext().getPrintingPolicy(),
+ /*Qualified*/ false);
+ }
+
+ // Copy this name on the side and use its reference.
+ return internString(Name);
+}
+
+llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
+ if (!Loc.isValid())
+    // If the location is not valid, use the main input file.
+ return DBuilder.createFile(remapDIPath(TheCU->getFilename()),
+ remapDIPath(TheCU->getDirectory()));
+
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+
+ if (PLoc.isInvalid() || StringRef(PLoc.getFilename()).empty())
+    // If the location is not valid, use the main input file.
+ return DBuilder.createFile(remapDIPath(TheCU->getFilename()),
+ remapDIPath(TheCU->getDirectory()));
+
+ // Cache the results.
+ const char *fname = PLoc.getFilename();
+ auto it = DIFileCache.find(fname);
+
+ if (it != DIFileCache.end()) {
+ // Verify that the information still exists.
+ if (llvm::Metadata *V = it->second)
+ return cast<llvm::DIFile>(V);
+ }
+
+ llvm::DIFile *F = DBuilder.createFile(remapDIPath(PLoc.getFilename()),
+ remapDIPath(getCurrentDirname()));
+
+ DIFileCache[fname].reset(F);
+ return F;
+}
+
+llvm::DIFile *CGDebugInfo::getOrCreateMainFile() {
+ return DBuilder.createFile(remapDIPath(TheCU->getFilename()),
+ remapDIPath(TheCU->getDirectory()));
+}
+
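+// Apply the -fdebug-prefix-map remapping: with an entry such as
+// "/build=/src", a path like "/build/a.c" is emitted as "/src/a.c".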
+std::string CGDebugInfo::remapDIPath(StringRef Path) const {
+ for (const auto &Entry : DebugPrefixMap)
+ if (Path.startswith(Entry.first))
+ return (Twine(Entry.second) + Path.substr(Entry.first.size())).str();
+ return Path.str();
+}
+
+unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
+ if (Loc.isInvalid() && CurLoc.isInvalid())
+ return 0;
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
+ return PLoc.isValid() ? PLoc.getLine() : 0;
+}
+
+unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc, bool Force) {
+ // We may not want column information at all.
+ if (!Force && !CGM.getCodeGenOpts().DebugColumnInfo)
+ return 0;
+
+ // If the location is invalid then use the current column.
+ if (Loc.isInvalid() && CurLoc.isInvalid())
+ return 0;
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
+ return PLoc.isValid() ? PLoc.getColumn() : 0;
+}
+
+StringRef CGDebugInfo::getCurrentDirname() {
+ if (!CGM.getCodeGenOpts().DebugCompilationDir.empty())
+ return CGM.getCodeGenOpts().DebugCompilationDir;
+
+ if (!CWDName.empty())
+ return CWDName;
+ SmallString<256> CWD;
+ llvm::sys::fs::current_path(CWD);
+ return CWDName = internString(CWD);
+}
+
+void CGDebugInfo::CreateCompileUnit() {
+
+  // Should we be asking the SourceManager for the main file name, instead of
+  // accepting it as an argument? As it is, the main file name can mismatch
+  // source locations and create extra lexical scopes or mismatched debug info
+  // (a CU with a DW_AT_file of "-", because that's what the driver passed,
+  // but functions and other entities with a DW_AT_file of "<stdin>", because
+  // that's what the SourceManager says).
+
+ // Get absolute path name.
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ std::string MainFileName = CGM.getCodeGenOpts().MainFileName;
+ if (MainFileName.empty())
+ MainFileName = "<stdin>";
+
+ // The main file name provided via the "-main-file-name" option contains just
+ // the file name itself with no path information. This file name may have had
+ // a relative path, so we look into the actual file entry for the main
+ // file to determine the real absolute path for the file.
+ std::string MainFileDir;
+ if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
+ MainFileDir = remapDIPath(MainFile->getDir()->getName());
+ if (MainFileDir != ".") {
+ llvm::SmallString<1024> MainFileDirSS(MainFileDir);
+ llvm::sys::path::append(MainFileDirSS, MainFileName);
+ MainFileName = MainFileDirSS.str();
+ }
+ }
+
+ llvm::dwarf::SourceLanguage LangTag;
+ const LangOptions &LO = CGM.getLangOpts();
+ if (LO.CPlusPlus) {
+ if (LO.ObjC1)
+ LangTag = llvm::dwarf::DW_LANG_ObjC_plus_plus;
+ else
+ LangTag = llvm::dwarf::DW_LANG_C_plus_plus;
+ } else if (LO.ObjC1) {
+ LangTag = llvm::dwarf::DW_LANG_ObjC;
+ } else if (LO.C99) {
+ LangTag = llvm::dwarf::DW_LANG_C99;
+ } else {
+ LangTag = llvm::dwarf::DW_LANG_C89;
+ }
+
+ std::string Producer = getClangFullVersion();
+
+ // Figure out which version of the ObjC runtime we have.
+ unsigned RuntimeVers = 0;
+ if (LO.ObjC1)
+ RuntimeVers = LO.ObjCRuntime.isNonFragile() ? 2 : 1;
+
+ // Create new compile unit.
+ // FIXME - Eliminate TheCU.
+ TheCU = DBuilder.createCompileUnit(
+ LangTag, remapDIPath(MainFileName), remapDIPath(getCurrentDirname()),
+ Producer, LO.Optimize, CGM.getCodeGenOpts().DwarfDebugFlags, RuntimeVers,
+ CGM.getCodeGenOpts().SplitDwarfFile,
+ DebugKind <= CodeGenOptions::DebugLineTablesOnly
+ ? llvm::DIBuilder::LineTablesOnly
+ : llvm::DIBuilder::FullDebug,
+ 0 /* DWOid */, DebugKind != CodeGenOptions::LocTrackingOnly);
+}
+
+llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
+ llvm::dwarf::TypeKind Encoding;
+ StringRef BTName;
+ switch (BT->getKind()) {
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+ case BuiltinType::Dependent:
+ llvm_unreachable("Unexpected builtin type");
+ case BuiltinType::NullPtr:
+ return DBuilder.createNullPtrType();
+ case BuiltinType::Void:
+ return nullptr;
+ case BuiltinType::ObjCClass:
+ if (!ClassTy)
+ ClassTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_class", TheCU,
+ getOrCreateMainFile(), 0);
+ return ClassTy;
+ case BuiltinType::ObjCId: {
+ // typedef struct objc_class *Class;
+ // typedef struct objc_object {
+ // Class isa;
+ // } *id;
+
+ if (ObjTy)
+ return ObjTy;
+
+ if (!ClassTy)
+ ClassTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_class", TheCU,
+ getOrCreateMainFile(), 0);
+
+ unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
+
+ auto *ISATy = DBuilder.createPointerType(ClassTy, Size);
+
+ ObjTy =
+ DBuilder.createStructType(TheCU, "objc_object", getOrCreateMainFile(),
+ 0, 0, 0, 0, nullptr, llvm::DINodeArray());
+
+ DBuilder.replaceArrays(
+ ObjTy,
+ DBuilder.getOrCreateArray(&*DBuilder.createMemberType(
+ ObjTy, "isa", getOrCreateMainFile(), 0, Size, 0, 0, 0, ISATy)));
+ return ObjTy;
+ }
+ case BuiltinType::ObjCSel: {
+ if (!SelTy)
+ SelTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_selector", TheCU,
+ getOrCreateMainFile(), 0);
+ return SelTy;
+ }
+
+ case BuiltinType::OCLImage1d:
+ return getOrCreateStructPtrType("opencl_image1d_t", OCLImage1dDITy);
+ case BuiltinType::OCLImage1dArray:
+ return getOrCreateStructPtrType("opencl_image1d_array_t",
+ OCLImage1dArrayDITy);
+ case BuiltinType::OCLImage1dBuffer:
+ return getOrCreateStructPtrType("opencl_image1d_buffer_t",
+ OCLImage1dBufferDITy);
+ case BuiltinType::OCLImage2d:
+ return getOrCreateStructPtrType("opencl_image2d_t", OCLImage2dDITy);
+ case BuiltinType::OCLImage2dArray:
+ return getOrCreateStructPtrType("opencl_image2d_array_t",
+ OCLImage2dArrayDITy);
+ case BuiltinType::OCLImage2dDepth:
+ return getOrCreateStructPtrType("opencl_image2d_depth_t",
+ OCLImage2dDepthDITy);
+ case BuiltinType::OCLImage2dArrayDepth:
+ return getOrCreateStructPtrType("opencl_image2d_array_depth_t",
+ OCLImage2dArrayDepthDITy);
+ case BuiltinType::OCLImage2dMSAA:
+ return getOrCreateStructPtrType("opencl_image2d_msaa_t",
+ OCLImage2dMSAADITy);
+ case BuiltinType::OCLImage2dArrayMSAA:
+ return getOrCreateStructPtrType("opencl_image2d_array_msaa_t",
+ OCLImage2dArrayMSAADITy);
+ case BuiltinType::OCLImage2dMSAADepth:
+ return getOrCreateStructPtrType("opencl_image2d_msaa_depth_t",
+ OCLImage2dMSAADepthDITy);
+ case BuiltinType::OCLImage2dArrayMSAADepth:
+ return getOrCreateStructPtrType("opencl_image2d_array_msaa_depth_t",
+ OCLImage2dArrayMSAADepthDITy);
+ case BuiltinType::OCLImage3d:
+ return getOrCreateStructPtrType("opencl_image3d_t", OCLImage3dDITy);
+ case BuiltinType::OCLSampler:
+ return DBuilder.createBasicType(
+ "opencl_sampler_t", CGM.getContext().getTypeSize(BT),
+ CGM.getContext().getTypeAlign(BT), llvm::dwarf::DW_ATE_unsigned);
+ case BuiltinType::OCLEvent:
+ return getOrCreateStructPtrType("opencl_event_t", OCLEventDITy);
+ case BuiltinType::OCLClkEvent:
+ return getOrCreateStructPtrType("opencl_clk_event_t", OCLClkEventDITy);
+ case BuiltinType::OCLQueue:
+ return getOrCreateStructPtrType("opencl_queue_t", OCLQueueDITy);
+ case BuiltinType::OCLNDRange:
+ return getOrCreateStructPtrType("opencl_ndrange_t", OCLNDRangeDITy);
+ case BuiltinType::OCLReserveID:
+ return getOrCreateStructPtrType("opencl_reserve_id_t", OCLReserveIDDITy);
+
+ case BuiltinType::UChar:
+ case BuiltinType::Char_U:
+ Encoding = llvm::dwarf::DW_ATE_unsigned_char;
+ break;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ Encoding = llvm::dwarf::DW_ATE_signed_char;
+ break;
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ Encoding = llvm::dwarf::DW_ATE_UTF;
+ break;
+ case BuiltinType::UShort:
+ case BuiltinType::UInt:
+ case BuiltinType::UInt128:
+ case BuiltinType::ULong:
+ case BuiltinType::WChar_U:
+ case BuiltinType::ULongLong:
+ Encoding = llvm::dwarf::DW_ATE_unsigned;
+ break;
+ case BuiltinType::Short:
+ case BuiltinType::Int:
+ case BuiltinType::Int128:
+ case BuiltinType::Long:
+ case BuiltinType::WChar_S:
+ case BuiltinType::LongLong:
+ Encoding = llvm::dwarf::DW_ATE_signed;
+ break;
+ case BuiltinType::Bool:
+ Encoding = llvm::dwarf::DW_ATE_boolean;
+ break;
+ case BuiltinType::Half:
+ case BuiltinType::Float:
+ case BuiltinType::LongDouble:
+ case BuiltinType::Double:
+ Encoding = llvm::dwarf::DW_ATE_float;
+ break;
+ }
+
+ switch (BT->getKind()) {
+ case BuiltinType::Long:
+ BTName = "long int";
+ break;
+ case BuiltinType::LongLong:
+ BTName = "long long int";
+ break;
+ case BuiltinType::ULong:
+ BTName = "long unsigned int";
+ break;
+ case BuiltinType::ULongLong:
+ BTName = "long long unsigned int";
+ break;
+ default:
+ BTName = BT->getName(CGM.getLangOpts());
+ break;
+ }
+ // Bit size, align and offset of the type.
+ uint64_t Size = CGM.getContext().getTypeSize(BT);
+ uint64_t Align = CGM.getContext().getTypeAlign(BT);
+ return DBuilder.createBasicType(BTName, Size, Align, Encoding);
+}
+
+llvm::DIType *CGDebugInfo::CreateType(const ComplexType *Ty) {
+ // Bit size, align and offset of the type.
+ llvm::dwarf::TypeKind Encoding = llvm::dwarf::DW_ATE_complex_float;
+ if (Ty->isComplexIntegerType())
+ Encoding = llvm::dwarf::DW_ATE_lo_user;
+
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ return DBuilder.createBasicType("complex", Size, Align, Encoding);
+}
+
+llvm::DIType *CGDebugInfo::CreateQualifiedType(QualType Ty,
+ llvm::DIFile *Unit) {
+ QualifierCollector Qc;
+ const Type *T = Qc.strip(Ty);
+
+ // Ignore these qualifiers for now.
+ Qc.removeObjCGCAttr();
+ Qc.removeAddressSpace();
+ Qc.removeObjCLifetime();
+
+  // We will create one derived type for each qualifier and recurse to handle
+  // any additional ones.
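+  // E.g. 'const volatile int' becomes DW_TAG_const_type ->
+  // DW_TAG_volatile_type -> int.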
+ llvm::dwarf::Tag Tag;
+ if (Qc.hasConst()) {
+ Tag = llvm::dwarf::DW_TAG_const_type;
+ Qc.removeConst();
+ } else if (Qc.hasVolatile()) {
+ Tag = llvm::dwarf::DW_TAG_volatile_type;
+ Qc.removeVolatile();
+ } else if (Qc.hasRestrict()) {
+ Tag = llvm::dwarf::DW_TAG_restrict_type;
+ Qc.removeRestrict();
+ } else {
+ assert(Qc.empty() && "Unknown type qualifier for debug info");
+ return getOrCreateType(QualType(T, 0), Unit);
+ }
+
+ auto *FromTy = getOrCreateType(Qc.apply(CGM.getContext(), T), Unit);
+
+ // No need to fill in the Name, Line, Size, Alignment, Offset in case of
+ // CVR derived types.
+ return DBuilder.createQualifiedType(Tag, FromTy);
+}
+
+llvm::DIType *CGDebugInfo::CreateType(const ObjCObjectPointerType *Ty,
+ llvm::DIFile *Unit) {
+
+ // The frontend treats 'id' as a typedef to an ObjCObjectType,
+ // whereas 'id<protocol>' is treated as an ObjCPointerType. For the
+ // debug info, we want to emit 'id' in both cases.
+ if (Ty->isObjCQualifiedIdType())
+ return getOrCreateType(CGM.getContext().getObjCIdType(), Unit);
+
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
+ Ty->getPointeeType(), Unit);
+}
+
+llvm::DIType *CGDebugInfo::CreateType(const PointerType *Ty,
+ llvm::DIFile *Unit) {
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
+ Ty->getPointeeType(), Unit);
+}
+
+/// \return whether a C++ mangling exists for the type defined by TD.
+static bool hasCXXMangling(const TagDecl *TD, llvm::DICompileUnit *TheCU) {
+ switch (TheCU->getSourceLanguage()) {
+ case llvm::dwarf::DW_LANG_C_plus_plus:
+ return true;
+ case llvm::dwarf::DW_LANG_ObjC_plus_plus:
+ return isa<CXXRecordDecl>(TD) || isa<EnumDecl>(TD);
+ default:
+ return false;
+ }
+}
+
+/// In C++ mode, types have linkage, so we can rely on the ODR and
+/// on their mangled names, if they're external.
+static SmallString<256> getUniqueTagTypeName(const TagType *Ty,
+ CodeGenModule &CGM,
+ llvm::DICompileUnit *TheCU) {
+ SmallString<256> FullName;
+ const TagDecl *TD = Ty->getDecl();
+
+ if (!hasCXXMangling(TD, TheCU) || !TD->isExternallyVisible())
+ return FullName;
+
+ // Microsoft Mangler does not have support for mangleCXXRTTIName yet.
+ if (CGM.getTarget().getCXXABI().isMicrosoft())
+ return FullName;
+
+ // TODO: This is using the RTTI name. Is there a better way to get
+ // a unique string for a type?
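+  // (Under the Itanium ABI, e.g., a global 'struct S' yields "_ZTS1S".)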
+ llvm::raw_svector_ostream Out(FullName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(QualType(Ty, 0), Out);
+ return FullName;
+}
+
+/// \return the appropriate DWARF tag for a composite type.
+static llvm::dwarf::Tag getTagForRecord(const RecordDecl *RD) {
+ llvm::dwarf::Tag Tag;
+ if (RD->isStruct() || RD->isInterface())
+ Tag = llvm::dwarf::DW_TAG_structure_type;
+ else if (RD->isUnion())
+ Tag = llvm::dwarf::DW_TAG_union_type;
+ else {
+ // FIXME: This could be a struct type giving a default visibility different
+ // than C++ class type, but needs llvm metadata changes first.
+ assert(RD->isClass());
+ Tag = llvm::dwarf::DW_TAG_class_type;
+ }
+ return Tag;
+}
+
+llvm::DICompositeType *
+CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty,
+ llvm::DIScope *Ctx) {
+ const RecordDecl *RD = Ty->getDecl();
+ if (llvm::DIType *T = getTypeOrNull(CGM.getContext().getRecordType(RD)))
+ return cast<llvm::DICompositeType>(T);
+ llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation());
+ unsigned Line = getLineNumber(RD->getLocation());
+ StringRef RDName = getClassName(RD);
+
+ uint64_t Size = 0;
+ uint64_t Align = 0;
+
+ const RecordDecl *D = RD->getDefinition();
+ if (D && D->isCompleteDefinition()) {
+ Size = CGM.getContext().getTypeSize(Ty);
+ Align = CGM.getContext().getTypeAlign(Ty);
+ }
+
+ // Create the type.
+ SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
+ llvm::DICompositeType *RetTy = DBuilder.createReplaceableCompositeType(
+ getTagForRecord(RD), RDName, Ctx, DefUnit, Line, 0, Size, Align,
+ llvm::DINode::FlagFwdDecl, FullName);
+ ReplaceMap.emplace_back(
+ std::piecewise_construct, std::make_tuple(Ty),
+ std::make_tuple(static_cast<llvm::Metadata *>(RetTy)));
+ return RetTy;
+}
+
+llvm::DIType *CGDebugInfo::CreatePointerLikeType(llvm::dwarf::Tag Tag,
+ const Type *Ty,
+ QualType PointeeTy,
+ llvm::DIFile *Unit) {
+ // Bit size, align and offset of the type.
+ // Size is always the size of a pointer. We can't use getTypeSize here
+ // because that does not return the correct value for references.
+ unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy);
+ uint64_t Size = CGM.getTarget().getPointerWidth(AS);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ if (Tag == llvm::dwarf::DW_TAG_reference_type ||
+ Tag == llvm::dwarf::DW_TAG_rvalue_reference_type)
+ return DBuilder.createReferenceType(Tag, getOrCreateType(PointeeTy, Unit),
+ Size, Align);
+ else
+ return DBuilder.createPointerType(getOrCreateType(PointeeTy, Unit), Size,
+ Align);
+}
+
+llvm::DIType *CGDebugInfo::getOrCreateStructPtrType(StringRef Name,
+ llvm::DIType *&Cache) {
+ if (Cache)
+ return Cache;
+ Cache = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type, Name,
+ TheCU, getOrCreateMainFile(), 0);
+ unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
+ Cache = DBuilder.createPointerType(Cache, Size);
+ return Cache;
+}
+
+llvm::DIType *CGDebugInfo::CreateType(const BlockPointerType *Ty,
+ llvm::DIFile *Unit) {
+ SmallVector<llvm::Metadata *, 8> EltTys;
+ QualType FType;
+ uint64_t FieldSize, FieldOffset;
+ unsigned FieldAlign;
+ llvm::DINodeArray Elements;
+
+ FieldOffset = 0;
+ FType = CGM.getContext().UnsignedLongTy;
+ EltTys.push_back(CreateMemberType(Unit, FType, "reserved", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "Size", &FieldOffset));
+
+ Elements = DBuilder.getOrCreateArray(EltTys);
+ EltTys.clear();
+
+ unsigned Flags = llvm::DINode::FlagAppleBlock;
+ unsigned LineNo = 0;
+
+ auto *EltTy =
+ DBuilder.createStructType(Unit, "__block_descriptor", nullptr, LineNo,
+ FieldOffset, 0, Flags, nullptr, Elements);
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+
+ auto *DescTy = DBuilder.createPointerType(EltTy, Size);
+
+ FieldOffset = 0;
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "__isa", &FieldOffset));
+ FType = CGM.getContext().IntTy;
+ EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__reserved", &FieldOffset));
+ FType = CGM.getContext().getPointerType(Ty->getPointeeType());
+ EltTys.push_back(CreateMemberType(Unit, FType, "__FuncPtr", &FieldOffset));
+
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ FieldSize = CGM.getContext().getTypeSize(Ty);
+ FieldAlign = CGM.getContext().getTypeAlign(Ty);
+ EltTys.push_back(DBuilder.createMemberType(Unit, "__descriptor", nullptr, LineNo,
+ FieldSize, FieldAlign, FieldOffset,
+ 0, DescTy));
+
+ FieldOffset += FieldSize;
+ Elements = DBuilder.getOrCreateArray(EltTys);
+
+ // The __block_literal_generic structs are marked with a special
+ // DW_AT_APPLE_BLOCK attribute and are an implementation detail only
+ // the debugger needs to know about. To allow type uniquing, emit
+ // them without a name or a location.
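+  // The struct built here mirrors the generic block literal layout, roughly:
+  //   struct __block_literal_generic {
+  //     void *__isa; int __flags; int __reserved;
+  //     ReturnTy (*__FuncPtr)(...); struct __block_descriptor *__descriptor;
+  //   };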
+ EltTy =
+ DBuilder.createStructType(Unit, "", nullptr, LineNo,
+ FieldOffset, 0, Flags, nullptr, Elements);
+
+ return DBuilder.createPointerType(EltTy, Size);
+}
+
+llvm::DIType *CGDebugInfo::CreateType(const TemplateSpecializationType *Ty,
+ llvm::DIFile *Unit) {
+ assert(Ty->isTypeAlias());
+ llvm::DIType *Src = getOrCreateType(Ty->getAliasedType(), Unit);
+
+ SmallString<128> NS;
+ llvm::raw_svector_ostream OS(NS);
+ Ty->getTemplateName().print(OS, CGM.getContext().getPrintingPolicy(),
+ /*qualified*/ false);
+
+ TemplateSpecializationType::PrintTemplateArgumentList(
+ OS, Ty->getArgs(), Ty->getNumArgs(),
+ CGM.getContext().getPrintingPolicy());
+
+ TypeAliasDecl *AliasDecl = cast<TypeAliasTemplateDecl>(
+ Ty->getTemplateName().getAsTemplateDecl())->getTemplatedDecl();
+
+ SourceLocation Loc = AliasDecl->getLocation();
+ return DBuilder.createTypedef(Src, OS.str(), getOrCreateFile(Loc),
+ getLineNumber(Loc),
+ getDeclContextDescriptor(AliasDecl));
+}
+
+llvm::DIType *CGDebugInfo::CreateType(const TypedefType *Ty,
+ llvm::DIFile *Unit) {
+ // We don't set size information, but do specify where the typedef was
+ // declared.
+ SourceLocation Loc = Ty->getDecl()->getLocation();
+
+ // Typedefs are derived from some other type.
+ return DBuilder.createTypedef(
+ getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit),
+ Ty->getDecl()->getName(), getOrCreateFile(Loc), getLineNumber(Loc),
+ getDeclContextDescriptor(Ty->getDecl()));
+}
+
+llvm::DIType *CGDebugInfo::CreateType(const FunctionType *Ty,
+ llvm::DIFile *Unit) {
+ SmallVector<llvm::Metadata *, 16> EltTys;
+
+ // Add the result type at least.
+ EltTys.push_back(getOrCreateType(Ty->getReturnType(), Unit));
+
+  // Set up the remainder of the arguments if there is a prototype;
+  // otherwise, emit the type as a variadic function.
+ if (isa<FunctionNoProtoType>(Ty))
+ EltTys.push_back(DBuilder.createUnspecifiedParameter());
+ else if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(Ty)) {
+ for (unsigned i = 0, e = FPT->getNumParams(); i != e; ++i)
+ EltTys.push_back(getOrCreateType(FPT->getParamType(i), Unit));
+ if (FPT->isVariadic())
+ EltTys.push_back(DBuilder.createUnspecifiedParameter());
+ }
+
+ llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys);
+ return DBuilder.createSubroutineType(EltTypeArray);
+}
+
+/// Convert an AccessSpecifier into the corresponding DINode flag.
+/// As an optimization, return 0 if the access specifier equals the
+/// default for the containing type.
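+/// For example, a protected member of a struct maps to FlagProtected, while a
+/// public member of a struct maps to 0, public being the struct default.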
+static unsigned getAccessFlag(AccessSpecifier Access, const RecordDecl *RD) {
+ AccessSpecifier Default = clang::AS_none;
+ if (RD && RD->isClass())
+ Default = clang::AS_private;
+ else if (RD && (RD->isStruct() || RD->isUnion()))
+ Default = clang::AS_public;
+
+ if (Access == Default)
+ return 0;
+
+ switch (Access) {
+ case clang::AS_private:
+ return llvm::DINode::FlagPrivate;
+ case clang::AS_protected:
+ return llvm::DINode::FlagProtected;
+ case clang::AS_public:
+ return llvm::DINode::FlagPublic;
+ case clang::AS_none:
+ return 0;
+ }
+ llvm_unreachable("unexpected access enumerator");
+}
+
+llvm::DIType *CGDebugInfo::createFieldType(
+ StringRef name, QualType type, uint64_t sizeInBitsOverride,
+ SourceLocation loc, AccessSpecifier AS, uint64_t offsetInBits,
+ llvm::DIFile *tunit, llvm::DIScope *scope, const RecordDecl *RD) {
+ llvm::DIType *debugType = getOrCreateType(type, tunit);
+
+ // Get the location for the field.
+ llvm::DIFile *file = getOrCreateFile(loc);
+ unsigned line = getLineNumber(loc);
+
+ uint64_t SizeInBits = 0;
+ unsigned AlignInBits = 0;
+ if (!type->isIncompleteArrayType()) {
+ TypeInfo TI = CGM.getContext().getTypeInfo(type);
+ SizeInBits = TI.Width;
+ AlignInBits = TI.Align;
+
+ if (sizeInBitsOverride)
+ SizeInBits = sizeInBitsOverride;
+ }
+
+ unsigned flags = getAccessFlag(AS, RD);
+ return DBuilder.createMemberType(scope, name, file, line, SizeInBits,
+ AlignInBits, offsetInBits, flags, debugType);
+}
+
+void CGDebugInfo::CollectRecordLambdaFields(
+ const CXXRecordDecl *CXXDecl, SmallVectorImpl<llvm::Metadata *> &elements,
+ llvm::DIType *RecordTy) {
+  // For C++11 lambdas, a field will be the same as a capture, but the capture
+  // has the name and the location of the variable, so we iterate over both
+  // concurrently.
+ const ASTRecordLayout &layout = CGM.getContext().getASTRecordLayout(CXXDecl);
+ RecordDecl::field_iterator Field = CXXDecl->field_begin();
+ unsigned fieldno = 0;
+ for (CXXRecordDecl::capture_const_iterator I = CXXDecl->captures_begin(),
+ E = CXXDecl->captures_end();
+ I != E; ++I, ++Field, ++fieldno) {
+ const LambdaCapture &C = *I;
+ if (C.capturesVariable()) {
+ VarDecl *V = C.getCapturedVar();
+ llvm::DIFile *VUnit = getOrCreateFile(C.getLocation());
+ StringRef VName = V->getName();
+ uint64_t SizeInBitsOverride = 0;
+ if (Field->isBitField()) {
+ SizeInBitsOverride = Field->getBitWidthValue(CGM.getContext());
+ assert(SizeInBitsOverride && "found named 0-width bitfield");
+ }
+ llvm::DIType *fieldType = createFieldType(
+ VName, Field->getType(), SizeInBitsOverride, C.getLocation(),
+ Field->getAccess(), layout.getFieldOffset(fieldno), VUnit, RecordTy,
+ CXXDecl);
+ elements.push_back(fieldType);
+ } else if (C.capturesThis()) {
+ // TODO: Need to handle 'this' in some way by probably renaming the
+ // this of the lambda class and having a field member of 'this' or
+ // by using AT_object_pointer for the function and having that be
+ // used as 'this' for semantic references.
+ FieldDecl *f = *Field;
+ llvm::DIFile *VUnit = getOrCreateFile(f->getLocation());
+ QualType type = f->getType();
+ llvm::DIType *fieldType = createFieldType(
+ "this", type, 0, f->getLocation(), f->getAccess(),
+ layout.getFieldOffset(fieldno), VUnit, RecordTy, CXXDecl);
+
+ elements.push_back(fieldType);
+ }
+ }
+}
+
+llvm::DIDerivedType *
+CGDebugInfo::CreateRecordStaticField(const VarDecl *Var, llvm::DIType *RecordTy,
+ const RecordDecl *RD) {
+ // Create the descriptor for the static variable, with or without
+ // constant initializers.
+ Var = Var->getCanonicalDecl();
+ llvm::DIFile *VUnit = getOrCreateFile(Var->getLocation());
+ llvm::DIType *VTy = getOrCreateType(Var->getType(), VUnit);
+
+ unsigned LineNumber = getLineNumber(Var->getLocation());
+ StringRef VName = Var->getName();
+ llvm::Constant *C = nullptr;
+ if (Var->getInit()) {
+ const APValue *Value = Var->evaluateValue();
+ if (Value) {
+ if (Value->isInt())
+ C = llvm::ConstantInt::get(CGM.getLLVMContext(), Value->getInt());
+ if (Value->isFloat())
+ C = llvm::ConstantFP::get(CGM.getLLVMContext(), Value->getFloat());
+ }
+ }
+
+ unsigned Flags = getAccessFlag(Var->getAccess(), RD);
+ llvm::DIDerivedType *GV = DBuilder.createStaticMemberType(
+ RecordTy, VName, VUnit, LineNumber, VTy, Flags, C);
+ StaticDataMemberCache[Var->getCanonicalDecl()].reset(GV);
+ return GV;
+}
+
+void CGDebugInfo::CollectRecordNormalField(
+ const FieldDecl *field, uint64_t OffsetInBits, llvm::DIFile *tunit,
+ SmallVectorImpl<llvm::Metadata *> &elements, llvm::DIType *RecordTy,
+ const RecordDecl *RD) {
+ StringRef name = field->getName();
+ QualType type = field->getType();
+
+ // Ignore unnamed fields unless they're anonymous structs/unions.
+ if (name.empty() && !type->isRecordType())
+ return;
+
+ uint64_t SizeInBitsOverride = 0;
+ if (field->isBitField()) {
+ SizeInBitsOverride = field->getBitWidthValue(CGM.getContext());
+ assert(SizeInBitsOverride && "found named 0-width bitfield");
+ }
+
+ llvm::DIType *fieldType =
+ createFieldType(name, type, SizeInBitsOverride, field->getLocation(),
+ field->getAccess(), OffsetInBits, tunit, RecordTy, RD);
+
+ elements.push_back(fieldType);
+}
+
+void CGDebugInfo::CollectRecordFields(
+ const RecordDecl *record, llvm::DIFile *tunit,
+ SmallVectorImpl<llvm::Metadata *> &elements,
+ llvm::DICompositeType *RecordTy) {
+ const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(record);
+
+ if (CXXDecl && CXXDecl->isLambda())
+ CollectRecordLambdaFields(CXXDecl, elements, RecordTy);
+ else {
+ const ASTRecordLayout &layout = CGM.getContext().getASTRecordLayout(record);
+
+ // Field number for non-static fields.
+ unsigned fieldNo = 0;
+
+ // Static and non-static members should appear in the same order as
+ // the corresponding declarations in the source program.
+ for (const auto *I : record->decls())
+ if (const auto *V = dyn_cast<VarDecl>(I)) {
+ // Reuse the existing static member declaration if one exists
+ auto MI = StaticDataMemberCache.find(V->getCanonicalDecl());
+ if (MI != StaticDataMemberCache.end()) {
+ assert(MI->second &&
+ "Static data member declaration should still exist");
+ elements.push_back(MI->second);
+ } else {
+ auto Field = CreateRecordStaticField(V, RecordTy, record);
+ elements.push_back(Field);
+ }
+ } else if (const auto *field = dyn_cast<FieldDecl>(I)) {
+ CollectRecordNormalField(field, layout.getFieldOffset(fieldNo), tunit,
+ elements, RecordTy, record);
+
+ // Bump field number for next field.
+ ++fieldNo;
+ }
+ }
+}
+
+llvm::DISubroutineType *
+CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
+ llvm::DIFile *Unit) {
+ const FunctionProtoType *Func = Method->getType()->getAs<FunctionProtoType>();
+ if (Method->isStatic())
+ return cast_or_null<llvm::DISubroutineType>(
+ getOrCreateType(QualType(Func, 0), Unit));
+ return getOrCreateInstanceMethodType(Method->getThisType(CGM.getContext()),
+ Func, Unit);
+}
+
+llvm::DISubroutineType *CGDebugInfo::getOrCreateInstanceMethodType(
+ QualType ThisPtr, const FunctionProtoType *Func, llvm::DIFile *Unit) {
+ // Add "this" pointer.
+ llvm::DITypeRefArray Args(
+ cast<llvm::DISubroutineType>(getOrCreateType(QualType(Func, 0), Unit))
+ ->getTypeArray());
+ assert(Args.size() && "Invalid number of arguments!");
+
+ SmallVector<llvm::Metadata *, 16> Elts;
+
+ // First element is always return type. For 'void' functions it is NULL.
+ Elts.push_back(Args[0]);
+
+ // "this" pointer is always first argument.
+ const CXXRecordDecl *RD = ThisPtr->getPointeeCXXRecordDecl();
+ if (isa<ClassTemplateSpecializationDecl>(RD)) {
+ // Create pointer type directly in this case.
+ const PointerType *ThisPtrTy = cast<PointerType>(ThisPtr);
+ QualType PointeeTy = ThisPtrTy->getPointeeType();
+ unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy);
+ uint64_t Size = CGM.getTarget().getPointerWidth(AS);
+ uint64_t Align = CGM.getContext().getTypeAlign(ThisPtrTy);
+ llvm::DIType *PointeeType = getOrCreateType(PointeeTy, Unit);
+ llvm::DIType *ThisPtrType =
+ DBuilder.createPointerType(PointeeType, Size, Align);
+ TypeCache[ThisPtr.getAsOpaquePtr()].reset(ThisPtrType);
+ // TODO: This and the artificial type below are misleading, the
+ // types aren't artificial the argument is, but the current
+ // metadata doesn't represent that.
+ ThisPtrType = DBuilder.createObjectPointerType(ThisPtrType);
+ Elts.push_back(ThisPtrType);
+ } else {
+ llvm::DIType *ThisPtrType = getOrCreateType(ThisPtr, Unit);
+ TypeCache[ThisPtr.getAsOpaquePtr()].reset(ThisPtrType);
+ ThisPtrType = DBuilder.createObjectPointerType(ThisPtrType);
+ Elts.push_back(ThisPtrType);
+ }
+
+ // Copy rest of the arguments.
+ for (unsigned i = 1, e = Args.size(); i != e; ++i)
+ Elts.push_back(Args[i]);
+
+ llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts);
+
+ unsigned Flags = 0;
+ if (Func->getExtProtoInfo().RefQualifier == RQ_LValue)
+ Flags |= llvm::DINode::FlagLValueReference;
+ if (Func->getExtProtoInfo().RefQualifier == RQ_RValue)
+ Flags |= llvm::DINode::FlagRValueReference;
+
+ return DBuilder.createSubroutineType(EltTypeArray, Flags);
+}
+
+/// isFunctionLocalClass - Return true if CXXRecordDecl is defined
+/// inside a function.
+static bool isFunctionLocalClass(const CXXRecordDecl *RD) {
+ if (const CXXRecordDecl *NRD = dyn_cast<CXXRecordDecl>(RD->getDeclContext()))
+ return isFunctionLocalClass(NRD);
+ if (isa<FunctionDecl>(RD->getDeclContext()))
+ return true;
+ return false;
+}
+
+llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction(
+ const CXXMethodDecl *Method, llvm::DIFile *Unit, llvm::DIType *RecordTy) {
+ bool IsCtorOrDtor =
+ isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method);
+
+ StringRef MethodName = getFunctionName(Method);
+ llvm::DISubroutineType *MethodTy = getOrCreateMethodType(Method, Unit);
+
+ // Since a single ctor/dtor corresponds to multiple functions, it doesn't
+ // make sense to give a single ctor/dtor a linkage name.
+ StringRef MethodLinkageName;
+ if (!IsCtorOrDtor && !isFunctionLocalClass(Method->getParent()))
+ MethodLinkageName = CGM.getMangledName(Method);
+
+ // Get the location for the method.
+ llvm::DIFile *MethodDefUnit = nullptr;
+ unsigned MethodLine = 0;
+ if (!Method->isImplicit()) {
+ MethodDefUnit = getOrCreateFile(Method->getLocation());
+ MethodLine = getLineNumber(Method->getLocation());
+ }
+
+ // Collect virtual method info.
+ llvm::DIType *ContainingType = nullptr;
+ unsigned Virtuality = 0;
+ unsigned VIndex = 0;
+
+ if (Method->isVirtual()) {
+ if (Method->isPure())
+ Virtuality = llvm::dwarf::DW_VIRTUALITY_pure_virtual;
+ else
+ Virtuality = llvm::dwarf::DW_VIRTUALITY_virtual;
+
+ // It doesn't make sense to give a virtual destructor a vtable index,
+ // since a single destructor has two entries in the vtable.
+ // FIXME: Add proper support for debug info for virtual calls in
+ // the Microsoft ABI, where we may use multiple vptrs to make a vftable
+ // lookup if we have multiple or virtual inheritance.
+ if (!isa<CXXDestructorDecl>(Method) &&
+ !CGM.getTarget().getCXXABI().isMicrosoft())
+ VIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(Method);
+ ContainingType = RecordTy;
+ }
+
+ unsigned Flags = 0;
+ if (Method->isImplicit())
+ Flags |= llvm::DINode::FlagArtificial;
+ Flags |= getAccessFlag(Method->getAccess(), Method->getParent());
+ if (const CXXConstructorDecl *CXXC = dyn_cast<CXXConstructorDecl>(Method)) {
+ if (CXXC->isExplicit())
+ Flags |= llvm::DINode::FlagExplicit;
+ } else if (const CXXConversionDecl *CXXC =
+ dyn_cast<CXXConversionDecl>(Method)) {
+ if (CXXC->isExplicit())
+ Flags |= llvm::DINode::FlagExplicit;
+ }
+ if (Method->hasPrototype())
+ Flags |= llvm::DINode::FlagPrototyped;
+ if (Method->getRefQualifier() == RQ_LValue)
+ Flags |= llvm::DINode::FlagLValueReference;
+ if (Method->getRefQualifier() == RQ_RValue)
+ Flags |= llvm::DINode::FlagRValueReference;
+
+ llvm::DINodeArray TParamsArray = CollectFunctionTemplateParams(Method, Unit);
+ llvm::DISubprogram *SP = DBuilder.createMethod(
+ RecordTy, MethodName, MethodLinkageName, MethodDefUnit, MethodLine,
+ MethodTy, /*isLocalToUnit=*/false,
+ /* isDefinition=*/false, Virtuality, VIndex, ContainingType, Flags,
+ CGM.getLangOpts().Optimize, TParamsArray.get());
+
+ SPCache[Method->getCanonicalDecl()].reset(SP);
+
+ return SP;
+}
+
+void CGDebugInfo::CollectCXXMemberFunctions(
+ const CXXRecordDecl *RD, llvm::DIFile *Unit,
+ SmallVectorImpl<llvm::Metadata *> &EltTys, llvm::DIType *RecordTy) {
+
+  // Since templated functions give us more than just the individual member
+  // decls, iterate over every declaration to gather the functions.
+ for (const auto *I : RD->decls()) {
+ const auto *Method = dyn_cast<CXXMethodDecl>(I);
+ // If the member is implicit, don't add it to the member list. This avoids
+ // the member being added to type units by LLVM, while still allowing it
+ // to be emitted into the type declaration/reference inside the compile
+ // unit.
+ // Ditto 'nodebug' methods, for consistency with CodeGenFunction.cpp.
+ // FIXME: Handle Using(Shadow?)Decls here to create
+ // DW_TAG_imported_declarations inside the class for base decls brought into
+ // derived classes. GDB doesn't seem to notice/leverage these when I tried
+ // it, so I'm not rushing to fix this. (GCC seems to produce them, if
+ // referenced)
+ if (!Method || Method->isImplicit() || Method->hasAttr<NoDebugAttr>())
+ continue;
+
+ if (Method->getType()->getAs<FunctionProtoType>()->getContainedAutoType())
+ continue;
+
+ // Reuse the existing member function declaration if it exists.
+    // It may be associated with the declaration of the type and should be
+    // reused as we're building the definition.
+ //
+ // This situation can arise in the vtable-based debug info reduction where
+ // implicit members are emitted in a non-vtable TU.
+ auto MI = SPCache.find(Method->getCanonicalDecl());
+ EltTys.push_back(MI == SPCache.end()
+ ? CreateCXXMemberFunction(Method, Unit, RecordTy)
+ : static_cast<llvm::Metadata *>(MI->second));
+ }
+}
+
+void CGDebugInfo::CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile *Unit,
+ SmallVectorImpl<llvm::Metadata *> &EltTys,
+ llvm::DIType *RecordTy) {
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ for (const auto &BI : RD->bases()) {
+ unsigned BFlags = 0;
+ uint64_t BaseOffset;
+
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(BI.getType()->getAs<RecordType>()->getDecl());
+
+ if (BI.isVirtual()) {
+ if (CGM.getTarget().getCXXABI().isItaniumFamily()) {
+        // The virtual base offset offset is negative, but the code generator
+        // emits a DWARF expression that expects a positive number.
+ BaseOffset = 0 - CGM.getItaniumVTableContext()
+ .getVirtualBaseOffsetOffset(RD, Base)
+ .getQuantity();
+ } else {
+ // In the MS ABI, store the vbtable offset, which is analogous to the
+ // vbase offset offset in Itanium.
+ BaseOffset =
+ 4 * CGM.getMicrosoftVTableContext().getVBTableIndex(RD, Base);
+ }
+ BFlags = llvm::DINode::FlagVirtual;
+ } else
+ BaseOffset = CGM.getContext().toBits(RL.getBaseClassOffset(Base));
+ // FIXME: Inconsistent units for BaseOffset. It is in bytes when
+ // BI->isVirtual() and bits when not.
+
+ BFlags |= getAccessFlag(BI.getAccessSpecifier(), RD);
+ llvm::DIType *DTy = DBuilder.createInheritance(
+ RecordTy, getOrCreateType(BI.getType(), Unit), BaseOffset, BFlags);
+ EltTys.push_back(DTy);
+ }
+}
+
+llvm::DINodeArray
+CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList,
+ ArrayRef<TemplateArgument> TAList,
+ llvm::DIFile *Unit) {
+ SmallVector<llvm::Metadata *, 16> TemplateParams;
+ for (unsigned i = 0, e = TAList.size(); i != e; ++i) {
+ const TemplateArgument &TA = TAList[i];
+ StringRef Name;
+ if (TPList)
+ Name = TPList->getParam(i)->getName();
+ switch (TA.getKind()) {
+ case TemplateArgument::Type: {
+ llvm::DIType *TTy = getOrCreateType(TA.getAsType(), Unit);
+ TemplateParams.push_back(
+ DBuilder.createTemplateTypeParameter(TheCU, Name, TTy));
+ } break;
+ case TemplateArgument::Integral: {
+ llvm::DIType *TTy = getOrCreateType(TA.getIntegralType(), Unit);
+ TemplateParams.push_back(DBuilder.createTemplateValueParameter(
+ TheCU, Name, TTy,
+ llvm::ConstantInt::get(CGM.getLLVMContext(), TA.getAsIntegral())));
+ } break;
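+    // For example, instantiating `template <int N> struct Buf {};` as
+    // `Buf<42>` reaches the Integral case above and emits a template value
+    // parameter "N" with the constant value 42.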
+ case TemplateArgument::Declaration: {
+ const ValueDecl *D = TA.getAsDecl();
+ QualType T = TA.getParamTypeForDecl().getDesugaredType(CGM.getContext());
+ llvm::DIType *TTy = getOrCreateType(T, Unit);
+ llvm::Constant *V = nullptr;
+ const CXXMethodDecl *MD;
+ // Variable pointer template parameters have a value that is the address
+ // of the variable.
+ if (const auto *VD = dyn_cast<VarDecl>(D))
+ V = CGM.GetAddrOfGlobalVar(VD);
+      // Member function pointers have special support for being built, though
+      // emitting them is currently unsupported in LLVM CodeGen.
+ else if ((MD = dyn_cast<CXXMethodDecl>(D)) && MD->isInstance())
+ V = CGM.getCXXABI().EmitMemberFunctionPointer(MD);
+ else if (const auto *FD = dyn_cast<FunctionDecl>(D))
+ V = CGM.GetAddrOfFunction(FD);
+ // Member data pointers have special handling too to compute the fixed
+ // offset within the object.
+ else if (const auto *MPT = dyn_cast<MemberPointerType>(T.getTypePtr())) {
+        // These five lines (& possibly the above member function pointer
+        // handling) might be refactorable to share code with
+        // CodeGenModule::getMemberPointerConstant.
+ uint64_t fieldOffset = CGM.getContext().getFieldOffset(D);
+ CharUnits chars =
+ CGM.getContext().toCharUnitsFromBits((int64_t)fieldOffset);
+ V = CGM.getCXXABI().EmitMemberDataPointer(MPT, chars);
+ }
+      // Guard against a null V (possible if none of the cases above matched)
+      // before stripping pointer casts.
+      TemplateParams.push_back(DBuilder.createTemplateValueParameter(
+          TheCU, Name, TTy,
+          cast_or_null<llvm::Constant>(V ? V->stripPointerCasts() : nullptr)));
+ } break;
+ case TemplateArgument::NullPtr: {
+ QualType T = TA.getNullPtrType();
+ llvm::DIType *TTy = getOrCreateType(T, Unit);
+ llvm::Constant *V = nullptr;
+ // Special case member data pointer null values since they're actually -1
+ // instead of zero.
+ if (const MemberPointerType *MPT =
+ dyn_cast<MemberPointerType>(T.getTypePtr()))
+ // But treat member function pointers as simple zero integers because
+ // it's easier than having a special case in LLVM's CodeGen. If LLVM
+ // CodeGen grows handling for values of non-null member function
+ // pointers then perhaps we could remove this special case and rely on
+ // EmitNullMemberPointer for member function pointers.
+ if (MPT->isMemberDataPointer())
+ V = CGM.getCXXABI().EmitNullMemberPointer(MPT);
+ if (!V)
+ V = llvm::ConstantInt::get(CGM.Int8Ty, 0);
+ TemplateParams.push_back(DBuilder.createTemplateValueParameter(
+ TheCU, Name, TTy, cast<llvm::Constant>(V)));
+ } break;
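+    // For instance, `template <int S::*P> struct X {};` instantiated as
+    // `X<nullptr>` takes the member-data-pointer path above, since the
+    // Itanium ABI encodes a null member data pointer as -1 rather than 0.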
+ case TemplateArgument::Template:
+ TemplateParams.push_back(DBuilder.createTemplateTemplateParameter(
+ TheCU, Name, nullptr,
+ TA.getAsTemplate().getAsTemplateDecl()->getQualifiedNameAsString()));
+ break;
+ case TemplateArgument::Pack:
+ TemplateParams.push_back(DBuilder.createTemplateParameterPack(
+ TheCU, Name, nullptr,
+ CollectTemplateParams(nullptr, TA.getPackAsArray(), Unit)));
+ break;
+ case TemplateArgument::Expression: {
+ const Expr *E = TA.getAsExpr();
+ QualType T = E->getType();
+ if (E->isGLValue())
+ T = CGM.getContext().getLValueReferenceType(T);
+ llvm::Constant *V = CGM.EmitConstantExpr(E, T);
+ assert(V && "Expression in template argument isn't constant");
+ llvm::DIType *TTy = getOrCreateType(T, Unit);
+ TemplateParams.push_back(DBuilder.createTemplateValueParameter(
+ TheCU, Name, TTy, cast<llvm::Constant>(V->stripPointerCasts())));
+ } break;
+ // And the following should never occur:
+ case TemplateArgument::TemplateExpansion:
+ case TemplateArgument::Null:
+ llvm_unreachable(
+ "These argument types shouldn't exist in concrete types");
+ }
+ }
+ return DBuilder.getOrCreateArray(TemplateParams);
+}
+
+llvm::DINodeArray
+CGDebugInfo::CollectFunctionTemplateParams(const FunctionDecl *FD,
+ llvm::DIFile *Unit) {
+ if (FD->getTemplatedKind() ==
+ FunctionDecl::TK_FunctionTemplateSpecialization) {
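+    // For example, `template <typename T> T twice(T v);` specialized as
+    // `twice<int>` takes this path and yields one template type parameter,
+    // "T" bound to `int`.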
+ const TemplateParameterList *TList = FD->getTemplateSpecializationInfo()
+ ->getTemplate()
+ ->getTemplateParameters();
+ return CollectTemplateParams(
+ TList, FD->getTemplateSpecializationArgs()->asArray(), Unit);
+ }
+ return llvm::DINodeArray();
+}
+
+llvm::DINodeArray CGDebugInfo::CollectCXXTemplateParams(
+ const ClassTemplateSpecializationDecl *TSpecial, llvm::DIFile *Unit) {
+ // Always get the full list of parameters, not just the ones from
+ // the specialization.
+ TemplateParameterList *TPList =
+ TSpecial->getSpecializedTemplate()->getTemplateParameters();
+ const TemplateArgumentList &TAList = TSpecial->getTemplateArgs();
+ return CollectTemplateParams(TPList, TAList.asArray(), Unit);
+}
+
+llvm::DIType *CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile *Unit) {
+ if (VTablePtrType)
+ return VTablePtrType;
+
+ ASTContext &Context = CGM.getContext();
+
+  // Function type.
+ llvm::Metadata *STy = getOrCreateType(Context.IntTy, Unit);
+ llvm::DITypeRefArray SElements = DBuilder.getOrCreateTypeArray(STy);
+ llvm::DIType *SubTy = DBuilder.createSubroutineType(SElements);
+ unsigned Size = Context.getTypeSize(Context.VoidPtrTy);
+ llvm::DIType *vtbl_ptr_type =
+ DBuilder.createPointerType(SubTy, Size, 0, "__vtbl_ptr_type");
+ VTablePtrType = DBuilder.createPointerType(vtbl_ptr_type, Size);
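+  // The result is effectively `int (**)()`: a pointer to the named
+  // "__vtbl_ptr_type", itself a pointer to a function returning int, which is
+  // the shape debuggers such as GDB expect for the vtable pointer.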
+ return VTablePtrType;
+}
+
+StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
+  // Copy the gdb-compatible name on the side and use its reference.
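+  // For example, for `class Widget { virtual void draw(); };` this returns
+  // "_vptr$Widget".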
+ return internString("_vptr$", RD->getNameAsString());
+}
+
+void CGDebugInfo::CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile *Unit,
+ SmallVectorImpl<llvm::Metadata *> &EltTys) {
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+
+ // If there is a primary base then it will hold vtable info.
+ if (RL.getPrimaryBase())
+ return;
+
+  // If this class is not dynamic, then there is no vtable info to collect.
+ if (!RD->isDynamicClass())
+ return;
+
+ unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
+ llvm::DIType *VPTR = DBuilder.createMemberType(
+ Unit, getVTableName(RD), Unit, 0, Size, 0, 0,
+ llvm::DINode::FlagArtificial, getOrCreateVTablePtrType(Unit));
+ EltTys.push_back(VPTR);
+}
+
+llvm::DIType *CGDebugInfo::getOrCreateRecordType(QualType RTy,
+ SourceLocation Loc) {
+ assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
+ llvm::DIType *T = getOrCreateType(RTy, getOrCreateFile(Loc));
+ return T;
+}
+
+llvm::DIType *CGDebugInfo::getOrCreateInterfaceType(QualType D,
+ SourceLocation Loc) {
+ return getOrCreateStandaloneType(D, Loc);
+}
+
+llvm::DIType *CGDebugInfo::getOrCreateStandaloneType(QualType D,
+ SourceLocation Loc) {
+ assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
+ assert(!D.isNull() && "null type");
+ llvm::DIType *T = getOrCreateType(D, getOrCreateFile(Loc));
+ assert(T && "could not create debug info for type");
+
+ // Composite types with UIDs were already retained by DIBuilder
+ // because they are only referenced by name in the IR.
+ if (auto *CTy = dyn_cast<llvm::DICompositeType>(T))
+ if (!CTy->getIdentifier().empty())
+ return T;
+ RetainedTypes.push_back(D.getAsOpaquePtr());
+ return T;
+}
+
+void CGDebugInfo::completeType(const EnumDecl *ED) {
+ if (DebugKind <= CodeGenOptions::DebugLineTablesOnly)
+ return;
+ QualType Ty = CGM.getContext().getEnumType(ED);
+ void *TyPtr = Ty.getAsOpaquePtr();
+ auto I = TypeCache.find(TyPtr);
+ if (I == TypeCache.end() || !cast<llvm::DIType>(I->second)->isForwardDecl())
+ return;
+ llvm::DIType *Res = CreateTypeDefinition(Ty->castAs<EnumType>());
+ assert(!Res->isForwardDecl());
+ TypeCache[TyPtr].reset(Res);
+}
+
+void CGDebugInfo::completeType(const RecordDecl *RD) {
+ if (DebugKind > CodeGenOptions::LimitedDebugInfo ||
+ !CGM.getLangOpts().CPlusPlus)
+ completeRequiredType(RD);
+}
+
+void CGDebugInfo::completeRequiredType(const RecordDecl *RD) {
+ if (DebugKind <= CodeGenOptions::DebugLineTablesOnly)
+ return;
+
+ if (const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD))
+ if (CXXDecl->isDynamicClass())
+ return;
+
+ if (DebugTypeExtRefs && RD->isFromASTFile())
+ return;
+
+ QualType Ty = CGM.getContext().getRecordType(RD);
+ llvm::DIType *T = getTypeOrNull(Ty);
+ if (T && T->isForwardDecl())
+ completeClassData(RD);
+}
+
+void CGDebugInfo::completeClassData(const RecordDecl *RD) {
+ if (DebugKind <= CodeGenOptions::DebugLineTablesOnly)
+ return;
+ QualType Ty = CGM.getContext().getRecordType(RD);
+ void *TyPtr = Ty.getAsOpaquePtr();
+ auto I = TypeCache.find(TyPtr);
+ if (I != TypeCache.end() && !cast<llvm::DIType>(I->second)->isForwardDecl())
+ return;
+ llvm::DIType *Res = CreateTypeDefinition(Ty->castAs<RecordType>());
+ assert(!Res->isForwardDecl());
+ TypeCache[TyPtr].reset(Res);
+}
+
+static bool hasExplicitMemberDefinition(CXXRecordDecl::method_iterator I,
+ CXXRecordDecl::method_iterator End) {
+ for (; I != End; ++I)
+ if (FunctionDecl *Tmpl = I->getInstantiatedFromMemberFunction())
+ if (!Tmpl->isImplicit() && Tmpl->isThisDeclarationADefinition() &&
+ !I->getMemberSpecializationInfo()->isExplicitSpecialization())
+ return true;
+ return false;
+}
+
+static bool shouldOmitDefinition(CodeGenOptions::DebugInfoKind DebugKind,
+ bool DebugTypeExtRefs,
+ const RecordDecl *RD,
+ const LangOptions &LangOpts) {
+ // Does the type exist in an imported clang module?
+ if (DebugTypeExtRefs && RD->isFromASTFile() && RD->getDefinition())
+ return true;
+
+ if (DebugKind > CodeGenOptions::LimitedDebugInfo)
+ return false;
+
+ if (!LangOpts.CPlusPlus)
+ return false;
+
+ if (!RD->isCompleteDefinitionRequired())
+ return true;
+
+ const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
+
+ if (!CXXDecl)
+ return false;
+
+ if (CXXDecl->hasDefinition() && CXXDecl->isDynamicClass())
+ return true;
+
+ TemplateSpecializationKind Spec = TSK_Undeclared;
+ if (const ClassTemplateSpecializationDecl *SD =
+ dyn_cast<ClassTemplateSpecializationDecl>(RD))
+ Spec = SD->getSpecializationKind();
+
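+  // For example, given `template <typename T> struct A { void f() {} };`, a
+  // TU containing only `extern template struct A<int>;` omits A<int>'s
+  // definition here: the explicit instantiation definition elsewhere will
+  // provide the full debug info for it.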
+ if (Spec == TSK_ExplicitInstantiationDeclaration &&
+ hasExplicitMemberDefinition(CXXDecl->method_begin(),
+ CXXDecl->method_end()))
+ return true;
+
+ return false;
+}
+
+llvm::DIType *CGDebugInfo::CreateType(const RecordType *Ty) {
+ RecordDecl *RD = Ty->getDecl();
+ llvm::DIType *T = cast_or_null<llvm::DIType>(getTypeOrNull(QualType(Ty, 0)));
+ if (T || shouldOmitDefinition(DebugKind, DebugTypeExtRefs, RD,
+ CGM.getLangOpts())) {
+ if (!T)
+ T = getOrCreateRecordFwdDecl(Ty, getDeclContextDescriptor(RD));
+ return T;
+ }
+
+ return CreateTypeDefinition(Ty);
+}
+
+llvm::DIType *CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
+ RecordDecl *RD = Ty->getDecl();
+
+ // Get overall information about the record type for the debug info.
+ llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation());
+
+  // Records, classes, and unions can all be recursive. To handle them, we
+ // first generate a debug descriptor for the struct as a forward declaration.
+ // Then (if it is a definition) we go through and get debug info for all of
+ // its members. Finally, we create a descriptor for the complete type (which
+ // may refer to the forward decl if the struct is recursive) and replace all
+ // uses of the forward declaration with the final definition.
+ llvm::DICompositeType *FwdDecl = getOrCreateLimitedType(Ty, DefUnit);
+
+ const RecordDecl *D = RD->getDefinition();
+ if (!D || !D->isCompleteDefinition())
+ return FwdDecl;
+
+ if (const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD))
+ CollectContainingType(CXXDecl, FwdDecl);
+
+ // Push the struct on region stack.
+ LexicalBlockStack.emplace_back(&*FwdDecl);
+ RegionMap[Ty->getDecl()].reset(FwdDecl);
+
+ // Convert all the elements.
+ SmallVector<llvm::Metadata *, 16> EltTys;
+  // FIXME: What about nested types?
+
+  // Note: The split of CXXDecl information here is intentional; the
+  // gdb tests depend on a certain ordering at printout. The debug
+  // information offsets would still be correct if we merged them all
+  // together, though.
+ const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
+ if (CXXDecl) {
+ CollectCXXBases(CXXDecl, DefUnit, EltTys, FwdDecl);
+ CollectVTableInfo(CXXDecl, DefUnit, EltTys);
+ }
+
+ // Collect data fields (including static variables and any initializers).
+ CollectRecordFields(RD, DefUnit, EltTys, FwdDecl);
+ if (CXXDecl)
+ CollectCXXMemberFunctions(CXXDecl, DefUnit, EltTys, FwdDecl);
+
+ LexicalBlockStack.pop_back();
+ RegionMap.erase(Ty->getDecl());
+
+ llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys);
+ DBuilder.replaceArrays(FwdDecl, Elements);
+
+ if (FwdDecl->isTemporary())
+ FwdDecl =
+ llvm::MDNode::replaceWithPermanent(llvm::TempDICompositeType(FwdDecl));
+
+ RegionMap[Ty->getDecl()].reset(FwdDecl);
+ return FwdDecl;
+}
+
+llvm::DIType *CGDebugInfo::CreateType(const ObjCObjectType *Ty,
+ llvm::DIFile *Unit) {
+ // Ignore protocols.
+ return getOrCreateType(Ty->getBaseType(), Unit);
+}
+
+/// \return true if Getter has the default name for the property PD.
+static bool hasDefaultGetterName(const ObjCPropertyDecl *PD,
+ const ObjCMethodDecl *Getter) {
+ assert(PD);
+ if (!Getter)
+ return true;
+
+ assert(Getter->getDeclName().isObjCZeroArgSelector());
+ return PD->getName() ==
+ Getter->getDeclName().getObjCSelector().getNameForSlot(0);
+}
+
+/// \return true if Setter has the default name for the property PD.
+static bool hasDefaultSetterName(const ObjCPropertyDecl *PD,
+ const ObjCMethodDecl *Setter) {
+ assert(PD);
+ if (!Setter)
+ return true;
+
+ assert(Setter->getDeclName().isObjCOneArgSelector());
+ return SelectorTable::constructSetterName(PD->getName()) ==
+ Setter->getDeclName().getObjCSelector().getNameForSlot(0);
+}
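+// For example, for `@property int count;` the default getter selector is
+// `count` and the default setter is `setCount:`; the property metadata
+// emitted below records accessor names only when they differ from these
+// defaults.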
+
+llvm::DIType *CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
+ llvm::DIFile *Unit) {
+ ObjCInterfaceDecl *ID = Ty->getDecl();
+ if (!ID)
+ return nullptr;
+
+ // Return a forward declaration if this type was imported from a clang module.
+ if (DebugTypeExtRefs && ID->isFromASTFile() && ID->getDefinition())
+ return DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ ID->getName(),
+ getDeclContextDescriptor(ID), Unit, 0);
+
+ // Get overall information about the record type for the debug info.
+ llvm::DIFile *DefUnit = getOrCreateFile(ID->getLocation());
+ unsigned Line = getLineNumber(ID->getLocation());
+ auto RuntimeLang =
+ static_cast<llvm::dwarf::SourceLanguage>(TheCU->getSourceLanguage());
+
+  // If this is just a forward declaration, return a special forward-declaration
+  // debug type, since we won't be able to lay out the entire type.
+ ObjCInterfaceDecl *Def = ID->getDefinition();
+ if (!Def || !Def->getImplementation()) {
+ llvm::DIScope *Mod = getParentModuleOrNull(ID);
+ llvm::DIType *FwdDecl = DBuilder.createReplaceableCompositeType(
+ llvm::dwarf::DW_TAG_structure_type, ID->getName(), Mod ? Mod : TheCU,
+ DefUnit, Line, RuntimeLang);
+ ObjCInterfaceCache.push_back(ObjCInterfaceCacheEntry(Ty, FwdDecl, Unit));
+ return FwdDecl;
+ }
+
+ return CreateTypeDefinition(Ty, Unit);
+}
+
+llvm::DIModule *
+CGDebugInfo::getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod,
+ bool CreateSkeletonCU) {
+ // Use the Module pointer as the key into the cache. This is a
+ // nullptr if the "Module" is a PCH, which is safe because we don't
+ // support chained PCH debug info, so there can only be a single PCH.
+ const Module *M = Mod.getModuleOrNull();
+ auto ModRef = ModuleCache.find(M);
+ if (ModRef != ModuleCache.end())
+ return cast<llvm::DIModule>(ModRef->second);
+
+ // Macro definitions that were defined with "-D" on the command line.
+ SmallString<128> ConfigMacros;
+ {
+ llvm::raw_svector_ostream OS(ConfigMacros);
+ const auto &PPOpts = CGM.getPreprocessorOpts();
+ unsigned I = 0;
+    // Translate the macro definitions back into a command line.
+ for (auto &M : PPOpts.Macros) {
+ if (++I > 1)
+ OS << " ";
+ const std::string &Macro = M.first;
+ bool Undef = M.second;
+ OS << "\"-" << (Undef ? 'U' : 'D');
+ for (char c : Macro)
+ switch (c) {
+ case '\\' : OS << "\\\\"; break;
+ case '"' : OS << "\\\""; break;
+ default: OS << c;
+ }
+ OS << '\"';
+ }
+ }
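+  // For example, PPOpts.Macros == {{"DEBUG=1", false}, {"NDEBUG", true}}
+  // round-trips to: "-DDEBUG=1" "-UNDEBUG".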
+
+ bool IsRootModule = M ? !M->Parent : true;
+ if (CreateSkeletonCU && IsRootModule) {
+ llvm::DIBuilder DIB(CGM.getModule());
+ DIB.createCompileUnit(TheCU->getSourceLanguage(), Mod.getModuleName(),
+ Mod.getPath(), TheCU->getProducer(), true,
+ StringRef(), 0, Mod.getASTFile(),
+ llvm::DIBuilder::FullDebug, Mod.getSignature());
+ DIB.finalize();
+ }
+ llvm::DIModule *Parent =
+ IsRootModule ? nullptr
+ : getOrCreateModuleRef(
+ ExternalASTSource::ASTSourceDescriptor(*M->Parent),
+ CreateSkeletonCU);
+ llvm::DIModule *DIMod =
+ DBuilder.createModule(Parent, Mod.getModuleName(), ConfigMacros,
+ Mod.getPath(), CGM.getHeaderSearchOpts().Sysroot);
+ ModuleCache[M].reset(DIMod);
+ return DIMod;
+}
+
+llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
+ llvm::DIFile *Unit) {
+ ObjCInterfaceDecl *ID = Ty->getDecl();
+ llvm::DIFile *DefUnit = getOrCreateFile(ID->getLocation());
+ unsigned Line = getLineNumber(ID->getLocation());
+ unsigned RuntimeLang = TheCU->getSourceLanguage();
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ unsigned Flags = 0;
+ if (ID->getImplementation())
+ Flags |= llvm::DINode::FlagObjcClassComplete;
+
+ llvm::DIScope *Mod = getParentModuleOrNull(ID);
+ llvm::DICompositeType *RealDecl = DBuilder.createStructType(
+ Mod ? Mod : Unit, ID->getName(), DefUnit, Line, Size, Align, Flags,
+ nullptr, llvm::DINodeArray(), RuntimeLang);
+
+ QualType QTy(Ty, 0);
+ TypeCache[QTy.getAsOpaquePtr()].reset(RealDecl);
+
+ // Push the struct on region stack.
+ LexicalBlockStack.emplace_back(RealDecl);
+ RegionMap[Ty->getDecl()].reset(RealDecl);
+
+ // Convert all the elements.
+ SmallVector<llvm::Metadata *, 16> EltTys;
+
+ ObjCInterfaceDecl *SClass = ID->getSuperClass();
+ if (SClass) {
+ llvm::DIType *SClassTy =
+ getOrCreateType(CGM.getContext().getObjCInterfaceType(SClass), Unit);
+ if (!SClassTy)
+ return nullptr;
+
+ llvm::DIType *InhTag = DBuilder.createInheritance(RealDecl, SClassTy, 0, 0);
+ EltTys.push_back(InhTag);
+ }
+
+ // Create entries for all of the properties.
+ auto AddProperty = [&](const ObjCPropertyDecl *PD) {
+ SourceLocation Loc = PD->getLocation();
+ llvm::DIFile *PUnit = getOrCreateFile(Loc);
+ unsigned PLine = getLineNumber(Loc);
+ ObjCMethodDecl *Getter = PD->getGetterMethodDecl();
+ ObjCMethodDecl *Setter = PD->getSetterMethodDecl();
+ llvm::MDNode *PropertyNode = DBuilder.createObjCProperty(
+ PD->getName(), PUnit, PLine,
+ hasDefaultGetterName(PD, Getter) ? ""
+ : getSelectorName(PD->getGetterName()),
+ hasDefaultSetterName(PD, Setter) ? ""
+ : getSelectorName(PD->getSetterName()),
+ PD->getPropertyAttributes(), getOrCreateType(PD->getType(), PUnit));
+ EltTys.push_back(PropertyNode);
+ };
+ {
+ llvm::SmallPtrSet<const IdentifierInfo*, 16> PropertySet;
+ for (const ObjCCategoryDecl *ClassExt : ID->known_extensions())
+ for (auto *PD : ClassExt->properties()) {
+ PropertySet.insert(PD->getIdentifier());
+ AddProperty(PD);
+ }
+ for (const auto *PD : ID->properties()) {
+ // Don't emit duplicate metadata for properties that were already in a
+ // class extension.
+ if (!PropertySet.insert(PD->getIdentifier()).second)
+ continue;
+ AddProperty(PD);
+ }
+ }
+
+ const ASTRecordLayout &RL = CGM.getContext().getASTObjCInterfaceLayout(ID);
+ unsigned FieldNo = 0;
+ for (ObjCIvarDecl *Field = ID->all_declared_ivar_begin(); Field;
+ Field = Field->getNextIvar(), ++FieldNo) {
+ llvm::DIType *FieldTy = getOrCreateType(Field->getType(), Unit);
+ if (!FieldTy)
+ return nullptr;
+
+ StringRef FieldName = Field->getName();
+
+ // Ignore unnamed fields.
+ if (FieldName.empty())
+ continue;
+
+ // Get the location for the field.
+ llvm::DIFile *FieldDefUnit = getOrCreateFile(Field->getLocation());
+ unsigned FieldLine = getLineNumber(Field->getLocation());
+ QualType FType = Field->getType();
+ uint64_t FieldSize = 0;
+ unsigned FieldAlign = 0;
+
+ if (!FType->isIncompleteArrayType()) {
+
+ // Bit size, align and offset of the type.
+ FieldSize = Field->isBitField()
+ ? Field->getBitWidthValue(CGM.getContext())
+ : CGM.getContext().getTypeSize(FType);
+ FieldAlign = CGM.getContext().getTypeAlign(FType);
+ }
+
+ uint64_t FieldOffset;
+ if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) {
+ // We don't know the runtime offset of an ivar if we're using the
+ // non-fragile ABI. For bitfields, use the bit offset into the first
+ // byte of storage of the bitfield. For other fields, use zero.
+ if (Field->isBitField()) {
+ FieldOffset =
+ CGM.getObjCRuntime().ComputeBitfieldBitOffset(CGM, ID, Field);
+ FieldOffset %= CGM.getContext().getCharWidth();
+ } else {
+ FieldOffset = 0;
+ }
+ } else {
+ FieldOffset = RL.getFieldOffset(FieldNo);
+ }
+
+ unsigned Flags = 0;
+ if (Field->getAccessControl() == ObjCIvarDecl::Protected)
+ Flags = llvm::DINode::FlagProtected;
+ else if (Field->getAccessControl() == ObjCIvarDecl::Private)
+ Flags = llvm::DINode::FlagPrivate;
+ else if (Field->getAccessControl() == ObjCIvarDecl::Public)
+ Flags = llvm::DINode::FlagPublic;
+
+ llvm::MDNode *PropertyNode = nullptr;
+ if (ObjCImplementationDecl *ImpD = ID->getImplementation()) {
+ if (ObjCPropertyImplDecl *PImpD =
+ ImpD->FindPropertyImplIvarDecl(Field->getIdentifier())) {
+ if (ObjCPropertyDecl *PD = PImpD->getPropertyDecl()) {
+ SourceLocation Loc = PD->getLocation();
+ llvm::DIFile *PUnit = getOrCreateFile(Loc);
+ unsigned PLine = getLineNumber(Loc);
+ ObjCMethodDecl *Getter = PD->getGetterMethodDecl();
+ ObjCMethodDecl *Setter = PD->getSetterMethodDecl();
+ PropertyNode = DBuilder.createObjCProperty(
+ PD->getName(), PUnit, PLine,
+ hasDefaultGetterName(PD, Getter) ? "" : getSelectorName(
+ PD->getGetterName()),
+ hasDefaultSetterName(PD, Setter) ? "" : getSelectorName(
+ PD->getSetterName()),
+ PD->getPropertyAttributes(),
+ getOrCreateType(PD->getType(), PUnit));
+ }
+ }
+ }
+ FieldTy = DBuilder.createObjCIVar(FieldName, FieldDefUnit, FieldLine,
+ FieldSize, FieldAlign, FieldOffset, Flags,
+ FieldTy, PropertyNode);
+ EltTys.push_back(FieldTy);
+ }
+
+ llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys);
+ DBuilder.replaceArrays(RealDecl, Elements);
+
+ LexicalBlockStack.pop_back();
+ return RealDecl;
+}
+
+llvm::DIType *CGDebugInfo::CreateType(const VectorType *Ty,
+ llvm::DIFile *Unit) {
+ llvm::DIType *ElementTy = getOrCreateType(Ty->getElementType(), Unit);
+ int64_t Count = Ty->getNumElements();
+ if (Count == 0)
+    // If the number of elements is not known, then this is an unbounded
+    // array. Use Count == -1 to express such arrays.
+ Count = -1;
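+  // For example, `typedef int v4si __attribute__((vector_size(16)));` has
+  // four int elements, producing a subrange with lower bound 0 and count 4.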
+
+ llvm::Metadata *Subscript = DBuilder.getOrCreateSubrange(0, Count);
+ llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscript);
+
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ return DBuilder.createVectorType(Size, Align, ElementTy, SubscriptArray);
+}
+
+llvm::DIType *CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile *Unit) {
+ uint64_t Size;
+ uint64_t Align;
+
+ // FIXME: make getTypeAlign() aware of VLAs and incomplete array types
+ if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(Ty)) {
+ Size = 0;
+ Align =
+ CGM.getContext().getTypeAlign(CGM.getContext().getBaseElementType(VAT));
+ } else if (Ty->isIncompleteArrayType()) {
+ Size = 0;
+ if (Ty->getElementType()->isIncompleteType())
+ Align = 0;
+ else
+ Align = CGM.getContext().getTypeAlign(Ty->getElementType());
+ } else if (Ty->isIncompleteType()) {
+ Size = 0;
+ Align = 0;
+ } else {
+ // Size and align of the whole array, not the element type.
+ Size = CGM.getContext().getTypeSize(Ty);
+ Align = CGM.getContext().getTypeAlign(Ty);
+ }
+
+  // Add the dimensions of the array. FIXME: This loses CV qualifiers from
+  // interior arrays; do we care? Why aren't nested arrays represented the
+  // obvious/recursive way?
+ SmallVector<llvm::Metadata *, 8> Subscripts;
+ QualType EltTy(Ty, 0);
+ while ((Ty = dyn_cast<ArrayType>(EltTy))) {
+ // If the number of elements is known, then count is that number. Otherwise,
+ // it's -1. This allows us to represent a subrange with an array of 0
+ // elements, like this:
+ //
+ // struct foo {
+ // int x[0];
+ // };
+ int64_t Count = -1; // Count == -1 is an unbounded array.
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
+ Count = CAT->getSize().getZExtValue();
+
+ // FIXME: Verify this is right for VLAs.
+ Subscripts.push_back(DBuilder.getOrCreateSubrange(0, Count));
+ EltTy = Ty->getElementType();
+ }
+
+ llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscripts);
+
+ return DBuilder.createArrayType(Size, Align, getOrCreateType(EltTy, Unit),
+ SubscriptArray);
+}
+
+llvm::DIType *CGDebugInfo::CreateType(const LValueReferenceType *Ty,
+ llvm::DIFile *Unit) {
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_reference_type, Ty,
+ Ty->getPointeeType(), Unit);
+}
+
+llvm::DIType *CGDebugInfo::CreateType(const RValueReferenceType *Ty,
+ llvm::DIFile *Unit) {
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_rvalue_reference_type, Ty,
+ Ty->getPointeeType(), Unit);
+}
+
+llvm::DIType *CGDebugInfo::CreateType(const MemberPointerType *Ty,
+ llvm::DIFile *U) {
+ uint64_t Size =
+ !Ty->isIncompleteType() ? CGM.getContext().getTypeSize(Ty) : 0;
+ llvm::DIType *ClassType = getOrCreateType(QualType(Ty->getClass(), 0), U);
+ if (Ty->isMemberDataPointerType())
+ return DBuilder.createMemberPointerType(
+ getOrCreateType(Ty->getPointeeType(), U), ClassType, Size);
+
+ const FunctionProtoType *FPT =
+ Ty->getPointeeType()->getAs<FunctionProtoType>();
+ return DBuilder.createMemberPointerType(
+ getOrCreateInstanceMethodType(CGM.getContext().getPointerType(QualType(
+ Ty->getClass(), FPT->getTypeQuals())),
+ FPT, U),
+ ClassType, Size);
+}
+
+llvm::DIType *CGDebugInfo::CreateType(const AtomicType *Ty, llvm::DIFile *U) {
+ // Ignore the atomic wrapping
+ // FIXME: What is the correct representation?
+ return getOrCreateType(Ty->getValueType(), U);
+}
+
+llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) {
+ const EnumDecl *ED = Ty->getDecl();
+
+ uint64_t Size = 0;
+ uint64_t Align = 0;
+ if (!ED->getTypeForDecl()->isIncompleteType()) {
+ Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
+ Align = CGM.getContext().getTypeAlign(ED->getTypeForDecl());
+ }
+
+ SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
+
+ bool isImportedFromModule =
+ DebugTypeExtRefs && ED->isFromASTFile() && ED->getDefinition();
+
+ // If this is just a forward declaration, construct an appropriately
+ // marked node and just return it.
+ if (isImportedFromModule || !ED->getDefinition()) {
+ llvm::DIScope *EDContext = getDeclContextDescriptor(ED);
+ llvm::DIFile *DefUnit = getOrCreateFile(ED->getLocation());
+ unsigned Line = getLineNumber(ED->getLocation());
+ StringRef EDName = ED->getName();
+ llvm::DIType *RetTy = DBuilder.createReplaceableCompositeType(
+ llvm::dwarf::DW_TAG_enumeration_type, EDName, EDContext, DefUnit, Line,
+ 0, Size, Align, llvm::DINode::FlagFwdDecl, FullName);
+ ReplaceMap.emplace_back(
+ std::piecewise_construct, std::make_tuple(Ty),
+ std::make_tuple(static_cast<llvm::Metadata *>(RetTy)));
+ return RetTy;
+ }
+
+ return CreateTypeDefinition(Ty);
+}
+
+llvm::DIType *CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) {
+ const EnumDecl *ED = Ty->getDecl();
+ uint64_t Size = 0;
+ uint64_t Align = 0;
+ if (!ED->getTypeForDecl()->isIncompleteType()) {
+ Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
+ Align = CGM.getContext().getTypeAlign(ED->getTypeForDecl());
+ }
+
+ SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
+
+ // Create elements for each enumerator.
+ SmallVector<llvm::Metadata *, 16> Enumerators;
+ ED = ED->getDefinition();
+ for (const auto *Enum : ED->enumerators()) {
+ Enumerators.push_back(DBuilder.createEnumerator(
+ Enum->getName(), Enum->getInitVal().getSExtValue()));
+ }
+
+ // Return a CompositeType for the enum itself.
+ llvm::DINodeArray EltArray = DBuilder.getOrCreateArray(Enumerators);
+
+ llvm::DIFile *DefUnit = getOrCreateFile(ED->getLocation());
+ unsigned Line = getLineNumber(ED->getLocation());
+ llvm::DIScope *EnumContext = getDeclContextDescriptor(ED);
+ llvm::DIType *ClassTy =
+ ED->isFixed() ? getOrCreateType(ED->getIntegerType(), DefUnit) : nullptr;
+ return DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit,
+ Line, Size, Align, EltArray, ClassTy,
+ FullName);
+}
+
+static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
+ Qualifiers Quals;
+ do {
+ Qualifiers InnerQuals = T.getLocalQualifiers();
+ // Qualifiers::operator+() doesn't like it if you add a Qualifier
+ // that is already there.
+ Quals += Qualifiers::removeCommonQualifiers(Quals, InnerQuals);
+ Quals += InnerQuals;
+ QualType LastT = T;
+ switch (T->getTypeClass()) {
+ default:
+ return C.getQualifiedType(T.getTypePtr(), Quals);
+ case Type::TemplateSpecialization: {
+ const auto *Spec = cast<TemplateSpecializationType>(T);
+ if (Spec->isTypeAlias())
+ return C.getQualifiedType(T.getTypePtr(), Quals);
+ T = Spec->desugar();
+ break;
+ }
+ case Type::TypeOfExpr:
+ T = cast<TypeOfExprType>(T)->getUnderlyingExpr()->getType();
+ break;
+ case Type::TypeOf:
+ T = cast<TypeOfType>(T)->getUnderlyingType();
+ break;
+ case Type::Decltype:
+ T = cast<DecltypeType>(T)->getUnderlyingType();
+ break;
+ case Type::UnaryTransform:
+ T = cast<UnaryTransformType>(T)->getUnderlyingType();
+ break;
+ case Type::Attributed:
+ T = cast<AttributedType>(T)->getEquivalentType();
+ break;
+ case Type::Elaborated:
+ T = cast<ElaboratedType>(T)->getNamedType();
+ break;
+ case Type::Paren:
+ T = cast<ParenType>(T)->getInnerType();
+ break;
+ case Type::SubstTemplateTypeParm:
+ T = cast<SubstTemplateTypeParmType>(T)->getReplacementType();
+ break;
+ case Type::Auto:
+ QualType DT = cast<AutoType>(T)->getDeducedType();
+ assert(!DT.isNull() && "Undeduced types shouldn't reach here.");
+ T = DT;
+ break;
+ }
+
+ assert(T != LastT && "Type unwrapping failed to unwrap!");
+ (void)LastT;
+ } while (true);
+}
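+// For example, `decltype(1.0) d = 1.0;` reaches the Decltype case above and
+// unwraps to plain `double`; any qualifiers collected along the way are
+// reapplied via C.getQualifiedType once the loop terminates.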
+
+llvm::DIType *CGDebugInfo::getTypeOrNull(QualType Ty) {
+
+ // Unwrap the type as needed for debug information.
+ Ty = UnwrapTypeForDebugInfo(Ty, CGM.getContext());
+
+ auto it = TypeCache.find(Ty.getAsOpaquePtr());
+ if (it != TypeCache.end()) {
+ // Verify that the debug info still exists.
+ if (llvm::Metadata *V = it->second)
+ return cast<llvm::DIType>(V);
+ }
+
+ return nullptr;
+}
+
+void CGDebugInfo::completeTemplateDefinition(
+ const ClassTemplateSpecializationDecl &SD) {
+ if (DebugKind <= CodeGenOptions::DebugLineTablesOnly)
+ return;
+
+ completeClassData(&SD);
+  // In case this type has no member function definitions being emitted, ensure
+  // it is retained.
+ RetainedTypes.push_back(CGM.getContext().getRecordType(&SD).getAsOpaquePtr());
+}
+
+llvm::DIType *CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile *Unit) {
+ if (Ty.isNull())
+ return nullptr;
+
+ // Unwrap the type as needed for debug information.
+ Ty = UnwrapTypeForDebugInfo(Ty, CGM.getContext());
+
+ if (auto *T = getTypeOrNull(Ty))
+ return T;
+
+ llvm::DIType *Res = CreateTypeNode(Ty, Unit);
+ void* TyPtr = Ty.getAsOpaquePtr();
+
+ // And update the type cache.
+ TypeCache[TyPtr].reset(Res);
+
+ return Res;
+}
+
+llvm::DIModule *CGDebugInfo::getParentModuleOrNull(const Decl *D) {
+ // A forward declaration inside a module header does not belong to the module.
+ if (isa<RecordDecl>(D) && !cast<RecordDecl>(D)->getDefinition())
+ return nullptr;
+ if (DebugTypeExtRefs && D->isFromASTFile()) {
+ // Record a reference to an imported clang module or precompiled header.
+ auto *Reader = CGM.getContext().getExternalSource();
+ auto Idx = D->getOwningModuleID();
+ auto Info = Reader->getSourceDescriptor(Idx);
+ if (Info)
+ return getOrCreateModuleRef(*Info, /*SkeletonCU=*/true);
+ } else if (ClangModuleMap) {
+ // We are building a clang module or a precompiled header.
+ //
+ // TODO: When D is a CXXRecordDecl or a C++ Enum, the ODR applies
+ // and it wouldn't be necessary to specify the parent scope
+ // because the type is already unique by definition (it would look
+ // like the output of -fno-standalone-debug). On the other hand,
+ // the parent scope helps a consumer to quickly locate the object
+ // file where the type's definition is located, so it might be
+ // best to make this behavior a command line or debugger tuning
+ // option.
+ FullSourceLoc Loc(D->getLocation(), CGM.getContext().getSourceManager());
+ if (Module *M = ClangModuleMap->inferModuleFromLocation(Loc)) {
+ auto Info = ExternalASTSource::ASTSourceDescriptor(*M);
+ return getOrCreateModuleRef(Info, /*SkeletonCU=*/false);
+ }
+ }
+
+ return nullptr;
+}
+
+llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
+ // Handle qualifiers, which recursively handles what they refer to.
+ if (Ty.hasLocalQualifiers())
+ return CreateQualifiedType(Ty, Unit);
+
+ // Work out details of type.
+ switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Dependent types cannot show up in debug information");
+
+ case Type::ExtVector:
+ case Type::Vector:
+ return CreateType(cast<VectorType>(Ty), Unit);
+ case Type::ObjCObjectPointer:
+ return CreateType(cast<ObjCObjectPointerType>(Ty), Unit);
+ case Type::ObjCObject:
+ return CreateType(cast<ObjCObjectType>(Ty), Unit);
+ case Type::ObjCInterface:
+ return CreateType(cast<ObjCInterfaceType>(Ty), Unit);
+ case Type::Builtin:
+ return CreateType(cast<BuiltinType>(Ty));
+ case Type::Complex:
+ return CreateType(cast<ComplexType>(Ty));
+ case Type::Pointer:
+ return CreateType(cast<PointerType>(Ty), Unit);
+ case Type::Adjusted:
+ case Type::Decayed:
+ // Decayed and adjusted types use the adjusted type in LLVM and DWARF.
+ return CreateType(
+ cast<PointerType>(cast<AdjustedType>(Ty)->getAdjustedType()), Unit);
+ case Type::BlockPointer:
+ return CreateType(cast<BlockPointerType>(Ty), Unit);
+ case Type::Typedef:
+ return CreateType(cast<TypedefType>(Ty), Unit);
+ case Type::Record:
+ return CreateType(cast<RecordType>(Ty));
+ case Type::Enum:
+ return CreateEnumType(cast<EnumType>(Ty));
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ return CreateType(cast<FunctionType>(Ty), Unit);
+ case Type::ConstantArray:
+ case Type::VariableArray:
+ case Type::IncompleteArray:
+ return CreateType(cast<ArrayType>(Ty), Unit);
+
+ case Type::LValueReference:
+ return CreateType(cast<LValueReferenceType>(Ty), Unit);
+ case Type::RValueReference:
+ return CreateType(cast<RValueReferenceType>(Ty), Unit);
+
+ case Type::MemberPointer:
+ return CreateType(cast<MemberPointerType>(Ty), Unit);
+
+ case Type::Atomic:
+ return CreateType(cast<AtomicType>(Ty), Unit);
+
+ case Type::TemplateSpecialization:
+ return CreateType(cast<TemplateSpecializationType>(Ty), Unit);
+
+ case Type::Auto:
+ case Type::Attributed:
+ case Type::Elaborated:
+ case Type::Paren:
+ case Type::SubstTemplateTypeParm:
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::Decltype:
+ case Type::UnaryTransform:
+ case Type::PackExpansion:
+ break;
+ }
+
+ llvm_unreachable("type should have been unwrapped!");
+}
+
+llvm::DICompositeType *CGDebugInfo::getOrCreateLimitedType(const RecordType *Ty,
+ llvm::DIFile *Unit) {
+ QualType QTy(Ty, 0);
+
+ auto *T = cast_or_null<llvm::DICompositeType>(getTypeOrNull(QTy));
+
+ // We may have cached a forward decl when we could have created
+ // a non-forward decl. Go ahead and create a non-forward decl
+ // now.
+ if (T && !T->isForwardDecl())
+ return T;
+
+ // Otherwise create the type.
+ llvm::DICompositeType *Res = CreateLimitedType(Ty);
+
+ // Propagate members from the declaration to the definition
+ // CreateType(const RecordType*) will overwrite this with the members in the
+ // correct order if the full type is needed.
+ DBuilder.replaceArrays(Res, T ? T->getElements() : llvm::DINodeArray());
+
+ // And update the type cache.
+ TypeCache[QTy.getAsOpaquePtr()].reset(Res);
+ return Res;
+}
+
+// TODO: Currently used for context chains when limiting debug info.
+llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
+ RecordDecl *RD = Ty->getDecl();
+
+ // Get overall information about the record type for the debug info.
+ llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation());
+ unsigned Line = getLineNumber(RD->getLocation());
+ StringRef RDName = getClassName(RD);
+
+ llvm::DIScope *RDContext = getDeclContextDescriptor(RD);
+
+ // If we ended up creating the type during the context chain construction,
+ // just return that.
+ auto *T = cast_or_null<llvm::DICompositeType>(
+ getTypeOrNull(CGM.getContext().getRecordType(RD)));
+ if (T && (!T->isForwardDecl() || !RD->getDefinition()))
+ return T;
+
+ // If this is just a forward or incomplete declaration, construct an
+ // appropriately marked node and just return it.
+ const RecordDecl *D = RD->getDefinition();
+ if (!D || !D->isCompleteDefinition())
+ return getOrCreateRecordFwdDecl(Ty, RDContext);
+
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
+
+ llvm::DICompositeType *RealDecl = DBuilder.createReplaceableCompositeType(
+ getTagForRecord(RD), RDName, RDContext, DefUnit, Line, 0, Size, Align, 0,
+ FullName);
+
+ RegionMap[Ty->getDecl()].reset(RealDecl);
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()].reset(RealDecl);
+
+ if (const ClassTemplateSpecializationDecl *TSpecial =
+ dyn_cast<ClassTemplateSpecializationDecl>(RD))
+ DBuilder.replaceArrays(RealDecl, llvm::DINodeArray(),
+ CollectCXXTemplateParams(TSpecial, DefUnit));
+ return RealDecl;
+}
+
+void CGDebugInfo::CollectContainingType(const CXXRecordDecl *RD,
+ llvm::DICompositeType *RealDecl) {
+ // A class's primary base or the class itself contains the vtable.
+ llvm::DICompositeType *ContainingType = nullptr;
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) {
+ // Seek non-virtual primary base root.
+ while (1) {
+ const ASTRecordLayout &BRL = CGM.getContext().getASTRecordLayout(PBase);
+ const CXXRecordDecl *PBT = BRL.getPrimaryBase();
+ if (PBT && !BRL.isPrimaryBaseVirtual())
+ PBase = PBT;
+ else
+ break;
+ }
+ ContainingType = cast<llvm::DICompositeType>(
+ getOrCreateType(QualType(PBase->getTypeForDecl(), 0),
+ getOrCreateFile(RD->getLocation())));
+ } else if (RD->isDynamicClass())
+ ContainingType = RealDecl;
+
+ DBuilder.replaceVTableHolder(RealDecl, ContainingType);
+}
+
+llvm::DIType *CGDebugInfo::CreateMemberType(llvm::DIFile *Unit, QualType FType,
+ StringRef Name, uint64_t *Offset) {
+ llvm::DIType *FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ uint64_t FieldSize = CGM.getContext().getTypeSize(FType);
+ unsigned FieldAlign = CGM.getContext().getTypeAlign(FType);
+ llvm::DIType *Ty = DBuilder.createMemberType(Unit, Name, Unit, 0, FieldSize,
+ FieldAlign, *Offset, 0, FieldTy);
+ *Offset += FieldSize;
+ return Ty;
+}
+
+void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit,
+ StringRef &Name,
+ StringRef &LinkageName,
+ llvm::DIScope *&FDContext,
+ llvm::DINodeArray &TParamsArray,
+ unsigned &Flags) {
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+ Name = getFunctionName(FD);
+ // Use mangled name as linkage name for C/C++ functions.
+ if (FD->hasPrototype()) {
+ LinkageName = CGM.getMangledName(GD);
+ Flags |= llvm::DINode::FlagPrototyped;
+ }
+  // No need to replicate the linkage name if it isn't different from the
+  // subprogram name; no need to have it at all unless coverage is enabled or
+  // debug is set to more than just line tables.
+ if (LinkageName == Name ||
+ (!CGM.getCodeGenOpts().EmitGcovArcs &&
+ !CGM.getCodeGenOpts().EmitGcovNotes &&
+ DebugKind <= CodeGenOptions::DebugLineTablesOnly))
+ LinkageName = StringRef();
+
+ if (DebugKind >= CodeGenOptions::LimitedDebugInfo) {
+ if (const NamespaceDecl *NSDecl =
+ dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
+ FDContext = getOrCreateNameSpace(NSDecl);
+ else if (const RecordDecl *RDecl =
+ dyn_cast_or_null<RecordDecl>(FD->getDeclContext())) {
+ llvm::DIScope *Mod = getParentModuleOrNull(RDecl);
+ FDContext = getContextDescriptor(RDecl, Mod ? Mod : TheCU);
+ }
+ // Collect template parameters.
+ TParamsArray = CollectFunctionTemplateParams(FD, Unit);
+ }
+}
+
+void CGDebugInfo::collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit,
+ unsigned &LineNo, QualType &T,
+ StringRef &Name, StringRef &LinkageName,
+ llvm::DIScope *&VDContext) {
+ Unit = getOrCreateFile(VD->getLocation());
+ LineNo = getLineNumber(VD->getLocation());
+
+ setLocation(VD->getLocation());
+
+ T = VD->getType();
+ if (T->isIncompleteArrayType()) {
+    // CodeGen turns int[] into int[1], so we'll do the same here.
+ llvm::APInt ConstVal(32, 1);
+ QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
+
+ T = CGM.getContext().getConstantArrayType(ET, ConstVal,
+ ArrayType::Normal, 0);
+ }
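+  // For example, a tentative `int data[];` definition at file scope is
+  // described in the debug info with type `int [1]`.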
+
+ Name = VD->getName();
+ if (VD->getDeclContext() && !isa<FunctionDecl>(VD->getDeclContext()) &&
+ !isa<ObjCMethodDecl>(VD->getDeclContext()))
+ LinkageName = CGM.getMangledName(VD);
+ if (LinkageName == Name)
+ LinkageName = StringRef();
+
+  // Since we emit declarations (DW_AT_members) for static members, place the
+  // definition of those static members in the namespace in which they were
+  // declared in the source code (the lexical decl context).
+ // FIXME: Generalize this for even non-member global variables where the
+ // declaration and definition may have different lexical decl contexts, once
+ // we have support for emitting declarations of (non-member) global variables.
+ const DeclContext *DC = VD->isStaticDataMember() ? VD->getLexicalDeclContext()
+ : VD->getDeclContext();
+ // When a record type contains an in-line initialization of a static data
+ // member, and the record type is marked as __declspec(dllexport), an implicit
+ // definition of the member will be created in the record context. DWARF
+ // doesn't seem to have a nice way to describe this in a form that consumers
+ // are likely to understand, so fake the "normal" situation of a definition
+ // outside the class by putting it in the global scope.
+ if (DC->isRecord())
+ DC = CGM.getContext().getTranslationUnitDecl();
+
+ llvm::DIScope *Mod = getParentModuleOrNull(VD);
+ VDContext = getContextDescriptor(cast<Decl>(DC), Mod ? Mod : TheCU);
+}
+
+llvm::DISubprogram *
+CGDebugInfo::getFunctionForwardDeclaration(const FunctionDecl *FD) {
+ llvm::DINodeArray TParamsArray;
+ StringRef Name, LinkageName;
+ unsigned Flags = 0;
+ SourceLocation Loc = FD->getLocation();
+ llvm::DIFile *Unit = getOrCreateFile(Loc);
+ llvm::DIScope *DContext = Unit;
+ unsigned Line = getLineNumber(Loc);
+
+ collectFunctionDeclProps(FD, Unit, Name, LinkageName, DContext,
+ TParamsArray, Flags);
+ // Build function type.
+ SmallVector<QualType, 16> ArgTypes;
+ for (const ParmVarDecl *Parm: FD->parameters())
+ ArgTypes.push_back(Parm->getType());
+ QualType FnType =
+ CGM.getContext().getFunctionType(FD->getReturnType(), ArgTypes,
+ FunctionProtoType::ExtProtoInfo());
+ llvm::DISubprogram *SP = DBuilder.createTempFunctionFwdDecl(
+ DContext, Name, LinkageName, Unit, Line,
+ getOrCreateFunctionType(FD, FnType, Unit), !FD->isExternallyVisible(),
+ /* isDefinition = */ false, 0, Flags, CGM.getLangOpts().Optimize,
+ TParamsArray.get(), getFunctionDeclaration(FD));
+ const FunctionDecl *CanonDecl = cast<FunctionDecl>(FD->getCanonicalDecl());
+ FwdDeclReplaceMap.emplace_back(std::piecewise_construct,
+ std::make_tuple(CanonDecl),
+ std::make_tuple(SP));
+ return SP;
+}
+
+llvm::DIGlobalVariable *
+CGDebugInfo::getGlobalVariableForwardDeclaration(const VarDecl *VD) {
+ QualType T;
+ StringRef Name, LinkageName;
+ SourceLocation Loc = VD->getLocation();
+ llvm::DIFile *Unit = getOrCreateFile(Loc);
+ llvm::DIScope *DContext = Unit;
+ unsigned Line = getLineNumber(Loc);
+
+ collectVarDeclProps(VD, Unit, Line, T, Name, LinkageName, DContext);
+ auto *GV = DBuilder.createTempGlobalVariableFwdDecl(
+ DContext, Name, LinkageName, Unit, Line, getOrCreateType(T, Unit),
+ !VD->isExternallyVisible(), nullptr, nullptr);
+ FwdDeclReplaceMap.emplace_back(
+ std::piecewise_construct,
+ std::make_tuple(cast<VarDecl>(VD->getCanonicalDecl())),
+ std::make_tuple(static_cast<llvm::Metadata *>(GV)));
+ return GV;
+}
+
+llvm::DINode *CGDebugInfo::getDeclarationOrDefinition(const Decl *D) {
+  // We only need a declaration (not a definition) of the type, so use whatever
+  // we would otherwise do to get a type for a pointee: forward declarations in
+  // limited debug info, full definitions (if the type definition is available)
+  // in unlimited debug info.
+ if (const TypeDecl *TD = dyn_cast<TypeDecl>(D))
+ return getOrCreateType(CGM.getContext().getTypeDeclType(TD),
+ getOrCreateFile(TD->getLocation()));
+ auto I = DeclCache.find(D->getCanonicalDecl());
+
+ if (I != DeclCache.end())
+ return dyn_cast_or_null<llvm::DINode>(I->second);
+
+  // No definition for now. Emit a forward declaration that might be
+  // merged with a potential upcoming definition.
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ return getFunctionForwardDeclaration(FD);
+ else if (const auto *VD = dyn_cast<VarDecl>(D))
+ return getGlobalVariableForwardDeclaration(VD);
+
+ return nullptr;
+}
+
+llvm::DISubprogram *CGDebugInfo::getFunctionDeclaration(const Decl *D) {
+ if (!D || DebugKind <= CodeGenOptions::DebugLineTablesOnly)
+ return nullptr;
+
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD)
+ return nullptr;
+
+ // Setup context.
+ auto *S = getDeclContextDescriptor(D);
+
+ auto MI = SPCache.find(FD->getCanonicalDecl());
+ if (MI == SPCache.end()) {
+ if (const CXXMethodDecl *MD =
+ dyn_cast<CXXMethodDecl>(FD->getCanonicalDecl())) {
+ return CreateCXXMemberFunction(MD, getOrCreateFile(MD->getLocation()),
+ cast<llvm::DICompositeType>(S));
+ }
+ }
+ if (MI != SPCache.end()) {
+ auto *SP = dyn_cast_or_null<llvm::DISubprogram>(MI->second);
+ if (SP && !SP->isDefinition())
+ return SP;
+ }
+
+ for (auto NextFD : FD->redecls()) {
+ auto MI = SPCache.find(NextFD->getCanonicalDecl());
+ if (MI != SPCache.end()) {
+ auto *SP = dyn_cast_or_null<llvm::DISubprogram>(MI->second);
+ if (SP && !SP->isDefinition())
+ return SP;
+ }
+ }
+ return nullptr;
+}
+
+// getOrCreateFunctionType - Construct type. If it is a c++ method, include
+// implicit parameter "this".
+llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
+ QualType FnType,
+ llvm::DIFile *F) {
+ if (!D || DebugKind <= CodeGenOptions::DebugLineTablesOnly)
+    // Create a fake but valid subroutine type. Otherwise -verify would fail,
+    // and the subprogram DIE would be missing its DW_AT_decl_file and
+    // DW_AT_decl_line fields.
+ return DBuilder.createSubroutineType(DBuilder.getOrCreateTypeArray(None));
+
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
+ return getOrCreateMethodType(Method, F);
+ if (const ObjCMethodDecl *OMethod = dyn_cast<ObjCMethodDecl>(D)) {
+ // Add "self" and "_cmd"
+ SmallVector<llvm::Metadata *, 16> Elts;
+
+ // First element is always return type. For 'void' functions it is NULL.
+ QualType ResultTy = OMethod->getReturnType();
+
+ // Replace the instancetype keyword with the actual type.
+ if (ResultTy == CGM.getContext().getObjCInstanceType())
+ ResultTy = CGM.getContext().getPointerType(
+ QualType(OMethod->getClassInterface()->getTypeForDecl(), 0));
+
+ Elts.push_back(getOrCreateType(ResultTy, F));
+ // "self" pointer is always first argument.
+ QualType SelfDeclTy;
+ if (auto *SelfDecl = OMethod->getSelfDecl())
+ SelfDeclTy = SelfDecl->getType();
+ else if (auto *FPT = dyn_cast<FunctionProtoType>(FnType))
+ if (FPT->getNumParams() > 1)
+ SelfDeclTy = FPT->getParamType(0);
+ if (!SelfDeclTy.isNull())
+ Elts.push_back(CreateSelfType(SelfDeclTy, getOrCreateType(SelfDeclTy, F)));
+ // "_cmd" pointer is always second argument.
+ Elts.push_back(DBuilder.createArtificialType(
+ getOrCreateType(CGM.getContext().getObjCSelType(), F)));
+ // Get rest of the arguments.
+ for (const auto *PI : OMethod->params())
+ Elts.push_back(getOrCreateType(PI->getType(), F));
+ // Variadic methods need a special marker at the end of the type list.
+ if (OMethod->isVariadic())
+ Elts.push_back(DBuilder.createUnspecifiedParameter());
+
+ llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts);
+ return DBuilder.createSubroutineType(EltTypeArray);
+ }
+
+ // Handle variadic function types; they need an additional
+ // unspecified parameter.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isVariadic()) {
+ SmallVector<llvm::Metadata *, 16> EltTys;
+ EltTys.push_back(getOrCreateType(FD->getReturnType(), F));
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FnType))
+ for (unsigned i = 0, e = FPT->getNumParams(); i != e; ++i)
+ EltTys.push_back(getOrCreateType(FPT->getParamType(i), F));
+ EltTys.push_back(DBuilder.createUnspecifiedParameter());
+ llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys);
+ return DBuilder.createSubroutineType(EltTypeArray);
+ }
+
+ return cast<llvm::DISubroutineType>(getOrCreateType(FnType, F));
+}
+
+void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
+ SourceLocation ScopeLoc, QualType FnType,
+ llvm::Function *Fn, CGBuilderTy &Builder) {
+
+ StringRef Name;
+ StringRef LinkageName;
+
+ FnBeginRegionCount.push_back(LexicalBlockStack.size());
+
+ const Decl *D = GD.getDecl();
+ bool HasDecl = (D != nullptr);
+
+ unsigned Flags = 0;
+ llvm::DIFile *Unit = getOrCreateFile(Loc);
+ llvm::DIScope *FDContext = Unit;
+ llvm::DINodeArray TParamsArray;
+ if (!HasDecl) {
+ // Use llvm function name.
+ LinkageName = Fn->getName();
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // If there is a subprogram for this function available then use it.
+ auto FI = SPCache.find(FD->getCanonicalDecl());
+ if (FI != SPCache.end()) {
+ auto *SP = dyn_cast_or_null<llvm::DISubprogram>(FI->second);
+ if (SP && SP->isDefinition()) {
+ LexicalBlockStack.emplace_back(SP);
+ RegionMap[D].reset(SP);
+ return;
+ }
+ }
+ collectFunctionDeclProps(GD, Unit, Name, LinkageName, FDContext,
+ TParamsArray, Flags);
+ } else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D)) {
+ Name = getObjCMethodName(OMD);
+ Flags |= llvm::DINode::FlagPrototyped;
+ } else {
+ // Use llvm function name.
+ Name = Fn->getName();
+ Flags |= llvm::DINode::FlagPrototyped;
+ }
+ if (!Name.empty() && Name[0] == '\01')
+ Name = Name.substr(1);
+
+ if (!HasDecl || D->isImplicit()) {
+ Flags |= llvm::DINode::FlagArtificial;
+ // Artificial functions without a location should not silently reuse CurLoc.
+ if (Loc.isInvalid())
+ CurLoc = SourceLocation();
+ }
+ unsigned LineNo = getLineNumber(Loc);
+ unsigned ScopeLine = getLineNumber(ScopeLoc);
+
+ // FIXME: The function declaration we're constructing here is mostly reusing
+ // declarations from CXXMethodDecl and not constructing new ones for arbitrary
+ // FunctionDecls. When/if we fix this we can have FDContext be TheCU/null for
+ // all subprograms instead of the actual context since subprogram definitions
+ // are emitted as CU level entities by the backend.
+ llvm::DISubprogram *SP = DBuilder.createFunction(
+ FDContext, Name, LinkageName, Unit, LineNo,
+ getOrCreateFunctionType(D, FnType, Unit), Fn->hasInternalLinkage(),
+ true /*definition*/, ScopeLine, Flags, CGM.getLangOpts().Optimize,
+ TParamsArray.get(), getFunctionDeclaration(D));
+ Fn->setSubprogram(SP);
+  // We might get here with a VarDecl when we're generating code for
+  // the initialization of globals. Do not record these decls,
+  // as they will overwrite the actual VarDecl Decl in the cache.
+ if (HasDecl && isa<FunctionDecl>(D))
+ DeclCache[D->getCanonicalDecl()].reset(static_cast<llvm::Metadata *>(SP));
+
+ // Push the function onto the lexical block stack.
+ LexicalBlockStack.emplace_back(SP);
+
+ if (HasDecl)
+ RegionMap[D].reset(SP);
+}
+
+void CGDebugInfo::EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc,
+ QualType FnType) {
+ StringRef Name;
+ StringRef LinkageName;
+
+ const Decl *D = GD.getDecl();
+ if (!D)
+ return;
+
+ unsigned Flags = 0;
+ llvm::DIFile *Unit = getOrCreateFile(Loc);
+ llvm::DIScope *FDContext = getDeclContextDescriptor(D);
+ llvm::DINodeArray TParamsArray;
+ if (isa<FunctionDecl>(D)) {
+ // If there is a DISubprogram for this function available then use it.
+ collectFunctionDeclProps(GD, Unit, Name, LinkageName, FDContext,
+ TParamsArray, Flags);
+ } else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D)) {
+ Name = getObjCMethodName(OMD);
+ Flags |= llvm::DINode::FlagPrototyped;
+ } else {
+ llvm_unreachable("not a function or ObjC method");
+ }
+ if (!Name.empty() && Name[0] == '\01')
+ Name = Name.substr(1);
+
+ if (D->isImplicit()) {
+ Flags |= llvm::DINode::FlagArtificial;
+ // Artificial functions without a location should not silently reuse CurLoc.
+ if (Loc.isInvalid())
+ CurLoc = SourceLocation();
+ }
+ unsigned LineNo = getLineNumber(Loc);
+ unsigned ScopeLine = 0;
+
+ DBuilder.createFunction(FDContext, Name, LinkageName, Unit, LineNo,
+ getOrCreateFunctionType(D, FnType, Unit),
+ false /*internalLinkage*/, true /*definition*/,
+ ScopeLine, Flags, CGM.getLangOpts().Optimize,
+ TParamsArray.get(), getFunctionDeclaration(D));
+}
+
+void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc) {
+ // Update our current location
+ setLocation(Loc);
+
+ if (CurLoc.isInvalid() || CurLoc.isMacroID())
+ return;
+
+ llvm::MDNode *Scope = LexicalBlockStack.back();
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(
+ getLineNumber(CurLoc), getColumnNumber(CurLoc), Scope));
+}
+
+void CGDebugInfo::CreateLexicalBlock(SourceLocation Loc) {
+ llvm::MDNode *Back = nullptr;
+ if (!LexicalBlockStack.empty())
+ Back = LexicalBlockStack.back().get();
+ LexicalBlockStack.emplace_back(DBuilder.createLexicalBlock(
+ cast<llvm::DIScope>(Back), getOrCreateFile(CurLoc), getLineNumber(CurLoc),
+ getColumnNumber(CurLoc)));
+}
+
+void CGDebugInfo::EmitLexicalBlockStart(CGBuilderTy &Builder,
+ SourceLocation Loc) {
+ // Set our current location.
+ setLocation(Loc);
+
+ // Emit a line table change for the current location inside the new scope.
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(
+ getLineNumber(Loc), getColumnNumber(Loc), LexicalBlockStack.back()));
+
+ if (DebugKind <= CodeGenOptions::DebugLineTablesOnly)
+ return;
+
+ // Create a new lexical block and push it on the stack.
+ CreateLexicalBlock(Loc);
+}
+
+void CGDebugInfo::EmitLexicalBlockEnd(CGBuilderTy &Builder,
+ SourceLocation Loc) {
+ assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
+
+ // Provide an entry in the line table for the end of the block.
+ EmitLocation(Builder, Loc);
+
+ if (DebugKind <= CodeGenOptions::DebugLineTablesOnly)
+ return;
+
+ LexicalBlockStack.pop_back();
+}
+
+void CGDebugInfo::EmitFunctionEnd(CGBuilderTy &Builder) {
+ assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
+ unsigned RCount = FnBeginRegionCount.back();
+ assert(RCount <= LexicalBlockStack.size() && "Region stack mismatch");
+
+ // Pop all regions for this function.
+ while (LexicalBlockStack.size() != RCount) {
+ // Provide an entry in the line table for the end of the block.
+ EmitLocation(Builder, CurLoc);
+ LexicalBlockStack.pop_back();
+ }
+ FnBeginRegionCount.pop_back();
+}
+
+llvm::DIType *CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
+ uint64_t *XOffset) {
+
+ SmallVector<llvm::Metadata *, 5> EltTys;
+ QualType FType;
+ uint64_t FieldSize, FieldOffset;
+ unsigned FieldAlign;
+
+ llvm::DIFile *Unit = getOrCreateFile(VD->getLocation());
+ QualType Type = VD->getType();
+
+ FieldOffset = 0;
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "__isa", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__forwarding", &FieldOffset));
+ FType = CGM.getContext().IntTy;
+ EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__size", &FieldOffset));
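+  // For instance, a `__block int x;` is backed at runtime by a byref struct
+  // laid out roughly as:
+  //   struct { void *__isa; void *__forwarding; int __flags; int __size;
+  //            /* copy/dispose helpers and layout info, if needed */ int x; };
+  // which is the layout the member types collected here describe.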
+
+ bool HasCopyAndDispose = CGM.getContext().BlockRequiresCopying(Type, VD);
+ if (HasCopyAndDispose) {
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(
+ CreateMemberType(Unit, FType, "__copy_helper", &FieldOffset));
+ EltTys.push_back(
+ CreateMemberType(Unit, FType, "__destroy_helper", &FieldOffset));
+ }
+ bool HasByrefExtendedLayout;
+ Qualifiers::ObjCLifetime Lifetime;
+ if (CGM.getContext().getByrefLifetime(Type, Lifetime,
+ HasByrefExtendedLayout) &&
+ HasByrefExtendedLayout) {
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(
+ CreateMemberType(Unit, FType, "__byref_variable_layout", &FieldOffset));
+ }
+
+ CharUnits Align = CGM.getContext().getDeclAlign(VD);
+ if (Align > CGM.getContext().toCharUnitsFromBits(
+ CGM.getTarget().getPointerAlign(0))) {
+ CharUnits FieldOffsetInBytes =
+ CGM.getContext().toCharUnitsFromBits(FieldOffset);
+ CharUnits AlignedOffsetInBytes =
+ FieldOffsetInBytes.RoundUpToAlignment(Align);
+ CharUnits NumPaddingBytes = AlignedOffsetInBytes - FieldOffsetInBytes;
+
+ if (NumPaddingBytes.isPositive()) {
+ llvm::APInt pad(32, NumPaddingBytes.getQuantity());
+ FType = CGM.getContext().getConstantArrayType(CGM.getContext().CharTy,
+ pad, ArrayType::Normal, 0);
+ EltTys.push_back(CreateMemberType(Unit, FType, "", &FieldOffset));
+ }
+ }
+
+ FType = Type;
+ llvm::DIType *FieldTy = getOrCreateType(FType, Unit);
+ FieldSize = CGM.getContext().getTypeSize(FType);
+ FieldAlign = CGM.getContext().toBits(Align);
+
+ *XOffset = FieldOffset;
+ FieldTy = DBuilder.createMemberType(Unit, VD->getName(), Unit, 0, FieldSize,
+ FieldAlign, FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+
+ llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys);
+
+ unsigned Flags = llvm::DINode::FlagBlockByrefStruct;
+
+ return DBuilder.createStructType(Unit, "", Unit, 0, FieldOffset, 0, Flags,
+ nullptr, Elements);
+}
+
+void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage,
+ llvm::Optional<unsigned> ArgNo,
+ CGBuilderTy &Builder) {
+ assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
+ assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
+
+ bool Unwritten =
+ VD->isImplicit() || (isa<Decl>(VD->getDeclContext()) &&
+ cast<Decl>(VD->getDeclContext())->isImplicit());
+ llvm::DIFile *Unit = nullptr;
+ if (!Unwritten)
+ Unit = getOrCreateFile(VD->getLocation());
+ llvm::DIType *Ty;
+ uint64_t XOffset = 0;
+ if (VD->hasAttr<BlocksAttr>())
+ Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset);
+ else
+ Ty = getOrCreateType(VD->getType(), Unit);
+
+ // If there is no debug info for this type then do not emit debug info
+ // for this variable.
+ if (!Ty)
+ return;
+
+ // Get location information.
+ unsigned Line = 0;
+ unsigned Column = 0;
+ if (!Unwritten) {
+ Line = getLineNumber(VD->getLocation());
+ Column = getColumnNumber(VD->getLocation());
+ }
+ SmallVector<int64_t, 9> Expr;
+ unsigned Flags = 0;
+ if (VD->isImplicit())
+ Flags |= llvm::DINode::FlagArtificial;
+ // If this is the first argument and it is implicit then
+ // give it an object pointer flag.
+ // FIXME: There has to be a better way to do this, but for static
+ // functions there won't be an implicit param at arg1 and
+ // otherwise it is 'self' or 'this'.
+ if (isa<ImplicitParamDecl>(VD) && ArgNo && *ArgNo == 1)
+ Flags |= llvm::DINode::FlagObjectPointer;
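+ // If the argument is materialized as a pointer to the actual value (e.g. an
+ // aggregate passed indirectly by the ABI), add a deref so the debugger sees
+ // the value rather than the pointer.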
+ if (llvm::Argument *Arg = dyn_cast<llvm::Argument>(Storage))
+ if (Arg->getType()->isPointerTy() && !Arg->hasByValAttr() &&
+ !VD->getType()->isPointerType())
+ Expr.push_back(llvm::dwarf::DW_OP_deref);
+
+ auto *Scope = cast<llvm::DIScope>(LexicalBlockStack.back());
+
+ StringRef Name = VD->getName();
+ if (!Name.empty()) {
+ if (VD->hasAttr<BlocksAttr>()) {
+ CharUnits offset = CharUnits::fromQuantity(32);
+ Expr.push_back(llvm::dwarf::DW_OP_plus);
+ // offset of __forwarding field
+ offset = CGM.getContext().toCharUnitsFromBits(
+ CGM.getTarget().getPointerWidth(0));
+ Expr.push_back(offset.getQuantity());
+ Expr.push_back(llvm::dwarf::DW_OP_deref);
+ Expr.push_back(llvm::dwarf::DW_OP_plus);
+ // offset of x field
+ offset = CGM.getContext().toCharUnitsFromBits(XOffset);
+ Expr.push_back(offset.getQuantity());
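+      // Illustrative result on a typical 64-bit target: the expression
+      // becomes [DW_OP_plus 8, DW_OP_deref, DW_OP_plus <offset of x>], i.e.
+      // chase the __forwarding pointer, then step to the variable's field.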
+
+ // Create the descriptor for the variable.
+ auto *D = ArgNo
+ ? DBuilder.createParameterVariable(Scope, VD->getName(),
+ *ArgNo, Unit, Line, Ty)
+ : DBuilder.createAutoVariable(Scope, VD->getName(), Unit,
+ Line, Ty);
+
+ // Insert an llvm.dbg.declare into the current block.
+ DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
+ llvm::DebugLoc::get(Line, Column, Scope),
+ Builder.GetInsertBlock());
+ return;
+ } else if (isa<VariableArrayType>(VD->getType()))
+ Expr.push_back(llvm::dwarf::DW_OP_deref);
+ } else if (const RecordType *RT = dyn_cast<RecordType>(VD->getType())) {
+ // If VD is an anonymous union then Storage represents value for
+ // all union fields.
+ const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
+ if (RD->isUnion() && RD->isAnonymousStructOrUnion()) {
+ // GDB has trouble finding local variables in anonymous unions, so we emit
+      // artificial local variables for each of the members.
+ //
+ // FIXME: Remove this code as soon as GDB supports this.
+ // The debug info verifier in LLVM operates based on the assumption that a
+ // variable has the same size as its storage and we had to disable the check
+ // for artificial variables.
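+      // For example, given 'union { int i; float f; };' at function scope, we
+      // emit artificial locals 'i' and 'f' that both describe Storage.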
+ for (const auto *Field : RD->fields()) {
+ llvm::DIType *FieldTy = getOrCreateType(Field->getType(), Unit);
+ StringRef FieldName = Field->getName();
+
+ // Ignore unnamed fields. Do not ignore unnamed records.
+ if (FieldName.empty() && !isa<RecordType>(Field->getType()))
+ continue;
+
+ // Use VarDecl's Tag, Scope and Line number.
+ auto *D = DBuilder.createAutoVariable(
+ Scope, FieldName, Unit, Line, FieldTy, CGM.getLangOpts().Optimize,
+ Flags | llvm::DINode::FlagArtificial);
+
+ // Insert an llvm.dbg.declare into the current block.
+ DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
+ llvm::DebugLoc::get(Line, Column, Scope),
+ Builder.GetInsertBlock());
+ }
+ }
+ }
+
+ // Create the descriptor for the variable.
+ auto *D =
+ ArgNo
+ ? DBuilder.createParameterVariable(Scope, Name, *ArgNo, Unit, Line,
+ Ty, CGM.getLangOpts().Optimize,
+ Flags)
+ : DBuilder.createAutoVariable(Scope, Name, Unit, Line, Ty,
+ CGM.getLangOpts().Optimize, Flags);
+
+ // Insert an llvm.dbg.declare into the current block.
+ DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
+ llvm::DebugLoc::get(Line, Column, Scope),
+ Builder.GetInsertBlock());
+}
+
+void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
+ llvm::Value *Storage,
+ CGBuilderTy &Builder) {
+ assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
+ EmitDeclare(VD, Storage, llvm::None, Builder);
+}
+
+llvm::DIType *CGDebugInfo::CreateSelfType(const QualType &QualTy,
+ llvm::DIType *Ty) {
+ llvm::DIType *CachedTy = getTypeOrNull(QualTy);
+ if (CachedTy)
+ Ty = CachedTy;
+ return DBuilder.createObjectPointerType(Ty);
+}
+
+void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
+ const VarDecl *VD, llvm::Value *Storage, CGBuilderTy &Builder,
+ const CGBlockInfo &blockInfo, llvm::Instruction *InsertPoint) {
+ assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
+ assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
+
+ if (Builder.GetInsertBlock() == nullptr)
+ return;
+
+ bool isByRef = VD->hasAttr<BlocksAttr>();
+
+ uint64_t XOffset = 0;
+ llvm::DIFile *Unit = getOrCreateFile(VD->getLocation());
+ llvm::DIType *Ty;
+ if (isByRef)
+ Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset);
+ else
+ Ty = getOrCreateType(VD->getType(), Unit);
+
+ // Self is passed along as an implicit non-arg variable in a
+ // block. Mark it as the object pointer.
+ if (isa<ImplicitParamDecl>(VD) && VD->getName() == "self")
+ Ty = CreateSelfType(VD->getType(), Ty);
+
+ // Get location information.
+ unsigned Line = getLineNumber(VD->getLocation());
+ unsigned Column = getColumnNumber(VD->getLocation());
+
+ const llvm::DataLayout &target = CGM.getDataLayout();
+
+ CharUnits offset = CharUnits::fromQuantity(
+ target.getStructLayout(blockInfo.StructureType)
+ ->getElementOffset(blockInfo.getCapture(VD).getIndex()));
+
+ SmallVector<int64_t, 9> addr;
+ if (isa<llvm::AllocaInst>(Storage))
+ addr.push_back(llvm::dwarf::DW_OP_deref);
+ addr.push_back(llvm::dwarf::DW_OP_plus);
+ addr.push_back(offset.getQuantity());
+ if (isByRef) {
+ addr.push_back(llvm::dwarf::DW_OP_deref);
+ addr.push_back(llvm::dwarf::DW_OP_plus);
+ // offset of __forwarding field
+ offset =
+ CGM.getContext().toCharUnitsFromBits(target.getPointerSizeInBits(0));
+ addr.push_back(offset.getQuantity());
+ addr.push_back(llvm::dwarf::DW_OP_deref);
+ addr.push_back(llvm::dwarf::DW_OP_plus);
+ // offset of x field
+ offset = CGM.getContext().toCharUnitsFromBits(XOffset);
+ addr.push_back(offset.getQuantity());
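+    // Illustrative final expression for a __block capture on a 64-bit target:
+    // [DW_OP_deref, DW_OP_plus <capture offset>, DW_OP_deref, DW_OP_plus 8,
+    //  DW_OP_deref, DW_OP_plus <offset of x>], where the leading deref is
+    // only present when Storage is an alloca.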
+ }
+
+ // Create the descriptor for the variable.
+ auto *D = DBuilder.createAutoVariable(
+ cast<llvm::DILocalScope>(LexicalBlockStack.back()), VD->getName(), Unit,
+ Line, Ty);
+
+ // Insert an llvm.dbg.declare into the current block.
+ auto DL = llvm::DebugLoc::get(Line, Column, LexicalBlockStack.back());
+ if (InsertPoint)
+ DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(addr), DL,
+ InsertPoint);
+ else
+ DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(addr), DL,
+ Builder.GetInsertBlock());
+}
+
+void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
+ unsigned ArgNo,
+ CGBuilderTy &Builder) {
+ assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
+ EmitDeclare(VD, AI, ArgNo, Builder);
+}
+
+namespace {
+struct BlockLayoutChunk {
+ uint64_t OffsetInBits;
+ const BlockDecl::Capture *Capture;
+};
+bool operator<(const BlockLayoutChunk &l, const BlockLayoutChunk &r) {
+ return l.OffsetInBits < r.OffsetInBits;
+}
+}
+
+void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
+ llvm::Value *Arg,
+ unsigned ArgNo,
+ llvm::Value *LocalAddr,
+ CGBuilderTy &Builder) {
+ assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
+ ASTContext &C = CGM.getContext();
+ const BlockDecl *blockDecl = block.getBlockDecl();
+
+ // Collect some general information about the block's location.
+ SourceLocation loc = blockDecl->getCaretLocation();
+ llvm::DIFile *tunit = getOrCreateFile(loc);
+ unsigned line = getLineNumber(loc);
+ unsigned column = getColumnNumber(loc);
+
+ // Build the debug-info type for the block literal.
+ getDeclContextDescriptor(blockDecl);
+
+ const llvm::StructLayout *blockLayout =
+ CGM.getDataLayout().getStructLayout(block.StructureType);
+
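+  // The fixed fields below follow the Blocks ABI block-literal header; as an
+  // illustrative sketch of the type being described:
+  //
+  //   struct __block_literal_N {
+  //     void *__isa;
+  //     int __flags;
+  //     int __reserved;
+  //     ReturnT (*__FuncPtr)(...);
+  //     struct __block_descriptor *__descriptor;
+  //     /* captures follow, sorted by offset */
+  //   };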
+ SmallVector<llvm::Metadata *, 16> fields;
+ fields.push_back(createFieldType("__isa", C.VoidPtrTy, 0, loc, AS_public,
+ blockLayout->getElementOffsetInBits(0),
+ tunit, tunit));
+ fields.push_back(createFieldType("__flags", C.IntTy, 0, loc, AS_public,
+ blockLayout->getElementOffsetInBits(1),
+ tunit, tunit));
+ fields.push_back(createFieldType("__reserved", C.IntTy, 0, loc, AS_public,
+ blockLayout->getElementOffsetInBits(2),
+ tunit, tunit));
+ auto *FnTy = block.getBlockExpr()->getFunctionType();
+ auto FnPtrType = CGM.getContext().getPointerType(FnTy->desugar());
+ fields.push_back(createFieldType("__FuncPtr", FnPtrType, 0, loc, AS_public,
+ blockLayout->getElementOffsetInBits(3),
+ tunit, tunit));
+ fields.push_back(createFieldType(
+ "__descriptor", C.getPointerType(block.NeedsCopyDispose
+ ? C.getBlockDescriptorExtendedType()
+ : C.getBlockDescriptorType()),
+ 0, loc, AS_public, blockLayout->getElementOffsetInBits(4), tunit, tunit));
+
+ // We want to sort the captures by offset, not because DWARF
+ // requires this, but because we're paranoid about debuggers.
+ SmallVector<BlockLayoutChunk, 8> chunks;
+
+ // 'this' capture.
+ if (blockDecl->capturesCXXThis()) {
+ BlockLayoutChunk chunk;
+ chunk.OffsetInBits =
+ blockLayout->getElementOffsetInBits(block.CXXThisIndex);
+ chunk.Capture = nullptr;
+ chunks.push_back(chunk);
+ }
+
+ // Variable captures.
+ for (const auto &capture : blockDecl->captures()) {
+ const VarDecl *variable = capture.getVariable();
+ const CGBlockInfo::Capture &captureInfo = block.getCapture(variable);
+
+ // Ignore constant captures.
+ if (captureInfo.isConstant())
+ continue;
+
+ BlockLayoutChunk chunk;
+ chunk.OffsetInBits =
+ blockLayout->getElementOffsetInBits(captureInfo.getIndex());
+ chunk.Capture = &capture;
+ chunks.push_back(chunk);
+ }
+
+ // Sort by offset.
+ llvm::array_pod_sort(chunks.begin(), chunks.end());
+
+ for (SmallVectorImpl<BlockLayoutChunk>::iterator i = chunks.begin(),
+ e = chunks.end();
+ i != e; ++i) {
+ uint64_t offsetInBits = i->OffsetInBits;
+ const BlockDecl::Capture *capture = i->Capture;
+
+ // If we have a null capture, this must be the C++ 'this' capture.
+ if (!capture) {
+ const CXXMethodDecl *method =
+ cast<CXXMethodDecl>(blockDecl->getNonClosureContext());
+ QualType type = method->getThisType(C);
+
+ fields.push_back(createFieldType("this", type, 0, loc, AS_public,
+ offsetInBits, tunit, tunit));
+ continue;
+ }
+
+ const VarDecl *variable = capture->getVariable();
+ StringRef name = variable->getName();
+
+ llvm::DIType *fieldType;
+ if (capture->isByRef()) {
+ TypeInfo PtrInfo = C.getTypeInfo(C.VoidPtrTy);
+
+ // FIXME: this creates a second copy of this type!
+ uint64_t xoffset;
+ fieldType = EmitTypeForVarWithBlocksAttr(variable, &xoffset);
+ fieldType = DBuilder.createPointerType(fieldType, PtrInfo.Width);
+ fieldType =
+ DBuilder.createMemberType(tunit, name, tunit, line, PtrInfo.Width,
+ PtrInfo.Align, offsetInBits, 0, fieldType);
+ } else {
+ fieldType = createFieldType(name, variable->getType(), 0, loc, AS_public,
+ offsetInBits, tunit, tunit);
+ }
+ fields.push_back(fieldType);
+ }
+
+ SmallString<36> typeName;
+ llvm::raw_svector_ostream(typeName) << "__block_literal_"
+ << CGM.getUniqueBlockCount();
+
+ llvm::DINodeArray fieldsArray = DBuilder.getOrCreateArray(fields);
+
+ llvm::DIType *type = DBuilder.createStructType(
+ tunit, typeName.str(), tunit, line,
+ CGM.getContext().toBits(block.BlockSize),
+ CGM.getContext().toBits(block.BlockAlign), 0, nullptr, fieldsArray);
+ type = DBuilder.createPointerType(type, CGM.PointerWidthInBits);
+
+ // Get overall information about the block.
+ unsigned flags = llvm::DINode::FlagArtificial;
+ auto *scope = cast<llvm::DILocalScope>(LexicalBlockStack.back());
+
+ // Create the descriptor for the parameter.
+ auto *debugVar = DBuilder.createParameterVariable(
+ scope, Arg->getName(), ArgNo, tunit, line, type,
+ CGM.getLangOpts().Optimize, flags);
+
+ if (LocalAddr) {
+ // Insert an llvm.dbg.value into the current block.
+ DBuilder.insertDbgValueIntrinsic(
+ LocalAddr, 0, debugVar, DBuilder.createExpression(),
+ llvm::DebugLoc::get(line, column, scope), Builder.GetInsertBlock());
+ }
+
+ // Insert an llvm.dbg.declare into the current block.
+ DBuilder.insertDeclare(Arg, debugVar, DBuilder.createExpression(),
+ llvm::DebugLoc::get(line, column, scope),
+ Builder.GetInsertBlock());
+}
+
+llvm::DIDerivedType *
+CGDebugInfo::getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D) {
+ if (!D->isStaticDataMember())
+ return nullptr;
+
+ auto MI = StaticDataMemberCache.find(D->getCanonicalDecl());
+ if (MI != StaticDataMemberCache.end()) {
+ assert(MI->second && "Static data member declaration should still exist");
+ return MI->second;
+ }
+
+ // If the member wasn't found in the cache, lazily construct and add it to the
+ // type (used when a limited form of the type is emitted).
+ auto DC = D->getDeclContext();
+ auto *Ctxt = cast<llvm::DICompositeType>(getDeclContextDescriptor(D));
+ return CreateRecordStaticField(D, Ctxt, cast<RecordDecl>(DC));
+}
+
+llvm::DIGlobalVariable *CGDebugInfo::CollectAnonRecordDecls(
+ const RecordDecl *RD, llvm::DIFile *Unit, unsigned LineNo,
+ StringRef LinkageName, llvm::GlobalVariable *Var, llvm::DIScope *DContext) {
+ llvm::DIGlobalVariable *GV = nullptr;
+
+ for (const auto *Field : RD->fields()) {
+ llvm::DIType *FieldTy = getOrCreateType(Field->getType(), Unit);
+ StringRef FieldName = Field->getName();
+
+ // Ignore unnamed fields, but recurse into anonymous records.
+ if (FieldName.empty()) {
+ const RecordType *RT = dyn_cast<RecordType>(Field->getType());
+ if (RT)
+ GV = CollectAnonRecordDecls(RT->getDecl(), Unit, LineNo, LinkageName,
+ Var, DContext);
+ continue;
+ }
+ // Use VarDecl's Tag, Scope and Line number.
+ GV = DBuilder.createGlobalVariable(DContext, FieldName, LinkageName, Unit,
+ LineNo, FieldTy,
+ Var->hasInternalLinkage(), Var, nullptr);
+ }
+ return GV;
+}
+
+void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
+ const VarDecl *D) {
+ assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
+ // Create global variable debug descriptor.
+ llvm::DIFile *Unit = nullptr;
+ llvm::DIScope *DContext = nullptr;
+ unsigned LineNo;
+ StringRef DeclName, LinkageName;
+ QualType T;
+ collectVarDeclProps(D, Unit, LineNo, T, DeclName, LinkageName, DContext);
+
+ // Attempt to store one global variable for the declaration - even if we
+ // emit a lot of fields.
+ llvm::DIGlobalVariable *GV = nullptr;
+
+ // If this is an anonymous union then we'll want to emit a global
+ // variable for each member of the anonymous union so that it's possible
+ // to find the name of any field in the union.
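+  // For example, 'static union { int i; float f; };' produces a debug global
+  // for both 'i' and 'f', each referring to Var (illustrative).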
+ if (T->isUnionType() && DeclName.empty()) {
+ const RecordDecl *RD = T->castAs<RecordType>()->getDecl();
+ assert(RD->isAnonymousStructOrUnion() &&
+ "unnamed non-anonymous struct or union?");
+ GV = CollectAnonRecordDecls(RD, Unit, LineNo, LinkageName, Var, DContext);
+ } else {
+ GV = DBuilder.createGlobalVariable(
+ DContext, DeclName, LinkageName, Unit, LineNo, getOrCreateType(T, Unit),
+ Var->hasInternalLinkage(), Var,
+ getOrCreateStaticDataMemberDeclarationOrNull(D));
+ }
+ DeclCache[D->getCanonicalDecl()].reset(static_cast<llvm::Metadata *>(GV));
+}
+
+void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
+ llvm::Constant *Init) {
+ assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
+ // Create the descriptor for the variable.
+ llvm::DIFile *Unit = getOrCreateFile(VD->getLocation());
+ StringRef Name = VD->getName();
+ llvm::DIType *Ty = getOrCreateType(VD->getType(), Unit);
+ if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(VD)) {
+ const EnumDecl *ED = cast<EnumDecl>(ECD->getDeclContext());
+ assert(isa<EnumType>(ED->getTypeForDecl()) && "Enum without EnumType?");
+ Ty = getOrCreateType(QualType(ED->getTypeForDecl(), 0), Unit);
+ }
+ // Do not use global variables for enums.
+ //
+ // FIXME: why not?
+ if (Ty->getTag() == llvm::dwarf::DW_TAG_enumeration_type)
+ return;
+ // Do not emit separate definitions for function local const/statics.
+ if (isa<FunctionDecl>(VD->getDeclContext()))
+ return;
+ VD = cast<ValueDecl>(VD->getCanonicalDecl());
+ auto *VarD = cast<VarDecl>(VD);
+ if (VarD->isStaticDataMember()) {
+ auto *RD = cast<RecordDecl>(VarD->getDeclContext());
+ getDeclContextDescriptor(VarD);
+ // Ensure that the type is retained even though it's otherwise unreferenced.
+ RetainedTypes.push_back(
+ CGM.getContext().getRecordType(RD).getAsOpaquePtr());
+ return;
+ }
+
+ llvm::DIScope *DContext = getDeclContextDescriptor(VD);
+
+ auto &GV = DeclCache[VD];
+ if (GV)
+ return;
+ GV.reset(DBuilder.createGlobalVariable(
+ DContext, Name, StringRef(), Unit, getLineNumber(VD->getLocation()), Ty,
+ true, Init, getOrCreateStaticDataMemberDeclarationOrNull(VarD)));
+}
+
+llvm::DIScope *CGDebugInfo::getCurrentContextDescriptor(const Decl *D) {
+ if (!LexicalBlockStack.empty())
+ return LexicalBlockStack.back();
+ llvm::DIScope *Mod = getParentModuleOrNull(D);
+ return getContextDescriptor(D, Mod ? Mod : TheCU);
+}
+
+void CGDebugInfo::EmitUsingDirective(const UsingDirectiveDecl &UD) {
+ if (CGM.getCodeGenOpts().getDebugInfo() < CodeGenOptions::LimitedDebugInfo)
+ return;
+ const NamespaceDecl *NSDecl = UD.getNominatedNamespace();
+ if (!NSDecl->isAnonymousNamespace() ||
+ CGM.getCodeGenOpts().DebugExplicitImport) {
+ DBuilder.createImportedModule(
+ getCurrentContextDescriptor(cast<Decl>(UD.getDeclContext())),
+ getOrCreateNameSpace(NSDecl),
+ getLineNumber(UD.getLocation()));
+ }
+}
+
+void CGDebugInfo::EmitUsingDecl(const UsingDecl &UD) {
+ if (CGM.getCodeGenOpts().getDebugInfo() < CodeGenOptions::LimitedDebugInfo)
+ return;
+ assert(UD.shadow_size() &&
+ "We shouldn't be codegening an invalid UsingDecl containing no decls");
+ // Emitting one decl is sufficient - debuggers can detect that this is an
+ // overloaded name & provide lookup for all the overloads.
+ const UsingShadowDecl &USD = **UD.shadow_begin();
+ if (llvm::DINode *Target =
+ getDeclarationOrDefinition(USD.getUnderlyingDecl()))
+ DBuilder.createImportedDeclaration(
+ getCurrentContextDescriptor(cast<Decl>(USD.getDeclContext())), Target,
+ getLineNumber(USD.getLocation()));
+}
+
+void CGDebugInfo::EmitImportDecl(const ImportDecl &ID) {
+ if (Module *M = ID.getImportedModule()) {
+ auto Info = ExternalASTSource::ASTSourceDescriptor(*M);
+ DBuilder.createImportedDeclaration(
+ getCurrentContextDescriptor(cast<Decl>(ID.getDeclContext())),
+ getOrCreateModuleRef(Info, DebugTypeExtRefs),
+ getLineNumber(ID.getLocation()));
+ }
+}
+
+llvm::DIImportedEntity *
+CGDebugInfo::EmitNamespaceAlias(const NamespaceAliasDecl &NA) {
+ if (CGM.getCodeGenOpts().getDebugInfo() < CodeGenOptions::LimitedDebugInfo)
+ return nullptr;
+ auto &VH = NamespaceAliasCache[&NA];
+ if (VH)
+ return cast<llvm::DIImportedEntity>(VH);
+ llvm::DIImportedEntity *R;
+ if (const NamespaceAliasDecl *Underlying =
+ dyn_cast<NamespaceAliasDecl>(NA.getAliasedNamespace()))
+ // This could cache & dedup here rather than relying on metadata deduping.
+ R = DBuilder.createImportedDeclaration(
+ getCurrentContextDescriptor(cast<Decl>(NA.getDeclContext())),
+ EmitNamespaceAlias(*Underlying), getLineNumber(NA.getLocation()),
+ NA.getName());
+ else
+ R = DBuilder.createImportedDeclaration(
+ getCurrentContextDescriptor(cast<Decl>(NA.getDeclContext())),
+ getOrCreateNameSpace(cast<NamespaceDecl>(NA.getAliasedNamespace())),
+ getLineNumber(NA.getLocation()), NA.getName());
+ VH.reset(R);
+ return R;
+}
+
+llvm::DINamespace *
+CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl) {
+ NSDecl = NSDecl->getCanonicalDecl();
+ auto I = NameSpaceCache.find(NSDecl);
+ if (I != NameSpaceCache.end())
+ return cast<llvm::DINamespace>(I->second);
+
+ unsigned LineNo = getLineNumber(NSDecl->getLocation());
+ llvm::DIFile *FileD = getOrCreateFile(NSDecl->getLocation());
+ llvm::DIScope *Context = getDeclContextDescriptor(NSDecl);
+ llvm::DINamespace *NS =
+ DBuilder.createNameSpace(Context, NSDecl->getName(), FileD, LineNo);
+ NameSpaceCache[NSDecl].reset(NS);
+ return NS;
+}
+
+void CGDebugInfo::setDwoId(uint64_t Signature) {
+ assert(TheCU && "no main compile unit");
+ TheCU->setDWOId(Signature);
+}
+
+void CGDebugInfo::finalize() {
+ // Creating types might create further types - invalidating the current
+ // element and the size(), so don't cache/reference them.
+ for (size_t i = 0; i != ObjCInterfaceCache.size(); ++i) {
+ ObjCInterfaceCacheEntry E = ObjCInterfaceCache[i];
+ llvm::DIType *Ty = E.Type->getDecl()->getDefinition()
+ ? CreateTypeDefinition(E.Type, E.Unit)
+ : E.Decl;
+ DBuilder.replaceTemporary(llvm::TempDIType(E.Decl), Ty);
+ }
+
+ for (auto p : ReplaceMap) {
+ assert(p.second);
+ auto *Ty = cast<llvm::DIType>(p.second);
+ assert(Ty->isForwardDecl());
+
+ auto it = TypeCache.find(p.first);
+ assert(it != TypeCache.end());
+ assert(it->second);
+
+ DBuilder.replaceTemporary(llvm::TempDIType(Ty),
+ cast<llvm::DIType>(it->second));
+ }
+
+ for (const auto &p : FwdDeclReplaceMap) {
+ assert(p.second);
+ llvm::TempMDNode FwdDecl(cast<llvm::MDNode>(p.second));
+ llvm::Metadata *Repl;
+
+ auto it = DeclCache.find(p.first);
+ // If there has been no definition for the declaration, call RAUW
+ // with ourselves, that will destroy the temporary MDNode and
+ // replace it with a standard one, avoiding leaking memory.
+ if (it == DeclCache.end())
+ Repl = p.second;
+ else
+ Repl = it->second;
+
+ DBuilder.replaceTemporary(std::move(FwdDecl), cast<llvm::MDNode>(Repl));
+ }
+
+ // We keep our own list of retained types, because we need to look
+ // up the final type in the type cache.
+ for (auto &RT : RetainedTypes)
+ if (auto MD = TypeCache[RT])
+ DBuilder.retainType(cast<llvm::DIType>(MD));
+
+ DBuilder.finalize();
+}
+
+void CGDebugInfo::EmitExplicitCastType(QualType Ty) {
+ if (CGM.getCodeGenOpts().getDebugInfo() < CodeGenOptions::LimitedDebugInfo)
+ return;
+
+ if (auto *DieTy = getOrCreateType(Ty, getOrCreateMainFile()))
+    // Retain the type even when the explicit cast is its only (indirect)
+    // reference.
+ DBuilder.retainType(DieTy);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h
new file mode 100644
index 0000000..57d5c80
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h
@@ -0,0 +1,587 @@
+//===--- CGDebugInfo.h - DebugInfo for LLVM CodeGen -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the source-level debug info generator for llvm translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGDEBUGINFO_H
+#define LLVM_CLANG_LIB_CODEGEN_CGDEBUGINFO_H
+
+#include "CGBuilder.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/IR/DIBuilder.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Allocator.h"
+
+namespace llvm {
+class MDNode;
+}
+
+namespace clang {
+class CXXMethodDecl;
+class ClassTemplateSpecializationDecl;
+class GlobalDecl;
+class ModuleMap;
+class ObjCInterfaceDecl;
+class ObjCIvarDecl;
+class UsingDecl;
+class VarDecl;
+
+namespace CodeGen {
+class CodeGenModule;
+class CodeGenFunction;
+class CGBlockInfo;
+
+/// This class gathers all debug information during compilation and is
+/// responsible for emitting it to LLVM globals or passing it directly to
+/// the backend.
+class CGDebugInfo {
+ friend class ApplyDebugLocation;
+ friend class SaveAndRestoreLocation;
+ CodeGenModule &CGM;
+ const CodeGenOptions::DebugInfoKind DebugKind;
+ bool DebugTypeExtRefs;
+ llvm::DIBuilder DBuilder;
+ llvm::DICompileUnit *TheCU = nullptr;
+ ModuleMap *ClangModuleMap = nullptr;
+ SourceLocation CurLoc;
+ llvm::DIType *VTablePtrType = nullptr;
+ llvm::DIType *ClassTy = nullptr;
+ llvm::DICompositeType *ObjTy = nullptr;
+ llvm::DIType *SelTy = nullptr;
+ llvm::DIType *OCLImage1dDITy = nullptr;
+ llvm::DIType *OCLImage1dArrayDITy = nullptr;
+ llvm::DIType *OCLImage1dBufferDITy = nullptr;
+ llvm::DIType *OCLImage2dDITy = nullptr;
+ llvm::DIType *OCLImage2dArrayDITy = nullptr;
+ llvm::DIType *OCLImage2dDepthDITy = nullptr;
+ llvm::DIType *OCLImage2dArrayDepthDITy = nullptr;
+ llvm::DIType *OCLImage2dMSAADITy = nullptr;
+ llvm::DIType *OCLImage2dArrayMSAADITy = nullptr;
+ llvm::DIType *OCLImage2dMSAADepthDITy = nullptr;
+ llvm::DIType *OCLImage2dArrayMSAADepthDITy = nullptr;
+ llvm::DIType *OCLImage3dDITy = nullptr;
+ llvm::DIType *OCLEventDITy = nullptr;
+ llvm::DIType *OCLClkEventDITy = nullptr;
+ llvm::DIType *OCLQueueDITy = nullptr;
+ llvm::DIType *OCLNDRangeDITy = nullptr;
+ llvm::DIType *OCLReserveIDDITy = nullptr;
+
+ /// Cache of previously constructed Types.
+ llvm::DenseMap<const void *, llvm::TrackingMDRef> TypeCache;
+
+ llvm::SmallDenseMap<llvm::StringRef, llvm::StringRef> DebugPrefixMap;
+
+ struct ObjCInterfaceCacheEntry {
+ const ObjCInterfaceType *Type;
+ llvm::DIType *Decl;
+ llvm::DIFile *Unit;
+ ObjCInterfaceCacheEntry(const ObjCInterfaceType *Type, llvm::DIType *Decl,
+ llvm::DIFile *Unit)
+ : Type(Type), Decl(Decl), Unit(Unit) {}
+ };
+
+ /// Cache of previously constructed interfaces which may change.
+ llvm::SmallVector<ObjCInterfaceCacheEntry, 32> ObjCInterfaceCache;
+
+ /// Cache of references to clang modules and precompiled headers.
+ llvm::DenseMap<const Module *, llvm::TrackingMDRef> ModuleCache;
+
+ /// List of interfaces we want to keep even if orphaned.
+ std::vector<void *> RetainedTypes;
+
+ /// Cache of forward declared types to RAUW at the end of
+ /// compilation.
+ std::vector<std::pair<const TagType *, llvm::TrackingMDRef>> ReplaceMap;
+
+  /// Cache of replaceable forward declarations (functions and
+ /// variables) to RAUW at the end of compilation.
+ std::vector<std::pair<const DeclaratorDecl *, llvm::TrackingMDRef>>
+ FwdDeclReplaceMap;
+
+ /// Keep track of our current nested lexical block.
+ std::vector<llvm::TypedTrackingMDRef<llvm::DIScope>> LexicalBlockStack;
+ llvm::DenseMap<const Decl *, llvm::TrackingMDRef> RegionMap;
+ /// Keep track of LexicalBlockStack counter at the beginning of a
+ /// function. This is used to pop unbalanced regions at the end of a
+ /// function.
+ std::vector<unsigned> FnBeginRegionCount;
+
+ /// This is a storage for names that are constructed on demand. For
+ /// example, C++ destructors, C++ operators etc..
+ llvm::BumpPtrAllocator DebugInfoNames;
+ StringRef CWDName;
+
+ llvm::DenseMap<const char *, llvm::TrackingMDRef> DIFileCache;
+ llvm::DenseMap<const FunctionDecl *, llvm::TrackingMDRef> SPCache;
+ /// Cache declarations relevant to DW_TAG_imported_declarations (C++
+ /// using declarations) that aren't covered by other more specific caches.
+ llvm::DenseMap<const Decl *, llvm::TrackingMDRef> DeclCache;
+ llvm::DenseMap<const NamespaceDecl *, llvm::TrackingMDRef> NameSpaceCache;
+ llvm::DenseMap<const NamespaceAliasDecl *, llvm::TrackingMDRef>
+ NamespaceAliasCache;
+ llvm::DenseMap<const Decl *, llvm::TypedTrackingMDRef<llvm::DIDerivedType>>
+ StaticDataMemberCache;
+
+ /// Helper functions for getOrCreateType.
+ /// @{
+ /// Currently the checksum of an interface includes the number of
+ /// ivars and property accessors.
+ llvm::DIType *CreateType(const BuiltinType *Ty);
+ llvm::DIType *CreateType(const ComplexType *Ty);
+ llvm::DIType *CreateQualifiedType(QualType Ty, llvm::DIFile *Fg);
+ llvm::DIType *CreateType(const TypedefType *Ty, llvm::DIFile *Fg);
+ llvm::DIType *CreateType(const TemplateSpecializationType *Ty,
+ llvm::DIFile *Fg);
+ llvm::DIType *CreateType(const ObjCObjectPointerType *Ty, llvm::DIFile *F);
+ llvm::DIType *CreateType(const PointerType *Ty, llvm::DIFile *F);
+ llvm::DIType *CreateType(const BlockPointerType *Ty, llvm::DIFile *F);
+ llvm::DIType *CreateType(const FunctionType *Ty, llvm::DIFile *F);
+ /// Get structure or union type.
+ llvm::DIType *CreateType(const RecordType *Tyg);
+ llvm::DIType *CreateTypeDefinition(const RecordType *Ty);
+ llvm::DICompositeType *CreateLimitedType(const RecordType *Ty);
+ void CollectContainingType(const CXXRecordDecl *RD,
+ llvm::DICompositeType *CT);
+ /// Get Objective-C interface type.
+ llvm::DIType *CreateType(const ObjCInterfaceType *Ty, llvm::DIFile *F);
+ llvm::DIType *CreateTypeDefinition(const ObjCInterfaceType *Ty,
+ llvm::DIFile *F);
+ /// Get Objective-C object type.
+ llvm::DIType *CreateType(const ObjCObjectType *Ty, llvm::DIFile *F);
+ llvm::DIType *CreateType(const VectorType *Ty, llvm::DIFile *F);
+ llvm::DIType *CreateType(const ArrayType *Ty, llvm::DIFile *F);
+ llvm::DIType *CreateType(const LValueReferenceType *Ty, llvm::DIFile *F);
+ llvm::DIType *CreateType(const RValueReferenceType *Ty, llvm::DIFile *Unit);
+ llvm::DIType *CreateType(const MemberPointerType *Ty, llvm::DIFile *F);
+ llvm::DIType *CreateType(const AtomicType *Ty, llvm::DIFile *F);
+ /// Get enumeration type.
+ llvm::DIType *CreateEnumType(const EnumType *Ty);
+ llvm::DIType *CreateTypeDefinition(const EnumType *Ty);
+ /// Look up the completed type for a self pointer in the TypeCache and
+ /// create a copy of it with the ObjectPointer and Artificial flags
+ /// set. If the type is not cached, a new one is created. This should
+ /// never happen though, since creating a type for the implicit self
+ /// argument implies that we already parsed the interface definition
+ /// and the ivar declarations in the implementation.
+ llvm::DIType *CreateSelfType(const QualType &QualTy, llvm::DIType *Ty);
+ /// @}
+
+ /// Get the type from the cache or return null type if it doesn't
+ /// exist.
+ llvm::DIType *getTypeOrNull(const QualType);
+ /// Return the debug type for a C++ method.
+ /// \arg CXXMethodDecl is of FunctionType. This function type is
+ /// not updated to include implicit \c this pointer. Use this routine
+ /// to get a method type which includes \c this pointer.
+ llvm::DISubroutineType *getOrCreateMethodType(const CXXMethodDecl *Method,
+ llvm::DIFile *F);
+ llvm::DISubroutineType *
+ getOrCreateInstanceMethodType(QualType ThisPtr, const FunctionProtoType *Func,
+ llvm::DIFile *Unit);
+ llvm::DISubroutineType *
+ getOrCreateFunctionType(const Decl *D, QualType FnType, llvm::DIFile *F);
+ /// \return debug info descriptor for vtable.
+ llvm::DIType *getOrCreateVTablePtrType(llvm::DIFile *F);
+ /// \return namespace descriptor for the given namespace decl.
+ llvm::DINamespace *getOrCreateNameSpace(const NamespaceDecl *N);
+ llvm::DIType *CreatePointerLikeType(llvm::dwarf::Tag Tag, const Type *Ty,
+ QualType PointeeTy, llvm::DIFile *F);
+ llvm::DIType *getOrCreateStructPtrType(StringRef Name, llvm::DIType *&Cache);
+
+ /// A helper function to create a subprogram for a single member
+ /// function GlobalDecl.
+ llvm::DISubprogram *CreateCXXMemberFunction(const CXXMethodDecl *Method,
+ llvm::DIFile *F,
+ llvm::DIType *RecordTy);
+
+ /// A helper function to collect debug info for C++ member
+ /// functions. This is used while creating debug info entry for a
+ /// Record.
+ void CollectCXXMemberFunctions(const CXXRecordDecl *Decl, llvm::DIFile *F,
+ SmallVectorImpl<llvm::Metadata *> &E,
+ llvm::DIType *T);
+
+ /// A helper function to collect debug info for C++ base
+ /// classes. This is used while creating debug info entry for a
+ /// Record.
+ void CollectCXXBases(const CXXRecordDecl *Decl, llvm::DIFile *F,
+ SmallVectorImpl<llvm::Metadata *> &EltTys,
+ llvm::DIType *RecordTy);
+
+ /// A helper function to collect template parameters.
+ llvm::DINodeArray CollectTemplateParams(const TemplateParameterList *TPList,
+ ArrayRef<TemplateArgument> TAList,
+ llvm::DIFile *Unit);
+ /// A helper function to collect debug info for function template
+ /// parameters.
+ llvm::DINodeArray CollectFunctionTemplateParams(const FunctionDecl *FD,
+ llvm::DIFile *Unit);
+
+ /// A helper function to collect debug info for template
+ /// parameters.
+ llvm::DINodeArray
+ CollectCXXTemplateParams(const ClassTemplateSpecializationDecl *TS,
+ llvm::DIFile *F);
+
+ llvm::DIType *createFieldType(StringRef name, QualType type,
+ uint64_t sizeInBitsOverride, SourceLocation loc,
+ AccessSpecifier AS, uint64_t offsetInBits,
+ llvm::DIFile *tunit, llvm::DIScope *scope,
+ const RecordDecl *RD = nullptr);
+
+ /// Helpers for collecting fields of a record.
+ /// @{
+ void CollectRecordLambdaFields(const CXXRecordDecl *CXXDecl,
+ SmallVectorImpl<llvm::Metadata *> &E,
+ llvm::DIType *RecordTy);
+ llvm::DIDerivedType *CreateRecordStaticField(const VarDecl *Var,
+ llvm::DIType *RecordTy,
+ const RecordDecl *RD);
+ void CollectRecordNormalField(const FieldDecl *Field, uint64_t OffsetInBits,
+ llvm::DIFile *F,
+ SmallVectorImpl<llvm::Metadata *> &E,
+ llvm::DIType *RecordTy, const RecordDecl *RD);
+ void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile *F,
+ SmallVectorImpl<llvm::Metadata *> &E,
+ llvm::DICompositeType *RecordTy);
+
+ /// If the C++ class has vtable info then insert appropriate debug
+ /// info entry in EltTys vector.
+ void CollectVTableInfo(const CXXRecordDecl *Decl, llvm::DIFile *F,
+ SmallVectorImpl<llvm::Metadata *> &EltTys);
+ /// @}
+
+ /// Create a new lexical block node and push it on the stack.
+ void CreateLexicalBlock(SourceLocation Loc);
+
+public:
+ CGDebugInfo(CodeGenModule &CGM);
+ ~CGDebugInfo();
+
+ void finalize();
+
+ /// Set the main CU's DwoId field to \p Signature.
+ void setDwoId(uint64_t Signature);
+
+ /// When generating debug information for a clang module or
+ /// precompiled header, this module map will be used to determine
+ /// the module of origin of each Decl.
+ void setModuleMap(ModuleMap &MMap) { ClangModuleMap = &MMap; }
+
+  /// Update the current source location. If \p Loc is invalid it is
+ /// ignored.
+ void setLocation(SourceLocation Loc);
+
+ /// Emit metadata to indicate a change in line/column information in
+ /// the source file. If the location is invalid, the previous
+ /// location will be reused.
+ void EmitLocation(CGBuilderTy &Builder, SourceLocation Loc);
+
+ /// Emit a call to llvm.dbg.function.start to indicate
+ /// start of a new function.
+ /// \param Loc The location of the function header.
+ /// \param ScopeLoc The location of the function body.
+ void EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
+ SourceLocation ScopeLoc, QualType FnType,
+ llvm::Function *Fn, CGBuilderTy &Builder);
+
+ /// Emit debug info for a function declaration.
+ void EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc, QualType FnType);
+
+ /// Constructs the debug code for exiting a function.
+ void EmitFunctionEnd(CGBuilderTy &Builder);
+
+ /// Emit metadata to indicate the beginning of a new lexical block
+ /// and push the block onto the stack.
+ void EmitLexicalBlockStart(CGBuilderTy &Builder, SourceLocation Loc);
+
+ /// Emit metadata to indicate the end of a new lexical block and pop
+ /// the current block.
+ void EmitLexicalBlockEnd(CGBuilderTy &Builder, SourceLocation Loc);
+
+ /// Emit call to \c llvm.dbg.declare for an automatic variable
+ /// declaration.
+ void EmitDeclareOfAutoVariable(const VarDecl *Decl, llvm::Value *AI,
+ CGBuilderTy &Builder);
+
+ /// Emit call to \c llvm.dbg.declare for an imported variable
+ /// declaration in a block.
+ void EmitDeclareOfBlockDeclRefVariable(const VarDecl *variable,
+ llvm::Value *storage,
+ CGBuilderTy &Builder,
+ const CGBlockInfo &blockInfo,
+ llvm::Instruction *InsertPoint = nullptr);
+
+ /// Emit call to \c llvm.dbg.declare for an argument variable
+ /// declaration.
+ void EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI,
+ unsigned ArgNo, CGBuilderTy &Builder);
+
+ /// Emit call to \c llvm.dbg.declare for the block-literal argument
+ /// to a block invocation function.
+ void EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
+ llvm::Value *Arg, unsigned ArgNo,
+ llvm::Value *LocalAddr,
+ CGBuilderTy &Builder);
+
+ /// Emit information about a global variable.
+ void EmitGlobalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl);
+
+ /// Emit global variable's debug info.
+ void EmitGlobalVariable(const ValueDecl *VD, llvm::Constant *Init);
+
+ /// Emit C++ using directive.
+ void EmitUsingDirective(const UsingDirectiveDecl &UD);
+
+ /// Emit the type explicitly casted to.
+ void EmitExplicitCastType(QualType Ty);
+
+ /// Emit C++ using declaration.
+ void EmitUsingDecl(const UsingDecl &UD);
+
+ /// Emit an @import declaration.
+ void EmitImportDecl(const ImportDecl &ID);
+
+ /// Emit C++ namespace alias.
+ llvm::DIImportedEntity *EmitNamespaceAlias(const NamespaceAliasDecl &NA);
+
+ /// Emit record type's standalone debug info.
+ llvm::DIType *getOrCreateRecordType(QualType Ty, SourceLocation L);
+
+ /// Emit an Objective-C interface type standalone debug info.
+ llvm::DIType *getOrCreateInterfaceType(QualType Ty, SourceLocation Loc);
+
+ /// Emit standalone debug info for a type.
+ llvm::DIType *getOrCreateStandaloneType(QualType Ty, SourceLocation Loc);
+
+ void completeType(const EnumDecl *ED);
+ void completeType(const RecordDecl *RD);
+ void completeRequiredType(const RecordDecl *RD);
+ void completeClassData(const RecordDecl *RD);
+
+ void completeTemplateDefinition(const ClassTemplateSpecializationDecl &SD);
+
+private:
+ /// Emit call to llvm.dbg.declare for a variable declaration.
+ void EmitDeclare(const VarDecl *decl, llvm::Value *AI,
+ llvm::Optional<unsigned> ArgNo, CGBuilderTy &Builder);
+
+ /// Build up structure info for the byref. See \a BuildByRefType.
+ llvm::DIType *EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
+ uint64_t *OffSet);
+
+ /// Get context info for the DeclContext of \p Decl.
+ llvm::DIScope *getDeclContextDescriptor(const Decl *D);
+ /// Get context info for a given DeclContext \p Decl.
+ llvm::DIScope *getContextDescriptor(const Decl *Context,
+ llvm::DIScope *Default);
+
+ llvm::DIScope *getCurrentContextDescriptor(const Decl *Decl);
+
+ /// Create a forward decl for a RecordType in a given context.
+ llvm::DICompositeType *getOrCreateRecordFwdDecl(const RecordType *,
+ llvm::DIScope *);
+
+ /// Return current directory name.
+ StringRef getCurrentDirname();
+
+ /// Create new compile unit.
+ void CreateCompileUnit();
+
+ /// Remap a given path with the current debug prefix map
+ std::string remapDIPath(StringRef) const;
+
+ /// Get the file debug info descriptor for the input location.
+ llvm::DIFile *getOrCreateFile(SourceLocation Loc);
+
+ /// Get the file info for main compile unit.
+ llvm::DIFile *getOrCreateMainFile();
+
+ /// Get the type from the cache or create a new type if necessary.
+ llvm::DIType *getOrCreateType(QualType Ty, llvm::DIFile *Fg);
+
+ /// Get a reference to a clang module. If \p CreateSkeletonCU is true,
+ /// this also creates a split dwarf skeleton compile unit.
+ llvm::DIModule *
+ getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod,
+ bool CreateSkeletonCU);
+
+ /// DebugTypeExtRefs: If \p D originated in a clang module, return it.
+ llvm::DIModule *getParentModuleOrNull(const Decl *D);
+
+ /// Get the type from the cache or create a new partial type if
+ /// necessary.
+ llvm::DICompositeType *getOrCreateLimitedType(const RecordType *Ty,
+ llvm::DIFile *F);
+
+ /// Create type metadata for a source language type.
+ llvm::DIType *CreateTypeNode(QualType Ty, llvm::DIFile *Fg);
+
+ /// Create new member and increase Offset by FType's size.
+ llvm::DIType *CreateMemberType(llvm::DIFile *Unit, QualType FType,
+ StringRef Name, uint64_t *Offset);
+
+ /// Retrieve the DIDescriptor, if any, for the canonical form of this
+ /// declaration.
+ llvm::DINode *getDeclarationOrDefinition(const Decl *D);
+
+ /// \return debug info descriptor to describe method
+ /// declaration for the given method definition.
+ llvm::DISubprogram *getFunctionDeclaration(const Decl *D);
+
+ /// \return debug info descriptor to describe in-class static data
+ /// member declaration for the given out-of-class definition. If D
+ /// is an out-of-class definition of a static data member of a
+ /// class, find its corresponding in-class declaration.
+ llvm::DIDerivedType *
+ getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D);
+
+ /// Create a subprogram describing the forward declaration
+ /// represented in the given FunctionDecl.
+ llvm::DISubprogram *getFunctionForwardDeclaration(const FunctionDecl *FD);
+
+  /// Create a global variable describing the forward declaration
+ /// represented in the given VarDecl.
+ llvm::DIGlobalVariable *
+ getGlobalVariableForwardDeclaration(const VarDecl *VD);
+
+ /// \brief Return a global variable that represents one of the
+  /// collection of global variables created for an anonymous union.
+ ///
+ /// Recursively collect all of the member fields of a global
+ /// anonymous decl and create static variables for them. The first
+ /// time this is called it needs to be on a union and then from
+ /// there we can have additional unnamed fields.
+ llvm::DIGlobalVariable *
+ CollectAnonRecordDecls(const RecordDecl *RD, llvm::DIFile *Unit,
+ unsigned LineNo, StringRef LinkageName,
+ llvm::GlobalVariable *Var, llvm::DIScope *DContext);
+
+ /// Get function name for the given FunctionDecl. If the name is
+ /// constructed on demand (e.g., C++ destructor) then the name is
+ /// stored on the side.
+ StringRef getFunctionName(const FunctionDecl *FD);
+
+ /// Returns the unmangled name of an Objective-C method.
+ /// This is the display name for the debugging info.
+ StringRef getObjCMethodName(const ObjCMethodDecl *FD);
+
+ /// Return selector name. This is used for debugging
+ /// info.
+ StringRef getSelectorName(Selector S);
+
+ /// Get class name including template argument list.
+ StringRef getClassName(const RecordDecl *RD);
+
+ /// Get the vtable name for the given class.
+ StringRef getVTableName(const CXXRecordDecl *Decl);
+
+ /// Get line number for the location. If location is invalid
+ /// then use current location.
+ unsigned getLineNumber(SourceLocation Loc);
+
+ /// Get column number for the location. If location is
+ /// invalid then use current location.
+ /// \param Force Assume DebugColumnInfo option is true.
+ unsigned getColumnNumber(SourceLocation Loc, bool Force = false);
+
+ /// Collect various properties of a FunctionDecl.
+ /// \param GD A GlobalDecl whose getDecl() must return a FunctionDecl.
+ void collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit,
+ StringRef &Name, StringRef &LinkageName,
+ llvm::DIScope *&FDContext,
+ llvm::DINodeArray &TParamsArray,
+ unsigned &Flags);
+
+ /// Collect various properties of a VarDecl.
+ void collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit,
+ unsigned &LineNo, QualType &T, StringRef &Name,
+ StringRef &LinkageName, llvm::DIScope *&VDContext);
+
+ /// Allocate a copy of \p A using the DebugInfoNames allocator
+ /// and return a reference to it. If multiple arguments are given the strings
+ /// are concatenated.
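+  /// For example, internString("~", "Foo") returns "~Foo" backed by the
+  /// DebugInfoNames allocator.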
+ StringRef internString(StringRef A, StringRef B = StringRef()) {
+ char *Data = DebugInfoNames.Allocate<char>(A.size() + B.size());
+ if (!A.empty())
+ std::memcpy(Data, A.data(), A.size());
+ if (!B.empty())
+ std::memcpy(Data + A.size(), B.data(), B.size());
+ return StringRef(Data, A.size() + B.size());
+ }
+};
+
+/// A scoped helper to set the current debug location to the specified
+/// location or preferred location of the specified Expr.
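+/// Typical use is RAII-style: construct one before emitting IR for an
+/// expression, and the previous debug location is restored when the helper
+/// goes out of scope.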
+class ApplyDebugLocation {
+private:
+ void init(SourceLocation TemporaryLocation, bool DefaultToEmpty = false);
+ ApplyDebugLocation(CodeGenFunction &CGF, bool DefaultToEmpty,
+ SourceLocation TemporaryLocation);
+
+ llvm::DebugLoc OriginalLocation;
+ CodeGenFunction *CGF;
+
+public:
+ /// Set the location to the (valid) TemporaryLocation.
+ ApplyDebugLocation(CodeGenFunction &CGF, SourceLocation TemporaryLocation);
+ ApplyDebugLocation(CodeGenFunction &CGF, const Expr *E);
+ ApplyDebugLocation(CodeGenFunction &CGF, llvm::DebugLoc Loc);
+ ApplyDebugLocation(ApplyDebugLocation &&Other) : CGF(Other.CGF) {
+ Other.CGF = nullptr;
+ }
+
+ ~ApplyDebugLocation();
+
+ /// \brief Apply TemporaryLocation if it is valid. Otherwise switch
+ /// to an artificial debug location that has a valid scope, but no
+ /// line information.
+ ///
+ /// Artificial locations are useful when emitting compiler-generated
+ /// helper functions that have no source location associated with
+ /// them. The DWARF specification allows the compiler to use the
+ /// special line number 0 to indicate code that can not be
+ /// attributed to any source location. Note that passing an empty
+ /// SourceLocation to CGDebugInfo::setLocation() will result in the
+ /// last valid location being reused.
+ static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF) {
+ return ApplyDebugLocation(CGF, false, SourceLocation());
+ }
+ /// \brief Apply TemporaryLocation if it is valid. Otherwise switch
+ /// to an artificial debug location that has a valid scope, but no
+ /// line information.
+ static ApplyDebugLocation
+ CreateDefaultArtificial(CodeGenFunction &CGF,
+ SourceLocation TemporaryLocation) {
+ return ApplyDebugLocation(CGF, false, TemporaryLocation);
+ }
+
+ /// Set the IRBuilder to not attach debug locations. Note that
+ /// passing an empty SourceLocation to \a CGDebugInfo::setLocation()
+ /// will result in the last valid location being reused. Note that
+ /// all instructions that do not have a location at the beginning of
+  /// a function are counted towards the function prologue.
+ static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF) {
+ return ApplyDebugLocation(CGF, true, SourceLocation());
+ }
+};
+
+} // namespace CodeGen
+} // namespace clang
+
+#endif // LLVM_CLANG_LIB_CODEGEN_CGDEBUGINFO_H
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
new file mode 100644
index 0000000..b78e80d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
@@ -0,0 +1,1863 @@
+//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Decl nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGBlocks.h"
+#include "CGCleanup.h"
+#include "CGDebugInfo.h"
+#include "CGOpenCLRuntime.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Type.h"
+using namespace clang;
+using namespace CodeGen;
+
+void CodeGenFunction::EmitDecl(const Decl &D) {
+ switch (D.getKind()) {
+ case Decl::BuiltinTemplate:
+ case Decl::TranslationUnit:
+ case Decl::ExternCContext:
+ case Decl::Namespace:
+ case Decl::UnresolvedUsingTypename:
+ case Decl::ClassTemplateSpecialization:
+ case Decl::ClassTemplatePartialSpecialization:
+ case Decl::VarTemplateSpecialization:
+ case Decl::VarTemplatePartialSpecialization:
+ case Decl::TemplateTypeParm:
+ case Decl::UnresolvedUsingValue:
+ case Decl::NonTypeTemplateParm:
+ case Decl::CXXMethod:
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor:
+ case Decl::CXXConversion:
+ case Decl::Field:
+ case Decl::MSProperty:
+ case Decl::IndirectField:
+ case Decl::ObjCIvar:
+ case Decl::ObjCAtDefsField:
+ case Decl::ParmVar:
+ case Decl::ImplicitParam:
+ case Decl::ClassTemplate:
+ case Decl::VarTemplate:
+ case Decl::FunctionTemplate:
+ case Decl::TypeAliasTemplate:
+ case Decl::TemplateTemplateParm:
+ case Decl::ObjCMethod:
+ case Decl::ObjCCategory:
+ case Decl::ObjCProtocol:
+ case Decl::ObjCInterface:
+ case Decl::ObjCCategoryImpl:
+ case Decl::ObjCImplementation:
+ case Decl::ObjCProperty:
+ case Decl::ObjCCompatibleAlias:
+ case Decl::AccessSpec:
+ case Decl::LinkageSpec:
+ case Decl::ObjCPropertyImpl:
+ case Decl::FileScopeAsm:
+ case Decl::Friend:
+ case Decl::FriendTemplate:
+ case Decl::Block:
+ case Decl::Captured:
+ case Decl::ClassScopeFunctionSpecialization:
+ case Decl::UsingShadow:
+ case Decl::ObjCTypeParam:
+ llvm_unreachable("Declaration should not be in declstmts!");
+ case Decl::Function: // void X();
+ case Decl::Record: // struct/union/class X;
+ case Decl::Enum: // enum X;
+ case Decl::EnumConstant: // enum ? { X = ? }
+ case Decl::CXXRecord: // struct/union/class X; [C++]
+ case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
+ case Decl::Label: // __label__ x;
+ case Decl::Import:
+ case Decl::OMPThreadPrivate:
+ case Decl::Empty:
+ // None of these decls require codegen support.
+ return;
+
+ case Decl::NamespaceAlias:
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
+ return;
+ case Decl::Using: // using X; [C++]
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitUsingDecl(cast<UsingDecl>(D));
+ return;
+ case Decl::UsingDirective: // using namespace X; [C++]
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
+ return;
+ case Decl::Var: {
+ const VarDecl &VD = cast<VarDecl>(D);
+ assert(VD.isLocalVarDecl() &&
+ "Should not see file-scope variables inside a function!");
+ return EmitVarDecl(VD);
+ }
+
+ case Decl::Typedef: // typedef int X;
+ case Decl::TypeAlias: { // using X = int; [C++0x]
+ const TypedefNameDecl &TD = cast<TypedefNameDecl>(D);
+ QualType Ty = TD.getUnderlyingType();
+
+ if (Ty->isVariablyModifiedType())
+ EmitVariablyModifiedType(Ty);
+ }
+ }
+}
+
+/// EmitVarDecl - This method handles emission of any variable declaration
+/// inside a function, including static vars etc.
+void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
+ if (D.isStaticLocal()) {
+ llvm::GlobalValue::LinkageTypes Linkage =
+ CGM.getLLVMLinkageVarDefinition(&D, /*isConstant=*/false);
+
+ // FIXME: We need to force the emission/use of a guard variable for
+ // some variables even if we can constant-evaluate them because
+ // we can't guarantee every translation unit will constant-evaluate them.
+
+ return EmitStaticVarDecl(D, Linkage);
+ }
+
+ if (D.hasExternalStorage())
+ // Don't emit it now, allow it to be emitted lazily on its first use.
+ return;
+
+ if (D.getType().getAddressSpace() == LangAS::opencl_local)
+ return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);
+
+ assert(D.hasLocalStorage());
+ return EmitAutoVarDecl(D);
+}
+
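+// Computes the name for a function-local static: the mangled name in C++,
+// otherwise a pretty "context.name" form -- e.g. 'static int x;' inside a C
+// function 'foo' yields "foo.x" (illustrative).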
+static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
+ if (CGM.getLangOpts().CPlusPlus)
+ return CGM.getMangledName(&D).str();
+
+ // If this isn't C++, we don't need a mangled name, just a pretty one.
+ assert(!D.isExternallyVisible() && "name shouldn't matter");
+ std::string ContextName;
+ const DeclContext *DC = D.getDeclContext();
+ if (auto *CD = dyn_cast<CapturedDecl>(DC))
+ DC = cast<DeclContext>(CD->getNonClosureContext());
+ if (const auto *FD = dyn_cast<FunctionDecl>(DC))
+ ContextName = CGM.getMangledName(FD);
+ else if (const auto *BD = dyn_cast<BlockDecl>(DC))
+ ContextName = CGM.getBlockMangledName(GlobalDecl(), BD);
+ else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
+ ContextName = OMD->getSelector().getAsString();
+ else
+ llvm_unreachable("Unknown context for static var decl");
+
+ ContextName += "." + D.getNameAsString();
+ return ContextName;
+}
+
+llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
+ const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
+ // In general, we don't always emit static var decls once before we reference
+ // them. It is possible to reference them before emitting the function that
+ // contains them, and it is possible to emit the containing function multiple
+ // times.
+ if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
+ return ExistingGV;
+
+ QualType Ty = D.getType();
+ assert(Ty->isConstantSizeType() && "VLAs can't be static");
+
+ // Use the label if the variable is renamed with the asm-label extension.
+ std::string Name;
+ if (D.hasAttr<AsmLabelAttr>())
+ Name = getMangledName(&D);
+ else
+ Name = getStaticDeclName(*this, D);
+
+ llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
+ unsigned AddrSpace =
+ GetGlobalVarAddressSpace(&D, getContext().getTargetAddressSpace(Ty));
+
+ // Local address space cannot have an initializer.
+ llvm::Constant *Init = nullptr;
+ if (Ty.getAddressSpace() != LangAS::opencl_local)
+ Init = EmitNullConstant(Ty);
+ else
+ Init = llvm::UndefValue::get(LTy);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), LTy,
+ Ty.isConstant(getContext()), Linkage,
+ Init, Name, nullptr,
+ llvm::GlobalVariable::NotThreadLocal,
+ AddrSpace);
+ GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+ setGlobalVisibility(GV, &D);
+
+ if (supportsCOMDAT() && GV->isWeakForLinker())
+ GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
+
+ if (D.getTLSKind())
+ setTLSMode(GV, D);
+
+ if (D.isExternallyVisible()) {
+ if (D.hasAttr<DLLImportAttr>())
+ GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
+ else if (D.hasAttr<DLLExportAttr>())
+ GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
+ }
+
+ // Make sure the result is of the correct type.
+ unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(Ty);
+ llvm::Constant *Addr = GV;
+ if (AddrSpace != ExpectedAddrSpace) {
+ llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace);
+ Addr = llvm::ConstantExpr::getAddrSpaceCast(GV, PTy);
+ }
+
+ setStaticLocalDeclAddress(&D, Addr);
+
+ // Ensure that the static local gets initialized by making sure the parent
+ // function gets emitted eventually.
+ const Decl *DC = cast<Decl>(D.getDeclContext());
+
+ // We can't name blocks or captured statements directly, so try to emit their
+ // parents.
+ if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
+ DC = DC->getNonClosureContext();
+ // FIXME: Ensure that global blocks get emitted.
+ if (!DC)
+ return Addr;
+ }
+
+ GlobalDecl GD;
+ if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
+ GD = GlobalDecl(CD, Ctor_Base);
+ else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
+ GD = GlobalDecl(DD, Dtor_Base);
+ else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
+ GD = GlobalDecl(FD);
+ else {
+ // Don't do anything for Obj-C method decls or global closures. We should
+ // never defer them.
+ assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
+ }
+ if (GD.getDecl())
+ (void)GetAddrOfGlobal(GD);
+
+ return Addr;
+}
+
+/// hasNontrivialDestruction - Determine whether a type's destruction is
+/// non-trivial. If so, and the variable uses static initialization, we must
+/// register its destructor to run on exit.
+static bool hasNontrivialDestruction(QualType T) {
+ CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ return RD && !RD->hasTrivialDestructor();
+}
+
+/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
+/// global variable that has already been created for it. If the initializer
+/// has a different type than GV does, this may free GV and return a different
+/// one. Otherwise it just returns GV.
+llvm::GlobalVariable *
+CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
+ llvm::GlobalVariable *GV) {
+ llvm::Constant *Init = CGM.EmitConstantInit(D, this);
+
+ // If constant emission failed, then this should be a C++ static
+ // initializer.
+ if (!Init) {
+ if (!getLangOpts().CPlusPlus)
+ CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
+ else if (Builder.GetInsertBlock()) {
+ // Since we have a static initializer, this global variable can't
+ // be constant.
+ GV->setConstant(false);
+
+ EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
+ }
+ return GV;
+ }
+
+ // The initializer may differ in type from the global. Rewrite
+ // the global to match the initializer. (We have to do this
+ // because some types, like unions, can't be completely represented
+ // in the LLVM type system.)
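+  // For example, a union is typically lowered to a struct containing only
+  // its largest member, so an initializer for a different member produces
+  // a constant of a different LLVM type.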
+ if (GV->getType()->getElementType() != Init->getType()) {
+ llvm::GlobalVariable *OldGV = GV;
+
+ GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
+ OldGV->isConstant(),
+ OldGV->getLinkage(), Init, "",
+ /*InsertBefore*/ OldGV,
+ OldGV->getThreadLocalMode(),
+ CGM.getContext().getTargetAddressSpace(D.getType()));
+ GV->setVisibility(OldGV->getVisibility());
+ GV->setComdat(OldGV->getComdat());
+
+ // Steal the name of the old global
+ GV->takeName(OldGV);
+
+ // Replace all uses of the old global with the new global
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtrForOldDecl);
+
+ // Erase the old global, since it is no longer used.
+ OldGV->eraseFromParent();
+ }
+
+ GV->setConstant(CGM.isTypeConstant(D.getType(), true));
+ GV->setInitializer(Init);
+
+ if (hasNontrivialDestruction(D.getType())) {
+ // We have a constant initializer, but a nontrivial destructor. We still
+ // need to perform a guarded "initialization" in order to register the
+ // destructor.
+ EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
+ }
+
+ return GV;
+}
+
+void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+ // Check to see if we already have a global variable for this
+ // declaration. This can happen when double-emitting function
+ // bodies, e.g. with complete and base constructors.
+ llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
+ CharUnits alignment = getContext().getDeclAlign(&D);
+
+ // Store into LocalDeclMap before generating initializer to handle
+ // circular references.
+ setAddrOfLocalVar(&D, Address(addr, alignment));
+
+ // We can't have a VLA here, but we can have a pointer to a VLA,
+ // even though that doesn't really make any sense.
+ // Make sure to evaluate VLA bounds now so that we have them for later.
+ if (D.getType()->isVariablyModifiedType())
+ EmitVariablyModifiedType(D.getType());
+
+ // Save the type in case adding the initializer forces a type change.
+ llvm::Type *expectedType = addr->getType();
+
+ llvm::GlobalVariable *var =
+ cast<llvm::GlobalVariable>(addr->stripPointerCasts());
+ // If this value has an initializer, emit it.
+ if (D.getInit())
+ var = AddInitializerToStaticVarDecl(D, var);
+
+ var->setAlignment(alignment.getQuantity());
+
+ if (D.hasAttr<AnnotateAttr>())
+ CGM.AddGlobalAnnotations(&D, var);
+
+ if (const SectionAttr *SA = D.getAttr<SectionAttr>())
+ var->setSection(SA->getName());
+
+ if (D.hasAttr<UsedAttr>())
+ CGM.addUsedGlobal(var);
+
+ // We may have to cast the constant because of the initializer
+ // mismatch above.
+ //
+  // FIXME: It is really dangerous to store this in the map; if anyone
+  // RAUW's the GV, uses of this constant will be invalid.
+ llvm::Constant *castedAddr =
+ llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
+ if (var != castedAddr)
+ LocalDeclMap.find(&D)->second = Address(castedAddr, alignment);
+ CGM.setStaticLocalDeclAddress(&D, castedAddr);
+
+ CGM.getSanitizerMetadata()->reportGlobalToASan(var, D);
+
+ // Emit global variable debug descriptor for static vars.
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI &&
+ CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo) {
+ DI->setLocation(D.getLocation());
+ DI->EmitGlobalVariable(var, &D);
+ }
+}
+
+namespace {
+ struct DestroyObject final : EHScopeStack::Cleanup {
+ DestroyObject(Address addr, QualType type,
+ CodeGenFunction::Destroyer *destroyer,
+ bool useEHCleanupForArray)
+ : addr(addr), type(type), destroyer(destroyer),
+ useEHCleanupForArray(useEHCleanupForArray) {}
+
+ Address addr;
+ QualType type;
+ CodeGenFunction::Destroyer *destroyer;
+ bool useEHCleanupForArray;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ // Don't use an EH cleanup recursively from an EH cleanup.
+ bool useEHCleanupForArray =
+ flags.isForNormalCleanup() && this->useEHCleanupForArray;
+
+ CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
+ }
+ };
+
+ struct DestroyNRVOVariable final : EHScopeStack::Cleanup {
+ DestroyNRVOVariable(Address addr,
+ const CXXDestructorDecl *Dtor,
+ llvm::Value *NRVOFlag)
+ : Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(addr) {}
+
+ const CXXDestructorDecl *Dtor;
+ llvm::Value *NRVOFlag;
+ Address Loc;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ // Along the exceptions path we always execute the dtor.
+ bool NRVO = flags.isForNormalCleanup() && NRVOFlag;
+
+ llvm::BasicBlock *SkipDtorBB = nullptr;
+ if (NRVO) {
+ // If we exited via NRVO, we skip the destructor call.
+ llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
+ SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
+ llvm::Value *DidNRVO =
+ CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
+ CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
+ CGF.EmitBlock(RunDtorBB);
+ }
+
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false,
+ /*Delegating=*/false,
+ Loc);
+
+ if (NRVO) CGF.EmitBlock(SkipDtorBB);
+ }
+ };
+
+ struct CallStackRestore final : EHScopeStack::Cleanup {
+ Address Stack;
+ CallStackRestore(Address Stack) : Stack(Stack) {}
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ llvm::Value *V = CGF.Builder.CreateLoad(Stack);
+ llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
+ CGF.Builder.CreateCall(F, V);
+ }
+ };
+
+ struct ExtendGCLifetime final : EHScopeStack::Cleanup {
+ const VarDecl &Var;
+ ExtendGCLifetime(const VarDecl *var) : Var(*var) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ // Compute the address of the local variable, in case it's a
+ // byref or something.
+ DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
+ Var.getType(), VK_LValue, SourceLocation());
+ llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
+ SourceLocation());
+ CGF.EmitExtendGCLifetime(value);
+ }
+ };
+
+ struct CallCleanupFunction final : EHScopeStack::Cleanup {
+ llvm::Constant *CleanupFn;
+ const CGFunctionInfo &FnInfo;
+ const VarDecl &Var;
+
+ CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
+ const VarDecl *Var)
+ : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
+ Var.getType(), VK_LValue, SourceLocation());
+ // Compute the address of the local variable, in case it's a byref
+ // or something.
+ llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer();
+
+ // In some cases, the type of the function argument will be different from
+ // the type of the pointer. An example of this is
+ // void f(void* arg);
+ // __attribute__((cleanup(f))) void *g;
+ //
+ // To fix this we insert a bitcast here.
+ QualType ArgTy = FnInfo.arg_begin()->type;
+ llvm::Value *Arg =
+ CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));
+
+ CallArgList Args;
+ Args.add(RValue::get(Arg),
+ CGF.getContext().getPointerType(Var.getType()));
+ CGF.EmitCall(FnInfo, CleanupFn, ReturnValueSlot(), Args);
+ }
+ };
+
+ /// A cleanup to call @llvm.lifetime.end.
+ class CallLifetimeEnd final : public EHScopeStack::Cleanup {
+ llvm::Value *Addr;
+ llvm::Value *Size;
+ public:
+ CallLifetimeEnd(Address addr, llvm::Value *size)
+ : Addr(addr.getPointer()), Size(size) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ CGF.EmitLifetimeEnd(Size, Addr);
+ }
+ };
+}
+
+/// EmitAutoVarWithLifetime - Does the setup required for an automatic
+/// variable with lifetime.
+static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
+ Address addr,
+ Qualifiers::ObjCLifetime lifetime) {
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing to do
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ CodeGenFunction::Destroyer *destroyer =
+ (var.hasAttr<ObjCPreciseLifetimeAttr>()
+ ? CodeGenFunction::destroyARCStrongPrecise
+ : CodeGenFunction::destroyARCStrongImprecise);
+
+ CleanupKind cleanupKind = CGF.getARCCleanupKind();
+ CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
+ cleanupKind & EHCleanup);
+ break;
+ }
+ case Qualifiers::OCL_Autoreleasing:
+ // nothing to do
+ break;
+
+ case Qualifiers::OCL_Weak:
+ // __weak objects always get EH cleanups; otherwise, exceptions
+ // could cause really nasty crashes instead of mere leaks.
+ CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
+ CodeGenFunction::destroyARCWeak,
+ /*useEHCleanup*/ true);
+ break;
+ }
+}
+
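+/// Determine whether the given variable is referenced, directly or via a
+/// block capture, anywhere within the given statement. This is used to
+/// detect initializers that access the variable being initialized, e.g.
+/// "__strong id x = f(x);".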
+static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
+ if (const Expr *e = dyn_cast<Expr>(s)) {
+ // Skip the most common kinds of expressions that make
+ // hierarchy-walking expensive.
+ s = e = e->IgnoreParenCasts();
+
+ if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
+ return (ref->getDecl() == &var);
+ if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
+ const BlockDecl *block = be->getBlockDecl();
+ for (const auto &I : block->captures()) {
+ if (I.getVariable() == &var)
+ return true;
+ }
+ }
+ }
+
+ for (const Stmt *SubStmt : s->children())
+    // SubStmt might be null, e.g. a missing decl or the condition of an if-stmt.
+ if (SubStmt && isAccessedBy(var, SubStmt))
+ return true;
+
+ return false;
+}
+
+static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
+ if (!decl) return false;
+ if (!isa<VarDecl>(decl)) return false;
+ const VarDecl *var = cast<VarDecl>(decl);
+ return isAccessedBy(*var, e);
+}
+
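+/// Try to emit the initialization of a __weak variable whose initializer is
+/// an l-value of __weak type (possibly behind representation-preserving
+/// casts) as a call to objc_copyWeak, or objc_moveWeak for an x-value,
+/// instead of loading and re-storing the weak reference. Returns true if
+/// the initialization was emitted this way.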
+static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
+ const LValue &destLV, const Expr *init) {
+ bool needsCast = false;
+
+ while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
+ switch (castExpr->getCastKind()) {
+ // Look through casts that don't require representation changes.
+ case CK_NoOp:
+ case CK_BitCast:
+ case CK_BlockPointerToObjCPointerCast:
+ needsCast = true;
+ break;
+
+ // If we find an l-value to r-value cast from a __weak variable,
+ // emit this operation as a copy or move.
+ case CK_LValueToRValue: {
+ const Expr *srcExpr = castExpr->getSubExpr();
+ if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
+ return false;
+
+ // Emit the source l-value.
+ LValue srcLV = CGF.EmitLValue(srcExpr);
+
+ // Handle a formal type change to avoid asserting.
+ auto srcAddr = srcLV.getAddress();
+ if (needsCast) {
+ srcAddr = CGF.Builder.CreateElementBitCast(srcAddr,
+ destLV.getAddress().getElementType());
+ }
+
+ // If it was an l-value, use objc_copyWeak.
+ if (srcExpr->getValueKind() == VK_LValue) {
+ CGF.EmitARCCopyWeak(destLV.getAddress(), srcAddr);
+ } else {
+ assert(srcExpr->getValueKind() == VK_XValue);
+ CGF.EmitARCMoveWeak(destLV.getAddress(), srcAddr);
+ }
+ return true;
+ }
+
+ // Stop at anything else.
+ default:
+ return false;
+ }
+
+ init = castExpr->getSubExpr();
+ continue;
+ }
+ return false;
+}
+
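+/// Rewrite the given lvalue to address the storage slot inside the
+/// variable's __block byref structure, following the forwarding pointer in
+/// case the variable has been moved to the heap.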
+static void drillIntoBlockVariable(CodeGenFunction &CGF,
+ LValue &lvalue,
+ const VarDecl *var) {
+ lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(), var));
+}
+
+void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
+ LValue lvalue, bool capturedByInit) {
+ Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
+ if (!lifetime) {
+ llvm::Value *value = EmitScalarExpr(init);
+ if (capturedByInit)
+ drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ EmitStoreThroughLValue(RValue::get(value), lvalue, true);
+ return;
+ }
+
+ if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
+ init = DIE->getExpr();
+
+ // If we're emitting a value with lifetime, we have to do the
+ // initialization *before* we leave the cleanup scopes.
+ if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(init)) {
+ enterFullExpression(ewc);
+ init = ewc->getSubExpr();
+ }
+ CodeGenFunction::RunCleanupsScope Scope(*this);
+
+ // We have to maintain the illusion that the variable is
+ // zero-initialized. If the variable might be accessed in its
+ // initializer, zero-initialize before running the initializer, then
+ // actually perform the initialization with an assign.
+ bool accessedByInit = false;
+ if (lifetime != Qualifiers::OCL_ExplicitNone)
+ accessedByInit = (capturedByInit || isAccessedBy(D, init));
+ if (accessedByInit) {
+ LValue tempLV = lvalue;
+ // Drill down to the __block object if necessary.
+ if (capturedByInit) {
+ // We can use a simple GEP for this because it can't have been
+ // moved yet.
+ tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(),
+ cast<VarDecl>(D),
+ /*follow*/ false));
+ }
+
+ auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType());
+ llvm::Value *zero = llvm::ConstantPointerNull::get(ty);
+
+ // If __weak, we want to use a barrier under certain conditions.
+ if (lifetime == Qualifiers::OCL_Weak)
+ EmitARCInitWeak(tempLV.getAddress(), zero);
+
+ // Otherwise just do a simple store.
+ else
+ EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
+ }
+
+ // Emit the initializer.
+ llvm::Value *value = nullptr;
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing to do
+ value = EmitScalarExpr(init);
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ value = EmitARCRetainScalarExpr(init);
+ break;
+ }
+
+ case Qualifiers::OCL_Weak: {
+ // If it's not accessed by the initializer, try to emit the
+ // initialization with a copy or move.
+ if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
+ return;
+ }
+
+ // No way to optimize a producing initializer into this. It's not
+ // worth optimizing for, because the value will immediately
+ // disappear in the common case.
+ value = EmitScalarExpr(init);
+
+ if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ if (accessedByInit)
+ EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
+ else
+ EmitARCInitWeak(lvalue.getAddress(), value);
+ return;
+ }
+
+ case Qualifiers::OCL_Autoreleasing:
+ value = EmitARCRetainAutoreleaseScalarExpr(init);
+ break;
+ }
+
+ if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+
+ // If the variable might have been accessed by its initializer, we
+ // might have to initialize with a barrier. We have to do this for
+ // both __weak and __strong, but __weak got filtered out above.
+ if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
+ llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
+ EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
+ EmitARCRelease(oldValue, ARCImpreciseLifetime);
+ return;
+ }
+
+ EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
+}
+
+/// EmitScalarInit - Initialize the given lvalue with the given object.
+void CodeGenFunction::EmitScalarInit(llvm::Value *init, LValue lvalue) {
+ Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
+ if (!lifetime)
+ return EmitStoreThroughLValue(RValue::get(init), lvalue, true);
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing to do
+ break;
+
+ case Qualifiers::OCL_Strong:
+ init = EmitARCRetain(lvalue.getType(), init);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ // Initialize and then skip the primitive store.
+ EmitARCInitWeak(lvalue.getAddress(), init);
+ return;
+
+ case Qualifiers::OCL_Autoreleasing:
+ init = EmitARCRetainAutorelease(lvalue.getType(), init);
+ break;
+ }
+
+ EmitStoreOfScalar(init, lvalue, /* isInitialization */ true);
+}
+
+/// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
+/// non-zero parts of the specified initializer with equal or fewer than
+/// NumStores scalar stores.
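+/// For example, "int a[100] = {1};" is mostly zero and needs only a single
+/// scalar store after the memset.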
+static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
+ unsigned &NumStores) {
+  // Zero and Undef never require any extra stores.
+ if (isa<llvm::ConstantAggregateZero>(Init) ||
+ isa<llvm::ConstantPointerNull>(Init) ||
+ isa<llvm::UndefValue>(Init))
+ return true;
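+  // Simple scalars cost at most one store each; a zero value is free
+  // because the memset has already written it.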
+ if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
+ isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
+ isa<llvm::ConstantExpr>(Init))
+ return Init->isNullValue() || NumStores--;
+
+ // See if we can emit each element.
+ if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
+ for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
+ llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
+ if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
+ return false;
+ }
+ return true;
+ }
+
+ if (llvm::ConstantDataSequential *CDS =
+ dyn_cast<llvm::ConstantDataSequential>(Init)) {
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
+ llvm::Constant *Elt = CDS->getElementAsConstant(i);
+ if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
+ return false;
+ }
+ return true;
+ }
+
+ // Anything else is hard and scary.
+ return false;
+}
+
+/// emitStoresForInitAfterMemset - For inits that
+/// canEmitInitWithFewStoresAfterMemset returned true for, emit the scalar
+/// stores that would be required.
+static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
+ bool isVolatile, CGBuilderTy &Builder) {
+ assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
+ "called emitStoresForInitAfterMemset for zero or undef value.");
+
+ if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
+ isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
+ isa<llvm::ConstantExpr>(Init)) {
+ Builder.CreateDefaultAlignedStore(Init, Loc, isVolatile);
+ return;
+ }
+
+ if (llvm::ConstantDataSequential *CDS =
+ dyn_cast<llvm::ConstantDataSequential>(Init)) {
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
+ llvm::Constant *Elt = CDS->getElementAsConstant(i);
+
+ // If necessary, get a pointer to the element and emit it.
+ if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
+ emitStoresForInitAfterMemset(
+ Elt, Builder.CreateConstGEP2_32(Init->getType(), Loc, 0, i),
+ isVolatile, Builder);
+ }
+ return;
+ }
+
+ assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
+ "Unknown value type!");
+
+ for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
+ llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
+
+ // If necessary, get a pointer to the element and emit it.
+ if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
+ emitStoresForInitAfterMemset(
+ Elt, Builder.CreateConstGEP2_32(Init->getType(), Loc, 0, i),
+ isVolatile, Builder);
+ }
+}
+
+/// shouldUseMemSetPlusStoresToInitialize - Decide whether we should use memset
+/// plus some stores to initialize a local variable instead of using a memcpy
+/// from a constant global. It is beneficial to use memset if the global is all
+/// zeros, or mostly zeros and large.
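+/// For example, a 400-byte array with two non-zero elements becomes a
+/// memset of zero plus two scalar stores rather than a 400-byte memcpy
+/// from a constant global.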
+static bool shouldUseMemSetPlusStoresToInitialize(llvm::Constant *Init,
+ uint64_t GlobalSize) {
+ // If a global is all zeros, always use a memset.
+ if (isa<llvm::ConstantAggregateZero>(Init)) return true;
+
+ // If a non-zero global is <= 32 bytes, always use a memcpy. If it is large,
+ // do it if it will require 6 or fewer scalar stores.
+  // TODO: Should the budget depend on the size? Avoiding a large global
+  // warrants plopping in more stores.
+ unsigned StoreBudget = 6;
+ uint64_t SizeLimit = 32;
+
+ return GlobalSize > SizeLimit &&
+ canEmitInitWithFewStoresAfterMemset(Init, StoreBudget);
+}
+
+/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
+/// variable declaration with auto, register, or no storage class specifier.
+/// These turn into simple stack objects, or GlobalValues depending on target.
+void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
+ AutoVarEmission emission = EmitAutoVarAlloca(D);
+ EmitAutoVarInit(emission);
+ EmitAutoVarCleanups(emission);
+}
+
+/// Emit a lifetime.begin marker if some criteria are satisfied.
+/// \return a pointer to the temporary size Value if a marker was emitted, null
+/// otherwise
+llvm::Value *CodeGenFunction::EmitLifetimeStart(uint64_t Size,
+ llvm::Value *Addr) {
+ // For now, only in optimized builds.
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0)
+ return nullptr;
+
+ // Disable lifetime markers in msan builds.
+ // FIXME: Remove this when msan works with lifetime markers.
+ if (getLangOpts().Sanitize.has(SanitizerKind::Memory))
+ return nullptr;
+
+ llvm::Value *SizeV = llvm::ConstantInt::get(Int64Ty, Size);
+ Addr = Builder.CreateBitCast(Addr, Int8PtrTy);
+ llvm::CallInst *C =
+ Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
+ C->setDoesNotThrow();
+ return SizeV;
+}
+
+void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
+ Addr = Builder.CreateBitCast(Addr, Int8PtrTy);
+ llvm::CallInst *C =
+ Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
+ C->setDoesNotThrow();
+}
+
+/// EmitAutoVarAlloca - Emit the alloca and debug information for a
+/// local variable. Does not emit initialization or destruction.
+CodeGenFunction::AutoVarEmission
+CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
+ QualType Ty = D.getType();
+
+ AutoVarEmission emission(D);
+
+ bool isByRef = D.hasAttr<BlocksAttr>();
+ emission.IsByRef = isByRef;
+
+ CharUnits alignment = getContext().getDeclAlign(&D);
+
+ // If the type is variably-modified, emit all the VLA sizes for it.
+ if (Ty->isVariablyModifiedType())
+ EmitVariablyModifiedType(Ty);
+
+ Address address = Address::invalid();
+ if (Ty->isConstantSizeType()) {
+ bool NRVO = getLangOpts().ElideConstructors &&
+ D.isNRVOVariable();
+
+ // If this value is an array or struct with a statically determinable
+ // constant initializer, there are optimizations we can do.
+ //
+ // TODO: We should constant-evaluate the initializer of any variable,
+ // as long as it is initialized by a constant expression. Currently,
+ // isConstantInitializer produces wrong answers for structs with
+ // reference or bitfield members, and a few other cases, and checking
+ // for POD-ness protects us from some of these.
+ if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
+ (D.isConstexpr() ||
+ ((Ty.isPODType(getContext()) ||
+ getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
+ D.getInit()->isConstantInitializer(getContext(), false)))) {
+
+ // If the variable's a const type, and it's neither an NRVO
+ // candidate nor a __block variable and has no mutable members,
+ // emit it as a global instead.
+ if (CGM.getCodeGenOpts().MergeAllConstants && !NRVO && !isByRef &&
+ CGM.isTypeConstant(Ty, true)) {
+ EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
+
+ // Signal this condition to later callbacks.
+ emission.Addr = Address::invalid();
+ assert(emission.wasEmittedAsGlobal());
+ return emission;
+ }
+
+ // Otherwise, tell the initialization code that we're in this case.
+ emission.IsConstantAggregate = true;
+ }
+
+ // A normal fixed sized variable becomes an alloca in the entry block,
+ // unless it's an NRVO variable.
+
+ if (NRVO) {
+ // The named return value optimization: allocate this variable in the
+ // return slot, so that we can elide the copy when returning this
+ // variable (C++0x [class.copy]p34).
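+      // For example, in "X f() { X x; ...; return x; }", 'x' is
+      // constructed directly in the return slot and the copy on return
+      // is elided.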
+ address = ReturnValue;
+
+ if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
+ if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
+ // Create a flag that is used to indicate when the NRVO was applied
+ // to this variable. Set it to zero to indicate that NRVO was not
+ // applied.
+ llvm::Value *Zero = Builder.getFalse();
+ Address NRVOFlag =
+ CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
+ EnsureInsertPoint();
+ Builder.CreateStore(Zero, NRVOFlag);
+
+ // Record the NRVO flag for this variable.
+ NRVOFlags[&D] = NRVOFlag.getPointer();
+ emission.NRVOFlag = NRVOFlag.getPointer();
+ }
+ }
+ } else {
+ CharUnits allocaAlignment;
+ llvm::Type *allocaTy;
+ if (isByRef) {
+ auto &byrefInfo = getBlockByrefInfo(&D);
+ allocaTy = byrefInfo.Type;
+ allocaAlignment = byrefInfo.ByrefAlignment;
+ } else {
+ allocaTy = ConvertTypeForMem(Ty);
+ allocaAlignment = alignment;
+ }
+
+ // Create the alloca. Note that we set the name separately from
+ // building the instruction so that it's there even in no-asserts
+ // builds.
+ address = CreateTempAlloca(allocaTy, allocaAlignment);
+ address.getPointer()->setName(D.getName());
+
+ // Don't emit lifetime markers for MSVC catch parameters. The lifetime of
+ // the catch parameter starts in the catchpad instruction, and we can't
+ // insert code in those basic blocks.
+ bool IsMSCatchParam =
+ D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();
+
+      // Emit a lifetime intrinsic if meaningful. There's no point
+      // in doing this if we don't have a valid insertion point.
+ if (HaveInsertPoint() && !IsMSCatchParam) {
+ uint64_t size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
+ emission.SizeForLifetimeMarkers =
+ EmitLifetimeStart(size, address.getPointer());
+ } else {
+ assert(!emission.useLifetimeMarkers());
+ }
+ }
+ } else {
+ EnsureInsertPoint();
+
+ if (!DidCallStackSave) {
+ // Save the stack.
+ Address Stack =
+ CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack");
+
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
+ llvm::Value *V = Builder.CreateCall(F);
+ Builder.CreateStore(V, Stack);
+
+ DidCallStackSave = true;
+
+ // Push a cleanup block and restore the stack there.
+ // FIXME: in general circumstances, this should be an EH cleanup.
+ pushStackRestore(NormalCleanup, Stack);
+ }
+
+ llvm::Value *elementCount;
+ QualType elementType;
+ std::tie(elementCount, elementType) = getVLASize(Ty);
+
+ llvm::Type *llvmTy = ConvertTypeForMem(elementType);
+
+ // Allocate memory for the array.
+ llvm::AllocaInst *vla = Builder.CreateAlloca(llvmTy, elementCount, "vla");
+ vla->setAlignment(alignment.getQuantity());
+
+ address = Address(vla, alignment);
+ }
+
+ setAddrOfLocalVar(&D, address);
+ emission.Addr = address;
+
+ // Emit debug info for local var declaration.
+ if (HaveInsertPoint())
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ if (CGM.getCodeGenOpts().getDebugInfo()
+ >= CodeGenOptions::LimitedDebugInfo) {
+ DI->setLocation(D.getLocation());
+ DI->EmitDeclareOfAutoVariable(&D, address.getPointer(), Builder);
+ }
+ }
+
+ if (D.hasAttr<AnnotateAttr>())
+ EmitVarAnnotations(&D, address.getPointer());
+
+ return emission;
+}
+
+/// Determines whether the given __block variable is potentially
+/// captured by the given expression.
+static bool isCapturedBy(const VarDecl &var, const Expr *e) {
+ // Skip the most common kinds of expressions that make
+ // hierarchy-walking expensive.
+ e = e->IgnoreParenCasts();
+
+ if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
+ const BlockDecl *block = be->getBlockDecl();
+ for (const auto &I : block->captures()) {
+ if (I.getVariable() == &var)
+ return true;
+ }
+
+ // No need to walk into the subexpressions.
+ return false;
+ }
+
+ if (const StmtExpr *SE = dyn_cast<StmtExpr>(e)) {
+ const CompoundStmt *CS = SE->getSubStmt();
+ for (const auto *BI : CS->body())
+ if (const auto *E = dyn_cast<Expr>(BI)) {
+ if (isCapturedBy(var, E))
+ return true;
+ }
+ else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
+ // special case declarations
+ for (const auto *I : DS->decls()) {
+        if (const auto *VD = dyn_cast<VarDecl>(I)) {
+ const Expr *Init = VD->getInit();
+ if (Init && isCapturedBy(var, Init))
+ return true;
+ }
+ }
+ }
+ else
+        // FIXME: Conservatively assume that arbitrary statements cause capturing.
+ // Later, provide code to poke into statements for capture analysis.
+ return true;
+ return false;
+ }
+
+ for (const Stmt *SubStmt : e->children())
+ if (isCapturedBy(var, cast<Expr>(SubStmt)))
+ return true;
+
+ return false;
+}
+
+/// \brief Determine whether the given initializer is trivial in the sense
+/// that it requires no code to be generated.
+bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
+ if (!Init)
+ return true;
+
+ if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
+ if (CXXConstructorDecl *Constructor = Construct->getConstructor())
+ if (Constructor->isTrivial() &&
+ Constructor->isDefaultConstructor() &&
+ !Construct->requiresZeroInitialization())
+ return true;
+
+ return false;
+}
+
+void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
+ assert(emission.Variable && "emission was not valid!");
+
+ // If this was emitted as a global constant, we're done.
+ if (emission.wasEmittedAsGlobal()) return;
+
+ const VarDecl &D = *emission.Variable;
+ auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
+ QualType type = D.getType();
+
+ // If this local has an initializer, emit it now.
+ const Expr *Init = D.getInit();
+
+ // If we are at an unreachable point, we don't need to emit the initializer
+ // unless it contains a label.
+ if (!HaveInsertPoint()) {
+ if (!Init || !ContainsLabel(Init)) return;
+ EnsureInsertPoint();
+ }
+
+ // Initialize the structure of a __block variable.
+ if (emission.IsByRef)
+ emitByrefStructureInit(emission);
+
+ if (isTrivialInitializer(Init))
+ return;
+
+ // Check whether this is a byref variable that's potentially
+ // captured and moved by its own initializer. If so, we'll need to
+ // emit the initializer first, then copy into the variable.
+ bool capturedByInit = emission.IsByRef && isCapturedBy(D, Init);
+
+ Address Loc =
+ capturedByInit ? emission.Addr : emission.getObjectAddress(*this);
+
+ llvm::Constant *constant = nullptr;
+ if (emission.IsConstantAggregate || D.isConstexpr()) {
+ assert(!capturedByInit && "constant init contains a capturing block?");
+ constant = CGM.EmitConstantInit(D, this);
+ }
+
+ if (!constant) {
+ LValue lv = MakeAddrLValue(Loc, type);
+ lv.setNonGC(true);
+ return EmitExprAsInit(Init, &D, lv, capturedByInit);
+ }
+
+ if (!emission.IsConstantAggregate) {
+ // For simple scalar/complex initialization, store the value directly.
+ LValue lv = MakeAddrLValue(Loc, type);
+ lv.setNonGC(true);
+ return EmitStoreThroughLValue(RValue::get(constant), lv, true);
+ }
+
+ // If this is a simple aggregate initialization, we can optimize it
+ // in various ways.
+ bool isVolatile = type.isVolatileQualified();
+
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(IntPtrTy,
+ getContext().getTypeSizeInChars(type).getQuantity());
+
+ llvm::Type *BP = Int8PtrTy;
+ if (Loc.getType() != BP)
+ Loc = Builder.CreateBitCast(Loc, BP);
+
+ // If the initializer is all or mostly zeros, codegen with memset then do
+ // a few stores afterward.
+ if (shouldUseMemSetPlusStoresToInitialize(constant,
+ CGM.getDataLayout().getTypeAllocSize(constant->getType()))) {
+ Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
+ isVolatile);
+    // Zero and undef don't require any stores.
+ if (!constant->isNullValue() && !isa<llvm::UndefValue>(constant)) {
+ Loc = Builder.CreateBitCast(Loc, constant->getType()->getPointerTo());
+ emitStoresForInitAfterMemset(constant, Loc.getPointer(),
+ isVolatile, Builder);
+ }
+ } else {
+ // Otherwise, create a temporary global with the initializer then
+ // memcpy from the global to the alloca.
+ std::string Name = getStaticDeclName(CGM, D);
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), constant->getType(), true,
+ llvm::GlobalValue::PrivateLinkage,
+ constant, Name);
+ GV->setAlignment(Loc.getAlignment().getQuantity());
+ GV->setUnnamedAddr(true);
+
+ Address SrcPtr = Address(GV, Loc.getAlignment());
+ if (SrcPtr.getType() != BP)
+ SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
+
+ Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, isVolatile);
+ }
+}
+
+/// Emit an expression as an initializer for a variable at the given
+/// location. The expression is not necessarily the normal
+/// initializer for the variable, and the address is not necessarily
+/// its normal location.
+///
+/// \param init the initializing expression
+/// \param D the variable to act as if we're initializing
+/// \param lvalue the lvalue to initialize; its address is a pointer
+/// to the LLVM mapping of the variable's type
+/// \param capturedByInit true if the variable is a __block variable
+/// whose address is potentially changed by the initializer
+void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
+ LValue lvalue, bool capturedByInit) {
+ QualType type = D->getType();
+
+ if (type->isReferenceType()) {
+ RValue rvalue = EmitReferenceBindingToExpr(init);
+ if (capturedByInit)
+ drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ EmitStoreThroughLValue(rvalue, lvalue, true);
+ return;
+ }
+ switch (getEvaluationKind(type)) {
+ case TEK_Scalar:
+ EmitScalarInit(init, D, lvalue, capturedByInit);
+ return;
+ case TEK_Complex: {
+ ComplexPairTy complex = EmitComplexExpr(init);
+ if (capturedByInit)
+ drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ EmitStoreOfComplex(complex, lvalue, /*init*/ true);
+ return;
+ }
+ case TEK_Aggregate:
+ if (type->isAtomicType()) {
+ EmitAtomicInit(const_cast<Expr*>(init), lvalue);
+ } else {
+ // TODO: how can we delay here if D is captured by its initializer?
+ EmitAggExpr(init, AggValueSlot::forLValue(lvalue,
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+ }
+ return;
+ }
+ llvm_unreachable("bad evaluation kind");
+}
+
+/// Enter a destroy cleanup for the given local variable.
+void CodeGenFunction::emitAutoVarTypeCleanup(
+ const CodeGenFunction::AutoVarEmission &emission,
+ QualType::DestructionKind dtorKind) {
+ assert(dtorKind != QualType::DK_none);
+
+ // Note that for __block variables, we want to destroy the
+ // original stack object, not the possibly forwarded object.
+ Address addr = emission.getObjectAddress(*this);
+
+ const VarDecl *var = emission.Variable;
+ QualType type = var->getType();
+
+ CleanupKind cleanupKind = NormalAndEHCleanup;
+ CodeGenFunction::Destroyer *destroyer = nullptr;
+
+ switch (dtorKind) {
+ case QualType::DK_none:
+ llvm_unreachable("no cleanup for trivially-destructible variable");
+
+ case QualType::DK_cxx_destructor:
+ // If there's an NRVO flag on the emission, we need a different
+ // cleanup.
+ if (emission.NRVOFlag) {
+ assert(!type->isArrayType());
+ CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
+ EHStack.pushCleanup<DestroyNRVOVariable>(cleanupKind, addr,
+ dtor, emission.NRVOFlag);
+ return;
+ }
+ break;
+
+ case QualType::DK_objc_strong_lifetime:
+ // Suppress cleanups for pseudo-strong variables.
+ if (var->isARCPseudoStrong()) return;
+
+ // Otherwise, consider whether to use an EH cleanup or not.
+ cleanupKind = getARCCleanupKind();
+
+ // Use the imprecise destroyer by default.
+ if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
+ destroyer = CodeGenFunction::destroyARCStrongImprecise;
+ break;
+
+ case QualType::DK_objc_weak_lifetime:
+ break;
+ }
+
+ // If we haven't chosen a more specific destroyer, use the default.
+ if (!destroyer) destroyer = getDestroyer(dtorKind);
+
+ // Use an EH cleanup in array destructors iff the destructor itself
+ // is being pushed as an EH cleanup.
+ bool useEHCleanup = (cleanupKind & EHCleanup);
+ EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
+ useEHCleanup);
+}
+
+void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
+ assert(emission.Variable && "emission was not valid!");
+
+ // If this was emitted as a global constant, we're done.
+ if (emission.wasEmittedAsGlobal()) return;
+
+ // If we don't have an insertion point, we're done. Sema prevents
+ // us from jumping into any of these scopes anyway.
+ if (!HaveInsertPoint()) return;
+
+ const VarDecl &D = *emission.Variable;
+
+ // Make sure we call @llvm.lifetime.end. This needs to happen
+ // *last*, so the cleanup needs to be pushed *first*.
+ if (emission.useLifetimeMarkers()) {
+ EHStack.pushCleanup<CallLifetimeEnd>(NormalCleanup,
+ emission.getAllocatedAddress(),
+ emission.getSizeForLifetimeMarkers());
+ EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
+ cleanup.setLifetimeMarker();
+ }
+
+ // Check the type for a cleanup.
+ if (QualType::DestructionKind dtorKind = D.getType().isDestructedType())
+ emitAutoVarTypeCleanup(emission, dtorKind);
+
+ // In GC mode, honor objc_precise_lifetime.
+ if (getLangOpts().getGC() != LangOptions::NonGC &&
+ D.hasAttr<ObjCPreciseLifetimeAttr>()) {
+ EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
+ }
+
+ // Handle the cleanup attribute.
+ if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
+ const FunctionDecl *FD = CA->getFunctionDecl();
+
+ llvm::Constant *F = CGM.GetAddrOfFunction(FD);
+ assert(F && "Could not find function!");
+
+ const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
+ EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
+ }
+
+ // If this is a block variable, call _Block_object_destroy
+ // (on the unforwarded address).
+ if (emission.IsByRef)
+ enterByrefCleanup(emission);
+}
+
+CodeGenFunction::Destroyer *
+CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
+ switch (kind) {
+ case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
+ case QualType::DK_cxx_destructor:
+ return destroyCXXObject;
+ case QualType::DK_objc_strong_lifetime:
+ return destroyARCStrongPrecise;
+ case QualType::DK_objc_weak_lifetime:
+ return destroyARCWeak;
+ }
+ llvm_unreachable("Unknown DestructionKind");
+}
+
+/// pushEHDestroy - Push the standard destructor for the given type as
+/// an EH-only cleanup.
+void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
+ Address addr, QualType type) {
+ assert(dtorKind && "cannot push destructor for trivial type");
+ assert(needsEHCleanup(dtorKind));
+
+ pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
+}
+
+/// pushDestroy - Push the standard destructor for the given type as
+/// at least a normal cleanup.
+void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
+ Address addr, QualType type) {
+ assert(dtorKind && "cannot push destructor for trivial type");
+
+ CleanupKind cleanupKind = getCleanupKind(dtorKind);
+ pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
+ cleanupKind & EHCleanup);
+}
+
+void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
+ QualType type, Destroyer *destroyer,
+ bool useEHCleanupForArray) {
+ pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
+ destroyer, useEHCleanupForArray);
+}
+
+void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
+ EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
+}
+
+void CodeGenFunction::pushLifetimeExtendedDestroy(
+ CleanupKind cleanupKind, Address addr, QualType type,
+ Destroyer *destroyer, bool useEHCleanupForArray) {
+ assert(!isInConditionalBranch() &&
+ "performing lifetime extension from within conditional");
+
+ // Push an EH-only cleanup for the object now.
+ // FIXME: When popping normal cleanups, we need to keep this EH cleanup
+ // around in case a temporary's destructor throws an exception.
+ if (cleanupKind & EHCleanup)
+ EHStack.pushCleanup<DestroyObject>(
+ static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
+ destroyer, useEHCleanupForArray);
+
+ // Remember that we need to push a full cleanup for the object at the
+ // end of the full-expression.
+ pushCleanupAfterFullExpr<DestroyObject>(
+ cleanupKind, addr, type, destroyer, useEHCleanupForArray);
+}
+
+/// emitDestroy - Immediately perform the destruction of the given
+/// object.
+///
+/// \param addr - the address of the object; a type*
+/// \param type - the type of the object; if an array type, all
+/// objects are destroyed in reverse order
+/// \param destroyer - the function to call to destroy individual
+/// elements
+/// \param useEHCleanupForArray - whether an EH cleanup should be
+/// used when destroying array elements, in case one of the
+/// destructions throws an exception
+void CodeGenFunction::emitDestroy(Address addr, QualType type,
+ Destroyer *destroyer,
+ bool useEHCleanupForArray) {
+ const ArrayType *arrayType = getContext().getAsArrayType(type);
+ if (!arrayType)
+ return destroyer(*this, addr, type);
+
+ llvm::Value *length = emitArrayLength(arrayType, type, addr);
+
+ CharUnits elementAlign =
+ addr.getAlignment()
+ .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
+
+ // Normally we have to check whether the array is zero-length.
+ bool checkZeroLength = true;
+
+ // But if the array length is constant, we can suppress that.
+ if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
+ // ...and if it's constant zero, we can just skip the entire thing.
+ if (constLength->isZero()) return;
+ checkZeroLength = false;
+ }
+
+ llvm::Value *begin = addr.getPointer();
+ llvm::Value *end = Builder.CreateInBoundsGEP(begin, length);
+ emitArrayDestroy(begin, end, type, elementAlign, destroyer,
+ checkZeroLength, useEHCleanupForArray);
+}
+
+/// emitArrayDestroy - Destroys all the elements of the given array,
+/// beginning from last to first. The array cannot be zero-length.
+///
+/// \param begin - a type* denoting the first element of the array
+/// \param end - a type* denoting one past the end of the array
+/// \param elementType - the element type of the array
+/// \param destroyer - the function to call to destroy elements
+/// \param useEHCleanup - whether to push an EH cleanup to destroy
+/// the remaining elements in case the destruction of a single
+/// element throws
+void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
+ llvm::Value *end,
+ QualType elementType,
+ CharUnits elementAlign,
+ Destroyer *destroyer,
+ bool checkZeroLength,
+ bool useEHCleanup) {
+ assert(!elementType->isArrayType());
+
+ // The basic structure here is a do-while loop, because we don't
+ // need to check for the zero-element case.
+ llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
+ llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");
+
+ if (checkZeroLength) {
+ llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
+ "arraydestroy.isempty");
+ Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
+ }
+
+ // Enter the loop body, making that address the current address.
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ EmitBlock(bodyBB);
+ llvm::PHINode *elementPast =
+ Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
+ elementPast->addIncoming(end, entryBB);
+
+ // Shift the address back by one element.
+ llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
+ llvm::Value *element = Builder.CreateInBoundsGEP(elementPast, negativeOne,
+ "arraydestroy.element");
+
+ if (useEHCleanup)
+ pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
+ destroyer);
+
+ // Perform the actual destruction there.
+ destroyer(*this, Address(element, elementAlign), elementType);
+
+ if (useEHCleanup)
+ PopCleanupBlock();
+
+ // Check whether we've reached the end.
+ llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
+ Builder.CreateCondBr(done, doneBB, bodyBB);
+ elementPast->addIncoming(element, Builder.GetInsertBlock());
+
+ // Done.
+ EmitBlock(doneBB);
+}
+
+/// Perform partial array destruction as if in an EH cleanup. Unlike
+/// emitArrayDestroy, the element type here may still be an array type.
+static void emitPartialArrayDestroy(CodeGenFunction &CGF,
+ llvm::Value *begin, llvm::Value *end,
+ QualType type, CharUnits elementAlign,
+ CodeGenFunction::Destroyer *destroyer) {
+ // If the element type is itself an array, drill down.
+ unsigned arrayDepth = 0;
+ while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
+ // VLAs don't require a GEP index to walk into.
+ if (!isa<VariableArrayType>(arrayType))
+ arrayDepth++;
+ type = arrayType->getElementType();
+ }
+
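+  // For example, for "X a[3][4]" arrayDepth is 2, and three zero indices
+  // GEP an X(*)[3][4] down to an X*.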
+ if (arrayDepth) {
+ llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
+
+ SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
+ begin = CGF.Builder.CreateInBoundsGEP(begin, gepIndices, "pad.arraybegin");
+ end = CGF.Builder.CreateInBoundsGEP(end, gepIndices, "pad.arrayend");
+ }
+
+ // Destroy the array. We don't ever need an EH cleanup because we
+ // assume that we're in an EH cleanup ourselves, so a throwing
+ // destructor causes an immediate terminate.
+ CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
+ /*checkZeroLength*/ true, /*useEHCleanup*/ false);
+}
+
+namespace {
+ /// RegularPartialArrayDestroy - a cleanup which performs a partial
+ /// array destroy where the end pointer is regularly determined and
+ /// does not need to be loaded from a local.
+ class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
+ llvm::Value *ArrayBegin;
+ llvm::Value *ArrayEnd;
+ QualType ElementType;
+ CodeGenFunction::Destroyer *Destroyer;
+ CharUnits ElementAlign;
+ public:
+ RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
+ QualType elementType, CharUnits elementAlign,
+ CodeGenFunction::Destroyer *destroyer)
+ : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
+ ElementType(elementType), Destroyer(destroyer),
+ ElementAlign(elementAlign) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
+ ElementType, ElementAlign, Destroyer);
+ }
+ };
+
+ /// IrregularPartialArrayDestroy - a cleanup which performs a
+ /// partial array destroy where the end pointer is irregularly
+ /// determined and must be loaded from a local.
+ class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
+ llvm::Value *ArrayBegin;
+ Address ArrayEndPointer;
+ QualType ElementType;
+ CodeGenFunction::Destroyer *Destroyer;
+ CharUnits ElementAlign;
+ public:
+ IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
+ Address arrayEndPointer,
+ QualType elementType,
+ CharUnits elementAlign,
+ CodeGenFunction::Destroyer *destroyer)
+ : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
+ ElementType(elementType), Destroyer(destroyer),
+ ElementAlign(elementAlign) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
+ emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
+ ElementType, ElementAlign, Destroyer);
+ }
+ };
+}
+
+/// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
+/// already-constructed elements of the given array. The cleanup
+/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
+///
+/// \param elementType - the immediate element type of the array;
+/// possibly still an array type
+void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
+ Address arrayEndPointer,
+ QualType elementType,
+ CharUnits elementAlign,
+ Destroyer *destroyer) {
+ pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
+ arrayBegin, arrayEndPointer,
+ elementType, elementAlign,
+ destroyer);
+}
+
+/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
+/// already-constructed elements of the given array. The cleanup
+/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
+///
+/// \param elementType - the immediate element type of the array;
+/// possibly still an array type
+void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
+ llvm::Value *arrayEnd,
+ QualType elementType,
+ CharUnits elementAlign,
+ Destroyer *destroyer) {
+ pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
+ arrayBegin, arrayEnd,
+ elementType, elementAlign,
+ destroyer);
+}
+
+/// Lazily declare the @llvm.lifetime.start intrinsic.
+llvm::Constant *CodeGenModule::getLLVMLifetimeStartFn() {
+ if (LifetimeStartFn) return LifetimeStartFn;
+ LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
+ llvm::Intrinsic::lifetime_start);
+ return LifetimeStartFn;
+}
+
+/// Lazily declare the @llvm.lifetime.end intrinsic.
+llvm::Constant *CodeGenModule::getLLVMLifetimeEndFn() {
+ if (LifetimeEndFn) return LifetimeEndFn;
+ LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
+ llvm::Intrinsic::lifetime_end);
+ return LifetimeEndFn;
+}
+
+namespace {
+ /// A cleanup to perform a release of an object at the end of a
+ /// function. This is used to balance out the incoming +1 of a
+ /// ns_consumed argument when we can't reasonably do that just by
+ /// not doing the initial retain for a __block argument.
+ struct ConsumeARCParameter final : EHScopeStack::Cleanup {
+ ConsumeARCParameter(llvm::Value *param,
+ ARCPreciseLifetime_t precise)
+ : Param(param), Precise(precise) {}
+
+ llvm::Value *Param;
+ ARCPreciseLifetime_t Precise;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ CGF.EmitARCRelease(Param, Precise);
+ }
+ };
+}
+
+/// Emit an alloca (or GlobalValue depending on target)
+/// for the specified parameter and set up LocalDeclMap.
+void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
+ unsigned ArgNo) {
+ // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
+ assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
+ "Invalid argument to EmitParmDecl");
+
+ Arg.getAnyValue()->setName(D.getName());
+
+ QualType Ty = D.getType();
+
+ // Use better IR generation for certain implicit parameters.
+ if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
+ // The only implicit argument a block has is its literal.
+ // We assume this is always passed directly.
+ if (BlockInfo) {
+ setBlockContextParameter(IPD, ArgNo, Arg.getDirectValue());
+ return;
+ }
+ }
+
+ Address DeclPtr = Address::invalid();
+ bool DoStore = false;
+ bool IsScalar = hasScalarEvaluationKind(Ty);
+ // If we already have a pointer to the argument, reuse the input pointer.
+ if (Arg.isIndirect()) {
+ DeclPtr = Arg.getIndirectAddress();
+ // If we have a prettier pointer type at this point, bitcast to that.
+ unsigned AS = DeclPtr.getType()->getAddressSpace();
+ llvm::Type *IRTy = ConvertTypeForMem(Ty)->getPointerTo(AS);
+ if (DeclPtr.getType() != IRTy)
+ DeclPtr = Builder.CreateBitCast(DeclPtr, IRTy, D.getName());
+
+ // Push a destructor cleanup for this parameter if the ABI requires it.
+ // Don't push a cleanup in a thunk for a method that will also emit a
+ // cleanup.
+ if (!IsScalar && !CurFuncIsThunk &&
+ getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
+ const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+ if (RD && RD->hasNonTrivialDestructor())
+ pushDestroy(QualType::DK_cxx_destructor, DeclPtr, Ty);
+ }
+ } else {
+ // Otherwise, create a temporary to hold the value.
+ DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
+ D.getName() + ".addr");
+ DoStore = true;
+ }
+
+ llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);
+
+ LValue lv = MakeAddrLValue(DeclPtr, Ty);
+ if (IsScalar) {
+ Qualifiers qs = Ty.getQualifiers();
+ if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
+ // We honor __attribute__((ns_consumed)) for types with lifetime.
+ // For __strong, it's handled by just skipping the initial retain;
+ // otherwise we have to balance out the initial +1 with an extra
+ // cleanup to do the release at the end of the function.
+ bool isConsumed = D.hasAttr<NSConsumedAttr>();
+
+ // 'self' is always formally __strong, but if this is not an
+ // init method then we don't want to retain it.
+ if (D.isARCPseudoStrong()) {
+ const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CurCodeDecl);
+ assert(&D == method->getSelfDecl());
+ assert(lt == Qualifiers::OCL_Strong);
+ assert(qs.hasConst());
+ assert(method->getMethodFamily() != OMF_init);
+ (void) method;
+ lt = Qualifiers::OCL_ExplicitNone;
+ }
+
+ if (lt == Qualifiers::OCL_Strong) {
+ if (!isConsumed) {
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+              // Use objc_storeStrong(&dest, value) to retain the
+              // object. But first, store a null into 'dest' because
+              // objc_storeStrong attempts to release its old value.
+ llvm::Value *Null = CGM.EmitNullConstant(D.getType());
+ EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
+ EmitARCStoreStrongCall(lv.getAddress(), ArgVal, true);
+ DoStore = false;
+ }
+ else
+ // Don't use objc_retainBlock for block pointers, because we
+ // don't want to Block_copy something just because we got it
+ // as a parameter.
+ ArgVal = EmitARCRetainNonBlock(ArgVal);
+ }
+ } else {
+ // Push the cleanup for a consumed parameter.
+ if (isConsumed) {
+ ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
+ ? ARCPreciseLifetime : ARCImpreciseLifetime);
+ EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
+ precise);
+ }
+
+ if (lt == Qualifiers::OCL_Weak) {
+ EmitARCInitWeak(DeclPtr, ArgVal);
+ DoStore = false; // The weak init is a store, no need to do two.
+ }
+ }
+
+ // Enter the cleanup scope.
+ EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
+ }
+ }
+
+ // Store the initial value into the alloca.
+ if (DoStore)
+ EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);
+
+ setAddrOfLocalVar(&D, DeclPtr);
+
+ // Emit debug info for param declaration.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ if (CGM.getCodeGenOpts().getDebugInfo()
+ >= CodeGenOptions::LimitedDebugInfo) {
+ DI->EmitDeclareOfArgVariable(&D, DeclPtr.getPointer(), ArgNo, Builder);
+ }
+ }
+
+ if (D.hasAttr<AnnotateAttr>())
+ EmitVarAnnotations(&D, DeclPtr.getPointer());
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
new file mode 100644
index 0000000..adba731
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -0,0 +1,605 @@
+//===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with code generation of C++ declarations
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
+#include "CGObjCRuntime.h"
+#include "CGOpenMPRuntime.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/Path.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
+ ConstantAddress DeclPtr) {
+ assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
+ assert(!D.getType()->isReferenceType() &&
+ "Should not call EmitDeclInit on a reference!");
+
+ QualType type = D.getType();
+ LValue lv = CGF.MakeAddrLValue(DeclPtr, type);
+
+ const Expr *Init = D.getInit();
+ switch (CGF.getEvaluationKind(type)) {
+ case TEK_Scalar: {
+ CodeGenModule &CGM = CGF.CGM;
+ if (lv.isObjCStrong())
+ CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
+ DeclPtr, D.getTLSKind());
+ else if (lv.isObjCWeak())
+ CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
+ DeclPtr);
+ else
+ CGF.EmitScalarInit(Init, &D, lv, false);
+ return;
+ }
+ case TEK_Complex:
+ CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
+ return;
+ case TEK_Aggregate:
+ CGF.EmitAggExpr(Init, AggValueSlot::forLValue(lv,AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+ return;
+ }
+ llvm_unreachable("bad evaluation kind");
+}
+
+/// Emit code to cause the destruction of the given variable with
+/// static storage duration.
+static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
+ ConstantAddress addr) {
+ CodeGenModule &CGM = CGF.CGM;
+
+ // FIXME: __attribute__((cleanup)) ?
+
+ QualType type = D.getType();
+ QualType::DestructionKind dtorKind = type.isDestructedType();
+
+ switch (dtorKind) {
+ case QualType::DK_none:
+ return;
+
+ case QualType::DK_cxx_destructor:
+ break;
+
+ case QualType::DK_objc_strong_lifetime:
+ case QualType::DK_objc_weak_lifetime:
+ // We don't care about releasing objects during process teardown.
+ assert(!D.getTLSKind() && "should have rejected this");
+ return;
+ }
+
+ llvm::Constant *function;
+ llvm::Constant *argument;
+
+ // Special-case non-array C++ destructors, where there's a function
+ // with the right signature that we can just call.
+ const CXXRecordDecl *record = nullptr;
+ if (dtorKind == QualType::DK_cxx_destructor &&
+ (record = type->getAsCXXRecordDecl())) {
+ assert(!record->hasTrivialDestructor());
+ CXXDestructorDecl *dtor = record->getDestructor();
+
+ function = CGM.getAddrOfCXXStructor(dtor, StructorType::Complete);
+ argument = llvm::ConstantExpr::getBitCast(
+ addr.getPointer(), CGF.getTypes().ConvertType(type)->getPointerTo());
+
+ // Otherwise, the standard logic requires a helper function.
+ } else {
+ function = CodeGenFunction(CGM)
+ .generateDestroyHelper(addr, type, CGF.getDestroyer(dtorKind),
+ CGF.needsEHCleanup(dtorKind), &D);
+ argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
+ }
+
+ CGM.getCXXABI().registerGlobalDtor(CGF, D, function, argument);
+}
+
+/// Emit code to cause the variable at the given address to be considered as
+/// constant from this point onwards.
+static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *Addr) {
+ // Don't emit the intrinsic if we're not optimizing.
+ if (!CGF.CGM.getCodeGenOpts().OptimizationLevel)
+ return;
+
+ // Grab the llvm.invariant.start intrinsic.
+ llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
+ llvm::Constant *InvariantStart = CGF.CGM.getIntrinsic(InvStartID);
+
+ // Emit a call with the size in bytes of the object.
+ CharUnits WidthChars = CGF.getContext().getTypeSizeInChars(D.getType());
+ uint64_t Width = WidthChars.getQuantity();
+ llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(CGF.Int64Ty, Width),
+ llvm::ConstantExpr::getBitCast(Addr, CGF.Int8PtrTy)};
+ CGF.Builder.CreateCall(InvariantStart, Args);
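+  // The call just emitted looks roughly like (illustrative IR; the size
+  // and operand depend on the variable):
+  //   call {}* @llvm.invariant.start(i64 4, i8* bitcast (i32* @g to i8*))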
+}
+
+void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
+ llvm::Constant *DeclPtr,
+ bool PerformInit) {
+
+ const Expr *Init = D.getInit();
+ QualType T = D.getType();
+
+ // The address space of a static local variable (DeclPtr) may be different
+ // from the address space of the "this" argument of the constructor. In that
+ // case, we need an addrspacecast before calling the constructor.
+ //
+ // struct StructWithCtor {
+ // __device__ StructWithCtor() {...}
+ // };
+ // __device__ void foo() {
+ // __shared__ StructWithCtor s;
+ // ...
+ // }
+ //
+ // For example, in the above CUDA code, the static local variable s has a
+ // "shared" address space qualifier, but the constructor of StructWithCtor
+ // expects "this" in the "generic" address space.
+ unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(T);
+ unsigned ActualAddrSpace = DeclPtr->getType()->getPointerAddressSpace();
+ if (ActualAddrSpace != ExpectedAddrSpace) {
+ llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(T);
+ llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace);
+ DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
+ }
+
+ ConstantAddress DeclAddr(DeclPtr, getContext().getDeclAlign(&D));
+
+ if (!T->isReferenceType()) {
+ if (getLangOpts().OpenMP && D.hasAttr<OMPThreadPrivateDeclAttr>())
+ (void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition(
+ &D, DeclAddr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
+ PerformInit, this);
+ if (PerformInit)
+ EmitDeclInit(*this, D, DeclAddr);
+ if (CGM.isTypeConstant(D.getType(), true))
+ EmitDeclInvariant(*this, D, DeclPtr);
+ else
+ EmitDeclDestroy(*this, D, DeclAddr);
+ return;
+ }
+
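+  // Otherwise this is a reference; e.g. a global 'int &r = f();' binds the
+  // reference, then stores the resulting pointer into the global slot.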
+ assert(PerformInit && "cannot have constant initializer which needs "
+ "destruction for reference");
+ RValue RV = EmitReferenceBindingToExpr(Init);
+ EmitStoreOfScalar(RV.getScalarVal(), DeclAddr, false, T);
+}
+
+/// Create a stub function, suitable for being passed to atexit,
+/// which passes the given address to the given destructor function.
+llvm::Constant *CodeGenFunction::createAtExitStub(const VarDecl &VD,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // Get the destructor function type, void(*)(void).
+ llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
+ SmallString<256> FnName;
+ {
+ llvm::raw_svector_ostream Out(FnName);
+ CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&VD, Out);
+ }
+
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
+ llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(ty, FnName.str(),
+ FI,
+ VD.getLocation());
+
+ CodeGenFunction CGF(CGM);
+
+ CGF.StartFunction(&VD, CGM.getContext().VoidTy, fn, FI, FunctionArgList());
+
+ llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);
+
+ // Make sure the call and the callee agree on calling convention.
+ if (llvm::Function *dtorFn =
+ dyn_cast<llvm::Function>(dtor->stripPointerCasts()))
+ call->setCallingConv(dtorFn->getCallingConv());
+
+ CGF.FinishFunction();
+
+ return fn;
+}
+
+/// Register a global destructor using the C atexit runtime function.
+void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // Create a function which calls the destructor.
+ llvm::Constant *dtorStub = createAtExitStub(VD, dtor, addr);
+
+ // extern "C" int atexit(void (*f)(void));
+ llvm::FunctionType *atexitTy =
+ llvm::FunctionType::get(IntTy, dtorStub->getType(), false);
+
+ llvm::Constant *atexit =
+ CGM.CreateRuntimeFunction(atexitTy, "atexit");
+ if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit))
+ atexitFn->setDoesNotThrow();
+
+ EmitNounwindRuntimeCall(atexit, dtorStub);
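+
+  // Net effect, roughly: atexit(dtorStub), where calling dtorStub() runs
+  // dtor(addr) with the destructor's own calling convention.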
+}
+
+void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
+ llvm::GlobalVariable *DeclPtr,
+ bool PerformInit) {
+ // If we've been asked to forbid guard variables, emit an error now.
+ // This diagnostic is hard-coded for Darwin's use case; we can find
+ // better phrasing if someone else needs it.
+ if (CGM.getCodeGenOpts().ForbidGuardVariables)
+ CGM.Error(D.getLocation(),
+ "this initialization requires a guard variable, which "
+ "the kernel does not support");
+
+ CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
+}
+
+llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
+ llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
+ SourceLocation Loc, bool TLS) {
+ llvm::Function *Fn =
+ llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
+ Name, &getModule());
+ if (!getLangOpts().AppleKext && !TLS) {
+ // Set the section if needed.
+ if (const char *Section = getTarget().getStaticInitSectionSpecifier())
+ Fn->setSection(Section);
+ }
+
+ SetInternalFunctionAttributes(nullptr, Fn, FI);
+
+ Fn->setCallingConv(getRuntimeCC());
+
+ if (!getLangOpts().Exceptions)
+ Fn->setDoesNotThrow();
+
+ if (!isInSanitizerBlacklist(Fn, Loc)) {
+ if (getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address |
+ SanitizerKind::KernelAddress))
+ Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
+ if (getLangOpts().Sanitize.has(SanitizerKind::Thread))
+ Fn->addFnAttr(llvm::Attribute::SanitizeThread);
+ if (getLangOpts().Sanitize.has(SanitizerKind::Memory))
+ Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
+ if (getLangOpts().Sanitize.has(SanitizerKind::SafeStack))
+ Fn->addFnAttr(llvm::Attribute::SafeStack);
+ }
+
+ return Fn;
+}
+
+/// Create a global pointer to a function that will initialize a global
+/// variable. The user has requested that this pointer be emitted in a specific
+/// section.
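+/// For example (a hedged MSVC-style sketch; 'Widget' is hypothetical):
+///   #pragma init_seg(".mine")
+///   Widget w;  // the pointer to w's initializer function lands in ".mine"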
+void CodeGenModule::EmitPointerToInitFunc(const VarDecl *D,
+ llvm::GlobalVariable *GV,
+ llvm::Function *InitFunc,
+ InitSegAttr *ISA) {
+ llvm::GlobalVariable *PtrArray = new llvm::GlobalVariable(
+ TheModule, InitFunc->getType(), /*isConstant=*/true,
+ llvm::GlobalValue::PrivateLinkage, InitFunc, "__cxx_init_fn_ptr");
+ PtrArray->setSection(ISA->getSection());
+ addUsedGlobal(PtrArray);
+
+ // If the GV is already in a comdat group, then we have to join it.
+ if (llvm::Comdat *C = GV->getComdat())
+ PtrArray->setComdat(C);
+}
+
+void
+CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
+ llvm::GlobalVariable *Addr,
+ bool PerformInit) {
+ // Check if we've already initialized this decl.
+ auto I = DelayedCXXInitPosition.find(D);
+ if (I != DelayedCXXInitPosition.end() && I->second == ~0U)
+ return;
+
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+ SmallString<256> FnName;
+ {
+ llvm::raw_svector_ostream Out(FnName);
+ getCXXABI().getMangleContext().mangleDynamicInitializer(D, Out);
+ }
+
+ // Create a variable initialization function.
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(FTy, FnName.str(),
+ getTypes().arrangeNullaryFunction(),
+ D->getLocation());
+
+ auto *ISA = D->getAttr<InitSegAttr>();
+ CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
+ PerformInit);
+
+ llvm::GlobalVariable *COMDATKey =
+ supportsCOMDAT() && D->isExternallyVisible() ? Addr : nullptr;
+
+ if (D->getTLSKind()) {
+ // FIXME: Should we support init_priority for thread_local?
+ // FIXME: Ideally, initialization of instantiated thread_local static data
+ // members of class templates should not trigger initialization of other
+ // entities in the TU.
+ // FIXME: We only need to register one __cxa_thread_atexit function for the
+ // entire TU.
+ CXXThreadLocalInits.push_back(Fn);
+ CXXThreadLocalInitVars.push_back(D);
+ } else if (PerformInit && ISA) {
+ EmitPointerToInitFunc(D, Addr, Fn, ISA);
+ } else if (auto *IPA = D->getAttr<InitPriorityAttr>()) {
+ OrderGlobalInits Key(IPA->getPriority(), PrioritizedCXXGlobalInits.size());
+ PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
+ } else if (isTemplateInstantiation(D->getTemplateSpecializationKind())) {
+ // C++ [basic.start.init]p2:
+ // Definitions of explicitly specialized class template static data
+ // members have ordered initialization. Other class template static data
+ // members (i.e., implicitly or explicitly instantiated specializations)
+ // have unordered initialization.
+ //
+ // As a consequence, we can put them into their own llvm.global_ctors entry.
+ //
+ // If the global is externally visible, put the initializer into a COMDAT
+ // group with the global being initialized. On most platforms, this is a
+ // minor startup time optimization. In the MS C++ ABI, there are no guard
+ // variables, so this COMDAT key is required for correctness.
+ AddGlobalCtor(Fn, 65535, COMDATKey);
+ } else if (D->hasAttr<SelectAnyAttr>()) {
+ // SelectAny globals will be comdat-folded. Put the initializer into a
+ // COMDAT group associated with the global, so the initializers get folded
+ // too.
+ AddGlobalCtor(Fn, 65535, COMDATKey);
+ } else {
+ I = DelayedCXXInitPosition.find(D); // Re-do lookup in case of re-hash.
+ if (I == DelayedCXXInitPosition.end()) {
+ CXXGlobalInits.push_back(Fn);
+ } else if (I->second != ~0U) {
+ assert(I->second < CXXGlobalInits.size() &&
+ CXXGlobalInits[I->second] == nullptr);
+ CXXGlobalInits[I->second] = Fn;
+ }
+ }
+
+ // Remember that we already emitted the initializer for this global.
+ DelayedCXXInitPosition[D] = ~0U;
+}
+
+void CodeGenModule::EmitCXXThreadLocalInitFunc() {
+ getCXXABI().EmitThreadLocalInitFuncs(
+ *this, CXXThreadLocals, CXXThreadLocalInits, CXXThreadLocalInitVars);
+
+ CXXThreadLocalInits.clear();
+ CXXThreadLocalInitVars.clear();
+ CXXThreadLocals.clear();
+}
+
+void
+CodeGenModule::EmitCXXGlobalInitFunc() {
+ while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
+ CXXGlobalInits.pop_back();
+
+ if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
+ return;
+
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+ const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
+
+ // Create our global initialization function.
+ if (!PrioritizedCXXGlobalInits.empty()) {
+ SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
+ llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
+ PrioritizedCXXGlobalInits.end());
+    // Iterate over "chunks" of ctors with the same priority and emit each
+    // chunk into a separate function. Everything is sorted first by priority
+    // and second by lexical order, so we emit the ctor functions in the
+    // proper order.
+    for (SmallVectorImpl<GlobalInitData>::iterator
+             I = PrioritizedCXXGlobalInits.begin(),
+             E = PrioritizedCXXGlobalInits.end(); I != E; ) {
+      SmallVectorImpl<GlobalInitData>::iterator
+          PrioE = std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());
+
+ LocalCXXGlobalInits.clear();
+ unsigned Priority = I->first.priority;
+ // Compute the function suffix from priority. Prepend with zeroes to make
+ // sure the function names are also ordered as priorities.
+ std::string PrioritySuffix = llvm::utostr(Priority);
+ // Priority is always <= 65535 (enforced by sema).
+ PrioritySuffix = std::string(6-PrioritySuffix.size(), '0')+PrioritySuffix;
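+      // e.g. priority 201 yields the suffix "000201" and thus a function
+      // named "_GLOBAL__I_000201".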
+ llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
+ FTy, "_GLOBAL__I_" + PrioritySuffix, FI);
+
+ for (; I < PrioE; ++I)
+ LocalCXXGlobalInits.push_back(I->second);
+
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, LocalCXXGlobalInits);
+ AddGlobalCtor(Fn, Priority);
+ }
+ PrioritizedCXXGlobalInits.clear();
+ }
+
+ SmallString<128> FileName;
+ SourceManager &SM = Context.getSourceManager();
+ if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
+    // Include the filename in the symbol name. Including "sub_" matches gcc
+    // and makes sure these symbols appear lexicographically after the
+    // priority-suffixed symbols emitted above.
+ FileName = llvm::sys::path::filename(MainFile->getName());
+ } else {
+ FileName = "<null>";
+ }
+
+ for (size_t i = 0; i < FileName.size(); ++i) {
+    // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
+    // to be the set of characters allowed in the body of a C preprocessing
+    // number.
+ if (!isPreprocessingNumberBody(FileName[i]))
+ FileName[i] = '_';
+ }
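+  // e.g. a main file named "my-file.cpp" yields "_GLOBAL__sub_I_my_file.cpp"
+  // ('-' is replaced with '_'; '.' and '_' are kept).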
+
+ llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
+ FTy, llvm::Twine("_GLOBAL__sub_I_", FileName), FI);
+
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXGlobalInits);
+ AddGlobalCtor(Fn);
+
+ CXXGlobalInits.clear();
+}
+
+void CodeGenModule::EmitCXXGlobalDtorFunc() {
+ if (CXXGlobalDtors.empty())
+ return;
+
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+
+ // Create our global destructor function.
+ const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(FTy, "_GLOBAL__D_a", FI);
+
+ CodeGenFunction(*this).GenerateCXXGlobalDtorsFunc(Fn, CXXGlobalDtors);
+ AddGlobalDtor(Fn);
+}
+
+/// Emit the code necessary to initialize the given global variable.
+void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
+ const VarDecl *D,
+ llvm::GlobalVariable *Addr,
+ bool PerformInit) {
+  // Check if we need to emit debug info for the variable initializer.
+ if (D->hasAttr<NoDebugAttr>())
+ DebugInfo = nullptr; // disable debug info indefinitely for this function
+
+ CurEHLocation = D->getLocStart();
+
+ StartFunction(GlobalDecl(D), getContext().VoidTy, Fn,
+ getTypes().arrangeNullaryFunction(),
+ FunctionArgList(), D->getLocation(),
+ D->getInit()->getExprLoc());
+
+ // Use guarded initialization if the global variable is weak. This
+ // occurs for, e.g., instantiated static data members and
+ // definitions explicitly marked weak.
+ if (Addr->hasWeakLinkage() || Addr->hasLinkOnceLinkage()) {
+ EmitCXXGuardedInit(*D, Addr, PerformInit);
+ } else {
+ EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
+ }
+
+ FinishFunction();
+}
+
+void
+CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
+ ArrayRef<llvm::Function *> Decls,
+ Address Guard) {
+ {
+ auto NL = ApplyDebugLocation::CreateEmpty(*this);
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
+ getTypes().arrangeNullaryFunction(), FunctionArgList());
+ // Emit an artificial location for this function.
+ auto AL = ApplyDebugLocation::CreateArtificial(*this);
+
+ llvm::BasicBlock *ExitBlock = nullptr;
+ if (Guard.isValid()) {
+ // If we have a guard variable, check whether we've already performed
+ // these initializations. This happens for TLS initialization functions.
+ llvm::Value *GuardVal = Builder.CreateLoad(Guard);
+ llvm::Value *Uninit = Builder.CreateIsNull(GuardVal,
+ "guard.uninitialized");
+ llvm::BasicBlock *InitBlock = createBasicBlock("init");
+ ExitBlock = createBasicBlock("exit");
+ Builder.CreateCondBr(Uninit, InitBlock, ExitBlock);
+ EmitBlock(InitBlock);
+ // Mark as initialized before initializing anything else. If the
+ // initializers use previously-initialized thread_local vars, that's
+ // probably supposed to be OK, but the standard doesn't say.
+ Builder.CreateStore(llvm::ConstantInt::get(GuardVal->getType(),1), Guard);
+ }
+
+ RunCleanupsScope Scope(*this);
+
+ // When building in Objective-C++ ARC mode, create an autorelease pool
+ // around the global initializers.
+ if (getLangOpts().ObjCAutoRefCount && getLangOpts().CPlusPlus) {
+ llvm::Value *token = EmitObjCAutoreleasePoolPush();
+ EmitObjCAutoreleasePoolCleanup(token);
+ }
+
+ for (unsigned i = 0, e = Decls.size(); i != e; ++i)
+ if (Decls[i])
+ EmitRuntimeCall(Decls[i]);
+
+ Scope.ForceCleanup();
+
+ if (ExitBlock) {
+ Builder.CreateBr(ExitBlock);
+ EmitBlock(ExitBlock);
+ }
+ }
+
+ FinishFunction();
+}
+
+void CodeGenFunction::GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
+ const std::vector<std::pair<llvm::WeakVH, llvm::Constant*> >
+ &DtorsAndObjects) {
+ {
+ auto NL = ApplyDebugLocation::CreateEmpty(*this);
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
+ getTypes().arrangeNullaryFunction(), FunctionArgList());
+ // Emit an artificial location for this function.
+ auto AL = ApplyDebugLocation::CreateArtificial(*this);
+
+ // Emit the dtors, in reverse order from construction.
+ for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) {
+ llvm::Value *Callee = DtorsAndObjects[e - i - 1].first;
+ llvm::CallInst *CI = Builder.CreateCall(Callee,
+ DtorsAndObjects[e - i - 1].second);
+ // Make sure the call and the callee agree on calling convention.
+ if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
+ CI->setCallingConv(F->getCallingConv());
+ }
+ }
+
+ FinishFunction();
+}
+
+/// generateDestroyHelper - Generates a helper function which, when
+/// invoked, destroys the given object. The address of the object
+/// should be in global memory.
+llvm::Function *CodeGenFunction::generateDestroyHelper(
+ Address addr, QualType type, Destroyer *destroyer,
+ bool useEHCleanupForArray, const VarDecl *VD) {
+ FunctionArgList args;
+ ImplicitParamDecl dst(getContext(), nullptr, SourceLocation(), nullptr,
+ getContext().VoidPtrTy);
+ args.push_back(&dst);
+
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
+ getContext().VoidTy, args, FunctionType::ExtInfo(), /*variadic=*/false);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
+ llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(
+ FTy, "__cxx_global_array_dtor", FI, VD->getLocation());
+
+ CurEHLocation = VD->getLocStart();
+
+ StartFunction(VD, getContext().VoidTy, fn, FI, args);
+
+ emitDestroy(addr, type, destroyer, useEHCleanupForArray);
+
+ FinishFunction();
+
+ return fn;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
new file mode 100644
index 0000000..fce2e75
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
@@ -0,0 +1,1906 @@
+//===--- CGException.cpp - Emit LLVM Code for C++ exceptions ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ exception related code generation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
+#include "CGCleanup.h"
+#include "CGObjCRuntime.h"
+#include "TargetInfo.h"
+#include "clang/AST/Mangle.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/Support/SaveAndRestore.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+static llvm::Constant *getFreeExceptionFn(CodeGenModule &CGM) {
+ // void __cxa_free_exception(void *thrown_exception);
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
+
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_free_exception");
+}
+
+static llvm::Constant *getUnexpectedFn(CodeGenModule &CGM) {
+ // void __cxa_call_unexpected(void *thrown_exception);
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
+
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_call_unexpected");
+}
+
+llvm::Constant *CodeGenModule::getTerminateFn() {
+ // void __terminate();
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(VoidTy, /*IsVarArgs=*/false);
+
+ StringRef name;
+
+ // In C++, use std::terminate().
+ if (getLangOpts().CPlusPlus &&
+ getTarget().getCXXABI().isItaniumFamily()) {
+ name = "_ZSt9terminatev";
+ } else if (getLangOpts().CPlusPlus &&
+ getTarget().getCXXABI().isMicrosoft()) {
+ if (getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2015))
+ name = "__std_terminate";
+ else
+ name = "\01?terminate@@YAXXZ";
+ } else if (getLangOpts().ObjC1 &&
+ getLangOpts().ObjCRuntime.hasTerminate())
+ name = "objc_terminate";
+ else
+ name = "abort";
+ return CreateRuntimeFunction(FTy, name);
+}
+
+static llvm::Constant *getCatchallRethrowFn(CodeGenModule &CGM,
+ StringRef Name) {
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
+
+ return CGM.CreateRuntimeFunction(FTy, Name);
+}
+
+const EHPersonality EHPersonality::GNU_C = { "__gcc_personality_v0", nullptr };
+const EHPersonality
+EHPersonality::GNU_C_SJLJ = { "__gcc_personality_sj0", nullptr };
+const EHPersonality
+EHPersonality::GNU_C_SEH = { "__gcc_personality_seh0", nullptr };
+const EHPersonality
+EHPersonality::NeXT_ObjC = { "__objc_personality_v0", nullptr };
+const EHPersonality
+EHPersonality::GNU_CPlusPlus = { "__gxx_personality_v0", nullptr };
+const EHPersonality
+EHPersonality::GNU_CPlusPlus_SJLJ = { "__gxx_personality_sj0", nullptr };
+const EHPersonality
+EHPersonality::GNU_CPlusPlus_SEH = { "__gxx_personality_seh0", nullptr };
+const EHPersonality
+EHPersonality::GNU_ObjC = {"__gnu_objc_personality_v0", "objc_exception_throw"};
+const EHPersonality
+EHPersonality::GNU_ObjCXX = { "__gnustep_objcxx_personality_v0", nullptr };
+const EHPersonality
+EHPersonality::GNUstep_ObjC = { "__gnustep_objc_personality_v0", nullptr };
+const EHPersonality
+EHPersonality::MSVC_except_handler = { "_except_handler3", nullptr };
+const EHPersonality
+EHPersonality::MSVC_C_specific_handler = { "__C_specific_handler", nullptr };
+const EHPersonality
+EHPersonality::MSVC_CxxFrameHandler3 = { "__CxxFrameHandler3", nullptr };
+
+/// On Win64, use libgcc's SEH personality function. We fall back to dwarf on
+/// other platforms, unless the user asked for SjLj exceptions.
+static bool useLibGCCSEHPersonality(const llvm::Triple &T) {
+ return T.isOSWindows() && T.getArch() == llvm::Triple::x86_64;
+}
+
+static const EHPersonality &getCPersonality(const llvm::Triple &T,
+ const LangOptions &L) {
+ if (L.SjLjExceptions)
+ return EHPersonality::GNU_C_SJLJ;
+ else if (useLibGCCSEHPersonality(T))
+ return EHPersonality::GNU_C_SEH;
+ return EHPersonality::GNU_C;
+}
+
+static const EHPersonality &getObjCPersonality(const llvm::Triple &T,
+ const LangOptions &L) {
+ switch (L.ObjCRuntime.getKind()) {
+ case ObjCRuntime::FragileMacOSX:
+ return getCPersonality(T, L);
+ case ObjCRuntime::MacOSX:
+ case ObjCRuntime::iOS:
+ case ObjCRuntime::WatchOS:
+ return EHPersonality::NeXT_ObjC;
+ case ObjCRuntime::GNUstep:
+ if (L.ObjCRuntime.getVersion() >= VersionTuple(1, 7))
+ return EHPersonality::GNUstep_ObjC;
+ // fallthrough
+ case ObjCRuntime::GCC:
+ case ObjCRuntime::ObjFW:
+ return EHPersonality::GNU_ObjC;
+ }
+ llvm_unreachable("bad runtime kind");
+}
+
+static const EHPersonality &getCXXPersonality(const llvm::Triple &T,
+ const LangOptions &L) {
+ if (L.SjLjExceptions)
+ return EHPersonality::GNU_CPlusPlus_SJLJ;
+ else if (useLibGCCSEHPersonality(T))
+ return EHPersonality::GNU_CPlusPlus_SEH;
+ return EHPersonality::GNU_CPlusPlus;
+}
+
+/// Determines the personality function to use when both C++
+/// and Objective-C exceptions are being caught.
+static const EHPersonality &getObjCXXPersonality(const llvm::Triple &T,
+ const LangOptions &L) {
+ switch (L.ObjCRuntime.getKind()) {
+ // The ObjC personality defers to the C++ personality for non-ObjC
+ // handlers. Unlike the C++ case, we use the same personality
+ // function on targets using (backend-driven) SJLJ EH.
+ case ObjCRuntime::MacOSX:
+ case ObjCRuntime::iOS:
+ case ObjCRuntime::WatchOS:
+ return EHPersonality::NeXT_ObjC;
+
+ // In the fragile ABI, just use C++ exception handling and hope
+ // they're not doing crazy exception mixing.
+ case ObjCRuntime::FragileMacOSX:
+ return getCXXPersonality(T, L);
+
+ // The GCC runtime's personality function inherently doesn't support
+  // mixed EH. Use the ObjC personality just to avoid returning null.
+ case ObjCRuntime::GCC:
+ case ObjCRuntime::ObjFW: // XXX: this will change soon
+ return EHPersonality::GNU_ObjC;
+ case ObjCRuntime::GNUstep:
+ return EHPersonality::GNU_ObjCXX;
+ }
+ llvm_unreachable("bad runtime kind");
+}
+
+static const EHPersonality &getSEHPersonalityMSVC(const llvm::Triple &T) {
+ if (T.getArch() == llvm::Triple::x86)
+ return EHPersonality::MSVC_except_handler;
+ return EHPersonality::MSVC_C_specific_handler;
+}
+
+const EHPersonality &EHPersonality::get(CodeGenModule &CGM,
+ const FunctionDecl *FD) {
+ const llvm::Triple &T = CGM.getTarget().getTriple();
+ const LangOptions &L = CGM.getLangOpts();
+
+ // Functions using SEH get an SEH personality.
+ if (FD && FD->usesSEHTry())
+ return getSEHPersonalityMSVC(T);
+
+ // Try to pick a personality function that is compatible with MSVC if we're
+ // not compiling Obj-C. Obj-C users better have an Obj-C runtime that supports
+ // the GCC-style personality function.
+ if (T.isWindowsMSVCEnvironment() && !L.ObjC1) {
+ if (L.SjLjExceptions)
+ return EHPersonality::GNU_CPlusPlus_SJLJ;
+ else
+ return EHPersonality::MSVC_CxxFrameHandler3;
+ }
+
+ if (L.CPlusPlus && L.ObjC1)
+ return getObjCXXPersonality(T, L);
+ else if (L.CPlusPlus)
+ return getCXXPersonality(T, L);
+ else if (L.ObjC1)
+ return getObjCPersonality(T, L);
+ else
+ return getCPersonality(T, L);
+}
+
+const EHPersonality &EHPersonality::get(CodeGenFunction &CGF) {
+ return get(CGF.CGM, dyn_cast_or_null<FunctionDecl>(CGF.CurCodeDecl));
+}
+
+static llvm::Constant *getPersonalityFn(CodeGenModule &CGM,
+ const EHPersonality &Personality) {
+ llvm::Constant *Fn =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty, true),
+ Personality.PersonalityFn);
+ return Fn;
+}
+
+static llvm::Constant *getOpaquePersonalityFn(CodeGenModule &CGM,
+ const EHPersonality &Personality) {
+ llvm::Constant *Fn = getPersonalityFn(CGM, Personality);
+ return llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
+}
+
+/// Check whether a landingpad instruction only uses C++ features.
+static bool LandingPadHasOnlyCXXUses(llvm::LandingPadInst *LPI) {
+ for (unsigned I = 0, E = LPI->getNumClauses(); I != E; ++I) {
+ // Look for something that would've been returned by the ObjC
+ // runtime's GetEHType() method.
+ llvm::Value *Val = LPI->getClause(I)->stripPointerCasts();
+ if (LPI->isCatch(I)) {
+ // Check if the catch value has the ObjC prefix.
+ if (llvm::GlobalVariable *GV = dyn_cast<llvm::GlobalVariable>(Val))
+ // ObjC EH selector entries are always global variables with
+ // names starting like this.
+ if (GV->getName().startswith("OBJC_EHTYPE"))
+ return false;
+ } else {
+ // Check if any of the filter values have the ObjC prefix.
+ llvm::Constant *CVal = cast<llvm::Constant>(Val);
+ for (llvm::User::op_iterator
+ II = CVal->op_begin(), IE = CVal->op_end(); II != IE; ++II) {
+ if (llvm::GlobalVariable *GV =
+ cast<llvm::GlobalVariable>((*II)->stripPointerCasts()))
+ // ObjC EH selector entries are always global variables with
+ // names starting like this.
+ if (GV->getName().startswith("OBJC_EHTYPE"))
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+/// Check whether a personality function could reasonably be swapped
+/// for a C++ personality function.
+static bool PersonalityHasOnlyCXXUses(llvm::Constant *Fn) {
+ for (llvm::User *U : Fn->users()) {
+    // White-list bitcasts, but only if all of their users are C++-only too.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(U)) {
+ if (CE->getOpcode() != llvm::Instruction::BitCast) return false;
+ if (!PersonalityHasOnlyCXXUses(CE))
+ return false;
+ continue;
+ }
+
+ // Otherwise it must be a function.
+ llvm::Function *F = dyn_cast<llvm::Function>(U);
+ if (!F) return false;
+
+ for (auto BB = F->begin(), E = F->end(); BB != E; ++BB) {
+ if (BB->isLandingPad())
+ if (!LandingPadHasOnlyCXXUses(BB->getLandingPadInst()))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/// Try to use the C++ personality function in ObjC++. Not doing this
+/// can cause some incompatibilities with gcc, which is more
+/// aggressive about only using the ObjC++ personality in a function
+/// when it really needs it.
+void CodeGenModule::SimplifyPersonality() {
+ // If we're not in ObjC++ -fexceptions, there's nothing to do.
+ if (!LangOpts.CPlusPlus || !LangOpts.ObjC1 || !LangOpts.Exceptions)
+ return;
+
+  // Both the problem this endeavors to fix and the way the logic
+  // above works are specific to the NeXT runtime.
+ if (!LangOpts.ObjCRuntime.isNeXTFamily())
+ return;
+
+ const EHPersonality &ObjCXX = EHPersonality::get(*this, /*FD=*/nullptr);
+ const EHPersonality &CXX =
+ getCXXPersonality(getTarget().getTriple(), LangOpts);
+ if (&ObjCXX == &CXX)
+ return;
+
+ assert(std::strcmp(ObjCXX.PersonalityFn, CXX.PersonalityFn) != 0 &&
+ "Different EHPersonalities using the same personality function.");
+
+ llvm::Function *Fn = getModule().getFunction(ObjCXX.PersonalityFn);
+
+ // Nothing to do if it's unused.
+ if (!Fn || Fn->use_empty()) return;
+
+ // Can't do the optimization if it has non-C++ uses.
+ if (!PersonalityHasOnlyCXXUses(Fn)) return;
+
+ // Create the C++ personality function and kill off the old
+ // function.
+ llvm::Constant *CXXFn = getPersonalityFn(*this, CXX);
+
+ // This can happen if the user is screwing with us.
+ if (Fn->getType() != CXXFn->getType()) return;
+
+ Fn->replaceAllUsesWith(CXXFn);
+ Fn->eraseFromParent();
+}
+
+/// Returns the value to inject into a selector to indicate the
+/// presence of a catch-all.
+static llvm::Constant *getCatchAllValue(CodeGenFunction &CGF) {
+ // Possibly we should use @llvm.eh.catch.all.value here.
+ return llvm::ConstantPointerNull::get(CGF.Int8PtrTy);
+}
+
+namespace {
+ /// A cleanup to free the exception object if its initialization
+ /// throws.
+ struct FreeException final : EHScopeStack::Cleanup {
+ llvm::Value *exn;
+ FreeException(llvm::Value *exn) : exn(exn) {}
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ CGF.EmitNounwindRuntimeCall(getFreeExceptionFn(CGF.CGM), exn);
+ }
+ };
+} // end anonymous namespace
+
+// Emits an exception expression into the given location. This
+// differs from EmitAnyExprToMem only in that, if a final copy-ctor
+// call is required, an exception within that copy ctor causes
+// std::terminate to be invoked.
+void CodeGenFunction::EmitAnyExprToExn(const Expr *e, Address addr) {
+ // Make sure the exception object is cleaned up if there's an
+ // exception during initialization.
+ pushFullExprCleanup<FreeException>(EHCleanup, addr.getPointer());
+ EHScopeStack::stable_iterator cleanup = EHStack.stable_begin();
+
+ // __cxa_allocate_exception returns a void*; we need to cast this
+ // to the appropriate type for the object.
+ llvm::Type *ty = ConvertTypeForMem(e->getType())->getPointerTo();
+ Address typedAddr = Builder.CreateBitCast(addr, ty);
+
+ // FIXME: this isn't quite right! If there's a final unelided call
+ // to a copy constructor, then according to [except.terminate]p1 we
+ // must call std::terminate() if that constructor throws, because
+ // technically that copy occurs after the exception expression is
+ // evaluated but before the exception is caught. But the best way
+ // to handle that is to teach EmitAggExpr to do the final copy
+ // differently if it can't be elided.
+ EmitAnyExprToMem(e, typedAddr, e->getType().getQualifiers(),
+ /*IsInit*/ true);
+
+ // Deactivate the cleanup block.
+ DeactivateCleanupBlock(cleanup,
+ cast<llvm::Instruction>(typedAddr.getPointer()));
+}
+
+Address CodeGenFunction::getExceptionSlot() {
+ if (!ExceptionSlot)
+ ExceptionSlot = CreateTempAlloca(Int8PtrTy, "exn.slot");
+ return Address(ExceptionSlot, getPointerAlign());
+}
+
+Address CodeGenFunction::getEHSelectorSlot() {
+ if (!EHSelectorSlot)
+ EHSelectorSlot = CreateTempAlloca(Int32Ty, "ehselector.slot");
+ return Address(EHSelectorSlot, CharUnits::fromQuantity(4));
+}
+
+llvm::Value *CodeGenFunction::getExceptionFromSlot() {
+ return Builder.CreateLoad(getExceptionSlot(), "exn");
+}
+
+llvm::Value *CodeGenFunction::getSelectorFromSlot() {
+ return Builder.CreateLoad(getEHSelectorSlot(), "sel");
+}
+
+void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E,
+ bool KeepInsertionPoint) {
+ if (const Expr *SubExpr = E->getSubExpr()) {
+ QualType ThrowType = SubExpr->getType();
+ if (ThrowType->isObjCObjectPointerType()) {
+ const Stmt *ThrowStmt = E->getSubExpr();
+ const ObjCAtThrowStmt S(E->getExprLoc(), const_cast<Stmt *>(ThrowStmt));
+ CGM.getObjCRuntime().EmitThrowStmt(*this, S, false);
+ } else {
+ CGM.getCXXABI().emitThrow(*this, E);
+ }
+ } else {
+ CGM.getCXXABI().emitRethrow(*this, /*isNoReturn=*/true);
+ }
+
+ // throw is an expression, and the expression emitters expect us
+ // to leave ourselves at a valid insertion point.
+ if (KeepInsertionPoint)
+ EmitBlock(createBasicBlock("throw.cont"));
+}
+
+void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
+ if (!CGM.getLangOpts().CXXExceptions)
+ return;
+
+ const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD) {
+    // Check if the CapturedDecl is nothrow and create a terminate scope for it.
+ if (const CapturedDecl* CD = dyn_cast_or_null<CapturedDecl>(D)) {
+ if (CD->isNothrow())
+ EHStack.pushTerminate();
+ }
+ return;
+ }
+ const FunctionProtoType *Proto = FD->getType()->getAs<FunctionProtoType>();
+ if (!Proto)
+ return;
+
+ ExceptionSpecificationType EST = Proto->getExceptionSpecType();
+ if (isNoexceptExceptionSpec(EST)) {
+ if (Proto->getNoexceptSpec(getContext()) == FunctionProtoType::NR_Nothrow) {
+ // noexcept functions are simple terminate scopes.
+ EHStack.pushTerminate();
+ }
+ } else if (EST == EST_Dynamic || EST == EST_DynamicNone) {
+ // TODO: Revisit exception specifications for the MS ABI. There is a way to
+ // encode these in an object file but MSVC doesn't do anything with it.
+ if (getTarget().getCXXABI().isMicrosoft())
+ return;
+ unsigned NumExceptions = Proto->getNumExceptions();
+ EHFilterScope *Filter = EHStack.pushFilter(NumExceptions);
+
+ for (unsigned I = 0; I != NumExceptions; ++I) {
+ QualType Ty = Proto->getExceptionType(I);
+ QualType ExceptType = Ty.getNonReferenceType().getUnqualifiedType();
+ llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType,
+ /*ForEH=*/true);
+ Filter->setFilter(I, EHType);
+ }
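+    // e.g. for 'void f() throw(A, B)' (an illustrative sketch), the filter
+    // receives two entries: the EH RTTI descriptors for A and B.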
+ }
+}
+
+/// Emit the dispatch block for a filter scope if necessary.
+static void emitFilterDispatchBlock(CodeGenFunction &CGF,
+ EHFilterScope &filterScope) {
+ llvm::BasicBlock *dispatchBlock = filterScope.getCachedEHDispatchBlock();
+ if (!dispatchBlock) return;
+ if (dispatchBlock->use_empty()) {
+ delete dispatchBlock;
+ return;
+ }
+
+ CGF.EmitBlockAfterUses(dispatchBlock);
+
+ // If this isn't a catch-all filter, we need to check whether we got
+ // here because the filter triggered.
+ if (filterScope.getNumFilters()) {
+ // Load the selector value.
+ llvm::Value *selector = CGF.getSelectorFromSlot();
+ llvm::BasicBlock *unexpectedBB = CGF.createBasicBlock("ehspec.unexpected");
+
+ llvm::Value *zero = CGF.Builder.getInt32(0);
+ llvm::Value *failsFilter =
+ CGF.Builder.CreateICmpSLT(selector, zero, "ehspec.fails");
+ CGF.Builder.CreateCondBr(failsFilter, unexpectedBB,
+ CGF.getEHResumeBlock(false));
+
+ CGF.EmitBlock(unexpectedBB);
+ }
+
+ // Call __cxa_call_unexpected. This doesn't need to be an invoke
+ // because __cxa_call_unexpected magically filters exceptions
+ // according to the last landing pad the exception was thrown
+ // into. Seriously.
+ llvm::Value *exn = CGF.getExceptionFromSlot();
+ CGF.EmitRuntimeCall(getUnexpectedFn(CGF.CGM), exn)
+ ->setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+}
+
+void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
+ if (!CGM.getLangOpts().CXXExceptions)
+ return;
+
+ const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD) {
+    // Check if the CapturedDecl is nothrow and pop the terminate scope for it.
+ if (const CapturedDecl* CD = dyn_cast_or_null<CapturedDecl>(D)) {
+ if (CD->isNothrow())
+ EHStack.popTerminate();
+ }
+ return;
+ }
+ const FunctionProtoType *Proto = FD->getType()->getAs<FunctionProtoType>();
+ if (!Proto)
+ return;
+
+ ExceptionSpecificationType EST = Proto->getExceptionSpecType();
+ if (isNoexceptExceptionSpec(EST)) {
+ if (Proto->getNoexceptSpec(getContext()) == FunctionProtoType::NR_Nothrow) {
+ EHStack.popTerminate();
+ }
+ } else if (EST == EST_Dynamic || EST == EST_DynamicNone) {
+ // TODO: Revisit exception specifications for the MS ABI. There is a way to
+ // encode these in an object file but MSVC doesn't do anything with it.
+ if (getTarget().getCXXABI().isMicrosoft())
+ return;
+ EHFilterScope &filterScope = cast<EHFilterScope>(*EHStack.begin());
+ emitFilterDispatchBlock(*this, filterScope);
+ EHStack.popFilter();
+ }
+}
+
+void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) {
+ EnterCXXTryStmt(S);
+ EmitStmt(S.getTryBlock());
+ ExitCXXTryStmt(S);
+}
+
+void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
+ unsigned NumHandlers = S.getNumHandlers();
+ EHCatchScope *CatchScope = EHStack.pushCatch(NumHandlers);
+
+ for (unsigned I = 0; I != NumHandlers; ++I) {
+ const CXXCatchStmt *C = S.getHandler(I);
+
+ llvm::BasicBlock *Handler = createBasicBlock("catch");
+ if (C->getExceptionDecl()) {
+      // FIXME: Dropping the reference type from the caught type info makes it
+ // impossible to correctly implement catch-by-reference
+ // semantics for pointers. Unfortunately, this is what all
+ // existing compilers do, and it's not clear that the standard
+ // personality routine is capable of doing this right. See C++ DR 388:
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#388
+ Qualifiers CaughtTypeQuals;
+ QualType CaughtType = CGM.getContext().getUnqualifiedArrayType(
+ C->getCaughtType().getNonReferenceType(), CaughtTypeQuals);
+
+ CatchTypeInfo TypeInfo{nullptr, 0};
+ if (CaughtType->isObjCObjectPointerType())
+ TypeInfo.RTTI = CGM.getObjCRuntime().GetEHType(CaughtType);
+ else
+ TypeInfo = CGM.getCXXABI().getAddrOfCXXCatchHandlerType(
+ CaughtType, C->getCaughtType());
+ CatchScope->setHandler(I, TypeInfo, Handler);
+ } else {
+ // No exception decl indicates '...', a catch-all.
+ CatchScope->setHandler(I, CGM.getCXXABI().getCatchAllTypeInfo(), Handler);
+ }
+ }
+}
+
+llvm::BasicBlock *
+CodeGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) {
+ if (EHPersonality::get(*this).usesFuncletPads())
+ return getMSVCDispatchBlock(si);
+
+ // The dispatch block for the end of the scope chain is a block that
+ // just resumes unwinding.
+ if (si == EHStack.stable_end())
+ return getEHResumeBlock(true);
+
+ // Otherwise, we should look at the actual scope.
+ EHScope &scope = *EHStack.find(si);
+
+ llvm::BasicBlock *dispatchBlock = scope.getCachedEHDispatchBlock();
+ if (!dispatchBlock) {
+ switch (scope.getKind()) {
+ case EHScope::Catch: {
+ // Apply a special case to a single catch-all.
+ EHCatchScope &catchScope = cast<EHCatchScope>(scope);
+ if (catchScope.getNumHandlers() == 1 &&
+ catchScope.getHandler(0).isCatchAll()) {
+ dispatchBlock = catchScope.getHandler(0).Block;
+
+ // Otherwise, make a dispatch block.
+ } else {
+ dispatchBlock = createBasicBlock("catch.dispatch");
+ }
+ break;
+ }
+
+ case EHScope::Cleanup:
+ dispatchBlock = createBasicBlock("ehcleanup");
+ break;
+
+ case EHScope::Filter:
+ dispatchBlock = createBasicBlock("filter.dispatch");
+ break;
+
+ case EHScope::Terminate:
+ dispatchBlock = getTerminateHandler();
+ break;
+
+ case EHScope::PadEnd:
+ llvm_unreachable("PadEnd unnecessary for Itanium!");
+ }
+ scope.setCachedEHDispatchBlock(dispatchBlock);
+ }
+ return dispatchBlock;
+}
+
+llvm::BasicBlock *
+CodeGenFunction::getMSVCDispatchBlock(EHScopeStack::stable_iterator SI) {
+ // Returning nullptr indicates that the previous dispatch block should unwind
+ // to caller.
+ if (SI == EHStack.stable_end())
+ return nullptr;
+
+ // Otherwise, we should look at the actual scope.
+ EHScope &EHS = *EHStack.find(SI);
+
+ llvm::BasicBlock *DispatchBlock = EHS.getCachedEHDispatchBlock();
+ if (DispatchBlock)
+ return DispatchBlock;
+
+ if (EHS.getKind() == EHScope::Terminate)
+ DispatchBlock = getTerminateHandler();
+ else
+ DispatchBlock = createBasicBlock();
+ CGBuilderTy Builder(*this, DispatchBlock);
+
+ switch (EHS.getKind()) {
+ case EHScope::Catch:
+ DispatchBlock->setName("catch.dispatch");
+ break;
+
+ case EHScope::Cleanup:
+ DispatchBlock->setName("ehcleanup");
+ break;
+
+ case EHScope::Filter:
+ llvm_unreachable("exception specifications not handled yet!");
+
+ case EHScope::Terminate:
+ DispatchBlock->setName("terminate");
+ break;
+
+ case EHScope::PadEnd:
+ llvm_unreachable("PadEnd dispatch block missing!");
+ }
+ EHS.setCachedEHDispatchBlock(DispatchBlock);
+ return DispatchBlock;
+}
+
+/// Check whether this is a non-EH scope, i.e. a scope which doesn't
+/// affect exception handling. Currently, the only non-EH scopes are
+/// normal-only cleanup scopes.
+static bool isNonEHScope(const EHScope &S) {
+ switch (S.getKind()) {
+ case EHScope::Cleanup:
+ return !cast<EHCleanupScope>(S).isEHCleanup();
+ case EHScope::Filter:
+ case EHScope::Catch:
+ case EHScope::Terminate:
+ case EHScope::PadEnd:
+ return false;
+ }
+
+ llvm_unreachable("Invalid EHScope Kind!");
+}
+
+llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
+ assert(EHStack.requiresLandingPad());
+ assert(!EHStack.empty());
+
+ // If exceptions are disabled, there are usually no landingpads. However, when
+ // SEH is enabled, functions using SEH still get landingpads.
+ const LangOptions &LO = CGM.getLangOpts();
+ if (!LO.Exceptions) {
+ if (!LO.Borland && !LO.MicrosoftExt)
+ return nullptr;
+ if (!currentFunctionUsesSEHTry())
+ return nullptr;
+ }
+
+ // Check the innermost scope for a cached landing pad. If this is
+ // a non-EH cleanup, we'll check enclosing scopes in EmitLandingPad.
+ llvm::BasicBlock *LP = EHStack.begin()->getCachedLandingPad();
+ if (LP) return LP;
+
+ const EHPersonality &Personality = EHPersonality::get(*this);
+
+ if (!CurFn->hasPersonalityFn())
+ CurFn->setPersonalityFn(getOpaquePersonalityFn(CGM, Personality));
+
+ if (Personality.usesFuncletPads()) {
+ // We don't need separate landing pads in the funclet model.
+ LP = getEHDispatchBlock(EHStack.getInnermostEHScope());
+ } else {
+ // Build the landing pad for this scope.
+ LP = EmitLandingPad();
+ }
+
+ assert(LP);
+
+ // Cache the landing pad on the innermost scope. If this is a
+ // non-EH scope, cache the landing pad on the enclosing scope, too.
+ for (EHScopeStack::iterator ir = EHStack.begin(); true; ++ir) {
+ ir->setCachedLandingPad(LP);
+ if (!isNonEHScope(*ir)) break;
+ }
+
+ return LP;
+}
+
+llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
+ assert(EHStack.requiresLandingPad());
+
+ EHScope &innermostEHScope = *EHStack.find(EHStack.getInnermostEHScope());
+ switch (innermostEHScope.getKind()) {
+ case EHScope::Terminate:
+ return getTerminateLandingPad();
+
+ case EHScope::PadEnd:
+ llvm_unreachable("PadEnd unnecessary for Itanium!");
+
+ case EHScope::Catch:
+ case EHScope::Cleanup:
+ case EHScope::Filter:
+ if (llvm::BasicBlock *lpad = innermostEHScope.getCachedLandingPad())
+ return lpad;
+ }
+
+ // Save the current IR generation state.
+ CGBuilderTy::InsertPoint savedIP = Builder.saveAndClearIP();
+ auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, CurEHLocation);
+
+ // Create and configure the landing pad.
+ llvm::BasicBlock *lpad = createBasicBlock("lpad");
+ EmitBlock(lpad);
+
+ llvm::LandingPadInst *LPadInst = Builder.CreateLandingPad(
+ llvm::StructType::get(Int8PtrTy, Int32Ty, nullptr), 0);
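+
+  // The instruction just created looks roughly like this (illustrative;
+  // clauses are added further below):
+  //   %lpad = landingpad { i8*, i32 }
+  //             catch i8* bitcast (i8** @_ZTIi to i8*)  ; one per handler
+  //             cleanup                                 ; if needed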
+
+ llvm::Value *LPadExn = Builder.CreateExtractValue(LPadInst, 0);
+ Builder.CreateStore(LPadExn, getExceptionSlot());
+ llvm::Value *LPadSel = Builder.CreateExtractValue(LPadInst, 1);
+ Builder.CreateStore(LPadSel, getEHSelectorSlot());
+
+  // The exception pointer and selector were saved into their slots just
+  // above. It's safe to use a single exception slot per function because
+  // EH cleanups can never have nested try/catches.
+
+ // Accumulate all the handlers in scope.
+ bool hasCatchAll = false;
+ bool hasCleanup = false;
+ bool hasFilter = false;
+ SmallVector<llvm::Value*, 4> filterTypes;
+ llvm::SmallPtrSet<llvm::Value*, 4> catchTypes;
+ for (EHScopeStack::iterator I = EHStack.begin(), E = EHStack.end(); I != E;
+ ++I) {
+
+ switch (I->getKind()) {
+ case EHScope::Cleanup:
+ // If we have a cleanup, remember that.
+ hasCleanup = (hasCleanup || cast<EHCleanupScope>(*I).isEHCleanup());
+ continue;
+
+ case EHScope::Filter: {
+ assert(I.next() == EHStack.end() && "EH filter is not end of EH stack");
+ assert(!hasCatchAll && "EH filter reached after catch-all");
+
+ // Filter scopes get added to the landingpad in weird ways.
+ EHFilterScope &filter = cast<EHFilterScope>(*I);
+ hasFilter = true;
+
+ // Add all the filter values.
+ for (unsigned i = 0, e = filter.getNumFilters(); i != e; ++i)
+ filterTypes.push_back(filter.getFilter(i));
+ goto done;
+ }
+
+ case EHScope::Terminate:
+ // Terminate scopes are basically catch-alls.
+ assert(!hasCatchAll);
+ hasCatchAll = true;
+ goto done;
+
+ case EHScope::Catch:
+ break;
+
+ case EHScope::PadEnd:
+ llvm_unreachable("PadEnd unnecessary for Itanium!");
+ }
+
+ EHCatchScope &catchScope = cast<EHCatchScope>(*I);
+ for (unsigned hi = 0, he = catchScope.getNumHandlers(); hi != he; ++hi) {
+ EHCatchScope::Handler handler = catchScope.getHandler(hi);
+ assert(handler.Type.Flags == 0 &&
+ "landingpads do not support catch handler flags");
+
+      // If this is a catch-all, record that and stop scanning.
+ if (!handler.Type.RTTI) {
+ assert(!hasCatchAll);
+ hasCatchAll = true;
+ goto done;
+ }
+
+ // Check whether we already have a handler for this type.
+ if (catchTypes.insert(handler.Type.RTTI).second)
+ // If not, add it directly to the landingpad.
+ LPadInst->addClause(handler.Type.RTTI);
+ }
+ }
+
+ done:
+ // If we have a catch-all, add null to the landingpad.
+ assert(!(hasCatchAll && hasFilter));
+ if (hasCatchAll) {
+ LPadInst->addClause(getCatchAllValue(*this));
+
+ // If we have an EH filter, we need to add those handlers in the
+ // right place in the landingpad, which is to say, at the end.
+ } else if (hasFilter) {
+ // Create a filter expression: a constant array indicating which filter
+ // types there are. The personality routine only lands here if the filter
+ // doesn't match.
+ SmallVector<llvm::Constant*, 8> Filters;
+ llvm::ArrayType *AType =
+ llvm::ArrayType::get(!filterTypes.empty() ?
+ filterTypes[0]->getType() : Int8PtrTy,
+ filterTypes.size());
+
+ for (unsigned i = 0, e = filterTypes.size(); i != e; ++i)
+ Filters.push_back(cast<llvm::Constant>(filterTypes[i]));
+ llvm::Constant *FilterArray = llvm::ConstantArray::get(AType, Filters);
+ LPadInst->addClause(FilterArray);
+
+ // Also check whether we need a cleanup.
+ if (hasCleanup)
+ LPadInst->setCleanup(true);
+
+ // Otherwise, signal that we at least have cleanups.
+ } else if (hasCleanup) {
+ LPadInst->setCleanup(true);
+ }
+
+ assert((LPadInst->getNumClauses() > 0 || LPadInst->isCleanup()) &&
+ "landingpad instruction has no clauses!");
+
+ // Tell the backend how to generate the landing pad.
+ Builder.CreateBr(getEHDispatchBlock(EHStack.getInnermostEHScope()));
+
+ // Restore the old IR generation state.
+ Builder.restoreIP(savedIP);
+
+ return lpad;
+}
+
+static void emitCatchPadBlock(CodeGenFunction &CGF, EHCatchScope &CatchScope) {
+ llvm::BasicBlock *DispatchBlock = CatchScope.getCachedEHDispatchBlock();
+ assert(DispatchBlock);
+
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveIP();
+ CGF.EmitBlockAfterUses(DispatchBlock);
+
+ llvm::Value *ParentPad = CGF.CurrentFuncletPad;
+ if (!ParentPad)
+ ParentPad = llvm::ConstantTokenNone::get(CGF.getLLVMContext());
+ llvm::BasicBlock *UnwindBB =
+ CGF.getEHDispatchBlock(CatchScope.getEnclosingEHScope());
+
+ unsigned NumHandlers = CatchScope.getNumHandlers();
+ llvm::CatchSwitchInst *CatchSwitch =
+ CGF.Builder.CreateCatchSwitch(ParentPad, UnwindBB, NumHandlers);
+
+ // Test against each of the exception types we claim to catch.
+ for (unsigned I = 0; I < NumHandlers; ++I) {
+ const EHCatchScope::Handler &Handler = CatchScope.getHandler(I);
+
+ CatchTypeInfo TypeInfo = Handler.Type;
+ if (!TypeInfo.RTTI)
+ TypeInfo.RTTI = llvm::Constant::getNullValue(CGF.VoidPtrTy);
+
+ CGF.Builder.SetInsertPoint(Handler.Block);
+
+ if (EHPersonality::get(CGF).isMSVCXXPersonality()) {
+ CGF.Builder.CreateCatchPad(
+ CatchSwitch, {TypeInfo.RTTI, CGF.Builder.getInt32(TypeInfo.Flags),
+ llvm::Constant::getNullValue(CGF.VoidPtrTy)});
+ } else {
+ CGF.Builder.CreateCatchPad(CatchSwitch, {TypeInfo.RTTI});
+ }
+
+ CatchSwitch->addHandler(Handler.Block);
+ }
+ CGF.Builder.restoreIP(SavedIP);
+}
+
+/// Emit the structure of the dispatch block for the given catch scope.
+/// It is an invariant that the dispatch block already exists.
+static void emitCatchDispatchBlock(CodeGenFunction &CGF,
+ EHCatchScope &catchScope) {
+ if (EHPersonality::get(CGF).usesFuncletPads())
+ return emitCatchPadBlock(CGF, catchScope);
+
+ llvm::BasicBlock *dispatchBlock = catchScope.getCachedEHDispatchBlock();
+ assert(dispatchBlock);
+
+ // If there's only a single catch-all, getEHDispatchBlock returned
+ // that catch-all as the dispatch block.
+ if (catchScope.getNumHandlers() == 1 &&
+ catchScope.getHandler(0).isCatchAll()) {
+ assert(dispatchBlock == catchScope.getHandler(0).Block);
+ return;
+ }
+
+ CGBuilderTy::InsertPoint savedIP = CGF.Builder.saveIP();
+ CGF.EmitBlockAfterUses(dispatchBlock);
+
+ // Select the right handler.
+ llvm::Value *llvm_eh_typeid_for =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
+
+ // Load the selector value.
+ llvm::Value *selector = CGF.getSelectorFromSlot();
+
+ // Test against each of the exception types we claim to catch.
+ for (unsigned i = 0, e = catchScope.getNumHandlers(); ; ++i) {
+ assert(i < e && "ran off end of handlers!");
+ const EHCatchScope::Handler &handler = catchScope.getHandler(i);
+
+ llvm::Value *typeValue = handler.Type.RTTI;
+ assert(handler.Type.Flags == 0 &&
+ "landingpads do not support catch handler flags");
+ assert(typeValue && "fell into catch-all case!");
+ typeValue = CGF.Builder.CreateBitCast(typeValue, CGF.Int8PtrTy);
+
+ // Figure out the next block.
+ bool nextIsEnd;
+ llvm::BasicBlock *nextBlock;
+
+ // If this is the last handler, we're at the end, and the next
+ // block is the block for the enclosing EH scope.
+ if (i + 1 == e) {
+ nextBlock = CGF.getEHDispatchBlock(catchScope.getEnclosingEHScope());
+ nextIsEnd = true;
+
+ // If the next handler is a catch-all, we're at the end, and the
+ // next block is that handler.
+ } else if (catchScope.getHandler(i+1).isCatchAll()) {
+ nextBlock = catchScope.getHandler(i+1).Block;
+ nextIsEnd = true;
+
+ // Otherwise, we're not at the end and we need a new block.
+ } else {
+ nextBlock = CGF.createBasicBlock("catch.fallthrough");
+ nextIsEnd = false;
+ }
+
+ // Figure out the catch type's index in the LSDA's type table.
+ llvm::CallInst *typeIndex =
+ CGF.Builder.CreateCall(llvm_eh_typeid_for, typeValue);
+ typeIndex->setDoesNotThrow();
+
+ llvm::Value *matchesTypeIndex =
+ CGF.Builder.CreateICmpEQ(selector, typeIndex, "matches");
+ CGF.Builder.CreateCondBr(matchesTypeIndex, handler.Block, nextBlock);
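+    // Roughly, the IR emitted for each comparison (illustrative):
+    //   %tid = call i32 @llvm.eh.typeid.for(i8* %rtti)
+    //   %matches = icmp eq i32 %sel, %tid
+    //   br i1 %matches, label %handler, label %next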
+
+    // If the next block is the end of the dispatch (the enclosing EH
+    // scope or a catch-all handler), we're completely done.
+ if (nextIsEnd) {
+ CGF.Builder.restoreIP(savedIP);
+ return;
+ }
+ // Otherwise we need to emit and continue at that block.
+ CGF.EmitBlock(nextBlock);
+ }
+}
+
+void CodeGenFunction::popCatchScope() {
+ EHCatchScope &catchScope = cast<EHCatchScope>(*EHStack.begin());
+ if (catchScope.hasEHBranches())
+ emitCatchDispatchBlock(*this, catchScope);
+ EHStack.popCatch();
+}
+
+void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
+ unsigned NumHandlers = S.getNumHandlers();
+ EHCatchScope &CatchScope = cast<EHCatchScope>(*EHStack.begin());
+ assert(CatchScope.getNumHandlers() == NumHandlers);
+
+ // If the catch was not required, bail out now.
+ if (!CatchScope.hasEHBranches()) {
+ CatchScope.clearHandlerBlocks();
+ EHStack.popCatch();
+ return;
+ }
+
+ // Emit the structure of the EH dispatch for this catch.
+ emitCatchDispatchBlock(*this, CatchScope);
+
+ // Copy the handler blocks off before we pop the EH stack. Emitting
+ // the handlers might scribble on this memory.
+ SmallVector<EHCatchScope::Handler, 8> Handlers(
+ CatchScope.begin(), CatchScope.begin() + NumHandlers);
+
+ EHStack.popCatch();
+
+ // The fall-through block.
+ llvm::BasicBlock *ContBB = createBasicBlock("try.cont");
+
+ // We just emitted the body of the try; jump to the continue block.
+ if (HaveInsertPoint())
+ Builder.CreateBr(ContBB);
+
+ // Determine if we need an implicit rethrow for all these catch handlers;
+ // see the comment below.
+ bool doImplicitRethrow = false;
+ if (IsFnTryBlock)
+ doImplicitRethrow = isa<CXXDestructorDecl>(CurCodeDecl) ||
+ isa<CXXConstructorDecl>(CurCodeDecl);
+
+ // Perversely, we emit the handlers backwards precisely because we
+ // want them to appear in source order. In all of these cases, the
+ // catch block will have exactly one predecessor, which will be a
+ // particular block in the catch dispatch. However, in the case of
+ // a catch-all, one of the dispatch blocks will branch to two
+ // different handlers, and EmitBlockAfterUses will cause the second
+ // handler to be moved before the first.
+ for (unsigned I = NumHandlers; I != 0; --I) {
+ llvm::BasicBlock *CatchBlock = Handlers[I-1].Block;
+ EmitBlockAfterUses(CatchBlock);
+
+ // Catch the exception if this isn't a catch-all.
+ const CXXCatchStmt *C = S.getHandler(I-1);
+
+ // Enter a cleanup scope, including the catch variable and the
+ // end-catch.
+ RunCleanupsScope CatchScope(*this);
+
+ // Initialize the catch variable and set up the cleanups.
+ SaveAndRestore<llvm::Instruction *> RestoreCurrentFuncletPad(
+ CurrentFuncletPad);
+ CGM.getCXXABI().emitBeginCatch(*this, C);
+
+ // Emit the PGO counter increment.
+ incrementProfileCounter(C);
+
+ // Perform the body of the catch.
+ EmitStmt(C->getHandlerBlock());
+
+ // [except.handle]p11:
+ // The currently handled exception is rethrown if control
+ // reaches the end of a handler of the function-try-block of a
+ // constructor or destructor.
+
+ // It is important that we only do this on fallthrough and not on
+ // return. Note that it's illegal to put a return in a
+ // constructor function-try-block's catch handler (p14), so this
+ // really only applies to destructors.
+ if (doImplicitRethrow && HaveInsertPoint()) {
+ CGM.getCXXABI().emitRethrow(*this, /*isNoReturn*/false);
+ Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+ }
+
+ // Fall out through the catch cleanups.
+ CatchScope.ForceCleanup();
+
+ // Branch out of the try.
+ if (HaveInsertPoint())
+ Builder.CreateBr(ContBB);
+ }
+
+ EmitBlock(ContBB);
+ incrementProfileCounter(&S);
+}
+
+namespace {
+ struct CallEndCatchForFinally final : EHScopeStack::Cleanup {
+ llvm::Value *ForEHVar;
+ llvm::Value *EndCatchFn;
+ CallEndCatchForFinally(llvm::Value *ForEHVar, llvm::Value *EndCatchFn)
+ : ForEHVar(ForEHVar), EndCatchFn(EndCatchFn) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ llvm::BasicBlock *EndCatchBB = CGF.createBasicBlock("finally.endcatch");
+ llvm::BasicBlock *CleanupContBB =
+ CGF.createBasicBlock("finally.cleanup.cont");
+
+ llvm::Value *ShouldEndCatch =
+ CGF.Builder.CreateFlagLoad(ForEHVar, "finally.endcatch");
+ CGF.Builder.CreateCondBr(ShouldEndCatch, EndCatchBB, CleanupContBB);
+ CGF.EmitBlock(EndCatchBB);
+ CGF.EmitRuntimeCallOrInvoke(EndCatchFn); // catch-all, so might throw
+ CGF.EmitBlock(CleanupContBB);
+ }
+ };
+
+ struct PerformFinally final : EHScopeStack::Cleanup {
+ const Stmt *Body;
+ llvm::Value *ForEHVar;
+ llvm::Value *EndCatchFn;
+ llvm::Value *RethrowFn;
+ llvm::Value *SavedExnVar;
+
+ PerformFinally(const Stmt *Body, llvm::Value *ForEHVar,
+ llvm::Value *EndCatchFn,
+ llvm::Value *RethrowFn, llvm::Value *SavedExnVar)
+ : Body(Body), ForEHVar(ForEHVar), EndCatchFn(EndCatchFn),
+ RethrowFn(RethrowFn), SavedExnVar(SavedExnVar) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ // Enter a cleanup to call the end-catch function if one was provided.
+ if (EndCatchFn)
+ CGF.EHStack.pushCleanup<CallEndCatchForFinally>(NormalAndEHCleanup,
+ ForEHVar, EndCatchFn);
+
+ // Save the current cleanup destination in case there are
+ // cleanups in the finally block.
+ llvm::Value *SavedCleanupDest =
+ CGF.Builder.CreateLoad(CGF.getNormalCleanupDestSlot(),
+ "cleanup.dest.saved");
+
+ // Emit the finally block.
+ CGF.EmitStmt(Body);
+
+ // If the end of the finally is reachable, check whether this was
+ // for EH. If so, rethrow.
+ if (CGF.HaveInsertPoint()) {
+ llvm::BasicBlock *RethrowBB = CGF.createBasicBlock("finally.rethrow");
+ llvm::BasicBlock *ContBB = CGF.createBasicBlock("finally.cont");
+
+ llvm::Value *ShouldRethrow =
+ CGF.Builder.CreateFlagLoad(ForEHVar, "finally.shouldthrow");
+ CGF.Builder.CreateCondBr(ShouldRethrow, RethrowBB, ContBB);
+
+ CGF.EmitBlock(RethrowBB);
+ if (SavedExnVar) {
+ CGF.EmitRuntimeCallOrInvoke(RethrowFn,
+ CGF.Builder.CreateAlignedLoad(SavedExnVar, CGF.getPointerAlign()));
+ } else {
+ CGF.EmitRuntimeCallOrInvoke(RethrowFn);
+ }
+ CGF.Builder.CreateUnreachable();
+
+ CGF.EmitBlock(ContBB);
+
+ // Restore the cleanup destination.
+ CGF.Builder.CreateStore(SavedCleanupDest,
+ CGF.getNormalCleanupDestSlot());
+ }
+
+ // Leave the end-catch cleanup. As an optimization, pretend that
+ // the fallthrough path was inaccessible; we've dynamically proven
+ // that we're not in the EH case along that path.
+ if (EndCatchFn) {
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
+ CGF.PopCleanupBlock();
+ CGF.Builder.restoreIP(SavedIP);
+ }
+
+ // Now make sure we actually have an insertion point or the
+ // cleanup gods will hate us.
+ CGF.EnsureInsertPoint();
+ }
+ };
+} // end anonymous namespace
+
+/// Enters a finally block for an implementation using zero-cost
+/// exceptions. This is mostly general, but hard-codes some
+/// language/ABI-specific behavior in the catch-all sections.
+void CodeGenFunction::FinallyInfo::enter(CodeGenFunction &CGF,
+ const Stmt *body,
+ llvm::Constant *beginCatchFn,
+ llvm::Constant *endCatchFn,
+ llvm::Constant *rethrowFn) {
+ assert((beginCatchFn != nullptr) == (endCatchFn != nullptr) &&
+ "begin/end catch functions not paired");
+ assert(rethrowFn && "rethrow function is required");
+
+ BeginCatchFn = beginCatchFn;
+
+ // The rethrow function has one of the following two types:
+ // void (*)()
+ // void (*)(void*)
+ // In the latter case we need to pass it the exception object.
+ // But we can't use the exception slot because the @finally might
+ // have a landing pad (which would overwrite the exception slot).
+ llvm::FunctionType *rethrowFnTy =
+ cast<llvm::FunctionType>(
+ cast<llvm::PointerType>(rethrowFn->getType())->getElementType());
+ SavedExnVar = nullptr;
+ if (rethrowFnTy->getNumParams())
+ SavedExnVar = CGF.CreateTempAlloca(CGF.Int8PtrTy, "finally.exn");
+
+ // A finally block is a statement which must be executed on any edge
+ // out of a given scope. Unlike a cleanup, the finally block may
+ // contain arbitrary control flow leading out of itself. In
+ // addition, finally blocks should always be executed, even if there
+ // are no catch handlers higher on the stack. Therefore, we
+ // surround the protected scope with a combination of a normal
+ // cleanup (to catch attempts to break out of the block via normal
+ // control flow) and an EH catch-all (semantically "outside" any try
+ // statement to which the finally block might have been attached).
+ // The finally block itself is generated in the context of a cleanup
+ // which conditionally leaves the catch-all.
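+  //
+  // Schematically, the scope structure we build (a sketch, not emitted
+  // code):
+  //   normal cleanup:  run the finally body; if ForEHVar was set, rethrow
+  //   EH catch-all:    save the exception, set ForEHVar, and branch through
+  //                    the finally cleanup to the rethrow destination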
+
+ // Jump destination for performing the finally block on an exception
+ // edge. We'll never actually reach this block, so unreachable is
+ // fine.
+ RethrowDest = CGF.getJumpDestInCurrentScope(CGF.getUnreachableBlock());
+
+ // Whether the finally block is being executed for EH purposes.
+ ForEHVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "finally.for-eh");
+ CGF.Builder.CreateFlagStore(false, ForEHVar);
+
+ // Enter a normal cleanup which will perform the @finally block.
+ CGF.EHStack.pushCleanup<PerformFinally>(NormalCleanup, body,
+ ForEHVar, endCatchFn,
+ rethrowFn, SavedExnVar);
+
+ // Enter a catch-all scope.
+ llvm::BasicBlock *catchBB = CGF.createBasicBlock("finally.catchall");
+ EHCatchScope *catchScope = CGF.EHStack.pushCatch(1);
+ catchScope->setCatchAllHandler(0, catchBB);
+}
+
+void CodeGenFunction::FinallyInfo::exit(CodeGenFunction &CGF) {
+ // Leave the finally catch-all.
+ EHCatchScope &catchScope = cast<EHCatchScope>(*CGF.EHStack.begin());
+ llvm::BasicBlock *catchBB = catchScope.getHandler(0).Block;
+
+ CGF.popCatchScope();
+
+  // If there are any references to the catch-all block, emit it; otherwise
+  // delete the unused block.
+ if (catchBB->use_empty()) {
+ delete catchBB;
+ } else {
+ CGBuilderTy::InsertPoint savedIP = CGF.Builder.saveAndClearIP();
+ CGF.EmitBlock(catchBB);
+
+ llvm::Value *exn = nullptr;
+
+ // If there's a begin-catch function, call it.
+ if (BeginCatchFn) {
+ exn = CGF.getExceptionFromSlot();
+ CGF.EmitNounwindRuntimeCall(BeginCatchFn, exn);
+ }
+
+ // If we need to remember the exception pointer to rethrow later, do so.
+ if (SavedExnVar) {
+ if (!exn) exn = CGF.getExceptionFromSlot();
+ CGF.Builder.CreateAlignedStore(exn, SavedExnVar, CGF.getPointerAlign());
+ }
+
+    // Tell the cleanups in the finally block that we're doing this for EH.
+ CGF.Builder.CreateFlagStore(true, ForEHVar);
+
+ // Thread a jump through the finally cleanup.
+ CGF.EmitBranchThroughCleanup(RethrowDest);
+
+ CGF.Builder.restoreIP(savedIP);
+ }
+
+ // Finally, leave the @finally cleanup.
+ CGF.PopCleanupBlock();
+}
+
+llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
+ if (TerminateLandingPad)
+ return TerminateLandingPad;
+
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ // This will get inserted at the end of the function.
+ TerminateLandingPad = createBasicBlock("terminate.lpad");
+ Builder.SetInsertPoint(TerminateLandingPad);
+
+ // Tell the backend that this is a landing pad.
+ const EHPersonality &Personality = EHPersonality::get(*this);
+
+ if (!CurFn->hasPersonalityFn())
+ CurFn->setPersonalityFn(getOpaquePersonalityFn(CGM, Personality));
+
+ llvm::LandingPadInst *LPadInst = Builder.CreateLandingPad(
+ llvm::StructType::get(Int8PtrTy, Int32Ty, nullptr), 0);
+ LPadInst->addClause(getCatchAllValue(*this));
+
+ llvm::Value *Exn = nullptr;
+ if (getLangOpts().CPlusPlus)
+ Exn = Builder.CreateExtractValue(LPadInst, 0);
+ llvm::CallInst *terminateCall =
+ CGM.getCXXABI().emitTerminateForUnexpectedException(*this, Exn);
+ terminateCall->setDoesNotReturn();
+ Builder.CreateUnreachable();
+
+ // Restore the saved insertion state.
+ Builder.restoreIP(SavedIP);
+
+ return TerminateLandingPad;
+}
+
+llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
+ if (TerminateHandler)
+ return TerminateHandler;
+
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ // Set up the terminate handler. This block is inserted at the very
+ // end of the function by FinishFunction.
+ TerminateHandler = createBasicBlock("terminate.handler");
+ Builder.SetInsertPoint(TerminateHandler);
+ llvm::Value *Exn = nullptr;
+ if (EHPersonality::get(*this).usesFuncletPads()) {
+ llvm::Value *ParentPad = CurrentFuncletPad;
+ if (!ParentPad)
+ ParentPad = llvm::ConstantTokenNone::get(CGM.getLLVMContext());
+ Builder.CreateCleanupPad(ParentPad);
+ } else {
+ if (getLangOpts().CPlusPlus)
+ Exn = getExceptionFromSlot();
+ }
+ llvm::CallInst *terminateCall =
+ CGM.getCXXABI().emitTerminateForUnexpectedException(*this, Exn);
+ terminateCall->setDoesNotReturn();
+ Builder.CreateUnreachable();
+
+ // Restore the saved insertion state.
+ Builder.restoreIP(SavedIP);
+
+ return TerminateHandler;
+}
+
+llvm::BasicBlock *CodeGenFunction::getEHResumeBlock(bool isCleanup) {
+ if (EHResumeBlock) return EHResumeBlock;
+
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveIP();
+
+ // We emit a jump to a notional label at the outermost unwind state.
+ EHResumeBlock = createBasicBlock("eh.resume");
+ Builder.SetInsertPoint(EHResumeBlock);
+
+ const EHPersonality &Personality = EHPersonality::get(*this);
+
+ // This can always be a call because we necessarily didn't find
+ // anything on the EH stack which needs our help.
+ const char *RethrowName = Personality.CatchallRethrowFn;
+ if (RethrowName != nullptr && !isCleanup) {
+ EmitRuntimeCall(getCatchallRethrowFn(CGM, RethrowName),
+ getExceptionFromSlot())->setDoesNotReturn();
+ Builder.CreateUnreachable();
+ Builder.restoreIP(SavedIP);
+ return EHResumeBlock;
+ }
+
+ // Recreate the landingpad's return value for the 'resume' instruction.
+ llvm::Value *Exn = getExceptionFromSlot();
+ llvm::Value *Sel = getSelectorFromSlot();
+
+ llvm::Type *LPadType = llvm::StructType::get(Exn->getType(),
+ Sel->getType(), nullptr);
+ llvm::Value *LPadVal = llvm::UndefValue::get(LPadType);
+ LPadVal = Builder.CreateInsertValue(LPadVal, Exn, 0, "lpad.val");
+ LPadVal = Builder.CreateInsertValue(LPadVal, Sel, 1, "lpad.val");
+
+ Builder.CreateResume(LPadVal);
+ Builder.restoreIP(SavedIP);
+ return EHResumeBlock;
+}
+
+void CodeGenFunction::EmitSEHTryStmt(const SEHTryStmt &S) {
+ EnterSEHTryStmt(S);
+ {
+ JumpDest TryExit = getJumpDestInCurrentScope("__try.__leave");
+
+ SEHTryEpilogueStack.push_back(&TryExit);
+ EmitStmt(S.getTryBlock());
+ SEHTryEpilogueStack.pop_back();
+
+ if (!TryExit.getBlock()->use_empty())
+ EmitBlock(TryExit.getBlock(), /*IsFinished=*/true);
+ else
+ delete TryExit.getBlock();
+ }
+ ExitSEHTryStmt(S);
+}
+
+namespace {
+struct PerformSEHFinally final : EHScopeStack::Cleanup {
+ llvm::Function *OutlinedFinally;
+ PerformSEHFinally(llvm::Function *OutlinedFinally)
+ : OutlinedFinally(OutlinedFinally) {}
+
+ void Emit(CodeGenFunction &CGF, Flags F) override {
+ ASTContext &Context = CGF.getContext();
+ CodeGenModule &CGM = CGF.CGM;
+
+ CallArgList Args;
+
+ // Compute the two argument values.
+ QualType ArgTys[2] = {Context.UnsignedCharTy, Context.VoidPtrTy};
+ llvm::Value *LocalAddrFn = CGM.getIntrinsic(llvm::Intrinsic::localaddress);
+ llvm::Value *FP = CGF.Builder.CreateCall(LocalAddrFn);
+ llvm::Value *IsForEH =
+ llvm::ConstantInt::get(CGF.ConvertType(ArgTys[0]), F.isForEHCleanup());
+ Args.add(RValue::get(IsForEH), ArgTys[0]);
+ Args.add(RValue::get(FP), ArgTys[1]);
+
+ // Arrange a two-arg function info and type.
+ FunctionProtoType::ExtProtoInfo EPI;
+ const auto *FPT = cast<FunctionProtoType>(
+ Context.getFunctionType(Context.VoidTy, ArgTys, EPI));
+ const CGFunctionInfo &FnInfo =
+ CGM.getTypes().arrangeFreeFunctionCall(Args, FPT,
+ /*chainCall=*/false);
+
+ CGF.EmitCall(FnInfo, OutlinedFinally, ReturnValueSlot(), Args);
+ }
+};
+} // end anonymous namespace
+
+namespace {
+/// Find all local variable captures in the statement.
+struct CaptureFinder : ConstStmtVisitor<CaptureFinder> {
+ CodeGenFunction &ParentCGF;
+ const VarDecl *ParentThis;
+ llvm::SmallSetVector<const VarDecl *, 4> Captures;
+ Address SEHCodeSlot = Address::invalid();
+ CaptureFinder(CodeGenFunction &ParentCGF, const VarDecl *ParentThis)
+ : ParentCGF(ParentCGF), ParentThis(ParentThis) {}
+
+ // Return true if we need to do any capturing work.
+ bool foundCaptures() {
+ return !Captures.empty() || SEHCodeSlot.isValid();
+ }
+
+ void Visit(const Stmt *S) {
+ // See if this is a capture, then recurse.
+ ConstStmtVisitor<CaptureFinder>::Visit(S);
+ for (const Stmt *Child : S->children())
+ if (Child)
+ Visit(Child);
+ }
+
+ void VisitDeclRefExpr(const DeclRefExpr *E) {
+ // If this is already a capture, just make sure we capture 'this'.
+ if (E->refersToEnclosingVariableOrCapture()) {
+ Captures.insert(ParentThis);
+ return;
+ }
+
+ const auto *D = dyn_cast<VarDecl>(E->getDecl());
+ if (D && D->isLocalVarDeclOrParm() && D->hasLocalStorage())
+ Captures.insert(D);
+ }
+
+ void VisitCXXThisExpr(const CXXThisExpr *E) {
+ Captures.insert(ParentThis);
+ }
+
+ void VisitCallExpr(const CallExpr *E) {
+    // We only need to add parent frame allocations for these builtins on x86.
+ if (ParentCGF.getTarget().getTriple().getArch() != llvm::Triple::x86)
+ return;
+
+ unsigned ID = E->getBuiltinCallee();
+ switch (ID) {
+ case Builtin::BI__exception_code:
+ case Builtin::BI_exception_code:
+ // This is the simple case where we are the outermost finally. All we
+ // have to do here is make sure we escape this and recover it in the
+ // outlined handler.
+ if (!SEHCodeSlot.isValid())
+ SEHCodeSlot = ParentCGF.SEHCodeSlotStack.back();
+ break;
+ }
+ }
+};
+} // end anonymous namespace
+
+Address CodeGenFunction::recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
+ Address ParentVar,
+ llvm::Value *ParentFP) {
+ llvm::CallInst *RecoverCall = nullptr;
+ CGBuilderTy Builder(*this, AllocaInsertPt);
+ if (auto *ParentAlloca = dyn_cast<llvm::AllocaInst>(ParentVar.getPointer())) {
+ // Mark the variable escaped if nobody else referenced it and compute the
+ // localescape index.
+ auto InsertPair = ParentCGF.EscapedLocals.insert(
+ std::make_pair(ParentAlloca, ParentCGF.EscapedLocals.size()));
+ int FrameEscapeIdx = InsertPair.first->second;
+ // call i8* @llvm.localrecover(i8* bitcast(@parentFn), i8* %fp, i32 N)
+ llvm::Function *FrameRecoverFn = llvm::Intrinsic::getDeclaration(
+ &CGM.getModule(), llvm::Intrinsic::localrecover);
+ llvm::Constant *ParentI8Fn =
+ llvm::ConstantExpr::getBitCast(ParentCGF.CurFn, Int8PtrTy);
+ RecoverCall = Builder.CreateCall(
+ FrameRecoverFn, {ParentI8Fn, ParentFP,
+ llvm::ConstantInt::get(Int32Ty, FrameEscapeIdx)});
+
+ } else {
+ // If the parent didn't have an alloca, we're doing some nested outlining.
+ // Just clone the existing localrecover call, but tweak the FP argument to
+ // use our FP value. All other arguments are constants.
+ auto *ParentRecover =
+ cast<llvm::IntrinsicInst>(ParentVar.getPointer()->stripPointerCasts());
+ assert(ParentRecover->getIntrinsicID() == llvm::Intrinsic::localrecover &&
+ "expected alloca or localrecover in parent LocalDeclMap");
+ RecoverCall = cast<llvm::CallInst>(ParentRecover->clone());
+ RecoverCall->setArgOperand(1, ParentFP);
+ RecoverCall->insertBefore(AllocaInsertPt);
+ }
+
+ // Bitcast the variable, rename it, and insert it in the local decl map.
+ llvm::Value *ChildVar =
+ Builder.CreateBitCast(RecoverCall, ParentVar.getType());
+ ChildVar->setName(ParentVar.getName());
+ return Address(ChildVar, ParentVar.getAlignment());
+}
+
+void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
+ const Stmt *OutlinedStmt,
+ bool IsFilter) {
+ // Find all captures in the Stmt.
+ CaptureFinder Finder(ParentCGF, ParentCGF.CXXABIThisDecl);
+ Finder.Visit(OutlinedStmt);
+
+ // We can exit early on x86_64 when there are no captures. We just have to
+ // save the exception code in filters so that __exception_code() works.
+ if (!Finder.foundCaptures() &&
+ CGM.getTarget().getTriple().getArch() != llvm::Triple::x86) {
+ if (IsFilter)
+ EmitSEHExceptionCodeSave(ParentCGF, nullptr, nullptr);
+ return;
+ }
+
+ llvm::Value *EntryFP = nullptr;
+ CGBuilderTy Builder(CGM, AllocaInsertPt);
+ if (IsFilter && CGM.getTarget().getTriple().getArch() == llvm::Triple::x86) {
+ // 32-bit SEH filters need to be careful about FP recovery. The end of the
+ // EH registration is passed in as the EBP physical register. We can
+ // recover that with llvm.frameaddress(1).
+ EntryFP = Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::frameaddress), {Builder.getInt32(1)});
+ } else {
+ // Otherwise, for x64 and 32-bit finally functions, the parent FP is the
+ // second parameter.
+ auto AI = CurFn->arg_begin();
+ ++AI;
+ EntryFP = &*AI;
+ }
+
+ llvm::Value *ParentFP = EntryFP;
+ if (IsFilter) {
+ // Given whatever FP the runtime provided us in EntryFP, recover the true
+ // frame pointer of the parent function. We only need to do this in filters,
+ // since finally funclets recover the parent FP for us.
+ llvm::Function *RecoverFPIntrin =
+ CGM.getIntrinsic(llvm::Intrinsic::x86_seh_recoverfp);
+ llvm::Constant *ParentI8Fn =
+ llvm::ConstantExpr::getBitCast(ParentCGF.CurFn, Int8PtrTy);
+ ParentFP = Builder.CreateCall(RecoverFPIntrin, {ParentI8Fn, EntryFP});
+ }
+
+ // Create llvm.localrecover calls for all captures.
+ for (const VarDecl *VD : Finder.Captures) {
+ if (isa<ImplicitParamDecl>(VD)) {
+ CGM.ErrorUnsupported(VD, "'this' captured by SEH");
+ CXXThisValue = llvm::UndefValue::get(ConvertTypeForMem(VD->getType()));
+ continue;
+ }
+ if (VD->getType()->isVariablyModifiedType()) {
+ CGM.ErrorUnsupported(VD, "VLA captured by SEH");
+ continue;
+ }
+ assert((isa<ImplicitParamDecl>(VD) || VD->isLocalVarDeclOrParm()) &&
+ "captured non-local variable");
+
+ // If this decl hasn't been declared yet, it will be declared in the
+ // OutlinedStmt.
+ auto I = ParentCGF.LocalDeclMap.find(VD);
+ if (I == ParentCGF.LocalDeclMap.end())
+ continue;
+
+ Address ParentVar = I->second;
+ setAddrOfLocalVar(
+ VD, recoverAddrOfEscapedLocal(ParentCGF, ParentVar, ParentFP));
+ }
+
+ if (Finder.SEHCodeSlot.isValid()) {
+ SEHCodeSlotStack.push_back(
+ recoverAddrOfEscapedLocal(ParentCGF, Finder.SEHCodeSlot, ParentFP));
+ }
+
+ if (IsFilter)
+ EmitSEHExceptionCodeSave(ParentCGF, ParentFP, EntryFP);
+}
+
+/// Arrange a function prototype that can be called by Windows exception
+/// handling personalities. On Win64, the prototype looks like:
+/// RetTy func(void *EHPtrs, void *ParentFP);
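+/// Concretely, matching the argument setup below: a finally helper takes
+/// (unsigned char abnormal_termination, void *frame_pointer) and returns
+/// void; a Win64 filter takes (void *exception_pointers, void *frame_pointer)
+/// and returns long; a Win32 filter takes no arguments.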
+void CodeGenFunction::startOutlinedSEHHelper(CodeGenFunction &ParentCGF,
+ bool IsFilter,
+ const Stmt *OutlinedStmt) {
+ SourceLocation StartLoc = OutlinedStmt->getLocStart();
+
+ // Get the mangled function name.
+ SmallString<128> Name;
+ {
+ llvm::raw_svector_ostream OS(Name);
+ const Decl *ParentCodeDecl = ParentCGF.CurCodeDecl;
+ const NamedDecl *Parent = dyn_cast_or_null<NamedDecl>(ParentCodeDecl);
+ assert(Parent && "FIXME: handle unnamed decls (lambdas, blocks) with SEH");
+ MangleContext &Mangler = CGM.getCXXABI().getMangleContext();
+ if (IsFilter)
+ Mangler.mangleSEHFilterExpression(Parent, OS);
+ else
+ Mangler.mangleSEHFinallyBlock(Parent, OS);
+ }
+
+ FunctionArgList Args;
+ if (CGM.getTarget().getTriple().getArch() != llvm::Triple::x86 || !IsFilter) {
+ // All SEH finally functions take two parameters. Win64 filters take two
+ // parameters. Win32 filters take no parameters.
+ if (IsFilter) {
+ Args.push_back(ImplicitParamDecl::Create(
+ getContext(), nullptr, StartLoc,
+ &getContext().Idents.get("exception_pointers"),
+ getContext().VoidPtrTy));
+ } else {
+ Args.push_back(ImplicitParamDecl::Create(
+ getContext(), nullptr, StartLoc,
+ &getContext().Idents.get("abnormal_termination"),
+ getContext().UnsignedCharTy));
+ }
+ Args.push_back(ImplicitParamDecl::Create(
+ getContext(), nullptr, StartLoc,
+ &getContext().Idents.get("frame_pointer"), getContext().VoidPtrTy));
+ }
+
+ QualType RetTy = IsFilter ? getContext().LongTy : getContext().VoidTy;
+
+ llvm::Function *ParentFn = ParentCGF.CurFn;
+ const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionDeclaration(
+ RetTy, Args, FunctionType::ExtInfo(), /*isVariadic=*/false);
+
+ llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
+ llvm::Function *Fn = llvm::Function::Create(
+ FnTy, llvm::GlobalValue::InternalLinkage, Name.str(), &CGM.getModule());
+ // The filter is either in the same comdat as the function, or it's internal.
+ if (llvm::Comdat *C = ParentFn->getComdat()) {
+ Fn->setComdat(C);
+ } else if (ParentFn->hasWeakLinkage() || ParentFn->hasLinkOnceLinkage()) {
+ llvm::Comdat *C = CGM.getModule().getOrInsertComdat(ParentFn->getName());
+ ParentFn->setComdat(C);
+ Fn->setComdat(C);
+ } else {
+ Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
+ }
+
+ IsOutlinedSEHHelper = true;
+
+ StartFunction(GlobalDecl(), RetTy, Fn, FnInfo, Args,
+ OutlinedStmt->getLocStart(), OutlinedStmt->getLocStart());
+
+ CGM.SetLLVMFunctionAttributes(nullptr, FnInfo, CurFn);
+ EmitCapturedLocals(ParentCGF, OutlinedStmt, IsFilter);
+}
+
+/// Create a stub filter function that will ultimately hold the code of the
+/// filter expression. The EH preparation passes in LLVM will outline the code
+/// from the main function body into this stub.
+llvm::Function *
+CodeGenFunction::GenerateSEHFilterFunction(CodeGenFunction &ParentCGF,
+ const SEHExceptStmt &Except) {
+ const Expr *FilterExpr = Except.getFilterExpr();
+ startOutlinedSEHHelper(ParentCGF, true, FilterExpr);
+
+ // Emit the original filter expression, convert to i32, and return.
+ llvm::Value *R = EmitScalarExpr(FilterExpr);
+ R = Builder.CreateIntCast(R, ConvertType(getContext().LongTy),
+ FilterExpr->getType()->isSignedIntegerType());
+ Builder.CreateStore(R, ReturnValue);
+
+ FinishFunction(FilterExpr->getLocEnd());
+
+ return CurFn;
+}
+
+llvm::Function *
+CodeGenFunction::GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF,
+ const SEHFinallyStmt &Finally) {
+ const Stmt *FinallyBlock = Finally.getBlock();
+ startOutlinedSEHHelper(ParentCGF, false, FinallyBlock);
+
+ // Mark finally block calls as nounwind and noinline to make LLVM's job a
+ // little easier.
+ // FIXME: Remove these restrictions in the future.
+ CurFn->addFnAttr(llvm::Attribute::NoUnwind);
+ CurFn->addFnAttr(llvm::Attribute::NoInline);
+
+  // Emit the body of the finally block.
+ EmitStmt(FinallyBlock);
+
+ FinishFunction(FinallyBlock->getLocEnd());
+
+ return CurFn;
+}
+
+void CodeGenFunction::EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
+ llvm::Value *ParentFP,
+ llvm::Value *EntryFP) {
+ // Get the pointer to the EXCEPTION_POINTERS struct. This is returned by the
+ // __exception_info intrinsic.
+ if (CGM.getTarget().getTriple().getArch() != llvm::Triple::x86) {
+ // On Win64, the info is passed as the first parameter to the filter.
+ SEHInfo = &*CurFn->arg_begin();
+ SEHCodeSlotStack.push_back(
+ CreateMemTemp(getContext().IntTy, "__exception_code"));
+ } else {
+ // On Win32, the EBP on entry to the filter points to the end of an
+ // exception registration object. It contains 6 32-bit fields, and the info
+ // pointer is stored in the second field. So, GEP 20 bytes backwards and
+ // load the pointer.
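+    // (Arithmetic: 6 fields * 4 bytes = a 24-byte record ending at EntryFP,
+    // so the second field sits at EntryFP - 24 + 4 = EntryFP - 20.)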
+ SEHInfo = Builder.CreateConstInBoundsGEP1_32(Int8Ty, EntryFP, -20);
+ SEHInfo = Builder.CreateBitCast(SEHInfo, Int8PtrTy->getPointerTo());
+ SEHInfo = Builder.CreateAlignedLoad(Int8PtrTy, SEHInfo, getPointerAlign());
+ SEHCodeSlotStack.push_back(recoverAddrOfEscapedLocal(
+ ParentCGF, ParentCGF.SEHCodeSlotStack.back(), ParentFP));
+ }
+
+ // Save the exception code in the exception slot to unify exception access in
+ // the filter function and the landing pad.
+ // struct EXCEPTION_POINTERS {
+ // EXCEPTION_RECORD *ExceptionRecord;
+ // CONTEXT *ContextRecord;
+ // };
+ // int exceptioncode = exception_pointers->ExceptionRecord->ExceptionCode;
+ llvm::Type *RecordTy = CGM.Int32Ty->getPointerTo();
+ llvm::Type *PtrsTy = llvm::StructType::get(RecordTy, CGM.VoidPtrTy, nullptr);
+ llvm::Value *Ptrs = Builder.CreateBitCast(SEHInfo, PtrsTy->getPointerTo());
+ llvm::Value *Rec = Builder.CreateStructGEP(PtrsTy, Ptrs, 0);
+ Rec = Builder.CreateAlignedLoad(Rec, getPointerAlign());
+ llvm::Value *Code = Builder.CreateAlignedLoad(Rec, getIntAlign());
+ assert(!SEHCodeSlotStack.empty() && "emitting EH code outside of __except");
+ Builder.CreateStore(Code, SEHCodeSlotStack.back());
+}
+
+llvm::Value *CodeGenFunction::EmitSEHExceptionInfo() {
+ // Sema should diagnose calling this builtin outside of a filter context, but
+ // don't crash if we screw up.
+ if (!SEHInfo)
+ return llvm::UndefValue::get(Int8PtrTy);
+ assert(SEHInfo->getType() == Int8PtrTy);
+ return SEHInfo;
+}
+
+llvm::Value *CodeGenFunction::EmitSEHExceptionCode() {
+ assert(!SEHCodeSlotStack.empty() && "emitting EH code outside of __except");
+ return Builder.CreateLoad(SEHCodeSlotStack.back());
+}
+
+llvm::Value *CodeGenFunction::EmitSEHAbnormalTermination() {
+ // Abnormal termination is just the first parameter to the outlined finally
+ // helper.
+ auto AI = CurFn->arg_begin();
+ return Builder.CreateZExt(&*AI, Int32Ty);
+}
+
+void CodeGenFunction::EnterSEHTryStmt(const SEHTryStmt &S) {
+ CodeGenFunction HelperCGF(CGM, /*suppressNewContext=*/true);
+ if (const SEHFinallyStmt *Finally = S.getFinallyHandler()) {
+ // Outline the finally block.
+ llvm::Function *FinallyFunc =
+ HelperCGF.GenerateSEHFinallyFunction(*this, *Finally);
+
+ // Push a cleanup for __finally blocks.
+ EHStack.pushCleanup<PerformSEHFinally>(NormalAndEHCleanup, FinallyFunc);
+ return;
+ }
+
+ // Otherwise, we must have an __except block.
+ const SEHExceptStmt *Except = S.getExceptHandler();
+ assert(Except);
+ EHCatchScope *CatchScope = EHStack.pushCatch(1);
+ SEHCodeSlotStack.push_back(
+ CreateMemTemp(getContext().IntTy, "__exception_code"));
+
+ // If the filter is known to evaluate to 1, then we can use the clause
+ // "catch i8* null". We can't do this on x86 because the filter has to save
+ // the exception code.
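+  // (The common `__except (EXCEPTION_EXECUTE_HANDLER)` is exactly this case,
+  // since EXCEPTION_EXECUTE_HANDLER is defined to 1.)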
+ llvm::Constant *C =
+ CGM.EmitConstantExpr(Except->getFilterExpr(), getContext().IntTy, this);
+ if (CGM.getTarget().getTriple().getArch() != llvm::Triple::x86 && C &&
+ C->isOneValue()) {
+ CatchScope->setCatchAllHandler(0, createBasicBlock("__except"));
+ return;
+ }
+
+ // In general, we have to emit an outlined filter function. Use the function
+ // in place of the RTTI typeinfo global that C++ EH uses.
+ llvm::Function *FilterFunc =
+ HelperCGF.GenerateSEHFilterFunction(*this, *Except);
+ llvm::Constant *OpaqueFunc =
+ llvm::ConstantExpr::getBitCast(FilterFunc, Int8PtrTy);
+ CatchScope->setHandler(0, OpaqueFunc, createBasicBlock("__except.ret"));
+}
+
+void CodeGenFunction::ExitSEHTryStmt(const SEHTryStmt &S) {
+ // Just pop the cleanup if it's a __finally block.
+ if (S.getFinallyHandler()) {
+ PopCleanupBlock();
+ return;
+ }
+
+ // Otherwise, we must have an __except block.
+ const SEHExceptStmt *Except = S.getExceptHandler();
+ assert(Except && "__try must have __finally xor __except");
+ EHCatchScope &CatchScope = cast<EHCatchScope>(*EHStack.begin());
+
+ // Don't emit the __except block if the __try block lacked invokes.
+ // TODO: Model unwind edges from instructions, either with iload / istore or
+ // a try body function.
+ if (!CatchScope.hasEHBranches()) {
+ CatchScope.clearHandlerBlocks();
+ EHStack.popCatch();
+ SEHCodeSlotStack.pop_back();
+ return;
+ }
+
+ // The fall-through block.
+ llvm::BasicBlock *ContBB = createBasicBlock("__try.cont");
+
+ // We just emitted the body of the __try; jump to the continue block.
+ if (HaveInsertPoint())
+ Builder.CreateBr(ContBB);
+
+ // Check if our filter function returned true.
+ emitCatchDispatchBlock(*this, CatchScope);
+
+ // Grab the block before we pop the handler.
+ llvm::BasicBlock *CatchPadBB = CatchScope.getHandler(0).Block;
+ EHStack.popCatch();
+
+ EmitBlockAfterUses(CatchPadBB);
+
+ // __except blocks don't get outlined into funclets, so immediately do a
+ // catchret.
+ llvm::CatchPadInst *CPI =
+ cast<llvm::CatchPadInst>(CatchPadBB->getFirstNonPHI());
+ llvm::BasicBlock *ExceptBB = createBasicBlock("__except");
+ Builder.CreateCatchRet(CPI, ExceptBB);
+ EmitBlock(ExceptBB);
+
+ // On Win64, the exception code is returned in EAX. Copy it into the slot.
+ if (CGM.getTarget().getTriple().getArch() != llvm::Triple::x86) {
+ llvm::Function *SEHCodeIntrin =
+ CGM.getIntrinsic(llvm::Intrinsic::eh_exceptioncode);
+ llvm::Value *Code = Builder.CreateCall(SEHCodeIntrin, {CPI});
+ Builder.CreateStore(Code, SEHCodeSlotStack.back());
+ }
+
+ // Emit the __except body.
+ EmitStmt(Except->getBlock());
+
+ // End the lifetime of the exception code.
+ SEHCodeSlotStack.pop_back();
+
+ if (HaveInsertPoint())
+ Builder.CreateBr(ContBB);
+
+ EmitBlock(ContBB);
+}
+
+void CodeGenFunction::EmitSEHLeaveStmt(const SEHLeaveStmt &S) {
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+  // This must be a __leave from a __finally block, which Sema warns about
+  // and which is undefined behavior. Just emit unreachable.
+ if (!isSEHTryScope()) {
+ Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+ return;
+ }
+
+ EmitBranchThroughCleanup(*SEHTryEpilogueStack.back());
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
new file mode 100644
index 0000000..dabd2b1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
@@ -0,0 +1,4046 @@
+//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
+#include "CGCall.h"
+#include "CGDebugInfo.h"
+#include "CGObjCRuntime.h"
+#include "CGOpenMPRuntime.h"
+#include "CGRecordLayout.h"
+#include "CodeGenModule.h"
+#include "TargetInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/MathExtras.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+//===--------------------------------------------------------------------===//
+// Miscellaneous Helper Methods
+//===--------------------------------------------------------------------===//
+
+llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
+ unsigned addressSpace =
+ cast<llvm::PointerType>(value->getType())->getAddressSpace();
+
+ llvm::PointerType *destType = Int8PtrTy;
+ if (addressSpace)
+ destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);
+
+ if (value->getType() == destType) return value;
+ return Builder.CreateBitCast(value, destType);
+}
+
+/// CreateTempAlloca - This creates an alloca and inserts it into the entry
+/// block.
+Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
+ const Twine &Name) {
+ auto Alloca = CreateTempAlloca(Ty, Name);
+ Alloca->setAlignment(Align.getQuantity());
+ return Address(Alloca, Align);
+}
+
+/// CreateTempAlloca - This creates an alloca and inserts it into the entry
+/// block.
+llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
+ const Twine &Name) {
+ if (!Builder.isNamePreserving())
+ return new llvm::AllocaInst(Ty, nullptr, "", AllocaInsertPt);
+ return new llvm::AllocaInst(Ty, nullptr, Name, AllocaInsertPt);
+}
+
+/// CreateDefaultAlignTempAlloca - This creates an alloca with the
+/// default alignment of the corresponding LLVM type, which is *not*
+/// guaranteed to be related in any way to the expected alignment of
+/// an AST type that might have been lowered to Ty.
+Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
+ const Twine &Name) {
+ CharUnits Align =
+ CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlignment(Ty));
+ return CreateTempAlloca(Ty, Align, Name);
+}
+
+void CodeGenFunction::InitTempAlloca(Address Var, llvm::Value *Init) {
+ assert(isa<llvm::AllocaInst>(Var.getPointer()));
+ auto *Store = new llvm::StoreInst(Init, Var.getPointer());
+ Store->setAlignment(Var.getAlignment().getQuantity());
+ llvm::BasicBlock *Block = AllocaInsertPt->getParent();
+ Block->getInstList().insertAfter(AllocaInsertPt->getIterator(), Store);
+}
+
+Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
+ CharUnits Align = getContext().getTypeAlignInChars(Ty);
+ return CreateTempAlloca(ConvertType(Ty), Align, Name);
+}
+
+Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name) {
+ // FIXME: Should we prefer the preferred type alignment here?
+ return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name);
+}
+
+Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
+ const Twine &Name) {
+ return CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name);
+}
+
+/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
+/// expression and compare the result against zero, returning an Int1Ty value.
+llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
+ PGO.setCurrentStmt(E);
+ if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
+ llvm::Value *MemPtr = EmitScalarExpr(E);
+ return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
+ }
+
+ QualType BoolTy = getContext().BoolTy;
+ SourceLocation Loc = E->getExprLoc();
+ if (!E->getType()->isAnyComplexType())
+ return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
+
+ return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
+ Loc);
+}
+
+/// EmitIgnoredExpr - Emit code to compute the specified expression,
+/// ignoring the result.
+void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
+ if (E->isRValue())
+ return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);
+
+ // Just emit it as an l-value and drop the result.
+ EmitLValue(E);
+}
+
+/// EmitAnyExpr - Emit code to compute the specified expression which
+/// can have any type. The result is returned as an RValue struct.
+/// If this is an aggregate expression, AggSlot indicates where the
+/// result should be returned.
+RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
+ AggValueSlot aggSlot,
+ bool ignoreResult) {
+ switch (getEvaluationKind(E->getType())) {
+ case TEK_Scalar:
+ return RValue::get(EmitScalarExpr(E, ignoreResult));
+ case TEK_Complex:
+ return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
+ case TEK_Aggregate:
+ if (!ignoreResult && aggSlot.isIgnored())
+ aggSlot = CreateAggTemp(E->getType(), "agg-temp");
+ EmitAggExpr(E, aggSlot);
+ return aggSlot.asRValue();
+ }
+ llvm_unreachable("bad evaluation kind");
+}
+
+/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), except that the result will
+/// always be accessible even if no aggregate location is provided.
+RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
+ AggValueSlot AggSlot = AggValueSlot::ignored();
+
+ if (hasAggregateEvaluationKind(E->getType()))
+ AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
+ return EmitAnyExpr(E, AggSlot);
+}
+
+/// EmitAnyExprToMem - Evaluate an expression into a given memory
+/// location.
+void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
+ Address Location,
+ Qualifiers Quals,
+ bool IsInit) {
+ // FIXME: This function should take an LValue as an argument.
+ switch (getEvaluationKind(E->getType())) {
+ case TEK_Complex:
+ EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
+ /*isInit*/ false);
+ return;
+
+ case TEK_Aggregate: {
+ EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
+ AggValueSlot::IsDestructed_t(IsInit),
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsAliased_t(!IsInit)));
+ return;
+ }
+
+ case TEK_Scalar: {
+ RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
+ LValue LV = MakeAddrLValue(Location, E->getType());
+ EmitStoreThroughLValue(RV, LV);
+ return;
+ }
+ }
+ llvm_unreachable("bad evaluation kind");
+}
+
+static void
+pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
+ const Expr *E, Address ReferenceTemporary) {
+ // Objective-C++ ARC:
+ // If we are binding a reference to a temporary that has ownership, we
+ // need to perform retain/release operations on the temporary.
+ //
+ // FIXME: This should be looking at E, not M.
+ if (auto Lifetime = M->getType().getObjCLifetime()) {
+ switch (Lifetime) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ // Carry on to normal cleanup handling.
+ break;
+
+ case Qualifiers::OCL_Autoreleasing:
+ // Nothing to do; cleaned up by an autorelease pool.
+ return;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ switch (StorageDuration Duration = M->getStorageDuration()) {
+ case SD_Static:
+ // Note: we intentionally do not register a cleanup to release
+ // the object on program termination.
+ return;
+
+ case SD_Thread:
+ // FIXME: We should probably register a cleanup in this case.
+ return;
+
+ case SD_Automatic:
+ case SD_FullExpression:
+ CodeGenFunction::Destroyer *Destroy;
+ CleanupKind CleanupKind;
+ if (Lifetime == Qualifiers::OCL_Strong) {
+ const ValueDecl *VD = M->getExtendingDecl();
+ bool Precise =
+ VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
+ CleanupKind = CGF.getARCCleanupKind();
+ Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
+ : &CodeGenFunction::destroyARCStrongImprecise;
+ } else {
+ // __weak objects always get EH cleanups; otherwise, exceptions
+ // could cause really nasty crashes instead of mere leaks.
+ CleanupKind = NormalAndEHCleanup;
+ Destroy = &CodeGenFunction::destroyARCWeak;
+ }
+ if (Duration == SD_FullExpression)
+ CGF.pushDestroy(CleanupKind, ReferenceTemporary,
+ M->getType(), *Destroy,
+ CleanupKind & EHCleanup);
+ else
+ CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
+ M->getType(),
+ *Destroy, CleanupKind & EHCleanup);
+ return;
+
+ case SD_Dynamic:
+ llvm_unreachable("temporary cannot have dynamic storage duration");
+ }
+ llvm_unreachable("unknown storage duration");
+ }
+ }
+
+ CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
+ if (const RecordType *RT =
+ E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
+ // Get the destructor for the reference temporary.
+ auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (!ClassDecl->hasTrivialDestructor())
+ ReferenceTemporaryDtor = ClassDecl->getDestructor();
+ }
+
+ if (!ReferenceTemporaryDtor)
+ return;
+
+ // Call the destructor for the temporary.
+ switch (M->getStorageDuration()) {
+ case SD_Static:
+ case SD_Thread: {
+ llvm::Constant *CleanupFn;
+ llvm::Constant *CleanupArg;
+ if (E->getType()->isArrayType()) {
+ CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
+ ReferenceTemporary, E->getType(),
+ CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
+ dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
+ CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
+ } else {
+ CleanupFn = CGF.CGM.getAddrOfCXXStructor(ReferenceTemporaryDtor,
+ StructorType::Complete);
+ CleanupArg = cast<llvm::Constant>(ReferenceTemporary.getPointer());
+ }
+ CGF.CGM.getCXXABI().registerGlobalDtor(
+ CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
+ break;
+ }
+
+ case SD_FullExpression:
+ CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
+ CodeGenFunction::destroyCXXObject,
+ CGF.getLangOpts().Exceptions);
+ break;
+
+ case SD_Automatic:
+ CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
+ ReferenceTemporary, E->getType(),
+ CodeGenFunction::destroyCXXObject,
+ CGF.getLangOpts().Exceptions);
+ break;
+
+ case SD_Dynamic:
+ llvm_unreachable("temporary cannot have dynamic storage duration");
+ }
+}
+
+static Address
+createReferenceTemporary(CodeGenFunction &CGF,
+ const MaterializeTemporaryExpr *M, const Expr *Inner) {
+ switch (M->getStorageDuration()) {
+ case SD_FullExpression:
+ case SD_Automatic: {
+    // If we have a constant temporary array or record, try to promote it into
+    // a constant global under the same rules under which a normal constant
+    // would've been promoted. This is easier on the optimizer and generally
+    // emits fewer instructions.
+ QualType Ty = Inner->getType();
+ if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
+ (Ty->isArrayType() || Ty->isRecordType()) &&
+ CGF.CGM.isTypeConstant(Ty, true))
+ if (llvm::Constant *Init = CGF.CGM.EmitConstantExpr(Inner, Ty, &CGF)) {
+ auto *GV = new llvm::GlobalVariable(
+ CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
+ llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp");
+ CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
+ GV->setAlignment(alignment.getQuantity());
+ // FIXME: Should we put the new global into a COMDAT?
+ return Address(GV, alignment);
+ }
+ return CGF.CreateMemTemp(Ty, "ref.tmp");
+ }
+ case SD_Thread:
+ case SD_Static:
+ return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
+
+ case SD_Dynamic:
+ llvm_unreachable("temporary can't have dynamic storage duration");
+ }
+ llvm_unreachable("unknown storage duration");
+}
+
+LValue CodeGenFunction::
+EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
+ const Expr *E = M->GetTemporaryExpr();
+
+  // FIXME: Ideally this would use EmitAnyExprToMem; however, we cannot do so
+  // because that would cause the lifetime adjustment to be lost for ARC.
+ auto ownership = M->getType().getObjCLifetime();
+ if (ownership != Qualifiers::OCL_None &&
+ ownership != Qualifiers::OCL_ExplicitNone) {
+ Address Object = createReferenceTemporary(*this, M, E);
+ if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
+ Object = Address(llvm::ConstantExpr::getBitCast(Var,
+ ConvertTypeForMem(E->getType())
+ ->getPointerTo(Object.getAddressSpace())),
+ Object.getAlignment());
+ // We should not have emitted the initializer for this temporary as a
+ // constant.
+ assert(!Var->hasInitializer());
+ Var->setInitializer(CGM.EmitNullConstant(E->getType()));
+ }
+ LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
+ AlignmentSource::Decl);
+
+ switch (getEvaluationKind(E->getType())) {
+ default: llvm_unreachable("expected scalar or aggregate expression");
+ case TEK_Scalar:
+ EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
+ break;
+ case TEK_Aggregate: {
+ EmitAggExpr(E, AggValueSlot::forAddr(Object,
+ E->getType().getQualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+ break;
+ }
+ }
+
+ pushTemporaryCleanup(*this, M, E, Object);
+ return RefTempDst;
+ }
+
+ SmallVector<const Expr *, 2> CommaLHSs;
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
+ E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
+
+ for (const auto &Ignored : CommaLHSs)
+ EmitIgnoredExpr(Ignored);
+
+ if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
+ if (opaque->getType()->isRecordType()) {
+ assert(Adjustments.empty());
+ return EmitOpaqueValueLValue(opaque);
+ }
+ }
+
+ // Create and initialize the reference temporary.
+ Address Object = createReferenceTemporary(*this, M, E);
+ if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
+ Object = Address(llvm::ConstantExpr::getBitCast(
+ Var, ConvertTypeForMem(E->getType())->getPointerTo()),
+ Object.getAlignment());
+ // If the temporary is a global and has a constant initializer or is a
+ // constant temporary that we promoted to a global, we may have already
+ // initialized it.
+ if (!Var->hasInitializer()) {
+ Var->setInitializer(CGM.EmitNullConstant(E->getType()));
+ EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
+ }
+ } else {
+ EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
+ }
+ pushTemporaryCleanup(*this, M, E, Object);
+
+ // Perform derived-to-base casts and/or field accesses, to get from the
+ // temporary object we created (and, potentially, for which we extended
+ // the lifetime) to the subobject we're binding the reference to.
+ for (unsigned I = Adjustments.size(); I != 0; --I) {
+ SubobjectAdjustment &Adjustment = Adjustments[I-1];
+ switch (Adjustment.Kind) {
+ case SubobjectAdjustment::DerivedToBaseAdjustment:
+ Object =
+ GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
+ Adjustment.DerivedToBase.BasePath->path_begin(),
+ Adjustment.DerivedToBase.BasePath->path_end(),
+ /*NullCheckValue=*/ false, E->getExprLoc());
+ break;
+
+ case SubobjectAdjustment::FieldAdjustment: {
+ LValue LV = MakeAddrLValue(Object, E->getType(),
+ AlignmentSource::Decl);
+ LV = EmitLValueForField(LV, Adjustment.Field);
+ assert(LV.isSimple() &&
+ "materialized temporary field is not a simple lvalue");
+ Object = LV.getAddress();
+ break;
+ }
+
+ case SubobjectAdjustment::MemberPointerAdjustment: {
+ llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
+ Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
+ Adjustment.Ptr.MPT);
+ break;
+ }
+ }
+ }
+
+ return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
+}
+
+RValue
+CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
+ // Emit the expression as an lvalue.
+ LValue LV = EmitLValue(E);
+ assert(LV.isSimple());
+ llvm::Value *Value = LV.getPointer();
+
+ if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
+ // C++11 [dcl.ref]p5 (as amended by core issue 453):
+ // If a glvalue to which a reference is directly bound designates neither
+ // an existing object or function of an appropriate type nor a region of
+ // storage of suitable size and alignment to contain an object of the
+ // reference's type, the behavior is undefined.
+ QualType Ty = E->getType();
+ EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
+ }
+
+ return RValue::get(Value);
+}
+
+
+/// getAccessedFieldNo - Given an encoded value and a result number, return the
+/// input field number being accessed.
+unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
+ const llvm::Constant *Elts) {
+ return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
+ ->getZExtValue();
+}
+
+/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
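+/// (This is the Murmur-inspired 128-to-64-bit mix: xor the two halves,
+/// multiply by a large odd constant, and fold the high bits back in with
+/// shift-by-47 xors, as the constants below show.)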
+static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
+ llvm::Value *High) {
+ llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
+ llvm::Value *K47 = Builder.getInt64(47);
+ llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
+ llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
+ llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
+ llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
+ return Builder.CreateMul(B1, KMul);
+}
+
+bool CodeGenFunction::sanitizePerformTypeCheck() const {
+ return SanOpts.has(SanitizerKind::Null) |
+ SanOpts.has(SanitizerKind::Alignment) |
+ SanOpts.has(SanitizerKind::ObjectSize) |
+ SanOpts.has(SanitizerKind::Vptr);
+}
+
+void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
+ llvm::Value *Ptr, QualType Ty,
+ CharUnits Alignment, bool SkipNullCheck) {
+ if (!sanitizePerformTypeCheck())
+ return;
+
+ // Don't check pointers outside the default address space. The null check
+ // isn't correct, the object-size check isn't supported by LLVM, and we can't
+ // communicate the addresses to the runtime handler for the vptr check.
+ if (Ptr->getType()->getPointerAddressSpace())
+ return;
+
+ SanitizerScope SanScope(this);
+
+ SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;
+ llvm::BasicBlock *Done = nullptr;
+
+ bool AllowNullPointers = TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
+ TCK == TCK_UpcastToVirtualBase;
+ if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
+ !SkipNullCheck) {
+ // The glvalue must not be an empty glvalue.
+ llvm::Value *IsNonNull = Builder.CreateIsNotNull(Ptr);
+
+ if (AllowNullPointers) {
+ // When performing pointer casts, it's OK if the value is null.
+ // Skip the remaining checks in that case.
+ Done = createBasicBlock("null");
+ llvm::BasicBlock *Rest = createBasicBlock("not.null");
+ Builder.CreateCondBr(IsNonNull, Rest, Done);
+ EmitBlock(Rest);
+ } else {
+ Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
+ }
+ }
+
+ if (SanOpts.has(SanitizerKind::ObjectSize) && !Ty->isIncompleteType()) {
+ uint64_t Size = getContext().getTypeSizeInChars(Ty).getQuantity();
+
+ // The glvalue must refer to a large enough storage region.
+ // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
+ // to check this.
+ // FIXME: Get object address space
+ llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
+ llvm::Value *Min = Builder.getFalse();
+ llvm::Value *CastAddr = Builder.CreateBitCast(Ptr, Int8PtrTy);
+ llvm::Value *LargeEnough =
+ Builder.CreateICmpUGE(Builder.CreateCall(F, {CastAddr, Min}),
+ llvm::ConstantInt::get(IntPtrTy, Size));
+ Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
+ }
+
+ uint64_t AlignVal = 0;
+
+ if (SanOpts.has(SanitizerKind::Alignment)) {
+ AlignVal = Alignment.getQuantity();
+ if (!Ty->isIncompleteType() && !AlignVal)
+ AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();
+
+ // The glvalue must be suitably aligned.
+ if (AlignVal) {
+ llvm::Value *Align =
+ Builder.CreateAnd(Builder.CreatePtrToInt(Ptr, IntPtrTy),
+ llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
+ llvm::Value *Aligned =
+ Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
+ Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
+ }
+ }
+
+ if (Checks.size() > 0) {
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(Loc),
+ EmitCheckTypeDescriptor(Ty),
+ llvm::ConstantInt::get(SizeTy, AlignVal),
+ llvm::ConstantInt::get(Int8Ty, TCK)
+ };
+ EmitCheck(Checks, "type_mismatch", StaticData, Ptr);
+ }
+
+ // If possible, check that the vptr indicates that there is a subobject of
+ // type Ty at offset zero within this object.
+ //
+ // C++11 [basic.life]p5,6:
+ // [For storage which does not refer to an object within its lifetime]
+ // The program has undefined behavior if:
+ // -- the [pointer or glvalue] is used to access a non-static data member
+ // or call a non-static member function
+ CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+ if (SanOpts.has(SanitizerKind::Vptr) &&
+ (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
+ TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
+ TCK == TCK_UpcastToVirtualBase) &&
+ RD && RD->hasDefinition() && RD->isDynamicClass()) {
+ // Compute a hash of the mangled name of the type.
+ //
+ // FIXME: This is not guaranteed to be deterministic! Move to a
+ // fingerprinting mechanism once LLVM provides one. For the time
+ // being the implementation happens to be deterministic.
+ SmallString<64> MangledName;
+ llvm::raw_svector_ostream Out(MangledName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
+ Out);
+
+ // Blacklist based on the mangled type.
+ if (!CGM.getContext().getSanitizerBlacklist().isBlacklistedType(
+ Out.str())) {
+ llvm::hash_code TypeHash = hash_value(Out.str());
+
+ // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
+ llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
+ llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
+ Address VPtrAddr(Builder.CreateBitCast(Ptr, VPtrTy), getPointerAlign());
+ llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
+ llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);
+
+ llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
+ Hash = Builder.CreateTrunc(Hash, IntPtrTy);
+
+ // Look the hash up in our cache.
+ const int CacheSize = 128;
+ llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
+ llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
+ "__ubsan_vptr_type_cache");
+ llvm::Value *Slot = Builder.CreateAnd(Hash,
+ llvm::ConstantInt::get(IntPtrTy,
+ CacheSize-1));
+ llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
+ llvm::Value *CacheVal =
+ Builder.CreateAlignedLoad(Builder.CreateInBoundsGEP(Cache, Indices),
+ getPointerAlign());
+
+ // If the hash isn't in the cache, call a runtime handler to perform the
+ // hard work of checking whether the vptr is for an object of the right
+ // type. This will either fill in the cache and return, or produce a
+ // diagnostic.
+ llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(Loc),
+ EmitCheckTypeDescriptor(Ty),
+ CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
+ llvm::ConstantInt::get(Int8Ty, TCK)
+ };
+ llvm::Value *DynamicData[] = { Ptr, Hash };
+ EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
+ "dynamic_type_cache_miss", StaticData, DynamicData);
+ }
+ }
+
+ if (Done) {
+ Builder.CreateBr(Done);
+ EmitBlock(Done);
+ }
+}
+
+/// Determine whether this expression refers to a flexible array member in a
+/// struct. We disable array bounds checks for such members.
+static bool isFlexibleArrayMemberExpr(const Expr *E) {
+ // For compatibility with existing code, we treat arrays of length 0 or
+ // 1 as flexible array members.
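+  // For example (illustrative):
+  //   struct S { unsigned Len; char Data[1]; };  // Data is treated as flexible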
+ const ArrayType *AT = E->getType()->castAsArrayTypeUnsafe();
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
+ if (CAT->getSize().ugt(1))
+ return false;
+ } else if (!isa<IncompleteArrayType>(AT))
+ return false;
+
+ E = E->IgnoreParens();
+
+ // A flexible array member must be the last member in the class.
+ if (const auto *ME = dyn_cast<MemberExpr>(E)) {
+ // FIXME: If the base type of the member expr is not FD->getParent(),
+ // this should not be treated as a flexible array member access.
+ if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ RecordDecl::field_iterator FI(
+ DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
+ return ++FI == FD->getParent()->field_end();
+ }
+ }
+
+ return false;
+}
+
+/// If Base is known to point to the start of an array, return the length of
+/// that array. Return nullptr if the length cannot be determined.
+static llvm::Value *getArrayIndexingBound(
+ CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType) {
+ // For the vector indexing extension, the bound is the number of elements.
+ if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
+ IndexedType = Base->getType();
+ return CGF.Builder.getInt32(VT->getNumElements());
+ }
+
+ Base = Base->IgnoreParens();
+
+ if (const auto *CE = dyn_cast<CastExpr>(Base)) {
+ if (CE->getCastKind() == CK_ArrayToPointerDecay &&
+ !isFlexibleArrayMemberExpr(CE->getSubExpr())) {
+ IndexedType = CE->getSubExpr()->getType();
+ const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
+ return CGF.Builder.getInt(CAT->getSize());
+ else if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
+ return CGF.getVLASize(VAT).first;
+ }
+ }
+
+ return nullptr;
+}
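+
+// Illustration, not part of this change: for 'int a[10];', the decayed base
+// of 'a[i]' yields a bound of 10 with IndexedType 'int[10]'; for a VLA
+// 'int a[n];', the bound is the dynamic element count from getVLASize.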
+
+void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
+ llvm::Value *Index, QualType IndexType,
+ bool Accessed) {
+ assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
+ "should not be called unless adding bounds checks");
+ SanitizerScope SanScope(this);
+
+ QualType IndexedType;
+ llvm::Value *Bound = getArrayIndexingBound(*this, Base, IndexedType);
+ if (!Bound)
+ return;
+
+ bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
+ llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
+ llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);
+
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(E->getExprLoc()),
+ EmitCheckTypeDescriptor(IndexedType),
+ EmitCheckTypeDescriptor(IndexType)
+ };
+ llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
+ : Builder.CreateICmpULE(IndexVal, BoundVal);
+ EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds), "out_of_bounds",
+ StaticData, Index);
+}
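+
+// Illustration, not part of this change: with 'int a[10];', an actual access
+// 'a[i]' checks 'i < 10' (Accessed), while merely forming '&a[i]' checks
+// 'i <= 10', since a one-past-the-end address is valid to compute but not
+// to dereference.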
+
+CodeGenFunction::ComplexPairTy CodeGenFunction::
+EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+ ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());
+
+ llvm::Value *NextVal;
+ if (isa<llvm::IntegerType>(InVal.first->getType())) {
+ uint64_t AmountVal = isInc ? 1 : -1;
+ NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
+
+ // Add the inc/dec to the real part.
+ NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
+ } else {
+ QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
+ llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
+ if (!isInc)
+ FVal.changeSign();
+ NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
+
+ // Add the inc/dec to the real part.
+ NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
+ }
+
+ ComplexPairTy IncVal(NextVal, InVal.second);
+
+ // Store the updated result through the lvalue.
+ EmitStoreOfComplex(IncVal, LV, /*init*/ false);
+
+ // If this is a postinc, return the value read from memory, otherwise use the
+ // updated value.
+ return isPre ? IncVal : InVal;
+}
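+
+// Illustration, not part of this change: for '_Complex double z; z++;' this
+// emits an 'fadd' of 1.0 on the real component only, stores {re + 1.0, im}
+// back through the lvalue, and yields the old value for a post-increment or
+// the updated one for a pre-increment.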
+
+void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
+ CodeGenFunction *CGF) {
+ // Bind VLAs in the cast type.
+ if (CGF && E->getType()->isVariablyModifiedType())
+ CGF->EmitVariablyModifiedType(E->getType());
+
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->EmitExplicitCastType(E->getType());
+}
+
+//===----------------------------------------------------------------------===//
+// LValue Expression Emission
+//===----------------------------------------------------------------------===//
+
+/// EmitPointerWithAlignment - Given an expression of pointer type, try to
+/// derive a more accurate bound on the alignment of the pointer.
+Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
+ AlignmentSource *Source) {
+ // We allow this with ObjC object pointers because of fragile ABIs.
+ assert(E->getType()->isPointerType() ||
+ E->getType()->isObjCObjectPointerType());
+ E = E->IgnoreParens();
+
+ // Casts:
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
+ CGM.EmitExplicitCastExprType(ECE, this);
+
+ switch (CE->getCastKind()) {
+ // Non-converting casts (but not C's implicit conversion from void*).
+ case CK_BitCast:
+ case CK_NoOp:
+ if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
+ if (PtrTy->getPointeeType()->isVoidType())
+ break;
+
+ AlignmentSource InnerSource;
+ Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), &InnerSource);
+ if (Source) *Source = InnerSource;
+
+ // If this is an explicit bitcast, and the source l-value is
+ // opaque, honor the alignment of the casted-to type.
+ if (isa<ExplicitCastExpr>(CE) &&
+ InnerSource != AlignmentSource::Decl) {
+ Addr = Address(Addr.getPointer(),
+ getNaturalPointeeTypeAlignment(E->getType(), Source));
+ }
+
+ if (SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
+ if (auto PT = E->getType()->getAs<PointerType>())
+ EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr.getPointer(),
+ /*MayBeNull=*/true,
+ CodeGenFunction::CFITCK_UnrelatedCast,
+ CE->getLocStart());
+ }
+
+ return Builder.CreateBitCast(Addr, ConvertType(E->getType()));
+ }
+ break;
+
+ // Array-to-pointer decay.
+ case CK_ArrayToPointerDecay:
+ return EmitArrayToPointerDecay(CE->getSubExpr(), Source);
+
+ // Derived-to-base conversions.
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBase: {
+ Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), Source);
+ auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
+ return GetAddressOfBaseClass(Addr, Derived,
+ CE->path_begin(), CE->path_end(),
+ ShouldNullCheckClassCastValue(CE),
+ CE->getExprLoc());
+ }
+
+ // TODO: Is there any reason to treat base-to-derived conversions
+ // specially?
+ default:
+ break;
+ }
+ }
+
+ // Unary &.
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ if (UO->getOpcode() == UO_AddrOf) {
+ LValue LV = EmitLValue(UO->getSubExpr());
+ if (Source) *Source = LV.getAlignmentSource();
+ return LV.getAddress();
+ }
+ }
+
+ // TODO: conditional operators, comma.
+
+ // Otherwise, use the alignment of the type.
+ CharUnits Align = getNaturalPointeeTypeAlignment(E->getType(), Source);
+ return Address(EmitScalarExpr(E), Align);
+}
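+
+// Illustration, not part of this change: for 'float f; ... (char *)&f', the
+// unary '&' path recovers the declared alignment of 'f', so the bitcast
+// keeps it; only when the source l-value is opaque does an explicit cast
+// fall back to the natural alignment of the casted-to pointee type.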
+
+RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
+ if (Ty->isVoidType())
+ return RValue::get(nullptr);
+
+ switch (getEvaluationKind(Ty)) {
+ case TEK_Complex: {
+ llvm::Type *EltTy =
+ ConvertType(Ty->castAs<ComplexType>()->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return RValue::getComplex(std::make_pair(U, U));
+ }
+
+ // If this is a use of an undefined aggregate type, the aggregate must have an
+ // identifiable address. Just because the contents of the value are undefined
+ // doesn't mean that the address can't be taken and compared.
+ case TEK_Aggregate: {
+ Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
+ return RValue::getAggregate(DestPtr);
+ }
+
+ case TEK_Scalar:
+ return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
+ }
+ llvm_unreachable("bad evaluation kind");
+}
+
+RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
+ const char *Name) {
+ ErrorUnsupported(E, Name);
+ return GetUndefRValue(E->getType());
+}
+
+LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
+ const char *Name) {
+ ErrorUnsupported(E, Name);
+ llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
+ return MakeAddrLValue(Address(llvm::UndefValue::get(Ty), CharUnits::One()),
+ E->getType());
+}
+
+LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
+ LValue LV;
+ if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
+ LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
+ else
+ LV = EmitLValue(E);
+ if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
+ EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(),
+ E->getType(), LV.getAlignment());
+ return LV;
+}
+
+/// EmitLValue - Emit code to compute a designator that specifies the location
+/// of the expression.
+///
+/// This can return one of two things: a simple address or a bitfield reference.
+/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
+/// an LLVM pointer type.
+///
+/// If this returns a bitfield reference, nothing about the pointee type of the
+/// LLVM value is known: For example, it may not be a pointer to an integer.
+///
+/// If this returns a normal address, and if the lvalue's C type is fixed size,
+/// this method guarantees that the returned pointer type will point to an LLVM
+/// type of the same size as the lvalue's type. If the lvalue has a variable
+/// length type, this is not possible.
+///
+LValue CodeGenFunction::EmitLValue(const Expr *E) {
+ ApplyDebugLocation DL(*this, E);
+ switch (E->getStmtClass()) {
+ default: return EmitUnsupportedLValue(E, "l-value expression");
+
+ case Expr::ObjCPropertyRefExprClass:
+ llvm_unreachable("cannot emit a property reference directly");
+
+ case Expr::ObjCSelectorExprClass:
+ return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
+ case Expr::ObjCIsaExprClass:
+ return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
+ case Expr::BinaryOperatorClass:
+ return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
+ case Expr::CompoundAssignOperatorClass: {
+ QualType Ty = E->getType();
+ if (const AtomicType *AT = Ty->getAs<AtomicType>())
+ Ty = AT->getValueType();
+ if (!Ty->isAnyComplexType())
+ return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
+ return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
+ }
+ case Expr::CallExprClass:
+ case Expr::CXXMemberCallExprClass:
+ case Expr::CXXOperatorCallExprClass:
+ case Expr::UserDefinedLiteralClass:
+ return EmitCallExprLValue(cast<CallExpr>(E));
+ case Expr::VAArgExprClass:
+ return EmitVAArgExprLValue(cast<VAArgExpr>(E));
+ case Expr::DeclRefExprClass:
+ return EmitDeclRefLValue(cast<DeclRefExpr>(E));
+ case Expr::ParenExprClass:
+ return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
+ case Expr::GenericSelectionExprClass:
+ return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
+ case Expr::PredefinedExprClass:
+ return EmitPredefinedLValue(cast<PredefinedExpr>(E));
+ case Expr::StringLiteralClass:
+ return EmitStringLiteralLValue(cast<StringLiteral>(E));
+ case Expr::ObjCEncodeExprClass:
+ return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
+ case Expr::PseudoObjectExprClass:
+ return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
+ case Expr::InitListExprClass:
+ return EmitInitListLValue(cast<InitListExpr>(E));
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::CXXConstructExprClass:
+ return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
+ case Expr::CXXBindTemporaryExprClass:
+ return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
+ case Expr::CXXUuidofExprClass:
+ return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
+ case Expr::LambdaExprClass:
+ return EmitLambdaLValue(cast<LambdaExpr>(E));
+
+ case Expr::ExprWithCleanupsClass: {
+ const auto *cleanups = cast<ExprWithCleanups>(E);
+ enterFullExpression(cleanups);
+ RunCleanupsScope Scope(*this);
+ return EmitLValue(cleanups->getSubExpr());
+ }
+
+ case Expr::CXXDefaultArgExprClass:
+ return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
+ case Expr::CXXDefaultInitExprClass: {
+ CXXDefaultInitExprScope Scope(*this);
+ return EmitLValue(cast<CXXDefaultInitExpr>(E)->getExpr());
+ }
+ case Expr::CXXTypeidExprClass:
+ return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
+
+ case Expr::ObjCMessageExprClass:
+ return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
+ case Expr::ObjCIvarRefExprClass:
+ return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
+ case Expr::StmtExprClass:
+ return EmitStmtExprLValue(cast<StmtExpr>(E));
+ case Expr::UnaryOperatorClass:
+ return EmitUnaryOpLValue(cast<UnaryOperator>(E));
+ case Expr::ArraySubscriptExprClass:
+ return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
+ case Expr::OMPArraySectionExprClass:
+ return EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E));
+ case Expr::ExtVectorElementExprClass:
+ return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
+ case Expr::MemberExprClass:
+ return EmitMemberExpr(cast<MemberExpr>(E));
+ case Expr::CompoundLiteralExprClass:
+ return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
+ case Expr::ConditionalOperatorClass:
+ return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
+ case Expr::BinaryConditionalOperatorClass:
+ return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
+ case Expr::ChooseExprClass:
+ return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr());
+ case Expr::OpaqueValueExprClass:
+ return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
+ case Expr::SubstNonTypeTemplateParmExprClass:
+ return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
+ case Expr::ImplicitCastExprClass:
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXFunctionalCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXDynamicCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXConstCastExprClass:
+ case Expr::ObjCBridgedCastExprClass:
+ return EmitCastLValue(cast<CastExpr>(E));
+
+ case Expr::MaterializeTemporaryExprClass:
+ return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
+ }
+}
+
+/// Given an object of the given canonical type, can we safely copy a
+/// value out of it based on its initializer?
+static bool isConstantEmittableObjectType(QualType type) {
+ assert(type.isCanonical());
+ assert(!type->isReferenceType());
+
+ // Must be const-qualified but non-volatile.
+ Qualifiers qs = type.getLocalQualifiers();
+ if (!qs.hasConst() || qs.hasVolatile()) return false;
+
+ // Otherwise, all object types satisfy this except C++ classes with
+ // mutable subobjects or non-trivial copy/destroy behavior.
+ if (const auto *RT = dyn_cast<RecordType>(type))
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (RD->hasMutableFields() || !RD->isTrivial())
+ return false;
+
+ return true;
+}
+
+/// Can we constant-emit a load of a reference to a variable of the
+/// given type? This is different from predicates like
+/// Decl::isUsableInConstantExpressions because we do want it to apply
+/// in situations that don't necessarily satisfy the language's rules
+/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
+/// to do this with const float variables even if those variables
+/// aren't marked 'constexpr'.
+enum ConstantEmissionKind {
+ CEK_None,
+ CEK_AsReferenceOnly,
+ CEK_AsValueOrReference,
+ CEK_AsValueOnly
+};
+static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
+ type = type.getCanonicalType();
+ if (const auto *ref = dyn_cast<ReferenceType>(type)) {
+ if (isConstantEmittableObjectType(ref->getPointeeType()))
+ return CEK_AsValueOrReference;
+ return CEK_AsReferenceOnly;
+ }
+ if (isConstantEmittableObjectType(type))
+ return CEK_AsValueOnly;
+ return CEK_None;
+}
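+
+// Illustration, not part of this change, for a variable of each type:
+//   const int   -> CEK_AsValueOnly        (the load can fold to the value)
+//   const int & -> CEK_AsValueOrReference (value or address may fold)
+//   int &       -> CEK_AsReferenceOnly    (only the referent's address)
+//   int         -> CEK_None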
+
+/// Try to emit a reference to the given value without producing it as
+/// an l-value. This is actually more than an optimization: we can't
+/// produce an l-value for variables that we never actually captured
+/// in a block or lambda, which means const int variables or constexpr
+/// literals or similar.
+CodeGenFunction::ConstantEmission
+CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
+ ValueDecl *value = refExpr->getDecl();
+
+ // The value needs to be an enum constant or a constant variable.
+ ConstantEmissionKind CEK;
+ if (isa<ParmVarDecl>(value)) {
+ CEK = CEK_None;
+ } else if (auto *var = dyn_cast<VarDecl>(value)) {
+ CEK = checkVarTypeForConstantEmission(var->getType());
+ } else if (isa<EnumConstantDecl>(value)) {
+ CEK = CEK_AsValueOnly;
+ } else {
+ CEK = CEK_None;
+ }
+ if (CEK == CEK_None) return ConstantEmission();
+
+ Expr::EvalResult result;
+ bool resultIsReference;
+ QualType resultType;
+
+ // It's best to evaluate all the way as an r-value if that's permitted.
+ if (CEK != CEK_AsReferenceOnly &&
+ refExpr->EvaluateAsRValue(result, getContext())) {
+ resultIsReference = false;
+ resultType = refExpr->getType();
+
+ // Otherwise, try to evaluate as an l-value.
+ } else if (CEK != CEK_AsValueOnly &&
+ refExpr->EvaluateAsLValue(result, getContext())) {
+ resultIsReference = true;
+ resultType = value->getType();
+
+ // Failure.
+ } else {
+ return ConstantEmission();
+ }
+
+ // In any case, if the initializer has side-effects, abandon ship.
+ if (result.HasSideEffects)
+ return ConstantEmission();
+
+ // Emit as a constant.
+ llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);
+
+ // Make sure we emit a debug reference to the global variable.
+ // This should probably fire even for variables that must be emitted.
+ if (isa<VarDecl>(value)) {
+ if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
+ EmitDeclRefExprDbgValue(refExpr, C);
+ } else {
+ assert(isa<EnumConstantDecl>(value));
+ EmitDeclRefExprDbgValue(refExpr, C);
+ }
+
+ // If we emitted a reference constant, we need to dereference that.
+ if (resultIsReference)
+ return ConstantEmission::forReference(C);
+
+ return ConstantEmission::forValue(C);
+}
+
+llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
+ SourceLocation Loc) {
+ return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
+ lvalue.getType(), Loc, lvalue.getAlignmentSource(),
+ lvalue.getTBAAInfo(),
+ lvalue.getTBAABaseType(), lvalue.getTBAAOffset(),
+ lvalue.isNontemporal());
+}
+
+static bool hasBooleanRepresentation(QualType Ty) {
+ if (Ty->isBooleanType())
+ return true;
+
+ if (const EnumType *ET = Ty->getAs<EnumType>())
+ return ET->getDecl()->getIntegerType()->isBooleanType();
+
+ if (const AtomicType *AT = Ty->getAs<AtomicType>())
+ return hasBooleanRepresentation(AT->getValueType());
+
+ return false;
+}
+
+static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
+ llvm::APInt &Min, llvm::APInt &End,
+ bool StrictEnums) {
+ const EnumType *ET = Ty->getAs<EnumType>();
+ bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
+ ET && !ET->getDecl()->isFixed();
+ bool IsBool = hasBooleanRepresentation(Ty);
+ if (!IsBool && !IsRegularCPlusPlusEnum)
+ return false;
+
+ if (IsBool) {
+ Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
+ End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
+ } else {
+ const EnumDecl *ED = ET->getDecl();
+ llvm::Type *LTy = CGF.ConvertTypeForMem(ED->getIntegerType());
+ unsigned Bitwidth = LTy->getScalarSizeInBits();
+ unsigned NumNegativeBits = ED->getNumNegativeBits();
+ unsigned NumPositiveBits = ED->getNumPositiveBits();
+
+ if (NumNegativeBits) {
+ unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
+ assert(NumBits <= Bitwidth);
+ End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
+ Min = -End;
+ } else {
+ assert(NumPositiveBits <= Bitwidth);
+ End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
+ Min = llvm::APInt(Bitwidth, 0);
+ }
+ }
+ return true;
+}
+
+llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
+ llvm::APInt Min, End;
+ if (!getRangeForType(*this, Ty, Min, End,
+ CGM.getCodeGenOpts().StrictEnums))
+ return nullptr;
+
+ llvm::MDBuilder MDHelper(getLLVMContext());
+ return MDHelper.createRange(Min, End);
+}
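+
+// Illustration, not part of this change: a bool load gets '!range [0, 2)';
+// with C++ -fstrict-enums, a load of 'enum E { A, B, C }' gets
+// '!range [0, 4)' (2 positive bits, no negative values).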
+
+llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
+ QualType Ty,
+ SourceLocation Loc,
+ AlignmentSource AlignSource,
+ llvm::MDNode *TBAAInfo,
+ QualType TBAABaseType,
+ uint64_t TBAAOffset,
+ bool isNontemporal) {
+ // For better performance, handle vector loads differently.
+ if (Ty->isVectorType()) {
+ const llvm::Type *EltTy = Addr.getElementType();
+
+ const auto *VTy = cast<llvm::VectorType>(EltTy);
+
+ // Handle vectors of size 3 like size 4 for better performance.
+ if (VTy->getNumElements() == 3) {
+
+ // Bitcast to vec4 type.
+ llvm::VectorType *vec4Ty = llvm::VectorType::get(VTy->getElementType(),
+ 4);
+ Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
+ // Now load value.
+ llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
+
+ // Shuffle vector to get vec3.
+ V = Builder.CreateShuffleVector(V, llvm::UndefValue::get(vec4Ty),
+ {0, 1, 2}, "extractVec");
+ return EmitFromMemory(V, Ty);
+ }
+ }
+
+ // Atomic operations have to be done on integral types.
+ if (Ty->isAtomicType() || typeIsSuitableForInlineAtomic(Ty, Volatile)) {
+ LValue lvalue =
+ LValue::MakeAddr(Addr, Ty, getContext(), AlignSource, TBAAInfo);
+ return EmitAtomicLoad(lvalue, Loc).getScalarVal();
+ }
+
+ llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
+ if (isNontemporal) {
+ llvm::MDNode *Node = llvm::MDNode::get(
+ Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
+ Load->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
+ }
+ if (TBAAInfo) {
+ llvm::MDNode *TBAAPath = CGM.getTBAAStructTagInfo(TBAABaseType, TBAAInfo,
+ TBAAOffset);
+ if (TBAAPath)
+ CGM.DecorateInstructionWithTBAA(Load, TBAAPath,
+ false /*ConvertTypeToTag*/);
+ }
+
+ bool NeedsBoolCheck =
+ SanOpts.has(SanitizerKind::Bool) && hasBooleanRepresentation(Ty);
+ bool NeedsEnumCheck =
+ SanOpts.has(SanitizerKind::Enum) && Ty->getAs<EnumType>();
+ if (NeedsBoolCheck || NeedsEnumCheck) {
+ SanitizerScope SanScope(this);
+ llvm::APInt Min, End;
+ if (getRangeForType(*this, Ty, Min, End, true)) {
+ --End;
+ llvm::Value *Check;
+ if (!Min)
+ Check = Builder.CreateICmpULE(
+ Load, llvm::ConstantInt::get(getLLVMContext(), End));
+ else {
+ llvm::Value *Upper = Builder.CreateICmpSLE(
+ Load, llvm::ConstantInt::get(getLLVMContext(), End));
+ llvm::Value *Lower = Builder.CreateICmpSGE(
+ Load, llvm::ConstantInt::get(getLLVMContext(), Min));
+ Check = Builder.CreateAnd(Upper, Lower);
+ }
+ llvm::Constant *StaticArgs[] = {
+ EmitCheckSourceLocation(Loc),
+ EmitCheckTypeDescriptor(Ty)
+ };
+ SanitizerMask Kind = NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
+ EmitCheck(std::make_pair(Check, Kind), "load_invalid_value", StaticArgs,
+ EmitCheckValue(Load));
+ }
+ } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
+ if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
+ Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
+
+ return EmitFromMemory(Load, Ty);
+}
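+
+// Illustration, not part of this change: the vec3 fast path above turns a
+// load of '<3 x float>' into a '<4 x float>' load followed by
+// 'shufflevector ... <0, 1, 2>', which is typically cheaper than a 12-byte
+// load on common targets.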
+
+llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
+ // Bool has a different representation in memory than in registers.
+ if (hasBooleanRepresentation(Ty)) {
+ // This should really always be an i1, but sometimes it's already
+ // an i8, and it's awkward to track those cases down.
+ if (Value->getType()->isIntegerTy(1))
+ return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
+ assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
+ "wrong value rep of bool");
+ }
+
+ return Value;
+}
+
+llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
+ // Bool has a different representation in memory than in registers.
+ if (hasBooleanRepresentation(Ty)) {
+ assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
+ "wrong value rep of bool");
+ return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
+ }
+
+ return Value;
+}
+
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
+ bool Volatile, QualType Ty,
+ AlignmentSource AlignSource,
+ llvm::MDNode *TBAAInfo,
+ bool isInit, QualType TBAABaseType,
+ uint64_t TBAAOffset,
+ bool isNontemporal) {
+
+ // Handle vectors differently to get better performance.
+ if (Ty->isVectorType()) {
+ llvm::Type *SrcTy = Value->getType();
+ auto *VecTy = cast<llvm::VectorType>(SrcTy);
+ // Handle vec3 specially.
+ if (VecTy->getNumElements() == 3) {
+ // Our source is a vec3, do a shuffle vector to make it a vec4.
+ llvm::Constant *Mask[] = {Builder.getInt32(0), Builder.getInt32(1),
+ Builder.getInt32(2),
+ llvm::UndefValue::get(Builder.getInt32Ty())};
+ llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
+ Value = Builder.CreateShuffleVector(Value,
+ llvm::UndefValue::get(VecTy),
+ MaskV, "extractVec");
+ SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
+ }
+ if (Addr.getElementType() != SrcTy) {
+ Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp");
+ }
+ }
+
+ Value = EmitToMemory(Value, Ty);
+
+ if (Ty->isAtomicType() ||
+ (!isInit && typeIsSuitableForInlineAtomic(Ty, Volatile))) {
+ EmitAtomicStore(RValue::get(Value),
+ LValue::MakeAddr(Addr, Ty, getContext(),
+ AlignSource, TBAAInfo),
+ isInit);
+ return;
+ }
+
+ llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
+ if (isNontemporal) {
+ llvm::MDNode *Node =
+ llvm::MDNode::get(Store->getContext(),
+ llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
+ Store->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
+ }
+ if (TBAAInfo) {
+ llvm::MDNode *TBAAPath = CGM.getTBAAStructTagInfo(TBAABaseType, TBAAInfo,
+ TBAAOffset);
+ if (TBAAPath)
+ CGM.DecorateInstructionWithTBAA(Store, TBAAPath,
+ false /*ConvertTypeToTag*/);
+ }
+}
+
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
+ bool isInit) {
+ EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
+ lvalue.getType(), lvalue.getAlignmentSource(),
+ lvalue.getTBAAInfo(), isInit, lvalue.getTBAABaseType(),
+ lvalue.getTBAAOffset(), lvalue.isNontemporal());
+}
+
+/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
+/// method emits the address of the lvalue, then loads the result as an rvalue,
+/// returning the rvalue.
+RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
+ if (LV.isObjCWeak()) {
+ // load of a __weak object.
+ Address AddrWeakObj = LV.getAddress();
+ return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
+ AddrWeakObj));
+ }
+ if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
+ // In MRC mode, we do a load+autorelease.
+ if (!getLangOpts().ObjCAutoRefCount) {
+ return RValue::get(EmitARCLoadWeak(LV.getAddress()));
+ }
+
+ // In ARC mode, we load retained and then consume the value.
+ llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
+ Object = EmitObjCConsumeObject(LV.getType(), Object);
+ return RValue::get(Object);
+ }
+
+ if (LV.isSimple()) {
+ assert(!LV.getType()->isFunctionType());
+
+ // Everything needs a load.
+ return RValue::get(EmitLoadOfScalar(LV, Loc));
+ }
+
+ if (LV.isVectorElt()) {
+ llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
+ LV.isVolatileQualified());
+ return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
+ "vecext"));
+ }
+
+ // If this is a reference to a subset of the elements of a vector, either
+ // shuffle the input or extract/insert them as appropriate.
+ if (LV.isExtVectorElt())
+ return EmitLoadOfExtVectorElementLValue(LV);
+
+ // Global register variables always invoke intrinsics.
+ if (LV.isGlobalReg())
+ return EmitLoadOfGlobalRegLValue(LV);
+
+ assert(LV.isBitField() && "Unknown LValue type!");
+ return EmitLoadOfBitfieldLValue(LV);
+}
+
+RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
+ const CGBitFieldInfo &Info = LV.getBitFieldInfo();
+
+ // Get the output type.
+ llvm::Type *ResLTy = ConvertType(LV.getType());
+
+ Address Ptr = LV.getBitFieldAddress();
+ llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
+
+ if (Info.IsSigned) {
+ assert(static_cast<unsigned>(Info.Offset + Info.Size) <= Info.StorageSize);
+ unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size;
+ if (HighBits)
+ Val = Builder.CreateShl(Val, HighBits, "bf.shl");
+ if (Info.Offset + HighBits)
+ Val = Builder.CreateAShr(Val, Info.Offset + HighBits, "bf.ashr");
+ } else {
+ if (Info.Offset)
+ Val = Builder.CreateLShr(Val, Info.Offset, "bf.lshr");
+ if (static_cast<unsigned>(Info.Offset) + Info.Size < Info.StorageSize)
+ Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(Info.StorageSize,
+ Info.Size),
+ "bf.clear");
+ }
+ Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
+
+ return RValue::get(Val);
+}
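+
+// Worked example, not part of this change: for 'struct { int x : 3, y : 5; }'
+// packed into a single 8-bit storage unit, loading the signed field 'y'
+// (Offset = 3, Size = 5, StorageSize = 8) gives HighBits == 0, so only
+// 'ashr i8 %bf.load, 3' is emitted before widening to the result type.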
+
+// If this is a reference to a subset of the elements of a vector, create an
+// appropriate shufflevector.
+RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
+ llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
+ LV.isVolatileQualified());
+
+ const llvm::Constant *Elts = LV.getExtVectorElts();
+
+ // If the result of the expression is a non-vector type, we must be extracting
+ // a single element. Just codegen as an extractelement.
+ const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
+ if (!ExprVT) {
+ unsigned InIdx = getAccessedFieldNo(0, Elts);
+ llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
+ return RValue::get(Builder.CreateExtractElement(Vec, Elt));
+ }
+
+ // Always use a shuffle vector to try to retain the original program structure.
+ unsigned NumResultElts = ExprVT->getNumElements();
+
+ SmallVector<llvm::Constant*, 4> Mask;
+ for (unsigned i = 0; i != NumResultElts; ++i)
+ Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));
+
+ llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
+ Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
+ MaskV);
+ return RValue::get(Vec);
+}
+
+/// @brief Generates an lvalue for a partial ext_vector access.
+Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
+ Address VectorAddress = LV.getExtVectorAddress();
+ const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
+ QualType EQT = ExprVT->getElementType();
+ llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
+
+ Address CastToPointerElement =
+ Builder.CreateElementBitCast(VectorAddress, VectorElementTy,
+ "conv.ptr.element");
+
+ const llvm::Constant *Elts = LV.getExtVectorElts();
+ unsigned ix = getAccessedFieldNo(0, Elts);
+
+ Address VectorBasePtrPlusIx =
+ Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
+ getContext().getTypeSizeInChars(EQT),
+ "vector.elt");
+
+ return VectorBasePtrPlusIx;
+}
+
+/// @brief Loads of global named registers are always calls to intrinsics.
+RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
+ assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
+ "Bad type for register variable");
+ llvm::MDNode *RegName = cast<llvm::MDNode>(
+ cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
+
+ // We accept integer and pointer types only
+ llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
+ llvm::Type *Ty = OrigTy;
+ if (OrigTy->isPointerTy())
+ Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
+ llvm::Type *Types[] = { Ty };
+
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
+ llvm::Value *Call = Builder.CreateCall(
+ F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
+ if (OrigTy->isPointerTy())
+ Call = Builder.CreateIntToPtr(Call, OrigTy);
+ return RValue::get(Call);
+}
+
+/// EmitStoreThroughLValue - Store the specified rvalue into the specified
+/// lvalue, where both are guaranteed to have the same type, and that type
+/// is 'Ty'.
+void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
+ bool isInit) {
+ if (!Dst.isSimple()) {
+ if (Dst.isVectorElt()) {
+ // Read/modify/write the vector, inserting the new element.
+ llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
+ Dst.isVolatileQualified());
+ Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
+ Dst.getVectorIdx(), "vecins");
+ Builder.CreateStore(Vec, Dst.getVectorAddress(),
+ Dst.isVolatileQualified());
+ return;
+ }
+
+ // If this is an update of extended vector elements, insert them as
+ // appropriate.
+ if (Dst.isExtVectorElt())
+ return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
+
+ if (Dst.isGlobalReg())
+ return EmitStoreThroughGlobalRegLValue(Src, Dst);
+
+ assert(Dst.isBitField() && "Unknown LValue type");
+ return EmitStoreThroughBitfieldLValue(Src, Dst);
+ }
+
+ // There's special magic for assigning into an ARC-qualified l-value.
+ if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
+ switch (Lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing special
+ break;
+
+ case Qualifiers::OCL_Strong:
+ EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
+ return;
+
+ case Qualifiers::OCL_Weak:
+ EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
+ return;
+
+ case Qualifiers::OCL_Autoreleasing:
+ Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
+ Src.getScalarVal()));
+ // fall into the normal path
+ break;
+ }
+ }
+
+ if (Dst.isObjCWeak() && !Dst.isNonGC()) {
+ // load of a __weak object.
+ Address LvalueDst = Dst.getAddress();
+ llvm::Value *src = Src.getScalarVal();
+ CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
+ return;
+ }
+
+ if (Dst.isObjCStrong() && !Dst.isNonGC()) {
+ // load of a __strong object.
+ Address LvalueDst = Dst.getAddress();
+ llvm::Value *src = Src.getScalarVal();
+ if (Dst.isObjCIvar()) {
+ assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
+ llvm::Type *ResultType = IntPtrTy;
+ Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
+ llvm::Value *RHS = dst.getPointer();
+ RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
+ llvm::Value *LHS =
+ Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType,
+ "sub.ptr.lhs.cast");
+ llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
+ CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
+ BytesBetween);
+ } else if (Dst.isGlobalObjCRef()) {
+ CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
+ Dst.isThreadLocalRef());
+ }
+ else
+ CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
+ return;
+ }
+
+ assert(Src.isScalar() && "Can't emit an agg store with this method");
+ EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
+}
+
+void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
+ llvm::Value **Result) {
+ const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
+ llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
+ Address Ptr = Dst.getBitFieldAddress();
+
+ // Get the source value, truncated to the width of the bit-field.
+ llvm::Value *SrcVal = Src.getScalarVal();
+
+ // Cast the source to the storage type and shift it into place.
+ SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
+ /*IsSigned=*/false);
+ llvm::Value *MaskedVal = SrcVal;
+
+ // See if there are other bits in the bitfield's storage we'll need to load
+ // and mask together with the source before storing.
+ if (Info.StorageSize != Info.Size) {
+ assert(Info.StorageSize > Info.Size && "Invalid bitfield size.");
+ llvm::Value *Val =
+ Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
+
+ // Mask the source value as needed.
+ if (!hasBooleanRepresentation(Dst.getType()))
+ SrcVal = Builder.CreateAnd(SrcVal,
+ llvm::APInt::getLowBitsSet(Info.StorageSize,
+ Info.Size),
+ "bf.value");
+ MaskedVal = SrcVal;
+ if (Info.Offset)
+ SrcVal = Builder.CreateShl(SrcVal, Info.Offset, "bf.shl");
+
+ // Mask out the original value.
+ Val = Builder.CreateAnd(Val,
+ ~llvm::APInt::getBitsSet(Info.StorageSize,
+ Info.Offset,
+ Info.Offset + Info.Size),
+ "bf.clear");
+
+ // Or together the unchanged values and the source value.
+ SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
+ } else {
+ assert(Info.Offset == 0);
+ }
+
+ // Write the new value back out.
+ Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
+
+ // Return the new value of the bit-field, if requested.
+ if (Result) {
+ llvm::Value *ResultVal = MaskedVal;
+
+ // Sign extend the value if needed.
+ if (Info.IsSigned) {
+ assert(Info.Size <= Info.StorageSize);
+ unsigned HighBits = Info.StorageSize - Info.Size;
+ if (HighBits) {
+ ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
+ ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
+ }
+ }
+
+ ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
+ "bf.result.cast");
+ *Result = EmitFromMemory(ResultVal, Dst.getType());
+ }
+}
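+
+// Worked example, not part of this change, same layout as above (Offset = 3,
+// Size = 5, StorageSize = 8): storing to 'y' emits roughly
+//   %v   = and i8 %src, 31        ; bf.value: keep the low 5 bits
+//   %s   = shl i8 %v, 3           ; bf.shl:   move into position
+//   %c   = and i8 %bf.load, 7     ; bf.clear: ~getBitsSet(8, 3, 8)
+//   %set = or i8 %c, %s           ; bf.set
+// and stores %set back to the storage unit.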
+
+void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
+ LValue Dst) {
+ // This access turns into a read/modify/write of the vector. Load the input
+ // value now.
+ llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddress(),
+ Dst.isVolatileQualified());
+ const llvm::Constant *Elts = Dst.getExtVectorElts();
+
+ llvm::Value *SrcVal = Src.getScalarVal();
+
+ if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
+ unsigned NumSrcElts = VTy->getNumElements();
+ unsigned NumDstElts =
+ cast<llvm::VectorType>(Vec->getType())->getNumElements();
+ if (NumDstElts == NumSrcElts) {
+ // Use a shuffle vector when the src and destination have the same number
+ // of elements, and invert the access mask since it indexes the side that
+ // will be stored.
+ SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
+ for (unsigned i = 0; i != NumSrcElts; ++i)
+ Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);
+
+ llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
+ Vec = Builder.CreateShuffleVector(SrcVal,
+ llvm::UndefValue::get(Vec->getType()),
+ MaskV);
+ } else if (NumDstElts > NumSrcElts) {
+ // Extend the source vector to the same length and then shuffle it
+ // into the destination.
+ // FIXME: since we're shuffling with undef, can we just use the indices
+ // into that? This could be simpler.
+ SmallVector<llvm::Constant*, 4> ExtMask;
+ for (unsigned i = 0; i != NumSrcElts; ++i)
+ ExtMask.push_back(Builder.getInt32(i));
+ ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
+ llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
+ llvm::Value *ExtSrcVal =
+ Builder.CreateShuffleVector(SrcVal,
+ llvm::UndefValue::get(SrcVal->getType()),
+ ExtMaskV);
+ // build identity
+ SmallVector<llvm::Constant*, 4> Mask;
+ for (unsigned i = 0; i != NumDstElts; ++i)
+ Mask.push_back(Builder.getInt32(i));
+
+ // When the vector size is odd and .odd or .hi is used, the last element
+ // of the Elts constant array will be one past the size of the vector.
+ // Ignore the last element here, if it is greater than the mask size.
+ if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
+ NumSrcElts--;
+
+ // Modify the mask to select what gets shuffled in from the source.
+ for (unsigned i = 0; i != NumSrcElts; ++i)
+ Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
+ llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
+ Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
+ } else {
+ // We should never shorten the vector
+ llvm_unreachable("unexpected shorten vector length");
+ }
+ } else {
+ // If the Src is a scalar (not a vector) it must be updating one element.
+ unsigned InIdx = getAccessedFieldNo(0, Elts);
+ llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
+ Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
+ }
+
+ Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
+ Dst.isVolatileQualified());
+}
+
+/// @brief Stores of global named registers are always calls to intrinsics.
+void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
+ assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
+ "Bad type for register variable");
+ llvm::MDNode *RegName = cast<llvm::MDNode>(
+ cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
+ assert(RegName && "Register LValue is not metadata");
+
+ // We accept integer and pointer types only
+ llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
+ llvm::Type *Ty = OrigTy;
+ if (OrigTy->isPointerTy())
+ Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
+ llvm::Type *Types[] = { Ty };
+
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
+ llvm::Value *Value = Src.getScalarVal();
+ if (OrigTy->isPointerTy())
+ Value = Builder.CreatePtrToInt(Value, Ty);
+ Builder.CreateCall(
+ F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
+}
+
+// setObjCGCLValueClass - sets the class of the lvalue for the purpose of
+// generating the write-barrier API. It is currently a global, an ivar,
+// or neither.
+static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
+ LValue &LV,
+ bool IsMemberAccess=false) {
+ if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
+ return;
+
+ if (isa<ObjCIvarRefExpr>(E)) {
+ QualType ExpTy = E->getType();
+ if (IsMemberAccess && ExpTy->isPointerType()) {
+ // If the ivar is a structure pointer, assigning to a field of
+ // this struct follows gcc's behavior and conservatively makes it
+ // a non-ivar write-barrier.
+ ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
+ if (ExpTy->isRecordType()) {
+ LV.setObjCIvar(false);
+ return;
+ }
+ }
+ LV.setObjCIvar(true);
+ auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
+ LV.setBaseIvarExp(Exp->getBase());
+ LV.setObjCArray(E->getType()->isArrayType());
+ return;
+ }
+
+ if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
+ if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
+ if (VD->hasGlobalStorage()) {
+ LV.setGlobalObjCRef(true);
+ LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
+ }
+ }
+ LV.setObjCArray(E->getType()->isArrayType());
+ return;
+ }
+
+ if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
+ return;
+ }
+
+ if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
+ if (LV.isObjCIvar()) {
+ // If the cast is to a structure pointer, follow gcc's behavior and make it
+ // a non-ivar write-barrier.
+ QualType ExpTy = E->getType();
+ if (ExpTy->isPointerType())
+ ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
+ if (ExpTy->isRecordType())
+ LV.setObjCIvar(false);
+ }
+ return;
+ }
+
+ if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
+ return;
+ }
+
+ if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
+ return;
+ }
+
+ if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
+ return;
+ }
+
+ if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
+ return;
+ }
+
+ if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
+ if (LV.isObjCIvar() && !LV.isObjCArray())
+ // Using array syntax to assign to what an ivar points to is not the
+ // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
+ LV.setObjCIvar(false);
+ else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
+ // Using array syntax to assign to what a global points to is not the
+ // same as assigning to the global itself. {id *G;} G[i] = 0;
+ LV.setGlobalObjCRef(false);
+ return;
+ }
+
+ if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
+ // We don't know if the member is an 'ivar', but this flag is looked at
+ // only in the context of LV.isObjCIvar().
+ LV.setObjCArray(E->getType()->isArrayType());
+ return;
+ }
+}
+
+static llvm::Value *
+EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
+ llvm::Value *V, llvm::Type *IRType,
+ StringRef Name = StringRef()) {
+ unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
+ return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
+}
+
+static LValue EmitThreadPrivateVarDeclLValue(
+ CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
+ llvm::Type *RealVarTy, SourceLocation Loc) {
+ Addr = CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
+ Addr = CGF.Builder.CreateElementBitCast(Addr, RealVarTy);
+ return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
+}
+
+Address CodeGenFunction::EmitLoadOfReference(Address Addr,
+ const ReferenceType *RefTy,
+ AlignmentSource *Source) {
+ llvm::Value *Ptr = Builder.CreateLoad(Addr);
+ return Address(Ptr, getNaturalTypeAlignment(RefTy->getPointeeType(),
+ Source, /*forPointee*/ true));
+}
+
+LValue CodeGenFunction::EmitLoadOfReferenceLValue(Address RefAddr,
+ const ReferenceType *RefTy) {
+ AlignmentSource Source;
+ Address Addr = EmitLoadOfReference(RefAddr, RefTy, &Source);
+ return MakeAddrLValue(Addr, RefTy->getPointeeType(), Source);
+}
+
+static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
+ const Expr *E, const VarDecl *VD) {
+ QualType T = E->getType();
+
+ // If it's thread_local, emit a call to its wrapper function instead.
+ if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
+ CGF.CGM.getCXXABI().usesThreadWrapperFunction())
+ return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
+
+ llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
+ llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
+ V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
+ CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
+ Address Addr(V, Alignment);
+ LValue LV;
+ // Emit reference to the private copy of the variable if it is an OpenMP
+ // threadprivate variable.
+ if (CGF.getLangOpts().OpenMP && VD->hasAttr<OMPThreadPrivateDeclAttr>())
+ return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
+ E->getExprLoc());
+ if (auto RefTy = VD->getType()->getAs<ReferenceType>()) {
+ LV = CGF.EmitLoadOfReferenceLValue(Addr, RefTy);
+ } else {
+ LV = CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
+ }
+ setObjCGCLValueClass(CGF.getContext(), E, LV);
+ return LV;
+}
+
+static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
+ const Expr *E, const FunctionDecl *FD) {
+ llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
+ if (!FD->hasPrototype()) {
+ if (const FunctionProtoType *Proto =
+ FD->getType()->getAs<FunctionProtoType>()) {
+ // Ugly case: for a K&R-style definition, the type of the definition
+ // isn't the same as the type of a use. Correct for this with a
+ // bitcast.
+ QualType NoProtoType =
+ CGF.getContext().getFunctionNoProtoType(Proto->getReturnType());
+ NoProtoType = CGF.getContext().getPointerType(NoProtoType);
+ V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
+ }
+ }
+ CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
+ return CGF.MakeAddrLValue(V, E->getType(), Alignment, AlignmentSource::Decl);
+}
+
+static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
+ llvm::Value *ThisValue) {
+ QualType TagType = CGF.getContext().getTagDeclType(FD->getParent());
+ LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType);
+ return CGF.EmitLValueForField(LV, FD);
+}
+
+/// Named registers are named metadata pointing to the register name
+/// which will be read from/written to as an argument to the intrinsics
+/// @llvm.read_register / @llvm.write_register.
+/// So far, only the name is being passed down, but other options such as
+/// register type, allocation type or even optimization options could be
+/// passed down via the metadata node.
+static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
+ SmallString<64> Name("llvm.named.register.");
+ AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
+ assert(Asm->getLabel().size() < 64-Name.size() &&
+ "Register name too big");
+ Name.append(Asm->getLabel());
+ llvm::NamedMDNode *M =
+ CGM.getModule().getOrInsertNamedMetadata(Name);
+ if (M->getNumOperands() == 0) {
+ llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
+ Asm->getLabel());
+ llvm::Metadata *Ops[] = {Str};
+ M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
+ }
+
+ CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
+
+ llvm::Value *Ptr =
+ llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
+ return LValue::MakeGlobalReg(Address(Ptr, Alignment), VD->getType());
+}
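+
+// Illustration, not part of this change: for
+//   register unsigned long current_sp asm("sp");
+// this produces named metadata 'llvm.named.register.sp' wrapping !{!"sp"},
+// and subsequent reads/writes lower to @llvm.read_register /
+// @llvm.write_register with that metadata as the register argument.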
+
+LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
+ const NamedDecl *ND = E->getDecl();
+ QualType T = E->getType();
+
+ if (const auto *VD = dyn_cast<VarDecl>(ND)) {
+ // Global named registers are accessed via intrinsics only.
+ if (VD->getStorageClass() == SC_Register &&
+ VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
+ return EmitGlobalNamedRegister(VD, CGM);
+
+ // A DeclRefExpr for a reference initialized by a constant expression can
+ // appear without being odr-used. Directly emit the constant initializer.
+ const Expr *Init = VD->getAnyInitializer(VD);
+ if (Init && !isa<ParmVarDecl>(VD) && VD->getType()->isReferenceType() &&
+ VD->isUsableInConstantExpressions(getContext()) &&
+ VD->checkInitIsICE() &&
+ // Do not emit if it is private OpenMP variable.
+ !(E->refersToEnclosingVariableOrCapture() && CapturedStmtInfo &&
+ LocalDeclMap.count(VD))) {
+ llvm::Constant *Val =
+ CGM.EmitConstantValue(*VD->evaluateValue(), VD->getType(), this);
+ assert(Val && "failed to emit reference constant expression");
+ // FIXME: Eventually we will want to emit vector element references.
+
+ // Should we be using the alignment of the constant pointer we emitted?
+ CharUnits Alignment = getNaturalTypeAlignment(E->getType(), nullptr,
+ /*pointee*/ true);
+
+ return MakeAddrLValue(Address(Val, Alignment), T, AlignmentSource::Decl);
+ }
+
+ // Check for captured variables.
+ if (E->refersToEnclosingVariableOrCapture()) {
+ if (auto *FD = LambdaCaptureFields.lookup(VD))
+ return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
+ else if (CapturedStmtInfo) {
+ auto it = LocalDeclMap.find(VD);
+ if (it != LocalDeclMap.end()) {
+ if (auto RefTy = VD->getType()->getAs<ReferenceType>()) {
+ return EmitLoadOfReferenceLValue(it->second, RefTy);
+ }
+ return MakeAddrLValue(it->second, T);
+ }
+ LValue CapLVal =
+ EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
+ CapturedStmtInfo->getContextValue());
+ return MakeAddrLValue(
+ Address(CapLVal.getPointer(), getContext().getDeclAlign(VD)),
+ CapLVal.getType(), AlignmentSource::Decl);
+ }
+
+ assert(isa<BlockDecl>(CurCodeDecl));
+ Address addr = GetAddrOfBlockDecl(VD, VD->hasAttr<BlocksAttr>());
+ return MakeAddrLValue(addr, T, AlignmentSource::Decl);
+ }
+ }
+
+ // FIXME: We should be able to assert this for FunctionDecls as well!
+ // FIXME: We should be able to assert this for all DeclRefExprs, not just
+ // those with a valid source location.
+ assert((ND->isUsed(false) || !isa<VarDecl>(ND) ||
+ !E->getLocation().isValid()) &&
+ "Should not use decl without marking it used!");
+
+ if (ND->hasAttr<WeakRefAttr>()) {
+ const auto *VD = cast<ValueDecl>(ND);
+ ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
+ return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
+ }
+
+ if (const auto *VD = dyn_cast<VarDecl>(ND)) {
+ // Check if this is a global variable.
+ if (VD->hasLinkage() || VD->isStaticDataMember())
+ return EmitGlobalVarDeclLValue(*this, E, VD);
+
+ Address addr = Address::invalid();
+
+ // The variable should generally be present in the local decl map.
+ auto iter = LocalDeclMap.find(VD);
+ if (iter != LocalDeclMap.end()) {
+ addr = iter->second;
+
+ // Otherwise, it might be a static local we haven't emitted yet for
+ // some reason; most likely, because it's in an outer function.
+ } else if (VD->isStaticLocal()) {
+ addr = Address(CGM.getOrCreateStaticVarDecl(
+ *VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false)),
+ getContext().getDeclAlign(VD));
+
+ // No other cases for now.
+ } else {
+ llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
+ }
+
+ // Check for OpenMP threadprivate variables.
+ if (getLangOpts().OpenMP && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
+ return EmitThreadPrivateVarDeclLValue(
+ *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
+ E->getExprLoc());
+ }
+
+ // Drill into block byref variables.
+ bool isBlockByref = VD->hasAttr<BlocksAttr>();
+ if (isBlockByref) {
+ addr = emitBlockByrefAddress(addr, VD);
+ }
+
+ // Drill into reference types.
+ LValue LV;
+ if (auto RefTy = VD->getType()->getAs<ReferenceType>()) {
+ LV = EmitLoadOfReferenceLValue(addr, RefTy);
+ } else {
+ LV = MakeAddrLValue(addr, T, AlignmentSource::Decl);
+ }
+
+ bool isLocalStorage = VD->hasLocalStorage();
+
+ bool NonGCable = isLocalStorage &&
+ !VD->getType()->isReferenceType() &&
+ !isBlockByref;
+ if (NonGCable) {
+ LV.getQuals().removeObjCGCAttr();
+ LV.setNonGC(true);
+ }
+
+ bool isImpreciseLifetime =
+ (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
+ if (isImpreciseLifetime)
+ LV.setARCPreciseLifetime(ARCImpreciseLifetime);
+ setObjCGCLValueClass(getContext(), E, LV);
+ return LV;
+ }
+
+ if (const auto *FD = dyn_cast<FunctionDecl>(ND))
+ return EmitFunctionDeclLValue(*this, E, FD);
+
+ llvm_unreachable("Unhandled DeclRefExpr");
+}
+
+LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
+ // __extension__ doesn't affect lvalue-ness.
+ if (E->getOpcode() == UO_Extension)
+ return EmitLValue(E->getSubExpr());
+
+ QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
+ switch (E->getOpcode()) {
+ default: llvm_unreachable("Unknown unary operator lvalue!");
+ case UO_Deref: {
+ QualType T = E->getSubExpr()->getType()->getPointeeType();
+ assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
+
+ AlignmentSource AlignSource;
+ Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &AlignSource);
+ LValue LV = MakeAddrLValue(Addr, T, AlignSource);
+ LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
+
+ // We should not generate a __weak write barrier on an indirect reference
+ // to a pointer to an object; as in void foo (__weak id *param); *param = 0;
+ // But we continue to generate a __strong write barrier on an indirect
+ // write into a pointer to an object.
+ if (getLangOpts().ObjC1 &&
+ getLangOpts().getGC() != LangOptions::NonGC &&
+ LV.isObjCWeak())
+ LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
+ return LV;
+ }
+ case UO_Real:
+ case UO_Imag: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ assert(LV.isSimple() && "real/imag on non-ordinary l-value");
+
+ // __real is valid on scalars. This is a faster way of testing that.
+ // __imag can only produce an rvalue on scalars.
+ if (E->getOpcode() == UO_Real &&
+ !LV.getAddress().getElementType()->isStructTy()) {
+ assert(E->getSubExpr()->getType()->isArithmeticType());
+ return LV;
+ }
+
+ assert(E->getSubExpr()->getType()->isAnyComplexType());
+
+ Address Component =
+ (E->getOpcode() == UO_Real
+ ? emitAddrOfRealComponent(LV.getAddress(), LV.getType())
+ : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
+ return MakeAddrLValue(Component, ExprTy, LV.getAlignmentSource());
+ }
+ case UO_PreInc:
+ case UO_PreDec: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ bool isInc = E->getOpcode() == UO_PreInc;
+
+ if (E->getType()->isAnyComplexType())
+ EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
+ else
+ EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
+ return LV;
+ }
+ }
+}
+
+LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
+ return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
+ E->getType(), AlignmentSource::Decl);
+}
+
+LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
+ return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
+ E->getType(), AlignmentSource::Decl);
+}
+
+LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
+ auto SL = E->getFunctionName();
+ assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
+ StringRef FnName = CurFn->getName();
+ if (FnName.startswith("\01"))
+ FnName = FnName.substr(1);
+ StringRef NameItems[] = {
+ PredefinedExpr::getIdentTypeName(E->getIdentType()), FnName};
+ std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
+ if (CurCodeDecl && isa<BlockDecl>(CurCodeDecl)) {
+ auto C = CGM.GetAddrOfConstantCString(FnName, GVName.c_str());
+ return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
+ }
+ auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
+ return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
+}
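+
+// Illustration, not part of this change: for '__func__' inside 'void f()',
+// the name is emitted into a global called "__func__.f"; within a block,
+// the LLVM name of the block invoke function is used as the string instead.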
+
+/// Emit a type description suitable for use by a runtime sanitizer library. The
+/// format of a type descriptor is
+///
+/// \code
+/// { i16 TypeKind, i16 TypeInfo }
+/// \endcode
+///
+/// followed by an array of i8 containing the type name. TypeKind is 0 for an
+/// integer, 1 for a floating point value, and -1 for anything else.
+llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
+ // Only emit each type's descriptor once.
+ if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
+ return C;
+
+ uint16_t TypeKind = -1;
+ uint16_t TypeInfo = 0;
+
+ if (T->isIntegerType()) {
+ TypeKind = 0;
+ TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
+ (T->isSignedIntegerType() ? 1 : 0);
+ } else if (T->isFloatingType()) {
+ TypeKind = 1;
+ TypeInfo = getContext().getTypeSize(T);
+ }
+
+ // Format the type name as if for a diagnostic, including quotes and
+ // optionally an 'aka'.
+ SmallString<32> Buffer;
+ CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
+ (intptr_t)T.getAsOpaquePtr(),
+ StringRef(), StringRef(), None, Buffer,
+ None);
+
+ llvm::Constant *Components[] = {
+ Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
+ llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
+ };
+ llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
+
+ auto *GV = new llvm::GlobalVariable(
+ CGM.getModule(), Descriptor->getType(),
+ /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
+ GV->setUnnamedAddr(true);
+ CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);
+
+ // Remember the descriptor for this type.
+ CGM.setTypeDescriptorInMap(T, GV);
+
+ return GV;
+}
+
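+/// Prepare a value to be passed to a sanitizer handler as a single
+/// intptr_t-sized argument: small floats are bitcast to integers, small
+/// integers are zero-extended, pointers are converted with ptrtoint, and
+/// anything else is spilled to a temporary and passed by address. (A
+/// summary of the cases handled below.)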
+llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
+ llvm::Type *TargetTy = IntPtrTy;
+
+ // Floating-point types which fit into intptr_t are bitcast to integers
+ // and then passed directly (after zero-extension, if necessary).
+ if (V->getType()->isFloatingPointTy()) {
+ unsigned Bits = V->getType()->getPrimitiveSizeInBits();
+ if (Bits <= TargetTy->getIntegerBitWidth())
+ V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
+ Bits));
+ }
+
+ // Integers which fit in intptr_t are zero-extended and passed directly.
+ if (V->getType()->isIntegerTy() &&
+ V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
+ return Builder.CreateZExt(V, TargetTy);
+
+ // Pointers are passed directly, everything else is passed by address.
+ if (!V->getType()->isPointerTy()) {
+ Address Ptr = CreateDefaultAlignTempAlloca(V->getType());
+ Builder.CreateStore(V, Ptr);
+ V = Ptr.getPointer();
+ }
+ return Builder.CreatePtrToInt(V, TargetTy);
+}
+
+/// \brief Emit a representation of a SourceLocation for passing to a handler
+/// in a sanitizer runtime library. The format for this data is:
+/// \code
+/// struct SourceLocation {
+/// const char *Filename;
+/// int32_t Line, Column;
+/// };
+/// \endcode
+/// For an invalid SourceLocation, the Filename pointer is null.
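+///
+/// For example, a check at line 10, column 5 of "a.c" produces roughly
+/// { i8* @.src, i32 10, i32 5 }, where @.src holds "a.c" (a sketch).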
+llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
+ llvm::Constant *Filename;
+ int Line, Column;
+
+ PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
+ if (PLoc.isValid()) {
+ auto FilenameGV = CGM.GetAddrOfConstantCString(PLoc.getFilename(), ".src");
+ CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
+ cast<llvm::GlobalVariable>(FilenameGV.getPointer()));
+ Filename = FilenameGV.getPointer();
+ Line = PLoc.getLine();
+ Column = PLoc.getColumn();
+ } else {
+ Filename = llvm::Constant::getNullValue(Int8PtrTy);
+ Line = Column = 0;
+ }
+
+ llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
+ Builder.getInt32(Column)};
+
+ return llvm::ConstantStruct::getAnon(Data);
+}
+
+namespace {
+/// \brief Specify under what conditions this check can be recovered from.
+enum class CheckRecoverableKind {
+ /// Always terminate program execution if this check fails.
+ Unrecoverable,
+ /// The check supports recovery: the runtime has both fatal (noreturn)
+ /// and non-fatal handlers for it.
+ Recoverable,
+ /// The runtime conditionally aborts, so recovery must always be
+ /// supported.
+ AlwaysRecoverable
+};
+}
+
+static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
+ assert(llvm::countPopulation(Kind) == 1);
+ switch (Kind) {
+ case SanitizerKind::Vptr:
+ return CheckRecoverableKind::AlwaysRecoverable;
+ case SanitizerKind::Return:
+ case SanitizerKind::Unreachable:
+ return CheckRecoverableKind::Unrecoverable;
+ default:
+ return CheckRecoverableKind::Recoverable;
+ }
+}
+
+static void emitCheckHandlerCall(CodeGenFunction &CGF,
+ llvm::FunctionType *FnType,
+ ArrayRef<llvm::Value *> FnArgs,
+ StringRef CheckName,
+ CheckRecoverableKind RecoverKind, bool IsFatal,
+ llvm::BasicBlock *ContBB) {
+ assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
+ bool NeedsAbortSuffix =
+ IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
+ std::string FnName = ("__ubsan_handle_" + CheckName +
+ (NeedsAbortSuffix ? "_abort" : "")).str();
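+ // E.g. a fatal "add_overflow" check calls __ubsan_handle_add_overflow_abort,
+ // while a recoverable one calls __ubsan_handle_add_overflow.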
+ bool MayReturn =
+ !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
+
+ llvm::AttrBuilder B;
+ if (!MayReturn) {
+ B.addAttribute(llvm::Attribute::NoReturn)
+ .addAttribute(llvm::Attribute::NoUnwind);
+ }
+ B.addAttribute(llvm::Attribute::UWTable);
+
+ llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(
+ FnType, FnName,
+ llvm::AttributeSet::get(CGF.getLLVMContext(),
+ llvm::AttributeSet::FunctionIndex, B));
+ llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
+ if (!MayReturn) {
+ HandlerCall->setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+ } else {
+ CGF.Builder.CreateBr(ContBB);
+ }
+}
+
+void CodeGenFunction::EmitCheck(
+ ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
+ StringRef CheckName, ArrayRef<llvm::Constant *> StaticArgs,
+ ArrayRef<llvm::Value *> DynamicArgs) {
+ assert(IsSanitizerScope);
+ assert(Checked.size() > 0);
+
+ llvm::Value *FatalCond = nullptr;
+ llvm::Value *RecoverableCond = nullptr;
+ llvm::Value *TrapCond = nullptr;
+ for (int i = 0, n = Checked.size(); i < n; ++i) {
+ llvm::Value *Check = Checked[i].first;
+ // -fsanitize-trap= overrides -fsanitize-recover=.
+ llvm::Value *&Cond =
+ CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second)
+ ? TrapCond
+ : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
+ ? RecoverableCond
+ : FatalCond;
+ Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
+ }
+
+ if (TrapCond)
+ EmitTrapCheck(TrapCond);
+ if (!FatalCond && !RecoverableCond)
+ return;
+
+ llvm::Value *JointCond;
+ if (FatalCond && RecoverableCond)
+ JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
+ else
+ JointCond = FatalCond ? FatalCond : RecoverableCond;
+ assert(JointCond);
+
+ CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
+ assert(SanOpts.has(Checked[0].second));
+#ifndef NDEBUG
+ for (int i = 1, n = Checked.size(); i < n; ++i) {
+ assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
+ "All recoverable kinds in a single check must be same!");
+ assert(SanOpts.has(Checked[i].second));
+ }
+#endif
+
+ llvm::BasicBlock *Cont = createBasicBlock("cont");
+ llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
+ llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
+ // Give a hint that we very much don't expect to execute the handler.
+ // The value is chosen to match UR_NONTAKEN_WEIGHT; see BranchProbabilityInfo.cpp.
+ llvm::MDBuilder MDHelper(getLLVMContext());
+ llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
+ Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
+ EmitBlock(Handlers);
+
+ // Emit handler arguments and create handler function type.
+ llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
+ auto *InfoPtr =
+ new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
+ llvm::GlobalVariable::PrivateLinkage, Info);
+ InfoPtr->setUnnamedAddr(true);
+ CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
+
+ SmallVector<llvm::Value *, 4> Args;
+ SmallVector<llvm::Type *, 4> ArgTypes;
+ Args.reserve(DynamicArgs.size() + 1);
+ ArgTypes.reserve(DynamicArgs.size() + 1);
+
+ // Handler functions take an i8* pointing to the (handler-specific) static
+ // information block, followed by a sequence of intptr_t arguments
+ // representing operand values.
+ Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy));
+ ArgTypes.push_back(Int8PtrTy);
+ for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
+ Args.push_back(EmitCheckValue(DynamicArgs[i]));
+ ArgTypes.push_back(IntPtrTy);
+ }
+
+ llvm::FunctionType *FnType =
+ llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
+
+ if (!FatalCond || !RecoverableCond) {
+ // Simple case: we need to generate a single handler call, either fatal
+ // or non-fatal.
+ emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind,
+ (FatalCond != nullptr), Cont);
+ } else {
+ // Emit two handler calls: the first for the set of unrecoverable (fatal)
+ // checks, the second for the recoverable ones.
+ llvm::BasicBlock *NonFatalHandlerBB =
+ createBasicBlock("non_fatal." + CheckName);
+ llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
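+ // FatalCond holds when all fatal checks passed, so within the handler
+ // block a true FatalCond means the failure came from a recoverable check.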
+ Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
+ EmitBlock(FatalHandlerBB);
+ emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind, true,
+ NonFatalHandlerBB);
+ EmitBlock(NonFatalHandlerBB);
+ emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind, false,
+ Cont);
+ }
+
+ EmitBlock(Cont);
+}
+
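+/// Emit a cross-DSO CFI slow-path check: if Cond (the result of the inline
+/// bitset test) is false, call the __cfi_slowpath runtime function with the
+/// type identifier and the checked pointer so the check can be completed
+/// against other DSOs.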
+void CodeGenFunction::EmitCfiSlowPathCheck(llvm::Value *Cond,
+ llvm::ConstantInt *TypeId,
+ llvm::Value *Ptr) {
+ auto &Ctx = getLLVMContext();
+ llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
+
+ llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
+ llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
+
+ llvm::MDBuilder MDHelper(getLLVMContext());
+ llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
+ BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
+
+ EmitBlock(CheckBB);
+
+ llvm::Constant *SlowPathFn = CGM.getModule().getOrInsertFunction(
+ "__cfi_slowpath",
+ llvm::FunctionType::get(
+ llvm::Type::getVoidTy(Ctx),
+ {llvm::Type::getInt64Ty(Ctx),
+ llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(Ctx))},
+ false));
+ llvm::CallInst *CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
+ CheckCall->setDoesNotThrow();
+
+ EmitBlock(Cont);
+}
+
+void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked) {
+ llvm::BasicBlock *Cont = createBasicBlock("cont");
+
+ // If we're optimizing, collapse all calls to trap down to just one per
+ // function to save on code size.
+ if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) {
+ TrapBB = createBasicBlock("trap");
+ Builder.CreateCondBr(Checked, Cont, TrapBB);
+ EmitBlock(TrapBB);
+ llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
+ TrapCall->setDoesNotReturn();
+ TrapCall->setDoesNotThrow();
+ Builder.CreateUnreachable();
+ } else {
+ Builder.CreateCondBr(Checked, Cont, TrapBB);
+ }
+
+ EmitBlock(Cont);
+}
+
+llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
+ llvm::CallInst *TrapCall = Builder.CreateCall(CGM.getIntrinsic(IntrID));
+
+ if (!CGM.getCodeGenOpts().TrapFuncName.empty())
+ TrapCall->addAttribute(llvm::AttributeSet::FunctionIndex,
+ "trap-func-name",
+ CGM.getCodeGenOpts().TrapFuncName);
+
+ return TrapCall;
+}
+
+Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
+ AlignmentSource *AlignSource) {
+ assert(E->getType()->isArrayType() &&
+ "Array to pointer decay must have array source type!");
+
+ // Expressions of array type can't be bitfields or vector elements.
+ LValue LV = EmitLValue(E);
+ Address Addr = LV.getAddress();
+ if (AlignSource) *AlignSource = LV.getAlignmentSource();
+
+ // If the array type was an incomplete type, we need to make sure
+ // the decay ends up being the right type.
+ llvm::Type *NewTy = ConvertType(E->getType());
+ Addr = Builder.CreateElementBitCast(Addr, NewTy);
+
+ // Note that VLA pointers are always decayed, so we don't need to do
+ // anything here.
+ if (!E->getType()->isVariableArrayType()) {
+ assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
+ "Expected pointer to array");
+ Addr = Builder.CreateStructGEP(Addr, 0, CharUnits::Zero(), "arraydecay");
+ }
+
+ QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
+ return Builder.CreateElementBitCast(Addr, ConvertTypeForMem(EltType));
+}
+
+/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
+/// array to pointer, return the array subexpression.
+static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
+ // If this isn't just an array->pointer decay, bail out.
+ const auto *CE = dyn_cast<CastExpr>(E);
+ if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
+ return nullptr;
+
+ // If this is a decay from a variable-length array, bail out.
+ const Expr *SubExpr = CE->getSubExpr();
+ if (SubExpr->getType()->isVariableArrayType())
+ return nullptr;
+
+ return SubExpr;
+}
+
+static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
+ llvm::Value *ptr,
+ ArrayRef<llvm::Value*> indices,
+ bool inbounds,
+ const llvm::Twine &name = "arrayidx") {
+ if (inbounds) {
+ return CGF.Builder.CreateInBoundsGEP(ptr, indices, name);
+ } else {
+ return CGF.Builder.CreateGEP(ptr, indices, name);
+ }
+}
+
+static CharUnits getArrayElementAlign(CharUnits arrayAlign,
+ llvm::Value *idx,
+ CharUnits eltSize) {
+ // If we have a constant index, we can use the exact offset of the
+ // element we're accessing.
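+ // (E.g. for a 16-byte-aligned array of i32, index 2 is at offset 8, so
+ // the element is 8-byte aligned; an unknown index only guarantees 4.)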
+ if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
+ CharUnits offset = constantIdx->getZExtValue() * eltSize;
+ return arrayAlign.alignmentAtOffset(offset);
+
+ // Otherwise, use the worst-case alignment for any element.
+ } else {
+ return arrayAlign.alignmentOfArrayElement(eltSize);
+ }
+}
+
+static QualType getFixedSizeElementType(const ASTContext &ctx,
+ const VariableArrayType *vla) {
+ QualType eltType;
+ do {
+ eltType = vla->getElementType();
+ } while ((vla = ctx.getAsVariableArrayType(eltType)));
+ return eltType;
+}
+
+static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
+ ArrayRef<llvm::Value*> indices,
+ QualType eltType, bool inbounds,
+ const llvm::Twine &name = "arrayidx") {
+ // All the indices except the last must be zero.
+#ifndef NDEBUG
+ for (auto idx : indices.drop_back())
+ assert(isa<llvm::ConstantInt>(idx) &&
+ cast<llvm::ConstantInt>(idx)->isZero());
+#endif
+
+ // Determine the element size of the statically-sized base. This is
+ // the thing that the indices are expressed in terms of.
+ if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
+ eltType = getFixedSizeElementType(CGF.getContext(), vla);
+ }
+
+ // We can use that to compute the best alignment of the element.
+ CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
+ CharUnits eltAlign =
+ getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
+
+ llvm::Value *eltPtr =
+ emitArraySubscriptGEP(CGF, addr.getPointer(), indices, inbounds, name);
+ return Address(eltPtr, eltAlign);
+}
+
+LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
+ bool Accessed) {
+ // The index must always be an integer, which is not an aggregate. Emit it.
+ llvm::Value *Idx = EmitScalarExpr(E->getIdx());
+ QualType IdxTy = E->getIdx()->getType();
+ bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
+
+ if (SanOpts.has(SanitizerKind::ArrayBounds))
+ EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
+
+ // If the base is a vector type, then we are forming a vector element lvalue
+ // with this subscript.
+ if (E->getBase()->getType()->isVectorType() &&
+ !isa<ExtVectorElementExpr>(E->getBase())) {
+ // Emit the vector as an lvalue to get its address.
+ LValue LHS = EmitLValue(E->getBase());
+ assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
+ return LValue::MakeVectorElt(LHS.getAddress(), Idx,
+ E->getBase()->getType(),
+ LHS.getAlignmentSource());
+ }
+
+ // All the other cases basically behave like simple offsetting.
+
+ // Extend or truncate the index type to 32 or 64 bits.
+ if (Idx->getType() != IntPtrTy)
+ Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
+
+ // Handle the extvector case we ignored above.
+ if (isa<ExtVectorElementExpr>(E->getBase())) {
+ LValue LV = EmitLValue(E->getBase());
+ Address Addr = EmitExtVectorElementLValue(LV);
+
+ QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
+ Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true);
+ return MakeAddrLValue(Addr, EltType, LV.getAlignmentSource());
+ }
+
+ AlignmentSource AlignSource;
+ Address Addr = Address::invalid();
+ if (const VariableArrayType *vla =
+ getContext().getAsVariableArrayType(E->getType())) {
+ // The base must be a pointer, which is not an aggregate. Emit
+ // it. It needs to be emitted first in case it's what captures
+ // the VLA bounds.
+ Addr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
+
+ // The element count here is the total number of non-VLA elements.
+ llvm::Value *numElements = getVLASize(vla).first;
+
+ // Effectively, the multiply by the VLA size is part of the GEP.
+ // GEP indexes are signed, and scaling an index isn't permitted to
+ // signed-overflow, so we use the same semantics for our explicit
+ // multiply. We suppress this if overflow is not undefined behavior.
+ if (getLangOpts().isSignedOverflowDefined()) {
+ Idx = Builder.CreateMul(Idx, numElements);
+ } else {
+ Idx = Builder.CreateNSWMul(Idx, numElements);
+ }
+
+ Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
+ !getLangOpts().isSignedOverflowDefined());
+
+ } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
+ // Indexing over an interface, as in "NSString *P; P[4];"
+ CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
+ llvm::Value *InterfaceSizeVal =
+ llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
+
+ llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
+
+ // Emit the base pointer.
+ Addr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
+
+ // We don't necessarily build correct LLVM struct types for ObjC
+ // interfaces, so we can't rely on GEP to do this scaling
+ // correctly, so we need to cast to i8*. FIXME: is this actually
+ // true? A lot of other things in the fragile ABI would break...
+ llvm::Type *OrigBaseTy = Addr.getType();
+ Addr = Builder.CreateElementBitCast(Addr, Int8Ty);
+
+ // Do the GEP.
+ CharUnits EltAlign =
+ getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
+ llvm::Value *EltPtr =
+ emitArraySubscriptGEP(*this, Addr.getPointer(), ScaledIdx, false);
+ Addr = Address(EltPtr, EltAlign);
+
+ // Cast back.
+ Addr = Builder.CreateBitCast(Addr, OrigBaseTy);
+ } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
+ // If this is A[i] where A is an array, the frontend will have decayed the
+ // base to be an ArrayToPointerDecay implicit cast. While correct, it is
+ // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
+ // "gep x, i" here. Emit one "gep A, 0, i".
+ assert(Array->getType()->isArrayType() &&
+ "Array to pointer decay must have array source type!");
+ LValue ArrayLV;
+ // For simple multidimensional array indexing, set the 'accessed' flag for
+ // better bounds-checking of the base expression.
+ if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
+ ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
+ else
+ ArrayLV = EmitLValue(Array);
+
+ // Propagate the alignment from the array itself to the result.
+ Addr = emitArraySubscriptGEP(*this, ArrayLV.getAddress(),
+ {CGM.getSize(CharUnits::Zero()), Idx},
+ E->getType(),
+ !getLangOpts().isSignedOverflowDefined());
+ AlignSource = ArrayLV.getAlignmentSource();
+ } else {
+ // The base must be a pointer; emit it with an estimate of its alignment.
+ Addr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
+ Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
+ !getLangOpts().isSignedOverflowDefined());
+ }
+
+ LValue LV = MakeAddrLValue(Addr, E->getType(), AlignSource);
+
+ // TODO: Preserve/extend path TBAA metadata?
+
+ if (getLangOpts().ObjC1 &&
+ getLangOpts().getGC() != LangOptions::NonGC) {
+ LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
+ setObjCGCLValueClass(getContext(), E, LV);
+ }
+ return LV;
+}
+
+LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
+ bool IsLowerBound) {
+ LValue Base;
+ if (auto *ASE =
+ dyn_cast<OMPArraySectionExpr>(E->getBase()->IgnoreParenImpCasts()))
+ Base = EmitOMPArraySectionExpr(ASE, IsLowerBound);
+ else
+ Base = EmitLValue(E->getBase());
+ QualType BaseTy = Base.getType();
+ llvm::Value *Idx = nullptr;
+ QualType ResultExprTy;
+ if (auto *AT = getContext().getAsArrayType(BaseTy))
+ ResultExprTy = AT->getElementType();
+ else
+ ResultExprTy = BaseTy->getPointeeType();
+ if (IsLowerBound || E->getColonLoc().isInvalid()) {
+ // Requesting the lower bound, or the upper bound of a section written
+ // without ':' (so with the default length of 1, upper == lower).
+ // Idx = LowerBound ?: 0;
+ if (auto *LowerBound = E->getLowerBound()) {
+ Idx = Builder.CreateIntCast(
+ EmitScalarExpr(LowerBound), IntPtrTy,
+ LowerBound->getType()->hasSignedIntegerRepresentation());
+ } else
+ Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
+ } else {
+ // Try to emit the length and the lower bound as constants. If either is
+ // constant, the -1 is folded into it; otherwise, emit LLVM IR computing
+ // (LB + Len) - 1.
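+ // E.g. for the section a[3:5] (lower bound 3, length 5), the upper index
+ // is 3 + 5 - 1 == 7 (an illustrative OpenMP array section).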
+ auto &C = CGM.getContext();
+ auto *Length = E->getLength();
+ llvm::APSInt ConstLength;
+ if (Length) {
+ // Idx = LowerBound + Length - 1;
+ if (Length->isIntegerConstantExpr(ConstLength, C)) {
+ ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
+ Length = nullptr;
+ }
+ auto *LowerBound = E->getLowerBound();
+ llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
+ if (LowerBound && LowerBound->isIntegerConstantExpr(ConstLowerBound, C)) {
+ ConstLowerBound = ConstLowerBound.zextOrTrunc(PointerWidthInBits);
+ LowerBound = nullptr;
+ }
+ if (!Length)
+ --ConstLength;
+ else if (!LowerBound)
+ --ConstLowerBound;
+
+ if (Length || LowerBound) {
+ auto *LowerBoundVal =
+ LowerBound
+ ? Builder.CreateIntCast(
+ EmitScalarExpr(LowerBound), IntPtrTy,
+ LowerBound->getType()->hasSignedIntegerRepresentation())
+ : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
+ auto *LengthVal =
+ Length
+ ? Builder.CreateIntCast(
+ EmitScalarExpr(Length), IntPtrTy,
+ Length->getType()->hasSignedIntegerRepresentation())
+ : llvm::ConstantInt::get(IntPtrTy, ConstLength);
+ Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
+ /*HasNUW=*/false,
+ !getLangOpts().isSignedOverflowDefined());
+ if (Length && LowerBound) {
+ Idx = Builder.CreateSub(
+ Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
+ /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
+ }
+ } else
+ Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
+ } else {
+ // Idx = ArraySize - 1;
+ if (auto *VAT = C.getAsVariableArrayType(BaseTy)) {
+ Length = VAT->getSizeExpr();
+ if (Length->isIntegerConstantExpr(ConstLength, C))
+ Length = nullptr;
+ } else {
+ auto *CAT = C.getAsConstantArrayType(BaseTy);
+ ConstLength = CAT->getSize();
+ }
+ if (Length) {
+ auto *LengthVal = Builder.CreateIntCast(
+ EmitScalarExpr(Length), IntPtrTy,
+ Length->getType()->hasSignedIntegerRepresentation());
+ Idx = Builder.CreateSub(
+ LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
+ /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
+ } else {
+ ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
+ --ConstLength;
+ Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
+ }
+ }
+ }
+ assert(Idx);
+
+ llvm::Value *EltPtr;
+ QualType FixedSizeEltType = ResultExprTy;
+ if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
+ // The element count here is the total number of non-VLA elements.
+ llvm::Value *numElements = getVLASize(VLA).first;
+ FixedSizeEltType = getFixedSizeElementType(getContext(), VLA);
+
+ // Effectively, the multiply by the VLA size is part of the GEP.
+ // GEP indexes are signed, and scaling an index isn't permitted to
+ // signed-overflow, so we use the same semantics for our explicit
+ // multiply. We suppress this if overflow is not undefined behavior.
+ if (getLangOpts().isSignedOverflowDefined()) {
+ Idx = Builder.CreateMul(Idx, numElements);
+ EltPtr = Builder.CreateGEP(Base.getPointer(), Idx, "arrayidx");
+ } else {
+ Idx = Builder.CreateNSWMul(Idx, numElements);
+ EltPtr = Builder.CreateInBoundsGEP(Base.getPointer(), Idx, "arrayidx");
+ }
+ } else if (BaseTy->isConstantArrayType()) {
+ llvm::Value *ArrayPtr = Base.getPointer();
+ llvm::Value *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
+ llvm::Value *Args[] = {Zero, Idx};
+
+ if (getLangOpts().isSignedOverflowDefined())
+ EltPtr = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
+ else
+ EltPtr = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
+ } else {
+ // The base must be a pointer, which is not an aggregate. Emit it.
+ if (getLangOpts().isSignedOverflowDefined())
+ EltPtr = Builder.CreateGEP(Base.getPointer(), Idx, "arrayidx");
+ else
+ EltPtr = Builder.CreateInBoundsGEP(Base.getPointer(), Idx, "arrayidx");
+ }
+
+ CharUnits EltAlign =
+ Base.getAlignment().alignmentOfArrayElement(
+ getContext().getTypeSizeInChars(FixedSizeEltType));
+
+ // Limit the alignment to that of the result type.
+ LValue LV = MakeAddrLValue(Address(EltPtr, EltAlign), ResultExprTy,
+ Base.getAlignmentSource());
+
+ LV.getQuals().setAddressSpace(BaseTy.getAddressSpace());
+
+ return LV;
+}
+
+LValue CodeGenFunction::
+EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
+ // Emit the base vector as an l-value.
+ LValue Base;
+
+ // ExtVectorElementExpr's base can either be a vector or pointer to vector.
+ if (E->isArrow()) {
+ // If it is a pointer to a vector, emit the address and form an lvalue with
+ // it.
+ AlignmentSource AlignSource;
+ Address Ptr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
+ const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
+ Base = MakeAddrLValue(Ptr, PT->getPointeeType(), AlignSource);
+ Base.getQuals().removeObjCGCAttr();
+ } else if (E->getBase()->isGLValue()) {
+ // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
+ // emit the base as an lvalue.
+ assert(E->getBase()->getType()->isVectorType());
+ Base = EmitLValue(E->getBase());
+ } else {
+ // Otherwise, the base is a normal rvalue (as in (V+V).x); emit it as such.
+ assert(E->getBase()->getType()->isVectorType() &&
+ "Result must be a vector");
+ llvm::Value *Vec = EmitScalarExpr(E->getBase());
+
+ // Store the vector to memory (because LValue wants an address).
+ Address VecMem = CreateMemTemp(E->getBase()->getType());
+ Builder.CreateStore(Vec, VecMem);
+ Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
+ AlignmentSource::Decl);
+ }
+
+ QualType type =
+ E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
+
+ // Encode the element access list into a vector of unsigned indices.
+ SmallVector<uint32_t, 4> Indices;
+ E->getEncodedElementAccess(Indices);
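+ // E.g. for a 4-element vector, the accessor ".zyx" encodes to {2, 1, 0}.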
+
+ if (Base.isSimple()) {
+ llvm::Constant *CV =
+ llvm::ConstantDataVector::get(getLLVMContext(), Indices);
+ return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
+ Base.getAlignmentSource());
+ }
+ assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
+
+ llvm::Constant *BaseElts = Base.getExtVectorElts();
+ SmallVector<llvm::Constant *, 4> CElts;
+
+ for (unsigned i = 0, e = Indices.size(); i != e; ++i)
+ CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
+ llvm::Constant *CV = llvm::ConstantVector::get(CElts);
+ return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
+ Base.getAlignmentSource());
+}
+
+LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
+ Expr *BaseExpr = E->getBase();
+
+ // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
+ LValue BaseLV;
+ if (E->isArrow()) {
+ AlignmentSource AlignSource;
+ Address Addr = EmitPointerWithAlignment(BaseExpr, &AlignSource);
+ QualType PtrTy = BaseExpr->getType()->getPointeeType();
+ EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy);
+ BaseLV = MakeAddrLValue(Addr, PtrTy, AlignSource);
+ } else
+ BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
+
+ NamedDecl *ND = E->getMemberDecl();
+ if (auto *Field = dyn_cast<FieldDecl>(ND)) {
+ LValue LV = EmitLValueForField(BaseLV, Field);
+ setObjCGCLValueClass(getContext(), E, LV);
+ return LV;
+ }
+
+ if (auto *VD = dyn_cast<VarDecl>(ND))
+ return EmitGlobalVarDeclLValue(*this, E, VD);
+
+ if (const auto *FD = dyn_cast<FunctionDecl>(ND))
+ return EmitFunctionDeclLValue(*this, E, FD);
+
+ llvm_unreachable("Unhandled member declaration!");
+}
+
+/// Given that we are currently emitting a lambda, emit an l-value for
+/// one of its members.
+LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
+ assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent()->isLambda());
+ assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent() == Field->getParent());
+ QualType LambdaTagType =
+ getContext().getTagDeclType(Field->getParent());
+ LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, LambdaTagType);
+ return EmitLValueForField(LambdaLV, Field);
+}
+
+/// Drill down to the storage of a field without walking into
+/// reference types.
+///
+/// The resulting address doesn't necessarily have the right type.
+static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
+ const FieldDecl *field) {
+ const RecordDecl *rec = field->getParent();
+
+ unsigned idx =
+ CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
+
+ CharUnits offset;
+ // Adjust the alignment down to the given offset.
+ // As a special case, if the LLVM field index is 0, we know that the
+ // offset is zero.
+ assert((idx != 0 || CGF.getContext().getASTRecordLayout(rec)
+ .getFieldOffset(field->getFieldIndex()) == 0) &&
+ "LLVM field at index zero had non-zero offset?");
+ if (idx != 0) {
+ auto &recLayout = CGF.getContext().getASTRecordLayout(rec);
+ auto offsetInBits = recLayout.getFieldOffset(field->getFieldIndex());
+ offset = CGF.getContext().toCharUnitsFromBits(offsetInBits);
+ }
+
+ return CGF.Builder.CreateStructGEP(base, idx, offset, field->getName());
+}
+
+LValue CodeGenFunction::EmitLValueForField(LValue base,
+ const FieldDecl *field) {
+ AlignmentSource fieldAlignSource =
+ getFieldAlignmentSource(base.getAlignmentSource());
+
+ if (field->isBitField()) {
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(field->getParent());
+ const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
+ Address Addr = base.getAddress();
+ unsigned Idx = RL.getLLVMFieldNo(field);
+ if (Idx != 0)
+ // For structs, we GEP to the field that the record layout suggests.
+ Addr = Builder.CreateStructGEP(Addr, Idx, Info.StorageOffset,
+ field->getName());
+ // Get the access type.
+ llvm::Type *FieldIntTy =
+ llvm::Type::getIntNTy(getLLVMContext(), Info.StorageSize);
+ if (Addr.getElementType() != FieldIntTy)
+ Addr = Builder.CreateElementBitCast(Addr, FieldIntTy);
+
+ QualType fieldType =
+ field->getType().withCVRQualifiers(base.getVRQualifiers());
+ return LValue::MakeBitfield(Addr, Info, fieldType, fieldAlignSource);
+ }
+
+ const RecordDecl *rec = field->getParent();
+ QualType type = field->getType();
+
+ bool mayAlias = rec->hasAttr<MayAliasAttr>();
+
+ Address addr = base.getAddress();
+ unsigned cvr = base.getVRQualifiers();
+ bool TBAAPath = CGM.getCodeGenOpts().StructPathTBAA;
+ if (rec->isUnion()) {
+ // For unions, there is no pointer adjustment.
+ assert(!type->isReferenceType() && "union has reference member");
+ // TODO: handle path-aware TBAA for union.
+ TBAAPath = false;
+ } else {
+ // For structs, we GEP to the field that the record layout suggests.
+ addr = emitAddrOfFieldStorage(*this, addr, field);
+
+ // If this is a reference field, load the reference right now.
+ if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
+ llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
+ if (cvr & Qualifiers::Volatile) load->setVolatile(true);
+
+ // Loading the reference will disable path-aware TBAA.
+ TBAAPath = false;
+ if (CGM.shouldUseTBAA()) {
+ llvm::MDNode *tbaa;
+ if (mayAlias)
+ tbaa = CGM.getTBAAInfo(getContext().CharTy);
+ else
+ tbaa = CGM.getTBAAInfo(type);
+ if (tbaa)
+ CGM.DecorateInstructionWithTBAA(load, tbaa);
+ }
+
+ mayAlias = false;
+ type = refType->getPointeeType();
+
+ CharUnits alignment =
+ getNaturalTypeAlignment(type, &fieldAlignSource, /*pointee*/ true);
+ addr = Address(load, alignment);
+
+ // Qualifiers on the struct don't apply to the referencee, and
+ // we'll pick up CVR from the actual type later, so reset these
+ // additional qualifiers now.
+ cvr = 0;
+ }
+ }
+
+ // Make sure that the address is pointing to the right type. This is critical
+ // for both unions and structs. A union needs a bitcast, a struct element
+ // will need a bitcast if the LLVM type laid out doesn't match the desired
+ // type.
+ addr = Builder.CreateElementBitCast(addr,
+ CGM.getTypes().ConvertTypeForMem(type),
+ field->getName());
+
+ if (field->hasAttr<AnnotateAttr>())
+ addr = EmitFieldAnnotations(field, addr);
+
+ LValue LV = MakeAddrLValue(addr, type, fieldAlignSource);
+ LV.getQuals().addCVRQualifiers(cvr);
+ if (TBAAPath) {
+ const ASTRecordLayout &Layout =
+ getContext().getASTRecordLayout(field->getParent());
+ // Set the base type to be the base type of the base LValue and
+ // update offset to be relative to the base type.
+ LV.setTBAABaseType(mayAlias ? getContext().CharTy : base.getTBAABaseType());
+ LV.setTBAAOffset(mayAlias ? 0 : base.getTBAAOffset() +
+ Layout.getFieldOffset(field->getFieldIndex()) /
+ getContext().getCharWidth());
+ }
+
+ // __weak attribute on a field is ignored.
+ if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
+ LV.getQuals().removeObjCGCAttr();
+
+ // Fields of may_alias structs act like 'char' for TBAA purposes.
+ // FIXME: this should get propagated down through anonymous structs
+ // and unions.
+ if (mayAlias && LV.getTBAAInfo())
+ LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy));
+
+ return LV;
+}
+
+LValue
+CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
+ const FieldDecl *Field) {
+ QualType FieldType = Field->getType();
+
+ if (!FieldType->isReferenceType())
+ return EmitLValueForField(Base, Field);
+
+ Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field);
+
+ // Make sure that the address is pointing to the right type.
+ llvm::Type *llvmType = ConvertTypeForMem(FieldType);
+ V = Builder.CreateElementBitCast(V, llvmType, Field->getName());
+
+ // TODO: access-path TBAA?
+ auto FieldAlignSource = getFieldAlignmentSource(Base.getAlignmentSource());
+ return MakeAddrLValue(V, FieldType, FieldAlignSource);
+}
+
+LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
+ if (E->isFileScope()) {
+ ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
+ return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
+ }
+ if (E->getType()->isVariablyModifiedType())
+ // make sure to emit the VLA size.
+ EmitVariablyModifiedType(E->getType());
+
+ Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
+ const Expr *InitExpr = E->getInitializer();
+ LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);
+
+ EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
+ /*Init*/ true);
+
+ return Result;
+}
+
+LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
+ if (!E->isGLValue())
+ // Initializing an aggregate temporary in C++11: T{...}.
+ return EmitAggExprToLValue(E);
+
+ // An lvalue initializer list must be initializing a reference.
+ assert(E->getNumInits() == 1 && "reference init with multiple values");
+ return EmitLValue(E->getInit(0));
+}
+
+/// Emit the operand of a glvalue conditional operator. This is either a glvalue
+/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
+/// LValue is returned and the current block has been terminated.
+static Optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
+ const Expr *Operand) {
+ if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
+ CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
+ return None;
+ }
+
+ return CGF.EmitLValue(Operand);
+}
+
+LValue CodeGenFunction::
+EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
+ if (!expr->isGLValue()) {
+ // ?: here should be an aggregate.
+ assert(hasAggregateEvaluationKind(expr->getType()) &&
+ "Unexpected conditional operator!");
+ return EmitAggExprToLValue(expr);
+ }
+
+ OpaqueValueMapping binding(*this, expr);
+
+ const Expr *condExpr = expr->getCond();
+ bool CondExprBool;
+ if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
+ const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
+ if (!CondExprBool) std::swap(live, dead);
+
+ if (!ContainsLabel(dead)) {
+ // If the true case is live, we need to track its region.
+ if (CondExprBool)
+ incrementProfileCounter(expr);
+ return EmitLValue(live);
+ }
+ }
+
+ llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
+ llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
+ llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
+
+ ConditionalEvaluation eval(*this);
+ EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock, getProfileCount(expr));
+
+ // Any temporaries created here are conditional.
+ EmitBlock(lhsBlock);
+ incrementProfileCounter(expr);
+ eval.begin(*this);
+ Optional<LValue> lhs =
+ EmitLValueOrThrowExpression(*this, expr->getTrueExpr());
+ eval.end(*this);
+
+ if (lhs && !lhs->isSimple())
+ return EmitUnsupportedLValue(expr, "conditional operator");
+
+ lhsBlock = Builder.GetInsertBlock();
+ if (lhs)
+ Builder.CreateBr(contBlock);
+
+ // Any temporaries created here are conditional.
+ EmitBlock(rhsBlock);
+ eval.begin(*this);
+ Optional<LValue> rhs =
+ EmitLValueOrThrowExpression(*this, expr->getFalseExpr());
+ eval.end(*this);
+ if (rhs && !rhs->isSimple())
+ return EmitUnsupportedLValue(expr, "conditional operator");
+ rhsBlock = Builder.GetInsertBlock();
+
+ EmitBlock(contBlock);
+
+ if (lhs && rhs) {
+ llvm::PHINode *phi = Builder.CreatePHI(lhs->getPointer()->getType(),
+ 2, "cond-lvalue");
+ phi->addIncoming(lhs->getPointer(), lhsBlock);
+ phi->addIncoming(rhs->getPointer(), rhsBlock);
+ Address result(phi, std::min(lhs->getAlignment(), rhs->getAlignment()));
+ AlignmentSource alignSource =
+ std::max(lhs->getAlignmentSource(), rhs->getAlignmentSource());
+ return MakeAddrLValue(result, expr->getType(), alignSource);
+ } else {
+ assert((lhs || rhs) &&
+ "both operands of glvalue conditional are throw-expressions?");
+ return lhs ? *lhs : *rhs;
+ }
+}
+
+/// EmitCastLValue - Casts are never lvalues unless the cast is to a
+/// reference type. If the cast is to a reference, we can have the usual
+/// lvalue result; otherwise, if the code generator needs a cast in an
+/// lvalue context, it must mean that we need the address of an aggregate
+/// in order to access one of its members. This can happen for all the
+/// reasons that casts are permitted with an aggregate result, including
+/// noop aggregate casts and casts from scalar to union.
+LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ case CK_ToVoid:
+ case CK_BitCast:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToMemberPointer:
+ case CK_NullToPointer:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ case CK_AddressSpaceConversion:
+ return EmitUnsupportedLValue(E, "unexpected cast lvalue");
+
+ case CK_Dependent:
+ llvm_unreachable("dependent cast kind in IR gen!");
+
+ case CK_BuiltinFnToFnPtr:
+ llvm_unreachable("builtin functions are handled elsewhere");
+
+ // These are never l-values; just use the aggregate emission code.
+ case CK_NonAtomicToAtomic:
+ case CK_AtomicToNonAtomic:
+ return EmitAggExprToLValue(E);
+
+ case CK_Dynamic: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ Address V = LV.getAddress();
+ const auto *DCE = cast<CXXDynamicCastExpr>(E);
+ return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType());
+ }
+
+ case CK_ConstructorConversion:
+ case CK_UserDefinedConversion:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_NoOp:
+ case CK_LValueToRValue:
+ return EmitLValue(E->getSubExpr());
+
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBase: {
+ const RecordType *DerivedClassTy =
+ E->getSubExpr()->getType()->getAs<RecordType>();
+ auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+
+ LValue LV = EmitLValue(E->getSubExpr());
+ Address This = LV.getAddress();
+
+ // Perform the derived-to-base conversion
+ Address Base = GetAddressOfBaseClass(
+ This, DerivedClassDecl, E->path_begin(), E->path_end(),
+ /*NullCheckValue=*/false, E->getExprLoc());
+
+ return MakeAddrLValue(Base, E->getType(), LV.getAlignmentSource());
+ }
+ case CK_ToUnion:
+ return EmitAggExprToLValue(E);
+ case CK_BaseToDerived: {
+ const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
+ auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+
+ LValue LV = EmitLValue(E->getSubExpr());
+
+ // Perform the base-to-derived conversion
+ Address Derived =
+ GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
+ E->path_begin(), E->path_end(),
+ /*NullCheckValue=*/false);
+
+ // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
+ // performed and the object is not of the derived type.
+ if (sanitizePerformTypeCheck())
+ EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(),
+ Derived.getPointer(), E->getType());
+
+ if (SanOpts.has(SanitizerKind::CFIDerivedCast))
+ EmitVTablePtrCheckForCast(E->getType(), Derived.getPointer(),
+ /*MayBeNull=*/false,
+ CFITCK_DerivedCast, E->getLocStart());
+
+ return MakeAddrLValue(Derived, E->getType(), LV.getAlignmentSource());
+ }
+ case CK_LValueBitCast: {
+ // This must be a reinterpret_cast (or c-style equivalent).
+ const auto *CE = cast<ExplicitCastExpr>(E);
+
+ CGM.EmitExplicitCastExprType(CE, this);
+ LValue LV = EmitLValue(E->getSubExpr());
+ Address V = Builder.CreateBitCast(LV.getAddress(),
+ ConvertType(CE->getTypeAsWritten()));
+
+ if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
+ EmitVTablePtrCheckForCast(E->getType(), V.getPointer(),
+ /*MayBeNull=*/false,
+ CFITCK_UnrelatedCast, E->getLocStart());
+
+ return MakeAddrLValue(V, E->getType(), LV.getAlignmentSource());
+ }
+ case CK_ObjCObjectLValueCast: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ Address V = Builder.CreateElementBitCast(LV.getAddress(),
+ ConvertType(E->getType()));
+ return MakeAddrLValue(V, E->getType(), LV.getAlignmentSource());
+ }
+ case CK_ZeroToOCLEvent:
+ llvm_unreachable("NULL to OpenCL event lvalue cast is not valid");
+ }
+
+ llvm_unreachable("Unhandled lvalue cast kind?");
+}
+
+LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
+ assert(OpaqueValueMappingData::shouldBindAsLValue(e));
+ return getOpaqueLValueMapping(e);
+}
+
+RValue CodeGenFunction::EmitRValueForField(LValue LV,
+ const FieldDecl *FD,
+ SourceLocation Loc) {
+ QualType FT = FD->getType();
+ LValue FieldLV = EmitLValueForField(LV, FD);
+ switch (getEvaluationKind(FT)) {
+ case TEK_Complex:
+ return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
+ case TEK_Aggregate:
+ return FieldLV.asAggregateRValue();
+ case TEK_Scalar:
+ return EmitLoadOfLValue(FieldLV, Loc);
+ }
+ llvm_unreachable("bad evaluation kind");
+}
+
+//===--------------------------------------------------------------------===//
+// Expression Emission
+//===--------------------------------------------------------------------===//
+
+RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ // Builtins never have block type.
+ if (E->getCallee()->getType()->isBlockPointerType())
+ return EmitBlockCallExpr(E, ReturnValue);
+
+ if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
+ return EmitCXXMemberCallExpr(CE, ReturnValue);
+
+ if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
+ return EmitCUDAKernelCallExpr(CE, ReturnValue);
+
+ const Decl *TargetDecl = E->getCalleeDecl();
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
+ if (unsigned builtinID = FD->getBuiltinID())
+ return EmitBuiltinExpr(FD, builtinID, E, ReturnValue);
+ }
+
+ if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
+ if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
+ return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
+
+ if (const auto *PseudoDtor =
+ dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
+ QualType DestroyedType = PseudoDtor->getDestroyedType();
+ if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
+ // Automatic Reference Counting:
+ // If the pseudo-expression names a retainable object with weak or
+ // strong lifetime, the object shall be released.
+ Expr *BaseExpr = PseudoDtor->getBase();
+ Address BaseValue = Address::invalid();
+ Qualifiers BaseQuals;
+
+ // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
+ if (PseudoDtor->isArrow()) {
+ BaseValue = EmitPointerWithAlignment(BaseExpr);
+ const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
+ BaseQuals = PTy->getPointeeType().getQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ BaseValue = BaseLV.getAddress();
+ QualType BaseTy = BaseExpr->getType();
+ BaseQuals = BaseTy.getQualifiers();
+ }
+
+ switch (DestroyedType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ EmitARCRelease(Builder.CreateLoad(BaseValue,
+ PseudoDtor->getDestroyedType().isVolatileQualified()),
+ ARCPreciseLifetime);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ EmitARCDestroyWeak(BaseValue);
+ break;
+ }
+ } else {
+ // C++ [expr.pseudo]p1:
+ // The result shall only be used as the operand for the function call
+ // operator (), and the result of such a call has type void. The only
+ // effect is the evaluation of the postfix-expression before the dot or
+ // arrow.
+ EmitScalarExpr(E->getCallee());
+ }
+
+ return RValue::get(nullptr);
+ }
+
+ llvm::Value *Callee = EmitScalarExpr(E->getCallee());
+ return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue,
+ TargetDecl);
+}
+
+LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
+ // Comma expressions just emit their LHS then their RHS as an l-value.
+ if (E->getOpcode() == BO_Comma) {
+ EmitIgnoredExpr(E->getLHS());
+ EnsureInsertPoint();
+ return EmitLValue(E->getRHS());
+ }
+
+ if (E->getOpcode() == BO_PtrMemD ||
+ E->getOpcode() == BO_PtrMemI)
+ return EmitPointerToDataMemberBinaryExpr(E);
+
+ assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
+
+ // Note that in all of these cases, __block variables need the RHS
+ // evaluated first just in case the variable gets moved by the RHS.
+
+ switch (getEvaluationKind(E->getType())) {
+ case TEK_Scalar: {
+ switch (E->getLHS()->getType().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ return EmitARCStoreStrong(E, /*ignored*/ false).first;
+
+ case Qualifiers::OCL_Autoreleasing:
+ return EmitARCStoreAutoreleasing(E).first;
+
+ // No reason to do any of these differently.
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Weak:
+ break;
+ }
+
+ RValue RV = EmitAnyExpr(E->getRHS());
+ LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
+ EmitStoreThroughLValue(RV, LV);
+ return LV;
+ }
+
+ case TEK_Complex:
+ return EmitComplexAssignmentLValue(E);
+
+ case TEK_Aggregate:
+ return EmitAggExprToLValue(E);
+ }
+ llvm_unreachable("bad evaluation kind");
+}
+
+LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
+ RValue RV = EmitCallExpr(E);
+
+ if (!RV.isScalar())
+ return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
+ AlignmentSource::Decl);
+
+ assert(E->getCallReturnType(getContext())->isReferenceType() &&
+ "Can't have a scalar return unless the return type is a "
+ "reference type!");
+
+ return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
+}
+
+LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
+ // FIXME: This shouldn't require another copy.
+ return EmitAggExprToLValue(E);
+}
+
+LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
+ assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
+ && "binding l-value to type which needs a temporary");
+ AggValueSlot Slot = CreateAggTemp(E->getType());
+ EmitCXXConstructExpr(E, Slot);
+ return MakeAddrLValue(Slot.getAddress(), E->getType(),
+ AlignmentSource::Decl);
+}
+
+LValue
+CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
+ return MakeNaturalAlignAddrLValue(EmitCXXTypeidExpr(E), E->getType());
+}
+
+Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
+ return Builder.CreateElementBitCast(CGM.GetAddrOfUuidDescriptor(E),
+ ConvertType(E->getType()));
+}
+
+LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
+ return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
+ AlignmentSource::Decl);
+}
+
+LValue
+CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
+ AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
+ Slot.setExternallyDestructed();
+ EmitAggExpr(E->getSubExpr(), Slot);
+ EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
+ return MakeAddrLValue(Slot.getAddress(), E->getType(),
+ AlignmentSource::Decl);
+}
+
+LValue
+CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
+ AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
+ EmitLambdaExpr(E, Slot);
+ return MakeAddrLValue(Slot.getAddress(), E->getType(),
+ AlignmentSource::Decl);
+}
+
+LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
+ RValue RV = EmitObjCMessageExpr(E);
+
+ if (!RV.isScalar())
+ return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
+ AlignmentSource::Decl);
+
+ assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
+ "Can't have a scalar return unless the return type is a "
+ "reference type!");
+
+ return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
+}
+
+LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
+ Address V =
+ CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
+ return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl);
+}
+
+llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
+}
+
+LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
+ Ivar, CVRQualifiers);
+}
+
+LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
+ // FIXME: A lot of the code below could be shared with EmitMemberExpr.
+ llvm::Value *BaseValue = nullptr;
+ const Expr *BaseExpr = E->getBase();
+ Qualifiers BaseQuals;
+ QualType ObjectTy;
+ if (E->isArrow()) {
+ BaseValue = EmitScalarExpr(BaseExpr);
+ ObjectTy = BaseExpr->getType()->getPointeeType();
+ BaseQuals = ObjectTy.getQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ BaseValue = BaseLV.getPointer();
+ ObjectTy = BaseExpr->getType();
+ BaseQuals = ObjectTy.getQualifiers();
+ }
+
+ LValue LV =
+ EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
+ BaseQuals.getCVRQualifiers());
+ setObjCGCLValueClass(getContext(), E, LV);
+ return LV;
+}
+
+LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
+ // We can only get an l-value for a statement expression that returns an
+ // aggregate type.
+ RValue RV = EmitAnyExprToTemp(E);
+ return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
+ AlignmentSource::Decl);
+}
+
+RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
+ const CallExpr *E, ReturnValueSlot ReturnValue,
+ CGCalleeInfo CalleeInfo, llvm::Value *Chain) {
+ // Get the actual function type. The callee type will always be a pointer to
+ // function type or a block pointer type.
+ assert(CalleeType->isFunctionPointerType() &&
+ "Call must have function pointer type!");
+
+ // Preserve the non-canonical function type because things like exception
+ // specifications disappear in the canonical type. That information is useful
+ // to drive the generation of more accurate code for this call later on.
+ const FunctionProtoType *NonCanonicalFTP = CalleeType->getAs<PointerType>()
+ ->getPointeeType()
+ ->getAs<FunctionProtoType>();
+
+ const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
+
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
+ // We can only guarantee that a function is called from the correct
+ // context/function based on the appropriate target attributes, so only
+ // check in the case where we have both always_inline and target
+ // attributes. Otherwise we could be making a conditional call after a
+ // check for the proper cpu features (and it won't cause code generation
+ // issues due to function-based code generation).
+ if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
+ TargetDecl->hasAttr<TargetAttr>())
+ checkTargetFeatures(E, FD);
+
+ CalleeType = getContext().getCanonicalType(CalleeType);
+
+ const auto *FnType =
+ cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
+
+ if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function) &&
+ (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
+ if (llvm::Constant *PrefixSig =
+ CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
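+ // With -fsanitize=function, instrumented functions carry prologue data
+ // of the form { signature, rtti } at their entry point; if the bytes at
+ // the callee match the signature, compare the stored RTTI against the
+ // RTTI of the static callee type (an overview of the check emitted below).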
+ SanitizerScope SanScope(this);
+ llvm::Constant *FTRTTIConst =
+ CGM.GetAddrOfRTTIDescriptor(QualType(FnType, 0), /*ForEH=*/true);
+ llvm::Type *PrefixStructTyElems[] = {
+ PrefixSig->getType(),
+ FTRTTIConst->getType()
+ };
+ llvm::StructType *PrefixStructTy = llvm::StructType::get(
+ CGM.getLLVMContext(), PrefixStructTyElems, /*isPacked=*/true);
+
+ llvm::Value *CalleePrefixStruct = Builder.CreateBitCast(
+ Callee, llvm::PointerType::getUnqual(PrefixStructTy));
+ llvm::Value *CalleeSigPtr =
+ Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 0);
+ llvm::Value *CalleeSig =
+ Builder.CreateAlignedLoad(CalleeSigPtr, getIntAlign());
+ llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
+
+ llvm::BasicBlock *Cont = createBasicBlock("cont");
+ llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
+ Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
+
+ EmitBlock(TypeCheck);
+ llvm::Value *CalleeRTTIPtr =
+ Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 1);
+ llvm::Value *CalleeRTTI =
+ Builder.CreateAlignedLoad(CalleeRTTIPtr, getPointerAlign());
+ llvm::Value *CalleeRTTIMatch =
+ Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst);
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(E->getLocStart()),
+ EmitCheckTypeDescriptor(CalleeType)
+ };
+ EmitCheck(std::make_pair(CalleeRTTIMatch, SanitizerKind::Function),
+ "function_type_mismatch", StaticData, Callee);
+
+ Builder.CreateBr(Cont);
+ EmitBlock(Cont);
+ }
+ }
+
+ // If we are checking indirect calls and this call is indirect, check that the
+ // function pointer is a member of the bit set for the function type.
+ if (SanOpts.has(SanitizerKind::CFIICall) &&
+ (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
+ SanitizerScope SanScope(this);
+
+ llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));
+ llvm::Value *BitSetName = llvm::MetadataAsValue::get(getLLVMContext(), MD);
+
+ llvm::Value *CastedCallee = Builder.CreateBitCast(Callee, Int8PtrTy);
+ llvm::Value *BitSetTest =
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::bitset_test),
+ {CastedCallee, BitSetName});
+
+ auto TypeId = CGM.CreateCfiIdForTypeMetadata(MD);
+ if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && TypeId) {
+ EmitCfiSlowPathCheck(BitSetTest, TypeId, CastedCallee);
+ } else {
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(E->getLocStart()),
+ EmitCheckTypeDescriptor(QualType(FnType, 0)),
+ };
+ EmitCheck(std::make_pair(BitSetTest, SanitizerKind::CFIICall),
+ "cfi_bad_icall", StaticData, CastedCallee);
+ }
+ }
+
+ CallArgList Args;
+ if (Chain)
+ Args.add(RValue::get(Builder.CreateBitCast(Chain, CGM.VoidPtrTy)),
+ CGM.getContext().VoidPtrTy);
+ EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arguments(),
+ E->getDirectCallee(), /*ParamsToSkip*/ 0);
+
+ const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
+ Args, FnType, /*isChainCall=*/Chain);
+
+ // C99 6.5.2.2p6:
+ // If the expression that denotes the called function has a type
+ // that does not include a prototype, [the default argument
+ // promotions are performed]. If the number of arguments does not
+ // equal the number of parameters, the behavior is undefined. If
+ // the function is defined with a type that includes a prototype,
+ // and either the prototype ends with an ellipsis (, ...) or the
+ // types of the arguments after promotion are not compatible with
+ // the types of the parameters, the behavior is undefined. If the
+ // function is defined with a type that does not include a
+ // prototype, and the types of the arguments after promotion are
+ // not compatible with those of the parameters after promotion,
+ // the behavior is undefined [except in some trivial cases].
+ // That is, in the general case, we should assume that a call
+ // through an unprototyped function type works like a *non-variadic*
+ // call. The way we make this work is to cast to the exact type
+ // of the promoted arguments.
+ //
+ // Chain calls use this same code path to add the invisible chain parameter
+ // to the function type.
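+  //
+  // As a hypothetical example of the unprototyped case:
+  //   void f();        /* no prototype */
+  //   f(1, 2.0);       /* emitted as a call through a void(int, double)* */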
+ if (isa<FunctionNoProtoType>(FnType) || Chain) {
+ llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
+ CalleeTy = CalleeTy->getPointerTo();
+ Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
+ }
+
+ return EmitCall(FnInfo, Callee, ReturnValue, Args,
+ CGCalleeInfo(NonCanonicalFTP, TargetDecl));
+}
+
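+/// Emit a pointer-to-data-member binary expression. Illustratively, given
+/// "int S::*pm", an access "s.*pm" (BO_PtrMemD) takes the base from the
+/// l-value of s, while "ps->*pm" (BO_PtrMemI) loads the pointer ps; the
+/// offset carried by pm is then applied to form the member l-value.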
+LValue CodeGenFunction::
+EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
+ Address BaseAddr = Address::invalid();
+ if (E->getOpcode() == BO_PtrMemI) {
+ BaseAddr = EmitPointerWithAlignment(E->getLHS());
+ } else {
+ BaseAddr = EmitLValue(E->getLHS()).getAddress();
+ }
+
+ llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
+
+ const MemberPointerType *MPT
+ = E->getRHS()->getType()->getAs<MemberPointerType>();
+
+ AlignmentSource AlignSource;
+ Address MemberAddr =
+ EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT,
+ &AlignSource);
+
+ return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), AlignSource);
+}
+
+/// Given the address of a temporary variable, produce an r-value of
+/// its type.
+RValue CodeGenFunction::convertTempToRValue(Address addr,
+ QualType type,
+ SourceLocation loc) {
+ LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
+ switch (getEvaluationKind(type)) {
+ case TEK_Complex:
+ return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
+ case TEK_Aggregate:
+ return lvalue.asAggregateRValue();
+ case TEK_Scalar:
+ return RValue::get(EmitLoadOfScalar(lvalue, loc));
+ }
+ llvm_unreachable("bad evaluation kind");
+}
+
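+/// Attach !fpmath accuracy metadata to a floating-point instruction. For
+/// example, a caller might request 2.5 ULPs of permitted error, letting
+/// later optimizations substitute a faster, less precise expansion.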
+void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
+ assert(Val->getType()->isFPOrFPVectorTy());
+ if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
+ return;
+
+ llvm::MDBuilder MDHelper(getLLVMContext());
+ llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
+
+ cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
+}
+
+namespace {
+ struct LValueOrRValue {
+ LValue LV;
+ RValue RV;
+ };
+}
+
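+/// Emit a pseudo-object expression. Illustratively, an Objective-C property
+/// access such as "obj.count += 1" is a PseudoObjectExpr whose semantic form
+/// rewrites the access as getter/setter message sends bound through
+/// OpaqueValueExprs; this walks those semantics and yields the result.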
+static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
+ const PseudoObjectExpr *E,
+ bool forLValue,
+ AggValueSlot slot) {
+ SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
+
+ // Find the result expression, if any.
+ const Expr *resultExpr = E->getResultExpr();
+ LValueOrRValue result;
+
+ for (PseudoObjectExpr::const_semantics_iterator
+ i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
+ const Expr *semantic = *i;
+
+ // If this semantic expression is an opaque value, bind it
+ // to the result of its source expression.
+ if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
+
+ // If this is the result expression, we may need to evaluate
+ // directly into the slot.
+ typedef CodeGenFunction::OpaqueValueMappingData OVMA;
+ OVMA opaqueData;
+ if (ov == resultExpr && ov->isRValue() && !forLValue &&
+ CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
+ CGF.EmitAggExpr(ov->getSourceExpr(), slot);
+
+ LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
+ AlignmentSource::Decl);
+ opaqueData = OVMA::bind(CGF, ov, LV);
+ result.RV = slot.asRValue();
+
+ // Otherwise, emit as normal.
+ } else {
+ opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
+
+ // If this is the result, also evaluate the result now.
+ if (ov == resultExpr) {
+ if (forLValue)
+ result.LV = CGF.EmitLValue(ov);
+ else
+ result.RV = CGF.EmitAnyExpr(ov, slot);
+ }
+ }
+
+ opaques.push_back(opaqueData);
+
+ // Otherwise, if the expression is the result, evaluate it
+ // and remember the result.
+ } else if (semantic == resultExpr) {
+ if (forLValue)
+ result.LV = CGF.EmitLValue(semantic);
+ else
+ result.RV = CGF.EmitAnyExpr(semantic, slot);
+
+ // Otherwise, evaluate the expression in an ignored context.
+ } else {
+ CGF.EmitIgnoredExpr(semantic);
+ }
+ }
+
+ // Unbind all the opaques now.
+ for (unsigned i = 0, e = opaques.size(); i != e; ++i)
+ opaques[i].unbind(CGF);
+
+ return result;
+}
+
+RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
+ AggValueSlot slot) {
+ return emitPseudoObjectExpr(*this, E, false, slot).RV;
+}
+
+LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
+ return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
new file mode 100644
index 0000000..20838db
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
@@ -0,0 +1,1538 @@
+//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Aggregate Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGObjCRuntime.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Intrinsics.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Aggregate Expression Emitter
+//===----------------------------------------------------------------------===//
+
+namespace {
+class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ AggValueSlot Dest;
+ bool IsResultUnused;
+
+ /// We want to use 'dest' as the return slot except under two
+ /// conditions:
+ /// - The destination slot requires garbage collection, so we
+ /// need to use the GC API.
+ /// - The destination slot is potentially aliased.
+ bool shouldUseDestForReturnSlot() const {
+ return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
+ }
+
+ ReturnValueSlot getReturnValueSlot() const {
+ if (!shouldUseDestForReturnSlot())
+ return ReturnValueSlot();
+
+ return ReturnValueSlot(Dest.getAddress(), Dest.isVolatile(),
+ IsResultUnused);
+ }
+
+ AggValueSlot EnsureSlot(QualType T) {
+ if (!Dest.isIgnored()) return Dest;
+ return CGF.CreateAggTemp(T, "agg.tmp.ensured");
+ }
+ void EnsureDest(QualType T) {
+ if (!Dest.isIgnored()) return;
+ Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
+ }
+
+public:
+ AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
+ : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
+ IsResultUnused(IsResultUnused) { }
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+ /// EmitAggLoadOfLValue - Given an expression with aggregate type that
+  /// represents an lvalue, this method emits the address of the lvalue,
+ /// then loads the result into DestPtr.
+ void EmitAggLoadOfLValue(const Expr *E);
+
+ /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+ void EmitFinalDestCopy(QualType type, const LValue &src);
+ void EmitFinalDestCopy(QualType type, RValue src);
+ void EmitCopy(QualType type, const AggValueSlot &dest,
+ const AggValueSlot &src);
+
+ void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
+
+ void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
+ QualType elementType, InitListExpr *E);
+
+ AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
+ if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
+ return AggValueSlot::NeedsGCBarriers;
+ return AggValueSlot::DoesNotNeedGCBarriers;
+ }
+
+ bool TypeRequiresGCollection(QualType T);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ void Visit(Expr *E) {
+ ApplyDebugLocation DL(CGF, E);
+ StmtVisitor<AggExprEmitter>::Visit(E);
+ }
+
+ void VisitStmt(Stmt *S) {
+ CGF.ErrorUnsupported(S, "aggregate expression");
+ }
+ void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
+ void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
+ Visit(GE->getResultExpr());
+ }
+ void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
+ void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
+ return Visit(E->getReplacement());
+ }
+
+ // l-values.
+ void VisitDeclRefExpr(DeclRefExpr *E) {
+ // For aggregates, we should always be able to emit the variable
+ // as an l-value unless it's a reference. This is due to the fact
+ // that we can't actually ever see a normal l2r conversion on an
+ // aggregate in C++, and in C there's no language standard
+ // actively preventing us from listing variables in the captures
+ // list of a block.
+ if (E->getDecl()->getType()->isReferenceType()) {
+ if (CodeGenFunction::ConstantEmission result
+ = CGF.tryEmitAsConstant(E)) {
+ EmitFinalDestCopy(E->getType(), result.getReferenceLValue(CGF, E));
+ return;
+ }
+ }
+
+ EmitAggLoadOfLValue(E);
+ }
+
+ void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
+ void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
+ void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
+ void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
+ void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+ void VisitPredefinedExpr(const PredefinedExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+
+ // Operators.
+ void VisitCastExpr(CastExpr *E);
+ void VisitCallExpr(const CallExpr *E);
+ void VisitStmtExpr(const StmtExpr *E);
+ void VisitBinaryOperator(const BinaryOperator *BO);
+ void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
+ void VisitBinAssign(const BinaryOperator *E);
+ void VisitBinComma(const BinaryOperator *E);
+
+ void VisitObjCMessageExpr(ObjCMessageExpr *E);
+ void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+
+ void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
+ void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
+ void VisitChooseExpr(const ChooseExpr *CE);
+ void VisitInitListExpr(InitListExpr *E);
+ void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
+ void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
+ void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ Visit(DAE->getExpr());
+ }
+ void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
+ CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
+ Visit(DIE->getExpr());
+ }
+ void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
+ void VisitCXXConstructExpr(const CXXConstructExpr *E);
+ void VisitLambdaExpr(LambdaExpr *E);
+ void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
+ void VisitExprWithCleanups(ExprWithCleanups *E);
+ void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
+ void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
+ void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
+ void VisitOpaqueValueExpr(OpaqueValueExpr *E);
+
+ void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ if (E->isGLValue()) {
+ LValue LV = CGF.EmitPseudoObjectLValue(E);
+ return EmitFinalDestCopy(E->getType(), LV);
+ }
+
+ CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
+ }
+
+ void VisitVAArgExpr(VAArgExpr *E);
+
+ void EmitInitializationToLValue(Expr *E, LValue Address);
+ void EmitNullInitializationToLValue(LValue Address);
+ // case Expr::ChooseExprClass:
+ void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
+ void VisitAtomicExpr(AtomicExpr *E) {
+ RValue Res = CGF.EmitAtomicExpr(E);
+ EmitFinalDestCopy(E->getType(), Res);
+ }
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitAggLoadOfLValue - Given an expression with aggregate type that
+/// represents an lvalue, this method emits the address of the lvalue,
+/// then loads the result into DestPtr.
+void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
+ LValue LV = CGF.EmitLValue(E);
+
+ // If the type of the l-value is atomic, then do an atomic load.
+ if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
+ CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
+ return;
+ }
+
+ EmitFinalDestCopy(E->getType(), LV);
+}
+
+/// \brief True if the given aggregate type requires special GC API calls.
+bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
+ // Only record types have members that might require garbage collection.
+ const RecordType *RecordTy = T->getAs<RecordType>();
+ if (!RecordTy) return false;
+
+ // Don't mess with non-trivial C++ types.
+ RecordDecl *Record = RecordTy->getDecl();
+ if (isa<CXXRecordDecl>(Record) &&
+ (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
+ !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
+ return false;
+
+ // Check whether the type has an object member.
+ return Record->hasObjectMember();
+}
+
+/// \brief Perform the final move to DestPtr if for some reason
+/// getReturnValueSlot() didn't use it directly.
+///
+/// The idea is that you do something like this:
+/// RValue Result = EmitSomething(..., getReturnValueSlot());
+/// EmitMoveFromReturnSlot(E, Result);
+///
+/// If nothing interferes, this will cause the result to be emitted
+/// directly into the return value slot. Otherwise, a final move
+/// will be performed.
+void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) {
+ if (shouldUseDestForReturnSlot()) {
+ // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
+ // The possibility of undef rvalues complicates that a lot,
+ // though, so we can't really assert.
+ return;
+ }
+
+ // Otherwise, copy from there to the destination.
+ assert(Dest.getPointer() != src.getAggregatePointer());
+ EmitFinalDestCopy(E->getType(), src);
+}
+
+/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
+ assert(src.isAggregate() && "value must be aggregate value!");
+ LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
+ EmitFinalDestCopy(type, srcLV);
+}
+
+/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) {
+ // If Dest is ignored, then we're evaluating an aggregate expression
+ // in a context that doesn't care about the result. Note that loads
+ // from volatile l-values force the existence of a non-ignored
+ // destination.
+ if (Dest.isIgnored())
+ return;
+
+ AggValueSlot srcAgg =
+ AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
+ needsGC(type), AggValueSlot::IsAliased);
+ EmitCopy(type, Dest, srcAgg);
+}
+
+/// Perform a copy from the source into the destination.
+///
+/// \param type - the type of the aggregate being copied; qualifiers are
+/// ignored
+void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
+ const AggValueSlot &src) {
+ if (dest.requiresGCollection()) {
+ CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
+ llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
+ CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
+ dest.getAddress(),
+ src.getAddress(),
+ size);
+ return;
+ }
+
+ // If the result of the assignment is used, copy the LHS there also.
+ // It's volatile if either side is. Use the minimum alignment of
+ // the two sides.
+ CGF.EmitAggregateCopy(dest.getAddress(), src.getAddress(), type,
+ dest.isVolatile() || src.isVolatile());
+}
+
+/// \brief Emit the initializer for a std::initializer_list initialized with a
+/// real initializer list.
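+///
+/// Illustratively, for "std::initializer_list<int> il = {1, 2, 3};" this
+/// emits the backing array and then fills the record with either
+/// {&arr[0], &arr[3]} (start/end layout) or {&arr[0], 3} (start/length
+/// layout), whichever matches the library's field types.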
+void
+AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
+ // Emit an array containing the elements. The array is externally destructed
+ // if the std::initializer_list object is.
+ ASTContext &Ctx = CGF.getContext();
+ LValue Array = CGF.EmitLValue(E->getSubExpr());
+ assert(Array.isSimple() && "initializer_list array not a simple lvalue");
+ Address ArrayPtr = Array.getAddress();
+
+ const ConstantArrayType *ArrayType =
+ Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
+ assert(ArrayType && "std::initializer_list constructed from non-array");
+
+ // FIXME: Perform the checks on the field types in SemaInit.
+ RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl::field_iterator Field = Record->field_begin();
+ if (Field == Record->field_end()) {
+ CGF.ErrorUnsupported(E, "weird std::initializer_list");
+ return;
+ }
+
+ // Start pointer.
+ if (!Field->getType()->isPointerType() ||
+ !Ctx.hasSameType(Field->getType()->getPointeeType(),
+ ArrayType->getElementType())) {
+ CGF.ErrorUnsupported(E, "weird std::initializer_list");
+ return;
+ }
+
+ AggValueSlot Dest = EnsureSlot(E->getType());
+ LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
+ LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
+ llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
+ llvm::Value *IdxStart[] = { Zero, Zero };
+ llvm::Value *ArrayStart =
+ Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxStart, "arraystart");
+ CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
+ ++Field;
+
+ if (Field == Record->field_end()) {
+ CGF.ErrorUnsupported(E, "weird std::initializer_list");
+ return;
+ }
+
+ llvm::Value *Size = Builder.getInt(ArrayType->getSize());
+ LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
+ if (Field->getType()->isPointerType() &&
+ Ctx.hasSameType(Field->getType()->getPointeeType(),
+ ArrayType->getElementType())) {
+ // End pointer.
+ llvm::Value *IdxEnd[] = { Zero, Size };
+ llvm::Value *ArrayEnd =
+ Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxEnd, "arrayend");
+ CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
+ } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
+ // Length.
+ CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
+ } else {
+ CGF.ErrorUnsupported(E, "weird std::initializer_list");
+ return;
+ }
+}
+
+/// \brief Determine if E is a trivial array filler, that is, one that is
+/// equivalent to zero-initialization.
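+///
+/// For example, in "int a[8] = {1, 2};" the six remaining elements carry an
+/// implicit value-initialization filler, which is equivalent to
+/// zero-initialization and so needs no per-element stores into zeroed memory.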
+static bool isTrivialFiller(Expr *E) {
+ if (!E)
+ return true;
+
+ if (isa<ImplicitValueInitExpr>(E))
+ return true;
+
+ if (auto *ILE = dyn_cast<InitListExpr>(E)) {
+ if (ILE->getNumInits())
+ return false;
+ return isTrivialFiller(ILE->getArrayFiller());
+ }
+
+ if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
+ return Cons->getConstructor()->isDefaultConstructor() &&
+ Cons->getConstructor()->isTrivial();
+
+ // FIXME: Are there other cases where we can avoid emitting an initializer?
+ return false;
+}
+
+/// \brief Emit initialization of an array from an initializer list.
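+///
+/// Illustratively, "S a[4] = { S(1) };" with a non-trivially-destructible S
+/// initializes the first element directly and fills the rest in a loop,
+/// keeping an end-of-init pointer current so that a throwing constructor
+/// destroys only the already-constructed elements.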
+void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
+ QualType elementType, InitListExpr *E) {
+ uint64_t NumInitElements = E->getNumInits();
+
+ uint64_t NumArrayElements = AType->getNumElements();
+ assert(NumInitElements <= NumArrayElements);
+
+ // DestPtr is an array*. Construct an elementType* by drilling
+ // down a level.
+ llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
+ llvm::Value *indices[] = { zero, zero };
+ llvm::Value *begin =
+ Builder.CreateInBoundsGEP(DestPtr.getPointer(), indices, "arrayinit.begin");
+
+ CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
+ CharUnits elementAlign =
+ DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
+
+ // Exception safety requires us to destroy all the
+ // already-constructed members if an initializer throws.
+ // For that, we'll need an EH cleanup.
+ QualType::DestructionKind dtorKind = elementType.isDestructedType();
+ Address endOfInit = Address::invalid();
+ EHScopeStack::stable_iterator cleanup;
+ llvm::Instruction *cleanupDominator = nullptr;
+ if (CGF.needsEHCleanup(dtorKind)) {
+ // In principle we could tell the cleanup where we are more
+ // directly, but the control flow can get so varied here that it
+ // would actually be quite complex. Therefore we go through an
+ // alloca.
+ endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
+ "arrayinit.endOfInit");
+ cleanupDominator = Builder.CreateStore(begin, endOfInit);
+ CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
+ elementAlign,
+ CGF.getDestroyer(dtorKind));
+ cleanup = CGF.EHStack.stable_begin();
+
+ // Otherwise, remember that we didn't need a cleanup.
+ } else {
+ dtorKind = QualType::DK_none;
+ }
+
+ llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
+
+ // The 'current element to initialize'. The invariants on this
+ // variable are complicated. Essentially, after each iteration of
+ // the loop, it points to the last initialized element, except
+ // that it points to the beginning of the array before any
+ // elements have been initialized.
+ llvm::Value *element = begin;
+
+ // Emit the explicit initializers.
+ for (uint64_t i = 0; i != NumInitElements; ++i) {
+ // Advance to the next element.
+ if (i > 0) {
+ element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");
+
+ // Tell the cleanup that it needs to destroy up to this
+ // element. TODO: some of these stores can be trivially
+ // observed to be unnecessary.
+ if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
+ }
+
+ LValue elementLV =
+ CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
+ EmitInitializationToLValue(E->getInit(i), elementLV);
+ }
+
+ // Check whether there's a non-trivial array-fill expression.
+ Expr *filler = E->getArrayFiller();
+ bool hasTrivialFiller = isTrivialFiller(filler);
+
+ // Any remaining elements need to be zero-initialized, possibly
+  // using the filler expression.  We can skip this if we're
+ // emitting to zeroed memory.
+ if (NumInitElements != NumArrayElements &&
+ !(Dest.isZeroed() && hasTrivialFiller &&
+ CGF.getTypes().isZeroInitializable(elementType))) {
+
+ // Use an actual loop. This is basically
+ // do { *array++ = filler; } while (array != end);
+
+ // Advance to the start of the rest of the array.
+ if (NumInitElements) {
+ element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
+ if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
+ }
+
+ // Compute the end of the array.
+ llvm::Value *end = Builder.CreateInBoundsGEP(begin,
+ llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
+ "arrayinit.end");
+
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
+
+ // Jump into the body.
+ CGF.EmitBlock(bodyBB);
+ llvm::PHINode *currentElement =
+ Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
+ currentElement->addIncoming(element, entryBB);
+
+ // Emit the actual filler expression.
+ LValue elementLV =
+ CGF.MakeAddrLValue(Address(currentElement, elementAlign), elementType);
+ if (filler)
+ EmitInitializationToLValue(filler, elementLV);
+ else
+ EmitNullInitializationToLValue(elementLV);
+
+ // Move on to the next element.
+ llvm::Value *nextElement =
+ Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");
+
+ // Tell the EH cleanup that we finished with the last element.
+ if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);
+
+ // Leave the loop if we're done.
+ llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
+ "arrayinit.done");
+ llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
+ Builder.CreateCondBr(done, endBB, bodyBB);
+ currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
+
+ CGF.EmitBlock(endBB);
+ }
+
+ // Leave the partial-array cleanup if we entered one.
+ if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
+}
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
+ Visit(E->GetTemporaryExpr());
+}
+
+void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
+ EmitFinalDestCopy(e->getType(), CGF.getOpaqueLValueMapping(e));
+}
+
+void
+AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ if (Dest.isPotentiallyAliased() &&
+ E->getType().isPODType(CGF.getContext())) {
+ // For a POD type, just emit a load of the lvalue + a copy, because our
+ // compound literal might alias the destination.
+ EmitAggLoadOfLValue(E);
+ return;
+ }
+
+ AggValueSlot Slot = EnsureSlot(E->getType());
+ CGF.EmitAggExpr(E->getInitializer(), Slot);
+}
+
+/// Attempt to look through various unimportant expressions to find a
+/// cast of the given kind.
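+///
+/// For instance, the atomic cast handling below uses this to collapse a
+/// CK_AtomicToNonAtomic cast wrapped (possibly in parens and no-op casts)
+/// around a matching CK_NonAtomicToAtomic cast, and then emits the
+/// underlying operand directly.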
+static Expr *findPeephole(Expr *op, CastKind kind) {
+ while (true) {
+ op = op->IgnoreParens();
+ if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
+ if (castE->getCastKind() == kind)
+ return castE->getSubExpr();
+ if (castE->getCastKind() == CK_NoOp)
+ continue;
+ }
+ return nullptr;
+ }
+}
+
+void AggExprEmitter::VisitCastExpr(CastExpr *E) {
+ if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
+ CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
+ switch (E->getCastKind()) {
+ case CK_Dynamic: {
+ // FIXME: Can this actually happen? We have no test coverage for it.
+ assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
+ LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
+ CodeGenFunction::TCK_Load);
+ // FIXME: Do we also need to handle property references here?
+ if (LV.isSimple())
+ CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
+ else
+ CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
+
+ if (!Dest.isIgnored())
+ CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
+ break;
+ }
+
+ case CK_ToUnion: {
+ // Evaluate even if the destination is ignored.
+ if (Dest.isIgnored()) {
+ CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
+ /*ignoreResult=*/true);
+ break;
+ }
+
+ // GCC union extension
+ QualType Ty = E->getSubExpr()->getType();
+ Address CastPtr =
+ Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty));
+ EmitInitializationToLValue(E->getSubExpr(),
+ CGF.MakeAddrLValue(CastPtr, Ty));
+ break;
+ }
+
+ case CK_DerivedToBase:
+ case CK_BaseToDerived:
+ case CK_UncheckedDerivedToBase: {
+ llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
+ "should have been unpacked before we got here");
+ }
+
+ case CK_NonAtomicToAtomic:
+ case CK_AtomicToNonAtomic: {
+ bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);
+
+ // Determine the atomic and value types.
+ QualType atomicType = E->getSubExpr()->getType();
+ QualType valueType = E->getType();
+ if (isToAtomic) std::swap(atomicType, valueType);
+
+ assert(atomicType->isAtomicType());
+ assert(CGF.getContext().hasSameUnqualifiedType(valueType,
+ atomicType->castAs<AtomicType>()->getValueType()));
+
+ // Just recurse normally if we're ignoring the result or the
+ // atomic type doesn't change representation.
+ if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
+ return Visit(E->getSubExpr());
+ }
+
+ CastKind peepholeTarget =
+ (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
+
+ // These two cases are reverses of each other; try to peephole them.
+ if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
+ assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
+ E->getType()) &&
+ "peephole significantly changed types?");
+ return Visit(op);
+ }
+
+ // If we're converting an r-value of non-atomic type to an r-value
+ // of atomic type, just emit directly into the relevant sub-object.
+ if (isToAtomic) {
+ AggValueSlot valueDest = Dest;
+ if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
+        // Zero-initialize. (Strictly speaking, we only need to initialize
+ // the padding at the end, but this is simpler.)
+ if (!Dest.isZeroed())
+ CGF.EmitNullInitialization(Dest.getAddress(), atomicType);
+
+ // Build a GEP to refer to the subobject.
+ Address valueAddr =
+ CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0,
+ CharUnits());
+ valueDest = AggValueSlot::forAddr(valueAddr,
+ valueDest.getQualifiers(),
+ valueDest.isExternallyDestructed(),
+ valueDest.requiresGCollection(),
+ valueDest.isPotentiallyAliased(),
+ AggValueSlot::IsZeroed);
+ }
+
+ CGF.EmitAggExpr(E->getSubExpr(), valueDest);
+ return;
+ }
+
+ // Otherwise, we're converting an atomic type to a non-atomic type.
+ // Make an atomic temporary, emit into that, and then copy the value out.
+ AggValueSlot atomicSlot =
+ CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
+ CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
+
+ Address valueAddr =
+ Builder.CreateStructGEP(atomicSlot.getAddress(), 0, CharUnits());
+ RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
+ return EmitFinalDestCopy(valueType, rvalue);
+ }
+
+ case CK_LValueToRValue:
+ // If we're loading from a volatile type, force the destination
+ // into existence.
+ if (E->getSubExpr()->getType().isVolatileQualified()) {
+ EnsureDest(E->getType());
+ return Visit(E->getSubExpr());
+ }
+
+ // fallthrough
+
+ case CK_NoOp:
+ case CK_UserDefinedConversion:
+ case CK_ConstructorConversion:
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
+ E->getType()) &&
+ "Implicit cast types must be compatible");
+ Visit(E->getSubExpr());
+ break;
+
+ case CK_LValueBitCast:
+ llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
+
+ case CK_Dependent:
+ case CK_BitCast:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ case CK_BuiltinFnToFnPtr:
+ case CK_ZeroToOCLEvent:
+ case CK_AddressSpaceConversion:
+ llvm_unreachable("cast kind invalid for aggregate types");
+ }
+}
+
+void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
+ EmitAggLoadOfLValue(E);
+ return;
+ }
+
+ RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
+ EmitMoveFromReturnSlot(E, RV);
+}
+
+void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
+ EmitMoveFromReturnSlot(E, RV);
+}
+
+void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitIgnoredExpr(E->getLHS());
+ Visit(E->getRHS());
+}
+
+void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ CodeGenFunction::StmtExprEvaluation eval(CGF);
+ CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
+}
+
+void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
+ VisitPointerToDataMemberBinaryOperator(E);
+ else
+ CGF.ErrorUnsupported(E, "aggregate binary expression");
+}
+
+void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
+ const BinaryOperator *E) {
+ LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
+ EmitFinalDestCopy(E->getType(), LV);
+}
+
+/// Is the value of the given expression possibly a reference to or
+/// into a __block variable?
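+///
+/// Illustratively, for "__block S x; x = makeS();" (or an assignment into
+/// "x.field"), VisitBinAssign evaluates the RHS first, since a block copy
+/// triggered during the call could otherwise move x's storage out from under
+/// the store.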
+static bool isBlockVarRef(const Expr *E) {
+ // Make sure we look through parens.
+ E = E->IgnoreParens();
+
+ // Check for a direct reference to a __block variable.
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
+ return (var && var->hasAttr<BlocksAttr>());
+ }
+
+ // More complicated stuff.
+
+ // Binary operators.
+ if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
+ // For an assignment or pointer-to-member operation, just care
+ // about the LHS.
+ if (op->isAssignmentOp() || op->isPtrMemOp())
+ return isBlockVarRef(op->getLHS());
+
+ // For a comma, just care about the RHS.
+ if (op->getOpcode() == BO_Comma)
+ return isBlockVarRef(op->getRHS());
+
+ // FIXME: pointer arithmetic?
+ return false;
+
+ // Check both sides of a conditional operator.
+ } else if (const AbstractConditionalOperator *op
+ = dyn_cast<AbstractConditionalOperator>(E)) {
+ return isBlockVarRef(op->getTrueExpr())
+ || isBlockVarRef(op->getFalseExpr());
+
+ // OVEs are required to support BinaryConditionalOperators.
+ } else if (const OpaqueValueExpr *op
+ = dyn_cast<OpaqueValueExpr>(E)) {
+ if (const Expr *src = op->getSourceExpr())
+ return isBlockVarRef(src);
+
+ // Casts are necessary to get things like (*(int*)&var) = foo().
+ // We don't really care about the kind of cast here, except
+ // we don't want to look through l2r casts, because it's okay
+ // to get the *value* in a __block variable.
+ } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
+ if (cast->getCastKind() == CK_LValueToRValue)
+ return false;
+ return isBlockVarRef(cast->getSubExpr());
+
+ // Handle unary operators. Again, just aggressively look through
+ // it, ignoring the operation.
+ } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
+ return isBlockVarRef(uop->getSubExpr());
+
+ // Look into the base of a field access.
+ } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
+ return isBlockVarRef(mem->getBase());
+
+ // Look into the base of a subscript.
+ } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
+ return isBlockVarRef(sub->getBase());
+ }
+
+ return false;
+}
+
+void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ // For an assignment to work, the value on the right has
+ // to be compatible with the value on the left.
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
+ E->getRHS()->getType())
+ && "Invalid assignment");
+
+ // If the LHS might be a __block variable, and the RHS can
+ // potentially cause a block copy, we need to evaluate the RHS first
+ // so that the assignment goes the right place.
+ // This is pretty semantically fragile.
+ if (isBlockVarRef(E->getLHS()) &&
+ E->getRHS()->HasSideEffects(CGF.getContext())) {
+ // Ensure that we have a destination, and evaluate the RHS into that.
+ EnsureDest(E->getRHS()->getType());
+ Visit(E->getRHS());
+
+ // Now emit the LHS and copy into it.
+ LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
+
+ // That copy is an atomic copy if the LHS is atomic.
+ if (LHS.getType()->isAtomicType() ||
+ CGF.LValueIsSuitableForInlineAtomic(LHS)) {
+ CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
+ return;
+ }
+
+ EmitCopy(E->getLHS()->getType(),
+ AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
+ needsGC(E->getLHS()->getType()),
+ AggValueSlot::IsAliased),
+ Dest);
+ return;
+ }
+
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+
+ // If we have an atomic type, evaluate into the destination and then
+ // do an atomic copy.
+ if (LHS.getType()->isAtomicType() ||
+ CGF.LValueIsSuitableForInlineAtomic(LHS)) {
+ EnsureDest(E->getRHS()->getType());
+ Visit(E->getRHS());
+ CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
+ return;
+ }
+
+ // Codegen the RHS so that it stores directly into the LHS.
+ AggValueSlot LHSSlot =
+ AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
+ needsGC(E->getLHS()->getType()),
+ AggValueSlot::IsAliased);
+  // A non-volatile aggregate destination might have volatile members.
+ if (!LHSSlot.isVolatile() &&
+ CGF.hasVolatileMember(E->getLHS()->getType()))
+ LHSSlot.setVolatile(true);
+
+ CGF.EmitAggExpr(E->getRHS(), LHSSlot);
+
+ // Copy into the destination if the assignment isn't ignored.
+ EmitFinalDestCopy(E->getType(), LHS);
+}
+
+void AggExprEmitter::
+VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+
+ // Bind the common expression if necessary.
+ CodeGenFunction::OpaqueValueMapping binding(CGF, E);
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+ CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
+ CGF.getProfileCount(E));
+
+ // Save whether the destination's lifetime is externally managed.
+ bool isExternallyDestructed = Dest.isExternallyDestructed();
+
+ eval.begin(CGF);
+ CGF.EmitBlock(LHSBlock);
+ CGF.incrementProfileCounter(E);
+ Visit(E->getTrueExpr());
+ eval.end(CGF);
+
+ assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
+ CGF.Builder.CreateBr(ContBlock);
+
+ // If the result of an agg expression is unused, then the emission
+ // of the LHS might need to create a destination slot. That's fine
+ // with us, and we can safely emit the RHS into the same slot, but
+ // we shouldn't claim that it's already being destructed.
+ Dest.setExternallyDestructed(isExternallyDestructed);
+
+ eval.begin(CGF);
+ CGF.EmitBlock(RHSBlock);
+ Visit(E->getFalseExpr());
+ eval.end(CGF);
+
+ CGF.EmitBlock(ContBlock);
+}
+
+void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
+ Visit(CE->getChosenSubExpr());
+}
+
+void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
+ Address ArgValue = Address::invalid();
+ Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
+
+ if (!ArgPtr.isValid()) {
+ // If EmitVAArg fails, we fall back to the LLVM instruction.
+ llvm::Value *Val = Builder.CreateVAArg(ArgValue.getPointer(),
+ CGF.ConvertType(VE->getType()));
+ if (!Dest.isIgnored())
+ Builder.CreateStore(Val, Dest.getAddress());
+ return;
+ }
+
+ EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
+}
+
+void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ // Ensure that we have a slot, but if we already do, remember
+ // whether it was externally destructed.
+ bool wasExternallyDestructed = Dest.isExternallyDestructed();
+ EnsureDest(E->getType());
+
+ // We're going to push a destructor if there isn't already one.
+ Dest.setExternallyDestructed();
+
+ Visit(E->getSubExpr());
+
+ // Push that destructor we promised.
+ if (!wasExternallyDestructed)
+ CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
+}
+
+void
+AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ AggValueSlot Slot = EnsureSlot(E->getType());
+ CGF.EmitCXXConstructExpr(E, Slot);
+}
+
+void
+AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
+ AggValueSlot Slot = EnsureSlot(E->getType());
+ CGF.EmitLambdaExpr(E, Slot);
+}
+
+void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
+ CGF.enterFullExpression(E);
+ CodeGenFunction::RunCleanupsScope cleanups(CGF);
+ Visit(E->getSubExpr());
+}
+
+void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
+ QualType T = E->getType();
+ AggValueSlot Slot = EnsureSlot(T);
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
+}
+
+void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+ QualType T = E->getType();
+ AggValueSlot Slot = EnsureSlot(T);
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
+}
+
+/// isSimpleZero - If emitting this value will obviously just cause a store of
+/// zero to memory, return true. This can return false if uncertain, so it just
+/// handles simple cases.
+static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
+ E = E->IgnoreParens();
+
+ // 0
+ if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
+ return IL->getValue() == 0;
+ // +0.0
+ if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
+ return FL->getValue().isPosZero();
+ // int()
+ if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
+ CGF.getTypes().isZeroInitializable(E->getType()))
+ return true;
+ // (int*)0 - Null pointer expressions.
+ if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
+ return ICE->getCastKind() == CK_NullToPointer;
+ // '\0'
+ if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
+ return CL->getValue() == 0;
+
+ // Otherwise, hard case: conservatively return false.
+ return false;
+}
+
+
+void
+AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
+ QualType type = LV.getType();
+ // FIXME: Ignore result?
+ // FIXME: Are initializers affected by volatile?
+ if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
+ // Storing "i32 0" to a zero'd memory location is a noop.
+ return;
+ } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
+ return EmitNullInitializationToLValue(LV);
+ } else if (isa<NoInitExpr>(E)) {
+ // Do nothing.
+ return;
+ } else if (type->isReferenceType()) {
+ RValue RV = CGF.EmitReferenceBindingToExpr(E);
+ return CGF.EmitStoreThroughLValue(RV, LV);
+ }
+
+ switch (CGF.getEvaluationKind(type)) {
+ case TEK_Complex:
+ CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
+ return;
+ case TEK_Aggregate:
+ CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased,
+ Dest.isZeroed()));
+ return;
+ case TEK_Scalar:
+ if (LV.isSimple()) {
+ CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
+ } else {
+ CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
+ }
+ return;
+ }
+ llvm_unreachable("bad evaluation kind");
+}
+
+void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
+ QualType type = lv.getType();
+
+ // If the destination slot is already zeroed out before the aggregate is
+ // copied into it, we don't have to emit any zeros here.
+ if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
+ return;
+
+ if (CGF.hasScalarEvaluationKind(type)) {
+ // For non-aggregates, we can store the appropriate null constant.
+ llvm::Value *null = CGF.CGM.EmitNullConstant(type);
+ // Note that the following is not equivalent to
+ // EmitStoreThroughBitfieldLValue for ARC types.
+ if (lv.isBitField()) {
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
+ } else {
+ assert(lv.isSimple());
+ CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
+ }
+ } else {
+ // There's a potential optimization opportunity in combining
+ // memsets; that would be easy for arrays, but relatively
+ // difficult for structures with the current code.
+ CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
+ }
+}
+
+void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
+#if 0
+ // FIXME: Assess perf here? Figure out what cases are worth optimizing here
+ // (Length of globals? Chunks of zeroed-out space?).
+ //
+ // If we can, prefer a copy from a global; this is a lot less code for long
+ // globals, and it's easier for the current optimizers to analyze.
+ if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
+ llvm::GlobalVariable* GV =
+ new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
+ llvm::GlobalValue::InternalLinkage, C, "");
+ EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
+ return;
+ }
+#endif
+ if (E->hadArrayRangeDesignator())
+ CGF.ErrorUnsupported(E, "GNU array range designator extension");
+
+ AggValueSlot Dest = EnsureSlot(E->getType());
+
+ LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
+
+ // Handle initialization of an array.
+ if (E->getType()->isArrayType()) {
+ if (E->isStringLiteralInit())
+ return Visit(E->getInit(0));
+
+ QualType elementType =
+ CGF.getContext().getAsArrayType(E->getType())->getElementType();
+
+ auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
+ EmitArrayInit(Dest.getAddress(), AType, elementType, E);
+ return;
+ }
+
+ if (E->getType()->isAtomicType()) {
+ // An _Atomic(T) object can be list-initialized from an expression
+ // of the same type.
+ assert(E->getNumInits() == 1 &&
+ CGF.getContext().hasSameUnqualifiedType(E->getInit(0)->getType(),
+ E->getType()) &&
+ "unexpected list initialization for atomic object");
+ return Visit(E->getInit(0));
+ }
+
+ assert(E->getType()->isRecordType() && "Only support structs/unions here!");
+
+ // Do struct initialization; this code just sets each individual member
+  // to the appropriate value. This makes bitfield support automatic;
+ // the disadvantage is that the generated code is more difficult for
+ // the optimizer, especially with bitfields.
+ unsigned NumInitElements = E->getNumInits();
+ RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
+
+ // Prepare a 'this' for CXXDefaultInitExprs.
+ CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
+
+ if (record->isUnion()) {
+ // Only initialize one field of a union. The field itself is
+ // specified by the initializer list.
+ if (!E->getInitializedFieldInUnion()) {
+ // Empty union; we have nothing to do.
+
+#ifndef NDEBUG
+    // Make sure that it's really empty and not a failure of
+ // semantic analysis.
+ for (const auto *Field : record->fields())
+ assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
+#endif
+ return;
+ }
+
+ // FIXME: volatility
+ FieldDecl *Field = E->getInitializedFieldInUnion();
+
+ LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
+ if (NumInitElements) {
+ // Store the initializer into the field
+ EmitInitializationToLValue(E->getInit(0), FieldLoc);
+ } else {
+ // Default-initialize to null.
+ EmitNullInitializationToLValue(FieldLoc);
+ }
+
+ return;
+ }
+
+ // We'll need to enter cleanup scopes in case any of the member
+ // initializers throw an exception.
+ SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
+ llvm::Instruction *cleanupDominator = nullptr;
+
+ // Here we iterate over the fields; this makes it simpler to both
+ // default-initialize fields and skip over unnamed fields.
+ unsigned curInitIndex = 0;
+ for (const auto *field : record->fields()) {
+ // We're done once we hit the flexible array member.
+ if (field->getType()->isIncompleteArrayType())
+ break;
+
+ // Always skip anonymous bitfields.
+ if (field->isUnnamedBitfield())
+ continue;
+
+ // We're done if we reach the end of the explicit initializers, we
+ // have a zeroed object, and the rest of the fields are
+ // zero-initializable.
+ if (curInitIndex == NumInitElements && Dest.isZeroed() &&
+ CGF.getTypes().isZeroInitializable(E->getType()))
+ break;
+
+ LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
+    // We never generate write-barriers for initialized fields.
+ LV.setNonGC(true);
+
+ if (curInitIndex < NumInitElements) {
+ // Store the initializer into the field.
+ EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
+ } else {
+      // We're out of initializers; default-initialize to null.
+ EmitNullInitializationToLValue(LV);
+ }
+
+ // Push a destructor if necessary.
+ // FIXME: if we have an array of structures, all explicitly
+ // initialized, we can end up pushing a linear number of cleanups.
+ bool pushedCleanup = false;
+ if (QualType::DestructionKind dtorKind
+ = field->getType().isDestructedType()) {
+ assert(LV.isSimple());
+ if (CGF.needsEHCleanup(dtorKind)) {
+ if (!cleanupDominator)
+ cleanupDominator = CGF.Builder.CreateAlignedLoad(
+ CGF.Int8Ty,
+ llvm::Constant::getNullValue(CGF.Int8PtrTy),
+ CharUnits::One()); // placeholder
+
+ CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
+ CGF.getDestroyer(dtorKind), false);
+ cleanups.push_back(CGF.EHStack.stable_begin());
+ pushedCleanup = true;
+ }
+ }
+
+ // If the GEP didn't get used because of a dead zero init or something
+ // else, clean it up for -O0 builds and general tidiness.
+ if (!pushedCleanup && LV.isSimple())
+ if (llvm::GetElementPtrInst *GEP =
+ dyn_cast<llvm::GetElementPtrInst>(LV.getPointer()))
+ if (GEP->use_empty())
+ GEP->eraseFromParent();
+ }
+
+ // Deactivate all the partial cleanups in reverse order, which
+ // generally means popping them.
+ for (unsigned i = cleanups.size(); i != 0; --i)
+ CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);
+
+ // Destroy the placeholder if we made one.
+ if (cleanupDominator)
+ cleanupDominator->eraseFromParent();
+}
+
+void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
+ AggValueSlot Dest = EnsureSlot(E->getType());
+
+ LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
+ EmitInitializationToLValue(E->getBase(), DestLV);
+ VisitInitListExpr(E->getUpdater());
+}
+
+//===----------------------------------------------------------------------===//
+// Entry Points into this File
+//===----------------------------------------------------------------------===//
+
+/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
+/// non-zero bytes that will be stored when outputting the initializer for the
+/// specified initializer expression.
+static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
+ E = E->IgnoreParens();
+
+ // 0 and 0.0 won't require any non-zero stores!
+ if (isSimpleZero(E, CGF)) return CharUnits::Zero();
+
+  // If this is an initlist expr, sum up the sizes of the (present)
+ // elements. If this is something weird, assume the whole thing is non-zero.
+ const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
+ if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
+ return CGF.getContext().getTypeSizeInChars(E->getType());
+
+ // InitListExprs for structs have to be handled carefully. If there are
+ // reference members, we need to consider the size of the reference, not the
+ // referencee. InitListExprs for unions and arrays can't have references.
+ if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
+ if (!RT->isUnionType()) {
+ RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
+ CharUnits NumNonZeroBytes = CharUnits::Zero();
+
+ unsigned ILEElement = 0;
+ for (const auto *Field : SD->fields()) {
+ // We're done once we hit the flexible array member or run out of
+ // InitListExpr elements.
+ if (Field->getType()->isIncompleteArrayType() ||
+ ILEElement == ILE->getNumInits())
+ break;
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ const Expr *E = ILE->getInit(ILEElement++);
+
+ // Reference values are always non-null and have the width of a pointer.
+ if (Field->getType()->isReferenceType())
+ NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
+ CGF.getTarget().getPointerWidth(0));
+ else
+ NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
+ }
+
+ return NumNonZeroBytes;
+ }
+ }
+
+ CharUnits NumNonZeroBytes = CharUnits::Zero();
+ for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
+ NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
+ return NumNonZeroBytes;
+}
+
+/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
+/// zeros in it, emit a memset and avoid storing the individual zeros.
+///
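+/// Illustratively, for "struct { int a[256]; } s = { {1, 2} };" the slot is
+/// memset to zero up front and only the two non-zero elements are stored
+/// individually, instead of emitting hundreds of separate zero stores.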
+static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
+ CodeGenFunction &CGF) {
+ // If the slot is already known to be zeroed, nothing to do. Don't mess with
+ // volatile stores.
+ if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
+ return;
+
+ // C++ objects with a user-declared constructor don't need zero'ing.
+ if (CGF.getLangOpts().CPlusPlus)
+ if (const RecordType *RT = CGF.getContext()
+ .getBaseElementType(E->getType())->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->hasUserDeclaredConstructor())
+ return;
+ }
+
+ // If the type is 16-bytes or smaller, prefer individual stores over memset.
+ CharUnits Size = CGF.getContext().getTypeSizeInChars(E->getType());
+ if (Size <= CharUnits::fromQuantity(16))
+ return;
+
+  // Check to see if over 3/4 of the initializer is known to be zero.  If so,
+ // we prefer to emit memset + individual stores for the rest.
+ CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
+ if (NumNonZeroBytes*4 > Size)
+ return;
+
+ // Okay, it seems like a good idea to use an initial memset, emit the call.
+ llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
+
+ Address Loc = Slot.getAddress();
+ Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty);
+ CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
+
+ // Tell the AggExprEmitter that the slot is known zero.
+ Slot.setZeroed();
+}
+
+/// EmitAggExpr - Emit the computation of the specified expression of aggregate
+/// type.  The result is computed into the given slot; note that if the slot is
+/// ignored, the value of the aggregate expression is not needed.
+void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
+ assert(E && hasAggregateEvaluationKind(E->getType()) &&
+ "Invalid aggregate expression to emit");
+ assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
+ "slot has bits but no address");
+
+ // Optimize the slot if possible.
+ CheckAggExprForMemSetUse(Slot, E, *this);
+
+ AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
+}
+
+LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
+ assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
+ Address Temp = CreateMemTemp(E->getType());
+ LValue LV = MakeAddrLValue(Temp, E->getType());
+ EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+ return LV;
+}
+
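+// Illustratively, assigning into a base-class subobject must not clobber
+// tail padding that a derived class may have reused, which is why the
+// assignment path below copies only the type's data size rather than its
+// full storage size.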
+void CodeGenFunction::EmitAggregateCopy(Address DestPtr,
+ Address SrcPtr, QualType Ty,
+ bool isVolatile,
+ bool isAssignment) {
+ assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
+
+ if (getLangOpts().CPlusPlus) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
+ assert((Record->hasTrivialCopyConstructor() ||
+ Record->hasTrivialCopyAssignment() ||
+ Record->hasTrivialMoveConstructor() ||
+ Record->hasTrivialMoveAssignment() ||
+ Record->isUnion()) &&
+ "Trying to aggregate-copy a type without a trivial copy/move "
+ "constructor or assignment operator");
+ // Ignore empty classes in C++.
+ if (Record->isEmpty())
+ return;
+ }
+ }
+
+ // Aggregate assignment turns into llvm.memcpy. This is almost valid per
+ // C99 6.5.16.1p3, which states "If the value being stored in an object is
+  // read from another object that overlaps in any way the storage of the first
+ // object, then the overlap shall be exact and the two objects shall have
+ // qualified or unqualified versions of a compatible type."
+ //
+ // memcpy is not defined if the source and destination pointers are exactly
+ // equal, but other compilers do this optimization, and almost every memcpy
+ // implementation handles this case safely. If there is a libc that does not
+ // safely handle this, we can add a target hook.
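+ //
+ // For example (hypothetical), a plain aggregate assignment such as
+ //
+ //   struct Pt { int x, y; } a, b;
+ //   a = b;
+ //
+ // is emitted as one llvm.memcpy of Pt's data size rather than per-field
+ // loads and stores.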
+
+ // Get data size info for this aggregate. If this is an assignment,
+ // don't copy the tail padding, because we might be assigning into a
+ // base subobject where the tail padding is claimed. Otherwise,
+ // copying it is fine.
+ std::pair<CharUnits, CharUnits> TypeInfo;
+ if (isAssignment)
+ TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
+ else
+ TypeInfo = getContext().getTypeInfoInChars(Ty);
+
+ llvm::Value *SizeVal = nullptr;
+ if (TypeInfo.first.isZero()) {
+ // But note that getTypeInfo returns 0 for a VLA.
+ if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
+ getContext().getAsArrayType(Ty))) {
+ QualType BaseEltTy;
+ SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
+ TypeInfo = getContext().getTypeInfoDataSizeInChars(BaseEltTy);
+ std::pair<CharUnits, CharUnits> LastElementTypeInfo;
+ if (!isAssignment)
+ LastElementTypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
+ assert(!TypeInfo.first.isZero());
+ SizeVal = Builder.CreateNUWMul(
+ SizeVal,
+ llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
+ if (!isAssignment) {
+ SizeVal = Builder.CreateNUWSub(
+ SizeVal,
+ llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
+ SizeVal = Builder.CreateNUWAdd(
+ SizeVal, llvm::ConstantInt::get(
+ SizeTy, LastElementTypeInfo.first.getQuantity()));
+ }
+ }
+ }
+ if (!SizeVal) {
+ SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity());
+ }
+
+ // FIXME: If we have a volatile struct, the optimizer can remove what might
+ // appear to be `extra' memory ops:
+ //
+ // volatile struct { int i; } a, b;
+ //
+ // int main() {
+ // a = b;
+ // a = b;
+ // }
+ //
+ // we need to use a different call here. We use isVolatile to indicate when
+ // either the source or the destination is volatile.
+
+ DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
+ SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty);
+
+ // Don't do any of the memmove_collectable tests if GC isn't set.
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
+ // fall through
+ } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
+ RecordDecl *Record = RecordTy->getDecl();
+ if (Record->hasObjectMember()) {
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
+ SizeVal);
+ return;
+ }
+ } else if (Ty->isArrayType()) {
+ QualType BaseType = getContext().getBaseElementType(Ty);
+ if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
+ if (RecordTy->getDecl()->hasObjectMember()) {
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
+ SizeVal);
+ return;
+ }
+ }
+ }
+
+ auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);
+
+ // Determine the metadata to describe the position of any padding in this
+ // memcpy, as well as the TBAA tags for the members of the struct, in case
+ // the optimizer wishes to expand it in to scalar memory operations.
+ if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
+ Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
new file mode 100644
index 0000000..604cde7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
@@ -0,0 +1,1950 @@
+//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with code generation of C++ expressions
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGCUDARuntime.h"
+#include "CGCXXABI.h"
+#include "CGDebugInfo.h"
+#include "CGObjCRuntime.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Intrinsics.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+static RequiredArgs commonEmitCXXMemberOrOperatorCall(
+ CodeGenFunction &CGF, const CXXMethodDecl *MD, llvm::Value *Callee,
+ ReturnValueSlot ReturnValue, llvm::Value *This, llvm::Value *ImplicitParam,
+ QualType ImplicitParamTy, const CallExpr *CE, CallArgList &Args) {
+ assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
+ isa<CXXOperatorCallExpr>(CE));
+ assert(MD->isInstance() &&
+ "Trying to emit a member or operator call expr on a static method!");
+
+ // C++11 [class.mfct.non-static]p2:
+ // If a non-static member function of a class X is called for an object that
+ // is not of type X, or of a type derived from X, the behavior is undefined.
+ SourceLocation CallLoc;
+ if (CE)
+ CallLoc = CE->getExprLoc();
+ CGF.EmitTypeCheck(
+ isa<CXXConstructorDecl>(MD) ? CodeGenFunction::TCK_ConstructorCall
+ : CodeGenFunction::TCK_MemberCall,
+ CallLoc, This, CGF.getContext().getRecordType(MD->getParent()));
+
+ // Push the this ptr.
+ Args.add(RValue::get(This), MD->getThisType(CGF.getContext()));
+
+ // If there is an implicit parameter (e.g. VTT), emit it.
+ if (ImplicitParam) {
+ Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
+ }
+
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
+
+ // And the rest of the call args.
+ if (CE) {
+ // Special case: skip first argument of CXXOperatorCall (it is "this").
+ unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
+ CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
+ CE->getDirectCallee());
+ } else {
+ assert(
+ FPT->getNumParams() == 0 &&
+ "No CallExpr specified for function with non-zero number of arguments");
+ }
+ return required;
+}
+
+RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
+ const CXXMethodDecl *MD, llvm::Value *Callee, ReturnValueSlot ReturnValue,
+ llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
+ const CallExpr *CE) {
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
+ CallArgList Args;
+ RequiredArgs required = commonEmitCXXMemberOrOperatorCall(
+ *this, MD, Callee, ReturnValue, This, ImplicitParam, ImplicitParamTy, CE,
+ Args);
+ return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
+ Callee, ReturnValue, Args, MD);
+}
+
+RValue CodeGenFunction::EmitCXXStructorCall(
+ const CXXMethodDecl *MD, llvm::Value *Callee, ReturnValueSlot ReturnValue,
+ llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
+ const CallExpr *CE, StructorType Type) {
+ CallArgList Args;
+ commonEmitCXXMemberOrOperatorCall(*this, MD, Callee, ReturnValue, This,
+ ImplicitParam, ImplicitParamTy, CE, Args);
+ return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(MD, Type),
+ Callee, ReturnValue, Args, MD);
+}
+
+static CXXRecordDecl *getCXXRecord(const Expr *E) {
+ QualType T = E->getType();
+ if (const PointerType *PTy = T->getAs<PointerType>())
+ T = PTy->getPointeeType();
+ const RecordType *Ty = T->castAs<RecordType>();
+ return cast<CXXRecordDecl>(Ty->getDecl());
+}
+
+// Note: This function also emits constructor calls to support the MSVC
+// extension allowing explicit constructor function calls.
+RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
+ ReturnValueSlot ReturnValue) {
+ const Expr *callee = CE->getCallee()->IgnoreParens();
+
+ if (isa<BinaryOperator>(callee))
+ return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
+
+ const MemberExpr *ME = cast<MemberExpr>(callee);
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
+
+ if (MD->isStatic()) {
+ // The method is static, emit it as we would a regular call.
+ llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
+ return EmitCall(getContext().getPointerType(MD->getType()), Callee, CE,
+ ReturnValue);
+ }
+
+ bool HasQualifier = ME->hasQualifier();
+ NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
+ bool IsArrow = ME->isArrow();
+ const Expr *Base = ME->getBase();
+
+ return EmitCXXMemberOrOperatorMemberCallExpr(
+ CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
+}
+
+RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
+ const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
+ bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
+ const Expr *Base) {
+ assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));
+
+ // Compute the object pointer.
+ bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;
+
+ const CXXMethodDecl *DevirtualizedMethod = nullptr;
+ if (CanUseVirtualCall && CanDevirtualizeMemberFunctionCall(Base, MD)) {
+ const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
+ DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
+ assert(DevirtualizedMethod);
+ const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
+ const Expr *Inner = Base->ignoreParenBaseCasts();
+ if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
+ MD->getReturnType().getCanonicalType())
+ // If the return types are not the same, this might be a case where more
+ // code needs to run to compensate for it. For example, the derived
+ // method might return a type that inherits from the return
+ // type of MD and has it as a prefix.
+ // For now we just avoid devirtualizing these covariant cases.
+ DevirtualizedMethod = nullptr;
+ else if (getCXXRecord(Inner) == DevirtualizedClass)
+ // If the class of the Inner expression is where the dynamic method
+ // is defined, build the this pointer from it.
+ Base = Inner;
+ else if (getCXXRecord(Base) != DevirtualizedClass) {
+ // If the method is defined in a class that is not the best dynamic
+ // one or the one of the full expression, we would have to build
+ // a derived-to-base cast to compute the correct this pointer, but
+ // we don't have support for that yet, so do a virtual call.
+ DevirtualizedMethod = nullptr;
+ }
+ }
+
+ Address This = Address::invalid();
+ if (IsArrow)
+ This = EmitPointerWithAlignment(Base);
+ else
+ This = EmitLValue(Base).getAddress();
+
+ if (MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion())) {
+ if (isa<CXXDestructorDecl>(MD)) return RValue::get(nullptr);
+ if (isa<CXXConstructorDecl>(MD) &&
+ cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
+ return RValue::get(nullptr);
+
+ if (!MD->getParent()->mayInsertExtraPadding()) {
+ if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
+ // We don't like to generate the trivial copy/move assignment operator
+ // when it isn't necessary; just produce the proper effect here.
+ // Special case: skip first argument of CXXOperatorCall (it is "this").
+ unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
+ Address RHS = EmitLValue(*(CE->arg_begin() + ArgsToSkip)).getAddress();
+ EmitAggregateAssign(This, RHS, CE->getType());
+ return RValue::get(This.getPointer());
+ }
+
+ if (isa<CXXConstructorDecl>(MD) &&
+ cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
+ // Trivial move and copy ctor are the same.
+ assert(CE->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
+ Address RHS = EmitLValue(*CE->arg_begin()).getAddress();
+ EmitAggregateCopy(This, RHS, (*CE->arg_begin())->getType());
+ return RValue::get(This.getPointer());
+ }
+ llvm_unreachable("unknown trivial member function");
+ }
+ }
+
+ // Compute the function type we're calling.
+ const CXXMethodDecl *CalleeDecl =
+ DevirtualizedMethod ? DevirtualizedMethod : MD;
+ const CGFunctionInfo *FInfo = nullptr;
+ if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
+ FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
+ Dtor, StructorType::Complete);
+ else if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(CalleeDecl))
+ FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
+ Ctor, StructorType::Complete);
+ else
+ FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);
+
+ llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);
+
+ // C++ [class.virtual]p12:
+ // Explicit qualification with the scope operator (5.1) suppresses the
+ // virtual call mechanism.
+ //
+ // We also don't emit a virtual call if the base expression has a record type
+ // because then we know what the type is.
+ bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
+ llvm::Value *Callee;
+
+ if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
+ assert(CE->arg_begin() == CE->arg_end() &&
+ "Destructor shouldn't have explicit parameters");
+ assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
+ if (UseVirtualCall) {
+ CGM.getCXXABI().EmitVirtualDestructorCall(
+ *this, Dtor, Dtor_Complete, This, cast<CXXMemberCallExpr>(CE));
+ } else {
+ if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
+ Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
+ else if (!DevirtualizedMethod)
+ Callee =
+ CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete, FInfo, Ty);
+ else {
+ const CXXDestructorDecl *DDtor =
+ cast<CXXDestructorDecl>(DevirtualizedMethod);
+ Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
+ }
+ EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This.getPointer(),
+ /*ImplicitParam=*/nullptr, QualType(), CE);
+ }
+ return RValue::get(nullptr);
+ }
+
+ if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
+ Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
+ } else if (UseVirtualCall) {
+ Callee = CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, Ty,
+ CE->getLocStart());
+ } else {
+ if (SanOpts.has(SanitizerKind::CFINVCall) &&
+ MD->getParent()->isDynamicClass()) {
+ llvm::Value *VTable = GetVTablePtr(This, Int8PtrTy, MD->getParent());
+ EmitVTablePtrCheckForCall(MD, VTable, CFITCK_NVCall, CE->getLocStart());
+ }
+
+ if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
+ Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
+ else if (!DevirtualizedMethod)
+ Callee = CGM.GetAddrOfFunction(MD, Ty);
+ else {
+ Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
+ }
+ }
+
+ if (MD->isVirtual()) {
+ This = CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
+ *this, MD, This, UseVirtualCall);
+ }
+
+ return EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This.getPointer(),
+ /*ImplicitParam=*/nullptr, QualType(), CE);
+}
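+
+// Devirtualization example (hypothetical):
+//
+//   struct Base { virtual int f(); };
+//   struct Derived : Base { int f() override; };
+//   Derived d;
+//   d.f();
+//
+// Here the best dynamic type of the base expression is known to be Derived,
+// so the call is emitted as a direct call to Derived::f rather than a
+// virtual dispatch through the vtable.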
+
+RValue
+CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ const BinaryOperator *BO =
+ cast<BinaryOperator>(E->getCallee()->IgnoreParens());
+ const Expr *BaseExpr = BO->getLHS();
+ const Expr *MemFnExpr = BO->getRHS();
+
+ const MemberPointerType *MPT =
+ MemFnExpr->getType()->castAs<MemberPointerType>();
+
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->castAs<FunctionProtoType>();
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
+
+ // Get the member function pointer.
+ llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
+
+ // Emit the 'this' pointer.
+ Address This = Address::invalid();
+ if (BO->getOpcode() == BO_PtrMemI)
+ This = EmitPointerWithAlignment(BaseExpr);
+ else
+ This = EmitLValue(BaseExpr).getAddress();
+
+ EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
+ QualType(MPT->getClass(), 0));
+
+ // Ask the ABI to load the callee. Note that This is modified.
+ llvm::Value *ThisPtrForCall = nullptr;
+ llvm::Value *Callee =
+ CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
+ ThisPtrForCall, MemFnPtr, MPT);
+
+ CallArgList Args;
+
+ QualType ThisType =
+ getContext().getPointerType(getContext().getTagDeclType(RD));
+
+ // Push the this ptr.
+ Args.add(RValue::get(ThisPtrForCall), ThisType);
+
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);
+
+ // And the rest of the call args
+ EmitCallArgs(Args, FPT, E->arguments(), E->getDirectCallee());
+ return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
+ Callee, ReturnValue, Args);
+}
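+
+// Member-pointer call example (hypothetical):
+//
+//   int (S::*pmf)(int) = &S::f;
+//   (s.*pmf)(42);    // BO_PtrMemD: 'this' comes from an lvalue
+//   (p->*pmf)(42);   // BO_PtrMemI: 'this' comes from a pointer
+//
+// In either case the ABI decodes the member function pointer (which may
+// encode a vtable offset plus a this-adjustment) before making the call.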
+
+RValue
+CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD,
+ ReturnValueSlot ReturnValue) {
+ assert(MD->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+ return EmitCXXMemberOrOperatorMemberCallExpr(
+ E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
+ /*IsArrow=*/false, E->getArg(0));
+}
+
+RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
+}
+
+static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
+ Address DestPtr,
+ const CXXRecordDecl *Base) {
+ if (Base->isEmpty())
+ return;
+
+ DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);
+
+ const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
+ CharUnits NVSize = Layout.getNonVirtualSize();
+
+ // We cannot simply zero-initialize the entire base sub-object if vbptrs are
+ // present; they are initialized by the most derived class before the base
+ // constructor is called.
+ SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores;
+ Stores.emplace_back(CharUnits::Zero(), NVSize);
+
+ // Each store is split by the existence of a vbptr.
+ CharUnits VBPtrWidth = CGF.getPointerSize();
+ std::vector<CharUnits> VBPtrOffsets =
+ CGF.CGM.getCXXABI().getVBPtrOffsets(Base);
+ for (CharUnits VBPtrOffset : VBPtrOffsets) {
+ std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val();
+ CharUnits LastStoreOffset = LastStore.first;
+ CharUnits LastStoreSize = LastStore.second;
+
+ CharUnits SplitBeforeOffset = LastStoreOffset;
+ CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset;
+ assert(!SplitBeforeSize.isNegative() && "negative store size!");
+ if (!SplitBeforeSize.isZero())
+ Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize);
+
+ CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth;
+ CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset;
+ assert(!SplitAfterSize.isNegative() && "negative store size!");
+ if (!SplitAfterSize.isZero())
+ Stores.emplace_back(SplitAfterOffset, SplitAfterSize);
+ }
+
+ // If the type contains a pointer to data member we can't memset it to zero.
+ // Instead, create a null constant and copy it to the destination.
+ // TODO: there are other patterns besides zero that we can usefully memset,
+ // like -1, which happens to be the pattern used by member-pointers.
+ // TODO: isZeroInitializable can be over-conservative in the case where a
+ // virtual base contains a member pointer.
+ llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Base);
+ if (!NullConstantForBase->isNullValue()) {
+ llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable(
+ CGF.CGM.getModule(), NullConstantForBase->getType(),
+ /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
+ NullConstantForBase, Twine());
+
+ CharUnits Align = std::max(Layout.getNonVirtualAlignment(),
+ DestPtr.getAlignment());
+ NullVariable->setAlignment(Align.getQuantity());
+
+ Address SrcPtr = Address(CGF.EmitCastToVoidPtr(NullVariable), Align);
+
+ // Get and call the appropriate llvm.memcpy overload.
+ for (std::pair<CharUnits, CharUnits> Store : Stores) {
+ CharUnits StoreOffset = Store.first;
+ CharUnits StoreSize = Store.second;
+ llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
+ CGF.Builder.CreateMemCpy(
+ CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
+ CGF.Builder.CreateConstInBoundsByteGEP(SrcPtr, StoreOffset),
+ StoreSizeVal);
+ }
+
+ // Otherwise, just memset the whole thing to zero. This is legal
+ // because in LLVM, all default initializers (other than the ones we just
+ // handled above) are guaranteed to have a bit pattern of all zeros.
+ } else {
+ for (std::pair<CharUnits, CharUnits> Store : Stores) {
+ CharUnits StoreOffset = Store.first;
+ CharUnits StoreSize = Store.second;
+ llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
+ CGF.Builder.CreateMemSet(
+ CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
+ CGF.Builder.getInt8(0), StoreSizeVal);
+ }
+ }
+}
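+
+// For instance (hypothetical, Microsoft ABI), a base sub-object of
+//
+//   struct B : virtual V { int x; };
+//
+// contains a vbptr that the most derived class has already written, so the
+// zero-initialization above is split into stores before and after the vbptr
+// slot instead of one memset covering the whole non-virtual size.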
+
+void
+CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
+ AggValueSlot Dest) {
+ assert(!Dest.isIgnored() && "Must have a destination!");
+ const CXXConstructorDecl *CD = E->getConstructor();
+
+ // If we require zero initialization before (or instead of) calling the
+ // constructor, as can be the case with a non-user-provided default
+ // constructor, emit the zero initialization now, unless destination is
+ // already zeroed.
+ if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
+ switch (E->getConstructionKind()) {
+ case CXXConstructExpr::CK_Delegating:
+ case CXXConstructExpr::CK_Complete:
+ EmitNullInitialization(Dest.getAddress(), E->getType());
+ break;
+ case CXXConstructExpr::CK_VirtualBase:
+ case CXXConstructExpr::CK_NonVirtualBase:
+ EmitNullBaseClassInitialization(*this, Dest.getAddress(),
+ CD->getParent());
+ break;
+ }
+ }
+
+ // If this is a call to a trivial default constructor, do nothing.
+ if (CD->isTrivial() && CD->isDefaultConstructor())
+ return;
+
+ // Elide the constructor if we're constructing from a temporary.
+ // The temporary check is required because Sema sets this on NRVO
+ // returns.
+ if (getLangOpts().ElideConstructors && E->isElidable()) {
+ assert(getContext().hasSameUnqualifiedType(E->getType(),
+ E->getArg(0)->getType()));
+ if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
+ EmitAggExpr(E->getArg(0), Dest);
+ return;
+ }
+ }
+
+ if (const ConstantArrayType *arrayType
+ = getContext().getAsConstantArrayType(E->getType())) {
+ EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E);
+ } else {
+ CXXCtorType Type = Ctor_Complete;
+ bool ForVirtualBase = false;
+ bool Delegating = false;
+
+ switch (E->getConstructionKind()) {
+ case CXXConstructExpr::CK_Delegating:
+ // We should be emitting a constructor; GlobalDecl will assert this.
+ Type = CurGD.getCtorType();
+ Delegating = true;
+ break;
+
+ case CXXConstructExpr::CK_Complete:
+ Type = Ctor_Complete;
+ break;
+
+ case CXXConstructExpr::CK_VirtualBase:
+ ForVirtualBase = true;
+ // fall-through
+
+ case CXXConstructExpr::CK_NonVirtualBase:
+ Type = Ctor_Base;
+ }
+
+ // Call the constructor.
+ EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating,
+ Dest.getAddress(), E);
+ }
+}
+
+void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
+ const Expr *Exp) {
+ if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
+ Exp = E->getSubExpr();
+ assert(isa<CXXConstructExpr>(Exp) &&
+ "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
+ const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
+ const CXXConstructorDecl *CD = E->getConstructor();
+ RunCleanupsScope Scope(*this);
+
+ // If we require zero initialization before (or instead of) calling the
+ // constructor, as can be the case with a non-user-provided default
+ // constructor, emit the zero initialization now.
+ // FIXME. Do I still need this for a copy ctor synthesis?
+ if (E->requiresZeroInitialization())
+ EmitNullInitialization(Dest, E->getType());
+
+ assert(!getContext().getAsConstantArrayType(E->getType())
+ && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
+ EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
+}
+
+static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
+ const CXXNewExpr *E) {
+ if (!E->isArray())
+ return CharUnits::Zero();
+
+ // No cookie is required if the operator new[] being used is the
+ // reserved placement operator new[].
+ if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
+ return CharUnits::Zero();
+
+ return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
+}
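+
+// Cookie example (hypothetical): for
+//
+//   struct S { ~S(); };
+//   S *p = new S[n];
+//
+// the Itanium ABI prefixes the allocation with a size_t cookie holding n so
+// that delete[] p can recover the element count and run n destructors; a
+// plain 'new int[n]' needs no cookie.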
+
+static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
+ const CXXNewExpr *e,
+ unsigned minElements,
+ llvm::Value *&numElements,
+ llvm::Value *&sizeWithoutCookie) {
+ QualType type = e->getAllocatedType();
+
+ if (!e->isArray()) {
+ CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
+ sizeWithoutCookie
+ = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
+ return sizeWithoutCookie;
+ }
+
+ // The width of size_t.
+ unsigned sizeWidth = CGF.SizeTy->getBitWidth();
+
+ // Figure out the cookie size.
+ llvm::APInt cookieSize(sizeWidth,
+ CalculateCookiePadding(CGF, e).getQuantity());
+
+ // Emit the array size expression.
+ // We multiply the size of all dimensions for NumElements.
+ // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
+ numElements = CGF.EmitScalarExpr(e->getArraySize());
+ assert(isa<llvm::IntegerType>(numElements->getType()));
+
+ // The number of elements can have an arbitrary integer type;
+ // essentially, we need to multiply it by a constant factor, add a
+ // cookie size, and verify that the result is representable as a
+ // size_t. That's just a gloss, though, and it's wrong in one
+ // important way: if the count is negative, it's an error even if
+ // the cookie size would bring the total size >= 0.
+ bool isSigned
+ = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
+ llvm::IntegerType *numElementsType
+ = cast<llvm::IntegerType>(numElements->getType());
+ unsigned numElementsWidth = numElementsType->getBitWidth();
+
+ // Compute the constant factor.
+ llvm::APInt arraySizeMultiplier(sizeWidth, 1);
+ while (const ConstantArrayType *CAT
+ = CGF.getContext().getAsConstantArrayType(type)) {
+ type = CAT->getElementType();
+ arraySizeMultiplier *= CAT->getSize();
+ }
+
+ CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
+ llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
+ typeSizeMultiplier *= arraySizeMultiplier;
+
+ // This will be a size_t.
+ llvm::Value *size;
+
+ // If someone is doing 'new int[42]' there is no need to do a dynamic check.
+ // Don't bloat the -O0 code.
+ if (llvm::ConstantInt *numElementsC =
+ dyn_cast<llvm::ConstantInt>(numElements)) {
+ const llvm::APInt &count = numElementsC->getValue();
+
+ bool hasAnyOverflow = false;
+
+ // If 'count' was a negative number, it's an overflow.
+ if (isSigned && count.isNegative())
+ hasAnyOverflow = true;
+
+ // We want to do all this arithmetic in size_t. If numElements is
+ // wider than that, check whether it's already too big, and if so,
+ // overflow.
+ else if (numElementsWidth > sizeWidth &&
+ numElementsWidth - sizeWidth > count.countLeadingZeros())
+ hasAnyOverflow = true;
+
+ // Okay, compute a count at the right width.
+ llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);
+
+ // If there is a brace-initializer, we cannot allocate fewer elements than
+ // there are initializers. If we do, that's treated like an overflow.
+ if (adjustedCount.ult(minElements))
+ hasAnyOverflow = true;
+
+ // Scale numElements by that. This might overflow, but we don't
+ // care because it only overflows if allocationSize does, too, and
+ // if that overflows then we shouldn't use this.
+ numElements = llvm::ConstantInt::get(CGF.SizeTy,
+ adjustedCount * arraySizeMultiplier);
+
+ // Compute the size before cookie, and track whether it overflowed.
+ bool overflow;
+ llvm::APInt allocationSize
+ = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
+ hasAnyOverflow |= overflow;
+
+ // Add in the cookie, and check whether it's overflowed.
+ if (cookieSize != 0) {
+ // Save the current size without a cookie. This shouldn't be
+ // used if there was overflow.
+ sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
+
+ allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
+ hasAnyOverflow |= overflow;
+ }
+
+ // On overflow, produce a -1 so operator new will fail.
+ if (hasAnyOverflow) {
+ size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
+ } else {
+ size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
+ }
+
+ // Otherwise, we might need to use the overflow intrinsics.
+ } else {
+ // There are up to five conditions we need to test for:
+ // 1) if isSigned, we need to check whether numElements is negative;
+ // 2) if numElementsWidth > sizeWidth, we need to check whether
+ // numElements is larger than something representable in size_t;
+ // 3) if minElements > 0, we need to check whether numElements is smaller
+ // than that;
+ // 4) we need to compute
+ // sizeWithoutCookie := numElements * typeSizeMultiplier
+ // and check whether it overflows; and
+ // 5) if we need a cookie, we need to compute
+ // size := sizeWithoutCookie + cookieSize
+ // and check whether it overflows.
+
+ llvm::Value *hasOverflow = nullptr;
+
+ // If numElementsWidth > sizeWidth, then one way or another, we're
+ // going to have to do a comparison for (2), and this happens to
+ // take care of (1), too.
+ if (numElementsWidth > sizeWidth) {
+ llvm::APInt threshold(numElementsWidth, 1);
+ threshold <<= sizeWidth;
+
+ llvm::Value *thresholdV
+ = llvm::ConstantInt::get(numElementsType, threshold);
+
+ hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
+ numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);
+
+ // Otherwise, if we're signed, we want to sext up to size_t.
+ } else if (isSigned) {
+ if (numElementsWidth < sizeWidth)
+ numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);
+
+ // If there's a non-1 type size multiplier, then we can do the
+ // signedness check at the same time as we do the multiply
+ // because a negative number times anything will cause an
+ // unsigned overflow. Otherwise, we have to do it here. But at least
+ // in this case, we can subsume the >= minElements check.
+ if (typeSizeMultiplier == 1)
+ hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
+ llvm::ConstantInt::get(CGF.SizeTy, minElements));
+
+ // Otherwise, zext up to size_t if necessary.
+ } else if (numElementsWidth < sizeWidth) {
+ numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
+ }
+
+ assert(numElements->getType() == CGF.SizeTy);
+
+ if (minElements) {
+ // Don't allow allocation of fewer elements than we have initializers.
+ if (!hasOverflow) {
+ hasOverflow = CGF.Builder.CreateICmpULT(numElements,
+ llvm::ConstantInt::get(CGF.SizeTy, minElements));
+ } else if (numElementsWidth > sizeWidth) {
+ // The other existing overflow subsumes this check.
+ // We do an unsigned comparison, since any signed value < -1 is
+ // taken care of either above or below.
+ hasOverflow = CGF.Builder.CreateOr(hasOverflow,
+ CGF.Builder.CreateICmpULT(numElements,
+ llvm::ConstantInt::get(CGF.SizeTy, minElements)));
+ }
+ }
+
+ size = numElements;
+
+ // Multiply by the type size if necessary. This multiplier
+ // includes all the factors for nested arrays.
+ //
+ // This step also causes numElements to be scaled up by the
+ // nested-array factor if necessary. Overflow on this computation
+ // can be ignored because the result shouldn't be used if
+ // allocation fails.
+ if (typeSizeMultiplier != 1) {
+ llvm::Value *umul_with_overflow
+ = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);
+
+ llvm::Value *tsmV =
+ llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
+ llvm::Value *result =
+ CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});
+
+ llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
+ if (hasOverflow)
+ hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
+ else
+ hasOverflow = overflowed;
+
+ size = CGF.Builder.CreateExtractValue(result, 0);
+
+ // Also scale up numElements by the array size multiplier.
+ if (arraySizeMultiplier != 1) {
+ // If the base element type size is 1, then we can re-use the
+ // multiply we just did.
+ if (typeSize.isOne()) {
+ assert(arraySizeMultiplier == typeSizeMultiplier);
+ numElements = size;
+
+ // Otherwise we need a separate multiply.
+ } else {
+ llvm::Value *asmV =
+ llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
+ numElements = CGF.Builder.CreateMul(numElements, asmV);
+ }
+ }
+ } else {
+ // numElements doesn't need to be scaled.
+ assert(arraySizeMultiplier == 1);
+ }
+
+ // Add in the cookie size if necessary.
+ if (cookieSize != 0) {
+ sizeWithoutCookie = size;
+
+ llvm::Value *uadd_with_overflow
+ = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);
+
+ llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
+ llvm::Value *result =
+ CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV});
+
+ llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
+ if (hasOverflow)
+ hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
+ else
+ hasOverflow = overflowed;
+
+ size = CGF.Builder.CreateExtractValue(result, 0);
+ }
+
+ // If we had any possibility of dynamic overflow, make a select to
+ // overwrite 'size' with an all-ones value, which should cause
+ // operator new to throw.
+ if (hasOverflow)
+ size = CGF.Builder.CreateSelect(hasOverflow,
+ llvm::Constant::getAllOnesValue(CGF.SizeTy),
+ size);
+ }
+
+ if (cookieSize == 0)
+ sizeWithoutCookie = size;
+ else
+ assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");
+
+ return size;
+}
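+
+// Overflow-checking sketch: for a dynamic 'new T[n]' the code above
+// computes, in size_t arithmetic,
+//
+//   size = n * sizeof(T) + cookieSize   // via umul/uadd.with.overflow
+//
+// and, if any step overflows (or a signed n is negative, or n is smaller
+// than minElements), forces size to all-ones so that operator new reliably
+// fails instead of allocating a too-small block.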
+
+static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
+ QualType AllocType, Address NewPtr) {
+ // FIXME: Refactor with EmitExprAsInit.
+ switch (CGF.getEvaluationKind(AllocType)) {
+ case TEK_Scalar:
+ CGF.EmitScalarInit(Init, nullptr,
+ CGF.MakeAddrLValue(NewPtr, AllocType), false);
+ return;
+ case TEK_Complex:
+ CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
+ /*isInit*/ true);
+ return;
+ case TEK_Aggregate: {
+ AggValueSlot Slot
+ = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ CGF.EmitAggExpr(Init, Slot);
+ return;
+ }
+ }
+ llvm_unreachable("bad evaluation kind");
+}
+
+void CodeGenFunction::EmitNewArrayInitializer(
+ const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
+ Address BeginPtr, llvm::Value *NumElements,
+ llvm::Value *AllocSizeWithoutCookie) {
+ // If we have a type with trivial initialization and no initializer,
+ // there's nothing to do.
+ if (!E->hasInitializer())
+ return;
+
+ Address CurPtr = BeginPtr;
+
+ unsigned InitListElements = 0;
+
+ const Expr *Init = E->getInitializer();
+ Address EndOfInit = Address::invalid();
+ QualType::DestructionKind DtorKind = ElementType.isDestructedType();
+ EHScopeStack::stable_iterator Cleanup;
+ llvm::Instruction *CleanupDominator = nullptr;
+
+ CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
+ CharUnits ElementAlign =
+ BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);
+
+ // If the initializer is an initializer list, first do the explicit elements.
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
+ InitListElements = ILE->getNumInits();
+
+ // If this is a multi-dimensional array new, we will initialize multiple
+ // elements with each init list element.
+ QualType AllocType = E->getAllocatedType();
+ if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
+ AllocType->getAsArrayTypeUnsafe())) {
+ ElementTy = ConvertTypeForMem(AllocType);
+ CurPtr = Builder.CreateElementBitCast(CurPtr, ElementTy);
+ InitListElements *= getContext().getConstantArrayElementCount(CAT);
+ }
+
+ // Enter a partial-destruction Cleanup if necessary.
+ if (needsEHCleanup(DtorKind)) {
+ // In principle we could tell the Cleanup where we are more
+ // directly, but the control flow can get so varied here that it
+ // would actually be quite complex. Therefore we go through an
+ // alloca.
+ EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
+ "array.init.end");
+ CleanupDominator = Builder.CreateStore(BeginPtr.getPointer(), EndOfInit);
+ pushIrregularPartialArrayCleanup(BeginPtr.getPointer(), EndOfInit,
+ ElementType, ElementAlign,
+ getDestroyer(DtorKind));
+ Cleanup = EHStack.stable_begin();
+ }
+
+ CharUnits StartAlign = CurPtr.getAlignment();
+ for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
+ // Tell the cleanup that it needs to destroy up to this
+ // element. TODO: some of these stores can be trivially
+ // observed to be unnecessary.
+ if (EndOfInit.isValid()) {
+ auto FinishedPtr =
+ Builder.CreateBitCast(CurPtr.getPointer(), BeginPtr.getType());
+ Builder.CreateStore(FinishedPtr, EndOfInit);
+ }
+ // FIXME: If the last initializer is an incomplete initializer list for
+ // an array, and we have an array filler, we can fold together the two
+ // initialization loops.
+ StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
+ ILE->getInit(i)->getType(), CurPtr);
+ CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
+ Builder.getSize(1),
+ "array.exp.next"),
+ StartAlign.alignmentAtOffset((i + 1) * ElementSize));
+ }
+
+ // The remaining elements are filled with the array filler expression.
+ Init = ILE->getArrayFiller();
+
+ // Extract the initializer for the individual array elements by pulling
+ // out the array filler from all the nested initializer lists. This avoids
+ // generating a nested loop for the initialization.
+ while (Init && Init->getType()->isConstantArrayType()) {
+ auto *SubILE = dyn_cast<InitListExpr>(Init);
+ if (!SubILE)
+ break;
+ assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
+ Init = SubILE->getArrayFiller();
+ }
+
+ // Switch back to initializing one base element at a time.
+ CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr.getType());
+ }
+
+ // Attempt to perform zero-initialization using memset.
+ auto TryMemsetInitialization = [&]() -> bool {
+ // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
+ // we can initialize with a memset to -1.
+ if (!CGM.getTypes().isZeroInitializable(ElementType))
+ return false;
+
+ // Optimization: since zero initialization will just set the memory
+ // to all zeroes, generate a single memset to do it in one shot.
+
+ // Subtract out the size of any elements we've already initialized.
+ auto *RemainingSize = AllocSizeWithoutCookie;
+ if (InitListElements) {
+ // We know this can't overflow; we check this when doing the allocation.
+ auto *InitializedSize = llvm::ConstantInt::get(
+ RemainingSize->getType(),
+ getContext().getTypeSizeInChars(ElementType).getQuantity() *
+ InitListElements);
+ RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
+ }
+
+ // Create the memset.
+ Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
+ return true;
+ };
+
+ // If all elements have already been initialized, skip any further
+ // initialization.
+ llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
+ if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
+ // If there was a Cleanup, deactivate it.
+ if (CleanupDominator)
+ DeactivateCleanupBlock(Cleanup, CleanupDominator);
+ return;
+ }
+
+ assert(Init && "have trailing elements to initialize but no initializer");
+
+ // If this is a constructor call, try to optimize it out, and failing that
+ // emit a single loop to initialize all remaining elements.
+ if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
+ CXXConstructorDecl *Ctor = CCE->getConstructor();
+ if (Ctor->isTrivial()) {
+ // If the new-expression did not specify value-initialization, then there
+ // is no initialization.
+ if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
+ return;
+
+ if (TryMemsetInitialization())
+ return;
+ }
+
+ // Store the new Cleanup position for irregular Cleanups.
+ //
+ // FIXME: Share this cleanup with the constructor call emission rather than
+ // having it create a cleanup of its own.
+ if (EndOfInit.isValid())
+ Builder.CreateStore(CurPtr.getPointer(), EndOfInit);
+
+ // Emit a constructor call loop to initialize the remaining elements.
+ if (InitListElements)
+ NumElements = Builder.CreateSub(
+ NumElements,
+ llvm::ConstantInt::get(NumElements->getType(), InitListElements));
+ EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
+ CCE->requiresZeroInitialization());
+ return;
+ }
+
+ // If this is value-initialization, we can usually use memset.
+ ImplicitValueInitExpr IVIE(ElementType);
+ if (isa<ImplicitValueInitExpr>(Init)) {
+ if (TryMemsetInitialization())
+ return;
+
+ // Switch to an ImplicitValueInitExpr for the element type. This handles
+ // only one case: multidimensional array new of pointers to members. In
+ // all other cases, we already have an initializer for the array element.
+ Init = &IVIE;
+ }
+
+ // At this point we should have found an initializer for the individual
+ // elements of the array.
+ assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
+ "got wrong type of element to initialize");
+
+ // If we have an empty initializer list, we can usually use memset.
+ if (auto *ILE = dyn_cast<InitListExpr>(Init))
+ if (ILE->getNumInits() == 0 && TryMemsetInitialization())
+ return;
+
+ // If we have a struct whose every field is value-initialized, we can
+ // usually use memset.
+ if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
+ if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
+ if (RType->getDecl()->isStruct()) {
+ unsigned NumFields = 0;
+ for (auto *Field : RType->getDecl()->fields())
+ if (!Field->isUnnamedBitfield())
+ ++NumFields;
+ if (ILE->getNumInits() == NumFields)
+ for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
+ if (!isa<ImplicitValueInitExpr>(ILE->getInit(i)))
+ --NumFields;
+ if (ILE->getNumInits() == NumFields && TryMemsetInitialization())
+ return;
+ }
+ }
+ }
+
+ // Create the loop blocks.
+ llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
+ llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");
+
+ // Find the end of the array, hoisted out of the loop.
+ llvm::Value *EndPtr =
+ Builder.CreateInBoundsGEP(BeginPtr.getPointer(), NumElements, "array.end");
+
+ // If the number of elements isn't constant, we have to now check if there is
+ // anything left to initialize.
+ if (!ConstNum) {
+ llvm::Value *IsEmpty =
+ Builder.CreateICmpEQ(CurPtr.getPointer(), EndPtr, "array.isempty");
+ Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
+ }
+
+ // Enter the loop.
+ EmitBlock(LoopBB);
+
+ // Set up the current-element phi.
+ llvm::PHINode *CurPtrPhi =
+ Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
+ CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB);
+
+ CurPtr = Address(CurPtrPhi, ElementAlign);
+
+ // Store the new Cleanup position for irregular Cleanups.
+ if (EndOfInit.isValid())
+ Builder.CreateStore(CurPtr.getPointer(), EndOfInit);
+
+ // Enter a partial-destruction Cleanup if necessary.
+ if (!CleanupDominator && needsEHCleanup(DtorKind)) {
+ pushRegularPartialArrayCleanup(BeginPtr.getPointer(), CurPtr.getPointer(),
+ ElementType, ElementAlign,
+ getDestroyer(DtorKind));
+ Cleanup = EHStack.stable_begin();
+ CleanupDominator = Builder.CreateUnreachable();
+ }
+
+ // Emit the initializer into this element.
+ StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr);
+
+ // Leave the Cleanup if we entered one.
+ if (CleanupDominator) {
+ DeactivateCleanupBlock(Cleanup, CleanupDominator);
+ CleanupDominator->eraseFromParent();
+ }
+
+ // Advance to the next element by adjusting the pointer type as necessary.
+ llvm::Value *NextPtr =
+ Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr.getPointer(), 1,
+ "array.next");
+
+ // Check whether we've gotten to the end of the array and, if so,
+ // exit the loop.
+ llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
+ Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
+ CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());
+
+ EmitBlock(ContBB);
+}
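+
+// For example (hypothetical), 'new int[8]{1, 2, 3}' stores the three
+// explicit initializers individually and then, because the array filler is
+// value-initialization of a zero-initializable type, clears the remaining
+// 20 bytes (assuming 4-byte int) with a single memset instead of a loop.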
+
+static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
+ QualType ElementType, llvm::Type *ElementTy,
+ Address NewPtr, llvm::Value *NumElements,
+ llvm::Value *AllocSizeWithoutCookie) {
+ ApplyDebugLocation DL(CGF, E);
+ if (E->isArray())
+ CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements,
+ AllocSizeWithoutCookie);
+ else if (const Expr *Init = E->getInitializer())
+ StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
+}
+
+/// Emit a call to an operator new or operator delete function, as implicitly
+/// created by new-expressions and delete-expressions.
+static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
+ const FunctionDecl *Callee,
+ const FunctionProtoType *CalleeType,
+ const CallArgList &Args) {
+ llvm::Instruction *CallOrInvoke;
+ llvm::Value *CalleeAddr = CGF.CGM.GetAddrOfFunction(Callee);
+ RValue RV =
+ CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
+ Args, CalleeType, /*chainCall=*/false),
+ CalleeAddr, ReturnValueSlot(), Args, Callee, &CallOrInvoke);
+
+ /// C++1y [expr.new]p10:
+ /// [In a new-expression,] an implementation is allowed to omit a call
+ /// to a replaceable global allocation function.
+ ///
+ /// We model such elidable calls with the 'builtin' attribute.
+ llvm::Function *Fn = dyn_cast<llvm::Function>(CalleeAddr);
+ if (Callee->isReplaceableGlobalAllocationFunction() &&
+ Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
+ // FIXME: Add addAttribute to CallSite.
+ if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(CallOrInvoke))
+ CI->addAttribute(llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::Builtin);
+ else if (llvm::InvokeInst *II = dyn_cast<llvm::InvokeInst>(CallOrInvoke))
+ II->addAttribute(llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::Builtin);
+ else
+ llvm_unreachable("unexpected kind of call instruction");
+ }
+
+ return RV;
+}
+
+RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
+ const Expr *Arg,
+ bool IsDelete) {
+ CallArgList Args;
+ const Stmt *ArgS = Arg;
+ EmitCallArgs(Args, *Type->param_type_begin(), llvm::makeArrayRef(ArgS));
+ // Find the allocation or deallocation function that we're calling.
+ ASTContext &Ctx = getContext();
+ DeclarationName Name = Ctx.DeclarationNames
+ .getCXXOperatorName(IsDelete ? OO_Delete : OO_New);
+ for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
+ if (auto *FD = dyn_cast<FunctionDecl>(Decl))
+ if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
+ return EmitNewDeleteCall(*this, cast<FunctionDecl>(Decl), Type, Args);
+ llvm_unreachable("predeclared global operator new/delete is missing");
+}
+
+namespace {
+ /// A cleanup to call the given 'operator delete' function upon
+ /// abnormal exit from a new expression.
+ class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
+ size_t NumPlacementArgs;
+ const FunctionDecl *OperatorDelete;
+ llvm::Value *Ptr;
+ llvm::Value *AllocSize;
+
+ RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
+
+ public:
+ static size_t getExtraSize(size_t NumPlacementArgs) {
+ return NumPlacementArgs * sizeof(RValue);
+ }
+
+ CallDeleteDuringNew(size_t NumPlacementArgs,
+ const FunctionDecl *OperatorDelete,
+ llvm::Value *Ptr,
+ llvm::Value *AllocSize)
+ : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
+ Ptr(Ptr), AllocSize(AllocSize) {}
+
+ void setPlacementArg(unsigned I, RValue Arg) {
+ assert(I < NumPlacementArgs && "index out of range");
+ getPlacementArgs()[I] = Arg;
+ }
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ const FunctionProtoType *FPT
+ = OperatorDelete->getType()->getAs<FunctionProtoType>();
+ assert(FPT->getNumParams() == NumPlacementArgs + 1 ||
+ (FPT->getNumParams() == 2 && NumPlacementArgs == 0));
+
+ CallArgList DeleteArgs;
+
+ // The first argument is always a void*.
+ FunctionProtoType::param_type_iterator AI = FPT->param_type_begin();
+ DeleteArgs.add(RValue::get(Ptr), *AI++);
+
+ // A member 'operator delete' can take an extra 'size_t' argument.
+ if (FPT->getNumParams() == NumPlacementArgs + 2)
+ DeleteArgs.add(RValue::get(AllocSize), *AI++);
+
+ // Pass the rest of the arguments, which must match exactly.
+ for (unsigned I = 0; I != NumPlacementArgs; ++I)
+ DeleteArgs.add(getPlacementArgs()[I], *AI++);
+
+ // Call 'operator delete'.
+ EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
+ }
+ };
+
+ /// A cleanup to call the given 'operator delete' function upon
+ /// abnormal exit from a new expression when the new expression is
+ /// conditional.
+ class CallDeleteDuringConditionalNew final : public EHScopeStack::Cleanup {
+ size_t NumPlacementArgs;
+ const FunctionDecl *OperatorDelete;
+ DominatingValue<RValue>::saved_type Ptr;
+ DominatingValue<RValue>::saved_type AllocSize;
+
+ DominatingValue<RValue>::saved_type *getPlacementArgs() {
+ return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
+ }
+
+ public:
+ static size_t getExtraSize(size_t NumPlacementArgs) {
+ return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
+ }
+
+ CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
+ const FunctionDecl *OperatorDelete,
+ DominatingValue<RValue>::saved_type Ptr,
+ DominatingValue<RValue>::saved_type AllocSize)
+ : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
+ Ptr(Ptr), AllocSize(AllocSize) {}
+
+ void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
+ assert(I < NumPlacementArgs && "index out of range");
+ getPlacementArgs()[I] = Arg;
+ }
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ const FunctionProtoType *FPT
+ = OperatorDelete->getType()->getAs<FunctionProtoType>();
+ assert(FPT->getNumParams() == NumPlacementArgs + 1 ||
+ (FPT->getNumParams() == 2 && NumPlacementArgs == 0));
+
+ CallArgList DeleteArgs;
+
+ // The first argument is always a void*.
+ FunctionProtoType::param_type_iterator AI = FPT->param_type_begin();
+ DeleteArgs.add(Ptr.restore(CGF), *AI++);
+
+ // A member 'operator delete' can take an extra 'size_t' argument.
+ if (FPT->getNumParams() == NumPlacementArgs + 2) {
+ RValue RV = AllocSize.restore(CGF);
+ DeleteArgs.add(RV, *AI++);
+ }
+
+ // Pass the rest of the arguments, which must match exactly.
+ for (unsigned I = 0; I != NumPlacementArgs; ++I) {
+ RValue RV = getPlacementArgs()[I].restore(CGF);
+ DeleteArgs.add(RV, *AI++);
+ }
+
+ // Call 'operator delete'.
+ EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
+ }
+ };
+}
+
+/// Enter a cleanup to call 'operator delete' if the initializer in a
+/// new-expression throws.
+static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
+ const CXXNewExpr *E,
+ Address NewPtr,
+ llvm::Value *AllocSize,
+ const CallArgList &NewArgs) {
+ // If we're not inside a conditional branch, then the cleanup will
+ // dominate and we can do the easier (and more efficient) thing.
+ if (!CGF.isInConditionalBranch()) {
+ CallDeleteDuringNew *Cleanup = CGF.EHStack
+ .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
+ E->getNumPlacementArgs(),
+ E->getOperatorDelete(),
+ NewPtr.getPointer(),
+ AllocSize);
+ for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
+ Cleanup->setPlacementArg(I, NewArgs[I+1].RV);
+
+ return;
+ }
+
+ // Otherwise, we need to save all this stuff.
+ DominatingValue<RValue>::saved_type SavedNewPtr =
+ DominatingValue<RValue>::save(CGF, RValue::get(NewPtr.getPointer()));
+ DominatingValue<RValue>::saved_type SavedAllocSize =
+ DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
+
+ CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
+ .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
+ E->getNumPlacementArgs(),
+ E->getOperatorDelete(),
+ SavedNewPtr,
+ SavedAllocSize);
+ for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
+ Cleanup->setPlacementArg(I,
+ DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));
+
+ CGF.initFullExprCleanup();
+}
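+
+// Cleanup example (hypothetical): in
+//
+//   T *p = new (arg) T();   // T::T() may throw
+//
+// the cleanups above arrange for the matching placement operator delete to
+// be called with the same placement arguments if the initializer throws, as
+// [expr.new] requires; the cleanup is deactivated once initialization
+// completes successfully.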
+
+llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
+ // The element type being allocated.
+ QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
+
+ // 1. Build a call to the allocation function.
+ FunctionDecl *allocator = E->getOperatorNew();
+
+ // If there is a brace-initializer, we cannot allocate fewer elements than
+ // there are initializers.
+ unsigned minElements = 0;
+ if (E->isArray() && E->hasInitializer()) {
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
+ minElements = ILE->getNumInits();
+ }
+
+ llvm::Value *numElements = nullptr;
+ llvm::Value *allocSizeWithoutCookie = nullptr;
+ llvm::Value *allocSize =
+ EmitCXXNewAllocSize(*this, E, minElements, numElements,
+ allocSizeWithoutCookie);
+
+ // Emit the allocation call. If the allocator is a global placement
+ // operator, just "inline" it directly.
+ Address allocation = Address::invalid();
+ CallArgList allocatorArgs;
+ if (allocator->isReservedGlobalPlacementOperator()) {
+ assert(E->getNumPlacementArgs() == 1);
+ const Expr *arg = *E->placement_arguments().begin();
+
+ AlignmentSource alignSource;
+ allocation = EmitPointerWithAlignment(arg, &alignSource);
+
+ // The pointer expression will, in many cases, be an opaque void*.
+ // In these cases, discard the computed alignment and use the
+ // formal alignment of the allocated type.
+ if (alignSource != AlignmentSource::Decl) {
+ allocation = Address(allocation.getPointer(),
+ getContext().getTypeAlignInChars(allocType));
+ }
+
+ // Set up allocatorArgs for the call to operator delete if it's not
+ // the reserved global operator.
+ if (E->getOperatorDelete() &&
+ !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
+ allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType());
+ allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType());
+ }
+
+ } else {
+ const FunctionProtoType *allocatorType =
+ allocator->getType()->castAs<FunctionProtoType>();
+
+ // The allocation size is the first argument.
+ QualType sizeType = getContext().getSizeType();
+ allocatorArgs.add(RValue::get(allocSize), sizeType);
+
+ // We start at 1 here because the first argument (the allocation size)
+ // has already been emitted.
+ EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
+ /* CalleeDecl */ nullptr,
+ /*ParamsToSkip*/ 1);
+
+ RValue RV =
+ EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
+
+ // For now, only assume that the allocation function returns
+ // something satisfactorily aligned for the element type, plus
+ // the cookie if we have one.
+ CharUnits allocationAlign =
+ getContext().getTypeAlignInChars(allocType);
+ if (allocSize != allocSizeWithoutCookie) {
+ CharUnits cookieAlign = getSizeAlign(); // FIXME?
+ allocationAlign = std::max(allocationAlign, cookieAlign);
+ }
+
+ allocation = Address(RV.getScalarVal(), allocationAlign);
+ }
+
+ // Emit a null check on the allocation result if the allocation
+ // function is allowed to return null (because it has a non-throwing
+ // exception spec or is the reserved placement new) and we have an
+ // interesting initializer.
+ bool nullCheck = E->shouldNullCheckAllocation(getContext()) &&
+ (!allocType.isPODType(getContext()) || E->hasInitializer());
+
+ llvm::BasicBlock *nullCheckBB = nullptr;
+ llvm::BasicBlock *contBB = nullptr;
+
+ // The null-check means that the initializer is conditionally
+ // evaluated.
+ ConditionalEvaluation conditional(*this);
+
+ if (nullCheck) {
+ conditional.begin(*this);
+
+ nullCheckBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
+ contBB = createBasicBlock("new.cont");
+
+ llvm::Value *isNull =
+ Builder.CreateIsNull(allocation.getPointer(), "new.isnull");
+ Builder.CreateCondBr(isNull, contBB, notNullBB);
+ EmitBlock(notNullBB);
+ }
+
+ // If there's an operator delete, enter a cleanup to call it if an
+ // exception is thrown.
+ EHScopeStack::stable_iterator operatorDeleteCleanup;
+ llvm::Instruction *cleanupDominator = nullptr;
+ if (E->getOperatorDelete() &&
+ !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
+ EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
+ operatorDeleteCleanup = EHStack.stable_begin();
+ cleanupDominator = Builder.CreateUnreachable();
+ }
+
+ assert((allocSize == allocSizeWithoutCookie) ==
+ CalculateCookiePadding(*this, E).isZero());
+ if (allocSize != allocSizeWithoutCookie) {
+ assert(E->isArray());
+ allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
+ numElements,
+ E, allocType);
+ }
+
+ llvm::Type *elementTy = ConvertTypeForMem(allocType);
+ Address result = Builder.CreateElementBitCast(allocation, elementTy);
+
+  // Pass the pointer through invariant.group.barrier to avoid propagating
+  // vptr information that may be embedded in the previous type.
+ if (CGM.getCodeGenOpts().StrictVTablePointers &&
+ CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+ allocator->isReservedGlobalPlacementOperator())
+ result = Address(Builder.CreateInvariantGroupBarrier(result.getPointer()),
+ result.getAlignment());
+
+ EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
+ allocSizeWithoutCookie);
+ if (E->isArray()) {
+ // NewPtr is a pointer to the base element type. If we're
+ // allocating an array of arrays, we'll need to cast back to the
+ // array pointer type.
+ llvm::Type *resultType = ConvertTypeForMem(E->getType());
+ if (result.getType() != resultType)
+ result = Builder.CreateBitCast(result, resultType);
+ }
+
+ // Deactivate the 'operator delete' cleanup if we finished
+ // initialization.
+ if (operatorDeleteCleanup.isValid()) {
+ DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
+ cleanupDominator->eraseFromParent();
+ }
+
+ llvm::Value *resultPtr = result.getPointer();
+ if (nullCheck) {
+ conditional.end(*this);
+
+ llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
+ EmitBlock(contBB);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(resultPtr->getType(), 2);
+ PHI->addIncoming(resultPtr, notNullBB);
+ PHI->addIncoming(llvm::Constant::getNullValue(resultPtr->getType()),
+ nullCheckBB);
+
+ resultPtr = PHI;
+ }
+
+ return resultPtr;
+}
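+
+// Illustrative sketch of the result (names schematic, not actual IR): for
+// "T *p = new (std::nothrow) T;" the code above emits roughly
+//
+//     %mem = call i8* @nothrow_operator_new(i64 %size)
+//     %isnull = icmp eq i8* %mem, null
+//     br i1 %isnull, label %new.cont, label %new.notnull
+//   new.notnull:
+//     ; initializer runs here, guarded by the operator-delete cleanup
+//     br label %new.cont
+//   new.cont:
+//     %p = phi i8* [ %mem, %new.notnull ], [ null, %entry ]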
+
+void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
+ llvm::Value *Ptr,
+ QualType DeleteTy) {
+ assert(DeleteFD->getOverloadedOperator() == OO_Delete);
+
+ const FunctionProtoType *DeleteFTy =
+ DeleteFD->getType()->getAs<FunctionProtoType>();
+
+ CallArgList DeleteArgs;
+
+ // Check if we need to pass the size to the delete operator.
+ llvm::Value *Size = nullptr;
+ QualType SizeTy;
+ if (DeleteFTy->getNumParams() == 2) {
+ SizeTy = DeleteFTy->getParamType(1);
+ CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
+ Size = llvm::ConstantInt::get(ConvertType(SizeTy),
+ DeleteTypeSize.getQuantity());
+ }
+
+ QualType ArgTy = DeleteFTy->getParamType(0);
+ llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
+ DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
+
+ if (Size)
+ DeleteArgs.add(RValue::get(Size), SizeTy);
+
+ // Emit the call to delete.
+ EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
+}
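+
+// Illustrative example: given "void operator delete(void *, std::size_t);"
+// and "delete p;" where p is an S*, DeleteFTy has two parameters, so the
+// code above emits roughly "operator delete(ptr, sizeof(S))"; with the
+// usual one-parameter form, only the pointer is passed.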
+
+namespace {
+ /// Calls the given 'operator delete' on a single object.
+ struct CallObjectDelete final : EHScopeStack::Cleanup {
+ llvm::Value *Ptr;
+ const FunctionDecl *OperatorDelete;
+ QualType ElementType;
+
+ CallObjectDelete(llvm::Value *Ptr,
+ const FunctionDecl *OperatorDelete,
+ QualType ElementType)
+ : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
+ }
+ };
+}
+
+void
+CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
+ llvm::Value *CompletePtr,
+ QualType ElementType) {
+ EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
+ OperatorDelete, ElementType);
+}
+
+/// Emit the code for deleting a single object.
+static void EmitObjectDelete(CodeGenFunction &CGF,
+ const CXXDeleteExpr *DE,
+ Address Ptr,
+ QualType ElementType) {
+ // Find the destructor for the type, if applicable. If the
+ // destructor is virtual, we'll just emit the vcall and return.
+ const CXXDestructorDecl *Dtor = nullptr;
+ if (const RecordType *RT = ElementType->getAs<RecordType>()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
+ Dtor = RD->getDestructor();
+
+ if (Dtor->isVirtual()) {
+ CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
+ Dtor);
+ return;
+ }
+ }
+ }
+
+ // Make sure that we call delete even if the dtor throws.
+  // This doesn't have to be a conditional cleanup because we're going
+  // to pop it off in a second.
+ const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
+ CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
+ Ptr.getPointer(),
+ OperatorDelete, ElementType);
+
+ if (Dtor)
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false,
+ /*Delegating=*/false,
+ Ptr);
+ else if (auto Lifetime = ElementType.getObjCLifetime()) {
+ switch (Lifetime) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ CGF.EmitARCDestroyStrong(Ptr, ARCPreciseLifetime);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ CGF.EmitARCDestroyWeak(Ptr);
+ break;
+ }
+ }
+
+ CGF.PopCleanupBlock();
+}
+
+namespace {
+ /// Calls the given 'operator delete' on an array of objects.
+ struct CallArrayDelete final : EHScopeStack::Cleanup {
+ llvm::Value *Ptr;
+ const FunctionDecl *OperatorDelete;
+ llvm::Value *NumElements;
+ QualType ElementType;
+ CharUnits CookieSize;
+
+ CallArrayDelete(llvm::Value *Ptr,
+ const FunctionDecl *OperatorDelete,
+ llvm::Value *NumElements,
+ QualType ElementType,
+ CharUnits CookieSize)
+ : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
+ ElementType(ElementType), CookieSize(CookieSize) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ const FunctionProtoType *DeleteFTy =
+ OperatorDelete->getType()->getAs<FunctionProtoType>();
+ assert(DeleteFTy->getNumParams() == 1 || DeleteFTy->getNumParams() == 2);
+
+ CallArgList Args;
+
+ // Pass the pointer as the first argument.
+ QualType VoidPtrTy = DeleteFTy->getParamType(0);
+ llvm::Value *DeletePtr
+ = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
+ Args.add(RValue::get(DeletePtr), VoidPtrTy);
+
+ // Pass the original requested size as the second argument.
+ if (DeleteFTy->getNumParams() == 2) {
+ QualType size_t = DeleteFTy->getParamType(1);
+ llvm::IntegerType *SizeTy
+ = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
+
+ CharUnits ElementTypeSize =
+ CGF.CGM.getContext().getTypeSizeInChars(ElementType);
+
+ // The size of an element, multiplied by the number of elements.
+ llvm::Value *Size
+ = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
+ if (NumElements)
+ Size = CGF.Builder.CreateMul(Size, NumElements);
+
+ // Plus the size of the cookie if applicable.
+ if (!CookieSize.isZero()) {
+ llvm::Value *CookieSizeV
+ = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
+ Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
+ }
+
+ Args.add(RValue::get(Size), size_t);
+ }
+
+ // Emit the call to delete.
+ EmitNewDeleteCall(CGF, OperatorDelete, DeleteFTy, Args);
+ }
+ };
+}
+
+/// Emit the code for deleting an array of objects.
+static void EmitArrayDelete(CodeGenFunction &CGF,
+ const CXXDeleteExpr *E,
+ Address deletedPtr,
+ QualType elementType) {
+ llvm::Value *numElements = nullptr;
+ llvm::Value *allocatedPtr = nullptr;
+ CharUnits cookieSize;
+ CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
+ numElements, allocatedPtr, cookieSize);
+
+ assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
+
+ // Make sure that we call delete even if one of the dtors throws.
+ const FunctionDecl *operatorDelete = E->getOperatorDelete();
+ CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
+ allocatedPtr, operatorDelete,
+ numElements, elementType,
+ cookieSize);
+
+ // Destroy the elements.
+ if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
+ assert(numElements && "no element count for a type with a destructor!");
+
+ CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
+ CharUnits elementAlign =
+ deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);
+
+ llvm::Value *arrayBegin = deletedPtr.getPointer();
+ llvm::Value *arrayEnd =
+ CGF.Builder.CreateInBoundsGEP(arrayBegin, numElements, "delete.end");
+
+ // Note that it is legal to allocate a zero-length array, and we
+ // can never fold the check away because the length should always
+ // come from a cookie.
+ CGF.emitArrayDestroy(arrayBegin, arrayEnd, elementType, elementAlign,
+ CGF.getDestroyer(dtorKind),
+ /*checkZeroLength*/ true,
+ CGF.needsEHCleanup(dtorKind));
+ }
+
+ // Pop the cleanup block.
+ CGF.PopCleanupBlock();
+}
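+
+// Illustrative layout: when an array allocation carries a cookie, memory
+// looks roughly like
+//
+//     [ cookie | elt0 | elt1 | ... ]
+//
+// ReadArrayCookie returns allocatedPtr (the start of the block, which is
+// what must be passed to operator delete) and the element count, while
+// deletedPtr is the program-visible pointer to elt0.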
+
+void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
+ const Expr *Arg = E->getArgument();
+ Address Ptr = EmitPointerWithAlignment(Arg);
+
+ // Null check the pointer.
+ llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
+ llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
+
+ llvm::Value *IsNull = Builder.CreateIsNull(Ptr.getPointer(), "isnull");
+
+ Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
+ EmitBlock(DeleteNotNull);
+
+  // We might be deleting a pointer to an array. If so, GEP down to the
+  // first non-array element.
+ // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
+ QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
+ if (DeleteTy->isConstantArrayType()) {
+ llvm::Value *Zero = Builder.getInt32(0);
+ SmallVector<llvm::Value*,8> GEP;
+
+ GEP.push_back(Zero); // point at the outermost array
+
+ // For each layer of array type we're pointing at:
+ while (const ConstantArrayType *Arr
+ = getContext().getAsConstantArrayType(DeleteTy)) {
+ // 1. Unpeel the array type.
+ DeleteTy = Arr->getElementType();
+
+ // 2. GEP to the first element of the array.
+ GEP.push_back(Zero);
+ }
+
+ Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getPointer(), GEP, "del.first"),
+ Ptr.getAlignment());
+ }
+
+ assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());
+
+ if (E->isArrayForm()) {
+ EmitArrayDelete(*this, E, Ptr, DeleteTy);
+ } else {
+ EmitObjectDelete(*this, E, Ptr, DeleteTy);
+ }
+
+ EmitBlock(DeleteEnd);
+}
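+
+// Illustrative example: for "A (*p)[3][7]; delete [] p;" the pointee type
+// lowers to [3 x [7 x %A]], so the loop above builds GEP indices {0, 0, 0}
+// to reach the first %A element before the array delete is emitted.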
+
+static bool isGLValueFromPointerDeref(const Expr *E) {
+ E = E->IgnoreParens();
+
+ if (const auto *CE = dyn_cast<CastExpr>(E)) {
+ if (!CE->getSubExpr()->isGLValue())
+ return false;
+ return isGLValueFromPointerDeref(CE->getSubExpr());
+ }
+
+ if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
+ return isGLValueFromPointerDeref(OVE->getSourceExpr());
+
+ if (const auto *BO = dyn_cast<BinaryOperator>(E))
+ if (BO->getOpcode() == BO_Comma)
+ return isGLValueFromPointerDeref(BO->getRHS());
+
+ if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(E))
+ return isGLValueFromPointerDeref(ACO->getTrueExpr()) ||
+ isGLValueFromPointerDeref(ACO->getFalseExpr());
+
+ // C++11 [expr.sub]p1:
+ // The expression E1[E2] is identical (by definition) to *((E1)+(E2))
+ if (isa<ArraySubscriptExpr>(E))
+ return true;
+
+ if (const auto *UO = dyn_cast<UnaryOperator>(E))
+ if (UO->getOpcode() == UO_Deref)
+ return true;
+
+ return false;
+}
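+
+// Illustrative examples: "typeid(*p)", "typeid(p[i])", and
+// "typeid(c ? *p : *q)" are all glvalues obtained from a pointer
+// dereference and therefore require a null check; "typeid(r)" for a
+// reference r is not, and goes straight to the vtable load.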
+
+static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
+ llvm::Type *StdTypeInfoPtrTy) {
+ // Get the vtable pointer.
+ Address ThisPtr = CGF.EmitLValue(E).getAddress();
+
+ // C++ [expr.typeid]p2:
+ // If the glvalue expression is obtained by applying the unary * operator to
+ // a pointer and the pointer is a null pointer value, the typeid expression
+ // throws the std::bad_typeid exception.
+ //
+  // However, this paragraph's intent is not entirely clear. We choose a very
+  // generous interpretation that obliges us to consider comma operators,
+  // conditional operators, parentheses, and other such constructs.
+ QualType SrcRecordTy = E->getType();
+ if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(
+ isGLValueFromPointerDeref(E), SrcRecordTy)) {
+ llvm::BasicBlock *BadTypeidBlock =
+ CGF.createBasicBlock("typeid.bad_typeid");
+ llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");
+
+ llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr.getPointer());
+ CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
+
+ CGF.EmitBlock(BadTypeidBlock);
+ CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
+ CGF.EmitBlock(EndBlock);
+ }
+
+ return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
+ StdTypeInfoPtrTy);
+}
+
+llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
+ llvm::Type *StdTypeInfoPtrTy =
+ ConvertType(E->getType())->getPointerTo();
+
+ if (E->isTypeOperand()) {
+ llvm::Constant *TypeInfo =
+ CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
+ return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
+ }
+
+ // C++ [expr.typeid]p2:
+ // When typeid is applied to a glvalue expression whose type is a
+ // polymorphic class type, the result refers to a std::type_info object
+ // representing the type of the most derived object (that is, the dynamic
+ // type) to which the glvalue refers.
+ if (E->isPotentiallyEvaluated())
+ return EmitTypeidFromVTable(*this, E->getExprOperand(),
+ StdTypeInfoPtrTy);
+
+ QualType OperandTy = E->getExprOperand()->getType();
+ return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
+ StdTypeInfoPtrTy);
+}
+
+static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
+ QualType DestTy) {
+ llvm::Type *DestLTy = CGF.ConvertType(DestTy);
+ if (DestTy->isPointerType())
+ return llvm::Constant::getNullValue(DestLTy);
+
+ /// C++ [expr.dynamic.cast]p9:
+ /// A failed cast to reference type throws std::bad_cast
+ if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
+ return nullptr;
+
+ CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
+ return llvm::UndefValue::get(DestLTy);
+}
+
+llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
+ const CXXDynamicCastExpr *DCE) {
+ CGM.EmitExplicitCastExprType(DCE, this);
+ QualType DestTy = DCE->getTypeAsWritten();
+
+ if (DCE->isAlwaysNull())
+ if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy))
+ return T;
+
+ QualType SrcTy = DCE->getSubExpr()->getType();
+
+ // C++ [expr.dynamic.cast]p7:
+ // If T is "pointer to cv void," then the result is a pointer to the most
+ // derived object pointed to by v.
+ const PointerType *DestPTy = DestTy->getAs<PointerType>();
+
+ bool isDynamicCastToVoid;
+ QualType SrcRecordTy;
+ QualType DestRecordTy;
+ if (DestPTy) {
+ isDynamicCastToVoid = DestPTy->getPointeeType()->isVoidType();
+ SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
+ DestRecordTy = DestPTy->getPointeeType();
+ } else {
+ isDynamicCastToVoid = false;
+ SrcRecordTy = SrcTy;
+ DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
+ }
+
+ assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
+
+ // C++ [expr.dynamic.cast]p4:
+ // If the value of v is a null pointer value in the pointer case, the result
+ // is the null pointer value of type T.
+ bool ShouldNullCheckSrcValue =
+ CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(SrcTy->isPointerType(),
+ SrcRecordTy);
+
+ llvm::BasicBlock *CastNull = nullptr;
+ llvm::BasicBlock *CastNotNull = nullptr;
+ llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");
+
+ if (ShouldNullCheckSrcValue) {
+ CastNull = createBasicBlock("dynamic_cast.null");
+ CastNotNull = createBasicBlock("dynamic_cast.notnull");
+
+ llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr.getPointer());
+ Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
+ EmitBlock(CastNotNull);
+ }
+
+ llvm::Value *Value;
+ if (isDynamicCastToVoid) {
+ Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy,
+ DestTy);
+ } else {
+ assert(DestRecordTy->isRecordType() &&
+ "destination type must be a record type!");
+ Value = CGM.getCXXABI().EmitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
+ DestTy, DestRecordTy, CastEnd);
+ CastNotNull = Builder.GetInsertBlock();
+ }
+
+ if (ShouldNullCheckSrcValue) {
+ EmitBranch(CastEnd);
+
+ EmitBlock(CastNull);
+ EmitBranch(CastEnd);
+ }
+
+ EmitBlock(CastEnd);
+
+ if (ShouldNullCheckSrcValue) {
+ llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
+ PHI->addIncoming(Value, CastNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
+
+ Value = PHI;
+ }
+
+ return Value;
+}
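+
+// Illustrative sketch (schematic, not actual IR): for a null-checked
+// pointer dynamic_cast, the code above produces roughly
+//
+//     %isnull = icmp eq %src, null
+//     br i1 %isnull, label %dynamic_cast.null, label %dynamic_cast.notnull
+//   dynamic_cast.notnull:
+//     %v = ... ABI cast call ...
+//     br label %dynamic_cast.end
+//   dynamic_cast.null:
+//     br label %dynamic_cast.end
+//   dynamic_cast.end:
+//     %res = phi [ %v, %dynamic_cast.notnull ], [ null, %dynamic_cast.null ]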
+
+void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
+ RunCleanupsScope Scope(*this);
+ LValue SlotLV = MakeAddrLValue(Slot.getAddress(), E->getType());
+
+ CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
+ for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
+ e = E->capture_init_end();
+ i != e; ++i, ++CurField) {
+ // Emit initialization
+ LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
+ if (CurField->hasCapturedVLAType()) {
+ auto VAT = CurField->getCapturedVLAType();
+ EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
+ } else {
+ ArrayRef<VarDecl *> ArrayIndexes;
+ if (CurField->getType()->isArrayType())
+ ArrayIndexes = E->getCaptureInitIndexVars(i);
+ EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
new file mode 100644
index 0000000..ccdb532
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
@@ -0,0 +1,1108 @@
+//===--- CGExprComplex.cpp - Emit LLVM Code for Complex Exprs -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes with complex types as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Metadata.h"
+#include <algorithm>
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Complex Expression Emitter
+//===----------------------------------------------------------------------===//
+
+typedef CodeGenFunction::ComplexPairTy ComplexPairTy;
+
+/// Return the complex type that we are meant to emit.
+static const ComplexType *getComplexType(QualType type) {
+ type = type.getCanonicalType();
+ if (const ComplexType *comp = dyn_cast<ComplexType>(type)) {
+ return comp;
+ } else {
+ return cast<ComplexType>(cast<AtomicType>(type)->getValueType());
+ }
+}
+
+namespace {
+class ComplexExprEmitter
+ : public StmtVisitor<ComplexExprEmitter, ComplexPairTy> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ bool IgnoreReal;
+ bool IgnoreImag;
+public:
+ ComplexExprEmitter(CodeGenFunction &cgf, bool ir=false, bool ii=false)
+ : CGF(cgf), Builder(CGF.Builder), IgnoreReal(ir), IgnoreImag(ii) {
+ }
+
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+ bool TestAndClearIgnoreReal() {
+ bool I = IgnoreReal;
+ IgnoreReal = false;
+ return I;
+ }
+ bool TestAndClearIgnoreImag() {
+ bool I = IgnoreImag;
+ IgnoreImag = false;
+ return I;
+ }
+
+  /// EmitLoadOfLValue - Given an expression with complex type that represents
+  /// an l-value, this method emits the address of the l-value, then loads
+  /// and returns the result as a real/imaginary pair.
+ ComplexPairTy EmitLoadOfLValue(const Expr *E) {
+ return EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc());
+ }
+
+ ComplexPairTy EmitLoadOfLValue(LValue LV, SourceLocation Loc);
+
+ /// EmitStoreOfComplex - Store the specified real/imag parts into the
+ /// specified value pointer.
+ void EmitStoreOfComplex(ComplexPairTy Val, LValue LV, bool isInit);
+
+ /// Emit a cast from complex value Val to DestType.
+ ComplexPairTy EmitComplexToComplexCast(ComplexPairTy Val, QualType SrcType,
+ QualType DestType, SourceLocation Loc);
+ /// Emit a cast from scalar value Val to DestType.
+ ComplexPairTy EmitScalarToComplexCast(llvm::Value *Val, QualType SrcType,
+ QualType DestType, SourceLocation Loc);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ ComplexPairTy Visit(Expr *E) {
+ ApplyDebugLocation DL(CGF, E);
+ return StmtVisitor<ComplexExprEmitter, ComplexPairTy>::Visit(E);
+ }
+
+ ComplexPairTy VisitStmt(Stmt *S) {
+ S->dump(CGF.getContext().getSourceManager());
+ llvm_unreachable("Stmt can't have complex result type!");
+ }
+ ComplexPairTy VisitExpr(Expr *S);
+ ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());}
+ ComplexPairTy VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
+ return Visit(GE->getResultExpr());
+ }
+ ComplexPairTy VisitImaginaryLiteral(const ImaginaryLiteral *IL);
+ ComplexPairTy
+ VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) {
+ return Visit(PE->getReplacement());
+ }
+
+ // l-values.
+ ComplexPairTy VisitDeclRefExpr(DeclRefExpr *E) {
+ if (CodeGenFunction::ConstantEmission result = CGF.tryEmitAsConstant(E)) {
+ if (result.isReference())
+ return EmitLoadOfLValue(result.getReferenceLValue(CGF, E),
+ E->getExprLoc());
+
+ llvm::Constant *pair = result.getValue();
+ return ComplexPairTy(pair->getAggregateElement(0U),
+ pair->getAggregateElement(1U));
+ }
+ return EmitLoadOfLValue(E);
+ }
+ ComplexPairTy VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ ComplexPairTy VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ return CGF.EmitObjCMessageExpr(E).getComplexVal();
+ }
+ ComplexPairTy VisitArraySubscriptExpr(Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitMemberExpr(const Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+ if (E->isGLValue())
+ return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E), E->getExprLoc());
+ return CGF.getOpaqueRValueMapping(E).getComplexVal();
+ }
+
+ ComplexPairTy VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ return CGF.EmitPseudoObjectRValue(E).getComplexVal();
+ }
+
+ // FIXME: CompoundLiteralExpr
+
+ ComplexPairTy EmitCast(CastKind CK, Expr *Op, QualType DestTy);
+ ComplexPairTy VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ // Unlike for scalars, we don't have to worry about function->ptr demotion
+ // here.
+ return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
+ }
+ ComplexPairTy VisitCastExpr(CastExpr *E) {
+ if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
+ CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
+ return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
+ }
+ ComplexPairTy VisitCallExpr(const CallExpr *E);
+ ComplexPairTy VisitStmtExpr(const StmtExpr *E);
+
+ // Operators.
+ ComplexPairTy VisitPrePostIncDec(const UnaryOperator *E,
+ bool isInc, bool isPre) {
+ LValue LV = CGF.EmitLValue(E->getSubExpr());
+ return CGF.EmitComplexPrePostIncDec(E, LV, isInc, isPre);
+ }
+ ComplexPairTy VisitUnaryPostDec(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, false, false);
+ }
+ ComplexPairTy VisitUnaryPostInc(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, true, false);
+ }
+ ComplexPairTy VisitUnaryPreDec(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, false, true);
+ }
+ ComplexPairTy VisitUnaryPreInc(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, true, true);
+ }
+ ComplexPairTy VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitUnaryPlus (const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ return Visit(E->getSubExpr());
+ }
+ ComplexPairTy VisitUnaryMinus (const UnaryOperator *E);
+ ComplexPairTy VisitUnaryNot (const UnaryOperator *E);
+ // LNot,Real,Imag never return complex.
+ ComplexPairTy VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+ ComplexPairTy VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+ ComplexPairTy VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
+ CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
+ return Visit(DIE->getExpr());
+ }
+ ComplexPairTy VisitExprWithCleanups(ExprWithCleanups *E) {
+ CGF.enterFullExpression(E);
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ return Visit(E->getSubExpr());
+ }
+ ComplexPairTy VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
+ assert(E->getType()->isAnyComplexType() && "Expected complex type!");
+ QualType Elem = E->getType()->castAs<ComplexType>()->getElementType();
+ llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem));
+ return ComplexPairTy(Null, Null);
+ }
+ ComplexPairTy VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+ assert(E->getType()->isAnyComplexType() && "Expected complex type!");
+ QualType Elem = E->getType()->castAs<ComplexType>()->getElementType();
+ llvm::Constant *Null =
+ llvm::Constant::getNullValue(CGF.ConvertType(Elem));
+ return ComplexPairTy(Null, Null);
+ }
+
+ struct BinOpInfo {
+ ComplexPairTy LHS;
+ ComplexPairTy RHS;
+ QualType Ty; // Computation Type.
+ };
+
+ BinOpInfo EmitBinOps(const BinaryOperator *E);
+ LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)
+ (const BinOpInfo &),
+ RValue &Val);
+ ComplexPairTy EmitCompoundAssign(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)
+ (const BinOpInfo &));
+
+ ComplexPairTy EmitBinAdd(const BinOpInfo &Op);
+ ComplexPairTy EmitBinSub(const BinOpInfo &Op);
+ ComplexPairTy EmitBinMul(const BinOpInfo &Op);
+ ComplexPairTy EmitBinDiv(const BinOpInfo &Op);
+
+ ComplexPairTy EmitComplexBinOpLibCall(StringRef LibCallName,
+ const BinOpInfo &Op);
+
+ ComplexPairTy VisitBinAdd(const BinaryOperator *E) {
+ return EmitBinAdd(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinSub(const BinaryOperator *E) {
+ return EmitBinSub(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinMul(const BinaryOperator *E) {
+ return EmitBinMul(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinDiv(const BinaryOperator *E) {
+ return EmitBinDiv(EmitBinOps(E));
+ }
+
+ // Compound assignments.
+ ComplexPairTy VisitBinAddAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinAdd);
+ }
+ ComplexPairTy VisitBinSubAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinSub);
+ }
+ ComplexPairTy VisitBinMulAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinMul);
+ }
+ ComplexPairTy VisitBinDivAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinDiv);
+ }
+
+ // GCC rejects rem/and/or/xor for integer complex.
+ // Logical and/or always return int, never complex.
+
+ // No comparisons produce a complex result.
+
+ LValue EmitBinAssignLValue(const BinaryOperator *E,
+ ComplexPairTy &Val);
+ ComplexPairTy VisitBinAssign (const BinaryOperator *E);
+ ComplexPairTy VisitBinComma (const BinaryOperator *E);
+
+
+ ComplexPairTy
+ VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
+ ComplexPairTy VisitChooseExpr(ChooseExpr *CE);
+
+ ComplexPairTy VisitInitListExpr(InitListExpr *E);
+
+ ComplexPairTy VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+
+ ComplexPairTy VisitVAArgExpr(VAArgExpr *E);
+
+ ComplexPairTy VisitAtomicExpr(AtomicExpr *E) {
+ return CGF.EmitAtomicExpr(E).getComplexVal();
+ }
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+Address CodeGenFunction::emitAddrOfRealComponent(Address addr,
+ QualType complexType) {
+ CharUnits offset = CharUnits::Zero();
+ return Builder.CreateStructGEP(addr, 0, offset, addr.getName() + ".realp");
+}
+
+Address CodeGenFunction::emitAddrOfImagComponent(Address addr,
+ QualType complexType) {
+ QualType eltType = complexType->castAs<ComplexType>()->getElementType();
+ CharUnits offset = getContext().getTypeSizeInChars(eltType);
+ return Builder.CreateStructGEP(addr, 1, offset, addr.getName() + ".imagp");
+}
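+
+// Illustrative note: a complex value is laid out as the pair { elt, elt },
+// so for "_Complex double" the two helpers above compute offsets 0
+// (".realp") and sizeof(double) == 8 (".imagp") respectively.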
+
+/// EmitLoadOfLValue - Given an l-value of complex type, emit code to load
+/// the real and imaginary pieces, returning them as Real/Imag.
+ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue,
+ SourceLocation loc) {
+ assert(lvalue.isSimple() && "non-simple complex l-value?");
+ if (lvalue.getType()->isAtomicType())
+ return CGF.EmitAtomicLoad(lvalue, loc).getComplexVal();
+
+ Address SrcPtr = lvalue.getAddress();
+ bool isVolatile = lvalue.isVolatileQualified();
+
+ llvm::Value *Real = nullptr, *Imag = nullptr;
+
+ if (!IgnoreReal || isVolatile) {
+ Address RealP = CGF.emitAddrOfRealComponent(SrcPtr, lvalue.getType());
+ Real = Builder.CreateLoad(RealP, isVolatile, SrcPtr.getName() + ".real");
+ }
+
+ if (!IgnoreImag || isVolatile) {
+ Address ImagP = CGF.emitAddrOfImagComponent(SrcPtr, lvalue.getType());
+ Imag = Builder.CreateLoad(ImagP, isVolatile, SrcPtr.getName() + ".imag");
+ }
+
+ return ComplexPairTy(Real, Imag);
+}
+
+/// EmitStoreOfComplex - Store the specified real/imag parts into the
+/// specified value pointer.
+void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue,
+ bool isInit) {
+ if (lvalue.getType()->isAtomicType() ||
+ (!isInit && CGF.LValueIsSuitableForInlineAtomic(lvalue)))
+ return CGF.EmitAtomicStore(RValue::getComplex(Val), lvalue, isInit);
+
+ Address Ptr = lvalue.getAddress();
+ Address RealPtr = CGF.emitAddrOfRealComponent(Ptr, lvalue.getType());
+ Address ImagPtr = CGF.emitAddrOfImagComponent(Ptr, lvalue.getType());
+
+ Builder.CreateStore(Val.first, RealPtr, lvalue.isVolatileQualified());
+ Builder.CreateStore(Val.second, ImagPtr, lvalue.isVolatileQualified());
+}
+
+
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+ComplexPairTy ComplexExprEmitter::VisitExpr(Expr *E) {
+ CGF.ErrorUnsupported(E, "complex expression");
+ llvm::Type *EltTy =
+ CGF.ConvertType(getComplexType(E->getType())->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return ComplexPairTy(U, U);
+}
+
+ComplexPairTy ComplexExprEmitter::
+VisitImaginaryLiteral(const ImaginaryLiteral *IL) {
+ llvm::Value *Imag = CGF.EmitScalarExpr(IL->getSubExpr());
+ return ComplexPairTy(llvm::Constant::getNullValue(Imag->getType()), Imag);
+}
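+
+// Illustrative example: the imaginary literal "3.0i" is emitted as the
+// pair (0.0, 3.0).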
+
+
+ComplexPairTy ComplexExprEmitter::VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType(CGF.getContext())->isReferenceType())
+ return EmitLoadOfLValue(E);
+
+ return CGF.EmitCallExpr(E).getComplexVal();
+}
+
+ComplexPairTy ComplexExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ CodeGenFunction::StmtExprEvaluation eval(CGF);
+ Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(), true);
+ assert(RetAlloca.isValid() && "Expected complex return value");
+ return EmitLoadOfLValue(CGF.MakeAddrLValue(RetAlloca, E->getType()),
+ E->getExprLoc());
+}
+
+/// Emit a cast from complex value Val to DestType.
+ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
+ QualType SrcType,
+ QualType DestType,
+ SourceLocation Loc) {
+ // Get the src/dest element type.
+ SrcType = SrcType->castAs<ComplexType>()->getElementType();
+ DestType = DestType->castAs<ComplexType>()->getElementType();
+
+ // C99 6.3.1.6: When a value of complex type is converted to another
+ // complex type, both the real and imaginary parts follow the conversion
+ // rules for the corresponding real types.
+ Val.first = CGF.EmitScalarConversion(Val.first, SrcType, DestType, Loc);
+ Val.second = CGF.EmitScalarConversion(Val.second, SrcType, DestType, Loc);
+ return Val;
+}
+
+ComplexPairTy ComplexExprEmitter::EmitScalarToComplexCast(llvm::Value *Val,
+ QualType SrcType,
+ QualType DestType,
+ SourceLocation Loc) {
+ // Convert the input element to the element type of the complex.
+ DestType = DestType->castAs<ComplexType>()->getElementType();
+ Val = CGF.EmitScalarConversion(Val, SrcType, DestType, Loc);
+
+ // Return (realval, 0).
+ return ComplexPairTy(Val, llvm::Constant::getNullValue(Val->getType()));
+}
+
+ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
+ QualType DestTy) {
+ switch (CK) {
+ case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
+
+ // Atomic to non-atomic casts may be more than a no-op for some platforms and
+ // for some types.
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_NoOp:
+ case CK_LValueToRValue:
+ case CK_UserDefinedConversion:
+ return Visit(Op);
+
+ case CK_LValueBitCast: {
+ LValue origLV = CGF.EmitLValue(Op);
+ Address V = origLV.getAddress();
+ V = Builder.CreateElementBitCast(V, CGF.ConvertType(DestTy));
+ return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy), Op->getExprLoc());
+ }
+
+ case CK_BitCast:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ case CK_BuiltinFnToFnPtr:
+ case CK_ZeroToOCLEvent:
+ case CK_AddressSpaceConversion:
+ llvm_unreachable("invalid cast kind for complex value");
+
+ case CK_FloatingRealToComplex:
+ case CK_IntegralRealToComplex:
+ return EmitScalarToComplexCast(CGF.EmitScalarExpr(Op), Op->getType(),
+ DestTy, Op->getExprLoc());
+
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy,
+ Op->getExprLoc());
+ }
+
+ llvm_unreachable("unknown cast resulting in complex value");
+}
+
+ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ ComplexPairTy Op = Visit(E->getSubExpr());
+
+ llvm::Value *ResR, *ResI;
+ if (Op.first->getType()->isFloatingPointTy()) {
+ ResR = Builder.CreateFNeg(Op.first, "neg.r");
+ ResI = Builder.CreateFNeg(Op.second, "neg.i");
+ } else {
+ ResR = Builder.CreateNeg(Op.first, "neg.r");
+ ResI = Builder.CreateNeg(Op.second, "neg.i");
+ }
+ return ComplexPairTy(ResR, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ // ~(a+ib) = a + i*-b
+ ComplexPairTy Op = Visit(E->getSubExpr());
+ llvm::Value *ResI;
+ if (Op.second->getType()->isFloatingPointTy())
+ ResI = Builder.CreateFNeg(Op.second, "conj.i");
+ else
+ ResI = Builder.CreateNeg(Op.second, "conj.i");
+
+ return ComplexPairTy(Op.first, ResI);
+}
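+
+// Illustrative example: for z = 3.0 + 4.0i, "~z" yields the pair
+// (3.0, -4.0), i.e. the complex conjugate 3.0 - 4.0i.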
+
+ComplexPairTy ComplexExprEmitter::EmitBinAdd(const BinOpInfo &Op) {
+ llvm::Value *ResR, *ResI;
+
+ if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ ResR = Builder.CreateFAdd(Op.LHS.first, Op.RHS.first, "add.r");
+ if (Op.LHS.second && Op.RHS.second)
+ ResI = Builder.CreateFAdd(Op.LHS.second, Op.RHS.second, "add.i");
+ else
+ ResI = Op.LHS.second ? Op.LHS.second : Op.RHS.second;
+ assert(ResI && "Only one operand may be real!");
+ } else {
+ ResR = Builder.CreateAdd(Op.LHS.first, Op.RHS.first, "add.r");
+ assert(Op.LHS.second && Op.RHS.second &&
+ "Both operands of integer complex operators must be complex!");
+ ResI = Builder.CreateAdd(Op.LHS.second, Op.RHS.second, "add.i");
+ }
+ return ComplexPairTy(ResR, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::EmitBinSub(const BinOpInfo &Op) {
+ llvm::Value *ResR, *ResI;
+ if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ ResR = Builder.CreateFSub(Op.LHS.first, Op.RHS.first, "sub.r");
+ if (Op.LHS.second && Op.RHS.second)
+ ResI = Builder.CreateFSub(Op.LHS.second, Op.RHS.second, "sub.i");
+ else
+ ResI = Op.LHS.second ? Op.LHS.second
+ : Builder.CreateFNeg(Op.RHS.second, "sub.i");
+ assert(ResI && "Only one operand may be real!");
+ } else {
+ ResR = Builder.CreateSub(Op.LHS.first, Op.RHS.first, "sub.r");
+ assert(Op.LHS.second && Op.RHS.second &&
+ "Both operands of integer complex operators must be complex!");
+ ResI = Builder.CreateSub(Op.LHS.second, Op.RHS.second, "sub.i");
+ }
+ return ComplexPairTy(ResR, ResI);
+}
+
+/// \brief Emit a libcall for a binary operation on complex types.
+ComplexPairTy ComplexExprEmitter::EmitComplexBinOpLibCall(StringRef LibCallName,
+ const BinOpInfo &Op) {
+ CallArgList Args;
+ Args.add(RValue::get(Op.LHS.first),
+ Op.Ty->castAs<ComplexType>()->getElementType());
+ Args.add(RValue::get(Op.LHS.second),
+ Op.Ty->castAs<ComplexType>()->getElementType());
+ Args.add(RValue::get(Op.RHS.first),
+ Op.Ty->castAs<ComplexType>()->getElementType());
+ Args.add(RValue::get(Op.RHS.second),
+ Op.Ty->castAs<ComplexType>()->getElementType());
+
+  // We *must* use the full CG function call building logic here because the
+  // complex type has special ABI handling. We also must not forget the
+  // special calling convention that may be used for compiler builtins.
+
+ // We create a function qualified type to state that this call does not have
+ // any exceptions.
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI = EPI.withExceptionSpec(
+ FunctionProtoType::ExceptionSpecInfo(EST_BasicNoexcept));
+ SmallVector<QualType, 4> ArgsQTys(
+ 4, Op.Ty->castAs<ComplexType>()->getElementType());
+ QualType FQTy = CGF.getContext().getFunctionType(Op.Ty, ArgsQTys, EPI);
+ const CGFunctionInfo &FuncInfo = CGF.CGM.getTypes().arrangeFreeFunctionCall(
+ Args, cast<FunctionType>(FQTy.getTypePtr()), false);
+
+ llvm::FunctionType *FTy = CGF.CGM.getTypes().GetFunctionType(FuncInfo);
+ llvm::Constant *Func = CGF.CGM.CreateBuiltinFunction(FTy, LibCallName);
+ llvm::Instruction *Call;
+
+ RValue Res = CGF.EmitCall(FuncInfo, Func, ReturnValueSlot(), Args,
+ FQTy->getAs<FunctionProtoType>(), &Call);
+ cast<llvm::CallInst>(Call)->setCallingConv(CGF.CGM.getBuiltinCC());
+ return Res.getComplexVal();
+}
+
+/// \brief Lookup the libcall name for a given floating point type complex
+/// multiply.
+static StringRef getComplexMultiplyLibCallName(llvm::Type *Ty) {
+ switch (Ty->getTypeID()) {
+ default:
+ llvm_unreachable("Unsupported floating point type!");
+ case llvm::Type::HalfTyID:
+ return "__mulhc3";
+ case llvm::Type::FloatTyID:
+ return "__mulsc3";
+ case llvm::Type::DoubleTyID:
+ return "__muldc3";
+ case llvm::Type::PPC_FP128TyID:
+ return "__multc3";
+ case llvm::Type::X86_FP80TyID:
+ return "__mulxc3";
+ case llvm::Type::FP128TyID:
+ return "__multc3";
+ }
+}
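+
+// These helpers follow the compiler-rt convention of taking the four
+// scalar components separately, e.g. roughly
+//   float _Complex __mulsc3(float a, float b, float c, float d);
+// computing (a + ib) * (c + id); EmitComplexBinOpLibCall passes the
+// operands accordingly.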
+
+// See C11 Annex G.5.1 for the semantics of multiplicative operators on complex
+// typed values.
+ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
+ using llvm::Value;
+ Value *ResR, *ResI;
+ llvm::MDBuilder MDHelper(CGF.getLLVMContext());
+
+ if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ // The general formulation is:
+ // (a + ib) * (c + id) = (a * c - b * d) + i(a * d + b * c)
+ //
+ // But we can fold away components which would be zero due to a real
+ // operand according to C11 Annex G.5.1p2.
+ // FIXME: C11 also provides for imaginary types which would allow folding
+ // still more of this within the type system.
+
+ if (Op.LHS.second && Op.RHS.second) {
+ // If both operands are complex, emit the core math directly, and then
+ // test for NaNs. If we find NaNs in the result, we delegate to a libcall
+ // to carefully re-compute the correct infinity representation if
+ // possible. The expectation is that the presence of NaNs here is
+ // *extremely* rare, and so the cost of the libcall is almost irrelevant.
+ // This is good, because the libcall re-computes the core multiplication
+ // exactly the same as we do here and re-tests for NaNs in order to be
+ // a generic complex*complex libcall.
+
+ // First compute the four products.
+ Value *AC = Builder.CreateFMul(Op.LHS.first, Op.RHS.first, "mul_ac");
+ Value *BD = Builder.CreateFMul(Op.LHS.second, Op.RHS.second, "mul_bd");
+ Value *AD = Builder.CreateFMul(Op.LHS.first, Op.RHS.second, "mul_ad");
+ Value *BC = Builder.CreateFMul(Op.LHS.second, Op.RHS.first, "mul_bc");
+
+      // The real part is the difference of the first two products; the
+      // imaginary part is the sum of the last two.
+ ResR = Builder.CreateFSub(AC, BD, "mul_r");
+ ResI = Builder.CreateFAdd(AD, BC, "mul_i");
+
+ // Emit the test for the real part becoming NaN and create a branch to
+ // handle it. We test for NaN by comparing the number to itself.
+ Value *IsRNaN = Builder.CreateFCmpUNO(ResR, ResR, "isnan_cmp");
+ llvm::BasicBlock *ContBB = CGF.createBasicBlock("complex_mul_cont");
+ llvm::BasicBlock *INaNBB = CGF.createBasicBlock("complex_mul_imag_nan");
+ llvm::Instruction *Branch = Builder.CreateCondBr(IsRNaN, INaNBB, ContBB);
+ llvm::BasicBlock *OrigBB = Branch->getParent();
+
+ // Give hint that we very much don't expect to see NaNs.
+ // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp
+ llvm::MDNode *BrWeight = MDHelper.createBranchWeights(1, (1U << 20) - 1);
+ Branch->setMetadata(llvm::LLVMContext::MD_prof, BrWeight);
+
+ // Now test the imaginary part and create its branch.
+ CGF.EmitBlock(INaNBB);
+ Value *IsINaN = Builder.CreateFCmpUNO(ResI, ResI, "isnan_cmp");
+ llvm::BasicBlock *LibCallBB = CGF.createBasicBlock("complex_mul_libcall");
+ Branch = Builder.CreateCondBr(IsINaN, LibCallBB, ContBB);
+ Branch->setMetadata(llvm::LLVMContext::MD_prof, BrWeight);
+
+ // Now emit the libcall on this slowest of the slow paths.
+ CGF.EmitBlock(LibCallBB);
+ Value *LibCallR, *LibCallI;
+ std::tie(LibCallR, LibCallI) = EmitComplexBinOpLibCall(
+ getComplexMultiplyLibCallName(Op.LHS.first->getType()), Op);
+ Builder.CreateBr(ContBB);
+
+ // Finally continue execution by phi-ing together the different
+ // computation paths.
+ CGF.EmitBlock(ContBB);
+ llvm::PHINode *RealPHI = Builder.CreatePHI(ResR->getType(), 3, "real_mul_phi");
+ RealPHI->addIncoming(ResR, OrigBB);
+ RealPHI->addIncoming(ResR, INaNBB);
+ RealPHI->addIncoming(LibCallR, LibCallBB);
+ llvm::PHINode *ImagPHI = Builder.CreatePHI(ResI->getType(), 3, "imag_mul_phi");
+ ImagPHI->addIncoming(ResI, OrigBB);
+ ImagPHI->addIncoming(ResI, INaNBB);
+ ImagPHI->addIncoming(LibCallI, LibCallBB);
+ return ComplexPairTy(RealPHI, ImagPHI);
+ }
+ assert((Op.LHS.second || Op.RHS.second) &&
+ "At least one operand must be complex!");
+
+ // If either of the operands is a real rather than a complex, the
+ // imaginary component is ignored when computing the real component of the
+ // result.
+ ResR = Builder.CreateFMul(Op.LHS.first, Op.RHS.first, "mul.rl");
+
+ ResI = Op.LHS.second
+ ? Builder.CreateFMul(Op.LHS.second, Op.RHS.first, "mul.il")
+ : Builder.CreateFMul(Op.LHS.first, Op.RHS.second, "mul.ir");
+ } else {
+ assert(Op.LHS.second && Op.RHS.second &&
+ "Both operands of integer complex operators must be complex!");
+ Value *ResRl = Builder.CreateMul(Op.LHS.first, Op.RHS.first, "mul.rl");
+ Value *ResRr = Builder.CreateMul(Op.LHS.second, Op.RHS.second, "mul.rr");
+ ResR = Builder.CreateSub(ResRl, ResRr, "mul.r");
+
+ Value *ResIl = Builder.CreateMul(Op.LHS.second, Op.RHS.first, "mul.il");
+ Value *ResIr = Builder.CreateMul(Op.LHS.first, Op.RHS.second, "mul.ir");
+ ResI = Builder.CreateAdd(ResIl, ResIr, "mul.i");
+ }
+ return ComplexPairTy(ResR, ResI);
+}
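+
+// Illustrative check of the integer path: for (1 + 2i) * (3 + 4i) the
+// code computes ResR = 1*3 - 2*4 = -5 and ResI = 2*3 + 1*4 = 10,
+// i.e. -5 + 10i.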
+
+// See C11 Annex G.5.1 for the semantics of multiplicative operators on complex
+// typed values.
+ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
+ llvm::Value *LHSr = Op.LHS.first, *LHSi = Op.LHS.second;
+ llvm::Value *RHSr = Op.RHS.first, *RHSi = Op.RHS.second;
+
+
+ llvm::Value *DSTr, *DSTi;
+ if (LHSr->getType()->isFloatingPointTy()) {
+ // If we have a complex operand on the RHS, we delegate to a libcall to
+ // handle all of the complexities and minimize underflow/overflow cases.
+ //
+ // FIXME: We would be able to avoid the libcall in many places if we
+ // supported imaginary types in addition to complex types.
+ if (RHSi) {
+ BinOpInfo LibCallOp = Op;
+ // If LHS was a real, supply a null imaginary part.
+ if (!LHSi)
+ LibCallOp.LHS.second = llvm::Constant::getNullValue(LHSr->getType());
+
+ StringRef LibCallName;
+ switch (LHSr->getType()->getTypeID()) {
+ default:
+ llvm_unreachable("Unsupported floating point type!");
+ case llvm::Type::HalfTyID:
+ return EmitComplexBinOpLibCall("__divhc3", LibCallOp);
+ case llvm::Type::FloatTyID:
+ return EmitComplexBinOpLibCall("__divsc3", LibCallOp);
+ case llvm::Type::DoubleTyID:
+ return EmitComplexBinOpLibCall("__divdc3", LibCallOp);
+ case llvm::Type::PPC_FP128TyID:
+ return EmitComplexBinOpLibCall("__divtc3", LibCallOp);
+ case llvm::Type::X86_FP80TyID:
+ return EmitComplexBinOpLibCall("__divxc3", LibCallOp);
+ case llvm::Type::FP128TyID:
+ return EmitComplexBinOpLibCall("__divtc3", LibCallOp);
+ }
+ }
+ assert(LHSi && "Can have at most one non-complex operand!");
+
+ DSTr = Builder.CreateFDiv(LHSr, RHSr);
+ DSTi = Builder.CreateFDiv(LHSi, RHSr);
+ } else {
+ assert(Op.LHS.second && Op.RHS.second &&
+ "Both operands of integer complex operators must be complex!");
+ // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
+ llvm::Value *Tmp1 = Builder.CreateMul(LHSr, RHSr); // a*c
+ llvm::Value *Tmp2 = Builder.CreateMul(LHSi, RHSi); // b*d
+ llvm::Value *Tmp3 = Builder.CreateAdd(Tmp1, Tmp2); // ac+bd
+
+ llvm::Value *Tmp4 = Builder.CreateMul(RHSr, RHSr); // c*c
+ llvm::Value *Tmp5 = Builder.CreateMul(RHSi, RHSi); // d*d
+ llvm::Value *Tmp6 = Builder.CreateAdd(Tmp4, Tmp5); // cc+dd
+
+ llvm::Value *Tmp7 = Builder.CreateMul(LHSi, RHSr); // b*c
+ llvm::Value *Tmp8 = Builder.CreateMul(LHSr, RHSi); // a*d
+ llvm::Value *Tmp9 = Builder.CreateSub(Tmp7, Tmp8); // bc-ad
+
+ if (Op.Ty->castAs<ComplexType>()->getElementType()->isUnsignedIntegerType()) {
+ DSTr = Builder.CreateUDiv(Tmp3, Tmp6);
+ DSTi = Builder.CreateUDiv(Tmp9, Tmp6);
+ } else {
+ DSTr = Builder.CreateSDiv(Tmp3, Tmp6);
+ DSTi = Builder.CreateSDiv(Tmp9, Tmp6);
+ }
+ }
+
+ return ComplexPairTy(DSTr, DSTi);
+}
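+
+// Illustrative check of the integer formula: (4 + 2i) / (1 + 1i) gives
+//   real = (4*1 + 2*1) / (1*1 + 1*1) = 3
+//   imag = (2*1 - 4*1) / (1*1 + 1*1) = -1
+// i.e. 3 - i, and indeed (3 - i) * (1 + i) = 4 + 2i.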
+
+ComplexExprEmitter::BinOpInfo
+ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ BinOpInfo Ops;
+ if (E->getLHS()->getType()->isRealFloatingType())
+ Ops.LHS = ComplexPairTy(CGF.EmitScalarExpr(E->getLHS()), nullptr);
+ else
+ Ops.LHS = Visit(E->getLHS());
+ if (E->getRHS()->getType()->isRealFloatingType())
+ Ops.RHS = ComplexPairTy(CGF.EmitScalarExpr(E->getRHS()), nullptr);
+ else
+ Ops.RHS = Visit(E->getRHS());
+
+ Ops.Ty = E->getType();
+ return Ops;
+}
+
+
+LValue ComplexExprEmitter::
+EmitCompoundAssignLValue(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&),
+ RValue &Val) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ QualType LHSTy = E->getLHS()->getType();
+ if (const AtomicType *AT = LHSTy->getAs<AtomicType>())
+ LHSTy = AT->getValueType();
+
+ BinOpInfo OpInfo;
+
+ // Load the RHS and LHS operands.
+ // __block variables need to have the rhs evaluated first, plus this should
+ // improve codegen a little.
+ OpInfo.Ty = E->getComputationResultType();
+ QualType ComplexElementTy = cast<ComplexType>(OpInfo.Ty)->getElementType();
+
+ // The RHS should have been converted to the computation type.
+ if (E->getRHS()->getType()->isRealFloatingType()) {
+ assert(
+ CGF.getContext()
+ .hasSameUnqualifiedType(ComplexElementTy, E->getRHS()->getType()));
+ OpInfo.RHS = ComplexPairTy(CGF.EmitScalarExpr(E->getRHS()), nullptr);
+ } else {
+ assert(CGF.getContext()
+ .hasSameUnqualifiedType(OpInfo.Ty, E->getRHS()->getType()));
+ OpInfo.RHS = Visit(E->getRHS());
+ }
+
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+
+ // Load from the l-value and convert it.
+ SourceLocation Loc = E->getExprLoc();
+ if (LHSTy->isAnyComplexType()) {
+ ComplexPairTy LHSVal = EmitLoadOfLValue(LHS, Loc);
+ OpInfo.LHS = EmitComplexToComplexCast(LHSVal, LHSTy, OpInfo.Ty, Loc);
+ } else {
+ llvm::Value *LHSVal = CGF.EmitLoadOfScalar(LHS, Loc);
+ // For floating point real operands we can directly pass the scalar form
+ // to the binary operator emission and potentially get more efficient code.
+ if (LHSTy->isRealFloatingType()) {
+ if (!CGF.getContext().hasSameUnqualifiedType(ComplexElementTy, LHSTy))
+ LHSVal = CGF.EmitScalarConversion(LHSVal, LHSTy, ComplexElementTy, Loc);
+ OpInfo.LHS = ComplexPairTy(LHSVal, nullptr);
+ } else {
+ OpInfo.LHS = EmitScalarToComplexCast(LHSVal, LHSTy, OpInfo.Ty, Loc);
+ }
+ }
+
+ // Expand the binary operator.
+ ComplexPairTy Result = (this->*Func)(OpInfo);
+
+ // Truncate the result and store it into the LHS lvalue.
+ if (LHSTy->isAnyComplexType()) {
+ ComplexPairTy ResVal =
+ EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy, Loc);
+ EmitStoreOfComplex(ResVal, LHS, /*isInit*/ false);
+ Val = RValue::getComplex(ResVal);
+ } else {
+ llvm::Value *ResVal =
+ CGF.EmitComplexToScalarConversion(Result, OpInfo.Ty, LHSTy, Loc);
+ CGF.EmitStoreOfScalar(ResVal, LHS, /*isInit*/ false);
+ Val = RValue::get(ResVal);
+ }
+
+ return LHS;
+}
+
+// Compound assignments.
+ComplexPairTy ComplexExprEmitter::
+EmitCompoundAssign(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){
+ RValue Val;
+ LValue LV = EmitCompoundAssignLValue(E, Func, Val);
+
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getLangOpts().CPlusPlus)
+ return Val.getComplexVal();
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LV.isVolatileQualified())
+ return Val.getComplexVal();
+
+ return EmitLoadOfLValue(LV, E->getExprLoc());
+}
+
+LValue ComplexExprEmitter::EmitBinAssignLValue(const BinaryOperator *E,
+ ComplexPairTy &Val) {
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
+ E->getRHS()->getType()) &&
+ "Invalid assignment");
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+
+ // Emit the RHS. __block variables need the RHS evaluated first.
+ Val = Visit(E->getRHS());
+
+ // Compute the address to store into.
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+
+ // Store the result value into the LHS lvalue.
+ EmitStoreOfComplex(Val, LHS, /*isInit*/ false);
+
+ return LHS;
+}
+
+ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ ComplexPairTy Val;
+ LValue LV = EmitBinAssignLValue(E, Val);
+
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getLangOpts().CPlusPlus)
+ return Val;
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LV.isVolatileQualified())
+ return Val;
+
+ return EmitLoadOfLValue(LV, E->getExprLoc());
+}
+
+ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitIgnoredExpr(E->getLHS());
+ return Visit(E->getRHS());
+}
+
+ComplexPairTy ComplexExprEmitter::
+VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+
+ // Bind the common expression if necessary.
+ CodeGenFunction::OpaqueValueMapping binding(CGF, E);
+
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+ CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
+ CGF.getProfileCount(E));
+
+ eval.begin(CGF);
+ CGF.EmitBlock(LHSBlock);
+ CGF.incrementProfileCounter(E);
+ ComplexPairTy LHS = Visit(E->getTrueExpr());
+ LHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBranch(ContBlock);
+ eval.end(CGF);
+
+ eval.begin(CGF);
+ CGF.EmitBlock(RHSBlock);
+ ComplexPairTy RHS = Visit(E->getFalseExpr());
+ RHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBlock(ContBlock);
+ eval.end(CGF);
+
+ // Create a PHI node for the real part.
+ llvm::PHINode *RealPN = Builder.CreatePHI(LHS.first->getType(), 2, "cond.r");
+ RealPN->addIncoming(LHS.first, LHSBlock);
+ RealPN->addIncoming(RHS.first, RHSBlock);
+
+ // Create a PHI node for the imaginary part.
+ llvm::PHINode *ImagPN = Builder.CreatePHI(LHS.first->getType(), 2, "cond.i");
+ ImagPN->addIncoming(LHS.second, LHSBlock);
+ ImagPN->addIncoming(RHS.second, RHSBlock);
+
+ return ComplexPairTy(RealPN, ImagPN);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitChooseExpr(ChooseExpr *E) {
+ return Visit(E->getChosenSubExpr());
+}
+
+ComplexPairTy ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) {
+  bool Ignore = TestAndClearIgnoreReal();
+  (void)Ignore;
+  assert(Ignore == false && "init list ignored");
+  Ignore = TestAndClearIgnoreImag();
+  (void)Ignore;
+  assert(Ignore == false && "init list ignored");
+
+ if (E->getNumInits() == 2) {
+ llvm::Value *Real = CGF.EmitScalarExpr(E->getInit(0));
+ llvm::Value *Imag = CGF.EmitScalarExpr(E->getInit(1));
+ return ComplexPairTy(Real, Imag);
+ } else if (E->getNumInits() == 1) {
+ return Visit(E->getInit(0));
+ }
+
+  // Empty init list initializes to null
+ assert(E->getNumInits() == 0 && "Unexpected number of inits");
+ QualType Ty = E->getType()->castAs<ComplexType>()->getElementType();
+ llvm::Type* LTy = CGF.ConvertType(Ty);
+ llvm::Value* zeroConstant = llvm::Constant::getNullValue(LTy);
+ return ComplexPairTy(zeroConstant, zeroConstant);
+}
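+
+// Illustrative examples:
+//   _Complex double z1 = {1.0, 2.0};  // two inits -> pair (1.0, 2.0)
+//   _Complex double z2 = {};          // empty list -> pair (0.0, 0.0)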
+
+ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
+ Address ArgValue = Address::invalid();
+ Address ArgPtr = CGF.EmitVAArg(E, ArgValue);
+
+ if (!ArgPtr.isValid()) {
+ CGF.ErrorUnsupported(E, "complex va_arg expression");
+ llvm::Type *EltTy =
+ CGF.ConvertType(E->getType()->castAs<ComplexType>()->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return ComplexPairTy(U, U);
+ }
+
+ return EmitLoadOfLValue(CGF.MakeAddrLValue(ArgPtr, E->getType()),
+ E->getExprLoc());
+}
+
+//===----------------------------------------------------------------------===//
+// Entry Point into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitComplexExpr - Emit the computation of the specified expression of
+/// complex type, returning the result as a real/imaginary pair; the
+/// IgnoreReal/IgnoreImag flags indicate components the caller will discard.
+ComplexPairTy CodeGenFunction::EmitComplexExpr(const Expr *E, bool IgnoreReal,
+ bool IgnoreImag) {
+ assert(E && getComplexType(E->getType()) &&
+ "Invalid complex expression to emit");
+
+ return ComplexExprEmitter(*this, IgnoreReal, IgnoreImag)
+ .Visit(const_cast<Expr *>(E));
+}
+
+void CodeGenFunction::EmitComplexExprIntoLValue(const Expr *E, LValue dest,
+ bool isInit) {
+ assert(E && getComplexType(E->getType()) &&
+ "Invalid complex expression to emit");
+ ComplexExprEmitter Emitter(*this);
+ ComplexPairTy Val = Emitter.Visit(const_cast<Expr*>(E));
+ Emitter.EmitStoreOfComplex(Val, dest, isInit);
+}
+
+/// EmitStoreOfComplex - Store a complex number into the specified l-value.
+void CodeGenFunction::EmitStoreOfComplex(ComplexPairTy V, LValue dest,
+ bool isInit) {
+ ComplexExprEmitter(*this).EmitStoreOfComplex(V, dest, isInit);
+}
+
+/// EmitLoadOfComplex - Load a complex number from the specified address.
+ComplexPairTy CodeGenFunction::EmitLoadOfComplex(LValue src,
+ SourceLocation loc) {
+ return ComplexExprEmitter(*this).EmitLoadOfLValue(src, loc);
+}
+
+LValue CodeGenFunction::EmitComplexAssignmentLValue(const BinaryOperator *E) {
+ assert(E->getOpcode() == BO_Assign);
+ ComplexPairTy Val; // ignored
+ return ComplexExprEmitter(*this).EmitBinAssignLValue(E, Val);
+}
+
+typedef ComplexPairTy (ComplexExprEmitter::*CompoundFunc)(
+ const ComplexExprEmitter::BinOpInfo &);
+
+static CompoundFunc getComplexOp(BinaryOperatorKind Op) {
+ switch (Op) {
+ case BO_MulAssign: return &ComplexExprEmitter::EmitBinMul;
+ case BO_DivAssign: return &ComplexExprEmitter::EmitBinDiv;
+ case BO_SubAssign: return &ComplexExprEmitter::EmitBinSub;
+ case BO_AddAssign: return &ComplexExprEmitter::EmitBinAdd;
+ default:
+ llvm_unreachable("unexpected complex compound assignment");
+ }
+}
+
+LValue CodeGenFunction::
+EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E) {
+ CompoundFunc Op = getComplexOp(E->getOpcode());
+ RValue Val;
+ return ComplexExprEmitter(*this).EmitCompoundAssignLValue(E, Op, Val);
+}
+
+LValue CodeGenFunction::
+EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E,
+ llvm::Value *&Result) {
+ CompoundFunc Op = getComplexOp(E->getOpcode());
+ RValue Val;
+ LValue Ret = ComplexExprEmitter(*this).EmitCompoundAssignLValue(E, Op, Val);
+ Result = Val.getScalarVal();
+ return Ret;
+}
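+
+// A minimal sketch of when this last entry point fires (assumed example):
+//   float f = 1.0f;
+//   f += (float _Complex)2.0; // scalar LHS, complex RHS
+// The compound operation is computed in the complex domain, and Result
+// receives the scalar value that is stored back into 'f'.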
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
new file mode 100644
index 0000000..3839ab7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
@@ -0,0 +1,1607 @@
+//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Constant Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
+#include "CGObjCRuntime.h"
+#include "CGRecordLayout.h"
+#include "CodeGenModule.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/Builtins.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalVariable.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// ConstStructBuilder
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ConstExprEmitter;
+class ConstStructBuilder {
+ CodeGenModule &CGM;
+ CodeGenFunction *CGF;
+
+ bool Packed;
+ CharUnits NextFieldOffsetInChars;
+ CharUnits LLVMStructAlignment;
+ SmallVector<llvm::Constant *, 32> Elements;
+public:
+ static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
+ ConstExprEmitter *Emitter,
+ llvm::ConstantStruct *Base,
+ InitListExpr *Updater);
+ static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
+ InitListExpr *ILE);
+ static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
+ const APValue &Value, QualType ValTy);
+
+private:
+ ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
+ : CGM(CGM), CGF(CGF), Packed(false),
+ NextFieldOffsetInChars(CharUnits::Zero()),
+ LLVMStructAlignment(CharUnits::One()) { }
+
+ void AppendField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::Constant *InitExpr);
+
+ void AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst);
+
+ void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::ConstantInt *InitExpr);
+
+ void AppendPadding(CharUnits PadSize);
+
+ void AppendTailPadding(CharUnits RecordSize);
+
+ void ConvertStructToPacked();
+
+ bool Build(InitListExpr *ILE);
+ bool Build(ConstExprEmitter *Emitter, llvm::ConstantStruct *Base,
+ InitListExpr *Updater);
+ void Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase,
+ const CXXRecordDecl *VTableClass, CharUnits BaseOffset);
+ llvm::Constant *Finalize(QualType Ty);
+
+ CharUnits getAlignment(const llvm::Constant *C) const {
+ if (Packed) return CharUnits::One();
+ return CharUnits::fromQuantity(
+ CGM.getDataLayout().getABITypeAlignment(C->getType()));
+ }
+
+ CharUnits getSizeInChars(const llvm::Constant *C) const {
+ return CharUnits::fromQuantity(
+ CGM.getDataLayout().getTypeAllocSize(C->getType()));
+ }
+};
+
+void ConstStructBuilder::
+AppendField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::Constant *InitCst) {
+ const ASTContext &Context = CGM.getContext();
+
+ CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(FieldOffset);
+
+ AppendBytes(FieldOffsetInChars, InitCst);
+}
+
+void ConstStructBuilder::
+AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst) {
+
+ assert(NextFieldOffsetInChars <= FieldOffsetInChars
+ && "Field offset mismatch!");
+
+ CharUnits FieldAlignment = getAlignment(InitCst);
+
+ // Round up the field offset to the alignment of the field type.
+ CharUnits AlignedNextFieldOffsetInChars =
+ NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment);
+
+ if (AlignedNextFieldOffsetInChars < FieldOffsetInChars) {
+ // We need to append padding.
+ AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars);
+
+ assert(NextFieldOffsetInChars == FieldOffsetInChars &&
+ "Did not add enough padding!");
+
+ AlignedNextFieldOffsetInChars =
+ NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment);
+ }
+
+ if (AlignedNextFieldOffsetInChars > FieldOffsetInChars) {
+ assert(!Packed && "Alignment is wrong even with a packed struct!");
+
+ // Convert the struct to a packed struct.
+ ConvertStructToPacked();
+
+ // After we pack the struct, we may need to insert padding.
+ if (NextFieldOffsetInChars < FieldOffsetInChars) {
+ // We need to append padding.
+ AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars);
+
+ assert(NextFieldOffsetInChars == FieldOffsetInChars &&
+ "Did not add enough padding!");
+ }
+ AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
+ }
+
+ // Add the field.
+ Elements.push_back(InitCst);
+ NextFieldOffsetInChars = AlignedNextFieldOffsetInChars +
+ getSizeInChars(InitCst);
+
+ if (Packed)
+ assert(LLVMStructAlignment == CharUnits::One() &&
+ "Packed struct not byte-aligned!");
+ else
+ LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
+}
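+
+// Worked example of the repacking logic above (editorial assumption):
+// appending an i32 at byte offset 5 when NextFieldOffsetInChars is 4 first
+// pads 4 -> 5, but rounding 5 up to the i32's 4-byte ABI alignment yields
+// 8 > 5, so the builder converts to a packed struct and places the i32 at
+// offset 5 with byte alignment.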
+
+void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
+ uint64_t FieldOffset,
+ llvm::ConstantInt *CI) {
+ const ASTContext &Context = CGM.getContext();
+ const uint64_t CharWidth = Context.getCharWidth();
+ uint64_t NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
+ if (FieldOffset > NextFieldOffsetInBits) {
+ // We need to add padding.
+ CharUnits PadSize = Context.toCharUnitsFromBits(
+ llvm::RoundUpToAlignment(FieldOffset - NextFieldOffsetInBits,
+ Context.getTargetInfo().getCharAlign()));
+
+ AppendPadding(PadSize);
+ }
+
+ uint64_t FieldSize = Field->getBitWidthValue(Context);
+
+ llvm::APInt FieldValue = CI->getValue();
+
+ // Promote the size of FieldValue if necessary.
+ // FIXME: This should never occur, but currently it can because initializer
+ // constants are cast to bool, and because clang is not enforcing bitfield
+ // width limits.
+ if (FieldSize > FieldValue.getBitWidth())
+ FieldValue = FieldValue.zext(FieldSize);
+
+ // Truncate the size of FieldValue to the bit field size.
+ if (FieldSize < FieldValue.getBitWidth())
+ FieldValue = FieldValue.trunc(FieldSize);
+
+ NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
+ if (FieldOffset < NextFieldOffsetInBits) {
+ // Either part of the field or the entire field can go into the previous
+ // byte.
+ assert(!Elements.empty() && "Elements can't be empty!");
+
+ unsigned BitsInPreviousByte = NextFieldOffsetInBits - FieldOffset;
+
+ bool FitsCompletelyInPreviousByte =
+ BitsInPreviousByte >= FieldValue.getBitWidth();
+
+ llvm::APInt Tmp = FieldValue;
+
+ if (!FitsCompletelyInPreviousByte) {
+ unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;
+
+ if (CGM.getDataLayout().isBigEndian()) {
+ Tmp = Tmp.lshr(NewFieldWidth);
+ Tmp = Tmp.trunc(BitsInPreviousByte);
+
+ // We want the remaining high bits.
+ FieldValue = FieldValue.trunc(NewFieldWidth);
+ } else {
+ Tmp = Tmp.trunc(BitsInPreviousByte);
+
+ // We want the remaining low bits.
+ FieldValue = FieldValue.lshr(BitsInPreviousByte);
+ FieldValue = FieldValue.trunc(NewFieldWidth);
+ }
+ }
+
+ Tmp = Tmp.zext(CharWidth);
+ if (CGM.getDataLayout().isBigEndian()) {
+ if (FitsCompletelyInPreviousByte)
+ Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
+ } else {
+ Tmp = Tmp.shl(CharWidth - BitsInPreviousByte);
+ }
+
+ // 'or' in the bits that go into the previous byte.
+ llvm::Value *LastElt = Elements.back();
+ if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(LastElt))
+ Tmp |= Val->getValue();
+ else {
+ assert(isa<llvm::UndefValue>(LastElt));
+ // If there is an undef field that we're adding to, it can either be a
+ // scalar undef (in which case, we just replace it with our field) or it
+ // is an array. If it is an array, we have to pull one byte off the
+ // array so that the other undef bytes stay around.
+ if (!isa<llvm::IntegerType>(LastElt->getType())) {
+ // The undef padding will be a multibyte array; create a new smaller
+ // padding and then a hole for our i8 to get plopped into.
+ assert(isa<llvm::ArrayType>(LastElt->getType()) &&
+ "Expected array padding of undefs");
+ llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
+ assert(AT->getElementType()->isIntegerTy(CharWidth) &&
+ AT->getNumElements() != 0 &&
+ "Expected non-empty array padding of undefs");
+
+ // Remove the padding array.
+ NextFieldOffsetInChars -= CharUnits::fromQuantity(AT->getNumElements());
+ Elements.pop_back();
+
+ // Add the padding back in two chunks.
+ AppendPadding(CharUnits::fromQuantity(AT->getNumElements()-1));
+ AppendPadding(CharUnits::One());
+ assert(isa<llvm::UndefValue>(Elements.back()) &&
+ Elements.back()->getType()->isIntegerTy(CharWidth) &&
+ "Padding addition didn't work right");
+ }
+ }
+
+ Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);
+
+ if (FitsCompletelyInPreviousByte)
+ return;
+ }
+
+ while (FieldValue.getBitWidth() > CharWidth) {
+ llvm::APInt Tmp;
+
+ if (CGM.getDataLayout().isBigEndian()) {
+ // We want the high bits.
+ Tmp =
+ FieldValue.lshr(FieldValue.getBitWidth() - CharWidth).trunc(CharWidth);
+ } else {
+ // We want the low bits.
+ Tmp = FieldValue.trunc(CharWidth);
+
+ FieldValue = FieldValue.lshr(CharWidth);
+ }
+
+ Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
+ ++NextFieldOffsetInChars;
+
+ FieldValue = FieldValue.trunc(FieldValue.getBitWidth() - CharWidth);
+ }
+
+ assert(FieldValue.getBitWidth() > 0 &&
+ "Should have at least one bit left!");
+ assert(FieldValue.getBitWidth() <= CharWidth &&
+ "Should not have more than a byte left!");
+
+ if (FieldValue.getBitWidth() < CharWidth) {
+ if (CGM.getDataLayout().isBigEndian()) {
+ unsigned BitWidth = FieldValue.getBitWidth();
+
+ FieldValue = FieldValue.zext(CharWidth) << (CharWidth - BitWidth);
+ } else
+ FieldValue = FieldValue.zext(CharWidth);
+ }
+
+ // Append the last element.
+ Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
+ FieldValue));
+ ++NextFieldOffsetInChars;
+}
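+
+// Worked example of the byte-splitting above (editorial, little-endian):
+//   struct { unsigned a : 3; unsigned b : 7; } s = { 5, 70 };
+// After 'a' fills bits 0-2 of byte 0, 'b' arrives with BitsInPreviousByte
+// == 5; its low 5 bits are or'ed into the high bits of byte 0, and its
+// remaining 2 bits begin byte 1, exactly the two-step split coded above.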
+
+void ConstStructBuilder::AppendPadding(CharUnits PadSize) {
+ if (PadSize.isZero())
+ return;
+
+ llvm::Type *Ty = CGM.Int8Ty;
+ if (PadSize > CharUnits::One())
+ Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity());
+
+ llvm::Constant *C = llvm::UndefValue::get(Ty);
+ Elements.push_back(C);
+ assert(getAlignment(C) == CharUnits::One() &&
+ "Padding must have 1 byte alignment!");
+
+ NextFieldOffsetInChars += getSizeInChars(C);
+}
+
+void ConstStructBuilder::AppendTailPadding(CharUnits RecordSize) {
+ assert(NextFieldOffsetInChars <= RecordSize &&
+ "Size mismatch!");
+
+ AppendPadding(RecordSize - NextFieldOffsetInChars);
+}
+
+void ConstStructBuilder::ConvertStructToPacked() {
+ SmallVector<llvm::Constant *, 16> PackedElements;
+ CharUnits ElementOffsetInChars = CharUnits::Zero();
+
+ for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
+ llvm::Constant *C = Elements[i];
+
+ CharUnits ElementAlign = CharUnits::fromQuantity(
+ CGM.getDataLayout().getABITypeAlignment(C->getType()));
+ CharUnits AlignedElementOffsetInChars =
+ ElementOffsetInChars.RoundUpToAlignment(ElementAlign);
+
+ if (AlignedElementOffsetInChars > ElementOffsetInChars) {
+ // We need some padding.
+ CharUnits NumChars =
+ AlignedElementOffsetInChars - ElementOffsetInChars;
+
+ llvm::Type *Ty = CGM.Int8Ty;
+ if (NumChars > CharUnits::One())
+ Ty = llvm::ArrayType::get(Ty, NumChars.getQuantity());
+
+ llvm::Constant *Padding = llvm::UndefValue::get(Ty);
+ PackedElements.push_back(Padding);
+ ElementOffsetInChars += getSizeInChars(Padding);
+ }
+
+ PackedElements.push_back(C);
+ ElementOffsetInChars += getSizeInChars(C);
+ }
+
+ assert(ElementOffsetInChars == NextFieldOffsetInChars &&
+ "Packing the struct changed its size!");
+
+ Elements.swap(PackedElements);
+ LLVMStructAlignment = CharUnits::One();
+ Packed = true;
+}
+
+bool ConstStructBuilder::Build(InitListExpr *ILE) {
+ RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ unsigned FieldNo = 0;
+ unsigned ElementNo = 0;
+
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+ // If this is a union, skip all the fields that aren't being initialized.
+ if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
+ continue;
+
+ // Don't emit anonymous bitfields; they just affect layout.
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ // Get the initializer. A struct can include fields without initializers;
+ // we just use explicit null values for them.
+ llvm::Constant *EltInit;
+ if (ElementNo < ILE->getNumInits())
+ EltInit = CGM.EmitConstantExpr(ILE->getInit(ElementNo++),
+ Field->getType(), CGF);
+ else
+ EltInit = CGM.EmitNullConstant(Field->getType());
+
+ if (!EltInit)
+ return false;
+
+ if (!Field->isBitField()) {
+ // Handle non-bitfield members.
+ AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit);
+ } else {
+ // Otherwise we have a bitfield.
+ if (auto *CI = dyn_cast<llvm::ConstantInt>(EltInit)) {
+ AppendBitField(*Field, Layout.getFieldOffset(FieldNo), CI);
+ } else {
+ // We are trying to initialize a bitfield with a non-trivial constant;
+ // initializing it will require run-time code.
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+namespace {
+struct BaseInfo {
+ BaseInfo(const CXXRecordDecl *Decl, CharUnits Offset, unsigned Index)
+ : Decl(Decl), Offset(Offset), Index(Index) {
+ }
+
+ const CXXRecordDecl *Decl;
+ CharUnits Offset;
+ unsigned Index;
+
+ bool operator<(const BaseInfo &O) const { return Offset < O.Offset; }
+};
+}
+
+void ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
+ bool IsPrimaryBase,
+ const CXXRecordDecl *VTableClass,
+ CharUnits Offset) {
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
+ // Add a vtable pointer, if we need one and it hasn't already been added.
+ if (CD->isDynamicClass() && !IsPrimaryBase) {
+ llvm::Constant *VTableAddressPoint =
+ CGM.getCXXABI().getVTableAddressPointForConstExpr(
+ BaseSubobject(CD, Offset), VTableClass);
+ AppendBytes(Offset, VTableAddressPoint);
+ }
+
+ // Accumulate and sort bases, in order to visit them in address order, which
+ // may not be the same as declaration order.
+ SmallVector<BaseInfo, 8> Bases;
+ Bases.reserve(CD->getNumBases());
+ unsigned BaseNo = 0;
+ for (CXXRecordDecl::base_class_const_iterator Base = CD->bases_begin(),
+ BaseEnd = CD->bases_end(); Base != BaseEnd; ++Base, ++BaseNo) {
+ assert(!Base->isVirtual() && "should not have virtual bases here");
+ const CXXRecordDecl *BD = Base->getType()->getAsCXXRecordDecl();
+ CharUnits BaseOffset = Layout.getBaseClassOffset(BD);
+ Bases.push_back(BaseInfo(BD, BaseOffset, BaseNo));
+ }
+ std::stable_sort(Bases.begin(), Bases.end());
+
+ for (unsigned I = 0, N = Bases.size(); I != N; ++I) {
+ BaseInfo &Base = Bases[I];
+
+ bool IsPrimaryBase = Layout.getPrimaryBase() == Base.Decl;
+ Build(Val.getStructBase(Base.Index), Base.Decl, IsPrimaryBase,
+ VTableClass, Offset + Base.Offset);
+ }
+ }
+
+ unsigned FieldNo = 0;
+ uint64_t OffsetBits = CGM.getContext().toBits(Offset);
+
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+ // If this is a union, skip all the fields that aren't being initialized.
+ if (RD->isUnion() && Val.getUnionField() != *Field)
+ continue;
+
+ // Don't emit anonymous bitfields; they just affect layout.
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ // Emit the value of the initializer.
+ const APValue &FieldValue =
+ RD->isUnion() ? Val.getUnionValue() : Val.getStructField(FieldNo);
+ llvm::Constant *EltInit =
+ CGM.EmitConstantValueForMemory(FieldValue, Field->getType(), CGF);
+ assert(EltInit && "EmitConstantValue can't fail");
+
+ if (!Field->isBitField()) {
+ // Handle non-bitfield members.
+ AppendField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits, EltInit);
+ } else {
+ // Otherwise we have a bitfield.
+ AppendBitField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
+ cast<llvm::ConstantInt>(EltInit));
+ }
+ }
+}
+
+llvm::Constant *ConstStructBuilder::Finalize(QualType Ty) {
+ RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ CharUnits LayoutSizeInChars = Layout.getSize();
+
+ if (NextFieldOffsetInChars > LayoutSizeInChars) {
+ // If the struct is bigger than the size of the record type,
+ // we must have a flexible array member at the end.
+ assert(RD->hasFlexibleArrayMember() &&
+ "Must have flexible array member if struct is bigger than type!");
+
+ // No tail padding is necessary.
+ } else {
+ // Append tail padding if necessary.
+ CharUnits LLVMSizeInChars =
+ NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
+
+ if (LLVMSizeInChars != LayoutSizeInChars)
+ AppendTailPadding(LayoutSizeInChars);
+
+ LLVMSizeInChars =
+ NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
+
+ // Check if we need to convert the struct to a packed struct.
+ if (NextFieldOffsetInChars <= LayoutSizeInChars &&
+ LLVMSizeInChars > LayoutSizeInChars) {
+ assert(!Packed && "Size mismatch!");
+
+ ConvertStructToPacked();
+ assert(NextFieldOffsetInChars <= LayoutSizeInChars &&
+ "Converting to packed did not help!");
+ }
+
+ LLVMSizeInChars =
+ NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
+
+ assert(LayoutSizeInChars == LLVMSizeInChars &&
+ "Tail padding mismatch!");
+ }
+
+ // Pick the type to use. If the type is layout identical to the ConvertType
+ // type then use it, otherwise use whatever the builder produced for us.
+ llvm::StructType *STy =
+ llvm::ConstantStruct::getTypeForElements(CGM.getLLVMContext(),
+ Elements, Packed);
+ llvm::Type *ValTy = CGM.getTypes().ConvertType(Ty);
+ if (llvm::StructType *ValSTy = dyn_cast<llvm::StructType>(ValTy)) {
+ if (ValSTy->isLayoutIdentical(STy))
+ STy = ValSTy;
+ }
+
+ llvm::Constant *Result = llvm::ConstantStruct::get(STy, Elements);
+
+ assert(NextFieldOffsetInChars.RoundUpToAlignment(getAlignment(Result)) ==
+ getSizeInChars(Result) && "Size mismatch!");
+
+ return Result;
+}
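+
+// Illustrative example of the flexible-array branch above (assumption):
+//   struct S { int n; char data[]; };
+//   static struct S s = { 1, { 'a', 'b' } };
+// The emitted constant is 6 bytes while Layout.getSize() is 4, which is
+// only legal because of the flexible array member; no tail padding is added.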
+
+llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
+ CodeGenFunction *CGF,
+ ConstExprEmitter *Emitter,
+ llvm::ConstantStruct *Base,
+ InitListExpr *Updater) {
+ ConstStructBuilder Builder(CGM, CGF);
+ if (!Builder.Build(Emitter, Base, Updater))
+ return nullptr;
+ return Builder.Finalize(Updater->getType());
+}
+
+llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
+ CodeGenFunction *CGF,
+ InitListExpr *ILE) {
+ ConstStructBuilder Builder(CGM, CGF);
+
+ if (!Builder.Build(ILE))
+ return nullptr;
+
+ return Builder.Finalize(ILE->getType());
+}
+
+llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
+ CodeGenFunction *CGF,
+ const APValue &Val,
+ QualType ValTy) {
+ ConstStructBuilder Builder(CGM, CGF);
+
+ const RecordDecl *RD = ValTy->castAs<RecordType>()->getDecl();
+ const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
+ Builder.Build(Val, RD, false, CD, CharUnits::Zero());
+
+ return Builder.Finalize(ValTy);
+}
+
+
+//===----------------------------------------------------------------------===//
+// ConstExprEmitter
+//===----------------------------------------------------------------------===//
+
+/// This class only needs to handle two cases:
+/// 1) Literals (this is used by APValue emission to emit literals).
+/// 2) Arrays, structs, and unions (outside C++11 mode, we don't currently
+/// constant fold these types).
+class ConstExprEmitter :
+ public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
+ CodeGenModule &CGM;
+ CodeGenFunction *CGF;
+ llvm::LLVMContext &VMContext;
+public:
+ ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
+ : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ llvm::Constant *VisitStmt(Stmt *S) {
+ return nullptr;
+ }
+
+ llvm::Constant *VisitParenExpr(ParenExpr *PE) {
+ return Visit(PE->getSubExpr());
+ }
+
+ llvm::Constant *
+ VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) {
+ return Visit(PE->getReplacement());
+ }
+
+ llvm::Constant *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
+ return Visit(GE->getResultExpr());
+ }
+
+ llvm::Constant *VisitChooseExpr(ChooseExpr *CE) {
+ return Visit(CE->getChosenSubExpr());
+ }
+
+ llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ return Visit(E->getInitializer());
+ }
+
+ llvm::Constant *VisitCastExpr(CastExpr* E) {
+ if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
+ CGM.EmitExplicitCastExprType(ECE, CGF);
+ Expr *subExpr = E->getSubExpr();
+ llvm::Constant *C = CGM.EmitConstantExpr(subExpr, subExpr->getType(), CGF);
+ if (!C) return nullptr;
+
+ llvm::Type *destType = ConvertType(E->getType());
+
+ switch (E->getCastKind()) {
+ case CK_ToUnion: {
+ // GCC cast to union extension
+ assert(E->getType()->isUnionType() &&
+ "Destination type is not union type!");
+
+ // Build a struct with the union sub-element as the first member,
+ // padded to the appropriate size.
+ SmallVector<llvm::Constant*, 2> Elts;
+ SmallVector<llvm::Type*, 2> Types;
+ Elts.push_back(C);
+ Types.push_back(C->getType());
+ unsigned CurSize = CGM.getDataLayout().getTypeAllocSize(C->getType());
+ unsigned TotalSize = CGM.getDataLayout().getTypeAllocSize(destType);
+
+ assert(CurSize <= TotalSize && "Union size mismatch!");
+ if (unsigned NumPadBytes = TotalSize - CurSize) {
+ llvm::Type *Ty = CGM.Int8Ty;
+ if (NumPadBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumPadBytes);
+
+ Elts.push_back(llvm::UndefValue::get(Ty));
+ Types.push_back(Ty);
+ }
+
+ llvm::StructType* STy =
+ llvm::StructType::get(C->getType()->getContext(), Types, false);
+ return llvm::ConstantStruct::get(STy, Elts);
+ }
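+ // Assumed example of the CK_ToUnion extension handled above (editorial):
+ //   union U { int i; double d; };
+ //   union U u = (union U)42;
+ // produces { i32 42, [4 x i8] undef }, padding the 4-byte int member out
+ // to the union's 8-byte allocation size.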
+
+ case CK_AddressSpaceConversion:
+ return llvm::ConstantExpr::getAddrSpaceCast(C, destType);
+
+ case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_NoOp:
+ case CK_ConstructorConversion:
+ return C;
+
+ case CK_Dependent: llvm_unreachable("saw dependent cast!");
+
+ case CK_BuiltinFnToFnPtr:
+ llvm_unreachable("builtin functions are handled elsewhere");
+
+ case CK_ReinterpretMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ return CGM.getCXXABI().EmitMemberPointerConversion(E, C);
+
+ // These will never be supported.
+ case CK_ObjCObjectLValueCast:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ return nullptr;
+
+ // These don't need to be handled here because Evaluate knows how to
+ // evaluate them in the cases where they can be folded.
+ case CK_BitCast:
+ case CK_ToVoid:
+ case CK_Dynamic:
+ case CK_LValueBitCast:
+ case CK_NullToMemberPointer:
+ case CK_UserDefinedConversion:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_MemberPointerToBoolean:
+ case CK_VectorSplat:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_NullToPointer:
+ case CK_IntegralCast:
+ case CK_IntegralToPointer:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_ZeroToOCLEvent:
+ return nullptr;
+ }
+ llvm_unreachable("Invalid CastKind");
+ }
+
+ llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+
+ llvm::Constant *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
+ // No need for a DefaultInitExprScope: we don't handle 'this' in a
+ // constant expression.
+ return Visit(DIE->getExpr());
+ }
+
+ llvm::Constant *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
+ return Visit(E->GetTemporaryExpr());
+ }
+
+ llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
+ if (ILE->isStringLiteralInit())
+ return Visit(ILE->getInit(0));
+
+ llvm::ArrayType *AType =
+ cast<llvm::ArrayType>(ConvertType(ILE->getType()));
+ llvm::Type *ElemTy = AType->getElementType();
+ unsigned NumInitElements = ILE->getNumInits();
+ unsigned NumElements = AType->getNumElements();
+
+ // Initializing an array requires us to automatically
+ // initialize any elements that have not been initialized explicitly.
+ unsigned NumInitableElts = std::min(NumInitElements, NumElements);
+
+ // Initialize remaining array elements.
+ // FIXME: This doesn't handle member pointers correctly!
+ llvm::Constant *fillC;
+ if (Expr *filler = ILE->getArrayFiller())
+ fillC = CGM.EmitConstantExpr(filler, filler->getType(), CGF);
+ else
+ fillC = llvm::Constant::getNullValue(ElemTy);
+ if (!fillC)
+ return nullptr;
+
+ // Try to use a ConstantAggregateZero if we can.
+ if (fillC->isNullValue() && !NumInitableElts)
+ return llvm::ConstantAggregateZero::get(AType);
+
+ // Copy initializer elements.
+ std::vector<llvm::Constant*> Elts;
+ Elts.reserve(NumInitableElts + NumElements);
+
+ bool RewriteType = false;
+ for (unsigned i = 0; i < NumInitableElts; ++i) {
+ Expr *Init = ILE->getInit(i);
+ llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
+ if (!C)
+ return nullptr;
+ RewriteType |= (C->getType() != ElemTy);
+ Elts.push_back(C);
+ }
+
+ RewriteType |= (fillC->getType() != ElemTy);
+ Elts.resize(NumElements, fillC);
+
+ if (RewriteType) {
+ // FIXME: Try to avoid packing the array
+ std::vector<llvm::Type*> Types;
+ Types.reserve(NumInitableElts + NumElements);
+ for (unsigned i = 0, e = Elts.size(); i < e; ++i)
+ Types.push_back(Elts[i]->getType());
+ llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
+ Types, true);
+ return llvm::ConstantStruct::get(SType, Elts);
+ }
+
+ return llvm::ConstantArray::get(AType, Elts);
+ }
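+
+ // Worked examples of the filler logic above (editorial illustration):
+ //   int a[4] = { 1, 2 }; // -> [i32 1, i32 2, i32 0, i32 0]
+ //   int b[4] = {};       // -> ConstantAggregateZero for [4 x i32]
+ // The packed-struct fallback fires only when some element constant's type
+ // disagrees with the converted element type.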
+
+ llvm::Constant *EmitRecordInitialization(InitListExpr *ILE) {
+ return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
+ }
+
+ llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
+ return CGM.EmitNullConstant(E->getType());
+ }
+
+ llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
+ if (ILE->getType()->isArrayType())
+ return EmitArrayInitialization(ILE);
+
+ if (ILE->getType()->isRecordType())
+ return EmitRecordInitialization(ILE);
+
+ return nullptr;
+ }
+
+ llvm::Constant *EmitDesignatedInitUpdater(llvm::Constant *Base,
+ InitListExpr *Updater) {
+ QualType ExprType = Updater->getType();
+
+ if (ExprType->isArrayType()) {
+ llvm::ArrayType *AType = cast<llvm::ArrayType>(ConvertType(ExprType));
+ llvm::Type *ElemType = AType->getElementType();
+
+ unsigned NumInitElements = Updater->getNumInits();
+ unsigned NumElements = AType->getNumElements();
+
+ std::vector<llvm::Constant *> Elts;
+ Elts.reserve(NumElements);
+
+ if (llvm::ConstantDataArray *DataArray =
+ dyn_cast<llvm::ConstantDataArray>(Base))
+ for (unsigned i = 0; i != NumElements; ++i)
+ Elts.push_back(DataArray->getElementAsConstant(i));
+ else if (llvm::ConstantArray *Array =
+ dyn_cast<llvm::ConstantArray>(Base))
+ for (unsigned i = 0; i != NumElements; ++i)
+ Elts.push_back(Array->getOperand(i));
+ else
+ return nullptr; // FIXME: other array types not implemented
+
+ llvm::Constant *fillC = nullptr;
+ if (Expr *filler = Updater->getArrayFiller())
+ if (!isa<NoInitExpr>(filler))
+ fillC = CGM.EmitConstantExpr(filler, filler->getType(), CGF);
+ bool RewriteType = (fillC && fillC->getType() != ElemType);
+
+ for (unsigned i = 0; i != NumElements; ++i) {
+ Expr *Init = nullptr;
+ if (i < NumInitElements)
+ Init = Updater->getInit(i);
+
+ if (!Init && fillC)
+ Elts[i] = fillC;
+ else if (!Init || isa<NoInitExpr>(Init))
+ ; // Do nothing.
+ else if (InitListExpr *ChildILE = dyn_cast<InitListExpr>(Init))
+ Elts[i] = EmitDesignatedInitUpdater(Elts[i], ChildILE);
+ else
+ Elts[i] = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
+
+ if (!Elts[i])
+ return nullptr;
+ RewriteType |= (Elts[i]->getType() != ElemType);
+ }
+
+ if (RewriteType) {
+ std::vector<llvm::Type *> Types;
+ Types.reserve(NumElements);
+ for (unsigned i = 0; i != NumElements; ++i)
+ Types.push_back(Elts[i]->getType());
+ llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
+ Types, true);
+ return llvm::ConstantStruct::get(SType, Elts);
+ }
+
+ return llvm::ConstantArray::get(AType, Elts);
+ }
+
+ if (ExprType->isRecordType())
+ return ConstStructBuilder::BuildStruct(CGM, CGF, this,
+ dyn_cast<llvm::ConstantStruct>(Base), Updater);
+
+ return nullptr;
+ }
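+
+ // Hedged illustration (editorial): a DesignatedInitUpdateExpr arises when
+ // a later designator overrides part of an earlier initializer that cannot
+ // simply be dropped, e.g. (assumed) int a[2] = { f(), [0] = 1 }; with a
+ // side-effecting f(). The base constant is copied and only the updated
+ // slots are replaced; NoInitExpr slots keep their base values.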
+
+ llvm::Constant *VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
+ return EmitDesignatedInitUpdater(
+ CGM.EmitConstantExpr(E->getBase(), E->getType(), CGF),
+ E->getUpdater());
+ }
+
+ llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) {
+ if (!E->getConstructor()->isTrivial())
+ return nullptr;
+
+ QualType Ty = E->getType();
+
+ // FIXME: We should not have to call getBaseElementType here.
+ const RecordType *RT =
+ CGM.getContext().getBaseElementType(Ty)->getAs<RecordType>();
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ // If the class doesn't have a trivial destructor, we can't emit it as a
+ // constant expr.
+ if (!RD->hasTrivialDestructor())
+ return nullptr;
+
+ // Only copy and default constructors can be trivial.
+
+ if (E->getNumArgs()) {
+ assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
+ assert(E->getConstructor()->isCopyOrMoveConstructor() &&
+ "trivial ctor has argument but isn't a copy/move ctor");
+
+ Expr *Arg = E->getArg(0);
+ assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
+ "argument to copy ctor is of wrong type");
+
+ return Visit(Arg);
+ }
+
+ return CGM.EmitNullConstant(Ty);
+ }
+
+ llvm::Constant *VisitStringLiteral(StringLiteral *E) {
+ return CGM.GetConstantArrayFromStringLiteral(E);
+ }
+
+ llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
+ // This must be an @encode initializing an array in a static initializer.
+ // Don't emit it as the address of the string; emit the string data itself
+ // as an inline array.
+ std::string Str;
+ CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
+ QualType T = E->getType();
+ if (T->getTypeClass() == Type::TypeOfExpr)
+ T = cast<TypeOfExprType>(T)->getUnderlyingExpr()->getType();
+ const ConstantArrayType *CAT = cast<ConstantArrayType>(T);
+
+ // Resize the string to the right size, adding zeros at the end, or
+ // truncating as needed.
+ Str.resize(CAT->getSize().getZExtValue(), '\0');
+ return llvm::ConstantDataArray::getString(VMContext, Str, false);
+ }
+
+ llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ // Utility methods
+ llvm::Type *ConvertType(QualType T) {
+ return CGM.getTypes().ConvertType(T);
+ }
+
+public:
+ ConstantAddress EmitLValue(APValue::LValueBase LVBase) {
+ if (const ValueDecl *Decl = LVBase.dyn_cast<const ValueDecl*>()) {
+ if (Decl->hasAttr<WeakRefAttr>())
+ return CGM.GetWeakRefReference(Decl);
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
+ return ConstantAddress(CGM.GetAddrOfFunction(FD), CharUnits::One());
+ if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
+ // We can never refer to a variable with local storage.
+ if (!VD->hasLocalStorage()) {
+ CharUnits Align = CGM.getContext().getDeclAlign(VD);
+ if (VD->isFileVarDecl() || VD->hasExternalStorage())
+ return ConstantAddress(CGM.GetAddrOfGlobalVar(VD), Align);
+ else if (VD->isLocalVarDecl()) {
+ auto Ptr = CGM.getOrCreateStaticVarDecl(
+ *VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false));
+ return ConstantAddress(Ptr, Align);
+ }
+ }
+ }
+ return ConstantAddress::invalid();
+ }
+
+ Expr *E = const_cast<Expr*>(LVBase.get<const Expr*>());
+ switch (E->getStmtClass()) {
+ default: break;
+ case Expr::CompoundLiteralExprClass: {
+ // Note that due to the nature of compound literals, this is guaranteed
+ // to be the only use of the variable, so we just generate it here.
+ CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
+ llvm::Constant* C = CGM.EmitConstantExpr(CLE->getInitializer(),
+ CLE->getType(), CGF);
+ // FIXME: "Leaked" on failure.
+ if (!C) return ConstantAddress::invalid();
+
+ CharUnits Align = CGM.getContext().getTypeAlignInChars(E->getType());
+
+ auto GV = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
+ E->getType().isConstant(CGM.getContext()),
+ llvm::GlobalValue::InternalLinkage,
+ C, ".compoundliteral", nullptr,
+ llvm::GlobalVariable::NotThreadLocal,
+ CGM.getContext().getTargetAddressSpace(E->getType()));
+ GV->setAlignment(Align.getQuantity());
+ return ConstantAddress(GV, Align);
+ }
+ case Expr::StringLiteralClass:
+ return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
+ case Expr::ObjCEncodeExprClass:
+ return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
+ case Expr::ObjCStringLiteralClass: {
+ ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
+ ConstantAddress C =
+ CGM.getObjCRuntime().GenerateConstantString(SL->getString());
+ return C.getElementBitCast(ConvertType(E->getType()));
+ }
+ case Expr::PredefinedExprClass: {
+ unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
+ if (CGF) {
+ LValue Res = CGF->EmitPredefinedLValue(cast<PredefinedExpr>(E));
+ return cast<ConstantAddress>(Res.getAddress());
+ } else if (Type == PredefinedExpr::PrettyFunction) {
+ return CGM.GetAddrOfConstantCString("top level", ".tmp");
+ }
+
+ return CGM.GetAddrOfConstantCString("", ".tmp");
+ }
+ case Expr::AddrLabelExprClass: {
+ assert(CGF && "Invalid address of label expression outside function.");
+ llvm::Constant *Ptr =
+ CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
+ Ptr = llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
+ return ConstantAddress(Ptr, CharUnits::One());
+ }
+ case Expr::CallExprClass: {
+ CallExpr* CE = cast<CallExpr>(E);
+ unsigned builtin = CE->getBuiltinCallee();
+ if (builtin !=
+ Builtin::BI__builtin___CFStringMakeConstantString &&
+ builtin !=
+ Builtin::BI__builtin___NSStringMakeConstantString)
+ break;
+ const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
+ const StringLiteral *Literal = cast<StringLiteral>(Arg);
+ if (builtin ==
+ Builtin::BI__builtin___NSStringMakeConstantString) {
+ return CGM.getObjCRuntime().GenerateConstantString(Literal);
+ }
+ // FIXME: need to deal with UCN conversion issues.
+ return CGM.GetAddrOfConstantCFString(Literal);
+ }
+ case Expr::BlockExprClass: {
+ std::string FunctionName;
+ if (CGF)
+ FunctionName = CGF->CurFn->getName();
+ else
+ FunctionName = "global";
+
+ // This is not really an l-value.
+ llvm::Constant *Ptr =
+ CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
+ return ConstantAddress(Ptr, CGM.getPointerAlign());
+ }
+ case Expr::CXXTypeidExprClass: {
+ CXXTypeidExpr *Typeid = cast<CXXTypeidExpr>(E);
+ QualType T;
+ if (Typeid->isTypeOperand())
+ T = Typeid->getTypeOperand(CGM.getContext());
+ else
+ T = Typeid->getExprOperand()->getType();
+ return ConstantAddress(CGM.GetAddrOfRTTIDescriptor(T),
+ CGM.getPointerAlign());
+ }
+ case Expr::CXXUuidofExprClass: {
+ return CGM.GetAddrOfUuidDescriptor(cast<CXXUuidofExpr>(E));
+ }
+ case Expr::MaterializeTemporaryExprClass: {
+ MaterializeTemporaryExpr *MTE = cast<MaterializeTemporaryExpr>(E);
+ assert(MTE->getStorageDuration() == SD_Static);
+ SmallVector<const Expr *, 2> CommaLHSs;
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
+ const Expr *Inner = MTE->GetTemporaryExpr()
+ ->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
+ return CGM.GetAddrOfGlobalTemporary(MTE, Inner);
+ }
+ }
+
+ return ConstantAddress::invalid();
+ }
+};
+
+} // end anonymous namespace.
+
+bool ConstStructBuilder::Build(ConstExprEmitter *Emitter,
+ llvm::ConstantStruct *Base,
+ InitListExpr *Updater) {
+ assert(Base && "base expression should not be empty");
+
+ QualType ExprType = Updater->getType();
+ RecordDecl *RD = ExprType->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+ const llvm::StructLayout *BaseLayout = CGM.getDataLayout().getStructLayout(
+ Base->getType());
+ unsigned FieldNo = -1;
+ unsigned ElementNo = 0;
+
+ for (FieldDecl *Field : RD->fields()) {
+ ++FieldNo;
+
+ if (RD->isUnion() && Updater->getInitializedFieldInUnion() != Field)
+ continue;
+
+ // Skip anonymous bitfields.
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ llvm::Constant *EltInit = Base->getOperand(ElementNo);
+
+ // Bail out if the type of the ConstantStruct does not have the same layout
+ // as the type of the InitListExpr.
+ if (CGM.getTypes().ConvertType(Field->getType()) != EltInit->getType() ||
+ Layout.getFieldOffset(ElementNo) !=
+ BaseLayout->getElementOffsetInBits(ElementNo))
+ return false;
+
+ // Get the initializer. If we encounter an empty field or a NoInitExpr,
+ // we use values from the base expression.
+ Expr *Init = nullptr;
+ if (ElementNo < Updater->getNumInits())
+ Init = Updater->getInit(ElementNo);
+
+ if (!Init || isa<NoInitExpr>(Init))
+ ; // Do nothing.
+ else if (InitListExpr *ChildILE = dyn_cast<InitListExpr>(Init))
+ EltInit = Emitter->EmitDesignatedInitUpdater(EltInit, ChildILE);
+ else
+ EltInit = CGM.EmitConstantExpr(Init, Field->getType(), CGF);
+
+ ++ElementNo;
+
+ if (!EltInit)
+ return false;
+
+ if (!Field->isBitField())
+ AppendField(Field, Layout.getFieldOffset(FieldNo), EltInit);
+ else if (llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(EltInit))
+ AppendBitField(Field, Layout.getFieldOffset(FieldNo), CI);
+ else
+ // Initializing a bitfield with a non-trivial constant?
+ return false;
+ }
+
+ return true;
+}
+
+llvm::Constant *CodeGenModule::EmitConstantInit(const VarDecl &D,
+ CodeGenFunction *CGF) {
+ // Quickly check whether the variable can be default NULL-initialized,
+ // and avoid going through the rest of this code, which may (for C++11)
+ // initialize the memory to all NULLs.
+ if (!D.hasLocalStorage()) {
+ QualType Ty = D.getType();
+ if (Ty->isArrayType())
+ Ty = Context.getBaseElementType(Ty);
+ if (Ty->isRecordType())
+ if (const CXXConstructExpr *E =
+ dyn_cast_or_null<CXXConstructExpr>(D.getInit())) {
+ const CXXConstructorDecl *CD = E->getConstructor();
+ if (CD->isTrivial() && CD->isDefaultConstructor())
+ return EmitNullConstant(D.getType());
+ }
+ }
+
+ if (const APValue *Value = D.evaluateValue())
+ return EmitConstantValueForMemory(*Value, D.getType(), CGF);
+
+ // FIXME: Implement C++11 [basic.start.init]p2: if the initializer of a
+ // reference is a constant expression, and the reference binds to a temporary,
+ // then constant initialization is performed. ConstExprEmitter will
+ // incorrectly emit a prvalue constant in this case, and the calling code
+ // interprets that as the (pointer) value of the reference, rather than the
+ // desired value of the referee.
+ if (D.getType()->isReferenceType())
+ return nullptr;
+
+ const Expr *E = D.getInit();
+ assert(E && "No initializer to emit");
+
+ llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
+ if (C && C->getType()->isIntegerTy(1)) {
+ llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+}
+
+llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
+ QualType DestType,
+ CodeGenFunction *CGF) {
+ Expr::EvalResult Result;
+
+ bool Success = false;
+
+ if (DestType->isReferenceType())
+ Success = E->EvaluateAsLValue(Result, Context);
+ else
+ Success = E->EvaluateAsRValue(Result, Context);
+
+ llvm::Constant *C = nullptr;
+ if (Success && !Result.HasSideEffects)
+ C = EmitConstantValue(Result.Val, DestType, CGF);
+ else
+ C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
+
+ if (C && C->getType()->isIntegerTy(1)) {
+ llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+}
+
+llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
+ QualType DestType,
+ CodeGenFunction *CGF) {
+ // For an _Atomic-qualified constant, we may need to add tail padding.
+ if (auto *AT = DestType->getAs<AtomicType>()) {
+ QualType InnerType = AT->getValueType();
+ auto *Inner = EmitConstantValue(Value, InnerType, CGF);
+
+ uint64_t InnerSize = Context.getTypeSize(InnerType);
+ uint64_t OuterSize = Context.getTypeSize(DestType);
+ if (InnerSize == OuterSize)
+ return Inner;
+
+ assert(InnerSize < OuterSize && "emitted over-large constant for atomic");
+ llvm::Constant *Elts[] = {
+ Inner,
+ llvm::ConstantAggregateZero::get(
+ llvm::ArrayType::get(Int8Ty, (OuterSize - InnerSize) / 8))
+ };
+ return llvm::ConstantStruct::getAnon(Elts);
+ }
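+ // Sketch of the padding case above (editorial illustration): if the inner
+ // type is a 3-byte struct but _Atomic widens it to 4 bytes, the emitted
+ // constant is { <3-byte inner constant>, [1 x i8] zeroinitializer }.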
+
+ switch (Value.getKind()) {
+ case APValue::Uninitialized:
+ llvm_unreachable("Constant expressions should be initialized.");
+ case APValue::LValue: {
+ llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
+ llvm::Constant *Offset =
+ llvm::ConstantInt::get(Int64Ty, Value.getLValueOffset().getQuantity());
+
+ llvm::Constant *C = nullptr;
+ if (APValue::LValueBase LVBase = Value.getLValueBase()) {
+ // An array can be represented as an lvalue referring to the base.
+ if (isa<llvm::ArrayType>(DestTy)) {
+ assert(Offset->isNullValue() && "offset on array initializer");
+ return ConstExprEmitter(*this, CGF).Visit(
+ const_cast<Expr*>(LVBase.get<const Expr*>()));
+ }
+
+ C = ConstExprEmitter(*this, CGF).EmitLValue(LVBase).getPointer();
+
+ // Apply offset if necessary.
+ if (!Offset->isNullValue()) {
+ unsigned AS = C->getType()->getPointerAddressSpace();
+ llvm::Type *CharPtrTy = Int8Ty->getPointerTo(AS);
+ llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, CharPtrTy);
+ Casted = llvm::ConstantExpr::getGetElementPtr(Int8Ty, Casted, Offset);
+ C = llvm::ConstantExpr::getPointerCast(Casted, C->getType());
+ }
+
+ // Convert to the appropriate type; this could be an lvalue for
+ // an integer.
+ if (isa<llvm::PointerType>(DestTy))
+ return llvm::ConstantExpr::getPointerCast(C, DestTy);
+
+ return llvm::ConstantExpr::getPtrToInt(C, DestTy);
+ } else {
+ C = Offset;
+
+ // Convert to the appropriate type; this could be an lvalue for
+ // an integer.
+ if (isa<llvm::PointerType>(DestTy))
+ return llvm::ConstantExpr::getIntToPtr(C, DestTy);
+
+ // If the types don't match this should only be a truncate.
+ if (C->getType() != DestTy)
+ return llvm::ConstantExpr::getTrunc(C, DestTy);
+
+ return C;
+ }
+ }
+ case APValue::Int:
+ return llvm::ConstantInt::get(VMContext, Value.getInt());
+ case APValue::ComplexInt: {
+ llvm::Constant *Complex[2];
+
+ Complex[0] = llvm::ConstantInt::get(VMContext,
+ Value.getComplexIntReal());
+ Complex[1] = llvm::ConstantInt::get(VMContext,
+ Value.getComplexIntImag());
+
+ // FIXME: the target may want to specify that this is packed.
+ llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
+ Complex[1]->getType(),
+ nullptr);
+ return llvm::ConstantStruct::get(STy, Complex);
+ }
+ case APValue::Float: {
+ const llvm::APFloat &Init = Value.getFloat();
+ if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf &&
+ !Context.getLangOpts().NativeHalfType &&
+ !Context.getLangOpts().HalfArgsAndReturns)
+ return llvm::ConstantInt::get(VMContext, Init.bitcastToAPInt());
+ else
+ return llvm::ConstantFP::get(VMContext, Init);
+ }
+ case APValue::ComplexFloat: {
+ llvm::Constant *Complex[2];
+
+ Complex[0] = llvm::ConstantFP::get(VMContext,
+ Value.getComplexFloatReal());
+ Complex[1] = llvm::ConstantFP::get(VMContext,
+ Value.getComplexFloatImag());
+
+ // FIXME: the target may want to specify that this is packed.
+ llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
+ Complex[1]->getType(),
+ nullptr);
+ return llvm::ConstantStruct::get(STy, Complex);
+ }
+ case APValue::Vector: {
+ unsigned NumElts = Value.getVectorLength();
+ SmallVector<llvm::Constant *, 4> Inits(NumElts);
+
+ for (unsigned I = 0; I != NumElts; ++I) {
+ const APValue &Elt = Value.getVectorElt(I);
+ if (Elt.isInt())
+ Inits[I] = llvm::ConstantInt::get(VMContext, Elt.getInt());
+ else if (Elt.isFloat())
+ Inits[I] = llvm::ConstantFP::get(VMContext, Elt.getFloat());
+ else
+ llvm_unreachable("unsupported vector element type");
+ }
+ return llvm::ConstantVector::get(Inits);
+ }
+ case APValue::AddrLabelDiff: {
+ const AddrLabelExpr *LHSExpr = Value.getAddrLabelDiffLHS();
+ const AddrLabelExpr *RHSExpr = Value.getAddrLabelDiffRHS();
+ llvm::Constant *LHS = EmitConstantExpr(LHSExpr, LHSExpr->getType(), CGF);
+ llvm::Constant *RHS = EmitConstantExpr(RHSExpr, RHSExpr->getType(), CGF);
+
+ // Compute difference
+ llvm::Type *ResultType = getTypes().ConvertType(DestType);
+ LHS = llvm::ConstantExpr::getPtrToInt(LHS, IntPtrTy);
+ RHS = llvm::ConstantExpr::getPtrToInt(RHS, IntPtrTy);
+ llvm::Constant *AddrLabelDiff = llvm::ConstantExpr::getSub(LHS, RHS);
+
+ // LLVM is a bit sensitive about the exact format of the
+ // address-of-label difference; make sure to truncate after
+ // the subtraction.
+ return llvm::ConstantExpr::getTruncOrBitCast(AddrLabelDiff, ResultType);
+ }
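+ // Assumed example of the address-of-label extension handled above:
+ //   static const int diff = &&after - &&before;
+ // lowers to trunc(sub(ptrtoint blockaddress(after),
+ //                     ptrtoint blockaddress(before))),
+ // keeping the truncation strictly after the subtraction.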
+ case APValue::Struct:
+ case APValue::Union:
+ return ConstStructBuilder::BuildStruct(*this, CGF, Value, DestType);
+ case APValue::Array: {
+ const ArrayType *CAT = Context.getAsArrayType(DestType);
+ unsigned NumElements = Value.getArraySize();
+ unsigned NumInitElts = Value.getArrayInitializedElts();
+
+ // Emit array filler, if there is one.
+ llvm::Constant *Filler = nullptr;
+ if (Value.hasArrayFiller())
+ Filler = EmitConstantValueForMemory(Value.getArrayFiller(),
+ CAT->getElementType(), CGF);
+
+ // Emit initializer elements.
+ llvm::Type *CommonElementType =
+ getTypes().ConvertType(CAT->getElementType());
+
+ // Try to use a ConstantAggregateZero if we can.
+ if (Filler && Filler->isNullValue() && !NumInitElts) {
+ llvm::ArrayType *AType =
+ llvm::ArrayType::get(CommonElementType, NumElements);
+ return llvm::ConstantAggregateZero::get(AType);
+ }
+
+ std::vector<llvm::Constant*> Elts;
+ Elts.reserve(NumElements);
+ for (unsigned I = 0; I < NumElements; ++I) {
+ llvm::Constant *C = Filler;
+ if (I < NumInitElts)
+ C = EmitConstantValueForMemory(Value.getArrayInitializedElt(I),
+ CAT->getElementType(), CGF);
+ else
+ assert(Filler && "Missing filler for implicit elements of initializer");
+ if (I == 0)
+ CommonElementType = C->getType();
+ else if (C->getType() != CommonElementType)
+ CommonElementType = nullptr;
+ Elts.push_back(C);
+ }
+
+ if (!CommonElementType) {
+ // FIXME: Try to avoid packing the array
+ std::vector<llvm::Type*> Types;
+ Types.reserve(NumElements);
+ for (unsigned i = 0, e = Elts.size(); i < e; ++i)
+ Types.push_back(Elts[i]->getType());
+ llvm::StructType *SType = llvm::StructType::get(VMContext, Types, true);
+ return llvm::ConstantStruct::get(SType, Elts);
+ }
+
+ llvm::ArrayType *AType =
+ llvm::ArrayType::get(CommonElementType, NumElements);
+ return llvm::ConstantArray::get(AType, Elts);
+ }
+ case APValue::MemberPointer:
+ return getCXXABI().EmitMemberPointer(Value, DestType);
+ }
+ llvm_unreachable("Unknown APValue kind");
+}
+
+llvm::Constant *
+CodeGenModule::EmitConstantValueForMemory(const APValue &Value,
+ QualType DestType,
+ CodeGenFunction *CGF) {
+ llvm::Constant *C = EmitConstantValue(Value, DestType, CGF);
+ if (C->getType()->isIntegerTy(1)) {
+ llvm::Type *BoolTy = getTypes().ConvertTypeForMem(DestType);
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+}
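+
+// Explanatory note on the zext above (editorial): scalar booleans are i1
+// values in LLVM, but their in-memory type is i8, so a constant 'true'
+// destined for memory is widened to i8 1 before being stored into a global
+// or aggregate.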
+
+ConstantAddress
+CodeGenModule::GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E) {
+ assert(E->isFileScope() && "not a file-scope compound literal expr");
+ return ConstExprEmitter(*this, nullptr).EmitLValue(E);
+}
+
+llvm::Constant *
+CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
+ // Member pointer constants always have a very particular form.
+ const MemberPointerType *type = cast<MemberPointerType>(uo->getType());
+ const ValueDecl *decl = cast<DeclRefExpr>(uo->getSubExpr())->getDecl();
+
+ // A member function pointer.
+ if (const CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(decl))
+ return getCXXABI().EmitMemberFunctionPointer(method);
+
+ // Otherwise, a member data pointer.
+ uint64_t fieldOffset = getContext().getFieldOffset(decl);
+ CharUnits chars = getContext().toCharUnitsFromBits((int64_t) fieldOffset);
+ return getCXXABI().EmitMemberDataPointer(type, chars);
+}
+
+static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
+ llvm::Type *baseType,
+ const CXXRecordDecl *base);
+
+static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
+ const CXXRecordDecl *record,
+ bool asCompleteObject) {
+ const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
+ llvm::StructType *structure =
+ (asCompleteObject ? layout.getLLVMType()
+ : layout.getBaseSubobjectLLVMType());
+
+ unsigned numElements = structure->getNumElements();
+ std::vector<llvm::Constant *> elements(numElements);
+
+ // Fill in all the bases.
+ for (const auto &I : record->bases()) {
+ if (I.isVirtual()) {
+ // Ignore virtual bases; if we're laying out for a complete
+ // object, we'll lay these out later.
+ continue;
+ }
+
+ const CXXRecordDecl *base =
+ cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+
+ // Ignore empty bases.
+ if (base->isEmpty())
+ continue;
+
+ unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
+ llvm::Type *baseType = structure->getElementType(fieldIndex);
+ elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
+ }
+
+ // Fill in all the fields.
+ for (const auto *Field : record->fields()) {
+ // Fill in non-bitfields. (Bitfields always use a zero pattern, which we
+ // will fill in later.)
+ if (!Field->isBitField()) {
+ unsigned fieldIndex = layout.getLLVMFieldNo(Field);
+ elements[fieldIndex] = CGM.EmitNullConstant(Field->getType());
+ }
+
+ // For unions, stop after the first named field.
+ if (record->isUnion()) {
+ if (Field->getIdentifier())
+ break;
+ if (const auto *FieldRD =
+ dyn_cast_or_null<RecordDecl>(Field->getType()->getAsTagDecl()))
+ if (FieldRD->findFirstNamedDataMember())
+ break;
+ }
+ }
+
+ // Fill in the virtual bases, if we're working with the complete object.
+ if (asCompleteObject) {
+ for (const auto &I : record->vbases()) {
+ const CXXRecordDecl *base =
+ cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+
+ // Ignore empty bases.
+ if (base->isEmpty())
+ continue;
+
+ unsigned fieldIndex = layout.getVirtualBaseIndex(base);
+
+ // We might have already laid this field out.
+ if (elements[fieldIndex]) continue;
+
+ llvm::Type *baseType = structure->getElementType(fieldIndex);
+ elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
+ }
+ }
+
+ // Now go through all other fields and zero them out.
+ for (unsigned i = 0; i != numElements; ++i) {
+ if (!elements[i])
+ elements[i] = llvm::Constant::getNullValue(structure->getElementType(i));
+ }
+
+ return llvm::ConstantStruct::get(structure, elements);
+}
+
+/// Emit the null constant for a base subobject.
+static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
+ llvm::Type *baseType,
+ const CXXRecordDecl *base) {
+ const CGRecordLayout &baseLayout = CGM.getTypes().getCGRecordLayout(base);
+
+ // Just zero out bases that don't have any pointer to data members.
+ if (baseLayout.isZeroInitializableAsBase())
+ return llvm::Constant::getNullValue(baseType);
+
+ // Otherwise, we can just use its null constant.
+ return EmitNullConstant(CGM, base, /*asCompleteObject=*/false);
+}
+
+llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
+ if (getTypes().isZeroInitializable(T))
+ return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
+
+ if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
+ llvm::ArrayType *ATy =
+ cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
+
+ QualType ElementTy = CAT->getElementType();
+
+ llvm::Constant *Element = EmitNullConstant(ElementTy);
+ unsigned NumElements = CAT->getSize().getZExtValue();
+ SmallVector<llvm::Constant *, 8> Array(NumElements, Element);
+ return llvm::ConstantArray::get(ATy, Array);
+ }
+
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ return ::EmitNullConstant(*this, RD, /*complete object*/ true);
+ }
+
+ assert(T->isMemberDataPointerType() &&
+ "Should only see pointers to data members here!");
+
+ return getCXXABI().EmitNullMemberPointer(T->castAs<MemberPointerType>());
+}
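+
+// Illustrative example of a non-trivial null constant (assuming the Itanium
+// C++ ABI): for
+//   struct S { int S::*p; };
+// the null member data pointer is represented as -1 rather than 0, so the
+// record cannot be zero-initialized and this path emits { i64 -1 } (on a
+// 64-bit target) instead of a plain zeroinitializer.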
+
+llvm::Constant *
+CodeGenModule::EmitNullConstantForBase(const CXXRecordDecl *Record) {
+ return ::EmitNullConstant(*this, Record, false);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
new file mode 100644
index 0000000..725d96f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
@@ -0,0 +1,3555 @@
+//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
+#include "CGDebugInfo.h"
+#include "CGObjCRuntime.h"
+#include "CodeGenModule.h"
+#include "TargetInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Module.h"
+#include <cstdarg>
+
+using namespace clang;
+using namespace CodeGen;
+using llvm::Value;
+
+//===----------------------------------------------------------------------===//
+// Scalar Expression Emitter
+//===----------------------------------------------------------------------===//
+
+namespace {
+struct BinOpInfo {
+ Value *LHS;
+ Value *RHS;
+ QualType Ty; // Computation Type.
+ BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform.
+ bool FPContractable;
+ const Expr *E; // Entire expr, for ErrorUnsupported(). May not be a binop.
+};
+
+static bool MustVisitNullValue(const Expr *E) {
+ // If a null pointer expression's type is the C++11 nullptr_t, then
+ // it's not necessarily a simple constant and it must be evaluated
+ // for its potential side effects.
+ return E->getType()->isNullPtrType();
+}
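+
+// Assumed example (editorial): in
+//   void *p = (f(), nullptr);
+// the initializer has type nullptr_t but still carries f()'s side effect,
+// so it must be visited and emitted rather than folded directly to null.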
+
+class ScalarExprEmitter
+ : public StmtVisitor<ScalarExprEmitter, Value*> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ bool IgnoreResultAssign;
+ llvm::LLVMContext &VMContext;
+public:
+
+ ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
+ : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
+ VMContext(cgf.getLLVMContext()) {
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+ bool TestAndClearIgnoreResultAssign() {
+ bool I = IgnoreResultAssign;
+ IgnoreResultAssign = false;
+ return I;
+ }
+
+ llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
+ LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
+ LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
+ return CGF.EmitCheckedLValue(E, TCK);
+ }
+
+ void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
+ const BinOpInfo &Info);
+
+ Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
+ return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
+ }
+
+ void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
+ const AlignValueAttr *AVAttr = nullptr;
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
+ const ValueDecl *VD = DRE->getDecl();
+
+ if (VD->getType()->isReferenceType()) {
+ if (const auto *TTy =
+ dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
+ AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
+ } else {
+ // Assumptions for function parameters are emitted at the start of the
+ // function, so there is no need to repeat that here.
+ if (isa<ParmVarDecl>(VD))
+ return;
+
+ AVAttr = VD->getAttr<AlignValueAttr>();
+ }
+ }
+
+ if (!AVAttr)
+ if (const auto *TTy =
+ dyn_cast<TypedefType>(E->getType()))
+ AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
+
+ if (!AVAttr)
+ return;
+
+ Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
+ llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
+ CGF.EmitAlignmentAssumption(V, AlignmentCI->getZExtValue());
+ }
+
+  /// EmitLoadOfLValue - Given an expression with scalar type that represents
+  /// an l-value, this method emits the address of the l-value, then loads
+  /// and returns the result.
+ Value *EmitLoadOfLValue(const Expr *E) {
+ Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
+ E->getExprLoc());
+
+ EmitLValueAlignmentAssumption(E, V);
+ return V;
+ }
+
+ /// EmitConversionToBool - Convert the specified expression value to a
+ /// boolean (i1) truth value. This is equivalent to "Val != 0".
+  Value *EmitConversionToBool(Value *Src, QualType SrcType);
+
+ /// Emit a check that a conversion to or from a floating-point type does not
+ /// overflow.
+ void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
+ Value *Src, QualType SrcType, QualType DstType,
+ llvm::Type *DstTy, SourceLocation Loc);
+
+ /// Emit a conversion from the specified type to the specified destination
+ /// type, both of which are LLVM scalar types.
+ Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
+ SourceLocation Loc);
+
+ Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
+ SourceLocation Loc, bool TreatBooleanAsSigned);
+
+ /// Emit a conversion from the specified complex type to the specified
+ /// destination type, where the destination type is an LLVM scalar type.
+ Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
+ QualType SrcTy, QualType DstTy,
+ SourceLocation Loc);
+
+ /// EmitNullValue - Emit a value that corresponds to null for the given type.
+ Value *EmitNullValue(QualType Ty);
+
+ /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
+ Value *EmitFloatToBoolConversion(Value *V) {
+ // Compare against 0.0 for fp scalars.
+ llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
+ return Builder.CreateFCmpUNE(V, Zero, "tobool");
+ }
+
+ /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
+ Value *EmitPointerToBoolConversion(Value *V) {
+ Value *Zero = llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(V->getType()));
+ return Builder.CreateICmpNE(V, Zero, "tobool");
+ }
+
+ Value *EmitIntToBoolConversion(Value *V) {
+ // Because of the type rules of C, we often end up computing a
+ // logical value, then zero extending it to int, then wanting it
+ // as a logical value again. Optimize this common case.
+ if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
+ if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
+ Value *Result = ZI->getOperand(0);
+ // If there aren't any more uses, zap the instruction to save space.
+ // Note that there can be more uses, for example if this
+ // is the result of an assignment.
+ if (ZI->use_empty())
+ ZI->eraseFromParent();
+ return Result;
+ }
+ }
+
+ return Builder.CreateIsNotNull(V, "tobool");
+ }
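+
+  // For instance, in '(a < b) != 0' the comparison is computed as an i1,
+  // zero-extended to int by C's rules, and then immediately needed as an i1
+  // again; the zero extension is peeled back off instead of re-comparing.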
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ Value *Visit(Expr *E) {
+ ApplyDebugLocation DL(CGF, E);
+ return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
+ }
+
+ Value *VisitStmt(Stmt *S) {
+ S->dump(CGF.getContext().getSourceManager());
+    llvm_unreachable("Stmt can't have scalar result type!");
+ }
+ Value *VisitExpr(Expr *S);
+
+ Value *VisitParenExpr(ParenExpr *PE) {
+ return Visit(PE->getSubExpr());
+ }
+ Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
+ return Visit(E->getReplacement());
+ }
+ Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
+ return Visit(GE->getResultExpr());
+ }
+
+ // Leaves.
+ Value *VisitIntegerLiteral(const IntegerLiteral *E) {
+ return Builder.getInt(E->getValue());
+ }
+ Value *VisitFloatingLiteral(const FloatingLiteral *E) {
+ return llvm::ConstantFP::get(VMContext, E->getValue());
+ }
+ Value *VisitCharacterLiteral(const CharacterLiteral *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
+ Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
+ Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
+ Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
+ return EmitNullValue(E->getType());
+ }
+ Value *VisitGNUNullExpr(const GNUNullExpr *E) {
+ return EmitNullValue(E->getType());
+ }
+ Value *VisitOffsetOfExpr(OffsetOfExpr *E);
+ Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
+ Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
+ llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
+ return Builder.CreateBitCast(V, ConvertType(E->getType()));
+ }
+
+ Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
+ }
+
+ Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ return CGF.EmitPseudoObjectRValue(E).getScalarVal();
+ }
+
+ Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+ if (E->isGLValue())
+ return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E), E->getExprLoc());
+
+ // Otherwise, assume the mapping is the scalar directly.
+ return CGF.getOpaqueRValueMapping(E).getScalarVal();
+ }
+
+ // l-values.
+ Value *VisitDeclRefExpr(DeclRefExpr *E) {
+ if (CodeGenFunction::ConstantEmission result = CGF.tryEmitAsConstant(E)) {
+ if (result.isReference())
+ return EmitLoadOfLValue(result.getReferenceLValue(CGF, E),
+ E->getExprLoc());
+ return result.getValue();
+ }
+ return EmitLoadOfLValue(E);
+ }
+
+ Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
+ return CGF.EmitObjCSelectorExpr(E);
+ }
+ Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
+ return CGF.EmitObjCProtocolExpr(E);
+ }
+ Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ if (E->getMethodDecl() &&
+ E->getMethodDecl()->getReturnType()->isReferenceType())
+ return EmitLoadOfLValue(E);
+ return CGF.EmitObjCMessageExpr(E).getScalarVal();
+ }
+
+ Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
+ LValue LV = CGF.EmitObjCIsaExpr(E);
+ Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
+ return V;
+ }
+
+ Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
+ Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
+ Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
+ Value *VisitMemberExpr(MemberExpr *E);
+ Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
+ Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+
+ Value *VisitInitListExpr(InitListExpr *E);
+
+ Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
+ return EmitNullValue(E->getType());
+ }
+ Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
+ CGF.CGM.EmitExplicitCastExprType(E, &CGF);
+ return VisitCastExpr(E);
+ }
+ Value *VisitCastExpr(CastExpr *E);
+
+ Value *VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType(CGF.getContext())->isReferenceType())
+ return EmitLoadOfLValue(E);
+
+ Value *V = CGF.EmitCallExpr(E).getScalarVal();
+
+ EmitLValueAlignmentAssumption(E, V);
+ return V;
+ }
+
+ Value *VisitStmtExpr(const StmtExpr *E);
+
+ // Unary Operators.
+ Value *VisitUnaryPostDec(const UnaryOperator *E) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, false, false);
+ }
+ Value *VisitUnaryPostInc(const UnaryOperator *E) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, true, false);
+ }
+ Value *VisitUnaryPreDec(const UnaryOperator *E) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, false, true);
+ }
+ Value *VisitUnaryPreInc(const UnaryOperator *E) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, true, true);
+ }
+
+ llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
+ llvm::Value *InVal,
+ bool IsInc);
+
+ llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre);
+
+
+ Value *VisitUnaryAddrOf(const UnaryOperator *E) {
+ if (isa<MemberPointerType>(E->getType())) // never sugared
+ return CGF.CGM.getMemberPointerConstant(E);
+
+ return EmitLValue(E->getSubExpr()).getPointer();
+ }
+ Value *VisitUnaryDeref(const UnaryOperator *E) {
+ if (E->getType()->isVoidType())
+ return Visit(E->getSubExpr()); // the actual value should be unused
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitUnaryPlus(const UnaryOperator *E) {
+    // This differs from gcc, most likely due to a bug in gcc.
+ TestAndClearIgnoreResultAssign();
+ return Visit(E->getSubExpr());
+ }
+ Value *VisitUnaryMinus (const UnaryOperator *E);
+ Value *VisitUnaryNot (const UnaryOperator *E);
+ Value *VisitUnaryLNot (const UnaryOperator *E);
+ Value *VisitUnaryReal (const UnaryOperator *E);
+ Value *VisitUnaryImag (const UnaryOperator *E);
+ Value *VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ // C++
+ Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+
+ Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+ Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
+ CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
+ return Visit(DIE->getExpr());
+ }
+ Value *VisitCXXThisExpr(CXXThisExpr *TE) {
+ return CGF.LoadCXXThis();
+ }
+
+ Value *VisitExprWithCleanups(ExprWithCleanups *E) {
+ CGF.enterFullExpression(E);
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ return Visit(E->getSubExpr());
+ }
+ Value *VisitCXXNewExpr(const CXXNewExpr *E) {
+ return CGF.EmitCXXNewExpr(E);
+ }
+ Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
+ CGF.EmitCXXDeleteExpr(E);
+ return nullptr;
+ }
+
+ Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
+
+ Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
+ return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
+ }
+
+ Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
+ return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
+ }
+
+ Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
+ // C++ [expr.pseudo]p1:
+ // The result shall only be used as the operand for the function call
+ // operator (), and the result of such a call has type void. The only
+ // effect is the evaluation of the postfix-expression before the dot or
+ // arrow.
+ CGF.EmitScalarExpr(E->getBase());
+ return nullptr;
+ }
+
+ Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
+ return EmitNullValue(E->getType());
+ }
+
+ Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
+ CGF.EmitCXXThrowExpr(E);
+ return nullptr;
+ }
+
+ Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
+ return Builder.getInt1(E->getValue());
+ }
+
+ // Binary Operators.
+ Value *EmitMul(const BinOpInfo &Ops) {
+ if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
+ switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Defined:
+ return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
+ case LangOptions::SOB_Undefined:
+ if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
+ return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
+ // Fall through.
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(Ops);
+ }
+ }
+
+ if (Ops.Ty->isUnsignedIntegerType() &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
+ return EmitOverflowCheckedBinOp(Ops);
+
+ if (Ops.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
+ return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
+ }
+ /// Create a binary op that checks for overflow.
+ /// Currently only supports +, - and *.
+ Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
+
+ // Check for undefined division and modulus behaviors.
+ void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
+                                                  llvm::Value *Zero, bool isDiv);
+  // Common helper for getting the bit width of a shift's LHS, minus one.
+  static Value *GetWidthMinusOneValue(Value *LHS, Value *RHS);
+ Value *EmitDiv(const BinOpInfo &Ops);
+ Value *EmitRem(const BinOpInfo &Ops);
+ Value *EmitAdd(const BinOpInfo &Ops);
+ Value *EmitSub(const BinOpInfo &Ops);
+ Value *EmitShl(const BinOpInfo &Ops);
+ Value *EmitShr(const BinOpInfo &Ops);
+ Value *EmitAnd(const BinOpInfo &Ops) {
+ return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
+ }
+ Value *EmitXor(const BinOpInfo &Ops) {
+ return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
+ }
+ Value *EmitOr (const BinOpInfo &Ops) {
+ return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
+ }
+
+ BinOpInfo EmitBinOps(const BinaryOperator *E);
+ LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
+ Value *&Result);
+
+ Value *EmitCompoundAssign(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
+
+ // Binary operators and binary compound assignment operators.
+#define HANDLEBINOP(OP) \
+ Value *VisitBin ## OP(const BinaryOperator *E) { \
+ return Emit ## OP(EmitBinOps(E)); \
+ } \
+ Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
+ return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \
+ }
+ HANDLEBINOP(Mul)
+ HANDLEBINOP(Div)
+ HANDLEBINOP(Rem)
+ HANDLEBINOP(Add)
+ HANDLEBINOP(Sub)
+ HANDLEBINOP(Shl)
+ HANDLEBINOP(Shr)
+ HANDLEBINOP(And)
+ HANDLEBINOP(Xor)
+ HANDLEBINOP(Or)
+#undef HANDLEBINOP
+
+ // Comparisons.
+ Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
+ llvm::CmpInst::Predicate SICmpOpc,
+ llvm::CmpInst::Predicate FCmpOpc);
+#define VISITCOMP(CODE, UI, SI, FP) \
+ Value *VisitBin##CODE(const BinaryOperator *E) { \
+ return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
+ llvm::FCmpInst::FP); }
+ VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT)
+ VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT)
+ VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE)
+ VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE)
+ VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ)
+ VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE)
+#undef VISITCOMP
+
+ Value *VisitBinAssign (const BinaryOperator *E);
+
+ Value *VisitBinLAnd (const BinaryOperator *E);
+ Value *VisitBinLOr (const BinaryOperator *E);
+ Value *VisitBinComma (const BinaryOperator *E);
+
+ Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
+ Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
+
+ // Other Operators.
+ Value *VisitBlockExpr(const BlockExpr *BE);
+ Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
+ Value *VisitChooseExpr(ChooseExpr *CE);
+ Value *VisitVAArgExpr(VAArgExpr *VE);
+ Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
+ return CGF.EmitObjCStringLiteral(E);
+ }
+ Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
+ return CGF.EmitObjCBoxedExpr(E);
+ }
+ Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
+ return CGF.EmitObjCArrayLiteral(E);
+ }
+ Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
+ return CGF.EmitObjCDictionaryLiteral(E);
+ }
+ Value *VisitAsTypeExpr(AsTypeExpr *CE);
+ Value *VisitAtomicExpr(AtomicExpr *AE);
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitConversionToBool - Convert the specified expression value to a
+/// boolean (i1) truth value. This is equivalent to "Val != 0".
+Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
+ assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
+
+ if (SrcType->isRealFloatingType())
+ return EmitFloatToBoolConversion(Src);
+
+ if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
+ return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
+
+ assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
+ "Unknown scalar type to convert");
+
+ if (isa<llvm::IntegerType>(Src->getType()))
+ return EmitIntToBoolConversion(Src);
+
+ assert(isa<llvm::PointerType>(Src->getType()));
+ return EmitPointerToBoolConversion(Src);
+}
+
+void ScalarExprEmitter::EmitFloatConversionCheck(
+ Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
+ QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
+ CodeGenFunction::SanitizerScope SanScope(&CGF);
+ using llvm::APFloat;
+ using llvm::APSInt;
+
+ llvm::Type *SrcTy = Src->getType();
+
+ llvm::Value *Check = nullptr;
+ if (llvm::IntegerType *IntTy = dyn_cast<llvm::IntegerType>(SrcTy)) {
+ // Integer to floating-point. This can fail for unsigned short -> __half
+ // or unsigned __int128 -> float.
+ assert(DstType->isFloatingType());
+ bool SrcIsUnsigned = OrigSrcType->isUnsignedIntegerOrEnumerationType();
+
+ APFloat LargestFloat =
+ APFloat::getLargest(CGF.getContext().getFloatTypeSemantics(DstType));
+ APSInt LargestInt(IntTy->getBitWidth(), SrcIsUnsigned);
+
+ bool IsExact;
+ if (LargestFloat.convertToInteger(LargestInt, APFloat::rmTowardZero,
+ &IsExact) != APFloat::opOK)
+ // The range of representable values of this floating point type includes
+ // all values of this integer type. Don't need an overflow check.
+ return;
+
+ llvm::Value *Max = llvm::ConstantInt::get(VMContext, LargestInt);
+ if (SrcIsUnsigned)
+ Check = Builder.CreateICmpULE(Src, Max);
+ else {
+ llvm::Value *Min = llvm::ConstantInt::get(VMContext, -LargestInt);
+ llvm::Value *GE = Builder.CreateICmpSGE(Src, Min);
+ llvm::Value *LE = Builder.CreateICmpSLE(Src, Max);
+ Check = Builder.CreateAnd(GE, LE);
+ }
+ } else {
+ const llvm::fltSemantics &SrcSema =
+ CGF.getContext().getFloatTypeSemantics(OrigSrcType);
+ if (isa<llvm::IntegerType>(DstTy)) {
+ // Floating-point to integer. This has undefined behavior if the source is
+ // +-Inf, NaN, or doesn't fit into the destination type (after truncation
+ // to an integer).
+ unsigned Width = CGF.getContext().getIntWidth(DstType);
+ bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
+
+ APSInt Min = APSInt::getMinValue(Width, Unsigned);
+ APFloat MinSrc(SrcSema, APFloat::uninitialized);
+ if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
+ APFloat::opOverflow)
+ // Don't need an overflow check for lower bound. Just check for
+ // -Inf/NaN.
+ MinSrc = APFloat::getInf(SrcSema, true);
+ else
+ // Find the largest value which is too small to represent (before
+ // truncation toward zero).
+ MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
+
+ APSInt Max = APSInt::getMaxValue(Width, Unsigned);
+ APFloat MaxSrc(SrcSema, APFloat::uninitialized);
+ if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
+ APFloat::opOverflow)
+ // Don't need an overflow check for upper bound. Just check for
+ // +Inf/NaN.
+ MaxSrc = APFloat::getInf(SrcSema, false);
+ else
+ // Find the smallest value which is too large to represent (before
+ // truncation toward zero).
+ MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
+
+ // If we're converting from __half, convert the range to float to match
+ // the type of src.
+ if (OrigSrcType->isHalfType()) {
+ const llvm::fltSemantics &Sema =
+ CGF.getContext().getFloatTypeSemantics(SrcType);
+ bool IsInexact;
+ MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
+ MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
+ }
+
+ llvm::Value *GE =
+ Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
+ llvm::Value *LE =
+ Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
+ Check = Builder.CreateAnd(GE, LE);
+ } else {
+ // FIXME: Maybe split this sanitizer out from float-cast-overflow.
+ //
+ // Floating-point to floating-point. This has undefined behavior if the
+ // source is not in the range of representable values of the destination
+ // type. The C and C++ standards are spectacularly unclear here. We
+ // diagnose finite out-of-range conversions, but allow infinities and NaNs
+ // to convert to the corresponding value in the smaller type.
+ //
+ // C11 Annex F gives all such conversions defined behavior for IEC 60559
+ // conforming implementations. Unfortunately, LLVM's fptrunc instruction
+ // does not.
+
+ // Converting from a lower rank to a higher rank can never have
+ // undefined behavior, since higher-rank types must have a superset
+ // of values of lower-rank types.
+ if (CGF.getContext().getFloatingTypeOrder(OrigSrcType, DstType) != 1)
+ return;
+
+ assert(!OrigSrcType->isHalfType() &&
+ "should not check conversion from __half, it has the lowest rank");
+
+ const llvm::fltSemantics &DstSema =
+ CGF.getContext().getFloatTypeSemantics(DstType);
+ APFloat MinBad = APFloat::getLargest(DstSema, false);
+ APFloat MaxBad = APFloat::getInf(DstSema, false);
+
+ bool IsInexact;
+ MinBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);
+ MaxBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);
+
+ Value *AbsSrc = CGF.EmitNounwindRuntimeCall(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::fabs, Src->getType()), Src);
+ llvm::Value *GE =
+ Builder.CreateFCmpOGT(AbsSrc, llvm::ConstantFP::get(VMContext, MinBad));
+ llvm::Value *LE =
+ Builder.CreateFCmpOLT(AbsSrc, llvm::ConstantFP::get(VMContext, MaxBad));
+ Check = Builder.CreateNot(Builder.CreateAnd(GE, LE));
+ }
+ }
+
+ llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
+ CGF.EmitCheckTypeDescriptor(OrigSrcType),
+ CGF.EmitCheckTypeDescriptor(DstType)};
+ CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
+ "float_cast_overflow", StaticArgs, OrigSrc);
+}
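+
+// For example, with -fsanitize=float-cast-overflow the conversion
+//   (int)1.0e10f
+// gets a runtime check emitted here: 1e10 exceeds INT_MAX, so the fptosi
+// would otherwise be undefined behavior.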
+
+/// Emit a conversion from the specified type to the specified destination type,
+/// both of which are LLVM scalar types.
+Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
+ QualType DstType,
+ SourceLocation Loc) {
+ return EmitScalarConversion(Src, SrcType, DstType, Loc, false);
+}
+
+Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
+ QualType DstType,
+ SourceLocation Loc,
+ bool TreatBooleanAsSigned) {
+ SrcType = CGF.getContext().getCanonicalType(SrcType);
+ DstType = CGF.getContext().getCanonicalType(DstType);
+ if (SrcType == DstType) return Src;
+
+ if (DstType->isVoidType()) return nullptr;
+
+ llvm::Value *OrigSrc = Src;
+ QualType OrigSrcType = SrcType;
+ llvm::Type *SrcTy = Src->getType();
+
+ // Handle conversions to bool first, they are special: comparisons against 0.
+ if (DstType->isBooleanType())
+ return EmitConversionToBool(Src, SrcType);
+
+ llvm::Type *DstTy = ConvertType(DstType);
+
+ // Cast from half through float if half isn't a native type.
+ if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
+ // Cast to FP using the intrinsic if the half type itself isn't supported.
+ if (DstTy->isFloatingPointTy()) {
+ if (!CGF.getContext().getLangOpts().HalfArgsAndReturns)
+ return Builder.CreateCall(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
+ Src);
+ } else {
+ // Cast to other types through float, using either the intrinsic or FPExt,
+ // depending on whether the half type itself is supported
+ // (as opposed to operations on half, available with NativeHalfType).
+ if (!CGF.getContext().getLangOpts().HalfArgsAndReturns) {
+ Src = Builder.CreateCall(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
+ CGF.CGM.FloatTy),
+ Src);
+ } else {
+ Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
+ }
+ SrcType = CGF.getContext().FloatTy;
+ SrcTy = CGF.FloatTy;
+ }
+ }
+
+ // Ignore conversions like int -> uint.
+ if (SrcTy == DstTy)
+ return Src;
+
+ // Handle pointer conversions next: pointers can only be converted to/from
+ // other pointers and integers. Check for pointer types in terms of LLVM, as
+ // some native types (like Obj-C id) may map to a pointer type.
+ if (isa<llvm::PointerType>(DstTy)) {
+ // The source value may be an integer, or a pointer.
+ if (isa<llvm::PointerType>(SrcTy))
+ return Builder.CreateBitCast(Src, DstTy, "conv");
+
+ assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
+ // First, convert to the correct width so that we control the kind of
+ // extension.
+ llvm::Type *MiddleTy = CGF.IntPtrTy;
+ bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
+ llvm::Value* IntResult =
+ Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
+ // Then, cast to pointer.
+ return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
+ }
+
+ if (isa<llvm::PointerType>(SrcTy)) {
+    // Must be a ptr-to-int cast.
+ assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
+ return Builder.CreatePtrToInt(Src, DstTy, "conv");
+ }
+
+ // A scalar can be splatted to an extended vector of the same element type
+ if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
+ // Cast the scalar to element type
+ QualType EltTy = DstType->getAs<ExtVectorType>()->getElementType();
+ llvm::Value *Elt = EmitScalarConversion(
+ Src, SrcType, EltTy, Loc, CGF.getContext().getLangOpts().OpenCL);
+
+ // Splat the element across to all elements
+ unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+ return Builder.CreateVectorSplat(NumElements, Elt, "splat");
+ }
+
+ // Allow bitcast from vector to integer/fp of the same size.
+ if (isa<llvm::VectorType>(SrcTy) ||
+ isa<llvm::VectorType>(DstTy))
+ return Builder.CreateBitCast(Src, DstTy, "conv");
+
+ // Finally, we have the arithmetic types: real int/float.
+ Value *Res = nullptr;
+ llvm::Type *ResTy = DstTy;
+
+ // An overflowing conversion has undefined behavior if either the source type
+ // or the destination type is a floating-point type.
+ if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
+ (OrigSrcType->isFloatingType() || DstType->isFloatingType()))
+ EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
+ Loc);
+
+ // Cast to half through float if half isn't a native type.
+ if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
+ // Make sure we cast in a single step if from another FP type.
+ if (SrcTy->isFloatingPointTy()) {
+ // Use the intrinsic if the half type itself isn't supported
+ // (as opposed to operations on half, available with NativeHalfType).
+ if (!CGF.getContext().getLangOpts().HalfArgsAndReturns)
+ return Builder.CreateCall(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
+ // If the half type is supported, just use an fptrunc.
+ return Builder.CreateFPTrunc(Src, DstTy);
+ }
+ DstTy = CGF.FloatTy;
+ }
+
+ if (isa<llvm::IntegerType>(SrcTy)) {
+ bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
+ if (SrcType->isBooleanType() && TreatBooleanAsSigned) {
+ InputSigned = true;
+ }
+ if (isa<llvm::IntegerType>(DstTy))
+ Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
+ else if (InputSigned)
+ Res = Builder.CreateSIToFP(Src, DstTy, "conv");
+ else
+ Res = Builder.CreateUIToFP(Src, DstTy, "conv");
+ } else if (isa<llvm::IntegerType>(DstTy)) {
+ assert(SrcTy->isFloatingPointTy() && "Unknown real conversion");
+ if (DstType->isSignedIntegerOrEnumerationType())
+ Res = Builder.CreateFPToSI(Src, DstTy, "conv");
+ else
+ Res = Builder.CreateFPToUI(Src, DstTy, "conv");
+ } else {
+ assert(SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() &&
+ "Unknown real conversion");
+ if (DstTy->getTypeID() < SrcTy->getTypeID())
+ Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
+ else
+ Res = Builder.CreateFPExt(Src, DstTy, "conv");
+ }
+
+ if (DstTy != ResTy) {
+ if (!CGF.getContext().getLangOpts().HalfArgsAndReturns) {
+ assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
+ Res = Builder.CreateCall(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
+ Res);
+ } else {
+ Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
+ }
+ }
+
+ return Res;
+}
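+
+// Illustrative lowerings chosen above:
+//   (double)i  for signed int i    -> sitofp
+//   (float)u   for unsigned int u  -> uitofp
+//   (int)d     for double d        -> fptosi
+//   (short)l   for long l          -> trunc (via CreateIntCast)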
+
+/// Emit a conversion from the specified complex type to the specified
+/// destination type, where the destination type is an LLVM scalar type.
+Value *ScalarExprEmitter::EmitComplexToScalarConversion(
+ CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
+ SourceLocation Loc) {
+ // Get the source element type.
+ SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
+
+ // Handle conversions to bool first, they are special: comparisons against 0.
+ if (DstTy->isBooleanType()) {
+ // Complex != 0 -> (Real != 0) | (Imag != 0)
+ Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
+ Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
+ return Builder.CreateOr(Src.first, Src.second, "tobool");
+ }
+
+ // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
+ // the imaginary part of the complex value is discarded and the value of the
+ // real part is converted according to the conversion rules for the
+  // corresponding real type."
+ return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
+}
+
+Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
+ return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
+}
+
+/// \brief Emit a sanitization check for the given "binary" operation (which
+/// might actually be a unary increment which has been lowered to a binary
+/// operation). The check passes if all values in \p Checks (which are \c i1),
+/// are \c true.
+void ScalarExprEmitter::EmitBinOpCheck(
+ ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
+ assert(CGF.IsSanitizerScope);
+ StringRef CheckName;
+ SmallVector<llvm::Constant *, 4> StaticData;
+ SmallVector<llvm::Value *, 2> DynamicData;
+
+ BinaryOperatorKind Opcode = Info.Opcode;
+ if (BinaryOperator::isCompoundAssignmentOp(Opcode))
+ Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
+
+ StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
+ const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
+ if (UO && UO->getOpcode() == UO_Minus) {
+ CheckName = "negate_overflow";
+ StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
+ DynamicData.push_back(Info.RHS);
+ } else {
+ if (BinaryOperator::isShiftOp(Opcode)) {
+ // Shift LHS negative or too large, or RHS out of bounds.
+ CheckName = "shift_out_of_bounds";
+ const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
+ StaticData.push_back(
+ CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
+ StaticData.push_back(
+ CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
+ } else if (Opcode == BO_Div || Opcode == BO_Rem) {
+ // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
+ CheckName = "divrem_overflow";
+ StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
+ } else {
+ // Arithmetic overflow (+, -, *).
+ switch (Opcode) {
+ case BO_Add: CheckName = "add_overflow"; break;
+ case BO_Sub: CheckName = "sub_overflow"; break;
+ case BO_Mul: CheckName = "mul_overflow"; break;
+ default: llvm_unreachable("unexpected opcode for bin op check");
+ }
+ StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
+ }
+ DynamicData.push_back(Info.LHS);
+ DynamicData.push_back(Info.RHS);
+ }
+
+ CGF.EmitCheck(Checks, CheckName, StaticData, DynamicData);
+}
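+
+// For example, under the matching -fsanitize flags:
+//   -x      with x == INT_MIN       -> "negate_overflow"
+//   a / b   with b == 0             -> "divrem_overflow"
+//   a << b  with b >= width of 'a'  -> "shift_out_of_bounds"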
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+Value *ScalarExprEmitter::VisitExpr(Expr *E) {
+ CGF.ErrorUnsupported(E, "scalar expression");
+ if (E->getType()->isVoidType())
+ return nullptr;
+ return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+}
+
+Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
+ // Vector Mask Case
+ if (E->getNumSubExprs() == 2 ||
+ (E->getNumSubExprs() == 3 && E->getExpr(2)->getType()->isVectorType())) {
+ Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
+ Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
+ Value *Mask;
+
+ llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
+ unsigned LHSElts = LTy->getNumElements();
+
+ if (E->getNumSubExprs() == 3) {
+ Mask = CGF.EmitScalarExpr(E->getExpr(2));
+
+ // Shuffle LHS & RHS into one input vector.
+ SmallVector<llvm::Constant*, 32> concat;
+ for (unsigned i = 0; i != LHSElts; ++i) {
+ concat.push_back(Builder.getInt32(2*i));
+ concat.push_back(Builder.getInt32(2*i+1));
+ }
+
+ Value* CV = llvm::ConstantVector::get(concat);
+ LHS = Builder.CreateShuffleVector(LHS, RHS, CV, "concat");
+ LHSElts *= 2;
+ } else {
+ Mask = RHS;
+ }
+
+ llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
+
+ // Mask off the high bits of each shuffle index.
+ Value *MaskBits =
+ llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
+ Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
+
+ // newv = undef
+ // mask = mask & maskbits
+ // for each elt
+ // n = extract mask i
+ // x = extract val n
+ // newv = insert newv, x, i
+ llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
+ MTy->getNumElements());
+ Value* NewV = llvm::UndefValue::get(RTy);
+ for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
+ Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
+ Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
+
+ Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
+ NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
+ }
+ return NewV;
+ }
+
+ Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
+ Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
+
+ SmallVector<llvm::Constant*, 32> indices;
+ for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
+ llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
+ // Check for -1 and output it as undef in the IR.
+ if (Idx.isSigned() && Idx.isAllOnesValue())
+ indices.push_back(llvm::UndefValue::get(CGF.Int32Ty));
+ else
+ indices.push_back(Builder.getInt32(Idx.getZExtValue()));
+ }
+
+ Value *SV = llvm::ConstantVector::get(indices);
+ return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
+}
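+
+// For illustration, with 4-element vectors a and b:
+//   __builtin_shufflevector(a, b, 0, 4, 1, 5)  // constant mask: a single
+//                                              // shufflevector instruction
+//   __builtin_shufflevector(a, mask)           // variable mask: the
+//                                              // extract/insert loop above
+// A constant mask index of -1 becomes undef in the emitted IR.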
+
+Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
+ QualType SrcType = E->getSrcExpr()->getType(),
+ DstType = E->getType();
+
+ Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
+
+ SrcType = CGF.getContext().getCanonicalType(SrcType);
+ DstType = CGF.getContext().getCanonicalType(DstType);
+ if (SrcType == DstType) return Src;
+
+ assert(SrcType->isVectorType() &&
+ "ConvertVector source type must be a vector");
+ assert(DstType->isVectorType() &&
+ "ConvertVector destination type must be a vector");
+
+ llvm::Type *SrcTy = Src->getType();
+ llvm::Type *DstTy = ConvertType(DstType);
+
+ // Ignore conversions like int -> uint.
+ if (SrcTy == DstTy)
+ return Src;
+
+ QualType SrcEltType = SrcType->getAs<VectorType>()->getElementType(),
+ DstEltType = DstType->getAs<VectorType>()->getElementType();
+
+ assert(SrcTy->isVectorTy() &&
+ "ConvertVector source IR type must be a vector");
+ assert(DstTy->isVectorTy() &&
+ "ConvertVector destination IR type must be a vector");
+
+ llvm::Type *SrcEltTy = SrcTy->getVectorElementType(),
+ *DstEltTy = DstTy->getVectorElementType();
+
+ if (DstEltType->isBooleanType()) {
+ assert((SrcEltTy->isFloatingPointTy() ||
+ isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
+
+ llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
+ if (SrcEltTy->isFloatingPointTy()) {
+ return Builder.CreateFCmpUNE(Src, Zero, "tobool");
+ } else {
+ return Builder.CreateICmpNE(Src, Zero, "tobool");
+ }
+ }
+
+ // We have the arithmetic types: real int/float.
+ Value *Res = nullptr;
+
+ if (isa<llvm::IntegerType>(SrcEltTy)) {
+ bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
+ if (isa<llvm::IntegerType>(DstEltTy))
+ Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
+ else if (InputSigned)
+ Res = Builder.CreateSIToFP(Src, DstTy, "conv");
+ else
+ Res = Builder.CreateUIToFP(Src, DstTy, "conv");
+ } else if (isa<llvm::IntegerType>(DstEltTy)) {
+ assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
+ if (DstEltType->isSignedIntegerOrEnumerationType())
+ Res = Builder.CreateFPToSI(Src, DstTy, "conv");
+ else
+ Res = Builder.CreateFPToUI(Src, DstTy, "conv");
+ } else {
+ assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
+ "Unknown real conversion");
+ if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
+ Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
+ else
+ Res = Builder.CreateFPExt(Src, DstTy, "conv");
+ }
+
+ return Res;
+}
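+
+// For illustration:
+//   typedef int   int4   __attribute__((ext_vector_type(4)));
+//   typedef float float4 __attribute__((ext_vector_type(4)));
+//   int4 iv;
+//   float4 fv = __builtin_convertvector(iv, float4);  // sitofp <4 x i32>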
+
+Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
+ llvm::APSInt Value;
+ if (E->EvaluateAsInt(Value, CGF.getContext(), Expr::SE_AllowSideEffects)) {
+ if (E->isArrow())
+ CGF.EmitScalarExpr(E->getBase());
+ else
+ EmitLValue(E->getBase());
+ return Builder.getInt(Value);
+ }
+
+ return EmitLoadOfLValue(E);
+}
+
+Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ TestAndClearIgnoreResultAssign();
+
+  // Emit subscript expressions in rvalue contexts.  For most cases, this just
+ // loads the lvalue formed by the subscript expr. However, we have to be
+ // careful, because the base of a vector subscript is occasionally an rvalue,
+ // so we can't get it as an lvalue.
+ if (!E->getBase()->getType()->isVectorType())
+ return EmitLoadOfLValue(E);
+
+ // Handle the vector case. The base must be a vector, the index must be an
+ // integer value.
+ Value *Base = Visit(E->getBase());
+ Value *Idx = Visit(E->getIdx());
+ QualType IdxTy = E->getIdx()->getType();
+
+ if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
+ CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
+
+ return Builder.CreateExtractElement(Base, Idx, "vecext");
+}
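+
+// For illustration: with 'float4 v', the subscript 'v[i]' is emitted as an
+// extractelement of the visited base value rather than through an lvalue;
+// the base may itself be an rvalue, e.g. 'f()[i]' for f returning float4.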
+
+static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
+ unsigned Off, llvm::Type *I32Ty) {
+ int MV = SVI->getMaskValue(Idx);
+ if (MV == -1)
+ return llvm::UndefValue::get(I32Ty);
+ return llvm::ConstantInt::get(I32Ty, Off+MV);
+}
+
+static llvm::Constant *getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
+ if (C->getBitWidth() != 32) {
+ assert(llvm::ConstantInt::isValueValidForType(I32Ty,
+ C->getZExtValue()) &&
+ "Index operand too large for shufflevector mask!");
+ return llvm::ConstantInt::get(I32Ty, C->getZExtValue());
+ }
+ return C;
+}
+
+Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+ (void)Ignore;
+  assert(Ignore == false && "init list ignored");
+ unsigned NumInitElements = E->getNumInits();
+
+ if (E->hadArrayRangeDesignator())
+ CGF.ErrorUnsupported(E, "GNU array range designator extension");
+
+ llvm::VectorType *VType =
+ dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
+
+ if (!VType) {
+ if (NumInitElements == 0) {
+ // C++11 value-initialization for the scalar.
+ return EmitNullValue(E->getType());
+ }
+ // We have a scalar in braces. Just use the first element.
+ return Visit(E->getInit(0));
+ }
+
+ unsigned ResElts = VType->getNumElements();
+
+ // Loop over initializers collecting the Value for each, and remembering
+  // whether the source was a swizzle (ExtVectorElementExpr). This will allow
+ // us to fold the shuffle for the swizzle into the shuffle for the vector
+ // initializer, since LLVM optimizers generally do not want to touch
+ // shuffles.
+ unsigned CurIdx = 0;
+ bool VIsUndefShuffle = false;
+ llvm::Value *V = llvm::UndefValue::get(VType);
+ for (unsigned i = 0; i != NumInitElements; ++i) {
+ Expr *IE = E->getInit(i);
+ Value *Init = Visit(IE);
+ SmallVector<llvm::Constant*, 16> Args;
+
+ llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
+
+ // Handle scalar elements. If the scalar initializer is actually one
+ // element of a different vector of the same width, use shuffle instead of
+ // extract+insert.
+ if (!VVT) {
+ if (isa<ExtVectorElementExpr>(IE)) {
+ llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
+
+ if (EI->getVectorOperandType()->getNumElements() == ResElts) {
+ llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
+ Value *LHS = nullptr, *RHS = nullptr;
+ if (CurIdx == 0) {
+ // insert into undef -> shuffle (src, undef)
+ // shufflemask must use an i32
+ Args.push_back(getAsInt32(C, CGF.Int32Ty));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+
+ LHS = EI->getVectorOperand();
+ RHS = V;
+ VIsUndefShuffle = true;
+ } else if (VIsUndefShuffle) {
+ // insert into undefshuffle && size match -> shuffle (v, src)
+ llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
+ for (unsigned j = 0; j != CurIdx; ++j)
+ Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
+ Args.push_back(Builder.getInt32(ResElts + C->getZExtValue()));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+
+ LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
+ RHS = EI->getVectorOperand();
+ VIsUndefShuffle = false;
+ }
+ if (!Args.empty()) {
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
+ V = Builder.CreateShuffleVector(LHS, RHS, Mask);
+ ++CurIdx;
+ continue;
+ }
+ }
+ }
+ V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
+ "vecinit");
+ VIsUndefShuffle = false;
+ ++CurIdx;
+ continue;
+ }
+
+ unsigned InitElts = VVT->getNumElements();
+
+ // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
+ // input is the same width as the vector being constructed, generate an
+ // optimized shuffle of the swizzle input into the result.
+ unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
+ if (isa<ExtVectorElementExpr>(IE)) {
+ llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
+ Value *SVOp = SVI->getOperand(0);
+ llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
+
+ if (OpTy->getNumElements() == ResElts) {
+ for (unsigned j = 0; j != CurIdx; ++j) {
+ // If the current vector initializer is a shuffle with undef, merge
+ // this shuffle directly into it.
+ if (VIsUndefShuffle) {
+ Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
+ CGF.Int32Ty));
+ } else {
+ Args.push_back(Builder.getInt32(j));
+ }
+ }
+ for (unsigned j = 0, je = InitElts; j != je; ++j)
+ Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+
+ if (VIsUndefShuffle)
+ V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
+
+ Init = SVOp;
+ }
+ }
+
+ // Extend init to result vector length, and then shuffle its contribution
+ // to the vector initializer into V.
+ if (Args.empty()) {
+ for (unsigned j = 0; j != InitElts; ++j)
+ Args.push_back(Builder.getInt32(j));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
+ Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
+ Mask, "vext");
+
+ Args.clear();
+ for (unsigned j = 0; j != CurIdx; ++j)
+ Args.push_back(Builder.getInt32(j));
+ for (unsigned j = 0; j != InitElts; ++j)
+ Args.push_back(Builder.getInt32(j+Offset));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+ }
+
+ // If V is undef, make sure it ends up on the RHS of the shuffle to aid
+ // merging subsequent shuffles into this one.
+ if (CurIdx == 0)
+ std::swap(V, Init);
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
+ V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
+ VIsUndefShuffle = isa<llvm::UndefValue>(Init);
+ CurIdx += InitElts;
+ }
+
+  // FIXME: evaluate codegen vs. shuffling against constant null vector.
+  // Emit remaining default initializers.
+  llvm::Type *EltTy = VType->getElementType();
+
+  for (/* Do not initialize i */; CurIdx < ResElts; ++CurIdx) {
+ Value *Idx = Builder.getInt32(CurIdx);
+ llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
+ V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
+ }
+ return V;
+}
+
+bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
+ const Expr *E = CE->getSubExpr();
+
+ if (CE->getCastKind() == CK_UncheckedDerivedToBase)
+ return false;
+
+ if (isa<CXXThisExpr>(E->IgnoreParens())) {
+ // We always assume that 'this' is never null.
+ return false;
+ }
+
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
+ // And that glvalue casts are never null.
+ if (ICE->getValueKind() != VK_RValue)
+ return false;
+ }
+
+ return true;
+}
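+
+// For illustration:
+//   static_cast<Derived *>(p)  // p may be null: keep the null check
+//   Base &b = derived;         // glvalue conversion: never null, no check
+//   this->baseMember           // 'this' is assumed non-null, no check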
+
+// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
+// have to handle a broader range of conversions than explicit casts, as they
+// handle things like function to ptr-to-function decay etc.
+Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
+ Expr *E = CE->getSubExpr();
+ QualType DestTy = CE->getType();
+ CastKind Kind = CE->getCastKind();
+
+ if (!DestTy->isVoidType())
+ TestAndClearIgnoreResultAssign();
+
+ // Since almost all cast kinds apply to scalars, this switch doesn't have
+ // a default case, so the compiler will warn on a missing case. The cases
+ // are in the same order as in the CastKind enum.
+ switch (Kind) {
+ case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
+ case CK_BuiltinFnToFnPtr:
+ llvm_unreachable("builtin functions are handled elsewhere");
+
+ case CK_LValueBitCast:
+ case CK_ObjCObjectLValueCast: {
+ Address Addr = EmitLValue(E).getAddress();
+ Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
+ LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
+ return EmitLoadOfLValue(LV, CE->getExprLoc());
+ }
+
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_BitCast: {
+ Value *Src = Visit(const_cast<Expr*>(E));
+ llvm::Type *SrcTy = Src->getType();
+ llvm::Type *DstTy = ConvertType(DestTy);
+ if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
+ SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
+ llvm_unreachable("wrong cast for pointers in different address spaces"
+                       " (must be an address space cast)!");
+ }
+
+ if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
+ if (auto PT = DestTy->getAs<PointerType>())
+ CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
+ /*MayBeNull=*/true,
+ CodeGenFunction::CFITCK_UnrelatedCast,
+ CE->getLocStart());
+ }
+
+ return Builder.CreateBitCast(Src, DstTy);
+ }
+ case CK_AddressSpaceConversion: {
+ Value *Src = Visit(const_cast<Expr*>(E));
+ return Builder.CreateAddrSpaceCast(Src, ConvertType(DestTy));
+ }
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_NoOp:
+ case CK_UserDefinedConversion:
+ return Visit(const_cast<Expr*>(E));
+
+ case CK_BaseToDerived: {
+ const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
+ assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
+
+ Address Base = CGF.EmitPointerWithAlignment(E);
+ Address Derived =
+ CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
+ CE->path_begin(), CE->path_end(),
+ CGF.ShouldNullCheckClassCastValue(CE));
+
+ // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
+ // performed and the object is not of the derived type.
+ if (CGF.sanitizePerformTypeCheck())
+ CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
+ Derived.getPointer(), DestTy->getPointeeType());
+
+ if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
+ CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(),
+ Derived.getPointer(),
+ /*MayBeNull=*/true,
+ CodeGenFunction::CFITCK_DerivedCast,
+ CE->getLocStart());
+
+ return Derived.getPointer();
+ }
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBase: {
+ // The EmitPointerWithAlignment path does this fine; just discard
+ // the alignment.
+ return CGF.EmitPointerWithAlignment(CE).getPointer();
+ }
+
+ case CK_Dynamic: {
+ Address V = CGF.EmitPointerWithAlignment(E);
+ const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
+ return CGF.EmitDynamicCast(V, DCE);
+ }
+
+ case CK_ArrayToPointerDecay:
+ return CGF.EmitArrayToPointerDecay(E).getPointer();
+ case CK_FunctionToPointerDecay:
+ return EmitLValue(E).getPointer();
+
+ case CK_NullToPointer:
+ if (MustVisitNullValue(E))
+ (void) Visit(E);
+
+ return llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(ConvertType(DestTy)));
+
+ case CK_NullToMemberPointer: {
+ if (MustVisitNullValue(E))
+ (void) Visit(E);
+
+ const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
+ return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
+ }
+
+ case CK_ReinterpretMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer: {
+ Value *Src = Visit(E);
+
+ // Note that the AST doesn't distinguish between checked and
+ // unchecked member pointer conversions, so we always have to
+ // implement checked conversions here. This is inefficient when
+ // actual control flow may be required in order to perform the
+ // check, which it is for data member pointers (but not member
+ // function pointers on Itanium and ARM).
+ return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
+ }
+
+ case CK_ARCProduceObject:
+ return CGF.EmitARCRetainScalarExpr(E);
+ case CK_ARCConsumeObject:
+ return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
+ case CK_ARCReclaimReturnedObject: {
+ llvm::Value *value = Visit(E);
+ value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
+ return CGF.EmitObjCConsumeObject(E->getType(), value);
+ }
+ case CK_ARCExtendBlockObject:
+ return CGF.EmitARCExtendBlockObject(E);
+
+ case CK_CopyAndAutoreleaseBlockObject:
+ return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
+
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexCast:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_ConstructorConversion:
+ case CK_ToUnion:
+ llvm_unreachable("scalar cast to non-scalar value");
+
+ case CK_LValueToRValue:
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
+ assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
+ return Visit(const_cast<Expr*>(E));
+
+ case CK_IntegralToPointer: {
+ Value *Src = Visit(const_cast<Expr*>(E));
+
+ // First, convert to the correct width so that we control the kind of
+ // extension.
+ llvm::Type *MiddleTy = CGF.IntPtrTy;
+ bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
+ llvm::Value* IntResult =
+ Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
+
+ return Builder.CreateIntToPtr(IntResult, ConvertType(DestTy));
+ }
+ case CK_PointerToIntegral:
+ assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
+ return Builder.CreatePtrToInt(Visit(E), ConvertType(DestTy));
+
+ case CK_ToVoid: {
+ CGF.EmitIgnoredExpr(E);
+ return nullptr;
+ }
+ case CK_VectorSplat: {
+ llvm::Type *DstTy = ConvertType(DestTy);
+    // We need an IgnoreImpCasts here because a boolean is promoted to int by
+    // default, which does not perform the sign extension; since we know we
+    // are casting to a vector, we strip the implicit cast off first.
+ Value *Elt = Visit(const_cast<Expr*>(E->IgnoreImpCasts()));
+ Elt = EmitScalarConversion(Elt, E->IgnoreImpCasts()->getType(),
+ DestTy->getAs<VectorType>()->getElementType(),
+ CE->getExprLoc(),
+ CGF.getContext().getLangOpts().OpenCL);
+
+ // Splat the element across to all elements
+ unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+ return Builder.CreateVectorSplat(NumElements, Elt, "splat");
+ }
+
+ case CK_IntegralCast:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingCast:
+ return EmitScalarConversion(Visit(E), E->getType(), DestTy,
+ CE->getExprLoc());
+ case CK_IntegralToBoolean:
+ return EmitIntToBoolConversion(Visit(E));
+ case CK_PointerToBoolean:
+ return EmitPointerToBoolConversion(Visit(E));
+ case CK_FloatingToBoolean:
+ return EmitFloatToBoolConversion(Visit(E));
+ case CK_MemberPointerToBoolean: {
+ llvm::Value *MemPtr = Visit(E);
+ const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
+ return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
+ }
+
+ case CK_FloatingComplexToReal:
+ case CK_IntegralComplexToReal:
+ return CGF.EmitComplexExpr(E, false, true).first;
+
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToBoolean: {
+ CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
+
+ // TODO: kill this function off, inline appropriate case here
+ return EmitComplexToScalarConversion(V, E->getType(), DestTy,
+ CE->getExprLoc());
+ }
+
+ case CK_ZeroToOCLEvent: {
+ assert(DestTy->isEventT() && "CK_ZeroToOCLEvent cast on non-event type");
+ return llvm::Constant::getNullValue(ConvertType(DestTy));
+ }
+
+ }
+
+ llvm_unreachable("unknown scalar cast");
+}
+
+Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ CodeGenFunction::StmtExprEvaluation eval(CGF);
+ Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
+ !E->getType()->isVoidType());
+ if (!RetAlloca.isValid())
+ return nullptr;
+ return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
+ E->getExprLoc());
+}
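+
+// For illustration (GNU statement expression):
+//   int y = ({ int x = f(); x + 1; });
+// The compound statement is emitted into a temporary alloca and its value is
+// then loaded back out as the result of the expression.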
+
+//===----------------------------------------------------------------------===//
+// Unary Operators
+//===----------------------------------------------------------------------===//
+
+static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
+ llvm::Value *InVal, bool IsInc) {
+ BinOpInfo BinOp;
+ BinOp.LHS = InVal;
+ BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
+ BinOp.Ty = E->getType();
+ BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
+ BinOp.FPContractable = false;
+ BinOp.E = E;
+ return BinOp;
+}
+
+llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
+ const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
+ llvm::Value *Amount =
+ llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
+ StringRef Name = IsInc ? "inc" : "dec";
+ switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Defined:
+ return Builder.CreateAdd(InVal, Amount, Name);
+ case LangOptions::SOB_Undefined:
+ if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
+ return Builder.CreateNSWAdd(InVal, Amount, Name);
+ // Fall through.
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, InVal, IsInc));
+ }
+ llvm_unreachable("Unknown SignedOverflowBehaviorTy");
+}
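+
+// For example, '++i' on a signed int lowers according to the flags:
+//   -fwrapv  -> plain 'add' (wrapping is defined)
+//   default  -> 'add nsw'   (overflow is undefined)
+//   -ftrapv or -fsanitize=signed-integer-overflow
+//            -> the overflow-checked path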
+
+llvm::Value *
+ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+
+ QualType type = E->getSubExpr()->getType();
+ llvm::PHINode *atomicPHI = nullptr;
+ llvm::Value *value;
+ llvm::Value *input;
+
+ int amount = (isInc ? 1 : -1);
+
+ if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
+ type = atomicTy->getValueType();
+ if (isInc && type->isBooleanType()) {
+ llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
+ if (isPre) {
+ Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
+ ->setAtomic(llvm::SequentiallyConsistent);
+ return Builder.getTrue();
+ }
+      // For an atomic bool increment, preincrement just stores true and
+      // returns it (handled above); postincrement does an atomic swap with
+      // true and yields the previous value.
+ return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ LV.getPointer(), True, llvm::SequentiallyConsistent);
+ }
+    // Special case: for atomic increment / decrement on integers, emit
+    // atomicrmw instructions. We skip this when overflow checking is
+    // requested, and instead fall into the slow path with the atomic
+    // cmpxchg loop.
+ if (!type->isBooleanType() && type->isIntegerType() &&
+ !(type->isUnsignedIntegerType() &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
+ CGF.getLangOpts().getSignedOverflowBehavior() !=
+ LangOptions::SOB_Trapping) {
+ llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
+ llvm::AtomicRMWInst::Sub;
+ llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
+ llvm::Instruction::Sub;
+ llvm::Value *amt = CGF.EmitToMemory(
+ llvm::ConstantInt::get(ConvertType(type), 1, true), type);
+ llvm::Value *old = Builder.CreateAtomicRMW(aop,
+ LV.getPointer(), amt, llvm::SequentiallyConsistent);
+ return isPre ? Builder.CreateBinOp(op, old, amt) : old;
+ }
+ value = EmitLoadOfLValue(LV, E->getExprLoc());
+ input = value;
+ // For every other atomic operation, we need to emit a load-op-cmpxchg loop
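+    // roughly of the shape:
+    //   atomic_op:
+    //     %old = phi [ %loaded, %entry ], [ %observed, %atomic_op ]
+    //     %new = <op> %old
+    //     %observed, %ok = cmpxchg %ptr, %old, %new
+    //     br %ok, label %atomic_cont, label %atomic_op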
+ llvm::BasicBlock *startBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
+ value = CGF.EmitToMemory(value, type);
+ Builder.CreateBr(opBB);
+ Builder.SetInsertPoint(opBB);
+ atomicPHI = Builder.CreatePHI(value->getType(), 2);
+ atomicPHI->addIncoming(value, startBB);
+ value = atomicPHI;
+ } else {
+ value = EmitLoadOfLValue(LV, E->getExprLoc());
+ input = value;
+ }
+
+ // Special case of integer increment that we have to check first: bool++.
+ // Due to promotion rules, we get:
+ // bool++ -> bool = bool + 1
+ // -> bool = (int)bool + 1
+ // -> bool = ((int)bool + 1 != 0)
+ // An interesting aspect of this is that increment is always true.
+ // Decrement does not have this property.
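+  // (Decrement toggles the value instead: (1 - 1) != 0 is false, while
+  // (0 - 1) != 0 is true.)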
+ if (isInc && type->isBooleanType()) {
+ value = Builder.getTrue();
+
+ // Most common case by far: integer increment.
+ } else if (type->isIntegerType()) {
+ // Note that signed integer inc/dec with width less than int can't
+ // overflow because of promotion rules; we're just eliding a few steps here.
+ bool CanOverflow = value->getType()->getIntegerBitWidth() >=
+ CGF.IntTy->getIntegerBitWidth();
+ if (CanOverflow && type->isSignedIntegerOrEnumerationType()) {
+ value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
+ } else if (CanOverflow && type->isUnsignedIntegerType() &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
+ value =
+ EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, value, isInc));
+ } else {
+ llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
+ value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
+ }
+
+ // Next most common: pointer increment.
+ } else if (const PointerType *ptr = type->getAs<PointerType>()) {
+ QualType type = ptr->getPointeeType();
+
+ // VLA types don't have constant size.
+ if (const VariableArrayType *vla
+ = CGF.getContext().getAsVariableArrayType(type)) {
+ llvm::Value *numElts = CGF.getVLASize(vla).first;
+ if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
+ if (CGF.getLangOpts().isSignedOverflowDefined())
+ value = Builder.CreateGEP(value, numElts, "vla.inc");
+ else
+ value = Builder.CreateInBoundsGEP(value, numElts, "vla.inc");
+
+ // Arithmetic on function pointers (!) is just +-1.
+ } else if (type->isFunctionType()) {
+ llvm::Value *amt = Builder.getInt32(amount);
+
+ value = CGF.EmitCastToVoidPtr(value);
+ if (CGF.getLangOpts().isSignedOverflowDefined())
+ value = Builder.CreateGEP(value, amt, "incdec.funcptr");
+ else
+ value = Builder.CreateInBoundsGEP(value, amt, "incdec.funcptr");
+ value = Builder.CreateBitCast(value, input->getType());
+
+ // For everything else, we can just do a simple increment.
+ } else {
+ llvm::Value *amt = Builder.getInt32(amount);
+ if (CGF.getLangOpts().isSignedOverflowDefined())
+ value = Builder.CreateGEP(value, amt, "incdec.ptr");
+ else
+ value = Builder.CreateInBoundsGEP(value, amt, "incdec.ptr");
+ }
+
+ // Vector increment/decrement.
+ } else if (type->isVectorType()) {
+ if (type->hasIntegerRepresentation()) {
+ llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
+
+ value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
+ } else {
+ value = Builder.CreateFAdd(
+ value,
+ llvm::ConstantFP::get(value->getType(), amount),
+ isInc ? "inc" : "dec");
+ }
+
+ // Floating point.
+ } else if (type->isRealFloatingType()) {
+ // Add the inc/dec to the real part.
+ llvm::Value *amt;
+
+ if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
+ // Another special case: half FP increment should be done via float
+ if (!CGF.getContext().getLangOpts().HalfArgsAndReturns) {
+ value = Builder.CreateCall(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
+ CGF.CGM.FloatTy),
+ input, "incdec.conv");
+ } else {
+ value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
+ }
+ }
+
+ if (value->getType()->isFloatTy())
+ amt = llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<float>(amount)));
+ else if (value->getType()->isDoubleTy())
+ amt = llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<double>(amount)));
+ else {
+ // Remaining types are either Half or LongDouble. Convert from float.
+ llvm::APFloat F(static_cast<float>(amount));
+ bool ignored;
+ // Don't use getFloatTypeSemantics because Half isn't
+ // necessarily represented using the "half" LLVM type.
+ F.convert(value->getType()->isHalfTy()
+ ? CGF.getTarget().getHalfFormat()
+ : CGF.getTarget().getLongDoubleFormat(),
+ llvm::APFloat::rmTowardZero, &ignored);
+ amt = llvm::ConstantFP::get(VMContext, F);
+ }
+ value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
+
+ if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
+ if (!CGF.getContext().getLangOpts().HalfArgsAndReturns) {
+ value = Builder.CreateCall(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
+ CGF.CGM.FloatTy),
+ value, "incdec.conv");
+ } else {
+ value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
+ }
+ }
+
+ // Objective-C pointer types.
+ } else {
+ const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
+ value = CGF.EmitCastToVoidPtr(value);
+
+ CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
+ if (!isInc) size = -size;
+ llvm::Value *sizeValue =
+ llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
+
+ if (CGF.getLangOpts().isSignedOverflowDefined())
+ value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
+ else
+ value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
+ value = Builder.CreateBitCast(value, input->getType());
+ }
+
+ if (atomicPHI) {
+ llvm::BasicBlock *opBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
+ auto Pair = CGF.EmitAtomicCompareExchange(
+ LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
+ llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
+ llvm::Value *success = Pair.second;
+ atomicPHI->addIncoming(old, opBB);
+ Builder.CreateCondBr(success, contBB, opBB);
+ Builder.SetInsertPoint(contBB);
+ return isPre ? value : input;
+ }
+
+ // Store the updated result through the lvalue.
+ if (LV.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
+ else
+ CGF.EmitStoreThroughLValue(RValue::get(value), LV);
+
+  // If this is a postinc, return the value read from memory; otherwise use
+  // the updated value.
+ return isPre ? value : input;
+}
+
+Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ // Emit unary minus with EmitSub so we handle overflow cases etc.
+ BinOpInfo BinOp;
+ BinOp.RHS = Visit(E->getSubExpr());
+
+ if (BinOp.RHS->getType()->isFPOrFPVectorTy())
+ BinOp.LHS = llvm::ConstantFP::getZeroValueForNegation(BinOp.RHS->getType());
+ else
+ BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
+ BinOp.Ty = E->getType();
+ BinOp.Opcode = BO_Sub;
+ BinOp.FPContractable = false;
+ BinOp.E = E;
+ return EmitSub(BinOp);
+}
+
+Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ Value *Op = Visit(E->getSubExpr());
+ return Builder.CreateNot(Op, "neg");
+}
+
+Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
+ // Perform vector logical not on comparison with zero vector.
+ if (E->getType()->isExtVectorType()) {
+ Value *Oper = Visit(E->getSubExpr());
+ Value *Zero = llvm::Constant::getNullValue(Oper->getType());
+ Value *Result;
+ if (Oper->getType()->isFPOrFPVectorTy())
+ Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
+ else
+ Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
+ return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
+ }
+
+ // Compare operand to zero.
+ Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
+
+ // Invert value.
+ // TODO: Could dynamically modify easy computations here. For example, if
+ // the operand is an icmp ne, turn into icmp eq.
+ BoolVal = Builder.CreateNot(BoolVal, "lnot");
+
+ // ZExt result to the expr type.
+ return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
+}
+
+Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
+ // Try folding the offsetof to a constant.
+ llvm::APSInt Value;
+ if (E->EvaluateAsInt(Value, CGF.getContext()))
+ return Builder.getInt(Value);
+
+ // Loop over the components of the offsetof to compute the value.
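+  // For example, offsetof(S, a.b[i]) decomposes into a Field ("a"),
+  // Field ("b"), Array ("[i]") component chain, each contributing an offset
+  // that is accumulated into Result.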
+ unsigned n = E->getNumComponents();
+ llvm::Type* ResultType = ConvertType(E->getType());
+ llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
+ QualType CurrentType = E->getTypeSourceInfo()->getType();
+ for (unsigned i = 0; i != n; ++i) {
+ OffsetOfNode ON = E->getComponent(i);
+ llvm::Value *Offset = nullptr;
+ switch (ON.getKind()) {
+ case OffsetOfNode::Array: {
+ // Compute the index
+ Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
+ llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
+ bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
+ Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
+
+ // Save the element type
+ CurrentType =
+ CGF.getContext().getAsArrayType(CurrentType)->getElementType();
+
+ // Compute the element size
+ llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
+ CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
+
+ // Multiply out to compute the result
+ Offset = Builder.CreateMul(Idx, ElemSize);
+ break;
+ }
+
+ case OffsetOfNode::Field: {
+ FieldDecl *MemberDecl = ON.getField();
+ RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
+
+ // Compute the index of the field in its parent.
+ unsigned i = 0;
+ // FIXME: It would be nice if we didn't have to loop here!
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end();
+ Field != FieldEnd; ++Field, ++i) {
+ if (*Field == MemberDecl)
+ break;
+ }
+ assert(i < RL.getFieldCount() && "offsetof field in wrong type");
+
+ // Compute the offset to the field
+ int64_t OffsetInt = RL.getFieldOffset(i) /
+ CGF.getContext().getCharWidth();
+ Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
+
+ // Save the element type.
+ CurrentType = MemberDecl->getType();
+ break;
+ }
+
+ case OffsetOfNode::Identifier:
+ llvm_unreachable("dependent __builtin_offsetof");
+
+ case OffsetOfNode::Base: {
+ if (ON.getBase()->isVirtual()) {
+ CGF.ErrorUnsupported(E, "virtual base in offsetof");
+ continue;
+ }
+
+ RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
+
+ // Save the element type.
+ CurrentType = ON.getBase()->getType();
+
+ // Compute the offset to the base.
+ const RecordType *BaseRT = CurrentType->getAs<RecordType>();
+ CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
+ CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
+ Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
+ break;
+ }
+ }
+ Result = Builder.CreateAdd(Result, Offset);
+ }
+ return Result;
+}
+
+/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
+/// argument of the sizeof expression as an integer.
+Value *
+ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
+ const UnaryExprOrTypeTraitExpr *E) {
+ QualType TypeToSize = E->getTypeOfArgument();
+ if (E->getKind() == UETT_SizeOf) {
+ if (const VariableArrayType *VAT =
+ CGF.getContext().getAsVariableArrayType(TypeToSize)) {
+ if (E->isArgumentType()) {
+ // sizeof(type) - make sure to emit the VLA size.
+ CGF.EmitVariablyModifiedType(TypeToSize);
+ } else {
+        // C99 6.5.3.4p2: If the argument is an expression of VLA type,
+        // it is evaluated.
+ CGF.EmitIgnoredExpr(E->getArgumentExpr());
+ }
+
+ QualType eltType;
+ llvm::Value *numElts;
+ std::tie(numElts, eltType) = CGF.getVLASize(VAT);
+
+ llvm::Value *size = numElts;
+
+ // Scale the number of non-VLA elements by the non-VLA element size.
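+      // (For "int a[n][4]", for example, numElts is n, eltType is int[4],
+      // and the final size is n * 16 bytes with 4-byte ints.)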
+ CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
+ if (!eltSize.isOne())
+ size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), numElts);
+
+ return size;
+ }
+ } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
+ auto Alignment =
+ CGF.getContext()
+ .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
+ E->getTypeOfArgument()->getPointeeType()))
+ .getQuantity();
+ return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
+ }
+
+ // If this isn't sizeof(vla), the result must be constant; use the constant
+ // folding logic so we don't have to duplicate it here.
+ return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
+}
+
+Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
+ Expr *Op = E->getSubExpr();
+ if (Op->getType()->isAnyComplexType()) {
+ // If it's an l-value, load through the appropriate subobject l-value.
+ // Note that we have to ask E because Op might be an l-value that
+ // this won't work for, e.g. an Obj-C property.
+ if (E->isGLValue())
+ return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
+ E->getExprLoc()).getScalarVal();
+
+ // Otherwise, calculate and project.
+ return CGF.EmitComplexExpr(Op, false, true).first;
+ }
+
+ return Visit(Op);
+}
+
+Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
+ Expr *Op = E->getSubExpr();
+ if (Op->getType()->isAnyComplexType()) {
+ // If it's an l-value, load through the appropriate subobject l-value.
+ // Note that we have to ask E because Op might be an l-value that
+ // this won't work for, e.g. an Obj-C property.
+ if (Op->isGLValue())
+ return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
+ E->getExprLoc()).getScalarVal();
+
+ // Otherwise, calculate and project.
+ return CGF.EmitComplexExpr(Op, true, false).second;
+ }
+
+ // __imag on a scalar returns zero. Emit the subexpr to ensure side
+ // effects are evaluated, but not the actual value.
+ if (Op->isGLValue())
+ CGF.EmitLValue(Op);
+ else
+ CGF.EmitScalarExpr(Op, true);
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+}
+
+//===----------------------------------------------------------------------===//
+// Binary Operators
+//===----------------------------------------------------------------------===//
+
+BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ BinOpInfo Result;
+ Result.LHS = Visit(E->getLHS());
+ Result.RHS = Visit(E->getRHS());
+ Result.Ty = E->getType();
+ Result.Opcode = E->getOpcode();
+ Result.FPContractable = E->isFPContractable();
+ Result.E = E;
+ return Result;
+}
+
+LValue ScalarExprEmitter::EmitCompoundAssignLValue(
+ const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
+ Value *&Result) {
+ QualType LHSTy = E->getLHS()->getType();
+ BinOpInfo OpInfo;
+
+ if (E->getComputationResultType()->isAnyComplexType())
+ return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
+
+ // Emit the RHS first. __block variables need to have the rhs evaluated
+ // first, plus this should improve codegen a little.
+ OpInfo.RHS = Visit(E->getRHS());
+ OpInfo.Ty = E->getComputationResultType();
+ OpInfo.Opcode = E->getOpcode();
+ OpInfo.FPContractable = E->isFPContractable();
+ OpInfo.E = E;
+ // Load/convert the LHS.
+ LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
+
+ llvm::PHINode *atomicPHI = nullptr;
+ if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
+ QualType type = atomicTy->getValueType();
+ if (!type->isBooleanType() && type->isIntegerType() &&
+ !(type->isUnsignedIntegerType() &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
+ CGF.getLangOpts().getSignedOverflowBehavior() !=
+ LangOptions::SOB_Trapping) {
+ llvm::AtomicRMWInst::BinOp aop = llvm::AtomicRMWInst::BAD_BINOP;
+ switch (OpInfo.Opcode) {
+ // We don't have atomicrmw operands for *, %, /, <<, >>
+ case BO_MulAssign: case BO_DivAssign:
+ case BO_RemAssign:
+ case BO_ShlAssign:
+ case BO_ShrAssign:
+ break;
+ case BO_AddAssign:
+ aop = llvm::AtomicRMWInst::Add;
+ break;
+ case BO_SubAssign:
+ aop = llvm::AtomicRMWInst::Sub;
+ break;
+ case BO_AndAssign:
+ aop = llvm::AtomicRMWInst::And;
+ break;
+ case BO_XorAssign:
+ aop = llvm::AtomicRMWInst::Xor;
+ break;
+ case BO_OrAssign:
+ aop = llvm::AtomicRMWInst::Or;
+ break;
+ default:
+ llvm_unreachable("Invalid compound assignment type");
+ }
+ if (aop != llvm::AtomicRMWInst::BAD_BINOP) {
+ llvm::Value *amt = CGF.EmitToMemory(
+ EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
+ E->getExprLoc()),
+ LHSTy);
+ Builder.CreateAtomicRMW(aop, LHSLV.getPointer(), amt,
+ llvm::SequentiallyConsistent);
+ return LHSLV;
+ }
+ }
+ // FIXME: For floating point types, we should be saving and restoring the
+ // floating point environment in the loop.
+ llvm::BasicBlock *startBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
+ OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
+ OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
+ Builder.CreateBr(opBB);
+ Builder.SetInsertPoint(opBB);
+ atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
+ atomicPHI->addIncoming(OpInfo.LHS, startBB);
+ OpInfo.LHS = atomicPHI;
+  } else
+    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
+
+ SourceLocation Loc = E->getExprLoc();
+ OpInfo.LHS =
+ EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);
+
+ // Expand the binary operator.
+ Result = (this->*Func)(OpInfo);
+
+ // Convert the result back to the LHS type.
+ Result =
+ EmitScalarConversion(Result, E->getComputationResultType(), LHSTy, Loc);
+
+ if (atomicPHI) {
+ llvm::BasicBlock *opBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
+ auto Pair = CGF.EmitAtomicCompareExchange(
+ LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
+ llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
+ llvm::Value *success = Pair.second;
+ atomicPHI->addIncoming(old, opBB);
+ Builder.CreateCondBr(success, contBB, opBB);
+ Builder.SetInsertPoint(contBB);
+ return LHSLV;
+ }
+
+ // Store the result value into the LHS lvalue. Bit-fields are handled
+ // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
+ // 'An assignment expression has the value of the left operand after the
+ // assignment...'.
+ if (LHSLV.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
+ else
+ CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
+
+ return LHSLV;
+}
+
+Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+ Value *RHS;
+ LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
+
+ // If the result is clearly ignored, return now.
+ if (Ignore)
+ return nullptr;
+
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getLangOpts().CPlusPlus)
+ return RHS;
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LHS.isVolatileQualified())
+ return RHS;
+
+ // Otherwise, reload the value.
+ return EmitLoadOfLValue(LHS, E->getExprLoc());
+}
+
+void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
+ const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
+ SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
+
+ if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
+ Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
+ SanitizerKind::IntegerDivideByZero));
+ }
+
+ if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
+ Ops.Ty->hasSignedIntegerRepresentation()) {
+ llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
+
+ llvm::Value *IntMin =
+ Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
+ llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
+
+ llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
+ llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
+ llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
+ Checks.push_back(
+ std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
+ }
+
+ if (Checks.size() > 0)
+ EmitBinOpCheck(Checks, Ops);
+}
+
+Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
+ {
+ CodeGenFunction::SanitizerScope SanScope(&CGF);
+ if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
+ CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
+ Ops.Ty->isIntegerType()) {
+ llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
+ EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
+ } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
+ Ops.Ty->isRealFloatingType()) {
+ llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
+ llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
+ EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
+ Ops);
+ }
+ }
+
+ if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
+ llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
+ if (CGF.getLangOpts().OpenCL) {
+ // OpenCL 1.1 7.4: minimum accuracy of single precision / is 2.5ulp
+ llvm::Type *ValTy = Val->getType();
+ if (ValTy->isFloatTy() ||
+ (isa<llvm::VectorType>(ValTy) &&
+ cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
+ CGF.SetFPAccuracy(Val, 2.5);
+ }
+ return Val;
+  } else if (Ops.Ty->hasUnsignedIntegerRepresentation())
+ return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
+ else
+ return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
+}
+
+Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
+ // Rem in C can't be a floating point type: C99 6.5.5p2.
+ if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
+ CodeGenFunction::SanitizerScope SanScope(&CGF);
+ llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
+
+ if (Ops.Ty->isIntegerType())
+ EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
+ }
+
+ if (Ops.Ty->hasUnsignedIntegerRepresentation())
+ return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
+ else
+ return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
+}
+
+Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
+ unsigned IID;
+ unsigned OpID = 0;
+
+ bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
+ switch (Ops.Opcode) {
+ case BO_Add:
+ case BO_AddAssign:
+ OpID = 1;
+ IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
+ llvm::Intrinsic::uadd_with_overflow;
+ break;
+ case BO_Sub:
+ case BO_SubAssign:
+ OpID = 2;
+ IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
+ llvm::Intrinsic::usub_with_overflow;
+ break;
+ case BO_Mul:
+ case BO_MulAssign:
+ OpID = 3;
+ IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
+ llvm::Intrinsic::umul_with_overflow;
+ break;
+ default:
+ llvm_unreachable("Unsupported operation for overflow detection");
+ }
+ OpID <<= 1;
+ if (isSigned)
+ OpID |= 1;
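+  // The encoded operation passed to the handler is (OpID << 1) | isSigned,
+  // e.g. a signed add is 3 and an unsigned mul is 6.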
+
+ llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
+
+ llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
+
+ Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
+ Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
+ Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
+
+ // Handle overflow with llvm.trap if no custom handler has been specified.
+ const std::string *handlerName =
+ &CGF.getLangOpts().OverflowHandler;
+ if (handlerName->empty()) {
+ // If the signed-integer-overflow sanitizer is enabled, emit a call to its
+ // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
+ if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
+ CodeGenFunction::SanitizerScope SanScope(&CGF);
+ llvm::Value *NotOverflow = Builder.CreateNot(overflow);
+ SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
+ : SanitizerKind::UnsignedIntegerOverflow;
+ EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
+ } else
+ CGF.EmitTrapCheck(Builder.CreateNot(overflow));
+ return result;
+ }
+
+ // Branch in case of overflow.
+ llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
+ llvm::Function::iterator insertPt = initialBB->getIterator();
+ llvm::BasicBlock *continueBB = CGF.createBasicBlock("nooverflow", CGF.CurFn,
+ &*std::next(insertPt));
+ llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
+
+ Builder.CreateCondBr(overflow, overflowBB, continueBB);
+
+ // If an overflow handler is set, then we want to call it and then use its
+ // result, if it returns.
+ Builder.SetInsertPoint(overflowBB);
+
+ // Get the overflow handler.
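+  // In C terms the handler is declared roughly as
+  //   long long handler(long long lhs, long long rhs, char op, char width, ...);
+  // and its result, if it returns, replaces the overflowed value (truncated
+  // back to the operand type below).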
+ llvm::Type *Int8Ty = CGF.Int8Ty;
+ llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
+ llvm::FunctionType *handlerTy =
+ llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
+ llvm::Value *handler = CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
+
+ // Sign extend the args to 64-bit, so that we can use the same handler for
+ // all types of overflow.
+ llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
+ llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
+
+ // Call the handler with the two arguments, the operation, and the size of
+ // the result.
+ llvm::Value *handlerArgs[] = {
+ lhs,
+ rhs,
+ Builder.getInt8(OpID),
+ Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
+ };
+ llvm::Value *handlerResult =
+ CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
+
+ // Truncate the result back to the desired size.
+ handlerResult = Builder.CreateTrunc(handlerResult, opTy);
+ Builder.CreateBr(continueBB);
+
+ Builder.SetInsertPoint(continueBB);
+ llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
+ phi->addIncoming(result, initialBB);
+ phi->addIncoming(handlerResult, overflowBB);
+
+ return phi;
+}
+
+/// Emit pointer + index arithmetic.
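+/// This covers both "p + i" and "i + p" for addition; for subtraction only
+/// "p - i" reaches here, since pointer-pointer subtraction is handled
+/// separately in EmitSub.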
+static Value *emitPointerArithmetic(CodeGenFunction &CGF,
+ const BinOpInfo &op,
+ bool isSubtraction) {
+ // Must have binary (not unary) expr here. Unary pointer
+ // increment/decrement doesn't use this path.
+ const BinaryOperator *expr = cast<BinaryOperator>(op.E);
+
+ Value *pointer = op.LHS;
+ Expr *pointerOperand = expr->getLHS();
+ Value *index = op.RHS;
+ Expr *indexOperand = expr->getRHS();
+
+ // In a subtraction, the LHS is always the pointer.
+ if (!isSubtraction && !pointer->getType()->isPointerTy()) {
+ std::swap(pointer, index);
+ std::swap(pointerOperand, indexOperand);
+ }
+
+ unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
+ if (width != CGF.PointerWidthInBits) {
+    // Zero-extend or sign-extend the index to pointer width, according to
+    // whether the index is signed or not.
+ bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
+ index = CGF.Builder.CreateIntCast(index, CGF.PtrDiffTy, isSigned,
+ "idx.ext");
+ }
+
+ // If this is subtraction, negate the index.
+ if (isSubtraction)
+ index = CGF.Builder.CreateNeg(index, "idx.neg");
+
+ if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
+ CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
+ /*Accessed*/ false);
+
+ const PointerType *pointerType
+ = pointerOperand->getType()->getAs<PointerType>();
+ if (!pointerType) {
+ QualType objectType = pointerOperand->getType()
+ ->castAs<ObjCObjectPointerType>()
+ ->getPointeeType();
+ llvm::Value *objectSize
+ = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
+
+ index = CGF.Builder.CreateMul(index, objectSize);
+
+ Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
+ result = CGF.Builder.CreateGEP(result, index, "add.ptr");
+ return CGF.Builder.CreateBitCast(result, pointer->getType());
+ }
+
+ QualType elementType = pointerType->getPointeeType();
+ if (const VariableArrayType *vla
+ = CGF.getContext().getAsVariableArrayType(elementType)) {
+ // The element count here is the total number of non-VLA elements.
+ llvm::Value *numElements = CGF.getVLASize(vla).first;
+
+ // Effectively, the multiply by the VLA size is part of the GEP.
+ // GEP indexes are signed, and scaling an index isn't permitted to
+ // signed-overflow, so we use the same semantics for our explicit
+ // multiply. We suppress this if overflow is not undefined behavior.
+ if (CGF.getLangOpts().isSignedOverflowDefined()) {
+ index = CGF.Builder.CreateMul(index, numElements, "vla.index");
+ pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
+ } else {
+ index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
+ pointer = CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
+ }
+ return pointer;
+ }
+
+ // Explicitly handle GNU void* and function pointer arithmetic extensions. The
+ // GNU void* casts amount to no-ops since our void* type is i8*, but this is
+  // future-proof.
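+  // For example, under this extension "(void *)p + 1" advances the pointer
+  // by exactly one byte, as if the pointee size were 1.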
+ if (elementType->isVoidType() || elementType->isFunctionType()) {
+ Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
+ result = CGF.Builder.CreateGEP(result, index, "add.ptr");
+ return CGF.Builder.CreateBitCast(result, pointer->getType());
+ }
+
+ if (CGF.getLangOpts().isSignedOverflowDefined())
+ return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
+
+ return CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
+}
+
+// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
+// Addend. Use negMul and negAdd to negate the first operand of the Mul or
+// the add operand respectively. This allows fmuladd to represent a*b-c, or
+// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
+// efficient operations.
+static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend,
+ const CodeGenFunction &CGF, CGBuilderTy &Builder,
+ bool negMul, bool negAdd) {
+ assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");
+
+ Value *MulOp0 = MulOp->getOperand(0);
+ Value *MulOp1 = MulOp->getOperand(1);
+ if (negMul) {
+ MulOp0 =
+ Builder.CreateFSub(
+ llvm::ConstantFP::getZeroValueForNegation(MulOp0->getType()), MulOp0,
+ "neg");
+ } else if (negAdd) {
+ Addend =
+ Builder.CreateFSub(
+ llvm::ConstantFP::getZeroValueForNegation(Addend->getType()), Addend,
+ "neg");
+ }
+
+ Value *FMulAdd = Builder.CreateCall(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
+ {MulOp0, MulOp1, Addend});
+ MulOp->eraseFromParent();
+
+ return FMulAdd;
+}
+
+// Check whether it would be legal to emit an fmuladd intrinsic call to
+// represent op and if so, build the fmuladd.
+//
+// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
+// Does NOT check the type of the operation - it's assumed that this function
+// will be called from contexts where it's known that the type is contractable.
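+//
+// For example, with -ffp-contract=on, "a * b + c" on floats (where the
+// multiply result has no other uses) becomes a single llvm.fmuladd call.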
+static Value* tryEmitFMulAdd(const BinOpInfo &op,
+ const CodeGenFunction &CGF, CGBuilderTy &Builder,
+ bool isSub=false) {
+
+ assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
+ op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
+ "Only fadd/fsub can be the root of an fmuladd.");
+
+ // Check whether this op is marked as fusable.
+ if (!op.FPContractable)
+ return nullptr;
+
+ // Check whether -ffp-contract=on. (If -ffp-contract=off/fast, fusing is
+ // either disabled, or handled entirely by the LLVM backend).
+ if (CGF.CGM.getCodeGenOpts().getFPContractMode() != CodeGenOptions::FPC_On)
+ return nullptr;
+
+ // We have a potentially fusable op. Look for a mul on one of the operands.
+ // Also, make sure that the mul result isn't used directly. In that case,
+ // there's no point creating a muladd operation.
+ if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
+ if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
+ LHSBinOp->use_empty())
+ return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
+ }
+ if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
+ if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
+ RHSBinOp->use_empty())
+ return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
+ }
+
+ return nullptr;
+}
+
+Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
+ if (op.LHS->getType()->isPointerTy() ||
+ op.RHS->getType()->isPointerTy())
+ return emitPointerArithmetic(CGF, op, /*subtraction*/ false);
+
+ if (op.Ty->isSignedIntegerOrEnumerationType()) {
+ switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Defined:
+ return Builder.CreateAdd(op.LHS, op.RHS, "add");
+ case LangOptions::SOB_Undefined:
+ if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
+ return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
+ // Fall through.
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(op);
+ }
+ }
+
+ if (op.Ty->isUnsignedIntegerType() &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
+ return EmitOverflowCheckedBinOp(op);
+
+ if (op.LHS->getType()->isFPOrFPVectorTy()) {
+ // Try to form an fmuladd.
+ if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
+ return FMulAdd;
+
+ return Builder.CreateFAdd(op.LHS, op.RHS, "add");
+ }
+
+ return Builder.CreateAdd(op.LHS, op.RHS, "add");
+}
+
+Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
+ // The LHS is always a pointer if either side is.
+ if (!op.LHS->getType()->isPointerTy()) {
+ if (op.Ty->isSignedIntegerOrEnumerationType()) {
+ switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Defined:
+ return Builder.CreateSub(op.LHS, op.RHS, "sub");
+ case LangOptions::SOB_Undefined:
+ if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
+ return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
+ // Fall through.
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(op);
+ }
+ }
+
+ if (op.Ty->isUnsignedIntegerType() &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
+ return EmitOverflowCheckedBinOp(op);
+
+ if (op.LHS->getType()->isFPOrFPVectorTy()) {
+ // Try to form an fmuladd.
+ if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
+ return FMulAdd;
+ return Builder.CreateFSub(op.LHS, op.RHS, "sub");
+ }
+
+ return Builder.CreateSub(op.LHS, op.RHS, "sub");
+ }
+
+ // If the RHS is not a pointer, then we have normal pointer
+ // arithmetic.
+ if (!op.RHS->getType()->isPointerTy())
+ return emitPointerArithmetic(CGF, op, /*subtraction*/ true);
+
+ // Otherwise, this is a pointer subtraction.
+
+ // Do the raw subtraction part.
+ llvm::Value *LHS
+ = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
+ llvm::Value *RHS
+ = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
+ Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
+
+ // Okay, figure out the element size.
+ const BinaryOperator *expr = cast<BinaryOperator>(op.E);
+ QualType elementType = expr->getLHS()->getType()->getPointeeType();
+
+ llvm::Value *divisor = nullptr;
+
+ // For a variable-length array, this is going to be non-constant.
+ if (const VariableArrayType *vla
+ = CGF.getContext().getAsVariableArrayType(elementType)) {
+ llvm::Value *numElements;
+ std::tie(numElements, elementType) = CGF.getVLASize(vla);
+
+ divisor = numElements;
+
+ // Scale the number of non-VLA elements by the non-VLA element size.
+ CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
+ if (!eltSize.isOne())
+ divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
+
+  // For everything else, we can just compute it, safe in the
+ // assumption that Sema won't let anything through that we can't
+ // safely compute the size of.
+ } else {
+ CharUnits elementSize;
+ // Handle GCC extension for pointer arithmetic on void* and
+ // function pointer types.
+ if (elementType->isVoidType() || elementType->isFunctionType())
+ elementSize = CharUnits::One();
+ else
+ elementSize = CGF.getContext().getTypeSizeInChars(elementType);
+
+ // Don't even emit the divide for element size of 1.
+ if (elementSize.isOne())
+ return diffInChars;
+
+ divisor = CGF.CGM.getSize(elementSize);
+ }
+
+ // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
+ // pointer difference in C is only defined in the case where both operands
+ // are pointing to elements of an array.
+ return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
+}
+
+Value *ScalarExprEmitter::GetWidthMinusOneValue(Value *LHS, Value *RHS) {
+ llvm::IntegerType *Ty;
+ if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
+ Ty = cast<llvm::IntegerType>(VT->getElementType());
+ else
+ Ty = cast<llvm::IntegerType>(LHS->getType());
+ return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
+}
+
+Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
+ // LLVM requires the LHS and RHS to be the same type: promote or truncate the
+ // RHS to the same size as the LHS.
+ Value *RHS = Ops.RHS;
+ if (Ops.LHS->getType() != RHS->getType())
+ RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
+
+ bool SanitizeBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
+ Ops.Ty->hasSignedIntegerRepresentation();
+ bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
+ // OpenCL 6.3j: shift values are effectively % word size of LHS.
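+  // For example, with a 32-bit LHS a shift amount of 35 is masked to
+  // 35 & 31 == 3.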
+ if (CGF.getLangOpts().OpenCL)
+ RHS =
+ Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shl.mask");
+ else if ((SanitizeBase || SanitizeExponent) &&
+ isa<llvm::IntegerType>(Ops.LHS->getType())) {
+ CodeGenFunction::SanitizerScope SanScope(&CGF);
+ SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
+ llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, RHS);
+ llvm::Value *ValidExponent = Builder.CreateICmpULE(RHS, WidthMinusOne);
+
+ if (SanitizeExponent) {
+ Checks.push_back(
+ std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
+ }
+
+ if (SanitizeBase) {
+ // Check whether we are shifting any non-zero bits off the top of the
+      // integer. We only emit this check if the exponent is valid; otherwise
+      // the instructions below would themselves have undefined behavior.
+ llvm::BasicBlock *Orig = Builder.GetInsertBlock();
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
+ llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
+ Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
+ CGF.EmitBlock(CheckShiftBase);
+ llvm::Value *BitsShiftedOff =
+ Builder.CreateLShr(Ops.LHS,
+ Builder.CreateSub(WidthMinusOne, RHS, "shl.zeros",
+ /*NUW*/true, /*NSW*/true),
+ "shl.check");
+ if (CGF.getLangOpts().CPlusPlus) {
+ // In C99, we are not permitted to shift a 1 bit into the sign bit.
+ // Under C++11's rules, shifting a 1 bit into the sign bit is
+ // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
+ // define signed left shifts, so we use the C99 and C++11 rules there).
+ llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
+ BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
+ }
+ llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
+ llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
+ CGF.EmitBlock(Cont);
+ llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
+ BaseCheck->addIncoming(Builder.getTrue(), Orig);
+ BaseCheck->addIncoming(ValidBase, CheckShiftBase);
+ Checks.push_back(std::make_pair(BaseCheck, SanitizerKind::ShiftBase));
+ }
+
+ assert(!Checks.empty());
+ EmitBinOpCheck(Checks, Ops);
+ }
+
+ return Builder.CreateShl(Ops.LHS, RHS, "shl");
+}
+
+Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
+ // LLVM requires the LHS and RHS to be the same type: promote or truncate the
+ // RHS to the same size as the LHS.
+ Value *RHS = Ops.RHS;
+ if (Ops.LHS->getType() != RHS->getType())
+ RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
+
+ // OpenCL 6.3j: shift values are effectively % word size of LHS.
+ if (CGF.getLangOpts().OpenCL)
+ RHS =
+ Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shr.mask");
+ else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
+ isa<llvm::IntegerType>(Ops.LHS->getType())) {
+ CodeGenFunction::SanitizerScope SanScope(&CGF);
+ llvm::Value *Valid =
+ Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
+ EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
+ }
+
+ if (Ops.Ty->hasUnsignedIntegerRepresentation())
+ return Builder.CreateLShr(Ops.LHS, RHS, "shr");
+ return Builder.CreateAShr(Ops.LHS, RHS, "shr");
+}
+
+enum IntrinsicType { VCMPEQ, VCMPGT };
+// Return the corresponding AltiVec comparison intrinsic for the given vector
+// element type.
+static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
+ BuiltinType::Kind ElemKind) {
+ switch (ElemKind) {
+ default: llvm_unreachable("unexpected element type");
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
+ case BuiltinType::UShort:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
+ case BuiltinType::Short:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
+ case BuiltinType::UInt:
+ case BuiltinType::ULong:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
+ case BuiltinType::Int:
+ case BuiltinType::Long:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
+ case BuiltinType::Float:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
+ }
+}
+
+Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
+ llvm::CmpInst::Predicate UICmpOpc,
+ llvm::CmpInst::Predicate SICmpOpc,
+ llvm::CmpInst::Predicate FCmpOpc) {
+ TestAndClearIgnoreResultAssign();
+ Value *Result;
+ QualType LHSTy = E->getLHS()->getType();
+ QualType RHSTy = E->getRHS()->getType();
+ if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
+ assert(E->getOpcode() == BO_EQ ||
+ E->getOpcode() == BO_NE);
+ Value *LHS = CGF.EmitScalarExpr(E->getLHS());
+ Value *RHS = CGF.EmitScalarExpr(E->getRHS());
+ Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
+ CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
+ } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
+ Value *LHS = Visit(E->getLHS());
+ Value *RHS = Visit(E->getRHS());
+
+    // For AltiVec, the comparison yields a numeric type, so we use
+    // intrinsics that compare whole vectors and give 0 or 1 as the result.
+ if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
+      // Constants for mapping CR6 register bits to the predicate result.
+ enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
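+      // The "*_p" predicate intrinsics take this CR6 selector as their first
+      // argument and return an i32 (0 or 1) derived from the CR6 bits it
+      // selects.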
+
+ llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
+
+      // In several cases the order of the vector arguments is reversed.
+ Value *FirstVecArg = LHS,
+ *SecondVecArg = RHS;
+
+ QualType ElTy = LHSTy->getAs<VectorType>()->getElementType();
+ const BuiltinType *BTy = ElTy->getAs<BuiltinType>();
+ BuiltinType::Kind ElementKind = BTy->getKind();
+
+ switch(E->getOpcode()) {
+ default: llvm_unreachable("is not a comparison operation");
+ case BO_EQ:
+ CR6 = CR6_LT;
+ ID = GetIntrinsic(VCMPEQ, ElementKind);
+ break;
+ case BO_NE:
+ CR6 = CR6_EQ;
+ ID = GetIntrinsic(VCMPEQ, ElementKind);
+ break;
+ case BO_LT:
+ CR6 = CR6_LT;
+ ID = GetIntrinsic(VCMPGT, ElementKind);
+ std::swap(FirstVecArg, SecondVecArg);
+ break;
+ case BO_GT:
+ CR6 = CR6_LT;
+ ID = GetIntrinsic(VCMPGT, ElementKind);
+ break;
+ case BO_LE:
+ if (ElementKind == BuiltinType::Float) {
+ CR6 = CR6_LT;
+ ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
+ std::swap(FirstVecArg, SecondVecArg);
+        } else {
+ CR6 = CR6_EQ;
+ ID = GetIntrinsic(VCMPGT, ElementKind);
+ }
+ break;
+ case BO_GE:
+ if (ElementKind == BuiltinType::Float) {
+ CR6 = CR6_LT;
+ ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
+        } else {
+ CR6 = CR6_EQ;
+ ID = GetIntrinsic(VCMPGT, ElementKind);
+ std::swap(FirstVecArg, SecondVecArg);
+ }
+ break;
+ }
+
+ Value *CR6Param = Builder.getInt32(CR6);
+ llvm::Function *F = CGF.CGM.getIntrinsic(ID);
+ Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
+ return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
+ E->getExprLoc());
+ }
+
+ if (LHS->getType()->isFPOrFPVectorTy()) {
+ Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
+ } else if (LHSTy->hasSignedIntegerRepresentation()) {
+ Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
+ } else {
+ // Unsigned integers and pointers.
+ Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
+ }
+
+ // If this is a vector comparison, sign extend the result to the appropriate
+ // vector integer type and return it (don't convert to bool).
+ if (LHSTy->isVectorType())
+ return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
+
+ } else {
+ // Complex Comparison: can only be an equality comparison.
+ CodeGenFunction::ComplexPairTy LHS, RHS;
+ QualType CETy;
+ if (auto *CTy = LHSTy->getAs<ComplexType>()) {
+ LHS = CGF.EmitComplexExpr(E->getLHS());
+ CETy = CTy->getElementType();
+ } else {
+ LHS.first = Visit(E->getLHS());
+ LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
+ CETy = LHSTy;
+ }
+ if (auto *CTy = RHSTy->getAs<ComplexType>()) {
+ RHS = CGF.EmitComplexExpr(E->getRHS());
+ assert(CGF.getContext().hasSameUnqualifiedType(CETy,
+ CTy->getElementType()) &&
+ "The element types must always match.");
+ (void)CTy;
+ } else {
+ RHS.first = Visit(E->getRHS());
+ RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
+ assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
+ "The element types must always match.");
+ }
+
+ Value *ResultR, *ResultI;
+ if (CETy->isRealFloatingType()) {
+ ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
+ ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
+ } else {
+ // Complex comparisons can only be equality comparisons. As such, signed
+ // and unsigned opcodes are the same.
+ ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
+ ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
+ }
+
+ if (E->getOpcode() == BO_EQ) {
+ Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
+ } else {
+ assert(E->getOpcode() == BO_NE &&
+ "Complex comparison other than == or != ?");
+ Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
+ }
+ }
+
+ return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
+ E->getExprLoc());
+}
+
+Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+
+ Value *RHS;
+ LValue LHS;
+
+ switch (E->getLHS()->getType().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
+ break;
+
+ case Qualifiers::OCL_Autoreleasing:
+ std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ RHS = Visit(E->getRHS());
+ LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
+ RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
+ break;
+
+ // No reason to do any of these differently.
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ // __block variables need to have the rhs evaluated first, plus
+ // this should improve codegen just a little.
+ RHS = Visit(E->getRHS());
+ LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
+
+ // Store the value into the LHS. Bit-fields are handled specially
+ // because the result is altered by the store, i.e., [C99 6.5.16p1]
+ // 'An assignment expression has the value of the left operand after
+ // the assignment...'.
+ if (LHS.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
+ else
+ CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
+ }
+
+ // If the result is clearly ignored, return now.
+ if (Ignore)
+ return nullptr;
+
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getLangOpts().CPlusPlus)
+ return RHS;
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LHS.isVolatileQualified())
+ return RHS;
+
+ // Otherwise, reload the value.
+ return EmitLoadOfLValue(LHS, E->getExprLoc());
+}
+
+Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
+ // Perform vector logical and on comparisons with zero vectors.
+ if (E->getType()->isVectorType()) {
+ CGF.incrementProfileCounter(E);
+
+ Value *LHS = Visit(E->getLHS());
+ Value *RHS = Visit(E->getRHS());
+ Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
+ if (LHS->getType()->isFPOrFPVectorTy()) {
+ LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
+ RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
+ } else {
+ LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
+ RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
+ }
+ Value *And = Builder.CreateAnd(LHS, RHS);
+ return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
+ }
+
+ llvm::Type *ResTy = ConvertType(E->getType());
+
+  // If we have 0 && RHS, see if we can elide RHS; if so, just return 0.
+ // If we have 1 && X, just emit X without inserting the control flow.
+ bool LHSCondVal;
+ if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
+ if (LHSCondVal) { // If we have 1 && X, just emit X.
+ CGF.incrementProfileCounter(E);
+
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ // ZExt result to int or bool.
+ return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
+ }
+
+ // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
+ if (!CGF.ContainsLabel(E->getRHS()))
+ return llvm::Constant::getNullValue(ResTy);
+ }
+
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+
+ // Branch on the LHS first. If it is false, go to the failure (cont) block.
+ CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
+ CGF.getProfileCount(E->getRHS()));
+
+  // Any edges into the ContBlock at this point come from an (indeterminate
+  // number of) branches out of this first condition. All of those values will
+  // be false. Start setting up the PHI node in ContBlock for this.
+ llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
+ "", ContBlock);
+ for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
+ PI != PE; ++PI)
+ PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
+
+ eval.begin(CGF);
+ CGF.EmitBlock(RHSBlock);
+ CGF.incrementProfileCounter(E);
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ eval.end(CGF);
+
+  // Reacquire the RHS block, as there may be subblocks inserted.
+ RHSBlock = Builder.GetInsertBlock();
+
+ // Emit an unconditional branch from this block to ContBlock.
+ {
+ // There is no need to emit line number for unconditional branch.
+ auto NL = ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(ContBlock);
+ }
+ // Insert an entry into the phi node for the edge with the value of RHSCond.
+ PN->addIncoming(RHSCond, RHSBlock);
+
+ // ZExt result to int.
+ return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
+}
+
+Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
+ // Perform vector logical or on comparisons with zero vectors.
+ if (E->getType()->isVectorType()) {
+ CGF.incrementProfileCounter(E);
+
+ Value *LHS = Visit(E->getLHS());
+ Value *RHS = Visit(E->getRHS());
+ Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
+ if (LHS->getType()->isFPOrFPVectorTy()) {
+ LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
+ RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
+ } else {
+ LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
+ RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
+ }
+ Value *Or = Builder.CreateOr(LHS, RHS);
+ return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
+ }
+
+ llvm::Type *ResTy = ConvertType(E->getType());
+
+  // If we have 1 || RHS, see if we can elide RHS; if so, just return 1.
+ // If we have 0 || X, just emit X without inserting the control flow.
+ bool LHSCondVal;
+ if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
+ if (!LHSCondVal) { // If we have 0 || X, just emit X.
+ CGF.incrementProfileCounter(E);
+
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ // ZExt result to int or bool.
+ return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
+ }
+
+ // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
+ if (!CGF.ContainsLabel(E->getRHS()))
+ return llvm::ConstantInt::get(ResTy, 1);
+ }
+
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+
+ // Branch on the LHS first. If it is true, go to the success (cont) block.
+ CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
+ CGF.getCurrentProfileCount() -
+ CGF.getProfileCount(E->getRHS()));
+
+  // Any edges into the ContBlock at this point come from an (indeterminate
+  // number of) branches out of this first condition. All of those values will
+  // be true. Start setting up the PHI node in ContBlock for this.
+ llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
+ "", ContBlock);
+ for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
+ PI != PE; ++PI)
+ PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
+
+ eval.begin(CGF);
+
+ // Emit the RHS condition as a bool value.
+ CGF.EmitBlock(RHSBlock);
+ CGF.incrementProfileCounter(E);
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+
+ eval.end(CGF);
+
+  // Reacquire the RHS block, as there may be subblocks inserted.
+ RHSBlock = Builder.GetInsertBlock();
+
+ // Emit an unconditional branch from this block to ContBlock. Insert an entry
+ // into the phi node for the edge with the value of RHSCond.
+ CGF.EmitBlock(ContBlock);
+ PN->addIncoming(RHSCond, RHSBlock);
+
+ // ZExt result to int.
+ return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
+}
+
+Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitIgnoredExpr(E->getLHS());
+ CGF.EnsureInsertPoint();
+ return Visit(E->getRHS());
+}
+
+//===----------------------------------------------------------------------===//
+// Other Operators
+//===----------------------------------------------------------------------===//
+
+/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
+/// expression is cheap enough and side-effect-free enough to evaluate
+/// unconditionally instead of conditionally. This is used to convert control
+/// flow into selects in some cases.
+static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
+ CodeGenFunction &CGF) {
+ // Anything that is an integer or floating point constant is fine.
+ //
+ // Even non-volatile automatic variables can't be evaluated unconditionally:
+ // referencing a thread_local may cause non-trivial initialization work to
+ // occur, and if we're inside a lambda and one of the variables is from the
+ // scope outside the lambda, that function may have returned already, so
+ // reading its locals is a bad idea. Such reads may also introduce races
+ // that didn't exist in the source-level program.
+ return E->IgnoreParens()->isEvaluatable(CGF.getContext());
+}
+
+
+Value *ScalarExprEmitter::
+VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
+ TestAndClearIgnoreResultAssign();
+
+ // Bind the common expression if necessary.
+ CodeGenFunction::OpaqueValueMapping binding(CGF, E);
+
+ Expr *condExpr = E->getCond();
+ Expr *lhsExpr = E->getTrueExpr();
+ Expr *rhsExpr = E->getFalseExpr();
+
+ // If the condition constant folds and can be elided, try to avoid emitting
+ // the condition and the dead arm.
+ bool CondExprBool;
+ if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
+ Expr *live = lhsExpr, *dead = rhsExpr;
+ if (!CondExprBool) std::swap(live, dead);
+
+ // If the dead side doesn't have labels we need, just emit the Live part.
+ if (!CGF.ContainsLabel(dead)) {
+ if (CondExprBool)
+ CGF.incrementProfileCounter(E);
+ Value *Result = Visit(live);
+
+ // If the live part is a throw expression, it acts like it has a void
+ // type, so evaluating it returns a null Value*. However, a conditional
+ // with non-void type must return a non-null Value*.
+ if (!Result && !E->getType()->isVoidType())
+ Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+
+ return Result;
+ }
+ }
+
+ // OpenCL: If the condition is a vector, we can treat this condition like
+ // the select function.
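+ // For example (illustrative): given int4 c, "c ? a : b" picks a[i] wherever
+ // the sign bit of c[i] is set, matching OpenCL's select(b, a, c).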
+ if (CGF.getLangOpts().OpenCL
+ && condExpr->getType()->isVectorType()) {
+ CGF.incrementProfileCounter(E);
+
+ llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
+ llvm::Value *LHS = Visit(lhsExpr);
+ llvm::Value *RHS = Visit(rhsExpr);
+
+ llvm::Type *condType = ConvertType(condExpr->getType());
+ llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
+
+ unsigned numElem = vecTy->getNumElements();
+ llvm::Type *elemType = vecTy->getElementType();
+
+ llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
+ llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
+ llvm::Value *tmp = Builder.CreateSExt(TestMSB,
+ llvm::VectorType::get(elemType,
+ numElem),
+ "sext");
+ llvm::Value *tmp2 = Builder.CreateNot(tmp);
+
+ // Cast float to int to perform ANDs if necessary.
+ llvm::Value *RHSTmp = RHS;
+ llvm::Value *LHSTmp = LHS;
+ bool wasCast = false;
+ llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
+ if (rhsVTy->getElementType()->isFloatingPointTy()) {
+ RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
+ LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
+ wasCast = true;
+ }
+
+ llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
+ llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
+ llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
+ if (wasCast)
+ tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
+
+ return tmp5;
+ }
+
+ // If this is a really simple expression (like x ? 4 : 5), emit this as a
+ // select instead of as control flow. We can only do this if it is cheap and
+ // safe to evaluate the LHS and RHS unconditionally.
+ if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
+ isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
+ CGF.incrementProfileCounter(E);
+
+ llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
+ llvm::Value *LHS = Visit(lhsExpr);
+ llvm::Value *RHS = Visit(rhsExpr);
+ if (!LHS) {
+ // If the conditional has void type, make sure we return a null Value*.
+ assert(!RHS && "LHS and RHS types must match");
+ return nullptr;
+ }
+ return Builder.CreateSelect(CondV, LHS, RHS, "cond");
+ }
+
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+ CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
+ CGF.getProfileCount(lhsExpr));
+
+ CGF.EmitBlock(LHSBlock);
+ CGF.incrementProfileCounter(E);
+ eval.begin(CGF);
+ Value *LHS = Visit(lhsExpr);
+ eval.end(CGF);
+
+ LHSBlock = Builder.GetInsertBlock();
+ Builder.CreateBr(ContBlock);
+
+ CGF.EmitBlock(RHSBlock);
+ eval.begin(CGF);
+ Value *RHS = Visit(rhsExpr);
+ eval.end(CGF);
+
+ RHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBlock(ContBlock);
+
+ // If the LHS or RHS is a throw expression, it will be legitimately null.
+ if (!LHS)
+ return RHS;
+ if (!RHS)
+ return LHS;
+
+ // Create a PHI node for the real part.
+ llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
+ PN->addIncoming(LHS, LHSBlock);
+ PN->addIncoming(RHS, RHSBlock);
+ return PN;
+}
+
+Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
+ return Visit(E->getChosenSubExpr());
+}
+
+Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
+ QualType Ty = VE->getType();
+
+ if (Ty->isVariablyModifiedType())
+ CGF.EmitVariablyModifiedType(Ty);
+
+ Address ArgValue = Address::invalid();
+ Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
+
+ llvm::Type *ArgTy = ConvertType(VE->getType());
+
+ // If EmitVAArg fails, we fall back to the LLVM instruction.
+ if (!ArgPtr.isValid())
+ return Builder.CreateVAArg(ArgValue.getPointer(), ArgTy);
+
+ // FIXME Volatility.
+ llvm::Value *Val = Builder.CreateLoad(ArgPtr);
+
+ // If EmitVAArg promoted the type, we must truncate it.
+ if (ArgTy != Val->getType()) {
+ if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
+ Val = Builder.CreateIntToPtr(Val, ArgTy);
+ else
+ Val = Builder.CreateTrunc(Val, ArgTy);
+ }
+
+ return Val;
+}
+
+Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
+ return CGF.EmitBlockLiteral(block);
+}
+
+Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
+ Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
+ llvm::Type *DstTy = ConvertType(E->getType());
+
+ // Going from vec4->vec3 or vec3->vec4 is a special case and requires
+ // a shuffle vector instead of a bitcast.
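+ // For example (illustrative): __builtin_astype from float4 to float3 lowers
+ // to a shufflevector with mask <0, 1, 2> rather than a plain bitcast.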
+ llvm::Type *SrcTy = Src->getType();
+ if (isa<llvm::VectorType>(DstTy) && isa<llvm::VectorType>(SrcTy)) {
+ unsigned numElementsDst = cast<llvm::VectorType>(DstTy)->getNumElements();
+ unsigned numElementsSrc = cast<llvm::VectorType>(SrcTy)->getNumElements();
+ if ((numElementsDst == 3 && numElementsSrc == 4)
+ || (numElementsDst == 4 && numElementsSrc == 3)) {
+
+
+ // In the case of going from int4->float3, a bitcast is needed before
+ // doing a shuffle.
+ llvm::Type *srcElemTy =
+ cast<llvm::VectorType>(SrcTy)->getElementType();
+ llvm::Type *dstElemTy =
+ cast<llvm::VectorType>(DstTy)->getElementType();
+
+ if ((srcElemTy->isIntegerTy() && dstElemTy->isFloatTy())
+ || (srcElemTy->isFloatTy() && dstElemTy->isIntegerTy())) {
+ // Bitcast the source to a vector with the destination's element type but
+ // the source's element count, so the shuffle operates on matching elements.
+ llvm::VectorType *newSrcTy = llvm::VectorType::get(dstElemTy,
+ numElementsSrc);
+
+ Src = Builder.CreateBitCast(Src, newSrcTy, "astypeCast");
+ }
+
+ llvm::Value *UnV = llvm::UndefValue::get(Src->getType());
+
+ SmallVector<llvm::Constant*, 3> Args;
+ Args.push_back(Builder.getInt32(0));
+ Args.push_back(Builder.getInt32(1));
+ Args.push_back(Builder.getInt32(2));
+
+ if (numElementsDst == 4)
+ Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
+
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
+
+ return Builder.CreateShuffleVector(Src, UnV, Mask, "astype");
+ }
+ }
+
+ return Builder.CreateBitCast(Src, DstTy, "astype");
+}
+
+Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
+ return CGF.EmitAtomicExpr(E).getScalarVal();
+}
+
+//===----------------------------------------------------------------------===//
+// Entry Point into this File
+//===----------------------------------------------------------------------===//
+
+/// Emit the computation of the specified expression of scalar type, ignoring
+/// the result.
+Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
+ assert(E && hasScalarEvaluationKind(E->getType()) &&
+ "Invalid scalar expression to emit");
+
+ return ScalarExprEmitter(*this, IgnoreResultAssign)
+ .Visit(const_cast<Expr *>(E));
+}
+
+/// Emit a conversion from the specified type to the specified destination type,
+/// both of which are LLVM scalar types.
+Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
+ QualType DstTy,
+ SourceLocation Loc) {
+ assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
+ "Invalid scalar expression to emit");
+ return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
+}
+
+/// Emit a conversion from the specified complex type to the specified
+/// destination type, where the destination type is an LLVM scalar type.
+Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
+ QualType SrcTy,
+ QualType DstTy,
+ SourceLocation Loc) {
+ assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
+ "Invalid complex -> scalar conversion");
+ return ScalarExprEmitter(*this)
+ .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
+}
+
+
+llvm::Value *CodeGenFunction::
+EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+ return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
+}
+
+LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
+ // object->isa or (*object).isa
+ // Generate code as for: *(Class*)object
+
+ Expr *BaseExpr = E->getBase();
+ Address Addr = Address::invalid();
+ if (BaseExpr->isRValue()) {
+ Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
+ } else {
+ Addr = EmitLValue(BaseExpr).getAddress();
+ }
+
+ // Cast the address to Class*.
+ Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
+ return MakeAddrLValue(Addr, E->getType());
+}
+
+
+LValue CodeGenFunction::EmitCompoundAssignmentLValue(
+ const CompoundAssignOperator *E) {
+ ScalarExprEmitter Scalar(*this);
+ Value *Result = nullptr;
+ switch (E->getOpcode()) {
+#define COMPOUND_OP(Op) \
+ case BO_##Op##Assign: \
+ return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
+ Result)
+ COMPOUND_OP(Mul);
+ COMPOUND_OP(Div);
+ COMPOUND_OP(Rem);
+ COMPOUND_OP(Add);
+ COMPOUND_OP(Sub);
+ COMPOUND_OP(Shl);
+ COMPOUND_OP(Shr);
+ COMPOUND_OP(And);
+ COMPOUND_OP(Xor);
+ COMPOUND_OP(Or);
+#undef COMPOUND_OP
+
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ case BO_Mul:
+ case BO_Div:
+ case BO_Rem:
+ case BO_Add:
+ case BO_Sub:
+ case BO_Shl:
+ case BO_Shr:
+ case BO_LT:
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ case BO_EQ:
+ case BO_NE:
+ case BO_And:
+ case BO_Xor:
+ case BO_Or:
+ case BO_LAnd:
+ case BO_LOr:
+ case BO_Assign:
+ case BO_Comma:
+ llvm_unreachable("Not valid compound assignment operators");
+ }
+
+ llvm_unreachable("Unhandled compound assignment operator");
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGLoopInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGLoopInfo.cpp
new file mode 100644
index 0000000..0afe7db
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGLoopInfo.cpp
@@ -0,0 +1,248 @@
+//===---- CGLoopInfo.cpp - LLVM CodeGen for loop metadata -*- C++ -*-------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGLoopInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/Sema/LoopHint.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Metadata.h"
+using namespace clang::CodeGen;
+using namespace llvm;
+
+static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs) {
+
+ if (!Attrs.IsParallel && Attrs.VectorizeWidth == 0 &&
+ Attrs.InterleaveCount == 0 && Attrs.UnrollCount == 0 &&
+ Attrs.VectorizeEnable == LoopAttributes::Unspecified &&
+ Attrs.UnrollEnable == LoopAttributes::Unspecified)
+ return nullptr;
+
+ SmallVector<Metadata *, 4> Args;
+ // Reserve operand 0 for loop id self reference.
+ auto TempNode = MDNode::getTemporary(Ctx, None);
+ Args.push_back(TempNode.get());
+
+ // Setting vectorize.width
+ if (Attrs.VectorizeWidth > 0) {
+ Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.vectorize.width"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt32Ty(Ctx), Attrs.VectorizeWidth))};
+ Args.push_back(MDNode::get(Ctx, Vals));
+ }
+
+ // Setting interleave.count
+ if (Attrs.InterleaveCount > 0) {
+ Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.interleave.count"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt32Ty(Ctx), Attrs.InterleaveCount))};
+ Args.push_back(MDNode::get(Ctx, Vals));
+ }
+
+ // Setting unroll.count
+ if (Attrs.UnrollCount > 0) {
+ Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.unroll.count"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt32Ty(Ctx), Attrs.UnrollCount))};
+ Args.push_back(MDNode::get(Ctx, Vals));
+ }
+
+ // Setting vectorize.enable
+ if (Attrs.VectorizeEnable != LoopAttributes::Unspecified) {
+ Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.vectorize.enable"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt1Ty(Ctx), (Attrs.VectorizeEnable ==
+ LoopAttributes::Enable)))};
+ Args.push_back(MDNode::get(Ctx, Vals));
+ }
+
+ // Setting unroll.full or unroll.disable
+ if (Attrs.UnrollEnable != LoopAttributes::Unspecified) {
+ std::string Name;
+ if (Attrs.UnrollEnable == LoopAttributes::Enable)
+ Name = "llvm.loop.unroll.enable";
+ else if (Attrs.UnrollEnable == LoopAttributes::Full)
+ Name = "llvm.loop.unroll.full";
+ else
+ Name = "llvm.loop.unroll.disable";
+ Metadata *Vals[] = {MDString::get(Ctx, Name)};
+ Args.push_back(MDNode::get(Ctx, Vals));
+ }
+
+ // Set the first operand to itself.
+ MDNode *LoopID = MDNode::get(Ctx, Args);
+ LoopID->replaceOperandWith(0, LoopID);
+ return LoopID;
+}
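+// Illustrative sketch (editor's example, assuming vectorize_width(4) and
+// interleave_count(2) were requested) of the resulting loop metadata:
+//
+//   !0 = distinct !{!0, !1, !2}
+//   !1 = !{!"llvm.loop.vectorize.width", i32 4}
+//   !2 = !{!"llvm.loop.interleave.count", i32 2}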
+
+LoopAttributes::LoopAttributes(bool IsParallel)
+ : IsParallel(IsParallel), VectorizeEnable(LoopAttributes::Unspecified),
+ UnrollEnable(LoopAttributes::Unspecified), VectorizeWidth(0),
+ InterleaveCount(0), UnrollCount(0) {}
+
+void LoopAttributes::clear() {
+ IsParallel = false;
+ VectorizeWidth = 0;
+ InterleaveCount = 0;
+ UnrollCount = 0;
+ VectorizeEnable = LoopAttributes::Unspecified;
+ UnrollEnable = LoopAttributes::Unspecified;
+}
+
+LoopInfo::LoopInfo(BasicBlock *Header, const LoopAttributes &Attrs)
+ : LoopID(nullptr), Header(Header), Attrs(Attrs) {
+ LoopID = createMetadata(Header->getContext(), Attrs);
+}
+
+void LoopInfoStack::push(BasicBlock *Header) {
+ Active.push_back(LoopInfo(Header, StagedAttrs));
+ // Clear the attributes so nested loops do not inherit them.
+ StagedAttrs.clear();
+}
+
+void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
+ ArrayRef<const clang::Attr *> Attrs) {
+
+ // Identify loop hint attributes from Attrs.
+ for (const auto *Attr : Attrs) {
+ const LoopHintAttr *LH = dyn_cast<LoopHintAttr>(Attr);
+
+ // Skip non loop hint attributes
+ if (!LH)
+ continue;
+
+ auto *ValueExpr = LH->getValue();
+ unsigned ValueInt = 1;
+ if (ValueExpr) {
+ llvm::APSInt ValueAPS = ValueExpr->EvaluateKnownConstInt(Ctx);
+ ValueInt = ValueAPS.getSExtValue();
+ }
+
+ LoopHintAttr::OptionType Option = LH->getOption();
+ LoopHintAttr::LoopHintState State = LH->getState();
+ switch (State) {
+ case LoopHintAttr::Disable:
+ switch (Option) {
+ case LoopHintAttr::Vectorize:
+ // Disable vectorization by specifying a width of 1.
+ setVectorizeWidth(1);
+ break;
+ case LoopHintAttr::Interleave:
+ // Disable interleaving by specifying a count of 1.
+ setInterleaveCount(1);
+ break;
+ case LoopHintAttr::Unroll:
+ setUnrollState(LoopAttributes::Disable);
+ break;
+ case LoopHintAttr::UnrollCount:
+ case LoopHintAttr::VectorizeWidth:
+ case LoopHintAttr::InterleaveCount:
+ llvm_unreachable("Options cannot be disabled.");
+ break;
+ }
+ break;
+ case LoopHintAttr::Enable:
+ switch (Option) {
+ case LoopHintAttr::Vectorize:
+ case LoopHintAttr::Interleave:
+ setVectorizeEnable(true);
+ break;
+ case LoopHintAttr::Unroll:
+ setUnrollState(LoopAttributes::Enable);
+ break;
+ case LoopHintAttr::UnrollCount:
+ case LoopHintAttr::VectorizeWidth:
+ case LoopHintAttr::InterleaveCount:
+ llvm_unreachable("Options cannot enabled.");
+ break;
+ }
+ break;
+ case LoopHintAttr::AssumeSafety:
+ switch (Option) {
+ case LoopHintAttr::Vectorize:
+ case LoopHintAttr::Interleave:
+ // Apply "llvm.mem.parallel_loop_access" metadata to load/stores.
+ setParallel(true);
+ setVectorizeEnable(true);
+ break;
+ case LoopHintAttr::Unroll:
+ case LoopHintAttr::UnrollCount:
+ case LoopHintAttr::VectorizeWidth:
+ case LoopHintAttr::InterleaveCount:
+ llvm_unreachable("Options cannot be used to assume mem safety.");
+ break;
+ }
+ break;
+ case LoopHintAttr::Full:
+ switch (Option) {
+ case LoopHintAttr::Unroll:
+ setUnrollState(LoopAttributes::Full);
+ break;
+ case LoopHintAttr::Vectorize:
+ case LoopHintAttr::Interleave:
+ case LoopHintAttr::UnrollCount:
+ case LoopHintAttr::VectorizeWidth:
+ case LoopHintAttr::InterleaveCount:
+ llvm_unreachable("Options cannot be used with 'full' hint.");
+ break;
+ }
+ break;
+ case LoopHintAttr::Numeric:
+ switch (Option) {
+ case LoopHintAttr::VectorizeWidth:
+ setVectorizeWidth(ValueInt);
+ break;
+ case LoopHintAttr::InterleaveCount:
+ setInterleaveCount(ValueInt);
+ break;
+ case LoopHintAttr::UnrollCount:
+ setUnrollCount(ValueInt);
+ break;
+ case LoopHintAttr::Unroll:
+ case LoopHintAttr::Vectorize:
+ case LoopHintAttr::Interleave:
+ llvm_unreachable("Options cannot be assigned a value.");
+ break;
+ }
+ break;
+ }
+ }
+
+ // Apply the staged attributes.
+ push(Header);
+}
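+// For example (illustrative): "#pragma clang loop vectorize_width(4)" arrives
+// in the push() above as a LoopHintAttr with Option == VectorizeWidth and
+// State == Numeric, so it is staged via setVectorizeWidth(4) before push().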
+
+void LoopInfoStack::pop() {
+ assert(!Active.empty() && "No active loops to pop");
+ Active.pop_back();
+}
+
+void LoopInfoStack::InsertHelper(Instruction *I) const {
+ if (!hasInfo())
+ return;
+
+ const LoopInfo &L = getInfo();
+ if (!L.getLoopID())
+ return;
+
+ if (TerminatorInst *TI = dyn_cast<TerminatorInst>(I)) {
+ for (unsigned i = 0, ie = TI->getNumSuccessors(); i < ie; ++i)
+ if (TI->getSuccessor(i) == L.getHeader()) {
+ TI->setMetadata("llvm.loop", L.getLoopID());
+ break;
+ }
+ return;
+ }
+
+ if (L.getAttributes().IsParallel && I->mayReadOrWriteMemory())
+ I->setMetadata("llvm.mem.parallel_loop_access", L.getLoopID());
+}
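+// For example (illustrative): a latch terminator "br label %for.cond" that
+// targets the loop header is tagged with "!llvm.loop !0", and in a parallel
+// loop each memory access is tagged with "!llvm.mem.parallel_loop_access !0".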
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGLoopInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/CGLoopInfo.h
new file mode 100644
index 0000000..ec33906
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGLoopInfo.h
@@ -0,0 +1,158 @@
+//===---- CGLoopInfo.h - LLVM CodeGen for loop metadata -*- C++ -*---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal state used for llvm translation for loop statement
+// metadata.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGLOOPINFO_H
+#define LLVM_CLANG_LIB_CODEGEN_CGLOOPINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+class BasicBlock;
+class Instruction;
+class MDNode;
+} // end namespace llvm
+
+namespace clang {
+class Attr;
+class ASTContext;
+namespace CodeGen {
+
+/// \brief Attributes that may be specified on loops.
+struct LoopAttributes {
+ explicit LoopAttributes(bool IsParallel = false);
+ void clear();
+
+ /// \brief Generate llvm.loop.parallel metadata for loads and stores.
+ bool IsParallel;
+
+ /// \brief State of loop vectorization or unrolling.
+ enum LVEnableState { Unspecified, Enable, Disable, Full };
+
+ /// \brief Value for llvm.loop.vectorize.enable metadata.
+ LVEnableState VectorizeEnable;
+
+ /// \brief Value for llvm.loop.unroll.* metadata (enable, disable, or full).
+ LVEnableState UnrollEnable;
+
+ /// \brief Value for llvm.loop.vectorize.width metadata.
+ unsigned VectorizeWidth;
+
+ /// \brief Value for llvm.loop.interleave.count metadata.
+ unsigned InterleaveCount;
+
+ /// \brief Value for llvm.loop.unroll.count metadata.
+ unsigned UnrollCount;
+};
+
+/// \brief Information used when generating a structured loop.
+class LoopInfo {
+public:
+ /// \brief Construct a new LoopInfo for the loop with entry Header.
+ LoopInfo(llvm::BasicBlock *Header, const LoopAttributes &Attrs);
+
+ /// \brief Get the loop id metadata for this loop.
+ llvm::MDNode *getLoopID() const { return LoopID; }
+
+ /// \brief Get the header block of this loop.
+ llvm::BasicBlock *getHeader() const { return Header; }
+
+ /// \brief Get the set of attributes active for this loop.
+ const LoopAttributes &getAttributes() const { return Attrs; }
+
+private:
+ /// \brief Loop ID metadata.
+ llvm::MDNode *LoopID;
+ /// \brief Header block of this loop.
+ llvm::BasicBlock *Header;
+ /// \brief The attributes for this loop.
+ LoopAttributes Attrs;
+};
+
+/// \brief A stack of loop information corresponding to loop nesting levels.
+/// This stack can be used to prepare attributes which are applied when a loop
+/// is emitted.
+class LoopInfoStack {
+ LoopInfoStack(const LoopInfoStack &) = delete;
+ void operator=(const LoopInfoStack &) = delete;
+
+public:
+ LoopInfoStack() {}
+
+ /// \brief Begin a new structured loop. The set of staged attributes will be
+ /// applied to the loop and then cleared.
+ void push(llvm::BasicBlock *Header);
+
+ /// \brief Begin a new structured loop. Stage attributes from the Attrs list.
+ /// The staged attributes are applied to the loop and then cleared.
+ void push(llvm::BasicBlock *Header, clang::ASTContext &Ctx,
+ llvm::ArrayRef<const Attr *> Attrs);
+
+ /// \brief End the current loop.
+ void pop();
+
+ /// \brief Return the top loop id metadata.
+ llvm::MDNode *getCurLoopID() const { return getInfo().getLoopID(); }
+
+ /// \brief Return true if the top loop is parallel.
+ bool getCurLoopParallel() const {
+ return hasInfo() ? getInfo().getAttributes().IsParallel : false;
+ }
+
+ /// \brief Function called by the CodeGenFunction when an instruction is
+ /// created.
+ void InsertHelper(llvm::Instruction *I) const;
+
+ /// \brief Set the next pushed loop as parallel.
+ void setParallel(bool Enable = true) { StagedAttrs.IsParallel = Enable; }
+
+ /// \brief Set the next pushed loop's 'vectorize.enable' state.
+ void setVectorizeEnable(bool Enable = true) {
+ StagedAttrs.VectorizeEnable =
+ Enable ? LoopAttributes::Enable : LoopAttributes::Disable;
+ }
+
+ /// \brief Set the next pushed loop unroll state.
+ void setUnrollState(const LoopAttributes::LVEnableState &State) {
+ StagedAttrs.UnrollEnable = State;
+ }
+
+ /// \brief Set the vectorize width for the next loop pushed.
+ void setVectorizeWidth(unsigned W) { StagedAttrs.VectorizeWidth = W; }
+
+ /// \brief Set the interleave count for the next loop pushed.
+ void setInterleaveCount(unsigned C) { StagedAttrs.InterleaveCount = C; }
+
+ /// \brief Set the unroll count for the next loop pushed.
+ void setUnrollCount(unsigned C) { StagedAttrs.UnrollCount = C; }
+
+private:
+ /// \brief Returns true if there is LoopInfo on the stack.
+ bool hasInfo() const { return !Active.empty(); }
+ /// \brief Return the LoopInfo for the current loop. HasInfo should be called
+ /// first to ensure LoopInfo is present.
+ const LoopInfo &getInfo() const { return Active.back(); }
+ /// \brief The set of attributes that will be applied to the next pushed loop.
+ LoopAttributes StagedAttrs;
+ /// \brief Stack of active loops.
+ llvm::SmallVector<LoopInfo, 4> Active;
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
new file mode 100644
index 0000000..2d5991b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
@@ -0,0 +1,3103 @@
+//===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Objective-C code as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CGObjCRuntime.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "TargetInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/InlineAsm.h"
+using namespace clang;
+using namespace CodeGen;
+
+typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
+static TryEmitResult
+tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
+static RValue AdjustObjCObjectType(CodeGenFunction &CGF,
+ QualType ET,
+ RValue Result);
+
+/// Given the address of a variable of pointer type, find the correct
+/// null to store into it.
+static llvm::Constant *getNullForVariable(Address addr) {
+ llvm::Type *type = addr.getElementType();
+ return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
+}
+
+/// Emits an instance of NSConstantString representing the object.
+llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
+{
+ llvm::Constant *C =
+ CGM.getObjCRuntime().GenerateConstantString(E->getString()).getPointer();
+ // FIXME: This bitcast should just be made an invariant on the Runtime.
+ return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
+}
+
+/// EmitObjCBoxedExpr - This routine generates code to call
+/// the appropriate expression boxing method. This will either be
+/// one of +[NSNumber numberWith<Type>:], or +[NSString stringWithUTF8String:],
+/// or [NSValue valueWithBytes:objCType:].
+///
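+/// For example (illustrative): @(42) lowers to a class message send of
+/// +[NSNumber numberWithInt:], and @("abc") to +[NSString stringWithUTF8String:].
+///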
+llvm::Value *
+CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
+ // Generate the correct selector for this literal's concrete type.
+ // Get the method.
+ const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
+ const Expr *SubExpr = E->getSubExpr();
+ assert(BoxingMethod && "BoxingMethod is null");
+ assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method");
+ Selector Sel = BoxingMethod->getSelector();
+
+ // Generate a reference to the class pointer, which will be the receiver.
+ // Assumes that the method was introduced in the class that should be
+ // messaged (avoids pulling it out of the result type).
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface();
+ llvm::Value *Receiver = Runtime.GetClass(*this, ClassDecl);
+
+ CallArgList Args;
+ const ParmVarDecl *ArgDecl = *BoxingMethod->param_begin();
+ QualType ArgQT = ArgDecl->getType().getUnqualifiedType();
+
+ // ObjCBoxedExpr supports boxing of structs and unions
+ // via [NSValue valueWithBytes:objCType:]
+ const QualType ValueType(SubExpr->getType().getCanonicalType());
+ if (ValueType->isObjCBoxableRecordType()) {
+ // Emit CodeGen for first parameter
+ // and cast value to correct type
+ Address Temporary = CreateMemTemp(SubExpr->getType());
+ EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
+ Address BitCast = Builder.CreateBitCast(Temporary, ConvertType(ArgQT));
+ Args.add(RValue::get(BitCast.getPointer()), ArgQT);
+
+ // Create char array to store type encoding
+ std::string Str;
+ getContext().getObjCEncodingForType(ValueType, Str);
+ llvm::Constant *GV = CGM.GetAddrOfConstantCString(Str).getPointer();
+
+ // Cast type encoding to correct type
+ const ParmVarDecl *EncodingDecl = BoxingMethod->parameters()[1];
+ QualType EncodingQT = EncodingDecl->getType().getUnqualifiedType();
+ llvm::Value *Cast = Builder.CreateBitCast(GV, ConvertType(EncodingQT));
+
+ Args.add(RValue::get(Cast), EncodingQT);
+ } else {
+ Args.add(EmitAnyExpr(SubExpr), ArgQT);
+ }
+
+ RValue result = Runtime.GenerateMessageSend(
+ *this, ReturnValueSlot(), BoxingMethod->getReturnType(), Sel, Receiver,
+ Args, ClassDecl, BoxingMethod);
+ return Builder.CreateBitCast(result.getScalarVal(),
+ ConvertType(E->getType()));
+}
+
+llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
+ const ObjCMethodDecl *MethodWithObjects) {
+ ASTContext &Context = CGM.getContext();
+ const ObjCDictionaryLiteral *DLE = nullptr;
+ const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E);
+ if (!ALE)
+ DLE = cast<ObjCDictionaryLiteral>(E);
+
+ // Compute the type of the array we're initializing.
+ uint64_t NumElements =
+ ALE ? ALE->getNumElements() : DLE->getNumElements();
+ llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
+ NumElements);
+ QualType ElementType = Context.getObjCIdType().withConst();
+ QualType ElementArrayType
+ = Context.getConstantArrayType(ElementType, APNumElements,
+ ArrayType::Normal, /*IndexTypeQuals=*/0);
+
+ // Allocate the temporary array(s).
+ Address Objects = CreateMemTemp(ElementArrayType, "objects");
+ Address Keys = Address::invalid();
+ if (DLE)
+ Keys = CreateMemTemp(ElementArrayType, "keys");
+
+ // In ARC, we may need to do extra work to keep all the keys and
+ // values alive until after the call.
+ SmallVector<llvm::Value *, 16> NeededObjects;
+ bool TrackNeededObjects =
+ (getLangOpts().ObjCAutoRefCount &&
+ CGM.getCodeGenOpts().OptimizationLevel != 0);
+
+ // Perform the actual initialization of the array(s).
+ for (uint64_t i = 0; i < NumElements; i++) {
+ if (ALE) {
+ // Emit the element and store it to the appropriate array slot.
+ const Expr *Rhs = ALE->getElement(i);
+ LValue LV = MakeAddrLValue(
+ Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
+ ElementType, AlignmentSource::Decl);
+
+ llvm::Value *value = EmitScalarExpr(Rhs);
+ EmitStoreThroughLValue(RValue::get(value), LV, true);
+ if (TrackNeededObjects) {
+ NeededObjects.push_back(value);
+ }
+ } else {
+ // Emit the key and store it to the appropriate array slot.
+ const Expr *Key = DLE->getKeyValueElement(i).Key;
+ LValue KeyLV = MakeAddrLValue(
+ Builder.CreateConstArrayGEP(Keys, i, getPointerSize()),
+ ElementType, AlignmentSource::Decl);
+ llvm::Value *keyValue = EmitScalarExpr(Key);
+ EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true);
+
+ // Emit the value and store it to the appropriate array slot.
+ const Expr *Value = DLE->getKeyValueElement(i).Value;
+ LValue ValueLV = MakeAddrLValue(
+ Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
+ ElementType, AlignmentSource::Decl);
+ llvm::Value *valueValue = EmitScalarExpr(Value);
+ EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true);
+ if (TrackNeededObjects) {
+ NeededObjects.push_back(keyValue);
+ NeededObjects.push_back(valueValue);
+ }
+ }
+ }
+
+ // Generate the argument list.
+ CallArgList Args;
+ ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
+ const ParmVarDecl *argDecl = *PI++;
+ QualType ArgQT = argDecl->getType().getUnqualifiedType();
+ Args.add(RValue::get(Objects.getPointer()), ArgQT);
+ if (DLE) {
+ argDecl = *PI++;
+ ArgQT = argDecl->getType().getUnqualifiedType();
+ Args.add(RValue::get(Keys.getPointer()), ArgQT);
+ }
+ argDecl = *PI;
+ ArgQT = argDecl->getType().getUnqualifiedType();
+ llvm::Value *Count =
+ llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements);
+ Args.add(RValue::get(Count), ArgQT);
+
+ // Generate a reference to the class pointer, which will be the receiver.
+ Selector Sel = MethodWithObjects->getSelector();
+ QualType ResultType = E->getType();
+ const ObjCObjectPointerType *InterfacePointerType
+ = ResultType->getAsObjCInterfacePointerType();
+ ObjCInterfaceDecl *Class
+ = InterfacePointerType->getObjectType()->getInterface();
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ llvm::Value *Receiver = Runtime.GetClass(*this, Class);
+
+ // Generate the message send.
+ RValue result = Runtime.GenerateMessageSend(
+ *this, ReturnValueSlot(), MethodWithObjects->getReturnType(), Sel,
+ Receiver, Args, Class, MethodWithObjects);
+
+ // The above message send needs these objects, but in ARC they are
+ // passed in a buffer that is essentially __unsafe_unretained.
+ // Therefore we must prevent the optimizer from releasing them until
+ // after the call.
+ if (TrackNeededObjects) {
+ EmitARCIntrinsicUse(NeededObjects);
+ }
+
+ return Builder.CreateBitCast(result.getScalarVal(),
+ ConvertType(E->getType()));
+}
+
+llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
+ return EmitObjCCollectionLiteral(E, E->getArrayWithObjectsMethod());
+}
+
+llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral(
+ const ObjCDictionaryLiteral *E) {
+ return EmitObjCCollectionLiteral(E, E->getDictWithObjectsMethod());
+}
+
+/// Emit a selector.
+llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
+ // Untyped selector.
+ // Note that this implementation allows for non-constant strings to be passed
+ // as arguments to @selector(). Currently, the only thing preventing this
+ // behaviour is the type checking in the front end.
+ return CGM.getObjCRuntime().GetSelector(*this, E->getSelector());
+}
+
+llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
+ // FIXME: This should pass the Decl not the name.
+ return CGM.getObjCRuntime().GenerateProtocolRef(*this, E->getProtocol());
+}
+
+/// \brief Adjust the type of an Objective-C object that doesn't match up due
+/// to type erasure at various points, e.g., related result types or the use
+/// of parameterized classes.
+static RValue AdjustObjCObjectType(CodeGenFunction &CGF, QualType ExpT,
+ RValue Result) {
+ if (!ExpT->isObjCRetainableType())
+ return Result;
+
+ // If the converted types are the same, we're done.
+ llvm::Type *ExpLLVMTy = CGF.ConvertType(ExpT);
+ if (ExpLLVMTy == Result.getScalarVal()->getType())
+ return Result;
+
+ // We have applied a substitution. Cast the rvalue appropriately.
+ return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
+ ExpLLVMTy));
+}
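+// For example (illustrative): a message statically typed via a parameterized
+// NSArray<NSString *> may be emitted with an 'id'-typed return value, which
+// is then bitcast here to the converted NSString * type.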
+
+/// Decide whether to extend the lifetime of the receiver of a
+/// returns-inner-pointer message.
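+/// For example (illustrative): -[NSData bytes] is returns_inner_pointer, so
+/// a receiver loaded from an ordinary __strong local is kept alive with a
+/// retain+autorelease around the message send.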
+static bool
+shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
+ switch (message->getReceiverKind()) {
+
+ // For a normal instance message, we should extend unless the
+ // receiver is loaded from a variable with precise lifetime.
+ case ObjCMessageExpr::Instance: {
+ const Expr *receiver = message->getInstanceReceiver();
+
+ // Look through OVEs.
+ if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
+ if (opaque->getSourceExpr())
+ receiver = opaque->getSourceExpr()->IgnoreParens();
+ }
+
+ const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver);
+ if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
+ receiver = ice->getSubExpr()->IgnoreParens();
+
+ // Look through OVEs.
+ if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
+ if (opaque->getSourceExpr())
+ receiver = opaque->getSourceExpr()->IgnoreParens();
+ }
+
+ // Only __strong variables.
+ if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
+ return true;
+
+ // All ivars and fields have precise lifetime.
+ if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver))
+ return false;
+
+ // Otherwise, check for variables.
+ const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr());
+ if (!declRef) return true;
+ const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl());
+ if (!var) return true;
+
+ // All variables have precise lifetime except local variables with
+ // automatic storage duration that aren't specially marked.
+ return (var->hasLocalStorage() &&
+ !var->hasAttr<ObjCPreciseLifetimeAttr>());
+ }
+
+ case ObjCMessageExpr::Class:
+ case ObjCMessageExpr::SuperClass:
+ // It's never necessary for class objects.
+ return false;
+
+ case ObjCMessageExpr::SuperInstance:
+ // We generally assume that 'self' lives throughout a method call.
+ return false;
+ }
+
+ llvm_unreachable("invalid receiver kind");
+}
+
+/// Given an expression of ObjC pointer type, check whether it was
+/// immediately loaded from an ARC __weak l-value.
+static const Expr *findWeakLValue(const Expr *E) {
+ assert(E->getType()->isObjCRetainableType());
+ E = E->IgnoreParens();
+ if (auto CE = dyn_cast<CastExpr>(E)) {
+ if (CE->getCastKind() == CK_LValueToRValue) {
+ if (CE->getSubExpr()->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
+ return CE->getSubExpr();
+ }
+ }
+
+ return nullptr;
+}
+
+RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
+ ReturnValueSlot Return) {
+ // Only the lookup mechanism and first two arguments of the method
+ // implementation vary between runtimes. We can get the receiver and
+ // arguments in generic code.
+
+ bool isDelegateInit = E->isDelegateInitCall();
+
+ const ObjCMethodDecl *method = E->getMethodDecl();
+
+ // If the method is -retain, and the receiver's being loaded from
+ // a __weak variable, peephole the entire operation to objc_loadWeakRetained.
+ if (method && E->getReceiverKind() == ObjCMessageExpr::Instance &&
+ method->getMethodFamily() == OMF_retain) {
+ if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) {
+ LValue lvalue = EmitLValue(lvalueExpr);
+ llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress());
+ return AdjustObjCObjectType(*this, E->getType(), RValue::get(result));
+ }
+ }
+
+ // We don't retain the receiver in delegate init calls, and this is
+ // safe because the receiver value is always loaded from 'self',
+ // which we zero out. We don't want to Block_copy block receivers,
+ // though.
+ bool retainSelf =
+ (!isDelegateInit &&
+ CGM.getLangOpts().ObjCAutoRefCount &&
+ method &&
+ method->hasAttr<NSConsumesSelfAttr>());
+
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ bool isSuperMessage = false;
+ bool isClassMessage = false;
+ ObjCInterfaceDecl *OID = nullptr;
+ // Find the receiver
+ QualType ReceiverType;
+ llvm::Value *Receiver = nullptr;
+ switch (E->getReceiverKind()) {
+ case ObjCMessageExpr::Instance:
+ ReceiverType = E->getInstanceReceiver()->getType();
+ if (retainSelf) {
+ TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
+ E->getInstanceReceiver());
+ Receiver = ter.getPointer();
+ if (ter.getInt()) retainSelf = false;
+ } else
+ Receiver = EmitScalarExpr(E->getInstanceReceiver());
+ break;
+
+ case ObjCMessageExpr::Class: {
+ ReceiverType = E->getClassReceiver();
+ const ObjCObjectType *ObjTy = ReceiverType->getAs<ObjCObjectType>();
+ assert(ObjTy && "Invalid Objective-C class message send");
+ OID = ObjTy->getInterface();
+ assert(OID && "Invalid Objective-C class message send");
+ Receiver = Runtime.GetClass(*this, OID);
+ isClassMessage = true;
+ break;
+ }
+
+ case ObjCMessageExpr::SuperInstance:
+ ReceiverType = E->getSuperType();
+ Receiver = LoadObjCSelf();
+ isSuperMessage = true;
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ ReceiverType = E->getSuperType();
+ Receiver = LoadObjCSelf();
+ isSuperMessage = true;
+ isClassMessage = true;
+ break;
+ }
+
+ if (retainSelf)
+ Receiver = EmitARCRetainNonBlock(Receiver);
+
+ // In ARC, we sometimes want to "extend the lifetime"
+ // (i.e. retain+autorelease) of receivers of returns-inner-pointer
+ // messages.
+ if (getLangOpts().ObjCAutoRefCount && method &&
+ method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
+ shouldExtendReceiverForInnerPointerMessage(E))
+ Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);
+
+ QualType ResultType = method ? method->getReturnType() : E->getType();
+
+ CallArgList Args;
+ EmitCallArgs(Args, method, E->arguments());
+
+ // For delegate init calls in ARC, do an unsafe store of null into
+ // self. This represents the call taking direct ownership of that
+ // value. We have to do this after emitting the other call
+ // arguments because they might also reference self, but we don't
+ // have to worry about any of them modifying self because that would
+ // be an undefined read and write of an object in unordered
+ // expressions.
+ if (isDelegateInit) {
+ assert(getLangOpts().ObjCAutoRefCount &&
+ "delegate init calls should only be marked in ARC");
+
+ // Do an unsafe store of null into self.
+ Address selfAddr =
+ GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
+ Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
+ }
+
+ RValue result;
+ if (isSuperMessage) {
+ // super is only valid in an Objective-C method
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
+ E->getSelector(),
+ OMD->getClassInterface(),
+ isCategoryImpl,
+ Receiver,
+ isClassMessage,
+ Args,
+ method);
+ } else {
+ result = Runtime.GenerateMessageSend(*this, Return, ResultType,
+ E->getSelector(),
+ Receiver, Args, OID,
+ method);
+ }
+
+ // For delegate init calls in ARC, implicitly store the result of
+ // the call back into self. This takes ownership of the value.
+ if (isDelegateInit) {
+ Address selfAddr =
+ GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
+ llvm::Value *newSelf = result.getScalarVal();
+
+ // The delegate return type isn't necessarily a matching type; in
+ // fact, it's quite likely to be 'id'.
+ llvm::Type *selfTy = selfAddr.getElementType();
+ newSelf = Builder.CreateBitCast(newSelf, selfTy);
+
+ Builder.CreateStore(newSelf, selfAddr);
+ }
+
+ return AdjustObjCObjectType(*this, E->getType(), result);
+}
+
+namespace {
+struct FinishARCDealloc final : EHScopeStack::Cleanup {
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);
+
+ const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
+ const ObjCInterfaceDecl *iface = impl->getClassInterface();
+ if (!iface->getSuperClass()) return;
+
+ bool isCategory = isa<ObjCCategoryImplDecl>(impl);
+
+ // Call [super dealloc] if we have a superclass.
+ llvm::Value *self = CGF.LoadObjCSelf();
+
+ CallArgList args;
+ CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
+ CGF.getContext().VoidTy,
+ method->getSelector(),
+ iface,
+ isCategory,
+ self,
+ /*is class msg*/ false,
+ args,
+ method);
+ }
+};
+}
+
+/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
+/// the LLVM function and sets the other context used by
+/// CodeGenFunction.
+void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) {
+ SourceLocation StartLoc = OMD->getLocStart();
+ FunctionArgList args;
+ // Check if we should generate debug info for this method.
+ if (OMD->hasAttr<NoDebugAttr>())
+ DebugInfo = nullptr; // disable debug info indefinitely for this function
+
+ llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);
+
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD);
+ CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
+
+ args.push_back(OMD->getSelfDecl());
+ args.push_back(OMD->getCmdDecl());
+
+ args.append(OMD->param_begin(), OMD->param_end());
+
+ CurGD = OMD;
+ CurEHLocation = OMD->getLocEnd();
+
+ StartFunction(OMD, OMD->getReturnType(), Fn, FI, args,
+ OMD->getLocation(), StartLoc);
+
+ // In ARC, certain methods get an extra cleanup.
+ if (CGM.getLangOpts().ObjCAutoRefCount &&
+ OMD->isInstanceMethod() &&
+ OMD->getSelector().isUnarySelector()) {
+ const IdentifierInfo *ident =
+ OMD->getSelector().getIdentifierInfoForSlot(0);
+ if (ident->isStr("dealloc"))
+ EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
+ }
+}
+
+static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ LValue lvalue, QualType type);
+
+/// Generate an Objective-C method. An Objective-C method is a C function with
+/// its pointer, name, and types registered in the class structure.
+void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
+ StartObjCMethod(OMD, OMD->getClassInterface());
+ PGO.assignRegionCounters(GlobalDecl(OMD), CurFn);
+ assert(isa<CompoundStmt>(OMD->getBody()));
+ incrementProfileCounter(OMD->getBody());
+ EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody()));
+ FinishFunction(OMD->getBodyRBrace());
+}
+
+/// emitStructGetterCall - Call the runtime function to load a property
+/// into the return value slot.
+static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
+ bool isAtomic, bool hasStrong) {
+ ASTContext &Context = CGF.getContext();
+
+ Address src =
+ CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
+ .getAddress();
+
+ // objc_copyStruct (ReturnValue, &structIvar,
+ // sizeof (Type of Ivar), isAtomic, false);
+ CallArgList args;
+
+ Address dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
+ args.add(RValue::get(dest.getPointer()), Context.VoidPtrTy);
+
+ src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
+ args.add(RValue::get(src.getPointer()), Context.VoidPtrTy);
+
+ CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
+ args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
+ args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
+ args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);
+
+ llvm::Value *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
+ CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(Context.VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ fn, ReturnValueSlot(), args);
+}
+
+/// Determine whether the given architecture supports unaligned atomic
+/// accesses. They don't have to be fast, just faster than a function
+/// call and a mutex.
+static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
+ // FIXME: Allow unaligned atomic load/store on x86. (It is not
+ // currently supported by the backend.)
+ return false;
+}
+
+/// Return the maximum size that permits atomic accesses for the given
+/// architecture.
+static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM,
+ llvm::Triple::ArchType arch) {
+ // ARM has 8-byte atomic accesses, but it's not clear whether we
+ // want to rely on them here.
+
+ // In the default case, just assume that any size up to a pointer is
+ // fine given adequate alignment.
+ return CharUnits::fromQuantity(CGM.PointerSizeInBytes);
+}
+
+namespace {
+ class PropertyImplStrategy {
+ public:
+ enum StrategyKind {
+ /// The 'native' strategy is to use the architecture's provided
+ /// reads and writes.
+ Native,
+
+ /// Use objc_setProperty and objc_getProperty.
+ GetSetProperty,
+
+ /// Use objc_setProperty for the setter, but use expression
+ /// evaluation for the getter.
+ SetPropertyAndExpressionGet,
+
+ /// Use objc_copyStruct.
+ CopyStruct,
+
+ /// The 'expression' strategy is to emit normal assignment or
+ /// lvalue-to-rvalue expressions.
+ Expression
+ };
+
+ StrategyKind getKind() const { return StrategyKind(Kind); }
+
+ bool hasStrongMember() const { return HasStrong; }
+ bool isAtomic() const { return IsAtomic; }
+ bool isCopy() const { return IsCopy; }
+
+ CharUnits getIvarSize() const { return IvarSize; }
+ CharUnits getIvarAlignment() const { return IvarAlignment; }
+
+ PropertyImplStrategy(CodeGenModule &CGM,
+ const ObjCPropertyImplDecl *propImpl);
+
+ private:
+ unsigned Kind : 8;
+ unsigned IsAtomic : 1;
+ unsigned IsCopy : 1;
+ unsigned HasStrong : 1;
+
+ CharUnits IvarSize;
+ CharUnits IvarAlignment;
+ };
+}
+
+/// Pick an implementation strategy for the given property synthesis.
+PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
+ const ObjCPropertyImplDecl *propImpl) {
+ const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
+ ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind();
+
+ IsCopy = (setterKind == ObjCPropertyDecl::Copy);
+ IsAtomic = prop->isAtomic();
+ HasStrong = false; // doesn't matter here.
+
+ // Evaluate the ivar's size and alignment.
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+ QualType ivarType = ivar->getType();
+ std::tie(IvarSize, IvarAlignment) =
+ CGM.getContext().getTypeInfoInChars(ivarType);
+
+ // If we have a copy property, we always have to use getProperty/setProperty.
+ // TODO: we could actually use setProperty and an expression for non-atomics.
+ if (IsCopy) {
+ Kind = GetSetProperty;
+ return;
+ }
+
+ // Handle retain.
+ if (setterKind == ObjCPropertyDecl::Retain) {
+ // In GC-only, there's nothing special that needs to be done.
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
+ // fallthrough
+
+ // In ARC, if the property is non-atomic, use expression emission,
+ // which translates to objc_storeStrong. This isn't required, but
+ // it's slightly nicer.
+ } else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
+ // Using standard expression emission for the setter is only
+ // acceptable if the ivar is __strong, which won't be true if
+ // the property is annotated with __attribute__((NSObject)).
+ // TODO: falling all the way back to objc_setProperty here is
+ // just laziness, though; we could still use objc_storeStrong
+ // if we hacked it right.
+ if (ivarType.getObjCLifetime() == Qualifiers::OCL_Strong)
+ Kind = Expression;
+ else
+ Kind = SetPropertyAndExpressionGet;
+ return;
+
+ // Otherwise, we need to at least use setProperty. However, if
+ // the property isn't atomic, we can use normal expression
+ // emission for the getter.
+ } else if (!IsAtomic) {
+ Kind = SetPropertyAndExpressionGet;
+ return;
+
+ // Otherwise, we have to use both setProperty and getProperty.
+ } else {
+ Kind = GetSetProperty;
+ return;
+ }
+ }
+
+ // If we're not atomic, just use expression accesses.
+ if (!IsAtomic) {
+ Kind = Expression;
+ return;
+ }
+
+ // Properties on bitfield ivars need to be emitted using expression
+ // accesses even if they're nominally atomic.
+ if (ivar->isBitField()) {
+ Kind = Expression;
+ return;
+ }
+
+ // GC-qualified or ARC-qualified ivars need to be emitted as
+ // expressions. This actually works out to being atomic anyway,
+ // except for ARC __strong, but that should trigger the above code.
+ if (ivarType.hasNonTrivialObjCLifetime() ||
+ (CGM.getLangOpts().getGC() &&
+ CGM.getContext().getObjCGCAttrKind(ivarType))) {
+ Kind = Expression;
+ return;
+ }
+
+ // Compute whether the ivar has strong members.
+ if (CGM.getLangOpts().getGC())
+ if (const RecordType *recordType = ivarType->getAs<RecordType>())
+ HasStrong = recordType->getDecl()->hasObjectMember();
+
+ // We can never access structs with object members with a native
+ // access, because we need to use write barriers. This is what
+ // objc_copyStruct is for.
+ if (HasStrong) {
+ Kind = CopyStruct;
+ return;
+ }
+
+ // Otherwise, this is target-dependent and based on the size and
+ // alignment of the ivar.
+
+ // If the size of the ivar is not a power of two, give up. We don't
+ // want to get into the business of doing compare-and-swaps.
+ if (!IvarSize.isPowerOfTwo()) {
+ Kind = CopyStruct;
+ return;
+ }
+
+ llvm::Triple::ArchType arch =
+ CGM.getTarget().getTriple().getArch();
+
+ // Most architectures require memory to fit within a single cache
+ // line, so the alignment has to be at least the size of the access.
+ // Otherwise we have to grab a lock.
+ if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
+ Kind = CopyStruct;
+ return;
+ }
+
+ // If the ivar's size exceeds the architecture's maximum atomic
+ // access size, we have to use CopyStruct.
+ if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) {
+ Kind = CopyStruct;
+ return;
+ }
+
+ // Otherwise, we can use native loads and stores.
+ Kind = Native;
+}
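+// For example (illustrative): an atomic 'copy' or 'retain' property falls
+// back to objc_getProperty/objc_setProperty, a nonatomic __strong property
+// under ARC uses plain expression emission, and a small, suitably aligned
+// atomic POD ivar takes the Native unordered load/store path.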
+
+/// \brief Generate an Objective-C property getter function.
+///
+/// The given Decl must be an ObjCImplementationDecl. \@synthesize
+/// is illegal within a category.
+void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID) {
+ llvm::Constant *AtomicHelperFn =
+ CodeGenFunction(CGM).GenerateObjCAtomicGetterCopyHelperFunction(PID);
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
+ assert(OMD && "Invalid call to generate getter (empty method)");
+ StartObjCMethod(OMD, IMP->getClassInterface());
+
+ generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn);
+
+ FinishFunction();
+}
+
+static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
+ const Expr *getter = propImpl->getGetterCXXConstructor();
+ if (!getter) return true;
+
+ // Sema only makes one of these when the ivar has a C++ class type,
+ // so the form is pretty constrained.
+
+ // If the property has a reference type, we might just be binding a
+ // reference, in which case the result will be a gl-value. We should
+ // treat this as a non-trivial operation.
+ if (getter->isGLValue())
+ return false;
+
+ // If we selected a trivial copy-constructor, we're okay.
+ if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter))
+ return (construct->getConstructor()->isTrivial());
+
+ // The constructor might require cleanups (in which case it's never
+ // trivial).
+ assert(isa<ExprWithCleanups>(getter));
+ return false;
+}
+
+/// emitCPPObjectAtomicGetterCall - Call the runtime function to
+/// copy the ivar into the return slot.
+static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
+ llvm::Value *returnAddr,
+ ObjCIvarDecl *ivar,
+ llvm::Constant *AtomicHelperFn) {
+ // objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
+ // AtomicHelperFn);
+ CallArgList args;
+
+ // The 1st argument is the return Slot.
+ args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy);
+
+ // The 2nd argument is the address of the ivar.
+ llvm::Value *ivarAddr =
+ CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
+ CGF.LoadObjCSelf(), ivar, 0).getPointer();
+ ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
+
+ // Third argument is the helper function.
+ args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
+
+ llvm::Value *copyCppAtomicObjectFn =
+ CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
+ CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(CGF.getContext().VoidTy,
+ args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ copyCppAtomicObjectFn, ReturnValueSlot(), args);
+}
+
+void
+CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
+ const ObjCPropertyImplDecl *propImpl,
+ const ObjCMethodDecl *GetterMethodDecl,
+ llvm::Constant *AtomicHelperFn) {
+ // If there's a non-trivial 'get' expression, we just have to emit that.
+ if (!hasTrivialGetExpr(propImpl)) {
+ if (!AtomicHelperFn) {
+ ReturnStmt ret(SourceLocation(), propImpl->getGetterCXXConstructor(),
+ /*nrvo*/ nullptr);
+ EmitReturnStmt(ret);
+ } else {
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+ emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(),
+ ivar, AtomicHelperFn);
+ }
+ return;
+ }
+
+ const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
+ QualType propType = prop->getType();
+ ObjCMethodDecl *getterMethod = prop->getGetterMethodDecl();
+
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+
+ // Pick an implementation strategy.
+ PropertyImplStrategy strategy(CGM, propImpl);
+ switch (strategy.getKind()) {
+ case PropertyImplStrategy::Native: {
+ // We don't need to do anything for a zero-size struct.
+ if (strategy.getIvarSize().isZero())
+ return;
+
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
+
+ // Currently, all atomic accesses have to be through integer
+ // types, so there's no point in trying to pick a prettier type.
+ llvm::Type *bitcastType =
+ llvm::Type::getIntNTy(getLLVMContext(),
+ getContext().toBits(strategy.getIvarSize()));
+ bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay
+
+ // Perform an atomic load. This does not impose ordering constraints.
+ Address ivarAddr = LV.getAddress();
+ ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
+ llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
+ load->setAtomic(llvm::Unordered);
+
+ // Store that value into the return address. Doing this with a
+ // bitcast is likely to produce some pretty ugly IR, but it's not
+ // the *most* terrible thing in the world.
+ Builder.CreateStore(load, Builder.CreateBitCast(ReturnValue, bitcastType));
+
+ // Make sure we don't do an autorelease.
+ AutoreleaseResult = false;
+ return;
+ }
+
+ case PropertyImplStrategy::GetSetProperty: {
+ llvm::Value *getPropertyFn =
+ CGM.getObjCRuntime().GetPropertyGetFunction();
+ if (!getPropertyFn) {
+ CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
+ return;
+ }
+
+ // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
+ // FIXME: Can't this be simpler? This might even be worse than the
+ // corresponding gcc code.
+ llvm::Value *cmd =
+ Builder.CreateLoad(GetAddrOfLocalVar(getterMethod->getCmdDecl()), "cmd");
+ llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
+ llvm::Value *ivarOffset =
+ EmitIvarOffset(classImpl->getClassInterface(), ivar);
+
+ CallArgList args;
+ args.add(RValue::get(self), getContext().getObjCIdType());
+ args.add(RValue::get(cmd), getContext().getObjCSelType());
+ args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
+ args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
+ getContext().BoolTy);
+
+ // FIXME: We shouldn't need to get the function info here, the
+ // runtime already should have computed it to build the function.
+ llvm::Instruction *CallInstruction;
+ RValue RV = EmitCall(
+ getTypes().arrangeFreeFunctionCall(
+ propType, args, FunctionType::ExtInfo(), RequiredArgs::All),
+ getPropertyFn, ReturnValueSlot(), args, CGCalleeInfo(),
+ &CallInstruction);
+ if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction))
+ call->setTailCall();
+
+ // We need to fix the type here. Ivars with copy & retain are
+ // always objects, so we don't need to worry about complex or
+ // aggregate types.
+ RV = RValue::get(Builder.CreateBitCast(
+ RV.getScalarVal(),
+ getTypes().ConvertType(getterMethod->getReturnType())));
+
+ EmitReturnOfRValue(RV, propType);
+
+ // objc_getProperty does an autorelease, so we should suppress ours.
+ AutoreleaseResult = false;
+
+ return;
+ }
+
+ case PropertyImplStrategy::CopyStruct:
+ emitStructGetterCall(*this, ivar, strategy.isAtomic(),
+ strategy.hasStrongMember());
+ return;
+
+ case PropertyImplStrategy::Expression:
+ case PropertyImplStrategy::SetPropertyAndExpressionGet: {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
+
+ QualType ivarType = ivar->getType();
+ switch (getEvaluationKind(ivarType)) {
+ case TEK_Complex: {
+ ComplexPairTy pair = EmitLoadOfComplex(LV, SourceLocation());
+ EmitStoreOfComplex(pair, MakeAddrLValue(ReturnValue, ivarType),
+ /*init*/ true);
+ return;
+ }
+ case TEK_Aggregate:
+ // The return value slot is guaranteed to not be aliased, but
+ // that's not necessarily the same as "on the stack", so
+ // we still potentially need objc_memmove_collectable.
+ EmitAggregateCopy(ReturnValue, LV.getAddress(), ivarType);
+ return;
+ case TEK_Scalar: {
+ llvm::Value *value;
+ if (propType->isReferenceType()) {
+ value = LV.getAddress().getPointer();
+ } else {
+ // For ARC __weak ivars, we want to load the value and hand it to
+ // the final autoreleaseReturnValue.
+ if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
+ if (getLangOpts().ObjCAutoRefCount) {
+ value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
+ } else {
+ value = EmitARCLoadWeak(LV.getAddress());
+ }
+
+ // Otherwise we want to do a simple load, suppressing the
+ // final autorelease.
+ } else {
+ value = EmitLoadOfLValue(LV, SourceLocation()).getScalarVal();
+ AutoreleaseResult = false;
+ }
+
+ value = Builder.CreateBitCast(value, ConvertType(propType));
+ value = Builder.CreateBitCast(
+ value, ConvertType(GetterMethodDecl->getReturnType()));
+ }
+
+ EmitReturnOfRValue(RValue::get(value), propType);
+ return;
+ }
+ }
+ llvm_unreachable("bad evaluation kind");
+ }
+
+ }
+ llvm_unreachable("bad @property implementation strategy!");
+}
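+
+// Roughly, the strategies above map to declarations like the following
+// (the exact choice also depends on target, ivar size and alignment,
+// ARC, and GC mode):
+//   @property (nonatomic) double x;        // Expression: direct accesses
+//   @property (atomic) int n;              // Native: unordered atomic ops
+//   @property (atomic, copy) id name;      // GetSetProperty
+//   @property (nonatomic, retain) id o;    // SetPropertyAndExpressionGet (MRC)
+//   @property (atomic) BigStruct s;        // CopyStruct, for a hypothetical
+//                                          // BigStruct too large for Native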
+
+/// emitStructSetterCall - Call the runtime function to store the value
+/// from the first formal parameter into the given ivar.
+static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
+ ObjCIvarDecl *ivar) {
+ // objc_copyStruct (&structIvar, &Arg,
+ // sizeof (struct something), true, false);
+ CallArgList args;
+
+ // The first argument is the address of the ivar.
+ llvm::Value *ivarAddr = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
+ CGF.LoadObjCSelf(), ivar, 0)
+ .getPointer();
+ ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
+
+ // The second argument is the address of the parameter variable.
+ ParmVarDecl *argVar = *OMD->param_begin();
+ DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
+ VK_LValue, SourceLocation());
+ llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
+ argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
+
+ // The third argument is the size of the type.
+ llvm::Value *size =
+ CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(ivar->getType()));
+ args.add(RValue::get(size), CGF.getContext().getSizeType());
+
+ // The fourth argument is the 'isAtomic' flag.
+ args.add(RValue::get(CGF.Builder.getTrue()), CGF.getContext().BoolTy);
+
+ // The fifth argument is the 'hasStrong' flag.
+ // FIXME: should this really always be false?
+ args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);
+
+ llvm::Value *copyStructFn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
+ CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(CGF.getContext().VoidTy,
+ args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ copyStructFn, ReturnValueSlot(), args);
+}
+
+/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
+/// the value from the first formal parameter into the given ivar, using
+/// the Cpp API for atomic Cpp objects with non-trivial copy assignment.
+static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
+ ObjCMethodDecl *OMD,
+ ObjCIvarDecl *ivar,
+ llvm::Constant *AtomicHelperFn) {
+ // objc_copyCppObjectAtomic (&CppObjectIvar, &Arg,
+ // AtomicHelperFn);
+ CallArgList args;
+
+ // The first argument is the address of the ivar.
+ llvm::Value *ivarAddr =
+ CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
+ CGF.LoadObjCSelf(), ivar, 0).getPointer();
+ ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
+
+ // The second argument is the address of the parameter variable.
+ ParmVarDecl *argVar = *OMD->param_begin();
+ DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
+ VK_LValue, SourceLocation());
+ llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
+ argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
+
+ // The third argument is the helper function.
+ args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
+
+ llvm::Value *copyCppAtomicObjectFn =
+ CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction();
+ CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(CGF.getContext().VoidTy,
+ args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ copyCppAtomicObjectFn, ReturnValueSlot(), args);
+}
+
+static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
+ Expr *setter = PID->getSetterCXXAssignment();
+ if (!setter) return true;
+
+ // Sema only makes one of these when the ivar has a C++ class type,
+ // so the form is pretty constrained.
+
+ // An operator call is trivial if the function it calls is trivial.
+ // This also implies that there's nothing non-trivial going on with
+ // the arguments, because operator= can only be trivial if it's a
+ // synthesized assignment operator and therefore both parameters are
+ // references.
+ if (CallExpr *call = dyn_cast<CallExpr>(setter)) {
+ if (const FunctionDecl *callee
+ = dyn_cast_or_null<FunctionDecl>(call->getCalleeDecl()))
+ if (callee->isTrivial())
+ return true;
+ return false;
+ }
+
+ assert(isa<ExprWithCleanups>(setter));
+ return false;
+}
+
+static bool UseOptimizedSetter(CodeGenModule &CGM) {
+ if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
+ return false;
+ return CGM.getLangOpts().ObjCRuntime.hasOptimizedSetter();
+}
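+
+// When this returns true, the setter below is emitted as a call to one of
+// the specialized runtime entry points (e.g. objc_setProperty_nonatomic or
+// objc_setProperty_atomic_copy) rather than the generic objc_setProperty,
+// avoiding the <is-atomic> and <is-copy> flag arguments at each call site.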
+
+void
+CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
+ const ObjCPropertyImplDecl *propImpl,
+ llvm::Constant *AtomicHelperFn) {
+ const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+ ObjCMethodDecl *setterMethod = prop->getSetterMethodDecl();
+
+ // Just use the setter expression if Sema gave us one and it's
+ // non-trivial.
+ if (!hasTrivialSetExpr(propImpl)) {
+ if (!AtomicHelperFn)
+ // If non-atomic, the assignment is emitted directly.
+ EmitStmt(propImpl->getSetterCXXAssignment());
+ else
+ // If atomic, the assignment is done via a locking API.
+ emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar,
+ AtomicHelperFn);
+ return;
+ }
+
+ PropertyImplStrategy strategy(CGM, propImpl);
+ switch (strategy.getKind()) {
+ case PropertyImplStrategy::Native: {
+ // We don't need to do anything for a zero-size struct.
+ if (strategy.getIvarSize().isZero())
+ return;
+
+ Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());
+
+ LValue ivarLValue =
+ EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
+ Address ivarAddr = ivarLValue.getAddress();
+
+ // Currently, all atomic accesses have to be through integer
+ // types, so there's no point in trying to pick a prettier type.
+ llvm::Type *bitcastType =
+ llvm::Type::getIntNTy(getLLVMContext(),
+ getContext().toBits(strategy.getIvarSize()));
+
+ // Cast both arguments to the chosen operation type.
+ argAddr = Builder.CreateElementBitCast(argAddr, bitcastType);
+ ivarAddr = Builder.CreateElementBitCast(ivarAddr, bitcastType);
+
+ // This bitcast load is likely to cause some nasty IR.
+ llvm::Value *load = Builder.CreateLoad(argAddr);
+
+ // Perform an atomic store. There are no memory ordering requirements.
+ llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr);
+ store->setAtomic(llvm::Unordered);
+ return;
+ }
+
+ case PropertyImplStrategy::GetSetProperty:
+ case PropertyImplStrategy::SetPropertyAndExpressionGet: {
+
+ llvm::Value *setOptimizedPropertyFn = nullptr;
+ llvm::Value *setPropertyFn = nullptr;
+ if (UseOptimizedSetter(CGM)) {
+ // Deployment target is OS X 10.8 / iOS 6.0 or later, and GC is off.
+ setOptimizedPropertyFn =
+ CGM.getObjCRuntime()
+ .GetOptimizedPropertySetFunction(strategy.isAtomic(),
+ strategy.isCopy());
+ if (!setOptimizedPropertyFn) {
+ CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI");
+ return;
+ }
+ } else {
+ setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction();
+ if (!setPropertyFn) {
+ CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
+ return;
+ }
+ }
+
+ // Emit objc_setProperty((id) self, _cmd, offset, arg,
+ // <is-atomic>, <is-copy>).
+ llvm::Value *cmd =
+ Builder.CreateLoad(GetAddrOfLocalVar(setterMethod->getCmdDecl()));
+ llvm::Value *self =
+ Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
+ llvm::Value *ivarOffset =
+ EmitIvarOffset(classImpl->getClassInterface(), ivar);
+ Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());
+ llvm::Value *arg = Builder.CreateLoad(argAddr, "arg");
+ arg = Builder.CreateBitCast(arg, VoidPtrTy);
+
+ CallArgList args;
+ args.add(RValue::get(self), getContext().getObjCIdType());
+ args.add(RValue::get(cmd), getContext().getObjCSelType());
+ if (setOptimizedPropertyFn) {
+ args.add(RValue::get(arg), getContext().getObjCIdType());
+ args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
+ EmitCall(getTypes().arrangeFreeFunctionCall(getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ setOptimizedPropertyFn, ReturnValueSlot(), args);
+ } else {
+ args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
+ args.add(RValue::get(arg), getContext().getObjCIdType());
+ args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
+ getContext().BoolTy);
+ args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
+ getContext().BoolTy);
+ // FIXME: We shouldn't need to get the function info here, the runtime
+ // already should have computed it to build the function.
+ EmitCall(getTypes().arrangeFreeFunctionCall(getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ setPropertyFn, ReturnValueSlot(), args);
+ }
+
+ return;
+ }
+
+ case PropertyImplStrategy::CopyStruct:
+ emitStructSetterCall(*this, setterMethod, ivar);
+ return;
+
+ case PropertyImplStrategy::Expression:
+ break;
+ }
+
+ // Otherwise, fake up some ASTs and emit a normal assignment.
+ ValueDecl *selfDecl = setterMethod->getSelfDecl();
+ DeclRefExpr self(selfDecl, false, selfDecl->getType(),
+ VK_LValue, SourceLocation());
+ ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack,
+ selfDecl->getType(), CK_LValueToRValue, &self,
+ VK_RValue);
+ ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
+ SourceLocation(), SourceLocation(),
+ &selfLoad, true, true);
+
+ ParmVarDecl *argDecl = *setterMethod->param_begin();
+ QualType argType = argDecl->getType().getNonReferenceType();
+ DeclRefExpr arg(argDecl, false, argType, VK_LValue, SourceLocation());
+ ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
+ argType.getUnqualifiedType(), CK_LValueToRValue,
+ &arg, VK_RValue);
+
+ // The property type can differ from the ivar type in some situations with
+ // Objective-C pointer types; in those cases we can always bit-cast the RHS.
+ // The following absurdity is just to ensure well-formed IR.
+ CastKind argCK = CK_NoOp;
+ if (ivarRef.getType()->isObjCObjectPointerType()) {
+ if (argLoad.getType()->isObjCObjectPointerType())
+ argCK = CK_BitCast;
+ else if (argLoad.getType()->isBlockPointerType())
+ argCK = CK_BlockPointerToObjCPointerCast;
+ else
+ argCK = CK_CPointerToObjCPointerCast;
+ } else if (ivarRef.getType()->isBlockPointerType()) {
+ if (argLoad.getType()->isBlockPointerType())
+ argCK = CK_BitCast;
+ else
+ argCK = CK_AnyPointerToBlockPointerCast;
+ } else if (ivarRef.getType()->isPointerType()) {
+ argCK = CK_BitCast;
+ }
+ ImplicitCastExpr argCast(ImplicitCastExpr::OnStack,
+ ivarRef.getType(), argCK, &argLoad,
+ VK_RValue);
+ Expr *finalArg = &argLoad;
+ if (!getContext().hasSameUnqualifiedType(ivarRef.getType(),
+ argLoad.getType()))
+ finalArg = &argCast;
+
+ BinaryOperator assign(&ivarRef, finalArg, BO_Assign,
+ ivarRef.getType(), VK_RValue, OK_Ordinary,
+ SourceLocation(), false);
+ EmitStmt(&assign);
+}
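+
+// For a simple case like
+//   @property (nonatomic) double ratio;   // synthesized against _ratio
+// the code above amounts to emitting the assignment "self->_ratio = ratio"
+// from the ASTs faked up on the stack here.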
+
+/// \brief Generate an Objective-C property setter function.
+///
+/// The given Decl must be an ObjCImplementationDecl. \@synthesize
+/// is illegal within a category.
+void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID) {
+ llvm::Constant *AtomicHelperFn =
+ CodeGenFunction(CGM).GenerateObjCAtomicSetterCopyHelperFunction(PID);
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
+ assert(OMD && "Invalid call to generate setter (empty method)");
+ StartObjCMethod(OMD, IMP->getClassInterface());
+
+ generateObjCSetterBody(IMP, PID, AtomicHelperFn);
+
+ FinishFunction();
+}
+
+namespace {
+ struct DestroyIvar final : EHScopeStack::Cleanup {
+ private:
+ llvm::Value *addr;
+ const ObjCIvarDecl *ivar;
+ CodeGenFunction::Destroyer *destroyer;
+ bool useEHCleanupForArray;
+ public:
+ DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar,
+ CodeGenFunction::Destroyer *destroyer,
+ bool useEHCleanupForArray)
+ : addr(addr), ivar(ivar), destroyer(destroyer),
+ useEHCleanupForArray(useEHCleanupForArray) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ LValue lvalue
+ = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
+ CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
+ flags.isForNormalCleanup() && useEHCleanupForArray);
+ }
+ };
+}
+
+/// Like CodeGenFunction::destroyARCStrong, but do it with a call.
+static void destroyARCStrongWithStore(CodeGenFunction &CGF,
+ Address addr,
+ QualType type) {
+ llvm::Value *null = getNullForVariable(addr);
+ CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
+}
+
+static void emitCXXDestructMethod(CodeGenFunction &CGF,
+ ObjCImplementationDecl *impl) {
+ CodeGenFunction::RunCleanupsScope scope(CGF);
+
+ llvm::Value *self = CGF.LoadObjCSelf();
+
+ const ObjCInterfaceDecl *iface = impl->getClassInterface();
+ for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
+ ivar; ivar = ivar->getNextIvar()) {
+ QualType type = ivar->getType();
+
+ // Check whether the ivar is a destructible type.
+ QualType::DestructionKind dtorKind = type.isDestructedType();
+ if (!dtorKind) continue;
+
+ CodeGenFunction::Destroyer *destroyer = nullptr;
+
+ // Use a call to objc_storeStrong to destroy strong ivars, for the
+ // general benefit of the tools.
+ if (dtorKind == QualType::DK_objc_strong_lifetime) {
+ destroyer = destroyARCStrongWithStore;
+
+ // Otherwise use the default for the destruction kind.
+ } else {
+ destroyer = CGF.getDestroyer(dtorKind);
+ }
+
+ CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind);
+
+ CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer,
+ cleanupKind & EHCleanup);
+ }
+
+ assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?");
+}
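+
+// For example, given an ivar of C++ class type:
+//   struct Logger { ~Logger(); };
+//   @interface Widget : NSObject { Logger _log; } @end
+// the synthesized .cxx_destruct emitted here runs ~Logger() on _log, and
+// under ARC it also clears __strong ivars via objc_storeStrong.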
+
+void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
+ ObjCMethodDecl *MD,
+ bool ctor) {
+ MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
+ StartObjCMethod(MD, IMP->getClassInterface());
+
+ // Emit .cxx_construct.
+ if (ctor) {
+ // Suppress the final autorelease in ARC.
+ AutoreleaseResult = false;
+
+ for (const auto *IvarInit : IMP->inits()) {
+ FieldDecl *Field = IvarInit->getAnyMember();
+ ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
+ LoadObjCSelf(), Ivar, 0);
+ EmitAggExpr(IvarInit->getInit(),
+ AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+ }
+ // constructor returns 'self'.
+ CodeGenTypes &Types = CGM.getTypes();
+ QualType IdTy(CGM.getContext().getObjCIdType());
+ llvm::Value *SelfAsId =
+ Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
+ EmitReturnOfRValue(RValue::get(SelfAsId), IdTy);
+
+ // Emit .cxx_destruct.
+ } else {
+ emitCXXDestructMethod(*this, IMP);
+ }
+ FinishFunction();
+}
+
+llvm::Value *CodeGenFunction::LoadObjCSelf() {
+ VarDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
+ DeclRefExpr DRE(Self, /*is enclosing local*/ (CurFuncDecl != CurCodeDecl),
+ Self->getType(), VK_LValue, SourceLocation());
+ return EmitLoadOfScalar(EmitDeclRefLValue(&DRE), SourceLocation());
+}
+
+QualType CodeGenFunction::TypeOfSelfObject() {
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
+ const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>(
+ getContext().getCanonicalType(selfDecl->getType()));
+ return PTy->getPointeeType();
+}
+
+void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
+ llvm::Constant *EnumerationMutationFn =
+ CGM.getObjCRuntime().EnumerationMutationFunction();
+
+ if (!EnumerationMutationFn) {
+ CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
+ return;
+ }
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI)
+ DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
+
+ // The local variable comes into scope immediately.
+ AutoVarEmission variable = AutoVarEmission::invalid();
+ if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement()))
+ variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl()));
+
+ JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end");
+
+ // Fast enumeration state.
+ QualType StateTy = CGM.getObjCFastEnumerationStateType();
+ Address StatePtr = CreateMemTemp(StateTy, "state.ptr");
+ EmitNullInitialization(StatePtr, StateTy);
+
+ // Number of elements in the items array.
+ static const unsigned NumItems = 16;
+
+ // Fetch the countByEnumeratingWithState:objects:count: selector.
+ IdentifierInfo *II[] = {
+ &CGM.getContext().Idents.get("countByEnumeratingWithState"),
+ &CGM.getContext().Idents.get("objects"),
+ &CGM.getContext().Idents.get("count")
+ };
+ Selector FastEnumSel =
+ CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]);
+
+ QualType ItemsTy =
+ getContext().getConstantArrayType(getContext().getObjCIdType(),
+ llvm::APInt(32, NumItems),
+ ArrayType::Normal, 0);
+ Address ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
+
+ // Emit the collection pointer. In ARC, we do a retain.
+ llvm::Value *Collection;
+ if (getLangOpts().ObjCAutoRefCount) {
+ Collection = EmitARCRetainScalarExpr(S.getCollection());
+
+ // Enter a cleanup to do the release.
+ EmitObjCConsumeObject(S.getCollection()->getType(), Collection);
+ } else {
+ Collection = EmitScalarExpr(S.getCollection());
+ }
+
+ // The 'continue' label needs to appear within the cleanup for the
+ // collection object.
+ JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next");
+
+ // Send it our message:
+ CallArgList Args;
+
+ // The first argument is a temporary of the enumeration-state type.
+ Args.add(RValue::get(StatePtr.getPointer()),
+ getContext().getPointerType(StateTy));
+
+ // The second argument is a temporary array with space for NumItems
+ // pointers. We'll actually be loading elements from the array
+ // pointer written into the control state; this buffer is so that
+ // collections that *aren't* backed by arrays can still queue up
+ // batches of elements.
+ Args.add(RValue::get(ItemsPtr.getPointer()),
+ getContext().getPointerType(ItemsTy));
+
+ // The third argument is the capacity of that temporary array.
+ llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
+ llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems);
+ Args.add(RValue::get(Count), getContext().UnsignedLongTy);
+
+ // Start the enumeration.
+ RValue CountRV =
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().UnsignedLongTy,
+ FastEnumSel,
+ Collection, Args);
+
+ // The initial number of objects that were returned in the buffer.
+ llvm::Value *initialBufferLimit = CountRV.getScalarVal();
+
+ llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty");
+ llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit");
+
+ llvm::Value *zero = llvm::Constant::getNullValue(UnsignedLongLTy);
+
+ // If the limit pointer was zero to begin with, the collection is
+ // empty; skip all this. Set the branch weight assuming this has the same
+ // probability of exiting the loop as any other loop exit.
+ uint64_t EntryCount = getCurrentProfileCount();
+ Builder.CreateCondBr(
+ Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"), EmptyBB,
+ LoopInitBB,
+ createProfileWeights(EntryCount, getProfileCount(S.getBody())));
+
+ // Otherwise, initialize the loop.
+ EmitBlock(LoopInitBB);
+
+ // Save the initial mutations value. This is the value at an
+ // address that was written into the state object by
+ // countByEnumeratingWithState:objects:count:.
+ Address StateMutationsPtrPtr = Builder.CreateStructGEP(
+ StatePtr, 2, 2 * getPointerSize(), "mutationsptr.ptr");
+ llvm::Value *StateMutationsPtr
+ = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
+
+ llvm::Value *initialMutations =
+ Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(),
+ "forcoll.initial-mutations");
+
+ // Start looping. This is the point we return to whenever we have a
+ // fresh, non-empty batch of objects.
+ llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody");
+ EmitBlock(LoopBodyBB);
+
+ // The current index into the buffer.
+ llvm::PHINode *index = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.index");
+ index->addIncoming(zero, LoopInitBB);
+
+ // The current buffer size.
+ llvm::PHINode *count = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.count");
+ count->addIncoming(initialBufferLimit, LoopInitBB);
+
+ incrementProfileCounter(&S);
+
+ // Check whether the mutations value has changed from where it was
+ // at start. StateMutationsPtr should actually be invariant between
+ // refreshes.
+ StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
+ llvm::Value *currentMutations
+ = Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(),
+ "statemutations");
+
+ llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated");
+ llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated");
+
+ Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations),
+ WasNotMutatedBB, WasMutatedBB);
+
+ // If so, call the enumeration-mutation function.
+ EmitBlock(WasMutatedBB);
+ llvm::Value *V =
+ Builder.CreateBitCast(Collection,
+ ConvertType(getContext().getObjCIdType()));
+ CallArgList Args2;
+ Args2.add(RValue::get(V), getContext().getObjCIdType());
+ // FIXME: We shouldn't need to get the function info here, the runtime already
+ // should have computed it to build the function.
+ EmitCall(CGM.getTypes().arrangeFreeFunctionCall(getContext().VoidTy, Args2,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ EnumerationMutationFn, ReturnValueSlot(), Args2);
+
+ // Otherwise, or if the mutation function returns, just continue.
+ EmitBlock(WasNotMutatedBB);
+
+ // Initialize the element variable.
+ RunCleanupsScope elementVariableScope(*this);
+ bool elementIsVariable;
+ LValue elementLValue;
+ QualType elementType;
+ if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
+ // Initialize the variable, in case it's a __block variable or something.
+ EmitAutoVarInit(variable);
+
+ const VarDecl* D = cast<VarDecl>(SD->getSingleDecl());
+ DeclRefExpr tempDRE(const_cast<VarDecl*>(D), false, D->getType(),
+ VK_LValue, SourceLocation());
+ elementLValue = EmitLValue(&tempDRE);
+ elementType = D->getType();
+ elementIsVariable = true;
+
+ if (D->isARCPseudoStrong())
+ elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone);
+ } else {
+ elementLValue = LValue(); // suppress warning
+ elementType = cast<Expr>(S.getElement())->getType();
+ elementIsVariable = false;
+ }
+ llvm::Type *convertedElementType = ConvertType(elementType);
+
+ // Fetch the buffer out of the enumeration state.
+ // TODO: this pointer should actually be invariant between
+ // refreshes, which would help us do certain loop optimizations.
+ Address StateItemsPtr = Builder.CreateStructGEP(
+ StatePtr, 1, getPointerSize(), "stateitems.ptr");
+ llvm::Value *EnumStateItems =
+ Builder.CreateLoad(StateItemsPtr, "stateitems");
+
+ // Fetch the value at the current index from the buffer.
+ llvm::Value *CurrentItemPtr =
+ Builder.CreateGEP(EnumStateItems, index, "currentitem.ptr");
+ llvm::Value *CurrentItem =
+ Builder.CreateAlignedLoad(CurrentItemPtr, getPointerAlign());
+
+ // Cast that value to the right type.
+ CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType,
+ "currentitem");
+
+ // Make sure we have an l-value. Yes, this gets evaluated every
+ // time through the loop.
+ if (!elementIsVariable) {
+ elementLValue = EmitLValue(cast<Expr>(S.getElement()));
+ EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue);
+ } else {
+ EmitScalarInit(CurrentItem, elementLValue);
+ }
+
+ // If we do have an element variable, this assignment is the end of
+ // its initialization.
+ if (elementIsVariable)
+ EmitAutoVarCleanups(variable);
+
+ // Perform the loop body, setting up break and continue labels.
+ BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
+ {
+ RunCleanupsScope Scope(*this);
+ EmitStmt(S.getBody());
+ }
+ BreakContinueStack.pop_back();
+
+ // Destroy the element variable now.
+ elementVariableScope.ForceCleanup();
+
+ // Check whether there are more elements.
+ EmitBlock(AfterBody.getBlock());
+
+ llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch");
+
+ // First we check in the local buffer.
+ llvm::Value *indexPlusOne
+ = Builder.CreateAdd(index, llvm::ConstantInt::get(UnsignedLongLTy, 1));
+
+ // If we haven't overrun the buffer yet, we can continue.
+ // Set the branch weights based on the simplifying assumption that this is
+ // like a while-loop, i.e., ignoring that the false branch fetches more
+ // elements and then returns to the loop.
+ Builder.CreateCondBr(
+ Builder.CreateICmpULT(indexPlusOne, count), LoopBodyBB, FetchMoreBB,
+ createProfileWeights(getProfileCount(S.getBody()), EntryCount));
+
+ index->addIncoming(indexPlusOne, AfterBody.getBlock());
+ count->addIncoming(count, AfterBody.getBlock());
+
+ // Otherwise, we have to fetch more elements.
+ EmitBlock(FetchMoreBB);
+
+ CountRV =
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().UnsignedLongTy,
+ FastEnumSel,
+ Collection, Args);
+
+ // If we got a zero count, we're done.
+ llvm::Value *refetchCount = CountRV.getScalarVal();
+
+ // (note that the message send might split FetchMoreBB)
+ index->addIncoming(zero, Builder.GetInsertBlock());
+ count->addIncoming(refetchCount, Builder.GetInsertBlock());
+
+ Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero),
+ EmptyBB, LoopBodyBB);
+
+ // No more elements.
+ EmitBlock(EmptyBB);
+
+ if (!elementIsVariable) {
+ // If the element was not a declaration, set it to be null.
+
+ llvm::Value *null = llvm::Constant::getNullValue(convertedElementType);
+ elementLValue = EmitLValue(cast<Expr>(S.getElement()));
+ EmitStoreThroughLValue(RValue::get(null), elementLValue);
+ }
+
+ if (DI)
+ DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
+
+ // Leave the cleanup we entered in ARC.
+ if (getLangOpts().ObjCAutoRefCount)
+ PopCleanupBlock();
+
+ EmitBlock(LoopEnd.getBlock());
+}
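+
+// All of the above lowers the source-level loop
+//   for (id obj in collection) { /* body */ }
+// into repeated countByEnumeratingWithState:objects:count: sends that
+// refill the 16-slot buffer, with the mutation check guarding each batch.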
+
+void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
+ CGM.getObjCRuntime().EmitTryStmt(*this, S);
+}
+
+void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
+ CGM.getObjCRuntime().EmitThrowStmt(*this, S);
+}
+
+void CodeGenFunction::EmitObjCAtSynchronizedStmt(
+ const ObjCAtSynchronizedStmt &S) {
+ CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
+}
+
+namespace {
+ struct CallObjCRelease final : EHScopeStack::Cleanup {
+ CallObjCRelease(llvm::Value *object) : object(object) {}
+ llvm::Value *object;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ // Releases at the end of the full-expression are imprecise.
+ CGF.EmitARCRelease(object, ARCImpreciseLifetime);
+ }
+ };
+}
+
+/// Produce the code for a CK_ARCConsumeObject. Does a primitive
+/// release at the end of the full-expression.
+llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type,
+ llvm::Value *object) {
+ // If we're in a conditional branch, we need to make the cleanup
+ // conditional.
+ pushFullExprCleanup<CallObjCRelease>(getARCCleanupKind(), object);
+ return object;
+}
+
+llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
+ llvm::Value *value) {
+ return EmitARCRetainAutorelease(type, value);
+}
+
+/// Given a number of pointers, inform the optimizer that they're
+/// being intrinsically used up until this point in the program.
+void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
+ llvm::Constant *&fn = CGM.getObjCEntrypoints().clang_arc_use;
+ if (!fn) {
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(CGM.VoidTy, None, true);
+ fn = CGM.CreateRuntimeFunction(fnType, "clang.arc.use");
+ }
+
+ // This isn't really a "runtime" function, but as an intrinsic it
+ // doesn't really matter as long as we align things up.
+ EmitNounwindRuntimeCall(fn, values);
+}
+
+static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
+ llvm::FunctionType *type,
+ StringRef fnName) {
+ llvm::Constant *fn = CGM.CreateRuntimeFunction(type, fnName);
+
+ if (llvm::Function *f = dyn_cast<llvm::Function>(fn)) {
+ // If the target runtime doesn't naturally support ARC, emit weak
+ // references to the runtime support library. We don't really
+ // permit this to fail, but we need a particular relocation style.
+ if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
+ f->setLinkage(llvm::Function::ExternalWeakLinkage);
+ } else if (fnName == "objc_retain" || fnName == "objc_release") {
+ // If we have Native ARC, set nonlazybind attribute for these APIs for
+ // performance.
+ f->addFnAttr(llvm::Attribute::NonLazyBind);
+ }
+ }
+
+ return fn;
+}
+
+/// Perform an operation having the signature
+/// i8* (i8*)
+/// where a null input causes a no-op and returns null.
+static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
+ llvm::Value *value,
+ llvm::Constant *&fn,
+ StringRef fnName,
+ bool isTailCall = false) {
+ if (isa<llvm::ConstantPointerNull>(value)) return value;
+
+ if (!fn) {
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ // Cast the argument to 'id'.
+ llvm::Type *origType = value->getType();
+ value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
+
+ // Call the function.
+ llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value);
+ if (isTailCall)
+ call->setTailCall();
+
+ // Cast the result back to the original type.
+ return CGF.Builder.CreateBitCast(call, origType);
+}
+
+/// Perform an operation having the following signature:
+/// i8* (i8**)
+static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
+ Address addr,
+ llvm::Constant *&fn,
+ StringRef fnName) {
+ if (!fn) {
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrPtrTy, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ // Cast the argument to 'id*'.
+ llvm::Type *origType = addr.getElementType();
+ addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
+
+ // Call the function.
+ llvm::Value *result = CGF.EmitNounwindRuntimeCall(fn, addr.getPointer());
+
+ // Cast the result back to a dereference of the original type.
+ if (origType != CGF.Int8PtrTy)
+ result = CGF.Builder.CreateBitCast(result, origType);
+
+ return result;
+}
+
+/// Perform an operation having the following signature:
+/// i8* (i8**, i8*)
+static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
+ Address addr,
+ llvm::Value *value,
+ llvm::Constant *&fn,
+ StringRef fnName,
+ bool ignored) {
+ assert(addr.getElementType() == value->getType());
+
+ if (!fn) {
+ llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrTy };
+
+ llvm::FunctionType *fnType
+ = llvm::FunctionType::get(CGF.Int8PtrTy, argTypes, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ llvm::Type *origType = value->getType();
+
+ llvm::Value *args[] = {
+ CGF.Builder.CreateBitCast(addr.getPointer(), CGF.Int8PtrPtrTy),
+ CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy)
+ };
+ llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args);
+
+ if (ignored) return nullptr;
+
+ return CGF.Builder.CreateBitCast(result, origType);
+}
+
+/// Perform an operation having the following signature:
+/// void (i8**, i8**)
+static void emitARCCopyOperation(CodeGenFunction &CGF,
+ Address dst,
+ Address src,
+ llvm::Constant *&fn,
+ StringRef fnName) {
+ assert(dst.getType() == src.getType());
+
+ if (!fn) {
+ llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrPtrTy };
+
+ llvm::FunctionType *fnType
+ = llvm::FunctionType::get(CGF.Builder.getVoidTy(), argTypes, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ llvm::Value *args[] = {
+ CGF.Builder.CreateBitCast(dst.getPointer(), CGF.Int8PtrPtrTy),
+ CGF.Builder.CreateBitCast(src.getPointer(), CGF.Int8PtrPtrTy)
+ };
+ CGF.EmitNounwindRuntimeCall(fn, args);
+}
+
+/// Produce the code to do a retain. Based on the type, calls one of:
+/// call i8* \@objc_retain(i8* %value)
+/// call i8* \@objc_retainBlock(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
+ if (type->isBlockPointerType())
+ return EmitARCRetainBlock(value, /*mandatory*/ false);
+ else
+ return EmitARCRetainNonBlock(value);
+}
+
+/// Retain the given object, with normal retain semantics.
+/// call i8* \@objc_retain(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getObjCEntrypoints().objc_retain,
+ "objc_retain");
+}
+
+/// Retain the given block, with _Block_copy semantics.
+/// call i8* \@objc_retainBlock(i8* %value)
+///
+/// \param mandatory - If false, emit the call with metadata
+/// indicating that it's okay for the optimizer to eliminate this call
+/// if it can prove that the block never escapes except down the stack.
+llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
+ bool mandatory) {
+ llvm::Value *result
+ = emitARCValueOperation(*this, value,
+ CGM.getObjCEntrypoints().objc_retainBlock,
+ "objc_retainBlock");
+
+ // If the copy isn't mandatory, add !clang.arc.copy_on_escape to
+ // tell the optimizer that it doesn't need to do this copy if the
+ // block doesn't escape, where being passed as an argument doesn't
+ // count as escaping.
+ if (!mandatory && isa<llvm::Instruction>(result)) {
+ llvm::CallInst *call
+ = cast<llvm::CallInst>(result->stripPointerCasts());
+ assert(call->getCalledValue() == CGM.getObjCEntrypoints().objc_retainBlock);
+
+ call->setMetadata("clang.arc.copy_on_escape",
+ llvm::MDNode::get(Builder.getContext(), None));
+ }
+
+ return result;
+}
+
+/// Retain the given object which is the result of a function call.
+/// call i8* \@objc_retainAutoreleasedReturnValue(i8* %value)
+///
+/// Yes, this function name is one character away from a different
+/// call with completely different semantics.
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
+ // Fetch the void(void) inline asm which marks that we're going to
+ // retain the autoreleased return value.
+ llvm::InlineAsm *&marker
+ = CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker;
+ if (!marker) {
+ StringRef assembly
+ = CGM.getTargetCodeGenInfo()
+ .getARCRetainAutoreleasedReturnValueMarker();
+
+ // If we have an empty assembly string, there's nothing to do.
+ if (assembly.empty()) {
+
+ // Otherwise, at -O0, build an inline asm that we're going to call
+ // in a moment.
+ } else if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ llvm::FunctionType *type =
+ llvm::FunctionType::get(VoidTy, /*variadic*/false);
+
+ marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true);
+
+ // If we're at -O1 and above, we don't want to litter the code
+ // with this marker yet, so leave a breadcrumb for the ARC
+ // optimizer to pick up.
+ } else {
+ llvm::NamedMDNode *metadata =
+ CGM.getModule().getOrInsertNamedMetadata(
+ "clang.arc.retainAutoreleasedReturnValueMarker");
+ assert(metadata->getNumOperands() <= 1);
+ if (metadata->getNumOperands() == 0) {
+ metadata->addOperand(llvm::MDNode::get(
+ getLLVMContext(), llvm::MDString::get(getLLVMContext(), assembly)));
+ }
+ }
+ }
+
+ // Call the marker asm if we made one, which we do only at -O0.
+ if (marker)
+ Builder.CreateCall(marker);
+
+ return emitARCValueOperation(*this, value,
+ CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue,
+ "objc_retainAutoreleasedReturnValue");
+}
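+
+// The marker is a target-chosen no-op (e.g. "mov r7, r7" on some ARM
+// targets) that the runtime looks for between the call and the retain in
+// order to recognize the autoreleased-return-value handshake.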
+
+/// Release the given object.
+/// call void \@objc_release(i8* %value)
+void CodeGenFunction::EmitARCRelease(llvm::Value *value,
+ ARCPreciseLifetime_t precise) {
+ if (isa<llvm::ConstantPointerNull>(value)) return;
+
+ llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_release;
+ if (!fn) {
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_release");
+ }
+
+ // Cast the argument to 'id'.
+ value = Builder.CreateBitCast(value, Int8PtrTy);
+
+ // Call objc_release.
+ llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value);
+
+ if (precise == ARCImpreciseLifetime) {
+ call->setMetadata("clang.imprecise_release",
+ llvm::MDNode::get(Builder.getContext(), None));
+ }
+}
+
+/// Destroy a __strong variable.
+///
+/// At -O0, emit a call to store 'null' into the address;
+/// instrumenting tools prefer this because the address is exposed,
+/// but it's relatively cumbersome to optimize.
+///
+/// At -O1 and above, just load and call objc_release.
+///
+/// call void \@objc_storeStrong(i8** %addr, i8* null)
+void CodeGenFunction::EmitARCDestroyStrong(Address addr,
+ ARCPreciseLifetime_t precise) {
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ llvm::Value *null = getNullForVariable(addr);
+ EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
+ return;
+ }
+
+ llvm::Value *value = Builder.CreateLoad(addr);
+ EmitARCRelease(value, precise);
+}
+
+/// Store into a strong object. Always calls this:
+/// call void \@objc_storeStrong(i8** %addr, i8* %value)
+llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr,
+ llvm::Value *value,
+ bool ignored) {
+ assert(addr.getElementType() == value->getType());
+
+ llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_storeStrong;
+ if (!fn) {
+ llvm::Type *argTypes[] = { Int8PtrPtrTy, Int8PtrTy };
+ llvm::FunctionType *fnType
+ = llvm::FunctionType::get(Builder.getVoidTy(), argTypes, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_storeStrong");
+ }
+
+ llvm::Value *args[] = {
+ Builder.CreateBitCast(addr.getPointer(), Int8PtrPtrTy),
+ Builder.CreateBitCast(value, Int8PtrTy)
+ };
+ EmitNounwindRuntimeCall(fn, args);
+
+ if (ignored) return nullptr;
+ return value;
+}
+
+/// Store into a strong object. Sometimes calls this:
+/// call void \@objc_storeStrong(i8** %addr, i8* %value)
+/// Other times, breaks it down into components.
+llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
+ llvm::Value *newValue,
+ bool ignored) {
+ QualType type = dst.getType();
+ bool isBlock = type->isBlockPointerType();
+
+ // Use a store barrier at -O0 unless this is a block type or the
+ // lvalue is inadequately aligned.
+ if (shouldUseFusedARCCalls() &&
+ !isBlock &&
+ (dst.getAlignment().isZero() ||
+ dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) {
+ return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored);
+ }
+
+ // Otherwise, split it out.
+
+ // Retain the new value.
+ newValue = EmitARCRetain(type, newValue);
+
+ // Read the old value.
+ llvm::Value *oldValue = EmitLoadOfScalar(dst, SourceLocation());
+
+ // Store. We do this before the release so that any deallocs won't
+ // see the old value.
+ EmitStoreOfScalar(newValue, dst);
+
+ // Finally, release the old value.
+ EmitARCRelease(oldValue, dst.isARCPreciseLifetime());
+
+ return newValue;
+}
+
+/// Autorelease the given object.
+/// call i8* \@objc_autorelease(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getObjCEntrypoints().objc_autorelease,
+ "objc_autorelease");
+}
+
+/// Autorelease the given object.
+/// call i8* \@objc_autoreleaseReturnValue(i8* %value)
+llvm::Value *
+CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getObjCEntrypoints().objc_autoreleaseReturnValue,
+ "objc_autoreleaseReturnValue",
+ /*isTailCall*/ true);
+}
+
+/// Do a fused retain/autorelease of the given object.
+/// call i8* \@objc_retainAutoreleaseReturnValue(i8* %value)
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue,
+ "objc_retainAutoreleaseReturnValue",
+ /*isTailCall*/ true);
+}
+
+/// Do a fused retain/autorelease of the given object.
+/// call i8* \@objc_retainAutorelease(i8* %value)
+/// or
+/// %retain = call i8* \@objc_retainBlock(i8* %value)
+/// call i8* \@objc_autorelease(i8* %retain)
+llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
+ llvm::Value *value) {
+ if (!type->isBlockPointerType())
+ return EmitARCRetainAutoreleaseNonBlock(value);
+
+ if (isa<llvm::ConstantPointerNull>(value)) return value;
+
+ llvm::Type *origType = value->getType();
+ value = Builder.CreateBitCast(value, Int8PtrTy);
+ value = EmitARCRetainBlock(value, /*mandatory*/ true);
+ value = EmitARCAutorelease(value);
+ return Builder.CreateBitCast(value, origType);
+}
+
+/// Do a fused retain/autorelease of the given object.
+/// call i8* \@objc_retainAutorelease(i8* %value)
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getObjCEntrypoints().objc_retainAutorelease,
+ "objc_retainAutorelease");
+}
+
+/// i8* \@objc_loadWeak(i8** %addr)
+/// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
+llvm::Value *CodeGenFunction::EmitARCLoadWeak(Address addr) {
+ return emitARCLoadOperation(*this, addr,
+ CGM.getObjCEntrypoints().objc_loadWeak,
+ "objc_loadWeak");
+}
+
+/// i8* \@objc_loadWeakRetained(i8** %addr)
+llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(Address addr) {
+ return emitARCLoadOperation(*this, addr,
+ CGM.getObjCEntrypoints().objc_loadWeakRetained,
+ "objc_loadWeakRetained");
+}
+
+/// i8* \@objc_storeWeak(i8** %addr, i8* %value)
+/// Returns %value.
+llvm::Value *CodeGenFunction::EmitARCStoreWeak(Address addr,
+ llvm::Value *value,
+ bool ignored) {
+ return emitARCStoreOperation(*this, addr, value,
+ CGM.getObjCEntrypoints().objc_storeWeak,
+ "objc_storeWeak", ignored);
+}
+
+/// i8* \@objc_initWeak(i8** %addr, i8* %value)
+/// Returns %value. %addr is known to not have a current weak entry.
+/// Essentially equivalent to:
+/// *addr = nil; objc_storeWeak(addr, value);
+void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) {
+ // If we're initializing to null, just write null to memory; no need
+ // to get the runtime involved. But don't do this if optimization
+ // is enabled, because accounting for this would make the optimizer
+ // much more complicated.
+ if (isa<llvm::ConstantPointerNull>(value) &&
+ CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ Builder.CreateStore(value, addr);
+ return;
+ }
+
+ emitARCStoreOperation(*this, addr, value,
+ CGM.getObjCEntrypoints().objc_initWeak,
+ "objc_initWeak", /*ignored*/ true);
+}
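+
+// For example, under ARC:
+//   __weak id w = obj;   // lowers to objc_initWeak(&w, obj)
+//   w = other;           // lowers to objc_storeWeak(&w, other), which must
+//                        // first clear the existing weak entry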
+
+/// void \@objc_destroyWeak(i8** %addr)
+/// Essentially objc_storeWeak(addr, nil).
+void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
+ llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_destroyWeak;
+ if (!fn) {
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrPtrTy, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_destroyWeak");
+ }
+
+ // Cast the argument to 'id*'.
+ addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
+
+ EmitNounwindRuntimeCall(fn, addr.getPointer());
+}
+
+/// void \@objc_moveWeak(i8** %dest, i8** %src)
+/// Disregards the current value in %dest. Leaves %src pointing to nothing.
+/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
+void CodeGenFunction::EmitARCMoveWeak(Address dst, Address src) {
+ emitARCCopyOperation(*this, dst, src,
+ CGM.getObjCEntrypoints().objc_moveWeak,
+ "objc_moveWeak");
+}
+
+/// void \@objc_copyWeak(i8** %dest, i8** %src)
+/// Disregards the current value in %dest. Essentially
+/// objc_release(objc_initWeak(dest, objc_loadWeakRetained(src)))
+void CodeGenFunction::EmitARCCopyWeak(Address dst, Address src) {
+ emitARCCopyOperation(*this, dst, src,
+ CGM.getObjCEntrypoints().objc_copyWeak,
+ "objc_copyWeak");
+}
+
+/// Produce the code to do an objc_autoreleasepool_push.
+/// call i8* \@objc_autoreleasePoolPush(void)
+llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
+ llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush;
+ if (!fn) {
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Int8PtrTy, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPush");
+ }
+
+ return EmitNounwindRuntimeCall(fn);
+}
+
+/// Produce the code to pop an autorelease pool.
+/// call void \@objc_autoreleasePoolPop(i8* %ptr)
+void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
+ assert(value->getType() == Int8PtrTy);
+
+ llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop;
+ if (!fn) {
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
+
+ // We don't want to use a weak import here; instead we should not
+ // fall into this path.
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPop");
+ }
+
+ // objc_autoreleasePoolPop can throw.
+ EmitRuntimeCallOrInvoke(fn, value);
+}
+
+/// Produce the code for the MRR version of objc_autoreleasepool_push,
+/// which is: [[NSAutoreleasePool alloc] init];
+/// where alloc is declared as: + (id) alloc; in the NSAutoreleasePool class,
+/// and init is declared as: - (id) init; in its NSObject superclass.
+///
+llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(*this);
+ // [NSAutoreleasePool alloc]
+ IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
+ Selector AllocSel = getContext().Selectors.getSelector(0, &II);
+ CallArgList Args;
+ RValue AllocRV =
+ Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().getObjCIdType(),
+ AllocSel, Receiver, Args);
+
+ // [Receiver init]
+ Receiver = AllocRV.getScalarVal();
+ II = &CGM.getContext().Idents.get("init");
+ Selector InitSel = getContext().Selectors.getSelector(0, &II);
+ RValue InitRV =
+ Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().getObjCIdType(),
+ InitSel, Receiver, Args);
+ return InitRV.getScalarVal();
+}
+
+/// Produce the code for the MRR version of an autorelease-pool pop:
+/// [tmp drain];
+void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
+ IdentifierInfo *II = &CGM.getContext().Idents.get("drain");
+ Selector DrainSel = getContext().Selectors.getSelector(0, &II);
+ CallArgList Args;
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().VoidTy, DrainSel, Arg, Args);
+}
+
+void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
+ Address addr,
+ QualType type) {
+ CGF.EmitARCDestroyStrong(addr, ARCPreciseLifetime);
+}
+
+void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
+ Address addr,
+ QualType type) {
+ CGF.EmitARCDestroyStrong(addr, ARCImpreciseLifetime);
+}
+
+void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
+ Address addr,
+ QualType type) {
+ CGF.EmitARCDestroyWeak(addr);
+}
+
+namespace {
+ struct CallObjCAutoreleasePoolObject final : EHScopeStack::Cleanup {
+ llvm::Value *Token;
+
+ CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ CGF.EmitObjCAutoreleasePoolPop(Token);
+ }
+ };
+ struct CallObjCMRRAutoreleasePoolObject final : EHScopeStack::Cleanup {
+ llvm::Value *Token;
+
+ CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ CGF.EmitObjCMRRAutoreleasePoolPop(Token);
+ }
+ };
+}
+
+void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr);
+ else
+ EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
+}
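+
+// Together, these roughly implement "@autoreleasepool { ... }": push the
+// pool (the ARC runtime call, or [[NSAutoreleasePool alloc] init] under
+// MRR), emit the body, and let the pushed cleanup pop it on scope exit
+// (objc_autoreleasePoolPop or [pool drain]).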
+
+static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ LValue lvalue,
+ QualType type) {
+ switch (type.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Autoreleasing:
+ return TryEmitResult(CGF.EmitLoadOfLValue(lvalue,
+ SourceLocation()).getScalarVal(),
+ false);
+
+ case Qualifiers::OCL_Weak:
+ return TryEmitResult(CGF.EmitARCLoadWeakRetained(lvalue.getAddress()),
+ true);
+ }
+
+ llvm_unreachable("impossible lifetime!");
+}
+
+static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ const Expr *e) {
+ e = e->IgnoreParens();
+ QualType type = e->getType();
+
+ // If we're loading retained from a __strong xvalue, we can avoid
+ // an extra retain/release pair by zeroing out the source of this
+ // "move" operation.
+ if (e->isXValue() &&
+ !type.isConstQualified() &&
+ type.getObjCLifetime() == Qualifiers::OCL_Strong) {
+ // Emit the lvalue.
+ LValue lv = CGF.EmitLValue(e);
+
+ // Load the object pointer.
+ llvm::Value *result = CGF.EmitLoadOfLValue(lv,
+ SourceLocation()).getScalarVal();
+
+ // Set the source pointer to NULL.
+ CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv);
+
+ return TryEmitResult(result, true);
+ }
+
+ // As a very special optimization, in ARC++, if the l-value is the
+ // result of a non-volatile assignment, do a simple retain of the
+ // result of the call to objc_storeWeak instead of reloading.
+ if (CGF.getLangOpts().CPlusPlus &&
+ !type.isVolatileQualified() &&
+ type.getObjCLifetime() == Qualifiers::OCL_Weak &&
+ isa<BinaryOperator>(e) &&
+ cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
+ return TryEmitResult(CGF.EmitScalarExpr(e), false);
+
+ return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type);
+}
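+
+// The xvalue case above is what lets a "move" such as
+//   __strong id x = std::move(strongMember);
+// skip a retain/release pair: the source is loaded once and then nulled
+// out rather than retained here and released later.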
+
+static llvm::Value *emitARCRetainAfterCall(CodeGenFunction &CGF,
+ llvm::Value *value);
+
+/// Given an expression that is some sort of call (one which does
+/// not return retained), emit a retain following it.
+static llvm::Value *emitARCRetainCall(CodeGenFunction &CGF, const Expr *e) {
+ llvm::Value *value = CGF.EmitScalarExpr(e);
+ return emitARCRetainAfterCall(CGF, value);
+}
+
+static llvm::Value *emitARCRetainAfterCall(CodeGenFunction &CGF,
+ llvm::Value *value) {
+ if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
+ CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
+
+ // Place the retain immediately following the call.
+ CGF.Builder.SetInsertPoint(call->getParent(),
+ ++llvm::BasicBlock::iterator(call));
+ value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
+
+ CGF.Builder.restoreIP(ip);
+ return value;
+ } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
+ CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
+
+ // Place the retain at the beginning of the normal destination block.
+ llvm::BasicBlock *BB = invoke->getNormalDest();
+ CGF.Builder.SetInsertPoint(BB, BB->begin());
+ value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
+
+ CGF.Builder.restoreIP(ip);
+ return value;
+
+ // Bitcasts can arise because of related-result returns. Rewrite
+ // the operand.
+ } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
+ llvm::Value *operand = bitcast->getOperand(0);
+ operand = emitARCRetainAfterCall(CGF, operand);
+ bitcast->setOperand(0, operand);
+ return bitcast;
+
+ // Generic fall-back case.
+ } else {
+ // Retain using the non-block variant: we never need to do a copy
+ // of a block that's been returned to us.
+ return CGF.EmitARCRetainNonBlock(value);
+ }
+}
+
+/// Determine whether it might be important to emit a separate
+/// objc_retain_block on the result of the given expression, or
+/// whether it's okay to just emit it in a +1 context.
+static bool shouldEmitSeparateBlockRetain(const Expr *e) {
+ assert(e->getType()->isBlockPointerType());
+ e = e->IgnoreParens();
+
+ // For future goodness, emit block expressions directly in +1
+ // contexts if we can.
+ if (isa<BlockExpr>(e))
+ return false;
+
+ if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
+ switch (cast->getCastKind()) {
+ // Emitting these operations in +1 contexts is goodness.
+ case CK_LValueToRValue:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCProduceObject:
+ return false;
+
+ // These operations preserve a block type.
+ case CK_NoOp:
+ case CK_BitCast:
+ return shouldEmitSeparateBlockRetain(cast->getSubExpr());
+
+ // These operations are known to be bad (or haven't been considered).
+ case CK_AnyPointerToBlockPointerCast:
+ default:
+ return true;
+ }
+ }
+
+ return true;
+}
+
+/// Try to emit a PseudoObjectExpr at +1.
+///
+/// This massively duplicates emitPseudoObjectRValue.
+static TryEmitResult tryEmitARCRetainPseudoObject(CodeGenFunction &CGF,
+ const PseudoObjectExpr *E) {
+ SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
+
+ // Find the result expression.
+ const Expr *resultExpr = E->getResultExpr();
+ assert(resultExpr);
+ TryEmitResult result;
+
+ for (PseudoObjectExpr::const_semantics_iterator
+ i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
+ const Expr *semantic = *i;
+
+ // If this semantic expression is an opaque value, bind it
+ // to the result of its source expression.
+ if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
+ typedef CodeGenFunction::OpaqueValueMappingData OVMA;
+ OVMA opaqueData;
+
+ // If this semantic is the result of the pseudo-object
+ // expression, try to evaluate the source as +1.
+ if (ov == resultExpr) {
+ assert(!OVMA::shouldBindAsLValue(ov));
+ result = tryEmitARCRetainScalarExpr(CGF, ov->getSourceExpr());
+ opaqueData = OVMA::bind(CGF, ov, RValue::get(result.getPointer()));
+
+ // Otherwise, just bind it.
+ } else {
+ opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
+ }
+ opaques.push_back(opaqueData);
+
+ // Otherwise, if the expression is the result, evaluate it
+ // and remember the result.
+ } else if (semantic == resultExpr) {
+ result = tryEmitARCRetainScalarExpr(CGF, semantic);
+
+ // Otherwise, evaluate the expression in an ignored context.
+ } else {
+ CGF.EmitIgnoredExpr(semantic);
+ }
+ }
+
+ // Unbind all the opaques now.
+ for (unsigned i = 0, e = opaques.size(); i != e; ++i)
+ opaques[i].unbind(CGF);
+
+ return result;
+}
+
+static TryEmitResult
+tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
+ // We should *never* see a nested full-expression here, because if
+ // we fail to emit at +1, our caller must not retain after we close
+ // out the full-expression.
+ assert(!isa<ExprWithCleanups>(e));
+
+ // The desired result type, if it differs from the type of the
+ // ultimate opaque expression.
+ llvm::Type *resultType = nullptr;
+
+ while (true) {
+ e = e->IgnoreParens();
+
+ // There's a break at the end of this if-chain; anything
+ // that wants to keep looping has to explicitly continue.
+ if (const CastExpr *ce = dyn_cast<CastExpr>(e)) {
+ switch (ce->getCastKind()) {
+ // No-op casts don't change the type, so we just ignore them.
+ case CK_NoOp:
+ e = ce->getSubExpr();
+ continue;
+
+ case CK_LValueToRValue: {
+ TryEmitResult loadResult
+ = tryEmitARCRetainLoadOfScalar(CGF, ce->getSubExpr());
+ if (resultType) {
+ llvm::Value *value = loadResult.getPointer();
+ value = CGF.Builder.CreateBitCast(value, resultType);
+ loadResult.setPointer(value);
+ }
+ return loadResult;
+ }
+
+ // These casts can change the type, so remember that and
+ // soldier on. We only need to remember the outermost such
+ // cast, though.
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_BitCast:
+ if (!resultType)
+ resultType = CGF.ConvertType(ce->getType());
+ e = ce->getSubExpr();
+ assert(e->getType()->hasPointerRepresentation());
+ continue;
+
+ // For consumptions, just emit the subexpression and thus elide
+ // the retain/release pair.
+ case CK_ARCConsumeObject: {
+ llvm::Value *result = CGF.EmitScalarExpr(ce->getSubExpr());
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ // Block extends are net +0. Naively, we could just recurse on
+ // the subexpression, but actually we need to ensure that the
+ // value is copied as a block, so there's a little filter here.
+ case CK_ARCExtendBlockObject: {
+ llvm::Value *result; // will be a +0 value
+
+ // If we can't safely assume the sub-expression will produce a
+ // block-copied value, emit the sub-expression at +0.
+ if (shouldEmitSeparateBlockRetain(ce->getSubExpr())) {
+ result = CGF.EmitScalarExpr(ce->getSubExpr());
+
+ // Otherwise, try to emit the sub-expression at +1 recursively.
+ } else {
+ TryEmitResult subresult
+ = tryEmitARCRetainScalarExpr(CGF, ce->getSubExpr());
+ result = subresult.getPointer();
+
+ // If that produced a retained value, just use that,
+ // possibly casting down.
+ if (subresult.getInt()) {
+ if (resultType)
+ result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ // Otherwise it's +0.
+ }
+
+ // Retain the object as a block, then cast down.
+ result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true);
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ // For reclaims, emit the subexpression as a retained call and
+ // skip the consumption.
+ case CK_ARCReclaimReturnedObject: {
+ llvm::Value *result = emitARCRetainCall(CGF, ce->getSubExpr());
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ default:
+ break;
+ }
+
+ // Skip __extension__.
+ } else if (const UnaryOperator *op = dyn_cast<UnaryOperator>(e)) {
+ if (op->getOpcode() == UO_Extension) {
+ e = op->getSubExpr();
+ continue;
+ }
+
+ // For calls and message sends, use the retained-call logic.
+ // Delegate inits are a special case in that they're the only
+ // returns-retained expression that *isn't* surrounded by
+ // a consume.
+ } else if (isa<CallExpr>(e) ||
+ (isa<ObjCMessageExpr>(e) &&
+ !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) {
+ llvm::Value *result = emitARCRetainCall(CGF, e);
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+
+ // Look through pseudo-object expressions.
+ } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
+ TryEmitResult result
+ = tryEmitARCRetainPseudoObject(CGF, pseudo);
+ if (resultType) {
+ llvm::Value *value = result.getPointer();
+ value = CGF.Builder.CreateBitCast(value, resultType);
+ result.setPointer(value);
+ }
+ return result;
+ }
+
+ // Conservatively halt the search at any other expression kind.
+ break;
+ }
+
+ // We didn't find an obvious production, so emit what we've got and
+ // tell the caller that we didn't manage to retain.
+ llvm::Value *result = CGF.EmitScalarExpr(e);
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, false);
+}
+
+static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ LValue lvalue,
+ QualType type) {
+ TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type);
+ llvm::Value *value = result.getPointer();
+ if (!result.getInt())
+ value = CGF.EmitARCRetain(type, value);
+ return value;
+}
+
+/// EmitARCRetainScalarExpr - Semantically equivalent to
+/// EmitARCRetainObject(e->getType(), EmitScalarExpr(e)), but making a
+/// best-effort attempt to peephole expressions that naturally produce
+/// retained objects.
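+/// For example, "x = [obj mutableCopy]" produces a +1 value straight from
+/// the message send, so the peephole can hand that value over directly
+/// instead of retaining the result again.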
+llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
+ // The retain needs to happen within the full-expression.
+ if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
+ enterFullExpression(cleanups);
+ RunCleanupsScope scope(*this);
+ return EmitARCRetainScalarExpr(cleanups->getSubExpr());
+ }
+
+ TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
+ llvm::Value *value = result.getPointer();
+ if (!result.getInt())
+ value = EmitARCRetain(e->getType(), value);
+ return value;
+}
+
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
+ // The retain needs to happen within the full-expression.
+ if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
+ enterFullExpression(cleanups);
+ RunCleanupsScope scope(*this);
+ return EmitARCRetainAutoreleaseScalarExpr(cleanups->getSubExpr());
+ }
+
+ TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
+ llvm::Value *value = result.getPointer();
+ if (result.getInt())
+ value = EmitARCAutorelease(value);
+ else
+ value = EmitARCRetainAutorelease(e->getType(), value);
+ return value;
+}
+
+llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
+ llvm::Value *result;
+ bool doRetain;
+
+ if (shouldEmitSeparateBlockRetain(e)) {
+ result = EmitScalarExpr(e);
+ doRetain = true;
+ } else {
+ TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e);
+ result = subresult.getPointer();
+ doRetain = !subresult.getInt();
+ }
+
+ if (doRetain)
+ result = EmitARCRetainBlock(result, /*mandatory*/ true);
+ return EmitObjCConsumeObject(e->getType(), result);
+}
+
+llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
+ // In ARC, retain and autorelease the expression.
+ if (getLangOpts().ObjCAutoRefCount) {
+ // Do so before running any cleanups for the full-expression.
+ // EmitARCRetainAutoreleaseScalarExpr does this for us.
+ return EmitARCRetainAutoreleaseScalarExpr(expr);
+ }
+
+ // Otherwise, use the normal scalar-expression emission. The
+ // exception machinery doesn't do anything special with the
+ // exception like retaining it, so there's no safety associated with
+ // only running cleanups after the throw has started, and when it
+ // matters it tends to be substantially inferior code.
+ return EmitScalarExpr(expr);
+}
+
+std::pair<LValue,llvm::Value*>
+CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
+ bool ignored) {
+ // Evaluate the RHS first.
+ TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
+ llvm::Value *value = result.getPointer();
+
+ bool hasImmediateRetain = result.getInt();
+
+ // If we didn't emit a retained object, and the l-value is of block
+ // type, then we need to emit the block-retain immediately in case
+ // it invalidates the l-value.
+ if (!hasImmediateRetain && e->getType()->isBlockPointerType()) {
+ value = EmitARCRetainBlock(value, /*mandatory*/ false);
+ hasImmediateRetain = true;
+ }
+
+ LValue lvalue = EmitLValue(e->getLHS());
+
+ // If the RHS was emitted retained, expand this.
+ if (hasImmediateRetain) {
+ llvm::Value *oldValue = EmitLoadOfScalar(lvalue, SourceLocation());
+ EmitStoreOfScalar(value, lvalue);
+ EmitARCRelease(oldValue, lvalue.isARCPreciseLifetime());
+ } else {
+ value = EmitARCStoreStrong(lvalue, value, ignored);
+ }
+
+ return std::pair<LValue,llvm::Value*>(lvalue, value);
+}
+
+std::pair<LValue,llvm::Value*>
+CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
+ llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
+ LValue lvalue = EmitLValue(e->getLHS());
+
+ EmitStoreOfScalar(value, lvalue);
+
+ return std::pair<LValue,llvm::Value*>(lvalue, value);
+}
+
+void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
+ const ObjCAutoreleasePoolStmt &ARPS) {
+ const Stmt *subStmt = ARPS.getSubStmt();
+ const CompoundStmt &S = cast<CompoundStmt>(*subStmt);
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI)
+ DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());
+
+ // Keep track of the current cleanup stack depth.
+ RunCleanupsScope Scope(*this);
+ if (CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
+ llvm::Value *token = EmitObjCAutoreleasePoolPush();
+ EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
+ } else {
+ llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
+ EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
+ }
+
+ for (const auto *I : S.body())
+ EmitStmt(I);
+
+ if (DI)
+ DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
+}
+
+/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
+/// make sure it survives garbage collection until this point.
+void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
+ // We just use an empty inline-assembly fragment.
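+ // Passing the pointer as an "r"-constrained input to an empty asm block
+ // with declared side effects forces it to be treated as live here, while
+ // emitting no actual machine instructions.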
+ llvm::FunctionType *extenderType
+ = llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All);
+ llvm::Value *extender
+ = llvm::InlineAsm::get(extenderType,
+ /* assembly */ "",
+ /* constraints */ "r",
+ /* side effects */ true);
+
+ object = Builder.CreateBitCast(object, VoidPtrTy);
+ EmitNounwindRuntimeCall(extender, object);
+}
+
+/// GenerateObjCAtomicSetterCopyHelperFunction - Given a C++ object type with
+/// a non-trivial copy assignment operator, produce the following helper
+/// function:
+/// static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
+///
+llvm::Constant *
+CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID) {
+ if (!getLangOpts().CPlusPlus ||
+ !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
+ return nullptr;
+ QualType Ty = PID->getPropertyIvarDecl()->getType();
+ if (!Ty->isRecordType())
+ return nullptr;
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ if ((!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic)))
+ return nullptr;
+ llvm::Constant *HelperFn = nullptr;
+ if (hasTrivialSetExpr(PID))
+ return nullptr;
+ assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null");
+ if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
+ return HelperFn;
+
+ ASTContext &C = getContext();
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__assign_helper_atomic_property_");
+ FunctionDecl *FD = FunctionDecl::Create(C,
+ C.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, C.VoidTy,
+ nullptr, SC_Static,
+ false,
+ false);
+
+ QualType DestTy = C.getPointerType(Ty);
+ QualType SrcTy = Ty;
+ SrcTy.addConst();
+ SrcTy = C.getPointerType(SrcTy);
+
+ FunctionArgList args;
+ ImplicitParamDecl dstDecl(getContext(), FD, SourceLocation(), nullptr, DestTy);
+ args.push_back(&dstDecl);
+ ImplicitParamDecl srcDecl(getContext(), FD, SourceLocation(), nullptr, SrcTy);
+ args.push_back(&srcDecl);
+
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
+ C.VoidTy, args, FunctionType::ExtInfo(), RequiredArgs::All);
+
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__assign_helper_atomic_property_",
+ &CGM.getModule());
+
+ CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);
+
+ StartFunction(FD, C.VoidTy, Fn, FI, args);
+
+ DeclRefExpr DstExpr(&dstDecl, false, DestTy,
+ VK_RValue, SourceLocation());
+ UnaryOperator DST(&DstExpr, UO_Deref, DestTy->getPointeeType(),
+ VK_LValue, OK_Ordinary, SourceLocation());
+
+ DeclRefExpr SrcExpr(&srcDecl, false, SrcTy,
+ VK_RValue, SourceLocation());
+ UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
+ VK_LValue, OK_Ordinary, SourceLocation());
+
+ Expr *Args[2] = { &DST, &SRC };
+ CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
+ CXXOperatorCallExpr TheCall(C, OO_Equal, CalleeExp->getCallee(),
+ Args, DestTy->getPointeeType(),
+ VK_LValue, SourceLocation(), false);
+
+ EmitStmt(&TheCall);
+
+ FinishFunction();
+ HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
+ CGM.setAtomicSetterHelperFnMap(Ty, HelperFn);
+ return HelperFn;
+}
+
+llvm::Constant *
+CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID) {
+ if (!getLangOpts().CPlusPlus ||
+ !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
+ return nullptr;
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ QualType Ty = PD->getType();
+ if (!Ty->isRecordType())
+ return nullptr;
+ if ((!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic)))
+ return nullptr;
+ llvm::Constant *HelperFn = nullptr;
+
+ if (hasTrivialGetExpr(PID))
+ return nullptr;
+ assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null");
+ if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
+ return HelperFn;
+
+
+ ASTContext &C = getContext();
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__copy_helper_atomic_property_");
+ FunctionDecl *FD = FunctionDecl::Create(C,
+ C.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, C.VoidTy,
+ nullptr, SC_Static,
+ false,
+ false);
+
+ QualType DestTy = C.getPointerType(Ty);
+ QualType SrcTy = Ty;
+ SrcTy.addConst();
+ SrcTy = C.getPointerType(SrcTy);
+
+ FunctionArgList args;
+ ImplicitParamDecl dstDecl(getContext(), FD, SourceLocation(), nullptr, DestTy);
+ args.push_back(&dstDecl);
+ ImplicitParamDecl srcDecl(getContext(), FD, SourceLocation(), nullptr, SrcTy);
+ args.push_back(&srcDecl);
+
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
+ C.VoidTy, args, FunctionType::ExtInfo(), RequiredArgs::All);
+
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__copy_helper_atomic_property_", &CGM.getModule());
+
+ CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);
+
+ StartFunction(FD, C.VoidTy, Fn, FI, args);
+
+ DeclRefExpr SrcExpr(&srcDecl, false, SrcTy,
+ VK_RValue, SourceLocation());
+
+ UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
+ VK_LValue, OK_Ordinary, SourceLocation());
+
+ CXXConstructExpr *CXXConstExpr =
+ cast<CXXConstructExpr>(PID->getGetterCXXConstructor());
+
+ SmallVector<Expr*, 4> ConstructorArgs;
+ ConstructorArgs.push_back(&SRC);
+ ConstructorArgs.append(std::next(CXXConstExpr->arg_begin()),
+ CXXConstExpr->arg_end());
+
+ CXXConstructExpr *TheCXXConstructExpr =
+ CXXConstructExpr::Create(C, Ty, SourceLocation(),
+ CXXConstExpr->getConstructor(),
+ CXXConstExpr->isElidable(),
+ ConstructorArgs,
+ CXXConstExpr->hadMultipleCandidates(),
+ CXXConstExpr->isListInitialization(),
+ CXXConstExpr->isStdInitListInitialization(),
+ CXXConstExpr->requiresZeroInitialization(),
+ CXXConstExpr->getConstructionKind(),
+ SourceRange());
+
+ DeclRefExpr DstExpr(&dstDecl, false, DestTy,
+ VK_RValue, SourceLocation());
+
+ RValue DV = EmitAnyExpr(&DstExpr);
+ CharUnits Alignment
+ = getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
+ EmitAggExpr(TheCXXConstructExpr,
+ AggValueSlot::forAddr(Address(DV.getScalarVal(), Alignment),
+ Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+
+ FinishFunction();
+ HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
+ CGM.setAtomicGetterHelperFnMap(Ty, HelperFn);
+ return HelperFn;
+}
+
+llvm::Value *
+CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) {
+ // Get selectors for copy/autorelease.
+ IdentifierInfo *CopyID = &getContext().Idents.get("copy");
+ Selector CopySelector =
+ getContext().Selectors.getNullarySelector(CopyID);
+ IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease");
+ Selector AutoreleaseSelector =
+ getContext().Selectors.getNullarySelector(AutoreleaseID);
+
+ // Emit calls to copy and autorelease.
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ llvm::Value *Val = Block;
+ RValue Result;
+ Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ Ty, CopySelector,
+ Val, CallArgList(), nullptr, nullptr);
+ Val = Result.getScalarVal();
+ Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ Ty, AutoreleaseSelector,
+ Val, CallArgList(), nullptr, nullptr);
+ Val = Result.getScalarVal();
+ return Val;
+}
+
+
+CGObjCRuntime::~CGObjCRuntime() {}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
new file mode 100644
index 0000000..f0af3e9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -0,0 +1,2896 @@
+//===------- CGObjCGNU.cpp - Emit LLVM Code from ASTs for a Module --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides Objective-C code generation targeting the GNU runtime. The
+// class in this file generates structures used by the GNU Objective-C runtime
+// library. These structures are defined in objc/objc.h and objc/objc-api.h in
+// the GNU runtime distribution.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+#include "CGCleanup.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Compiler.h"
+#include <cstdarg>
+
+
+using namespace clang;
+using namespace CodeGen;
+
+
+namespace {
+/// Class that lazily initialises the runtime function. Avoids inserting the
+/// types and the function declaration into a module if they're not used, and
+/// avoids constructing the type more than once if it's used more than once.
+class LazyRuntimeFunction {
+ CodeGenModule *CGM;
+ llvm::FunctionType *FTy;
+ const char *FunctionName;
+ llvm::Constant *Function;
+
+public:
+ /// Constructor leaves this class uninitialized, because it is intended to
+ /// be used as a field in another class and not all of the types that are
+ /// used as arguments will necessarily be available at construction time.
+ LazyRuntimeFunction()
+ : CGM(nullptr), FunctionName(nullptr), Function(nullptr) {}
+
+ /// Initialises the lazy function with the name, return type, and the types
+ /// of the arguments.
+ LLVM_END_WITH_NULL
+ void init(CodeGenModule *Mod, const char *name, llvm::Type *RetTy, ...) {
+ CGM = Mod;
+ FunctionName = name;
+ Function = nullptr;
+ std::vector<llvm::Type *> ArgTys;
+ va_list Args;
+ va_start(Args, RetTy);
+ while (llvm::Type *ArgTy = va_arg(Args, llvm::Type *))
+ ArgTys.push_back(ArgTy);
+ va_end(Args);
+ FTy = llvm::FunctionType::get(RetTy, ArgTys, false);
+ }
+
+ llvm::FunctionType *getType() { return FTy; }
+
+ /// Overloaded cast operator, allows the class to be implicitly cast to an
+ /// LLVM constant.
+ operator llvm::Constant *() {
+ if (!Function) {
+ if (!FunctionName)
+ return nullptr;
+ Function =
+ cast<llvm::Constant>(CGM->CreateRuntimeFunction(FTy, FunctionName));
+ }
+ return Function;
+ }
+ operator llvm::Function *() {
+ return cast<llvm::Function>((llvm::Constant *)*this);
+ }
+};
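+
+// Typical usage, as in the runtime subclasses below: declare a
+// LazyRuntimeFunction field, call init() once with the function name and
+// argument types, and let the implicit conversion emit the declaration on
+// first use:
+//   LazyRuntimeFunction MsgLookupFn;
+//   MsgLookupFn.init(&CGM, "objc_msg_lookup", IMPTy, IdTy, SelectorTy,
+//                    nullptr);
+//   llvm::Constant *Fn = MsgLookupFn; // declaration created here if needed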
+
+
+/// GNU Objective-C runtime code generation. This class implements the parts of
+/// Objective-C support that are specific to the GNU family of runtimes (GCC,
+/// GNUstep and ObjFW).
+class CGObjCGNU : public CGObjCRuntime {
+protected:
+ /// The LLVM module into which output is inserted
+ llvm::Module &TheModule;
+ /// struct objc_super. Used for sending messages to super. This structure
+ /// contains the receiver (object) and the expected class.
+ llvm::StructType *ObjCSuperTy;
+ /// struct objc_super*. The type of the argument to the superclass message
+ /// lookup functions.
+ llvm::PointerType *PtrToObjCSuperTy;
+ /// LLVM type for selectors. Opaque pointer (i8*) unless a header declaring
+ /// SEL is included somewhere, in which case it will be whatever type that
+ /// header declares, most likely {i8*, i8*}.
+ llvm::PointerType *SelectorTy;
+ /// LLVM i8 type. Cached here to avoid repeatedly getting it in all of the
+ /// places where it's used.
+ llvm::IntegerType *Int8Ty;
+ /// Pointer to i8 - LLVM type of char*, for all of the places where the
+ /// runtime needs to deal with C strings.
+ llvm::PointerType *PtrToInt8Ty;
+ /// Instance Method Pointer type. This is a pointer to a function that takes,
+ /// at a minimum, an object and a selector, and is the generic type for
+ /// Objective-C methods. Due to differences between variadic / non-variadic
+ /// calling conventions, it must always be cast to the correct type before
+ /// actually being used.
+ llvm::PointerType *IMPTy;
+ /// Type of an untyped Objective-C object. Clang treats id as a built-in type
+ /// when compiling Objective-C code, so this may be an opaque pointer (i8*),
+ /// but if the runtime header declaring it is included then it may be a
+ /// pointer to a structure.
+ llvm::PointerType *IdTy;
+ /// Pointer to a pointer to an Objective-C object. Used in the new ABI
+ /// message lookup function and some GC-related functions.
+ llvm::PointerType *PtrToIdTy;
+ /// The clang type of id. Used when using the clang CGCall infrastructure to
+ /// call Objective-C methods.
+ CanQualType ASTIdTy;
+ /// LLVM type for C int type.
+ llvm::IntegerType *IntTy;
+ /// LLVM type for an opaque pointer. This is identical to PtrToInt8Ty, but is
+ /// used in the code to document the difference between i8* meaning a pointer
+ /// to a C string and i8* meaning a pointer to some opaque type.
+ llvm::PointerType *PtrTy;
+ /// LLVM type for C long type. The runtime uses this in a lot of places where
+ /// it should be using intptr_t, but we can't fix this without breaking
+ /// compatibility with GCC...
+ llvm::IntegerType *LongTy;
+ /// LLVM type for C size_t. Used in various runtime data structures.
+ llvm::IntegerType *SizeTy;
+ /// LLVM type for C intptr_t.
+ llvm::IntegerType *IntPtrTy;
+ /// LLVM type for C ptrdiff_t. Mainly used in property accessor functions.
+ llvm::IntegerType *PtrDiffTy;
+ /// LLVM type for C int*. Used for GCC-ABI-compatible non-fragile instance
+ /// variables.
+ llvm::PointerType *PtrToIntTy;
+ /// LLVM type for Objective-C BOOL type.
+ llvm::Type *BoolTy;
+ /// 32-bit integer type, to save us needing to look it up every time it's used.
+ llvm::IntegerType *Int32Ty;
+ /// 64-bit integer type, to save us needing to look it up every time it's used.
+ llvm::IntegerType *Int64Ty;
+ /// Metadata kind used to tie method lookups to message sends. The GNUstep
+ /// runtime provides some LLVM passes that can use this to do things like
+ /// automatic IMP caching and speculative inlining.
+ unsigned msgSendMDKind;
+ /// Helper function that generates a constant string and returns a pointer to
+ /// the start of the string. The result of this function can be used anywhere
+ /// where the C code specifies const char*.
+ llvm::Constant *MakeConstantString(const std::string &Str,
+ const std::string &Name="") {
+ ConstantAddress Array = CGM.GetAddrOfConstantCString(Str, Name.c_str());
+ return llvm::ConstantExpr::getGetElementPtr(Array.getElementType(),
+ Array.getPointer(), Zeros);
+ }
+ /// Emits a linkonce_odr string, whose name is the prefix followed by the
+ /// string value. This allows the linker to combine the strings between
+ /// different modules. Used for EH typeinfo names, selector strings, and a
+ /// few other things.
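+ /// For example, two modules that both emit the selector string "init" under
+ /// the same prefix produce identically named linkonce_odr globals, which the
+ /// linker folds into a single copy.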
+ llvm::Constant *ExportUniqueString(const std::string &Str,
+ const std::string prefix) {
+ std::string name = prefix + Str;
+ auto *ConstStr = TheModule.getGlobalVariable(name);
+ if (!ConstStr) {
+ llvm::Constant *value = llvm::ConstantDataArray::getString(VMContext,Str);
+ ConstStr = new llvm::GlobalVariable(TheModule, value->getType(), true,
+ llvm::GlobalValue::LinkOnceODRLinkage, value, prefix + Str);
+ }
+ return llvm::ConstantExpr::getGetElementPtr(ConstStr->getValueType(),
+ ConstStr, Zeros);
+ }
+ /// Generates a global structure, initialized by the elements in the vector.
+ /// The element types must match the types of the structure elements in the
+ /// first argument.
+ llvm::GlobalVariable *MakeGlobal(llvm::StructType *Ty,
+ ArrayRef<llvm::Constant *> V,
+ CharUnits Align,
+ StringRef Name="",
+ llvm::GlobalValue::LinkageTypes linkage
+ =llvm::GlobalValue::InternalLinkage) {
+ llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
+ auto GV = new llvm::GlobalVariable(TheModule, Ty, false,
+ linkage, C, Name);
+ GV->setAlignment(Align.getQuantity());
+ return GV;
+ }
+ /// Generates a global array. The vector must contain the same number of
+ /// elements that the array type declares, of the type specified as the array
+ /// element type.
+ llvm::GlobalVariable *MakeGlobal(llvm::ArrayType *Ty,
+ ArrayRef<llvm::Constant *> V,
+ CharUnits Align,
+ StringRef Name="",
+ llvm::GlobalValue::LinkageTypes linkage
+ =llvm::GlobalValue::InternalLinkage) {
+ llvm::Constant *C = llvm::ConstantArray::get(Ty, V);
+ auto GV = new llvm::GlobalVariable(TheModule, Ty, false,
+ linkage, C, Name);
+ GV->setAlignment(Align.getQuantity());
+ return GV;
+ }
+ /// Generates a global array, inferring the array type from the specified
+ /// element type and the size of the initialiser.
+ llvm::GlobalVariable *MakeGlobalArray(llvm::Type *Ty,
+ ArrayRef<llvm::Constant *> V,
+ CharUnits Align,
+ StringRef Name="",
+ llvm::GlobalValue::LinkageTypes linkage
+ =llvm::GlobalValue::InternalLinkage) {
+ llvm::ArrayType *ArrayTy = llvm::ArrayType::get(Ty, V.size());
+ return MakeGlobal(ArrayTy, V, Align, Name, linkage);
+ }
+ /// Returns a property name and encoding string.
+ llvm::Constant *MakePropertyEncodingString(const ObjCPropertyDecl *PD,
+ const Decl *Container) {
+ const ObjCRuntime &R = CGM.getLangOpts().ObjCRuntime;
+ if ((R.getKind() == ObjCRuntime::GNUstep) &&
+ (R.getVersion() >= VersionTuple(1, 6))) {
+ std::string NameAndAttributes;
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container, TypeStr);
+ NameAndAttributes += '\0';
+ NameAndAttributes += TypeStr.length() + 3;
+ NameAndAttributes += TypeStr;
+ NameAndAttributes += '\0';
+ NameAndAttributes += PD->getNameAsString();
+ return MakeConstantString(NameAndAttributes);
+ }
+ return MakeConstantString(PD->getNameAsString());
+ }
+ /// Push the property attributes into two structure fields.
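+ /// For example, a synthesized, non-dynamic property stores (attrs & 0xff)
+ /// in the first field and the low byte of ((attrs >> 8) << 2) | (1 << 0) in
+ /// the second.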
+ void PushPropertyAttributes(std::vector<llvm::Constant*> &Fields,
+ ObjCPropertyDecl *property, bool isSynthesized=true, bool
+ isDynamic=true) {
+ int attrs = property->getPropertyAttributes();
+ // For read-only properties, clear the copy and retain flags
+ if (attrs & ObjCPropertyDecl::OBJC_PR_readonly) {
+ attrs &= ~ObjCPropertyDecl::OBJC_PR_copy;
+ attrs &= ~ObjCPropertyDecl::OBJC_PR_retain;
+ attrs &= ~ObjCPropertyDecl::OBJC_PR_weak;
+ attrs &= ~ObjCPropertyDecl::OBJC_PR_strong;
+ }
+ // The first flags field has the same attribute values as clang uses internally
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty, attrs & 0xff));
+ attrs >>= 8;
+ attrs <<= 2;
+ // For protocol properties, synthesized and dynamic have no meaning, so we
+ // reuse these flags to indicate that this is a protocol property (setting
+ // both has no meaning, as a property can't be both synthesized and dynamic)
+ attrs |= isSynthesized ? (1<<0) : 0;
+ attrs |= isDynamic ? (1<<1) : 0;
+ // The second field is the next byte of attributes, shifted left by two,
+ // with the low two bits indicating whether the property is synthesized and
+ // whether it is dynamic.
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty, attrs & 0xff));
+ // Two padding fields
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty, 0));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty, 0));
+ }
+ /// Ensures that the value has the required type, by inserting a bitcast if
+ /// required. This function lets us avoid inserting bitcasts that are
+ /// redundant.
+ llvm::Value* EnforceType(CGBuilderTy &B, llvm::Value *V, llvm::Type *Ty) {
+ if (V->getType() == Ty) return V;
+ return B.CreateBitCast(V, Ty);
+ }
+ Address EnforceType(CGBuilderTy &B, Address V, llvm::Type *Ty) {
+ if (V.getType() == Ty) return V;
+ return B.CreateBitCast(V, Ty);
+ }
+ // Some zeros used for GEPs in lots of places.
+ llvm::Constant *Zeros[2];
+ /// Null pointer value. Mainly used as a terminator in various arrays.
+ llvm::Constant *NULLPtr;
+ /// LLVM context.
+ llvm::LLVMContext &VMContext;
+private:
+ /// Placeholder for the class. Lots of things refer to the class before we've
+ /// actually emitted it. We use this alias as a placeholder, and then replace
+ /// it with a pointer to the class structure before finally emitting the
+ /// module.
+ llvm::GlobalAlias *ClassPtrAlias;
+ /// Placeholder for the metaclass. Lots of things refer to the metaclass
+ /// before we've actually emitted it. We use this alias as a placeholder,
+ /// and then replace it with a pointer to the metaclass structure before
+ /// finally emitting the module.
+ llvm::GlobalAlias *MetaClassPtrAlias;
+ /// All of the classes that have been generated for this compilation unit.
+ std::vector<llvm::Constant*> Classes;
+ /// All of the categories that have been generated for this compilation unit.
+ std::vector<llvm::Constant*> Categories;
+ /// All of the Objective-C constant strings that have been generated for this
+ /// compilation unit.
+ std::vector<llvm::Constant*> ConstantStrings;
+ /// Map from string values to Objective-C constant strings in the output.
+ /// Used to prevent emitting Objective-C strings more than once. This should
+ /// not be required at all - CodeGenModule should manage this list.
+ llvm::StringMap<llvm::Constant*> ObjCStrings;
+ /// All of the protocols that have been declared.
+ llvm::StringMap<llvm::Constant*> ExistingProtocols;
+ /// For each variant of a selector, we store the type encoding and a
+ /// placeholder value. For an untyped selector, the type will be the empty
+ /// string. Selector references are all done via the module's selector table,
+ /// so we create an alias as a placeholder and then replace it with the real
+ /// value later.
+ typedef std::pair<std::string, llvm::GlobalAlias*> TypedSelector;
+ /// Type of the selector map. This is roughly equivalent to the structure
+ /// used in the GNUstep runtime, which maintains a list of all of the valid
+ /// types for a selector in a table.
+ typedef llvm::DenseMap<Selector, SmallVector<TypedSelector, 2> >
+ SelectorMap;
+ /// A map from selectors to selector types. This allows us to emit all
+ /// selectors of the same name and type together.
+ SelectorMap SelectorTable;
+
+ /// Selectors related to memory management. When compiling in GC mode, we
+ /// omit these.
+ Selector RetainSel, ReleaseSel, AutoreleaseSel;
+ /// Runtime functions used for memory management in GC mode. Note that clang
+ /// supports code generation for calling these functions, but neither GNU
+ /// runtime actually supports this API properly yet.
+ LazyRuntimeFunction IvarAssignFn, StrongCastAssignFn, MemMoveFn, WeakReadFn,
+ WeakAssignFn, GlobalAssignFn;
+
+ typedef std::pair<std::string, std::string> ClassAliasPair;
+ /// All classes that have aliases set for them.
+ std::vector<ClassAliasPair> ClassAliases;
+
+protected:
+ /// Function used for throwing Objective-C exceptions.
+ LazyRuntimeFunction ExceptionThrowFn;
+ /// Function used for rethrowing exceptions, used at the end of \@finally or
+ /// \@synchronized blocks.
+ LazyRuntimeFunction ExceptionReThrowFn;
+ /// Function called when entering a catch block. This is required for
+ /// differentiating Objective-C exceptions and foreign exceptions.
+ LazyRuntimeFunction EnterCatchFn;
+ /// Function called when exiting from a catch block. Used to do exception
+ /// cleanup.
+ LazyRuntimeFunction ExitCatchFn;
+ /// Function called when entering a \@synchronized block. Acquires the lock.
+ LazyRuntimeFunction SyncEnterFn;
+ /// Function called when exiting a \@synchronized block. Releases the lock.
+ LazyRuntimeFunction SyncExitFn;
+
+private:
+
+ /// Function called if fast enumeration detects that the collection is
+ /// modified during enumeration.
+ LazyRuntimeFunction EnumerationMutationFn;
+ /// Function for implementing synthesized property getters that return an
+ /// object.
+ LazyRuntimeFunction GetPropertyFn;
+ /// Function for implementing synthesized property setters that return an
+ /// object.
+ LazyRuntimeFunction SetPropertyFn;
+ /// Function used for non-object declared property getters.
+ LazyRuntimeFunction GetStructPropertyFn;
+ /// Function used for non-object declared property setters.
+ LazyRuntimeFunction SetStructPropertyFn;
+
+ /// The version of the runtime that this class targets. Must match the
+ /// version in the runtime.
+ int RuntimeVersion;
+ /// The version of the protocol class. Used to differentiate between ObjC1
+ /// and ObjC2 protocols. Objective-C 1 protocols cannot contain optional
+ /// components and cannot contain declared properties. We always emit
+ /// Objective-C 2 property structures, but we have to pretend that they're
+ /// Objective-C 1 property structures when targeting the GCC runtime or it
+ /// will abort.
+ const int ProtocolVersion;
+private:
+ /// Generates an instance variable list structure. This is a structure
+ /// containing a size and an array of structures containing instance variable
+ /// metadata. This is used purely for introspection in the fragile ABI. In
+ /// the non-fragile ABI, it's used for instance variable fixup.
+ llvm::Constant *GenerateIvarList(ArrayRef<llvm::Constant *> IvarNames,
+ ArrayRef<llvm::Constant *> IvarTypes,
+ ArrayRef<llvm::Constant *> IvarOffsets);
+ /// Generates a method list structure. This is a structure containing a size
+ /// and an array of structures containing method metadata.
+ ///
+ /// This structure is used by both classes and categories, and contains a next
+ /// pointer allowing them to be chained together in a linked list.
+ llvm::Constant *GenerateMethodList(StringRef ClassName,
+ StringRef CategoryName,
+ ArrayRef<Selector> MethodSels,
+ ArrayRef<llvm::Constant *> MethodTypes,
+ bool isClassMethodList);
+ /// Emits an empty protocol. This is used for \@protocol() where no protocol
+ /// is found. The runtime will (hopefully) fix up the pointer to refer to the
+ /// real protocol.
+ llvm::Constant *GenerateEmptyProtocol(const std::string &ProtocolName);
+ /// Generates a list of property metadata structures. This follows the same
+ /// pattern as method and instance variable metadata lists.
+ llvm::Constant *GeneratePropertyList(const ObjCImplementationDecl *OID,
+ SmallVectorImpl<Selector> &InstanceMethodSels,
+ SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes);
+ /// Generates a list of referenced protocols. Classes, categories, and
+ /// protocols all use this structure.
+ llvm::Constant *GenerateProtocolList(ArrayRef<std::string> Protocols);
+ /// To ensure that all protocols are seen by the runtime, we add a category on
+ /// a class defined in the runtime, declaring no methods, but adopting the
+ /// protocols. This is a horribly ugly hack, but it allows us to collect all
+ /// of the protocols without changing the ABI.
+ void GenerateProtocolHolderCategory();
+ /// Generates a class structure.
+ llvm::Constant *GenerateClassStructure(
+ llvm::Constant *MetaClass,
+ llvm::Constant *SuperClass,
+ unsigned info,
+ const char *Name,
+ llvm::Constant *Version,
+ llvm::Constant *InstanceSize,
+ llvm::Constant *IVars,
+ llvm::Constant *Methods,
+ llvm::Constant *Protocols,
+ llvm::Constant *IvarOffsets,
+ llvm::Constant *Properties,
+ llvm::Constant *StrongIvarBitmap,
+ llvm::Constant *WeakIvarBitmap,
+ bool isMeta=false);
+ /// Generates a method list. This is used by protocols to define the required
+ /// and optional methods.
+ llvm::Constant *GenerateProtocolMethodList(
+ ArrayRef<llvm::Constant *> MethodNames,
+ ArrayRef<llvm::Constant *> MethodTypes);
+ /// Returns a selector with the specified type encoding. An empty string is
+ /// used to return an untyped selector (with the types field set to NULL).
+ llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
+ const std::string &TypeEncoding);
+ /// Returns the variable used to store the offset of an instance variable.
+ llvm::GlobalVariable *ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar);
+protected:
+ /// Emits a reference to a class. This allows the linker to object if there
+ /// is no class of the matching name.
+ void EmitClassRef(const std::string &className);
+ /// Emits a pointer to the named class
+ virtual llvm::Value *GetClassNamed(CodeGenFunction &CGF,
+ const std::string &Name, bool isWeak);
+ /// Looks up the method for sending a message to the specified object. This
+ /// mechanism differs between the GCC and GNU runtimes, so this method must be
+ /// overridden in subclasses.
+ virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
+ llvm::Value *&Receiver,
+ llvm::Value *cmd,
+ llvm::MDNode *node,
+ MessageSendInfo &MSI) = 0;
+ /// Looks up the method for sending a message to a superclass. This
+ /// mechanism differs between the GCC and GNU runtimes, so this method must
+ /// be overridden in subclasses.
+ virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
+ Address ObjCSuper,
+ llvm::Value *cmd,
+ MessageSendInfo &MSI) = 0;
+ /// Libobjc2 uses a bitfield representation where small(ish) bitfields are
+ /// stored in a 64-bit value with the low bit set to 1 and the remaining 63
+ /// bits set to their values, LSB first, while larger ones are stored in a
+ /// structure of this form:
+ ///
+ /// struct { int32_t length; int32_t values[length]; };
+ ///
+ /// The values in the array are stored in host-endian format, with the least
+ /// significant bit being assumed to come first in the bitfield. Therefore,
+ /// a bitfield with the 64th bit set will be (int64_t)&{ 2, [0, 1<<31] },
+ /// while a bitfield with the 63rd bit set will be 1<<64.
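+ /// For example, the three-bit bitfield {1, 0, 1} is small enough to be
+ /// stored inline: (1 << 0) | (1 << 1) | (1 << 3) == 11, where bit 0 is the
+ /// tag and bits 1-3 hold the values.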
+ llvm::Constant *MakeBitField(ArrayRef<bool> bits);
+public:
+ CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
+ unsigned protocolClassVersion);
+
+ ConstantAddress GenerateConstantString(const StringLiteral *) override;
+
+ RValue
+ GenerateMessageSend(CodeGenFunction &CGF, ReturnValueSlot Return,
+ QualType ResultType, Selector Sel,
+ llvm::Value *Receiver, const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method) override;
+ RValue
+ GenerateMessageSendSuper(CodeGenFunction &CGF, ReturnValueSlot Return,
+ QualType ResultType, Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl, llvm::Value *Receiver,
+ bool IsClassMessage, const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) override;
+ llvm::Value *GetClass(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *OID) override;
+ llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel) override;
+ Address GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) override;
+ llvm::Value *GetSelector(CodeGenFunction &CGF,
+ const ObjCMethodDecl *Method) override;
+ llvm::Constant *GetEHType(QualType T) override;
+
+ llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) override;
+ void GenerateCategory(const ObjCCategoryImplDecl *CMD) override;
+ void GenerateClass(const ObjCImplementationDecl *ClassDecl) override;
+ void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) override;
+ llvm::Value *GenerateProtocolRef(CodeGenFunction &CGF,
+ const ObjCProtocolDecl *PD) override;
+ void GenerateProtocol(const ObjCProtocolDecl *PD) override;
+ llvm::Function *ModuleInitFunction() override;
+ llvm::Constant *GetPropertyGetFunction() override;
+ llvm::Constant *GetPropertySetFunction() override;
+ llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) override;
+ llvm::Constant *GetSetStructFunction() override;
+ llvm::Constant *GetGetStructFunction() override;
+ llvm::Constant *GetCppAtomicObjectGetFunction() override;
+ llvm::Constant *GetCppAtomicObjectSetFunction() override;
+ llvm::Constant *EnumerationMutationFunction() override;
+
+ void EmitTryStmt(CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) override;
+ void EmitSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) override;
+ void EmitThrowStmt(CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S,
+ bool ClearInsertionPoint=true) override;
+ llvm::Value * EmitObjCWeakRead(CodeGenFunction &CGF,
+ Address AddrWeakObj) override;
+ void EmitObjCWeakAssign(CodeGenFunction &CGF,
+ llvm::Value *src, Address dst) override;
+ void EmitObjCGlobalAssign(CodeGenFunction &CGF,
+ llvm::Value *src, Address dest,
+ bool threadlocal=false) override;
+ void EmitObjCIvarAssign(CodeGenFunction &CGF, llvm::Value *src,
+ Address dest, llvm::Value *ivarOffset) override;
+ void EmitObjCStrongCastAssign(CodeGenFunction &CGF,
+ llvm::Value *src, Address dest) override;
+ void EmitGCMemmoveCollectable(CodeGenFunction &CGF, Address DestPtr,
+ Address SrcPtr,
+ llvm::Value *Size) override;
+ LValue EmitObjCValueForIvar(CodeGenFunction &CGF, QualType ObjectTy,
+ llvm::Value *BaseValue, const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) override;
+ llvm::Value *EmitIvarOffset(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) override;
+ llvm::Value *EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) override;
+ llvm::Constant *BuildGCBlockLayout(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) override {
+ return NULLPtr;
+ }
+ llvm::Constant *BuildRCBlockLayout(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) override {
+ return NULLPtr;
+ }
+
+ llvm::Constant *BuildByrefLayout(CodeGenModule &CGM, QualType T) override {
+ return NULLPtr;
+ }
+
+ llvm::GlobalVariable *GetClassGlobal(const std::string &Name,
+ bool Weak = false) override {
+ return nullptr;
+ }
+};
+/// Class representing the legacy GCC Objective-C ABI. This is the default when
+/// -fobjc-nonfragile-abi is not specified.
+///
+/// The GCC ABI target actually generates code that is approximately compatible
+/// with the new GNUstep runtime ABI, but refrains from using any features that
+/// would not work with the GCC runtime. For example, clang always generates
+/// the extended form of the class structure, and the extra fields are simply
+/// ignored by GCC libobjc.
+class CGObjCGCC : public CGObjCGNU {
+ /// The GCC ABI message lookup function. Returns an IMP pointing to the
+ /// method implementation for this message.
+ LazyRuntimeFunction MsgLookupFn;
+ /// The GCC ABI superclass message lookup function. Takes a pointer to a
+ /// structure describing the receiver and the class, and a selector as
+ /// arguments. Returns the IMP for the corresponding method.
+ LazyRuntimeFunction MsgLookupSuperFn;
+protected:
+ llvm::Value *LookupIMP(CodeGenFunction &CGF, llvm::Value *&Receiver,
+ llvm::Value *cmd, llvm::MDNode *node,
+ MessageSendInfo &MSI) override {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *args[] = {
+ EnforceType(Builder, Receiver, IdTy),
+ EnforceType(Builder, cmd, SelectorTy) };
+ llvm::CallSite imp = CGF.EmitRuntimeCallOrInvoke(MsgLookupFn, args);
+ imp->setMetadata(msgSendMDKind, node);
+ return imp.getInstruction();
+ }
+ llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, Address ObjCSuper,
+ llvm::Value *cmd, MessageSendInfo &MSI) override {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper,
+ PtrToObjCSuperTy).getPointer(), cmd};
+ return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFn, lookupArgs);
+ }
+ public:
+ CGObjCGCC(CodeGenModule &Mod) : CGObjCGNU(Mod, 8, 2) {
+ // IMP objc_msg_lookup(id, SEL);
+ MsgLookupFn.init(&CGM, "objc_msg_lookup", IMPTy, IdTy, SelectorTy,
+ nullptr);
+ // IMP objc_msg_lookup_super(struct objc_super*, SEL);
+ MsgLookupSuperFn.init(&CGM, "objc_msg_lookup_super", IMPTy,
+ PtrToObjCSuperTy, SelectorTy, nullptr);
+ }
+};
+/// Class used when targeting the new GNUstep runtime ABI.
+class CGObjCGNUstep : public CGObjCGNU {
+ /// The slot lookup function. Returns a pointer to a cacheable structure
+ /// that contains (among other things) the IMP.
+ LazyRuntimeFunction SlotLookupFn;
+ /// The GNUstep ABI superclass message lookup function. Takes a pointer to
+ /// a structure describing the receiver and the class, and a selector as
+ /// arguments. Returns the slot for the corresponding method. Superclass
+ /// message lookup rarely changes, so this is a good caching opportunity.
+ LazyRuntimeFunction SlotLookupSuperFn;
+ /// Specialised function for setting atomic retain properties
+ LazyRuntimeFunction SetPropertyAtomic;
+ /// Specialised function for setting atomic copy properties
+ LazyRuntimeFunction SetPropertyAtomicCopy;
+ /// Specialised function for setting nonatomic retain properties
+ LazyRuntimeFunction SetPropertyNonAtomic;
+ /// Specialised function for setting nonatomic copy properties
+ LazyRuntimeFunction SetPropertyNonAtomicCopy;
+ /// Function to perform atomic copies of C++ objects with nontrivial copy
+ /// constructors from Objective-C ivars.
+ LazyRuntimeFunction CxxAtomicObjectGetFn;
+ /// Function to perform atomic copies of C++ objects with nontrivial copy
+ /// constructors to Objective-C ivars.
+ LazyRuntimeFunction CxxAtomicObjectSetFn;
+ /// Type of a slot structure pointer. This is returned by the various
+ /// lookup functions.
+ llvm::Type *SlotTy;
+ public:
+ llvm::Constant *GetEHType(QualType T) override;
+ protected:
+ llvm::Value *LookupIMP(CodeGenFunction &CGF, llvm::Value *&Receiver,
+ llvm::Value *cmd, llvm::MDNode *node,
+ MessageSendInfo &MSI) override {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Function *LookupFn = SlotLookupFn;
+
+ // Store the receiver on the stack so that we can reload it later
+ Address ReceiverPtr =
+ CGF.CreateTempAlloca(Receiver->getType(), CGF.getPointerAlign());
+ Builder.CreateStore(Receiver, ReceiverPtr);
+
+ llvm::Value *self;
+
+ if (isa<ObjCMethodDecl>(CGF.CurCodeDecl)) {
+ self = CGF.LoadObjCSelf();
+ } else {
+ self = llvm::ConstantPointerNull::get(IdTy);
+ }
+
+ // The lookup function is guaranteed not to capture the receiver pointer.
+ LookupFn->setDoesNotCapture(1);
+
+ llvm::Value *args[] = {
+ EnforceType(Builder, ReceiverPtr.getPointer(), PtrToIdTy),
+ EnforceType(Builder, cmd, SelectorTy),
+ EnforceType(Builder, self, IdTy) };
+ llvm::CallSite slot = CGF.EmitRuntimeCallOrInvoke(LookupFn, args);
+ slot.setOnlyReadsMemory();
+ slot->setMetadata(msgSendMDKind, node);
+
+ // Load the imp from the slot
+ llvm::Value *imp = Builder.CreateAlignedLoad(
+ Builder.CreateStructGEP(nullptr, slot.getInstruction(), 4),
+ CGF.getPointerAlign());
+
+ // The lookup function may have changed the receiver, so make sure we use
+ // the new one.
+ Receiver = Builder.CreateLoad(ReceiverPtr, true);
+ return imp;
+ }
+ llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, Address ObjCSuper,
+ llvm::Value *cmd,
+ MessageSendInfo &MSI) override {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *lookupArgs[] = {ObjCSuper.getPointer(), cmd};
+
+ llvm::CallInst *slot =
+ CGF.EmitNounwindRuntimeCall(SlotLookupSuperFn, lookupArgs);
+ slot->setOnlyReadsMemory();
+
+ return Builder.CreateAlignedLoad(Builder.CreateStructGEP(nullptr, slot, 4),
+ CGF.getPointerAlign());
+ }
+ public:
+ CGObjCGNUstep(CodeGenModule &Mod) : CGObjCGNU(Mod, 9, 3) {
+ const ObjCRuntime &R = CGM.getLangOpts().ObjCRuntime;
+
+ llvm::StructType *SlotStructTy = llvm::StructType::get(PtrTy,
+ PtrTy, PtrTy, IntTy, IMPTy, nullptr);
+ SlotTy = llvm::PointerType::getUnqual(SlotStructTy);
+ // Slot_t objc_msg_lookup_sender(id *receiver, SEL selector, id sender);
+ SlotLookupFn.init(&CGM, "objc_msg_lookup_sender", SlotTy, PtrToIdTy,
+ SelectorTy, IdTy, nullptr);
+ // Slot_t objc_slot_lookup_super(struct objc_super*, SEL);
+ SlotLookupSuperFn.init(&CGM, "objc_slot_lookup_super", SlotTy,
+ PtrToObjCSuperTy, SelectorTy, nullptr);
+ // If we're in ObjC++ mode, then we want to make use of the C++ exception
+ // machinery, so hook catch entry/exit and rethrow up to the __cxa_* and
+ // _Unwind functions.
+ if (CGM.getLangOpts().CPlusPlus) {
+ llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
+ // void *__cxa_begin_catch(void *e)
+ EnterCatchFn.init(&CGM, "__cxa_begin_catch", PtrTy, PtrTy, nullptr);
+ // void __cxa_end_catch(void)
+ ExitCatchFn.init(&CGM, "__cxa_end_catch", VoidTy, nullptr);
+ // void _Unwind_Resume_or_Rethrow(void*)
+ ExceptionReThrowFn.init(&CGM, "_Unwind_Resume_or_Rethrow", VoidTy,
+ PtrTy, nullptr);
+ } else if (R.getVersion() >= VersionTuple(1, 7)) {
+ llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
+ // id objc_begin_catch(void *e)
+ EnterCatchFn.init(&CGM, "objc_begin_catch", IdTy, PtrTy, nullptr);
+ // void objc_end_catch(void)
+ ExitCatchFn.init(&CGM, "objc_end_catch", VoidTy, nullptr);
+ // void objc_exception_rethrow(void*)
+ ExceptionReThrowFn.init(&CGM, "objc_exception_rethrow", VoidTy,
+ PtrTy, nullptr);
+ }
+ llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
+ SetPropertyAtomic.init(&CGM, "objc_setProperty_atomic", VoidTy, IdTy,
+ SelectorTy, IdTy, PtrDiffTy, nullptr);
+ SetPropertyAtomicCopy.init(&CGM, "objc_setProperty_atomic_copy", VoidTy,
+ IdTy, SelectorTy, IdTy, PtrDiffTy, nullptr);
+ SetPropertyNonAtomic.init(&CGM, "objc_setProperty_nonatomic", VoidTy,
+ IdTy, SelectorTy, IdTy, PtrDiffTy, nullptr);
+ SetPropertyNonAtomicCopy.init(&CGM, "objc_setProperty_nonatomic_copy",
+ VoidTy, IdTy, SelectorTy, IdTy, PtrDiffTy, nullptr);
+ // void objc_setCppObjectAtomic(void *dest, const void *src, void
+ // *helper);
+ CxxAtomicObjectSetFn.init(&CGM, "objc_setCppObjectAtomic", VoidTy, PtrTy,
+ PtrTy, PtrTy, nullptr);
+ // void objc_getCppObjectAtomic(void *dest, const void *src, void
+ // *helper);
+ CxxAtomicObjectGetFn.init(&CGM, "objc_getCppObjectAtomic", VoidTy, PtrTy,
+ PtrTy, PtrTy, nullptr);
+ }
+ llvm::Constant *GetCppAtomicObjectGetFunction() override {
+ // The optimised functions were added in version 1.7 of the GNUstep
+ // runtime.
+ assert (CGM.getLangOpts().ObjCRuntime.getVersion() >=
+ VersionTuple(1, 7));
+ return CxxAtomicObjectGetFn;
+ }
+ llvm::Constant *GetCppAtomicObjectSetFunction() override {
+ // The optimised functions were added in version 1.7 of the GNUstep
+ // runtime.
+ assert (CGM.getLangOpts().ObjCRuntime.getVersion() >=
+ VersionTuple(1, 7));
+ return CxxAtomicObjectSetFn;
+ }
+ llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) override {
+ // The optimised property functions omit the GC check, and so are not
+ // safe to use in GC mode. The standard functions are fast in GC mode,
+ // so there is less advantage in using them.
+ assert ((CGM.getLangOpts().getGC() == LangOptions::NonGC));
+ // The optimised functions were added in version 1.7 of the GNUstep
+ // runtime.
+ assert (CGM.getLangOpts().ObjCRuntime.getVersion() >=
+ VersionTuple(1, 7));
+
+ if (atomic) {
+ if (copy) return SetPropertyAtomicCopy;
+ return SetPropertyAtomic;
+ }
+
+ return copy ? SetPropertyNonAtomicCopy : SetPropertyNonAtomic;
+ }
+};
+
+/// Support for the ObjFW runtime.
+class CGObjCObjFW: public CGObjCGNU {
+protected:
+ /// The GCC ABI message lookup function. Returns an IMP pointing to the
+ /// method implementation for this message.
+ LazyRuntimeFunction MsgLookupFn;
+ /// stret lookup function. While this does not seem to make sense at first
+ /// glance, it is required to call the correct forwarding function.
+ LazyRuntimeFunction MsgLookupFnSRet;
+ /// The GCC ABI superclass message lookup function. Takes a pointer to a
+ /// structure describing the receiver and the class, and a selector as
+ /// arguments. Returns the IMP for the corresponding method.
+ LazyRuntimeFunction MsgLookupSuperFn, MsgLookupSuperFnSRet;
+
+ llvm::Value *LookupIMP(CodeGenFunction &CGF, llvm::Value *&Receiver,
+ llvm::Value *cmd, llvm::MDNode *node,
+ MessageSendInfo &MSI) override {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *args[] = {
+ EnforceType(Builder, Receiver, IdTy),
+ EnforceType(Builder, cmd, SelectorTy) };
+
+ llvm::CallSite imp;
+ if (CGM.ReturnTypeUsesSRet(MSI.CallInfo))
+ imp = CGF.EmitRuntimeCallOrInvoke(MsgLookupFnSRet, args);
+ else
+ imp = CGF.EmitRuntimeCallOrInvoke(MsgLookupFn, args);
+
+ imp->setMetadata(msgSendMDKind, node);
+ return imp.getInstruction();
+ }
+
+ llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, Address ObjCSuper,
+ llvm::Value *cmd, MessageSendInfo &MSI) override {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper.getPointer(),
+ PtrToObjCSuperTy), cmd};
+
+ if (CGM.ReturnTypeUsesSRet(MSI.CallInfo))
+ return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFnSRet, lookupArgs);
+ else
+ return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFn, lookupArgs);
+ }
+
+ llvm::Value *GetClassNamed(CodeGenFunction &CGF,
+ const std::string &Name, bool isWeak) override {
+ if (isWeak)
+ return CGObjCGNU::GetClassNamed(CGF, Name, isWeak);
+
+ EmitClassRef(Name);
+
+ std::string SymbolName = "_OBJC_CLASS_" + Name;
+
+ llvm::GlobalVariable *ClassSymbol = TheModule.getGlobalVariable(SymbolName);
+
+ if (!ClassSymbol)
+ ClassSymbol = new llvm::GlobalVariable(TheModule, LongTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ nullptr, SymbolName);
+
+ return ClassSymbol;
+ }
+
+public:
+ CGObjCObjFW(CodeGenModule &Mod): CGObjCGNU(Mod, 9, 3) {
+ // IMP objc_msg_lookup(id, SEL);
+ MsgLookupFn.init(&CGM, "objc_msg_lookup", IMPTy, IdTy, SelectorTy, nullptr);
+ MsgLookupFnSRet.init(&CGM, "objc_msg_lookup_stret", IMPTy, IdTy,
+ SelectorTy, nullptr);
+ // IMP objc_msg_lookup_super(struct objc_super*, SEL);
+ MsgLookupSuperFn.init(&CGM, "objc_msg_lookup_super", IMPTy,
+ PtrToObjCSuperTy, SelectorTy, nullptr);
+ MsgLookupSuperFnSRet.init(&CGM, "objc_msg_lookup_super_stret", IMPTy,
+ PtrToObjCSuperTy, SelectorTy, nullptr);
+ }
+};
+} // end anonymous namespace
+
+
+/// Emits a reference to a dummy variable which is emitted with each class.
+/// This ensures that a linker error will be generated when trying to link
+/// together modules where a referenced class is not defined.
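+/// For example, referencing class NSObject emits a weak global
+/// "__objc_class_ref_NSObject" whose initializer is the external symbol
+/// "__objc_class_name_NSObject", so linking fails unless some object file
+/// defines that class.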
+void CGObjCGNU::EmitClassRef(const std::string &className) {
+ std::string symbolRef = "__objc_class_ref_" + className;
+ // Don't emit two copies of the same symbol
+ if (TheModule.getGlobalVariable(symbolRef))
+ return;
+ std::string symbolName = "__objc_class_name_" + className;
+ llvm::GlobalVariable *ClassSymbol = TheModule.getGlobalVariable(symbolName);
+ if (!ClassSymbol) {
+ ClassSymbol = new llvm::GlobalVariable(TheModule, LongTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ nullptr, symbolName);
+ }
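+  // Create a weak reference to the __objc_class_name_ symbol. That symbol is
+  // only defined by modules that implement the class, so the link fails if
+  // the class is referenced but never defined anywhere.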
+ new llvm::GlobalVariable(TheModule, ClassSymbol->getType(), true,
+ llvm::GlobalValue::WeakAnyLinkage, ClassSymbol, symbolRef);
+}
+
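+// Builds the symbol name for a method function. For example, the instance
+// method -[Foo(Bar) count:] becomes _i_Foo_Bar_count_, and the class method
+// +[Foo alloc] becomes _c_Foo__alloc (the category name is empty, and each
+// ':' in the selector is replaced with '_').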
+static std::string SymbolNameForMethod(StringRef ClassName,
+ StringRef CategoryName, const Selector MethodName,
+ bool isClassMethod) {
+ std::string MethodNameColonStripped = MethodName.getAsString();
+ std::replace(MethodNameColonStripped.begin(), MethodNameColonStripped.end(),
+ ':', '_');
+ return (Twine(isClassMethod ? "_c_" : "_i_") + ClassName + "_" +
+ CategoryName + "_" + MethodNameColonStripped).str();
+}
+
+CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
+ unsigned protocolClassVersion)
+ : CGObjCRuntime(cgm), TheModule(CGM.getModule()),
+ VMContext(cgm.getLLVMContext()), ClassPtrAlias(nullptr),
+ MetaClassPtrAlias(nullptr), RuntimeVersion(runtimeABIVersion),
+ ProtocolVersion(protocolClassVersion) {
+
+ msgSendMDKind = VMContext.getMDKindID("GNUObjCMessageSend");
+
+ CodeGenTypes &Types = CGM.getTypes();
+ IntTy = cast<llvm::IntegerType>(
+ Types.ConvertType(CGM.getContext().IntTy));
+ LongTy = cast<llvm::IntegerType>(
+ Types.ConvertType(CGM.getContext().LongTy));
+ SizeTy = cast<llvm::IntegerType>(
+ Types.ConvertType(CGM.getContext().getSizeType()));
+ PtrDiffTy = cast<llvm::IntegerType>(
+ Types.ConvertType(CGM.getContext().getPointerDiffType()));
+ BoolTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
+
+ Int8Ty = llvm::Type::getInt8Ty(VMContext);
+ // C string type. Used in lots of places.
+ PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);
+
+ Zeros[0] = llvm::ConstantInt::get(LongTy, 0);
+ Zeros[1] = Zeros[0];
+ NULLPtr = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+ // Get the selector Type.
+ QualType selTy = CGM.getContext().getObjCSelType();
+ if (QualType() == selTy) {
+ SelectorTy = PtrToInt8Ty;
+ } else {
+ SelectorTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(selTy));
+ }
+
+ PtrToIntTy = llvm::PointerType::getUnqual(IntTy);
+ PtrTy = PtrToInt8Ty;
+
+ Int32Ty = llvm::Type::getInt32Ty(VMContext);
+ Int64Ty = llvm::Type::getInt64Ty(VMContext);
+
+ IntPtrTy =
+ CGM.getDataLayout().getPointerSizeInBits() == 32 ? Int32Ty : Int64Ty;
+
+ // Object type
+ QualType UnqualIdTy = CGM.getContext().getObjCIdType();
+ ASTIdTy = CanQualType();
+ if (UnqualIdTy != QualType()) {
+ ASTIdTy = CGM.getContext().getCanonicalType(UnqualIdTy);
+ IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
+ } else {
+ IdTy = PtrToInt8Ty;
+ }
+ PtrToIdTy = llvm::PointerType::getUnqual(IdTy);
+
+ ObjCSuperTy = llvm::StructType::get(IdTy, IdTy, nullptr);
+ PtrToObjCSuperTy = llvm::PointerType::getUnqual(ObjCSuperTy);
+
+ llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
+
+ // void objc_exception_throw(id);
+ ExceptionThrowFn.init(&CGM, "objc_exception_throw", VoidTy, IdTy, nullptr);
+ ExceptionReThrowFn.init(&CGM, "objc_exception_throw", VoidTy, IdTy, nullptr);
+ // int objc_sync_enter(id);
+ SyncEnterFn.init(&CGM, "objc_sync_enter", IntTy, IdTy, nullptr);
+ // int objc_sync_exit(id);
+ SyncExitFn.init(&CGM, "objc_sync_exit", IntTy, IdTy, nullptr);
+
+ // void objc_enumerationMutation (id)
+ EnumerationMutationFn.init(&CGM, "objc_enumerationMutation", VoidTy,
+ IdTy, nullptr);
+
+ // id objc_getProperty(id, SEL, ptrdiff_t, BOOL)
+ GetPropertyFn.init(&CGM, "objc_getProperty", IdTy, IdTy, SelectorTy,
+ PtrDiffTy, BoolTy, nullptr);
+ // void objc_setProperty(id, SEL, ptrdiff_t, id, BOOL, BOOL)
+ SetPropertyFn.init(&CGM, "objc_setProperty", VoidTy, IdTy, SelectorTy,
+ PtrDiffTy, IdTy, BoolTy, BoolTy, nullptr);
+  // void objc_getPropertyStruct(void*, void*, ptrdiff_t, BOOL, BOOL)
+ GetStructPropertyFn.init(&CGM, "objc_getPropertyStruct", VoidTy, PtrTy, PtrTy,
+ PtrDiffTy, BoolTy, BoolTy, nullptr);
+ // void objc_setPropertyStruct(void*, void*, ptrdiff_t, BOOL, BOOL)
+ SetStructPropertyFn.init(&CGM, "objc_setPropertyStruct", VoidTy, PtrTy, PtrTy,
+ PtrDiffTy, BoolTy, BoolTy, nullptr);
+
+  // IMP type. A single variadic IMP type stands in for every method
+  // signature; call sites bitcast the function pointer to the precise
+  // messenger type before calling it.
+ llvm::Type *IMPArgs[] = { IdTy, SelectorTy };
+ IMPTy = llvm::PointerType::getUnqual(llvm::FunctionType::get(IdTy, IMPArgs,
+ true));
+
+ const LangOptions &Opts = CGM.getLangOpts();
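+  // GC and ARC both need extra runtime support, so bump the version recorded
+  // in the emitted module structure to let the runtime know.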
+ if ((Opts.getGC() != LangOptions::NonGC) || Opts.ObjCAutoRefCount)
+ RuntimeVersion = 10;
+
+ // Don't bother initialising the GC stuff unless we're compiling in GC mode
+ if (Opts.getGC() != LangOptions::NonGC) {
+    // This is a bit of a hack. We should sort this out by having a proper
+    // CGObjCGNUstep subclass for GC, but we may want to really support the old
+    // ABI and GC added in ObjectiveC2.framework, so we fudge it a bit for now.
+ // Get selectors needed in GC mode
+ RetainSel = GetNullarySelector("retain", CGM.getContext());
+ ReleaseSel = GetNullarySelector("release", CGM.getContext());
+ AutoreleaseSel = GetNullarySelector("autorelease", CGM.getContext());
+
+ // Get functions needed in GC mode
+
+ // id objc_assign_ivar(id, id, ptrdiff_t);
+ IvarAssignFn.init(&CGM, "objc_assign_ivar", IdTy, IdTy, IdTy, PtrDiffTy,
+ nullptr);
+ // id objc_assign_strongCast (id, id*)
+ StrongCastAssignFn.init(&CGM, "objc_assign_strongCast", IdTy, IdTy,
+ PtrToIdTy, nullptr);
+ // id objc_assign_global(id, id*);
+ GlobalAssignFn.init(&CGM, "objc_assign_global", IdTy, IdTy, PtrToIdTy,
+ nullptr);
+ // id objc_assign_weak(id, id*);
+ WeakAssignFn.init(&CGM, "objc_assign_weak", IdTy, IdTy, PtrToIdTy, nullptr);
+ // id objc_read_weak(id*);
+ WeakReadFn.init(&CGM, "objc_read_weak", IdTy, PtrToIdTy, nullptr);
+ // void *objc_memmove_collectable(void*, void *, size_t);
+ MemMoveFn.init(&CGM, "objc_memmove_collectable", PtrTy, PtrTy, PtrTy,
+ SizeTy, nullptr);
+ }
+}
+
+llvm::Value *CGObjCGNU::GetClassNamed(CodeGenFunction &CGF,
+ const std::string &Name,
+ bool isWeak) {
+ llvm::Constant *ClassName = MakeConstantString(Name);
+ // With the incompatible ABI, this will need to be replaced with a direct
+ // reference to the class symbol. For the compatible nonfragile ABI we are
+ // still performing this lookup at run time but emitting the symbol for the
+ // class externally so that we can make the switch later.
+ //
+ // Libobjc2 contains an LLVM pass that replaces calls to objc_lookup_class
+ // with memoized versions or with static references if it's safe to do so.
+ if (!isWeak)
+ EmitClassRef(Name);
+
+ llvm::Constant *ClassLookupFn =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, PtrToInt8Ty, true),
+ "objc_lookup_class");
+ return CGF.EmitNounwindRuntimeCall(ClassLookupFn, ClassName);
+}
+
+// This has to perform the lookup every time, since posing and related
+// techniques can modify the name -> class mapping.
+llvm::Value *CGObjCGNU::GetClass(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *OID) {
+ return GetClassNamed(CGF, OID->getNameAsString(), OID->isWeakImported());
+}
+llvm::Value *CGObjCGNU::EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) {
+ return GetClassNamed(CGF, "NSAutoreleasePool", false);
+}
+
+llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, Selector Sel,
+ const std::string &TypeEncoding) {
+
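+  // Selectors are cached per (selector, type encoding) pair. The private
+  // aliases created here are placeholders which are pointed at the real
+  // selector structures when the module load function is emitted.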
+ SmallVectorImpl<TypedSelector> &Types = SelectorTable[Sel];
+ llvm::GlobalAlias *SelValue = nullptr;
+
+ for (SmallVectorImpl<TypedSelector>::iterator i = Types.begin(),
+ e = Types.end() ; i!=e ; i++) {
+ if (i->first == TypeEncoding) {
+ SelValue = i->second;
+ break;
+ }
+ }
+ if (!SelValue) {
+ SelValue = llvm::GlobalAlias::create(
+ SelectorTy->getElementType(), 0, llvm::GlobalValue::PrivateLinkage,
+ ".objc_selector_" + Sel.getAsString(), &TheModule);
+ Types.emplace_back(TypeEncoding, SelValue);
+ }
+
+ return SelValue;
+}
+
+Address CGObjCGNU::GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) {
+ llvm::Value *SelValue = GetSelector(CGF, Sel);
+
+ // Store it to a temporary. Does this satisfy the semantics of
+ // GetAddrOfSelector? Hopefully.
+ Address tmp = CGF.CreateTempAlloca(SelValue->getType(),
+ CGF.getPointerAlign());
+ CGF.Builder.CreateStore(SelValue, tmp);
+ return tmp;
+}
+
+llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, Selector Sel) {
+ return GetSelector(CGF, Sel, std::string());
+}
+
+llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF,
+ const ObjCMethodDecl *Method) {
+ std::string SelTypes;
+ CGM.getContext().getObjCEncodingForMethodDecl(Method, SelTypes);
+ return GetSelector(CGF, Method->getSelector(), SelTypes);
+}
+
+llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
+ if (T->isObjCIdType() || T->isObjCQualifiedIdType()) {
+ // With the old ABI, there was only one kind of catchall, which broke
+ // foreign exceptions. With the new ABI, we use __objc_id_typeinfo as
+ // a pointer indicating object catchalls, and NULL to indicate real
+ // catchalls
+ if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) {
+ return MakeConstantString("@id");
+ } else {
+ return nullptr;
+ }
+ }
+
+ // All other types should be Objective-C interface pointer types.
+ const ObjCObjectPointerType *OPT = T->getAs<ObjCObjectPointerType>();
+ assert(OPT && "Invalid @catch type.");
+ const ObjCInterfaceDecl *IDecl = OPT->getObjectType()->getInterface();
+ assert(IDecl && "Invalid @catch type.");
+ return MakeConstantString(IDecl->getIdentifier()->getName());
+}
+
+llvm::Constant *CGObjCGNUstep::GetEHType(QualType T) {
+ if (!CGM.getLangOpts().CPlusPlus)
+ return CGObjCGNU::GetEHType(T);
+
+ // For Objective-C++, we want to provide the ability to catch both C++ and
+ // Objective-C objects in the same function.
+
+ // There's a particular fixed type info for 'id'.
+ if (T->isObjCIdType() ||
+ T->isObjCQualifiedIdType()) {
+ llvm::Constant *IDEHType =
+ CGM.getModule().getGlobalVariable("__objc_id_type_info");
+ if (!IDEHType)
+ IDEHType =
+ new llvm::GlobalVariable(CGM.getModule(), PtrToInt8Ty,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ nullptr, "__objc_id_type_info");
+ return llvm::ConstantExpr::getBitCast(IDEHType, PtrToInt8Ty);
+ }
+
+ const ObjCObjectPointerType *PT =
+ T->getAs<ObjCObjectPointerType>();
+ assert(PT && "Invalid @catch type.");
+ const ObjCInterfaceType *IT = PT->getInterfaceType();
+ assert(IT && "Invalid @catch type.");
+ std::string className = IT->getDecl()->getIdentifier()->getName();
+
+ std::string typeinfoName = "__objc_eh_typeinfo_" + className;
+
+ // Return the existing typeinfo if it exists
+ llvm::Constant *typeinfo = TheModule.getGlobalVariable(typeinfoName);
+ if (typeinfo)
+ return llvm::ConstantExpr::getBitCast(typeinfo, PtrToInt8Ty);
+
+ // Otherwise create it.
+
+ // vtable for gnustep::libobjc::__objc_class_type_info
+  // Hard-coding this is quite ugly; ideally we'd generate it using the host
+  // platform's name mangling.
+ const char *vtableName = "_ZTVN7gnustep7libobjc22__objc_class_type_infoE";
+ auto *Vtable = TheModule.getGlobalVariable(vtableName);
+ if (!Vtable) {
+ Vtable = new llvm::GlobalVariable(TheModule, PtrToInt8Ty, true,
+ llvm::GlobalValue::ExternalLinkage,
+ nullptr, vtableName);
+ }
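+  // A typeinfo object's vtable pointer points two entries into the vtable,
+  // past the offset-to-top and RTTI slots, following the Itanium C++ ABI.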
+ llvm::Constant *Two = llvm::ConstantInt::get(IntTy, 2);
+ auto *BVtable = llvm::ConstantExpr::getBitCast(
+ llvm::ConstantExpr::getGetElementPtr(Vtable->getValueType(), Vtable, Two),
+ PtrToInt8Ty);
+
+ llvm::Constant *typeName =
+ ExportUniqueString(className, "__objc_eh_typename_");
+
+ std::vector<llvm::Constant*> fields;
+ fields.push_back(BVtable);
+ fields.push_back(typeName);
+ llvm::Constant *TI =
+ MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, nullptr),
+ fields, CGM.getPointerAlign(),
+ "__objc_eh_typeinfo_" + className,
+ llvm::GlobalValue::LinkOnceODRLinkage);
+ return llvm::ConstantExpr::getBitCast(TI, PtrToInt8Ty);
+}
+
+/// Generate an NSConstantString object.
+ConstantAddress CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
+
+ std::string Str = SL->getString().str();
+ CharUnits Align = CGM.getPointerAlign();
+
+ // Look for an existing one
+ llvm::StringMap<llvm::Constant*>::iterator old = ObjCStrings.find(Str);
+ if (old != ObjCStrings.end())
+ return ConstantAddress(old->getValue(), Align);
+
+ StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
+
+ if (StringClass.empty()) StringClass = "NXConstantString";
+
+ std::string Sym = "_OBJC_CLASS_";
+ Sym += StringClass;
+
+ llvm::Constant *isa = TheModule.getNamedGlobal(Sym);
+
+ if (!isa)
+ isa = new llvm::GlobalVariable(TheModule, IdTy, /* isConstant */false,
+ llvm::GlobalValue::ExternalWeakLinkage, nullptr, Sym);
+ else if (isa->getType() != PtrToIdTy)
+ isa = llvm::ConstantExpr::getBitCast(isa, PtrToIdTy);
+
+ std::vector<llvm::Constant*> Ivars;
+ Ivars.push_back(isa);
+ Ivars.push_back(MakeConstantString(Str));
+ Ivars.push_back(llvm::ConstantInt::get(IntTy, Str.size()));
+ llvm::Constant *ObjCStr = MakeGlobal(
+ llvm::StructType::get(PtrToIdTy, PtrToInt8Ty, IntTy, nullptr),
+ Ivars, Align, ".objc_str");
+ ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty);
+ ObjCStrings[Str] = ObjCStr;
+ ConstantStrings.push_back(ObjCStr);
+ return ConstantAddress(ObjCStr, Align);
+}
+
+/// Generates a message send where the super is the receiver. This is a
+/// message send to self with special delivery semantics indicating which
+/// class's method should be called.
+RValue
+CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ CGBuilderTy &Builder = CGF.Builder;
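+  // As in GenerateMessageSend, retain / release / autorelease sends are
+  // stripped out when compiling in GC-only mode.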
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
+ if (Sel == RetainSel || Sel == AutoreleaseSel) {
+ return RValue::get(EnforceType(Builder, Receiver,
+ CGM.getTypes().ConvertType(ResultType)));
+ }
+ if (Sel == ReleaseSel) {
+ return RValue::get(nullptr);
+ }
+ }
+
+ llvm::Value *cmd = GetSelector(CGF, Sel);
+
+
+ CallArgList ActualArgs;
+
+ ActualArgs.add(RValue::get(EnforceType(Builder, Receiver, IdTy)), ASTIdTy);
+ ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
+ ActualArgs.addFrom(CallArgs);
+
+ MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);
+
+ llvm::Value *ReceiverClass = nullptr;
+ if (isCategoryImpl) {
+ llvm::Constant *classLookupFunction = nullptr;
+ if (IsClassMessage) {
+ classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ IdTy, PtrTy, true), "objc_get_meta_class");
+ } else {
+ classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ IdTy, PtrTy, true), "objc_get_class");
+ }
+ ReceiverClass = Builder.CreateCall(classLookupFunction,
+ MakeConstantString(Class->getNameAsString()));
+ } else {
+    // Set up global aliases for the metaclass or class pointer if they do not
+    // already exist. These are forward references which will be set to
+    // pointers to the class and metaclass structure created for the runtime
+    // load function. To send a message to super, we look up the value of the
+    // super_class pointer from either the class or metaclass structure.
+ if (IsClassMessage) {
+ if (!MetaClassPtrAlias) {
+ MetaClassPtrAlias = llvm::GlobalAlias::create(
+ IdTy->getElementType(), 0, llvm::GlobalValue::InternalLinkage,
+ ".objc_metaclass_ref" + Class->getNameAsString(), &TheModule);
+ }
+ ReceiverClass = MetaClassPtrAlias;
+ } else {
+ if (!ClassPtrAlias) {
+ ClassPtrAlias = llvm::GlobalAlias::create(
+ IdTy->getElementType(), 0, llvm::GlobalValue::InternalLinkage,
+ ".objc_class_ref" + Class->getNameAsString(), &TheModule);
+ }
+ ReceiverClass = ClassPtrAlias;
+ }
+ }
+ // Cast the pointer to a simplified version of the class structure
+ llvm::Type *CastTy = llvm::StructType::get(IdTy, IdTy, nullptr);
+ ReceiverClass = Builder.CreateBitCast(ReceiverClass,
+ llvm::PointerType::getUnqual(CastTy));
+ // Get the superclass pointer
+ ReceiverClass = Builder.CreateStructGEP(CastTy, ReceiverClass, 1);
+ // Load the superclass pointer
+ ReceiverClass =
+ Builder.CreateAlignedLoad(ReceiverClass, CGF.getPointerAlign());
+ // Construct the structure used to look up the IMP
+ llvm::StructType *ObjCSuperTy = llvm::StructType::get(
+ Receiver->getType(), IdTy, nullptr);
+
+ // FIXME: Is this really supposed to be a dynamic alloca?
+ Address ObjCSuper = Address(Builder.CreateAlloca(ObjCSuperTy),
+ CGF.getPointerAlign());
+
+ Builder.CreateStore(Receiver,
+ Builder.CreateStructGEP(ObjCSuper, 0, CharUnits::Zero()));
+ Builder.CreateStore(ReceiverClass,
+ Builder.CreateStructGEP(ObjCSuper, 1, CGF.getPointerSize()));
+
+ ObjCSuper = EnforceType(Builder, ObjCSuper, PtrToObjCSuperTy);
+
+ // Get the IMP
+ llvm::Value *imp = LookupIMPSuper(CGF, ObjCSuper, cmd, MSI);
+ imp = EnforceType(Builder, imp, MSI.MessengerType);
+
+ llvm::Metadata *impMD[] = {
+ llvm::MDString::get(VMContext, Sel.getAsString()),
+ llvm::MDString::get(VMContext, Class->getSuperClass()->getNameAsString()),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::Type::getInt1Ty(VMContext), IsClassMessage))};
+ llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD);
+
+ llvm::Instruction *call;
+ RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs,
+ CGCalleeInfo(), &call);
+ call->setMetadata(msgSendMDKind, node);
+ return msgRet;
+}
+
+/// Generate code for a message send expression.
+RValue
+CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ // Strip out message sends to retain / release in GC mode
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
+ if (Sel == RetainSel || Sel == AutoreleaseSel) {
+ return RValue::get(EnforceType(Builder, Receiver,
+ CGM.getTypes().ConvertType(ResultType)));
+ }
+ if (Sel == ReleaseSel) {
+ return RValue::get(nullptr);
+ }
+ }
+
+ // If the return type is something that goes in an integer register, the
+ // runtime will handle 0 returns. For other cases, we fill in the 0 value
+ // ourselves.
+ //
+ // The language spec says the result of this kind of message send is
+ // undefined, but lots of people seem to have forgotten to read that
+ // paragraph and insist on sending messages to nil that have structure
+ // returns. With GCC, this generates a random return value (whatever happens
+ // to be on the stack / in those registers at the time) on most platforms,
+ // and generates an illegal instruction trap on SPARC. With LLVM it corrupts
+ // the stack.
+ bool isPointerSizedReturn = (ResultType->isAnyPointerType() ||
+ ResultType->isIntegralOrEnumerationType() || ResultType->isVoidType());
+
+ llvm::BasicBlock *startBB = nullptr;
+ llvm::BasicBlock *messageBB = nullptr;
+ llvm::BasicBlock *continueBB = nullptr;
+
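+  // If the return value does not fit in a register, branch around the send
+  // entirely when the receiver is nil and merge a zeroed value back in with a
+  // PHI node afterwards.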
+ if (!isPointerSizedReturn) {
+ startBB = Builder.GetInsertBlock();
+ messageBB = CGF.createBasicBlock("msgSend");
+ continueBB = CGF.createBasicBlock("continue");
+
+ llvm::Value *isNil = Builder.CreateICmpEQ(Receiver,
+ llvm::Constant::getNullValue(Receiver->getType()));
+ Builder.CreateCondBr(isNil, continueBB, messageBB);
+ CGF.EmitBlock(messageBB);
+ }
+
+ IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
+ llvm::Value *cmd;
+ if (Method)
+ cmd = GetSelector(CGF, Method);
+ else
+ cmd = GetSelector(CGF, Sel);
+ cmd = EnforceType(Builder, cmd, SelectorTy);
+ Receiver = EnforceType(Builder, Receiver, IdTy);
+
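+  // Record the selector, the class name (if known), and whether this is a
+  // class message as metadata on the send; optimisation passes shipped with
+  // libobjc2 can use this to specialise the lookup.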
+ llvm::Metadata *impMD[] = {
+ llvm::MDString::get(VMContext, Sel.getAsString()),
+ llvm::MDString::get(VMContext, Class ? Class->getNameAsString() : ""),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::Type::getInt1Ty(VMContext), Class != nullptr))};
+ llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD);
+
+ CallArgList ActualArgs;
+ ActualArgs.add(RValue::get(Receiver), ASTIdTy);
+ ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
+ ActualArgs.addFrom(CallArgs);
+
+ MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);
+
+ // Get the IMP to call
+ llvm::Value *imp;
+
+  // If we have non-legacy dispatch specified, we try using the objc_msgSend()
+  // functions. These are not supported on all platforms (or all runtimes on a
+  // given platform), so we only use them when they have been explicitly
+  // requested, and fall back to the two-stage lookup-then-call sequence
+  // otherwise.
+ switch (CGM.getCodeGenOpts().getObjCDispatchMethod()) {
+ case CodeGenOptions::Legacy:
+ imp = LookupIMP(CGF, Receiver, cmd, node, MSI);
+ break;
+ case CodeGenOptions::Mixed:
+ case CodeGenOptions::NonLegacy:
+ if (CGM.ReturnTypeUsesFPRet(ResultType)) {
+ imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
+ "objc_msgSend_fpret");
+ } else if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) {
+ // The actual types here don't matter - we're going to bitcast the
+ // function anyway
+ imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
+ "objc_msgSend_stret");
+ } else {
+ imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
+ "objc_msgSend");
+ }
+ }
+
+ // Reset the receiver in case the lookup modified it
+ ActualArgs[0] = CallArg(RValue::get(Receiver), ASTIdTy, false);
+
+ imp = EnforceType(Builder, imp, MSI.MessengerType);
+
+ llvm::Instruction *call;
+ RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs,
+ CGCalleeInfo(), &call);
+ call->setMetadata(msgSendMDKind, node);
+
+
+ if (!isPointerSizedReturn) {
+ messageBB = CGF.Builder.GetInsertBlock();
+ CGF.Builder.CreateBr(continueBB);
+ CGF.EmitBlock(continueBB);
+ if (msgRet.isScalar()) {
+ llvm::Value *v = msgRet.getScalarVal();
+ llvm::PHINode *phi = Builder.CreatePHI(v->getType(), 2);
+ phi->addIncoming(v, messageBB);
+ phi->addIncoming(llvm::Constant::getNullValue(v->getType()), startBB);
+ msgRet = RValue::get(phi);
+ } else if (msgRet.isAggregate()) {
+ Address v = msgRet.getAggregateAddress();
+ llvm::PHINode *phi = Builder.CreatePHI(v.getType(), 2);
+ llvm::Type *RetTy = v.getElementType();
+ Address NullVal = CGF.CreateTempAlloca(RetTy, v.getAlignment(), "null");
+ CGF.InitTempAlloca(NullVal, llvm::Constant::getNullValue(RetTy));
+ phi->addIncoming(v.getPointer(), messageBB);
+ phi->addIncoming(NullVal.getPointer(), startBB);
+ msgRet = RValue::getAggregate(Address(phi, v.getAlignment()));
+ } else /* isComplex() */ {
+ std::pair<llvm::Value*,llvm::Value*> v = msgRet.getComplexVal();
+ llvm::PHINode *phi = Builder.CreatePHI(v.first->getType(), 2);
+ phi->addIncoming(v.first, messageBB);
+ phi->addIncoming(llvm::Constant::getNullValue(v.first->getType()),
+ startBB);
+ llvm::PHINode *phi2 = Builder.CreatePHI(v.second->getType(), 2);
+ phi2->addIncoming(v.second, messageBB);
+ phi2->addIncoming(llvm::Constant::getNullValue(v.second->getType()),
+ startBB);
+ msgRet = RValue::getComplex(phi, phi2);
+ }
+ }
+ return msgRet;
+}
+
+/// Generates a MethodList. Used in the construction of objc_class and
+/// objc_category structures.
+llvm::Constant *CGObjCGNU::
+GenerateMethodList(StringRef ClassName,
+ StringRef CategoryName,
+ ArrayRef<Selector> MethodSels,
+ ArrayRef<llvm::Constant *> MethodTypes,
+ bool isClassMethodList) {
+ if (MethodSels.empty())
+ return NULLPtr;
+ // Get the method structure type.
+ llvm::StructType *ObjCMethodTy = llvm::StructType::get(
+    PtrToInt8Ty, // Really a selector, but the runtime creates it for us.
+    PtrToInt8Ty, // Method types
+    IMPTy,       // Method pointer
+ nullptr);
+ std::vector<llvm::Constant*> Methods;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = MethodTypes.size(); i < e; ++i) {
+ Elements.clear();
+ llvm::Constant *Method =
+ TheModule.getFunction(SymbolNameForMethod(ClassName, CategoryName,
+ MethodSels[i],
+ isClassMethodList));
+ assert(Method && "Can't generate metadata for method that doesn't exist");
+ llvm::Constant *C = MakeConstantString(MethodSels[i].getAsString());
+ Elements.push_back(C);
+ Elements.push_back(MethodTypes[i]);
+ Method = llvm::ConstantExpr::getBitCast(Method,
+ IMPTy);
+ Elements.push_back(Method);
+ Methods.push_back(llvm::ConstantStruct::get(ObjCMethodTy, Elements));
+ }
+
+ // Array of method structures
+ llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodTy,
+ Methods.size());
+ llvm::Constant *MethodArray = llvm::ConstantArray::get(ObjCMethodArrayTy,
+ Methods);
+
+ // Structure containing list pointer, array and array count
+ llvm::StructType *ObjCMethodListTy = llvm::StructType::create(VMContext);
+ llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(ObjCMethodListTy);
+ ObjCMethodListTy->setBody(
+ NextPtrTy,
+ IntTy,
+ ObjCMethodArrayTy,
+ nullptr);
+
+ Methods.clear();
+ Methods.push_back(llvm::ConstantPointerNull::get(
+ llvm::PointerType::getUnqual(ObjCMethodListTy)));
+ Methods.push_back(llvm::ConstantInt::get(Int32Ty, MethodTypes.size()));
+ Methods.push_back(MethodArray);
+
+ // Create an instance of the structure
+ return MakeGlobal(ObjCMethodListTy, Methods, CGM.getPointerAlign(),
+ ".objc_method_list");
+}
+
+/// Generates an IvarList. Used in the construction of an objc_class.
+llvm::Constant *CGObjCGNU::
+GenerateIvarList(ArrayRef<llvm::Constant *> IvarNames,
+ ArrayRef<llvm::Constant *> IvarTypes,
+ ArrayRef<llvm::Constant *> IvarOffsets) {
+ if (IvarNames.size() == 0)
+ return NULLPtr;
+ // Get the method structure type.
+ llvm::StructType *ObjCIvarTy = llvm::StructType::get(
+ PtrToInt8Ty,
+ PtrToInt8Ty,
+ IntTy,
+ nullptr);
+ std::vector<llvm::Constant*> Ivars;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = IvarNames.size() ; i < e ; i++) {
+ Elements.clear();
+ Elements.push_back(IvarNames[i]);
+ Elements.push_back(IvarTypes[i]);
+ Elements.push_back(IvarOffsets[i]);
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCIvarTy, Elements));
+ }
+
+ // Array of method structures
+ llvm::ArrayType *ObjCIvarArrayTy = llvm::ArrayType::get(ObjCIvarTy,
+ IvarNames.size());
+
+
+ Elements.clear();
+ Elements.push_back(llvm::ConstantInt::get(IntTy, (int)IvarNames.size()));
+ Elements.push_back(llvm::ConstantArray::get(ObjCIvarArrayTy, Ivars));
+ // Structure containing array and array count
+ llvm::StructType *ObjCIvarListTy = llvm::StructType::get(IntTy,
+ ObjCIvarArrayTy,
+ nullptr);
+
+ // Create an instance of the structure
+ return MakeGlobal(ObjCIvarListTy, Elements, CGM.getPointerAlign(),
+ ".objc_ivar_list");
+}
+
+/// Generate a class structure
+llvm::Constant *CGObjCGNU::GenerateClassStructure(
+ llvm::Constant *MetaClass,
+ llvm::Constant *SuperClass,
+ unsigned info,
+ const char *Name,
+ llvm::Constant *Version,
+ llvm::Constant *InstanceSize,
+ llvm::Constant *IVars,
+ llvm::Constant *Methods,
+ llvm::Constant *Protocols,
+ llvm::Constant *IvarOffsets,
+ llvm::Constant *Properties,
+ llvm::Constant *StrongIvarBitmap,
+ llvm::Constant *WeakIvarBitmap,
+ bool isMeta) {
+ // Set up the class structure
+ // Note: Several of these are char*s when they should be ids. This is
+ // because the runtime performs this translation on load.
+ //
+  // Fields marked New ABI are part of the GNUstep runtime. We emit them
+  // anyway; the classes will still work with the GNU runtime, which simply
+  // ignores the extra fields.
+ llvm::StructType *ClassTy = llvm::StructType::get(
+ PtrToInt8Ty, // isa
+ PtrToInt8Ty, // super_class
+ PtrToInt8Ty, // name
+ LongTy, // version
+ LongTy, // info
+ LongTy, // instance_size
+ IVars->getType(), // ivars
+ Methods->getType(), // methods
+      // These are all filled in by the runtime, so we pretend they are
+      // ordinary pointers here.
+ PtrTy, // dtable
+ PtrTy, // subclass_list
+ PtrTy, // sibling_class
+ PtrTy, // protocols
+ PtrTy, // gc_object_type
+ // New ABI:
+ LongTy, // abi_version
+ IvarOffsets->getType(), // ivar_offsets
+ Properties->getType(), // properties
+ IntPtrTy, // strong_pointers
+ IntPtrTy, // weak_pointers
+ nullptr);
+ llvm::Constant *Zero = llvm::ConstantInt::get(LongTy, 0);
+ // Fill in the structure
+ std::vector<llvm::Constant*> Elements;
+ Elements.push_back(llvm::ConstantExpr::getBitCast(MetaClass, PtrToInt8Ty));
+ Elements.push_back(SuperClass);
+ Elements.push_back(MakeConstantString(Name, ".class_name"));
+ Elements.push_back(Zero);
+ Elements.push_back(llvm::ConstantInt::get(LongTy, info));
+ if (isMeta) {
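+    // For metaclasses, the instance size is the size of the class structure
+    // itself, not of any instance.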
+ llvm::DataLayout td(&TheModule);
+ Elements.push_back(
+ llvm::ConstantInt::get(LongTy,
+ td.getTypeSizeInBits(ClassTy) /
+ CGM.getContext().getCharWidth()));
+ } else
+ Elements.push_back(InstanceSize);
+ Elements.push_back(IVars);
+ Elements.push_back(Methods);
+ Elements.push_back(NULLPtr);
+ Elements.push_back(NULLPtr);
+ Elements.push_back(NULLPtr);
+ Elements.push_back(llvm::ConstantExpr::getBitCast(Protocols, PtrTy));
+ Elements.push_back(NULLPtr);
+ Elements.push_back(llvm::ConstantInt::get(LongTy, 1));
+ Elements.push_back(IvarOffsets);
+ Elements.push_back(Properties);
+ Elements.push_back(StrongIvarBitmap);
+ Elements.push_back(WeakIvarBitmap);
+ // Create an instance of the structure
+ // This is now an externally visible symbol, so that we can speed up class
+ // messages in the next ABI. We may already have some weak references to
+ // this, so check and fix them properly.
+ std::string ClassSym((isMeta ? "_OBJC_METACLASS_": "_OBJC_CLASS_") +
+ std::string(Name));
+ llvm::GlobalVariable *ClassRef = TheModule.getNamedGlobal(ClassSym);
+ llvm::Constant *Class =
+ MakeGlobal(ClassTy, Elements, CGM.getPointerAlign(), ClassSym,
+ llvm::GlobalValue::ExternalLinkage);
+ if (ClassRef) {
+ ClassRef->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(Class,
+ ClassRef->getType()));
+ ClassRef->removeFromParent();
+ Class->setName(ClassSym);
+ }
+ return Class;
+}
+
+llvm::Constant *CGObjCGNU::
+GenerateProtocolMethodList(ArrayRef<llvm::Constant *> MethodNames,
+ ArrayRef<llvm::Constant *> MethodTypes) {
+ // Get the method structure type.
+ llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(
+ PtrToInt8Ty, // Really a selector, but the runtime does the casting for us.
+ PtrToInt8Ty,
+ nullptr);
+ std::vector<llvm::Constant*> Methods;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = MethodTypes.size() ; i < e ; i++) {
+ Elements.clear();
+ Elements.push_back(MethodNames[i]);
+ Elements.push_back(MethodTypes[i]);
+ Methods.push_back(llvm::ConstantStruct::get(ObjCMethodDescTy, Elements));
+ }
+ llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodDescTy,
+ MethodNames.size());
+ llvm::Constant *Array = llvm::ConstantArray::get(ObjCMethodArrayTy,
+ Methods);
+ llvm::StructType *ObjCMethodDescListTy = llvm::StructType::get(
+ IntTy, ObjCMethodArrayTy, nullptr);
+ Methods.clear();
+ Methods.push_back(llvm::ConstantInt::get(IntTy, MethodNames.size()));
+ Methods.push_back(Array);
+ return MakeGlobal(ObjCMethodDescListTy, Methods, CGM.getPointerAlign(),
+ ".objc_method_list");
+}
+
+// Create the protocol list structure used in classes, categories and so on
+llvm::Constant *CGObjCGNU::GenerateProtocolList(ArrayRef<std::string>Protocols){
+ llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
+ Protocols.size());
+ llvm::StructType *ProtocolListTy = llvm::StructType::get(
+    PtrTy, // Should be a recursive pointer, but it's always NULL here.
+ SizeTy,
+ ProtocolArrayTy,
+ nullptr);
+ std::vector<llvm::Constant*> Elements;
+ for (const std::string *iter = Protocols.begin(), *endIter = Protocols.end();
+ iter != endIter ; iter++) {
+ llvm::Constant *protocol = nullptr;
+ llvm::StringMap<llvm::Constant*>::iterator value =
+ ExistingProtocols.find(*iter);
+ if (value == ExistingProtocols.end()) {
+ protocol = GenerateEmptyProtocol(*iter);
+ } else {
+ protocol = value->getValue();
+ }
+ llvm::Constant *Ptr = llvm::ConstantExpr::getBitCast(protocol,
+ PtrToInt8Ty);
+ Elements.push_back(Ptr);
+ }
+ llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
+ Elements);
+ Elements.clear();
+ Elements.push_back(NULLPtr);
+ Elements.push_back(llvm::ConstantInt::get(LongTy, Protocols.size()));
+ Elements.push_back(ProtocolArray);
+ return MakeGlobal(ProtocolListTy, Elements, CGM.getPointerAlign(),
+ ".objc_protocol_list");
+}
+
+llvm::Value *CGObjCGNU::GenerateProtocolRef(CodeGenFunction &CGF,
+ const ObjCProtocolDecl *PD) {
+ llvm::Value *protocol = ExistingProtocols[PD->getNameAsString()];
+ llvm::Type *T =
+ CGM.getTypes().ConvertType(CGM.getContext().getObjCProtoType());
+ return CGF.Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
+}
+
+llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
+ const std::string &ProtocolName) {
+ SmallVector<std::string, 0> EmptyStringVector;
+ SmallVector<llvm::Constant*, 0> EmptyConstantVector;
+
+ llvm::Constant *ProtocolList = GenerateProtocolList(EmptyStringVector);
+ llvm::Constant *MethodList =
+ GenerateProtocolMethodList(EmptyConstantVector, EmptyConstantVector);
+ // Protocols are objects containing lists of the methods implemented and
+ // protocols adopted.
+ llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
+ PtrToInt8Ty,
+ ProtocolList->getType(),
+ MethodList->getType(),
+ MethodList->getType(),
+ MethodList->getType(),
+ MethodList->getType(),
+ nullptr);
+ std::vector<llvm::Constant*> Elements;
+ // The isa pointer must be set to a magic number so the runtime knows it's
+ // the correct layout.
+ Elements.push_back(llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy));
+ Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
+ Elements.push_back(ProtocolList);
+ Elements.push_back(MethodList);
+ Elements.push_back(MethodList);
+ Elements.push_back(MethodList);
+ Elements.push_back(MethodList);
+ return MakeGlobal(ProtocolTy, Elements, CGM.getPointerAlign(),
+ ".objc_protocol");
+}
+
+void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
+ ASTContext &Context = CGM.getContext();
+ std::string ProtocolName = PD->getNameAsString();
+
+ // Use the protocol definition, if there is one.
+ if (const ObjCProtocolDecl *Def = PD->getDefinition())
+ PD = Def;
+
+ SmallVector<std::string, 16> Protocols;
+ for (const auto *PI : PD->protocols())
+ Protocols.push_back(PI->getNameAsString());
+ SmallVector<llvm::Constant*, 16> InstanceMethodNames;
+ SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ SmallVector<llvm::Constant*, 16> OptionalInstanceMethodNames;
+ SmallVector<llvm::Constant*, 16> OptionalInstanceMethodTypes;
+ for (const auto *I : PD->instance_methods()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(I, TypeStr);
+ if (I->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptionalInstanceMethodNames.push_back(
+ MakeConstantString(I->getSelector().getAsString()));
+ OptionalInstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+ } else {
+ InstanceMethodNames.push_back(
+ MakeConstantString(I->getSelector().getAsString()));
+ InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+ }
+ // Collect information about class methods:
+ SmallVector<llvm::Constant*, 16> ClassMethodNames;
+ SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ SmallVector<llvm::Constant*, 16> OptionalClassMethodNames;
+ SmallVector<llvm::Constant*, 16> OptionalClassMethodTypes;
+ for (const auto *I : PD->class_methods()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(I,TypeStr);
+ if (I->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptionalClassMethodNames.push_back(
+ MakeConstantString(I->getSelector().getAsString()));
+ OptionalClassMethodTypes.push_back(MakeConstantString(TypeStr));
+ } else {
+ ClassMethodNames.push_back(
+ MakeConstantString(I->getSelector().getAsString()));
+ ClassMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+ }
+
+ llvm::Constant *ProtocolList = GenerateProtocolList(Protocols);
+ llvm::Constant *InstanceMethodList =
+ GenerateProtocolMethodList(InstanceMethodNames, InstanceMethodTypes);
+ llvm::Constant *ClassMethodList =
+ GenerateProtocolMethodList(ClassMethodNames, ClassMethodTypes);
+ llvm::Constant *OptionalInstanceMethodList =
+ GenerateProtocolMethodList(OptionalInstanceMethodNames,
+ OptionalInstanceMethodTypes);
+ llvm::Constant *OptionalClassMethodList =
+ GenerateProtocolMethodList(OptionalClassMethodNames,
+ OptionalClassMethodTypes);
+
+ // Property metadata: name, attributes, isSynthesized, setter name, setter
+ // types, getter name, getter types.
+ // The isSynthesized value is always set to 0 in a protocol. It exists to
+ // simplify the runtime library by allowing it to use the same data
+ // structures for protocol metadata everywhere.
+ llvm::StructType *PropertyMetadataTy = llvm::StructType::get(
+ PtrToInt8Ty, Int8Ty, Int8Ty, Int8Ty, Int8Ty, PtrToInt8Ty,
+ PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty, nullptr);
+ std::vector<llvm::Constant*> Properties;
+ std::vector<llvm::Constant*> OptionalProperties;
+
+  // Add all of the property methods that need adding to the method list and
+  // to the property metadata list.
+ for (auto *property : PD->properties()) {
+ std::vector<llvm::Constant*> Fields;
+
+ Fields.push_back(MakePropertyEncodingString(property, nullptr));
+ PushPropertyAttributes(Fields, property);
+
+ if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(getter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ InstanceMethodTypes.push_back(TypeEncoding);
+ Fields.push_back(MakeConstantString(getter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(setter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ InstanceMethodTypes.push_back(TypeEncoding);
+ Fields.push_back(MakeConstantString(setter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ if (property->getPropertyImplementation() == ObjCPropertyDecl::Optional) {
+ OptionalProperties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+ } else {
+ Properties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+ }
+ }
+ llvm::Constant *PropertyArray = llvm::ConstantArray::get(
+ llvm::ArrayType::get(PropertyMetadataTy, Properties.size()), Properties);
+ llvm::Constant* PropertyListInitFields[] =
+ {llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
+
+ llvm::Constant *PropertyListInit =
+ llvm::ConstantStruct::getAnon(PropertyListInitFields);
+ llvm::Constant *PropertyList = new llvm::GlobalVariable(TheModule,
+ PropertyListInit->getType(), false, llvm::GlobalValue::InternalLinkage,
+ PropertyListInit, ".objc_property_list");
+
+ llvm::Constant *OptionalPropertyArray =
+ llvm::ConstantArray::get(llvm::ArrayType::get(PropertyMetadataTy,
+ OptionalProperties.size()) , OptionalProperties);
+ llvm::Constant* OptionalPropertyListInitFields[] = {
+ llvm::ConstantInt::get(IntTy, OptionalProperties.size()), NULLPtr,
+ OptionalPropertyArray };
+
+ llvm::Constant *OptionalPropertyListInit =
+ llvm::ConstantStruct::getAnon(OptionalPropertyListInitFields);
+ llvm::Constant *OptionalPropertyList = new llvm::GlobalVariable(TheModule,
+ OptionalPropertyListInit->getType(), false,
+ llvm::GlobalValue::InternalLinkage, OptionalPropertyListInit,
+ ".objc_property_list");
+
+ // Protocols are objects containing lists of the methods implemented and
+ // protocols adopted.
+ llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
+ PtrToInt8Ty,
+ ProtocolList->getType(),
+ InstanceMethodList->getType(),
+ ClassMethodList->getType(),
+ OptionalInstanceMethodList->getType(),
+ OptionalClassMethodList->getType(),
+ PropertyList->getType(),
+ OptionalPropertyList->getType(),
+ nullptr);
+ std::vector<llvm::Constant*> Elements;
+ // The isa pointer must be set to a magic number so the runtime knows it's
+ // the correct layout.
+ Elements.push_back(llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy));
+ Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
+ Elements.push_back(ProtocolList);
+ Elements.push_back(InstanceMethodList);
+ Elements.push_back(ClassMethodList);
+ Elements.push_back(OptionalInstanceMethodList);
+ Elements.push_back(OptionalClassMethodList);
+ Elements.push_back(PropertyList);
+ Elements.push_back(OptionalPropertyList);
+ ExistingProtocols[ProtocolName] =
+ llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolTy, Elements,
+ CGM.getPointerAlign(), ".objc_protocol"), IdTy);
+}
+void CGObjCGNU::GenerateProtocolHolderCategory() {
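+  // Attach every protocol referenced in this module to a category on a
+  // placeholder class, so that they are all registered with the runtime at
+  // load time.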
+ // Collect information about instance methods
+ SmallVector<Selector, 1> MethodSels;
+ SmallVector<llvm::Constant*, 1> MethodTypes;
+
+ std::vector<llvm::Constant*> Elements;
+ const std::string ClassName = "__ObjC_Protocol_Holder_Ugly_Hack";
+ const std::string CategoryName = "AnotherHack";
+ Elements.push_back(MakeConstantString(CategoryName));
+ Elements.push_back(MakeConstantString(ClassName));
+ // Instance method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, MethodSels, MethodTypes, false), PtrTy));
+ // Class method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, MethodSels, MethodTypes, true), PtrTy));
+ // Protocol list
+ llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrTy,
+ ExistingProtocols.size());
+ llvm::StructType *ProtocolListTy = llvm::StructType::get(
+    PtrTy, // Should be a recursive pointer, but it's always NULL here.
+ SizeTy,
+ ProtocolArrayTy,
+ nullptr);
+ std::vector<llvm::Constant*> ProtocolElements;
+ for (llvm::StringMapIterator<llvm::Constant*> iter =
+ ExistingProtocols.begin(), endIter = ExistingProtocols.end();
+ iter != endIter ; iter++) {
+ llvm::Constant *Ptr = llvm::ConstantExpr::getBitCast(iter->getValue(),
+ PtrTy);
+ ProtocolElements.push_back(Ptr);
+ }
+ llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
+ ProtocolElements);
+ ProtocolElements.clear();
+ ProtocolElements.push_back(NULLPtr);
+ ProtocolElements.push_back(llvm::ConstantInt::get(LongTy,
+ ExistingProtocols.size()));
+ ProtocolElements.push_back(ProtocolArray);
+ Elements.push_back(llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolListTy,
+ ProtocolElements, CGM.getPointerAlign(),
+ ".objc_protocol_list"), PtrTy));
+ Categories.push_back(llvm::ConstantExpr::getBitCast(
+ MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
+ PtrTy, PtrTy, PtrTy, nullptr), Elements, CGM.getPointerAlign()),
+ PtrTy));
+}
+
+/// Libobjc2 uses a bitfield representation where small(ish) bitfields are
+/// stored in a 64-bit value with the low bit set to 1 and the remaining 63
+/// bits set to their values, LSB first, while larger ones are stored in a
+/// structure of this form:
+///
+/// struct { int32_t length; int32_t values[length]; };
+///
+/// The values in the array are stored in host-endian format, with the least
+/// significant bit being assumed to come first in the bitfield. Therefore, a
+/// bitfield with the 64th bit set will be (int64_t)&{ 2, [0, 1<<31] }, while a
+/// bitfield with the 63rd bit set will be 1<<64.
+llvm::Constant *CGObjCGNU::MakeBitField(ArrayRef<bool> bits) {
+ int bitCount = bits.size();
+ int ptrBits = CGM.getDataLayout().getPointerSizeInBits();
+ if (bitCount < ptrBits) {
+ uint64_t val = 1;
+ for (int i=0 ; i<bitCount ; ++i) {
+ if (bits[i]) val |= 1ULL<<(i+1);
+ }
+ return llvm::ConstantInt::get(IntPtrTy, val);
+ }
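+  // Bitfields that do not fit in a pointer are emitted out of line as
+  // { int32_t length; int32_t values[length]; } and referenced by address.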
+ SmallVector<llvm::Constant *, 8> values;
+ int v=0;
+ while (v < bitCount) {
+ int32_t word = 0;
+ for (int i=0 ; (i<32) && (v<bitCount) ; ++i) {
+ if (bits[v]) word |= 1<<i;
+ v++;
+ }
+ values.push_back(llvm::ConstantInt::get(Int32Ty, word));
+ }
+ llvm::ArrayType *arrayTy = llvm::ArrayType::get(Int32Ty, values.size());
+ llvm::Constant *array = llvm::ConstantArray::get(arrayTy, values);
+ llvm::Constant *fields[2] = {
+ llvm::ConstantInt::get(Int32Ty, values.size()),
+ array };
+ llvm::Constant *GS = MakeGlobal(llvm::StructType::get(Int32Ty, arrayTy,
+ nullptr), fields, CharUnits::fromQuantity(4));
+ llvm::Constant *ptr = llvm::ConstantExpr::getPtrToInt(GS, IntPtrTy);
+ return ptr;
+}
+
+void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ std::string ClassName = OCD->getClassInterface()->getNameAsString();
+ std::string CategoryName = OCD->getNameAsString();
+ // Collect information about instance methods
+ SmallVector<Selector, 16> InstanceMethodSels;
+ SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ for (const auto *I : OCD->instance_methods()) {
+ InstanceMethodSels.push_back(I->getSelector());
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForMethodDecl(I,TypeStr);
+ InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+
+ // Collect information about class methods
+ SmallVector<Selector, 16> ClassMethodSels;
+ SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ for (const auto *I : OCD->class_methods()) {
+ ClassMethodSels.push_back(I->getSelector());
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForMethodDecl(I,TypeStr);
+ ClassMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+
+ // Collect the names of referenced protocols
+ SmallVector<std::string, 16> Protocols;
+ const ObjCCategoryDecl *CatDecl = OCD->getCategoryDecl();
+ const ObjCList<ObjCProtocolDecl> &Protos = CatDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(),
+ E = Protos.end(); I != E; ++I)
+ Protocols.push_back((*I)->getNameAsString());
+
+ std::vector<llvm::Constant*> Elements;
+ Elements.push_back(MakeConstantString(CategoryName));
+ Elements.push_back(MakeConstantString(ClassName));
+ // Instance method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, InstanceMethodSels, InstanceMethodTypes,
+ false), PtrTy));
+ // Class method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, ClassMethodSels, ClassMethodTypes, true),
+ PtrTy));
+ // Protocol list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(
+ GenerateProtocolList(Protocols), PtrTy));
+ Categories.push_back(llvm::ConstantExpr::getBitCast(
+ MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
+ PtrTy, PtrTy, PtrTy, nullptr), Elements, CGM.getPointerAlign()),
+ PtrTy));
+}
+
+llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OID,
+ SmallVectorImpl<Selector> &InstanceMethodSels,
+ SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes) {
+ ASTContext &Context = CGM.getContext();
+ // Property metadata: name, attributes, attributes2, padding1, padding2,
+ // setter name, setter types, getter name, getter types.
+ llvm::StructType *PropertyMetadataTy = llvm::StructType::get(
+ PtrToInt8Ty, Int8Ty, Int8Ty, Int8Ty, Int8Ty, PtrToInt8Ty,
+ PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty, nullptr);
+ std::vector<llvm::Constant*> Properties;
+
+  // Add all of the property methods that need adding to the method list and
+  // to the property metadata list.
+ for (auto *propertyImpl : OID->property_impls()) {
+ std::vector<llvm::Constant*> Fields;
+ ObjCPropertyDecl *property = propertyImpl->getPropertyDecl();
+ bool isSynthesized = (propertyImpl->getPropertyImplementation() ==
+ ObjCPropertyImplDecl::Synthesize);
+ bool isDynamic = (propertyImpl->getPropertyImplementation() ==
+ ObjCPropertyImplDecl::Dynamic);
+
+ Fields.push_back(MakePropertyEncodingString(property, OID));
+ PushPropertyAttributes(Fields, property, isSynthesized, isDynamic);
+ if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(getter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ if (isSynthesized) {
+ InstanceMethodTypes.push_back(TypeEncoding);
+ InstanceMethodSels.push_back(getter->getSelector());
+ }
+ Fields.push_back(MakeConstantString(getter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(setter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ if (isSynthesized) {
+ InstanceMethodTypes.push_back(TypeEncoding);
+ InstanceMethodSels.push_back(setter->getSelector());
+ }
+ Fields.push_back(MakeConstantString(setter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ Properties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+ }
+ llvm::ArrayType *PropertyArrayTy =
+ llvm::ArrayType::get(PropertyMetadataTy, Properties.size());
+ llvm::Constant *PropertyArray = llvm::ConstantArray::get(PropertyArrayTy,
+ Properties);
+ llvm::Constant* PropertyListInitFields[] =
+ {llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
+
+ llvm::Constant *PropertyListInit =
+ llvm::ConstantStruct::getAnon(PropertyListInitFields);
+ return new llvm::GlobalVariable(TheModule, PropertyListInit->getType(), false,
+ llvm::GlobalValue::InternalLinkage, PropertyListInit,
+ ".objc_property_list");
+}
+
+void CGObjCGNU::RegisterAlias(const ObjCCompatibleAliasDecl *OAD) {
+ // Get the class declaration for which the alias is specified.
+ ObjCInterfaceDecl *ClassDecl =
+ const_cast<ObjCInterfaceDecl *>(OAD->getClassInterface());
+ ClassAliases.emplace_back(ClassDecl->getNameAsString(),
+ OAD->getNameAsString());
+}
+
+void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
+ ASTContext &Context = CGM.getContext();
+
+ // Get the superclass name.
+ const ObjCInterfaceDecl * SuperClassDecl =
+ OID->getClassInterface()->getSuperClass();
+ std::string SuperClassName;
+ if (SuperClassDecl) {
+ SuperClassName = SuperClassDecl->getNameAsString();
+ EmitClassRef(SuperClassName);
+ }
+
+ // Get the class name
+ ObjCInterfaceDecl *ClassDecl =
+ const_cast<ObjCInterfaceDecl *>(OID->getClassInterface());
+ std::string ClassName = ClassDecl->getNameAsString();
+ // Emit the symbol that is used to generate linker errors if this class is
+ // referenced in other modules but not declared.
+ std::string classSymbolName = "__objc_class_name_" + ClassName;
+ if (llvm::GlobalVariable *symbol =
+ TheModule.getGlobalVariable(classSymbolName)) {
+ symbol->setInitializer(llvm::ConstantInt::get(LongTy, 0));
+ } else {
+ new llvm::GlobalVariable(TheModule, LongTy, false,
+ llvm::GlobalValue::ExternalLinkage, llvm::ConstantInt::get(LongTy, 0),
+ classSymbolName);
+ }
+
+ // Get the size of instances.
+ int instanceSize =
+ Context.getASTObjCImplementationLayout(OID).getSize().getQuantity();
+
+ // Collect information about instance variables.
+ SmallVector<llvm::Constant*, 16> IvarNames;
+ SmallVector<llvm::Constant*, 16> IvarTypes;
+ SmallVector<llvm::Constant*, 16> IvarOffsets;
+
+ std::vector<llvm::Constant*> IvarOffsetValues;
+ SmallVector<bool, 16> WeakIvars;
+ SmallVector<bool, 16> StrongIvars;
+
+ int superInstanceSize = !SuperClassDecl ? 0 :
+ Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize().getQuantity();
+ // For non-fragile ivars, set the instance size to 0 - {the size of just this
+ // class}. The runtime will then set this to the correct value on load.
+ if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) {
+ instanceSize = 0 - (instanceSize - superInstanceSize);
+ }
+
+ for (const ObjCIvarDecl *IVD = ClassDecl->all_declared_ivar_begin(); IVD;
+ IVD = IVD->getNextIvar()) {
+ // Store the name
+ IvarNames.push_back(MakeConstantString(IVD->getNameAsString()));
+ // Get the type encoding for this ivar
+ std::string TypeStr;
+ Context.getObjCEncodingForType(IVD->getType(), TypeStr);
+ IvarTypes.push_back(MakeConstantString(TypeStr));
+ // Get the offset
+ uint64_t BaseOffset = ComputeIvarBaseOffset(CGM, OID, IVD);
+ uint64_t Offset = BaseOffset;
+ if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) {
+ Offset = BaseOffset - superInstanceSize;
+ }
+ llvm::Constant *OffsetValue = llvm::ConstantInt::get(IntTy, Offset);
+ // Create the direct offset value
+ std::string OffsetName = "__objc_ivar_offset_value_" + ClassName +"." +
+ IVD->getNameAsString();
+ llvm::GlobalVariable *OffsetVar = TheModule.getGlobalVariable(OffsetName);
+ if (OffsetVar) {
+ OffsetVar->setInitializer(OffsetValue);
+ // If this is the real definition, change its linkage type so that
+ // different modules will use this one, rather than their private
+ // copy.
+ OffsetVar->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ } else
+ OffsetVar = new llvm::GlobalVariable(TheModule, IntTy,
+ false, llvm::GlobalValue::ExternalLinkage,
+ OffsetValue,
+ "__objc_ivar_offset_value_" + ClassName +"." +
+ IVD->getNameAsString());
+ IvarOffsets.push_back(OffsetValue);
+ IvarOffsetValues.push_back(OffsetVar);
+ Qualifiers::ObjCLifetime lt = IVD->getType().getQualifiers().getObjCLifetime();
+ switch (lt) {
+ case Qualifiers::OCL_Strong:
+ StrongIvars.push_back(true);
+ WeakIvars.push_back(false);
+ break;
+ case Qualifiers::OCL_Weak:
+ StrongIvars.push_back(false);
+ WeakIvars.push_back(true);
+ break;
+ default:
+ StrongIvars.push_back(false);
+ WeakIvars.push_back(false);
+ }
+ }
+ llvm::Constant *StrongIvarBitmap = MakeBitField(StrongIvars);
+ llvm::Constant *WeakIvarBitmap = MakeBitField(WeakIvars);
+ llvm::GlobalVariable *IvarOffsetArray =
+ MakeGlobalArray(PtrToIntTy, IvarOffsetValues, CGM.getPointerAlign(),
+ ".ivar.offsets");
+
+
+ // Collect information about instance methods
+ SmallVector<Selector, 16> InstanceMethodSels;
+ SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ for (const auto *I : OID->instance_methods()) {
+ InstanceMethodSels.push_back(I->getSelector());
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(I,TypeStr);
+ InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+
+ llvm::Constant *Properties = GeneratePropertyList(OID, InstanceMethodSels,
+ InstanceMethodTypes);
+
+
+ // Collect information about class methods
+ SmallVector<Selector, 16> ClassMethodSels;
+ SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ for (const auto *I : OID->class_methods()) {
+ ClassMethodSels.push_back(I->getSelector());
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(I,TypeStr);
+ ClassMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+ // Collect the names of referenced protocols
+ SmallVector<std::string, 16> Protocols;
+ for (const auto *I : ClassDecl->protocols())
+ Protocols.push_back(I->getNameAsString());
+
+ // Get the superclass pointer.
+ llvm::Constant *SuperClass;
+ if (!SuperClassName.empty()) {
+ SuperClass = MakeConstantString(SuperClassName, ".super_class_name");
+ } else {
+ SuperClass = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+ }
+ // Empty vector used to construct empty method lists
+ SmallVector<llvm::Constant*, 1> empty;
+ // Generate the method and instance variable lists
+ llvm::Constant *MethodList = GenerateMethodList(ClassName, "",
+ InstanceMethodSels, InstanceMethodTypes, false);
+ llvm::Constant *ClassMethodList = GenerateMethodList(ClassName, "",
+ ClassMethodSels, ClassMethodTypes, true);
+ llvm::Constant *IvarList = GenerateIvarList(IvarNames, IvarTypes,
+ IvarOffsets);
+ // Irrespective of whether we are compiling for a fragile or non-fragile ABI,
+ // we emit a symbol containing the offset for each ivar in the class. This
+  // allows code compiled for the non-fragile ABI to inherit from code compiled
+ // for the legacy ABI, without causing problems. The converse is also
+ // possible, but causes all ivar accesses to be fragile.
+
+ // Offset pointer for getting at the correct field in the ivar list when
+ // setting up the alias. These are: The base address for the global, the
+ // ivar array (second field), the ivar in this list (set for each ivar), and
+ // the offset (third field in ivar structure)
+ llvm::Type *IndexTy = Int32Ty;
+ llvm::Constant *offsetPointerIndexes[] = {Zeros[0],
+ llvm::ConstantInt::get(IndexTy, 1), nullptr,
+ llvm::ConstantInt::get(IndexTy, 2) };
+
+ unsigned ivarIndex = 0;
+ for (const ObjCIvarDecl *IVD = ClassDecl->all_declared_ivar_begin(); IVD;
+ IVD = IVD->getNextIvar()) {
+ const std::string Name = "__objc_ivar_offset_" + ClassName + '.'
+ + IVD->getNameAsString();
+ offsetPointerIndexes[2] = llvm::ConstantInt::get(IndexTy, ivarIndex);
+ // Get the correct ivar field
+ llvm::Constant *offsetValue = llvm::ConstantExpr::getGetElementPtr(
+ cast<llvm::GlobalVariable>(IvarList)->getValueType(), IvarList,
+ offsetPointerIndexes);
+ // Get the existing variable, if one exists.
+ llvm::GlobalVariable *offset = TheModule.getNamedGlobal(Name);
+ if (offset) {
+ offset->setInitializer(offsetValue);
+ // If this is the real definition, change its linkage type so that
+ // different modules will use this one, rather than their private
+ // copy.
+ offset->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ } else {
+ // Add a new alias if there isn't one already.
+ offset = new llvm::GlobalVariable(TheModule, offsetValue->getType(),
+ false, llvm::GlobalValue::ExternalLinkage, offsetValue, Name);
+ (void) offset; // Silence dead store warning.
+ }
+ ++ivarIndex;
+ }
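The loop above is easier to follow with a concrete picture. Roughly, in C, for a hypothetical class Foo with a single ivar bar (names are illustrative; the real symbol name contains a '.', and the GEP indices {0, 1, ivarIndex, 2} select the third field of the ivarIndex-th list entry):

    struct objc_ivar { const char *name; const char *type; int offset; };
    static struct { int count; struct objc_ivar list[1]; } IvarList_Foo;
    /* exported alias pointing into the offset slot of the entry: */
    int *__objc_ivar_offset_Foo_bar = &IvarList_Foo.list[0].offset;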
+ llvm::Constant *ZeroPtr = llvm::ConstantInt::get(IntPtrTy, 0);
+ // Generate the metaclass for class methods.
+ llvm::Constant *MetaClassStruct = GenerateClassStructure(NULLPtr,
+ NULLPtr, 0x12L, ClassName.c_str(), nullptr, Zeros[0], GenerateIvarList(
+ empty, empty, empty), ClassMethodList, NULLPtr,
+ NULLPtr, NULLPtr, ZeroPtr, ZeroPtr, true);
+
+ // Generate the class structure
+ llvm::Constant *ClassStruct =
+ GenerateClassStructure(MetaClassStruct, SuperClass, 0x11L,
+ ClassName.c_str(), nullptr,
+ llvm::ConstantInt::get(LongTy, instanceSize), IvarList,
+ MethodList, GenerateProtocolList(Protocols), IvarOffsetArray,
+ Properties, StrongIvarBitmap, WeakIvarBitmap);
+
+ // Resolve the class aliases, if they exist.
+ if (ClassPtrAlias) {
+ ClassPtrAlias->replaceAllUsesWith(
+ llvm::ConstantExpr::getBitCast(ClassStruct, IdTy));
+ ClassPtrAlias->eraseFromParent();
+ ClassPtrAlias = nullptr;
+ }
+ if (MetaClassPtrAlias) {
+ MetaClassPtrAlias->replaceAllUsesWith(
+ llvm::ConstantExpr::getBitCast(MetaClassStruct, IdTy));
+ MetaClassPtrAlias->eraseFromParent();
+ MetaClassPtrAlias = nullptr;
+ }
+
+ // Add class structure to list to be added to the symtab later
+ ClassStruct = llvm::ConstantExpr::getBitCast(ClassStruct, PtrToInt8Ty);
+ Classes.push_back(ClassStruct);
+}
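For orientation, the two GenerateClassStructure calls above build the usual class/metaclass pair; sketching the fields that differ (0x12 and 0x11 are the runtime's metaclass and class info flags):

    // metaclass: isa = NULL (filled in by the runtime), super = NULL,
    //            methods = ClassMethodList, ivars = empty list
    // class:     isa = MetaClassStruct, super = SuperClass,
    //            methods = MethodList, ivars = IvarList, size = instanceSize,
    //            plus protocols, properties, offset array, and ivar bitmaps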
+
+
+llvm::Function *CGObjCGNU::ModuleInitFunction() {
+ // Don't emit an ObjC load function if no Objective-C constructs were used
+ // in this module.
+ if (Classes.empty() && Categories.empty() && ConstantStrings.empty() &&
+ ExistingProtocols.empty() && SelectorTable.empty())
+ return nullptr;
+
+ // Add all referenced protocols to a category.
+ GenerateProtocolHolderCategory();
+
+ llvm::StructType *SelStructTy = dyn_cast<llvm::StructType>(
+ SelectorTy->getElementType());
+ llvm::Type *SelStructPtrTy = SelectorTy;
+ if (!SelStructTy) {
+ SelStructTy = llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, nullptr);
+ SelStructPtrTy = llvm::PointerType::getUnqual(SelStructTy);
+ }
+
+ std::vector<llvm::Constant*> Elements;
+ llvm::Constant *Statics = NULLPtr;
+ // Generate statics list:
+ if (!ConstantStrings.empty()) {
+ llvm::ArrayType *StaticsArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
+ ConstantStrings.size() + 1);
+ ConstantStrings.push_back(NULLPtr);
+
+ StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
+
+ if (StringClass.empty()) StringClass = "NXConstantString";
+
+ Elements.push_back(MakeConstantString(StringClass,
+ ".objc_static_class_name"));
+ Elements.push_back(llvm::ConstantArray::get(StaticsArrayTy,
+ ConstantStrings));
+ llvm::StructType *StaticsListTy =
+ llvm::StructType::get(PtrToInt8Ty, StaticsArrayTy, nullptr);
+ llvm::Type *StaticsListPtrTy =
+ llvm::PointerType::getUnqual(StaticsListTy);
+ Statics = MakeGlobal(StaticsListTy, Elements, CGM.getPointerAlign(),
+ ".objc_statics");
+ llvm::ArrayType *StaticsListArrayTy =
+ llvm::ArrayType::get(StaticsListPtrTy, 2);
+ Elements.clear();
+ Elements.push_back(Statics);
+ Elements.push_back(llvm::Constant::getNullValue(StaticsListPtrTy));
+ Statics = MakeGlobal(StaticsListArrayTy, Elements,
+ CGM.getPointerAlign(), ".objc_statics_ptr");
+ Statics = llvm::ConstantExpr::getBitCast(Statics, PtrTy);
+ }
+ // Array of classes, categories, and constant objects
+ llvm::ArrayType *ClassListTy = llvm::ArrayType::get(PtrToInt8Ty,
+ Classes.size() + Categories.size() + 2);
+ llvm::StructType *SymTabTy = llvm::StructType::get(LongTy, SelStructPtrTy,
+ llvm::Type::getInt16Ty(VMContext),
+ llvm::Type::getInt16Ty(VMContext),
+ ClassListTy, nullptr);
+
+ Elements.clear();
+ // Pointer to an array of selectors used in this module.
+ std::vector<llvm::Constant*> Selectors;
+ std::vector<llvm::GlobalAlias*> SelectorAliases;
+ for (SelectorMap::iterator iter = SelectorTable.begin(),
+ iterEnd = SelectorTable.end(); iter != iterEnd ; ++iter) {
+
+ std::string SelNameStr = iter->first.getAsString();
+ llvm::Constant *SelName = ExportUniqueString(SelNameStr, ".objc_sel_name");
+
+ SmallVectorImpl<TypedSelector> &Types = iter->second;
+ for (SmallVectorImpl<TypedSelector>::iterator i = Types.begin(),
+ e = Types.end() ; i!=e ; i++) {
+
+ llvm::Constant *SelectorTypeEncoding = NULLPtr;
+ if (!i->first.empty())
+ SelectorTypeEncoding = MakeConstantString(i->first, ".objc_sel_types");
+
+ Elements.push_back(SelName);
+ Elements.push_back(SelectorTypeEncoding);
+ Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+ Elements.clear();
+
+ // Store the selector alias for later replacement
+ SelectorAliases.push_back(i->second);
+ }
+ }
+ unsigned SelectorCount = Selectors.size();
+ // NULL-terminate the selector list. This should not actually be required,
+ // because the selector list has a length field. Unfortunately, the GCC
+ // runtime decides to ignore the length field and expects a NULL terminator,
+ // and GCC cooperates with this by always setting the length to 0.
+ Elements.push_back(NULLPtr);
+ Elements.push_back(NULLPtr);
+ Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+ Elements.clear();
+
+ // Number of static selectors
+ Elements.push_back(llvm::ConstantInt::get(LongTy, SelectorCount));
+ llvm::GlobalVariable *SelectorList =
+ MakeGlobalArray(SelStructTy, Selectors, CGM.getPointerAlign(),
+ ".objc_selector_list");
+ Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList,
+ SelStructPtrTy));
+
+ // Now that all of the static selectors exist, create pointers to them.
+ for (unsigned int i=0 ; i<SelectorCount ; i++) {
+
+ llvm::Constant *Idxs[] = {Zeros[0],
+ llvm::ConstantInt::get(Int32Ty, i), Zeros[0]};
+ // FIXME: We're generating redundant loads and stores here!
+ llvm::Constant *SelPtr = llvm::ConstantExpr::getGetElementPtr(
+ SelectorList->getValueType(), SelectorList, makeArrayRef(Idxs, 2));
+ // If selectors are defined as an opaque type, cast the pointer to this
+ // type.
+ SelPtr = llvm::ConstantExpr::getBitCast(SelPtr, SelectorTy);
+ SelectorAliases[i]->replaceAllUsesWith(SelPtr);
+ SelectorAliases[i]->eraseFromParent();
+ }
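The selector metadata assembled above has roughly this shape in C (a sketch; the struct name and entry values are illustrative):

    struct sel_entry { const char *name; const char *types; };
    static struct sel_entry objc_selector_list[] = {
        { "init", "@8@0:4" },   /* one entry per (name, type encoding) pair */
        /* ... */
        { NULL, NULL }          /* terminator the GCC runtime insists on */
    };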
+
+ // Number of classes defined.
+ Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
+ Classes.size()));
+ // Number of categories defined
+ Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
+ Categories.size()));
+ // Create an array of classes, then categories, then static object instances
+ Classes.insert(Classes.end(), Categories.begin(), Categories.end());
+ // NULL-terminated list of static object instances (mainly constant strings)
+ Classes.push_back(Statics);
+ Classes.push_back(NULLPtr);
+ llvm::Constant *ClassList = llvm::ConstantArray::get(ClassListTy, Classes);
+ Elements.push_back(ClassList);
+ // Construct the symbol table
+ llvm::Constant *SymTab =
+ MakeGlobal(SymTabTy, Elements, CGM.getPointerAlign());
+
+ // The symbol table is contained in a module which has some version-checking
+ // constants
+ llvm::StructType * ModuleTy = llvm::StructType::get(LongTy, LongTy,
+ PtrToInt8Ty, llvm::PointerType::getUnqual(SymTabTy),
+ (RuntimeVersion >= 10) ? IntTy : nullptr, nullptr);
+ Elements.clear();
+ // Runtime version, used for ABI compatibility checking.
+ Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion));
+ // sizeof(ModuleTy)
+ llvm::DataLayout td(&TheModule);
+ Elements.push_back(
+ llvm::ConstantInt::get(LongTy,
+ td.getTypeSizeInBits(ModuleTy) /
+ CGM.getContext().getCharWidth()));
+
+ // The path to the source file where this module was declared
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ const FileEntry *mainFile = SM.getFileEntryForID(SM.getMainFileID());
+ std::string path =
+ std::string(mainFile->getDir()->getName()) + '/' + mainFile->getName();
+ Elements.push_back(MakeConstantString(path, ".objc_source_file_name"));
+ Elements.push_back(SymTab);
+
+ if (RuntimeVersion >= 10)
+ switch (CGM.getLangOpts().getGC()) {
+ case LangOptions::GCOnly:
+ Elements.push_back(llvm::ConstantInt::get(IntTy, 2));
+ break;
+ case LangOptions::NonGC:
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ Elements.push_back(llvm::ConstantInt::get(IntTy, 1));
+ else
+ Elements.push_back(llvm::ConstantInt::get(IntTy, 0));
+ break;
+ case LangOptions::HybridGC:
+ Elements.push_back(llvm::ConstantInt::get(IntTy, 1));
+ break;
+ }
+
+ llvm::Value *Module = MakeGlobal(ModuleTy, Elements, CGM.getPointerAlign());
+
+ // Create the load function calling the runtime entry point with the module
+ // structure
+ llvm::Function * LoadFunction = llvm::Function::Create(
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), false),
+ llvm::GlobalValue::InternalLinkage, ".objc_load_function",
+ &TheModule);
+ llvm::BasicBlock *EntryBB =
+ llvm::BasicBlock::Create(VMContext, "entry", LoadFunction);
+ CGBuilderTy Builder(CGM, VMContext);
+ Builder.SetInsertPoint(EntryBB);
+
+ llvm::FunctionType *FT =
+ llvm::FunctionType::get(Builder.getVoidTy(),
+ llvm::PointerType::getUnqual(ModuleTy), true);
+ llvm::Value *Register = CGM.CreateRuntimeFunction(FT, "__objc_exec_class");
+ Builder.CreateCall(Register, Module);
+
+ if (!ClassAliases.empty()) {
+ llvm::Type *ArgTypes[2] = {PtrTy, PtrToInt8Ty};
+ llvm::FunctionType *RegisterAliasTy =
+ llvm::FunctionType::get(Builder.getVoidTy(),
+ ArgTypes, false);
+ llvm::Function *RegisterAlias = llvm::Function::Create(
+ RegisterAliasTy,
+ llvm::GlobalValue::ExternalWeakLinkage, "class_registerAlias_np",
+ &TheModule);
+ llvm::BasicBlock *AliasBB =
+ llvm::BasicBlock::Create(VMContext, "alias", LoadFunction);
+ llvm::BasicBlock *NoAliasBB =
+ llvm::BasicBlock::Create(VMContext, "no_alias", LoadFunction);
+
+ // Branch based on whether the runtime provided class_registerAlias_np()
+ llvm::Value *HasRegisterAlias = Builder.CreateICmpNE(RegisterAlias,
+ llvm::Constant::getNullValue(RegisterAlias->getType()));
+ Builder.CreateCondBr(HasRegisterAlias, AliasBB, NoAliasBB);
+
+ // The true branch (has alias registration function):
+ Builder.SetInsertPoint(AliasBB);
+ // Emit alias registration calls:
+ for (std::vector<ClassAliasPair>::iterator iter = ClassAliases.begin();
+ iter != ClassAliases.end(); ++iter) {
+ llvm::Constant *TheClass =
+ TheModule.getGlobalVariable(("_OBJC_CLASS_" + iter->first).c_str(),
+ true);
+ if (TheClass) {
+ TheClass = llvm::ConstantExpr::getBitCast(TheClass, PtrTy);
+ Builder.CreateCall(RegisterAlias,
+ {TheClass, MakeConstantString(iter->second)});
+ }
+ }
+ // Jump to end:
+ Builder.CreateBr(NoAliasBB);
+
+ // Missing alias registration function, just return from the function:
+ Builder.SetInsertPoint(NoAliasBB);
+ }
+ Builder.CreateRetVoid();
+
+ return LoadFunction;
+}
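In C terms, the function returned here behaves roughly like the sketch below; __objc_exec_class and class_registerAlias_np are the runtime entry points referenced above, while the module and class names are illustrative:

    static void objc_load_function(void) {
      __objc_exec_class(&objc_module);       /* registers classes, categories,
                                                selectors, static strings */
      if (&class_registerAlias_np) {         /* weak import; may be absent */
        /* one call per entry in ClassAliases: */
        class_registerAlias_np(_OBJC_CLASS_Foo, "FooAlias");
      }
    }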
+
+llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) {
+ const ObjCCategoryImplDecl *OCD =
+ dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ StringRef CategoryName = OCD ? OCD->getName() : "";
+ StringRef ClassName = CD->getName();
+ Selector MethodName = OMD->getSelector();
+ bool isClassMethod = !OMD->isInstanceMethod();
+
+ CodeGenTypes &Types = CGM.getTypes();
+ llvm::FunctionType *MethodTy =
+ Types.GetFunctionType(Types.arrangeObjCMethodDeclaration(OMD));
+ std::string FunctionName = SymbolNameForMethod(ClassName, CategoryName,
+ MethodName, isClassMethod);
+
+ llvm::Function *Method
+ = llvm::Function::Create(MethodTy,
+ llvm::GlobalValue::InternalLinkage,
+ FunctionName,
+ &TheModule);
+ return Method;
+}
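The actual symbol comes from SymbolNameForMethod (defined earlier in this file); purely as an illustration of the kind of internal names this produces:

    // -[Foo count]           ->  _i_Foo__count            (illustrative)
    // +[Foo sharedInstance]  ->  _c_Foo__sharedInstance
    // -[Foo(Extras) count]   ->  _i_Foo_Extras_count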
+
+llvm::Constant *CGObjCGNU::GetPropertyGetFunction() {
+ return GetPropertyFn;
+}
+
+llvm::Constant *CGObjCGNU::GetPropertySetFunction() {
+ return SetPropertyFn;
+}
+
+llvm::Constant *CGObjCGNU::GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) {
+ return nullptr;
+}
+
+llvm::Constant *CGObjCGNU::GetGetStructFunction() {
+ return GetStructPropertyFn;
+}
+llvm::Constant *CGObjCGNU::GetSetStructFunction() {
+ return SetStructPropertyFn;
+}
+llvm::Constant *CGObjCGNU::GetCppAtomicObjectGetFunction() {
+ return nullptr;
+}
+llvm::Constant *CGObjCGNU::GetCppAtomicObjectSetFunction() {
+ return nullptr;
+}
+
+llvm::Constant *CGObjCGNU::EnumerationMutationFunction() {
+ return EnumerationMutationFn;
+}
+
+void CGObjCGNU::EmitSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ EmitAtSynchronizedStmt(CGF, S, SyncEnterFn, SyncExitFn);
+}
+
+
+void CGObjCGNU::EmitTryStmt(CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) {
+ // Unlike the Apple non-fragile runtimes, which also use
+ // unwind-based zero-cost exceptions, the GNU Objective-C runtime's
+ // EH support isn't a veneer over C++ EH. Instead, exception
+ // objects are created by objc_exception_throw and destroyed by
+ // the personality function; this avoids the need for bracketing
+ // catch handlers with calls to __blah_begin_catch/__blah_end_catch
+ // (or even _Unwind_DeleteException), but probably doesn't
+ // interoperate very well with foreign exceptions.
+ //
+ // In Objective-C++ mode, we actually emit something equivalent to the C++
+ // exception handler.
+ EmitTryCatchStmt(CGF, S, EnterCatchFn, ExitCatchFn, ExceptionReThrowFn);
+}
+
+void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S,
+ bool ClearInsertionPoint) {
+ llvm::Value *ExceptionAsObject;
+
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ llvm::Value *Exception = CGF.EmitObjCThrowOperand(ThrowExpr);
+ ExceptionAsObject = Exception;
+ } else {
+ assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+ "Unexpected rethrow outside @catch block.");
+ ExceptionAsObject = CGF.ObjCEHValueStack.back();
+ }
+ ExceptionAsObject = CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy);
+ llvm::CallSite Throw =
+ CGF.EmitRuntimeCallOrInvoke(ExceptionThrowFn, ExceptionAsObject);
+ Throw.setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+ if (ClearInsertionPoint)
+ CGF.Builder.ClearInsertionPoint();
+}
+
+llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGenFunction &CGF,
+ Address AddrWeakObj) {
+ CGBuilderTy &B = CGF.Builder;
+ AddrWeakObj = EnforceType(B, AddrWeakObj, PtrToIdTy);
+ return B.CreateCall(WeakReadFn.getType(), WeakReadFn,
+ AddrWeakObj.getPointer());
+}
+
+void CGObjCGNU::EmitObjCWeakAssign(CodeGenFunction &CGF,
+ llvm::Value *src, Address dst) {
+ CGBuilderTy &B = CGF.Builder;
+ src = EnforceType(B, src, IdTy);
+ dst = EnforceType(B, dst, PtrToIdTy);
+ B.CreateCall(WeakAssignFn.getType(), WeakAssignFn,
+ {src, dst.getPointer()});
+}
+
+void CGObjCGNU::EmitObjCGlobalAssign(CodeGenFunction &CGF,
+ llvm::Value *src, Address dst,
+ bool threadlocal) {
+ CGBuilderTy &B = CGF.Builder;
+ src = EnforceType(B, src, IdTy);
+ dst = EnforceType(B, dst, PtrToIdTy);
+ // FIXME: Add a threadlocal assign API.
+ assert(!threadlocal && "EmitObjCGlobalAssign - Thread Local API NYI");
+ B.CreateCall(GlobalAssignFn.getType(), GlobalAssignFn,
+ {src, dst.getPointer()});
+}
+
+void CGObjCGNU::EmitObjCIvarAssign(CodeGenFunction &CGF,
+ llvm::Value *src, Address dst,
+ llvm::Value *ivarOffset) {
+ CGBuilderTy &B = CGF.Builder;
+ src = EnforceType(B, src, IdTy);
+ dst = EnforceType(B, dst, IdTy);
+ B.CreateCall(IvarAssignFn.getType(), IvarAssignFn,
+ {src, dst.getPointer(), ivarOffset});
+}
+
+void CGObjCGNU::EmitObjCStrongCastAssign(CodeGenFunction &CGF,
+ llvm::Value *src, Address dst) {
+ CGBuilderTy &B = CGF.Builder;
+ src = EnforceType(B, src, IdTy);
+ dst = EnforceType(B, dst, PtrToIdTy);
+ B.CreateCall(StrongCastAssignFn.getType(), StrongCastAssignFn,
+ {src, dst.getPointer()});
+}
+
+void CGObjCGNU::EmitGCMemmoveCollectable(CodeGenFunction &CGF,
+ Address DestPtr,
+ Address SrcPtr,
+ llvm::Value *Size) {
+ CGBuilderTy &B = CGF.Builder;
+ DestPtr = EnforceType(B, DestPtr, PtrTy);
+ SrcPtr = EnforceType(B, SrcPtr, PtrTy);
+
+ B.CreateCall(MemMoveFn.getType(), MemMoveFn,
+ {DestPtr.getPointer(), SrcPtr.getPointer(), Size});
+}
+
+llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ const std::string Name = "__objc_ivar_offset_" + ID->getNameAsString()
+ + '.' + Ivar->getNameAsString();
+ // Emit the variable and initialize it with what we think the correct value
+ // is. This allows code compiled with non-fragile ivars to work correctly
+ // when linked against code which isn't (most of the time).
+ llvm::GlobalVariable *IvarOffsetPointer = TheModule.getNamedGlobal(Name);
+ if (!IvarOffsetPointer) {
+ // This will cause a run-time crash if we accidentally use it. A value of
+ // 0 would seem more sensible, but will silently overwrite the isa pointer
+ // causing a great deal of confusion.
+ uint64_t Offset = -1;
+ // We can't call ComputeIvarBaseOffset() here if we have the
+ // implementation, because it will create an invalid ASTRecordLayout object
+ // that we are then stuck with forever, so we only initialize the ivar
+ // offset variable with a guess if we only have the interface. The
+ // initializer will be reset later anyway, when we are generating the class
+ // description.
+ if (!CGM.getContext().getObjCImplementation(
+ const_cast<ObjCInterfaceDecl *>(ID)))
+ Offset = ComputeIvarBaseOffset(CGM, ID, Ivar);
+
+ llvm::ConstantInt *OffsetGuess = llvm::ConstantInt::get(Int32Ty, Offset,
+ /*isSigned*/true);
+ // Don't emit the guess in non-PIC code because the linker will not be able
+ // to replace it with the real version for a library. In non-PIC code you
+ // must compile with the fragile ABI if you want to use ivars from a
+ // GCC-compiled class.
+ if (CGM.getLangOpts().PICLevel || CGM.getLangOpts().PIELevel) {
+ llvm::GlobalVariable *IvarOffsetGV = new llvm::GlobalVariable(TheModule,
+ Int32Ty, false,
+ llvm::GlobalValue::PrivateLinkage, OffsetGuess, Name+".guess");
+ IvarOffsetPointer = new llvm::GlobalVariable(TheModule,
+ IvarOffsetGV->getType(), false, llvm::GlobalValue::LinkOnceAnyLinkage,
+ IvarOffsetGV, Name);
+ } else {
+ IvarOffsetPointer = new llvm::GlobalVariable(TheModule,
+ llvm::Type::getInt32PtrTy(VMContext), false,
+ llvm::GlobalValue::ExternalLinkage, nullptr, Name);
+ }
+ }
+ return IvarOffsetPointer;
+}
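The PIC branch above yields a two-level scheme; roughly, in C (a sketch; the real names contain a '.', and the initial value is only the compiler's guess):

    static int offset_guess = 8;                     /* private, per-module */
    int *__objc_ivar_offset_Foo_bar = &offset_guess; /* link-once: the module
        holding the real @implementation overrides it with the true offset */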
+
+LValue CGObjCGNU::EmitObjCValueForIvar(CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ const ObjCInterfaceDecl *ID =
+ ObjectTy->getAs<ObjCObjectType>()->getInterface();
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ EmitIvarOffset(CGF, ID, Ivar));
+}
+
+static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *OIVD) {
+ for (const ObjCIvarDecl *next = OID->all_declared_ivar_begin(); next;
+ next = next->getNextIvar()) {
+ if (OIVD == next)
+ return OID;
+ }
+
+ // Otherwise check in the super class.
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+ return FindIvarInterface(Context, Super, OIVD);
+
+ return nullptr;
+}
+
+llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) {
+ Interface = FindIvarInterface(CGM.getContext(), Interface, Ivar);
+ if (RuntimeVersion < 10)
+ return CGF.Builder.CreateZExtOrBitCast(
+ CGF.Builder.CreateDefaultAlignedLoad(CGF.Builder.CreateAlignedLoad(
+ ObjCIvarOffsetVariable(Interface, Ivar),
+ CGF.getPointerAlign(), "ivar")),
+ PtrDiffTy);
+ std::string name = "__objc_ivar_offset_value_" +
+ Interface->getNameAsString() + "." + Ivar->getNameAsString();
+ CharUnits Align = CGM.getIntAlign();
+ llvm::Value *Offset = TheModule.getGlobalVariable(name);
+ if (!Offset) {
+ auto GV = new llvm::GlobalVariable(TheModule, IntTy,
+ false, llvm::GlobalValue::LinkOnceAnyLinkage,
+ llvm::Constant::getNullValue(IntTy), name);
+ GV->setAlignment(Align.getQuantity());
+ Offset = GV;
+ }
+ Offset = CGF.Builder.CreateAlignedLoad(Offset, Align);
+ if (Offset->getType() != PtrDiffTy)
+ Offset = CGF.Builder.CreateZExtOrBitCast(Offset, PtrDiffTy);
+ return Offset;
+ }
+ uint64_t Offset = ComputeIvarBaseOffset(CGF.CGM, Interface, Ivar);
+ return llvm::ConstantInt::get(PtrDiffTy, Offset, /*isSigned*/true);
+}
+
+CGObjCRuntime *
+clang::CodeGen::CreateGNUObjCRuntime(CodeGenModule &CGM) {
+ switch (CGM.getLangOpts().ObjCRuntime.getKind()) {
+ case ObjCRuntime::GNUstep:
+ return new CGObjCGNUstep(CGM);
+
+ case ObjCRuntime::GCC:
+ return new CGObjCGCC(CGM);
+
+ case ObjCRuntime::ObjFW:
+ return new CGObjCObjFW(CGM);
+
+ case ObjCRuntime::FragileMacOSX:
+ case ObjCRuntime::MacOSX:
+ case ObjCRuntime::iOS:
+ case ObjCRuntime::WatchOS:
+ llvm_unreachable("these runtimes are not GNU runtimes");
+ }
+ llvm_unreachable("bad runtime");
+}
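These cases map onto the driver's -fobjc-runtime= values; approximately (version suffixes optional):

    -fobjc-runtime=gnustep[-x.y]  ->  CGObjCGNUstep
    -fobjc-runtime=gcc            ->  CGObjCGCC
    -fobjc-runtime=objfw          ->  CGObjCObjFW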
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
new file mode 100644
index 0000000..e30b287
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
@@ -0,0 +1,7245 @@
+//===------- CGObjCMac.cpp - Interface to Apple Objective-C Runtime -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides Objective-C code generation targeting the Apple runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+#include "CGBlocks.h"
+#include "CGCleanup.h"
+#include "CGRecordLayout.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdio>
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+
+// FIXME: We should find a nicer way to make the labels for metadata, string
+// concatenation is lame.
+
+class ObjCCommonTypesHelper {
+protected:
+ llvm::LLVMContext &VMContext;
+
+private:
+ // The types of these functions don't really matter because we
+ // should always bitcast before calling them.
+
+ /// id objc_msgSend (id, SEL, ...)
+ ///
+ /// The default messenger, used for sends whose ABI is unchanged from
+ /// the all-integer/pointer case.
+ llvm::Constant *getMessageSendFn() const {
+ // Add the non-lazy-bind attribute, since objc_msgSend is likely to
+ // be called a lot.
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ return
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSend",
+ llvm::AttributeSet::get(CGM.getLLVMContext(),
+ llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::NonLazyBind));
+ }
+
+ /// void objc_msgSend_stret (id, SEL, ...)
+ ///
+ /// The messenger used when the return value is an aggregate returned
+ /// by indirect reference in the first argument, and therefore the
+ /// self and selector parameters are shifted over by one.
+ llvm::Constant *getMessageSendStretFn() const {
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.VoidTy,
+ params, true),
+ "objc_msgSend_stret");
+
+ }
+
+ /// [double | long double] objc_msgSend_fpret(id self, SEL op, ...)
+ ///
+ /// The messenger used when the return value is returned on the x87
+ /// floating-point stack; without a special entrypoint, the nil case
+ /// would be unbalanced.
+ llvm::Constant *getMessageSendFpretFn() const {
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.DoubleTy,
+ params, true),
+ "objc_msgSend_fpret");
+
+ }
+
+ /// _Complex long double objc_msgSend_fp2ret(id self, SEL op, ...)
+ ///
+ /// The messenger used when the return value is returned in two values on the
+ /// x87 floating point stack; without a special entrypoint, the nil case
+ /// would be unbalanced. Only used on 64-bit X86.
+ llvm::Constant *getMessageSendFp2retFn() const {
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ llvm::Type *longDoubleType = llvm::Type::getX86_FP80Ty(VMContext);
+ llvm::Type *resultType =
+ llvm::StructType::get(longDoubleType, longDoubleType, nullptr);
+
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(resultType,
+ params, true),
+ "objc_msgSend_fp2ret");
+ }
+
+ /// id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
+ ///
+ /// The messenger used for super calls, which have different dispatch
+ /// semantics. The class passed is the superclass of the current
+ /// class.
+ llvm::Constant *getMessageSendSuperFn() const {
+ llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSendSuper");
+ }
+
+ /// id objc_msgSendSuper2(struct objc_super *super, SEL op, ...)
+ ///
+ /// A slightly different messenger used for super calls. The class
+ /// passed is the current class.
+ llvm::Constant *getMessageSendSuperFn2() const {
+ llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSendSuper2");
+ }
+
+ /// void objc_msgSendSuper_stret(void *stretAddr, struct objc_super *super,
+ /// SEL op, ...)
+ ///
+ /// The messenger used for super calls which return an aggregate indirectly.
+ llvm::Constant *getMessageSendSuperStretFn() const {
+ llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(CGM.VoidTy, params, true),
+ "objc_msgSendSuper_stret");
+ }
+
+ /// void objc_msgSendSuper2_stret(void * stretAddr, struct objc_super *super,
+ /// SEL op, ...)
+ ///
+ /// objc_msgSendSuper_stret with the super2 semantics.
+ llvm::Constant *getMessageSendSuperStretFn2() const {
+ llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(CGM.VoidTy, params, true),
+ "objc_msgSendSuper2_stret");
+ }
+
+ llvm::Constant *getMessageSendSuperFpretFn() const {
+ // There is no objc_msgSendSuper_fpret? How can that work?
+ return getMessageSendSuperFn();
+ }
+
+ llvm::Constant *getMessageSendSuperFpretFn2() const {
+ // There is no objc_msgSendSuper_fpret? How can that work?
+ return getMessageSendSuperFn2();
+ }
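Collecting the doc comments above, the messenger family declared by this helper, as C prototypes (the fpret/fp2ret variants are x86-specific):

    id     objc_msgSend(id self, SEL op, ...);
    void   objc_msgSend_stret(id self, SEL op, ...);   /* aggregate returns */
    double objc_msgSend_fpret(id self, SEL op, ...);   /* x87 fp returns */
    /* _Complex long double objc_msgSend_fp2ret(...)  -- 64-bit x86 only */
    id     objc_msgSendSuper(struct objc_super *super, SEL op, ...);
    id     objc_msgSendSuper2(struct objc_super *super, SEL op, ...);
    void   objc_msgSendSuper_stret(void *ret, struct objc_super *super,
                                   SEL op, ...);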
+
+protected:
+ CodeGen::CodeGenModule &CGM;
+
+public:
+ llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy;
+ llvm::Type *Int8PtrTy, *Int8PtrPtrTy;
+ llvm::Type *IvarOffsetVarTy;
+
+ /// ObjectPtrTy - LLVM type for object handles (typeof(id))
+ llvm::Type *ObjectPtrTy;
+
+ /// PtrObjectPtrTy - LLVM type for id *
+ llvm::Type *PtrObjectPtrTy;
+
+ /// SelectorPtrTy - LLVM type for selector handles (typeof(SEL))
+ llvm::Type *SelectorPtrTy;
+
+private:
+ /// ProtocolPtrTy - LLVM type for external protocol handles
+ /// (typeof(Protocol))
+ llvm::Type *ExternalProtocolPtrTy;
+
+public:
+ llvm::Type *getExternalProtocolPtrTy() {
+ if (!ExternalProtocolPtrTy) {
+ // FIXME: It would be nice to unify this with the opaque type, so that the
+ // IR comes out a bit cleaner.
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ llvm::Type *T = Types.ConvertType(Ctx.getObjCProtoType());
+ ExternalProtocolPtrTy = llvm::PointerType::getUnqual(T);
+ }
+
+ return ExternalProtocolPtrTy;
+ }
+
+ // SuperCTy - clang type for struct objc_super.
+ QualType SuperCTy;
+ // SuperPtrCTy - clang type for struct objc_super *.
+ QualType SuperPtrCTy;
+
+ /// SuperTy - LLVM type for struct objc_super.
+ llvm::StructType *SuperTy;
+ /// SuperPtrTy - LLVM type for struct objc_super *.
+ llvm::Type *SuperPtrTy;
+
+ /// PropertyTy - LLVM type for struct objc_property (struct _prop_t
+ /// in GCC parlance).
+ llvm::StructType *PropertyTy;
+
+ /// PropertyListTy - LLVM type for struct objc_property_list
+ /// (_prop_list_t in GCC parlance).
+ llvm::StructType *PropertyListTy;
+ /// PropertyListPtrTy - LLVM type for struct objc_property_list*.
+ llvm::Type *PropertyListPtrTy;
+
+ /// MethodTy - LLVM type for struct objc_method.
+ llvm::StructType *MethodTy;
+
+ /// CacheTy - LLVM type for struct objc_cache.
+ llvm::Type *CacheTy;
+ /// CachePtrTy - LLVM type for struct objc_cache *.
+ llvm::Type *CachePtrTy;
+
+ llvm::Constant *getGetPropertyFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // id objc_getProperty (id, SEL, ptrdiff_t, bool)
+ SmallVector<CanQualType,4> Params;
+ CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType());
+ CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
+ Params.push_back(IdType);
+ Params.push_back(SelType);
+ Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
+ Params.push_back(Ctx.BoolTy);
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(
+ IdType, false, false, Params, FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ return CGM.CreateRuntimeFunction(FTy, "objc_getProperty");
+ }
+
+ llvm::Constant *getSetPropertyFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
+ SmallVector<CanQualType,6> Params;
+ CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType());
+ CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
+ Params.push_back(IdType);
+ Params.push_back(SelType);
+ Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
+ Params.push_back(IdType);
+ Params.push_back(Ctx.BoolTy);
+ Params.push_back(Ctx.BoolTy);
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(
+ Ctx.VoidTy, false, false, Params, FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ return CGM.CreateRuntimeFunction(FTy, "objc_setProperty");
+ }
+
+ llvm::Constant *getOptimizedSetPropertyFn(bool atomic, bool copy) {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_setProperty_atomic(id self, SEL _cmd,
+ // id newValue, ptrdiff_t offset);
+ // void objc_setProperty_nonatomic(id self, SEL _cmd,
+ // id newValue, ptrdiff_t offset);
+ // void objc_setProperty_atomic_copy(id self, SEL _cmd,
+ // id newValue, ptrdiff_t offset);
+ // void objc_setProperty_nonatomic_copy(id self, SEL _cmd,
+ // id newValue, ptrdiff_t offset);
+
+ SmallVector<CanQualType,4> Params;
+ CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType());
+ CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
+ Params.push_back(IdType);
+ Params.push_back(SelType);
+ Params.push_back(IdType);
+ Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(
+ Ctx.VoidTy, false, false, Params, FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ const char *name;
+ if (atomic && copy)
+ name = "objc_setProperty_atomic_copy";
+ else if (atomic && !copy)
+ name = "objc_setProperty_atomic";
+ else if (!atomic && copy)
+ name = "objc_setProperty_nonatomic_copy";
+ else
+ name = "objc_setProperty_nonatomic";
+
+ return CGM.CreateRuntimeFunction(FTy, name);
+ }
+
+ llvm::Constant *getCopyStructFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_copyStruct (void *, const void *, size_t, bool, bool)
+ SmallVector<CanQualType,5> Params;
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.LongTy);
+ Params.push_back(Ctx.BoolTy);
+ Params.push_back(Ctx.BoolTy);
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(
+ Ctx.VoidTy, false, false, Params, FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ return CGM.CreateRuntimeFunction(FTy, "objc_copyStruct");
+ }
+
+ /// This routine declares and returns address of:
+ /// void objc_copyCppObjectAtomic(
+ /// void *dest, const void *src,
+ /// void (*copyHelper) (void *dest, const void *source));
+ llvm::Constant *getCppAtomicObjectFunction() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ /// void objc_copyCppObjectAtomic(void *dest, const void *src, void *helper);
+ SmallVector<CanQualType,3> Params;
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.VoidPtrTy);
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, false, false,
+ Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ return CGM.CreateRuntimeFunction(FTy, "objc_copyCppObjectAtomic");
+ }
+
+ llvm::Constant *getEnumerationMutationFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_enumerationMutation (id)
+ SmallVector<CanQualType,1> Params;
+ Params.push_back(Ctx.getCanonicalParamType(Ctx.getObjCIdType()));
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(
+ Ctx.VoidTy, false, false, Params, FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
+ }
+
+ /// GcReadWeakFn -- LLVM objc_read_weak (id *src) function.
+ llvm::Constant *getGcReadWeakFn() {
+ // id objc_read_weak (id *)
+ llvm::Type *args[] = { ObjectPtrTy->getPointerTo() };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
+ }
+
+ /// GcAssignWeakFn -- LLVM objc_assign_weak function.
+ llvm::Constant *getGcAssignWeakFn() {
+ // id objc_assign_weak (id, id *)
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
+ }
+
+ /// GcAssignGlobalFn -- LLVM objc_assign_global function.
+ llvm::Constant *getGcAssignGlobalFn() {
+ // id objc_assign_global(id, id *)
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
+ }
+
+ /// GcAssignThreadLocalFn -- LLVM objc_assign_threadlocal function.
+ llvm::Constant *getGcAssignThreadLocalFn() {
+ // id objc_assign_threadlocal(id src, id * dest)
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_threadlocal");
+ }
+
+ /// GcAssignIvarFn -- LLVM objc_assign_ivar function.
+ llvm::Constant *getGcAssignIvarFn() {
+ // id objc_assign_ivar(id, id *, ptrdiff_t)
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo(),
+ CGM.PtrDiffTy };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
+ }
+
+ /// GcMemmoveCollectableFn -- LLVM objc_memmove_collectable function.
+ llvm::Constant *GcMemmoveCollectableFn() {
+ // void *objc_memmove_collectable(void *dst, const void *src, size_t size)
+ llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, LongTy };
+ llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_memmove_collectable");
+ }
+
+ /// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function.
+ llvm::Constant *getGcAssignStrongCastFn() {
+ // id objc_assign_strongCast(id, id *)
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
+ }
+
+ /// ExceptionThrowFn - LLVM objc_exception_throw function.
+ llvm::Constant *getExceptionThrowFn() {
+ // void objc_exception_throw(id)
+ llvm::Type *args[] = { ObjectPtrTy };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
+ }
+
+ /// ExceptionRethrowFn - LLVM objc_exception_rethrow function.
+ llvm::Constant *getExceptionRethrowFn() {
+ // void objc_exception_rethrow(void)
+ llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_exception_rethrow");
+ }
+
+ /// SyncEnterFn - LLVM object_sync_enter function.
+ llvm::Constant *getSyncEnterFn() {
+ // int objc_sync_enter (id)
+ llvm::Type *args[] = { ObjectPtrTy };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.IntTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
+ }
+
+ /// SyncExitFn - LLVM object_sync_exit function.
+ llvm::Constant *getSyncExitFn() {
+ // int objc_sync_exit (id)
+ llvm::Type *args[] = { ObjectPtrTy };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.IntTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
+ }
+
+ llvm::Constant *getSendFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn() : getMessageSendFn();
+ }
+
+ llvm::Constant *getSendFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn2() : getMessageSendFn();
+ }
+
+ llvm::Constant *getSendStretFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperStretFn() : getMessageSendStretFn();
+ }
+
+ llvm::Constant *getSendStretFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperStretFn2() : getMessageSendStretFn();
+ }
+
+ llvm::Constant *getSendFpretFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFpretFn() : getMessageSendFpretFn();
+ }
+
+ llvm::Constant *getSendFpretFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFpretFn2() : getMessageSendFpretFn();
+ }
+
+ llvm::Constant *getSendFp2retFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn() : getMessageSendFp2retFn();
+ }
+
+ llvm::Constant *getSendFp2RetFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn2() : getMessageSendFp2retFn();
+ }
+
+ ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm);
+};
+
+/// ObjCTypesHelper - Helper class that encapsulates lazy
+/// construction of various types used during ObjC generation.
+class ObjCTypesHelper : public ObjCCommonTypesHelper {
+public:
+ /// SymtabTy - LLVM type for struct objc_symtab.
+ llvm::StructType *SymtabTy;
+ /// SymtabPtrTy - LLVM type for struct objc_symtab *.
+ llvm::Type *SymtabPtrTy;
+ /// ModuleTy - LLVM type for struct objc_module.
+ llvm::StructType *ModuleTy;
+
+ /// ProtocolTy - LLVM type for struct objc_protocol.
+ llvm::StructType *ProtocolTy;
+ /// ProtocolPtrTy - LLVM type for struct objc_protocol *.
+ llvm::Type *ProtocolPtrTy;
+ /// ProtocolExtensionTy - LLVM type for struct
+ /// objc_protocol_extension.
+ llvm::StructType *ProtocolExtensionTy;
+ /// ProtocolExtensionPtrTy - LLVM type for struct
+ /// objc_protocol_extension *.
+ llvm::Type *ProtocolExtensionPtrTy;
+ /// MethodDescriptionTy - LLVM type for struct
+ /// objc_method_description.
+ llvm::StructType *MethodDescriptionTy;
+ /// MethodDescriptionListTy - LLVM type for struct
+ /// objc_method_description_list.
+ llvm::StructType *MethodDescriptionListTy;
+ /// MethodDescriptionListPtrTy - LLVM type for struct
+ /// objc_method_description_list *.
+ llvm::Type *MethodDescriptionListPtrTy;
+ /// ProtocolListTy - LLVM type for struct objc_protocol_list.
+ llvm::StructType *ProtocolListTy;
+ /// ProtocolListPtrTy - LLVM type for struct objc_protocol_list*.
+ llvm::Type *ProtocolListPtrTy;
+ /// CategoryTy - LLVM type for struct objc_category.
+ llvm::StructType *CategoryTy;
+ /// ClassTy - LLVM type for struct objc_class.
+ llvm::StructType *ClassTy;
+ /// ClassPtrTy - LLVM type for struct objc_class *.
+ llvm::Type *ClassPtrTy;
+ /// ClassExtensionTy - LLVM type for struct objc_class_ext.
+ llvm::StructType *ClassExtensionTy;
+ /// ClassExtensionPtrTy - LLVM type for struct objc_class_ext *.
+ llvm::Type *ClassExtensionPtrTy;
+ /// IvarTy - LLVM type for struct objc_ivar.
+ llvm::StructType *IvarTy;
+ /// IvarListTy - LLVM type for struct objc_ivar_list.
+ llvm::Type *IvarListTy;
+ /// IvarListPtrTy - LLVM type for struct objc_ivar_list *.
+ llvm::Type *IvarListPtrTy;
+ /// MethodListTy - LLVM type for struct objc_method_list.
+ llvm::Type *MethodListTy;
+ /// MethodListPtrTy - LLVM type for struct objc_method_list *.
+ llvm::Type *MethodListPtrTy;
+
+ /// ExceptionDataTy - LLVM type for struct _objc_exception_data.
+ llvm::Type *ExceptionDataTy;
+
+ /// ExceptionTryEnterFn - LLVM objc_exception_try_enter function.
+ llvm::Constant *getExceptionTryEnterFn() {
+ llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(CGM.VoidTy, params, false),
+ "objc_exception_try_enter");
+ }
+
+ /// ExceptionTryExitFn - LLVM objc_exception_try_exit function.
+ llvm::Constant *getExceptionTryExitFn() {
+ llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(CGM.VoidTy, params, false),
+ "objc_exception_try_exit");
+ }
+
+ /// ExceptionExtractFn - LLVM objc_exception_extract function.
+ llvm::Constant *getExceptionExtractFn() {
+ llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, false),
+ "objc_exception_extract");
+ }
+
+ /// ExceptionMatchFn - LLVM objc_exception_match function.
+ llvm::Constant *getExceptionMatchFn() {
+ llvm::Type *params[] = { ClassPtrTy, ObjectPtrTy };
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(CGM.Int32Ty, params, false),
+ "objc_exception_match");
+
+ }
+
+ /// SetJmpFn - LLVM _setjmp function.
+ llvm::Constant *getSetJmpFn() {
+ // This is specifically the prototype for x86.
+ llvm::Type *params[] = { CGM.Int32Ty->getPointerTo() };
+ return
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty,
+ params, false),
+ "_setjmp",
+ llvm::AttributeSet::get(CGM.getLLVMContext(),
+ llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::NonLazyBind));
+ }
+
+public:
+ ObjCTypesHelper(CodeGen::CodeGenModule &cgm);
+};
+
+/// ObjCNonFragileABITypesHelper - holds all types needed by Objective-C's
+/// modern (non-fragile) ABI.
+class ObjCNonFragileABITypesHelper : public ObjCCommonTypesHelper {
+public:
+
+ // MethodListnfABITy - LLVM for struct _method_list_t
+ llvm::StructType *MethodListnfABITy;
+
+ // MethodListnfABIPtrTy - LLVM for struct _method_list_t*
+ llvm::Type *MethodListnfABIPtrTy;
+
+ // ProtocolnfABITy = LLVM for struct _protocol_t
+ llvm::StructType *ProtocolnfABITy;
+
+ // ProtocolnfABIPtrTy = LLVM for struct _protocol_t*
+ llvm::Type *ProtocolnfABIPtrTy;
+
+ // ProtocolListnfABITy - LLVM for struct _objc_protocol_list
+ llvm::StructType *ProtocolListnfABITy;
+
+ // ProtocolListnfABIPtrTy - LLVM for struct _objc_protocol_list*
+ llvm::Type *ProtocolListnfABIPtrTy;
+
+ // ClassnfABITy - LLVM for struct _class_t
+ llvm::StructType *ClassnfABITy;
+
+ // ClassnfABIPtrTy - LLVM for struct _class_t*
+ llvm::Type *ClassnfABIPtrTy;
+
+ // IvarnfABITy - LLVM for struct _ivar_t
+ llvm::StructType *IvarnfABITy;
+
+ // IvarListnfABITy - LLVM for struct _ivar_list_t
+ llvm::StructType *IvarListnfABITy;
+
+ // IvarListnfABIPtrTy = LLVM for struct _ivar_list_t*
+ llvm::Type *IvarListnfABIPtrTy;
+
+ // ClassRonfABITy - LLVM for struct _class_ro_t
+ llvm::StructType *ClassRonfABITy;
+
+ // ImpnfABITy - LLVM for id (*)(id, SEL, ...)
+ llvm::Type *ImpnfABITy;
+
+ // CategorynfABITy - LLVM for struct _category_t
+ llvm::StructType *CategorynfABITy;
+
+ // New types for non-fragile ABI messaging.
+
+ // MessageRefTy - LLVM for:
+ // struct _message_ref_t {
+ // IMP messenger;
+ // SEL name;
+ // };
+ llvm::StructType *MessageRefTy;
+ // MessageRefCTy - clang type for struct _message_ref_t
+ QualType MessageRefCTy;
+
+ // MessageRefPtrTy - LLVM for struct _message_ref_t*
+ llvm::Type *MessageRefPtrTy;
+ // MessageRefCPtrTy - clang type for struct _message_ref_t*
+ QualType MessageRefCPtrTy;
+
+ // SuperMessageRefTy - LLVM for:
+ // struct _super_message_ref_t {
+ // SUPER_IMP messenger;
+ // SEL name;
+ // };
+ llvm::StructType *SuperMessageRefTy;
+
+ // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
+ llvm::Type *SuperMessageRefPtrTy;
+
+ llvm::Constant *getMessageSendFixupFn() {
+ // id objc_msgSend_fixup(id, struct message_ref_t*, ...)
+ llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSend_fixup");
+ }
+
+ llvm::Constant *getMessageSendFpretFixupFn() {
+ // id objc_msgSend_fpret_fixup(id, struct message_ref_t*, ...)
+ llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSend_fpret_fixup");
+ }
+
+ llvm::Constant *getMessageSendStretFixupFn() {
+ // id objc_msgSend_stret_fixup(id, struct message_ref_t*, ...)
+ llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSend_stret_fixup");
+ }
+
+ llvm::Constant *getMessageSendSuper2FixupFn() {
+ // id objc_msgSendSuper2_fixup (struct objc_super *,
+ // struct _super_message_ref_t*, ...)
+ llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSendSuper2_fixup");
+ }
+
+ llvm::Constant *getMessageSendSuper2StretFixupFn() {
+ // id objc_msgSendSuper2_stret_fixup(struct objc_super *,
+ // struct _super_message_ref_t*, ...)
+ llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSendSuper2_stret_fixup");
+ }
+
+ llvm::Constant *getObjCEndCatchFn() {
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.VoidTy, false),
+ "objc_end_catch");
+
+ }
+
+ llvm::Constant *getObjCBeginCatchFn() {
+ llvm::Type *params[] = { Int8PtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(Int8PtrTy,
+ params, false),
+ "objc_begin_catch");
+ }
+
+ llvm::StructType *EHTypeTy;
+ llvm::Type *EHTypePtrTy;
+
+ ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm);
+};
+
+class CGObjCCommonMac : public CodeGen::CGObjCRuntime {
+public:
+ class SKIP_SCAN {
+ public:
+ unsigned skip;
+ unsigned scan;
+ SKIP_SCAN(unsigned _skip = 0, unsigned _scan = 0)
+ : skip(_skip), scan(_scan) {}
+ };
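SKIP_SCAN runs feed the GC ivar-layout strings built later in this file; conventionally each pair packs into one byte of a '\0'-terminated bitmap, skip count in the high nibble and scan count in the low nibble (a sketch; larger counts spill into additional bytes):

    // skip 2 words, scan 3 words  ->  0x23
    // skip 0 words, scan 1 word   ->  0x01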
+
+ /// Opcodes for the captured block variables layout 'instructions'.
+ /// In the following descriptions, 'I' is the value of the immediate field
+ /// (the field following the opcode).
+ ///
+ enum BLOCK_LAYOUT_OPCODE {
+ /// An operator which affects how the following layout should be
+ /// interpreted.
+ /// I == 0: Halt interpretation and treat everything else as
+ /// a non-pointer. Note that this instruction is equal
+ /// to '\0'.
+ /// I != 0: Currently unused.
+ BLOCK_LAYOUT_OPERATOR = 0,
+
+ /// The next I+1 bytes do not contain a value of object pointer type.
+ /// Note that this can leave the stream unaligned, meaning that
+ /// subsequent word-size instructions do not begin at a multiple of
+ /// the pointer size.
+ BLOCK_LAYOUT_NON_OBJECT_BYTES = 1,
+
+ /// The next I+1 words do not contain a value of object pointer type.
+ /// This is simply an optimized version of BLOCK_LAYOUT_NON_OBJECT_BYTES for
+ /// when the required skip quantity is a multiple of the pointer size.
+ BLOCK_LAYOUT_NON_OBJECT_WORDS = 2,
+
+ /// The next I+1 words are __strong pointers to Objective-C
+ /// objects or blocks.
+ BLOCK_LAYOUT_STRONG = 3,
+
+ /// The next I+1 words are pointers to __block variables.
+ BLOCK_LAYOUT_BYREF = 4,
+
+ /// The next I+1 words are __weak pointers to Objective-C
+ /// objects or blocks.
+ BLOCK_LAYOUT_WEAK = 5,
+
+ /// The next I+1 words are __unsafe_unretained pointers to
+ /// Objective-C objects or blocks.
+ BLOCK_LAYOUT_UNRETAINED = 6
+
+ /// The next I+1 words are block or object pointers with some
+ /// as-yet-unspecified ownership semantics. If we add more
+ /// flavors of ownership semantics, values will be taken from
+ /// this range.
+ ///
+ /// This is included so that older tools can at least continue
+ /// processing the layout past such things.
+ //BLOCK_LAYOUT_OWNERSHIP_UNKNOWN = 7..10,
+
+ /// All other opcodes are reserved. Halt interpretation and
+ /// treat everything else as opaque.
+ };
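Each instruction ends up packed into a single byte, opcode in the high nibble and I in the low nibble (see getBitmapBlockLayout and InlineLayoutInstruction below). As a sketch, a block capturing two __strong pointers and then one __weak pointer would lay out as:

    // 2 __strong words : (BLOCK_LAYOUT_STRONG << 4) | (2 - 1)   ->  0x31
    // 1 __weak word    : (BLOCK_LAYOUT_WEAK   << 4) | (1 - 1)   ->  0x50
    // terminator       : BLOCK_LAYOUT_OPERATOR with I == 0      ->  0x00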
+
+ class RUN_SKIP {
+ public:
+ enum BLOCK_LAYOUT_OPCODE opcode;
+ CharUnits block_var_bytepos;
+ CharUnits block_var_size;
+ RUN_SKIP(enum BLOCK_LAYOUT_OPCODE Opcode = BLOCK_LAYOUT_OPERATOR,
+ CharUnits BytePos = CharUnits::Zero(),
+ CharUnits Size = CharUnits::Zero())
+ : opcode(Opcode), block_var_bytepos(BytePos), block_var_size(Size) {}
+
+ // Allow sorting based on byte pos.
+ bool operator<(const RUN_SKIP &b) const {
+ return block_var_bytepos < b.block_var_bytepos;
+ }
+ };
+
+protected:
+ llvm::LLVMContext &VMContext;
+ // FIXME: We may not need this after all.
+ unsigned ObjCABI;
+
+ // ARC/MRR layout of captured block literal variables.
+ SmallVector<RUN_SKIP, 16> RunSkipBlockVars;
+
+ /// LazySymbols - Symbols to generate a lazy reference for. See
+ /// DefinedSymbols and FinishModule().
+ llvm::SetVector<IdentifierInfo*> LazySymbols;
+
+ /// DefinedSymbols - External symbols which are defined by this
+ /// module. The symbols in this list and LazySymbols are used to add
+ /// special linker symbols which ensure that Objective-C modules are
+ /// linked properly.
+ llvm::SetVector<IdentifierInfo*> DefinedSymbols;
+
+ /// ClassNames - uniqued class names.
+ llvm::StringMap<llvm::GlobalVariable*> ClassNames;
+
+ /// MethodVarNames - uniqued method variable names.
+ llvm::DenseMap<Selector, llvm::GlobalVariable*> MethodVarNames;
+
+ /// DefinedCategoryNames - list of category names in form Class_Category.
+ llvm::SmallSetVector<std::string, 16> DefinedCategoryNames;
+
+ /// MethodVarTypes - uniqued method type signatures. We have to use
+ /// a StringMap here because we have no other unique reference.
+ llvm::StringMap<llvm::GlobalVariable*> MethodVarTypes;
+
+ /// MethodDefinitions - map of methods which have been defined in
+ /// this translation unit.
+ llvm::DenseMap<const ObjCMethodDecl*, llvm::Function*> MethodDefinitions;
+
+ /// PropertyNames - uniqued property names.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> PropertyNames;
+
+ /// ClassReferences - uniqued class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ClassReferences;
+
+ /// SelectorReferences - uniqued selector references.
+ llvm::DenseMap<Selector, llvm::GlobalVariable*> SelectorReferences;
+
+ /// Protocols - Protocols for which an objc_protocol structure has
+ /// been emitted. Forward declarations are handled by creating an
+ /// empty structure whose initializer is filled in when/if defined.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> Protocols;
+
+ /// DefinedProtocols - Protocols which have actually been
+ /// defined. We should not need this, see FIXME in GenerateProtocol.
+ llvm::DenseSet<IdentifierInfo*> DefinedProtocols;
+
+ /// DefinedClasses - List of defined classes.
+ SmallVector<llvm::GlobalValue*, 16> DefinedClasses;
+
+ /// ImplementedClasses - List of @implemented classes.
+ SmallVector<const ObjCInterfaceDecl*, 16> ImplementedClasses;
+
+ /// DefinedNonLazyClasses - List of defined "non-lazy" classes.
+ SmallVector<llvm::GlobalValue*, 16> DefinedNonLazyClasses;
+
+ /// DefinedCategories - List of defined categories.
+ SmallVector<llvm::GlobalValue*, 16> DefinedCategories;
+
+ /// DefinedNonLazyCategories - List of defined "non-lazy" categories.
+ SmallVector<llvm::GlobalValue*, 16> DefinedNonLazyCategories;
+
+ /// GetNameForMethod - Return a name for the given method.
+ /// \param[out] NameOut - The return value.
+ void GetNameForMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD,
+ SmallVectorImpl<char> &NameOut);
+
+ /// GetMethodVarName - Return a unique constant for the given
+ /// selector's name. The return value has type char *.
+ llvm::Constant *GetMethodVarName(Selector Sel);
+ llvm::Constant *GetMethodVarName(IdentifierInfo *Ident);
+
+ /// GetMethodVarType - Return a unique constant for the given
+ /// method's type encoding string. The return value has type char *.
+
+ // FIXME: This is a horrible name.
+ llvm::Constant *GetMethodVarType(const ObjCMethodDecl *D,
+ bool Extended = false);
+ llvm::Constant *GetMethodVarType(const FieldDecl *D);
+
+ /// GetPropertyName - Return a unique constant for the given
+ /// name. The return value has type char *.
+ llvm::Constant *GetPropertyName(IdentifierInfo *Ident);
+
+ // FIXME: This can be dropped once string functions are unified.
+ llvm::Constant *GetPropertyTypeString(const ObjCPropertyDecl *PD,
+ const Decl *Container);
+
+ /// GetClassName - Return a unique constant for the given class's
+ /// runtime name (which may change via the objc_runtime_name attribute on a
+ /// class or protocol definition). The return value has type char *.
+ llvm::Constant *GetClassName(StringRef RuntimeName);
+
+ llvm::Function *GetMethodDefinition(const ObjCMethodDecl *MD);
+
+ /// BuildIvarLayout - Builds ivar layout bitmap for the class
+ /// implementation for the __strong or __weak case.
+ ///
+ /// \param hasMRCWeakIvars - Whether we are compiling in MRC and there
+ /// are any weak ivars defined directly in the class. Meaningless unless
+ /// building a weak layout. Does not guarantee that the layout will
+ /// actually have any entries, because the ivar might be under-aligned.
+ llvm::Constant *BuildIvarLayout(const ObjCImplementationDecl *OI,
+ CharUnits beginOffset,
+ CharUnits endOffset,
+ bool forStrongLayout,
+ bool hasMRCWeakIvars);
+
+ llvm::Constant *BuildStrongIvarLayout(const ObjCImplementationDecl *OI,
+ CharUnits beginOffset,
+ CharUnits endOffset) {
+ return BuildIvarLayout(OI, beginOffset, endOffset, true, false);
+ }
+
+ llvm::Constant *BuildWeakIvarLayout(const ObjCImplementationDecl *OI,
+ CharUnits beginOffset,
+ CharUnits endOffset,
+ bool hasMRCWeakIvars) {
+ return BuildIvarLayout(OI, beginOffset, endOffset, false, hasMRCWeakIvars);
+ }
+
+ Qualifiers::ObjCLifetime getBlockCaptureLifetime(QualType QT, bool ByrefLayout);
+
+ void UpdateRunSkipBlockVars(bool IsByref,
+ Qualifiers::ObjCLifetime LifeTime,
+ CharUnits FieldOffset,
+ CharUnits FieldSize);
+
+ void BuildRCBlockVarRecordLayout(const RecordType *RT,
+ CharUnits BytePos, bool &HasUnion,
+ bool ByrefLayout=false);
+
+ void BuildRCRecordLayout(const llvm::StructLayout *RecLayout,
+ const RecordDecl *RD,
+ ArrayRef<const FieldDecl*> RecFields,
+ CharUnits BytePos, bool &HasUnion,
+ bool ByrefLayout);
+
+ uint64_t InlineLayoutInstruction(SmallVectorImpl<unsigned char> &Layout);
+
+ llvm::Constant *getBitmapBlockLayout(bool ComputeByrefLayout);
+
+ /// GetIvarLayoutName - Returns a unique constant for the given
+ /// ivar layout bitmap.
+ llvm::Constant *GetIvarLayoutName(IdentifierInfo *Ident,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// EmitPropertyList - Emit the given property list. The return
+ /// value has type PropertyListPtrTy.
+ llvm::Constant *EmitPropertyList(Twine Name,
+ const Decl *Container,
+ const ObjCContainerDecl *OCD,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// EmitProtocolMethodTypes - Generate the array of extended method type
+ /// strings. The return value has type Int8PtrPtrTy.
+ llvm::Constant *EmitProtocolMethodTypes(Twine Name,
+ ArrayRef<llvm::Constant*> MethodTypes,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// PushProtocolProperties - Push protocol's property on the input stack.
+ void PushProtocolProperties(
+ llvm::SmallPtrSet<const IdentifierInfo*, 16> &PropertySet,
+ SmallVectorImpl<llvm::Constant*> &Properties,
+ const Decl *Container,
+ const ObjCProtocolDecl *Proto,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// GetProtocolRef - Return a reference to the internal protocol
+ /// description, creating an empty one if it has not been
+ /// defined. The return value has type ProtocolPtrTy.
+ llvm::Constant *GetProtocolRef(const ObjCProtocolDecl *PD);
+
+public:
+ /// CreateMetadataVar - Create a global variable with internal
+ /// linkage for use by the Objective-C runtime.
+ ///
+ /// This is a convenience wrapper which not only creates the
+ /// variable, but also sets the section and alignment and adds the
+ /// global to the "llvm.used" list.
+ ///
+ /// \param Name - The variable name.
+ /// \param Init - The variable initializer; this is also used to
+ /// define the type of the variable.
+ /// \param Section - The section the variable should go into, or empty.
+ /// \param Align - The alignment for the variable, or 0.
+ /// \param AddToUsed - Whether the variable should be added to
+ /// "llvm.used".
+ llvm::GlobalVariable *CreateMetadataVar(Twine Name, llvm::Constant *Init,
+ StringRef Section, CharUnits Align,
+ bool AddToUsed);
+
+protected:
+ CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ llvm::Value *Sel,
+ llvm::Value *Arg0,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *OMD,
+ const ObjCInterfaceDecl *ClassReceiver,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// EmitImageInfo - Emit the image info marker used to encode some
+ /// module-level information.
+ void EmitImageInfo();
+
+public:
+ CGObjCCommonMac(CodeGen::CodeGenModule &cgm) :
+ CGObjCRuntime(cgm), VMContext(cgm.getLLVMContext()) { }
+
+ bool isNonFragileABI() const {
+ return ObjCABI == 2;
+ }
+
+ ConstantAddress GenerateConstantString(const StringLiteral *SL) override;
+
+ llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD=nullptr) override;
+
+ void GenerateProtocol(const ObjCProtocolDecl *PD) override;
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD)=0;
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD)=0;
+ llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) override;
+ llvm::Constant *BuildRCBlockLayout(CodeGen::CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) override;
+
+ llvm::Constant *BuildByrefLayout(CodeGen::CodeGenModule &CGM,
+ QualType T) override;
+};
+
+class CGObjCMac : public CGObjCCommonMac {
+private:
+ ObjCTypesHelper ObjCTypes;
+
+ /// EmitModuleInfo - Another marker encoding module-level
+ /// information.
+ void EmitModuleInfo();
+
+ /// EmitModuleSymbols - Emit module symbols, the list of defined
+ /// classes and categories. The result has type SymtabPtrTy.
+ llvm::Constant *EmitModuleSymbols();
+
+ /// FinishModule - Write out global data structures at the end of
+ /// processing a translation unit.
+ void FinishModule();
+
+ /// EmitClassExtension - Generate the class extension structure used
+ /// to store the weak ivar layout and properties. The return value
+ /// has type ClassExtensionPtrTy.
+ llvm::Constant *EmitClassExtension(const ObjCImplementationDecl *ID,
+ CharUnits instanceSize,
+ bool hasMRCWeakIvars);
+
+ /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given class.
+ llvm::Value *EmitClassRef(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *ID);
+
+ llvm::Value *EmitClassRefFromId(CodeGenFunction &CGF,
+ IdentifierInfo *II);
+
+ llvm::Value *EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) override;
+
+ /// EmitSuperClassRef - Emits a reference to the class's main metadata class.
+ llvm::Value *EmitSuperClassRef(const ObjCInterfaceDecl *ID);
+
+ /// EmitIvarList - Emit the ivar list for the given
+ /// implementation. If ForClass is true the list of class ivars
+ /// (i.e. metaclass ivars) is emitted, otherwise the list of
+ /// interface ivars will be emitted. The return value has type
+ /// IvarListPtrTy.
+ llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID,
+ bool ForClass);
+
+ /// EmitMetaClassRef - Emit a forward reference to the class structure
+ /// for the metaclass of the given interface. The return value has
+ /// type ClassPtrTy.
+ llvm::Constant *EmitMetaClassRef(const ObjCInterfaceDecl *ID);
+
+ /// EmitMetaClass - Emit a class structure for the metaclass of the
+ /// given implementation. The return value has type ClassPtrTy.
+ llvm::Constant *EmitMetaClass(const ObjCImplementationDecl *ID,
+ llvm::Constant *Protocols,
+ ArrayRef<llvm::Constant*> Methods);
+
+ llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
+
+ llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);
+
+ /// EmitMethodList - Emit the method list for the given
+ /// implementation. The return value has type MethodListPtrTy.
+ llvm::Constant *EmitMethodList(Twine Name,
+ const char *Section,
+ ArrayRef<llvm::Constant*> Methods);
+
+ /// EmitMethodDescList - Emit a method description list for a list of
+ /// method declarations.
+ /// - Name: The name of the resulting metadata global.
+ /// - Section: The section into which the list should be emitted.
+ /// - Methods: The method description constants to output.
+ ///
+ /// The return value has type MethodDescriptionListPtrTy.
+ llvm::Constant *EmitMethodDescList(Twine Name,
+ const char *Section,
+ ArrayRef<llvm::Constant*> Methods);
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD) override;
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) override;
+
+ /// EmitProtocolExtension - Generate the protocol extension
+ /// structure used to store optional instance and class methods, and
+ /// protocol properties. The return value has type
+ /// ProtocolExtensionPtrTy.
+ llvm::Constant *
+ EmitProtocolExtension(const ObjCProtocolDecl *PD,
+ ArrayRef<llvm::Constant*> OptInstanceMethods,
+ ArrayRef<llvm::Constant*> OptClassMethods,
+ ArrayRef<llvm::Constant*> MethodTypesExt);
+
+ /// EmitProtocolList - Generate the list of referenced
+ /// protocols. The return value has type ProtocolListPtrTy.
+ llvm::Constant *EmitProtocolList(Twine Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end);
+
+ /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
+ /// for the given selector.
+ llvm::Value *EmitSelector(CodeGenFunction &CGF, Selector Sel);
+ Address EmitSelectorAddr(CodeGenFunction &CGF, Selector Sel);
+
+public:
+ CGObjCMac(CodeGen::CodeGenModule &cgm);
+
+ llvm::Function *ModuleInitFunction() override;
+
+ CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel, llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method) override;
+
+ CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return, QualType ResultType,
+ Selector Sel, const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl, llvm::Value *Receiver,
+ bool IsClassMessage, const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) override;
+
+ llvm::Value *GetClass(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *ID) override;
+
+ llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel) override;
+ Address GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) override;
+
+ /// The NeXT/Apple runtimes do not support typed selectors; just emit an
+ /// untyped one.
+ llvm::Value *GetSelector(CodeGenFunction &CGF,
+ const ObjCMethodDecl *Method) override;
+
+ llvm::Constant *GetEHType(QualType T) override;
+
+ void GenerateCategory(const ObjCCategoryImplDecl *CMD) override;
+
+ void GenerateClass(const ObjCImplementationDecl *ClassDecl) override;
+
+ void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) override {}
+
+ llvm::Value *GenerateProtocolRef(CodeGenFunction &CGF,
+ const ObjCProtocolDecl *PD) override;
+
+ llvm::Constant *GetPropertyGetFunction() override;
+ llvm::Constant *GetPropertySetFunction() override;
+ llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) override;
+ llvm::Constant *GetGetStructFunction() override;
+ llvm::Constant *GetSetStructFunction() override;
+ llvm::Constant *GetCppAtomicObjectGetFunction() override;
+ llvm::Constant *GetCppAtomicObjectSetFunction() override;
+ llvm::Constant *EnumerationMutationFunction() override;
+
+ void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) override;
+ void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) override;
+ void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, const Stmt &S);
+ void EmitThrowStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtThrowStmt &S,
+ bool ClearInsertionPoint=true) override;
+ llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ Address AddrWeakObj) override;
+ void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, Address dst) override;
+ void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, Address dest,
+ bool threadlocal = false) override;
+ void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, Address dest,
+ llvm::Value *ivarOffset) override;
+ void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, Address dest) override;
+ void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ Address dest, Address src,
+ llvm::Value *size) override;
+
+ LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy,
+ llvm::Value *BaseValue, const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) override;
+ llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) override;
+
+ /// GetClassGlobal - Return the global variable for the Objective-C
+ /// class of the given name.
+ llvm::GlobalVariable *GetClassGlobal(const std::string &Name,
+ bool Weak = false) override {
+ llvm_unreachable("CGObjCMac::GetClassGlobal");
+ }
+};
+
+class CGObjCNonFragileABIMac : public CGObjCCommonMac {
+private:
+ ObjCNonFragileABITypesHelper ObjCTypes;
+ llvm::GlobalVariable* ObjCEmptyCacheVar;
+ llvm::GlobalVariable* ObjCEmptyVtableVar;
+
+ /// SuperClassReferences - uniqued super class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> SuperClassReferences;
+
+ /// MetaClassReferences - uniqued meta class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> MetaClassReferences;
+
+ /// EHTypeReferences - uniqued class ehtype references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> EHTypeReferences;
+
+ /// VTableDispatchMethods - List of methods for which we generate
+ /// vtable-based message dispatch.
+ llvm::DenseSet<Selector> VTableDispatchMethods;
+
+ /// DefinedMetaClasses - List of defined meta-classes.
+ std::vector<llvm::GlobalValue*> DefinedMetaClasses;
+
+ /// isVTableDispatchedSelector - Returns true if SEL is a
+ /// vtable-based selector.
+ bool isVTableDispatchedSelector(Selector Sel);
+
+ /// FinishNonFragileABIModule - Write out global data structures at the end of
+ /// processing a translation unit.
+ void FinishNonFragileABIModule();
+
+ /// AddModuleClassList - Add the given list of class pointers to the
+ /// module with the provided symbol and section names.
+ void AddModuleClassList(ArrayRef<llvm::GlobalValue*> Container,
+ const char *SymbolName,
+ const char *SectionName);
+
+ llvm::GlobalVariable * BuildClassRoTInitializer(unsigned flags,
+ unsigned InstanceStart,
+ unsigned InstanceSize,
+ const ObjCImplementationDecl *ID);
+ llvm::GlobalVariable * BuildClassMetaData(const std::string &ClassName,
+ llvm::Constant *IsAGV,
+ llvm::Constant *SuperClassGV,
+ llvm::Constant *ClassRoGV,
+ bool HiddenVisibility,
+ bool Weak);
+
+ llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
+
+ llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);
+
+ /// EmitMethodList - Emit the method list for the given
+ /// implementation. The return value has type MethodListnfABITy.
+ llvm::Constant *EmitMethodList(Twine Name,
+ const char *Section,
+ ArrayRef<llvm::Constant*> Methods);
+ /// EmitIvarList - Emit the ivar list for the given
+ /// implementation. If ForClass is true the list of class ivars
+ /// (i.e. metaclass ivars) is emitted, otherwise the list of
+ /// interface ivars will be emitted. The return value has type
+ /// IvarListnfABIPtrTy.
+ llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID);
+
+ llvm::Constant *EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar,
+ unsigned long int offset);
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD) override;
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) override;
+
+ /// EmitProtocolList - Generate the list of referenced
+ /// protocols. The return value has type ProtocolListPtrTy.
+ llvm::Constant *EmitProtocolList(Twine Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end);
+
+ CodeGen::RValue EmitVTableMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+
+ /// GetClassGlobal - Return the global variable for the Objective-C
+ /// class of the given name.
+ llvm::GlobalVariable *GetClassGlobal(const std::string &Name,
+ bool Weak = false) override;
+
+ /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given class reference.
+ llvm::Value *EmitClassRef(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *ID);
+
+ llvm::Value *EmitClassRefFromId(CodeGenFunction &CGF,
+ IdentifierInfo *II, bool Weak,
+ const ObjCInterfaceDecl *ID);
+
+ llvm::Value *EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) override;
+
+ /// EmitSuperClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given super class reference.
+ llvm::Value *EmitSuperClassRef(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *ID);
+
+ /// EmitMetaClassRef - Return a Value* for the address of the _class_t
+ /// metadata for the given interface.
+ llvm::Value *EmitMetaClassRef(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *ID, bool Weak);
+
+ /// ObjCIvarOffsetVariable - Returns the ivar offset variable for
+ /// the given ivar.
+ ///
+ llvm::GlobalVariable * ObjCIvarOffsetVariable(
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar);
+
+ /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
+ /// for the given selector.
+ llvm::Value *EmitSelector(CodeGenFunction &CGF, Selector Sel);
+ Address EmitSelectorAddr(CodeGenFunction &CGF, Selector Sel);
+
+ /// GetInterfaceEHType - Get the cached ehtype for the given Objective-C
+ /// interface. The return value has type EHTypePtrTy.
+ llvm::Constant *GetInterfaceEHType(const ObjCInterfaceDecl *ID,
+ bool ForDefinition);
+
+ const char *getMetaclassSymbolPrefix() const {
+ return "OBJC_METACLASS_$_";
+ }
+
+ const char *getClassSymbolPrefix() const {
+ return "OBJC_CLASS_$_";
+ }
+
+ void GetClassSizeInfo(const ObjCImplementationDecl *OID,
+ uint32_t &InstanceStart,
+ uint32_t &InstanceSize);
+
+ // Shamelessly stolen from Analysis/CFRefCount.cpp
+ Selector GetNullarySelector(const char* name) const {
+ IdentifierInfo* II = &CGM.getContext().Idents.get(name);
+ return CGM.getContext().Selectors.getSelector(0, &II);
+ }
+
+ Selector GetUnarySelector(const char* name) const {
+ IdentifierInfo* II = &CGM.getContext().Idents.get(name);
+ return CGM.getContext().Selectors.getSelector(1, &II);
+ }
+
+ /// ImplementationIsNonLazy - Check whether the given category or
+ /// class implementation is "non-lazy".
+ bool ImplementationIsNonLazy(const ObjCImplDecl *OD) const;
+
+ bool IsIvarOffsetKnownIdempotent(const CodeGen::CodeGenFunction &CGF,
+ const ObjCIvarDecl *IV) {
+ // Annotate the load as an invariant load iff we are inside an instance
+ // method and the ivar belongs to the instance method's class or one of
+ // its superclasses. This check is needed because the ivar offset is a
+ // lazily initialized value that may depend on objc_msgSend performing a
+ // fixup on the first message dispatch.
+ //
+ // An additional opportunity to mark the load as invariant arises when the
+ // base of the ivar access is a parameter to an Objective-C method.
+ // However, because the parameters are not available in the current
+ // interface, we cannot perform this check.
+ if (const ObjCMethodDecl *MD =
+ dyn_cast_or_null<ObjCMethodDecl>(CGF.CurFuncDecl))
+ if (MD->isInstanceMethod())
+ if (const ObjCInterfaceDecl *ID = MD->getClassInterface())
+ return IV->getContainingInterface()->isSuperClassOf(ID);
+ return false;
+ }
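+
+ // Illustrative example (editorial, not from the original patch): for
+ // @implementation Foo
+ // - (int)value { return _ivar; }
+ // @end
+ // the load of _ivar's offset variable may be marked invariant, since the
+ // method is an instance method of the ivar's containing class; an access
+ // through an unrelated class's interface may not.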
+
+public:
+ CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm);
+ // FIXME. All stubs for now!
+ llvm::Function *ModuleInitFunction() override;
+
+ CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType, Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method) override;
+
+ CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return, QualType ResultType,
+ Selector Sel, const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl, llvm::Value *Receiver,
+ bool IsClassMessage, const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) override;
+
+ llvm::Value *GetClass(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *ID) override;
+
+ llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel) override
+ { return EmitSelector(CGF, Sel); }
+ Address GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) override
+ { return EmitSelectorAddr(CGF, Sel); }
+
+ /// The NeXT/Apple runtimes do not support typed selectors; just emit an
+ /// untyped one.
+ llvm::Value *GetSelector(CodeGenFunction &CGF,
+ const ObjCMethodDecl *Method) override
+ { return EmitSelector(CGF, Method->getSelector()); }
+
+ void GenerateCategory(const ObjCCategoryImplDecl *CMD) override;
+
+ void GenerateClass(const ObjCImplementationDecl *ClassDecl) override;
+
+ void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) override {}
+
+ llvm::Value *GenerateProtocolRef(CodeGenFunction &CGF,
+ const ObjCProtocolDecl *PD) override;
+
+ llvm::Constant *GetEHType(QualType T) override;
+
+ llvm::Constant *GetPropertyGetFunction() override {
+ return ObjCTypes.getGetPropertyFn();
+ }
+ llvm::Constant *GetPropertySetFunction() override {
+ return ObjCTypes.getSetPropertyFn();
+ }
+
+ llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) override {
+ return ObjCTypes.getOptimizedSetPropertyFn(atomic, copy);
+ }
+
+ llvm::Constant *GetSetStructFunction() override {
+ return ObjCTypes.getCopyStructFn();
+ }
+ llvm::Constant *GetGetStructFunction() override {
+ return ObjCTypes.getCopyStructFn();
+ }
+ llvm::Constant *GetCppAtomicObjectSetFunction() override {
+ return ObjCTypes.getCppAtomicObjectFunction();
+ }
+ llvm::Constant *GetCppAtomicObjectGetFunction() override {
+ return ObjCTypes.getCppAtomicObjectFunction();
+ }
+
+ llvm::Constant *EnumerationMutationFunction() override {
+ return ObjCTypes.getEnumerationMutationFn();
+ }
+
+ void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) override;
+ void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) override;
+ void EmitThrowStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtThrowStmt &S,
+ bool ClearInsertionPoint=true) override;
+ llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ Address AddrWeakObj) override;
+ void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, Address dst) override;
+ void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, Address dest,
+ bool threadlocal = false) override;
+ void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, Address dest,
+ llvm::Value *ivarOffset) override;
+ void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, Address dest) override;
+ void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ Address dest, Address src,
+ llvm::Value *size) override;
+ LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy,
+ llvm::Value *BaseValue, const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) override;
+ llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) override;
+};
+
+/// A helper class for performing the null-initialization of a return
+/// value.
+struct NullReturnState {
+ llvm::BasicBlock *NullBB;
+ NullReturnState() : NullBB(nullptr) {}
+
+ /// Perform a null-check of the given receiver.
+ void init(CodeGenFunction &CGF, llvm::Value *receiver) {
+ // Make blocks for the null-receiver and call edges.
+ NullBB = CGF.createBasicBlock("msgSend.null-receiver");
+ llvm::BasicBlock *callBB = CGF.createBasicBlock("msgSend.call");
+
+ // Check for a null receiver and, if there is one, jump to the
+ // null-receiver block. There's no point in trying to avoid it:
+ // we're always going to put *something* there, because otherwise
+ // we shouldn't have done this null-check in the first place.
+ llvm::Value *isNull = CGF.Builder.CreateIsNull(receiver);
+ CGF.Builder.CreateCondBr(isNull, NullBB, callBB);
+
+ // Otherwise, start performing the call.
+ CGF.EmitBlock(callBB);
+ }
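+
+ // For illustration (editorial sketch of the IR this typically produces;
+ // exact value names are assumptions):
+ // %isNull = icmp eq i8* %receiver, null
+ // br i1 %isNull, label %msgSend.null-receiver, label %msgSend.call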
+
+ /// Complete the null-return operation. It is valid to call this
+ /// regardless of whether 'init' has been called.
+ RValue complete(CodeGenFunction &CGF, RValue result, QualType resultType,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ // If we never had to do a null-check, just use the raw result.
+ if (!NullBB) return result;
+
+ // The continuation block. This will be left null if we don't have an
+ // IP, which can happen if the method we're calling is marked noreturn.
+ llvm::BasicBlock *contBB = nullptr;
+
+ // Finish the call path.
+ llvm::BasicBlock *callBB = CGF.Builder.GetInsertBlock();
+ if (callBB) {
+ contBB = CGF.createBasicBlock("msgSend.cont");
+ CGF.Builder.CreateBr(contBB);
+ }
+
+ // Okay, start emitting the null-receiver block.
+ CGF.EmitBlock(NullBB);
+
+ // Release any consumed arguments we've got.
+ if (Method) {
+ CallArgList::const_iterator I = CallArgs.begin();
+ for (ObjCMethodDecl::param_const_iterator i = Method->param_begin(),
+ e = Method->param_end(); i != e; ++i, ++I) {
+ const ParmVarDecl *ParamDecl = (*i);
+ if (ParamDecl->hasAttr<NSConsumedAttr>()) {
+ RValue RV = I->RV;
+ assert(RV.isScalar() &&
+ "NullReturnState::complete - arg not on object");
+ CGF.EmitARCRelease(RV.getScalarVal(), ARCImpreciseLifetime);
+ }
+ }
+ }
+
+ // The phi code below assumes that we haven't needed any control flow yet.
+ assert(CGF.Builder.GetInsertBlock() == NullBB);
+
+ // If we've got a void return, just jump to the continuation block.
+ if (result.isScalar() && resultType->isVoidType()) {
+ // No jumps required if the message-send was noreturn.
+ if (contBB) CGF.EmitBlock(contBB);
+ return result;
+ }
+
+ // If we've got a scalar return, build a phi.
+ if (result.isScalar()) {
+ // Derive the null-initialization value.
+ llvm::Constant *null = CGF.CGM.EmitNullConstant(resultType);
+
+ // If no join is necessary, just flow out.
+ if (!contBB) return RValue::get(null);
+
+ // Otherwise, build a phi.
+ CGF.EmitBlock(contBB);
+ llvm::PHINode *phi = CGF.Builder.CreatePHI(null->getType(), 2);
+ phi->addIncoming(result.getScalarVal(), callBB);
+ phi->addIncoming(null, NullBB);
+ return RValue::get(phi);
+ }
+
+ // If we've got an aggregate return, null the buffer out.
+ // FIXME: maybe we should be doing things differently for all the
+ // cases where the ABI has us returning (1) non-agg values in
+ // memory or (2) agg values in registers.
+ if (result.isAggregate()) {
+ assert(result.isAggregate() && "null init of non-aggregate result?");
+ CGF.EmitNullInitialization(result.getAggregateAddress(), resultType);
+ if (contBB) CGF.EmitBlock(contBB);
+ return result;
+ }
+
+ // Complex types.
+ CGF.EmitBlock(contBB);
+ CodeGenFunction::ComplexPairTy callResult = result.getComplexVal();
+
+ // Find the scalar type and its zero value.
+ llvm::Type *scalarTy = callResult.first->getType();
+ llvm::Constant *scalarZero = llvm::Constant::getNullValue(scalarTy);
+
+ // Build phis for both coordinates.
+ llvm::PHINode *real = CGF.Builder.CreatePHI(scalarTy, 2);
+ real->addIncoming(callResult.first, callBB);
+ real->addIncoming(scalarZero, NullBB);
+ llvm::PHINode *imag = CGF.Builder.CreatePHI(scalarTy, 2);
+ imag->addIncoming(callResult.second, callBB);
+ imag->addIncoming(scalarZero, NullBB);
+ return RValue::getComplex(real, imag);
+ }
+};
+
+} // end anonymous namespace
+
+/* *** Helper Functions *** */
+
+/// getConstantGEP() - Helper routine to construct simple GEPs.
+static llvm::Constant *getConstantGEP(llvm::LLVMContext &VMContext,
+ llvm::GlobalVariable *C, unsigned idx0,
+ unsigned idx1) {
+ llvm::Value *Idxs[] = {
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), idx0),
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), idx1)
+ };
+ return llvm::ConstantExpr::getGetElementPtr(C->getValueType(), C, Idxs);
+}
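+
+// For example, getConstantGEP(VMContext, Entry, 0, 0), as used later in this
+// file, yields a constant pointer to the first character of a string global,
+// i.e. the address &Entry[0][0].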
+
+/// hasObjCExceptionAttribute - Return true if this class or any super
+/// class has the __objc_exception__ attribute.
+static bool hasObjCExceptionAttribute(ASTContext &Context,
+ const ObjCInterfaceDecl *OID) {
+ if (OID->hasAttr<ObjCExceptionAttr>())
+ return true;
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+ return hasObjCExceptionAttribute(Context, Super);
+ return false;
+}
+
+/* *** CGObjCMac Public Interface *** */
+
+CGObjCMac::CGObjCMac(CodeGen::CodeGenModule &cgm) : CGObjCCommonMac(cgm),
+ ObjCTypes(cgm) {
+ ObjCABI = 1;
+ EmitImageInfo();
+}
+
+/// GetClass - Return a reference to the class for the given interface
+/// decl.
+llvm::Value *CGObjCMac::GetClass(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRef(CGF, ID);
+}
+
+/// GetSelector - Return the pointer to the unique'd string for this selector.
+llvm::Value *CGObjCMac::GetSelector(CodeGenFunction &CGF, Selector Sel) {
+ return EmitSelector(CGF, Sel);
+}
+Address CGObjCMac::GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) {
+ return EmitSelectorAddr(CGF, Sel);
+}
+llvm::Value *CGObjCMac::GetSelector(CodeGenFunction &CGF,
+ const ObjCMethodDecl *Method) {
+ return EmitSelector(CGF, Method->getSelector());
+}
+
+llvm::Constant *CGObjCMac::GetEHType(QualType T) {
+ if (T->isObjCIdType() ||
+ T->isObjCQualifiedIdType()) {
+ return CGM.GetAddrOfRTTIDescriptor(
+ CGM.getContext().getObjCIdRedefinitionType(), /*ForEH=*/true);
+ }
+ if (T->isObjCClassType() ||
+ T->isObjCQualifiedClassType()) {
+ return CGM.GetAddrOfRTTIDescriptor(
+ CGM.getContext().getObjCClassRedefinitionType(), /*ForEH=*/true);
+ }
+ if (T->isObjCObjectPointerType())
+ return CGM.GetAddrOfRTTIDescriptor(T, /*ForEH=*/true);
+
+ llvm_unreachable("asking for catch type for ObjC type in fragile runtime");
+}
+
+/// Generate a constant CFString object.
+/*
+ struct __builtin_CFString {
+ const int *isa; // points to __CFConstantStringClassReference
+ int flags;
+ const char *str;
+ long length;
+ };
+*/
+
+/// ...or generate a constant NSString object.
+/*
+ struct __builtin_NSString {
+ const int *isa; // points to __NSConstantStringClassReference
+ const char *str;
+ unsigned int length;
+ };
+*/
+
+ConstantAddress CGObjCCommonMac::GenerateConstantString(
+ const StringLiteral *SL) {
+ return (CGM.getLangOpts().NoConstantCFStrings == 0 ?
+ CGM.GetAddrOfConstantCFString(SL) :
+ CGM.GetAddrOfConstantString(SL));
+}
+
+enum {
+ kCFTaggedObjectID_Integer = (1 << 1) + 1
+};
+
+/// Generates a message send where 'super' is the receiver. This is
+/// a message send to self with special delivery semantics indicating
+/// which class's method should be called.
+CodeGen::RValue
+CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CodeGen::CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ // Create and init a super structure; this is a (receiver, class)
+ // pair we will pass to objc_msgSendSuper.
+ Address ObjCSuper =
+ CGF.CreateTempAlloca(ObjCTypes.SuperTy, CGF.getPointerAlign(),
+ "objc_super");
+ llvm::Value *ReceiverAsObject =
+ CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateStore(
+ ReceiverAsObject,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 0, CharUnits::Zero()));
+
+ // If this is a class message the metaclass is passed as the target.
+ llvm::Value *Target;
+ if (IsClassMessage) {
+ if (isCategoryImpl) {
+ // A message sent to 'super' in a class method defined in a category
+ // implementation requires special treatment.
+ // If we are in a class method, we must retrieve the
+ // _metaclass_ for the current class, pointed at by
+ // the class's "isa" pointer. The following assumes that
+ // "isa" is the first ivar in a class (which it must be).
+ Target = EmitClassRef(CGF, Class->getSuperClass());
+ Target = CGF.Builder.CreateStructGEP(ObjCTypes.ClassTy, Target, 0);
+ Target = CGF.Builder.CreateAlignedLoad(Target, CGF.getPointerAlign());
+ } else {
+ llvm::Constant *MetaClassPtr = EmitMetaClassRef(Class);
+ llvm::Value *SuperPtr =
+ CGF.Builder.CreateStructGEP(ObjCTypes.ClassTy, MetaClassPtr, 1);
+ llvm::Value *Super =
+ CGF.Builder.CreateAlignedLoad(SuperPtr, CGF.getPointerAlign());
+ Target = Super;
+ }
+ } else if (isCategoryImpl)
+ Target = EmitClassRef(CGF, Class->getSuperClass());
+ else {
+ llvm::Value *ClassPtr = EmitSuperClassRef(Class);
+ ClassPtr = CGF.Builder.CreateStructGEP(ObjCTypes.ClassTy, ClassPtr, 1);
+ Target = CGF.Builder.CreateAlignedLoad(ClassPtr, CGF.getPointerAlign());
+ }
+ // FIXME: We shouldn't need to do this cast, rectify the ASTContext and
+ // ObjCTypes types.
+ llvm::Type *ClassTy =
+ CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
+ Target = CGF.Builder.CreateBitCast(Target, ClassTy);
+ CGF.Builder.CreateStore(Target,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 1, CGF.getPointerSize()));
+ return EmitMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF, Sel),
+ ObjCSuper.getPointer(), ObjCTypes.SuperPtrCTy,
+ true, CallArgs, Method, Class, ObjCTypes);
+}
+
+/// Generate code for a message send expression.
+CodeGen::RValue CGObjCMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method) {
+ return EmitMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF, Sel),
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs, Method, Class, ObjCTypes);
+}
+
+static bool isWeakLinkedClass(const ObjCInterfaceDecl *ID) {
+ do {
+ if (ID->isWeakImported())
+ return true;
+ } while ((ID = ID->getSuperClass()));
+
+ return false;
+}
+
+CodeGen::RValue
+CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ llvm::Value *Sel,
+ llvm::Value *Arg0,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method,
+ const ObjCInterfaceDecl *ClassReceiver,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ CallArgList ActualArgs;
+ if (!IsSuper)
+ Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy);
+ ActualArgs.add(RValue::get(Arg0), Arg0Ty);
+ ActualArgs.add(RValue::get(Sel), CGF.getContext().getObjCSelType());
+ ActualArgs.addFrom(CallArgs);
+
+ // If we're calling a method, use the formal signature.
+ MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);
+
+ if (Method)
+ assert(CGM.getContext().getCanonicalType(Method->getReturnType()) ==
+ CGM.getContext().getCanonicalType(ResultType) &&
+ "Result type mismatch!");
+
+ bool ReceiverCanBeNull = true;
+
+ // Super dispatch assumes that self is non-null; even the messenger
+ // doesn't have a null check internally.
+ if (IsSuper) {
+ ReceiverCanBeNull = false;
+
+ // If this is a direct dispatch of a class method, check whether the class,
+ // or anything in its hierarchy, was weak-linked.
+ } else if (ClassReceiver && Method && Method->isClassMethod()) {
+ ReceiverCanBeNull = isWeakLinkedClass(ClassReceiver);
+
+ // If we're emitting a method, and self is const (meaning just ARC, for now),
+ // and the receiver is a load of self, then self is a valid object.
+ } else if (auto CurMethod =
+ dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl)) {
+ auto Self = CurMethod->getSelfDecl();
+ if (Self->getType().isConstQualified()) {
+ if (auto LI = dyn_cast<llvm::LoadInst>(Arg0->stripPointerCasts())) {
+ llvm::Value *SelfAddr = CGF.GetAddrOfLocalVar(Self).getPointer();
+ if (SelfAddr == LI->getPointerOperand()) {
+ ReceiverCanBeNull = false;
+ }
+ }
+ }
+ }
+
+ NullReturnState nullReturn;
+
+ llvm::Constant *Fn = nullptr;
+ if (CGM.ReturnSlotInterferesWithArgs(MSI.CallInfo)) {
+ if (ReceiverCanBeNull) nullReturn.init(CGF, Arg0);
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper)
+ : ObjCTypes.getSendStretFn(IsSuper);
+ } else if (CGM.ReturnTypeUsesFPRet(ResultType)) {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendFpretFn2(IsSuper)
+ : ObjCTypes.getSendFpretFn(IsSuper);
+ } else if (CGM.ReturnTypeUsesFP2Ret(ResultType)) {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendFp2RetFn2(IsSuper)
+ : ObjCTypes.getSendFp2retFn(IsSuper);
+ } else {
+ // arm64 uses objc_msgSend for stret methods, yet a null-receiver check
+ // must still be made for them.
+ if (ReceiverCanBeNull && CGM.ReturnTypeUsesSRet(MSI.CallInfo))
+ nullReturn.init(CGF, Arg0);
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendFn2(IsSuper)
+ : ObjCTypes.getSendFn(IsSuper);
+ }
+
+ // Emit a null-check if there's a consumed argument other than the receiver.
+ bool RequiresNullCheck = false;
+ if (ReceiverCanBeNull && CGM.getLangOpts().ObjCAutoRefCount && Method) {
+ for (const auto *ParamDecl : Method->params()) {
+ if (ParamDecl->hasAttr<NSConsumedAttr>()) {
+ if (!nullReturn.NullBB)
+ nullReturn.init(CGF, Arg0);
+ RequiresNullCheck = true;
+ break;
+ }
+ }
+ }
+
+ llvm::Instruction *CallSite;
+ Fn = llvm::ConstantExpr::getBitCast(Fn, MSI.MessengerType);
+ RValue rvalue = CGF.EmitCall(MSI.CallInfo, Fn, Return, ActualArgs,
+ CGCalleeInfo(), &CallSite);
+
+ // Mark the call as noreturn if the method is marked noreturn and the
+ // receiver cannot be null.
+ if (Method && Method->hasAttr<NoReturnAttr>() && !ReceiverCanBeNull) {
+ llvm::CallSite(CallSite).setDoesNotReturn();
+ }
+
+ return nullReturn.complete(CGF, rvalue, ResultType, CallArgs,
+ RequiresNullCheck ? Method : nullptr);
+}
+
+static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT,
+ bool pointee = false) {
+ // Note that GC qualification applies recursively to C pointer types
+ // that aren't otherwise decorated. This is weird, but it's probably
+ // an intentional workaround to the unreliable placement of GC qualifiers.
+ if (FQT.isObjCGCStrong())
+ return Qualifiers::Strong;
+
+ if (FQT.isObjCGCWeak())
+ return Qualifiers::Weak;
+
+ if (auto ownership = FQT.getObjCLifetime()) {
+ // Ownership does not apply recursively to C pointer types.
+ if (pointee) return Qualifiers::GCNone;
+ switch (ownership) {
+ case Qualifiers::OCL_Weak: return Qualifiers::Weak;
+ case Qualifiers::OCL_Strong: return Qualifiers::Strong;
+ case Qualifiers::OCL_ExplicitNone: return Qualifiers::GCNone;
+ case Qualifiers::OCL_Autoreleasing: llvm_unreachable("autoreleasing ivar?");
+ case Qualifiers::OCL_None: llvm_unreachable("known nonzero");
+ }
+ llvm_unreachable("bad objc ownership");
+ }
+
+ // Treat unqualified retainable pointers as strong.
+ if (FQT->isObjCObjectPointerType() || FQT->isBlockPointerType())
+ return Qualifiers::Strong;
+
+ // Walk into C pointer types, but only in GC.
+ if (Ctx.getLangOpts().getGC() != LangOptions::NonGC) {
+ if (const PointerType *PT = FQT->getAs<PointerType>())
+ return GetGCAttrTypeForType(Ctx, PT->getPointeeType(), /*pointee*/ true);
+ }
+
+ return Qualifiers::GCNone;
+}
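+
+// Illustrative examples (editorial, summarizing the rules above):
+// __strong id x; -> Qualifiers::Strong
+// __weak id y; -> Qualifiers::Weak
+// id z; -> Qualifiers::Strong (unqualified retainable pointer)
+// id *p; (GC mode) -> recurses into the pointee type
+// int n; -> Qualifiers::GCNone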
+
+namespace {
+ struct IvarInfo {
+ CharUnits Offset;
+ uint64_t SizeInWords;
+ IvarInfo(CharUnits offset, uint64_t sizeInWords)
+ : Offset(offset), SizeInWords(sizeInWords) {}
+
+ // Allow sorting based on byte pos.
+ bool operator<(const IvarInfo &other) const {
+ return Offset < other.Offset;
+ }
+ };
+
+ /// A helper class for building GC layout strings.
+ class IvarLayoutBuilder {
+ CodeGenModule &CGM;
+
+ /// The start of the layout. Offsets will be relative to this value,
+ /// and entries less than this value will be silently discarded.
+ CharUnits InstanceBegin;
+
+ /// The end of the layout. Offsets will never exceed this value.
+ CharUnits InstanceEnd;
+
+ /// Whether we're generating the strong layout or the weak layout.
+ bool ForStrongLayout;
+
+ /// Whether the offsets in IvarsInfo might be out-of-order.
+ bool IsDisordered = false;
+
+ llvm::SmallVector<IvarInfo, 8> IvarsInfo;
+ public:
+ IvarLayoutBuilder(CodeGenModule &CGM, CharUnits instanceBegin,
+ CharUnits instanceEnd, bool forStrongLayout)
+ : CGM(CGM), InstanceBegin(instanceBegin), InstanceEnd(instanceEnd),
+ ForStrongLayout(forStrongLayout) {
+ }
+
+ void visitRecord(const RecordType *RT, CharUnits offset);
+
+ template <class Iterator, class GetOffsetFn>
+ void visitAggregate(Iterator begin, Iterator end,
+ CharUnits aggrOffset,
+ const GetOffsetFn &getOffset);
+
+ void visitField(const FieldDecl *field, CharUnits offset);
+
+ /// Add the layout of a block implementation.
+ void visitBlock(const CGBlockInfo &blockInfo);
+
+ /// Is there any information for an interesting bitmap?
+ bool hasBitmapData() const { return !IvarsInfo.empty(); }
+
+ llvm::Constant *buildBitmap(CGObjCCommonMac &CGObjC,
+ llvm::SmallVectorImpl<unsigned char> &buffer);
+
+ static void dump(ArrayRef<unsigned char> buffer) {
+ const unsigned char *s = buffer.data();
+ for (unsigned i = 0, e = buffer.size(); i < e; i++)
+ if (!(s[i] & 0xf0))
+ printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
+ else
+ printf("0x%x%s", s[i], s[i] != 0 ? ", " : "");
+ printf("\n");
+ }
+ };
+}
+
+llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+
+ llvm::Constant *nullPtr = llvm::Constant::getNullValue(CGM.Int8PtrTy);
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC)
+ return nullPtr;
+
+ IvarLayoutBuilder builder(CGM, CharUnits::Zero(), blockInfo.BlockSize,
+ /*for strong layout*/ true);
+
+ builder.visitBlock(blockInfo);
+
+ if (!builder.hasBitmapData())
+ return nullPtr;
+
+ llvm::SmallVector<unsigned char, 32> buffer;
+ llvm::Constant *C = builder.buildBitmap(*this, buffer);
+ if (CGM.getLangOpts().ObjCGCBitmapPrint && !buffer.empty()) {
+ printf("\n block variable layout for block: ");
+ builder.dump(buffer);
+ }
+
+ return C;
+}
+
+void IvarLayoutBuilder::visitBlock(const CGBlockInfo &blockInfo) {
+ // __isa is the first field in the block descriptor and, by the runtime's
+ // convention, must be assumed to be GC'able.
+ IvarsInfo.push_back(IvarInfo(CharUnits::Zero(), 1));
+
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ // Ignore the optional 'this' capture: C++ objects are not assumed
+ // to be GC'ed.
+
+ CharUnits lastFieldOffset;
+
+ // Walk the captured variables.
+ for (const auto &CI : blockDecl->captures()) {
+ const VarDecl *variable = CI.getVariable();
+ QualType type = variable->getType();
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+
+ // Ignore constant captures.
+ if (capture.isConstant()) continue;
+
+ CharUnits fieldOffset = capture.getOffset();
+
+ // Block fields are not necessarily ordered; if we detect that we're
+ // adding them out-of-order, make sure we sort later.
+ if (fieldOffset < lastFieldOffset)
+ IsDisordered = true;
+ lastFieldOffset = fieldOffset;
+
+ // __block variables are passed by their descriptor address.
+ if (CI.isByRef()) {
+ IvarsInfo.push_back(IvarInfo(fieldOffset, /*size in words*/ 1));
+ continue;
+ }
+
+ assert(!type->isArrayType() && "array variable should not be caught");
+ if (const RecordType *record = type->getAs<RecordType>()) {
+ visitRecord(record, fieldOffset);
+ continue;
+ }
+
+ Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), type);
+
+ if (GCAttr == Qualifiers::Strong) {
+ assert(CGM.getContext().getTypeSize(type)
+ == CGM.getTarget().getPointerWidth(0));
+ IvarsInfo.push_back(IvarInfo(fieldOffset, /*size in words*/ 1));
+ }
+ }
+}
+
+
+/// getBlockCaptureLifetime - This routine returns the lifetime of the captured
+/// block variable for the purpose of block layout metadata generation. FQT is
+/// the type of the variable captured in the block.
+Qualifiers::ObjCLifetime CGObjCCommonMac::getBlockCaptureLifetime(QualType FQT,
+ bool ByrefLayout) {
+ // If it has an ownership qualifier, we're done.
+ if (auto lifetime = FQT.getObjCLifetime())
+ return lifetime;
+
+ // If it doesn't, and this is ARC, it has no ownership.
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ return Qualifiers::OCL_None;
+
+ // In MRC, retainable pointers are owned by non-__block variables.
+ if (FQT->isObjCObjectPointerType() || FQT->isBlockPointerType())
+ return ByrefLayout ? Qualifiers::OCL_ExplicitNone : Qualifiers::OCL_Strong;
+
+ return Qualifiers::OCL_None;
+}
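+
+// Illustrative examples (editorial, summarizing the rules above):
+// __weak id w; -> OCL_Weak (explicit qualifier wins)
+// id o; (MRC, non-byref layout) -> OCL_Strong
+// id o; (MRC, byref layout) -> OCL_ExplicitNone
+// int n; -> OCL_None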
+
+void CGObjCCommonMac::UpdateRunSkipBlockVars(bool IsByref,
+ Qualifiers::ObjCLifetime LifeTime,
+ CharUnits FieldOffset,
+ CharUnits FieldSize) {
+ // __block variables are passed by their descriptor address.
+ if (IsByref)
+ RunSkipBlockVars.push_back(RUN_SKIP(BLOCK_LAYOUT_BYREF, FieldOffset,
+ FieldSize));
+ else if (LifeTime == Qualifiers::OCL_Strong)
+ RunSkipBlockVars.push_back(RUN_SKIP(BLOCK_LAYOUT_STRONG, FieldOffset,
+ FieldSize));
+ else if (LifeTime == Qualifiers::OCL_Weak)
+ RunSkipBlockVars.push_back(RUN_SKIP(BLOCK_LAYOUT_WEAK, FieldOffset,
+ FieldSize));
+ else if (LifeTime == Qualifiers::OCL_ExplicitNone)
+ RunSkipBlockVars.push_back(RUN_SKIP(BLOCK_LAYOUT_UNRETAINED, FieldOffset,
+ FieldSize));
+ else
+ RunSkipBlockVars.push_back(RUN_SKIP(BLOCK_LAYOUT_NON_OBJECT_BYTES,
+ FieldOffset,
+ FieldSize));
+}
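+
+// For illustration (editorial): a __strong object pointer captured at offset
+// 32 on a 64-bit target appends RUN_SKIP(BLOCK_LAYOUT_STRONG, 32, 8), while a
+// plain int captured at offset 40 appends
+// RUN_SKIP(BLOCK_LAYOUT_NON_OBJECT_BYTES, 40, 4).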
+
+void CGObjCCommonMac::BuildRCRecordLayout(const llvm::StructLayout *RecLayout,
+ const RecordDecl *RD,
+ ArrayRef<const FieldDecl*> RecFields,
+ CharUnits BytePos, bool &HasUnion,
+ bool ByrefLayout) {
+ bool IsUnion = (RD && RD->isUnion());
+ CharUnits MaxUnionSize = CharUnits::Zero();
+ const FieldDecl *MaxField = nullptr;
+ const FieldDecl *LastFieldBitfieldOrUnnamed = nullptr;
+ CharUnits MaxFieldOffset = CharUnits::Zero();
+ CharUnits LastBitfieldOrUnnamedOffset = CharUnits::Zero();
+
+ if (RecFields.empty())
+ return;
+ unsigned ByteSizeInBits = CGM.getTarget().getCharWidth();
+
+ for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
+ const FieldDecl *Field = RecFields[i];
+ // Note that 'i' here is actually the field index inside RD of Field,
+ // although this dependency is hidden.
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ CharUnits FieldOffset =
+ CGM.getContext().toCharUnitsFromBits(RL.getFieldOffset(i));
+
+ // Skip over unnamed fields and bitfields.
+ if (!Field->getIdentifier() || Field->isBitField()) {
+ LastFieldBitfieldOrUnnamed = Field;
+ LastBitfieldOrUnnamedOffset = FieldOffset;
+ continue;
+ }
+
+ LastFieldBitfieldOrUnnamed = nullptr;
+ QualType FQT = Field->getType();
+ if (FQT->isRecordType() || FQT->isUnionType()) {
+ if (FQT->isUnionType())
+ HasUnion = true;
+
+ BuildRCBlockVarRecordLayout(FQT->getAs<RecordType>(),
+ BytePos + FieldOffset, HasUnion);
+ continue;
+ }
+
+ if (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
+ const ConstantArrayType *CArray =
+ dyn_cast_or_null<ConstantArrayType>(Array);
+ assert(CArray && "only array with known element size is supported");
+ uint64_t ElCount = CArray->getSize().getZExtValue();
+ FQT = CArray->getElementType();
+ while (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
+ const ConstantArrayType *CArray =
+ dyn_cast_or_null<ConstantArrayType>(Array);
+ ElCount *= CArray->getSize().getZExtValue();
+ FQT = CArray->getElementType();
+ }
+ if (FQT->isRecordType() && ElCount) {
+ int OldIndex = RunSkipBlockVars.size() - 1;
+ const RecordType *RT = FQT->getAs<RecordType>();
+ BuildRCBlockVarRecordLayout(RT, BytePos + FieldOffset,
+ HasUnion);
+
+ // Replicate layout information for each array element. Note that
+ // one element is already done.
+ uint64_t ElIx = 1;
+ for (int FirstIndex = RunSkipBlockVars.size() - 1; ElIx < ElCount; ElIx++) {
+ CharUnits Size = CGM.getContext().getTypeSizeInChars(RT);
+ for (int i = OldIndex+1; i <= FirstIndex; ++i)
+ RunSkipBlockVars.push_back(
+ RUN_SKIP(RunSkipBlockVars[i].opcode,
+ RunSkipBlockVars[i].block_var_bytepos + Size*ElIx,
+ RunSkipBlockVars[i].block_var_size));
+ }
+ continue;
+ }
+ }
+ CharUnits FieldSize = CGM.getContext().getTypeSizeInChars(Field->getType());
+ if (IsUnion) {
+ CharUnits UnionIvarSize = FieldSize;
+ if (UnionIvarSize > MaxUnionSize) {
+ MaxUnionSize = UnionIvarSize;
+ MaxField = Field;
+ MaxFieldOffset = FieldOffset;
+ }
+ } else {
+ UpdateRunSkipBlockVars(false,
+ getBlockCaptureLifetime(FQT, ByrefLayout),
+ BytePos + FieldOffset,
+ FieldSize);
+ }
+ }
+
+ if (LastFieldBitfieldOrUnnamed) {
+ if (LastFieldBitfieldOrUnnamed->isBitField()) {
+ // Last field was a bitfield. Must update the info.
+ uint64_t BitFieldSize
+ = LastFieldBitfieldOrUnnamed->getBitWidthValue(CGM.getContext());
+ unsigned UnsSize = (BitFieldSize / ByteSizeInBits) +
+ ((BitFieldSize % ByteSizeInBits) != 0);
+ CharUnits Size = CharUnits::fromQuantity(UnsSize);
+ Size += LastBitfieldOrUnnamedOffset;
+ UpdateRunSkipBlockVars(false,
+ getBlockCaptureLifetime(LastFieldBitfieldOrUnnamed->getType(),
+ ByrefLayout),
+ BytePos + LastBitfieldOrUnnamedOffset,
+ Size);
+ } else {
+ assert(!LastFieldBitfieldOrUnnamed->getIdentifier() &&"Expected unnamed");
+ // Last field was unnamed. Must update skip info.
+ CharUnits FieldSize
+ = CGM.getContext().getTypeSizeInChars(LastFieldBitfieldOrUnnamed->getType());
+ UpdateRunSkipBlockVars(false,
+ getBlockCaptureLifetime(LastFieldBitfieldOrUnnamed->getType(),
+ ByrefLayout),
+ BytePos + LastBitfieldOrUnnamedOffset,
+ FieldSize);
+ }
+ }
+
+ if (MaxField)
+ UpdateRunSkipBlockVars(false,
+ getBlockCaptureLifetime(MaxField->getType(), ByrefLayout),
+ BytePos + MaxFieldOffset,
+ MaxUnionSize);
+}
+
+void CGObjCCommonMac::BuildRCBlockVarRecordLayout(const RecordType *RT,
+ CharUnits BytePos,
+ bool &HasUnion,
+ bool ByrefLayout) {
+ const RecordDecl *RD = RT->getDecl();
+ SmallVector<const FieldDecl*, 16> Fields(RD->fields());
+ llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
+ const llvm::StructLayout *RecLayout =
+ CGM.getDataLayout().getStructLayout(cast<llvm::StructType>(Ty));
+
+ BuildRCRecordLayout(RecLayout, RD, Fields, BytePos, HasUnion, ByrefLayout);
+}
+
+/// InlineLayoutInstruction - This routine produces an inline instruction for
+/// the block variable layout if it can; if not, it returns 0. The rules are
+/// as follows: if ((uintptr_t) layout) < (1 << 12), the layout is inline. In
+/// the 64-bit world, an inline layout of value 0x0000000000000xyz is
+/// interpreted as follows:
+/// x captured object pointers of BLOCK_LAYOUT_STRONG, followed by
+/// y captured objects of BLOCK_LAYOUT_BYREF, followed by
+/// z captured objects of BLOCK_LAYOUT_WEAK. If any of the above is missing,
+/// zero replaces it. For example, 0x0000000000000x00 means x
+/// BLOCK_LAYOUT_STRONG objects and no BLOCK_LAYOUT_BYREF or
+/// BLOCK_LAYOUT_WEAK objects are captured.
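+///
+/// Worked example (editorial): a block capturing two __strong object
+/// pointers, one __block variable, and one __weak pointer encodes x=2, y=1,
+/// z=1, i.e. the inline layout value 0x0000000000000211.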
+uint64_t CGObjCCommonMac::InlineLayoutInstruction(
+ SmallVectorImpl<unsigned char> &Layout) {
+ uint64_t Result = 0;
+ if (Layout.size() <= 3) {
+ unsigned size = Layout.size();
+ unsigned strong_word_count = 0, byref_word_count=0, weak_word_count=0;
+ unsigned char inst;
+ enum BLOCK_LAYOUT_OPCODE opcode;
+ switch (size) {
+ case 3:
+ inst = Layout[0];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_STRONG)
+ strong_word_count = (inst & 0xF)+1;
+ else
+ return 0;
+ inst = Layout[1];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_BYREF)
+ byref_word_count = (inst & 0xF)+1;
+ else
+ return 0;
+ inst = Layout[2];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_WEAK)
+ weak_word_count = (inst & 0xF)+1;
+ else
+ return 0;
+ break;
+
+ case 2:
+ inst = Layout[0];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_STRONG) {
+ strong_word_count = (inst & 0xF)+1;
+ inst = Layout[1];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_BYREF)
+ byref_word_count = (inst & 0xF)+1;
+ else if (opcode == BLOCK_LAYOUT_WEAK)
+ weak_word_count = (inst & 0xF)+1;
+ else
+ return 0;
+ }
+ else if (opcode == BLOCK_LAYOUT_BYREF) {
+ byref_word_count = (inst & 0xF)+1;
+ inst = Layout[1];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_WEAK)
+ weak_word_count = (inst & 0xF)+1;
+ else
+ return 0;
+ }
+ else
+ return 0;
+ break;
+
+ case 1:
+ inst = Layout[0];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_STRONG)
+ strong_word_count = (inst & 0xF)+1;
+ else if (opcode == BLOCK_LAYOUT_BYREF)
+ byref_word_count = (inst & 0xF)+1;
+ else if (opcode == BLOCK_LAYOUT_WEAK)
+ weak_word_count = (inst & 0xF)+1;
+ else
+ return 0;
+ break;
+
+ default:
+ return 0;
+ }
+
+ // Cannot inline when any of the imm fields is 15, because the imm is one
+ // less than the actual word count (so 15 means 16 actual words), and the
+ // inline layout can only represent word counts of 0 through 15.
+ if (strong_word_count == 16 || byref_word_count == 16 || weak_word_count == 16)
+ return 0;
+
+ unsigned count =
+ (strong_word_count != 0) + (byref_word_count != 0) + (weak_word_count != 0);
+
+ if (size == count) {
+ if (strong_word_count)
+ Result = strong_word_count;
+ Result <<= 4;
+ if (byref_word_count)
+ Result += byref_word_count;
+ Result <<= 4;
+ if (weak_word_count)
+ Result += weak_word_count;
+ }
+ }
+ return Result;
+}
+
+llvm::Constant *CGObjCCommonMac::getBitmapBlockLayout(bool ComputeByrefLayout) {
+ llvm::Constant *nullPtr = llvm::Constant::getNullValue(CGM.Int8PtrTy);
+ if (RunSkipBlockVars.empty())
+ return nullPtr;
+ unsigned WordSizeInBits = CGM.getTarget().getPointerWidth(0);
+ unsigned ByteSizeInBits = CGM.getTarget().getCharWidth();
+ unsigned WordSizeInBytes = WordSizeInBits/ByteSizeInBits;
+
+ // Sort on byte position; captures might not be allocated in order,
+ // and unions can do funny things.
+ llvm::array_pod_sort(RunSkipBlockVars.begin(), RunSkipBlockVars.end());
+ SmallVector<unsigned char, 16> Layout;
+
+ unsigned size = RunSkipBlockVars.size();
+ for (unsigned i = 0; i < size; i++) {
+ enum BLOCK_LAYOUT_OPCODE opcode = RunSkipBlockVars[i].opcode;
+ CharUnits start_byte_pos = RunSkipBlockVars[i].block_var_bytepos;
+ CharUnits end_byte_pos = start_byte_pos;
+ unsigned j = i+1;
+ while (j < size) {
+ if (opcode == RunSkipBlockVars[j].opcode) {
+ end_byte_pos = RunSkipBlockVars[j++].block_var_bytepos;
+ i++;
+ }
+ else
+ break;
+ }
+ CharUnits size_in_bytes =
+ end_byte_pos - start_byte_pos + RunSkipBlockVars[j-1].block_var_size;
+ if (j < size) {
+ CharUnits gap =
+ RunSkipBlockVars[j].block_var_bytepos -
+ RunSkipBlockVars[j-1].block_var_bytepos - RunSkipBlockVars[j-1].block_var_size;
+ size_in_bytes += gap;
+ }
+ CharUnits residue_in_bytes = CharUnits::Zero();
+ if (opcode == BLOCK_LAYOUT_NON_OBJECT_BYTES) {
+ residue_in_bytes = size_in_bytes % WordSizeInBytes;
+ size_in_bytes -= residue_in_bytes;
+ opcode = BLOCK_LAYOUT_NON_OBJECT_WORDS;
+ }
+
+ unsigned size_in_words = size_in_bytes.getQuantity() / WordSizeInBytes;
+ while (size_in_words >= 16) {
+ // Note that the value in the imm. field is one less than the
+ // actual value. So, 0xf means 16 words follow!
+ unsigned char inst = (opcode << 4) | 0xf;
+ Layout.push_back(inst);
+ size_in_words -= 16;
+ }
+ if (size_in_words > 0) {
+ // Note that the value in the imm. field is one less than the
+ // actual value, so we subtract 1.
+ unsigned char inst = (opcode << 4) | (size_in_words-1);
+ Layout.push_back(inst);
+ }
+ if (residue_in_bytes > CharUnits::Zero()) {
+ unsigned char inst =
+ (BLOCK_LAYOUT_NON_OBJECT_BYTES << 4) | (residue_in_bytes.getQuantity()-1);
+ Layout.push_back(inst);
+ }
+ }
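+
+ // For illustration (editorial): a run of 20 strong words encodes as two
+ // instructions: (BLOCK_LAYOUT_STRONG << 4) | 0xf for the first 16 words,
+ // then (BLOCK_LAYOUT_STRONG << 4) | 0x3 for the remaining 4.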
+
+ while (!Layout.empty()) {
+ unsigned char inst = Layout.back();
+ enum BLOCK_LAYOUT_OPCODE opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_NON_OBJECT_BYTES || opcode == BLOCK_LAYOUT_NON_OBJECT_WORDS)
+ Layout.pop_back();
+ else
+ break;
+ }
+
+ uint64_t Result = InlineLayoutInstruction(Layout);
+ if (Result != 0) {
+ // Block variable layout instruction has been inlined.
+ if (CGM.getLangOpts().ObjCGCBitmapPrint) {
+ if (ComputeByrefLayout)
+ printf("\n Inline BYREF variable layout: ");
+ else
+ printf("\n Inline block variable layout: ");
+ printf("0x0%" PRIx64 "", Result);
+ if (auto numStrong = (Result & 0xF00) >> 8)
+ printf(", BL_STRONG:%d", (int) numStrong);
+ if (auto numByref = (Result & 0x0F0) >> 4)
+ printf(", BL_BYREF:%d", (int) numByref);
+ if (auto numWeak = (Result & 0x00F) >> 0)
+ printf(", BL_WEAK:%d", (int) numWeak);
+ printf(", BL_OPERATOR:0\n");
+ }
+ return llvm::ConstantInt::get(CGM.IntPtrTy, Result);
+ }
+
+ unsigned char inst = (BLOCK_LAYOUT_OPERATOR << 4) | 0;
+ Layout.push_back(inst);
+ std::string BitMap;
+ for (unsigned i = 0, e = Layout.size(); i != e; i++)
+ BitMap += Layout[i];
+
+ if (CGM.getLangOpts().ObjCGCBitmapPrint) {
+ if (ComputeByrefLayout)
+ printf("\n Byref variable layout: ");
+ else
+ printf("\n Block variable layout: ");
+ for (unsigned i = 0, e = BitMap.size(); i != e; i++) {
+ unsigned char inst = BitMap[i];
+ enum BLOCK_LAYOUT_OPCODE opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ unsigned delta = 1;
+ switch (opcode) {
+ case BLOCK_LAYOUT_OPERATOR:
+ printf("BL_OPERATOR:");
+ delta = 0;
+ break;
+ case BLOCK_LAYOUT_NON_OBJECT_BYTES:
+ printf("BL_NON_OBJECT_BYTES:");
+ break;
+ case BLOCK_LAYOUT_NON_OBJECT_WORDS:
+ printf("BL_NON_OBJECT_WORD:");
+ break;
+ case BLOCK_LAYOUT_STRONG:
+ printf("BL_STRONG:");
+ break;
+ case BLOCK_LAYOUT_BYREF:
+ printf("BL_BYREF:");
+ break;
+ case BLOCK_LAYOUT_WEAK:
+ printf("BL_WEAK:");
+ break;
+ case BLOCK_LAYOUT_UNRETAINED:
+ printf("BL_UNRETAINED:");
+ break;
+ }
+ // The actual word count is one more than what is in the imm. field
+ // of the instruction.
+ printf("%d", (inst & 0xf) + delta);
+ if (i < e-1)
+ printf(", ");
+ else
+ printf("\n");
+ }
+ }
+
+ llvm::GlobalVariable *Entry = CreateMetadataVar(
+ "OBJC_CLASS_NAME_",
+ llvm::ConstantDataArray::getString(VMContext, BitMap, false),
+ "__TEXT,__objc_classname,cstring_literals", CharUnits::One(), true);
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+llvm::Constant *CGObjCCommonMac::BuildRCBlockLayout(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ assert(CGM.getLangOpts().getGC() == LangOptions::NonGC);
+
+ RunSkipBlockVars.clear();
+ bool hasUnion = false;
+
+ unsigned WordSizeInBits = CGM.getTarget().getPointerWidth(0);
+ unsigned ByteSizeInBits = CGM.getTarget().getCharWidth();
+ unsigned WordSizeInBytes = WordSizeInBits/ByteSizeInBits;
+
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ // Calculate the basic layout of the block structure.
+ const llvm::StructLayout *layout =
+ CGM.getDataLayout().getStructLayout(blockInfo.StructureType);
+
+ // Ignore the optional 'this' capture: C++ objects are not assumed
+ // to be GC'ed.
+ if (blockInfo.BlockHeaderForcedGapSize != CharUnits::Zero())
+ UpdateRunSkipBlockVars(false, Qualifiers::OCL_None,
+ blockInfo.BlockHeaderForcedGapOffset,
+ blockInfo.BlockHeaderForcedGapSize);
+ // Walk the captured variables.
+ for (const auto &CI : blockDecl->captures()) {
+ const VarDecl *variable = CI.getVariable();
+ QualType type = variable->getType();
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+
+ // Ignore constant captures.
+ if (capture.isConstant()) continue;
+
+ CharUnits fieldOffset =
+ CharUnits::fromQuantity(layout->getElementOffset(capture.getIndex()));
+
+ assert(!type->isArrayType() && "array variable should not be caught");
+ if (!CI.isByRef())
+ if (const RecordType *record = type->getAs<RecordType>()) {
+ BuildRCBlockVarRecordLayout(record, fieldOffset, hasUnion);
+ continue;
+ }
+ CharUnits fieldSize;
+ if (CI.isByRef())
+ fieldSize = CharUnits::fromQuantity(WordSizeInBytes);
+ else
+ fieldSize = CGM.getContext().getTypeSizeInChars(type);
+ UpdateRunSkipBlockVars(CI.isByRef(), getBlockCaptureLifetime(type, false),
+ fieldOffset, fieldSize);
+ }
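+
+  // Illustrative example (not exhaustive): on a 64-bit target, a block
+  // capturing one __strong id and one __weak id yields runs of
+  // (BL_STRONG, 8 bytes) and (BL_WEAK, 8 bytes), which
+  // getBitmapBlockLayout then encodes or inlines into a pointer-sized
+  // constant.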
+ return getBitmapBlockLayout(false);
+}
+
+
+llvm::Constant *CGObjCCommonMac::BuildByrefLayout(CodeGen::CodeGenModule &CGM,
+ QualType T) {
+ assert(CGM.getLangOpts().getGC() == LangOptions::NonGC);
+ assert(!T->isArrayType() && "__block array variable should not be caught");
+ CharUnits fieldOffset;
+ RunSkipBlockVars.clear();
+ bool hasUnion = false;
+ if (const RecordType *record = T->getAs<RecordType>()) {
+ BuildRCBlockVarRecordLayout(record, fieldOffset, hasUnion, true /*ByrefLayout */);
+ llvm::Constant *Result = getBitmapBlockLayout(true);
+ if (isa<llvm::ConstantInt>(Result))
+ Result = llvm::ConstantExpr::getIntToPtr(Result, CGM.Int8PtrTy);
+ return Result;
+ }
+ llvm::Constant *nullPtr = llvm::Constant::getNullValue(CGM.Int8PtrTy);
+ return nullPtr;
+}
+
+llvm::Value *CGObjCMac::GenerateProtocolRef(CodeGenFunction &CGF,
+ const ObjCProtocolDecl *PD) {
+  // FIXME: I don't understand why gcc generates this, or where it is
+  // resolved. Investigate. It's also wasteful to look this up over and over.
+ LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
+
+ return llvm::ConstantExpr::getBitCast(GetProtocolRef(PD),
+ ObjCTypes.getExternalProtocolPtrTy());
+}
+
+void CGObjCCommonMac::GenerateProtocol(const ObjCProtocolDecl *PD) {
+ // FIXME: We shouldn't need this, the protocol decl should contain enough
+ // information to tell us whether this was a declaration or a definition.
+ DefinedProtocols.insert(PD->getIdentifier());
+
+  // If we have generated a forward reference to this protocol, emit
+  // it now. Otherwise do nothing; the protocol objects are lazily
+  // emitted.
+ if (Protocols.count(PD->getIdentifier()))
+ GetOrEmitProtocol(PD);
+}
+
+llvm::Constant *CGObjCCommonMac::GetProtocolRef(const ObjCProtocolDecl *PD) {
+ if (DefinedProtocols.count(PD->getIdentifier()))
+ return GetOrEmitProtocol(PD);
+
+ return GetOrEmitProtocolRef(PD);
+}
+
+/*
+// Objective-C 1.0 extensions
+struct _objc_protocol {
+  struct _objc_protocol_extension *isa;
+  char *protocol_name;
+  struct _objc_protocol_list *protocol_list;
+  struct _objc__method_prototype_list *instance_methods;
+  struct _objc__method_prototype_list *class_methods;
+};
+
+See EmitProtocolExtension().
+*/
+llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *Entry = Protocols[PD->getIdentifier()];
+
+ // Early exit if a defining object has already been generated.
+ if (Entry && Entry->hasInitializer())
+ return Entry;
+
+ // Use the protocol definition, if there is one.
+ if (const ObjCProtocolDecl *Def = PD->getDefinition())
+ PD = Def;
+
+  // FIXME: I don't understand why gcc generates this, or where it is
+  // resolved. Investigate. It's also wasteful to look this up over and over.
+ LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
+
+ // Construct method lists.
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
+ std::vector<llvm::Constant*> MethodTypesExt, OptMethodTypesExt;
+ for (const auto *MD : PD->instance_methods()) {
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (!C)
+ return GetOrEmitProtocolRef(PD);
+
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptInstanceMethods.push_back(C);
+ OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
+ } else {
+ InstanceMethods.push_back(C);
+ MethodTypesExt.push_back(GetMethodVarType(MD, true));
+ }
+ }
+
+ for (const auto *MD : PD->class_methods()) {
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (!C)
+ return GetOrEmitProtocolRef(PD);
+
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptClassMethods.push_back(C);
+ OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
+ } else {
+ ClassMethods.push_back(C);
+ MethodTypesExt.push_back(GetMethodVarType(MD, true));
+ }
+ }
+
+ MethodTypesExt.insert(MethodTypesExt.end(),
+ OptMethodTypesExt.begin(), OptMethodTypesExt.end());
+
+ llvm::Constant *Values[] = {
+ EmitProtocolExtension(PD, OptInstanceMethods, OptClassMethods,
+ MethodTypesExt),
+ GetClassName(PD->getObjCRuntimeNameAsString()),
+ EmitProtocolList("OBJC_PROTOCOL_REFS_" + PD->getName(),
+ PD->protocol_begin(), PD->protocol_end()),
+ EmitMethodDescList("OBJC_PROTOCOL_INSTANCE_METHODS_" + PD->getName(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ InstanceMethods),
+ EmitMethodDescList("OBJC_PROTOCOL_CLASS_METHODS_" + PD->getName(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ ClassMethods)};
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
+ Values);
+
+ if (Entry) {
+ // Already created, update the initializer.
+ assert(Entry->hasPrivateLinkage());
+ Entry->setInitializer(Init);
+ } else {
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy,
+ false, llvm::GlobalValue::PrivateLinkage,
+ Init, "OBJC_PROTOCOL_" + PD->getName());
+ Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
+    // FIXME: Is this necessary? Why only for protocols?
+ Entry->setAlignment(4);
+
+ Protocols[PD->getIdentifier()] = Entry;
+ }
+ CGM.addCompilerUsedGlobal(Entry);
+
+ return Entry;
+}
+
+llvm::Constant *CGObjCMac::GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ if (!Entry) {
+ // We use the initializer as a marker of whether this is a forward
+ // reference or not. At module finalization we add the empty
+ // contents for protocols which were referenced but never defined.
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy,
+ false, llvm::GlobalValue::PrivateLinkage,
+ nullptr, "OBJC_PROTOCOL_" + PD->getName());
+ Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
+    // FIXME: Is this necessary? Why only for protocols?
+ Entry->setAlignment(4);
+ }
+
+ return Entry;
+}
+
+/*
+ struct _objc_protocol_extension {
+ uint32_t size;
+ struct objc_method_description_list *optional_instance_methods;
+ struct objc_method_description_list *optional_class_methods;
+ struct objc_property_list *instance_properties;
+ const char ** extendedMethodTypes;
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
+ ArrayRef<llvm::Constant*> OptInstanceMethods,
+ ArrayRef<llvm::Constant*> OptClassMethods,
+ ArrayRef<llvm::Constant*> MethodTypesExt) {
+ uint64_t Size =
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
+ llvm::Constant *Values[] = {
+ llvm::ConstantInt::get(ObjCTypes.IntTy, Size),
+ EmitMethodDescList("OBJC_PROTOCOL_INSTANCE_METHODS_OPT_" + PD->getName(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ OptInstanceMethods),
+ EmitMethodDescList("OBJC_PROTOCOL_CLASS_METHODS_OPT_" + PD->getName(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ OptClassMethods),
+ EmitPropertyList("OBJC_$_PROP_PROTO_LIST_" + PD->getName(), nullptr, PD,
+ ObjCTypes),
+ EmitProtocolMethodTypes("OBJC_PROTOCOL_METHOD_TYPES_" + PD->getName(),
+ MethodTypesExt, ObjCTypes)};
+
+ // Return null if no extension bits are used.
+ if (Values[1]->isNullValue() && Values[2]->isNullValue() &&
+ Values[3]->isNullValue() && Values[4]->isNullValue())
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.ProtocolExtensionTy, Values);
+
+ // No special section, but goes in llvm.used
+ return CreateMetadataVar("\01l_OBJC_PROTOCOLEXT_" + PD->getName(), Init,
+ StringRef(), CGM.getPointerAlign(), true);
+}
+
+/*
+ struct objc_protocol_list {
+ struct objc_protocol_list *next;
+ long count;
+ Protocol *list[];
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitProtocolList(Twine Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end) {
+ SmallVector<llvm::Constant *, 16> ProtocolRefs;
+
+ for (; begin != end; ++begin)
+ ProtocolRefs.push_back(GetProtocolRef(*begin));
+
+ // Just return null for empty protocol lists
+ if (ProtocolRefs.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+
+ // This list is null terminated.
+ ProtocolRefs.push_back(llvm::Constant::getNullValue(ObjCTypes.ProtocolPtrTy));
+
+ llvm::Constant *Values[3];
+ // This field is only used by the runtime.
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy,
+ ProtocolRefs.size() - 1);
+ Values[2] =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolPtrTy,
+ ProtocolRefs.size()),
+ ProtocolRefs);
+
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init, "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ CGM.getPointerAlign(), false);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListPtrTy);
+}
+
+void CGObjCCommonMac::
+PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*,16> &PropertySet,
+ SmallVectorImpl<llvm::Constant *> &Properties,
+ const Decl *Container,
+ const ObjCProtocolDecl *Proto,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ for (const auto *P : Proto->protocols())
+ PushProtocolProperties(PropertySet, Properties, Container, P, ObjCTypes);
+ for (const auto *PD : Proto->properties()) {
+ if (!PropertySet.insert(PD->getIdentifier()).second)
+ continue;
+ llvm::Constant *Prop[] = {
+ GetPropertyName(PD->getIdentifier()),
+ GetPropertyTypeString(PD, Container)
+ };
+ Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy, Prop));
+ }
+}
+
+/*
+ struct _objc_property {
+ const char * const name;
+ const char * const attributes;
+ };
+
+ struct _objc_property_list {
+ uint32_t entsize; // sizeof (struct _objc_property)
+ uint32_t prop_count;
+ struct _objc_property[prop_count];
+ };
+*/
+llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
+ const Decl *Container,
+ const ObjCContainerDecl *OCD,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ SmallVector<llvm::Constant *, 16> Properties;
+ llvm::SmallPtrSet<const IdentifierInfo*, 16> PropertySet;
+
+ auto AddProperty = [&](const ObjCPropertyDecl *PD) {
+ llvm::Constant *Prop[] = {GetPropertyName(PD->getIdentifier()),
+ GetPropertyTypeString(PD, Container)};
+ Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy, Prop));
+ };
+ if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD))
+ for (const ObjCCategoryDecl *ClassExt : OID->known_extensions())
+ for (auto *PD : ClassExt->properties()) {
+ PropertySet.insert(PD->getIdentifier());
+ AddProperty(PD);
+ }
+ for (const auto *PD : OCD->properties()) {
+ // Don't emit duplicate metadata for properties that were already in a
+ // class extension.
+ if (!PropertySet.insert(PD->getIdentifier()).second)
+ continue;
+ AddProperty(PD);
+ }
+
+ if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD)) {
+ for (const auto *P : OID->all_referenced_protocols())
+ PushProtocolProperties(PropertySet, Properties, Container, P, ObjCTypes);
+ }
+ else if (const ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(OCD)) {
+ for (const auto *P : CD->protocols())
+ PushProtocolProperties(PropertySet, Properties, Container, P, ObjCTypes);
+ }
+
+ // Return null for empty list.
+ if (Properties.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+
+ unsigned PropertySize =
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.PropertyTy);
+ llvm::Constant *Values[3];
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, PropertySize);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Properties.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.PropertyTy,
+ Properties.size());
+ Values[2] = llvm::ConstantArray::get(AT, Properties);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init,
+ (ObjCABI == 2) ? "__DATA, __objc_const" :
+ "__OBJC,__property,regular,no_dead_strip",
+ CGM.getPointerAlign(),
+ true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.PropertyListPtrTy);
+}
+
+llvm::Constant *
+CGObjCCommonMac::EmitProtocolMethodTypes(Twine Name,
+ ArrayRef<llvm::Constant*> MethodTypes,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ // Return null for empty list.
+ if (MethodTypes.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.Int8PtrPtrTy);
+
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+ MethodTypes.size());
+ llvm::Constant *Init = llvm::ConstantArray::get(AT, MethodTypes);
+
+ llvm::GlobalVariable *GV = CreateMetadataVar(
+ Name, Init, (ObjCABI == 2) ? "__DATA, __objc_const" : StringRef(),
+ CGM.getPointerAlign(), true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.Int8PtrPtrTy);
+}
+
+/*
+ struct objc_method_description_list {
+ int count;
+ struct objc_method_description list[];
+ };
+*/
+llvm::Constant *
+CGObjCMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
+ llvm::Constant *Desc[] = {
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy),
+ GetMethodVarType(MD)
+ };
+ if (!Desc[1])
+ return nullptr;
+
+ return llvm::ConstantStruct::get(ObjCTypes.MethodDescriptionTy,
+ Desc);
+}
+
+llvm::Constant *
+CGObjCMac::EmitMethodDescList(Twine Name, const char *Section,
+ ArrayRef<llvm::Constant*> Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
+
+ llvm::Constant *Values[2];
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodDescriptionTy,
+ Methods.size());
+ Values[1] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init, Section, CGM.getPointerAlign(), true);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.MethodDescriptionListPtrTy);
+}
+
+/*
+ struct _objc_category {
+ char *category_name;
+ char *class_name;
+ struct _objc_method_list *instance_methods;
+ struct _objc_method_list *class_methods;
+ struct _objc_protocol_list *protocols;
+ uint32_t size; // <rdar://4585769>
+ struct _objc_property_list *instance_properties;
+ };
+*/
+void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.CategoryTy);
+
+  // FIXME: This is poor design; the OCD should have a pointer to the category
+  // decl. Additionally, note that Category can be null for the @implementation
+  // w/o an @interface case. Sema should just create one for us as it does for
+  // @implementation so everyone else can live life under a clear blue sky.
+ const ObjCInterfaceDecl *Interface = OCD->getClassInterface();
+ const ObjCCategoryDecl *Category =
+ Interface->FindCategoryDeclaration(OCD->getIdentifier());
+
+ SmallString<256> ExtName;
+ llvm::raw_svector_ostream(ExtName) << Interface->getName() << '_'
+ << OCD->getName();
+
+ SmallVector<llvm::Constant *, 16> InstanceMethods, ClassMethods;
+ for (const auto *I : OCD->instance_methods())
+ // Instance methods should always be defined.
+ InstanceMethods.push_back(GetMethodConstant(I));
+
+ for (const auto *I : OCD->class_methods())
+ // Class methods should always be defined.
+ ClassMethods.push_back(GetMethodConstant(I));
+
+ llvm::Constant *Values[7];
+ Values[0] = GetClassName(OCD->getName());
+ Values[1] = GetClassName(Interface->getObjCRuntimeNameAsString());
+ LazySymbols.insert(Interface->getIdentifier());
+ Values[2] = EmitMethodList("OBJC_CATEGORY_INSTANCE_METHODS_" + ExtName.str(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ InstanceMethods);
+ Values[3] = EmitMethodList("OBJC_CATEGORY_CLASS_METHODS_" + ExtName.str(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ ClassMethods);
+ if (Category) {
+ Values[4] =
+ EmitProtocolList("OBJC_CATEGORY_PROTOCOLS_" + ExtName.str(),
+ Category->protocol_begin(), Category->protocol_end());
+ } else {
+ Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ }
+ Values[5] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+
+ // If there is no category @interface then there can be no properties.
+ if (Category) {
+ Values[6] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
+ OCD, Category, ObjCTypes);
+ } else {
+ Values[6] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ }
+
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.CategoryTy,
+ Values);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar("OBJC_CATEGORY_" + ExtName.str(), Init,
+ "__OBJC,__category,regular,no_dead_strip",
+ CGM.getPointerAlign(), true);
+ DefinedCategories.push_back(GV);
+ DefinedCategoryNames.insert(ExtName.str());
+  // Method definition entries must be cleared for the next implementation.
+ MethodDefinitions.clear();
+}
+
+enum FragileClassFlags {
+ /// Apparently: is not a meta-class.
+ FragileABI_Class_Factory = 0x00001,
+
+ /// Is a meta-class.
+ FragileABI_Class_Meta = 0x00002,
+
+ /// Has a non-trivial constructor or destructor.
+ FragileABI_Class_HasCXXStructors = 0x02000,
+
+ /// Has hidden visibility.
+ FragileABI_Class_Hidden = 0x20000,
+
+ /// Class implementation was compiled under ARC.
+ FragileABI_Class_CompiledByARC = 0x04000000,
+
+ /// Class implementation was compiled under MRC and has MRC weak ivars.
+ /// Exclusive with CompiledByARC.
+ FragileABI_Class_HasMRCWeakIvars = 0x08000000,
+};
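+// For example, a hidden class implementation compiled under ARC would carry
+// FragileABI_Class_Factory | FragileABI_Class_Hidden |
+// FragileABI_Class_CompiledByARC == 0x04020001.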
+
+enum NonFragileClassFlags {
+ /// Is a meta-class.
+ NonFragileABI_Class_Meta = 0x00001,
+
+ /// Is a root class.
+ NonFragileABI_Class_Root = 0x00002,
+
+ /// Has a non-trivial constructor or destructor.
+ NonFragileABI_Class_HasCXXStructors = 0x00004,
+
+ /// Has hidden visibility.
+ NonFragileABI_Class_Hidden = 0x00010,
+
+ /// Has the exception attribute.
+ NonFragileABI_Class_Exception = 0x00020,
+
+ /// (Obsolete) ARC-specific: this class has a .release_ivars method
+ NonFragileABI_Class_HasIvarReleaser = 0x00040,
+
+ /// Class implementation was compiled under ARC.
+ NonFragileABI_Class_CompiledByARC = 0x00080,
+
+ /// Class has non-trivial destructors, but zero-initialization is okay.
+ NonFragileABI_Class_HasCXXDestructorOnly = 0x00100,
+
+ /// Class implementation was compiled under MRC and has MRC weak ivars.
+ /// Exclusive with CompiledByARC.
+ NonFragileABI_Class_HasMRCWeakIvars = 0x00200,
+};
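+// Likewise, a root metaclass compiled under ARC would carry
+// NonFragileABI_Class_Meta | NonFragileABI_Class_Root |
+// NonFragileABI_Class_CompiledByARC == 0x00083.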
+
+static bool hasWeakMember(QualType type) {
+ if (type.getObjCLifetime() == Qualifiers::OCL_Weak) {
+ return true;
+ }
+
+ if (auto recType = type->getAs<RecordType>()) {
+ for (auto field : recType->getDecl()->fields()) {
+ if (hasWeakMember(field->getType()))
+ return true;
+ }
+ }
+
+ return false;
+}
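+// For example, an ivar of type 'struct { __weak id obj; }' is not itself
+// OCL_Weak, but the recursive walk over the RecordType's fields above
+// still finds the __weak member.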
+
+/// For compatibility, we only want to set the "HasMRCWeakIvars" flag
+/// (and actually fill in a layout string) if we really do have any
+/// __weak ivars.
+static bool hasMRCWeakIvars(CodeGenModule &CGM,
+ const ObjCImplementationDecl *ID) {
+ if (!CGM.getLangOpts().ObjCWeak) return false;
+ assert(CGM.getLangOpts().getGC() == LangOptions::NonGC);
+
+ for (const ObjCIvarDecl *ivar =
+ ID->getClassInterface()->all_declared_ivar_begin();
+ ivar; ivar = ivar->getNextIvar()) {
+ if (hasWeakMember(ivar->getType()))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+  struct _objc_class {
+    Class isa;
+    Class super_class;
+    const char *name;
+    long version;
+    long info;
+    long instance_size;
+    struct _objc_ivar_list *ivars;
+    struct _objc_method_list *methods;
+    struct _objc_cache *cache;
+    struct _objc_protocol_list *protocols;
+    // Objective-C 1.0 extensions (<rdar://4585769>)
+    const char *ivar_layout;
+    struct _objc_class_ext *ext;
+  };
+
+ See EmitClassExtension();
+*/
+void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
+ DefinedSymbols.insert(ID->getIdentifier());
+
+ std::string ClassName = ID->getNameAsString();
+ // FIXME: Gross
+ ObjCInterfaceDecl *Interface =
+ const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
+ llvm::Constant *Protocols =
+ EmitProtocolList("OBJC_CLASS_PROTOCOLS_" + ID->getName(),
+ Interface->all_referenced_protocol_begin(),
+ Interface->all_referenced_protocol_end());
+ unsigned Flags = FragileABI_Class_Factory;
+ if (ID->hasNonZeroConstructors() || ID->hasDestructors())
+ Flags |= FragileABI_Class_HasCXXStructors;
+
+ bool hasMRCWeak = false;
+
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ Flags |= FragileABI_Class_CompiledByARC;
+ else if ((hasMRCWeak = hasMRCWeakIvars(CGM, ID)))
+ Flags |= FragileABI_Class_HasMRCWeakIvars;
+
+ CharUnits Size =
+ CGM.getContext().getASTObjCImplementationLayout(ID).getSize();
+
+ // FIXME: Set CXX-structors flag.
+ if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
+ Flags |= FragileABI_Class_Hidden;
+
+ SmallVector<llvm::Constant *, 16> InstanceMethods, ClassMethods;
+ for (const auto *I : ID->instance_methods())
+ // Instance methods should always be defined.
+ InstanceMethods.push_back(GetMethodConstant(I));
+
+ for (const auto *I : ID->class_methods())
+ // Class methods should always be defined.
+ ClassMethods.push_back(GetMethodConstant(I));
+
+ for (const auto *PID : ID->property_impls()) {
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+ if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ InstanceMethods.push_back(C);
+ if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ InstanceMethods.push_back(C);
+ }
+ }
+
+ llvm::Constant *Values[12];
+ Values[ 0] = EmitMetaClass(ID, Protocols, ClassMethods);
+ if (ObjCInterfaceDecl *Super = Interface->getSuperClass()) {
+ // Record a reference to the super class.
+ LazySymbols.insert(Super->getIdentifier());
+
+ Values[ 1] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Super->getObjCRuntimeNameAsString()),
+ ObjCTypes.ClassPtrTy);
+ } else {
+ Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
+ }
+ Values[ 2] = GetClassName(ID->getObjCRuntimeNameAsString());
+ // Version is always 0.
+ Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
+ Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size.getQuantity());
+ Values[ 6] = EmitIvarList(ID, false);
+ Values[7] = EmitMethodList("OBJC_INSTANCE_METHODS_" + ID->getName(),
+ "__OBJC,__inst_meth,regular,no_dead_strip",
+ InstanceMethods);
+ // cache is always NULL.
+ Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
+ Values[ 9] = Protocols;
+ Values[10] = BuildStrongIvarLayout(ID, CharUnits::Zero(), Size);
+ Values[11] = EmitClassExtension(ID, Size, hasMRCWeak);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
+ Values);
+ std::string Name("OBJC_CLASS_");
+ Name += ClassName;
+ const char *Section = "__OBJC,__class,regular,no_dead_strip";
+ // Check for a forward reference.
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
+ if (GV) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward metaclass reference has incorrect type.");
+ GV->setInitializer(Init);
+ GV->setSection(Section);
+ GV->setAlignment(CGM.getPointerAlign().getQuantity());
+ CGM.addCompilerUsedGlobal(GV);
+ } else
+ GV = CreateMetadataVar(Name, Init, Section, CGM.getPointerAlign(), true);
+ DefinedClasses.push_back(GV);
+ ImplementedClasses.push_back(Interface);
+  // Method definition entries must be cleared for the next implementation.
+ MethodDefinitions.clear();
+}
+
+llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
+ llvm::Constant *Protocols,
+ ArrayRef<llvm::Constant*> Methods) {
+ unsigned Flags = FragileABI_Class_Meta;
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ClassTy);
+
+ if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
+ Flags |= FragileABI_Class_Hidden;
+
+ llvm::Constant *Values[12];
+ // The isa for the metaclass is the root of the hierarchy.
+ const ObjCInterfaceDecl *Root = ID->getClassInterface();
+ while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
+ Root = Super;
+ Values[ 0] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Root->getObjCRuntimeNameAsString()),
+ ObjCTypes.ClassPtrTy);
+ // The super class for the metaclass is emitted as the name of the
+ // super class. The runtime fixes this up to point to the
+ // *metaclass* for the super class.
+ if (ObjCInterfaceDecl *Super = ID->getClassInterface()->getSuperClass()) {
+ Values[ 1] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Super->getObjCRuntimeNameAsString()),
+ ObjCTypes.ClassPtrTy);
+ } else {
+ Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
+ }
+ Values[ 2] = GetClassName(ID->getObjCRuntimeNameAsString());
+ // Version is always 0.
+ Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
+ Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+ Values[ 6] = EmitIvarList(ID, true);
+ Values[7] =
+ EmitMethodList("OBJC_CLASS_METHODS_" + ID->getNameAsString(),
+ "__OBJC,__cls_meth,regular,no_dead_strip", Methods);
+ // cache is always NULL.
+ Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
+ Values[ 9] = Protocols;
+ // ivar_layout for metaclass is always NULL.
+ Values[10] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ // The class extension is always unused for metaclasses.
+ Values[11] = llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
+ Values);
+
+ std::string Name("OBJC_METACLASS_");
+ Name += ID->getName();
+
+ // Check for a forward reference.
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
+ if (GV) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward metaclass reference has incorrect type.");
+ GV->setInitializer(Init);
+ } else {
+ GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
+ llvm::GlobalValue::PrivateLinkage,
+ Init, Name);
+ }
+ GV->setSection("__OBJC,__meta_class,regular,no_dead_strip");
+ GV->setAlignment(4);
+ CGM.addCompilerUsedGlobal(GV);
+
+ return GV;
+}
+
+llvm::Constant *CGObjCMac::EmitMetaClassRef(const ObjCInterfaceDecl *ID) {
+ std::string Name = "OBJC_METACLASS_" + ID->getNameAsString();
+
+  // FIXME: Should we look these up somewhere other than the module? It's a bit
+  // silly since we only generate these while processing an implementation, so
+  // exactly one pointer would work if we knew when we entered/exited an
+  // implementation block.
+
+  // Check for an existing forward reference.
+  // A metaclass with internal linkage may have been defined previously;
+  // pass 'true' as the 2nd argument so it is returned.
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
+ if (!GV)
+ GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
+ llvm::GlobalValue::PrivateLinkage, nullptr,
+ Name);
+
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward metaclass reference has incorrect type.");
+ return GV;
+}
+
+llvm::Value *CGObjCMac::EmitSuperClassRef(const ObjCInterfaceDecl *ID) {
+ std::string Name = "OBJC_CLASS_" + ID->getNameAsString();
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
+
+ if (!GV)
+ GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
+ llvm::GlobalValue::PrivateLinkage, nullptr,
+ Name);
+
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward class metadata reference has incorrect type.");
+ return GV;
+}
+
+/*
+ Emit a "class extension", which in this specific context means extra
+ data that doesn't fit in the normal fragile-ABI class structure, and
+ has nothing to do with the language concept of a class extension.
+
+ struct objc_class_ext {
+ uint32_t size;
+ const char *weak_ivar_layout;
+ struct _objc_property_list *properties;
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID,
+ CharUnits InstanceSize, bool hasMRCWeakIvars) {
+ uint64_t Size =
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ClassExtensionTy);
+
+ llvm::Constant *Values[3];
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[1] = BuildWeakIvarLayout(ID, CharUnits::Zero(), InstanceSize,
+ hasMRCWeakIvars);
+ Values[2] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getName(),
+ ID, ID->getClassInterface(), ObjCTypes);
+
+ // Return null if no extension bits are used.
+ if (Values[1]->isNullValue() && Values[2]->isNullValue())
+ return llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy);
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.ClassExtensionTy, Values);
+ return CreateMetadataVar("OBJC_CLASSEXT_" + ID->getName(), Init,
+ "__OBJC,__class_ext,regular,no_dead_strip",
+ CGM.getPointerAlign(), true);
+}
+
+/*
+ struct objc_ivar {
+ char *ivar_name;
+ char *ivar_type;
+ int ivar_offset;
+ };
+
+ struct objc_ivar_list {
+ int ivar_count;
+ struct objc_ivar list[count];
+ };
+*/
+llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
+ bool ForClass) {
+ std::vector<llvm::Constant*> Ivars;
+
+  // When emitting the root class GCC emits ivar entries for the
+  // actual class structure. It is not clear if we need to follow this
+  // behavior; for now let's try to get away with not doing it. If we
+  // do need to, the cleanest solution would be to make up an
+  // ObjCInterfaceDecl for the class.
+ if (ForClass)
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
+
+ const ObjCInterfaceDecl *OID = ID->getClassInterface();
+
+ for (const ObjCIvarDecl *IVD = OID->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar()) {
+ // Ignore unnamed bit-fields.
+ if (!IVD->getDeclName())
+ continue;
+ llvm::Constant *Ivar[] = {
+ GetMethodVarName(IVD->getIdentifier()),
+ GetMethodVarType(IVD),
+ llvm::ConstantInt::get(ObjCTypes.IntTy,
+ ComputeIvarBaseOffset(CGM, OID, IVD))
+ };
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarTy, Ivar));
+ }
+
+ // Return null for empty list.
+ if (Ivars.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
+
+ llvm::Constant *Values[2];
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarTy,
+ Ivars.size());
+ Values[1] = llvm::ConstantArray::get(AT, Ivars);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+
+ llvm::GlobalVariable *GV;
+ if (ForClass)
+ GV =
+ CreateMetadataVar("OBJC_CLASS_VARIABLES_" + ID->getName(), Init,
+ "__OBJC,__class_vars,regular,no_dead_strip",
+ CGM.getPointerAlign(), true);
+ else
+ GV = CreateMetadataVar("OBJC_INSTANCE_VARIABLES_" + ID->getName(), Init,
+ "__OBJC,__instance_vars,regular,no_dead_strip",
+ CGM.getPointerAlign(), true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListPtrTy);
+}
+
+/*
+ struct objc_method {
+ SEL method_name;
+ char *method_types;
+ void *method;
+ };
+
+ struct objc_method_list {
+ struct objc_method_list *obsolete;
+ int count;
+ struct objc_method methods_list[count];
+ };
+*/
+
+/// GetMethodConstant - Return a struct objc_method constant for the
+/// given method if it has been defined. The result is null if the
+/// method has not been defined. The return value has type MethodPtrTy.
+llvm::Constant *CGObjCMac::GetMethodConstant(const ObjCMethodDecl *MD) {
+ llvm::Function *Fn = GetMethodDefinition(MD);
+ if (!Fn)
+ return nullptr;
+
+ llvm::Constant *Method[] = {
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy),
+ GetMethodVarType(MD),
+ llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy)
+ };
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
+}
+
+llvm::Constant *CGObjCMac::EmitMethodList(Twine Name,
+ const char *Section,
+ ArrayRef<llvm::Constant*> Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodListPtrTy);
+
+ llvm::Constant *Values[3];
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
+ Methods.size());
+ Values[2] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init, Section, CGM.getPointerAlign(), true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListPtrTy);
+}
+
+llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) {
+ SmallString<256> Name;
+ GetNameForMethod(OMD, CD, Name);
+
+ CodeGenTypes &Types = CGM.getTypes();
+ llvm::FunctionType *MethodTy =
+ Types.GetFunctionType(Types.arrangeObjCMethodDeclaration(OMD));
+ llvm::Function *Method =
+ llvm::Function::Create(MethodTy,
+ llvm::GlobalValue::InternalLinkage,
+ Name.str(),
+ &CGM.getModule());
+ MethodDefinitions.insert(std::make_pair(OMD, Method));
+
+ return Method;
+}
+
+llvm::GlobalVariable *CGObjCCommonMac::CreateMetadataVar(Twine Name,
+ llvm::Constant *Init,
+ StringRef Section,
+ CharUnits Align,
+ bool AddToUsed) {
+ llvm::Type *Ty = Init->getType();
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Ty, false,
+ llvm::GlobalValue::PrivateLinkage, Init, Name);
+ if (!Section.empty())
+ GV->setSection(Section);
+ GV->setAlignment(Align.getQuantity());
+ if (AddToUsed)
+ CGM.addCompilerUsedGlobal(GV);
+ return GV;
+}
+
+llvm::Function *CGObjCMac::ModuleInitFunction() {
+ // Abuse this interface function as a place to finalize.
+ FinishModule();
+ return nullptr;
+}
+
+llvm::Constant *CGObjCMac::GetPropertyGetFunction() {
+ return ObjCTypes.getGetPropertyFn();
+}
+
+llvm::Constant *CGObjCMac::GetPropertySetFunction() {
+ return ObjCTypes.getSetPropertyFn();
+}
+
+llvm::Constant *CGObjCMac::GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) {
+ return ObjCTypes.getOptimizedSetPropertyFn(atomic, copy);
+}
+
+llvm::Constant *CGObjCMac::GetGetStructFunction() {
+ return ObjCTypes.getCopyStructFn();
+}
+llvm::Constant *CGObjCMac::GetSetStructFunction() {
+ return ObjCTypes.getCopyStructFn();
+}
+
+llvm::Constant *CGObjCMac::GetCppAtomicObjectGetFunction() {
+ return ObjCTypes.getCppAtomicObjectFunction();
+}
+llvm::Constant *CGObjCMac::GetCppAtomicObjectSetFunction() {
+ return ObjCTypes.getCppAtomicObjectFunction();
+}
+
+llvm::Constant *CGObjCMac::EnumerationMutationFunction() {
+ return ObjCTypes.getEnumerationMutationFn();
+}
+
+void CGObjCMac::EmitTryStmt(CodeGenFunction &CGF, const ObjCAtTryStmt &S) {
+ return EmitTryOrSynchronizedStmt(CGF, S);
+}
+
+void CGObjCMac::EmitSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ return EmitTryOrSynchronizedStmt(CGF, S);
+}
+
+namespace {
+ struct PerformFragileFinally final : EHScopeStack::Cleanup {
+ const Stmt &S;
+ Address SyncArgSlot;
+ Address CallTryExitVar;
+ Address ExceptionData;
+ ObjCTypesHelper &ObjCTypes;
+ PerformFragileFinally(const Stmt *S,
+ Address SyncArgSlot,
+ Address CallTryExitVar,
+ Address ExceptionData,
+ ObjCTypesHelper *ObjCTypes)
+ : S(*S), SyncArgSlot(SyncArgSlot), CallTryExitVar(CallTryExitVar),
+ ExceptionData(ExceptionData), ObjCTypes(*ObjCTypes) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ // Check whether we need to call objc_exception_try_exit.
+ // In optimized code, this branch will always be folded.
+ llvm::BasicBlock *FinallyCallExit =
+ CGF.createBasicBlock("finally.call_exit");
+ llvm::BasicBlock *FinallyNoCallExit =
+ CGF.createBasicBlock("finally.no_call_exit");
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateLoad(CallTryExitVar),
+ FinallyCallExit, FinallyNoCallExit);
+
+ CGF.EmitBlock(FinallyCallExit);
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryExitFn(),
+ ExceptionData.getPointer());
+
+ CGF.EmitBlock(FinallyNoCallExit);
+
+ if (isa<ObjCAtTryStmt>(S)) {
+ if (const ObjCAtFinallyStmt* FinallyStmt =
+ cast<ObjCAtTryStmt>(S).getFinallyStmt()) {
+ // Don't try to do the @finally if this is an EH cleanup.
+ if (flags.isForEHCleanup()) return;
+
+ // Save the current cleanup destination in case there's
+ // control flow inside the finally statement.
+ llvm::Value *CurCleanupDest =
+ CGF.Builder.CreateLoad(CGF.getNormalCleanupDestSlot());
+
+ CGF.EmitStmt(FinallyStmt->getFinallyBody());
+
+ if (CGF.HaveInsertPoint()) {
+ CGF.Builder.CreateStore(CurCleanupDest,
+ CGF.getNormalCleanupDestSlot());
+ } else {
+ // Currently, the end of the cleanup must always exist.
+ CGF.EnsureInsertPoint();
+ }
+ }
+ } else {
+ // Emit objc_sync_exit(expr); as finally's sole statement for
+ // @synchronized.
+ llvm::Value *SyncArg = CGF.Builder.CreateLoad(SyncArgSlot);
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getSyncExitFn(), SyncArg);
+ }
+ }
+ };
+
+ class FragileHazards {
+ CodeGenFunction &CGF;
+ SmallVector<llvm::Value*, 20> Locals;
+ llvm::DenseSet<llvm::BasicBlock*> BlocksBeforeTry;
+
+ llvm::InlineAsm *ReadHazard;
+ llvm::InlineAsm *WriteHazard;
+
+ llvm::FunctionType *GetAsmFnType();
+
+ void collectLocals();
+ void emitReadHazard(CGBuilderTy &Builder);
+
+ public:
+ FragileHazards(CodeGenFunction &CGF);
+
+ void emitWriteHazard();
+ void emitHazardsInNewBlocks();
+ };
+}
+
+/// Create the fragile-ABI read and write hazards based on the current
+/// state of the function, which is presumed to be immediately prior
+/// to a @try block. These hazards are used to maintain correct
+/// semantics in the face of optimization and the fragile ABI's
+/// cavalier use of setjmp/longjmp.
+FragileHazards::FragileHazards(CodeGenFunction &CGF) : CGF(CGF) {
+ collectLocals();
+
+ if (Locals.empty()) return;
+
+ // Collect all the blocks in the function.
+ for (llvm::Function::iterator
+ I = CGF.CurFn->begin(), E = CGF.CurFn->end(); I != E; ++I)
+ BlocksBeforeTry.insert(&*I);
+
+ llvm::FunctionType *AsmFnTy = GetAsmFnType();
+
+ // Create a read hazard for the allocas. This inhibits dead-store
+ // optimizations and forces the values to memory. This hazard is
+ // inserted before any 'throwing' calls in the protected scope to
+ // reflect the possibility that the variables might be read from the
+ // catch block if the call throws.
+ {
+ std::string Constraint;
+ for (unsigned I = 0, E = Locals.size(); I != E; ++I) {
+ if (I) Constraint += ',';
+ Constraint += "*m";
+ }
+
+ ReadHazard = llvm::InlineAsm::get(AsmFnTy, "", Constraint, true, false);
+ }
+
+ // Create a write hazard for the allocas. This inhibits folding
+ // loads across the hazard. This hazard is inserted at the
+ // beginning of the catch path to reflect the possibility that the
+ // variables might have been written within the protected scope.
+ {
+ std::string Constraint;
+ for (unsigned I = 0, E = Locals.size(); I != E; ++I) {
+ if (I) Constraint += ',';
+ Constraint += "=*m";
+ }
+
+ WriteHazard = llvm::InlineAsm::get(AsmFnTy, "", Constraint, true, false);
+ }
+}
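+
+// With two locals, the hazards are roughly (illustrative IR only):
+//   call void asm sideeffect "", "*m,*m"(i32* %x, i8** %y)    ; read
+//   call void asm sideeffect "", "=*m,=*m"(i32* %x, i8** %y)  ; write
+// The empty asm body does nothing, but the memory constraints force the
+// allocas to stay live in memory across the hazard.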
+
+/// Emit a write hazard at the current location.
+void FragileHazards::emitWriteHazard() {
+ if (Locals.empty()) return;
+
+ CGF.EmitNounwindRuntimeCall(WriteHazard, Locals);
+}
+
+void FragileHazards::emitReadHazard(CGBuilderTy &Builder) {
+ assert(!Locals.empty());
+ llvm::CallInst *call = Builder.CreateCall(ReadHazard, Locals);
+ call->setDoesNotThrow();
+ call->setCallingConv(CGF.getRuntimeCC());
+}
+
+/// Emit read hazards in all the protected blocks, i.e. all the blocks
+/// which have been inserted since the beginning of the try.
+void FragileHazards::emitHazardsInNewBlocks() {
+ if (Locals.empty()) return;
+
+ CGBuilderTy Builder(CGF, CGF.getLLVMContext());
+
+ // Iterate through all blocks, skipping those prior to the try.
+ for (llvm::Function::iterator
+ FI = CGF.CurFn->begin(), FE = CGF.CurFn->end(); FI != FE; ++FI) {
+ llvm::BasicBlock &BB = *FI;
+ if (BlocksBeforeTry.count(&BB)) continue;
+
+ // Walk through all the calls in the block.
+ for (llvm::BasicBlock::iterator
+ BI = BB.begin(), BE = BB.end(); BI != BE; ++BI) {
+ llvm::Instruction &I = *BI;
+
+      // Ignore anything that isn't a non-intrinsic call or invoke; those
+      // are the only instructions that can possibly call longjmp.
+ if (!isa<llvm::CallInst>(I) && !isa<llvm::InvokeInst>(I)) continue;
+ if (isa<llvm::IntrinsicInst>(I))
+ continue;
+
+ // Ignore call sites marked nounwind. This may be questionable,
+ // since 'nounwind' doesn't necessarily mean 'does not call longjmp'.
+ llvm::CallSite CS(&I);
+ if (CS.doesNotThrow()) continue;
+
+ // Insert a read hazard before the call. This will ensure that
+ // any writes to the locals are performed before making the
+ // call. If the call throws, then this is sufficient to
+ // guarantee correctness as long as it doesn't also write to any
+ // locals.
+ Builder.SetInsertPoint(&BB, BI);
+ emitReadHazard(Builder);
+ }
+ }
+}
+
+static void addIfPresent(llvm::DenseSet<llvm::Value*> &S, llvm::Value *V) {
+ if (V) S.insert(V);
+}
+
+static void addIfPresent(llvm::DenseSet<llvm::Value*> &S, Address V) {
+ if (V.isValid()) S.insert(V.getPointer());
+}
+
+void FragileHazards::collectLocals() {
+ // Compute a set of allocas to ignore.
+ llvm::DenseSet<llvm::Value*> AllocasToIgnore;
+ addIfPresent(AllocasToIgnore, CGF.ReturnValue);
+ addIfPresent(AllocasToIgnore, CGF.NormalCleanupDest);
+
+ // Collect all the allocas currently in the function. This is
+ // probably way too aggressive.
+ llvm::BasicBlock &Entry = CGF.CurFn->getEntryBlock();
+ for (llvm::BasicBlock::iterator
+ I = Entry.begin(), E = Entry.end(); I != E; ++I)
+ if (isa<llvm::AllocaInst>(*I) && !AllocasToIgnore.count(&*I))
+ Locals.push_back(&*I);
+}
+
+llvm::FunctionType *FragileHazards::GetAsmFnType() {
+ SmallVector<llvm::Type *, 16> tys(Locals.size());
+ for (unsigned i = 0, e = Locals.size(); i != e; ++i)
+ tys[i] = Locals[i]->getType();
+ return llvm::FunctionType::get(CGF.VoidTy, tys, false);
+}
+
+/*
+
+ Objective-C setjmp-longjmp (sjlj) Exception Handling
+ --
+
+ A catch buffer is a setjmp buffer plus:
+ - a pointer to the exception that was caught
+ - a pointer to the previous exception data buffer
+ - two pointers of reserved storage
+ Therefore catch buffers form a stack, with a pointer to the top
+ of the stack kept in thread-local storage.
+
+ objc_exception_try_enter pushes a catch buffer onto the EH stack.
+ objc_exception_try_exit pops the given catch buffer, which is
+ required to be the top of the EH stack.
+ objc_exception_throw pops the top of the EH stack, writes the
+ thrown exception into the appropriate field, and longjmps
+ to the setjmp buffer. It crashes the process (with a printf
+ and an abort()) if there are no catch buffers on the stack.
+ objc_exception_extract just reads the exception pointer out of the
+ catch buffer.
+
+ There's no reason an implementation couldn't use a light-weight
+ setjmp here --- something like __builtin_setjmp, but API-compatible
+ with the heavyweight setjmp. This will be more important if we ever
+ want to implement correct ObjC/C++ exception interactions for the
+ fragile ABI.
+
+ Note that for this use of setjmp/longjmp to be correct, we may need
+ to mark some local variables volatile: if a non-volatile local
+ variable is modified between the setjmp and the longjmp, it has
+ indeterminate value. For the purposes of LLVM IR, it may be
+ sufficient to make loads and stores within the @try (to variables
+ declared outside the @try) volatile. This is necessary for
+ optimized correctness, but is not currently being done; this is
+ being tracked as rdar://problem/8160285
+
+ The basic framework for a @try-catch-finally is as follows:
+ {
+ objc_exception_data d;
+ id _rethrow = null;
+ bool _call_try_exit = true;
+
+ objc_exception_try_enter(&d);
+ if (!setjmp(d.jmp_buf)) {
+ ... try body ...
+ } else {
+ // exception path
+ id _caught = objc_exception_extract(&d);
+
+ // enter new try scope for handlers
+ if (!setjmp(d.jmp_buf)) {
+ ... match exception and execute catch blocks ...
+
+ // fell off end, rethrow.
+ _rethrow = _caught;
+ ... jump-through-finally to finally_rethrow ...
+ } else {
+ // exception in catch block
+ _rethrow = objc_exception_extract(&d);
+ _call_try_exit = false;
+ ... jump-through-finally to finally_rethrow ...
+ }
+ }
+ ... jump-through-finally to finally_end ...
+
+ finally:
+ if (_call_try_exit)
+ objc_exception_try_exit(&d);
+
+ ... finally block ....
+ ... dispatch to finally destination ...
+
+ finally_rethrow:
+ objc_exception_throw(_rethrow);
+
+ finally_end:
+ }
+
+ This framework differs slightly from the one gcc uses, in that gcc
+ uses _rethrow to determine if objc_exception_try_exit should be called
+ and if the object should be rethrown. This breaks in the face of
+ throwing nil and introduces unnecessary branches.
+
+ We specialize this framework for a few particular circumstances:
+
+ - If there are no catch blocks, then we avoid emitting the second
+ exception handling context.
+
+ - If there is a catch-all catch block (i.e. @catch(...) or @catch(id
+ e)) we avoid emitting the code to rethrow an uncaught exception.
+
+ - FIXME: If there is no @finally block we can do a few more
+ simplifications.
+
+ Rethrows and Jumps-Through-Finally
+ --
+
+ '@throw;' is supported by pushing the currently-caught exception
+ onto ObjCEHStack while the @catch blocks are emitted.
+
+ Branches through the @finally block are handled with an ordinary
+ normal cleanup. We do not register an EH cleanup; fragile-ABI ObjC
+ exceptions are not compatible with C++ exceptions, and this is
+ hardly the only place where this will go wrong.
+
+ @synchronized(expr) { stmt; } is emitted as if it were:
+ id synch_value = expr;
+ objc_sync_enter(synch_value);
+ @try { stmt; } @finally { objc_sync_exit(synch_value); }
+*/
+
+void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S) {
+ bool isTry = isa<ObjCAtTryStmt>(S);
+
+ // A destination for the fall-through edges of the catch handlers to
+ // jump to.
+ CodeGenFunction::JumpDest FinallyEnd =
+ CGF.getJumpDestInCurrentScope("finally.end");
+
+ // A destination for the rethrow edge of the catch handlers to jump
+ // to.
+ CodeGenFunction::JumpDest FinallyRethrow =
+ CGF.getJumpDestInCurrentScope("finally.rethrow");
+
+ // For @synchronized, call objc_sync_enter(sync.expr). The
+ // evaluation of the expression must occur before we enter the
+ // @synchronized. We can't avoid a temp here because we need the
+ // value to be preserved. If the backend ever does liveness
+ // correctly after setjmp, this will be unnecessary.
+ Address SyncArgSlot = Address::invalid();
+ if (!isTry) {
+ llvm::Value *SyncArg =
+ CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getSyncEnterFn(), SyncArg);
+
+ SyncArgSlot = CGF.CreateTempAlloca(SyncArg->getType(),
+ CGF.getPointerAlign(), "sync.arg");
+ CGF.Builder.CreateStore(SyncArg, SyncArgSlot);
+ }
+
+ // Allocate memory for the setjmp buffer. This needs to be kept
+ // live throughout the try and catch blocks.
+ Address ExceptionData = CGF.CreateTempAlloca(ObjCTypes.ExceptionDataTy,
+ CGF.getPointerAlign(),
+ "exceptiondata.ptr");
+
+ // Create the fragile hazards. Note that this will not capture any
+ // of the allocas required for exception processing, but will
+ // capture the current basic block (which extends all the way to the
+ // setjmp call) as "before the @try".
+ FragileHazards Hazards(CGF);
+
+ // Create a flag indicating whether the cleanup needs to call
+ // objc_exception_try_exit. This is true except when
+ // - no catches match and we're branching through the cleanup
+ // just to rethrow the exception, or
+ // - a catch matched and we're falling out of the catch handler.
+ // The setjmp-safety rule here is that we should always store to this
+ // variable in a place that dominates the branch through the cleanup
+ // without passing through any setjmps.
+ Address CallTryExitVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(),
+ CharUnits::One(),
+ "_call_try_exit");
+
+ // A slot containing the exception to rethrow. Only needed when we
+ // have both a @catch and a @finally.
+ Address PropagatingExnVar = Address::invalid();
+
+ // Push a normal cleanup to leave the try scope.
+ CGF.EHStack.pushCleanup<PerformFragileFinally>(NormalAndEHCleanup, &S,
+ SyncArgSlot,
+ CallTryExitVar,
+ ExceptionData,
+ &ObjCTypes);
+
+ // Enter a try block:
+ // - Call objc_exception_try_enter to push ExceptionData on top of
+ // the EH stack.
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryEnterFn(),
+ ExceptionData.getPointer());
+
+ // - Call setjmp on the exception data buffer.
+ llvm::Constant *Zero = llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0);
+ llvm::Value *GEPIndexes[] = { Zero, Zero, Zero };
+ llvm::Value *SetJmpBuffer = CGF.Builder.CreateGEP(
+ ObjCTypes.ExceptionDataTy, ExceptionData.getPointer(), GEPIndexes,
+ "setjmp_buffer");
+ llvm::CallInst *SetJmpResult = CGF.EmitNounwindRuntimeCall(
+ ObjCTypes.getSetJmpFn(), SetJmpBuffer, "setjmp_result");
+ SetJmpResult->setCanReturnTwice();
+
+ // If setjmp returned 0, enter the protected block; otherwise,
+ // branch to the handler.
+ llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
+ llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
+ llvm::Value *DidCatch =
+ CGF.Builder.CreateIsNotNull(SetJmpResult, "did_catch_exception");
+ CGF.Builder.CreateCondBr(DidCatch, TryHandler, TryBlock);
+
+ // Emit the protected block.
+ CGF.EmitBlock(TryBlock);
+ CGF.Builder.CreateStore(CGF.Builder.getTrue(), CallTryExitVar);
+ CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
+ : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
+
+ CGBuilderTy::InsertPoint TryFallthroughIP = CGF.Builder.saveAndClearIP();
+
+ // Emit the exception handler block.
+ CGF.EmitBlock(TryHandler);
+
+ // Don't optimize loads of the in-scope locals across this point.
+ Hazards.emitWriteHazard();
+
+ // For a @synchronized (or a @try with no catches), just branch
+ // through the cleanup to the rethrow block.
+ if (!isTry || !cast<ObjCAtTryStmt>(S).getNumCatchStmts()) {
+ // Tell the cleanup not to re-pop the exit.
+ CGF.Builder.CreateStore(CGF.Builder.getFalse(), CallTryExitVar);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+ // Otherwise, we have to match against the caught exceptions.
+ } else {
+ // Retrieve the exception object. We may emit multiple blocks but
+ // nothing can cross this so the value is already in SSA form.
+ llvm::CallInst *Caught =
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData.getPointer(), "caught");
+
+ // Push the exception to rethrow onto the EH value stack for the
+ // benefit of any @throws in the handlers.
+ CGF.ObjCEHValueStack.push_back(Caught);
+
+ const ObjCAtTryStmt* AtTryStmt = cast<ObjCAtTryStmt>(&S);
+
+ bool HasFinally = (AtTryStmt->getFinallyStmt() != nullptr);
+
+ llvm::BasicBlock *CatchBlock = nullptr;
+ llvm::BasicBlock *CatchHandler = nullptr;
+ if (HasFinally) {
+ // Save the currently-propagating exception before
+ // objc_exception_try_enter clears the exception slot.
+ PropagatingExnVar = CGF.CreateTempAlloca(Caught->getType(),
+ CGF.getPointerAlign(),
+ "propagating_exception");
+ CGF.Builder.CreateStore(Caught, PropagatingExnVar);
+
+ // Enter a new exception try block (in case a @catch block
+ // throws an exception).
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryEnterFn(),
+ ExceptionData.getPointer());
+
+ llvm::CallInst *SetJmpResult =
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getSetJmpFn(),
+ SetJmpBuffer, "setjmp.result");
+ SetJmpResult->setCanReturnTwice();
+
+ llvm::Value *Threw =
+ CGF.Builder.CreateIsNotNull(SetJmpResult, "did_catch_exception");
+
+ CatchBlock = CGF.createBasicBlock("catch");
+ CatchHandler = CGF.createBasicBlock("catch_for_catch");
+ CGF.Builder.CreateCondBr(Threw, CatchHandler, CatchBlock);
+
+ CGF.EmitBlock(CatchBlock);
+ }
+
+ CGF.Builder.CreateStore(CGF.Builder.getInt1(HasFinally), CallTryExitVar);
+
+ // Handle catch list. As a special case we check if everything is
+ // matched and avoid generating code for falling off the end if
+ // so.
+ bool AllMatched = false;
+ for (unsigned I = 0, N = AtTryStmt->getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = AtTryStmt->getCatchStmt(I);
+
+ const VarDecl *CatchParam = CatchStmt->getCatchParamDecl();
+ const ObjCObjectPointerType *OPT = nullptr;
+
+ // catch(...) always matches.
+ if (!CatchParam) {
+ AllMatched = true;
+ } else {
+ OPT = CatchParam->getType()->getAs<ObjCObjectPointerType>();
+
+ // catch(id e) always matches under this ABI, since only
+ // ObjC exceptions end up here in the first place.
+ // FIXME: For the time being we also match id<X>; this should
+ // be rejected by Sema instead.
+ if (OPT && (OPT->isObjCIdType() || OPT->isObjCQualifiedIdType()))
+ AllMatched = true;
+ }
+
+ // If this is a catch-all, we don't need to test anything.
+ if (AllMatched) {
+ CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF);
+
+ if (CatchParam) {
+ CGF.EmitAutoVarDecl(*CatchParam);
+ assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
+
+ // These types work out because ConvertType(id) == i8*.
+ EmitInitOfCatchParam(CGF, Caught, CatchParam);
+ }
+
+ CGF.EmitStmt(CatchStmt->getCatchBody());
+
+ // The scope of the catch variable ends right here.
+ CatchVarCleanups.ForceCleanup();
+
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+ break;
+ }
+
+ assert(OPT && "Unexpected non-object pointer type in @catch");
+ const ObjCObjectType *ObjTy = OPT->getObjectType();
+
+ // FIXME: @catch (Class c) ?
+ ObjCInterfaceDecl *IDecl = ObjTy->getInterface();
+ assert(IDecl && "Catch parameter must have Objective-C type!");
+
+ // Check if the @catch block matches the exception object.
+ llvm::Value *Class = EmitClassRef(CGF, IDecl);
+
+ llvm::Value *matchArgs[] = { Class, Caught };
+ llvm::CallInst *Match =
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionMatchFn(),
+ matchArgs, "match");
+
+ llvm::BasicBlock *MatchedBlock = CGF.createBasicBlock("match");
+ llvm::BasicBlock *NextCatchBlock = CGF.createBasicBlock("catch.next");
+
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(Match, "matched"),
+ MatchedBlock, NextCatchBlock);
+
+ // Emit the @catch block.
+ CGF.EmitBlock(MatchedBlock);
+
+ // Collect any cleanups for the catch variable. The scope lasts until
+ // the end of the catch body.
+ CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF);
+
+ CGF.EmitAutoVarDecl(*CatchParam);
+ assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
+
+ // Initialize the catch variable.
+ llvm::Value *Tmp =
+ CGF.Builder.CreateBitCast(Caught,
+ CGF.ConvertType(CatchParam->getType()));
+ EmitInitOfCatchParam(CGF, Tmp, CatchParam);
+
+ CGF.EmitStmt(CatchStmt->getCatchBody());
+
+ // We're done with the catch variable.
+ CatchVarCleanups.ForceCleanup();
+
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ CGF.EmitBlock(NextCatchBlock);
+ }
+
+ CGF.ObjCEHValueStack.pop_back();
+
+ // If nothing wanted anything to do with the caught exception,
+ // kill the extract call.
+ if (Caught->use_empty())
+ Caught->eraseFromParent();
+
+ if (!AllMatched)
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+ if (HasFinally) {
+ // Emit the exception handler for the @catch blocks.
+ CGF.EmitBlock(CatchHandler);
+
+ // In theory we might now need a write hazard, but actually it's
+ // unnecessary because there's no local-accessing code between
+ // the try's write hazard and here.
+ //Hazards.emitWriteHazard();
+
+ // Extract the new exception and save it to the
+ // propagating-exception slot.
+ assert(PropagatingExnVar.isValid());
+ llvm::CallInst *NewCaught =
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData.getPointer(), "caught");
+ CGF.Builder.CreateStore(NewCaught, PropagatingExnVar);
+
+ // Don't pop the catch handler; the throw already did.
+ CGF.Builder.CreateStore(CGF.Builder.getFalse(), CallTryExitVar);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ }
+ }
+
+ // Insert read hazards as required in the new blocks.
+ Hazards.emitHazardsInNewBlocks();
+
+ // Pop the cleanup.
+ CGF.Builder.restoreIP(TryFallthroughIP);
+ if (CGF.HaveInsertPoint())
+ CGF.Builder.CreateStore(CGF.Builder.getTrue(), CallTryExitVar);
+ CGF.PopCleanupBlock();
+ CGF.EmitBlock(FinallyEnd.getBlock(), true);
+
+ // Emit the rethrow block.
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
+ CGF.EmitBlock(FinallyRethrow.getBlock(), true);
+ if (CGF.HaveInsertPoint()) {
+ // If we have a propagating-exception variable, check it.
+ llvm::Value *PropagatingExn;
+ if (PropagatingExnVar.isValid()) {
+ PropagatingExn = CGF.Builder.CreateLoad(PropagatingExnVar);
+
+ // Otherwise, just look in the buffer for the exception to throw.
+ } else {
+ llvm::CallInst *Caught =
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData.getPointer());
+ PropagatingExn = Caught;
+ }
+
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionThrowFn(),
+ PropagatingExn);
+ CGF.Builder.CreateUnreachable();
+ }
+
+ CGF.Builder.restoreIP(SavedIP);
+}
+
+void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S,
+ bool ClearInsertionPoint) {
+ llvm::Value *ExceptionAsObject;
+
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ llvm::Value *Exception = CGF.EmitObjCThrowOperand(ThrowExpr);
+ ExceptionAsObject =
+ CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy);
+ } else {
+ assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+ "Unexpected rethrow outside @catch block.");
+ ExceptionAsObject = CGF.ObjCEHValueStack.back();
+ }
+
+ CGF.EmitRuntimeCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject)
+ ->setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ if (ClearInsertionPoint)
+ CGF.Builder.ClearInsertionPoint();
+}
+
+/// EmitObjCWeakRead - Code gen for loading the value of a __weak
+/// object: objc_read_weak (id *src)
+///
+llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ Address AddrWeakObj) {
+ llvm::Type* DestTy = AddrWeakObj.getElementType();
+ AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj,
+ ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *read_weak =
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcReadWeakFn(),
+ AddrWeakObj.getPointer(), "weakread");
+ read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
+ return read_weak;
+}
+
+/// EmitObjCWeakAssign - Code gen for assigning to a __weak object.
+/// objc_assign_weak (id src, id *dst)
+///
+void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, Address dst) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = { src, dst.getPointer() };
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignWeakFn(),
+ args, "weakassign");
+}
+
+/// EmitObjCGlobalAssign - Code gen for assigning to a __strong object.
+/// objc_assign_global (id src, id *dst)
+///
+void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, Address dst,
+ bool threadlocal) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = { src, dst.getPointer() };
+ if (!threadlocal)
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignGlobalFn(),
+ args, "globalassign");
+ else
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignThreadLocalFn(),
+ args, "threadlocalassign");
+}
+
+/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
+/// objc_assign_ivar (id src, id *dst, ptrdiff_t ivaroffset)
+///
+void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, Address dst,
+ llvm::Value *ivarOffset) {
+ assert(ivarOffset && "EmitObjCIvarAssign - ivarOffset is NULL");
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = { src, dst.getPointer(), ivarOffset };
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignIvarFn(), args);
+}
+
+/// EmitObjCStrongCastAssign - Code gen for assigning to a __strong cast object.
+/// objc_assign_strongCast (id src, id *dst)
+///
+void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, Address dst) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = { src, dst.getPointer() };
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignStrongCastFn(),
+ args, "strongassign");
+}
+
+void CGObjCMac::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ Address DestPtr,
+ Address SrcPtr,
+ llvm::Value *size) {
+ SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
+ DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
+ llvm::Value *args[] = { DestPtr.getPointer(), SrcPtr.getPointer(), size };
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.GcMemmoveCollectableFn(), args);
+}
+
+/// EmitObjCValueForIvar - Code Gen for ivar reference.
+///
+LValue CGObjCMac::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ const ObjCInterfaceDecl *ID =
+ ObjectTy->getAs<ObjCObjectType>()->getInterface();
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ EmitIvarOffset(CGF, ID, Ivar));
+}
+
+llvm::Value *CGObjCMac::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ uint64_t Offset = ComputeIvarBaseOffset(CGM, Interface, Ivar);
+ return llvm::ConstantInt::get(
+ CGM.getTypes().ConvertType(CGM.getContext().LongTy),
+ Offset);
+}
+
+/* *** Private Interface *** */
+
+/// EmitImageInfo - Emit the image info marker used to encode some module
+/// level information.
+///
+/// See: <rdr://4810609&4810587>
+/// struct IMAGE_INFO {
+/// unsigned version;
+/// unsigned flags;
+/// };
+enum ImageInfoFlags {
+ eImageInfo_FixAndContinue = (1 << 0), // This flag is no longer set by clang.
+ eImageInfo_GarbageCollected = (1 << 1),
+ eImageInfo_GCOnly = (1 << 2),
+ eImageInfo_OptimizedByDyld = (1 << 3), // This flag is set by the dyld shared cache.
+
+ // A flag indicating that the module has no instances of a @synthesize of a
+ // superclass variable. <rdar://problem/6803242>
+ eImageInfo_CorrectedSynthesize = (1 << 4), // This flag is no longer set by clang.
+ eImageInfo_ImageIsSimulated = (1 << 5)
+};
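+
+// For example (illustrative values derived from the enum above): an image
+// compiled with -fobjc-gc-only would carry flags ==
+// eImageInfo_GarbageCollected | eImageInfo_GCOnly (0x6) in its image info.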
+
+void CGObjCCommonMac::EmitImageInfo() {
+ unsigned version = 0; // Version is unused?
+ const char *Section = (ObjCABI == 1) ?
+ "__OBJC, __image_info,regular" :
+ "__DATA, __objc_imageinfo, regular, no_dead_strip";
+
+ // Generate module-level named metadata to convey this information to the
+ // linker and code-gen.
+ llvm::Module &Mod = CGM.getModule();
+
+ // Add the ObjC ABI version to the module flags.
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C Version", ObjCABI);
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C Image Info Version",
+ version);
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C Image Info Section",
+ llvm::MDString::get(VMContext,Section));
+
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
+ // Non-GC overrides those files which specify GC.
+ Mod.addModuleFlag(llvm::Module::Override,
+ "Objective-C Garbage Collection", (uint32_t)0);
+ } else {
+ // Add the ObjC garbage collection value.
+ Mod.addModuleFlag(llvm::Module::Error,
+ "Objective-C Garbage Collection",
+ eImageInfo_GarbageCollected);
+
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
+ // Add the ObjC GC Only value.
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C GC Only",
+ eImageInfo_GCOnly);
+
+ // Require that GC be specified and set to eImageInfo_GarbageCollected.
+ llvm::Metadata *Ops[2] = {
+ llvm::MDString::get(VMContext, "Objective-C Garbage Collection"),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), eImageInfo_GarbageCollected))};
+ Mod.addModuleFlag(llvm::Module::Require, "Objective-C GC Only",
+ llvm::MDNode::get(VMContext, Ops));
+ }
+ }
+
+ // Indicate whether we're compiling this to run on a simulator.
+ const llvm::Triple &Triple = CGM.getTarget().getTriple();
+ if ((Triple.isiOS() || Triple.isWatchOS()) &&
+ (Triple.getArch() == llvm::Triple::x86 ||
+ Triple.getArch() == llvm::Triple::x86_64))
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C Is Simulated",
+ eImageInfo_ImageIsSimulated);
+}
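+
+// As a rough sketch of the result (illustrative IR, not verbatim compiler
+// output): a fragile-ABI, non-GC compile adds module flags along the lines of
+//   !{i32 1, !"Objective-C Version", i32 1}
+//   !{i32 1, !"Objective-C Image Info Version", i32 0}
+//   !{i32 1, !"Objective-C Image Info Section",
+//     !"__OBJC, __image_info,regular"}
+//   !{i32 4, !"Objective-C Garbage Collection", i32 0}
+// where the leading i32 encodes the merge behavior (1 = Error, 4 = Override).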
+
+// struct objc_module {
+// unsigned long version;
+// unsigned long size;
+// const char *name;
+// Symtab symtab;
+// };
+
+// FIXME: Get from somewhere
+static const int ModuleVersion = 7;
+
+void CGObjCMac::EmitModuleInfo() {
+ uint64_t Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ModuleTy);
+
+ llvm::Constant *Values[] = {
+ llvm::ConstantInt::get(ObjCTypes.LongTy, ModuleVersion),
+ llvm::ConstantInt::get(ObjCTypes.LongTy, Size),
+ // This used to be the filename, now it is unused. <rdr://4327263>
+ GetClassName(StringRef("")),
+ EmitModuleSymbols()
+ };
+ CreateMetadataVar("OBJC_MODULES",
+ llvm::ConstantStruct::get(ObjCTypes.ModuleTy, Values),
+ "__OBJC,__module_info,regular,no_dead_strip",
+ CGM.getPointerAlign(), true);
+}
+
+llvm::Constant *CGObjCMac::EmitModuleSymbols() {
+ unsigned NumClasses = DefinedClasses.size();
+ unsigned NumCategories = DefinedCategories.size();
+
+ // Return null if no symbols were defined.
+ if (!NumClasses && !NumCategories)
+ return llvm::Constant::getNullValue(ObjCTypes.SymtabPtrTy);
+
+ llvm::Constant *Values[5];
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[1] = llvm::Constant::getNullValue(ObjCTypes.SelectorPtrTy);
+ Values[2] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumClasses);
+ Values[3] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumCategories);
+
+ // The runtime expects exactly the list of defined classes followed
+ // by the list of defined categories, in a single array.
+ SmallVector<llvm::Constant*, 8> Symbols(NumClasses + NumCategories);
+ for (unsigned i=0; i<NumClasses; i++) {
+ const ObjCInterfaceDecl *ID = ImplementedClasses[i];
+ assert(ID);
+ if (ObjCImplementationDecl *IMP = ID->getImplementation())
+      // We are implementing a weak imported interface. Give it external linkage.
+ if (ID->isWeakImported() && !IMP->isWeakImported())
+ DefinedClasses[i]->setLinkage(llvm::GlobalVariable::ExternalLinkage);
+
+ Symbols[i] = llvm::ConstantExpr::getBitCast(DefinedClasses[i],
+ ObjCTypes.Int8PtrTy);
+ }
+ for (unsigned i=0; i<NumCategories; i++)
+ Symbols[NumClasses + i] =
+ llvm::ConstantExpr::getBitCast(DefinedCategories[i],
+ ObjCTypes.Int8PtrTy);
+
+ Values[4] =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+ Symbols.size()),
+ Symbols);
+
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+
+ llvm::GlobalVariable *GV = CreateMetadataVar(
+ "OBJC_SYMBOLS", Init, "__OBJC,__symbols,regular,no_dead_strip",
+ CGM.getPointerAlign(), true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.SymtabPtrTy);
+}
+
+llvm::Value *CGObjCMac::EmitClassRefFromId(CodeGenFunction &CGF,
+ IdentifierInfo *II) {
+ LazySymbols.insert(II);
+
+ llvm::GlobalVariable *&Entry = ClassReferences[II];
+
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetClassName(II->getName()),
+ ObjCTypes.ClassPtrTy);
+ Entry = CreateMetadataVar(
+ "OBJC_CLASS_REFERENCES_", Casted,
+ "__OBJC,__cls_refs,literal_pointers,no_dead_strip",
+ CGM.getPointerAlign(), true);
+ }
+
+ return CGF.Builder.CreateAlignedLoad(Entry, CGF.getPointerAlign());
+}
+
+llvm::Value *CGObjCMac::EmitClassRef(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRefFromId(CGF, ID->getIdentifier());
+}
+
+llvm::Value *CGObjCMac::EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) {
+ IdentifierInfo *II = &CGM.getContext().Idents.get("NSAutoreleasePool");
+ return EmitClassRefFromId(CGF, II);
+}
+
+llvm::Value *CGObjCMac::EmitSelector(CodeGenFunction &CGF, Selector Sel) {
+ return CGF.Builder.CreateLoad(EmitSelectorAddr(CGF, Sel));
+}
+
+Address CGObjCMac::EmitSelectorAddr(CodeGenFunction &CGF, Selector Sel) {
+ CharUnits Align = CGF.getPointerAlign();
+
+ llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
+ ObjCTypes.SelectorPtrTy);
+ Entry = CreateMetadataVar(
+ "OBJC_SELECTOR_REFERENCES_", Casted,
+ "__OBJC,__message_refs,literal_pointers,no_dead_strip", Align, true);
+ Entry->setExternallyInitialized(true);
+ }
+
+ return Address(Entry, Align);
+}
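+
+// A note on setExternallyInitialized(true) above (our reading of the runtime
+// contract, not upstream documentation): the fragile runtime rewrites each
+// __OBJC,__message_refs slot to the canonical uniqued selector at image load
+// time, so the stored initializer must not be treated as the observed value.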
+
+llvm::Constant *CGObjCCommonMac::GetClassName(StringRef RuntimeName) {
+ llvm::GlobalVariable *&Entry = ClassNames[RuntimeName];
+ if (!Entry)
+ Entry = CreateMetadataVar(
+ "OBJC_CLASS_NAME_",
+ llvm::ConstantDataArray::getString(VMContext, RuntimeName),
+ ((ObjCABI == 2) ? "__TEXT,__objc_classname,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals"),
+ CharUnits::One(), true);
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+llvm::Function *CGObjCCommonMac::GetMethodDefinition(const ObjCMethodDecl *MD) {
+ llvm::DenseMap<const ObjCMethodDecl*, llvm::Function*>::iterator
+ I = MethodDefinitions.find(MD);
+ if (I != MethodDefinitions.end())
+ return I->second;
+
+ return nullptr;
+}
+
+/// GetIvarLayoutName - Returns a unique constant for the given
+/// ivar layout bitmap.
+llvm::Constant *CGObjCCommonMac::GetIvarLayoutName(IdentifierInfo *Ident,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ return llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+}
+
+void IvarLayoutBuilder::visitRecord(const RecordType *RT,
+ CharUnits offset) {
+ const RecordDecl *RD = RT->getDecl();
+
+ // If this is a union, remember that we had one, because it might mess
+ // up the ordering of layout entries.
+ if (RD->isUnion())
+ IsDisordered = true;
+
+ const ASTRecordLayout *recLayout = nullptr;
+ visitAggregate(RD->field_begin(), RD->field_end(), offset,
+ [&](const FieldDecl *field) -> CharUnits {
+ if (!recLayout)
+ recLayout = &CGM.getContext().getASTRecordLayout(RD);
+ auto offsetInBits = recLayout->getFieldOffset(field->getFieldIndex());
+ return CGM.getContext().toCharUnitsFromBits(offsetInBits);
+ });
+}
+
+template <class Iterator, class GetOffsetFn>
+void IvarLayoutBuilder::visitAggregate(Iterator begin, Iterator end,
+ CharUnits aggregateOffset,
+ const GetOffsetFn &getOffset) {
+ for (; begin != end; ++begin) {
+ auto field = *begin;
+
+ // Skip over bitfields.
+ if (field->isBitField()) {
+ continue;
+ }
+
+ // Compute the offset of the field within the aggregate.
+ CharUnits fieldOffset = aggregateOffset + getOffset(field);
+
+ visitField(field, fieldOffset);
+ }
+}
+
+/// Collect layout information for the given fields into IvarsInfo.
+void IvarLayoutBuilder::visitField(const FieldDecl *field,
+ CharUnits fieldOffset) {
+ QualType fieldType = field->getType();
+
+ // Drill down into arrays.
+ uint64_t numElts = 1;
+ while (auto arrayType = CGM.getContext().getAsConstantArrayType(fieldType)) {
+ numElts *= arrayType->getSize().getZExtValue();
+ fieldType = arrayType->getElementType();
+ }
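+  // For example (illustrative declaration): an ivar "id objs[2][3]" leaves
+  // this loop with numElts == 6 and fieldType == id.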
+
+ assert(!fieldType->isArrayType() && "ivar of non-constant array type?");
+
+ // If we ended up with a zero-sized array, we've done what we can do within
+ // the limits of this layout encoding.
+ if (numElts == 0) return;
+
+ // Recurse if the base element type is a record type.
+ if (auto recType = fieldType->getAs<RecordType>()) {
+ size_t oldEnd = IvarsInfo.size();
+
+ visitRecord(recType, fieldOffset);
+
+ // If we have an array, replicate the first entry's layout information.
+ auto numEltEntries = IvarsInfo.size() - oldEnd;
+ if (numElts != 1 && numEltEntries != 0) {
+ CharUnits eltSize = CGM.getContext().getTypeSizeInChars(recType);
+ for (uint64_t eltIndex = 1; eltIndex != numElts; ++eltIndex) {
+ // Copy the last numEltEntries onto the end of the array, adjusting
+ // each for the element size.
+ for (size_t i = 0; i != numEltEntries; ++i) {
+ auto firstEntry = IvarsInfo[oldEnd + i];
+ IvarsInfo.push_back(IvarInfo(firstEntry.Offset + eltIndex * eltSize,
+ firstEntry.SizeInWords));
+ }
+ }
+ }
+
+ return;
+ }
+
+ // Classify the element type.
+ Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), fieldType);
+
+ // If it matches what we're looking for, add an entry.
+ if ((ForStrongLayout && GCAttr == Qualifiers::Strong)
+ || (!ForStrongLayout && GCAttr == Qualifiers::Weak)) {
+ assert(CGM.getContext().getTypeSizeInChars(fieldType)
+ == CGM.getPointerSize());
+ IvarsInfo.push_back(IvarInfo(fieldOffset, numElts));
+ }
+}
+
+/// buildBitmap - This routine does the heavy lifting of taking the offsets
+/// of strong/weak references and creating a bitmap. The bitmap is also
+/// returned in the given buffer, suitable for being passed to \c dump().
+llvm::Constant *IvarLayoutBuilder::buildBitmap(CGObjCCommonMac &CGObjC,
+ llvm::SmallVectorImpl<unsigned char> &buffer) {
+ // The bitmap is a series of skip/scan instructions, aligned to word
+ // boundaries. The skip is performed first.
+ const unsigned char MaxNibble = 0xF;
+ const unsigned char SkipMask = 0xF0, SkipShift = 4;
+ const unsigned char ScanMask = 0x0F, ScanShift = 0;
+
+ assert(!IvarsInfo.empty() && "generating bitmap for no data");
+
+  // Sort the ivar info on byte position in case we encountered a
+  // union nested in the ivar list.
+ if (IsDisordered) {
+ // This isn't a stable sort, but our algorithm should handle it fine.
+ llvm::array_pod_sort(IvarsInfo.begin(), IvarsInfo.end());
+ } else {
+ assert(std::is_sorted(IvarsInfo.begin(), IvarsInfo.end()));
+ }
+ assert(IvarsInfo.back().Offset < InstanceEnd);
+
+ assert(buffer.empty());
+
+ // Skip the next N words.
+ auto skip = [&](unsigned numWords) {
+ assert(numWords > 0);
+
+ // Try to merge into the previous byte. Since scans happen second, we
+ // can't do this if it includes a scan.
+ if (!buffer.empty() && !(buffer.back() & ScanMask)) {
+ unsigned lastSkip = buffer.back() >> SkipShift;
+ if (lastSkip < MaxNibble) {
+ unsigned claimed = std::min(MaxNibble - lastSkip, numWords);
+ numWords -= claimed;
+ lastSkip += claimed;
+ buffer.back() = (lastSkip << SkipShift);
+ }
+ }
+
+ while (numWords >= MaxNibble) {
+ buffer.push_back(MaxNibble << SkipShift);
+ numWords -= MaxNibble;
+ }
+ if (numWords) {
+ buffer.push_back(numWords << SkipShift);
+ }
+ };
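+
+  // For example (illustrative): calling skip(20) when the previous byte is
+  // 0x30 (skip 3, scan 0) rewrites that byte to 0xF0 and appends 0x80,
+  // i.e. skip 15 + skip 8, so 23 words are skipped in total so far.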
+
+ // Scan the next N words.
+ auto scan = [&](unsigned numWords) {
+ assert(numWords > 0);
+
+ // Try to merge into the previous byte. Since scans happen second, we can
+ // do this even if it includes a skip.
+ if (!buffer.empty()) {
+ unsigned lastScan = (buffer.back() & ScanMask) >> ScanShift;
+ if (lastScan < MaxNibble) {
+ unsigned claimed = std::min(MaxNibble - lastScan, numWords);
+ numWords -= claimed;
+ lastScan += claimed;
+ buffer.back() = (buffer.back() & SkipMask) | (lastScan << ScanShift);
+ }
+ }
+
+ while (numWords >= MaxNibble) {
+ buffer.push_back(MaxNibble << ScanShift);
+ numWords -= MaxNibble;
+ }
+ if (numWords) {
+ buffer.push_back(numWords << ScanShift);
+ }
+ };
+
+ // One past the end of the last scan.
+ unsigned endOfLastScanInWords = 0;
+ const CharUnits WordSize = CGM.getPointerSize();
+
+ // Consider all the scan requests.
+ for (auto &request : IvarsInfo) {
+ CharUnits beginOfScan = request.Offset - InstanceBegin;
+
+ // Ignore scan requests that don't start at an even multiple of the
+ // word size. We can't encode them.
+ if ((beginOfScan % WordSize) != 0) continue;
+
+ // Ignore scan requests that start before the instance start.
+ // This assumes that scans never span that boundary. The boundary
+ // isn't the true start of the ivars, because in the fragile-ARC case
+ // it's rounded up to word alignment, but the test above should leave
+ // us ignoring that possibility.
+ if (beginOfScan.isNegative()) {
+ assert(request.Offset + request.SizeInWords * WordSize <= InstanceBegin);
+ continue;
+ }
+
+ unsigned beginOfScanInWords = beginOfScan / WordSize;
+ unsigned endOfScanInWords = beginOfScanInWords + request.SizeInWords;
+
+ // If the scan starts some number of words after the last one ended,
+ // skip forward.
+ if (beginOfScanInWords > endOfLastScanInWords) {
+ skip(beginOfScanInWords - endOfLastScanInWords);
+
+ // Otherwise, start scanning where the last left off.
+ } else {
+ beginOfScanInWords = endOfLastScanInWords;
+
+ // If that leaves us with nothing to scan, ignore this request.
+ if (beginOfScanInWords >= endOfScanInWords) continue;
+ }
+
+ // Scan to the end of the request.
+ assert(beginOfScanInWords < endOfScanInWords);
+ scan(endOfScanInWords - beginOfScanInWords);
+ endOfLastScanInWords = endOfScanInWords;
+ }
+
+ if (buffer.empty())
+ return llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
+
+ // For GC layouts, emit a skip to the end of the allocation so that we
+ // have precise information about the entire thing. This isn't useful
+ // or necessary for the ARC-style layout strings.
+ if (CGM.getLangOpts().getGC() != LangOptions::NonGC) {
+ unsigned lastOffsetInWords =
+ (InstanceEnd - InstanceBegin + WordSize - CharUnits::One()) / WordSize;
+ if (lastOffsetInWords > endOfLastScanInWords) {
+ skip(lastOffsetInWords - endOfLastScanInWords);
+ }
+ }
+
+ // Null terminate the string.
+ buffer.push_back(0);
+
+ bool isNonFragileABI = CGObjC.isNonFragileABI();
+
+ llvm::GlobalVariable *Entry = CGObjC.CreateMetadataVar(
+ "OBJC_CLASS_NAME_",
+ llvm::ConstantDataArray::get(CGM.getLLVMContext(), buffer),
+ (isNonFragileABI ? "__TEXT,__objc_classname,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals"),
+ CharUnits::One(), true);
+ return getConstantGEP(CGM.getLLVMContext(), Entry, 0, 0);
+}
+
+/// BuildIvarLayout - Builds the ivar layout bitmap for the class
+/// implementation for the __strong or __weak case.
+/// The layout map records which words in the ivar list must be skipped
+/// and which must be scanned by GC (see below). The string is built of
+/// bytes. Each byte is divided into two nibbles (4 bits each). The left
+/// nibble is the count of words to skip and the right nibble is the count
+/// of words to scan, so each nibble represents up to 15 words to skip or
+/// scan. Skipping the rest is represented by a 0x00 byte, which also ends
+/// the string.
+/// 1. When ForStrongLayout is true, the following ivars are scanned:
+///   - id, Class
+///   - object *
+///   - __strong anything
+///
+/// 2. When ForStrongLayout is false, the following ivars are scanned:
+///   - __weak anything
+///
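+/// For example (an illustrative layout, not tied to any class in this file):
+/// a strong layout whose instance words are [scalar, strong, strong,
+/// 16 x scalar, strong] encodes as the bytes 0x12 (skip 1, scan 2),
+/// 0xF0 (skip 15), 0x11 (skip the remaining 1, scan 1), 0x00 (terminator).
+///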
+llvm::Constant *
+CGObjCCommonMac::BuildIvarLayout(const ObjCImplementationDecl *OMD,
+ CharUnits beginOffset, CharUnits endOffset,
+ bool ForStrongLayout, bool HasMRCWeakIvars) {
+ // If this is MRC, and we're either building a strong layout or there
+ // are no weak ivars, bail out early.
+ llvm::Type *PtrTy = CGM.Int8PtrTy;
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC &&
+ !CGM.getLangOpts().ObjCAutoRefCount &&
+ (ForStrongLayout || !HasMRCWeakIvars))
+ return llvm::Constant::getNullValue(PtrTy);
+
+ const ObjCInterfaceDecl *OI = OMD->getClassInterface();
+ SmallVector<const ObjCIvarDecl*, 32> ivars;
+
+ // GC layout strings include the complete object layout, possibly
+ // inaccurately in the non-fragile ABI; the runtime knows how to fix this
+ // up.
+ //
+ // ARC layout strings only include the class's ivars. In non-fragile
+ // runtimes, that means starting at InstanceStart, rounded up to word
+ // alignment. In fragile runtimes, there's no InstanceStart, so it means
+ // starting at the offset of the first ivar, rounded up to word alignment.
+ //
+ // MRC weak layout strings follow the ARC style.
+ CharUnits baseOffset;
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
+ for (const ObjCIvarDecl *IVD = OI->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar())
+ ivars.push_back(IVD);
+
+ if (isNonFragileABI()) {
+ baseOffset = beginOffset; // InstanceStart
+ } else if (!ivars.empty()) {
+ baseOffset =
+ CharUnits::fromQuantity(ComputeIvarBaseOffset(CGM, OMD, ivars[0]));
+ } else {
+ baseOffset = CharUnits::Zero();
+ }
+
+ baseOffset = baseOffset.RoundUpToAlignment(CGM.getPointerAlign());
+ }
+ else {
+ CGM.getContext().DeepCollectObjCIvars(OI, true, ivars);
+
+ baseOffset = CharUnits::Zero();
+ }
+
+ if (ivars.empty())
+ return llvm::Constant::getNullValue(PtrTy);
+
+ IvarLayoutBuilder builder(CGM, baseOffset, endOffset, ForStrongLayout);
+
+ builder.visitAggregate(ivars.begin(), ivars.end(), CharUnits::Zero(),
+ [&](const ObjCIvarDecl *ivar) -> CharUnits {
+ return CharUnits::fromQuantity(ComputeIvarBaseOffset(CGM, OMD, ivar));
+ });
+
+ if (!builder.hasBitmapData())
+ return llvm::Constant::getNullValue(PtrTy);
+
+ llvm::SmallVector<unsigned char, 4> buffer;
+ llvm::Constant *C = builder.buildBitmap(*this, buffer);
+
+ if (CGM.getLangOpts().ObjCGCBitmapPrint && !buffer.empty()) {
+ printf("\n%s ivar layout for class '%s': ",
+ ForStrongLayout ? "strong" : "weak",
+ OMD->getClassInterface()->getName().str().c_str());
+ builder.dump(buffer);
+ }
+ return C;
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarName(Selector Sel) {
+ llvm::GlobalVariable *&Entry = MethodVarNames[Sel];
+
+ // FIXME: Avoid std::string in "Sel.getAsString()"
+ if (!Entry)
+ Entry = CreateMetadataVar(
+ "OBJC_METH_VAR_NAME_",
+ llvm::ConstantDataArray::getString(VMContext, Sel.getAsString()),
+ ((ObjCABI == 2) ? "__TEXT,__objc_methname,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals"),
+ CharUnits::One(), true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+llvm::Constant *CGObjCCommonMac::GetMethodVarName(IdentifierInfo *ID) {
+ return GetMethodVarName(CGM.getContext().Selectors.getNullarySelector(ID));
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForType(Field->getType(), TypeStr, Field);
+
+ llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
+
+ if (!Entry)
+ Entry = CreateMetadataVar(
+ "OBJC_METH_VAR_TYPE_",
+ llvm::ConstantDataArray::getString(VMContext, TypeStr),
+ ((ObjCABI == 2) ? "__TEXT,__objc_methtype,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals"),
+ CharUnits::One(), true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D,
+ bool Extended) {
+ std::string TypeStr;
+ if (CGM.getContext().getObjCEncodingForMethodDecl(D, TypeStr, Extended))
+ return nullptr;
+
+ llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
+
+ if (!Entry)
+ Entry = CreateMetadataVar(
+ "OBJC_METH_VAR_TYPE_",
+ llvm::ConstantDataArray::getString(VMContext, TypeStr),
+ ((ObjCABI == 2) ? "__TEXT,__objc_methtype,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals"),
+ CharUnits::One(), true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+llvm::Constant *CGObjCCommonMac::GetPropertyName(IdentifierInfo *Ident) {
+ llvm::GlobalVariable *&Entry = PropertyNames[Ident];
+
+ if (!Entry)
+ Entry = CreateMetadataVar(
+ "OBJC_PROP_NAME_ATTR_",
+ llvm::ConstantDataArray::getString(VMContext, Ident->getName()),
+ "__TEXT,__cstring,cstring_literals", CharUnits::One(), true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+// FIXME: This Decl should be more precise.
+llvm::Constant *
+CGObjCCommonMac::GetPropertyTypeString(const ObjCPropertyDecl *PD,
+ const Decl *Container) {
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container, TypeStr);
+ return GetPropertyName(&CGM.getContext().Idents.get(TypeStr));
+}
+
+void CGObjCCommonMac::GetNameForMethod(const ObjCMethodDecl *D,
+ const ObjCContainerDecl *CD,
+ SmallVectorImpl<char> &Name) {
+ llvm::raw_svector_ostream OS(Name);
+ assert (CD && "Missing container decl in GetNameForMethod");
+ OS << '\01' << (D->isInstanceMethod() ? '-' : '+')
+ << '[' << CD->getName();
+ if (const ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(D->getDeclContext()))
+ OS << '(' << *CID << ')';
+ OS << ' ' << D->getSelector().getAsString() << ']';
+}
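+
+// For example (hypothetical names): an instance method doSomething: defined
+// in category Extras of class Widget is named "\01-[Widget(Extras)
+// doSomething:]"; a class method +shared on Widget itself is named
+// "\01+[Widget shared]".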
+
+void CGObjCMac::FinishModule() {
+ EmitModuleInfo();
+
+ // Emit the dummy bodies for any protocols which were referenced but
+ // never defined.
+ for (llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*>::iterator
+ I = Protocols.begin(), e = Protocols.end(); I != e; ++I) {
+ if (I->second->hasInitializer())
+ continue;
+
+ llvm::Constant *Values[5];
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
+ Values[1] = GetClassName(I->first->getName());
+ Values[2] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ Values[3] = Values[4] =
+ llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
+ I->second->setInitializer(llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
+ Values));
+ CGM.addCompilerUsedGlobal(I->second);
+ }
+
+ // Add assembler directives to add lazy undefined symbol references
+ // for classes which are referenced but not defined. This is
+ // important for correct linker interaction.
+ //
+ // FIXME: It would be nice if we had an LLVM construct for this.
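+  //
+  // For a module defining class Foo and lazily referencing class Bar, the
+  // directives emitted below look like (illustrative names):
+  //   .objc_class_name_Foo=0
+  //   .globl .objc_class_name_Foo
+  //   .lazy_reference .objc_class_name_Bar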
+ if (!LazySymbols.empty() || !DefinedSymbols.empty()) {
+ SmallString<256> Asm;
+ Asm += CGM.getModule().getModuleInlineAsm();
+ if (!Asm.empty() && Asm.back() != '\n')
+ Asm += '\n';
+
+ llvm::raw_svector_ostream OS(Asm);
+ for (llvm::SetVector<IdentifierInfo*>::iterator I = DefinedSymbols.begin(),
+ e = DefinedSymbols.end(); I != e; ++I)
+ OS << "\t.objc_class_name_" << (*I)->getName() << "=0\n"
+ << "\t.globl .objc_class_name_" << (*I)->getName() << "\n";
+ for (llvm::SetVector<IdentifierInfo*>::iterator I = LazySymbols.begin(),
+ e = LazySymbols.end(); I != e; ++I) {
+ OS << "\t.lazy_reference .objc_class_name_" << (*I)->getName() << "\n";
+ }
+
+ for (size_t i = 0, e = DefinedCategoryNames.size(); i < e; ++i) {
+ OS << "\t.objc_category_name_" << DefinedCategoryNames[i] << "=0\n"
+ << "\t.globl .objc_category_name_" << DefinedCategoryNames[i] << "\n";
+ }
+
+ CGM.getModule().setModuleInlineAsm(OS.str());
+ }
+}
+
+CGObjCNonFragileABIMac::CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm)
+ : CGObjCCommonMac(cgm),
+ ObjCTypes(cgm) {
+ ObjCEmptyCacheVar = ObjCEmptyVtableVar = nullptr;
+ ObjCABI = 2;
+}
+
+/* *** */
+
+ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
+ : VMContext(cgm.getLLVMContext()), CGM(cgm), ExternalProtocolPtrTy(nullptr)
+{
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+
+ ShortTy = Types.ConvertType(Ctx.ShortTy);
+ IntTy = Types.ConvertType(Ctx.IntTy);
+ LongTy = Types.ConvertType(Ctx.LongTy);
+ LongLongTy = Types.ConvertType(Ctx.LongLongTy);
+ Int8PtrTy = CGM.Int8PtrTy;
+ Int8PtrPtrTy = CGM.Int8PtrPtrTy;
+
+ // arm64 targets use "int" ivar offset variables. All others,
+ // including OS X x86_64 and Windows x86_64, use "long" ivar offsets.
+ if (CGM.getTarget().getTriple().getArch() == llvm::Triple::aarch64)
+ IvarOffsetVarTy = IntTy;
+ else
+ IvarOffsetVarTy = LongTy;
+
+ ObjectPtrTy = Types.ConvertType(Ctx.getObjCIdType());
+ PtrObjectPtrTy = llvm::PointerType::getUnqual(ObjectPtrTy);
+ SelectorPtrTy = Types.ConvertType(Ctx.getObjCSelType());
+
+ // I'm not sure I like this. The implicit coordination is a bit
+ // gross. We should solve this in a reasonable fashion because this
+ // is a pretty common task (match some runtime data structure with
+ // an LLVM data structure).
+
+ // FIXME: This is leaked.
+ // FIXME: Merge with rewriter code?
+
+ // struct _objc_super {
+ // id self;
+ // Class cls;
+ // }
+ RecordDecl *RD = RecordDecl::Create(Ctx, TTK_Struct,
+ Ctx.getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Ctx.Idents.get("_objc_super"));
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(),
+ nullptr, Ctx.getObjCIdType(), nullptr, nullptr,
+ false, ICIS_NoInit));
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(),
+ nullptr, Ctx.getObjCClassType(), nullptr,
+ nullptr, false, ICIS_NoInit));
+ RD->completeDefinition();
+
+ SuperCTy = Ctx.getTagDeclType(RD);
+ SuperPtrCTy = Ctx.getPointerType(SuperCTy);
+
+ SuperTy = cast<llvm::StructType>(Types.ConvertType(SuperCTy));
+ SuperPtrTy = llvm::PointerType::getUnqual(SuperTy);
+
+ // struct _prop_t {
+ // char *name;
+ // char *attributes;
+ // }
+ PropertyTy = llvm::StructType::create("struct._prop_t",
+ Int8PtrTy, Int8PtrTy, nullptr);
+
+ // struct _prop_list_t {
+ // uint32_t entsize; // sizeof(struct _prop_t)
+ // uint32_t count_of_properties;
+ // struct _prop_t prop_list[count_of_properties];
+ // }
+ PropertyListTy =
+ llvm::StructType::create("struct._prop_list_t", IntTy, IntTy,
+ llvm::ArrayType::get(PropertyTy, 0), nullptr);
+ // struct _prop_list_t *
+ PropertyListPtrTy = llvm::PointerType::getUnqual(PropertyListTy);
+
+ // struct _objc_method {
+ // SEL _cmd;
+ // char *method_type;
+ // char *_imp;
+ // }
+ MethodTy = llvm::StructType::create("struct._objc_method",
+ SelectorPtrTy, Int8PtrTy, Int8PtrTy,
+ nullptr);
+
+ // struct _objc_cache *
+ CacheTy = llvm::StructType::create(VMContext, "struct._objc_cache");
+ CachePtrTy = llvm::PointerType::getUnqual(CacheTy);
+}
+
+ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
+ : ObjCCommonTypesHelper(cgm) {
+ // struct _objc_method_description {
+ // SEL name;
+ // char *types;
+ // }
+ MethodDescriptionTy =
+ llvm::StructType::create("struct._objc_method_description",
+ SelectorPtrTy, Int8PtrTy, nullptr);
+
+ // struct _objc_method_description_list {
+ // int count;
+ // struct _objc_method_description[1];
+ // }
+ MethodDescriptionListTy = llvm::StructType::create(
+ "struct._objc_method_description_list", IntTy,
+ llvm::ArrayType::get(MethodDescriptionTy, 0), nullptr);
+
+ // struct _objc_method_description_list *
+ MethodDescriptionListPtrTy =
+ llvm::PointerType::getUnqual(MethodDescriptionListTy);
+
+ // Protocol description structures
+
+ // struct _objc_protocol_extension {
+ // uint32_t size; // sizeof(struct _objc_protocol_extension)
+ // struct _objc_method_description_list *optional_instance_methods;
+ // struct _objc_method_description_list *optional_class_methods;
+ // struct _objc_property_list *instance_properties;
+ // const char ** extendedMethodTypes;
+ // }
+ ProtocolExtensionTy =
+ llvm::StructType::create("struct._objc_protocol_extension",
+ IntTy, MethodDescriptionListPtrTy,
+ MethodDescriptionListPtrTy, PropertyListPtrTy,
+ Int8PtrPtrTy, nullptr);
+
+ // struct _objc_protocol_extension *
+ ProtocolExtensionPtrTy = llvm::PointerType::getUnqual(ProtocolExtensionTy);
+
+ // Handle recursive construction of Protocol and ProtocolList types
+
+ ProtocolTy =
+ llvm::StructType::create(VMContext, "struct._objc_protocol");
+
+ ProtocolListTy =
+ llvm::StructType::create(VMContext, "struct._objc_protocol_list");
+ ProtocolListTy->setBody(llvm::PointerType::getUnqual(ProtocolListTy),
+ LongTy,
+ llvm::ArrayType::get(ProtocolTy, 0),
+ nullptr);
+
+ // struct _objc_protocol {
+ // struct _objc_protocol_extension *isa;
+ // char *protocol_name;
+ // struct _objc_protocol **_objc_protocol_list;
+ // struct _objc_method_description_list *instance_methods;
+ // struct _objc_method_description_list *class_methods;
+ // }
+ ProtocolTy->setBody(ProtocolExtensionPtrTy, Int8PtrTy,
+ llvm::PointerType::getUnqual(ProtocolListTy),
+ MethodDescriptionListPtrTy,
+ MethodDescriptionListPtrTy,
+ nullptr);
+
+ // struct _objc_protocol_list *
+ ProtocolListPtrTy = llvm::PointerType::getUnqual(ProtocolListTy);
+
+ ProtocolPtrTy = llvm::PointerType::getUnqual(ProtocolTy);
+
+ // Class description structures
+
+ // struct _objc_ivar {
+ // char *ivar_name;
+ // char *ivar_type;
+ // int ivar_offset;
+ // }
+ IvarTy = llvm::StructType::create("struct._objc_ivar",
+ Int8PtrTy, Int8PtrTy, IntTy, nullptr);
+
+ // struct _objc_ivar_list *
+ IvarListTy =
+ llvm::StructType::create(VMContext, "struct._objc_ivar_list");
+ IvarListPtrTy = llvm::PointerType::getUnqual(IvarListTy);
+
+ // struct _objc_method_list *
+ MethodListTy =
+ llvm::StructType::create(VMContext, "struct._objc_method_list");
+ MethodListPtrTy = llvm::PointerType::getUnqual(MethodListTy);
+
+ // struct _objc_class_extension *
+ ClassExtensionTy =
+ llvm::StructType::create("struct._objc_class_extension",
+ IntTy, Int8PtrTy, PropertyListPtrTy, nullptr);
+ ClassExtensionPtrTy = llvm::PointerType::getUnqual(ClassExtensionTy);
+
+ ClassTy = llvm::StructType::create(VMContext, "struct._objc_class");
+
+ // struct _objc_class {
+ // Class isa;
+ // Class super_class;
+ // char *name;
+ // long version;
+ // long info;
+ // long instance_size;
+ // struct _objc_ivar_list *ivars;
+ // struct _objc_method_list *methods;
+ // struct _objc_cache *cache;
+ // struct _objc_protocol_list *protocols;
+ // char *ivar_layout;
+ // struct _objc_class_ext *ext;
+ // };
+ ClassTy->setBody(llvm::PointerType::getUnqual(ClassTy),
+ llvm::PointerType::getUnqual(ClassTy),
+ Int8PtrTy,
+ LongTy,
+ LongTy,
+ LongTy,
+ IvarListPtrTy,
+ MethodListPtrTy,
+ CachePtrTy,
+ ProtocolListPtrTy,
+ Int8PtrTy,
+ ClassExtensionPtrTy,
+ nullptr);
+
+ ClassPtrTy = llvm::PointerType::getUnqual(ClassTy);
+
+ // struct _objc_category {
+ // char *category_name;
+ // char *class_name;
+ // struct _objc_method_list *instance_method;
+ // struct _objc_method_list *class_method;
+ // uint32_t size; // sizeof(struct _objc_category)
+ // struct _objc_property_list *instance_properties;// category's @property
+ // }
+ CategoryTy =
+ llvm::StructType::create("struct._objc_category",
+ Int8PtrTy, Int8PtrTy, MethodListPtrTy,
+ MethodListPtrTy, ProtocolListPtrTy,
+ IntTy, PropertyListPtrTy, nullptr);
+
+ // Global metadata structures
+
+ // struct _objc_symtab {
+ // long sel_ref_cnt;
+ // SEL *refs;
+ // short cls_def_cnt;
+ // short cat_def_cnt;
+ // char *defs[cls_def_cnt + cat_def_cnt];
+ // }
+ SymtabTy =
+ llvm::StructType::create("struct._objc_symtab",
+ LongTy, SelectorPtrTy, ShortTy, ShortTy,
+ llvm::ArrayType::get(Int8PtrTy, 0), nullptr);
+ SymtabPtrTy = llvm::PointerType::getUnqual(SymtabTy);
+
+ // struct _objc_module {
+ // long version;
+ // long size; // sizeof(struct _objc_module)
+ // char *name;
+ // struct _objc_symtab* symtab;
+ // }
+ ModuleTy =
+ llvm::StructType::create("struct._objc_module",
+ LongTy, LongTy, Int8PtrTy, SymtabPtrTy, nullptr);
+
+ // FIXME: This is the size of the setjmp buffer and should be target
+ // specific. 18 is what's used on 32-bit X86.
+ uint64_t SetJmpBufferSize = 18;
+
+ // Exceptions
+ llvm::Type *StackPtrTy = llvm::ArrayType::get(CGM.Int8PtrTy, 4);
+
+ ExceptionDataTy =
+ llvm::StructType::create("struct._objc_exception_data",
+ llvm::ArrayType::get(CGM.Int32Ty,SetJmpBufferSize),
+ StackPtrTy, nullptr);
+}
+
+ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm)
+ : ObjCCommonTypesHelper(cgm) {
+ // struct _method_list_t {
+ // uint32_t entsize; // sizeof(struct _objc_method)
+ // uint32_t method_count;
+ // struct _objc_method method_list[method_count];
+ // }
+ MethodListnfABITy =
+ llvm::StructType::create("struct.__method_list_t", IntTy, IntTy,
+ llvm::ArrayType::get(MethodTy, 0), nullptr);
+ // struct method_list_t *
+ MethodListnfABIPtrTy = llvm::PointerType::getUnqual(MethodListnfABITy);
+
+ // struct _protocol_t {
+ // id isa; // NULL
+ // const char * const protocol_name;
+ // const struct _protocol_list_t * protocol_list; // super protocols
+ // const struct method_list_t * const instance_methods;
+ // const struct method_list_t * const class_methods;
+ // const struct method_list_t *optionalInstanceMethods;
+ // const struct method_list_t *optionalClassMethods;
+ // const struct _prop_list_t * properties;
+ // const uint32_t size; // sizeof(struct _protocol_t)
+ // const uint32_t flags; // = 0
+ // const char ** extendedMethodTypes;
+ // const char *demangledName;
+ // }
+
+ // Holder for struct _protocol_list_t *
+ ProtocolListnfABITy =
+ llvm::StructType::create(VMContext, "struct._objc_protocol_list");
+
+ ProtocolnfABITy =
+ llvm::StructType::create("struct._protocol_t", ObjectPtrTy, Int8PtrTy,
+ llvm::PointerType::getUnqual(ProtocolListnfABITy),
+ MethodListnfABIPtrTy, MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy, MethodListnfABIPtrTy,
+ PropertyListPtrTy, IntTy, IntTy, Int8PtrPtrTy,
+ Int8PtrTy,
+ nullptr);
+
+ // struct _protocol_t*
+ ProtocolnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolnfABITy);
+
+ // struct _protocol_list_t {
+ // long protocol_count; // Note, this is 32/64 bit
+ // struct _protocol_t *[protocol_count];
+ // }
+ ProtocolListnfABITy->setBody(LongTy,
+ llvm::ArrayType::get(ProtocolnfABIPtrTy, 0),
+ nullptr);
+
+ // struct _objc_protocol_list*
+ ProtocolListnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolListnfABITy);
+
+ // struct _ivar_t {
+ // unsigned [long] int *offset; // pointer to ivar offset location
+ // char *name;
+ // char *type;
+ // uint32_t alignment;
+ // uint32_t size;
+ // }
+ IvarnfABITy = llvm::StructType::create(
+ "struct._ivar_t", llvm::PointerType::getUnqual(IvarOffsetVarTy),
+ Int8PtrTy, Int8PtrTy, IntTy, IntTy, nullptr);
+
+ // struct _ivar_list_t {
+  //   uint32_t entsize;  // sizeof(struct _ivar_t)
+  //   uint32_t count;
+  //   struct _ivar_t list[count];
+ // }
+ IvarListnfABITy =
+ llvm::StructType::create("struct._ivar_list_t", IntTy, IntTy,
+ llvm::ArrayType::get(IvarnfABITy, 0), nullptr);
+
+ IvarListnfABIPtrTy = llvm::PointerType::getUnqual(IvarListnfABITy);
+
+ // struct _class_ro_t {
+ // uint32_t const flags;
+ // uint32_t const instanceStart;
+ // uint32_t const instanceSize;
+ // uint32_t const reserved; // only when building for 64bit targets
+ // const uint8_t * const ivarLayout;
+ // const char *const name;
+ // const struct _method_list_t * const baseMethods;
+ // const struct _objc_protocol_list *const baseProtocols;
+ // const struct _ivar_list_t *const ivars;
+ // const uint8_t * const weakIvarLayout;
+ // const struct _prop_list_t * const properties;
+ // }
+
+ // FIXME. Add 'reserved' field in 64bit abi mode!
+ ClassRonfABITy = llvm::StructType::create("struct._class_ro_t",
+ IntTy, IntTy, IntTy, Int8PtrTy,
+ Int8PtrTy, MethodListnfABIPtrTy,
+ ProtocolListnfABIPtrTy,
+ IvarListnfABIPtrTy,
+ Int8PtrTy, PropertyListPtrTy,
+ nullptr);
+
+ // ImpnfABITy - LLVM for id (*)(id, SEL, ...)
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ ImpnfABITy = llvm::FunctionType::get(ObjectPtrTy, params, false)
+ ->getPointerTo();
+
+ // struct _class_t {
+ // struct _class_t *isa;
+ // struct _class_t * const superclass;
+ // void *cache;
+ // IMP *vtable;
+ // struct class_ro_t *ro;
+ // }
+
+ ClassnfABITy = llvm::StructType::create(VMContext, "struct._class_t");
+ ClassnfABITy->setBody(llvm::PointerType::getUnqual(ClassnfABITy),
+ llvm::PointerType::getUnqual(ClassnfABITy),
+ CachePtrTy,
+ llvm::PointerType::getUnqual(ImpnfABITy),
+ llvm::PointerType::getUnqual(ClassRonfABITy),
+ nullptr);
+
+ // LLVM for struct _class_t *
+ ClassnfABIPtrTy = llvm::PointerType::getUnqual(ClassnfABITy);
+
+ // struct _category_t {
+ // const char * const name;
+ // struct _class_t *const cls;
+ // const struct _method_list_t * const instance_methods;
+ // const struct _method_list_t * const class_methods;
+ // const struct _protocol_list_t * const protocols;
+ // const struct _prop_list_t * const properties;
+ // }
+ CategorynfABITy = llvm::StructType::create("struct._category_t",
+ Int8PtrTy, ClassnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ ProtocolListnfABIPtrTy,
+ PropertyListPtrTy,
+ nullptr);
+
+ // New types for nonfragile abi messaging.
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+
+ // MessageRefTy - LLVM for:
+ // struct _message_ref_t {
+ // IMP messenger;
+ // SEL name;
+ // };
+
+ // First the clang type for struct _message_ref_t
+ RecordDecl *RD = RecordDecl::Create(Ctx, TTK_Struct,
+ Ctx.getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Ctx.Idents.get("_message_ref_t"));
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(),
+ nullptr, Ctx.VoidPtrTy, nullptr, nullptr, false,
+ ICIS_NoInit));
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(),
+ nullptr, Ctx.getObjCSelType(), nullptr, nullptr,
+ false, ICIS_NoInit));
+ RD->completeDefinition();
+
+ MessageRefCTy = Ctx.getTagDeclType(RD);
+ MessageRefCPtrTy = Ctx.getPointerType(MessageRefCTy);
+ MessageRefTy = cast<llvm::StructType>(Types.ConvertType(MessageRefCTy));
+
+ // MessageRefPtrTy - LLVM for struct _message_ref_t*
+ MessageRefPtrTy = llvm::PointerType::getUnqual(MessageRefTy);
+
+ // SuperMessageRefTy - LLVM for:
+ // struct _super_message_ref_t {
+ // SUPER_IMP messenger;
+ // SEL name;
+ // };
+ SuperMessageRefTy =
+ llvm::StructType::create("struct._super_message_ref_t",
+ ImpnfABITy, SelectorPtrTy, nullptr);
+
+ // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
+ SuperMessageRefPtrTy = llvm::PointerType::getUnqual(SuperMessageRefTy);
+
+ // struct objc_typeinfo {
+ // const void** vtable; // objc_ehtype_vtable + 2
+ // const char* name; // c++ typeinfo string
+ // Class cls;
+ // };
+ EHTypeTy =
+ llvm::StructType::create("struct._objc_typeinfo",
+ llvm::PointerType::getUnqual(Int8PtrTy),
+ Int8PtrTy, ClassnfABIPtrTy, nullptr);
+ EHTypePtrTy = llvm::PointerType::getUnqual(EHTypeTy);
+}
+
+llvm::Function *CGObjCNonFragileABIMac::ModuleInitFunction() {
+ FinishNonFragileABIModule();
+
+ return nullptr;
+}
+
+void CGObjCNonFragileABIMac::
+AddModuleClassList(ArrayRef<llvm::GlobalValue*> Container,
+ const char *SymbolName,
+ const char *SectionName) {
+ unsigned NumClasses = Container.size();
+
+ if (!NumClasses)
+ return;
+
+ SmallVector<llvm::Constant*, 8> Symbols(NumClasses);
+ for (unsigned i=0; i<NumClasses; i++)
+ Symbols[i] = llvm::ConstantExpr::getBitCast(Container[i],
+ ObjCTypes.Int8PtrTy);
+ llvm::Constant *Init =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+ Symbols.size()),
+ Symbols);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::PrivateLinkage,
+ Init,
+ SymbolName);
+ GV->setAlignment(CGM.getDataLayout().getABITypeAlignment(Init->getType()));
+ GV->setSection(SectionName);
+ CGM.addCompilerUsedGlobal(GV);
+}
+
+void CGObjCNonFragileABIMac::FinishNonFragileABIModule() {
+  // The nonfragile ABI has no module definition.
+
+ // Build list of all implemented class addresses in array
+ // L_OBJC_LABEL_CLASS_$.
+
+ for (unsigned i=0, NumClasses=ImplementedClasses.size(); i<NumClasses; i++) {
+ const ObjCInterfaceDecl *ID = ImplementedClasses[i];
+ assert(ID);
+ if (ObjCImplementationDecl *IMP = ID->getImplementation())
+      // We are implementing a weak imported interface. Give it external linkage.
+ if (ID->isWeakImported() && !IMP->isWeakImported()) {
+ DefinedClasses[i]->setLinkage(llvm::GlobalVariable::ExternalLinkage);
+ DefinedMetaClasses[i]->setLinkage(llvm::GlobalVariable::ExternalLinkage);
+ }
+ }
+
+ AddModuleClassList(DefinedClasses, "OBJC_LABEL_CLASS_$",
+ "__DATA, __objc_classlist, regular, no_dead_strip");
+
+ AddModuleClassList(DefinedNonLazyClasses, "OBJC_LABEL_NONLAZY_CLASS_$",
+ "__DATA, __objc_nlclslist, regular, no_dead_strip");
+
+ // Build list of all implemented category addresses in array
+ // L_OBJC_LABEL_CATEGORY_$.
+ AddModuleClassList(DefinedCategories, "OBJC_LABEL_CATEGORY_$",
+ "__DATA, __objc_catlist, regular, no_dead_strip");
+ AddModuleClassList(DefinedNonLazyCategories, "OBJC_LABEL_NONLAZY_CATEGORY_$",
+ "__DATA, __objc_nlcatlist, regular, no_dead_strip");
+
+ EmitImageInfo();
+}
+
+/// isVTableDispatchedSelector - Returns true if SEL is in the list of
+/// VTableDispatchMethods; false otherwise. What this means is that
+/// except for the 19 selectors in the list, we generate a 32bit-style
+/// message dispatch call for all the rest.
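+/// For example (illustrative): in Mixed mode a send of "alloc" or
+/// "objectForKey:" goes through the vtable-based fixup path, while an
+/// arbitrary selector such as "frobnicate:" (hypothetical) uses the
+/// classic objc_msgSend-style dispatch.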
+bool CGObjCNonFragileABIMac::isVTableDispatchedSelector(Selector Sel) {
+ // At various points we've experimented with using vtable-based
+ // dispatch for all methods.
+ switch (CGM.getCodeGenOpts().getObjCDispatchMethod()) {
+ case CodeGenOptions::Legacy:
+ return false;
+ case CodeGenOptions::NonLegacy:
+ return true;
+ case CodeGenOptions::Mixed:
+ break;
+ }
+
+  // In Mixed mode, see whether this selector is in the white-list of things
+  // which must use the new dispatch convention. We lazily build a dense set
+  // for this.
+ if (VTableDispatchMethods.empty()) {
+ VTableDispatchMethods.insert(GetNullarySelector("alloc"));
+ VTableDispatchMethods.insert(GetNullarySelector("class"));
+ VTableDispatchMethods.insert(GetNullarySelector("self"));
+ VTableDispatchMethods.insert(GetNullarySelector("isFlipped"));
+ VTableDispatchMethods.insert(GetNullarySelector("length"));
+ VTableDispatchMethods.insert(GetNullarySelector("count"));
+
+ // These are vtable-based if GC is disabled.
+ // Optimistically use vtable dispatch for hybrid compiles.
+ if (CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
+ VTableDispatchMethods.insert(GetNullarySelector("retain"));
+ VTableDispatchMethods.insert(GetNullarySelector("release"));
+ VTableDispatchMethods.insert(GetNullarySelector("autorelease"));
+ }
+
+ VTableDispatchMethods.insert(GetUnarySelector("allocWithZone"));
+ VTableDispatchMethods.insert(GetUnarySelector("isKindOfClass"));
+ VTableDispatchMethods.insert(GetUnarySelector("respondsToSelector"));
+ VTableDispatchMethods.insert(GetUnarySelector("objectForKey"));
+ VTableDispatchMethods.insert(GetUnarySelector("objectAtIndex"));
+ VTableDispatchMethods.insert(GetUnarySelector("isEqualToString"));
+ VTableDispatchMethods.insert(GetUnarySelector("isEqual"));
+
+ // These are vtable-based if GC is enabled.
+ // Optimistically use vtable dispatch for hybrid compiles.
+ if (CGM.getLangOpts().getGC() != LangOptions::NonGC) {
+ VTableDispatchMethods.insert(GetNullarySelector("hash"));
+ VTableDispatchMethods.insert(GetUnarySelector("addObject"));
+
+ // "countByEnumeratingWithState:objects:count"
+ IdentifierInfo *KeyIdents[] = {
+ &CGM.getContext().Idents.get("countByEnumeratingWithState"),
+ &CGM.getContext().Idents.get("objects"),
+ &CGM.getContext().Idents.get("count")
+ };
+ VTableDispatchMethods.insert(
+ CGM.getContext().Selectors.getSelector(3, KeyIdents));
+ }
+ }
+
+ return VTableDispatchMethods.count(Sel);
+}
+
+/// BuildClassRoTInitializer - generate meta-data for:
+/// struct _class_ro_t {
+/// uint32_t const flags;
+/// uint32_t const instanceStart;
+/// uint32_t const instanceSize;
+/// uint32_t const reserved; // only when building for 64bit targets
+/// const uint8_t * const ivarLayout;
+/// const char *const name;
+/// const struct _method_list_t * const baseMethods;
+/// const struct _protocol_list_t *const baseProtocols;
+/// const struct _ivar_list_t *const ivars;
+/// const uint8_t * const weakIvarLayout;
+/// const struct _prop_list_t * const properties;
+/// }
+///
+llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
+ unsigned flags,
+ unsigned InstanceStart,
+ unsigned InstanceSize,
+ const ObjCImplementationDecl *ID) {
+ std::string ClassName = ID->getObjCRuntimeNameAsString();
+ llvm::Constant *Values[10]; // 11 for 64bit targets!
+
+ CharUnits beginInstance = CharUnits::fromQuantity(InstanceStart);
+ CharUnits endInstance = CharUnits::fromQuantity(InstanceSize);
+
+ bool hasMRCWeak = false;
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ flags |= NonFragileABI_Class_CompiledByARC;
+ else if ((hasMRCWeak = hasMRCWeakIvars(CGM, ID)))
+ flags |= NonFragileABI_Class_HasMRCWeakIvars;
+
+ Values[ 0] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
+ Values[ 1] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceStart);
+ Values[ 2] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceSize);
+ // FIXME. For 64bit targets add 0 here.
+ Values[ 3] = (flags & NonFragileABI_Class_Meta)
+ ? GetIvarLayoutName(nullptr, ObjCTypes)
+ : BuildStrongIvarLayout(ID, beginInstance, endInstance);
+ Values[ 4] = GetClassName(ID->getObjCRuntimeNameAsString());
+ // const struct _method_list_t * const baseMethods;
+ std::vector<llvm::Constant*> Methods;
+ std::string MethodListName("\01l_OBJC_$_");
+ if (flags & NonFragileABI_Class_Meta) {
+ MethodListName += "CLASS_METHODS_";
+ MethodListName += ID->getObjCRuntimeNameAsString();
+ for (const auto *I : ID->class_methods())
+ // Class methods should always be defined.
+ Methods.push_back(GetMethodConstant(I));
+ } else {
+ MethodListName += "INSTANCE_METHODS_";
+ MethodListName += ID->getObjCRuntimeNameAsString();
+ for (const auto *I : ID->instance_methods())
+ // Instance methods should always be defined.
+ Methods.push_back(GetMethodConstant(I));
+
+ for (const auto *PID : ID->property_impls()) {
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize){
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+ if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ Methods.push_back(C);
+ if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ Methods.push_back(C);
+ }
+ }
+ }
+ Values[ 5] = EmitMethodList(MethodListName,
+ "__DATA, __objc_const", Methods);
+
+ const ObjCInterfaceDecl *OID = ID->getClassInterface();
+ assert(OID && "CGObjCNonFragileABIMac::BuildClassRoTInitializer");
+ Values[ 6] = EmitProtocolList("\01l_OBJC_CLASS_PROTOCOLS_$_"
+ + OID->getObjCRuntimeNameAsString(),
+ OID->all_referenced_protocol_begin(),
+ OID->all_referenced_protocol_end());
+
+ if (flags & NonFragileABI_Class_Meta) {
+ Values[ 7] = llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
+ Values[ 8] = GetIvarLayoutName(nullptr, ObjCTypes);
+ Values[ 9] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ } else {
+ Values[ 7] = EmitIvarList(ID);
+ Values[ 8] = BuildWeakIvarLayout(ID, beginInstance, endInstance,
+ hasMRCWeak);
+ Values[ 9] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getObjCRuntimeNameAsString(),
+ ID, ID->getClassInterface(), ObjCTypes);
+ }
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassRonfABITy,
+ Values);
+ llvm::GlobalVariable *CLASS_RO_GV =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassRonfABITy, false,
+ llvm::GlobalValue::PrivateLinkage,
+ Init,
+ (flags & NonFragileABI_Class_Meta) ?
+ std::string("\01l_OBJC_METACLASS_RO_$_")+ClassName :
+ std::string("\01l_OBJC_CLASS_RO_$_")+ClassName);
+ CLASS_RO_GV->setAlignment(
+ CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassRonfABITy));
+ CLASS_RO_GV->setSection("__DATA, __objc_const");
+ return CLASS_RO_GV;
+}
+
+/// BuildClassMetaData - This routine defines the top-level meta-data
+/// for the given ClassName:
+/// struct _class_t {
+/// struct _class_t *isa;
+/// struct _class_t * const superclass;
+/// void *cache;
+/// IMP *vtable;
+/// struct class_ro_t *ro;
+/// }
+///
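+/// As a sketch (the class name is illustrative), a class Foo yields a pair
+/// of such globals, commonly OBJC_CLASS_$_Foo and OBJC_METACLASS_$_Foo,
+/// placed in the __DATA, __objc_data section.
+///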
+llvm::GlobalVariable *CGObjCNonFragileABIMac::BuildClassMetaData(
+ const std::string &ClassName, llvm::Constant *IsAGV, llvm::Constant *SuperClassGV,
+ llvm::Constant *ClassRoGV, bool HiddenVisibility, bool Weak) {
+ llvm::Constant *Values[] = {
+ IsAGV,
+ SuperClassGV,
+ ObjCEmptyCacheVar, // &ObjCEmptyCacheVar
+ ObjCEmptyVtableVar, // &ObjCEmptyVtableVar
+ ClassRoGV // &CLASS_RO_GV
+ };
+ if (!Values[1])
+ Values[1] = llvm::Constant::getNullValue(ObjCTypes.ClassnfABIPtrTy);
+ if (!Values[3])
+ Values[3] = llvm::Constant::getNullValue(
+ llvm::PointerType::getUnqual(ObjCTypes.ImpnfABITy));
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassnfABITy,
+ Values);
+ llvm::GlobalVariable *GV = GetClassGlobal(ClassName, Weak);
+ GV->setInitializer(Init);
+ GV->setSection("__DATA, __objc_data");
+ GV->setAlignment(
+ CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassnfABITy));
+ if (HiddenVisibility)
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ return GV;
+}
+
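+/// ImplementationIsNonLazy - A class or category implementation is
+/// "non-lazy" if it defines a +load method, which must run at image load
+/// time, before any lazy realization. For example (hypothetical source):
+/// @code
+/// @implementation Foo
+/// + (void)load { /* runs at image load; Foo becomes non-lazy */ }
+/// @end
+/// @endcode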
+bool
+CGObjCNonFragileABIMac::ImplementationIsNonLazy(const ObjCImplDecl *OD) const {
+ return OD->getClassMethod(GetNullarySelector("load")) != nullptr;
+}
+
+void CGObjCNonFragileABIMac::GetClassSizeInfo(const ObjCImplementationDecl *OID,
+ uint32_t &InstanceStart,
+ uint32_t &InstanceSize) {
+ const ASTRecordLayout &RL =
+ CGM.getContext().getASTObjCImplementationLayout(OID);
+
+ // InstanceSize is really instance end.
+ InstanceSize = RL.getDataSize().getQuantity();
+
+ // If there are no fields, the start is the same as the end.
+ if (!RL.getFieldCount())
+ InstanceStart = InstanceSize;
+ else
+ InstanceStart = RL.getFieldOffset(0) / CGM.getContext().getCharWidth();
+}
+
+void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
+ std::string ClassName = ID->getObjCRuntimeNameAsString();
+ if (!ObjCEmptyCacheVar) {
+ ObjCEmptyCacheVar = new llvm::GlobalVariable(
+ CGM.getModule(),
+ ObjCTypes.CacheTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ nullptr,
+ "_objc_empty_cache");
+
+  // Make this entry NULL for any iOS or watchOS target, and for OS X with
+  // a deployment target of 10.9 or later.
+ const llvm::Triple &Triple = CGM.getTarget().getTriple();
+ if (Triple.isiOS() || Triple.isWatchOS() ||
+ (Triple.isMacOSX() && !Triple.isMacOSXVersionLT(10, 9)))
+ // This entry will be null.
+ ObjCEmptyVtableVar = nullptr;
+ else
+ ObjCEmptyVtableVar = new llvm::GlobalVariable(
+ CGM.getModule(),
+ ObjCTypes.ImpnfABITy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ nullptr,
+ "_objc_empty_vtable");
+ }
+ assert(ID->getClassInterface() &&
+ "CGObjCNonFragileABIMac::GenerateClass - class is 0");
+ // FIXME: Is this correct (that meta class size is never computed)?
+ uint32_t InstanceStart =
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ClassnfABITy);
+ uint32_t InstanceSize = InstanceStart;
+ uint32_t flags = NonFragileABI_Class_Meta;
+ llvm::SmallString<64> ObjCMetaClassName(getMetaclassSymbolPrefix());
+ llvm::SmallString<64> ObjCClassName(getClassSymbolPrefix());
+ llvm::SmallString<64> TClassName;
+
+ llvm::GlobalVariable *SuperClassGV, *IsAGV;
+
+ // Build the flags for the metaclass.
+ bool classIsHidden =
+ ID->getClassInterface()->getVisibility() == HiddenVisibility;
+ if (classIsHidden)
+ flags |= NonFragileABI_Class_Hidden;
+
+ // FIXME: why is this flag set on the metaclass?
+ // ObjC metaclasses have no fields and don't really get constructed.
+ if (ID->hasNonZeroConstructors() || ID->hasDestructors()) {
+ flags |= NonFragileABI_Class_HasCXXStructors;
+ if (!ID->hasNonZeroConstructors())
+ flags |= NonFragileABI_Class_HasCXXDestructorOnly;
+ }
+
+ if (!ID->getClassInterface()->getSuperClass()) {
+ // class is root
+ flags |= NonFragileABI_Class_Root;
+ TClassName = ObjCClassName;
+ TClassName += ClassName;
+ SuperClassGV = GetClassGlobal(TClassName.str(),
+ ID->getClassInterface()->isWeakImported());
+ TClassName = ObjCMetaClassName;
+ TClassName += ClassName;
+ IsAGV = GetClassGlobal(TClassName.str(),
+ ID->getClassInterface()->isWeakImported());
+ } else {
+    // Has a superclass; the current class is not a root.
+ const ObjCInterfaceDecl *Root = ID->getClassInterface();
+ while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
+ Root = Super;
+    TClassName = ObjCMetaClassName;
+ TClassName += Root->getObjCRuntimeNameAsString();
+ IsAGV = GetClassGlobal(TClassName.str(),
+ Root->isWeakImported());
+
+    // Work on the superclass metadata symbol.
+ TClassName = ObjCMetaClassName;
+ TClassName += ID->getClassInterface()->getSuperClass()->getObjCRuntimeNameAsString();
+ SuperClassGV = GetClassGlobal(
+ TClassName.str(),
+ ID->getClassInterface()->getSuperClass()->isWeakImported());
+ }
+ llvm::GlobalVariable *CLASS_RO_GV = BuildClassRoTInitializer(flags,
+ InstanceStart,
+ InstanceSize,ID);
+ TClassName = ObjCMetaClassName;
+ TClassName += ClassName;
+ llvm::GlobalVariable *MetaTClass = BuildClassMetaData(
+ TClassName.str(), IsAGV, SuperClassGV, CLASS_RO_GV, classIsHidden,
+ ID->getClassInterface()->isWeakImported());
+ DefinedMetaClasses.push_back(MetaTClass);
+
+ // Metadata for the class
+ flags = 0;
+ if (classIsHidden)
+ flags |= NonFragileABI_Class_Hidden;
+
+ if (ID->hasNonZeroConstructors() || ID->hasDestructors()) {
+ flags |= NonFragileABI_Class_HasCXXStructors;
+
+ // Set a flag to enable a runtime optimization when a class has
+ // fields that require destruction but which don't require
+ // anything except zero-initialization during construction. This
+ // is most notably true of __strong and __weak types, but you can
+ // also imagine there being C++ types with non-trivial default
+ // constructors that merely set all fields to null.
+ if (!ID->hasNonZeroConstructors())
+ flags |= NonFragileABI_Class_HasCXXDestructorOnly;
+ }
+
+ if (hasObjCExceptionAttribute(CGM.getContext(), ID->getClassInterface()))
+ flags |= NonFragileABI_Class_Exception;
+
+ if (!ID->getClassInterface()->getSuperClass()) {
+ flags |= NonFragileABI_Class_Root;
+ SuperClassGV = nullptr;
+ } else {
+    // Has a superclass; the current class is not a root.
+ TClassName = ObjCClassName;
+ TClassName += ID->getClassInterface()->getSuperClass()->getObjCRuntimeNameAsString();
+ SuperClassGV = GetClassGlobal(
+ TClassName.str(),
+ ID->getClassInterface()->getSuperClass()->isWeakImported());
+ }
+ GetClassSizeInfo(ID, InstanceStart, InstanceSize);
+ CLASS_RO_GV = BuildClassRoTInitializer(flags,
+ InstanceStart,
+ InstanceSize,
+ ID);
+
+ TClassName = ObjCClassName;
+ TClassName += ClassName;
+ llvm::GlobalVariable *ClassMD =
+ BuildClassMetaData(TClassName.str(), MetaTClass, SuperClassGV, CLASS_RO_GV,
+ classIsHidden,
+ ID->getClassInterface()->isWeakImported());
+ DefinedClasses.push_back(ClassMD);
+ ImplementedClasses.push_back(ID->getClassInterface());
+
+ // Determine if this class is also "non-lazy".
+ if (ImplementationIsNonLazy(ID))
+ DefinedNonLazyClasses.push_back(ClassMD);
+
+ // Force the definition of the EHType if necessary.
+ if (flags & NonFragileABI_Class_Exception)
+ GetInterfaceEHType(ID->getClassInterface(), true);
+  // Make sure method definition entries are all cleared for the next
+  // implementation.
+ MethodDefinitions.clear();
+}
+
+/// GenerateProtocolRef - This routine is called to generate code for
+/// a protocol reference expression; as in:
+/// @code
+/// @protocol(Proto1);
+/// @endcode
+/// It generates a weak reference to l_OBJC_PROTOCOL_REFERENCE_$_Proto1
+/// which will hold the address of the protocol meta-data.
+///
+llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CodeGenFunction &CGF,
+ const ObjCProtocolDecl *PD) {
+
+  // This routine is called for @protocol only. So, we must build the
+  // definition of the protocol's meta-data (not just a reference to it!).
+ //
+ llvm::Constant *Init =
+ llvm::ConstantExpr::getBitCast(GetOrEmitProtocol(PD),
+ ObjCTypes.getExternalProtocolPtrTy());
+
+ std::string ProtocolName("\01l_OBJC_PROTOCOL_REFERENCE_$_");
+ ProtocolName += PD->getObjCRuntimeNameAsString();
+
+ CharUnits Align = CGF.getPointerAlign();
+
+ llvm::GlobalVariable *PTGV = CGM.getModule().getGlobalVariable(ProtocolName);
+ if (PTGV)
+ return CGF.Builder.CreateAlignedLoad(PTGV, Align);
+ PTGV = new llvm::GlobalVariable(
+ CGM.getModule(),
+ Init->getType(), false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Init,
+ ProtocolName);
+ PTGV->setSection("__DATA, __objc_protorefs, coalesced, no_dead_strip");
+ PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ PTGV->setAlignment(Align.getQuantity());
+ CGM.addCompilerUsedGlobal(PTGV);
+ return CGF.Builder.CreateAlignedLoad(PTGV, Align);
+}
+
+/// GenerateCategory - Build metadata for a category implementation.
+/// struct _category_t {
+/// const char * const name;
+/// struct _class_t *const cls;
+/// const struct _method_list_t * const instance_methods;
+/// const struct _method_list_t * const class_methods;
+/// const struct _protocol_list_t * const protocols;
+/// const struct _prop_list_t * const properties;
+/// }
+///
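+/// As a sketch, a category implementation such as (names are hypothetical):
+/// @code
+/// @implementation Foo (Bar)
+/// @end
+/// @endcode
+/// produces a _category_t global named \01l_OBJC_$_CATEGORY_Foo_$_Bar.
+///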
+void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ const ObjCInterfaceDecl *Interface = OCD->getClassInterface();
+ const char *Prefix = "\01l_OBJC_$_CATEGORY_";
+
+ llvm::SmallString<64> ExtCatName(Prefix);
+ ExtCatName += Interface->getObjCRuntimeNameAsString();
+ ExtCatName += "_$_";
+ ExtCatName += OCD->getNameAsString();
+
+ llvm::SmallString<64> ExtClassName(getClassSymbolPrefix());
+ ExtClassName += Interface->getObjCRuntimeNameAsString();
+
+ llvm::Constant *Values[6];
+ Values[0] = GetClassName(OCD->getIdentifier()->getName());
+ // meta-class entry symbol
+ llvm::GlobalVariable *ClassGV =
+ GetClassGlobal(ExtClassName.str(), Interface->isWeakImported());
+
+ Values[1] = ClassGV;
+ std::vector<llvm::Constant*> Methods;
+ llvm::SmallString<64> MethodListName(Prefix);
+
+ MethodListName += "INSTANCE_METHODS_";
+ MethodListName += Interface->getObjCRuntimeNameAsString();
+ MethodListName += "_$_";
+ MethodListName += OCD->getName();
+
+ for (const auto *I : OCD->instance_methods())
+ // Instance methods should always be defined.
+ Methods.push_back(GetMethodConstant(I));
+
+ Values[2] = EmitMethodList(MethodListName.str(),
+ "__DATA, __objc_const",
+ Methods);
+
+ MethodListName = Prefix;
+ MethodListName += "CLASS_METHODS_";
+ MethodListName += Interface->getObjCRuntimeNameAsString();
+ MethodListName += "_$_";
+ MethodListName += OCD->getNameAsString();
+
+ Methods.clear();
+ for (const auto *I : OCD->class_methods())
+ // Class methods should always be defined.
+ Methods.push_back(GetMethodConstant(I));
+
+ Values[3] = EmitMethodList(MethodListName.str(),
+ "__DATA, __objc_const",
+ Methods);
+ const ObjCCategoryDecl *Category =
+ Interface->FindCategoryDeclaration(OCD->getIdentifier());
+ if (Category) {
+ SmallString<256> ExtName;
+ llvm::raw_svector_ostream(ExtName) << Interface->getObjCRuntimeNameAsString() << "_$_"
+ << OCD->getName();
+ Values[4] = EmitProtocolList("\01l_OBJC_CATEGORY_PROTOCOLS_$_"
+ + Interface->getObjCRuntimeNameAsString() + "_$_"
+ + Category->getName(),
+ Category->protocol_begin(),
+ Category->protocol_end());
+ Values[5] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
+ OCD, Category, ObjCTypes);
+ } else {
+ Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
+ Values[5] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ }
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.CategorynfABITy,
+ Values);
+ llvm::GlobalVariable *GCATV
+ = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.CategorynfABITy,
+ false,
+ llvm::GlobalValue::PrivateLinkage,
+ Init,
+ ExtCatName.str());
+ GCATV->setAlignment(
+ CGM.getDataLayout().getABITypeAlignment(ObjCTypes.CategorynfABITy));
+ GCATV->setSection("__DATA, __objc_const");
+ CGM.addCompilerUsedGlobal(GCATV);
+ DefinedCategories.push_back(GCATV);
+
+ // Determine if this category is also "non-lazy".
+ if (ImplementationIsNonLazy(OCD))
+ DefinedNonLazyCategories.push_back(GCATV);
+  // Method definition entries must be cleared for the next implementation.
+ MethodDefinitions.clear();
+}
+
+/// GetMethodConstant - Return a struct objc_method constant for the
+/// given method if it has been defined. The result is null if the
+/// method has not been defined. The return value has type MethodPtrTy.
+llvm::Constant *CGObjCNonFragileABIMac::GetMethodConstant(
+ const ObjCMethodDecl *MD) {
+ llvm::Function *Fn = GetMethodDefinition(MD);
+ if (!Fn)
+ return nullptr;
+
+ llvm::Constant *Method[] = {
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy),
+ GetMethodVarType(MD),
+ llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy)
+ };
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
+}
+
+/// EmitMethodList - Build meta-data for method declarations
+/// struct _method_list_t {
+/// uint32_t entsize; // sizeof(struct _objc_method)
+/// uint32_t method_count;
+/// struct _objc_method method_list[method_count];
+/// }
+///
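+/// The list is emitted as a private global with the given Name (for
+/// instance, \01l_OBJC_$_INSTANCE_METHODS_Foo for a hypothetical class Foo)
+/// in the requested section; an empty list is emitted as a null
+/// method_list_t pointer instead.
+///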
+llvm::Constant *
+CGObjCNonFragileABIMac::EmitMethodList(Twine Name,
+ const char *Section,
+ ArrayRef<llvm::Constant*> Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodListnfABIPtrTy);
+
+ llvm::Constant *Values[3];
+ // sizeof(struct _objc_method)
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.MethodTy);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ // method_count
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
+ Methods.size());
+ Values[2] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::PrivateLinkage, Init, Name);
+ GV->setAlignment(CGM.getDataLayout().getABITypeAlignment(Init->getType()));
+ GV->setSection(Section);
+ CGM.addCompilerUsedGlobal(GV);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListnfABIPtrTy);
+}
+
+/// ObjCIvarOffsetVariable - Returns the ivar offset variable for
+/// the given ivar.
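+/// The variable is named after the containing interface and the ivar, e.g.
+/// OBJC_IVAR_$_Foo.bar for an ivar 'bar' of a hypothetical class Foo; it is
+/// created with external linkage if not already present in the module.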
+llvm::GlobalVariable *
+CGObjCNonFragileABIMac::ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+
+ const ObjCInterfaceDecl *Container = Ivar->getContainingInterface();
+ llvm::SmallString<64> Name("OBJC_IVAR_$_");
+ Name += Container->getObjCRuntimeNameAsString();
+ Name += ".";
+ Name += Ivar->getName();
+ llvm::GlobalVariable *IvarOffsetGV =
+ CGM.getModule().getGlobalVariable(Name);
+ if (!IvarOffsetGV)
+ IvarOffsetGV = new llvm::GlobalVariable(
+ CGM.getModule(), ObjCTypes.IvarOffsetVarTy, false,
+ llvm::GlobalValue::ExternalLinkage, nullptr, Name.str());
+ return IvarOffsetGV;
+}
+
+llvm::Constant *
+CGObjCNonFragileABIMac::EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar,
+ unsigned long int Offset) {
+ llvm::GlobalVariable *IvarOffsetGV = ObjCIvarOffsetVariable(ID, Ivar);
+ IvarOffsetGV->setInitializer(
+ llvm::ConstantInt::get(ObjCTypes.IvarOffsetVarTy, Offset));
+ IvarOffsetGV->setAlignment(
+ CGM.getDataLayout().getABITypeAlignment(ObjCTypes.IvarOffsetVarTy));
+
+ // FIXME: This matches gcc, but shouldn't the visibility be set on the use as
+ // well (i.e., in ObjCIvarOffsetVariable).
+ if (Ivar->getAccessControl() == ObjCIvarDecl::Private ||
+ Ivar->getAccessControl() == ObjCIvarDecl::Package ||
+ ID->getVisibility() == HiddenVisibility)
+ IvarOffsetGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ else
+ IvarOffsetGV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+ IvarOffsetGV->setSection("__DATA, __objc_ivar");
+ return IvarOffsetGV;
+}
+
+/// EmitIvarList - Emit the ivar list for the given
+/// implementation. The return value has type
+/// IvarListnfABIPtrTy.
+/// struct _ivar_t {
+/// unsigned [long] int *offset; // pointer to ivar offset location
+/// char *name;
+/// char *type;
+/// uint32_t alignment;
+/// uint32_t size;
+/// }
+/// struct _ivar_list_t {
+///   uint32_t entsize; // sizeof(struct _ivar_t)
+///   uint32_t count;
+///   struct _ivar_t list[count];
+/// }
+///
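+/// For a hypothetical class Foo the list is emitted as the private global
+/// \01l_OBJC_$_INSTANCE_VARIABLES_Foo in the __DATA, __objc_const section.
+///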
+
+llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
+ const ObjCImplementationDecl *ID) {
+
+ std::vector<llvm::Constant*> Ivars;
+
+ const ObjCInterfaceDecl *OID = ID->getClassInterface();
+ assert(OID && "CGObjCNonFragileABIMac::EmitIvarList - null interface");
+
+ // FIXME. Consolidate this with similar code in GenerateClass.
+
+ for (const ObjCIvarDecl *IVD = OID->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar()) {
+ // Ignore unnamed bit-fields.
+ if (!IVD->getDeclName())
+ continue;
+ llvm::Constant *Ivar[5];
+ Ivar[0] = EmitIvarOffsetVar(ID->getClassInterface(), IVD,
+ ComputeIvarBaseOffset(CGM, ID, IVD));
+ Ivar[1] = GetMethodVarName(IVD->getIdentifier());
+ Ivar[2] = GetMethodVarType(IVD);
+ llvm::Type *FieldTy =
+ CGM.getTypes().ConvertTypeForMem(IVD->getType());
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(FieldTy);
+ unsigned Align = CGM.getContext().getPreferredTypeAlign(
+ IVD->getType().getTypePtr()) >> 3;
+ Align = llvm::Log2_32(Align);
+ Ivar[3] = llvm::ConstantInt::get(ObjCTypes.IntTy, Align);
+    // NOTE: The size of a bit-field here does not match gcc's, because the
+    // two compilers treat bit-fields differently. However, 'size' for
+    // bit-field ivars is reportedly ignored by the runtime, so it does
+    // not matter. If it ever does matter, there is enough information here
+    // to get the bit-field right.
+ Ivar[4] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarnfABITy, Ivar));
+ }
+ // Return null for empty list.
+ if (Ivars.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
+
+ llvm::Constant *Values[3];
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.IvarnfABITy);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarnfABITy,
+ Ivars.size());
+ Values[2] = llvm::ConstantArray::get(AT, Ivars);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ const char *Prefix = "\01l_OBJC_$_INSTANCE_VARIABLES_";
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::PrivateLinkage,
+ Init,
+ Prefix + OID->getObjCRuntimeNameAsString());
+ GV->setAlignment(
+ CGM.getDataLayout().getABITypeAlignment(Init->getType()));
+ GV->setSection("__DATA, __objc_const");
+
+ CGM.addCompilerUsedGlobal(GV);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListnfABIPtrTy);
+}
+
+llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocolRef(
+ const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ if (!Entry) {
+ // We use the initializer as a marker of whether this is a forward
+ // reference or not. At module finalization we add the empty
+ // contents for protocols which were referenced but never defined.
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy,
+ false, llvm::GlobalValue::ExternalLinkage,
+ nullptr,
+ "\01l_OBJC_PROTOCOL_$_" + PD->getObjCRuntimeNameAsString());
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+ }
+
+ return Entry;
+}
+
+/// GetOrEmitProtocol - Generate the protocol meta-data:
+/// @code
+/// struct _protocol_t {
+/// id isa; // NULL
+/// const char * const protocol_name;
+/// const struct _protocol_list_t * protocol_list; // super protocols
+/// const struct method_list_t * const instance_methods;
+/// const struct method_list_t * const class_methods;
+/// const struct method_list_t *optionalInstanceMethods;
+/// const struct method_list_t *optionalClassMethods;
+/// const struct _prop_list_t * properties;
+/// const uint32_t size; // sizeof(struct _protocol_t)
+/// const uint32_t flags; // = 0
+/// const char ** extendedMethodTypes;
+/// const char *demangledName;
+/// }
+/// @endcode
+///
+
+llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
+ const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *Entry = Protocols[PD->getIdentifier()];
+
+ // Early exit if a defining object has already been generated.
+ if (Entry && Entry->hasInitializer())
+ return Entry;
+
+ // Use the protocol definition, if there is one.
+ if (const ObjCProtocolDecl *Def = PD->getDefinition())
+ PD = Def;
+
+ // Construct method lists.
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
+ std::vector<llvm::Constant*> MethodTypesExt, OptMethodTypesExt;
+ for (const auto *MD : PD->instance_methods()) {
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (!C)
+ return GetOrEmitProtocolRef(PD);
+
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptInstanceMethods.push_back(C);
+ OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
+ } else {
+ InstanceMethods.push_back(C);
+ MethodTypesExt.push_back(GetMethodVarType(MD, true));
+ }
+ }
+
+ for (const auto *MD : PD->class_methods()) {
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (!C)
+ return GetOrEmitProtocolRef(PD);
+
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptClassMethods.push_back(C);
+ OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
+ } else {
+ ClassMethods.push_back(C);
+ MethodTypesExt.push_back(GetMethodVarType(MD, true));
+ }
+ }
+
+ MethodTypesExt.insert(MethodTypesExt.end(),
+ OptMethodTypesExt.begin(), OptMethodTypesExt.end());
+
+ llvm::Constant *Values[12];
+ // isa is NULL
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ObjectPtrTy);
+ Values[1] = GetClassName(PD->getObjCRuntimeNameAsString());
+ Values[2] = EmitProtocolList("\01l_OBJC_$_PROTOCOL_REFS_" + PD->getObjCRuntimeNameAsString(),
+ PD->protocol_begin(),
+ PD->protocol_end());
+
+ Values[3] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_"
+ + PD->getObjCRuntimeNameAsString(),
+ "__DATA, __objc_const",
+ InstanceMethods);
+ Values[4] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_"
+ + PD->getObjCRuntimeNameAsString(),
+ "__DATA, __objc_const",
+ ClassMethods);
+ Values[5] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_OPT_"
+ + PD->getObjCRuntimeNameAsString(),
+ "__DATA, __objc_const",
+ OptInstanceMethods);
+ Values[6] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_OPT_"
+ + PD->getObjCRuntimeNameAsString(),
+ "__DATA, __objc_const",
+ OptClassMethods);
+ Values[7] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + PD->getObjCRuntimeNameAsString(),
+ nullptr, PD, ObjCTypes);
+ uint32_t Size =
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ProtocolnfABITy);
+ Values[8] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[9] = llvm::Constant::getNullValue(ObjCTypes.IntTy);
+ Values[10] = EmitProtocolMethodTypes("\01l_OBJC_$_PROTOCOL_METHOD_TYPES_"
+ + PD->getObjCRuntimeNameAsString(),
+ MethodTypesExt, ObjCTypes);
+ // const char *demangledName;
+ Values[11] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolnfABITy,
+ Values);
+
+ if (Entry) {
+ // Already created, fix the linkage and update the initializer.
+ Entry->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
+ Entry->setInitializer(Init);
+ } else {
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy,
+ false, llvm::GlobalValue::WeakAnyLinkage, Init,
+ "\01l_OBJC_PROTOCOL_$_" + PD->getObjCRuntimeNameAsString());
+ Entry->setAlignment(
+ CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ProtocolnfABITy));
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+
+ Protocols[PD->getIdentifier()] = Entry;
+ }
+ Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ CGM.addCompilerUsedGlobal(Entry);
+
+  // Use this protocol meta-data to build the protocol list table in the
+  // __DATA, __objc_protolist section.
+ llvm::GlobalVariable *PTGV =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABIPtrTy,
+ false, llvm::GlobalValue::WeakAnyLinkage, Entry,
+ "\01l_OBJC_LABEL_PROTOCOL_$_" + PD->getObjCRuntimeNameAsString());
+ PTGV->setAlignment(
+ CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ProtocolnfABIPtrTy));
+ PTGV->setSection("__DATA, __objc_protolist, coalesced, no_dead_strip");
+ PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ CGM.addCompilerUsedGlobal(PTGV);
+ return Entry;
+}
+
+/// EmitProtocolList - Generate protocol list meta-data:
+/// @code
+/// struct _protocol_list_t {
+/// long protocol_count; // Note, this is 32/64 bit
+/// struct _protocol_t[protocol_count];
+/// }
+/// @endcode
+///
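+/// The emitted array is null-terminated, but protocol_count excludes the
+/// trailing null entry.
+///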
+llvm::Constant *
+CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end) {
+ SmallVector<llvm::Constant *, 16> ProtocolRefs;
+
+ // Just return null for empty protocol lists
+ if (begin == end)
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
+
+ // FIXME: We shouldn't need to do this lookup here, should we?
+ SmallString<256> TmpName;
+ Name.toVector(TmpName);
+ llvm::GlobalVariable *GV =
+ CGM.getModule().getGlobalVariable(TmpName.str(), true);
+ if (GV)
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListnfABIPtrTy);
+
+ for (; begin != end; ++begin)
+ ProtocolRefs.push_back(GetProtocolRef(*begin)); // Implemented???
+
+ // This list is null terminated.
+ ProtocolRefs.push_back(llvm::Constant::getNullValue(
+ ObjCTypes.ProtocolnfABIPtrTy));
+
+ llvm::Constant *Values[2];
+ Values[0] =
+ llvm::ConstantInt::get(ObjCTypes.LongTy, ProtocolRefs.size() - 1);
+ Values[1] =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolnfABIPtrTy,
+ ProtocolRefs.size()),
+ ProtocolRefs);
+
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::PrivateLinkage,
+ Init, Name);
+ GV->setSection("__DATA, __objc_const");
+ GV->setAlignment(
+ CGM.getDataLayout().getABITypeAlignment(Init->getType()));
+ CGM.addCompilerUsedGlobal(GV);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.ProtocolListnfABIPtrTy);
+}
+
+/// GetMethodDescriptionConstant - This routine builds the following meta-data:
+/// struct _objc_method {
+/// SEL _cmd;
+/// char *method_type;
+/// char *_imp;
+/// }
+
+llvm::Constant *
+CGObjCNonFragileABIMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
+ llvm::Constant *Desc[3];
+ Desc[0] =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ Desc[1] = GetMethodVarType(MD);
+ if (!Desc[1])
+ return nullptr;
+
+  // Protocol methods have no implementation, so this entry is always NULL.
+ Desc[2] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Desc);
+}
+
+/// EmitObjCValueForIvar - Code gen for a nonfragile ivar reference.
+/// This amounts to generating code for:
+/// @code
+/// (type *)((char *)base + _OBJC_IVAR_$_.ivar)
+/// @endcode
+///
+LValue CGObjCNonFragileABIMac::EmitObjCValueForIvar(
+ CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ ObjCInterfaceDecl *ID = ObjectTy->getAs<ObjCObjectType>()->getInterface();
+ llvm::Value *Offset = EmitIvarOffset(CGF, ID, Ivar);
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ Offset);
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitIvarOffset(
+ CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ llvm::Value *IvarOffsetValue = ObjCIvarOffsetVariable(Interface, Ivar);
+ IvarOffsetValue = CGF.Builder.CreateAlignedLoad(IvarOffsetValue,
+ CGF.getSizeAlign(), "ivar");
+ if (IsIvarOffsetKnownIdempotent(CGF, Ivar))
+ cast<llvm::LoadInst>(IvarOffsetValue)
+ ->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
+ llvm::MDNode::get(VMContext, None));
+
+  // The ivar offset could be a 32-bit or a 64-bit integer depending on the
+  // architecture. If it is a 32-bit value, cast it to a 64-bit integer,
+  // since that is what the caller always expects.
+ if (ObjCTypes.IvarOffsetVarTy == ObjCTypes.IntTy)
+ IvarOffsetValue = CGF.Builder.CreateIntCast(
+ IvarOffsetValue, ObjCTypes.LongTy, true, "ivar.conv");
+ return IvarOffsetValue;
+}
+
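+/// Append \p selector to \p buffer using the mangling expected by the
+/// message-ref table: each ':' becomes '_', so a selector like foo:bar:
+/// (hypothetical) is appended as "foo_bar_", while a unary selector is
+/// appended unchanged.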
+static void appendSelectorForMessageRefTable(std::string &buffer,
+ Selector selector) {
+ if (selector.isUnarySelector()) {
+ buffer += selector.getNameForSlot(0);
+ return;
+ }
+
+ for (unsigned i = 0, e = selector.getNumArgs(); i != e; ++i) {
+ buffer += selector.getNameForSlot(i);
+ buffer += '_';
+ }
+}
+
+/// Emit a "v-table" message send. We emit a weak hidden-visibility
+/// struct, initially containing the selector pointer and a pointer to
+/// a "fixup" variant of the appropriate objc_msgSend. To call, we
+/// load and call the function pointer, passing the address of the
+/// struct as the second parameter. The runtime determines whether
+/// the selector is currently emitted using vtable dispatch; if so, it
+/// substitutes a stub function which simply tail-calls through the
+/// appropriate vtable slot, and if not, it substitutes a stub function
+/// which tail-calls objc_msgSend. Both stubs adjust the selector
+/// argument to correctly point to the selector.
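+///
+/// A sketch of the emitted message-ref entry (field names are illustrative;
+/// the actual global is an anonymous struct):
+/// @code
+/// struct _message_ref_t {
+///   IMP messenger; // initially one of the objc_msgSend*_fixup functions
+///   SEL name;
+/// };
+/// @endcode
+/// It is emitted with weak linkage and hidden visibility in the
+/// __DATA, __objc_msgrefs section.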
+RValue
+CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
+ ReturnValueSlot returnSlot,
+ QualType resultType,
+ Selector selector,
+ llvm::Value *arg0,
+ QualType arg0Type,
+ bool isSuper,
+ const CallArgList &formalArgs,
+ const ObjCMethodDecl *method) {
+ // Compute the actual arguments.
+ CallArgList args;
+
+ // First argument: the receiver / super-call structure.
+ if (!isSuper)
+ arg0 = CGF.Builder.CreateBitCast(arg0, ObjCTypes.ObjectPtrTy);
+ args.add(RValue::get(arg0), arg0Type);
+
+ // Second argument: a pointer to the message ref structure. Leave
+ // the actual argument value blank for now.
+ args.add(RValue::get(nullptr), ObjCTypes.MessageRefCPtrTy);
+
+ args.insert(args.end(), formalArgs.begin(), formalArgs.end());
+
+ MessageSendInfo MSI = getMessageSendInfo(method, resultType, args);
+
+ NullReturnState nullReturn;
+
+ // Find the function to call and the mangled name for the message
+ // ref structure. Using a different mangled name wouldn't actually
+ // be a problem; it would just be a waste.
+ //
+ // The runtime currently never uses vtable dispatch for anything
+ // except normal, non-super message-sends.
+ // FIXME: don't use this for that.
+ llvm::Constant *fn = nullptr;
+ std::string messageRefName("\01l_");
+ if (CGM.ReturnSlotInterferesWithArgs(MSI.CallInfo)) {
+ if (isSuper) {
+ fn = ObjCTypes.getMessageSendSuper2StretFixupFn();
+ messageRefName += "objc_msgSendSuper2_stret_fixup";
+ } else {
+ nullReturn.init(CGF, arg0);
+ fn = ObjCTypes.getMessageSendStretFixupFn();
+ messageRefName += "objc_msgSend_stret_fixup";
+ }
+ } else if (!isSuper && CGM.ReturnTypeUsesFPRet(resultType)) {
+ fn = ObjCTypes.getMessageSendFpretFixupFn();
+ messageRefName += "objc_msgSend_fpret_fixup";
+ } else {
+ if (isSuper) {
+ fn = ObjCTypes.getMessageSendSuper2FixupFn();
+ messageRefName += "objc_msgSendSuper2_fixup";
+ } else {
+ fn = ObjCTypes.getMessageSendFixupFn();
+ messageRefName += "objc_msgSend_fixup";
+ }
+ }
+  assert(fn && "CGObjCNonFragileABIMac::EmitVTableMessageSend");
+ messageRefName += '_';
+
+ // Append the selector name, except use underscores anywhere we
+ // would have used colons.
+ appendSelectorForMessageRefTable(messageRefName, selector);
+
+ llvm::GlobalVariable *messageRef
+ = CGM.getModule().getGlobalVariable(messageRefName);
+ if (!messageRef) {
+ // Build the message ref structure.
+ llvm::Constant *values[] = { fn, GetMethodVarName(selector) };
+ llvm::Constant *init = llvm::ConstantStruct::getAnon(values);
+ messageRef = new llvm::GlobalVariable(CGM.getModule(),
+ init->getType(),
+ /*constant*/ false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ init,
+ messageRefName);
+ messageRef->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ messageRef->setAlignment(16);
+ messageRef->setSection("__DATA, __objc_msgrefs, coalesced");
+ }
+
+ bool requiresnullCheck = false;
+ if (CGM.getLangOpts().ObjCAutoRefCount && method)
+ for (const auto *ParamDecl : method->params()) {
+ if (ParamDecl->hasAttr<NSConsumedAttr>()) {
+ if (!nullReturn.NullBB)
+ nullReturn.init(CGF, arg0);
+ requiresnullCheck = true;
+ break;
+ }
+ }
+
+ Address mref =
+ Address(CGF.Builder.CreateBitCast(messageRef, ObjCTypes.MessageRefPtrTy),
+ CGF.getPointerAlign());
+
+ // Update the message ref argument.
+ args[1].RV = RValue::get(mref.getPointer());
+
+ // Load the function to call from the message ref table.
+ Address calleeAddr =
+ CGF.Builder.CreateStructGEP(mref, 0, CharUnits::Zero());
+ llvm::Value *callee = CGF.Builder.CreateLoad(calleeAddr, "msgSend_fn");
+
+ callee = CGF.Builder.CreateBitCast(callee, MSI.MessengerType);
+
+ RValue result = CGF.EmitCall(MSI.CallInfo, callee, returnSlot, args);
+ return nullReturn.complete(CGF, result, resultType, formalArgs,
+ requiresnullCheck ? method : nullptr);
+}
+
+/// Generate code for a message send expression in the nonfragile abi.
+CodeGen::RValue
+CGObjCNonFragileABIMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method) {
+ return isVTableDispatchedSelector(Sel)
+ ? EmitVTableMessageSend(CGF, Return, ResultType, Sel,
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs, Method)
+ : EmitMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF, Sel),
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs, Method, Class, ObjCTypes);
+}
+
+llvm::GlobalVariable *
+CGObjCNonFragileABIMac::GetClassGlobal(const std::string &Name, bool Weak) {
+ llvm::GlobalValue::LinkageTypes L =
+ Weak ? llvm::GlobalValue::ExternalWeakLinkage
+ : llvm::GlobalValue::ExternalLinkage;
+
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+
+ if (!GV)
+ GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABITy,
+ false, L, nullptr, Name);
+
+ assert(GV->getLinkage() == L);
+ return GV;
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitClassRefFromId(CodeGenFunction &CGF,
+ IdentifierInfo *II,
+ bool Weak,
+ const ObjCInterfaceDecl *ID) {
+ CharUnits Align = CGF.getPointerAlign();
+ llvm::GlobalVariable *&Entry = ClassReferences[II];
+
+ if (!Entry) {
+ std::string ClassName(
+ getClassSymbolPrefix() +
+ (ID ? ID->getObjCRuntimeNameAsString() : II->getName()).str());
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName, Weak);
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+ false, llvm::GlobalValue::PrivateLinkage,
+ ClassGV, "OBJC_CLASSLIST_REFERENCES_$_");
+ Entry->setAlignment(Align.getQuantity());
+ Entry->setSection("__DATA, __objc_classrefs, regular, no_dead_strip");
+ CGM.addCompilerUsedGlobal(Entry);
+ }
+ return CGF.Builder.CreateAlignedLoad(Entry, Align);
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRefFromId(CGF, ID->getIdentifier(), ID->isWeakImported(), ID);
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitNSAutoreleasePoolClassRef(
+ CodeGenFunction &CGF) {
+ IdentifierInfo *II = &CGM.getContext().Idents.get("NSAutoreleasePool");
+ return EmitClassRefFromId(CGF, II, false, nullptr);
+}
+
+llvm::Value *
+CGObjCNonFragileABIMac::EmitSuperClassRef(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *ID) {
+ CharUnits Align = CGF.getPointerAlign();
+ llvm::GlobalVariable *&Entry = SuperClassReferences[ID->getIdentifier()];
+
+ if (!Entry) {
+ llvm::SmallString<64> ClassName(getClassSymbolPrefix());
+ ClassName += ID->getObjCRuntimeNameAsString();
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName.str(),
+ ID->isWeakImported());
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+ false, llvm::GlobalValue::PrivateLinkage,
+ ClassGV, "OBJC_CLASSLIST_SUP_REFS_$_");
+ Entry->setAlignment(Align.getQuantity());
+ Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
+ CGM.addCompilerUsedGlobal(Entry);
+ }
+ return CGF.Builder.CreateAlignedLoad(Entry, Align);
+}
+
+/// EmitMetaClassRef - Return a Value * of the address of _class_t
+/// meta-data
+///
+llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *ID,
+ bool Weak) {
+ CharUnits Align = CGF.getPointerAlign();
+ llvm::GlobalVariable * &Entry = MetaClassReferences[ID->getIdentifier()];
+ if (!Entry) {
+ llvm::SmallString<64> MetaClassName(getMetaclassSymbolPrefix());
+ MetaClassName += ID->getObjCRuntimeNameAsString();
+ llvm::GlobalVariable *MetaClassGV =
+ GetClassGlobal(MetaClassName.str(), Weak);
+
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+ false, llvm::GlobalValue::PrivateLinkage,
+ MetaClassGV, "OBJC_CLASSLIST_SUP_REFS_$_");
+ Entry->setAlignment(Align.getQuantity());
+
+ Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
+ CGM.addCompilerUsedGlobal(Entry);
+ }
+
+ return CGF.Builder.CreateAlignedLoad(Entry, Align);
+}
+
+/// GetClass - Return a reference to the class for the given interface
+/// decl.
+llvm::Value *CGObjCNonFragileABIMac::GetClass(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *ID) {
+ if (ID->isWeakImported()) {
+ llvm::SmallString<64> ClassName(getClassSymbolPrefix());
+ ClassName += ID->getObjCRuntimeNameAsString();
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName.str(), true);
+ (void)ClassGV;
+ assert(ClassGV->hasExternalWeakLinkage());
+ }
+
+ return EmitClassRef(CGF, ID);
+}
+
+/// Generates a message send where the super is the receiver. This is
+/// a message send to self with special delivery semantics indicating
+/// which class's method should be called.
+CodeGen::RValue
+CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CodeGen::CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ // ...
+ // Create and init a super structure; this is a (receiver, class)
+ // pair we will pass to objc_msgSendSuper.
+ Address ObjCSuper =
+ CGF.CreateTempAlloca(ObjCTypes.SuperTy, CGF.getPointerAlign(),
+ "objc_super");
+
+ llvm::Value *ReceiverAsObject =
+ CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateStore(
+ ReceiverAsObject,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 0, CharUnits::Zero()));
+
+ // If this is a class message the metaclass is passed as the target.
+ llvm::Value *Target;
+ if (IsClassMessage)
+ Target = EmitMetaClassRef(CGF, Class, Class->isWeakImported());
+ else
+ Target = EmitSuperClassRef(CGF, Class);
+
+ // FIXME: We shouldn't need to do this cast, rectify the ASTContext and
+ // ObjCTypes types.
+ llvm::Type *ClassTy =
+ CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
+ Target = CGF.Builder.CreateBitCast(Target, ClassTy);
+ CGF.Builder.CreateStore(
+ Target, CGF.Builder.CreateStructGEP(ObjCSuper, 1, CGF.getPointerSize()));
+
+ return (isVTableDispatchedSelector(Sel))
+ ? EmitVTableMessageSend(CGF, Return, ResultType, Sel,
+ ObjCSuper.getPointer(), ObjCTypes.SuperPtrCTy,
+ true, CallArgs, Method)
+ : EmitMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF, Sel),
+ ObjCSuper.getPointer(), ObjCTypes.SuperPtrCTy,
+ true, CallArgs, Method, Class, ObjCTypes);
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CodeGenFunction &CGF,
+ Selector Sel) {
+ Address Addr = EmitSelectorAddr(CGF, Sel);
+
+ llvm::LoadInst* LI = CGF.Builder.CreateLoad(Addr);
+ LI->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
+ llvm::MDNode::get(VMContext, None));
+ return LI;
+}
+
+Address CGObjCNonFragileABIMac::EmitSelectorAddr(CodeGenFunction &CGF,
+ Selector Sel) {
+ llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
+
+ CharUnits Align = CGF.getPointerAlign();
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
+ ObjCTypes.SelectorPtrTy);
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.SelectorPtrTy,
+ false, llvm::GlobalValue::PrivateLinkage,
+ Casted, "OBJC_SELECTOR_REFERENCES_");
+ Entry->setExternallyInitialized(true);
+ Entry->setSection("__DATA, __objc_selrefs, literal_pointers, no_dead_strip");
+ Entry->setAlignment(Align.getQuantity());
+ CGM.addCompilerUsedGlobal(Entry);
+ }
+
+ return Address(Entry, Align);
+}
+
+/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
+/// objc_assign_ivar (id src, id *dst, ptrdiff_t)
+///
+void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src,
+ Address dst,
+ llvm::Value *ivarOffset) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = { src, dst.getPointer(), ivarOffset };
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignIvarFn(), args);
+}
+
+/// EmitObjCStrongCastAssign - Code gen for assigning to a __strong cast object.
+/// objc_assign_strongCast (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, Address dst) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = { src, dst.getPointer() };
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignStrongCastFn(),
+ args, "weakassign");
+}
+
+void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable(
+ CodeGen::CodeGenFunction &CGF,
+ Address DestPtr,
+ Address SrcPtr,
+ llvm::Value *Size) {
+ SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
+ DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
+ llvm::Value *args[] = { DestPtr.getPointer(), SrcPtr.getPointer(), Size };
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.GcMemmoveCollectableFn(), args);
+}
+
+/// EmitObjCWeakRead - Code gen for loading value of a __weak
+/// object: objc_read_weak (id *src)
+///
+llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
+ CodeGen::CodeGenFunction &CGF,
+ Address AddrWeakObj) {
+ llvm::Type *DestTy = AddrWeakObj.getElementType();
+ AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj, ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *read_weak =
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcReadWeakFn(),
+ AddrWeakObj.getPointer(), "weakread");
+ read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
+ return read_weak;
+}
+
+/// EmitObjCWeakAssign - Code gen for assigning to a __weak object.
+/// objc_assign_weak (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, Address dst) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = { src, dst.getPointer() };
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignWeakFn(),
+ args, "weakassign");
+}
+
+/// EmitObjCGlobalAssign - Code gen for assigning to a __strong object.
+/// objc_assign_global (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, Address dst,
+ bool threadlocal) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = { src, dst.getPointer() };
+ if (!threadlocal)
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignGlobalFn(),
+ args, "globalassign");
+ else
+ CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignThreadLocalFn(),
+ args, "threadlocalassign");
+}
+
+void
+CGObjCNonFragileABIMac::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ EmitAtSynchronizedStmt(CGF, S,
+ cast<llvm::Function>(ObjCTypes.getSyncEnterFn()),
+ cast<llvm::Function>(ObjCTypes.getSyncExitFn()));
+}
+
+llvm::Constant *
+CGObjCNonFragileABIMac::GetEHType(QualType T) {
+ // There's a particular fixed type info for 'id'.
+ if (T->isObjCIdType() ||
+ T->isObjCQualifiedIdType()) {
+ llvm::Constant *IDEHType =
+ CGM.getModule().getGlobalVariable("OBJC_EHTYPE_id");
+ if (!IDEHType)
+ IDEHType =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ nullptr, "OBJC_EHTYPE_id");
+ return IDEHType;
+ }
+
+ // All other types should be Objective-C interface pointer types.
+ const ObjCObjectPointerType *PT =
+ T->getAs<ObjCObjectPointerType>();
+ assert(PT && "Invalid @catch type.");
+ const ObjCInterfaceType *IT = PT->getInterfaceType();
+ assert(IT && "Invalid @catch type.");
+ return GetInterfaceEHType(IT->getDecl(), false);
+}
+
+void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) {
+ EmitTryCatchStmt(CGF, S,
+ cast<llvm::Function>(ObjCTypes.getObjCBeginCatchFn()),
+ cast<llvm::Function>(ObjCTypes.getObjCEndCatchFn()),
+ cast<llvm::Function>(ObjCTypes.getExceptionRethrowFn()));
+}
+
+/// EmitThrowStmt - Generate code for a throw statement.
+void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S,
+ bool ClearInsertionPoint) {
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ llvm::Value *Exception = CGF.EmitObjCThrowOperand(ThrowExpr);
+ Exception = CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy);
+ CGF.EmitRuntimeCallOrInvoke(ObjCTypes.getExceptionThrowFn(), Exception)
+ .setDoesNotReturn();
+ } else {
+ CGF.EmitRuntimeCallOrInvoke(ObjCTypes.getExceptionRethrowFn())
+ .setDoesNotReturn();
+ }
+
+ CGF.Builder.CreateUnreachable();
+ if (ClearInsertionPoint)
+ CGF.Builder.ClearInsertionPoint();
+}
+
+llvm::Constant *
+CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
+ bool ForDefinition) {
+ llvm::GlobalVariable * &Entry = EHTypeReferences[ID->getIdentifier()];
+
+ // If we don't need a definition, return the entry if found or check
+ // if we use an external reference.
+ if (!ForDefinition) {
+ if (Entry)
+ return Entry;
+
+ // If this type (or a super class) has the __objc_exception__
+ // attribute, emit an external reference.
+ if (hasObjCExceptionAttribute(CGM.getContext(), ID))
+ return Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ nullptr,
+ ("OBJC_EHTYPE_$_" +
+ ID->getObjCRuntimeNameAsString()));
+ }
+
+ // Otherwise we need to either make a new entry or fill in the
+ // initializer.
+ assert((!Entry || !Entry->hasInitializer()) && "Duplicate EHType definition");
+ llvm::SmallString<64> ClassName(getClassSymbolPrefix());
+ ClassName += ID->getObjCRuntimeNameAsString();
+ std::string VTableName = "objc_ehtype_vtable";
+ llvm::GlobalVariable *VTableGV =
+ CGM.getModule().getGlobalVariable(VTableName);
+ if (!VTableGV)
+ VTableGV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.Int8PtrTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ nullptr, VTableName);
+
+ llvm::Value *VTableIdx = llvm::ConstantInt::get(CGM.Int32Ty, 2);
+
+ llvm::Constant *Values[] = {
+ llvm::ConstantExpr::getGetElementPtr(VTableGV->getValueType(), VTableGV,
+ VTableIdx),
+ GetClassName(ID->getObjCRuntimeNameAsString()),
+ GetClassGlobal(ClassName.str())};
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.EHTypeTy, Values);
+
+ llvm::GlobalValue::LinkageTypes L = ForDefinition
+ ? llvm::GlobalValue::ExternalLinkage
+ : llvm::GlobalValue::WeakAnyLinkage;
+ if (Entry) {
+ Entry->setInitializer(Init);
+ } else {
+ llvm::SmallString<64> EHTYPEName("OBJC_EHTYPE_$_");
+ EHTYPEName += ID->getObjCRuntimeNameAsString();
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy, false,
+ L,
+ Init,
+ EHTYPEName.str());
+ }
+ assert(Entry->getLinkage() == L);
+
+ if (ID->getVisibility() == HiddenVisibility)
+ Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ Entry->setAlignment(CGM.getDataLayout().getABITypeAlignment(
+ ObjCTypes.EHTypeTy));
+
+ if (ForDefinition)
+ Entry->setSection("__DATA,__objc_const");
+ else
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+
+ return Entry;
+}
+
+/* *** */
+
+CodeGen::CGObjCRuntime *
+CodeGen::CreateMacObjCRuntime(CodeGen::CodeGenModule &CGM) {
+ switch (CGM.getLangOpts().ObjCRuntime.getKind()) {
+ case ObjCRuntime::FragileMacOSX:
+ return new CGObjCMac(CGM);
+
+ case ObjCRuntime::MacOSX:
+ case ObjCRuntime::iOS:
+ case ObjCRuntime::WatchOS:
+ return new CGObjCNonFragileABIMac(CGM);
+
+ case ObjCRuntime::GNUstep:
+ case ObjCRuntime::GCC:
+ case ObjCRuntime::ObjFW:
+ llvm_unreachable("these runtimes are not Mac runtimes");
+ }
+ llvm_unreachable("bad runtime");
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp
new file mode 100644
index 0000000..7be9ae9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp
@@ -0,0 +1,391 @@
+//==- CGObjCRuntime.cpp - Interface to Shared Objective-C Runtime Features ==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This abstract class defines the interface for Objective-C runtime-specific
+// code generation. It provides some concrete helper methods for functionality
+// shared between all (or most) of the Objective-C runtimes supported by clang.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+#include "CGCleanup.h"
+#include "CGRecordLayout.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
+#include "llvm/IR/CallSite.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCImplementationDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ const ObjCInterfaceDecl *Container = Ivar->getContainingInterface();
+
+ // FIXME: We should eliminate the need to have ObjCImplementationDecl passed
+ // in here; it should never be necessary because that should be the lexical
+ // decl context for the ivar.
+
+  // If we have an implementation (and the ivar is in it) then
+  // look it up in the implementation layout.
+ const ASTRecordLayout *RL;
+ if (ID && declaresSameEntity(ID->getClassInterface(), Container))
+ RL = &CGM.getContext().getASTObjCImplementationLayout(ID);
+ else
+ RL = &CGM.getContext().getASTObjCInterfaceLayout(Container);
+
+ // Compute field index.
+ //
+ // FIXME: The index here is closely tied to how ASTContext::getObjCLayout is
+ // implemented. This should be fixed to get the information from the layout
+ // directly.
+ unsigned Index = 0;
+
+ for (const ObjCIvarDecl *IVD = Container->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar()) {
+ if (Ivar == IVD)
+ break;
+ ++Index;
+ }
+ assert(Index < RL->getFieldCount() && "Ivar is not inside record layout!");
+
+ return RL->getFieldOffset(Index);
+}
+
+uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *Ivar) {
+ return LookupFieldBitOffset(CGM, OID, nullptr, Ivar) /
+ CGM.getContext().getCharWidth();
+}
+
+uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCImplementationDecl *OID,
+ const ObjCIvarDecl *Ivar) {
+ return LookupFieldBitOffset(CGM, OID->getClassInterface(), OID, Ivar) /
+ CGM.getContext().getCharWidth();
+}
+
+unsigned CGObjCRuntime::ComputeBitfieldBitOffset(
+ CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ return LookupFieldBitOffset(CGM, ID, ID->getImplementation(), Ivar);
+}
+
+LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *OID,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers,
+ llvm::Value *Offset) {
+ // Compute (type*) ( (char *) BaseValue + Offset)
+ QualType IvarTy = Ivar->getType();
+ llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
+ llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, CGF.Int8PtrTy);
+ V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");
+
+ if (!Ivar->isBitField()) {
+ V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
+ LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
+ LV.getQuals().addCVRQualifiers(CVRQualifiers);
+ return LV;
+ }
+
+  // We need to compute an access strategy for this bit-field. We are given the
+  // offset to the first byte in the bit-field; the sub-byte offset is taken
+  // from the original layout. We reuse the normal bit-field access strategy by
+  // treating this as an access to a struct where the bit-field is in byte 0,
+  // and adjust the containing type size as appropriate.
+  //
+  // FIXME: Note that currently we make a very conservative estimate of the
+  // alignment of the bit-field, because (a) it is not clear what guarantees
+  // the runtime makes to us, and (b) we don't have a way to specify that the
+  // struct is at an alignment plus offset.
+ //
+  // Note, there is a subtle invariant here: the field bit-offset lookup below
+  // only works for non-synthesized ivars, yet this routine may be called for
+  // synthesized ivars. However, a synthesized ivar can never be a bit-field,
+  // so this path is safe.
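+  //
+  // As a worked example (illustrative numbers): with 8-bit chars, a field
+  // bit offset of 13 and a 6-bit field give BitOffset = 13 % 8 = 5, and
+  // StorageSize is RoundUp(5 + 6, 8) = 16 bits, i.e. 2 chars.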
+ uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, nullptr, Ivar);
+ uint64_t BitOffset = FieldBitOffset % CGF.CGM.getContext().getCharWidth();
+ uint64_t AlignmentBits = CGF.CGM.getTarget().getCharAlign();
+ uint64_t BitFieldSize = Ivar->getBitWidthValue(CGF.getContext());
+ CharUnits StorageSize =
+ CGF.CGM.getContext().toCharUnitsFromBits(
+ llvm::RoundUpToAlignment(BitOffset + BitFieldSize, AlignmentBits));
+ CharUnits Alignment = CGF.CGM.getContext().toCharUnitsFromBits(AlignmentBits);
+
+ // Allocate a new CGBitFieldInfo object to describe this access.
+ //
+ // FIXME: This is incredibly wasteful, these should be uniqued or part of some
+ // layout object. However, this is blocked on other cleanups to the
+ // Objective-C code, so for now we just live with allocating a bunch of these
+ // objects.
+ CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
+ CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset, BitFieldSize,
+ CGF.CGM.getContext().toBits(StorageSize),
+ CharUnits::fromQuantity(0)));
+
+ Address Addr(V, Alignment);
+ Addr = CGF.Builder.CreateElementBitCast(Addr,
+ llvm::Type::getIntNTy(CGF.getLLVMContext(),
+ Info->StorageSize));
+ return LValue::MakeBitfield(Addr, *Info,
+ IvarTy.withCVRQualifiers(CVRQualifiers),
+ AlignmentSource::Decl);
+}
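+
+// Editorial example (not from the original sources): the bit-field path
+// above is exercised by a declaration such as
+//
+//   @interface Widget : NSObject { unsigned flags : 3; }   // hypothetical
+//   @end
+//
+// for which the access is lowered as if 'flags' were the sole member of a
+// struct starting at the computed byte offset, per the comments above.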
+
+namespace {
+ struct CatchHandler {
+ const VarDecl *Variable;
+ const Stmt *Body;
+ llvm::BasicBlock *Block;
+ llvm::Constant *TypeInfo;
+ };
+
+ struct CallObjCEndCatch final : EHScopeStack::Cleanup {
+ CallObjCEndCatch(bool MightThrow, llvm::Value *Fn) :
+ MightThrow(MightThrow), Fn(Fn) {}
+ bool MightThrow;
+ llvm::Value *Fn;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ if (!MightThrow) {
+ CGF.Builder.CreateCall(Fn)->setDoesNotThrow();
+ return;
+ }
+
+ CGF.EmitRuntimeCallOrInvoke(Fn);
+ }
+ };
+}
+
+void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S,
+ llvm::Constant *beginCatchFn,
+ llvm::Constant *endCatchFn,
+ llvm::Constant *exceptionRethrowFn) {
+ // Jump destination for falling out of catch bodies.
+ CodeGenFunction::JumpDest Cont;
+ if (S.getNumCatchStmts())
+ Cont = CGF.getJumpDestInCurrentScope("eh.cont");
+
+ CodeGenFunction::FinallyInfo FinallyInfo;
+ if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
+ FinallyInfo.enter(CGF, Finally->getFinallyBody(),
+ beginCatchFn, endCatchFn, exceptionRethrowFn);
+
+ SmallVector<CatchHandler, 8> Handlers;
+
+ // Enter the catch, if there is one.
+ if (S.getNumCatchStmts()) {
+ for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
+ const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
+
+ Handlers.push_back(CatchHandler());
+ CatchHandler &Handler = Handlers.back();
+ Handler.Variable = CatchDecl;
+ Handler.Body = CatchStmt->getCatchBody();
+ Handler.Block = CGF.createBasicBlock("catch");
+
+ // @catch(...) always matches.
+ if (!CatchDecl) {
+ Handler.TypeInfo = nullptr; // catch-all
+ // Don't consider any other catches.
+ break;
+ }
+
+ Handler.TypeInfo = GetEHType(CatchDecl->getType());
+ }
+
+ EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
+ Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
+ }
+
+ // Emit the try body.
+ CGF.EmitStmt(S.getTryBody());
+
+ // Leave the try.
+ if (S.getNumCatchStmts())
+ CGF.popCatchScope();
+
+ // Remember where we were.
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
+
+ // Emit the handlers.
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
+ CatchHandler &Handler = Handlers[I];
+
+ CGF.EmitBlock(Handler.Block);
+ llvm::Value *RawExn = CGF.getExceptionFromSlot();
+
+ // Enter the catch.
+ llvm::Value *Exn = RawExn;
+ if (beginCatchFn) {
+ Exn = CGF.Builder.CreateCall(beginCatchFn, RawExn, "exn.adjusted");
+ cast<llvm::CallInst>(Exn)->setDoesNotThrow();
+ }
+
+ CodeGenFunction::LexicalScope cleanups(CGF, Handler.Body->getSourceRange());
+
+ if (endCatchFn) {
+ // Add a cleanup to leave the catch.
+ bool EndCatchMightThrow = (Handler.Variable == nullptr);
+
+ CGF.EHStack.pushCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
+ EndCatchMightThrow,
+ endCatchFn);
+ }
+
+ // Bind the catch parameter if it exists.
+ if (const VarDecl *CatchParam = Handler.Variable) {
+ llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
+ llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);
+
+ CGF.EmitAutoVarDecl(*CatchParam);
+ EmitInitOfCatchParam(CGF, CastExn, CatchParam);
+ }
+
+ CGF.ObjCEHValueStack.push_back(Exn);
+ CGF.EmitStmt(Handler.Body);
+ CGF.ObjCEHValueStack.pop_back();
+
+ // Leave any cleanups associated with the catch.
+ cleanups.ForceCleanup();
+
+ CGF.EmitBranchThroughCleanup(Cont);
+ }
+
+ // Go back to the try-statement fallthrough.
+ CGF.Builder.restoreIP(SavedIP);
+
+ // Pop out of the finally.
+ if (S.getFinallyStmt())
+ FinallyInfo.exit(CGF);
+
+ if (Cont.isValid())
+ CGF.EmitBlock(Cont.getBlock());
+}
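+
+// Editorial sketch (not from the original sources) of the control flow the
+// routine above emits for
+//
+//   @try { ... } @catch (T *e) { ... } @finally { ... }
+//
+//  - the try body runs with a catch scope pushed for each handler;
+//  - each handler block calls beginCatchFn (if given) to adjust the
+//    exception, binds the @catch parameter, runs the body, and leaves the
+//    catch via the CallObjCEndCatch cleanup;
+//  - all paths then branch through cleanups to "eh.cont", with @finally
+//    entered on every exit via FinallyInfo.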
+
+void CGObjCRuntime::EmitInitOfCatchParam(CodeGenFunction &CGF,
+ llvm::Value *exn,
+ const VarDecl *paramDecl) {
+
+ Address paramAddr = CGF.GetAddrOfLocalVar(paramDecl);
+
+ switch (paramDecl->getType().getQualifiers().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ exn = CGF.EmitARCRetainNonBlock(exn);
+ // fallthrough
+
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ CGF.Builder.CreateStore(exn, paramAddr);
+ return;
+
+ case Qualifiers::OCL_Weak:
+ CGF.EmitARCInitWeak(paramAddr, exn);
+ return;
+ }
+ llvm_unreachable("invalid ownership qualifier");
+}
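+
+// Editorial note: the switch above mirrors ARC ownership for the @catch
+// parameter; e.g. "@catch (NSException *e)" is implicitly __strong, so the
+// exception is retained before the store, while a __weak parameter is
+// initialized via EmitARCInitWeak (objc_initWeak) instead of a plain store.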
+
+namespace {
+ struct CallSyncExit final : EHScopeStack::Cleanup {
+ llvm::Value *SyncExitFn;
+ llvm::Value *SyncArg;
+ CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
+ : SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
+ }
+ };
+}
+
+void CGObjCRuntime::EmitAtSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S,
+ llvm::Function *syncEnterFn,
+ llvm::Function *syncExitFn) {
+ CodeGenFunction::RunCleanupsScope cleanups(CGF);
+
+ // Evaluate the lock operand. This is guaranteed to dominate the
+ // ARC release and lock-release cleanups.
+ const Expr *lockExpr = S.getSynchExpr();
+ llvm::Value *lock;
+ if (CGF.getLangOpts().ObjCAutoRefCount) {
+ lock = CGF.EmitARCRetainScalarExpr(lockExpr);
+ lock = CGF.EmitObjCConsumeObject(lockExpr->getType(), lock);
+ } else {
+ lock = CGF.EmitScalarExpr(lockExpr);
+ }
+ lock = CGF.Builder.CreateBitCast(lock, CGF.VoidPtrTy);
+
+ // Acquire the lock.
+ CGF.Builder.CreateCall(syncEnterFn, lock)->setDoesNotThrow();
+
+ // Register an all-paths cleanup to release the lock.
+ CGF.EHStack.pushCleanup<CallSyncExit>(NormalAndEHCleanup, syncExitFn, lock);
+
+ // Emit the body of the statement.
+ CGF.EmitStmt(S.getSynchBody());
+}
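+
+// Editorial sketch of the lowering performed above:
+//
+//   @synchronized (obj) { body }
+//
+// becomes, roughly:
+//
+//   void *lock = obj;        // retained and consumed under ARC
+//   syncEnterFn(lock);       // e.g. objc_sync_enter; marked nounwind
+//   body;                    // CallSyncExit releases the lock on every
+//                            // exit path, including unwinding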
+
+/// Compute the pointer-to-function type to which a message send
+/// should be cast in order to correctly call the given method
+/// with the given arguments.
+///
+/// \param method - may be null
+/// \param resultType - the result type to use if there's no method
+/// \param callArgs - the actual arguments, including implicit ones
+CGObjCRuntime::MessageSendInfo
+CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method,
+ QualType resultType,
+ CallArgList &callArgs) {
+ // If there's a method, use information from that.
+ if (method) {
+ const CGFunctionInfo &signature =
+ CGM.getTypes().arrangeObjCMessageSendSignature(method, callArgs[0].Ty);
+
+ llvm::PointerType *signatureType =
+ CGM.getTypes().GetFunctionType(signature)->getPointerTo();
+
+ // If that's not variadic, there's no need to recompute the ABI
+ // arrangement.
+ if (!signature.isVariadic())
+ return MessageSendInfo(signature, signatureType);
+
+ // Otherwise, there is.
+ FunctionType::ExtInfo einfo = signature.getExtInfo();
+ const CGFunctionInfo &argsInfo =
+ CGM.getTypes().arrangeFreeFunctionCall(resultType, callArgs, einfo,
+ signature.getRequiredArgs());
+
+ return MessageSendInfo(argsInfo, signatureType);
+ }
+
+ // There's no method; just use a default CC.
+ const CGFunctionInfo &argsInfo =
+ CGM.getTypes().arrangeFreeFunctionCall(resultType, callArgs,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
+
+ // Derive the signature to call from that.
+ llvm::PointerType *signatureType =
+ CGM.getTypes().GetFunctionType(argsInfo)->getPointerTo();
+ return MessageSendInfo(argsInfo, signatureType);
+}
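+
+// Editorial note: the variadic re-arrangement above matters for sends such
+// as "[str appendFormat:@"%d", 42]" (hypothetical example): the ABI
+// arrangement is recomputed from the actual arguments, while the messenger
+// is still cast to the pointer type derived from the method signature.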
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
new file mode 100644
index 0000000..28d88dd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
@@ -0,0 +1,312 @@
+//===----- CGObjCRuntime.h - Interface to ObjC Runtimes ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for Objective-C code generation. Concrete
+// subclasses of this implement code generation for specific Objective-C
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGOBJCRUNTIME_H
+#define LLVM_CLANG_LIB_CODEGEN_CGOBJCRUNTIME_H
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGValue.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/IdentifierTable.h" // Selector
+
+namespace llvm {
+ class Constant;
+ class Function;
+ class Module;
+ class StructLayout;
+ class StructType;
+ class Type;
+ class Value;
+}
+
+namespace clang {
+namespace CodeGen {
+ class CodeGenFunction;
+}
+
+ class FieldDecl;
+ class ObjCAtTryStmt;
+ class ObjCAtThrowStmt;
+ class ObjCAtSynchronizedStmt;
+ class ObjCContainerDecl;
+ class ObjCCategoryImplDecl;
+ class ObjCImplementationDecl;
+ class ObjCInterfaceDecl;
+ class ObjCMessageExpr;
+ class ObjCMethodDecl;
+ class ObjCProtocolDecl;
+ class Selector;
+ class ObjCIvarDecl;
+ class ObjCStringLiteral;
+ class BlockDeclRefExpr;
+
+namespace CodeGen {
+ class CodeGenModule;
+ class CGBlockInfo;
+
+// FIXME: Several methods should be pure virtual but aren't, to avoid
+// breaking partially-implemented subclasses.
+
+/// Implements runtime-specific code generation functions.
+class CGObjCRuntime {
+protected:
+ CodeGen::CodeGenModule &CGM;
+ CGObjCRuntime(CodeGen::CodeGenModule &CGM) : CGM(CGM) {}
+
+ // Utility functions for unified ivar access. These need to
+ // eventually be folded into other places (the structure layout
+ // code).
+
+ /// Compute an offset to the given ivar, suitable for passing to
+ /// EmitValueForIvarAtOffset. Note that the correct handling of
+ /// bit-fields is carefully coordinated by these two, use caution!
+ ///
+ /// The latter overload is suitable for computing the offset of a
+ /// synthesized ivar.
+ uint64_t ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *Ivar);
+ uint64_t ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCImplementationDecl *OID,
+ const ObjCIvarDecl *Ivar);
+
+ LValue EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *OID,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers,
+ llvm::Value *Offset);
+ /// Emits a try / catch statement. This function is intended to be called by
+ /// subclasses, and provides a generic mechanism for generating these, which
+ /// should be usable by all runtimes. The caller must provide the functions
+ /// to call when entering and exiting a \@catch() block, and the function
+ /// used to rethrow exceptions. If the begin and end catch functions are
+ /// NULL, then the function assumes that the EH personality function provides
+ /// the thrown object directly.
+ void EmitTryCatchStmt(CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S,
+ llvm::Constant *beginCatchFn,
+ llvm::Constant *endCatchFn,
+ llvm::Constant *exceptionRethrowFn);
+
+ void EmitInitOfCatchParam(CodeGenFunction &CGF, llvm::Value *exn,
+ const VarDecl *paramDecl);
+
+ /// Emits an \@synchronized() statement, using the \p syncEnterFn and
+ /// \p syncExitFn arguments as the functions called to lock and unlock
+ /// the object. This function can be called by subclasses that use
+ /// zero-cost exception handling.
+ void EmitAtSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S,
+ llvm::Function *syncEnterFn,
+ llvm::Function *syncExitFn);
+
+public:
+ virtual ~CGObjCRuntime();
+
+ /// Generate the function required to register all Objective-C components in
+ /// this compilation unit with the runtime library.
+ virtual llvm::Function *ModuleInitFunction() = 0;
+
+ /// Get a selector for the specified name and type values.
+ /// The result should have the LLVM type for ASTContext::getObjCSelType().
+ virtual llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel) = 0;
+
+ /// Get the address of a selector for the specified name and type values.
+ /// This is a rarely-used language extension, but sadly it exists.
+ ///
+ /// The result should have the LLVM type for a pointer to
+ /// ASTContext::getObjCSelType().
+ virtual Address GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) = 0;
+
+ /// Get a typed selector.
+ virtual llvm::Value *GetSelector(CodeGenFunction &CGF,
+ const ObjCMethodDecl *Method) = 0;
+
+ /// Get the type constant to catch for the given ObjC pointer type.
+ /// This is used externally to implement catching ObjC types in C++.
+ /// Runtimes which don't support this should add the appropriate
+ /// error to Sema.
+ virtual llvm::Constant *GetEHType(QualType T) = 0;
+
+ /// Generate a constant string object.
+ virtual ConstantAddress GenerateConstantString(const StringLiteral *) = 0;
+
+ /// Generate a category. A category contains a list of methods (and
+ /// accompanying metadata) and a list of protocols.
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *OCD) = 0;
+
+ /// Generate a class structure for this class.
+ virtual void GenerateClass(const ObjCImplementationDecl *OID) = 0;
+
+ /// Register a class alias.
+ virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) = 0;
+
+ /// Generate an Objective-C message send operation.
+ ///
+ /// \param Method - The method being called, this may be null if synthesizing
+ /// a property setter or getter.
+ virtual CodeGen::RValue
+ GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot ReturnSlot,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class = nullptr,
+ const ObjCMethodDecl *Method = nullptr) = 0;
+
+ /// Generate an Objective-C message send operation to the super
+ /// class initiated in a method for Class and with the given Self
+ /// object.
+ ///
+ /// \param Method - The method being called, this may be null if synthesizing
+ /// a property setter or getter.
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot ReturnSlot,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Self,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method = nullptr) = 0;
+
+ /// Emit the code to return the named protocol as an object, as in a
+ /// \@protocol expression.
+ virtual llvm::Value *GenerateProtocolRef(CodeGenFunction &CGF,
+ const ObjCProtocolDecl *OPD) = 0;
+
+ /// Generate the named protocol. Protocols contain method metadata but no
+ /// implementations.
+ virtual void GenerateProtocol(const ObjCProtocolDecl *OPD) = 0;
+
+ /// Generate a function preamble for a method with the specified
+ /// types.
+
+ // FIXME: Currently this just generates the Function definition, but really
+ // this should also be generating the loads of the parameters, as the runtime
+ // should have full control over how parameters are passed.
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) = 0;
+
+ /// Return the runtime function for getting properties.
+ virtual llvm::Constant *GetPropertyGetFunction() = 0;
+
+ /// Return the runtime function for setting properties.
+ virtual llvm::Constant *GetPropertySetFunction() = 0;
+
+ /// Return the runtime function for optimized setting properties.
+ virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) = 0;
+
+ // API for atomic copying of qualified aggregates in getter.
+ virtual llvm::Constant *GetGetStructFunction() = 0;
+ // API for atomic copying of qualified aggregates in setter.
+ virtual llvm::Constant *GetSetStructFunction() = 0;
+ /// API for atomic copying of qualified aggregates with non-trivial copy
+ /// assignment (c++) in setter.
+ virtual llvm::Constant *GetCppAtomicObjectSetFunction() = 0;
+ /// API for atomic copying of qualified aggregates with non-trivial copy
+ /// assignment (c++) in getter.
+ virtual llvm::Constant *GetCppAtomicObjectGetFunction() = 0;
+
+ /// GetClass - Return a reference to the class for the given
+ /// interface decl.
+ virtual llvm::Value *GetClass(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *OID) = 0;
+
+ virtual llvm::Value *EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) {
+ llvm_unreachable("autoreleasepool unsupported in this ABI");
+ }
+
+ /// EnumerationMutationFunction - Return the function that's called by the
+ /// compiler when a mutation is detected during foreach iteration.
+ virtual llvm::Constant *EnumerationMutationFunction() = 0;
+
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) = 0;
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) = 0;
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S,
+ bool ClearInsertionPoint=true) = 0;
+ virtual llvm::Value *EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ Address AddrWeakObj) = 0;
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, Address dest) = 0;
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, Address dest,
+ bool threadlocal=false) = 0;
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, Address dest,
+ llvm::Value *ivarOffset) = 0;
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, Address dest) = 0;
+
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) = 0;
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) = 0;
+ virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ Address DestPtr,
+ Address SrcPtr,
+ llvm::Value *Size) = 0;
+ virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
+ const CodeGen::CGBlockInfo &blockInfo) = 0;
+ virtual llvm::Constant *BuildRCBlockLayout(CodeGen::CodeGenModule &CGM,
+ const CodeGen::CGBlockInfo &blockInfo) = 0;
+
+ /// Returns an i8* which points to the byref layout information.
+ virtual llvm::Constant *BuildByrefLayout(CodeGen::CodeGenModule &CGM,
+ QualType T) = 0;
+
+ virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name,
+ bool Weak = false) = 0;
+
+ struct MessageSendInfo {
+ const CGFunctionInfo &CallInfo;
+ llvm::PointerType *MessengerType;
+
+ MessageSendInfo(const CGFunctionInfo &callInfo,
+ llvm::PointerType *messengerType)
+ : CallInfo(callInfo), MessengerType(messengerType) {}
+ };
+
+ MessageSendInfo getMessageSendInfo(const ObjCMethodDecl *method,
+ QualType resultType,
+ CallArgList &callArgs);
+
+ // FIXME: This probably shouldn't be here, but the code to compute
+ // it is here.
+ unsigned ComputeBitfieldBitOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar);
+};
+
+/// Creates an instance of an Objective-C runtime class.
+// TODO: This should include some way of selecting which runtime to target.
+CGObjCRuntime *CreateGNUObjCRuntime(CodeGenModule &CGM);
+CGObjCRuntime *CreateMacObjCRuntime(CodeGenModule &CGM);
+}
+}
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGOpenCLRuntime.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGOpenCLRuntime.cpp
new file mode 100644
index 0000000..8af39ce
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGOpenCLRuntime.cpp
@@ -0,0 +1,101 @@
+//===----- CGOpenCLRuntime.cpp - Interface to OpenCL Runtimes -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for OpenCL code generation. Concrete
+// subclasses of this implement code generation for specific OpenCL
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGOpenCLRuntime.h"
+#include "CodeGenFunction.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GlobalValue.h"
+#include <cassert>
+
+using namespace clang;
+using namespace CodeGen;
+
+CGOpenCLRuntime::~CGOpenCLRuntime() {}
+
+void CGOpenCLRuntime::EmitWorkGroupLocalVarDecl(CodeGenFunction &CGF,
+ const VarDecl &D) {
+ return CGF.EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
+}
+
+llvm::Type *CGOpenCLRuntime::convertOpenCLSpecificType(const Type *T) {
+ assert(T->isOpenCLSpecificType() &&
+ "Not an OpenCL specific type!");
+
+ llvm::LLVMContext& Ctx = CGM.getLLVMContext();
+ uint32_t ImgAddrSpc =
+ CGM.getContext().getTargetAddressSpace(LangAS::opencl_global);
+ switch (cast<BuiltinType>(T)->getKind()) {
+ default:
+ llvm_unreachable("Unexpected opencl builtin type!");
+ return nullptr;
+ case BuiltinType::OCLImage1d:
+ return llvm::PointerType::get(llvm::StructType::create(
+ Ctx, "opencl.image1d_t"), ImgAddrSpc);
+ case BuiltinType::OCLImage1dArray:
+ return llvm::PointerType::get(llvm::StructType::create(
+ Ctx, "opencl.image1d_array_t"), ImgAddrSpc);
+ case BuiltinType::OCLImage1dBuffer:
+ return llvm::PointerType::get(llvm::StructType::create(
+ Ctx, "opencl.image1d_buffer_t"), ImgAddrSpc);
+ case BuiltinType::OCLImage2d:
+ return llvm::PointerType::get(llvm::StructType::create(
+ Ctx, "opencl.image2d_t"), ImgAddrSpc);
+ case BuiltinType::OCLImage2dArray:
+ return llvm::PointerType::get(llvm::StructType::create(
+ Ctx, "opencl.image2d_array_t"), ImgAddrSpc);
+ case BuiltinType::OCLImage2dDepth:
+ return llvm::PointerType::get(
+ llvm::StructType::create(Ctx, "opencl.image2d_depth_t"), ImgAddrSpc);
+ case BuiltinType::OCLImage2dArrayDepth:
+ return llvm::PointerType::get(
+ llvm::StructType::create(Ctx, "opencl.image2d_array_depth_t"),
+ ImgAddrSpc);
+ case BuiltinType::OCLImage2dMSAA:
+ return llvm::PointerType::get(
+ llvm::StructType::create(Ctx, "opencl.image2d_msaa_t"), ImgAddrSpc);
+ case BuiltinType::OCLImage2dArrayMSAA:
+ return llvm::PointerType::get(
+ llvm::StructType::create(Ctx, "opencl.image2d_array_msaa_t"),
+ ImgAddrSpc);
+ case BuiltinType::OCLImage2dMSAADepth:
+ return llvm::PointerType::get(
+ llvm::StructType::create(Ctx, "opencl.image2d_msaa_depth_t"),
+ ImgAddrSpc);
+ case BuiltinType::OCLImage2dArrayMSAADepth:
+ return llvm::PointerType::get(
+ llvm::StructType::create(Ctx, "opencl.image2d_array_msaa_depth_t"),
+ ImgAddrSpc);
+ case BuiltinType::OCLImage3d:
+ return llvm::PointerType::get(llvm::StructType::create(
+ Ctx, "opencl.image3d_t"), ImgAddrSpc);
+ case BuiltinType::OCLSampler:
+ return llvm::IntegerType::get(Ctx, 32);
+ case BuiltinType::OCLEvent:
+ return llvm::PointerType::get(llvm::StructType::create(
+ Ctx, "opencl.event_t"), 0);
+ case BuiltinType::OCLClkEvent:
+ return llvm::PointerType::get(
+ llvm::StructType::create(Ctx, "opencl.clk_event_t"), 0);
+ case BuiltinType::OCLQueue:
+ return llvm::PointerType::get(
+ llvm::StructType::create(Ctx, "opencl.queue_t"), 0);
+ case BuiltinType::OCLNDRange:
+ return llvm::PointerType::get(
+ llvm::StructType::create(Ctx, "opencl.ndrange_t"), 0);
+ case BuiltinType::OCLReserveID:
+ return llvm::PointerType::get(
+ llvm::StructType::create(Ctx, "opencl.reserve_id_t"), 0);
+ }
+}
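+
+// Editorial example: with the mapping above, a kernel parameter declared as
+//
+//   kernel void k(read_only image2d_t img);   // hypothetical
+//
+// becomes a pointer to the opaque struct %opencl.image2d_t in the target's
+// opencl_global address space (ImgAddrSpc), while sampler_t becomes i32.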
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGOpenCLRuntime.h b/contrib/llvm/tools/clang/lib/CodeGen/CGOpenCLRuntime.h
new file mode 100644
index 0000000..0c50b92
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGOpenCLRuntime.h
@@ -0,0 +1,52 @@
+//===----- CGOpenCLRuntime.h - Interface to OpenCL Runtimes -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for OpenCL code generation. Concrete
+// subclasses of this implement code generation for specific OpenCL
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENCLRUNTIME_H
+#define LLVM_CLANG_LIB_CODEGEN_CGOPENCLRUNTIME_H
+
+#include "clang/AST/Type.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+
+namespace clang {
+
+class VarDecl;
+
+namespace CodeGen {
+
+class CodeGenFunction;
+class CodeGenModule;
+
+class CGOpenCLRuntime {
+protected:
+ CodeGenModule &CGM;
+
+public:
+ CGOpenCLRuntime(CodeGenModule &CGM) : CGM(CGM) {}
+ virtual ~CGOpenCLRuntime();
+
+ /// Emit the IR required for a work-group-local variable declaration, and add
+ /// an entry to CGF's LocalDeclMap for D. The base class does this using
+ /// CodeGenFunction::EmitStaticVarDecl to emit an internal global for D.
+ virtual void EmitWorkGroupLocalVarDecl(CodeGenFunction &CGF,
+ const VarDecl &D);
+
+ virtual llvm::Type *convertOpenCLSpecificType(const Type *T);
+};
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGOpenMPRuntime.cpp
new file mode 100644
index 0000000..6d4fc9f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -0,0 +1,4258 @@
+//===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides a class for OpenMP runtime code generation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCXXABI.h"
+#include "CGCleanup.h"
+#include "CGOpenMPRuntime.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/StmtOpenMP.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+/// \brief Base class for handling code generation inside OpenMP regions.
+class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
+public:
+ /// \brief Kinds of OpenMP regions used in codegen.
+ enum CGOpenMPRegionKind {
+ /// \brief Region with outlined function for standalone 'parallel'
+ /// directive.
+ ParallelOutlinedRegion,
+ /// \brief Region with outlined function for standalone 'task' directive.
+ TaskOutlinedRegion,
+ /// \brief Region for constructs that do not require function outlining,
+ /// like the 'for', 'sections', and 'atomic' directives.
+ InlinedRegion,
+ /// \brief Region with outlined function for standalone 'target' directive.
+ TargetRegion,
+ };
+
+ CGOpenMPRegionInfo(const CapturedStmt &CS,
+ const CGOpenMPRegionKind RegionKind,
+ const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
+ bool HasCancel)
+ : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
+ CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}
+
+ CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
+ const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
+ bool HasCancel)
+ : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
+ Kind(Kind), HasCancel(HasCancel) {}
+
+ /// \brief Get a variable or parameter for storing global thread id
+ /// inside OpenMP construct.
+ virtual const VarDecl *getThreadIDVariable() const = 0;
+
+ /// \brief Emit the captured statement body.
+ void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;
+
+ /// \brief Get an LValue for the current ThreadID variable.
+ /// \return LValue for thread id variable. This LValue always has type int32*.
+ virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);
+
+ CGOpenMPRegionKind getRegionKind() const { return RegionKind; }
+
+ OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
+
+ bool hasCancel() const { return HasCancel; }
+
+ static bool classof(const CGCapturedStmtInfo *Info) {
+ return Info->getKind() == CR_OpenMP;
+ }
+
+protected:
+ CGOpenMPRegionKind RegionKind;
+ const RegionCodeGenTy &CodeGen;
+ OpenMPDirectiveKind Kind;
+ bool HasCancel;
+};
+
+/// \brief API for captured statement code generation in OpenMP constructs.
+class CGOpenMPOutlinedRegionInfo : public CGOpenMPRegionInfo {
+public:
+ CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
+ const RegionCodeGenTy &CodeGen,
+ OpenMPDirectiveKind Kind, bool HasCancel)
+ : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
+ HasCancel),
+ ThreadIDVar(ThreadIDVar) {
+ assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
+ }
+ /// \brief Get a variable or parameter for storing global thread id
+ /// inside OpenMP construct.
+ const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
+
+ /// \brief Get the name of the capture helper.
+ StringRef getHelperName() const override { return ".omp_outlined."; }
+
+ static bool classof(const CGCapturedStmtInfo *Info) {
+ return CGOpenMPRegionInfo::classof(Info) &&
+ cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
+ ParallelOutlinedRegion;
+ }
+
+private:
+ /// \brief A variable or parameter storing global thread id for OpenMP
+ /// constructs.
+ const VarDecl *ThreadIDVar;
+};
+
+/// \brief API for captured statement code generation in OpenMP constructs.
+class CGOpenMPTaskOutlinedRegionInfo : public CGOpenMPRegionInfo {
+public:
+ CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
+ const VarDecl *ThreadIDVar,
+ const RegionCodeGenTy &CodeGen,
+ OpenMPDirectiveKind Kind, bool HasCancel)
+ : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
+ ThreadIDVar(ThreadIDVar) {
+ assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
+ }
+ /// \brief Get a variable or parameter for storing global thread id
+ /// inside OpenMP construct.
+ const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
+
+ /// \brief Get an LValue for the current ThreadID variable.
+ LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;
+
+ /// \brief Get the name of the capture helper.
+ StringRef getHelperName() const override { return ".omp_outlined."; }
+
+ static bool classof(const CGCapturedStmtInfo *Info) {
+ return CGOpenMPRegionInfo::classof(Info) &&
+ cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
+ TaskOutlinedRegion;
+ }
+
+private:
+ /// \brief A variable or parameter storing global thread id for OpenMP
+ /// constructs.
+ const VarDecl *ThreadIDVar;
+};
+
+/// \brief API for inlined captured statement code generation in OpenMP
+/// constructs.
+class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
+public:
+ CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
+ const RegionCodeGenTy &CodeGen,
+ OpenMPDirectiveKind Kind, bool HasCancel)
+ : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
+ OldCSI(OldCSI),
+ OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}
+ /// \brief Retrieve the value of the context parameter.
+ llvm::Value *getContextValue() const override {
+ if (OuterRegionInfo)
+ return OuterRegionInfo->getContextValue();
+ llvm_unreachable("No context value for inlined OpenMP region");
+ }
+ void setContextValue(llvm::Value *V) override {
+ if (OuterRegionInfo) {
+ OuterRegionInfo->setContextValue(V);
+ return;
+ }
+ llvm_unreachable("No context value for inlined OpenMP region");
+ }
+ /// \brief Lookup the captured field decl for a variable.
+ const FieldDecl *lookup(const VarDecl *VD) const override {
+ if (OuterRegionInfo)
+ return OuterRegionInfo->lookup(VD);
+ // If there is no outer outlined region, there is no need to look up the
+ // variable in a list of captured variables; we can use the original one.
+ return nullptr;
+ }
+ FieldDecl *getThisFieldDecl() const override {
+ if (OuterRegionInfo)
+ return OuterRegionInfo->getThisFieldDecl();
+ return nullptr;
+ }
+ /// \brief Get a variable or parameter for storing global thread id
+ /// inside OpenMP construct.
+ const VarDecl *getThreadIDVariable() const override {
+ if (OuterRegionInfo)
+ return OuterRegionInfo->getThreadIDVariable();
+ return nullptr;
+ }
+
+ /// \brief Get the name of the capture helper.
+ StringRef getHelperName() const override {
+ if (auto *OuterRegionInfo = getOldCSI())
+ return OuterRegionInfo->getHelperName();
+ llvm_unreachable("No helper name for inlined OpenMP construct");
+ }
+
+ CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }
+
+ static bool classof(const CGCapturedStmtInfo *Info) {
+ return CGOpenMPRegionInfo::classof(Info) &&
+ cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
+ }
+
+private:
+ /// \brief CodeGen info about outer OpenMP region.
+ CodeGenFunction::CGCapturedStmtInfo *OldCSI;
+ CGOpenMPRegionInfo *OuterRegionInfo;
+};
+
+/// \brief API for captured statement code generation in OpenMP target
+/// constructs. For these captures, implicit parameters are used instead of
+/// the captured fields. The name of the target region has to be unique in a
+/// given application, so it is provided by the client, since only the client
+/// has the information needed to generate it.
+class CGOpenMPTargetRegionInfo : public CGOpenMPRegionInfo {
+public:
+ CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
+ const RegionCodeGenTy &CodeGen, StringRef HelperName)
+ : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
+ /*HasCancel=*/false),
+ HelperName(HelperName) {}
+
+ /// \brief This is unused for target regions because each starts executing
+ /// with a single thread.
+ const VarDecl *getThreadIDVariable() const override { return nullptr; }
+
+ /// \brief Get the name of the capture helper.
+ StringRef getHelperName() const override { return HelperName; }
+
+ static bool classof(const CGCapturedStmtInfo *Info) {
+ return CGOpenMPRegionInfo::classof(Info) &&
+ cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
+ }
+
+private:
+ StringRef HelperName;
+};
+
+/// \brief RAII for emitting code of OpenMP constructs.
+class InlinedOpenMPRegionRAII {
+ CodeGenFunction &CGF;
+
+public:
+ /// \brief Constructs region for combined constructs.
+ /// \param CodeGen Code generation sequence for combined directives. Includes
+ /// a list of functions used for code generation of implicitly inlined
+ /// regions.
+ InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
+ OpenMPDirectiveKind Kind, bool HasCancel)
+ : CGF(CGF) {
+ // Start emission for the construct.
+ CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
+ CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
+ }
+ ~InlinedOpenMPRegionRAII() {
+ // Restore original CapturedStmtInfo only if we're done with code emission.
+ auto *OldCSI =
+ cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
+ delete CGF.CapturedStmtInfo;
+ CGF.CapturedStmtInfo = OldCSI;
+ }
+};
+
+} // anonymous namespace
+
+static LValue emitLoadOfPointerLValue(CodeGenFunction &CGF, Address PtrAddr,
+ QualType Ty) {
+ AlignmentSource Source;
+ CharUnits Align = CGF.getNaturalPointeeTypeAlignment(Ty, &Source);
+ return CGF.MakeAddrLValue(Address(CGF.Builder.CreateLoad(PtrAddr), Align),
+ Ty->getPointeeType(), Source);
+}
+
+LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
+ return emitLoadOfPointerLValue(CGF,
+ CGF.GetAddrOfLocalVar(getThreadIDVariable()),
+ getThreadIDVariable()->getType());
+}
+
+void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CGF.EHStack.pushTerminate();
+ {
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ CodeGen(CGF);
+ }
+ CGF.EHStack.popTerminate();
+}
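+
+// Editorial note: the terminate scope pushed above enforces the structured-
+// block rule quoted in the comment; e.g. in
+//
+//   #pragma omp parallel
+//   { throw 0; }   // hypothetical; must not unwind out of the region
+//
+// an escaping exception reaches the terminate handler instead of
+// propagating out of the outlined body.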
+
+LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
+ CodeGenFunction &CGF) {
+ return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
+ getThreadIDVariable()->getType(),
+ AlignmentSource::Decl);
+}
+
+CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM)
+ : CGM(CGM), DefaultOpenMPPSource(nullptr), KmpRoutineEntryPtrTy(nullptr),
+ OffloadEntriesInfoManager(CGM) {
+ IdentTy = llvm::StructType::create(
+ "ident_t", CGM.Int32Ty /* reserved_1 */, CGM.Int32Ty /* flags */,
+ CGM.Int32Ty /* reserved_2 */, CGM.Int32Ty /* reserved_3 */,
+ CGM.Int8PtrTy /* psource */, nullptr);
+ // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
+ llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
+ llvm::PointerType::getUnqual(CGM.Int32Ty)};
+ Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
+ KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
+
+ loadOffloadInfoMetadata();
+}
+
+void CGOpenMPRuntime::clear() {
+ InternalVars.clear();
+}
+
+// Layout information for ident_t.
+static CharUnits getIdentAlign(CodeGenModule &CGM) {
+ return CGM.getPointerAlign();
+}
+static CharUnits getIdentSize(CodeGenModule &CGM) {
+ assert((4 * CGM.getPointerSize()).isMultipleOf(CGM.getPointerAlign()));
+ return CharUnits::fromQuantity(16) + CGM.getPointerSize();
+}
+static CharUnits getOffsetOfIdentField(CGOpenMPRuntime::IdentFieldIndex Field) {
+ // All the fields except the last are i32, so this works beautifully.
+ return unsigned(Field) * CharUnits::fromQuantity(4);
+}
+static Address createIdentFieldGEP(CodeGenFunction &CGF, Address Addr,
+ CGOpenMPRuntime::IdentFieldIndex Field,
+ const llvm::Twine &Name = "") {
+ auto Offset = getOffsetOfIdentField(Field);
+ return CGF.Builder.CreateStructGEP(Addr, Field, Offset, Name);
+}
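+
+// Editorial sketch: the helpers above assume the field layout of the libomp
+// ident_t, which is conventionally declared along the lines of
+//
+//   typedef struct ident {
+//     kmp_int32 reserved_1;
+//     kmp_int32 flags;
+//     kmp_int32 reserved_2;
+//     kmp_int32 reserved_3;
+//     char const *psource;   // ";file;func;line;col;;"
+//   } ident_t;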
+
+llvm::Value *CGOpenMPRuntime::emitParallelOutlinedFunction(
+ const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
+ OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
+ assert(ThreadIDVar->getType()->isPointerType() &&
+ "thread id variable must be of type kmp_int32 *");
+ const CapturedStmt *CS = cast<CapturedStmt>(D.getAssociatedStmt());
+ CodeGenFunction CGF(CGM, true);
+ bool HasCancel = false;
+ if (auto *OPD = dyn_cast<OMPParallelDirective>(&D))
+ HasCancel = OPD->hasCancel();
+ else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
+ HasCancel = OPSD->hasCancel();
+ else if (auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
+ HasCancel = OPFD->hasCancel();
+ CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
+ HasCancel);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
+ return CGF.GenerateOpenMPCapturedStmtFunction(*CS);
+}
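+
+// Editorial sketch: for "#pragma omp parallel { body }" the captured
+// statement is outlined into a helper of the form
+//
+//   void .omp_outlined.(kmp_int32 *global_tid, kmp_int32 *bound_tid,
+//                       /* captured variables */ ...);
+//
+// matching Kmpc_MicroTy above, which is later passed to __kmpc_fork_call.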
+
+llvm::Value *CGOpenMPRuntime::emitTaskOutlinedFunction(
+ const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
+ OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
+ assert(!ThreadIDVar->getType()->isPointerType() &&
+ "thread id variable must be of type kmp_int32 for tasks");
+ auto *CS = cast<CapturedStmt>(D.getAssociatedStmt());
+ CodeGenFunction CGF(CGM, true);
+ CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
+ InnermostKind,
+ cast<OMPTaskDirective>(D).hasCancel());
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
+ return CGF.GenerateCapturedStmtFunction(*CS);
+}
+
+Address CGOpenMPRuntime::getOrCreateDefaultLocation(OpenMPLocationFlags Flags) {
+ CharUnits Align = getIdentAlign(CGM);
+ llvm::Value *Entry = OpenMPDefaultLocMap.lookup(Flags);
+ if (!Entry) {
+ if (!DefaultOpenMPPSource) {
+ // Initialize default location for psource field of ident_t structure of
+ // all ident_t objects. Format is ";file;function;line;column;;".
+ // Taken from
+ // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp_str.c
+ DefaultOpenMPPSource =
+ CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;").getPointer();
+ DefaultOpenMPPSource =
+ llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
+ }
+ auto DefaultOpenMPLocation = new llvm::GlobalVariable(
+ CGM.getModule(), IdentTy, /*isConstant*/ true,
+ llvm::GlobalValue::PrivateLinkage, /*Initializer*/ nullptr);
+ DefaultOpenMPLocation->setUnnamedAddr(true);
+ DefaultOpenMPLocation->setAlignment(Align.getQuantity());
+
+ llvm::Constant *Zero = llvm::ConstantInt::get(CGM.Int32Ty, 0, true);
+ llvm::Constant *Values[] = {Zero,
+ llvm::ConstantInt::get(CGM.Int32Ty, Flags),
+ Zero, Zero, DefaultOpenMPPSource};
+ llvm::Constant *Init = llvm::ConstantStruct::get(IdentTy, Values);
+ DefaultOpenMPLocation->setInitializer(Init);
+ OpenMPDefaultLocMap[Flags] = Entry = DefaultOpenMPLocation;
+ }
+ return Address(Entry, Align);
+}
+
+llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ OpenMPLocationFlags Flags) {
+ // If no debug info is generated - return global default location.
+ if (CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::NoDebugInfo ||
+ Loc.isInvalid())
+ return getOrCreateDefaultLocation(Flags).getPointer();
+
+ assert(CGF.CurFn && "No function in current CodeGenFunction.");
+
+ Address LocValue = Address::invalid();
+ auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
+ if (I != OpenMPLocThreadIDMap.end())
+ LocValue = Address(I->second.DebugLoc, getIdentAlign(CGF.CGM));
+
+ // OpenMPLocThreadIDMap may have null DebugLoc and non-null ThreadID, if
+ // GetOpenMPThreadID was called before this routine.
+ if (!LocValue.isValid()) {
+ // Generate "ident_t .kmpc_loc.addr;"
+ Address AI = CGF.CreateTempAlloca(IdentTy, getIdentAlign(CGF.CGM),
+ ".kmpc_loc.addr");
+ auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
+ Elem.second.DebugLoc = AI.getPointer();
+ LocValue = AI;
+
+ CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
+ CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
+ CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags),
+ CGM.getSize(getIdentSize(CGF.CGM)));
+ }
+
+ // char **psource = &.kmpc_loc_<flags>.addr.psource;
+ Address PSource = createIdentFieldGEP(CGF, LocValue, IdentField_PSource);
+
+ auto OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
+ if (OMPDebugLoc == nullptr) {
+ SmallString<128> Buffer2;
+ llvm::raw_svector_ostream OS2(Buffer2);
+ // Build debug location
+ PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
+ OS2 << ";" << PLoc.getFilename() << ";";
+ if (const FunctionDecl *FD =
+ dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl)) {
+ OS2 << FD->getQualifiedNameAsString();
+ }
+ OS2 << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
+ OMPDebugLoc = CGF.Builder.CreateGlobalStringPtr(OS2.str());
+ OpenMPDebugLocMap[Loc.getRawEncoding()] = OMPDebugLoc;
+ }
+ // *psource = ";<File>;<Function>;<Line>;<Column>;;";
+ CGF.Builder.CreateStore(OMPDebugLoc, PSource);
+
+ // Our callers always pass this to a runtime function, so for
+ // convenience, go ahead and return a naked pointer.
+ return LocValue.getPointer();
+}
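+
+// Editorial example: for a construct at line 10, column 3 of foo.c inside
+// function bar (hypothetical names), the string stored into psource above
+// would be ";foo.c;bar;10;3;;".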
+
+llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
+ SourceLocation Loc) {
+ assert(CGF.CurFn && "No function in current CodeGenFunction.");
+
+ llvm::Value *ThreadID = nullptr;
+ // Check whether we've already cached a load of the thread id in this
+ // function.
+ auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
+ if (I != OpenMPLocThreadIDMap.end()) {
+ ThreadID = I->second.ThreadID;
+ if (ThreadID != nullptr)
+ return ThreadID;
+ }
+ if (auto OMPRegionInfo =
+ dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
+ if (OMPRegionInfo->getThreadIDVariable()) {
+ // Check if this is an outlined function with the thread id as an argument.
+ auto LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
+ ThreadID = CGF.EmitLoadOfLValue(LVal, Loc).getScalarVal();
+ // If the value was loaded in the entry block, cache it and use it
+ // everywhere in the function.
+ if (CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
+ auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
+ Elem.second.ThreadID = ThreadID;
+ }
+ return ThreadID;
+ }
+ }
+
+ // This is not an outlined function region - need to call kmp_int32
+ // __kmpc_global_thread_num(ident_t *loc).
+ // Generate thread id value and cache this value for use across the
+ // function.
+ CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
+ CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
+ ThreadID =
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
+ emitUpdateLocation(CGF, Loc));
+ auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
+ Elem.second.ThreadID = ThreadID;
+ return ThreadID;
+}
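+
+// Editorial sketch of the call emitted above, in pseudo-IR:
+//
+//   entry:
+//     %tid = call i32 @__kmpc_global_thread_num(%ident_t* %loc)
+//
+// The value is cached in OpenMPLocThreadIDMap so later queries in the same
+// function reuse it instead of re-calling the runtime.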
+
+void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
+ assert(CGF.CurFn && "No function in current CodeGenFunction.");
+ if (OpenMPLocThreadIDMap.count(CGF.CurFn))
+ OpenMPLocThreadIDMap.erase(CGF.CurFn);
+}
+
+llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
+ return llvm::PointerType::getUnqual(IdentTy);
+}
+
+llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
+ return llvm::PointerType::getUnqual(Kmpc_MicroTy);
+}
+
+llvm::Constant *
+CGOpenMPRuntime::createRuntimeFunction(OpenMPRTLFunction Function) {
+ llvm::Constant *RTLFn = nullptr;
+ switch (Function) {
+ case OMPRTL__kmpc_fork_call: {
+ // Build void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro
+ // microtask, ...);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
+ getKmpc_MicroPointerTy()};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call");
+ break;
+ }
+ case OMPRTL__kmpc_global_thread_num: {
+ // Build kmp_int32 __kmpc_global_thread_num(ident_t *loc);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_global_thread_num");
+ break;
+ }
+ case OMPRTL__kmpc_threadprivate_cached: {
+ // Build void *__kmpc_threadprivate_cached(ident_t *loc,
+ // kmp_int32 global_tid, void *data, size_t size, void ***cache);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
+ CGM.VoidPtrTy, CGM.SizeTy,
+ CGM.VoidPtrTy->getPointerTo()->getPointerTo()};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_cached");
+ break;
+ }
+ case OMPRTL__kmpc_critical: {
+ // Build void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
+ // kmp_critical_name *crit);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), CGM.Int32Ty,
+ llvm::PointerType::getUnqual(KmpCriticalNameTy)};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical");
+ break;
+ }
+ case OMPRTL__kmpc_critical_with_hint: {
+ // Build void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid,
+ // kmp_critical_name *crit, uintptr_t hint);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
+ llvm::PointerType::getUnqual(KmpCriticalNameTy),
+ CGM.IntPtrTy};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical_with_hint");
+ break;
+ }
+ case OMPRTL__kmpc_threadprivate_register: {
+ // Build void __kmpc_threadprivate_register(ident_t *, void *data,
+ // kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
+ // typedef void *(*kmpc_ctor)(void *);
+ auto KmpcCtorTy =
+ llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
+ /*isVarArg*/ false)->getPointerTo();
+ // typedef void *(*kmpc_cctor)(void *, void *);
+ llvm::Type *KmpcCopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
+ auto KmpcCopyCtorTy =
+ llvm::FunctionType::get(CGM.VoidPtrTy, KmpcCopyCtorTyArgs,
+ /*isVarArg*/ false)->getPointerTo();
+ // typedef void (*kmpc_dtor)(void *);
+ auto KmpcDtorTy =
+ llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, /*isVarArg*/ false)
+ ->getPointerTo();
+ llvm::Type *FnTyArgs[] = {getIdentTyPointerTy(), CGM.VoidPtrTy, KmpcCtorTy,
+ KmpcCopyCtorTy, KmpcDtorTy};
+ auto FnTy = llvm::FunctionType::get(CGM.VoidTy, FnTyArgs,
+ /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_register");
+ break;
+ }
+ case OMPRTL__kmpc_end_critical: {
+ // Build void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
+ // kmp_critical_name *crit);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), CGM.Int32Ty,
+ llvm::PointerType::getUnqual(KmpCriticalNameTy)};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_critical");
+ break;
+ }
+ case OMPRTL__kmpc_cancel_barrier: {
+ // Build kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
+ // global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_cancel_barrier");
+ break;
+ }
+ case OMPRTL__kmpc_barrier: {
+ // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
+ break;
+ }
+ case OMPRTL__kmpc_for_static_fini: {
+ // Build void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_fini");
+ break;
+ }
+ case OMPRTL__kmpc_push_num_threads: {
+ // Build void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
+ // kmp_int32 num_threads)
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
+ CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_threads");
+ break;
+ }
+ case OMPRTL__kmpc_serialized_parallel: {
+ // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
+ // global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
+ break;
+ }
+ case OMPRTL__kmpc_end_serialized_parallel: {
+ // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
+ // global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
+ break;
+ }
+ case OMPRTL__kmpc_flush: {
+ // Build void __kmpc_flush(ident_t *loc);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_flush");
+ break;
+ }
+ case OMPRTL__kmpc_master: {
+ // Build kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_master");
+ break;
+ }
+ case OMPRTL__kmpc_end_master: {
+ // Build void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_master");
+ break;
+ }
+ case OMPRTL__kmpc_omp_taskyield: {
+ // Build kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
+ // int end_part);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_taskyield");
+ break;
+ }
+ case OMPRTL__kmpc_single: {
+ // Build kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_single");
+ break;
+ }
+ case OMPRTL__kmpc_end_single: {
+ // Build void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_single");
+ break;
+ }
+ case OMPRTL__kmpc_omp_task_alloc: {
+ // Build kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
+ // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
+ // kmp_routine_entry_t *task_entry);
+ assert(KmpRoutineEntryPtrTy != nullptr &&
+ "Type kmp_routine_entry_t must be created.");
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
+ CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy};
+ // Return void * and then cast to particular kmp_task_t type.
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_alloc");
+ break;
+ }
+ case OMPRTL__kmpc_omp_task: {
+ // Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
+ // *new_task);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
+ CGM.VoidPtrTy};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task");
+ break;
+ }
+ case OMPRTL__kmpc_copyprivate: {
+ // Build void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
+ // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
+ // kmp_int32 didit);
+ llvm::Type *CpyTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
+ auto *CpyFnTy =
+ llvm::FunctionType::get(CGM.VoidTy, CpyTypeParams, /*isVarArg=*/false);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.SizeTy,
+ CGM.VoidPtrTy, CpyFnTy->getPointerTo(),
+ CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_copyprivate");
+ break;
+ }
+ case OMPRTL__kmpc_reduce: {
+ // Build kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
+ // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
+ // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
+ llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
+ auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
+ /*isVarArg=*/false);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
+ CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
+ llvm::PointerType::getUnqual(KmpCriticalNameTy)};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce");
+ break;
+ }
+ case OMPRTL__kmpc_reduce_nowait: {
+ // Build kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
+ // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
+ // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
+ // *lck);
+ llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
+ auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
+ /*isVarArg=*/false);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
+ CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
+ llvm::PointerType::getUnqual(KmpCriticalNameTy)};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce_nowait");
+ break;
+ }
+ case OMPRTL__kmpc_end_reduce: {
+ // Build void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
+ // kmp_critical_name *lck);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), CGM.Int32Ty,
+ llvm::PointerType::getUnqual(KmpCriticalNameTy)};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce");
+ break;
+ }
+ case OMPRTL__kmpc_end_reduce_nowait: {
+    // Build void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
+ // kmp_critical_name *lck);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), CGM.Int32Ty,
+ llvm::PointerType::getUnqual(KmpCriticalNameTy)};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
+ RTLFn =
+ CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce_nowait");
+ break;
+ }
+ case OMPRTL__kmpc_omp_task_begin_if0: {
+    // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
+    // kmp_task_t *new_task);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
+ CGM.VoidPtrTy};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
+ RTLFn =
+ CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_begin_if0");
+ break;
+ }
+ case OMPRTL__kmpc_omp_task_complete_if0: {
+    // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
+    // kmp_task_t *new_task);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
+ CGM.VoidPtrTy};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy,
+ /*Name=*/"__kmpc_omp_task_complete_if0");
+ break;
+ }
+ case OMPRTL__kmpc_ordered: {
+ // Build void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_ordered");
+ break;
+ }
+ case OMPRTL__kmpc_end_ordered: {
+ // Build void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_ordered");
+ break;
+ }
+ case OMPRTL__kmpc_omp_taskwait: {
+ // Build kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_omp_taskwait");
+ break;
+ }
+ case OMPRTL__kmpc_taskgroup: {
+ // Build void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_taskgroup");
+ break;
+ }
+ case OMPRTL__kmpc_end_taskgroup: {
+ // Build void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_taskgroup");
+ break;
+ }
+ case OMPRTL__kmpc_push_proc_bind: {
+ // Build void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
+ // int proc_bind)
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_proc_bind");
+ break;
+ }
+ case OMPRTL__kmpc_omp_task_with_deps: {
+ // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
+ // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
+ // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), CGM.Int32Ty, CGM.VoidPtrTy, CGM.Int32Ty,
+ CGM.VoidPtrTy, CGM.Int32Ty, CGM.VoidPtrTy};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
+ RTLFn =
+ CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_with_deps");
+ break;
+ }
+ case OMPRTL__kmpc_omp_wait_deps: {
+ // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
+ // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
+ // kmp_depend_info_t *noalias_dep_list);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
+ CGM.Int32Ty, CGM.VoidPtrTy,
+ CGM.Int32Ty, CGM.VoidPtrTy};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_wait_deps");
+ break;
+ }
+ case OMPRTL__kmpc_cancellationpoint: {
+ // Build kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
+ // global_tid, kmp_int32 cncl_kind)
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancellationpoint");
+ break;
+ }
+ case OMPRTL__kmpc_cancel: {
+ // Build kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
+ // kmp_int32 cncl_kind)
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancel");
+ break;
+ }
+ case OMPRTL__tgt_target: {
+ // Build int32_t __tgt_target(int32_t device_id, void *host_ptr, int32_t
+ // arg_num, void** args_base, void **args, size_t *arg_sizes, int32_t
+ // *arg_types);
+ llvm::Type *TypeParams[] = {CGM.Int32Ty,
+ CGM.VoidPtrTy,
+ CGM.Int32Ty,
+ CGM.VoidPtrPtrTy,
+ CGM.VoidPtrPtrTy,
+ CGM.SizeTy->getPointerTo(),
+ CGM.Int32Ty->getPointerTo()};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target");
+ break;
+ }
+ case OMPRTL__tgt_register_lib: {
+ // Build void __tgt_register_lib(__tgt_bin_desc *desc);
+ QualType ParamTy =
+ CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
+ llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
+ llvm::FunctionType *FnTy =
+        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_lib");
+ break;
+ }
+ case OMPRTL__tgt_unregister_lib: {
+ // Build void __tgt_unregister_lib(__tgt_bin_desc *desc);
+ QualType ParamTy =
+ CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
+ llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
+ llvm::FunctionType *FnTy =
+        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_unregister_lib");
+ break;
+ }
+ }
+ return RTLFn;
+}
+
+static llvm::Value *getTypeSize(CodeGenFunction &CGF, QualType Ty) {
+ auto &C = CGF.getContext();
+ llvm::Value *Size = nullptr;
+ auto SizeInChars = C.getTypeSizeInChars(Ty);
+ if (SizeInChars.isZero()) {
+ // getTypeSizeInChars() returns 0 for a VLA.
+ while (auto *VAT = C.getAsVariableArrayType(Ty)) {
+ llvm::Value *ArraySize;
+ std::tie(ArraySize, Ty) = CGF.getVLASize(VAT);
+ Size = Size ? CGF.Builder.CreateNUWMul(Size, ArraySize) : ArraySize;
+ }
+ SizeInChars = C.getTypeSizeInChars(Ty);
+ assert(!SizeInChars.isZero());
+ Size = CGF.Builder.CreateNUWMul(
+ Size, llvm::ConstantInt::get(CGF.SizeTy, SizeInChars.getQuantity()));
+ } else
+ Size = llvm::ConstantInt::get(CGF.SizeTy, SizeInChars.getQuantity());
+ return Size;
+}
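+
+// Example (a conceptual sketch, not emitted verbatim): for a VLA such as
+//   double a[n][m];
+// getTypeSizeInChars() returns zero, the loop above peels the variable
+// dimensions, and the computed size becomes
+//   Size = n * m * sizeof(double)
+// built with NUW multiplications. For fixed-size types the size folds to a
+// single llvm::ConstantInt.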
+
+llvm::Constant *CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize,
+ bool IVSigned) {
+ assert((IVSize == 32 || IVSize == 64) &&
+ "IV size is not compatible with the omp runtime");
+ auto Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
+ : "__kmpc_for_static_init_4u")
+ : (IVSigned ? "__kmpc_for_static_init_8"
+ : "__kmpc_for_static_init_8u");
+ auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
+ auto PtrTy = llvm::PointerType::getUnqual(ITy);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), // loc
+ CGM.Int32Ty, // tid
+ CGM.Int32Ty, // schedtype
+ llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
+ PtrTy, // p_lower
+ PtrTy, // p_upper
+ PtrTy, // p_stride
+ ITy, // incr
+ ITy // chunk
+ };
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ return CGM.CreateRuntimeFunction(FnTy, Name);
+}
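+
+// Example of the name selection above:
+//   IVSize == 32, IVSigned == true   -> "__kmpc_for_static_init_4"
+//   IVSize == 64, IVSigned == false  -> "__kmpc_for_static_init_8u"
+// The same convention is used by the dispatch init/fini/next helpers below.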
+
+llvm::Constant *CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize,
+ bool IVSigned) {
+ assert((IVSize == 32 || IVSize == 64) &&
+ "IV size is not compatible with the omp runtime");
+ auto Name =
+ IVSize == 32
+ ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
+ : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
+ auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
+ llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
+ CGM.Int32Ty, // tid
+ CGM.Int32Ty, // schedtype
+ ITy, // lower
+ ITy, // upper
+ ITy, // stride
+ ITy // chunk
+ };
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ return CGM.CreateRuntimeFunction(FnTy, Name);
+}
+
+llvm::Constant *CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize,
+ bool IVSigned) {
+ assert((IVSize == 32 || IVSize == 64) &&
+ "IV size is not compatible with the omp runtime");
+ auto Name =
+ IVSize == 32
+ ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
+ : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), // loc
+ CGM.Int32Ty, // tid
+ };
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
+ return CGM.CreateRuntimeFunction(FnTy, Name);
+}
+
+llvm::Constant *CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize,
+ bool IVSigned) {
+ assert((IVSize == 32 || IVSize == 64) &&
+ "IV size is not compatible with the omp runtime");
+ auto Name =
+ IVSize == 32
+ ? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
+ : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
+ auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
+ auto PtrTy = llvm::PointerType::getUnqual(ITy);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), // loc
+ CGM.Int32Ty, // tid
+ llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
+ PtrTy, // p_lower
+ PtrTy, // p_upper
+ PtrTy // p_stride
+ };
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
+ return CGM.CreateRuntimeFunction(FnTy, Name);
+}
+
+llvm::Constant *
+CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
+ assert(!CGM.getLangOpts().OpenMPUseTLS ||
+ !CGM.getContext().getTargetInfo().isTLSSupported());
+ // Lookup the entry, lazily creating it if necessary.
+ return getOrCreateInternalVariable(CGM.Int8PtrPtrTy,
+ Twine(CGM.getMangledName(VD)) + ".cache.");
+}
+
+Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
+ const VarDecl *VD,
+ Address VDAddr,
+ SourceLocation Loc) {
+ if (CGM.getLangOpts().OpenMPUseTLS &&
+ CGM.getContext().getTargetInfo().isTLSSupported())
+ return VDAddr;
+
+ auto VarTy = VDAddr.getElementType();
+ llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
+ CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
+ CGM.Int8PtrTy),
+ CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
+ getOrCreateThreadPrivateCache(VD)};
+ return Address(CGF.EmitRuntimeCall(
+ createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
+ VDAddr.getAlignment());
+}
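+
+// Example (conceptual): when TLS is not used, an access to a threadprivate
+// variable 'x' is lowered to something like
+//   void *p = __kmpc_threadprivate_cached(&loc, gtid, (void *)&x,
+//                                         sizeof(x), &<mangled>.cache.);
+// and 'p' is used as the address of the calling thread's copy of 'x'.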
+
+void CGOpenMPRuntime::emitThreadPrivateVarInit(
+ CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
+ llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
+ // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
+ // library.
+ auto OMPLoc = emitUpdateLocation(CGF, Loc);
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
+ OMPLoc);
+ // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
+ // to register constructor/destructor for variable.
+ llvm::Value *Args[] = {OMPLoc,
+ CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
+ CGM.VoidPtrTy),
+ Ctor, CopyCtor, Dtor};
+ CGF.EmitRuntimeCall(
+ createRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args);
+}
+
+llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
+ const VarDecl *VD, Address VDAddr, SourceLocation Loc,
+ bool PerformInit, CodeGenFunction *CGF) {
+ if (CGM.getLangOpts().OpenMPUseTLS &&
+ CGM.getContext().getTargetInfo().isTLSSupported())
+ return nullptr;
+
+ VD = VD->getDefinition(CGM.getContext());
+ if (VD && ThreadPrivateWithDefinition.count(VD) == 0) {
+ ThreadPrivateWithDefinition.insert(VD);
+ QualType ASTTy = VD->getType();
+
+ llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
+ auto Init = VD->getAnyInitializer();
+ if (CGM.getLangOpts().CPlusPlus && PerformInit) {
+      // Generate a function that re-emits the declaration's initializer into
+      // the threadprivate copy of the variable VD.
+ CodeGenFunction CtorCGF(CGM);
+ FunctionArgList Args;
+ ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, SourceLocation(),
+ /*Id=*/nullptr, CGM.getContext().VoidPtrTy);
+ Args.push_back(&Dst);
+
+ auto &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
+ CGM.getContext().VoidPtrTy, Args, FunctionType::ExtInfo(),
+ /*isVariadic=*/false);
+ auto FTy = CGM.getTypes().GetFunctionType(FI);
+ auto Fn = CGM.CreateGlobalInitOrDestructFunction(
+ FTy, ".__kmpc_global_ctor_.", FI, Loc);
+ CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
+ Args, SourceLocation());
+ auto ArgVal = CtorCGF.EmitLoadOfScalar(
+ CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
+ CGM.getContext().VoidPtrTy, Dst.getLocation());
+ Address Arg = Address(ArgVal, VDAddr.getAlignment());
+ Arg = CtorCGF.Builder.CreateElementBitCast(Arg,
+ CtorCGF.ConvertTypeForMem(ASTTy));
+ CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
+ /*IsInitializer=*/true);
+ ArgVal = CtorCGF.EmitLoadOfScalar(
+ CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
+ CGM.getContext().VoidPtrTy, Dst.getLocation());
+ CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
+ CtorCGF.FinishFunction();
+ Ctor = Fn;
+ }
+ if (VD->getType().isDestructedType() != QualType::DK_none) {
+      // Generate a function that emits a destructor call for the
+      // threadprivate copy of the variable VD.
+ CodeGenFunction DtorCGF(CGM);
+ FunctionArgList Args;
+ ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, SourceLocation(),
+ /*Id=*/nullptr, CGM.getContext().VoidPtrTy);
+ Args.push_back(&Dst);
+
+ auto &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
+ CGM.getContext().VoidTy, Args, FunctionType::ExtInfo(),
+ /*isVariadic=*/false);
+ auto FTy = CGM.getTypes().GetFunctionType(FI);
+ auto Fn = CGM.CreateGlobalInitOrDestructFunction(
+ FTy, ".__kmpc_global_dtor_.", FI, Loc);
+ DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
+ SourceLocation());
+ auto ArgVal = DtorCGF.EmitLoadOfScalar(
+ DtorCGF.GetAddrOfLocalVar(&Dst),
+ /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
+ DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
+ DtorCGF.getDestroyer(ASTTy.isDestructedType()),
+ DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
+ DtorCGF.FinishFunction();
+ Dtor = Fn;
+ }
+ // Do not emit init function if it is not required.
+ if (!Ctor && !Dtor)
+ return nullptr;
+
+ llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
+ auto CopyCtorTy =
+ llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
+ /*isVarArg=*/false)->getPointerTo();
+ // Copying constructor for the threadprivate variable.
+    // Must be NULL: this parameter is reserved by the runtime, which
+    // currently requires it to always be NULL and asserts otherwise.
+ CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
+ if (Ctor == nullptr) {
+ auto CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
+ /*isVarArg=*/false)->getPointerTo();
+ Ctor = llvm::Constant::getNullValue(CtorTy);
+ }
+ if (Dtor == nullptr) {
+ auto DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
+ /*isVarArg=*/false)->getPointerTo();
+ Dtor = llvm::Constant::getNullValue(DtorTy);
+ }
+ if (!CGF) {
+ auto InitFunctionTy =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
+ auto InitFunction = CGM.CreateGlobalInitOrDestructFunction(
+ InitFunctionTy, ".__omp_threadprivate_init_.",
+ CGM.getTypes().arrangeNullaryFunction());
+ CodeGenFunction InitCGF(CGM);
+ FunctionArgList ArgList;
+ InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
+ CGM.getTypes().arrangeNullaryFunction(), ArgList,
+ Loc);
+ emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
+ InitCGF.FinishFunction();
+ return InitFunction;
+ }
+ emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
+ }
+ return nullptr;
+}
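+
+// Sketch of the generated code (illustrative, assuming a C++ TU without TLS
+// support): for
+//   int x = init();
+//   #pragma omp threadprivate(x)
+// this emits a ctor ".__kmpc_global_ctor_." that re-runs the initializer on
+// the thread's copy, a dtor ".__kmpc_global_dtor_." when the type needs
+// destruction, and the registration call
+//   __kmpc_threadprivate_register(&loc, (void *)&x, ctor, /*cctor=*/NULL,
+//                                 dtor);
+// wrapped in ".__omp_threadprivate_init_." when no CGF is supplied.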
+
+/// \brief Emits code for OpenMP 'if' clause using specified \a CodeGen
+/// function. Here is the logic:
+/// if (Cond) {
+/// ThenGen();
+/// } else {
+/// ElseGen();
+/// }
+static void emitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond,
+ const RegionCodeGenTy &ThenGen,
+ const RegionCodeGenTy &ElseGen) {
+ CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());
+
+ // If the condition constant folds and can be elided, try to avoid emitting
+ // the condition and the dead arm of the if/else.
+ bool CondConstant;
+ if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ if (CondConstant) {
+ ThenGen(CGF);
+ } else {
+ ElseGen(CGF);
+ }
+ return;
+ }
+
+ // Otherwise, the condition did not fold, or we couldn't elide it. Just
+ // emit the conditional branch.
+ auto ThenBlock = CGF.createBasicBlock("omp_if.then");
+ auto ElseBlock = CGF.createBasicBlock("omp_if.else");
+ auto ContBlock = CGF.createBasicBlock("omp_if.end");
+ CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0);
+
+ // Emit the 'then' code.
+ CGF.EmitBlock(ThenBlock);
+ {
+ CodeGenFunction::RunCleanupsScope ThenScope(CGF);
+ ThenGen(CGF);
+ }
+ CGF.EmitBranch(ContBlock);
+ // Emit the 'else' code if present.
+ {
+    // There is no need to emit a line number for an unconditional branch.
+ auto NL = ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(ElseBlock);
+ }
+ {
+    CodeGenFunction::RunCleanupsScope ElseScope(CGF);
+ ElseGen(CGF);
+ }
+ {
+    // There is no need to emit a line number for an unconditional branch.
+ auto NL = ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBranch(ContBlock);
+ }
+ // Emit the continuation block for code after the if.
+ CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
+}
+
+void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
+ llvm::Value *OutlinedFn,
+ ArrayRef<llvm::Value *> CapturedVars,
+ const Expr *IfCond) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ auto *RTLoc = emitUpdateLocation(CGF, Loc);
+ auto &&ThenGen = [this, OutlinedFn, CapturedVars,
+ RTLoc](CodeGenFunction &CGF) {
+ // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
+ llvm::Value *Args[] = {
+ RTLoc,
+ CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
+ CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy())};
+ llvm::SmallVector<llvm::Value *, 16> RealArgs;
+ RealArgs.append(std::begin(Args), std::end(Args));
+ RealArgs.append(CapturedVars.begin(), CapturedVars.end());
+
+ auto RTLFn = createRuntimeFunction(OMPRTL__kmpc_fork_call);
+ CGF.EmitRuntimeCall(RTLFn, RealArgs);
+ };
+ auto &&ElseGen = [this, OutlinedFn, CapturedVars, RTLoc,
+ Loc](CodeGenFunction &CGF) {
+ auto ThreadID = getThreadID(CGF, Loc);
+ // Build calls:
+ // __kmpc_serialized_parallel(&Loc, GTid);
+ llvm::Value *Args[] = {RTLoc, ThreadID};
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_serialized_parallel),
+ Args);
+
+ // OutlinedFn(&GTid, &zero, CapturedStruct);
+ auto ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
+ Address ZeroAddr =
+ CGF.CreateTempAlloca(CGF.Int32Ty, CharUnits::fromQuantity(4),
+ /*Name*/ ".zero.addr");
+ CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
+ llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
+ OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
+ OutlinedFnArgs.push_back(ZeroAddr.getPointer());
+ OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
+ CGF.EmitCallOrInvoke(OutlinedFn, OutlinedFnArgs);
+
+ // __kmpc_end_serialized_parallel(&Loc, GTid);
+ llvm::Value *EndArgs[] = {emitUpdateLocation(CGF, Loc), ThreadID};
+ CGF.EmitRuntimeCall(
+ createRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel), EndArgs);
+ };
+ if (IfCond) {
+ emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
+ } else {
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ ThenGen(CGF);
+ }
+}
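+
+// Lowering sketch (conceptual, not emitted verbatim): for
+//   #pragma omp parallel if(cond)
+//   { body(); }
+// the 'then' arm produces
+//   __kmpc_fork_call(&loc, /*argc=*/n, (kmpc_micro)outlined, var1, ..., varn);
+// while the 'else' arm serializes the region:
+//   __kmpc_serialized_parallel(&loc, gtid);
+//   outlined(&gtid, &zero, var1, ..., varn);
+//   __kmpc_end_serialized_parallel(&loc, gtid);
+// Here 'outlined' stands for the compiler-generated microtask.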
+
+// If we're inside an (outlined) parallel region, use the region info's
+// thread-ID variable (it is passed as the first argument of the outlined
+// function, "kmp_int32 *gtid"). Otherwise, if we're in a regular serial code
+// region, get the thread ID by calling kmp_int32
+// __kmpc_global_thread_num(ident_t *loc), stash this thread ID in a temporary
+// and return the address of that temporary.
+Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
+ SourceLocation Loc) {
+ if (auto OMPRegionInfo =
+ dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
+ if (OMPRegionInfo->getThreadIDVariable())
+ return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();
+
+ auto ThreadID = getThreadID(CGF, Loc);
+ auto Int32Ty =
+ CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
+ auto ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
+ CGF.EmitStoreOfScalar(ThreadID,
+ CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));
+
+ return ThreadIDTemp;
+}
+
+llvm::Constant *
+CGOpenMPRuntime::getOrCreateInternalVariable(llvm::Type *Ty,
+ const llvm::Twine &Name) {
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ Out << Name;
+ auto RuntimeName = Out.str();
+ auto &Elem = *InternalVars.insert(std::make_pair(RuntimeName, nullptr)).first;
+ if (Elem.second) {
+ assert(Elem.second->getType()->getPointerElementType() == Ty &&
+ "OMP internal variable has different type than requested");
+ return &*Elem.second;
+ }
+
+ return Elem.second = new llvm::GlobalVariable(
+ CGM.getModule(), Ty, /*IsConstant*/ false,
+ llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
+ Elem.first());
+}
+
+llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
+ llvm::Twine Name(".gomp_critical_user_", CriticalName);
+ return getOrCreateInternalVariable(KmpCriticalNameTy, Name.concat(".var"));
+}
+
+namespace {
+template <size_t N> class CallEndCleanup final : public EHScopeStack::Cleanup {
+ llvm::Value *Callee;
+ llvm::Value *Args[N];
+
+public:
+ CallEndCleanup(llvm::Value *Callee, ArrayRef<llvm::Value *> CleanupArgs)
+ : Callee(Callee) {
+ assert(CleanupArgs.size() == N);
+ std::copy(CleanupArgs.begin(), CleanupArgs.end(), std::begin(Args));
+ }
+ void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
+ if (!CGF.HaveInsertPoint())
+ return;
+ CGF.EmitRuntimeCall(Callee, Args);
+ }
+};
+} // anonymous namespace
+
+void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF,
+ StringRef CriticalName,
+ const RegionCodeGenTy &CriticalOpGen,
+ SourceLocation Loc, const Expr *Hint) {
+ // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]);
+ // CriticalOpGen();
+ // __kmpc_end_critical(ident_t *, gtid, Lock);
+ // Prepare arguments and build a call to __kmpc_critical
+ if (!CGF.HaveInsertPoint())
+ return;
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
+ getCriticalRegionLock(CriticalName)};
+ if (Hint) {
+ llvm::SmallVector<llvm::Value *, 8> ArgsWithHint(std::begin(Args),
+ std::end(Args));
+ auto *HintVal = CGF.EmitScalarExpr(Hint);
+ ArgsWithHint.push_back(
+ CGF.Builder.CreateIntCast(HintVal, CGM.IntPtrTy, /*isSigned=*/false));
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_critical_with_hint),
+ ArgsWithHint);
+ } else
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_critical), Args);
+ // Build a call to __kmpc_end_critical
+ CGF.EHStack.pushCleanup<CallEndCleanup<std::extent<decltype(Args)>::value>>(
+ NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_critical),
+ llvm::makeArrayRef(Args));
+ emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
+}
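+
+// Lowering sketch (conceptual): for
+//   #pragma omp critical (mylock)
+//     body();
+// this emits
+//   __kmpc_critical(&loc, gtid, &.gomp_critical_user_mylock.var);
+//   body();
+//   __kmpc_end_critical(&loc, gtid, &.gomp_critical_user_mylock.var);
+// with __kmpc_critical_with_hint(..., hint) used instead when a 'hint'
+// clause is present. The end call is pushed as a cleanup, so it also runs on
+// exceptional exits from the region.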
+
+static void emitIfStmt(CodeGenFunction &CGF, llvm::Value *IfCond,
+ OpenMPDirectiveKind Kind, SourceLocation Loc,
+ const RegionCodeGenTy &BodyOpGen) {
+ llvm::Value *CallBool = CGF.EmitScalarConversion(
+ IfCond,
+ CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true),
+ CGF.getContext().BoolTy, Loc);
+
+ auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
+ auto *ContBlock = CGF.createBasicBlock("omp_if.end");
+ // Generate the branch (If-stmt)
+ CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
+ CGF.EmitBlock(ThenBlock);
+ CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, Kind, BodyOpGen);
+ // Emit the rest of bblocks/branches
+ CGF.EmitBranch(ContBlock);
+ CGF.EmitBlock(ContBlock, true);
+}
+
+void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
+ const RegionCodeGenTy &MasterOpGen,
+ SourceLocation Loc) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // if(__kmpc_master(ident_t *, gtid)) {
+ // MasterOpGen();
+ // __kmpc_end_master(ident_t *, gtid);
+ // }
+ // Prepare arguments and build a call to __kmpc_master
+ llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
+ auto *IsMaster =
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_master), Args);
+ typedef CallEndCleanup<std::extent<decltype(Args)>::value>
+ MasterCallEndCleanup;
+ emitIfStmt(
+ CGF, IsMaster, OMPD_master, Loc, [&](CodeGenFunction &CGF) -> void {
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ CGF.EHStack.pushCleanup<MasterCallEndCleanup>(
+ NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_master),
+ llvm::makeArrayRef(Args));
+ MasterOpGen(CGF);
+ });
+}
+
+void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
+ SourceLocation Loc) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
+ llvm::Value *Args[] = {
+ emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
+ llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskyield), Args);
+}
+
+void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
+ const RegionCodeGenTy &TaskgroupOpGen,
+ SourceLocation Loc) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // __kmpc_taskgroup(ident_t *, gtid);
+ // TaskgroupOpGen();
+ // __kmpc_end_taskgroup(ident_t *, gtid);
+ // Prepare arguments and build a call to __kmpc_taskgroup
+ {
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_taskgroup), Args);
+ // Build a call to __kmpc_end_taskgroup
+ CGF.EHStack.pushCleanup<CallEndCleanup<std::extent<decltype(Args)>::value>>(
+ NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_taskgroup),
+ llvm::makeArrayRef(Args));
+ emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
+ }
+}
+
+/// Given an array of pointers to variables, project the address of a
+/// given variable.
+static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
+ unsigned Index, const VarDecl *Var) {
+ // Pull out the pointer to the variable.
+ Address PtrAddr =
+ CGF.Builder.CreateConstArrayGEP(Array, Index, CGF.getPointerSize());
+ llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
+
+ Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
+ Addr = CGF.Builder.CreateElementBitCast(
+ Addr, CGF.ConvertTypeForMem(Var->getType()));
+ return Addr;
+}
+
+static llvm::Value *emitCopyprivateCopyFunction(
+ CodeGenModule &CGM, llvm::Type *ArgsType,
+ ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
+ ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps) {
+ auto &C = CGM.getContext();
+ // void copy_func(void *LHSArg, void *RHSArg);
+ FunctionArgList Args;
+ ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, SourceLocation(), /*Id=*/nullptr,
+ C.VoidPtrTy);
+ ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, SourceLocation(), /*Id=*/nullptr,
+ C.VoidPtrTy);
+ Args.push_back(&LHSArg);
+ Args.push_back(&RHSArg);
+ FunctionType::ExtInfo EI;
+ auto &CGFI = CGM.getTypes().arrangeFreeFunctionDeclaration(
+ C.VoidTy, Args, EI, /*isVariadic=*/false);
+ auto *Fn = llvm::Function::Create(
+ CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
+ ".omp.copyprivate.copy_func", &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, CGFI);
+ CodeGenFunction CGF(CGM);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);
+ // Dest = (void*[n])(LHSArg);
+ // Src = (void*[n])(RHSArg);
+ Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
+ ArgsType), CGF.getPointerAlign());
+ Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
+ ArgsType), CGF.getPointerAlign());
+ // *(Type0*)Dst[0] = *(Type0*)Src[0];
+ // *(Type1*)Dst[1] = *(Type1*)Src[1];
+ // ...
+ // *(Typen*)Dst[n] = *(Typen*)Src[n];
+ for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
+ auto DestVar = cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
+ Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);
+
+ auto SrcVar = cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
+ Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);
+
+ auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
+ QualType Type = VD->getType();
+ CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
+ }
+ CGF.FinishFunction();
+ return Fn;
+}
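+
+// Sketch of the generated helper (illustrative): for two copyprivate
+// variables 'a' (int) and 'b' (double), the emitted function behaves like
+//   void .omp.copyprivate.copy_func(void *lhs, void *rhs) {
+//     void **dst = (void **)lhs, **src = (void **)rhs;
+//     *(int *)dst[0] = *(int *)src[0];
+//     *(double *)dst[1] = *(double *)src[1];
+//   }
+// Each assignment goes through EmitOMPCopy, so user-defined copy assignment
+// operators are honored for class types.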
+
+void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
+ const RegionCodeGenTy &SingleOpGen,
+ SourceLocation Loc,
+ ArrayRef<const Expr *> CopyprivateVars,
+ ArrayRef<const Expr *> SrcExprs,
+ ArrayRef<const Expr *> DstExprs,
+ ArrayRef<const Expr *> AssignmentOps) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ assert(CopyprivateVars.size() == SrcExprs.size() &&
+ CopyprivateVars.size() == DstExprs.size() &&
+ CopyprivateVars.size() == AssignmentOps.size());
+ auto &C = CGM.getContext();
+ // int32 did_it = 0;
+ // if(__kmpc_single(ident_t *, gtid)) {
+ // SingleOpGen();
+ // __kmpc_end_single(ident_t *, gtid);
+ // did_it = 1;
+ // }
+ // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
+ // <copy_func>, did_it);
+
+ Address DidIt = Address::invalid();
+ if (!CopyprivateVars.empty()) {
+ // int32 did_it = 0;
+ auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
+ DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
+ CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
+ }
+ // Prepare arguments and build a call to __kmpc_single
+ llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
+ auto *IsSingle =
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_single), Args);
+ typedef CallEndCleanup<std::extent<decltype(Args)>::value>
+ SingleCallEndCleanup;
+ emitIfStmt(
+ CGF, IsSingle, OMPD_single, Loc, [&](CodeGenFunction &CGF) -> void {
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ CGF.EHStack.pushCleanup<SingleCallEndCleanup>(
+ NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_single),
+ llvm::makeArrayRef(Args));
+ SingleOpGen(CGF);
+ if (DidIt.isValid()) {
+ // did_it = 1;
+ CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
+ }
+ });
+ // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
+ // <copy_func>, did_it);
+ if (DidIt.isValid()) {
+ llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
+ auto CopyprivateArrayTy =
+ C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
+ // Create a list of all private variables for copyprivate.
+ Address CopyprivateList =
+ CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
+ for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
+ Address Elem = CGF.Builder.CreateConstArrayGEP(
+ CopyprivateList, I, CGF.getPointerSize());
+ CGF.Builder.CreateStore(
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLValue(CopyprivateVars[I]).getPointer(), CGF.VoidPtrTy),
+ Elem);
+ }
+    // Build a function that copies private values from the single region to
+    // all other threads in the corresponding parallel region.
+ auto *CpyFn = emitCopyprivateCopyFunction(
+ CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
+ CopyprivateVars, SrcExprs, DstExprs, AssignmentOps);
+ auto *BufSize = getTypeSize(CGF, CopyprivateArrayTy);
+ Address CL =
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
+ CGF.VoidPtrTy);
+ auto *DidItVal = CGF.Builder.CreateLoad(DidIt);
+ llvm::Value *Args[] = {
+ emitUpdateLocation(CGF, Loc), // ident_t *<loc>
+ getThreadID(CGF, Loc), // i32 <gtid>
+ BufSize, // size_t <buf_size>
+ CL.getPointer(), // void *<copyprivate list>
+ CpyFn, // void (*) (void *, void *) <copy_func>
+ DidItVal // i32 did_it
+ };
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_copyprivate), Args);
+ }
+}
+
+void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
+ const RegionCodeGenTy &OrderedOpGen,
+ SourceLocation Loc, bool IsThreads) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // __kmpc_ordered(ident_t *, gtid);
+ // OrderedOpGen();
+ // __kmpc_end_ordered(ident_t *, gtid);
+ // Prepare arguments and build a call to __kmpc_ordered
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ if (IsThreads) {
+ llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_ordered), Args);
+ // Build a call to __kmpc_end_ordered
+ CGF.EHStack.pushCleanup<CallEndCleanup<std::extent<decltype(Args)>::value>>(
+ NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_ordered),
+ llvm::makeArrayRef(Args));
+ }
+ emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
+}
+
+void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPDirectiveKind Kind, bool EmitChecks,
+ bool ForceSimpleCall) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Build call __kmpc_cancel_barrier(loc, thread_id);
+ // Build call __kmpc_barrier(loc, thread_id);
+ OpenMPLocationFlags Flags = OMP_IDENT_KMPC;
+ if (Kind == OMPD_for) {
+ Flags =
+ static_cast<OpenMPLocationFlags>(Flags | OMP_IDENT_BARRIER_IMPL_FOR);
+ } else if (Kind == OMPD_sections) {
+ Flags = static_cast<OpenMPLocationFlags>(Flags |
+ OMP_IDENT_BARRIER_IMPL_SECTIONS);
+ } else if (Kind == OMPD_single) {
+ Flags =
+ static_cast<OpenMPLocationFlags>(Flags | OMP_IDENT_BARRIER_IMPL_SINGLE);
+ } else if (Kind == OMPD_barrier) {
+ Flags = static_cast<OpenMPLocationFlags>(Flags | OMP_IDENT_BARRIER_EXPL);
+ } else {
+ Flags = static_cast<OpenMPLocationFlags>(Flags | OMP_IDENT_BARRIER_IMPL);
+ }
+ // Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
+ // thread_id);
+ auto *OMPRegionInfo =
+ dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo);
+  // Do not emit a barrier call in the 'single' directive that is emitted in
+  // some rare cases for 'sections' directives.
+ if (OMPRegionInfo && OMPRegionInfo->getDirectiveKind() == OMPD_single)
+ return;
+ llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
+ getThreadID(CGF, Loc)};
+ if (OMPRegionInfo) {
+ if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
+ auto *Result = CGF.EmitRuntimeCall(
+ createRuntimeFunction(OMPRTL__kmpc_cancel_barrier), Args);
+ if (EmitChecks) {
+ // if (__kmpc_cancel_barrier()) {
+ // exit from construct;
+ // }
+ auto *ExitBB = CGF.createBasicBlock(".cancel.exit");
+ auto *ContBB = CGF.createBasicBlock(".cancel.continue");
+ auto *Cmp = CGF.Builder.CreateIsNotNull(Result);
+ CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
+ CGF.EmitBlock(ExitBB);
+ // exit from construct;
+ auto CancelDestination =
+ CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
+ CGF.EmitBranchThroughCleanup(CancelDestination);
+ CGF.EmitBlock(ContBB, /*IsFinished=*/true);
+ }
+ return;
+ }
+ }
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_barrier), Args);
+}
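+
+// Example of the flag selection above (conceptual): an explicit
+//   #pragma omp barrier
+// emits
+//   __kmpc_barrier(&loc /*OMP_IDENT_KMPC | OMP_IDENT_BARRIER_EXPL*/, gtid);
+// whereas a barrier inside a cancellable region calls __kmpc_cancel_barrier
+// and, when EmitChecks is set, branches to the cancellation exit on a
+// non-zero result.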
+
+/// \brief Schedule types for 'omp for' loops (these enumerators are taken from
+/// the enum sched_type in kmp.h).
+enum OpenMPSchedType {
+ /// \brief Lower bound for default (unordered) versions.
+ OMP_sch_lower = 32,
+ OMP_sch_static_chunked = 33,
+ OMP_sch_static = 34,
+ OMP_sch_dynamic_chunked = 35,
+ OMP_sch_guided_chunked = 36,
+ OMP_sch_runtime = 37,
+ OMP_sch_auto = 38,
+ /// \brief Lower bound for 'ordered' versions.
+ OMP_ord_lower = 64,
+ OMP_ord_static_chunked = 65,
+ OMP_ord_static = 66,
+ OMP_ord_dynamic_chunked = 67,
+ OMP_ord_guided_chunked = 68,
+ OMP_ord_runtime = 69,
+ OMP_ord_auto = 70,
+ OMP_sch_default = OMP_sch_static,
+};
+
+/// \brief Map the OpenMP loop schedule to the runtime enumeration.
+static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
+ bool Chunked, bool Ordered) {
+ switch (ScheduleKind) {
+ case OMPC_SCHEDULE_static:
+ return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
+ : (Ordered ? OMP_ord_static : OMP_sch_static);
+ case OMPC_SCHEDULE_dynamic:
+ return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked;
+ case OMPC_SCHEDULE_guided:
+ return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked;
+ case OMPC_SCHEDULE_runtime:
+ return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
+ case OMPC_SCHEDULE_auto:
+ return Ordered ? OMP_ord_auto : OMP_sch_auto;
+ case OMPC_SCHEDULE_unknown:
+ assert(!Chunked && "chunk was specified but schedule kind not known");
+ return Ordered ? OMP_ord_static : OMP_sch_static;
+ }
+ llvm_unreachable("Unexpected runtime schedule");
+}
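+
+// Examples of the mapping (read off the switch above):
+//   schedule(static, 4)          -> OMP_sch_static_chunked (33)
+//   schedule(dynamic)            -> OMP_sch_dynamic_chunked (35)
+//   schedule(guided) + ordered   -> OMP_ord_guided_chunked (68)
+//   no schedule clause           -> OMP_sch_static (34)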
+
+bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
+ bool Chunked) const {
+ auto Schedule = getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
+ return Schedule == OMP_sch_static;
+}
+
+bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
+ auto Schedule =
+ getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);
+ assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here");
+ return Schedule != OMP_sch_static;
+}
+
+void CGOpenMPRuntime::emitForDispatchInit(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ OpenMPScheduleClauseKind ScheduleKind,
+ unsigned IVSize, bool IVSigned,
+ bool Ordered, llvm::Value *UB,
+ llvm::Value *Chunk) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ OpenMPSchedType Schedule =
+ getRuntimeSchedule(ScheduleKind, Chunk != nullptr, Ordered);
+ assert(Ordered ||
+ (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
+ Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked));
+ // Call __kmpc_dispatch_init(
+ // ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
+ // kmp_int[32|64] lower, kmp_int[32|64] upper,
+ // kmp_int[32|64] stride, kmp_int[32|64] chunk);
+
+  // If the chunk was not specified in the clause, use the default value 1.
+ if (Chunk == nullptr)
+ Chunk = CGF.Builder.getIntN(IVSize, 1);
+ llvm::Value *Args[] = {
+ emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
+ getThreadID(CGF, Loc),
+ CGF.Builder.getInt32(Schedule), // Schedule type
+ CGF.Builder.getIntN(IVSize, 0), // Lower
+ UB, // Upper
+ CGF.Builder.getIntN(IVSize, 1), // Stride
+ Chunk // Chunk
+ };
+ CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
+}
+
+void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ OpenMPScheduleClauseKind ScheduleKind,
+ unsigned IVSize, bool IVSigned,
+ bool Ordered, Address IL, Address LB,
+ Address UB, Address ST,
+ llvm::Value *Chunk) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ OpenMPSchedType Schedule =
+ getRuntimeSchedule(ScheduleKind, Chunk != nullptr, Ordered);
+ assert(!Ordered);
+ assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
+ Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked);
+
+ // Call __kmpc_for_static_init(
+ // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
+ // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
+ // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
+ // kmp_int[32|64] incr, kmp_int[32|64] chunk);
+ if (Chunk == nullptr) {
+ assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static) &&
+ "expected static non-chunked schedule");
+    // If the chunk was not specified in the clause, use the default value 1.
+ Chunk = CGF.Builder.getIntN(IVSize, 1);
+ } else {
+ assert((Schedule == OMP_sch_static_chunked ||
+ Schedule == OMP_ord_static_chunked) &&
+ "expected static chunked schedule");
+ }
+ llvm::Value *Args[] = {
+ emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
+ getThreadID(CGF, Loc),
+ CGF.Builder.getInt32(Schedule), // Schedule type
+ IL.getPointer(), // &isLastIter
+ LB.getPointer(), // &LB
+ UB.getPointer(), // &UB
+ ST.getPointer(), // &Stride
+ CGF.Builder.getIntN(IVSize, 1), // Incr
+ Chunk // Chunk
+ };
+ CGF.EmitRuntimeCall(createForStaticInitFunction(IVSize, IVSigned), Args);
+}
+
+void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
+ SourceLocation Loc) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
+ llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
+ getThreadID(CGF, Loc)};
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_for_static_fini),
+ Args);
+}
+
+void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ unsigned IVSize,
+ bool IVSigned) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Call __kmpc_for_dynamic_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
+ llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
+ getThreadID(CGF, Loc)};
+ CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
+}
+
+llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
+ SourceLocation Loc, unsigned IVSize,
+ bool IVSigned, Address IL,
+ Address LB, Address UB,
+ Address ST) {
+ // Call __kmpc_dispatch_next(
+ // ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
+ // kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
+ // kmp_int[32|64] *p_stride);
+ llvm::Value *Args[] = {
+ emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC), getThreadID(CGF, Loc),
+ IL.getPointer(), // &isLastIter
+ LB.getPointer(), // &Lower
+ UB.getPointer(), // &Upper
+ ST.getPointer() // &Stride
+ };
+ llvm::Value *Call =
+ CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
+ return CGF.EmitScalarConversion(
+ Call, CGF.getContext().getIntTypeForBitwidth(32, /* Signed */ true),
+ CGF.getContext().BoolTy, Loc);
+}
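+
+// Usage sketch (conceptual): for a dynamically scheduled loop the caller
+// drives the chunk loop via emitForNext, roughly
+//   while (__kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &st)) {
+//     for (i = lb; i <= ub; ++i)
+//       body(i);
+//   }
+// where the runtime hands out [lb, ub] chunks until the iteration space is
+// exhausted.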
+
+void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
+ llvm::Value *NumThreads,
+ SourceLocation Loc) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
+ llvm::Value *Args[] = {
+ emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
+ CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_threads),
+ Args);
+}
+
+void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
+ OpenMPProcBindClauseKind ProcBind,
+ SourceLocation Loc) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Constants for proc bind value accepted by the runtime.
+ enum ProcBindTy {
+ ProcBindFalse = 0,
+ ProcBindTrue,
+ ProcBindMaster,
+ ProcBindClose,
+ ProcBindSpread,
+ ProcBindIntel,
+ ProcBindDefault
+ } RuntimeProcBind;
+ switch (ProcBind) {
+ case OMPC_PROC_BIND_master:
+ RuntimeProcBind = ProcBindMaster;
+ break;
+ case OMPC_PROC_BIND_close:
+ RuntimeProcBind = ProcBindClose;
+ break;
+ case OMPC_PROC_BIND_spread:
+ RuntimeProcBind = ProcBindSpread;
+ break;
+ case OMPC_PROC_BIND_unknown:
+ llvm_unreachable("Unsupported proc_bind value.");
+ }
+ // Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind)
+ llvm::Value *Args[] = {
+ emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
+ llvm::ConstantInt::get(CGM.IntTy, RuntimeProcBind, /*isSigned=*/true)};
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_proc_bind), Args);
+}
+
+void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
+ SourceLocation Loc) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Build call void __kmpc_flush(ident_t *loc)
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_flush),
+ emitUpdateLocation(CGF, Loc));
+}
+
+namespace {
+/// \brief Indexes of fields for type kmp_task_t.
+enum KmpTaskTFields {
+ /// \brief List of shared variables.
+ KmpTaskTShareds,
+ /// \brief Task routine.
+ KmpTaskTRoutine,
+ /// \brief Partition id for the untied tasks.
+ KmpTaskTPartId,
+ /// \brief Function with call of destructors for private variables.
+ KmpTaskTDestructors,
+};
+} // anonymous namespace
+
+bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
+  // FIXME: Add other entry kinds when they become supported.
+ return OffloadEntriesTargetRegion.empty();
+}
+
+/// \brief Initialize target region entry.
+void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
+ initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
+ StringRef ParentName, unsigned LineNum,
+ unsigned ColNum, unsigned Order) {
+ assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
+ "only required for the device "
+ "code generation.");
+ OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum][ColNum] =
+ OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr);
+ ++OffloadingEntriesNum;
+}
+
+void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
+ registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
+ StringRef ParentName, unsigned LineNum,
+ unsigned ColNum, llvm::Constant *Addr,
+ llvm::Constant *ID) {
+  // If we are emitting code for a target device, the entry is already
+  // initialized; it only has to be registered.
+ if (CGM.getLangOpts().OpenMPIsDevice) {
+ assert(hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum,
+ ColNum) &&
+ "Entry must exist.");
+ auto &Entry = OffloadEntriesTargetRegion[DeviceID][FileID][ParentName]
+ [LineNum][ColNum];
+ assert(Entry.isValid() && "Entry not initialized!");
+ Entry.setAddress(Addr);
+ Entry.setID(ID);
+ return;
+  }
+  OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum++, Addr, ID);
+  OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum][ColNum] =
+      Entry;
+}
+
+bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
+ unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum,
+ unsigned ColNum) const {
+ auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
+ if (PerDevice == OffloadEntriesTargetRegion.end())
+ return false;
+ auto PerFile = PerDevice->second.find(FileID);
+ if (PerFile == PerDevice->second.end())
+ return false;
+ auto PerParentName = PerFile->second.find(ParentName);
+ if (PerParentName == PerFile->second.end())
+ return false;
+ auto PerLine = PerParentName->second.find(LineNum);
+ if (PerLine == PerParentName->second.end())
+ return false;
+ auto PerColumn = PerLine->second.find(ColNum);
+ if (PerColumn == PerLine->second.end())
+ return false;
+ // Fail if this entry is already registered.
+ if (PerColumn->second.getAddress() || PerColumn->second.getID())
+ return false;
+ return true;
+}
+
+void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
+ const OffloadTargetRegionEntryInfoActTy &Action) {
+ // Scan all target region entries and perform the provided action.
+ for (auto &D : OffloadEntriesTargetRegion)
+ for (auto &F : D.second)
+ for (auto &P : F.second)
+ for (auto &L : P.second)
+ for (auto &C : L.second)
+ Action(D.first, F.first, P.first(), L.first, C.first, C.second);
+}
+
+/// \brief Create a Ctor/Dtor-like function whose body is emitted through
+/// \a Codegen. This is used to emit the two functions that register and
+/// unregister the descriptor of the current compilation unit.
+static llvm::Function *
+createOffloadingBinaryDescriptorFunction(CodeGenModule &CGM, StringRef Name,
+ const RegionCodeGenTy &Codegen) {
+ auto &C = CGM.getContext();
+ FunctionArgList Args;
+ ImplicitParamDecl DummyPtr(C, /*DC=*/nullptr, SourceLocation(),
+ /*Id=*/nullptr, C.VoidPtrTy);
+ Args.push_back(&DummyPtr);
+
+ CodeGenFunction CGF(CGM);
+ auto &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
+ C.VoidTy, Args, FunctionType::ExtInfo(),
+ /*isVariadic=*/false);
+ auto FTy = CGM.getTypes().GetFunctionType(FI);
+ auto *Fn =
+ CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, SourceLocation());
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FI, Args, SourceLocation());
+ Codegen(CGF);
+ CGF.FinishFunction();
+ return Fn;
+}
+
+llvm::Function *
+CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
+
+ // If we don't have entries or if we are emitting code for the device, we
+ // don't need to do anything.
+ if (CGM.getLangOpts().OpenMPIsDevice || OffloadEntriesInfoManager.empty())
+ return nullptr;
+
+ auto &M = CGM.getModule();
+ auto &C = CGM.getContext();
+
+ // Get list of devices we care about
+ auto &Devices = CGM.getLangOpts().OMPTargetTriples;
+
+ // We should be creating an offloading descriptor only if there are devices
+ // specified.
+ assert(!Devices.empty() && "No OpenMP offloading devices??");
+
+  // Create the external variables that will point to the beginning and end
+  // of the host entries section. These will be defined by the linker.
+ auto *OffloadEntryTy =
+ CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy());
+ llvm::GlobalVariable *HostEntriesBegin = new llvm::GlobalVariable(
+ M, OffloadEntryTy, /*isConstant=*/true,
+ llvm::GlobalValue::ExternalLinkage, /*Initializer=*/0,
+ ".omp_offloading.entries_begin");
+ llvm::GlobalVariable *HostEntriesEnd = new llvm::GlobalVariable(
+ M, OffloadEntryTy, /*isConstant=*/true,
+ llvm::GlobalValue::ExternalLinkage, /*Initializer=*/0,
+ ".omp_offloading.entries_end");
+
+ // Create all device images
+  llvm::SmallVector<llvm::Constant *, 4> DeviceImagesEntries;
+ auto *DeviceImageTy = cast<llvm::StructType>(
+ CGM.getTypes().ConvertTypeForMem(getTgtDeviceImageQTy()));
+
+ for (unsigned i = 0; i < Devices.size(); ++i) {
+ StringRef T = Devices[i].getTriple();
+ auto *ImgBegin = new llvm::GlobalVariable(
+ M, CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage,
+ /*Initializer=*/0, Twine(".omp_offloading.img_start.") + Twine(T));
+ auto *ImgEnd = new llvm::GlobalVariable(
+ M, CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage,
+ /*Initializer=*/0, Twine(".omp_offloading.img_end.") + Twine(T));
+
+ llvm::Constant *Dev =
+ llvm::ConstantStruct::get(DeviceImageTy, ImgBegin, ImgEnd,
+ HostEntriesBegin, HostEntriesEnd, nullptr);
+    DeviceImagesEntries.push_back(Dev);
+ }
+
+ // Create device images global array.
+ llvm::ArrayType *DeviceImagesInitTy =
+      llvm::ArrayType::get(DeviceImageTy, DeviceImagesEntries.size());
+ llvm::Constant *DeviceImagesInit =
+      llvm::ConstantArray::get(DeviceImagesInitTy, DeviceImagesEntries);
+
+ llvm::GlobalVariable *DeviceImages = new llvm::GlobalVariable(
+ M, DeviceImagesInitTy, /*isConstant=*/true,
+ llvm::GlobalValue::InternalLinkage, DeviceImagesInit,
+ ".omp_offloading.device_images");
+ DeviceImages->setUnnamedAddr(true);
+
+  // Zero indices used to build the GEP constant expressions below.
+ llvm::Constant *Index[] = {llvm::Constant::getNullValue(CGM.Int32Ty),
+ llvm::Constant::getNullValue(CGM.Int32Ty)};
+
+ // Create the target region descriptor.
+ auto *BinaryDescriptorTy = cast<llvm::StructType>(
+ CGM.getTypes().ConvertTypeForMem(getTgtBinaryDescriptorQTy()));
+ llvm::Constant *TargetRegionsDescriptorInit = llvm::ConstantStruct::get(
+ BinaryDescriptorTy, llvm::ConstantInt::get(CGM.Int32Ty, Devices.size()),
+ llvm::ConstantExpr::getGetElementPtr(DeviceImagesInitTy, DeviceImages,
+ Index),
+ HostEntriesBegin, HostEntriesEnd, nullptr);
+
+ auto *Desc = new llvm::GlobalVariable(
+ M, BinaryDescriptorTy, /*isConstant=*/true,
+ llvm::GlobalValue::InternalLinkage, TargetRegionsDescriptorInit,
+ ".omp_offloading.descriptor");
+
+ // Emit code to register or unregister the descriptor at execution
+ // startup or closing, respectively.
+
+ // Create a variable to drive the registration and unregistration of the
+ // descriptor, so we can reuse the logic that emits Ctors and Dtors.
+ auto *IdentInfo = &C.Idents.get(".omp_offloading.reg_unreg_var");
+ ImplicitParamDecl RegUnregVar(C, C.getTranslationUnitDecl(), SourceLocation(),
+ IdentInfo, C.CharTy);
+
+ auto *UnRegFn = createOffloadingBinaryDescriptorFunction(
+ CGM, ".omp_offloading.descriptor_unreg", [&](CodeGenFunction &CGF) {
+ CGF.EmitCallOrInvoke(createRuntimeFunction(OMPRTL__tgt_unregister_lib),
+ Desc);
+ });
+ auto *RegFn = createOffloadingBinaryDescriptorFunction(
+ CGM, ".omp_offloading.descriptor_reg", [&](CodeGenFunction &CGF) {
+ CGF.EmitCallOrInvoke(createRuntimeFunction(OMPRTL__tgt_register_lib),
+ Desc);
+ CGM.getCXXABI().registerGlobalDtor(CGF, RegUnregVar, UnRegFn, Desc);
+ });
+ return RegFn;
+}
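+
+// Resulting structure (illustrative): the function returned here behaves
+// like a global constructor that calls
+//   __tgt_register_lib(&.omp_offloading.descriptor);
+// and registers ".omp_offloading.descriptor_unreg" (which calls
+// __tgt_unregister_lib) as the matching global destructor. The descriptor
+// bundles the device count, the device image array, and the
+// [entries_begin, entries_end) host entries section.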
+
+void CGOpenMPRuntime::createOffloadEntry(llvm::Constant *Addr, StringRef Name,
+ uint64_t Size) {
+ auto *TgtOffloadEntryType = cast<llvm::StructType>(
+ CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy()));
+ llvm::LLVMContext &C = CGM.getModule().getContext();
+ llvm::Module &M = CGM.getModule();
+
+ // Make sure the address has the right type.
+ llvm::Constant *AddrPtr = llvm::ConstantExpr::getBitCast(Addr, CGM.VoidPtrTy);
+
+ // Create constant string with the name.
+ llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);
+
+ llvm::GlobalVariable *Str =
+ new llvm::GlobalVariable(M, StrPtrInit->getType(), /*isConstant=*/true,
+ llvm::GlobalValue::InternalLinkage, StrPtrInit,
+ ".omp_offloading.entry_name");
+ Str->setUnnamedAddr(true);
+ llvm::Constant *StrPtr = llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy);
+
+ // Create the entry struct.
+ llvm::Constant *EntryInit = llvm::ConstantStruct::get(
+ TgtOffloadEntryType, AddrPtr, StrPtr,
+ llvm::ConstantInt::get(CGM.SizeTy, Size), nullptr);
+ llvm::GlobalVariable *Entry = new llvm::GlobalVariable(
+ M, TgtOffloadEntryType, true, llvm::GlobalValue::ExternalLinkage,
+ EntryInit, ".omp_offloading.entry");
+
+ // The entry has to be created in the section the linker expects it to be.
+ Entry->setSection(".omp_offloading.entries");
+ // We can't have any padding between symbols, so we need to have 1-byte
+ // alignment.
+ Entry->setAlignment(1);
+}
+
+void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
+  // Emit the offloading entries and metadata so that the device codegen side
+  // can easily figure out what to emit. The produced metadata looks like
+  // this:
+ //
+ // !omp_offload.info = !{!1, ...}
+ //
+  // Right now we only generate metadata for functions that contain target
+ // regions.
+
+  // If we do not have entries, we don't need to do anything.
+ if (OffloadEntriesInfoManager.empty())
+ return;
+
+ llvm::Module &M = CGM.getModule();
+ llvm::LLVMContext &C = M.getContext();
+ SmallVector<OffloadEntriesInfoManagerTy::OffloadEntryInfo *, 16>
+ OrderedEntries(OffloadEntriesInfoManager.size());
+
+ // Create the offloading info metadata node.
+ llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
+
+ // Auxiliary helpers to create metadata values and strings.
+ auto getMDInt = [&](unsigned v) {
+ return llvm::ConstantAsMetadata::get(
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(C), v));
+ };
+
+ auto getMDString = [&](StringRef v) { return llvm::MDString::get(C, v); };
+
+ // Create a function that emits metadata for each target region entry.
+ auto &&TargetRegionMetadataEmitter = [&](
+ unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned Line,
+ unsigned Column,
+ OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
+ llvm::SmallVector<llvm::Metadata *, 32> Ops;
+ // Generate metadata for target regions. Each entry of this metadata
+ // contains:
+ // - Entry 0 -> Kind of this type of metadata (0).
+ // - Entry 1 -> Device ID of the file where the entry was identified.
+ // - Entry 2 -> File ID of the file where the entry was identified.
+ // - Entry 3 -> Mangled name of the function where the entry was identified.
+ // - Entry 4 -> Line in the file where the entry was identified.
+ // - Entry 5 -> Column in the file where the entry was identified.
+ // - Entry 6 -> Order the entry was created.
+ // The first element of the metadata node is the kind.
+ Ops.push_back(getMDInt(E.getKind()));
+ Ops.push_back(getMDInt(DeviceID));
+ Ops.push_back(getMDInt(FileID));
+ Ops.push_back(getMDString(ParentName));
+ Ops.push_back(getMDInt(Line));
+ Ops.push_back(getMDInt(Column));
+ Ops.push_back(getMDInt(E.getOrder()));
+
+ // Save this entry in the right position of the ordered entries array.
+ OrderedEntries[E.getOrder()] = &E;
+
+ // Add metadata to the named metadata node.
+ MD->addOperand(llvm::MDNode::get(C, Ops));
+ };
+
+ OffloadEntriesInfoManager.actOnTargetRegionEntriesInfo(
+ TargetRegionMetadataEmitter);
+
+ for (auto *E : OrderedEntries) {
+ assert(E && "All ordered entries must exist!");
+ if (auto *CE =
+ dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
+ E)) {
+ assert(CE->getID() && CE->getAddress() &&
+ "Entry ID and Addr are invalid!");
+ createOffloadEntry(CE->getID(), CE->getAddress()->getName(), /*Size=*/0);
+ } else
+ llvm_unreachable("Unsupported entry kind.");
+ }
+}
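+
+// Example (hypothetical values): a target region at line 42, column 9 of a
+// file with device ID 1 and file ID 123, enclosed in function "main" and
+// created first, yields (entry 0 is the kind, 0 = target region):
+//
+//   !omp_offload.info = !{!0}
+//   !0 = !{i32 0, i32 1, i32 123, !"main", i32 42, i32 9, i32 0}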
+
+/// \brief Load all the offload entries' information from the host IR
+/// metadata.
+void CGOpenMPRuntime::loadOffloadInfoMetadata() {
+ // If we are in target mode, load the metadata from the host IR. This code
+ // has to match the metadata creation in createOffloadEntriesAndInfoMetadata().
+
+ if (!CGM.getLangOpts().OpenMPIsDevice)
+ return;
+
+ if (CGM.getLangOpts().OMPHostIRFile.empty())
+ return;
+
+ auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
+ if (Buf.getError())
+ return;
+
+ llvm::LLVMContext C;
+ auto ME = llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C);
+
+ if (ME.getError())
+ return;
+
+ llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
+ if (!MD)
+ return;
+
+ for (auto I : MD->operands()) {
+ llvm::MDNode *MN = cast<llvm::MDNode>(I);
+
+ auto getMDInt = [&](unsigned Idx) {
+ llvm::ConstantAsMetadata *V =
+ cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
+ return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
+ };
+
+ auto getMDString = [&](unsigned Idx) {
+ llvm::MDString *V = cast<llvm::MDString>(MN->getOperand(Idx));
+ return V->getString();
+ };
+
+ switch (getMDInt(0)) {
+ default:
+ llvm_unreachable("Unexpected metadata!");
+ break;
+ case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
+ OFFLOAD_ENTRY_INFO_TARGET_REGION:
+ OffloadEntriesInfoManager.initializeTargetRegionEntryInfo(
+ /*DeviceID=*/getMDInt(1), /*FileID=*/getMDInt(2),
+ /*ParentName=*/getMDString(3), /*Line=*/getMDInt(4),
+ /*Column=*/getMDInt(5), /*Order=*/getMDInt(6));
+ break;
+ }
+ }
+}
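+
+// Typical flow, with hypothetical file names and triples, and assuming the
+// usual cc1 options behind these LangOpts (treat the exact flag spellings as
+// illustrative): the host compile emits the module (including the metadata
+// above) to host.bc, and the device compile reads it back:
+//
+//   clang -cc1 -fopenmp -triple x86_64-unknown-linux-gnu \
+//         -emit-llvm-bc foo.c -o host.bc
+//   clang -cc1 -fopenmp -triple nvptx64-nvidia-cuda -fopenmp-is-device \
+//         -fopenmp-host-ir-file-path host.bc foo.c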
+
+void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
+ if (!KmpRoutineEntryPtrTy) {
+ // Build the typedef: kmp_int32 (*kmp_routine_entry_t)(kmp_int32, void *);
+ auto &C = CGM.getContext();
+ QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
+ FunctionProtoType::ExtProtoInfo EPI;
+ KmpRoutineEntryPtrQTy = C.getPointerType(
+ C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
+ KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
+ }
+}
+
+static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
+ QualType FieldTy) {
+ auto *Field = FieldDecl::Create(
+ C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
+ C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
+ /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
+ Field->setAccess(AS_public);
+ DC->addDecl(Field);
+ return Field;
+}
+
+QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
+
+ // Make sure the type of the entry is already created. This is the type we
+ // have to create:
+ // struct __tgt_offload_entry{
+ // void *addr; // Pointer to the offload entry info.
+ // // (function or global)
+ // char *name; // Name of the function or global.
+ // size_t size; // Size of the entry info (0 if it is a function).
+ // };
+ if (TgtOffloadEntryQTy.isNull()) {
+ ASTContext &C = CGM.getContext();
+ auto *RD = C.buildImplicitRecord("__tgt_offload_entry");
+ RD->startDefinition();
+ addFieldToRecordDecl(C, RD, C.VoidPtrTy);
+ addFieldToRecordDecl(C, RD, C.getPointerType(C.CharTy));
+ addFieldToRecordDecl(C, RD, C.getSizeType());
+ RD->completeDefinition();
+ TgtOffloadEntryQTy = C.getRecordType(RD);
+ }
+ return TgtOffloadEntryQTy;
+}
+
+QualType CGOpenMPRuntime::getTgtDeviceImageQTy() {
+ // These are the types we need to build:
+ // struct __tgt_device_image{
+ // void *ImageStart; // Pointer to the target code start.
+ // void *ImageEnd; // Pointer to the target code end.
+ // // We also add the host entries to the device image, as it may be useful
+ // // for the target runtime to have access to that information.
+ // __tgt_offload_entry *EntriesBegin; // Begin of the table with all
+ // // the entries.
+ // __tgt_offload_entry *EntriesEnd; // End of the table with all the
+ // // entries (non inclusive).
+ // };
+ if (TgtDeviceImageQTy.isNull()) {
+ ASTContext &C = CGM.getContext();
+ auto *RD = C.buildImplicitRecord("__tgt_device_image");
+ RD->startDefinition();
+ addFieldToRecordDecl(C, RD, C.VoidPtrTy);
+ addFieldToRecordDecl(C, RD, C.VoidPtrTy);
+ addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
+ addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
+ RD->completeDefinition();
+ TgtDeviceImageQTy = C.getRecordType(RD);
+ }
+ return TgtDeviceImageQTy;
+}
+
+QualType CGOpenMPRuntime::getTgtBinaryDescriptorQTy() {
+ // struct __tgt_bin_desc{
+ // int32_t NumDevices; // Number of devices supported.
+ // __tgt_device_image *DeviceImages; // Arrays of device images
+ // // (one per device).
+ // __tgt_offload_entry *EntriesBegin; // Begin of the table with all the
+ // // entries.
+ // __tgt_offload_entry *EntriesEnd; // End of the table with all the
+ // // entries (non inclusive).
+ // };
+ if (TgtBinaryDescriptorQTy.isNull()) {
+ ASTContext &C = CGM.getContext();
+ auto *RD = C.buildImplicitRecord("__tgt_bin_desc");
+ RD->startDefinition();
+ addFieldToRecordDecl(
+ C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
+ addFieldToRecordDecl(C, RD, C.getPointerType(getTgtDeviceImageQTy()));
+ addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
+ addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
+ RD->completeDefinition();
+ TgtBinaryDescriptorQTy = C.getRecordType(RD);
+ }
+ return TgtBinaryDescriptorQTy;
+}
+
+namespace {
+struct PrivateHelpersTy {
+ PrivateHelpersTy(const VarDecl *Original, const VarDecl *PrivateCopy,
+ const VarDecl *PrivateElemInit)
+ : Original(Original), PrivateCopy(PrivateCopy),
+ PrivateElemInit(PrivateElemInit) {}
+ const VarDecl *Original;
+ const VarDecl *PrivateCopy;
+ const VarDecl *PrivateElemInit;
+};
+typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
+} // anonymous namespace
+
+static RecordDecl *
+createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
+ if (!Privates.empty()) {
+ auto &C = CGM.getContext();
+ // Build struct .kmp_privates_t. {
+ // /* private vars */
+ // };
+ auto *RD = C.buildImplicitRecord(".kmp_privates.t");
+ RD->startDefinition();
+ for (auto &&Pair : Privates) {
+ auto *VD = Pair.second.Original;
+ auto Type = VD->getType();
+ Type = Type.getNonReferenceType();
+ auto *FD = addFieldToRecordDecl(C, RD, Type);
+ if (VD->hasAttrs()) {
+ for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
+ E(VD->getAttrs().end());
+ I != E; ++I)
+ FD->addAttr(*I);
+ }
+ }
+ RD->completeDefinition();
+ return RD;
+ }
+ return nullptr;
+}
+
+static RecordDecl *
+createKmpTaskTRecordDecl(CodeGenModule &CGM, QualType KmpInt32Ty,
+ QualType KmpRoutineEntryPointerQTy) {
+ auto &C = CGM.getContext();
+ // Build struct kmp_task_t {
+ // void * shareds;
+ // kmp_routine_entry_t routine;
+ // kmp_int32 part_id;
+ // kmp_routine_entry_t destructors;
+ // };
+ auto *RD = C.buildImplicitRecord("kmp_task_t");
+ RD->startDefinition();
+ addFieldToRecordDecl(C, RD, C.VoidPtrTy);
+ addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
+ addFieldToRecordDecl(C, RD, KmpInt32Ty);
+ addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
+ RD->completeDefinition();
+ return RD;
+}
+
+static RecordDecl *
+createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
+ ArrayRef<PrivateDataTy> Privates) {
+ auto &C = CGM.getContext();
+ // Build struct kmp_task_t_with_privates {
+ // kmp_task_t task_data;
+ // .kmp_privates_t. privates;
+ // };
+ auto *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
+ RD->startDefinition();
+ addFieldToRecordDecl(C, RD, KmpTaskTQTy);
+ if (auto *PrivateRD = createPrivatesRecordDecl(CGM, Privates)) {
+ addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
+ }
+ RD->completeDefinition();
+ return RD;
+}
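+
+// Example (hypothetical variables): for a task with private(d) where d is
+// double and firstprivate(a) where a is int, the privates arrive sorted by
+// decreasing alignment, so the records built above are roughly:
+//
+//   struct .kmp_privates.t { double d; int a; };
+//   struct kmp_task_t_with_privates {
+//     kmp_task_t task_data;
+//     .kmp_privates.t privates;
+//   };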
+
+/// \brief Emit a proxy function which accepts kmp_task_t as the second
+/// argument.
+/// \code
+/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
+/// TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map,
+/// tt->shareds);
+/// return 0;
+/// }
+/// \endcode
+static llvm::Value *
+emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
+ QualType KmpInt32Ty, QualType KmpTaskTWithPrivatesPtrQTy,
+ QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
+ QualType SharedsPtrTy, llvm::Value *TaskFunction,
+ llvm::Value *TaskPrivatesMap) {
+ auto &C = CGM.getContext();
+ FunctionArgList Args;
+ ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty);
+ ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc,
+ /*Id=*/nullptr,
+ KmpTaskTWithPrivatesPtrQTy.withRestrict());
+ Args.push_back(&GtidArg);
+ Args.push_back(&TaskTypeArg);
+ FunctionType::ExtInfo Info;
+ auto &TaskEntryFnInfo =
+ CGM.getTypes().arrangeFreeFunctionDeclaration(KmpInt32Ty, Args, Info,
+ /*isVariadic=*/false);
+ auto *TaskEntryTy = CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
+ auto *TaskEntry =
+ llvm::Function::Create(TaskEntryTy, llvm::GlobalValue::InternalLinkage,
+ ".omp_task_entry.", &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskEntry, TaskEntryFnInfo);
+ CodeGenFunction CGF(CGM);
+ CGF.disableDebugInfo();
+ CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args);
+
+ // TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
+ // tt->task_data.shareds);
+ auto *GtidParam = CGF.EmitLoadOfScalar(
+ CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
+ LValue TDBase = emitLoadOfPointerLValue(
+ CGF, CGF.GetAddrOfLocalVar(&TaskTypeArg), KmpTaskTWithPrivatesPtrQTy);
+ auto *KmpTaskTWithPrivatesQTyRD =
+ cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
+ LValue Base =
+ CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
+ auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
+ auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
+ auto PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
+ auto *PartidParam = CGF.EmitLoadOfLValue(PartIdLVal, Loc).getScalarVal();
+
+ auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
+ auto SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
+ auto *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLoadOfLValue(SharedsLVal, Loc).getScalarVal(),
+ CGF.ConvertTypeForMem(SharedsPtrTy));
+
+ auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
+ llvm::Value *PrivatesParam;
+ if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
+ auto PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
+ PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ PrivatesLVal.getPointer(), CGF.VoidPtrTy);
+ } else {
+ PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ }
+
+ llvm::Value *CallArgs[] = {GtidParam, PartidParam, PrivatesParam,
+ TaskPrivatesMap, SharedsParam};
+ CGF.EmitCallOrInvoke(TaskFunction, CallArgs);
+ CGF.EmitStoreThroughLValue(
+ RValue::get(CGF.Builder.getInt32(/*C=*/0)),
+ CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
+ CGF.FinishFunction();
+ return TaskEntry;
+}
+
+static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
+ SourceLocation Loc,
+ QualType KmpInt32Ty,
+ QualType KmpTaskTWithPrivatesPtrQTy,
+ QualType KmpTaskTWithPrivatesQTy) {
+ auto &C = CGM.getContext();
+ FunctionArgList Args;
+ ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty);
+ ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc,
+ /*Id=*/nullptr,
+ KmpTaskTWithPrivatesPtrQTy.withRestrict());
+ Args.push_back(&GtidArg);
+ Args.push_back(&TaskTypeArg);
+ FunctionType::ExtInfo Info;
+ auto &DestructorFnInfo =
+ CGM.getTypes().arrangeFreeFunctionDeclaration(KmpInt32Ty, Args, Info,
+ /*isVariadic=*/false);
+ auto *DestructorFnTy = CGM.getTypes().GetFunctionType(DestructorFnInfo);
+ auto *DestructorFn =
+ llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
+ ".omp_task_destructor.", &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(/*D=*/nullptr, DestructorFn,
+ DestructorFnInfo);
+ CodeGenFunction CGF(CGM);
+ CGF.disableDebugInfo();
+ CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
+ Args);
+
+ LValue Base = emitLoadOfPointerLValue(
+ CGF, CGF.GetAddrOfLocalVar(&TaskTypeArg), KmpTaskTWithPrivatesPtrQTy);
+ auto *KmpTaskTWithPrivatesQTyRD =
+ cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
+ auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
+ Base = CGF.EmitLValueForField(Base, *FI);
+ for (auto *Field :
+ cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
+ if (auto DtorKind = Field->getType().isDestructedType()) {
+ auto FieldLValue = CGF.EmitLValueForField(Base, Field);
+ CGF.pushDestroy(DtorKind, FieldLValue.getAddress(), Field->getType());
+ }
+ }
+ CGF.FinishFunction();
+ return DestructorFn;
+}
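+
+// Shape of the generated destructor, as pseudo-code (the loop above emits one
+// destruction per destructible field of the privates record):
+//
+//   kmp_int32 .omp_task_destructor.(kmp_int32 gtid,
+//                                   kmp_task_t_with_privates *tt) {
+//     for each field F of tt->privates whose type needs destruction:
+//       destroy F;
+//   }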
+
+/// \brief Emit a privates mapping function for correct handling of private and
+/// firstprivate variables.
+/// \code
+/// void .omp_task_privates_map.(const .privates. *noalias privs, <ty1>
+/// **noalias priv1,..., <tyn> **noalias privn) {
+/// *priv1 = &.privates.priv1;
+/// ...;
+/// *privn = &.privates.privn;
+/// }
+/// \endcode
+static llvm::Value *
+emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
+ ArrayRef<const Expr *> PrivateVars,
+ ArrayRef<const Expr *> FirstprivateVars,
+ QualType PrivatesQTy,
+ ArrayRef<PrivateDataTy> Privates) {
+ auto &C = CGM.getContext();
+ FunctionArgList Args;
+ ImplicitParamDecl TaskPrivatesArg(
+ C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.getPointerType(PrivatesQTy).withConst().withRestrict());
+ Args.push_back(&TaskPrivatesArg);
+ llvm::DenseMap<const VarDecl *, unsigned> PrivateVarsPos;
+ unsigned Counter = 1;
+ for (auto *E: PrivateVars) {
+ Args.push_back(ImplicitParamDecl::Create(
+ C, /*DC=*/nullptr, Loc,
+ /*Id=*/nullptr, C.getPointerType(C.getPointerType(E->getType()))
+ .withConst()
+ .withRestrict()));
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ PrivateVarsPos[VD] = Counter;
+ ++Counter;
+ }
+ for (auto *E : FirstprivateVars) {
+ Args.push_back(ImplicitParamDecl::Create(
+ C, /*DC=*/nullptr, Loc,
+ /*Id=*/nullptr, C.getPointerType(C.getPointerType(E->getType()))
+ .withConst()
+ .withRestrict()));
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ PrivateVarsPos[VD] = Counter;
+ ++Counter;
+ }
+ FunctionType::ExtInfo Info;
+ auto &TaskPrivatesMapFnInfo =
+ CGM.getTypes().arrangeFreeFunctionDeclaration(C.VoidTy, Args, Info,
+ /*isVariadic=*/false);
+ auto *TaskPrivatesMapTy =
+ CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo);
+ auto *TaskPrivatesMap = llvm::Function::Create(
+ TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage,
+ ".omp_task_privates_map.", &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskPrivatesMap,
+ TaskPrivatesMapFnInfo);
+ TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
+ CodeGenFunction CGF(CGM);
+ CGF.disableDebugInfo();
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap,
+ TaskPrivatesMapFnInfo, Args);
+
+ // *privi = &.privates.privi;
+ LValue Base = emitLoadOfPointerLValue(
+ CGF, CGF.GetAddrOfLocalVar(&TaskPrivatesArg), TaskPrivatesArg.getType());
+ auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
+ Counter = 0;
+ for (auto *Field : PrivatesQTyRD->fields()) {
+ auto FieldLVal = CGF.EmitLValueForField(Base, Field);
+ auto *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
+ auto RefLVal = CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
+ auto RefLoadLVal =
+ emitLoadOfPointerLValue(CGF, RefLVal.getAddress(), RefLVal.getType());
+ CGF.EmitStoreOfScalar(FieldLVal.getPointer(), RefLoadLVal);
+ ++Counter;
+ }
+ CGF.FinishFunction();
+ return TaskPrivatesMap;
+}
+
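+// Comparator for llvm::array_pod_sort below: orders private data by
+// decreasing alignment, so the most strictly aligned privates come first in
+// .kmp_privates.t and padding between fields is minimized.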
+static int array_pod_sort_comparator(const PrivateDataTy *P1,
+ const PrivateDataTy *P2) {
+ return P1->first < P2->first ? 1 : (P2->first < P1->first ? -1 : 0);
+}
+
+void CGOpenMPRuntime::emitTaskCall(
+ CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D,
+ bool Tied, llvm::PointerIntPair<llvm::Value *, 1, bool> Final,
+ llvm::Value *TaskFunction, QualType SharedsTy, Address Shareds,
+ const Expr *IfCond, ArrayRef<const Expr *> PrivateVars,
+ ArrayRef<const Expr *> PrivateCopies,
+ ArrayRef<const Expr *> FirstprivateVars,
+ ArrayRef<const Expr *> FirstprivateCopies,
+ ArrayRef<const Expr *> FirstprivateInits,
+ ArrayRef<std::pair<OpenMPDependClauseKind, const Expr *>> Dependences) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ auto &C = CGM.getContext();
+ llvm::SmallVector<PrivateDataTy, 8> Privates;
+ // Aggregate privates and sort them by decreasing alignment.
+ auto I = PrivateCopies.begin();
+ for (auto *E : PrivateVars) {
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ Privates.push_back(std::make_pair(
+ C.getDeclAlign(VD),
+ PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
+ /*PrivateElemInit=*/nullptr)));
+ ++I;
+ }
+ I = FirstprivateCopies.begin();
+ auto IElemInitRef = FirstprivateInits.begin();
+ for (auto *E : FirstprivateVars) {
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ Privates.push_back(std::make_pair(
+ C.getDeclAlign(VD),
+ PrivateHelpersTy(
+ VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl()))));
+ ++I, ++IElemInitRef;
+ }
+ llvm::array_pod_sort(Privates.begin(), Privates.end(),
+ array_pod_sort_comparator);
+ auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
+ // Build type kmp_routine_entry_t (if not built yet).
+ emitKmpRoutineEntryT(KmpInt32Ty);
+ // Build type kmp_task_t (if not built yet).
+ if (KmpTaskTQTy.isNull()) {
+ KmpTaskTQTy = C.getRecordType(
+ createKmpTaskTRecordDecl(CGM, KmpInt32Ty, KmpRoutineEntryPtrQTy));
+ }
+ auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
+ // Build particular struct kmp_task_t for the given task.
+ auto *KmpTaskTWithPrivatesQTyRD =
+ createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
+ auto KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
+ QualType KmpTaskTWithPrivatesPtrQTy =
+ C.getPointerType(KmpTaskTWithPrivatesQTy);
+ auto *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
+ auto *KmpTaskTWithPrivatesPtrTy = KmpTaskTWithPrivatesTy->getPointerTo();
+ auto *KmpTaskTWithPrivatesTySize = getTypeSize(CGF, KmpTaskTWithPrivatesQTy);
+ QualType SharedsPtrTy = C.getPointerType(SharedsTy);
+
+ // Emit the privates mapping function (if there are any private copies).
+ llvm::Value *TaskPrivatesMap = nullptr;
+ auto *TaskPrivatesMapTy =
+ std::next(cast<llvm::Function>(TaskFunction)->getArgumentList().begin(),
+ 3)
+ ->getType();
+ if (!Privates.empty()) {
+ auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
+ TaskPrivatesMap = emitTaskPrivateMappingFunction(
+ CGM, Loc, PrivateVars, FirstprivateVars, FI->getType(), Privates);
+ TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ TaskPrivatesMap, TaskPrivatesMapTy);
+ } else {
+ TaskPrivatesMap = llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(TaskPrivatesMapTy));
+ }
+ // Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid,
+ // kmp_task_t *tt);
+ auto *TaskEntry = emitProxyTaskFunction(
+ CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTy,
+ KmpTaskTQTy, SharedsPtrTy, TaskFunction, TaskPrivatesMap);
+
+ // Build call kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
+ // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
+ // kmp_routine_entry_t *task_entry);
+ // Task flags. Format is taken from
+ // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h,
+ // description of kmp_tasking_flags struct.
+ const unsigned TiedFlag = 0x1;
+ const unsigned FinalFlag = 0x2;
+ unsigned Flags = Tied ? TiedFlag : 0;
+ auto *TaskFlags =
+ Final.getPointer()
+ ? CGF.Builder.CreateSelect(Final.getPointer(),
+ CGF.Builder.getInt32(FinalFlag),
+ CGF.Builder.getInt32(/*C=*/0))
+ : CGF.Builder.getInt32(Final.getInt() ? FinalFlag : 0);
+ TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
+ auto *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
+ llvm::Value *AllocArgs[] = {emitUpdateLocation(CGF, Loc),
+ getThreadID(CGF, Loc), TaskFlags,
+ KmpTaskTWithPrivatesTySize, SharedsSize,
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ TaskEntry, KmpRoutineEntryPtrTy)};
+ auto *NewTask = CGF.EmitRuntimeCall(
+ createRuntimeFunction(OMPRTL__kmpc_omp_task_alloc), AllocArgs);
+ auto *NewTaskNewTaskTTy = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ NewTask, KmpTaskTWithPrivatesPtrTy);
+ LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
+ KmpTaskTWithPrivatesQTy);
+ LValue TDBase =
+ CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
+ // Fill the data in the resulting kmp_task_t record.
+ // Copy shareds if there are any.
+ Address KmpTaskSharedsPtr = Address::invalid();
+ if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
+ KmpTaskSharedsPtr =
+ Address(CGF.EmitLoadOfScalar(
+ CGF.EmitLValueForField(
+ TDBase, *std::next(KmpTaskTQTyRD->field_begin(),
+ KmpTaskTShareds)),
+ Loc),
+ CGF.getNaturalTypeAlignment(SharedsTy));
+ CGF.EmitAggregateCopy(KmpTaskSharedsPtr, Shareds, SharedsTy);
+ }
+ // Emit initial values for private copies (if any).
+ bool NeedsCleanup = false;
+ if (!Privates.empty()) {
+ auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
+ auto PrivatesBase = CGF.EmitLValueForField(Base, *FI);
+ FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
+ LValue SharedsBase;
+ if (!FirstprivateVars.empty()) {
+ SharedsBase = CGF.MakeAddrLValue(
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
+ SharedsTy);
+ }
+ CodeGenFunction::CGCapturedStmtInfo CapturesInfo(
+ cast<CapturedStmt>(*D.getAssociatedStmt()));
+ for (auto &&Pair : Privates) {
+ auto *VD = Pair.second.PrivateCopy;
+ auto *Init = VD->getAnyInitializer();
+ LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI);
+ if (Init) {
+ if (auto *Elem = Pair.second.PrivateElemInit) {
+ auto *OriginalVD = Pair.second.Original;
+ auto *SharedField = CapturesInfo.lookup(OriginalVD);
+ auto SharedRefLValue =
+ CGF.EmitLValueForField(SharedsBase, SharedField);
+ SharedRefLValue = CGF.MakeAddrLValue(
+ Address(SharedRefLValue.getPointer(), C.getDeclAlign(OriginalVD)),
+ SharedRefLValue.getType(), AlignmentSource::Decl);
+ QualType Type = OriginalVD->getType();
+ if (Type->isArrayType()) {
+ // Initialize firstprivate array.
+ if (!isa<CXXConstructExpr>(Init) ||
+ CGF.isTrivialInitializer(Init)) {
+ // Perform simple memcpy.
+ CGF.EmitAggregateAssign(PrivateLValue.getAddress(),
+ SharedRefLValue.getAddress(), Type);
+ } else {
+ // Initialize firstprivate array using element-by-element
+ // initialization.
+ CGF.EmitOMPAggregateAssign(
+ PrivateLValue.getAddress(), SharedRefLValue.getAddress(),
+ Type, [&CGF, Elem, Init, &CapturesInfo](
+ Address DestElement, Address SrcElement) {
+ // Clean up any temporaries needed by the initialization.
+ CodeGenFunction::OMPPrivateScope InitScope(CGF);
+ InitScope.addPrivate(Elem, [SrcElement]() -> Address {
+ return SrcElement;
+ });
+ (void)InitScope.Privatize();
+ // Emit initialization for single element.
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
+ CGF, &CapturesInfo);
+ CGF.EmitAnyExprToMem(Init, DestElement,
+ Init->getType().getQualifiers(),
+ /*IsInitializer=*/false);
+ });
+ }
+ } else {
+ CodeGenFunction::OMPPrivateScope InitScope(CGF);
+ InitScope.addPrivate(Elem, [SharedRefLValue]() -> Address {
+ return SharedRefLValue.getAddress();
+ });
+ (void)InitScope.Privatize();
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
+ CGF.EmitExprAsInit(Init, VD, PrivateLValue,
+ /*capturedByInit=*/false);
+ }
+ } else {
+ CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false);
+ }
+ }
+ NeedsCleanup = NeedsCleanup || FI->getType().isDestructedType();
+ ++FI;
+ }
+ }
+ // Provide pointer to function with destructors for privates.
+ llvm::Value *DestructorFn =
+ NeedsCleanup ? emitDestructorsFunction(CGM, Loc, KmpInt32Ty,
+ KmpTaskTWithPrivatesPtrQTy,
+ KmpTaskTWithPrivatesQTy)
+ : llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(KmpRoutineEntryPtrTy));
+ LValue Destructor = CGF.EmitLValueForField(
+ TDBase, *std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTDestructors));
+ CGF.EmitStoreOfScalar(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ DestructorFn, KmpRoutineEntryPtrTy),
+ Destructor);
+
+ // Process list of dependences.
+ Address DependenciesArray = Address::invalid();
+ unsigned NumDependencies = Dependences.size();
+ if (NumDependencies) {
+ // Dependence kind for RTL.
+ enum RTLDependenceKindTy { DepIn = 0x01, DepInOut = 0x3 };
+ enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
+ RecordDecl *KmpDependInfoRD;
+ QualType FlagsTy =
+ C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
+ llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
+ if (KmpDependInfoTy.isNull()) {
+ KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
+ KmpDependInfoRD->startDefinition();
+ addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
+ addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
+ addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
+ KmpDependInfoRD->completeDefinition();
+ KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
+ } else {
+ KmpDependInfoRD = cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
+ }
+ CharUnits DependencySize = C.getTypeSizeInChars(KmpDependInfoTy);
+ // Define type kmp_depend_info[<Dependences.size()>];
+ QualType KmpDependInfoArrayTy = C.getConstantArrayType(
+ KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies),
+ ArrayType::Normal, /*IndexTypeQuals=*/0);
+ // kmp_depend_info[<Dependences.size()>] deps;
+ DependenciesArray = CGF.CreateMemTemp(KmpDependInfoArrayTy);
+ for (unsigned i = 0; i < NumDependencies; ++i) {
+ const Expr *E = Dependences[i].second;
+ auto Addr = CGF.EmitLValue(E);
+ llvm::Value *Size;
+ QualType Ty = E->getType();
+ if (auto *ASE = dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
+ LValue UpAddrLVal =
+ CGF.EmitOMPArraySectionExpr(ASE, /*LowerBound=*/false);
+ llvm::Value *UpAddr =
+ CGF.Builder.CreateConstGEP1_32(UpAddrLVal.getPointer(), /*Idx0=*/1);
+ llvm::Value *LowIntPtr =
+ CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGM.SizeTy);
+ llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGM.SizeTy);
+ Size = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
+ } else
+ Size = getTypeSize(CGF, Ty);
+ auto Base = CGF.MakeAddrLValue(
+ CGF.Builder.CreateConstArrayGEP(DependenciesArray, i, DependencySize),
+ KmpDependInfoTy);
+ // deps[i].base_addr = &<Dependences[i].second>;
+ auto BaseAddrLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
+ CGF.EmitStoreOfScalar(
+ CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGF.IntPtrTy),
+ BaseAddrLVal);
+ // deps[i].len = sizeof(<Dependences[i].second>);
+ auto LenLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpDependInfoRD->field_begin(), Len));
+ CGF.EmitStoreOfScalar(Size, LenLVal);
+ // deps[i].flags = <Dependences[i].first>;
+ RTLDependenceKindTy DepKind;
+ switch (Dependences[i].first) {
+ case OMPC_DEPEND_in:
+ DepKind = DepIn;
+ break;
+ // Out and InOut dependencies must use the same code.
+ case OMPC_DEPEND_out:
+ case OMPC_DEPEND_inout:
+ DepKind = DepInOut;
+ break;
+ case OMPC_DEPEND_source:
+ case OMPC_DEPEND_sink:
+ case OMPC_DEPEND_unknown:
+ llvm_unreachable("Unknown task dependence type");
+ }
+ auto FlagsLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
+ CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
+ FlagsLVal);
+ }
+ DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.Builder.CreateStructGEP(DependenciesArray, 0, CharUnits::Zero()),
+ CGF.VoidPtrTy);
+ }
+
+ // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
+ // libcall.
+ // Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
+ // *new_task);
+ // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
+ // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
+ // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) if the
+ // dependence list is not empty.
+ auto *ThreadID = getThreadID(CGF, Loc);
+ auto *UpLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
+ llvm::Value *DepTaskArgs[7];
+ if (NumDependencies) {
+ DepTaskArgs[0] = UpLoc;
+ DepTaskArgs[1] = ThreadID;
+ DepTaskArgs[2] = NewTask;
+ DepTaskArgs[3] = CGF.Builder.getInt32(NumDependencies);
+ DepTaskArgs[4] = DependenciesArray.getPointer();
+ DepTaskArgs[5] = CGF.Builder.getInt32(0);
+ DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ }
+ auto &&ThenCodeGen = [this, NumDependencies,
+ &TaskArgs, &DepTaskArgs](CodeGenFunction &CGF) {
+ // TODO: add check for untied tasks.
+ if (NumDependencies) {
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task_with_deps),
+ DepTaskArgs);
+ } else {
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task),
+ TaskArgs);
+ }
+ };
+ typedef CallEndCleanup<std::extent<decltype(TaskArgs)>::value>
+ IfCallEndCleanup;
+
+ llvm::Value *DepWaitTaskArgs[6];
+ if (NumDependencies) {
+ DepWaitTaskArgs[0] = UpLoc;
+ DepWaitTaskArgs[1] = ThreadID;
+ DepWaitTaskArgs[2] = CGF.Builder.getInt32(NumDependencies);
+ DepWaitTaskArgs[3] = DependenciesArray.getPointer();
+ DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
+ DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ }
+ auto &&ElseCodeGen = [this, &TaskArgs, ThreadID, NewTaskNewTaskTTy, TaskEntry,
+ NumDependencies, &DepWaitTaskArgs](CodeGenFunction &CGF) {
+ CodeGenFunction::RunCleanupsScope LocalScope(CGF);
+ // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
+ // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
+ // ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
+ // is specified.
+ if (NumDependencies)
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_wait_deps),
+ DepWaitTaskArgs);
+ // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
+ // kmp_task_t *new_task);
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task_begin_if0),
+ TaskArgs);
+ // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
+ // kmp_task_t *new_task);
+ CGF.EHStack.pushCleanup<IfCallEndCleanup>(
+ NormalAndEHCleanup,
+ createRuntimeFunction(OMPRTL__kmpc_omp_task_complete_if0),
+ llvm::makeArrayRef(TaskArgs));
+
+ // Call proxy_task_entry(gtid, new_task);
+ llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy};
+ CGF.EmitCallOrInvoke(TaskEntry, OutlinedFnArgs);
+ };
+
+ if (IfCond) {
+ emitOMPIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen);
+ } else {
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ ThenCodeGen(CGF);
+ }
+}
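+
+// Putting the pieces together, for something like
+//   #pragma omp task depend(in: x)
+// the code emitted above behaves roughly like this pseudo-code (names are
+// illustrative):
+//
+//   kmp_task_t *t = __kmpc_omp_task_alloc(loc, gtid, flags,
+//                                         sizeof(kmp_task_t_with_privates),
+//                                         sizeof(shareds), .omp_task_entry.);
+//   /* copy shareds, initialize private copies, store destructor pointer */
+//   deps[0] = { (intptr_t)&x, sizeof(x), DepIn };
+//   __kmpc_omp_task_with_deps(loc, gtid, t, 1, deps, 0, NULL);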
+
+/// \brief Emit a reduction operation for each element of an array (required
+/// for array sections): LHS op = RHS.
+/// \param Type Type of array.
+/// \param LHSVar Variable on the left side of the reduction operation
+/// (references element of array in original variable).
+/// \param RHSVar Variable on the right side of the reduction operation
+/// (references element of array in original variable).
+/// \param RedOpGen Generator of reduction operation with use of LHSVar and
+/// RHSVar.
+static void EmitOMPAggregateReduction(
+ CodeGenFunction &CGF, QualType Type, const VarDecl *LHSVar,
+ const VarDecl *RHSVar,
+ const llvm::function_ref<void(CodeGenFunction &CGF, const Expr *,
+ const Expr *, const Expr *)> &RedOpGen,
+ const Expr *XExpr = nullptr, const Expr *EExpr = nullptr,
+ const Expr *UpExpr = nullptr) {
+ // Perform the reduction element by element.
+ QualType ElementTy;
+ Address LHSAddr = CGF.GetAddrOfLocalVar(LHSVar);
+ Address RHSAddr = CGF.GetAddrOfLocalVar(RHSVar);
+
+ // Drill down to the base element type on both arrays.
+ auto ArrayTy = Type->getAsArrayTypeUnsafe();
+ auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr);
+
+ auto RHSBegin = RHSAddr.getPointer();
+ auto LHSBegin = LHSAddr.getPointer();
+ // Compute the address one past the last element.
+ auto LHSEnd = CGF.Builder.CreateGEP(LHSBegin, NumElements);
+ // The basic structure here is a while-do loop.
+ auto BodyBB = CGF.createBasicBlock("omp.arraycpy.body");
+ auto DoneBB = CGF.createBasicBlock("omp.arraycpy.done");
+ auto IsEmpty =
+ CGF.Builder.CreateICmpEQ(LHSBegin, LHSEnd, "omp.arraycpy.isempty");
+ CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
+
+ // Enter the loop body, making that address the current address.
+ auto EntryBB = CGF.Builder.GetInsertBlock();
+ CGF.EmitBlock(BodyBB);
+
+ CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
+
+ llvm::PHINode *RHSElementPHI = CGF.Builder.CreatePHI(
+ RHSBegin->getType(), 2, "omp.arraycpy.srcElementPast");
+ RHSElementPHI->addIncoming(RHSBegin, EntryBB);
+ Address RHSElementCurrent =
+ Address(RHSElementPHI,
+ RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
+
+ llvm::PHINode *LHSElementPHI = CGF.Builder.CreatePHI(
+ LHSBegin->getType(), 2, "omp.arraycpy.destElementPast");
+ LHSElementPHI->addIncoming(LHSBegin, EntryBB);
+ Address LHSElementCurrent =
+ Address(LHSElementPHI,
+ LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
+
+ // Emit copy.
+ CodeGenFunction::OMPPrivateScope Scope(CGF);
+ Scope.addPrivate(LHSVar, [=]() -> Address { return LHSElementCurrent; });
+ Scope.addPrivate(RHSVar, [=]() -> Address { return RHSElementCurrent; });
+ Scope.Privatize();
+ RedOpGen(CGF, XExpr, EExpr, UpExpr);
+ Scope.ForceCleanup();
+
+ // Shift the address forward by one element.
+ auto LHSElementNext = CGF.Builder.CreateConstGEP1_32(
+ LHSElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
+ auto RHSElementNext = CGF.Builder.CreateConstGEP1_32(
+ RHSElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
+ // Check whether we've reached the end.
+ auto Done =
+ CGF.Builder.CreateICmpEQ(LHSElementNext, LHSEnd, "omp.arraycpy.done");
+ CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
+ LHSElementPHI->addIncoming(LHSElementNext, CGF.Builder.GetInsertBlock());
+ RHSElementPHI->addIncoming(RHSElementNext, CGF.Builder.GetInsertBlock());
+
+ // Done.
+ CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
+}
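+
+// The emitted loop is morally equivalent to (pseudo-code):
+//
+//   T *lhs = LHSBegin, *rhs = RHSBegin, *end = LHSBegin + NumElements;
+//   while (lhs != end) { *lhs = RedOp(*lhs, *rhs); ++lhs; ++rhs; }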
+
+static llvm::Value *emitReductionFunction(CodeGenModule &CGM,
+ llvm::Type *ArgsType,
+ ArrayRef<const Expr *> Privates,
+ ArrayRef<const Expr *> LHSExprs,
+ ArrayRef<const Expr *> RHSExprs,
+ ArrayRef<const Expr *> ReductionOps) {
+ auto &C = CGM.getContext();
+
+ // void reduction_func(void *LHSArg, void *RHSArg);
+ FunctionArgList Args;
+ ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, SourceLocation(), /*Id=*/nullptr,
+ C.VoidPtrTy);
+ ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, SourceLocation(), /*Id=*/nullptr,
+ C.VoidPtrTy);
+ Args.push_back(&LHSArg);
+ Args.push_back(&RHSArg);
+ FunctionType::ExtInfo EI;
+ auto &CGFI = CGM.getTypes().arrangeFreeFunctionDeclaration(
+ C.VoidTy, Args, EI, /*isVariadic=*/false);
+ auto *Fn = llvm::Function::Create(
+ CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
+ ".omp.reduction.reduction_func", &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, CGFI);
+ CodeGenFunction CGF(CGM);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);
+
+ // Dst = (void*[n])(LHSArg);
+ // Src = (void*[n])(RHSArg);
+ Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
+ ArgsType), CGF.getPointerAlign());
+ Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
+ ArgsType), CGF.getPointerAlign());
+
+ // ...
+ // *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
+ // ...
+ CodeGenFunction::OMPPrivateScope Scope(CGF);
+ auto IPriv = Privates.begin();
+ unsigned Idx = 0;
+ for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) {
+ auto RHSVar = cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
+ Scope.addPrivate(RHSVar, [&]() -> Address {
+ return emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar);
+ });
+ auto LHSVar = cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
+ Scope.addPrivate(LHSVar, [&]() -> Address {
+ return emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar);
+ });
+ QualType PrivTy = (*IPriv)->getType();
+ if (PrivTy->isArrayType()) {
+ // Get array size and emit VLA type.
+ ++Idx;
+ Address Elem =
+ CGF.Builder.CreateConstArrayGEP(LHS, Idx, CGF.getPointerSize());
+ llvm::Value *Ptr = CGF.Builder.CreateLoad(Elem);
+ CodeGenFunction::OpaqueValueMapping OpaqueMap(
+ CGF,
+ cast<OpaqueValueExpr>(
+ CGF.getContext().getAsVariableArrayType(PrivTy)->getSizeExpr()),
+ RValue::get(CGF.Builder.CreatePtrToInt(Ptr, CGF.SizeTy)));
+ CGF.EmitVariablyModifiedType(PrivTy);
+ }
+ }
+ Scope.Privatize();
+ IPriv = Privates.begin();
+ auto ILHS = LHSExprs.begin();
+ auto IRHS = RHSExprs.begin();
+ for (auto *E : ReductionOps) {
+ if ((*IPriv)->getType()->isArrayType()) {
+ // Emit reduction for array section.
+ auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
+ auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
+ EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), LHSVar, RHSVar,
+ [=](CodeGenFunction &CGF, const Expr *,
+ const Expr *,
+ const Expr *) { CGF.EmitIgnoredExpr(E); });
+ } else
+ // Emit reduction for array subscript or single variable.
+ CGF.EmitIgnoredExpr(E);
+ ++IPriv, ++ILHS, ++IRHS;
+ }
+ Scope.ForceCleanup();
+ CGF.FinishFunction();
+ return Fn;
+}
+
+void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
+ ArrayRef<const Expr *> Privates,
+ ArrayRef<const Expr *> LHSExprs,
+ ArrayRef<const Expr *> RHSExprs,
+ ArrayRef<const Expr *> ReductionOps,
+ bool WithNowait, bool SimpleReduction) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // The following code should be emitted for reduction:
+ //
+ // static kmp_critical_name lock = { 0 };
+ //
+ // void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
+ // *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
+ // ...
+ // *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
+ // *(Type<n>-1*)rhs[<n>-1]);
+ // }
+ //
+ // ...
+ // void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
+ // switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
+ // RedList, reduce_func, &<lock>)) {
+ // case 1:
+ // ...
+ // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
+ // ...
+ // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
+ // break;
+ // case 2:
+ // ...
+ // Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
+ // ...
+ // [__kmpc_end_reduce(<loc>, <gtid>, &<lock>);]
+ // break;
+ // default:;
+ // }
+ //
+ // If SimpleReduction is true, only the following code is generated:
+ // ...
+ // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
+ // ...
+
+ auto &C = CGM.getContext();
+
+ if (SimpleReduction) {
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ auto IPriv = Privates.begin();
+ auto ILHS = LHSExprs.begin();
+ auto IRHS = RHSExprs.begin();
+ for (auto *E : ReductionOps) {
+ if ((*IPriv)->getType()->isArrayType()) {
+ auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
+ auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
+ EmitOMPAggregateReduction(
+ CGF, (*IPriv)->getType(), LHSVar, RHSVar,
+ [=](CodeGenFunction &CGF, const Expr *, const Expr *,
+ const Expr *) { CGF.EmitIgnoredExpr(E); });
+ } else
+ CGF.EmitIgnoredExpr(E);
+ ++IPriv, ++ILHS, ++IRHS;
+ }
+ return;
+ }
+
+ // 1. Build a list of reduction variables.
+ // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
+ auto Size = RHSExprs.size();
+ for (auto *E : Privates) {
+ if (E->getType()->isArrayType())
+ // Reserve a slot for the array size.
+ ++Size;
+ }
+ llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
+ QualType ReductionArrayTy =
+ C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
+ Address ReductionList =
+ CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
+ auto IPriv = Privates.begin();
+ unsigned Idx = 0;
+ for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
+ Address Elem =
+ CGF.Builder.CreateConstArrayGEP(ReductionList, Idx, CGF.getPointerSize());
+ CGF.Builder.CreateStore(
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
+ Elem);
+ if ((*IPriv)->getType()->isArrayType()) {
+ // Store array size.
+ ++Idx;
+ Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
+ CGF.getPointerSize());
+ CGF.Builder.CreateStore(
+ CGF.Builder.CreateIntToPtr(
+ CGF.Builder.CreateIntCast(
+ CGF.getVLASize(CGF.getContext().getAsVariableArrayType(
+ (*IPriv)->getType()))
+ .first,
+ CGF.SizeTy, /*isSigned=*/false),
+ CGF.VoidPtrTy),
+ Elem);
+ }
+ }
+
+ // 2. Emit reduce_func().
+ auto *ReductionFn = emitReductionFunction(
+ CGM, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
+ LHSExprs, RHSExprs, ReductionOps);
+
+ // 3. Create static kmp_critical_name lock = { 0 };
+ auto *Lock = getCriticalRegionLock(".reduction");
+
+ // 4. Build res = __kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
+ // RedList, reduce_func, &<lock>);
+ auto *IdentTLoc = emitUpdateLocation(
+ CGF, Loc,
+ static_cast<OpenMPLocationFlags>(OMP_IDENT_KMPC | OMP_ATOMIC_REDUCE));
+ auto *ThreadId = getThreadID(CGF, Loc);
+ auto *ReductionArrayTySize = getTypeSize(CGF, ReductionArrayTy);
+ auto *RL =
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(ReductionList.getPointer(),
+ CGF.VoidPtrTy);
+ llvm::Value *Args[] = {
+ IdentTLoc, // ident_t *<loc>
+ ThreadId, // i32 <gtid>
+ CGF.Builder.getInt32(RHSExprs.size()), // i32 <n>
+ ReductionArrayTySize, // size_type sizeof(RedList)
+ RL, // void *RedList
+ ReductionFn, // void (*) (void *, void *) <reduce_func>
+ Lock // kmp_critical_name *&<lock>
+ };
+ auto Res = CGF.EmitRuntimeCall(
+ createRuntimeFunction(WithNowait ? OMPRTL__kmpc_reduce_nowait
+ : OMPRTL__kmpc_reduce),
+ Args);
+
+ // 5. Build switch(res)
+ auto *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
+ auto *SwInst = CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/2);
+
+ // 6. Build case 1:
+ // ...
+ // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
+ // ...
+ // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
+ // break;
+ auto *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
+ SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
+ CGF.EmitBlock(Case1BB);
+
+ {
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ // Add emission of __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
+ llvm::Value *EndArgs[] = {
+ IdentTLoc, // ident_t *<loc>
+ ThreadId, // i32 <gtid>
+ Lock // kmp_critical_name *&<lock>
+ };
+ CGF.EHStack
+ .pushCleanup<CallEndCleanup<std::extent<decltype(EndArgs)>::value>>(
+ NormalAndEHCleanup,
+ createRuntimeFunction(WithNowait ? OMPRTL__kmpc_end_reduce_nowait
+ : OMPRTL__kmpc_end_reduce),
+ llvm::makeArrayRef(EndArgs));
+ auto IPriv = Privates.begin();
+ auto ILHS = LHSExprs.begin();
+ auto IRHS = RHSExprs.begin();
+ for (auto *E : ReductionOps) {
+ if ((*IPriv)->getType()->isArrayType()) {
+ // Emit reduction for array section.
+ auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
+ auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
+ EmitOMPAggregateReduction(
+ CGF, (*IPriv)->getType(), LHSVar, RHSVar,
+ [=](CodeGenFunction &CGF, const Expr *, const Expr *,
+ const Expr *) { CGF.EmitIgnoredExpr(E); });
+ } else
+ // Emit reduction for array subscript or single variable.
+ CGF.EmitIgnoredExpr(E);
+ ++IPriv, ++ILHS, ++IRHS;
+ }
+ }
+
+ CGF.EmitBranch(DefaultBB);
+
+ // 7. Build case 2:
+ // ...
+ // Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
+ // ...
+ // break;
+ auto *Case2BB = CGF.createBasicBlock(".omp.reduction.case2");
+ SwInst->addCase(CGF.Builder.getInt32(2), Case2BB);
+ CGF.EmitBlock(Case2BB);
+
+ {
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ if (!WithNowait) {
+ // Add emission of __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
+ llvm::Value *EndArgs[] = {
+ IdentTLoc, // ident_t *<loc>
+ ThreadId, // i32 <gtid>
+ Lock // kmp_critical_name *&<lock>
+ };
+ CGF.EHStack
+ .pushCleanup<CallEndCleanup<std::extent<decltype(EndArgs)>::value>>(
+ NormalAndEHCleanup,
+ createRuntimeFunction(OMPRTL__kmpc_end_reduce),
+ llvm::makeArrayRef(EndArgs));
+ }
+ auto ILHS = LHSExprs.begin();
+ auto IRHS = RHSExprs.begin();
+ auto IPriv = Privates.begin();
+ for (auto *E : ReductionOps) {
+ const Expr *XExpr = nullptr;
+ const Expr *EExpr = nullptr;
+ const Expr *UpExpr = nullptr;
+ BinaryOperatorKind BO = BO_Comma;
+ if (auto *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_Assign) {
+ XExpr = BO->getLHS();
+ UpExpr = BO->getRHS();
+ }
+ }
+ // Try to emit update expression as a simple atomic.
+ auto *RHSExpr = UpExpr;
+ if (RHSExpr) {
+ // Analyze RHS part of the whole expression.
+ if (auto *ACO = dyn_cast<AbstractConditionalOperator>(
+ RHSExpr->IgnoreParenImpCasts())) {
+ // If this is a conditional operator, analyze its condition for
+ // min/max reduction operator.
+ RHSExpr = ACO->getCond();
+ }
+ if (auto *BORHS =
+ dyn_cast<BinaryOperator>(RHSExpr->IgnoreParenImpCasts())) {
+ EExpr = BORHS->getRHS();
+ BO = BORHS->getOpcode();
+ }
+ }
+ if (XExpr) {
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
+ auto &&AtomicRedGen = [this, BO, VD, IPriv,
+ Loc](CodeGenFunction &CGF, const Expr *XExpr,
+ const Expr *EExpr, const Expr *UpExpr) {
+ LValue X = CGF.EmitLValue(XExpr);
+ RValue E;
+ if (EExpr)
+ E = CGF.EmitAnyExpr(EExpr);
+ CGF.EmitOMPAtomicSimpleUpdateExpr(
+ X, E, BO, /*IsXLHSInRHSPart=*/true, llvm::Monotonic, Loc,
+ [&CGF, UpExpr, VD, IPriv](RValue XRValue) {
+ CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
+ PrivateScope.addPrivate(VD, [&CGF, VD, XRValue]() -> Address {
+ Address LHSTemp = CGF.CreateMemTemp(VD->getType());
+ CGF.EmitStoreThroughLValue(
+ XRValue, CGF.MakeAddrLValue(LHSTemp, VD->getType()));
+ return LHSTemp;
+ });
+ (void)PrivateScope.Privatize();
+ return CGF.EmitAnyExpr(UpExpr);
+ });
+ };
+ if ((*IPriv)->getType()->isArrayType()) {
+ // Emit atomic reduction for array section.
+ auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
+ EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), VD, RHSVar,
+ AtomicRedGen, XExpr, EExpr, UpExpr);
+ } else
+ // Emit atomic reduction for array subscript or single variable.
+ AtomicRedGen(CGF, XExpr, EExpr, UpExpr);
+ } else {
+ // Emit as a critical region.
+ auto &&CritRedGen = [this, E, Loc](CodeGenFunction &CGF, const Expr *,
+ const Expr *, const Expr *) {
+ emitCriticalRegion(
+ CGF, ".atomic_reduction",
+ [E](CodeGenFunction &CGF) { CGF.EmitIgnoredExpr(E); }, Loc);
+ };
+ if ((*IPriv)->getType()->isArrayType()) {
+ auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
+ auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
+ EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), LHSVar, RHSVar,
+ CritRedGen);
+ } else
+ CritRedGen(CGF, nullptr, nullptr, nullptr);
+ }
+ ++ILHS, ++IRHS, ++IPriv;
+ }
+ }
+
+ CGF.EmitBranch(DefaultBB);
+ CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
+}
+
+void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
+ SourceLocation Loc) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
+ // global_tid);
+ llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
+ // Ignore return result until untied tasks are supported.
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskwait), Args);
+}
+
+void CGOpenMPRuntime::emitInlinedDirective(CodeGenFunction &CGF,
+ OpenMPDirectiveKind InnerKind,
+ const RegionCodeGenTy &CodeGen,
+ bool HasCancel) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ InlinedOpenMPRegionRAII Region(CGF, CodeGen, InnerKind, HasCancel);
+ CGF.CapturedStmtInfo->EmitBody(CGF, /*S=*/nullptr);
+}
+
+namespace {
+enum RTCancelKind {
+ CancelNoreq = 0,
+ CancelParallel = 1,
+ CancelLoop = 2,
+ CancelSections = 3,
+ CancelTaskgroup = 4
+};
+}
+
+static RTCancelKind getCancellationKind(OpenMPDirectiveKind CancelRegion) {
+ RTCancelKind CancelKind = CancelNoreq;
+ if (CancelRegion == OMPD_parallel)
+ CancelKind = CancelParallel;
+ else if (CancelRegion == OMPD_for)
+ CancelKind = CancelLoop;
+ else if (CancelRegion == OMPD_sections)
+ CancelKind = CancelSections;
+ else {
+ assert(CancelRegion == OMPD_taskgroup);
+ CancelKind = CancelTaskgroup;
+ }
+ return CancelKind;
+}
+
+void CGOpenMPRuntime::emitCancellationPointCall(
+ CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPDirectiveKind CancelRegion) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Build call kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
+ // global_tid, kmp_int32 cncl_kind);
+ if (auto *OMPRegionInfo =
+ dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
+ if (OMPRegionInfo->getDirectiveKind() == OMPD_single)
+ return;
+ if (OMPRegionInfo->hasCancel()) {
+ llvm::Value *Args[] = {
+ emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
+ CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
+ // Emit the runtime call and check whether cancellation was requested.
+ auto *Result = CGF.EmitRuntimeCall(
+ createRuntimeFunction(OMPRTL__kmpc_cancellationpoint), Args);
+ // if (__kmpc_cancellationpoint()) {
+ // __kmpc_cancel_barrier();
+ // exit from construct;
+ // }
+ auto *ExitBB = CGF.createBasicBlock(".cancel.exit");
+ auto *ContBB = CGF.createBasicBlock(".cancel.continue");
+ auto *Cmp = CGF.Builder.CreateIsNotNull(Result);
+ CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
+ CGF.EmitBlock(ExitBB);
+ // __kmpc_cancel_barrier();
+ emitBarrierCall(CGF, Loc, OMPD_unknown, /*EmitChecks=*/false);
+ // exit from construct;
+ auto CancelDest =
+ CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
+ CGF.EmitBranchThroughCleanup(CancelDest);
+ CGF.EmitBlock(ContBB, /*IsFinished=*/true);
+ }
+ }
+}
+
+void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
+ const Expr *IfCond,
+ OpenMPDirectiveKind CancelRegion) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Build call kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
+ // kmp_int32 cncl_kind);
+ if (auto *OMPRegionInfo =
+ dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
+ if (OMPRegionInfo->getDirectiveKind() == OMPD_single)
+ return;
+ auto &&ThenGen = [this, Loc, CancelRegion,
+ OMPRegionInfo](CodeGenFunction &CGF) {
+ llvm::Value *Args[] = {
+ emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
+ CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
+ // Emit the runtime call and check whether cancellation was requested.
+ auto *Result =
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_cancel), Args);
+ // if (__kmpc_cancel()) {
+ // __kmpc_cancel_barrier();
+ // exit from construct;
+ // }
+ auto *ExitBB = CGF.createBasicBlock(".cancel.exit");
+ auto *ContBB = CGF.createBasicBlock(".cancel.continue");
+ auto *Cmp = CGF.Builder.CreateIsNotNull(Result);
+ CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
+ CGF.EmitBlock(ExitBB);
+ // __kmpc_cancel_barrier();
+ emitBarrierCall(CGF, Loc, OMPD_unknown, /*EmitChecks=*/false);
+ // exit from construct;
+ auto CancelDest =
+ CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
+ CGF.EmitBranchThroughCleanup(CancelDest);
+ CGF.EmitBlock(ContBB, /*IsFinished=*/true);
+ };
+ if (IfCond)
+ emitOMPIfClause(CGF, IfCond, ThenGen, [](CodeGenFunction &) {});
+ else
+ ThenGen(CGF);
+ }
+}
+
+/// \brief Obtain information that uniquely identifies a target entry. This
+/// consists of the file and device IDs as well as line and column numbers
+/// associated with the relevant entry source location.
+static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
+ unsigned &DeviceID, unsigned &FileID,
+ unsigned &LineNum, unsigned &ColumnNum) {
+
+ auto &SM = C.getSourceManager();
+
+ // The loc should always be valid and have a file ID (the user cannot use
+ // #pragma directives in macros).
+
+ assert(Loc.isValid() && "Source location is expected to be always valid.");
+ assert(Loc.isFileID() && "Source location is expected to refer to a file.");
+
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ assert(PLoc.isValid() && "Source location is expected to be always valid.");
+
+ llvm::sys::fs::UniqueID ID;
+ if (llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
+ llvm_unreachable("Source file with target region no longer exists!");
+
+ DeviceID = ID.getDevice();
+ FileID = ID.getFile();
+ LineNum = PLoc.getLine();
+ ColumnNum = PLoc.getColumn();
+}
+
+void CGOpenMPRuntime::emitTargetOutlinedFunction(
+ const OMPExecutableDirective &D, StringRef ParentName,
+ llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
+ bool IsOffloadEntry) {
+
+ assert(!ParentName.empty() && "Invalid target region parent name!");
+
+ const CapturedStmt &CS = *cast<CapturedStmt>(D.getAssociatedStmt());
+
+ // Emit target region as a standalone region.
+ auto &&CodeGen = [&CS](CodeGenFunction &CGF) {
+ CGF.EmitStmt(CS.getCapturedStmt());
+ };
+
+ // Create a unique name for the proxy/entry function using the source
+ // location information of the current target region. The name will be
+ // something like:
+ //
+ // .omp_offloading.DD.FFFF.PP.lBB.cCC
+ //
+ // where DD and FFFF are the device and file IDs that uniquely identify the
+ // file, PP is the mangled name of the function that encloses the target
+ // region, BB is the line number of the target region, and CC is the column
+ // number of the target region.
+
+ unsigned DeviceID;
+ unsigned FileID;
+ unsigned Line;
+ unsigned Column;
+ getTargetEntryUniqueInfo(CGM.getContext(), D.getLocStart(), DeviceID, FileID,
+ Line, Column);
+ SmallString<64> EntryFnName;
+ {
+ llvm::raw_svector_ostream OS(EntryFnName);
+ OS << ".omp_offloading" << llvm::format(".%x", DeviceID)
+ << llvm::format(".%x.", FileID) << ParentName << ".l" << Line << ".c"
+ << Column;
+ }
+
+ CodeGenFunction CGF(CGM, true);
+ CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
+
+ OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS);
+
+ // If this target outlined function is not an offload entry, we don't need to
+ // register it.
+ if (!IsOffloadEntry)
+ return;
+
+ // The target region ID is used by the runtime library to identify the current
+ // target region, so it only has to be unique and not necessarily point to
+ // anything. It could be the pointer to the outlined function that implements
+ // the target region, but we aren't using that so that the compiler doesn't
+ // need to keep that, and could therefore inline the host function if proven
+ // worthwhile during optimization. In the other hand, if emitting code for the
+ // device, the ID has to be the function address so that it can retrieved from
+ // the offloading entry and launched by the runtime library. We also mark the
+ // outlined function to have external linkage in case we are emitting code for
+ // the device, because these functions will be entry points to the device.
+
+ if (CGM.getLangOpts().OpenMPIsDevice) {
+ OutlinedFnID = llvm::ConstantExpr::getBitCast(OutlinedFn, CGM.Int8PtrTy);
+ OutlinedFn->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ } else
+ OutlinedFnID = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
+ llvm::GlobalValue::PrivateLinkage,
+ llvm::Constant::getNullValue(CGM.Int8Ty), ".omp_offload.region_id");
+
+ // Register the information for the entry associated with this target region.
+ OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
+ DeviceID, FileID, ParentName, Line, Column, OutlinedFn, OutlinedFnID);
+ return;
+}
+
+void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D,
+ llvm::Value *OutlinedFn,
+ llvm::Value *OutlinedFnID,
+ const Expr *IfCond, const Expr *Device,
+ ArrayRef<llvm::Value *> CapturedVars) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ /// \brief Values for bit flags used to specify the mapping type for
+ /// offloading.
+ enum OpenMPOffloadMappingFlags {
+ /// \brief Allocate memory on the device and move data from host to device.
+ OMP_MAP_TO = 0x01,
+ /// \brief Allocate memory on the device and move data from device to host.
+ OMP_MAP_FROM = 0x02,
+ /// \brief The element passed to the device is a pointer.
+ OMP_MAP_PTR = 0x20,
+ /// \brief Pass the element to the device by value.
+ OMP_MAP_BYCOPY = 0x80,
+ };
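+ // For instance, the capture handling below maps an aggregate captured by
+ // reference with OMP_MAP_TO | OMP_MAP_FROM (0x03) and adds OMP_MAP_PTR for
+ // pointer captures.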
+
+ enum OpenMPOffloadingReservedDeviceIDs {
+ /// \brief Device ID used if the device was not defined; per the spec, the
+ /// runtime should get it from environment variables.
+ OMP_DEVICEID_UNDEF = -1,
+ };
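+ // When no 'device' clause is present, OMP_DEVICEID_UNDEF is passed to
+ // __tgt_target below and the runtime resolves the default device (per the
+ // spec, from environment variables).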
+
+ assert(OutlinedFn && "Invalid outlined function!");
+
+ auto &Ctx = CGF.getContext();
+
+ // Fill up the arrays with all the captured variables.
+ SmallVector<llvm::Value *, 16> BasePointers;
+ SmallVector<llvm::Value *, 16> Pointers;
+ SmallVector<llvm::Value *, 16> Sizes;
+ SmallVector<unsigned, 16> MapTypes;
+
+ bool hasVLACaptures = false;
+
+ const CapturedStmt &CS = *cast<CapturedStmt>(D.getAssociatedStmt());
+ auto RI = CS.getCapturedRecordDecl()->field_begin();
+ // auto II = CS.capture_init_begin();
+ auto CV = CapturedVars.begin();
+ for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
+ CE = CS.capture_end();
+ CI != CE; ++CI, ++RI, ++CV) {
+ StringRef Name;
+ QualType Ty;
+ llvm::Value *BasePointer;
+ llvm::Value *Pointer;
+ llvm::Value *Size;
+ unsigned MapType;
+
+ // VLA sizes are passed to the outlined region by copy.
+ if (CI->capturesVariableArrayType()) {
+ BasePointer = Pointer = *CV;
+ Size = getTypeSize(CGF, RI->getType());
+ // Copy to the device as an argument. No need to retrieve it.
+ MapType = OMP_MAP_BYCOPY;
+ hasVLACaptures = true;
+ } else if (CI->capturesThis()) {
+ BasePointer = Pointer = *CV;
+ const PointerType *PtrTy = cast<PointerType>(RI->getType().getTypePtr());
+ Size = getTypeSize(CGF, PtrTy->getPointeeType());
+ // Default map type.
+ MapType = OMP_MAP_TO | OMP_MAP_FROM;
+ } else if (CI->capturesVariableByCopy()) {
+ MapType = OMP_MAP_BYCOPY;
+ if (!RI->getType()->isAnyPointerType()) {
+ // If the field is not a pointer, we need to save the actual value and
+ // load it as a void pointer.
+ auto DstAddr = CGF.CreateMemTemp(
+ Ctx.getUIntPtrType(),
+ Twine(CI->getCapturedVar()->getName()) + ".casted");
+ LValue DstLV = CGF.MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());
+
+ auto *SrcAddrVal = CGF.EmitScalarConversion(
+ DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
+ Ctx.getPointerType(RI->getType()), SourceLocation());
+ LValue SrcLV =
+ CGF.MakeNaturalAlignAddrLValue(SrcAddrVal, RI->getType());
+
+ // Store the value using the source type pointer.
+ CGF.EmitStoreThroughLValue(RValue::get(*CV), SrcLV);
+
+ // Load the value using the destination type pointer.
+ BasePointer = Pointer =
+ CGF.EmitLoadOfLValue(DstLV, SourceLocation()).getScalarVal();
+ } else {
+ MapType |= OMP_MAP_PTR;
+ BasePointer = Pointer = *CV;
+ }
+ Size = getTypeSize(CGF, RI->getType());
+ } else {
+ assert(CI->capturesVariable() && "Expected captured reference.");
+ BasePointer = Pointer = *CV;
+
+ const ReferenceType *PtrTy =
+ cast<ReferenceType>(RI->getType().getTypePtr());
+ QualType ElementType = PtrTy->getPointeeType();
+ Size = getTypeSize(CGF, ElementType);
+ // The default map type for a scalar/complex type is 'to' because by
+ // default the value doesn't have to be retrieved. For an aggregate type,
+ // the default is 'tofrom'.
+ MapType = ElementType->isAggregateType() ? (OMP_MAP_TO | OMP_MAP_FROM)
+ : OMP_MAP_TO;
+ if (ElementType->isAnyPointerType())
+ MapType |= OMP_MAP_PTR;
+ }
+
+ BasePointers.push_back(BasePointer);
+ Pointers.push_back(Pointer);
+ Sizes.push_back(Size);
+ MapTypes.push_back(MapType);
+ }
+
+ // Keep track of whether the host function has to be executed.
+ auto OffloadErrorQType =
+ Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true);
+ auto OffloadError = CGF.MakeAddrLValue(
+ CGF.CreateMemTemp(OffloadErrorQType, ".run_host_version"),
+ OffloadErrorQType);
+ CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty),
+ OffloadError);
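+ // By the convention used here, a non-zero value left in .run_host_version
+ // (either a failing __tgt_target return code or the -1 stored when
+ // offloading is not attempted) triggers the host fallback emitted at the end
+ // of this function.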
+
+ // Fill up the pointer arrays and transfer execution to the device.
+ auto &&ThenGen = [this, &Ctx, &BasePointers, &Pointers, &Sizes, &MapTypes,
+ hasVLACaptures, Device, OutlinedFnID, OffloadError,
+ OffloadErrorQType](CodeGenFunction &CGF) {
+ unsigned PointerNumVal = BasePointers.size();
+ llvm::Value *PointerNum = CGF.Builder.getInt32(PointerNumVal);
+ llvm::Value *BasePointersArray;
+ llvm::Value *PointersArray;
+ llvm::Value *SizesArray;
+ llvm::Value *MapTypesArray;
+
+ if (PointerNumVal) {
+ llvm::APInt PointerNumAP(32, PointerNumVal, /*isSigned=*/true);
+ QualType PointerArrayType = Ctx.getConstantArrayType(
+ Ctx.VoidPtrTy, PointerNumAP, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
+
+ BasePointersArray =
+ CGF.CreateMemTemp(PointerArrayType, ".offload_baseptrs").getPointer();
+ PointersArray =
+ CGF.CreateMemTemp(PointerArrayType, ".offload_ptrs").getPointer();
+
+ // If we don't have any VLA types, we can use a constant array for the map
+ // sizes, otherwise we need to fill up the arrays as we do for the
+ // pointers.
+ if (hasVLACaptures) {
+ QualType SizeArrayType = Ctx.getConstantArrayType(
+ Ctx.getSizeType(), PointerNumAP, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
+ SizesArray =
+ CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
+ } else {
+ // We expect all the sizes to be constant, so we collect them to create
+ // a constant array.
+ SmallVector<llvm::Constant *, 16> ConstSizes;
+ for (auto S : Sizes)
+ ConstSizes.push_back(cast<llvm::Constant>(S));
+
+ auto *SizesArrayInit = llvm::ConstantArray::get(
+ llvm::ArrayType::get(CGM.SizeTy, ConstSizes.size()), ConstSizes);
+ auto *SizesArrayGbl = new llvm::GlobalVariable(
+ CGM.getModule(), SizesArrayInit->getType(),
+ /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
+ SizesArrayInit, ".offload_sizes");
+ SizesArrayGbl->setUnnamedAddr(true);
+ SizesArray = SizesArrayGbl;
+ }
+
+ // The map types are always constant so we don't need to generate code to
+ // fill arrays. Instead, we create an array constant.
+ llvm::Constant *MapTypesArrayInit =
+ llvm::ConstantDataArray::get(CGF.Builder.getContext(), MapTypes);
+ auto *MapTypesArrayGbl = new llvm::GlobalVariable(
+ CGM.getModule(), MapTypesArrayInit->getType(),
+ /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
+ MapTypesArrayInit, ".offload_maptypes");
+ MapTypesArrayGbl->setUnnamedAddr(true);
+ MapTypesArray = MapTypesArrayGbl;
+
+ for (unsigned i = 0; i < PointerNumVal; ++i) {
+
+ llvm::Value *BPVal = BasePointers[i];
+ if (BPVal->getType()->isPointerTy())
+ BPVal = CGF.Builder.CreateBitCast(BPVal, CGM.VoidPtrTy);
+ else {
+ assert(BPVal->getType()->isIntegerTy() &&
+ "If not a pointer, the value type must be an integer.");
+ BPVal = CGF.Builder.CreateIntToPtr(BPVal, CGM.VoidPtrTy);
+ }
+ llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
+ llvm::ArrayType::get(CGM.VoidPtrTy, PointerNumVal),
+ BasePointersArray, 0, i);
+ Address BPAddr(BP, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
+ CGF.Builder.CreateStore(BPVal, BPAddr);
+
+ llvm::Value *PVal = Pointers[i];
+ if (PVal->getType()->isPointerTy())
+ PVal = CGF.Builder.CreateBitCast(PVal, CGM.VoidPtrTy);
+ else {
+ assert(PVal->getType()->isIntegerTy() &&
+ "If not a pointer, the value type must be an integer.");
+ PVal = CGF.Builder.CreateIntToPtr(PVal, CGM.VoidPtrTy);
+ }
+ llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
+ llvm::ArrayType::get(CGM.VoidPtrTy, PointerNumVal), PointersArray,
+ 0, i);
+ Address PAddr(P, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
+ CGF.Builder.CreateStore(PVal, PAddr);
+
+ if (hasVLACaptures) {
+ llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
+ llvm::ArrayType::get(CGM.SizeTy, PointerNumVal), SizesArray,
+ /*Idx0=*/0,
+ /*Idx1=*/i);
+ Address SAddr(S, Ctx.getTypeAlignInChars(Ctx.getSizeType()));
+ CGF.Builder.CreateStore(CGF.Builder.CreateIntCast(
+ Sizes[i], CGM.SizeTy, /*isSigned=*/true),
+ SAddr);
+ }
+ }
+
+ BasePointersArray = CGF.Builder.CreateConstInBoundsGEP2_32(
+ llvm::ArrayType::get(CGM.VoidPtrTy, PointerNumVal), BasePointersArray,
+ /*Idx0=*/0, /*Idx1=*/0);
+ PointersArray = CGF.Builder.CreateConstInBoundsGEP2_32(
+ llvm::ArrayType::get(CGM.VoidPtrTy, PointerNumVal), PointersArray,
+ /*Idx0=*/0,
+ /*Idx1=*/0);
+ SizesArray = CGF.Builder.CreateConstInBoundsGEP2_32(
+ llvm::ArrayType::get(CGM.SizeTy, PointerNumVal), SizesArray,
+ /*Idx0=*/0, /*Idx1=*/0);
+ MapTypesArray = CGF.Builder.CreateConstInBoundsGEP2_32(
+ llvm::ArrayType::get(CGM.Int32Ty, PointerNumVal), MapTypesArray,
+ /*Idx0=*/0,
+ /*Idx1=*/0);
+
+ } else {
+ BasePointersArray = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
+ PointersArray = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
+ SizesArray = llvm::ConstantPointerNull::get(CGM.SizeTy->getPointerTo());
+ MapTypesArray =
+ llvm::ConstantPointerNull::get(CGM.Int32Ty->getPointerTo());
+ }
+
+ // On top of the arrays that were filled up, the target offloading call
+ // takes as arguments the device id as well as the host pointer. The host
+ // pointer is used by the runtime library to identify the current target
+ // region, so it only has to be unique and not necessarily point to
+ // anything. It could be the pointer to the outlined function that
+ // implements the target region, but we aren't using that so the compiler
+ // doesn't need to keep it alive, and can therefore inline the host function
+ // if proven worthwhile during optimization.
+
+ // From this point on, we need to have an ID of the target region defined.
+ assert(OutlinedFnID && "Invalid outlined function ID!");
+
+ // Emit device ID if any.
+ llvm::Value *DeviceID;
+ if (Device)
+ DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
+ CGM.Int32Ty, /*isSigned=*/true);
+ else
+ DeviceID = CGF.Builder.getInt32(OMP_DEVICEID_UNDEF);
+
+ llvm::Value *OffloadingArgs[] = {
+ DeviceID, OutlinedFnID, PointerNum, BasePointersArray,
+ PointersArray, SizesArray, MapTypesArray};
+ auto Return = CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_target),
+ OffloadingArgs);
+
+ CGF.EmitStoreOfScalar(Return, OffloadError);
+ };
+
+ // Notify that the host version must be executed.
+ auto &&ElseGen = [this, OffloadError,
+ OffloadErrorQType](CodeGenFunction &CGF) {
+ CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/-1u),
+ OffloadError);
+ };
+
+ // If we have a target function ID it means that we need to support
+ // offloading; otherwise, just execute on the host. We need to execute on the
+ // host regardless of the 'if' clause condition if, e.g., the user does not
+ // specify any target triples.
+ if (OutlinedFnID) {
+ if (IfCond) {
+ emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
+ } else {
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ ThenGen(CGF);
+ }
+ } else {
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ ElseGen(CGF);
+ }
+
+ // Check the error code and execute the host version if required.
+ auto OffloadFailedBlock = CGF.createBasicBlock("omp_offload.failed");
+ auto OffloadContBlock = CGF.createBasicBlock("omp_offload.cont");
+ auto OffloadErrorVal = CGF.EmitLoadOfScalar(OffloadError, SourceLocation());
+ auto Failed = CGF.Builder.CreateIsNotNull(OffloadErrorVal);
+ CGF.Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock);
+
+ CGF.EmitBlock(OffloadFailedBlock);
+ CGF.Builder.CreateCall(OutlinedFn, BasePointers);
+ CGF.EmitBranch(OffloadContBlock);
+
+ CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true);
+ return;
+}
+
+void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
+ StringRef ParentName) {
+ if (!S)
+ return;
+
+ // If we find an OMP target directive, codegen the outlined function and
+ // register the result.
+ // FIXME: Add other directives with target when they become supported.
+ bool isTargetDirective = isa<OMPTargetDirective>(S);
+
+ if (isTargetDirective) {
+ auto *E = cast<OMPExecutableDirective>(S);
+ unsigned DeviceID;
+ unsigned FileID;
+ unsigned Line;
+ unsigned Column;
+ getTargetEntryUniqueInfo(CGM.getContext(), E->getLocStart(), DeviceID,
+ FileID, Line, Column);
+
+ // Is this a target region that should not be emitted as an entry point? If
+ // so, just signal that we are done with this target region.
+ if (!OffloadEntriesInfoManager.hasTargetRegionEntryInfo(
+ DeviceID, FileID, ParentName, Line, Column))
+ return;
+
+ llvm::Function *Fn;
+ llvm::Constant *Addr;
+ emitTargetOutlinedFunction(*E, ParentName, Fn, Addr,
+ /*IsOffloadEntry=*/true);
+ assert(Fn && Addr && "Target region emission failed.");
+ return;
+ }
+
+ if (const OMPExecutableDirective *E = dyn_cast<OMPExecutableDirective>(S)) {
+ if (!E->getAssociatedStmt())
+ return;
+
+ scanForTargetRegionsFunctions(
+ cast<CapturedStmt>(E->getAssociatedStmt())->getCapturedStmt(),
+ ParentName);
+ return;
+ }
+
+ // If this is a lambda function, look into its body.
+ if (auto *L = dyn_cast<LambdaExpr>(S))
+ S = L->getBody();
+
+ // Keep looking for target regions recursively.
+ for (auto *II : S->children())
+ scanForTargetRegionsFunctions(II, ParentName);
+
+ return;
+}
+
+bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
+ auto &FD = *cast<FunctionDecl>(GD.getDecl());
+
+ // If emitting code for the host, we do not process FD here. Instead we do
+ // the normal code generation.
+ if (!CGM.getLangOpts().OpenMPIsDevice)
+ return false;
+
+ // Try to detect target regions in the function.
+ scanForTargetRegionsFunctions(FD.getBody(), CGM.getMangledName(GD));
+
+ // We should not emit any function other than the ones created during the
+ // scanning. Therefore, we signal that this function is completely dealt
+ // with.
+ return true;
+}
+
+bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
+ if (!CGM.getLangOpts().OpenMPIsDevice)
+ return false;
+
+ // Check if there are Ctors/Dtors in this declaration and look for target
+ // regions in it. We use the complete variant to produce the kernel name
+ // mangling.
+ QualType RDTy = cast<VarDecl>(GD.getDecl())->getType();
+ if (auto *RD = RDTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) {
+ for (auto *Ctor : RD->ctors()) {
+ StringRef ParentName =
+ CGM.getMangledName(GlobalDecl(Ctor, Ctor_Complete));
+ scanForTargetRegionsFunctions(Ctor->getBody(), ParentName);
+ }
+ auto *Dtor = RD->getDestructor();
+ if (Dtor) {
+ StringRef ParentName =
+ CGM.getMangledName(GlobalDecl(Dtor, Dtor_Complete));
+ scanForTargetRegionsFunctions(Dtor->getBody(), ParentName);
+ }
+ }
+
+ // If we are in target mode we do not emit any globals (declare target is not
+ // implemented yet). Therefore we signal that GD was processed in this case.
+ return true;
+}
+
+bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
+ auto *VD = GD.getDecl();
+ if (isa<FunctionDecl>(VD))
+ return emitTargetFunctions(GD);
+
+ return emitTargetGlobalVariable(GD);
+}
+
+llvm::Function *CGOpenMPRuntime::emitRegistrationFunction() {
+ // If we have offloading in the current module, we need to emit the entries
+ // now and register the offloading descriptor.
+ createOffloadEntriesAndInfoMetadata();
+
+ // Create and register the offloading binary descriptors. This is the main
+ // entity that captures all the information about offloading in the current
+ // compilation unit.
+ return createOffloadingBinaryDescriptorRegistration();
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGOpenMPRuntime.h b/contrib/llvm/tools/clang/lib/CodeGen/CGOpenMPRuntime.h
new file mode 100644
index 0000000..6b04fbe
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGOpenMPRuntime.h
@@ -0,0 +1,980 @@
+//===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides a class for OpenMP runtime code generation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
+#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
+
+#include "clang/AST/Type.h"
+#include "clang/Basic/OpenMPKinds.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/IR/ValueHandle.h"
+
+namespace llvm {
+class ArrayType;
+class Constant;
+class Function;
+class FunctionType;
+class GlobalVariable;
+class StructType;
+class Type;
+class Value;
+} // namespace llvm
+
+namespace clang {
+class Expr;
+class GlobalDecl;
+class OMPExecutableDirective;
+class VarDecl;
+
+namespace CodeGen {
+class Address;
+class CodeGenFunction;
+class CodeGenModule;
+
+typedef llvm::function_ref<void(CodeGenFunction &)> RegionCodeGenTy;
+
+class CGOpenMPRuntime {
+private:
+ enum OpenMPRTLFunction {
+ /// \brief Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
+ /// kmpc_micro microtask, ...);
+ OMPRTL__kmpc_fork_call,
+ /// \brief Call to void *__kmpc_threadprivate_cached(ident_t *loc,
+ /// kmp_int32 global_tid, void *data, size_t size, void ***cache);
+ OMPRTL__kmpc_threadprivate_cached,
+ /// \brief Call to void __kmpc_threadprivate_register( ident_t *,
+ /// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
+ OMPRTL__kmpc_threadprivate_register,
+ // Call to kmp_int32 __kmpc_global_thread_num(ident_t *loc);
+ OMPRTL__kmpc_global_thread_num,
+ // Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
+ // kmp_critical_name *crit);
+ OMPRTL__kmpc_critical,
+ // Call to void __kmpc_critical_with_hint(ident_t *loc, kmp_int32
+ // global_tid, kmp_critical_name *crit, uintptr_t hint);
+ OMPRTL__kmpc_critical_with_hint,
+ // Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
+ // kmp_critical_name *crit);
+ OMPRTL__kmpc_end_critical,
+ // Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
+ // global_tid);
+ OMPRTL__kmpc_cancel_barrier,
+ // Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
+ OMPRTL__kmpc_barrier,
+ // Call to void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
+ OMPRTL__kmpc_for_static_fini,
+ // Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
+ // global_tid);
+ OMPRTL__kmpc_serialized_parallel,
+ // Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
+ // global_tid);
+ OMPRTL__kmpc_end_serialized_parallel,
+ // Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
+ // kmp_int32 num_threads);
+ OMPRTL__kmpc_push_num_threads,
+ // Call to void __kmpc_flush(ident_t *loc);
+ OMPRTL__kmpc_flush,
+ // Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
+ OMPRTL__kmpc_master,
+ // Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
+ OMPRTL__kmpc_end_master,
+ // Call to kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
+ // int end_part);
+ OMPRTL__kmpc_omp_taskyield,
+ // Call to kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
+ OMPRTL__kmpc_single,
+ // Call to void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
+ OMPRTL__kmpc_end_single,
+ // Call to kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
+ // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
+ // kmp_routine_entry_t *task_entry);
+ OMPRTL__kmpc_omp_task_alloc,
+ // Call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *
+ // new_task);
+ OMPRTL__kmpc_omp_task,
+ // Call to void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
+ // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
+ // kmp_int32 didit);
+ OMPRTL__kmpc_copyprivate,
+ // Call to kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
+ // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
+ // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
+ OMPRTL__kmpc_reduce,
+ // Call to kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
+ // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
+ // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
+ // *lck);
+ OMPRTL__kmpc_reduce_nowait,
+ // Call to void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
+ // kmp_critical_name *lck);
+ OMPRTL__kmpc_end_reduce,
+ // Call to void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
+ // kmp_critical_name *lck);
+ OMPRTL__kmpc_end_reduce_nowait,
+ // Call to void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
+ // kmp_task_t * new_task);
+ OMPRTL__kmpc_omp_task_begin_if0,
+ // Call to void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
+ // kmp_task_t * new_task);
+ OMPRTL__kmpc_omp_task_complete_if0,
+ // Call to void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
+ OMPRTL__kmpc_ordered,
+ // Call to void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
+ OMPRTL__kmpc_end_ordered,
+ // Call to kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
+ // global_tid);
+ OMPRTL__kmpc_omp_taskwait,
+ // Call to void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
+ OMPRTL__kmpc_taskgroup,
+ // Call to void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
+ OMPRTL__kmpc_end_taskgroup,
+ // Call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
+ // int proc_bind);
+ OMPRTL__kmpc_push_proc_bind,
+ // Call to kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32
+ // gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t
+ // *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
+ OMPRTL__kmpc_omp_task_with_deps,
+ // Call to void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32
+ // gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
+ // ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
+ OMPRTL__kmpc_omp_wait_deps,
+ // Call to kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
+ // global_tid, kmp_int32 cncl_kind);
+ OMPRTL__kmpc_cancellationpoint,
+ // Call to kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
+ // kmp_int32 cncl_kind);
+ OMPRTL__kmpc_cancel,
+
+ //
+ // Offloading related calls
+ //
+ // Call to int32_t __tgt_target(int32_t device_id, void *host_ptr, int32_t
+ // arg_num, void** args_base, void **args, size_t *arg_sizes, int32_t
+ // *arg_types);
+ OMPRTL__tgt_target,
+ // Call to void __tgt_register_lib(__tgt_bin_desc *desc);
+ OMPRTL__tgt_register_lib,
+ // Call to void __tgt_unregister_lib(__tgt_bin_desc *desc);
+ OMPRTL__tgt_unregister_lib,
+ };
+
+ /// \brief Values for bit flags used in the ident_t to describe the fields.
+ /// All enum elements are named and described in accordance with the code
+ /// from http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
+ enum OpenMPLocationFlags {
+ /// \brief Use trampoline for internal microtask.
+ OMP_IDENT_IMD = 0x01,
+ /// \brief Use c-style ident structure.
+ OMP_IDENT_KMPC = 0x02,
+ /// \brief Atomic reduction option for kmpc_reduce.
+ OMP_ATOMIC_REDUCE = 0x10,
+ /// \brief Explicit 'barrier' directive.
+ OMP_IDENT_BARRIER_EXPL = 0x20,
+ /// \brief Implicit barrier in code.
+ OMP_IDENT_BARRIER_IMPL = 0x40,
+ /// \brief Implicit barrier in 'for' directive.
+ OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
+ /// \brief Implicit barrier in 'sections' directive.
+ OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
+ /// \brief Implicit barrier in 'single' directive.
+ OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140
+ };
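+ // Note: these flags are combined with each other; e.g., an ident_t for an
+ // implicit barrier of a 'single' directive would carry OMP_IDENT_KMPC |
+ // OMP_IDENT_BARRIER_IMPL_SINGLE (an illustrative combination).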
+ CodeGenModule &CGM;
+ /// \brief Default const ident_t object used for initialization of all other
+ /// ident_t objects.
+ llvm::Constant *DefaultOpenMPPSource;
+ /// \brief Map of flags and corresponding default locations.
+ typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDefaultLocMapTy;
+ OpenMPDefaultLocMapTy OpenMPDefaultLocMap;
+ Address getOrCreateDefaultLocation(OpenMPLocationFlags Flags);
+
+public:
+ /// \brief Describes ident structure that describes a source location.
+ /// All descriptions are taken from
+ /// http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
+ /// Original structure:
+ /// typedef struct ident {
+ /// kmp_int32 reserved_1; /**< might be used in Fortran;
+ /// see above */
+ /// kmp_int32 flags; /**< also f.flags; KMP_IDENT_xxx flags;
+ /// KMP_IDENT_KMPC identifies this union
+ /// member */
+ /// kmp_int32 reserved_2; /**< not really used in Fortran any more;
+ /// see above */
+ ///#if USE_ITT_BUILD
+ /// /* but currently used for storing
+ /// region-specific ITT */
+ /// /* contextual information. */
+ ///#endif /* USE_ITT_BUILD */
+ /// kmp_int32 reserved_3; /**< source[4] in Fortran, do not use for
+ /// C++ */
+ /// char const *psource; /**< String describing the source location.
+ /// The string is composed of semi-colon separated
+ /// fields which describe the source file,
+ /// the function and a pair of line numbers that
+ /// delimit the construct.
+ /// */
+ /// } ident_t;
+ enum IdentFieldIndex {
+ /// \brief might be used in Fortran
+ IdentField_Reserved_1,
+ /// \brief OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
+ IdentField_Flags,
+ /// \brief Not really used in Fortran any more
+ IdentField_Reserved_2,
+ /// \brief Source[4] in Fortran, do not use for C++
+ IdentField_Reserved_3,
+ /// \brief String describing the source location. The string is composed of
+ /// semi-colon separated fields which describe the source file, the function
+ /// and a pair of line numbers that delimit the construct.
+ IdentField_PSource
+ };
+private:
+ llvm::StructType *IdentTy;
+ /// \brief Map for SourceLocation and OpenMP runtime library debug locations.
+ typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy;
+ OpenMPDebugLocMapTy OpenMPDebugLocMap;
+ /// \brief The type for a microtask which gets passed to __kmpc_fork_call().
+ /// Original representation is:
+ /// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
+ llvm::FunctionType *Kmpc_MicroTy;
+ /// \brief Stores debug location and ThreadID for the function.
+ struct DebugLocThreadIdTy {
+ llvm::Value *DebugLoc;
+ llvm::Value *ThreadID;
+ };
+ /// \brief Map of local debug location, ThreadId and functions.
+ typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
+ OpenMPLocThreadIDMapTy;
+ OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
+ /// \brief Type kmp_critical_name, originally defined as typedef kmp_int32
+ /// kmp_critical_name[8];
+ llvm::ArrayType *KmpCriticalNameTy;
+ /// \brief An ordered map of auto-generated variables to their unique names.
+ /// It stores variables with the following names: 1) ".gomp_critical_user_" +
+ /// <critical_section_name> + ".var" for "omp critical" directives; 2)
+ /// <mangled_name_for_global_var> + ".cache." for the cache of threadprivate
+ /// variables.
+ llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
+ InternalVars;
+ /// \brief Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
+ llvm::Type *KmpRoutineEntryPtrTy;
+ QualType KmpRoutineEntryPtrQTy;
+ /// \brief Type typedef struct kmp_task {
+ /// void * shareds; /**< pointer to block of pointers to
+ /// shared vars */
+ /// kmp_routine_entry_t routine; /**< pointer to routine to call for
+ /// executing task */
+ /// kmp_int32 part_id; /**< part id for the task */
+ /// kmp_routine_entry_t destructors; /* pointer to function to invoke
+ /// deconstructors of firstprivate C++ objects */
+ /// } kmp_task_t;
+ QualType KmpTaskTQTy;
+ /// \brief Type typedef struct kmp_depend_info {
+ /// kmp_intptr_t base_addr;
+ /// size_t len;
+ /// struct {
+ /// bool in:1;
+ /// bool out:1;
+ /// } flags;
+ /// } kmp_depend_info_t;
+ QualType KmpDependInfoTy;
+ /// \brief Type struct __tgt_offload_entry{
+ /// void *addr; // Pointer to the offload entry info.
+ /// // (function or global)
+ /// char *name; // Name of the function or global.
+ /// size_t size; // Size of the entry info (0 if it is a function).
+ /// };
+ QualType TgtOffloadEntryQTy;
+ /// struct __tgt_device_image{
+ /// void *ImageStart; // Pointer to the target code start.
+ /// void *ImageEnd; // Pointer to the target code end.
+ /// // We also add the host entries to the device image, as it may be useful
+ /// // for the target runtime to have access to that information.
+ /// __tgt_offload_entry *EntriesBegin; // Begin of the table with all
+ /// // the entries.
+ /// __tgt_offload_entry *EntriesEnd; // End of the table with all the
+ /// // entries (non inclusive).
+ /// };
+ QualType TgtDeviceImageQTy;
+ /// struct __tgt_bin_desc{
+ /// int32_t NumDevices; // Number of devices supported.
+ /// __tgt_device_image *DeviceImages; // Arrays of device images
+ /// // (one per device).
+ /// __tgt_offload_entry *EntriesBegin; // Begin of the table with all the
+ /// // entries.
+ /// __tgt_offload_entry *EntriesEnd; // End of the table with all the
+ /// // entries (non inclusive).
+ /// };
+ QualType TgtBinaryDescriptorQTy;
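+ // A __tgt_bin_desc instance is the argument handed to __tgt_register_lib()
+ // and __tgt_unregister_lib() when the offloading descriptor is registered.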
+ /// \brief Entity that registers the offloading constants that were emitted so
+ /// far.
+ class OffloadEntriesInfoManagerTy {
+ CodeGenModule &CGM;
+
+ /// \brief Number of entries registered so far.
+ unsigned OffloadingEntriesNum;
+
+ public:
+ /// \brief Base class of the entries info.
+ class OffloadEntryInfo {
+ public:
+ /// \brief Kind of a given entry. Currently, only target regions are
+ /// supported.
+ enum OffloadingEntryInfoKinds {
+ // Entry is a target region.
+ OFFLOAD_ENTRY_INFO_TARGET_REGION = 0,
+ // Invalid entry info.
+ OFFLOAD_ENTRY_INFO_INVALID = ~0u
+ };
+
+ OffloadEntryInfo() : Order(~0u), Kind(OFFLOAD_ENTRY_INFO_INVALID) {}
+ explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order)
+ : Order(Order), Kind(Kind) {}
+
+ bool isValid() const { return Order != ~0u; }
+ unsigned getOrder() const { return Order; }
+ OffloadingEntryInfoKinds getKind() const { return Kind; }
+ static bool classof(const OffloadEntryInfo *Info) { return true; }
+
+ protected:
+ // \brief Order this entry was emitted.
+ unsigned Order;
+
+ OffloadingEntryInfoKinds Kind;
+ };
+
+ /// \brief Return true if there are no entries defined.
+ bool empty() const;
+ /// \brief Return number of entries defined so far.
+ unsigned size() const { return OffloadingEntriesNum; }
+ OffloadEntriesInfoManagerTy(CodeGenModule &CGM)
+ : CGM(CGM), OffloadingEntriesNum(0) {}
+
+ ///
+ /// Target region entries related.
+ ///
+ /// \brief Target region entries info.
+ class OffloadEntryInfoTargetRegion : public OffloadEntryInfo {
+ // \brief Address of the entity that has to be mapped for offloading.
+ llvm::Constant *Addr;
+ // \brief Address that can be used as the ID of the entry.
+ llvm::Constant *ID;
+
+ public:
+ OffloadEntryInfoTargetRegion()
+ : OffloadEntryInfo(OFFLOAD_ENTRY_INFO_TARGET_REGION, ~0u),
+ Addr(nullptr), ID(nullptr) {}
+ explicit OffloadEntryInfoTargetRegion(unsigned Order,
+ llvm::Constant *Addr,
+ llvm::Constant *ID)
+ : OffloadEntryInfo(OFFLOAD_ENTRY_INFO_TARGET_REGION, Order),
+ Addr(Addr), ID(ID) {}
+
+ llvm::Constant *getAddress() const { return Addr; }
+ llvm::Constant *getID() const { return ID; }
+ void setAddress(llvm::Constant *V) {
+ assert(!Addr && "Address has been set before!");
+ Addr = V;
+ }
+ void setID(llvm::Constant *V) {
+ assert(!ID && "ID has been set before!");
+ ID = V;
+ }
+ static bool classof(const OffloadEntryInfo *Info) {
+ return Info->getKind() == OFFLOAD_ENTRY_INFO_TARGET_REGION;
+ }
+ };
+ /// \brief Initialize target region entry.
+ void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
+ StringRef ParentName, unsigned LineNum,
+ unsigned ColNum, unsigned Order);
+ /// \brief Register target region entry.
+ void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
+ StringRef ParentName, unsigned LineNum,
+ unsigned ColNum, llvm::Constant *Addr,
+ llvm::Constant *ID);
+ /// \brief Return true if a target region entry with the provided
+ /// information exists.
+ bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
+ StringRef ParentName, unsigned LineNum,
+ unsigned ColNum) const;
+ /// \brief Applies action \a Action on all registered entries.
+ typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
+ unsigned, OffloadEntryInfoTargetRegion &)>
+ OffloadTargetRegionEntryInfoActTy;
+ void actOnTargetRegionEntriesInfo(
+ const OffloadTargetRegionEntryInfoActTy &Action);
+
+ private:
+ // Storage for target region entries. The storage is indexed by device ID,
+ // file ID, parent function name, line number, and column number.
+ typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion>
+ OffloadEntriesTargetRegionPerColumn;
+ typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerColumn>
+ OffloadEntriesTargetRegionPerLine;
+ typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine>
+ OffloadEntriesTargetRegionPerParentName;
+ typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName>
+ OffloadEntriesTargetRegionPerFile;
+ typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile>
+ OffloadEntriesTargetRegionPerDevice;
+ typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
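+ // Conceptually, given the typedef nesting above, an entry is found as
+ // OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum][ColNum].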
+ OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
+ };
+ OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;
+
+ /// \brief Creates and registers offloading binary descriptor for the current
+ /// compilation unit. The function that does the registration is returned.
+ llvm::Function *createOffloadingBinaryDescriptorRegistration();
+
+ /// \brief Creates offloading entry for the provided address \a Addr,
+ /// name \a Name and size \a Size.
+ void createOffloadEntry(llvm::Constant *Addr, StringRef Name, uint64_t Size);
+
+ /// \brief Creates all the offload entries in the current compilation unit
+ /// along with the associated metadata.
+ void createOffloadEntriesAndInfoMetadata();
+
+ /// \brief Loads all the offload entries information from the host IR
+ /// metadata.
+ void loadOffloadInfoMetadata();
+
+ /// \brief Returns __tgt_offload_entry type.
+ QualType getTgtOffloadEntryQTy();
+
+ /// \brief Returns __tgt_device_image type.
+ QualType getTgtDeviceImageQTy();
+
+ /// \brief Returns __tgt_bin_desc type.
+ QualType getTgtBinaryDescriptorQTy();
+
+ /// \brief Start scanning from statement \a S and emit all target regions
+ /// found along the way.
+ /// \param S Starting statement.
+ /// \param ParentName Name of the function declaration that is being scanned.
+ void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);
+
+ /// \brief Build type kmp_routine_entry_t (if not built yet).
+ void emitKmpRoutineEntryT(QualType KmpInt32Ty);
+
+ /// \brief Emits object of ident_t type with info for source location.
+ /// \param Flags Flags for OpenMP location.
+ ///
+ llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPLocationFlags Flags = OMP_IDENT_KMPC);
+
+ /// \brief Returns pointer to ident_t type.
+ llvm::Type *getIdentTyPointerTy();
+
+ /// \brief Returns pointer to kmpc_micro type.
+ llvm::Type *getKmpc_MicroPointerTy();
+
+ /// \brief Returns specified OpenMP runtime function.
+ /// \param Function OpenMP runtime function.
+ /// \return Specified function.
+ llvm::Constant *createRuntimeFunction(OpenMPRTLFunction Function);
+
+ /// \brief Returns __kmpc_for_static_init_* runtime function for the specified
+ /// size \a IVSize and sign \a IVSigned.
+ llvm::Constant *createForStaticInitFunction(unsigned IVSize, bool IVSigned);
+
+ /// \brief Returns __kmpc_dispatch_init_* runtime function for the specified
+ /// size \a IVSize and sign \a IVSigned.
+ llvm::Constant *createDispatchInitFunction(unsigned IVSize, bool IVSigned);
+
+ /// \brief Returns __kmpc_dispatch_next_* runtime function for the specified
+ /// size \a IVSize and sign \a IVSigned.
+ llvm::Constant *createDispatchNextFunction(unsigned IVSize, bool IVSigned);
+
+ /// \brief Returns __kmpc_dispatch_fini_* runtime function for the specified
+ /// size \a IVSize and sign \a IVSigned.
+ llvm::Constant *createDispatchFiniFunction(unsigned IVSize, bool IVSigned);
+
+ /// \brief If the specified mangled name is not in the module, create and
+ /// return a threadprivate cache object. This object is a pointer's worth of
+ /// storage that's reserved for use by the OpenMP runtime.
+ /// \param VD Threadprivate variable.
+ /// \return Cache variable for the specified threadprivate.
+ llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);
+
+ /// \brief Emits the address of the word in memory where the current thread
+ /// id is stored.
+ virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);
+
+ /// \brief Gets thread id value for the current thread.
+ ///
+ llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);
+
+ /// \brief Gets (if a variable with the given name already exists) or creates
+ /// an internal global variable with the specified Name. The created variable
+ /// has CommonLinkage by default and is initialized with a null value.
+ /// \param Ty Type of the global variable. If it already exists, the type
+ /// must be the same.
+ /// \param Name Name of the variable.
+ llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
+ const llvm::Twine &Name);
+
+ /// \brief Set of threadprivate variables with the generated initializer.
+ llvm::DenseSet<const VarDecl *> ThreadPrivateWithDefinition;
+
+ /// \brief Emits initialization code for the threadprivate variables.
+ /// \param VDAddr Address of the global variable \a VD.
+ /// \param Ctor Pointer to a global init function for \a VD.
+ /// \param CopyCtor Pointer to a global copy function for \a VD.
+ /// \param Dtor Pointer to a global destructor function for \a VD.
+ /// \param Loc Location of threadprivate declaration.
+ void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
+ llvm::Value *Ctor, llvm::Value *CopyCtor,
+ llvm::Value *Dtor, SourceLocation Loc);
+
+ /// \brief Returns corresponding lock object for the specified critical region
+ /// name. If the lock object does not exist it is created, otherwise the
+ /// reference to the existing copy is returned.
+ /// \param CriticalName Name of the critical region.
+ ///
+ llvm::Value *getCriticalRegionLock(StringRef CriticalName);
+
+public:
+ explicit CGOpenMPRuntime(CodeGenModule &CGM);
+ virtual ~CGOpenMPRuntime() {}
+ virtual void clear();
+
+ /// \brief Emits outlined function for the specified OpenMP parallel directive
+ /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
+ /// kmp_int32 BoundID, struct context_vars*).
+ /// \param D OpenMP directive.
+ /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
+ /// \param InnermostKind Kind of innermost directive (for simple directives it
+ /// is a directive itself, for combined - its innermost directive).
+ /// \param CodeGen Code generation sequence for the \a D directive.
+ virtual llvm::Value *emitParallelOutlinedFunction(
+ const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
+ OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
+
+ /// \brief Emits outlined function for the OpenMP task directive \a D. This
+ /// outlined function has type void(*)(kmp_int32 ThreadID, kmp_int32
+ /// PartID, struct context_vars*).
+ /// \param D OpenMP directive.
+ /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
+ /// \param InnermostKind Kind of innermost directive (for simple directives it
+ /// is a directive itself, for combined - its innermost directive).
+ /// \param CodeGen Code generation sequence for the \a D directive.
+ ///
+ virtual llvm::Value *emitTaskOutlinedFunction(
+ const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
+ OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
+
+ /// \brief Cleans up references to the objects in the finished function.
+ ///
+ void functionFinished(CodeGenFunction &CGF);
+
+ /// \brief Emits code for parallel or serial call of the \a OutlinedFn with
+ /// variables captured in a record whose address is stored in \a
+ /// CapturedStruct.
+ /// \param OutlinedFn Outlined function to be run in parallel threads. Type of
+ /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
+ /// \param CapturedVars A pointer to the record with the references to
+ /// variables used in \a OutlinedFn function.
+ /// \param IfCond Condition in the associated 'if' clause, if it was
+ /// specified, nullptr otherwise.
+ ///
+ virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
+ llvm::Value *OutlinedFn,
+ ArrayRef<llvm::Value *> CapturedVars,
+ const Expr *IfCond);
+
+ /// \brief Emits a critical region.
+ /// \param CriticalName Name of the critical region.
+ /// \param CriticalOpGen Generator for the statement associated with the given
+ /// critical region.
+ /// \param Hint Value of the 'hint' clause (optional).
+ virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
+ const RegionCodeGenTy &CriticalOpGen,
+ SourceLocation Loc,
+ const Expr *Hint = nullptr);
+
+ /// \brief Emits a master region.
+ /// \param MasterOpGen Generator for the statement associated with the given
+ /// master region.
+ virtual void emitMasterRegion(CodeGenFunction &CGF,
+ const RegionCodeGenTy &MasterOpGen,
+ SourceLocation Loc);
+
+ /// \brief Emits code for a taskyield directive.
+ virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);
+
+ /// \brief Emit a taskgroup region.
+ /// \param TaskgroupOpGen Generator for the statement associated with the
+ /// given taskgroup region.
+ virtual void emitTaskgroupRegion(CodeGenFunction &CGF,
+ const RegionCodeGenTy &TaskgroupOpGen,
+ SourceLocation Loc);
+
+ /// \brief Emits a single region.
+ /// \param SingleOpGen Generator for the statement associated with the given
+ /// single region.
+ virtual void emitSingleRegion(CodeGenFunction &CGF,
+ const RegionCodeGenTy &SingleOpGen,
+ SourceLocation Loc,
+ ArrayRef<const Expr *> CopyprivateVars,
+ ArrayRef<const Expr *> DestExprs,
+ ArrayRef<const Expr *> SrcExprs,
+ ArrayRef<const Expr *> AssignmentOps);
+
+ /// \brief Emit an ordered region.
+ /// \param OrderedOpGen Generator for the statement associated with the given
+ /// ordered region.
+ virtual void emitOrderedRegion(CodeGenFunction &CGF,
+ const RegionCodeGenTy &OrderedOpGen,
+ SourceLocation Loc, bool IsThreads);
+
+ /// \brief Emit an implicit/explicit barrier for OpenMP threads.
+ /// \param Kind Directive for which this implicit barrier call must be
+ /// generated. Must be OMPD_barrier for explicit barrier generation.
+ /// \param EmitChecks true if need to emit checks for cancellation barriers.
+ /// \param ForceSimpleCall true if a simple barrier call must be emitted,
+ /// false if the runtime class decides which one to emit (simple or with
+ /// cancellation checks).
+ ///
+ virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPDirectiveKind Kind,
+ bool EmitChecks = true,
+ bool ForceSimpleCall = false);
+
+ /// \brief Check if the specified \a ScheduleKind is static non-chunked.
+ /// This kind of worksharing directive is emitted without an outer loop.
+ /// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
+ /// \param Chunked True if chunk is specified in the clause.
+ ///
+ virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
+ bool Chunked) const;
+
+ /// \brief Check if the specified \a ScheduleKind is dynamic.
+ /// This kind of worksharing directive is emitted without an outer loop.
+ /// \param ScheduleKind Schedule Kind specified in the 'schedule' clause.
+ ///
+ virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const;
+
+ virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPScheduleClauseKind SchedKind,
+ unsigned IVSize, bool IVSigned,
+ bool Ordered, llvm::Value *UB,
+ llvm::Value *Chunk = nullptr);
+
+ /// \brief Call the appropriate runtime routine to initialize it before the
+ /// start of the loop.
+ ///
+ /// Depending on the loop schedule, it is necessary to call some runtime
+ /// routine before the start of the OpenMP loop to get the loop upper / lower
+ /// bounds \a LB and \a UB and stride \a ST.
+ ///
+ /// \param CGF Reference to current CodeGenFunction.
+ /// \param Loc Clang source location.
+ /// \param SchedKind Schedule kind, specified by the 'schedule' clause.
+ /// \param IVSize Size of the iteration variable in bits.
+ /// \param IVSigned Sign of the iteration variable.
+ /// \param Ordered true if loop is ordered, false otherwise.
+ /// \param IL Address of the output variable in which the flag of the
+ /// last iteration is returned.
+ /// \param LB Address of the output variable in which the lower iteration
+ /// number is returned.
+ /// \param UB Address of the output variable in which the upper iteration
+ /// number is returned.
+ /// \param ST Address of the output variable in which the stride value is
+ /// returned, necessary to generate the static_chunked scheduled loop.
+ /// \param Chunk Value of the chunk for the static_chunked scheduled loop.
+ /// For the default (nullptr) value, a chunk of 1 will be used.
+ ///
+ virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPScheduleClauseKind SchedKind,
+ unsigned IVSize, bool IVSigned, bool Ordered,
+ Address IL, Address LB,
+ Address UB, Address ST,
+ llvm::Value *Chunk = nullptr);
+
+ /// \brief Call the appropriate runtime routine to notify that we finished
+ /// an iteration of the ordered loop with dynamic scheduling.
+ ///
+ /// \param CGF Reference to current CodeGenFunction.
+ /// \param Loc Clang source location.
+ /// \param IVSize Size of the iteration variable in bits.
+ /// \param IVSigned Sign of the iteration variable.
+ ///
+ virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF,
+ SourceLocation Loc, unsigned IVSize,
+ bool IVSigned);
+
+ /// \brief Call the appropriate runtime routine to notify that we finished
+ /// all the work with the current loop.
+ ///
+ /// \param CGF Reference to current CodeGenFunction.
+ /// \param Loc Clang source location.
+ ///
+ virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc);
+
+ /// Call __kmpc_dispatch_next(
+ /// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
+ /// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
+ /// kmp_int[32|64] *p_stride);
+ /// \param IVSize Size of the iteration variable in bits.
+ /// \param IVSigned Sign of the iteration variable.
+ /// \param IL Address of the output variable in which the flag of the
+ /// last iteration is returned.
+ /// \param LB Address of the output variable in which the lower iteration
+ /// number is returned.
+ /// \param UB Address of the output variable in which the upper iteration
+ /// number is returned.
+ /// \param ST Address of the output variable in which the stride value is
+ /// returned.
+ virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
+ unsigned IVSize, bool IVSigned,
+ Address IL, Address LB,
+ Address UB, Address ST);
+
+ /// \brief Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
+ /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
+ /// clause.
+ /// \param NumThreads An integer value of threads.
+ virtual void emitNumThreadsClause(CodeGenFunction &CGF,
+ llvm::Value *NumThreads,
+ SourceLocation Loc);
+
+ /// \brief Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
+ /// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
+ virtual void emitProcBindClause(CodeGenFunction &CGF,
+ OpenMPProcBindClauseKind ProcBind,
+ SourceLocation Loc);
+
+ /// \brief Returns address of the threadprivate variable for the current
+ /// thread.
+ /// \param VD Threadprivate variable.
+ /// \param VDAddr Address of the global variable \a VD.
+ /// \param Loc Location of the reference to threadprivate var.
+ /// \return Address of the threadprivate variable for the current thread.
+ virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
+ const VarDecl *VD,
+ Address VDAddr,
+ SourceLocation Loc);
+
+ /// \brief Emit code for the initialization of a threadprivate variable. It
+ /// emits a call to the runtime library which adds the initial value to the
+ /// newly created threadprivate variable (if it is not constant) and registers
+ /// a destructor for the variable (if any).
+ /// \param VD Threadprivate variable.
+ /// \param VDAddr Address of the global variable \a VD.
+ /// \param Loc Location of threadprivate declaration.
+ /// \param PerformInit true if initialization expression is not constant.
+ virtual llvm::Function *
+ emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
+ SourceLocation Loc, bool PerformInit,
+ CodeGenFunction *CGF = nullptr);
+
+ /// \brief Emit flush of the variables specified in 'omp flush' directive.
+ /// \param Vars List of variables to flush.
+ virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
+ SourceLocation Loc);
+
+ /// \brief Emit task region for the task directive. The task region is
+ /// emitted in several steps:
+ /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
+ /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
+ /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
+ /// function:
+ /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
+ /// TaskFunction(gtid, tt->part_id, tt->shareds);
+ /// return 0;
+ /// }
+ /// 2. Copy a list of shared variables to field shareds of the resulting
+ /// structure kmp_task_t returned by the previous call (if any).
+ /// 3. Copy a pointer to destructions function to field destructions of the
+ /// resulting structure kmp_task_t.
+ /// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
+ /// kmp_task_t *new_task), where new_task is the structure resulting from the
+ /// previous steps.
+ /// \param D Current task directive.
+ /// \param Tied true if the task is tied (the task is tied to the thread that
+ /// can suspend its task region), false - untied (the task is not tied to any
+ /// thread).
+ /// \param Final Contains either constant bool value, or llvm::Value * of i1
+ /// type for final clause. If the value is true, the task forces all of its
+ /// child tasks to become final and included tasks.
+ /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
+ /// /*part_id*/, captured_struct */*__context*/);
+ /// \param SharedsTy A type which contains references to the shared variables.
+ /// \param Shareds Context with the list of shared variables from the \p
+ /// TaskFunction.
+ /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
+ /// otherwise.
+ /// \param PrivateVars List of references to private variables for the task
+ /// directive.
+ /// \param PrivateCopies List of private copies for each private variable in
+ /// \p PrivateVars.
+ /// \param FirstprivateVars List of references to firstprivate variables for
+ /// the task directive.
+ /// \param FirstprivateCopies List of private copies for each firstprivate
+ /// variable in \p FirstprivateVars.
+ /// \param FirstprivateInits List of references to auto generated variables
+ /// used for initialization of a single array element. Used if firstprivate
+ /// variable is of array type.
+ /// \param Dependences List of dependences for the 'task' construct, including
+ /// original expression and dependency type.
+ virtual void emitTaskCall(
+ CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D,
+ bool Tied, llvm::PointerIntPair<llvm::Value *, 1, bool> Final,
+ llvm::Value *TaskFunction, QualType SharedsTy, Address Shareds,
+ const Expr *IfCond, ArrayRef<const Expr *> PrivateVars,
+ ArrayRef<const Expr *> PrivateCopies,
+ ArrayRef<const Expr *> FirstprivateVars,
+ ArrayRef<const Expr *> FirstprivateCopies,
+ ArrayRef<const Expr *> FirstprivateInits,
+ ArrayRef<std::pair<OpenMPDependClauseKind, const Expr *>> Dependences);
+
+ /// \brief Emit code for the directive that does not require outlining.
+ ///
+ /// \param InnermostKind Kind of innermost directive (for simple directives it
+ /// is a directive itself, for combined - its innermost directive).
+ /// \param CodeGen Code generation sequence for the \a D directive.
+ /// \param HasCancel true if region has inner cancel directive, false
+ /// otherwise.
+ virtual void emitInlinedDirective(CodeGenFunction &CGF,
+ OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen,
+ bool HasCancel = false);
+ /// \brief Emit code for the reduction clause. The following code should be
+ /// emitted for the reduction:
+ /// \code
+ ///
+ /// static kmp_critical_name lock = { 0 };
+ ///
+ /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
+ /// ...
+ /// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
+ /// ...
+ /// }
+ ///
+ /// ...
+ /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
+ /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
+ /// RedList, reduce_func, &<lock>)) {
+ /// case 1:
+ /// ...
+ /// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
+ /// ...
+ /// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
+ /// break;
+ /// case 2:
+ /// ...
+ /// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
+ /// ...
+ /// break;
+ /// default:;
+ /// }
+ /// \endcode
+ ///
+ /// \param Privates List of private copies for original reduction arguments.
+ /// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
+ /// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
+ /// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
+ /// or 'operator binop(LHS, RHS)'.
+ /// \param WithNowait true if parent directive has also nowait clause, false
+ /// otherwise.
+ virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
+ ArrayRef<const Expr *> Privates,
+ ArrayRef<const Expr *> LHSExprs,
+ ArrayRef<const Expr *> RHSExprs,
+ ArrayRef<const Expr *> ReductionOps,
+ bool WithNowait, bool SimpleReduction);
+
+ /// \brief Emit code for 'taskwait' directive.
+ virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);
+
+ /// \brief Emit code for 'cancellation point' construct.
+ /// \param CancelRegion Region kind for which the cancellation point must be
+ /// emitted.
+ ///
+ virtual void emitCancellationPointCall(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ OpenMPDirectiveKind CancelRegion);
+
+ /// \brief Emit code for 'cancel' construct.
+ /// \param IfCond Condition in the associated 'if' clause, if it was
+ /// specified, nullptr otherwise.
+ /// \param CancelRegion Region kind for which the cancel must be emitted.
+ ///
+ virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
+ const Expr *IfCond,
+ OpenMPDirectiveKind CancelRegion);
+
+ /// \brief Emit the outlined function for the 'target' directive.
+ /// \param D Directive to emit.
+ /// \param ParentName Name of the function that encloses the target region.
+ /// \param OutlinedFn Outlined function value to be defined by this call.
+ /// \param OutlinedFnID Outlined function ID value to be defined by this call.
+ /// \param IsOffloadEntry True if the outlined function is an offload entry.
+ /// An outlined function may not be an entry if, e.g., the 'if' clause always
+ /// evaluates to false.
+ virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
+ StringRef ParentName,
+ llvm::Function *&OutlinedFn,
+ llvm::Constant *&OutlinedFnID,
+ bool IsOffloadEntry);
+
+ /// \brief Emit the target offloading code associated with \a D. The emitted
+ /// code attempts to offload the execution to the device and, in the event
+ /// of a failure, it executes the host version outlined in \a OutlinedFn.
+ /// \param D Directive to emit.
+ /// \param OutlinedFn Host version of the code to be offloaded.
+ /// \param OutlinedFnID ID of host version of the code to be offloaded.
+ /// \param IfCond Expression evaluated in if clause associated with the target
+ /// directive, or null if no if clause is used.
+ /// \param Device Expression evaluated in device clause associated with the
+ /// target directive, or null if no device clause is used.
+ /// \param CapturedVars Values captured in the current region.
+ virtual void emitTargetCall(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D,
+ llvm::Value *OutlinedFn,
+ llvm::Value *OutlinedFnID, const Expr *IfCond,
+ const Expr *Device,
+ ArrayRef<llvm::Value *> CapturedVars);
+
+ /// \brief Emit the target regions enclosed in the \a GD function definition or
+ /// the function itself in case it is a valid device function. Returns true if
+ /// \a GD was dealt with successfully.
+ /// \param GD Function to scan.
+ virtual bool emitTargetFunctions(GlobalDecl GD);
+
+ /// \brief Emit the global variable if it is a valid device global variable.
+ /// Returns true if \a GD was dealt with successfully.
+ /// \param GD Variable declaration to emit.
+ virtual bool emitTargetGlobalVariable(GlobalDecl GD);
+
+ /// \brief Emit the global \a GD if it is meaningful for the target. Returns
+ /// true if it was emitted successfully.
+ /// \param GD Global to scan.
+ virtual bool emitTargetGlobal(GlobalDecl GD);
+
+ /// \brief Creates the offloading descriptor in the event any target region
+ /// was emitted in the current module and returns the function that registers
+ /// it.
+ virtual llvm::Function *emitRegistrationFunction();
+};
+
+} // namespace CodeGen
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h
new file mode 100644
index 0000000..d4ad33e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h
@@ -0,0 +1,220 @@
+//===--- CGRecordLayout.h - LLVM Record Layout Information ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGRECORDLAYOUT_H
+#define LLVM_CLANG_LIB_CODEGEN_CGRECORDLAYOUT_H
+
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/DerivedTypes.h"
+
+namespace llvm {
+ class StructType;
+}
+
+namespace clang {
+namespace CodeGen {
+
+/// \brief Structure with information about how a bitfield should be accessed.
+///
+/// Often we lay out a sequence of bitfields as a contiguous sequence of bits.
+/// When the AST record layout does this, we represent it in the LLVM IR's type
+/// as either a sequence of i8 members or a byte array to reserve the number of
+/// bytes touched without forcing any particular alignment beyond the basic
+/// character alignment.
+///
+/// Then accessing a particular bitfield involves converting this byte array
+/// into a single integer of that size (i24 or i40 -- may not be power-of-two
+/// size), loading it, and shifting and masking to extract the particular
+/// subsequence of bits which make up that particular bitfield. This structure
+/// encodes the information used to construct the extraction code sequences.
+/// The CGRecordLayout also has a field index which encodes which byte-sequence
+/// this bitfield falls within. Let's assume the following C struct:
+///
+/// struct S {
+/// char a, b, c;
+/// unsigned bits : 3;
+/// unsigned more_bits : 4;
+/// unsigned still_more_bits : 7;
+/// };
+///
+/// This will end up as the following LLVM type. The i8 members at indices 3
+/// and 4 hold the bitfields, and the trailing array is padding out to a
+/// 4-byte alignment.
+///
+/// %t = type { i8, i8, i8, i8, i8, [3 x i8] }
+///
+/// When generating code to access more_bits, we'll generate something
+/// essentially like this:
+///
+/// define i32 @foo(%t* %base) {
+/// %1 = gep %t* %base, i32 0, i32 3
+/// %2 = load i8* %1
+/// %3 = lshr i8 %2, 3
+/// %4 = and i8 %3, 15
+/// %5 = zext i8 %4 to i32
+/// ret i32 %5
+/// }
+///
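+/// For the more_bits access above, the corresponding helper object would
+/// plausibly hold (illustrative values; the exact numbers depend on the
+/// target and layout rules):
+///
+///   CGBitFieldInfo { Offset = 3, Size = 4, IsSigned = 0,
+///                    StorageSize = 8, StorageOffset = 3 bytes }
+///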
+struct CGBitFieldInfo {
+ /// The offset within a contiguous run of bitfields that are represented as
+ /// a single "field" within the LLVM struct type. This offset is in bits.
+ unsigned Offset : 16;
+
+ /// The total size of the bit-field, in bits.
+ unsigned Size : 15;
+
+ /// Whether the bit-field is signed.
+ unsigned IsSigned : 1;
+
+ /// The storage size in bits which should be used when accessing this
+ /// bitfield.
+ unsigned StorageSize;
+
+ /// The offset of the bitfield storage from the start of the struct.
+ CharUnits StorageOffset;
+
+ CGBitFieldInfo()
+ : Offset(), Size(), IsSigned(), StorageSize(), StorageOffset() {}
+
+ CGBitFieldInfo(unsigned Offset, unsigned Size, bool IsSigned,
+ unsigned StorageSize, CharUnits StorageOffset)
+ : Offset(Offset), Size(Size), IsSigned(IsSigned),
+ StorageSize(StorageSize), StorageOffset(StorageOffset) {}
+
+ void print(raw_ostream &OS) const;
+ void dump() const;
+
+ /// \brief Given a bit-field decl, build an appropriate helper object for
+ /// accessing that field (which is expected to have the given offset and
+ /// size).
+ static CGBitFieldInfo MakeInfo(class CodeGenTypes &Types,
+ const FieldDecl *FD,
+ uint64_t Offset, uint64_t Size,
+ uint64_t StorageSize,
+ CharUnits StorageOffset);
+};
+
+/// CGRecordLayout - This class handles struct and union layout info while
+/// lowering AST types to LLVM types.
+///
+/// These layout objects are only created on demand as IR generation requires.
+class CGRecordLayout {
+ friend class CodeGenTypes;
+
+ CGRecordLayout(const CGRecordLayout &) = delete;
+ void operator=(const CGRecordLayout &) = delete;
+
+private:
+ /// The LLVM type corresponding to this record layout; used when
+ /// laying it out as a complete object.
+ llvm::StructType *CompleteObjectType;
+
+ /// The LLVM type for the non-virtual part of this record layout;
+ /// used when laying it out as a base subobject.
+ llvm::StructType *BaseSubobjectType;
+
+ /// Map from (non-bit-field) struct field to the corresponding llvm struct
+ /// type field no. This info is populated by record builder.
+ llvm::DenseMap<const FieldDecl *, unsigned> FieldInfo;
+
+ /// Map from (bit-field) struct field to the corresponding llvm struct type
+ /// field no. This info is populated by record builder.
+ llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
+
+ // FIXME: Maybe we could use a CXXBaseSpecifier as the key and use a single
+ // map for both virtual and non-virtual bases.
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
+
+ /// Map from virtual bases to their field index in the complete object.
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> CompleteObjectVirtualBases;
+
+ /// False if any direct or indirect subobject of this class, when
+ /// considered as a complete object, requires a non-zero bitpattern
+ /// when zero-initialized.
+ bool IsZeroInitializable : 1;
+
+ /// False if any direct or indirect subobject of this class, when
+ /// considered as a base subobject, requires a non-zero bitpattern
+ /// when zero-initialized.
+ bool IsZeroInitializableAsBase : 1;
+
+public:
+ CGRecordLayout(llvm::StructType *CompleteObjectType,
+ llvm::StructType *BaseSubobjectType,
+ bool IsZeroInitializable,
+ bool IsZeroInitializableAsBase)
+ : CompleteObjectType(CompleteObjectType),
+ BaseSubobjectType(BaseSubobjectType),
+ IsZeroInitializable(IsZeroInitializable),
+ IsZeroInitializableAsBase(IsZeroInitializableAsBase) {}
+
+ /// \brief Return the "complete object" LLVM type associated with
+ /// this record.
+ llvm::StructType *getLLVMType() const {
+ return CompleteObjectType;
+ }
+
+ /// \brief Return the "base subobject" LLVM type associated with
+ /// this record.
+ llvm::StructType *getBaseSubobjectLLVMType() const {
+ return BaseSubobjectType;
+ }
+
+ /// \brief Check whether this struct can be C++ zero-initialized
+ /// with a zeroinitializer.
+ bool isZeroInitializable() const {
+ return IsZeroInitializable;
+ }
+
+ /// \brief Check whether this struct can be C++ zero-initialized
+ /// with a zeroinitializer when considered as a base subobject.
+ bool isZeroInitializableAsBase() const {
+ return IsZeroInitializableAsBase;
+ }
+
+ /// \brief Return llvm::StructType element number that corresponds to the
+ /// field FD.
+ unsigned getLLVMFieldNo(const FieldDecl *FD) const {
+ FD = FD->getCanonicalDecl();
+ assert(FieldInfo.count(FD) && "Invalid field for record!");
+ return FieldInfo.lookup(FD);
+ }
+
+ unsigned getNonVirtualBaseLLVMFieldNo(const CXXRecordDecl *RD) const {
+ assert(NonVirtualBases.count(RD) && "Invalid non-virtual base!");
+ return NonVirtualBases.lookup(RD);
+ }
+
+ /// \brief Return the LLVM field index corresponding to the given
+ /// virtual base. Only valid when operating on the complete object.
+ unsigned getVirtualBaseIndex(const CXXRecordDecl *base) const {
+ assert(CompleteObjectVirtualBases.count(base) && "Invalid virtual base!");
+ return CompleteObjectVirtualBases.lookup(base);
+ }
+
+ /// \brief Return the BitFieldInfo that corresponds to the field FD.
+ const CGBitFieldInfo &getBitFieldInfo(const FieldDecl *FD) const {
+ FD = FD->getCanonicalDecl();
+ assert(FD->isBitField() && "Invalid call for non-bit-field decl!");
+ llvm::DenseMap<const FieldDecl *, CGBitFieldInfo>::const_iterator
+ it = BitFields.find(FD);
+ assert(it != BitFields.end() && "Unable to find bitfield info");
+ return it->second;
+ }
+
+ void print(raw_ostream &OS) const;
+ void dump() const;
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
new file mode 100644
index 0000000..375b59c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -0,0 +1,860 @@
+//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Builder implementation for CGRecordLayout objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGRecordLayout.h"
+#include "CGCXXABI.h"
+#include "CodeGenTypes.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+/// The CGRecordLowering is responsible for lowering an ASTRecordLayout to an
+/// llvm::Type. Some of the lowering is straightforward, some is not. Here we
+/// detail some of the complexities and weirdnesses.
+/// * LLVM does not have unions - Unions can, in theory, be represented by any
+/// llvm::Type with correct size. We choose a field via a specific heuristic
+/// and add padding if necessary.
+/// * LLVM does not have bitfields - Bitfields are collected into contiguous
+/// runs and allocated as a single storage type for the run. ASTRecordLayout
+/// contains enough information to determine where the runs break. Microsoft
+/// and Itanium follow different rules and use different codepaths.
+/// * It is desired that, when possible, bitfields use the appropriate iN type
+/// when lowered to llvm types. For example unsigned x : 24 gets lowered to
+ /// i24. This isn't always possible because i24 has a storage size of 32 bits
+/// and if it is possible to use that extra byte of padding we must use
+/// [i8 x 3] instead of i24. The function clipTailPadding does this.
+/// C++ examples that require clipping:
+/// struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
+/// struct A { int a : 24; }; // a must be clipped because a struct like B
+/// could exist: struct B : A { char b; }; // b goes at offset 3
+/// * Clang ignores 0 sized bitfields and 0 sized bases but *not* zero sized
+/// fields. The existing asserts suggest that LLVM assumes that *every* field
+/// has an underlying storage type. Therefore empty structures containing
+/// zero sized subobjects such as empty records or zero sized arrays still get
+/// a zero sized (empty struct) storage type.
+/// * Clang reads the complete type rather than the base type when generating
+/// code to access fields. Bitfields in tail position with tail padding may
+/// be clipped in the base class but not the complete class (we may discover
+/// that the tail padding is not used in the complete class.) However,
+/// because LLVM reads from the complete type it can generate incorrect code
+/// if we do not clip the tail padding off of the bitfield in the complete
+/// layout. This introduces a somewhat awkward extra unnecessary clip stage.
+/// The location of the clip is stored internally as a sentinal of type
+/// SCISSOR. If LLVM were updated to read base types (which it probably
+/// should because locations of things such as VBases are bogus in the llvm
+/// type anyway) then we could eliminate the SCISSOR.
+/// * Itanium allows nearly empty primary virtual bases. These bases don't
+/// get their own storage because they're laid out as part of another base
+/// or at the beginning of the structure. Determining if a VBase actually
+/// gets storage awkwardly involves a walk of all bases.
+/// * VFPtrs and VBPtrs do *not* make a record NotZeroInitializable.
+struct CGRecordLowering {
+ // MemberInfo is a helper structure that contains information about a record
+ // member. In addition to the standard member types, there exists a
+ // sentinel member type that ensures correct rounding.
+ struct MemberInfo {
+ CharUnits Offset;
+ enum InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } Kind;
+ llvm::Type *Data;
+ union {
+ const FieldDecl *FD;
+ const CXXRecordDecl *RD;
+ };
+ MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
+ const FieldDecl *FD = nullptr)
+ : Offset(Offset), Kind(Kind), Data(Data), FD(FD) {}
+ MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
+ const CXXRecordDecl *RD)
+ : Offset(Offset), Kind(Kind), Data(Data), RD(RD) {}
+ // MemberInfos are sorted so we define a < operator.
+ bool operator <(const MemberInfo& a) const { return Offset < a.Offset; }
+ };
+ // The constructor.
+ CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed);
+ // Short helper routines.
+ /// \brief Constructs a MemberInfo instance from an offset and llvm::Type *.
+ MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
+ return MemberInfo(Offset, MemberInfo::Field, Data);
+ }
+
+ /// The Microsoft bitfield layout rule allocates discrete storage
+ /// units of the field's formal type and only combines adjacent
+ /// fields of the same formal type. We want to emit a layout with
+ /// these discrete storage units instead of combining them into a
+ /// continuous run.
+ bool isDiscreteBitFieldABI() {
+ return Context.getTargetInfo().getCXXABI().isMicrosoft() ||
+ D->isMsStruct(Context);
+ }
+
+ /// The Itanium base layout rule allows virtual bases to overlap
+ /// other bases, which complicates layout in specific ways.
+ ///
+ /// Note specifically that the ms_struct attribute doesn't change this.
+ bool isOverlappingVBaseABI() {
+ return !Context.getTargetInfo().getCXXABI().isMicrosoft();
+ }
+
+ /// \brief Wraps llvm::Type::getIntNTy with some implicit arguments.
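+ /// For example, getIntNType(14) rounds the bit count up to a byte multiple
+ /// and returns i16.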
+ llvm::Type *getIntNType(uint64_t NumBits) {
+ return llvm::Type::getIntNTy(Types.getLLVMContext(),
+ (unsigned)llvm::RoundUpToAlignment(NumBits, 8));
+ }
+ /// \brief Gets an llvm type of size NumBytes and alignment 1.
+ llvm::Type *getByteArrayType(CharUnits NumBytes) {
+ assert(!NumBytes.isZero() && "Empty byte arrays aren't allowed.");
+ llvm::Type *Type = llvm::Type::getInt8Ty(Types.getLLVMContext());
+ return NumBytes == CharUnits::One() ? Type :
+ (llvm::Type *)llvm::ArrayType::get(Type, NumBytes.getQuantity());
+ }
+ /// \brief Gets the storage type for a field decl and handles storage
+ /// for Itanium bitfields that are smaller than their declared type.
+ llvm::Type *getStorageType(const FieldDecl *FD) {
+ llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
+ if (!FD->isBitField()) return Type;
+ if (isDiscreteBitFieldABI()) return Type;
+ return getIntNType(std::min(FD->getBitWidthValue(Context),
+ (unsigned)Context.toBits(getSize(Type))));
+ }
+ /// \brief Gets the llvm base subobject type from a CXXRecordDecl.
+ llvm::Type *getStorageType(const CXXRecordDecl *RD) {
+ return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType();
+ }
+ CharUnits bitsToCharUnits(uint64_t BitOffset) {
+ return Context.toCharUnitsFromBits(BitOffset);
+ }
+ CharUnits getSize(llvm::Type *Type) {
+ return CharUnits::fromQuantity(DataLayout.getTypeAllocSize(Type));
+ }
+ CharUnits getAlignment(llvm::Type *Type) {
+ return CharUnits::fromQuantity(DataLayout.getABITypeAlignment(Type));
+ }
+ bool isZeroInitializable(const FieldDecl *FD) {
+ return Types.isZeroInitializable(FD->getType());
+ }
+ bool isZeroInitializable(const RecordDecl *RD) {
+ return Types.isZeroInitializable(RD);
+ }
+ void appendPaddingBytes(CharUnits Size) {
+ if (!Size.isZero())
+ FieldTypes.push_back(getByteArrayType(Size));
+ }
+ uint64_t getFieldBitOffset(const FieldDecl *FD) {
+ return Layout.getFieldOffset(FD->getFieldIndex());
+ }
+ // Layout routines.
+ void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset,
+ llvm::Type *StorageType);
+ /// \brief Lowers an ASTRecordLayout to an llvm type.
+ void lower(bool NonVirtualBaseType);
+ void lowerUnion();
+ void accumulateFields();
+ void accumulateBitFields(RecordDecl::field_iterator Field,
+ RecordDecl::field_iterator FieldEnd);
+ void accumulateBases();
+ void accumulateVPtrs();
+ void accumulateVBases();
+ /// \brief Recursively searches all of the bases to find out if a vbase is
+ /// not the primary vbase of some base class.
+ bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query);
+ void calculateZeroInit();
+ /// \brief Lowers bitfield storage types to I8 arrays for bitfields with tail
+ /// padding that is or can potentially be used.
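+ /// For example, an i24 storage unit whose fourth (padding) byte is used by
+ /// another member is re-expressed as [3 x i8], whose allocation size is
+ /// exactly three bytes.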
+ void clipTailPadding();
+ /// \brief Determines if we need a packed llvm struct.
+ void determinePacked(bool NVBaseType);
+ /// \brief Inserts padding everywhere it's needed.
+ void insertPadding();
+ /// \brief Fills out the structures that are ultimately consumed.
+ void fillOutputFields();
+ // Input memoization fields.
+ CodeGenTypes &Types;
+ const ASTContext &Context;
+ const RecordDecl *D;
+ const CXXRecordDecl *RD;
+ const ASTRecordLayout &Layout;
+ const llvm::DataLayout &DataLayout;
+ // Helpful intermediate data-structures.
+ std::vector<MemberInfo> Members;
+ // Output fields, consumed by CodeGenTypes::ComputeRecordLayout.
+ SmallVector<llvm::Type *, 16> FieldTypes;
+ llvm::DenseMap<const FieldDecl *, unsigned> Fields;
+ llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
+ bool IsZeroInitializable : 1;
+ bool IsZeroInitializableAsBase : 1;
+ bool Packed : 1;
+private:
+ CGRecordLowering(const CGRecordLowering &) = delete;
+ void operator =(const CGRecordLowering &) = delete;
+};
+} // namespace
+
+CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed)
+ : Types(Types), Context(Types.getContext()), D(D),
+ RD(dyn_cast<CXXRecordDecl>(D)),
+ Layout(Types.getContext().getASTRecordLayout(D)),
+ DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
+ IsZeroInitializableAsBase(true), Packed(Packed) {}
+
+void CGRecordLowering::setBitFieldInfo(
+ const FieldDecl *FD, CharUnits StartOffset, llvm::Type *StorageType) {
+ CGBitFieldInfo &Info = BitFields[FD->getCanonicalDecl()];
+ Info.IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
+ Info.Offset = (unsigned)(getFieldBitOffset(FD) - Context.toBits(StartOffset));
+ Info.Size = FD->getBitWidthValue(Context);
+ Info.StorageSize = (unsigned)DataLayout.getTypeAllocSizeInBits(StorageType);
+ Info.StorageOffset = StartOffset;
+ if (Info.Size > Info.StorageSize)
+ Info.Size = Info.StorageSize;
+ // Reverse the bit offsets for big endian machines. Because we represent
+ // a bitfield as a single large integer load, we can imagine the bits
+ // counting from the most-significant-bit instead of the
+ // least-significant-bit.
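+ // For example (assuming an 8-bit storage unit), a field with Size 4 at
+ // little-endian Offset 3 ends up at Offset 8 - (3 + 4) = 1 here.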
+ if (DataLayout.isBigEndian())
+ Info.Offset = Info.StorageSize - (Info.Offset + Info.Size);
+}
+
+void CGRecordLowering::lower(bool NVBaseType) {
+ // The lowering process implemented in this function takes a variety of
+ // carefully ordered phases.
+ // 1) Store all members (fields and bases) in a list and sort them by offset.
+ // 2) Add a 1-byte capstone member at the Size of the structure.
+ // 3) Clip bitfield storage members if their tail padding is or might be
+ // used by another field or base. The clipping process uses the capstone
+ // by treating it as another object that occurs after the record.
+ // 4) Determine if the llvm-struct requires packing. It's important that this
+ // phase occur after clipping, because clipping changes the llvm type.
+ // This phase reads the offset of the capstone when determining packedness
+ // and updates the alignment of the capstone to be equal to the alignment
+ // of the record after doing so.
+ // 5) Insert padding everywhere it is needed. This phase requires 'Packed' to
+ // have been computed and needs to know the alignment of the record in
+ // order to understand if explicit tail padding is needed.
+ // 6) Remove the capstone, we don't need it anymore.
+ // 7) Determine if this record can be zero-initialized. This phase could have
+ // been placed anywhere after phase 1.
+ // 8) Format the complete list of members in a way that can be consumed by
+ // CodeGenTypes::ComputeRecordLayout.
+ CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
+ if (D->isUnion())
+ return lowerUnion();
+ accumulateFields();
+ // RD implies C++.
+ if (RD) {
+ accumulateVPtrs();
+ accumulateBases();
+ if (Members.empty())
+ return appendPaddingBytes(Size);
+ if (!NVBaseType)
+ accumulateVBases();
+ }
+ std::stable_sort(Members.begin(), Members.end());
+ Members.push_back(StorageInfo(Size, getIntNType(8)));
+ clipTailPadding();
+ determinePacked(NVBaseType);
+ insertPadding();
+ Members.pop_back();
+ calculateZeroInit();
+ fillOutputFields();
+}
+
+void CGRecordLowering::lowerUnion() {
+ CharUnits LayoutSize = Layout.getSize();
+ llvm::Type *StorageType = nullptr;
+ bool SeenNamedMember = false;
+ // Iterate through the fields setting bitFieldInfo and the Fields array. Also
+ // locate the "most appropriate" storage type. The heuristic for finding the
+ // storage type isn't necessary; the first (non-zero-length-bitfield) field's
+ // type would work fine and be simpler, but it would differ from what we've
+ // been doing and cause lit tests to change.
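+ // For example, for 'union { char c; int i; }' the loop below would pick i32
+ // as the storage type, since it has the stricter alignment (a sketch; the
+ // padding logic afterwards covers any remaining bytes).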
+ for (const auto *Field : D->fields()) {
+ if (Field->isBitField()) {
+ // Skip 0 sized bitfields.
+ if (Field->getBitWidthValue(Context) == 0)
+ continue;
+ llvm::Type *FieldType = getStorageType(Field);
+ if (LayoutSize < getSize(FieldType))
+ FieldType = getByteArrayType(LayoutSize);
+ setBitFieldInfo(Field, CharUnits::Zero(), FieldType);
+ }
+ Fields[Field->getCanonicalDecl()] = 0;
+ llvm::Type *FieldType = getStorageType(Field);
+ // Compute zero-initializable status.
+ // This union might not be zero initialized: it may contain a pointer to
+ // data member which might have some exotic initialization sequence.
+ // If this is the case, then we ought not try to come up with a "better"
+ // type; it might not be very easy to come up with a Constant which
+ // correctly initializes it.
+ if (!SeenNamedMember) {
+ SeenNamedMember = Field->getIdentifier();
+ if (!SeenNamedMember)
+ if (const auto *FieldRD =
+ dyn_cast_or_null<RecordDecl>(Field->getType()->getAsTagDecl()))
+ SeenNamedMember = FieldRD->findFirstNamedDataMember();
+ if (SeenNamedMember && !isZeroInitializable(Field)) {
+ IsZeroInitializable = IsZeroInitializableAsBase = false;
+ StorageType = FieldType;
+ }
+ }
+ // Because our union isn't zero initializable, we won't be getting a better
+ // storage type.
+ if (!IsZeroInitializable)
+ continue;
+ // Conditionally update our storage type if we've got a new "better" one.
+ if (!StorageType ||
+ getAlignment(FieldType) > getAlignment(StorageType) ||
+ (getAlignment(FieldType) == getAlignment(StorageType) &&
+ getSize(FieldType) > getSize(StorageType)))
+ StorageType = FieldType;
+ }
+ // If we have no storage type just pad to the appropriate size and return.
+ if (!StorageType)
+ return appendPaddingBytes(LayoutSize);
+ // If our storage size was bigger than our required size (can happen in the
+ // case of packed bitfields on Itanium) then just use an I8 array.
+ if (LayoutSize < getSize(StorageType))
+ StorageType = getByteArrayType(LayoutSize);
+ FieldTypes.push_back(StorageType);
+ appendPaddingBytes(LayoutSize - getSize(StorageType));
+ // Set packed if we need it.
+ if (LayoutSize % getAlignment(StorageType))
+ Packed = true;
+}
+
+void CGRecordLowering::accumulateFields() {
+ for (RecordDecl::field_iterator Field = D->field_begin(),
+ FieldEnd = D->field_end();
+ Field != FieldEnd;)
+ if (Field->isBitField()) {
+ RecordDecl::field_iterator Start = Field;
+ // Iterate to gather the list of bitfields.
+ for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
+ accumulateBitFields(Start, Field);
+ } else {
+ Members.push_back(MemberInfo(
+ bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
+ getStorageType(*Field), *Field));
+ ++Field;
+ }
+}
+
+void
+CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
+ RecordDecl::field_iterator FieldEnd) {
+ // Run stores the first element of the current run of bitfields. FieldEnd is
+ // used as a special value to note that we don't have a current run. A
+ // bitfield run is a contiguous collection of bitfields that can be stored in
+ // the same storage block. Zero-sized bitfields and bitfields that would
+ // cross an alignment boundary break a run and start a new one.
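+ // For example, given 'int a : 3; int b : 4; int : 0; int c : 2;', the
+ // zero-width bitfield ends the first run after {a, b} and c starts a new
+ // one (a sketch; the exact breaks also depend on the ABI in use).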
+ RecordDecl::field_iterator Run = FieldEnd;
+ // Tail is the offset of the first bit off the end of the current run. It's
+ // used to determine if the ASTRecordLayout is treating these two bitfields as
+ // contiguous. StartBitOffset is the offset of the beginning of the Run.
+ uint64_t StartBitOffset, Tail = 0;
+ if (isDiscreteBitFieldABI()) {
+ for (; Field != FieldEnd; ++Field) {
+ uint64_t BitOffset = getFieldBitOffset(*Field);
+ // Zero-width bitfields end runs.
+ if (Field->getBitWidthValue(Context) == 0) {
+ Run = FieldEnd;
+ continue;
+ }
+ llvm::Type *Type = Types.ConvertTypeForMem(Field->getType());
+ // If we don't have a run yet, or don't live within the previous run's
+ // allocated storage then we allocate some storage and start a new run.
+ if (Run == FieldEnd || BitOffset >= Tail) {
+ Run = Field;
+ StartBitOffset = BitOffset;
+ Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
+ // Add the storage member to the record. This must be added to the
+ // record before the bitfield members so that it gets laid out before
+ // the bitfields it contains get laid out.
+ Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
+ }
+ // Bitfields get the offset of their storage but come afterward and remain
+ // there after a stable sort.
+ Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
+ MemberInfo::Field, nullptr, *Field));
+ }
+ return;
+ }
+ for (;;) {
+ // Check to see if we need to start a new run.
+ if (Run == FieldEnd) {
+ // If we're out of fields, return.
+ if (Field == FieldEnd)
+ break;
+ // Any non-zero-length bitfield can start a new run.
+ if (Field->getBitWidthValue(Context) != 0) {
+ Run = Field;
+ StartBitOffset = getFieldBitOffset(*Field);
+ Tail = StartBitOffset + Field->getBitWidthValue(Context);
+ }
+ ++Field;
+ continue;
+ }
+ // Add bitfields to the run as long as they qualify.
+ if (Field != FieldEnd && Field->getBitWidthValue(Context) != 0 &&
+ Tail == getFieldBitOffset(*Field)) {
+ Tail += Field->getBitWidthValue(Context);
+ ++Field;
+ continue;
+ }
+ // We've hit a break-point in the run and need to emit a storage field.
+ llvm::Type *Type = getIntNType(Tail - StartBitOffset);
+ // Add the storage member to the record and set the bitfield info for all of
+ // the bitfields in the run. Bitfields get the offset of their storage but
+ // come afterward and remain there after a stable sort.
+ Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
+ for (; Run != Field; ++Run)
+ Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
+ MemberInfo::Field, nullptr, *Run));
+ Run = FieldEnd;
+ }
+}
+
+void CGRecordLowering::accumulateBases() {
+ // If we've got a primary virtual base, we need to add it with the bases.
+ if (Layout.isPrimaryBaseVirtual()) {
+ const CXXRecordDecl *BaseDecl = Layout.getPrimaryBase();
+ Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::Base,
+ getStorageType(BaseDecl), BaseDecl));
+ }
+ // Accumulate the non-virtual bases.
+ for (const auto &Base : RD->bases()) {
+ if (Base.isVirtual())
+ continue;
+
+ // Bases can be zero-sized even if not technically empty if they
+ // contain only a trailing array member.
+ const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+ if (!BaseDecl->isEmpty() &&
+ !Context.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
+ Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
+ MemberInfo::Base, getStorageType(BaseDecl), BaseDecl));
+ }
+}
+
+void CGRecordLowering::accumulateVPtrs() {
+ if (Layout.hasOwnVFPtr())
+ Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
+ llvm::FunctionType::get(getIntNType(32), /*isVarArg=*/true)->
+ getPointerTo()->getPointerTo()));
+ if (Layout.hasOwnVBPtr())
+ Members.push_back(MemberInfo(Layout.getVBPtrOffset(), MemberInfo::VBPtr,
+ llvm::Type::getInt32PtrTy(Types.getLLVMContext())));
+}
+
+void CGRecordLowering::accumulateVBases() {
+ CharUnits ScissorOffset = Layout.getNonVirtualSize();
+ // In the Itanium ABI, it's possible to place a vbase at a dsize that is
+ // smaller than the nvsize. Here we check to see if such a base is placed
+ // before the nvsize and set the scissor offset to that, instead of the
+ // nvsize.
+ if (isOverlappingVBaseABI())
+ for (const auto &Base : RD->vbases()) {
+ const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+ if (BaseDecl->isEmpty())
+ continue;
+ // If the vbase is a primary virtual base of some base, then it doesn't
+ // get its own storage location but instead lives inside of that base.
+ if (Context.isNearlyEmpty(BaseDecl) && !hasOwnStorage(RD, BaseDecl))
+ continue;
+ ScissorOffset = std::min(ScissorOffset,
+ Layout.getVBaseClassOffset(BaseDecl));
+ }
+ Members.push_back(MemberInfo(ScissorOffset, MemberInfo::Scissor, nullptr,
+ RD));
+ for (const auto &Base : RD->vbases()) {
+ const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+ if (BaseDecl->isEmpty())
+ continue;
+ CharUnits Offset = Layout.getVBaseClassOffset(BaseDecl);
+ // If the vbase is a primary virtual base of some base, then it doesn't
+ // get its own storage location but instead lives inside of that base.
+ if (isOverlappingVBaseABI() &&
+ Context.isNearlyEmpty(BaseDecl) &&
+ !hasOwnStorage(RD, BaseDecl)) {
+ Members.push_back(MemberInfo(Offset, MemberInfo::VBase, nullptr,
+ BaseDecl));
+ continue;
+ }
+ // If we've got a vtordisp, add it as a storage type.
+ if (Layout.getVBaseOffsetsMap().find(BaseDecl)->second.hasVtorDisp())
+ Members.push_back(StorageInfo(Offset - CharUnits::fromQuantity(4),
+ getIntNType(32)));
+ Members.push_back(MemberInfo(Offset, MemberInfo::VBase,
+ getStorageType(BaseDecl), BaseDecl));
+ }
+}
+
+bool CGRecordLowering::hasOwnStorage(const CXXRecordDecl *Decl,
+ const CXXRecordDecl *Query) {
+ const ASTRecordLayout &DeclLayout = Context.getASTRecordLayout(Decl);
+ if (DeclLayout.isPrimaryBaseVirtual() && DeclLayout.getPrimaryBase() == Query)
+ return false;
+ for (const auto &Base : Decl->bases())
+ if (!hasOwnStorage(Base.getType()->getAsCXXRecordDecl(), Query))
+ return false;
+ return true;
+}
+
+void CGRecordLowering::calculateZeroInit() {
+ for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
+ MemberEnd = Members.end();
+ IsZeroInitializableAsBase && Member != MemberEnd; ++Member) {
+ if (Member->Kind == MemberInfo::Field) {
+ if (!Member->FD || isZeroInitializable(Member->FD))
+ continue;
+ IsZeroInitializable = IsZeroInitializableAsBase = false;
+ } else if (Member->Kind == MemberInfo::Base ||
+ Member->Kind == MemberInfo::VBase) {
+ if (isZeroInitializable(Member->RD))
+ continue;
+ IsZeroInitializable = false;
+ if (Member->Kind == MemberInfo::Base)
+ IsZeroInitializableAsBase = false;
+ }
+ }
+}
+
+void CGRecordLowering::clipTailPadding() {
+ std::vector<MemberInfo>::iterator Prior = Members.begin();
+ CharUnits Tail = getSize(Prior->Data);
+ for (std::vector<MemberInfo>::iterator Member = Prior + 1,
+ MemberEnd = Members.end();
+ Member != MemberEnd; ++Member) {
+ // Only members with data and the scissor can cut into tail padding.
+ if (!Member->Data && Member->Kind != MemberInfo::Scissor)
+ continue;
+ if (Member->Offset < Tail) {
+ assert(Prior->Kind == MemberInfo::Field && !Prior->FD &&
+ "Only storage fields have tail padding!");
+ Prior->Data = getByteArrayType(bitsToCharUnits(llvm::RoundUpToAlignment(
+ cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
+ }
+ if (Member->Data)
+ Prior = Member;
+ Tail = Prior->Offset + getSize(Prior->Data);
+ }
+}
+
+void CGRecordLowering::determinePacked(bool NVBaseType) {
+ if (Packed)
+ return;
+ CharUnits Alignment = CharUnits::One();
+ CharUnits NVAlignment = CharUnits::One();
+ CharUnits NVSize =
+ !NVBaseType && RD ? Layout.getNonVirtualSize() : CharUnits::Zero();
+ for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
+ MemberEnd = Members.end();
+ Member != MemberEnd; ++Member) {
+ if (!Member->Data)
+ continue;
+ // If any member falls at an offset that is not a multiple of its alignment,
+ // then the entire record must be packed.
+ if (Member->Offset % getAlignment(Member->Data))
+ Packed = true;
+ if (Member->Offset < NVSize)
+ NVAlignment = std::max(NVAlignment, getAlignment(Member->Data));
+ Alignment = std::max(Alignment, getAlignment(Member->Data));
+ }
+ // If the size of the record (the capstone's offset) is not a multiple of the
+ // record's alignment, it must be packed.
+ if (Members.back().Offset % Alignment)
+ Packed = true;
+ // If the size of the non-virtual sub-object is not a multiple of the
+ // non-virtual sub-object's alignment, it must be packed. We cannot have a
+ // packed non-virtual sub-object and an unpacked complete object, or vice
+ // versa.
+ if (NVSize % NVAlignment)
+ Packed = true;
+ // Update the alignment of the sentinel.
+ if (!Packed)
+ Members.back().Data = getIntNType(Context.toBits(Alignment));
+}
+
+void CGRecordLowering::insertPadding() {
+ std::vector<std::pair<CharUnits, CharUnits> > Padding;
+ CharUnits Size = CharUnits::Zero();
+ for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
+ MemberEnd = Members.end();
+ Member != MemberEnd; ++Member) {
+ if (!Member->Data)
+ continue;
+ CharUnits Offset = Member->Offset;
+ assert(Offset >= Size);
+ // Insert padding if we need to.
+ if (Offset != Size.RoundUpToAlignment(Packed ? CharUnits::One() :
+ getAlignment(Member->Data)))
+ Padding.push_back(std::make_pair(Size, Offset - Size));
+ Size = Offset + getSize(Member->Data);
+ }
+ if (Padding.empty())
+ return;
+ // Add the padding to the Members list and sort it.
+ for (std::vector<std::pair<CharUnits, CharUnits> >::const_iterator
+ Pad = Padding.begin(), PadEnd = Padding.end();
+ Pad != PadEnd; ++Pad)
+ Members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second)));
+ std::stable_sort(Members.begin(), Members.end());
+}
+
+void CGRecordLowering::fillOutputFields() {
+ for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
+ MemberEnd = Members.end();
+ Member != MemberEnd; ++Member) {
+ if (Member->Data)
+ FieldTypes.push_back(Member->Data);
+ if (Member->Kind == MemberInfo::Field) {
+ if (Member->FD)
+ Fields[Member->FD->getCanonicalDecl()] = FieldTypes.size() - 1;
+ // A field without storage must be a bitfield.
+ if (!Member->Data)
+ setBitFieldInfo(Member->FD, Member->Offset, FieldTypes.back());
+ } else if (Member->Kind == MemberInfo::Base)
+ NonVirtualBases[Member->RD] = FieldTypes.size() - 1;
+ else if (Member->Kind == MemberInfo::VBase)
+ VirtualBases[Member->RD] = FieldTypes.size() - 1;
+ }
+}
+
+CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
+ const FieldDecl *FD,
+ uint64_t Offset, uint64_t Size,
+ uint64_t StorageSize,
+ CharUnits StorageOffset) {
+ // This function is vestigial from CGRecordLayoutBuilder days but is still
+ // used in GCObjCRuntime.cpp. That usage has a FIXME attached to it which,
+ // when addressed, will allow for the removal of this function.
+ llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
+ CharUnits TypeSizeInBytes =
+ CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
+ uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);
+
+ bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
+
+ if (Size > TypeSizeInBits) {
+ // We have a wide bit-field. The extra bits are only used for padding, so
+ // if we have a bitfield of type T, with size N:
+ //
+ // T t : N;
+ //
+ // We can just assume that it's:
+ //
+ // T t : sizeof(T);
+ //
+ Size = TypeSizeInBits;
+ }
+
+ // Reverse the bit offsets for big endian machines. Because we represent
+ // a bitfield as a single large integer load, we can imagine the bits
+ // counting from the most-significant-bit instead of the
+ // least-significant-bit.
+ if (Types.getDataLayout().isBigEndian()) {
+ Offset = StorageSize - (Offset + Size);
+ }
+
+ return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageOffset);
+}
+
+CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
+ llvm::StructType *Ty) {
+ CGRecordLowering Builder(*this, D, /*Packed=*/false);
+
+ Builder.lower(/*NonVirtualBaseType=*/false);
+
+ // If we're in C++, compute the base subobject type.
+ llvm::StructType *BaseTy = nullptr;
+ if (isa<CXXRecordDecl>(D) && !D->isUnion() && !D->hasAttr<FinalAttr>()) {
+ BaseTy = Ty;
+ if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) {
+ CGRecordLowering BaseBuilder(*this, D, /*Packed=*/Builder.Packed);
+ BaseBuilder.lower(/*NonVirtualBaseType=*/true);
+ BaseTy = llvm::StructType::create(
+ getLLVMContext(), BaseBuilder.FieldTypes, "", BaseBuilder.Packed);
+ addRecordTypeName(D, BaseTy, ".base");
+ // BaseTy and Ty must agree on their packedness for getLLVMFieldNo to work
+ // on both of them with the same index.
+ assert(Builder.Packed == BaseBuilder.Packed &&
+ "Non-virtual and complete types must agree on packedness");
+ }
+ }
+
+ // Fill in the struct *after* computing the base type. Filling in the body
+ // signifies that the type is no longer opaque and record layout is complete,
+ // but we may need to recursively layout D while laying D out as a base type.
+ Ty->setBody(Builder.FieldTypes, Builder.Packed);
+
+ CGRecordLayout *RL =
+ new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
+ Builder.IsZeroInitializableAsBase);
+
+ RL->NonVirtualBases.swap(Builder.NonVirtualBases);
+ RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);
+
+ // Add all the field numbers.
+ RL->FieldInfo.swap(Builder.Fields);
+
+ // Add bitfield info.
+ RL->BitFields.swap(Builder.BitFields);
+
+ // Dump the layout, if requested.
+ if (getContext().getLangOpts().DumpRecordLayouts) {
+ llvm::outs() << "\n*** Dumping IRgen Record Layout\n";
+ llvm::outs() << "Record: ";
+ D->dump(llvm::outs());
+ llvm::outs() << "\nLayout: ";
+ RL->print(llvm::outs());
+ }
+
+#ifndef NDEBUG
+ // Verify that the computed LLVM struct size matches the AST layout size.
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);
+
+ uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
+ assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
+ "Type size mismatch!");
+
+ if (BaseTy) {
+ CharUnits NonVirtualSize = Layout.getNonVirtualSize();
+
+ uint64_t AlignedNonVirtualTypeSizeInBits =
+ getContext().toBits(NonVirtualSize);
+
+ assert(AlignedNonVirtualTypeSizeInBits ==
+ getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
+ "Type size mismatch!");
+ }
+
+ // Verify that the LLVM and AST field offsets agree.
+ llvm::StructType *ST =
+ dyn_cast<llvm::StructType>(RL->getLLVMType());
+ const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);
+
+ const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
+ RecordDecl::field_iterator it = D->field_begin();
+ for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
+ const FieldDecl *FD = *it;
+
+ // For non-bit-fields, just check that the LLVM struct offset matches the
+ // AST offset.
+ if (!FD->isBitField()) {
+ unsigned FieldNo = RL->getLLVMFieldNo(FD);
+ assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
+ "Invalid field offset!");
+ continue;
+ }
+
+ // Ignore unnamed bit-fields.
+ if (!FD->getDeclName())
+ continue;
+
+ // Don't inspect zero-length bitfields.
+ if (FD->getBitWidthValue(getContext()) == 0)
+ continue;
+
+ const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
+ llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));
+
+ // Unions have overlapping elements dictating their layout, but for
+ // non-unions we can verify that this section of the layout is the exact
+ // expected size.
+ if (D->isUnion()) {
+ // For unions we verify that the start is zero and the size
+ // is in-bounds. However, on BE systems, the offset may be non-zero, but
+ // the size + offset should match the storage size in that case as it
+ // "starts" at the back.
+ if (getDataLayout().isBigEndian())
+ assert(static_cast<unsigned>(Info.Offset + Info.Size) ==
+ Info.StorageSize &&
+ "Big endian union bitfield does not end at the back");
+ else
+ assert(Info.Offset == 0 &&
+ "Little endian union bitfield with a non-zero offset");
+ assert(Info.StorageSize <= SL->getSizeInBits() &&
+ "Union not large enough for bitfield storage");
+ } else {
+ assert(Info.StorageSize ==
+ getDataLayout().getTypeAllocSizeInBits(ElementTy) &&
+ "Storage size does not match the element type size");
+ }
+ assert(Info.Size > 0 && "Empty bitfield!");
+ assert(static_cast<unsigned>(Info.Offset) + Info.Size <= Info.StorageSize &&
+ "Bitfield outside of its allocated storage");
+ }
+#endif
+
+ return RL;
+}
+
+void CGRecordLayout::print(raw_ostream &OS) const {
+ OS << "<CGRecordLayout\n";
+ OS << " LLVMType:" << *CompleteObjectType << "\n";
+ if (BaseSubobjectType)
+ OS << " NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
+ OS << " IsZeroInitializable:" << IsZeroInitializable << "\n";
+ OS << " BitFields:[\n";
+
+ // Print bit-field infos in declaration order.
+ std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
+ for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
+ it = BitFields.begin(), ie = BitFields.end();
+ it != ie; ++it) {
+ const RecordDecl *RD = it->first->getParent();
+ unsigned Index = 0;
+ for (RecordDecl::field_iterator
+ it2 = RD->field_begin(); *it2 != it->first; ++it2)
+ ++Index;
+ BFIs.push_back(std::make_pair(Index, &it->second));
+ }
+ llvm::array_pod_sort(BFIs.begin(), BFIs.end());
+ for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
+ OS.indent(4);
+ BFIs[i].second->print(OS);
+ OS << "\n";
+ }
+
+ OS << "]>\n";
+}
+
+void CGRecordLayout::dump() const {
+ print(llvm::errs());
+}
+
+void CGBitFieldInfo::print(raw_ostream &OS) const {
+ OS << "<CGBitFieldInfo"
+ << " Offset:" << Offset
+ << " Size:" << Size
+ << " IsSigned:" << IsSigned
+ << " StorageSize:" << StorageSize
+ << " StorageOffset:" << StorageOffset.getQuantity() << ">";
+}
+
+void CGBitFieldInfo::dump() const {
+ print(llvm::errs());
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
new file mode 100644
index 0000000..cc4fa2e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
@@ -0,0 +1,2193 @@
+//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Stmt nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGDebugInfo.h"
+#include "CodeGenModule.h"
+#include "TargetInfo.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/LoopHint.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/MDBuilder.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Statement Emission
+//===----------------------------------------------------------------------===//
+
+void CodeGenFunction::EmitStopPoint(const Stmt *S) {
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ SourceLocation Loc;
+ Loc = S->getLocStart();
+ DI->EmitLocation(Builder, Loc);
+
+ LastStopPoint = Loc;
+ }
+}
+
+void CodeGenFunction::EmitStmt(const Stmt *S) {
+ assert(S && "Null statement?");
+ PGO.setCurrentStmt(S);
+
+ // These statements have their own debug info handling.
+ if (EmitSimpleStmt(S))
+ return;
+
+ // Check if we are generating unreachable code.
+ if (!HaveInsertPoint()) {
+ // If so, and the statement doesn't contain a label, then we do not need to
+ // generate actual code. This is safe because (1) the current point is
+ // unreachable, so we don't need to execute the code, and (2) we've already
+ // handled the statements which update internal data structures (like the
+ // local variable map) which could be used by subsequent statements.
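+ // For instance, statements directly after an unconditional 'return' are
+ // unreachable, but a label among them may still be reached via 'goto'.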
+ if (!ContainsLabel(S)) {
+ // Verify that any decl statements were handled as simple, they may be in
+ // scope of subsequent reachable statements.
+ assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
+ return;
+ }
+
+ // Otherwise, make a new block to hold the code.
+ EnsureInsertPoint();
+ }
+
+ // Generate a stoppoint if we are emitting debug info.
+ EmitStopPoint(S);
+
+ switch (S->getStmtClass()) {
+ case Stmt::NoStmtClass:
+ case Stmt::CXXCatchStmtClass:
+ case Stmt::SEHExceptStmtClass:
+ case Stmt::SEHFinallyStmtClass:
+ case Stmt::MSDependentExistsStmtClass:
+ llvm_unreachable("invalid statement class to emit generically");
+ case Stmt::NullStmtClass:
+ case Stmt::CompoundStmtClass:
+ case Stmt::DeclStmtClass:
+ case Stmt::LabelStmtClass:
+ case Stmt::AttributedStmtClass:
+ case Stmt::GotoStmtClass:
+ case Stmt::BreakStmtClass:
+ case Stmt::ContinueStmtClass:
+ case Stmt::DefaultStmtClass:
+ case Stmt::CaseStmtClass:
+ case Stmt::SEHLeaveStmtClass:
+ llvm_unreachable("should have emitted these statements as simple");
+
+#define STMT(Type, Base)
+#define ABSTRACT_STMT(Op)
+#define EXPR(Type, Base) \
+ case Stmt::Type##Class:
+#include "clang/AST/StmtNodes.inc"
+ {
+ // Remember the block we came in on.
+ llvm::BasicBlock *incoming = Builder.GetInsertBlock();
+ assert(incoming && "expression emission must have an insertion point");
+
+ EmitIgnoredExpr(cast<Expr>(S));
+
+ llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
+ assert(outgoing && "expression emission cleared block!");
+
+ // The expression emitters assume (reasonably!) that the insertion
+ // point is always set. To maintain that, the call-emission code
+ // for noreturn functions has to enter a new block with no
+ // predecessors. We want to kill that block and mark the current
+ // insertion point unreachable in the common case of a call like
+ // "exit();". Since expression emission doesn't otherwise create
+ // blocks with no predecessors, we can just test for that.
+ // However, we must be careful not to do this to our incoming
+ // block, because *statement* emission does sometimes create
+ // reachable blocks which will have no predecessors until later in
+ // the function. This occurs with, e.g., labels that are not
+ // reachable by fallthrough.
+ if (incoming != outgoing && outgoing->use_empty()) {
+ outgoing->eraseFromParent();
+ Builder.ClearInsertionPoint();
+ }
+ break;
+ }
+
+ case Stmt::IndirectGotoStmtClass:
+ EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
+
+ case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
+ case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S)); break;
+ case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S)); break;
+ case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S)); break;
+
+ case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
+
+ case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
+ case Stmt::GCCAsmStmtClass: // Intentional fall-through.
+ case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
+ case Stmt::CoroutineBodyStmtClass:
+ case Stmt::CoreturnStmtClass:
+ CGM.ErrorUnsupported(S, "coroutine");
+ break;
+ case Stmt::CapturedStmtClass: {
+ const CapturedStmt *CS = cast<CapturedStmt>(S);
+ EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
+ }
+ break;
+ case Stmt::ObjCAtTryStmtClass:
+ EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
+ break;
+ case Stmt::ObjCAtCatchStmtClass:
+ llvm_unreachable(
+ "@catch statements should be handled by EmitObjCAtTryStmt");
+ case Stmt::ObjCAtFinallyStmtClass:
+ llvm_unreachable(
+ "@finally statements should be handled by EmitObjCAtTryStmt");
+ case Stmt::ObjCAtThrowStmtClass:
+ EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
+ break;
+ case Stmt::ObjCAtSynchronizedStmtClass:
+ EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
+ break;
+ case Stmt::ObjCForCollectionStmtClass:
+ EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
+ break;
+ case Stmt::ObjCAutoreleasePoolStmtClass:
+ EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
+ break;
+
+ case Stmt::CXXTryStmtClass:
+ EmitCXXTryStmt(cast<CXXTryStmt>(*S));
+ break;
+ case Stmt::CXXForRangeStmtClass:
+ EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S));
+ break;
+ case Stmt::SEHTryStmtClass:
+ EmitSEHTryStmt(cast<SEHTryStmt>(*S));
+ break;
+ case Stmt::OMPParallelDirectiveClass:
+ EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
+ break;
+ case Stmt::OMPSimdDirectiveClass:
+ EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
+ break;
+ case Stmt::OMPForDirectiveClass:
+ EmitOMPForDirective(cast<OMPForDirective>(*S));
+ break;
+ case Stmt::OMPForSimdDirectiveClass:
+ EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
+ break;
+ case Stmt::OMPSectionsDirectiveClass:
+ EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
+ break;
+ case Stmt::OMPSectionDirectiveClass:
+ EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
+ break;
+ case Stmt::OMPSingleDirectiveClass:
+ EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
+ break;
+ case Stmt::OMPMasterDirectiveClass:
+ EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
+ break;
+ case Stmt::OMPCriticalDirectiveClass:
+ EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
+ break;
+ case Stmt::OMPParallelForDirectiveClass:
+ EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
+ break;
+ case Stmt::OMPParallelForSimdDirectiveClass:
+ EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
+ break;
+ case Stmt::OMPParallelSectionsDirectiveClass:
+ EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
+ break;
+ case Stmt::OMPTaskDirectiveClass:
+ EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
+ break;
+ case Stmt::OMPTaskyieldDirectiveClass:
+ EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
+ break;
+ case Stmt::OMPBarrierDirectiveClass:
+ EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
+ break;
+ case Stmt::OMPTaskwaitDirectiveClass:
+ EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
+ break;
+ case Stmt::OMPTaskgroupDirectiveClass:
+ EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
+ break;
+ case Stmt::OMPFlushDirectiveClass:
+ EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
+ break;
+ case Stmt::OMPOrderedDirectiveClass:
+ EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
+ break;
+ case Stmt::OMPAtomicDirectiveClass:
+ EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
+ break;
+ case Stmt::OMPTargetDirectiveClass:
+ EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
+ break;
+ case Stmt::OMPTeamsDirectiveClass:
+ EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
+ break;
+ case Stmt::OMPCancellationPointDirectiveClass:
+ EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
+ break;
+ case Stmt::OMPCancelDirectiveClass:
+ EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
+ break;
+ case Stmt::OMPTargetDataDirectiveClass:
+ EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
+ break;
+ case Stmt::OMPTaskLoopDirectiveClass:
+ EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
+ break;
+ case Stmt::OMPTaskLoopSimdDirectiveClass:
+ EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
+ break;
+ case Stmt::OMPDistributeDirectiveClass:
+ EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
+ break;
+ }
+}
+
+bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
+ switch (S->getStmtClass()) {
+ default: return false;
+ case Stmt::NullStmtClass: break;
+ case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
+ case Stmt::DeclStmtClass: EmitDeclStmt(cast<DeclStmt>(*S)); break;
+ case Stmt::LabelStmtClass: EmitLabelStmt(cast<LabelStmt>(*S)); break;
+ case Stmt::AttributedStmtClass:
+ EmitAttributedStmt(cast<AttributedStmt>(*S)); break;
+ case Stmt::GotoStmtClass: EmitGotoStmt(cast<GotoStmt>(*S)); break;
+ case Stmt::BreakStmtClass: EmitBreakStmt(cast<BreakStmt>(*S)); break;
+ case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
+ case Stmt::DefaultStmtClass: EmitDefaultStmt(cast<DefaultStmt>(*S)); break;
+ case Stmt::CaseStmtClass: EmitCaseStmt(cast<CaseStmt>(*S)); break;
+ case Stmt::SEHLeaveStmtClass: EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S)); break;
+ }
+
+ return true;
+}
+
+/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
+/// this captures the expression result of the last sub-statement and returns it
+/// (for use by the statement expression extension).
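+/// For example (GNU statement expression; illustrative only):
+///   int v = ({ int tmp = f(); tmp + 1; });  // GetLast captures 'tmp + 1'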
+Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
+ AggValueSlot AggSlot) {
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
+ "LLVM IR generation of compound statement ('{}')");
+
+ // Keep track of the current cleanup stack depth, including debug scopes.
+ LexicalScope Scope(*this, S.getSourceRange());
+
+ return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
+}
+
+Address
+CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
+ bool GetLast,
+ AggValueSlot AggSlot) {
+
+ for (CompoundStmt::const_body_iterator I = S.body_begin(),
+ E = S.body_end()-GetLast; I != E; ++I)
+ EmitStmt(*I);
+
+ Address RetAlloca = Address::invalid();
+ if (GetLast) {
+ // We have to special case labels here. They are statements, but when put
+ // at the end of a statement expression, they yield the value of their
+ // subexpression. Handle this by walking through all labels we encounter,
+ // emitting them before we evaluate the subexpr.
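+    // For instance (hypothetical input):
+    //   int v = ({ int x = f(); done: x; });  // emit 'done:', then use 'x'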
+ const Stmt *LastStmt = S.body_back();
+ while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
+ EmitLabel(LS->getDecl());
+ LastStmt = LS->getSubStmt();
+ }
+
+ EnsureInsertPoint();
+
+ QualType ExprTy = cast<Expr>(LastStmt)->getType();
+ if (hasAggregateEvaluationKind(ExprTy)) {
+ EmitAggExpr(cast<Expr>(LastStmt), AggSlot);
+ } else {
+ // We can't return an RValue here because there might be cleanups at
+ // the end of the StmtExpr. Because of that, we have to emit the result
+ // here into a temporary alloca.
+ RetAlloca = CreateMemTemp(ExprTy);
+ EmitAnyExprToMem(cast<Expr>(LastStmt), RetAlloca, Qualifiers(),
+ /*IsInit*/false);
+ }
+
+ }
+
+ return RetAlloca;
+}
+
+void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
+ llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
+
+  // If there is a cleanup stack, then it isn't worth trying to
+ // simplify this block (we would need to remove it from the scope map
+ // and cleanup entry).
+ if (!EHStack.empty())
+ return;
+
+ // Can only simplify direct branches.
+ if (!BI || !BI->isUnconditional())
+ return;
+
+ // Can only simplify empty blocks.
+ if (BI->getIterator() != BB->begin())
+ return;
+
+ BB->replaceAllUsesWith(BI->getSuccessor(0));
+ BI->eraseFromParent();
+ BB->eraseFromParent();
+}
+
+void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ // Fall out of the current block (if necessary).
+ EmitBranch(BB);
+
+ if (IsFinished && BB->use_empty()) {
+ delete BB;
+ return;
+ }
+
+ // Place the block after the current block, if possible, or else at
+ // the end of the function.
+ if (CurBB && CurBB->getParent())
+ CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
+ else
+ CurFn->getBasicBlockList().push_back(BB);
+ Builder.SetInsertPoint(BB);
+}
+
+void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
+ // Emit a branch from the current block to the target one if this
+ // was a real block. If this was just a fall-through block after a
+ // terminator, don't emit it.
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ if (!CurBB || CurBB->getTerminator()) {
+ // If there is no insert point or the previous block is already
+ // terminated, don't touch it.
+ } else {
+ // Otherwise, create a fall-through branch.
+ Builder.CreateBr(Target);
+ }
+
+ Builder.ClearInsertionPoint();
+}
+
+void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
+ bool inserted = false;
+ for (llvm::User *u : block->users()) {
+ if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
+ CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
+ block);
+ inserted = true;
+ break;
+ }
+ }
+
+ if (!inserted)
+ CurFn->getBasicBlockList().push_back(block);
+
+ Builder.SetInsertPoint(block);
+}
+
+CodeGenFunction::JumpDest
+CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
+ JumpDest &Dest = LabelMap[D];
+ if (Dest.isValid()) return Dest;
+
+ // Create, but don't insert, the new block.
+ Dest = JumpDest(createBasicBlock(D->getName()),
+ EHScopeStack::stable_iterator::invalid(),
+ NextCleanupDestIndex++);
+ return Dest;
+}
+
+void CodeGenFunction::EmitLabel(const LabelDecl *D) {
+ // Add this label to the current lexical scope if we're within any
+ // normal cleanups. Jumps "in" to this label --- when permitted by
+ // the language --- may need to be routed around such cleanups.
+ if (EHStack.hasNormalCleanups() && CurLexicalScope)
+ CurLexicalScope->addLabel(D);
+
+ JumpDest &Dest = LabelMap[D];
+
+ // If we didn't need a forward reference to this label, just go
+ // ahead and create a destination at the current scope.
+ if (!Dest.isValid()) {
+ Dest = getJumpDestInCurrentScope(D->getName());
+
+ // Otherwise, we need to give this label a target depth and remove
+ // it from the branch-fixups list.
+ } else {
+ assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
+ Dest.setScopeDepth(EHStack.stable_begin());
+ ResolveBranchFixups(Dest.getBlock());
+ }
+
+ EmitBlock(Dest.getBlock());
+ incrementProfileCounter(D->getStmt());
+}
+
+/// Change the cleanup scope of the labels in this lexical scope to
+/// match the scope of the enclosing context.
+void CodeGenFunction::LexicalScope::rescopeLabels() {
+ assert(!Labels.empty());
+ EHScopeStack::stable_iterator innermostScope
+ = CGF.EHStack.getInnermostNormalCleanup();
+
+ // Change the scope depth of all the labels.
+ for (SmallVectorImpl<const LabelDecl*>::const_iterator
+ i = Labels.begin(), e = Labels.end(); i != e; ++i) {
+ assert(CGF.LabelMap.count(*i));
+ JumpDest &dest = CGF.LabelMap.find(*i)->second;
+ assert(dest.getScopeDepth().isValid());
+ assert(innermostScope.encloses(dest.getScopeDepth()));
+ dest.setScopeDepth(innermostScope);
+ }
+
+ // Reparent the labels if the new scope also has cleanups.
+ if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
+ ParentScope->Labels.append(Labels.begin(), Labels.end());
+ }
+}
+
+
+void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
+ EmitLabel(S.getDecl());
+ EmitStmt(S.getSubStmt());
+}
+
+void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
+ const Stmt *SubStmt = S.getSubStmt();
+ switch (SubStmt->getStmtClass()) {
+ case Stmt::DoStmtClass:
+ EmitDoStmt(cast<DoStmt>(*SubStmt), S.getAttrs());
+ break;
+ case Stmt::ForStmtClass:
+ EmitForStmt(cast<ForStmt>(*SubStmt), S.getAttrs());
+ break;
+ case Stmt::WhileStmtClass:
+ EmitWhileStmt(cast<WhileStmt>(*SubStmt), S.getAttrs());
+ break;
+ case Stmt::CXXForRangeStmtClass:
+ EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*SubStmt), S.getAttrs());
+ break;
+ default:
+ EmitStmt(SubStmt);
+ }
+}
+
+void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
+}
+
+
+void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
+ if (const LabelDecl *Target = S.getConstantTarget()) {
+ EmitBranchThroughCleanup(getJumpDestForLabel(Target));
+ return;
+ }
+
+ // Ensure that we have an i8* for our PHI node.
+ llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
+ Int8PtrTy, "addr");
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ // Get the basic block for the indirect goto.
+ llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
+
+  // The first instruction in the block has to be the PHI for the switch dest;
+  // add an entry to it for this branch.
+ cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
+
+ EmitBranch(IndGotoBB);
+}
+
+void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
+ // C99 6.8.4.1: The first substatement is executed if the expression compares
+ // unequal to 0. The condition must be a scalar type.
+ LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
+
+ if (S.getConditionVariable())
+ EmitAutoVarDecl(*S.getConditionVariable());
+
+ // If the condition constant folds and can be elided, try to avoid emitting
+ // the condition and the dead arm of the if/else.
+ bool CondConstant;
+ if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant)) {
+ // Figure out which block (then or else) is executed.
+ const Stmt *Executed = S.getThen();
+ const Stmt *Skipped = S.getElse();
+ if (!CondConstant) // Condition false?
+ std::swap(Executed, Skipped);
+
+ // If the skipped block has no labels in it, just emit the executed block.
+ // This avoids emitting dead code and simplifies the CFG substantially.
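+    // A label in the skipped arm could still be reached, e.g. (illustrative):
+    //   if (0) { skipped: g(); }   // reachable via 'goto skipped;' elsewhere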
+ if (!ContainsLabel(Skipped)) {
+ if (CondConstant)
+ incrementProfileCounter(&S);
+ if (Executed) {
+ RunCleanupsScope ExecutedScope(*this);
+ EmitStmt(Executed);
+ }
+ return;
+ }
+ }
+
+ // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
+ // the conditional branch.
+ llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
+ llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
+ llvm::BasicBlock *ElseBlock = ContBlock;
+ if (S.getElse())
+ ElseBlock = createBasicBlock("if.else");
+
+ EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock,
+ getProfileCount(S.getThen()));
+
+ // Emit the 'then' code.
+ EmitBlock(ThenBlock);
+ incrementProfileCounter(&S);
+ {
+ RunCleanupsScope ThenScope(*this);
+ EmitStmt(S.getThen());
+ }
+ EmitBranch(ContBlock);
+
+ // Emit the 'else' code if present.
+ if (const Stmt *Else = S.getElse()) {
+ {
+ // There is no need to emit line number for an unconditional branch.
+ auto NL = ApplyDebugLocation::CreateEmpty(*this);
+ EmitBlock(ElseBlock);
+ }
+ {
+ RunCleanupsScope ElseScope(*this);
+ EmitStmt(Else);
+ }
+ {
+ // There is no need to emit line number for an unconditional branch.
+ auto NL = ApplyDebugLocation::CreateEmpty(*this);
+ EmitBranch(ContBlock);
+ }
+ }
+
+ // Emit the continuation block for code after the if.
+ EmitBlock(ContBlock, true);
+}
+
+void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
+ ArrayRef<const Attr *> WhileAttrs) {
+ // Emit the header for the loop, which will also become
+ // the continue target.
+ JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
+ EmitBlock(LoopHeader.getBlock());
+
+ LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), WhileAttrs);
+
+ // Create an exit block for when the condition fails, which will
+ // also become the break target.
+ JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));
+
+ // C++ [stmt.while]p2:
+ // When the condition of a while statement is a declaration, the
+ // scope of the variable that is declared extends from its point
+ // of declaration (3.3.2) to the end of the while statement.
+ // [...]
+ // The object created in a condition is destroyed and created
+ // with each iteration of the loop.
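+  // For example (illustrative): in 'while (int x = next())', 'x' is created
+  // and destroyed once per iteration, inside this ConditionScope.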
+ RunCleanupsScope ConditionScope(*this);
+
+ if (S.getConditionVariable())
+ EmitAutoVarDecl(*S.getConditionVariable());
+
+ // Evaluate the conditional in the while header. C99 6.8.5.1: The
+ // evaluation of the controlling expression takes place before each
+ // execution of the loop body.
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+
+ // while(1) is common, avoid extra exit blocks. Be sure
+ // to correctly handle break/continue though.
+ bool EmitBoolCondBranch = true;
+ if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
+ if (C->isOne())
+ EmitBoolCondBranch = false;
+
+ // As long as the condition is true, go to the loop body.
+ llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
+ if (EmitBoolCondBranch) {
+ llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
+ if (ConditionScope.requiresCleanups())
+ ExitBlock = createBasicBlock("while.exit");
+ Builder.CreateCondBr(
+ BoolCondVal, LoopBody, ExitBlock,
+ createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
+
+ if (ExitBlock != LoopExit.getBlock()) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
+ }
+
+ // Emit the loop body. We have to emit this in a cleanup scope
+ // because it might be a singleton DeclStmt.
+ {
+ RunCleanupsScope BodyScope(*this);
+ EmitBlock(LoopBody);
+ incrementProfileCounter(&S);
+ EmitStmt(S.getBody());
+ }
+
+ BreakContinueStack.pop_back();
+
+ // Immediately force cleanup.
+ ConditionScope.ForceCleanup();
+
+ EmitStopPoint(&S);
+ // Branch to the loop header again.
+ EmitBranch(LoopHeader.getBlock());
+
+ LoopStack.pop();
+
+ // Emit the exit block.
+ EmitBlock(LoopExit.getBlock(), true);
+
+  // If we skipped emitting the condition branch, the LoopHeader is
+  // typically just a branch; try to erase it.
+ if (!EmitBoolCondBranch)
+ SimplifyForwardingBlocks(LoopHeader.getBlock());
+}
+
+void CodeGenFunction::EmitDoStmt(const DoStmt &S,
+ ArrayRef<const Attr *> DoAttrs) {
+ JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
+ JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
+
+ uint64_t ParentCount = getCurrentProfileCount();
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
+
+ // Emit the body of the loop.
+ llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
+
+ LoopStack.push(LoopBody, CGM.getContext(), DoAttrs);
+
+ EmitBlockWithFallThrough(LoopBody, &S);
+ {
+ RunCleanupsScope BodyScope(*this);
+ EmitStmt(S.getBody());
+ }
+
+ EmitBlock(LoopCond.getBlock());
+
+ // C99 6.8.5.2: "The evaluation of the controlling expression takes place
+ // after each execution of the loop body."
+
+ // Evaluate the conditional in the while header.
+ // C99 6.8.5p2/p4: The first substatement is executed if the expression
+ // compares unequal to 0. The condition must be a scalar type.
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+
+ BreakContinueStack.pop_back();
+
+ // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
+ // to correctly handle break/continue though.
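+  // A typical (illustrative) source of this pattern:
+  //   #define SAFE_CALL(x) do { f(x); } while (0)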
+ bool EmitBoolCondBranch = true;
+ if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
+ if (C->isZero())
+ EmitBoolCondBranch = false;
+
+ // As long as the condition is true, iterate the loop.
+ if (EmitBoolCondBranch) {
+ uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
+ Builder.CreateCondBr(
+ BoolCondVal, LoopBody, LoopExit.getBlock(),
+ createProfileWeightsForLoop(S.getCond(), BackedgeCount));
+ }
+
+ LoopStack.pop();
+
+ // Emit the exit block.
+ EmitBlock(LoopExit.getBlock());
+
+  // If we skipped emitting the condition branch, the DoCond block is
+  // typically just a branch; try to erase it.
+ if (!EmitBoolCondBranch)
+ SimplifyForwardingBlocks(LoopCond.getBlock());
+}
+
+void CodeGenFunction::EmitForStmt(const ForStmt &S,
+ ArrayRef<const Attr *> ForAttrs) {
+ JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
+
+ LexicalScope ForScope(*this, S.getSourceRange());
+
+ // Evaluate the first part before the loop.
+ if (S.getInit())
+ EmitStmt(S.getInit());
+
+ // Start the loop with a block that tests the condition.
+ // If there's an increment, the continue scope will be overwritten
+ // later.
+ JumpDest Continue = getJumpDestInCurrentScope("for.cond");
+ llvm::BasicBlock *CondBlock = Continue.getBlock();
+ EmitBlock(CondBlock);
+
+ LoopStack.push(CondBlock, CGM.getContext(), ForAttrs);
+
+ // If the for loop doesn't have an increment we can just use the
+ // condition as the continue block. Otherwise we'll need to create
+ // a block for it (in the current scope, i.e. in the scope of the
+  // condition), and that block will become our continue block.
+ if (S.getInc())
+ Continue = getJumpDestInCurrentScope("for.inc");
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
+
+ // Create a cleanup scope for the condition variable cleanups.
+ LexicalScope ConditionScope(*this, S.getSourceRange());
+
+ if (S.getCond()) {
+ // If the for statement has a condition scope, emit the local variable
+ // declaration.
+ if (S.getConditionVariable()) {
+ EmitAutoVarDecl(*S.getConditionVariable());
+ }
+
+ llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
+ // If there are any cleanups between here and the loop-exit scope,
+ // create a block to stage a loop exit along.
+ if (ForScope.requiresCleanups())
+ ExitBlock = createBasicBlock("for.cond.cleanup");
+
+ // As long as the condition is true, iterate the loop.
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+ // C99 6.8.5p2/p4: The first substatement is executed if the expression
+ // compares unequal to 0. The condition must be a scalar type.
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+ Builder.CreateCondBr(
+ BoolCondVal, ForBody, ExitBlock,
+ createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
+
+ if (ExitBlock != LoopExit.getBlock()) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
+
+ EmitBlock(ForBody);
+ } else {
+ // Treat it as a non-zero constant. Don't even create a new block for the
+ // body, just fall into it.
+ }
+ incrementProfileCounter(&S);
+
+ {
+ // Create a separate cleanup scope for the body, in case it is not
+ // a compound statement.
+ RunCleanupsScope BodyScope(*this);
+ EmitStmt(S.getBody());
+ }
+
+ // If there is an increment, emit it next.
+ if (S.getInc()) {
+ EmitBlock(Continue.getBlock());
+ EmitStmt(S.getInc());
+ }
+
+ BreakContinueStack.pop_back();
+
+ ConditionScope.ForceCleanup();
+
+ EmitStopPoint(&S);
+ EmitBranch(CondBlock);
+
+ ForScope.ForceCleanup();
+
+ LoopStack.pop();
+
+ // Emit the fall-through block.
+ EmitBlock(LoopExit.getBlock(), true);
+}
+
+void
+CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
+ ArrayRef<const Attr *> ForAttrs) {
+ JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
+
+ LexicalScope ForScope(*this, S.getSourceRange());
+
+ // Evaluate the first pieces before the loop.
+ EmitStmt(S.getRangeStmt());
+ EmitStmt(S.getBeginEndStmt());
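+  // These are the desugared pieces of the range-based for loop, roughly
+  // (illustrative sketch):
+  //   auto && __range = range-init;
+  //   auto __begin = begin-expr, __end = end-expr;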
+
+ // Start the loop with a block that tests the condition.
+ // If there's an increment, the continue scope will be overwritten
+ // later.
+ llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+ EmitBlock(CondBlock);
+
+ LoopStack.push(CondBlock, CGM.getContext(), ForAttrs);
+
+ // If there are any cleanups between here and the loop-exit scope,
+ // create a block to stage a loop exit along.
+ llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
+ if (ForScope.requiresCleanups())
+ ExitBlock = createBasicBlock("for.cond.cleanup");
+
+ // The loop body, consisting of the specified body and the loop variable.
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+ // The body is executed if the expression, contextually converted
+ // to bool, is true.
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+ Builder.CreateCondBr(
+ BoolCondVal, ForBody, ExitBlock,
+ createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
+
+ if (ExitBlock != LoopExit.getBlock()) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
+
+ EmitBlock(ForBody);
+ incrementProfileCounter(&S);
+
+ // Create a block for the increment. In case of a 'continue', we jump there.
+ JumpDest Continue = getJumpDestInCurrentScope("for.inc");
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
+
+ {
+ // Create a separate cleanup scope for the loop variable and body.
+ LexicalScope BodyScope(*this, S.getSourceRange());
+ EmitStmt(S.getLoopVarStmt());
+ EmitStmt(S.getBody());
+ }
+
+ EmitStopPoint(&S);
+ // If there is an increment, emit it next.
+ EmitBlock(Continue.getBlock());
+ EmitStmt(S.getInc());
+
+ BreakContinueStack.pop_back();
+
+ EmitBranch(CondBlock);
+
+ ForScope.ForceCleanup();
+
+ LoopStack.pop();
+
+ // Emit the fall-through block.
+ EmitBlock(LoopExit.getBlock(), true);
+}
+
+void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
+ if (RV.isScalar()) {
+ Builder.CreateStore(RV.getScalarVal(), ReturnValue);
+ } else if (RV.isAggregate()) {
+ EmitAggregateCopy(ReturnValue, RV.getAggregateAddress(), Ty);
+ } else {
+ EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
+ /*init*/ true);
+ }
+ EmitBranchThroughCleanup(ReturnBlock);
+}
+
+/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
+/// if the function returns void, or may be missing one if the function returns
+/// non-void. Fun stuff :).
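+/// For example, both of these are accepted as GCC extensions (illustrative):
+///   void f() { return 42; }  // operand despite a void return type
+///   int h() { return; }      // missing operand despite a non-void type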
+void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
+ // Returning from an outlined SEH helper is UB, and we already warn on it.
+ if (IsOutlinedSEHHelper) {
+ Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+ }
+
+  // Emit the result value, even if unused, to evaluate the side effects.
+ const Expr *RV = S.getRetValue();
+
+ // Treat block literals in a return expression as if they appeared
+ // in their own scope. This permits a small, easily-implemented
+ // exception to our over-conservative rules about not jumping to
+ // statements following block literals with non-trivial cleanups.
+ RunCleanupsScope cleanupScope(*this);
+ if (const ExprWithCleanups *cleanups =
+ dyn_cast_or_null<ExprWithCleanups>(RV)) {
+ enterFullExpression(cleanups);
+ RV = cleanups->getSubExpr();
+ }
+
+ // FIXME: Clean this up by using an LValue for ReturnTemp,
+ // EmitStoreThroughLValue, and EmitAnyExpr.
+ if (getLangOpts().ElideConstructors &&
+ S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) {
+ // Apply the named return value optimization for this return statement,
+ // which means doing nothing: the appropriate result has already been
+ // constructed into the NRVO variable.
+
+    // If there is an NRVO flag for this variable, set it to 1 to indicate
+ // that the cleanup code should not destroy the variable.
+ if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
+ Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
+ } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
+ // Make sure not to return anything, but evaluate the expression
+ // for side effects.
+ if (RV)
+ EmitAnyExpr(RV);
+ } else if (!RV) {
+ // Do nothing (return value is left uninitialized)
+ } else if (FnRetTy->isReferenceType()) {
+ // If this function returns a reference, take the address of the expression
+ // rather than the value.
+ RValue Result = EmitReferenceBindingToExpr(RV);
+ Builder.CreateStore(Result.getScalarVal(), ReturnValue);
+ } else {
+ switch (getEvaluationKind(RV->getType())) {
+ case TEK_Scalar:
+ Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
+ break;
+ case TEK_Complex:
+ EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
+ /*isInit*/ true);
+ break;
+ case TEK_Aggregate:
+ EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue,
+ Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+ break;
+ }
+ }
+
+ ++NumReturnExprs;
+ if (!RV || RV->isEvaluatable(getContext()))
+ ++NumSimpleReturnExprs;
+
+ cleanupScope.ForceCleanup();
+ EmitBranchThroughCleanup(ReturnBlock);
+}
+
+void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
+ // As long as debug info is modeled with instructions, we have to ensure we
+ // have a place to insert here and write the stop point here.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ for (const auto *I : S.decls())
+ EmitDecl(*I);
+}
+
+void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
+ assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
+
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
+}
+
+void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
+ assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
+
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
+}
+
+/// EmitCaseStmtRange - If case statement range is not too big then
+/// add multiple cases to switch instruction, one for each value within
+/// the range. If range is too big then emit "if" condition check.
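+/// Case ranges are a GNU extension (illustrative syntax): 'case 1 ... 40:'.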
+void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
+ assert(S.getRHS() && "Expected RHS value in CaseStmt");
+
+ llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
+ llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
+
+ // Emit the code for this case. We do this first to make sure it is
+ // properly chained from our predecessor before generating the
+ // switch machinery to enter this block.
+ llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
+ EmitBlockWithFallThrough(CaseDest, &S);
+ EmitStmt(S.getSubStmt());
+
+ // If range is empty, do nothing.
+ if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
+ return;
+
+ llvm::APInt Range = RHS - LHS;
+ // FIXME: parameters such as this should not be hardcoded.
+ if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
+ // Range is small enough to add multiple switch instruction cases.
+ uint64_t Total = getProfileCount(&S);
+ unsigned NCases = Range.getZExtValue() + 1;
+ // We only have one region counter for the entire set of cases here, so we
+ // need to divide the weights evenly between the generated cases, ensuring
+ // that the total weight is preserved. E.g., a weight of 5 over three cases
+ // will be distributed as weights of 2, 2, and 1.
+ uint64_t Weight = Total / NCases, Rem = Total % NCases;
+ for (unsigned I = 0; I != NCases; ++I) {
+ if (SwitchWeights)
+ SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
+ if (Rem)
+ Rem--;
+ SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
+ LHS++;
+ }
+ return;
+ }
+
+ // The range is too big. Emit "if" condition into a new block,
+ // making sure to save and restore the current insertion point.
+ llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
+
+ // Push this test onto the chain of range checks (which terminates
+ // in the default basic block). The switch's default will be changed
+ // to the top of this chain after switch emission is complete.
+ llvm::BasicBlock *FalseDest = CaseRangeBlock;
+ CaseRangeBlock = createBasicBlock("sw.caserange");
+
+ CurFn->getBasicBlockList().push_back(CaseRangeBlock);
+ Builder.SetInsertPoint(CaseRangeBlock);
+
+ // Emit range check.
+ llvm::Value *Diff =
+ Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
+ llvm::Value *Cond =
+ Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
+
+ llvm::MDNode *Weights = nullptr;
+ if (SwitchWeights) {
+ uint64_t ThisCount = getProfileCount(&S);
+ uint64_t DefaultCount = (*SwitchWeights)[0];
+ Weights = createProfileWeights(ThisCount, DefaultCount);
+
+    // Since we're chaining the switch default through each large case range,
+    // we need to update the weight for the default, i.e., the first case, to
+    // include this case.
+ (*SwitchWeights)[0] += ThisCount;
+ }
+ Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
+
+ // Restore the appropriate insertion point.
+ if (RestoreBB)
+ Builder.SetInsertPoint(RestoreBB);
+ else
+ Builder.ClearInsertionPoint();
+}
+
+void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
+ // If there is no enclosing switch instance that we're aware of, then this
+ // case statement and its block can be elided. This situation only happens
+ // when we've constant-folded the switch, are emitting the constant case,
+ // and part of the constant case includes another case statement. For
+ // instance: switch (4) { case 4: do { case 5: } while (1); }
+ if (!SwitchInsn) {
+ EmitStmt(S.getSubStmt());
+ return;
+ }
+
+ // Handle case ranges.
+ if (S.getRHS()) {
+ EmitCaseStmtRange(S);
+ return;
+ }
+
+ llvm::ConstantInt *CaseVal =
+ Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
+
+ // If the body of the case is just a 'break', try to not emit an empty block.
+ // If we're profiling or we're not optimizing, leave the block in for better
+ // debug and coverage analysis.
+ if (!CGM.getCodeGenOpts().ProfileInstrGenerate &&
+ CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+ isa<BreakStmt>(S.getSubStmt())) {
+ JumpDest Block = BreakContinueStack.back().BreakBlock;
+
+ // Only do this optimization if there are no cleanups that need emitting.
+ if (isObviouslyBranchWithoutCleanups(Block)) {
+ if (SwitchWeights)
+ SwitchWeights->push_back(getProfileCount(&S));
+ SwitchInsn->addCase(CaseVal, Block.getBlock());
+
+ // If there was a fallthrough into this case, make sure to redirect it to
+ // the end of the switch as well.
+ if (Builder.GetInsertBlock()) {
+ Builder.CreateBr(Block.getBlock());
+ Builder.ClearInsertionPoint();
+ }
+ return;
+ }
+ }
+
+ llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
+ EmitBlockWithFallThrough(CaseDest, &S);
+ if (SwitchWeights)
+ SwitchWeights->push_back(getProfileCount(&S));
+ SwitchInsn->addCase(CaseVal, CaseDest);
+
+ // Recursively emitting the statement is acceptable, but is not wonderful for
+ // code where we have many case statements nested together, i.e.:
+ // case 1:
+ // case 2:
+ // case 3: etc.
+ // Handling this recursively will create a new block for each case statement
+  // that falls through to the next case, which is IR-intensive. It also causes
+ // deep recursion which can run into stack depth limitations. Handle
+ // sequential non-range case statements specially.
+ const CaseStmt *CurCase = &S;
+ const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
+
+ // Otherwise, iteratively add consecutive cases to this switch stmt.
+ while (NextCase && NextCase->getRHS() == nullptr) {
+ CurCase = NextCase;
+ llvm::ConstantInt *CaseVal =
+ Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
+
+ if (SwitchWeights)
+ SwitchWeights->push_back(getProfileCount(NextCase));
+ if (CGM.getCodeGenOpts().ProfileInstrGenerate) {
+ CaseDest = createBasicBlock("sw.bb");
+ EmitBlockWithFallThrough(CaseDest, &S);
+ }
+
+ SwitchInsn->addCase(CaseVal, CaseDest);
+ NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
+ }
+
+ // Normal default recursion for non-cases.
+ EmitStmt(CurCase->getSubStmt());
+}
+
+void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
+ llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
+ assert(DefaultBlock->empty() &&
+ "EmitDefaultStmt: Default block already defined?");
+
+ EmitBlockWithFallThrough(DefaultBlock, &S);
+
+ EmitStmt(S.getSubStmt());
+}
+
+/// CollectStatementsForCase - Given the body of a 'switch' statement and a
+/// constant value that is being switched on, see if we can dead code eliminate
+/// the body of the switch to a simple series of statements to emit. Basically,
+/// on a switch (5) we want to find these statements:
+/// case 5:
+/// printf(...); <--
+/// ++i; <--
+/// break;
+///
+/// and add them to the ResultStmts vector. If it is unsafe to do this
+/// transformation (for example, one of the elided statements contains a label
+/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
+/// should include statements after it (e.g. the printf() line is a substmt of
+/// the case) then return CSFC_FallThrough. If we handled it and found a break
+/// statement, then return CSFC_Success.
+///
+/// If Case is non-null, then we are looking for the specified case, checking
+/// that nothing we jump over contains labels. If Case is null, then we found
+/// the case and are looking for the break.
+///
+/// If the recursive walk actually finds our Case, then we set FoundCase to
+/// true.
+///
+enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
+static CSFC_Result CollectStatementsForCase(const Stmt *S,
+ const SwitchCase *Case,
+ bool &FoundCase,
+ SmallVectorImpl<const Stmt*> &ResultStmts) {
+ // If this is a null statement, just succeed.
+ if (!S)
+ return Case ? CSFC_Success : CSFC_FallThrough;
+
+ // If this is the switchcase (case 4: or default) that we're looking for, then
+ // we're in business. Just add the substatement.
+ if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
+ if (S == Case) {
+ FoundCase = true;
+ return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
+ ResultStmts);
+ }
+
+ // Otherwise, this is some other case or default statement, just ignore it.
+ return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
+ ResultStmts);
+ }
+
+ // If we are in the live part of the code and we found our break statement,
+ // return a success!
+ if (!Case && isa<BreakStmt>(S))
+ return CSFC_Success;
+
+ // If this is a switch statement, then it might contain the SwitchCase, the
+ // break, or neither.
+ if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
+ // Handle this as two cases: we might be looking for the SwitchCase (if so
+ // the skipped statements must be skippable) or we might already have it.
+ CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
+ if (Case) {
+ // Keep track of whether we see a skipped declaration. The code could be
+ // using the declaration even if it is skipped, so we can't optimize out
+ // the decl if the kept statements might refer to it.
+ bool HadSkippedDecl = false;
+
+ // If we're looking for the case, just see if we can skip each of the
+ // substatements.
+ for (; Case && I != E; ++I) {
+ HadSkippedDecl |= isa<DeclStmt>(*I);
+
+ switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
+ case CSFC_Failure: return CSFC_Failure;
+ case CSFC_Success:
+          // A successful result means either 1) that the statement doesn't
+          // have the case and is skippable, or 2) that it does contain the
+          // case value and also contains the break to exit the switch. In the
+          // latter case, we just verify the rest of the statements are
+          // elidable.
+ if (FoundCase) {
+ // If we found the case and skipped declarations, we can't do the
+ // optimization.
+ if (HadSkippedDecl)
+ return CSFC_Failure;
+
+ for (++I; I != E; ++I)
+ if (CodeGenFunction::ContainsLabel(*I, true))
+ return CSFC_Failure;
+ return CSFC_Success;
+ }
+ break;
+ case CSFC_FallThrough:
+          // If we have a fallthrough condition, then we must have found the
+          // case and started to include statements. Consider the rest of the
+          // statements in the compound statement as candidates for inclusion.
+ assert(FoundCase && "Didn't find case but returned fallthrough?");
+ // We recursively found Case, so we're not looking for it anymore.
+ Case = nullptr;
+
+ // If we found the case and skipped declarations, we can't do the
+ // optimization.
+ if (HadSkippedDecl)
+ return CSFC_Failure;
+ break;
+ }
+ }
+ }
+
+ // If we have statements in our range, then we know that the statements are
+ // live and need to be added to the set of statements we're tracking.
+ for (; I != E; ++I) {
+ switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
+ case CSFC_Failure: return CSFC_Failure;
+ case CSFC_FallThrough:
+ // A fallthrough result means that the statement was simple and just
+ // included in ResultStmt, keep adding them afterwards.
+ break;
+ case CSFC_Success:
+ // A successful result means that we found the break statement and
+ // stopped statement inclusion. We just ensure that any leftover stmts
+ // are skippable and return success ourselves.
+ for (++I; I != E; ++I)
+ if (CodeGenFunction::ContainsLabel(*I, true))
+ return CSFC_Failure;
+ return CSFC_Success;
+ }
+ }
+
+ return Case ? CSFC_Success : CSFC_FallThrough;
+ }
+
+ // Okay, this is some other statement that we don't handle explicitly, like a
+ // for statement or increment etc. If we are skipping over this statement,
+ // just verify it doesn't have labels, which would make it invalid to elide.
+ if (Case) {
+ if (CodeGenFunction::ContainsLabel(S, true))
+ return CSFC_Failure;
+ return CSFC_Success;
+ }
+
+ // Otherwise, we want to include this statement. Everything is cool with that
+ // so long as it doesn't contain a break out of the switch we're in.
+ if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
+
+ // Otherwise, everything is great. Include the statement and tell the caller
+ // that we fall through and include the next statement as well.
+ ResultStmts.push_back(S);
+ return CSFC_FallThrough;
+}
+
+/// FindCaseStatementsForValue - Find the case statement being jumped to and
+/// then invoke CollectStatementsForCase to find the list of statements to emit
+/// for a switch on constant. See the comment above CollectStatementsForCase
+/// for more details.
+static bool FindCaseStatementsForValue(const SwitchStmt &S,
+ const llvm::APSInt &ConstantCondValue,
+ SmallVectorImpl<const Stmt*> &ResultStmts,
+ ASTContext &C,
+ const SwitchCase *&ResultCase) {
+ // First step, find the switch case that is being branched to. We can do this
+ // efficiently by scanning the SwitchCase list.
+ const SwitchCase *Case = S.getSwitchCaseList();
+ const DefaultStmt *DefaultCase = nullptr;
+
+ for (; Case; Case = Case->getNextSwitchCase()) {
+ // It's either a default or case. Just remember the default statement in
+ // case we're not jumping to any numbered cases.
+ if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
+ DefaultCase = DS;
+ continue;
+ }
+
+ // Check to see if this case is the one we're looking for.
+ const CaseStmt *CS = cast<CaseStmt>(Case);
+ // Don't handle case ranges yet.
+ if (CS->getRHS()) return false;
+
+ // If we found our case, remember it as 'case'.
+ if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
+ break;
+ }
+
+ // If we didn't find a matching case, we use a default if it exists, or we
+ // elide the whole switch body!
+ if (!Case) {
+ // It is safe to elide the body of the switch if it doesn't contain labels
+ // etc. If it is safe, return successfully with an empty ResultStmts list.
+ if (!DefaultCase)
+ return !CodeGenFunction::ContainsLabel(&S);
+ Case = DefaultCase;
+ }
+
+ // Ok, we know which case is being jumped to, try to collect all the
+ // statements that follow it. This can fail for a variety of reasons. Also,
+ // check to see that the recursive walk actually found our case statement.
+ // Insane cases like this can fail to find it in the recursive walk since we
+ // don't handle every stmt kind:
+ // switch (4) {
+ // while (1) {
+ // case 4: ...
+ bool FoundCase = false;
+ ResultCase = Case;
+ return CollectStatementsForCase(S.getBody(), Case, FoundCase,
+ ResultStmts) != CSFC_Failure &&
+ FoundCase;
+}
+
+void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
+ // Handle nested switch statements.
+ llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
+ SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
+ llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
+
+ // See if we can constant fold the condition of the switch and therefore only
+ // emit the live case statement (if any) of the switch.
+ llvm::APSInt ConstantCondValue;
+ if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
+ SmallVector<const Stmt*, 4> CaseStmts;
+ const SwitchCase *Case = nullptr;
+ if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
+ getContext(), Case)) {
+ if (Case)
+ incrementProfileCounter(Case);
+ RunCleanupsScope ExecutedScope(*this);
+
+ // Emit the condition variable if needed inside the entire cleanup scope
+ // used by this special case for constant folded switches.
+ if (S.getConditionVariable())
+ EmitAutoVarDecl(*S.getConditionVariable());
+
+ // At this point, we are no longer "within" a switch instance, so
+ // we can temporarily enforce this to ensure that any embedded case
+ // statements are not emitted.
+ SwitchInsn = nullptr;
+
+ // Okay, we can dead code eliminate everything except this case. Emit the
+ // specified series of statements and we're good.
+ for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
+ EmitStmt(CaseStmts[i]);
+ incrementProfileCounter(&S);
+
+ // Now we want to restore the saved switch instance so that nested
+ // switches continue to function properly
+ SwitchInsn = SavedSwitchInsn;
+
+ return;
+ }
+ }
+
+ JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
+
+ RunCleanupsScope ConditionScope(*this);
+ if (S.getConditionVariable())
+ EmitAutoVarDecl(*S.getConditionVariable());
+ llvm::Value *CondV = EmitScalarExpr(S.getCond());
+
+ // Create basic block to hold stuff that comes after switch
+ // statement. We also need to create a default block now so that
+  // explicit case range tests can have a place to jump to on
+ // failure.
+ llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
+ SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
+ if (PGO.haveRegionCounts()) {
+ // Walk the SwitchCase list to find how many there are.
+ uint64_t DefaultCount = 0;
+ unsigned NumCases = 0;
+ for (const SwitchCase *Case = S.getSwitchCaseList();
+ Case;
+ Case = Case->getNextSwitchCase()) {
+ if (isa<DefaultStmt>(Case))
+ DefaultCount = getProfileCount(Case);
+ NumCases += 1;
+ }
+ SwitchWeights = new SmallVector<uint64_t, 16>();
+ SwitchWeights->reserve(NumCases);
+ // The default needs to be first. We store the edge count, so we already
+ // know the right weight.
+ SwitchWeights->push_back(DefaultCount);
+ }
+ CaseRangeBlock = DefaultBlock;
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ Builder.ClearInsertionPoint();
+
+  // All break statements jump to SwitchExit. If BreakContinueStack is
+  // non-empty then reuse the last ContinueBlock.
+ JumpDest OuterContinue;
+ if (!BreakContinueStack.empty())
+ OuterContinue = BreakContinueStack.back().ContinueBlock;
+
+ BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
+
+ // Emit switch body.
+ EmitStmt(S.getBody());
+
+ BreakContinueStack.pop_back();
+
+ // Update the default block in case explicit case range tests have
+ // been chained on top.
+ SwitchInsn->setDefaultDest(CaseRangeBlock);
+
+ // If a default was never emitted:
+ if (!DefaultBlock->getParent()) {
+ // If we have cleanups, emit the default block so that there's a
+ // place to jump through the cleanups from.
+ if (ConditionScope.requiresCleanups()) {
+ EmitBlock(DefaultBlock);
+
+ // Otherwise, just forward the default block to the switch end.
+ } else {
+ DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
+ delete DefaultBlock;
+ }
+ }
+
+ ConditionScope.ForceCleanup();
+
+ // Emit continuation.
+ EmitBlock(SwitchExit.getBlock(), true);
+ incrementProfileCounter(&S);
+
+ // If the switch has a condition wrapped by __builtin_unpredictable,
+ // create metadata that specifies that the switch is unpredictable.
+ // Don't bother if not optimizing because that metadata would not be used.
+ if (CGM.getCodeGenOpts().OptimizationLevel != 0) {
+ if (const CallExpr *Call = dyn_cast<CallExpr>(S.getCond())) {
+ const Decl *TargetDecl = Call->getCalleeDecl();
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
+ if (FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
+ llvm::MDBuilder MDHelper(getLLVMContext());
+ SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
+ MDHelper.createUnpredictable());
+ }
+ }
+ }
+ }
+
+ if (SwitchWeights) {
+ assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
+ "switch weights do not match switch cases");
+ // If there's only one jump destination there's no sense weighting it.
+ if (SwitchWeights->size() > 1)
+ SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
+ createProfileWeights(*SwitchWeights));
+ delete SwitchWeights;
+ }
+ SwitchInsn = SavedSwitchInsn;
+ SwitchWeights = SavedSwitchWeights;
+ CaseRangeBlock = SavedCRBlock;
+}
+
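+/// Translate a GCC constraint string into LLVM's constraint dialect. For
+/// example (illustrative): 'g' becomes "imr", and the ',' separating
+/// multi-alternative constraints becomes '|'.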
+static std::string
+SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
+ SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
+ std::string Result;
+
+ while (*Constraint) {
+ switch (*Constraint) {
+ default:
+ Result += Target.convertConstraint(Constraint);
+ break;
+ // Ignore these
+ case '*':
+ case '?':
+ case '!':
+    case '=': // Will see this and the following in multi-alt constraints.
+ case '+':
+ break;
+ case '#': // Ignore the rest of the constraint alternative.
+ while (Constraint[1] && Constraint[1] != ',')
+ Constraint++;
+ break;
+ case '&':
+ case '%':
+ Result += *Constraint;
+ while (Constraint[1] && Constraint[1] == *Constraint)
+ Constraint++;
+ break;
+ case ',':
+ Result += "|";
+ break;
+ case 'g':
+ Result += "imr";
+ break;
+ case '[': {
+ assert(OutCons &&
+ "Must pass output names to constraints with a symbolic name");
+ unsigned Index;
+ bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
+ assert(result && "Could not resolve symbolic name"); (void)result;
+ Result += llvm::utostr(Index);
+ break;
+ }
+ }
+
+ Constraint++;
+ }
+
+ return Result;
+}
+
+/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
+/// as using a particular register add that as a constraint that will be used
+/// in this asm stmt.
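+/// For example (illustrative): given 'register int cnt asm("ecx");', a plain
+/// "r" constraint on 'cnt' is replaced by the specific register "{ecx}".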
+static std::string
+AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
+ const TargetInfo &Target, CodeGenModule &CGM,
+ const AsmStmt &Stmt, const bool EarlyClobber) {
+ const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
+ if (!AsmDeclRef)
+ return Constraint;
+ const ValueDecl &Value = *AsmDeclRef->getDecl();
+ const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
+ if (!Variable)
+ return Constraint;
+ if (Variable->getStorageClass() != SC_Register)
+ return Constraint;
+ AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
+ if (!Attr)
+ return Constraint;
+ StringRef Register = Attr->getLabel();
+ assert(Target.isValidGCCRegisterName(Register));
+ // We're using validateOutputConstraint here because we only care if
+ // this is a register constraint.
+ TargetInfo::ConstraintInfo Info(Constraint, "");
+ if (Target.validateOutputConstraint(Info) &&
+ !Info.allowsRegister()) {
+ CGM.ErrorUnsupported(&Stmt, "__asm__");
+ return Constraint;
+ }
+ // Canonicalize the register here before returning it.
+ Register = Target.getNormalizedGCCRegisterName(Register);
+ return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
+}
+
+llvm::Value*
+CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
+ LValue InputValue, QualType InputType,
+ std::string &ConstraintStr,
+ SourceLocation Loc) {
+ llvm::Value *Arg;
+ if (Info.allowsRegister() || !Info.allowsMemory()) {
+ if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
+ Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
+ } else {
+ llvm::Type *Ty = ConvertType(InputType);
+ uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
+ if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
+ Ty = llvm::IntegerType::get(getLLVMContext(), Size);
+ Ty = llvm::PointerType::getUnqual(Ty);
+
+ Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
+ Ty));
+ } else {
+ Arg = InputValue.getPointer();
+ ConstraintStr += '*';
+ }
+ }
+ } else {
+ Arg = InputValue.getPointer();
+ ConstraintStr += '*';
+ }
+
+ return Arg;
+}
+
+llvm::Value* CodeGenFunction::EmitAsmInput(
+ const TargetInfo::ConstraintInfo &Info,
+ const Expr *InputExpr,
+ std::string &ConstraintStr) {
+ // If this can't be a register or memory, i.e., has to be a constant
+ // (immediate or symbolic), try to emit it as such.
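+  // For example (illustrative): 'asm volatile("" : : "i"(42))' requires the
+  // operand to fold to an integer constant here.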
+ if (!Info.allowsRegister() && !Info.allowsMemory()) {
+ llvm::APSInt Result;
+ if (InputExpr->EvaluateAsInt(Result, getContext()))
+ return llvm::ConstantInt::get(getLLVMContext(), Result);
+ assert(!Info.requiresImmediateConstant() &&
+ "Required-immediate inlineasm arg isn't constant?");
+ }
+
+ if (Info.allowsRegister() || !Info.allowsMemory())
+ if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
+ return EmitScalarExpr(InputExpr);
+ if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
+ return EmitScalarExpr(InputExpr);
+ InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
+ LValue Dest = EmitLValue(InputExpr);
+ return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
+ InputExpr->getExprLoc());
+}
+
+/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
+/// asm call instruction. The !srcloc MDNode contains a list of constant
+/// integers which are the source locations of the start of each line in the
+/// asm.
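+/// For a two-line string such as "movl %1, %0\n\tincl %0" (illustrative),
+/// two entries are recorded: one for the start of each line.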
+static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
+ CodeGenFunction &CGF) {
+ SmallVector<llvm::Metadata *, 8> Locs;
+ // Add the location of the first line to the MDNode.
+ Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ CGF.Int32Ty, Str->getLocStart().getRawEncoding())));
+ StringRef StrVal = Str->getString();
+ if (!StrVal.empty()) {
+ const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
+ const LangOptions &LangOpts = CGF.CGM.getLangOpts();
+ unsigned StartToken = 0;
+ unsigned ByteOffset = 0;
+
+ // Add the location of the start of each subsequent line of the asm to the
+ // MDNode.
+ for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
+ if (StrVal[i] != '\n') continue;
+ SourceLocation LineLoc = Str->getLocationOfByte(
+ i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
+ Locs.push_back(llvm::ConstantAsMetadata::get(
+ llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding())));
+ }
+ }
+
+ return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
+}
+
+void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
+ // Assemble the final asm string.
+ std::string AsmString = S.generateAsmString(getContext());
+
+ // Get all the output and input constraints together.
+ SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
+ SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
+
+ for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+ StringRef Name;
+ if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
+ Name = GAS->getOutputName(i);
+ TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
+ bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
+ assert(IsValid && "Failed to parse output constraint");
+ OutputConstraintInfos.push_back(Info);
+ }
+
+ for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
+ StringRef Name;
+ if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
+ Name = GAS->getInputName(i);
+ TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
+ bool IsValid =
+ getTarget().validateInputConstraint(OutputConstraintInfos, Info);
+ assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
+ InputConstraintInfos.push_back(Info);
+ }
+
+ std::string Constraints;
+
+ std::vector<LValue> ResultRegDests;
+ std::vector<QualType> ResultRegQualTys;
+ std::vector<llvm::Type *> ResultRegTypes;
+ std::vector<llvm::Type *> ResultTruncRegTypes;
+ std::vector<llvm::Type *> ArgTypes;
+ std::vector<llvm::Value*> Args;
+
+ // Keep track of inout constraints.
+ std::string InOutConstraints;
+ std::vector<llvm::Value*> InOutArgs;
+ std::vector<llvm::Type*> InOutArgTypes;
+
+ // An inline asm can be marked readonly if it meets the following conditions:
+ // - it doesn't have any sideeffects
+ // - it doesn't clobber memory
+ // - it doesn't return a value by-reference
+ // It can be marked readnone if it doesn't have any input memory constraints
+ // in addition to meeting the conditions listed above.
+ bool ReadOnly = true, ReadNone = true;
+
+ for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+ TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
+
+ // Simplify the output constraint.
+ std::string OutputConstraint(S.getOutputConstraint(i));
+ OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
+ getTarget());
+
+ const Expr *OutExpr = S.getOutputExpr(i);
+ OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
+
+ OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
+ getTarget(), CGM, S,
+ Info.earlyClobber());
+
+ LValue Dest = EmitLValue(OutExpr);
+ if (!Constraints.empty())
+ Constraints += ',';
+
+ // If this is a register output, then make the inline asm return it
+ // by-value. If this is a memory result, return the value by-reference.
+ if (!Info.allowsMemory() && hasScalarEvaluationKind(OutExpr->getType())) {
+ Constraints += "=" + OutputConstraint;
+ ResultRegQualTys.push_back(OutExpr->getType());
+ ResultRegDests.push_back(Dest);
+ ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
+ ResultTruncRegTypes.push_back(ResultRegTypes.back());
+
+ // If this output is tied to an input, and if the input is larger, then
+ // we need to set the actual result type of the inline asm node to be the
+ // same as the input type.
+ if (Info.hasMatchingInput()) {
+ unsigned InputNo;
+ for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
+ TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
+ if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
+ break;
+ }
+ assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
+
+ QualType InputTy = S.getInputExpr(InputNo)->getType();
+ QualType OutputType = OutExpr->getType();
+
+ uint64_t InputSize = getContext().getTypeSize(InputTy);
+ if (getContext().getTypeSize(OutputType) < InputSize) {
+ // Form the asm to return the value as a larger integer or fp type.
+ ResultRegTypes.back() = ConvertType(InputTy);
+ }
+ }
+ if (llvm::Type* AdjTy =
+ getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
+ ResultRegTypes.back()))
+ ResultRegTypes.back() = AdjTy;
+ else {
+ CGM.getDiags().Report(S.getAsmLoc(),
+ diag::err_asm_invalid_type_in_input)
+ << OutExpr->getType() << OutputConstraint;
+ }
+ } else {
+ ArgTypes.push_back(Dest.getAddress().getType());
+ Args.push_back(Dest.getPointer());
+ Constraints += "=*";
+ Constraints += OutputConstraint;
+ ReadOnly = ReadNone = false;
+ }
+
+ if (Info.isReadWrite()) {
+ InOutConstraints += ',';
+
+ const Expr *InputExpr = S.getOutputExpr(i);
+ llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
+ InOutConstraints,
+ InputExpr->getExprLoc());
+
+ if (llvm::Type* AdjTy =
+ getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
+ Arg->getType()))
+ Arg = Builder.CreateBitCast(Arg, AdjTy);
+
+ if (Info.allowsRegister())
+ InOutConstraints += llvm::utostr(i);
+ else
+ InOutConstraints += OutputConstraint;
+
+ InOutArgTypes.push_back(Arg->getType());
+ InOutArgs.push_back(Arg);
+ }
+ }
+
+ // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
+ // to the return value slot. Only do this when returning in registers.
+ if (isa<MSAsmStmt>(&S)) {
+ const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
+ if (RetAI.isDirect() || RetAI.isExtend()) {
+ // Make a fake lvalue for the return value slot.
+ LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
+ CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
+ *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
+ ResultRegDests, AsmString, S.getNumOutputs());
+ SawAsmBlock = true;
+ }
+ }
+
+ for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
+ const Expr *InputExpr = S.getInputExpr(i);
+
+ TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
+
+ if (Info.allowsMemory())
+ ReadNone = false;
+
+ if (!Constraints.empty())
+ Constraints += ',';
+
+ // Simplify the input constraint.
+ std::string InputConstraint(S.getInputConstraint(i));
+ InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
+ &OutputConstraintInfos);
+
+ InputConstraint = AddVariableConstraints(
+ InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
+ getTarget(), CGM, S, false /* No EarlyClobber */);
+
+ llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
+
+ // If this input argument is tied to a larger output result, extend the
+ // input to be the same size as the output. The LLVM backend wants to see
+ // the input and output of a matching constraint be the same size. Note
+ // that GCC does not define what the top bits are here. We use zext because
+ // that is usually cheaper, but LLVM IR should really get an anyext someday.
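+    // For example (illustrative): an 'int' input tied to a 'long long' output
+    // is zext'ed from i32 to i64 so both operands have the output's type.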
+ if (Info.hasTiedOperand()) {
+ unsigned Output = Info.getTiedOperand();
+ QualType OutputType = S.getOutputExpr(Output)->getType();
+ QualType InputTy = InputExpr->getType();
+
+ if (getContext().getTypeSize(OutputType) >
+ getContext().getTypeSize(InputTy)) {
+ // Use ptrtoint as appropriate so that we can do our extension.
+ if (isa<llvm::PointerType>(Arg->getType()))
+ Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
+ llvm::Type *OutputTy = ConvertType(OutputType);
+ if (isa<llvm::IntegerType>(OutputTy))
+ Arg = Builder.CreateZExt(Arg, OutputTy);
+ else if (isa<llvm::PointerType>(OutputTy))
+ Arg = Builder.CreateZExt(Arg, IntPtrTy);
+ else {
+ assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
+ Arg = Builder.CreateFPExt(Arg, OutputTy);
+ }
+ }
+ }
+ if (llvm::Type* AdjTy =
+ getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
+ Arg->getType()))
+ Arg = Builder.CreateBitCast(Arg, AdjTy);
+ else
+ CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
+ << InputExpr->getType() << InputConstraint;
+
+ ArgTypes.push_back(Arg->getType());
+ Args.push_back(Arg);
+ Constraints += InputConstraint;
+ }
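+ // For illustration, a hypothetical x86 statement such as
+ //   asm("addl %2, %0" : "=r"(a) : "0"(a), "r"(b));
+ // accumulates the constraint string "=r,0,r": the tied "0" input reuses
+ // output 0's register, and the operands land in Args in this order, before
+ // the inout parts and the clobbers are appended below.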
+
+ // Append the "input" part of inout constraints last.
+ for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
+ ArgTypes.push_back(InOutArgTypes[i]);
+ Args.push_back(InOutArgs[i]);
+ }
+ Constraints += InOutConstraints;
+
+ // Clobbers
+ for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
+ StringRef Clobber = S.getClobber(i);
+
+ if (Clobber == "memory")
+ ReadOnly = ReadNone = false;
+ else if (Clobber != "cc")
+ Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
+
+ if (!Constraints.empty())
+ Constraints += ',';
+
+ Constraints += "~{";
+ Constraints += Clobber;
+ Constraints += '}';
+ }
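+ // For example, a hypothetical clobber list of "cc" and "memory" becomes
+ // "~{cc},~{memory}"; the "memory" clobber also clears ReadOnly/ReadNone so
+ // the call keeps its side effects.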
+
+ // Add machine-specific clobbers.
+ std::string MachineClobbers = getTarget().getClobbers();
+ if (!MachineClobbers.empty()) {
+ if (!Constraints.empty())
+ Constraints += ',';
+ Constraints += MachineClobbers;
+ }
+
+ llvm::Type *ResultType;
+ if (ResultRegTypes.empty())
+ ResultType = VoidTy;
+ else if (ResultRegTypes.size() == 1)
+ ResultType = ResultRegTypes[0];
+ else
+ ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ResultType, ArgTypes, false);
+
+ bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
+ llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
+ llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
+ llvm::InlineAsm *IA =
+ llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
+ /* IsAlignStack */ false, AsmDialect);
+ llvm::CallInst *Result = Builder.CreateCall(IA, Args);
+ Result->addAttribute(llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::NoUnwind);
+
+ if (isa<MSAsmStmt>(&S)) {
+ // If the assembly contains any labels, mark the call noduplicate to prevent
+ // defining the same ASM label twice (PR23715). This is pretty hacky, but it
+ // works.
+ if (AsmString.find("__MSASMLABEL_") != std::string::npos)
+ Result->addAttribute(llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::NoDuplicate);
+ }
+
+ // Attach readnone and readonly attributes.
+ if (!HasSideEffect) {
+ if (ReadNone)
+ Result->addAttribute(llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::ReadNone);
+ else if (ReadOnly)
+ Result->addAttribute(llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::ReadOnly);
+ }
+
+ // Slap the source location of the inline asm into a !srcloc metadata on the
+ // call.
+ if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S)) {
+ Result->setMetadata("srcloc", getAsmSrcLocInfo(gccAsmStmt->getAsmString(),
+ *this));
+ } else {
+ // At least put the line number on MS inline asm blobs.
+ auto Loc = llvm::ConstantInt::get(Int32Ty, S.getAsmLoc().getRawEncoding());
+ Result->setMetadata("srcloc",
+ llvm::MDNode::get(getLLVMContext(),
+ llvm::ConstantAsMetadata::get(Loc)));
+ }
+
+ // Extract all of the register value results from the asm.
+ std::vector<llvm::Value*> RegResults;
+ if (ResultRegTypes.size() == 1) {
+ RegResults.push_back(Result);
+ } else {
+ for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
+ llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult");
+ RegResults.push_back(Tmp);
+ }
+ }
+
+ assert(RegResults.size() == ResultRegTypes.size());
+ assert(RegResults.size() == ResultTruncRegTypes.size());
+ assert(RegResults.size() == ResultRegDests.size());
+ for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
+ llvm::Value *Tmp = RegResults[i];
+
+ // If the result type of the LLVM IR asm doesn't match the result type of
+ // the expression, do the conversion.
+ if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
+ llvm::Type *TruncTy = ResultTruncRegTypes[i];
+
+ // Truncate the integer result to the right size; note that TruncTy can be
+ // a pointer.
+ if (TruncTy->isFloatingPointTy())
+ Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
+ else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
+ uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
+ Tmp = Builder.CreateTrunc(Tmp,
+ llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
+ Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
+ } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
+ uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
+ Tmp = Builder.CreatePtrToInt(Tmp,
+ llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
+ Tmp = Builder.CreateTrunc(Tmp, TruncTy);
+ } else if (TruncTy->isIntegerTy()) {
+ Tmp = Builder.CreateTrunc(Tmp, TruncTy);
+ } else if (TruncTy->isVectorTy()) {
+ Tmp = Builder.CreateBitCast(Tmp, TruncTy);
+ }
+ }
+
+ EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]);
+ }
+}
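+
+ // As a rough sketch of the overall shape: a hypothetical two-output
+ // statement
+ //   asm("..." : "=r"(x), "=r"(y) : "r"(z));
+ // lowers to a call returning { i32, i32 }; each member is peeled off with
+ // extractvalue above and stored back through its recorded LValue.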
+
+LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
+ const RecordDecl *RD = S.getCapturedRecordDecl();
+ QualType RecordTy = getContext().getRecordType(RD);
+
+ // Initialize the captured struct.
+ LValue SlotLV =
+ MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
+
+ RecordDecl::field_iterator CurField = RD->field_begin();
+ for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
+ E = S.capture_init_end();
+ I != E; ++I, ++CurField) {
+ LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
+ if (CurField->hasCapturedVLAType()) {
+ auto VAT = CurField->getCapturedVLAType();
+ EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
+ } else {
+ EmitInitializerForField(*CurField, LV, *I, None);
+ }
+ }
+
+ return SlotLV;
+}
+
+/// Generate an outlined function for the body of a CapturedStmt, store any
+/// captured variables into the captured struct, and call the outlined function.
+llvm::Function *
+CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
+ LValue CapStruct = InitCapturedStruct(S);
+
+ // Emit the CapturedDecl
+ CodeGenFunction CGF(CGM, true);
+ CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
+ llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
+ delete CGF.CapturedStmtInfo;
+
+ // Emit call to the helper function.
+ EmitCallOrInvoke(F, CapStruct.getPointer());
+
+ return F;
+}
+
+Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
+ LValue CapStruct = InitCapturedStruct(S);
+ return CapStruct.getAddress();
+}
+
+/// Creates the outlined function for a CapturedStmt.
+llvm::Function *
+CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
+ assert(CapturedStmtInfo &&
+ "CapturedStmtInfo should be set when generating the captured function");
+ const CapturedDecl *CD = S.getCapturedDecl();
+ const RecordDecl *RD = S.getCapturedRecordDecl();
+ SourceLocation Loc = S.getLocStart();
+ assert(CD->hasBody() && "missing CapturedDecl body");
+
+ // Build the argument list.
+ ASTContext &Ctx = CGM.getContext();
+ FunctionArgList Args;
+ Args.append(CD->param_begin(), CD->param_end());
+
+ // Create the function declaration.
+ FunctionType::ExtInfo ExtInfo;
+ const CGFunctionInfo &FuncInfo =
+ CGM.getTypes().arrangeFreeFunctionDeclaration(Ctx.VoidTy, Args, ExtInfo,
+ /*IsVariadic=*/false);
+ llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
+
+ llvm::Function *F =
+ llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
+ CapturedStmtInfo->getHelperName(), &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
+ if (CD->isNothrow())
+ F->addFnAttr(llvm::Attribute::NoUnwind);
+
+ // Generate the function.
+ StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args,
+ CD->getLocation(),
+ CD->getBody()->getLocStart());
+ // Set the context parameter in CapturedStmtInfo.
+ Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
+ CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
+
+ // Initialize variable-length arrays.
+ LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
+ Ctx.getTagDeclType(RD));
+ for (auto *FD : RD->fields()) {
+ if (FD->hasCapturedVLAType()) {
+ auto *ExprArg = EmitLoadOfLValue(EmitLValueForField(Base, FD),
+ S.getLocStart()).getScalarVal();
+ auto VAT = FD->getCapturedVLAType();
+ VLASizeMap[VAT->getSizeExpr()] = ExprArg;
+ }
+ }
+
+ // If 'this' is captured, load it into CXXThisValue.
+ if (CapturedStmtInfo->isCXXThisExprCaptured()) {
+ FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
+ LValue ThisLValue = EmitLValueForField(Base, FD);
+ CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
+ }
+
+ PGO.assignRegionCounters(GlobalDecl(CD), F);
+ CapturedStmtInfo->EmitBody(*this, CD->getBody());
+ FinishFunction(CD->getBodyRBrace());
+
+ return F;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGStmtOpenMP.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGStmtOpenMP.cpp
new file mode 100644
index 0000000..14917c2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -0,0 +1,2679 @@
+//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit OpenMP nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGOpenMPRuntime.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "TargetInfo.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtOpenMP.h"
+using namespace clang;
+using namespace CodeGen;
+
+void CodeGenFunction::GenerateOpenMPCapturedVars(
+ const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
+ const RecordDecl *RD = S.getCapturedRecordDecl();
+ auto CurField = RD->field_begin();
+ auto CurCap = S.captures().begin();
+ for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
+ E = S.capture_init_end();
+ I != E; ++I, ++CurField, ++CurCap) {
+ if (CurField->hasCapturedVLAType()) {
+ auto VAT = CurField->getCapturedVLAType();
+ auto *Val = VLASizeMap[VAT->getSizeExpr()];
+ CapturedVars.push_back(Val);
+ } else if (CurCap->capturesThis())
+ CapturedVars.push_back(CXXThisValue);
+ else if (CurCap->capturesVariableByCopy())
+ CapturedVars.push_back(
+ EmitLoadOfLValue(EmitLValue(*I), SourceLocation()).getScalarVal());
+ else {
+ assert(CurCap->capturesVariable() && "Expected capture by reference.");
+ CapturedVars.push_back(EmitLValue(*I).getAddress().getPointer());
+ }
+ }
+}
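+
+ // As a sketch: for a hypothetical region capturing a VLA bound 'n', a
+ // by-copy scalar 'a', and a by-reference 'b', CapturedVars receives the
+ // saved VLA size, the loaded value of 'a', and the address of 'b', in
+ // capture order.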
+
+static Address castValueFromUintptr(CodeGenFunction &CGF, QualType DstType,
+ StringRef Name, LValue AddrLV,
+ bool isReferenceType = false) {
+ ASTContext &Ctx = CGF.getContext();
+
+ auto *CastedPtr = CGF.EmitScalarConversion(
+ AddrLV.getAddress().getPointer(), Ctx.getUIntPtrType(),
+ Ctx.getPointerType(DstType), SourceLocation());
+ auto TmpAddr =
+ CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
+ .getAddress();
+
+ // If we are dealing with references, we need to return the address of the
+ // reference instead of the address of the referenced value.
+ if (isReferenceType) {
+ QualType RefType = Ctx.getLValueReferenceType(DstType);
+ auto *RefVal = TmpAddr.getPointer();
+ TmpAddr = CGF.CreateMemTemp(RefType, Twine(Name) + ".ref");
+ auto TmpLVal = CGF.MakeAddrLValue(TmpAddr, RefType);
+ CGF.EmitScalarInit(RefVal, TmpLVal);
+ }
+
+ return TmpAddr;
+}
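+
+ // E.g., hypothetically: an 'int' captured by copy arrives as a uintptr
+ // argument; the conversion above re-views that argument slot as an 'int',
+ // and the reference path binds a fresh 'int &' temporary to it.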
+
+llvm::Function *
+CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
+ assert(
+ CapturedStmtInfo &&
+ "CapturedStmtInfo should be set when generating the captured function");
+ const CapturedDecl *CD = S.getCapturedDecl();
+ const RecordDecl *RD = S.getCapturedRecordDecl();
+ assert(CD->hasBody() && "missing CapturedDecl body");
+
+ // Build the argument list.
+ ASTContext &Ctx = CGM.getContext();
+ FunctionArgList Args;
+ Args.append(CD->param_begin(),
+ std::next(CD->param_begin(), CD->getContextParamPosition()));
+ auto I = S.captures().begin();
+ for (auto *FD : RD->fields()) {
+ QualType ArgType = FD->getType();
+ IdentifierInfo *II = nullptr;
+ VarDecl *CapVar = nullptr;
+
+ // If this is a capture by copy and the type is not a pointer, the outlined
+ // function argument type should be uintptr and the value properly cast to
+ // uintptr. This is necessary because the runtime library can only deal
+ // with pointers. The VLA type sizes are passed to the outlined function in
+ // the same way.
+ if ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
+ I->capturesVariableArrayType())
+ ArgType = Ctx.getUIntPtrType();
+
+ if (I->capturesVariable() || I->capturesVariableByCopy()) {
+ CapVar = I->getCapturedVar();
+ II = CapVar->getIdentifier();
+ } else if (I->capturesThis())
+ II = &getContext().Idents.get("this");
+ else {
+ assert(I->capturesVariableArrayType());
+ II = &getContext().Idents.get("vla");
+ }
+ if (ArgType->isVariablyModifiedType())
+ ArgType = getContext().getVariableArrayDecayedType(ArgType);
+ Args.push_back(ImplicitParamDecl::Create(getContext(), nullptr,
+ FD->getLocation(), II, ArgType));
+ ++I;
+ }
+ Args.append(
+ std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
+ CD->param_end());
+
+ // Create the function declaration.
+ FunctionType::ExtInfo ExtInfo;
+ const CGFunctionInfo &FuncInfo =
+ CGM.getTypes().arrangeFreeFunctionDeclaration(Ctx.VoidTy, Args, ExtInfo,
+ /*IsVariadic=*/false);
+ llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
+
+ llvm::Function *F = llvm::Function::Create(
+ FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
+ CapturedStmtInfo->getHelperName(), &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
+ if (CD->isNothrow())
+ F->addFnAttr(llvm::Attribute::NoUnwind);
+
+ // Generate the function.
+ StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
+ CD->getBody()->getLocStart());
+ unsigned Cnt = CD->getContextParamPosition();
+ I = S.captures().begin();
+ for (auto *FD : RD->fields()) {
+ // If we are capturing a pointer by copy, we don't need to do anything;
+ // just use the value that we get from the arguments.
+ if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
+ setAddrOfLocalVar(I->getCapturedVar(), GetAddrOfLocalVar(Args[Cnt]));
+ ++Cnt, ++I;
+ continue;
+ }
+
+ LValue ArgLVal =
+ MakeAddrLValue(GetAddrOfLocalVar(Args[Cnt]), Args[Cnt]->getType(),
+ AlignmentSource::Decl);
+ if (FD->hasCapturedVLAType()) {
+ LValue CastedArgLVal =
+ MakeAddrLValue(castValueFromUintptr(*this, FD->getType(),
+ Args[Cnt]->getName(), ArgLVal),
+ FD->getType(), AlignmentSource::Decl);
+ auto *ExprArg =
+ EmitLoadOfLValue(CastedArgLVal, SourceLocation()).getScalarVal();
+ auto VAT = FD->getCapturedVLAType();
+ VLASizeMap[VAT->getSizeExpr()] = ExprArg;
+ } else if (I->capturesVariable()) {
+ auto *Var = I->getCapturedVar();
+ QualType VarTy = Var->getType();
+ Address ArgAddr = ArgLVal.getAddress();
+ if (!VarTy->isReferenceType()) {
+ ArgAddr = EmitLoadOfReference(
+ ArgAddr, ArgLVal.getType()->castAs<ReferenceType>());
+ }
+ setAddrOfLocalVar(
+ Var, Address(ArgAddr.getPointer(), getContext().getDeclAlign(Var)));
+ } else if (I->capturesVariableByCopy()) {
+ assert(!FD->getType()->isAnyPointerType() &&
+ "Not expecting a captured pointer.");
+ auto *Var = I->getCapturedVar();
+ QualType VarTy = Var->getType();
+ setAddrOfLocalVar(I->getCapturedVar(),
+ castValueFromUintptr(*this, FD->getType(),
+ Args[Cnt]->getName(), ArgLVal,
+ VarTy->isReferenceType()));
+ } else {
+ // If 'this' is captured, load it into CXXThisValue.
+ assert(I->capturesThis());
+ CXXThisValue =
+ EmitLoadOfLValue(ArgLVal, Args[Cnt]->getLocation()).getScalarVal();
+ }
+ ++Cnt, ++I;
+ }
+
+ PGO.assignRegionCounters(GlobalDecl(CD), F);
+ CapturedStmtInfo->EmitBody(*this, CD->getBody());
+ FinishFunction(CD->getBodyRBrace());
+
+ return F;
+}
+
+//===----------------------------------------------------------------------===//
+// OpenMP Directive Emission
+//===----------------------------------------------------------------------===//
+void CodeGenFunction::EmitOMPAggregateAssign(
+ Address DestAddr, Address SrcAddr, QualType OriginalType,
+ const llvm::function_ref<void(Address, Address)> &CopyGen) {
+ // Perform element-by-element initialization.
+ QualType ElementTy;
+
+ // Drill down to the base element type on both arrays.
+ auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
+ auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
+ SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
+
+ auto SrcBegin = SrcAddr.getPointer();
+ auto DestBegin = DestAddr.getPointer();
+ // Cast from a pointer to the array type to a pointer to a single element.
+ auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
+ // The basic structure here is a while-do loop.
+ auto BodyBB = createBasicBlock("omp.arraycpy.body");
+ auto DoneBB = createBasicBlock("omp.arraycpy.done");
+ auto IsEmpty =
+ Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
+ Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
+
+ // Enter the loop body, making that address the current address.
+ auto EntryBB = Builder.GetInsertBlock();
+ EmitBlock(BodyBB);
+
+ CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);
+
+ llvm::PHINode *SrcElementPHI =
+ Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
+ SrcElementPHI->addIncoming(SrcBegin, EntryBB);
+ Address SrcElementCurrent =
+ Address(SrcElementPHI,
+ SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
+
+ llvm::PHINode *DestElementPHI =
+ Builder.CreatePHI(DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
+ DestElementPHI->addIncoming(DestBegin, EntryBB);
+ Address DestElementCurrent =
+ Address(DestElementPHI,
+ DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
+
+ // Emit copy.
+ CopyGen(DestElementCurrent, SrcElementCurrent);
+
+ // Shift the address forward by one element.
+ auto DestElementNext = Builder.CreateConstGEP1_32(
+ DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
+ auto SrcElementNext = Builder.CreateConstGEP1_32(
+ SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
+ // Check whether we've reached the end.
+ auto Done =
+ Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
+ Builder.CreateCondBr(Done, DoneBB, BodyBB);
+ DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
+ SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());
+
+ // Done.
+ EmitBlock(DoneBB, /*IsFinished=*/true);
+}
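+
+ // The emitted control flow is roughly:
+ //   isempty ? omp.arraycpy.done : omp.arraycpy.body
+ // where the body carries PHIs over the source and destination element
+ // pointers, runs CopyGen, advances both by one element, and loops until
+ // the destination pointer reaches DestEnd.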
+
+/// \brief Emit initialization of arrays of complex types.
+/// \param DestAddr Address of the array.
+/// \param Type Type of array.
+/// \param Init Initial expression of array.
+static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
+ QualType Type, const Expr *Init) {
+ // Perform element-by-element initialization.
+ QualType ElementTy;
+
+ // Drill down to the base element type on both arrays.
+ auto ArrayTy = Type->getAsArrayTypeUnsafe();
+ auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
+ DestAddr =
+ CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
+
+ auto DestBegin = DestAddr.getPointer();
+ // Cast from a pointer to the array type to a pointer to a single element.
+ auto DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
+ // The basic structure here is a while-do loop.
+ auto BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
+ auto DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
+ auto IsEmpty =
+ CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
+ CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
+
+ // Enter the loop body, making that address the current address.
+ auto EntryBB = CGF.Builder.GetInsertBlock();
+ CGF.EmitBlock(BodyBB);
+
+ CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
+
+ llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
+ DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
+ DestElementPHI->addIncoming(DestBegin, EntryBB);
+ Address DestElementCurrent =
+ Address(DestElementPHI,
+ DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
+
+ // Emit copy.
+ {
+ CodeGenFunction::RunCleanupsScope InitScope(CGF);
+ CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
+ /*IsInitializer=*/false);
+ }
+
+ // Shift the address forward by one element.
+ auto DestElementNext = CGF.Builder.CreateConstGEP1_32(
+ DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
+ // Check whether we've reached the end.
+ auto Done =
+ CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
+ CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
+ DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
+
+ // Done.
+ CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
+}
+
+void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
+ Address SrcAddr, const VarDecl *DestVD,
+ const VarDecl *SrcVD, const Expr *Copy) {
+ if (OriginalType->isArrayType()) {
+ auto *BO = dyn_cast<BinaryOperator>(Copy);
+ if (BO && BO->getOpcode() == BO_Assign) {
+ // Perform simple memcpy for simple copying.
+ EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
+ } else {
+ // For arrays with complex element types, perform element-by-element
+ // copying.
+ EmitOMPAggregateAssign(
+ DestAddr, SrcAddr, OriginalType,
+ [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
+ // We are working with a single array element, so we have to remap the
+ // destination and source variables to the corresponding array
+ // elements.
+ CodeGenFunction::OMPPrivateScope Remap(*this);
+ Remap.addPrivate(DestVD, [DestElement]() -> Address {
+ return DestElement;
+ });
+ Remap.addPrivate(
+ SrcVD, [SrcElement]() -> Address { return SrcElement; });
+ (void)Remap.Privatize();
+ EmitIgnoredExpr(Copy);
+ });
+ }
+ } else {
+ // Remap pseudo source variable to private copy.
+ CodeGenFunction::OMPPrivateScope Remap(*this);
+ Remap.addPrivate(SrcVD, [SrcAddr]() -> Address { return SrcAddr; });
+ Remap.addPrivate(DestVD, [DestAddr]() -> Address { return DestAddr; });
+ (void)Remap.Privatize();
+ // Emit copying of the whole variable.
+ EmitIgnoredExpr(Copy);
+ }
+}
+
+bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
+ OMPPrivateScope &PrivateScope) {
+ if (!HaveInsertPoint())
+ return false;
+ llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
+ for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
+ auto IRef = C->varlist_begin();
+ auto InitsRef = C->inits().begin();
+ for (auto IInit : C->private_copies()) {
+ auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
+ if (EmittedAsFirstprivate.count(OrigVD) == 0) {
+ EmittedAsFirstprivate.insert(OrigVD);
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
+ auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
+ bool IsRegistered;
+ DeclRefExpr DRE(
+ const_cast<VarDecl *>(OrigVD),
+ /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
+ OrigVD) != nullptr,
+ (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
+ Address OriginalAddr = EmitLValue(&DRE).getAddress();
+ QualType Type = OrigVD->getType();
+ if (Type->isArrayType()) {
+ // Emit VarDecl with copy init for arrays.
+ // Get the address of the original variable captured in the current
+ // captured region.
+ IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
+ auto Emission = EmitAutoVarAlloca(*VD);
+ auto *Init = VD->getInit();
+ if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
+ // Perform simple memcpy.
+ EmitAggregateAssign(Emission.getAllocatedAddress(), OriginalAddr,
+ Type);
+ } else {
+ EmitOMPAggregateAssign(
+ Emission.getAllocatedAddress(), OriginalAddr, Type,
+ [this, VDInit, Init](Address DestElement,
+ Address SrcElement) {
+ // Clean up any temporaries needed by the initialization.
+ RunCleanupsScope InitScope(*this);
+ // Emit initialization for single element.
+ setAddrOfLocalVar(VDInit, SrcElement);
+ EmitAnyExprToMem(Init, DestElement,
+ Init->getType().getQualifiers(),
+ /*IsInitializer*/ false);
+ LocalDeclMap.erase(VDInit);
+ });
+ }
+ EmitAutoVarCleanups(Emission);
+ return Emission.getAllocatedAddress();
+ });
+ } else {
+ IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
+ // Emit private VarDecl with copy init.
+ // Remap the temp VDInit variable to the address of the original variable
+ // (for proper handling of captured global variables).
+ setAddrOfLocalVar(VDInit, OriginalAddr);
+ EmitDecl(*VD);
+ LocalDeclMap.erase(VDInit);
+ return GetAddrOfLocalVar(VD);
+ });
+ }
+ assert(IsRegistered &&
+ "firstprivate var already registered as private");
+ // Silence the warning about unused variable.
+ (void)IsRegistered;
+ }
+ ++IRef, ++InitsRef;
+ }
+ }
+ return !EmittedAsFirstprivate.empty();
+}
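+
+ // For a hypothetical directive such as
+ //   #pragma omp parallel firstprivate(a)
+ // each thread gets a fresh 'a' initialized from the original: a plain
+ // memcpy for arrays with trivial initialization, an element-by-element
+ // copy construction otherwise, and an ordinary copy init for scalars.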
+
+void CodeGenFunction::EmitOMPPrivateClause(
+ const OMPExecutableDirective &D,
+ CodeGenFunction::OMPPrivateScope &PrivateScope) {
+ if (!HaveInsertPoint())
+ return;
+ llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
+ for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
+ auto IRef = C->varlist_begin();
+ for (auto IInit : C->private_copies()) {
+ auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
+ if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
+ auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
+ bool IsRegistered =
+ PrivateScope.addPrivate(OrigVD, [&]() -> Address {
+ // Emit private VarDecl with copy init.
+ EmitDecl(*VD);
+ return GetAddrOfLocalVar(VD);
+ });
+ assert(IsRegistered && "private var already registered as private");
+ // Silence the warning about unused variable.
+ (void)IsRegistered;
+ }
+ ++IRef;
+ }
+ }
+}
+
+bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
+ if (!HaveInsertPoint())
+ return false;
+ // threadprivate_var1 = master_threadprivate_var1;
+ // operator=(threadprivate_var2, master_threadprivate_var2);
+ // ...
+ // __kmpc_barrier(&loc, global_tid);
+ llvm::DenseSet<const VarDecl *> CopiedVars;
+ llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
+ for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
+ auto IRef = C->varlist_begin();
+ auto ISrcRef = C->source_exprs().begin();
+ auto IDestRef = C->destination_exprs().begin();
+ for (auto *AssignOp : C->assignment_ops()) {
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
+ QualType Type = VD->getType();
+ if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
+
+ // Get the address of the master variable. If we are emitting code with
+ // TLS support, the address is passed from the master as a field in the
+ // captured declaration.
+ Address MasterAddr = Address::invalid();
+ if (getLangOpts().OpenMPUseTLS &&
+ getContext().getTargetInfo().isTLSSupported()) {
+ assert(CapturedStmtInfo->lookup(VD) &&
+ "Copyin threadprivates should have been captured!");
+ DeclRefExpr DRE(const_cast<VarDecl *>(VD), true, (*IRef)->getType(),
+ VK_LValue, (*IRef)->getExprLoc());
+ MasterAddr = EmitLValue(&DRE).getAddress();
+ LocalDeclMap.erase(VD);
+ } else {
+ MasterAddr =
+ Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
+ : CGM.GetAddrOfGlobal(VD),
+ getContext().getDeclAlign(VD));
+ }
+ // Get the address of the threadprivate variable.
+ Address PrivateAddr = EmitLValue(*IRef).getAddress();
+ if (CopiedVars.size() == 1) {
+ // First check whether the current thread is the master thread. If it is,
+ // there is no need to copy data.
+ CopyBegin = createBasicBlock("copyin.not.master");
+ CopyEnd = createBasicBlock("copyin.not.master.end");
+ Builder.CreateCondBr(
+ Builder.CreateICmpNE(
+ Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
+ Builder.CreatePtrToInt(PrivateAddr.getPointer(), CGM.IntPtrTy)),
+ CopyBegin, CopyEnd);
+ EmitBlock(CopyBegin);
+ }
+ auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
+ auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
+ EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
+ }
+ ++IRef;
+ ++ISrcRef;
+ ++IDestRef;
+ }
+ }
+ if (CopyEnd) {
+ // Exit the copying procedure for the non-master threads.
+ EmitBlock(CopyEnd, /*IsFinished=*/true);
+ return true;
+ }
+ return false;
+}
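+
+ // Sketch of the usual pattern: for a hypothetical
+ //   #pragma omp parallel copyin(tp)   // tp declared threadprivate
+ // every non-master thread (detected by the address comparison above)
+ // assigns the master's 'tp' into its own copy before the caller emits the
+ // synchronizing barrier.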
+
+bool CodeGenFunction::EmitOMPLastprivateClauseInit(
+ const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
+ if (!HaveInsertPoint())
+ return false;
+ bool HasAtLeastOneLastprivate = false;
+ llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
+ for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
+ HasAtLeastOneLastprivate = true;
+ auto IRef = C->varlist_begin();
+ auto IDestRef = C->destination_exprs().begin();
+ for (auto *IInit : C->private_copies()) {
+ // Keep the address of the original variable for the final update at the
+ // end of the loop.
+ auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
+ if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
+ auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
+ PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> Address {
+ DeclRefExpr DRE(
+ const_cast<VarDecl *>(OrigVD),
+ /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
+ OrigVD) != nullptr,
+ (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
+ return EmitLValue(&DRE).getAddress();
+ });
+ // Check if the variable is also a firstprivate: in this case IInit is
+ // not generated. Initialization of this variable will happen in the
+ // codegen for the 'firstprivate' clause.
+ if (IInit) {
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
+ bool IsRegistered =
+ PrivateScope.addPrivate(OrigVD, [&]() -> Address {
+ // Emit private VarDecl with copy init.
+ EmitDecl(*VD);
+ return GetAddrOfLocalVar(VD);
+ });
+ assert(IsRegistered &&
+ "lastprivate var already registered as private");
+ (void)IsRegistered;
+ }
+ }
+ ++IRef, ++IDestRef;
+ }
+ }
+ return HasAtLeastOneLastprivate;
+}
+
+void CodeGenFunction::EmitOMPLastprivateClauseFinal(
+ const OMPExecutableDirective &D, llvm::Value *IsLastIterCond) {
+ if (!HaveInsertPoint())
+ return;
+ // Emit the following code:
+ // if (<IsLastIterCond>) {
+ // orig_var1 = private_orig_var1;
+ // ...
+ // orig_varn = private_orig_varn;
+ // }
+ llvm::BasicBlock *ThenBB = nullptr;
+ llvm::BasicBlock *DoneBB = nullptr;
+ if (IsLastIterCond) {
+ ThenBB = createBasicBlock(".omp.lastprivate.then");
+ DoneBB = createBasicBlock(".omp.lastprivate.done");
+ Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
+ EmitBlock(ThenBB);
+ }
+ llvm::DenseMap<const Decl *, const Expr *> LoopCountersAndUpdates;
+ const Expr *LastIterVal = nullptr;
+ const Expr *IVExpr = nullptr;
+ const Expr *IncExpr = nullptr;
+ if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
+ if (isOpenMPWorksharingDirective(D.getDirectiveKind())) {
+ LastIterVal = cast<VarDecl>(cast<DeclRefExpr>(
+ LoopDirective->getUpperBoundVariable())
+ ->getDecl())
+ ->getAnyInitializer();
+ IVExpr = LoopDirective->getIterationVariable();
+ IncExpr = LoopDirective->getInc();
+ auto IUpdate = LoopDirective->updates().begin();
+ for (auto *E : LoopDirective->counters()) {
+ auto *D = cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
+ LoopCountersAndUpdates[D] = *IUpdate;
+ ++IUpdate;
+ }
+ }
+ }
+ {
+ llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
+ bool FirstLCV = true;
+ for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
+ auto IRef = C->varlist_begin();
+ auto ISrcRef = C->source_exprs().begin();
+ auto IDestRef = C->destination_exprs().begin();
+ for (auto *AssignOp : C->assignment_ops()) {
+ auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
+ QualType Type = PrivateVD->getType();
+ auto *CanonicalVD = PrivateVD->getCanonicalDecl();
+ if (AlreadyEmittedVars.insert(CanonicalVD).second) {
+ // If the lastprivate variable is a loop control variable of a loop-based
+ // directive, update its value before copying it back to the original
+ // variable.
+ if (auto *UpExpr = LoopCountersAndUpdates.lookup(CanonicalVD)) {
+ if (FirstLCV && LastIterVal) {
+ EmitAnyExprToMem(LastIterVal, EmitLValue(IVExpr).getAddress(),
+ IVExpr->getType().getQualifiers(),
+ /*IsInitializer=*/false);
+ EmitIgnoredExpr(IncExpr);
+ FirstLCV = false;
+ }
+ EmitIgnoredExpr(UpExpr);
+ }
+ auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
+ auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
+ // Get the address of the original variable.
+ Address OriginalAddr = GetAddrOfLocalVar(DestVD);
+ // Get the address of the private variable.
+ Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
+ if (auto RefTy = PrivateVD->getType()->getAs<ReferenceType>())
+ PrivateAddr =
+ Address(Builder.CreateLoad(PrivateAddr),
+ getNaturalTypeAlignment(RefTy->getPointeeType()));
+ EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
+ }
+ ++IRef;
+ ++ISrcRef;
+ ++IDestRef;
+ }
+ }
+ }
+ if (IsLastIterCond) {
+ EmitBlock(DoneBB, /*IsFinished=*/true);
+ }
+}
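+
+ // E.g., for a hypothetical
+ //   #pragma omp for lastprivate(x)
+ // the guarded block runs only when the last iteration was executed; loop
+ // counters are first advanced past the final iteration so their
+ // lastprivate values match sequential execution.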
+
+void CodeGenFunction::EmitOMPReductionClauseInit(
+ const OMPExecutableDirective &D,
+ CodeGenFunction::OMPPrivateScope &PrivateScope) {
+ if (!HaveInsertPoint())
+ return;
+ for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
+ auto ILHS = C->lhs_exprs().begin();
+ auto IRHS = C->rhs_exprs().begin();
+ auto IPriv = C->privates().begin();
+ for (auto IRef : C->varlists()) {
+ auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
+ auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
+ auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
+ if (auto *OASE = dyn_cast<OMPArraySectionExpr>(IRef)) {
+ auto *Base = OASE->getBase()->IgnoreParenImpCasts();
+ while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
+ Base = TempOASE->getBase()->IgnoreParenImpCasts();
+ while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
+ Base = TempASE->getBase()->IgnoreParenImpCasts();
+ auto *DE = cast<DeclRefExpr>(Base);
+ auto *OrigVD = cast<VarDecl>(DE->getDecl());
+ auto OASELValueLB = EmitOMPArraySectionExpr(OASE);
+ auto OASELValueUB =
+ EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
+ auto OriginalBaseLValue = EmitLValue(DE);
+ auto BaseLValue = OriginalBaseLValue;
+ auto *Zero = Builder.getInt64(/*C=*/0);
+ llvm::SmallVector<llvm::Value *, 4> Indexes;
+ Indexes.push_back(Zero);
+ auto *ItemTy =
+ OASELValueLB.getPointer()->getType()->getPointerElementType();
+ auto *Ty = BaseLValue.getPointer()->getType()->getPointerElementType();
+ while (Ty != ItemTy) {
+ Indexes.push_back(Zero);
+ Ty = Ty->getPointerElementType();
+ }
+ BaseLValue = MakeAddrLValue(
+ Address(Builder.CreateInBoundsGEP(BaseLValue.getPointer(), Indexes),
+ OASELValueLB.getAlignment()),
+ OASELValueLB.getType(), OASELValueLB.getAlignmentSource());
+ // Store the address of the original variable associated with the LHS
+ // implicit variable.
+ PrivateScope.addPrivate(LHSVD, [this, OASELValueLB]() -> Address {
+ return OASELValueLB.getAddress();
+ });
+ // Emit reduction copy.
+ bool IsRegistered = PrivateScope.addPrivate(
+ OrigVD, [this, PrivateVD, BaseLValue, OASELValueLB, OASELValueUB,
+ OriginalBaseLValue]() -> Address {
+ // Emit VarDecl with copy init for arrays.
+ // Get the address of the original variable captured in the current
+ // captured region.
+ auto *Size = Builder.CreatePtrDiff(OASELValueUB.getPointer(),
+ OASELValueLB.getPointer());
+ Size = Builder.CreateNUWAdd(
+ Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
+ CodeGenFunction::OpaqueValueMapping OpaqueMap(
+ *this, cast<OpaqueValueExpr>(
+ getContext()
+ .getAsVariableArrayType(PrivateVD->getType())
+ ->getSizeExpr()),
+ RValue::get(Size));
+ EmitVariablyModifiedType(PrivateVD->getType());
+ auto Emission = EmitAutoVarAlloca(*PrivateVD);
+ auto Addr = Emission.getAllocatedAddress();
+ auto *Init = PrivateVD->getInit();
+ EmitOMPAggregateInit(*this, Addr, PrivateVD->getType(), Init);
+ EmitAutoVarCleanups(Emission);
+ // Emit private VarDecl with reduction init.
+ auto *Offset = Builder.CreatePtrDiff(BaseLValue.getPointer(),
+ OASELValueLB.getPointer());
+ auto *Ptr = Builder.CreateGEP(Addr.getPointer(), Offset);
+ Ptr = Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Ptr, OriginalBaseLValue.getPointer()->getType());
+ return Address(Ptr, OriginalBaseLValue.getAlignment());
+ });
+ assert(IsRegistered && "private var already registered as private");
+ // Silence the warning about unused variable.
+ (void)IsRegistered;
+ PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
+ return GetAddrOfLocalVar(PrivateVD);
+ });
+ } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(IRef)) {
+ auto *Base = ASE->getBase()->IgnoreParenImpCasts();
+ while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
+ Base = TempASE->getBase()->IgnoreParenImpCasts();
+ auto *DE = cast<DeclRefExpr>(Base);
+ auto *OrigVD = cast<VarDecl>(DE->getDecl());
+ auto ASELValue = EmitLValue(ASE);
+ auto OriginalBaseLValue = EmitLValue(DE);
+ auto BaseLValue = OriginalBaseLValue;
+ auto *Zero = Builder.getInt64(/*C=*/0);
+ llvm::SmallVector<llvm::Value *, 4> Indexes;
+ Indexes.push_back(Zero);
+ auto *ItemTy =
+ ASELValue.getPointer()->getType()->getPointerElementType();
+ auto *Ty = BaseLValue.getPointer()->getType()->getPointerElementType();
+ while (Ty != ItemTy) {
+ Indexes.push_back(Zero);
+ Ty = Ty->getPointerElementType();
+ }
+ BaseLValue = MakeAddrLValue(
+ Address(Builder.CreateInBoundsGEP(BaseLValue.getPointer(), Indexes),
+ ASELValue.getAlignment()),
+ ASELValue.getType(), ASELValue.getAlignmentSource());
+ // Store the address of the original variable associated with the LHS
+ // implicit variable.
+ PrivateScope.addPrivate(LHSVD, [this, ASELValue]() -> Address {
+ return ASELValue.getAddress();
+ });
+ // Emit reduction copy.
+ bool IsRegistered = PrivateScope.addPrivate(
+ OrigVD, [this, PrivateVD, BaseLValue, ASELValue,
+ OriginalBaseLValue]() -> Address {
+ // Emit private VarDecl with reduction init.
+ EmitDecl(*PrivateVD);
+ auto Addr = GetAddrOfLocalVar(PrivateVD);
+ auto *Offset = Builder.CreatePtrDiff(BaseLValue.getPointer(),
+ ASELValue.getPointer());
+ auto *Ptr = Builder.CreateGEP(Addr.getPointer(), Offset);
+ Ptr = Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Ptr, OriginalBaseLValue.getPointer()->getType());
+ return Address(Ptr, OriginalBaseLValue.getAlignment());
+ });
+ assert(IsRegistered && "private var already registered as private");
+ // Silence the warning about unused variable.
+ (void)IsRegistered;
+ PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
+ return GetAddrOfLocalVar(PrivateVD);
+ });
+ } else {
+ auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
+ // Store the address of the original variable associated with the LHS
+ // implicit variable.
+ PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef]() -> Address {
+ DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
+ CapturedStmtInfo->lookup(OrigVD) != nullptr,
+ IRef->getType(), VK_LValue, IRef->getExprLoc());
+ return EmitLValue(&DRE).getAddress();
+ });
+ // Emit reduction copy.
+ bool IsRegistered =
+ PrivateScope.addPrivate(OrigVD, [this, PrivateVD]() -> Address {
+ // Emit private VarDecl with reduction init.
+ EmitDecl(*PrivateVD);
+ return GetAddrOfLocalVar(PrivateVD);
+ });
+ assert(IsRegistered && "private var already registered as private");
+ // Silence the warning about unused variable.
+ (void)IsRegistered;
+ PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
+ return GetAddrOfLocalVar(PrivateVD);
+ });
+ }
+ ++ILHS, ++IRHS, ++IPriv;
+ }
+ }
+}
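+
+ // For a hypothetical 'reduction(+ : sum)' this privatizes 'sum' with the
+ // '+' identity as initializer; for an array section such as
+ // 'reduction(+ : a[lb:n])' only the section is allocated, and the pointer
+ // arithmetic above rebases the private buffer so that indexing through the
+ // original base still works inside the region.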
+
+void CodeGenFunction::EmitOMPReductionClauseFinal(
+ const OMPExecutableDirective &D) {
+ if (!HaveInsertPoint())
+ return;
+ llvm::SmallVector<const Expr *, 8> Privates;
+ llvm::SmallVector<const Expr *, 8> LHSExprs;
+ llvm::SmallVector<const Expr *, 8> RHSExprs;
+ llvm::SmallVector<const Expr *, 8> ReductionOps;
+ bool HasAtLeastOneReduction = false;
+ for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
+ HasAtLeastOneReduction = true;
+ Privates.append(C->privates().begin(), C->privates().end());
+ LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
+ RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
+ ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
+ }
+ if (HasAtLeastOneReduction) {
+ // Emit a nowait reduction if the nowait clause is present or the
+ // directive is a parallel directive (it always has an implicit barrier).
+ CGM.getOpenMPRuntime().emitReduction(
+ *this, D.getLocEnd(), Privates, LHSExprs, RHSExprs, ReductionOps,
+ D.getSingleClause<OMPNowaitClause>() ||
+ isOpenMPParallelDirective(D.getDirectiveKind()) ||
+ D.getDirectiveKind() == OMPD_simd,
+ D.getDirectiveKind() == OMPD_simd);
+ }
+}
+
+static void emitCommonOMPParallelDirective(CodeGenFunction &CGF,
+ const OMPExecutableDirective &S,
+ OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) {
+ auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
+ llvm::SmallVector<llvm::Value *, 16> CapturedVars;
+ CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
+ auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
+ S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
+ if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
+ CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
+ auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
+ /*IgnoreResultAssign*/ true);
+ CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
+ CGF, NumThreads, NumThreadsClause->getLocStart());
+ }
+ if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
+ CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
+ CGF.CGM.getOpenMPRuntime().emitProcBindClause(
+ CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getLocStart());
+ }
+ const Expr *IfCond = nullptr;
+ for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
+ if (C->getNameModifier() == OMPD_unknown ||
+ C->getNameModifier() == OMPD_parallel) {
+ IfCond = C->getCondition();
+ break;
+ }
+ }
+ CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
+ CapturedVars, IfCond);
+}
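+
+ // Roughly, the emitted runtime sequence is (entry-point names assume the
+ // libomp/kmpc runtime):
+ //   __kmpc_push_num_threads(...);  // only with a num_threads clause
+ //   __kmpc_push_proc_bind(...);    // only with a proc_bind clause
+ //   __kmpc_fork_call(&loc, n, OutlinedFn, <captured vars>);
+ // with an if() clause lowered to a choice between the forked and the
+ // serialized version of the region.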
+
+void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
+ LexicalScope Scope(*this, S.getSourceRange());
+ // Emit parallel region as a standalone region.
+ auto &&CodeGen = [&S](CodeGenFunction &CGF) {
+ OMPPrivateScope PrivateScope(CGF);
+ bool Copyins = CGF.EmitOMPCopyinClause(S);
+ bool Firstprivates = CGF.EmitOMPFirstprivateClause(S, PrivateScope);
+ if (Copyins || Firstprivates) {
+ // Emit an implicit barrier to synchronize threads and avoid data races on
+ // initialization of firstprivate variables, or on propagation of the
+ // master thread's values of threadprivate variables to the local
+ // instances of those variables in all other implicit threads.
+ CGF.CGM.getOpenMPRuntime().emitBarrierCall(
+ CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
+ /*ForceSimpleCall=*/true);
+ }
+ CGF.EmitOMPPrivateClause(S, PrivateScope);
+ CGF.EmitOMPReductionClauseInit(S, PrivateScope);
+ (void)PrivateScope.Privatize();
+ CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ CGF.EmitOMPReductionClauseFinal(S);
+ };
+ emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen);
+}
+
+void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
+ JumpDest LoopExit) {
+ RunCleanupsScope BodyScope(*this);
+ // Update counter values for the current iteration.
+ for (auto I : D.updates()) {
+ EmitIgnoredExpr(I);
+ }
+ // Update the linear variables.
+ for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
+ for (auto U : C->updates()) {
+ EmitIgnoredExpr(U);
+ }
+ }
+
+ // On a continue in the body, jump to the end.
+ auto Continue = getJumpDestInCurrentScope("omp.body.continue");
+ BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
+ // Emit loop body.
+ EmitStmt(D.getBody());
+ // The end (updates/cleanups).
+ EmitBlock(Continue.getBlock());
+ BreakContinueStack.pop_back();
+ // TODO: Update lastprivates if the SeparateIter flag is true.
+ // This will be implemented in a follow-up OMPLastprivateClause patch, but
+ // the result should still be correct without it, as we do not make these
+ // variables private yet.
+}
+
+void CodeGenFunction::EmitOMPInnerLoop(
+ const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
+ const Expr *IncExpr,
+ const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
+ const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) {
+ auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");
+
+ // Start the loop with a block that tests the condition.
+ auto CondBlock = createBasicBlock("omp.inner.for.cond");
+ EmitBlock(CondBlock);
+ LoopStack.push(CondBlock);
+
+ // If there are any cleanups between here and the loop-exit scope,
+ // create a block to stage a loop exit along.
+ auto ExitBlock = LoopExit.getBlock();
+ if (RequiresCleanup)
+ ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");
+
+ auto LoopBody = createBasicBlock("omp.inner.for.body");
+
+ // Emit condition.
+ EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
+ if (ExitBlock != LoopExit.getBlock()) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
+
+ EmitBlock(LoopBody);
+ incrementProfileCounter(&S);
+
+ // Create a block for the increment.
+ auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
+ BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
+
+ BodyGen(*this);
+
+ // Emit "IV = IV + 1" and a back-edge to the condition block.
+ EmitBlock(Continue.getBlock());
+ EmitIgnoredExpr(IncExpr);
+ PostIncGen(*this);
+ BreakContinueStack.pop_back();
+ EmitBranch(CondBlock);
+ LoopStack.pop();
+ // Emit the fall-through block.
+ EmitBlock(LoopExit.getBlock());
+}
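+
+ // The block structure produced here is:
+ //   omp.inner.for.cond -> omp.inner.for.body | omp.inner.for.end
+ //   omp.inner.for.body -> omp.inner.for.inc (IV = IV + 1) -> cond
+ // with an extra omp.inner.for.cond.cleanup hop when the exit path must run
+ // cleanups.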
+
+void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
+ if (!HaveInsertPoint())
+ return;
+ // Emit inits for the linear variables.
+ for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
+ for (auto Init : C->inits()) {
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
+ auto *OrigVD = cast<VarDecl>(
+ cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())->getDecl());
+ DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
+ CapturedStmtInfo->lookup(OrigVD) != nullptr,
+ VD->getInit()->getType(), VK_LValue,
+ VD->getInit()->getExprLoc());
+ AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
+ EmitExprAsInit(&DRE, VD,
+ MakeAddrLValue(Emission.getAllocatedAddress(), VD->getType()),
+ /*capturedByInit=*/false);
+ EmitAutoVarCleanups(Emission);
+ }
+ // Emit the linear steps for the linear clauses.
+ // If a step is not constant, it is pre-calculated before the loop.
+ if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
+ if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
+ EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
+ // Emit calculation of the linear step.
+ EmitIgnoredExpr(CS);
+ }
+ }
+}
+
+static void emitLinearClauseFinal(CodeGenFunction &CGF,
+ const OMPLoopDirective &D) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Emit the final values of the linear variables.
+ for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
+ auto IC = C->varlist_begin();
+ for (auto F : C->finals()) {
+ auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
+ DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
+ CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
+ (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
+ Address OrigAddr = CGF.EmitLValue(&DRE).getAddress();
+ CodeGenFunction::OMPPrivateScope VarScope(CGF);
+ VarScope.addPrivate(OrigVD,
+ [OrigAddr]() -> Address { return OrigAddr; });
+ (void)VarScope.Privatize();
+ CGF.EmitIgnoredExpr(F);
+ ++IC;
+ }
+ }
+}
+
+static void emitAlignedClause(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
+ unsigned ClauseAlignment = 0;
+ if (auto AlignmentExpr = Clause->getAlignment()) {
+ auto AlignmentCI =
+ cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
+ ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
+ }
+ for (auto E : Clause->varlists()) {
+ unsigned Alignment = ClauseAlignment;
+ if (Alignment == 0) {
+ // OpenMP [2.8.1, Description]
+ // If no optional parameter is specified, implementation-defined default
+ // alignments for SIMD instructions on the target platforms are assumed.
+ Alignment =
+ CGF.getContext()
+ .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
+ E->getType()->getPointeeType()))
+ .getQuantity();
+ }
+ assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
+ "alignment is not power of 2");
+ if (Alignment != 0) {
+ llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
+ CGF.EmitAlignmentAssumption(PtrValue, Alignment);
+ }
+ }
+ }
+}
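+
+ // E.g., a hypothetical '#pragma omp simd aligned(p : 64)' emits an
+ // alignment assumption on 'p' (an llvm.assume over its low pointer bits),
+ // which lets the vectorizer use aligned loads and stores.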
+
+static void emitPrivateLoopCounters(CodeGenFunction &CGF,
+ CodeGenFunction::OMPPrivateScope &LoopScope,
+ ArrayRef<Expr *> Counters,
+ ArrayRef<Expr *> PrivateCounters) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ auto I = PrivateCounters.begin();
+ for (auto *E : Counters) {
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
+ Address Addr = Address::invalid();
+ (void)LoopScope.addPrivate(PrivateVD, [&]() -> Address {
+ // Emit var without initialization.
+ auto VarEmission = CGF.EmitAutoVarAlloca(*PrivateVD);
+ CGF.EmitAutoVarCleanups(VarEmission);
+ Addr = VarEmission.getAllocatedAddress();
+ return Addr;
+ });
+ (void)LoopScope.addPrivate(VD, [&]() -> Address { return Addr; });
+ ++I;
+ }
+}
+
+static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
+ const Expr *Cond, llvm::BasicBlock *TrueBlock,
+ llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ {
+ CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
+ emitPrivateLoopCounters(CGF, PreCondScope, S.counters(),
+ S.private_counters());
+ (void)PreCondScope.Privatize();
+ // Get initial values of real counters.
+ for (auto I : S.inits()) {
+ CGF.EmitIgnoredExpr(I);
+ }
+ }
+ // Check that the loop is executed at least once.
+ CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
+}
+
+static void
+emitPrivateLinearVars(CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ CodeGenFunction::OMPPrivateScope &PrivateScope) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
+ auto CurPrivate = C->privates().begin();
+ for (auto *E : C->varlists()) {
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ auto *PrivateVD =
+ cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
+ bool IsRegistered = PrivateScope.addPrivate(VD, [&]() -> Address {
+ // Emit private VarDecl with copy init.
+ CGF.EmitVarDecl(*PrivateVD);
+ return CGF.GetAddrOfLocalVar(PrivateVD);
+ });
+ assert(IsRegistered && "linear var already registered as private");
+ // Silence the warning about unused variable.
+ (void)IsRegistered;
+ ++CurPrivate;
+ }
+ }
+}
+
+static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D,
+ bool IsMonotonic) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
+ RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
+ /*ignoreResult=*/true);
+ llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
+ CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
+ // In the presence of a finite 'safelen', it may be unsafe to mark all
+ // the memory instructions parallel, because loop-carried
+ // dependences of 'safelen' iterations are possible.
+ if (!IsMonotonic)
+ CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
+ } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
+ RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
+ /*ignoreResult=*/true);
+ llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
+ CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
+ // In the presence of a finite 'safelen', it may be unsafe to mark all
+ // the memory instructions parallel, because loop-carried
+ // dependences of 'safelen' iterations are possible.
+ CGF.LoopStack.setParallel(false);
+ }
+}
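+
+ // E.g., hypothetically: 'simdlen(8)' pins the vectorization width to 8
+ // and, absent a 'safelen' clause, keeps memory accesses marked parallel,
+ // while 'safelen(8)' sets the same width but drops the parallel marking,
+ // since dependences at distances beyond the safelen remain possible.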
+
+void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
+ bool IsMonotonic) {
+ // Walk clauses and process simdlen/safelen.
+ LoopStack.setParallel(!IsMonotonic);
+ LoopStack.setVectorizeEnable(true);
+ emitSimdlenSafelenClause(*this, D, IsMonotonic);
+}
+
+void CodeGenFunction::EmitOMPSimdFinal(const OMPLoopDirective &D) {
+ if (!HaveInsertPoint())
+ return;
+ auto IC = D.counters().begin();
+ for (auto F : D.finals()) {
+ auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
+ if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD)) {
+ DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
+ CapturedStmtInfo->lookup(OrigVD) != nullptr,
+ (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
+ Address OrigAddr = EmitLValue(&DRE).getAddress();
+ OMPPrivateScope VarScope(*this);
+ VarScope.addPrivate(OrigVD,
+ [OrigAddr]() -> Address { return OrigAddr; });
+ (void)VarScope.Privatize();
+ EmitIgnoredExpr(F);
+ }
+ ++IC;
+ }
+ emitLinearClauseFinal(*this, D);
+}
+
+void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
+ auto &&CodeGen = [&S](CodeGenFunction &CGF) {
+ // if (PreCond) {
+ // for (IV in 0..LastIteration) BODY;
+ // <Final counter/linear vars updates>;
+ // }
+ //
+
+ // Emit: if (PreCond) - begin.
+ // If the condition constant folds and can be elided, avoid emitting the
+ // whole loop.
+ bool CondConstant;
+ llvm::BasicBlock *ContBlock = nullptr;
+ if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
+ if (!CondConstant)
+ return;
+ } else {
+ auto *ThenBlock = CGF.createBasicBlock("simd.if.then");
+ ContBlock = CGF.createBasicBlock("simd.if.end");
+ emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
+ CGF.getProfileCount(&S));
+ CGF.EmitBlock(ThenBlock);
+ CGF.incrementProfileCounter(&S);
+ }
+
+ // Emit the loop iteration variable.
+ const Expr *IVExpr = S.getIterationVariable();
+ const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
+ CGF.EmitVarDecl(*IVDecl);
+ CGF.EmitIgnoredExpr(S.getInit());
+
+ // Emit the iteration count variable.
+ // If it is not a variable, Sema decided to calculate the iteration count
+ // on each iteration (e.g., it is foldable into a constant).
+ if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
+ CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
+ // Emit calculation of the iterations count.
+ CGF.EmitIgnoredExpr(S.getCalcLastIteration());
+ }
+
+ CGF.EmitOMPSimdInit(S);
+
+ emitAlignedClause(CGF, S);
+ CGF.EmitOMPLinearClauseInit(S);
+ bool HasLastprivateClause;
+ {
+ OMPPrivateScope LoopScope(CGF);
+ emitPrivateLoopCounters(CGF, LoopScope, S.counters(),
+ S.private_counters());
+ emitPrivateLinearVars(CGF, S, LoopScope);
+ CGF.EmitOMPPrivateClause(S, LoopScope);
+ CGF.EmitOMPReductionClauseInit(S, LoopScope);
+ HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
+ (void)LoopScope.Privatize();
+ CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
+ S.getInc(),
+ [&S](CodeGenFunction &CGF) {
+ CGF.EmitOMPLoopBody(S, JumpDest());
+ CGF.EmitStopPoint(&S);
+ },
+ [](CodeGenFunction &) {});
+ // Emit the final copy of the lastprivate variables at the end of the loop.
+ if (HasLastprivateClause) {
+ CGF.EmitOMPLastprivateClauseFinal(S);
+ }
+ CGF.EmitOMPReductionClauseFinal(S);
+ }
+ CGF.EmitOMPSimdFinal(S);
+ // Emit: if (PreCond) - end.
+ if (ContBlock) {
+ CGF.EmitBranch(ContBlock);
+ CGF.EmitBlock(ContBlock, true);
+ }
+ };
+ CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
+}
+
+void CodeGenFunction::EmitOMPForOuterLoop(
+ OpenMPScheduleClauseKind ScheduleKind, bool IsMonotonic,
+ const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
+ Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
+ auto &RT = CGM.getOpenMPRuntime();
+
+ // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
+ const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind);
+
+ assert((Ordered ||
+ !RT.isStaticNonchunked(ScheduleKind, /*Chunked=*/Chunk != nullptr)) &&
+ "static non-chunked schedule does not need outer loop");
+
+ // Emit outer loop.
+ //
+ // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
+ // When schedule(dynamic,chunk_size) is specified, the iterations are
+ // distributed to threads in the team in chunks as the threads request them.
+ // Each thread executes a chunk of iterations, then requests another chunk,
+ // until no chunks remain to be distributed. Each chunk contains chunk_size
+ // iterations, except for the last chunk to be distributed, which may have
+ // fewer iterations. When no chunk_size is specified, it defaults to 1.
+ //
+ // When schedule(guided,chunk_size) is specified, the iterations are assigned
+ // to threads in the team in chunks as the executing threads request them.
+ // Each thread executes a chunk of iterations, then requests another chunk,
+ // until no chunks remain to be assigned. For a chunk_size of 1, the size of
+ // each chunk is proportional to the number of unassigned iterations divided
+ // by the number of threads in the team, decreasing to 1. For a chunk_size
+ // with value k (greater than 1), the size of each chunk is determined in the
+ // same way, with the restriction that the chunks do not contain fewer than k
+ // iterations (except for the last chunk to be assigned, which may have fewer
+ // than k iterations).
+ //
+ // When schedule(auto) is specified, the decision regarding scheduling is
+ // delegated to the compiler and/or runtime system. The programmer gives the
+ // implementation the freedom to choose any possible mapping of iterations to
+ // threads in the team.
+ //
+ // When schedule(runtime) is specified, the decision regarding scheduling is
+ // deferred until run time, and the schedule and chunk size are taken from the
+ // run-sched-var ICV. If the ICV is set to auto, the schedule is
+ // implementation defined.
+ //
+ // while(__kmpc_dispatch_next(&LB, &UB)) {
+ // idx = LB;
+ // while (idx <= UB) { BODY; ++idx;
+ // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
+ // } // inner loop
+ // }
+ //
+ // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
+ // When schedule(static, chunk_size) is specified, iterations are divided into
+ // chunks of size chunk_size, and the chunks are assigned to the threads in
+ // the team in a round-robin fashion in the order of the thread number.
+ //
+ // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
+ // while (idx <= UB) { BODY; ++idx; } // inner loop
+ // LB = LB + ST;
+ // UB = UB + ST;
+ // }
+ //
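+ // For example (hypothetical directives):
+ //   #pragma omp for schedule(dynamic, 4)  // takes the dispatch path below
+ //   #pragma omp for schedule(static, 4)   // chunked static: still uses the
+ //                                         // outer loop over chunks
+ //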
+
+ const Expr *IVExpr = S.getIterationVariable();
+ const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
+ const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
+
+ if (DynamicOrOrdered) {
+ llvm::Value *UBVal = EmitScalarExpr(S.getLastIteration());
+ RT.emitForDispatchInit(*this, S.getLocStart(), ScheduleKind,
+ IVSize, IVSigned, Ordered, UBVal, Chunk);
+ } else {
+ RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind,
+ IVSize, IVSigned, Ordered, IL, LB, UB, ST, Chunk);
+ }
+
+ auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");
+
+ // Start the loop with a block that tests the condition.
+ auto CondBlock = createBasicBlock("omp.dispatch.cond");
+ EmitBlock(CondBlock);
+ LoopStack.push(CondBlock);
+
+ llvm::Value *BoolCondVal = nullptr;
+ if (!DynamicOrOrdered) {
+ // UB = min(UB, GlobalUB)
+ EmitIgnoredExpr(S.getEnsureUpperBound());
+ // IV = LB
+ EmitIgnoredExpr(S.getInit());
+ // IV < UB
+ BoolCondVal = EvaluateExprAsBool(S.getCond());
+ } else {
+ BoolCondVal = RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned,
+ IL, LB, UB, ST);
+ }
+
+ // If there are any cleanups between here and the loop-exit scope,
+ // create a block to stage a loop exit along.
+ auto ExitBlock = LoopExit.getBlock();
+ if (LoopScope.requiresCleanups())
+ ExitBlock = createBasicBlock("omp.dispatch.cleanup");
+
+ auto LoopBody = createBasicBlock("omp.dispatch.body");
+ Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
+ if (ExitBlock != LoopExit.getBlock()) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
+ EmitBlock(LoopBody);
+
+ // Emit "IV = LB" (in case of static schedule, we have already calculated new
+ // LB for loop condition and emitted it above).
+ if (DynamicOrOrdered)
+ EmitIgnoredExpr(S.getInit());
+
+ // Create a block for the increment.
+ auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
+ BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
+
+ // Generate !llvm.loop.parallel metadata for loads and stores for loops
+ // with dynamic/guided scheduling and without an ordered clause.
+ if (!isOpenMPSimdDirective(S.getDirectiveKind()))
+ LoopStack.setParallel(!IsMonotonic);
+ else
+ EmitOMPSimdInit(S, IsMonotonic);
+
+ SourceLocation Loc = S.getLocStart();
+ EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
+ [&S, LoopExit](CodeGenFunction &CGF) {
+ CGF.EmitOMPLoopBody(S, LoopExit);
+ CGF.EmitStopPoint(&S);
+ },
+ [Ordered, IVSize, IVSigned, Loc](CodeGenFunction &CGF) {
+ if (Ordered) {
+ CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(
+ CGF, Loc, IVSize, IVSigned);
+ }
+ });
+
+ EmitBlock(Continue.getBlock());
+ BreakContinueStack.pop_back();
+ if (!DynamicOrOrdered) {
+ // Emit "LB = LB + Stride", "UB = UB + Stride".
+ EmitIgnoredExpr(S.getNextLowerBound());
+ EmitIgnoredExpr(S.getNextUpperBound());
+ }
+
+ EmitBranch(CondBlock);
+ LoopStack.pop();
+ // Emit the fall-through block.
+ EmitBlock(LoopExit.getBlock());
+
+ // Tell the runtime we are done.
+ if (!DynamicOrOrdered)
+ RT.emitForStaticFinish(*this, S.getLocEnd());
+}
+
+/// \brief Emit a helper variable and return corresponding lvalue.
+static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
+ const DeclRefExpr *Helper) {
+ auto VDecl = cast<VarDecl>(Helper->getDecl());
+ CGF.EmitVarDecl(*VDecl);
+ return CGF.EmitLValue(Helper);
+}
+
+namespace {
+ struct ScheduleKindModifiersTy {
+ OpenMPScheduleClauseKind Kind;
+ OpenMPScheduleClauseModifier M1;
+ OpenMPScheduleClauseModifier M2;
+ ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
+ OpenMPScheduleClauseModifier M1,
+ OpenMPScheduleClauseModifier M2)
+ : Kind(Kind), M1(M1), M2(M2) {}
+ };
+} // namespace
+
+static std::pair<llvm::Value * /*Chunk*/, ScheduleKindModifiersTy>
+emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S,
+ bool OuterRegion) {
+ // Detect the loop schedule kind and chunk.
+ auto ScheduleKind = OMPC_SCHEDULE_unknown;
+ OpenMPScheduleClauseModifier M1 = OMPC_SCHEDULE_MODIFIER_unknown;
+ OpenMPScheduleClauseModifier M2 = OMPC_SCHEDULE_MODIFIER_unknown;
+ llvm::Value *Chunk = nullptr;
+ if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
+ ScheduleKind = C->getScheduleKind();
+ M1 = C->getFirstScheduleModifier();
+ M2 = C->getSecondScheduleModifier();
+ if (const auto *Ch = C->getChunkSize()) {
+ if (auto *ImpRef = cast_or_null<DeclRefExpr>(C->getHelperChunkSize())) {
+ if (OuterRegion) {
+ const VarDecl *ImpVar = cast<VarDecl>(ImpRef->getDecl());
+ CGF.EmitVarDecl(*ImpVar);
+ CGF.EmitStoreThroughLValue(
+ CGF.EmitAnyExpr(Ch),
+ CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(ImpVar),
+ ImpVar->getType()));
+ } else {
+ Ch = ImpRef;
+ }
+ }
+ if (!C->getHelperChunkSize() || !OuterRegion) {
+ Chunk = CGF.EmitScalarExpr(Ch);
+ Chunk = CGF.EmitScalarConversion(Chunk, Ch->getType(),
+ S.getIterationVariable()->getType(),
+ S.getLocStart());
+ }
+ }
+ }
+ return std::make_pair(Chunk, ScheduleKindModifiersTy(ScheduleKind, M1, M2));
+}
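+
+// For example, a (hypothetical) 'schedule(monotonic: dynamic, n)' clause
+// yields OMPC_SCHEDULE_dynamic with a monotonic first modifier; the chunk is
+// the emitted value of 'n', unless OuterRegion is true and a helper chunk
+// variable exists, in which case 'n' is stored into that helper instead.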
+
+bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
+ // Emit the loop iteration variable.
+ auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
+ auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
+ EmitVarDecl(*IVDecl);
+
+ // Emit the iteration count variable.
+ // If it is not a variable, Sema decided to calculate the iteration count on
+ // each iteration (e.g., it is foldable into a constant).
+ if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
+ EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
+ // Emit calculation of the iterations count.
+ EmitIgnoredExpr(S.getCalcLastIteration());
+ }
+
+ auto &RT = CGM.getOpenMPRuntime();
+
+ bool HasLastprivateClause;
+ // Check pre-condition.
+ {
+ // Skip the entire loop if we don't meet the precondition.
+ // If the condition constant folds and can be elided, avoid emitting the
+ // whole loop.
+ bool CondConstant;
+ llvm::BasicBlock *ContBlock = nullptr;
+ if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
+ if (!CondConstant)
+ return false;
+ } else {
+ auto *ThenBlock = createBasicBlock("omp.precond.then");
+ ContBlock = createBasicBlock("omp.precond.end");
+ emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
+ getProfileCount(&S));
+ EmitBlock(ThenBlock);
+ incrementProfileCounter(&S);
+ }
+
+ emitAlignedClause(*this, S);
+ EmitOMPLinearClauseInit(S);
+ // Emit 'then' code.
+ {
+ // Emit helper vars inits.
+ LValue LB =
+ EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
+ LValue UB =
+ EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
+ LValue ST =
+ EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
+ LValue IL =
+ EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
+
+ OMPPrivateScope LoopScope(*this);
+ if (EmitOMPFirstprivateClause(S, LoopScope)) {
+ // Emit implicit barrier to synchronize threads and avoid data races on
+ // initialization of firstprivate variables.
+ CGM.getOpenMPRuntime().emitBarrierCall(
+ *this, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
+ /*ForceSimpleCall=*/true);
+ }
+ EmitOMPPrivateClause(S, LoopScope);
+ HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
+ EmitOMPReductionClauseInit(S, LoopScope);
+ emitPrivateLoopCounters(*this, LoopScope, S.counters(),
+ S.private_counters());
+ emitPrivateLinearVars(*this, S, LoopScope);
+ (void)LoopScope.Privatize();
+
+ // Detect the loop schedule kind and chunk.
+ llvm::Value *Chunk;
+ OpenMPScheduleClauseKind ScheduleKind;
+ auto ScheduleInfo =
+ emitScheduleClause(*this, S, /*OuterRegion=*/false);
+ Chunk = ScheduleInfo.first;
+ ScheduleKind = ScheduleInfo.second.Kind;
+ const OpenMPScheduleClauseModifier M1 = ScheduleInfo.second.M1;
+ const OpenMPScheduleClauseModifier M2 = ScheduleInfo.second.M2;
+ const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
+ const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
+ const bool Ordered = S.getSingleClause<OMPOrderedClause>() != nullptr;
+ // OpenMP 4.5, 2.7.1 Loop Construct, Description.
+ // If the static schedule kind is specified or if the ordered clause is
+ // specified, and if the nonmonotonic modifier is not specified, the effect
+ // will be as if the monotonic modifier was specified.
+ if (RT.isStaticNonchunked(ScheduleKind,
+ /* Chunked */ Chunk != nullptr) &&
+ !Ordered) {
+ if (isOpenMPSimdDirective(S.getDirectiveKind()))
+ EmitOMPSimdInit(S, /*IsMonotonic=*/true);
+ // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
+ // When no chunk_size is specified, the iteration space is divided into
+ // chunks that are approximately equal in size, and at most one chunk is
+ // distributed to each thread. Note that the size of the chunks is
+ // unspecified in this case.
+ RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind,
+ IVSize, IVSigned, Ordered,
+ IL.getAddress(), LB.getAddress(),
+ UB.getAddress(), ST.getAddress());
+ auto LoopExit =
+ getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
+ // UB = min(UB, GlobalUB);
+ EmitIgnoredExpr(S.getEnsureUpperBound());
+ // IV = LB;
+ EmitIgnoredExpr(S.getInit());
+ // while (idx <= UB) { BODY; ++idx; }
+ EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
+ S.getInc(),
+ [&S, LoopExit](CodeGenFunction &CGF) {
+ CGF.EmitOMPLoopBody(S, LoopExit);
+ CGF.EmitStopPoint(&S);
+ },
+ [](CodeGenFunction &) {});
+ EmitBlock(LoopExit.getBlock());
+ // Tell the runtime we are done.
+ RT.emitForStaticFinish(*this, S.getLocStart());
+ } else {
+ const bool IsMonotonic = Ordered ||
+ ScheduleKind == OMPC_SCHEDULE_static ||
+ ScheduleKind == OMPC_SCHEDULE_unknown ||
+ M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
+ M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
+ // Emit the outer loop, which requests its work chunk [LB..UB] from
+ // runtime and runs the inner loop to process it.
+ EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
+ LB.getAddress(), UB.getAddress(), ST.getAddress(),
+ IL.getAddress(), Chunk);
+ }
+ EmitOMPReductionClauseFinal(S);
+ // Emit final copy of the lastprivate variables if IsLastIter != 0.
+ if (HasLastprivateClause)
+ EmitOMPLastprivateClauseFinal(
+ S, Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart())));
+ }
+ if (isOpenMPSimdDirective(S.getDirectiveKind())) {
+ EmitOMPSimdFinal(S);
+ }
+ // We're now done with the loop, so jump to the continuation block.
+ if (ContBlock) {
+ EmitBranch(ContBlock);
+ EmitBlock(ContBlock, true);
+ }
+ }
+ return HasLastprivateClause;
+}
+
+void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
+ LexicalScope Scope(*this, S.getSourceRange());
+ bool HasLastprivates = false;
+ auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
+ HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
+ };
+ CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
+ S.hasCancel());
+
+ // Emit an implicit barrier at the end.
+ if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
+ CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
+ }
+}
+
+void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
+ LexicalScope Scope(*this, S.getSourceRange());
+ bool HasLastprivates = false;
+ auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
+ HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
+ };
+ CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
+
+ // Emit an implicit barrier at the end.
+ if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
+ CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
+ }
+}
+
+static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
+ const Twine &Name,
+ llvm::Value *Init = nullptr) {
+ auto LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
+ if (Init)
+ CGF.EmitScalarInit(Init, LVal);
+ return LVal;
+}
+
+OpenMPDirectiveKind
+CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
+ auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt();
+ auto *CS = dyn_cast<CompoundStmt>(Stmt);
+ if (CS && CS->size() > 1) {
+ bool HasLastprivates = false;
+ auto &&CodeGen = [&S, CS, &HasLastprivates](CodeGenFunction &CGF) {
+ auto &C = CGF.CGM.getContext();
+ auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
+ // Emit helper vars inits.
+ LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
+ CGF.Builder.getInt32(0));
+ auto *GlobalUBVal = CGF.Builder.getInt32(CS->size() - 1);
+ LValue UB =
+ createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
+ LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
+ CGF.Builder.getInt32(1));
+ LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
+ CGF.Builder.getInt32(0));
+ // Loop counter.
+ LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
+ OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
+ CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
+ OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
+ CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
+ // Generate condition for loop.
+ BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
+ OK_Ordinary, S.getLocStart(),
+ /*fpContractable=*/false);
+ // Increment for loop counter.
+ UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue,
+ OK_Ordinary, S.getLocStart());
+ auto BodyGen = [CS, &S, &IV](CodeGenFunction &CGF) {
+ // Iterate through all sections and emit a switch construct:
+ // switch (IV) {
+ // case 0:
+ // <SectionStmt[0]>;
+ // break;
+ // ...
+ // case <NumSection> - 1:
+ // <SectionStmt[<NumSection> - 1]>;
+ // break;
+ // }
+ // .omp.sections.exit:
+ auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
+ auto *SwitchStmt = CGF.Builder.CreateSwitch(
+ CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB,
+ CS->size());
+ unsigned CaseNumber = 0;
+ for (auto *SubStmt : CS->children()) {
+ auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
+ CGF.EmitBlock(CaseBB);
+ SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
+ CGF.EmitStmt(SubStmt);
+ CGF.EmitBranch(ExitBB);
+ ++CaseNumber;
+ }
+ CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
+ };
+
+ CodeGenFunction::OMPPrivateScope LoopScope(CGF);
+ if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
+ // Emit implicit barrier to synchronize threads and avoid data races on
+ // initialization of firstprivate variables.
+ CGF.CGM.getOpenMPRuntime().emitBarrierCall(
+ CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
+ /*ForceSimpleCall=*/true);
+ }
+ CGF.EmitOMPPrivateClause(S, LoopScope);
+ HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
+ CGF.EmitOMPReductionClauseInit(S, LoopScope);
+ (void)LoopScope.Privatize();
+
+ // Emit static non-chunked loop.
+ CGF.CGM.getOpenMPRuntime().emitForStaticInit(
+ CGF, S.getLocStart(), OMPC_SCHEDULE_static, /*IVSize=*/32,
+ /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(),
+ LB.getAddress(), UB.getAddress(), ST.getAddress());
+ // UB = min(UB, GlobalUB);
+ auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
+ auto *MinUBGlobalUB = CGF.Builder.CreateSelect(
+ CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
+ CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
+ // IV = LB;
+ CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV);
+ // while (idx <= UB) { BODY; ++idx; }
+ CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
+ [](CodeGenFunction &) {});
+ // Tell the runtime we are done.
+ CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocStart());
+ CGF.EmitOMPReductionClauseFinal(S);
+
+ // Emit final copy of the lastprivate variables if IsLastIter != 0.
+ if (HasLastprivates)
+ CGF.EmitOMPLastprivateClauseFinal(
+ S, CGF.Builder.CreateIsNotNull(
+ CGF.EmitLoadOfScalar(IL, S.getLocStart())));
+ };
+
+ bool HasCancel = false;
+ if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
+ HasCancel = OSD->hasCancel();
+ else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
+ HasCancel = OPSD->hasCancel();
+ CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
+ HasCancel);
+ // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
+ // clause. Otherwise the barrier will be generated by the codegen for the
+ // directive.
+ if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
+ // Emit implicit barrier to synchronize threads and avoid data races on
+ // initialization of firstprivate variables.
+ CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
+ OMPD_unknown);
+ }
+ return OMPD_sections;
+ }
+ // If only one section is found, there is no need to generate a loop; emit it
+ // as a single region.
+ bool HasFirstprivates;
+ // No need to generate reductions for a sections directive with a single
+ // section region; we can use the original shared variables for all
+ // operations.
+ bool HasReductions = S.hasClausesOfKind<OMPReductionClause>();
+ // No need to generate lastprivates for a sections directive with a single
+ // section region; we can use the original shared variables for all
+ // calculations, with a barrier at the end of the sections.
+ bool HasLastprivates = S.hasClausesOfKind<OMPLastprivateClause>();
+ auto &&CodeGen = [Stmt, &S, &HasFirstprivates](CodeGenFunction &CGF) {
+ CodeGenFunction::OMPPrivateScope SingleScope(CGF);
+ HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope);
+ CGF.EmitOMPPrivateClause(S, SingleScope);
+ (void)SingleScope.Privatize();
+
+ CGF.EmitStmt(Stmt);
+ };
+ CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
+ llvm::None, llvm::None, llvm::None,
+ llvm::None);
+ // Emit barrier for firstprivates, lastprivates or reductions only if
+ // 'sections' directive has 'nowait' clause. Otherwise the barrier will be
+ // generated by the codegen for the directive.
+ if ((HasFirstprivates || HasLastprivates || HasReductions) &&
+ S.getSingleClause<OMPNowaitClause>()) {
+ // Emit implicit barrier to synchronize threads and avoid data races on
+ // initialization of firstprivate variables.
+ CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_unknown,
+ /*EmitChecks=*/false,
+ /*ForceSimpleCall=*/true);
+ }
+ return OMPD_single;
+}
+
+void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
+ LexicalScope Scope(*this, S.getSourceRange());
+ OpenMPDirectiveKind EmittedAs = EmitSections(S);
+ // Emit an implicit barrier at the end.
+ if (!S.getSingleClause<OMPNowaitClause>()) {
+ CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), EmittedAs);
+ }
+}
+
+void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
+ LexicalScope Scope(*this, S.getSourceRange());
+ auto &&CodeGen = [&S](CodeGenFunction &CGF) {
+ CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ };
+ CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen,
+ S.hasCancel());
+}
+
+void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
+ llvm::SmallVector<const Expr *, 8> CopyprivateVars;
+ llvm::SmallVector<const Expr *, 8> DestExprs;
+ llvm::SmallVector<const Expr *, 8> SrcExprs;
+ llvm::SmallVector<const Expr *, 8> AssignmentOps;
+ // Check if there are any 'copyprivate' clauses associated with this
+ // 'single' construct.
+ // Build a list of copyprivate variables along with helper expressions
+ // (<source>, <destination>, <destination> = <source> expressions).
+ for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) {
+ CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
+ DestExprs.append(C->destination_exprs().begin(),
+ C->destination_exprs().end());
+ SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
+ AssignmentOps.append(C->assignment_ops().begin(),
+ C->assignment_ops().end());
+ }
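+ // For example, a (hypothetical) '#pragma omp single copyprivate(a, b)'
+ // contributes 'a' and 'b' here, together with the '<dst> = <src>' helper
+ // assignments built by Sema.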
+ LexicalScope Scope(*this, S.getSourceRange());
+ // Emit code for 'single' region along with 'copyprivate' clauses
+ bool HasFirstprivates;
+ auto &&CodeGen = [&S, &HasFirstprivates](CodeGenFunction &CGF) {
+ CodeGenFunction::OMPPrivateScope SingleScope(CGF);
+ HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope);
+ CGF.EmitOMPPrivateClause(S, SingleScope);
+ (void)SingleScope.Privatize();
+
+ CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ };
+ CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
+ CopyprivateVars, DestExprs, SrcExprs,
+ AssignmentOps);
+ // Emit an implicit barrier at the end (to avoid data race on firstprivate
+ // init or if no 'nowait' clause was specified and no 'copyprivate' clause).
+ if ((!S.getSingleClause<OMPNowaitClause>() || HasFirstprivates) &&
+ CopyprivateVars.empty()) {
+ CGM.getOpenMPRuntime().emitBarrierCall(
+ *this, S.getLocStart(),
+ S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
+ }
+}
+
+void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
+ LexicalScope Scope(*this, S.getSourceRange());
+ auto &&CodeGen = [&S](CodeGenFunction &CGF) {
+ CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ };
+ CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart());
+}
+
+void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
+ LexicalScope Scope(*this, S.getSourceRange());
+ auto &&CodeGen = [&S](CodeGenFunction &CGF) {
+ CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ };
+ Expr *Hint = nullptr;
+ if (auto *HintClause = S.getSingleClause<OMPHintClause>())
+ Hint = HintClause->getHint();
+ CGM.getOpenMPRuntime().emitCriticalRegion(*this,
+ S.getDirectiveName().getAsString(),
+ CodeGen, S.getLocStart(), Hint);
+}
+
+void CodeGenFunction::EmitOMPParallelForDirective(
+ const OMPParallelForDirective &S) {
+ // Emit directive as a combined directive that consists of two implicit
+ // directives: 'parallel' with 'for' directive.
+ LexicalScope Scope(*this, S.getSourceRange());
+ (void)emitScheduleClause(*this, S, /*OuterRegion=*/true);
+ auto &&CodeGen = [&S](CodeGenFunction &CGF) {
+ CGF.EmitOMPWorksharingLoop(S);
+ };
+ emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen);
+}
+
+void CodeGenFunction::EmitOMPParallelForSimdDirective(
+ const OMPParallelForSimdDirective &S) {
+ // Emit directive as a combined directive that consists of two implicit
+ // directives: 'parallel' with 'for' directive.
+ LexicalScope Scope(*this, S.getSourceRange());
+ (void)emitScheduleClause(*this, S, /*OuterRegion=*/true);
+ auto &&CodeGen = [&S](CodeGenFunction &CGF) {
+ CGF.EmitOMPWorksharingLoop(S);
+ };
+ emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen);
+}
+
+void CodeGenFunction::EmitOMPParallelSectionsDirective(
+ const OMPParallelSectionsDirective &S) {
+ // Emit directive as a combined directive that consists of two implicit
+ // directives: 'parallel' with 'sections' directive.
+ LexicalScope Scope(*this, S.getSourceRange());
+ auto &&CodeGen = [&S](CodeGenFunction &CGF) {
+ (void)CGF.EmitSections(S);
+ };
+ emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen);
+}
+
+void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
+ // Emit outlined function for task construct.
+ LexicalScope Scope(*this, S.getSourceRange());
+ auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
+ auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
+ auto *I = CS->getCapturedDecl()->param_begin();
+ auto *PartId = std::next(I);
+ // The first function argument for tasks is a thread id; the second one is a
+ // part id (0 for tied tasks, >= 0 for untied tasks).
+ llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
+ // Get list of private variables.
+ llvm::SmallVector<const Expr *, 8> PrivateVars;
+ llvm::SmallVector<const Expr *, 8> PrivateCopies;
+ for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
+ auto IRef = C->varlist_begin();
+ for (auto *IInit : C->private_copies()) {
+ auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
+ if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
+ PrivateVars.push_back(*IRef);
+ PrivateCopies.push_back(IInit);
+ }
+ ++IRef;
+ }
+ }
+ EmittedAsPrivate.clear();
+ // Get list of firstprivate variables.
+ llvm::SmallVector<const Expr *, 8> FirstprivateVars;
+ llvm::SmallVector<const Expr *, 8> FirstprivateCopies;
+ llvm::SmallVector<const Expr *, 8> FirstprivateInits;
+ for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
+ auto IRef = C->varlist_begin();
+ auto IElemInitRef = C->inits().begin();
+ for (auto *IInit : C->private_copies()) {
+ auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
+ if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
+ FirstprivateVars.push_back(*IRef);
+ FirstprivateCopies.push_back(IInit);
+ FirstprivateInits.push_back(*IElemInitRef);
+ }
+ ++IRef, ++IElemInitRef;
+ }
+ }
+ // Build list of dependences.
+ llvm::SmallVector<std::pair<OpenMPDependClauseKind, const Expr *>, 8>
+ Dependences;
+ for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
+ for (auto *IRef : C->varlists()) {
+ Dependences.push_back(std::make_pair(C->getDependencyKind(), IRef));
+ }
+ }
+ auto &&CodeGen = [PartId, &S, &PrivateVars, &FirstprivateVars](
+ CodeGenFunction &CGF) {
+ // Set proper addresses for generated private copies.
+ auto *CS = cast<CapturedStmt>(S.getAssociatedStmt());
+ OMPPrivateScope Scope(CGF);
+ if (!PrivateVars.empty() || !FirstprivateVars.empty()) {
+ auto *CopyFn = CGF.Builder.CreateLoad(
+ CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)));
+ auto *PrivatesPtr = CGF.Builder.CreateLoad(
+ CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)));
+ // Map privates.
+ llvm::SmallVector<std::pair<const VarDecl *, Address>, 16>
+ PrivatePtrs;
+ llvm::SmallVector<llvm::Value *, 16> CallArgs;
+ CallArgs.push_back(PrivatesPtr);
+ for (auto *E : PrivateVars) {
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ Address PrivatePtr =
+ CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
+ PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
+ CallArgs.push_back(PrivatePtr.getPointer());
+ }
+ for (auto *E : FirstprivateVars) {
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ Address PrivatePtr =
+ CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
+ PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
+ CallArgs.push_back(PrivatePtr.getPointer());
+ }
+ CGF.EmitRuntimeCall(CopyFn, CallArgs);
+ for (auto &&Pair : PrivatePtrs) {
+ Address Replacement(CGF.Builder.CreateLoad(Pair.second),
+ CGF.getContext().getDeclAlign(Pair.first));
+ Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
+ }
+ }
+ (void)Scope.Privatize();
+ if (*PartId) {
+ // TODO: emit code for untied tasks.
+ }
+ CGF.EmitStmt(CS->getCapturedStmt());
+ };
+ auto OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
+ S, *I, OMPD_task, CodeGen);
+ // Check if we should emit tied or untied task.
+ bool Tied = !S.getSingleClause<OMPUntiedClause>();
+ // Check if the task is final.
+ llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
+ if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
+ // If the condition constant folds and can be elided, try to avoid emitting
+ // the condition and the dead arm of the if/else.
+ auto *Cond = Clause->getCondition();
+ bool CondConstant;
+ if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
+ Final.setInt(CondConstant);
+ else
+ Final.setPointer(EvaluateExprAsBool(Cond));
+ } else {
+ // By default the task is not final.
+ Final.setInt(/*IntVal=*/false);
+ }
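+ // For example, a (hypothetical) '#pragma omp task final(n > 100)' either
+ // folds the condition into Final's integer part or evaluates it at run
+ // time, as above.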
+ auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
+ const Expr *IfCond = nullptr;
+ for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
+ if (C->getNameModifier() == OMPD_unknown ||
+ C->getNameModifier() == OMPD_task) {
+ IfCond = C->getCondition();
+ break;
+ }
+ }
+ CGM.getOpenMPRuntime().emitTaskCall(
+ *this, S.getLocStart(), S, Tied, Final, OutlinedFn, SharedsTy,
+ CapturedStruct, IfCond, PrivateVars, PrivateCopies, FirstprivateVars,
+ FirstprivateCopies, FirstprivateInits, Dependences);
+}
+
+void CodeGenFunction::EmitOMPTaskyieldDirective(
+ const OMPTaskyieldDirective &S) {
+ CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart());
+}
+
+void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
+ CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier);
+}
+
+void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
+ CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart());
+}
+
+void CodeGenFunction::EmitOMPTaskgroupDirective(
+ const OMPTaskgroupDirective &S) {
+ LexicalScope Scope(*this, S.getSourceRange());
+ auto &&CodeGen = [&S](CodeGenFunction &CGF) {
+ CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ };
+ CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getLocStart());
+}
+
+void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
+ CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> {
+ if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) {
+ return llvm::makeArrayRef(FlushClause->varlist_begin(),
+ FlushClause->varlist_end());
+ }
+ return llvm::None;
+ }(), S.getLocStart());
+}
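+
+// For example, a (hypothetical) '#pragma omp flush(a, b)' passes the list
+// [a, b] to the runtime above, while a bare '#pragma omp flush' passes an
+// empty list.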
+
+void CodeGenFunction::EmitOMPDistributeDirective(
+ const OMPDistributeDirective &S) {
+ llvm_unreachable("CodeGen for 'omp distribute' is not supported yet.");
+}
+
+static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
+ const CapturedStmt *S) {
+ CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
+ CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
+ CGF.CapturedStmtInfo = &CapStmtInfo;
+ auto *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S);
+ Fn->addFnAttr(llvm::Attribute::NoInline);
+ return Fn;
+}
+
+void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
+ if (!S.getAssociatedStmt())
+ return;
+ LexicalScope Scope(*this, S.getSourceRange());
+ auto *C = S.getSingleClause<OMPSIMDClause>();
+ auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF) {
+ if (C) {
+ auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
+ llvm::SmallVector<llvm::Value *, 16> CapturedVars;
+ CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
+ auto *OutlinedFn = emitOutlinedOrderedFunction(CGM, CS);
+ CGF.EmitNounwindRuntimeCall(OutlinedFn, CapturedVars);
+ } else {
+ CGF.EmitStmt(
+ cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ }
+ };
+ CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart(), !C);
+}
+
+static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
+ QualType SrcType, QualType DestType,
+ SourceLocation Loc) {
+ assert(CGF.hasScalarEvaluationKind(DestType) &&
+ "DestType must have scalar evaluation kind.");
+ assert(!Val.isAggregate() && "Must be a scalar or complex.");
+ return Val.isScalar()
+ ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType,
+ Loc)
+ : CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType,
+ DestType, Loc);
+}
+
+static CodeGenFunction::ComplexPairTy
+convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
+ QualType DestType, SourceLocation Loc) {
+ assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
+ "DestType must have complex evaluation kind.");
+ CodeGenFunction::ComplexPairTy ComplexVal;
+ if (Val.isScalar()) {
+ // Convert the input element to the element type of the complex.
+ auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
+ auto ScalarVal = CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
+ DestElementType, Loc);
+ ComplexVal = CodeGenFunction::ComplexPairTy(
+ ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
+ } else {
+ assert(Val.isComplex() && "Must be a scalar or complex.");
+ auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
+ auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
+ ComplexVal.first = CGF.EmitScalarConversion(
+ Val.getComplexVal().first, SrcElementType, DestElementType, Loc);
+ ComplexVal.second = CGF.EmitScalarConversion(
+ Val.getComplexVal().second, SrcElementType, DestElementType, Loc);
+ }
+ return ComplexVal;
+}
+
+static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
+ LValue LVal, RValue RVal) {
+ if (LVal.isGlobalReg()) {
+ CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
+ } else {
+ CGF.EmitAtomicStore(RVal, LVal, IsSeqCst ? llvm::SequentiallyConsistent
+ : llvm::Monotonic,
+ LVal.isVolatile(), /*IsInit=*/false);
+ }
+}
+
+static void emitSimpleStore(CodeGenFunction &CGF, LValue LVal, RValue RVal,
+ QualType RValTy, SourceLocation Loc) {
+ switch (CGF.getEvaluationKind(LVal.getType())) {
+ case TEK_Scalar:
+ CGF.EmitStoreThroughLValue(RValue::get(convertToScalarValue(
+ CGF, RVal, RValTy, LVal.getType(), Loc)),
+ LVal);
+ break;
+ case TEK_Complex:
+ CGF.EmitStoreOfComplex(
+ convertToComplexValue(CGF, RVal, RValTy, LVal.getType(), Loc), LVal,
+ /*isInit=*/false);
+ break;
+ case TEK_Aggregate:
+ llvm_unreachable("Must be a scalar or complex.");
+ }
+}
+
+static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
+ const Expr *X, const Expr *V,
+ SourceLocation Loc) {
+ // v = x;
+ assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
+ assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
+ LValue XLValue = CGF.EmitLValue(X);
+ LValue VLValue = CGF.EmitLValue(V);
+ RValue Res = XLValue.isGlobalReg()
+ ? CGF.EmitLoadOfLValue(XLValue, Loc)
+ : CGF.EmitAtomicLoad(XLValue, Loc,
+ IsSeqCst ? llvm::SequentiallyConsistent
+ : llvm::Monotonic,
+ XLValue.isVolatile());
+ // OpenMP, 2.12.6, atomic Construct
+ // Any atomic construct with a seq_cst clause forces the atomically
+ // performed operation to include an implicit flush operation without a
+ // list.
+ if (IsSeqCst)
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
+ emitSimpleStore(CGF, VLValue, Res, X->getType().getNonReferenceType(), Loc);
+}
+
+static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
+ const Expr *X, const Expr *E,
+ SourceLocation Loc) {
+ // x = expr;
+ assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
+ emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
+ // OpenMP, 2.12.6, atomic Construct
+ // Any atomic construct with a seq_cst clause forces the atomically
+ // performed operation to include an implicit flush operation without a
+ // list.
+ if (IsSeqCst)
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
+}
+
+static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
+ RValue Update,
+ BinaryOperatorKind BO,
+ llvm::AtomicOrdering AO,
+ bool IsXLHSInRHSPart) {
+ auto &Context = CGF.CGM.getContext();
+ // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x'
+ // expression is simple and atomic is allowed for the given type for the
+ // target platform.
+ if (BO == BO_Comma || !Update.isScalar() ||
+ !Update.getScalarVal()->getType()->isIntegerTy() ||
+ !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
+ (Update.getScalarVal()->getType() !=
+ X.getAddress().getElementType())) ||
+ !X.getAddress().getElementType()->isIntegerTy() ||
+ !Context.getTargetInfo().hasBuiltinAtomic(
+ Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
+ return std::make_pair(false, RValue::get(nullptr));
+
+ llvm::AtomicRMWInst::BinOp RMWOp;
+ switch (BO) {
+ case BO_Add:
+ RMWOp = llvm::AtomicRMWInst::Add;
+ break;
+ case BO_Sub:
+ if (!IsXLHSInRHSPart)
+ return std::make_pair(false, RValue::get(nullptr));
+ RMWOp = llvm::AtomicRMWInst::Sub;
+ break;
+ case BO_And:
+ RMWOp = llvm::AtomicRMWInst::And;
+ break;
+ case BO_Or:
+ RMWOp = llvm::AtomicRMWInst::Or;
+ break;
+ case BO_Xor:
+ RMWOp = llvm::AtomicRMWInst::Xor;
+ break;
+ case BO_LT:
+ RMWOp = X.getType()->hasSignedIntegerRepresentation()
+ ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
+ : llvm::AtomicRMWInst::Max)
+ : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
+ : llvm::AtomicRMWInst::UMax);
+ break;
+ case BO_GT:
+ RMWOp = X.getType()->hasSignedIntegerRepresentation()
+ ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
+ : llvm::AtomicRMWInst::Min)
+ : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
+ : llvm::AtomicRMWInst::UMin);
+ break;
+ case BO_Assign:
+ RMWOp = llvm::AtomicRMWInst::Xchg;
+ break;
+ case BO_Mul:
+ case BO_Div:
+ case BO_Rem:
+ case BO_Shl:
+ case BO_Shr:
+ case BO_LAnd:
+ case BO_LOr:
+ return std::make_pair(false, RValue::get(nullptr));
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ case BO_LE:
+ case BO_GE:
+ case BO_EQ:
+ case BO_NE:
+ case BO_AddAssign:
+ case BO_SubAssign:
+ case BO_AndAssign:
+ case BO_OrAssign:
+ case BO_XorAssign:
+ case BO_MulAssign:
+ case BO_DivAssign:
+ case BO_RemAssign:
+ case BO_ShlAssign:
+ case BO_ShrAssign:
+ case BO_Comma:
+ llvm_unreachable("Unsupported atomic update operation");
+ }
+ auto *UpdateVal = Update.getScalarVal();
+ if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
+ UpdateVal = CGF.Builder.CreateIntCast(
+ IC, X.getAddress().getElementType(),
+ X.getType()->hasSignedIntegerRepresentation());
+ }
+ auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(), UpdateVal, AO);
+ return std::make_pair(true, RValue::get(Res));
+}
+
+std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
+ LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
+ llvm::AtomicOrdering AO, SourceLocation Loc,
+ const llvm::function_ref<RValue(RValue)> &CommonGen) {
+ // Update expressions are allowed to have the following forms:
+ // x binop= expr; -> xrval binop expr;
+ // x++, ++x -> xrval + 1;
+ // x--, --x -> xrval - 1;
+ // x = x binop expr; -> xrval binop expr;
+ // x = expr binop x; -> expr binop xrval;
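+ // For example (illustrative), 'x += v;' on an integer 'x' can be emitted as
+ // an 'atomicrmw add', while 'x = 2 * x;' has no atomicrmw equivalent and
+ // falls back to the compare-and-swap path below.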
+ auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
+ if (!Res.first) {
+ if (X.isGlobalReg()) {
+ // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
+ // 'xrval'.
+ EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
+ } else {
+ // Perform compare-and-swap procedure.
+ EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
+ }
+ }
+ return Res;
+}
+
+static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
+ const Expr *X, const Expr *E,
+ const Expr *UE, bool IsXLHSInRHSPart,
+ SourceLocation Loc) {
+ assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
+ "Update expr in 'atomic update' must be a binary operator.");
+ auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
+ // Update expressions are allowed to have the following forms:
+ // x binop= expr; -> xrval binop expr;
+ // x++, ++x -> xrval + 1;
+ // x--, --x -> xrval - 1;
+ // x = x binop expr; -> xrval binop expr;
+ // x = expr binop x; -> expr binop xrval;
+ assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
+ LValue XLValue = CGF.EmitLValue(X);
+ RValue ExprRValue = CGF.EmitAnyExpr(E);
+ auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
+ auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
+ auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
+ auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
+ auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
+ auto Gen =
+ [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue {
+ CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
+ CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
+ return CGF.EmitAnyExpr(UE);
+ };
+ (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
+ XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
+ // OpenMP, 2.12.6, atomic Construct
+ // Any atomic construct with a seq_cst clause forces the atomically
+ // performed operation to include an implicit flush operation without a
+ // list.
+ if (IsSeqCst)
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
+}
+
+static RValue convertToType(CodeGenFunction &CGF, RValue Value,
+ QualType SourceType, QualType ResType,
+ SourceLocation Loc) {
+ switch (CGF.getEvaluationKind(ResType)) {
+ case TEK_Scalar:
+ return RValue::get(
+ convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
+ case TEK_Complex: {
+ auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
+ return RValue::getComplex(Res.first, Res.second);
+ }
+ case TEK_Aggregate:
+ break;
+ }
+ llvm_unreachable("Must be a scalar or complex.");
+}
+
+static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
+ bool IsPostfixUpdate, const Expr *V,
+ const Expr *X, const Expr *E,
+ const Expr *UE, bool IsXLHSInRHSPart,
+ SourceLocation Loc) {
+ assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
+ assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
+ RValue NewVVal;
+ LValue VLValue = CGF.EmitLValue(V);
+ LValue XLValue = CGF.EmitLValue(X);
+ RValue ExprRValue = CGF.EmitAnyExpr(E);
+ auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
+ QualType NewVValType;
+ if (UE) {
+ // 'x' is updated with some additional value.
+ assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
+ "Update expr in 'atomic capture' must be a binary operator.");
+ auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
+ // Update expressions are allowed to have the following forms:
+ // x binop= expr; -> xrval binop expr;
+ // x++, ++x -> xrval + 1;
+ // x--, --x -> xrval - 1;
+ // x = x binop expr; -> xrval binop expr;
+ // x = expr binop x; -> expr binop xrval;
+ auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
+ auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
+ auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
+ NewVValType = XRValExpr->getType();
+ auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
+ auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
+ IsSeqCst, IsPostfixUpdate](RValue XRValue) -> RValue {
+ CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
+ CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
+ RValue Res = CGF.EmitAnyExpr(UE);
+ NewVVal = IsPostfixUpdate ? XRValue : Res;
+ return Res;
+ };
+ auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
+ XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
+ if (Res.first) {
+ // 'atomicrmw' instruction was generated.
+ if (IsPostfixUpdate) {
+ // Use old value from 'atomicrmw'.
+ NewVVal = Res.second;
+ } else {
+ // 'atomicrmw' does not provide the new value, so evaluate it using the
+ // old value of 'x'.
+ CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
+ CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
+ NewVVal = CGF.EmitAnyExpr(UE);
+ }
+ }
+ } else {
+ // 'x' is simply rewritten with some 'expr'.
+ NewVValType = X->getType().getNonReferenceType();
+ ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
+ X->getType().getNonReferenceType(), Loc);
+ auto &&Gen = [&CGF, &NewVVal, ExprRValue](RValue XRValue) -> RValue {
+ NewVVal = XRValue;
+ return ExprRValue;
+ };
+ // Try to perform atomicrmw xchg, otherwise simple exchange.
+ auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
+ XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
+ Loc, Gen);
+ if (Res.first) {
+ // 'atomicrmw' instruction was generated.
+ NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
+ }
+ }
+ // Emit post-update store to 'v' of old/new 'x' value.
+ emitSimpleStore(CGF, VLValue, NewVVal, NewVValType, Loc);
+ // OpenMP, 2.12.6, atomic Construct
+ // Any atomic construct with a seq_cst clause forces the atomically
+ // performed operation to include an implicit flush operation without a
+ // list.
+ if (IsSeqCst)
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
+}
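+
+// For example (illustrative), under '#pragma omp atomic capture' the form
+// '{ v = x; x += k; }' stores the old value of 'x' into 'v' (postfix
+// update), while '{ x += k; v = x; }' stores the updated value.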
+
+static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
+ bool IsSeqCst, bool IsPostfixUpdate,
+ const Expr *X, const Expr *V, const Expr *E,
+ const Expr *UE, bool IsXLHSInRHSPart,
+ SourceLocation Loc) {
+ switch (Kind) {
+ case OMPC_read:
+ EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
+ break;
+ case OMPC_write:
+ EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
+ break;
+ case OMPC_unknown:
+ case OMPC_update:
+ EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
+ break;
+ case OMPC_capture:
+ EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
+ IsXLHSInRHSPart, Loc);
+ break;
+ case OMPC_if:
+ case OMPC_final:
+ case OMPC_num_threads:
+ case OMPC_private:
+ case OMPC_firstprivate:
+ case OMPC_lastprivate:
+ case OMPC_reduction:
+ case OMPC_safelen:
+ case OMPC_simdlen:
+ case OMPC_collapse:
+ case OMPC_default:
+ case OMPC_seq_cst:
+ case OMPC_shared:
+ case OMPC_linear:
+ case OMPC_aligned:
+ case OMPC_copyin:
+ case OMPC_copyprivate:
+ case OMPC_flush:
+ case OMPC_proc_bind:
+ case OMPC_schedule:
+ case OMPC_ordered:
+ case OMPC_nowait:
+ case OMPC_untied:
+ case OMPC_threadprivate:
+ case OMPC_depend:
+ case OMPC_mergeable:
+ case OMPC_device:
+ case OMPC_threads:
+ case OMPC_simd:
+ case OMPC_map:
+ case OMPC_num_teams:
+ case OMPC_thread_limit:
+ case OMPC_priority:
+ case OMPC_grainsize:
+ case OMPC_nogroup:
+ case OMPC_num_tasks:
+ case OMPC_hint:
+ llvm_unreachable("Clause is not allowed in 'omp atomic'.");
+ }
+}
+
+void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
+ bool IsSeqCst = S.getSingleClause<OMPSeqCstClause>();
+ OpenMPClauseKind Kind = OMPC_unknown;
+ for (auto *C : S.clauses()) {
+ // Find first clause (skip seq_cst clause, if it is first).
+ if (C->getClauseKind() != OMPC_seq_cst) {
+ Kind = C->getClauseKind();
+ break;
+ }
+ }
+
+ const auto *CS =
+ S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
+ if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) {
+ enterFullExpression(EWC);
+ }
+ // Processing for statements under 'atomic capture'.
+ if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
+ for (const auto *C : Compound->body()) {
+ if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) {
+ enterFullExpression(EWC);
+ }
+ }
+ }
+
+ LexicalScope Scope(*this, S.getSourceRange());
+ auto &&CodeGen = [&S, Kind, IsSeqCst, CS](CodeGenFunction &CGF) {
+ CGF.EmitStopPoint(CS);
+ EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
+ S.getV(), S.getExpr(), S.getUpdateExpr(),
+ S.isXLHSInRHSPart(), S.getLocStart());
+ };
+ CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
+}
+
+void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
+ LexicalScope Scope(*this, S.getSourceRange());
+ const CapturedStmt &CS = *cast<CapturedStmt>(S.getAssociatedStmt());
+
+ llvm::SmallVector<llvm::Value *, 16> CapturedVars;
+ GenerateOpenMPCapturedVars(CS, CapturedVars);
+
+ llvm::Function *Fn = nullptr;
+ llvm::Constant *FnID = nullptr;
+
+ // Check if we have any if clause associated with the directive.
+ const Expr *IfCond = nullptr;
+
+ if (auto *C = S.getSingleClause<OMPIfClause>()) {
+ IfCond = C->getCondition();
+ }
+
+ // Check if we have any device clause associated with the directive.
+ const Expr *Device = nullptr;
+ if (auto *C = S.getSingleClause<OMPDeviceClause>()) {
+ Device = C->getDevice();
+ }
+
+ // Check if we have an if clause whose conditional always evaluates to false
+ // or if we do not have any targets specified. If so the target region is not
+ // an offload entry point.
+ bool IsOffloadEntry = true;
+ if (IfCond) {
+ bool Val;
+ if (ConstantFoldsToSimpleInteger(IfCond, Val) && !Val)
+ IsOffloadEntry = false;
+ }
+ if (CGM.getLangOpts().OMPTargetTriples.empty())
+ IsOffloadEntry = false;
+
+ assert(CurFuncDecl && "No parent declaration for target region!");
+ StringRef ParentName;
+ // In case we have Ctors/Dtors we use the complete type variant to produce
+ // the mangling of the device outlined kernel.
+ if (auto *D = dyn_cast<CXXConstructorDecl>(CurFuncDecl))
+ ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
+ else if (auto *D = dyn_cast<CXXDestructorDecl>(CurFuncDecl))
+ ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
+ else
+ ParentName =
+ CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CurFuncDecl)));
+
+ CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID,
+ IsOffloadEntry);
+
+ CGM.getOpenMPRuntime().emitTargetCall(*this, S, Fn, FnID, IfCond, Device,
+ CapturedVars);
+}
+
+void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &) {
+ llvm_unreachable("CodeGen for 'omp teams' is not supported yet.");
+}
+
+void CodeGenFunction::EmitOMPCancellationPointDirective(
+ const OMPCancellationPointDirective &S) {
+ CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getLocStart(),
+ S.getCancelRegion());
+}
+
+void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
+ const Expr *IfCond = nullptr;
+ for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
+ if (C->getNameModifier() == OMPD_unknown ||
+ C->getNameModifier() == OMPD_cancel) {
+ IfCond = C->getCondition();
+ break;
+ }
+ }
+ CGM.getOpenMPRuntime().emitCancelCall(*this, S.getLocStart(), IfCond,
+ S.getCancelRegion());
+}
+
+CodeGenFunction::JumpDest
+CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
+ if (Kind == OMPD_parallel || Kind == OMPD_task)
+ return ReturnBlock;
+ assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
+ Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for);
+ return BreakContinueStack.back().BreakBlock;
+}
+
+// Generate the instructions for the '#pragma omp target data' directive.
+void CodeGenFunction::EmitOMPTargetDataDirective(
+ const OMPTargetDataDirective &S) {
+ // For now, just emit the code inside the construct.
+ auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
+ CGM.getOpenMPRuntime().emitInlinedDirective(
+ *this, OMPD_target_data,
+ [&CS](CodeGenFunction &CGF) { CGF.EmitStmt(CS->getCapturedStmt()); });
+}
+
+void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) {
+ // For now, just emit the code inside the construct.
+ auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
+ CGM.getOpenMPRuntime().emitInlinedDirective(
+ *this, OMPD_taskloop,
+ [&CS](CodeGenFunction &CGF) { CGF.EmitStmt(CS->getCapturedStmt()); });
+}
+
+void CodeGenFunction::EmitOMPTaskLoopSimdDirective(
+ const OMPTaskLoopSimdDirective &S) {
+ // For now, just emit the code inside the construct.
+ auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
+ CGM.getOpenMPRuntime().emitInlinedDirective(
+ *this, OMPD_taskloop_simd,
+ [&CS](CodeGenFunction &CGF) { CGF.EmitStmt(CS->getCapturedStmt()); });
+}
+
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp
new file mode 100644
index 0000000..4fb76710
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp
@@ -0,0 +1,178 @@
+//===--- CGVTT.cpp - Emit LLVM Code for C++ VTTs --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of VTTs (vtable tables).
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "CGCXXABI.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/VTTBuilder.h"
+using namespace clang;
+using namespace CodeGen;
+
+static llvm::GlobalVariable *
+GetAddrOfVTTVTable(CodeGenVTables &CGVT, CodeGenModule &CGM,
+ const CXXRecordDecl *MostDerivedClass,
+ const VTTVTable &VTable,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ llvm::DenseMap<BaseSubobject, uint64_t> &AddressPoints) {
+ if (VTable.getBase() == MostDerivedClass) {
+ assert(VTable.getBaseOffset().isZero() &&
+ "Most derived class vtable must have a zero offset!");
+ // This is a regular vtable.
+ return CGM.getCXXABI().getAddrOfVTable(MostDerivedClass, CharUnits());
+ }
+
+ return CGVT.GenerateConstructionVTable(MostDerivedClass,
+ VTable.getBaseSubobject(),
+ VTable.isVirtual(),
+ Linkage,
+ AddressPoints);
+}
+
+void
+CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD) {
+ VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/true);
+
+ llvm::Type *Int8PtrTy = CGM.Int8PtrTy, *Int64Ty = CGM.Int64Ty;
+ llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
+
+ SmallVector<llvm::GlobalVariable *, 8> VTables;
+ SmallVector<VTableAddressPointsMapTy, 8> VTableAddressPoints;
+ for (const VTTVTable *i = Builder.getVTTVTables().begin(),
+ *e = Builder.getVTTVTables().end(); i != e; ++i) {
+ VTableAddressPoints.push_back(VTableAddressPointsMapTy());
+ VTables.push_back(GetAddrOfVTTVTable(*this, CGM, RD, *i, Linkage,
+ VTableAddressPoints.back()));
+ }
+
+ SmallVector<llvm::Constant *, 8> VTTComponents;
+ for (const VTTComponent *i = Builder.getVTTComponents().begin(),
+ *e = Builder.getVTTComponents().end(); i != e; ++i) {
+ const VTTVTable &VTTVT = Builder.getVTTVTables()[i->VTableIndex];
+ llvm::GlobalVariable *VTable = VTables[i->VTableIndex];
+ uint64_t AddressPoint;
+ if (VTTVT.getBase() == RD) {
+ // Just get the address point for the regular vtable.
+ AddressPoint =
+ getItaniumVTableContext().getVTableLayout(RD).getAddressPoint(
+ i->VTableBase);
+ assert(AddressPoint != 0 && "Did not find vtable address point!");
+ } else {
+ AddressPoint = VTableAddressPoints[i->VTableIndex].lookup(i->VTableBase);
+ assert(AddressPoint != 0 && "Did not find ctor vtable address point!");
+ }
+
+ llvm::Value *Idxs[] = {
+ llvm::ConstantInt::get(Int64Ty, 0),
+ llvm::ConstantInt::get(Int64Ty, AddressPoint)
+ };
+
+ llvm::Constant *Init = llvm::ConstantExpr::getInBoundsGetElementPtr(
+ VTable->getValueType(), VTable, Idxs);
+
+ Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
+
+ VTTComponents.push_back(Init);
+ }
+
+ llvm::Constant *Init = llvm::ConstantArray::get(ArrayType, VTTComponents);
+
+ VTT->setInitializer(Init);
+
+ // Set the correct linkage.
+ VTT->setLinkage(Linkage);
+
+ if (CGM.supportsCOMDAT() && VTT->isWeakForLinker())
+ VTT->setComdat(CGM.getModule().getOrInsertComdat(VTT->getName()));
+
+ // Set the right visibility.
+ CGM.setGlobalVisibility(VTT, RD);
+}
+
+llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
+ assert(RD->getNumVBases() && "Only classes with virtual bases need a VTT");
+
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ cast<ItaniumMangleContext>(CGM.getCXXABI().getMangleContext())
+ .mangleCXXVTT(RD, Out);
+ StringRef Name = OutName.str();
+
+ // This will also defer the definition of the VTT.
+ (void) CGM.getCXXABI().getAddrOfVTable(RD, CharUnits());
+
+ VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
+
+ llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(CGM.Int8PtrTy, Builder.getVTTComponents().size());
+
+ llvm::GlobalVariable *GV =
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType,
+ llvm::GlobalValue::ExternalLinkage);
+ GV->setUnnamedAddr(true);
+ return GV;
+}
+
+uint64_t CodeGenVTables::getSubVTTIndex(const CXXRecordDecl *RD,
+ BaseSubobject Base) {
+ BaseSubobjectPairTy ClassSubobjectPair(RD, Base);
+
+ SubVTTIndiciesMapTy::iterator I = SubVTTIndicies.find(ClassSubobjectPair);
+ if (I != SubVTTIndicies.end())
+ return I->second;
+
+ VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
+
+ for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator I =
+ Builder.getSubVTTIndicies().begin(),
+ E = Builder.getSubVTTIndicies().end(); I != E; ++I) {
+ // Insert all indices.
+ BaseSubobjectPairTy ClassSubobjectPair(RD, I->first);
+
+ SubVTTIndicies.insert(std::make_pair(ClassSubobjectPair, I->second));
+ }
+
+ I = SubVTTIndicies.find(ClassSubobjectPair);
+ assert(I != SubVTTIndicies.end() && "Did not find index!");
+
+ return I->second;
+}
+
+uint64_t
+CodeGenVTables::getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD,
+ BaseSubobject Base) {
+ SecondaryVirtualPointerIndicesMapTy::iterator I =
+ SecondaryVirtualPointerIndices.find(std::make_pair(RD, Base));
+
+ if (I != SecondaryVirtualPointerIndices.end())
+ return I->second;
+
+ VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
+
+ // Insert all secondary vpointer indices.
+ for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator I =
+ Builder.getSecondaryVirtualPointerIndices().begin(),
+ E = Builder.getSecondaryVirtualPointerIndices().end(); I != E; ++I) {
+ std::pair<const CXXRecordDecl *, BaseSubobject> Pair =
+ std::make_pair(RD, I->first);
+
+ SecondaryVirtualPointerIndices.insert(std::make_pair(Pair, I->second));
+ }
+
+ I = SecondaryVirtualPointerIndices.find(std::make_pair(RD, Base));
+ assert(I != SecondaryVirtualPointerIndices.end() && "Did not find index!");
+
+ return I->second;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
new file mode 100644
index 0000000..a40aab2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
@@ -0,0 +1,996 @@
+//===--- CGVTables.cpp - Emit LLVM Code for C++ vtables -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
+#include "CodeGenModule.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include <algorithm>
+#include <cstdio>
+
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenVTables::CodeGenVTables(CodeGenModule &CGM)
+ : CGM(CGM), VTContext(CGM.getContext().getVTableContext()) {}
+
+llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
+ const ThunkInfo &Thunk) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+ // Compute the mangled name.
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ if (const CXXDestructorDecl* DD = dyn_cast<CXXDestructorDecl>(MD))
+ getCXXABI().getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(),
+ Thunk.This, Out);
+ else
+ getCXXABI().getMangleContext().mangleThunk(MD, Thunk, Out);
+
+ llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD);
+ return GetOrCreateLLVMFunction(Name, Ty, GD, /*ForVTable=*/true,
+ /*DontDefer=*/true, /*IsThunk=*/true);
+}
+
+static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk, llvm::Function *Fn) {
+ CGM.setGlobalVisibility(Fn, MD);
+}
+
+static void setThunkProperties(CodeGenModule &CGM, const ThunkInfo &Thunk,
+ llvm::Function *ThunkFn, bool ForVTable,
+ GlobalDecl GD) {
+ CGM.setFunctionLinkage(GD, ThunkFn);
+ CGM.getCXXABI().setThunkLinkage(ThunkFn, ForVTable, GD,
+ !Thunk.Return.isEmpty());
+
+ // Set the right visibility.
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ setThunkVisibility(CGM, MD, Thunk, ThunkFn);
+
+ if (CGM.supportsCOMDAT() && ThunkFn->isWeakForLinker())
+ ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName()));
+}
+
+#ifndef NDEBUG
+static bool similar(const ABIArgInfo &infoL, CanQualType typeL,
+ const ABIArgInfo &infoR, CanQualType typeR) {
+ return (infoL.getKind() == infoR.getKind() &&
+ (typeL == typeR ||
+ (isa<PointerType>(typeL) && isa<PointerType>(typeR)) ||
+ (isa<ReferenceType>(typeL) && isa<ReferenceType>(typeR))));
+}
+#endif
+
+static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
+ QualType ResultType, RValue RV,
+ const ThunkInfo &Thunk) {
+ // Emit the return adjustment.
+ bool NullCheckValue = !ResultType->isReferenceType();
+
+ llvm::BasicBlock *AdjustNull = nullptr;
+ llvm::BasicBlock *AdjustNotNull = nullptr;
+ llvm::BasicBlock *AdjustEnd = nullptr;
+
+ llvm::Value *ReturnValue = RV.getScalarVal();
+
+ if (NullCheckValue) {
+ AdjustNull = CGF.createBasicBlock("adjust.null");
+ AdjustNotNull = CGF.createBasicBlock("adjust.notnull");
+ AdjustEnd = CGF.createBasicBlock("adjust.end");
+
+ llvm::Value *IsNull = CGF.Builder.CreateIsNull(ReturnValue);
+ CGF.Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull);
+ CGF.EmitBlock(AdjustNotNull);
+ }
+
+ auto ClassDecl = ResultType->getPointeeType()->getAsCXXRecordDecl();
+ auto ClassAlign = CGF.CGM.getClassPointerAlignment(ClassDecl);
+ ReturnValue = CGF.CGM.getCXXABI().performReturnAdjustment(CGF,
+ Address(ReturnValue, ClassAlign),
+ Thunk.Return);
+
+ if (NullCheckValue) {
+ CGF.Builder.CreateBr(AdjustEnd);
+ CGF.EmitBlock(AdjustNull);
+ CGF.Builder.CreateBr(AdjustEnd);
+ CGF.EmitBlock(AdjustEnd);
+
+ llvm::PHINode *PHI = CGF.Builder.CreatePHI(ReturnValue->getType(), 2);
+ PHI->addIncoming(ReturnValue, AdjustNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()),
+ AdjustNull);
+ ReturnValue = PHI;
+ }
+
+ return RValue::get(ReturnValue);
+}
+
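+// For example, with an illustrative covariant-return hierarchy:
+//
+//   struct A { virtual void f(); };
+//   struct B { virtual B *clone(); };
+//   struct D : A, B { D *clone() override; };
+//
+// the clone entry in the B-in-D vtable is a thunk whose return adjustment
+// converts the returned D* to a B* by adding the offset of B within D,
+// skipping the adjustment (as above) when the result is null.
+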
+// This function does roughly the same thing as GenerateThunk, but in a
+// very different way, so that va_start and va_end work correctly.
+// FIXME: This function assumes "this" is the first non-sret LLVM argument of
+// a function, and that there is an alloca built in the entry block
+// for all accesses to "this".
+// FIXME: This function assumes there is only one "ret" statement per function.
+// FIXME: Cloning isn't correct in the presence of indirect goto!
+// FIXME: This implementation of thunks bloats codesize by duplicating the
+// function definition. There are alternatives:
+// 1. Add some sort of stub support to LLVM for cases where we can
+// do a this adjustment, then a sibcall.
+// 2. We could transform the definition to take a va_list instead of an
+// actual variable argument list, then have the thunks (including a
+// no-op thunk for the regular definition) call va_start/va_end.
+// There's a bit of per-call overhead for this solution, but it's
+// better for codesize if the definition is long.
+llvm::Function *
+CodeGenFunction::GenerateVarArgsThunk(llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo,
+ GlobalDecl GD, const ThunkInfo &Thunk) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ QualType ResultType = FPT->getReturnType();
+
+ // Get the original function
+ assert(FnInfo.isVariadic());
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(FnInfo);
+ llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
+ llvm::Function *BaseFn = cast<llvm::Function>(Callee);
+
+ // Clone to thunk.
+ llvm::ValueToValueMapTy VMap;
+ llvm::Function *NewFn = llvm::CloneFunction(BaseFn, VMap,
+ /*ModuleLevelChanges=*/false);
+ CGM.getModule().getFunctionList().push_back(NewFn);
+ Fn->replaceAllUsesWith(NewFn);
+ NewFn->takeName(Fn);
+ Fn->eraseFromParent();
+ Fn = NewFn;
+
+ // "Initialize" CGF (minimally).
+ CurFn = Fn;
+
+ // Get the "this" value
+ llvm::Function::arg_iterator AI = Fn->arg_begin();
+ if (CGM.ReturnTypeUsesSRet(FnInfo))
+ ++AI;
+
+ // Find the first store of "this", which will be to the alloca associated
+ // with "this".
+ Address ThisPtr(&*AI, CGM.getClassPointerAlignment(MD->getParent()));
+ llvm::BasicBlock *EntryBB = &Fn->front();
+ llvm::BasicBlock::iterator ThisStore =
+ std::find_if(EntryBB->begin(), EntryBB->end(), [&](llvm::Instruction &I) {
+ return isa<llvm::StoreInst>(I) &&
+ I.getOperand(0) == ThisPtr.getPointer();
+ });
+ assert(ThisStore != EntryBB->end() &&
+ "Store of this should be in entry block?");
+ // Adjust "this", if necessary.
+ Builder.SetInsertPoint(&*ThisStore);
+ llvm::Value *AdjustedThisPtr =
+ CGM.getCXXABI().performThisAdjustment(*this, ThisPtr, Thunk.This);
+ ThisStore->setOperand(0, AdjustedThisPtr);
+
+ if (!Thunk.Return.isEmpty()) {
+ // Fix up the returned value, if necessary.
+ for (llvm::BasicBlock &BB : *Fn) {
+ llvm::Instruction *T = BB.getTerminator();
+ if (isa<llvm::ReturnInst>(T)) {
+ RValue RV = RValue::get(T->getOperand(0));
+ T->eraseFromParent();
+ Builder.SetInsertPoint(&BB);
+ RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);
+ Builder.CreateRet(RV.getScalarVal());
+ break;
+ }
+ }
+ }
+
+ return Fn;
+}
+
+void CodeGenFunction::StartThunk(llvm::Function *Fn, GlobalDecl GD,
+ const CGFunctionInfo &FnInfo) {
+ assert(!CurGD.getDecl() && "CurGD was already set!");
+ CurGD = GD;
+ CurFuncIsThunk = true;
+
+ // Build FunctionArgs.
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ QualType ThisType = MD->getThisType(getContext());
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ QualType ResultType = CGM.getCXXABI().HasThisReturn(GD)
+ ? ThisType
+ : CGM.getCXXABI().hasMostDerivedReturn(GD)
+ ? CGM.getContext().VoidPtrTy
+ : FPT->getReturnType();
+ FunctionArgList FunctionArgs;
+
+ // Create the implicit 'this' parameter declaration.
+ CGM.getCXXABI().buildThisParam(*this, FunctionArgs);
+
+ // Add the rest of the parameters.
+ FunctionArgs.append(MD->param_begin(), MD->param_end());
+
+ if (isa<CXXDestructorDecl>(MD))
+ CGM.getCXXABI().addImplicitStructorParams(*this, ResultType, FunctionArgs);
+
+ // Start defining the function.
+ StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
+ MD->getLocation(), MD->getLocation());
+
+ // Since we didn't pass a GlobalDecl to StartFunction, do this ourselves.
+ CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
+ CXXThisValue = CXXABIThisValue;
+ CurCodeDecl = MD;
+ CurFuncDecl = MD;
+}
+
+void CodeGenFunction::FinishThunk() {
+ // Clear these to restore the invariants expected by
+ // StartFunction/FinishFunction.
+ CurCodeDecl = nullptr;
+ CurFuncDecl = nullptr;
+
+ FinishFunction();
+}
+
+void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Value *Callee,
+ const ThunkInfo *Thunk) {
+ assert(isa<CXXMethodDecl>(CurGD.getDecl()) &&
+ "Please use a new CGF for this thunk");
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CurGD.getDecl());
+
+ // Adjust the 'this' pointer if necessary
+ llvm::Value *AdjustedThisPtr =
+ Thunk ? CGM.getCXXABI().performThisAdjustment(
+ *this, LoadCXXThisAddress(), Thunk->This)
+ : LoadCXXThis();
+
+ if (CurFnInfo->usesInAlloca()) {
+ // We don't handle return adjusting thunks, because they require us to call
+ // the copy constructor. For now, fall through and pretend the return
+ // adjustment was empty so we don't crash.
+ if (Thunk && !Thunk->Return.isEmpty()) {
+ CGM.ErrorUnsupported(
+ MD, "non-trivial argument copy for return-adjusting thunk");
+ }
+ EmitMustTailThunk(MD, AdjustedThisPtr, Callee);
+ return;
+ }
+
+ // Start building CallArgs.
+ CallArgList CallArgs;
+ QualType ThisType = MD->getThisType(getContext());
+ CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);
+
+ if (isa<CXXDestructorDecl>(MD))
+ CGM.getCXXABI().adjustCallArgsForDestructorThunk(*this, CurGD, CallArgs);
+
+ // Add the rest of the arguments.
+ for (const ParmVarDecl *PD : MD->params())
+ EmitDelegateCallArg(CallArgs, PD, PD->getLocStart());
+
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+
+#ifndef NDEBUG
+ const CGFunctionInfo &CallFnInfo =
+ CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT,
+ RequiredArgs::forPrototypePlus(FPT, 1));
+ assert(CallFnInfo.getRegParm() == CurFnInfo->getRegParm() &&
+ CallFnInfo.isNoReturn() == CurFnInfo->isNoReturn() &&
+ CallFnInfo.getCallingConvention() == CurFnInfo->getCallingConvention());
+ assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types
+ similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
+ CurFnInfo->getReturnInfo(), CurFnInfo->getReturnType()));
+ assert(CallFnInfo.arg_size() == CurFnInfo->arg_size());
+ for (unsigned i = 0, e = CurFnInfo->arg_size(); i != e; ++i)
+ assert(similar(CallFnInfo.arg_begin()[i].info,
+ CallFnInfo.arg_begin()[i].type,
+ CurFnInfo->arg_begin()[i].info,
+ CurFnInfo->arg_begin()[i].type));
+#endif
+
+ // Determine whether we have a return value slot to use.
+ QualType ResultType = CGM.getCXXABI().HasThisReturn(CurGD)
+ ? ThisType
+ : CGM.getCXXABI().hasMostDerivedReturn(CurGD)
+ ? CGM.getContext().VoidPtrTy
+ : FPT->getReturnType();
+ ReturnValueSlot Slot;
+ if (!ResultType->isVoidType() &&
+ CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ !hasScalarEvaluationKind(CurFnInfo->getReturnType()))
+ Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
+
+ // Now emit our call.
+ llvm::Instruction *CallOrInvoke;
+ RValue RV = EmitCall(*CurFnInfo, Callee, Slot, CallArgs, MD, &CallOrInvoke);
+
+ // Consider return adjustment if we have ThunkInfo.
+ if (Thunk && !Thunk->Return.isEmpty())
+ RV = PerformReturnAdjustment(*this, ResultType, RV, *Thunk);
+ else if (llvm::CallInst* Call = dyn_cast<llvm::CallInst>(CallOrInvoke))
+ Call->setTailCallKind(llvm::CallInst::TCK_Tail);
+
+ // Emit return.
+ if (!ResultType->isVoidType() && Slot.isNull())
+ CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);
+
+ // Disable the final ARC autorelease.
+ AutoreleaseResult = false;
+
+ FinishThunk();
+}
+
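+// For example, a 'this'-adjusting thunk arises with an illustrative
+// multiple-inheritance hierarchy:
+//
+//   struct A { virtual void f(); };
+//   struct B { virtual void g(); };
+//   struct C : A, B { void g() override; };
+//
+// The g entry in the B-in-C vtable receives a B* 'this'; the thunk
+// subtracts the offset of B within C before transferring control to C::g.
+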
+void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD,
+ llvm::Value *AdjustedThisPtr,
+ llvm::Value *Callee) {
+ // Emitting a musttail call thunk doesn't use any of the CGCall.cpp machinery
+ // to translate AST arguments into LLVM IR arguments. For thunks, we know
+ // that the caller prototype more or less matches the callee prototype with
+ // the exception of 'this'.
+ SmallVector<llvm::Value *, 8> Args;
+ for (llvm::Argument &A : CurFn->args())
+ Args.push_back(&A);
+
+ // Set the adjusted 'this' pointer.
+ const ABIArgInfo &ThisAI = CurFnInfo->arg_begin()->info;
+ if (ThisAI.isDirect()) {
+ const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
+ int ThisArgNo = RetAI.isIndirect() && !RetAI.isSRetAfterThis() ? 1 : 0;
+ llvm::Type *ThisType = Args[ThisArgNo]->getType();
+ if (ThisType != AdjustedThisPtr->getType())
+ AdjustedThisPtr = Builder.CreateBitCast(AdjustedThisPtr, ThisType);
+ Args[ThisArgNo] = AdjustedThisPtr;
+ } else {
+ assert(ThisAI.isInAlloca() && "this is passed directly or inalloca");
+ Address ThisAddr = GetAddrOfLocalVar(CXXABIThisDecl);
+ llvm::Type *ThisType = ThisAddr.getElementType();
+ if (ThisType != AdjustedThisPtr->getType())
+ AdjustedThisPtr = Builder.CreateBitCast(AdjustedThisPtr, ThisType);
+ Builder.CreateStore(AdjustedThisPtr, ThisAddr);
+ }
+
+ // Emit the musttail call manually. Even if the prologue pushed cleanups, we
+ // don't actually want to run them.
+ llvm::CallInst *Call = Builder.CreateCall(Callee, Args);
+ Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
+
+ // Apply the standard set of call attributes.
+ unsigned CallingConv;
+ CodeGen::AttributeListType AttributeList;
+ CGM.ConstructAttributeList(Callee->getName(), *CurFnInfo, MD, AttributeList,
+ CallingConv, /*AttrOnCallSite=*/true);
+ llvm::AttributeSet Attrs =
+ llvm::AttributeSet::get(getLLVMContext(), AttributeList);
+ Call->setAttributes(Attrs);
+ Call->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
+
+ if (Call->getType()->isVoidTy())
+ Builder.CreateRetVoid();
+ else
+ Builder.CreateRet(Call);
+
+ // Finish the function to maintain CodeGenFunction invariants.
+ // FIXME: Don't emit unreachable code.
+ EmitBlock(createBasicBlock());
+ FinishFunction();
+}
+
+void CodeGenFunction::generateThunk(llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo,
+ GlobalDecl GD, const ThunkInfo &Thunk) {
+ StartThunk(Fn, GD, FnInfo);
+
+ // Get our callee.
+ llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().arrangeGlobalDeclaration(GD));
+ llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
+
+ // Make the call and return the result.
+ EmitCallAndReturnForThunk(Callee, &Thunk);
+}
+
+void CodeGenVTables::emitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
+ bool ForVTable) {
+ const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeGlobalDeclaration(GD);
+
+ // FIXME: re-use FnInfo in this computation.
+ llvm::Constant *C = CGM.GetAddrOfThunk(GD, Thunk);
+ llvm::GlobalValue *Entry;
+
+ // Strip off a bitcast if we got one back.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(C)) {
+ assert(CE->getOpcode() == llvm::Instruction::BitCast);
+ Entry = cast<llvm::GlobalValue>(CE->getOperand(0));
+ } else {
+ Entry = cast<llvm::GlobalValue>(C);
+ }
+
+  // There's already a declaration with the same name; check if it has the same
+ // type or if we need to replace it.
+ if (Entry->getType()->getElementType() !=
+ CGM.getTypes().GetFunctionTypeForVTable(GD)) {
+ llvm::GlobalValue *OldThunkFn = Entry;
+
+ // If the types mismatch then we have to rewrite the definition.
+ assert(OldThunkFn->isDeclaration() &&
+ "Shouldn't replace non-declaration");
+
+ // Remove the name from the old thunk function and get a new thunk.
+ OldThunkFn->setName(StringRef());
+ Entry = cast<llvm::GlobalValue>(CGM.GetAddrOfThunk(GD, Thunk));
+
+ // If needed, replace the old thunk with a bitcast.
+ if (!OldThunkFn->use_empty()) {
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(Entry, OldThunkFn->getType());
+ OldThunkFn->replaceAllUsesWith(NewPtrForOldDecl);
+ }
+
+ // Remove the old thunk.
+ OldThunkFn->eraseFromParent();
+ }
+
+ llvm::Function *ThunkFn = cast<llvm::Function>(Entry);
+ bool ABIHasKeyFunctions = CGM.getTarget().getCXXABI().hasKeyFunctions();
+ bool UseAvailableExternallyLinkage = ForVTable && ABIHasKeyFunctions;
+
+ if (!ThunkFn->isDeclaration()) {
+ if (!ABIHasKeyFunctions || UseAvailableExternallyLinkage) {
+      // A thunk has already been emitted for this function; do nothing.
+ return;
+ }
+
+ setThunkProperties(CGM, Thunk, ThunkFn, ForVTable, GD);
+ return;
+ }
+
+ CGM.SetLLVMFunctionAttributesForDefinition(GD.getDecl(), ThunkFn);
+
+ if (ThunkFn->isVarArg()) {
+ // Varargs thunks are special; we can't just generate a call because
+ // we can't copy the varargs. Our implementation is rather
+ // expensive/sucky at the moment, so don't generate the thunk unless
+ // we have to.
+ // FIXME: Do something better here; GenerateVarArgsThunk is extremely ugly.
+ if (UseAvailableExternallyLinkage)
+ return;
+ ThunkFn =
+ CodeGenFunction(CGM).GenerateVarArgsThunk(ThunkFn, FnInfo, GD, Thunk);
+ } else {
+ // Normal thunk body generation.
+ CodeGenFunction(CGM).generateThunk(ThunkFn, FnInfo, GD, Thunk);
+ }
+
+ setThunkProperties(CGM, Thunk, ThunkFn, ForVTable, GD);
+}
+
+void CodeGenVTables::maybeEmitThunkForVTable(GlobalDecl GD,
+ const ThunkInfo &Thunk) {
+ // If the ABI has key functions, only the TU with the key function should emit
+ // the thunk. However, we can allow inlining of thunks if we emit them with
+ // available_externally linkage together with vtables when optimizations are
+ // enabled.
+ if (CGM.getTarget().getCXXABI().hasKeyFunctions() &&
+ !CGM.getCodeGenOpts().OptimizationLevel)
+ return;
+
+ // We can't emit thunks for member functions with incomplete types.
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ if (!CGM.getTypes().isFuncTypeConvertible(
+ MD->getType()->castAs<FunctionType>()))
+ return;
+
+ emitThunk(GD, Thunk, /*ForVTable=*/true);
+}
+
+void CodeGenVTables::EmitThunks(GlobalDecl GD)
+{
+ const CXXMethodDecl *MD =
+ cast<CXXMethodDecl>(GD.getDecl())->getCanonicalDecl();
+
+ // We don't need to generate thunks for the base destructor.
+ if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
+ return;
+
+ const VTableContextBase::ThunkInfoVectorTy *ThunkInfoVector =
+ VTContext->getThunkInfo(GD);
+
+ if (!ThunkInfoVector)
+ return;
+
+ for (const ThunkInfo& Thunk : *ThunkInfoVector)
+ emitThunk(GD, Thunk, /*ForVTable=*/false);
+}
+
+llvm::Constant *CodeGenVTables::CreateVTableInitializer(
+ const CXXRecordDecl *RD, const VTableComponent *Components,
+ unsigned NumComponents, const VTableLayout::VTableThunkTy *VTableThunks,
+ unsigned NumVTableThunks, llvm::Constant *RTTI) {
+ SmallVector<llvm::Constant *, 64> Inits;
+
+ llvm::Type *Int8PtrTy = CGM.Int8PtrTy;
+
+ llvm::Type *PtrDiffTy =
+ CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+
+ unsigned NextVTableThunkIndex = 0;
+
+ llvm::Constant *PureVirtualFn = nullptr, *DeletedVirtualFn = nullptr;
+
+ for (unsigned I = 0; I != NumComponents; ++I) {
+ VTableComponent Component = Components[I];
+
+ llvm::Constant *Init = nullptr;
+
+ switch (Component.getKind()) {
+ case VTableComponent::CK_VCallOffset:
+ Init = llvm::ConstantInt::get(PtrDiffTy,
+ Component.getVCallOffset().getQuantity());
+ Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
+ break;
+ case VTableComponent::CK_VBaseOffset:
+ Init = llvm::ConstantInt::get(PtrDiffTy,
+ Component.getVBaseOffset().getQuantity());
+ Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
+ break;
+ case VTableComponent::CK_OffsetToTop:
+ Init = llvm::ConstantInt::get(PtrDiffTy,
+ Component.getOffsetToTop().getQuantity());
+ Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
+ break;
+ case VTableComponent::CK_RTTI:
+ Init = llvm::ConstantExpr::getBitCast(RTTI, Int8PtrTy);
+ break;
+ case VTableComponent::CK_FunctionPointer:
+ case VTableComponent::CK_CompleteDtorPointer:
+ case VTableComponent::CK_DeletingDtorPointer: {
+ GlobalDecl GD;
+
+ // Get the right global decl.
+ switch (Component.getKind()) {
+ default:
+ llvm_unreachable("Unexpected vtable component kind");
+ case VTableComponent::CK_FunctionPointer:
+ GD = Component.getFunctionDecl();
+ break;
+ case VTableComponent::CK_CompleteDtorPointer:
+ GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Complete);
+ break;
+ case VTableComponent::CK_DeletingDtorPointer:
+ GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Deleting);
+ break;
+ }
+
+ if (CGM.getLangOpts().CUDA) {
+        // Emit NULL for methods we can't codegen on this
+        // side. Otherwise we'd end up with a vtable containing
+        // unresolved references.
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ // OK on device side: functions w/ __device__ attribute
+ // OK on host side: anything except __device__-only functions.
+ bool CanEmitMethod = CGM.getLangOpts().CUDAIsDevice
+ ? MD->hasAttr<CUDADeviceAttr>()
+ : (MD->hasAttr<CUDAHostAttr>() ||
+ !MD->hasAttr<CUDADeviceAttr>());
+ if (!CanEmitMethod) {
+ Init = llvm::ConstantExpr::getNullValue(Int8PtrTy);
+ break;
+ }
+ // Method is acceptable, continue processing as usual.
+ }
+
+ if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
+ // We have a pure virtual member function.
+ if (!PureVirtualFn) {
+ llvm::FunctionType *Ty =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ StringRef PureCallName = CGM.getCXXABI().GetPureVirtualCallName();
+ PureVirtualFn = CGM.CreateRuntimeFunction(Ty, PureCallName);
+ PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
+ CGM.Int8PtrTy);
+ }
+ Init = PureVirtualFn;
+ } else if (cast<CXXMethodDecl>(GD.getDecl())->isDeleted()) {
+ if (!DeletedVirtualFn) {
+ llvm::FunctionType *Ty =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ StringRef DeletedCallName =
+ CGM.getCXXABI().GetDeletedVirtualCallName();
+ DeletedVirtualFn = CGM.CreateRuntimeFunction(Ty, DeletedCallName);
+ DeletedVirtualFn = llvm::ConstantExpr::getBitCast(DeletedVirtualFn,
+ CGM.Int8PtrTy);
+ }
+ Init = DeletedVirtualFn;
+ } else {
+ // Check if we should use a thunk.
+ if (NextVTableThunkIndex < NumVTableThunks &&
+ VTableThunks[NextVTableThunkIndex].first == I) {
+ const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second;
+
+ maybeEmitThunkForVTable(GD, Thunk);
+ Init = CGM.GetAddrOfThunk(GD, Thunk);
+
+ NextVTableThunkIndex++;
+ } else {
+ llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(GD);
+
+ Init = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
+ }
+
+ Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
+ }
+ break;
+ }
+
+ case VTableComponent::CK_UnusedFunctionPointer:
+ Init = llvm::ConstantExpr::getNullValue(Int8PtrTy);
+ break;
+    }
+
+ Inits.push_back(Init);
+ }
+
+ llvm::ArrayType *ArrayType = llvm::ArrayType::get(Int8PtrTy, NumComponents);
+ return llvm::ConstantArray::get(ArrayType, Inits);
+}
+
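+// For a simple dynamic class, e.g. the illustrative
+//
+//   struct A { virtual void f(); };
+//
+// the initializer built above is conceptually
+//
+//   [ i8* null,                         // offset to top
+//     i8* bitcast (@_ZTI1A to i8*),     // RTTI descriptor
+//     i8* bitcast (@_ZN1A1fEv to i8*) ] // pointer to A::f()
+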
+llvm::GlobalVariable *
+CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
+ const BaseSubobject &Base,
+ bool BaseIsVirtual,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ VTableAddressPointsMapTy& AddressPoints) {
+ if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
+ DI->completeClassData(Base.getBase());
+
+ std::unique_ptr<VTableLayout> VTLayout(
+ getItaniumVTableContext().createConstructionVTableLayout(
+ Base.getBase(), Base.getBaseOffset(), BaseIsVirtual, RD));
+
+ // Add the address points.
+ AddressPoints = VTLayout->getAddressPoints();
+
+ // Get the mangled construction vtable name.
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ cast<ItaniumMangleContext>(CGM.getCXXABI().getMangleContext())
+ .mangleCXXCtorVTable(RD, Base.getBaseOffset().getQuantity(),
+ Base.getBase(), Out);
+ StringRef Name = OutName.str();
+
+ llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(CGM.Int8PtrTy, VTLayout->getNumVTableComponents());
+
+ // Construction vtable symbols are not part of the Itanium ABI, so we cannot
+ // guarantee that they actually will be available externally. Instead, when
+ // emitting an available_externally VTT, we provide references to an internal
+ // linkage construction vtable. The ABI only requires complete-object vtables
+ // to be the same for all instances of a type, not construction vtables.
+ if (Linkage == llvm::GlobalVariable::AvailableExternallyLinkage)
+ Linkage = llvm::GlobalVariable::InternalLinkage;
+
+ // Create the variable that will hold the construction vtable.
+ llvm::GlobalVariable *VTable =
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType, Linkage);
+ CGM.setGlobalVisibility(VTable, RD);
+
+ // V-tables are always unnamed_addr.
+ VTable->setUnnamedAddr(true);
+
+ llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor(
+ CGM.getContext().getTagDeclType(Base.getBase()));
+
+ // Create and set the initializer.
+ llvm::Constant *Init = CreateVTableInitializer(
+ Base.getBase(), VTLayout->vtable_component_begin(),
+ VTLayout->getNumVTableComponents(), VTLayout->vtable_thunk_begin(),
+ VTLayout->getNumVTableThunks(), RTTI);
+ VTable->setInitializer(Init);
+
+ CGM.EmitVTableBitSetEntries(VTable, *VTLayout.get());
+
+ return VTable;
+}
+
+static bool shouldEmitAvailableExternallyVTable(const CodeGenModule &CGM,
+ const CXXRecordDecl *RD) {
+ return CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+ CGM.getCXXABI().canSpeculativelyEmitVTable(RD);
+}
+
+/// Compute the required linkage of the v-table for the given class.
+///
+/// Note that we only call this at the end of the translation unit.
+llvm::GlobalVariable::LinkageTypes
+CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
+ if (!RD->isExternallyVisible())
+ return llvm::GlobalVariable::InternalLinkage;
+
+ // We're at the end of the translation unit, so the current key
+ // function is fully correct.
+ const CXXMethodDecl *keyFunction = Context.getCurrentKeyFunction(RD);
+ if (keyFunction && !RD->hasAttr<DLLImportAttr>()) {
+ // If this class has a key function, use that to determine the
+ // linkage of the vtable.
+ const FunctionDecl *def = nullptr;
+ if (keyFunction->hasBody(def))
+ keyFunction = cast<CXXMethodDecl>(def);
+
+ switch (keyFunction->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ assert((def || CodeGenOpts.OptimizationLevel > 0) &&
+ "Shouldn't query vtable linkage without key function or "
+ "optimizations");
+ if (!def && CodeGenOpts.OptimizationLevel > 0)
+ return llvm::GlobalVariable::AvailableExternallyLinkage;
+
+ if (keyFunction->isInlined())
+ return !Context.getLangOpts().AppleKext ?
+ llvm::GlobalVariable::LinkOnceODRLinkage :
+ llvm::Function::InternalLinkage;
+
+ return llvm::GlobalVariable::ExternalLinkage;
+
+ case TSK_ImplicitInstantiation:
+ return !Context.getLangOpts().AppleKext ?
+ llvm::GlobalVariable::LinkOnceODRLinkage :
+ llvm::Function::InternalLinkage;
+
+ case TSK_ExplicitInstantiationDefinition:
+ return !Context.getLangOpts().AppleKext ?
+ llvm::GlobalVariable::WeakODRLinkage :
+ llvm::Function::InternalLinkage;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ llvm_unreachable("Should not have been asked to emit this");
+ }
+ }
+
+ // -fapple-kext mode does not support weak linkage, so we must use
+ // internal linkage.
+ if (Context.getLangOpts().AppleKext)
+ return llvm::Function::InternalLinkage;
+
+ llvm::GlobalVariable::LinkageTypes DiscardableODRLinkage =
+ llvm::GlobalValue::LinkOnceODRLinkage;
+ llvm::GlobalVariable::LinkageTypes NonDiscardableODRLinkage =
+ llvm::GlobalValue::WeakODRLinkage;
+ if (RD->hasAttr<DLLExportAttr>()) {
+ // Cannot discard exported vtables.
+ DiscardableODRLinkage = NonDiscardableODRLinkage;
+ } else if (RD->hasAttr<DLLImportAttr>()) {
+ // Imported vtables are available externally.
+ DiscardableODRLinkage = llvm::GlobalVariable::AvailableExternallyLinkage;
+ NonDiscardableODRLinkage = llvm::GlobalVariable::AvailableExternallyLinkage;
+ }
+
+ switch (RD->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ case TSK_ImplicitInstantiation:
+ return DiscardableODRLinkage;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ return shouldEmitAvailableExternallyVTable(*this, RD)
+ ? llvm::GlobalVariable::AvailableExternallyLinkage
+ : llvm::GlobalVariable::ExternalLinkage;
+
+ case TSK_ExplicitInstantiationDefinition:
+ return NonDiscardableODRLinkage;
+ }
+
+ llvm_unreachable("Invalid TemplateSpecializationKind!");
+}
+
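+// For example, for the illustrative
+//
+//   struct A { virtual void f(); };  // A::f() is A's key function
+//
+// the TU that defines A::f emits A's vtable with strong external linkage;
+// other TUs only reference it (or, with optimizations enabled, may emit
+// an available_externally copy).
+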
+/// This is a callback from Sema to tell us that a particular v-table is
+/// required to be emitted in this translation unit.
+///
+/// This is only called for vtables that _must_ be emitted (mainly due to key
+/// functions). For weak vtables, CodeGen tracks when they are needed and
+/// emits them as-needed.
+void CodeGenModule::EmitVTable(CXXRecordDecl *theClass) {
+ VTables.GenerateClassData(theClass);
+}
+
+void
+CodeGenVTables::GenerateClassData(const CXXRecordDecl *RD) {
+ if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
+ DI->completeClassData(RD);
+
+ if (RD->getNumVBases())
+ CGM.getCXXABI().emitVirtualInheritanceTables(RD);
+
+ CGM.getCXXABI().emitVTableDefinitions(*this, RD);
+}
+
+/// At this point in the translation unit, does it appear that we can
+/// rely on the vtable being defined elsewhere in the program?
+///
+/// The response is really only definitive when called at the end of
+/// the translation unit.
+///
+/// The only semantic restriction here is that the object file should
+/// not contain a v-table definition when that v-table is defined
+/// strongly elsewhere. Otherwise, we'd just like to avoid emitting
+/// v-tables when unnecessary.
+bool CodeGenVTables::isVTableExternal(const CXXRecordDecl *RD) {
+ assert(RD->isDynamicClass() && "Non-dynamic classes have no VTable.");
+
+ // If we have an explicit instantiation declaration (and not a
+ // definition), the v-table is defined elsewhere.
+ TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind();
+ if (TSK == TSK_ExplicitInstantiationDeclaration)
+ return true;
+
+ // Otherwise, if the class is an instantiated template, the
+ // v-table must be defined here.
+ if (TSK == TSK_ImplicitInstantiation ||
+ TSK == TSK_ExplicitInstantiationDefinition)
+ return false;
+
+ // Otherwise, if the class doesn't have a key function (possibly
+ // anymore), the v-table must be defined here.
+ const CXXMethodDecl *keyFunction = CGM.getContext().getCurrentKeyFunction(RD);
+ if (!keyFunction)
+ return false;
+
+ // Otherwise, if we don't have a definition of the key function, the
+ // v-table must be defined somewhere else.
+ return !keyFunction->hasBody();
+}
+
+/// Given that we're currently at the end of the translation unit, and
+/// we've emitted a reference to the v-table for this class, should
+/// we define that v-table?
+static bool shouldEmitVTableAtEndOfTranslationUnit(CodeGenModule &CGM,
+ const CXXRecordDecl *RD) {
+  // If the vtable is internal then it must be emitted in this TU.
+ if (!CGM.getVTables().isVTableExternal(RD))
+ return true;
+
+ // If it's external then maybe we will need it as available_externally.
+ return shouldEmitAvailableExternallyVTable(CGM, RD);
+}
+
+/// Given that at some point we emitted a reference to one or more
+/// v-tables, and that we are now at the end of the translation unit,
+/// decide whether we should emit them.
+void CodeGenModule::EmitDeferredVTables() {
+#ifndef NDEBUG
+ // Remember the size of DeferredVTables, because we're going to assume
+ // that this entire operation doesn't modify it.
+ size_t savedSize = DeferredVTables.size();
+#endif
+
+ for (const CXXRecordDecl *RD : DeferredVTables)
+ if (shouldEmitVTableAtEndOfTranslationUnit(*this, RD))
+ VTables.GenerateClassData(RD);
+
+ assert(savedSize == DeferredVTables.size() &&
+ "deferred extra v-tables during v-table emission?");
+ DeferredVTables.clear();
+}
+
+bool CodeGenModule::IsCFIBlacklistedRecord(const CXXRecordDecl *RD) {
+ if (RD->hasAttr<UuidAttr>() &&
+ getContext().getSanitizerBlacklist().isBlacklistedType("attr:uuid"))
+ return true;
+
+ return getContext().getSanitizerBlacklist().isBlacklistedType(
+ RD->getQualifiedNameAsString());
+}
+
+void CodeGenModule::EmitVTableBitSetEntries(llvm::GlobalVariable *VTable,
+ const VTableLayout &VTLayout) {
+ if (!LangOpts.Sanitize.has(SanitizerKind::CFIVCall) &&
+ !LangOpts.Sanitize.has(SanitizerKind::CFINVCall) &&
+ !LangOpts.Sanitize.has(SanitizerKind::CFIDerivedCast) &&
+ !LangOpts.Sanitize.has(SanitizerKind::CFIUnrelatedCast))
+ return;
+
+ CharUnits PointerWidth =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+
+ typedef std::pair<const CXXRecordDecl *, unsigned> BSEntry;
+ std::vector<BSEntry> BitsetEntries;
+ // Create a bit set entry for each address point.
+ for (auto &&AP : VTLayout.getAddressPoints()) {
+ if (IsCFIBlacklistedRecord(AP.first.getBase()))
+ continue;
+
+ BitsetEntries.push_back(std::make_pair(AP.first.getBase(), AP.second));
+ }
+
+ // Sort the bit set entries for determinism.
+ std::sort(BitsetEntries.begin(), BitsetEntries.end(),
+ [this](const BSEntry &E1, const BSEntry &E2) {
+ if (&E1 == &E2)
+ return false;
+
+ std::string S1;
+ llvm::raw_string_ostream O1(S1);
+ getCXXABI().getMangleContext().mangleTypeName(
+ QualType(E1.first->getTypeForDecl(), 0), O1);
+ O1.flush();
+
+ std::string S2;
+ llvm::raw_string_ostream O2(S2);
+ getCXXABI().getMangleContext().mangleTypeName(
+ QualType(E2.first->getTypeForDecl(), 0), O2);
+ O2.flush();
+
+ if (S1 < S2)
+ return true;
+ if (S1 != S2)
+ return false;
+
+ return E1.second < E2.second;
+ });
+
+ llvm::NamedMDNode *BitsetsMD =
+ getModule().getOrInsertNamedMetadata("llvm.bitsets");
+ for (auto BitsetEntry : BitsetEntries)
+ CreateVTableBitSetEntry(BitsetsMD, VTable,
+ PointerWidth * BitsetEntry.second,
+ BitsetEntry.first);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h
new file mode 100644
index 0000000..c27e54a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h
@@ -0,0 +1,119 @@
+//===--- CGVTables.h - Emit LLVM Code for C++ vtables -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGVTABLES_H
+#define LLVM_CLANG_LIB_CODEGEN_CGVTABLES_H
+
+#include "clang/AST/BaseSubobject.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/GlobalDecl.h"
+#include "clang/AST/VTableBuilder.h"
+#include "clang/Basic/ABI.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/GlobalVariable.h"
+
+namespace clang {
+ class CXXRecordDecl;
+
+namespace CodeGen {
+ class CodeGenModule;
+
+class CodeGenVTables {
+ CodeGenModule &CGM;
+
+ VTableContextBase *VTContext;
+
+ /// VTableAddressPointsMapTy - Address points for a single vtable.
+ typedef llvm::DenseMap<BaseSubobject, uint64_t> VTableAddressPointsMapTy;
+
+ typedef std::pair<const CXXRecordDecl *, BaseSubobject> BaseSubobjectPairTy;
+ typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t> SubVTTIndiciesMapTy;
+
+ /// SubVTTIndicies - Contains indices into the various sub-VTTs.
+ SubVTTIndiciesMapTy SubVTTIndicies;
+
+ typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t>
+ SecondaryVirtualPointerIndicesMapTy;
+
+ /// SecondaryVirtualPointerIndices - Contains the secondary virtual pointer
+ /// indices.
+ SecondaryVirtualPointerIndicesMapTy SecondaryVirtualPointerIndices;
+
+ /// emitThunk - Emit a single thunk.
+ void emitThunk(GlobalDecl GD, const ThunkInfo &Thunk, bool ForVTable);
+
+ /// maybeEmitThunkForVTable - Emit the given thunk for the vtable if needed by
+ /// the ABI.
+ void maybeEmitThunkForVTable(GlobalDecl GD, const ThunkInfo &Thunk);
+
+public:
+ /// CreateVTableInitializer - Create a vtable initializer for the given record
+ /// decl.
+ /// \param Components - The vtable components; this is really an array of
+ /// VTableComponents.
+ llvm::Constant *CreateVTableInitializer(
+ const CXXRecordDecl *RD, const VTableComponent *Components,
+ unsigned NumComponents, const VTableLayout::VTableThunkTy *VTableThunks,
+ unsigned NumVTableThunks, llvm::Constant *RTTI);
+
+ CodeGenVTables(CodeGenModule &CGM);
+
+ ItaniumVTableContext &getItaniumVTableContext() {
+ return *cast<ItaniumVTableContext>(VTContext);
+ }
+
+ MicrosoftVTableContext &getMicrosoftVTableContext() {
+ return *cast<MicrosoftVTableContext>(VTContext);
+ }
+
+  /// getSubVTTIndex - Return the index of the sub-VTT for the given base
+  /// subobject of the given record decl.
+ uint64_t getSubVTTIndex(const CXXRecordDecl *RD, BaseSubobject Base);
+
+ /// getSecondaryVirtualPointerIndex - Return the index in the VTT where the
+ /// virtual pointer for the given subobject is located.
+ uint64_t getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD,
+ BaseSubobject Base);
+
+ /// GenerateConstructionVTable - Generate a construction vtable for the given
+ /// base subobject.
+ llvm::GlobalVariable *
+ GenerateConstructionVTable(const CXXRecordDecl *RD, const BaseSubobject &Base,
+ bool BaseIsVirtual,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ VTableAddressPointsMapTy& AddressPoints);
+
+
+ /// GetAddrOfVTT - Get the address of the VTT for the given record decl.
+ llvm::GlobalVariable *GetAddrOfVTT(const CXXRecordDecl *RD);
+
+  /// EmitVTTDefinition - Emit the definition of the given VTT.
+ void EmitVTTDefinition(llvm::GlobalVariable *VTT,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD);
+
+ /// EmitThunks - Emit the associated thunks for the given global decl.
+ void EmitThunks(GlobalDecl GD);
+
+ /// GenerateClassData - Generate all the class data required to be
+ /// generated upon definition of a KeyFunction. This includes the
+ /// vtable, the RTTI data structure (if RTTI is enabled) and the VTT
+ /// (if the class has virtual bases).
+ void GenerateClassData(const CXXRecordDecl *RD);
+
+ bool isVTableExternal(const CXXRecordDecl *RD);
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h b/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h
new file mode 100644
index 0000000..3ccc4cd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h
@@ -0,0 +1,600 @@
+//===-- CGValue.h - LLVM CodeGen wrappers for llvm::Value* ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes implement wrappers around llvm::Value in order to
+// fully represent the range of values for C L- and R- values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGVALUE_H
+#define LLVM_CLANG_LIB_CODEGEN_CGVALUE_H
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/Type.h"
+#include "Address.h"
+
+namespace llvm {
+ class Constant;
+ class MDNode;
+}
+
+namespace clang {
+namespace CodeGen {
+ class AggValueSlot;
+ struct CGBitFieldInfo;
+
+/// RValue - This trivial value class is used to represent the result of an
+/// expression that is evaluated. It can be one of three things: either a
+/// simple LLVM SSA value, a pair of SSA values for complex numbers, or the
+/// address of an aggregate value in memory.
+class RValue {
+ enum Flavor { Scalar, Complex, Aggregate };
+
+  // The shift to apply to an aggregate's alignment to make it look
+  // like a pointer.
+ enum { AggAlignShift = 4 };
+
+ // Stores first value and flavor.
+ llvm::PointerIntPair<llvm::Value *, 2, Flavor> V1;
+ // Stores second value and volatility.
+ llvm::PointerIntPair<llvm::Value *, 1, bool> V2;
+
+public:
+ bool isScalar() const { return V1.getInt() == Scalar; }
+ bool isComplex() const { return V1.getInt() == Complex; }
+ bool isAggregate() const { return V1.getInt() == Aggregate; }
+
+ bool isVolatileQualified() const { return V2.getInt(); }
+
+ /// getScalarVal() - Return the Value* of this scalar value.
+ llvm::Value *getScalarVal() const {
+ assert(isScalar() && "Not a scalar!");
+ return V1.getPointer();
+ }
+
+ /// getComplexVal - Return the real/imag components of this complex value.
+ ///
+ std::pair<llvm::Value *, llvm::Value *> getComplexVal() const {
+ return std::make_pair(V1.getPointer(), V2.getPointer());
+ }
+
+  /// getAggregateAddress() - Return the Address of the aggregate.
+ Address getAggregateAddress() const {
+ assert(isAggregate() && "Not an aggregate!");
+ auto align = reinterpret_cast<uintptr_t>(V2.getPointer()) >> AggAlignShift;
+ return Address(V1.getPointer(), CharUnits::fromQuantity(align));
+ }
+ llvm::Value *getAggregatePointer() const {
+ assert(isAggregate() && "Not an aggregate!");
+ return V1.getPointer();
+ }
+
+ static RValue getIgnored() {
+ // FIXME: should we make this a more explicit state?
+ return get(nullptr);
+ }
+
+ static RValue get(llvm::Value *V) {
+ RValue ER;
+ ER.V1.setPointer(V);
+ ER.V1.setInt(Scalar);
+ ER.V2.setInt(false);
+ return ER;
+ }
+ static RValue getComplex(llvm::Value *V1, llvm::Value *V2) {
+ RValue ER;
+ ER.V1.setPointer(V1);
+ ER.V2.setPointer(V2);
+ ER.V1.setInt(Complex);
+ ER.V2.setInt(false);
+ return ER;
+ }
+ static RValue getComplex(const std::pair<llvm::Value *, llvm::Value *> &C) {
+ return getComplex(C.first, C.second);
+ }
+ // FIXME: Aggregate rvalues need to retain information about whether they are
+ // volatile or not. Remove default to find all places that probably get this
+ // wrong.
+ static RValue getAggregate(Address addr, bool isVolatile = false) {
+ RValue ER;
+ ER.V1.setPointer(addr.getPointer());
+ ER.V1.setInt(Aggregate);
+
+ auto align = static_cast<uintptr_t>(addr.getAlignment().getQuantity());
+ ER.V2.setPointer(reinterpret_cast<llvm::Value*>(align << AggAlignShift));
+ ER.V2.setInt(isVolatile);
+ return ER;
+ }
+};
+
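+// Usage sketch: a scalar RValue round-trips its llvm::Value*, e.g.
+//
+//   RValue RV = RValue::get(V);
+//   assert(RV.isScalar() && RV.getScalarVal() == V);
+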
+/// Does an ARC strong l-value have precise lifetime?
+enum ARCPreciseLifetime_t {
+ ARCImpreciseLifetime, ARCPreciseLifetime
+};
+
+/// The source of the alignment of an l-value; an expression of
+/// confidence in the alignment actually matching the estimate.
+enum class AlignmentSource {
+ /// The l-value was an access to a declared entity or something
+ /// equivalently strong, like the address of an array allocated by a
+ /// language runtime.
+ Decl,
+
+ /// The l-value was considered opaque, so the alignment was
+ /// determined from a type, but that type was an explicitly-aligned
+ /// typedef.
+ AttributedType,
+
+ /// The l-value was considered opaque, so the alignment was
+ /// determined from a type.
+ Type
+};
+
+/// Given that the base address has the given alignment source, what's
+/// our confidence in the alignment of the field?
+static inline AlignmentSource getFieldAlignmentSource(AlignmentSource Source) {
+ // For now, we don't distinguish fields of opaque pointers from
+ // top-level declarations, but maybe we should.
+ return AlignmentSource::Decl;
+}
+
+/// LValue - This represents an lvalue reference. Because C/C++ allow
+/// bitfields, this is not a simple LLVM pointer; it may be a pointer plus a
+/// bitrange.
+class LValue {
+ enum {
+ Simple, // This is a normal l-value, use getAddress().
+ VectorElt, // This is a vector element l-value (V[i]), use getVector*
+    BitField,     // This is a bitfield l-value, use getBitField*.
+    ExtVectorElt, // This is an extended vector subset, use getExtVector*.
+ GlobalReg // This is a register l-value, use getGlobalReg()
+ } LVType;
+
+ llvm::Value *V;
+
+ union {
+ // Index into a vector subscript: V[i]
+ llvm::Value *VectorIdx;
+
+ // ExtVector element subset: V.xyx
+ llvm::Constant *VectorElts;
+
+ // BitField start bit and size
+ const CGBitFieldInfo *BitFieldInfo;
+ };
+
+ QualType Type;
+
+ // 'const' is unused here
+ Qualifiers Quals;
+
+ // The alignment to use when accessing this lvalue. (For vector elements,
+ // this is the alignment of the whole vector.)
+ int64_t Alignment;
+
+  // True if this is an Objective-C ivar.
+ bool Ivar:1;
+
+  // True if the Objective-C ivar is an array.
+ bool ObjIsArray:1;
+
+ // LValue is non-gc'able for any reason, including being a parameter or local
+ // variable.
+ bool NonGC: 1;
+
+  // LValue is a global reference to an Objective-C object.
+ bool GlobalObjCRef : 1;
+
+  // LValue is a thread-local reference.
+ bool ThreadLocalRef : 1;
+
+ // Lvalue has ARC imprecise lifetime. We store this inverted to try
+ // to make the default bitfield pattern all-zeroes.
+ bool ImpreciseLifetime : 1;
+
+ unsigned AlignSource : 2;
+
+  // True if nontemporal loads/stores should be used when accessing
+  // this lvalue.
+ bool Nontemporal : 1;
+
+ Expr *BaseIvarExp;
+
+ /// Used by struct-path-aware TBAA.
+ QualType TBAABaseType;
+ /// Offset relative to the base type.
+ uint64_t TBAAOffset;
+
+ /// TBAAInfo - TBAA information to attach to dereferences of this LValue.
+ llvm::MDNode *TBAAInfo;
+
+private:
+ void Initialize(QualType Type, Qualifiers Quals,
+ CharUnits Alignment, AlignmentSource AlignSource,
+ llvm::MDNode *TBAAInfo = nullptr) {
+ assert((!Alignment.isZero() || Type->isIncompleteType()) &&
+ "initializing l-value with zero alignment!");
+ this->Type = Type;
+ this->Quals = Quals;
+ this->Alignment = Alignment.getQuantity();
+ assert(this->Alignment == Alignment.getQuantity() &&
+ "Alignment exceeds allowed max!");
+ this->AlignSource = unsigned(AlignSource);
+
+ // Initialize Objective-C flags.
+ this->Ivar = this->ObjIsArray = this->NonGC = this->GlobalObjCRef = false;
+ this->ImpreciseLifetime = false;
+ this->Nontemporal = false;
+ this->ThreadLocalRef = false;
+ this->BaseIvarExp = nullptr;
+
+ // Initialize fields for TBAA.
+ this->TBAABaseType = Type;
+ this->TBAAOffset = 0;
+ this->TBAAInfo = TBAAInfo;
+ }
+
+public:
+ bool isSimple() const { return LVType == Simple; }
+ bool isVectorElt() const { return LVType == VectorElt; }
+ bool isBitField() const { return LVType == BitField; }
+ bool isExtVectorElt() const { return LVType == ExtVectorElt; }
+ bool isGlobalReg() const { return LVType == GlobalReg; }
+
+ bool isVolatileQualified() const { return Quals.hasVolatile(); }
+ bool isRestrictQualified() const { return Quals.hasRestrict(); }
+ unsigned getVRQualifiers() const {
+ return Quals.getCVRQualifiers() & ~Qualifiers::Const;
+ }
+
+ QualType getType() const { return Type; }
+
+ Qualifiers::ObjCLifetime getObjCLifetime() const {
+ return Quals.getObjCLifetime();
+ }
+
+ bool isObjCIvar() const { return Ivar; }
+ void setObjCIvar(bool Value) { Ivar = Value; }
+
+ bool isObjCArray() const { return ObjIsArray; }
+ void setObjCArray(bool Value) { ObjIsArray = Value; }
+
+ bool isNonGC () const { return NonGC; }
+ void setNonGC(bool Value) { NonGC = Value; }
+
+ bool isGlobalObjCRef() const { return GlobalObjCRef; }
+ void setGlobalObjCRef(bool Value) { GlobalObjCRef = Value; }
+
+ bool isThreadLocalRef() const { return ThreadLocalRef; }
+ void setThreadLocalRef(bool Value) { ThreadLocalRef = Value;}
+
+ ARCPreciseLifetime_t isARCPreciseLifetime() const {
+ return ARCPreciseLifetime_t(!ImpreciseLifetime);
+ }
+ void setARCPreciseLifetime(ARCPreciseLifetime_t value) {
+ ImpreciseLifetime = (value == ARCImpreciseLifetime);
+ }
+ bool isNontemporal() const { return Nontemporal; }
+ void setNontemporal(bool Value) { Nontemporal = Value; }
+
+ bool isObjCWeak() const {
+ return Quals.getObjCGCAttr() == Qualifiers::Weak;
+ }
+ bool isObjCStrong() const {
+ return Quals.getObjCGCAttr() == Qualifiers::Strong;
+ }
+
+ bool isVolatile() const {
+ return Quals.hasVolatile();
+ }
+
+ Expr *getBaseIvarExp() const { return BaseIvarExp; }
+ void setBaseIvarExp(Expr *V) { BaseIvarExp = V; }
+
+ QualType getTBAABaseType() const { return TBAABaseType; }
+ void setTBAABaseType(QualType T) { TBAABaseType = T; }
+
+ uint64_t getTBAAOffset() const { return TBAAOffset; }
+ void setTBAAOffset(uint64_t O) { TBAAOffset = O; }
+
+ llvm::MDNode *getTBAAInfo() const { return TBAAInfo; }
+ void setTBAAInfo(llvm::MDNode *N) { TBAAInfo = N; }
+
+ const Qualifiers &getQuals() const { return Quals; }
+ Qualifiers &getQuals() { return Quals; }
+
+ unsigned getAddressSpace() const { return Quals.getAddressSpace(); }
+
+ CharUnits getAlignment() const { return CharUnits::fromQuantity(Alignment); }
+ void setAlignment(CharUnits A) { Alignment = A.getQuantity(); }
+
+ AlignmentSource getAlignmentSource() const {
+ return AlignmentSource(AlignSource);
+ }
+ void setAlignmentSource(AlignmentSource Source) {
+ AlignSource = unsigned(Source);
+ }
+
+ // simple lvalue
+ llvm::Value *getPointer() const {
+ assert(isSimple());
+ return V;
+ }
+ Address getAddress() const { return Address(getPointer(), getAlignment()); }
+ void setAddress(Address address) {
+ assert(isSimple());
+ V = address.getPointer();
+ Alignment = address.getAlignment().getQuantity();
+ }
+
+ // vector elt lvalue
+ Address getVectorAddress() const {
+ return Address(getVectorPointer(), getAlignment());
+ }
+ llvm::Value *getVectorPointer() const { assert(isVectorElt()); return V; }
+ llvm::Value *getVectorIdx() const { assert(isVectorElt()); return VectorIdx; }
+
+ // extended vector elements.
+ Address getExtVectorAddress() const {
+ return Address(getExtVectorPointer(), getAlignment());
+ }
+ llvm::Value *getExtVectorPointer() const {
+ assert(isExtVectorElt());
+ return V;
+ }
+ llvm::Constant *getExtVectorElts() const {
+ assert(isExtVectorElt());
+ return VectorElts;
+ }
+
+ // bitfield lvalue
+ Address getBitFieldAddress() const {
+ return Address(getBitFieldPointer(), getAlignment());
+ }
+ llvm::Value *getBitFieldPointer() const { assert(isBitField()); return V; }
+ const CGBitFieldInfo &getBitFieldInfo() const {
+ assert(isBitField());
+ return *BitFieldInfo;
+ }
+
+ // global register lvalue
+ llvm::Value *getGlobalReg() const { assert(isGlobalReg()); return V; }
+
+ static LValue MakeAddr(Address address, QualType type,
+ ASTContext &Context,
+ AlignmentSource alignSource,
+ llvm::MDNode *TBAAInfo = nullptr) {
+ Qualifiers qs = type.getQualifiers();
+ qs.setObjCGCAttr(Context.getObjCGCAttrKind(type));
+
+ LValue R;
+ R.LVType = Simple;
+ assert(address.getPointer()->getType()->isPointerTy());
+ R.V = address.getPointer();
+ R.Initialize(type, qs, address.getAlignment(), alignSource, TBAAInfo);
+ return R;
+ }
+
+ static LValue MakeVectorElt(Address vecAddress, llvm::Value *Idx,
+ QualType type, AlignmentSource alignSource) {
+ LValue R;
+ R.LVType = VectorElt;
+ R.V = vecAddress.getPointer();
+ R.VectorIdx = Idx;
+ R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(),
+ alignSource);
+ return R;
+ }
+
+ static LValue MakeExtVectorElt(Address vecAddress, llvm::Constant *Elts,
+ QualType type, AlignmentSource alignSource) {
+ LValue R;
+ R.LVType = ExtVectorElt;
+ R.V = vecAddress.getPointer();
+ R.VectorElts = Elts;
+ R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(),
+ alignSource);
+ return R;
+ }
+
+ /// \brief Create a new object to represent a bit-field access.
+ ///
+ /// \param Addr - The base address of the bit-field sequence this
+ /// bit-field refers to.
+ /// \param Info - The information describing how to perform the bit-field
+ /// access.
+ static LValue MakeBitfield(Address Addr,
+ const CGBitFieldInfo &Info,
+ QualType type,
+ AlignmentSource alignSource) {
+ LValue R;
+ R.LVType = BitField;
+ R.V = Addr.getPointer();
+ R.BitFieldInfo = &Info;
+ R.Initialize(type, type.getQualifiers(), Addr.getAlignment(), alignSource);
+ return R;
+ }
+
+ static LValue MakeGlobalReg(Address Reg, QualType type) {
+ LValue R;
+ R.LVType = GlobalReg;
+ R.V = Reg.getPointer();
+ R.Initialize(type, type.getQualifiers(), Reg.getAlignment(),
+ AlignmentSource::Decl);
+ return R;
+ }
+
+ RValue asAggregateRValue() const {
+ return RValue::getAggregate(getAddress(), isVolatileQualified());
+ }
+};
+
+/// An aggregate value slot.
+class AggValueSlot {
+ /// The address.
+ llvm::Value *Addr;
+
+ // Qualifiers
+ Qualifiers Quals;
+
+ unsigned short Alignment;
+
+ /// DestructedFlag - This is set to true if some external code is
+ /// responsible for setting up a destructor for the slot. Otherwise
+ /// the code which constructs it should push the appropriate cleanup.
+ bool DestructedFlag : 1;
+
+ /// ObjCGCFlag - This is set to true if writing to the memory in the
+ /// slot might require calling an appropriate Objective-C GC
+ /// barrier. The exact interaction here is unnecessarily mysterious.
+ bool ObjCGCFlag : 1;
+
+ /// ZeroedFlag - This is set to true if the memory in the slot is
+ /// known to be zero before the assignment into it. This means that
+ /// zero fields don't need to be set.
+ bool ZeroedFlag : 1;
+
+ /// AliasedFlag - This is set to true if the slot might be aliased
+ /// and it's not undefined behavior to access it through such an
+ /// alias. Note that it's always undefined behavior to access a C++
+ /// object that's under construction through an alias derived from
+ /// outside the construction process.
+ ///
+ /// This flag controls whether calls that produce the aggregate
+ /// value may be evaluated directly into the slot, or whether they
+ /// must be evaluated into an unaliased temporary and then memcpy'ed
+ /// over. Since it's invalid in general to memcpy a non-POD C++
+ /// object, it's important that this flag never be set when
+ /// evaluating an expression which constructs such an object.
+ bool AliasedFlag : 1;
+
+public:
+ enum IsAliased_t { IsNotAliased, IsAliased };
+ enum IsDestructed_t { IsNotDestructed, IsDestructed };
+ enum IsZeroed_t { IsNotZeroed, IsZeroed };
+ enum NeedsGCBarriers_t { DoesNotNeedGCBarriers, NeedsGCBarriers };
+
+ /// ignored - Returns an aggregate value slot indicating that the
+ /// aggregate value is being ignored.
+ static AggValueSlot ignored() {
+ return forAddr(Address::invalid(), Qualifiers(), IsNotDestructed,
+ DoesNotNeedGCBarriers, IsNotAliased);
+ }
+
+ /// forAddr - Make a slot for an aggregate value.
+ ///
+ /// \param quals - The qualifiers that dictate how the slot should
+ /// be initialized. Only 'volatile' and the Objective-C lifetime
+ /// qualifiers matter.
+ ///
+ /// \param isDestructed - true if something else is responsible
+ /// for calling destructors on this object
+ /// \param needsGC - true if the slot is potentially located
+ /// somewhere that ObjC GC calls should be emitted for
+ static AggValueSlot forAddr(Address addr,
+ Qualifiers quals,
+ IsDestructed_t isDestructed,
+ NeedsGCBarriers_t needsGC,
+ IsAliased_t isAliased,
+ IsZeroed_t isZeroed = IsNotZeroed) {
+ AggValueSlot AV;
+ if (addr.isValid()) {
+ AV.Addr = addr.getPointer();
+ AV.Alignment = addr.getAlignment().getQuantity();
+ } else {
+ AV.Addr = nullptr;
+ AV.Alignment = 0;
+ }
+ AV.Quals = quals;
+ AV.DestructedFlag = isDestructed;
+ AV.ObjCGCFlag = needsGC;
+ AV.ZeroedFlag = isZeroed;
+ AV.AliasedFlag = isAliased;
+ return AV;
+ }
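+
+ // Illustrative use (a sketch; CGF and RecordTy are assumed to be a
+ // CodeGenFunction and a record QualType already in scope):
+ //   Address Tmp = CGF.CreateMemTemp(RecordTy, "agg.tmp");
+ //   AggValueSlot Slot = AggValueSlot::forAddr(
+ //       Tmp, RecordTy.getQualifiers(), IsNotDestructed,
+ //       DoesNotNeedGCBarriers, IsNotAliased);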
+
+ static AggValueSlot forLValue(const LValue &LV,
+ IsDestructed_t isDestructed,
+ NeedsGCBarriers_t needsGC,
+ IsAliased_t isAliased,
+ IsZeroed_t isZeroed = IsNotZeroed) {
+ return forAddr(LV.getAddress(),
+ LV.getQuals(), isDestructed, needsGC, isAliased, isZeroed);
+ }
+
+ IsDestructed_t isExternallyDestructed() const {
+ return IsDestructed_t(DestructedFlag);
+ }
+ void setExternallyDestructed(bool destructed = true) {
+ DestructedFlag = destructed;
+ }
+
+ Qualifiers getQualifiers() const { return Quals; }
+
+ bool isVolatile() const {
+ return Quals.hasVolatile();
+ }
+
+ void setVolatile(bool flag) {
+ Quals.setVolatile(flag);
+ }
+
+ Qualifiers::ObjCLifetime getObjCLifetime() const {
+ return Quals.getObjCLifetime();
+ }
+
+ NeedsGCBarriers_t requiresGCollection() const {
+ return NeedsGCBarriers_t(ObjCGCFlag);
+ }
+
+ llvm::Value *getPointer() const {
+ return Addr;
+ }
+
+ Address getAddress() const {
+ return Address(Addr, getAlignment());
+ }
+
+ bool isIgnored() const {
+ return Addr == nullptr;
+ }
+
+ CharUnits getAlignment() const {
+ return CharUnits::fromQuantity(Alignment);
+ }
+
+ IsAliased_t isPotentiallyAliased() const {
+ return IsAliased_t(AliasedFlag);
+ }
+
+ RValue asRValue() const {
+ if (isIgnored()) {
+ return RValue::getIgnored();
+ } else {
+ return RValue::getAggregate(getAddress(), isVolatile());
+ }
+ }
+
+ void setZeroed(bool V = true) { ZeroedFlag = V; }
+ IsZeroed_t isZeroed() const {
+ return IsZeroed_t(ZeroedFlag);
+ }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenABITypes.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenABITypes.cpp
new file mode 100644
index 0000000..643c996
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenABITypes.cpp
@@ -0,0 +1,70 @@
+//==--- CodeGenABITypes.cpp - Convert Clang types to LLVM types for ABI ----==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// CodeGenABITypes is a simple interface for getting LLVM types for
+// the parameters and the return value of a function given the Clang
+// types.
+//
+// The class is implemented as a public wrapper around the private
+// CodeGenTypes class in lib/CodeGen.
+//
+//===----------------------------------------------------------------------===//
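+//
+// Illustrative client usage (a sketch, not part of this file): given an
+// ASTContext &Ctx, an llvm::Module &M, and a FunctionDecl *FD,
+//
+//   CodeGen::CodeGenABITypes ABI(Ctx, M, /*CoverageInfo=*/nullptr);
+//   const CGFunctionInfo &FI = ABI.arrangeFreeFunctionType(
+//       FD->getType()->getCanonicalTypeUnqualified()
+//           .castAs<FunctionProtoType>(),
+//       FD);
+//
+// exposes the LLVM-level calling-convention information for FD without
+// running full IR generation.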
+
+#include "clang/CodeGen/CodeGenABITypes.h"
+#include "CodeGenModule.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Lex/HeaderSearchOptions.h"
+#include "clang/Lex/PreprocessorOptions.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenABITypes::CodeGenABITypes(ASTContext &C, llvm::Module &M,
+ CoverageSourceInfo *CoverageInfo)
+ : CGO(new CodeGenOptions), HSO(new HeaderSearchOptions),
+ PPO(new PreprocessorOptions),
+ CGM(new CodeGen::CodeGenModule(C, *HSO, *PPO, *CGO, M, C.getDiagnostics(),
+ CoverageInfo)) {}
+
+// Explicitly out-of-line because ~CodeGenModule() is private but
+// CodeGenABITypes.h is part of clang's API.
+CodeGenABITypes::~CodeGenABITypes() = default;
+
+const CGFunctionInfo &
+CodeGenABITypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
+ QualType receiverType) {
+ return CGM->getTypes().arrangeObjCMessageSendSignature(MD, receiverType);
+}
+
+const CGFunctionInfo &
+CodeGenABITypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> Ty,
+ const FunctionDecl *FD) {
+ return CGM->getTypes().arrangeFreeFunctionType(Ty, FD);
+}
+
+const CGFunctionInfo &
+CodeGenABITypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> Ty) {
+ return CGM->getTypes().arrangeFreeFunctionType(Ty);
+}
+
+const CGFunctionInfo &
+CodeGenABITypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
+ const FunctionProtoType *FTP,
+ const CXXMethodDecl *MD) {
+ return CGM->getTypes().arrangeCXXMethodType(RD, FTP, MD);
+}
+
+const CGFunctionInfo &CodeGenABITypes::arrangeFreeFunctionCall(
+ CanQualType returnType, ArrayRef<CanQualType> argTypes,
+ FunctionType::ExtInfo info, RequiredArgs args) {
+ return CGM->getTypes().arrangeLLVMFunctionInfo(
+ returnType, /*IsInstanceMethod=*/false, /*IsChainCall=*/false, argTypes,
+ info, args);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp
new file mode 100644
index 0000000..abef543
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp
@@ -0,0 +1,852 @@
+//===--- CodeGenAction.cpp - LLVM Code Generation Frontend Action ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CoverageMappingGen.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/CodeGen/BackendUtil.h"
+#include "clang/CodeGen/CodeGenAction.h"
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/FunctionInfo.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IRReader/IRReader.h"
+#include "llvm/Linker/Linker.h"
+#include "llvm/Object/FunctionIndexObjectFile.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/Timer.h"
+#include <memory>
+using namespace clang;
+using namespace llvm;
+
+namespace clang {
+ class BackendConsumer : public ASTConsumer {
+ virtual void anchor();
+ DiagnosticsEngine &Diags;
+ BackendAction Action;
+ const CodeGenOptions &CodeGenOpts;
+ const TargetOptions &TargetOpts;
+ const LangOptions &LangOpts;
+ raw_pwrite_stream *AsmOutStream;
+ ASTContext *Context;
+
+ Timer LLVMIRGeneration;
+
+ std::unique_ptr<CodeGenerator> Gen;
+
+ std::unique_ptr<llvm::Module> TheModule;
+ SmallVector<std::pair<unsigned, std::unique_ptr<llvm::Module>>, 4>
+ LinkModules;
+
+ // This is here so that the diagnostic printer knows the module a diagnostic
+ // refers to.
+ llvm::Module *CurLinkModule = nullptr;
+
+ public:
+ BackendConsumer(
+ BackendAction Action, DiagnosticsEngine &Diags,
+ const HeaderSearchOptions &HeaderSearchOpts,
+ const PreprocessorOptions &PPOpts, const CodeGenOptions &CodeGenOpts,
+ const TargetOptions &TargetOpts, const LangOptions &LangOpts,
+ bool TimePasses, const std::string &InFile,
+ const SmallVectorImpl<std::pair<unsigned, llvm::Module *>> &LinkModules,
+ raw_pwrite_stream *OS, LLVMContext &C,
+ CoverageSourceInfo *CoverageInfo = nullptr)
+ : Diags(Diags), Action(Action), CodeGenOpts(CodeGenOpts),
+ TargetOpts(TargetOpts), LangOpts(LangOpts), AsmOutStream(OS),
+ Context(nullptr), LLVMIRGeneration("LLVM IR Generation Time"),
+ Gen(CreateLLVMCodeGen(Diags, InFile, HeaderSearchOpts, PPOpts,
+ CodeGenOpts, C, CoverageInfo)) {
+ llvm::TimePassesIsEnabled = TimePasses;
+ for (auto &I : LinkModules)
+ this->LinkModules.push_back(
+ std::make_pair(I.first, std::unique_ptr<llvm::Module>(I.second)));
+ }
+ std::unique_ptr<llvm::Module> takeModule() { return std::move(TheModule); }
+ void releaseLinkModules() {
+ for (auto &I : LinkModules)
+ I.second.release();
+ }
+
+ void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) override {
+ Gen->HandleCXXStaticMemberVarInstantiation(VD);
+ }
+
+ void Initialize(ASTContext &Ctx) override {
+ assert(!Context && "initialized multiple times");
+
+ Context = &Ctx;
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.startTimer();
+
+ Gen->Initialize(Ctx);
+
+ TheModule.reset(Gen->GetModule());
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ bool HandleTopLevelDecl(DeclGroupRef D) override {
+ PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(),
+ Context->getSourceManager(),
+ "LLVM IR generation of declaration");
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.startTimer();
+
+ Gen->HandleTopLevelDecl(D);
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.stopTimer();
+
+ return true;
+ }
+
+ void HandleInlineMethodDefinition(CXXMethodDecl *D) override {
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ Context->getSourceManager(),
+ "LLVM IR generation of inline method");
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.startTimer();
+
+ Gen->HandleInlineMethodDefinition(D);
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ void HandleTranslationUnit(ASTContext &C) override {
+ {
+ PrettyStackTraceString CrashInfo("Per-file LLVM IR generation");
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.startTimer();
+
+ Gen->HandleTranslationUnit(C);
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ // Silently ignore if we weren't initialized for some reason.
+ if (!TheModule)
+ return;
+
+ // Make sure IR generation is happy with the module. This is released by
+ // the module provider.
+ llvm::Module *M = Gen->ReleaseModule();
+ if (!M) {
+ // The module has been released by IR gen on failures, do not double
+ // free.
+ TheModule.release();
+ return;
+ }
+
+ assert(TheModule.get() == M &&
+ "Unexpected module change during IR generation");
+
+ // Install an inline asm handler so that diagnostics get printed through
+ // our diagnostics hooks.
+ LLVMContext &Ctx = TheModule->getContext();
+ LLVMContext::InlineAsmDiagHandlerTy OldHandler =
+ Ctx.getInlineAsmDiagnosticHandler();
+ void *OldContext = Ctx.getInlineAsmDiagnosticContext();
+ Ctx.setInlineAsmDiagnosticHandler(InlineAsmDiagHandler, this);
+
+ LLVMContext::DiagnosticHandlerTy OldDiagnosticHandler =
+ Ctx.getDiagnosticHandler();
+ void *OldDiagnosticContext = Ctx.getDiagnosticContext();
+ Ctx.setDiagnosticHandler(DiagnosticHandler, this);
+
+ // Link LinkModule into this module if present, preserving its validity.
+ for (auto &I : LinkModules) {
+ unsigned LinkFlags = I.first;
+ CurLinkModule = I.second.get();
+ if (Linker::linkModules(*M, std::move(I.second), LinkFlags))
+ return;
+ }
+
+ EmitBackendOutput(Diags, CodeGenOpts, TargetOpts, LangOpts,
+ C.getTargetInfo().getDataLayoutString(),
+ TheModule.get(), Action, AsmOutStream);
+
+ Ctx.setInlineAsmDiagnosticHandler(OldHandler, OldContext);
+
+ Ctx.setDiagnosticHandler(OldDiagnosticHandler, OldDiagnosticContext);
+ }
+
+ void HandleTagDeclDefinition(TagDecl *D) override {
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ Context->getSourceManager(),
+ "LLVM IR generation of declaration");
+ Gen->HandleTagDeclDefinition(D);
+ }
+
+ void HandleTagDeclRequiredDefinition(const TagDecl *D) override {
+ Gen->HandleTagDeclRequiredDefinition(D);
+ }
+
+ void CompleteTentativeDefinition(VarDecl *D) override {
+ Gen->CompleteTentativeDefinition(D);
+ }
+
+ void HandleVTable(CXXRecordDecl *RD) override {
+ Gen->HandleVTable(RD);
+ }
+
+ void HandleLinkerOptionPragma(llvm::StringRef Opts) override {
+ Gen->HandleLinkerOptionPragma(Opts);
+ }
+
+ void HandleDetectMismatch(llvm::StringRef Name,
+ llvm::StringRef Value) override {
+ Gen->HandleDetectMismatch(Name, Value);
+ }
+
+ void HandleDependentLibrary(llvm::StringRef Opts) override {
+ Gen->HandleDependentLibrary(Opts);
+ }
+
+ static void InlineAsmDiagHandler(const llvm::SMDiagnostic &SM, void *Context,
+ unsigned LocCookie) {
+ SourceLocation Loc = SourceLocation::getFromRawEncoding(LocCookie);
+ ((BackendConsumer*)Context)->InlineAsmDiagHandler2(SM, Loc);
+ }
+
+ static void DiagnosticHandler(const llvm::DiagnosticInfo &DI,
+ void *Context) {
+ ((BackendConsumer *)Context)->DiagnosticHandlerImpl(DI);
+ }
+
+ void InlineAsmDiagHandler2(const llvm::SMDiagnostic &,
+ SourceLocation LocCookie);
+
+ void DiagnosticHandlerImpl(const llvm::DiagnosticInfo &DI);
+ /// \brief Specialized handler for InlineAsm diagnostic.
+ /// \return True if the diagnostic has been successfully reported, false
+ /// otherwise.
+ bool InlineAsmDiagHandler(const llvm::DiagnosticInfoInlineAsm &D);
+ /// \brief Specialized handler for StackSize diagnostic.
+ /// \return True if the diagnostic has been successfully reported, false
+ /// otherwise.
+ bool StackSizeDiagHandler(const llvm::DiagnosticInfoStackSize &D);
+ /// \brief Specialized handlers for optimization remarks.
+ /// Note that these handlers only accept remarks and they always handle
+ /// them.
+ void EmitOptimizationMessage(const llvm::DiagnosticInfoOptimizationBase &D,
+ unsigned DiagID);
+ void
+ OptimizationRemarkHandler(const llvm::DiagnosticInfoOptimizationRemark &D);
+ void OptimizationRemarkHandler(
+ const llvm::DiagnosticInfoOptimizationRemarkMissed &D);
+ void OptimizationRemarkHandler(
+ const llvm::DiagnosticInfoOptimizationRemarkAnalysis &D);
+ void OptimizationRemarkHandler(
+ const llvm::DiagnosticInfoOptimizationRemarkAnalysisFPCommute &D);
+ void OptimizationRemarkHandler(
+ const llvm::DiagnosticInfoOptimizationRemarkAnalysisAliasing &D);
+ void OptimizationFailureHandler(
+ const llvm::DiagnosticInfoOptimizationFailure &D);
+ };
+
+ void BackendConsumer::anchor() {}
+}
+
+/// ConvertBackendLocation - Convert a location in a temporary llvm::SourceMgr
+/// buffer to be a valid FullSourceLoc.
+static FullSourceLoc ConvertBackendLocation(const llvm::SMDiagnostic &D,
+ SourceManager &CSM) {
+ // Get both the clang and llvm source managers. The location is relative to
+ // a memory buffer that the LLVM Source Manager is handling, we need to add
+ // a copy to the Clang source manager.
+ const llvm::SourceMgr &LSM = *D.getSourceMgr();
+
+ // We need to copy the underlying LLVM memory buffer because llvm::SourceMgr
+ // already owns its copy and clang::SourceManager wants to own one as well.
+ const MemoryBuffer *LBuf =
+ LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(D.getLoc()));
+
+ // Create the copy and transfer ownership to clang::SourceManager.
+ // TODO: Avoid copying files into memory.
+ std::unique_ptr<llvm::MemoryBuffer> CBuf =
+ llvm::MemoryBuffer::getMemBufferCopy(LBuf->getBuffer(),
+ LBuf->getBufferIdentifier());
+ // FIXME: Keep a file ID map instead of creating new IDs for each location.
+ FileID FID = CSM.createFileID(std::move(CBuf));
+
+ // Translate the offset into the file.
+ unsigned Offset = D.getLoc().getPointer() - LBuf->getBufferStart();
+ SourceLocation NewLoc =
+ CSM.getLocForStartOfFile(FID).getLocWithOffset(Offset);
+ return FullSourceLoc(NewLoc, CSM);
+}
+
+
+/// InlineAsmDiagHandler2 - This function is invoked when the backend hits an
+/// error parsing inline asm. The SMDiagnostic indicates the error relative to
+/// the temporary memory buffer that the inline asm parser has set up.
+void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D,
+ SourceLocation LocCookie) {
+ // There are a couple of different kinds of errors we could get here. First,
+ // we re-format the SMDiagnostic in terms of a clang diagnostic.
+
+ // Strip "error: " off the start of the message string.
+ StringRef Message = D.getMessage();
+ if (Message.startswith("error: "))
+ Message = Message.substr(7);
+
+ // If the SMDiagnostic has an inline asm source location, translate it.
+ FullSourceLoc Loc;
+ if (D.getLoc() != SMLoc())
+ Loc = ConvertBackendLocation(D, Context->getSourceManager());
+
+ unsigned DiagID;
+ switch (D.getKind()) {
+ case llvm::SourceMgr::DK_Error:
+ DiagID = diag::err_fe_inline_asm;
+ break;
+ case llvm::SourceMgr::DK_Warning:
+ DiagID = diag::warn_fe_inline_asm;
+ break;
+ case llvm::SourceMgr::DK_Note:
+ DiagID = diag::note_fe_inline_asm;
+ break;
+ }
+ // If this problem has clang-level source location information, report the
+ // issue in the source with a note showing the instantiated
+ // code.
+ if (LocCookie.isValid()) {
+ Diags.Report(LocCookie, DiagID).AddString(Message);
+
+ if (D.getLoc().isValid()) {
+ DiagnosticBuilder B = Diags.Report(Loc, diag::note_fe_inline_asm_here);
+ // Convert the SMDiagnostic ranges into SourceRange and attach them
+ // to the diagnostic.
+ for (const std::pair<unsigned, unsigned> &Range : D.getRanges()) {
+ unsigned Column = D.getColumnNo();
+ B << SourceRange(Loc.getLocWithOffset(Range.first - Column),
+ Loc.getLocWithOffset(Range.second - Column));
+ }
+ }
+ return;
+ }
+
+ // Otherwise, report the backend issue as occurring in the generated .s file.
+ // If Loc is invalid, we still need to report the issue, it just gets no
+ // location info.
+ Diags.Report(Loc, DiagID).AddString(Message);
+}
+
+#define ComputeDiagID(Severity, GroupName, DiagID) \
+ do { \
+ switch (Severity) { \
+ case llvm::DS_Error: \
+ DiagID = diag::err_fe_##GroupName; \
+ break; \
+ case llvm::DS_Warning: \
+ DiagID = diag::warn_fe_##GroupName; \
+ break; \
+ case llvm::DS_Remark: \
+ llvm_unreachable("'remark' severity not expected"); \
+ break; \
+ case llvm::DS_Note: \
+ DiagID = diag::note_fe_##GroupName; \
+ break; \
+ } \
+ } while (false)
+
+#define ComputeDiagRemarkID(Severity, GroupName, DiagID) \
+ do { \
+ switch (Severity) { \
+ case llvm::DS_Error: \
+ DiagID = diag::err_fe_##GroupName; \
+ break; \
+ case llvm::DS_Warning: \
+ DiagID = diag::warn_fe_##GroupName; \
+ break; \
+ case llvm::DS_Remark: \
+ DiagID = diag::remark_fe_##GroupName; \
+ break; \
+ case llvm::DS_Note: \
+ DiagID = diag::note_fe_##GroupName; \
+ break; \
+ } \
+ } while (false)
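+
+// For example (illustrative expansion): ComputeDiagID(Severity, inline_asm,
+// DiagID) selects diag::err_fe_inline_asm, diag::warn_fe_inline_asm, or
+// diag::note_fe_inline_asm according to Severity and hits llvm_unreachable
+// on DS_Remark; ComputeDiagRemarkID additionally maps DS_Remark to
+// diag::remark_fe_inline_asm.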
+
+bool
+BackendConsumer::InlineAsmDiagHandler(const llvm::DiagnosticInfoInlineAsm &D) {
+ unsigned DiagID;
+ ComputeDiagID(D.getSeverity(), inline_asm, DiagID);
+ std::string Message = D.getMsgStr().str();
+
+ // If this problem has clang-level source location information, report the
+ // issue as being a problem in the source with a note showing the instantiated
+ // code.
+ SourceLocation LocCookie =
+ SourceLocation::getFromRawEncoding(D.getLocCookie());
+ if (LocCookie.isValid())
+ Diags.Report(LocCookie, DiagID).AddString(Message);
+ else {
+ // Otherwise, report the backend diagnostic as occurring in the generated
+ // .s file.
+ // If Loc is invalid, we still need to report the diagnostic, it just gets
+ // no location info.
+ FullSourceLoc Loc;
+ Diags.Report(Loc, DiagID).AddString(Message);
+ }
+ // We handled all the possible severities.
+ return true;
+}
+
+bool
+BackendConsumer::StackSizeDiagHandler(const llvm::DiagnosticInfoStackSize &D) {
+ if (D.getSeverity() != llvm::DS_Warning)
+ // For now, the only severity we support for StackSize diagnostics is
+ // warning; we do not know how to format other severities.
+ return false;
+
+ if (const Decl *ND = Gen->GetDeclForMangledName(D.getFunction().getName())) {
+ Diags.Report(ND->getASTContext().getFullLoc(ND->getLocation()),
+ diag::warn_fe_frame_larger_than)
+ << D.getStackSize() << Decl::castToDeclContext(ND);
+ return true;
+ }
+
+ return false;
+}
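+
+// For example (illustrative): building with -Wframe-larger-than=128 makes the
+// backend emit a DiagnosticInfoStackSize for each function whose frame exceeds
+// 128 bytes; the handler above maps it onto warn_fe_frame_larger_than at the
+// function's source location.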
+
+void BackendConsumer::EmitOptimizationMessage(
+ const llvm::DiagnosticInfoOptimizationBase &D, unsigned DiagID) {
+ // We only support warnings and remarks.
+ assert(D.getSeverity() == llvm::DS_Remark ||
+ D.getSeverity() == llvm::DS_Warning);
+
+ SourceManager &SourceMgr = Context->getSourceManager();
+ FileManager &FileMgr = SourceMgr.getFileManager();
+ StringRef Filename;
+ unsigned Line, Column;
+ SourceLocation DILoc;
+
+ if (D.isLocationAvailable()) {
+ D.getLocation(&Filename, &Line, &Column);
+ const FileEntry *FE = FileMgr.getFile(Filename);
+ if (FE && Line > 0) {
+ // If -gcolumn-info was not used, Column will be 0. This upsets the
+ // source manager, so pass 1 if Column is not set.
+ DILoc = SourceMgr.translateFileLineCol(FE, Line, Column ? Column : 1);
+ }
+ }
+
+ // If a location isn't available, try to approximate it using the associated
+ // function definition. We use the definition's right brace to differentiate
+ // from diagnostics that genuinely relate to the function itself.
+ FullSourceLoc Loc(DILoc, SourceMgr);
+ if (Loc.isInvalid())
+ if (const Decl *FD = Gen->GetDeclForMangledName(D.getFunction().getName()))
+ Loc = FD->getASTContext().getFullLoc(FD->getBodyRBrace());
+
+ Diags.Report(Loc, DiagID)
+ << AddFlagValue(D.getPassName() ? D.getPassName() : "")
+ << D.getMsg().str();
+
+ if (DILoc.isInvalid() && D.isLocationAvailable())
+ // If we were not able to translate the file:line:col information
+ // back to a SourceLocation, at least emit a note stating that
+ // we could not translate this location. This can happen in the
+ // case of #line directives.
+ Diags.Report(Loc, diag::note_fe_backend_optimization_remark_invalid_loc)
+ << Filename << Line << Column;
+}
+
+void BackendConsumer::OptimizationRemarkHandler(
+ const llvm::DiagnosticInfoOptimizationRemark &D) {
+ // Optimization remarks are active only if the -Rpass flag has a regular
+ // expression that matches the name of the pass in \p D.
+ if (CodeGenOpts.OptimizationRemarkPattern &&
+ CodeGenOpts.OptimizationRemarkPattern->match(D.getPassName()))
+ EmitOptimizationMessage(D, diag::remark_fe_backend_optimization_remark);
+}
+
+void BackendConsumer::OptimizationRemarkHandler(
+ const llvm::DiagnosticInfoOptimizationRemarkMissed &D) {
+ // Missed optimization remarks are active only if the -Rpass-missed
+ // flag has a regular expression that matches the name of the pass in
+ // \p D.
+ if (CodeGenOpts.OptimizationRemarkMissedPattern &&
+ CodeGenOpts.OptimizationRemarkMissedPattern->match(D.getPassName()))
+ EmitOptimizationMessage(D,
+ diag::remark_fe_backend_optimization_remark_missed);
+}
+
+void BackendConsumer::OptimizationRemarkHandler(
+ const llvm::DiagnosticInfoOptimizationRemarkAnalysis &D) {
+ // Optimization analysis remarks are active if the pass name is set to
+ // llvm::DiagnosticInfo::AlwaysPrint or if the -Rpass-analysis flag has a
+ // regular expression that matches the name of the pass in \p D.
+
+ if (D.getPassName() == llvm::DiagnosticInfo::AlwaysPrint ||
+ (CodeGenOpts.OptimizationRemarkAnalysisPattern &&
+ CodeGenOpts.OptimizationRemarkAnalysisPattern->match(D.getPassName())))
+ EmitOptimizationMessage(
+ D, diag::remark_fe_backend_optimization_remark_analysis);
+}
+
+void BackendConsumer::OptimizationRemarkHandler(
+ const llvm::DiagnosticInfoOptimizationRemarkAnalysisFPCommute &D) {
+ // Optimization analysis remarks are active if the pass name is set to
+ // llvm::DiagnosticInfo::AlwaysPrint or if the -Rpass-analysis flag has a
+ // regular expression that matches the name of the pass in \p D.
+
+ if (D.getPassName() == llvm::DiagnosticInfo::AlwaysPrint ||
+ (CodeGenOpts.OptimizationRemarkAnalysisPattern &&
+ CodeGenOpts.OptimizationRemarkAnalysisPattern->match(D.getPassName())))
+ EmitOptimizationMessage(
+ D, diag::remark_fe_backend_optimization_remark_analysis_fpcommute);
+}
+
+void BackendConsumer::OptimizationRemarkHandler(
+ const llvm::DiagnosticInfoOptimizationRemarkAnalysisAliasing &D) {
+ // Optimization analysis remarks are active if the pass name is set to
+ // llvm::DiagnosticInfo::AlwaysPrint or if the -Rpass-analysis flag has a
+ // regular expression that matches the name of the pass in \p D.
+
+ if (D.getPassName() == llvm::DiagnosticInfo::AlwaysPrint ||
+ (CodeGenOpts.OptimizationRemarkAnalysisPattern &&
+ CodeGenOpts.OptimizationRemarkAnalysisPattern->match(D.getPassName())))
+ EmitOptimizationMessage(
+ D, diag::remark_fe_backend_optimization_remark_analysis_aliasing);
+}
+
+void BackendConsumer::OptimizationFailureHandler(
+ const llvm::DiagnosticInfoOptimizationFailure &D) {
+ EmitOptimizationMessage(D, diag::warn_fe_backend_optimization_failure);
+}
+
+/// \brief This function is invoked when the backend needs
+/// to report something to the user.
+void BackendConsumer::DiagnosticHandlerImpl(const DiagnosticInfo &DI) {
+ unsigned DiagID = diag::err_fe_inline_asm;
+ llvm::DiagnosticSeverity Severity = DI.getSeverity();
+ // Get the diagnostic ID based on the kind of the diagnostic.
+ switch (DI.getKind()) {
+ case llvm::DK_InlineAsm:
+ if (InlineAsmDiagHandler(cast<DiagnosticInfoInlineAsm>(DI)))
+ return;
+ ComputeDiagID(Severity, inline_asm, DiagID);
+ break;
+ case llvm::DK_StackSize:
+ if (StackSizeDiagHandler(cast<DiagnosticInfoStackSize>(DI)))
+ return;
+ ComputeDiagID(Severity, backend_frame_larger_than, DiagID);
+ break;
+ case DK_Linker:
+ assert(CurLinkModule);
+ // FIXME: stop eating the warnings and notes.
+ if (Severity != DS_Error)
+ return;
+ DiagID = diag::err_fe_cannot_link_module;
+ break;
+ case llvm::DK_OptimizationRemark:
+ // Optimization remarks are always handled completely by this
+ // handler. There is no generic way of emitting them.
+ OptimizationRemarkHandler(cast<DiagnosticInfoOptimizationRemark>(DI));
+ return;
+ case llvm::DK_OptimizationRemarkMissed:
+ // Optimization remarks are always handled completely by this
+ // handler. There is no generic way of emitting them.
+ OptimizationRemarkHandler(cast<DiagnosticInfoOptimizationRemarkMissed>(DI));
+ return;
+ case llvm::DK_OptimizationRemarkAnalysis:
+ // Optimization remarks are always handled completely by this
+ // handler. There is no generic way of emitting them.
+ OptimizationRemarkHandler(
+ cast<DiagnosticInfoOptimizationRemarkAnalysis>(DI));
+ return;
+ case llvm::DK_OptimizationRemarkAnalysisFPCommute:
+ // Optimization remarks are always handled completely by this
+ // handler. There is no generic way of emitting them.
+ OptimizationRemarkHandler(
+ cast<DiagnosticInfoOptimizationRemarkAnalysisFPCommute>(DI));
+ return;
+ case llvm::DK_OptimizationRemarkAnalysisAliasing:
+ // Optimization remarks are always handled completely by this
+ // handler. There is no generic way of emitting them.
+ OptimizationRemarkHandler(
+ cast<DiagnosticInfoOptimizationRemarkAnalysisAliasing>(DI));
+ return;
+ case llvm::DK_OptimizationFailure:
+ // Optimization failures are always handled completely by this
+ // handler.
+ OptimizationFailureHandler(cast<DiagnosticInfoOptimizationFailure>(DI));
+ return;
+ default:
+ // Plugin IDs are not bound to any value as they are set dynamically.
+ ComputeDiagRemarkID(Severity, backend_plugin, DiagID);
+ break;
+ }
+ std::string MsgStorage;
+ {
+ raw_string_ostream Stream(MsgStorage);
+ DiagnosticPrinterRawOStream DP(Stream);
+ DI.print(DP);
+ }
+
+ if (DiagID == diag::err_fe_cannot_link_module) {
+ Diags.Report(diag::err_fe_cannot_link_module)
+ << CurLinkModule->getModuleIdentifier() << MsgStorage;
+ return;
+ }
+
+ // Report the backend message using the usual diagnostic mechanism.
+ FullSourceLoc Loc;
+ Diags.Report(Loc, DiagID).AddString(MsgStorage);
+}
+#undef ComputeDiagID
+
+CodeGenAction::CodeGenAction(unsigned _Act, LLVMContext *_VMContext)
+ : Act(_Act), VMContext(_VMContext ? _VMContext : new LLVMContext),
+ OwnsVMContext(!_VMContext) {}
+
+CodeGenAction::~CodeGenAction() {
+ TheModule.reset();
+ if (OwnsVMContext)
+ delete VMContext;
+}
+
+bool CodeGenAction::hasIRSupport() const { return true; }
+
+void CodeGenAction::EndSourceFileAction() {
+ // If the consumer creation failed, do nothing.
+ if (!getCompilerInstance().hasASTConsumer())
+ return;
+
+ // Take back ownership of link modules we passed to consumer.
+ if (!LinkModules.empty())
+ BEConsumer->releaseLinkModules();
+
+ // Steal the module from the consumer.
+ TheModule = BEConsumer->takeModule();
+}
+
+std::unique_ptr<llvm::Module> CodeGenAction::takeModule() {
+ return std::move(TheModule);
+}
+
+llvm::LLVMContext *CodeGenAction::takeLLVMContext() {
+ OwnsVMContext = false;
+ return VMContext;
+}
+
+static raw_pwrite_stream *
+GetOutputStream(CompilerInstance &CI, StringRef InFile, BackendAction Action) {
+ switch (Action) {
+ case Backend_EmitAssembly:
+ return CI.createDefaultOutputFile(false, InFile, "s");
+ case Backend_EmitLL:
+ return CI.createDefaultOutputFile(false, InFile, "ll");
+ case Backend_EmitBC:
+ return CI.createDefaultOutputFile(true, InFile, "bc");
+ case Backend_EmitNothing:
+ return nullptr;
+ case Backend_EmitMCNull:
+ return CI.createNullOutputFile();
+ case Backend_EmitObj:
+ return CI.createDefaultOutputFile(true, InFile, "o");
+ }
+
+ llvm_unreachable("Invalid action!");
+}
+
+std::unique_ptr<ASTConsumer>
+CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ BackendAction BA = static_cast<BackendAction>(Act);
+ raw_pwrite_stream *OS = GetOutputStream(CI, InFile, BA);
+ if (BA != Backend_EmitNothing && !OS)
+ return nullptr;
+
+ // Load bitcode modules to link with, if we need to.
+ if (LinkModules.empty())
+ for (auto &I : CI.getCodeGenOpts().LinkBitcodeFiles) {
+ const std::string &LinkBCFile = I.second;
+
+ auto BCBuf = CI.getFileManager().getBufferForFile(LinkBCFile);
+ if (!BCBuf) {
+ CI.getDiagnostics().Report(diag::err_cannot_open_file)
+ << LinkBCFile << BCBuf.getError().message();
+ LinkModules.clear();
+ return nullptr;
+ }
+
+ ErrorOr<std::unique_ptr<llvm::Module>> ModuleOrErr =
+ getLazyBitcodeModule(std::move(*BCBuf), *VMContext);
+ if (std::error_code EC = ModuleOrErr.getError()) {
+ CI.getDiagnostics().Report(diag::err_cannot_open_file) << LinkBCFile
+ << EC.message();
+ LinkModules.clear();
+ return nullptr;
+ }
+ addLinkModule(ModuleOrErr.get().release(), I.first);
+ }
+
+ CoverageSourceInfo *CoverageInfo = nullptr;
+ // Add the preprocessor callback only when the coverage mapping is generated.
+ if (CI.getCodeGenOpts().CoverageMapping) {
+ CoverageInfo = new CoverageSourceInfo;
+ CI.getPreprocessor().addPPCallbacks(
+ std::unique_ptr<PPCallbacks>(CoverageInfo));
+ }
+
+ std::unique_ptr<BackendConsumer> Result(new BackendConsumer(
+ BA, CI.getDiagnostics(), CI.getHeaderSearchOpts(),
+ CI.getPreprocessorOpts(), CI.getCodeGenOpts(), CI.getTargetOpts(),
+ CI.getLangOpts(), CI.getFrontendOpts().ShowTimers, InFile, LinkModules,
+ OS, *VMContext, CoverageInfo));
+ BEConsumer = Result.get();
+ return std::move(Result);
+}
+
+static void BitcodeInlineAsmDiagHandler(const llvm::SMDiagnostic &SM,
+ void *Context,
+ unsigned LocCookie) {
+ SM.print(nullptr, llvm::errs());
+}
+
+void CodeGenAction::ExecuteAction() {
+ // If this is an IR file, we have to treat it specially.
+ if (getCurrentFileKind() == IK_LLVM_IR) {
+ BackendAction BA = static_cast<BackendAction>(Act);
+ CompilerInstance &CI = getCompilerInstance();
+ raw_pwrite_stream *OS = GetOutputStream(CI, getCurrentFile(), BA);
+ if (BA != Backend_EmitNothing && !OS)
+ return;
+
+ bool Invalid;
+ SourceManager &SM = CI.getSourceManager();
+ FileID FID = SM.getMainFileID();
+ llvm::MemoryBuffer *MainFile = SM.getBuffer(FID, &Invalid);
+ if (Invalid)
+ return;
+
+ llvm::SMDiagnostic Err;
+ TheModule = parseIR(MainFile->getMemBufferRef(), Err, *VMContext);
+ if (!TheModule) {
+ // Translate from the diagnostic info to the SourceManager location if
+ // available.
+ // TODO: Unify this with ConvertBackendLocation()
+ SourceLocation Loc;
+ if (Err.getLineNo() > 0) {
+ assert(Err.getColumnNo() >= 0);
+ Loc = SM.translateFileLineCol(SM.getFileEntryForID(FID),
+ Err.getLineNo(), Err.getColumnNo() + 1);
+ }
+
+ // Strip off a leading diagnostic code if there is one.
+ StringRef Msg = Err.getMessage();
+ if (Msg.startswith("error: "))
+ Msg = Msg.substr(7);
+
+ unsigned DiagID =
+ CI.getDiagnostics().getCustomDiagID(DiagnosticsEngine::Error, "%0");
+
+ CI.getDiagnostics().Report(Loc, DiagID) << Msg;
+ return;
+ }
+ const TargetOptions &TargetOpts = CI.getTargetOpts();
+ if (TheModule->getTargetTriple() != TargetOpts.Triple) {
+ CI.getDiagnostics().Report(SourceLocation(),
+ diag::warn_fe_override_module)
+ << TargetOpts.Triple;
+ TheModule->setTargetTriple(TargetOpts.Triple);
+ }
+
+ auto DiagHandler = [&](const DiagnosticInfo &DI) {
+ TheModule->getContext().diagnose(DI);
+ };
+
+ // If we are performing a ThinLTO importing compilation (indicated by
+ // a non-empty index file option), then we need to promote to global scope
+ // and rename any local values that are potentially exported to other
+ // modules. Do this early so that the rest of the compilation sees the
+ // promoted symbols.
+ std::unique_ptr<FunctionInfoIndex> Index;
+ if (!CI.getCodeGenOpts().ThinLTOIndexFile.empty()) {
+ ErrorOr<std::unique_ptr<FunctionInfoIndex>> IndexOrErr =
+ llvm::getFunctionIndexForFile(CI.getCodeGenOpts().ThinLTOIndexFile,
+ DiagHandler);
+ if (std::error_code EC = IndexOrErr.getError()) {
+ std::string Error = EC.message();
+ errs() << "Error loading index file '"
+ << CI.getCodeGenOpts().ThinLTOIndexFile << "': " << Error
+ << "\n";
+ return;
+ }
+ Index = std::move(IndexOrErr.get());
+ assert(Index);
+ // Currently this requires creating a new Module object.
+ std::unique_ptr<llvm::Module> RenamedModule =
+ renameModuleForThinLTO(std::move(TheModule), Index.get());
+ if (!RenamedModule)
+ return;
+
+ TheModule = std::move(RenamedModule);
+ }
+
+ LLVMContext &Ctx = TheModule->getContext();
+ Ctx.setInlineAsmDiagnosticHandler(BitcodeInlineAsmDiagHandler);
+ EmitBackendOutput(CI.getDiagnostics(), CI.getCodeGenOpts(), TargetOpts,
+ CI.getLangOpts(), CI.getTarget().getDataLayoutString(),
+ TheModule.get(), BA, OS, std::move(Index));
+ return;
+ }
+
+ // Otherwise follow the normal AST path.
+ this->ASTFrontendAction::ExecuteAction();
+}
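+
+// Usage sketch: the IR path above is taken when the input is already LLVM IR,
+// e.g. 'clang -S foo.ll' or 'clang -c foo.bc'; such inputs skip the AST
+// entirely and go straight to EmitBackendOutput.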
+
+
+void EmitAssemblyAction::anchor() { }
+EmitAssemblyAction::EmitAssemblyAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitAssembly, _VMContext) {}
+
+void EmitBCAction::anchor() { }
+EmitBCAction::EmitBCAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitBC, _VMContext) {}
+
+void EmitLLVMAction::anchor() { }
+EmitLLVMAction::EmitLLVMAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitLL, _VMContext) {}
+
+void EmitLLVMOnlyAction::anchor() { }
+EmitLLVMOnlyAction::EmitLLVMOnlyAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitNothing, _VMContext) {}
+
+void EmitCodeGenOnlyAction::anchor() { }
+EmitCodeGenOnlyAction::EmitCodeGenOnlyAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitMCNull, _VMContext) {}
+
+void EmitObjAction::anchor() { }
+EmitObjAction::EmitObjAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitObj, _VMContext) {}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
new file mode 100644
index 0000000..048a043
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -0,0 +1,1939 @@
+//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the per-function state used while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGBlocks.h"
+#include "CGCleanup.h"
+#include "CGCUDARuntime.h"
+#include "CGCXXABI.h"
+#include "CGDebugInfo.h"
+#include "CGOpenMPRuntime.h"
+#include "CodeGenModule.h"
+#include "CodeGenPGO.h"
+#include "TargetInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Operator.h"
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
+ : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
+ Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
+ CGBuilderInserterTy(this)),
+ CurFn(nullptr), ReturnValue(Address::invalid()),
+ CapturedStmtInfo(nullptr),
+ SanOpts(CGM.getLangOpts().Sanitize), IsSanitizerScope(false),
+ CurFuncIsThunk(false), AutoreleaseResult(false), SawAsmBlock(false),
+ IsOutlinedSEHHelper(false),
+ BlockInfo(nullptr), BlockPointer(nullptr),
+ LambdaThisCaptureField(nullptr), NormalCleanupDest(nullptr),
+ NextCleanupDestIndex(1), FirstBlockInfo(nullptr), EHResumeBlock(nullptr),
+ ExceptionSlot(nullptr), EHSelectorSlot(nullptr),
+ DebugInfo(CGM.getModuleDebugInfo()),
+ DisableDebugInfo(false), DidCallStackSave(false), IndirectBranch(nullptr),
+ PGO(cgm), SwitchInsn(nullptr), SwitchWeights(nullptr),
+ CaseRangeBlock(nullptr), UnreachableBlock(nullptr), NumReturnExprs(0),
+ NumSimpleReturnExprs(0), CXXABIThisDecl(nullptr),
+ CXXABIThisValue(nullptr), CXXThisValue(nullptr),
+ CXXStructorImplicitParamDecl(nullptr),
+ CXXStructorImplicitParamValue(nullptr), OutermostConditional(nullptr),
+ CurLexicalScope(nullptr), TerminateLandingPad(nullptr),
+ TerminateHandler(nullptr), TrapBB(nullptr) {
+ if (!suppressNewContext)
+ CGM.getCXXABI().getMangleContext().startNewFunction();
+
+ llvm::FastMathFlags FMF;
+ if (CGM.getLangOpts().FastMath)
+ FMF.setUnsafeAlgebra();
+ if (CGM.getLangOpts().FiniteMathOnly) {
+ FMF.setNoNaNs();
+ FMF.setNoInfs();
+ }
+ if (CGM.getCodeGenOpts().NoNaNsFPMath) {
+ FMF.setNoNaNs();
+ }
+ if (CGM.getCodeGenOpts().NoSignedZeros) {
+ FMF.setNoSignedZeros();
+ }
+ if (CGM.getCodeGenOpts().ReciprocalMath) {
+ FMF.setAllowReciprocal();
+ }
+ Builder.SetFastMathFlags(FMF);
+}
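+
+// For instance (an illustrative sketch of the effect): with
+// -ffinite-math-only the flags above make a CreateFAdd lower to
+// "fadd nnan ninf double %l, %r" in the emitted IR, while -ffast-math
+// sets the unsafe-algebra flag as well.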
+
+CodeGenFunction::~CodeGenFunction() {
+ assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
+
+ // If there are any unclaimed block infos, go ahead and destroy them
+ // now. This can happen if IR-gen gets clever and skips evaluating
+ // something.
+ if (FirstBlockInfo)
+ destroyBlockInfos(FirstBlockInfo);
+
+ if (getLangOpts().OpenMP) {
+ CGM.getOpenMPRuntime().functionFinished(*this);
+ }
+}
+
+CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
+ AlignmentSource *Source) {
+ return getNaturalTypeAlignment(T->getPointeeType(), Source,
+ /*forPointee*/ true);
+}
+
+CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
+ AlignmentSource *Source,
+ bool forPointeeType) {
+ // Honor alignment typedef attributes even on incomplete types.
+ // We also honor them straight for C++ class types, even as pointees;
+ // there's an expressivity gap here.
+ if (auto TT = T->getAs<TypedefType>()) {
+ if (auto Align = TT->getDecl()->getMaxAlignment()) {
+ if (Source) *Source = AlignmentSource::AttributedType;
+ return getContext().toCharUnitsFromBits(Align);
+ }
+ }
+
+ if (Source) *Source = AlignmentSource::Type;
+
+ CharUnits Alignment;
+ if (T->isIncompleteType()) {
+ Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
+ } else {
+ // For C++ class pointees, we don't know whether we're pointing at a
+ // base or a complete object, so we generally need to use the
+ // non-virtual alignment.
+ const CXXRecordDecl *RD;
+ if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
+ Alignment = CGM.getClassPointerAlignment(RD);
+ } else {
+ Alignment = getContext().getTypeAlignInChars(T);
+ }
+
+ // Cap to the global maximum type alignment unless the alignment
+ // was somehow explicit on the type.
+ if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
+ if (Alignment.getQuantity() > MaxAlign &&
+ !getContext().isAlignmentRequired(T))
+ Alignment = CharUnits::fromQuantity(MaxAlign);
+ }
+ }
+ return Alignment;
+}
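+
+// Example of the typedef case above (hypothetical type):
+//   typedef float AlignedFloat __attribute__((aligned(16)));
+// getNaturalTypeAlignment reports 16 bytes with
+// AlignmentSource::AttributedType for AlignedFloat, where plain 'float'
+// would get 4 bytes with AlignmentSource::Type.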
+
+LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
+ AlignmentSource AlignSource;
+ CharUnits Alignment = getNaturalTypeAlignment(T, &AlignSource);
+ return LValue::MakeAddr(Address(V, Alignment), T, getContext(), AlignSource,
+ CGM.getTBAAInfo(T));
+}
+
+/// Given a value of type T* that may not be to a complete object,
+/// construct an l-value with the natural pointee alignment of T.
+LValue
+CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
+ AlignmentSource AlignSource;
+ CharUnits Align = getNaturalTypeAlignment(T, &AlignSource, /*pointee*/ true);
+ return MakeAddrLValue(Address(V, Align), T, AlignSource);
+}
+
+
+llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
+ return CGM.getTypes().ConvertTypeForMem(T);
+}
+
+llvm::Type *CodeGenFunction::ConvertType(QualType T) {
+ return CGM.getTypes().ConvertType(T);
+}
+
+TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
+ type = type.getCanonicalType();
+ while (true) {
+ switch (type->getTypeClass()) {
+#define TYPE(name, parent)
+#define ABSTRACT_TYPE(name, parent)
+#define NON_CANONICAL_TYPE(name, parent) case Type::name:
+#define DEPENDENT_TYPE(name, parent) case Type::name:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("non-canonical or dependent type in IR-generation");
+
+ case Type::Auto:
+ llvm_unreachable("undeduced auto type in IR-generation");
+
+ // Various scalar types.
+ case Type::Builtin:
+ case Type::Pointer:
+ case Type::BlockPointer:
+ case Type::LValueReference:
+ case Type::RValueReference:
+ case Type::MemberPointer:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ case Type::Enum:
+ case Type::ObjCObjectPointer:
+ return TEK_Scalar;
+
+ // Complexes.
+ case Type::Complex:
+ return TEK_Complex;
+
+ // Arrays, records, and Objective-C objects.
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::Record:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ return TEK_Aggregate;
+
+ // We operate on atomic values according to their underlying type.
+ case Type::Atomic:
+ type = cast<AtomicType>(type)->getValueType();
+ continue;
+ }
+ llvm_unreachable("unknown type kind!");
+ }
+}
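+
+// For instance (illustrative): 'int', pointers, and enums are TEK_Scalar;
+// '_Complex double' is TEK_Complex; arrays and structs are TEK_Aggregate;
+// and '_Atomic(int)' is classified by its underlying 'int', i.e. TEK_Scalar.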
+
+llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
+ // For cleanliness, we try to avoid emitting the return block for
+ // simple cases.
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ if (CurBB) {
+ assert(!CurBB->getTerminator() && "Unexpected terminated block.");
+
+ // We have a valid insert point, reuse it if it is empty or there are no
+ // explicit jumps to the return block.
+ if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
+ ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
+ delete ReturnBlock.getBlock();
+ } else
+ EmitBlock(ReturnBlock.getBlock());
+ return llvm::DebugLoc();
+ }
+
+ // Otherwise, if the return block is the target of a single direct
+ // branch then we can just put the code in that block instead. This
+ // cleans up functions which started with a unified return block.
+ if (ReturnBlock.getBlock()->hasOneUse()) {
+ llvm::BranchInst *BI =
+ dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
+ if (BI && BI->isUnconditional() &&
+ BI->getSuccessor(0) == ReturnBlock.getBlock()) {
+ // Record/return the DebugLoc of the simple 'return' expression to be used
+ // later by the actual 'ret' instruction.
+ llvm::DebugLoc Loc = BI->getDebugLoc();
+ Builder.SetInsertPoint(BI->getParent());
+ BI->eraseFromParent();
+ delete ReturnBlock.getBlock();
+ return Loc;
+ }
+ }
+
+ // FIXME: We are at an unreachable point, there is no reason to emit the block
+ // unless it has uses. However, we still need a place to put the debug
+ // region.end for now.
+
+ EmitBlock(ReturnBlock.getBlock());
+ return llvm::DebugLoc();
+}
+
+static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
+ if (!BB) return;
+ if (!BB->use_empty())
+ return CGF.CurFn->getBasicBlockList().push_back(BB);
+ delete BB;
+}
+
+void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
+ assert(BreakContinueStack.empty() &&
+ "mismatched push/pop in break/continue stack!");
+
+ bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
+ && NumSimpleReturnExprs == NumReturnExprs
+ && ReturnBlock.getBlock()->use_empty();
+ // Usually the return expression is evaluated before the cleanup
+ // code. If the function contains only a simple return statement,
+ // such as a constant, the location before the cleanup code becomes
+ // the last useful breakpoint in the function, because the simple
+ // return expression will be evaluated after the cleanup code. To be
+ // safe, set the debug location for cleanup code to the location of
+ // the return statement. Otherwise the cleanup code should be at the
+ // end of the function's lexical scope.
+ //
+ // If there are multiple branches to the return block, the branch
+ // instructions will get the location of the return statements and
+ // all will be fine.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ if (OnlySimpleReturnStmts)
+ DI->EmitLocation(Builder, LastStopPoint);
+ else
+ DI->EmitLocation(Builder, EndLoc);
+ }
+
+ // Pop any cleanups that might have been associated with the
+ // parameters. Do this in whatever block we're currently in; it's
+ // important to do this before we enter the return block or return
+ // edges will be *really* confused.
+ bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
+ bool HasOnlyLifetimeMarkers =
+ HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
+ bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
+ if (HasCleanups) {
+ // Make sure the line table doesn't jump back into the body for
+ // the ret after it's been at EndLoc.
+ if (CGDebugInfo *DI = getDebugInfo())
+ if (OnlySimpleReturnStmts)
+ DI->EmitLocation(Builder, EndLoc);
+
+ PopCleanupBlocks(PrologueCleanupDepth);
+ }
+
+ // Emit function epilog (to return).
+ llvm::DebugLoc Loc = EmitReturnBlock();
+
+ if (ShouldInstrumentFunction())
+ EmitFunctionInstrumentation("__cyg_profile_func_exit");
+
+ // Emit debug descriptor for function end.
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitFunctionEnd(Builder);
+
+ // Reset the debug location to that of the simple 'return' expression, if
+ // any, rather than that of the end of the function's scope '}'.
+ ApplyDebugLocation AL(*this, Loc);
+ EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
+ EmitEndEHSpec(CurCodeDecl);
+
+ assert(EHStack.empty() &&
+ "did not remove all scopes from cleanup stack!");
+
+ // If someone did an indirect goto, emit the indirect goto block at the end of
+ // the function.
+ if (IndirectBranch) {
+ EmitBlock(IndirectBranch->getParent());
+ Builder.ClearInsertionPoint();
+ }
+
+ // If some of our locals escaped, insert a call to llvm.localescape in the
+ // entry block.
+ if (!EscapedLocals.empty()) {
+ // Invert the map from local to index into a simple vector. There should be
+ // no holes.
+ SmallVector<llvm::Value *, 4> EscapeArgs;
+ EscapeArgs.resize(EscapedLocals.size());
+ for (auto &Pair : EscapedLocals)
+ EscapeArgs[Pair.second] = Pair.first;
+ llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
+ &CGM.getModule(), llvm::Intrinsic::localescape);
+ CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
+ }
+
+ // Remove the AllocaInsertPt instruction, which is just a convenience for us.
+ llvm::Instruction *Ptr = AllocaInsertPt;
+ AllocaInsertPt = nullptr;
+ Ptr->eraseFromParent();
+
+ // If someone took the address of a label but never did an indirect goto, we
+ // made a zero entry PHI node, which is illegal, zap it now.
+ if (IndirectBranch) {
+ llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
+ if (PN->getNumIncomingValues() == 0) {
+ PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
+ PN->eraseFromParent();
+ }
+ }
+
+ EmitIfUsed(*this, EHResumeBlock);
+ EmitIfUsed(*this, TerminateLandingPad);
+ EmitIfUsed(*this, TerminateHandler);
+ EmitIfUsed(*this, UnreachableBlock);
+
+ if (CGM.getCodeGenOpts().EmitDeclMetadata)
+ EmitDeclMetadata();
+
+ for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
+ I = DeferredReplacements.begin(),
+ E = DeferredReplacements.end();
+ I != E; ++I) {
+ I->first->replaceAllUsesWith(I->second);
+ I->first->eraseFromParent();
+ }
+}
+
+/// ShouldInstrumentFunction - Return true if the current function should be
+/// instrumented with __cyg_profile_func_* calls.
+bool CodeGenFunction::ShouldInstrumentFunction() {
+ if (!CGM.getCodeGenOpts().InstrumentFunctions)
+ return false;
+ if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
+ return false;
+ return true;
+}
+
+/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
+/// instrumentation function with the current function and the call site, if
+/// function instrumentation is enabled.
+void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
+ // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
+ llvm::PointerType *PointerTy = Int8PtrTy;
+ llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
+ llvm::FunctionType *FunctionTy =
+ llvm::FunctionType::get(VoidTy, ProfileFuncArgs, false);
+
+ llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
+ llvm::CallInst *CallSite = Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::returnaddress),
+ llvm::ConstantInt::get(Int32Ty, 0),
+ "callsite");
+
+ llvm::Value *args[] = {
+ llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
+ CallSite
+ };
+
+ EmitNounwindRuntimeCall(F, args);
+}
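+
+// In effect (a sketch of the generated code): with -finstrument-functions,
+// each function begins with the equivalent of
+//   __cyg_profile_func_enter((void *)fn, __builtin_return_address(0));
+// and FinishFunction emits the matching __cyg_profile_func_exit call.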
+
+void CodeGenFunction::EmitMCountInstrumentation() {
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+
+ llvm::Constant *MCountFn =
+ CGM.CreateRuntimeFunction(FTy, getTarget().getMCountName());
+ EmitNounwindRuntimeCall(MCountFn);
+}
+
+// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
+// information in the program executable. The argument information stored
+// includes the argument name, its type, the address and access qualifiers used.
+static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
+ CodeGenModule &CGM, llvm::LLVMContext &Context,
+ SmallVector<llvm::Metadata *, 5> &kernelMDArgs,
+ CGBuilderTy &Builder, ASTContext &ASTCtx) {
+ // Create MDNodes that represent the kernel arg metadata.
+ // Each MDNode is a list of the form "key" followed by N values, where N is
+ // the number of kernel arguments.
+
+ const PrintingPolicy &Policy = ASTCtx.getPrintingPolicy();
+
+ // MDNode for the kernel argument address space qualifiers.
+ SmallVector<llvm::Metadata *, 8> addressQuals;
+ addressQuals.push_back(llvm::MDString::get(Context, "kernel_arg_addr_space"));
+
+ // MDNode for the kernel argument access qualifiers (images only).
+ SmallVector<llvm::Metadata *, 8> accessQuals;
+ accessQuals.push_back(llvm::MDString::get(Context, "kernel_arg_access_qual"));
+
+ // MDNode for the kernel argument type names.
+ SmallVector<llvm::Metadata *, 8> argTypeNames;
+ argTypeNames.push_back(llvm::MDString::get(Context, "kernel_arg_type"));
+
+ // MDNode for the kernel argument base type names.
+ SmallVector<llvm::Metadata *, 8> argBaseTypeNames;
+ argBaseTypeNames.push_back(
+ llvm::MDString::get(Context, "kernel_arg_base_type"));
+
+ // MDNode for the kernel argument type qualifiers.
+ SmallVector<llvm::Metadata *, 8> argTypeQuals;
+ argTypeQuals.push_back(llvm::MDString::get(Context, "kernel_arg_type_qual"));
+
+ // MDNode for the kernel argument names.
+ SmallVector<llvm::Metadata *, 8> argNames;
+ argNames.push_back(llvm::MDString::get(Context, "kernel_arg_name"));
+
+ for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
+ const ParmVarDecl *parm = FD->getParamDecl(i);
+ QualType ty = parm->getType();
+ std::string typeQuals;
+
+ if (ty->isPointerType()) {
+ QualType pointeeTy = ty->getPointeeType();
+
+ // Get address qualifier.
+ addressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(
+ ASTCtx.getTargetAddressSpace(pointeeTy.getAddressSpace()))));
+
+ // Get argument type name.
+ std::string typeName =
+ pointeeTy.getUnqualifiedType().getAsString(Policy) + "*";
+
+ // Turn "unsigned type" to "utype"
+ std::string::size_type pos = typeName.find("unsigned");
+ if (pointeeTy.isCanonical() && pos != std::string::npos)
+ typeName.erase(pos+1, 8);
+
+ argTypeNames.push_back(llvm::MDString::get(Context, typeName));
+
+ std::string baseTypeName =
+ pointeeTy.getUnqualifiedType().getCanonicalType().getAsString(
+ Policy) +
+ "*";
+
+ // Turn "unsigned type" to "utype"
+ pos = baseTypeName.find("unsigned");
+ if (pos != std::string::npos)
+ baseTypeName.erase(pos+1, 8);
+
+ argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));
+
+ // Get argument type qualifiers:
+ if (ty.isRestrictQualified())
+ typeQuals = "restrict";
+ if (pointeeTy.isConstQualified() ||
+ (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
+ typeQuals += typeQuals.empty() ? "const" : " const";
+ if (pointeeTy.isVolatileQualified())
+ typeQuals += typeQuals.empty() ? "volatile" : " volatile";
+ } else {
+ uint32_t AddrSpc = 0;
+ if (ty->isImageType())
+ AddrSpc =
+ CGM.getContext().getTargetAddressSpace(LangAS::opencl_global);
+
+ addressQuals.push_back(
+ llvm::ConstantAsMetadata::get(Builder.getInt32(AddrSpc)));
+
+ // Get argument type name.
+ std::string typeName = ty.getUnqualifiedType().getAsString(Policy);
+
+ // Turn "unsigned type" to "utype"
+ std::string::size_type pos = typeName.find("unsigned");
+ if (ty.isCanonical() && pos != std::string::npos)
+ typeName.erase(pos+1, 8);
+
+ argTypeNames.push_back(llvm::MDString::get(Context, typeName));
+
+ std::string baseTypeName =
+ ty.getUnqualifiedType().getCanonicalType().getAsString(Policy);
+
+ // Turn "unsigned type" to "utype"
+ pos = baseTypeName.find("unsigned");
+ if (pos != std::string::npos)
+ baseTypeName.erase(pos+1, 8);
+
+ argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));
+
+ // Get argument type qualifiers:
+ if (ty.isConstQualified())
+ typeQuals = "const";
+ if (ty.isVolatileQualified())
+ typeQuals += typeQuals.empty() ? "volatile" : " volatile";
+ }
+
+ argTypeQuals.push_back(llvm::MDString::get(Context, typeQuals));
+
+ // Get image access qualifier:
+ if (ty->isImageType()) {
+ const OpenCLImageAccessAttr *A = parm->getAttr<OpenCLImageAccessAttr>();
+ if (A && A->isWriteOnly())
+ accessQuals.push_back(llvm::MDString::get(Context, "write_only"));
+ else
+ accessQuals.push_back(llvm::MDString::get(Context, "read_only"));
+ // FIXME: what about read_write?
+ } else
+ accessQuals.push_back(llvm::MDString::get(Context, "none"));
+
+ // Get argument name.
+ argNames.push_back(llvm::MDString::get(Context, parm->getName()));
+ }
+
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, addressQuals));
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, accessQuals));
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeNames));
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, argBaseTypeNames));
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeQuals));
+ if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, argNames));
+}
+
+void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
+ llvm::Function *Fn)
+{
+ if (!FD->hasAttr<OpenCLKernelAttr>())
+ return;
+
+ llvm::LLVMContext &Context = getLLVMContext();
+
+ SmallVector<llvm::Metadata *, 5> kernelMDArgs;
+ kernelMDArgs.push_back(llvm::ConstantAsMetadata::get(Fn));
+
+ GenOpenCLArgMetadata(FD, Fn, CGM, Context, kernelMDArgs, Builder,
+ getContext());
+
+ if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
+ QualType hintQTy = A->getTypeHint();
+ const ExtVectorType *hintEltQTy = hintQTy->getAs<ExtVectorType>();
+ bool isSignedInteger =
+ hintQTy->isSignedIntegerType() ||
+ (hintEltQTy && hintEltQTy->getElementType()->isSignedIntegerType());
+ llvm::Metadata *attrMDArgs[] = {
+ llvm::MDString::get(Context, "vec_type_hint"),
+ llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
+ CGM.getTypes().ConvertType(A->getTypeHint()))),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::IntegerType::get(Context, 32),
+ llvm::APInt(32, (uint64_t)(isSignedInteger ? 1 : 0))))};
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
+ }
+
+ if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
+ llvm::Metadata *attrMDArgs[] = {
+ llvm::MDString::get(Context, "work_group_size_hint"),
+ llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
+ llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
+ llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
+ }
+
+ if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
+ llvm::Metadata *attrMDArgs[] = {
+ llvm::MDString::get(Context, "reqd_work_group_size"),
+ llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
+ llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
+ llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
+ }
+
+ llvm::MDNode *kernelMDNode = llvm::MDNode::get(Context, kernelMDArgs);
+ llvm::NamedMDNode *OpenCLKernelMetadata =
+ CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
+ OpenCLKernelMetadata->addOperand(kernelMDNode);
+}
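+// A rough sketch of the resulting metadata, for a hypothetical kernel
+// 'kernel void foo(global int *a, read_only image2d_t img)' (the exact
+// address-space numbers are target-dependent):
+//   !opencl.kernels = !{!0}
+//   !0 = !{void (...)* @foo,
+//          !{!"kernel_arg_addr_space", i32 1, i32 1},
+//          !{!"kernel_arg_access_qual", !"none", !"read_only"},
+//          !{!"kernel_arg_type", !"int*", !"image2d_t"}, ...}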
+
+/// Determine whether the function F ends with a return stmt.
+static bool endsWithReturn(const Decl* F) {
+ const Stmt *Body = nullptr;
+ if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
+ Body = FD->getBody();
+ else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
+ Body = OMD->getBody();
+
+ if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
+ auto LastStmt = CS->body_rbegin();
+ if (LastStmt != CS->body_rend())
+ return isa<ReturnStmt>(*LastStmt);
+ }
+ return false;
+}
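+// For example, a body like '{ foo(); return; }' ends with a return, while
+// '{ foo(); }' does not; StartFunction uses this to decide whether the
+// implicit return at the end of a void function should be counted.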
+
+void CodeGenFunction::StartFunction(GlobalDecl GD,
+ QualType RetTy,
+ llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo,
+ const FunctionArgList &Args,
+ SourceLocation Loc,
+ SourceLocation StartLoc) {
+ assert(!CurFn &&
+ "Do not use a CodeGenFunction object for more than one function");
+
+ const Decl *D = GD.getDecl();
+
+ DidCallStackSave = false;
+ CurCodeDecl = D;
+ CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
+ FnRetTy = RetTy;
+ CurFn = Fn;
+ CurFnInfo = &FnInfo;
+ assert(CurFn->isDeclaration() && "Function already has body?");
+
+ if (CGM.isInSanitizerBlacklist(Fn, Loc))
+ SanOpts.clear();
+
+ if (D) {
+ // Apply the no_sanitize* attributes to SanOpts.
+ for (auto Attr : D->specific_attrs<NoSanitizeAttr>())
+ SanOpts.Mask &= ~Attr->getMask();
+ }
+
+ // Apply sanitizer attributes to the function.
+ if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
+ Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
+ if (SanOpts.has(SanitizerKind::Thread))
+ Fn->addFnAttr(llvm::Attribute::SanitizeThread);
+ if (SanOpts.has(SanitizerKind::Memory))
+ Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
+ if (SanOpts.has(SanitizerKind::SafeStack))
+ Fn->addFnAttr(llvm::Attribute::SafeStack);
+
+ // Pass inline keyword to optimizer if it appears explicitly on any
+ // declaration. Also, in the case of -fno-inline, attach the NoInline
+ // attribute to all functions that are not marked AlwaysInline.
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+ if (!CGM.getCodeGenOpts().NoInline) {
+ for (auto RI : FD->redecls())
+ if (RI->isInlineSpecified()) {
+ Fn->addFnAttr(llvm::Attribute::InlineHint);
+ break;
+ }
+ } else if (!FD->hasAttr<AlwaysInlineAttr>())
+ Fn->addFnAttr(llvm::Attribute::NoInline);
+ }
+
+ if (getLangOpts().OpenCL) {
+ // Add metadata for a kernel function.
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ EmitOpenCLKernelMetadata(FD, Fn);
+ }
+
+ // If we are checking function types, emit a function type signature as
+ // prologue data.
+ if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+ if (llvm::Constant *PrologueSig =
+ CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
+ llvm::Constant *FTRTTIConst =
+ CGM.GetAddrOfRTTIDescriptor(FD->getType(), /*ForEH=*/true);
+ llvm::Constant *PrologueStructElems[] = { PrologueSig, FTRTTIConst };
+ llvm::Constant *PrologueStructConst =
+ llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
+ Fn->setPrologueData(PrologueStructConst);
+ }
+ }
+ }
+
+ // If we're in C++ mode and the function name is "main", it is guaranteed
+ // to be norecurse by the standard (3.6.1.3 "The function main shall not be
+ // used within a program").
+ if (getLangOpts().CPlusPlus)
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ if (FD->isMain())
+ Fn->addFnAttr(llvm::Attribute::NoRecurse);
+
+ llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
+
+ // Create a marker to make it easy to insert allocas into the entry block
+ // later. Don't create this with the builder, because we don't want it
+ // folded.
+ llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
+ AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
+ if (Builder.isNamePreserving())
+ AllocaInsertPt->setName("allocapt");
+
+ ReturnBlock = getJumpDestInCurrentScope("return");
+
+ Builder.SetInsertPoint(EntryBB);
+
+ // Emit subprogram debug descriptor.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ SmallVector<QualType, 16> ArgTypes;
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i) {
+ ArgTypes.push_back((*i)->getType());
+ }
+
+ QualType FnType =
+ getContext().getFunctionType(RetTy, ArgTypes,
+ FunctionProtoType::ExtProtoInfo());
+ DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, Builder);
+ }
+
+ if (ShouldInstrumentFunction())
+ EmitFunctionInstrumentation("__cyg_profile_func_enter");
+
+ if (CGM.getCodeGenOpts().InstrumentForProfiling)
+ EmitMCountInstrumentation();
+
+ if (RetTy->isVoidType()) {
+ // Void type; nothing to return.
+ ReturnValue = Address::invalid();
+
+ // Count the implicit return.
+ if (!endsWithReturn(D))
+ ++NumReturnExprs;
+ } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
+ // Indirect aggregate return; emit returned value directly into sret slot.
+ // This reduces code size, and affects correctness in C++.
+ auto AI = CurFn->arg_begin();
+ if (CurFnInfo->getReturnInfo().isSRetAfterThis())
+ ++AI;
+ ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
+ } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
+ !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
+ // Load the sret pointer from the argument struct and return into that.
+ unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
+ llvm::Function::arg_iterator EI = CurFn->arg_end();
+ --EI;
+ llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
+ Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
+ ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
+ } else {
+ ReturnValue = CreateIRTemp(RetTy, "retval");
+
+ // Tell the epilog emitter to autorelease the result. We do this
+ // now so that various specialized functions can suppress it
+ // during their IR-generation.
+ if (getLangOpts().ObjCAutoRefCount &&
+ !CurFnInfo->isReturnsRetained() &&
+ RetTy->isObjCRetainableType())
+ AutoreleaseResult = true;
+ }
+
+ EmitStartEHSpec(CurCodeDecl);
+
+ PrologueCleanupDepth = EHStack.stable_begin();
+ EmitFunctionProlog(*CurFnInfo, CurFn, Args);
+
+ if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
+ CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
+ if (MD->getParent()->isLambda() &&
+ MD->getOverloadedOperator() == OO_Call) {
+ // We're in a lambda; figure out the captures.
+ MD->getParent()->getCaptureFields(LambdaCaptureFields,
+ LambdaThisCaptureField);
+ if (LambdaThisCaptureField) {
+ // If this lambda captures this, load it.
+ LValue ThisLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
+ CXXThisValue = EmitLoadOfLValue(ThisLValue,
+ SourceLocation()).getScalarVal();
+ }
+ for (auto *FD : MD->getParent()->fields()) {
+ if (FD->hasCapturedVLAType()) {
+ auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
+ SourceLocation()).getScalarVal();
+ auto VAT = FD->getCapturedVLAType();
+ VLASizeMap[VAT->getSizeExpr()] = ExprArg;
+ }
+ }
+ } else {
+ // Not in a lambda; just use 'this' from the method.
+ // FIXME: Should we generate a new load for each use of 'this'? The
+ // fast register allocator would be happier...
+ CXXThisValue = CXXABIThisValue;
+ }
+ }
+
+ // If any of the arguments have a variably modified type, make sure to
+ // emit the type size.
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i) {
+ const VarDecl *VD = *i;
+
+ // Dig out the type as written from ParmVarDecls; it's unclear whether
+ // the standard (C99 6.9.1p10) requires this, but we're following the
+ // precedent set by gcc.
+ QualType Ty;
+ if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
+ Ty = PVD->getOriginalType();
+ else
+ Ty = VD->getType();
+
+ if (Ty->isVariablyModifiedType())
+ EmitVariablyModifiedType(Ty);
+ }
+ // Emit a location at the end of the prologue.
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitLocation(Builder, StartLoc);
+}
+
+void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args,
+ const Stmt *Body) {
+ incrementProfileCounter(Body);
+ if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
+ EmitCompoundStmtWithoutScope(*S);
+ else
+ EmitStmt(Body);
+}
+
+/// When instrumenting to collect profile data, the counts for some blocks
+/// such as switch cases must not include the fall-through counts, so
+/// emit a branch around the instrumentation code. When not instrumenting,
+/// this just calls EmitBlock().
+void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
+ const Stmt *S) {
+ llvm::BasicBlock *SkipCountBB = nullptr;
+ if (HaveInsertPoint() && CGM.getCodeGenOpts().ProfileInstrGenerate) {
+ // When instrumenting for profiling, the fallthrough to certain
+ // statements needs to skip over the instrumentation code so that we
+ // get an accurate count.
+ SkipCountBB = createBasicBlock("skipcount");
+ EmitBranch(SkipCountBB);
+ }
+ EmitBlock(BB);
+ uint64_t CurrentCount = getCurrentProfileCount();
+ incrementProfileCounter(S);
+ setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
+ if (SkipCountBB)
+ EmitBlock(SkipCountBB);
+}
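+// Roughly, for a case label that is both jumped to and fallen into, the
+// emitted layout becomes (a sketch; block names are illustrative):
+//   br label %skipcount   ; fall-through path bypasses the counter
+// bb:                     ; reached only by direct branches
+//   <increment profile counter>
+// skipcount:
+//   ...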
+
+/// Tries to mark the given function nounwind based on the absence of any
+/// throwing calls within it. We believe this is lightweight enough to do
+/// at -O0.
+static void TryMarkNoThrow(llvm::Function *F) {
+ // LLVM treats 'nounwind' on a function as part of the type, so we
+ // can't do this on functions whose definition may be overridden.
+ if (F->mayBeOverridden()) return;
+
+ for (llvm::BasicBlock &BB : *F)
+ for (llvm::Instruction &I : BB)
+ if (I.mayThrow())
+ return;
+
+ F->setDoesNotThrow();
+}
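+// For example, a function whose body contains only arithmetic and calls to
+// functions already marked nounwind has no instruction for which mayThrow()
+// is true, so it receives the nounwind attribute here even at -O0.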
+
+void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo) {
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+
+ // Check if we should generate debug info for this function.
+ if (FD->hasAttr<NoDebugAttr>())
+ DebugInfo = nullptr; // disable debug info indefinitely for this function
+
+ FunctionArgList Args;
+ QualType ResTy = FD->getReturnType();
+
+ CurGD = GD;
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
+ if (MD && MD->isInstance()) {
+ if (CGM.getCXXABI().HasThisReturn(GD))
+ ResTy = MD->getThisType(getContext());
+ else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
+ ResTy = CGM.getContext().VoidPtrTy;
+ CGM.getCXXABI().buildThisParam(*this, Args);
+ }
+
+ for (auto *Param : FD->params()) {
+ Args.push_back(Param);
+ if (!Param->hasAttr<PassObjectSizeAttr>())
+ continue;
+
+ IdentifierInfo *NoID = nullptr;
+ auto *Implicit = ImplicitParamDecl::Create(
+ getContext(), Param->getDeclContext(), Param->getLocation(), NoID,
+ getContext().getSizeType());
+ SizeArguments[Param] = Implicit;
+ Args.push_back(Implicit);
+ }
+
+ if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
+ CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
+
+ SourceRange BodyRange;
+ if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
+ CurEHLocation = BodyRange.getEnd();
+
+ // Use the location of the start of the function to determine where
+ // the function definition is located. By default use the location
+ // of the declaration as the location for the subprogram. A function
+ // may lack a declaration in the source code if it is created by
+ // codegen (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
+ SourceLocation Loc = FD->getLocation();
+
+ // If this is a function specialization then use the pattern body
+ // as the location for the function.
+ if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
+ if (SpecDecl->hasBody(SpecDecl))
+ Loc = SpecDecl->getLocation();
+
+ // Emit the standard function prologue.
+ StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
+
+ // Generate the body of the function.
+ PGO.assignRegionCounters(GD, CurFn);
+ if (isa<CXXDestructorDecl>(FD))
+ EmitDestructorBody(Args);
+ else if (isa<CXXConstructorDecl>(FD))
+ EmitConstructorBody(Args);
+ else if (getLangOpts().CUDA &&
+ !getLangOpts().CUDAIsDevice &&
+ FD->hasAttr<CUDAGlobalAttr>())
+ CGM.getCUDARuntime().emitDeviceStub(*this, Args);
+ else if (isa<CXXConversionDecl>(FD) &&
+ cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) {
+ // The lambda conversion to block pointer is special; the semantics can't be
+ // expressed in the AST, so IRGen needs to special-case it.
+ EmitLambdaToBlockPointerBody(Args);
+ } else if (isa<CXXMethodDecl>(FD) &&
+ cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
+ // The lambda static invoker function is special, because it forwards or
+ // clones the body of the function call operator (but is actually static).
+ EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
+ } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
+ (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
+ cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
+ // Implicit copy-assignment gets the same special treatment as implicit
+ // copy-constructors.
+ emitImplicitAssignmentOperatorBody(Args);
+ } else if (Stmt *Body = FD->getBody()) {
+ EmitFunctionBody(Args, Body);
+ } else
+ llvm_unreachable("no definition for emitted function");
+
+ // C++11 [stmt.return]p2:
+ // Flowing off the end of a function [...] results in undefined behavior in
+ // a value-returning function.
+ // C11 6.9.1p12:
+ // If the '}' that terminates a function is reached, and the value of the
+ // function call is used by the caller, the behavior is undefined.
+ if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
+ !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
+ if (SanOpts.has(SanitizerKind::Return)) {
+ SanitizerScope SanScope(this);
+ llvm::Value *IsFalse = Builder.getFalse();
+ EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
+ "missing_return", EmitCheckSourceLocation(FD->getLocation()),
+ None);
+ } else if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ EmitTrapCall(llvm::Intrinsic::trap);
+ }
+ Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+ }
+
+ // Emit the standard function epilogue.
+ FinishFunction(BodyRange.getEnd());
+
+ // If we haven't marked the function nothrow through other means, do
+ // a quick pass now to see if we can.
+ if (!CurFn->doesNotThrow())
+ TryMarkNoThrow(CurFn);
+}
+
+/// ContainsLabel - Return true if the statement contains a label anywhere
+/// within it. If a statement is not executed normally and contains no
+/// label, its code can simply be removed.
+bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
+ // Null statement, not a label!
+ if (!S) return false;
+
+ // If this is a label, we have to emit the code, consider something like:
+ // if (0) { ... foo: bar(); } goto foo;
+ //
+ // TODO: If anyone cared, we could track __label__'s, since we know that you
+ // can't jump to one from outside their declared region.
+ if (isa<LabelStmt>(S))
+ return true;
+
+ // If this is a case/default statement, and we haven't seen a switch, we have
+ // to emit the code.
+ if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
+ return true;
+
+ // If this is a switch statement, we want to ignore cases below it.
+ if (isa<SwitchStmt>(S))
+ IgnoreCaseStmts = true;
+
+ // Scan subexpressions for verboten labels.
+ for (const Stmt *SubStmt : S->children())
+ if (ContainsLabel(SubStmt, IgnoreCaseStmts))
+ return true;
+
+ return false;
+}
+
+/// containsBreak - Return true if the statement contains a break out of it.
+/// Breaks nested inside a switch or loop within the statement do not count,
+/// since they do not break out of the statement itself.
+bool CodeGenFunction::containsBreak(const Stmt *S) {
+ // Null statement, no break out of it.
+ if (!S) return false;
+
+ // If this is a switch or loop that defines its own break scope, then we can
+ // include it and anything inside of it.
+ if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
+ isa<ForStmt>(S))
+ return false;
+
+ if (isa<BreakStmt>(S))
+ return true;
+
+ // Scan subexpressions for verboten breaks.
+ for (const Stmt *SubStmt : S->children())
+ if (containsBreak(SubStmt))
+ return true;
+
+ return false;
+}
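+// For example, containsBreak is true for 'if (x) break;' but false for
+// 'while (x) { break; }', since the while loop defines its own break scope.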
+
+
+/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+/// to a constant, or if it does but contains a label, return false. If it
+/// constant folds, return true and set the boolean result in ResultBool.
+bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
+ bool &ResultBool) {
+ llvm::APSInt ResultInt;
+ if (!ConstantFoldsToSimpleInteger(Cond, ResultInt))
+ return false;
+
+ ResultBool = ResultInt.getBoolValue();
+ return true;
+}
+
+/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+/// to a constant, or if it does but contains a label, return false. If it
+/// constant folds, return true and set the folded value in ResultInt.
+bool CodeGenFunction::
+ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &ResultInt) {
+ // FIXME: Rename and handle conversion of other evaluatable things
+ // to bool.
+ llvm::APSInt Int;
+ if (!Cond->EvaluateAsInt(Int, getContext()))
+ return false; // Not foldable, not integer or not fully evaluatable.
+
+ if (CodeGenFunction::ContainsLabel(Cond))
+ return false; // Contains a label.
+
+ ResultInt = Int;
+ return true;
+}
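+// A minimal usage sketch: callers typically fold the controlling expression
+// before emitting any control flow, e.g.
+//   bool CondConstant;
+//   if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant))
+//     /* emit only the arm selected by CondConstant */;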
+
+
+
+/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
+/// statement) to the specified blocks. Based on the condition, this might try
+/// to simplify the codegen of the conditional based on the branch.
+///
+void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
+ llvm::BasicBlock *TrueBlock,
+ llvm::BasicBlock *FalseBlock,
+ uint64_t TrueCount) {
+ Cond = Cond->IgnoreParens();
+
+ if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
+
+ // Handle X && Y in a condition.
+ if (CondBOp->getOpcode() == BO_LAnd) {
+ // If we have "1 && X", simplify the code. "0 && X" would have constant
+ // folded if the case was simple enough.
+ bool ConstantBool = false;
+ if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
+ ConstantBool) {
+ // br(1 && X) -> br(X).
+ incrementProfileCounter(CondBOp);
+ return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
+ TrueCount);
+ }
+
+ // If we have "X && 1", simplify the code to use an uncond branch.
+ // "X && 0" would have been constant folded to 0.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
+ ConstantBool) {
+ // br(X && 1) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
+ TrueCount);
+ }
+
+ // Emit the LHS as a conditional. If the LHS conditional is false, we
+ // want to jump to the FalseBlock.
+ llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
+ // The counter tells us how often we evaluate RHS, and all of TrueCount
+ // can be propagated to that branch.
+ uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
+
+ ConditionalEvaluation eval(*this);
+ {
+ ApplyDebugLocation DL(*this, Cond);
+ EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
+ EmitBlock(LHSTrue);
+ }
+
+ incrementProfileCounter(CondBOp);
+ setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
+
+ // Any temporaries created here are conditional.
+ eval.begin(*this);
+ EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
+ eval.end(*this);
+
+ return;
+ }
+
+ if (CondBOp->getOpcode() == BO_LOr) {
+ // If we have "0 || X", simplify the code. "1 || X" would have constant
+ // folded if the case was simple enough.
+ bool ConstantBool = false;
+ if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
+ !ConstantBool) {
+ // br(0 || X) -> br(X).
+ incrementProfileCounter(CondBOp);
+ return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
+ TrueCount);
+ }
+
+ // If we have "X || 0", simplify the code to use an uncond branch.
+ // "X || 1" would have been constant folded to 1.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
+ !ConstantBool) {
+ // br(X || 0) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
+ TrueCount);
+ }
+
+ // Emit the LHS as a conditional. If the LHS conditional is true, we
+ // want to jump to the TrueBlock.
+ llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
+ // We have the count for entry to the RHS and for the whole expression
+ // being true, so we can divvy up the true count between the short
+ // circuit and the RHS.
+ uint64_t LHSCount =
+ getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
+ uint64_t RHSCount = TrueCount - LHSCount;
+
+ ConditionalEvaluation eval(*this);
+ {
+ ApplyDebugLocation DL(*this, Cond);
+ EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
+ EmitBlock(LHSFalse);
+ }
+
+ incrementProfileCounter(CondBOp);
+ setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
+
+ // Any temporaries created here are conditional.
+ eval.begin(*this);
+ EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);
+
+ eval.end(*this);
+
+ return;
+ }
+ }
+
+ if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
+ // br(!x, t, f) -> br(x, f, t)
+ if (CondUOp->getOpcode() == UO_LNot) {
+ // Negate the count.
+ uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
+ // Negate the condition and swap the destination blocks.
+ return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
+ FalseCount);
+ }
+ }
+
+ if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
+ // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
+ llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
+
+ ConditionalEvaluation cond(*this);
+ EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
+ getProfileCount(CondOp));
+
+ // When computing PGO branch weights, we only know the overall count for
+ // the true block. This code is essentially doing tail duplication of the
+ // naive code-gen, introducing new edges for which counts are not
+ // available. Divide the counts proportionally between the LHS and RHS of
+ // the conditional operator.
+ uint64_t LHSScaledTrueCount = 0;
+ if (TrueCount) {
+ double LHSRatio =
+ getProfileCount(CondOp) / (double)getCurrentProfileCount();
+ LHSScaledTrueCount = TrueCount * LHSRatio;
+ }
+
+ cond.begin(*this);
+ EmitBlock(LHSBlock);
+ incrementProfileCounter(CondOp);
+ {
+ ApplyDebugLocation DL(*this, Cond);
+ EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
+ LHSScaledTrueCount);
+ }
+ cond.end(*this);
+
+ cond.begin(*this);
+ EmitBlock(RHSBlock);
+ EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
+ TrueCount - LHSScaledTrueCount);
+ cond.end(*this);
+
+ return;
+ }
+
+ if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
+ // Conditional operator handling can give us a throw expression as a
+ // condition for a case like:
+ // br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
+ // Fold this to:
+ // br(c, throw x, br(y, t, f))
+ EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
+ return;
+ }
+
+ // If the branch has a condition wrapped by __builtin_unpredictable,
+ // create metadata that specifies that the branch is unpredictable.
+ // Don't bother if not optimizing because that metadata would not be used.
+ llvm::MDNode *Unpredictable = nullptr;
+ if (CGM.getCodeGenOpts().OptimizationLevel != 0) {
+ if (const CallExpr *Call = dyn_cast<CallExpr>(Cond)) {
+ const Decl *TargetDecl = Call->getCalleeDecl();
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
+ if (FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
+ llvm::MDBuilder MDHelper(getLLVMContext());
+ Unpredictable = MDHelper.createUnpredictable();
+ }
+ }
+ }
+ }
+
+ // Create branch weights based on the number of times we get here and the
+ // number of times the condition should be true.
+ uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
+ llvm::MDNode *Weights =
+ createProfileWeights(TrueCount, CurrentCount - TrueCount);
+
+ // Emit the code with the fully general case.
+ llvm::Value *CondV;
+ {
+ ApplyDebugLocation DL(*this, Cond);
+ CondV = EvaluateExprAsBool(Cond);
+ }
+ Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
+}
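+// Shape of the emitted control flow for 'a && b' (a sketch): the LHS
+// branches to a fresh 'land.lhs.true' block or to FalseBlock; the RHS is
+// then emitted in 'land.lhs.true', branching to TrueBlock or FalseBlock,
+// so 'b' is only evaluated when 'a' is true.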
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified stmt yet.
+void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
+ CGM.ErrorUnsupported(S, Type);
+}
+
+/// emitNonZeroVLAInit - Emit the "zero" initialization of a
+/// variable-length array whose elements have a non-zero bit-pattern.
+///
+/// \param baseType the inner-most element type of the array
+/// \param src - a char* pointing to the bit-pattern for a single
+/// base element of the array
+/// \param sizeInChars - the total size of the VLA, in chars
+static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
+ Address dest, Address src,
+ llvm::Value *sizeInChars) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
+ llvm::Value *baseSizeInChars
+ = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
+
+ Address begin =
+ Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
+ llvm::Value *end =
+ Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");
+
+ llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
+ llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
+ llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
+
+ // Make a loop over the VLA. C99 guarantees that the VLA element
+ // count must be nonzero.
+ CGF.EmitBlock(loopBB);
+
+ llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
+ cur->addIncoming(begin.getPointer(), originBB);
+
+ CharUnits curAlign =
+ dest.getAlignment().alignmentOfArrayElement(baseSize);
+
+ // memcpy the individual element bit-pattern.
+ Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
+ /*volatile*/ false);
+
+ // Go to the next element.
+ llvm::Value *next =
+ Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
+
+ // Leave if that's the end of the VLA.
+ llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
+ Builder.CreateCondBr(done, contBB, loopBB);
+ cur->addIncoming(next, loopBB);
+
+ CGF.EmitBlock(contBB);
+}
+
+void
+CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
+ // Ignore empty classes in C++.
+ if (getLangOpts().CPlusPlus) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
+ return;
+ }
+ }
+
+ // Cast the dest ptr to the appropriate i8 pointer type.
+ if (DestPtr.getElementType() != Int8Ty)
+ DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
+
+ // Get size and alignment info for this aggregate.
+ CharUnits size = getContext().getTypeSizeInChars(Ty);
+
+ llvm::Value *SizeVal;
+ const VariableArrayType *vla;
+
+ // Don't bother emitting a zero-byte memset.
+ if (size.isZero()) {
+ // But note that getTypeInfo returns 0 for a VLA.
+ if (const VariableArrayType *vlaType =
+ dyn_cast_or_null<VariableArrayType>(
+ getContext().getAsArrayType(Ty))) {
+ QualType eltType;
+ llvm::Value *numElts;
+ std::tie(numElts, eltType) = getVLASize(vlaType);
+
+ SizeVal = numElts;
+ CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
+ if (!eltSize.isOne())
+ SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
+ vla = vlaType;
+ } else {
+ return;
+ }
+ } else {
+ SizeVal = CGM.getSize(size);
+ vla = nullptr;
+ }
+
+ // If the type contains a pointer to data member we can't memset it to zero.
+ // Instead, create a null constant and copy it to the destination.
+ // TODO: there are other patterns besides zero that we can usefully memset,
+ // like -1, which happens to be the pattern used by member-pointers.
+ if (!CGM.getTypes().isZeroInitializable(Ty)) {
+ // For a VLA, emit a single element, then splat that over the VLA.
+ if (vla) Ty = getContext().getBaseElementType(vla);
+
+ llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
+
+ llvm::GlobalVariable *NullVariable =
+ new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
+ /*isConstant=*/true,
+ llvm::GlobalVariable::PrivateLinkage,
+ NullConstant, Twine());
+ CharUnits NullAlign = DestPtr.getAlignment();
+ NullVariable->setAlignment(NullAlign.getQuantity());
+ Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
+ NullAlign);
+
+ if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
+
+ // Get and call the appropriate llvm.memcpy overload.
+ Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
+ return;
+ }
+
+ // Otherwise, just memset the whole thing to zero. This is legal
+ // because in LLVM, all default initializers (other than the ones we just
+ // handled above) are guaranteed to have a bit pattern of all zeros.
+ Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
+}
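+// For example, under the Itanium C++ ABI a null pointer-to-data-member is
+// represented as -1, so a record containing one is not zero-initializable
+// and takes the memcpy-from-null-constant path above instead of a memset.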
+
+llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
+ // Make sure that there is a block for the indirect goto.
+ if (!IndirectBranch)
+ GetIndirectGotoBlock();
+
+ llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
+
+ // Make sure the indirect branch includes all of the address-taken blocks.
+ IndirectBranch->addDestination(BB);
+ return llvm::BlockAddress::get(CurFn, BB);
+}
+
+llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
+ // If we already made the indirect branch for indirect goto, return its block.
+ if (IndirectBranch) return IndirectBranch->getParent();
+
+ CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
+
+ // Create the PHI node that indirect gotos will add entries to.
+ llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
+ "indirect.goto.dest");
+
+ // Create the indirect branch instruction.
+ IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
+ return IndirectBranch->getParent();
+}
+
+/// Computes the length of an array in elements, as well as the base
+/// element type and a properly-typed first element pointer.
+llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
+ QualType &baseType,
+ Address &addr) {
+ const ArrayType *arrayType = origArrayType;
+
+ // If it's a VLA, we have to load the stored size. Note that
+ // this is the size of the VLA in bytes, not its size in elements.
+ llvm::Value *numVLAElements = nullptr;
+ if (isa<VariableArrayType>(arrayType)) {
+ numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first;
+
+ // Walk into all VLAs. This doesn't require changes to addr,
+ // which has type T* where T is the first non-VLA element type.
+ do {
+ QualType elementType = arrayType->getElementType();
+ arrayType = getContext().getAsArrayType(elementType);
+
+ // If we only have VLA components, 'addr' requires no adjustment.
+ if (!arrayType) {
+ baseType = elementType;
+ return numVLAElements;
+ }
+ } while (isa<VariableArrayType>(arrayType));
+
+ // We get out here only if we find a constant array type
+ // inside the VLA.
+ }
+
+ // We have some number of constant-length arrays, so addr should
+ // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
+ // down to the first element of addr.
+ SmallVector<llvm::Value*, 8> gepIndices;
+
+ // GEP down to the array type.
+ llvm::ConstantInt *zero = Builder.getInt32(0);
+ gepIndices.push_back(zero);
+
+ uint64_t countFromCLAs = 1;
+ QualType eltType;
+
+ llvm::ArrayType *llvmArrayType =
+ dyn_cast<llvm::ArrayType>(addr.getElementType());
+ while (llvmArrayType) {
+ assert(isa<ConstantArrayType>(arrayType));
+ assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
+ == llvmArrayType->getNumElements());
+
+ gepIndices.push_back(zero);
+ countFromCLAs *= llvmArrayType->getNumElements();
+ eltType = arrayType->getElementType();
+
+ llvmArrayType =
+ dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
+ arrayType = getContext().getAsArrayType(arrayType->getElementType());
+ assert((!llvmArrayType || arrayType) &&
+ "LLVM and Clang types are out-of-synch");
+ }
+
+ if (arrayType) {
+ // From this point onwards, the Clang array type has been emitted
+ // as some other type (probably a packed struct). Compute the array
+ // size, and just emit the 'begin' expression as a bitcast.
+ while (arrayType) {
+ countFromCLAs *=
+ cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
+ eltType = arrayType->getElementType();
+ arrayType = getContext().getAsArrayType(eltType);
+ }
+
+ llvm::Type *baseType = ConvertType(eltType);
+ addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
+ } else {
+ // Create the actual GEP.
+ addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
+ gepIndices, "array.begin"),
+ addr.getAlignment());
+ }
+
+ baseType = eltType;
+
+ llvm::Value *numElements
+ = llvm::ConstantInt::get(SizeTy, countFromCLAs);
+
+ // If we had any VLA dimensions, factor them in.
+ if (numVLAElements)
+ numElements = Builder.CreateNUWMul(numVLAElements, numElements);
+
+ return numElements;
+}
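+// For example, for 'int a[n][3]' this returns n*3 elements with baseType
+// 'int'; for 'int a[2][5]' it returns the constant 10 and GEPs down to the
+// first 'int' element.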
+
+std::pair<llvm::Value*, QualType>
+CodeGenFunction::getVLASize(QualType type) {
+ const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
+ assert(vla && "type was not a variable array type!");
+ return getVLASize(vla);
+}
+
+std::pair<llvm::Value*, QualType>
+CodeGenFunction::getVLASize(const VariableArrayType *type) {
+ // The number of elements so far; always size_t.
+ llvm::Value *numElements = nullptr;
+
+ QualType elementType;
+ do {
+ elementType = type->getElementType();
+ llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
+ assert(vlaSize && "no size for VLA!");
+ assert(vlaSize->getType() == SizeTy);
+
+ if (!numElements) {
+ numElements = vlaSize;
+ } else {
+ // It's undefined behavior if this wraps around, so mark it that way.
+ // FIXME: Teach -fsanitize=undefined to trap this.
+ numElements = Builder.CreateNUWMul(numElements, vlaSize);
+ }
+ } while ((type = getContext().getAsVariableArrayType(elementType)));
+
+ return std::pair<llvm::Value*,QualType>(numElements, elementType);
+}
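+// For example, for the type 'int[n][m]' this returns (n*m, int), where the
+// multiply is emitted NUW because a wrapping size computation would already
+// be undefined behavior.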
+
+void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
+ assert(type->isVariablyModifiedType() &&
+ "Must pass variably modified type to EmitVLASizes!");
+
+ EnsureInsertPoint();
+
+ // We're going to walk down into the type and look for VLA
+ // expressions.
+ do {
+ assert(type->isVariablyModifiedType());
+
+ const Type *ty = type.getTypePtr();
+ switch (ty->getTypeClass()) {
+
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("unexpected dependent type!");
+
+ // These types are never variably-modified.
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::Record:
+ case Type::Enum:
+ case Type::Elaborated:
+ case Type::TemplateSpecialization:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ llvm_unreachable("type class is never variably-modified!");
+
+ case Type::Adjusted:
+ type = cast<AdjustedType>(ty)->getAdjustedType();
+ break;
+
+ case Type::Decayed:
+ type = cast<DecayedType>(ty)->getPointeeType();
+ break;
+
+ case Type::Pointer:
+ type = cast<PointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::BlockPointer:
+ type = cast<BlockPointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ type = cast<ReferenceType>(ty)->getPointeeType();
+ break;
+
+ case Type::MemberPointer:
+ type = cast<MemberPointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ // Losing element qualification here is fine.
+ type = cast<ArrayType>(ty)->getElementType();
+ break;
+
+ case Type::VariableArray: {
+ // Losing element qualification here is fine.
+ const VariableArrayType *vat = cast<VariableArrayType>(ty);
+
+ // Unknown size indication requires no size computation.
+ // Otherwise, evaluate and record it.
+ if (const Expr *size = vat->getSizeExpr()) {
+ // It's possible that we might have emitted this already,
+ // e.g. with a typedef and a pointer to it.
+ llvm::Value *&entry = VLASizeMap[size];
+ if (!entry) {
+ llvm::Value *Size = EmitScalarExpr(size);
+
+ // C11 6.7.6.2p5:
+ // If the size is an expression that is not an integer constant
+ // expression [...] each time it is evaluated it shall have a value
+ // greater than zero.
+ if (SanOpts.has(SanitizerKind::VLABound) &&
+ size->getType()->isSignedIntegerType()) {
+ SanitizerScope SanScope(this);
+ llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
+ llvm::Constant *StaticArgs[] = {
+ EmitCheckSourceLocation(size->getLocStart()),
+ EmitCheckTypeDescriptor(size->getType())
+ };
+ EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
+ SanitizerKind::VLABound),
+ "vla_bound_not_positive", StaticArgs, Size);
+ }
+
+ // Always zexting here would be wrong if it weren't
+ // undefined behavior to have a negative bound.
+ entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
+ }
+ }
+ type = vat->getElementType();
+ break;
+ }
+
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ type = cast<FunctionType>(ty)->getReturnType();
+ break;
+
+ case Type::Paren:
+ case Type::TypeOf:
+ case Type::UnaryTransform:
+ case Type::Attributed:
+ case Type::SubstTemplateTypeParm:
+ case Type::PackExpansion:
+ // Keep walking after single level desugaring.
+ type = type.getSingleStepDesugaredType(getContext());
+ break;
+
+ case Type::Typedef:
+ case Type::Decltype:
+ case Type::Auto:
+ // Stop walking: nothing to do.
+ return;
+
+ case Type::TypeOfExpr:
+ // Stop walking: emit typeof expression.
+ EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
+ return;
+
+ case Type::Atomic:
+ type = cast<AtomicType>(ty)->getValueType();
+ break;
+ }
+ } while (type->isVariablyModifiedType());
+}
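+// For example, given 'void f(int n) { int (*p)[n]; }', the walk descends
+// through the pointer type to the VLA, evaluates 'n' once, and caches the
+// size in VLASizeMap so later uses of the same type reuse that value.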
+
+Address CodeGenFunction::EmitVAListRef(const Expr* E) {
+ if (getContext().getBuiltinVaListType()->isArrayType())
+ return EmitPointerWithAlignment(E);
+ return EmitLValue(E).getAddress();
+}
+
+Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
+ return EmitLValue(E).getAddress();
+}
+
+void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
+ llvm::Constant *Init) {
+ assert (Init && "Invalid DeclRefExpr initializer!");
+ if (CGDebugInfo *Dbg = getDebugInfo())
+ if (CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo)
+ Dbg->EmitGlobalVariable(E->getDecl(), Init);
+}
+
+CodeGenFunction::PeepholeProtection
+CodeGenFunction::protectFromPeepholes(RValue rvalue) {
+ // At the moment, the only aggressive peephole we do in IR gen
+ // is trunc(zext) folding, but if we add more, we can easily
+ // extend this protection.
+
+ if (!rvalue.isScalar()) return PeepholeProtection();
+ llvm::Value *value = rvalue.getScalarVal();
+ if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
+
+ // Just make an extra bitcast.
+ assert(HaveInsertPoint());
+ llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
+ Builder.GetInsertBlock());
+
+ PeepholeProtection protection;
+ protection.Inst = inst;
+ return protection;
+}
+
+void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
+ if (!protection.Inst) return;
+
+ // In theory, we could try to duplicate the peepholes now, but whatever.
+ protection.Inst->eraseFromParent();
+}
+
+llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
+ llvm::Value *AnnotatedVal,
+ StringRef AnnotationStr,
+ SourceLocation Location) {
+ llvm::Value *Args[4] = {
+ AnnotatedVal,
+ Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
+ Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
+ CGM.EmitAnnotationLineNo(Location)
+ };
+ return Builder.CreateCall(AnnotationFn, Args);
+}
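+// The emitted call passes the annotated value plus the annotation string,
+// translation-unit name, and line number; e.g. for llvm.var.annotation it
+// has the shape:
+//   call void @llvm.var.annotation(i8* %val, i8* %str, i8* %unit, i32 %line)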
+
+void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
+ assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
+ // FIXME We create a new bitcast for every annotation because that's what
+ // llvm-gcc was doing.
+ for (const auto *I : D->specific_attrs<AnnotateAttr>())
+ EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
+ Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
+ I->getAnnotation(), D->getLocation());
+}
+
+Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
+ Address Addr) {
+ assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
+ llvm::Value *V = Addr.getPointer();
+ llvm::Type *VTy = V->getType();
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
+ CGM.Int8PtrTy);
+
+ for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
+ // FIXME Always emit the cast inst so we can differentiate between
+ // annotation on the first field of a struct and annotation on the struct
+ // itself.
+ if (VTy != CGM.Int8PtrTy)
+ V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
+ V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
+ V = Builder.CreateBitCast(V, VTy);
+ }
+
+ return Address(V, Addr.getAlignment());
+}
+
+CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
+
+CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
+ : CGF(CGF) {
+ assert(!CGF->IsSanitizerScope);
+ CGF->IsSanitizerScope = true;
+}
+
+CodeGenFunction::SanitizerScope::~SanitizerScope() {
+ CGF->IsSanitizerScope = false;
+}
+
+void CodeGenFunction::InsertHelper(llvm::Instruction *I,
+ const llvm::Twine &Name,
+ llvm::BasicBlock *BB,
+ llvm::BasicBlock::iterator InsertPt) const {
+ LoopStack.InsertHelper(I);
+ if (IsSanitizerScope)
+ CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
+}
+
+template <bool PreserveNames>
+void CGBuilderInserter<PreserveNames>::InsertHelper(
+ llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
+ llvm::BasicBlock::iterator InsertPt) const {
+ llvm::IRBuilderDefaultInserter<PreserveNames>::InsertHelper(I, Name, BB,
+ InsertPt);
+ if (CGF)
+ CGF->InsertHelper(I, Name, BB, InsertPt);
+}
+
+#ifdef NDEBUG
+#define PreserveNames false
+#else
+#define PreserveNames true
+#endif
+template void CGBuilderInserter<PreserveNames>::InsertHelper(
+ llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
+ llvm::BasicBlock::iterator InsertPt) const;
+#undef PreserveNames
+
+static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
+ CodeGenModule &CGM, const FunctionDecl *FD,
+ std::string &FirstMissing) {
+ // If there aren't any required features listed, there is nothing to check.
+ if (ReqFeatures.empty())
+ return false;
+
+ // Now build up the set of caller features and verify that all the required
+ // features are there.
+ llvm::StringMap<bool> CallerFeatureMap;
+ CGM.getFunctionFeatureMap(CallerFeatureMap, FD);
+
+ // Every required feature must be present in the caller; within a
+ // '|'-separated group of alternatives, any one of them suffices.
+ return std::all_of(
+ ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
+ SmallVector<StringRef, 1> OrFeatures;
+ Feature.split(OrFeatures, "|");
+ return std::any_of(OrFeatures.begin(), OrFeatures.end(),
+ [&](StringRef Feature) {
+ if (!CallerFeatureMap.lookup(Feature)) {
+ FirstMissing = Feature.str();
+ return false;
+ }
+ return true;
+ });
+ });
+}
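+// For example, ReqFeatures = {"sse4.2", "fma|avx"} requires the caller to
+// have sse4.2 and at least one of fma or avx enabled.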
+
+// Emits an error if we don't have a valid set of target features for the
+// called function.
+void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
+ const FunctionDecl *TargetDecl) {
+ // Early exit if this is an indirect call.
+ if (!TargetDecl)
+ return;
+
+ // Get the current enclosing function if it exists. If it doesn't
+ // we can't check the target features anyhow.
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl);
+ if (!FD)
+ return;
+
+ // Grab the required features for the call. For a builtin these are listed
+ // in the .td file with the default CPU; for an always_inline function they
+ // are any listed CPU and any listed features.
+ unsigned BuiltinID = TargetDecl->getBuiltinID();
+ std::string MissingFeature;
+ if (BuiltinID) {
+ SmallVector<StringRef, 1> ReqFeatures;
+ const char *FeatureList =
+ CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
+ // Return if the builtin doesn't have any required features.
+ if (!FeatureList || StringRef(FeatureList) == "")
+ return;
+ StringRef(FeatureList).split(ReqFeatures, ",");
+ if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
+ CGM.getDiags().Report(E->getLocStart(), diag::err_builtin_needs_feature)
+ << TargetDecl->getDeclName()
+ << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
+
+ } else if (TargetDecl->hasAttr<TargetAttr>()) {
+ // Get the required features for the callee.
+ SmallVector<StringRef, 1> ReqFeatures;
+ llvm::StringMap<bool> CalleeFeatureMap;
+ CGM.getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
+ for (const auto &F : CalleeFeatureMap) {
+ // Only positive features are "required".
+ if (F.getValue())
+ ReqFeatures.push_back(F.getKey());
+ }
+ if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
+ CGM.getDiags().Report(E->getLocStart(), diag::err_function_needs_feature)
+ << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
new file mode 100644
index 0000000..b3d5035
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
@@ -0,0 +1,3312 @@
+//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal per-function state used for llvm translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
+#define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
+
+#include "CGBuilder.h"
+#include "CGDebugInfo.h"
+#include "CGLoopInfo.h"
+#include "CGValue.h"
+#include "CodeGenModule.h"
+#include "CodeGenPGO.h"
+#include "EHScopeStack.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprOpenMP.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/ABI.h"
+#include "clang/Basic/CapturedStmt.h"
+#include "clang/Basic/OpenMPKinds.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Debug.h"
+
+namespace llvm {
+class BasicBlock;
+class LLVMContext;
+class MDNode;
+class Module;
+class SwitchInst;
+class Twine;
+class Value;
+class CallSite;
+}
+
+namespace clang {
+class ASTContext;
+class BlockDecl;
+class CXXDestructorDecl;
+class CXXForRangeStmt;
+class CXXTryStmt;
+class Decl;
+class LabelDecl;
+class EnumConstantDecl;
+class FunctionDecl;
+class FunctionProtoType;
+class LabelStmt;
+class ObjCContainerDecl;
+class ObjCInterfaceDecl;
+class ObjCIvarDecl;
+class ObjCMethodDecl;
+class ObjCImplementationDecl;
+class ObjCPropertyImplDecl;
+class TargetInfo;
+class TargetCodeGenInfo;
+class VarDecl;
+class ObjCForCollectionStmt;
+class ObjCAtTryStmt;
+class ObjCAtThrowStmt;
+class ObjCAtSynchronizedStmt;
+class ObjCAutoreleasePoolStmt;
+
+namespace CodeGen {
+class CodeGenTypes;
+class CGFunctionInfo;
+class CGRecordLayout;
+class CGBlockInfo;
+class CGCXXABI;
+class BlockByrefHelpers;
+class BlockByrefInfo;
+class BlockFlags;
+class BlockFieldFlags;
+
+/// The kind of evaluation to perform on values of a particular
+/// type. Basically, is the code in CGExprScalar, CGExprComplex, or
+/// CGExprAgg?
+///
+/// TODO: should vectors maybe be split out into their own thing?
+enum TypeEvaluationKind {
+ TEK_Scalar,
+ TEK_Complex,
+ TEK_Aggregate
+};
+
+/// CodeGenFunction - This class organizes the per-function state that is used
+/// while generating LLVM code.
+class CodeGenFunction : public CodeGenTypeCache {
+ CodeGenFunction(const CodeGenFunction &) = delete;
+ void operator=(const CodeGenFunction &) = delete;
+
+ friend class CGCXXABI;
+public:
+ /// A jump destination is an abstract label, branching to which may
+ /// require a jump out through normal cleanups.
+ struct JumpDest {
+ JumpDest() : Block(nullptr), ScopeDepth(), Index(0) {}
+ JumpDest(llvm::BasicBlock *Block,
+ EHScopeStack::stable_iterator Depth,
+ unsigned Index)
+ : Block(Block), ScopeDepth(Depth), Index(Index) {}
+
+ bool isValid() const { return Block != nullptr; }
+ llvm::BasicBlock *getBlock() const { return Block; }
+ EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
+ unsigned getDestIndex() const { return Index; }
+
+ // This should be used cautiously.
+ void setScopeDepth(EHScopeStack::stable_iterator depth) {
+ ScopeDepth = depth;
+ }
+
+ private:
+ llvm::BasicBlock *Block;
+ EHScopeStack::stable_iterator ScopeDepth;
+ unsigned Index;
+ };
+
+ CodeGenModule &CGM; // Per-module state.
+ const TargetInfo &Target;
+
+ typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
+ LoopInfoStack LoopStack;
+ CGBuilderTy Builder;
+
+ /// \brief CGBuilder insert helper. This function is called after an
+ /// instruction is created using Builder.
+ void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
+ llvm::BasicBlock *BB,
+ llvm::BasicBlock::iterator InsertPt) const;
+
+ /// CurFuncDecl - Holds the Decl for the current outermost
+ /// non-closure context.
+ const Decl *CurFuncDecl;
+ /// CurCodeDecl - This is the inner-most code context, which includes blocks.
+ const Decl *CurCodeDecl;
+ const CGFunctionInfo *CurFnInfo;
+ QualType FnRetTy;
+ llvm::Function *CurFn;
+
+ /// CurGD - The GlobalDecl for the current function being compiled.
+ GlobalDecl CurGD;
+
+ /// PrologueCleanupDepth - The cleanup depth enclosing all the
+ /// cleanups associated with the parameters.
+ EHScopeStack::stable_iterator PrologueCleanupDepth;
+
+ /// ReturnBlock - Unified return block.
+ JumpDest ReturnBlock;
+
+ /// ReturnValue - The temporary alloca to hold the return
+ /// value. This is invalid iff the function has no return value.
+ Address ReturnValue;
+
+ /// AllocaInsertPoint - This is an instruction in the entry block before which
+ /// we prefer to insert allocas.
+ llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
+
+ /// \brief API for captured statement code generation.
+ class CGCapturedStmtInfo {
+ public:
+ explicit CGCapturedStmtInfo(CapturedRegionKind K = CR_Default)
+ : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {}
+ explicit CGCapturedStmtInfo(const CapturedStmt &S,
+ CapturedRegionKind K = CR_Default)
+ : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {
+
+ RecordDecl::field_iterator Field =
+ S.getCapturedRecordDecl()->field_begin();
+ for (CapturedStmt::const_capture_iterator I = S.capture_begin(),
+ E = S.capture_end();
+ I != E; ++I, ++Field) {
+ if (I->capturesThis())
+ CXXThisFieldDecl = *Field;
+ else if (I->capturesVariable())
+ CaptureFields[I->getCapturedVar()] = *Field;
+ }
+ }
+
+ virtual ~CGCapturedStmtInfo();
+
+ CapturedRegionKind getKind() const { return Kind; }
+
+ virtual void setContextValue(llvm::Value *V) { ThisValue = V; }
+ /// \brief Retrieve the value of the context parameter.
+ virtual llvm::Value *getContextValue() const { return ThisValue; }
+
+ /// \brief Lookup the captured field decl for a variable.
+ virtual const FieldDecl *lookup(const VarDecl *VD) const {
+ return CaptureFields.lookup(VD);
+ }
+
+ bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; }
+ virtual FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; }
+
+ static bool classof(const CGCapturedStmtInfo *) {
+ return true;
+ }
+
+ /// \brief Emit the captured statement body.
+ virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) {
+ CGF.incrementProfileCounter(S);
+ CGF.EmitStmt(S);
+ }
+
+ /// \brief Get the name of the capture helper.
+ virtual StringRef getHelperName() const { return "__captured_stmt"; }
+
+ private:
+ /// \brief The kind of captured statement being generated.
+ CapturedRegionKind Kind;
+
+ /// \brief Keep the map between VarDecl and FieldDecl.
+ llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;
+
+ /// \brief The base address of the captured record, passed in as the first
+ /// argument of the parallel region function.
+ llvm::Value *ThisValue;
+
+ /// \brief Captured 'this' type.
+ FieldDecl *CXXThisFieldDecl;
+ };
+ CGCapturedStmtInfo *CapturedStmtInfo;
+
+ /// \brief RAII for correct setting/restoring of CapturedStmtInfo.
+ class CGCapturedStmtRAII {
+ private:
+ CodeGenFunction &CGF;
+ CGCapturedStmtInfo *PrevCapturedStmtInfo;
+ public:
+ CGCapturedStmtRAII(CodeGenFunction &CGF,
+ CGCapturedStmtInfo *NewCapturedStmtInfo)
+ : CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo) {
+ CGF.CapturedStmtInfo = NewCapturedStmtInfo;
+ }
+ ~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; }
+ };
+
+ /// \brief Sanitizers enabled for this function.
+ SanitizerSet SanOpts;
+
+ /// \brief True if CodeGen currently emits code implementing sanitizer checks.
+ bool IsSanitizerScope;
+
+ /// \brief RAII object to set/unset CodeGenFunction::IsSanitizerScope.
+ class SanitizerScope {
+ CodeGenFunction *CGF;
+ public:
+ SanitizerScope(CodeGenFunction *CGF);
+ ~SanitizerScope();
+ };
+
+ /// In C++, whether we are code generating a thunk. This controls whether we
+ /// should emit cleanups.
+ bool CurFuncIsThunk;
+
+ /// In ARC, whether we should autorelease the return value.
+ bool AutoreleaseResult;
+
+ /// Whether we processed a Microsoft-style asm block during CodeGen. These can
+ /// potentially set the return value.
+ bool SawAsmBlock;
+
+ /// True if the current function is an outlined SEH helper. This can be a
+ /// finally block or filter expression.
+ bool IsOutlinedSEHHelper;
+
+ const CodeGen::CGBlockInfo *BlockInfo;
+ llvm::Value *BlockPointer;
+
+ llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
+ FieldDecl *LambdaThisCaptureField;
+
+ /// \brief A mapping from NRVO variables to the flags used to indicate
+ /// when the NRVO has been applied to this variable.
+ llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
+
+ EHScopeStack EHStack;
+ llvm::SmallVector<char, 256> LifetimeExtendedCleanupStack;
+ llvm::SmallVector<const JumpDest *, 2> SEHTryEpilogueStack;
+
+ llvm::Instruction *CurrentFuncletPad = nullptr;
+
+ /// Header for data within LifetimeExtendedCleanupStack.
+ struct LifetimeExtendedCleanupHeader {
+ /// The size of the following cleanup object.
+ unsigned Size;
+ /// The kind of cleanup to push: a value from the CleanupKind enumeration.
+ CleanupKind Kind;
+
+ size_t getSize() const { return Size; }
+ CleanupKind getKind() const { return Kind; }
+ };
+
+ /// An alloca holding the i32 index of the current cleanup destination.
+ llvm::AllocaInst *NormalCleanupDest;
+
+ unsigned NextCleanupDestIndex;
+
+ /// FirstBlockInfo - The head of a singly-linked-list of block layouts.
+ CGBlockInfo *FirstBlockInfo;
+
+ /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
+ llvm::BasicBlock *EHResumeBlock;
+
+ /// The exception slot. All landing pads write the current exception pointer
+ /// into this alloca.
+ llvm::Value *ExceptionSlot;
+
+ /// The selector slot. Under the MandatoryCleanup model, all landing pads
+ /// write the current selector value into this alloca.
+ llvm::AllocaInst *EHSelectorSlot;
+
+ /// A stack of exception code slots. Entering an __except block pushes a slot
+ /// on the stack and leaving pops one. The __exception_code() intrinsic loads
+ /// a value from the top of the stack.
+ SmallVector<Address, 1> SEHCodeSlotStack;
+
+ /// Value returned by __exception_info intrinsic.
+ llvm::Value *SEHInfo = nullptr;
+
+ /// Emits a landing pad for the current EH stack.
+ llvm::BasicBlock *EmitLandingPad();
+
+ llvm::BasicBlock *getInvokeDestImpl();
+
+ template <class T>
+ typename DominatingValue<T>::saved_type saveValueInCond(T value) {
+ return DominatingValue<T>::save(*this, value);
+ }
+
+public:
+ /// ObjCEHValueStack - Stack of Objective-C exception values, used for
+ /// rethrows.
+ SmallVector<llvm::Value*, 8> ObjCEHValueStack;
+
+ /// A class controlling the emission of a finally block.
+ class FinallyInfo {
+ /// Where the catchall's edge through the cleanup should go.
+ JumpDest RethrowDest;
+
+ /// A function to call to enter the catch.
+ llvm::Constant *BeginCatchFn;
+
+ /// An i1 variable indicating whether or not the @finally is
+ /// running for an exception.
+ llvm::AllocaInst *ForEHVar;
+
+ /// An i8* variable into which the exception pointer to rethrow
+ /// has been saved.
+ llvm::AllocaInst *SavedExnVar;
+
+ public:
+ void enter(CodeGenFunction &CGF, const Stmt *Finally,
+ llvm::Constant *beginCatchFn, llvm::Constant *endCatchFn,
+ llvm::Constant *rethrowFn);
+ void exit(CodeGenFunction &CGF);
+ };
+
+ /// Returns true inside SEH __try blocks.
+ bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); }
+
+ /// Returns true while emitting a cleanuppad.
+ bool isCleanupPadScope() const {
+ return CurrentFuncletPad && isa<llvm::CleanupPadInst>(CurrentFuncletPad);
+ }
+
+ /// pushFullExprCleanup - Push a cleanup to be run at the end of the
+ /// current full-expression. Safe against the possibility that
+ /// we're currently inside a conditionally-evaluated expression.
+ template <class T, class... As>
+ void pushFullExprCleanup(CleanupKind kind, As... A) {
+ // If we're not in a conditional branch, or if none of the
+ // arguments requires saving, then use the unconditional cleanup.
+ if (!isInConditionalBranch())
+ return EHStack.pushCleanup<T>(kind, A...);
+
+ // Stash values in a tuple so we can guarantee the order of saves.
+ typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
+ SavedTuple Saved{saveValueInCond(A)...};
+
+ typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
+ EHStack.pushCleanupTuple<CleanupType>(kind, Saved);
+ initFullExprCleanup();
+ }
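+
+ // A minimal sketch of a conditional-safe cleanup push (DestroyObject
+ // stands in for any EHScopeStack::Cleanup subclass with a matching
+ // constructor; the exact cleanup class is an assumption):
+ //
+ //   CGF.pushFullExprCleanup<DestroyObject>(NormalAndEHCleanup,
+ //                                          addr, type, destroyer,
+ //                                          /*useEHCleanupForArray=*/true);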
+
+ /// \brief Queue a cleanup to be pushed after finishing the current
+ /// full-expression.
+ template <class T, class... As>
+ void pushCleanupAfterFullExpr(CleanupKind Kind, As... A) {
+ assert(!isInConditionalBranch() && "can't defer conditional cleanup");
+
+ LifetimeExtendedCleanupHeader Header = { sizeof(T), Kind };
+
+ size_t OldSize = LifetimeExtendedCleanupStack.size();
+ LifetimeExtendedCleanupStack.resize(
+ LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size);
+
+ static_assert(sizeof(Header) % llvm::AlignOf<T>::Alignment == 0,
+ "Cleanup will be allocated on misaligned address");
+ char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
+ new (Buffer) LifetimeExtendedCleanupHeader(Header);
+ new (Buffer + sizeof(Header)) T(A...);
+ }
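+
+ // The byte stack built above stores, for each deferred cleanup, a
+ // LifetimeExtendedCleanupHeader immediately followed by Header.Size
+ // bytes holding the cleanup object itself:
+ //
+ //   [Header][T object][Header][T object]...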
+
+ /// Set up the last cleanup that was pushed as a conditional
+ /// full-expression cleanup.
+ void initFullExprCleanup();
+
+ /// PushDestructorCleanup - Push a cleanup to call the
+ /// complete-object destructor of an object of the given type at the
+ /// given address. Does nothing if T is not a C++ class type with a
+ /// non-trivial destructor.
+ void PushDestructorCleanup(QualType T, Address Addr);
+
+ /// PushDestructorCleanup - Push a cleanup to call the
+ /// complete-object variant of the given destructor on the object at
+ /// the given address.
+ void PushDestructorCleanup(const CXXDestructorDecl *Dtor, Address Addr);
+
+ /// PopCleanupBlock - Will pop the cleanup entry on the stack and
+ /// process all branch fixups.
+ void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
+
+ /// DeactivateCleanupBlock - Deactivates the given cleanup block.
+ /// The block cannot be reactivated. Pops it if it's the top of the
+ /// stack.
+ ///
+ /// \param DominatingIP - An instruction which is known to
+ /// dominate the current IP (if set) and which lies along
+ /// all paths of execution between the current IP and
+ /// the point at which the cleanup comes into scope.
+ void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
+ llvm::Instruction *DominatingIP);
+
+ /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
+ /// Cannot be used to resurrect a deactivated cleanup.
+ ///
+ /// \param DominatingIP - An instruction which is known to
+ /// dominate the current IP (if set) and which lies along
+ /// all paths of execution between the current IP and
+ /// the point at which the cleanup comes into scope.
+ void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
+ llvm::Instruction *DominatingIP);
+
+ /// \brief Enters a new scope for capturing cleanups, all of which
+ /// will be executed once the scope is exited.
+ class RunCleanupsScope {
+ EHScopeStack::stable_iterator CleanupStackDepth;
+ size_t LifetimeExtendedCleanupStackSize;
+ bool OldDidCallStackSave;
+ protected:
+ bool PerformCleanup;
+ private:
+
+ RunCleanupsScope(const RunCleanupsScope &) = delete;
+ void operator=(const RunCleanupsScope &) = delete;
+
+ protected:
+ CodeGenFunction& CGF;
+
+ public:
+ /// \brief Enter a new cleanup scope.
+ explicit RunCleanupsScope(CodeGenFunction &CGF)
+ : PerformCleanup(true), CGF(CGF)
+ {
+ CleanupStackDepth = CGF.EHStack.stable_begin();
+ LifetimeExtendedCleanupStackSize =
+ CGF.LifetimeExtendedCleanupStack.size();
+ OldDidCallStackSave = CGF.DidCallStackSave;
+ CGF.DidCallStackSave = false;
+ }
+
+ /// \brief Exit this cleanup scope, emitting any accumulated
+ /// cleanups.
+ ~RunCleanupsScope() {
+ if (PerformCleanup) {
+ CGF.DidCallStackSave = OldDidCallStackSave;
+ CGF.PopCleanupBlocks(CleanupStackDepth,
+ LifetimeExtendedCleanupStackSize);
+ }
+ }
+
+ /// \brief Determine whether this scope requires any cleanups.
+ bool requiresCleanups() const {
+ return CGF.EHStack.stable_begin() != CleanupStackDepth;
+ }
+
+ /// \brief Force the emission of cleanups now, instead of waiting
+ /// until this object is destroyed.
+ void ForceCleanup() {
+ assert(PerformCleanup && "Already forced cleanup");
+ CGF.DidCallStackSave = OldDidCallStackSave;
+ CGF.PopCleanupBlocks(CleanupStackDepth,
+ LifetimeExtendedCleanupStackSize);
+ PerformCleanup = false;
+ }
+ };
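+
+ // Typical usage (a sketch; S is assumed to be a statement whose emission
+ // may push cleanups):
+ //
+ //   {
+ //     RunCleanupsScope Scope(CGF);
+ //     CGF.EmitStmt(S);  // cleanups pushed during emission...
+ //   }                   // ...are popped and emitted when Scope dies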
+
+ class LexicalScope : public RunCleanupsScope {
+ SourceRange Range;
+ SmallVector<const LabelDecl*, 4> Labels;
+ LexicalScope *ParentScope;
+
+ LexicalScope(const LexicalScope &) = delete;
+ void operator=(const LexicalScope &) = delete;
+
+ public:
+ /// \brief Enter a new cleanup scope.
+ explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
+ : RunCleanupsScope(CGF), Range(Range), ParentScope(CGF.CurLexicalScope) {
+ CGF.CurLexicalScope = this;
+ if (CGDebugInfo *DI = CGF.getDebugInfo())
+ DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
+ }
+
+ void addLabel(const LabelDecl *label) {
+ assert(PerformCleanup && "adding label to dead scope?");
+ Labels.push_back(label);
+ }
+
+ /// \brief Exit this cleanup scope, emitting any accumulated
+ /// cleanups.
+ ~LexicalScope() {
+ if (CGDebugInfo *DI = CGF.getDebugInfo())
+ DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
+
+ // If we should perform a cleanup, force them now. Note that
+ // this ends the cleanup scope before rescoping any labels.
+ if (PerformCleanup) {
+ ApplyDebugLocation DL(CGF, Range.getEnd());
+ ForceCleanup();
+ }
+ }
+
+ /// \brief Force the emission of cleanups now, instead of waiting
+ /// until this object is destroyed.
+ void ForceCleanup() {
+ CGF.CurLexicalScope = ParentScope;
+ RunCleanupsScope::ForceCleanup();
+
+ if (!Labels.empty())
+ rescopeLabels();
+ }
+
+ void rescopeLabels();
+ };
+
+ typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;
+
+ /// \brief The scope used to remap some variables as private in the OpenMP
+ /// loop body (or other captured region emitted without outlining), and to
+ /// restore old vars back on exit.
+ class OMPPrivateScope : public RunCleanupsScope {
+ DeclMapTy SavedLocals;
+ DeclMapTy SavedPrivates;
+
+ private:
+ OMPPrivateScope(const OMPPrivateScope &) = delete;
+ void operator=(const OMPPrivateScope &) = delete;
+
+ public:
+ /// \brief Enter a new OpenMP private scope.
+ explicit OMPPrivateScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {}
+
+ /// \brief Registers \a LocalVD as a private variable and applies \a
+ /// PrivateGen to generate its private copy.
+ /// \a PrivateGen returns the address of the generated private variable.
+ /// \return true if the variable is registered as private, false if it has
+ /// been privatized already.
+ bool
+ addPrivate(const VarDecl *LocalVD,
+ llvm::function_ref<Address()> PrivateGen) {
+ assert(PerformCleanup && "adding private to dead scope");
+
+ // Only save it once.
+ if (SavedLocals.count(LocalVD)) return false;
+
+ // Copy the existing local entry to SavedLocals.
+ auto it = CGF.LocalDeclMap.find(LocalVD);
+ if (it != CGF.LocalDeclMap.end()) {
+ SavedLocals.insert({LocalVD, it->second});
+ } else {
+ SavedLocals.insert({LocalVD, Address::invalid()});
+ }
+
+ // Generate the private entry.
+ Address Addr = PrivateGen();
+ QualType VarTy = LocalVD->getType();
+ if (VarTy->isReferenceType()) {
+ Address Temp = CGF.CreateMemTemp(VarTy);
+ CGF.Builder.CreateStore(Addr.getPointer(), Temp);
+ Addr = Temp;
+ }
+ SavedPrivates.insert({LocalVD, Addr});
+
+ return true;
+ }
+
+ /// \brief Privatizes local variables previously registered as private.
+ /// Registration is separate from the actual privatization to allow
+ /// initializers to use the values of the original variables, not the
+ /// private ones. This matters, for example, when the private variable is
+ /// a class instance initialized by a constructor that references other
+ /// private variables: at initialization the original variables must be
+ /// used, not the private copies.
+ /// \return true if at least one variable was privatized, false otherwise.
+ bool Privatize() {
+ copyInto(SavedPrivates, CGF.LocalDeclMap);
+ SavedPrivates.clear();
+ return !SavedLocals.empty();
+ }
+
+ void ForceCleanup() {
+ RunCleanupsScope::ForceCleanup();
+ copyInto(SavedLocals, CGF.LocalDeclMap);
+ SavedLocals.clear();
+ }
+
+ /// \brief Exit scope - all the mapped variables are restored.
+ ~OMPPrivateScope() {
+ if (PerformCleanup)
+ ForceCleanup();
+ }
+
+ private:
+ /// Copy all the entries in the source map over the corresponding
+ /// entries in the destination, inserting them if needed.
+ static void copyInto(const DeclMapTy &src, DeclMapTy &dest) {
+ for (auto &pair : src) {
+ if (!pair.second.isValid()) {
+ dest.erase(pair.first);
+ continue;
+ }
+
+ auto it = dest.find(pair.first);
+ if (it != dest.end()) {
+ it->second = pair.second;
+ } else {
+ dest.insert(pair);
+ }
+ }
+ }
+ };
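+
+ // A sketch of the register-then-privatize protocol (VD and the temp name
+ // are illustrative assumptions):
+ //
+ //   OMPPrivateScope PrivScope(CGF);
+ //   PrivScope.addPrivate(VD, [&CGF, VD]() -> Address {
+ //     return CGF.CreateMemTemp(VD->getType(), "omp.private");
+ //   });
+ //   (void)PrivScope.Privatize(); // LocalDeclMap now maps VD to the copy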
+
+ /// \brief Takes the old cleanup stack size and emits the cleanup blocks
+ /// that have been added.
+ void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
+
+ /// \brief Takes the old cleanup stack size and emits the cleanup blocks
+ /// that have been added, then adds all lifetime-extended cleanups from
+ /// the given position to the stack.
+ void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
+ size_t OldLifetimeExtendedStackSize);
+
+ void ResolveBranchFixups(llvm::BasicBlock *Target);
+
+ /// The given basic block lies in the current EH scope, but may be a
+ /// target of a potentially scope-crossing jump; get a stable handle
+ /// to which we can perform this jump later.
+ JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
+ return JumpDest(Target,
+ EHStack.getInnermostNormalCleanup(),
+ NextCleanupDestIndex++);
+ }
+
+ /// The given basic block lies in the current EH scope, but may be a
+ /// target of a potentially scope-crossing jump; get a stable handle
+ /// to which we can perform this jump later.
+ JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
+ return getJumpDestInCurrentScope(createBasicBlock(Name));
+ }
+
+ /// EmitBranchThroughCleanup - Emit a branch from the current insert
+ /// block through the normal cleanup handling code (if any) and then
+ /// on to \arg Dest.
+ void EmitBranchThroughCleanup(JumpDest Dest);
+
+ /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
+ /// specified destination obviously has no cleanups to run. 'false' is always
+ /// a conservatively correct answer for this method.
+ bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
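+
+ // A sketch of branching out of nested cleanup scopes (assumed context):
+ //
+ //   JumpDest Dest = getJumpDestInCurrentScope("loop.break");
+ //   ...
+ //   EmitBranchThroughCleanup(Dest); // runs intervening normal cleanups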
+
+ /// popCatchScope - Pops the catch scope at the top of the EHScope
+ /// stack, emitting any required code (other than the catch handlers
+ /// themselves).
+ void popCatchScope();
+
+ llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
+ llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
+ llvm::BasicBlock *getMSVCDispatchBlock(EHScopeStack::stable_iterator scope);
+
+ /// An object to manage conditionally-evaluated expressions.
+ class ConditionalEvaluation {
+ llvm::BasicBlock *StartBB;
+
+ public:
+ ConditionalEvaluation(CodeGenFunction &CGF)
+ : StartBB(CGF.Builder.GetInsertBlock()) {}
+
+ void begin(CodeGenFunction &CGF) {
+ assert(CGF.OutermostConditional != this);
+ if (!CGF.OutermostConditional)
+ CGF.OutermostConditional = this;
+ }
+
+ void end(CodeGenFunction &CGF) {
+ assert(CGF.OutermostConditional != nullptr);
+ if (CGF.OutermostConditional == this)
+ CGF.OutermostConditional = nullptr;
+ }
+
+ /// Returns a block which will be executed prior to each
+ /// evaluation of the conditional code.
+ llvm::BasicBlock *getStartingBlock() const {
+ return StartBB;
+ }
+ };
+
+ /// isInConditionalBranch - Return true if we're currently emitting
+ /// one branch or the other of a conditional expression.
+ bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
+
+ void setBeforeOutermostConditional(llvm::Value *value, Address addr) {
+ assert(isInConditionalBranch());
+ llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
+ auto store = new llvm::StoreInst(value, addr.getPointer(), &block->back());
+ store->setAlignment(addr.getAlignment().getQuantity());
+ }
+
+ /// An RAII object to record that we're evaluating a statement
+ /// expression.
+ class StmtExprEvaluation {
+ CodeGenFunction &CGF;
+
+ /// We have to save the outermost conditional: cleanups in a
+ /// statement expression aren't conditional just because the
+ /// StmtExpr is.
+ ConditionalEvaluation *SavedOutermostConditional;
+
+ public:
+ StmtExprEvaluation(CodeGenFunction &CGF)
+ : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
+ CGF.OutermostConditional = nullptr;
+ }
+
+ ~StmtExprEvaluation() {
+ CGF.OutermostConditional = SavedOutermostConditional;
+ CGF.EnsureInsertPoint();
+ }
+ };
+
+ /// An object which temporarily prevents a value from being
+ /// destroyed by aggressive peephole optimizations that assume that
+ /// all uses of a value have been realized in the IR.
+ class PeepholeProtection {
+ llvm::Instruction *Inst;
+ friend class CodeGenFunction;
+
+ public:
+ PeepholeProtection() : Inst(nullptr) {}
+ };
+
+ /// A non-RAII class containing all the information about a bound
+ /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
+ /// this which makes individual mappings very simple; using this
+ /// class directly is useful when you have a variable number of
+ /// opaque values or don't want the RAII functionality for some
+ /// reason.
+ class OpaqueValueMappingData {
+ const OpaqueValueExpr *OpaqueValue;
+ bool BoundLValue;
+ CodeGenFunction::PeepholeProtection Protection;
+
+ OpaqueValueMappingData(const OpaqueValueExpr *ov,
+ bool boundLValue)
+ : OpaqueValue(ov), BoundLValue(boundLValue) {}
+ public:
+ OpaqueValueMappingData() : OpaqueValue(nullptr) {}
+
+ static bool shouldBindAsLValue(const Expr *expr) {
+ // gl-values should be bound as l-values for obvious reasons.
+ // Records should be bound as l-values because IR generation
+ // always keeps them in memory. Expressions of function type
+ // act exactly like l-values but are formally required to be
+ // r-values in C.
+ return expr->isGLValue() ||
+ expr->getType()->isFunctionType() ||
+ hasAggregateEvaluationKind(expr->getType());
+ }
+
+ static OpaqueValueMappingData bind(CodeGenFunction &CGF,
+ const OpaqueValueExpr *ov,
+ const Expr *e) {
+ if (shouldBindAsLValue(ov))
+ return bind(CGF, ov, CGF.EmitLValue(e));
+ return bind(CGF, ov, CGF.EmitAnyExpr(e));
+ }
+
+ static OpaqueValueMappingData bind(CodeGenFunction &CGF,
+ const OpaqueValueExpr *ov,
+ const LValue &lv) {
+ assert(shouldBindAsLValue(ov));
+ CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
+ return OpaqueValueMappingData(ov, true);
+ }
+
+ static OpaqueValueMappingData bind(CodeGenFunction &CGF,
+ const OpaqueValueExpr *ov,
+ const RValue &rv) {
+ assert(!shouldBindAsLValue(ov));
+ CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
+
+ OpaqueValueMappingData data(ov, false);
+
+ // Work around an extremely aggressive peephole optimization in
+ // EmitScalarConversion which assumes that all other uses of a
+ // value are extant.
+ data.Protection = CGF.protectFromPeepholes(rv);
+
+ return data;
+ }
+
+ bool isValid() const { return OpaqueValue != nullptr; }
+ void clear() { OpaqueValue = nullptr; }
+
+ void unbind(CodeGenFunction &CGF) {
+ assert(OpaqueValue && "no data to unbind!");
+
+ if (BoundLValue) {
+ CGF.OpaqueLValues.erase(OpaqueValue);
+ } else {
+ CGF.OpaqueRValues.erase(OpaqueValue);
+ CGF.unprotectFromPeepholes(Protection);
+ }
+ }
+ };
+
+ /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
+ class OpaqueValueMapping {
+ CodeGenFunction &CGF;
+ OpaqueValueMappingData Data;
+
+ public:
+ static bool shouldBindAsLValue(const Expr *expr) {
+ return OpaqueValueMappingData::shouldBindAsLValue(expr);
+ }
+
+ /// Build the opaque value mapping for the given conditional
+ /// operator if it's the GNU ?: extension. This is a common
+ /// enough pattern that the convenience constructor is really
+ /// helpful.
+ OpaqueValueMapping(CodeGenFunction &CGF,
+ const AbstractConditionalOperator *op) : CGF(CGF) {
+ if (isa<ConditionalOperator>(op))
+ // Leave Data empty.
+ return;
+
+ const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
+ Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
+ e->getCommon());
+ }
+
+ OpaqueValueMapping(CodeGenFunction &CGF,
+ const OpaqueValueExpr *opaqueValue,
+ LValue lvalue)
+ : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
+ }
+
+ OpaqueValueMapping(CodeGenFunction &CGF,
+ const OpaqueValueExpr *opaqueValue,
+ RValue rvalue)
+ : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
+ }
+
+ void pop() {
+ Data.unbind(CGF);
+ Data.clear();
+ }
+
+ ~OpaqueValueMapping() {
+ if (Data.isValid()) Data.unbind(CGF);
+ }
+ };
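+
+ // A sketch for the GNU ?: case (E is assumed to be a
+ // BinaryConditionalOperator being emitted):
+ //
+ //   OpaqueValueMapping binding(CGF, E); // binds E->getOpaqueValue()
+ //   // ...emit the arms, which may reference the opaque value...
+ //   // the mapping is unbound when 'binding' goes out of scope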
+
+private:
+ CGDebugInfo *DebugInfo;
+ bool DisableDebugInfo;
+
+ /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
+ /// calling llvm.stacksave for multiple VLAs in the same scope.
+ bool DidCallStackSave;
+
+ /// IndirectBranch - The first time an indirect goto is seen we create a block
+ /// with an indirect branch. Every time we see the address of a label taken,
+ /// we add the label to the indirect goto. Every subsequent indirect goto is
+ /// codegen'd as a jump to the IndirectBranch's basic block.
+ llvm::IndirectBrInst *IndirectBranch;
+
+ /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
+ /// decls.
+ DeclMapTy LocalDeclMap;
+
+ /// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
+ /// will contain a mapping from said ParmVarDecl to its implicit "object_size"
+ /// parameter.
+ llvm::SmallDenseMap<const ParmVarDecl *, const ImplicitParamDecl *, 2>
+ SizeArguments;
+
+ /// Track escaped local variables with auto storage. Used during SEH
+ /// outlining to produce a call to llvm.localescape.
+ llvm::DenseMap<llvm::AllocaInst *, int> EscapedLocals;
+
+ /// LabelMap - This keeps track of the LLVM basic block for each C label.
+ llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
+
+ // BreakContinueStack - This keeps track of where break and continue
+ // statements should jump to.
+ struct BreakContinue {
+ BreakContinue(JumpDest Break, JumpDest Continue)
+ : BreakBlock(Break), ContinueBlock(Continue) {}
+
+ JumpDest BreakBlock;
+ JumpDest ContinueBlock;
+ };
+ SmallVector<BreakContinue, 8> BreakContinueStack;
+
+ CodeGenPGO PGO;
+
+ /// Calculate branch weights appropriate for PGO data.
+ llvm::MDNode *createProfileWeights(uint64_t TrueCount, uint64_t FalseCount);
+ llvm::MDNode *createProfileWeights(ArrayRef<uint64_t> Weights);
+ llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
+ uint64_t LoopCount);
+
+public:
+ /// Increment the profiler's counter for the given statement.
+ void incrementProfileCounter(const Stmt *S) {
+ if (CGM.getCodeGenOpts().ProfileInstrGenerate)
+ PGO.emitCounterIncrement(Builder, S);
+ PGO.setCurrentStmt(S);
+ }
+
+ /// Get the profiler's count for the given statement.
+ uint64_t getProfileCount(const Stmt *S) {
+ Optional<uint64_t> Count = PGO.getStmtCount(S);
+ if (!Count.hasValue())
+ return 0;
+ return *Count;
+ }
+
+ /// Set the profiler's current count.
+ void setCurrentProfileCount(uint64_t Count) {
+ PGO.setCurrentRegionCount(Count);
+ }
+
+ /// Get the profiler's current count. This is generally the count for the most
+ /// recently incremented counter.
+ uint64_t getCurrentProfileCount() {
+ return PGO.getCurrentRegionCount();
+ }
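+
+ // A sketch of the intended counter protocol when emitting a branch
+ // (S is assumed to be the counted statement):
+ //
+ //   uint64_t Count = getProfileCount(S); // weight for branch metadata
+ //   EmitBlock(ThenBlock);
+ //   incrementProfileCounter(S);          // entering the counted region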
+
+private:
+
+ /// SwitchInsn - This is the nearest enclosing switch instruction. It is
+ /// null if the current context is not in a switch.
+ llvm::SwitchInst *SwitchInsn;
+ /// The branch weights of SwitchInsn when doing instrumentation based PGO.
+ SmallVector<uint64_t, 16> *SwitchWeights;
+
+ /// CaseRangeBlock - This block holds the condition check for the last case
+ /// statement range in the current switch instruction.
+ llvm::BasicBlock *CaseRangeBlock;
+
+ /// OpaqueLValues - Keeps track of the current set of opaque value
+ /// expressions.
+ llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
+ llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
+
+ // VLASizeMap - This keeps track of the associated size for each VLA type.
+ // We track this by the size expression rather than the type itself because
+ // in certain situations, like a const qualifier applied to an VLA typedef,
+ // multiple VLA types can share the same size expression.
+ // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
+ // enter/leave scopes.
+ llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
+
+ /// A block containing a single 'unreachable' instruction. Created
+ /// lazily by getUnreachableBlock().
+ llvm::BasicBlock *UnreachableBlock;
+
+ /// Count of the number of return expressions in the function.
+ unsigned NumReturnExprs;
+
+ /// Count of the number of simple (constant) return expressions in the function.
+ unsigned NumSimpleReturnExprs;
+
+ /// The last regular (non-return) debug location (breakpoint) in the function.
+ SourceLocation LastStopPoint;
+
+public:
+ /// A scope within which we are constructing the fields of an object which
+ /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
+ /// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
+ class FieldConstructionScope {
+ public:
+ FieldConstructionScope(CodeGenFunction &CGF, Address This)
+ : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
+ CGF.CXXDefaultInitExprThis = This;
+ }
+ ~FieldConstructionScope() {
+ CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis;
+ }
+
+ private:
+ CodeGenFunction &CGF;
+ Address OldCXXDefaultInitExprThis;
+ };
+
+ /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
+ /// is overridden to be the object under construction.
+ class CXXDefaultInitExprScope {
+ public:
+ CXXDefaultInitExprScope(CodeGenFunction &CGF)
+ : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
+ OldCXXThisAlignment(CGF.CXXThisAlignment) {
+ CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getPointer();
+ CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
+ }
+ ~CXXDefaultInitExprScope() {
+ CGF.CXXThisValue = OldCXXThisValue;
+ CGF.CXXThisAlignment = OldCXXThisAlignment;
+ }
+
+ public:
+ CodeGenFunction &CGF;
+ llvm::Value *OldCXXThisValue;
+ CharUnits OldCXXThisAlignment;
+ };
+
+private:
+ /// CXXABIThisDecl - When generating code for a C++ member function,
+ /// this will hold the implicit 'this' declaration.
+ ImplicitParamDecl *CXXABIThisDecl;
+ llvm::Value *CXXABIThisValue;
+ llvm::Value *CXXThisValue;
+ CharUnits CXXABIThisAlignment;
+ CharUnits CXXThisAlignment;
+
+ /// The value of 'this' to use when evaluating CXXDefaultInitExprs within
+ /// this expression.
+ Address CXXDefaultInitExprThis = Address::invalid();
+
+ /// CXXStructorImplicitParamDecl - When generating code for a constructor or
+ /// destructor, this will hold the implicit argument (e.g. VTT).
+ ImplicitParamDecl *CXXStructorImplicitParamDecl;
+ llvm::Value *CXXStructorImplicitParamValue;
+
+ /// OutermostConditional - Points to the outermost active
+ /// conditional control. This is used so that we know if a
+ /// temporary should be destroyed conditionally.
+ ConditionalEvaluation *OutermostConditional;
+
+ /// The current lexical scope.
+ LexicalScope *CurLexicalScope;
+
+ /// The current source location that should be used for exception
+ /// handling code.
+ SourceLocation CurEHLocation;
+
+ /// BlockByrefInfos - For each __block variable, contains
+ /// information about the layout of the variable.
+ llvm::DenseMap<const ValueDecl *, BlockByrefInfo> BlockByrefInfos;
+
+ llvm::BasicBlock *TerminateLandingPad;
+ llvm::BasicBlock *TerminateHandler;
+ llvm::BasicBlock *TrapBB;
+
+ /// Add a kernel metadata node to the named metadata node 'opencl.kernels'.
+ /// In the kernel metadata node, reference the kernel function and metadata
+ /// nodes for its optional attribute qualifiers (OpenCL 1.1 6.7.2):
+ /// - A node for the vec_type_hint(<type>) qualifier contains string
+ /// "vec_type_hint", an undefined value of the <type> data type,
+ /// and a Boolean that is true if the <type> is integer and signed.
+ /// - A node for the work_group_size_hint(X,Y,Z) qualifier contains string
+ /// "work_group_size_hint", and three 32-bit integers X, Y and Z.
+ /// - A node for the reqd_work_group_size(X,Y,Z) qualifier contains string
+ /// "reqd_work_group_size", and three 32-bit integers X, Y and Z.
+ void EmitOpenCLKernelMetadata(const FunctionDecl *FD,
+ llvm::Function *Fn);
+
+public:
+ CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
+ ~CodeGenFunction();
+
+ CodeGenTypes &getTypes() const { return CGM.getTypes(); }
+ ASTContext &getContext() const { return CGM.getContext(); }
+ CGDebugInfo *getDebugInfo() {
+ if (DisableDebugInfo)
+ return nullptr;
+ return DebugInfo;
+ }
+ void disableDebugInfo() { DisableDebugInfo = true; }
+ void enableDebugInfo() { DisableDebugInfo = false; }
+
+ bool shouldUseFusedARCCalls() {
+ return CGM.getCodeGenOpts().OptimizationLevel == 0;
+ }
+
+ const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
+
+ /// Returns a pointer to the function's exception object and selector slot,
+ /// which is assigned in every landing pad.
+ Address getExceptionSlot();
+ Address getEHSelectorSlot();
+
+ /// Returns the contents of the function's exception object and selector
+ /// slots.
+ llvm::Value *getExceptionFromSlot();
+ llvm::Value *getSelectorFromSlot();
+
+ Address getNormalCleanupDestSlot();
+
+ llvm::BasicBlock *getUnreachableBlock() {
+ if (!UnreachableBlock) {
+ UnreachableBlock = createBasicBlock("unreachable");
+ new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
+ }
+ return UnreachableBlock;
+ }
+
+ llvm::BasicBlock *getInvokeDest() {
+ if (!EHStack.requiresLandingPad()) return nullptr;
+ return getInvokeDestImpl();
+ }
+
+ bool currentFunctionUsesSEHTry() const {
+ const auto *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
+ return FD && FD->usesSEHTry();
+ }
+
+ const TargetInfo &getTarget() const { return Target; }
+ llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
+
+ //===--------------------------------------------------------------------===//
+ // Cleanups
+ //===--------------------------------------------------------------------===//
+
+ typedef void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty);
+
+ void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
+ Address arrayEndPointer,
+ QualType elementType,
+ CharUnits elementAlignment,
+ Destroyer *destroyer);
+ void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
+ llvm::Value *arrayEnd,
+ QualType elementType,
+ CharUnits elementAlignment,
+ Destroyer *destroyer);
+
+ void pushDestroy(QualType::DestructionKind dtorKind,
+ Address addr, QualType type);
+ void pushEHDestroy(QualType::DestructionKind dtorKind,
+ Address addr, QualType type);
+ void pushDestroy(CleanupKind kind, Address addr, QualType type,
+ Destroyer *destroyer, bool useEHCleanupForArray);
+ void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr,
+ QualType type, Destroyer *destroyer,
+ bool useEHCleanupForArray);
+ void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
+ llvm::Value *CompletePtr,
+ QualType ElementType);
+ void pushStackRestore(CleanupKind kind, Address SPMem);
+ void emitDestroy(Address addr, QualType type, Destroyer *destroyer,
+ bool useEHCleanupForArray);
+ llvm::Function *generateDestroyHelper(Address addr, QualType type,
+ Destroyer *destroyer,
+ bool useEHCleanupForArray,
+ const VarDecl *VD);
+ void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
+ QualType elementType, CharUnits elementAlign,
+ Destroyer *destroyer,
+ bool checkZeroLength, bool useEHCleanup);
+
+ Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
+
+ /// Determines whether an EH cleanup is required to destroy a type
+ /// with the given destruction kind.
+ bool needsEHCleanup(QualType::DestructionKind kind) {
+ switch (kind) {
+ case QualType::DK_none:
+ return false;
+ case QualType::DK_cxx_destructor:
+ case QualType::DK_objc_weak_lifetime:
+ return getLangOpts().Exceptions;
+ case QualType::DK_objc_strong_lifetime:
+ return getLangOpts().Exceptions &&
+ CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
+ }
+ llvm_unreachable("bad destruction kind");
+ }
+
+ CleanupKind getCleanupKind(QualType::DestructionKind kind) {
+ return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
+ }
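+
+ // For example, an ARC __strong local gets NormalAndEHCleanup only when
+ // both exceptions and ObjCAutoRefCountExceptions are enabled; otherwise
+ // getCleanupKind(QualType::DK_objc_strong_lifetime) degrades to
+ // NormalCleanup.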
+
+ //===--------------------------------------------------------------------===//
+ // Objective-C
+ //===--------------------------------------------------------------------===//
+
+ void GenerateObjCMethod(const ObjCMethodDecl *OMD);
+
+ void StartObjCMethod(const ObjCMethodDecl *MD, const ObjCContainerDecl *CD);
+
+ /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
+ void GenerateObjCGetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID);
+ void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
+ const ObjCPropertyImplDecl *propImpl,
+ const ObjCMethodDecl *GetterMethodDecl,
+ llvm::Constant *AtomicHelperFn);
+
+ void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
+ ObjCMethodDecl *MD, bool ctor);
+
+ /// GenerateObjCSetter - Synthesize an Objective-C property setter function
+ /// for the given property.
+ void GenerateObjCSetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID);
+ void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
+ const ObjCPropertyImplDecl *propImpl,
+ llvm::Constant *AtomicHelperFn);
+
+ //===--------------------------------------------------------------------===//
+ // Block Bits
+ //===--------------------------------------------------------------------===//
+
+ llvm::Value *EmitBlockLiteral(const BlockExpr *);
+ llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
+ static void destroyBlockInfos(CGBlockInfo *info);
+
+ llvm::Function *GenerateBlockFunction(GlobalDecl GD,
+ const CGBlockInfo &Info,
+ const DeclMapTy &ldm,
+ bool IsLambdaConversionToBlock);
+
+ llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
+ llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
+ llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID);
+ llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID);
+ llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
+
+ void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
+
+ class AutoVarEmission;
+
+ void emitByrefStructureInit(const AutoVarEmission &emission);
+ void enterByrefCleanup(const AutoVarEmission &emission);
+
+ void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
+ llvm::Value *ptr);
+
+ Address LoadBlockStruct();
+ Address GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
+
+ /// emitBlockByrefAddress - Computes the location of the
+ /// data in a variable which is declared as __block.
+ Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V,
+ bool followForward = true);
+ Address emitBlockByrefAddress(Address baseAddr,
+ const BlockByrefInfo &info,
+ bool followForward,
+ const llvm::Twine &name);
+
+ const BlockByrefInfo &getBlockByrefInfo(const VarDecl *var);
+
+ void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo);
+ /// \brief Emit code for the start of a function.
+ /// \param Loc The location to be associated with the function.
+ /// \param StartLoc The location of the function body.
+ void StartFunction(GlobalDecl GD,
+ QualType RetTy,
+ llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo,
+ const FunctionArgList &Args,
+ SourceLocation Loc = SourceLocation(),
+ SourceLocation StartLoc = SourceLocation());
+
+ void EmitConstructorBody(FunctionArgList &Args);
+ void EmitDestructorBody(FunctionArgList &Args);
+ void emitImplicitAssignmentOperatorBody(FunctionArgList &Args);
+ void EmitFunctionBody(FunctionArgList &Args, const Stmt *Body);
+ void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);
+
+ void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
+ CallArgList &CallArgs);
+ void EmitLambdaToBlockPointerBody(FunctionArgList &Args);
+ void EmitLambdaBlockInvokeBody();
+ void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
+ void EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD);
+ void EmitAsanPrologueOrEpilogue(bool Prologue);
+
+ /// \brief Emit the unified return block, trying to avoid its emission when
+ /// possible.
+ /// \return The debug location of the user-written return statement if the
+ /// return block is avoided.
+ llvm::DebugLoc EmitReturnBlock();
+
+ /// FinishFunction - Complete IR generation of the current function. It is
+ /// legal to call this function even if there is no current insertion point.
+ void FinishFunction(SourceLocation EndLoc=SourceLocation());
+
+ void StartThunk(llvm::Function *Fn, GlobalDecl GD,
+ const CGFunctionInfo &FnInfo);
+
+ void EmitCallAndReturnForThunk(llvm::Value *Callee, const ThunkInfo *Thunk);
+
+ void FinishThunk();
+
+ /// Emit a musttail call for a thunk with a potentially adjusted this pointer.
+ void EmitMustTailThunk(const CXXMethodDecl *MD, llvm::Value *AdjustedThisPtr,
+ llvm::Value *Callee);
+
+ /// Generate a thunk for the given method.
+ void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
+ GlobalDecl GD, const ThunkInfo &Thunk);
+
+ llvm::Function *GenerateVarArgsThunk(llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo,
+ GlobalDecl GD, const ThunkInfo &Thunk);
+
+ void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
+ FunctionArgList &Args);
+
+ void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init,
+ ArrayRef<VarDecl *> ArrayIndexes);
+
+ /// Struct with all the information about a dynamic [sub]class needed to set a vptr.
+ struct VPtr {
+ BaseSubobject Base;
+ const CXXRecordDecl *NearestVBase;
+ CharUnits OffsetFromNearestVBase;
+ const CXXRecordDecl *VTableClass;
+ };
+
+ /// Initialize the vtable pointer of the given subobject.
+ void InitializeVTablePointer(const VPtr &vptr);
+
+ typedef llvm::SmallVector<VPtr, 4> VPtrsVector;
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+ VPtrsVector getVTablePointers(const CXXRecordDecl *VTableClass);
+
+ void getVTablePointers(BaseSubobject Base, const CXXRecordDecl *NearestVBase,
+ CharUnits OffsetFromNearestVBase,
+ bool BaseIsNonVirtualPrimaryBase,
+ const CXXRecordDecl *VTableClass,
+ VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs);
+
+ void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
+
+ /// GetVTablePtr - Return the Value of the vtable pointer member pointed
+ /// to by This.
+ llvm::Value *GetVTablePtr(Address This, llvm::Type *VTableTy,
+ const CXXRecordDecl *VTableClass);
+
+ enum CFITypeCheckKind {
+ CFITCK_VCall,
+ CFITCK_NVCall,
+ CFITCK_DerivedCast,
+ CFITCK_UnrelatedCast,
+ };
+
+ /// \brief Derived is the presumed address of an object of type T after a
+ /// cast. If T is a polymorphic class type, emit a check that the virtual
+ /// table for Derived belongs to a class derived from T.
+ void EmitVTablePtrCheckForCast(QualType T, llvm::Value *Derived,
+ bool MayBeNull, CFITypeCheckKind TCK,
+ SourceLocation Loc);
+
+ /// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
+ /// If vptr CFI is enabled, emit a check that VTable is valid.
+ void EmitVTablePtrCheckForCall(const CXXMethodDecl *MD, llvm::Value *VTable,
+ CFITypeCheckKind TCK, SourceLocation Loc);
+
+ /// EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for
+ /// RD using llvm.bitset.test.
+ void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable,
+ CFITypeCheckKind TCK, SourceLocation Loc);
+
+ /// CanDevirtualizeMemberFunctionCall - Checks whether a virtual call on the
+ /// given expr can be devirtualized.
+ bool CanDevirtualizeMemberFunctionCall(const Expr *Base,
+ const CXXMethodDecl *MD);
+
+ /// EnterDtorCleanups - Enter the cleanups necessary to complete the
+ /// given phase of destruction for a destructor. The end result
+ /// should call destructors on members and base classes in reverse
+ /// order of their construction.
+ void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
+
+ /// ShouldInstrumentFunction - Return true if the current function should be
+ /// instrumented with __cyg_profile_func_* calls.
+ bool ShouldInstrumentFunction();
+
+ /// EmitFunctionInstrumentation - Emit LLVM code to call the specified
+ /// instrumentation function with the current function and the call site, if
+ /// function instrumentation is enabled.
+ void EmitFunctionInstrumentation(const char *Fn);
+
+ /// EmitMCountInstrumentation - Emit call to .mcount.
+ void EmitMCountInstrumentation();
+
+ /// EmitFunctionProlog - Emit the target specific LLVM code to load the
+ /// arguments for the given function. This is also responsible for naming the
+ /// LLVM function arguments.
+ void EmitFunctionProlog(const CGFunctionInfo &FI,
+ llvm::Function *Fn,
+ const FunctionArgList &Args);
+
+ /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
+ /// given temporary.
+ void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc,
+ SourceLocation EndLoc);
+
+ /// EmitStartEHSpec - Emit the start of the exception spec.
+ void EmitStartEHSpec(const Decl *D);
+
+ /// EmitEndEHSpec - Emit the end of the exception spec.
+ void EmitEndEHSpec(const Decl *D);
+
+ /// getTerminateLandingPad - Return a landing pad that just calls terminate.
+ llvm::BasicBlock *getTerminateLandingPad();
+
+ /// getTerminateHandler - Return a handler (not a landing pad, just
+ /// a catch handler) that just calls terminate. This is used when
+ /// a terminate scope encloses a try.
+ llvm::BasicBlock *getTerminateHandler();
+
+ llvm::Type *ConvertTypeForMem(QualType T);
+ llvm::Type *ConvertType(QualType T);
+ llvm::Type *ConvertType(const TypeDecl *T) {
+ return ConvertType(getContext().getTypeDeclType(T));
+ }
+
+ /// LoadObjCSelf - Load the value of self. This function is only valid while
+ /// generating code for an Objective-C method.
+ llvm::Value *LoadObjCSelf();
+
+ /// TypeOfSelfObject - Return type of object that this self represents.
+ QualType TypeOfSelfObject();
+
+ /// getEvaluationKind - Return the TypeEvaluationKind of QualType \c T.
+ static TypeEvaluationKind getEvaluationKind(QualType T);
+
+ static bool hasScalarEvaluationKind(QualType T) {
+ return getEvaluationKind(T) == TEK_Scalar;
+ }
+
+ static bool hasAggregateEvaluationKind(QualType T) {
+ return getEvaluationKind(T) == TEK_Aggregate;
+ }
+
+ /// createBasicBlock - Create an LLVM basic block.
+ llvm::BasicBlock *createBasicBlock(const Twine &name = "",
+ llvm::Function *parent = nullptr,
+ llvm::BasicBlock *before = nullptr) {
+#ifdef NDEBUG
+ return llvm::BasicBlock::Create(getLLVMContext(), "", parent, before);
+#else
+ return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
+#endif
+ }
+
+ /// getJumpDestForLabel - Return the JumpDest that the specified
+ /// label maps to.
+ JumpDest getJumpDestForLabel(const LabelDecl *S);
+
+ /// SimplifyForwardingBlocks - If the given basic block is only a branch to
+ /// another basic block, simplify it. This assumes that no other code could
+ /// potentially reference the basic block.
+ void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
+
+ /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
+ /// adding a fall-through branch from the current insert block if
+ /// necessary. It is legal to call this function even if there is no current
+ /// insertion point.
+ ///
+ /// IsFinished - If true, indicates that the caller has finished emitting
+ /// branches to the given block and does not expect to emit code into it. This
+ /// means the block can be ignored if it is unreachable.
+ void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
+
+ /// EmitBlockAfterUses - Emit the given block somewhere hopefully
+ /// near its uses, and leave the insertion point in it.
+ void EmitBlockAfterUses(llvm::BasicBlock *BB);
+
+ /// EmitBranch - Emit a branch to the specified basic block from the current
+ /// insert block, taking care to avoid creation of branches from dummy
+ /// blocks. It is legal to call this function even if there is no current
+ /// insertion point.
+ ///
+ /// This function clears the current insertion point. The caller should follow
+ /// calls to this function with calls to Emit*Block prior to generating new
+ /// code.
+ void EmitBranch(llvm::BasicBlock *Block);
+
+ /// HaveInsertPoint - True if an insertion point is defined. If not, this
+ /// indicates that the current code being emitted is unreachable.
+ bool HaveInsertPoint() const {
+ return Builder.GetInsertBlock() != nullptr;
+ }
+
+ /// EnsureInsertPoint - Ensure that an insertion point is defined so that
+ /// emitted IR has a place to go. Note that by definition, if this function
+ /// creates a block then that block is unreachable; callers may do better to
+ /// detect when no insertion point is defined and simply skip IR generation.
+ void EnsureInsertPoint() {
+ if (!HaveInsertPoint())
+ EmitBlock(createBasicBlock());
+ }
+
+ /// ErrorUnsupported - Print out an error that codegen doesn't support the
+ /// specified stmt yet.
+ void ErrorUnsupported(const Stmt *S, const char *Type);
+
+ //===--------------------------------------------------------------------===//
+ // Helpers
+ //===--------------------------------------------------------------------===//
+
+ LValue MakeAddrLValue(Address Addr, QualType T,
+ AlignmentSource AlignSource = AlignmentSource::Type) {
+ return LValue::MakeAddr(Addr, T, getContext(), AlignSource,
+ CGM.getTBAAInfo(T));
+ }
+
+ LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
+ AlignmentSource AlignSource = AlignmentSource::Type) {
+ return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
+ AlignSource, CGM.getTBAAInfo(T));
+ }
+
+ LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);
+ LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T);
+ CharUnits getNaturalTypeAlignment(QualType T,
+ AlignmentSource *Source = nullptr,
+ bool forPointeeType = false);
+ CharUnits getNaturalPointeeTypeAlignment(QualType T,
+ AlignmentSource *Source = nullptr);
+
+ Address EmitLoadOfReference(Address Ref, const ReferenceType *RefTy,
+ AlignmentSource *Source = nullptr);
+ LValue EmitLoadOfReferenceLValue(Address Ref, const ReferenceType *RefTy);
+
+ /// CreateTempAlloca - This creates an alloca and inserts it into the entry
+ /// block. The caller is responsible for setting an appropriate alignment on
+ /// the alloca.
+ llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty,
+ const Twine &Name = "tmp");
+ Address CreateTempAlloca(llvm::Type *Ty, CharUnits align,
+ const Twine &Name = "tmp");
+
+ /// CreateDefaultAlignedTempAlloca - This creates an alloca with the
+ /// default ABI alignment of the given LLVM type.
+ ///
+ /// IMPORTANT NOTE: This is *not* generally the right alignment for
+ /// any given AST type that happens to have been lowered to the
+ /// given IR type. This should only ever be used for function-local,
+ /// IR-driven manipulations like saving and restoring a value. Do
+ /// not hand this address off to arbitrary IRGen routines, and especially
+ /// do not pass it as an argument to a function that might expect a
+ /// properly ABI-aligned value.
+ Address CreateDefaultAlignTempAlloca(llvm::Type *Ty,
+ const Twine &Name = "tmp");
+
+ /// InitTempAlloca - Provide an initial value for the given alloca which
+ /// will be observable at all locations in the function.
+ ///
+ /// The address should be something that was returned from one of
+ /// the CreateTempAlloca or CreateMemTemp routines, and the
+ /// initializer must be valid in the entry block (i.e. it must
+ /// either be a constant or an argument value).
+ void InitTempAlloca(Address Alloca, llvm::Value *Value);
+
+ /// CreateIRTemp - Create a temporary IR object of the given type, with
+ /// appropriate alignment. This routine should only be used when a temporary
+ /// value needs to be stored into an alloca (for example, to avoid explicit
+ /// PHI construction), but the type is the IR type, not the type appropriate
+ /// for storing in memory.
+ ///
+ /// That is, this is exactly equivalent to CreateMemTemp, but calling
+ /// ConvertType instead of ConvertTypeForMem.
+ Address CreateIRTemp(QualType T, const Twine &Name = "tmp");
+
+ /// CreateMemTemp - Create a temporary memory object of the given type, with
+ /// appropriate alignment.
+ Address CreateMemTemp(QualType T, const Twine &Name = "tmp");
+ Address CreateMemTemp(QualType T, CharUnits Align, const Twine &Name = "tmp");
+
+ /// CreateAggTemp - Create a temporary memory object for the given
+ /// aggregate type.
+ AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
+ return AggValueSlot::forAddr(CreateMemTemp(T, Name),
+ T.getQualifiers(),
+ AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ }
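+
+ // A sketch (assumed context: E is an aggregate-typed expression):
+ //
+ //   AggValueSlot Slot = CGF.CreateAggTemp(E->getType(), "agg.tmp");
+ //   CGF.EmitAggExpr(E, Slot); // evaluate E into the fresh temporary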
+
+ /// Emit a cast to void* in the appropriate address space.
+ llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
+
+ /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
+ /// expression and compare the result against zero, returning an Int1Ty value.
+ llvm::Value *EvaluateExprAsBool(const Expr *E);
+
+ /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
+ void EmitIgnoredExpr(const Expr *E);
+
+ /// EmitAnyExpr - Emit code to compute the specified expression which can have
+ /// any type. The result is returned as an RValue struct. If this is an
+ /// aggregate expression, the aggSlot argument indicates where the result
+ /// should be returned.
+ ///
+ /// \param ignoreResult True if the resulting value isn't used.
+ RValue EmitAnyExpr(const Expr *E,
+ AggValueSlot aggSlot = AggValueSlot::ignored(),
+ bool ignoreResult = false);
+
+ /// EmitVAListRef - Emit a "reference" to a va_list; this is either the address
+ /// or the value of the expression, depending on how va_list is defined.
+ Address EmitVAListRef(const Expr *E);
+
+ /// Emit a "reference" to a __builtin_ms_va_list; this is
+ /// always the value of the expression, because a __builtin_ms_va_list is a
+ /// pointer to a char.
+ Address EmitMSVAListRef(const Expr *E);
+
+ /// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
+ /// always be accessible even if no aggregate location is provided.
+ RValue EmitAnyExprToTemp(const Expr *E);
+
+ /// EmitAnyExprToMem - Emits the code necessary to evaluate an
+ /// arbitrary expression into the given memory location.
+ void EmitAnyExprToMem(const Expr *E, Address Location,
+ Qualifiers Quals, bool IsInitializer);
+
+ void EmitAnyExprToExn(const Expr *E, Address Addr);
+
+ /// EmitExprAsInit - Emits the code necessary to initialize a
+ /// location in memory with the given initializer.
+ void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue,
+ bool capturedByInit);
+
+ /// hasVolatileMember - returns true if aggregate type has a volatile
+ /// member.
+ bool hasVolatileMember(QualType T) {
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
+ return RD->hasVolatileMember();
+ }
+ return false;
+ }
+ /// EmitAggregateAssign - Emit an aggregate assignment.
+ ///
+ /// The difference from EmitAggregateCopy is that tail padding is not copied.
+ /// This is required for correctness when assigning non-POD structures in C++.
+ void EmitAggregateAssign(Address DestPtr, Address SrcPtr,
+ QualType EltTy) {
+ bool IsVolatile = hasVolatileMember(EltTy);
+ EmitAggregateCopy(DestPtr, SrcPtr, EltTy, IsVolatile, true);
+ }
+
+ void EmitAggregateCopyCtor(Address DestPtr, Address SrcPtr,
+ QualType DestTy, QualType SrcTy) {
+ EmitAggregateCopy(DestPtr, SrcPtr, SrcTy, /*IsVolatile=*/false,
+ /*IsAssignment=*/false);
+ }
+
+ /// EmitAggregateCopy - Emit an aggregate copy.
+ ///
+ /// \param isVolatile - True iff either the source or the destination is
+ /// volatile.
+ /// \param isAssignment - If false, allow padding to be copied. This often
+ /// yields more efficient code.
+ void EmitAggregateCopy(Address DestPtr, Address SrcPtr,
+ QualType EltTy, bool isVolatile=false,
+ bool isAssignment = false);
+
+ /// GetAddrOfLocalVar - Return the address of a local variable.
+ Address GetAddrOfLocalVar(const VarDecl *VD) {
+ auto it = LocalDeclMap.find(VD);
+ assert(it != LocalDeclMap.end() &&
+ "Invalid argument to GetAddrOfLocalVar(), no decl!");
+ return it->second;
+ }
+
+ /// getOpaqueLValueMapping - Given an opaque value expression (which
+ /// must be mapped to an l-value), return its mapping.
+ const LValue &getOpaqueLValueMapping(const OpaqueValueExpr *e) {
+ assert(OpaqueValueMapping::shouldBindAsLValue(e));
+
+ llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
+ it = OpaqueLValues.find(e);
+ assert(it != OpaqueLValues.end() && "no mapping for opaque value!");
+ return it->second;
+ }
+
+ /// getOpaqueRValueMapping - Given an opaque value expression (which
+ /// must be mapped to an r-value), return its mapping.
+ const RValue &getOpaqueRValueMapping(const OpaqueValueExpr *e) {
+ assert(!OpaqueValueMapping::shouldBindAsLValue(e));
+
+ llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
+ it = OpaqueRValues.find(e);
+ assert(it != OpaqueRValues.end() && "no mapping for opaque value!");
+ return it->second;
+ }
+
+ /// getAccessedFieldNo - Given an encoded value and a result number, return
+ /// the input field number being accessed.
+ static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
+
+ llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
+ llvm::BasicBlock *GetIndirectGotoBlock();
+
+ /// EmitNullInitialization - Generate code to set a value of the given type
+ /// to null. If the type contains data member pointers, they will be
+ /// initialized to -1 in accordance with the Itanium C++ ABI.
+ void EmitNullInitialization(Address DestPtr, QualType Ty);
+
+ /// Emits a call to an LLVM variable-argument intrinsic, either
+ /// \c llvm.va_start or \c llvm.va_end.
+ /// \param ArgValue A reference to the \c va_list as emitted by either
+ /// \c EmitVAListRef or \c EmitMSVAListRef.
+ /// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise,
+ /// calls \c llvm.va_end.
+ llvm::Value *EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart);
+
+ /// Generate code to get an argument from the passed in pointer
+ /// and update it accordingly.
+ /// \param VE The \c VAArgExpr for which to generate code.
+ /// \param VAListAddr Receives a reference to the \c va_list as emitted by
+ /// either \c EmitVAListRef or \c EmitMSVAListRef.
+ /// \returns A pointer to the argument.
+ // FIXME: We should be able to get rid of this method and use the va_arg
+ // instruction in LLVM instead once it works well enough.
+ Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr);
+
+ /// emitArrayLength - Compute the length of an array, even if it's a
+ /// VLA, and drill down to the base element type.
+ llvm::Value *emitArrayLength(const ArrayType *arrayType,
+ QualType &baseType,
+ Address &addr);
+
+ /// EmitVariablyModifiedType - Capture all the sizes for the VLA expressions
+ /// in the given variably-modified type and store them in the VLASizeMap.
+ ///
+ /// This function can be called with a null (unreachable) insert point.
+ void EmitVariablyModifiedType(QualType Ty);
+
+ /// getVLASize - Returns an LLVM value that corresponds to the size,
+ /// in non-variably-sized elements, of a variable length array type,
+ /// plus the largest non-variably-sized element type. Assumes that
+ /// the type has already been emitted with EmitVariablyModifiedType.
+ std::pair<llvm::Value*,QualType> getVLASize(const VariableArrayType *vla);
+ std::pair<llvm::Value*,QualType> getVLASize(QualType vla);
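+ // For a VLA such as 'int a[n][m]' (a sketch; VlaTy is hypothetical):
+ //   EmitVariablyModifiedType(VlaTy);   // captures n and m
+ //   auto Size = getVLASize(VlaTy);     // {n*m, QualType for 'int'}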
+
+ /// LoadCXXThis - Load the value of 'this'. This function is only valid while
+ /// generating code for a C++ member function.
+ llvm::Value *LoadCXXThis() {
+ assert(CXXThisValue && "no 'this' value for this function");
+ return CXXThisValue;
+ }
+ Address LoadCXXThisAddress();
+
+ /// LoadCXXVTT - Load the VTT parameter to base constructors/destructors
+ /// that have virtual bases.
+ // FIXME: Every place that calls LoadCXXVTT is something
+ // that needs to be abstracted properly.
+ llvm::Value *LoadCXXVTT() {
+ assert(CXXStructorImplicitParamValue && "no VTT value for this function");
+ return CXXStructorImplicitParamValue;
+ }
+
+ /// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
+ /// complete class to the given direct base.
+ Address
+ GetAddressOfDirectBaseInCompleteClass(Address Value,
+ const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base,
+ bool BaseIsVirtual);
+
+ static bool ShouldNullCheckClassCastValue(const CastExpr *Cast);
+
+ /// GetAddressOfBaseClass - This function will add the necessary delta to the
+ /// load of 'this' and return the address of the base class.
+ Address GetAddressOfBaseClass(Address Value,
+ const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
+ bool NullCheckValue, SourceLocation Loc);
+
+ Address GetAddressOfDerivedClass(Address Value,
+ const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
+ bool NullCheckValue);
+
+ /// GetVTTParameter - Return the VTT parameter that should be passed to a
+ /// base constructor/destructor with virtual bases.
+ /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
+ /// to ItaniumCXXABI.cpp together with all the references to VTT.
+ llvm::Value *GetVTTParameter(GlobalDecl GD, bool ForVirtualBase,
+ bool Delegating);
+
+ void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
+ CXXCtorType CtorType,
+ const FunctionArgList &Args,
+ SourceLocation Loc);
+ // It's important not to confuse this and the previous function. Delegating
+ // constructors are the C++0x feature. The constructor delegate optimization
+ // is used to reduce duplication in the base and complete constructors where
+ // they are substantially the same.
+ void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
+ const FunctionArgList &Args);
+
+ void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
+ bool ForVirtualBase, bool Delegating,
+ Address This, const CXXConstructExpr *E);
+
+ /// Emit assumption load for all bases. Must be called only on the
+ /// most-derived class, and not while the object is under construction.
+ void EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, Address This);
+
+ /// Emit assumption that vptr load == global vtable.
+ void EmitVTableAssumptionLoad(const VPtr &vptr, Address This);
+
+ void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
+ Address This, Address Src,
+ const CXXConstructExpr *E);
+
+ void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+ const ConstantArrayType *ArrayTy,
+ Address ArrayPtr,
+ const CXXConstructExpr *E,
+ bool ZeroInitialization = false);
+
+ void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+ llvm::Value *NumElements,
+ Address ArrayPtr,
+ const CXXConstructExpr *E,
+ bool ZeroInitialization = false);
+
+ static Destroyer destroyCXXObject;
+
+ void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
+ bool ForVirtualBase, bool Delegating,
+ Address This);
+
+ void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
+ llvm::Type *ElementTy, Address NewPtr,
+ llvm::Value *NumElements,
+ llvm::Value *AllocSizeWithoutCookie);
+
+ void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
+ Address Ptr);
+
+ llvm::Value *EmitLifetimeStart(uint64_t Size, llvm::Value *Addr);
+ void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
+
+ llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
+ void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
+
+ void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
+ QualType DeleteTy);
+
+ RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
+ const Expr *Arg, bool IsDelete);
+
+ llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E);
+ llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE);
+ Address EmitCXXUuidofExpr(const CXXUuidofExpr *E);
+
+ /// \brief Situations in which we might emit a check for the suitability of a
+ /// pointer or glvalue.
+ enum TypeCheckKind {
+ /// Checking the operand of a load. Must be suitably sized and aligned.
+ TCK_Load,
+ /// Checking the destination of a store. Must be suitably sized and aligned.
+ TCK_Store,
+ /// Checking the bound value in a reference binding. Must be suitably sized
+ /// and aligned, but is not required to refer to an object (until the
+ /// reference is used), per core issue 453.
+ TCK_ReferenceBinding,
+ /// Checking the object expression in a non-static data member access. Must
+ /// be an object within its lifetime.
+ TCK_MemberAccess,
+ /// Checking the 'this' pointer for a call to a non-static member function.
+ /// Must be an object within its lifetime.
+ TCK_MemberCall,
+ /// Checking the 'this' pointer for a constructor call.
+ TCK_ConstructorCall,
+ /// Checking the operand of a static_cast to a derived pointer type. Must be
+ /// null or an object within its lifetime.
+ TCK_DowncastPointer,
+ /// Checking the operand of a static_cast to a derived reference type. Must
+ /// be an object within its lifetime.
+ TCK_DowncastReference,
+ /// Checking the operand of a cast to a base object. Must be suitably sized
+ /// and aligned.
+ TCK_Upcast,
+ /// Checking the operand of a cast to a virtual base object. Must be an
+ /// object within its lifetime.
+ TCK_UpcastToVirtualBase
+ };
+
+ /// \brief Whether any type-checking sanitizers are enabled. If \c false,
+ /// calls to EmitTypeCheck can be skipped.
+ bool sanitizePerformTypeCheck() const;
+
+ /// \brief Emit a check that \p V is the address of storage of the
+ /// appropriate size and alignment for an object of type \p Type.
+ void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
+ QualType Type, CharUnits Alignment = CharUnits::Zero(),
+ bool SkipNullCheck = false);
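+ // Sketch: guard a load through Ptr (E and Ptr are hypothetical):
+ //   if (sanitizePerformTypeCheck())
+ //     EmitTypeCheck(TCK_Load, E->getExprLoc(), Ptr, E->getType());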
+
+ /// \brief Emit a check that \p Base points into an array object, which
+ /// we can access at index \p Index. \p Accessed should be \c false if
+ /// this expression is used as an lvalue, for instance in "&Arr[Idx]".
+ void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
+ QualType IndexType, bool Accessed);
+
+ llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre);
+ ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre);
+
+ void EmitAlignmentAssumption(llvm::Value *PtrValue, unsigned Alignment,
+ llvm::Value *OffsetValue = nullptr) {
+ Builder.CreateAlignmentAssumption(CGM.getDataLayout(), PtrValue, Alignment,
+ OffsetValue);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Declaration Emission
+ //===--------------------------------------------------------------------===//
+
+ /// EmitDecl - Emit a declaration.
+ ///
+ /// This function can be called with a null (unreachable) insert point.
+ void EmitDecl(const Decl &D);
+
+ /// EmitVarDecl - Emit a local variable declaration.
+ ///
+ /// This function can be called with a null (unreachable) insert point.
+ void EmitVarDecl(const VarDecl &D);
+
+ void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue,
+ bool capturedByInit);
+ void EmitScalarInit(llvm::Value *init, LValue lvalue);
+
+ typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
+ llvm::Value *Address);
+
+ /// \brief Determine whether the given initializer is trivial in the sense
+ /// that it requires no code to be generated.
+ bool isTrivialInitializer(const Expr *Init);
+
+ /// EmitAutoVarDecl - Emit an auto variable declaration.
+ ///
+ /// This function can be called with a null (unreachable) insert point.
+ void EmitAutoVarDecl(const VarDecl &D);
+
+ class AutoVarEmission {
+ friend class CodeGenFunction;
+
+ const VarDecl *Variable;
+
+ /// The address of the alloca. Invalid if the variable was emitted
+ /// as a global constant.
+ Address Addr;
+
+ llvm::Value *NRVOFlag;
+
+ /// True if the variable is a __block variable.
+ bool IsByRef;
+
+ /// True if the variable is of aggregate type and has a constant
+ /// initializer.
+ bool IsConstantAggregate;
+
+ /// Non-null if we should use lifetime annotations.
+ llvm::Value *SizeForLifetimeMarkers;
+
+ struct Invalid {};
+ AutoVarEmission(Invalid) : Variable(nullptr), Addr(Address::invalid()) {}
+
+ AutoVarEmission(const VarDecl &variable)
+ : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
+ IsByRef(false), IsConstantAggregate(false),
+ SizeForLifetimeMarkers(nullptr) {}
+
+ bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
+
+ public:
+ static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
+
+ bool useLifetimeMarkers() const {
+ return SizeForLifetimeMarkers != nullptr;
+ }
+ llvm::Value *getSizeForLifetimeMarkers() const {
+ assert(useLifetimeMarkers());
+ return SizeForLifetimeMarkers;
+ }
+
+ /// Returns the raw, allocated address, which is not necessarily
+ /// the address of the object itself.
+ Address getAllocatedAddress() const {
+ return Addr;
+ }
+
+ /// Returns the address of the object within this declaration.
+ /// Note that this does not chase the forwarding pointer for
+ /// __block decls.
+ Address getObjectAddress(CodeGenFunction &CGF) const {
+ if (!IsByRef) return Addr;
+
+ return CGF.emitBlockByrefAddress(Addr, Variable, /*forward*/ false);
+ }
+ };
+ AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
+ void EmitAutoVarInit(const AutoVarEmission &emission);
+ void EmitAutoVarCleanups(const AutoVarEmission &emission);
+ void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
+ QualType::DestructionKind dtorKind);
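+ // The three phases above are normally run in sequence, roughly as
+ // EmitAutoVarDecl does (a sketch; D is a hypothetical VarDecl):
+ //   AutoVarEmission emission = EmitAutoVarAlloca(D);
+ //   EmitAutoVarInit(emission);
+ //   EmitAutoVarCleanups(emission);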
+
+ void EmitStaticVarDecl(const VarDecl &D,
+ llvm::GlobalValue::LinkageTypes Linkage);
+
+ class ParamValue {
+ llvm::Value *Value;
+ unsigned Alignment;
+ ParamValue(llvm::Value *V, unsigned A) : Value(V), Alignment(A) {}
+ public:
+ static ParamValue forDirect(llvm::Value *value) {
+ return ParamValue(value, 0);
+ }
+ static ParamValue forIndirect(Address addr) {
+ assert(!addr.getAlignment().isZero());
+ return ParamValue(addr.getPointer(), addr.getAlignment().getQuantity());
+ }
+
+ bool isIndirect() const { return Alignment != 0; }
+ llvm::Value *getAnyValue() const { return Value; }
+
+ llvm::Value *getDirectValue() const {
+ assert(!isIndirect());
+ return Value;
+ }
+
+ Address getIndirectAddress() const {
+ assert(isIndirect());
+ return Address(Value, CharUnits::fromQuantity(Alignment));
+ }
+ };
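+ // Illustrative: wrapping a register-passed value vs. a hidden-pointer
+ // argument (V and Addr are hypothetical):
+ //   ParamValue Direct   = ParamValue::forDirect(V);
+ //   ParamValue Indirect = ParamValue::forIndirect(Addr);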
+
+ /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
+ void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo);
+
+ /// protectFromPeepholes - Protect a value that we're intending to
+ /// store to the side, but which will probably be used later, from
+ /// aggressive peepholing optimizations that might delete it.
+ ///
+ /// Pass the result to unprotectFromPeepholes to declare that
+ /// protection is no longer required.
+ ///
+ /// There's no particular reason why this shouldn't apply to
+ /// l-values, it's just that no existing peepholes work on pointers.
+ PeepholeProtection protectFromPeepholes(RValue rvalue);
+ void unprotectFromPeepholes(PeepholeProtection protection);
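+ // Intended usage pattern (a sketch):
+ //   PeepholeProtection protection = protectFromPeepholes(rvalue);
+ //   ... emit code that must not see the value's computation deleted ...
+ //   unprotectFromPeepholes(protection);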
+
+ //===--------------------------------------------------------------------===//
+ // Statement Emission
+ //===--------------------------------------------------------------------===//
+
+ /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
+ void EmitStopPoint(const Stmt *S);
+
+ /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
+ /// this function even if there is no current insertion point.
+ ///
+ /// This function may clear the current insertion point; callers should use
+ /// EnsureInsertPoint if they wish to subsequently generate code without first
+ /// calling EmitBlock, EmitBranch, or EmitStmt.
+ void EmitStmt(const Stmt *S);
+
+ /// EmitSimpleStmt - Try to emit a "simple" statement which does not
+ /// necessarily require an insertion point or debug information; typically
+ /// because the statement amounts to a jump or a container of other
+ /// statements.
+ ///
+ /// \return True if the statement was handled.
+ bool EmitSimpleStmt(const Stmt *S);
+
+ Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
+ AggValueSlot AVS = AggValueSlot::ignored());
+ Address EmitCompoundStmtWithoutScope(const CompoundStmt &S,
+ bool GetLast = false,
+ AggValueSlot AVS =
+ AggValueSlot::ignored());
+
+ /// EmitLabel - Emit the block for the given label. It is legal to call this
+ /// function even if there is no current insertion point.
+ void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
+
+ void EmitLabelStmt(const LabelStmt &S);
+ void EmitAttributedStmt(const AttributedStmt &S);
+ void EmitGotoStmt(const GotoStmt &S);
+ void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
+ void EmitIfStmt(const IfStmt &S);
+
+ void EmitWhileStmt(const WhileStmt &S,
+ ArrayRef<const Attr *> Attrs = None);
+ void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = None);
+ void EmitForStmt(const ForStmt &S,
+ ArrayRef<const Attr *> Attrs = None);
+ void EmitReturnStmt(const ReturnStmt &S);
+ void EmitDeclStmt(const DeclStmt &S);
+ void EmitBreakStmt(const BreakStmt &S);
+ void EmitContinueStmt(const ContinueStmt &S);
+ void EmitSwitchStmt(const SwitchStmt &S);
+ void EmitDefaultStmt(const DefaultStmt &S);
+ void EmitCaseStmt(const CaseStmt &S);
+ void EmitCaseStmtRange(const CaseStmt &S);
+ void EmitAsmStmt(const AsmStmt &S);
+
+ void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
+ void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
+ void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
+ void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
+ void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
+
+ void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
+ void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
+
+ void EmitCXXTryStmt(const CXXTryStmt &S);
+ void EmitSEHTryStmt(const SEHTryStmt &S);
+ void EmitSEHLeaveStmt(const SEHLeaveStmt &S);
+ void EnterSEHTryStmt(const SEHTryStmt &S);
+ void ExitSEHTryStmt(const SEHTryStmt &S);
+
+ void startOutlinedSEHHelper(CodeGenFunction &ParentCGF, bool IsFilter,
+ const Stmt *OutlinedStmt);
+
+ llvm::Function *GenerateSEHFilterFunction(CodeGenFunction &ParentCGF,
+ const SEHExceptStmt &Except);
+
+ llvm::Function *GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF,
+ const SEHFinallyStmt &Finally);
+
+ void EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
+ llvm::Value *ParentFP,
+ llvm::Value *EntryEBP);
+ llvm::Value *EmitSEHExceptionCode();
+ llvm::Value *EmitSEHExceptionInfo();
+ llvm::Value *EmitSEHAbnormalTermination();
+
+ /// Scan the outlined statement for captures from the parent function. For
+ /// each capture, mark the capture as escaped and emit a call to
+ /// llvm.localrecover. Insert the localrecover result into the LocalDeclMap.
+ void EmitCapturedLocals(CodeGenFunction &ParentCGF, const Stmt *OutlinedStmt,
+ bool IsFilter);
+
+ /// Recovers the address of a local in a parent function. ParentVar is the
+ /// address of the variable used in the immediate parent function. It can
+ /// either be an alloca or a call to llvm.localrecover if there are nested
+ /// outlined functions. ParentFP is the frame pointer of the outermost parent
+ /// frame.
+ Address recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
+ Address ParentVar,
+ llvm::Value *ParentFP);
+
+ void EmitCXXForRangeStmt(const CXXForRangeStmt &S,
+ ArrayRef<const Attr *> Attrs = None);
+
+ LValue InitCapturedStruct(const CapturedStmt &S);
+ llvm::Function *EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K);
+ llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
+ Address GenerateCapturedStmtArgument(const CapturedStmt &S);
+ llvm::Function *GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S);
+ void GenerateOpenMPCapturedVars(const CapturedStmt &S,
+ SmallVectorImpl<llvm::Value *> &CapturedVars);
+ /// \brief Perform element-by-element copying of arrays with type \a
+ /// OriginalType from \a SrcAddr to \a DestAddr using the copying procedure
+ /// generated by \a CopyGen.
+ ///
+ /// \param DestAddr Address of the destination array.
+ /// \param SrcAddr Address of the source array.
+ /// \param OriginalType Type of destination and source arrays.
+ /// \param CopyGen Copying procedure that copies value of single array element
+ /// to another single array element.
+ void EmitOMPAggregateAssign(
+ Address DestAddr, Address SrcAddr, QualType OriginalType,
+ const llvm::function_ref<void(Address, Address)> &CopyGen);
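+ // Sketch: per-element copy driven by a caller-supplied action; DestAddr,
+ // SrcAddr, and Ty are hypothetical:
+ //   EmitOMPAggregateAssign(DestAddr, SrcAddr, Ty,
+ //                          [&](Address DestEl, Address SrcEl) {
+ //                            // copy one element, e.g. via a copy expr
+ //                          });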
+ /// \brief Emit proper copying of data from one variable to another.
+ ///
+ /// \param OriginalType Original type of the copied variables.
+ /// \param DestAddr Destination address.
+ /// \param SrcAddr Source address.
+ /// \param DestVD Destination variable used in \a CopyExpr (for arrays, has
+ /// type of the base array element).
+ /// \param SrcVD Source variable used in \a CopyExpr (for arrays, has type of
+ /// the base array element).
+ /// \param Copy Actual copying expression for copying data from \a SrcVD to
+ /// \a DestVD.
+ void EmitOMPCopy(QualType OriginalType,
+ Address DestAddr, Address SrcAddr,
+ const VarDecl *DestVD, const VarDecl *SrcVD,
+ const Expr *Copy);
+ /// \brief Emit atomic update code for constructs: \a X = \a X \a BO \a E or
+ /// \a X = \a E \a BO \a E.
+ ///
+ /// \param X Value to be updated.
+ /// \param E Update value.
+ /// \param BO Binary operation for update operation.
+ /// \param IsXLHSInRHSPart true if \a X is LHS in RHS part of the update
+ /// expression, false otherwise.
+ /// \param AO Atomic ordering of the generated atomic instructions.
+ /// \param CommonGen Code generator for complex expressions that cannot be
+ /// expressed through atomicrmw instruction.
+ /// \returns <true, OldAtomicValue> if a simple 'atomicrmw' instruction was
+ /// generated, <false, RValue::get(nullptr)> otherwise.
+ std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
+ LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
+ llvm::AtomicOrdering AO, SourceLocation Loc,
+ const llvm::function_ref<RValue(RValue)> &CommonGen);
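+ // For example (a sketch of the intended mapping), given
+ //   #pragma omp atomic update
+ //   x = x + expr;
+ // X is the lvalue for 'x', E the value of 'expr', BO is BO_Add, and
+ // IsXLHSInRHSPart is true.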
+ bool EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
+ OMPPrivateScope &PrivateScope);
+ void EmitOMPPrivateClause(const OMPExecutableDirective &D,
+ OMPPrivateScope &PrivateScope);
+ /// \brief Emit code for the copyin clause in directive \a D. The following
+ /// code is generated at the start of outlined functions for directives:
+ /// \code
+ /// threadprivate_var1 = master_threadprivate_var1;
+ /// operator=(threadprivate_var2, master_threadprivate_var2);
+ /// ...
+ /// __kmpc_barrier(&loc, global_tid);
+ /// \endcode
+ ///
+ /// \param D OpenMP directive possibly with 'copyin' clause(s).
+ /// \returns true if at least one copyin variable is found, false otherwise.
+ bool EmitOMPCopyinClause(const OMPExecutableDirective &D);
+ /// \brief Emit initial code for lastprivate variables. If some variable is
+ /// not also firstprivate, then the default initialization is used. Otherwise
+ /// initialization of this variable is performed by EmitOMPFirstprivateClause
+ /// method.
+ ///
+ /// \param D Directive that may have 'lastprivate' clauses.
+ /// \param PrivateScope Private scope for capturing lastprivate variables for
+ /// proper codegen in internal captured statement.
+ ///
+ /// \returns true if there is at least one lastprivate variable, false
+ /// otherwise.
+ bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective &D,
+ OMPPrivateScope &PrivateScope);
+ /// \brief Emit final copying of lastprivate values to original variables at
+ /// the end of the worksharing or simd directive.
+ ///
+ /// \param D Directive that has at least one 'lastprivate' clause.
+ /// \param IsLastIterCond Boolean condition that must be set to 'i1 true' if
+ /// it is the last iteration of the loop code in associated directive, or to
+ /// 'i1 false' otherwise. If this item is nullptr, no final check is required.
+ void EmitOMPLastprivateClauseFinal(const OMPExecutableDirective &D,
+ llvm::Value *IsLastIterCond = nullptr);
+ /// \brief Emit initial code for reduction variables. Creates reduction copies
+ /// and initializes them with the values according to OpenMP standard.
+ ///
+ /// \param D Directive (possibly) with the 'reduction' clause.
+ /// \param PrivateScope Private scope for capturing reduction variables for
+ /// proper codegen in internal captured statement.
+ ///
+ void EmitOMPReductionClauseInit(const OMPExecutableDirective &D,
+ OMPPrivateScope &PrivateScope);
+ /// \brief Emit final update of reduction values to original variables at
+ /// the end of the directive.
+ ///
+ /// \param D Directive that has at least one 'reduction' clause.
+ void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D);
+ /// \brief Emit initial code for linear variables. Creates private copies
+ /// and initializes them with the values according to OpenMP standard.
+ ///
+ /// \param D Directive (possibly) with the 'linear' clause.
+ void EmitOMPLinearClauseInit(const OMPLoopDirective &D);
+
+ void EmitOMPParallelDirective(const OMPParallelDirective &S);
+ void EmitOMPSimdDirective(const OMPSimdDirective &S);
+ void EmitOMPForDirective(const OMPForDirective &S);
+ void EmitOMPForSimdDirective(const OMPForSimdDirective &S);
+ void EmitOMPSectionsDirective(const OMPSectionsDirective &S);
+ void EmitOMPSectionDirective(const OMPSectionDirective &S);
+ void EmitOMPSingleDirective(const OMPSingleDirective &S);
+ void EmitOMPMasterDirective(const OMPMasterDirective &S);
+ void EmitOMPCriticalDirective(const OMPCriticalDirective &S);
+ void EmitOMPParallelForDirective(const OMPParallelForDirective &S);
+ void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S);
+ void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S);
+ void EmitOMPTaskDirective(const OMPTaskDirective &S);
+ void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S);
+ void EmitOMPBarrierDirective(const OMPBarrierDirective &S);
+ void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
+ void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S);
+ void EmitOMPFlushDirective(const OMPFlushDirective &S);
+ void EmitOMPOrderedDirective(const OMPOrderedDirective &S);
+ void EmitOMPAtomicDirective(const OMPAtomicDirective &S);
+ void EmitOMPTargetDirective(const OMPTargetDirective &S);
+ void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S);
+ void EmitOMPTeamsDirective(const OMPTeamsDirective &S);
+ void
+ EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S);
+ void EmitOMPCancelDirective(const OMPCancelDirective &S);
+ void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S);
+ void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S);
+ void EmitOMPDistributeDirective(const OMPDistributeDirective &S);
+
+ /// \brief Emit inner loop of the worksharing/simd construct.
+ ///
+ /// \param S Directive, for which the inner loop must be emitted.
+ /// \param RequiresCleanup true if the directive has some associated private
+ /// variables.
+ /// \param LoopCond Boolean condition for loop continuation.
+ /// \param IncExpr Increment expression for the loop control variable.
+ /// \param BodyGen Generator for the inner body of the inner loop.
+ /// \param PostIncGen Generator for post-increment code (required for ordered
+ /// loop directives).
+ void EmitOMPInnerLoop(
+ const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
+ const Expr *IncExpr,
+ const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
+ const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen);
+
+ JumpDest getOMPCancelDestination(OpenMPDirectiveKind Kind);
+
+private:
+
+ /// Helpers for the OpenMP loop directives.
+ void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);
+ void EmitOMPSimdInit(const OMPLoopDirective &D, bool IsMonotonic = false);
+ void EmitOMPSimdFinal(const OMPLoopDirective &D);
+ /// \brief Emit code for the worksharing loop-based directive.
+ /// \return true if this construct has any lastprivate clause, false
+ /// otherwise.
+ bool EmitOMPWorksharingLoop(const OMPLoopDirective &S);
+ void EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
+ bool IsMonotonic, const OMPLoopDirective &S,
+ OMPPrivateScope &LoopScope, bool Ordered, Address LB,
+ Address UB, Address ST, Address IL,
+ llvm::Value *Chunk);
+ /// \brief Emit code for sections directive.
+ OpenMPDirectiveKind EmitSections(const OMPExecutableDirective &S);
+
+public:
+
+ //===--------------------------------------------------------------------===//
+ // LValue Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
+ RValue GetUndefRValue(QualType Ty);
+
+ /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
+ /// and issue an ErrorUnsupported style diagnostic (using the
+ /// provided Name).
+ RValue EmitUnsupportedRValue(const Expr *E,
+ const char *Name);
+
+ /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
+ /// an ErrorUnsupported style diagnostic (using the provided Name).
+ LValue EmitUnsupportedLValue(const Expr *E,
+ const char *Name);
+
+ /// EmitLValue - Emit code to compute a designator that specifies the location
+ /// of the expression.
+ ///
+ /// This can return one of two things: a simple address or a bitfield
+ /// reference. In either case, the LLVM Value* in the LValue structure is
+ /// guaranteed to be an LLVM pointer type.
+ ///
+ /// If this returns a bitfield reference, nothing about the pointee type of
+ /// the LLVM value is known: For example, it may not be a pointer to an
+ /// integer.
+ ///
+ /// If this returns a normal address, and if the lvalue's C type is fixed
+ /// size, this method guarantees that the returned pointer type will point to
+ /// an LLVM type of the same size of the lvalue's type. If the lvalue has a
+ /// variable length type, this is not possible.
+ ///
+ LValue EmitLValue(const Expr *E);
+
+ /// \brief Same as EmitLValue but additionally we generate checking code to
+ /// guard against undefined behavior. This is only suitable when we know
+ /// that the address will be used to access the object.
+ LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);
+
+ RValue convertTempToRValue(Address addr, QualType type,
+ SourceLocation Loc);
+
+ void EmitAtomicInit(Expr *E, LValue lvalue);
+
+ bool LValueIsSuitableForInlineAtomic(LValue Src);
+ bool typeIsSuitableForInlineAtomic(QualType Ty, bool IsVolatile) const;
+
+ RValue EmitAtomicLoad(LValue LV, SourceLocation SL,
+ AggValueSlot Slot = AggValueSlot::ignored());
+
+ RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc,
+ llvm::AtomicOrdering AO, bool IsVolatile = false,
+ AggValueSlot slot = AggValueSlot::ignored());
+
+ void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);
+
+ void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
+ bool IsVolatile, bool isInit);
+
+ std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
+ LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
+ llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
+ llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
+ bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());
+
+ void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
+ const llvm::function_ref<RValue(RValue)> &UpdateOp,
+ bool IsVolatile);
+
+ /// EmitToMemory - Change a scalar value from its value
+ /// representation to its in-memory representation.
+ llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
+
+ /// EmitFromMemory - Change a scalar value from its memory
+ /// representation to its value representation.
+ llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
+
+ /// EmitLoadOfScalar - Load a scalar value from an address, taking
+ /// care to appropriately convert from the memory representation to
+ /// the LLVM value representation.
+ llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
+ SourceLocation Loc,
+ AlignmentSource AlignSource =
+ AlignmentSource::Type,
+ llvm::MDNode *TBAAInfo = nullptr,
+ QualType TBAABaseTy = QualType(),
+ uint64_t TBAAOffset = 0,
+ bool isNontemporal = false);
+
+ /// EmitLoadOfScalar - Load a scalar value from an address, taking
+ /// care to appropriately convert from the memory representation to
+ /// the LLVM value representation. The l-value must be a simple
+ /// l-value.
+ llvm::Value *EmitLoadOfScalar(LValue lvalue, SourceLocation Loc);
+
+ /// EmitStoreOfScalar - Store a scalar value to an address, taking
+ /// care to appropriately convert from the memory representation to
+ /// the LLVM value representation.
+ void EmitStoreOfScalar(llvm::Value *Value, Address Addr,
+ bool Volatile, QualType Ty,
+ AlignmentSource AlignSource = AlignmentSource::Type,
+ llvm::MDNode *TBAAInfo = nullptr, bool isInit = false,
+ QualType TBAABaseTy = QualType(),
+ uint64_t TBAAOffset = 0, bool isNontemporal = false);
+
+ /// EmitStoreOfScalar - Store a scalar value to an address, taking
+ /// care to appropriately convert from the memory representation to
+ /// the LLVM value representation. The l-value must be a simple
+ /// l-value. The isInit flag indicates whether this is an initialization.
+ /// If so, atomic qualifiers are ignored and the store is always non-atomic.
+ void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
+
+ /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
+ /// this method emits the address of the lvalue, then loads the result as an
+ /// rvalue, returning the rvalue.
+ RValue EmitLoadOfLValue(LValue V, SourceLocation Loc);
+ RValue EmitLoadOfExtVectorElementLValue(LValue V);
+ RValue EmitLoadOfBitfieldLValue(LValue LV);
+ RValue EmitLoadOfGlobalRegLValue(LValue LV);
+
+ /// EmitStoreThroughLValue - Store the specified rvalue into the specified
+ /// lvalue, where both are guaranteed to have the same type, and that type
+ /// is 'Ty'.
+ void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false);
+ void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
+ void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst);
+
+ /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints
+ /// as EmitStoreThroughLValue.
+ ///
+ /// \param Result [out] - If non-null, this will be set to a Value* for the
+ /// bit-field contents after the store, appropriate for use as the result of
+ /// an assignment to the bit-field.
+ void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
+ llvm::Value **Result=nullptr);
+
+ /// Emit an l-value for an assignment (simple or compound) of complex type.
+ LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
+ LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
+ LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E,
+ llvm::Value *&Result);
+
+ // Note: only available for agg return types
+ LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
+ LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
+ // Note: only available for agg return types
+ LValue EmitCallExprLValue(const CallExpr *E);
+ // Note: only available for agg return types
+ LValue EmitVAArgExprLValue(const VAArgExpr *E);
+ LValue EmitDeclRefLValue(const DeclRefExpr *E);
+ LValue EmitStringLiteralLValue(const StringLiteral *E);
+ LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
+ LValue EmitPredefinedLValue(const PredefinedExpr *E);
+ LValue EmitUnaryOpLValue(const UnaryOperator *E);
+ LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
+ bool Accessed = false);
+ LValue EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
+ bool IsLowerBound = true);
+ LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
+ LValue EmitMemberExpr(const MemberExpr *E);
+ LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
+ LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
+ LValue EmitInitListLValue(const InitListExpr *E);
+ LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
+ LValue EmitCastLValue(const CastExpr *E);
+ LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
+ LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
+
+ Address EmitExtVectorElementLValue(LValue V);
+
+ RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc);
+
+ Address EmitArrayToPointerDecay(const Expr *Array,
+ AlignmentSource *AlignSource = nullptr);
+
+ class ConstantEmission {
+ llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
+ ConstantEmission(llvm::Constant *C, bool isReference)
+ : ValueAndIsReference(C, isReference) {}
+ public:
+ ConstantEmission() {}
+ static ConstantEmission forReference(llvm::Constant *C) {
+ return ConstantEmission(C, true);
+ }
+ static ConstantEmission forValue(llvm::Constant *C) {
+ return ConstantEmission(C, false);
+ }
+
+ explicit operator bool() const {
+ return ValueAndIsReference.getOpaqueValue() != nullptr;
+ }
+
+ bool isReference() const { return ValueAndIsReference.getInt(); }
+ LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const {
+ assert(isReference());
+ return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
+ refExpr->getType());
+ }
+
+ llvm::Constant *getValue() const {
+ assert(!isReference());
+ return ValueAndIsReference.getPointer();
+ }
+ };
+
+ ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
+
+ RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
+ AggValueSlot slot = AggValueSlot::ignored());
+ LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
+
+ llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+ LValue EmitLValueForField(LValue Base, const FieldDecl* Field);
+ LValue EmitLValueForLambdaField(const FieldDecl *Field);
+
+ /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
+ /// if the Field is a reference, this will return the address of the reference
+ /// and not the address of the value stored in the reference.
+ LValue EmitLValueForFieldInitialization(LValue Base,
+ const FieldDecl* Field);
+
+ LValue EmitLValueForIvar(QualType ObjectTy,
+ llvm::Value* Base, const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+
+ LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
+ LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
+ LValue EmitLambdaLValue(const LambdaExpr *E);
+ LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
+ LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E);
+
+ LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
+ LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
+ LValue EmitStmtExprLValue(const StmtExpr *E);
+ LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
+ LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
+ void EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::Constant *Init);
+
+ //===--------------------------------------------------------------------===//
+ // Scalar Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ /// EmitCall - Generate a call of the given function, expecting the given
+ /// result type, and using the given argument list which specifies both the
+ /// LLVM arguments and the types they were derived from.
+ RValue EmitCall(const CGFunctionInfo &FnInfo, llvm::Value *Callee,
+ ReturnValueSlot ReturnValue, const CallArgList &Args,
+ CGCalleeInfo CalleeInfo = CGCalleeInfo(),
+ llvm::Instruction **callOrInvoke = nullptr);
+
+ RValue EmitCall(QualType FnType, llvm::Value *Callee, const CallExpr *E,
+ ReturnValueSlot ReturnValue,
+ CGCalleeInfo CalleeInfo = CGCalleeInfo(),
+ llvm::Value *Chain = nullptr);
+ RValue EmitCallExpr(const CallExpr *E,
+ ReturnValueSlot ReturnValue = ReturnValueSlot());
+
+ void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl);
+
+ llvm::CallInst *EmitRuntimeCall(llvm::Value *callee,
+ const Twine &name = "");
+ llvm::CallInst *EmitRuntimeCall(llvm::Value *callee,
+ ArrayRef<llvm::Value*> args,
+ const Twine &name = "");
+ llvm::CallInst *EmitNounwindRuntimeCall(llvm::Value *callee,
+ const Twine &name = "");
+ llvm::CallInst *EmitNounwindRuntimeCall(llvm::Value *callee,
+ ArrayRef<llvm::Value*> args,
+ const Twine &name = "");
+
+ llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
+ ArrayRef<llvm::Value *> Args,
+ const Twine &Name = "");
+ llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee,
+ ArrayRef<llvm::Value*> args,
+ const Twine &name = "");
+ llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee,
+ const Twine &name = "");
+ void EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
+ ArrayRef<llvm::Value*> args);
+
+ llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
+ NestedNameSpecifier *Qual,
+ llvm::Type *Ty);
+
+ llvm::Value *BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const CXXRecordDecl *RD);
+
+ RValue
+ EmitCXXMemberOrOperatorCall(const CXXMethodDecl *MD, llvm::Value *Callee,
+ ReturnValueSlot ReturnValue, llvm::Value *This,
+ llvm::Value *ImplicitParam,
+ QualType ImplicitParamTy, const CallExpr *E);
+ RValue EmitCXXStructorCall(const CXXMethodDecl *MD, llvm::Value *Callee,
+ ReturnValueSlot ReturnValue, llvm::Value *This,
+ llvm::Value *ImplicitParam,
+ QualType ImplicitParamTy, const CallExpr *E,
+ StructorType Type);
+ RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
+ ReturnValueSlot ReturnValue);
+ RValue EmitCXXMemberOrOperatorMemberCallExpr(const CallExpr *CE,
+ const CXXMethodDecl *MD,
+ ReturnValueSlot ReturnValue,
+ bool HasQualifier,
+ NestedNameSpecifier *Qualifier,
+ bool IsArrow, const Expr *Base);
+ // Compute the object pointer.
+ Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base,
+ llvm::Value *memberPtr,
+ const MemberPointerType *memberPtrType,
+ AlignmentSource *AlignSource = nullptr);
+ RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
+ ReturnValueSlot ReturnValue);
+
+ RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD,
+ ReturnValueSlot ReturnValue);
+
+ RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
+ ReturnValueSlot ReturnValue);
+
+
+ RValue EmitBuiltinExpr(const FunctionDecl *FD,
+ unsigned BuiltinID, const CallExpr *E,
+ ReturnValueSlot ReturnValue);
+
+ RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
+
+ /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
+ /// is unhandled by the current target.
+ llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
+ llvm::Value *EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty,
+ const llvm::CmpInst::Predicate Fp,
+ const llvm::CmpInst::Predicate Ip,
+ const llvm::Twine &Name = "");
+ llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
+ llvm::Value *EmitCommonNeonBuiltinExpr(unsigned BuiltinID,
+ unsigned LLVMIntrinsic,
+ unsigned AltLLVMIntrinsic,
+ const char *NameHint,
+ unsigned Modifier,
+ const CallExpr *E,
+ SmallVectorImpl<llvm::Value *> &Ops,
+ Address PtrOp0, Address PtrOp1);
+ llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
+ unsigned Modifier, llvm::Type *ArgTy,
+ const CallExpr *E);
+ llvm::Value *EmitNeonCall(llvm::Function *F,
+ SmallVectorImpl<llvm::Value*> &O,
+ const char *name,
+ unsigned shift = 0, bool rightshift = false);
+ llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
+ llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
+ bool negateForRightShift);
+ llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
+ llvm::Type *Ty, bool usgn, const char *name);
+ llvm::Value *vectorWrapScalar16(llvm::Value *Op);
+ llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
+ llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
+ llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E);
+
+ llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
+ llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
+ llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
+ llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
+ llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
+ llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
+ const ObjCMethodDecl *MethodWithObjects);
+ llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
+ RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
+ ReturnValueSlot Return = ReturnValueSlot());
+
+ /// Retrieves the default cleanup kind for an ARC cleanup.
+ /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
+ CleanupKind getARCCleanupKind() {
+ return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
+ ? NormalAndEHCleanup : NormalCleanup;
+ }
+
+ // ARC primitives.
+ void EmitARCInitWeak(Address addr, llvm::Value *value);
+ void EmitARCDestroyWeak(Address addr);
+ llvm::Value *EmitARCLoadWeak(Address addr);
+ llvm::Value *EmitARCLoadWeakRetained(Address addr);
+ llvm::Value *EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored);
+ void EmitARCCopyWeak(Address dst, Address src);
+ void EmitARCMoveWeak(Address dst, Address src);
+ llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
+ llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
+ llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
+ bool resultIgnored);
+ llvm::Value *EmitARCStoreStrongCall(Address addr, llvm::Value *value,
+ bool resultIgnored);
+ llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
+ llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
+ llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
+ void EmitARCDestroyStrong(Address addr, ARCPreciseLifetime_t precise);
+ void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
+ llvm::Value *EmitARCAutorelease(llvm::Value *value);
+ llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
+ llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
+ llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
+
+ std::pair<LValue,llvm::Value*>
+ EmitARCStoreAutoreleasing(const BinaryOperator *e);
+ std::pair<LValue,llvm::Value*>
+ EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
+
+ llvm::Value *EmitObjCThrowOperand(const Expr *expr);
+ llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
+ llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
+
+ llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
+ llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
+ llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
+
+ void EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values);
+
+ static Destroyer destroyARCStrongImprecise;
+ static Destroyer destroyARCStrongPrecise;
+ static Destroyer destroyARCWeak;
+
+ void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
+ llvm::Value *EmitObjCAutoreleasePoolPush();
+ llvm::Value *EmitObjCMRRAutoreleasePoolPush();
+ void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
+ void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
+
+ /// \brief Emits a reference binding to the passed in expression.
+ RValue EmitReferenceBindingToExpr(const Expr *E);
+
+ //===--------------------------------------------------------------------===//
+ // Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ // Expressions are broken into three classes: scalar, complex, aggregate.
+
+ /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
+ /// scalar type, returning the result.
+ llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
+
+ /// Emit a conversion from the specified type to the specified destination
+ /// type, both of which are LLVM scalar types.
+ llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
+ QualType DstTy, SourceLocation Loc);
+
+ /// Emit a conversion from the specified complex type to the specified
+ /// destination type, where the destination type is an LLVM scalar type.
+ llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
+ QualType DstTy,
+ SourceLocation Loc);
+
+ /// EmitAggExpr - Emit the computation of the specified expression
+ /// of aggregate type. The result is computed into the given slot,
+ /// which may be null to indicate that the value is not needed.
+ void EmitAggExpr(const Expr *E, AggValueSlot AS);
+
+ /// EmitAggExprToLValue - Emit the computation of the specified expression of
+ /// aggregate type into a temporary LValue.
+ LValue EmitAggExprToLValue(const Expr *E);
+
+ /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
+ /// make sure it survives garbage collection until this point.
+ void EmitExtendGCLifetime(llvm::Value *object);
+
+ /// EmitComplexExpr - Emit the computation of the specified expression of
+ /// complex type, returning the result.
+ ComplexPairTy EmitComplexExpr(const Expr *E,
+ bool IgnoreReal = false,
+ bool IgnoreImag = false);
+
+ /// EmitComplexExprIntoLValue - Emit the given expression of complex
+ /// type and place its result into the specified l-value.
+ void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit);
+
+ /// EmitStoreOfComplex - Store a complex number into the specified l-value.
+ void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit);
+
+ /// EmitLoadOfComplex - Load a complex number from the specified l-value.
+ ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc);
+
+ Address emitAddrOfRealComponent(Address complex, QualType complexType);
+ Address emitAddrOfImagComponent(Address complex, QualType complexType);
+
+ /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
+ /// global variable that has already been created for it. If the initializer
+ /// has a different type than GV does, this may free GV and return a different
+ /// one. Otherwise it just returns GV.
+ llvm::GlobalVariable *
+ AddInitializerToStaticVarDecl(const VarDecl &D,
+ llvm::GlobalVariable *GV);
+
+
+ /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
+ /// variable with global storage.
+ void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
+ bool PerformInit);
+
+ llvm::Constant *createAtExitStub(const VarDecl &VD, llvm::Constant *Dtor,
+ llvm::Constant *Addr);
+
+ /// Call atexit() with a function that passes the given argument to
+ /// the given function.
+ void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::Constant *fn,
+ llvm::Constant *addr);
+
+ /// Emit code in this function to perform a guarded variable
+ /// initialization. Guarded initializations are used when it's not
+ /// possible to prove that an initialization will be done exactly
+ /// once, e.g. with a static local variable or a static data member
+ /// of a class template.
+ void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
+ bool PerformInit);
+
+ /// GenerateCXXGlobalInitFunc - Generates code for initializing global
+ /// variables.
+ void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
+ ArrayRef<llvm::Function *> CXXThreadLocals,
+ Address Guard = Address::invalid());
+
+ /// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
+ /// variables.
+ void GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
+ const std::vector<std::pair<llvm::WeakVH,
+ llvm::Constant*> > &DtorsAndObjects);
+
+ void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
+ const VarDecl *D,
+ llvm::GlobalVariable *Addr,
+ bool PerformInit);
+
+ void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
+
+ void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);
+
+ void enterFullExpression(const ExprWithCleanups *E) {
+ if (E->getNumObjects() == 0) return;
+ enterNonTrivialFullExpression(E);
+ }
+ void enterNonTrivialFullExpression(const ExprWithCleanups *E);
+
+ void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
+
+ void EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Dest);
+
+ RValue EmitAtomicExpr(AtomicExpr *E);
+
+ //===--------------------------------------------------------------------===//
+ // Annotations Emission
+ //===--------------------------------------------------------------------===//
+
+ /// Emit an annotation call (intrinsic or builtin).
+ llvm::Value *EmitAnnotationCall(llvm::Value *AnnotationFn,
+ llvm::Value *AnnotatedVal,
+ StringRef AnnotationStr,
+ SourceLocation Location);
+
+ /// Emit local annotations for the local variable V, declared by D.
+ void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
+
+ /// Emit field annotations for the given field & value. Returns the
+ /// annotation result.
+ Address EmitFieldAnnotations(const FieldDecl *D, Address V);
+
+ //===--------------------------------------------------------------------===//
+ // Internal Helpers
+ //===--------------------------------------------------------------------===//
+
+ /// ContainsLabel - Return true if the statement contains a label in it. If
+ /// this statement is not executed normally, the absence of a label means
+ /// that we can just remove the code.
+ static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
+
+ /// containsBreak - Return true if the statement contains a break out of it.
+ /// If the statement (recursively) contains a switch or loop with a break
+ /// inside of it, this is fine.
+ static bool containsBreak(const Stmt *S);
+
+ /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+ /// to a constant, or if it does but contains a label, return false. If it
+ /// constant folds, return true and set the boolean result in Result.
+ bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result);
+
+ /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+ /// to a constant, or if it does but contains a label, return false. If it
+ /// constant folds, return true and set the folded value.
+ bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result);
+
+ /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
+ /// if statement) to the specified blocks. Based on the condition, this might
+ /// try to simplify the codegen of the conditional based on the branch.
+ /// TrueCount should be the number of times we expect the condition to
+ /// evaluate to true based on PGO data.
+ void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
+ llvm::BasicBlock *FalseBlock, uint64_t TrueCount);
+
+ /// \brief Emit a description of a type in a format suitable for passing to
+ /// a runtime sanitizer handler.
+ llvm::Constant *EmitCheckTypeDescriptor(QualType T);
+
+ /// \brief Convert a value into a format suitable for passing to a runtime
+ /// sanitizer handler.
+ llvm::Value *EmitCheckValue(llvm::Value *V);
+
+ /// \brief Emit a description of a source location in a format suitable for
+ /// passing to a runtime sanitizer handler.
+ llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);
+
+ /// \brief Create a basic block that will call a handler function in a
+ /// sanitizer runtime with the provided arguments, and create a conditional
+ /// branch to it.
+ void EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
+ StringRef CheckName, ArrayRef<llvm::Constant *> StaticArgs,
+ ArrayRef<llvm::Value *> DynamicArgs);
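+ // Sketch (Cond, StaticArgs, and DynamicArgs are hypothetical; the
+ // sanitizer kind and check name are illustrative):
+ //   EmitCheck({{Cond, SanitizerKind::Null}}, "type_mismatch",
+ //             StaticArgs, DynamicArgs);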
+
+ /// \brief Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
+ /// if Cond is false.
+ void EmitCfiSlowPathCheck(llvm::Value *Cond, llvm::ConstantInt *TypeId,
+ llvm::Value *Ptr);
+
+ /// \brief Create a basic block that will call the trap intrinsic, and emit a
+ /// conditional branch to it, for the -ftrapv checks.
+ void EmitTrapCheck(llvm::Value *Checked);
+
+ /// \brief Emit a call to trap or debugtrap and attach function attribute
+ /// "trap-func-name" if specified.
+ llvm::CallInst *EmitTrapCall(llvm::Intrinsic::ID IntrID);
+
+ /// \brief Create a check for a function parameter that may potentially be
+ /// declared as non-null.
+ void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc,
+ const FunctionDecl *FD, unsigned ParmNum);
+
+ /// EmitCallArg - Emit a single call argument.
+ void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
+
+ /// EmitDelegateCallArg - We are performing a delegate call; that
+ /// is, the current function is delegating to another one. Produce
+ /// an r-value suitable for passing the given parameter.
+ void EmitDelegateCallArg(CallArgList &args, const VarDecl *param,
+ SourceLocation loc);
+
+ /// SetFPAccuracy - Set the minimum required accuracy of the given floating
+ /// point operation, expressed as the maximum relative error in ulp.
+ void SetFPAccuracy(llvm::Value *Val, float Accuracy);
+
+private:
+ llvm::MDNode *getRangeForLoadFromType(QualType Ty);
+ void EmitReturnOfRValue(RValue RV, QualType Ty);
+
+ void deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New);
+
+ llvm::SmallVector<std::pair<llvm::Instruction *, llvm::Value *>, 4>
+ DeferredReplacements;
+
+ /// Set the address of a local variable.
+ void setAddrOfLocalVar(const VarDecl *VD, Address Addr) {
+ assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!");
+ LocalDeclMap.insert({VD, Addr});
+ }
+
+ /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
+ /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
+ ///
+ /// \param AI - The first function argument of the expansion.
+ void ExpandTypeFromArgs(QualType Ty, LValue Dst,
+ SmallVectorImpl<llvm::Argument *>::iterator &AI);
+
+ /// ExpandTypeToArgs - Expand an RValue \arg RV, with the LLVM type for \arg
+ /// Ty, into individual arguments on the provided vector \arg IRCallArgs,
+ /// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
+ void ExpandTypeToArgs(QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
+ SmallVectorImpl<llvm::Value *> &IRCallArgs,
+ unsigned &IRCallArgPos);
+
+ llvm::Value* EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
+ const Expr *InputExpr, std::string &ConstraintStr);
+
+ llvm::Value* EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
+ LValue InputValue, QualType InputType,
+ std::string &ConstraintStr,
+ SourceLocation Loc);
+
+ /// \brief Attempts to statically evaluate the object size of E. If that
+ /// fails, emits code to figure the size of E out for us. This is
+ /// pass_object_size aware.
+ llvm::Value *evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
+ llvm::IntegerType *ResType);
+
+ /// \brief Emits the size of E, as required by __builtin_object_size. This
+ /// function is aware of pass_object_size parameters, and will act accordingly
+ /// if E is a parameter with the pass_object_size attribute.
+ llvm::Value *emitBuiltinObjectSize(const Expr *E, unsigned Type,
+ llvm::IntegerType *ResType);
+
+public:
+#ifndef NDEBUG
+ // Determine whether the given argument is an Objective-C method
+ // that may have type parameters in its signature.
+ static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
+ const DeclContext *dc = method->getDeclContext();
+ if (const ObjCInterfaceDecl *classDecl= dyn_cast<ObjCInterfaceDecl>(dc)) {
+ return classDecl->getTypeParamListAsWritten();
+ }
+
+ if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
+ return catDecl->getTypeParamList();
+ }
+
+ return false;
+ }
+
+ template<typename T>
+ static bool isObjCMethodWithTypeParams(const T *) { return false; }
+#endif
+
+ /// EmitCallArgs - Emit call arguments for a function.
+ template <typename T>
+ void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo,
+ llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
+ const FunctionDecl *CalleeDecl = nullptr,
+ unsigned ParamsToSkip = 0) {
+ SmallVector<QualType, 16> ArgTypes;
+ CallExpr::const_arg_iterator Arg = ArgRange.begin();
+
+ assert((ParamsToSkip == 0 || CallArgTypeInfo) &&
+ "Can't skip parameters if type info is not provided");
+ if (CallArgTypeInfo) {
+#ifndef NDEBUG
+ bool isGenericMethod = isObjCMethodWithTypeParams(CallArgTypeInfo);
+#endif
+
+ // First, use the argument types that the type info knows about
+ for (auto I = CallArgTypeInfo->param_type_begin() + ParamsToSkip,
+ E = CallArgTypeInfo->param_type_end();
+ I != E; ++I, ++Arg) {
+ assert(Arg != ArgRange.end() && "Running over edge of argument list!");
+ assert((isGenericMethod ||
+ ((*I)->isVariablyModifiedType() ||
+ (*I).getNonReferenceType()->isObjCRetainableType() ||
+ getContext()
+ .getCanonicalType((*I).getNonReferenceType())
+ .getTypePtr() ==
+ getContext()
+ .getCanonicalType((*Arg)->getType())
+ .getTypePtr())) &&
+ "type mismatch in call argument!");
+ ArgTypes.push_back(*I);
+ }
+ }
+
+ // Either we've emitted all the call args, or we have a call to a
+ // variadic function.
+ assert((Arg == ArgRange.end() || !CallArgTypeInfo ||
+ CallArgTypeInfo->isVariadic()) &&
+ "Extra arguments in non-variadic function!");
+
+ // If we still have any arguments, emit them using the type of the argument.
+ for (auto *A : llvm::make_range(Arg, ArgRange.end()))
+ ArgTypes.push_back(getVarArgType(A));
+
+ EmitCallArgs(Args, ArgTypes, ArgRange, CalleeDecl, ParamsToSkip);
+ }
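+ // For example (illustrative, not from the original source): for a call to
+ // `int printf(const char *, ...)` with three arguments, the first argument
+ // type above comes from the prototype, and the remaining two are computed
+ // from the argument expressions via getVarArgType.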
+
+ void EmitCallArgs(CallArgList &Args, ArrayRef<QualType> ArgTypes,
+ llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
+ const FunctionDecl *CalleeDecl = nullptr,
+ unsigned ParamsToSkip = 0);
+
+ /// EmitPointerWithAlignment - Given an expression with a pointer
+ /// type, emit the value and compute our best estimate of the
+ /// alignment of the pointee.
+ ///
+ /// Note that this function will conservatively fall back on the type
+ /// when it doesn't recognize the expression, which means that sometimes
+ /// the alignment it returns is a worst-case estimate.
+ ///
+ /// \param Source - If non-null, this will be initialized with
+ /// information about the source of the alignment. One
+ /// reasonable way to use this information is when there's a
+ /// language guarantee that the pointer must be aligned to some
+ /// stricter value, and we're simply trying to ensure that
+ /// sufficiently obvious uses of under-aligned objects don't get
+ /// miscompiled; for example, a placement new into the address of
+ /// a local variable. In such a case, it's quite reasonable to
+ /// just ignore the returned alignment when it isn't from an
+ /// explicit source.
+ Address EmitPointerWithAlignment(const Expr *Addr,
+ AlignmentSource *Source = nullptr);
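+ //
+ // Sketch of the use case described above (hypothetical code):
+ //   T local;
+ //   new (&local) T();  // placement new into a local's address
+ // The expression `&local` may not reveal T's alignment to this function,
+ // but the language already guarantees it, so such a caller can ignore
+ // the returned alignment when *Source does not report an explicit source.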
+
+private:
+ QualType getVarArgType(const Expr *Arg);
+
+ const TargetCodeGenInfo &getTargetHooks() const {
+ return CGM.getTargetCodeGenInfo();
+ }
+
+ void EmitDeclMetadata();
+
+ BlockByrefHelpers *buildByrefHelpers(llvm::StructType &byrefType,
+ const AutoVarEmission &emission);
+
+ void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
+
+ llvm::Value *GetValueForARMHint(unsigned BuiltinID);
+};
+
+/// Helper class with most of the code for saving a value for a
+/// conditional expression cleanup.
+struct DominatingLLVMValue {
+ typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
+
+ /// Answer whether the given value needs extra work to be saved.
+ static bool needsSaving(llvm::Value *value) {
+ // If it's not an instruction, we don't need to save.
+ if (!isa<llvm::Instruction>(value)) return false;
+
+ // If it's an instruction in the entry block, we don't need to save.
+ llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
+ return (block != &block->getParent()->getEntryBlock());
+ }
+
+ /// Try to save the given value.
+ static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
+ if (!needsSaving(value)) return saved_type(value, false);
+
+ // Otherwise, we need an alloca.
+ auto align = CharUnits::fromQuantity(
+ CGF.CGM.getDataLayout().getPrefTypeAlignment(value->getType()));
+ Address alloca =
+ CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
+ CGF.Builder.CreateStore(value, alloca);
+
+ return saved_type(alloca.getPointer(), true);
+ }
+
+ static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
+ // If the value says it wasn't saved, trust that it's still dominating.
+ if (!value.getInt()) return value.getPointer();
+
+ // Otherwise, it should be an alloca instruction, as set up in save().
+ auto alloca = cast<llvm::AllocaInst>(value.getPointer());
+ return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlignment());
+ }
+};
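+
+// Illustrative use (hypothetical caller, not from this file): a value
+// produced in conditionally-executed code can be spilled so that a cleanup
+// emitted later can still use it:
+//   DominatingLLVMValue::saved_type S = DominatingLLVMValue::save(CGF, V);
+//   // ... emit conditional control flow ...
+//   llvm::Value *Reloaded = DominatingLLVMValue::restore(CGF, S);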
+
+/// A partial specialization of DominatingValue for llvm::Values that
+/// might be llvm::Instructions.
+template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
+ typedef T *type;
+ static type restore(CodeGenFunction &CGF, saved_type value) {
+ return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
+ }
+};
+
+/// A specialization of DominatingValue for Address.
+template <> struct DominatingValue<Address> {
+ typedef Address type;
+
+ struct saved_type {
+ DominatingLLVMValue::saved_type SavedValue;
+ CharUnits Alignment;
+ };
+
+ static bool needsSaving(type value) {
+ return DominatingLLVMValue::needsSaving(value.getPointer());
+ }
+ static saved_type save(CodeGenFunction &CGF, type value) {
+ return { DominatingLLVMValue::save(CGF, value.getPointer()),
+ value.getAlignment() };
+ }
+ static type restore(CodeGenFunction &CGF, saved_type value) {
+ return Address(DominatingLLVMValue::restore(CGF, value.SavedValue),
+ value.Alignment);
+ }
+};
+
+/// A specialization of DominatingValue for RValue.
+template <> struct DominatingValue<RValue> {
+ typedef RValue type;
+ class saved_type {
+ enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
+ AggregateAddress, ComplexAddress };
+
+ llvm::Value *Value;
+ unsigned K : 3;
+ unsigned Align : 29;
+ saved_type(llvm::Value *v, Kind k, unsigned a = 0)
+ : Value(v), K(k), Align(a) {}
+
+ public:
+ static bool needsSaving(RValue value);
+ static saved_type save(CodeGenFunction &CGF, RValue value);
+ RValue restore(CodeGenFunction &CGF);
+
+ // implementations in CGCleanup.cpp
+ };
+
+ static bool needsSaving(type value) {
+ return saved_type::needsSaving(value);
+ }
+ static saved_type save(CodeGenFunction &CGF, type value) {
+ return saved_type::save(CGF, value);
+ }
+ static type restore(CodeGenFunction &CGF, saved_type value) {
+ return value.restore(CGF);
+ }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
new file mode 100644
index 0000000..536c55a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
@@ -0,0 +1,3996 @@
+//===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the per-module state used while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "CGBlocks.h"
+#include "CGCUDARuntime.h"
+#include "CGCXXABI.h"
+#include "CGCall.h"
+#include "CGDebugInfo.h"
+#include "CGObjCRuntime.h"
+#include "CGOpenCLRuntime.h"
+#include "CGOpenMPRuntime.h"
+#include "CodeGenFunction.h"
+#include "CodeGenPGO.h"
+#include "CodeGenTBAA.h"
+#include "CoverageMappingGen.h"
+#include "TargetInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Mangle.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/Module.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/Version.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/ProfileData/InstrProfReader.h"
+#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MD5.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+static const char AnnotationSection[] = "llvm.metadata";
+
+static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
+ switch (CGM.getTarget().getCXXABI().getKind()) {
+ case TargetCXXABI::GenericAArch64:
+ case TargetCXXABI::GenericARM:
+ case TargetCXXABI::iOS:
+ case TargetCXXABI::iOS64:
+ case TargetCXXABI::WatchOS:
+ case TargetCXXABI::GenericMIPS:
+ case TargetCXXABI::GenericItanium:
+ case TargetCXXABI::WebAssembly:
+ return CreateItaniumCXXABI(CGM);
+ case TargetCXXABI::Microsoft:
+ return CreateMicrosoftCXXABI(CGM);
+ }
+
+ llvm_unreachable("invalid C++ ABI kind");
+}
+
+CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
+ const PreprocessorOptions &PPO,
+ const CodeGenOptions &CGO, llvm::Module &M,
+ DiagnosticsEngine &diags,
+ CoverageSourceInfo *CoverageInfo)
+ : Context(C), LangOpts(C.getLangOpts()), HeaderSearchOpts(HSO),
+ PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags),
+ Target(C.getTargetInfo()), ABI(createCXXABI(*this)),
+ VMContext(M.getContext()), TBAA(nullptr), TheTargetCodeGenInfo(nullptr),
+ Types(*this), VTables(*this), ObjCRuntime(nullptr),
+ OpenCLRuntime(nullptr), OpenMPRuntime(nullptr), CUDARuntime(nullptr),
+ DebugInfo(nullptr), ObjCData(nullptr),
+ NoObjCARCExceptionsMetadata(nullptr), PGOReader(nullptr),
+ CFConstantStringClassRef(nullptr), ConstantStringClassRef(nullptr),
+ NSConstantStringType(nullptr), NSConcreteGlobalBlock(nullptr),
+ NSConcreteStackBlock(nullptr), BlockObjectAssign(nullptr),
+ BlockObjectDispose(nullptr), BlockDescriptorType(nullptr),
+ GenericBlockLiteralType(nullptr), LifetimeStartFn(nullptr),
+ LifetimeEndFn(nullptr), SanitizerMD(new SanitizerMetadata(*this)) {
+
+ // Initialize the type cache.
+ llvm::LLVMContext &LLVMContext = M.getContext();
+ VoidTy = llvm::Type::getVoidTy(LLVMContext);
+ Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
+ Int16Ty = llvm::Type::getInt16Ty(LLVMContext);
+ Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
+ Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
+ FloatTy = llvm::Type::getFloatTy(LLVMContext);
+ DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
+ PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
+ PointerAlignInBytes =
+ C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
+ IntAlignInBytes =
+ C.toCharUnitsFromBits(C.getTargetInfo().getIntAlign()).getQuantity();
+ IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
+ IntPtrTy = llvm::IntegerType::get(LLVMContext, PointerWidthInBits);
+ Int8PtrTy = Int8Ty->getPointerTo(0);
+ Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
+
+ RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC();
+ BuiltinCC = getTargetCodeGenInfo().getABIInfo().getBuiltinCC();
+
+ if (LangOpts.ObjC1)
+ createObjCRuntime();
+ if (LangOpts.OpenCL)
+ createOpenCLRuntime();
+ if (LangOpts.OpenMP)
+ createOpenMPRuntime();
+ if (LangOpts.CUDA)
+ createCUDARuntime();
+
+ // Enable TBAA unless it's suppressed. ThreadSanitizer needs TBAA even at O0.
+ if (LangOpts.Sanitize.has(SanitizerKind::Thread) ||
+ (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0))
+ TBAA = new CodeGenTBAA(Context, VMContext, CodeGenOpts, getLangOpts(),
+ getCXXABI().getMangleContext());
+
+ // If debug info or coverage generation is enabled, create the CGDebugInfo
+ // object.
+ if (CodeGenOpts.getDebugInfo() != CodeGenOptions::NoDebugInfo ||
+ CodeGenOpts.EmitGcovArcs ||
+ CodeGenOpts.EmitGcovNotes)
+ DebugInfo = new CGDebugInfo(*this);
+
+ Block.GlobalUniqueCount = 0;
+
+ if (C.getLangOpts().ObjC1)
+ ObjCData = new ObjCEntrypoints();
+
+ if (!CodeGenOpts.InstrProfileInput.empty()) {
+ auto ReaderOrErr =
+ llvm::IndexedInstrProfReader::create(CodeGenOpts.InstrProfileInput);
+ if (std::error_code EC = ReaderOrErr.getError()) {
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "Could not read profile %0: %1");
+ getDiags().Report(DiagID) << CodeGenOpts.InstrProfileInput
+ << EC.message();
+ } else
+ PGOReader = std::move(ReaderOrErr.get());
+ }
+
+ // If coverage mapping generation is enabled, create the
+ // CoverageMappingModuleGen object.
+ if (CodeGenOpts.CoverageMapping)
+ CoverageMapping.reset(new CoverageMappingModuleGen(*this, *CoverageInfo));
+}
+
+CodeGenModule::~CodeGenModule() {
+ delete ObjCRuntime;
+ delete OpenCLRuntime;
+ delete OpenMPRuntime;
+ delete CUDARuntime;
+ delete TheTargetCodeGenInfo;
+ delete TBAA;
+ delete DebugInfo;
+ delete ObjCData;
+}
+
+void CodeGenModule::createObjCRuntime() {
+ // This is just isGNUFamily(), but we want to force implementors of
+ // new ABIs to decide how best to do this.
+ switch (LangOpts.ObjCRuntime.getKind()) {
+ case ObjCRuntime::GNUstep:
+ case ObjCRuntime::GCC:
+ case ObjCRuntime::ObjFW:
+ ObjCRuntime = CreateGNUObjCRuntime(*this);
+ return;
+
+ case ObjCRuntime::FragileMacOSX:
+ case ObjCRuntime::MacOSX:
+ case ObjCRuntime::iOS:
+ case ObjCRuntime::WatchOS:
+ ObjCRuntime = CreateMacObjCRuntime(*this);
+ return;
+ }
+ llvm_unreachable("bad runtime kind");
+}
+
+void CodeGenModule::createOpenCLRuntime() {
+ OpenCLRuntime = new CGOpenCLRuntime(*this);
+}
+
+void CodeGenModule::createOpenMPRuntime() {
+ OpenMPRuntime = new CGOpenMPRuntime(*this);
+}
+
+void CodeGenModule::createCUDARuntime() {
+ CUDARuntime = CreateNVCUDARuntime(*this);
+}
+
+void CodeGenModule::addReplacement(StringRef Name, llvm::Constant *C) {
+ Replacements[Name] = C;
+}
+
+void CodeGenModule::applyReplacements() {
+ for (auto &I : Replacements) {
+ StringRef MangledName = I.first();
+ llvm::Constant *Replacement = I.second;
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (!Entry)
+ continue;
+ auto *OldF = cast<llvm::Function>(Entry);
+ auto *NewF = dyn_cast<llvm::Function>(Replacement);
+ if (!NewF) {
+ if (auto *Alias = dyn_cast<llvm::GlobalAlias>(Replacement)) {
+ NewF = dyn_cast<llvm::Function>(Alias->getAliasee());
+ } else {
+ auto *CE = cast<llvm::ConstantExpr>(Replacement);
+ assert(CE->getOpcode() == llvm::Instruction::BitCast ||
+ CE->getOpcode() == llvm::Instruction::GetElementPtr);
+ NewF = dyn_cast<llvm::Function>(CE->getOperand(0));
+ }
+ }
+
+ // Replace old with new, but keep the old order.
+ OldF->replaceAllUsesWith(Replacement);
+ if (NewF) {
+ NewF->removeFromParent();
+ OldF->getParent()->getFunctionList().insertAfter(OldF->getIterator(),
+ NewF);
+ }
+ OldF->eraseFromParent();
+ }
+}
+
+void CodeGenModule::addGlobalValReplacement(llvm::GlobalValue *GV, llvm::Constant *C) {
+ GlobalValReplacements.push_back(std::make_pair(GV, C));
+}
+
+void CodeGenModule::applyGlobalValReplacements() {
+ for (auto &I : GlobalValReplacements) {
+ llvm::GlobalValue *GV = I.first;
+ llvm::Constant *C = I.second;
+
+ GV->replaceAllUsesWith(C);
+ GV->eraseFromParent();
+ }
+}
+
+// This is only used on aliases that we created ourselves, which we know
+// have a linear structure.
+static const llvm::GlobalObject *getAliasedGlobal(const llvm::GlobalAlias &GA) {
+ llvm::SmallPtrSet<const llvm::GlobalAlias*, 4> Visited;
+ const llvm::Constant *C = &GA;
+ for (;;) {
+ C = C->stripPointerCasts();
+ if (auto *GO = dyn_cast<llvm::GlobalObject>(C))
+ return GO;
+ // stripPointerCasts will not walk over weak aliases.
+ auto *GA2 = dyn_cast<llvm::GlobalAlias>(C);
+ if (!GA2)
+ return nullptr;
+ if (!Visited.insert(GA2).second)
+ return nullptr;
+ C = GA2->getAliasee();
+ }
+}
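+
+// For example (sketch): given `@a = alias @b` and `@b = alias @f`, where @f
+// is a function, this walks to and returns @f; a cycle such as
+// `@a = alias @b` / `@b = alias @a` trips the Visited set and yields null.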
+
+void CodeGenModule::checkAliases() {
+ // Check if the constructed aliases are well formed. It is really unfortunate
+ // that we have to do this in CodeGen, but we only construct mangled names
+ // and aliases during codegen.
+ bool Error = false;
+ DiagnosticsEngine &Diags = getDiags();
+ for (const GlobalDecl &GD : Aliases) {
+ const auto *D = cast<ValueDecl>(GD.getDecl());
+ const AliasAttr *AA = D->getAttr<AliasAttr>();
+ StringRef MangledName = getMangledName(GD);
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ auto *Alias = cast<llvm::GlobalAlias>(Entry);
+ const llvm::GlobalValue *GV = getAliasedGlobal(*Alias);
+ if (!GV) {
+ Error = true;
+ Diags.Report(AA->getLocation(), diag::err_cyclic_alias);
+ } else if (GV->isDeclaration()) {
+ Error = true;
+ Diags.Report(AA->getLocation(), diag::err_alias_to_undefined);
+ }
+
+ llvm::Constant *Aliasee = Alias->getAliasee();
+ llvm::GlobalValue *AliaseeGV;
+ if (auto CE = dyn_cast<llvm::ConstantExpr>(Aliasee))
+ AliaseeGV = cast<llvm::GlobalValue>(CE->getOperand(0));
+ else
+ AliaseeGV = cast<llvm::GlobalValue>(Aliasee);
+
+ if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
+ StringRef AliasSection = SA->getName();
+ if (AliasSection != AliaseeGV->getSection())
+ Diags.Report(SA->getLocation(), diag::warn_alias_with_section)
+ << AliasSection;
+ }
+
+ // We have to handle aliases to weak aliases here. LLVM itself disallows
+ // this since the object semantics would not match the IR semantics. For
+ // compatibility with gcc we implement it by just pointing the alias
+ // to its aliasee's aliasee. We also warn, since the user is probably
+ // expecting the link to be weak.
+ if (auto GA = dyn_cast<llvm::GlobalAlias>(AliaseeGV)) {
+ if (GA->mayBeOverridden()) {
+ Diags.Report(AA->getLocation(), diag::warn_alias_to_weak_alias)
+ << GV->getName() << GA->getName();
+ Aliasee = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
+ GA->getAliasee(), Alias->getType());
+ Alias->setAliasee(Aliasee);
+ }
+ }
+ }
+ if (!Error)
+ return;
+
+ for (const GlobalDecl &GD : Aliases) {
+ StringRef MangledName = getMangledName(GD);
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ auto *Alias = cast<llvm::GlobalAlias>(Entry);
+ Alias->replaceAllUsesWith(llvm::UndefValue::get(Alias->getType()));
+ Alias->eraseFromParent();
+ }
+}
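+
+// For example (hypothetical source, for illustration only):
+//   void g();                               // declared, never defined
+//   void f() __attribute__((alias("g")));   // -> err_alias_to_undefined
+// and two aliases that name each other produce err_cyclic_alias.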
+
+void CodeGenModule::clear() {
+ DeferredDeclsToEmit.clear();
+ if (OpenMPRuntime)
+ OpenMPRuntime->clear();
+}
+
+void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags,
+ StringRef MainFile) {
+ if (!hasDiagnostics())
+ return;
+ if (VisitedInMainFile > 0 && VisitedInMainFile == MissingInMainFile) {
+ if (MainFile.empty())
+ MainFile = "<stdin>";
+ Diags.Report(diag::warn_profile_data_unprofiled) << MainFile;
+ } else
+ Diags.Report(diag::warn_profile_data_out_of_date) << Visited << Missing
+ << Mismatched;
+}
+
+void CodeGenModule::Release() {
+ EmitDeferred();
+ applyGlobalValReplacements();
+ applyReplacements();
+ checkAliases();
+ EmitCXXGlobalInitFunc();
+ EmitCXXGlobalDtorFunc();
+ EmitCXXThreadLocalInitFunc();
+ if (ObjCRuntime)
+ if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction())
+ AddGlobalCtor(ObjCInitFunction);
+ if (Context.getLangOpts().CUDA && !Context.getLangOpts().CUDAIsDevice &&
+ CUDARuntime) {
+ if (llvm::Function *CudaCtorFunction = CUDARuntime->makeModuleCtorFunction())
+ AddGlobalCtor(CudaCtorFunction);
+ if (llvm::Function *CudaDtorFunction = CUDARuntime->makeModuleDtorFunction())
+ AddGlobalDtor(CudaDtorFunction);
+ }
+ if (OpenMPRuntime)
+ if (llvm::Function *OpenMPRegistrationFunction =
+ OpenMPRuntime->emitRegistrationFunction())
+ AddGlobalCtor(OpenMPRegistrationFunction, 0);
+ if (PGOReader) {
+ getModule().setMaximumFunctionCount(PGOReader->getMaximumFunctionCount());
+ if (PGOStats.hasDiagnostics())
+ PGOStats.reportDiagnostics(getDiags(), getCodeGenOpts().MainFileName);
+ }
+ EmitCtorList(GlobalCtors, "llvm.global_ctors");
+ EmitCtorList(GlobalDtors, "llvm.global_dtors");
+ EmitGlobalAnnotations();
+ EmitStaticExternCAliases();
+ EmitDeferredUnusedCoverageMappings();
+ if (CoverageMapping)
+ CoverageMapping->emit();
+ emitLLVMUsed();
+
+ if (CodeGenOpts.Autolink &&
+ (Context.getLangOpts().Modules || !LinkerOptionsMetadata.empty())) {
+ EmitModuleLinkOptions();
+ }
+ if (CodeGenOpts.DwarfVersion) {
+ // We actually want the latest version when there are conflicts.
+ // We can change from Warning to Latest if such a mode is supported.
+ getModule().addModuleFlag(llvm::Module::Warning, "Dwarf Version",
+ CodeGenOpts.DwarfVersion);
+ }
+ if (CodeGenOpts.EmitCodeView) {
+ // Indicate that we want CodeView in the metadata.
+ getModule().addModuleFlag(llvm::Module::Warning, "CodeView", 1);
+ }
+ if (CodeGenOpts.OptimizationLevel > 0 && CodeGenOpts.StrictVTablePointers) {
+ // We don't support mixing modules with different StrictVTablePointers
+ // settings under LTO.
+ // FIXME: we could support it by stripping all the information introduced
+ // by StrictVTablePointers.
+
+ getModule().addModuleFlag(llvm::Module::Error, "StrictVTablePointers", 1);
+
+ llvm::Metadata *Ops[2] = {
+ llvm::MDString::get(VMContext, "StrictVTablePointers"),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), 1))};
+
+ getModule().addModuleFlag(llvm::Module::Require,
+ "StrictVTablePointersRequirement",
+ llvm::MDNode::get(VMContext, Ops));
+ }
+ if (DebugInfo)
+ // We support a single version in the linked module. The LLVM
+ // parser will drop debug info with a different version number
+ // (and warn about it, too).
+ getModule().addModuleFlag(llvm::Module::Warning, "Debug Info Version",
+ llvm::DEBUG_METADATA_VERSION);
+
+ // We need to record the widths of enums and wchar_t, so that we can generate
+ // the correct build attributes in the ARM backend.
+ llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
+ if (Arch == llvm::Triple::arm || Arch == llvm::Triple::armeb ||
+ Arch == llvm::Triple::thumb || Arch == llvm::Triple::thumbeb) {
+ // Width of wchar_t in bytes
+ uint64_t WCharWidth =
+ Context.getTypeSizeInChars(Context.getWideCharType()).getQuantity();
+ getModule().addModuleFlag(llvm::Module::Error, "wchar_size", WCharWidth);
+
+ // The minimum width of an enum in bytes
+ uint64_t EnumWidth = Context.getLangOpts().ShortEnums ? 1 : 4;
+ getModule().addModuleFlag(llvm::Module::Error, "min_enum_size", EnumWidth);
+ }
+
+ if (CodeGenOpts.SanitizeCfiCrossDso) {
+ // Indicate that we want cross-DSO control flow integrity checks.
+ getModule().addModuleFlag(llvm::Module::Override, "Cross-DSO CFI", 1);
+ }
+
+ if (uint32_t PLevel = Context.getLangOpts().PICLevel) {
+ llvm::PICLevel::Level PL = llvm::PICLevel::Default;
+ switch (PLevel) {
+ case 0: break;
+ case 1: PL = llvm::PICLevel::Small; break;
+ case 2: PL = llvm::PICLevel::Large; break;
+ default: llvm_unreachable("Invalid PIC Level");
+ }
+
+ getModule().setPICLevel(PL);
+ }
+
+ SimplifyPersonality();
+
+ if (getCodeGenOpts().EmitDeclMetadata)
+ EmitDeclMetadata();
+
+ if (getCodeGenOpts().EmitGcovArcs || getCodeGenOpts().EmitGcovNotes)
+ EmitCoverageFile();
+
+ if (DebugInfo)
+ DebugInfo->finalize();
+
+ EmitVersionIdentMetadata();
+
+ EmitTargetMetadata();
+}
+
+void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
+ // Make sure that this type is translated.
+ Types.UpdateCompletedType(TD);
+}
+
+llvm::MDNode *CodeGenModule::getTBAAInfo(QualType QTy) {
+ if (!TBAA)
+ return nullptr;
+ return TBAA->getTBAAInfo(QTy);
+}
+
+llvm::MDNode *CodeGenModule::getTBAAInfoForVTablePtr() {
+ if (!TBAA)
+ return nullptr;
+ return TBAA->getTBAAInfoForVTablePtr();
+}
+
+llvm::MDNode *CodeGenModule::getTBAAStructInfo(QualType QTy) {
+ if (!TBAA)
+ return nullptr;
+ return TBAA->getTBAAStructInfo(QTy);
+}
+
+llvm::MDNode *CodeGenModule::getTBAAStructTagInfo(QualType BaseTy,
+ llvm::MDNode *AccessN,
+ uint64_t O) {
+ if (!TBAA)
+ return nullptr;
+ return TBAA->getTBAAStructTagInfo(BaseTy, AccessN, O);
+}
+
+/// Decorate the instruction with a TBAA tag. For both scalar TBAA
+/// and struct-path aware TBAA, the tag has the same format:
+/// base type, access type and offset.
+/// When ConvertTypeToTag is true, we create a tag based on the scalar type.
+void CodeGenModule::DecorateInstructionWithTBAA(llvm::Instruction *Inst,
+ llvm::MDNode *TBAAInfo,
+ bool ConvertTypeToTag) {
+ if (ConvertTypeToTag && TBAA)
+ Inst->setMetadata(llvm::LLVMContext::MD_tbaa,
+ TBAA->getTBAAScalarTagInfo(TBAAInfo));
+ else
+ Inst->setMetadata(llvm::LLVMContext::MD_tbaa, TBAAInfo);
+}
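+
+// The attached tag has the shape !{<base type node>, <access type node>,
+// i64 <offset>}; e.g. a plain int load would carry a tag whose base and
+// access nodes are both the node for "int" at offset 0 (illustrative shape
+// only, not the exact node contents).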
+
+void CodeGenModule::DecorateInstructionWithInvariantGroup(
+ llvm::Instruction *I, const CXXRecordDecl *RD) {
+ llvm::Metadata *MD = CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
+ auto *MetaDataNode = dyn_cast<llvm::MDNode>(MD);
+ // Check if we have to wrap MDString in MDNode.
+ if (!MetaDataNode)
+ MetaDataNode = llvm::MDNode::get(getLLVMContext(), MD);
+ I->setMetadata(llvm::LLVMContext::MD_invariant_group, MetaDataNode);
+}
+
+void CodeGenModule::Error(SourceLocation loc, StringRef message) {
+ unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "%0");
+ getDiags().Report(Context.getFullLoc(loc), diagID) << message;
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified stmt yet.
+void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type) {
+ unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot compile this %0 yet");
+ std::string Msg = Type;
+ getDiags().Report(Context.getFullLoc(S->getLocStart()), DiagID)
+ << Msg << S->getSourceRange();
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified decl yet.
+void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type) {
+ unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot compile this %0 yet");
+ std::string Msg = Type;
+ getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
+}
+
+llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) {
+ return llvm::ConstantInt::get(SizeTy, size.getQuantity());
+}
+
+void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
+ const NamedDecl *D) const {
+ // Internal definitions always have default visibility.
+ if (GV->hasLocalLinkage()) {
+ GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+ return;
+ }
+
+ // Set visibility for definitions.
+ LinkageInfo LV = D->getLinkageAndVisibility();
+ if (LV.isVisibilityExplicit() || !GV->hasAvailableExternallyLinkage())
+ GV->setVisibility(GetLLVMVisibility(LV.getVisibility()));
+}
+
+static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) {
+ return llvm::StringSwitch<llvm::GlobalVariable::ThreadLocalMode>(S)
+ .Case("global-dynamic", llvm::GlobalVariable::GeneralDynamicTLSModel)
+ .Case("local-dynamic", llvm::GlobalVariable::LocalDynamicTLSModel)
+ .Case("initial-exec", llvm::GlobalVariable::InitialExecTLSModel)
+ .Case("local-exec", llvm::GlobalVariable::LocalExecTLSModel);
+}
+
+static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(
+ CodeGenOptions::TLSModel M) {
+ switch (M) {
+ case CodeGenOptions::GeneralDynamicTLSModel:
+ return llvm::GlobalVariable::GeneralDynamicTLSModel;
+ case CodeGenOptions::LocalDynamicTLSModel:
+ return llvm::GlobalVariable::LocalDynamicTLSModel;
+ case CodeGenOptions::InitialExecTLSModel:
+ return llvm::GlobalVariable::InitialExecTLSModel;
+ case CodeGenOptions::LocalExecTLSModel:
+ return llvm::GlobalVariable::LocalExecTLSModel;
+ }
+ llvm_unreachable("Invalid TLS model!");
+}
+
+void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const {
+ assert(D.getTLSKind() && "setting TLS mode on non-TLS var!");
+
+ llvm::GlobalValue::ThreadLocalMode TLM;
+ TLM = GetLLVMTLSModel(CodeGenOpts.getDefaultTLSModel());
+
+ // Override the TLS model if it is explicitly specified.
+ if (const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>()) {
+ TLM = GetLLVMTLSModel(Attr->getModel());
+ }
+
+ GV->setThreadLocalMode(TLM);
+}
+
+StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
+ StringRef &FoundStr = MangledDeclNames[GD.getCanonicalDecl()];
+ if (!FoundStr.empty())
+ return FoundStr;
+
+ const auto *ND = cast<NamedDecl>(GD.getDecl());
+ SmallString<256> Buffer;
+ StringRef Str;
+ if (getCXXABI().getMangleContext().shouldMangleDeclName(ND)) {
+ llvm::raw_svector_ostream Out(Buffer);
+ if (const auto *D = dyn_cast<CXXConstructorDecl>(ND))
+ getCXXABI().getMangleContext().mangleCXXCtor(D, GD.getCtorType(), Out);
+ else if (const auto *D = dyn_cast<CXXDestructorDecl>(ND))
+ getCXXABI().getMangleContext().mangleCXXDtor(D, GD.getDtorType(), Out);
+ else
+ getCXXABI().getMangleContext().mangleName(ND, Out);
+ Str = Out.str();
+ } else {
+ IdentifierInfo *II = ND->getIdentifier();
+ assert(II && "Attempt to mangle unnamed decl.");
+ Str = II->getName();
+ }
+
+ // Keep the first result in the case of a mangling collision.
+ auto Result = Manglings.insert(std::make_pair(Str, GD));
+ return FoundStr = Result.first->first();
+}
+
+StringRef CodeGenModule::getBlockMangledName(GlobalDecl GD,
+ const BlockDecl *BD) {
+ MangleContext &MangleCtx = getCXXABI().getMangleContext();
+ const Decl *D = GD.getDecl();
+
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ if (!D)
+ MangleCtx.mangleGlobalBlock(BD,
+ dyn_cast_or_null<VarDecl>(initializedGlobalDecl.getDecl()), Out);
+ else if (const auto *CD = dyn_cast<CXXConstructorDecl>(D))
+ MangleCtx.mangleCtorBlock(CD, GD.getCtorType(), BD, Out);
+ else if (const auto *DD = dyn_cast<CXXDestructorDecl>(D))
+ MangleCtx.mangleDtorBlock(DD, GD.getDtorType(), BD, Out);
+ else
+ MangleCtx.mangleBlock(cast<DeclContext>(D), BD, Out);
+
+ auto Result = Manglings.insert(std::make_pair(Out.str(), BD));
+ return Result.first->first();
+}
+
+llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) {
+ return getModule().getNamedValue(Name);
+}
+
+/// AddGlobalCtor - Add a function to the list that will be called before
+/// main() runs.
+void CodeGenModule::AddGlobalCtor(llvm::Function *Ctor, int Priority,
+ llvm::Constant *AssociatedData) {
+ // FIXME: Type coercion of void()* types.
+ GlobalCtors.push_back(Structor(Priority, Ctor, AssociatedData));
+}
+
+/// AddGlobalDtor - Add a function to the list that will be called
+/// when the module is unloaded.
+void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority) {
+ // FIXME: Type coercion of void()* types.
+ GlobalDtors.push_back(Structor(Priority, Dtor, nullptr));
+}
+
+void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
+ // Ctor function type is void()*.
+ llvm::FunctionType* CtorFTy = llvm::FunctionType::get(VoidTy, false);
+ llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy);
+
+ // Get the type of a ctor entry, { i32, void ()*, i8* }.
+ llvm::StructType *CtorStructTy = llvm::StructType::get(
+ Int32Ty, llvm::PointerType::getUnqual(CtorFTy), VoidPtrTy, nullptr);
+
+ // Construct the constructor and destructor arrays.
+ SmallVector<llvm::Constant *, 8> Ctors;
+ for (const auto &I : Fns) {
+ llvm::Constant *S[] = {
+ llvm::ConstantInt::get(Int32Ty, I.Priority, false),
+ llvm::ConstantExpr::getBitCast(I.Initializer, CtorPFTy),
+ (I.AssociatedData
+ ? llvm::ConstantExpr::getBitCast(I.AssociatedData, VoidPtrTy)
+ : llvm::Constant::getNullValue(VoidPtrTy))};
+ Ctors.push_back(llvm::ConstantStruct::get(CtorStructTy, S));
+ }
+
+ if (!Ctors.empty()) {
+ llvm::ArrayType *AT = llvm::ArrayType::get(CtorStructTy, Ctors.size());
+ new llvm::GlobalVariable(TheModule, AT, false,
+ llvm::GlobalValue::AppendingLinkage,
+ llvm::ConstantArray::get(AT, Ctors),
+ GlobalName);
+ }
+}
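+
+// The emitted global looks roughly like this (sketch; function name and
+// priority are hypothetical):
+//   @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }]
+//     [{ i32, void ()*, i8* } { i32 65535, void ()* @ctor, i8* null }]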
+
+llvm::GlobalValue::LinkageTypes
+CodeGenModule::getFunctionLinkage(GlobalDecl GD) {
+ const auto *D = cast<FunctionDecl>(GD.getDecl());
+
+ GVALinkage Linkage = getContext().GetGVALinkageForFunction(D);
+
+ if (isa<CXXDestructorDecl>(D) &&
+ getCXXABI().useThunkForDtorVariant(cast<CXXDestructorDecl>(D),
+ GD.getDtorType())) {
+ // Destructor variants in the Microsoft C++ ABI are always internal or
+ // linkonce_odr thunks emitted on an as-needed basis.
+ return Linkage == GVA_Internal ? llvm::GlobalValue::InternalLinkage
+ : llvm::GlobalValue::LinkOnceODRLinkage;
+ }
+
+ return getLLVMLinkageForDeclarator(D, Linkage, /*isConstantVariable=*/false);
+}
+
+void CodeGenModule::setFunctionDLLStorageClass(GlobalDecl GD, llvm::Function *F) {
+ const auto *FD = cast<FunctionDecl>(GD.getDecl());
+
+ if (const auto *Dtor = dyn_cast_or_null<CXXDestructorDecl>(FD)) {
+ if (getCXXABI().useThunkForDtorVariant(Dtor, GD.getDtorType())) {
+ // Don't dllexport/import destructor thunks.
+ F->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
+ return;
+ }
+ }
+
+ if (FD->hasAttr<DLLImportAttr>())
+ F->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
+ else if (FD->hasAttr<DLLExportAttr>())
+ F->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
+ else
+ F->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
+}
+
+llvm::ConstantInt *
+CodeGenModule::CreateCfiIdForTypeMetadata(llvm::Metadata *MD) {
+ llvm::MDString *MDS = dyn_cast<llvm::MDString>(MD);
+ if (!MDS) return nullptr;
+
+ llvm::MD5 md5;
+ llvm::MD5::MD5Result result;
+ md5.update(MDS->getString());
+ md5.final(result);
+ uint64_t id = 0;
+ for (int i = 0; i < 8; ++i)
+ id |= static_cast<uint64_t>(result[i]) << (i * 8);
+ return llvm::ConstantInt::get(Int64Ty, id);
+}
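+
+// For example, if the first eight bytes of the MD5 digest were
+// 01 02 03 04 05 06 07 08, the loop above would pack them little-endian
+// into the id 0x0807060504030201.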
+
+void CodeGenModule::setFunctionDefinitionAttributes(const FunctionDecl *D,
+ llvm::Function *F) {
+ setNonAliasAttributes(D, F);
+}
+
+void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
+ const CGFunctionInfo &Info,
+ llvm::Function *F) {
+ unsigned CallingConv;
+ AttributeListType AttributeList;
+ ConstructAttributeList(F->getName(), Info, D, AttributeList, CallingConv,
+ false);
+ F->setAttributes(llvm::AttributeSet::get(getLLVMContext(), AttributeList));
+ F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
+}
+
+/// Determines whether the language options require us to model
+/// unwind exceptions. We treat -fexceptions as mandating this
+/// except under the fragile ObjC ABI with only ObjC exceptions
+/// enabled. This means, for example, that C with -fexceptions
+/// enables this.
+static bool hasUnwindExceptions(const LangOptions &LangOpts) {
+ // If exceptions are completely disabled, obviously this is false.
+ if (!LangOpts.Exceptions) return false;
+
+ // If C++ exceptions are enabled, this is true.
+ if (LangOpts.CXXExceptions) return true;
+
+ // If ObjC exceptions are enabled, this depends on the ABI.
+ if (LangOpts.ObjCExceptions) {
+ return LangOpts.ObjCRuntime.hasUnwindExceptions();
+ }
+
+ return true;
+}
+
+void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
+ llvm::Function *F) {
+ llvm::AttrBuilder B;
+
+ if (CodeGenOpts.UnwindTables)
+ B.addAttribute(llvm::Attribute::UWTable);
+
+ if (!hasUnwindExceptions(LangOpts))
+ B.addAttribute(llvm::Attribute::NoUnwind);
+
+ if (LangOpts.getStackProtector() == LangOptions::SSPOn)
+ B.addAttribute(llvm::Attribute::StackProtect);
+ else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
+ B.addAttribute(llvm::Attribute::StackProtectStrong);
+ else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
+ B.addAttribute(llvm::Attribute::StackProtectReq);
+
+ if (!D) {
+ F->addAttributes(llvm::AttributeSet::FunctionIndex,
+ llvm::AttributeSet::get(
+ F->getContext(),
+ llvm::AttributeSet::FunctionIndex, B));
+ return;
+ }
+
+ if (D->hasAttr<NakedAttr>()) {
+ // Naked implies noinline: we should not be inlining such functions.
+ B.addAttribute(llvm::Attribute::Naked);
+ B.addAttribute(llvm::Attribute::NoInline);
+ } else if (D->hasAttr<NoDuplicateAttr>()) {
+ B.addAttribute(llvm::Attribute::NoDuplicate);
+ } else if (D->hasAttr<NoInlineAttr>()) {
+ B.addAttribute(llvm::Attribute::NoInline);
+ } else if (D->hasAttr<AlwaysInlineAttr>() &&
+ !F->getAttributes().hasAttribute(llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::NoInline)) {
+ // (noinline wins over always_inline, and we can't specify both in IR)
+ B.addAttribute(llvm::Attribute::AlwaysInline);
+ }
+
+ if (D->hasAttr<ColdAttr>()) {
+ if (!D->hasAttr<OptimizeNoneAttr>())
+ B.addAttribute(llvm::Attribute::OptimizeForSize);
+ B.addAttribute(llvm::Attribute::Cold);
+ }
+
+ if (D->hasAttr<MinSizeAttr>())
+ B.addAttribute(llvm::Attribute::MinSize);
+
+ F->addAttributes(llvm::AttributeSet::FunctionIndex,
+ llvm::AttributeSet::get(
+ F->getContext(), llvm::AttributeSet::FunctionIndex, B));
+
+ if (D->hasAttr<OptimizeNoneAttr>()) {
+ // OptimizeNone implies noinline; we should not be inlining such functions.
+ F->addFnAttr(llvm::Attribute::OptimizeNone);
+ F->addFnAttr(llvm::Attribute::NoInline);
+
+ // OptimizeNone wins over OptimizeForSize, MinSize, AlwaysInline.
+ F->removeFnAttr(llvm::Attribute::OptimizeForSize);
+ F->removeFnAttr(llvm::Attribute::MinSize);
+ assert(!F->hasFnAttribute(llvm::Attribute::AlwaysInline) &&
+ "OptimizeNone and AlwaysInline on same function!");
+
+ // Attribute 'inlinehint' has no effect on 'optnone' functions.
+ // Explicitly remove it from the set of function attributes.
+ F->removeFnAttr(llvm::Attribute::InlineHint);
+ }
+
+ if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
+ F->setUnnamedAddr(true);
+ else if (const auto *MD = dyn_cast<CXXMethodDecl>(D))
+ if (MD->isVirtual())
+ F->setUnnamedAddr(true);
+
+ unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
+ if (alignment)
+ F->setAlignment(alignment);
+
+ // Some C++ ABIs require 2-byte alignment for member functions, in order to
+ // reserve a bit for differentiating between virtual and non-virtual member
+ // functions. If the current target's C++ ABI requires this and this is a
+ // member function, set its alignment accordingly.
+ if (getTarget().getCXXABI().areMemberFunctionsAligned()) {
+ if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
+ F->setAlignment(2);
+ }
+}
+
+void CodeGenModule::SetCommonAttributes(const Decl *D,
+ llvm::GlobalValue *GV) {
+ if (const auto *ND = dyn_cast_or_null<NamedDecl>(D))
+ setGlobalVisibility(GV, ND);
+ else
+ GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+
+ if (D && D->hasAttr<UsedAttr>())
+ addUsedGlobal(GV);
+}
+
+void CodeGenModule::setAliasAttributes(const Decl *D,
+ llvm::GlobalValue *GV) {
+ SetCommonAttributes(D, GV);
+
+ // Process the dllexport attribute based on whether the original definition
+ // (not necessarily the aliasee) was exported.
+ if (D->hasAttr<DLLExportAttr>())
+ GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+}
+
+void CodeGenModule::setNonAliasAttributes(const Decl *D,
+ llvm::GlobalObject *GO) {
+ SetCommonAttributes(D, GO);
+
+ if (D)
+ if (const SectionAttr *SA = D->getAttr<SectionAttr>())
+ GO->setSection(SA->getName());
+
+ getTargetCodeGenInfo().setTargetAttributes(D, GO, *this);
+}
+
+void CodeGenModule::SetInternalFunctionAttributes(const Decl *D,
+ llvm::Function *F,
+ const CGFunctionInfo &FI) {
+ SetLLVMFunctionAttributes(D, FI, F);
+ SetLLVMFunctionAttributesForDefinition(D, F);
+
+ F->setLinkage(llvm::Function::InternalLinkage);
+
+ setNonAliasAttributes(D, F);
+}
+
+static void setLinkageAndVisibilityForGV(llvm::GlobalValue *GV,
+ const NamedDecl *ND) {
+ // Set linkage and visibility in case we never see a definition.
+ LinkageInfo LV = ND->getLinkageAndVisibility();
+ if (LV.getLinkage() != ExternalLinkage) {
+ // Don't set internal linkage on declarations.
+ } else {
+ if (ND->hasAttr<DLLImportAttr>()) {
+ GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ } else if (ND->hasAttr<DLLExportAttr>()) {
+ GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+ } else if (ND->hasAttr<WeakAttr>() || ND->isWeakImported()) {
+ // "extern_weak" is overloaded in LLVM; we probably should have
+ // separate linkage types for this.
+ GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+ }
+
+ // Set visibility on a declaration only if it's explicit.
+ if (LV.isVisibilityExplicit())
+ GV->setVisibility(CodeGenModule::GetLLVMVisibility(LV.getVisibility()));
+ }
+}
+
+void CodeGenModule::CreateFunctionBitSetEntry(const FunctionDecl *FD,
+ llvm::Function *F) {
+ // Only if we are checking indirect calls.
+ if (!LangOpts.Sanitize.has(SanitizerKind::CFIICall))
+ return;
+
+ // Non-static class methods are handled via vtable pointer checks elsewhere.
+ if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic())
+ return;
+
+ // Additionally, if building with cross-DSO support...
+ if (CodeGenOpts.SanitizeCfiCrossDso) {
+ // Don't emit entries for function declarations. In cross-DSO mode these are
+ // handled with better precision at run time.
+ if (!FD->hasBody())
+ return;
+ // Skip available_externally functions. They won't be codegen'ed in the
+ // current module anyway.
+ if (getContext().GetGVALinkageForFunction(FD) == GVA_AvailableExternally)
+ return;
+ }
+
+ llvm::NamedMDNode *BitsetsMD =
+ getModule().getOrInsertNamedMetadata("llvm.bitsets");
+
+ llvm::Metadata *MD = CreateMetadataIdentifierForType(FD->getType());
+ llvm::Metadata *BitsetOps[] = {
+ MD, llvm::ConstantAsMetadata::get(F),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int64Ty, 0))};
+ BitsetsMD->addOperand(llvm::MDTuple::get(getLLVMContext(), BitsetOps));
+
+ // Emit a hash-based bit set entry for cross-DSO calls.
+ if (CodeGenOpts.SanitizeCfiCrossDso) {
+ if (auto TypeId = CreateCfiIdForTypeMetadata(MD)) {
+ llvm::Metadata *BitsetOps2[] = {
+ llvm::ConstantAsMetadata::get(TypeId),
+ llvm::ConstantAsMetadata::get(F),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int64Ty, 0))};
+ BitsetsMD->addOperand(llvm::MDTuple::get(getLLVMContext(), BitsetOps2));
+ }
+ }
+}
+
+void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
+ bool IsIncompleteFunction,
+ bool IsThunk) {
+ if (llvm::Intrinsic::ID IID = F->getIntrinsicID()) {
+ // If this is an intrinsic function, set the function's attributes
+ // to the intrinsic's attributes.
+ F->setAttributes(llvm::Intrinsic::getAttributes(getLLVMContext(), IID));
+ return;
+ }
+
+ const auto *FD = cast<FunctionDecl>(GD.getDecl());
+
+ if (!IsIncompleteFunction)
+ SetLLVMFunctionAttributes(FD, getTypes().arrangeGlobalDeclaration(GD), F);
+
+ // Add the Returned attribute for "this", except for iOS 5 and earlier
+ // where substantial code, including the libstdc++ dylib, was compiled with
+ // GCC and does not actually return "this".
+ if (!IsThunk && getCXXABI().HasThisReturn(GD) &&
+ !(getTarget().getTriple().isiOS() &&
+ getTarget().getTriple().isOSVersionLT(6))) {
+ assert(!F->arg_empty() &&
+ F->arg_begin()->getType()
+ ->canLosslesslyBitCastTo(F->getReturnType()) &&
+ "unexpected this return");
+ F->addAttribute(1, llvm::Attribute::Returned);
+ }
+
+ // Only a few attributes are set on declarations; these may later be
+ // overridden by a definition.
+
+ setLinkageAndVisibilityForGV(F, FD);
+
+ if (const SectionAttr *SA = FD->getAttr<SectionAttr>())
+ F->setSection(SA->getName());
+
+ // A replaceable global allocation function does not act like a builtin by
+ // default, only if it is invoked by a new-expression or delete-expression.
+ if (FD->isReplaceableGlobalAllocationFunction())
+ F->addAttribute(llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::NoBuiltin);
+
+ CreateFunctionBitSetEntry(FD, F);
+}
+
+void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) {
+ assert(!GV->isDeclaration() &&
+ "Only globals with definition can force usage.");
+ LLVMUsed.emplace_back(GV);
+}
+
+void CodeGenModule::addCompilerUsedGlobal(llvm::GlobalValue *GV) {
+ assert(!GV->isDeclaration() &&
+ "Only globals with definition can force usage.");
+ LLVMCompilerUsed.emplace_back(GV);
+}
+
+static void emitUsed(CodeGenModule &CGM, StringRef Name,
+ std::vector<llvm::WeakVH> &List) {
+ // Don't create llvm.used if there is no need.
+ if (List.empty())
+ return;
+
+ // Convert List to what ConstantArray needs.
+ SmallVector<llvm::Constant*, 8> UsedArray;
+ UsedArray.resize(List.size());
+ for (unsigned i = 0, e = List.size(); i != e; ++i) {
+ UsedArray[i] =
+ llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
+ cast<llvm::Constant>(&*List[i]), CGM.Int8PtrTy);
+ }
+
+ if (UsedArray.empty())
+ return;
+ llvm::ArrayType *ATy = llvm::ArrayType::get(CGM.Int8PtrTy, UsedArray.size());
+
+ auto *GV = new llvm::GlobalVariable(
+ CGM.getModule(), ATy, false, llvm::GlobalValue::AppendingLinkage,
+ llvm::ConstantArray::get(ATy, UsedArray), Name);
+
+ GV->setSection("llvm.metadata");
+}
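+
+// The resulting global looks roughly like this (sketch; @f is hypothetical):
+//   @llvm.used = appending global [1 x i8*]
+//     [i8* bitcast (void ()* @f to i8*)], section "llvm.metadata"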
+
+void CodeGenModule::emitLLVMUsed() {
+ emitUsed(*this, "llvm.used", LLVMUsed);
+ emitUsed(*this, "llvm.compiler.used", LLVMCompilerUsed);
+}
+
+void CodeGenModule::AppendLinkerOptions(StringRef Opts) {
+ auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opts);
+ LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
+}
+
+void CodeGenModule::AddDetectMismatch(StringRef Name, StringRef Value) {
+ llvm::SmallString<32> Opt;
+ getTargetCodeGenInfo().getDetectMismatchOption(Name, Value, Opt);
+ auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
+ LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
+}
+
+void CodeGenModule::AddDependentLib(StringRef Lib) {
+ llvm::SmallString<24> Opt;
+ getTargetCodeGenInfo().getDependentLibraryOption(Lib, Opt);
+ auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
+ LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
+}
+
+/// \brief Add link options implied by the given module, including modules
+/// it depends on, using a postorder walk.
+static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
+ SmallVectorImpl<llvm::Metadata *> &Metadata,
+ llvm::SmallPtrSet<Module *, 16> &Visited) {
+ // Import this module's parent.
+ if (Mod->Parent && Visited.insert(Mod->Parent).second) {
+ addLinkOptionsPostorder(CGM, Mod->Parent, Metadata, Visited);
+ }
+
+ // Import this module's dependencies.
+ for (unsigned I = Mod->Imports.size(); I > 0; --I) {
+ if (Visited.insert(Mod->Imports[I - 1]).second)
+ addLinkOptionsPostorder(CGM, Mod->Imports[I-1], Metadata, Visited);
+ }
+
+ // Add linker options to link against the libraries/frameworks
+ // described by this module.
+ llvm::LLVMContext &Context = CGM.getLLVMContext();
+ for (unsigned I = Mod->LinkLibraries.size(); I > 0; --I) {
+ // Link against a framework. Frameworks are currently Darwin only, so we
+ // don't need to ask TargetCodeGenInfo for the spelling of the linker option.
+ if (Mod->LinkLibraries[I-1].IsFramework) {
+ llvm::Metadata *Args[2] = {
+ llvm::MDString::get(Context, "-framework"),
+ llvm::MDString::get(Context, Mod->LinkLibraries[I - 1].Library)};
+
+ Metadata.push_back(llvm::MDNode::get(Context, Args));
+ continue;
+ }
+
+ // Link against a library.
+ llvm::SmallString<24> Opt;
+ CGM.getTargetCodeGenInfo().getDependentLibraryOption(
+ Mod->LinkLibraries[I-1].Library, Opt);
+ auto *OptString = llvm::MDString::get(Context, Opt);
+ Metadata.push_back(llvm::MDNode::get(Context, OptString));
+ }
+}
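+
+// For a module that links against the Cocoa framework and libz, this
+// appends nodes roughly like !{!"-framework", !"Cocoa"} and !{!"-lz"}
+// (the library spelling is target-dependent; shown here for a Darwin-style
+// linker as an illustration).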
+
+void CodeGenModule::EmitModuleLinkOptions() {
+ // Collect the set of all of the modules we want to visit to emit link
+ // options, which is essentially the imported modules and all of their
+ // non-explicit child modules.
+ llvm::SetVector<clang::Module *> LinkModules;
+ llvm::SmallPtrSet<clang::Module *, 16> Visited;
+ SmallVector<clang::Module *, 16> Stack;
+
+ // Seed the stack with imported modules.
+ for (Module *M : ImportedModules)
+ if (Visited.insert(M).second)
+ Stack.push_back(M);
+
+ // Find all of the modules to import, making a little effort to prune
+ // non-leaf modules.
+ while (!Stack.empty()) {
+ clang::Module *Mod = Stack.pop_back_val();
+
+ bool AnyChildren = false;
+
+ // Visit the submodules of this module.
+ for (clang::Module::submodule_iterator Sub = Mod->submodule_begin(),
+ SubEnd = Mod->submodule_end();
+ Sub != SubEnd; ++Sub) {
+ // Skip explicit children; they need to be explicitly imported to be
+ // linked against.
+ if ((*Sub)->IsExplicit)
+ continue;
+
+ if (Visited.insert(*Sub).second) {
+ Stack.push_back(*Sub);
+ AnyChildren = true;
+ }
+ }
+
+ // We didn't find any children, so add this module to the list of
+ // modules to link against.
+ if (!AnyChildren) {
+ LinkModules.insert(Mod);
+ }
+ }
+
+ // Add link options for all of the imported modules in reverse topological
+ // order. We don't do anything to try to order import link flags with respect
+ // to linker options inserted by things like #pragma comment().
+ SmallVector<llvm::Metadata *, 16> MetadataArgs;
+ Visited.clear();
+ for (Module *M : LinkModules)
+ if (Visited.insert(M).second)
+ addLinkOptionsPostorder(*this, M, MetadataArgs, Visited);
+ std::reverse(MetadataArgs.begin(), MetadataArgs.end());
+ LinkerOptionsMetadata.append(MetadataArgs.begin(), MetadataArgs.end());
+
+ // Add the linker options metadata flag.
+ getModule().addModuleFlag(llvm::Module::AppendUnique, "Linker Options",
+ llvm::MDNode::get(getLLVMContext(),
+ LinkerOptionsMetadata));
+}
+
+void CodeGenModule::EmitDeferred() {
+ // Emit code for any potentially referenced deferred decls. Since a
+ // previously unused static decl may become used during the generation of code
+ // for a static function, iterate until no changes are made.
+
+ if (!DeferredVTables.empty()) {
+ EmitDeferredVTables();
+
+ // Emitting a v-table doesn't directly cause more v-tables to
+ // become deferred, although it can cause functions to be
+ // emitted that then need those v-tables.
+ assert(DeferredVTables.empty());
+ }
+
+ // Stop if we're out of both deferred v-tables and deferred declarations.
+ if (DeferredDeclsToEmit.empty())
+ return;
+
+ // Grab the list of decls to emit. If EmitGlobalDefinition schedules more
+ // work, it will not interfere with this.
+ std::vector<DeferredGlobal> CurDeclsToEmit;
+ CurDeclsToEmit.swap(DeferredDeclsToEmit);
+
+ for (DeferredGlobal &G : CurDeclsToEmit) {
+ GlobalDecl D = G.GD;
+ llvm::GlobalValue *GV = G.GV;
+ G.GV = nullptr;
+
+ // We should call GetAddrOfGlobal with IsForDefinition set to true in order
+ // to get a GlobalValue with exactly the type we need, not something that
+ // might have been created for another decl with the same mangled name but
+ // a different type.
+ // FIXME: Support for variables is not implemented yet.
+ if (isa<FunctionDecl>(D.getDecl()))
+ GV = cast<llvm::GlobalValue>(GetAddrOfGlobal(D, /*IsForDefinition=*/true));
+ else
+ if (!GV)
+ GV = GetGlobalValue(getMangledName(D));
+
+ // Check to see if we've already emitted this. This is necessary
+ // for a couple of reasons: first, decls can end up in the
+ // deferred-decls queue multiple times, and second, decls can end
+ // up with definitions in unusual ways (e.g. by an extern inline
+ // function acquiring a strong function redefinition). Just
+ // ignore these cases.
+ if (GV && !GV->isDeclaration())
+ continue;
+
+ // Otherwise, emit the definition and move on to the next one.
+ EmitGlobalDefinition(D, GV);
+
+ // If we found out that we need to emit more decls, do that recursively.
+ // This has the advantage that the decls are emitted in a DFS and related
+ // ones are close together, which is convenient for testing.
+ if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) {
+ EmitDeferred();
+ assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty());
+ }
+ }
+}
+
+void CodeGenModule::EmitGlobalAnnotations() {
+ if (Annotations.empty())
+ return;
+
+ // Create a new global variable for the ConstantStruct in the Module.
+ llvm::Constant *Array = llvm::ConstantArray::get(llvm::ArrayType::get(
+ Annotations[0]->getType(), Annotations.size()), Annotations);
+ auto *gv = new llvm::GlobalVariable(getModule(), Array->getType(), false,
+ llvm::GlobalValue::AppendingLinkage,
+ Array, "llvm.global.annotations");
+ gv->setSection(AnnotationSection);
+}
+
+llvm::Constant *CodeGenModule::EmitAnnotationString(StringRef Str) {
+ llvm::Constant *&AStr = AnnotationStrings[Str];
+ if (AStr)
+ return AStr;
+
+ // Not found yet, create a new global.
+ llvm::Constant *s = llvm::ConstantDataArray::getString(getLLVMContext(), Str);
+ auto *gv =
+ new llvm::GlobalVariable(getModule(), s->getType(), true,
+ llvm::GlobalValue::PrivateLinkage, s, ".str");
+ gv->setSection(AnnotationSection);
+ gv->setUnnamedAddr(true);
+ AStr = gv;
+ return gv;
+}
+
+llvm::Constant *CodeGenModule::EmitAnnotationUnit(SourceLocation Loc) {
+ SourceManager &SM = getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ if (PLoc.isValid())
+ return EmitAnnotationString(PLoc.getFilename());
+ return EmitAnnotationString(SM.getBufferName(Loc));
+}
+
+llvm::Constant *CodeGenModule::EmitAnnotationLineNo(SourceLocation L) {
+ SourceManager &SM = getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(L);
+ unsigned LineNo = PLoc.isValid() ? PLoc.getLine() :
+ SM.getExpansionLineNumber(L);
+ return llvm::ConstantInt::get(Int32Ty, LineNo);
+}
+
+llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
+ const AnnotateAttr *AA,
+ SourceLocation L) {
+ // Get the globals for file name, annotation, and the line number.
+ llvm::Constant *AnnoGV = EmitAnnotationString(AA->getAnnotation()),
+ *UnitGV = EmitAnnotationUnit(L),
+ *LineNoCst = EmitAnnotationLineNo(L);
+
+ // Create the ConstantStruct for the global annotation.
+ llvm::Constant *Fields[4] = {
+ llvm::ConstantExpr::getBitCast(GV, Int8PtrTy),
+ llvm::ConstantExpr::getBitCast(AnnoGV, Int8PtrTy),
+ llvm::ConstantExpr::getBitCast(UnitGV, Int8PtrTy),
+ LineNoCst
+ };
+ return llvm::ConstantStruct::getAnon(Fields);
+}
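+
+// The returned constant has the shape (sketch):
+//   { i8* <annotated global>, i8* <annotation string>,
+//     i8* <translation unit name>, i32 <line number> }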
+
+void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D,
+ llvm::GlobalValue *GV) {
+ assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
+ // Get the struct elements for these annotations.
+ for (const auto *I : D->specific_attrs<AnnotateAttr>())
+ Annotations.push_back(EmitAnnotateAttr(GV, I, D->getLocation()));
+}
+
+bool CodeGenModule::isInSanitizerBlacklist(llvm::Function *Fn,
+ SourceLocation Loc) const {
+ const auto &SanitizerBL = getContext().getSanitizerBlacklist();
+ // Blacklist by function name.
+ if (SanitizerBL.isBlacklistedFunction(Fn->getName()))
+ return true;
+ // Blacklist by location.
+ if (Loc.isValid())
+ return SanitizerBL.isBlacklistedLocation(Loc);
+ // If location is unknown, this may be a compiler-generated function. Assume
+ // it's located in the main file.
+ auto &SM = Context.getSourceManager();
+ if (const auto *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
+ return SanitizerBL.isBlacklistedFile(MainFile->getName());
+ }
+ return false;
+}
+
+bool CodeGenModule::isInSanitizerBlacklist(llvm::GlobalVariable *GV,
+ SourceLocation Loc, QualType Ty,
+ StringRef Category) const {
+ // For now globals can be blacklisted only in ASan and KASan.
+ if (!LangOpts.Sanitize.hasOneOf(
+ SanitizerKind::Address | SanitizerKind::KernelAddress))
+ return false;
+ const auto &SanitizerBL = getContext().getSanitizerBlacklist();
+ if (SanitizerBL.isBlacklistedGlobal(GV->getName(), Category))
+ return true;
+ if (SanitizerBL.isBlacklistedLocation(Loc, Category))
+ return true;
+ // Check global type.
+ if (!Ty.isNull()) {
+ // Drill down through array types: if a global variable of some type is
+ // blacklisted, we also don't instrument arrays of that type.
+ while (auto AT = dyn_cast<ArrayType>(Ty.getTypePtr()))
+ Ty = AT->getElementType();
+ Ty = Ty.getCanonicalType().getUnqualifiedType();
+ // Only record types (classes, structs, etc.) may be blacklisted.
+ if (Ty->isRecordType()) {
+ std::string TypeStr = Ty.getAsString(getContext().getPrintingPolicy());
+ if (SanitizerBL.isBlacklistedType(TypeStr, Category))
+ return true;
+ }
+ }
+ return false;
+}
+
+bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) {
+ // Never defer when EmitAllDecls is specified.
+ if (LangOpts.EmitAllDecls)
+ return true;
+
+ return getContext().DeclMustBeEmitted(Global);
+}
+
+bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(Global))
+ if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
+ // Implicit template instantiations may change linkage if they are later
+ // explicitly instantiated, so they should not be emitted eagerly.
+ return false;
+ // If OpenMP is enabled and threadprivates must be generated like TLS, delay
+ // codegen for global variables, because they may be marked as threadprivate.
+ if (LangOpts.OpenMP && LangOpts.OpenMPUseTLS &&
+ getContext().getTargetInfo().isTLSSupported() && isa<VarDecl>(Global))
+ return false;
+
+ return true;
+}
+
+ConstantAddress CodeGenModule::GetAddrOfUuidDescriptor(
+ const CXXUuidofExpr* E) {
+  // Sema has verified that IIDSource has a __declspec(uuid()), and that it's
+  // well-formed.
+ StringRef Uuid = E->getUuidAsStringRef(Context);
+ std::string Name = "_GUID_" + Uuid.lower();
+ std::replace(Name.begin(), Name.end(), '-', '_');
+
+  // The GUID struct contains a 32-bit field, so it is 4-byte aligned.
+ CharUnits Alignment = CharUnits::fromQuantity(4);
+
+ // Look for an existing global.
+ if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
+ return ConstantAddress(GV, Alignment);
+
+ llvm::Constant *Init = EmitUuidofInitializer(Uuid);
+ assert(Init && "failed to initialize as constant");
+
+ auto *GV = new llvm::GlobalVariable(
+ getModule(), Init->getType(),
+ /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
+ if (supportsCOMDAT())
+ GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
+ return ConstantAddress(GV, Alignment);
+}
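+// For illustration (hypothetical example, not part of the original change):
+//   struct __declspec(uuid("01234567-89AB-CDEF-0123-456789ABCDEF")) S;
+// referenced via __uuidof(S) yields a linkonce_odr constant named
+//   _GUID_01234567_89ab_cdef_0123_456789abcdef
+// after the lowercasing and '-' -> '_' rewriting above.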
+
+ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
+ const AliasAttr *AA = VD->getAttr<AliasAttr>();
+ assert(AA && "No alias?");
+
+ CharUnits Alignment = getContext().getDeclAlign(VD);
+ llvm::Type *DeclTy = getTypes().ConvertTypeForMem(VD->getType());
+
+ // See if there is already something with the target's name in the module.
+ llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
+ if (Entry) {
+ unsigned AS = getContext().getTargetAddressSpace(VD->getType());
+ auto Ptr = llvm::ConstantExpr::getBitCast(Entry, DeclTy->getPointerTo(AS));
+ return ConstantAddress(Ptr, Alignment);
+ }
+
+ llvm::Constant *Aliasee;
+ if (isa<llvm::FunctionType>(DeclTy))
+ Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy,
+ GlobalDecl(cast<FunctionDecl>(VD)),
+ /*ForVTable=*/false);
+ else
+ Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
+ llvm::PointerType::getUnqual(DeclTy),
+ nullptr);
+
+ auto *F = cast<llvm::GlobalValue>(Aliasee);
+ F->setLinkage(llvm::Function::ExternalWeakLinkage);
+ WeakRefReferences.insert(F);
+
+ return ConstantAddress(Aliasee, Alignment);
+}
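+// For illustration (hypothetical example, not part of the original change):
+//   static int impl(void) __attribute__((weakref("real_impl")));
+// resolves through this function: the aliasee 'real_impl' is created (or
+// found) and marked extern_weak, so the reference is permitted to be null
+// at runtime if 'real_impl' is never defined anywhere.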
+
+void CodeGenModule::EmitGlobal(GlobalDecl GD) {
+ const auto *Global = cast<ValueDecl>(GD.getDecl());
+
+ // Weak references don't produce any output by themselves.
+ if (Global->hasAttr<WeakRefAttr>())
+ return;
+
+  // If this is an alias definition (which otherwise looks like a
+  // declaration), emit it now.
+ if (Global->hasAttr<AliasAttr>())
+ return EmitAliasDefinition(GD);
+
+ // If this is CUDA, be selective about which declarations we emit.
+ if (LangOpts.CUDA) {
+ if (LangOpts.CUDAIsDevice) {
+ if (!Global->hasAttr<CUDADeviceAttr>() &&
+ !Global->hasAttr<CUDAGlobalAttr>() &&
+ !Global->hasAttr<CUDAConstantAttr>() &&
+ !Global->hasAttr<CUDASharedAttr>())
+ return;
+ } else {
+ if (!Global->hasAttr<CUDAHostAttr>() && (
+ Global->hasAttr<CUDADeviceAttr>() ||
+ Global->hasAttr<CUDAConstantAttr>() ||
+ Global->hasAttr<CUDASharedAttr>()))
+ return;
+ }
+ }
+
+ // If this is OpenMP device, check if it is legal to emit this global
+ // normally.
+ if (OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(GD))
+ return;
+
+ // Ignore declarations, they will be emitted on their first use.
+ if (const auto *FD = dyn_cast<FunctionDecl>(Global)) {
+ // Forward declarations are emitted lazily on first use.
+ if (!FD->doesThisDeclarationHaveABody()) {
+ if (!FD->doesDeclarationForceExternallyVisibleDefinition())
+ return;
+
+ StringRef MangledName = getMangledName(GD);
+
+ // Compute the function info and LLVM type.
+ const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
+ llvm::Type *Ty = getTypes().GetFunctionType(FI);
+
+ GetOrCreateLLVMFunction(MangledName, Ty, GD, /*ForVTable=*/false,
+ /*DontDefer=*/false);
+ return;
+ }
+ } else {
+ const auto *VD = cast<VarDecl>(Global);
+ assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.");
+
+ if (VD->isThisDeclarationADefinition() != VarDecl::Definition &&
+ !Context.isMSStaticDataMemberInlineDefinition(VD))
+ return;
+ }
+
+ // Defer code generation to first use when possible, e.g. if this is an inline
+ // function. If the global must always be emitted, do it eagerly if possible
+ // to benefit from cache locality.
+ if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) {
+ // Emit the definition if it can't be deferred.
+ EmitGlobalDefinition(GD);
+ return;
+ }
+
+ // If we're deferring emission of a C++ variable with an
+ // initializer, remember the order in which it appeared in the file.
+ if (getLangOpts().CPlusPlus && isa<VarDecl>(Global) &&
+ cast<VarDecl>(Global)->hasInit()) {
+ DelayedCXXInitPosition[Global] = CXXGlobalInits.size();
+ CXXGlobalInits.push_back(nullptr);
+ }
+
+ StringRef MangledName = getMangledName(GD);
+ if (llvm::GlobalValue *GV = GetGlobalValue(MangledName)) {
+ // The value has already been used and should therefore be emitted.
+ addDeferredDeclToEmit(GV, GD);
+ } else if (MustBeEmitted(Global)) {
+ // The value must be emitted, but cannot be emitted eagerly.
+ assert(!MayBeEmittedEagerly(Global));
+ addDeferredDeclToEmit(/*GV=*/nullptr, GD);
+ } else {
+ // Otherwise, remember that we saw a deferred decl with this name. The
+ // first use of the mangled name will cause it to move into
+ // DeferredDeclsToEmit.
+ DeferredDecls[MangledName] = GD;
+ }
+}
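+// Deferral example (illustrative, not part of the original change): for
+//   inline int helper(int x) { return x + 1; }
+// MustBeEmitted() is false, so nothing is generated here; the mangled name
+// lands in DeferredDecls, and the definition is emitted only if a later use
+// looks that name up through GetOrCreateLLVMFunction().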
+
+namespace {
+ struct FunctionIsDirectlyRecursive :
+ public RecursiveASTVisitor<FunctionIsDirectlyRecursive> {
+ const StringRef Name;
+ const Builtin::Context &BI;
+ bool Result;
+ FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C) :
+ Name(N), BI(C), Result(false) {
+ }
+ typedef RecursiveASTVisitor<FunctionIsDirectlyRecursive> Base;
+
+ bool TraverseCallExpr(CallExpr *E) {
+ const FunctionDecl *FD = E->getDirectCallee();
+ if (!FD)
+ return true;
+ AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
+ if (Attr && Name == Attr->getLabel()) {
+ Result = true;
+ return false;
+ }
+ unsigned BuiltinID = FD->getBuiltinID();
+ if (!BuiltinID || !BI.isLibFunction(BuiltinID))
+ return true;
+ StringRef BuiltinName = BI.getName(BuiltinID);
+ if (BuiltinName.startswith("__builtin_") &&
+ Name == BuiltinName.slice(strlen("__builtin_"), StringRef::npos)) {
+ Result = true;
+ return false;
+ }
+ return true;
+ }
+ };
+
+ struct DLLImportFunctionVisitor
+ : public RecursiveASTVisitor<DLLImportFunctionVisitor> {
+ bool SafeToInline = true;
+
+ bool VisitVarDecl(VarDecl *VD) {
+ // A thread-local variable cannot be imported.
+ SafeToInline = !VD->getTLSKind();
+ return SafeToInline;
+ }
+
+ // Make sure we're not referencing non-imported vars or functions.
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ ValueDecl *VD = E->getDecl();
+ if (isa<FunctionDecl>(VD))
+ SafeToInline = VD->hasAttr<DLLImportAttr>();
+ else if (VarDecl *V = dyn_cast<VarDecl>(VD))
+ SafeToInline = !V->hasGlobalStorage() || V->hasAttr<DLLImportAttr>();
+ return SafeToInline;
+ }
+ bool VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ SafeToInline = E->getOperatorDelete()->hasAttr<DLLImportAttr>();
+ return SafeToInline;
+ }
+ bool VisitCXXNewExpr(CXXNewExpr *E) {
+ SafeToInline = E->getOperatorNew()->hasAttr<DLLImportAttr>();
+ return SafeToInline;
+ }
+ };
+}
+
+// isTriviallyRecursive - Check whether this function calls another decl
+// that, because of an asm label or the other decl being a builtin, ends up
+// referring back to this function itself.
+bool
+CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) {
+ StringRef Name;
+ if (getCXXABI().getMangleContext().shouldMangleDeclName(FD)) {
+ // asm labels are a special kind of mangling we have to support.
+ AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
+ if (!Attr)
+ return false;
+ Name = Attr->getLabel();
+ } else {
+ Name = FD->getName();
+ }
+
+ FunctionIsDirectlyRecursive Walker(Name, Context.BuiltinInfo);
+ Walker.TraverseFunctionDecl(const_cast<FunctionDecl*>(FD));
+ return Walker.Result;
+}
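+// Illustrative case (hypothetical example, not part of the original
+// change):
+//   extern "C" int impl(int) asm("towlower");
+//   extern "C" inline int towlower(int c) { return impl(c); }
+// After asm-label mangling the call inside 'towlower' targets the symbol
+// 'towlower' itself, so emitting the available_externally body would yield
+// an infinite self-call rather than the real library implementation.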
+
+bool
+CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
+ if (getFunctionLinkage(GD) != llvm::Function::AvailableExternallyLinkage)
+ return true;
+ const auto *F = cast<FunctionDecl>(GD.getDecl());
+ if (CodeGenOpts.OptimizationLevel == 0 && !F->hasAttr<AlwaysInlineAttr>())
+ return false;
+
+ if (F->hasAttr<DLLImportAttr>()) {
+ // Check whether it would be safe to inline this dllimport function.
+ DLLImportFunctionVisitor Visitor;
+ Visitor.TraverseFunctionDecl(const_cast<FunctionDecl*>(F));
+ if (!Visitor.SafeToInline)
+ return false;
+ }
+
+ // PR9614. Avoid cases where the source code is lying to us. An available
+ // externally function should have an equivalent function somewhere else,
+ // but a function that calls itself is clearly not equivalent to the real
+ // implementation.
+ // This happens in glibc's btowc and in some configure checks.
+ return !isTriviallyRecursive(F);
+}
+
+/// If the type for the method's class was generated by
+/// CGDebugInfo::createContextChain(), the cache contains only a
+/// limited DIType without any declarations. Since EmitFunctionStart()
+/// needs to find the canonical declaration for each method, we need
+/// to construct the complete type prior to emitting the method.
+void CodeGenModule::CompleteDIClassType(const CXXMethodDecl* D) {
+ if (!D->isInstance())
+ return;
+
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ if (getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo) {
+ const auto *ThisPtr = cast<PointerType>(D->getThisType(getContext()));
+ DI->getOrCreateRecordType(ThisPtr->getPointeeType(), D->getLocation());
+ }
+}
+
+void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
+ const auto *D = cast<ValueDecl>(GD.getDecl());
+
+ PrettyStackTraceDecl CrashInfo(const_cast<ValueDecl *>(D), D->getLocation(),
+ Context.getSourceManager(),
+ "Generating code for declaration");
+
+ if (isa<FunctionDecl>(D)) {
+ // At -O0, don't generate IR for functions with available_externally
+ // linkage.
+ if (!shouldEmitFunction(GD))
+ return;
+
+ if (const auto *Method = dyn_cast<CXXMethodDecl>(D)) {
+ CompleteDIClassType(Method);
+ // Make sure to emit the definition(s) before we emit the thunks.
+ // This is necessary for the generation of certain thunks.
+ if (const auto *CD = dyn_cast<CXXConstructorDecl>(Method))
+ ABI->emitCXXStructor(CD, getFromCtorType(GD.getCtorType()));
+ else if (const auto *DD = dyn_cast<CXXDestructorDecl>(Method))
+ ABI->emitCXXStructor(DD, getFromDtorType(GD.getDtorType()));
+ else
+ EmitGlobalFunctionDefinition(GD, GV);
+
+ if (Method->isVirtual())
+ getVTables().EmitThunks(GD);
+
+ return;
+ }
+
+ return EmitGlobalFunctionDefinition(GD, GV);
+ }
+
+ if (const auto *VD = dyn_cast<VarDecl>(D))
+ return EmitGlobalVarDefinition(VD);
+
+ llvm_unreachable("Invalid argument to EmitGlobalDefinition()");
+}
+
+static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
+ llvm::Function *NewFn);
+
+/// GetOrCreateLLVMFunction - If the specified mangled name is not in the
+/// module, create and return an llvm Function with the specified type. If there
+/// is something in the module with the specified name, return it potentially
+/// bitcasted to the right type.
+///
+/// If D is non-null, it specifies a decl that corresponds to this. This is
+/// used to set the attributes on the function when it is first created.
+llvm::Constant *
+CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
+ llvm::Type *Ty,
+ GlobalDecl GD, bool ForVTable,
+ bool DontDefer, bool IsThunk,
+ llvm::AttributeSet ExtraAttrs,
+ bool IsForDefinition) {
+ const Decl *D = GD.getDecl();
+
+ // Lookup the entry, lazily creating it if necessary.
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry) {
+ if (WeakRefReferences.erase(Entry)) {
+ const FunctionDecl *FD = cast_or_null<FunctionDecl>(D);
+ if (FD && !FD->hasAttr<WeakAttr>())
+ Entry->setLinkage(llvm::Function::ExternalLinkage);
+ }
+
+ // Handle dropped DLL attributes.
+ if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>())
+ Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
+
+ // If there are two attempts to define the same mangled name, issue an
+ // error.
+ if (IsForDefinition && !Entry->isDeclaration()) {
+ GlobalDecl OtherGD;
+      // Checking that GD is not yet in DiagnosedConflictingDefinitions is
+      // required to make sure that we issue the error only once.
+ if (lookupRepresentativeDecl(MangledName, OtherGD) &&
+ (GD.getCanonicalDecl().getDecl() !=
+ OtherGD.getCanonicalDecl().getDecl()) &&
+ DiagnosedConflictingDefinitions.insert(GD).second) {
+ getDiags().Report(D->getLocation(),
+ diag::err_duplicate_mangled_name);
+ getDiags().Report(OtherGD.getDecl()->getLocation(),
+ diag::note_previous_definition);
+ }
+ }
+
+ if ((isa<llvm::Function>(Entry) || isa<llvm::GlobalAlias>(Entry)) &&
+ (Entry->getType()->getElementType() == Ty)) {
+ return Entry;
+ }
+
+ // Make sure the result is of the correct type.
+    // (If the function is requested for a definition, we always need to
+    // create a new function, not just return a bitcast.)
+ if (!IsForDefinition)
+ return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo());
+ }
+
+ // This function doesn't have a complete type (for example, the return
+ // type is an incomplete struct). Use a fake type instead, and make
+ // sure not to try to set attributes.
+ bool IsIncompleteFunction = false;
+
+ llvm::FunctionType *FTy;
+ if (isa<llvm::FunctionType>(Ty)) {
+ FTy = cast<llvm::FunctionType>(Ty);
+ } else {
+ FTy = llvm::FunctionType::get(VoidTy, false);
+ IsIncompleteFunction = true;
+ }
+
+ llvm::Function *F =
+ llvm::Function::Create(FTy, llvm::Function::ExternalLinkage,
+ Entry ? StringRef() : MangledName, &getModule());
+
+ // If we already created a function with the same mangled name (but different
+ // type) before, take its name and add it to the list of functions to be
+ // replaced with F at the end of CodeGen.
+ //
+ // This happens if there is a prototype for a function (e.g. "int f()") and
+ // then a definition of a different type (e.g. "int f(int x)").
+ if (Entry) {
+ F->takeName(Entry);
+
+ // This might be an implementation of a function without a prototype, in
+ // which case, try to do special replacement of calls which match the new
+    // prototype. The key point here is that we also potentially drop
+    // arguments from the call site so as to make a direct call, which makes
+    // the inliner happier and suppresses a number of optimizer warnings (!)
+    // about dropping arguments.
+ if (!Entry->use_empty()) {
+ ReplaceUsesOfNonProtoTypeWithRealFunction(Entry, F);
+ Entry->removeDeadConstantUsers();
+ }
+
+ llvm::Constant *BC = llvm::ConstantExpr::getBitCast(
+ F, Entry->getType()->getElementType()->getPointerTo());
+ addGlobalValReplacement(Entry, BC);
+ }
+
+ assert(F->getName() == MangledName && "name was uniqued!");
+ if (D)
+ SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk);
+ if (ExtraAttrs.hasAttributes(llvm::AttributeSet::FunctionIndex)) {
+ llvm::AttrBuilder B(ExtraAttrs, llvm::AttributeSet::FunctionIndex);
+ F->addAttributes(llvm::AttributeSet::FunctionIndex,
+ llvm::AttributeSet::get(VMContext,
+ llvm::AttributeSet::FunctionIndex,
+ B));
+ }
+
+ if (!DontDefer) {
+ // All MSVC dtors other than the base dtor are linkonce_odr and delegate to
+ // each other bottoming out with the base dtor. Therefore we emit non-base
+ // dtors on usage, even if there is no dtor definition in the TU.
+ if (D && isa<CXXDestructorDecl>(D) &&
+ getCXXABI().useThunkForDtorVariant(cast<CXXDestructorDecl>(D),
+ GD.getDtorType()))
+ addDeferredDeclToEmit(F, GD);
+
+ // This is the first use or definition of a mangled name. If there is a
+ // deferred decl with this name, remember that we need to emit it at the end
+ // of the file.
+ auto DDI = DeferredDecls.find(MangledName);
+ if (DDI != DeferredDecls.end()) {
+ // Move the potentially referenced deferred decl to the
+ // DeferredDeclsToEmit list, and remove it from DeferredDecls (since we
+ // don't need it anymore).
+ addDeferredDeclToEmit(F, DDI->second);
+ DeferredDecls.erase(DDI);
+
+ // Otherwise, there are cases we have to worry about where we're
+ // using a declaration for which we must emit a definition but where
+ // we might not find a top-level definition:
+ // - member functions defined inline in their classes
+ // - friend functions defined inline in some class
+ // - special member functions with implicit definitions
+ // If we ever change our AST traversal to walk into class methods,
+ // this will be unnecessary.
+ //
+ // We also don't emit a definition for a function if it's going to be an
+ // entry in a vtable, unless it's already marked as used.
+ } else if (getLangOpts().CPlusPlus && D) {
+ // Look for a declaration that's lexically in a record.
+ for (const auto *FD = cast<FunctionDecl>(D)->getMostRecentDecl(); FD;
+ FD = FD->getPreviousDecl()) {
+ if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
+ if (FD->doesThisDeclarationHaveABody()) {
+ addDeferredDeclToEmit(F, GD.getWithDecl(FD));
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ // Make sure the result is of the requested type.
+ if (!IsIncompleteFunction) {
+ assert(F->getType()->getElementType() == Ty);
+ return F;
+ }
+
+ llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
+ return llvm::ConstantExpr::getBitCast(F, PTy);
+}
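+// For illustration (hypothetical C example, not part of the original
+// change):
+//   int f();              // unprototyped declaration
+//   int f(int x) { ... }  // definition with a different LLVM type
+// The first decl creates 'f' with one type; when the definition arrives,
+// the path above renames the old value, creates a fresh function under the
+// mangled name, and queues the old uses for replacement at end of CodeGen.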
+
+/// GetAddrOfFunction - Return the address of the given function. If Ty is
+/// non-null, then this function will use the specified type if it has to
+/// create it (this occurs when we see a definition of the function).
+llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
+ llvm::Type *Ty,
+ bool ForVTable,
+ bool DontDefer,
+ bool IsForDefinition) {
+ // If there was no specific requested type, just convert it now.
+ if (!Ty) {
+ const auto *FD = cast<FunctionDecl>(GD.getDecl());
+ auto CanonTy = Context.getCanonicalType(FD->getType());
+ Ty = getTypes().ConvertFunctionType(CanonTy, FD);
+ }
+
+ StringRef MangledName = getMangledName(GD);
+ return GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable, DontDefer,
+ /*IsThunk=*/false, llvm::AttributeSet(),
+ IsForDefinition);
+}
+
+/// CreateRuntimeFunction - Create a new runtime function with the specified
+/// type and name.
+llvm::Constant *
+CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy,
+ StringRef Name,
+ llvm::AttributeSet ExtraAttrs) {
+ llvm::Constant *C =
+ GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
+ /*DontDefer=*/false, /*IsThunk=*/false, ExtraAttrs);
+ if (auto *F = dyn_cast<llvm::Function>(C))
+ if (F->empty())
+ F->setCallingConv(getRuntimeCC());
+ return C;
+}
+
+/// CreateBuiltinFunction - Create a new builtin function with the specified
+/// type and name.
+llvm::Constant *
+CodeGenModule::CreateBuiltinFunction(llvm::FunctionType *FTy,
+ StringRef Name,
+ llvm::AttributeSet ExtraAttrs) {
+ llvm::Constant *C =
+ GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
+ /*DontDefer=*/false, /*IsThunk=*/false, ExtraAttrs);
+ if (auto *F = dyn_cast<llvm::Function>(C))
+ if (F->empty())
+ F->setCallingConv(getBuiltinCC());
+ return C;
+}
+
+/// isTypeConstant - Determine whether an object of this type can be emitted
+/// as a constant.
+///
+/// If ExcludeCtor is true, the duration when the object's constructor runs
+/// will not be considered. The caller will need to verify that the object is
+/// not written to during its construction.
+bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) {
+ if (!Ty.isConstant(Context) && !Ty->isReferenceType())
+ return false;
+
+ if (Context.getLangOpts().CPlusPlus) {
+ if (const CXXRecordDecl *Record
+ = Context.getBaseElementType(Ty)->getAsCXXRecordDecl())
+ return ExcludeCtor && !Record->hasMutableFields() &&
+ Record->hasTrivialDestructor();
+ }
+
+ return true;
+}
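+// For illustration (hypothetical examples, not part of the original
+// change):
+//   const int N = 42;           // constant emission: yes
+//   const std::string S = "x";  // no: non-trivial destructor
+// A C++ record qualifies only with ExcludeCtor set, no mutable fields, and
+// a trivial destructor; the caller must then check construction separately.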
+
+/// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module,
+/// create and return an llvm GlobalVariable with the specified type. If there
+/// is something in the module with the specified name, return it potentially
+/// bitcasted to the right type.
+///
+/// If D is non-null, it specifies a decl that corresponds to this. This is
+/// used to set the attributes on the global when it is first created.
+llvm::Constant *
+CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
+ llvm::PointerType *Ty,
+ const VarDecl *D) {
+ // Lookup the entry, lazily creating it if necessary.
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry) {
+ if (WeakRefReferences.erase(Entry)) {
+ if (D && !D->hasAttr<WeakAttr>())
+ Entry->setLinkage(llvm::Function::ExternalLinkage);
+ }
+
+ // Handle dropped DLL attributes.
+ if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>())
+ Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
+
+ if (Entry->getType() == Ty)
+ return Entry;
+
+ // Make sure the result is of the correct type.
+ if (Entry->getType()->getAddressSpace() != Ty->getAddressSpace())
+ return llvm::ConstantExpr::getAddrSpaceCast(Entry, Ty);
+
+ return llvm::ConstantExpr::getBitCast(Entry, Ty);
+ }
+
+ unsigned AddrSpace = GetGlobalVarAddressSpace(D, Ty->getAddressSpace());
+ auto *GV = new llvm::GlobalVariable(
+ getModule(), Ty->getElementType(), false,
+ llvm::GlobalValue::ExternalLinkage, nullptr, MangledName, nullptr,
+ llvm::GlobalVariable::NotThreadLocal, AddrSpace);
+
+ // This is the first use or definition of a mangled name. If there is a
+ // deferred decl with this name, remember that we need to emit it at the end
+ // of the file.
+ auto DDI = DeferredDecls.find(MangledName);
+ if (DDI != DeferredDecls.end()) {
+ // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
+ // list, and remove it from DeferredDecls (since we don't need it anymore).
+ addDeferredDeclToEmit(GV, DDI->second);
+ DeferredDecls.erase(DDI);
+ }
+
+ // Handle things which are present even on external declarations.
+ if (D) {
+ // FIXME: This code is overly simple and should be merged with other global
+ // handling.
+ GV->setConstant(isTypeConstant(D->getType(), false));
+
+ GV->setAlignment(getContext().getDeclAlign(D).getQuantity());
+
+ setLinkageAndVisibilityForGV(GV, D);
+
+ if (D->getTLSKind()) {
+ if (D->getTLSKind() == VarDecl::TLS_Dynamic)
+ CXXThreadLocals.push_back(D);
+ setTLSMode(GV, *D);
+ }
+
+ // If required by the ABI, treat declarations of static data members with
+ // inline initializers as definitions.
+ if (getContext().isMSStaticDataMemberInlineDefinition(D)) {
+ EmitGlobalVarDefinition(D);
+ }
+
+ // Handle XCore specific ABI requirements.
+ if (getTarget().getTriple().getArch() == llvm::Triple::xcore &&
+ D->getLanguageLinkage() == CLanguageLinkage &&
+ D->getType().isConstant(Context) &&
+ isExternallyVisible(D->getLinkageAndVisibility().getLinkage()))
+ GV->setSection(".cp.rodata");
+ }
+
+ if (AddrSpace != Ty->getAddressSpace())
+ return llvm::ConstantExpr::getAddrSpaceCast(GV, Ty);
+
+ return GV;
+}
+
+llvm::Constant *
+CodeGenModule::GetAddrOfGlobal(GlobalDecl GD,
+ bool IsForDefinition) {
+ if (isa<CXXConstructorDecl>(GD.getDecl()))
+ return getAddrOfCXXStructor(cast<CXXConstructorDecl>(GD.getDecl()),
+ getFromCtorType(GD.getCtorType()),
+ /*FnInfo=*/nullptr, /*FnType=*/nullptr,
+ /*DontDefer=*/false, IsForDefinition);
+ else if (isa<CXXDestructorDecl>(GD.getDecl()))
+ return getAddrOfCXXStructor(cast<CXXDestructorDecl>(GD.getDecl()),
+ getFromDtorType(GD.getDtorType()),
+ /*FnInfo=*/nullptr, /*FnType=*/nullptr,
+ /*DontDefer=*/false, IsForDefinition);
+ else if (isa<CXXMethodDecl>(GD.getDecl())) {
+ auto FInfo = &getTypes().arrangeCXXMethodDeclaration(
+ cast<CXXMethodDecl>(GD.getDecl()));
+ auto Ty = getTypes().GetFunctionType(*FInfo);
+ return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
+ IsForDefinition);
+ } else if (isa<FunctionDecl>(GD.getDecl())) {
+ const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
+ llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
+ return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
+ IsForDefinition);
+ } else
+ return GetAddrOfGlobalVar(cast<VarDecl>(GD.getDecl()));
+}
+
+llvm::GlobalVariable *
+CodeGenModule::CreateOrReplaceCXXRuntimeVariable(StringRef Name,
+ llvm::Type *Ty,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+ llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name);
+ llvm::GlobalVariable *OldGV = nullptr;
+
+ if (GV) {
+ // Check if the variable has the right type.
+ if (GV->getType()->getElementType() == Ty)
+ return GV;
+
+    // Because of C++ name mangling, the only way we can end up with an
+    // existing global of the same name is if it has been declared extern "C".
+ assert(GV->isDeclaration() && "Declaration has wrong type!");
+ OldGV = GV;
+ }
+
+ // Create a new variable.
+ GV = new llvm::GlobalVariable(getModule(), Ty, /*isConstant=*/true,
+ Linkage, nullptr, Name);
+
+ if (OldGV) {
+ // Replace occurrences of the old variable if needed.
+ GV->takeName(OldGV);
+
+ if (!OldGV->use_empty()) {
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtrForOldDecl);
+ }
+
+ OldGV->eraseFromParent();
+ }
+
+ if (supportsCOMDAT() && GV->isWeakForLinker() &&
+ !GV->hasAvailableExternallyLinkage())
+ GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
+
+ return GV;
+}
+
+/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
+/// given global variable. If Ty is non-null and if the global doesn't exist,
+/// then it will be created with the specified type instead of whatever the
+/// normal requested type would be.
+llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
+ llvm::Type *Ty) {
+ assert(D->hasGlobalStorage() && "Not a global variable");
+ QualType ASTTy = D->getType();
+ if (!Ty)
+ Ty = getTypes().ConvertTypeForMem(ASTTy);
+
+ llvm::PointerType *PTy =
+ llvm::PointerType::get(Ty, getContext().getTargetAddressSpace(ASTTy));
+
+ StringRef MangledName = getMangledName(D);
+ return GetOrCreateLLVMGlobal(MangledName, PTy, D);
+}
+
+/// CreateRuntimeVariable - Create a new runtime global variable with the
+/// specified type and name.
+llvm::Constant *
+CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty,
+ StringRef Name) {
+ return GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), nullptr);
+}
+
+void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
+ assert(!D->getInit() && "Cannot emit definite definitions here!");
+
+ if (!MustBeEmitted(D)) {
+ // If we have not seen a reference to this variable yet, place it
+ // into the deferred declarations table to be emitted if needed
+ // later.
+ StringRef MangledName = getMangledName(D);
+ if (!GetGlobalValue(MangledName)) {
+ DeferredDecls[MangledName] = D;
+ return;
+ }
+ }
+
+ // The tentative definition is the only definition.
+ EmitGlobalVarDefinition(D);
+}
+
+CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
+ return Context.toCharUnitsFromBits(
+ getDataLayout().getTypeStoreSizeInBits(Ty));
+}
+
+unsigned CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D,
+ unsigned AddrSpace) {
+ if (LangOpts.CUDA && LangOpts.CUDAIsDevice) {
+ if (D->hasAttr<CUDAConstantAttr>())
+ AddrSpace = getContext().getTargetAddressSpace(LangAS::cuda_constant);
+ else if (D->hasAttr<CUDASharedAttr>())
+ AddrSpace = getContext().getTargetAddressSpace(LangAS::cuda_shared);
+ else
+ AddrSpace = getContext().getTargetAddressSpace(LangAS::cuda_device);
+ }
+
+ return AddrSpace;
+}
+
+template<typename SomeDecl>
+void CodeGenModule::MaybeHandleStaticInExternC(const SomeDecl *D,
+ llvm::GlobalValue *GV) {
+ if (!getLangOpts().CPlusPlus)
+ return;
+
+ // Must have 'used' attribute, or else inline assembly can't rely on
+ // the name existing.
+ if (!D->template hasAttr<UsedAttr>())
+ return;
+
+ // Must have internal linkage and an ordinary name.
+ if (!D->getIdentifier() || D->getFormalLinkage() != InternalLinkage)
+ return;
+
+ // Must be in an extern "C" context. Entities declared directly within
+ // a record are not extern "C" even if the record is in such a context.
+ const SomeDecl *First = D->getFirstDecl();
+ if (First->getDeclContext()->isRecord() || !First->isInExternCContext())
+ return;
+
+ // OK, this is an internal linkage entity inside an extern "C" linkage
+ // specification. Make a note of that so we can give it the "expected"
+ // mangled name if nothing else is using that name.
+ std::pair<StaticExternCMap::iterator, bool> R =
+ StaticExternCValues.insert(std::make_pair(D->getIdentifier(), GV));
+
+ // If we have multiple internal linkage entities with the same name
+ // in extern "C" regions, none of them gets that name.
+ if (!R.second)
+ R.first->second = nullptr;
+}
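+// For illustration (hypothetical example, not part of the original change):
+//   extern "C" {
+//     __attribute__((used)) static int flag;  // internal linkage
+//   }
+// If nothing else claims the name, this variable gets the plain name "flag"
+// so inline assembly referring to it keeps working; if two such entities
+// collide, the map entry is nulled and neither receives the name.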
+
+static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) {
+ if (!CGM.supportsCOMDAT())
+ return false;
+
+ if (D.hasAttr<SelectAnyAttr>())
+ return true;
+
+ GVALinkage Linkage;
+ if (auto *VD = dyn_cast<VarDecl>(&D))
+ Linkage = CGM.getContext().GetGVALinkageForVariable(VD);
+ else
+ Linkage = CGM.getContext().GetGVALinkageForFunction(cast<FunctionDecl>(&D));
+
+ switch (Linkage) {
+ case GVA_Internal:
+ case GVA_AvailableExternally:
+ case GVA_StrongExternal:
+ return false;
+ case GVA_DiscardableODR:
+ case GVA_StrongODR:
+ return true;
+ }
+ llvm_unreachable("No such linkage");
+}
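+// For illustration (hypothetical examples, not part of the original
+// change):
+//   __declspec(selectany) int cfg = 1;  // always COMDAT
+//   inline int get() { return cfg; }    // GVA_DiscardableODR -> COMDAT
+//   static int priv;                    // GVA_Internal -> no COMDAT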
+
+void CodeGenModule::maybeSetTrivialComdat(const Decl &D,
+ llvm::GlobalObject &GO) {
+ if (!shouldBeInCOMDAT(*this, D))
+ return;
+ GO.setComdat(TheModule.getOrInsertComdat(GO.getName()));
+}
+
+void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
+ llvm::Constant *Init = nullptr;
+ QualType ASTTy = D->getType();
+ CXXRecordDecl *RD = ASTTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ bool NeedsGlobalCtor = false;
+ bool NeedsGlobalDtor = RD && !RD->hasTrivialDestructor();
+
+ const VarDecl *InitDecl;
+ const Expr *InitExpr = D->getAnyInitializer(InitDecl);
+
+ // CUDA E.2.4.1 "__shared__ variables cannot have an initialization as part
+ // of their declaration."
+ if (getLangOpts().CPlusPlus && getLangOpts().CUDAIsDevice
+ && D->hasAttr<CUDASharedAttr>()) {
+ if (InitExpr) {
+ const auto *C = dyn_cast<CXXConstructExpr>(InitExpr);
+ if (C == nullptr || !C->getConstructor()->hasTrivialBody())
+ Error(D->getLocation(),
+ "__shared__ variable cannot have an initialization.");
+ }
+ Init = llvm::UndefValue::get(getTypes().ConvertType(ASTTy));
+ } else if (!InitExpr) {
+ // This is a tentative definition; tentative definitions are
+ // implicitly initialized with { 0 }.
+ //
+ // Note that tentative definitions are only emitted at the end of
+ // a translation unit, so they should never have incomplete
+ // type. In addition, EmitTentativeDefinition makes sure that we
+ // never attempt to emit a tentative definition if a real one
+    // exists. A use may still exist, however, so we may still need
+    // to do a RAUW.
+ assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
+ Init = EmitNullConstant(D->getType());
+ } else {
+ initializedGlobalDecl = GlobalDecl(D);
+ Init = EmitConstantInit(*InitDecl);
+
+ if (!Init) {
+ QualType T = InitExpr->getType();
+ if (D->getType()->isReferenceType())
+ T = D->getType();
+
+ if (getLangOpts().CPlusPlus) {
+ Init = EmitNullConstant(T);
+ NeedsGlobalCtor = true;
+ } else {
+ ErrorUnsupported(D, "static initializer");
+ Init = llvm::UndefValue::get(getTypes().ConvertType(T));
+ }
+ } else {
+ // We don't need an initializer, so remove the entry for the delayed
+ // initializer position (just in case this entry was delayed) if we
+ // also don't need to register a destructor.
+ if (getLangOpts().CPlusPlus && !NeedsGlobalDtor)
+ DelayedCXXInitPosition.erase(D);
+ }
+ }
+
+ llvm::Type* InitType = Init->getType();
+ llvm::Constant *Entry = GetAddrOfGlobalVar(D, InitType);
+
+ // Strip off a bitcast if we got one back.
+ if (auto *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
+ assert(CE->getOpcode() == llvm::Instruction::BitCast ||
+ CE->getOpcode() == llvm::Instruction::AddrSpaceCast ||
+ // All zero index gep.
+ CE->getOpcode() == llvm::Instruction::GetElementPtr);
+ Entry = CE->getOperand(0);
+ }
+
+ // Entry is now either a Function or GlobalVariable.
+ auto *GV = dyn_cast<llvm::GlobalVariable>(Entry);
+
+ // We have a definition after a declaration with the wrong type.
+ // We must make a new GlobalVariable* and update everything that used OldGV
+ // (a declaration or tentative definition) with the new GlobalVariable*
+ // (which will be a definition).
+ //
+ // This happens if there is a prototype for a global (e.g.
+ // "extern int x[];") and then a definition of a different type (e.g.
+ // "int x[10];"). This also happens when an initializer has a different type
+ // from the type of the global (this happens with unions).
+ if (!GV ||
+ GV->getType()->getElementType() != InitType ||
+ GV->getType()->getAddressSpace() !=
+ GetGlobalVarAddressSpace(D, getContext().getTargetAddressSpace(ASTTy))) {
+
+ // Move the old entry aside so that we'll create a new one.
+ Entry->setName(StringRef());
+
+    // Make a new global with the correct type; this is now guaranteed to work.
+ GV = cast<llvm::GlobalVariable>(GetAddrOfGlobalVar(D, InitType));
+
+    // Replace all uses of the old global with the new global.
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, Entry->getType());
+ Entry->replaceAllUsesWith(NewPtrForOldDecl);
+
+ // Erase the old global, since it is no longer used.
+ cast<llvm::GlobalValue>(Entry)->eraseFromParent();
+ }
+
+ MaybeHandleStaticInExternC(D, GV);
+
+ if (D->hasAttr<AnnotateAttr>())
+ AddGlobalAnnotations(D, GV);
+
+ // CUDA B.2.1 "The __device__ qualifier declares a variable that resides on
+ // the device. [...]"
+ // CUDA B.2.2 "The __constant__ qualifier, optionally used together with
+ // __device__, declares a variable that: [...]
+ // Is accessible from all the threads within the grid and from the host
+ // through the runtime library (cudaGetSymbolAddress() / cudaGetSymbolSize()
+ // / cudaMemcpyToSymbol() / cudaMemcpyFromSymbol())."
+ if (GV && LangOpts.CUDA && LangOpts.CUDAIsDevice &&
+ (D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>())) {
+ GV->setExternallyInitialized(true);
+ }
+ GV->setInitializer(Init);
+
+ // If it is safe to mark the global 'constant', do so now.
+ GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor &&
+ isTypeConstant(D->getType(), true));
+
+ // If it is in a read-only section, mark it 'constant'.
+ if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
+ const ASTContext::SectionInfo &SI = Context.SectionInfos[SA->getName()];
+ if ((SI.SectionFlags & ASTContext::PSF_Write) == 0)
+ GV->setConstant(true);
+ }
+
+ GV->setAlignment(getContext().getDeclAlign(D).getQuantity());
+
+ // Set the llvm linkage type as appropriate.
+ llvm::GlobalValue::LinkageTypes Linkage =
+ getLLVMLinkageVarDefinition(D, GV->isConstant());
+
+ // On Darwin, if the normal linkage of a C++ thread_local variable is
+ // LinkOnce or Weak, we keep the normal linkage to prevent multiple
+ // copies within a linkage unit; otherwise, the backing variable has
+ // internal linkage and all accesses should just be calls to the
+ // Itanium-specified entry point, which has the normal linkage of the
+ // variable. This is to preserve the ability to change the implementation
+ // behind the scenes.
+ if (!D->isStaticLocal() && D->getTLSKind() == VarDecl::TLS_Dynamic &&
+ Context.getTargetInfo().getTriple().isOSDarwin() &&
+ !llvm::GlobalVariable::isLinkOnceLinkage(Linkage) &&
+ !llvm::GlobalVariable::isWeakLinkage(Linkage))
+ Linkage = llvm::GlobalValue::InternalLinkage;
+
+ GV->setLinkage(Linkage);
+ if (D->hasAttr<DLLImportAttr>())
+ GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
+ else if (D->hasAttr<DLLExportAttr>())
+ GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
+ else
+ GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
+
+ if (Linkage == llvm::GlobalVariable::CommonLinkage)
+ // common vars aren't constant even if declared const.
+ GV->setConstant(false);
+
+ setNonAliasAttributes(D, GV);
+
+ if (D->getTLSKind() && !GV->isThreadLocal()) {
+ if (D->getTLSKind() == VarDecl::TLS_Dynamic)
+ CXXThreadLocals.push_back(D);
+ setTLSMode(GV, *D);
+ }
+
+ maybeSetTrivialComdat(*D, *GV);
+
+ // Emit the initializer function if necessary.
+ if (NeedsGlobalCtor || NeedsGlobalDtor)
+ EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);
+
+ SanitizerMD->reportGlobalToASan(GV, *D, NeedsGlobalCtor);
+
+ // Emit global variable debug information.
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ if (getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo)
+ DI->EmitGlobalVariable(GV, D);
+}
+
+static bool isVarDeclStrongDefinition(const ASTContext &Context,
+ CodeGenModule &CGM, const VarDecl *D,
+ bool NoCommon) {
+  // Don't give variables common linkage if -fno-common was specified or the
+  // variable has a NoCommon attribute, unless that is overridden by a Common
+  // attribute.
+ if ((NoCommon || D->hasAttr<NoCommonAttr>()) && !D->hasAttr<CommonAttr>())
+ return true;
+
+ // C11 6.9.2/2:
+ // A declaration of an identifier for an object that has file scope without
+ // an initializer, and without a storage-class specifier or with the
+ // storage-class specifier static, constitutes a tentative definition.
+ if (D->getInit() || D->hasExternalStorage())
+ return true;
+
+ // A variable cannot be both common and exist in a section.
+ if (D->hasAttr<SectionAttr>())
+ return true;
+
+ // Thread local vars aren't considered common linkage.
+ if (D->getTLSKind())
+ return true;
+
+ // Tentative definitions marked with WeakImportAttr are true definitions.
+ if (D->hasAttr<WeakImportAttr>())
+ return true;
+
+ // A variable cannot be both common and exist in a comdat.
+ if (shouldBeInCOMDAT(CGM, *D))
+ return true;
+
+  // Declarations with a required alignment do not have common linkage in
+  // MSVC mode.
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ if (D->hasAttr<AlignedAttr>())
+ return true;
+ QualType VarType = D->getType();
+ if (Context.isAlignmentRequired(VarType))
+ return true;
+
+ if (const auto *RT = VarType->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ for (const FieldDecl *FD : RD->fields()) {
+ if (FD->isBitField())
+ continue;
+ if (FD->hasAttr<AlignedAttr>())
+ return true;
+ if (Context.isAlignmentRequired(FD->getType()))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
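+// For illustration (hypothetical C examples, not part of the original
+// change), with common linkage available:
+//   int a;                                    // tentative: may be common
+//   int b = 0;                                // initializer: strong
+//   __thread int c;                           // TLS: strong
+//   int d __attribute__((section(".mine")));  // in a section: strong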
+
+llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageForDeclarator(
+ const DeclaratorDecl *D, GVALinkage Linkage, bool IsConstantVariable) {
+ if (Linkage == GVA_Internal)
+ return llvm::Function::InternalLinkage;
+
+ if (D->hasAttr<WeakAttr>()) {
+ if (IsConstantVariable)
+ return llvm::GlobalVariable::WeakODRLinkage;
+ else
+ return llvm::GlobalVariable::WeakAnyLinkage;
+ }
+
+ // We are guaranteed to have a strong definition somewhere else,
+ // so we can use available_externally linkage.
+ if (Linkage == GVA_AvailableExternally)
+ return llvm::Function::AvailableExternallyLinkage;
+
+ // Note that Apple's kernel linker doesn't support symbol
+ // coalescing, so we need to avoid linkonce and weak linkages there.
+ // Normally, this means we just map to internal, but for explicit
+ // instantiations we'll map to external.
+
+ // In C++, the compiler has to emit a definition in every translation unit
+ // that references the function. We should use linkonce_odr because
+ // a) if all references in this translation unit are optimized away, we
+ // don't need to codegen it. b) if the function persists, it needs to be
+ // merged with other definitions. c) C++ has the ODR, so we know the
+ // definition is dependable.
+ if (Linkage == GVA_DiscardableODR)
+ return !Context.getLangOpts().AppleKext ? llvm::Function::LinkOnceODRLinkage
+ : llvm::Function::InternalLinkage;
+
+ // An explicit instantiation of a template has weak linkage, since
+ // explicit instantiations can occur in multiple translation units
+ // and must all be equivalent. However, we are not allowed to
+ // throw away these explicit instantiations.
+ if (Linkage == GVA_StrongODR)
+ return !Context.getLangOpts().AppleKext ? llvm::Function::WeakODRLinkage
+ : llvm::Function::ExternalLinkage;
+
+ // C++ doesn't have tentative definitions and thus cannot have common
+ // linkage.
+ if (!getLangOpts().CPlusPlus && isa<VarDecl>(D) &&
+ !isVarDeclStrongDefinition(Context, *this, cast<VarDecl>(D),
+ CodeGenOpts.NoCommon))
+ return llvm::GlobalVariable::CommonLinkage;
+
+ // selectany symbols are externally visible, so use weak instead of
+ // linkonce. MSVC optimizes away references to const selectany globals, so
+ // all definitions should be the same and ODR linkage should be used.
+ // http://msdn.microsoft.com/en-us/library/5tkz6s71.aspx
+ if (D->hasAttr<SelectAnyAttr>())
+ return llvm::GlobalVariable::WeakODRLinkage;
+
+ // Otherwise, we have strong external linkage.
+ assert(Linkage == GVA_StrongExternal);
+ return llvm::GlobalVariable::ExternalLinkage;
+}
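+// Summary of the mapping above (illustrative, non-kext case):
+//   GVA_Internal            -> internal
+//   weak attribute          -> weak_odr (constants) / weak (otherwise)
+//   GVA_AvailableExternally -> available_externally
+//   GVA_DiscardableODR      -> linkonce_odr
+//   GVA_StrongODR           -> weak_odr
+//   C tentative definition  -> common
+//   selectany               -> weak_odr
+//   GVA_StrongExternal      -> external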
+
+llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageVarDefinition(
+ const VarDecl *VD, bool IsConstant) {
+ GVALinkage Linkage = getContext().GetGVALinkageForVariable(VD);
+ return getLLVMLinkageForDeclarator(VD, Linkage, IsConstant);
+}
+
+/// Replace the uses of a function that was declared with a non-proto type.
+/// We want to silently drop extra arguments from call sites.
+static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
+ llvm::Function *newFn) {
+ // Fast path.
+ if (old->use_empty()) return;
+
+ llvm::Type *newRetTy = newFn->getReturnType();
+ SmallVector<llvm::Value*, 4> newArgs;
+ SmallVector<llvm::OperandBundleDef, 1> newBundles;
+
+ for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end();
+ ui != ue; ) {
+ llvm::Value::use_iterator use = ui++; // Increment before the use is erased.
+ llvm::User *user = use->getUser();
+
+ // Recognize and replace uses of bitcasts. Most calls to
+ // unprototyped functions will use bitcasts.
+ if (auto *bitcast = dyn_cast<llvm::ConstantExpr>(user)) {
+ if (bitcast->getOpcode() == llvm::Instruction::BitCast)
+ replaceUsesOfNonProtoConstant(bitcast, newFn);
+ continue;
+ }
+
+ // Recognize calls to the function.
+ llvm::CallSite callSite(user);
+ if (!callSite) continue;
+ if (!callSite.isCallee(&*use)) continue;
+
+ // If the return types don't match exactly, then we can't
+ // transform this call unless it's dead.
+ if (callSite->getType() != newRetTy && !callSite->use_empty())
+ continue;
+
+ // Get the call site's attribute list.
+ SmallVector<llvm::AttributeSet, 8> newAttrs;
+ llvm::AttributeSet oldAttrs = callSite.getAttributes();
+
+ // Collect any return attributes from the call.
+ if (oldAttrs.hasAttributes(llvm::AttributeSet::ReturnIndex))
+ newAttrs.push_back(
+ llvm::AttributeSet::get(newFn->getContext(),
+ oldAttrs.getRetAttributes()));
+
+ // If the function was passed too few arguments, don't transform.
+ unsigned newNumArgs = newFn->arg_size();
+ if (callSite.arg_size() < newNumArgs) continue;
+
+ // If extra arguments were passed, we silently drop them.
+ // If any of the types mismatch, we don't transform.
+ unsigned argNo = 0;
+ bool dontTransform = false;
+ for (llvm::Function::arg_iterator ai = newFn->arg_begin(),
+ ae = newFn->arg_end(); ai != ae; ++ai, ++argNo) {
+ if (callSite.getArgument(argNo)->getType() != ai->getType()) {
+ dontTransform = true;
+ break;
+ }
+
+ // Add any parameter attributes.
+ if (oldAttrs.hasAttributes(argNo + 1))
+ newAttrs.
+ push_back(llvm::
+ AttributeSet::get(newFn->getContext(),
+ oldAttrs.getParamAttributes(argNo + 1)));
+ }
+ if (dontTransform)
+ continue;
+
+ if (oldAttrs.hasAttributes(llvm::AttributeSet::FunctionIndex))
+ newAttrs.push_back(llvm::AttributeSet::get(newFn->getContext(),
+ oldAttrs.getFnAttributes()));
+
+ // Okay, we can transform this. Create the new call instruction and copy
+ // over the required information.
+ newArgs.append(callSite.arg_begin(), callSite.arg_begin() + argNo);
+
+ // Copy over any operand bundles.
+ callSite.getOperandBundlesAsDefs(newBundles);
+
+ llvm::CallSite newCall;
+ if (callSite.isCall()) {
+ newCall = llvm::CallInst::Create(newFn, newArgs, newBundles, "",
+ callSite.getInstruction());
+ } else {
+ auto *oldInvoke = cast<llvm::InvokeInst>(callSite.getInstruction());
+ newCall = llvm::InvokeInst::Create(newFn,
+ oldInvoke->getNormalDest(),
+ oldInvoke->getUnwindDest(),
+ newArgs, newBundles, "",
+ callSite.getInstruction());
+ }
+ newArgs.clear(); // for the next iteration
+
+ if (!newCall->getType()->isVoidTy())
+ newCall->takeName(callSite.getInstruction());
+ newCall.setAttributes(
+ llvm::AttributeSet::get(newFn->getContext(), newAttrs));
+ newCall.setCallingConv(callSite.getCallingConv());
+
+ // Finally, remove the old call, replacing any uses with the new one.
+ if (!callSite->use_empty())
+ callSite->replaceAllUsesWith(newCall.getInstruction());
+
+ // Copy debug location attached to CI.
+ if (callSite->getDebugLoc())
+ newCall->setDebugLoc(callSite->getDebugLoc());
+
+ callSite->eraseFromParent();
+ }
+}
+
+/// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we
+/// implement a function with no prototype, e.g. "int foo() {}". If there are
+/// existing call uses of the old function in the module, this adjusts them to
+/// call the new function directly.
+///
+/// This is not just a cleanup: the always_inline pass requires direct calls to
+/// functions to be able to inline them. If there is a bitcast in the way, it
+/// won't inline them. Instcombine normally deletes these calls, but it isn't
+/// run at -O0.
+static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
+ llvm::Function *NewFn) {
+ // If we're redefining a global as a function, don't transform it.
+ if (!isa<llvm::Function>(Old)) return;
+
+ replaceUsesOfNonProtoConstant(Old, NewFn);
+}
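+// For illustration (hypothetical C example, not part of the original
+// change):
+//   void f();                    // no prototype
+//   void g(void) { f(42); }      // call through the unprototyped decl
+//   void f(int x) { }            // real definition arrives later
+// The rewrite turns the bitcast-and-call in 'g' into a direct call to the
+// new 'f'; the argument types match here, so '42' is kept, while extra
+// trailing arguments would be dropped silently.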
+
+void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
+ TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind();
+ // If we have a definition, this might be a deferred decl. If the
+ // instantiation is explicit, make sure we emit it at the end.
+ if (VD->getDefinition() && TSK == TSK_ExplicitInstantiationDefinition)
+ GetAddrOfGlobalVar(VD);
+
+ EmitTopLevelDecl(VD);
+}
+
+void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
+ llvm::GlobalValue *GV) {
+ const auto *D = cast<FunctionDecl>(GD.getDecl());
+
+ // Compute the function info and LLVM type.
+ const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
+ llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
+
+ // Get or create the prototype for the function.
+ if (!GV || (GV->getType()->getElementType() != Ty))
+ GV = cast<llvm::GlobalValue>(GetAddrOfFunction(GD, Ty, /*ForVTable=*/false,
+ /*DontDefer=*/true,
+ /*IsForDefinition=*/true));
+
+ // Already emitted.
+ if (!GV->isDeclaration())
+ return;
+
+ // We need to set linkage and visibility on the function before
+ // generating code for it because various parts of IR generation
+ // want to propagate this information down (e.g. to local static
+ // declarations).
+ auto *Fn = cast<llvm::Function>(GV);
+ setFunctionLinkage(GD, Fn);
+ setFunctionDLLStorageClass(GD, Fn);
+
+ // FIXME: this is redundant with part of setFunctionDefinitionAttributes
+ setGlobalVisibility(Fn, D);
+
+ MaybeHandleStaticInExternC(D, Fn);
+
+ maybeSetTrivialComdat(*D, *Fn);
+
+ CodeGenFunction(*this).GenerateCode(D, Fn, FI);
+
+ setFunctionDefinitionAttributes(D, Fn);
+ SetLLVMFunctionAttributesForDefinition(D, Fn);
+
+ if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
+ AddGlobalCtor(Fn, CA->getPriority());
+ if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
+ AddGlobalDtor(Fn, DA->getPriority());
+ if (D->hasAttr<AnnotateAttr>())
+ AddGlobalAnnotations(D, Fn);
+}
+
+void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
+ const auto *D = cast<ValueDecl>(GD.getDecl());
+ const AliasAttr *AA = D->getAttr<AliasAttr>();
+ assert(AA && "Not an alias?");
+
+ StringRef MangledName = getMangledName(GD);
+
+ if (AA->getAliasee() == MangledName) {
+ Diags.Report(AA->getLocation(), diag::err_cyclic_alias);
+ return;
+ }
+
+ // If there is a definition in the module, then it wins over the alias.
+  // This is dubious, but we allow it to be safe; just ignore the alias.
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry && !Entry->isDeclaration())
+ return;
+
+ Aliases.push_back(GD);
+
+ llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
+
+  // Create a reference to the named value. This ensures that it is emitted
+  // if it is a deferred decl.
+ llvm::Constant *Aliasee;
+ if (isa<llvm::FunctionType>(DeclTy))
+ Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GD,
+ /*ForVTable=*/false);
+ else
+ Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
+ llvm::PointerType::getUnqual(DeclTy),
+ /*D=*/nullptr);
+
+ // Create the new alias itself, but don't set a name yet.
+ auto *GA = llvm::GlobalAlias::create(
+ DeclTy, 0, llvm::Function::ExternalLinkage, "", Aliasee, &getModule());
+
+ if (Entry) {
+ if (GA->getAliasee() == Entry) {
+ Diags.Report(AA->getLocation(), diag::err_cyclic_alias);
+ return;
+ }
+
+ assert(Entry->isDeclaration());
+
+ // If there is a declaration in the module, then we had an extern followed
+ // by the alias, as in:
+ // extern int test6();
+ // ...
+ // int test6() __attribute__((alias("test7")));
+ //
+ // Remove it and replace uses of it with the alias.
+ GA->takeName(Entry);
+
+ Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GA,
+ Entry->getType()));
+ Entry->eraseFromParent();
+ } else {
+ GA->setName(MangledName);
+ }
+
+ // Set attributes which are particular to an alias; this is a
+ // specialization of the attributes which may be set on a global
+ // variable/function.
+ if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakRefAttr>() ||
+ D->isWeakImported()) {
+ GA->setLinkage(llvm::Function::WeakAnyLinkage);
+ }
+
+ if (const auto *VD = dyn_cast<VarDecl>(D))
+ if (VD->getTLSKind())
+ setTLSMode(GA, *VD);
+
+ setAliasAttributes(D, GA);
+}
+
+llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,
+ ArrayRef<llvm::Type*> Tys) {
+ return llvm::Intrinsic::getDeclaration(&getModule(), (llvm::Intrinsic::ID)IID,
+ Tys);
+}
+
+static llvm::StringMapEntry<llvm::GlobalVariable *> &
+GetConstantCFStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
+ const StringLiteral *Literal, bool TargetIsLSB,
+ bool &IsUTF16, unsigned &StringLength) {
+ StringRef String = Literal->getString();
+ unsigned NumBytes = String.size();
+
+ // Check for simple case.
+ if (!Literal->containsNonAsciiOrNull()) {
+ StringLength = NumBytes;
+ return *Map.insert(std::make_pair(String, nullptr)).first;
+ }
+
+ // Otherwise, convert the UTF8 literals into a string of shorts.
+ IsUTF16 = true;
+
+ SmallVector<UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls.
+ const UTF8 *FromPtr = (const UTF8 *)String.data();
+ UTF16 *ToPtr = &ToBuf[0];
+
+ (void)ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
+ &ToPtr, ToPtr + NumBytes,
+ strictConversion);
+
+  // ConvertUTF8toUTF16 advances ToPtr; the converted length is its offset
+  // from the start of the buffer.
+ StringLength = ToPtr - &ToBuf[0];
+
+ // Add an explicit null.
+ *ToPtr = 0;
+ return *Map.insert(std::make_pair(
+ StringRef(reinterpret_cast<const char *>(ToBuf.data()),
+ (StringLength + 1) * 2),
+ nullptr)).first;
+}
+
+static llvm::StringMapEntry<llvm::GlobalVariable *> &
+GetConstantStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
+ const StringLiteral *Literal, unsigned &StringLength) {
+ StringRef String = Literal->getString();
+ StringLength = String.size();
+ return *Map.insert(std::make_pair(String, nullptr)).first;
+}
+
+ConstantAddress
+CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
+ unsigned StringLength = 0;
+ bool isUTF16 = false;
+ llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
+ GetConstantCFStringEntry(CFConstantStringMap, Literal,
+ getDataLayout().isLittleEndian(), isUTF16,
+ StringLength);
+
+ if (auto *C = Entry.second)
+ return ConstantAddress(C, CharUnits::fromQuantity(C->getAlignment()));
+
+ llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
+ llvm::Constant *Zeros[] = { Zero, Zero };
+ llvm::Value *V;
+
+ // If we don't already have it, get __CFConstantStringClassReference.
+ if (!CFConstantStringClassRef) {
+ llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
+ Ty = llvm::ArrayType::get(Ty, 0);
+ llvm::Constant *GV = CreateRuntimeVariable(Ty,
+ "__CFConstantStringClassReference");
+ // Decay array -> ptr
+ V = llvm::ConstantExpr::getGetElementPtr(Ty, GV, Zeros);
+ CFConstantStringClassRef = V;
+ }
+ else
+ V = CFConstantStringClassRef;
+
+ QualType CFTy = getContext().getCFConstantStringType();
+
+ auto *STy = cast<llvm::StructType>(getTypes().ConvertType(CFTy));
+
+ llvm::Constant *Fields[4];
+
+ // Class pointer.
+ Fields[0] = cast<llvm::ConstantExpr>(V);
+
+ // Flags.
+ llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
+ Fields[1] = isUTF16 ? llvm::ConstantInt::get(Ty, 0x07d0) :
+ llvm::ConstantInt::get(Ty, 0x07C8);
+
+ // String pointer.
+ llvm::Constant *C = nullptr;
+ if (isUTF16) {
+ auto Arr = llvm::makeArrayRef(
+ reinterpret_cast<uint16_t *>(const_cast<char *>(Entry.first().data())),
+ Entry.first().size() / 2);
+ C = llvm::ConstantDataArray::get(VMContext, Arr);
+ } else {
+ C = llvm::ConstantDataArray::getString(VMContext, Entry.first());
+ }
+
+ // Note: -fwritable-strings doesn't make the backing store strings of
+ // CFStrings writable. (See <rdar://problem/10657500>)
+ auto *GV =
+ new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true,
+ llvm::GlobalValue::PrivateLinkage, C, ".str");
+ GV->setUnnamedAddr(true);
+ // Don't enforce the target's minimum global alignment, since the only use
+ // of the string is via this class initializer.
+ // FIXME: We set the section explicitly to avoid a bug in ld64 224.1. Without
+ // it LLVM can merge the string with a non unnamed_addr one during LTO. Doing
+ // that changes the section it ends in, which surprises ld64.
+ if (isUTF16) {
+ CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
+ GV->setAlignment(Align.getQuantity());
+ GV->setSection("__TEXT,__ustring");
+ } else {
+ CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy);
+ GV->setAlignment(Align.getQuantity());
+ GV->setSection("__TEXT,__cstring,cstring_literals");
+ }
+
+ // String.
+ Fields[2] =
+ llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);
+
+ if (isUTF16)
+ // Cast the UTF16 string to the correct type.
+ Fields[2] = llvm::ConstantExpr::getBitCast(Fields[2], Int8PtrTy);
+
+ // String length.
+ Ty = getTypes().ConvertType(getContext().LongTy);
+ Fields[3] = llvm::ConstantInt::get(Ty, StringLength);
+
+ CharUnits Alignment = getPointerAlign();
+
+ // The struct.
+ C = llvm::ConstantStruct::get(STy, Fields);
+ GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
+ llvm::GlobalVariable::PrivateLinkage, C,
+ "_unnamed_cfstring_");
+ GV->setSection("__DATA,__cfstring");
+ GV->setAlignment(Alignment.getQuantity());
+ Entry.second = GV;
+
+ return ConstantAddress(GV, Alignment);
+}
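+// Layout sketch (illustrative, not part of the original change): for @"hi"
+// on a Darwin target this emits roughly
+//   .str               = c"hi\00"         ; __TEXT,__cstring
+//   _unnamed_cfstring_ = { isa = __CFConstantStringClassReference,
+//                          flags = 0x7C8, ; 0x7D0 for UTF-16 payloads
+//                          str = .str, length = 2 }  ; __DATA,__cfstring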
+
+ConstantAddress
+CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
+ unsigned StringLength = 0;
+ llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
+ GetConstantStringEntry(CFConstantStringMap, Literal, StringLength);
+
+ if (auto *C = Entry.second)
+ return ConstantAddress(C, CharUnits::fromQuantity(C->getAlignment()));
+
+ llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
+ llvm::Constant *Zeros[] = { Zero, Zero };
+ llvm::Value *V;
+ // If we don't already have it, get _NSConstantStringClassReference.
+ if (!ConstantStringClassRef) {
+ std::string StringClass(getLangOpts().ObjCConstantStringClass);
+ llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
+ llvm::Constant *GV;
+ if (LangOpts.ObjCRuntime.isNonFragile()) {
+ std::string str =
+ StringClass.empty() ? "OBJC_CLASS_$_NSConstantString"
+ : "OBJC_CLASS_$_" + StringClass;
+ GV = getObjCRuntime().GetClassGlobal(str);
+ // Make sure the result is of the correct type.
+ llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
+ V = llvm::ConstantExpr::getBitCast(GV, PTy);
+ ConstantStringClassRef = V;
+ } else {
+ std::string str =
+ StringClass.empty() ? "_NSConstantStringClassReference"
+ : "_" + StringClass + "ClassReference";
+ llvm::Type *PTy = llvm::ArrayType::get(Ty, 0);
+ GV = CreateRuntimeVariable(PTy, str);
+ // Decay array -> ptr
+ V = llvm::ConstantExpr::getGetElementPtr(PTy, GV, Zeros);
+ ConstantStringClassRef = V;
+ }
+ } else
+ V = ConstantStringClassRef;
+
+ if (!NSConstantStringType) {
+ // Construct the type for a constant NSString.
+ RecordDecl *D = Context.buildImplicitRecord("__builtin_NSString");
+ D->startDefinition();
+
+ QualType FieldTypes[3];
+
+ // const int *isa;
+ FieldTypes[0] = Context.getPointerType(Context.IntTy.withConst());
+ // const char *str;
+ FieldTypes[1] = Context.getPointerType(Context.CharTy.withConst());
+ // unsigned int length;
+ FieldTypes[2] = Context.UnsignedIntTy;
+
+ // Create fields
+ for (unsigned i = 0; i < 3; ++i) {
+ FieldDecl *Field = FieldDecl::Create(Context, D,
+ SourceLocation(),
+ SourceLocation(), nullptr,
+ FieldTypes[i], /*TInfo=*/nullptr,
+ /*BitWidth=*/nullptr,
+ /*Mutable=*/false,
+ ICIS_NoInit);
+ Field->setAccess(AS_public);
+ D->addDecl(Field);
+ }
+
+ D->completeDefinition();
+ QualType NSTy = Context.getTagDeclType(D);
+ NSConstantStringType = cast<llvm::StructType>(getTypes().ConvertType(NSTy));
+ }
+
+ llvm::Constant *Fields[3];
+
+ // Class pointer.
+ Fields[0] = cast<llvm::ConstantExpr>(V);
+
+ // String pointer.
+ llvm::Constant *C =
+ llvm::ConstantDataArray::getString(VMContext, Entry.first());
+
+ llvm::GlobalValue::LinkageTypes Linkage;
+ bool isConstant;
+ Linkage = llvm::GlobalValue::PrivateLinkage;
+ isConstant = !LangOpts.WritableStrings;
+
+ auto *GV = new llvm::GlobalVariable(getModule(), C->getType(), isConstant,
+ Linkage, C, ".str");
+ GV->setUnnamedAddr(true);
+ // Don't enforce the target's minimum global alignment, since the only use
+ // of the string is via this class initializer.
+ CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy);
+ GV->setAlignment(Align.getQuantity());
+ Fields[1] =
+ llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);
+
+ // String length.
+ llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
+ Fields[2] = llvm::ConstantInt::get(Ty, StringLength);
+
+ // The struct.
+ CharUnits Alignment = getPointerAlign();
+ C = llvm::ConstantStruct::get(NSConstantStringType, Fields);
+ GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
+ llvm::GlobalVariable::PrivateLinkage, C,
+ "_unnamed_nsstring_");
+ GV->setAlignment(Alignment.getQuantity());
+ const char *NSStringSection = "__OBJC,__cstring_object,regular,no_dead_strip";
+ const char *NSStringNonFragileABISection =
+ "__DATA,__objc_stringobj,regular,no_dead_strip";
+ // FIXME. Fix section.
+ GV->setSection(LangOpts.ObjCRuntime.isNonFragile()
+ ? NSStringNonFragileABISection
+ : NSStringSection);
+ Entry.second = GV;
+
+ return ConstantAddress(GV, Alignment);
+}
+
+QualType CodeGenModule::getObjCFastEnumerationStateType() {
+ if (ObjCFastEnumerationStateType.isNull()) {
+ RecordDecl *D = Context.buildImplicitRecord("__objcFastEnumerationState");
+ D->startDefinition();
+
+ QualType FieldTypes[] = {
+ Context.UnsignedLongTy,
+ Context.getPointerType(Context.getObjCIdType()),
+ Context.getPointerType(Context.UnsignedLongTy),
+ Context.getConstantArrayType(Context.UnsignedLongTy,
+ llvm::APInt(32, 5), ArrayType::Normal, 0)
+ };
+
+ for (size_t i = 0; i < 4; ++i) {
+ FieldDecl *Field = FieldDecl::Create(Context,
+ D,
+ SourceLocation(),
+ SourceLocation(), nullptr,
+ FieldTypes[i], /*TInfo=*/nullptr,
+ /*BitWidth=*/nullptr,
+ /*Mutable=*/false,
+ ICIS_NoInit);
+ Field->setAccess(AS_public);
+ D->addDecl(Field);
+ }
+
+ D->completeDefinition();
+ ObjCFastEnumerationStateType = Context.getTagDeclType(D);
+ }
+
+ return ObjCFastEnumerationStateType;
+}
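+
+// The record built above mirrors the Objective-C runtime's
+// NSFastEnumerationState:
+//
+//   typedef struct {
+//     unsigned long state;
+//     id *itemsPtr;
+//     unsigned long *mutationsPtr;
+//     unsigned long extra[5];
+//   } NSFastEnumerationState;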
+
+llvm::Constant *
+CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) {
+ assert(!E->getType()->isPointerType() && "Strings are always arrays");
+
+ // Don't emit it as the address of the string, emit the string data itself
+ // as an inline array.
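+  // For example, 'char a[8] = "abc"' yields the 8-byte array "abc" followed
+  // by five NULs, since the data is resized below to the declared array size.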
+ if (E->getCharByteWidth() == 1) {
+ SmallString<64> Str(E->getString());
+
+ // Resize the string to the right size, which is indicated by its type.
+ const ConstantArrayType *CAT = Context.getAsConstantArrayType(E->getType());
+ Str.resize(CAT->getSize().getZExtValue());
+ return llvm::ConstantDataArray::getString(VMContext, Str, false);
+ }
+
+ auto *AType = cast<llvm::ArrayType>(getTypes().ConvertType(E->getType()));
+ llvm::Type *ElemTy = AType->getElementType();
+ unsigned NumElements = AType->getNumElements();
+
+ // Wide strings have either 2-byte or 4-byte elements.
+ if (ElemTy->getPrimitiveSizeInBits() == 16) {
+ SmallVector<uint16_t, 32> Elements;
+ Elements.reserve(NumElements);
+
+    for (unsigned i = 0, e = E->getLength(); i != e; ++i)
+ Elements.push_back(E->getCodeUnit(i));
+ Elements.resize(NumElements);
+ return llvm::ConstantDataArray::get(VMContext, Elements);
+ }
+
+ assert(ElemTy->getPrimitiveSizeInBits() == 32);
+ SmallVector<uint32_t, 32> Elements;
+ Elements.reserve(NumElements);
+
+  for (unsigned i = 0, e = E->getLength(); i != e; ++i)
+ Elements.push_back(E->getCodeUnit(i));
+ Elements.resize(NumElements);
+ return llvm::ConstantDataArray::get(VMContext, Elements);
+}
+
+static llvm::GlobalVariable *
+GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
+ CodeGenModule &CGM, StringRef GlobalName,
+ CharUnits Alignment) {
+ // OpenCL v1.2 s6.5.3: a string literal is in the constant address space.
+ unsigned AddrSpace = 0;
+ if (CGM.getLangOpts().OpenCL)
+ AddrSpace = CGM.getContext().getTargetAddressSpace(LangAS::opencl_constant);
+
+ llvm::Module &M = CGM.getModule();
+ // Create a global variable for this string
+ auto *GV = new llvm::GlobalVariable(
+ M, C->getType(), !CGM.getLangOpts().WritableStrings, LT, C, GlobalName,
+ nullptr, llvm::GlobalVariable::NotThreadLocal, AddrSpace);
+ GV->setAlignment(Alignment.getQuantity());
+ GV->setUnnamedAddr(true);
+ if (GV->isWeakForLinker()) {
+ assert(CGM.supportsCOMDAT() && "Only COFF uses weak string literals");
+ GV->setComdat(M.getOrInsertComdat(GV->getName()));
+ }
+
+ return GV;
+}
+
+/// GetAddrOfConstantStringFromLiteral - Return a pointer to a
+/// constant array for the given string literal.
+ConstantAddress
+CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
+ StringRef Name) {
+ CharUnits Alignment = getContext().getAlignOfGlobalVarInChars(S->getType());
+
+ llvm::Constant *C = GetConstantArrayFromStringLiteral(S);
+ llvm::GlobalVariable **Entry = nullptr;
+ if (!LangOpts.WritableStrings) {
+ Entry = &ConstantStringMap[C];
+ if (auto GV = *Entry) {
+ if (Alignment.getQuantity() > GV->getAlignment())
+ GV->setAlignment(Alignment.getQuantity());
+ return ConstantAddress(GV, Alignment);
+ }
+ }
+
+ SmallString<256> MangledNameBuffer;
+ StringRef GlobalVariableName;
+ llvm::GlobalValue::LinkageTypes LT;
+
+ // Mangle the string literal if the ABI allows for it. However, we cannot
+ // do this if we are compiling with ASan or -fwritable-strings because they
+ // rely on strings having normal linkage.
+ if (!LangOpts.WritableStrings &&
+ !LangOpts.Sanitize.has(SanitizerKind::Address) &&
+ getCXXABI().getMangleContext().shouldMangleStringLiteral(S)) {
+ llvm::raw_svector_ostream Out(MangledNameBuffer);
+ getCXXABI().getMangleContext().mangleStringLiteral(S, Out);
+
+ LT = llvm::GlobalValue::LinkOnceODRLinkage;
+ GlobalVariableName = MangledNameBuffer;
+ } else {
+ LT = llvm::GlobalValue::PrivateLinkage;
+ GlobalVariableName = Name;
+ }
+
+ auto GV = GenerateStringLiteral(C, LT, *this, GlobalVariableName, Alignment);
+ if (Entry)
+ *Entry = GV;
+
+ SanitizerMD->reportGlobalToASan(GV, S->getStrTokenLoc(0), "<string literal>",
+ QualType());
+ return ConstantAddress(GV, Alignment);
+}
+
+/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
+/// array for the given ObjCEncodeExpr node.
+ConstantAddress
+CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
+ std::string Str;
+ getContext().getObjCEncodingForType(E->getEncodedType(), Str);
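+  // e.g. @encode(int) produces the string "i" and @encode(int *) "^i".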
+
+ return GetAddrOfConstantCString(Str);
+}
+
+/// GetAddrOfConstantCString - Returns a pointer to a character array
+/// containing the literal and a terminating '\0' character.
+/// The result has pointer-to-array type.
+ConstantAddress CodeGenModule::GetAddrOfConstantCString(
+ const std::string &Str, const char *GlobalName) {
+ StringRef StrWithNull(Str.c_str(), Str.size() + 1);
+ CharUnits Alignment =
+ getContext().getAlignOfGlobalVarInChars(getContext().CharTy);
+
+ llvm::Constant *C =
+ llvm::ConstantDataArray::getString(getLLVMContext(), StrWithNull, false);
+
+ // Don't share any string literals if strings aren't constant.
+ llvm::GlobalVariable **Entry = nullptr;
+ if (!LangOpts.WritableStrings) {
+ Entry = &ConstantStringMap[C];
+ if (auto GV = *Entry) {
+ if (Alignment.getQuantity() > GV->getAlignment())
+ GV->setAlignment(Alignment.getQuantity());
+ return ConstantAddress(GV, Alignment);
+ }
+ }
+
+ // Get the default prefix if a name wasn't specified.
+ if (!GlobalName)
+ GlobalName = ".str";
+ // Create a global variable for this.
+ auto GV = GenerateStringLiteral(C, llvm::GlobalValue::PrivateLinkage, *this,
+ GlobalName, Alignment);
+ if (Entry)
+ *Entry = GV;
+ return ConstantAddress(GV, Alignment);
+}
+
+ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
+ const MaterializeTemporaryExpr *E, const Expr *Init) {
+ assert((E->getStorageDuration() == SD_Static ||
+ E->getStorageDuration() == SD_Thread) && "not a global temporary");
+ const auto *VD = cast<VarDecl>(E->getExtendingDecl());
+
+ // If we're not materializing a subobject of the temporary, keep the
+ // cv-qualifiers from the type of the MaterializeTemporaryExpr.
+ QualType MaterializedType = Init->getType();
+ if (Init == E->GetTemporaryExpr())
+ MaterializedType = E->getType();
+
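+  // For example, 'static const int &r = 42;' lifetime-extends the temporary
+  // holding 42, and this routine emits that temporary as a global of its own.
+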
+ CharUnits Align = getContext().getTypeAlignInChars(MaterializedType);
+
+ if (llvm::Constant *Slot = MaterializedGlobalTemporaryMap[E])
+ return ConstantAddress(Slot, Align);
+
+ // FIXME: If an externally-visible declaration extends multiple temporaries,
+ // we need to give each temporary the same name in every translation unit (and
+ // we also need to make the temporaries externally-visible).
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ getCXXABI().getMangleContext().mangleReferenceTemporary(
+ VD, E->getManglingNumber(), Out);
+
+ APValue *Value = nullptr;
+ if (E->getStorageDuration() == SD_Static) {
+ // We might have a cached constant initializer for this temporary. Note
+ // that this might have a different value from the value computed by
+ // evaluating the initializer if the surrounding constant expression
+ // modifies the temporary.
+ Value = getContext().getMaterializedTemporaryValue(E, false);
+ if (Value && Value->isUninit())
+ Value = nullptr;
+ }
+
+ // Try evaluating it now, it might have a constant initializer.
+ Expr::EvalResult EvalResult;
+ if (!Value && Init->EvaluateAsRValue(EvalResult, getContext()) &&
+ !EvalResult.hasSideEffects())
+ Value = &EvalResult.Val;
+
+ llvm::Constant *InitialValue = nullptr;
+ bool Constant = false;
+ llvm::Type *Type;
+ if (Value) {
+ // The temporary has a constant initializer, use it.
+ InitialValue = EmitConstantValue(*Value, MaterializedType, nullptr);
+ Constant = isTypeConstant(MaterializedType, /*ExcludeCtor*/Value);
+ Type = InitialValue->getType();
+ } else {
+    // No initializer; the initialization will be provided when we
+    // initialize the declaration which performed lifetime extension.
+ Type = getTypes().ConvertTypeForMem(MaterializedType);
+ }
+
+ // Create a global variable for this lifetime-extended temporary.
+ llvm::GlobalValue::LinkageTypes Linkage =
+ getLLVMLinkageVarDefinition(VD, Constant);
+ if (Linkage == llvm::GlobalVariable::ExternalLinkage) {
+ const VarDecl *InitVD;
+ if (VD->isStaticDataMember() && VD->getAnyInitializer(InitVD) &&
+ isa<CXXRecordDecl>(InitVD->getLexicalDeclContext())) {
+      // Temporaries defined inside a class get linkonce_odr linkage because the
+      // class can be defined in multiple translation units.
+ Linkage = llvm::GlobalVariable::LinkOnceODRLinkage;
+ } else {
+ // There is no need for this temporary to have external linkage if the
+ // VarDecl has external linkage.
+ Linkage = llvm::GlobalVariable::InternalLinkage;
+ }
+ }
+ unsigned AddrSpace = GetGlobalVarAddressSpace(
+ VD, getContext().getTargetAddressSpace(MaterializedType));
+ auto *GV = new llvm::GlobalVariable(
+ getModule(), Type, Constant, Linkage, InitialValue, Name.c_str(),
+ /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
+ AddrSpace);
+ setGlobalVisibility(GV, VD);
+ GV->setAlignment(Align.getQuantity());
+ if (supportsCOMDAT() && GV->isWeakForLinker())
+ GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
+ if (VD->getTLSKind())
+ setTLSMode(GV, *VD);
+ MaterializedGlobalTemporaryMap[E] = GV;
+ return ConstantAddress(GV, Align);
+}
+
+/// EmitObjCPropertyImplementations - Emit information for synthesized
+/// properties for an implementation.
+void CodeGenModule::EmitObjCPropertyImplementations(
+    const ObjCImplementationDecl *D) {
+ for (const auto *PID : D->property_impls()) {
+ // Dynamic is just for type-checking.
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+      // Determine which methods need to be implemented; some may have
+      // been overridden. Note that ::isPropertyAccessor is not the method
+      // we want: that just indicates whether the decl came from a
+      // property. What we want to know is whether the method is defined
+      // in this implementation.
+ if (!D->getInstanceMethod(PD->getGetterName()))
+ CodeGenFunction(*this).GenerateObjCGetter(
+ const_cast<ObjCImplementationDecl *>(D), PID);
+ if (!PD->isReadOnly() &&
+ !D->getInstanceMethod(PD->getSetterName()))
+ CodeGenFunction(*this).GenerateObjCSetter(
+ const_cast<ObjCImplementationDecl *>(D), PID);
+ }
+ }
+}
+
+static bool needsDestructMethod(ObjCImplementationDecl *impl) {
+ const ObjCInterfaceDecl *iface = impl->getClassInterface();
+ for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
+ ivar; ivar = ivar->getNextIvar())
+ if (ivar->getType().isDestructedType())
+ return true;
+
+ return false;
+}
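+
+// e.g. an ivar of C++ class type with a non-trivial destructor, such as
+// 'std::string Name;', has a destructed type, so .cxx_destruct is required.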
+
+static bool AllTrivialInitializers(CodeGenModule &CGM,
+ ObjCImplementationDecl *D) {
+ CodeGenFunction CGF(CGM);
+ for (ObjCImplementationDecl::init_iterator B = D->init_begin(),
+ E = D->init_end(); B != E; ++B) {
+ CXXCtorInitializer *CtorInitExp = *B;
+ Expr *Init = CtorInitExp->getInit();
+ if (!CGF.isTrivialInitializer(Init))
+ return false;
+ }
+ return true;
+}
+
+/// EmitObjCIvarInitializations - Emit information for ivar initialization
+/// for an implementation.
+void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
+ // We might need a .cxx_destruct even if we don't have any ivar initializers.
+ if (needsDestructMethod(D)) {
+ IdentifierInfo *II = &getContext().Idents.get(".cxx_destruct");
+ Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
+ ObjCMethodDecl *DTORMethod =
+ ObjCMethodDecl::Create(getContext(), D->getLocation(), D->getLocation(),
+ cxxSelector, getContext().VoidTy, nullptr, D,
+ /*isInstance=*/true, /*isVariadic=*/false,
+ /*isPropertyAccessor=*/true, /*isImplicitlyDeclared=*/true,
+ /*isDefined=*/false, ObjCMethodDecl::Required);
+ D->addInstanceMethod(DTORMethod);
+ CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
+ D->setHasDestructors(true);
+ }
+
+ // If the implementation doesn't have any ivar initializers, we don't need
+ // a .cxx_construct.
+ if (D->getNumIvarInitializers() == 0 ||
+ AllTrivialInitializers(*this, D))
+ return;
+
+ IdentifierInfo *II = &getContext().Idents.get(".cxx_construct");
+ Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
+ // The constructor returns 'self'.
+ ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create(getContext(),
+ D->getLocation(),
+ D->getLocation(),
+ cxxSelector,
+ getContext().getObjCIdType(),
+ nullptr, D, /*isInstance=*/true,
+ /*isVariadic=*/false,
+ /*isPropertyAccessor=*/true,
+ /*isImplicitlyDeclared=*/true,
+ /*isDefined=*/false,
+ ObjCMethodDecl::Required);
+ D->addInstanceMethod(CTORMethod);
+ CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true);
+ D->setHasNonZeroConstructors(true);
+}
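+
+// So an implementation whose interface has, say, a 'std::vector<int>' ivar
+// picks up both methods: .cxx_construct (returning self) and .cxx_destruct.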
+
+/// EmitNamespace - Emit all declarations in a namespace.
+void CodeGenModule::EmitNamespace(const NamespaceDecl *ND) {
+ for (auto *I : ND->decls()) {
+ if (const auto *VD = dyn_cast<VarDecl>(I))
+ if (VD->getTemplateSpecializationKind() != TSK_ExplicitSpecialization &&
+ VD->getTemplateSpecializationKind() != TSK_Undeclared)
+ continue;
+ EmitTopLevelDecl(I);
+ }
+}
+
+// EmitLinkageSpec - Emit all declarations in a linkage spec.
+void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
+ if (LSD->getLanguage() != LinkageSpecDecl::lang_c &&
+ LSD->getLanguage() != LinkageSpecDecl::lang_cxx) {
+ ErrorUnsupported(LSD, "linkage spec");
+ return;
+ }
+
+ for (auto *I : LSD->decls()) {
+ // Meta-data for ObjC class includes references to implemented methods.
+ // Generate class's method definitions first.
+ if (auto *OID = dyn_cast<ObjCImplDecl>(I)) {
+ for (auto *M : OID->methods())
+ EmitTopLevelDecl(M);
+ }
+ EmitTopLevelDecl(I);
+ }
+}
+
+/// EmitTopLevelDecl - Emit code for a single top level declaration.
+void CodeGenModule::EmitTopLevelDecl(Decl *D) {
+ // Ignore dependent declarations.
+ if (D->getDeclContext() && D->getDeclContext()->isDependentContext())
+ return;
+
+ switch (D->getKind()) {
+ case Decl::CXXConversion:
+ case Decl::CXXMethod:
+ case Decl::Function:
+ // Skip function templates
+ if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate() ||
+ cast<FunctionDecl>(D)->isLateTemplateParsed())
+ return;
+
+ EmitGlobal(cast<FunctionDecl>(D));
+ // Always provide some coverage mapping
+ // even for the functions that aren't emitted.
+ AddDeferredUnusedCoverageMapping(D);
+ break;
+
+ case Decl::Var:
+ // Skip variable templates
+ if (cast<VarDecl>(D)->getDescribedVarTemplate())
+ return;
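+    // Fall through to emit the variable.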
+ case Decl::VarTemplateSpecialization:
+ EmitGlobal(cast<VarDecl>(D));
+ break;
+
+ // Indirect fields from global anonymous structs and unions can be
+ // ignored; only the actual variable requires IR gen support.
+ case Decl::IndirectField:
+ break;
+
+ // C++ Decls
+ case Decl::Namespace:
+ EmitNamespace(cast<NamespaceDecl>(D));
+ break;
+ // No code generation needed.
+ case Decl::UsingShadow:
+ case Decl::ClassTemplate:
+ case Decl::VarTemplate:
+ case Decl::VarTemplatePartialSpecialization:
+ case Decl::FunctionTemplate:
+ case Decl::TypeAliasTemplate:
+ case Decl::Block:
+ case Decl::Empty:
+ break;
+ case Decl::Using: // using X; [C++]
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->EmitUsingDecl(cast<UsingDecl>(*D));
+ return;
+ case Decl::NamespaceAlias:
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(*D));
+ return;
+ case Decl::UsingDirective: // using namespace X; [C++]
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->EmitUsingDirective(cast<UsingDirectiveDecl>(*D));
+ return;
+ case Decl::CXXConstructor:
+ // Skip function templates
+ if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate() ||
+ cast<FunctionDecl>(D)->isLateTemplateParsed())
+ return;
+
+ getCXXABI().EmitCXXConstructors(cast<CXXConstructorDecl>(D));
+ break;
+ case Decl::CXXDestructor:
+ if (cast<FunctionDecl>(D)->isLateTemplateParsed())
+ return;
+ getCXXABI().EmitCXXDestructors(cast<CXXDestructorDecl>(D));
+ break;
+
+ case Decl::StaticAssert:
+ // Nothing to do.
+ break;
+
+ // Objective-C Decls
+
+ // Forward declarations, no (immediate) code generation.
+ case Decl::ObjCInterface:
+ case Decl::ObjCCategory:
+ break;
+
+ case Decl::ObjCProtocol: {
+ auto *Proto = cast<ObjCProtocolDecl>(D);
+ if (Proto->isThisDeclarationADefinition())
+ ObjCRuntime->GenerateProtocol(Proto);
+ break;
+ }
+
+ case Decl::ObjCCategoryImpl:
+    // Categories have properties but don't support synthesize, so we
+    // can ignore them here.
+ ObjCRuntime->GenerateCategory(cast<ObjCCategoryImplDecl>(D));
+ break;
+
+ case Decl::ObjCImplementation: {
+ auto *OMD = cast<ObjCImplementationDecl>(D);
+ EmitObjCPropertyImplementations(OMD);
+ EmitObjCIvarInitializations(OMD);
+ ObjCRuntime->GenerateClass(OMD);
+ // Emit global variable debug information.
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ if (getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo)
+ DI->getOrCreateInterfaceType(getContext().getObjCInterfaceType(
+ OMD->getClassInterface()), OMD->getLocation());
+ break;
+ }
+ case Decl::ObjCMethod: {
+ auto *OMD = cast<ObjCMethodDecl>(D);
+ // If this is not a prototype, emit the body.
+ if (OMD->getBody())
+ CodeGenFunction(*this).GenerateObjCMethod(OMD);
+ break;
+ }
+ case Decl::ObjCCompatibleAlias:
+ ObjCRuntime->RegisterAlias(cast<ObjCCompatibleAliasDecl>(D));
+ break;
+
+ case Decl::LinkageSpec:
+ EmitLinkageSpec(cast<LinkageSpecDecl>(D));
+ break;
+
+ case Decl::FileScopeAsm: {
+ // File-scope asm is ignored during device-side CUDA compilation.
+ if (LangOpts.CUDA && LangOpts.CUDAIsDevice)
+ break;
+ // File-scope asm is ignored during device-side OpenMP compilation.
+ if (LangOpts.OpenMPIsDevice)
+ break;
+ auto *AD = cast<FileScopeAsmDecl>(D);
+ getModule().appendModuleInlineAsm(AD->getAsmString()->getString());
+ break;
+ }
+
+ case Decl::Import: {
+ auto *Import = cast<ImportDecl>(D);
+
+ // Ignore import declarations that come from imported modules.
+ if (Import->getImportedOwningModule())
+ break;
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->EmitImportDecl(*Import);
+
+ ImportedModules.insert(Import->getImportedModule());
+ break;
+ }
+
+ case Decl::OMPThreadPrivate:
+ EmitOMPThreadPrivateDecl(cast<OMPThreadPrivateDecl>(D));
+ break;
+
+ case Decl::ClassTemplateSpecialization: {
+ const auto *Spec = cast<ClassTemplateSpecializationDecl>(D);
+ if (DebugInfo &&
+ Spec->getSpecializationKind() == TSK_ExplicitInstantiationDefinition &&
+ Spec->hasDefinition())
+ DebugInfo->completeTemplateDefinition(*Spec);
+ break;
+ }
+
+ default:
+ // Make sure we handled everything we should, every other kind is a
+ // non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind
+ // function. Need to recode Decl::Kind to do that easily.
+ assert(isa<TypeDecl>(D) && "Unsupported decl kind");
+ break;
+ }
+}
+
+void CodeGenModule::AddDeferredUnusedCoverageMapping(Decl *D) {
+ // Do we need to generate coverage mapping?
+ if (!CodeGenOpts.CoverageMapping)
+ return;
+ switch (D->getKind()) {
+ case Decl::CXXConversion:
+ case Decl::CXXMethod:
+ case Decl::Function:
+ case Decl::ObjCMethod:
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor: {
+ if (!cast<FunctionDecl>(D)->doesThisDeclarationHaveABody())
+ return;
+ auto I = DeferredEmptyCoverageMappingDecls.find(D);
+ if (I == DeferredEmptyCoverageMappingDecls.end())
+ DeferredEmptyCoverageMappingDecls[D] = true;
+ break;
+ }
+ default:
+ break;
+  }
+}
+
+void CodeGenModule::ClearUnusedCoverageMapping(const Decl *D) {
+ // Do we need to generate coverage mapping?
+ if (!CodeGenOpts.CoverageMapping)
+ return;
+ if (const auto *Fn = dyn_cast<FunctionDecl>(D)) {
+ if (Fn->isTemplateInstantiation())
+ ClearUnusedCoverageMapping(Fn->getTemplateInstantiationPattern());
+ }
+ auto I = DeferredEmptyCoverageMappingDecls.find(D);
+ if (I == DeferredEmptyCoverageMappingDecls.end())
+ DeferredEmptyCoverageMappingDecls[D] = false;
+ else
+ I->second = false;
+}
+
+void CodeGenModule::EmitDeferredUnusedCoverageMappings() {
+ std::vector<const Decl *> DeferredDecls;
+ for (const auto &I : DeferredEmptyCoverageMappingDecls) {
+ if (!I.second)
+ continue;
+ DeferredDecls.push_back(I.first);
+ }
+  // Sort the declarations by their location so that tests see a predictable
+  // order in the coverage mapping for the unused declarations.
+ if (CodeGenOpts.DumpCoverageMapping)
+ std::sort(DeferredDecls.begin(), DeferredDecls.end(),
+ [] (const Decl *LHS, const Decl *RHS) {
+ return LHS->getLocStart() < RHS->getLocStart();
+ });
+ for (const auto *D : DeferredDecls) {
+ switch (D->getKind()) {
+ case Decl::CXXConversion:
+ case Decl::CXXMethod:
+ case Decl::Function:
+ case Decl::ObjCMethod: {
+ CodeGenPGO PGO(*this);
+ GlobalDecl GD(cast<FunctionDecl>(D));
+ PGO.emitEmptyCounterMapping(D, getMangledName(GD),
+ getFunctionLinkage(GD));
+ break;
+ }
+ case Decl::CXXConstructor: {
+ CodeGenPGO PGO(*this);
+ GlobalDecl GD(cast<CXXConstructorDecl>(D), Ctor_Base);
+ PGO.emitEmptyCounterMapping(D, getMangledName(GD),
+ getFunctionLinkage(GD));
+ break;
+ }
+ case Decl::CXXDestructor: {
+ CodeGenPGO PGO(*this);
+ GlobalDecl GD(cast<CXXDestructorDecl>(D), Dtor_Base);
+ PGO.emitEmptyCounterMapping(D, getMangledName(GD),
+ getFunctionLinkage(GD));
+ break;
+ }
+ default:
+ break;
+    }
+ }
+}
+
+/// Turns the given pointer into a constant.
+static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context,
+ const void *Ptr) {
+ uintptr_t PtrInt = reinterpret_cast<uintptr_t>(Ptr);
+ llvm::Type *i64 = llvm::Type::getInt64Ty(Context);
+ return llvm::ConstantInt::get(i64, PtrInt);
+}
+
+static void EmitGlobalDeclMetadata(CodeGenModule &CGM,
+ llvm::NamedMDNode *&GlobalMetadata,
+ GlobalDecl D,
+ llvm::GlobalValue *Addr) {
+ if (!GlobalMetadata)
+ GlobalMetadata =
+ CGM.getModule().getOrInsertNamedMetadata("clang.global.decl.ptrs");
+
+ // TODO: should we report variant information for ctors/dtors?
+ llvm::Metadata *Ops[] = {llvm::ConstantAsMetadata::get(Addr),
+ llvm::ConstantAsMetadata::get(GetPointerConstant(
+ CGM.getLLVMContext(), D.getDecl()))};
+ GlobalMetadata->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
+}
+
+/// For each function which is declared within an extern "C" region and marked
+/// as 'used', but has internal linkage, create an alias from the unmangled
+/// name to the mangled name if possible. People expect to be able to refer
+/// to such functions with an unmangled name from inline assembly within the
+/// same translation unit.
+void CodeGenModule::EmitStaticExternCAliases() {
+ for (auto &I : StaticExternCValues) {
+ IdentifierInfo *Name = I.first;
+ llvm::GlobalValue *Val = I.second;
+ if (Val && !getModule().getNamedValue(Name->getName()))
+ addUsedGlobal(llvm::GlobalAlias::create(Name->getName(), Val));
+ }
+}
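+
+// For example, '__attribute__((used)) static void f();' inside an extern "C"
+// block gets an unmangled alias "f" pointing at its internal-linkage
+// definition.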
+
+bool CodeGenModule::lookupRepresentativeDecl(StringRef MangledName,
+ GlobalDecl &Result) const {
+ auto Res = Manglings.find(MangledName);
+ if (Res == Manglings.end())
+ return false;
+ Result = Res->getValue();
+ return true;
+}
+
+/// Emits metadata nodes associating all the global values in the
+/// current module with the Decls they came from. This is useful for
+/// projects using IR gen as a subroutine.
+///
+/// Since there's currently no way to associate an MDNode directly
+/// with an llvm::GlobalValue, we create a global named metadata
+/// with the name 'clang.global.decl.ptrs'.
+void CodeGenModule::EmitDeclMetadata() {
+ llvm::NamedMDNode *GlobalMetadata = nullptr;
+
+ for (auto &I : MangledDeclNames) {
+ llvm::GlobalValue *Addr = getModule().getNamedValue(I.second);
+ // Some mangled names don't necessarily have an associated GlobalValue
+ // in this module, e.g. if we mangled it for DebugInfo.
+ if (Addr)
+ EmitGlobalDeclMetadata(*this, GlobalMetadata, I.first, Addr);
+ }
+}
+
+/// Emits metadata nodes for all the local variables in the current
+/// function.
+void CodeGenFunction::EmitDeclMetadata() {
+ if (LocalDeclMap.empty()) return;
+
+ llvm::LLVMContext &Context = getLLVMContext();
+
+ // Find the unique metadata ID for this name.
+ unsigned DeclPtrKind = Context.getMDKindID("clang.decl.ptr");
+
+ llvm::NamedMDNode *GlobalMetadata = nullptr;
+
+ for (auto &I : LocalDeclMap) {
+ const Decl *D = I.first;
+ llvm::Value *Addr = I.second.getPointer();
+ if (auto *Alloca = dyn_cast<llvm::AllocaInst>(Addr)) {
+ llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D);
+ Alloca->setMetadata(
+ DeclPtrKind, llvm::MDNode::get(
+ Context, llvm::ValueAsMetadata::getConstant(DAddr)));
+ } else if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr)) {
+ GlobalDecl GD = GlobalDecl(cast<VarDecl>(D));
+ EmitGlobalDeclMetadata(CGM, GlobalMetadata, GD, GV);
+ }
+ }
+}
+
+void CodeGenModule::EmitVersionIdentMetadata() {
+ llvm::NamedMDNode *IdentMetadata =
+ TheModule.getOrInsertNamedMetadata("llvm.ident");
+ std::string Version = getClangFullVersion();
+ llvm::LLVMContext &Ctx = TheModule.getContext();
+
+ llvm::Metadata *IdentNode[] = {llvm::MDString::get(Ctx, Version)};
+ IdentMetadata->addOperand(llvm::MDNode::get(Ctx, IdentNode));
+}
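+
+// This yields module-level metadata along the lines of:
+//   !llvm.ident = !{!0}
+//   !0 = !{!"clang version 3.8.0 (...)"}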
+
+void CodeGenModule::EmitTargetMetadata() {
+  // Warning: new entries may be appended to MangledDeclNames within this
+  // loop. We rely on MapVector insertions adding new elements to the end
+  // of the container.
+ // FIXME: Move this loop into the one target that needs it, and only
+ // loop over those declarations for which we couldn't emit the target
+ // metadata when we emitted the declaration.
+ for (unsigned I = 0; I != MangledDeclNames.size(); ++I) {
+ auto Val = *(MangledDeclNames.begin() + I);
+ const Decl *D = Val.first.getDecl()->getMostRecentDecl();
+ llvm::GlobalValue *GV = GetGlobalValue(Val.second);
+ getTargetCodeGenInfo().emitTargetMD(D, GV, *this);
+ }
+}
+
+void CodeGenModule::EmitCoverageFile() {
+ if (!getCodeGenOpts().CoverageFile.empty()) {
+ if (llvm::NamedMDNode *CUNode = TheModule.getNamedMetadata("llvm.dbg.cu")) {
+ llvm::NamedMDNode *GCov = TheModule.getOrInsertNamedMetadata("llvm.gcov");
+ llvm::LLVMContext &Ctx = TheModule.getContext();
+ llvm::MDString *CoverageFile =
+ llvm::MDString::get(Ctx, getCodeGenOpts().CoverageFile);
+ for (int i = 0, e = CUNode->getNumOperands(); i != e; ++i) {
+ llvm::MDNode *CU = CUNode->getOperand(i);
+ llvm::Metadata *Elts[] = {CoverageFile, CU};
+ GCov->addOperand(llvm::MDNode::get(Ctx, Elts));
+ }
+ }
+ }
+}
+
+llvm::Constant *CodeGenModule::EmitUuidofInitializer(StringRef Uuid) {
+ // Sema has checked that all uuid strings are of the form
+ // "12345678-1234-1234-1234-1234567890ab".
+ assert(Uuid.size() == 36);
+ for (unsigned i = 0; i < 36; ++i) {
+ if (i == 8 || i == 13 || i == 18 || i == 23) assert(Uuid[i] == '-');
+ else assert(isHexDigit(Uuid[i]));
+ }
+
+ // The starts of all bytes of Field3 in Uuid. Field 3 is "1234-1234567890ab".
+ const unsigned Field3ValueOffsets[8] = { 19, 21, 24, 26, 28, 30, 32, 34 };
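+  // e.g. for "01234567-89AB-CDEF-0123-456789ABCDEF" these offsets select the
+  // byte pairs 01, 23, 45, 67, 89, AB, CD and EF.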
+
+ llvm::Constant *Field3[8];
+ for (unsigned Idx = 0; Idx < 8; ++Idx)
+ Field3[Idx] = llvm::ConstantInt::get(
+ Int8Ty, Uuid.substr(Field3ValueOffsets[Idx], 2), 16);
+
+ llvm::Constant *Fields[4] = {
+ llvm::ConstantInt::get(Int32Ty, Uuid.substr(0, 8), 16),
+ llvm::ConstantInt::get(Int16Ty, Uuid.substr(9, 4), 16),
+ llvm::ConstantInt::get(Int16Ty, Uuid.substr(14, 4), 16),
+ llvm::ConstantArray::get(llvm::ArrayType::get(Int8Ty, 8), Field3)
+ };
+
+ return llvm::ConstantStruct::getAnon(Fields);
+}
+
+llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
+ bool ForEH) {
+ // Return a bogus pointer if RTTI is disabled, unless it's for EH.
+ // FIXME: should we even be calling this method if RTTI is disabled
+ // and it's not for EH?
+ if (!ForEH && !getLangOpts().RTTI)
+ return llvm::Constant::getNullValue(Int8PtrTy);
+
+ if (ForEH && Ty->isObjCObjectPointerType() &&
+ LangOpts.ObjCRuntime.isGNUFamily())
+ return ObjCRuntime->GetEHType(Ty);
+
+ return getCXXABI().getAddrOfRTTIDescriptor(Ty);
+}
+
+void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
+ for (auto RefExpr : D->varlists()) {
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(RefExpr)->getDecl());
+ bool PerformInit =
+ VD->getAnyInitializer() &&
+ !VD->getAnyInitializer()->isConstantInitializer(getContext(),
+ /*ForRef=*/false);
+
+ Address Addr(GetAddrOfGlobalVar(VD), getContext().getDeclAlign(VD));
+ if (auto InitFunction = getOpenMPRuntime().emitThreadPrivateVarDefinition(
+ VD, Addr, RefExpr->getLocStart(), PerformInit))
+ CXXGlobalInits.push_back(InitFunction);
+ }
+}
+
+llvm::Metadata *CodeGenModule::CreateMetadataIdentifierForType(QualType T) {
+ llvm::Metadata *&InternalId = MetadataIdMap[T.getCanonicalType()];
+ if (InternalId)
+ return InternalId;
+
+ if (isExternallyVisible(T->getLinkage())) {
+ std::string OutName;
+ llvm::raw_string_ostream Out(OutName);
+ getCXXABI().getMangleContext().mangleTypeName(T, Out);
+
+ InternalId = llvm::MDString::get(getLLVMContext(), Out.str());
+ } else {
+ InternalId = llvm::MDNode::getDistinct(getLLVMContext(),
+ llvm::ArrayRef<llvm::Metadata *>());
+ }
+
+ return InternalId;
+}
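+
+// e.g. for a global 'struct S' the identifier is the mangled type name
+// (roughly "_ZTS1S" under the Itanium ABI), so separate TUs produce matching
+// identifiers; internal-linkage types get a distinct node that cannot merge.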
+
+void CodeGenModule::CreateVTableBitSetEntry(llvm::NamedMDNode *BitsetsMD,
+ llvm::GlobalVariable *VTable,
+ CharUnits Offset,
+ const CXXRecordDecl *RD) {
+ llvm::Metadata *MD =
+ CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
+ llvm::Metadata *BitsetOps[] = {
+ MD, llvm::ConstantAsMetadata::get(VTable),
+ llvm::ConstantAsMetadata::get(
+ llvm::ConstantInt::get(Int64Ty, Offset.getQuantity()))};
+ BitsetsMD->addOperand(llvm::MDTuple::get(getLLVMContext(), BitsetOps));
+
+ if (CodeGenOpts.SanitizeCfiCrossDso) {
+ if (auto TypeId = CreateCfiIdForTypeMetadata(MD)) {
+ llvm::Metadata *BitsetOps2[] = {
+ llvm::ConstantAsMetadata::get(TypeId),
+ llvm::ConstantAsMetadata::get(VTable),
+ llvm::ConstantAsMetadata::get(
+ llvm::ConstantInt::get(Int64Ty, Offset.getQuantity()))};
+ BitsetsMD->addOperand(llvm::MDTuple::get(getLLVMContext(), BitsetOps2));
+ }
+ }
+}
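+
+// Each operand above is a (type-id, vtable, byte-offset) triple; LLVM's CFI
+// lowering consumes this named metadata (historically "llvm.bitsets") when
+// building the checks behind -fsanitize=cfi virtual-call protection.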
+
+// Fills in the supplied string map with the set of target features for the
+// passed in function.
+void CodeGenModule::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
+ const FunctionDecl *FD) {
+ StringRef TargetCPU = Target.getTargetOpts().CPU;
+ if (const auto *TD = FD->getAttr<TargetAttr>()) {
+ // If we have a TargetAttr build up the feature map based on that.
+ TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
+
+ // Make a copy of the features as passed on the command line into the
+ // beginning of the additional features from the function to override.
+ ParsedAttr.first.insert(ParsedAttr.first.begin(),
+ Target.getTargetOpts().FeaturesAsWritten.begin(),
+ Target.getTargetOpts().FeaturesAsWritten.end());
+
+    if (!ParsedAttr.second.empty())
+ TargetCPU = ParsedAttr.second;
+
+ // Now populate the feature map, first with the TargetCPU which is either
+ // the default or a new one from the target attribute string. Then we'll use
+ // the passed in features (FeaturesAsWritten) along with the new ones from
+ // the attribute.
+ Target.initFeatureMap(FeatureMap, getDiags(), TargetCPU, ParsedAttr.first);
+ } else {
+ Target.initFeatureMap(FeatureMap, getDiags(), TargetCPU,
+ Target.getTargetOpts().Features);
+ }
+}
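+
+// For example, '__attribute__((target("arch=haswell,avx2")))' would re-seed
+// the map with CPU "haswell" plus the command-line features and then apply
+// "+avx2" on top (illustrative; the exact parsing lives in TargetAttr::parse).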
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
new file mode 100644
index 0000000..fdb4d78
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
@@ -0,0 +1,1245 @@
+//===--- CodeGenModule.h - Per-Module state for LLVM CodeGen ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal per-translation-unit state used for llvm translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENMODULE_H
+#define LLVM_CLANG_LIB_CODEGEN_CODEGENMODULE_H
+
+#include "CGVTables.h"
+#include "CodeGenTypeCache.h"
+#include "CodeGenTypes.h"
+#include "SanitizerMetadata.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/GlobalDecl.h"
+#include "clang/AST/Mangle.h"
+#include "clang/Basic/ABI.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/Module.h"
+#include "clang/Basic/SanitizerBlacklist.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/ValueHandle.h"
+
+namespace llvm {
+class Module;
+class Constant;
+class ConstantInt;
+class Function;
+class GlobalValue;
+class DataLayout;
+class FunctionType;
+class LLVMContext;
+class IndexedInstrProfReader;
+}
+
+namespace clang {
+class TargetCodeGenInfo;
+class ASTContext;
+class AtomicType;
+class FunctionDecl;
+class IdentifierInfo;
+class ObjCMethodDecl;
+class ObjCImplementationDecl;
+class ObjCCategoryImplDecl;
+class ObjCProtocolDecl;
+class ObjCEncodeExpr;
+class BlockExpr;
+class CharUnits;
+class Decl;
+class Expr;
+class Stmt;
+class InitListExpr;
+class StringLiteral;
+class NamedDecl;
+class ValueDecl;
+class VarDecl;
+class LangOptions;
+class CodeGenOptions;
+class HeaderSearchOptions;
+class PreprocessorOptions;
+class DiagnosticsEngine;
+class AnnotateAttr;
+class CXXDestructorDecl;
+class Module;
+class CoverageSourceInfo;
+
+namespace CodeGen {
+
+class CallArgList;
+class CodeGenFunction;
+class CodeGenTBAA;
+class CGCXXABI;
+class CGDebugInfo;
+class CGObjCRuntime;
+class CGOpenCLRuntime;
+class CGOpenMPRuntime;
+class CGCUDARuntime;
+class BlockFieldFlags;
+class FunctionArgList;
+class CoverageMappingModuleGen;
+
+struct OrderGlobalInits {
+ unsigned int priority;
+ unsigned int lex_order;
+ OrderGlobalInits(unsigned int p, unsigned int l)
+ : priority(p), lex_order(l) {}
+
+ bool operator==(const OrderGlobalInits &RHS) const {
+ return priority == RHS.priority && lex_order == RHS.lex_order;
+ }
+
+ bool operator<(const OrderGlobalInits &RHS) const {
+ return std::tie(priority, lex_order) <
+ std::tie(RHS.priority, RHS.lex_order);
+ }
+};
+
+struct ObjCEntrypoints {
+ ObjCEntrypoints() { memset(this, 0, sizeof(*this)); }
+
+ /// void objc_autoreleasePoolPop(void*);
+ llvm::Constant *objc_autoreleasePoolPop;
+
+ /// void *objc_autoreleasePoolPush(void);
+ llvm::Constant *objc_autoreleasePoolPush;
+
+ /// id objc_autorelease(id);
+ llvm::Constant *objc_autorelease;
+
+ /// id objc_autoreleaseReturnValue(id);
+ llvm::Constant *objc_autoreleaseReturnValue;
+
+ /// void objc_copyWeak(id *dest, id *src);
+ llvm::Constant *objc_copyWeak;
+
+ /// void objc_destroyWeak(id*);
+ llvm::Constant *objc_destroyWeak;
+
+ /// id objc_initWeak(id*, id);
+ llvm::Constant *objc_initWeak;
+
+ /// id objc_loadWeak(id*);
+ llvm::Constant *objc_loadWeak;
+
+ /// id objc_loadWeakRetained(id*);
+ llvm::Constant *objc_loadWeakRetained;
+
+ /// void objc_moveWeak(id *dest, id *src);
+ llvm::Constant *objc_moveWeak;
+
+ /// id objc_retain(id);
+ llvm::Constant *objc_retain;
+
+ /// id objc_retainAutorelease(id);
+ llvm::Constant *objc_retainAutorelease;
+
+ /// id objc_retainAutoreleaseReturnValue(id);
+ llvm::Constant *objc_retainAutoreleaseReturnValue;
+
+ /// id objc_retainAutoreleasedReturnValue(id);
+ llvm::Constant *objc_retainAutoreleasedReturnValue;
+
+ /// id objc_retainBlock(id);
+ llvm::Constant *objc_retainBlock;
+
+ /// void objc_release(id);
+ llvm::Constant *objc_release;
+
+ /// id objc_storeStrong(id*, id);
+ llvm::Constant *objc_storeStrong;
+
+ /// id objc_storeWeak(id*, id);
+ llvm::Constant *objc_storeWeak;
+
+  /// A void(void) inline asm to use to mark that the return value of
+  /// a call will be immediately retained.
+ llvm::InlineAsm *retainAutoreleasedReturnValueMarker;
+
+ /// void clang.arc.use(...);
+ llvm::Constant *clang_arc_use;
+};
+
+/// This class records statistics on instrumentation based profiling.
+class InstrProfStats {
+ uint32_t VisitedInMainFile;
+ uint32_t MissingInMainFile;
+ uint32_t Visited;
+ uint32_t Missing;
+ uint32_t Mismatched;
+
+public:
+ InstrProfStats()
+ : VisitedInMainFile(0), MissingInMainFile(0), Visited(0), Missing(0),
+ Mismatched(0) {}
+ /// Record that we've visited a function and whether or not that function was
+ /// in the main source file.
+ void addVisited(bool MainFile) {
+ if (MainFile)
+ ++VisitedInMainFile;
+ ++Visited;
+ }
+ /// Record that a function we've visited has no profile data.
+ void addMissing(bool MainFile) {
+ if (MainFile)
+ ++MissingInMainFile;
+ ++Missing;
+ }
+ /// Record that a function we've visited has mismatched profile data.
+ void addMismatched(bool MainFile) { ++Mismatched; }
+ /// Whether or not the stats we've gathered indicate any potential problems.
+ bool hasDiagnostics() { return Missing || Mismatched; }
+ /// Report potential problems we've found to \c Diags.
+ void reportDiagnostics(DiagnosticsEngine &Diags, StringRef MainFile);
+};
+
+/// A pair of helper functions for a __block variable.
+class BlockByrefHelpers : public llvm::FoldingSetNode {
+ // MSVC requires this type to be complete in order to process this
+ // header.
+public:
+ llvm::Constant *CopyHelper;
+ llvm::Constant *DisposeHelper;
+
+ /// The alignment of the field. This is important because
+ /// different offsets to the field within the byref struct need to
+ /// have different helper functions.
+ CharUnits Alignment;
+
+ BlockByrefHelpers(CharUnits alignment) : Alignment(alignment) {}
+ BlockByrefHelpers(const BlockByrefHelpers &) = default;
+ virtual ~BlockByrefHelpers();
+
+ void Profile(llvm::FoldingSetNodeID &id) const {
+ id.AddInteger(Alignment.getQuantity());
+ profileImpl(id);
+ }
+ virtual void profileImpl(llvm::FoldingSetNodeID &id) const = 0;
+
+ virtual bool needsCopy() const { return true; }
+ virtual void emitCopy(CodeGenFunction &CGF, Address dest, Address src) = 0;
+
+ virtual bool needsDispose() const { return true; }
+ virtual void emitDispose(CodeGenFunction &CGF, Address field) = 0;
+};
+
+/// This class organizes the cross-function state that is used while generating
+/// LLVM code.
+class CodeGenModule : public CodeGenTypeCache {
+ CodeGenModule(const CodeGenModule &) = delete;
+ void operator=(const CodeGenModule &) = delete;
+
+public:
+ struct Structor {
+ Structor() : Priority(0), Initializer(nullptr), AssociatedData(nullptr) {}
+ Structor(int Priority, llvm::Constant *Initializer,
+ llvm::Constant *AssociatedData)
+ : Priority(Priority), Initializer(Initializer),
+ AssociatedData(AssociatedData) {}
+ int Priority;
+ llvm::Constant *Initializer;
+ llvm::Constant *AssociatedData;
+ };
+
+ typedef std::vector<Structor> CtorList;
+
+private:
+ ASTContext &Context;
+ const LangOptions &LangOpts;
+ const HeaderSearchOptions &HeaderSearchOpts; // Only used for debug info.
+ const PreprocessorOptions &PreprocessorOpts; // Only used for debug info.
+ const CodeGenOptions &CodeGenOpts;
+ llvm::Module &TheModule;
+ DiagnosticsEngine &Diags;
+ const TargetInfo &Target;
+ std::unique_ptr<CGCXXABI> ABI;
+ llvm::LLVMContext &VMContext;
+
+ CodeGenTBAA *TBAA;
+
+ mutable const TargetCodeGenInfo *TheTargetCodeGenInfo;
+
+ // This should not be moved earlier, since its initialization depends on some
+ // of the previous reference members being already initialized and also checks
+  // if TheTargetCodeGenInfo is NULL.
+ CodeGenTypes Types;
+
+ /// Holds information about C++ vtables.
+ CodeGenVTables VTables;
+
+  CGObjCRuntime *ObjCRuntime;
+  CGOpenCLRuntime *OpenCLRuntime;
+  CGOpenMPRuntime *OpenMPRuntime;
+  CGCUDARuntime *CUDARuntime;
+  CGDebugInfo *DebugInfo;
+ ObjCEntrypoints *ObjCData;
+ llvm::MDNode *NoObjCARCExceptionsMetadata;
+ std::unique_ptr<llvm::IndexedInstrProfReader> PGOReader;
+ InstrProfStats PGOStats;
+
+ // A set of references that have only been seen via a weakref so far. This is
+ // used to remove the weak of the reference if we ever see a direct reference
+ // or a definition.
+ llvm::SmallPtrSet<llvm::GlobalValue*, 10> WeakRefReferences;
+
+  /// This contains all the decls which have definitions but which are deferred
+ /// for emission and therefore should only be output if they are actually
+ /// used. If a decl is in this, then it is known to have not been referenced
+ /// yet.
+ std::map<StringRef, GlobalDecl> DeferredDecls;
+
+ /// This is a list of deferred decls which we have seen that *are* actually
+ /// referenced. These get code generated when the module is done.
+ struct DeferredGlobal {
+ DeferredGlobal(llvm::GlobalValue *GV, GlobalDecl GD) : GV(GV), GD(GD) {}
+ llvm::TrackingVH<llvm::GlobalValue> GV;
+ GlobalDecl GD;
+ };
+ std::vector<DeferredGlobal> DeferredDeclsToEmit;
+ void addDeferredDeclToEmit(llvm::GlobalValue *GV, GlobalDecl GD) {
+ DeferredDeclsToEmit.emplace_back(GV, GD);
+ }
+
+  /// List of aliases we have emitted. Used to make sure that what they point
+  /// to is defined once we get to the end of the translation unit.
+ std::vector<GlobalDecl> Aliases;
+
+ typedef llvm::StringMap<llvm::TrackingVH<llvm::Constant> > ReplacementsTy;
+ ReplacementsTy Replacements;
+
+ /// List of global values to be replaced with something else. Used when we
+ /// want to replace a GlobalValue but can't identify it by its mangled name
+ /// anymore (because the name is already taken).
+ llvm::SmallVector<std::pair<llvm::GlobalValue *, llvm::Constant *>, 8>
+ GlobalValReplacements;
+
+ /// Set of global decls for which we already diagnosed mangled name conflict.
+ /// Required to not issue a warning (on a mangling conflict) multiple times
+ /// for the same decl.
+ llvm::DenseSet<GlobalDecl> DiagnosedConflictingDefinitions;
+
+ /// A queue of (optional) vtables to consider emitting.
+ std::vector<const CXXRecordDecl*> DeferredVTables;
+
+ /// List of global values which are required to be present in the object file;
+ /// bitcast to i8*. This is used for forcing visibility of symbols which may
+ /// otherwise be optimized out.
+ std::vector<llvm::WeakVH> LLVMUsed;
+ std::vector<llvm::WeakVH> LLVMCompilerUsed;
+
+ /// Store the list of global constructors and their respective priorities to
+ /// be emitted when the translation unit is complete.
+ CtorList GlobalCtors;
+
+ /// Store the list of global destructors and their respective priorities to be
+ /// emitted when the translation unit is complete.
+ CtorList GlobalDtors;
+
+ /// An ordered map of canonical GlobalDecls to their mangled names.
+ llvm::MapVector<GlobalDecl, StringRef> MangledDeclNames;
+ llvm::StringMap<GlobalDecl, llvm::BumpPtrAllocator> Manglings;
+
+ /// Global annotations.
+ std::vector<llvm::Constant*> Annotations;
+
+ /// Map used to get unique annotation strings.
+ llvm::StringMap<llvm::Constant*> AnnotationStrings;
+
+ llvm::StringMap<llvm::GlobalVariable *> CFConstantStringMap;
+
+ llvm::DenseMap<llvm::Constant *, llvm::GlobalVariable *> ConstantStringMap;
+ llvm::DenseMap<const Decl*, llvm::Constant *> StaticLocalDeclMap;
+ llvm::DenseMap<const Decl*, llvm::GlobalVariable*> StaticLocalDeclGuardMap;
+ llvm::DenseMap<const Expr*, llvm::Constant *> MaterializedGlobalTemporaryMap;
+
+ llvm::DenseMap<QualType, llvm::Constant *> AtomicSetterHelperFnMap;
+ llvm::DenseMap<QualType, llvm::Constant *> AtomicGetterHelperFnMap;
+
+ /// Map used to get unique type descriptor constants for sanitizers.
+ llvm::DenseMap<QualType, llvm::Constant *> TypeDescriptorMap;
+
+ /// Map used to track internal linkage functions declared within
+ /// extern "C" regions.
+ typedef llvm::MapVector<IdentifierInfo *,
+ llvm::GlobalValue *> StaticExternCMap;
+ StaticExternCMap StaticExternCValues;
+
+ /// \brief thread_local variables defined or used in this TU.
+ std::vector<const VarDecl *> CXXThreadLocals;
+
+ /// \brief thread_local variables with initializers that need to run
+ /// before any thread_local variable in this TU is odr-used.
+ std::vector<llvm::Function *> CXXThreadLocalInits;
+ std::vector<const VarDecl *> CXXThreadLocalInitVars;
+
+ /// Global variables with initializers that need to run before main.
+ std::vector<llvm::Function *> CXXGlobalInits;
+
+ /// When a C++ decl with an initializer is deferred, null is
+ /// appended to CXXGlobalInits, and the index of that null is placed
+ /// here so that the initializer will be performed in the correct
+ /// order. Once the decl is emitted, the index is replaced with ~0U to ensure
+ /// that we don't re-emit the initializer.
+ llvm::DenseMap<const Decl*, unsigned> DelayedCXXInitPosition;
+
+ typedef std::pair<OrderGlobalInits, llvm::Function*> GlobalInitData;
+
+ struct GlobalInitPriorityCmp {
+ bool operator()(const GlobalInitData &LHS,
+ const GlobalInitData &RHS) const {
+ return LHS.first.priority < RHS.first.priority;
+ }
+ };
+
+ /// Global variables with initializers whose order of initialization is set by
+ /// init_priority attribute.
+ SmallVector<GlobalInitData, 8> PrioritizedCXXGlobalInits;
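+  // (The default priority is 65535, so e.g. an initializer marked
+  // __attribute__((init_priority(101))) runs ahead of unprioritized ones.)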
+
+ /// Global destructor functions and arguments that need to run on termination.
+ std::vector<std::pair<llvm::WeakVH,llvm::Constant*> > CXXGlobalDtors;
+
+  /// \brief The complete set of modules that have been imported.
+ llvm::SetVector<clang::Module *> ImportedModules;
+
+ /// \brief A vector of metadata strings.
+ SmallVector<llvm::Metadata *, 16> LinkerOptionsMetadata;
+
+ /// @name Cache for Objective-C runtime types
+ /// @{
+
+ /// Cached reference to the class for constant strings. This value has type
+ /// int * but is actually an Obj-C class pointer.
+ llvm::WeakVH CFConstantStringClassRef;
+
+ /// Cached reference to the class for constant strings. This value has type
+ /// int * but is actually an Obj-C class pointer.
+ llvm::WeakVH ConstantStringClassRef;
+
+ /// \brief The LLVM type corresponding to NSConstantString.
+ llvm::StructType *NSConstantStringType;
+
+ /// \brief The type used to describe the state of a fast enumeration in
+ /// Objective-C's for..in loop.
+ QualType ObjCFastEnumerationStateType;
+
+ /// @}
+
+ /// Lazily create the Objective-C runtime
+ void createObjCRuntime();
+
+ void createOpenCLRuntime();
+ void createOpenMPRuntime();
+ void createCUDARuntime();
+
+ bool isTriviallyRecursive(const FunctionDecl *F);
+ bool shouldEmitFunction(GlobalDecl GD);
+
+ /// @name Cache for Blocks Runtime Globals
+ /// @{
+
+ llvm::Constant *NSConcreteGlobalBlock;
+ llvm::Constant *NSConcreteStackBlock;
+
+ llvm::Constant *BlockObjectAssign;
+ llvm::Constant *BlockObjectDispose;
+
+ llvm::Type *BlockDescriptorType;
+ llvm::Type *GenericBlockLiteralType;
+
+ struct {
+ int GlobalUniqueCount;
+ } Block;
+
+ /// void @llvm.lifetime.start(i64 %size, i8* nocapture <ptr>)
+ llvm::Constant *LifetimeStartFn;
+
+ /// void @llvm.lifetime.end(i64 %size, i8* nocapture <ptr>)
+ llvm::Constant *LifetimeEndFn;
+
+ GlobalDecl initializedGlobalDecl;
+
+ std::unique_ptr<SanitizerMetadata> SanitizerMD;
+
+ /// @}
+
+ llvm::DenseMap<const Decl *, bool> DeferredEmptyCoverageMappingDecls;
+
+ std::unique_ptr<CoverageMappingModuleGen> CoverageMapping;
+
+ /// Mapping from canonical types to their metadata identifiers. We need to
+ /// maintain this mapping because identifiers may be formed from distinct
+ /// MDNodes.
+ llvm::DenseMap<QualType, llvm::Metadata *> MetadataIdMap;
+
+public:
+ CodeGenModule(ASTContext &C, const HeaderSearchOptions &headersearchopts,
+ const PreprocessorOptions &ppopts,
+ const CodeGenOptions &CodeGenOpts, llvm::Module &M,
+ DiagnosticsEngine &Diags,
+ CoverageSourceInfo *CoverageInfo = nullptr);
+
+ ~CodeGenModule();
+
+ void clear();
+
+ /// Finalize LLVM code generation.
+ void Release();
+
+ /// Return a reference to the configured Objective-C runtime.
+ CGObjCRuntime &getObjCRuntime() {
+ if (!ObjCRuntime) createObjCRuntime();
+ return *ObjCRuntime;
+ }
+
+ /// Return true iff an Objective-C runtime has been configured.
+ bool hasObjCRuntime() { return !!ObjCRuntime; }
+
+ /// Return a reference to the configured OpenCL runtime.
+ CGOpenCLRuntime &getOpenCLRuntime() {
+ assert(OpenCLRuntime != nullptr);
+ return *OpenCLRuntime;
+ }
+
+ /// Return a reference to the configured OpenMP runtime.
+ CGOpenMPRuntime &getOpenMPRuntime() {
+ assert(OpenMPRuntime != nullptr);
+ return *OpenMPRuntime;
+ }
+
+ /// Return a reference to the configured CUDA runtime.
+ CGCUDARuntime &getCUDARuntime() {
+ assert(CUDARuntime != nullptr);
+ return *CUDARuntime;
+ }
+
+ ObjCEntrypoints &getObjCEntrypoints() const {
+ assert(ObjCData != nullptr);
+ return *ObjCData;
+ }
+
+ InstrProfStats &getPGOStats() { return PGOStats; }
+ llvm::IndexedInstrProfReader *getPGOReader() const { return PGOReader.get(); }
+
+ CoverageMappingModuleGen *getCoverageMapping() const {
+ return CoverageMapping.get();
+ }
+
+ llvm::Constant *getStaticLocalDeclAddress(const VarDecl *D) {
+ return StaticLocalDeclMap[D];
+ }
+ void setStaticLocalDeclAddress(const VarDecl *D,
+ llvm::Constant *C) {
+ StaticLocalDeclMap[D] = C;
+ }
+
+ llvm::Constant *
+ getOrCreateStaticVarDecl(const VarDecl &D,
+ llvm::GlobalValue::LinkageTypes Linkage);
+
+ llvm::GlobalVariable *getStaticLocalDeclGuardAddress(const VarDecl *D) {
+ return StaticLocalDeclGuardMap[D];
+ }
+ void setStaticLocalDeclGuardAddress(const VarDecl *D,
+ llvm::GlobalVariable *C) {
+ StaticLocalDeclGuardMap[D] = C;
+ }
+
+ bool lookupRepresentativeDecl(StringRef MangledName,
+ GlobalDecl &Result) const;
+
+ llvm::Constant *getAtomicSetterHelperFnMap(QualType Ty) {
+ return AtomicSetterHelperFnMap[Ty];
+ }
+ void setAtomicSetterHelperFnMap(QualType Ty,
+ llvm::Constant *Fn) {
+ AtomicSetterHelperFnMap[Ty] = Fn;
+ }
+
+ llvm::Constant *getAtomicGetterHelperFnMap(QualType Ty) {
+ return AtomicGetterHelperFnMap[Ty];
+ }
+ void setAtomicGetterHelperFnMap(QualType Ty,
+ llvm::Constant *Fn) {
+ AtomicGetterHelperFnMap[Ty] = Fn;
+ }
+
+ llvm::Constant *getTypeDescriptorFromMap(QualType Ty) {
+ return TypeDescriptorMap[Ty];
+ }
+ void setTypeDescriptorInMap(QualType Ty, llvm::Constant *C) {
+ TypeDescriptorMap[Ty] = C;
+ }
+
+ CGDebugInfo *getModuleDebugInfo() { return DebugInfo; }
+
+ llvm::MDNode *getNoObjCARCExceptionsMetadata() {
+ if (!NoObjCARCExceptionsMetadata)
+ NoObjCARCExceptionsMetadata = llvm::MDNode::get(getLLVMContext(), None);
+ return NoObjCARCExceptionsMetadata;
+ }
+
+ ASTContext &getContext() const { return Context; }
+ const LangOptions &getLangOpts() const { return LangOpts; }
+ const HeaderSearchOptions &getHeaderSearchOpts()
+ const { return HeaderSearchOpts; }
+ const PreprocessorOptions &getPreprocessorOpts()
+ const { return PreprocessorOpts; }
+ const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
+ llvm::Module &getModule() const { return TheModule; }
+ DiagnosticsEngine &getDiags() const { return Diags; }
+ const llvm::DataLayout &getDataLayout() const {
+ return TheModule.getDataLayout();
+ }
+ const TargetInfo &getTarget() const { return Target; }
+ const llvm::Triple &getTriple() const;
+ bool supportsCOMDAT() const;
+ void maybeSetTrivialComdat(const Decl &D, llvm::GlobalObject &GO);
+
+ CGCXXABI &getCXXABI() const { return *ABI; }
+ llvm::LLVMContext &getLLVMContext() { return VMContext; }
+
+ bool shouldUseTBAA() const { return TBAA != nullptr; }
+
+ const TargetCodeGenInfo &getTargetCodeGenInfo();
+
+ CodeGenTypes &getTypes() { return Types; }
+
+ CodeGenVTables &getVTables() { return VTables; }
+
+ ItaniumVTableContext &getItaniumVTableContext() {
+ return VTables.getItaniumVTableContext();
+ }
+
+ MicrosoftVTableContext &getMicrosoftVTableContext() {
+ return VTables.getMicrosoftVTableContext();
+ }
+
+ CtorList &getGlobalCtors() { return GlobalCtors; }
+ CtorList &getGlobalDtors() { return GlobalDtors; }
+
+ llvm::MDNode *getTBAAInfo(QualType QTy);
+ llvm::MDNode *getTBAAInfoForVTablePtr();
+ llvm::MDNode *getTBAAStructInfo(QualType QTy);
+ /// Return the path-aware tag for given base type, access node and offset.
+ llvm::MDNode *getTBAAStructTagInfo(QualType BaseTy, llvm::MDNode *AccessN,
+ uint64_t O);
+
+ bool isTypeConstant(QualType QTy, bool ExcludeCtorDtor);
+
+ bool isPaddedAtomicType(QualType type);
+ bool isPaddedAtomicType(const AtomicType *type);
+
+ /// Decorate the instruction with a TBAA tag. For scalar TBAA, the tag
+ /// is the same as the type. For struct-path aware TBAA, the tag
+ /// is different from the type: base type, access type and offset.
+ /// When ConvertTypeToTag is true, we create a tag based on the scalar type.
+ void DecorateInstructionWithTBAA(llvm::Instruction *Inst,
+ llvm::MDNode *TBAAInfo,
+ bool ConvertTypeToTag = true);
+
+ /// Adds !invariant.barrier !tag to instruction
+ void DecorateInstructionWithInvariantGroup(llvm::Instruction *I,
+ const CXXRecordDecl *RD);
+
+ /// Emit the given number of characters as a value of type size_t.
+ llvm::ConstantInt *getSize(CharUnits numChars);
+
+ /// Set the visibility for the given LLVM GlobalValue.
+ void setGlobalVisibility(llvm::GlobalValue *GV, const NamedDecl *D) const;
+
+ /// Set the TLS mode for the given LLVM GlobalValue for the thread-local
+ /// variable declaration D.
+ void setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const;
+
+ static llvm::GlobalValue::VisibilityTypes GetLLVMVisibility(Visibility V) {
+ switch (V) {
+ case DefaultVisibility: return llvm::GlobalValue::DefaultVisibility;
+ case HiddenVisibility: return llvm::GlobalValue::HiddenVisibility;
+ case ProtectedVisibility: return llvm::GlobalValue::ProtectedVisibility;
+ }
+ llvm_unreachable("unknown visibility!");
+ }
+
+ llvm::Constant *GetAddrOfGlobal(GlobalDecl GD, bool IsForDefinition = false);
+
+ /// Return a global variable of the given type. If a variable with a
+ /// different type already exists, then a new variable with the right type
+ /// will be created and all uses of the old variable will be replaced with a
+ /// bitcast to the new variable.
+ llvm::GlobalVariable *
+ CreateOrReplaceCXXRuntimeVariable(StringRef Name, llvm::Type *Ty,
+ llvm::GlobalValue::LinkageTypes Linkage);
+
+ llvm::Function *
+ CreateGlobalInitOrDestructFunction(llvm::FunctionType *ty, const Twine &name,
+ const CGFunctionInfo &FI,
+ SourceLocation Loc = SourceLocation(),
+ bool TLS = false);
+
+ /// Return the address space of the underlying global variable for D, as
+ /// determined by its declaration. Normally this is the same as the address
+ /// space of D's type, but in CUDA, address spaces are associated with
+ /// declarations, not types.
+ unsigned GetGlobalVarAddressSpace(const VarDecl *D, unsigned AddrSpace);
+
+ /// Return the llvm::Constant for the address of the given global variable.
+ /// If Ty is non-null and if the global doesn't exist, then it will be
+ /// created with the specified type instead of whatever the normal requested
+ /// type would be.
+ llvm::Constant *GetAddrOfGlobalVar(const VarDecl *D,
+ llvm::Type *Ty = nullptr);
+
+ /// Return the address of the given function. If Ty is non-null, then this
+ /// function will use the specified type if it has to create it.
+ llvm::Constant *GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty = nullptr,
+ bool ForVTable = false,
+ bool DontDefer = false,
+ bool IsForDefinition = false);
+
+ /// Get the address of the RTTI descriptor for the given type.
+ llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false);
+
+ /// Get the address of a uuid descriptor.
+ ConstantAddress GetAddrOfUuidDescriptor(const CXXUuidofExpr* E);
+
+ /// Get the address of the thunk for the given global decl.
+ llvm::Constant *GetAddrOfThunk(GlobalDecl GD, const ThunkInfo &Thunk);
+
+ /// Get a reference to the target of VD.
+ ConstantAddress GetWeakRefReference(const ValueDecl *VD);
+
+ /// Returns the assumed alignment of an opaque pointer to the given class.
+ CharUnits getClassPointerAlignment(const CXXRecordDecl *CD);
+
+ /// Returns the assumed alignment of a virtual base of a class.
+ CharUnits getVBaseAlignment(CharUnits DerivedAlign,
+ const CXXRecordDecl *Derived,
+ const CXXRecordDecl *VBase);
+
+ /// Given a class pointer with an actual known alignment, and the
+ /// expected alignment of an object at a dynamic offset w.r.t. that
+ /// pointer, return the alignment to assume at the offset.
+ CharUnits getDynamicOffsetAlignment(CharUnits ActualAlign,
+ const CXXRecordDecl *Class,
+ CharUnits ExpectedTargetAlign);
+
+ CharUnits
+ computeNonVirtualBaseClassOffset(const CXXRecordDecl *DerivedClass,
+ CastExpr::path_const_iterator Start,
+ CastExpr::path_const_iterator End);
+
+ /// Returns the offset from a derived class to a base class along the given
+ /// path. Returns null if the offset is 0.
+ llvm::Constant *
+ GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd);
+
+ llvm::FoldingSet<BlockByrefHelpers> ByrefHelpersCache;
+
+ /// Fetches the global unique block count.
+ int getUniqueBlockCount() { return ++Block.GlobalUniqueCount; }
+
+ /// Fetches the type of a generic block descriptor.
+ llvm::Type *getBlockDescriptorType();
+
+ /// Fetches the type of a generic block literal.
+ llvm::Type *getGenericBlockLiteralType();
+
+ /// Gets the address of a block which requires no captures.
+ llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *);
+
+ /// Return a pointer to a constant CFString object for the given string.
+ ConstantAddress GetAddrOfConstantCFString(const StringLiteral *Literal);
+
+ /// Return a pointer to a constant NSString object for the given string, or
+ /// to a user-defined string object as specified via the
+ /// -fconstant-string-class=class_name option.
+ ConstantAddress GetAddrOfConstantString(const StringLiteral *Literal);
+
+ /// Return a constant array for the given string.
+ llvm::Constant *GetConstantArrayFromStringLiteral(const StringLiteral *E);
+
+ /// Return a pointer to a constant array for the given string literal.
+ ConstantAddress
+ GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
+ StringRef Name = ".str");
+
+ /// Return a pointer to a constant array for the given ObjCEncodeExpr node.
+ ConstantAddress
+ GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *);
+
+ /// Returns a pointer to a character array containing the literal and a
+ /// terminating '\0' character. The result has pointer-to-array type.
+ ///
+ /// \param GlobalName If provided, the name to use for the global (if one is
+ /// created).
+ ConstantAddress
+ GetAddrOfConstantCString(const std::string &Str,
+ const char *GlobalName = nullptr);
+
+ /// Returns a pointer to a constant global variable for the given file-scope
+ /// compound literal expression.
+ ConstantAddress GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr*E);
+
+ /// \brief Returns a pointer to a global variable representing a temporary
+ /// with static or thread storage duration.
+ ConstantAddress GetAddrOfGlobalTemporary(const MaterializeTemporaryExpr *E,
+ const Expr *Inner);
+
+ /// \brief Retrieve the record type that describes the state of an
+ /// Objective-C fast enumeration loop (for..in).
+ QualType getObjCFastEnumerationStateType();
+
+ // Produce code for this constructor/destructor. This method doesn't try
+ // to apply any ABI rules about which other constructors/destructors
+ // are needed or whether they are aliases of each other.
+ llvm::Function *codegenCXXStructor(const CXXMethodDecl *MD,
+ StructorType Type);
+
+ /// Return the address of the constructor/destructor of the given type.
+ llvm::Constant *
+ getAddrOfCXXStructor(const CXXMethodDecl *MD, StructorType Type,
+ const CGFunctionInfo *FnInfo = nullptr,
+ llvm::FunctionType *FnType = nullptr,
+ bool DontDefer = false, bool IsForDefinition = false);
+
+ /// Given a builtin id for a function like "__builtin_fabsf", return a
+ /// Function* for "fabsf".
+ llvm::Value *getBuiltinLibFunction(const FunctionDecl *FD,
+ unsigned BuiltinID);
+
+ llvm::Function *getIntrinsic(unsigned IID, ArrayRef<llvm::Type*> Tys = None);
+
+ /// Emit code for a single top level declaration.
+ void EmitTopLevelDecl(Decl *D);
+
+ /// \brief Store a deferred empty coverage mapping for an unused
+ /// and thus uninstrumented top level declaration.
+ void AddDeferredUnusedCoverageMapping(Decl *D);
+
+ /// \brief Remove the deferred empty coverage mapping as this
+ /// declaration is actually instrumented.
+ void ClearUnusedCoverageMapping(const Decl *D);
+
+ /// \brief Emit all the deferred coverage mappings
+ /// for the uninstrumented functions.
+ void EmitDeferredUnusedCoverageMappings();
+
+ /// Tell the consumer that this variable has been instantiated.
+ void HandleCXXStaticMemberVarInstantiation(VarDecl *VD);
+
+ /// \brief If the declaration has internal linkage but is inside an
+ /// extern "C" linkage specification, prepare to emit an alias for it
+ /// to the expected name.
+ template<typename SomeDecl>
+ void MaybeHandleStaticInExternC(const SomeDecl *D, llvm::GlobalValue *GV);
+
+ /// Add a global to a list to be added to the llvm.used metadata.
+ void addUsedGlobal(llvm::GlobalValue *GV);
+
+ /// Add a global to a list to be added to the llvm.compiler.used metadata.
+ void addCompilerUsedGlobal(llvm::GlobalValue *GV);
+
+ /// Add a destructor and object to add to the C++ global destructor function.
+ void AddCXXDtorEntry(llvm::Constant *DtorFn, llvm::Constant *Object) {
+ CXXGlobalDtors.emplace_back(DtorFn, Object);
+ }
+
+ /// Create a new runtime function with the specified type and name.
+ llvm::Constant *CreateRuntimeFunction(llvm::FunctionType *Ty,
+ StringRef Name,
+ llvm::AttributeSet ExtraAttrs =
+ llvm::AttributeSet());
+ /// Create a new compiler builtin function with the specified type and name.
+ llvm::Constant *CreateBuiltinFunction(llvm::FunctionType *Ty,
+ StringRef Name,
+ llvm::AttributeSet ExtraAttrs =
+ llvm::AttributeSet());
+ /// Create a new runtime global variable with the specified type and name.
+ llvm::Constant *CreateRuntimeVariable(llvm::Type *Ty,
+ StringRef Name);
+
+ ///@name Custom Blocks Runtime Interfaces
+ ///@{
+
+ llvm::Constant *getNSConcreteGlobalBlock();
+ llvm::Constant *getNSConcreteStackBlock();
+ llvm::Constant *getBlockObjectAssign();
+ llvm::Constant *getBlockObjectDispose();
+
+ ///@}
+
+ llvm::Constant *getLLVMLifetimeStartFn();
+ llvm::Constant *getLLVMLifetimeEndFn();
+
+ // Make sure that this type is translated.
+ void UpdateCompletedType(const TagDecl *TD);
+
+ llvm::Constant *getMemberPointerConstant(const UnaryOperator *e);
+
+ /// Try to emit the initializer for the given declaration as a constant;
+ /// returns 0 if the expression cannot be emitted as a constant.
+ llvm::Constant *EmitConstantInit(const VarDecl &D,
+ CodeGenFunction *CGF = nullptr);
+
+ /// Try to emit the given expression as a constant; returns 0 if the
+ /// expression cannot be emitted as a constant.
+ llvm::Constant *EmitConstantExpr(const Expr *E, QualType DestType,
+ CodeGenFunction *CGF = nullptr);
+
+ /// Emit the given constant value as a constant, in the type's scalar
+ /// representation.
+ llvm::Constant *EmitConstantValue(const APValue &Value, QualType DestType,
+ CodeGenFunction *CGF = nullptr);
+
+ /// Emit the given constant value as a constant, in the type's memory
+ /// representation.
+ llvm::Constant *EmitConstantValueForMemory(const APValue &Value,
+ QualType DestType,
+ CodeGenFunction *CGF = nullptr);
+
+ /// \brief Emit type info if type of an expression is a variably modified
+ /// type. Also emit proper debug info for cast types.
+ void EmitExplicitCastExprType(const ExplicitCastExpr *E,
+ CodeGenFunction *CGF = nullptr);
+
+ /// Return the result of value-initializing the given type, i.e. a null
+ /// expression of the given type. This is usually, but not always, an LLVM
+ /// null constant.
+ llvm::Constant *EmitNullConstant(QualType T);
+
+ /// Return a null constant appropriate for zero-initializing a base class with
+ /// the given type. This is usually, but not always, an LLVM null constant.
+ llvm::Constant *EmitNullConstantForBase(const CXXRecordDecl *Record);
+
+ /// Emit a general error that something can't be done.
+ void Error(SourceLocation loc, StringRef error);
+
+ /// Print out an error that codegen doesn't support the specified stmt yet.
+ void ErrorUnsupported(const Stmt *S, const char *Type);
+
+ /// Print out an error that codegen doesn't support the specified decl yet.
+ void ErrorUnsupported(const Decl *D, const char *Type);
+
+ /// Set the attributes on the LLVM function for the given decl and function
+ /// info. This applies attributes necessary for handling the ABI as well as
+ /// user specified attributes like section.
+ void SetInternalFunctionAttributes(const Decl *D, llvm::Function *F,
+ const CGFunctionInfo &FI);
+
+ /// Set the LLVM function attributes (sext, zext, etc).
+ void SetLLVMFunctionAttributes(const Decl *D,
+ const CGFunctionInfo &Info,
+ llvm::Function *F);
+
+ /// Set the LLVM function attributes which only apply to a function
+ /// definition.
+ void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F);
+
+ /// Return true iff the given type uses 'sret' when used as a return type.
+ bool ReturnTypeUsesSRet(const CGFunctionInfo &FI);
+
+ /// Return true iff the given type uses an argument slot when 'sret' is used
+ /// as a return type.
+ bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI);
+
+ /// Return true iff the given type uses 'fpret' when used as a return type.
+ bool ReturnTypeUsesFPRet(QualType ResultType);
+
+ /// Return true iff the given type uses 'fp2ret' when used as a return type.
+ bool ReturnTypeUsesFP2Ret(QualType ResultType);
+
+ /// Get the LLVM attributes and calling convention to use for a particular
+ /// function type.
+ ///
+ /// \param Name - The function name.
+ /// \param Info - The function type information.
+ /// \param CalleeInfo - The callee information these attributes are being
+ /// constructed for. If valid, the attributes applied to this decl may
+ /// contribute to the function attributes and calling convention.
+ /// \param PAL [out] - On return, the attribute list to use.
+ /// \param CallingConv [out] - On return, the LLVM calling convention to use.
+ void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info,
+ CGCalleeInfo CalleeInfo, AttributeListType &PAL,
+ unsigned &CallingConv, bool AttrOnCallSite);
+
+ // Fills in the supplied string map with the set of target features for the
+ // passed in function.
+ void getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
+ const FunctionDecl *FD);
+
+ StringRef getMangledName(GlobalDecl GD);
+ StringRef getBlockMangledName(GlobalDecl GD, const BlockDecl *BD);
+
+ void EmitTentativeDefinition(const VarDecl *D);
+
+ void EmitVTable(CXXRecordDecl *Class);
+
+ /// \brief Appends Opts to the "Linker Options" metadata value.
+ void AppendLinkerOptions(StringRef Opts);
+
+ /// \brief Appends a detect mismatch command to the linker options.
+ void AddDetectMismatch(StringRef Name, StringRef Value);
+
+ /// \brief Appends a dependent lib to the "Linker Options" metadata value.
+ void AddDependentLib(StringRef Lib);
+
+ llvm::GlobalVariable::LinkageTypes getFunctionLinkage(GlobalDecl GD);
+
+ void setFunctionLinkage(GlobalDecl GD, llvm::Function *F) {
+ F->setLinkage(getFunctionLinkage(GD));
+ }
+
+ /// Set the DLL storage class on F.
+ void setFunctionDLLStorageClass(GlobalDecl GD, llvm::Function *F);
+
+ /// Return the appropriate linkage for the vtable, VTT, and type information
+ /// of the given class.
+ llvm::GlobalVariable::LinkageTypes getVTableLinkage(const CXXRecordDecl *RD);
+
+ /// Return the store size, in character units, of the given LLVM type.
+ CharUnits GetTargetTypeStoreSize(llvm::Type *Ty) const;
+
+ /// Returns LLVM linkage for a declarator.
+ llvm::GlobalValue::LinkageTypes
+ getLLVMLinkageForDeclarator(const DeclaratorDecl *D, GVALinkage Linkage,
+ bool IsConstantVariable);
+
+ /// Returns LLVM linkage for a declarator.
+ llvm::GlobalValue::LinkageTypes
+ getLLVMLinkageVarDefinition(const VarDecl *VD, bool IsConstant);
+
+ /// Emit all the global annotations.
+ void EmitGlobalAnnotations();
+
+ /// Emit an annotation string.
+ llvm::Constant *EmitAnnotationString(StringRef Str);
+
+ /// Emit the annotation's translation unit.
+ llvm::Constant *EmitAnnotationUnit(SourceLocation Loc);
+
+ /// Emit the annotation line number.
+ llvm::Constant *EmitAnnotationLineNo(SourceLocation L);
+
+ /// Generate the llvm::ConstantStruct which contains the annotation
+ /// information for a given GlobalValue. The annotation struct is
+ /// {i8 *, i8 *, i8 *, i32}. The first field is a constant expression, the
+ /// GlobalValue being annotated. The second field is the constant string
+ /// created from the AnnotateAttr's annotation. The third field is a constant
+ /// string containing the name of the translation unit. The fourth field is
+ /// the line number in the file of the annotated value declaration.
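+ /// For illustration only (global names hypothetical), one entry might look
+ /// like:
+ /// { i8* bitcast (i32* @var to i8*), i8* @.str.annotation,
+ /// i8* @.str.unit, i32 42 }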
+ llvm::Constant *EmitAnnotateAttr(llvm::GlobalValue *GV,
+ const AnnotateAttr *AA,
+ SourceLocation L);
+
+ /// Add global annotations that are set on D, for the global GV. Those
+ /// annotations are emitted during finalization of the LLVM code.
+ void AddGlobalAnnotations(const ValueDecl *D, llvm::GlobalValue *GV);
+
+ bool isInSanitizerBlacklist(llvm::Function *Fn, SourceLocation Loc) const;
+
+ bool isInSanitizerBlacklist(llvm::GlobalVariable *GV, SourceLocation Loc,
+ QualType Ty,
+ StringRef Category = StringRef()) const;
+
+ SanitizerMetadata *getSanitizerMetadata() {
+ return SanitizerMD.get();
+ }
+
+ void addDeferredVTable(const CXXRecordDecl *RD) {
+ DeferredVTables.push_back(RD);
+ }
+
+ /// Emit code for a single global function or var decl. Forward declarations
+ /// are emitted lazily.
+ void EmitGlobal(GlobalDecl D);
+
+ bool TryEmitDefinitionAsAlias(GlobalDecl Alias, GlobalDecl Target,
+ bool InEveryTU);
+ bool TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D);
+
+ /// Set attributes for a global definition.
+ void setFunctionDefinitionAttributes(const FunctionDecl *D,
+ llvm::Function *F);
+
+ llvm::GlobalValue *GetGlobalValue(StringRef Ref);
+
+ /// Set attributes which are common to any form of a global definition (alias,
+ /// Objective-C method, function, global variable).
+ ///
+ /// NOTE: This should only be called for definitions.
+ void SetCommonAttributes(const Decl *D, llvm::GlobalValue *GV);
+
+ /// Set attributes which must be preserved by an alias. This includes common
+ /// attributes (i.e. it includes a call to SetCommonAttributes).
+ ///
+ /// NOTE: This should only be called for definitions.
+ void setAliasAttributes(const Decl *D, llvm::GlobalValue *GV);
+
+ void addReplacement(StringRef Name, llvm::Constant *C);
+
+ void addGlobalValReplacement(llvm::GlobalValue *GV, llvm::Constant *C);
+
+ /// \brief Emit code for the threadprivate directive.
+ /// \param D Threadprivate declaration.
+ void EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D);
+
+ /// Returns whether the given record is blacklisted from control flow
+ /// integrity checks.
+ bool IsCFIBlacklistedRecord(const CXXRecordDecl *RD);
+
+ /// Emit bit set entries for the given vtable using the given layout if
+ /// vptr CFI is enabled.
+ void EmitVTableBitSetEntries(llvm::GlobalVariable *VTable,
+ const VTableLayout &VTLayout);
+
+ /// Generate a cross-DSO type identifier for type.
+ llvm::ConstantInt *CreateCfiIdForTypeMetadata(llvm::Metadata *MD);
+
+ /// Create a metadata identifier for the given type. This may either be an
+ /// MDString (for external identifiers) or a distinct unnamed MDNode (for
+ /// internal identifiers).
+ llvm::Metadata *CreateMetadataIdentifierForType(QualType T);
+
+ /// Create a bitset entry for the given function and add it to BitsetsMD.
+ void CreateFunctionBitSetEntry(const FunctionDecl *FD, llvm::Function *F);
+
+ /// Create a bitset entry for the given vtable and add it to BitsetsMD.
+ void CreateVTableBitSetEntry(llvm::NamedMDNode *BitsetsMD,
+ llvm::GlobalVariable *VTable, CharUnits Offset,
+ const CXXRecordDecl *RD);
+
+ /// \brief Get the declaration of std::terminate for the platform.
+ llvm::Constant *getTerminateFn();
+
+private:
+ llvm::Constant *
+ GetOrCreateLLVMFunction(StringRef MangledName, llvm::Type *Ty, GlobalDecl D,
+ bool ForVTable, bool DontDefer = false,
+ bool IsThunk = false,
+ llvm::AttributeSet ExtraAttrs = llvm::AttributeSet(),
+ bool IsForDefinition = false);
+
+ llvm::Constant *GetOrCreateLLVMGlobal(StringRef MangledName,
+ llvm::PointerType *PTy,
+ const VarDecl *D);
+
+ void setNonAliasAttributes(const Decl *D, llvm::GlobalObject *GO);
+
+ /// Set function attributes for a function declaration.
+ void SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
+ bool IsIncompleteFunction, bool IsThunk);
+
+ void EmitGlobalDefinition(GlobalDecl D, llvm::GlobalValue *GV = nullptr);
+
+ void EmitGlobalFunctionDefinition(GlobalDecl GD, llvm::GlobalValue *GV);
+ void EmitGlobalVarDefinition(const VarDecl *D);
+ void EmitAliasDefinition(GlobalDecl GD);
+ void EmitObjCPropertyImplementations(const ObjCImplementationDecl *D);
+ void EmitObjCIvarInitializations(ObjCImplementationDecl *D);
+
+ // C++ related functions.
+
+ void EmitNamespace(const NamespaceDecl *D);
+ void EmitLinkageSpec(const LinkageSpecDecl *D);
+ void CompleteDIClassType(const CXXMethodDecl* D);
+
+ /// \brief Emit the function that initializes C++ thread_local variables.
+ void EmitCXXThreadLocalInitFunc();
+
+ /// Emit the function that initializes C++ globals.
+ void EmitCXXGlobalInitFunc();
+
+ /// Emit the function that destroys C++ globals.
+ void EmitCXXGlobalDtorFunc();
+
+ /// Emit the function that initializes the specified global (if PerformInit is
+ /// true) and registers its destructor.
+ void EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
+ llvm::GlobalVariable *Addr,
+ bool PerformInit);
+
+ void EmitPointerToInitFunc(const VarDecl *VD, llvm::GlobalVariable *Addr,
+ llvm::Function *InitFunc, InitSegAttr *ISA);
+
+ // FIXME: Hardcoding priority here is gross.
+ void AddGlobalCtor(llvm::Function *Ctor, int Priority = 65535,
+ llvm::Constant *AssociatedData = nullptr);
+ void AddGlobalDtor(llvm::Function *Dtor, int Priority = 65535);
+
+ /// Generates a global array of functions and priorities using the given list
+ /// and name. This array will have appending linkage and is suitable for use
+ /// as a LLVM constructor or destructor array.
+ void EmitCtorList(const CtorList &Fns, const char *GlobalName);
+
+ /// Emit any needed decls for which code generation was deferred.
+ void EmitDeferred();
+
+ /// Call replaceAllUsesWith on all pairs in Replacements.
+ void applyReplacements();
+
+ /// Call replaceAllUsesWith on all pairs in GlobalValReplacements.
+ void applyGlobalValReplacements();
+
+ void checkAliases();
+
+ /// Emit any vtables which we deferred and still have a use for.
+ void EmitDeferredVTables();
+
+ /// Emit the llvm.used and llvm.compiler.used metadata.
+ void emitLLVMUsed();
+
+ /// \brief Emit the link options introduced by imported modules.
+ void EmitModuleLinkOptions();
+
+ /// \brief Emit aliases for internal-linkage declarations inside "C" language
+ /// linkage specifications, giving them the "expected" name where possible.
+ void EmitStaticExternCAliases();
+
+ void EmitDeclMetadata();
+
+ /// \brief Emit the Clang version as llvm.ident metadata.
+ void EmitVersionIdentMetadata();
+
+ /// Emits target specific Metadata for global declarations.
+ void EmitTargetMetadata();
+
+ /// Emit the llvm.gcov metadata used to tell LLVM where to emit the .gcno and
+ /// .gcda files in a way that persists in .bc files.
+ void EmitCoverageFile();
+
+ /// Emits the initializer for a uuidof string.
+ llvm::Constant *EmitUuidofInitializer(StringRef uuidstr);
+
+ /// Determine whether the definition must be emitted; if this returns \c
+ /// false, the definition can be emitted lazily if it's used.
+ bool MustBeEmitted(const ValueDecl *D);
+
+ /// Determine whether the definition can be emitted eagerly, or should be
+ /// delayed until the end of the translation unit. This is relevant for
+ /// definitions whose linkage can change, e.g. implicit function instantiations
+ /// which may later be explicitly instantiated.
+ bool MayBeEmittedEagerly(const ValueDecl *D);
+
+ /// Check whether we can use a "simpler", more core exceptions personality
+ /// function.
+ void SimplifyPersonality();
+};
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_CODEGEN_CODEGENMODULE_H
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenPGO.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenPGO.cpp
new file mode 100644
index 0000000..5ae861e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenPGO.cpp
@@ -0,0 +1,831 @@
+//===--- CodeGenPGO.cpp - PGO Instrumentation for LLVM CodeGen --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Instrumentation-based profile-guided optimization
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenPGO.h"
+#include "CodeGenFunction.h"
+#include "CoverageMappingGen.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/ProfileData/InstrProfReader.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MD5.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+void CodeGenPGO::setFuncName(StringRef Name,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+ llvm::IndexedInstrProfReader *PGOReader = CGM.getPGOReader();
+ FuncName = llvm::getPGOFuncName(
+ Name, Linkage, CGM.getCodeGenOpts().MainFileName,
+ PGOReader ? PGOReader->getVersion() : llvm::IndexedInstrProf::Version);
+
+ // If we're generating a profile, create a variable for the name.
+ if (CGM.getCodeGenOpts().ProfileInstrGenerate)
+ FuncNameVar = llvm::createPGOFuncNameVar(CGM.getModule(), Linkage, FuncName);
+}
+
+void CodeGenPGO::setFuncName(llvm::Function *Fn) {
+ setFuncName(Fn->getName(), Fn->getLinkage());
+}
+
+namespace {
+/// \brief Stable hasher for PGO region counters.
+///
+/// PGOHash produces a stable hash of a given function's control flow.
+///
+/// Changing the output of this hash will invalidate all previously generated
+/// profiles -- i.e., don't do it.
+///
+/// \note When this hash does eventually change (years?), we still need to
+/// support old hashes. We'll need to pull in the version number from the
+/// profile data format and use the matching hash function.
+class PGOHash {
+ uint64_t Working;
+ unsigned Count;
+ llvm::MD5 MD5;
+
+ static const int NumBitsPerType = 6;
+ static const unsigned NumTypesPerWord = sizeof(uint64_t) * 8 / NumBitsPerType;
+ static const unsigned TooBig = 1u << NumBitsPerType;
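+
+ // Illustrative arithmetic: with NumBitsPerType = 6, one 64-bit word packs
+ // NumTypesPerWord = 64 / 6 = 10 values, each strictly below
+ // TooBig = 1 << 6 = 64.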
+
+public:
+ /// \brief Hash values for AST nodes.
+ ///
+ /// Distinct values for AST nodes that have region counters attached.
+ ///
+ /// These values must be stable. All new members must be added at the end,
+ /// and no members should be removed. Changing the enumeration value for an
+ /// AST node will affect the hash of every function that contains that node.
+ enum HashType : unsigned char {
+ None = 0,
+ LabelStmt = 1,
+ WhileStmt,
+ DoStmt,
+ ForStmt,
+ CXXForRangeStmt,
+ ObjCForCollectionStmt,
+ SwitchStmt,
+ CaseStmt,
+ DefaultStmt,
+ IfStmt,
+ CXXTryStmt,
+ CXXCatchStmt,
+ ConditionalOperator,
+ BinaryOperatorLAnd,
+ BinaryOperatorLOr,
+ BinaryConditionalOperator,
+
+ // Keep this last. It's for the static assert that follows.
+ LastHashType
+ };
+ static_assert(LastHashType <= TooBig, "Too many types in HashType");
+
+ // TODO: When this format changes, take in a version number here, and use the
+ // old hash calculation for file formats that used the old hash.
+ PGOHash() : Working(0), Count(0) {}
+ void combine(HashType Type);
+ uint64_t finalize();
+};
+const int PGOHash::NumBitsPerType;
+const unsigned PGOHash::NumTypesPerWord;
+const unsigned PGOHash::TooBig;
+
+/// A RecursiveASTVisitor that fills a map of statements to PGO counters.
+struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
+ /// The next counter value to assign.
+ unsigned NextCounter;
+ /// The function hash.
+ PGOHash Hash;
+ /// The map of statements to counters.
+ llvm::DenseMap<const Stmt *, unsigned> &CounterMap;
+
+ MapRegionCounters(llvm::DenseMap<const Stmt *, unsigned> &CounterMap)
+ : NextCounter(0), CounterMap(CounterMap) {}
+
+ // Blocks and lambdas are handled as separate functions, so we need not
+ // traverse them in the parent context.
+ bool TraverseBlockExpr(BlockExpr *BE) { return true; }
+ bool TraverseLambdaBody(LambdaExpr *LE) { return true; }
+ bool TraverseCapturedStmt(CapturedStmt *CS) { return true; }
+
+ bool VisitDecl(const Decl *D) {
+ switch (D->getKind()) {
+ default:
+ break;
+ case Decl::Function:
+ case Decl::CXXMethod:
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor:
+ case Decl::CXXConversion:
+ case Decl::ObjCMethod:
+ case Decl::Block:
+ case Decl::Captured:
+ CounterMap[D->getBody()] = NextCounter++;
+ break;
+ }
+ return true;
+ }
+
+ bool VisitStmt(const Stmt *S) {
+ auto Type = getHashType(S);
+ if (Type == PGOHash::None)
+ return true;
+
+ CounterMap[S] = NextCounter++;
+ Hash.combine(Type);
+ return true;
+ }
+ PGOHash::HashType getHashType(const Stmt *S) {
+ switch (S->getStmtClass()) {
+ default:
+ break;
+ case Stmt::LabelStmtClass:
+ return PGOHash::LabelStmt;
+ case Stmt::WhileStmtClass:
+ return PGOHash::WhileStmt;
+ case Stmt::DoStmtClass:
+ return PGOHash::DoStmt;
+ case Stmt::ForStmtClass:
+ return PGOHash::ForStmt;
+ case Stmt::CXXForRangeStmtClass:
+ return PGOHash::CXXForRangeStmt;
+ case Stmt::ObjCForCollectionStmtClass:
+ return PGOHash::ObjCForCollectionStmt;
+ case Stmt::SwitchStmtClass:
+ return PGOHash::SwitchStmt;
+ case Stmt::CaseStmtClass:
+ return PGOHash::CaseStmt;
+ case Stmt::DefaultStmtClass:
+ return PGOHash::DefaultStmt;
+ case Stmt::IfStmtClass:
+ return PGOHash::IfStmt;
+ case Stmt::CXXTryStmtClass:
+ return PGOHash::CXXTryStmt;
+ case Stmt::CXXCatchStmtClass:
+ return PGOHash::CXXCatchStmt;
+ case Stmt::ConditionalOperatorClass:
+ return PGOHash::ConditionalOperator;
+ case Stmt::BinaryConditionalOperatorClass:
+ return PGOHash::BinaryConditionalOperator;
+ case Stmt::BinaryOperatorClass: {
+ const BinaryOperator *BO = cast<BinaryOperator>(S);
+ if (BO->getOpcode() == BO_LAnd)
+ return PGOHash::BinaryOperatorLAnd;
+ if (BO->getOpcode() == BO_LOr)
+ return PGOHash::BinaryOperatorLOr;
+ break;
+ }
+ }
+ return PGOHash::None;
+ }
+};
+
+/// A StmtVisitor that propagates the raw counts through the AST and
+/// records the count at statements where the value may change.
+struct ComputeRegionCounts : public ConstStmtVisitor<ComputeRegionCounts> {
+ /// PGO state.
+ CodeGenPGO &PGO;
+
+ /// A flag that is set when the current count should be recorded on the
+ /// next statement, such as at the exit of a loop.
+ bool RecordNextStmtCount;
+
+ /// The count at the current location in the traversal.
+ uint64_t CurrentCount;
+
+ /// The map of statements to count values.
+ llvm::DenseMap<const Stmt *, uint64_t> &CountMap;
+
+ /// BreakContinueStack - Keep counts of breaks and continues inside loops.
+ struct BreakContinue {
+ uint64_t BreakCount;
+ uint64_t ContinueCount;
+ BreakContinue() : BreakCount(0), ContinueCount(0) {}
+ };
+ SmallVector<BreakContinue, 8> BreakContinueStack;
+
+ ComputeRegionCounts(llvm::DenseMap<const Stmt *, uint64_t> &CountMap,
+ CodeGenPGO &PGO)
+ : PGO(PGO), RecordNextStmtCount(false), CountMap(CountMap) {}
+
+ void RecordStmtCount(const Stmt *S) {
+ if (RecordNextStmtCount) {
+ CountMap[S] = CurrentCount;
+ RecordNextStmtCount = false;
+ }
+ }
+
+ /// Set and return the current count.
+ uint64_t setCount(uint64_t Count) {
+ CurrentCount = Count;
+ return Count;
+ }
+
+ void VisitStmt(const Stmt *S) {
+ RecordStmtCount(S);
+ for (const Stmt *Child : S->children())
+ if (Child)
+ this->Visit(Child);
+ }
+
+ void VisitFunctionDecl(const FunctionDecl *D) {
+ // Counter tracks entry to the function body.
+ uint64_t BodyCount = setCount(PGO.getRegionCount(D->getBody()));
+ CountMap[D->getBody()] = BodyCount;
+ Visit(D->getBody());
+ }
+
+ // Skip lambda expressions. We visit these as FunctionDecls when we're
+ // generating them and aren't interested in the body when generating a
+ // parent context.
+ void VisitLambdaExpr(const LambdaExpr *LE) {}
+
+ void VisitCapturedDecl(const CapturedDecl *D) {
+ // Counter tracks entry to the capture body.
+ uint64_t BodyCount = setCount(PGO.getRegionCount(D->getBody()));
+ CountMap[D->getBody()] = BodyCount;
+ Visit(D->getBody());
+ }
+
+ void VisitObjCMethodDecl(const ObjCMethodDecl *D) {
+ // Counter tracks entry to the method body.
+ uint64_t BodyCount = setCount(PGO.getRegionCount(D->getBody()));
+ CountMap[D->getBody()] = BodyCount;
+ Visit(D->getBody());
+ }
+
+ void VisitBlockDecl(const BlockDecl *D) {
+ // Counter tracks entry to the block body.
+ uint64_t BodyCount = setCount(PGO.getRegionCount(D->getBody()));
+ CountMap[D->getBody()] = BodyCount;
+ Visit(D->getBody());
+ }
+
+ void VisitReturnStmt(const ReturnStmt *S) {
+ RecordStmtCount(S);
+ if (S->getRetValue())
+ Visit(S->getRetValue());
+ CurrentCount = 0;
+ RecordNextStmtCount = true;
+ }
+
+ void VisitCXXThrowExpr(const CXXThrowExpr *E) {
+ RecordStmtCount(E);
+ if (E->getSubExpr())
+ Visit(E->getSubExpr());
+ CurrentCount = 0;
+ RecordNextStmtCount = true;
+ }
+
+ void VisitGotoStmt(const GotoStmt *S) {
+ RecordStmtCount(S);
+ CurrentCount = 0;
+ RecordNextStmtCount = true;
+ }
+
+ void VisitLabelStmt(const LabelStmt *S) {
+ RecordNextStmtCount = false;
+ // Counter tracks the block following the label.
+ uint64_t BlockCount = setCount(PGO.getRegionCount(S));
+ CountMap[S] = BlockCount;
+ Visit(S->getSubStmt());
+ }
+
+ void VisitBreakStmt(const BreakStmt *S) {
+ RecordStmtCount(S);
+ assert(!BreakContinueStack.empty() && "break not in a loop or switch!");
+ BreakContinueStack.back().BreakCount += CurrentCount;
+ CurrentCount = 0;
+ RecordNextStmtCount = true;
+ }
+
+ void VisitContinueStmt(const ContinueStmt *S) {
+ RecordStmtCount(S);
+ assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
+ BreakContinueStack.back().ContinueCount += CurrentCount;
+ CurrentCount = 0;
+ RecordNextStmtCount = true;
+ }
+
+ void VisitWhileStmt(const WhileStmt *S) {
+ RecordStmtCount(S);
+ uint64_t ParentCount = CurrentCount;
+
+ BreakContinueStack.push_back(BreakContinue());
+ // Visit the body region first so the break/continue adjustments can be
+ // included when visiting the condition.
+ uint64_t BodyCount = setCount(PGO.getRegionCount(S));
+ CountMap[S->getBody()] = CurrentCount;
+ Visit(S->getBody());
+ uint64_t BackedgeCount = CurrentCount;
+
+ // ...then go back and propagate counts through the condition. The count
+ // at the start of the condition is the sum of the incoming edges,
+ // the backedge from the end of the loop body, and the edges from
+ // continue statements.
+ BreakContinue BC = BreakContinueStack.pop_back_val();
+ uint64_t CondCount =
+ setCount(ParentCount + BackedgeCount + BC.ContinueCount);
+ CountMap[S->getCond()] = CondCount;
+ Visit(S->getCond());
+ setCount(BC.BreakCount + CondCount - BodyCount);
+ RecordNextStmtCount = true;
+ }
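+
+ // Worked example for VisitWhileStmt (counts assumed for exposition): a loop
+ // entered 10 times whose body runs 30 times, with continue taken twice and
+ // break once, leaves BackedgeCount = 30 - 2 - 1 = 27, so
+ // CondCount = 10 + 27 + 2 = 39 and the exit count is 1 + 39 - 30 = 10,
+ // matching the parent count.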
+
+ void VisitDoStmt(const DoStmt *S) {
+ RecordStmtCount(S);
+ uint64_t LoopCount = PGO.getRegionCount(S);
+
+ BreakContinueStack.push_back(BreakContinue());
+ // The count doesn't include the fallthrough from the parent scope. Add it.
+ uint64_t BodyCount = setCount(LoopCount + CurrentCount);
+ CountMap[S->getBody()] = BodyCount;
+ Visit(S->getBody());
+ uint64_t BackedgeCount = CurrentCount;
+
+ BreakContinue BC = BreakContinueStack.pop_back_val();
+ // The count at the start of the condition is equal to the count at the
+ // end of the body, plus any continues.
+ uint64_t CondCount = setCount(BackedgeCount + BC.ContinueCount);
+ CountMap[S->getCond()] = CondCount;
+ Visit(S->getCond());
+ setCount(BC.BreakCount + CondCount - LoopCount);
+ RecordNextStmtCount = true;
+ }
+
+ void VisitForStmt(const ForStmt *S) {
+ RecordStmtCount(S);
+ if (S->getInit())
+ Visit(S->getInit());
+
+ uint64_t ParentCount = CurrentCount;
+
+ BreakContinueStack.push_back(BreakContinue());
+ // Visit the body region first. (This is basically the same as a while
+ // loop; see further comments in VisitWhileStmt.)
+ uint64_t BodyCount = setCount(PGO.getRegionCount(S));
+ CountMap[S->getBody()] = BodyCount;
+ Visit(S->getBody());
+ uint64_t BackedgeCount = CurrentCount;
+ BreakContinue BC = BreakContinueStack.pop_back_val();
+
+ // The increment is essentially part of the body but it needs to include
+ // the count for all the continue statements.
+ if (S->getInc()) {
+ uint64_t IncCount = setCount(BackedgeCount + BC.ContinueCount);
+ CountMap[S->getInc()] = IncCount;
+ Visit(S->getInc());
+ }
+
+ // ...then go back and propagate counts through the condition.
+ uint64_t CondCount =
+ setCount(ParentCount + BackedgeCount + BC.ContinueCount);
+ if (S->getCond()) {
+ CountMap[S->getCond()] = CondCount;
+ Visit(S->getCond());
+ }
+ setCount(BC.BreakCount + CondCount - BodyCount);
+ RecordNextStmtCount = true;
+ }
+
+ void VisitCXXForRangeStmt(const CXXForRangeStmt *S) {
+ RecordStmtCount(S);
+ Visit(S->getLoopVarStmt());
+ Visit(S->getRangeStmt());
+ Visit(S->getBeginEndStmt());
+
+ uint64_t ParentCount = CurrentCount;
+ BreakContinueStack.push_back(BreakContinue());
+ // Visit the body region first. (This is basically the same as a while
+ // loop; see further comments in VisitWhileStmt.)
+ uint64_t BodyCount = setCount(PGO.getRegionCount(S));
+ CountMap[S->getBody()] = BodyCount;
+ Visit(S->getBody());
+ uint64_t BackedgeCount = CurrentCount;
+ BreakContinue BC = BreakContinueStack.pop_back_val();
+
+ // The increment is essentially part of the body but it needs to include
+ // the count for all the continue statements.
+ uint64_t IncCount = setCount(BackedgeCount + BC.ContinueCount);
+ CountMap[S->getInc()] = IncCount;
+ Visit(S->getInc());
+
+ // ...then go back and propagate counts through the condition.
+ uint64_t CondCount =
+ setCount(ParentCount + BackedgeCount + BC.ContinueCount);
+ CountMap[S->getCond()] = CondCount;
+ Visit(S->getCond());
+ setCount(BC.BreakCount + CondCount - BodyCount);
+ RecordNextStmtCount = true;
+ }
+
+ void VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S) {
+ RecordStmtCount(S);
+ Visit(S->getElement());
+ uint64_t ParentCount = CurrentCount;
+ BreakContinueStack.push_back(BreakContinue());
+ // Counter tracks the body of the loop.
+ uint64_t BodyCount = setCount(PGO.getRegionCount(S));
+ CountMap[S->getBody()] = BodyCount;
+ Visit(S->getBody());
+ uint64_t BackedgeCount = CurrentCount;
+ BreakContinue BC = BreakContinueStack.pop_back_val();
+
+ setCount(BC.BreakCount + ParentCount + BackedgeCount + BC.ContinueCount -
+ BodyCount);
+ RecordNextStmtCount = true;
+ }
+
+ void VisitSwitchStmt(const SwitchStmt *S) {
+ RecordStmtCount(S);
+ Visit(S->getCond());
+ CurrentCount = 0;
+ BreakContinueStack.push_back(BreakContinue());
+ Visit(S->getBody());
+ // If the switch is inside a loop, add the continue counts.
+ BreakContinue BC = BreakContinueStack.pop_back_val();
+ if (!BreakContinueStack.empty())
+ BreakContinueStack.back().ContinueCount += BC.ContinueCount;
+ // Counter tracks the exit block of the switch.
+ setCount(PGO.getRegionCount(S));
+ RecordNextStmtCount = true;
+ }
+
+ void VisitSwitchCase(const SwitchCase *S) {
+ RecordNextStmtCount = false;
+ // Counter for this particular case. This counts only jumps from the
+ // switch header and does not include fallthrough from the case before
+ // this one.
+ uint64_t CaseCount = PGO.getRegionCount(S);
+ setCount(CurrentCount + CaseCount);
+ // We need the count without fallthrough in the mapping, so it's more useful
+ // for branch probabilities.
+ CountMap[S] = CaseCount;
+ RecordNextStmtCount = true;
+ Visit(S->getSubStmt());
+ }
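+
+ // Example for VisitSwitchCase (counts assumed): a case reached 5 times from
+ // the switch header with 3 fallthrough executions from the previous case
+ // records CountMap[S] = 5, while CurrentCount becomes 5 + 3 = 8 for the
+ // sub-statement.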
+
+ void VisitIfStmt(const IfStmt *S) {
+ RecordStmtCount(S);
+ uint64_t ParentCount = CurrentCount;
+ Visit(S->getCond());
+
+ // Counter tracks the "then" part of an if statement. The count for
+ // the "else" part, if it exists, will be calculated from this counter.
+ uint64_t ThenCount = setCount(PGO.getRegionCount(S));
+ CountMap[S->getThen()] = ThenCount;
+ Visit(S->getThen());
+ uint64_t OutCount = CurrentCount;
+
+ uint64_t ElseCount = ParentCount - ThenCount;
+ if (S->getElse()) {
+ setCount(ElseCount);
+ CountMap[S->getElse()] = ElseCount;
+ Visit(S->getElse());
+ OutCount += CurrentCount;
+ } else
+ OutCount += ElseCount;
+ setCount(OutCount);
+ RecordNextStmtCount = true;
+ }
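+
+ // Example for VisitIfStmt (counts assumed): ParentCount = 100 and
+ // ThenCount = 70 give ElseCount = 30; unless an arm exits early (a return
+ // or throw zeroes CurrentCount), OutCount sums back to 100.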
+
+ void VisitCXXTryStmt(const CXXTryStmt *S) {
+ RecordStmtCount(S);
+ Visit(S->getTryBlock());
+ for (unsigned I = 0, E = S->getNumHandlers(); I < E; ++I)
+ Visit(S->getHandler(I));
+ // Counter tracks the continuation block of the try statement.
+ setCount(PGO.getRegionCount(S));
+ RecordNextStmtCount = true;
+ }
+
+ void VisitCXXCatchStmt(const CXXCatchStmt *S) {
+ RecordNextStmtCount = false;
+ // Counter tracks the catch statement's handler block.
+ uint64_t CatchCount = setCount(PGO.getRegionCount(S));
+ CountMap[S] = CatchCount;
+ Visit(S->getHandlerBlock());
+ }
+
+ void VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
+ RecordStmtCount(E);
+ uint64_t ParentCount = CurrentCount;
+ Visit(E->getCond());
+
+ // Counter tracks the "true" part of a conditional operator. The
+ // count in the "false" part will be calculated from this counter.
+ uint64_t TrueCount = setCount(PGO.getRegionCount(E));
+ CountMap[E->getTrueExpr()] = TrueCount;
+ Visit(E->getTrueExpr());
+ uint64_t OutCount = CurrentCount;
+
+ uint64_t FalseCount = setCount(ParentCount - TrueCount);
+ CountMap[E->getFalseExpr()] = FalseCount;
+ Visit(E->getFalseExpr());
+ OutCount += CurrentCount;
+
+ setCount(OutCount);
+ RecordNextStmtCount = true;
+ }
+
+ void VisitBinLAnd(const BinaryOperator *E) {
+ RecordStmtCount(E);
+ uint64_t ParentCount = CurrentCount;
+ Visit(E->getLHS());
+ // Counter tracks the right hand side of a logical and operator.
+ uint64_t RHSCount = setCount(PGO.getRegionCount(E));
+ CountMap[E->getRHS()] = RHSCount;
+ Visit(E->getRHS());
+ setCount(ParentCount + RHSCount - CurrentCount);
+ RecordNextStmtCount = true;
+ }
+
+ void VisitBinLOr(const BinaryOperator *E) {
+ RecordStmtCount(E);
+ uint64_t ParentCount = CurrentCount;
+ Visit(E->getLHS());
+ // Counter tracks the right hand side of a logical or operator.
+ uint64_t RHSCount = setCount(PGO.getRegionCount(E));
+ CountMap[E->getRHS()] = RHSCount;
+ Visit(E->getRHS());
+ setCount(ParentCount + RHSCount - CurrentCount);
+ RecordNextStmtCount = true;
+ }
+};
+} // end anonymous namespace
+
+void PGOHash::combine(HashType Type) {
+ // Check that we never combine 0 and that the type fits in six bits.
+ assert(Type && "Hash is invalid: unexpected type 0");
+ assert(unsigned(Type) < TooBig && "Hash is invalid: too many types");
+
+ // Pass through MD5 if enough work has built up.
+ if (Count && Count % NumTypesPerWord == 0) {
+ using namespace llvm::support;
+ uint64_t Swapped = endian::byte_swap<uint64_t, little>(Working);
+ MD5.update(llvm::makeArrayRef((uint8_t *)&Swapped, sizeof(Swapped)));
+ Working = 0;
+ }
+
+ // Accumulate the current type.
+ ++Count;
+ Working = Working << NumBitsPerType | Type;
+}
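+
+// Illustrative trace (enum values from HashType): combine(IfStmt /*10*/)
+// leaves Working = 10; a following combine(WhileStmt /*2*/) gives
+// Working = (10 << 6) | 2 = 642. Once ten values are packed, the next
+// combine() byte-swaps the full word to little-endian, feeds it to MD5, and
+// resets Working to 0.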
+
+uint64_t PGOHash::finalize() {
+ // Use Working as the hash directly if we never used MD5.
+ if (Count <= NumTypesPerWord)
+ // No need to byte swap here, since none of the math was endian-dependent.
+ // This number will be byte-swapped as required on endianness transitions,
+ // so we will see the same value on the other side.
+ return Working;
+
+ // Check for remaining work in Working.
+ if (Working)
+ MD5.update(Working);
+
+ // Finalize the MD5 and return the hash.
+ llvm::MD5::MD5Result Result;
+ MD5.final(Result);
+ using namespace llvm::support;
+ return endian::read<uint64_t, little, unaligned>(Result);
+}
+
+void CodeGenPGO::assignRegionCounters(GlobalDecl GD, llvm::Function *Fn) {
+ const Decl *D = GD.getDecl();
+ bool InstrumentRegions = CGM.getCodeGenOpts().ProfileInstrGenerate;
+ llvm::IndexedInstrProfReader *PGOReader = CGM.getPGOReader();
+ if (!InstrumentRegions && !PGOReader)
+ return;
+ if (D->isImplicit())
+ return;
+ // Constructors and destructors may be represented by several functions in
+ // IR. If so, instrument only the base variant; the others are implemented
+ // by delegation to the base one and would otherwise be counted twice.
+ if (CGM.getTarget().getCXXABI().hasConstructorVariants() &&
+ ((isa<CXXConstructorDecl>(GD.getDecl()) &&
+ GD.getCtorType() != Ctor_Base) ||
+ (isa<CXXDestructorDecl>(GD.getDecl()) &&
+ GD.getDtorType() != Dtor_Base))) {
+ return;
+ }
+ CGM.ClearUnusedCoverageMapping(D);
+ setFuncName(Fn);
+
+ mapRegionCounters(D);
+ if (CGM.getCodeGenOpts().CoverageMapping)
+ emitCounterRegionMapping(D);
+ if (PGOReader) {
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ loadRegionCounts(PGOReader, SM.isInMainFile(D->getLocation()));
+ computeRegionCounts(D);
+ applyFunctionAttributes(PGOReader, Fn);
+ }
+}
+
+void CodeGenPGO::mapRegionCounters(const Decl *D) {
+ RegionCounterMap.reset(new llvm::DenseMap<const Stmt *, unsigned>);
+ MapRegionCounters Walker(*RegionCounterMap);
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ Walker.TraverseDecl(const_cast<FunctionDecl *>(FD));
+ else if (const ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(D))
+ Walker.TraverseDecl(const_cast<ObjCMethodDecl *>(MD));
+ else if (const BlockDecl *BD = dyn_cast_or_null<BlockDecl>(D))
+ Walker.TraverseDecl(const_cast<BlockDecl *>(BD));
+ else if (const CapturedDecl *CD = dyn_cast_or_null<CapturedDecl>(D))
+ Walker.TraverseDecl(const_cast<CapturedDecl *>(CD));
+ assert(Walker.NextCounter > 0 && "no entry counter mapped for decl");
+ NumRegionCounters = Walker.NextCounter;
+ FunctionHash = Walker.Hash.finalize();
+}
+
+void CodeGenPGO::emitCounterRegionMapping(const Decl *D) {
+ if (SkipCoverageMapping)
+ return;
+ // Don't map functions inside system headers.
+ auto Loc = D->getBody()->getLocStart();
+ if (CGM.getContext().getSourceManager().isInSystemHeader(Loc))
+ return;
+
+ std::string CoverageMapping;
+ llvm::raw_string_ostream OS(CoverageMapping);
+ CoverageMappingGen MappingGen(*CGM.getCoverageMapping(),
+ CGM.getContext().getSourceManager(),
+ CGM.getLangOpts(), RegionCounterMap.get());
+ MappingGen.emitCounterMapping(D, OS);
+ OS.flush();
+
+ if (CoverageMapping.empty())
+ return;
+
+ CGM.getCoverageMapping()->addFunctionMappingRecord(
+ FuncNameVar, FuncName, FunctionHash, CoverageMapping);
+}
+
+void
+CodeGenPGO::emitEmptyCounterMapping(const Decl *D, StringRef Name,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+ if (SkipCoverageMapping)
+ return;
+ // Don't map functions inside system headers.
+ auto Loc = D->getBody()->getLocStart();
+ if (CGM.getContext().getSourceManager().isInSystemHeader(Loc))
+ return;
+
+ std::string CoverageMapping;
+ llvm::raw_string_ostream OS(CoverageMapping);
+ CoverageMappingGen MappingGen(*CGM.getCoverageMapping(),
+ CGM.getContext().getSourceManager(),
+ CGM.getLangOpts());
+ MappingGen.emitEmptyMapping(D, OS);
+ OS.flush();
+
+ if (CoverageMapping.empty())
+ return;
+
+ setFuncName(Name, Linkage);
+ CGM.getCoverageMapping()->addFunctionMappingRecord(
+ FuncNameVar, FuncName, FunctionHash, CoverageMapping);
+}
+
+void CodeGenPGO::computeRegionCounts(const Decl *D) {
+ StmtCountMap.reset(new llvm::DenseMap<const Stmt *, uint64_t>);
+ ComputeRegionCounts Walker(*StmtCountMap, *this);
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ Walker.VisitFunctionDecl(FD);
+ else if (const ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(D))
+ Walker.VisitObjCMethodDecl(MD);
+ else if (const BlockDecl *BD = dyn_cast_or_null<BlockDecl>(D))
+ Walker.VisitBlockDecl(BD);
+ else if (const CapturedDecl *CD = dyn_cast_or_null<CapturedDecl>(D))
+ Walker.VisitCapturedDecl(const_cast<CapturedDecl *>(CD));
+}
+
+void
+CodeGenPGO::applyFunctionAttributes(llvm::IndexedInstrProfReader *PGOReader,
+ llvm::Function *Fn) {
+ if (!haveRegionCounts())
+ return;
+
+ uint64_t FunctionCount = getRegionCount(nullptr);
+ Fn->setEntryCount(FunctionCount);
+}
+
+void CodeGenPGO::emitCounterIncrement(CGBuilderTy &Builder, const Stmt *S) {
+ if (!CGM.getCodeGenOpts().ProfileInstrGenerate || !RegionCounterMap)
+ return;
+ if (!Builder.GetInsertBlock())
+ return;
+
+ unsigned Counter = (*RegionCounterMap)[S];
+ auto *I8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::instrprof_increment),
+ {llvm::ConstantExpr::getBitCast(FuncNameVar, I8PtrTy),
+ Builder.getInt64(FunctionHash),
+ Builder.getInt32(NumRegionCounters),
+ Builder.getInt32(Counter)});
+}
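+
+// The emitted call has the shape (illustrative):
+// call void @llvm.instrprof.increment(i8* <name>, i64 <hash>,
+// i32 <num-counters>, i32 <index>)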
+
+void CodeGenPGO::loadRegionCounts(llvm::IndexedInstrProfReader *PGOReader,
+ bool IsInMainFile) {
+ CGM.getPGOStats().addVisited(IsInMainFile);
+ RegionCounts.clear();
+ if (std::error_code EC =
+ PGOReader->getFunctionCounts(FuncName, FunctionHash, RegionCounts)) {
+ if (EC == llvm::instrprof_error::unknown_function)
+ CGM.getPGOStats().addMissing(IsInMainFile);
+ else if (EC == llvm::instrprof_error::hash_mismatch)
+ CGM.getPGOStats().addMismatched(IsInMainFile);
+ else if (EC == llvm::instrprof_error::malformed)
+ // TODO: Consider a more specific warning for this case.
+ CGM.getPGOStats().addMismatched(IsInMainFile);
+ RegionCounts.clear();
+ }
+}
+
+/// \brief Calculate what to divide by to scale weights.
+///
+/// Given the maximum weight, calculate a divisor that will scale all the
+/// weights to strictly less than UINT32_MAX.
+static uint64_t calculateWeightScale(uint64_t MaxWeight) {
+ return MaxWeight < UINT32_MAX ? 1 : MaxWeight / UINT32_MAX + 1;
+}
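+
+// For example (assumed value), MaxWeight = 2^33 gives
+// Scale = 2^33 / UINT32_MAX + 1 = 3.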
+
+/// \brief Scale an individual branch weight (and add 1).
+///
+/// Scale a 64-bit weight down to 32-bits using \c Scale.
+///
+/// According to Laplace's Rule of Succession, it is better to compute the
+/// weight based on the count plus 1, so universally add 1 to the value.
+///
+/// \pre \c Scale was calculated by \a calculateWeightScale() with a weight no
+/// less than \c Weight.
+static uint32_t scaleBranchWeight(uint64_t Weight, uint64_t Scale) {
+ assert(Scale && "scale by 0?");
+ uint64_t Scaled = Weight / Scale + 1;
+ assert(Scaled <= UINT32_MAX && "overflow 32-bits");
+ return Scaled;
+}
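+
+// Continuing the example above with Scale = 3: scaleBranchWeight(1ull << 33, 3)
+// returns 2863311531 and scaleBranchWeight(0, 3) returns 1, both of which fit
+// in 32 bits.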
+
+llvm::MDNode *CodeGenFunction::createProfileWeights(uint64_t TrueCount,
+ uint64_t FalseCount) {
+ // Check for empty weights.
+ if (!TrueCount && !FalseCount)
+ return nullptr;
+
+ // Calculate how to scale down to 32-bits.
+ uint64_t Scale = calculateWeightScale(std::max(TrueCount, FalseCount));
+
+ llvm::MDBuilder MDHelper(CGM.getLLVMContext());
+ return MDHelper.createBranchWeights(scaleBranchWeight(TrueCount, Scale),
+ scaleBranchWeight(FalseCount, Scale));
+}
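+
+// Usage sketch (counts assumed): createProfileWeights(1ull << 33, 0) scales by
+// 3 and yields metadata equivalent to
+// !{!"branch_weights", i32 2863311531, i32 1}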
+
+llvm::MDNode *
+CodeGenFunction::createProfileWeights(ArrayRef<uint64_t> Weights) {
+ // We need at least two elements to create meaningful weights.
+ if (Weights.size() < 2)
+ return nullptr;
+
+ // Check for empty weights.
+ uint64_t MaxWeight = *std::max_element(Weights.begin(), Weights.end());
+ if (MaxWeight == 0)
+ return nullptr;
+
+ // Calculate how to scale down to 32-bits.
+ uint64_t Scale = calculateWeightScale(MaxWeight);
+
+ SmallVector<uint32_t, 16> ScaledWeights;
+ ScaledWeights.reserve(Weights.size());
+ for (uint64_t W : Weights)
+ ScaledWeights.push_back(scaleBranchWeight(W, Scale));
+
+ llvm::MDBuilder MDHelper(CGM.getLLVMContext());
+ return MDHelper.createBranchWeights(ScaledWeights);
+}
+
+llvm::MDNode *CodeGenFunction::createProfileWeightsForLoop(const Stmt *Cond,
+ uint64_t LoopCount) {
+ if (!PGO.haveRegionCounts())
+ return nullptr;
+ Optional<uint64_t> CondCount = PGO.getStmtCount(Cond);
+ assert(CondCount.hasValue() && "missing expected loop condition count");
+ if (*CondCount == 0)
+ return nullptr;
+ return createProfileWeights(LoopCount,
+ std::max(*CondCount, LoopCount) - LoopCount);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenPGO.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenPGO.h
new file mode 100644
index 0000000..6bf29ec
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenPGO.h
@@ -0,0 +1,117 @@
+//===--- CodeGenPGO.h - PGO Instrumentation for LLVM CodeGen ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Instrumentation-based profile-guided optimization
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENPGO_H
+#define LLVM_CLANG_LIB_CODEGEN_CODEGENPGO_H
+
+#include "CGBuilder.h"
+#include "CodeGenModule.h"
+#include "CodeGenTypes.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <memory>
+
+namespace clang {
+namespace CodeGen {
+
+/// Per-function PGO state.
+class CodeGenPGO {
+private:
+ CodeGenModule &CGM;
+ std::string FuncName;
+ llvm::GlobalVariable *FuncNameVar;
+
+ unsigned NumRegionCounters;
+ uint64_t FunctionHash;
+ std::unique_ptr<llvm::DenseMap<const Stmt *, unsigned>> RegionCounterMap;
+ std::unique_ptr<llvm::DenseMap<const Stmt *, uint64_t>> StmtCountMap;
+ std::vector<uint64_t> RegionCounts;
+ uint64_t CurrentRegionCount;
+ /// \brief A flag that is set to true when this function doesn't need
+ /// to have coverage mapping data.
+ bool SkipCoverageMapping;
+
+public:
+ CodeGenPGO(CodeGenModule &CGM)
+ : CGM(CGM), NumRegionCounters(0), FunctionHash(0), CurrentRegionCount(0),
+ SkipCoverageMapping(false) {}
+
+ /// Whether or not we have PGO region data for the current function. This is
+ /// false both when we have no data at all and when our data has been
+ /// discarded.
+ bool haveRegionCounts() const { return !RegionCounts.empty(); }
+
+ /// Return the counter value of the current region.
+ uint64_t getCurrentRegionCount() const { return CurrentRegionCount; }
+
+ /// Set the counter value for the current region. This is used to keep track
+ /// of changes to the most recent counter from control flow and non-local
+ /// exits.
+ void setCurrentRegionCount(uint64_t Count) { CurrentRegionCount = Count; }
+
+ /// Return the execution count for the given statement if one is known;
+ /// return None otherwise.
+ Optional<uint64_t> getStmtCount(const Stmt *S) {
+ if (!StmtCountMap)
+ return None;
+ auto I = StmtCountMap->find(S);
+ if (I == StmtCountMap->end())
+ return None;
+ return I->second;
+ }
+
+ /// If the execution count for the current statement is known, record that
+ /// as the current count.
+ void setCurrentStmt(const Stmt *S) {
+ if (auto Count = getStmtCount(S))
+ setCurrentRegionCount(*Count);
+ }
+
+ /// Assign counters to regions and configure them for PGO of a given
+ /// function. Does nothing if instrumentation is not enabled; otherwise,
+ /// either generates global variables or associates PGO data with each of
+ /// the counters, depending on whether we are generating or using
+ /// instrumentation.
+ void assignRegionCounters(GlobalDecl GD, llvm::Function *Fn);
+ /// Emit a coverage mapping range with a zero counter
+ /// for an unused declaration.
+ void emitEmptyCounterMapping(const Decl *D, StringRef FuncName,
+ llvm::GlobalValue::LinkageTypes Linkage);
+private:
+ void setFuncName(llvm::Function *Fn);
+ void setFuncName(StringRef Name, llvm::GlobalValue::LinkageTypes Linkage);
+ void mapRegionCounters(const Decl *D);
+ void computeRegionCounts(const Decl *D);
+ void applyFunctionAttributes(llvm::IndexedInstrProfReader *PGOReader,
+ llvm::Function *Fn);
+ void loadRegionCounts(llvm::IndexedInstrProfReader *PGOReader,
+ bool IsInMainFile);
+ void emitCounterRegionMapping(const Decl *D);
+
+public:
+ void emitCounterIncrement(CGBuilderTy &Builder, const Stmt *S);
+
+ /// Return the region count for the counter attached to the given statement.
+ uint64_t getRegionCount(const Stmt *S) {
+ if (!RegionCounterMap)
+ return 0;
+ if (!haveRegionCounts())
+ return 0;
+ return RegionCounts[(*RegionCounterMap)[S]];
+ }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp
new file mode 100644
index 0000000..c3c925c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp
@@ -0,0 +1,319 @@
+//===--- CodeGenTBAA.cpp - TBAA information for LLVM CodeGen --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that manages TBAA information and defines the TBAA policy
+// for the optimizer to use. Relevant standards text includes:
+//
+// C99 6.5p7
+// C++ [basic.lval] (p10 in n3126, p15 in some earlier versions)
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenTBAA.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Mangle.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Type.h"
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenTBAA::CodeGenTBAA(ASTContext &Ctx, llvm::LLVMContext& VMContext,
+ const CodeGenOptions &CGO,
+ const LangOptions &Features, MangleContext &MContext)
+ : Context(Ctx), CodeGenOpts(CGO), Features(Features), MContext(MContext),
+ MDHelper(VMContext), Root(nullptr), Char(nullptr) {
+}
+
+CodeGenTBAA::~CodeGenTBAA() {
+}
+
+llvm::MDNode *CodeGenTBAA::getRoot() {
+ // Define the root of the tree. This identifies the tree, so that
+ // if our LLVM IR is linked with LLVM IR from a different front-end
+ // (or a different version of this front-end), their TBAA trees will
+ // remain distinct, and the optimizer will treat them conservatively.
+ if (!Root)
+ Root = MDHelper.createTBAARoot("Simple C/C++ TBAA");
+
+ return Root;
+}
+
+// For both scalar TBAA and struct-path aware TBAA, the scalar type has the
+// same format: name, parent node, and offset.
+llvm::MDNode *CodeGenTBAA::createTBAAScalarType(StringRef Name,
+ llvm::MDNode *Parent) {
+ return MDHelper.createTBAAScalarTypeNode(Name, Parent);
+}
+
+llvm::MDNode *CodeGenTBAA::getChar() {
+  // Define the root of the tree for user-accessible memory. C and C++
+  // give special powers to char and certain similar types. However,
+  // these special powers only cover user-accessible memory, and don't
+  // include things like vtables.
+ if (!Char)
+ Char = createTBAAScalarType("omnipotent char", getRoot());
+
+ return Char;
+}
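+
+// A minimal sketch of the output: together, getRoot() and getChar() emit the
+// top of the TBAA type tree, which in textual IR looks roughly like:
+//
+//   !0 = !{!"Simple C/C++ TBAA"}
+//   !1 = !{!"omnipotent char", !0, i64 0}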
+
+static bool TypeHasMayAlias(QualType QTy) {
+ // Tagged types have declarations, and therefore may have attributes.
+ if (const TagType *TTy = dyn_cast<TagType>(QTy))
+ return TTy->getDecl()->hasAttr<MayAliasAttr>();
+
+ // Typedef types have declarations, and therefore may have attributes.
+ if (const TypedefType *TTy = dyn_cast<TypedefType>(QTy)) {
+ if (TTy->getDecl()->hasAttr<MayAliasAttr>())
+ return true;
+ // Also, their underlying types may have relevant attributes.
+ return TypeHasMayAlias(TTy->desugar());
+ }
+
+ return false;
+}
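+
+// A minimal sketch of what triggers this check (hypothetical user code): a
+// typedef carrying the attribute puts every access made through it into the
+// char alias class, so such accesses may alias any object:
+//
+//   typedef unsigned __attribute__((may_alias)) any_word;
+//   float f;
+//   any_word *p = (any_word *)&f;   // loads of *p may alias 'f'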
+
+llvm::MDNode *
+CodeGenTBAA::getTBAAInfo(QualType QTy) {
+ // At -O0 or relaxed aliasing, TBAA is not emitted for regular types.
+ if (CodeGenOpts.OptimizationLevel == 0 || CodeGenOpts.RelaxedAliasing)
+ return nullptr;
+
+ // If the type has the may_alias attribute (even on a typedef), it is
+ // effectively in the general char alias class.
+ if (TypeHasMayAlias(QTy))
+ return getChar();
+
+ const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
+
+ if (llvm::MDNode *N = MetadataCache[Ty])
+ return N;
+
+ // Handle builtin types.
+ if (const BuiltinType *BTy = dyn_cast<BuiltinType>(Ty)) {
+ switch (BTy->getKind()) {
+ // Character types are special and can alias anything.
+ // In C++, this technically only includes "char" and "unsigned char",
+ // and not "signed char". In C, it includes all three. For now,
+ // the risk of exploiting this detail in C++ seems likely to outweigh
+ // the benefit.
+ case BuiltinType::Char_U:
+ case BuiltinType::Char_S:
+ case BuiltinType::UChar:
+ case BuiltinType::SChar:
+ return getChar();
+
+ // Unsigned types can alias their corresponding signed types.
+ case BuiltinType::UShort:
+ return getTBAAInfo(Context.ShortTy);
+ case BuiltinType::UInt:
+ return getTBAAInfo(Context.IntTy);
+ case BuiltinType::ULong:
+ return getTBAAInfo(Context.LongTy);
+ case BuiltinType::ULongLong:
+ return getTBAAInfo(Context.LongLongTy);
+ case BuiltinType::UInt128:
+ return getTBAAInfo(Context.Int128Ty);
+
+ // Treat all other builtin types as distinct types. This includes
+ // treating wchar_t, char16_t, and char32_t as distinct from their
+ // "underlying types".
+ default:
+ return MetadataCache[Ty] =
+ createTBAAScalarType(BTy->getName(Features), getChar());
+ }
+ }
+
+ // Handle pointers.
+ // TODO: Implement C++'s type "similarity" and consider dis-"similar"
+ // pointers distinct.
+ if (Ty->isPointerType())
+ return MetadataCache[Ty] = createTBAAScalarType("any pointer",
+ getChar());
+
+ // Enum types are distinct types. In C++ they have "underlying types",
+ // however they aren't related for TBAA.
+ if (const EnumType *ETy = dyn_cast<EnumType>(Ty)) {
+ // In C++ mode, types have linkage, so we can rely on the ODR and
+ // on their mangled names, if they're external.
+ // TODO: Is there a way to get a program-wide unique name for a
+ // decl with local linkage or no linkage?
+ if (!Features.CPlusPlus || !ETy->getDecl()->isExternallyVisible())
+ return MetadataCache[Ty] = getChar();
+
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ MContext.mangleTypeName(QualType(ETy, 0), Out);
+ return MetadataCache[Ty] = createTBAAScalarType(OutName, getChar());
+ }
+
+ // For now, handle any other kind of type conservatively.
+ return MetadataCache[Ty] = getChar();
+}
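+
+// A minimal sketch of the resulting metadata: for 'int' and 'unsigned' the
+// rules above produce a single scalar node, parented on omnipotent char:
+//
+//   !1 = !{!"omnipotent char", !0, i64 0}
+//   !2 = !{!"int", !1, i64 0}        ; shared by int and unsigned accesses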
+
+llvm::MDNode *CodeGenTBAA::getTBAAInfoForVTablePtr() {
+ return createTBAAScalarType("vtable pointer", getRoot());
+}
+
+bool
+CodeGenTBAA::CollectFields(uint64_t BaseOffset,
+ QualType QTy,
+ SmallVectorImpl<llvm::MDBuilder::TBAAStructField> &
+ Fields,
+ bool MayAlias) {
+  // Things not handled yet include C++ base classes and bitfields.
+
+ if (const RecordType *TTy = QTy->getAs<RecordType>()) {
+ const RecordDecl *RD = TTy->getDecl()->getDefinition();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ // TODO: Handle C++ base classes.
+ if (const CXXRecordDecl *Decl = dyn_cast<CXXRecordDecl>(RD))
+ if (Decl->bases_begin() != Decl->bases_end())
+ return false;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(),
+ e = RD->field_end(); i != e; ++i, ++idx) {
+ uint64_t Offset = BaseOffset +
+ Layout.getFieldOffset(idx) / Context.getCharWidth();
+ QualType FieldQTy = i->getType();
+ if (!CollectFields(Offset, FieldQTy, Fields,
+ MayAlias || TypeHasMayAlias(FieldQTy)))
+ return false;
+ }
+ return true;
+ }
+
+  // Otherwise, treat whatever it is as a field.
+ uint64_t Offset = BaseOffset;
+ uint64_t Size = Context.getTypeSizeInChars(QTy).getQuantity();
+ llvm::MDNode *TBAAInfo = MayAlias ? getChar() : getTBAAInfo(QTy);
+ llvm::MDNode *TBAATag = getTBAAScalarTagInfo(TBAAInfo);
+ Fields.push_back(llvm::MDBuilder::TBAAStructField(Offset, Size, TBAATag));
+ return true;
+}
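+
+// A minimal sketch of the result (node names hypothetical): for
+// 'struct { int a; float b; }' the collected fields form the
+// (offset, size, tag) triples of a !tbaa.struct node, roughly:
+//
+//   !{i64 0, i64 4, !intTag, i64 4, i64 4, !floatTag}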
+
+llvm::MDNode *
+CodeGenTBAA::getTBAAStructInfo(QualType QTy) {
+ const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
+
+ if (llvm::MDNode *N = StructMetadataCache[Ty])
+ return N;
+
+ SmallVector<llvm::MDBuilder::TBAAStructField, 4> Fields;
+ if (CollectFields(0, QTy, Fields, TypeHasMayAlias(QTy)))
+ return MDHelper.createTBAAStructNode(Fields);
+
+ // For now, handle any other kind of type conservatively.
+ return StructMetadataCache[Ty] = nullptr;
+}
+
+/// Check if the given type can be handled by path-aware TBAA.
+static bool isTBAAPathStruct(QualType QTy) {
+ if (const RecordType *TTy = QTy->getAs<RecordType>()) {
+ const RecordDecl *RD = TTy->getDecl()->getDefinition();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+ // RD can be struct, union, class, interface or enum.
+ // For now, we only handle struct and class.
+ if (RD->isStruct() || RD->isClass())
+ return true;
+ }
+ return false;
+}
+
+llvm::MDNode *
+CodeGenTBAA::getTBAAStructTypeInfo(QualType QTy) {
+ const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
+ assert(isTBAAPathStruct(QTy));
+
+ if (llvm::MDNode *N = StructTypeMetadataCache[Ty])
+ return N;
+
+ if (const RecordType *TTy = QTy->getAs<RecordType>()) {
+ const RecordDecl *RD = TTy->getDecl()->getDefinition();
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ SmallVector <std::pair<llvm::MDNode*, uint64_t>, 4> Fields;
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(),
+ e = RD->field_end(); i != e; ++i, ++idx) {
+ QualType FieldQTy = i->getType();
+ llvm::MDNode *FieldNode;
+ if (isTBAAPathStruct(FieldQTy))
+ FieldNode = getTBAAStructTypeInfo(FieldQTy);
+ else
+ FieldNode = getTBAAInfo(FieldQTy);
+ if (!FieldNode)
+ return StructTypeMetadataCache[Ty] = nullptr;
+ Fields.push_back(std::make_pair(
+ FieldNode, Layout.getFieldOffset(idx) / Context.getCharWidth()));
+ }
+
+ SmallString<256> OutName;
+ if (Features.CPlusPlus) {
+ // Don't use the mangler for C code.
+ llvm::raw_svector_ostream Out(OutName);
+ MContext.mangleTypeName(QualType(Ty, 0), Out);
+ } else {
+ OutName = RD->getName();
+ }
+ // Create the struct type node with a vector of pairs (offset, type).
+ return StructTypeMetadataCache[Ty] =
+ MDHelper.createTBAAStructTypeNode(OutName, Fields);
+ }
+
+  return StructTypeMetadataCache[Ty] = nullptr;
+}
+
+/// Return a TBAA tag node for both scalar TBAA and struct-path aware TBAA.
+llvm::MDNode *
+CodeGenTBAA::getTBAAStructTagInfo(QualType BaseQTy, llvm::MDNode *AccessNode,
+ uint64_t Offset) {
+ if (!AccessNode)
+ return nullptr;
+
+ if (!CodeGenOpts.StructPathTBAA)
+ return getTBAAScalarTagInfo(AccessNode);
+
+ const Type *BTy = Context.getCanonicalType(BaseQTy).getTypePtr();
+ TBAAPathTag PathTag = TBAAPathTag(BTy, AccessNode, Offset);
+ if (llvm::MDNode *N = StructTagMetadataCache[PathTag])
+ return N;
+
+ llvm::MDNode *BNode = nullptr;
+ if (isTBAAPathStruct(BaseQTy))
+ BNode = getTBAAStructTypeInfo(BaseQTy);
+ if (!BNode)
+ return StructTagMetadataCache[PathTag] =
+ MDHelper.createTBAAStructTagNode(AccessNode, AccessNode, 0);
+
+ return StructTagMetadataCache[PathTag] =
+ MDHelper.createTBAAStructTagNode(BNode, AccessNode, Offset);
+}
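+
+// A minimal sketch (node names hypothetical): when CodeGenOpts.StructPathTBAA
+// is set, an access to 'b' in 'struct S { int a; int b; };' gets a tag that
+// points into the type DAG rather than a plain scalar tag:
+//
+//   !stype = !{!"_ZTS1S", !int, i64 0, !int, i64 4}   ; struct type node
+//   !tag   = !{!stype, !int, i64 4}                   ; base, access, offset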
+
+llvm::MDNode *
+CodeGenTBAA::getTBAAScalarTagInfo(llvm::MDNode *AccessNode) {
+ if (!AccessNode)
+ return nullptr;
+ if (llvm::MDNode *N = ScalarTagMetadataCache[AccessNode])
+ return N;
+
+ return ScalarTagMetadataCache[AccessNode] =
+ MDHelper.createTBAAStructTagNode(AccessNode, AccessNode, 0);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h
new file mode 100644
index 0000000..632cadd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h
@@ -0,0 +1,160 @@
+//===--- CodeGenTBAA.h - TBAA information for LLVM CodeGen ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that manages TBAA information and defines the TBAA policy
+// for the optimizer to use.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENTBAA_H
+#define LLVM_CLANG_LIB_CODEGEN_CODEGENTBAA_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/MDBuilder.h"
+
+namespace llvm {
+ class LLVMContext;
+ class MDNode;
+}
+
+namespace clang {
+ class ASTContext;
+ class CodeGenOptions;
+ class LangOptions;
+ class MangleContext;
+ class QualType;
+ class Type;
+
+namespace CodeGen {
+ class CGRecordLayout;
+
+ struct TBAAPathTag {
+ TBAAPathTag(const Type *B, const llvm::MDNode *A, uint64_t O)
+ : BaseT(B), AccessN(A), Offset(O) {}
+ const Type *BaseT;
+ const llvm::MDNode *AccessN;
+ uint64_t Offset;
+ };
+
+/// CodeGenTBAA - This class organizes the cross-module state that is used
+/// while generating TBAA metadata for LLVM IR from AST types.
+class CodeGenTBAA {
+ ASTContext &Context;
+ const CodeGenOptions &CodeGenOpts;
+ const LangOptions &Features;
+ MangleContext &MContext;
+
+ // MDHelper - Helper for creating metadata.
+ llvm::MDBuilder MDHelper;
+
+ /// MetadataCache - This maps clang::Types to scalar llvm::MDNodes describing
+ /// them.
+ llvm::DenseMap<const Type *, llvm::MDNode *> MetadataCache;
+ /// This maps clang::Types to a struct node in the type DAG.
+ llvm::DenseMap<const Type *, llvm::MDNode *> StructTypeMetadataCache;
+ /// This maps TBAAPathTags to a tag node.
+ llvm::DenseMap<TBAAPathTag, llvm::MDNode *> StructTagMetadataCache;
+ /// This maps a scalar type to a scalar tag node.
+ llvm::DenseMap<const llvm::MDNode *, llvm::MDNode *> ScalarTagMetadataCache;
+
+ /// StructMetadataCache - This maps clang::Types to llvm::MDNodes describing
+ /// them for struct assignments.
+ llvm::DenseMap<const Type *, llvm::MDNode *> StructMetadataCache;
+
+ llvm::MDNode *Root;
+ llvm::MDNode *Char;
+
+ /// getRoot - This is the mdnode for the root of the metadata type graph
+ /// for this translation unit.
+ llvm::MDNode *getRoot();
+
+ /// getChar - This is the mdnode for "char", which is special, and any types
+ /// considered to be equivalent to it.
+ llvm::MDNode *getChar();
+
+ /// CollectFields - Collect information about the fields of a type for
+ /// !tbaa.struct metadata formation. Return false for an unsupported type.
+ bool CollectFields(uint64_t BaseOffset,
+ QualType Ty,
+ SmallVectorImpl<llvm::MDBuilder::TBAAStructField> &Fields,
+ bool MayAlias);
+
+ /// A wrapper function to create a scalar type. For struct-path aware TBAA,
+ /// the scalar type has the same format as the struct type: name, offset,
+ /// pointer to another node in the type DAG.
+ llvm::MDNode *createTBAAScalarType(StringRef Name, llvm::MDNode *Parent);
+
+public:
+ CodeGenTBAA(ASTContext &Ctx, llvm::LLVMContext &VMContext,
+ const CodeGenOptions &CGO,
+ const LangOptions &Features,
+ MangleContext &MContext);
+ ~CodeGenTBAA();
+
+ /// getTBAAInfo - Get the TBAA MDNode to be used for a dereference
+ /// of the given type.
+ llvm::MDNode *getTBAAInfo(QualType QTy);
+
+ /// getTBAAInfoForVTablePtr - Get the TBAA MDNode to be used for a
+ /// dereference of a vtable pointer.
+ llvm::MDNode *getTBAAInfoForVTablePtr();
+
+ /// getTBAAStructInfo - Get the TBAAStruct MDNode to be used for a memcpy of
+ /// the given type.
+ llvm::MDNode *getTBAAStructInfo(QualType QTy);
+
+ /// Get the MDNode in the type DAG for given struct type QType.
+ llvm::MDNode *getTBAAStructTypeInfo(QualType QType);
+ /// Get the tag MDNode for a given base type, the actual scalar access MDNode
+ /// and offset into the base type.
+ llvm::MDNode *getTBAAStructTagInfo(QualType BaseQType,
+ llvm::MDNode *AccessNode, uint64_t Offset);
+
+ /// Get the scalar tag MDNode for a given scalar type.
+ llvm::MDNode *getTBAAScalarTagInfo(llvm::MDNode *AccessNode);
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+namespace llvm {
+
+template<> struct DenseMapInfo<clang::CodeGen::TBAAPathTag> {
+ static clang::CodeGen::TBAAPathTag getEmptyKey() {
+ return clang::CodeGen::TBAAPathTag(
+ DenseMapInfo<const clang::Type *>::getEmptyKey(),
+ DenseMapInfo<const MDNode *>::getEmptyKey(),
+ DenseMapInfo<uint64_t>::getEmptyKey());
+ }
+
+ static clang::CodeGen::TBAAPathTag getTombstoneKey() {
+ return clang::CodeGen::TBAAPathTag(
+ DenseMapInfo<const clang::Type *>::getTombstoneKey(),
+ DenseMapInfo<const MDNode *>::getTombstoneKey(),
+ DenseMapInfo<uint64_t>::getTombstoneKey());
+ }
+
+ static unsigned getHashValue(const clang::CodeGen::TBAAPathTag &Val) {
+ return DenseMapInfo<const clang::Type *>::getHashValue(Val.BaseT) ^
+ DenseMapInfo<const MDNode *>::getHashValue(Val.AccessN) ^
+ DenseMapInfo<uint64_t>::getHashValue(Val.Offset);
+ }
+
+ static bool isEqual(const clang::CodeGen::TBAAPathTag &LHS,
+ const clang::CodeGen::TBAAPathTag &RHS) {
+ return LHS.BaseT == RHS.BaseT &&
+ LHS.AccessN == RHS.AccessN &&
+ LHS.Offset == RHS.Offset;
+ }
+};
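+
+// A minimal usage sketch (names hypothetical): this specialization is what
+// lets TBAAPathTag serve as a DenseMap key (see StructTagMetadataCache);
+// DenseMap needs distinct empty/tombstone keys plus hash and equality:
+//
+//   llvm::DenseMap<clang::CodeGen::TBAAPathTag, llvm::MDNode *> Cache;
+//   Cache[clang::CodeGen::TBAAPathTag(BaseTy, AccessN, /*Offset=*/8)] = Tag;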
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypeCache.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypeCache.h
new file mode 100644
index 0000000..c32b66d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypeCache.h
@@ -0,0 +1,108 @@
+//===--- CodeGenTypeCache.h - Commonly used LLVM types and info -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This structure provides a set of common types useful during IR emission.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENTYPECACHE_H
+#define LLVM_CLANG_LIB_CODEGEN_CODEGENTYPECACHE_H
+
+#include "clang/AST/CharUnits.h"
+#include "llvm/IR/CallingConv.h"
+
+namespace llvm {
+ class Type;
+ class IntegerType;
+ class PointerType;
+}
+
+namespace clang {
+namespace CodeGen {
+
+/// This structure provides a set of types that are commonly used
+/// during IR emission. It's initialized once in CodeGenModule's
+/// constructor and then copied around into new CodeGenFunctions.
+struct CodeGenTypeCache {
+ /// void
+ llvm::Type *VoidTy;
+
+ /// i8, i16, i32, and i64
+ llvm::IntegerType *Int8Ty, *Int16Ty, *Int32Ty, *Int64Ty;
+ /// float, double
+ llvm::Type *FloatTy, *DoubleTy;
+
+ /// int
+ llvm::IntegerType *IntTy;
+
+ /// intptr_t, size_t, and ptrdiff_t, which we assume are the same size.
+ union {
+ llvm::IntegerType *IntPtrTy;
+ llvm::IntegerType *SizeTy;
+ llvm::IntegerType *PtrDiffTy;
+ };
+
+ /// void* in address space 0
+ union {
+ llvm::PointerType *VoidPtrTy;
+ llvm::PointerType *Int8PtrTy;
+ };
+
+ /// void** in address space 0
+ union {
+ llvm::PointerType *VoidPtrPtrTy;
+ llvm::PointerType *Int8PtrPtrTy;
+ };
+
+ /// The size and alignment of the builtin C type 'int'. This comes
+ /// up enough in various ABI lowering tasks to be worth pre-computing.
+ union {
+ unsigned char IntSizeInBytes;
+ unsigned char IntAlignInBytes;
+ };
+ CharUnits getIntSize() const {
+ return CharUnits::fromQuantity(IntSizeInBytes);
+ }
+ CharUnits getIntAlign() const {
+ return CharUnits::fromQuantity(IntAlignInBytes);
+ }
+
+ /// The width of a pointer into the generic address space.
+ unsigned char PointerWidthInBits;
+
+ /// The size and alignment of a pointer into the generic address space.
+ union {
+ unsigned char PointerAlignInBytes;
+ unsigned char PointerSizeInBytes;
+ unsigned char SizeSizeInBytes; // sizeof(size_t)
+ unsigned char SizeAlignInBytes;
+ };
+ CharUnits getSizeSize() const {
+ return CharUnits::fromQuantity(SizeSizeInBytes);
+ }
+ CharUnits getSizeAlign() const {
+ return CharUnits::fromQuantity(SizeAlignInBytes);
+ }
+ CharUnits getPointerSize() const {
+ return CharUnits::fromQuantity(PointerSizeInBytes);
+ }
+ CharUnits getPointerAlign() const {
+ return CharUnits::fromQuantity(PointerAlignInBytes);
+ }
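+
+  // A minimal sketch of how these unions are used (illustrative): the
+  // initializer writes one member and clients read through another, which
+  // is only valid because the named quantities coincide on all supported
+  // targets:
+  //
+  //   PointerWidthInBits = 64;              // e.g. on a 64-bit target
+  //   PointerSizeInBytes = 8;               // writes the whole union
+  //   CharUnits SzAlign = getSizeAlign();   // reads SizeAlignInBytes: 8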
+
+ llvm::CallingConv::ID RuntimeCC;
+ llvm::CallingConv::ID getRuntimeCC() const { return RuntimeCC; }
+ llvm::CallingConv::ID BuiltinCC;
+ llvm::CallingConv::ID getBuiltinCC() const { return BuiltinCC; }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
new file mode 100644
index 0000000..fcda053
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -0,0 +1,760 @@
+//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that handles AST -> LLVM type lowering.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenTypes.h"
+#include "CGCXXABI.h"
+#include "CGCall.h"
+#include "CGOpenCLRuntime.h"
+#include "CGRecordLayout.h"
+#include "TargetInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Module.h"
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
+ : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
+ Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()),
+ TheABIInfo(cgm.getTargetCodeGenInfo().getABIInfo()) {
+ SkippedLayout = false;
+}
+
+CodeGenTypes::~CodeGenTypes() {
+ llvm::DeleteContainerSeconds(CGRecordLayouts);
+
+ for (llvm::FoldingSet<CGFunctionInfo>::iterator
+ I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
+ delete &*I++;
+}
+
+void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
+ llvm::StructType *Ty,
+ StringRef suffix) {
+ SmallString<256> TypeName;
+ llvm::raw_svector_ostream OS(TypeName);
+ OS << RD->getKindName() << '.';
+
+ // Name the codegen type after the typedef name
+ // if there is no tag type name available
+ if (RD->getIdentifier()) {
+ // FIXME: We should not have to check for a null decl context here.
+ // Right now we do it because the implicit Obj-C decls don't have one.
+ if (RD->getDeclContext())
+ RD->printQualifiedName(OS);
+ else
+ RD->printName(OS);
+ } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
+ // FIXME: We should not have to check for a null decl context here.
+ // Right now we do it because the implicit Obj-C decls don't have one.
+ if (TDD->getDeclContext())
+ TDD->printQualifiedName(OS);
+ else
+ TDD->printName(OS);
+ } else
+ OS << "anon";
+
+ if (!suffix.empty())
+ OS << suffix;
+
+ Ty->setName(OS.str());
+}
+
+/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
+/// ConvertType in that it is used to convert to the memory representation for
+/// a type. For example, the scalar representation for _Bool is i1, but the
+/// memory representation is usually i8 or i32, depending on the target.
+llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
+ llvm::Type *R = ConvertType(T);
+
+ // If this is a non-bool type, don't map it.
+ if (!R->isIntegerTy(1))
+ return R;
+
+ // Otherwise, return an integer of the target-specified size.
+ return llvm::IntegerType::get(getLLVMContext(),
+ (unsigned)Context.getTypeSize(T));
+}
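+
+// A minimal sketch of the difference (illustrative IR): 'x > y' produces an
+// i1 scalar, but storing the result into a bool field goes through the
+// memory representation, typically i8:
+//
+//   %cmp = icmp sgt i32 %x, %y        ; scalar form: i1
+//   %frombool = zext i1 %cmp to i8    ; memory form: i8
+//   store i8 %frombool, i8* %b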
+
+
+/// isRecordLayoutComplete - Return true if the specified type is already
+/// completely laid out.
+bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
+ llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I =
+ RecordDeclTypes.find(Ty);
+ return I != RecordDeclTypes.end() && !I->second->isOpaque();
+}
+
+static bool
+isSafeToConvert(QualType T, CodeGenTypes &CGT,
+ llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked);
+
+
+/// isSafeToConvert - Return true if it is safe to convert the specified record
+/// decl to IR and lay it out, false if doing so would cause us to get into a
+/// recursive compilation mess.
+static bool
+isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT,
+ llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
+  // If we have already checked this type (maybe the same type is used
+  // by-value multiple times in multiple structure fields), don't check again.
+ if (!AlreadyChecked.insert(RD).second)
+ return true;
+
+ const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr();
+
+ // If this type is already laid out, converting it is a noop.
+ if (CGT.isRecordLayoutComplete(Key)) return true;
+
+ // If this type is currently being laid out, we can't recursively compile it.
+ if (CGT.isRecordBeingLaidOut(Key))
+ return false;
+
+ // If this type would require laying out bases that are currently being laid
+ // out, don't do it. This includes virtual base classes which get laid out
+ // when a class is translated, even though they aren't embedded by-value into
+ // the class.
+ if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (const auto &I : CRD->bases())
+ if (!isSafeToConvert(I.getType()->getAs<RecordType>()->getDecl(),
+ CGT, AlreadyChecked))
+ return false;
+ }
+
+ // If this type would require laying out members that are currently being laid
+ // out, don't do it.
+ for (const auto *I : RD->fields())
+ if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked))
+ return false;
+
+  // If there are no problems, let's do it.
+ return true;
+}
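+
+// A minimal sketch of the distinction (illustrative C): self-reference
+// through a pointer is always safe because the pointee need not be laid out,
+// while a by-value member of a record still being laid out forces deferral:
+//
+//   struct Node { struct Node *next; };   // safe: only a pointer is needed
+//   struct A { struct B b; };             // unsafe while B is being laid out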
+
+/// isSafeToConvert - Return true if it is safe to convert this field type,
+/// which requires the structure elements contained by-value to all be
+/// recursively safe to convert.
+static bool
+isSafeToConvert(QualType T, CodeGenTypes &CGT,
+ llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
+ // Strip off atomic type sugar.
+ if (const auto *AT = T->getAs<AtomicType>())
+ T = AT->getValueType();
+
+ // If this is a record, check it.
+ if (const auto *RT = T->getAs<RecordType>())
+ return isSafeToConvert(RT->getDecl(), CGT, AlreadyChecked);
+
+ // If this is an array, check the elements, which are embedded inline.
+ if (const auto *AT = CGT.getContext().getAsArrayType(T))
+ return isSafeToConvert(AT->getElementType(), CGT, AlreadyChecked);
+
+ // Otherwise, there is no concern about transforming this. We only care about
+ // things that are contained by-value in a structure that can have another
+ // structure as a member.
+ return true;
+}
+
+
+/// isSafeToConvert - Return true if it is safe to convert the specified record
+/// decl to IR and lay it out, false if doing so would cause us to get into a
+/// recursive compilation mess.
+static bool isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT) {
+ // If no structs are being laid out, we can certainly do this one.
+ if (CGT.noRecordsBeingLaidOut()) return true;
+
+ llvm::SmallPtrSet<const RecordDecl*, 16> AlreadyChecked;
+ return isSafeToConvert(RD, CGT, AlreadyChecked);
+}
+
+/// isFuncParamTypeConvertible - Return true if the specified type in a
+/// function parameter or result position can be converted to an IR type at
+/// this point. This boils down to whether the type is complete, as well as
+/// whether we've temporarily deferred expanding it because we're in a
+/// recursive context.
+bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
+ // Some ABIs cannot have their member pointers represented in IR unless
+ // certain circumstances have been reached.
+ if (const auto *MPT = Ty->getAs<MemberPointerType>())
+ return getCXXABI().isMemberPointerConvertible(MPT);
+
+ // If this isn't a tagged type, we can convert it!
+ const TagType *TT = Ty->getAs<TagType>();
+ if (!TT) return true;
+
+ // Incomplete types cannot be converted.
+ if (TT->isIncompleteType())
+ return false;
+
+ // If this is an enum, then it is always safe to convert.
+ const RecordType *RT = dyn_cast<RecordType>(TT);
+ if (!RT) return true;
+
+ // Otherwise, we have to be careful. If it is a struct that we're in the
+ // process of expanding, then we can't convert the function type. That's ok
+ // though because we must be in a pointer context under the struct, so we can
+ // just convert it to a dummy type.
+ //
+ // We decide this by checking whether ConvertRecordDeclType returns us an
+ // opaque type for a struct that we know is defined.
+ return isSafeToConvert(RT->getDecl(), *this);
+}
+
+
+/// Code to verify that a given function type is complete, i.e. that the
+/// return type and all of the parameter types are complete. Also check
+/// whether any struct types have had their conversion deferred; if so, we
+/// don't want to ask the ABI lowering code to handle a type that cannot be
+/// converted to an IR type.
+bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
+ if (!isFuncParamTypeConvertible(FT->getReturnType()))
+ return false;
+
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
+ for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
+ if (!isFuncParamTypeConvertible(FPT->getParamType(i)))
+ return false;
+
+ return true;
+}
+
+/// UpdateCompletedType - When we find the full definition for a TagDecl,
+/// replace the 'opaque' type we previously made for it if applicable.
+void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
+ // If this is an enum being completed, then we flush all non-struct types from
+ // the cache. This allows function types and other things that may be derived
+ // from the enum to be recomputed.
+ if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
+ // Only flush the cache if we've actually already converted this type.
+ if (TypeCache.count(ED->getTypeForDecl())) {
+ // Okay, we formed some types based on this. We speculated that the enum
+ // would be lowered to i32, so we only need to flush the cache if this
+ // didn't happen.
+ if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
+ TypeCache.clear();
+ }
+ // If necessary, provide the full definition of a type only used with a
+ // declaration so far.
+ if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
+ DI->completeType(ED);
+ return;
+ }
+
+ // If we completed a RecordDecl that we previously used and converted to an
+ // anonymous type, then go ahead and complete it now.
+ const RecordDecl *RD = cast<RecordDecl>(TD);
+ if (RD->isDependentType()) return;
+
+ // Only complete it if we converted it already. If we haven't converted it
+ // yet, we'll just do it lazily.
+ if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
+ ConvertRecordDeclType(RD);
+
+ // If necessary, provide the full definition of a type only used with a
+ // declaration so far.
+ if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
+ DI->completeType(RD);
+}
+
+static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
+ const llvm::fltSemantics &format,
+ bool UseNativeHalf = false) {
+ if (&format == &llvm::APFloat::IEEEhalf) {
+ if (UseNativeHalf)
+ return llvm::Type::getHalfTy(VMContext);
+ else
+ return llvm::Type::getInt16Ty(VMContext);
+ }
+ if (&format == &llvm::APFloat::IEEEsingle)
+ return llvm::Type::getFloatTy(VMContext);
+ if (&format == &llvm::APFloat::IEEEdouble)
+ return llvm::Type::getDoubleTy(VMContext);
+ if (&format == &llvm::APFloat::IEEEquad)
+ return llvm::Type::getFP128Ty(VMContext);
+ if (&format == &llvm::APFloat::PPCDoubleDouble)
+ return llvm::Type::getPPC_FP128Ty(VMContext);
+ if (&format == &llvm::APFloat::x87DoubleExtended)
+ return llvm::Type::getX86_FP80Ty(VMContext);
+ llvm_unreachable("Unknown float format!");
+}
+
+llvm::Type *CodeGenTypes::ConvertFunctionType(QualType QFT,
+ const FunctionDecl *FD) {
+ assert(QFT.isCanonical());
+ const Type *Ty = QFT.getTypePtr();
+ const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
+ // First, check whether we can build the full function type. If the
+ // function type depends on an incomplete type (e.g. a struct or enum), we
+ // cannot lower the function type.
+ if (!isFuncTypeConvertible(FT)) {
+ // This function's type depends on an incomplete tag type.
+
+ // Force conversion of all the relevant record types, to make sure
+ // we re-convert the FunctionType when appropriate.
+ if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
+ ConvertRecordDeclType(RT->getDecl());
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
+ for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
+ if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
+ ConvertRecordDeclType(RT->getDecl());
+
+ SkippedLayout = true;
+
+ // Return a placeholder type.
+ return llvm::StructType::get(getLLVMContext());
+ }
+
+ // While we're converting the parameter types for a function, we don't want
+ // to recursively convert any pointed-to structs. Converting directly-used
+ // structs is ok though.
+ if (!RecordsBeingLaidOut.insert(Ty).second) {
+ SkippedLayout = true;
+ return llvm::StructType::get(getLLVMContext());
+ }
+
+ // The function type can be built; call the appropriate routines to
+ // build it.
+ const CGFunctionInfo *FI;
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
+ FI = &arrangeFreeFunctionType(
+ CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)), FD);
+ } else {
+ const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
+ FI = &arrangeFreeFunctionType(
+ CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
+ }
+
+ llvm::Type *ResultType = nullptr;
+ // If there is something higher level prodding our CGFunctionInfo, then
+ // don't recurse into it again.
+ if (FunctionsBeingProcessed.count(FI)) {
+
+ ResultType = llvm::StructType::get(getLLVMContext());
+ SkippedLayout = true;
+ } else {
+
+ // Otherwise, we're good to go, go ahead and convert it.
+ ResultType = GetFunctionType(*FI);
+ }
+
+ RecordsBeingLaidOut.erase(Ty);
+
+ if (SkippedLayout)
+ TypeCache.clear();
+
+ if (RecordsBeingLaidOut.empty())
+ while (!DeferredRecords.empty())
+ ConvertRecordDeclType(DeferredRecords.pop_back_val());
+ return ResultType;
+}
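+
+// A minimal sketch of the deferral path (illustrative C): a record holding a
+// pointer to a function that takes the record by value is caught by the
+// convertibility checks above and gets a placeholder type:
+//
+//   struct S { void (*fp)(struct S); };   // fp's function type is lowered
+//                                         // to an opaque placeholder first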
+
+/// ConvertType - Convert the specified type to its LLVM form.
+llvm::Type *CodeGenTypes::ConvertType(QualType T) {
+ T = Context.getCanonicalType(T);
+
+ const Type *Ty = T.getTypePtr();
+
+ // RecordTypes are cached and processed specially.
+ if (const RecordType *RT = dyn_cast<RecordType>(Ty))
+ return ConvertRecordDeclType(RT->getDecl());
+
+ // See if type is already cached.
+ llvm::DenseMap<const Type *, llvm::Type *>::iterator TCI = TypeCache.find(Ty);
+ // If type is found in map then use it. Otherwise, convert type T.
+ if (TCI != TypeCache.end())
+ return TCI->second;
+
+ // If we don't have it in the cache, convert it now.
+ llvm::Type *ResultType = nullptr;
+ switch (Ty->getTypeClass()) {
+ case Type::Record: // Handled above.
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Non-canonical or dependent types aren't possible.");
+
+ case Type::Builtin: {
+ switch (cast<BuiltinType>(Ty)->getKind()) {
+ case BuiltinType::Void:
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ // LLVM void type can only be used as the result of a function call. Just
+ // map to the same as char.
+ ResultType = llvm::Type::getInt8Ty(getLLVMContext());
+ break;
+
+ case BuiltinType::Bool:
+ // Note that we always return bool as i1 for use as a scalar type.
+ ResultType = llvm::Type::getInt1Ty(getLLVMContext());
+ break;
+
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ ResultType = llvm::IntegerType::get(getLLVMContext(),
+ static_cast<unsigned>(Context.getTypeSize(T)));
+ break;
+
+ case BuiltinType::Half:
+ // Half FP can either be storage-only (lowered to i16) or native.
+ ResultType =
+ getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T),
+ Context.getLangOpts().NativeHalfType ||
+ Context.getLangOpts().HalfArgsAndReturns);
+ break;
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ case BuiltinType::LongDouble:
+ ResultType = getTypeForFormat(getLLVMContext(),
+ Context.getFloatTypeSemantics(T),
+ /* UseNativeHalf = */ false);
+ break;
+
+ case BuiltinType::NullPtr:
+ // Model std::nullptr_t as i8*
+ ResultType = llvm::Type::getInt8PtrTy(getLLVMContext());
+ break;
+
+ case BuiltinType::UInt128:
+ case BuiltinType::Int128:
+ ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
+ break;
+
+ case BuiltinType::OCLImage1d:
+ case BuiltinType::OCLImage1dArray:
+ case BuiltinType::OCLImage1dBuffer:
+ case BuiltinType::OCLImage2d:
+ case BuiltinType::OCLImage2dArray:
+ case BuiltinType::OCLImage2dDepth:
+ case BuiltinType::OCLImage2dArrayDepth:
+ case BuiltinType::OCLImage2dMSAA:
+ case BuiltinType::OCLImage2dArrayMSAA:
+ case BuiltinType::OCLImage2dMSAADepth:
+ case BuiltinType::OCLImage2dArrayMSAADepth:
+ case BuiltinType::OCLImage3d:
+ case BuiltinType::OCLSampler:
+ case BuiltinType::OCLEvent:
+ case BuiltinType::OCLClkEvent:
+ case BuiltinType::OCLQueue:
+ case BuiltinType::OCLNDRange:
+ case BuiltinType::OCLReserveID:
+ ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
+ break;
+
+ case BuiltinType::Dependent:
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+ llvm_unreachable("Unexpected placeholder builtin type!");
+ }
+ break;
+ }
+ case Type::Auto:
+ llvm_unreachable("Unexpected undeduced auto type!");
+ case Type::Complex: {
+ llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
+ ResultType = llvm::StructType::get(EltTy, EltTy, nullptr);
+ break;
+ }
+ case Type::LValueReference:
+ case Type::RValueReference: {
+ const ReferenceType *RTy = cast<ReferenceType>(Ty);
+ QualType ETy = RTy->getPointeeType();
+ llvm::Type *PointeeType = ConvertTypeForMem(ETy);
+ unsigned AS = Context.getTargetAddressSpace(ETy);
+ ResultType = llvm::PointerType::get(PointeeType, AS);
+ break;
+ }
+ case Type::Pointer: {
+ const PointerType *PTy = cast<PointerType>(Ty);
+ QualType ETy = PTy->getPointeeType();
+ llvm::Type *PointeeType = ConvertTypeForMem(ETy);
+ if (PointeeType->isVoidTy())
+ PointeeType = llvm::Type::getInt8Ty(getLLVMContext());
+ unsigned AS = Context.getTargetAddressSpace(ETy);
+ ResultType = llvm::PointerType::get(PointeeType, AS);
+ break;
+ }
+
+ case Type::VariableArray: {
+ const VariableArrayType *A = cast<VariableArrayType>(Ty);
+ assert(A->getIndexTypeCVRQualifiers() == 0 &&
+ "FIXME: We only handle trivial array types so far!");
+ // VLAs resolve to the innermost element type; this matches
+ // the return of alloca, and there isn't any obviously better choice.
+ ResultType = ConvertTypeForMem(A->getElementType());
+ break;
+ }
+ case Type::IncompleteArray: {
+ const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
+ assert(A->getIndexTypeCVRQualifiers() == 0 &&
+ "FIXME: We only handle trivial array types so far!");
+ // int X[] -> [0 x int], unless the element type is not sized. If it is
+ // unsized (e.g. an incomplete struct) just use [0 x i8].
+ ResultType = ConvertTypeForMem(A->getElementType());
+ if (!ResultType->isSized()) {
+ SkippedLayout = true;
+ ResultType = llvm::Type::getInt8Ty(getLLVMContext());
+ }
+ ResultType = llvm::ArrayType::get(ResultType, 0);
+ break;
+ }
+ case Type::ConstantArray: {
+ const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
+ llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());
+
+ // Lower arrays of undefined struct type to arrays of i8 just to have a
+ // concrete type.
+ if (!EltTy->isSized()) {
+ SkippedLayout = true;
+ EltTy = llvm::Type::getInt8Ty(getLLVMContext());
+ }
+
+ ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue());
+ break;
+ }
+ case Type::ExtVector:
+ case Type::Vector: {
+ const VectorType *VT = cast<VectorType>(Ty);
+ ResultType = llvm::VectorType::get(ConvertType(VT->getElementType()),
+ VT->getNumElements());
+ break;
+ }
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ ResultType = ConvertFunctionType(T);
+ break;
+ case Type::ObjCObject:
+ ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
+ break;
+
+ case Type::ObjCInterface: {
+ // Objective-C interfaces are always opaque (outside of the
+ // runtime, which can do whatever it likes); we never refine
+ // these.
+ llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
+ if (!T)
+ T = llvm::StructType::create(getLLVMContext());
+ ResultType = T;
+ break;
+ }
+
+ case Type::ObjCObjectPointer: {
+ // Protocol qualifications do not influence the LLVM type, we just return a
+ // pointer to the underlying interface type. We don't need to worry about
+ // recursive conversion.
+ llvm::Type *T =
+ ConvertTypeForMem(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
+ ResultType = T->getPointerTo();
+ break;
+ }
+
+ case Type::Enum: {
+ const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
+ if (ED->isCompleteDefinition() || ED->isFixed())
+ return ConvertType(ED->getIntegerType());
+ // Return a placeholder 'i32' type. This can be changed later when the
+ // type is defined (see UpdateCompletedType), but is likely to be the
+ // "right" answer.
+ ResultType = llvm::Type::getInt32Ty(getLLVMContext());
+ break;
+ }
+
+ case Type::BlockPointer: {
+ const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
+ llvm::Type *PointeeType = ConvertTypeForMem(FTy);
+ unsigned AS = Context.getTargetAddressSpace(FTy);
+ ResultType = llvm::PointerType::get(PointeeType, AS);
+ break;
+ }
+
+ case Type::MemberPointer: {
+ if (!getCXXABI().isMemberPointerConvertible(cast<MemberPointerType>(Ty)))
+ return llvm::StructType::create(getLLVMContext());
+ ResultType =
+ getCXXABI().ConvertMemberPointerType(cast<MemberPointerType>(Ty));
+ break;
+ }
+
+ case Type::Atomic: {
+ QualType valueType = cast<AtomicType>(Ty)->getValueType();
+ ResultType = ConvertTypeForMem(valueType);
+
+ // Pad out to the inflated size if necessary.
+ uint64_t valueSize = Context.getTypeSize(valueType);
+ uint64_t atomicSize = Context.getTypeSize(Ty);
+ if (valueSize != atomicSize) {
+ assert(valueSize < atomicSize);
+ llvm::Type *elts[] = {
+ ResultType,
+ llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8)
+ };
+ ResultType = llvm::StructType::get(getLLVMContext(),
+ llvm::makeArrayRef(elts));
+ }
+ break;
+ }
+ }
+
+ assert(ResultType && "Didn't convert a type?");
+
+ TypeCache[Ty] = ResultType;
+ return ResultType;
+}
+
+bool CodeGenModule::isPaddedAtomicType(QualType type) {
+ return isPaddedAtomicType(type->castAs<AtomicType>());
+}
+
+bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
+ return Context.getTypeSize(type) != Context.getTypeSize(type->getValueType());
+}
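+
+// A minimal sketch (illustrative): atomic sizes are inflated to a power of
+// two, so a 3-byte payload is padded and isPaddedAtomicType() returns true:
+//
+//   _Atomic struct { char c[3]; } a;   // lowered roughly as
+//                                      // { { [3 x i8] }, [1 x i8] }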
+
+/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
+llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
+ // TagDecl's are not necessarily unique, instead use the (clang)
+ // type connected to the decl.
+ const Type *Key = Context.getTagDeclType(RD).getTypePtr();
+
+ llvm::StructType *&Entry = RecordDeclTypes[Key];
+
+ // If we don't have a StructType at all yet, create the forward declaration.
+ if (!Entry) {
+ Entry = llvm::StructType::create(getLLVMContext());
+ addRecordTypeName(RD, Entry, "");
+ }
+ llvm::StructType *Ty = Entry;
+
+ // If this is still a forward declaration, or the LLVM type is already
+ // complete, there's nothing more to do.
+ RD = RD->getDefinition();
+ if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque())
+ return Ty;
+
+ // If converting this type would cause us to infinitely loop, don't do it!
+ if (!isSafeToConvert(RD, *this)) {
+ DeferredRecords.push_back(RD);
+ return Ty;
+ }
+
+ // Okay, this is a definition of a type. Compile the implementation now.
+ bool InsertResult = RecordsBeingLaidOut.insert(Key).second;
+ (void)InsertResult;
+ assert(InsertResult && "Recursively compiling a struct?");
+
+ // Force conversion of non-virtual base classes recursively.
+ if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (const auto &I : CRD->bases()) {
+ if (I.isVirtual()) continue;
+
+ ConvertRecordDeclType(I.getType()->getAs<RecordType>()->getDecl());
+ }
+ }
+
+ // Layout fields.
+ CGRecordLayout *Layout = ComputeRecordLayout(RD, Ty);
+ CGRecordLayouts[Key] = Layout;
+
+ // We're done laying out this struct.
+ bool EraseResult = RecordsBeingLaidOut.erase(Key); (void)EraseResult;
+ assert(EraseResult && "struct not in RecordsBeingLaidOut set?");
+
+ // If this struct blocked a FunctionType conversion, then recompute whatever
+ // was derived from that.
+ // FIXME: This is hugely overconservative.
+ if (SkippedLayout)
+ TypeCache.clear();
+
+ // If we're done converting the outer-most record, then convert any deferred
+ // structs as well.
+ if (RecordsBeingLaidOut.empty())
+ while (!DeferredRecords.empty())
+ ConvertRecordDeclType(DeferredRecords.pop_back_val());
+
+ return Ty;
+}
+
+/// getCGRecordLayout - Return record layout info for the given record decl.
+const CGRecordLayout &
+CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
+ const Type *Key = Context.getTagDeclType(RD).getTypePtr();
+
+ const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key);
+ if (!Layout) {
+ // Compute the type information.
+ ConvertRecordDeclType(RD);
+
+ // Now try again.
+ Layout = CGRecordLayouts.lookup(Key);
+ }
+
+ assert(Layout && "Unable to find record layout information for type");
+ return *Layout;
+}
+
+bool CodeGenTypes::isZeroInitializable(QualType T) {
+ // No need to check for member pointers when not compiling C++.
+ if (!Context.getLangOpts().CPlusPlus)
+ return true;
+
+ if (const auto *AT = Context.getAsArrayType(T)) {
+ if (isa<IncompleteArrayType>(AT))
+ return true;
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
+ if (Context.getConstantArrayElementCount(CAT) == 0)
+ return true;
+ T = Context.getBaseElementType(T);
+ }
+
+ // Records are non-zero-initializable if they contain any
+ // non-zero-initializable subobjects.
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ return isZeroInitializable(RD);
+ }
+
+ // We have to ask the ABI about member pointers.
+ if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
+ return getCXXABI().isZeroInitializable(MPT);
+
+ // Everything else is okay.
+ return true;
+}
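+
+// A minimal sketch of why this can fail (illustrative): under the Itanium
+// C++ ABI a null pointer to data member is represented as -1, so zero-filled
+// storage would yield a *non-null* member pointer:
+//
+//   struct S { int S::*pm; };   // all-zero storage makes 'pm' point at
+//                               // offset 0 instead of being null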
+
+bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) {
+ return getCGRecordLayout(RD).isZeroInitializable();
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
new file mode 100644
index 0000000..a96f23c4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
@@ -0,0 +1,335 @@
+//===--- CodeGenTypes.h - Type translation for LLVM CodeGen -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that handles AST -> LLVM type lowering.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENTYPES_H
+#define LLVM_CLANG_LIB_CODEGEN_CODEGENTYPES_H
+
+#include "CGCall.h"
+#include "clang/AST/GlobalDecl.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/Module.h"
+#include <vector>
+
+namespace llvm {
+class FunctionType;
+class Module;
+class DataLayout;
+class Type;
+class LLVMContext;
+class StructType;
+}
+
+namespace clang {
+class ABIInfo;
+class ASTContext;
+template <typename> class CanQual;
+class CXXConstructorDecl;
+class CXXDestructorDecl;
+class CXXMethodDecl;
+class CodeGenOptions;
+class FieldDecl;
+class FunctionProtoType;
+class ObjCInterfaceDecl;
+class ObjCIvarDecl;
+class PointerType;
+class QualType;
+class RecordDecl;
+class TagDecl;
+class TargetInfo;
+class Type;
+typedef CanQual<Type> CanQualType;
+
+namespace CodeGen {
+class CGCXXABI;
+class CGRecordLayout;
+class CodeGenModule;
+class RequiredArgs;
+
+enum class StructorType {
+ Complete, // constructor or destructor
+ Base, // constructor or destructor
+ Deleting // destructor only
+};
+
+inline CXXCtorType toCXXCtorType(StructorType T) {
+ switch (T) {
+ case StructorType::Complete:
+ return Ctor_Complete;
+ case StructorType::Base:
+ return Ctor_Base;
+ case StructorType::Deleting:
+ llvm_unreachable("cannot have a deleting ctor");
+ }
+ llvm_unreachable("not a StructorType");
+}
+
+inline StructorType getFromCtorType(CXXCtorType T) {
+ switch (T) {
+ case Ctor_Complete:
+ return StructorType::Complete;
+ case Ctor_Base:
+ return StructorType::Base;
+ case Ctor_Comdat:
+ llvm_unreachable("not expecting a COMDAT");
+ case Ctor_CopyingClosure:
+ case Ctor_DefaultClosure:
+ llvm_unreachable("not expecting a closure");
+ }
+ llvm_unreachable("not a CXXCtorType");
+}
+
+inline CXXDtorType toCXXDtorType(StructorType T) {
+ switch (T) {
+ case StructorType::Complete:
+ return Dtor_Complete;
+ case StructorType::Base:
+ return Dtor_Base;
+ case StructorType::Deleting:
+ return Dtor_Deleting;
+ }
+ llvm_unreachable("not a StructorType");
+}
+
+inline StructorType getFromDtorType(CXXDtorType T) {
+ switch (T) {
+ case Dtor_Deleting:
+ return StructorType::Deleting;
+ case Dtor_Complete:
+ return StructorType::Complete;
+ case Dtor_Base:
+ return StructorType::Base;
+ case Dtor_Comdat:
+ llvm_unreachable("not expecting a COMDAT");
+ }
+ llvm_unreachable("not a CXXDtorType");
+}
+
+/// This class organizes the cross-module state that is used while lowering
+/// AST types to LLVM types.
+class CodeGenTypes {
+ CodeGenModule &CGM;
+ // Some of this stuff should probably be left on the CGM.
+ ASTContext &Context;
+ llvm::Module &TheModule;
+ const TargetInfo &Target;
+ CGCXXABI &TheCXXABI;
+
+  // This should not be moved earlier, since its initialization depends on
+  // some of the previous reference members being already initialized.
+ const ABIInfo &TheABIInfo;
+
+ /// The opaque type map for Objective-C interfaces. All direct
+ /// manipulation is done by the runtime interfaces, which are
+ /// responsible for coercing to the appropriate type; these opaque
+ /// types are never refined.
+ llvm::DenseMap<const ObjCInterfaceType*, llvm::Type *> InterfaceTypes;
+
+ /// Maps clang struct type with corresponding record layout info.
+ llvm::DenseMap<const Type*, CGRecordLayout *> CGRecordLayouts;
+
+ /// Contains the LLVM IR type for any converted RecordDecl.
+ llvm::DenseMap<const Type*, llvm::StructType *> RecordDeclTypes;
+
+ /// Hold memoized CGFunctionInfo results.
+ llvm::FoldingSet<CGFunctionInfo> FunctionInfos;
+
+  /// This set keeps track of records that we're currently converting
+  /// to an IR type. For example, when converting:
+  /// 'struct A { struct B { int x; } b; };', while processing 'x' both the
+  /// 'A' and 'B' types will be in this set.
+ llvm::SmallPtrSet<const Type*, 4> RecordsBeingLaidOut;
+
+ llvm::SmallPtrSet<const CGFunctionInfo*, 4> FunctionsBeingProcessed;
+
+ /// True if we didn't layout a function due to a being inside
+ /// a recursive struct conversion, set this to true.
+ bool SkippedLayout;
+
+ SmallVector<const RecordDecl *, 8> DeferredRecords;
+
+ /// This map keeps cache of llvm::Types and maps clang::Type to
+ /// corresponding llvm::Type.
+ llvm::DenseMap<const Type *, llvm::Type *> TypeCache;
+
+public:
+ CodeGenTypes(CodeGenModule &cgm);
+ ~CodeGenTypes();
+
+ const llvm::DataLayout &getDataLayout() const {
+ return TheModule.getDataLayout();
+ }
+ ASTContext &getContext() const { return Context; }
+ const ABIInfo &getABIInfo() const { return TheABIInfo; }
+ const TargetInfo &getTarget() const { return Target; }
+ CGCXXABI &getCXXABI() const { return TheCXXABI; }
+ llvm::LLVMContext &getLLVMContext() { return TheModule.getContext(); }
+
+ /// ConvertType - Convert type T into a llvm::Type.
+ llvm::Type *ConvertType(QualType T);
+
+  /// \brief Converts the given function type into an llvm::Type. The
+  /// FunctionDecl should be passed when we know the target of the function
+  /// we want to convert: some functions (explicitly, those with
+  /// pass_object_size parameters) may not have the same signature as their
+  /// type portrays, and can only be called directly.
+ llvm::Type *ConvertFunctionType(QualType FT,
+ const FunctionDecl *FD = nullptr);
+
+ /// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
+ /// ConvertType in that it is used to convert to the memory representation for
+ /// a type. For example, the scalar representation for _Bool is i1, but the
+ /// memory representation is usually i8 or i32, depending on the target.
+ llvm::Type *ConvertTypeForMem(QualType T);
+
+ /// GetFunctionType - Get the LLVM function type for \arg Info.
+ llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info);
+
+ llvm::FunctionType *GetFunctionType(GlobalDecl GD);
+
+ /// isFuncTypeConvertible - Utility to check whether a function type can
+ /// be converted to an LLVM type (i.e. doesn't depend on an incomplete tag
+ /// type).
+ bool isFuncTypeConvertible(const FunctionType *FT);
+ bool isFuncParamTypeConvertible(QualType Ty);
+
+  /// GetFunctionTypeForVTable - Get the LLVM function type for use in a
+  /// vtable, given a CXXMethodDecl. If the method has an incomplete return
+  /// type, and/or incomplete argument types, this will return the opaque
+  /// type.
+ llvm::Type *GetFunctionTypeForVTable(GlobalDecl GD);
+
+ const CGRecordLayout &getCGRecordLayout(const RecordDecl*);
+
+ /// UpdateCompletedType - When we find the full definition for a TagDecl,
+ /// replace the 'opaque' type we previously made for it if applicable.
+ void UpdateCompletedType(const TagDecl *TD);
+
+  /// arrangeNullaryFunction - Get the function info for a void()
+  /// function with standard CC.
+ const CGFunctionInfo &arrangeNullaryFunction();
+
+ // The arrangement methods are split into three families:
+ // - those meant to drive the signature and prologue/epilogue
+ // of a function declaration or definition,
+ // - those meant for the computation of the LLVM type for an abstract
+ // appearance of a function, and
+ // - those meant for performing the IR-generation of a call.
+ // They differ mainly in how they deal with optional (i.e. variadic)
+ // arguments, as well as unprototyped functions.
+ //
+ // Key points:
+ // - The CGFunctionInfo for emitting a specific call site must include
+ // entries for the optional arguments.
+ // - The function type used at the call site must reflect the formal
+ // signature of the declaration being called, or else the call will
+ // go awry.
+ // - For the most part, unprototyped functions are called by casting to
+ // a formal signature inferred from the specific argument types used
+ // at the call-site. However, some targets (e.g. x86-64) screw with
+ // this for compatibility reasons.
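+  //
+  // A minimal sketch of the last point (illustrative C): a call to an
+  // unprototyped function is emitted against a signature inferred from the
+  // argument types at the call site:
+  //
+  //   void f();      // no prototype
+  //   f(1, 2.0);     // called as if declared 'void f(int, double)'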
+
+ const CGFunctionInfo &arrangeGlobalDeclaration(GlobalDecl GD);
+ const CGFunctionInfo &arrangeFunctionDeclaration(const FunctionDecl *FD);
+ const CGFunctionInfo &
+ arrangeFreeFunctionDeclaration(QualType ResTy, const FunctionArgList &Args,
+ const FunctionType::ExtInfo &Info,
+ bool isVariadic);
+
+ const CGFunctionInfo &arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD);
+ const CGFunctionInfo &arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
+ QualType receiverType);
+
+ const CGFunctionInfo &arrangeCXXMethodDeclaration(const CXXMethodDecl *MD);
+ const CGFunctionInfo &arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
+ StructorType Type);
+ const CGFunctionInfo &arrangeCXXConstructorCall(const CallArgList &Args,
+ const CXXConstructorDecl *D,
+ CXXCtorType CtorKind,
+ unsigned ExtraArgs);
+ const CGFunctionInfo &arrangeFreeFunctionCall(const CallArgList &Args,
+ const FunctionType *Ty,
+ bool ChainCall);
+ const CGFunctionInfo &arrangeFreeFunctionCall(QualType ResTy,
+ const CallArgList &args,
+ FunctionType::ExtInfo info,
+ RequiredArgs required);
+ const CGFunctionInfo &arrangeBlockFunctionCall(const CallArgList &args,
+ const FunctionType *type);
+
+ const CGFunctionInfo &arrangeCXXMethodCall(const CallArgList &args,
+ const FunctionProtoType *type,
+ RequiredArgs required);
+ const CGFunctionInfo &arrangeMSMemberPointerThunk(const CXXMethodDecl *MD);
+ const CGFunctionInfo &arrangeMSCtorClosure(const CXXConstructorDecl *CD,
+ CXXCtorType CT);
+ const CGFunctionInfo &arrangeFreeFunctionType(CanQual<FunctionProtoType> Ty,
+ const FunctionDecl *FD);
+ const CGFunctionInfo &arrangeFreeFunctionType(CanQual<FunctionNoProtoType> Ty);
+ const CGFunctionInfo &arrangeCXXMethodType(const CXXRecordDecl *RD,
+ const FunctionProtoType *FTP,
+ const CXXMethodDecl *MD);
+
+ /// "Arrange" the LLVM information for a call or type with the given
+ /// signature. This is largely an internal method; other clients
+ /// should use one of the above routines, which ultimately defer to
+ /// this.
+ ///
+ /// \param argTypes - must all actually be canonical as params
+ const CGFunctionInfo &arrangeLLVMFunctionInfo(CanQualType returnType,
+ bool instanceMethod,
+ bool chainCall,
+ ArrayRef<CanQualType> argTypes,
+ FunctionType::ExtInfo info,
+ RequiredArgs args);
+
+ /// \brief Compute a new LLVM record layout object for the given record.
+ CGRecordLayout *ComputeRecordLayout(const RecordDecl *D,
+ llvm::StructType *Ty);
+
+ /// addRecordTypeName - Compute a name from the given record decl with an
+ /// optional suffix and name the given LLVM type using it.
+ void addRecordTypeName(const RecordDecl *RD, llvm::StructType *Ty,
+ StringRef suffix);
+
+public: // These are internal details of CGT that shouldn't be used externally.
+ /// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
+ llvm::StructType *ConvertRecordDeclType(const RecordDecl *TD);
+
+ /// getExpandedTypes - Expand the type \arg Ty into the LLVM
+ /// argument types it would be passed as. See ABIArgInfo::Expand.
+ void getExpandedTypes(QualType Ty,
+ SmallVectorImpl<llvm::Type *>::iterator &TI);
+
+  /// isZeroInitializable - Return whether a type can be
+ /// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
+ bool isZeroInitializable(QualType T);
+
+  /// isZeroInitializable - Return whether a record type can be
+ /// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
+ bool isZeroInitializable(const RecordDecl *RD);
+
+ bool isRecordLayoutComplete(const Type *Ty) const;
+ bool noRecordsBeingLaidOut() const {
+ return RecordsBeingLaidOut.empty();
+ }
+ bool isRecordBeingLaidOut(const Type *Ty) const {
+ return RecordsBeingLaidOut.count(Ty);
+ }
+
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CoverageMappingGen.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CoverageMappingGen.cpp
new file mode 100644
index 0000000..1d4d709
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CoverageMappingGen.cpp
@@ -0,0 +1,1050 @@
+//===--- CoverageMappingGen.cpp - Coverage mapping generation ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Instrumentation-based code coverage mapping generator
+//
+//===----------------------------------------------------------------------===//
+
+#include "CoverageMappingGen.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ProfileData/CoverageMapping.h"
+#include "llvm/ProfileData/CoverageMappingReader.h"
+#include "llvm/ProfileData/CoverageMappingWriter.h"
+#include "llvm/ProfileData/InstrProfReader.h"
+#include "llvm/Support/FileSystem.h"
+
+using namespace clang;
+using namespace CodeGen;
+using namespace llvm::coverage;
+
+void CoverageSourceInfo::SourceRangeSkipped(SourceRange Range) {
+ SkippedRanges.push_back(Range);
+}
+
+namespace {
+
+/// \brief A region of source code that can be mapped to a counter.
+class SourceMappingRegion {
+ Counter Count;
+
+ /// \brief The region's starting location.
+ Optional<SourceLocation> LocStart;
+
+ /// \brief The region's ending location.
+ Optional<SourceLocation> LocEnd;
+
+public:
+ SourceMappingRegion(Counter Count, Optional<SourceLocation> LocStart,
+ Optional<SourceLocation> LocEnd)
+ : Count(Count), LocStart(LocStart), LocEnd(LocEnd) {}
+
+ const Counter &getCounter() const { return Count; }
+
+ void setCounter(Counter C) { Count = C; }
+
+ bool hasStartLoc() const { return LocStart.hasValue(); }
+
+ void setStartLoc(SourceLocation Loc) { LocStart = Loc; }
+
+ SourceLocation getStartLoc() const {
+ assert(LocStart && "Region has no start location");
+ return *LocStart;
+ }
+
+ bool hasEndLoc() const { return LocEnd.hasValue(); }
+
+ void setEndLoc(SourceLocation Loc) { LocEnd = Loc; }
+
+ SourceLocation getEndLoc() const {
+ assert(LocEnd && "Region has no end location");
+ return *LocEnd;
+ }
+};
+
+/// \brief Provides the common functionality for the different
+/// coverage mapping region builders.
+class CoverageMappingBuilder {
+public:
+ CoverageMappingModuleGen &CVM;
+ SourceManager &SM;
+ const LangOptions &LangOpts;
+
+private:
+ /// \brief Map of clang's FileIDs to IDs used for coverage mapping.
+ llvm::SmallDenseMap<FileID, std::pair<unsigned, SourceLocation>, 8>
+ FileIDMapping;
+
+public:
+ /// \brief The coverage mapping regions for this function
+ llvm::SmallVector<CounterMappingRegion, 32> MappingRegions;
+ /// \brief The source mapping regions for this function.
+ std::vector<SourceMappingRegion> SourceRegions;
+
+ CoverageMappingBuilder(CoverageMappingModuleGen &CVM, SourceManager &SM,
+ const LangOptions &LangOpts)
+ : CVM(CVM), SM(SM), LangOpts(LangOpts) {}
+
+ /// \brief Return the precise end location for the given token.
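+  ///
+  /// For example (a sketch): if \c Loc points at the start of the
+  /// three-character token \c foo, the result is \c Loc advanced by three,
+  /// i.e. the location just past the token.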
+ SourceLocation getPreciseTokenLocEnd(SourceLocation Loc) {
+ // We avoid getLocForEndOfToken here, because it doesn't do what we want for
+ // macro locations, which we just treat as expanded files.
+ unsigned TokLen =
+ Lexer::MeasureTokenLength(SM.getSpellingLoc(Loc), SM, LangOpts);
+ return Loc.getLocWithOffset(TokLen);
+ }
+
+ /// \brief Return the start location of an included file or expanded macro.
+ SourceLocation getStartOfFileOrMacro(SourceLocation Loc) {
+ if (Loc.isMacroID())
+ return Loc.getLocWithOffset(-SM.getFileOffset(Loc));
+ return SM.getLocForStartOfFile(SM.getFileID(Loc));
+ }
+
+ /// \brief Return the end location of an included file or expanded macro.
+ SourceLocation getEndOfFileOrMacro(SourceLocation Loc) {
+ if (Loc.isMacroID())
+ return Loc.getLocWithOffset(SM.getFileIDSize(SM.getFileID(Loc)) -
+ SM.getFileOffset(Loc));
+ return SM.getLocForEndOfFile(SM.getFileID(Loc));
+ }
+
+  /// \brief Find out where the current file is included or the macro is
+  /// expanded.
+ SourceLocation getIncludeOrExpansionLoc(SourceLocation Loc) {
+ return Loc.isMacroID() ? SM.getImmediateExpansionRange(Loc).first
+ : SM.getIncludeLoc(SM.getFileID(Loc));
+ }
+
+ /// \brief Return true if \c Loc is a location in a built-in macro.
+ bool isInBuiltin(SourceLocation Loc) {
+ return strcmp(SM.getBufferName(SM.getSpellingLoc(Loc)), "<built-in>") == 0;
+ }
+
+ /// \brief Get the start of \c S ignoring macro arguments and builtin macros.
+ SourceLocation getStart(const Stmt *S) {
+ SourceLocation Loc = S->getLocStart();
+ while (SM.isMacroArgExpansion(Loc) || isInBuiltin(Loc))
+ Loc = SM.getImmediateExpansionRange(Loc).first;
+ return Loc;
+ }
+
+ /// \brief Get the end of \c S ignoring macro arguments and builtin macros.
+ SourceLocation getEnd(const Stmt *S) {
+ SourceLocation Loc = S->getLocEnd();
+ while (SM.isMacroArgExpansion(Loc) || isInBuiltin(Loc))
+ Loc = SM.getImmediateExpansionRange(Loc).first;
+ return getPreciseTokenLocEnd(Loc);
+ }
+
+  /// \brief Find the set of files we have regions for and assign IDs.
+ ///
+ /// Fills \c Mapping with the virtual file mapping needed to write out
+ /// coverage and collects the necessary file information to emit source and
+ /// expansion regions.
+ void gatherFileIDs(SmallVectorImpl<unsigned> &Mapping) {
+ FileIDMapping.clear();
+
+ SmallVector<FileID, 8> Visited;
+ SmallVector<std::pair<SourceLocation, unsigned>, 8> FileLocs;
+ for (const auto &Region : SourceRegions) {
+ SourceLocation Loc = Region.getStartLoc();
+ FileID File = SM.getFileID(Loc);
+ if (std::find(Visited.begin(), Visited.end(), File) != Visited.end())
+ continue;
+ Visited.push_back(File);
+
+ unsigned Depth = 0;
+ for (SourceLocation Parent = getIncludeOrExpansionLoc(Loc);
+ Parent.isValid(); Parent = getIncludeOrExpansionLoc(Parent))
+ ++Depth;
+ FileLocs.push_back(std::make_pair(Loc, Depth));
+ }
+ std::stable_sort(FileLocs.begin(), FileLocs.end(), llvm::less_second());
+
+ for (const auto &FL : FileLocs) {
+ SourceLocation Loc = FL.first;
+ FileID SpellingFile = SM.getDecomposedSpellingLoc(Loc).first;
+ auto Entry = SM.getFileEntryForID(SpellingFile);
+ if (!Entry)
+ continue;
+
+ FileIDMapping[SM.getFileID(Loc)] = std::make_pair(Mapping.size(), Loc);
+ Mapping.push_back(CVM.getFileID(Entry));
+ }
+ }
+
+ /// \brief Get the coverage mapping file ID for \c Loc.
+ ///
+  /// If no such file ID exists, return None.
+ Optional<unsigned> getCoverageFileID(SourceLocation Loc) {
+ auto Mapping = FileIDMapping.find(SM.getFileID(Loc));
+ if (Mapping != FileIDMapping.end())
+ return Mapping->second.first;
+ return None;
+ }
+
+  /// \brief Return true if the given clang FileID has a corresponding
+  /// coverage file ID.
+ bool hasExistingCoverageFileID(FileID File) const {
+ return FileIDMapping.count(File);
+ }
+
+  /// \brief Gather all the regions that were skipped by the preprocessor
+  /// using constructs like #if.
+ void gatherSkippedRegions() {
+    // An array of the minimum lineStarts and the maximum lineEnds
+    // for mapping regions from the appropriate source files.
+ llvm::SmallVector<std::pair<unsigned, unsigned>, 8> FileLineRanges;
+ FileLineRanges.resize(
+ FileIDMapping.size(),
+ std::make_pair(std::numeric_limits<unsigned>::max(), 0));
+ for (const auto &R : MappingRegions) {
+ FileLineRanges[R.FileID].first =
+ std::min(FileLineRanges[R.FileID].first, R.LineStart);
+ FileLineRanges[R.FileID].second =
+ std::max(FileLineRanges[R.FileID].second, R.LineEnd);
+ }
+
+ auto SkippedRanges = CVM.getSourceInfo().getSkippedRanges();
+ for (const auto &I : SkippedRanges) {
+ auto LocStart = I.getBegin();
+ auto LocEnd = I.getEnd();
+ assert(SM.isWrittenInSameFile(LocStart, LocEnd) &&
+ "region spans multiple files");
+
+ auto CovFileID = getCoverageFileID(LocStart);
+ if (!CovFileID)
+ continue;
+ unsigned LineStart = SM.getSpellingLineNumber(LocStart);
+ unsigned ColumnStart = SM.getSpellingColumnNumber(LocStart);
+ unsigned LineEnd = SM.getSpellingLineNumber(LocEnd);
+ unsigned ColumnEnd = SM.getSpellingColumnNumber(LocEnd);
+ auto Region = CounterMappingRegion::makeSkipped(
+ *CovFileID, LineStart, ColumnStart, LineEnd, ColumnEnd);
+ // Make sure that we only collect the regions that are inside
+      // the source code of this function.
+ if (Region.LineStart >= FileLineRanges[*CovFileID].first &&
+ Region.LineEnd <= FileLineRanges[*CovFileID].second)
+ MappingRegions.push_back(Region);
+ }
+ }
+
+ /// \brief Generate the coverage counter mapping regions from collected
+ /// source regions.
+ void emitSourceRegions() {
+ for (const auto &Region : SourceRegions) {
+ assert(Region.hasEndLoc() && "incomplete region");
+
+ SourceLocation LocStart = Region.getStartLoc();
+ assert(SM.getFileID(LocStart).isValid() && "region in invalid file");
+
+ auto CovFileID = getCoverageFileID(LocStart);
+ // Ignore regions that don't have a file, such as builtin macros.
+ if (!CovFileID)
+ continue;
+
+ SourceLocation LocEnd = Region.getEndLoc();
+ assert(SM.isWrittenInSameFile(LocStart, LocEnd) &&
+ "region spans multiple files");
+
+      // Find the spelling locations for the mapping region.
+ unsigned LineStart = SM.getSpellingLineNumber(LocStart);
+ unsigned ColumnStart = SM.getSpellingColumnNumber(LocStart);
+ unsigned LineEnd = SM.getSpellingLineNumber(LocEnd);
+ unsigned ColumnEnd = SM.getSpellingColumnNumber(LocEnd);
+
+ assert(LineStart <= LineEnd && "region start and end out of order");
+ MappingRegions.push_back(CounterMappingRegion::makeRegion(
+ Region.getCounter(), *CovFileID, LineStart, ColumnStart, LineEnd,
+ ColumnEnd));
+ }
+ }
+
+ /// \brief Generate expansion regions for each virtual file we've seen.
+ void emitExpansionRegions() {
+ for (const auto &FM : FileIDMapping) {
+ SourceLocation ExpandedLoc = FM.second.second;
+ SourceLocation ParentLoc = getIncludeOrExpansionLoc(ExpandedLoc);
+ if (ParentLoc.isInvalid())
+ continue;
+
+ auto ParentFileID = getCoverageFileID(ParentLoc);
+ if (!ParentFileID)
+ continue;
+ auto ExpandedFileID = getCoverageFileID(ExpandedLoc);
+ assert(ExpandedFileID && "expansion in uncovered file");
+
+ SourceLocation LocEnd = getPreciseTokenLocEnd(ParentLoc);
+ assert(SM.isWrittenInSameFile(ParentLoc, LocEnd) &&
+ "region spans multiple files");
+
+ unsigned LineStart = SM.getSpellingLineNumber(ParentLoc);
+ unsigned ColumnStart = SM.getSpellingColumnNumber(ParentLoc);
+ unsigned LineEnd = SM.getSpellingLineNumber(LocEnd);
+ unsigned ColumnEnd = SM.getSpellingColumnNumber(LocEnd);
+
+ MappingRegions.push_back(CounterMappingRegion::makeExpansion(
+ *ParentFileID, *ExpandedFileID, LineStart, ColumnStart, LineEnd,
+ ColumnEnd));
+ }
+ }
+};
+
+/// \brief Creates unreachable coverage regions for the functions that
+/// are not emitted.
+struct EmptyCoverageMappingBuilder : public CoverageMappingBuilder {
+ EmptyCoverageMappingBuilder(CoverageMappingModuleGen &CVM, SourceManager &SM,
+ const LangOptions &LangOpts)
+ : CoverageMappingBuilder(CVM, SM, LangOpts) {}
+
+ void VisitDecl(const Decl *D) {
+ if (!D->hasBody())
+ return;
+ auto Body = D->getBody();
+ SourceRegions.emplace_back(Counter(), getStart(Body), getEnd(Body));
+ }
+
+  /// \brief Write the mapping data to the output stream.
+ void write(llvm::raw_ostream &OS) {
+ SmallVector<unsigned, 16> FileIDMapping;
+ gatherFileIDs(FileIDMapping);
+ emitSourceRegions();
+
+ CoverageMappingWriter Writer(FileIDMapping, None, MappingRegions);
+ Writer.write(OS);
+ }
+};
+
+/// \brief A StmtVisitor that creates coverage mapping regions which map
+/// source code locations to the PGO counters.
+struct CounterCoverageMappingBuilder
+ : public CoverageMappingBuilder,
+ public ConstStmtVisitor<CounterCoverageMappingBuilder> {
+ /// \brief The map of statements to count values.
+ llvm::DenseMap<const Stmt *, unsigned> &CounterMap;
+
+ /// \brief A stack of currently live regions.
+ std::vector<SourceMappingRegion> RegionStack;
+
+ CounterExpressionBuilder Builder;
+
+ /// \brief A location in the most recently visited file or macro.
+ ///
+ /// This is used to adjust the active source regions appropriately when
+ /// expressions cross file or macro boundaries.
+ SourceLocation MostRecentLocation;
+
+  /// \brief Return a counter for the subtraction of \c RHS from \c LHS.
+ Counter subtractCounters(Counter LHS, Counter RHS) {
+ return Builder.subtract(LHS, RHS);
+ }
+
+ /// \brief Return a counter for the sum of \c LHS and \c RHS.
+ Counter addCounters(Counter LHS, Counter RHS) {
+ return Builder.add(LHS, RHS);
+ }
+
+ Counter addCounters(Counter C1, Counter C2, Counter C3) {
+ return addCounters(addCounters(C1, C2), C3);
+ }
+
+ Counter addCounters(Counter C1, Counter C2, Counter C3, Counter C4) {
+ return addCounters(addCounters(C1, C2, C3), C4);
+ }
+
+ /// \brief Return the region counter for the given statement.
+ ///
+ /// This should only be called on statements that have a dedicated counter.
+ Counter getRegionCounter(const Stmt *S) {
+ return Counter::getCounter(CounterMap[S]);
+ }
+
+ /// \brief Push a region onto the stack.
+ ///
+ /// Returns the index on the stack where the region was pushed. This can be
+ /// used with popRegions to exit a "scope", ending the region that was pushed.
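+  ///
+  /// A typical pairing (a sketch, mirroring \c propagateCounts below):
+  /// \code
+  ///   size_t Index = pushRegion(Count, getStart(S), getEnd(S));
+  ///   Visit(S);
+  ///   popRegions(Index);
+  /// \endcode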
+ size_t pushRegion(Counter Count, Optional<SourceLocation> StartLoc = None,
+ Optional<SourceLocation> EndLoc = None) {
+ if (StartLoc)
+ MostRecentLocation = *StartLoc;
+ RegionStack.emplace_back(Count, StartLoc, EndLoc);
+
+ return RegionStack.size() - 1;
+ }
+
+ /// \brief Pop regions from the stack into the function's list of regions.
+ ///
+ /// Adds all regions from \c ParentIndex to the top of the stack to the
+ /// function's \c SourceRegions.
+ void popRegions(size_t ParentIndex) {
+ assert(RegionStack.size() >= ParentIndex && "parent not in stack");
+ while (RegionStack.size() > ParentIndex) {
+ SourceMappingRegion &Region = RegionStack.back();
+ if (Region.hasStartLoc()) {
+ SourceLocation StartLoc = Region.getStartLoc();
+ SourceLocation EndLoc = Region.hasEndLoc()
+ ? Region.getEndLoc()
+ : RegionStack[ParentIndex].getEndLoc();
+ while (!SM.isWrittenInSameFile(StartLoc, EndLoc)) {
+ // The region ends in a nested file or macro expansion. Create a
+ // separate region for each expansion.
+ SourceLocation NestedLoc = getStartOfFileOrMacro(EndLoc);
+ assert(SM.isWrittenInSameFile(NestedLoc, EndLoc));
+
+ SourceRegions.emplace_back(Region.getCounter(), NestedLoc, EndLoc);
+
+ EndLoc = getPreciseTokenLocEnd(getIncludeOrExpansionLoc(EndLoc));
+ if (EndLoc.isInvalid())
+ llvm::report_fatal_error("File exit not handled before popRegions");
+ }
+ Region.setEndLoc(EndLoc);
+
+ MostRecentLocation = EndLoc;
+ // If this region happens to span an entire expansion, we need to make
+ // sure we don't overlap the parent region with it.
+ if (StartLoc == getStartOfFileOrMacro(StartLoc) &&
+ EndLoc == getEndOfFileOrMacro(EndLoc))
+ MostRecentLocation = getIncludeOrExpansionLoc(EndLoc);
+
+ assert(SM.isWrittenInSameFile(Region.getStartLoc(), EndLoc));
+ SourceRegions.push_back(Region);
+ }
+ RegionStack.pop_back();
+ }
+ }
+
+ /// \brief Return the currently active region.
+ SourceMappingRegion &getRegion() {
+ assert(!RegionStack.empty() && "statement has no region");
+ return RegionStack.back();
+ }
+
+ /// \brief Propagate counts through the children of \c S.
+ Counter propagateCounts(Counter TopCount, const Stmt *S) {
+ size_t Index = pushRegion(TopCount, getStart(S), getEnd(S));
+ Visit(S);
+ Counter ExitCount = getRegion().getCounter();
+ popRegions(Index);
+ return ExitCount;
+ }
+
+ /// \brief Adjust the most recently visited location to \c EndLoc.
+ ///
+ /// This should be used after visiting any statements in non-source order.
+ void adjustForOutOfOrderTraversal(SourceLocation EndLoc) {
+ MostRecentLocation = EndLoc;
+ // Avoid adding duplicate regions if we have a completed region on the top
+ // of the stack and are adjusting to the end of a virtual file.
+ if (getRegion().hasEndLoc() &&
+ MostRecentLocation == getEndOfFileOrMacro(MostRecentLocation))
+ MostRecentLocation = getIncludeOrExpansionLoc(MostRecentLocation);
+ }
+
+ /// \brief Check whether \c Loc is included or expanded from \c Parent.
+ bool isNestedIn(SourceLocation Loc, FileID Parent) {
+ do {
+ Loc = getIncludeOrExpansionLoc(Loc);
+ if (Loc.isInvalid())
+ return false;
+ } while (!SM.isInFileID(Loc, Parent));
+ return true;
+ }
+
+ /// \brief Adjust regions and state when \c NewLoc exits a file.
+ ///
+ /// If moving from our most recently tracked location to \c NewLoc exits any
+ /// files, this adjusts our current region stack and creates the file regions
+ /// for the exited file.
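+  ///
+  /// For example (a sketch): if \c MostRecentLocation is inside a macro
+  /// expansion and \c NewLoc is back in the surrounding file, any region on
+  /// the stack that started inside the expansion is emitted up to the end
+  /// of that virtual file before we continue in the parent file.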
+ void handleFileExit(SourceLocation NewLoc) {
+ if (NewLoc.isInvalid() ||
+ SM.isWrittenInSameFile(MostRecentLocation, NewLoc))
+ return;
+
+ // If NewLoc is not in a file that contains MostRecentLocation, walk up to
+ // find the common ancestor.
+ SourceLocation LCA = NewLoc;
+ FileID ParentFile = SM.getFileID(LCA);
+ while (!isNestedIn(MostRecentLocation, ParentFile)) {
+ LCA = getIncludeOrExpansionLoc(LCA);
+ if (LCA.isInvalid() || SM.isWrittenInSameFile(LCA, MostRecentLocation)) {
+ // Since there isn't a common ancestor, no file was exited. We just need
+ // to adjust our location to the new file.
+ MostRecentLocation = NewLoc;
+ return;
+ }
+ ParentFile = SM.getFileID(LCA);
+ }
+
+ llvm::SmallSet<SourceLocation, 8> StartLocs;
+ Optional<Counter> ParentCounter;
+ for (SourceMappingRegion &I : llvm::reverse(RegionStack)) {
+ if (!I.hasStartLoc())
+ continue;
+ SourceLocation Loc = I.getStartLoc();
+ if (!isNestedIn(Loc, ParentFile)) {
+ ParentCounter = I.getCounter();
+ break;
+ }
+
+ while (!SM.isInFileID(Loc, ParentFile)) {
+ // The most nested region for each start location is the one with the
+ // correct count. We avoid creating redundant regions by stopping once
+ // we've seen this region.
+ if (StartLocs.insert(Loc).second)
+ SourceRegions.emplace_back(I.getCounter(), Loc,
+ getEndOfFileOrMacro(Loc));
+ Loc = getIncludeOrExpansionLoc(Loc);
+ }
+ I.setStartLoc(getPreciseTokenLocEnd(Loc));
+ }
+
+ if (ParentCounter) {
+ // If the file is contained completely by another region and doesn't
+ // immediately start its own region, the whole file gets a region
+ // corresponding to the parent.
+ SourceLocation Loc = MostRecentLocation;
+ while (isNestedIn(Loc, ParentFile)) {
+ SourceLocation FileStart = getStartOfFileOrMacro(Loc);
+ if (StartLocs.insert(FileStart).second)
+ SourceRegions.emplace_back(*ParentCounter, FileStart,
+ getEndOfFileOrMacro(Loc));
+ Loc = getIncludeOrExpansionLoc(Loc);
+ }
+ }
+
+ MostRecentLocation = NewLoc;
+ }
+
+ /// \brief Ensure that \c S is included in the current region.
+ void extendRegion(const Stmt *S) {
+ SourceMappingRegion &Region = getRegion();
+ SourceLocation StartLoc = getStart(S);
+
+ handleFileExit(StartLoc);
+ if (!Region.hasStartLoc())
+ Region.setStartLoc(StartLoc);
+ }
+
+ /// \brief Mark \c S as a terminator, starting a zero region.
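+  ///
+  /// For example, any code that follows a \c return in the same block is
+  /// unreachable, so it is covered by a region with a zero counter.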
+ void terminateRegion(const Stmt *S) {
+ extendRegion(S);
+ SourceMappingRegion &Region = getRegion();
+ if (!Region.hasEndLoc())
+ Region.setEndLoc(getEnd(S));
+ pushRegion(Counter::getZero());
+ }
+
+ /// \brief Keep counts of breaks and continues inside loops.
+ struct BreakContinue {
+ Counter BreakCount;
+ Counter ContinueCount;
+ };
+ SmallVector<BreakContinue, 8> BreakContinueStack;
+
+ CounterCoverageMappingBuilder(
+ CoverageMappingModuleGen &CVM,
+ llvm::DenseMap<const Stmt *, unsigned> &CounterMap, SourceManager &SM,
+ const LangOptions &LangOpts)
+ : CoverageMappingBuilder(CVM, SM, LangOpts), CounterMap(CounterMap) {}
+
+  /// \brief Write the mapping data to the output stream.
+ void write(llvm::raw_ostream &OS) {
+ llvm::SmallVector<unsigned, 8> VirtualFileMapping;
+ gatherFileIDs(VirtualFileMapping);
+ emitSourceRegions();
+ emitExpansionRegions();
+ gatherSkippedRegions();
+
+ CoverageMappingWriter Writer(VirtualFileMapping, Builder.getExpressions(),
+ MappingRegions);
+ Writer.write(OS);
+ }
+
+ void VisitStmt(const Stmt *S) {
+ if (S->getLocStart().isValid())
+ extendRegion(S);
+ for (const Stmt *Child : S->children())
+ if (Child)
+ this->Visit(Child);
+ handleFileExit(getEnd(S));
+ }
+
+ void VisitDecl(const Decl *D) {
+ Stmt *Body = D->getBody();
+ propagateCounts(getRegionCounter(Body), Body);
+ }
+
+ void VisitReturnStmt(const ReturnStmt *S) {
+ extendRegion(S);
+ if (S->getRetValue())
+ Visit(S->getRetValue());
+ terminateRegion(S);
+ }
+
+ void VisitCXXThrowExpr(const CXXThrowExpr *E) {
+ extendRegion(E);
+ if (E->getSubExpr())
+ Visit(E->getSubExpr());
+ terminateRegion(E);
+ }
+
+ void VisitGotoStmt(const GotoStmt *S) { terminateRegion(S); }
+
+ void VisitLabelStmt(const LabelStmt *S) {
+ SourceLocation Start = getStart(S);
+ // We can't extendRegion here or we risk overlapping with our new region.
+ handleFileExit(Start);
+ pushRegion(getRegionCounter(S), Start);
+ Visit(S->getSubStmt());
+ }
+
+ void VisitBreakStmt(const BreakStmt *S) {
+ assert(!BreakContinueStack.empty() && "break not in a loop or switch!");
+ BreakContinueStack.back().BreakCount = addCounters(
+ BreakContinueStack.back().BreakCount, getRegion().getCounter());
+ terminateRegion(S);
+ }
+
+ void VisitContinueStmt(const ContinueStmt *S) {
+ assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
+ BreakContinueStack.back().ContinueCount = addCounters(
+ BreakContinueStack.back().ContinueCount, getRegion().getCounter());
+ terminateRegion(S);
+ }
+
+ void VisitWhileStmt(const WhileStmt *S) {
+ extendRegion(S);
+
+ Counter ParentCount = getRegion().getCounter();
+ Counter BodyCount = getRegionCounter(S);
+
+ // Handle the body first so that we can get the backedge count.
+ BreakContinueStack.push_back(BreakContinue());
+ extendRegion(S->getBody());
+ Counter BackedgeCount = propagateCounts(BodyCount, S->getBody());
+ BreakContinue BC = BreakContinueStack.pop_back_val();
+
+ // Go back to handle the condition.
+ Counter CondCount =
+ addCounters(ParentCount, BackedgeCount, BC.ContinueCount);
+ propagateCounts(CondCount, S->getCond());
+ adjustForOutOfOrderTraversal(getEnd(S));
+
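+    // A sketch with concrete numbers: a loop entered once (ParentCount = 1)
+    // whose body runs three times with no break or continue has
+    // BackedgeCount = 3, so CondCount = 1 + 3 + 0 = 4 and
+    // OutCount = 0 + (4 - 3) = 1, i.e. the loop is exited exactly once.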
+ Counter OutCount =
+ addCounters(BC.BreakCount, subtractCounters(CondCount, BodyCount));
+ if (OutCount != ParentCount)
+ pushRegion(OutCount);
+ }
+
+ void VisitDoStmt(const DoStmt *S) {
+ extendRegion(S);
+
+ Counter ParentCount = getRegion().getCounter();
+ Counter BodyCount = getRegionCounter(S);
+
+ BreakContinueStack.push_back(BreakContinue());
+ extendRegion(S->getBody());
+ Counter BackedgeCount =
+ propagateCounts(addCounters(ParentCount, BodyCount), S->getBody());
+ BreakContinue BC = BreakContinueStack.pop_back_val();
+
+ Counter CondCount = addCounters(BackedgeCount, BC.ContinueCount);
+ propagateCounts(CondCount, S->getCond());
+
+ Counter OutCount =
+ addCounters(BC.BreakCount, subtractCounters(CondCount, BodyCount));
+ if (OutCount != ParentCount)
+ pushRegion(OutCount);
+ }
+
+ void VisitForStmt(const ForStmt *S) {
+ extendRegion(S);
+ if (S->getInit())
+ Visit(S->getInit());
+
+ Counter ParentCount = getRegion().getCounter();
+ Counter BodyCount = getRegionCounter(S);
+
+ // Handle the body first so that we can get the backedge count.
+ BreakContinueStack.push_back(BreakContinue());
+ extendRegion(S->getBody());
+ Counter BackedgeCount = propagateCounts(BodyCount, S->getBody());
+ BreakContinue BC = BreakContinueStack.pop_back_val();
+
+ // The increment is essentially part of the body but it needs to include
+ // the count for all the continue statements.
+ if (const Stmt *Inc = S->getInc())
+ propagateCounts(addCounters(BackedgeCount, BC.ContinueCount), Inc);
+
+ // Go back to handle the condition.
+ Counter CondCount =
+ addCounters(ParentCount, BackedgeCount, BC.ContinueCount);
+ if (const Expr *Cond = S->getCond()) {
+ propagateCounts(CondCount, Cond);
+ adjustForOutOfOrderTraversal(getEnd(S));
+ }
+
+ Counter OutCount =
+ addCounters(BC.BreakCount, subtractCounters(CondCount, BodyCount));
+ if (OutCount != ParentCount)
+ pushRegion(OutCount);
+ }
+
+ void VisitCXXForRangeStmt(const CXXForRangeStmt *S) {
+ extendRegion(S);
+ Visit(S->getLoopVarStmt());
+ Visit(S->getRangeStmt());
+
+ Counter ParentCount = getRegion().getCounter();
+ Counter BodyCount = getRegionCounter(S);
+
+ BreakContinueStack.push_back(BreakContinue());
+ extendRegion(S->getBody());
+ Counter BackedgeCount = propagateCounts(BodyCount, S->getBody());
+ BreakContinue BC = BreakContinueStack.pop_back_val();
+
+ Counter LoopCount =
+ addCounters(ParentCount, BackedgeCount, BC.ContinueCount);
+ Counter OutCount =
+ addCounters(BC.BreakCount, subtractCounters(LoopCount, BodyCount));
+ if (OutCount != ParentCount)
+ pushRegion(OutCount);
+ }
+
+ void VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S) {
+ extendRegion(S);
+ Visit(S->getElement());
+
+ Counter ParentCount = getRegion().getCounter();
+ Counter BodyCount = getRegionCounter(S);
+
+ BreakContinueStack.push_back(BreakContinue());
+ extendRegion(S->getBody());
+ Counter BackedgeCount = propagateCounts(BodyCount, S->getBody());
+ BreakContinue BC = BreakContinueStack.pop_back_val();
+
+ Counter LoopCount =
+ addCounters(ParentCount, BackedgeCount, BC.ContinueCount);
+ Counter OutCount =
+ addCounters(BC.BreakCount, subtractCounters(LoopCount, BodyCount));
+ if (OutCount != ParentCount)
+ pushRegion(OutCount);
+ }
+
+ void VisitSwitchStmt(const SwitchStmt *S) {
+ extendRegion(S);
+ Visit(S->getCond());
+
+ BreakContinueStack.push_back(BreakContinue());
+
+ const Stmt *Body = S->getBody();
+ extendRegion(Body);
+ if (const auto *CS = dyn_cast<CompoundStmt>(Body)) {
+ if (!CS->body_empty()) {
+ // The body of the switch needs a zero region so that fallthrough counts
+ // behave correctly, but it would be misleading to include the braces of
+ // the compound statement in the zeroed area, so we need to handle this
+ // specially.
+ size_t Index =
+ pushRegion(Counter::getZero(), getStart(CS->body_front()),
+ getEnd(CS->body_back()));
+ for (const auto *Child : CS->children())
+ Visit(Child);
+ popRegions(Index);
+ }
+ } else
+ propagateCounts(Counter::getZero(), Body);
+ BreakContinue BC = BreakContinueStack.pop_back_val();
+
+ if (!BreakContinueStack.empty())
+ BreakContinueStack.back().ContinueCount = addCounters(
+ BreakContinueStack.back().ContinueCount, BC.ContinueCount);
+
+ Counter ExitCount = getRegionCounter(S);
+ pushRegion(ExitCount);
+ }
+
+ void VisitSwitchCase(const SwitchCase *S) {
+ extendRegion(S);
+
+ SourceMappingRegion &Parent = getRegion();
+
+ Counter Count = addCounters(Parent.getCounter(), getRegionCounter(S));
+ // Reuse the existing region if it starts at our label. This is typical of
+ // the first case in a switch.
+ if (Parent.hasStartLoc() && Parent.getStartLoc() == getStart(S))
+ Parent.setCounter(Count);
+ else
+ pushRegion(Count, getStart(S));
+
+ if (const auto *CS = dyn_cast<CaseStmt>(S)) {
+ Visit(CS->getLHS());
+ if (const Expr *RHS = CS->getRHS())
+ Visit(RHS);
+ }
+ Visit(S->getSubStmt());
+ }
+
+ void VisitIfStmt(const IfStmt *S) {
+ extendRegion(S);
+ // Extend into the condition before we propagate through it below - this is
+ // needed to handle macros that generate the "if" but not the condition.
+ extendRegion(S->getCond());
+
+ Counter ParentCount = getRegion().getCounter();
+ Counter ThenCount = getRegionCounter(S);
+
+ // Emitting a counter for the condition makes it easier to interpret the
+ // counter for the body when looking at the coverage.
+ propagateCounts(ParentCount, S->getCond());
+
+ extendRegion(S->getThen());
+ Counter OutCount = propagateCounts(ThenCount, S->getThen());
+
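+    // A sketch with concrete numbers: if the "if" is reached ten times
+    // (ParentCount = 10) and the then-branch runs seven times
+    // (ThenCount = 7), the else-branch is taken 10 - 7 = 3 times.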
+ Counter ElseCount = subtractCounters(ParentCount, ThenCount);
+ if (const Stmt *Else = S->getElse()) {
+ extendRegion(S->getElse());
+ OutCount = addCounters(OutCount, propagateCounts(ElseCount, Else));
+ } else
+ OutCount = addCounters(OutCount, ElseCount);
+
+ if (OutCount != ParentCount)
+ pushRegion(OutCount);
+ }
+
+ void VisitCXXTryStmt(const CXXTryStmt *S) {
+ extendRegion(S);
+ Visit(S->getTryBlock());
+ for (unsigned I = 0, E = S->getNumHandlers(); I < E; ++I)
+ Visit(S->getHandler(I));
+
+ Counter ExitCount = getRegionCounter(S);
+ pushRegion(ExitCount);
+ }
+
+ void VisitCXXCatchStmt(const CXXCatchStmt *S) {
+ propagateCounts(getRegionCounter(S), S->getHandlerBlock());
+ }
+
+ void VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
+ extendRegion(E);
+
+ Counter ParentCount = getRegion().getCounter();
+ Counter TrueCount = getRegionCounter(E);
+
+ Visit(E->getCond());
+
+ if (!isa<BinaryConditionalOperator>(E)) {
+ extendRegion(E->getTrueExpr());
+ propagateCounts(TrueCount, E->getTrueExpr());
+ }
+ extendRegion(E->getFalseExpr());
+ propagateCounts(subtractCounters(ParentCount, TrueCount),
+ E->getFalseExpr());
+ }
+
+ void VisitBinLAnd(const BinaryOperator *E) {
+ extendRegion(E);
+ Visit(E->getLHS());
+
+ extendRegion(E->getRHS());
+ propagateCounts(getRegionCounter(E), E->getRHS());
+ }
+
+ void VisitBinLOr(const BinaryOperator *E) {
+ extendRegion(E);
+ Visit(E->getLHS());
+
+ extendRegion(E->getRHS());
+ propagateCounts(getRegionCounter(E), E->getRHS());
+ }
+
+ void VisitLambdaExpr(const LambdaExpr *LE) {
+ // Lambdas are treated as their own functions for now, so we shouldn't
+ // propagate counts into them.
+ }
+};
+}
+
+static bool isMachO(const CodeGenModule &CGM) {
+ return CGM.getTarget().getTriple().isOSBinFormatMachO();
+}
+
+static StringRef getCoverageSection(const CodeGenModule &CGM) {
+ return llvm::getInstrProfCoverageSectionName(isMachO(CGM));
+}
+
+static void dump(llvm::raw_ostream &OS, StringRef FunctionName,
+ ArrayRef<CounterExpression> Expressions,
+ ArrayRef<CounterMappingRegion> Regions) {
+ OS << FunctionName << ":\n";
+ CounterMappingContext Ctx(Expressions);
+ for (const auto &R : Regions) {
+ OS.indent(2);
+ switch (R.Kind) {
+ case CounterMappingRegion::CodeRegion:
+ break;
+ case CounterMappingRegion::ExpansionRegion:
+ OS << "Expansion,";
+ break;
+ case CounterMappingRegion::SkippedRegion:
+ OS << "Skipped,";
+ break;
+ }
+
+ OS << "File " << R.FileID << ", " << R.LineStart << ":" << R.ColumnStart
+ << " -> " << R.LineEnd << ":" << R.ColumnEnd << " = ";
+ Ctx.dump(R.Count, OS);
+ if (R.Kind == CounterMappingRegion::ExpansionRegion)
+ OS << " (Expanded file = " << R.ExpandedFileID << ")";
+ OS << "\n";
+ }
+}
+
+void CoverageMappingModuleGen::addFunctionMappingRecord(
+ llvm::GlobalVariable *NamePtr, StringRef NameValue,
+ uint64_t FuncHash, const std::string &CoverageMapping) {
+ llvm::LLVMContext &Ctx = CGM.getLLVMContext();
+ if (!FunctionRecordTy) {
+ #define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Init) LLVMType,
+ llvm::Type *FunctionRecordTypes[] = {
+ #include "llvm/ProfileData/InstrProfData.inc"
+ };
+ FunctionRecordTy =
+ llvm::StructType::get(Ctx, makeArrayRef(FunctionRecordTypes),
+ /*isPacked=*/true);
+ }
+
+ #define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Init) Init,
+ llvm::Constant *FunctionRecordVals[] = {
+ #include "llvm/ProfileData/InstrProfData.inc"
+ };
+ FunctionRecords.push_back(llvm::ConstantStruct::get(
+ FunctionRecordTy, makeArrayRef(FunctionRecordVals)));
+ CoverageMappings += CoverageMapping;
+
+ if (CGM.getCodeGenOpts().DumpCoverageMapping) {
+ // Dump the coverage mapping data for this function by decoding the
+ // encoded data. This allows us to dump the mapping regions which were
+ // also processed by the CoverageMappingWriter which performs
+ // additional minimization operations such as reducing the number of
+ // expressions.
+ std::vector<StringRef> Filenames;
+ std::vector<CounterExpression> Expressions;
+ std::vector<CounterMappingRegion> Regions;
+ llvm::SmallVector<StringRef, 16> FilenameRefs;
+ FilenameRefs.resize(FileEntries.size());
+ for (const auto &Entry : FileEntries)
+ FilenameRefs[Entry.second] = Entry.first->getName();
+ RawCoverageMappingReader Reader(CoverageMapping, FilenameRefs, Filenames,
+ Expressions, Regions);
+ if (Reader.read())
+ return;
+ dump(llvm::outs(), NameValue, Expressions, Regions);
+ }
+}
+
+void CoverageMappingModuleGen::emit() {
+ if (FunctionRecords.empty())
+ return;
+ llvm::LLVMContext &Ctx = CGM.getLLVMContext();
+ auto *Int32Ty = llvm::Type::getInt32Ty(Ctx);
+
+ // Create the filenames and merge them with coverage mappings
+ llvm::SmallVector<std::string, 16> FilenameStrs;
+ llvm::SmallVector<StringRef, 16> FilenameRefs;
+ FilenameStrs.resize(FileEntries.size());
+ FilenameRefs.resize(FileEntries.size());
+ for (const auto &Entry : FileEntries) {
+ llvm::SmallString<256> Path(Entry.first->getName());
+ llvm::sys::fs::make_absolute(Path);
+
+ auto I = Entry.second;
+ FilenameStrs[I] = std::string(Path.begin(), Path.end());
+ FilenameRefs[I] = FilenameStrs[I];
+ }
+
+ std::string FilenamesAndCoverageMappings;
+ llvm::raw_string_ostream OS(FilenamesAndCoverageMappings);
+ CoverageFilenamesSectionWriter(FilenameRefs).write(OS);
+ OS << CoverageMappings;
+ size_t CoverageMappingSize = CoverageMappings.size();
+ size_t FilenamesSize = OS.str().size() - CoverageMappingSize;
+ // Append extra zeroes if necessary to ensure that the size of the filenames
+ // and coverage mappings is a multiple of 8.
+ if (size_t Rem = OS.str().size() % 8) {
+ CoverageMappingSize += 8 - Rem;
+ for (size_t I = 0, S = 8 - Rem; I < S; ++I)
+ OS << '\0';
+ }
+ auto *FilenamesAndMappingsVal =
+ llvm::ConstantDataArray::getString(Ctx, OS.str(), false);
+
+ // Create the deferred function records array
+ auto RecordsTy =
+ llvm::ArrayType::get(FunctionRecordTy, FunctionRecords.size());
+ auto RecordsVal = llvm::ConstantArray::get(RecordsTy, FunctionRecords);
+
+ llvm::Type *CovDataHeaderTypes[] = {
+#define COVMAP_HEADER(Type, LLVMType, Name, Init) LLVMType,
+#include "llvm/ProfileData/InstrProfData.inc"
+ };
+ auto CovDataHeaderTy =
+ llvm::StructType::get(Ctx, makeArrayRef(CovDataHeaderTypes));
+ llvm::Constant *CovDataHeaderVals[] = {
+#define COVMAP_HEADER(Type, LLVMType, Name, Init) Init,
+#include "llvm/ProfileData/InstrProfData.inc"
+ };
+ auto CovDataHeaderVal = llvm::ConstantStruct::get(
+ CovDataHeaderTy, makeArrayRef(CovDataHeaderVals));
+
+ // Create the coverage data record
+ llvm::Type *CovDataTypes[] = {CovDataHeaderTy, RecordsTy,
+ FilenamesAndMappingsVal->getType()};
+ auto CovDataTy = llvm::StructType::get(Ctx, makeArrayRef(CovDataTypes));
+ llvm::Constant *TUDataVals[] = {CovDataHeaderVal, RecordsVal,
+ FilenamesAndMappingsVal};
+ auto CovDataVal =
+ llvm::ConstantStruct::get(CovDataTy, makeArrayRef(TUDataVals));
+ auto CovData = new llvm::GlobalVariable(
+ CGM.getModule(), CovDataTy, true, llvm::GlobalValue::InternalLinkage,
+ CovDataVal, llvm::getCoverageMappingVarName());
+
+ CovData->setSection(getCoverageSection(CGM));
+ CovData->setAlignment(8);
+
+ // Make sure the data doesn't get deleted.
+ CGM.addUsedGlobal(CovData);
+}
+
+unsigned CoverageMappingModuleGen::getFileID(const FileEntry *File) {
+ auto It = FileEntries.find(File);
+ if (It != FileEntries.end())
+ return It->second;
+ unsigned FileID = FileEntries.size();
+ FileEntries.insert(std::make_pair(File, FileID));
+ return FileID;
+}
+
+void CoverageMappingGen::emitCounterMapping(const Decl *D,
+ llvm::raw_ostream &OS) {
+ assert(CounterMap);
+ CounterCoverageMappingBuilder Walker(CVM, *CounterMap, SM, LangOpts);
+ Walker.VisitDecl(D);
+ Walker.write(OS);
+}
+
+void CoverageMappingGen::emitEmptyMapping(const Decl *D,
+ llvm::raw_ostream &OS) {
+ EmptyCoverageMappingBuilder Walker(CVM, SM, LangOpts);
+ Walker.VisitDecl(D);
+ Walker.write(OS);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CoverageMappingGen.h b/contrib/llvm/tools/clang/lib/CodeGen/CoverageMappingGen.h
new file mode 100644
index 0000000..0d1bf6d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CoverageMappingGen.h
@@ -0,0 +1,114 @@
+//===---- CoverageMappingGen.h - Coverage mapping generation ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Instrumentation-based code coverage mapping generator
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_COVERAGEMAPPINGGEN_H
+#define LLVM_CLANG_LIB_CODEGEN_COVERAGEMAPPINGGEN_H
+
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Lex/PPCallbacks.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+
+class LangOptions;
+class SourceManager;
+class FileEntry;
+class Preprocessor;
+class Decl;
+class Stmt;
+
+/// \brief Stores additional source code information, such as skipped ranges,
+/// that is required by the coverage mapping generator and is obtained from
+/// the preprocessor.
+class CoverageSourceInfo : public PPCallbacks {
+ std::vector<SourceRange> SkippedRanges;
+public:
+ ArrayRef<SourceRange> getSkippedRanges() const { return SkippedRanges; }
+
+ void SourceRangeSkipped(SourceRange Range) override;
+};
+
+namespace CodeGen {
+
+class CodeGenModule;
+
+/// \brief Organizes the cross-function state that is used while generating
+/// code coverage mapping data.
+class CoverageMappingModuleGen {
+ CodeGenModule &CGM;
+ CoverageSourceInfo &SourceInfo;
+ llvm::SmallDenseMap<const FileEntry *, unsigned, 8> FileEntries;
+ std::vector<llvm::Constant *> FunctionRecords;
+ llvm::StructType *FunctionRecordTy;
+ std::string CoverageMappings;
+
+public:
+ CoverageMappingModuleGen(CodeGenModule &CGM, CoverageSourceInfo &SourceInfo)
+ : CGM(CGM), SourceInfo(SourceInfo), FunctionRecordTy(nullptr) {}
+
+ CoverageSourceInfo &getSourceInfo() const {
+ return SourceInfo;
+ }
+
+ /// \brief Add a function's coverage mapping record to the collection of the
+ /// function mapping records.
+ void addFunctionMappingRecord(llvm::GlobalVariable *FunctionName,
+ StringRef FunctionNameValue,
+ uint64_t FunctionHash,
+ const std::string &CoverageMapping);
+
+ /// \brief Emit the coverage mapping data for a translation unit.
+ void emit();
+
+ /// \brief Return the coverage mapping translation unit file id
+ /// for the given file.
+ unsigned getFileID(const FileEntry *File);
+};
+
+/// \brief Organizes the per-function state that is used while generating
+/// code coverage mapping data.
+class CoverageMappingGen {
+ CoverageMappingModuleGen &CVM;
+ SourceManager &SM;
+ const LangOptions &LangOpts;
+ llvm::DenseMap<const Stmt *, unsigned> *CounterMap;
+
+public:
+ CoverageMappingGen(CoverageMappingModuleGen &CVM, SourceManager &SM,
+ const LangOptions &LangOpts)
+ : CVM(CVM), SM(SM), LangOpts(LangOpts), CounterMap(nullptr) {}
+
+ CoverageMappingGen(CoverageMappingModuleGen &CVM, SourceManager &SM,
+ const LangOptions &LangOpts,
+ llvm::DenseMap<const Stmt *, unsigned> *CounterMap)
+ : CVM(CVM), SM(SM), LangOpts(LangOpts), CounterMap(CounterMap) {}
+
+ /// \brief Emit the coverage mapping data which maps the regions of
+ /// code to counters that will be used to find the execution
+ /// counts for those regions.
+ void emitCounterMapping(const Decl *D, llvm::raw_ostream &OS);
+
+ /// \brief Emit the coverage mapping data for an unused function.
+  /// It creates mapping regions with a counter of zero.
+ void emitEmptyMapping(const Decl *D, llvm::raw_ostream &OS);
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/EHScopeStack.h b/contrib/llvm/tools/clang/lib/CodeGen/EHScopeStack.h
new file mode 100644
index 0000000..85cd154
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/EHScopeStack.h
@@ -0,0 +1,420 @@
+//===-- EHScopeStack.h - Stack for cleanup IR generation --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes should be the minimum interface required for other parts of
+// CodeGen to emit cleanups. The implementation is in CGCleanup.cpp; other
+// implementation details that are not widely needed are in CGCleanup.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_EHSCOPESTACK_H
+#define LLVM_CLANG_LIB_CODEGEN_EHSCOPESTACK_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Value.h"
+
+namespace clang {
+namespace CodeGen {
+
+class CodeGenFunction;
+
+/// A branch fixup. These are required when emitting a goto to a
+/// label which hasn't been emitted yet. The goto is optimistically
+/// emitted as a branch to the basic block for the label, and (if it
+/// occurs in a scope with non-trivial cleanups) a fixup is added to
+/// the innermost cleanup. When a (normal) cleanup is popped, any
+/// unresolved fixups in that scope are threaded through the cleanup.
+struct BranchFixup {
+ /// The block containing the terminator which needs to be modified
+ /// into a switch if this fixup is resolved into the current scope.
+  /// If null, InitialBranch points directly to the destination.
+ llvm::BasicBlock *OptimisticBranchBlock;
+
+ /// The ultimate destination of the branch.
+ ///
+ /// This can be set to null to indicate that this fixup was
+ /// successfully resolved.
+ llvm::BasicBlock *Destination;
+
+ /// The destination index value.
+ unsigned DestinationIndex;
+
+ /// The initial branch of the fixup.
+ llvm::BranchInst *InitialBranch;
+};
+
+template <class T> struct InvariantValue {
+ typedef T type;
+ typedef T saved_type;
+ static bool needsSaving(type value) { return false; }
+ static saved_type save(CodeGenFunction &CGF, type value) { return value; }
+ static type restore(CodeGenFunction &CGF, saved_type value) { return value; }
+};
+
+/// A metaprogramming class for ensuring that a value will dominate an
+/// arbitrary position in a function.
+template <class T> struct DominatingValue : InvariantValue<T> {};
+
+template <class T, bool mightBeInstruction =
+ std::is_base_of<llvm::Value, T>::value &&
+ !std::is_base_of<llvm::Constant, T>::value &&
+ !std::is_base_of<llvm::BasicBlock, T>::value>
+struct DominatingPointer;
+template <class T> struct DominatingPointer<T,false> : InvariantValue<T*> {};
+// template <class T> struct DominatingPointer<T,true> at end of file
+
+template <class T> struct DominatingValue<T*> : DominatingPointer<T> {};
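+// For instance: a plain T* that can never be an llvm::Instruction is
+// invariant, so DominatingPointer<T, false> simply saves and restores the
+// pointer itself; only values that might be instructions need a real save,
+// which the specialization at the end of the file provides.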
+
+enum CleanupKind : unsigned {
+ /// Denotes a cleanup that should run when a scope is exited using exceptional
+  /// control flow (e.g. a throw statement leading to stack unwinding).
+ EHCleanup = 0x1,
+
+ /// Denotes a cleanup that should run when a scope is exited using normal
+ /// control flow (falling off the end of the scope, return, goto, ...).
+ NormalCleanup = 0x2,
+
+ NormalAndEHCleanup = EHCleanup | NormalCleanup,
+
+ InactiveCleanup = 0x4,
+ InactiveEHCleanup = EHCleanup | InactiveCleanup,
+ InactiveNormalCleanup = NormalCleanup | InactiveCleanup,
+ InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup
+};
+
+/// A stack of scopes which respond to exceptions, including cleanups
+/// and catch blocks.
+class EHScopeStack {
+public:
+  /* Should switch to alignof(uint64_t) instead of 8, once EHCleanupScope
+     can support that alignment. */
+ enum { ScopeStackAlignment = 8 };
+
+ /// A saved depth on the scope stack. This is necessary because
+ /// pushing scopes onto the stack invalidates iterators.
+ class stable_iterator {
+ friend class EHScopeStack;
+
+ /// Offset from StartOfData to EndOfBuffer.
+ ptrdiff_t Size;
+
+ stable_iterator(ptrdiff_t Size) : Size(Size) {}
+
+ public:
+ static stable_iterator invalid() { return stable_iterator(-1); }
+ stable_iterator() : Size(-1) {}
+
+ bool isValid() const { return Size >= 0; }
+
+ /// Returns true if this scope encloses I.
+ /// Returns false if I is invalid.
+ /// This scope must be valid.
+ bool encloses(stable_iterator I) const { return Size <= I.Size; }
+
+ /// Returns true if this scope strictly encloses I: that is,
+ /// if it encloses I and is not I.
+    /// Returns false if I is invalid.
+ /// This scope must be valid.
+ bool strictlyEncloses(stable_iterator I) const { return Size < I.Size; }
+
+ friend bool operator==(stable_iterator A, stable_iterator B) {
+ return A.Size == B.Size;
+ }
+ friend bool operator!=(stable_iterator A, stable_iterator B) {
+ return A.Size != B.Size;
+ }
+ };
+
+ /// Information for lazily generating a cleanup. Subclasses must be
+ /// POD-like: cleanups will not be destructed, and they will be
+ /// allocated on the cleanup stack and freely copied and moved
+ /// around.
+ ///
+ /// Cleanup implementations should generally be declared in an
+ /// anonymous namespace.
+ class Cleanup {
+ // Anchor the construction vtable.
+ virtual void anchor();
+
+ protected:
+ ~Cleanup() = default;
+
+ public:
+ Cleanup(const Cleanup &) = default;
+ Cleanup(Cleanup &&) {}
+ Cleanup() = default;
+
+ /// Generation flags.
+ class Flags {
+ enum {
+ F_IsForEH = 0x1,
+ F_IsNormalCleanupKind = 0x2,
+ F_IsEHCleanupKind = 0x4
+ };
+ unsigned flags;
+
+ public:
+ Flags() : flags(0) {}
+
+ /// isForEH - true if the current emission is for an EH cleanup.
+ bool isForEHCleanup() const { return flags & F_IsForEH; }
+ bool isForNormalCleanup() const { return !isForEHCleanup(); }
+ void setIsForEHCleanup() { flags |= F_IsForEH; }
+
+ bool isNormalCleanupKind() const { return flags & F_IsNormalCleanupKind; }
+ void setIsNormalCleanupKind() { flags |= F_IsNormalCleanupKind; }
+
+ /// isEHCleanupKind - true if the cleanup was pushed as an EH
+ /// cleanup.
+ bool isEHCleanupKind() const { return flags & F_IsEHCleanupKind; }
+ void setIsEHCleanupKind() { flags |= F_IsEHCleanupKind; }
+ };
+
+ /// Emit the cleanup. For normal cleanups, this is run in the
+ /// same EH context as when the cleanup was pushed, i.e. the
+ /// immediately-enclosing context of the cleanup scope. For
+ /// EH cleanups, this is run in a terminate context.
+ ///
+    /// \param flags - cleanup kind.
+ virtual void Emit(CodeGenFunction &CGF, Flags flags) = 0;
+ };
+
+ /// ConditionalCleanup stores the saved form of its parameters,
+ /// then restores them and performs the cleanup.
+ template <class T, class... As>
+ class ConditionalCleanup final : public Cleanup {
+ typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
+ SavedTuple Saved;
+
+ template <std::size_t... Is>
+ T restore(CodeGenFunction &CGF, llvm::index_sequence<Is...>) {
+ // It's important that the restores are emitted in order. The braced init
+      // list guarantees that.
+ return T{DominatingValue<As>::restore(CGF, std::get<Is>(Saved))...};
+ }
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ restore(CGF, llvm::index_sequence_for<As...>()).Emit(CGF, flags);
+ }
+
+ public:
+ ConditionalCleanup(typename DominatingValue<As>::saved_type... A)
+ : Saved(A...) {}
+
+ ConditionalCleanup(SavedTuple Tuple) : Saved(std::move(Tuple)) {}
+ };
+
+private:
+ // The implementation for this class is in CGException.h and
+ // CGException.cpp; the definition is here because it's used as a
+ // member of CodeGenFunction.
+
+ /// The start of the scope-stack buffer, i.e. the allocated pointer
+ /// for the buffer. All of these pointers are either simultaneously
+ /// null or simultaneously valid.
+ char *StartOfBuffer;
+
+ /// The end of the buffer.
+ char *EndOfBuffer;
+
+ /// The first valid entry in the buffer.
+ char *StartOfData;
+
+ /// The innermost normal cleanup on the stack.
+ stable_iterator InnermostNormalCleanup;
+
+ /// The innermost EH scope on the stack.
+ stable_iterator InnermostEHScope;
+
+ /// The current set of branch fixups. A branch fixup is a jump to
+ /// an as-yet unemitted label, i.e. a label for which we don't yet
+ /// know the EH stack depth. Whenever we pop a cleanup, we have
+ /// to thread all the current branch fixups through it.
+ ///
+ /// Fixups are recorded as the Use of the respective branch or
+ /// switch statement. The use points to the final destination.
+ /// When popping out of a cleanup, these uses are threaded through
+ /// the cleanup and adjusted to point to the new cleanup.
+ ///
+ /// Note that branches are allowed to jump into protected scopes
+ /// in certain situations; e.g. the following code is legal:
+ /// struct A { ~A(); }; // trivial ctor, non-trivial dtor
+ /// goto foo;
+ /// A a;
+ /// foo:
+ /// bar();
+ SmallVector<BranchFixup, 8> BranchFixups;
+
+ char *allocate(size_t Size);
+ void deallocate(size_t Size);
+
+ void *pushCleanup(CleanupKind K, size_t DataSize);
+
+public:
+ EHScopeStack() : StartOfBuffer(nullptr), EndOfBuffer(nullptr),
+ StartOfData(nullptr), InnermostNormalCleanup(stable_end()),
+ InnermostEHScope(stable_end()) {}
+ ~EHScopeStack() { delete[] StartOfBuffer; }
+
+ /// Push a lazily-created cleanup on the stack.
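+  ///
+  /// A minimal sketch of a cleanup and its push (the cleanup type and
+  /// names here are hypothetical):
+  /// \code
+  ///   struct CallFoo final : EHScopeStack::Cleanup {
+  ///     llvm::Value *Arg;
+  ///     CallFoo(llvm::Value *Arg) : Arg(Arg) {}
+  ///     void Emit(CodeGenFunction &CGF, Flags flags) override {
+  ///       // ... emit the call to foo(Arg) ...
+  ///     }
+  ///   };
+  ///   EHStack.pushCleanup<CallFoo>(NormalAndEHCleanup, ArgV);
+  /// \endcode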
+ template <class T, class... As> void pushCleanup(CleanupKind Kind, As... A) {
+ static_assert(llvm::AlignOf<T>::Alignment <= ScopeStackAlignment,
+ "Cleanup's alignment is too large.");
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new (Buffer) T(A...);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack. Tuple version.
+ template <class T, class... As>
+ void pushCleanupTuple(CleanupKind Kind, std::tuple<As...> A) {
+ static_assert(llvm::AlignOf<T>::Alignment <= ScopeStackAlignment,
+ "Cleanup's alignment is too large.");
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new (Buffer) T(std::move(A));
+ (void) Obj;
+ }
+
+ // Feel free to add more variants of the following:
+
+ /// Push a cleanup with non-constant storage requirements on the
+ /// stack. The cleanup type must provide an additional static method:
+ /// static size_t getExtraSize(size_t);
+ /// The argument to this method will be the value N, which will also
+ /// be passed as the first argument to the constructor.
+ ///
+ /// The data stored in the extra storage must obey the same
+ /// restrictions as normal cleanup member data.
+ ///
+ /// The pointer returned from this method is valid until the cleanup
+ /// stack is modified.
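+  ///
+  /// A sketch of the required hook (the cleanup type is hypothetical):
+  /// \code
+  ///   struct DestroyElems final : EHScopeStack::Cleanup {
+  ///     static size_t getExtraSize(size_t N) {
+  ///       return N * sizeof(llvm::Value *);
+  ///     }
+  ///     DestroyElems(size_t N /*, ... */) { /* ... */ }
+  ///     void Emit(CodeGenFunction &CGF, Flags flags) override { /* ... */ }
+  ///   };
+  /// \endcode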
+ template <class T, class... As>
+ T *pushCleanupWithExtra(CleanupKind Kind, size_t N, As... A) {
+ static_assert(llvm::AlignOf<T>::Alignment <= ScopeStackAlignment,
+ "Cleanup's alignment is too large.");
+ void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N));
+ return new (Buffer) T(N, A...);
+ }
+
+ void pushCopyOfCleanup(CleanupKind Kind, const void *Cleanup, size_t Size) {
+ void *Buffer = pushCleanup(Kind, Size);
+ std::memcpy(Buffer, Cleanup, Size);
+ }
+
+ /// Pops a cleanup scope off the stack. This is private to CGCleanup.cpp.
+ void popCleanup();
+
+ /// Push a set of catch handlers on the stack. The catch is
+ /// uninitialized and will need to have the given number of handlers
+ /// set on it.
+ class EHCatchScope *pushCatch(unsigned NumHandlers);
+
+ /// Pops a catch scope off the stack. This is private to CGException.cpp.
+ void popCatch();
+
+ /// Push an exceptions filter on the stack.
+ class EHFilterScope *pushFilter(unsigned NumFilters);
+
+ /// Pops an exceptions filter off the stack.
+ void popFilter();
+
+ /// Push a terminate handler on the stack.
+ void pushTerminate();
+
+ /// Pops a terminate handler off the stack.
+ void popTerminate();
+
+  /// Returns true iff the current scope is either empty or contains only
+  /// lifetime markers, i.e. no real cleanup code.
+ bool containsOnlyLifetimeMarkers(stable_iterator Old) const;
+
+ /// Determines whether the exception-scopes stack is empty.
+ bool empty() const { return StartOfData == EndOfBuffer; }
+
+ bool requiresLandingPad() const {
+ return InnermostEHScope != stable_end();
+ }
+
+ /// Determines whether there are any normal cleanups on the stack.
+ bool hasNormalCleanups() const {
+ return InnermostNormalCleanup != stable_end();
+ }
+
+ /// Returns the innermost normal cleanup on the stack, or
+ /// stable_end() if there are no normal cleanups.
+ stable_iterator getInnermostNormalCleanup() const {
+ return InnermostNormalCleanup;
+ }
+ stable_iterator getInnermostActiveNormalCleanup() const;
+
+ stable_iterator getInnermostEHScope() const {
+ return InnermostEHScope;
+ }
+
+ /// An unstable reference to a scope-stack depth. Invalidated by
+ /// pushes but not pops.
+ class iterator;
+
+ /// Returns an iterator pointing to the innermost EH scope.
+ iterator begin() const;
+
+ /// Returns an iterator pointing to the outermost EH scope.
+ iterator end() const;
+
+ /// Create a stable reference to the top of the EH stack. The
+ /// returned reference is valid until that scope is popped off the
+ /// stack.
+ stable_iterator stable_begin() const {
+ return stable_iterator(EndOfBuffer - StartOfData);
+ }
+
+ /// Create a stable reference to the bottom of the EH stack.
+ static stable_iterator stable_end() {
+ return stable_iterator(0);
+ }
+
+ /// Translates an iterator into a stable_iterator.
+ stable_iterator stabilize(iterator it) const;
+
+  /// Turn a stable reference to a scope depth into an unstable pointer
+ /// to the EH stack.
+ iterator find(stable_iterator save) const;
+
+ /// Add a branch fixup to the current cleanup scope.
+ BranchFixup &addBranchFixup() {
+ assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
+ BranchFixups.push_back(BranchFixup());
+ return BranchFixups.back();
+ }
+
+ unsigned getNumBranchFixups() const { return BranchFixups.size(); }
+ BranchFixup &getBranchFixup(unsigned I) {
+ assert(I < getNumBranchFixups());
+ return BranchFixups[I];
+ }
+
+ /// Pops lazily-removed fixups from the end of the list. This
+ /// should only be called by procedures which have just popped a
+ /// cleanup or resolved one or more fixups.
+ void popNullFixups();
+
+ /// Clears the branch-fixups list. This should only be called by
+ /// ResolveAllBranchFixups.
+ void clearFixups() { BranchFixups.clear(); }
+};
+
+} // namespace CodeGen
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
new file mode 100644
index 0000000..0c4008f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -0,0 +1,3839 @@
+//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ code generation targeting the Itanium C++ ABI. The class
+// in this file generates structures that follow the Itanium C++ ABI, which is
+// documented at:
+// http://www.codesourcery.com/public/cxx-abi/abi.html
+// http://www.codesourcery.com/public/cxx-abi/abi-eh.html
+//
+// It also supports the closely-related ARM ABI, documented at:
+// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCXXABI.h"
+#include "CGCleanup.h"
+#include "CGRecordLayout.h"
+#include "CGVTables.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "TargetInfo.h"
+#include "clang/AST/Mangle.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/StmtCXX.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Value.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+class ItaniumCXXABI : public CodeGen::CGCXXABI {
+ /// VTables - All the vtables which have been defined.
+ llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
+
+protected:
+ bool UseARMMethodPtrABI;
+ bool UseARMGuardVarABI;
+
+ ItaniumMangleContext &getMangleContext() {
+ return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
+ }
+
+public:
+ ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
+ bool UseARMMethodPtrABI = false,
+ bool UseARMGuardVarABI = false) :
+ CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
+ UseARMGuardVarABI(UseARMGuardVarABI) { }
+
+ bool classifyReturnType(CGFunctionInfo &FI) const override;
+
+ RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ // FIXME: Use canCopyArgument() when it is fixed to handle lazily declared
+ // special members.
+ if (RD->hasNonTrivialDestructor() || RD->hasNonTrivialCopyConstructor())
+ return RAA_Indirect;
+ return RAA_Default;
+ }
+
+ bool isThisCompleteObject(GlobalDecl GD) const override {
+ // The Itanium ABI has separate complete-object vs. base-object
+ // variants of both constructors and destructors.
+ if (isa<CXXDestructorDecl>(GD.getDecl())) {
+ switch (GD.getDtorType()) {
+ case Dtor_Complete:
+ case Dtor_Deleting:
+ return true;
+
+ case Dtor_Base:
+ return false;
+
+ case Dtor_Comdat:
+ llvm_unreachable("emitting dtor comdat as function?");
+ }
+ llvm_unreachable("bad dtor kind");
+ }
+ if (isa<CXXConstructorDecl>(GD.getDecl())) {
+ switch (GD.getCtorType()) {
+ case Ctor_Complete:
+ return true;
+
+ case Ctor_Base:
+ return false;
+
+ case Ctor_CopyingClosure:
+ case Ctor_DefaultClosure:
+ llvm_unreachable("closure ctors in Itanium ABI?");
+
+ case Ctor_Comdat:
+ llvm_unreachable("emitting ctor comdat as function?");
+ }
+      llvm_unreachable("bad ctor kind");
+ }
+
+ // No other kinds.
+ return false;
+ }
+
+ bool isZeroInitializable(const MemberPointerType *MPT) override;
+
+ llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
+
+ llvm::Value *
+ EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ const Expr *E,
+ Address This,
+ llvm::Value *&ThisPtrForCall,
+ llvm::Value *MemFnPtr,
+ const MemberPointerType *MPT) override;
+
+ llvm::Value *
+ EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
+ Address Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) override;
+
+ llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src) override;
+ llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *Src) override;
+
+ llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
+
+ llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
+ llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset) override;
+ llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
+ llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
+ CharUnits ThisAdjustment);
+
+ llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L, llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality) override;
+
+ llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *Addr,
+ const MemberPointerType *MPT) override;
+
+ void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
+ Address Ptr, QualType ElementType,
+ const CXXDestructorDecl *Dtor) override;
+
+ /// Itanium says that an _Unwind_Exception has to be "double-word"
+ /// aligned (and thus the end of it is also so-aligned), meaning 16
+ /// bytes. Of course, that was written for the actual Itanium,
+ /// which is a 64-bit platform. Classically, the ABI doesn't really
+ /// specify the alignment on other platforms, but in practice
+ /// libUnwind declares the struct with __attribute__((aligned)), so
+ /// we assume that alignment here. (It's generally 16 bytes, but
+  /// some targets override it.)
+ CharUnits getAlignmentOfExnObject() {
+ auto align = CGM.getContext().getTargetDefaultAlignForAttributeAligned();
+ return CGM.getContext().toCharUnitsFromBits(align);
+ }
+
+ void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
+ void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
+
+ void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
+
+ llvm::CallInst *
+ emitTerminateForUnexpectedException(CodeGenFunction &CGF,
+ llvm::Value *Exn) override;
+
+ void EmitFundamentalRTTIDescriptor(QualType Type);
+ void EmitFundamentalRTTIDescriptors();
+ llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
+ CatchTypeInfo
+ getAddrOfCXXCatchHandlerType(QualType Ty,
+ QualType CatchHandlerType) override {
+ return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
+ }
+
+ bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
+ void EmitBadTypeidCall(CodeGenFunction &CGF) override;
+ llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
+ Address ThisPtr,
+ llvm::Type *StdTypeInfoPtrTy) override;
+
+ bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
+ QualType SrcRecordTy) override;
+
+ llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
+ QualType SrcRecordTy, QualType DestTy,
+ QualType DestRecordTy,
+ llvm::BasicBlock *CastEnd) override;
+
+ llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
+ QualType SrcRecordTy,
+ QualType DestTy) override;
+
+ bool EmitBadCastCall(CodeGenFunction &CGF) override;
+
+ llvm::Value *
+ GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl) override;
+
+ void EmitCXXConstructors(const CXXConstructorDecl *D) override;
+
+ void buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
+ SmallVectorImpl<CanQualType> &ArgTys) override;
+
+ bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
+ CXXDtorType DT) const override {
+ // Itanium does not emit any destructor variant as an inline thunk.
+ // Delegating may occur as an optimization, but all variants are either
+ // emitted with external linkage or as linkonce if they are inline and used.
+ return false;
+ }
+
+ void EmitCXXDestructors(const CXXDestructorDecl *D) override;
+
+ void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
+ FunctionArgList &Params) override;
+
+ void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
+
+ unsigned addImplicitConstructorArgs(CodeGenFunction &CGF,
+ const CXXConstructorDecl *D,
+ CXXCtorType Type, bool ForVirtualBase,
+ bool Delegating,
+ CallArgList &Args) override;
+
+ void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
+ CXXDtorType Type, bool ForVirtualBase,
+ bool Delegating, Address This) override;
+
+ void emitVTableDefinitions(CodeGenVTables &CGVT,
+ const CXXRecordDecl *RD) override;
+
+ bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
+ CodeGenFunction::VPtr Vptr) override;
+
+ bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
+ return true;
+ }
+
+ llvm::Constant *
+ getVTableAddressPoint(BaseSubobject Base,
+ const CXXRecordDecl *VTableClass) override;
+
+ llvm::Value *getVTableAddressPointInStructor(
+ CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
+ BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
+
+ llvm::Value *getVTableAddressPointInStructorWithVTT(
+ CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
+ BaseSubobject Base, const CXXRecordDecl *NearestVBase);
+
+ llvm::Constant *
+ getVTableAddressPointForConstExpr(BaseSubobject Base,
+ const CXXRecordDecl *VTableClass) override;
+
+ llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
+ CharUnits VPtrOffset) override;
+
+ llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
+ Address This, llvm::Type *Ty,
+ SourceLocation Loc) override;
+
+ llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
+ const CXXDestructorDecl *Dtor,
+ CXXDtorType DtorType,
+ Address This,
+ const CXXMemberCallExpr *CE) override;
+
+ void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
+
+ bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
+
+ void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
+ bool ReturnAdjustment) override {
+ // Allow inlining of thunks by emitting them with available_externally
+ // linkage together with vtables when needed.
+ if (ForVTable && !Thunk->hasLocalLinkage())
+ Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
+ }
+
+ llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
+ const ThisAdjustment &TA) override;
+
+ llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
+ const ReturnAdjustment &RA) override;
+
+ size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
+ FunctionArgList &Args) const override {
+ assert(!Args.empty() && "expected the arglist to not be empty!");
+ return Args.size() - 1;
+ }
+
+ StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
+  StringRef GetDeletedVirtualCallName() override {
+    return "__cxa_deleted_virtual";
+  }
+
+ CharUnits getArrayCookieSizeImpl(QualType elementType) override;
+ Address InitializeArrayCookie(CodeGenFunction &CGF,
+ Address NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) override;
+ llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
+ Address allocPtr,
+ CharUnits cookieSize) override;
+
+ void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::GlobalVariable *DeclPtr,
+ bool PerformInit) override;
+ void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *dtor, llvm::Constant *addr) override;
+
+ llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
+ llvm::Value *Val);
+ void EmitThreadLocalInitFuncs(
+ CodeGenModule &CGM,
+ ArrayRef<const VarDecl *> CXXThreadLocals,
+ ArrayRef<llvm::Function *> CXXThreadLocalInits,
+ ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
+
+ bool usesThreadWrapperFunction() const override { return true; }
+ LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
+ QualType LValType) override;
+
+ bool NeedsVTTParameter(GlobalDecl GD) override;
+
+ /**************************** RTTI Uniqueness ******************************/
+
+protected:
+ /// Returns true if the ABI requires RTTI type_info objects to be unique
+ /// across a program.
+ virtual bool shouldRTTIBeUnique() const { return true; }
+
+public:
+ /// What sort of unique-RTTI behavior should we use?
+ enum RTTIUniquenessKind {
+ /// We are guaranteeing, or need to guarantee, that the RTTI string
+ /// is unique.
+ RUK_Unique,
+
+ /// We are not guaranteeing uniqueness for the RTTI string, so we
+ /// can demote to hidden visibility but must use string comparisons.
+ RUK_NonUniqueHidden,
+
+ /// We are not guaranteeing uniqueness for the RTTI string, so we
+ /// have to use string comparisons, but we also have to emit it with
+ /// non-hidden visibility.
+ RUK_NonUniqueVisible
+ };
+
+ /// Return the required visibility status for the given type and linkage in
+ /// the current ABI.
+ RTTIUniquenessKind
+ classifyRTTIUniqueness(QualType CanTy,
+ llvm::GlobalValue::LinkageTypes Linkage) const;
+ friend class ItaniumRTTIBuilder;
+
+ void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;
+
+private:
+ bool hasAnyUsedVirtualInlineFunction(const CXXRecordDecl *RD) const {
+ const auto &VtableLayout =
+ CGM.getItaniumVTableContext().getVTableLayout(RD);
+
+ for (const auto &VtableComponent : VtableLayout.vtable_components()) {
+ if (!VtableComponent.isUsedFunctionPointerKind())
+ continue;
+
+ const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
+ if (Method->getCanonicalDecl()->isInlined())
+ return true;
+ }
+ return false;
+ }
+
+ bool isVTableHidden(const CXXRecordDecl *RD) const {
+ const auto &VtableLayout =
+ CGM.getItaniumVTableContext().getVTableLayout(RD);
+
+ for (const auto &VtableComponent : VtableLayout.vtable_components()) {
+ if (VtableComponent.isRTTIKind()) {
+ const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
+ if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
+ return true;
+ } else if (VtableComponent.isUsedFunctionPointerKind()) {
+ const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
+ if (Method->getVisibility() == Visibility::HiddenVisibility &&
+ !Method->isDefined())
+ return true;
+ }
+ }
+ return false;
+ }
+};
+
+class ARMCXXABI : public ItaniumCXXABI {
+public:
+ ARMCXXABI(CodeGen::CodeGenModule &CGM) :
+ ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
+ /* UseARMGuardVarABI = */ true) {}
+
+ bool HasThisReturn(GlobalDecl GD) const override {
+ return (isa<CXXConstructorDecl>(GD.getDecl()) || (
+ isa<CXXDestructorDecl>(GD.getDecl()) &&
+ GD.getDtorType() != Dtor_Deleting));
+ }
+
+ void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
+ QualType ResTy) override;
+
+ CharUnits getArrayCookieSizeImpl(QualType elementType) override;
+ Address InitializeArrayCookie(CodeGenFunction &CGF,
+ Address NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) override;
+ llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
+ CharUnits cookieSize) override;
+};
+
+class iOS64CXXABI : public ARMCXXABI {
+public:
+ iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {}
+
+ // ARM64 libraries are prepared for non-unique RTTI.
+ bool shouldRTTIBeUnique() const override { return false; }
+};
+
+class WebAssemblyCXXABI final : public ItaniumCXXABI {
+public:
+ explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
+ : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
+ /*UseARMGuardVarABI=*/true) {}
+
+private:
+ bool HasThisReturn(GlobalDecl GD) const override {
+ return isa<CXXConstructorDecl>(GD.getDecl()) ||
+ (isa<CXXDestructorDecl>(GD.getDecl()) &&
+ GD.getDtorType() != Dtor_Deleting);
+ }
+};
+} // namespace
+
+CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
+ switch (CGM.getTarget().getCXXABI().getKind()) {
+ // For IR-generation purposes, there's no significant difference
+ // between the ARM and iOS ABIs.
+ case TargetCXXABI::GenericARM:
+ case TargetCXXABI::iOS:
+ case TargetCXXABI::WatchOS:
+ return new ARMCXXABI(CGM);
+
+ case TargetCXXABI::iOS64:
+ return new iOS64CXXABI(CGM);
+
+ // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
+ // include the other 32-bit ARM oddities: constructor/destructor return values
+ // and array cookies.
+ case TargetCXXABI::GenericAArch64:
+ return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
+ /* UseARMGuardVarABI = */ true);
+
+ case TargetCXXABI::GenericMIPS:
+ return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true);
+
+ case TargetCXXABI::WebAssembly:
+ return new WebAssemblyCXXABI(CGM);
+
+ case TargetCXXABI::GenericItanium:
+ if (CGM.getContext().getTargetInfo().getTriple().getArch()
+ == llvm::Triple::le32) {
+ // For PNaCl, use ARM-style method pointers so that PNaCl code
+ // does not assume anything about the alignment of function
+ // pointers.
+ return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
+ /* UseARMGuardVarABI = */ false);
+ }
+ return new ItaniumCXXABI(CGM);
+
+ case TargetCXXABI::Microsoft:
+ llvm_unreachable("Microsoft ABI is not Itanium-based");
+ }
+ llvm_unreachable("bad ABI kind");
+}
+
+llvm::Type *
+ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
+ if (MPT->isMemberDataPointer())
+ return CGM.PtrDiffTy;
+ return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy, nullptr);
+}
+
+/// In the Itanium and ARM ABIs, method pointers have the form:
+/// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
+///
+/// In the Itanium ABI:
+/// - method pointers are virtual if (memptr.ptr & 1) is nonzero
+/// - the this-adjustment is (memptr.adj)
+/// - the virtual offset is (memptr.ptr - 1)
+///
+/// In the ARM ABI:
+/// - method pointers are virtual if (memptr.adj & 1) is nonzero
+/// - the this-adjustment is (memptr.adj >> 1)
+/// - the virtual offset is (memptr.ptr)
+/// ARM uses 'adj' for the virtual flag because function pointers to
+/// Thumb code already have their low bit set, so the low bit of
+/// memptr.ptr is not free to carry it.
+///
+/// If the member is virtual, the adjusted 'this' pointer points
+/// to a vtable pointer from which the virtual offset is applied.
+///
+/// If the member is non-virtual, memptr.ptr is the address of
+/// the function to call.
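+///
+/// A purely illustrative sketch (the struct below is an assumed example,
+/// not part of this file): given
+///   struct A { virtual void v(); void f(); };
+/// on a target with 8-byte pointers, &A::f lowers to
+///   { (ptrdiff_t)&A::f, 0 }      (Itanium and ARM alike)
+/// while &A::v, sitting in vtable slot 0, lowers to
+///   { 0*8 + 1, 0 }               (Itanium: ptr = vtable offset + 1)
+///   { 0*8, 2*0 + 1 }             (ARM: the flag lives in adj's low bit)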
+llvm::Value *ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
+ CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
+ llvm::Value *&ThisPtrForCall,
+ llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->getAs<FunctionProtoType>();
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
+
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
+ CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
+
+ llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
+
+ llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
+ llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
+ llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
+
+ // Extract memptr.adj, which is in the second field.
+ llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
+
+ // Compute the true adjustment.
+ llvm::Value *Adj = RawAdj;
+ if (UseARMMethodPtrABI)
+ Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
+
+ // Apply the adjustment and cast back to the original struct type
+ // for consistency.
+ llvm::Value *This = ThisAddr.getPointer();
+ llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
+ Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
+ This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
+ ThisPtrForCall = This;
+
+ // Load the function pointer.
+ llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
+
+  // If the low bit is 1 -- of memptr.ptr on Itanium, of memptr.adj on ARM --
+  // the member points to a virtual function.
+ llvm::Value *IsVirtual;
+ if (UseARMMethodPtrABI)
+ IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
+ else
+ IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
+ IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
+ Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
+
+  // In the virtual path, the adjustment left 'This' pointing at the base
+  // subobject whose vptr holds the correct vtable. The "function pointer"
+  // is an offset within that vtable (+1 for the virtual flag on non-ARM).
+ CGF.EmitBlock(FnVirtual);
+
+ // Cast the adjusted this to a pointer to vtable pointer and load.
+ llvm::Type *VTableTy = Builder.getInt8PtrTy();
+ CharUnits VTablePtrAlign =
+ CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
+ CGF.getPointerAlign());
+ llvm::Value *VTable =
+ CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
+
+ // Apply the offset.
+ llvm::Value *VTableOffset = FnAsInt;
+ if (!UseARMMethodPtrABI)
+ VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
+ VTable = Builder.CreateGEP(VTable, VTableOffset);
+
+ // Load the virtual function to call.
+ VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo());
+ llvm::Value *VirtualFn =
+ Builder.CreateAlignedLoad(VTable, CGF.getPointerAlign(),
+ "memptr.virtualfn");
+ CGF.EmitBranch(FnEnd);
+
+  // In the non-virtual path, memptr.ptr is already the address of the
+  // function; just convert the integer back to a function pointer.
+ CGF.EmitBlock(FnNonVirtual);
+ llvm::Value *NonVirtualFn =
+ Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
+
+ // We're done.
+ CGF.EmitBlock(FnEnd);
+ llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo(), 2);
+ Callee->addIncoming(VirtualFn, FnVirtual);
+ Callee->addIncoming(NonVirtualFn, FnNonVirtual);
+ return Callee;
+}
+
+/// Compute an l-value by applying the given pointer-to-member to a
+/// base object.
+llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
+ CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ assert(MemPtr->getType() == CGM.PtrDiffTy);
+
+ CGBuilderTy &Builder = CGF.Builder;
+
+ // Cast to char*.
+ Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
+
+ // Apply the offset, which we assume is non-null.
+ llvm::Value *Addr =
+ Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset");
+
+ // Cast the address to the appropriate pointer type, adopting the
+ // address space of the base pointer.
+ llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
+ ->getPointerTo(Base.getAddressSpace());
+ return Builder.CreateBitCast(Addr, PType);
+}
+
+/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
+/// conversion.
+///
+/// Bitcast conversions are always a no-op under Itanium.
+///
+/// Obligatory offset/adjustment diagram:
+/// <-- offset --> <-- adjustment -->
+/// |--------------------------|----------------------|--------------------|
+/// ^Derived address point ^Base address point ^Member address point
+///
+/// So when converting a base member pointer to a derived member pointer,
+/// we add the offset to the adjustment because the address point has
+/// decreased; and conversely, when converting a derived MP to a base MP
+/// we subtract the offset from the adjustment because the address point
+/// has increased.
+///
+/// The standard forbids (at compile time) conversion to and from
+/// virtual bases, which is why we don't have to consider them here.
+///
+/// The standard forbids (at run time) casting a derived MP to a base
+/// MP when the derived MP does not point to a member of the base.
+/// This is why -1 is a reasonable choice for null data member
+/// pointers.
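+///
+/// As a hypothetical instance of the arithmetic: if Base lives at offset
+/// 16 within Derived, converting a void (Base::*)() with adjustment 0 to
+/// a void (Derived::*)() yields adjustment 16, because the address point
+/// moved toward the start of the object; the reverse conversion subtracts
+/// 16 again. On ARM the stored adj would be 32, i.e. 16 << 1.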
+llvm::Value *
+ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *src) {
+ assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
+ E->getCastKind() == CK_BaseToDerivedMemberPointer ||
+ E->getCastKind() == CK_ReinterpretMemberPointer);
+
+ // Under Itanium, reinterprets don't require any additional processing.
+ if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
+
+ // Use constant emission if we can.
+ if (isa<llvm::Constant>(src))
+ return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
+
+ llvm::Constant *adj = getMemberPointerAdjustment(E);
+ if (!adj) return src;
+
+ CGBuilderTy &Builder = CGF.Builder;
+ bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
+
+ const MemberPointerType *destTy =
+ E->getType()->castAs<MemberPointerType>();
+
+ // For member data pointers, this is just a matter of adding the
+ // offset if the source is non-null.
+ if (destTy->isMemberDataPointer()) {
+ llvm::Value *dst;
+ if (isDerivedToBase)
+ dst = Builder.CreateNSWSub(src, adj, "adj");
+ else
+ dst = Builder.CreateNSWAdd(src, adj, "adj");
+
+ // Null check.
+ llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
+ llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
+ return Builder.CreateSelect(isNull, src, dst);
+ }
+
+ // The this-adjustment is left-shifted by 1 on ARM.
+ if (UseARMMethodPtrABI) {
+ uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
+ offset <<= 1;
+ adj = llvm::ConstantInt::get(adj->getType(), offset);
+ }
+
+ llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
+ llvm::Value *dstAdj;
+ if (isDerivedToBase)
+ dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
+ else
+ dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
+
+ return Builder.CreateInsertValue(src, dstAdj, 1);
+}
+
+llvm::Constant *
+ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *src) {
+ assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
+ E->getCastKind() == CK_BaseToDerivedMemberPointer ||
+ E->getCastKind() == CK_ReinterpretMemberPointer);
+
+ // Under Itanium, reinterprets don't require any additional processing.
+ if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
+
+ // If the adjustment is trivial, we don't need to do anything.
+ llvm::Constant *adj = getMemberPointerAdjustment(E);
+ if (!adj) return src;
+
+ bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
+
+ const MemberPointerType *destTy =
+ E->getType()->castAs<MemberPointerType>();
+
+ // For member data pointers, this is just a matter of adding the
+ // offset if the source is non-null.
+ if (destTy->isMemberDataPointer()) {
+ // null maps to null.
+ if (src->isAllOnesValue()) return src;
+
+ if (isDerivedToBase)
+ return llvm::ConstantExpr::getNSWSub(src, adj);
+ else
+ return llvm::ConstantExpr::getNSWAdd(src, adj);
+ }
+
+ // The this-adjustment is left-shifted by 1 on ARM.
+ if (UseARMMethodPtrABI) {
+ uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
+ offset <<= 1;
+ adj = llvm::ConstantInt::get(adj->getType(), offset);
+ }
+
+ llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
+ llvm::Constant *dstAdj;
+ if (isDerivedToBase)
+ dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
+ else
+ dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
+
+ return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
+}
+
+llvm::Constant *
+ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
+ // Itanium C++ ABI 2.3:
+ // A NULL pointer is represented as -1.
+ if (MPT->isMemberDataPointer())
+ return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
+
+ llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
+ llvm::Constant *Values[2] = { Zero, Zero };
+ return llvm::ConstantStruct::getAnon(Values);
+}
+
+llvm::Constant *
+ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset) {
+ // Itanium C++ ABI 2.3:
+ // A pointer to data member is an offset from the base address of
+ // the class object containing it, represented as a ptrdiff_t
+ return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
+}
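+
+// Illustrative (assumed) example: for 'struct S { int a; double b; };',
+// '&S::b' lowers to the constant offsetof(S, b), while '&S::a' lowers to
+// 0 -- which is exactly why the null value must be -1 rather than 0 (see
+// isZeroInitializable below).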
+
+llvm::Constant *
+ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
+ return BuildMemberPointer(MD, CharUnits::Zero());
+}
+
+llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
+ CharUnits ThisAdjustment) {
+ assert(MD->isInstance() && "Member function must not be static!");
+ MD = MD->getCanonicalDecl();
+
+ CodeGenTypes &Types = CGM.getTypes();
+
+ // Get the function pointer (or index if this is a virtual function).
+ llvm::Constant *MemPtr[2];
+ if (MD->isVirtual()) {
+ uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
+
+ const ASTContext &Context = getContext();
+ CharUnits PointerWidth =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ uint64_t VTableOffset = (Index * PointerWidth.getQuantity());
+
+ if (UseARMMethodPtrABI) {
+ // ARM C++ ABI 3.2.1:
+ // This ABI specifies that adj contains twice the this
+ // adjustment, plus 1 if the member function is virtual. The
+ // least significant bit of adj then makes exactly the same
+ // discrimination as the least significant bit of ptr does for
+ // Itanium.
+ MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
+ MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
+ 2 * ThisAdjustment.getQuantity() + 1);
+ } else {
+ // Itanium C++ ABI 2.3:
+ // For a virtual function, [the pointer field] is 1 plus the
+ // virtual table offset (in bytes) of the function,
+ // represented as a ptrdiff_t.
+ MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
+ MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
+ ThisAdjustment.getQuantity());
+ }
+ } else {
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
+ llvm::Type *Ty;
+ // Check whether the function has a computable LLVM signature.
+ if (Types.isFuncTypeConvertible(FPT)) {
+ // The function has a computable LLVM signature; use the correct type.
+ Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
+ } else {
+ // Use an arbitrary non-function type to tell GetAddrOfFunction that the
+ // function type is incomplete.
+ Ty = CGM.PtrDiffTy;
+ }
+ llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
+
+ MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
+ MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
+ (UseARMMethodPtrABI ? 2 : 1) *
+ ThisAdjustment.getQuantity());
+ }
+
+ return llvm::ConstantStruct::getAnon(MemPtr);
+}
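+
+// A worked (hypothetical) instance of the rules above: a virtual method in
+// vtable slot 2 with an 8-byte this-adjustment, on a target with 8-byte
+// pointers, becomes
+//   Itanium: { 2*8 + 1, 8 }
+//   ARM:     { 2*8, 2*8 + 1 }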
+
+llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
+ QualType MPType) {
+ const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
+ const ValueDecl *MPD = MP.getMemberPointerDecl();
+ if (!MPD)
+ return EmitNullMemberPointer(MPT);
+
+ CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP);
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
+ return BuildMemberPointer(MD, ThisAdjustment);
+
+ CharUnits FieldOffset =
+ getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
+ return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
+}
+
+/// The comparison algorithm is pretty easy: the member pointers are
+/// the same if they're either bitwise identical *or* both null.
+///
+/// ARM is different here only because null-ness is more complicated.
+llvm::Value *
+ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::ICmpInst::Predicate Eq;
+ llvm::Instruction::BinaryOps And, Or;
+ if (Inequality) {
+ Eq = llvm::ICmpInst::ICMP_NE;
+ And = llvm::Instruction::Or;
+ Or = llvm::Instruction::And;
+ } else {
+ Eq = llvm::ICmpInst::ICMP_EQ;
+ And = llvm::Instruction::And;
+ Or = llvm::Instruction::Or;
+ }
+
+ // Member data pointers are easy because there's a unique null
+ // value, so it just comes down to bitwise equality.
+ if (MPT->isMemberDataPointer())
+ return Builder.CreateICmp(Eq, L, R);
+
+ // For member function pointers, the tautologies are more complex.
+ // The Itanium tautology is:
+ // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
+ // The ARM tautology is:
+ // (L == R) <==> (L.ptr == R.ptr &&
+ // (L.adj == R.adj ||
+ // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
+ // The inequality tautologies have exactly the same structure, except
+ // applying De Morgan's laws.
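+  //
+  // Concretely (an assumed example): the Itanium memptrs {0, 0} and {0, 42}
+  // compare equal -- with ptr == 0 the adj test is short-circuited, which
+  // matters because converting a null memptr can leave a nonzero adj --
+  // while {&A::f, 0} and {&A::f, 8} compare unequal since they carry
+  // different this-adjustments.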
+
+ llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
+ llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
+
+ // This condition tests whether L.ptr == R.ptr. This must always be
+ // true for equality to hold.
+ llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
+
+ // This condition, together with the assumption that L.ptr == R.ptr,
+ // tests whether the pointers are both null. ARM imposes an extra
+ // condition.
+ llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
+ llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
+
+ // This condition tests whether L.adj == R.adj. If this isn't
+ // true, the pointers are unequal unless they're both null.
+ llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
+ llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
+ llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
+
+ // Null member function pointers on ARM clear the low bit of Adj,
+ // so the zero condition has to check that neither low bit is set.
+ if (UseARMMethodPtrABI) {
+ llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
+
+ // Compute (l.adj | r.adj) & 1 and test it against zero.
+ llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
+ llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
+ llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
+ "cmp.or.adj");
+ EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
+ }
+
+ // Tie together all our conditions.
+ llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
+ Result = Builder.CreateBinOp(And, PtrEq, Result,
+ Inequality ? "memptr.ne" : "memptr.eq");
+ return Result;
+}
+
+llvm::Value *
+ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+  // For member data pointers, this is just a check against -1.
+ if (MPT->isMemberDataPointer()) {
+ assert(MemPtr->getType() == CGM.PtrDiffTy);
+ llvm::Value *NegativeOne =
+ llvm::Constant::getAllOnesValue(MemPtr->getType());
+ return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
+ }
+
+ // In Itanium, a member function pointer is not null if 'ptr' is not null.
+ llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
+
+ llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
+ llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
+
+ // On ARM, a member function pointer is also non-null if the low bit of 'adj'
+ // (the virtual bit) is set.
+ if (UseARMMethodPtrABI) {
+ llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
+ llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
+ llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
+ llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
+ "memptr.isvirtual");
+ Result = Builder.CreateOr(Result, IsVirtual);
+ }
+
+ return Result;
+}
+
+bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
+ const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
+ if (!RD)
+ return false;
+
+ // Return indirectly if we have a non-trivial copy ctor or non-trivial dtor.
+ // FIXME: Use canCopyArgument() when it is fixed to handle lazily declared
+ // special members.
+ if (RD->hasNonTrivialDestructor() || RD->hasNonTrivialCopyConstructor()) {
+ auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
+ FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ return true;
+ }
+ return false;
+}
+
+/// The Itanium ABI requires non-zero initialization only for data
+/// member pointers, for which '0' is a valid offset.
+bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
+ return MPT->isMemberFunctionPointer();
+}
+
+/// The Itanium ABI always places an offset to the complete object
+/// at entry -2 in the vtable.
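+///
+/// Sketch of the (assumed) vtable prefix this relies on:
+///   [-2]  offset-to-top (a ptrdiff_t; 0 in the most-derived object's vtable)
+///   [-1]  RTTI pointer
+///   [ 0]  first virtual function slot  <- the address point vptrs reference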
+void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
+ const CXXDeleteExpr *DE,
+ Address Ptr,
+ QualType ElementType,
+ const CXXDestructorDecl *Dtor) {
+ bool UseGlobalDelete = DE->isGlobalDelete();
+ if (UseGlobalDelete) {
+ // Derive the complete-object pointer, which is what we need
+ // to pass to the deallocation function.
+
+ // Grab the vtable pointer as an intptr_t*.
+ auto *ClassDecl =
+ cast<CXXRecordDecl>(ElementType->getAs<RecordType>()->getDecl());
+ llvm::Value *VTable =
+ CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
+
+ // Track back to entry -2 and pull out the offset there.
+ llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
+ VTable, -2, "complete-offset.ptr");
+ llvm::Value *Offset =
+ CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
+
+ // Apply the offset.
+ llvm::Value *CompletePtr =
+ CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
+ CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);
+
+ // If we're supposed to call the global delete, make sure we do so
+ // even if the destructor throws.
+ CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
+ ElementType);
+ }
+
+ // FIXME: Provide a source location here even though there's no
+ // CXXMemberCallExpr for dtor call.
+ CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
+ EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, /*CE=*/nullptr);
+
+ if (UseGlobalDelete)
+ CGF.PopCleanupBlock();
+}
+
+void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
+ // void __cxa_rethrow();
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);
+
+ llvm::Constant *Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
+
+ if (isNoReturn)
+ CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
+ else
+ CGF.EmitRuntimeCallOrInvoke(Fn);
+}
+
+static llvm::Constant *getAllocateExceptionFn(CodeGenModule &CGM) {
+ // void *__cxa_allocate_exception(size_t thrown_size);
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*IsVarArgs=*/false);
+
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
+}
+
+static llvm::Constant *getThrowFn(CodeGenModule &CGM) {
+ // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
+ // void (*dest) (void *));
+
+ llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, Args, /*IsVarArgs=*/false);
+
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
+}
+
+void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
+ QualType ThrowType = E->getSubExpr()->getType();
+ // Now allocate the exception object.
+ llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
+ uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
+
+ llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(CGM);
+ llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
+ AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
+
+ CharUnits ExnAlign = getAlignmentOfExnObject();
+ CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));
+
+ // Now throw the exception.
+ llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
+ /*ForEH=*/true);
+
+ // The address of the destructor. If the exception type has a
+ // trivial destructor (or isn't a record), we just pass null.
+ llvm::Constant *Dtor = nullptr;
+ if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (!Record->hasTrivialDestructor()) {
+ CXXDestructorDecl *DtorD = Record->getDestructor();
+ Dtor = CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete);
+ Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
+ }
+ }
+ if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
+
+ llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
+ CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
+}
+
+static llvm::Constant *getItaniumDynamicCastFn(CodeGenFunction &CGF) {
+ // void *__dynamic_cast(const void *sub,
+ // const abi::__class_type_info *src,
+ // const abi::__class_type_info *dst,
+ // std::ptrdiff_t src2dst_offset);
+
+ llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
+ llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
+ llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
+
+ llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
+
+ // Mark the function as nounwind readonly.
+ llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
+ llvm::Attribute::ReadOnly };
+ llvm::AttributeSet Attrs = llvm::AttributeSet::get(
+ CGF.getLLVMContext(), llvm::AttributeSet::FunctionIndex, FuncAttrs);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
+}
+
+static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
+ // void __cxa_bad_cast();
+ llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
+}
+
+/// \brief Compute the src2dst_offset hint as described in the
+/// Itanium C++ ABI [2.9.7]
+static CharUnits computeOffsetHint(ASTContext &Context,
+ const CXXRecordDecl *Src,
+ const CXXRecordDecl *Dst) {
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+
+  // Record all inheritance paths. If Dst is not derived from Src, we can
+  // skip the whole computation below and report that Src is not a public
+  // base of Dst.
+ if (!Dst->isDerivedFrom(Src, Paths))
+ return CharUnits::fromQuantity(-2ULL);
+
+ unsigned NumPublicPaths = 0;
+ CharUnits Offset;
+
+ // Now walk all possible inheritance paths.
+ for (const CXXBasePath &Path : Paths) {
+ if (Path.Access != AS_public) // Ignore non-public inheritance.
+ continue;
+
+ ++NumPublicPaths;
+
+ for (const CXXBasePathElement &PathElement : Path) {
+ // If the path contains a virtual base class we can't give any hint.
+ // -1: no hint.
+ if (PathElement.Base->isVirtual())
+ return CharUnits::fromQuantity(-1ULL);
+
+ if (NumPublicPaths > 1) // Won't use offsets, skip computation.
+ continue;
+
+ // Accumulate the base class offsets.
+ const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
+ Offset += L.getBaseClassOffset(
+ PathElement.Base->getType()->getAsCXXRecordDecl());
+ }
+ }
+
+ // -2: Src is not a public base of Dst.
+ if (NumPublicPaths == 0)
+ return CharUnits::fromQuantity(-2ULL);
+
+ // -3: Src is a multiple public base type but never a virtual base type.
+ if (NumPublicPaths > 1)
+ return CharUnits::fromQuantity(-3ULL);
+
+ // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
+ // Return the offset of Src from the origin of Dst.
+ return Offset;
+}
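+
+// For instance (hypothetical hierarchy): given 'struct B {}; struct M {};
+// struct D : B, M {}', computeOffsetHint(Ctx, M, D) returns the offset of
+// the M subobject within D, whereas computeOffsetHint(Ctx, D, B) returns
+// -2 because B is not derived from D at all.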
+
+static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
+ // void __cxa_bad_typeid();
+ llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
+}
+
+bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
+ QualType SrcRecordTy) {
+ return IsDeref;
+}
+
+void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
+ llvm::Value *Fn = getBadTypeidFn(CGF);
+ CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+}
+
+llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
+ QualType SrcRecordTy,
+ Address ThisPtr,
+ llvm::Type *StdTypeInfoPtrTy) {
+ auto *ClassDecl =
+ cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
+ llvm::Value *Value =
+ CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
+
+ // Load the type info.
+ Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
+ return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
+}
+
+bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
+ QualType SrcRecordTy) {
+ return SrcIsPtr;
+}
+
+llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
+ CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
+ QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
+ llvm::Type *PtrDiffLTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+ llvm::Type *DestLTy = CGF.ConvertType(DestTy);
+
+ llvm::Value *SrcRTTI =
+ CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
+ llvm::Value *DestRTTI =
+ CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
+
+ // Compute the offset hint.
+ const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
+ const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
+ llvm::Value *OffsetHint = llvm::ConstantInt::get(
+ PtrDiffLTy,
+ computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
+
+ // Emit the call to __dynamic_cast.
+ llvm::Value *Value = ThisAddr.getPointer();
+ Value = CGF.EmitCastToVoidPtr(Value);
+
+ llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
+ Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
+ Value = CGF.Builder.CreateBitCast(Value, DestLTy);
+
+  // C++ [expr.dynamic.cast]p9:
+  //   A failed cast to reference type throws std::bad_cast.
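+  //
+  // e.g. (illustrative): dynamic_cast<Derived&>(baseRef) must throw rather
+  // than produce a null reference, so a null result from __dynamic_cast
+  // branches to the bad-cast block below.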
+ if (DestTy->isReferenceType()) {
+ llvm::BasicBlock *BadCastBlock =
+ CGF.createBasicBlock("dynamic_cast.bad_cast");
+
+ llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
+ CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
+
+ CGF.EmitBlock(BadCastBlock);
+ EmitBadCastCall(CGF);
+ }
+
+ return Value;
+}
+
+llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
+ Address ThisAddr,
+ QualType SrcRecordTy,
+ QualType DestTy) {
+ llvm::Type *PtrDiffLTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+ llvm::Type *DestLTy = CGF.ConvertType(DestTy);
+
+ auto *ClassDecl =
+ cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
+ // Get the vtable pointer.
+ llvm::Value *VTable = CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(),
+ ClassDecl);
+
+ // Get the offset-to-top from the vtable.
+ llvm::Value *OffsetToTop =
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
+ OffsetToTop =
+ CGF.Builder.CreateAlignedLoad(OffsetToTop, CGF.getPointerAlign(),
+ "offset.to.top");
+
+ // Finally, add the offset to the pointer.
+ llvm::Value *Value = ThisAddr.getPointer();
+ Value = CGF.EmitCastToVoidPtr(Value);
+ Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
+
+ return CGF.Builder.CreateBitCast(Value, DestLTy);
+}
+
+bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
+ llvm::Value *Fn = getBadCastFn(CGF);
+ CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+ return true;
+}
+
+llvm::Value *
+ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
+ Address This,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl) {
+ llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
+ CharUnits VBaseOffsetOffset =
+ CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
+ BaseClassDecl);
+
+ llvm::Value *VBaseOffsetPtr =
+ CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
+ "vbase.offset.ptr");
+ VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
+ CGM.PtrDiffTy->getPointerTo());
+
+ llvm::Value *VBaseOffset =
+ CGF.Builder.CreateAlignedLoad(VBaseOffsetPtr, CGF.getPointerAlign(),
+ "vbase.offset");
+
+ return VBaseOffset;
+}
+
+void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
+ // Just make sure we're in sync with TargetCXXABI.
+ assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
+
+ // The constructor used for constructing this as a base class;
+ // ignores virtual bases.
+ CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
+
+ // The constructor used for constructing this as a complete class;
+ // constructs the virtual bases, then calls the base constructor.
+ if (!D->getParent()->isAbstract()) {
+ // We don't need to emit the complete ctor if the class is abstract.
+ CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
+ }
+}
+
+void
+ItaniumCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
+ SmallVectorImpl<CanQualType> &ArgTys) {
+ ASTContext &Context = getContext();
+
+ // All parameters are already in place except VTT, which goes after 'this'.
+ // These are Clang types, so we don't need to worry about sret yet.
+
+ // Check if we need to add a VTT parameter (which has type void **).
+ if (T == StructorType::Base && MD->getParent()->getNumVBases() != 0)
+ ArgTys.insert(ArgTys.begin() + 1,
+ Context.getPointerType(Context.VoidPtrTy));
+}
+
+void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
+ // The destructor used for destructing this as a base class; ignores
+ // virtual bases.
+ CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
+
+ // The destructor used for destructing this as a most-derived class;
+  // calls the base destructor and then destructs any virtual bases.
+ CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
+
+ // The destructor in a virtual table is always a 'deleting'
+ // destructor, which calls the complete destructor and then uses the
+ // appropriate operator delete.
+ if (D->isVirtual())
+ CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
+}
+
+void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
+ assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
+
+ // Check if we need a VTT parameter as well.
+ if (NeedsVTTParameter(CGF.CurGD)) {
+ ASTContext &Context = getContext();
+
+ // FIXME: avoid the fake decl
+ QualType T = Context.getPointerType(Context.VoidPtrTy);
+ ImplicitParamDecl *VTTDecl
+ = ImplicitParamDecl::Create(Context, nullptr, MD->getLocation(),
+ &Context.Idents.get("vtt"), T);
+ Params.insert(Params.begin() + 1, VTTDecl);
+ getStructorImplicitParamDecl(CGF) = VTTDecl;
+ }
+}
+
+void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
+  // Initialize the 'this' slot.
+ EmitThisParam(CGF);
+
+  // Initialize the 'vtt' slot if needed.
+ if (getStructorImplicitParamDecl(CGF)) {
+ getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
+ CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
+ }
+
+  // If this is a function that the ABI specifies returns 'this', initialize
+  // the return slot to 'this' at the start of the function.
+  //
+  // Unlike the setting of return types, this is done within the ABI
+  // implementation instead of by clients of CGCXXABI because:
+  // 1) getThisValue is currently protected
+  // 2) in theory, an ABI could implement 'this' returns some other way;
+  //    HasThisReturn only specifies a contract, not the implementation
+ if (HasThisReturn(CGF.CurGD))
+ CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
+}
+
+unsigned ItaniumCXXABI::addImplicitConstructorArgs(
+ CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
+ bool ForVirtualBase, bool Delegating, CallArgList &Args) {
+ if (!NeedsVTTParameter(GlobalDecl(D, Type)))
+ return 0;
+
+ // Insert the implicit 'vtt' argument as the second argument.
+ llvm::Value *VTT =
+ CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
+ QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
+ Args.insert(Args.begin() + 1,
+ CallArg(RValue::get(VTT), VTTTy, /*needscopy=*/false));
+ return 1; // Added one arg.
+}
+
+void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
+ const CXXDestructorDecl *DD,
+ CXXDtorType Type, bool ForVirtualBase,
+ bool Delegating, Address This) {
+ GlobalDecl GD(DD, Type);
+ llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
+ QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
+
+ llvm::Value *Callee = nullptr;
+ if (getContext().getLangOpts().AppleKext)
+ Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
+
+ if (!Callee)
+ Callee = CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type));
+
+ CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(),
+ This.getPointer(), VTT, VTTTy, nullptr);
+}
+
+void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
+ const CXXRecordDecl *RD) {
+ llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
+ if (VTable->hasInitializer())
+ return;
+
+ ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
+ const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
+ llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
+ llvm::Constant *RTTI =
+ CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
+
+ // Create and set the initializer.
+ llvm::Constant *Init = CGVT.CreateVTableInitializer(
+ RD, VTLayout.vtable_component_begin(), VTLayout.getNumVTableComponents(),
+ VTLayout.vtable_thunk_begin(), VTLayout.getNumVTableThunks(), RTTI);
+ VTable->setInitializer(Init);
+
+ // Set the correct linkage.
+ VTable->setLinkage(Linkage);
+
+ if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
+ VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
+
+ // Set the right visibility.
+ CGM.setGlobalVisibility(VTable, RD);
+
+  // Use pointer alignment for the vtable. Otherwise we would align it based
+  // on the size of the initializer, which doesn't make sense as only single
+  // values are read.
+ unsigned PAlign = CGM.getTarget().getPointerAlign(0);
+ VTable->setAlignment(getContext().toCharUnitsFromBits(PAlign).getQuantity());
+
+ // If this is the magic class __cxxabiv1::__fundamental_type_info,
+ // we will emit the typeinfo for the fundamental types. This is the
+ // same behaviour as GCC.
+ const DeclContext *DC = RD->getDeclContext();
+ if (RD->getIdentifier() &&
+ RD->getIdentifier()->isStr("__fundamental_type_info") &&
+ isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
+ cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
+ DC->getParent()->isTranslationUnit())
+ EmitFundamentalRTTIDescriptors();
+
+ CGM.EmitVTableBitSetEntries(VTable, VTLayout);
+}
+
+bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
+ CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
+ if (Vptr.NearestVBase == nullptr)
+ return false;
+ return NeedsVTTParameter(CGF.CurGD);
+}
+
+llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
+ CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase) {
+
+ if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
+ NeedsVTTParameter(CGF.CurGD)) {
+ return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
+ NearestVBase);
+ }
+ return getVTableAddressPoint(Base, VTableClass);
+}
+
+llvm::Constant *
+ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
+ const CXXRecordDecl *VTableClass) {
+ llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
+
+ // Find the appropriate vtable within the vtable group.
+ uint64_t AddressPoint = CGM.getItaniumVTableContext()
+ .getVTableLayout(VTableClass)
+ .getAddressPoint(Base);
+ llvm::Value *Indices[] = {
+ llvm::ConstantInt::get(CGM.Int64Ty, 0),
+ llvm::ConstantInt::get(CGM.Int64Ty, AddressPoint)
+ };
+
+ return llvm::ConstantExpr::getInBoundsGetElementPtr(VTable->getValueType(),
+ VTable, Indices);
+}
+
+llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
+ CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase) {
+ assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
+ NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
+
+ // Get the secondary vpointer index.
+ uint64_t VirtualPointerIndex =
+ CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
+
+  // Load the VTT.
+ llvm::Value *VTT = CGF.LoadCXXVTT();
+ if (VirtualPointerIndex)
+ VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
+
+ // And load the address point from the VTT.
+ return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
+}
+
+llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
+ BaseSubobject Base, const CXXRecordDecl *VTableClass) {
+ return getVTableAddressPoint(Base, VTableClass);
+}
+
+llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
+ CharUnits VPtrOffset) {
+ assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
+
+ llvm::GlobalVariable *&VTable = VTables[RD];
+ if (VTable)
+ return VTable;
+
+ // Queue up this v-table for possible deferred emission.
+ CGM.addDeferredVTable(RD);
+
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ getMangleContext().mangleCXXVTable(RD, Out);
+
+ ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
+ llvm::ArrayType *ArrayType = llvm::ArrayType::get(
+ CGM.Int8PtrTy, VTContext.getVTableLayout(RD).getNumVTableComponents());
+
+ VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
+ Name, ArrayType, llvm::GlobalValue::ExternalLinkage);
+ VTable->setUnnamedAddr(true);
+
+ if (RD->hasAttr<DLLImportAttr>())
+ VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ else if (RD->hasAttr<DLLExportAttr>())
+ VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+
+ return VTable;
+}
+
+llvm::Value *ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
+ GlobalDecl GD,
+ Address This,
+ llvm::Type *Ty,
+ SourceLocation Loc) {
+ GD = GD.getCanonicalDecl();
+ Ty = Ty->getPointerTo()->getPointerTo();
+ auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
+ llvm::Value *VTable = CGF.GetVTablePtr(This, Ty, MethodDecl->getParent());
+
+ if (CGF.SanOpts.has(SanitizerKind::CFIVCall))
+ CGF.EmitVTablePtrCheckForCall(MethodDecl, VTable,
+ CodeGenFunction::CFITCK_VCall, Loc);
+
+ uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
+ llvm::Value *VFuncPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
+ return CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
+}
+
+llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
+ CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
+ Address This, const CXXMemberCallExpr *CE) {
+ assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
+ assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
+
+ const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
+ Dtor, getFromDtorType(DtorType));
+ llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
+ llvm::Value *Callee =
+ getVirtualFunctionPointer(CGF, GlobalDecl(Dtor, DtorType), This, Ty,
+ CE ? CE->getLocStart() : SourceLocation());
+
+ CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(),
+ This.getPointer(), /*ImplicitParam=*/nullptr,
+ QualType(), CE);
+ return nullptr;
+}
+
+void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
+ CodeGenVTables &VTables = CGM.getVTables();
+ llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
+ VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
+}
+
+bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
+ // We don't emit available_externally vtables if we are in -fapple-kext mode
+ // because kext mode does not permit devirtualization.
+ if (CGM.getLangOpts().AppleKext)
+ return false;
+
+  // If we don't have any inline virtual functions and the vtable is not
+  // hidden, then we are safe to emit an available_externally copy of the
+  // vtable.
+  // FIXME: we can still emit a copy of the vtable if we can emit
+  // definitions of the inline functions.
+ return !hasAnyUsedVirtualInlineFunction(RD) && !isVTableHidden(RD);
+}
+
+static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
+ Address InitialPtr,
+ int64_t NonVirtualAdjustment,
+ int64_t VirtualAdjustment,
+ bool IsReturnAdjustment) {
+ if (!NonVirtualAdjustment && !VirtualAdjustment)
+ return InitialPtr.getPointer();
+
+ Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
+
+ // In a base-to-derived cast, the non-virtual adjustment is applied first.
+ if (NonVirtualAdjustment && !IsReturnAdjustment) {
+ V = CGF.Builder.CreateConstInBoundsByteGEP(V,
+ CharUnits::fromQuantity(NonVirtualAdjustment));
+ }
+
+ // Perform the virtual adjustment if we have one.
+ llvm::Value *ResultPtr;
+ if (VirtualAdjustment) {
+ llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
+ Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
+ llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
+
+ llvm::Value *OffsetPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
+
+ OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
+
+ // Load the adjustment offset from the vtable.
+ llvm::Value *Offset =
+ CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
+
+ // Adjust our pointer.
+ ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
+ } else {
+ ResultPtr = V.getPointer();
+ }
+
+ // In a derived-to-base conversion, the non-virtual adjustment is
+ // applied second.
+ if (NonVirtualAdjustment && IsReturnAdjustment) {
+ ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
+ NonVirtualAdjustment);
+ }
+
+ // Cast back to the original type.
+ return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
+}
+
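+// Illustrative case for the adjustment helpers here (not from this file):
+// with multiple inheritance, the B-in-C vtable slot for C::g points at a
+// thunk that applies a non-virtual this-adjustment by the negative offset
+// of B within C before jumping to C::g. Virtual bases are handled by the
+// vtable-load path in performTypeAdjustment above instead.
+//
+//   struct A { virtual void f(); int a; };
+//   struct B { virtual void g(); int b; };
+//   struct C : A, B { void g() override; };   // a call via B* adjusts this
+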
+llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
+ Address This,
+ const ThisAdjustment &TA) {
+ return performTypeAdjustment(CGF, This, TA.NonVirtual,
+ TA.Virtual.Itanium.VCallOffsetOffset,
+ /*IsReturnAdjustment=*/false);
+}
+
+llvm::Value *
+ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
+ const ReturnAdjustment &RA) {
+ return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
+ RA.Virtual.Itanium.VBaseOffsetOffset,
+ /*IsReturnAdjustment=*/true);
+}
+
+void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
+ RValue RV, QualType ResultType) {
+ if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
+ return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
+
+ // Destructor thunks in the ARM ABI have indeterminate results.
+ llvm::Type *T = CGF.ReturnValue.getElementType();
+ RValue Undef = RValue::get(llvm::UndefValue::get(T));
+ return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
+}
+
+/************************** Array allocation cookies **************************/
+
+CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
+ // The array cookie is a size_t; pad that up to the element alignment.
+ // The cookie is actually right-justified in that space.
+ return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
+ CGM.getContext().getTypeAlignInChars(elementType));
+}
+
+Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ Address NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) {
+ assert(requiresArrayCookie(expr));
+
+ unsigned AS = NewPtr.getAddressSpace();
+
+ ASTContext &Ctx = getContext();
+ CharUnits SizeSize = CGF.getSizeSize();
+
+ // The size of the cookie.
+ CharUnits CookieSize =
+ std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
+ assert(CookieSize == getArrayCookieSizeImpl(ElementType));
+
+ // Compute an offset to the cookie.
+ Address CookiePtr = NewPtr;
+ CharUnits CookieOffset = CookieSize - SizeSize;
+ if (!CookieOffset.isZero())
+ CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
+
+ // Write the number of elements into the appropriate slot.
+ Address NumElementsPtr =
+ CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
+ llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
+
+ // Handle the array cookie specially in ASan.
+ if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
+ expr->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
+ // The store to the CookiePtr does not need to be instrumented.
+ CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
+ llvm::Constant *F =
+ CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
+ CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
+ }
+
+ // Finally, compute a pointer to the actual data buffer by skipping
+ // over the cookie completely.
+ return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
+}
+
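+// Cookie layout sketch for InitializeArrayCookie above, assuming an 8-byte
+// size_t and a 16-byte-aligned element type T (illustrative only):
+//
+//   new T[n] -> [ 8 bytes padding | 8-byte count n | T[0] T[1] ... ]
+//               ^ NewPtr          ^ CookiePtr       ^ pointer returned
+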
+llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
+ Address allocPtr,
+ CharUnits cookieSize) {
+  // The number of elements is right-justified in the cookie.
+ Address numElementsPtr = allocPtr;
+ CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
+ if (!numElementsOffset.isZero())
+ numElementsPtr =
+ CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
+
+ unsigned AS = allocPtr.getAddressSpace();
+ numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
+ if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
+ return CGF.Builder.CreateLoad(numElementsPtr);
+ // In asan mode emit a function call instead of a regular load and let the
+ // run-time deal with it: if the shadow is properly poisoned return the
+ // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
+ // We can't simply ignore this load using nosanitize metadata because
+ // the metadata may be lost.
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
+ llvm::Constant *F =
+ CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
+ return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
+}
+
+CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
+ // ARM says that the cookie is always:
+ // struct array_cookie {
+ // std::size_t element_size; // element_size != 0
+ // std::size_t element_count;
+ // };
+ // But the base ABI doesn't give anything an alignment greater than
+ // 8, so we can dismiss this as typical ABI-author blindness to
+ // actual language complexity and round up to the element alignment.
+ return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
+ CGM.getContext().getTypeAlignInChars(elementType));
+}
+
+Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ Address newPtr,
+ llvm::Value *numElements,
+ const CXXNewExpr *expr,
+ QualType elementType) {
+ assert(requiresArrayCookie(expr));
+
+ // The cookie is always at the start of the buffer.
+ Address cookie = newPtr;
+
+ // The first element is the element size.
+ cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
+ llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
+ getContext().getTypeSizeInChars(elementType).getQuantity());
+ CGF.Builder.CreateStore(elementSize, cookie);
+
+ // The second element is the element count.
+ cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1, CGF.getSizeSize());
+ CGF.Builder.CreateStore(numElements, cookie);
+
+ // Finally, compute a pointer to the actual data buffer by skipping
+ // over the cookie completely.
+ CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
+ return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
+}
+
+llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
+ Address allocPtr,
+ CharUnits cookieSize) {
+ // The number of elements is at offset sizeof(size_t) relative to
+ // the allocated pointer.
+ Address numElementsPtr
+ = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
+
+ numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
+ return CGF.Builder.CreateLoad(numElementsPtr);
+}
+
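+// ARM cookie layout sketch (illustrative, assuming a 4-byte size_t and
+// element alignment of at most 8 bytes):
+//
+//   new T[n] -> [ element_size | element_count | T[0] T[1] ... ]
+//               ^ newPtr         ^ +sizeof(size_t) ^ pointer returned
+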
+/*********************** Static local initialization **************************/
+
+static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
+ llvm::PointerType *GuardPtrTy) {
+ // int __cxa_guard_acquire(__guard *guard_object);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
+ GuardPtrTy, /*isVarArg=*/false);
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire",
+ llvm::AttributeSet::get(CGM.getLLVMContext(),
+ llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::NoUnwind));
+}
+
+static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
+ llvm::PointerType *GuardPtrTy) {
+ // void __cxa_guard_release(__guard *guard_object);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release",
+ llvm::AttributeSet::get(CGM.getLLVMContext(),
+ llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::NoUnwind));
+}
+
+static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
+ llvm::PointerType *GuardPtrTy) {
+ // void __cxa_guard_abort(__guard *guard_object);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort",
+ llvm::AttributeSet::get(CGM.getLLVMContext(),
+ llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::NoUnwind));
+}
+
+namespace {
+ struct CallGuardAbort final : EHScopeStack::Cleanup {
+ llvm::GlobalVariable *Guard;
+ CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
+ Guard);
+ }
+ };
+}
+
+/// The ARM code here follows the Itanium code closely enough that we
+/// just special-case it at particular places.
+void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
+ const VarDecl &D,
+ llvm::GlobalVariable *var,
+ bool shouldPerformInit) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ // We only need to use thread-safe statics for local non-TLS variables;
+ // global initialization is always single-threaded.
+ bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
+ D.isLocalVarDecl() && !D.getTLSKind();
+
+ // If we have a global variable with internal linkage and thread-safe statics
+ // are disabled, we can just let the guard variable be of type i8.
+ bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
+
+ llvm::IntegerType *guardTy;
+ CharUnits guardAlignment;
+ if (useInt8GuardVariable) {
+ guardTy = CGF.Int8Ty;
+ guardAlignment = CharUnits::One();
+ } else {
+    // Guard variables are 64 bits in the generic ABI and the width of size_t
+    // on ARM (i.e. 32-bit on AArch32, 64-bit on AArch64).
+ if (UseARMGuardVarABI) {
+ guardTy = CGF.SizeTy;
+ guardAlignment = CGF.getSizeAlign();
+ } else {
+ guardTy = CGF.Int64Ty;
+ guardAlignment = CharUnits::fromQuantity(
+ CGM.getDataLayout().getABITypeAlignment(guardTy));
+ }
+ }
+ llvm::PointerType *guardPtrTy = guardTy->getPointerTo();
+
+ // Create the guard variable if we don't already have it (as we
+ // might if we're double-emitting this function body).
+ llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
+ if (!guard) {
+ // Mangle the name for the guard.
+ SmallString<256> guardName;
+ {
+ llvm::raw_svector_ostream out(guardName);
+ getMangleContext().mangleStaticGuardVariable(&D, out);
+ }
+
+ // Create the guard variable with a zero-initializer.
+ // Just absorb linkage and visibility from the guarded variable.
+ guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
+ false, var->getLinkage(),
+ llvm::ConstantInt::get(guardTy, 0),
+ guardName.str());
+ guard->setVisibility(var->getVisibility());
+ // If the variable is thread-local, so is its guard variable.
+ guard->setThreadLocalMode(var->getThreadLocalMode());
+ guard->setAlignment(guardAlignment.getQuantity());
+
+ // The ABI says: "It is suggested that it be emitted in the same COMDAT
+ // group as the associated data object." In practice, this doesn't work for
+ // non-ELF object formats, so only do it for ELF.
+ llvm::Comdat *C = var->getComdat();
+ if (!D.isLocalVarDecl() && C &&
+ CGM.getTarget().getTriple().isOSBinFormatELF()) {
+ guard->setComdat(C);
+ CGF.CurFn->setComdat(C);
+ } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
+ guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
+ }
+
+ CGM.setStaticLocalDeclGuardAddress(&D, guard);
+ }
+
+ Address guardAddr = Address(guard, guardAlignment);
+
+ // Test whether the variable has completed initialization.
+ //
+ // Itanium C++ ABI 3.3.2:
+ // The following is pseudo-code showing how these functions can be used:
+ // if (obj_guard.first_byte == 0) {
+ // if ( __cxa_guard_acquire (&obj_guard) ) {
+ // try {
+ // ... initialize the object ...;
+ // } catch (...) {
+ // __cxa_guard_abort (&obj_guard);
+ // throw;
+ // }
+ // ... queue object destructor with __cxa_atexit() ...;
+ // __cxa_guard_release (&obj_guard);
+ // }
+ // }
+
+ // Load the first byte of the guard variable.
+ llvm::LoadInst *LI =
+ Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
+
+ // Itanium ABI:
+ // An implementation supporting thread-safety on multiprocessor
+ // systems must also guarantee that references to the initialized
+ // object do not occur before the load of the initialization flag.
+ //
+ // In LLVM, we do this by marking the load Acquire.
+ if (threadsafe)
+ LI->setAtomic(llvm::Acquire);
+
+ // For ARM, we should only check the first bit, rather than the entire byte:
+ //
+ // ARM C++ ABI 3.2.3.1:
+ // To support the potential use of initialization guard variables
+ // as semaphores that are the target of ARM SWP and LDREX/STREX
+ // synchronizing instructions we define a static initialization
+ // guard variable to be a 4-byte aligned, 4-byte word with the
+ // following inline access protocol.
+ // #define INITIALIZED 1
+ // if ((obj_guard & INITIALIZED) != INITIALIZED) {
+ // if (__cxa_guard_acquire(&obj_guard))
+ // ...
+ // }
+ //
+ // and similarly for ARM64:
+ //
+ // ARM64 C++ ABI 3.2.2:
+ // This ABI instead only specifies the value bit 0 of the static guard
+ // variable; all other bits are platform defined. Bit 0 shall be 0 when the
+ // variable is not initialized and 1 when it is.
+ llvm::Value *V =
+ (UseARMGuardVarABI && !useInt8GuardVariable)
+ ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
+ : LI;
+  llvm::Value *isUninitialized = Builder.CreateIsNull(V, "guard.uninitialized");
+
+ llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
+ llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
+
+ // Check if the first byte of the guard variable is zero.
+  Builder.CreateCondBr(isUninitialized, InitCheckBlock, EndBlock);
+
+ CGF.EmitBlock(InitCheckBlock);
+
+ // Variables used when coping with thread-safe statics and exceptions.
+ if (threadsafe) {
+ // Call __cxa_guard_acquire.
+ llvm::Value *V
+ = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
+
+ llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
+
+ Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
+ InitBlock, EndBlock);
+
+ // Call __cxa_guard_abort along the exceptional edge.
+ CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
+
+ CGF.EmitBlock(InitBlock);
+ }
+
+ // Emit the initializer and add a global destructor if appropriate.
+ CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
+
+ if (threadsafe) {
+ // Pop the guard-abort cleanup if we pushed one.
+ CGF.PopCleanupBlock();
+
+ // Call __cxa_guard_release. This cannot throw.
+ CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
+ guardAddr.getPointer());
+ } else {
+ Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guardAddr);
+ }
+
+ CGF.EmitBlock(EndBlock);
+}
+
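+// With thread-safe statics disabled, EmitGuardedInit above degenerates to a
+// plain flag test (sketch; Widget is an illustrative type):
+//
+//   static Widget w;                          // inside some function
+//   // lowers, morally, to:
+//   if (w_guard == 0) { construct w; register its dtor; w_guard = 1; }
+//
+// The thread-safe path instead brackets the construction with
+// __cxa_guard_acquire and __cxa_guard_release.
+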
+/// Register a global destructor using __cxa_atexit.
+static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
+ llvm::Constant *dtor,
+ llvm::Constant *addr,
+ bool TLS) {
+ const char *Name = "__cxa_atexit";
+ if (TLS) {
+ const llvm::Triple &T = CGF.getTarget().getTriple();
+ Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
+ }
+
+ // We're assuming that the destructor function is something we can
+ // reasonably call with the default CC. Go ahead and cast it to the
+ // right prototype.
+ llvm::Type *dtorTy =
+ llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
+
+ // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
+ llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
+ llvm::FunctionType *atexitTy =
+ llvm::FunctionType::get(CGF.IntTy, paramTys, false);
+
+ // Fetch the actual function.
+ llvm::Constant *atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
+ if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
+ fn->setDoesNotThrow();
+
+ // Create a variable that binds the atexit to this shared object.
+ llvm::Constant *handle =
+ CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
+
+ llvm::Value *args[] = {
+ llvm::ConstantExpr::getBitCast(dtor, dtorTy),
+ llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
+ handle
+ };
+ CGF.EmitNounwindRuntimeCall(atexit, args);
+}
+
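+// Conceptually, for a global `Widget w;` the helper above emits the moral
+// equivalent of the following (sketch; Widget, w, and w_dtor are
+// illustrative names):
+//
+//   static void w_dtor(void *p) { static_cast<Widget *>(p)->~Widget(); }
+//   __cxa_atexit(&w_dtor, &w, &__dso_handle);
+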
+/// Register a global destructor as best as we know how.
+void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
+ const VarDecl &D,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // Use __cxa_atexit if available.
+ if (CGM.getCodeGenOpts().CXAAtExit)
+ return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
+
+ if (D.getTLSKind())
+ CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");
+
+ // In Apple kexts, we want to add a global destructor entry.
+ // FIXME: shouldn't this be guarded by some variable?
+ if (CGM.getLangOpts().AppleKext) {
+ // Generate a global destructor entry.
+ return CGM.AddCXXDtorEntry(dtor, addr);
+ }
+
+ CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
+}
+
+static bool isThreadWrapperReplaceable(const VarDecl *VD,
+ CodeGen::CodeGenModule &CGM) {
+ assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
+ // Darwin prefers to have references to thread local variables to go through
+ // the thread wrapper instead of directly referencing the backing variable.
+ return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
+ CGM.getTarget().getTriple().isOSDarwin();
+}
+
+/// Get the appropriate linkage for the wrapper function. This is essentially
+/// the weak form of the variable's linkage; every translation unit which needs
+/// the wrapper emits a copy, and we want the linker to merge them.
+static llvm::GlobalValue::LinkageTypes
+getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
+ llvm::GlobalValue::LinkageTypes VarLinkage =
+ CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false);
+
+ // For internal linkage variables, we don't need an external or weak wrapper.
+ if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
+ return VarLinkage;
+
+ // If the thread wrapper is replaceable, give it appropriate linkage.
+ if (isThreadWrapperReplaceable(VD, CGM))
+ if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
+ !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
+ return VarLinkage;
+ return llvm::GlobalValue::WeakODRLinkage;
+}
+
+llvm::Function *
+ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
+ llvm::Value *Val) {
+ // Mangle the name for the thread_local wrapper function.
+ SmallString<256> WrapperName;
+ {
+ llvm::raw_svector_ostream Out(WrapperName);
+ getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
+ }
+
+ if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
+ return cast<llvm::Function>(V);
+
+ llvm::Type *RetTy = Val->getType();
+ if (VD->getType()->isReferenceType())
+ RetTy = RetTy->getPointerElementType();
+
+ llvm::FunctionType *FnTy = llvm::FunctionType::get(RetTy, false);
+ llvm::Function *Wrapper =
+ llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
+ WrapperName.str(), &CGM.getModule());
+ // Always resolve references to the wrapper at link time.
+ if (!Wrapper->hasLocalLinkage() && !(isThreadWrapperReplaceable(VD, CGM) &&
+ !llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) &&
+ !llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage())))
+ Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
+
+ if (isThreadWrapperReplaceable(VD, CGM)) {
+ Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
+ Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
+ }
+ return Wrapper;
+}
+
+void ItaniumCXXABI::EmitThreadLocalInitFuncs(
+ CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
+ ArrayRef<llvm::Function *> CXXThreadLocalInits,
+ ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
+ llvm::Function *InitFunc = nullptr;
+ if (!CXXThreadLocalInits.empty()) {
+ // Generate a guarded initialization function.
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
+ InitFunc = CGM.CreateGlobalInitOrDestructFunction(FTy, "__tls_init", FI,
+ SourceLocation(),
+ /*TLS=*/true);
+ llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
+ llvm::GlobalVariable::InternalLinkage,
+ llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
+ Guard->setThreadLocal(true);
+
+ CharUnits GuardAlign = CharUnits::One();
+ Guard->setAlignment(GuardAlign.getQuantity());
+
+ CodeGenFunction(CGM)
+ .GenerateCXXGlobalInitFunc(InitFunc, CXXThreadLocalInits,
+ Address(Guard, GuardAlign));
+ }
+ for (const VarDecl *VD : CXXThreadLocals) {
+ llvm::GlobalVariable *Var =
+ cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
+
+    // Some targets require that all access to thread local variables go
+    // through the thread wrapper, so we cannot emit our own thread wrapper
+    // or thread helper here unless this TU defines the variable.
+ if (isThreadWrapperReplaceable(VD, CGM) && !VD->hasDefinition())
+ continue;
+
+ // Mangle the name for the thread_local initialization function.
+ SmallString<256> InitFnName;
+ {
+ llvm::raw_svector_ostream Out(InitFnName);
+ getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
+ }
+
+ // If we have a definition for the variable, emit the initialization
+ // function as an alias to the global Init function (if any). Otherwise,
+ // produce a declaration of the initialization function.
+ llvm::GlobalValue *Init = nullptr;
+ bool InitIsInitFunc = false;
+ if (VD->hasDefinition()) {
+ InitIsInitFunc = true;
+ if (InitFunc)
+ Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
+ InitFunc);
+ } else {
+ // Emit a weak global function referring to the initialization function.
+ // This function will not exist if the TU defining the thread_local
+ // variable in question does not need any dynamic initialization for
+ // its thread_local variables.
+ llvm::FunctionType *FnTy = llvm::FunctionType::get(CGM.VoidTy, false);
+ Init = llvm::Function::Create(
+ FnTy, llvm::GlobalVariable::ExternalWeakLinkage, InitFnName.str(),
+ &CGM.getModule());
+ }
+
+ if (Init)
+ Init->setVisibility(Var->getVisibility());
+
+ llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Var);
+ llvm::LLVMContext &Context = CGM.getModule().getContext();
+ llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
+ CGBuilderTy Builder(CGM, Entry);
+ if (InitIsInitFunc) {
+ if (Init)
+ Builder.CreateCall(Init);
+ } else {
+ // Don't know whether we have an init function. Call it if it exists.
+ llvm::Value *Have = Builder.CreateIsNotNull(Init);
+ llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
+ llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
+ Builder.CreateCondBr(Have, InitBB, ExitBB);
+
+ Builder.SetInsertPoint(InitBB);
+ Builder.CreateCall(Init);
+ Builder.CreateBr(ExitBB);
+
+ Builder.SetInsertPoint(ExitBB);
+ }
+
+ // For a reference, the result of the wrapper function is a pointer to
+ // the referenced object.
+ llvm::Value *Val = Var;
+ if (VD->getType()->isReferenceType()) {
+ CharUnits Align = CGM.getContext().getDeclAlign(VD);
+ Val = Builder.CreateAlignedLoad(Val, Align);
+ }
+ if (Val->getType() != Wrapper->getReturnType())
+ Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Val, Wrapper->getReturnType(), "");
+ Builder.CreateRet(Val);
+ }
+}
+
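+// Pseudo-code for the wrapper built above for `thread_local T v;` (sketch;
+// _ZTW1v/_ZTH1v are the Itanium wrapper and init manglings for v):
+//
+//   T *_ZTW1v() {
+//     if (&_ZTH1v != nullptr)   // _ZTH1v is a weak declaration when v is
+//       _ZTH1v();               // defined in another TU; call if present
+//     return &v;
+//   }
+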
+LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
+ const VarDecl *VD,
+ QualType LValType) {
+ llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
+ llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
+
+ llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
+ if (isThreadWrapperReplaceable(VD, CGF.CGM))
+ CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
+
+ LValue LV;
+ if (VD->getType()->isReferenceType())
+ LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
+ else
+ LV = CGF.MakeAddrLValue(CallVal, LValType,
+ CGF.getContext().getDeclAlign(VD));
+ // FIXME: need setObjCGCLValueClass?
+ return LV;
+}
+
+/// Return whether the given global decl needs a VTT parameter, which it does
+/// if it's a base constructor or destructor with virtual bases.
+bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+  // If the class has no virtual bases, just return early.
+ if (!MD->getParent()->getNumVBases())
+ return false;
+
+ // Check if we have a base constructor.
+ if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
+ return true;
+
+ // Check if we have a base destructor.
+ if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
+ return true;
+
+ return false;
+}
+
+namespace {
+class ItaniumRTTIBuilder {
+ CodeGenModule &CGM; // Per-module state.
+ llvm::LLVMContext &VMContext;
+ const ItaniumCXXABI &CXXABI; // Per-module state.
+
+ /// Fields - The fields of the RTTI descriptor currently being built.
+ SmallVector<llvm::Constant *, 16> Fields;
+
+ /// GetAddrOfTypeName - Returns the mangled type name of the given type.
+ llvm::GlobalVariable *
+ GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
+
+ /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
+ /// descriptor of the given type.
+ llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
+
+ /// BuildVTablePointer - Build the vtable pointer for the given type.
+ void BuildVTablePointer(const Type *Ty);
+
+ /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
+ /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
+ void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
+
+ /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
+ /// classes with bases that do not satisfy the abi::__si_class_type_info
+  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
+ void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
+
+ /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
+ /// for pointer types.
+ void BuildPointerTypeInfo(QualType PointeeTy);
+
+ /// BuildObjCObjectTypeInfo - Build the appropriate kind of
+ /// type_info for an object type.
+ void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
+
+ /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
+ /// struct, used for member pointer types.
+ void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
+
+public:
+ ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
+ : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
+
+ // Pointer type info flags.
+ enum {
+ /// PTI_Const - Type has const qualifier.
+ PTI_Const = 0x1,
+
+ /// PTI_Volatile - Type has volatile qualifier.
+ PTI_Volatile = 0x2,
+
+ /// PTI_Restrict - Type has restrict qualifier.
+ PTI_Restrict = 0x4,
+
+ /// PTI_Incomplete - Type is incomplete.
+ PTI_Incomplete = 0x8,
+
+ /// PTI_ContainingClassIncomplete - Containing class is incomplete.
+ /// (in pointer to member).
+ PTI_ContainingClassIncomplete = 0x10
+ };
+
+ // VMI type info flags.
+ enum {
+ /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
+ VMI_NonDiamondRepeat = 0x1,
+
+ /// VMI_DiamondShaped - Class is diamond shaped.
+ VMI_DiamondShaped = 0x2
+ };
+
+ // Base class type info flags.
+ enum {
+ /// BCTI_Virtual - Base class is virtual.
+ BCTI_Virtual = 0x1,
+
+ /// BCTI_Public - Base class is public.
+ BCTI_Public = 0x2
+ };
+
+ /// BuildTypeInfo - Build the RTTI type info struct for the given type.
+ ///
+ /// \param Force - true to force the creation of this RTTI value
+ llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false);
+};
+}
+
+llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
+ QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
+
+ // We know that the mangled name of the type starts at index 4 of the
+ // mangled name of the typename, so we can just index into it in order to
+ // get the mangled name of the type.
+ llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
+ Name.substr(4));
+
+ llvm::GlobalVariable *GV =
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, Init->getType(), Linkage);
+
+ GV->setInitializer(Init);
+
+ return GV;
+}
+
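+// Example (illustrative): for `struct Foo`, mangleCXXRTTIName yields
+// "_ZTS3Foo", so Name.substr(4) strips the 4-character "_ZTS" prefix and
+// the global's initializer is the bare mangled type name, "3Foo".
+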
+llvm::Constant *
+ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
+ // Mangle the RTTI name.
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
+
+ // Look for an existing global.
+ llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
+
+ if (!GV) {
+ // Create a new global variable.
+ GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
+ /*Constant=*/true,
+ llvm::GlobalValue::ExternalLinkage, nullptr,
+ Name);
+ if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (RD->hasAttr<DLLImportAttr>())
+ GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
+ }
+ }
+
+ return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
+}
+
+/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
+/// info for that type is defined in the standard library.
+static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
+ // Itanium C++ ABI 2.9.2:
+ // Basic type information (e.g. for "int", "bool", etc.) will be kept in
+ // the run-time support library. Specifically, the run-time support
+ // library should contain type_info objects for the types X, X* and
+ // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
+ // unsigned char, signed char, short, unsigned short, int, unsigned int,
+ // long, unsigned long, long long, unsigned long long, float, double,
+ // long double, char16_t, char32_t, and the IEEE 754r decimal and
+ // half-precision floating point types.
+ switch (Ty->getKind()) {
+ case BuiltinType::Void:
+ case BuiltinType::NullPtr:
+ case BuiltinType::Bool:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ case BuiltinType::Char_U:
+ case BuiltinType::Char_S:
+ case BuiltinType::UChar:
+ case BuiltinType::SChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ case BuiltinType::Half:
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ case BuiltinType::LongDouble:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::Int128:
+ case BuiltinType::UInt128:
+ case BuiltinType::OCLImage1d:
+ case BuiltinType::OCLImage1dArray:
+ case BuiltinType::OCLImage1dBuffer:
+ case BuiltinType::OCLImage2d:
+ case BuiltinType::OCLImage2dArray:
+ case BuiltinType::OCLImage2dDepth:
+ case BuiltinType::OCLImage2dArrayDepth:
+ case BuiltinType::OCLImage2dMSAA:
+ case BuiltinType::OCLImage2dArrayMSAA:
+ case BuiltinType::OCLImage2dMSAADepth:
+ case BuiltinType::OCLImage2dArrayMSAADepth:
+ case BuiltinType::OCLImage3d:
+ case BuiltinType::OCLSampler:
+ case BuiltinType::OCLEvent:
+ case BuiltinType::OCLClkEvent:
+ case BuiltinType::OCLQueue:
+ case BuiltinType::OCLNDRange:
+ case BuiltinType::OCLReserveID:
+ return true;
+
+ case BuiltinType::Dependent:
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+ llvm_unreachable("asking for RRTI for a placeholder type!");
+
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ llvm_unreachable("FIXME: Objective-C types are unsupported!");
+ }
+
+ llvm_unreachable("Invalid BuiltinType Kind!");
+}
+
+static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
+ QualType PointeeTy = PointerTy->getPointeeType();
+ const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
+ if (!BuiltinTy)
+ return false;
+
+ // Check the qualifiers.
+ Qualifiers Quals = PointeeTy.getQualifiers();
+ Quals.removeConst();
+
+ if (!Quals.empty())
+ return false;
+
+ return TypeInfoIsInStandardLibrary(BuiltinTy);
+}
+
+/// IsStandardLibraryRTTIDescriptor - Returns whether the type
+/// information for the given type exists in the standard library.
+static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
+ // Type info for builtin types is defined in the standard library.
+ if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
+ return TypeInfoIsInStandardLibrary(BuiltinTy);
+
+ // Type info for some pointer types to builtin types is defined in the
+ // standard library.
+ if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
+ return TypeInfoIsInStandardLibrary(PointerTy);
+
+ return false;
+}
+
+/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
+/// the given type exists somewhere else, and that we should not emit the type
+/// information in this translation unit. Assumes that it is not a
+/// standard-library type.
+static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
+ QualType Ty) {
+ ASTContext &Context = CGM.getContext();
+
+ // If RTTI is disabled, assume it might be disabled in the
+ // translation unit that defines any potential key function, too.
+ if (!Context.getLangOpts().RTTI) return false;
+
+ if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (!RD->hasDefinition())
+ return false;
+
+ if (!RD->isDynamicClass())
+ return false;
+
+ // FIXME: this may need to be reconsidered if the key function
+ // changes.
+ // N.B. We must always emit the RTTI data ourselves if there exists a key
+ // function.
+ bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
+ if (CGM.getVTables().isVTableExternal(RD))
+      return !IsDLLImport;
+
+ if (IsDLLImport)
+ return true;
+ }
+
+ return false;
+}
+
+/// IsIncompleteClassType - Returns whether the given record type is incomplete.
+static bool IsIncompleteClassType(const RecordType *RecordTy) {
+ return !RecordTy->getDecl()->isCompleteDefinition();
+}
+
+/// ContainsIncompleteClassType - Returns whether the given type contains an
+/// incomplete class type. This is true if
+///
+/// * The given type is an incomplete class type.
+/// * The given type is a pointer type whose pointee type contains an
+/// incomplete class type.
+/// * The given type is a member pointer type whose class is an incomplete
+/// class type.
+/// * The given type is a member pointer type whose pointee type contains an
+///   incomplete class type.
+static bool ContainsIncompleteClassType(QualType Ty) {
+ if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
+ if (IsIncompleteClassType(RecordTy))
+ return true;
+ }
+
+ if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
+ return ContainsIncompleteClassType(PointerTy->getPointeeType());
+
+ if (const MemberPointerType *MemberPointerTy =
+ dyn_cast<MemberPointerType>(Ty)) {
+ // Check if the class type is incomplete.
+ const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
+ if (IsIncompleteClassType(ClassType))
+ return true;
+
+ return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
+ }
+
+ return false;
+}
+
+// CanUseSingleInheritance - Return whether the given record decl has a "single,
+// public, non-virtual base at offset zero (i.e. the derived class is dynamic
+// iff the base is)", according to the Itanium C++ ABI, 2.9.5p6b.
+static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
+ // Check the number of bases.
+ if (RD->getNumBases() != 1)
+ return false;
+
+ // Get the base.
+ CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
+
+ // Check that the base is not virtual.
+ if (Base->isVirtual())
+ return false;
+
+ // Check that the base is public.
+ if (Base->getAccessSpecifier() != AS_public)
+ return false;
+
+ // Check that the class is dynamic iff the base is.
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ if (!BaseDecl->isEmpty() &&
+ BaseDecl->isDynamicClass() != RD->isDynamicClass())
+ return false;
+
+ return true;
+}
+
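+// Examples (illustrative): `struct D : B { }` qualifies for
+// __si_class_type_info, while `struct D : virtual B { }` and
+// `struct D : protected B { }` do not, nor does a dynamic D whose single
+// public base is a non-empty, non-dynamic class.
+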
+void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
+ // abi::__class_type_info.
+ static const char * const ClassTypeInfo =
+ "_ZTVN10__cxxabiv117__class_type_infoE";
+ // abi::__si_class_type_info.
+ static const char * const SIClassTypeInfo =
+ "_ZTVN10__cxxabiv120__si_class_type_infoE";
+ // abi::__vmi_class_type_info.
+ static const char * const VMIClassTypeInfo =
+ "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
+
+ const char *VTableName = nullptr;
+
+ switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Non-canonical and dependent types shouldn't get here");
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ llvm_unreachable("References shouldn't get here");
+
+ case Type::Auto:
+ llvm_unreachable("Undeduced auto type shouldn't get here");
+
+ case Type::Builtin:
+ // GCC treats vector and complex types as fundamental types.
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::Complex:
+ case Type::Atomic:
+ // FIXME: GCC treats block pointers as fundamental types?!
+ case Type::BlockPointer:
+ // abi::__fundamental_type_info.
+ VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
+ break;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ // abi::__array_type_info.
+ VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
+ break;
+
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ // abi::__function_type_info.
+ VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
+ break;
+
+ case Type::Enum:
+ // abi::__enum_type_info.
+ VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
+ break;
+
+ case Type::Record: {
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
+
+ if (!RD->hasDefinition() || !RD->getNumBases()) {
+ VTableName = ClassTypeInfo;
+ } else if (CanUseSingleInheritance(RD)) {
+ VTableName = SIClassTypeInfo;
+ } else {
+ VTableName = VMIClassTypeInfo;
+ }
+
+ break;
+ }
+
+ case Type::ObjCObject:
+ // Ignore protocol qualifiers.
+ Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
+
+ // Handle id and Class.
+ if (isa<BuiltinType>(Ty)) {
+ VTableName = ClassTypeInfo;
+ break;
+ }
+
+ assert(isa<ObjCInterfaceType>(Ty));
+ // Fall through.
+
+ case Type::ObjCInterface:
+ if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
+ VTableName = SIClassTypeInfo;
+ } else {
+ VTableName = ClassTypeInfo;
+ }
+ break;
+
+ case Type::ObjCObjectPointer:
+ case Type::Pointer:
+ // abi::__pointer_type_info.
+ VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
+ break;
+
+ case Type::MemberPointer:
+ // abi::__pointer_to_member_type_info.
+ VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
+ break;
+ }
+
+ llvm::Constant *VTable =
+ CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
+
+ llvm::Type *PtrDiffTy =
+ CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+
+ // The vtable address point is 2.
+ llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
+ VTable =
+ llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable, Two);
+ VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
+
+ Fields.push_back(VTable);
+}
+
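+// Why the address point is 2: these vtables begin with the offset-to-top
+// and RTTI-pointer slots, so stepping two i8* entries past the vtable
+// symbol lands on the first virtual-function slot, which is what a vptr
+// must point at.
+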
+/// \brief Return the linkage that the type info and type info name constants
+/// should have for the given type.
+static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
+ QualType Ty) {
+ // Itanium C++ ABI 2.9.5p7:
+ // In addition, it and all of the intermediate abi::__pointer_type_info
+ // structs in the chain down to the abi::__class_type_info for the
+ // incomplete class type must be prevented from resolving to the
+ // corresponding type_info structs for the complete class type, possibly
+ // by making them local static objects. Finally, a dummy class RTTI is
+ // generated for the incomplete type that will not resolve to the final
+ // complete class RTTI (because the latter need not exist), possibly by
+ // making it a local static object.
+ if (ContainsIncompleteClassType(Ty))
+ return llvm::GlobalValue::InternalLinkage;
+
+ switch (Ty->getLinkage()) {
+ case NoLinkage:
+ case InternalLinkage:
+ case UniqueExternalLinkage:
+ return llvm::GlobalValue::InternalLinkage;
+
+ case VisibleNoLinkage:
+ case ExternalLinkage:
+ if (!CGM.getLangOpts().RTTI) {
+ // RTTI is not enabled, which means that this type info struct is going
+ // to be used for exception handling. Give it linkonce_odr linkage.
+ return llvm::GlobalValue::LinkOnceODRLinkage;
+ }
+
+ if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
+ if (RD->hasAttr<WeakAttr>())
+ return llvm::GlobalValue::WeakODRLinkage;
+ if (RD->isDynamicClass()) {
+ llvm::GlobalValue::LinkageTypes LT = CGM.getVTableLinkage(RD);
+ // MinGW won't export the RTTI information when there is a key function.
+ // Make sure we emit our own copy instead of attempting to dllimport it.
+ if (RD->hasAttr<DLLImportAttr>() &&
+ llvm::GlobalValue::isAvailableExternallyLinkage(LT))
+ LT = llvm::GlobalValue::LinkOnceODRLinkage;
+ return LT;
+ }
+ }
+
+ return llvm::GlobalValue::LinkOnceODRLinkage;
+ }
+
+ llvm_unreachable("Invalid linkage!");
+}
+
+llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
+ // We want to operate on the canonical type.
+ Ty = CGM.getContext().getCanonicalType(Ty);
+
+ // Check if we've already emitted an RTTI descriptor for this type.
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
+
+ llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
+ if (OldGV && !OldGV->isDeclaration()) {
+ assert(!OldGV->hasAvailableExternallyLinkage() &&
+ "available_externally typeinfos not yet implemented");
+
+ return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
+ }
+
+ // Check if there is already an external RTTI descriptor for this type.
+ bool IsStdLib = IsStandardLibraryRTTIDescriptor(Ty);
+ if (!Force && (IsStdLib || ShouldUseExternalRTTIDescriptor(CGM, Ty)))
+ return GetAddrOfExternalRTTIDescriptor(Ty);
+
+  // Standard-library type info is emitted with external linkage.
+ llvm::GlobalVariable::LinkageTypes Linkage;
+ if (IsStdLib)
+ Linkage = llvm::GlobalValue::ExternalLinkage;
+ else
+ Linkage = getTypeInfoLinkage(CGM, Ty);
+
+ // Add the vtable pointer.
+ BuildVTablePointer(cast<Type>(Ty));
+
+ // And the name.
+ llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
+ llvm::Constant *TypeNameField;
+
+ // If we're supposed to demote the visibility, be sure to set a flag
+ // to use a string comparison for type_info comparisons.
+ ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
+ CXXABI.classifyRTTIUniqueness(Ty, Linkage);
+ if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
+ // The flag is the sign bit, which on ARM64 is defined to be clear
+ // for global pointers. This is very ARM64-specific.
+ TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
+ llvm::Constant *flag =
+ llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
+ TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
+ TypeNameField =
+ llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
+ } else {
+ TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
+ }
+ Fields.push_back(TypeNameField);
+
+ switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Non-canonical and dependent types shouldn't get here");
+
+ // GCC treats vector types as fundamental types.
+ case Type::Builtin:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::Complex:
+ case Type::BlockPointer:
+ // Itanium C++ ABI 2.9.5p4:
+ // abi::__fundamental_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ llvm_unreachable("References shouldn't get here");
+
+ case Type::Auto:
+ llvm_unreachable("Undeduced auto type shouldn't get here");
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ // Itanium C++ ABI 2.9.5p5:
+ // abi::__array_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ // Itanium C++ ABI 2.9.5p5:
+ // abi::__function_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::Enum:
+ // Itanium C++ ABI 2.9.5p5:
+ // abi::__enum_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::Record: {
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
+ if (!RD->hasDefinition() || !RD->getNumBases()) {
+ // We don't need to emit any fields.
+ break;
+ }
+
+ if (CanUseSingleInheritance(RD))
+ BuildSIClassTypeInfo(RD);
+ else
+ BuildVMIClassTypeInfo(RD);
+
+ break;
+ }
+
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
+ break;
+
+ case Type::ObjCObjectPointer:
+ BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
+ break;
+
+ case Type::Pointer:
+ BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
+ break;
+
+ case Type::MemberPointer:
+ BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
+ break;
+
+ case Type::Atomic:
+ // No fields, at least for the moment.
+ break;
+ }
+
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
+
+ llvm::Module &M = CGM.getModule();
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(M, Init->getType(),
+ /*Constant=*/true, Linkage, Init, Name);
+
+ // If there's already an old global variable, replace it with the new one.
+ if (OldGV) {
+ GV->takeName(OldGV);
+ llvm::Constant *NewPtr =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtr);
+ OldGV->eraseFromParent();
+ }
+
+ if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
+ GV->setComdat(M.getOrInsertComdat(GV->getName()));
+
+ // The Itanium ABI specifies that type_info objects must be globally
+ // unique, with one exception: if the type is an incomplete class
+ // type or a (possibly indirect) pointer to one. That exception
+ // affects the general case of comparing type_info objects produced
+ // by the typeid operator, which is why the comparison operators on
+ // std::type_info generally use the type_info name pointers instead
+ // of the object addresses. However, the language's built-in uses
+ // of RTTI generally require class types to be complete, even when
+ // manipulating pointers to those class types. This allows the
+ // implementation of dynamic_cast to rely on address equality tests,
+ // which is much faster.
+
+ // All of this is to say that it's important that both the type_info
+ // object and the type_info name be uniqued when weakly emitted.
+
+ // Give the type_info object and name the formal visibility of the
+ // type itself.
+ llvm::GlobalValue::VisibilityTypes llvmVisibility;
+ if (llvm::GlobalValue::isLocalLinkage(Linkage))
+ // If the linkage is local, only default visibility makes sense.
+ llvmVisibility = llvm::GlobalValue::DefaultVisibility;
+ else if (RTTIUniqueness == ItaniumCXXABI::RUK_NonUniqueHidden)
+ llvmVisibility = llvm::GlobalValue::HiddenVisibility;
+ else
+ llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
+ TypeName->setVisibility(llvmVisibility);
+ GV->setVisibility(llvmVisibility);
+
+ return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
+}
+
+/// ComputeQualifierFlags - Compute the pointer type info flags from the
+/// given qualifier.
+static unsigned ComputeQualifierFlags(Qualifiers Quals) {
+ unsigned Flags = 0;
+
+ if (Quals.hasConst())
+ Flags |= ItaniumRTTIBuilder::PTI_Const;
+ if (Quals.hasVolatile())
+ Flags |= ItaniumRTTIBuilder::PTI_Volatile;
+ if (Quals.hasRestrict())
+ Flags |= ItaniumRTTIBuilder::PTI_Restrict;
+
+ return Flags;
+}
+
+/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
+/// for the given Objective-C object type.
+void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
+ // Drop qualifiers.
+ const Type *T = OT->getBaseType().getTypePtr();
+ assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
+
+ // The builtin types are abi::__class_type_infos and don't require
+ // extra fields.
+ if (isa<BuiltinType>(T)) return;
+
+ ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
+ ObjCInterfaceDecl *Super = Class->getSuperClass();
+
+ // Root classes are also __class_type_info.
+ if (!Super) return;
+
+ QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
+
+ // Everything else is single inheritance.
+ llvm::Constant *BaseTypeInfo =
+ ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
+ Fields.push_back(BaseTypeInfo);
+}
+
+/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
+/// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
+void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
+ // Itanium C++ ABI 2.9.5p6b:
+ // It adds to abi::__class_type_info a single member pointing to the
+ // type_info structure for the base type,
+ llvm::Constant *BaseTypeInfo =
+ ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
+ Fields.push_back(BaseTypeInfo);
+}
+
+namespace {
+ /// SeenBases - Contains virtual and non-virtual bases seen when traversing
+ /// a class hierarchy.
+ struct SeenBases {
+ llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
+ llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
+ };
+}
+
+/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
+/// abi::__vmi_class_type_info.
+///
+static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
+ SeenBases &Bases) {
+
+ unsigned Flags = 0;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ if (Base->isVirtual()) {
+ // Mark the virtual base as seen.
+ if (!Bases.VirtualBases.insert(BaseDecl).second) {
+ // If this virtual base has been seen before, then the class is diamond
+ // shaped.
+ Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
+ } else {
+ if (Bases.NonVirtualBases.count(BaseDecl))
+ Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
+ }
+ } else {
+ // Mark the non-virtual base as seen.
+ if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
+ // If this non-virtual base has been seen before, then the class has non-
+ // diamond shaped repeated inheritance.
+ Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
+ } else {
+ if (Bases.VirtualBases.count(BaseDecl))
+ Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
+ }
+ }
+
+ // Walk all bases.
+ for (const auto &I : BaseDecl->bases())
+ Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
+
+ return Flags;
+}
+
+static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
+ unsigned Flags = 0;
+ SeenBases Bases;
+
+ // Walk all bases.
+ for (const auto &I : RD->bases())
+ Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
+
+ return Flags;
+}
+
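+// Illustrative flag computation (not from this file):
+//
+//   struct A { virtual ~A(); };
+//   struct B : virtual A { };
+//   struct C : virtual A { };
+//   struct D : B, C { };   // A is reached twice as a virtual base, so
+//                          // D's flags include VMI_DiamondShaped
+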
+/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
+/// classes with bases that do not satisfy the abi::__si_class_type_info
+/// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
+void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
+ llvm::Type *UnsignedIntLTy =
+ CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
+
+ // Itanium C++ ABI 2.9.5p6c:
+ // __flags is a word with flags describing details about the class
+ // structure, which may be referenced by using the __flags_masks
+ // enumeration. These flags refer to both direct and indirect bases.
+ unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
+ Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
+
+ // Itanium C++ ABI 2.9.5p6c:
+ // __base_count is a word with the number of direct proper base class
+ // descriptions that follow.
+ Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
+
+ if (!RD->getNumBases())
+ return;
+
+ llvm::Type *LongLTy =
+ CGM.getTypes().ConvertType(CGM.getContext().LongTy);
+
+ // Now add the base class descriptions.
+
+ // Itanium C++ ABI 2.9.5p6c:
+ // __base_info[] is an array of base class descriptions -- one for every
+ // direct proper base. Each description is of the type:
+ //
+ // struct abi::__base_class_type_info {
+ // public:
+ // const __class_type_info *__base_type;
+ // long __offset_flags;
+ //
+ // enum __offset_flags_masks {
+ // __virtual_mask = 0x1,
+ // __public_mask = 0x2,
+ // __offset_shift = 8
+ // };
+ // };
+ for (const auto &Base : RD->bases()) {
+ // The __base_type member points to the RTTI for the base type.
+ Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
+
+ int64_t OffsetFlags = 0;
+
+ // All but the lower 8 bits of __offset_flags are a signed offset.
+ // For a non-virtual base, this is the offset in the object of the base
+ // subobject. For a virtual base, this is the offset in the virtual table of
+ // the virtual base offset for the virtual base referenced (negative).
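+  // For example (an illustrative encoding, not from the ABI text): a public
+  // non-virtual base at byte offset 8 is encoded as
+  //   (8 << __offset_shift) | __public_mask == 0x802.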
+ CharUnits Offset;
+ if (Base.isVirtual())
+ Offset =
+ CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
+ else {
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+ Offset = Layout.getBaseClassOffset(BaseDecl);
+  }
+
+ OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
+
+ // The low-order byte of __offset_flags contains flags, as given by the
+ // masks from the enumeration __offset_flags_masks.
+ if (Base.isVirtual())
+ OffsetFlags |= BCTI_Virtual;
+ if (Base.getAccessSpecifier() == AS_public)
+ OffsetFlags |= BCTI_Public;
+
+ Fields.push_back(llvm::ConstantInt::get(LongLTy, OffsetFlags));
+ }
+}
+
+/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
+/// used for pointer types.
+void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
+ Qualifiers Quals;
+ QualType UnqualifiedPointeeTy =
+ CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
+
+ // Itanium C++ ABI 2.9.5p7:
+ // __flags is a flag word describing the cv-qualification and other
+  //   attributes of the type pointed to.
+ unsigned Flags = ComputeQualifierFlags(Quals);
+
+ // Itanium C++ ABI 2.9.5p7:
+ // When the abi::__pbase_type_info is for a direct or indirect pointer to an
+ // incomplete class type, the incomplete target type flag is set.
+ if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
+ Flags |= PTI_Incomplete;
+
+ llvm::Type *UnsignedIntLTy =
+ CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
+ Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
+
+ // Itanium C++ ABI 2.9.5p7:
+ // __pointee is a pointer to the std::type_info derivation for the
+ // unqualified type being pointed to.
+ llvm::Constant *PointeeTypeInfo =
+ ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(UnqualifiedPointeeTy);
+ Fields.push_back(PointeeTypeInfo);
+}
+
+/// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
+/// struct, used for member pointer types.
+void
+ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
+ QualType PointeeTy = Ty->getPointeeType();
+
+ Qualifiers Quals;
+ QualType UnqualifiedPointeeTy =
+ CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
+
+ // Itanium C++ ABI 2.9.5p7:
+ // __flags is a flag word describing the cv-qualification and other
+ // attributes of the type pointed to.
+ unsigned Flags = ComputeQualifierFlags(Quals);
+
+ const RecordType *ClassType = cast<RecordType>(Ty->getClass());
+
+ // Itanium C++ ABI 2.9.5p7:
+ // When the abi::__pbase_type_info is for a direct or indirect pointer to an
+ // incomplete class type, the incomplete target type flag is set.
+ if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
+ Flags |= PTI_Incomplete;
+
+ if (IsIncompleteClassType(ClassType))
+ Flags |= PTI_ContainingClassIncomplete;
+
+ llvm::Type *UnsignedIntLTy =
+ CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
+ Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
+
+ // Itanium C++ ABI 2.9.5p7:
+ // __pointee is a pointer to the std::type_info derivation for the
+ // unqualified type being pointed to.
+ llvm::Constant *PointeeTypeInfo =
+ ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(UnqualifiedPointeeTy);
+ Fields.push_back(PointeeTypeInfo);
+
+ // Itanium C++ ABI 2.9.5p9:
+ // __context is a pointer to an abi::__class_type_info corresponding to the
+ // class type containing the member pointed to
+ // (e.g., the "A" in "int A::*").
+ Fields.push_back(
+ ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
+}
+
+llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
+ return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
+}
+
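+/// Emit the RTTI descriptors for a fundamental type together with its
+/// pointer variants: for a type T, this emits the type_info objects for
+/// T, T*, and const T*.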
+void ItaniumCXXABI::EmitFundamentalRTTIDescriptor(QualType Type) {
+ QualType PointerType = getContext().getPointerType(Type);
+ QualType PointerTypeConst = getContext().getPointerType(Type.withConst());
+ ItaniumRTTIBuilder(*this).BuildTypeInfo(Type, true);
+ ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerType, true);
+ ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerTypeConst, true);
+}
+
+void ItaniumCXXABI::EmitFundamentalRTTIDescriptors() {
+ QualType FundamentalTypes[] = {
+ getContext().VoidTy, getContext().NullPtrTy,
+ getContext().BoolTy, getContext().WCharTy,
+ getContext().CharTy, getContext().UnsignedCharTy,
+ getContext().SignedCharTy, getContext().ShortTy,
+ getContext().UnsignedShortTy, getContext().IntTy,
+ getContext().UnsignedIntTy, getContext().LongTy,
+ getContext().UnsignedLongTy, getContext().LongLongTy,
+ getContext().UnsignedLongLongTy, getContext().HalfTy,
+ getContext().FloatTy, getContext().DoubleTy,
+ getContext().LongDoubleTy, getContext().Char16Ty,
+ getContext().Char32Ty,
+ };
+ for (const QualType &FundamentalType : FundamentalTypes)
+ EmitFundamentalRTTIDescriptor(FundamentalType);
+}
+
+/// What sort of uniqueness rules should we use for the RTTI for the
+/// given type?
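+///
+/// (Summary: RUK_Unique RTTI objects are process-unique and may be compared
+/// by address; the NonUnique kinds must compare the mangled type name
+/// strings instead, with either hidden or default symbol visibility.)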
+ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
+ QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
+ if (shouldRTTIBeUnique())
+ return RUK_Unique;
+
+ // It's only necessary for linkonce_odr or weak_odr linkage.
+ if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
+ Linkage != llvm::GlobalValue::WeakODRLinkage)
+ return RUK_Unique;
+
+ // It's only necessary with default visibility.
+ if (CanTy->getVisibility() != DefaultVisibility)
+ return RUK_Unique;
+
+ // If we're not required to publish this symbol, hide it.
+ if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
+ return RUK_NonUniqueHidden;
+
+ // If we're required to publish this symbol, as we might be under an
+ // explicit instantiation, leave it with default visibility but
+ // enable string-comparisons.
+ assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
+ return RUK_NonUniqueVisible;
+}
+
+// Find out how to codegen the complete destructor and constructor
+namespace {
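+// An inferred summary of the strategies chosen below:
+//   Emit:   emit this structor variant as its own function body.
+//   RAUW:   emit only the base variant and replace uses of the complete
+//           variant's symbol with it.
+//   Alias:  emit the complete variant as a global alias of the base variant.
+//   COMDAT: like Alias, but the emitted definition is also placed in a
+//           COMDAT group named with the C5/D5 mangling (ELF only).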
+enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
+}
+static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
+ const CXXMethodDecl *MD) {
+ if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
+ return StructorCodegen::Emit;
+
+ // The complete and base structors are not equivalent if there are any virtual
+ // bases, so emit separate functions.
+ if (MD->getParent()->getNumVBases())
+ return StructorCodegen::Emit;
+
+ GlobalDecl AliasDecl;
+ if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ AliasDecl = GlobalDecl(DD, Dtor_Complete);
+ } else {
+ const auto *CD = cast<CXXConstructorDecl>(MD);
+ AliasDecl = GlobalDecl(CD, Ctor_Complete);
+ }
+ llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
+
+ if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
+ return StructorCodegen::RAUW;
+
+ // FIXME: Should we allow available_externally aliases?
+ if (!llvm::GlobalAlias::isValidLinkage(Linkage))
+ return StructorCodegen::RAUW;
+
+ if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
+ // Only ELF supports COMDATs with arbitrary names (C5/D5).
+ if (CGM.getTarget().getTriple().isOSBinFormatELF())
+ return StructorCodegen::COMDAT;
+ return StructorCodegen::Emit;
+ }
+
+ return StructorCodegen::Alias;
+}
+
+static void emitConstructorDestructorAlias(CodeGenModule &CGM,
+ GlobalDecl AliasDecl,
+ GlobalDecl TargetDecl) {
+ llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
+
+ StringRef MangledName = CGM.getMangledName(AliasDecl);
+ llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
+ if (Entry && !Entry->isDeclaration())
+ return;
+
+ auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
+
+ // Create the alias with no name.
+ auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
+
+ // Switch any previous uses to the alias.
+ if (Entry) {
+ assert(Entry->getType() == Aliasee->getType() &&
+ "declaration exists with different type");
+ Alias->takeName(Entry);
+ Entry->replaceAllUsesWith(Alias);
+ Entry->eraseFromParent();
+ } else {
+ Alias->setName(MangledName);
+ }
+
+ // Finally, set up the alias with its proper name and attributes.
+ CGM.setAliasAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
+}
+
+void ItaniumCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
+ StructorType Type) {
+ auto *CD = dyn_cast<CXXConstructorDecl>(MD);
+ const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
+
+ StructorCodegen CGType = getCodegenToUse(CGM, MD);
+
+ if (Type == StructorType::Complete) {
+ GlobalDecl CompleteDecl;
+ GlobalDecl BaseDecl;
+ if (CD) {
+ CompleteDecl = GlobalDecl(CD, Ctor_Complete);
+ BaseDecl = GlobalDecl(CD, Ctor_Base);
+ } else {
+ CompleteDecl = GlobalDecl(DD, Dtor_Complete);
+ BaseDecl = GlobalDecl(DD, Dtor_Base);
+ }
+
+ if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
+ emitConstructorDestructorAlias(CGM, CompleteDecl, BaseDecl);
+ return;
+ }
+
+ if (CGType == StructorCodegen::RAUW) {
+ StringRef MangledName = CGM.getMangledName(CompleteDecl);
+ auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
+ CGM.addReplacement(MangledName, Aliasee);
+ return;
+ }
+ }
+
+ // The base destructor is equivalent to the base destructor of its
+ // base class if there is exactly one non-virtual base class with a
+ // non-trivial destructor, there are no fields with a non-trivial
+ // destructor, and the body of the destructor is trivial.
+ if (DD && Type == StructorType::Base && CGType != StructorCodegen::COMDAT &&
+ !CGM.TryEmitBaseDestructorAsAlias(DD))
+ return;
+
+ llvm::Function *Fn = CGM.codegenCXXStructor(MD, Type);
+
+ if (CGType == StructorCodegen::COMDAT) {
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ if (DD)
+ getMangleContext().mangleCXXDtorComdat(DD, Out);
+ else
+ getMangleContext().mangleCXXCtorComdat(CD, Out);
+ llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
+ Fn->setComdat(C);
+ } else {
+ CGM.maybeSetTrivialComdat(*MD, *Fn);
+ }
+}
+
+static llvm::Constant *getBeginCatchFn(CodeGenModule &CGM) {
+ // void *__cxa_begin_catch(void*);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(
+ CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
+
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
+}
+
+static llvm::Constant *getEndCatchFn(CodeGenModule &CGM) {
+ // void __cxa_end_catch();
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);
+
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
+}
+
+static llvm::Constant *getGetExceptionPtrFn(CodeGenModule &CGM) {
+ // void *__cxa_get_exception_ptr(void*);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(
+ CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
+
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
+}
+
+namespace {
+ /// A cleanup to call __cxa_end_catch. In many cases, the caught
+ /// exception type lets us state definitively that the thrown exception
+ /// type does not have a destructor. In particular:
+ /// - Catch-alls tell us nothing, so we have to conservatively
+ /// assume that the thrown exception might have a destructor.
+ /// - Catches by reference behave according to their base types.
+ /// - Catches of non-record types will only trigger for exceptions
+ /// of non-record types, which never have destructors.
+ /// - Catches of record types can trigger for arbitrary subclasses
+ /// of the caught type, so we have to assume the actual thrown
+ /// exception type might have a throwing destructor, even if the
+ /// caught type's destructor is trivial or nothrow.
+ struct CallEndCatch final : EHScopeStack::Cleanup {
+ CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
+ bool MightThrow;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ if (!MightThrow) {
+ CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
+ return;
+ }
+
+ CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
+ }
+ };
+}
+
+/// Emits a call to __cxa_begin_catch and enters a cleanup to call
+/// __cxa_end_catch.
+///
+/// \param EndMightThrow - true if __cxa_end_catch might throw
+static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
+ llvm::Value *Exn,
+ bool EndMightThrow) {
+ llvm::CallInst *call =
+ CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
+
+ CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
+
+ return call;
+}
+
+/// A "special initializer" callback for initializing a catch
+/// parameter during catch initialization.
+static void InitCatchParam(CodeGenFunction &CGF,
+ const VarDecl &CatchParam,
+ Address ParamAddr,
+ SourceLocation Loc) {
+ // Load the exception from where the landing pad saved it.
+ llvm::Value *Exn = CGF.getExceptionFromSlot();
+
+ CanQualType CatchType =
+ CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
+ llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
+
+ // If we're catching by reference, we can just cast the object
+ // pointer to the appropriate pointer.
+ if (isa<ReferenceType>(CatchType)) {
+ QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
+ bool EndCatchMightThrow = CaughtType->isRecordType();
+
+ // __cxa_begin_catch returns the adjusted object pointer.
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
+
+ // We have no way to tell the personality function that we're
+ // catching by reference, so if we're catching a pointer,
+ // __cxa_begin_catch will actually return that pointer by value.
+ if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
+ QualType PointeeType = PT->getPointeeType();
+
+ // When catching by reference, generally we should just ignore
+ // this by-value pointer and use the exception object instead.
+ if (!PointeeType->isRecordType()) {
+
+ // Exn points to the struct _Unwind_Exception header, which
+ // we have to skip past in order to reach the exception data.
+ unsigned HeaderSize =
+ CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
+ AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);
+
+      // However, if we're catching a pointer-to-record type, that won't
+ // work, because the personality function might have adjusted
+ // the pointer. There's actually no way for us to fully satisfy
+ // the language/ABI contract here: we can't use Exn because it
+ // might have the wrong adjustment, but we can't use the by-value
+ // pointer because it's off by a level of abstraction.
+ //
+ // The current solution is to dump the adjusted pointer into an
+ // alloca, which breaks language semantics (because changing the
+ // pointer doesn't change the exception) but at least works.
+ // The better solution would be to filter out non-exact matches
+ // and rethrow them, but this is tricky because the rethrow
+ // really needs to be catchable by other sites at this landing
+ // pad. The best solution is to fix the personality function.
+ } else {
+ // Pull the pointer for the reference type off.
+ llvm::Type *PtrTy =
+ cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
+
+ // Create the temporary and write the adjusted pointer into it.
+ Address ExnPtrTmp =
+ CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
+ llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+ CGF.Builder.CreateStore(Casted, ExnPtrTmp);
+
+ // Bind the reference to the temporary.
+ AdjustedExn = ExnPtrTmp.getPointer();
+ }
+ }
+
+ llvm::Value *ExnCast =
+ CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
+ CGF.Builder.CreateStore(ExnCast, ParamAddr);
+ return;
+ }
+
+ // Scalars and complexes.
+ TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
+ if (TEK != TEK_Aggregate) {
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
+
+ // If the catch type is a pointer type, __cxa_begin_catch returns
+ // the pointer by value.
+ if (CatchType->hasPointerRepresentation()) {
+ llvm::Value *CastExn =
+ CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
+
+ switch (CatchType.getQualifiers().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ CastExn = CGF.EmitARCRetainNonBlock(CastExn);
+ // fallthrough
+
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ CGF.Builder.CreateStore(CastExn, ParamAddr);
+ return;
+
+ case Qualifiers::OCL_Weak:
+ CGF.EmitARCInitWeak(ParamAddr, CastExn);
+ return;
+ }
+ llvm_unreachable("bad ownership qualifier!");
+ }
+
+ // Otherwise, it returns a pointer into the exception object.
+
+ llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
+ llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+
+ LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
+ LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
+ switch (TEK) {
+ case TEK_Complex:
+ CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
+ /*init*/ true);
+ return;
+ case TEK_Scalar: {
+ llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
+ CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
+ return;
+ }
+ case TEK_Aggregate:
+ llvm_unreachable("evaluation kind filtered out!");
+ }
+ llvm_unreachable("bad evaluation kind");
+ }
+
+ assert(isa<RecordType>(CatchType) && "unexpected catch type!");
+ auto catchRD = CatchType->getAsCXXRecordDecl();
+ CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
+
+ llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
+
+ // Check for a copy expression. If we don't have a copy expression,
+ // that means a trivial copy is okay.
+ const Expr *copyExpr = CatchParam.getInit();
+ if (!copyExpr) {
+ llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
+ Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
+ caughtExnAlignment);
+ CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType);
+ return;
+ }
+
+ // We have to call __cxa_get_exception_ptr to get the adjusted
+ // pointer before copying.
+ llvm::CallInst *rawAdjustedExn =
+ CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
+
+ // Cast that to the appropriate type.
+ Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
+ caughtExnAlignment);
+
+ // The copy expression is defined in terms of an OpaqueValueExpr.
+ // Find it and map it to the adjusted expression.
+ CodeGenFunction::OpaqueValueMapping
+ opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
+ CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
+
+ // Call the copy ctor in a terminate scope.
+ CGF.EHStack.pushTerminate();
+
+ // Perform the copy construction.
+ CGF.EmitAggExpr(copyExpr,
+ AggValueSlot::forAddr(ParamAddr, Qualifiers(),
+ AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+
+ // Leave the terminate scope.
+ CGF.EHStack.popTerminate();
+
+ // Undo the opaque value mapping.
+ opaque.pop();
+
+ // Finally we can call __cxa_begin_catch.
+ CallBeginCatch(CGF, Exn, true);
+}
+
+/// Begins a catch statement by initializing the catch variable and
+/// calling __cxa_begin_catch.
+void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
+ const CXXCatchStmt *S) {
+ // We have to be very careful with the ordering of cleanups here:
+ // C++ [except.throw]p4:
+ // The destruction [of the exception temporary] occurs
+ // immediately after the destruction of the object declared in
+ // the exception-declaration in the handler.
+ //
+ // So the precise ordering is:
+ // 1. Construct catch variable.
+ // 2. __cxa_begin_catch
+ // 3. Enter __cxa_end_catch cleanup
+ // 4. Enter dtor cleanup
+ //
+ // We do this by using a slightly abnormal initialization process.
+ // Delegation sequence:
+ // - ExitCXXTryStmt opens a RunCleanupsScope
+ // - EmitAutoVarAlloca creates the variable and debug info
+ // - InitCatchParam initializes the variable from the exception
+ // - CallBeginCatch calls __cxa_begin_catch
+ // - CallBeginCatch enters the __cxa_end_catch cleanup
+ // - EmitAutoVarCleanups enters the variable destructor cleanup
+ // - EmitCXXTryStmt emits the code for the catch body
+  //    - EmitCXXTryStmt closes the RunCleanupsScope
+
+ VarDecl *CatchParam = S->getExceptionDecl();
+ if (!CatchParam) {
+ llvm::Value *Exn = CGF.getExceptionFromSlot();
+ CallBeginCatch(CGF, Exn, true);
+ return;
+ }
+
+ // Emit the local.
+ CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
+ InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getLocStart());
+ CGF.EmitAutoVarCleanups(var);
+}
+
+/// Get or define the following function:
+/// void @__clang_call_terminate(i8* %exn) nounwind noreturn
+/// This code is used only in C++.
+static llvm::Constant *getClangCallTerminateFn(CodeGenModule &CGM) {
+ llvm::FunctionType *fnTy =
+ llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
+ llvm::Constant *fnRef =
+ CGM.CreateRuntimeFunction(fnTy, "__clang_call_terminate");
+
+ llvm::Function *fn = dyn_cast<llvm::Function>(fnRef);
+ if (fn && fn->empty()) {
+ fn->setDoesNotThrow();
+ fn->setDoesNotReturn();
+
+ // What we really want is to massively penalize inlining without
+ // forbidding it completely. The difference between that and
+ // 'noinline' is negligible.
+ fn->addFnAttr(llvm::Attribute::NoInline);
+
+ // Allow this function to be shared across translation units, but
+ // we don't want it to turn into an exported symbol.
+ fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
+ fn->setVisibility(llvm::Function::HiddenVisibility);
+ if (CGM.supportsCOMDAT())
+ fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
+
+ // Set up the function.
+ llvm::BasicBlock *entry =
+ llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
+ CGBuilderTy builder(CGM, entry);
+
+ // Pull the exception pointer out of the parameter list.
+ llvm::Value *exn = &*fn->arg_begin();
+
+ // Call __cxa_begin_catch(exn).
+ llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
+ catchCall->setDoesNotThrow();
+ catchCall->setCallingConv(CGM.getRuntimeCC());
+
+ // Call std::terminate().
+ llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
+ termCall->setDoesNotThrow();
+ termCall->setDoesNotReturn();
+ termCall->setCallingConv(CGM.getRuntimeCC());
+
+ // std::terminate cannot return.
+ builder.CreateUnreachable();
+ }
+
+ return fnRef;
+}
+
+llvm::CallInst *
+ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
+ llvm::Value *Exn) {
+ // In C++, we want to call __cxa_begin_catch() before terminating.
+ if (Exn) {
+ assert(CGF.CGM.getLangOpts().CPlusPlus);
+ return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
+ }
+ return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
new file mode 100644
index 0000000..93210d5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -0,0 +1,4175 @@
+//===--- MicrosoftCXXABI.cpp - Emit LLVM Code from ASTs for a Module ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ code generation targeting the Microsoft Visual C++ ABI.
+// The class in this file generates structures that follow the Microsoft
+// Visual C++ ABI, which is actually not very well documented at all outside
+// of Microsoft.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCXXABI.h"
+#include "CGCleanup.h"
+#include "CGVTables.h"
+#include "CodeGenModule.h"
+#include "CodeGenTypes.h"
+#include "TargetInfo.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/VTableBuilder.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Intrinsics.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+
+/// Holds all the vbtable globals for a given class.
+struct VBTableGlobals {
+ const VPtrInfoVector *VBTables;
+ SmallVector<llvm::GlobalVariable *, 2> Globals;
+};
+
+class MicrosoftCXXABI : public CGCXXABI {
+public:
+ MicrosoftCXXABI(CodeGenModule &CGM)
+ : CGCXXABI(CGM), BaseClassDescriptorType(nullptr),
+ ClassHierarchyDescriptorType(nullptr),
+ CompleteObjectLocatorType(nullptr), CatchableTypeType(nullptr),
+ ThrowInfoType(nullptr) {}
+
+ bool HasThisReturn(GlobalDecl GD) const override;
+ bool hasMostDerivedReturn(GlobalDecl GD) const override;
+
+ bool classifyReturnType(CGFunctionInfo &FI) const override;
+
+ RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override;
+
+ bool isSRetParameterAfterThis() const override { return true; }
+
+ bool isThisCompleteObject(GlobalDecl GD) const override {
+ // The Microsoft ABI doesn't use separate complete-object vs.
+ // base-object variants of constructors, but it does of destructors.
+ if (isa<CXXDestructorDecl>(GD.getDecl())) {
+ switch (GD.getDtorType()) {
+ case Dtor_Complete:
+ case Dtor_Deleting:
+ return true;
+
+ case Dtor_Base:
+ return false;
+
+ case Dtor_Comdat: llvm_unreachable("emitting dtor comdat as function?");
+ }
+ llvm_unreachable("bad dtor kind");
+ }
+
+ // No other kinds.
+ return false;
+ }
+
+ size_t getSrcArgforCopyCtor(const CXXConstructorDecl *CD,
+ FunctionArgList &Args) const override {
+ assert(Args.size() >= 2 &&
+ "expected the arglist to have at least two args!");
+ // The 'most_derived' parameter goes second if the ctor is variadic and
+ // has v-bases.
+ if (CD->getParent()->getNumVBases() > 0 &&
+ CD->getType()->castAs<FunctionProtoType>()->isVariadic())
+ return 2;
+ return 1;
+ }
+
+ std::vector<CharUnits> getVBPtrOffsets(const CXXRecordDecl *RD) override {
+ std::vector<CharUnits> VBPtrOffsets;
+ const ASTContext &Context = getContext();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ const VBTableGlobals &VBGlobals = enumerateVBTables(RD);
+ for (const VPtrInfo *VBT : *VBGlobals.VBTables) {
+ const ASTRecordLayout &SubobjectLayout =
+ Context.getASTRecordLayout(VBT->BaseWithVPtr);
+ CharUnits Offs = VBT->NonVirtualOffset;
+ Offs += SubobjectLayout.getVBPtrOffset();
+ if (VBT->getVBaseWithVPtr())
+ Offs += Layout.getVBaseClassOffset(VBT->getVBaseWithVPtr());
+ VBPtrOffsets.push_back(Offs);
+ }
+ llvm::array_pod_sort(VBPtrOffsets.begin(), VBPtrOffsets.end());
+ return VBPtrOffsets;
+ }
+
+ StringRef GetPureVirtualCallName() override { return "_purecall"; }
+ StringRef GetDeletedVirtualCallName() override { return "_purecall"; }
+
+ void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
+ Address Ptr, QualType ElementType,
+ const CXXDestructorDecl *Dtor) override;
+
+ void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
+ void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
+
+ void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
+
+ llvm::GlobalVariable *getMSCompleteObjectLocator(const CXXRecordDecl *RD,
+ const VPtrInfo *Info);
+
+ llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
+ CatchTypeInfo
+ getAddrOfCXXCatchHandlerType(QualType Ty, QualType CatchHandlerType) override;
+
+ /// MSVC needs an extra flag to indicate a catchall.
+ CatchTypeInfo getCatchAllTypeInfo() override {
+ return CatchTypeInfo{nullptr, 0x40};
+ }
+
+ bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
+ void EmitBadTypeidCall(CodeGenFunction &CGF) override;
+ llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
+ Address ThisPtr,
+ llvm::Type *StdTypeInfoPtrTy) override;
+
+ bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
+ QualType SrcRecordTy) override;
+
+ llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
+ QualType SrcRecordTy, QualType DestTy,
+ QualType DestRecordTy,
+ llvm::BasicBlock *CastEnd) override;
+
+ llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
+ QualType SrcRecordTy,
+ QualType DestTy) override;
+
+ bool EmitBadCastCall(CodeGenFunction &CGF) override;
+ bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override {
+ return false;
+ }
+
+ llvm::Value *
+ GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl) override;
+
+ llvm::BasicBlock *
+ EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
+ const CXXRecordDecl *RD) override;
+
+ void initializeHiddenVirtualInheritanceMembers(CodeGenFunction &CGF,
+ const CXXRecordDecl *RD) override;
+
+ void EmitCXXConstructors(const CXXConstructorDecl *D) override;
+
+ // Background on MSVC destructors
+ // ==============================
+ //
+ // Both Itanium and MSVC ABIs have destructor variants. The variant names
+ // roughly correspond in the following way:
+ // Itanium Microsoft
+ // Base -> no name, just ~Class
+ // Complete -> vbase destructor
+ // Deleting -> scalar deleting destructor
+ // vector deleting destructor
+ //
+ // The base and complete destructors are the same as in Itanium, although the
+ // complete destructor does not accept a VTT parameter when there are virtual
+ // bases. A separate mechanism involving vtordisps is used to ensure that
+ // virtual methods of destroyed subobjects are not called.
+ //
+ // The deleting destructors accept an i32 bitfield as a second parameter. Bit
+ // 1 indicates if the memory should be deleted. Bit 2 indicates if the this
+ // pointer points to an array. The scalar deleting destructor assumes that
+ // bit 2 is zero, and therefore does not contain a loop.
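+  // (Illustration, assuming the bit layout above: a plain 'delete p' calls
+  // the scalar deleting destructor with the flags argument set to 1, while
+  // 'delete[] p' reaches the vector deleting destructor with 3.)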
+ //
+ // For virtual destructors, only one entry is reserved in the vftable, and it
+ // always points to the vector deleting destructor. The vector deleting
+ // destructor is the most general, so it can be used to destroy objects in
+ // place, delete single heap objects, or delete arrays.
+ //
+ // A TU defining a non-inline destructor is only guaranteed to emit a base
+ // destructor, and all of the other variants are emitted on an as-needed basis
+ // in COMDATs. Because a non-base destructor can be emitted in a TU that
+ // lacks a definition for the destructor, non-base destructors must always
+ // delegate to or alias the base destructor.
+
+ void buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
+ SmallVectorImpl<CanQualType> &ArgTys) override;
+
+ /// Non-base dtors should be emitted as delegating thunks in this ABI.
+ bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
+ CXXDtorType DT) const override {
+ return DT != Dtor_Base;
+ }
+
+ void EmitCXXDestructors(const CXXDestructorDecl *D) override;
+
+ const CXXRecordDecl *
+ getThisArgumentTypeForMethod(const CXXMethodDecl *MD) override {
+ MD = MD->getCanonicalDecl();
+ if (MD->isVirtual() && !isa<CXXDestructorDecl>(MD)) {
+ MicrosoftVTableContext::MethodVFTableLocation ML =
+ CGM.getMicrosoftVTableContext().getMethodVFTableLocation(MD);
+ // The vbases might be ordered differently in the final overrider object
+ // and the complete object, so the "this" argument may sometimes point to
+ // memory that has no particular type (e.g. past the complete object).
+ // In this case, we just use a generic pointer type.
+ // FIXME: might want to have a more precise type in the non-virtual
+ // multiple inheritance case.
+ if (ML.VBase || !ML.VFPtrOffset.isZero())
+ return nullptr;
+ }
+ return MD->getParent();
+ }
+
+ Address
+ adjustThisArgumentForVirtualFunctionCall(CodeGenFunction &CGF, GlobalDecl GD,
+ Address This,
+ bool VirtualCall) override;
+
+ void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
+ FunctionArgList &Params) override;
+
+ llvm::Value *adjustThisParameterInVirtualFunctionPrologue(
+ CodeGenFunction &CGF, GlobalDecl GD, llvm::Value *This) override;
+
+ void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
+
+ unsigned addImplicitConstructorArgs(CodeGenFunction &CGF,
+ const CXXConstructorDecl *D,
+ CXXCtorType Type, bool ForVirtualBase,
+ bool Delegating,
+ CallArgList &Args) override;
+
+ void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
+ CXXDtorType Type, bool ForVirtualBase,
+ bool Delegating, Address This) override;
+
+ void emitVTableBitSetEntries(VPtrInfo *Info, const CXXRecordDecl *RD,
+ llvm::GlobalVariable *VTable);
+
+ void emitVTableDefinitions(CodeGenVTables &CGVT,
+ const CXXRecordDecl *RD) override;
+
+ bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
+ CodeGenFunction::VPtr Vptr) override;
+
+  /// Don't initialize vptrs if the dynamic class
+  /// is marked with the 'novtable' attribute.
+ bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
+ return !VTableClass->hasAttr<MSNoVTableAttr>();
+ }
+
+ llvm::Constant *
+ getVTableAddressPoint(BaseSubobject Base,
+ const CXXRecordDecl *VTableClass) override;
+
+ llvm::Value *getVTableAddressPointInStructor(
+ CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
+ BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
+
+ llvm::Constant *
+ getVTableAddressPointForConstExpr(BaseSubobject Base,
+ const CXXRecordDecl *VTableClass) override;
+
+ llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
+ CharUnits VPtrOffset) override;
+
+ llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
+ Address This, llvm::Type *Ty,
+ SourceLocation Loc) override;
+
+ llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
+ const CXXDestructorDecl *Dtor,
+ CXXDtorType DtorType,
+ Address This,
+ const CXXMemberCallExpr *CE) override;
+
+ void adjustCallArgsForDestructorThunk(CodeGenFunction &CGF, GlobalDecl GD,
+ CallArgList &CallArgs) override {
+ assert(GD.getDtorType() == Dtor_Deleting &&
+ "Only deleting destructor thunks are available in this ABI");
+ CallArgs.add(RValue::get(getStructorImplicitParamValue(CGF)),
+ getContext().IntTy);
+ }
+
+ void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
+
+ llvm::GlobalVariable *
+ getAddrOfVBTable(const VPtrInfo &VBT, const CXXRecordDecl *RD,
+ llvm::GlobalVariable::LinkageTypes Linkage);
+
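+  /// A note inferred from the code below: entry i of a virtual displacement
+  /// map holds the byte offset (DstVBIndex * 4) in the destination class's
+  /// vbtable of the virtual base found at index i in the source class's
+  /// vbtable.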
+ llvm::GlobalVariable *
+ getAddrOfVirtualDisplacementMap(const CXXRecordDecl *SrcRD,
+ const CXXRecordDecl *DstRD) {
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ getMangleContext().mangleCXXVirtualDisplacementMap(SrcRD, DstRD, Out);
+ StringRef MangledName = OutName.str();
+
+ if (auto *VDispMap = CGM.getModule().getNamedGlobal(MangledName))
+ return VDispMap;
+
+ MicrosoftVTableContext &VTContext = CGM.getMicrosoftVTableContext();
+ unsigned NumEntries = 1 + SrcRD->getNumVBases();
+ SmallVector<llvm::Constant *, 4> Map(NumEntries,
+ llvm::UndefValue::get(CGM.IntTy));
+ Map[0] = llvm::ConstantInt::get(CGM.IntTy, 0);
+ bool AnyDifferent = false;
+ for (const auto &I : SrcRD->vbases()) {
+ const CXXRecordDecl *VBase = I.getType()->getAsCXXRecordDecl();
+ if (!DstRD->isVirtuallyDerivedFrom(VBase))
+ continue;
+
+ unsigned SrcVBIndex = VTContext.getVBTableIndex(SrcRD, VBase);
+ unsigned DstVBIndex = VTContext.getVBTableIndex(DstRD, VBase);
+ Map[SrcVBIndex] = llvm::ConstantInt::get(CGM.IntTy, DstVBIndex * 4);
+ AnyDifferent |= SrcVBIndex != DstVBIndex;
+ }
+    // This map would be useless; don't use it.
+ if (!AnyDifferent)
+ return nullptr;
+
+ llvm::ArrayType *VDispMapTy = llvm::ArrayType::get(CGM.IntTy, Map.size());
+ llvm::Constant *Init = llvm::ConstantArray::get(VDispMapTy, Map);
+ llvm::GlobalValue::LinkageTypes Linkage =
+ SrcRD->isExternallyVisible() && DstRD->isExternallyVisible()
+ ? llvm::GlobalValue::LinkOnceODRLinkage
+ : llvm::GlobalValue::InternalLinkage;
+ auto *VDispMap = new llvm::GlobalVariable(
+ CGM.getModule(), VDispMapTy, /*Constant=*/true, Linkage,
+ /*Initializer=*/Init, MangledName);
+ return VDispMap;
+ }
+
+ void emitVBTableDefinition(const VPtrInfo &VBT, const CXXRecordDecl *RD,
+ llvm::GlobalVariable *GV) const;
+
+ void setThunkLinkage(llvm::Function *Thunk, bool ForVTable,
+ GlobalDecl GD, bool ReturnAdjustment) override {
+ // Never dllimport/dllexport thunks.
+ Thunk->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
+
+ GVALinkage Linkage =
+ getContext().GetGVALinkageForFunction(cast<FunctionDecl>(GD.getDecl()));
+
+ if (Linkage == GVA_Internal)
+ Thunk->setLinkage(llvm::GlobalValue::InternalLinkage);
+ else if (ReturnAdjustment)
+ Thunk->setLinkage(llvm::GlobalValue::WeakODRLinkage);
+ else
+ Thunk->setLinkage(llvm::GlobalValue::LinkOnceODRLinkage);
+ }
+
+ llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
+ const ThisAdjustment &TA) override;
+
+ llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
+ const ReturnAdjustment &RA) override;
+
+ void EmitThreadLocalInitFuncs(
+ CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
+ ArrayRef<llvm::Function *> CXXThreadLocalInits,
+ ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
+
+ bool usesThreadWrapperFunction() const override { return false; }
+ LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
+ QualType LValType) override;
+
+ void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::GlobalVariable *DeclPtr,
+ bool PerformInit) override;
+ void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *Dtor, llvm::Constant *Addr) override;
+
+ // ==== Notes on array cookies =========
+ //
+ // MSVC seems to only use cookies when the class has a destructor; a
+ // two-argument usual array deallocation function isn't sufficient.
+ //
+ // For example, this code prints "100" and "1":
+ // struct A {
+ // char x;
+ // void *operator new[](size_t sz) {
+ // printf("%u\n", sz);
+ // return malloc(sz);
+ // }
+ // void operator delete[](void *p, size_t sz) {
+ // printf("%u\n", sz);
+ // free(p);
+ // }
+ // };
+ // int main() {
+ // A *p = new A[100];
+ // delete[] p;
+ // }
+ // Whereas it prints "104" and "104" if you give A a destructor.
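+  // (In the destructor case the extra four bytes are the array cookie,
+  // which stores the element count in front of the allocation.)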
+
+ bool requiresArrayCookie(const CXXDeleteExpr *expr,
+ QualType elementType) override;
+ bool requiresArrayCookie(const CXXNewExpr *expr) override;
+ CharUnits getArrayCookieSizeImpl(QualType type) override;
+ Address InitializeArrayCookie(CodeGenFunction &CGF,
+ Address NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) override;
+ llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
+ Address allocPtr,
+ CharUnits cookieSize) override;
+
+ friend struct MSRTTIBuilder;
+
+ bool isImageRelative() const {
+ return CGM.getTarget().getPointerWidth(/*AddressSpace=*/0) == 64;
+ }
+
+ // 5 routines for constructing the llvm types for MS RTTI structs.
+ llvm::StructType *getTypeDescriptorType(StringRef TypeInfoString) {
+ llvm::SmallString<32> TDTypeName("rtti.TypeDescriptor");
+ TDTypeName += llvm::utostr(TypeInfoString.size());
+ llvm::StructType *&TypeDescriptorType =
+ TypeDescriptorTypeMap[TypeInfoString.size()];
+ if (TypeDescriptorType)
+ return TypeDescriptorType;
+ llvm::Type *FieldTypes[] = {
+ CGM.Int8PtrPtrTy,
+ CGM.Int8PtrTy,
+ llvm::ArrayType::get(CGM.Int8Ty, TypeInfoString.size() + 1)};
+ TypeDescriptorType =
+ llvm::StructType::create(CGM.getLLVMContext(), FieldTypes, TDTypeName);
+ return TypeDescriptorType;
+ }
+
+ llvm::Type *getImageRelativeType(llvm::Type *PtrType) {
+ if (!isImageRelative())
+ return PtrType;
+ return CGM.IntTy;
+ }
+
+ llvm::StructType *getBaseClassDescriptorType() {
+ if (BaseClassDescriptorType)
+ return BaseClassDescriptorType;
+ llvm::Type *FieldTypes[] = {
+ getImageRelativeType(CGM.Int8PtrTy),
+ CGM.IntTy,
+ CGM.IntTy,
+ CGM.IntTy,
+ CGM.IntTy,
+ CGM.IntTy,
+ getImageRelativeType(getClassHierarchyDescriptorType()->getPointerTo()),
+ };
+ BaseClassDescriptorType = llvm::StructType::create(
+ CGM.getLLVMContext(), FieldTypes, "rtti.BaseClassDescriptor");
+ return BaseClassDescriptorType;
+ }
+
+ llvm::StructType *getClassHierarchyDescriptorType() {
+ if (ClassHierarchyDescriptorType)
+ return ClassHierarchyDescriptorType;
+ // Forward-declare RTTIClassHierarchyDescriptor to break a cycle.
+ ClassHierarchyDescriptorType = llvm::StructType::create(
+ CGM.getLLVMContext(), "rtti.ClassHierarchyDescriptor");
+ llvm::Type *FieldTypes[] = {
+ CGM.IntTy,
+ CGM.IntTy,
+ CGM.IntTy,
+ getImageRelativeType(
+ getBaseClassDescriptorType()->getPointerTo()->getPointerTo()),
+ };
+ ClassHierarchyDescriptorType->setBody(FieldTypes);
+ return ClassHierarchyDescriptorType;
+ }
+
+ llvm::StructType *getCompleteObjectLocatorType() {
+ if (CompleteObjectLocatorType)
+ return CompleteObjectLocatorType;
+ CompleteObjectLocatorType = llvm::StructType::create(
+ CGM.getLLVMContext(), "rtti.CompleteObjectLocator");
+ llvm::Type *FieldTypes[] = {
+ CGM.IntTy,
+ CGM.IntTy,
+ CGM.IntTy,
+ getImageRelativeType(CGM.Int8PtrTy),
+ getImageRelativeType(getClassHierarchyDescriptorType()->getPointerTo()),
+ getImageRelativeType(CompleteObjectLocatorType),
+ };
+ llvm::ArrayRef<llvm::Type *> FieldTypesRef(FieldTypes);
+ if (!isImageRelative())
+ FieldTypesRef = FieldTypesRef.drop_back();
+ CompleteObjectLocatorType->setBody(FieldTypesRef);
+ return CompleteObjectLocatorType;
+ }
+
+ llvm::GlobalVariable *getImageBase() {
+ StringRef Name = "__ImageBase";
+ if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name))
+ return GV;
+
+ return new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty,
+ /*isConstant=*/true,
+ llvm::GlobalValue::ExternalLinkage,
+ /*Initializer=*/nullptr, Name);
+ }
+
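+  // On 64-bit targets this yields a 32-bit image-relative offset,
+  // (uint32_t)((char *)PtrVal - (char *)&__ImageBase), rather than a raw
+  // pointer constant.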
+ llvm::Constant *getImageRelativeConstant(llvm::Constant *PtrVal) {
+ if (!isImageRelative())
+ return PtrVal;
+
+ if (PtrVal->isNullValue())
+ return llvm::Constant::getNullValue(CGM.IntTy);
+
+ llvm::Constant *ImageBaseAsInt =
+ llvm::ConstantExpr::getPtrToInt(getImageBase(), CGM.IntPtrTy);
+ llvm::Constant *PtrValAsInt =
+ llvm::ConstantExpr::getPtrToInt(PtrVal, CGM.IntPtrTy);
+ llvm::Constant *Diff =
+ llvm::ConstantExpr::getSub(PtrValAsInt, ImageBaseAsInt,
+ /*HasNUW=*/true, /*HasNSW=*/true);
+ return llvm::ConstantExpr::getTrunc(Diff, CGM.IntTy);
+ }
+
+private:
+ MicrosoftMangleContext &getMangleContext() {
+ return cast<MicrosoftMangleContext>(CodeGen::CGCXXABI::getMangleContext());
+ }
+
+ llvm::Constant *getZeroInt() {
+ return llvm::ConstantInt::get(CGM.IntTy, 0);
+ }
+
+ llvm::Constant *getAllOnesInt() {
+ return llvm::Constant::getAllOnesValue(CGM.IntTy);
+ }
+
+ CharUnits getVirtualFunctionPrologueThisAdjustment(GlobalDecl GD);
+
+ void
+ GetNullMemberPointerFields(const MemberPointerType *MPT,
+ llvm::SmallVectorImpl<llvm::Constant *> &fields);
+
+ /// \brief Shared code for virtual base adjustment. Returns the offset from
+ /// the vbptr to the virtual base. Optionally returns the address of the
+ /// vbptr itself.
+ llvm::Value *GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
+ Address Base,
+ llvm::Value *VBPtrOffset,
+ llvm::Value *VBTableOffset,
+ llvm::Value **VBPtr = nullptr);
+
+ llvm::Value *GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
+ Address Base,
+ int32_t VBPtrOffset,
+ int32_t VBTableOffset,
+ llvm::Value **VBPtr = nullptr) {
+ assert(VBTableOffset % 4 == 0 && "should be byte offset into table of i32s");
+ llvm::Value *VBPOffset = llvm::ConstantInt::get(CGM.IntTy, VBPtrOffset),
+ *VBTOffset = llvm::ConstantInt::get(CGM.IntTy, VBTableOffset);
+ return GetVBaseOffsetFromVBPtr(CGF, Base, VBPOffset, VBTOffset, VBPtr);
+ }
+
+ std::pair<Address, llvm::Value *>
+ performBaseAdjustment(CodeGenFunction &CGF, Address Value,
+ QualType SrcRecordTy);
+
+ /// \brief Performs a full virtual base adjustment. Used to dereference
+ /// pointers to members of virtual bases.
+ llvm::Value *AdjustVirtualBase(CodeGenFunction &CGF, const Expr *E,
+ const CXXRecordDecl *RD, Address Base,
+ llvm::Value *VirtualBaseAdjustmentOffset,
+ llvm::Value *VBPtrOffset /* optional */);
+
+ /// \brief Emits a full member pointer with the fields common to data and
+ /// function member pointers.
+ llvm::Constant *EmitFullMemberPointer(llvm::Constant *FirstField,
+ bool IsMemberFunction,
+ const CXXRecordDecl *RD,
+ CharUnits NonVirtualBaseAdjustment,
+ unsigned VBTableIndex);
+
+ bool MemberPointerConstantIsNull(const MemberPointerType *MPT,
+ llvm::Constant *MP);
+
+ /// \brief - Initialize all vbptrs of 'this' with RD as the complete type.
+ void EmitVBPtrStores(CodeGenFunction &CGF, const CXXRecordDecl *RD);
+
+ /// \brief Caching wrapper around VBTableBuilder::enumerateVBTables().
+ const VBTableGlobals &enumerateVBTables(const CXXRecordDecl *RD);
+
+ /// \brief Generate a thunk for calling a virtual member function MD.
+ llvm::Function *EmitVirtualMemPtrThunk(
+ const CXXMethodDecl *MD,
+ const MicrosoftVTableContext::MethodVFTableLocation &ML);
+
+public:
+ llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
+
+ bool isZeroInitializable(const MemberPointerType *MPT) override;
+
+ bool isMemberPointerConvertible(const MemberPointerType *MPT) const override {
+ const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+ return RD->hasAttr<MSInheritanceAttr>();
+ }
+
+ llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
+
+ llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset) override;
+ llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
+ llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
+
+ llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality) override;
+
+ llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) override;
+
+ llvm::Value *
+ EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
+ Address Base, llvm::Value *MemPtr,
+ const MemberPointerType *MPT) override;
+
+ llvm::Value *EmitNonNullMemberPointerConversion(
+ const MemberPointerType *SrcTy, const MemberPointerType *DstTy,
+ CastKind CK, CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd, llvm::Value *Src,
+ CGBuilderTy &Builder);
+
+ llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src) override;
+
+ llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *Src) override;
+
+ llvm::Constant *EmitMemberPointerConversion(
+ const MemberPointerType *SrcTy, const MemberPointerType *DstTy,
+ CastKind CK, CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd, llvm::Constant *Src);
+
+ llvm::Value *
+ EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF, const Expr *E,
+ Address This, llvm::Value *&ThisPtrForCall,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) override;
+
+ void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;
+
+ llvm::StructType *getCatchableTypeType() {
+ if (CatchableTypeType)
+ return CatchableTypeType;
+ llvm::Type *FieldTypes[] = {
+ CGM.IntTy, // Flags
+ getImageRelativeType(CGM.Int8PtrTy), // TypeDescriptor
+ CGM.IntTy, // NonVirtualAdjustment
+ CGM.IntTy, // OffsetToVBPtr
+ CGM.IntTy, // VBTableIndex
+ CGM.IntTy, // Size
+ getImageRelativeType(CGM.Int8PtrTy) // CopyCtor
+ };
+ CatchableTypeType = llvm::StructType::create(
+ CGM.getLLVMContext(), FieldTypes, "eh.CatchableType");
+ return CatchableTypeType;
+ }
+
+ llvm::StructType *getCatchableTypeArrayType(uint32_t NumEntries) {
+ llvm::StructType *&CatchableTypeArrayType =
+ CatchableTypeArrayTypeMap[NumEntries];
+ if (CatchableTypeArrayType)
+ return CatchableTypeArrayType;
+
+ llvm::SmallString<23> CTATypeName("eh.CatchableTypeArray.");
+ CTATypeName += llvm::utostr(NumEntries);
+ llvm::Type *CTType =
+ getImageRelativeType(getCatchableTypeType()->getPointerTo());
+ llvm::Type *FieldTypes[] = {
+ CGM.IntTy, // NumEntries
+ llvm::ArrayType::get(CTType, NumEntries) // CatchableTypes
+ };
+ CatchableTypeArrayType =
+ llvm::StructType::create(CGM.getLLVMContext(), FieldTypes, CTATypeName);
+ return CatchableTypeArrayType;
+ }
+
+ llvm::StructType *getThrowInfoType() {
+ if (ThrowInfoType)
+ return ThrowInfoType;
+ llvm::Type *FieldTypes[] = {
+ CGM.IntTy, // Flags
+ getImageRelativeType(CGM.Int8PtrTy), // CleanupFn
+ getImageRelativeType(CGM.Int8PtrTy), // ForwardCompat
+ getImageRelativeType(CGM.Int8PtrTy) // CatchableTypeArray
+ };
+ ThrowInfoType = llvm::StructType::create(CGM.getLLVMContext(), FieldTypes,
+ "eh.ThrowInfo");
+ return ThrowInfoType;
+ }
+
+ llvm::Constant *getThrowFn() {
+ // _CxxThrowException is passed an exception object and a ThrowInfo object
+ // which describes the exception.
+ llvm::Type *Args[] = {CGM.Int8PtrTy, getThrowInfoType()->getPointerTo()};
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, Args, /*IsVarArgs=*/false);
+ auto *Fn = cast<llvm::Function>(
+ CGM.CreateRuntimeFunction(FTy, "_CxxThrowException"));
+ // _CxxThrowException is stdcall on 32-bit x86 platforms.
+ if (CGM.getTarget().getTriple().getArch() == llvm::Triple::x86)
+ Fn->setCallingConv(llvm::CallingConv::X86_StdCall);
+ return Fn;
+ }
+
+ llvm::Function *getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
+ CXXCtorType CT);
+
+ llvm::Constant *getCatchableType(QualType T,
+ uint32_t NVOffset = 0,
+ int32_t VBPtrOffset = -1,
+ uint32_t VBIndex = 0);
+
+ llvm::GlobalVariable *getCatchableTypeArray(QualType T);
+
+ llvm::GlobalVariable *getThrowInfo(QualType T) override;
+
+private:
+ typedef std::pair<const CXXRecordDecl *, CharUnits> VFTableIdTy;
+ typedef llvm::DenseMap<VFTableIdTy, llvm::GlobalVariable *> VTablesMapTy;
+ typedef llvm::DenseMap<VFTableIdTy, llvm::GlobalValue *> VFTablesMapTy;
+ /// \brief All the vftables that have been referenced.
+ VFTablesMapTy VFTablesMap;
+ VTablesMapTy VTablesMap;
+
+ /// \brief This set holds the record decls we've deferred vtable emission for.
+ llvm::SmallPtrSet<const CXXRecordDecl *, 4> DeferredVFTables;
+
+ /// \brief All the vbtables which have been referenced.
+ llvm::DenseMap<const CXXRecordDecl *, VBTableGlobals> VBTablesMap;
+
+ /// Info on the global variable used to guard initialization of static locals.
+ /// The BitIndex field is only used for externally invisible declarations.
+ struct GuardInfo {
+ GuardInfo() : Guard(nullptr), BitIndex(0) {}
+ llvm::GlobalVariable *Guard;
+ unsigned BitIndex;
+ };
+
+ /// Map from DeclContext to the current guard variable. We assume that the
+ /// AST is visited in source code order.
+ llvm::DenseMap<const DeclContext *, GuardInfo> GuardVariableMap;
+ llvm::DenseMap<const DeclContext *, GuardInfo> ThreadLocalGuardVariableMap;
+ llvm::DenseMap<const DeclContext *, unsigned> ThreadSafeGuardNumMap;
+
+ llvm::DenseMap<size_t, llvm::StructType *> TypeDescriptorTypeMap;
+ llvm::StructType *BaseClassDescriptorType;
+ llvm::StructType *ClassHierarchyDescriptorType;
+ llvm::StructType *CompleteObjectLocatorType;
+
+ llvm::DenseMap<QualType, llvm::GlobalVariable *> CatchableTypeArrays;
+
+ llvm::StructType *CatchableTypeType;
+ llvm::DenseMap<uint32_t, llvm::StructType *> CatchableTypeArrayTypeMap;
+ llvm::StructType *ThrowInfoType;
+};
+
+}
+
+CGCXXABI::RecordArgABI
+MicrosoftCXXABI::getRecordArgABI(const CXXRecordDecl *RD) const {
+ switch (CGM.getTarget().getTriple().getArch()) {
+ default:
+ // FIXME: Implement for other architectures.
+ return RAA_Default;
+
+ case llvm::Triple::x86:
+ // All record arguments are passed in memory on x86. Decide whether to
+ // construct the object directly in argument memory, or to construct the
+ // argument elsewhere and copy the bytes during the call.
+
+ // If C++ prohibits us from making a copy, construct the arguments directly
+ // into argument memory.
+ if (!canCopyArgument(RD))
+ return RAA_DirectInMemory;
+
+ // Otherwise, construct the argument into a temporary and copy the bytes
+ // into the outgoing argument memory.
+ return RAA_Default;
+
+ case llvm::Triple::x86_64:
+ // Win64 passes objects with non-trivial copy ctors indirectly.
+ if (RD->hasNonTrivialCopyConstructor())
+ return RAA_Indirect;
+
+ // If an object has a destructor, we'd really like to pass it indirectly
+ // because it allows us to elide copies. Unfortunately, MSVC makes that
+ // impossible for small types, which it will pass in a single register or
+ // stack slot. Most objects with dtors are large-ish, so handle that early.
+ // We can't call out all large objects as being indirect because there are
+ // multiple x64 calling conventions and the C++ ABI code shouldn't dictate
+ // how we pass large POD types.
+ if (RD->hasNonTrivialDestructor() &&
+ getContext().getTypeSize(RD->getTypeForDecl()) > 64)
+ return RAA_Indirect;
+
+ // We have a trivial copy constructor or no copy constructors, but we have
+ // to make sure it isn't deleted.
+ bool CopyDeleted = false;
+ for (const CXXConstructorDecl *CD : RD->ctors()) {
+ if (CD->isCopyConstructor()) {
+ assert(CD->isTrivial());
+ // We had at least one undeleted trivial copy ctor. Return directly.
+ if (!CD->isDeleted())
+ return RAA_Default;
+ CopyDeleted = true;
+ }
+ }
+
+ // The trivial copy constructor was deleted. Return indirectly.
+ if (CopyDeleted)
+ return RAA_Indirect;
+
+ // There were no copy ctors. Return in RAX.
+ return RAA_Default;
+ }
+
+ llvm_unreachable("invalid enum");
+}
+
+void MicrosoftCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
+ const CXXDeleteExpr *DE,
+ Address Ptr,
+ QualType ElementType,
+ const CXXDestructorDecl *Dtor) {
+ // FIXME: Provide a source location here even though there's no
+ // CXXMemberCallExpr for dtor call.
+ bool UseGlobalDelete = DE->isGlobalDelete();
+ CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
+ llvm::Value *MDThis =
+ EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, /*CE=*/nullptr);
+ if (UseGlobalDelete)
+ CGF.EmitDeleteCall(DE->getOperatorDelete(), MDThis, ElementType);
+}
+
+void MicrosoftCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
+ llvm::Value *Args[] = {
+ llvm::ConstantPointerNull::get(CGM.Int8PtrTy),
+ llvm::ConstantPointerNull::get(getThrowInfoType()->getPointerTo())};
+ auto *Fn = getThrowFn();
+ if (isNoReturn)
+ CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, Args);
+ else
+ CGF.EmitRuntimeCallOrInvoke(Fn, Args);
+}
+
+namespace {
+struct CatchRetScope final : EHScopeStack::Cleanup {
+ llvm::CatchPadInst *CPI;
+
+ CatchRetScope(llvm::CatchPadInst *CPI) : CPI(CPI) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ llvm::BasicBlock *BB = CGF.createBasicBlock("catchret.dest");
+ CGF.Builder.CreateCatchRet(CPI, BB);
+ CGF.EmitBlock(BB);
+ }
+};
+}
+
+void MicrosoftCXXABI::emitBeginCatch(CodeGenFunction &CGF,
+ const CXXCatchStmt *S) {
+ // In the MS ABI, the runtime handles the copy, and the catch handler is
+ // responsible for destruction.
+ VarDecl *CatchParam = S->getExceptionDecl();
+ llvm::BasicBlock *CatchPadBB = CGF.Builder.GetInsertBlock();
+ llvm::CatchPadInst *CPI =
+ cast<llvm::CatchPadInst>(CatchPadBB->getFirstNonPHI());
+ CGF.CurrentFuncletPad = CPI;
+
+ // If this is a catch-all or the catch parameter is unnamed, we don't need to
+ // emit an alloca to the object.
+ if (!CatchParam || !CatchParam->getDeclName()) {
+ CGF.EHStack.pushCleanup<CatchRetScope>(NormalCleanup, CPI);
+ return;
+ }
+
+ CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
+ CPI->setArgOperand(2, var.getObjectAddress(CGF).getPointer());
+ CGF.EHStack.pushCleanup<CatchRetScope>(NormalCleanup, CPI);
+ CGF.EmitAutoVarCleanups(var);
+}
+
+/// We need to perform a generic polymorphic operation (like a typeid
+/// or a cast), which requires an object with a vfptr. Adjust the
+/// address to point to an object with a vfptr.
+std::pair<Address, llvm::Value *>
+MicrosoftCXXABI::performBaseAdjustment(CodeGenFunction &CGF, Address Value,
+ QualType SrcRecordTy) {
+ Value = CGF.Builder.CreateBitCast(Value, CGF.Int8PtrTy);
+ const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
+ const ASTContext &Context = getContext();
+
+ // If the class itself has a vfptr, great. This check implicitly
+ // covers non-virtual base subobjects: a class with its own virtual
+ // functions would be a candidate to be a primary base.
+ if (Context.getASTRecordLayout(SrcDecl).hasExtendableVFPtr())
+ return std::make_pair(Value, llvm::ConstantInt::get(CGF.Int32Ty, 0));
+
+ // Okay, one of the vbases must have a vfptr, or else this isn't
+ // actually a polymorphic class.
+ const CXXRecordDecl *PolymorphicBase = nullptr;
+ for (auto &Base : SrcDecl->vbases()) {
+ const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+ if (Context.getASTRecordLayout(BaseDecl).hasExtendableVFPtr()) {
+ PolymorphicBase = BaseDecl;
+ break;
+ }
+ }
+ assert(PolymorphicBase && "polymorphic class has no apparent vfptr?");
+
+ llvm::Value *Offset =
+ GetVirtualBaseClassOffset(CGF, Value, SrcDecl, PolymorphicBase);
+ llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(Value.getPointer(), Offset);
+ Offset = CGF.Builder.CreateTrunc(Offset, CGF.Int32Ty);
+ CharUnits VBaseAlign =
+ CGF.CGM.getVBaseAlignment(Value.getAlignment(), SrcDecl, PolymorphicBase);
+ return std::make_pair(Address(Ptr, VBaseAlign), Offset);
+}
+
+bool MicrosoftCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
+ QualType SrcRecordTy) {
+ const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
+ return IsDeref &&
+ !getContext().getASTRecordLayout(SrcDecl).hasExtendableVFPtr();
+}
+
+static llvm::CallSite emitRTtypeidCall(CodeGenFunction &CGF,
+ llvm::Value *Argument) {
+ llvm::Type *ArgTypes[] = {CGF.Int8PtrTy};
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false);
+ llvm::Value *Args[] = {Argument};
+ llvm::Constant *Fn = CGF.CGM.CreateRuntimeFunction(FTy, "__RTtypeid");
+ return CGF.EmitRuntimeCallOrInvoke(Fn, Args);
+}
+
+void MicrosoftCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
+ llvm::CallSite Call =
+ emitRTtypeidCall(CGF, llvm::Constant::getNullValue(CGM.VoidPtrTy));
+ Call.setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+}
+
+llvm::Value *MicrosoftCXXABI::EmitTypeid(CodeGenFunction &CGF,
+ QualType SrcRecordTy,
+ Address ThisPtr,
+ llvm::Type *StdTypeInfoPtrTy) {
+ llvm::Value *Offset;
+ std::tie(ThisPtr, Offset) = performBaseAdjustment(CGF, ThisPtr, SrcRecordTy);
+ auto Typeid = emitRTtypeidCall(CGF, ThisPtr.getPointer()).getInstruction();
+ return CGF.Builder.CreateBitCast(Typeid, StdTypeInfoPtrTy);
+}
+
+bool MicrosoftCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
+ QualType SrcRecordTy) {
+ const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
+ return SrcIsPtr &&
+ !getContext().getASTRecordLayout(SrcDecl).hasExtendableVFPtr();
+}
+
+llvm::Value *MicrosoftCXXABI::EmitDynamicCastCall(
+ CodeGenFunction &CGF, Address This, QualType SrcRecordTy,
+ QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
+ llvm::Type *DestLTy = CGF.ConvertType(DestTy);
+
+ llvm::Value *SrcRTTI =
+ CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
+ llvm::Value *DestRTTI =
+ CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
+
+ llvm::Value *Offset;
+ std::tie(This, Offset) = performBaseAdjustment(CGF, This, SrcRecordTy);
+ llvm::Value *ThisPtr = This.getPointer();
+
+ // PVOID __RTDynamicCast(
+ // PVOID inptr,
+ // LONG VfDelta,
+ // PVOID SrcType,
+ // PVOID TargetType,
+ // BOOL isReference)
+ llvm::Type *ArgTypes[] = {CGF.Int8PtrTy, CGF.Int32Ty, CGF.Int8PtrTy,
+ CGF.Int8PtrTy, CGF.Int32Ty};
+ llvm::Constant *Function = CGF.CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false),
+ "__RTDynamicCast");
+ llvm::Value *Args[] = {
+ ThisPtr, Offset, SrcRTTI, DestRTTI,
+ llvm::ConstantInt::get(CGF.Int32Ty, DestTy->isReferenceType())};
+ ThisPtr = CGF.EmitRuntimeCallOrInvoke(Function, Args).getInstruction();
+ return CGF.Builder.CreateBitCast(ThisPtr, DestLTy);
+}
+
+llvm::Value *
+MicrosoftCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
+ QualType SrcRecordTy,
+ QualType DestTy) {
+ llvm::Value *Offset;
+ std::tie(Value, Offset) = performBaseAdjustment(CGF, Value, SrcRecordTy);
+
+ // PVOID __RTCastToVoid(
+ // PVOID inptr)
+ llvm::Type *ArgTypes[] = {CGF.Int8PtrTy};
+ llvm::Constant *Function = CGF.CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false),
+ "__RTCastToVoid");
+ llvm::Value *Args[] = {Value.getPointer()};
+ return CGF.EmitRuntimeCall(Function, Args);
+}
+
+bool MicrosoftCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
+ return false;
+}
+
+llvm::Value *MicrosoftCXXABI::GetVirtualBaseClassOffset(
+ CodeGenFunction &CGF, Address This, const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl) {
+ const ASTContext &Context = getContext();
+ int64_t VBPtrChars =
+ Context.getASTRecordLayout(ClassDecl).getVBPtrOffset().getQuantity();
+ llvm::Value *VBPtrOffset = llvm::ConstantInt::get(CGM.PtrDiffTy, VBPtrChars);
+ CharUnits IntSize = Context.getTypeSizeInChars(Context.IntTy);
+ CharUnits VBTableChars =
+ IntSize *
+ CGM.getMicrosoftVTableContext().getVBTableIndex(ClassDecl, BaseClassDecl);
+ llvm::Value *VBTableOffset =
+ llvm::ConstantInt::get(CGM.IntTy, VBTableChars.getQuantity());
+
+ llvm::Value *VBPtrToNewBase =
+ GetVBaseOffsetFromVBPtr(CGF, This, VBPtrOffset, VBTableOffset);
+ VBPtrToNewBase =
+ CGF.Builder.CreateSExtOrBitCast(VBPtrToNewBase, CGM.PtrDiffTy);
+ return CGF.Builder.CreateNSWAdd(VBPtrOffset, VBPtrToNewBase);
+}
+
+bool MicrosoftCXXABI::HasThisReturn(GlobalDecl GD) const {
+ return isa<CXXConstructorDecl>(GD.getDecl());
+}
+
+static bool isDeletingDtor(GlobalDecl GD) {
+ return isa<CXXDestructorDecl>(GD.getDecl()) &&
+ GD.getDtorType() == Dtor_Deleting;
+}
+
+bool MicrosoftCXXABI::hasMostDerivedReturn(GlobalDecl GD) const {
+ return isDeletingDtor(GD);
+}
+
+bool MicrosoftCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
+ const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
+ if (!RD)
+ return false;
+
+ CharUnits Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
+ if (FI.isInstanceMethod()) {
+ // If it's an instance method, aggregates are always returned indirectly via
+ // the second parameter.
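+    // E.g. (sketch): for 'S S::clone()', the hidden return-slot pointer is
+    // passed second, after 'this', rather than first as in the Itanium ABI.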
+ FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ FI.getReturnInfo().setSRetAfterThis(FI.isInstanceMethod());
+ return true;
+ } else if (!RD->isPOD()) {
+ // If it's a free function, non-POD types are returned indirectly.
+ FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ return true;
+ }
+
+ // Otherwise, use the C ABI rules.
+ return false;
+}
+
+llvm::BasicBlock *
+MicrosoftCXXABI::EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
+ const CXXRecordDecl *RD) {
+ llvm::Value *IsMostDerivedClass = getStructorImplicitParamValue(CGF);
+ assert(IsMostDerivedClass &&
+ "ctor for a class with virtual bases must have an implicit parameter");
+ llvm::Value *IsCompleteObject =
+ CGF.Builder.CreateIsNotNull(IsMostDerivedClass, "is_complete_object");
+
+ llvm::BasicBlock *CallVbaseCtorsBB = CGF.createBasicBlock("ctor.init_vbases");
+ llvm::BasicBlock *SkipVbaseCtorsBB = CGF.createBasicBlock("ctor.skip_vbases");
+ CGF.Builder.CreateCondBr(IsCompleteObject,
+ CallVbaseCtorsBB, SkipVbaseCtorsBB);
+
+ CGF.EmitBlock(CallVbaseCtorsBB);
+
+ // Fill in the vbtable pointers here.
+ EmitVBPtrStores(CGF, RD);
+
+ // CGF will put the base ctor calls in this basic block for us later.
+
+ return SkipVbaseCtorsBB;
+}
+
+void MicrosoftCXXABI::initializeHiddenVirtualInheritanceMembers(
+ CodeGenFunction &CGF, const CXXRecordDecl *RD) {
+ // In most cases, an override for a vbase virtual method can adjust
+ // the "this" parameter by applying a constant offset.
+ // However, this is not enough while a constructor or a destructor of some
+ // class X is being executed if all the following conditions are met:
+ // - X has virtual bases, (1)
+ // - X overrides a virtual method M of a vbase Y, (2)
+ // - X itself is a vbase of the most derived class.
+ //
+ // If (1) and (2) are true, the vtorDisp for vbase Y is a hidden member of X
+ // which holds the extra amount of "this" adjustment we must do when we use
+ // the X vftables (i.e. during X ctor or dtor).
+ // Outside the ctors and dtors, the values of vtorDisps are zero.
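+  //
+  // E.g. (illustrative sketch):
+  //   struct Y { virtual void M(); };
+  //   struct X : virtual Y { void M(); X(); };  // satisfies (1) and (2)
+  //   struct Z : virtual X { Z(); };            // X is a vbase of Z
+  // While Z's ctor runs X's ctor, calls to M through Y's vfptr go via a
+  // vtordisp thunk that re-adjusts 'this' using the stored vtorDisp.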
+
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ typedef ASTRecordLayout::VBaseOffsetsMapTy VBOffsets;
+ const VBOffsets &VBaseMap = Layout.getVBaseOffsetsMap();
+ CGBuilderTy &Builder = CGF.Builder;
+
+ unsigned AS = getThisAddress(CGF).getAddressSpace();
+ llvm::Value *Int8This = nullptr; // Initialize lazily.
+
+ for (VBOffsets::const_iterator I = VBaseMap.begin(), E = VBaseMap.end();
+ I != E; ++I) {
+ if (!I->second.hasVtorDisp())
+ continue;
+
+ llvm::Value *VBaseOffset =
+ GetVirtualBaseClassOffset(CGF, getThisAddress(CGF), RD, I->first);
+ // FIXME: it doesn't look right that we SExt in GetVirtualBaseClassOffset()
+ // just to Trunc back immediately.
+ VBaseOffset = Builder.CreateTruncOrBitCast(VBaseOffset, CGF.Int32Ty);
+ uint64_t ConstantVBaseOffset =
+ Layout.getVBaseClassOffset(I->first).getQuantity();
+
+ // vtorDisp_for_vbase = vbptr[vbase_idx] - offsetof(RD, vbase).
+ llvm::Value *VtorDispValue = Builder.CreateSub(
+ VBaseOffset, llvm::ConstantInt::get(CGM.Int32Ty, ConstantVBaseOffset),
+ "vtordisp.value");
+
+ if (!Int8This)
+ Int8This = Builder.CreateBitCast(getThisValue(CGF),
+ CGF.Int8Ty->getPointerTo(AS));
+ llvm::Value *VtorDispPtr = Builder.CreateInBoundsGEP(Int8This, VBaseOffset);
+ // vtorDisp is always the 32-bits before the vbase in the class layout.
+ VtorDispPtr = Builder.CreateConstGEP1_32(VtorDispPtr, -4);
+ VtorDispPtr = Builder.CreateBitCast(
+ VtorDispPtr, CGF.Int32Ty->getPointerTo(AS), "vtordisp.ptr");
+
+ Builder.CreateAlignedStore(VtorDispValue, VtorDispPtr,
+ CharUnits::fromQuantity(4));
+ }
+}
+
+static bool hasDefaultCXXMethodCC(ASTContext &Context,
+ const CXXMethodDecl *MD) {
+ CallingConv ExpectedCallingConv = Context.getDefaultCallingConvention(
+ /*IsVariadic=*/false, /*IsCXXMethod=*/true);
+ CallingConv ActualCallingConv =
+ MD->getType()->getAs<FunctionProtoType>()->getCallConv();
+ return ExpectedCallingConv == ActualCallingConv;
+}
+
+void MicrosoftCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
+ // There's only one constructor type in this ABI.
+ CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
+
+  // Exported default constructors either have a simple call-site where they use
+  // the typical calling convention and take a single 'this' pointer argument,
+  // or they get a wrapper function that appropriately thunks to the real
+  // default constructor. This thunk is the default constructor closure.
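+  // E.g. (illustrative): 'struct __declspec(dllexport) S { S(int x = 0); };'
+  // has getNumParams() != 0, so we emit a closure that simply calls S::S(0).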
+ if (D->hasAttr<DLLExportAttr>() && D->isDefaultConstructor())
+ if (!hasDefaultCXXMethodCC(getContext(), D) || D->getNumParams() != 0) {
+ llvm::Function *Fn = getAddrOfCXXCtorClosure(D, Ctor_DefaultClosure);
+ Fn->setLinkage(llvm::GlobalValue::WeakODRLinkage);
+ Fn->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+ }
+}
+
+void MicrosoftCXXABI::EmitVBPtrStores(CodeGenFunction &CGF,
+ const CXXRecordDecl *RD) {
+ Address This = getThisAddress(CGF);
+ This = CGF.Builder.CreateElementBitCast(This, CGM.Int8Ty, "this.int8");
+ const ASTContext &Context = getContext();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ const VBTableGlobals &VBGlobals = enumerateVBTables(RD);
+ for (unsigned I = 0, E = VBGlobals.VBTables->size(); I != E; ++I) {
+ const VPtrInfo *VBT = (*VBGlobals.VBTables)[I];
+ llvm::GlobalVariable *GV = VBGlobals.Globals[I];
+ const ASTRecordLayout &SubobjectLayout =
+ Context.getASTRecordLayout(VBT->BaseWithVPtr);
+ CharUnits Offs = VBT->NonVirtualOffset;
+ Offs += SubobjectLayout.getVBPtrOffset();
+ if (VBT->getVBaseWithVPtr())
+ Offs += Layout.getVBaseClassOffset(VBT->getVBaseWithVPtr());
+ Address VBPtr = CGF.Builder.CreateConstInBoundsByteGEP(This, Offs);
+ llvm::Value *GVPtr =
+ CGF.Builder.CreateConstInBoundsGEP2_32(GV->getValueType(), GV, 0, 0);
+ VBPtr = CGF.Builder.CreateElementBitCast(VBPtr, GVPtr->getType(),
+ "vbptr." + VBT->ReusingBase->getName());
+ CGF.Builder.CreateStore(GVPtr, VBPtr);
+ }
+}
+
+void
+MicrosoftCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
+ SmallVectorImpl<CanQualType> &ArgTys) {
+ // TODO: 'for base' flag
+ if (T == StructorType::Deleting) {
+ // The scalar deleting destructor takes an implicit int parameter.
+ ArgTys.push_back(getContext().IntTy);
+ }
+ auto *CD = dyn_cast<CXXConstructorDecl>(MD);
+ if (!CD)
+ return;
+
+ // All parameters are already in place except is_most_derived, which goes
+ // after 'this' if it's variadic and last if it's not.
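+  // E.g. (sketch): 'A::A(int, ...)' becomes (this, is_most_derived, int, ...),
+  // while 'A::A(int)' becomes (this, int, is_most_derived).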
+
+ const CXXRecordDecl *Class = CD->getParent();
+ const FunctionProtoType *FPT = CD->getType()->castAs<FunctionProtoType>();
+ if (Class->getNumVBases()) {
+ if (FPT->isVariadic())
+ ArgTys.insert(ArgTys.begin() + 1, getContext().IntTy);
+ else
+ ArgTys.push_back(getContext().IntTy);
+ }
+}
+
+void MicrosoftCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
+ // The TU defining a dtor is only guaranteed to emit a base destructor. All
+ // other destructor variants are delegating thunks.
+ CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
+}
+
+CharUnits
+MicrosoftCXXABI::getVirtualFunctionPrologueThisAdjustment(GlobalDecl GD) {
+ GD = GD.getCanonicalDecl();
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+ GlobalDecl LookupGD = GD;
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ // Complete destructors take a pointer to the complete object as a
+ // parameter, thus don't need this adjustment.
+ if (GD.getDtorType() == Dtor_Complete)
+ return CharUnits();
+
+    // There's no Dtor_Base in the vftable, but it shares the this adjustment
+    // with the deleting one, so look that up instead.
+ LookupGD = GlobalDecl(DD, Dtor_Deleting);
+ }
+
+ MicrosoftVTableContext::MethodVFTableLocation ML =
+ CGM.getMicrosoftVTableContext().getMethodVFTableLocation(LookupGD);
+ CharUnits Adjustment = ML.VFPtrOffset;
+
+ // Normal virtual instance methods need to adjust from the vfptr that first
+ // defined the virtual method to the virtual base subobject, but destructors
+ // do not. The vector deleting destructor thunk applies this adjustment for
+ // us if necessary.
+ if (isa<CXXDestructorDecl>(MD))
+ Adjustment = CharUnits::Zero();
+
+ if (ML.VBase) {
+ const ASTRecordLayout &DerivedLayout =
+ getContext().getASTRecordLayout(MD->getParent());
+ Adjustment += DerivedLayout.getVBaseClassOffset(ML.VBase);
+ }
+
+ return Adjustment;
+}
+
+Address MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
+ CodeGenFunction &CGF, GlobalDecl GD, Address This,
+ bool VirtualCall) {
+ if (!VirtualCall) {
+ // If the call of a virtual function is not virtual, we just have to
+ // compensate for the adjustment the virtual function does in its prologue.
+ CharUnits Adjustment = getVirtualFunctionPrologueThisAdjustment(GD);
+ if (Adjustment.isZero())
+ return This;
+
+ This = CGF.Builder.CreateElementBitCast(This, CGF.Int8Ty);
+ assert(Adjustment.isPositive());
+ return CGF.Builder.CreateConstByteGEP(This, Adjustment);
+ }
+
+ GD = GD.getCanonicalDecl();
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+ GlobalDecl LookupGD = GD;
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ // Complete dtors take a pointer to the complete object,
+ // thus don't need adjustment.
+ if (GD.getDtorType() == Dtor_Complete)
+ return This;
+
+    // There's only Dtor_Deleting in the vftable, but it shares the this
+    // adjustment with the base one, so look up the deleting one instead.
+ LookupGD = GlobalDecl(DD, Dtor_Deleting);
+ }
+ MicrosoftVTableContext::MethodVFTableLocation ML =
+ CGM.getMicrosoftVTableContext().getMethodVFTableLocation(LookupGD);
+
+ CharUnits StaticOffset = ML.VFPtrOffset;
+
+ // Base destructors expect 'this' to point to the beginning of the base
+ // subobject, not the first vfptr that happens to contain the virtual dtor.
+ // However, we still need to apply the virtual base adjustment.
+ if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
+ StaticOffset = CharUnits::Zero();
+
+ Address Result = This;
+ if (ML.VBase) {
+ Result = CGF.Builder.CreateElementBitCast(Result, CGF.Int8Ty);
+
+ const CXXRecordDecl *Derived = MD->getParent();
+ const CXXRecordDecl *VBase = ML.VBase;
+ llvm::Value *VBaseOffset =
+ GetVirtualBaseClassOffset(CGF, Result, Derived, VBase);
+ llvm::Value *VBasePtr =
+ CGF.Builder.CreateInBoundsGEP(Result.getPointer(), VBaseOffset);
+ CharUnits VBaseAlign =
+ CGF.CGM.getVBaseAlignment(Result.getAlignment(), Derived, VBase);
+ Result = Address(VBasePtr, VBaseAlign);
+ }
+ if (!StaticOffset.isZero()) {
+ assert(StaticOffset.isPositive());
+ Result = CGF.Builder.CreateElementBitCast(Result, CGF.Int8Ty);
+ if (ML.VBase) {
+ // Non-virtual adjustment might result in a pointer outside the allocated
+ // object, e.g. if the final overrider class is laid out after the virtual
+ // base that declares a method in the most derived class.
+ // FIXME: Update the code that emits this adjustment in thunks prologues.
+ Result = CGF.Builder.CreateConstByteGEP(Result, StaticOffset);
+ } else {
+ Result = CGF.Builder.CreateConstInBoundsByteGEP(Result, StaticOffset);
+ }
+ }
+ return Result;
+}
+
+void MicrosoftCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params) {
+ ASTContext &Context = getContext();
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
+ assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
+ if (isa<CXXConstructorDecl>(MD) && MD->getParent()->getNumVBases()) {
+ ImplicitParamDecl *IsMostDerived
+ = ImplicitParamDecl::Create(Context, nullptr,
+ CGF.CurGD.getDecl()->getLocation(),
+ &Context.Idents.get("is_most_derived"),
+ Context.IntTy);
+ // The 'most_derived' parameter goes second if the ctor is variadic and last
+ // if it's not. Dtors can't be variadic.
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
+ if (FPT->isVariadic())
+ Params.insert(Params.begin() + 1, IsMostDerived);
+ else
+ Params.push_back(IsMostDerived);
+ getStructorImplicitParamDecl(CGF) = IsMostDerived;
+ } else if (isDeletingDtor(CGF.CurGD)) {
+ ImplicitParamDecl *ShouldDelete
+ = ImplicitParamDecl::Create(Context, nullptr,
+ CGF.CurGD.getDecl()->getLocation(),
+ &Context.Idents.get("should_call_delete"),
+ Context.IntTy);
+ Params.push_back(ShouldDelete);
+ getStructorImplicitParamDecl(CGF) = ShouldDelete;
+ }
+}
+
+llvm::Value *MicrosoftCXXABI::adjustThisParameterInVirtualFunctionPrologue(
+ CodeGenFunction &CGF, GlobalDecl GD, llvm::Value *This) {
+ // In this ABI, every virtual function takes a pointer to one of the
+ // subobjects that first defines it as the 'this' parameter, rather than a
+ // pointer to the final overrider subobject. Thus, we need to adjust it back
+ // to the final overrider subobject before use.
+ // See comments in the MicrosoftVFTableContext implementation for the details.
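+  // E.g. (sketch): if the slot was introduced by a base living at offset 8
+  // in the final overrider's class, the caller passes 'this' + 8 and we
+  // subtract 8 below to recover the overrider's 'this'.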
+ CharUnits Adjustment = getVirtualFunctionPrologueThisAdjustment(GD);
+ if (Adjustment.isZero())
+ return This;
+
+ unsigned AS = cast<llvm::PointerType>(This->getType())->getAddressSpace();
+ llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS),
+ *thisTy = This->getType();
+
+ This = CGF.Builder.CreateBitCast(This, charPtrTy);
+ assert(Adjustment.isPositive());
+ This = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, This,
+ -Adjustment.getQuantity());
+ return CGF.Builder.CreateBitCast(This, thisTy);
+}
+
+void MicrosoftCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
+ EmitThisParam(CGF);
+
+ /// If this is a function that the ABI specifies returns 'this', initialize
+ /// the return slot to 'this' at the start of the function.
+ ///
+ /// Unlike the setting of return types, this is done within the ABI
+ /// implementation instead of by clients of CGCXXABI because:
+ /// 1) getThisValue is currently protected
+ /// 2) in theory, an ABI could implement 'this' returns some other way;
+ /// HasThisReturn only specifies a contract, not the implementation
+ if (HasThisReturn(CGF.CurGD))
+ CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
+ else if (hasMostDerivedReturn(CGF.CurGD))
+ CGF.Builder.CreateStore(CGF.EmitCastToVoidPtr(getThisValue(CGF)),
+ CGF.ReturnValue);
+
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
+ if (isa<CXXConstructorDecl>(MD) && MD->getParent()->getNumVBases()) {
+ assert(getStructorImplicitParamDecl(CGF) &&
+ "no implicit parameter for a constructor with virtual bases?");
+ getStructorImplicitParamValue(CGF)
+ = CGF.Builder.CreateLoad(
+ CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)),
+ "is_most_derived");
+ }
+
+ if (isDeletingDtor(CGF.CurGD)) {
+ assert(getStructorImplicitParamDecl(CGF) &&
+ "no implicit parameter for a deleting destructor?");
+ getStructorImplicitParamValue(CGF)
+ = CGF.Builder.CreateLoad(
+ CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)),
+ "should_call_delete");
+ }
+}
+
+unsigned MicrosoftCXXABI::addImplicitConstructorArgs(
+ CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
+ bool ForVirtualBase, bool Delegating, CallArgList &Args) {
+ assert(Type == Ctor_Complete || Type == Ctor_Base);
+
+ // Check if we need a 'most_derived' parameter.
+ if (!D->getParent()->getNumVBases())
+ return 0;
+
+ // Add the 'most_derived' argument second if we are variadic or last if not.
+ const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
+ llvm::Value *MostDerivedArg =
+ llvm::ConstantInt::get(CGM.Int32Ty, Type == Ctor_Complete);
+ RValue RV = RValue::get(MostDerivedArg);
+ if (MostDerivedArg) {
+ if (FPT->isVariadic())
+ Args.insert(Args.begin() + 1,
+ CallArg(RV, getContext().IntTy, /*needscopy=*/false));
+ else
+ Args.add(RV, getContext().IntTy);
+ }
+
+ return 1; // Added one arg.
+}
+
+void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
+ const CXXDestructorDecl *DD,
+ CXXDtorType Type, bool ForVirtualBase,
+ bool Delegating, Address This) {
+ llvm::Value *Callee = CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type));
+
+ if (DD->isVirtual()) {
+ assert(Type != CXXDtorType::Dtor_Deleting &&
+ "The deleting destructor should only be called via a virtual call");
+ This = adjustThisArgumentForVirtualFunctionCall(CGF, GlobalDecl(DD, Type),
+ This, false);
+ }
+
+ CGF.EmitCXXStructorCall(DD, Callee, ReturnValueSlot(), This.getPointer(),
+ /*ImplicitParam=*/nullptr,
+ /*ImplicitParamTy=*/QualType(), nullptr,
+ getFromDtorType(Type));
+}
+
+void MicrosoftCXXABI::emitVTableBitSetEntries(VPtrInfo *Info,
+ const CXXRecordDecl *RD,
+ llvm::GlobalVariable *VTable) {
+ if (!getContext().getLangOpts().Sanitize.has(SanitizerKind::CFIVCall) &&
+ !getContext().getLangOpts().Sanitize.has(SanitizerKind::CFINVCall) &&
+ !getContext().getLangOpts().Sanitize.has(SanitizerKind::CFIDerivedCast) &&
+ !getContext().getLangOpts().Sanitize.has(SanitizerKind::CFIUnrelatedCast))
+ return;
+
+ llvm::NamedMDNode *BitsetsMD =
+ CGM.getModule().getOrInsertNamedMetadata("llvm.bitsets");
+
+ // The location of the first virtual function pointer in the virtual table,
+ // aka the "address point" on Itanium. This is at offset 0 if RTTI is
+ // disabled, or sizeof(void*) if RTTI is enabled.
+ CharUnits AddressPoint =
+ getContext().getLangOpts().RTTIData
+ ? getContext().toCharUnitsFromBits(
+ getContext().getTargetInfo().getPointerWidth(0))
+ : CharUnits::Zero();
+
+ if (Info->PathToBaseWithVPtr.empty()) {
+ if (!CGM.IsCFIBlacklistedRecord(RD))
+ CGM.CreateVTableBitSetEntry(BitsetsMD, VTable, AddressPoint, RD);
+ return;
+ }
+
+ // Add a bitset entry for the least derived base belonging to this vftable.
+ if (!CGM.IsCFIBlacklistedRecord(Info->PathToBaseWithVPtr.back()))
+ CGM.CreateVTableBitSetEntry(BitsetsMD, VTable, AddressPoint,
+ Info->PathToBaseWithVPtr.back());
+
+ // Add a bitset entry for each derived class that is laid out at the same
+ // offset as the least derived base.
+ for (unsigned I = Info->PathToBaseWithVPtr.size() - 1; I != 0; --I) {
+ const CXXRecordDecl *DerivedRD = Info->PathToBaseWithVPtr[I - 1];
+ const CXXRecordDecl *BaseRD = Info->PathToBaseWithVPtr[I];
+
+ const ASTRecordLayout &Layout =
+ getContext().getASTRecordLayout(DerivedRD);
+ CharUnits Offset;
+ auto VBI = Layout.getVBaseOffsetsMap().find(BaseRD);
+ if (VBI == Layout.getVBaseOffsetsMap().end())
+ Offset = Layout.getBaseClassOffset(BaseRD);
+ else
+ Offset = VBI->second.VBaseOffset;
+ if (!Offset.isZero())
+ return;
+ if (!CGM.IsCFIBlacklistedRecord(DerivedRD))
+ CGM.CreateVTableBitSetEntry(BitsetsMD, VTable, AddressPoint, DerivedRD);
+ }
+
+ // Finally do the same for the most derived class.
+ if (Info->FullOffsetInMDC.isZero() && !CGM.IsCFIBlacklistedRecord(RD))
+ CGM.CreateVTableBitSetEntry(BitsetsMD, VTable, AddressPoint, RD);
+}
+
+void MicrosoftCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
+ const CXXRecordDecl *RD) {
+ MicrosoftVTableContext &VFTContext = CGM.getMicrosoftVTableContext();
+ const VPtrInfoVector &VFPtrs = VFTContext.getVFPtrOffsets(RD);
+
+ for (VPtrInfo *Info : VFPtrs) {
+ llvm::GlobalVariable *VTable = getAddrOfVTable(RD, Info->FullOffsetInMDC);
+ if (VTable->hasInitializer())
+ continue;
+
+ llvm::Constant *RTTI = getContext().getLangOpts().RTTIData
+ ? getMSCompleteObjectLocator(RD, Info)
+ : nullptr;
+
+ const VTableLayout &VTLayout =
+ VFTContext.getVFTableLayout(RD, Info->FullOffsetInMDC);
+ llvm::Constant *Init = CGVT.CreateVTableInitializer(
+ RD, VTLayout.vtable_component_begin(),
+ VTLayout.getNumVTableComponents(), VTLayout.vtable_thunk_begin(),
+ VTLayout.getNumVTableThunks(), RTTI);
+
+ VTable->setInitializer(Init);
+
+ emitVTableBitSetEntries(Info, RD, VTable);
+ }
+}
+
+bool MicrosoftCXXABI::isVirtualOffsetNeededForVTableField(
+ CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
+ return Vptr.NearestVBase != nullptr;
+}
+
+llvm::Value *MicrosoftCXXABI::getVTableAddressPointInStructor(
+ CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase) {
+ llvm::Constant *VTableAddressPoint = getVTableAddressPoint(Base, VTableClass);
+ if (!VTableAddressPoint) {
+ assert(Base.getBase()->getNumVBases() &&
+ !getContext().getASTRecordLayout(Base.getBase()).hasOwnVFPtr());
+ }
+ return VTableAddressPoint;
+}
+
+static void mangleVFTableName(MicrosoftMangleContext &MangleContext,
+ const CXXRecordDecl *RD, const VPtrInfo *VFPtr,
+ SmallString<256> &Name) {
+ llvm::raw_svector_ostream Out(Name);
+ MangleContext.mangleCXXVFTable(RD, VFPtr->MangledPath, Out);
+}
+
+llvm::Constant *
+MicrosoftCXXABI::getVTableAddressPoint(BaseSubobject Base,
+ const CXXRecordDecl *VTableClass) {
+ (void)getAddrOfVTable(VTableClass, Base.getBaseOffset());
+ VFTableIdTy ID(VTableClass, Base.getBaseOffset());
+ return VFTablesMap[ID];
+}
+
+llvm::Constant *MicrosoftCXXABI::getVTableAddressPointForConstExpr(
+ BaseSubobject Base, const CXXRecordDecl *VTableClass) {
+ llvm::Constant *VFTable = getVTableAddressPoint(Base, VTableClass);
+ assert(VFTable && "Couldn't find a vftable for the given base?");
+ return VFTable;
+}
+
+llvm::GlobalVariable *MicrosoftCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
+ CharUnits VPtrOffset) {
+ // getAddrOfVTable may return 0 if asked to get an address of a vtable which
+ // shouldn't be used in the given record type. We want to cache this result in
+ // VFTablesMap, thus a simple zero check is not sufficient.
+
+ VFTableIdTy ID(RD, VPtrOffset);
+ VTablesMapTy::iterator I;
+ bool Inserted;
+ std::tie(I, Inserted) = VTablesMap.insert(std::make_pair(ID, nullptr));
+ if (!Inserted)
+ return I->second;
+
+ llvm::GlobalVariable *&VTable = I->second;
+
+ MicrosoftVTableContext &VTContext = CGM.getMicrosoftVTableContext();
+ const VPtrInfoVector &VFPtrs = VTContext.getVFPtrOffsets(RD);
+
+ if (DeferredVFTables.insert(RD).second) {
+ // We haven't processed this record type before.
+ // Queue up this v-table for possible deferred emission.
+ CGM.addDeferredVTable(RD);
+
+#ifndef NDEBUG
+ // Create all the vftables at once in order to make sure each vftable has
+ // a unique mangled name.
+ llvm::StringSet<> ObservedMangledNames;
+ for (size_t J = 0, F = VFPtrs.size(); J != F; ++J) {
+ SmallString<256> Name;
+ mangleVFTableName(getMangleContext(), RD, VFPtrs[J], Name);
+ if (!ObservedMangledNames.insert(Name.str()).second)
+ llvm_unreachable("Already saw this mangling before?");
+ }
+#endif
+ }
+
+ VPtrInfo *const *VFPtrI =
+ std::find_if(VFPtrs.begin(), VFPtrs.end(), [&](VPtrInfo *VPI) {
+ return VPI->FullOffsetInMDC == VPtrOffset;
+ });
+ if (VFPtrI == VFPtrs.end()) {
+ VFTablesMap[ID] = nullptr;
+ return nullptr;
+ }
+ VPtrInfo *VFPtr = *VFPtrI;
+
+ SmallString<256> VFTableName;
+ mangleVFTableName(getMangleContext(), RD, VFPtr, VFTableName);
+
+ llvm::GlobalValue::LinkageTypes VFTableLinkage = CGM.getVTableLinkage(RD);
+ bool VFTableComesFromAnotherTU =
+ llvm::GlobalValue::isAvailableExternallyLinkage(VFTableLinkage) ||
+ llvm::GlobalValue::isExternalLinkage(VFTableLinkage);
+  bool VTableAliasIsRequired =
+ !VFTableComesFromAnotherTU && getContext().getLangOpts().RTTIData;
+
+ if (llvm::GlobalValue *VFTable =
+ CGM.getModule().getNamedGlobal(VFTableName)) {
+ VFTablesMap[ID] = VFTable;
+    VTable = VTableAliasIsRequired
+ ? cast<llvm::GlobalVariable>(
+ cast<llvm::GlobalAlias>(VFTable)->getBaseObject())
+ : cast<llvm::GlobalVariable>(VFTable);
+ return VTable;
+ }
+
+ uint64_t NumVTableSlots =
+ VTContext.getVFTableLayout(RD, VFPtr->FullOffsetInMDC)
+ .getNumVTableComponents();
+ llvm::GlobalValue::LinkageTypes VTableLinkage =
+      VTableAliasIsRequired ? llvm::GlobalValue::PrivateLinkage : VFTableLinkage;
+
+  StringRef VTableName =
+      VTableAliasIsRequired ? StringRef() : VFTableName.str();
+
+ llvm::ArrayType *VTableType =
+ llvm::ArrayType::get(CGM.Int8PtrTy, NumVTableSlots);
+
+ // Create a backing variable for the contents of VTable. The VTable may
+ // or may not include space for a pointer to RTTI data.
+ llvm::GlobalValue *VFTable;
+ VTable = new llvm::GlobalVariable(CGM.getModule(), VTableType,
+ /*isConstant=*/true, VTableLinkage,
+ /*Initializer=*/nullptr, VTableName);
+ VTable->setUnnamedAddr(true);
+
+ llvm::Comdat *C = nullptr;
+ if (!VFTableComesFromAnotherTU &&
+ (llvm::GlobalValue::isWeakForLinker(VFTableLinkage) ||
+ (llvm::GlobalValue::isLocalLinkage(VFTableLinkage) &&
+         VTableAliasIsRequired)))
+ C = CGM.getModule().getOrInsertComdat(VFTableName.str());
+
+ // Only insert a pointer into the VFTable for RTTI data if we are not
+ // importing it. We never reference the RTTI data directly so there is no
+ // need to make room for it.
+  if (VTableAliasIsRequired) {
+ llvm::Value *GEPIndices[] = {llvm::ConstantInt::get(CGM.IntTy, 0),
+ llvm::ConstantInt::get(CGM.IntTy, 1)};
+ // Create a GEP which points just after the first entry in the VFTable,
+ // this should be the location of the first virtual method.
+ llvm::Constant *VTableGEP = llvm::ConstantExpr::getInBoundsGetElementPtr(
+ VTable->getValueType(), VTable, GEPIndices);
+ if (llvm::GlobalValue::isWeakForLinker(VFTableLinkage)) {
+ VFTableLinkage = llvm::GlobalValue::ExternalLinkage;
+ if (C)
+ C->setSelectionKind(llvm::Comdat::Largest);
+ }
+ VFTable = llvm::GlobalAlias::create(CGM.Int8PtrTy,
+ /*AddressSpace=*/0, VFTableLinkage,
+ VFTableName.str(), VTableGEP,
+ &CGM.getModule());
+ VFTable->setUnnamedAddr(true);
+ } else {
+ // We don't need a GlobalAlias to be a symbol for the VTable if we won't
+ // be referencing any RTTI data.
+ // The GlobalVariable will end up being an appropriate definition of the
+ // VFTable.
+ VFTable = VTable;
+ }
+ if (C)
+ VTable->setComdat(C);
+
+ if (RD->hasAttr<DLLImportAttr>())
+ VFTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ else if (RD->hasAttr<DLLExportAttr>())
+ VFTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+
+ VFTablesMap[ID] = VFTable;
+ return VTable;
+}
+
+// Compute the identity of the most derived class whose virtual table is located
+// at the given offset into RD.
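+// E.g. (sketch): in 'struct C : A, B' with polymorphic A and B, B's subobject
+// (and its vftable) might sit at offset 8 into C; we recurse into whichever
+// base lies at the largest offset that is still <= Offset.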
+static const CXXRecordDecl *getClassAtVTableLocation(ASTContext &Ctx,
+ const CXXRecordDecl *RD,
+ CharUnits Offset) {
+ if (Offset.isZero())
+ return RD;
+
+ const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
+ const CXXRecordDecl *MaxBase = nullptr;
+ CharUnits MaxBaseOffset;
+ for (auto &&B : RD->bases()) {
+ const CXXRecordDecl *Base = B.getType()->getAsCXXRecordDecl();
+ CharUnits BaseOffset = Layout.getBaseClassOffset(Base);
+ if (BaseOffset <= Offset && BaseOffset >= MaxBaseOffset) {
+ MaxBase = Base;
+ MaxBaseOffset = BaseOffset;
+ }
+ }
+ for (auto &&B : RD->vbases()) {
+ const CXXRecordDecl *Base = B.getType()->getAsCXXRecordDecl();
+ CharUnits BaseOffset = Layout.getVBaseClassOffset(Base);
+ if (BaseOffset <= Offset && BaseOffset >= MaxBaseOffset) {
+ MaxBase = Base;
+ MaxBaseOffset = BaseOffset;
+ }
+ }
+ assert(MaxBase);
+ return getClassAtVTableLocation(Ctx, MaxBase, Offset - MaxBaseOffset);
+}
+
+// Compute the identity of the most derived class whose virtual table is located
+// at the MethodVFTableLocation ML.
+static const CXXRecordDecl *
+getClassAtVTableLocation(ASTContext &Ctx, GlobalDecl GD,
+ MicrosoftVTableContext::MethodVFTableLocation &ML) {
+ const CXXRecordDecl *RD = ML.VBase;
+ if (!RD)
+ RD = cast<CXXMethodDecl>(GD.getDecl())->getParent();
+
+ return getClassAtVTableLocation(Ctx, RD, ML.VFPtrOffset);
+}
+
+llvm::Value *MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
+ GlobalDecl GD,
+ Address This,
+ llvm::Type *Ty,
+ SourceLocation Loc) {
+ GD = GD.getCanonicalDecl();
+ CGBuilderTy &Builder = CGF.Builder;
+
+ Ty = Ty->getPointerTo()->getPointerTo();
+ Address VPtr =
+ adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
+
+ auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
+ llvm::Value *VTable = CGF.GetVTablePtr(VPtr, Ty, MethodDecl->getParent());
+
+ MicrosoftVTableContext::MethodVFTableLocation ML =
+ CGM.getMicrosoftVTableContext().getMethodVFTableLocation(GD);
+ if (CGF.SanOpts.has(SanitizerKind::CFIVCall))
+ CGF.EmitVTablePtrCheck(getClassAtVTableLocation(getContext(), GD, ML),
+ VTable, CodeGenFunction::CFITCK_VCall, Loc);
+
+ llvm::Value *VFuncPtr =
+ Builder.CreateConstInBoundsGEP1_64(VTable, ML.Index, "vfn");
+ return Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
+}
+
+llvm::Value *MicrosoftCXXABI::EmitVirtualDestructorCall(
+ CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
+ Address This, const CXXMemberCallExpr *CE) {
+ assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
+ assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
+
+ // We have only one destructor in the vftable but can get both behaviors
+ // by passing an implicit int parameter.
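+  // E.g. (sketch): the implicit param is 1 for Dtor_Deleting, so the callee
+  // frees the object with operator delete after destroying it; 0 destroys
+  // the object without freeing it.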
+ GlobalDecl GD(Dtor, Dtor_Deleting);
+ const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
+ Dtor, StructorType::Deleting);
+ llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
+ llvm::Value *Callee = getVirtualFunctionPointer(
+ CGF, GD, This, Ty, CE ? CE->getLocStart() : SourceLocation());
+
+ ASTContext &Context = getContext();
+ llvm::Value *ImplicitParam = llvm::ConstantInt::get(
+ llvm::IntegerType::getInt32Ty(CGF.getLLVMContext()),
+ DtorType == Dtor_Deleting);
+
+ This = adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
+ RValue RV = CGF.EmitCXXStructorCall(Dtor, Callee, ReturnValueSlot(),
+ This.getPointer(),
+ ImplicitParam, Context.IntTy, CE,
+ StructorType::Deleting);
+ return RV.getScalarVal();
+}
+
+const VBTableGlobals &
+MicrosoftCXXABI::enumerateVBTables(const CXXRecordDecl *RD) {
+ // At this layer, we can key the cache off of a single class, which is much
+ // easier than caching each vbtable individually.
+ llvm::DenseMap<const CXXRecordDecl*, VBTableGlobals>::iterator Entry;
+ bool Added;
+ std::tie(Entry, Added) =
+ VBTablesMap.insert(std::make_pair(RD, VBTableGlobals()));
+ VBTableGlobals &VBGlobals = Entry->second;
+ if (!Added)
+ return VBGlobals;
+
+ MicrosoftVTableContext &Context = CGM.getMicrosoftVTableContext();
+ VBGlobals.VBTables = &Context.enumerateVBTables(RD);
+
+ // Cache the globals for all vbtables so we don't have to recompute the
+ // mangled names.
+ llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
+ for (VPtrInfoVector::const_iterator I = VBGlobals.VBTables->begin(),
+ E = VBGlobals.VBTables->end();
+ I != E; ++I) {
+ VBGlobals.Globals.push_back(getAddrOfVBTable(**I, RD, Linkage));
+ }
+
+ return VBGlobals;
+}
+
+llvm::Function *MicrosoftCXXABI::EmitVirtualMemPtrThunk(
+ const CXXMethodDecl *MD,
+ const MicrosoftVTableContext::MethodVFTableLocation &ML) {
+ assert(!isa<CXXConstructorDecl>(MD) && !isa<CXXDestructorDecl>(MD) &&
+ "can't form pointers to ctors or virtual dtors");
+
+ // Calculate the mangled name.
+ SmallString<256> ThunkName;
+ llvm::raw_svector_ostream Out(ThunkName);
+ getMangleContext().mangleVirtualMemPtrThunk(MD, Out);
+
+ // If the thunk has been generated previously, just return it.
+ if (llvm::GlobalValue *GV = CGM.getModule().getNamedValue(ThunkName))
+ return cast<llvm::Function>(GV);
+
+ // Create the llvm::Function.
+ const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeMSMemberPointerThunk(MD);
+ llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo);
+ llvm::Function *ThunkFn =
+ llvm::Function::Create(ThunkTy, llvm::Function::ExternalLinkage,
+ ThunkName.str(), &CGM.getModule());
+ assert(ThunkFn->getName() == ThunkName && "name was uniqued!");
+
+ ThunkFn->setLinkage(MD->isExternallyVisible()
+ ? llvm::GlobalValue::LinkOnceODRLinkage
+ : llvm::GlobalValue::InternalLinkage);
+ if (MD->isExternallyVisible())
+ ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName()));
+
+ CGM.SetLLVMFunctionAttributes(MD, FnInfo, ThunkFn);
+ CGM.SetLLVMFunctionAttributesForDefinition(MD, ThunkFn);
+
+ // Add the "thunk" attribute so that LLVM knows that the return type is
+ // meaningless. These thunks can be used to call functions with differing
+ // return types, and the caller is required to cast the prototype
+ // appropriately to extract the correct value.
+ ThunkFn->addFnAttr("thunk");
+
+ // These thunks can be compared, so they are not unnamed.
+ ThunkFn->setUnnamedAddr(false);
+
+ // Start codegen.
+ CodeGenFunction CGF(CGM);
+ CGF.CurGD = GlobalDecl(MD);
+ CGF.CurFuncIsThunk = true;
+
+ // Build FunctionArgs, but only include the implicit 'this' parameter
+ // declaration.
+ FunctionArgList FunctionArgs;
+ buildThisParam(CGF, FunctionArgs);
+
+ // Start defining the function.
+ CGF.StartFunction(GlobalDecl(), FnInfo.getReturnType(), ThunkFn, FnInfo,
+ FunctionArgs, MD->getLocation(), SourceLocation());
+ EmitThisParam(CGF);
+
+  // Load the vfptr and then the callee from the vftable. The caller is
+  // expected to have adjusted 'this' so that the vfptr is at offset zero.
+ llvm::Value *VTable = CGF.GetVTablePtr(
+ getThisAddress(CGF), ThunkTy->getPointerTo()->getPointerTo(), MD->getParent());
+
+ llvm::Value *VFuncPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTable, ML.Index, "vfn");
+ llvm::Value *Callee =
+ CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
+
+ CGF.EmitMustTailThunk(MD, getThisValue(CGF), Callee);
+
+ return ThunkFn;
+}
+
+void MicrosoftCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
+ const VBTableGlobals &VBGlobals = enumerateVBTables(RD);
+ for (unsigned I = 0, E = VBGlobals.VBTables->size(); I != E; ++I) {
+ const VPtrInfo *VBT = (*VBGlobals.VBTables)[I];
+ llvm::GlobalVariable *GV = VBGlobals.Globals[I];
+ if (GV->isDeclaration())
+ emitVBTableDefinition(*VBT, RD, GV);
+ }
+}
+
+llvm::GlobalVariable *
+MicrosoftCXXABI::getAddrOfVBTable(const VPtrInfo &VBT, const CXXRecordDecl *RD,
+ llvm::GlobalVariable::LinkageTypes Linkage) {
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ getMangleContext().mangleCXXVBTable(RD, VBT.MangledPath, Out);
+ StringRef Name = OutName.str();
+
+ llvm::ArrayType *VBTableType =
+ llvm::ArrayType::get(CGM.IntTy, 1 + VBT.ReusingBase->getNumVBases());
+
+ assert(!CGM.getModule().getNamedGlobal(Name) &&
+ "vbtable with this name already exists: mangling bug?");
+ llvm::GlobalVariable *GV =
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, VBTableType, Linkage);
+ GV->setUnnamedAddr(true);
+
+ if (RD->hasAttr<DLLImportAttr>())
+ GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ else if (RD->hasAttr<DLLExportAttr>())
+ GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+
+ if (!GV->hasExternalLinkage())
+ emitVBTableDefinition(VBT, RD, GV);
+
+ return GV;
+}
+
+void MicrosoftCXXABI::emitVBTableDefinition(const VPtrInfo &VBT,
+ const CXXRecordDecl *RD,
+ llvm::GlobalVariable *GV) const {
+ const CXXRecordDecl *ReusingBase = VBT.ReusingBase;
+
+ assert(RD->getNumVBases() && ReusingBase->getNumVBases() &&
+ "should only emit vbtables for classes with vbtables");
+
+ const ASTRecordLayout &BaseLayout =
+ getContext().getASTRecordLayout(VBT.BaseWithVPtr);
+ const ASTRecordLayout &DerivedLayout = getContext().getASTRecordLayout(RD);
+
+ SmallVector<llvm::Constant *, 4> Offsets(1 + ReusingBase->getNumVBases(),
+ nullptr);
+
+ // The offset from ReusingBase's vbptr to itself always leads.
+ CharUnits VBPtrOffset = BaseLayout.getVBPtrOffset();
+ Offsets[0] = llvm::ConstantInt::get(CGM.IntTy, -VBPtrOffset.getQuantity());
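+  // E.g. (sketch): if the vbptr lives at offset 4 within BaseWithVPtr, slot 0
+  // holds -4, the displacement from the vbptr back to the subobject start.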
+
+ MicrosoftVTableContext &Context = CGM.getMicrosoftVTableContext();
+ for (const auto &I : ReusingBase->vbases()) {
+ const CXXRecordDecl *VBase = I.getType()->getAsCXXRecordDecl();
+ CharUnits Offset = DerivedLayout.getVBaseClassOffset(VBase);
+ assert(!Offset.isNegative());
+
+ // Make it relative to the subobject vbptr.
+ CharUnits CompleteVBPtrOffset = VBT.NonVirtualOffset + VBPtrOffset;
+ if (VBT.getVBaseWithVPtr())
+ CompleteVBPtrOffset +=
+ DerivedLayout.getVBaseClassOffset(VBT.getVBaseWithVPtr());
+ Offset -= CompleteVBPtrOffset;
+
+ unsigned VBIndex = Context.getVBTableIndex(ReusingBase, VBase);
+ assert(Offsets[VBIndex] == nullptr && "The same vbindex seen twice?");
+ Offsets[VBIndex] = llvm::ConstantInt::get(CGM.IntTy, Offset.getQuantity());
+ }
+
+ assert(Offsets.size() ==
+ cast<llvm::ArrayType>(cast<llvm::PointerType>(GV->getType())
+ ->getElementType())->getNumElements());
+ llvm::ArrayType *VBTableType =
+ llvm::ArrayType::get(CGM.IntTy, Offsets.size());
+ llvm::Constant *Init = llvm::ConstantArray::get(VBTableType, Offsets);
+ GV->setInitializer(Init);
+}
+
+llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
+ Address This,
+ const ThisAdjustment &TA) {
+ if (TA.isEmpty())
+ return This.getPointer();
+
+ This = CGF.Builder.CreateElementBitCast(This, CGF.Int8Ty);
+
+ llvm::Value *V;
+ if (TA.Virtual.isEmpty()) {
+ V = This.getPointer();
+ } else {
+ assert(TA.Virtual.Microsoft.VtordispOffset < 0);
+ // Adjust the this argument based on the vtordisp value.
+ Address VtorDispPtr =
+ CGF.Builder.CreateConstInBoundsByteGEP(This,
+ CharUnits::fromQuantity(TA.Virtual.Microsoft.VtordispOffset));
+ VtorDispPtr = CGF.Builder.CreateElementBitCast(VtorDispPtr, CGF.Int32Ty);
+ llvm::Value *VtorDisp = CGF.Builder.CreateLoad(VtorDispPtr, "vtordisp");
+ V = CGF.Builder.CreateGEP(This.getPointer(),
+ CGF.Builder.CreateNeg(VtorDisp));
+
+ // Unfortunately, having applied the vtordisp means that we no
+ // longer really have a known alignment for the vbptr step.
+ // We'll assume the vbptr is pointer-aligned.
+
+ if (TA.Virtual.Microsoft.VBPtrOffset) {
+ // If the final overrider is defined in a virtual base other than the one
+ // that holds the vfptr, we have to use a vtordispex thunk which looks up
+ // the vbtable of the derived class.
+ assert(TA.Virtual.Microsoft.VBPtrOffset > 0);
+ assert(TA.Virtual.Microsoft.VBOffsetOffset >= 0);
+ llvm::Value *VBPtr;
+ llvm::Value *VBaseOffset =
+ GetVBaseOffsetFromVBPtr(CGF, Address(V, CGF.getPointerAlign()),
+ -TA.Virtual.Microsoft.VBPtrOffset,
+ TA.Virtual.Microsoft.VBOffsetOffset, &VBPtr);
+ V = CGF.Builder.CreateInBoundsGEP(VBPtr, VBaseOffset);
+ }
+ }
+
+ if (TA.NonVirtual) {
+ // Non-virtual adjustment might result in a pointer outside the allocated
+ // object, e.g. if the final overrider class is laid out after the virtual
+ // base that declares a method in the most derived class.
+ V = CGF.Builder.CreateConstGEP1_32(V, TA.NonVirtual);
+ }
+
+ // Don't need to bitcast back, the call CodeGen will handle this.
+ return V;
+}
+
+llvm::Value *
+MicrosoftCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
+ const ReturnAdjustment &RA) {
+ if (RA.isEmpty())
+ return Ret.getPointer();
+
+ auto OrigTy = Ret.getType();
+ Ret = CGF.Builder.CreateElementBitCast(Ret, CGF.Int8Ty);
+
+ llvm::Value *V = Ret.getPointer();
+ if (RA.Virtual.Microsoft.VBIndex) {
+ assert(RA.Virtual.Microsoft.VBIndex > 0);
+ int32_t IntSize = CGF.getIntSize().getQuantity();
+ llvm::Value *VBPtr;
+ llvm::Value *VBaseOffset =
+ GetVBaseOffsetFromVBPtr(CGF, Ret, RA.Virtual.Microsoft.VBPtrOffset,
+ IntSize * RA.Virtual.Microsoft.VBIndex, &VBPtr);
+ V = CGF.Builder.CreateInBoundsGEP(VBPtr, VBaseOffset);
+ }
+
+ if (RA.NonVirtual)
+ V = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, V, RA.NonVirtual);
+
+ // Cast back to the original type.
+ return CGF.Builder.CreateBitCast(V, OrigTy);
+}
+
+bool MicrosoftCXXABI::requiresArrayCookie(const CXXDeleteExpr *expr,
+ QualType elementType) {
+ // Microsoft seems to completely ignore the possibility of a
+ // two-argument usual deallocation function.
+ return elementType.isDestructedType();
+}
+
+bool MicrosoftCXXABI::requiresArrayCookie(const CXXNewExpr *expr) {
+ // Microsoft seems to completely ignore the possibility of a
+ // two-argument usual deallocation function.
+ return expr->getAllocatedType().isDestructedType();
+}
+
+CharUnits MicrosoftCXXABI::getArrayCookieSizeImpl(QualType type) {
+ // The array cookie is always a size_t; we then pad that out to the
+ // alignment of the element type.
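+  // E.g. (sketch): on 32-bit x86, 'new double[n]' uses an 8-byte cookie:
+  // sizeof(size_t) is 4, but the doubles after it must stay 8-byte aligned.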
+ ASTContext &Ctx = getContext();
+ return std::max(Ctx.getTypeSizeInChars(Ctx.getSizeType()),
+ Ctx.getTypeAlignInChars(type));
+}
+
+llvm::Value *MicrosoftCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
+ Address allocPtr,
+ CharUnits cookieSize) {
+ Address numElementsPtr =
+ CGF.Builder.CreateElementBitCast(allocPtr, CGF.SizeTy);
+ return CGF.Builder.CreateLoad(numElementsPtr);
+}
+
+Address MicrosoftCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ Address newPtr,
+ llvm::Value *numElements,
+ const CXXNewExpr *expr,
+ QualType elementType) {
+ assert(requiresArrayCookie(expr));
+
+ // The size of the cookie.
+ CharUnits cookieSize = getArrayCookieSizeImpl(elementType);
+
+ // Compute an offset to the cookie.
+ Address cookiePtr = newPtr;
+
+ // Write the number of elements into the appropriate slot.
+ Address numElementsPtr
+ = CGF.Builder.CreateElementBitCast(cookiePtr, CGF.SizeTy);
+ CGF.Builder.CreateStore(numElements, numElementsPtr);
+
+ // Finally, compute a pointer to the actual data buffer by skipping
+ // over the cookie completely.
+ return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
+}
+
+static void emitGlobalDtorWithTLRegDtor(CodeGenFunction &CGF, const VarDecl &VD,
+ llvm::Constant *Dtor,
+ llvm::Constant *Addr) {
+ // Create a function which calls the destructor.
+ llvm::Constant *DtorStub = CGF.createAtExitStub(VD, Dtor, Addr);
+
+ // extern "C" int __tlregdtor(void (*f)(void));
+ llvm::FunctionType *TLRegDtorTy = llvm::FunctionType::get(
+ CGF.IntTy, DtorStub->getType(), /*IsVarArg=*/false);
+
+ llvm::Constant *TLRegDtor =
+ CGF.CGM.CreateRuntimeFunction(TLRegDtorTy, "__tlregdtor");
+ if (llvm::Function *TLRegDtorFn = dyn_cast<llvm::Function>(TLRegDtor))
+ TLRegDtorFn->setDoesNotThrow();
+
+ CGF.EmitNounwindRuntimeCall(TLRegDtor, DtorStub);
+}
+
+void MicrosoftCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *Dtor,
+ llvm::Constant *Addr) {
+ if (D.getTLSKind())
+ return emitGlobalDtorWithTLRegDtor(CGF, D, Dtor, Addr);
+
+ // The default behavior is to use atexit.
+ CGF.registerGlobalDtorWithAtExit(D, Dtor, Addr);
+}
+
+void MicrosoftCXXABI::EmitThreadLocalInitFuncs(
+ CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
+ ArrayRef<llvm::Function *> CXXThreadLocalInits,
+ ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
+ // This will create a GV in the .CRT$XDU section. It will point to our
+ // initialization function. The CRT will call all of these function
+ // pointers at start-up time and, eventually, at thread-creation time.
+ auto AddToXDU = [&CGM](llvm::Function *InitFunc) {
+ llvm::GlobalVariable *InitFuncPtr = new llvm::GlobalVariable(
+ CGM.getModule(), InitFunc->getType(), /*IsConstant=*/true,
+ llvm::GlobalVariable::InternalLinkage, InitFunc,
+ Twine(InitFunc->getName(), "$initializer$"));
+ InitFuncPtr->setSection(".CRT$XDU");
+    // This variable has discardable linkage, so we have to add it to
+    // @llvm.used to ensure it won't get discarded.
+ CGM.addUsedGlobal(InitFuncPtr);
+ return InitFuncPtr;
+ };
+
+ std::vector<llvm::Function *> NonComdatInits;
+ for (size_t I = 0, E = CXXThreadLocalInitVars.size(); I != E; ++I) {
+ llvm::GlobalVariable *GV = cast<llvm::GlobalVariable>(
+ CGM.GetGlobalValue(CGM.getMangledName(CXXThreadLocalInitVars[I])));
+ llvm::Function *F = CXXThreadLocalInits[I];
+
+ // If the GV is already in a comdat group, then we have to join it.
+ if (llvm::Comdat *C = GV->getComdat())
+ AddToXDU(F)->setComdat(C);
+ else
+ NonComdatInits.push_back(F);
+ }
+
+ if (!NonComdatInits.empty()) {
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ llvm::Function *InitFunc = CGM.CreateGlobalInitOrDestructFunction(
+ FTy, "__tls_init", CGM.getTypes().arrangeNullaryFunction(),
+ SourceLocation(), /*TLS=*/true);
+ CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(InitFunc, NonComdatInits);
+
+ AddToXDU(InitFunc);
+ }
+}
+
+LValue MicrosoftCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
+ const VarDecl *VD,
+ QualType LValType) {
+ CGF.CGM.ErrorUnsupported(VD, "thread wrappers");
+ return LValue();
+}
+
+static ConstantAddress getInitThreadEpochPtr(CodeGenModule &CGM) {
+ StringRef VarName("_Init_thread_epoch");
+ CharUnits Align = CGM.getIntAlign();
+ if (auto *GV = CGM.getModule().getNamedGlobal(VarName))
+ return ConstantAddress(GV, Align);
+ auto *GV = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.IntTy,
+ /*Constant=*/false, llvm::GlobalVariable::ExternalLinkage,
+ /*Initializer=*/nullptr, VarName,
+ /*InsertBefore=*/nullptr, llvm::GlobalVariable::GeneralDynamicTLSModel);
+ GV->setAlignment(Align.getQuantity());
+ return ConstantAddress(GV, Align);
+}
+
+static llvm::Constant *getInitThreadHeaderFn(CodeGenModule &CGM) {
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
+ CGM.IntTy->getPointerTo(), /*isVarArg=*/false);
+ return CGM.CreateRuntimeFunction(
+ FTy, "_Init_thread_header",
+ llvm::AttributeSet::get(CGM.getLLVMContext(),
+ llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::NoUnwind));
+}
+
+static llvm::Constant *getInitThreadFooterFn(CodeGenModule &CGM) {
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
+ CGM.IntTy->getPointerTo(), /*isVarArg=*/false);
+ return CGM.CreateRuntimeFunction(
+ FTy, "_Init_thread_footer",
+ llvm::AttributeSet::get(CGM.getLLVMContext(),
+ llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::NoUnwind));
+}
+
+static llvm::Constant *getInitThreadAbortFn(CodeGenModule &CGM) {
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
+ CGM.IntTy->getPointerTo(), /*isVarArg=*/false);
+ return CGM.CreateRuntimeFunction(
+ FTy, "_Init_thread_abort",
+ llvm::AttributeSet::get(CGM.getLLVMContext(),
+ llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::NoUnwind));
+}
+
+namespace {
+struct ResetGuardBit final : EHScopeStack::Cleanup {
+ Address Guard;
+ unsigned GuardNum;
+ ResetGuardBit(Address Guard, unsigned GuardNum)
+ : Guard(Guard), GuardNum(GuardNum) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ // Reset the bit in the mask so that the static variable may be
+ // reinitialized.
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::LoadInst *LI = Builder.CreateLoad(Guard);
+ llvm::ConstantInt *Mask =
+ llvm::ConstantInt::get(CGF.IntTy, ~(1U << GuardNum));
+ Builder.CreateStore(Builder.CreateAnd(LI, Mask), Guard);
+ }
+};
+
+struct CallInitThreadAbort final : EHScopeStack::Cleanup {
+ llvm::Value *Guard;
+ CallInitThreadAbort(Address Guard) : Guard(Guard.getPointer()) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ // Calling _Init_thread_abort will reset the guard's state.
+ CGF.EmitNounwindRuntimeCall(getInitThreadAbortFn(CGF.CGM), Guard);
+ }
+};
+}
+
+void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::GlobalVariable *GV,
+ bool PerformInit) {
+ // MSVC only uses guards for static locals.
+ if (!D.isStaticLocal()) {
+ assert(GV->hasWeakLinkage() || GV->hasLinkOnceLinkage());
+ // GlobalOpt is allowed to discard the initializer, so use linkonce_odr.
+ llvm::Function *F = CGF.CurFn;
+ F->setLinkage(llvm::GlobalValue::LinkOnceODRLinkage);
+ F->setComdat(CGM.getModule().getOrInsertComdat(F->getName()));
+ CGF.EmitCXXGlobalVarDeclInit(D, GV, PerformInit);
+ return;
+ }
+
+ bool ThreadlocalStatic = D.getTLSKind();
+ bool ThreadsafeStatic = getContext().getLangOpts().ThreadsafeStatics;
+
+ // Thread-safe static variables which aren't thread-specific have a
+ // per-variable guard.
+ bool HasPerVariableGuard = ThreadsafeStatic && !ThreadlocalStatic;
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::IntegerType *GuardTy = CGF.Int32Ty;
+ llvm::ConstantInt *Zero = llvm::ConstantInt::get(GuardTy, 0);
+ CharUnits GuardAlign = CharUnits::fromQuantity(4);
+
+ // Get the guard variable for this function if we have one already.
+ GuardInfo *GI = nullptr;
+ if (ThreadlocalStatic)
+ GI = &ThreadLocalGuardVariableMap[D.getDeclContext()];
+ else if (!ThreadsafeStatic)
+ GI = &GuardVariableMap[D.getDeclContext()];
+
+ llvm::GlobalVariable *GuardVar = GI ? GI->Guard : nullptr;
+ unsigned GuardNum;
+ if (D.isExternallyVisible()) {
+ // Externally visible variables have to be numbered in Sema to properly
+ // handle unreachable VarDecls.
+ GuardNum = getContext().getStaticLocalNumber(&D);
+ assert(GuardNum > 0);
+ GuardNum--;
+ } else if (HasPerVariableGuard) {
+ GuardNum = ThreadSafeGuardNumMap[D.getDeclContext()]++;
+ } else {
+ // Non-externally visible variables are numbered here in CodeGen.
+ GuardNum = GI->BitIndex++;
+ }
+
+ if (!HasPerVariableGuard && GuardNum >= 32) {
+ if (D.isExternallyVisible())
+ ErrorUnsupportedABI(CGF, "more than 32 guarded initializations");
+ GuardNum %= 32;
+ GuardVar = nullptr;
+ }
+
+ if (!GuardVar) {
+ // Mangle the name for the guard.
+ SmallString<256> GuardName;
+ {
+ llvm::raw_svector_ostream Out(GuardName);
+ if (HasPerVariableGuard)
+ getMangleContext().mangleThreadSafeStaticGuardVariable(&D, GuardNum,
+ Out);
+ else
+ getMangleContext().mangleStaticGuardVariable(&D, Out);
+ }
+
+ // Create the guard variable with a zero-initializer. Just absorb linkage,
+ // visibility and dll storage class from the guarded variable.
+ GuardVar =
+ new llvm::GlobalVariable(CGM.getModule(), GuardTy, /*isConstant=*/false,
+ GV->getLinkage(), Zero, GuardName.str());
+ GuardVar->setVisibility(GV->getVisibility());
+ GuardVar->setDLLStorageClass(GV->getDLLStorageClass());
+ GuardVar->setAlignment(GuardAlign.getQuantity());
+ if (GuardVar->isWeakForLinker())
+ GuardVar->setComdat(
+ CGM.getModule().getOrInsertComdat(GuardVar->getName()));
+ if (D.getTLSKind())
+ GuardVar->setThreadLocal(true);
+ if (GI && !HasPerVariableGuard)
+ GI->Guard = GuardVar;
+ }
+
+ ConstantAddress GuardAddr(GuardVar, GuardAlign);
+
+ assert(GuardVar->getLinkage() == GV->getLinkage() &&
+ "static local from the same function had different linkage");
+
+ if (!HasPerVariableGuard) {
+ // Pseudo code for the test:
+ // if (!(GuardVar & MyGuardBit)) {
+ // GuardVar |= MyGuardBit;
+ // ... initialize the object ...;
+ // }
+
+ // Test our bit from the guard variable.
+ llvm::ConstantInt *Bit = llvm::ConstantInt::get(GuardTy, 1U << GuardNum);
+ llvm::LoadInst *LI = Builder.CreateLoad(GuardAddr);
+ llvm::Value *IsInitialized =
+ Builder.CreateICmpNE(Builder.CreateAnd(LI, Bit), Zero);
+ llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
+ llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
+ Builder.CreateCondBr(IsInitialized, EndBlock, InitBlock);
+
+    // Set our bit in the guard variable, emit the initializer, and add a
+    // global destructor if appropriate.
+ CGF.EmitBlock(InitBlock);
+ Builder.CreateStore(Builder.CreateOr(LI, Bit), GuardAddr);
+ CGF.EHStack.pushCleanup<ResetGuardBit>(EHCleanup, GuardAddr, GuardNum);
+ CGF.EmitCXXGlobalVarDeclInit(D, GV, PerformInit);
+ CGF.PopCleanupBlock();
+ Builder.CreateBr(EndBlock);
+
+ // Continue.
+ CGF.EmitBlock(EndBlock);
+ } else {
+ // Pseudo code for the test:
+ // if (TSS > _Init_thread_epoch) {
+ // _Init_thread_header(&TSS);
+ // if (TSS == -1) {
+ // ... initialize the object ...;
+ // _Init_thread_footer(&TSS);
+ // }
+ // }
+ //
+    // The algorithm is almost identical to the one in the appendix of N2325.
+
+ // This BasicBlock determines whether or not we have any work to do.
+ llvm::LoadInst *FirstGuardLoad = Builder.CreateLoad(GuardAddr);
+ FirstGuardLoad->setOrdering(llvm::AtomicOrdering::Unordered);
+ llvm::LoadInst *InitThreadEpoch =
+ Builder.CreateLoad(getInitThreadEpochPtr(CGM));
+ llvm::Value *IsUninitialized =
+ Builder.CreateICmpSGT(FirstGuardLoad, InitThreadEpoch);
+ llvm::BasicBlock *AttemptInitBlock = CGF.createBasicBlock("init.attempt");
+ llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
+ Builder.CreateCondBr(IsUninitialized, AttemptInitBlock, EndBlock);
+
+ // This BasicBlock attempts to determine whether or not this thread is
+ // responsible for doing the initialization.
+ CGF.EmitBlock(AttemptInitBlock);
+ CGF.EmitNounwindRuntimeCall(getInitThreadHeaderFn(CGM),
+ GuardAddr.getPointer());
+ llvm::LoadInst *SecondGuardLoad = Builder.CreateLoad(GuardAddr);
+ SecondGuardLoad->setOrdering(llvm::AtomicOrdering::Unordered);
+ llvm::Value *ShouldDoInit =
+ Builder.CreateICmpEQ(SecondGuardLoad, getAllOnesInt());
+ llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
+ Builder.CreateCondBr(ShouldDoInit, InitBlock, EndBlock);
+
+ // Ok, we ended up getting selected as the initializing thread.
+ CGF.EmitBlock(InitBlock);
+ CGF.EHStack.pushCleanup<CallInitThreadAbort>(EHCleanup, GuardAddr);
+ CGF.EmitCXXGlobalVarDeclInit(D, GV, PerformInit);
+ CGF.PopCleanupBlock();
+ CGF.EmitNounwindRuntimeCall(getInitThreadFooterFn(CGM),
+ GuardAddr.getPointer());
+ Builder.CreateBr(EndBlock);
+
+ CGF.EmitBlock(EndBlock);
+ }
+}
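+
+// Illustrative note (not part of the patch): a function-local static such as
+//   int &counter() { static int x = compute(); return x; }
+// funnels through the code above. Without thread-safe statics it takes the
+// 32-bit guard-bitmap path; with them it uses the _Init_thread_* epoch
+// protocol sketched in the pseudo code.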
+
+bool MicrosoftCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
+ // Null-ness for function memptrs only depends on the first field, which is
+ // the function pointer. The rest don't matter, so we can zero initialize.
+ if (MPT->isMemberFunctionPointer())
+ return true;
+
+ // The virtual base adjustment field is always -1 for null, so if we have one
+ // we can't zero initialize. The field offset is sometimes also -1 if 0 is a
+ // valid field offset.
+ const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+ MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
+ return (!MSInheritanceAttr::hasVBTableOffsetField(Inheritance) &&
+ RD->nullFieldOffsetIsZero());
+}
+
+llvm::Type *
+MicrosoftCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
+ const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+ MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
+ llvm::SmallVector<llvm::Type *, 4> fields;
+ if (MPT->isMemberFunctionPointer())
+ fields.push_back(CGM.VoidPtrTy); // FunctionPointerOrVirtualThunk
+ else
+ fields.push_back(CGM.IntTy); // FieldOffset
+
+ if (MSInheritanceAttr::hasNVOffsetField(MPT->isMemberFunctionPointer(),
+ Inheritance))
+ fields.push_back(CGM.IntTy);
+ if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance))
+ fields.push_back(CGM.IntTy);
+ if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance))
+ fields.push_back(CGM.IntTy); // VirtualBaseAdjustmentOffset
+
+ if (fields.size() == 1)
+ return fields[0];
+ return llvm::StructType::get(CGM.getLLVMContext(), fields);
+}
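+
+// For reference, a sketch of the layouts this produces (inferred from the
+// has*Field predicates above; field order follows the push_back order):
+//   member functions: single      -> i8*
+//                     multiple    -> { i8*, i32 }
+//                     virtual     -> { i8*, i32, i32 }
+//                     unspecified -> { i8*, i32, i32, i32 }
+// Data member pointers drop the i8* in favor of an i32 FieldOffset and never
+// carry the non-virtual offset field.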
+
+void MicrosoftCXXABI::
+GetNullMemberPointerFields(const MemberPointerType *MPT,
+ llvm::SmallVectorImpl<llvm::Constant *> &fields) {
+ assert(fields.empty());
+ const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+ MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
+ if (MPT->isMemberFunctionPointer()) {
+ // FunctionPointerOrVirtualThunk
+ fields.push_back(llvm::Constant::getNullValue(CGM.VoidPtrTy));
+ } else {
+ if (RD->nullFieldOffsetIsZero())
+ fields.push_back(getZeroInt()); // FieldOffset
+ else
+ fields.push_back(getAllOnesInt()); // FieldOffset
+ }
+
+ if (MSInheritanceAttr::hasNVOffsetField(MPT->isMemberFunctionPointer(),
+ Inheritance))
+ fields.push_back(getZeroInt());
+ if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance))
+ fields.push_back(getZeroInt());
+ if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance))
+ fields.push_back(getAllOnesInt());
+}
+
+llvm::Constant *
+MicrosoftCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
+ llvm::SmallVector<llvm::Constant *, 4> fields;
+ GetNullMemberPointerFields(MPT, fields);
+ if (fields.size() == 1)
+ return fields[0];
+ llvm::Constant *Res = llvm::ConstantStruct::getAnon(fields);
+ assert(Res->getType() == ConvertMemberPointerType(MPT));
+ return Res;
+}
+
+llvm::Constant *
+MicrosoftCXXABI::EmitFullMemberPointer(llvm::Constant *FirstField,
+ bool IsMemberFunction,
+ const CXXRecordDecl *RD,
+ CharUnits NonVirtualBaseAdjustment,
+ unsigned VBTableIndex) {
+ MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
+
+ // Single-inheritance class member pointers are represented as scalars
+ // instead of aggregates.
+ if (MSInheritanceAttr::hasOnlyOneField(IsMemberFunction, Inheritance))
+ return FirstField;
+
+ llvm::SmallVector<llvm::Constant *, 4> fields;
+ fields.push_back(FirstField);
+
+ if (MSInheritanceAttr::hasNVOffsetField(IsMemberFunction, Inheritance))
+ fields.push_back(llvm::ConstantInt::get(
+ CGM.IntTy, NonVirtualBaseAdjustment.getQuantity()));
+
+ if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance)) {
+ CharUnits Offs = CharUnits::Zero();
+ if (VBTableIndex)
+ Offs = getContext().getASTRecordLayout(RD).getVBPtrOffset();
+ fields.push_back(llvm::ConstantInt::get(CGM.IntTy, Offs.getQuantity()));
+ }
+
+ // The rest of the fields are adjusted by conversions to a more derived class.
+ if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance))
+ fields.push_back(llvm::ConstantInt::get(CGM.IntTy, VBTableIndex));
+
+ return llvm::ConstantStruct::getAnon(fields);
+}
+
+llvm::Constant *
+MicrosoftCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset) {
+ const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+ if (RD->getMSInheritanceModel() ==
+ MSInheritanceAttr::Keyword_virtual_inheritance)
+ offset -= getContext().getOffsetOfBaseWithVBPtr(RD);
+ llvm::Constant *FirstField =
+ llvm::ConstantInt::get(CGM.IntTy, offset.getQuantity());
+ return EmitFullMemberPointer(FirstField, /*IsMemberFunction=*/false, RD,
+ CharUnits::Zero(), /*VBTableIndex=*/0);
+}
+
+llvm::Constant *MicrosoftCXXABI::EmitMemberPointer(const APValue &MP,
+ QualType MPType) {
+ const MemberPointerType *DstTy = MPType->castAs<MemberPointerType>();
+ const ValueDecl *MPD = MP.getMemberPointerDecl();
+ if (!MPD)
+ return EmitNullMemberPointer(DstTy);
+
+ ASTContext &Ctx = getContext();
+ ArrayRef<const CXXRecordDecl *> MemberPointerPath = MP.getMemberPointerPath();
+
+ llvm::Constant *C;
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD)) {
+ C = EmitMemberFunctionPointer(MD);
+ } else {
+ CharUnits FieldOffset = Ctx.toCharUnitsFromBits(Ctx.getFieldOffset(MPD));
+ C = EmitMemberDataPointer(DstTy, FieldOffset);
+ }
+
+ if (!MemberPointerPath.empty()) {
+ const CXXRecordDecl *SrcRD = cast<CXXRecordDecl>(MPD->getDeclContext());
+ const Type *SrcRecTy = Ctx.getTypeDeclType(SrcRD).getTypePtr();
+ const MemberPointerType *SrcTy =
+ Ctx.getMemberPointerType(DstTy->getPointeeType(), SrcRecTy)
+ ->castAs<MemberPointerType>();
+
+ bool DerivedMember = MP.isMemberPointerToDerivedMember();
+ SmallVector<const CXXBaseSpecifier *, 4> DerivedToBasePath;
+ const CXXRecordDecl *PrevRD = SrcRD;
+ for (const CXXRecordDecl *PathElem : MemberPointerPath) {
+ const CXXRecordDecl *Base = nullptr;
+ const CXXRecordDecl *Derived = nullptr;
+ if (DerivedMember) {
+ Base = PathElem;
+ Derived = PrevRD;
+ } else {
+ Base = PrevRD;
+ Derived = PathElem;
+ }
+ for (const CXXBaseSpecifier &BS : Derived->bases())
+ if (BS.getType()->getAsCXXRecordDecl()->getCanonicalDecl() ==
+ Base->getCanonicalDecl())
+ DerivedToBasePath.push_back(&BS);
+ PrevRD = PathElem;
+ }
+ assert(DerivedToBasePath.size() == MemberPointerPath.size());
+
+ CastKind CK = DerivedMember ? CK_DerivedToBaseMemberPointer
+ : CK_BaseToDerivedMemberPointer;
+ C = EmitMemberPointerConversion(SrcTy, DstTy, CK, DerivedToBasePath.begin(),
+ DerivedToBasePath.end(), C);
+ }
+ return C;
+}
+
+llvm::Constant *
+MicrosoftCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
+ assert(MD->isInstance() && "Member function must not be static!");
+
+ MD = MD->getCanonicalDecl();
+ CharUnits NonVirtualBaseAdjustment = CharUnits::Zero();
+ const CXXRecordDecl *RD = MD->getParent()->getMostRecentDecl();
+ CodeGenTypes &Types = CGM.getTypes();
+
+ unsigned VBTableIndex = 0;
+ llvm::Constant *FirstField;
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
+ if (!MD->isVirtual()) {
+ llvm::Type *Ty;
+ // Check whether the function has a computable LLVM signature.
+ if (Types.isFuncTypeConvertible(FPT)) {
+ // The function has a computable LLVM signature; use the correct type.
+ Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
+ } else {
+ // Use an arbitrary non-function type to tell GetAddrOfFunction that the
+ // function type is incomplete.
+ Ty = CGM.PtrDiffTy;
+ }
+ FirstField = CGM.GetAddrOfFunction(MD, Ty);
+ } else {
+ auto &VTableContext = CGM.getMicrosoftVTableContext();
+ MicrosoftVTableContext::MethodVFTableLocation ML =
+ VTableContext.getMethodVFTableLocation(MD);
+ FirstField = EmitVirtualMemPtrThunk(MD, ML);
+ // Include the vfptr adjustment if the method is in a non-primary vftable.
+ NonVirtualBaseAdjustment += ML.VFPtrOffset;
+ if (ML.VBase)
+ VBTableIndex = VTableContext.getVBTableIndex(RD, ML.VBase) * 4;
+ }
+
+ if (VBTableIndex == 0 &&
+ RD->getMSInheritanceModel() ==
+ MSInheritanceAttr::Keyword_virtual_inheritance)
+ NonVirtualBaseAdjustment -= getContext().getOffsetOfBaseWithVBPtr(RD);
+
+ // The rest of the fields are common with data member pointers.
+ FirstField = llvm::ConstantExpr::getBitCast(FirstField, CGM.VoidPtrTy);
+ return EmitFullMemberPointer(FirstField, /*IsMemberFunction=*/true, RD,
+ NonVirtualBaseAdjustment, VBTableIndex);
+}
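+
+// Example (illustrative): given
+//   struct A { virtual void f(); };
+// '&A::f' stores the address of a small vcall thunk rather than A::f itself,
+// so that a later call through the member pointer dispatches virtually.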
+
+/// Member pointers are the same if they're either bitwise identical *or* both
+/// null. Null-ness for function members is determined by the first field,
+/// while for data member pointers we must compare all fields.
+llvm::Value *
+MicrosoftCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ // Handle != comparisons by switching the sense of all boolean operations.
+ llvm::ICmpInst::Predicate Eq;
+ llvm::Instruction::BinaryOps And, Or;
+ if (Inequality) {
+ Eq = llvm::ICmpInst::ICMP_NE;
+ And = llvm::Instruction::Or;
+ Or = llvm::Instruction::And;
+ } else {
+ Eq = llvm::ICmpInst::ICMP_EQ;
+ And = llvm::Instruction::And;
+ Or = llvm::Instruction::Or;
+ }
+
+ // If this is a single-field member pointer (single inheritance), the
+ // comparison is a single icmp.
+ const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+ MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
+ if (MSInheritanceAttr::hasOnlyOneField(MPT->isMemberFunctionPointer(),
+ Inheritance))
+ return Builder.CreateICmp(Eq, L, R);
+
+ // Compare the first field.
+ llvm::Value *L0 = Builder.CreateExtractValue(L, 0, "lhs.0");
+ llvm::Value *R0 = Builder.CreateExtractValue(R, 0, "rhs.0");
+ llvm::Value *Cmp0 = Builder.CreateICmp(Eq, L0, R0, "memptr.cmp.first");
+
+ // Compare everything other than the first field.
+ llvm::Value *Res = nullptr;
+ llvm::StructType *LType = cast<llvm::StructType>(L->getType());
+ for (unsigned I = 1, E = LType->getNumElements(); I != E; ++I) {
+ llvm::Value *LF = Builder.CreateExtractValue(L, I);
+ llvm::Value *RF = Builder.CreateExtractValue(R, I);
+ llvm::Value *Cmp = Builder.CreateICmp(Eq, LF, RF, "memptr.cmp.rest");
+ if (Res)
+ Res = Builder.CreateBinOp(And, Res, Cmp);
+ else
+ Res = Cmp;
+ }
+
+ // Check if the first field is 0 if this is a function pointer.
+ if (MPT->isMemberFunctionPointer()) {
+ // (l1 == r1 && ...) || l0 == 0
+ llvm::Value *Zero = llvm::Constant::getNullValue(L0->getType());
+ llvm::Value *IsZero = Builder.CreateICmp(Eq, L0, Zero, "memptr.cmp.iszero");
+ Res = Builder.CreateBinOp(Or, Res, IsZero);
+ }
+
+ // Combine with the comparison of the first field, which must be true for
+ // the overall comparison to succeed.
+ return Builder.CreateBinOp(And, Res, Cmp0, "memptr.cmp");
+}
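+
+// Net effect for an equality compare of two-field function member pointers,
+// as pseudo code:
+//   (l.nvoff == r.nvoff || l.fn == 0) && l.fn == r.fn
+// so two null member pointers compare equal no matter what their trailing
+// fields contain.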
+
+llvm::Value *
+MicrosoftCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::SmallVector<llvm::Constant *, 4> fields;
+ // We only need one field for member functions.
+ if (MPT->isMemberFunctionPointer())
+ fields.push_back(llvm::Constant::getNullValue(CGM.VoidPtrTy));
+ else
+ GetNullMemberPointerFields(MPT, fields);
+ assert(!fields.empty());
+ llvm::Value *FirstField = MemPtr;
+ if (MemPtr->getType()->isStructTy())
+ FirstField = Builder.CreateExtractValue(MemPtr, 0);
+ llvm::Value *Res = Builder.CreateICmpNE(FirstField, fields[0], "memptr.cmp0");
+
+ // For function member pointers, we only need to test the function pointer
+ // field. The other fields, if any, can be garbage.
+ if (MPT->isMemberFunctionPointer())
+ return Res;
+
+ // Otherwise, emit a series of compares and combine the results.
+ for (int I = 1, E = fields.size(); I < E; ++I) {
+ llvm::Value *Field = Builder.CreateExtractValue(MemPtr, I);
+ llvm::Value *Next = Builder.CreateICmpNE(Field, fields[I], "memptr.cmp");
+ Res = Builder.CreateOr(Res, Next, "memptr.tobool");
+ }
+ return Res;
+}
+
+bool MicrosoftCXXABI::MemberPointerConstantIsNull(const MemberPointerType *MPT,
+ llvm::Constant *Val) {
+ // Function pointers are null if the pointer in the first field is null.
+ if (MPT->isMemberFunctionPointer()) {
+ llvm::Constant *FirstField = Val->getType()->isStructTy() ?
+ Val->getAggregateElement(0U) : Val;
+ return FirstField->isNullValue();
+ }
+
+ // If it's not a function pointer and it's zero initializable, we can easily
+ // check zero.
+ if (isZeroInitializable(MPT) && Val->isNullValue())
+ return true;
+
+ // Otherwise, break down all the fields for comparison. Hopefully these
+ // little Constants are reused, while a big null struct might not be.
+ llvm::SmallVector<llvm::Constant *, 4> Fields;
+ GetNullMemberPointerFields(MPT, Fields);
+ if (Fields.size() == 1) {
+ assert(Val->getType()->isIntegerTy());
+ return Val == Fields[0];
+ }
+
+ unsigned I, E;
+ for (I = 0, E = Fields.size(); I != E; ++I) {
+ if (Val->getAggregateElement(I) != Fields[I])
+ break;
+ }
+ return I == E;
+}
+
+llvm::Value *
+MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
+ Address This,
+ llvm::Value *VBPtrOffset,
+ llvm::Value *VBTableOffset,
+ llvm::Value **VBPtrOut) {
+ CGBuilderTy &Builder = CGF.Builder;
+ // Load the vbtable pointer from the vbptr in the instance.
+ This = Builder.CreateElementBitCast(This, CGM.Int8Ty);
+ llvm::Value *VBPtr =
+ Builder.CreateInBoundsGEP(This.getPointer(), VBPtrOffset, "vbptr");
+ if (VBPtrOut) *VBPtrOut = VBPtr;
+ VBPtr = Builder.CreateBitCast(VBPtr,
+ CGM.Int32Ty->getPointerTo(0)->getPointerTo(This.getAddressSpace()));
+
+ CharUnits VBPtrAlign;
+ if (auto CI = dyn_cast<llvm::ConstantInt>(VBPtrOffset)) {
+ VBPtrAlign = This.getAlignment().alignmentAtOffset(
+ CharUnits::fromQuantity(CI->getSExtValue()));
+ } else {
+ VBPtrAlign = CGF.getPointerAlign();
+ }
+
+ llvm::Value *VBTable = Builder.CreateAlignedLoad(VBPtr, VBPtrAlign, "vbtable");
+
+ // Translate from byte offset to table index; this improves analyzability.
+ llvm::Value *VBTableIndex = Builder.CreateAShr(
+ VBTableOffset, llvm::ConstantInt::get(VBTableOffset->getType(), 2),
+ "vbtindex", /*isExact=*/true);
+
+ // Load an i32 offset from the vb-table.
+ llvm::Value *VBaseOffs = Builder.CreateInBoundsGEP(VBTable, VBTableIndex);
+ VBaseOffs = Builder.CreateBitCast(VBaseOffs, CGM.Int32Ty->getPointerTo(0));
+ return Builder.CreateAlignedLoad(VBaseOffs, CharUnits::fromQuantity(4),
+ "vbase_offs");
+}
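+
+// In effect this computes, in C-like pseudo code:
+//   char *vbptr = (char *)this + VBPtrOffset;
+//   int *vbtable = *(int **)vbptr;
+//   int vbase_offs = vbtable[VBTableOffset / 4];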
+
+// Returns an adjusted base cast to i8*, since we do more address arithmetic on
+// it.
+llvm::Value *MicrosoftCXXABI::AdjustVirtualBase(
+ CodeGenFunction &CGF, const Expr *E, const CXXRecordDecl *RD,
+ Address Base, llvm::Value *VBTableOffset, llvm::Value *VBPtrOffset) {
+ CGBuilderTy &Builder = CGF.Builder;
+ Base = Builder.CreateElementBitCast(Base, CGM.Int8Ty);
+ llvm::BasicBlock *OriginalBB = nullptr;
+ llvm::BasicBlock *SkipAdjustBB = nullptr;
+ llvm::BasicBlock *VBaseAdjustBB = nullptr;
+
+ // In the unspecified inheritance model, there might not be a vbtable at all,
+ // in which case we need to skip the virtual base lookup. If there is a
+ // vbtable, the first entry is a no-op entry that gives back the original
+ // base, so look for a virtual base adjustment offset of zero.
+ if (VBPtrOffset) {
+ OriginalBB = Builder.GetInsertBlock();
+ VBaseAdjustBB = CGF.createBasicBlock("memptr.vadjust");
+ SkipAdjustBB = CGF.createBasicBlock("memptr.skip_vadjust");
+ llvm::Value *IsVirtual =
+ Builder.CreateICmpNE(VBTableOffset, getZeroInt(),
+ "memptr.is_vbase");
+ Builder.CreateCondBr(IsVirtual, VBaseAdjustBB, SkipAdjustBB);
+ CGF.EmitBlock(VBaseAdjustBB);
+ }
+
+ // If we weren't given a dynamic vbptr offset, RD should be complete and we'll
+ // know the vbptr offset.
+ if (!VBPtrOffset) {
+ CharUnits offs = CharUnits::Zero();
+ if (!RD->hasDefinition()) {
+ DiagnosticsEngine &Diags = CGF.CGM.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "member pointer representation requires a "
+ "complete class type for %0 to perform this expression");
+ Diags.Report(E->getExprLoc(), DiagID) << RD << E->getSourceRange();
+ } else if (RD->getNumVBases())
+ offs = getContext().getASTRecordLayout(RD).getVBPtrOffset();
+ VBPtrOffset = llvm::ConstantInt::get(CGM.IntTy, offs.getQuantity());
+ }
+ llvm::Value *VBPtr = nullptr;
+ llvm::Value *VBaseOffs =
+ GetVBaseOffsetFromVBPtr(CGF, Base, VBPtrOffset, VBTableOffset, &VBPtr);
+ llvm::Value *AdjustedBase = Builder.CreateInBoundsGEP(VBPtr, VBaseOffs);
+
+ // Merge control flow with the case where we didn't have to adjust.
+ if (VBaseAdjustBB) {
+ Builder.CreateBr(SkipAdjustBB);
+ CGF.EmitBlock(SkipAdjustBB);
+ llvm::PHINode *Phi = Builder.CreatePHI(CGM.Int8PtrTy, 2, "memptr.base");
+ Phi->addIncoming(Base.getPointer(), OriginalBB);
+ Phi->addIncoming(AdjustedBase, VBaseAdjustBB);
+ return Phi;
+ }
+ return AdjustedBase;
+}
+
+llvm::Value *MicrosoftCXXABI::EmitMemberDataPointerAddress(
+ CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ assert(MPT->isMemberDataPointer());
+ unsigned AS = Base.getAddressSpace();
+ llvm::Type *PType =
+ CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS);
+ CGBuilderTy &Builder = CGF.Builder;
+ const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+ MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
+
+ // Extract the fields we need, regardless of model. We'll apply them if we
+ // have them.
+ llvm::Value *FieldOffset = MemPtr;
+ llvm::Value *VirtualBaseAdjustmentOffset = nullptr;
+ llvm::Value *VBPtrOffset = nullptr;
+ if (MemPtr->getType()->isStructTy()) {
+ // We need to extract values.
+ unsigned I = 0;
+ FieldOffset = Builder.CreateExtractValue(MemPtr, I++);
+ if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance))
+ VBPtrOffset = Builder.CreateExtractValue(MemPtr, I++);
+ if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance))
+ VirtualBaseAdjustmentOffset = Builder.CreateExtractValue(MemPtr, I++);
+ }
+
+ llvm::Value *Addr;
+ if (VirtualBaseAdjustmentOffset) {
+ Addr = AdjustVirtualBase(CGF, E, RD, Base, VirtualBaseAdjustmentOffset,
+ VBPtrOffset);
+ } else {
+ Addr = Base.getPointer();
+ }
+
+ // Cast to char*.
+ Addr = Builder.CreateBitCast(Addr, CGF.Int8Ty->getPointerTo(AS));
+
+ // Apply the offset, which we assume is non-null.
+ Addr = Builder.CreateInBoundsGEP(Addr, FieldOffset, "memptr.offset");
+
+ // Cast the address to the appropriate pointer type, adopting the address
+ // space of the base pointer.
+ return Builder.CreateBitCast(Addr, PType);
+}
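+
+// Summarized (illustrative): 'obj.*pm' for a data member lowers to roughly
+//   (char *)&obj [+ virtual base adjustment] + FieldOffset
+// reinterpreted as a pointer to the member's type.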
+
+llvm::Value *
+MicrosoftCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src) {
+ assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
+ E->getCastKind() == CK_BaseToDerivedMemberPointer ||
+ E->getCastKind() == CK_ReinterpretMemberPointer);
+
+ // Use constant emission if we can.
+ if (isa<llvm::Constant>(Src))
+ return EmitMemberPointerConversion(E, cast<llvm::Constant>(Src));
+
+ // We may be adding or dropping fields from the member pointer, so we need
+ // both types and the inheritance models of both records.
+ const MemberPointerType *SrcTy =
+ E->getSubExpr()->getType()->castAs<MemberPointerType>();
+ const MemberPointerType *DstTy = E->getType()->castAs<MemberPointerType>();
+ bool IsFunc = SrcTy->isMemberFunctionPointer();
+
+ // If the classes use the same null representation, reinterpret_cast is a nop.
+ bool IsReinterpret = E->getCastKind() == CK_ReinterpretMemberPointer;
+ if (IsReinterpret && IsFunc)
+ return Src;
+
+ CXXRecordDecl *SrcRD = SrcTy->getMostRecentCXXRecordDecl();
+ CXXRecordDecl *DstRD = DstTy->getMostRecentCXXRecordDecl();
+ if (IsReinterpret &&
+ SrcRD->nullFieldOffsetIsZero() == DstRD->nullFieldOffsetIsZero())
+ return Src;
+
+ CGBuilderTy &Builder = CGF.Builder;
+
+ // Branch past the conversion if Src is null.
+ llvm::Value *IsNotNull = EmitMemberPointerIsNotNull(CGF, Src, SrcTy);
+ llvm::Constant *DstNull = EmitNullMemberPointer(DstTy);
+
+ // C++ 5.2.10p9: The null member pointer value is converted to the null member
+ // pointer value of the destination type.
+ if (IsReinterpret) {
+ // For reinterpret casts, sema ensures that src and dst are both functions
+ // or data and have the same size, which means the LLVM types should match.
+ assert(Src->getType() == DstNull->getType());
+ return Builder.CreateSelect(IsNotNull, Src, DstNull);
+ }
+
+ llvm::BasicBlock *OriginalBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *ConvertBB = CGF.createBasicBlock("memptr.convert");
+ llvm::BasicBlock *ContinueBB = CGF.createBasicBlock("memptr.converted");
+ Builder.CreateCondBr(IsNotNull, ConvertBB, ContinueBB);
+ CGF.EmitBlock(ConvertBB);
+
+ llvm::Value *Dst = EmitNonNullMemberPointerConversion(
+ SrcTy, DstTy, E->getCastKind(), E->path_begin(), E->path_end(), Src,
+ Builder);
+
+ Builder.CreateBr(ContinueBB);
+
+ // In the continuation, choose between DstNull and Dst.
+ CGF.EmitBlock(ContinueBB);
+ llvm::PHINode *Phi = Builder.CreatePHI(DstNull->getType(), 2, "memptr.converted");
+ Phi->addIncoming(DstNull, OriginalBB);
+ Phi->addIncoming(Dst, ConvertBB);
+ return Phi;
+}
+
+llvm::Value *MicrosoftCXXABI::EmitNonNullMemberPointerConversion(
+ const MemberPointerType *SrcTy, const MemberPointerType *DstTy, CastKind CK,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd, llvm::Value *Src,
+ CGBuilderTy &Builder) {
+ const CXXRecordDecl *SrcRD = SrcTy->getMostRecentCXXRecordDecl();
+ const CXXRecordDecl *DstRD = DstTy->getMostRecentCXXRecordDecl();
+ MSInheritanceAttr::Spelling SrcInheritance = SrcRD->getMSInheritanceModel();
+ MSInheritanceAttr::Spelling DstInheritance = DstRD->getMSInheritanceModel();
+ bool IsFunc = SrcTy->isMemberFunctionPointer();
+ bool IsConstant = isa<llvm::Constant>(Src);
+
+ // Decompose src.
+ llvm::Value *FirstField = Src;
+ llvm::Value *NonVirtualBaseAdjustment = getZeroInt();
+ llvm::Value *VirtualBaseAdjustmentOffset = getZeroInt();
+ llvm::Value *VBPtrOffset = getZeroInt();
+ if (!MSInheritanceAttr::hasOnlyOneField(IsFunc, SrcInheritance)) {
+ // We need to extract values.
+ unsigned I = 0;
+ FirstField = Builder.CreateExtractValue(Src, I++);
+ if (MSInheritanceAttr::hasNVOffsetField(IsFunc, SrcInheritance))
+ NonVirtualBaseAdjustment = Builder.CreateExtractValue(Src, I++);
+ if (MSInheritanceAttr::hasVBPtrOffsetField(SrcInheritance))
+ VBPtrOffset = Builder.CreateExtractValue(Src, I++);
+ if (MSInheritanceAttr::hasVBTableOffsetField(SrcInheritance))
+ VirtualBaseAdjustmentOffset = Builder.CreateExtractValue(Src, I++);
+ }
+
+ bool IsDerivedToBase = (CK == CK_DerivedToBaseMemberPointer);
+ const MemberPointerType *DerivedTy = IsDerivedToBase ? SrcTy : DstTy;
+ const CXXRecordDecl *DerivedClass = DerivedTy->getMostRecentCXXRecordDecl();
+
+ // For data pointers, we adjust the field offset directly. For functions, we
+ // have a separate field.
+ llvm::Value *&NVAdjustField = IsFunc ? NonVirtualBaseAdjustment : FirstField;
+
+ // The virtual inheritance model has a quirk: the virtual base table is always
+ // referenced when dereferencing a member pointer even if the member pointer
+ // is non-virtual. This is accounted for by adjusting the non-virtual offset
+ // to point backwards to the top of the MDC from the first VBase. Undo this
+ // adjustment to normalize the member pointer.
+ llvm::Value *SrcVBIndexEqZero =
+ Builder.CreateICmpEQ(VirtualBaseAdjustmentOffset, getZeroInt());
+ if (SrcInheritance == MSInheritanceAttr::Keyword_virtual_inheritance) {
+ if (int64_t SrcOffsetToFirstVBase =
+ getContext().getOffsetOfBaseWithVBPtr(SrcRD).getQuantity()) {
+ llvm::Value *UndoSrcAdjustment = Builder.CreateSelect(
+ SrcVBIndexEqZero,
+ llvm::ConstantInt::get(CGM.IntTy, SrcOffsetToFirstVBase),
+ getZeroInt());
+ NVAdjustField = Builder.CreateNSWAdd(NVAdjustField, UndoSrcAdjustment);
+ }
+ }
+
+ // A non-zero vbindex implies that we are dealing with a source member in a
+ // floating virtual base in addition to some non-virtual offset. If the
+ // vbindex is zero, we are dealing with a source that exists in a non-virtual,
+ // fixed, base. The difference between these two cases is that the vbindex +
+ // nvoffset *always* point to the member regardless of what context they are
+ // evaluated in so long as the vbindex is adjusted. A member inside a fixed
+ // base requires explicit nv adjustment.
+ llvm::Constant *BaseClassOffset = llvm::ConstantInt::get(
+ CGM.IntTy,
+ CGM.computeNonVirtualBaseClassOffset(DerivedClass, PathBegin, PathEnd)
+ .getQuantity());
+
+ llvm::Value *NVDisp;
+ if (IsDerivedToBase)
+ NVDisp = Builder.CreateNSWSub(NVAdjustField, BaseClassOffset, "adj");
+ else
+ NVDisp = Builder.CreateNSWAdd(NVAdjustField, BaseClassOffset, "adj");
+
+ NVAdjustField = Builder.CreateSelect(SrcVBIndexEqZero, NVDisp, getZeroInt());
+
+ // Update the vbindex to an appropriate value in the destination because
+ // SrcRD's vbtable might not be a strict prefix of the one in DstRD.
+ llvm::Value *DstVBIndexEqZero = SrcVBIndexEqZero;
+ if (MSInheritanceAttr::hasVBTableOffsetField(DstInheritance) &&
+ MSInheritanceAttr::hasVBTableOffsetField(SrcInheritance)) {
+ if (llvm::GlobalVariable *VDispMap =
+ getAddrOfVirtualDisplacementMap(SrcRD, DstRD)) {
+ llvm::Value *VBIndex = Builder.CreateExactUDiv(
+ VirtualBaseAdjustmentOffset, llvm::ConstantInt::get(CGM.IntTy, 4));
+ if (IsConstant) {
+ llvm::Constant *Mapping = VDispMap->getInitializer();
+ VirtualBaseAdjustmentOffset =
+ Mapping->getAggregateElement(cast<llvm::Constant>(VBIndex));
+ } else {
+ llvm::Value *Idxs[] = {getZeroInt(), VBIndex};
+ VirtualBaseAdjustmentOffset =
+ Builder.CreateAlignedLoad(Builder.CreateInBoundsGEP(VDispMap, Idxs),
+ CharUnits::fromQuantity(4));
+ }
+
+ DstVBIndexEqZero =
+ Builder.CreateICmpEQ(VirtualBaseAdjustmentOffset, getZeroInt());
+ }
+ }
+
+ // Set the VBPtrOffset to zero if the vbindex is zero. Otherwise, initialize
+ // it to the offset of the vbptr.
+ if (MSInheritanceAttr::hasVBPtrOffsetField(DstInheritance)) {
+ llvm::Value *DstVBPtrOffset = llvm::ConstantInt::get(
+ CGM.IntTy,
+ getContext().getASTRecordLayout(DstRD).getVBPtrOffset().getQuantity());
+ VBPtrOffset =
+ Builder.CreateSelect(DstVBIndexEqZero, getZeroInt(), DstVBPtrOffset);
+ }
+
+ // Likewise, apply a similar adjustment so that dereferencing the member
+ // pointer correctly accounts for the distance between the start of the first
+ // virtual base and the top of the MDC.
+ if (DstInheritance == MSInheritanceAttr::Keyword_virtual_inheritance) {
+ if (int64_t DstOffsetToFirstVBase =
+ getContext().getOffsetOfBaseWithVBPtr(DstRD).getQuantity()) {
+ llvm::Value *DoDstAdjustment = Builder.CreateSelect(
+ DstVBIndexEqZero,
+ llvm::ConstantInt::get(CGM.IntTy, DstOffsetToFirstVBase),
+ getZeroInt());
+ NVAdjustField = Builder.CreateNSWSub(NVAdjustField, DoDstAdjustment);
+ }
+ }
+
+ // Recompose dst from the null struct and the adjusted fields from src.
+ llvm::Value *Dst;
+ if (MSInheritanceAttr::hasOnlyOneField(IsFunc, DstInheritance)) {
+ Dst = FirstField;
+ } else {
+ Dst = llvm::UndefValue::get(ConvertMemberPointerType(DstTy));
+ unsigned Idx = 0;
+ Dst = Builder.CreateInsertValue(Dst, FirstField, Idx++);
+ if (MSInheritanceAttr::hasNVOffsetField(IsFunc, DstInheritance))
+ Dst = Builder.CreateInsertValue(Dst, NonVirtualBaseAdjustment, Idx++);
+ if (MSInheritanceAttr::hasVBPtrOffsetField(DstInheritance))
+ Dst = Builder.CreateInsertValue(Dst, VBPtrOffset, Idx++);
+ if (MSInheritanceAttr::hasVBTableOffsetField(DstInheritance))
+ Dst = Builder.CreateInsertValue(Dst, VirtualBaseAdjustmentOffset, Idx++);
+ }
+ return Dst;
+}
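+
+// Example (illustrative): converting 'int B::*' to 'int D::*' where D
+// derives non-virtually from B at offset 8 just rewrites FieldOffset += 8;
+// the vbptr/vbindex machinery above only comes into play when one of the
+// models involves virtual bases.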
+
+llvm::Constant *
+MicrosoftCXXABI::EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *Src) {
+ const MemberPointerType *SrcTy =
+ E->getSubExpr()->getType()->castAs<MemberPointerType>();
+ const MemberPointerType *DstTy = E->getType()->castAs<MemberPointerType>();
+
+ CastKind CK = E->getCastKind();
+
+ return EmitMemberPointerConversion(SrcTy, DstTy, CK, E->path_begin(),
+ E->path_end(), Src);
+}
+
+llvm::Constant *MicrosoftCXXABI::EmitMemberPointerConversion(
+ const MemberPointerType *SrcTy, const MemberPointerType *DstTy, CastKind CK,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd, llvm::Constant *Src) {
+ assert(CK == CK_DerivedToBaseMemberPointer ||
+ CK == CK_BaseToDerivedMemberPointer ||
+ CK == CK_ReinterpretMemberPointer);
+ // If src is null, emit a new null for dst. We can't return src because dst
+ // might have a new representation.
+ if (MemberPointerConstantIsNull(SrcTy, Src))
+ return EmitNullMemberPointer(DstTy);
+
+ // We don't need to do anything for reinterpret_casts of non-null member
+ // pointers. We should only get here when the two type representations have
+ // the same size.
+ if (CK == CK_ReinterpretMemberPointer)
+ return Src;
+
+ CGBuilderTy Builder(CGM, CGM.getLLVMContext());
+ auto *Dst = cast<llvm::Constant>(EmitNonNullMemberPointerConversion(
+ SrcTy, DstTy, CK, PathBegin, PathEnd, Src, Builder));
+
+ return Dst;
+}
+
+llvm::Value *MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer(
+ CodeGenFunction &CGF, const Expr *E, Address This,
+ llvm::Value *&ThisPtrForCall, llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ assert(MPT->isMemberFunctionPointer());
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->castAs<FunctionProtoType>();
+ const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
+ CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
+ CGBuilderTy &Builder = CGF.Builder;
+
+ MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
+
+ // Extract the fields we need, regardless of model. We'll apply them if we
+ // have them.
+ llvm::Value *FunctionPointer = MemPtr;
+ llvm::Value *NonVirtualBaseAdjustment = nullptr;
+ llvm::Value *VirtualBaseAdjustmentOffset = nullptr;
+ llvm::Value *VBPtrOffset = nullptr;
+ if (MemPtr->getType()->isStructTy()) {
+ // We need to extract values.
+ unsigned I = 0;
+ FunctionPointer = Builder.CreateExtractValue(MemPtr, I++);
+ if (MSInheritanceAttr::hasNVOffsetField(MPT, Inheritance))
+ NonVirtualBaseAdjustment = Builder.CreateExtractValue(MemPtr, I++);
+ if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance))
+ VBPtrOffset = Builder.CreateExtractValue(MemPtr, I++);
+ if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance))
+ VirtualBaseAdjustmentOffset = Builder.CreateExtractValue(MemPtr, I++);
+ }
+
+ if (VirtualBaseAdjustmentOffset) {
+ ThisPtrForCall = AdjustVirtualBase(CGF, E, RD, This,
+ VirtualBaseAdjustmentOffset, VBPtrOffset);
+ } else {
+ ThisPtrForCall = This.getPointer();
+ }
+
+ if (NonVirtualBaseAdjustment) {
+ // Apply the adjustment and cast back to the original struct type.
+ llvm::Value *Ptr = Builder.CreateBitCast(ThisPtrForCall, CGF.Int8PtrTy);
+ Ptr = Builder.CreateInBoundsGEP(Ptr, NonVirtualBaseAdjustment);
+ ThisPtrForCall = Builder.CreateBitCast(Ptr, ThisPtrForCall->getType(),
+ "this.adjusted");
+ }
+
+ return Builder.CreateBitCast(FunctionPointer, FTy->getPointerTo());
+}
+
+CGCXXABI *clang::CodeGen::CreateMicrosoftCXXABI(CodeGenModule &CGM) {
+ return new MicrosoftCXXABI(CGM);
+}
+
+// MS RTTI Overview:
+// The run time type information emitted by cl.exe contains 5 distinct types of
+// structures. Many of them reference each other.
+//
+// TypeInfo: Static classes that are returned by typeid.
+//
+// CompleteObjectLocator: Referenced by vftables. They contain information
+// required for dynamic casting, including OffsetFromTop. They also contain
+// a reference to the TypeInfo for the type and a reference to the
+// CompleteHierarchyDescriptor for the type.
+//
+// ClassHierarchyDescriptor: Contains information about a class hierarchy.
+// Used during dynamic_cast to walk a class hierarchy. References a base
+// class array and the size of said array.
+//
+// BaseClassArray: Contains a list of classes in a hierarchy. BaseClassArray is
+// somewhat of a misnomer because the list also contains the most derived
+// class and possibly multiple copies of virtual bases (if they occur
+// multiple times in the hierarchy). It holds one BaseClassDescriptor for
+// every path in the hierarchy, in pre-order depth first order. Note, we do
+// not declare a specific llvm type for BaseClassArray, it's merely an array
+// of BaseClassDescriptor pointers.
+//
+// BaseClassDescriptor: Contains information about a class in a class hierarchy.
+// BaseClassDescriptor is also somewhat of a misnomer for the same reason that
+// BaseClassArray is. It contains information about a class within a
+// hierarchy, such as whether this base is ambiguous and what its offset in
+// the vbtable is. The names of the BaseClassDescriptors have all of their
+// fields mangled into them so they can be aggressively deduplicated by the
+// linker.
+static llvm::GlobalVariable *getTypeInfoVTable(CodeGenModule &CGM) {
+ StringRef MangledName("\01??_7type_info@@6B@");
+ if (auto VTable = CGM.getModule().getNamedGlobal(MangledName))
+ return VTable;
+ return new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
+ /*Constant=*/true,
+ llvm::GlobalVariable::ExternalLinkage,
+ /*Initializer=*/nullptr, MangledName);
+}
+
+namespace {
+
+/// \brief A helper struct that stores information about a class in a class
+/// hierarchy. The information stored in these structs is used during the
+/// generation of ClassHierarchyDescriptors and BaseClassDescriptors.
+// During RTTI creation, MSRTTIClasses are stored in a contiguous array with
+// implicit depth first pre-order tree connectivity. getFirstChild and
+// getNextSibling allow us to walk the tree efficiently.
+struct MSRTTIClass {
+ enum {
+ IsPrivateOnPath = 1 | 8,
+ IsAmbiguous = 2,
+ IsPrivate = 4,
+ IsVirtual = 16,
+ HasHierarchyDescriptor = 64
+ };
+ MSRTTIClass(const CXXRecordDecl *RD) : RD(RD) {}
+ uint32_t initialize(const MSRTTIClass *Parent,
+ const CXXBaseSpecifier *Specifier);
+
+ MSRTTIClass *getFirstChild() { return this + 1; }
+ static MSRTTIClass *getNextChild(MSRTTIClass *Child) {
+ return Child + 1 + Child->NumBases;
+ }
+
+ const CXXRecordDecl *RD, *VirtualRoot;
+ uint32_t Flags, NumBases, OffsetInVBase;
+};
+
+/// \brief Recursively initialize the base class array.
+uint32_t MSRTTIClass::initialize(const MSRTTIClass *Parent,
+ const CXXBaseSpecifier *Specifier) {
+ Flags = HasHierarchyDescriptor;
+ if (!Parent) {
+ VirtualRoot = nullptr;
+ OffsetInVBase = 0;
+ } else {
+ if (Specifier->getAccessSpecifier() != AS_public)
+ Flags |= IsPrivate | IsPrivateOnPath;
+ if (Specifier->isVirtual()) {
+ Flags |= IsVirtual;
+ VirtualRoot = RD;
+ OffsetInVBase = 0;
+ } else {
+ if (Parent->Flags & IsPrivateOnPath)
+ Flags |= IsPrivateOnPath;
+ VirtualRoot = Parent->VirtualRoot;
+ OffsetInVBase = Parent->OffsetInVBase + RD->getASTContext()
+ .getASTRecordLayout(Parent->RD).getBaseClassOffset(RD).getQuantity();
+ }
+ }
+ NumBases = 0;
+ MSRTTIClass *Child = getFirstChild();
+ for (const CXXBaseSpecifier &Base : RD->bases()) {
+ NumBases += Child->initialize(this, &Base) + 1;
+ Child = getNextChild(Child);
+ }
+ return NumBases;
+}
+
+static llvm::GlobalValue::LinkageTypes getLinkageForRTTI(QualType Ty) {
+ switch (Ty->getLinkage()) {
+ case NoLinkage:
+ case InternalLinkage:
+ case UniqueExternalLinkage:
+ return llvm::GlobalValue::InternalLinkage;
+
+ case VisibleNoLinkage:
+ case ExternalLinkage:
+ return llvm::GlobalValue::LinkOnceODRLinkage;
+ }
+ llvm_unreachable("Invalid linkage!");
+}
+
+/// \brief An ephemeral helper class for building MS RTTI types. It caches some
+/// calls to the module and information about the most derived class in a
+/// hierarchy.
+struct MSRTTIBuilder {
+ enum {
+ HasBranchingHierarchy = 1,
+ HasVirtualBranchingHierarchy = 2,
+ HasAmbiguousBases = 4
+ };
+
+ MSRTTIBuilder(MicrosoftCXXABI &ABI, const CXXRecordDecl *RD)
+ : CGM(ABI.CGM), Context(CGM.getContext()),
+ VMContext(CGM.getLLVMContext()), Module(CGM.getModule()), RD(RD),
+ Linkage(getLinkageForRTTI(CGM.getContext().getTagDeclType(RD))),
+ ABI(ABI) {}
+
+ llvm::GlobalVariable *getBaseClassDescriptor(const MSRTTIClass &Classes);
+ llvm::GlobalVariable *
+ getBaseClassArray(SmallVectorImpl<MSRTTIClass> &Classes);
+ llvm::GlobalVariable *getClassHierarchyDescriptor();
+ llvm::GlobalVariable *getCompleteObjectLocator(const VPtrInfo *Info);
+
+ CodeGenModule &CGM;
+ ASTContext &Context;
+ llvm::LLVMContext &VMContext;
+ llvm::Module &Module;
+ const CXXRecordDecl *RD;
+ llvm::GlobalVariable::LinkageTypes Linkage;
+ MicrosoftCXXABI &ABI;
+};
+
+} // namespace
+
+/// \brief Recursively serializes a class hierarchy in pre-order depth first
+/// order.
+static void serializeClassHierarchy(SmallVectorImpl<MSRTTIClass> &Classes,
+ const CXXRecordDecl *RD) {
+ Classes.push_back(MSRTTIClass(RD));
+ for (const CXXBaseSpecifier &Base : RD->bases())
+ serializeClassHierarchy(Classes, Base.getType()->getAsCXXRecordDecl());
+}
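+
+// For a hypothetical diamond
+//   struct A {}; struct B : A {}; struct C : A {}; struct D : B, C {};
+// this yields D, B, A, C, A: pre-order, with A appearing once per path,
+// which is exactly what detectAmbiguousBases below relies on to flag A as
+// ambiguous.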
+
+/// \brief Find ambiguity among base classes.
+static void
+detectAmbiguousBases(SmallVectorImpl<MSRTTIClass> &Classes) {
+ llvm::SmallPtrSet<const CXXRecordDecl *, 8> VirtualBases;
+ llvm::SmallPtrSet<const CXXRecordDecl *, 8> UniqueBases;
+ llvm::SmallPtrSet<const CXXRecordDecl *, 8> AmbiguousBases;
+ for (MSRTTIClass *Class = &Classes.front(); Class <= &Classes.back();) {
+ if ((Class->Flags & MSRTTIClass::IsVirtual) &&
+ !VirtualBases.insert(Class->RD).second) {
+ Class = MSRTTIClass::getNextChild(Class);
+ continue;
+ }
+ if (!UniqueBases.insert(Class->RD).second)
+ AmbiguousBases.insert(Class->RD);
+ Class++;
+ }
+ if (AmbiguousBases.empty())
+ return;
+ for (MSRTTIClass &Class : Classes)
+ if (AmbiguousBases.count(Class.RD))
+ Class.Flags |= MSRTTIClass::IsAmbiguous;
+}
+
+llvm::GlobalVariable *MSRTTIBuilder::getClassHierarchyDescriptor() {
+ SmallString<256> MangledName;
+ {
+ llvm::raw_svector_ostream Out(MangledName);
+ ABI.getMangleContext().mangleCXXRTTIClassHierarchyDescriptor(RD, Out);
+ }
+
+ // Check to see if we've already declared this ClassHierarchyDescriptor.
+ if (auto CHD = Module.getNamedGlobal(MangledName))
+ return CHD;
+
+ // Serialize the class hierarchy and initialize the CHD Fields.
+ SmallVector<MSRTTIClass, 8> Classes;
+ serializeClassHierarchy(Classes, RD);
+ Classes.front().initialize(/*Parent=*/nullptr, /*Specifier=*/nullptr);
+ detectAmbiguousBases(Classes);
+ int Flags = 0;
+ for (auto Class : Classes) {
+ if (Class.RD->getNumBases() > 1)
+ Flags |= HasBranchingHierarchy;
+ // Note: cl.exe does not calculate "HasAmbiguousBases" correctly. We
+ // believe the field isn't actually used.
+ if (Class.Flags & MSRTTIClass::IsAmbiguous)
+ Flags |= HasAmbiguousBases;
+ }
+ if ((Flags & HasBranchingHierarchy) && RD->getNumVBases() != 0)
+ Flags |= HasVirtualBranchingHierarchy;
+ // These gep indices are used to get the address of the first element of the
+ // base class array.
+ llvm::Value *GEPIndices[] = {llvm::ConstantInt::get(CGM.IntTy, 0),
+ llvm::ConstantInt::get(CGM.IntTy, 0)};
+
+ // Forward-declare the class hierarchy descriptor
+ auto Type = ABI.getClassHierarchyDescriptorType();
+ auto CHD = new llvm::GlobalVariable(Module, Type, /*Constant=*/true, Linkage,
+ /*Initializer=*/nullptr,
+ MangledName);
+ if (CHD->isWeakForLinker())
+ CHD->setComdat(CGM.getModule().getOrInsertComdat(CHD->getName()));
+
+ auto *Bases = getBaseClassArray(Classes);
+
+ // Initialize the base class ClassHierarchyDescriptor.
+ llvm::Constant *Fields[] = {
+ llvm::ConstantInt::get(CGM.IntTy, 0), // Unknown
+ llvm::ConstantInt::get(CGM.IntTy, Flags),
+ llvm::ConstantInt::get(CGM.IntTy, Classes.size()),
+ ABI.getImageRelativeConstant(llvm::ConstantExpr::getInBoundsGetElementPtr(
+ Bases->getValueType(), Bases,
+ llvm::ArrayRef<llvm::Value *>(GEPIndices))),
+ };
+ CHD->setInitializer(llvm::ConstantStruct::get(Type, Fields));
+ return CHD;
+}
+
+llvm::GlobalVariable *
+MSRTTIBuilder::getBaseClassArray(SmallVectorImpl<MSRTTIClass> &Classes) {
+ SmallString<256> MangledName;
+ {
+ llvm::raw_svector_ostream Out(MangledName);
+ ABI.getMangleContext().mangleCXXRTTIBaseClassArray(RD, Out);
+ }
+
+ // Forward-declare the base class array.
+ // cl.exe pads the base class array with 1 (in 32 bit mode) or 4 (in 64 bit
+ // mode) bytes of padding. We provide a pointer sized amount of padding by
+ // adding +1 to Classes.size(). The sections have pointer alignment and are
+ // marked pick-any so it shouldn't matter.
+ llvm::Type *PtrType = ABI.getImageRelativeType(
+ ABI.getBaseClassDescriptorType()->getPointerTo());
+ auto *ArrType = llvm::ArrayType::get(PtrType, Classes.size() + 1);
+ auto *BCA =
+ new llvm::GlobalVariable(Module, ArrType,
+ /*Constant=*/true, Linkage,
+ /*Initializer=*/nullptr, MangledName);
+ if (BCA->isWeakForLinker())
+ BCA->setComdat(CGM.getModule().getOrInsertComdat(BCA->getName()));
+
+ // Initialize the BaseClassArray.
+ SmallVector<llvm::Constant *, 8> BaseClassArrayData;
+ for (MSRTTIClass &Class : Classes)
+ BaseClassArrayData.push_back(
+ ABI.getImageRelativeConstant(getBaseClassDescriptor(Class)));
+ BaseClassArrayData.push_back(llvm::Constant::getNullValue(PtrType));
+ BCA->setInitializer(llvm::ConstantArray::get(ArrType, BaseClassArrayData));
+ return BCA;
+}
+
+llvm::GlobalVariable *
+MSRTTIBuilder::getBaseClassDescriptor(const MSRTTIClass &Class) {
+ // Compute the fields for the BaseClassDescriptor. They are computed up front
+ // because they are mangled into the name of the object.
+ uint32_t OffsetInVBTable = 0;
+ int32_t VBPtrOffset = -1;
+ if (Class.VirtualRoot) {
+ auto &VTableContext = CGM.getMicrosoftVTableContext();
+ OffsetInVBTable = VTableContext.getVBTableIndex(RD, Class.VirtualRoot) * 4;
+ VBPtrOffset = Context.getASTRecordLayout(RD).getVBPtrOffset().getQuantity();
+ }
+
+ SmallString<256> MangledName;
+ {
+ llvm::raw_svector_ostream Out(MangledName);
+ ABI.getMangleContext().mangleCXXRTTIBaseClassDescriptor(
+ Class.RD, Class.OffsetInVBase, VBPtrOffset, OffsetInVBTable,
+ Class.Flags, Out);
+ }
+
+ // Check to see if we've already declared this object.
+ if (auto BCD = Module.getNamedGlobal(MangledName))
+ return BCD;
+
+ // Forward-declare the base class descriptor.
+ auto Type = ABI.getBaseClassDescriptorType();
+ auto BCD =
+ new llvm::GlobalVariable(Module, Type, /*Constant=*/true, Linkage,
+ /*Initializer=*/nullptr, MangledName);
+ if (BCD->isWeakForLinker())
+ BCD->setComdat(CGM.getModule().getOrInsertComdat(BCD->getName()));
+
+ // Initialize the BaseClassDescriptor.
+ llvm::Constant *Fields[] = {
+ ABI.getImageRelativeConstant(
+ ABI.getAddrOfRTTIDescriptor(Context.getTypeDeclType(Class.RD))),
+ llvm::ConstantInt::get(CGM.IntTy, Class.NumBases),
+ llvm::ConstantInt::get(CGM.IntTy, Class.OffsetInVBase),
+ llvm::ConstantInt::get(CGM.IntTy, VBPtrOffset),
+ llvm::ConstantInt::get(CGM.IntTy, OffsetInVBTable),
+ llvm::ConstantInt::get(CGM.IntTy, Class.Flags),
+ ABI.getImageRelativeConstant(
+ MSRTTIBuilder(ABI, Class.RD).getClassHierarchyDescriptor()),
+ };
+ BCD->setInitializer(llvm::ConstantStruct::get(Type, Fields));
+ return BCD;
+}
+
+llvm::GlobalVariable *
+MSRTTIBuilder::getCompleteObjectLocator(const VPtrInfo *Info) {
+ SmallString<256> MangledName;
+ {
+ llvm::raw_svector_ostream Out(MangledName);
+ ABI.getMangleContext().mangleCXXRTTICompleteObjectLocator(RD, Info->MangledPath, Out);
+ }
+
+ // Check to see if we've already computed this complete object locator.
+ if (auto COL = Module.getNamedGlobal(MangledName))
+ return COL;
+
+ // Compute the fields of the complete object locator.
+ int OffsetToTop = Info->FullOffsetInMDC.getQuantity();
+ int VFPtrOffset = 0;
+ // The offset includes the vtordisp if one exists.
+ if (const CXXRecordDecl *VBase = Info->getVBaseWithVPtr())
+ if (Context.getASTRecordLayout(RD)
+ .getVBaseOffsetsMap()
+ .find(VBase)
+ ->second.hasVtorDisp())
+ VFPtrOffset = Info->NonVirtualOffset.getQuantity() + 4;
+
+ // Forward-declare the complete object locator.
+ llvm::StructType *Type = ABI.getCompleteObjectLocatorType();
+ auto COL = new llvm::GlobalVariable(Module, Type, /*Constant=*/true, Linkage,
+ /*Initializer=*/nullptr, MangledName);
+
+ // Initialize the CompleteObjectLocator.
+ llvm::Constant *Fields[] = {
+ llvm::ConstantInt::get(CGM.IntTy, ABI.isImageRelative()),
+ llvm::ConstantInt::get(CGM.IntTy, OffsetToTop),
+ llvm::ConstantInt::get(CGM.IntTy, VFPtrOffset),
+ ABI.getImageRelativeConstant(
+ CGM.GetAddrOfRTTIDescriptor(Context.getTypeDeclType(RD))),
+ ABI.getImageRelativeConstant(getClassHierarchyDescriptor()),
+ ABI.getImageRelativeConstant(COL),
+ };
+ llvm::ArrayRef<llvm::Constant *> FieldsRef(Fields);
+ if (!ABI.isImageRelative())
+ FieldsRef = FieldsRef.drop_back();
+ COL->setInitializer(llvm::ConstantStruct::get(Type, FieldsRef));
+ if (COL->isWeakForLinker())
+ COL->setComdat(CGM.getModule().getOrInsertComdat(COL->getName()));
+ return COL;
+}
+
+static QualType decomposeTypeForEH(ASTContext &Context, QualType T,
+ bool &IsConst, bool &IsVolatile) {
+ T = Context.getExceptionObjectType(T);
+
+ // C++14 [except.handle]p3:
+ // A handler is a match for an exception object of type E if [...]
+ // - the handler is of type cv T or const T& where T is a pointer type and
+ // E is a pointer type that can be converted to T by [...]
+ // - a qualification conversion
+ IsConst = false;
+ IsVolatile = false;
+ QualType PointeeType = T->getPointeeType();
+ if (!PointeeType.isNull()) {
+ IsConst = PointeeType.isConstQualified();
+ IsVolatile = PointeeType.isVolatileQualified();
+ }
+
+ // Member pointer types like "const int A::*" are represented by having RTTI
+ // for "int A::*" and separately storing the const qualifier.
+ if (const auto *MPTy = T->getAs<MemberPointerType>())
+ T = Context.getMemberPointerType(PointeeType.getUnqualifiedType(),
+ MPTy->getClass());
+
+ // Pointer types like "const int * const *" are represented by having RTTI
+ // for "const int **" and separately storing the const qualifier.
+ if (T->isPointerType())
+ T = Context.getPointerType(PointeeType.getUnqualifiedType());
+
+ return T;
+}
+
+CatchTypeInfo
+MicrosoftCXXABI::getAddrOfCXXCatchHandlerType(QualType Type,
+ QualType CatchHandlerType) {
+ // TypeDescriptors for exceptions never have qualified pointer types,
+ // qualifiers are stored separately in order to support qualification
+ // conversions.
+ bool IsConst, IsVolatile;
+ Type = decomposeTypeForEH(getContext(), Type, IsConst, IsVolatile);
+
+ bool IsReference = CatchHandlerType->isReferenceType();
+
+ uint32_t Flags = 0;
+ if (IsConst)
+ Flags |= 1;
+ if (IsVolatile)
+ Flags |= 2;
+ if (IsReference)
+ Flags |= 8;
+
+ return CatchTypeInfo{getAddrOfRTTIDescriptor(Type)->stripPointerCasts(),
+ Flags};
+}
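+
+// Worked example (illustrative): a handler of type 'const char *&' is a
+// reference (flag 8) to a pointer with a const-qualified pointee (flag 1),
+// so Flags == 9 and the descriptor is the one for the unqualified 'char *'.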
+
+/// \brief Gets a TypeDescriptor. Returns a llvm::Constant * rather than a
+/// llvm::GlobalVariable * because different type descriptors have different
+/// types and need to be abstracted. They are abstracted by casting the
+/// address to an Int8PtrTy.
+llvm::Constant *MicrosoftCXXABI::getAddrOfRTTIDescriptor(QualType Type) {
+ SmallString<256> MangledName;
+ {
+ llvm::raw_svector_ostream Out(MangledName);
+ getMangleContext().mangleCXXRTTI(Type, Out);
+ }
+
+ // Check to see if we've already declared this TypeDescriptor.
+ if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(MangledName))
+ return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
+
+ // Compute the fields for the TypeDescriptor.
+ SmallString<256> TypeInfoString;
+ {
+ llvm::raw_svector_ostream Out(TypeInfoString);
+ getMangleContext().mangleCXXRTTIName(Type, Out);
+ }
+
+ // Declare and initialize the TypeDescriptor.
+ llvm::Constant *Fields[] = {
+ getTypeInfoVTable(CGM), // VFPtr
+ llvm::ConstantPointerNull::get(CGM.Int8PtrTy), // Runtime data
+ llvm::ConstantDataArray::getString(CGM.getLLVMContext(), TypeInfoString)};
+ llvm::StructType *TypeDescriptorType =
+ getTypeDescriptorType(TypeInfoString);
+ auto *Var = new llvm::GlobalVariable(
+ CGM.getModule(), TypeDescriptorType, /*Constant=*/false,
+ getLinkageForRTTI(Type),
+ llvm::ConstantStruct::get(TypeDescriptorType, Fields),
+ MangledName);
+ if (Var->isWeakForLinker())
+ Var->setComdat(CGM.getModule().getOrInsertComdat(Var->getName()));
+ return llvm::ConstantExpr::getBitCast(Var, CGM.Int8PtrTy);
+}
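+
+// For a class Foo this emits, if memory of the MS scheme serves, a global
+// along the lines of '??_R0?AVFoo@@@8' whose trailing array holds the name
+// string ".?AVFoo@@"; the exact spellings come from the mangler above.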
+
+/// \brief Gets or creates a Microsoft CompleteObjectLocator.
+llvm::GlobalVariable *
+MicrosoftCXXABI::getMSCompleteObjectLocator(const CXXRecordDecl *RD,
+ const VPtrInfo *Info) {
+ return MSRTTIBuilder(*this, RD).getCompleteObjectLocator(Info);
+}
+
+static void emitCXXConstructor(CodeGenModule &CGM,
+ const CXXConstructorDecl *ctor,
+ StructorType ctorType) {
+ // There are no constructor variants; always emit the complete constructor.
+ llvm::Function *Fn = CGM.codegenCXXStructor(ctor, StructorType::Complete);
+ CGM.maybeSetTrivialComdat(*ctor, *Fn);
+}
+
+static void emitCXXDestructor(CodeGenModule &CGM, const CXXDestructorDecl *dtor,
+ StructorType dtorType) {
+ // The complete destructor is equivalent to the base destructor for
+ // classes with no virtual bases, so try to emit it as an alias.
+ if (!dtor->getParent()->getNumVBases() &&
+ (dtorType == StructorType::Complete || dtorType == StructorType::Base)) {
+ bool ProducedAlias = !CGM.TryEmitDefinitionAsAlias(
+ GlobalDecl(dtor, Dtor_Complete), GlobalDecl(dtor, Dtor_Base), true);
+ if (ProducedAlias) {
+ if (dtorType == StructorType::Complete)
+ return;
+ if (dtor->isVirtual())
+ CGM.getVTables().EmitThunks(GlobalDecl(dtor, Dtor_Complete));
+ }
+ }
+
+ // The base destructor is equivalent to the base destructor of its
+ // base class if there is exactly one non-virtual base class with a
+ // non-trivial destructor, there are no fields with a non-trivial
+ // destructor, and the body of the destructor is trivial.
+ if (dtorType == StructorType::Base && !CGM.TryEmitBaseDestructorAsAlias(dtor))
+ return;
+
+ llvm::Function *Fn = CGM.codegenCXXStructor(dtor, dtorType);
+ if (Fn->isWeakForLinker())
+ Fn->setComdat(CGM.getModule().getOrInsertComdat(Fn->getName()));
+}
+
+void MicrosoftCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
+ StructorType Type) {
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
+ emitCXXConstructor(CGM, CD, Type);
+ return;
+ }
+ emitCXXDestructor(CGM, cast<CXXDestructorDecl>(MD), Type);
+}
+
+llvm::Function *
+MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
+ CXXCtorType CT) {
+ assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
+
+ // Calculate the mangled name.
+ SmallString<256> ThunkName;
+ llvm::raw_svector_ostream Out(ThunkName);
+ getMangleContext().mangleCXXCtor(CD, CT, Out);
+
+ // If the thunk has been generated previously, just return it.
+ if (llvm::GlobalValue *GV = CGM.getModule().getNamedValue(ThunkName))
+ return cast<llvm::Function>(GV);
+
+ // Create the llvm::Function.
+ const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeMSCtorClosure(CD, CT);
+ llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo);
+ const CXXRecordDecl *RD = CD->getParent();
+ QualType RecordTy = getContext().getRecordType(RD);
+ llvm::Function *ThunkFn = llvm::Function::Create(
+ ThunkTy, getLinkageForRTTI(RecordTy), ThunkName.str(), &CGM.getModule());
+ ThunkFn->setCallingConv(static_cast<llvm::CallingConv::ID>(
+ FnInfo.getEffectiveCallingConvention()));
+ if (ThunkFn->isWeakForLinker())
+ ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName()));
+ bool IsCopy = CT == Ctor_CopyingClosure;
+
+ // Start codegen.
+ CodeGenFunction CGF(CGM);
+ CGF.CurGD = GlobalDecl(CD, Ctor_Complete);
+
+ // Build FunctionArgs.
+ FunctionArgList FunctionArgs;
+
+ // A constructor always starts with a 'this' pointer as its first argument.
+ buildThisParam(CGF, FunctionArgs);
+
+ // Following the 'this' pointer is a reference to the source object that we
+ // are copying from.
+ ImplicitParamDecl SrcParam(
+ getContext(), nullptr, SourceLocation(), &getContext().Idents.get("src"),
+ getContext().getLValueReferenceType(RecordTy,
+ /*SpelledAsLValue=*/true));
+ if (IsCopy)
+ FunctionArgs.push_back(&SrcParam);
+
+ // Constructors for classes which utilize virtual bases have an additional
+ // parameter which indicates whether or not it is being delegated to by a more
+ // derived constructor.
+ ImplicitParamDecl IsMostDerived(getContext(), nullptr, SourceLocation(),
+ &getContext().Idents.get("is_most_derived"),
+ getContext().IntTy);
+ // Only add the parameter to the list if the class has virtual bases.
+ if (RD->getNumVBases() > 0)
+ FunctionArgs.push_back(&IsMostDerived);
+
+ // Start defining the function.
+ CGF.StartFunction(GlobalDecl(), FnInfo.getReturnType(), ThunkFn, FnInfo,
+ FunctionArgs, CD->getLocation(), SourceLocation());
+ EmitThisParam(CGF);
+ llvm::Value *This = getThisValue(CGF);
+
+ llvm::Value *SrcVal =
+ IsCopy ? CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&SrcParam), "src")
+ : nullptr;
+
+ CallArgList Args;
+
+ // Push the this ptr.
+ Args.add(RValue::get(This), CD->getThisType(getContext()));
+
+ // Push the src ptr.
+ if (SrcVal)
+ Args.add(RValue::get(SrcVal), SrcParam.getType());
+
+ // Add the rest of the default arguments.
+ std::vector<Stmt *> ArgVec;
+ for (unsigned I = IsCopy ? 1 : 0, E = CD->getNumParams(); I != E; ++I) {
+ Stmt *DefaultArg = getContext().getDefaultArgExprForConstructor(CD, I);
+ assert(DefaultArg && "sema forgot to instantiate default args");
+ ArgVec.push_back(DefaultArg);
+ }
+
+ CodeGenFunction::RunCleanupsScope Cleanups(CGF);
+
+ const auto *FPT = CD->getType()->castAs<FunctionProtoType>();
+ CGF.EmitCallArgs(Args, FPT, llvm::makeArrayRef(ArgVec), CD, IsCopy ? 1 : 0);
+
+ // Insert any ABI-specific implicit constructor arguments.
+ unsigned ExtraArgs = addImplicitConstructorArgs(CGF, CD, Ctor_Complete,
+ /*ForVirtualBase=*/false,
+ /*Delegating=*/false, Args);
+
+ // Call the constructor with our arguments.
+ llvm::Value *CalleeFn = CGM.getAddrOfCXXStructor(CD, StructorType::Complete);
+ const CGFunctionInfo &CalleeInfo = CGM.getTypes().arrangeCXXConstructorCall(
+ Args, CD, Ctor_Complete, ExtraArgs);
+ CGF.EmitCall(CalleeInfo, CalleeFn, ReturnValueSlot(), Args, CD);
+
+ Cleanups.ForceCleanup();
+
+ // Emit the ret instruction and remove any temporary instructions created to
+ // aid CodeGen.
+ CGF.FinishFunction(SourceLocation());
+
+ return ThunkFn;
+}
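+
+// A copying closure arises when, for example (hypothetical),
+//   struct S { S(const S &, int extra = 0); };
+// is caught by value: the runtime invokes the closure, which fills in the
+// defaulted arguments before calling the real copy constructor, since the
+// constructor's own signature doesn't match the (this, src) shape the
+// runtime expects.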
+
+llvm::Constant *MicrosoftCXXABI::getCatchableType(QualType T,
+ uint32_t NVOffset,
+ int32_t VBPtrOffset,
+ uint32_t VBIndex) {
+ assert(!T->isReferenceType());
+
+ CXXRecordDecl *RD = T->getAsCXXRecordDecl();
+ const CXXConstructorDecl *CD =
+ RD ? CGM.getContext().getCopyConstructorForExceptionObject(RD) : nullptr;
+ CXXCtorType CT = Ctor_Complete;
+ if (CD)
+ if (!hasDefaultCXXMethodCC(getContext(), CD) || CD->getNumParams() != 1)
+ CT = Ctor_CopyingClosure;
+
+ uint32_t Size = getContext().getTypeSizeInChars(T).getQuantity();
+ SmallString<256> MangledName;
+ {
+ llvm::raw_svector_ostream Out(MangledName);
+ getMangleContext().mangleCXXCatchableType(T, CD, CT, Size, NVOffset,
+ VBPtrOffset, VBIndex, Out);
+ }
+ if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(MangledName))
+ return getImageRelativeConstant(GV);
+
+ // The TypeDescriptor is used by the runtime to determine if a catch handler
+ // is appropriate for the exception object.
+ llvm::Constant *TD = getImageRelativeConstant(getAddrOfRTTIDescriptor(T));
+
+ // The runtime is responsible for calling the copy constructor if the
+ // exception is caught by value.
+ llvm::Constant *CopyCtor;
+ if (CD) {
+ if (CT == Ctor_CopyingClosure)
+ CopyCtor = getAddrOfCXXCtorClosure(CD, Ctor_CopyingClosure);
+ else
+ CopyCtor = CGM.getAddrOfCXXStructor(CD, StructorType::Complete);
+
+ CopyCtor = llvm::ConstantExpr::getBitCast(CopyCtor, CGM.Int8PtrTy);
+ } else {
+ CopyCtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
+ }
+ CopyCtor = getImageRelativeConstant(CopyCtor);
+
+ bool IsScalar = !RD;
+ bool HasVirtualBases = false;
+ bool IsStdBadAlloc = false; // std::bad_alloc is special for some reason.
+ QualType PointeeType = T;
+ if (T->isPointerType())
+ PointeeType = T->getPointeeType();
+ if (const CXXRecordDecl *RD = PointeeType->getAsCXXRecordDecl()) {
+ HasVirtualBases = RD->getNumVBases() > 0;
+ if (IdentifierInfo *II = RD->getIdentifier())
+ IsStdBadAlloc = II->isStr("bad_alloc") && RD->isInStdNamespace();
+ }
+
+ // Encode the relevant CatchableType properties into the Flags bitfield.
+ // FIXME: Figure out how bits 2 or 8 can get set.
+ uint32_t Flags = 0;
+ if (IsScalar)
+ Flags |= 1;
+ if (HasVirtualBases)
+ Flags |= 4;
+ if (IsStdBadAlloc)
+ Flags |= 16;
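+  // For example (illustrative), a thrown 'int' is a scalar with no virtual
+  // bases, so its CatchableType is emitted with Flags == 1.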
+
+ llvm::Constant *Fields[] = {
+ llvm::ConstantInt::get(CGM.IntTy, Flags), // Flags
+ TD, // TypeDescriptor
+ llvm::ConstantInt::get(CGM.IntTy, NVOffset), // NonVirtualAdjustment
+ llvm::ConstantInt::get(CGM.IntTy, VBPtrOffset), // OffsetToVBPtr
+ llvm::ConstantInt::get(CGM.IntTy, VBIndex), // VBTableIndex
+ llvm::ConstantInt::get(CGM.IntTy, Size), // Size
+ CopyCtor // CopyCtor
+ };
+ llvm::StructType *CTType = getCatchableTypeType();
+ auto *GV = new llvm::GlobalVariable(
+ CGM.getModule(), CTType, /*Constant=*/true, getLinkageForRTTI(T),
+ llvm::ConstantStruct::get(CTType, Fields), MangledName);
+ GV->setUnnamedAddr(true);
+ GV->setSection(".xdata");
+ if (GV->isWeakForLinker())
+ GV->setComdat(CGM.getModule().getOrInsertComdat(GV->getName()));
+ return getImageRelativeConstant(GV);
+}
+
+llvm::GlobalVariable *MicrosoftCXXABI::getCatchableTypeArray(QualType T) {
+ assert(!T->isReferenceType());
+
+ // See if we've already generated a CatchableTypeArray for this type before.
+ llvm::GlobalVariable *&CTA = CatchableTypeArrays[T];
+ if (CTA)
+ return CTA;
+
+ // Ensure that we don't have duplicate entries in our CatchableTypeArray by
+ // using a SmallSetVector. Duplicates may arise due to virtual bases
+ // occurring more than once in the hierarchy.
+ llvm::SmallSetVector<llvm::Constant *, 2> CatchableTypes;
+
+ // C++14 [except.handle]p3:
+ // A handler is a match for an exception object of type E if [...]
+ // - the handler is of type cv T or cv T& and T is an unambiguous public
+ // base class of E, or
+ // - the handler is of type cv T or const T& where T is a pointer type and
+ // E is a pointer type that can be converted to T by [...]
+ // - a standard pointer conversion (4.10) not involving conversions to
+ // pointers to private or protected or ambiguous classes
+ const CXXRecordDecl *MostDerivedClass = nullptr;
+ bool IsPointer = T->isPointerType();
+ if (IsPointer)
+ MostDerivedClass = T->getPointeeType()->getAsCXXRecordDecl();
+ else
+ MostDerivedClass = T->getAsCXXRecordDecl();
+
+ // Collect all the unambiguous public bases of the MostDerivedClass.
+ if (MostDerivedClass) {
+ const ASTContext &Context = getContext();
+ const ASTRecordLayout &MostDerivedLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+ MicrosoftVTableContext &VTableContext = CGM.getMicrosoftVTableContext();
+ SmallVector<MSRTTIClass, 8> Classes;
+ serializeClassHierarchy(Classes, MostDerivedClass);
+ Classes.front().initialize(/*Parent=*/nullptr, /*Specifier=*/nullptr);
+ detectAmbiguousBases(Classes);
+ for (const MSRTTIClass &Class : Classes) {
+ // Skip any ambiguous or private bases.
+ if (Class.Flags &
+ (MSRTTIClass::IsPrivateOnPath | MSRTTIClass::IsAmbiguous))
+ continue;
+ // Write down how to convert from a derived pointer to a base pointer.
+ uint32_t OffsetInVBTable = 0;
+ int32_t VBPtrOffset = -1;
+ if (Class.VirtualRoot) {
+ OffsetInVBTable =
+ VTableContext.getVBTableIndex(MostDerivedClass, Class.VirtualRoot)*4;
+ VBPtrOffset = MostDerivedLayout.getVBPtrOffset().getQuantity();
+ }
+
+ // Turn our record back into a pointer if the exception object is a
+ // pointer.
+ QualType RTTITy = QualType(Class.RD->getTypeForDecl(), 0);
+ if (IsPointer)
+ RTTITy = Context.getPointerType(RTTITy);
+ CatchableTypes.insert(getCatchableType(RTTITy, Class.OffsetInVBase,
+ VBPtrOffset, OffsetInVBTable));
+ }
+ }
+
+ // C++14 [except.handle]p3:
+ // A handler is a match for an exception object of type E if
+ // - The handler is of type cv T or cv T& and E and T are the same type
+ // (ignoring the top-level cv-qualifiers)
+ CatchableTypes.insert(getCatchableType(T));
+
+ // C++14 [except.handle]p3:
+ // A handler is a match for an exception object of type E if
+ // - the handler is of type cv T or const T& where T is a pointer type and
+ // E is a pointer type that can be converted to T by [...]
+ // - a standard pointer conversion (4.10) not involving conversions to
+ // pointers to private or protected or ambiguous classes
+ //
+ // C++14 [conv.ptr]p2:
+ // A prvalue of type "pointer to cv T," where T is an object type, can be
+ // converted to a prvalue of type "pointer to cv void".
+ if (IsPointer && T->getPointeeType()->isObjectType())
+ CatchableTypes.insert(getCatchableType(getContext().VoidPtrTy));
+
+ // C++14 [except.handle]p3:
+ // A handler is a match for an exception object of type E if [...]
+ // - the handler is of type cv T or const T& where T is a pointer or
+ // pointer to member type and E is std::nullptr_t.
+ //
+ // We cannot possibly list all possible pointer types here, making this
+ // implementation incompatible with the standard. However, MSVC includes an
+ // entry for pointer-to-void in this case. Let's do the same.
+ if (T->isNullPtrType())
+ CatchableTypes.insert(getCatchableType(getContext().VoidPtrTy));
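+  // Illustrative consequence: 'throw nullptr;' can be caught by
+  // 'catch (void *)', matching MSVC even though the full set of pointer
+  // handler types cannot be enumerated here.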
+
+ uint32_t NumEntries = CatchableTypes.size();
+ llvm::Type *CTType =
+ getImageRelativeType(getCatchableTypeType()->getPointerTo());
+ llvm::ArrayType *AT = llvm::ArrayType::get(CTType, NumEntries);
+ llvm::StructType *CTAType = getCatchableTypeArrayType(NumEntries);
+ llvm::Constant *Fields[] = {
+ llvm::ConstantInt::get(CGM.IntTy, NumEntries), // NumEntries
+ llvm::ConstantArray::get(
+ AT, llvm::makeArrayRef(CatchableTypes.begin(),
+ CatchableTypes.end())) // CatchableTypes
+ };
+ SmallString<256> MangledName;
+ {
+ llvm::raw_svector_ostream Out(MangledName);
+ getMangleContext().mangleCXXCatchableTypeArray(T, NumEntries, Out);
+ }
+ CTA = new llvm::GlobalVariable(
+ CGM.getModule(), CTAType, /*Constant=*/true, getLinkageForRTTI(T),
+ llvm::ConstantStruct::get(CTAType, Fields), MangledName);
+ CTA->setUnnamedAddr(true);
+ CTA->setSection(".xdata");
+ if (CTA->isWeakForLinker())
+ CTA->setComdat(CGM.getModule().getOrInsertComdat(CTA->getName()));
+ return CTA;
+}
+
+llvm::GlobalVariable *MicrosoftCXXABI::getThrowInfo(QualType T) {
+ bool IsConst, IsVolatile;
+ T = decomposeTypeForEH(getContext(), T, IsConst, IsVolatile);
+
+ // The CatchableTypeArray enumerates the various (CV-unqualified) types that
+ // the exception object may be caught as.
+ llvm::GlobalVariable *CTA = getCatchableTypeArray(T);
+ // The first field in a CatchableTypeArray is the number of CatchableTypes.
+ // This is used as a component of the mangled name which means that we need to
+ // know what it is in order to see if we have previously generated the
+ // ThrowInfo.
+ uint32_t NumEntries =
+ cast<llvm::ConstantInt>(CTA->getInitializer()->getAggregateElement(0U))
+ ->getLimitedValue();
+
+ SmallString<256> MangledName;
+ {
+ llvm::raw_svector_ostream Out(MangledName);
+ getMangleContext().mangleCXXThrowInfo(T, IsConst, IsVolatile, NumEntries,
+ Out);
+ }
+
+ // Reuse a previously generated ThrowInfo if we have generated an appropriate
+ // one before.
+ if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(MangledName))
+ return GV;
+
+ // The RTTI TypeDescriptor uses an unqualified type but catch clauses must
+ // be at least as CV qualified. Encode this requirement into the Flags
+ // bitfield.
+ uint32_t Flags = 0;
+ if (IsConst)
+ Flags |= 1;
+ if (IsVolatile)
+ Flags |= 2;
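+  // For example (illustrative): 'throw "text";' throws a pointer to const
+  // char, so IsConst is set and a handler that drops the const qualifier,
+  // such as 'catch (char *)', will not match.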
+
+ // The cleanup-function (a destructor) must be called when the exception
+ // object's lifetime ends.
+ llvm::Constant *CleanupFn = llvm::Constant::getNullValue(CGM.Int8PtrTy);
+ if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
+ if (CXXDestructorDecl *DtorD = RD->getDestructor())
+ if (!DtorD->isTrivial())
+ CleanupFn = llvm::ConstantExpr::getBitCast(
+ CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete),
+ CGM.Int8PtrTy);
+  // This is unused as far as we can tell; initialize it to null.
+ llvm::Constant *ForwardCompat =
+ getImageRelativeConstant(llvm::Constant::getNullValue(CGM.Int8PtrTy));
+ llvm::Constant *PointerToCatchableTypes = getImageRelativeConstant(
+ llvm::ConstantExpr::getBitCast(CTA, CGM.Int8PtrTy));
+ llvm::StructType *TIType = getThrowInfoType();
+ llvm::Constant *Fields[] = {
+ llvm::ConstantInt::get(CGM.IntTy, Flags), // Flags
+ getImageRelativeConstant(CleanupFn), // CleanupFn
+ ForwardCompat, // ForwardCompat
+ PointerToCatchableTypes // CatchableTypeArray
+ };
+ auto *GV = new llvm::GlobalVariable(
+ CGM.getModule(), TIType, /*Constant=*/true, getLinkageForRTTI(T),
+ llvm::ConstantStruct::get(TIType, Fields), StringRef(MangledName));
+ GV->setUnnamedAddr(true);
+ GV->setSection(".xdata");
+ if (GV->isWeakForLinker())
+ GV->setComdat(CGM.getModule().getOrInsertComdat(GV->getName()));
+ return GV;
+}
+
+void MicrosoftCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
+ const Expr *SubExpr = E->getSubExpr();
+ QualType ThrowType = SubExpr->getType();
+  // The exception object lives on the stack and its address is passed to the
+  // runtime function.
+ Address AI = CGF.CreateMemTemp(ThrowType);
+ CGF.EmitAnyExprToMem(SubExpr, AI, ThrowType.getQualifiers(),
+ /*IsInit=*/true);
+
+ // The so-called ThrowInfo is used to describe how the exception object may be
+ // caught.
+ llvm::GlobalVariable *TI = getThrowInfo(ThrowType);
+
+ // Call into the runtime to throw the exception.
+ llvm::Value *Args[] = {
+ CGF.Builder.CreateBitCast(AI.getPointer(), CGM.Int8PtrTy),
+ TI
+ };
+ CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(), Args);
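+  // The net effect is roughly (illustrative) a noreturn call into the MSVC
+  // runtime: _CxxThrowException(&exception_object, &throw_info).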
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
new file mode 100644
index 0000000..0be5c55
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
@@ -0,0 +1,249 @@
+//===--- ModuleBuilder.cpp - Emit LLVM Code from ASTs ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This consumes an AST and converts it to LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "CGDebugInfo.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include <memory>
+using namespace clang;
+
+namespace {
+ class CodeGeneratorImpl : public CodeGenerator {
+ DiagnosticsEngine &Diags;
+ ASTContext *Ctx;
+ const HeaderSearchOptions &HeaderSearchOpts; // Only used for debug info.
+ const PreprocessorOptions &PreprocessorOpts; // Only used for debug info.
+ const CodeGenOptions CodeGenOpts; // Intentionally copied in.
+
+ unsigned HandlingTopLevelDecls;
+ struct HandlingTopLevelDeclRAII {
+ CodeGeneratorImpl &Self;
+ HandlingTopLevelDeclRAII(CodeGeneratorImpl &Self) : Self(Self) {
+ ++Self.HandlingTopLevelDecls;
+ }
+ ~HandlingTopLevelDeclRAII() {
+ if (--Self.HandlingTopLevelDecls == 0)
+ Self.EmitDeferredDecls();
+ }
+ };
+
+ CoverageSourceInfo *CoverageInfo;
+
+ protected:
+ std::unique_ptr<llvm::Module> M;
+ std::unique_ptr<CodeGen::CodeGenModule> Builder;
+
+ private:
+ SmallVector<CXXMethodDecl *, 8> DeferredInlineMethodDefinitions;
+
+ public:
+ CodeGeneratorImpl(DiagnosticsEngine &diags, const std::string &ModuleName,
+ const HeaderSearchOptions &HSO,
+ const PreprocessorOptions &PPO, const CodeGenOptions &CGO,
+ llvm::LLVMContext &C,
+ CoverageSourceInfo *CoverageInfo = nullptr)
+ : Diags(diags), Ctx(nullptr), HeaderSearchOpts(HSO),
+ PreprocessorOpts(PPO), CodeGenOpts(CGO), HandlingTopLevelDecls(0),
+ CoverageInfo(CoverageInfo),
+ M(new llvm::Module(ModuleName, C)) {}
+
+ ~CodeGeneratorImpl() override {
+ // There should normally not be any leftover inline method definitions.
+ assert(DeferredInlineMethodDefinitions.empty() ||
+ Diags.hasErrorOccurred());
+ }
+
+ llvm::Module* GetModule() override {
+ return M.get();
+ }
+
+ const Decl *GetDeclForMangledName(StringRef MangledName) override {
+ GlobalDecl Result;
+ if (!Builder->lookupRepresentativeDecl(MangledName, Result))
+ return nullptr;
+ const Decl *D = Result.getCanonicalDecl().getDecl();
+ if (auto FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->hasBody(FD))
+ return FD;
+ } else if (auto TD = dyn_cast<TagDecl>(D)) {
+ if (auto Def = TD->getDefinition())
+ return Def;
+ }
+ return D;
+ }
+
+ llvm::Module *ReleaseModule() override { return M.release(); }
+
+ void Initialize(ASTContext &Context) override {
+ Ctx = &Context;
+
+ M->setTargetTriple(Ctx->getTargetInfo().getTriple().getTriple());
+ M->setDataLayout(Ctx->getTargetInfo().getDataLayoutString());
+ Builder.reset(new CodeGen::CodeGenModule(Context, HeaderSearchOpts,
+ PreprocessorOpts, CodeGenOpts,
+ *M, Diags, CoverageInfo));
+
+ for (size_t i = 0, e = CodeGenOpts.DependentLibraries.size(); i < e; ++i)
+ HandleDependentLibrary(CodeGenOpts.DependentLibraries[i]);
+ }
+
+ void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) override {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ Builder->HandleCXXStaticMemberVarInstantiation(VD);
+ }
+
+ bool HandleTopLevelDecl(DeclGroupRef DG) override {
+ if (Diags.hasErrorOccurred())
+ return true;
+
+ HandlingTopLevelDeclRAII HandlingDecl(*this);
+
+    // Make sure to emit all elements of the DeclGroup.
+ for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
+ Builder->EmitTopLevelDecl(*I);
+
+ return true;
+ }
+
+ void EmitDeferredDecls() {
+ if (DeferredInlineMethodDefinitions.empty())
+ return;
+
+ // Emit any deferred inline method definitions. Note that more deferred
+ // methods may be added during this loop, since ASTConsumer callbacks
+ // can be invoked if AST inspection results in declarations being added.
+ HandlingTopLevelDeclRAII HandlingDecl(*this);
+ for (unsigned I = 0; I != DeferredInlineMethodDefinitions.size(); ++I)
+ Builder->EmitTopLevelDecl(DeferredInlineMethodDefinitions[I]);
+ DeferredInlineMethodDefinitions.clear();
+ }
+
+ void HandleInlineMethodDefinition(CXXMethodDecl *D) override {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ assert(D->doesThisDeclarationHaveABody());
+
+ // We may want to emit this definition. However, that decision might be
+ // based on computing the linkage, and we have to defer that in case we
+ // are inside of something that will change the method's final linkage,
+ // e.g.
+ // typedef struct {
+ // void bar();
+ // void foo() { bar(); }
+ // } A;
+ DeferredInlineMethodDefinitions.push_back(D);
+
+ // Provide some coverage mapping even for methods that aren't emitted.
+ // Don't do this for templated classes though, as they may not be
+ // instantiable.
+ if (!D->getParent()->getDescribedClassTemplate())
+ Builder->AddDeferredUnusedCoverageMapping(D);
+ }
+
+  /// HandleTagDeclDefinition - This callback is invoked each time a TagDecl
+  /// (e.g. struct, union, enum, class) is completed. This allows the client
+  /// to hack on the type, which can occur at any point in the file (because
+  /// these can be defined in declspecs).
+ void HandleTagDeclDefinition(TagDecl *D) override {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ Builder->UpdateCompletedType(D);
+
+ // For MSVC compatibility, treat declarations of static data members with
+ // inline initializers as definitions.
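+    // For example (illustrative): 'struct S { static const int x = 42; };'
+    // makes 'x' eligible for emission here even without an out-of-line
+    // definition.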
+ if (Ctx->getTargetInfo().getCXXABI().isMicrosoft()) {
+ for (Decl *Member : D->decls()) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(Member)) {
+ if (Ctx->isMSStaticDataMemberInlineDefinition(VD) &&
+ Ctx->DeclMustBeEmitted(VD)) {
+ Builder->EmitGlobal(VD);
+ }
+ }
+ }
+ }
+ }
+
+ void HandleTagDeclRequiredDefinition(const TagDecl *D) override {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ if (CodeGen::CGDebugInfo *DI = Builder->getModuleDebugInfo())
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(D))
+ DI->completeRequiredType(RD);
+ }
+
+ void HandleTranslationUnit(ASTContext &Ctx) override {
+ if (Diags.hasErrorOccurred()) {
+ if (Builder)
+ Builder->clear();
+ M.reset();
+ return;
+ }
+
+ if (Builder)
+ Builder->Release();
+ }
+
+ void CompleteTentativeDefinition(VarDecl *D) override {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ Builder->EmitTentativeDefinition(D);
+ }
+
+ void HandleVTable(CXXRecordDecl *RD) override {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ Builder->EmitVTable(RD);
+ }
+
+ void HandleLinkerOptionPragma(llvm::StringRef Opts) override {
+ Builder->AppendLinkerOptions(Opts);
+ }
+
+ void HandleDetectMismatch(llvm::StringRef Name,
+ llvm::StringRef Value) override {
+ Builder->AddDetectMismatch(Name, Value);
+ }
+
+ void HandleDependentLibrary(llvm::StringRef Lib) override {
+ Builder->AddDependentLib(Lib);
+ }
+ };
+}
+
+void CodeGenerator::anchor() { }
+
+CodeGenerator *clang::CreateLLVMCodeGen(
+ DiagnosticsEngine &Diags, const std::string &ModuleName,
+ const HeaderSearchOptions &HeaderSearchOpts,
+ const PreprocessorOptions &PreprocessorOpts, const CodeGenOptions &CGO,
+ llvm::LLVMContext &C, CoverageSourceInfo *CoverageInfo) {
+ return new CodeGeneratorImpl(Diags, ModuleName, HeaderSearchOpts,
+ PreprocessorOpts, CGO, C, CoverageInfo);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
new file mode 100644
index 0000000..b397eb3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
@@ -0,0 +1,300 @@
+//===--- ObjectFilePCHContainerOperations.cpp -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/ObjectFilePCHContainerOperations.h"
+#include "CGDebugInfo.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/CodeGen/BackendUtil.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Serialization/ASTWriter.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Bitcode/BitstreamReader.h"
+#include "llvm/DebugInfo/DWARF/DWARFContext.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/TargetRegistry.h"
+#include <memory>
+
+using namespace clang;
+
+#define DEBUG_TYPE "pchcontainer"
+
+namespace {
+class PCHContainerGenerator : public ASTConsumer {
+ DiagnosticsEngine &Diags;
+ const std::string MainFileName;
+ ASTContext *Ctx;
+ ModuleMap &MMap;
+ const HeaderSearchOptions &HeaderSearchOpts;
+ const PreprocessorOptions &PreprocessorOpts;
+ CodeGenOptions CodeGenOpts;
+ const TargetOptions TargetOpts;
+ const LangOptions LangOpts;
+ std::unique_ptr<llvm::LLVMContext> VMContext;
+ std::unique_ptr<llvm::Module> M;
+ std::unique_ptr<CodeGen::CodeGenModule> Builder;
+ raw_pwrite_stream *OS;
+ std::shared_ptr<PCHBuffer> Buffer;
+
+ /// Visit every type and emit debug info for it.
+ struct DebugTypeVisitor : public RecursiveASTVisitor<DebugTypeVisitor> {
+ clang::CodeGen::CGDebugInfo &DI;
+ ASTContext &Ctx;
+ DebugTypeVisitor(clang::CodeGen::CGDebugInfo &DI, ASTContext &Ctx)
+ : DI(DI), Ctx(Ctx) {}
+
+ /// Determine whether this type can be represented in DWARF.
+ static bool CanRepresent(const Type *Ty) {
+ return !Ty->isDependentType() && !Ty->isUndeducedType();
+ }
+
+ bool VisitImportDecl(ImportDecl *D) {
+      if (!D->getImportedOwningModule())
+        DI.EmitImportDecl(*D);
+ return true;
+ }
+
+ bool VisitTypeDecl(TypeDecl *D) {
+ QualType QualTy = Ctx.getTypeDeclType(D);
+ if (!QualTy.isNull() && CanRepresent(QualTy.getTypePtr()))
+ DI.getOrCreateStandaloneType(QualTy, D->getLocation());
+ return true;
+ }
+
+ bool VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
+ QualType QualTy(D->getTypeForDecl(), 0);
+ if (!QualTy.isNull() && CanRepresent(QualTy.getTypePtr()))
+ DI.getOrCreateStandaloneType(QualTy, D->getLocation());
+ return true;
+ }
+
+ bool VisitFunctionDecl(FunctionDecl *D) {
+ if (isa<CXXMethodDecl>(D))
+ // This is not yet supported. Constructing the `this' argument
+ // mandates a CodeGenFunction.
+ return true;
+
+ SmallVector<QualType, 16> ArgTypes;
+ for (auto i : D->params())
+ ArgTypes.push_back(i->getType());
+ QualType RetTy = D->getReturnType();
+ QualType FnTy = Ctx.getFunctionType(RetTy, ArgTypes,
+ FunctionProtoType::ExtProtoInfo());
+ if (CanRepresent(FnTy.getTypePtr()))
+ DI.EmitFunctionDecl(D, D->getLocation(), FnTy);
+ return true;
+ }
+
+ bool VisitObjCMethodDecl(ObjCMethodDecl *D) {
+ if (!D->getClassInterface())
+ return true;
+
+ bool selfIsPseudoStrong, selfIsConsumed;
+ SmallVector<QualType, 16> ArgTypes;
+ ArgTypes.push_back(D->getSelfType(Ctx, D->getClassInterface(),
+ selfIsPseudoStrong, selfIsConsumed));
+ ArgTypes.push_back(Ctx.getObjCSelType());
+ for (auto i : D->params())
+ ArgTypes.push_back(i->getType());
+ QualType RetTy = D->getReturnType();
+ QualType FnTy = Ctx.getFunctionType(RetTy, ArgTypes,
+ FunctionProtoType::ExtProtoInfo());
+ if (CanRepresent(FnTy.getTypePtr()))
+ DI.EmitFunctionDecl(D, D->getLocation(), FnTy);
+ return true;
+ }
+ };
+
+public:
+ PCHContainerGenerator(CompilerInstance &CI, const std::string &MainFileName,
+ const std::string &OutputFileName,
+ raw_pwrite_stream *OS,
+ std::shared_ptr<PCHBuffer> Buffer)
+ : Diags(CI.getDiagnostics()), Ctx(nullptr),
+ MMap(CI.getPreprocessor().getHeaderSearchInfo().getModuleMap()),
+ HeaderSearchOpts(CI.getHeaderSearchOpts()),
+ PreprocessorOpts(CI.getPreprocessorOpts()),
+ TargetOpts(CI.getTargetOpts()), LangOpts(CI.getLangOpts()), OS(OS),
+ Buffer(Buffer) {
+ // The debug info output isn't affected by CodeModel and
+ // ThreadModel, but the backend expects them to be nonempty.
+ CodeGenOpts.CodeModel = "default";
+ CodeGenOpts.ThreadModel = "single";
+ CodeGenOpts.DebugTypeExtRefs = true;
+ CodeGenOpts.setDebugInfo(CodeGenOptions::FullDebugInfo);
+ }
+
+ ~PCHContainerGenerator() override = default;
+
+ void Initialize(ASTContext &Context) override {
+ assert(!Ctx && "initialized multiple times");
+
+ Ctx = &Context;
+ VMContext.reset(new llvm::LLVMContext());
+ M.reset(new llvm::Module(MainFileName, *VMContext));
+ M->setDataLayout(Ctx->getTargetInfo().getDataLayoutString());
+ Builder.reset(new CodeGen::CodeGenModule(
+ *Ctx, HeaderSearchOpts, PreprocessorOpts, CodeGenOpts, *M, Diags));
+ Builder->getModuleDebugInfo()->setModuleMap(MMap);
+ }
+
+ bool HandleTopLevelDecl(DeclGroupRef D) override {
+ if (Diags.hasErrorOccurred())
+ return true;
+
+ // Collect debug info for all decls in this group.
+ for (auto *I : D)
+ if (!I->isFromASTFile()) {
+ DebugTypeVisitor DTV(*Builder->getModuleDebugInfo(), *Ctx);
+ DTV.TraverseDecl(I);
+ }
+ return true;
+ }
+
+ void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) override {
+ HandleTopLevelDecl(D);
+ }
+
+ void HandleTagDeclDefinition(TagDecl *D) override {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ Builder->UpdateCompletedType(D);
+ }
+
+ void HandleTagDeclRequiredDefinition(const TagDecl *D) override {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(D))
+ Builder->getModuleDebugInfo()->completeRequiredType(RD);
+ }
+
+ /// Emit a container holding the serialized AST.
+ void HandleTranslationUnit(ASTContext &Ctx) override {
+ assert(M && VMContext && Builder);
+ // Delete these on function exit.
+ std::unique_ptr<llvm::LLVMContext> VMContext = std::move(this->VMContext);
+ std::unique_ptr<llvm::Module> M = std::move(this->M);
+ std::unique_ptr<CodeGen::CodeGenModule> Builder = std::move(this->Builder);
+
+ if (Diags.hasErrorOccurred())
+ return;
+
+ M->setTargetTriple(Ctx.getTargetInfo().getTriple().getTriple());
+ M->setDataLayout(Ctx.getTargetInfo().getDataLayoutString());
+ Builder->getModuleDebugInfo()->setDwoId(Buffer->Signature);
+
+ // Finalize the Builder.
+ if (Builder)
+ Builder->Release();
+
+ // Ensure the target exists.
+ std::string Error;
+ auto Triple = Ctx.getTargetInfo().getTriple();
+ if (!llvm::TargetRegistry::lookupTarget(Triple.getTriple(), Error))
+ llvm::report_fatal_error(Error);
+
+ // Emit the serialized Clang AST into its own section.
+ assert(Buffer->IsComplete && "serialization did not complete");
+ auto &SerializedAST = Buffer->Data;
+ auto Size = SerializedAST.size();
+ auto Int8Ty = llvm::Type::getInt8Ty(*VMContext);
+ auto *Ty = llvm::ArrayType::get(Int8Ty, Size);
+ auto *Data = llvm::ConstantDataArray::getString(
+ *VMContext, StringRef(SerializedAST.data(), Size),
+ /*AddNull=*/false);
+ auto *ASTSym = new llvm::GlobalVariable(
+ *M, Ty, /*constant*/ true, llvm::GlobalVariable::InternalLinkage, Data,
+ "__clang_ast");
+ // The on-disk hashtable needs to be aligned.
+ ASTSym->setAlignment(8);
+
+ // Mach-O also needs a segment name.
+ if (Triple.isOSBinFormatMachO())
+ ASTSym->setSection("__CLANG,__clangast");
+ // COFF has an eight character length limit.
+ else if (Triple.isOSBinFormatCOFF())
+ ASTSym->setSection("clangast");
+ else
+ ASTSym->setSection("__clangast");
+
+ DEBUG({
+ // Print the IR for the PCH container to the debug output.
+ llvm::SmallString<0> Buffer;
+ llvm::raw_svector_ostream OS(Buffer);
+ clang::EmitBackendOutput(Diags, CodeGenOpts, TargetOpts, LangOpts,
+ Ctx.getTargetInfo().getDataLayoutString(),
+ M.get(), BackendAction::Backend_EmitLL, &OS);
+ llvm::dbgs() << Buffer;
+ });
+
+ // Use the LLVM backend to emit the pch container.
+ clang::EmitBackendOutput(Diags, CodeGenOpts, TargetOpts, LangOpts,
+ Ctx.getTargetInfo().getDataLayoutString(),
+ M.get(), BackendAction::Backend_EmitObj, OS);
+
+ // Make sure the pch container hits disk.
+ OS->flush();
+
+ // Free the memory for the temporary buffer.
+ llvm::SmallVector<char, 0> Empty;
+ SerializedAST = std::move(Empty);
+ }
+};
+
+} // anonymous namespace
+
+std::unique_ptr<ASTConsumer>
+ObjectFilePCHContainerWriter::CreatePCHContainerGenerator(
+ CompilerInstance &CI, const std::string &MainFileName,
+ const std::string &OutputFileName, llvm::raw_pwrite_stream *OS,
+ std::shared_ptr<PCHBuffer> Buffer) const {
+ return llvm::make_unique<PCHContainerGenerator>(CI, MainFileName,
+ OutputFileName, OS, Buffer);
+}
+
+void ObjectFilePCHContainerReader::ExtractPCH(
+ llvm::MemoryBufferRef Buffer, llvm::BitstreamReader &StreamFile) const {
+ if (auto OF = llvm::object::ObjectFile::createObjectFile(Buffer)) {
+ auto *Obj = OF.get().get();
+ bool IsCOFF = isa<llvm::object::COFFObjectFile>(Obj);
+ // Find the clang AST section in the container.
+ for (auto &Section : OF->get()->sections()) {
+ StringRef Name;
+ Section.getName(Name);
+ if ((!IsCOFF && Name == "__clangast") ||
+ ( IsCOFF && Name == "clangast")) {
+ StringRef Buf;
+ Section.getContents(Buf);
+ StreamFile.init((const unsigned char *)Buf.begin(),
+ (const unsigned char *)Buf.end());
+ return;
+ }
+ }
+ }
+
+ // As a fallback, treat the buffer as a raw AST.
+ StreamFile.init((const unsigned char *)Buffer.getBufferStart(),
+ (const unsigned char *)Buffer.getBufferEnd());
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/SanitizerMetadata.cpp b/contrib/llvm/tools/clang/lib/CodeGen/SanitizerMetadata.cpp
new file mode 100644
index 0000000..2a338ba
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/SanitizerMetadata.cpp
@@ -0,0 +1,95 @@
+//===--- SanitizerMetadata.cpp - Metadata for sanitizers ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Class which emits metadata consumed by sanitizer instrumentation passes.
+//
+//===----------------------------------------------------------------------===//
+#include "SanitizerMetadata.h"
+#include "CodeGenModule.h"
+#include "clang/AST/Type.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Constants.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+SanitizerMetadata::SanitizerMetadata(CodeGenModule &CGM) : CGM(CGM) {}
+
+void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
+ SourceLocation Loc, StringRef Name,
+ QualType Ty, bool IsDynInit,
+ bool IsBlacklisted) {
+ if (!CGM.getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address |
+ SanitizerKind::KernelAddress))
+ return;
+ IsDynInit &= !CGM.isInSanitizerBlacklist(GV, Loc, Ty, "init");
+ IsBlacklisted |= CGM.isInSanitizerBlacklist(GV, Loc, Ty);
+
+ llvm::Metadata *LocDescr = nullptr;
+ llvm::Metadata *GlobalName = nullptr;
+ llvm::LLVMContext &VMContext = CGM.getLLVMContext();
+ if (!IsBlacklisted) {
+ // Don't generate source location and global name if it is blacklisted -
+ // it won't be instrumented anyway.
+ LocDescr = getLocationMetadata(Loc);
+ if (!Name.empty())
+ GlobalName = llvm::MDString::get(VMContext, Name);
+ }
+
+ llvm::Metadata *GlobalMetadata[] = {
+ llvm::ConstantAsMetadata::get(GV), LocDescr, GlobalName,
+ llvm::ConstantAsMetadata::get(
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), IsDynInit)),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::Type::getInt1Ty(VMContext), IsBlacklisted))};
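+  // For a global 'int g;' the operand looks roughly like (illustrative):
+  //   !{i32* @g, !sourceloc, !"g", i1 false, i1 false}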
+
+ llvm::MDNode *ThisGlobal = llvm::MDNode::get(VMContext, GlobalMetadata);
+ llvm::NamedMDNode *AsanGlobals =
+ CGM.getModule().getOrInsertNamedMetadata("llvm.asan.globals");
+ AsanGlobals->addOperand(ThisGlobal);
+}
+
+void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
+ const VarDecl &D, bool IsDynInit) {
+ if (!CGM.getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address |
+ SanitizerKind::KernelAddress))
+ return;
+ std::string QualName;
+ llvm::raw_string_ostream OS(QualName);
+ D.printQualifiedName(OS);
+ reportGlobalToASan(GV, D.getLocation(), OS.str(), D.getType(), IsDynInit);
+}
+
+void SanitizerMetadata::disableSanitizerForGlobal(llvm::GlobalVariable *GV) {
+ // For now, just make sure the global is not modified by the ASan
+ // instrumentation.
+ if (CGM.getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address |
+ SanitizerKind::KernelAddress))
+ reportGlobalToASan(GV, SourceLocation(), "", QualType(), false, true);
+}
+
+void SanitizerMetadata::disableSanitizerForInstruction(llvm::Instruction *I) {
+ I->setMetadata(CGM.getModule().getMDKindID("nosanitize"),
+ llvm::MDNode::get(CGM.getLLVMContext(), None));
+}
+
+llvm::MDNode *SanitizerMetadata::getLocationMetadata(SourceLocation Loc) {
+ PresumedLoc PLoc = CGM.getContext().getSourceManager().getPresumedLoc(Loc);
+ if (!PLoc.isValid())
+ return nullptr;
+ llvm::LLVMContext &VMContext = CGM.getLLVMContext();
+ llvm::Metadata *LocMetadata[] = {
+ llvm::MDString::get(VMContext, PLoc.getFilename()),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), PLoc.getLine())),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), PLoc.getColumn())),
+ };
+ return llvm::MDNode::get(VMContext, LocMetadata);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/SanitizerMetadata.h b/contrib/llvm/tools/clang/lib/CodeGen/SanitizerMetadata.h
new file mode 100644
index 0000000..166f0e6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/SanitizerMetadata.h
@@ -0,0 +1,53 @@
+//===--- SanitizerMetadata.h - Metadata for sanitizers ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Class which emits metadata consumed by sanitizer instrumentation passes.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_LIB_CODEGEN_SANITIZERMETADATA_H
+#define LLVM_CLANG_LIB_CODEGEN_SANITIZERMETADATA_H
+
+#include "clang/AST/Type.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
+
+namespace llvm {
+class GlobalVariable;
+class Instruction;
+class MDNode;
+}
+
+namespace clang {
+class VarDecl;
+
+namespace CodeGen {
+
+class CodeGenModule;
+
+class SanitizerMetadata {
+ SanitizerMetadata(const SanitizerMetadata &) = delete;
+ void operator=(const SanitizerMetadata &) = delete;
+
+ CodeGenModule &CGM;
+public:
+ SanitizerMetadata(CodeGenModule &CGM);
+ void reportGlobalToASan(llvm::GlobalVariable *GV, const VarDecl &D,
+ bool IsDynInit = false);
+ void reportGlobalToASan(llvm::GlobalVariable *GV, SourceLocation Loc,
+ StringRef Name, QualType Ty, bool IsDynInit = false,
+ bool IsBlacklisted = false);
+ void disableSanitizerForGlobal(llvm::GlobalVariable *GV);
+ void disableSanitizerForInstruction(llvm::Instruction *I);
+private:
+ llvm::MDNode *getLocationMetadata(SourceLocation Loc);
+};
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
new file mode 100644
index 0000000..4566fdb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
@@ -0,0 +1,7601 @@
+//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliance.
+//
+//===----------------------------------------------------------------------===//
+
+#include "TargetInfo.h"
+#include "ABIInfo.h"
+#include "CGCXXABI.h"
+#include "CGValue.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm> // std::sort
+
+using namespace clang;
+using namespace CodeGen;
+
+static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
+ llvm::Value *Array,
+ llvm::Value *Value,
+ unsigned FirstIndex,
+ unsigned LastIndex) {
+ // Alternatively, we could emit this as a loop in the source.
+ for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
+ llvm::Value *Cell =
+ Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
+ Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
+ }
+}
+
+static bool isAggregateTypeForABI(QualType T) {
+ return !CodeGenFunction::hasScalarEvaluationKind(T) ||
+ T->isMemberFunctionPointerType();
+}
+
+ABIArgInfo
+ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign,
+ llvm::Type *Padding) const {
+ return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty),
+ ByRef, Realign, Padding);
+}
+
+ABIArgInfo
+ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
+ return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
+ /*ByRef*/ false, Realign);
+}
+
+Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ return Address::invalid();
+}
+
+ABIInfo::~ABIInfo() {}
+
+static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
+ CGCXXABI &CXXABI) {
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD)
+ return CGCXXABI::RAA_Default;
+ return CXXABI.getRecordArgABI(RD);
+}
+
+static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
+ CGCXXABI &CXXABI) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return CGCXXABI::RAA_Default;
+ return getRecordArgABI(RT, CXXABI);
+}
+
+/// Pass transparent unions as if they were the type of the first element. Sema
+/// should ensure that all elements of the union have the same "machine type".
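+/// For example (illustrative):
+///   typedef union { int *ip; const int *cip; } U
+///       __attribute__((transparent_union));
+/// An argument of type U is passed as if it had the type of its first
+/// member, 'int *'.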
+static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
+ if (const RecordType *UT = Ty->getAsUnionType()) {
+ const RecordDecl *UD = UT->getDecl();
+ if (UD->hasAttr<TransparentUnionAttr>()) {
+ assert(!UD->field_empty() && "sema created an empty transparent union");
+ return UD->field_begin()->getType();
+ }
+ }
+ return Ty;
+}
+
+CGCXXABI &ABIInfo::getCXXABI() const {
+ return CGT.getCXXABI();
+}
+
+ASTContext &ABIInfo::getContext() const {
+ return CGT.getContext();
+}
+
+llvm::LLVMContext &ABIInfo::getVMContext() const {
+ return CGT.getLLVMContext();
+}
+
+const llvm::DataLayout &ABIInfo::getDataLayout() const {
+ return CGT.getDataLayout();
+}
+
+const TargetInfo &ABIInfo::getTarget() const {
+ return CGT.getTarget();
+}
+
+bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
+ return false;
+}
+
+bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
+ uint64_t Members) const {
+ return false;
+}
+
+bool ABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
+ return false;
+}
+
+void ABIArgInfo::dump() const {
+ raw_ostream &OS = llvm::errs();
+ OS << "(ABIArgInfo Kind=";
+ switch (TheKind) {
+ case Direct:
+ OS << "Direct Type=";
+ if (llvm::Type *Ty = getCoerceToType())
+ Ty->print(OS);
+ else
+ OS << "null";
+ break;
+ case Extend:
+ OS << "Extend";
+ break;
+ case Ignore:
+ OS << "Ignore";
+ break;
+ case InAlloca:
+ OS << "InAlloca Offset=" << getInAllocaFieldIndex();
+ break;
+ case Indirect:
+ OS << "Indirect Align=" << getIndirectAlign().getQuantity()
+ << " ByVal=" << getIndirectByVal()
+ << " Realign=" << getIndirectRealign();
+ break;
+ case Expand:
+ OS << "Expand";
+ break;
+ }
+ OS << ")\n";
+}
+
+// Dynamically round a pointer up to a multiple of the given alignment.
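+// e.g. (illustrative) Ptr == 0x1003 with Align == 8 yields
+// (0x1003 + 7) & -8 == 0x1008.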
+static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
+ llvm::Value *Ptr,
+ CharUnits Align) {
+ llvm::Value *PtrAsInt = Ptr;
+ // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
+ PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
+ PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
+ llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
+ PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
+ llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
+ PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
+ Ptr->getType(),
+ Ptr->getName() + ".aligned");
+ return PtrAsInt;
+}
+
+/// Emit va_arg for a platform using the common void* representation,
+/// where arguments are simply emitted in an array of slots on the stack.
+///
+/// This version implements the core direct-value passing rules.
+///
+/// \param SlotSize - The size and alignment of a stack slot.
+/// Each argument will be allocated to a multiple of this number of
+/// slots, and all the slots will be aligned to this value.
+/// \param AllowHigherAlign - The slot alignment is not a cap;
+/// an argument type with an alignment greater than the slot size
+/// will be emitted on a higher-alignment address, potentially
+/// leaving one or more empty slots behind as padding. If this
+/// is false, the returned address might be less-aligned than
+/// DirectAlign.
+static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
+ Address VAListAddr,
+ llvm::Type *DirectTy,
+ CharUnits DirectSize,
+ CharUnits DirectAlign,
+ CharUnits SlotSize,
+ bool AllowHigherAlign) {
+ // Cast the element type to i8* if necessary. Some platforms define
+ // va_list as a struct containing an i8* instead of just an i8*.
+ if (VAListAddr.getElementType() != CGF.Int8PtrTy)
+ VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);
+
+ llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");
+
+ // If the CC aligns values higher than the slot size, do so if needed.
+ Address Addr = Address::invalid();
+ if (AllowHigherAlign && DirectAlign > SlotSize) {
+ Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
+ DirectAlign);
+ } else {
+ Addr = Address(Ptr, SlotSize);
+ }
+
+ // Advance the pointer past the argument, then store that back.
+ CharUnits FullDirectSize = DirectSize.RoundUpToAlignment(SlotSize);
+ llvm::Value *NextPtr =
+ CGF.Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), FullDirectSize,
+ "argp.next");
+ CGF.Builder.CreateStore(NextPtr, VAListAddr);
+
+ // If the argument is smaller than a slot, and this is a big-endian
+ // target, the argument will be right-adjusted in its slot.
+ if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian()) {
+ Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
+ }
+
+ Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
+ return Addr;
+}
+
+/// Emit va_arg for a platform using the common void* representation,
+/// where arguments are simply emitted in an array of slots on the stack.
+///
+/// \param IsIndirect - Values of this type are passed indirectly.
+/// \param ValueInfo - The size and alignment of this type, generally
+/// computed with getContext().getTypeInfoInChars(ValueTy).
+/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
+/// Each argument will be allocated to a multiple of this number of
+/// slots, and all the slots will be aligned to this value.
+/// \param AllowHigherAlign - The slot alignment is not a cap;
+/// an argument type with an alignment greater than the slot size
+/// will be emitted on a higher-alignment address, potentially
+/// leaving one or more empty slots behind as padding.
+static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType ValueTy, bool IsIndirect,
+ std::pair<CharUnits, CharUnits> ValueInfo,
+ CharUnits SlotSizeAndAlign,
+ bool AllowHigherAlign) {
+ // The size and alignment of the value that was passed directly.
+ CharUnits DirectSize, DirectAlign;
+ if (IsIndirect) {
+ DirectSize = CGF.getPointerSize();
+ DirectAlign = CGF.getPointerAlign();
+ } else {
+ DirectSize = ValueInfo.first;
+ DirectAlign = ValueInfo.second;
+ }
+
+ // Cast the address we've calculated to the right type.
+ llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
+ if (IsIndirect)
+ DirectTy = DirectTy->getPointerTo(0);
+
+ Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
+ DirectSize, DirectAlign,
+ SlotSizeAndAlign,
+ AllowHigherAlign);
+
+ if (IsIndirect) {
+ Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);
+ }
+
+ return Addr;
+
+}
+
+static Address emitMergePHI(CodeGenFunction &CGF,
+ Address Addr1, llvm::BasicBlock *Block1,
+ Address Addr2, llvm::BasicBlock *Block2,
+ const llvm::Twine &Name = "") {
+ assert(Addr1.getType() == Addr2.getType());
+ llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
+ PHI->addIncoming(Addr1.getPointer(), Block1);
+ PHI->addIncoming(Addr2.getPointer(), Block2);
+ CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
+ return Address(PHI, Align);
+}
+
+TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
+
+// If someone can figure out a general rule for this, that would be great.
+// It's probably just doomed to be platform-dependent, though.
+unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
+ // Verified for:
+ // x86-64 FreeBSD, Linux, Darwin
+ // x86-32 FreeBSD, Linux, Darwin
+ // PowerPC Linux, Darwin
+ // ARM Darwin (*not* EABI)
+ // AArch64 Linux
+ return 32;
+}
+
+bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
+ const FunctionNoProtoType *fnType) const {
+ // The following conventions are known to require this to be false:
+ // x86_stdcall
+ // MIPS
+ // For everything else, we just prefer false unless we opt out.
+ return false;
+}
+
+void
+TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
+ llvm::SmallString<24> &Opt) const {
+ // This assumes the user is passing a library name like "rt" instead of a
+ // filename like "librt.a/so", and that they don't care whether it's static or
+ // dynamic.
+ Opt = "-l";
+ Opt += Lib;
+}
+
+static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
+
+/// isEmptyField - Return true iff the field is "empty", that is, it
+/// is an unnamed bit-field or an (array of) empty record(s).
+static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
+ bool AllowArrays) {
+ if (FD->isUnnamedBitfield())
+ return true;
+
+ QualType FT = FD->getType();
+
+ // Constant arrays of empty records count as empty, strip them off.
+ // Constant arrays of zero length always count as empty.
+ if (AllowArrays)
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
+ if (AT->getSize() == 0)
+ return true;
+ FT = AT->getElementType();
+ }
+
+ const RecordType *RT = FT->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ // C++ record fields are never empty, at least in the Itanium ABI.
+ //
+ // FIXME: We should use a predicate for whether this behavior is true in the
+ // current ABI.
+ if (isa<CXXRecordDecl>(RT->getDecl()))
+ return false;
+
+ return isEmptyRecord(Context, FT, AllowArrays);
+}
+
+/// isEmptyRecord - Return true iff a structure contains only empty
+/// fields. Note that a structure with a flexible array member is not
+/// considered empty.
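+///
+/// For example (illustrative, in C): 'struct E { int : 0; };' is empty, and so
+/// is 'struct W { struct E e[4]; };' when AllowArrays is true.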
+static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+    return false;
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ for (const auto &I : CXXRD->bases())
+ if (!isEmptyRecord(Context, I.getType(), true))
+ return false;
+
+ for (const auto *I : RD->fields())
+ if (!isEmptyField(Context, I, AllowArrays))
+ return false;
+ return true;
+}
+
+/// isSingleElementStruct - Determine if a structure is a "single
+/// element struct", i.e. it has exactly one non-empty field or
+/// exactly one field which is itself a single element
+/// struct. Structures with flexible array members are never
+/// considered single element structs.
+///
+/// \return The field declaration for the single non-empty field, if
+/// it exists.
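+///
+/// For example (illustrative): 'struct S { double d[1]; };' is a
+/// single-element struct whose element type is 'double', while
+/// 'struct P { int a; int b; };' is not.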
+static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return nullptr;
+
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return nullptr;
+
+ const Type *Found = nullptr;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (const auto &I : CXXRD->bases()) {
+ // Ignore empty records.
+ if (isEmptyRecord(Context, I.getType(), true))
+ continue;
+
+ // If we already found an element then this isn't a single-element struct.
+ if (Found)
+ return nullptr;
+
+ // If this is non-empty and not a single element struct, the composite
+ // cannot be a single element struct.
+ Found = isSingleElementStruct(I.getType(), Context);
+ if (!Found)
+ return nullptr;
+ }
+ }
+
+ // Check for single element.
+ for (const auto *FD : RD->fields()) {
+ QualType FT = FD->getType();
+
+ // Ignore empty fields.
+ if (isEmptyField(Context, FD, true))
+ continue;
+
+ // If we already found an element then this isn't a single-element
+ // struct.
+ if (Found)
+ return nullptr;
+
+ // Treat single element arrays as the element.
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
+ if (AT->getSize().getZExtValue() != 1)
+ break;
+ FT = AT->getElementType();
+ }
+
+ if (!isAggregateTypeForABI(FT)) {
+ Found = FT.getTypePtr();
+ } else {
+ Found = isSingleElementStruct(FT, Context);
+ if (!Found)
+ return nullptr;
+ }
+ }
+
+ // We don't consider a struct a single-element struct if it has
+ // padding beyond the element type.
+ if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
+ return nullptr;
+
+ return Found;
+}
+
+static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
+ // Treat complex types as the element type.
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>())
+ Ty = CTy->getElementType();
+
+ // Check for a type which we know has a simple scalar argument-passing
+ // convention without any padding. (We're specifically looking for 32
+ // and 64-bit integer and integer-equivalents, float, and double.)
+ if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
+ !Ty->isEnumeralType() && !Ty->isBlockPointerType())
+ return false;
+
+ uint64_t Size = Context.getTypeSize(Ty);
+ return Size == 32 || Size == 64;
+}
+
+/// canExpandIndirectArgument - Test whether an argument type which is to be
+/// passed indirectly (on the stack) would have the equivalent layout if it was
+/// expanded into separate arguments. If so, we prefer to do the latter to avoid
+/// inhibiting optimizations.
+///
+// FIXME: This predicate is missing many cases, currently it just follows
+// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
+// should probably make this smarter, or better yet make the LLVM backend
+// capable of handling it.
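+//
+// For example (illustrative): 'struct { int a; int b; }' has no padding and
+// can be expanded, while 'struct { int a; double b; }' contains 32 bits of
+// alignment padding and cannot.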
+static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
+ // We can only expand structure types.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ // We can only expand (C) structures.
+ //
+ // FIXME: This needs to be generalized to handle classes as well.
+ const RecordDecl *RD = RT->getDecl();
+ if (!RD->isStruct())
+ return false;
+
+  // We can only expand C-like CXXRecordDecls.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ if (!CXXRD->isCLike())
+ return false;
+ }
+
+ uint64_t Size = 0;
+
+ for (const auto *FD : RD->fields()) {
+ if (!is32Or64BitBasicType(FD->getType(), Context))
+ return false;
+
+ // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
+ // how to expand them yet, and the predicate for telling if a bitfield still
+ // counts as "basic" is more complicated than what we were doing previously.
+ if (FD->isBitField())
+ return false;
+
+ Size += Context.getTypeSize(FD->getType());
+ }
+
+ // Make sure there are not any holes in the struct.
+ if (Size != Context.getTypeSize(Ty))
+ return false;
+
+ return true;
+}
+
+namespace {
+/// DefaultABIInfo - The default implementation for ABI specific
+/// details. This implementation provides information which results in
+/// self-consistent and sensible LLVM IR generation, but does not
+/// conform to any particular ABI.
+class DefaultABIInfo : public ABIInfo {
+public:
+ DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+ }
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
+};
+
+Address DefaultABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ return Address::invalid();
+}
+
+ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ if (isAggregateTypeForABI(Ty)) {
+ // Records with non-trivial destructors/copy-constructors should not be
+ // passed by value.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ return getNaturalAlignIndirect(Ty);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy))
+ return getNaturalAlignIndirect(RetTy);
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+//===----------------------------------------------------------------------===//
+// WebAssembly ABI Implementation
+//
+// This is a very simple ABI that relies a lot on DefaultABIInfo.
+//===----------------------------------------------------------------------===//
+
+class WebAssemblyABIInfo final : public DefaultABIInfo {
+public:
+ explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT)
+ : DefaultABIInfo(CGT) {}
+
+private:
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+ // DefaultABIInfo's classifyReturnType and classifyArgumentType are
+ // non-virtual, but computeInfo is virtual, so we overload that.
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &Arg : FI.arguments())
+ Arg.info = classifyArgumentType(Arg.type);
+ }
+};
+
+class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
+public:
+ explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {}
+};
+
+/// \brief Classify argument of given type \p Ty.
+ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ if (isAggregateTypeForABI(Ty)) {
+ // Records with non-trivial destructors/copy-constructors should not be
+ // passed by value.
+ if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+ // Lower single-element structs to just pass a regular value. TODO: We
+ // could do reasonable-size multiple-element structs too, using getExpand(),
+ // though watch out for things like bitfields.
+ if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
+ return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+ }
+
+ // Otherwise just do the default thing.
+ return DefaultABIInfo::classifyArgumentType(Ty);
+}
+
+ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
+ if (isAggregateTypeForABI(RetTy)) {
+ // Records with non-trivial destructors/copy-constructors should not be
+ // returned by value.
+ if (!getRecordArgABI(RetTy, getCXXABI())) {
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), RetTy, true))
+ return ABIArgInfo::getIgnore();
+ // Lower single-element structs to just return a regular value. TODO: We
+ // could do reasonable-size multiple-element structs too, using
+ // ABIArgInfo::getDirect().
+ if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
+ return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+ }
+ }
+
+ // Otherwise just do the default thing.
+ return DefaultABIInfo::classifyReturnType(RetTy);
+}
+
+//===----------------------------------------------------------------------===//
+// le32/PNaCl bitcode ABI Implementation
+//
+// This is a simplified version of the x86_32 ABI. Arguments and return values
+// are always passed on the stack.
+//===----------------------------------------------------------------------===//
+
+class PNaClABIInfo : public ABIInfo {
+ public:
+ PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+ Address EmitVAArg(CodeGenFunction &CGF,
+ Address VAListAddr, QualType Ty) const override;
+};
+
+class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
+ public:
+ PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
+};
+
+void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+}
+
+Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ return Address::invalid();
+}
+
+/// \brief Classify argument of given type \p Ty.
+ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
+ if (isAggregateTypeForABI(Ty)) {
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty);
+ } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
+ // Treat an enum type as its underlying type.
+ Ty = EnumTy->getDecl()->getIntegerType();
+ } else if (Ty->isFloatingType()) {
+ // Floating-point types don't go inreg.
+ return ABIArgInfo::getDirect();
+ }
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ // In the PNaCl ABI we always return records/structures on the stack.
+ if (isAggregateTypeForABI(RetTy))
+ return getNaturalAlignIndirect(RetTy);
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
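+// Worked example (hypothetical declarations, for illustration only): on
+// PNaCl every aggregate goes to the stack, so
+//   struct S { int a; };   // passed and returned indirectly
+//   float f(float);        // direct, never in an integer register
+//   short g(short);        // promotable -> extended
+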
+/// IsX86_MMXType - Return true if this is an MMX type.
+bool IsX86_MMXType(llvm::Type *IRType) {
+ // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
+ return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
+ cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
+ IRType->getScalarSizeInBits() != 64;
+}
+
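+// For illustration: <2 x i32>, <4 x i16>, and <8 x i8> all satisfy the check
+// above, while <1 x i64> is rejected by the scalar-size test and <2 x float>
+// by the integer-element test.
+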
+static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ StringRef Constraint,
+ llvm::Type* Ty) {
+ if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
+ if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
+ // Invalid MMX constraint
+ return nullptr;
+ }
+
+ return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
+ }
+
+ // No operation needed
+ return Ty;
+}
+
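+// For illustration (hypothetical source): given
+//   typedef int v2si __attribute__((vector_size(8)));
+//   asm("..." : : "y"(v));  // v of type v2si
+// the 64-bit vector operand is retyped to x86_mmx above, while a 128-bit
+// vector with the "y" constraint would instead be rejected (nullptr).
+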
+/// Returns true if this type can be passed in SSE registers with the
+/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
+static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)
+ return true;
+ } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
+ // registers specially.
+ unsigned VecSize = Context.getTypeSize(VT);
+ if (VecSize == 128 || VecSize == 256 || VecSize == 512)
+ return true;
+ }
+ return false;
+}
+
+/// Returns true if this aggregate is small enough to be passed in SSE registers
+/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
+static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
+ return NumMembers <= 4;
+}
+
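+// For illustration: under these two predicates a homogeneous aggregate of up
+// to four floats, doubles, or 128/256/512-bit vectors qualifies for SSE
+// registers, e.g. the hypothetical
+//   struct HVA4 { __m128 a, b, c, d; };    // 4 members -> small enough
+//   struct HVA5 { __m128 a, b, c, d, e; }; // 5 members -> too big
+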
+//===----------------------------------------------------------------------===//
+// X86-32 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+/// \brief Similar to llvm::CCState, but for Clang.
+struct CCState {
+ CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}
+
+ unsigned CC;
+ unsigned FreeRegs;
+ unsigned FreeSSERegs;
+};
+
+/// X86_32ABIInfo - The X86-32 ABI information.
+class X86_32ABIInfo : public ABIInfo {
+ enum Class {
+ Integer,
+ Float
+ };
+
+ static const unsigned MinABIStackAlignInBytes = 4;
+
+ bool IsDarwinVectorABI;
+ bool IsRetSmallStructInRegABI;
+ bool IsWin32StructABI;
+ bool IsSoftFloatABI;
+ bool IsMCUABI;
+ unsigned DefaultNumRegisterParameters;
+
+ static bool isRegisterSize(unsigned Size) {
+ return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
+ }
+
+ bool isHomogeneousAggregateBaseType(QualType Ty) const override {
+ // FIXME: Assumes vectorcall is in use.
+ return isX86VectorTypeForVectorCall(getContext(), Ty);
+ }
+
+ bool isHomogeneousAggregateSmallEnough(const Type *Ty,
+ uint64_t NumMembers) const override {
+ // FIXME: Assumes vectorcall is in use.
+ return isX86VectorCallAggregateSmallEnough(NumMembers);
+ }
+
+ bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;
+
+ /// getIndirectResult - Given a source type \arg Ty, return a suitable result
+ /// such that the argument will be passed in memory.
+ ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
+
+ ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;
+
+ /// \brief Return the alignment to use for the given type on the stack.
+ unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
+
+ Class classify(QualType Ty) const;
+ ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
+ ABIArgInfo classifyArgumentType(QualType Ty, CCState &State) const;
+ /// \brief Updates the number of available free registers, returns
+ /// true if any registers were allocated.
+ bool updateFreeRegs(QualType Ty, CCState &State) const;
+
+ bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
+ bool &NeedsPadding) const;
+ bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;
+
+ /// \brief Rewrite the function info so that all memory arguments use
+ /// inalloca.
+ void rewriteWithInAlloca(CGFunctionInfo &FI) const;
+
+ void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
+ CharUnits &StackOffset, ABIArgInfo &Info,
+ QualType Type) const;
+
+public:
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
+ bool RetSmallStructInRegABI, bool Win32StructABI,
+ unsigned NumRegisterParameters, bool SoftFloatABI)
+ : ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
+ IsRetSmallStructInRegABI(RetSmallStructInRegABI),
+ IsWin32StructABI(Win32StructABI),
+ IsSoftFloatABI(SoftFloatABI),
+ IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
+ DefaultNumRegisterParameters(NumRegisterParameters) {}
+};
+
+class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
+ bool RetSmallStructInRegABI, bool Win32StructABI,
+ unsigned NumRegisterParameters, bool SoftFloatABI)
+ : TargetCodeGenInfo(new X86_32ABIInfo(
+ CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
+ NumRegisterParameters, SoftFloatABI)) {}
+
+ static bool isStructReturnInRegABI(
+ const llvm::Triple &Triple, const CodeGenOptions &Opts);
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override;
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
+ // Darwin uses different dwarf register numbers for EH.
+ if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
+ return 4;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+
+ llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ StringRef Constraint,
+ llvm::Type* Ty) const override {
+ return X86AdjustInlineAsmType(CGF, Constraint, Ty);
+ }
+
+ void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnSlot,
+ std::string &Constraints,
+ std::vector<llvm::Type *> &ResultRegTypes,
+ std::vector<llvm::Type *> &ResultTruncRegTypes,
+ std::vector<LValue> &ResultRegDests,
+ std::string &AsmString,
+ unsigned NumOutputs) const override;
+
+ llvm::Constant *
+ getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
+ unsigned Sig = (0xeb << 0) | // jmp rel8
+ (0x06 << 8) | // .+0x08
+ ('F' << 16) |
+ ('T' << 24);
+ return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
+ }
+};
+
+}
+
+/// Rewrite input constraint references after adding some output constraints.
+/// In the case where there is one output and one input and we add one output,
+/// we need to replace all operand references greater than or equal to 1:
+/// mov $0, $1
+/// mov eax, $1
+/// The result will be:
+/// mov $0, $2
+/// mov eax, $2
+static void rewriteInputConstraintReferences(unsigned FirstIn,
+ unsigned NumNewOuts,
+ std::string &AsmString) {
+ std::string Buf;
+ llvm::raw_string_ostream OS(Buf);
+ size_t Pos = 0;
+ while (Pos < AsmString.size()) {
+ size_t DollarStart = AsmString.find('$', Pos);
+ if (DollarStart == std::string::npos)
+ DollarStart = AsmString.size();
+ size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
+ if (DollarEnd == std::string::npos)
+ DollarEnd = AsmString.size();
+ OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
+ Pos = DollarEnd;
+ size_t NumDollars = DollarEnd - DollarStart;
+ if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
+ // We have an operand reference.
+ size_t DigitStart = Pos;
+ size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
+ if (DigitEnd == std::string::npos)
+ DigitEnd = AsmString.size();
+ StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
+ unsigned OperandIndex;
+ if (!OperandStr.getAsInteger(10, OperandIndex)) {
+ if (OperandIndex >= FirstIn)
+ OperandIndex += NumNewOuts;
+ OS << OperandIndex;
+ } else {
+ OS << OperandStr;
+ }
+ Pos = DigitEnd;
+ }
+ }
+ AsmString = std::move(OS.str());
+}
+
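+// Worked example (for illustration): with FirstIn = 1 and NumNewOuts = 1,
+//   "mov $0, $1"  becomes "mov $0, $2"  ($1 >= FirstIn, so it is shifted)
+//   "mov $0, $$1" is unchanged          (even number of '$' = escaped)
+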
+/// Add output constraints for EAX:EDX because they are return registers.
+void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
+ CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
+ std::vector<llvm::Type *> &ResultRegTypes,
+ std::vector<llvm::Type *> &ResultTruncRegTypes,
+ std::vector<LValue> &ResultRegDests, std::string &AsmString,
+ unsigned NumOutputs) const {
+ uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());
+
+ // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
+ // larger.
+ if (!Constraints.empty())
+ Constraints += ',';
+ if (RetWidth <= 32) {
+ Constraints += "={eax}";
+ ResultRegTypes.push_back(CGF.Int32Ty);
+ } else {
+ // Use the 'A' constraint for EAX:EDX.
+ Constraints += "=A";
+ ResultRegTypes.push_back(CGF.Int64Ty);
+ }
+
+ // Truncate EAX or EAX:EDX to an integer of the appropriate size.
+ llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
+ ResultTruncRegTypes.push_back(CoerceTy);
+
+ // Coerce the integer by bitcasting the return slot pointer.
+ ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
+ CoerceTy->getPointerTo()));
+ ResultRegDests.push_back(ReturnSlot);
+
+ rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
+}
+
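+// For illustration: a hypothetical 'short' return would add "={eax}" with an
+// i16 truncation type, while a 'long long' return would add "=A" and be read
+// back as the full i64 EAX:EDX pair.
+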
+/// shouldReturnTypeInRegister - Determine if the given type should be
+/// returned in a register (for the Darwin and MCU ABIs).
+bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
+ ASTContext &Context) const {
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // For i386, the type must be register sized.
+ // For the MCU ABI, it only needs to be <= 8 bytes.
+ if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
+ return false;
+
+ if (Ty->isVectorType()) {
+ // 64- and 128-bit vectors inside structures are not returned in
+ // registers.
+ if (Size == 64 || Size == 128)
+ return false;
+
+ return true;
+ }
+
+ // If this is a builtin, pointer, enum, complex type, member pointer, or
+ // member function pointer it is ok.
+ if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
+ Ty->isAnyComplexType() || Ty->isEnumeralType() ||
+ Ty->isBlockPointerType() || Ty->isMemberPointerType())
+ return true;
+
+ // Arrays are treated like records.
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
+ return shouldReturnTypeInRegister(AT->getElementType(), Context);
+
+ // Otherwise, it must be a record type.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT) return false;
+
+ // FIXME: Traverse bases here too.
+
+ // Structure types are passed in register if all fields would be
+ // passed in a register.
+ for (const auto *FD : RT->getDecl()->fields()) {
+ // Empty fields are ignored.
+ if (isEmptyField(Context, FD, true))
+ continue;
+
+ // Check fields recursively.
+ if (!shouldReturnTypeInRegister(FD->getType(), Context))
+ return false;
+ }
+ return true;
+}
+
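+// For illustration (hypothetical types): on i386,
+//   struct A { char c; short s; };  // 32 bits, all fields OK -> register
+//   struct B { char c[3]; };        // 24 bits, not register sized -> memory
+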
+ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
+                                                  CCState &State) const {
+ // If the return value is indirect, then the hidden argument is consuming one
+ // integer register.
+ if (State.FreeRegs) {
+ --State.FreeRegs;
+ if (!IsMCUABI)
+ return getNaturalAlignIndirectInReg(RetTy);
+ }
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
+}
+
+ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
+ CCState &State) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ const Type *Base = nullptr;
+ uint64_t NumElts = 0;
+ if (State.CC == llvm::CallingConv::X86_VectorCall &&
+ isHomogeneousAggregate(RetTy, Base, NumElts)) {
+ // The LLVM struct type for such an aggregate should lower properly.
+ return ABIArgInfo::getDirect();
+ }
+
+ if (const VectorType *VT = RetTy->getAs<VectorType>()) {
+ // On Darwin, some vectors are returned in registers.
+ if (IsDarwinVectorABI) {
+ uint64_t Size = getContext().getTypeSize(RetTy);
+
+ // 128-bit vectors are a special case; they are returned in
+ // registers and we need to make sure to pick a type the LLVM
+ // backend will like.
+ if (Size == 128)
+ return ABIArgInfo::getDirect(llvm::VectorType::get(
+ llvm::Type::getInt64Ty(getVMContext()), 2));
+
+ // Always return in register if it fits in a general purpose
+ // register, or if it is 64 bits and has a single element.
+ if ((Size == 8 || Size == 16 || Size == 32) ||
+ (Size == 64 && VT->getNumElements() == 1))
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
+
+ return getIndirectReturnResult(RetTy, State);
+ }
+
+ return ABIArgInfo::getDirect();
+ }
+
+ if (isAggregateTypeForABI(RetTy)) {
+ if (const RecordType *RT = RetTy->getAs<RecordType>()) {
+ // Structures with flexible arrays are always indirect.
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return getIndirectReturnResult(RetTy, State);
+ }
+
+ // If specified, structs and unions are always indirect.
+ if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
+ return getIndirectReturnResult(RetTy, State);
+
+ // Small structures which are register sized are generally returned
+ // in a register.
+ if (shouldReturnTypeInRegister(RetTy, getContext())) {
+ uint64_t Size = getContext().getTypeSize(RetTy);
+
+ // As a special-case, if the struct is a "single-element" struct, and
+ // the field is of type "float" or "double", return it in a
+ // floating-point register. (MSVC does not apply this special case.)
+ // We apply a similar transformation for pointer types to improve the
+ // quality of the generated IR.
+ if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
+ if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
+ || SeltTy->hasPointerRepresentation())
+ return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+
+ // FIXME: We should be able to narrow this integer in cases with dead
+ // padding.
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
+ }
+
+ return getIndirectReturnResult(RetTy, State);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
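+// For illustration (hypothetical types, on i386 Darwin where small structs
+// are returned in registers):
+//   struct F { double d; };   // single fp element -> direct 'double'
+//   struct I { int x; };      // register sized    -> direct i32
+//   struct Big { int a[4]; }; // not register sized -> indirect (sret)
+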
+static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
+ return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
+}
+
+static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl();
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ for (const auto &I : CXXRD->bases())
+ if (!isRecordWithSSEVectorType(Context, I.getType()))
+ return false;
+
+ for (const auto *i : RD->fields()) {
+ QualType FT = i->getType();
+
+ if (isSSEVectorType(Context, FT))
+ return true;
+
+ if (isRecordWithSSEVectorType(Context, FT))
+ return true;
+ }
+
+ return false;
+}
+
+unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
+ unsigned Align) const {
+ // If the alignment is less than or equal to the minimum ABI
+ // alignment, just use the default; the backend will handle this.
+ if (Align <= MinABIStackAlignInBytes)
+ return 0; // Use default alignment.
+
+ // On non-Darwin, the stack type alignment is always 4.
+ if (!IsDarwinVectorABI) {
+ // Set explicit alignment, since we may need to realign the top.
+ return MinABIStackAlignInBytes;
+ }
+
+ // Otherwise, if the type contains an SSE vector type, the alignment is 16.
+ if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
+ isRecordWithSSEVectorType(getContext(), Ty)))
+ return 16;
+
+ return MinABIStackAlignInBytes;
+}
+
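+// For illustration: a 16-byte-aligned __m128 argument keeps its 16-byte
+// stack slot alignment on Darwin, but is only guaranteed 4 bytes elsewhere;
+// anything aligned <= 4 bytes just uses the default (returns 0 above).
+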
+ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
+ CCState &State) const {
+ if (!ByVal) {
+ if (State.FreeRegs) {
+ --State.FreeRegs; // Non-byval indirects just use one pointer.
+ if (!IsMCUABI)
+ return getNaturalAlignIndirectInReg(Ty);
+ }
+ return getNaturalAlignIndirect(Ty, false);
+ }
+
+ // Compute the byval alignment.
+ unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
+ unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
+ if (StackAlign == 0)
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);
+
+ // If the stack alignment is less than the type alignment, realign the
+ // argument.
+ bool Realign = TypeAlign > StackAlign;
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
+ /*ByVal=*/true, Realign);
+}
+
+X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
+ const Type *T = isSingleElementStruct(Ty, getContext());
+ if (!T)
+ T = Ty.getTypePtr();
+
+ if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
+ BuiltinType::Kind K = BT->getKind();
+ if (K == BuiltinType::Float || K == BuiltinType::Double)
+ return Float;
+ }
+ return Integer;
+}
+
+bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
+ if (!IsSoftFloatABI) {
+ Class C = classify(Ty);
+ if (C == Float)
+ return false;
+ }
+
+ unsigned Size = getContext().getTypeSize(Ty);
+ unsigned SizeInRegs = (Size + 31) / 32;
+
+ if (SizeInRegs == 0)
+ return false;
+
+ if (!IsMCUABI) {
+ if (SizeInRegs > State.FreeRegs) {
+ State.FreeRegs = 0;
+ return false;
+ }
+ } else {
+ // The MCU psABI allows passing parameters in-reg even if there are
+ // earlier parameters that are passed on the stack. Also,
+ // it does not allow passing >8-byte structs in-register,
+ // even if there are 3 free registers available.
+ if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
+ return false;
+ }
+
+ State.FreeRegs -= SizeInRegs;
+ return true;
+}
+
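+// For illustration: with regparm(3), three 'int' parameters consume one
+// register each; a 'long long' consumes two; a 'double' is classified Float
+// above and never touches the integer registers (unless soft-float).
+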
+bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
+ bool &InReg,
+ bool &NeedsPadding) const {
+ NeedsPadding = false;
+ InReg = !IsMCUABI;
+
+ if (!updateFreeRegs(Ty, State))
+ return false;
+
+ if (IsMCUABI)
+ return true;
+
+ if (State.CC == llvm::CallingConv::X86_FastCall ||
+ State.CC == llvm::CallingConv::X86_VectorCall) {
+ if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
+ NeedsPadding = true;
+
+ return false;
+ }
+
+ return true;
+}
+
+bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
+ if (!updateFreeRegs(Ty, State))
+ return false;
+
+ if (IsMCUABI)
+ return false;
+
+ if (State.CC == llvm::CallingConv::X86_FastCall ||
+ State.CC == llvm::CallingConv::X86_VectorCall) {
+ if (getContext().getTypeSize(Ty) > 32)
+ return false;
+
+ return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
+ Ty->isReferenceType());
+ }
+
+ return true;
+}
+
+ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
+ CCState &State) const {
+ // FIXME: Set alignment on indirect arguments.
+
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ // Check with the C++ ABI first.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (RT) {
+ CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
+ if (RAA == CGCXXABI::RAA_Indirect) {
+ return getIndirectResult(Ty, false, State);
+ } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
+ // The field index doesn't matter, we'll fix it up later.
+ return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
+ }
+ }
+
+ // vectorcall adds the concept of a homogeneous vector aggregate, similar
+ // to other targets.
+ const Type *Base = nullptr;
+ uint64_t NumElts = 0;
+ if (State.CC == llvm::CallingConv::X86_VectorCall &&
+ isHomogeneousAggregate(Ty, Base, NumElts)) {
+ if (State.FreeSSERegs >= NumElts) {
+ State.FreeSSERegs -= NumElts;
+ if (Ty->isBuiltinType() || Ty->isVectorType())
+ return ABIArgInfo::getDirect();
+ return ABIArgInfo::getExpand();
+ }
+ return getIndirectResult(Ty, /*ByVal=*/false, State);
+ }
+
+ if (isAggregateTypeForABI(Ty)) {
+ if (RT) {
+ // Structs are always byval on win32, regardless of what they contain.
+ if (IsWin32StructABI)
+ return getIndirectResult(Ty, true, State);
+
+ // Structures with flexible arrays are always indirect.
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return getIndirectResult(Ty, true, State);
+ }
+
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ llvm::LLVMContext &LLVMContext = getVMContext();
+ llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
+ bool NeedsPadding, InReg;
+ if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
+ unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
+ llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
+ if (InReg)
+ return ABIArgInfo::getDirectInReg(Result);
+ else
+ return ABIArgInfo::getDirect(Result);
+ }
+ llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
+
+ // Expand small (<= 128-bit) record types when we know that the stack layout
+ // of those arguments will match the struct. This is important because the
+ // LLVM backend isn't smart enough to remove byval, which inhibits many
+ // optimizations.
+ // Don't do this for the MCU if there are still free integer registers
+ // (see X86_64 ABI for full explanation).
+ if (getContext().getTypeSize(Ty) <= 4*32 &&
+ canExpandIndirectArgument(Ty, getContext()) &&
+ (!IsMCUABI || State.FreeRegs == 0))
+ return ABIArgInfo::getExpandWithPadding(
+ State.CC == llvm::CallingConv::X86_FastCall ||
+ State.CC == llvm::CallingConv::X86_VectorCall,
+ PaddingType);
+
+ return getIndirectResult(Ty, true, State);
+ }
+
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ // On Darwin, some vectors are passed in memory; we handle this by
+ // passing them as an i8/i16/i32/i64.
+ if (IsDarwinVectorABI) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if ((Size == 8 || Size == 16 || Size == 32) ||
+ (Size == 64 && VT->getNumElements() == 1))
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
+ }
+
+ if (IsX86_MMXType(CGT.ConvertType(Ty)))
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
+
+ return ABIArgInfo::getDirect();
+ }
+
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ bool InReg = shouldPrimitiveUseInReg(Ty, State);
+
+ if (Ty->isPromotableIntegerType()) {
+ if (InReg)
+ return ABIArgInfo::getExtendInReg();
+ return ABIArgInfo::getExtend();
+ }
+
+ if (InReg)
+ return ABIArgInfo::getDirectInReg();
+ return ABIArgInfo::getDirect();
+}
+
+void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ CCState State(FI.getCallingConvention());
+ if (IsMCUABI)
+ State.FreeRegs = 3;
+ else if (State.CC == llvm::CallingConv::X86_FastCall)
+ State.FreeRegs = 2;
+ else if (State.CC == llvm::CallingConv::X86_VectorCall) {
+ State.FreeRegs = 2;
+ State.FreeSSERegs = 6;
+ } else if (FI.getHasRegParm())
+ State.FreeRegs = FI.getRegParm();
+ else
+ State.FreeRegs = DefaultNumRegisterParameters;
+
+ if (!getCXXABI().classifyReturnType(FI)) {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
+ } else if (FI.getReturnInfo().isIndirect()) {
+ // The C++ ABI is not aware of register usage, so we have to check if the
+ // return value was sret and put it in a register ourselves if appropriate.
+ if (State.FreeRegs) {
+ --State.FreeRegs; // The sret parameter consumes a register.
+ if (!IsMCUABI)
+ FI.getReturnInfo().setInReg(true);
+ }
+ }
+
+ // The chain argument effectively gives us another free register.
+ if (FI.isChainCall())
+ ++State.FreeRegs;
+
+ bool UsedInAlloca = false;
+ for (auto &I : FI.arguments()) {
+ I.info = classifyArgumentType(I.type, State);
+ UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
+ }
+
+ // If we needed to use inalloca for any argument, do a second pass and rewrite
+ // all the memory arguments to use inalloca.
+ if (UsedInAlloca)
+ rewriteWithInAlloca(FI);
+}
+
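+// For illustration (hypothetical prototype): under fastcall, FreeRegs starts
+// at 2, so in
+//   __attribute__((fastcall)) int f(int a, int b, int c);
+// 'a' and 'b' are classified inreg (ECX/EDX) and 'c' spills to the stack.
+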
+void
+X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
+ CharUnits &StackOffset, ABIArgInfo &Info,
+ QualType Type) const {
+ // Arguments are always 4-byte-aligned.
+ CharUnits FieldAlign = CharUnits::fromQuantity(4);
+
+ assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct");
+ Info = ABIArgInfo::getInAlloca(FrameFields.size());
+ FrameFields.push_back(CGT.ConvertTypeForMem(Type));
+ StackOffset += getContext().getTypeSizeInChars(Type);
+
+ // Insert padding bytes to respect alignment.
+ CharUnits FieldEnd = StackOffset;
+ StackOffset = FieldEnd.RoundUpToAlignment(FieldAlign);
+ if (StackOffset != FieldEnd) {
+ CharUnits NumBytes = StackOffset - FieldEnd;
+ llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
+ Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
+ FrameFields.push_back(Ty);
+ }
+}
+
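+// For illustration: appending a hypothetical 'bool' field adds an i8 to the
+// frame followed by a [3 x i8] pad so the next field stays 4-byte aligned.
+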
+static bool isArgInAlloca(const ABIArgInfo &Info) {
+ // Leave ignored and inreg arguments alone.
+ switch (Info.getKind()) {
+ case ABIArgInfo::InAlloca:
+ return true;
+ case ABIArgInfo::Indirect:
+ assert(Info.getIndirectByVal());
+ return true;
+ case ABIArgInfo::Ignore:
+ return false;
+ case ABIArgInfo::Direct:
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Expand:
+ if (Info.getInReg())
+ return false;
+ return true;
+ }
+ llvm_unreachable("invalid enum");
+}
+
+void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
+ assert(IsWin32StructABI && "inalloca only supported on win32");
+
+ // Build a packed struct type for all of the arguments in memory.
+ SmallVector<llvm::Type *, 6> FrameFields;
+
+ // The stack alignment is always 4.
+ CharUnits StackAlign = CharUnits::fromQuantity(4);
+
+ CharUnits StackOffset;
+ CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
+
+ // Put 'this' into the struct before 'sret', if necessary.
+ bool IsThisCall =
+ FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
+ ABIArgInfo &Ret = FI.getReturnInfo();
+ if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
+ isArgInAlloca(I->info)) {
+ addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
+ ++I;
+ }
+
+ // Put the sret parameter into the inalloca struct if it's in memory.
+ if (Ret.isIndirect() && !Ret.getInReg()) {
+ CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
+ addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
+ // On Windows, the hidden sret parameter is always returned in eax.
+ Ret.setInAllocaSRet(IsWin32StructABI);
+ }
+
+ // Skip the 'this' parameter in ecx.
+ if (IsThisCall)
+ ++I;
+
+ // Put arguments passed in memory into the struct.
+ for (; I != E; ++I) {
+ if (isArgInAlloca(I->info))
+ addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
+ }
+
+ FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
+ /*isPacked=*/true),
+ StackAlign);
+}
+
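+// Minimal sketch (hypothetical case): for 'S f(S x)' on win32, where S is a
+// non-trivial struct, the packed frame built above is roughly <{ S*, S }>:
+// the sret pointer first, then the in-memory copy of 'x'.
+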
+Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
+ Address VAListAddr, QualType Ty) const {
+
+ auto TypeInfo = getContext().getTypeInfoInChars(Ty);
+
+ // x86-32 changes the alignment of certain arguments on the stack.
+ //
+ // Just messing with TypeInfo like this works because we never pass
+ // anything indirectly.
+ TypeInfo.second = CharUnits::fromQuantity(
+ getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity()));
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
+ TypeInfo, CharUnits::fromQuantity(4),
+ /*AllowHigherAlign*/ true);
+}
+
+bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
+ const llvm::Triple &Triple, const CodeGenOptions &Opts) {
+ assert(Triple.getArch() == llvm::Triple::x86);
+
+ switch (Opts.getStructReturnConvention()) {
+ case CodeGenOptions::SRCK_Default:
+ break;
+ case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
+ return false;
+ case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
+ return true;
+ }
+
+ if (Triple.isOSDarwin() || Triple.isOSIAMCU())
+ return true;
+
+ switch (Triple.getOS()) {
+ case llvm::Triple::DragonFly:
+ case llvm::Triple::FreeBSD:
+ case llvm::Triple::OpenBSD:
+ case llvm::Triple::Bitrig:
+ case llvm::Triple::Win32:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void X86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const {
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+ if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
+ // Get the LLVM function.
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+
+ // Now add the 'alignstack' attribute with a value of 16.
+ llvm::AttrBuilder B;
+ B.addStackAlignmentAttr(16);
+ Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
+ llvm::AttributeSet::get(CGM.getLLVMContext(),
+ llvm::AttributeSet::FunctionIndex,
+ B));
+ }
+ }
+}
+
+bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
+
+ // 0-7 are the eight integer registers; the order is different
+ // on Darwin (for EH), but the range is the same.
+ // 8 is %eip.
+ AssignToArrayRange(Builder, Address, Four8, 0, 8);
+
+ if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
+ // 12-16 are st(0..4). Not sure why we stop at 4.
+ // These have size 16, which is sizeof(long double) on
+ // platforms with 8-byte alignment for that type.
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
+ AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
+
+ } else {
+ // 9 is %eflags, which doesn't get a size on Darwin for some
+ // reason.
+ Builder.CreateAlignedStore(
+ Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
+ CharUnits::One());
+
+ // 11-16 are st(0..5). Not sure why we stop at 5.
+ // These have size 12, which is sizeof(long double) on
+ // platforms with 4-byte alignment for that type.
+ llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
+ AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// X86-64 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+
+namespace {
+/// The AVX ABI level for X86 targets.
+enum class X86AVXABILevel {
+ None,
+ AVX,
+ AVX512
+};
+
+/// \returns the size in bits of the largest (native) vector for \p AVXLevel.
+static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
+ switch (AVXLevel) {
+ case X86AVXABILevel::AVX512:
+ return 512;
+ case X86AVXABILevel::AVX:
+ return 256;
+ case X86AVXABILevel::None:
+ return 128;
+ }
+ llvm_unreachable("Unknown AVXLevel");
+}
+
+/// X86_64ABIInfo - The X86_64 ABI information.
+class X86_64ABIInfo : public ABIInfo {
+ enum Class {
+ Integer = 0,
+ SSE,
+ SSEUp,
+ X87,
+ X87Up,
+ ComplexX87,
+ NoClass,
+ Memory
+ };
+
+ /// merge - Implement the X86_64 ABI merging algorithm.
+ ///
+ /// Merge an accumulating classification \arg Accum with a field
+ /// classification \arg Field.
+ ///
+ /// \param Accum - The accumulating classification. This should
+ /// always be either NoClass or the result of a previous merge
+ /// call. In addition, this should never be Memory (the caller
+ /// should just return Memory for the aggregate).
+ static Class merge(Class Accum, Class Field);
+
+ /// postMerge - Implement the X86_64 ABI post merging algorithm.
+ ///
+ /// Post merger cleanup, reduces a malformed Hi and Lo pair to
+ /// final MEMORY or SSE classes when necessary.
+ ///
+ /// \param AggregateSize - The size of the current aggregate in
+ /// the classification process.
+ ///
+ /// \param Lo - The classification for the parts of the type
+ /// residing in the low word of the containing object.
+ ///
+ /// \param Hi - The classification for the parts of the type
+ /// residing in the higher words of the containing object.
+ ///
+ void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
+
+ /// classify - Determine the x86_64 register classes in which the
+ /// given type T should be passed.
+ ///
+ /// \param Lo - The classification for the parts of the type
+ /// residing in the low word of the containing object.
+ ///
+ /// \param Hi - The classification for the parts of the type
+ /// residing in the high word of the containing object.
+ ///
+ /// \param OffsetBase - The bit offset of this type in the
+ /// containing object. Some parameters are classified different
+ /// depending on whether they straddle an eightbyte boundary.
+ ///
+ /// \param isNamedArg - Whether the argument in question is a "named"
+ /// argument, as used in AMD64-ABI 3.5.7.
+ ///
+ /// If a word is unused its result will be NoClass; if a type should
+ /// be passed in Memory then at least the classification of \arg Lo
+ /// will be Memory.
+ ///
+ /// The \arg Lo class will be NoClass iff the argument is ignored.
+ ///
+ /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
+ /// also be ComplexX87.
+ void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
+ bool isNamedArg) const;
+
+ llvm::Type *GetByteVectorType(QualType Ty) const;
+ llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
+ unsigned IROffset, QualType SourceTy,
+ unsigned SourceOffset) const;
+ llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
+ unsigned IROffset, QualType SourceTy,
+ unsigned SourceOffset) const;
+
+ /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
+ /// result such that the argument will be returned in memory.
+ ABIArgInfo getIndirectReturnResult(QualType Ty) const;
+
+ /// getIndirectResult - Given a source type \arg Ty, return a suitable result
+ /// such that the argument will be passed in memory.
+ ///
+ /// \param freeIntRegs - The number of free integer registers remaining
+ /// available.
+ ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+ ABIArgInfo classifyArgumentType(QualType Ty,
+ unsigned freeIntRegs,
+ unsigned &neededInt,
+ unsigned &neededSSE,
+ bool isNamedArg) const;
+
+ bool IsIllegalVectorType(QualType Ty) const;
+
+ /// The 0.98 ABI revision clarified a lot of ambiguities,
+ /// unfortunately in ways that were not always consistent with
+ /// certain previous compilers. In particular, platforms which
+ /// required strict binary compatibility with older versions of GCC
+ /// may need to exempt themselves.
+ bool honorsRevision0_98() const {
+ return !getTarget().getTriple().isOSDarwin();
+ }
+
+ X86AVXABILevel AVXLevel;
+ // Some ABIs (e.g. the X32 ABI and Native Client OS) use 32-bit pointers on
+ // 64-bit hardware.
+ bool Has64BitPointers;
+
+public:
+ X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
+ ABIInfo(CGT), AVXLevel(AVXLevel),
+ Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
+ }
+
+ bool isPassedUsingAVXType(QualType type) const {
+ unsigned neededInt, neededSSE;
+ // The freeIntRegs argument doesn't matter here.
+ ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
+ /*isNamedArg*/true);
+ if (info.isDirect()) {
+ llvm::Type *ty = info.getCoerceToType();
+ if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
+ return (vectorTy->getBitWidth() > 128);
+ }
+ return false;
+ }
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+ Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ bool has64BitPointers() const {
+ return Has64BitPointers;
+ }
+};
+
+/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
+class WinX86_64ABIInfo : public ABIInfo {
+public:
+ WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT)
+ : ABIInfo(CGT),
+ IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ bool isHomogeneousAggregateBaseType(QualType Ty) const override {
+ // FIXME: Assumes vectorcall is in use.
+ return isX86VectorTypeForVectorCall(getContext(), Ty);
+ }
+
+ bool isHomogeneousAggregateSmallEnough(const Type *Ty,
+ uint64_t NumMembers) const override {
+ // FIXME: Assumes vectorcall is in use.
+ return isX86VectorCallAggregateSmallEnough(NumMembers);
+ }
+
+private:
+ ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs,
+ bool IsReturnType) const;
+
+ bool IsMingw64;
+};
+
+class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
+ : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}
+
+ const X86_64ABIInfo &getABIInfo() const {
+ return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
+ }
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
+ return 7;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override {
+ llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
+
+ // 0-15 are the 16 integer registers.
+ // 16 is %rip.
+ AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
+ return false;
+ }
+
+ llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ StringRef Constraint,
+ llvm::Type* Ty) const override {
+ return X86AdjustInlineAsmType(CGF, Constraint, Ty);
+ }
+
+ bool isNoProtoCallVariadic(const CallArgList &args,
+ const FunctionNoProtoType *fnType) const override {
+ // The default CC on x86-64 sets %al to the number of SSE
+ // registers used, and GCC sets this when calling an unprototyped
+ // function, so we override the default behavior. However, don't do
+ // that when AVX types are involved: the ABI explicitly states it is
+ // undefined, and it doesn't work in practice because of how the ABI
+ // defines varargs anyway.
+ if (fnType->getCallConv() == CC_C) {
+ bool HasAVXType = false;
+ for (CallArgList::const_iterator
+ it = args.begin(), ie = args.end(); it != ie; ++it) {
+ if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
+ HasAVXType = true;
+ break;
+ }
+ }
+
+ if (!HasAVXType)
+ return true;
+ }
+
+ return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
+ }
+
+ llvm::Constant *
+ getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
+ unsigned Sig;
+ if (getABIInfo().has64BitPointers())
+ Sig = (0xeb << 0) | // jmp rel8
+ (0x0a << 8) | // .+0x0c
+ ('F' << 16) |
+ ('T' << 24);
+ else
+ Sig = (0xeb << 0) | // jmp rel8
+ (0x06 << 8) | // .+0x08
+ ('F' << 16) |
+ ('T' << 24);
+ return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
+ }
+};
+
+class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo {
+public:
+ PS4TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
+ : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}
+
+ void getDependentLibraryOption(llvm::StringRef Lib,
+ llvm::SmallString<24> &Opt) const override {
+ Opt = "\01";
+ // If the argument contains a space, enclose it in quotes.
+ if (Lib.find(" ") != StringRef::npos)
+ Opt += "\"" + Lib.str() + "\"";
+ else
+ Opt += Lib;
+ }
+};
+
+static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
+ // If the argument does not end in .lib, automatically add the suffix.
+ // If the argument contains a space, enclose it in quotes.
+ // This matches the behavior of MSVC.
+ bool Quote = (Lib.find(" ") != StringRef::npos);
+ std::string ArgStr = Quote ? "\"" : "";
+ ArgStr += Lib;
+ if (!Lib.endswith_lower(".lib"))
+ ArgStr += ".lib";
+ ArgStr += Quote ? "\"" : "";
+ return ArgStr;
+}
+
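+// For illustration: "kernel32" becomes "kernel32.lib", while "my lib"
+// becomes "\"my lib.lib\"" to survive the linker command line.
+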
+class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
+public:
+ WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
+ bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
+ unsigned NumRegisterParameters)
+ : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
+ Win32StructABI, NumRegisterParameters, false) {}
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override;
+
+ void getDependentLibraryOption(llvm::StringRef Lib,
+ llvm::SmallString<24> &Opt) const override {
+ Opt = "/DEFAULTLIB:";
+ Opt += qualifyWindowsLibrary(Lib);
+ }
+
+ void getDetectMismatchOption(llvm::StringRef Name,
+ llvm::StringRef Value,
+ llvm::SmallString<32> &Opt) const override {
+ Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
+ }
+};
+
+static void addStackProbeSizeTargetAttribute(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) {
+ if (D && isa<FunctionDecl>(D)) {
+ if (CGM.getCodeGenOpts().StackProbeSize != 4096) {
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+
+ Fn->addFnAttr("stack-probe-size",
+ llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
+ }
+ }
+}
+
+void WinX86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const {
+ X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
+
+ addStackProbeSizeTargetAttribute(D, GV, CGM);
+}
+
+class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
+ X86AVXABILevel AVXLevel)
+ : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override;
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
+ return 7;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override {
+ llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
+
+ // 0-15 are the 16 integer registers.
+ // 16 is %rip.
+ AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
+ return false;
+ }
+
+ void getDependentLibraryOption(llvm::StringRef Lib,
+ llvm::SmallString<24> &Opt) const override {
+ Opt = "/DEFAULTLIB:";
+ Opt += qualifyWindowsLibrary(Lib);
+ }
+
+ void getDetectMismatchOption(llvm::StringRef Name,
+ llvm::StringRef Value,
+ llvm::SmallString<32> &Opt) const override {
+ Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
+ }
+};
+
+void WinX86_64TargetCodeGenInfo::setTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const {
+ TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
+
+ addStackProbeSizeTargetAttribute(D, GV, CGM);
+}
+}
+
+void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
+ Class &Hi) const {
+ // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
+ //
+ // (a) If one of the classes is Memory, the whole argument is passed in
+ // memory.
+ //
+ // (b) If X87UP is not preceded by X87, the whole argument is passed in
+ // memory.
+ //
+ // (c) If the size of the aggregate exceeds two eightbytes and the first
+ // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
+ // argument is passed in memory. NOTE: This is necessary to keep the
+ // ABI working for processors that don't support the __m256 type.
+ //
+ // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
+ //
+ // Some of these are enforced by the merging logic. Others can arise
+ // only with unions; for example:
+ // union { _Complex double; unsigned; }
+ //
+ // Note that clauses (b) and (c) were added in 0.98.
+ //
+ if (Hi == Memory)
+ Lo = Memory;
+ if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
+ Lo = Memory;
+ if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
+ Lo = Memory;
+ if (Hi == SSEUp && Lo != SSE)
+ Hi = SSE;
+}
+
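+// For illustration (hypothetical type): union { __m128 v; long l; } merges
+// to Lo = Integer, Hi = SSEUp; rule (d) above then converts Hi to SSE, so
+// the union is passed in one integer and one SSE register.
+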
+X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
+ // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
+ // classified recursively so that always two fields are
+ // considered. The resulting class is calculated according to
+ // the classes of the fields in the eightbyte:
+ //
+ // (a) If both classes are equal, this is the resulting class.
+ //
+ // (b) If one of the classes is NO_CLASS, the resulting class is
+ // the other class.
+ //
+ // (c) If one of the classes is MEMORY, the result is the MEMORY
+ // class.
+ //
+ // (d) If one of the classes is INTEGER, the result is the
+ // INTEGER.
+ //
+ // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
+ // MEMORY is used as class.
+ //
+ // (f) Otherwise class SSE is used.
+
+ // Accum should never be memory (we should have returned) or
+ // ComplexX87 (because this cannot be passed in a structure).
+ assert((Accum != Memory && Accum != ComplexX87) &&
+ "Invalid accumulated classification during merge.");
+ if (Accum == Field || Field == NoClass)
+ return Accum;
+ if (Field == Memory)
+ return Memory;
+ if (Accum == NoClass)
+ return Field;
+ if (Accum == Integer || Field == Integer)
+ return Integer;
+ if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
+ Accum == X87 || Accum == X87Up)
+ return Memory;
+ return SSE;
+}
+
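+// For illustration: merge(NoClass, SSE) == SSE, merge(Integer, SSE) ==
+// Integer, and merge(SSE, X87) == Memory, matching rules (b), (d) and (e).
+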
+void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
+ Class &Lo, Class &Hi, bool isNamedArg) const {
+ // FIXME: This code can be simplified by introducing a simple value class for
+ // Class pairs with appropriate constructor methods for the various
+ // situations.
+
+ // FIXME: Some of the split computations are wrong; unaligned vectors
+ // shouldn't be passed in registers for example, so there is no chance they
+ // can straddle an eightbyte. Verify & simplify.
+
+ Lo = Hi = NoClass;
+
+ Class &Current = OffsetBase < 64 ? Lo : Hi;
+ Current = Memory;
+
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ BuiltinType::Kind k = BT->getKind();
+
+ if (k == BuiltinType::Void) {
+ Current = NoClass;
+ } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
+ Lo = Integer;
+ Hi = Integer;
+ } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
+ Current = Integer;
+ } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
+ Current = SSE;
+ } else if (k == BuiltinType::LongDouble) {
+ const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
+ if (LDF == &llvm::APFloat::IEEEquad) {
+ Lo = SSE;
+ Hi = SSEUp;
+ } else if (LDF == &llvm::APFloat::x87DoubleExtended) {
+ Lo = X87;
+ Hi = X87Up;
+ } else if (LDF == &llvm::APFloat::IEEEdouble) {
+ Current = SSE;
+ } else
+ llvm_unreachable("unexpected long double representation!");
+ }
+ // FIXME: _Decimal32 and _Decimal64 are SSE.
+ // FIXME: __float128 and _Decimal128 are (SSE, SSEUp).
+ return;
+ }
+
+ if (const EnumType *ET = Ty->getAs<EnumType>()) {
+ // Classify the underlying integer type.
+ classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
+ return;
+ }
+
+ if (Ty->hasPointerRepresentation()) {
+ Current = Integer;
+ return;
+ }
+
+ if (Ty->isMemberPointerType()) {
+ if (Ty->isMemberFunctionPointerType()) {
+ if (Has64BitPointers) {
+ // If Has64BitPointers, this is an {i64, i64}, so classify both
+ // Lo and Hi now.
+ Lo = Hi = Integer;
+ } else {
+ // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
+ // straddles an eightbyte boundary, Hi should be classified as well.
+ uint64_t EB_FuncPtr = (OffsetBase) / 64;
+ uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
+ if (EB_FuncPtr != EB_ThisAdj) {
+ Lo = Hi = Integer;
+ } else {
+ Current = Integer;
+ }
+ }
+ } else {
+ Current = Integer;
+ }
+ return;
+ }
+
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ uint64_t Size = getContext().getTypeSize(VT);
+ if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
+ // gcc passes the following as integer:
+ // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
+ // 2 bytes - <2 x char>, <1 x short>
+ // 1 byte - <1 x char>
+ Current = Integer;
+
+ // If this type crosses an eightbyte boundary, it should be
+ // split.
+ uint64_t EB_Lo = (OffsetBase) / 64;
+ uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
+ if (EB_Lo != EB_Hi)
+ Hi = Lo;
+ } else if (Size == 64) {
+ // gcc passes <1 x double> in memory. :(
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
+ return;
+
+ // gcc passes <1 x long long> as INTEGER.
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
+ VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
+ VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
+ VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
+ Current = Integer;
+ else
+ Current = SSE;
+
+ // If this type crosses an eightbyte boundary, it should be
+ // split.
+ if (OffsetBase && OffsetBase != 64)
+ Hi = Lo;
+ } else if (Size == 128 ||
+ (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
+ // Arguments of 256-bits are split into four eightbyte chunks. The
+ // least significant one belongs to class SSE and all the others to class
+ // SSEUP. The original Lo and Hi design considers that types can't be
+ // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
+ // This design isn't correct for 256 bits, but since there are no cases
+ // where the upper parts would need to be inspected, avoid adding
+ // complexity and just consider Hi to match the 64-256 part.
+ //
+ // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
+ // registers if they are "named", i.e. not part of the "..." of a
+ // variadic function.
+ //
+ // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
+ // split into eight eightbyte chunks, one SSE and seven SSEUP.
+ Lo = SSE;
+ Hi = SSEUp;
+ }
+ return;
+ }
+
+ if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ QualType ET = getContext().getCanonicalType(CT->getElementType());
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (ET->isIntegralOrEnumerationType()) {
+ if (Size <= 64)
+ Current = Integer;
+ else if (Size <= 128)
+ Lo = Hi = Integer;
+ } else if (ET == getContext().FloatTy) {
+ Current = SSE;
+ } else if (ET == getContext().DoubleTy) {
+ Lo = Hi = SSE;
+ } else if (ET == getContext().LongDoubleTy) {
+ const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
+ if (LDF == &llvm::APFloat::IEEEquad)
+ Current = Memory;
+ else if (LDF == &llvm::APFloat::x87DoubleExtended)
+ Current = ComplexX87;
+ else if (LDF == &llvm::APFloat::IEEEdouble)
+ Lo = Hi = SSE;
+ else
+ llvm_unreachable("unexpected long double representation!");
+ }
+
+ // If this complex type crosses an eightbyte boundary then it
+ // should be split.
+ uint64_t EB_Real = (OffsetBase) / 64;
+ uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
+ if (Hi == NoClass && EB_Real != EB_Imag)
+ Hi = Lo;
+
+ return;
+ }
+
+ if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
+ // Arrays are treated like structures.
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+ // than four eightbytes, ..., it has class MEMORY.
+ if (Size > 256)
+ return;
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
+ // fields, it has class MEMORY.
+ //
+ // Only need to check alignment of array base.
+ if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
+ return;
+
+ // Otherwise implement simplified merge. We could be smarter about
+ // this, but it isn't worth it and would be harder to verify.
+ Current = NoClass;
+ uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
+ uint64_t ArraySize = AT->getSize().getZExtValue();
+
+ // The only case a 256-bit wide vector could be used is when the array
+ // contains a single 256-bit element. Since Lo and Hi logic isn't extended
+ // to work for sizes wider than 128, early check and fallback to memory.
+ if (Size > 128 && EltSize != 256)
+ return;
+
+ for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
+ Class FieldLo, FieldHi;
+ classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ postMerge(Size, Lo, Hi);
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
+ return;
+ }
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+ // than four eightbytes, ..., it has class MEMORY.
+ if (Size > 256)
+ return;
+
+ // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
+ // copy constructor or a non-trivial destructor, it is passed by invisible
+ // reference.
+ if (getRecordArgABI(RT, getCXXABI()))
+ return;
+
+ const RecordDecl *RD = RT->getDecl();
+
+ // Assume variable sized types are passed in memory.
+ if (RD->hasFlexibleArrayMember())
+ return;
+
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+
+ // Reset Lo class, this will be recomputed.
+ Current = NoClass;
+
+ // If this is a C++ record, classify the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (const auto &I : CXXRD->bases()) {
+ assert(!I.isVirtual() && !I.getType()->isDependentType() &&
+ "Unexpected base class!");
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
+
+ // Classify this field.
+ //
+ // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
+ // single eightbyte, each is classified separately. Each eightbyte gets
+ // initialized to class NO_CLASS.
+ Class FieldLo, FieldHi;
+ uint64_t Offset =
+ OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
+ classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory) {
+ postMerge(Size, Lo, Hi);
+ return;
+ }
+ }
+ }
+
+ // Classify the fields one at a time, merging the results.
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+ bool BitField = i->isBitField();
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
+ // four eightbytes, or it contains unaligned fields, it has class MEMORY.
+ //
+ // The only case a 256-bit wide vector could be used is when the struct
+ // contains a single 256-bit element. Since Lo and Hi logic isn't extended
+ // to work for sizes wider than 128, early check and fallback to memory.
+ //
+ if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
+ Lo = Memory;
+ postMerge(Size, Lo, Hi);
+ return;
+ }
+ // Note, skip this test for bit-fields, see below.
+ if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
+ Lo = Memory;
+ postMerge(Size, Lo, Hi);
+ return;
+ }
+
+ // Classify this field.
+ //
+ // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
+ // exceeds a single eightbyte, each is classified
+ // separately. Each eightbyte gets initialized to class
+ // NO_CLASS.
+ Class FieldLo, FieldHi;
+
+ // Bit-fields require special handling, they do not force the
+ // structure to be passed in memory even if unaligned, and
+ // therefore they can straddle an eightbyte.
+ if (BitField) {
+ // Ignore padding bit-fields.
+ if (i->isUnnamedBitfield())
+ continue;
+
+ uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+ uint64_t Size = i->getBitWidthValue(getContext());
+
+ uint64_t EB_Lo = Offset / 64;
+ uint64_t EB_Hi = (Offset + Size - 1) / 64;
+
+ if (EB_Lo) {
+ assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
+ FieldLo = NoClass;
+ FieldHi = Integer;
+ } else {
+ FieldLo = Integer;
+ FieldHi = EB_Hi ? Integer : NoClass;
+ }
+ } else
+ classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ postMerge(Size, Lo, Hi);
+ }
+}
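+
+// For illustration, a rough sketch of how the rules above classify a few
+// common types (assuming the usual x86-64 layouts):
+//   struct { int a, b; }          -> Lo = Integer, Hi = NoClass (one GPR)
+//   struct { double d; long l; }  -> Lo = SSE, Hi = Integer (one XMM, one GPR)
+//   struct { double a[4]; }       -> MEMORY (four eightbytes with no single
+//                                    256-bit element), passed on the stack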
+
+ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
+ // If this is a scalar LLVM value then assume LLVM will pass it in the right
+ // place naturally.
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ return getNaturalAlignIndirect(Ty);
+}
+
+bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
+ if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
+ uint64_t Size = getContext().getTypeSize(VecTy);
+ unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
+ if (Size <= 64 || Size > LargestVector)
+ return true;
+ }
+
+ return false;
+}
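+
+// For example, a 64-bit MMX-sized vector such as <2 x i32> is treated as
+// illegal here (Size <= 64), and a 256-bit vector such as <8 x float> is
+// legal only when AVX raises the native vector size to 256 bits.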
+
+ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
+ unsigned freeIntRegs) const {
+ // If this is a scalar LLVM value then assume LLVM will pass it in the right
+ // place naturally.
+ //
+ // This assumption is optimistic, as there could be free registers available
+ // when we need to pass this argument in memory, and LLVM could try to pass
+ // the argument in the free register. This does not seem to happen currently,
+ // but this code would be much safer if we could mark the argument with
+ // 'onstack'. See PR12193.
+ if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ // Compute the byval alignment. We specify the alignment of the byval in all
+ // cases so that the mid-level optimizer knows the alignment of the byval.
+ unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
+
+ // Attempt to avoid passing indirect results using byval when possible. This
+ // is important for good codegen.
+ //
+ // We do this by coercing the value into a scalar type which the backend can
+ // handle naturally (i.e., without using byval).
+ //
+ // For simplicity, we currently only do this when we have exhausted all of the
+ // free integer registers. Doing this when there are free integer registers
+ // would require more care, as we would have to ensure that the coerced value
+  // did not claim the unused register. That would require either reordering the
+ // arguments to the function (so that any subsequent inreg values came first),
+ // or only doing this optimization when there were no following arguments that
+ // might be inreg.
+ //
+ // We currently expect it to be rare (particularly in well written code) for
+ // arguments to be passed on the stack when there are still free integer
+ // registers available (this would typically imply large structs being passed
+ // by value), so this seems like a fair tradeoff for now.
+ //
+ // We can revisit this if the backend grows support for 'onstack' parameter
+ // attributes. See PR12193.
+ if (freeIntRegs == 0) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // If this type fits in an eightbyte, coerce it into the matching integral
+ // type, which will end up on the stack (with alignment 8).
+ if (Align == 8 && Size <= 64)
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
+ }
+
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
+}
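+
+// For example, once all six integer registers are exhausted, an 8-byte
+// struct with 8-byte alignment is coerced to i64 and ends up on the stack
+// directly, avoiding the byval attribute entirely.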
+
+/// The ABI specifies that a value should be passed in a full vector XMM/YMM
+/// register. Pick an LLVM IR type that will be passed as a vector register.
+llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
+ // Wrapper structs/arrays that only contain vectors are passed just like
+ // vectors; strip them off if present.
+ if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
+ Ty = QualType(InnerTy, 0);
+
+ llvm::Type *IRType = CGT.ConvertType(Ty);
+ if (isa<llvm::VectorType>(IRType) ||
+ IRType->getTypeID() == llvm::Type::FP128TyID)
+ return IRType;
+
+ // We couldn't find the preferred IR vector type for 'Ty'.
+ uint64_t Size = getContext().getTypeSize(Ty);
+ assert((Size == 128 || Size == 256) && "Invalid type found!");
+
+ // Return a LLVM IR vector type based on the size of 'Ty'.
+ return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
+ Size / 64);
+}
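+
+// For example, struct { __m128 v; } is unwrapped and passed as <4 x float>,
+// while a 256-bit type with no suitable IR vector type falls back to the
+// generic <4 x double>.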
+
+/// BitsContainNoUserData - Return true if the specified [start,end) bit range
+/// is known to be either off the end of the specified type or in alignment
+/// padding. The user type specified is known to be at most 128 bits in size
+/// and to have passed through X86_64ABIInfo::classify with a successful
+/// classification that put one of the two halves in the INTEGER class.
+///
+/// It is conservatively correct to return false.
+static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
+ unsigned EndBit, ASTContext &Context) {
+ // If the bytes being queried are off the end of the type, there is no user
+ // data hiding here. This handles analysis of builtins, vectors and other
+ // types that don't contain interesting padding.
+ unsigned TySize = (unsigned)Context.getTypeSize(Ty);
+ if (TySize <= StartBit)
+ return true;
+
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+ unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
+ unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
+
+ // Check each element to see if the element overlaps with the queried range.
+ for (unsigned i = 0; i != NumElts; ++i) {
+      // If the element is after the span we care about, then we're done.
+ unsigned EltOffset = i*EltSize;
+ if (EltOffset >= EndBit) break;
+
+ unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
+ if (!BitsContainNoUserData(AT->getElementType(), EltStart,
+ EndBit-EltOffset, Context))
+ return false;
+ }
+ // If it overlaps no elements, then it is safe to process as padding.
+ return true;
+ }
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (const auto &I : CXXRD->bases()) {
+ assert(!I.isVirtual() && !I.getType()->isDependentType() &&
+ "Unexpected base class!");
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
+
+ // If the base is after the span we care about, ignore it.
+ unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
+ if (BaseOffset >= EndBit) continue;
+
+ unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
+ if (!BitsContainNoUserData(I.getType(), BaseStart,
+ EndBit-BaseOffset, Context))
+ return false;
+ }
+ }
+
+ // Verify that no field has data that overlaps the region of interest. Yes
+ // this could be sped up a lot by being smarter about queried fields,
+ // however we're only looking at structs up to 16 bytes, so we don't care
+ // much.
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
+
+ // If we found a field after the region we care about, then we're done.
+ if (FieldOffset >= EndBit) break;
+
+ unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
+ if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
+ Context))
+ return false;
+ }
+
+ // If nothing in this record overlapped the area of interest, then we're
+ // clean.
+ return true;
+ }
+
+ return false;
+}
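+
+// For example, struct { double d; int i; } occupies 16 bytes with bits
+// [96,128) as tail padding: querying [96,128) returns true, but querying
+// [64,128) returns false because the int lives in bits [64,96).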
+
+/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
+/// float member at the specified offset. For example, {int,{float}} has a
+/// float at offset 4. It is conservatively correct for this routine to return
+/// false.
+static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
+ const llvm::DataLayout &TD) {
+ // Base case if we find a float.
+ if (IROffset == 0 && IRType->isFloatTy())
+ return true;
+
+ // If this is a struct, recurse into the field at the specified offset.
+ if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
+ const llvm::StructLayout *SL = TD.getStructLayout(STy);
+ unsigned Elt = SL->getElementContainingOffset(IROffset);
+ IROffset -= SL->getElementOffset(Elt);
+ return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
+ }
+
+ // If this is an array, recurse into the field at the specified offset.
+ if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
+ llvm::Type *EltTy = ATy->getElementType();
+ unsigned EltSize = TD.getTypeAllocSize(EltTy);
+ IROffset -= IROffset/EltSize*EltSize;
+ return ContainsFloatAtOffset(EltTy, IROffset, TD);
+ }
+
+ return false;
+}
+
+/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
+/// low 8 bytes of an XMM register, corresponding to the SSE class.
+llvm::Type *X86_64ABIInfo::
+GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
+ QualType SourceTy, unsigned SourceOffset) const {
+  // The only three choices we have are double, <2 x float>, or float. We
+  // pass as float if the last 4 bytes are just padding. This happens for
+ // structs that contain 3 floats.
+ if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
+ SourceOffset*8+64, getContext()))
+ return llvm::Type::getFloatTy(getVMContext());
+
+ // We want to pass as <2 x float> if the LLVM IR type contains a float at
+ // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
+ // case.
+ if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
+ ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
+ return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
+
+ return llvm::Type::getDoubleTy(getVMContext());
+}
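+
+// For example, struct { float x, y, z; } has floats at byte offsets 0 and 4
+// in its first eightbyte, so that half is passed as <2 x float>; the second
+// eightbyte holds only 'z' (bits [96,128) are off the end of the type), so
+// it is passed as a lone float.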
+
+/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
+/// an 8-byte GPR. This means that we either have a scalar or we are talking
+/// about the high or low part of an up-to-16-byte struct. This routine picks
+/// the best LLVM IR type to represent this, which may be i64 or may be anything
+/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
+/// etc).
+///
+/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
+/// the source type. IROffset is an offset in bytes into the LLVM IR type that
+/// the 8-byte value references. PrefType may be null.
+///
+/// SourceTy is the source-level type for the entire argument. SourceOffset is
+/// an offset into this that we're processing (which is always either 0 or 8).
+///
+llvm::Type *X86_64ABIInfo::
+GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
+ QualType SourceTy, unsigned SourceOffset) const {
+ // If we're dealing with an un-offset LLVM IR type, then it means that we're
+ // returning an 8-byte unit starting with it. See if we can safely use it.
+ if (IROffset == 0) {
+ // Pointers and int64's always fill the 8-byte unit.
+ if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
+ IRType->isIntegerTy(64))
+ return IRType;
+
+ // If we have a 1/2/4-byte integer, we can use it only if the rest of the
+ // goodness in the source type is just tail padding. This is allowed to
+ // kick in for struct {double,int} on the int, but not on
+ // struct{double,int,int} because we wouldn't return the second int. We
+ // have to do this analysis on the source type because we can't depend on
+ // unions being lowered a specific way etc.
+ if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
+ IRType->isIntegerTy(32) ||
+ (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
+ unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
+ cast<llvm::IntegerType>(IRType)->getBitWidth();
+
+ if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
+ SourceOffset*8+64, getContext()))
+ return IRType;
+ }
+ }
+
+ if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
+ // If this is a struct, recurse into the field at the specified offset.
+ const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
+ if (IROffset < SL->getSizeInBytes()) {
+ unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
+ IROffset -= SL->getElementOffset(FieldIdx);
+
+ return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
+ SourceTy, SourceOffset);
+ }
+ }
+
+ if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
+ llvm::Type *EltTy = ATy->getElementType();
+ unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
+ unsigned EltOffset = IROffset/EltSize*EltSize;
+ return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
+ SourceOffset);
+ }
+
+ // Okay, we don't have any better idea of what to pass, so we pass this in an
+ // integer register that isn't too big to fit the rest of the struct.
+ unsigned TySizeInBytes =
+ (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
+
+ assert(TySizeInBytes != SourceOffset && "Empty field?");
+
+ // It is always safe to classify this as an integer type up to i64 that
+ // isn't larger than the structure.
+ return llvm::IntegerType::get(getVMContext(),
+ std::min(TySizeInBytes-SourceOffset, 8U)*8);
+}
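+
+// For example, for struct { double d; int i; } the high eightbyte recurses
+// into the i32 field; bits [96,128) are tail padding, so the high part is
+// passed as i32 rather than being widened to i64.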
+
+/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
+/// be used as elements of a two register pair to pass or return, return a
+/// first class aggregate to represent them. For example, if the low part of
+/// a by-value argument should be passed as i32* and the high part as float,
+/// return {i32*, float}.
+static llvm::Type *
+GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
+ const llvm::DataLayout &TD) {
+  // In order to correctly satisfy the ABI, we need the high part to start
+ // at offset 8. If the high and low parts we inferred are both 4-byte types
+ // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
+ // the second element at offset 8. Check for this:
+ unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
+ unsigned HiAlign = TD.getABITypeAlignment(Hi);
+ unsigned HiStart = llvm::RoundUpToAlignment(LoSize, HiAlign);
+ assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
+
+ // To handle this, we have to increase the size of the low part so that the
+ // second element will start at an 8 byte offset. We can't increase the size
+ // of the second element because it might make us access off the end of the
+ // struct.
+ if (HiStart != 8) {
+ // There are usually two sorts of types the ABI generation code can produce
+ // for the low part of a pair that aren't 8 bytes in size: float or
+ // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
+ // NaCl).
+ // Promote these to a larger type.
+ if (Lo->isFloatTy())
+ Lo = llvm::Type::getDoubleTy(Lo->getContext());
+ else {
+ assert((Lo->isIntegerTy() || Lo->isPointerTy())
+ && "Invalid/unknown lo type");
+ Lo = llvm::Type::getInt64Ty(Lo->getContext());
+ }
+ }
+
+ llvm::StructType *Result = llvm::StructType::get(Lo, Hi, nullptr);
+
+ // Verify that the second element is at an 8-byte offset.
+ assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
+ "Invalid x86-64 argument pair!");
+ return Result;
+}
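+
+// For example, if the inferred pair were {i32, i32}, a plain struct would
+// place the second element at offset 4; the promotion above widens the low
+// part to i64, yielding {i64, i32} with the high element at offset 8.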
+
+ABIArgInfo X86_64ABIInfo::
+classifyReturnType(QualType RetTy) const {
+ // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
+ // classification algorithm.
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
+
+ // Check some invariants.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ llvm::Type *ResType = nullptr;
+ switch (Lo) {
+ case NoClass:
+ if (Hi == NoClass)
+ return ABIArgInfo::getIgnore();
+ // If the low part is just padding, it takes no register, leave ResType
+ // null.
+ assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
+ "Unknown missing lo part");
+ break;
+
+ case SSEUp:
+ case X87Up:
+ llvm_unreachable("Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
+ // hidden argument.
+ case Memory:
+ return getIndirectReturnResult(RetTy);
+
+ // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
+ // available register of the sequence %rax, %rdx is used.
+ case Integer:
+ ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
+
+ // If we have a sign or zero extended integer, make sure to return Extend
+ // so that the parameter gets the right LLVM IR attributes.
+ if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ if (RetTy->isIntegralOrEnumerationType() &&
+ RetTy->isPromotableIntegerType())
+ return ABIArgInfo::getExtend();
+ }
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
+ // available SSE register of the sequence %xmm0, %xmm1 is used.
+ case SSE:
+ ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
+ // returned on the X87 stack in %st0 as 80-bit x87 number.
+ case X87:
+ ResType = llvm::Type::getX86_FP80Ty(getVMContext());
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
+ // part of the value is returned in %st0 and the imaginary part in
+ // %st1.
+ case ComplexX87:
+ assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
+ ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
+ llvm::Type::getX86_FP80Ty(getVMContext()),
+ nullptr);
+ break;
+ }
+
+ llvm::Type *HighPart = nullptr;
+ switch (Hi) {
+ // Memory was handled previously and X87 should
+ // never occur as a hi class.
+ case Memory:
+ case X87:
+ llvm_unreachable("Invalid classification for hi word.");
+
+ case ComplexX87: // Previously handled.
+ case NoClass:
+ break;
+
+ case Integer:
+ HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ break;
+ case SSE:
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
+  // is passed in the next available eightbyte chunk of the last used
+  // vector register.
+ //
+ // SSEUP should always be preceded by SSE, just widen.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification.");
+ ResType = GetByteVectorType(RetTy);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
+ // returned together with the previous X87 value in %st0.
+ case X87Up:
+ // If X87Up is preceded by X87, we don't need to do
+ // anything. However, in some cases with unions it may not be
+ // preceded by X87. In such situations we follow gcc and pass the
+ // extra bits in an SSE reg.
+ if (Lo != X87) {
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ }
+ break;
+ }
+
+ // If a high part was specified, merge it together with the low part. It is
+ // known to pass in the high eightbyte of the result. We do this by forming a
+ // first class struct aggregate with the high and low part: {low, high}
+ if (HighPart)
+ ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
+
+ return ABIArgInfo::getDirect(ResType);
+}
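+
+// A rough sketch of the resulting return conventions:
+//   struct { long a, b; }   -> {i64, i64}, returned in %rax and %rdx
+//   long double             -> x86_fp80, returned in %st0
+//   _Complex long double    -> {x86_fp80, x86_fp80}, in %st0 and %st1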
+
+ABIArgInfo X86_64ABIInfo::classifyArgumentType(
+    QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
+    bool isNamedArg) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(Ty, 0, Lo, Hi, isNamedArg);
+
+ // Check some invariants.
+ // FIXME: Enforce these by construction.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ neededInt = 0;
+ neededSSE = 0;
+ llvm::Type *ResType = nullptr;
+ switch (Lo) {
+ case NoClass:
+ if (Hi == NoClass)
+ return ABIArgInfo::getIgnore();
+ // If the low part is just padding, it takes no register, leave ResType
+ // null.
+ assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
+ "Unknown missing lo part");
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
+ // on the stack.
+ case Memory:
+
+ // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
+ // COMPLEX_X87, it is passed in memory.
+ case X87:
+ case ComplexX87:
+ if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
+ ++neededInt;
+ return getIndirectResult(Ty, freeIntRegs);
+
+ case SSEUp:
+ case X87Up:
+ llvm_unreachable("Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
+ // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
+ // and %r9 is used.
+ case Integer:
+ ++neededInt;
+
+ // Pick an 8-byte type based on the preferred type.
+ ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
+
+ // If we have a sign or zero extended integer, make sure to return Extend
+ // so that the parameter gets the right LLVM IR attributes.
+ if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ if (Ty->isIntegralOrEnumerationType() &&
+ Ty->isPromotableIntegerType())
+ return ABIArgInfo::getExtend();
+ }
+
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
+ // available SSE register is used, the registers are taken in the
+ // order from %xmm0 to %xmm7.
+ case SSE: {
+ llvm::Type *IRType = CGT.ConvertType(Ty);
+ ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
+ ++neededSSE;
+ break;
+ }
+ }
+
+ llvm::Type *HighPart = nullptr;
+ switch (Hi) {
+ // Memory was handled previously, ComplexX87 and X87 should
+ // never occur as hi classes, and X87Up must be preceded by X87,
+ // which is passed in memory.
+ case Memory:
+ case X87:
+ case ComplexX87:
+ llvm_unreachable("Invalid classification for hi word.");
+
+ case NoClass: break;
+
+ case Integer:
+ ++neededInt;
+ // Pick an 8-byte type based on the preferred type.
+ HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
+
+ if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ break;
+
+ // X87Up generally doesn't occur here (long double is passed in
+ // memory), except in situations involving unions.
+ case X87Up:
+ case SSE:
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
+
+ if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+
+ ++neededSSE;
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
+ // eightbyte is passed in the upper half of the last used SSE
+ // register. This only happens when 128-bit vectors are passed.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification");
+ ResType = GetByteVectorType(Ty);
+ break;
+ }
+
+ // If a high part was specified, merge it together with the low part. It is
+ // known to pass in the high eightbyte of the result. We do this by forming a
+ // first class struct aggregate with the high and low part: {low, high}
+ if (HighPart)
+ ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
+
+ return ABIArgInfo::getDirect(ResType);
+}
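+
+// For example, passing struct { double d; long l; } classifies as Lo = SSE
+// and Hi = Integer, so neededSSE = 1, neededInt = 1, and the argument is
+// coerced to the pair type {double, i64}.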
+
+void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ // Keep track of the number of assigned registers.
+ unsigned freeIntRegs = 6, freeSSERegs = 8;
+
+ // If the return value is indirect, then the hidden argument is consuming one
+ // integer register.
+ if (FI.getReturnInfo().isIndirect())
+ --freeIntRegs;
+
+ // The chain argument effectively gives us another free register.
+ if (FI.isChainCall())
+ ++freeIntRegs;
+
+ unsigned NumRequiredArgs = FI.getNumRequiredArgs();
+ // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
+ // get assigned (in left-to-right order) for passing as follows...
+ unsigned ArgNo = 0;
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it, ++ArgNo) {
+ bool IsNamedArg = ArgNo < NumRequiredArgs;
+
+ unsigned neededInt, neededSSE;
+ it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
+ neededSSE, IsNamedArg);
+
+ // AMD64-ABI 3.2.3p3: If there are no registers available for any
+ // eightbyte of an argument, the whole argument is passed on the
+ // stack. If registers have already been assigned for some
+ // eightbytes of such an argument, the assignments get reverted.
+ if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
+ freeIntRegs -= neededInt;
+ freeSSERegs -= neededSSE;
+ } else {
+ it->info = getIndirectResult(it->type, freeIntRegs);
+ }
+ }
+}
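+
+// For example, a function taking seven pointer arguments passes the first
+// six in %rdi, %rsi, %rdx, %rcx, %r8 and %r9; the seventh finds
+// freeIntRegs == 0 and is passed on the stack. With an sret return, %rdi is
+// consumed first, leaving only five registers for arguments.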
+
+static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
+ Address VAListAddr, QualType Ty) {
+ Address overflow_arg_area_p = CGF.Builder.CreateStructGEP(
+ VAListAddr, 2, CharUnits::fromQuantity(8), "overflow_arg_area_p");
+ llvm::Value *overflow_arg_area =
+ CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
+
+ // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
+ // byte boundary if alignment needed by type exceeds 8 byte boundary.
+ // It isn't stated explicitly in the standard, but in practice we use
+ // alignment greater than 16 where necessary.
+ CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
+ if (Align > CharUnits::fromQuantity(8)) {
+ overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
+ Align);
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
+ llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *Res =
+ CGF.Builder.CreateBitCast(overflow_arg_area,
+ llvm::PointerType::getUnqual(LTy));
+
+ // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
+ // l->overflow_arg_area + sizeof(type).
+ // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
+ // an 8 byte boundary.
+
+ uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
+ llvm::Value *Offset =
+ llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
+ overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
+ "overflow_arg_area.next");
+ CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
+
+ // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
+ return Address(Res, Align);
+}
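+
+// Roughly, for a type T the emitted IR corresponds to this C sketch
+// (illustrative only):
+//   char *p = l->overflow_arg_area;
+//   if (alignof(T) > 8)
+//     p = (char *)(((uintptr_t)p + alignof(T) - 1) & ~(alignof(T) - 1));
+//   T *result = (T *)p;
+//   l->overflow_arg_area = p + ((sizeof(T) + 7) & ~7);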
+
+Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ // Assume that va_list type is correct; should be pointer to LLVM type:
+ // struct {
+ // i32 gp_offset;
+ // i32 fp_offset;
+ // i8* overflow_arg_area;
+ // i8* reg_save_area;
+ // };
+ unsigned neededInt, neededSSE;
+
+ Ty = getContext().getCanonicalType(Ty);
+ ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
+ /*isNamedArg*/false);
+
+ // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
+ // in the registers. If not go to step 7.
+ if (!neededInt && !neededSSE)
+ return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
+
+ // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
+ // general purpose registers needed to pass type and num_fp to hold
+ // the number of floating point registers needed.
+
+ // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
+ // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
+ // l->fp_offset > 304 - num_fp * 16 go to step 7.
+ //
+ // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
+  // register save space.
+
+ llvm::Value *InRegs = nullptr;
+ Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
+ llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
+ if (neededInt) {
+ gp_offset_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 0, CharUnits::Zero(),
+ "gp_offset_p");
+ gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
+ InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
+ InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
+ }
+
+ if (neededSSE) {
+ fp_offset_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 1, CharUnits::fromQuantity(4),
+ "fp_offset_p");
+ fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
+ llvm::Value *FitsInFP =
+ llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
+ FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
+ InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
+ }
+
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+ CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
+
+ // Emit code to load the value if it was passed in registers.
+
+ CGF.EmitBlock(InRegBlock);
+
+ // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
+ // an offset of l->gp_offset and/or l->fp_offset. This may require
+ // copying to a temporary location in case the parameter is passed
+ // in different register classes or requires an alignment greater
+ // than 8 for general purpose registers and 16 for XMM registers.
+ //
+ // FIXME: This really results in shameful code when we end up needing to
+ // collect arguments from different places; often what should result in a
+ // simple assembling of a structure from scattered addresses has many more
+ // loads than necessary. Can we clean this up?
+ llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
+ CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(16)),
+ "reg_save_area");
+
+ Address RegAddr = Address::invalid();
+ if (neededInt && neededSSE) {
+ // FIXME: Cleanup.
+ assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
+ llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
+ Address Tmp = CGF.CreateMemTemp(Ty);
+ Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
+ assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
+ llvm::Type *TyLo = ST->getElementType(0);
+ llvm::Type *TyHi = ST->getElementType(1);
+ assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
+ "Unexpected ABI info for mixed regs");
+ llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
+ llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
+ llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset);
+ llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset);
+ llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
+ llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
+
+ // Copy the first element.
+ llvm::Value *V =
+ CGF.Builder.CreateDefaultAlignedLoad(
+ CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
+ CGF.Builder.CreateStore(V,
+ CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
+
+ // Copy the second element.
+ V = CGF.Builder.CreateDefaultAlignedLoad(
+ CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
+ CharUnits Offset = CharUnits::fromQuantity(
+ getDataLayout().getStructLayout(ST)->getElementOffset(1));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1, Offset));
+
+ RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
+ } else if (neededInt) {
+ RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset),
+ CharUnits::fromQuantity(8));
+ RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
+
+ // Copy to a temporary if necessary to ensure the appropriate alignment.
+ std::pair<CharUnits, CharUnits> SizeAlign =
+ getContext().getTypeInfoInChars(Ty);
+ uint64_t TySize = SizeAlign.first.getQuantity();
+ CharUnits TyAlign = SizeAlign.second;
+
+ // Copy into a temporary if the type is more aligned than the
+ // register save area.
+ if (TyAlign.getQuantity() > 8) {
+ Address Tmp = CGF.CreateMemTemp(Ty);
+ CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
+ RegAddr = Tmp;
+ }
+
+ } else if (neededSSE == 1) {
+ RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
+ CharUnits::fromQuantity(16));
+ RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
+ } else {
+ assert(neededSSE == 2 && "Invalid number of needed registers!");
+ // SSE registers are spaced 16 bytes apart in the register save
+    // area, so we need to collect the two eightbytes together.
+ // The ABI isn't explicit about this, but it seems reasonable
+ // to assume that the slots are 16-byte aligned, since the stack is
+ // naturally 16-byte aligned and the prologue is expected to store
+ // all the SSE registers to the RSA.
+ Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
+ CharUnits::fromQuantity(16));
+ Address RegAddrHi =
+ CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
+ CharUnits::fromQuantity(16));
+ llvm::Type *DoubleTy = CGF.DoubleTy;
+ llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr);
+ llvm::Value *V;
+ Address Tmp = CGF.CreateMemTemp(Ty);
+ Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
+ V = CGF.Builder.CreateLoad(
+ CGF.Builder.CreateElementBitCast(RegAddrLo, DoubleTy));
+ CGF.Builder.CreateStore(V,
+ CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
+ V = CGF.Builder.CreateLoad(
+ CGF.Builder.CreateElementBitCast(RegAddrHi, DoubleTy));
+ CGF.Builder.CreateStore(V,
+ CGF.Builder.CreateStructGEP(Tmp, 1, CharUnits::fromQuantity(8)));
+
+ RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 5. Set:
+ // l->gp_offset = l->gp_offset + num_gp * 8
+ // l->fp_offset = l->fp_offset + num_fp * 16.
+ if (neededInt) {
+ llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
+ gp_offset_p);
+ }
+ if (neededSSE) {
+ llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
+ fp_offset_p);
+ }
+ CGF.EmitBranch(ContBlock);
+
+ // Emit code to load the value if it was passed in memory.
+
+ CGF.EmitBlock(InMemBlock);
+ Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
+
+ // Return the appropriate result.
+
+ CGF.EmitBlock(ContBlock);
+ Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
+ "vaarg.addr");
+ return ResAddr;
+}
+
+Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
+ CGF.getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(8),
+ /*allowHigherAlign*/ false);
+}
+
+ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
+ bool IsReturnType) const {
+
+ if (Ty->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ TypeInfo Info = getContext().getTypeInfo(Ty);
+ uint64_t Width = Info.Width;
+ CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
+
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (RT) {
+ if (!IsReturnType) {
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+ }
+
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+  }
+
+  // vectorcall adds the concept of a homogeneous vector aggregate, similar to
+ // other targets.
+ const Type *Base = nullptr;
+ uint64_t NumElts = 0;
+ if (FreeSSERegs && isHomogeneousAggregate(Ty, Base, NumElts)) {
+ if (FreeSSERegs >= NumElts) {
+ FreeSSERegs -= NumElts;
+ if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
+ return ABIArgInfo::getDirect();
+ return ABIArgInfo::getExpand();
+ }
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ }
+
+ if (Ty->isMemberPointerType()) {
+ // If the member pointer is represented by an LLVM int or ptr, pass it
+ // directly.
+ llvm::Type *LLTy = CGT.ConvertType(Ty);
+ if (LLTy->isPointerTy() || LLTy->isIntegerTy())
+ return ABIArgInfo::getDirect();
+ }
+
+ if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
+ // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
+ // not 1, 2, 4, or 8 bytes, must be passed by reference."
+ if (Width > 64 || !llvm::isPowerOf2_64(Width))
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+
+ // Otherwise, coerce it to a small integer.
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
+ }
+
+  // The bool type is always extended for the ABI; other builtin types are not
+  // extended.
+ const BuiltinType *BT = Ty->getAs<BuiltinType>();
+ if (BT && BT->getKind() == BuiltinType::Bool)
+ return ABIArgInfo::getExtend();
+
+  // Mingw64 GCC uses the old 80-bit extended precision floating point
+  // unit; it passes long doubles indirectly through memory.
+ if (IsMingw64 && BT && BT->getKind() == BuiltinType::LongDouble) {
+ const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
+ if (LDF == &llvm::APFloat::x87DoubleExtended)
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ }
+
+ return ABIArgInfo::getDirect();
+}
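+
+// For example, under the MS ABI an 8-byte struct is coerced to i64 and
+// passed in a register, a 16-byte struct is passed by reference
+// (Width > 64), and a 3-byte struct also goes by reference because its
+// size is not a power of two.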
+
+void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ bool IsVectorCall =
+ FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;
+
+ // We can use up to 4 SSE return registers with vectorcall.
+ unsigned FreeSSERegs = IsVectorCall ? 4 : 0;
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true);
+
+ // We can use up to 6 SSE register parameters with vectorcall.
+ FreeSSERegs = IsVectorCall ? 6 : 0;
+ for (auto &I : FI.arguments())
+ I.info = classify(I.type, FreeSSERegs, false);
+}
+
+Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
+ CGF.getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(8),
+ /*allowHigherAlign*/ false);
+}
+
+// PowerPC-32
+namespace {
+/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
+class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
+  bool IsSoftFloatABI;
+public:
+ PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI)
+ : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI)
+ : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT, SoftFloatABI)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ // This is recovered from gcc output.
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+};
+
+}
+
+Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
+ QualType Ty) const {
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
+ // TODO: Implement this. For now ignore.
+ (void)CTy;
+ return Address::invalid();
+ }
+
+ // struct __va_list_tag {
+ // unsigned char gpr;
+ // unsigned char fpr;
+ // unsigned short reserved;
+ // void *overflow_arg_area;
+ // void *reg_save_area;
+ // };
+
+ bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
+ bool isInt =
+ Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
+ bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
+
+ // All aggregates are passed indirectly? That doesn't seem consistent
+ // with the argument-lowering code.
+ bool isIndirect = Ty->isAggregateType();
+
+ CGBuilderTy &Builder = CGF.Builder;
+
+  // The calling convention uses either 1-2 GPRs or 1 FPR.
+ Address NumRegsAddr = Address::invalid();
+ if (isInt || IsSoftFloatABI) {
+ NumRegsAddr = Builder.CreateStructGEP(VAList, 0, CharUnits::Zero(), "gpr");
+ } else {
+ NumRegsAddr = Builder.CreateStructGEP(VAList, 1, CharUnits::One(), "fpr");
+ }
+
+ llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
+
+ // "Align" the register count when TY is i64.
+ if (isI64 || (isF64 && IsSoftFloatABI)) {
+ NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
+ NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
+ }
+
+ llvm::Value *CC =
+ Builder.CreateICmpULT(NumRegs, Builder.getInt8(8), "cond");
+
+ llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
+ llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
+
+ Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
+
+ llvm::Type *DirectTy = CGF.ConvertType(Ty);
+ if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
+
+ // Case 1: consume registers.
+ Address RegAddr = Address::invalid();
+ {
+ CGF.EmitBlock(UsingRegs);
+
+ Address RegSaveAreaPtr =
+ Builder.CreateStructGEP(VAList, 4, CharUnits::fromQuantity(8));
+ RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
+ CharUnits::fromQuantity(8));
+ assert(RegAddr.getElementType() == CGF.Int8Ty);
+
+ // Floating-point registers start after the general-purpose registers.
+ if (!(isInt || IsSoftFloatABI)) {
+ RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
+ CharUnits::fromQuantity(32));
+ }
+
+    // Get the address of the saved value by scaling the number of
+    // registers we've used by the size of each register slot (4 bytes for
+    // GPRs, 8 bytes for FPRs).
+ CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
+ llvm::Value *RegOffset =
+ Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
+ RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
+ RegAddr.getPointer(), RegOffset),
+ RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
+ RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
+
+ // Increase the used-register count.
+ NumRegs =
+ Builder.CreateAdd(NumRegs,
+ Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
+ Builder.CreateStore(NumRegs, NumRegsAddr);
+
+ CGF.EmitBranch(Cont);
+ }
+
+ // Case 2: consume space in the overflow area.
+ Address MemAddr = Address::invalid();
+ {
+ CGF.EmitBlock(UsingOverflow);
+
+ // Everything in the overflow area is rounded up to a size of at least 4.
+ CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
+
+ CharUnits Size;
+ if (!isIndirect) {
+ auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
+ Size = TypeInfo.first.RoundUpToAlignment(OverflowAreaAlign);
+ } else {
+ Size = CGF.getPointerSize();
+ }
+
+ Address OverflowAreaAddr =
+ Builder.CreateStructGEP(VAList, 3, CharUnits::fromQuantity(4));
+ Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
+ OverflowAreaAlign);
+    // Round up the address of the argument to its alignment.
+ CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
+ if (Align > OverflowAreaAlign) {
+ llvm::Value *Ptr = OverflowArea.getPointer();
+ OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
+ Align);
+ }
+
+ MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
+
+ // Increase the overflow area.
+ OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
+ Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
+ CGF.EmitBranch(Cont);
+ }
+
+ CGF.EmitBlock(Cont);
+
+ // Merge the cases with a phi.
+ Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
+ "vaarg.addr");
+
+ // Load the pointer if the argument was passed indirectly.
+ if (isIndirect) {
+ Result = Address(Builder.CreateLoad(Result, "aggr"),
+ getContext().getTypeAlignInChars(Ty));
+ }
+
+ return Result;
+}
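+
+// For example, fetching an i64 vararg first rounds the GPR count up to an
+// even value (i64 occupies an aligned register pair) and then consumes two
+// 4-byte GPR slots; once the count reaches 8, arguments are fetched from
+// the overflow area instead.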
+
+bool
+PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ // This is calculated from the LLVM and GCC tables and verified
+ // against gcc output. AFAIK all ABIs use the same encoding.
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::IntegerType *i8 = CGF.Int8Ty;
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
+
+ // 0-31: r0-31, the 4-byte general-purpose registers
+ AssignToArrayRange(Builder, Address, Four8, 0, 31);
+
+ // 32-63: fp0-31, the 8-byte floating-point registers
+ AssignToArrayRange(Builder, Address, Eight8, 32, 63);
+
+ // 64-76 are various 4-byte special-purpose registers:
+ // 64: mq
+ // 65: lr
+ // 66: ctr
+ // 67: ap
+ // 68-75 cr0-7
+ // 76: xer
+ AssignToArrayRange(Builder, Address, Four8, 64, 76);
+
+ // 77-108: v0-31, the 16-byte vector registers
+ AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
+
+ // 109: vrsave
+ // 110: vscr
+ // 111: spe_acc
+ // 112: spefscr
+ // 113: sfp
+ AssignToArrayRange(Builder, Address, Four8, 109, 113);
+
+ return false;
+}
+
+// PowerPC-64
+
+namespace {
+/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
+class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
+public:
+ enum ABIKind {
+ ELFv1 = 0,
+ ELFv2
+ };
+
+private:
+ static const unsigned GPRBits = 64;
+ ABIKind Kind;
+ bool HasQPX;
+
+ // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and
+ // will be passed in a QPX register.
+ bool IsQPXVectorTy(const Type *Ty) const {
+ if (!HasQPX)
+ return false;
+
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ unsigned NumElements = VT->getNumElements();
+ if (NumElements == 1)
+ return false;
+
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
+ if (getContext().getTypeSize(Ty) <= 256)
+ return true;
+ } else if (VT->getElementType()->
+ isSpecificBuiltinType(BuiltinType::Float)) {
+ if (getContext().getTypeSize(Ty) <= 128)
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ bool IsQPXVectorTy(QualType Ty) const {
+ return IsQPXVectorTy(Ty.getTypePtr());
+ }
+
+public:
+ PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX)
+ : DefaultABIInfo(CGT), Kind(Kind), HasQPX(HasQPX) {}
+
+ bool isPromotableTypeForABI(QualType Ty) const;
+ CharUnits getParamTypeAlignment(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+ bool isHomogeneousAggregateBaseType(QualType Ty) const override;
+ bool isHomogeneousAggregateSmallEnough(const Type *Ty,
+ uint64_t Members) const override;
+
+ // TODO: We can add more logic to computeInfo to improve performance.
+ // Example: For aggregate arguments that fit in a register, we could
+ // use getDirectInReg (as is done below for structs containing a single
+ // floating-point value) to avoid pushing them to memory on function
+ // entry. This would require changing the logic in PPCISelLowering
+ // when lowering the parameters in the caller and args in the callee.
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments()) {
+ // We rely on the default argument classification for the most part.
+ // One exception: An aggregate containing a single floating-point
+ // or vector item must be passed in a register if one is available.
+ const Type *T = isSingleElementStruct(I.type, getContext());
+ if (T) {
+ const BuiltinType *BT = T->getAs<BuiltinType>();
+ if (IsQPXVectorTy(T) ||
+ (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
+ (BT && BT->isFloatingPoint())) {
+ QualType QT(T, 0);
+ I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
+ continue;
+ }
+ }
+ I.info = classifyArgumentType(I.type);
+ }
+ }
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
+ PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX)
+ : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ // This is recovered from gcc output.
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+};
+
+class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
+public:
+ PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ // This is recovered from gcc output.
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+};
+
+}
+
+// Return true if the ABI requires Ty to be passed sign- or zero-
+// extended to 64 bits.
+bool
+PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Promotable integer types are required to be promoted by the ABI.
+ if (Ty->isPromotableIntegerType())
+ return true;
+
+ // In addition to the usual promotable integer types, we also need to
+ // extend all 32-bit types, since the ABI requires promotion to 64 bits.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/// getParamTypeAlignment - Determine whether a type requires 16-byte or
+/// higher alignment in the parameter area. Always returns at least 8.
+CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
+ // Complex types are passed just like their elements.
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>())
+ Ty = CTy->getElementType();
+
+ // Only vector types of size 16 bytes need alignment (larger types are
+ // passed via reference, smaller types are not aligned).
+ if (IsQPXVectorTy(Ty)) {
+ if (getContext().getTypeSize(Ty) > 128)
+ return CharUnits::fromQuantity(32);
+
+ return CharUnits::fromQuantity(16);
+ } else if (Ty->isVectorType()) {
+ return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
+ }
+
+ // For single-element float/vector structs, we consider the whole type
+ // to have the same alignment requirements as its single element.
+ const Type *AlignAsType = nullptr;
+ const Type *EltType = isSingleElementStruct(Ty, getContext());
+ if (EltType) {
+ const BuiltinType *BT = EltType->getAs<BuiltinType>();
+ if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
+ getContext().getTypeSize(EltType) == 128) ||
+ (BT && BT->isFloatingPoint()))
+ AlignAsType = EltType;
+ }
+
+ // Likewise for ELFv2 homogeneous aggregates.
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (!AlignAsType && Kind == ELFv2 &&
+ isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
+ AlignAsType = Base;
+
+ // With special case aggregates, only vector base types need alignment.
+ if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
+ if (getContext().getTypeSize(AlignAsType) > 128)
+ return CharUnits::fromQuantity(32);
+
+ return CharUnits::fromQuantity(16);
+ } else if (AlignAsType) {
+ return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
+ }
+
+ // Otherwise, we only need alignment for any aggregate type that
+ // has an alignment requirement of >= 16 bytes.
+ if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
+ if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
+ return CharUnits::fromQuantity(32);
+ return CharUnits::fromQuantity(16);
+ }
+
+ return CharUnits::fromQuantity(8);
+}
+
+/// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
+/// aggregate. Base is set to the base element type, and Members is set
+/// to the number of base elements.
+bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
+ uint64_t &Members) const {
+ if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
+ uint64_t NElements = AT->getSize().getZExtValue();
+ if (NElements == 0)
+ return false;
+ if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
+ return false;
+ Members *= NElements;
+ } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ Members = 0;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (const auto &I : CXXRD->bases()) {
+ // Ignore empty records.
+ if (isEmptyRecord(getContext(), I.getType(), true))
+ continue;
+
+ uint64_t FldMembers;
+ if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
+ return false;
+
+ Members += FldMembers;
+ }
+ }
+
+ for (const auto *FD : RD->fields()) {
+ // Ignore (non-zero arrays of) empty records.
+ QualType FT = FD->getType();
+ while (const ConstantArrayType *AT =
+ getContext().getAsConstantArrayType(FT)) {
+ if (AT->getSize().getZExtValue() == 0)
+ return false;
+ FT = AT->getElementType();
+ }
+ if (isEmptyRecord(getContext(), FT, true))
+ continue;
+
+ // For compatibility with GCC, ignore empty bitfields in C++ mode.
+ if (getContext().getLangOpts().CPlusPlus &&
+ FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
+ continue;
+
+ uint64_t FldMembers;
+ if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
+ return false;
+
+ Members = (RD->isUnion() ?
+ std::max(Members, FldMembers) : Members + FldMembers);
+ }
+
+ if (!Base)
+ return false;
+
+ // Ensure there is no padding.
+ if (getContext().getTypeSize(Base) * Members !=
+ getContext().getTypeSize(Ty))
+ return false;
+ } else {
+ Members = 1;
+ if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ Members = 2;
+ Ty = CT->getElementType();
+ }
+
+ // Most ABIs only support float, double, and some vector type widths.
+ if (!isHomogeneousAggregateBaseType(Ty))
+ return false;
+
+ // The base type must be the same for all members. Types that
+ // agree in both total size and mode (float vs. vector) are
+ // treated as being equivalent here.
+ const Type *TyPtr = Ty.getTypePtr();
+ if (!Base)
+ Base = TyPtr;
+
+ if (Base->isVectorType() != TyPtr->isVectorType() ||
+ getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
+ return false;
+ }
+ return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
+}
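+
+// For example, struct { float x, y, z, w; } is homogeneous with
+// Base = float and Members = 4; struct { double d[2]; double e; } gives
+// Base = double and Members = 3; struct { float f; double d; } is rejected
+// because the member sizes differ.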
+
+bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
+ // Homogeneous aggregates for ELFv2 must have base types of float,
+ // double, long double, or 128-bit vectors.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->getKind() == BuiltinType::Float ||
+ BT->getKind() == BuiltinType::Double ||
+ BT->getKind() == BuiltinType::LongDouble)
+ return true;
+ }
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
+ return true;
+ }
+ return false;
+}
+
+bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
+ const Type *Base, uint64_t Members) const {
+ // Vector types require one register; floating-point types require one
+ // or two registers, depending on their size.
+ uint32_t NumRegs =
+ Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64;
+
+ // Homogeneous Aggregates may occupy at most 8 registers.
+ return Members * NumRegs <= 8;
+}
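+
+ // For instance (example types; long double is 128 bits here):
+ //   struct F8 { double d[8]; };      // 8 members x 1 register = 8: OK
+ //   struct L4 { long double l[4]; }; // 4 members x 2 registers = 8: OK
+ //   struct F9 { double d[9]; };      // 9 registers: too large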
+
+ABIArgInfo
+PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ if (Ty->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+
+ // Non-Altivec vector types are passed in GPRs (when smaller than 16
+ // bytes) or by reference (when larger than 16 bytes).
+ if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size > 128)
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+ else if (Size < 128) {
+ llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+ }
+
+ if (isAggregateTypeForABI(Ty)) {
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
+ uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
+
+ // ELFv2 homogeneous aggregates are passed as array types.
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (Kind == ELFv2 &&
+ isHomogeneousAggregate(Ty, Base, Members)) {
+ llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
+ llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+
+ // If an aggregate may end up fully in registers, we do not
+ // use the ByVal method, but pass the aggregate as an array.
+ // This is usually beneficial since we avoid forcing the
+ // back-end to store the argument to memory.
+ uint64_t Bits = getContext().getTypeSize(Ty);
+ if (Bits > 0 && Bits <= 8 * GPRBits) {
+ llvm::Type *CoerceTy;
+
+ // Types up to 8 bytes are passed as an integer type (which will be
+ // properly aligned in the argument save area doubleword).
+ if (Bits <= GPRBits)
+ CoerceTy = llvm::IntegerType::get(getVMContext(),
+ llvm::RoundUpToAlignment(Bits, 8));
+ // Larger types are passed as arrays, with the base type selected
+ // according to the required alignment in the save area.
+ else {
+ uint64_t RegBits = ABIAlign * 8;
+ uint64_t NumRegs = llvm::RoundUpToAlignment(Bits, RegBits) / RegBits;
+ llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
+ CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
+ }
+
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+
+ // All other aggregates are passed ByVal.
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
+ /*ByVal=*/true,
+ /*Realign=*/TyAlign > ABIAlign);
+ }
+
+ return (isPromotableTypeForABI(Ty) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
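+
+ // A sketch of the resulting classifications (example types, assuming
+ // the default 8-byte parameter alignment computed above):
+ //   struct { double x, y; }  -> [2 x double]  (ELFv2 homogeneous)
+ //   struct { int a; }        -> i32           (fits one doubleword)
+ //   struct { long a, b, c; } -> [3 x i64]     (array of GPR chunks)
+ //   struct { char c[100]; }  -> indirect byval, align 8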
+
+ABIArgInfo
+PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+
+ // Non-Altivec vector types are returned in GPRs (when smaller than 16
+ // bytes) or by reference (when larger than 16 bytes).
+ if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size > 128)
+ return getNaturalAlignIndirect(RetTy);
+ else if (Size < 128) {
+ llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+ }
+
+ if (isAggregateTypeForABI(RetTy)) {
+ // ELFv2 homogeneous aggregates are returned as array types.
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (Kind == ELFv2 &&
+ isHomogeneousAggregate(RetTy, Base, Members)) {
+ llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
+ llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+
+ // ELFv2 small aggregates are returned in up to two registers.
+ uint64_t Bits = getContext().getTypeSize(RetTy);
+ if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
+ if (Bits == 0)
+ return ABIArgInfo::getIgnore();
+
+ llvm::Type *CoerceTy;
+ if (Bits > GPRBits) {
+ CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
+ CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, nullptr);
+ } else
+ CoerceTy = llvm::IntegerType::get(getVMContext(),
+ llvm::RoundUpToAlignment(Bits, 8));
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+
+ // All other aggregates are returned indirectly.
+ return getNaturalAlignIndirect(RetTy);
+ }
+
+ return (isPromotableTypeForABI(RetTy) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
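+
+ // For example, under ELFv2 (example types):
+ //   struct { float x, y; }     -> [2 x float]   (homogeneous aggregate)
+ //   struct { long a; char b; } -> { i64, i64 }  (16 bytes, two GPRs)
+ //   struct { char c[24]; }     -> returned indirectly via sret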
+
+// Based on ARMABIInfo::EmitVAArg, adjusted for a 64-bit machine.
+Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ auto TypeInfo = getContext().getTypeInfoInChars(Ty);
+ TypeInfo.second = getParamTypeAlignment(Ty);
+
+ CharUnits SlotSize = CharUnits::fromQuantity(8);
+
+ // If we have a complex type and the base type is smaller than 8 bytes,
+ // the ABI calls for the real and imaginary parts to be right-adjusted
+ // in separate doublewords. However, Clang expects us to produce a
+ // pointer to a structure with the two parts packed tightly. So generate
+ // loads of the real and imaginary parts relative to the va_list pointer,
+ // and store them to a temporary structure.
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
+ CharUnits EltSize = TypeInfo.first / 2;
+ if (EltSize < SlotSize) {
+ Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
+ SlotSize * 2, SlotSize,
+ SlotSize, /*AllowHigher*/ true);
+
+ Address RealAddr = Addr;
+ Address ImagAddr = RealAddr;
+ if (CGF.CGM.getDataLayout().isBigEndian()) {
+ RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
+ SlotSize - EltSize);
+ ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
+ 2 * SlotSize - EltSize);
+ } else {
+ ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
+ }
+
+ llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
+ RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
+ ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
+ llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
+ llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
+
+ Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
+ CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
+ /*init*/ true);
+ return Temp;
+ }
+ }
+
+ // Otherwise, just use the general rule.
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
+ TypeInfo, SlotSize, /*AllowHigher*/ true);
+}
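+
+ // For a big-endian 'float _Complex' argument, for example, the real part
+ // occupies the high bytes (4..7) of the first doubleword and the
+ // imaginary part the high bytes of the second; the code above loads both
+ // and repacks them into a contiguous 8-byte temporary.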
+
+static bool
+PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) {
+ // This is calculated from the LLVM and GCC tables and verified
+ // against gcc output. As far as we know, all ABIs use the same encoding.
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::IntegerType *i8 = CGF.Int8Ty;
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
+
+ // 0-31: r0-31, the 8-byte general-purpose registers
+ AssignToArrayRange(Builder, Address, Eight8, 0, 31);
+
+ // 32-63: fp0-31, the 8-byte floating-point registers
+ AssignToArrayRange(Builder, Address, Eight8, 32, 63);
+
+ // 64-76 are various 4-byte special-purpose registers:
+ // 64: mq
+ // 65: lr
+ // 66: ctr
+ // 67: ap
+ // 68-75: cr0-7
+ // 76: xer
+ AssignToArrayRange(Builder, Address, Four8, 64, 76);
+
+ // 77-108: v0-31, the 16-byte vector registers
+ AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
+
+ // 109: vrsave
+ // 110: vscr
+ // 111: spe_acc
+ // 112: spefscr
+ // 113: sfp
+ AssignToArrayRange(Builder, Address, Four8, 109, 113);
+
+ return false;
+}
+
+bool
+PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+
+ return PPC64_initDwarfEHRegSizeTable(CGF, Address);
+}
+
+bool
+PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+
+ return PPC64_initDwarfEHRegSizeTable(CGF, Address);
+}
+
+//===----------------------------------------------------------------------===//
+// AArch64 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class AArch64ABIInfo : public ABIInfo {
+public:
+ enum ABIKind {
+ AAPCS = 0,
+ DarwinPCS
+ };
+
+private:
+ ABIKind Kind;
+
+public:
+ AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {}
+
+private:
+ ABIKind getABIKind() const { return Kind; }
+ bool isDarwinPCS() const { return Kind == DarwinPCS; }
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ bool isHomogeneousAggregateBaseType(QualType Ty) const override;
+ bool isHomogeneousAggregateSmallEnough(const Type *Ty,
+ uint64_t Members) const override;
+
+ bool isIllegalVectorType(QualType Ty) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ for (auto &it : FI.arguments())
+ it.info = classifyArgumentType(it.type);
+ }
+
+ Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+
+ Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override {
+ return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
+ : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
+ }
+};
+
+class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
+ : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}
+
+ StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
+ return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
+ }
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ return 31;
+ }
+
+ bool doesReturnSlotInterfereWithArgs() const override { return false; }
+};
+}
+
+ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ // Handle illegal vector types here.
+ if (isIllegalVectorType(Ty)) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size <= 32) {
+ llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
+ return ABIArgInfo::getDirect(ResType);
+ }
+ if (Size == 64) {
+ llvm::Type *ResType =
+ llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
+ return ABIArgInfo::getDirect(ResType);
+ }
+ if (Size == 128) {
+ llvm::Type *ResType =
+ llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
+ return ABIArgInfo::getDirect(ResType);
+ }
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+ }
+
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() && isDarwinPCS()
+ ? ABIArgInfo::getExtend()
+ : ABIArgInfo::getDirect());
+ }
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
+ CGCXXABI::RAA_DirectInMemory);
+ }
+
+ // Empty records are always ignored on Darwin, but are passed in C++ mode
+ // elsewhere for GNU compatibility.
+ if (isEmptyRecord(getContext(), Ty, true)) {
+ if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
+ return ABIArgInfo::getIgnore();
+
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ }
+
+ // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (isHomogeneousAggregate(Ty, Base, Members)) {
+ return ABIArgInfo::getDirect(
+ llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
+ }
+
+ // Aggregates <= 16 bytes are passed directly in registers or on the stack.
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size <= 128) {
+ unsigned Alignment = getContext().getTypeAlign(Ty);
+ Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
+
+ // We use a pair of i64 for a 16-byte aggregate with 8-byte alignment.
+ // For aggregates with 16-byte alignment, we use i128.
+ if (Alignment < 128 && Size == 128) {
+ llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
+ }
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
+ }
+
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+}
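+
+ // Sample classifications under these rules (example types):
+ //   struct { float v[4]; } -> [4 x float]  (HFA)
+ //   struct { int a, b; }   -> i64          (small aggregate)
+ //   struct { long a, b; }  -> [2 x i64]    (16 bytes, 8-byte aligned)
+ //   struct { __int128 a; } -> i128         (16 bytes, 16-byte aligned)
+ //   struct { char c[32]; } -> indirect, no byval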
+
+ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ // Large vector types should be returned via memory.
+ if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
+ return getNaturalAlignIndirect(RetTy);
+
+ if (!isAggregateTypeForABI(RetTy)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() && isDarwinPCS()
+ ? ABIArgInfo::getExtend()
+ : ABIArgInfo::getDirect());
+ }
+
+ if (isEmptyRecord(getContext(), RetTy, true))
+ return ABIArgInfo::getIgnore();
+
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (isHomogeneousAggregate(RetTy, Base, Members))
+ // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
+ return ABIArgInfo::getDirect();
+
+ // Aggregates <= 16 bytes are returned directly in registers or on the stack.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size <= 128) {
+ unsigned Alignment = getContext().getTypeAlign(RetTy);
+ Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
+
+ // We use a pair of i64 for a 16-byte aggregate with 8-byte alignment.
+ // For aggregates with 16-byte alignment, we use i128.
+ if (Alignment < 128 && Size == 128) {
+ llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
+ }
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
+ }
+
+ return getNaturalAlignIndirect(RetTy);
+}
+
+/// isIllegalVectorType - check whether the vector type is illegal for
+/// AArch64 and must be coerced or passed indirectly.
+bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ // Check whether VT is legal.
+ unsigned NumElements = VT->getNumElements();
+ uint64_t Size = getContext().getTypeSize(VT);
+ // NumElements should be a power of 2 between 1 and 16.
+ if ((NumElements & (NumElements - 1)) != 0 || NumElements > 16)
+ return true;
+ return Size != 64 && (Size != 128 || NumElements == 1);
+ }
+ return false;
+}
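+
+ // For example, <2 x float> (64 bits) and <4 x i32> (128 bits) are legal,
+ // while <3 x float> (non-power-of-2 element count), <2 x i8> (16 bits),
+ // and one-element 128-bit vectors are all treated as illegal.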
+
+bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
+ // Homogeneous aggregates for AAPCS64 must have base types of a floating
+ // point type or a short-vector type. This is the same as the 32-bit ABI,
+ // but with the difference that any floating-point type is allowed,
+ // including __fp16.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->isFloatingPoint())
+ return true;
+ } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ unsigned VecSize = getContext().getTypeSize(VT);
+ if (VecSize == 64 || VecSize == 128)
+ return true;
+ }
+ return false;
+}
+
+bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
+ uint64_t Members) const {
+ return Members <= 4;
+}
+
+Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
+ QualType Ty,
+ CodeGenFunction &CGF) const {
+ ABIArgInfo AI = classifyArgumentType(Ty);
+ bool IsIndirect = AI.isIndirect();
+
+ llvm::Type *BaseTy = CGF.ConvertType(Ty);
+ if (IsIndirect)
+ BaseTy = llvm::PointerType::getUnqual(BaseTy);
+ else if (AI.getCoerceToType())
+ BaseTy = AI.getCoerceToType();
+
+ unsigned NumRegs = 1;
+ if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
+ BaseTy = ArrTy->getElementType();
+ NumRegs = ArrTy->getNumElements();
+ }
+ bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
+
+ // The AArch64 va_list type and handling is specified in the Procedure Call
+ // Standard, section B.4:
+ //
+ // struct {
+ // void *__stack;
+ // void *__gr_top;
+ // void *__vr_top;
+ // int __gr_offs;
+ // int __vr_offs;
+ // };
+
+ llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+
+ auto TyInfo = getContext().getTypeInfoInChars(Ty);
+ CharUnits TyAlign = TyInfo.second;
+
+ Address reg_offs_p = Address::invalid();
+ llvm::Value *reg_offs = nullptr;
+ int reg_top_index;
+ CharUnits reg_top_offset;
+ int RegSize = IsIndirect ? 8 : TyInfo.first.getQuantity();
+ if (!IsFPR) {
+ // 3 is the field number of __gr_offs
+ reg_offs_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
+ "gr_offs_p");
+ reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
+ reg_top_index = 1; // field number for __gr_top
+ reg_top_offset = CharUnits::fromQuantity(8);
+ RegSize = llvm::RoundUpToAlignment(RegSize, 8);
+ } else {
+ // 4 is the field number of __vr_offs.
+ reg_offs_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 4, CharUnits::fromQuantity(28),
+ "vr_offs_p");
+ reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
+ reg_top_index = 2; // field number for __vr_top
+ reg_top_offset = CharUnits::fromQuantity(16);
+ RegSize = 16 * NumRegs;
+ }
+
+ //=======================================
+ // Find out where argument was passed
+ //=======================================
+
+ // If reg_offs >= 0 we're already using the stack for this type of
+ // argument. We don't want to keep updating reg_offs (in case it overflows,
+ // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
+ // whatever they get).
+ llvm::Value *UsingStack = nullptr;
+ UsingStack = CGF.Builder.CreateICmpSGE(
+ reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
+
+ CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
+
+ // Otherwise, at least some kind of argument could go in these registers, the
+ // question is whether this particular type is too big.
+ CGF.EmitBlock(MaybeRegBlock);
+
+ // Integer arguments may need to be aligned to an even register (for
+ // example, a "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}).
+ // In this case we align __gr_offs to calculate the potential address.
+ if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
+ int Align = TyAlign.getQuantity();
+
+ reg_offs = CGF.Builder.CreateAdd(
+ reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
+ "align_regoffs");
+ reg_offs = CGF.Builder.CreateAnd(
+ reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
+ "aligned_regoffs");
+ }
+
+ // Update the gr_offs/vr_offs pointer for the next call to va_arg on this
+ // va_list.
+ // The fact that this is done unconditionally reflects the fact that
+ // allocating an argument to the stack also uses up all the remaining
+ // registers of the appropriate kind.
+ llvm::Value *NewOffset = nullptr;
+ NewOffset = CGF.Builder.CreateAdd(
+ reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
+ CGF.Builder.CreateStore(NewOffset, reg_offs_p);
+
+ // Now we're in a position to decide whether this argument really was in
+ // registers or not.
+ llvm::Value *InRegs = nullptr;
+ InRegs = CGF.Builder.CreateICmpSLE(
+ NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
+
+ CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
+
+ //=======================================
+ // Argument was in registers
+ //=======================================
+
+ // Now we emit the code for if the argument was originally passed in
+ // registers. First start the appropriate block:
+ CGF.EmitBlock(InRegBlock);
+
+ llvm::Value *reg_top = nullptr;
+ Address reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index,
+ reg_top_offset, "reg_top_p");
+ reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
+ Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
+ CharUnits::fromQuantity(IsFPR ? 16 : 8));
+ Address RegAddr = Address::invalid();
+ llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
+
+ if (IsIndirect) {
+ // If it's been passed indirectly (actually a struct), whatever we find from
+ // stored registers or on the stack will actually be a struct **.
+ MemTy = llvm::PointerType::getUnqual(MemTy);
+ }
+
+ const Type *Base = nullptr;
+ uint64_t NumMembers = 0;
+ bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
+ if (IsHFA && NumMembers > 1) {
+ // Homogeneous aggregates passed in registers will have their elements split
+ // and stored 16 bytes apart regardless of size (they're notionally in qN,
+ // qN+1, ...). We reload and store into a temporary local variable
+ // contiguously.
+ assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
+ auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
+ llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
+ llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
+ Address Tmp = CGF.CreateTempAlloca(HFATy,
+ std::max(TyAlign, BaseTyInfo.second));
+
+ // On big-endian platforms, the value will be right-aligned in its slot.
+ int Offset = 0;
+ if (CGF.CGM.getDataLayout().isBigEndian() &&
+ BaseTyInfo.first.getQuantity() < 16)
+ Offset = 16 - BaseTyInfo.first.getQuantity();
+
+ for (unsigned i = 0; i < NumMembers; ++i) {
+ CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
+ Address LoadAddr =
+ CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
+ LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);
+
+ Address StoreAddr =
+ CGF.Builder.CreateConstArrayGEP(Tmp, i, BaseTyInfo.first);
+
+ llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
+ CGF.Builder.CreateStore(Elem, StoreAddr);
+ }
+
+ RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
+ } else {
+ // Otherwise the object is contiguous in memory.
+
+ // It might be right-aligned in its slot.
+ CharUnits SlotSize = BaseAddr.getAlignment();
+ if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
+ (IsHFA || !isAggregateTypeForABI(Ty)) &&
+ TyInfo.first < SlotSize) {
+ CharUnits Offset = SlotSize - TyInfo.first;
+ BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
+ }
+
+ RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
+ }
+
+ CGF.EmitBranch(ContBlock);
+
+ //=======================================
+ // Argument was on the stack
+ //=======================================
+ CGF.EmitBlock(OnStackBlock);
+
+ Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0,
+ CharUnits::Zero(), "stack_p");
+ llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
+
+ // Again, stack arguments may need realignment. In this case both integer and
+ // floating-point ones might be affected.
+ if (!IsIndirect && TyAlign.getQuantity() > 8) {
+ int Align = TyAlign.getQuantity();
+
+ OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
+
+ OnStackPtr = CGF.Builder.CreateAdd(
+ OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
+ "align_stack");
+ OnStackPtr = CGF.Builder.CreateAnd(
+ OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
+ "align_stack");
+
+ OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
+ }
+ Address OnStackAddr(OnStackPtr,
+ std::max(CharUnits::fromQuantity(8), TyAlign));
+
+ // All stack slots are multiples of 8 bytes.
+ CharUnits StackSlotSize = CharUnits::fromQuantity(8);
+ CharUnits StackSize;
+ if (IsIndirect)
+ StackSize = StackSlotSize;
+ else
+ StackSize = TyInfo.first.RoundUpToAlignment(StackSlotSize);
+
+ llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
+ llvm::Value *NewStack =
+ CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");
+
+ // Write the new value of __stack for the next call to va_arg
+ CGF.Builder.CreateStore(NewStack, stack_p);
+
+ if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
+ TyInfo.first < StackSlotSize) {
+ CharUnits Offset = StackSlotSize - TyInfo.first;
+ OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
+ }
+
+ OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);
+
+ CGF.EmitBranch(ContBlock);
+
+ //=======================================
+ // Tidy up
+ //=======================================
+ CGF.EmitBlock(ContBlock);
+
+ Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
+ OnStackAddr, OnStackBlock, "vaargs.addr");
+
+ if (IsIndirect)
+ return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
+ TyInfo.second);
+
+ return ResAddr;
+}
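+
+ // As a concrete example: reading an HFA 'struct { float x, y; }' from the
+ // register save area above loads x and y from slots 16 bytes apart
+ // (notionally q0 and q1) and repacks them into a contiguous 8-byte
+ // temporary before the merged address is returned.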
+
+Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // The backend's lowering doesn't support va_arg for aggregates or
+ // illegal vector types. Lower VAArg here for these cases and use
+ // the LLVM va_arg instruction for everything else.
+ if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
+ return Address::invalid();
+
+ CharUnits SlotSize = CharUnits::fromQuantity(8);
+
+ // Empty records are ignored for parameter passing purposes.
+ if (isEmptyRecord(getContext(), Ty, true)) {
+ Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
+ Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
+ return Addr;
+ }
+
+ // The size of the actual thing passed, which might end up just
+ // being a pointer for indirect types.
+ auto TyInfo = getContext().getTypeInfoInChars(Ty);
+
+ // Arguments bigger than 16 bytes which aren't homogeneous
+ // aggregates should be passed indirectly.
+ bool IsIndirect = false;
+ if (TyInfo.first.getQuantity() > 16) {
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
+ }
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
+ TyInfo, SlotSize, /*AllowHigherAlign*/ true);
+}
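+
+ // For instance, 'struct { char c[24]; }' (24 bytes, not an HFA) is read
+ // indirectly through a pointer in an 8-byte slot, while the homogeneous
+ // 'struct { double d[3]; }' is read directly from 24 bytes of stack.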
+
+//===----------------------------------------------------------------------===//
+// ARM ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class ARMABIInfo : public ABIInfo {
+public:
+ enum ABIKind {
+ APCS = 0,
+ AAPCS = 1,
+ AAPCS_VFP = 2,
+ AAPCS16_VFP = 3,
+ };
+
+private:
+ ABIKind Kind;
+
+public:
+ ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {
+ setCCs();
+ }
+
+ bool isEABI() const {
+ switch (getTarget().getTriple().getEnvironment()) {
+ case llvm::Triple::Android:
+ case llvm::Triple::EABI:
+ case llvm::Triple::EABIHF:
+ case llvm::Triple::GNUEABI:
+ case llvm::Triple::GNUEABIHF:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isEABIHF() const {
+ switch (getTarget().getTriple().getEnvironment()) {
+ case llvm::Triple::EABIHF:
+ case llvm::Triple::GNUEABIHF:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isAndroid() const {
+ return (getTarget().getTriple().getEnvironment() ==
+ llvm::Triple::Android);
+ }
+
+ ABIKind getABIKind() const { return Kind; }
+
+private:
+ ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const;
+ bool isIllegalVectorType(QualType Ty) const;
+
+ bool isHomogeneousAggregateBaseType(QualType Ty) const override;
+ bool isHomogeneousAggregateSmallEnough(const Type *Ty,
+ uint64_t Members) const override;
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ llvm::CallingConv::ID getLLVMDefaultCC() const;
+ llvm::CallingConv::ID getABIDefaultCC() const;
+ void setCCs();
+};
+
+class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
+ :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
+
+ const ARMABIInfo &getABIInfo() const {
+ return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
+ }
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ return 13;
+ }
+
+ StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
+ return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override {
+ llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
+
+ // 0-15 are the 16 integer registers.
+ AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
+ return false;
+ }
+
+ unsigned getSizeOfUnwindException() const override {
+ if (getABIInfo().isEABI()) return 88;
+ return TargetCodeGenInfo::getSizeOfUnwindException();
+ }
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override {
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD)
+ return;
+
+ const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
+ if (!Attr)
+ return;
+
+ const char *Kind;
+ switch (Attr->getInterrupt()) {
+ case ARMInterruptAttr::Generic: Kind = ""; break;
+ case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
+ case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
+ case ARMInterruptAttr::SWI: Kind = "SWI"; break;
+ case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
+ case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
+ }
+
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+
+ Fn->addFnAttr("interrupt", Kind);
+
+ ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
+ if (ABI == ARMABIInfo::APCS)
+ return;
+
+ // AAPCS guarantees that sp will be 8-byte aligned on any public interface;
+ // however, this is not necessarily true when taking any interrupt. Instruct
+ // the backend to perform a realignment as part of the function prologue.
+ llvm::AttrBuilder B;
+ B.addStackAlignmentAttr(8);
+ Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
+ llvm::AttributeSet::get(CGM.getLLVMContext(),
+ llvm::AttributeSet::FunctionIndex,
+ B));
+ }
+};
+
+class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
+ void addStackProbeSizeTargetAttribute(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const;
+
+public:
+ WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
+ : ARMTargetCodeGenInfo(CGT, K) {}
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override;
+};
+
+void WindowsARMTargetCodeGenInfo::addStackProbeSizeTargetAttribute(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
+ if (!isa<FunctionDecl>(D))
+ return;
+ if (CGM.getCodeGenOpts().StackProbeSize == 4096)
+ return;
+
+ llvm::Function *F = cast<llvm::Function>(GV);
+ F->addFnAttr("stack-probe-size",
+ llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
+}
+
+void WindowsARMTargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
+ ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
+ addStackProbeSizeTargetAttribute(D, GV, CGM);
+}
+}
+
+void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() =
+ classifyReturnType(FI.getReturnType(), FI.isVariadic());
+
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type, FI.isVariadic());
+
+ // Always honor user-specified calling convention.
+ if (FI.getCallingConvention() != llvm::CallingConv::C)
+ return;
+
+ llvm::CallingConv::ID cc = getRuntimeCC();
+ if (cc != llvm::CallingConv::C)
+ FI.setEffectiveCallingConvention(cc);
+}
+
+/// Return the default calling convention that LLVM will use.
+llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
+ // The default calling convention that LLVM will infer.
+ if (isEABIHF() || getTarget().getTriple().isWatchOS())
+ return llvm::CallingConv::ARM_AAPCS_VFP;
+ else if (isEABI())
+ return llvm::CallingConv::ARM_AAPCS;
+ else
+ return llvm::CallingConv::ARM_APCS;
+}
+
+/// Return the calling convention that our ABI would like us to use
+/// as the C calling convention.
+llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
+ switch (getABIKind()) {
+ case APCS: return llvm::CallingConv::ARM_APCS;
+ case AAPCS: return llvm::CallingConv::ARM_AAPCS;
+ case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
+ case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
+ }
+ llvm_unreachable("bad ABI kind");
+}
+
+void ARMABIInfo::setCCs() {
+ assert(getRuntimeCC() == llvm::CallingConv::C);
+
+ // Don't muddy up the IR with a ton of explicit annotations if
+ // they'd just match what LLVM will infer from the triple.
+ llvm::CallingConv::ID abiCC = getABIDefaultCC();
+ if (abiCC != getLLVMDefaultCC())
+ RuntimeCC = abiCC;
+
+ // AAPCS apparently requires runtime support functions to be soft-float, but
+ // that's almost certainly for historic reasons (Thumb1 not supporting VFP
+ // most likely). It's more convenient for AAPCS16_VFP to be hard-float.
+ switch (getABIKind()) {
+ case APCS:
+ case AAPCS16_VFP:
+ if (abiCC != getLLVMDefaultCC())
+ BuiltinCC = abiCC;
+ break;
+ case AAPCS:
+ case AAPCS_VFP:
+ BuiltinCC = llvm::CallingConv::ARM_AAPCS;
+ break;
+ }
+}
+
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
+ bool isVariadic) const {
+ // 6.1.2.1 The following argument types are VFP CPRCs:
+ // A single-precision floating-point type (including promoted
+ // half-precision types); A double-precision floating-point type;
+ // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
+ // with a Base Type of a single- or double-precision floating-point type,
+ // 64-bit containerized vectors or 128-bit containerized vectors with one
+ // to four Elements.
+ bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
+
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ // Handle illegal vector types here.
+ if (isIllegalVectorType(Ty)) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size <= 32) {
+ llvm::Type *ResType =
+ llvm::Type::getInt32Ty(getVMContext());
+ return ABIArgInfo::getDirect(ResType);
+ }
+ if (Size == 64) {
+ llvm::Type *ResType = llvm::VectorType::get(
+ llvm::Type::getInt32Ty(getVMContext()), 2);
+ return ABIArgInfo::getDirect(ResType);
+ }
+ if (Size == 128) {
+ llvm::Type *ResType = llvm::VectorType::get(
+ llvm::Type::getInt32Ty(getVMContext()), 4);
+ return ABIArgInfo::getDirect(ResType);
+ }
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+ }
+
+ // __fp16 gets passed as if it were an int or float, but with the top 16 bits
+ // unspecified. This is not done for OpenCL as it handles the half type
+ // natively, and does not need to interwork with AAPCS code.
+ if (Ty->isHalfType() && !getContext().getLangOpts().OpenCL) {
+ llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
+ llvm::Type::getFloatTy(getVMContext()) :
+ llvm::Type::getInt32Ty(getVMContext());
+ return ABIArgInfo::getDirect(ResType);
+ }
+
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
+ Ty = EnumTy->getDecl()->getIntegerType();
+ }
+
+ return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend()
+ : ABIArgInfo::getDirect());
+ }
+
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+ }
+
+ // Ignore empty records.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ if (IsEffectivelyAAPCS_VFP) {
+ // Homogeneous Aggregates need to be expanded when we can fit the aggregate
+ // into VFP registers.
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (isHomogeneousAggregate(Ty, Base, Members)) {
+ assert(Base && "Base class should be set for homogeneous aggregate");
+ // Base can be a floating-point or a vector.
+ return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
+ }
+ } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
+ // WatchOS does have homogeneous aggregates. Note that we intentionally use
+ // this convention even for a variadic function: the backend will use GPRs
+ // if needed.
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (isHomogeneousAggregate(Ty, Base, Members)) {
+ assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
+ llvm::Type *Ty =
+ llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
+ return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
+ }
+ }
+
+ if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
+ getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
+ // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
+ // bigger than 128 bits, they get placed in space allocated by the caller,
+ // and a pointer is passed.
+ return ABIArgInfo::getIndirect(
+ CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
+ }
+
+ // Support byval for ARM.
+ // The ABI alignment for APCS is 4 bytes; for AAPCS it is at least 4 and
+ // at most 8 bytes. We realign the indirect argument if the type alignment
+ // is bigger than the ABI alignment.
+ uint64_t ABIAlign = 4;
+ uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
+ if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
+ getABIKind() == ARMABIInfo::AAPCS)
+ ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
+
+ if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
+ assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
+ /*ByVal=*/true,
+ /*Realign=*/TyAlign > ABIAlign);
+ }
+
+ // Otherwise, pass by coercing to a structure of the appropriate size.
+ llvm::Type* ElemTy;
+ unsigned SizeRegs;
+ // FIXME: Try to match the types of the arguments more accurately where
+ // we can.
+ if (getContext().getTypeAlign(Ty) <= 32) {
+ ElemTy = llvm::Type::getInt32Ty(getVMContext());
+ SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ } else {
+ ElemTy = llvm::Type::getInt64Ty(getVMContext());
+ SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
+ }
+
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
+}
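+
+ // Sample results (example types, non-variadic):
+ //   struct { float x, y; }     -> HFA, passed in VFP registers
+ //                                 (AAPCS_VFP only)
+ //   struct { int a; short b; } -> [2 x i32]
+ //   struct { long long a[2]; } -> [2 x i64]
+ //   struct { char c[100]; }    -> indirect byval, align 4 (AAPCS)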
+
+static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
+ llvm::LLVMContext &VMContext) {
+ // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
+ // is called integer-like if its size is less than or equal to one word, and
+ // the offset of each of its addressable sub-fields is zero.
+
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // Check that the type fits in a word.
+ if (Size > 32)
+ return false;
+
+ // FIXME: Handle vector types!
+ if (Ty->isVectorType())
+ return false;
+
+ // Float types are never treated as "integer like".
+ if (Ty->isRealFloatingType())
+ return false;
+
+ // If this is a builtin or pointer type then it is ok.
+ if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
+ return true;
+
+ // Small complex integer types are "integer like".
+ if (const ComplexType *CT = Ty->getAs<ComplexType>())
+ return isIntegerLikeType(CT->getElementType(), Context, VMContext);
+
+ // Single-element and zero-sized arrays should be allowed, by the
+ // definition above, but they are not.
+
+ // Otherwise, it must be a record type.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT) return false;
+
+ // Ignore records with flexible arrays.
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ // Check that all sub-fields are at offset 0, and are themselves "integer
+ // like".
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ bool HadField = false;
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ const FieldDecl *FD = *i;
+
+ // Bit-fields are not addressable, so we only need to verify that they are
+ // "integer like". We still have to disallow a subsequent non-bitfield, for
+ // example:
+ // struct { int : 0; int x; }
+ // is non-integer like according to gcc.
+ if (FD->isBitField()) {
+ if (!RD->isUnion())
+ HadField = true;
+
+ if (!isIntegerLikeType(FD->getType(), Context, VMContext))
+ return false;
+
+ continue;
+ }
+
+ // Check if this field is at offset 0.
+ if (Layout.getFieldOffset(idx) != 0)
+ return false;
+
+ if (!isIntegerLikeType(FD->getType(), Context, VMContext))
+ return false;
+
+ // Only allow at most one field in a structure. This doesn't match the
+ // wording above, but follows gcc in situations with a field following an
+ // empty structure.
+ if (!RD->isUnion()) {
+ if (HadField)
+ return false;
+
+ HadField = true;
+ }
+ }
+
+ return true;
+}
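+
+ // Under this definition, for example:
+ //   struct { int x; }     is integer-like (one word, field at offset 0),
+ //   struct { char a, b; } is not ('b' sits at a nonzero offset),
+ //   struct { float f; }   is not (floating-point is excluded).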
+
+ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
+ bool isVariadic) const {
+ bool IsEffectivelyAAPCS_VFP =
+ (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;
+
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ // Large vector types should be returned via memory.
+ if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
+ return getNaturalAlignIndirect(RetTy);
+ }
+
+ // __fp16 gets returned as if it were an int or float, but with the top 16
+ // bits unspecified. This is not done for OpenCL as it handles the half type
+ // natively, and does not need to interwork with AAPCS code.
+ if (RetTy->isHalfType() && !getContext().getLangOpts().OpenCL) {
+ llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
+ llvm::Type::getFloatTy(getVMContext()) :
+ llvm::Type::getInt32Ty(getVMContext());
+ return ABIArgInfo::getDirect(ResType);
+ }
+
+ if (!isAggregateTypeForABI(RetTy)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
+ : ABIArgInfo::getDirect();
+ }
+
+ // Are we following APCS?
+ if (getABIKind() == APCS) {
+ if (isEmptyRecord(getContext(), RetTy, false))
+ return ABIArgInfo::getIgnore();
+
+ // Complex types are all returned as packed integers.
+ //
+ // FIXME: Consider using 2 x vector types if the back end handles them
+ // correctly.
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(
+ getVMContext(), getContext().getTypeSize(RetTy)));
+
+ // Integer like structures are returned in r0.
+ if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
+ // Return in the smallest viable integer type.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size <= 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ if (Size <= 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ }
+
+ // Otherwise return in memory.
+ return getNaturalAlignIndirect(RetTy);
+ }
+
+ // Otherwise this is an AAPCS variant.
+
+ if (isEmptyRecord(getContext(), RetTy, true))
+ return ABIArgInfo::getIgnore();
+
+ // Check for homogeneous aggregates with AAPCS-VFP.
+ if (IsEffectivelyAAPCS_VFP) {
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (isHomogeneousAggregate(RetTy, Base, Members)) {
+ assert(Base && "Base class should be set for homogeneous aggregate");
+ // Homogeneous Aggregates are returned directly.
+ return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
+ }
+ }
+
+ // Aggregates <= 4 bytes are returned in r0; other aggregates
+ // are returned indirectly.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size <= 32) {
+ if (getDataLayout().isBigEndian())
+ // Return in a 32-bit integer type (as if loaded by LDR; see AAPCS 5.4).
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+
+ // Return in the smallest viable integer type.
+ if (Size <= 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ if (Size <= 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
+ llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
+ llvm::Type *CoerceTy =
+ llvm::ArrayType::get(Int32Ty, llvm::RoundUpToAlignment(Size, 32) / 32);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+
+ return getNaturalAlignIndirect(RetTy);
+}
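+
+ // For the AAPCS variants, e.g., 'struct { char c; }' comes back as i8 in
+ // r0 (or as i32 on big-endian targets), while a 12-byte struct is
+ // returned indirectly, except under AAPCS16_VFP where it is returned as
+ // [3 x i32].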
+
+/// isIllegalVectorType - check whether Ty is an illegal vector type.
+bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
+ if (const VectorType *VT = Ty->getAs<VectorType> ()) {
+ if (isAndroid()) {
+ // Android shipped using Clang 3.1, which supported a slightly different
+ // vector ABI. The primary differences were that 3-element vector types
+ // were legal, and so were sub-32-bit vectors (e.g., <2 x i8>). This path
+ // accepts that legacy behavior for Android only.
+ // Check whether VT is legal.
+ unsigned NumElements = VT->getNumElements();
+ // NumElements should be power of 2 or equal to 3.
+ if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
+ return true;
+ } else {
+ // Check whether VT is legal.
+ unsigned NumElements = VT->getNumElements();
+ uint64_t Size = getContext().getTypeSize(VT);
+ // NumElements should be a power of 2.
+ if (!llvm::isPowerOf2_32(NumElements))
+ return true;
+ // The total size must be greater than 32 bits; anything smaller is
+ // illegal.
+ return Size <= 32;
+ }
+ }
+ return false;
+}
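+
+ // So, for instance, <2 x i8> (16 bits) and <3 x i32> are illegal under
+ // the generic rules but accepted on Android for legacy compatibility.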
+
+bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
+ // Homogeneous aggregates for AAPCS-VFP must have base types of float,
+ // double, or 64-bit or 128-bit vectors.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->getKind() == BuiltinType::Float ||
+ BT->getKind() == BuiltinType::Double ||
+ BT->getKind() == BuiltinType::LongDouble)
+ return true;
+ } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ unsigned VecSize = getContext().getTypeSize(VT);
+ if (VecSize == 64 || VecSize == 128)
+ return true;
+ }
+ return false;
+}
+
+bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
+ uint64_t Members) const {
+ return Members <= 4;
+}
+
+Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ CharUnits SlotSize = CharUnits::fromQuantity(4);
+
+ // Empty records are ignored for parameter passing purposes.
+ if (isEmptyRecord(getContext(), Ty, true)) {
+ Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
+ Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
+ return Addr;
+ }
+
+ auto TyInfo = getContext().getTypeInfoInChars(Ty);
+ CharUnits TyAlignForABI = TyInfo.second;
+
+ // Use indirect if the size of the illegal vector is bigger than 16 bytes.
+ bool IsIndirect = false;
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (TyInfo.first > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
+ IsIndirect = true;
+
+ // ARMv7k passes structs bigger than 16 bytes indirectly, in space
+ // allocated by the caller.
+ } else if (TyInfo.first > CharUnits::fromQuantity(16) &&
+ getABIKind() == ARMABIInfo::AAPCS16_VFP &&
+ !isHomogeneousAggregate(Ty, Base, Members)) {
+ IsIndirect = true;
+
+ // Otherwise, bound the type's ABI alignment.
+ // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
+ // APCS. For AAPCS, the ABI alignment is at least 4 and at most 8 bytes.
+ // Our callers should be prepared to handle an under-aligned address.
+ } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
+ getABIKind() == ARMABIInfo::AAPCS) {
+ TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
+ TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
+ } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
+ // ARMv7k allows type alignment up to 16 bytes.
+ TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
+ TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
+ } else {
+ TyAlignForABI = CharUnits::fromQuantity(4);
+ }
+ TyInfo.second = TyAlignForABI;
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
+ SlotSize, /*AllowHigherAlign*/ true);
+}
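+
+ // For example, a struct declared with __attribute__((aligned(16))) is
+ // read here assuming at most 8-byte alignment under AAPCS; only the
+ // AAPCS16_VFP (ARMv7k) variant honors alignment up to 16 bytes.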
+
+//===----------------------------------------------------------------------===//
+// NVPTX ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class NVPTXABIInfo : public ABIInfo {
+public:
+ NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const override;
+private:
+ // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
+ // resulting MDNode to the nvvm.annotations MDNode.
+ static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
+};
+
+ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ // Note: this is different from the default ABI.
+ if (!RetTy->isScalarType())
+ return ABIArgInfo::getDirect();
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Return aggregate types as indirect byval.
+ if (isAggregateTypeForABI(Ty))
+ return getNaturalAlignIndirect(Ty, /* byval */ true);
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+
+ // Always honor user-specified calling convention.
+ if (FI.getCallingConvention() != llvm::CallingConv::C)
+ return;
+
+ FI.setEffectiveCallingConvention(getRuntimeCC());
+}
+
+Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ llvm_unreachable("NVPTX does not support varargs");
+}
+
+void NVPTXTargetCodeGenInfo::
+setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const{
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD) return;
+
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ // Perform special handling in OpenCL mode
+ if (M.getLangOpts().OpenCL) {
+ // Use OpenCL function attributes to check for kernel functions
+ // By default, all functions are device functions
+ if (FD->hasAttr<OpenCLKernelAttr>()) {
+ // OpenCL __kernel functions get kernel metadata
+ // Create !{<func-ref>, metadata !"kernel", i32 1} node
+ addNVVMMetadata(F, "kernel", 1);
+ // And kernel functions are not subject to inlining
+ F->addFnAttr(llvm::Attribute::NoInline);
+ }
+ }
+
+ // Perform special handling in CUDA mode.
+ if (M.getLangOpts().CUDA) {
+ // CUDA __global__ functions get a kernel metadata entry. Since
+ // __global__ functions cannot be called from the device, we do not
+ // need to set the noinline attribute.
+ if (FD->hasAttr<CUDAGlobalAttr>()) {
+ // Create !{<func-ref>, metadata !"kernel", i32 1} node
+ addNVVMMetadata(F, "kernel", 1);
+ }
+ if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
+ // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
+ llvm::APSInt MaxThreads(32);
+ MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
+ if (MaxThreads > 0)
+ addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());
+
+ // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
+ // not specified in __launch_bounds__ or if the user specified a 0 value,
+ // we don't have to add a PTX directive.
+ if (Attr->getMinBlocks()) {
+ llvm::APSInt MinBlocks(32);
+ MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
+ if (MinBlocks > 0)
+ // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
+ addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
+ }
+ }
+ }
+}
+
+void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
+ int Operand) {
+ llvm::Module *M = F->getParent();
+ llvm::LLVMContext &Ctx = M->getContext();
+
+ // Get "nvvm.annotations" metadata node
+ llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
+
+ llvm::Metadata *MDVals[] = {
+ llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
+ llvm::ConstantAsMetadata::get(
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
+ // Append metadata to nvvm.annotations
+ MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
+}
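+
+ // For a kernel function, the resulting module-level IR looks roughly
+ // like this ('@my_kernel' stands in for the actual function):
+ //   !nvvm.annotations = !{!0}
+ //   !0 = !{void ()* @my_kernel, !"kernel", i32 1}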
+}
+
+//===----------------------------------------------------------------------===//
+// SystemZ ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class SystemZABIInfo : public ABIInfo {
+ bool HasVector;
+
+public:
+ SystemZABIInfo(CodeGenTypes &CGT, bool HV)
+ : ABIInfo(CGT), HasVector(HV) {}
+
+ bool isPromotableIntegerType(QualType Ty) const;
+ bool isCompoundType(QualType Ty) const;
+ bool isVectorArgumentType(QualType Ty) const;
+ bool isFPArgumentType(QualType Ty) const;
+ QualType GetSingleElementType(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType ArgTy) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+ }
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
+ : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {}
+};
+
+}
+
+bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Promotable integer types are required to be promoted by the ABI.
+ if (Ty->isPromotableIntegerType())
+ return true;
+
+ // 32-bit values must also be promoted.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return true;
+ default:
+ return false;
+ }
+ return false;
+}
+
+bool SystemZABIInfo::isCompoundType(QualType Ty) const {
+ return (Ty->isAnyComplexType() ||
+ Ty->isVectorType() ||
+ isAggregateTypeForABI(Ty));
+}
+
+bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
+ return (HasVector &&
+ Ty->isVectorType() &&
+ getContext().getTypeSize(Ty) <= 128);
+}
+
+bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ return true;
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
+ if (const RecordType *RT = Ty->getAsStructureType()) {
+ const RecordDecl *RD = RT->getDecl();
+ QualType Found;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ for (const auto &I : CXXRD->bases()) {
+ QualType Base = I.getType();
+
+ // Empty bases don't affect things either way.
+ if (isEmptyRecord(getContext(), Base, true))
+ continue;
+
+ if (!Found.isNull())
+ return Ty;
+ Found = GetSingleElementType(Base);
+ }
+
+ // Check the fields.
+ for (const auto *FD : RD->fields()) {
+ // For compatibility with GCC, ignore empty bitfields in C++ mode.
+ // Unlike isSingleElementStruct(), empty structure and array fields
+ // do count. So do anonymous bitfields that aren't zero-sized.
+ if (getContext().getLangOpts().CPlusPlus &&
+ FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
+ continue;
+
+ // Unlike isSingleElementStruct(), arrays do not count.
+ // Nested structures still do though.
+ if (!Found.isNull())
+ return Ty;
+ Found = GetSingleElementType(FD->getType());
+ }
+
+ // Unlike isSingleElementStruct(), trailing padding is allowed.
+ // An 8-byte aligned struct s { float f; } is passed as a double.
+ if (!Found.isNull())
+ return Found;
+ }
+
+ return Ty;
+}
+
+Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ // Assume that va_list type is correct; should be pointer to LLVM type:
+ // struct {
+ // i64 __gpr;
+ // i64 __fpr;
+ // i8 *__overflow_arg_area;
+ // i8 *__reg_save_area;
+ // };
+
+ // Every non-vector argument occupies 8 bytes and is passed by preference
+ // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
+ // always passed on the stack.
+ Ty = getContext().getCanonicalType(Ty);
+ auto TyInfo = getContext().getTypeInfoInChars(Ty);
+ llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Type *DirectTy = ArgTy;
+ ABIArgInfo AI = classifyArgumentType(Ty);
+ bool IsIndirect = AI.isIndirect();
+ bool InFPRs = false;
+ bool IsVector = false;
+ CharUnits UnpaddedSize;
+ CharUnits DirectAlign;
+ if (IsIndirect) {
+ DirectTy = llvm::PointerType::getUnqual(DirectTy);
+ UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
+ } else {
+ if (AI.getCoerceToType())
+ ArgTy = AI.getCoerceToType();
+ InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
+ IsVector = ArgTy->isVectorTy();
+ UnpaddedSize = TyInfo.first;
+ DirectAlign = TyInfo.second;
+ }
+ CharUnits PaddedSize = CharUnits::fromQuantity(8);
+ if (IsVector && UnpaddedSize > PaddedSize)
+ PaddedSize = CharUnits::fromQuantity(16);
+ assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");
+
+ CharUnits Padding = (PaddedSize - UnpaddedSize);
+
+ llvm::Type *IndexTy = CGF.Int64Ty;
+ llvm::Value *PaddedSizeV =
+ llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());
+
+ if (IsVector) {
+ // Work out the address of a vector argument on the stack.
+ // Vector arguments are always passed in the high bits of a
+ // single (8 byte) or double (16 byte) stack slot.
+ Address OverflowArgAreaPtr =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, CharUnits::fromQuantity(16),
+ "overflow_arg_area_ptr");
+ Address OverflowArgArea =
+ Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
+ TyInfo.second);
+ Address MemAddr =
+ CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
+
+ // Update overflow_arg_area_ptr pointer
+ llvm::Value *NewOverflowArgArea =
+ CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
+ "overflow_arg_area");
+ CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
+
+ return MemAddr;
+ }
+
+ assert(PaddedSize.getQuantity() == 8);
+
+ unsigned MaxRegs, RegCountField, RegSaveIndex;
+ CharUnits RegPadding;
+ if (InFPRs) {
+ MaxRegs = 4; // Maximum of 4 FPR arguments
+ RegCountField = 1; // __fpr
+ RegSaveIndex = 16; // save offset for f0
+ RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
+ } else {
+ MaxRegs = 5; // Maximum of 5 GPR arguments
+ RegCountField = 0; // __gpr
+ RegSaveIndex = 2; // save offset for r2
+ RegPadding = Padding; // values are passed in the low bits of a GPR
+ }
+
+ Address RegCountPtr = CGF.Builder.CreateStructGEP(
+ VAListAddr, RegCountField, RegCountField * CharUnits::fromQuantity(8),
+ "reg_count_ptr");
+ llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
+ llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
+ llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
+ "fits_in_regs");
+
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+ CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
+
+ // Emit code to load the value if it was passed in registers.
+ CGF.EmitBlock(InRegBlock);
+
+ // Work out the address of an argument register.
+ llvm::Value *ScaledRegCount =
+ CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
+ llvm::Value *RegBase =
+ llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
+ + RegPadding.getQuantity());
+ llvm::Value *RegOffset =
+ CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
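+  // Illustrative: for the first GPR-class argument (reg_count == 0) this is
+  // 2 * 8 bytes (the save slot for r2) plus any in-slot padding for
+  // sub-8-byte values.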
+ Address RegSaveAreaPtr =
+ CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
+ "reg_save_area_ptr");
+ llvm::Value *RegSaveArea =
+ CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
+ Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset,
+ "raw_reg_addr"),
+ PaddedSize);
+ Address RegAddr =
+ CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");
+
+ // Update the register count
+ llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
+ llvm::Value *NewRegCount =
+ CGF.Builder.CreateAdd(RegCount, One, "reg_count");
+ CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
+ CGF.EmitBranch(ContBlock);
+
+ // Emit code to load the value if it was passed in memory.
+ CGF.EmitBlock(InMemBlock);
+
+ // Work out the address of a stack argument.
+ Address OverflowArgAreaPtr = CGF.Builder.CreateStructGEP(
+ VAListAddr, 2, CharUnits::fromQuantity(16), "overflow_arg_area_ptr");
+ Address OverflowArgArea =
+ Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
+ PaddedSize);
+ Address RawMemAddr =
+ CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
+ Address MemAddr =
+ CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");
+
+ // Update overflow_arg_area_ptr pointer
+ llvm::Value *NewOverflowArgArea =
+ CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
+ "overflow_arg_area");
+ CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
+ CGF.EmitBranch(ContBlock);
+
+ // Return the appropriate result.
+ CGF.EmitBlock(ContBlock);
+ Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
+ MemAddr, InMemBlock, "va_arg.addr");
+
+ if (IsIndirect)
+ ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
+ TyInfo.second);
+
+ return ResAddr;
+}
+
+ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+ if (isVectorArgumentType(RetTy))
+ return ABIArgInfo::getDirect();
+ if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
+ return getNaturalAlignIndirect(RetTy);
+ return (isPromotableIntegerType(RetTy) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
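+// Illustrative classifications: 'struct { float f; }' is a 32-bit struct
+// with a single FP element and is passed as 'float', while
+// 'struct { int a, b, c; }' is 96 bits, fails the 1/2/4/8-byte size test,
+// and is passed indirectly.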
+ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
+ // Handle the generic C++ ABI.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ // Integers and enums are extended to full register width.
+ if (isPromotableIntegerType(Ty))
+ return ABIArgInfo::getExtend();
+
+ // Handle vector types and vector-like structure types. Note that
+ // as opposed to float-like structure types, we do not allow any
+ // padding for vector-like structures, so verify the sizes match.
+ uint64_t Size = getContext().getTypeSize(Ty);
+ QualType SingleElementTy = GetSingleElementType(Ty);
+ if (isVectorArgumentType(SingleElementTy) &&
+ getContext().getTypeSize(SingleElementTy) == Size)
+ return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));
+
+ // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
+ if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+
+ // Handle small structures.
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+    // Structures with flexible arrays have variable length, so they really
+    // fail the size test above.
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+
+ // The structure is passed as an unextended integer, a float, or a double.
+ llvm::Type *PassTy;
+ if (isFPArgumentType(SingleElementTy)) {
+ assert(Size == 32 || Size == 64);
+ if (Size == 32)
+ PassTy = llvm::Type::getFloatTy(getVMContext());
+ else
+ PassTy = llvm::Type::getDoubleTy(getVMContext());
+ } else
+ PassTy = llvm::IntegerType::get(getVMContext(), Size);
+ return ABIArgInfo::getDirect(PassTy);
+ }
+
+ // Non-structure compounds are passed indirectly.
+ if (isCompoundType(Ty))
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+
+ return ABIArgInfo::getDirect(nullptr);
+}
+
+//===----------------------------------------------------------------------===//
+// MSP430 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const override;
+};
+
+}
+
+void MSP430TargetCodeGenInfo::setTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const {
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+ if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
+ // Handle 'interrupt' attribute:
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ // Step 1: Set ISR calling convention.
+ F->setCallingConv(llvm::CallingConv::MSP430_INTR);
+
+      // Step 2: Add the 'noinline' attribute.
+ F->addFnAttr(llvm::Attribute::NoInline);
+
+ // Step 3: Emit ISR vector alias.
+ unsigned Num = attr->getNumber() / 2;
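+      // Illustrative (hypothetical vector number): interrupt(4) gives
+      // Num == 2 and produces the alias "__isr_2" bound to this handler.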
+ llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
+ "__isr_" + Twine(Num), F);
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// MIPS ABI Implementation. This works for both little-endian and
+// big-endian variants.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class MipsABIInfo : public ABIInfo {
+ bool IsO32;
+ unsigned MinABIStackAlignInBytes, StackAlignInBytes;
+ void CoerceToIntArgs(uint64_t TySize,
+ SmallVectorImpl<llvm::Type *> &ArgList) const;
+ llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
+ llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
+ llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
+public:
+ MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
+ ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
+ StackAlignInBytes(IsO32 ? 8 : 16) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
+ void computeInfo(CGFunctionInfo &FI) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+ bool shouldSignExtUnsignedType(QualType Ty) const override;
+};
+
+class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
+ unsigned SizeOfUnwindException;
+public:
+ MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
+ : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
+ SizeOfUnwindException(IsO32 ? 24 : 32) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
+ return 29;
+ }
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override {
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD) return;
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+ if (FD->hasAttr<Mips16Attr>()) {
+ Fn->addFnAttr("mips16");
+ }
+ else if (FD->hasAttr<NoMips16Attr>()) {
+ Fn->addFnAttr("nomips16");
+ }
+
+ const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
+ if (!Attr)
+ return;
+
+ const char *Kind;
+ switch (Attr->getInterrupt()) {
+ case MipsInterruptAttr::eic: Kind = "eic"; break;
+ case MipsInterruptAttr::sw0: Kind = "sw0"; break;
+ case MipsInterruptAttr::sw1: Kind = "sw1"; break;
+ case MipsInterruptAttr::hw0: Kind = "hw0"; break;
+ case MipsInterruptAttr::hw1: Kind = "hw1"; break;
+ case MipsInterruptAttr::hw2: Kind = "hw2"; break;
+ case MipsInterruptAttr::hw3: Kind = "hw3"; break;
+ case MipsInterruptAttr::hw4: Kind = "hw4"; break;
+ case MipsInterruptAttr::hw5: Kind = "hw5"; break;
+ }
+
+ Fn->addFnAttr("interrupt", Kind);
+  }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+
+ unsigned getSizeOfUnwindException() const override {
+ return SizeOfUnwindException;
+ }
+};
+}
+
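+// Illustrative: on O32 (MinABIStackAlignInBytes == 4) a 96-bit aggregate
+// becomes three i32 slots, and a 72-bit aggregate becomes two i32 slots
+// plus a trailing i8 for the remaining bits.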
+void MipsABIInfo::CoerceToIntArgs(
+ uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
+ llvm::IntegerType *IntTy =
+ llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
+
+ // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
+ for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
+ ArgList.push_back(IntTy);
+
+ // If necessary, add one more integer type to ArgList.
+ unsigned R = TySize % (MinABIStackAlignInBytes * 8);
+
+ if (R)
+ ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
+}
+
+// In N32/64, an aligned double precision floating point field is passed in
+// a register.
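+// For example (illustrative), on N64 'struct { double d; int i; }' is
+// coerced to { double, i64 }: the aligned double remains a first-class
+// element and the trailing 64 bits become an integer slot.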
+llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
+ SmallVector<llvm::Type*, 8> ArgList, IntArgList;
+
+ if (IsO32) {
+ CoerceToIntArgs(TySize, ArgList);
+ return llvm::StructType::get(getVMContext(), ArgList);
+ }
+
+ if (Ty->isComplexType())
+ return CGT.ConvertType(Ty);
+
+ const RecordType *RT = Ty->getAs<RecordType>();
+
+ // Unions/vectors are passed in integer registers.
+ if (!RT || !RT->isStructureOrClassType()) {
+ CoerceToIntArgs(TySize, ArgList);
+ return llvm::StructType::get(getVMContext(), ArgList);
+ }
+
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
+
+ uint64_t LastOffset = 0;
+ unsigned idx = 0;
+ llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
+
+ // Iterate over fields in the struct/class and check if there are any aligned
+ // double fields.
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ const QualType Ty = i->getType();
+ const BuiltinType *BT = Ty->getAs<BuiltinType>();
+
+ if (!BT || BT->getKind() != BuiltinType::Double)
+ continue;
+
+ uint64_t Offset = Layout.getFieldOffset(idx);
+ if (Offset % 64) // Ignore doubles that are not aligned.
+ continue;
+
+ // Add ((Offset - LastOffset) / 64) args of type i64.
+ for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
+ ArgList.push_back(I64);
+
+ // Add double type.
+ ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
+ LastOffset = Offset + 64;
+ }
+
+ CoerceToIntArgs(TySize - LastOffset, IntArgList);
+ ArgList.append(IntArgList.begin(), IntArgList.end());
+
+ return llvm::StructType::get(getVMContext(), ArgList);
+}
+
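+// Illustrative: with MinABIStackAlignInBytes == 4, an argument bumped from
+// offset 4 to offset 8 gets an i32 padding type; if the gap is smaller than
+// one slot, no padding type is returned.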
+llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
+ uint64_t Offset) const {
+ if (OrigOffset + MinABIStackAlignInBytes > Offset)
+ return nullptr;
+
+ return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
+}
+
+ABIArgInfo
+MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ uint64_t OrigOffset = Offset;
+ uint64_t TySize = getContext().getTypeSize(Ty);
+ uint64_t Align = getContext().getTypeAlign(Ty) / 8;
+
+ Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
+ (uint64_t)StackAlignInBytes);
+ unsigned CurrOffset = llvm::RoundUpToAlignment(Offset, Align);
+ Offset = CurrOffset + llvm::RoundUpToAlignment(TySize, Align * 8) / 8;
+
+ if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
+ // Ignore empty aggregates.
+ if (TySize == 0)
+ return ABIArgInfo::getIgnore();
+
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
+ Offset = OrigOffset + MinABIStackAlignInBytes;
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+ }
+
+ // If we have reached here, aggregates are passed directly by coercing to
+ // another structure type. Padding is inserted if the offset of the
+ // aggregate is unaligned.
+ ABIArgInfo ArgInfo =
+ ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
+ getPaddingType(OrigOffset, CurrOffset));
+ ArgInfo.setInReg(true);
+ return ArgInfo;
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // All integral types are promoted to the GPR width.
+ if (Ty->isIntegralOrEnumerationType())
+ return ABIArgInfo::getExtend();
+
+ return ABIArgInfo::getDirect(
+ nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
+}
+
+llvm::Type*
+MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
+ const RecordType *RT = RetTy->getAs<RecordType>();
+ SmallVector<llvm::Type*, 8> RTList;
+
+ if (RT && RT->isStructureOrClassType()) {
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ unsigned FieldCnt = Layout.getFieldCount();
+
+ // N32/64 returns struct/classes in floating point registers if the
+ // following conditions are met:
+    // 1. The size of the struct/class is no larger than 128 bits.
+ // 2. The struct/class has one or two fields all of which are floating
+ // point types.
+ // 3. The offset of the first field is zero (this follows what gcc does).
+ //
+ // Any other composite results are returned in integer registers.
+ //
+ if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
+ RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
+ for (; b != e; ++b) {
+ const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
+
+ if (!BT || !BT->isFloatingPoint())
+ break;
+
+ RTList.push_back(CGT.ConvertType(b->getType()));
+ }
+
+ if (b == e)
+ return llvm::StructType::get(getVMContext(), RTList,
+ RD->hasAttr<PackedAttr>());
+
+ RTList.clear();
+ }
+ }
+
+ CoerceToIntArgs(Size, RTList);
+ return llvm::StructType::get(getVMContext(), RTList);
+}
+
+ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
+ uint64_t Size = getContext().getTypeSize(RetTy);
+
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ // O32 doesn't treat zero-sized structs differently from other structs.
+  // However, N32/N64 ignores zero-sized return values.
+ if (!IsO32 && Size == 0)
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
+ if (Size <= 128) {
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+
+ // O32 returns integer vectors in registers and N32/N64 returns all small
+ // aggregates in registers.
+ if (!IsO32 ||
+ (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
+ ABIArgInfo ArgInfo =
+ ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
+ ArgInfo.setInReg(true);
+ return ArgInfo;
+ }
+ }
+
+ return getNaturalAlignIndirect(RetTy);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ ABIArgInfo &RetInfo = FI.getReturnInfo();
+ if (!getCXXABI().classifyReturnType(FI))
+ RetInfo = classifyReturnType(FI.getReturnType());
+
+ // Check if a pointer to an aggregate is passed as a hidden argument.
+ uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
+
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type, Offset);
+}
+
+Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType OrigTy) const {
+ QualType Ty = OrigTy;
+
+ // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
+ // Pointers are also promoted in the same way but this only matters for N32.
+ unsigned SlotSizeInBits = IsO32 ? 32 : 64;
+ unsigned PtrWidth = getTarget().getPointerWidth(0);
+ bool DidPromote = false;
+ if ((Ty->isIntegerType() &&
+ getContext().getIntWidth(Ty) < SlotSizeInBits) ||
+ (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
+ DidPromote = true;
+ Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
+ Ty->isSignedIntegerType());
+ }
+
+ auto TyInfo = getContext().getTypeInfoInChars(Ty);
+
+ // The alignment of things in the argument area is never larger than
+ // StackAlignInBytes.
+ TyInfo.second =
+ std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes));
+
+ // MinABIStackAlignInBytes is the size of argument slots on the stack.
+ CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
+
+ Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
+ TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
+
+ // If there was a promotion, "unpromote" into a temporary.
+ // TODO: can we just use a pointer into a subset of the original slot?
+ if (DidPromote) {
+ Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
+ llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
+
+ // Truncate down to the right width.
+ llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
+ : CGF.IntPtrTy);
+ llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
+ if (OrigTy->isPointerType())
+ V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
+
+ CGF.Builder.CreateStore(V, Temp);
+ Addr = Temp;
+ }
+
+ return Addr;
+}
+
+bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
+ int TySize = getContext().getTypeSize(Ty);
+
+ // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
+ if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
+ return true;
+
+ return false;
+}
+
+bool
+MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+  // This information comes from gcc's implementation, which seems to be
+  // as canonical as it gets.
+
+ // Everything on MIPS is 4 bytes. Double-precision FP registers
+ // are aliased to pairs of single-precision FP registers.
+ llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
+
+ // 0-31 are the general purpose registers, $0 - $31.
+ // 32-63 are the floating-point registers, $f0 - $f31.
+ // 64 and 65 are the multiply/divide registers, $hi and $lo.
+ // 66 is the (notional, I think) register for signal-handler return.
+ AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
+
+ // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
+ // They are one bit wide and ignored here.
+
+ // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
+ // (coprocessor 1 is the FP unit)
+ // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
+ // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
+ // 176-181 are the DSP accumulator registers.
+ AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
+// Currently subclassed only to implement custom OpenCL C function attribute
+// handling.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
+public:
+ TCETargetCodeGenInfo(CodeGenTypes &CGT)
+ : DefaultTargetCodeGenInfo(CGT) {}
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const override;
+};
+
+void TCETargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD) return;
+
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ if (M.getLangOpts().OpenCL) {
+ if (FD->hasAttr<OpenCLKernelAttr>()) {
+ // OpenCL C Kernel functions are not subject to inlining
+ F->addFnAttr(llvm::Attribute::NoInline);
+ const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
+ if (Attr) {
+ // Convert the reqd_work_group_size() attributes to metadata.
+ llvm::LLVMContext &Context = F->getContext();
+ llvm::NamedMDNode *OpenCLMetadata =
+ M.getModule().getOrInsertNamedMetadata(
+ "opencl.kernel_wg_size_info");
+
+ SmallVector<llvm::Metadata *, 5> Operands;
+ Operands.push_back(llvm::ConstantAsMetadata::get(F));
+
+ Operands.push_back(
+ llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
+ M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
+ Operands.push_back(
+ llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
+ M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
+ Operands.push_back(
+ llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
+ M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));
+
+ // Add a boolean constant operand for "required" (true) or "hint"
+ // (false) for implementing the work_group_size_hint attr later.
+ // Currently always true as the hint is not yet implemented.
+ Operands.push_back(
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
+ OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
+ }
+ }
+ }
+}
+
+}
+
+//===----------------------------------------------------------------------===//
+// Hexagon ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class HexagonABIInfo : public ABIInfo {
+public:
+ HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+private:
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
+ :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ return 29;
+ }
+};
+
+}
+
+void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+}
+
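+// Illustrative: a 24-bit aggregate is widened and passed directly as i32; a
+// 96-bit aggregate exceeds 64 bits and is passed indirectly (byval).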
+ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ // Ignore empty records.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size > 64)
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+ // Pass in the smallest viable integer type.
+ else if (Size > 32)
+ return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
+ else if (Size > 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ else if (Size > 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ else
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+}
+
+ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ // Large vector types should be returned via memory.
+ if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
+ return getNaturalAlignIndirect(RetTy);
+
+ if (!isAggregateTypeForABI(RetTy)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ if (isEmptyRecord(getContext(), RetTy, true))
+ return ABIArgInfo::getIgnore();
+
+ // Aggregates <= 8 bytes are returned in r0; other aggregates
+ // are returned indirectly.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size <= 64) {
+ // Return in the smallest viable integer type.
+ if (Size <= 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ if (Size <= 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ if (Size <= 32)
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
+ }
+
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
+}
+
+Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+  // FIXME: Someone needs to audit that this handles alignment correctly.
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
+ getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(4),
+ /*AllowHigherAlign*/ true);
+}
+
+//===----------------------------------------------------------------------===//
+// AMDGPU ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const override;
+};
+
+}
+
+void AMDGPUTargetCodeGenInfo::setTargetAttributes(
+ const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const {
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD)
+ return;
+
+ if (const auto Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
+ llvm::Function *F = cast<llvm::Function>(GV);
+ uint32_t NumVGPR = Attr->getNumVGPR();
+ if (NumVGPR != 0)
+ F->addFnAttr("amdgpu_num_vgpr", llvm::utostr(NumVGPR));
+ }
+
+ if (const auto Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
+ llvm::Function *F = cast<llvm::Function>(GV);
+ unsigned NumSGPR = Attr->getNumSGPR();
+ if (NumSGPR != 0)
+ F->addFnAttr("amdgpu_num_sgpr", llvm::utostr(NumSGPR));
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// SPARC v9 ABI Implementation.
+// Based on the SPARC Compliance Definition version 2.4.1.
+//
+// Function arguments are mapped to a nominal "parameter array" and promoted
+// to registers depending on their type. Each argument occupies 8 or 16 bytes
+// registers depending on their type. Each argument occupies 8 or 16 bytes in
+// the array, structs larger than 16 bytes are passed indirectly.
+//
+// One case requires special care:
+//
+// struct mixed {
+// int i;
+// float f;
+// };
+//
+// When a struct mixed is passed by value, it only occupies 8 bytes in the
+// parameter array, but the int is passed in an integer register, and the float
+// is passed in a floating point register. This is represented as two arguments
+// with the LLVM IR inreg attribute:
+//
+// declare void f(i32 inreg %i, float inreg %f)
+//
+// The code generator will only allocate 4 bytes from the parameter array for
+// the inreg arguments. All other arguments are allocated a multiple of 8
+// bytes.
+//
+namespace {
+class SparcV9ABIInfo : public ABIInfo {
+public:
+ SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+private:
+ ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
+ void computeInfo(CGFunctionInfo &FI) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ // Coercion type builder for structs passed in registers. The coercion type
+ // serves two purposes:
+ //
+ // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
+ // in registers.
+ // 2. Expose aligned floating point elements as first-level elements, so the
+ // code generator knows to pass them in floating point registers.
+ //
+ // We also compute the InReg flag which indicates that the struct contains
+ // aligned 32-bit floats.
+ //
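+  // For the 'struct mixed' shown above, the builder yields { i32, float }
+  // with InReg set, matching the two-inreg-argument lowering illustrated
+  // earlier.
+  //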
+ struct CoerceBuilder {
+ llvm::LLVMContext &Context;
+ const llvm::DataLayout &DL;
+ SmallVector<llvm::Type*, 8> Elems;
+ uint64_t Size;
+ bool InReg;
+
+ CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
+ : Context(c), DL(dl), Size(0), InReg(false) {}
+
+ // Pad Elems with integers until Size is ToSize.
+ void pad(uint64_t ToSize) {
+ assert(ToSize >= Size && "Cannot remove elements");
+ if (ToSize == Size)
+ return;
+
+ // Finish the current 64-bit word.
+ uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64);
+ if (Aligned > Size && Aligned <= ToSize) {
+ Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
+ Size = Aligned;
+ }
+
+ // Add whole 64-bit words.
+ while (Size + 64 <= ToSize) {
+ Elems.push_back(llvm::Type::getInt64Ty(Context));
+ Size += 64;
+ }
+
+ // Final in-word padding.
+ if (Size < ToSize) {
+ Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
+ Size = ToSize;
+ }
+ }
+
+ // Add a floating point element at Offset.
+ void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
+ // Unaligned floats are treated as integers.
+ if (Offset % Bits)
+ return;
+ // The InReg flag is only required if there are any floats < 64 bits.
+ if (Bits < 64)
+ InReg = true;
+ pad(Offset);
+ Elems.push_back(Ty);
+ Size = Offset + Bits;
+ }
+
+ // Add a struct type to the coercion type, starting at Offset (in bits).
+ void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
+ const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
+ for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
+ llvm::Type *ElemTy = StrTy->getElementType(i);
+ uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
+ switch (ElemTy->getTypeID()) {
+ case llvm::Type::StructTyID:
+ addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
+ break;
+ case llvm::Type::FloatTyID:
+ addFloat(ElemOffset, ElemTy, 32);
+ break;
+ case llvm::Type::DoubleTyID:
+ addFloat(ElemOffset, ElemTy, 64);
+ break;
+ case llvm::Type::FP128TyID:
+ addFloat(ElemOffset, ElemTy, 128);
+ break;
+ case llvm::Type::PointerTyID:
+ if (ElemOffset % 64 == 0) {
+ pad(ElemOffset);
+ Elems.push_back(ElemTy);
+ Size += 64;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ // Check if Ty is a usable substitute for the coercion type.
+ bool isUsableType(llvm::StructType *Ty) const {
+ return llvm::makeArrayRef(Elems) == Ty->elements();
+ }
+
+ // Get the coercion type as a literal struct type.
+ llvm::Type *getType() const {
+ if (Elems.size() == 1)
+ return Elems.front();
+ else
+ return llvm::StructType::get(Context, Elems);
+ }
+ };
+};
+} // end anonymous namespace
+
+ABIArgInfo
+SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
+ if (Ty->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // Anything too big to fit in registers is passed with an explicit indirect
+ // pointer / sret pointer.
+ if (Size > SizeLimit)
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Integer types smaller than a register are extended.
+ if (Size < 64 && Ty->isIntegerType())
+ return ABIArgInfo::getExtend();
+
+ // Other non-aggregates go in registers.
+ if (!isAggregateTypeForABI(Ty))
+ return ABIArgInfo::getDirect();
+
+ // If a C++ object has either a non-trivial copy constructor or a non-trivial
+ // destructor, it is passed with an explicit indirect pointer / sret pointer.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ // This is a small aggregate type that should be passed in registers.
+ // Build a coercion type from the LLVM struct type.
+ llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
+ if (!StrTy)
+ return ABIArgInfo::getDirect();
+
+ CoerceBuilder CB(getVMContext(), getDataLayout());
+ CB.addStruct(0, StrTy);
+ CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64));
+
+ // Try to use the original type for coercion.
+ llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
+
+ if (CB.InReg)
+ return ABIArgInfo::getDirectInReg(CoerceTy);
+ else
+ return ABIArgInfo::getDirect(CoerceTy);
+}
+
+Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ ABIArgInfo AI = classifyType(Ty, 16 * 8);
+ llvm::Type *ArgTy = CGT.ConvertType(Ty);
+ if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
+ AI.setCoerceToType(ArgTy);
+
+ CharUnits SlotSize = CharUnits::fromQuantity(8);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
+ llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
+
+ auto TypeInfo = getContext().getTypeInfoInChars(Ty);
+
+ Address ArgAddr = Address::invalid();
+ CharUnits Stride;
+ switch (AI.getKind()) {
+ case ABIArgInfo::Expand:
+ case ABIArgInfo::InAlloca:
+ llvm_unreachable("Unsupported ABI kind for va_arg");
+
+ case ABIArgInfo::Extend: {
+ Stride = SlotSize;
+ CharUnits Offset = SlotSize - TypeInfo.first;
+ ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
+ break;
+ }
+
+ case ABIArgInfo::Direct: {
+ auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
+ Stride = CharUnits::fromQuantity(AllocSize).RoundUpToAlignment(SlotSize);
+ ArgAddr = Addr;
+ break;
+ }
+
+ case ABIArgInfo::Indirect:
+ Stride = SlotSize;
+ ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
+ ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
+ TypeInfo.second);
+ break;
+
+ case ABIArgInfo::Ignore:
+ return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second);
+ }
+
+ // Update VAList.
+ llvm::Value *NextPtr =
+ Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), Stride, "ap.next");
+ Builder.CreateStore(NextPtr, VAListAddr);
+
+ return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
+}
+
+void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
+ for (auto &I : FI.arguments())
+ I.info = classifyType(I.type, 16 * 8);
+}
+
+namespace {
+class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ return 14;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+};
+} // end anonymous namespace
+
+bool
+SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ // This is calculated from the LLVM and GCC tables and verified
+ // against gcc output. AFAIK all ABIs use the same encoding.
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::IntegerType *i8 = CGF.Int8Ty;
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+
+ // 0-31: the 8-byte general-purpose registers
+ AssignToArrayRange(Builder, Address, Eight8, 0, 31);
+
+ // 32-63: f0-31, the 4-byte floating-point registers
+ AssignToArrayRange(Builder, Address, Four8, 32, 63);
+
+ // Y = 64
+ // PSR = 65
+ // WIM = 66
+ // TBR = 67
+ // PC = 68
+ // NPC = 69
+ // FSR = 70
+ // CSR = 71
+ AssignToArrayRange(Builder, Address, Eight8, 64, 71);
+
+ // 72-87: d0-15, the 8-byte floating-point registers
+ AssignToArrayRange(Builder, Address, Eight8, 72, 87);
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// XCore ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+/// A SmallStringEnc instance is used to build up the TypeString by passing
+/// it by reference between functions that append to it.
+typedef llvm::SmallString<128> SmallStringEnc;
+
+/// TypeStringCache caches the meta encodings of Types.
+///
+/// The reason for caching TypeStrings is twofold:
+/// 1. To cache a type's encoding for later uses;
+/// 2. As a means to break recursive member type inclusion.
+///
+/// A cache Entry can have a Status of:
+/// NonRecursive: The type encoding is not recursive;
+/// Recursive: The type encoding is recursive;
+/// Incomplete: An incomplete TypeString;
+/// IncompleteUsed: An incomplete TypeString that has been used in a
+/// Recursive type encoding.
+///
+/// A NonRecursive entry will have all of its sub-members expanded as fully
+/// as possible. Whilst it may contain types which are recursive, the type
+/// itself is not recursive and thus its encoding may be safely used whenever
+/// the type is encountered.
+///
+/// A Recursive entry will have all of its sub-members expanded as fully as
+/// possible. The type itself is recursive and it may contain other types which
+/// are recursive. The Recursive encoding must not be used during the expansion
+/// of a recursive type's recursive branch. For simplicity the code uses
+/// IncompleteCount to reject all usage of Recursive encodings for member types.
+///
+/// An Incomplete entry is always a RecordType and only encodes its
+/// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
+/// are placed into the cache during type expansion as a means to identify and
+/// handle recursive inclusion of types as sub-members. If there is recursion
+/// the entry becomes IncompleteUsed.
+///
+/// During the expansion of a RecordType's members:
+///
+/// If the cache contains a NonRecursive encoding for the member type, the
+/// cached encoding is used;
+///
+/// If the cache contains a Recursive encoding for the member type, the
+/// cached encoding is 'Swapped' out, as it may be incorrect, and...
+///
+/// If the member is a RecordType, an Incomplete encoding is placed into the
+/// cache to break potential recursive inclusion of itself as a sub-member;
+///
+/// Once a member RecordType has been expanded, its temporary incomplete
+/// entry is removed from the cache. If a Recursive encoding was swapped out
+/// it is swapped back in;
+///
+/// If an incomplete entry is used to expand a sub-member, the incomplete
+/// entry is marked as IncompleteUsed. The cache keeps count of how many
+/// IncompleteUsed entries it currently contains in IncompleteUsedCount;
+///
+/// If a member's encoding is found to be NonRecursive or Recursive (viz:
+/// IncompleteUsedCount == 0), the member's encoding is added to the cache.
+/// Otherwise the member is part of a recursive type, and thus the recursion
+/// has been exited too soon for the encoding to be correct for the member.
+///
+class TypeStringCache {
+ enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
+ struct Entry {
+ std::string Str; // The encoded TypeString for the type.
+ enum Status State; // Information about the encoding in 'Str'.
+ std::string Swapped; // A temporary place holder for a Recursive encoding
+ // during the expansion of RecordType's members.
+ };
+ std::map<const IdentifierInfo *, struct Entry> Map;
+ unsigned IncompleteCount; // Number of Incomplete entries in the Map.
+ unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
+public:
+ TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
+ void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
+ bool removeIncomplete(const IdentifierInfo *ID);
+ void addIfComplete(const IdentifierInfo *ID, StringRef Str,
+ bool IsRecursive);
+ StringRef lookupStr(const IdentifierInfo *ID);
+};
+
+/// TypeString encodings for enum & union fields must be ordered.
+/// FieldEncoding is a helper for this ordering process.
+class FieldEncoding {
+ bool HasName;
+ std::string Enc;
+public:
+ FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
+ StringRef str() {return Enc.c_str();}
+ bool operator<(const FieldEncoding &rhs) const {
+ if (HasName != rhs.HasName) return HasName;
+ return Enc < rhs.Enc;
+ }
+};
+
+class XCoreABIInfo : public DefaultABIInfo {
+public:
+ XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
+ mutable TypeStringCache TSC;
+public:
+ XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
+ :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
+ void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const override;
+};
+
+} // End anonymous namespace.
+
+Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ // Get the VAList.
+ CharUnits SlotSize = CharUnits::fromQuantity(4);
+ Address AP(Builder.CreateLoad(VAListAddr), SlotSize);
+
+ // Handle the argument.
+ ABIArgInfo AI = classifyArgumentType(Ty);
+ CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
+ llvm::Type *ArgTy = CGT.ConvertType(Ty);
+ if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
+ AI.setCoerceToType(ArgTy);
+ llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
+
+ Address Val = Address::invalid();
+ CharUnits ArgSize = CharUnits::Zero();
+ switch (AI.getKind()) {
+ case ABIArgInfo::Expand:
+ case ABIArgInfo::InAlloca:
+ llvm_unreachable("Unsupported ABI kind for va_arg");
+ case ABIArgInfo::Ignore:
+ Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
+ ArgSize = CharUnits::Zero();
+ break;
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct:
+ Val = Builder.CreateBitCast(AP, ArgPtrTy);
+ ArgSize = CharUnits::fromQuantity(
+ getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
+ ArgSize = ArgSize.RoundUpToAlignment(SlotSize);
+ break;
+ case ABIArgInfo::Indirect:
+ Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
+ Val = Address(Builder.CreateLoad(Val), TypeAlign);
+ ArgSize = SlotSize;
+ break;
+ }
+
+ // Increment the VAList.
+ if (!ArgSize.isZero()) {
+ llvm::Value *APN =
+ Builder.CreateConstInBoundsByteGEP(AP.getPointer(), ArgSize);
+ Builder.CreateStore(APN, VAListAddr);
+ }
+
+ return Val;
+}
+
+/// During the expansion of a RecordType, an incomplete TypeString is placed
+/// into the cache as a means to identify and break recursion.
+/// If there is a Recursive encoding in the cache, it is swapped out and will
+/// be reinserted by removeIncomplete().
+/// All other types of encoding should have been used rather than arriving here.
+void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
+ std::string StubEnc) {
+ if (!ID)
+ return;
+ Entry &E = Map[ID];
+  assert((E.Str.empty() || E.State == Recursive) &&
+         "Incorrect use of addIncomplete");
+ assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
+ E.Swapped.swap(E.Str); // swap out the Recursive
+ E.Str.swap(StubEnc);
+ E.State = Incomplete;
+ ++IncompleteCount;
+}
+
+/// Once the RecordType has been expanded, the temporary incomplete TypeString
+/// must be removed from the cache.
+/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
+/// Returns true if the RecordType was defined recursively.
+bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
+ if (!ID)
+ return false;
+ auto I = Map.find(ID);
+ assert(I != Map.end() && "Entry not present");
+ Entry &E = I->second;
+ assert( (E.State == Incomplete ||
+ E.State == IncompleteUsed) &&
+ "Entry must be an incomplete type");
+ bool IsRecursive = false;
+ if (E.State == IncompleteUsed) {
+ // We made use of our Incomplete encoding, thus we are recursive.
+ IsRecursive = true;
+ --IncompleteUsedCount;
+ }
+ if (E.Swapped.empty())
+ Map.erase(I);
+ else {
+ // Swap the Recursive back.
+ E.Swapped.swap(E.Str);
+ E.Swapped.clear();
+ E.State = Recursive;
+ }
+ --IncompleteCount;
+ return IsRecursive;
+}
+
+/// Add the encoded TypeString to the cache only if it is NonRecursive or
+/// Recursive (viz: all sub-members were expanded as fully as possible).
+void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
+ bool IsRecursive) {
+ if (!ID || IncompleteUsedCount)
+    return; // No key, or it is an incomplete sub-type, so don't add.
+ Entry &E = Map[ID];
+ if (IsRecursive && !E.Str.empty()) {
+ assert(E.State==Recursive && E.Str.size() == Str.size() &&
+ "This is not the same Recursive entry");
+    // The parent container was not recursive after all, so we could have
+    // used this Recursive sub-member entry, but we assumed the worst when
+    // we started (viz: IncompleteCount != 0).
+ return;
+ }
+ assert(E.Str.empty() && "Entry already present");
+ E.Str = Str.str();
+ E.State = IsRecursive? Recursive : NonRecursive;
+}
+
+/// Return a cached TypeString encoding for the ID. If there isn't one, or we
+/// are recursively expanding a type (IncompleteCount != 0) and the cached
+/// encoding is Recursive, return an empty StringRef.
+StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
+ if (!ID)
+ return StringRef(); // We have no key.
+ auto I = Map.find(ID);
+ if (I == Map.end())
+ return StringRef(); // We have no encoding.
+ Entry &E = I->second;
+ if (E.State == Recursive && IncompleteCount)
+ return StringRef(); // We don't use Recursive encodings for member types.
+
+ if (E.State == Incomplete) {
+ // The incomplete type is being used to break out of recursion.
+ E.State = IncompleteUsed;
+ ++IncompleteUsedCount;
+ }
+ return E.Str.c_str();
+}
+
+/// The XCore ABI includes a type information section that communicates symbol
+/// type information to the linker. The linker uses this information to verify
+/// safety/correctness of things such as array bounds and pointers et al.
+/// The ABI only requires C (and XC) language modules to emit TypeStrings.
+/// This type information (TypeString) is emitted into meta data for all global
+/// symbols: definitions, declarations, functions & variables.
+///
+/// The TypeString carries type, qualifier, name, size & value details.
+/// Please see 'Tools Development Guide' section 2.16.2 for format details:
+/// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
+/// The output is tested by test/CodeGen/xcore-stringtype.c.
+///
+static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
+ CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);
+
+/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
+void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const {
+ SmallStringEnc Enc;
+ if (getTypeString(Enc, D, CGM, TSC)) {
+ llvm::LLVMContext &Ctx = CGM.getModule().getContext();
+ llvm::SmallVector<llvm::Metadata *, 2> MDVals;
+ MDVals.push_back(llvm::ConstantAsMetadata::get(GV));
+ MDVals.push_back(llvm::MDString::get(Ctx, Enc.str()));
+ llvm::NamedMDNode *MD =
+ CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
+ MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
+ }
+}
+
+static bool appendType(SmallStringEnc &Enc, QualType QType,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC);
+
+/// Helper function for appendRecordType().
+/// Builds a SmallVector containing the encoded field types in declaration
+/// order.
+static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
+ const RecordDecl *RD,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC) {
+ for (const auto *Field : RD->fields()) {
+ SmallStringEnc Enc;
+ Enc += "m(";
+ Enc += Field->getName();
+ Enc += "){";
+ if (Field->isBitField()) {
+ Enc += "b(";
+ llvm::raw_svector_ostream OS(Enc);
+ OS << Field->getBitWidthValue(CGM.getContext());
+ Enc += ':';
+ }
+ if (!appendType(Enc, Field->getType(), CGM, TSC))
+ return false;
+ if (Field->isBitField())
+ Enc += ')';
+ Enc += '}';
+ FE.emplace_back(!Field->getName().empty(), Enc);
+ }
+ return true;
+}
+
+/// Appends structure and union types to Enc and adds encoding to cache.
+/// Recursively calls appendType (via extractFieldType) for each field.
+/// Union types have their fields ordered according to the ABI.
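+/// Illustrative (unqualified types): 'struct S { int a; }' encodes as
+/// "s(S){m(a){si}}", while a struct with no visible definition encodes as
+/// just "s(S){}".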
+static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC, const IdentifierInfo *ID) {
+ // Append the cached TypeString if we have one.
+ StringRef TypeString = TSC.lookupStr(ID);
+ if (!TypeString.empty()) {
+ Enc += TypeString;
+ return true;
+ }
+
+ // Start to emit an incomplete TypeString.
+ size_t Start = Enc.size();
+ Enc += (RT->isUnionType()? 'u' : 's');
+ Enc += '(';
+ if (ID)
+ Enc += ID->getName();
+ Enc += "){";
+
+ // We collect all encoded fields and order as necessary.
+ bool IsRecursive = false;
+ const RecordDecl *RD = RT->getDecl()->getDefinition();
+ if (RD && !RD->field_empty()) {
+ // An incomplete TypeString stub is placed in the cache for this RecordType
+ // so that recursive calls to this RecordType will use it whilst building a
+ // complete TypeString for this RecordType.
+ SmallVector<FieldEncoding, 16> FE;
+ std::string StubEnc(Enc.substr(Start).str());
+ StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
+ TSC.addIncomplete(ID, std::move(StubEnc));
+ if (!extractFieldType(FE, RD, CGM, TSC)) {
+ (void) TSC.removeIncomplete(ID);
+ return false;
+ }
+ IsRecursive = TSC.removeIncomplete(ID);
+ // The ABI requires unions to be sorted but not structures.
+ // See FieldEncoding::operator< for sort algorithm.
+ if (RT->isUnionType())
+ std::sort(FE.begin(), FE.end());
+ // We can now complete the TypeString.
+ unsigned E = FE.size();
+ for (unsigned I = 0; I != E; ++I) {
+ if (I)
+ Enc += ',';
+ Enc += FE[I].str();
+ }
+ }
+ Enc += '}';
+ TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
+ return true;
+}
+
+/// Appends enum types to Enc and adds the encoding to the cache.
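+/// Illustrative: 'enum E { A = 7 }' encodes as "e(E){m(A){7}}".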
+static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
+ TypeStringCache &TSC,
+ const IdentifierInfo *ID) {
+ // Append the cached TypeString if we have one.
+ StringRef TypeString = TSC.lookupStr(ID);
+ if (!TypeString.empty()) {
+ Enc += TypeString;
+ return true;
+ }
+
+ size_t Start = Enc.size();
+ Enc += "e(";
+ if (ID)
+ Enc += ID->getName();
+ Enc += "){";
+
+ // We collect all encoded enumerations and order them alphanumerically.
+ if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
+ SmallVector<FieldEncoding, 16> FE;
+ for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
+ ++I) {
+ SmallStringEnc EnumEnc;
+ EnumEnc += "m(";
+ EnumEnc += I->getName();
+ EnumEnc += "){";
+ I->getInitVal().toString(EnumEnc);
+ EnumEnc += '}';
+ FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
+ }
+ std::sort(FE.begin(), FE.end());
+ unsigned E = FE.size();
+ for (unsigned I = 0; I != E; ++I) {
+ if (I)
+ Enc += ',';
+ Enc += FE[I].str();
+ }
+ }
+ Enc += '}';
+ TSC.addIfComplete(ID, Enc.substr(Start), false);
+ return true;
+}
+
+/// Appends type's qualifier to Enc.
+/// This is done prior to appending the type's encoding.
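+/// E.g. 'const volatile int' gains the prefix "cv:" (Lookup == 5) before
+/// its builtin encoding, giving "cv:si".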
+static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
+ // Qualifiers are emitted in alphabetical order.
+ static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"};
+ int Lookup = 0;
+ if (QT.isConstQualified())
+ Lookup += 1<<0;
+ if (QT.isRestrictQualified())
+ Lookup += 1<<1;
+ if (QT.isVolatileQualified())
+ Lookup += 1<<2;
+ Enc += Table[Lookup];
+}
+
+/// Appends built-in types to Enc.
+static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
+ const char *EncType;
+ switch (BT->getKind()) {
+ case BuiltinType::Void:
+ EncType = "0";
+ break;
+ case BuiltinType::Bool:
+ EncType = "b";
+ break;
+ case BuiltinType::Char_U:
+ EncType = "uc";
+ break;
+ case BuiltinType::UChar:
+ EncType = "uc";
+ break;
+ case BuiltinType::SChar:
+ EncType = "sc";
+ break;
+ case BuiltinType::UShort:
+ EncType = "us";
+ break;
+ case BuiltinType::Short:
+ EncType = "ss";
+ break;
+ case BuiltinType::UInt:
+ EncType = "ui";
+ break;
+ case BuiltinType::Int:
+ EncType = "si";
+ break;
+ case BuiltinType::ULong:
+ EncType = "ul";
+ break;
+ case BuiltinType::Long:
+ EncType = "sl";
+ break;
+ case BuiltinType::ULongLong:
+ EncType = "ull";
+ break;
+ case BuiltinType::LongLong:
+ EncType = "sll";
+ break;
+ case BuiltinType::Float:
+ EncType = "ft";
+ break;
+ case BuiltinType::Double:
+ EncType = "d";
+ break;
+ case BuiltinType::LongDouble:
+ EncType = "ld";
+ break;
+ default:
+ return false;
+ }
+ Enc += EncType;
+ return true;
+}
+
+/// Appends a pointer encoding to Enc before calling appendType for the pointee.
+static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC) {
+ Enc += "p(";
+ if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
+ return false;
+ Enc += ')';
+ return true;
+}
+
+/// Appends array encoding to Enc before calling appendType for the element.
+static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
+ const ArrayType *AT,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC, StringRef NoSizeEnc) {
+ if (AT->getSizeModifier() != ArrayType::Normal)
+ return false;
+ Enc += "a(";
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
+ CAT->getSize().toStringUnsigned(Enc);
+ else
+ Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
+ Enc += ':';
+ // The Qualifiers should be attached to the type rather than the array.
+ appendQualifier(Enc, QT);
+ if (!appendType(Enc, AT->getElementType(), CGM, TSC))
+ return false;
+ Enc += ')';
+ return true;
+}
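
Pointer and array encodings compose by nesting: the array form is a(size:element), with the size left empty or set to '*' when unknown, and the pointer form is p(pointee). A standalone trace of two cases:

    // Standalone sketch, not part of this patch.
    #include <iostream>
    #include <string>
    std::string pointerTo(const std::string &Pointee) {
      return "p(" + Pointee + ")";
    }
    std::string arrayOf(unsigned N, const std::string &Elem) {
      return "a(" + std::to_string(N) + ":" + Elem + ")";
    }
    int main() {
      std::cout << arrayOf(4, pointerTo("si")) << '\n'; // int *a[4]    -> a(4:p(si))
      std::cout << pointerTo("c:si") << '\n';           // const int *p -> p(c:si)
    }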
+
+/// Appends a function encoding to Enc, calling appendType for the return type
+/// and the arguments.
+static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC) {
+ Enc += "f{";
+ if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
+ return false;
+ Enc += "}(";
+ if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
+ // N.B. we are only interested in the adjusted param types.
+ auto I = FPT->param_type_begin();
+ auto E = FPT->param_type_end();
+ if (I != E) {
+ do {
+ if (!appendType(Enc, *I, CGM, TSC))
+ return false;
+ ++I;
+ if (I != E)
+ Enc += ',';
+ } while (I != E);
+ if (FPT->isVariadic())
+ Enc += ",va";
+ } else {
+ if (FPT->isVariadic())
+ Enc += "va";
+ else
+ Enc += '0';
+ }
+ }
+ Enc += ')';
+ return true;
+}
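
The function encoding above puts the return type in braces and the adjusted parameters in parentheses, with "va" marking a variadic tail and '0' standing in for an empty prototype. A standalone trace:

    // Standalone sketch, not part of this patch.
    #include <iostream>
    #include <string>
    #include <vector>
    std::string fnEnc(const std::string &Ret,
                      const std::vector<std::string> &Params, bool Variadic) {
      std::string Enc = "f{" + Ret + "}(";
      for (size_t I = 0; I != Params.size(); ++I) {
        if (I) Enc += ',';
        Enc += Params[I];
      }
      if (Params.empty())
        Enc += Variadic ? "va" : "0";
      else if (Variadic)
        Enc += ",va";
      return Enc + ")";
    }
    int main() {
      // int f(signed char, ...) -> f{si}(sc,va)
      std::cout << fnEnc("si", {"sc"}, true) << '\n';
      // void f(void)            -> f{0}(0)
      std::cout << fnEnc("0", {}, false) << '\n';
    }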
+
+/// Handles the type's qualifier before dispatching a call to handle specific
+/// type encodings.
+static bool appendType(SmallStringEnc &Enc, QualType QType,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC) {
+
+ QualType QT = QType.getCanonicalType();
+
+ if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
+ // The Qualifiers should be attached to the type rather than the array.
+ // Thus we don't call appendQualifier() here.
+ return appendArrayType(Enc, QT, AT, CGM, TSC, "");
+
+ appendQualifier(Enc, QT);
+
+ if (const BuiltinType *BT = QT->getAs<BuiltinType>())
+ return appendBuiltinType(Enc, BT);
+
+ if (const PointerType *PT = QT->getAs<PointerType>())
+ return appendPointerType(Enc, PT, CGM, TSC);
+
+ if (const EnumType *ET = QT->getAs<EnumType>())
+ return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());
+
+ if (const RecordType *RT = QT->getAsStructureType())
+ return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
+
+ if (const RecordType *RT = QT->getAsUnionType())
+ return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
+
+ if (const FunctionType *FT = QT->getAs<FunctionType>())
+ return appendFunctionType(Enc, FT, CGM, TSC);
+
+ return false;
+}
+
+static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
+ CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
+ if (!D)
+ return false;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->getLanguageLinkage() != CLanguageLinkage)
+ return false;
+ return appendType(Enc, FD->getType(), CGM, TSC);
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->getLanguageLinkage() != CLanguageLinkage)
+ return false;
+ QualType QT = VD->getType().getCanonicalType();
+ if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
+ // Global ArrayTypes are given a size of '*' if the size is unknown.
+ // The Qualifiers should be attached to the type rather than the array.
+ // Thus we don't call appendQualifier() here.
+ return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
+ }
+ return appendType(Enc, QT, CGM, TSC);
+ }
+ return false;
+}
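
Putting the pieces together: getTypeString above only encodes declarations with C language linkage, and gives a global array of unknown bound the '*' size marker. Expected strings for a few declarations, assuming the encodings traced in the sketches above:

    // Standalone sketch, not part of this patch.
    #include <iostream>
    int main() {
      std::cout << "extern int g[];  ->  a(*:si)\n"  // unknown bound uses '*'
                << "int g[4];        ->  a(4:si)\n"
                << "int f(void);     ->  f{si}(0)\n";
    }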
+
+
+//===----------------------------------------------------------------------===//
+// Driver code
+//===----------------------------------------------------------------------===//
+
+const llvm::Triple &CodeGenModule::getTriple() const {
+ return getTarget().getTriple();
+}
+
+bool CodeGenModule::supportsCOMDAT() const {
+ return !getTriple().isOSBinFormatMachO();
+}
+
+const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
+ if (TheTargetCodeGenInfo)
+ return *TheTargetCodeGenInfo;
+
+ const llvm::Triple &Triple = getTarget().getTriple();
+ switch (Triple.getArch()) {
+ default:
+ return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
+
+ case llvm::Triple::le32:
+ return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ if (Triple.getOS() == llvm::Triple::NaCl)
+ return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
+ return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));
+
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));
+
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be: {
+ AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
+ if (getTarget().getABI() == "darwinpcs")
+ Kind = AArch64ABIInfo::DarwinPCS;
+
+ return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types, Kind));
+ }
+
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ return *(TheTargetCodeGenInfo = new WebAssemblyTargetCodeGenInfo(Types));
+
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ {
+ if (Triple.getOS() == llvm::Triple::Win32) {
+ TheTargetCodeGenInfo =
+ new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP);
+ return *TheTargetCodeGenInfo;
+ }
+
+ ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
+ StringRef ABIStr = getTarget().getABI();
+ if (ABIStr == "apcs-gnu")
+ Kind = ARMABIInfo::APCS;
+ else if (ABIStr == "aapcs16")
+ Kind = ARMABIInfo::AAPCS16_VFP;
+ else if (CodeGenOpts.FloatABI == "hard" ||
+ (CodeGenOpts.FloatABI != "soft" &&
+ Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
+ Kind = ARMABIInfo::AAPCS_VFP;
+
+ return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind));
+ }
+
+ case llvm::Triple::ppc:
+ return *(TheTargetCodeGenInfo =
+ new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft"));
+ case llvm::Triple::ppc64:
+ if (Triple.isOSBinFormatELF()) {
+ PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
+ if (getTarget().getABI() == "elfv2")
+ Kind = PPC64_SVR4_ABIInfo::ELFv2;
+ bool HasQPX = getTarget().getABI() == "elfv1-qpx";
+
+ return *(TheTargetCodeGenInfo =
+ new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
+ } else
+ return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
+ case llvm::Triple::ppc64le: {
+ assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
+ PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
+ if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
+ Kind = PPC64_SVR4_ABIInfo::ELFv1;
+ bool HasQPX = getTarget().getABI() == "elfv1-qpx";
+
+ return *(TheTargetCodeGenInfo =
+ new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
+ }
+
+ case llvm::Triple::nvptx:
+ case llvm::Triple::nvptx64:
+ return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));
+
+ case llvm::Triple::msp430:
+ return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
+
+ case llvm::Triple::systemz: {
+ bool HasVector = getTarget().getABI() == "vector";
+ return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types,
+ HasVector));
+ }
+
+ case llvm::Triple::tce:
+ return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));
+
+ case llvm::Triple::x86: {
+ bool IsDarwinVectorABI = Triple.isOSDarwin();
+ bool RetSmallStructInRegABI =
+ X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
+ bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
+
+ if (Triple.getOS() == llvm::Triple::Win32) {
+ return *(TheTargetCodeGenInfo = new WinX86_32TargetCodeGenInfo(
+ Types, IsDarwinVectorABI, RetSmallStructInRegABI,
+ IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
+ } else {
+ return *(TheTargetCodeGenInfo = new X86_32TargetCodeGenInfo(
+ Types, IsDarwinVectorABI, RetSmallStructInRegABI,
+ IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
+ CodeGenOpts.FloatABI == "soft"));
+ }
+ }
+
+ case llvm::Triple::x86_64: {
+ StringRef ABI = getTarget().getABI();
+ X86AVXABILevel AVXLevel = (ABI == "avx512" ? X86AVXABILevel::AVX512 :
+ ABI == "avx" ? X86AVXABILevel::AVX :
+ X86AVXABILevel::None);
+
+ switch (Triple.getOS()) {
+ case llvm::Triple::Win32:
+ return *(TheTargetCodeGenInfo =
+ new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
+ case llvm::Triple::PS4:
+ return *(TheTargetCodeGenInfo =
+ new PS4TargetCodeGenInfo(Types, AVXLevel));
+ default:
+ return *(TheTargetCodeGenInfo =
+ new X86_64TargetCodeGenInfo(Types, AVXLevel));
+ }
+ }
+ case llvm::Triple::hexagon:
+ return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
+ case llvm::Triple::r600:
+ return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
+ case llvm::Triple::amdgcn:
+ return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
+ case llvm::Triple::sparcv9:
+ return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
+ case llvm::Triple::xcore:
+ return *(TheTargetCodeGenInfo = new XCoreTargetCodeGenInfo(Types));
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h
new file mode 100644
index 0000000..87b4704
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h
@@ -0,0 +1,224 @@
+//===---- TargetInfo.h - Encapsulate target details -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliance.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H
+#define LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H
+
+#include "CGValue.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+class Constant;
+class GlobalValue;
+class Type;
+class Value;
+}
+
+namespace clang {
+class ABIInfo;
+class Decl;
+
+namespace CodeGen {
+class CallArgList;
+class CodeGenModule;
+class CodeGenFunction;
+class CGFunctionInfo;
+}
+
+/// TargetCodeGenInfo - This class organizes various target-specific
+/// code generation issues, like target-specific attributes, builtins and so
+/// on.
+class TargetCodeGenInfo {
+ ABIInfo *Info;
+
+public:
+ // WARNING: Acquires the ownership of ABIInfo.
+ TargetCodeGenInfo(ABIInfo *info = nullptr) : Info(info) {}
+ virtual ~TargetCodeGenInfo();
+
+ /// getABIInfo() - Returns ABI info helper for the target.
+ const ABIInfo &getABIInfo() const { return *Info; }
+
+ /// setTargetAttributes - Provides a convenient hook to handle extra
+ /// target-specific attributes for the given global.
+ virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const {}
+
+ /// emitTargetMD - Provides a convenient hook to handle extra
+ /// target-specific metadata for the given global.
+ virtual void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const {}
+
+ /// Determines the size of struct _Unwind_Exception on this platform,
+ /// in 8-bit units. The Itanium ABI defines this as:
+ /// struct _Unwind_Exception {
+ /// uint64 exception_class;
+ /// _Unwind_Exception_Cleanup_Fn exception_cleanup;
+ /// uint64 private_1;
+ /// uint64 private_2;
+ /// };
+ virtual unsigned getSizeOfUnwindException() const;
+
+ /// Controls whether __builtin_extend_pointer should sign-extend
+ /// pointers to uint64_t or zero-extend them (the default). Has
+ /// no effect for targets:
+ /// - that have 64-bit pointers, or
+ /// - that cannot address through registers larger than pointers, or
+ /// - that implicitly ignore/truncate the top bits when addressing
+ /// through such registers.
+ virtual bool extendPointerWithSExt() const { return false; }
+
+ /// Determines the DWARF register number for the stack pointer, for
+ /// exception-handling purposes. Implements __builtin_dwarf_sp_column.
+ ///
+ /// Returns -1 if the operation is unsupported by this target.
+ virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ return -1;
+ }
+
+ /// Initializes the given DWARF EH register-size table, a char*.
+ /// Implements __builtin_init_dwarf_reg_size_table.
+ ///
+ /// Returns true if the operation is unsupported by this target.
+ virtual bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return true;
+ }
+
+ /// Performs the code-generation required to convert a return
+ /// address as stored by the system into the actual address of the
+ /// next instruction that will be executed.
+ ///
+ /// Used by __builtin_extract_return_addr().
+ virtual llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return Address;
+ }
+
+ /// Performs the code-generation required to convert the address
+ /// of an instruction into a return address suitable for storage
+ /// by the system in a return slot.
+ ///
+ /// Used by __builtin_frob_return_addr().
+ virtual llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return Address;
+ }
+
+ /// Corrects the low-level LLVM type for a given constraint and "usual"
+ /// type.
+ ///
+ /// \returns A pointer to a new LLVM type, possibly the same as the original
+ /// on success; 0 on failure.
+ virtual llvm::Type *adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ StringRef Constraint,
+ llvm::Type *Ty) const {
+ return Ty;
+ }
+
+ /// Adds constraints and types for result registers.
+ virtual void addReturnRegisterOutputs(
+ CodeGen::CodeGenFunction &CGF, CodeGen::LValue ReturnValue,
+ std::string &Constraints, std::vector<llvm::Type *> &ResultRegTypes,
+ std::vector<llvm::Type *> &ResultTruncRegTypes,
+ std::vector<CodeGen::LValue> &ResultRegDests, std::string &AsmString,
+ unsigned NumOutputs) const {}
+
+ /// doesReturnSlotInterfereWithArgs - Return true if the target uses an
+ /// argument slot for an 'sret' type.
+ virtual bool doesReturnSlotInterfereWithArgs() const { return true; }
+
+ /// Retrieve the address of a function to call immediately before
+ /// calling objc_retainAutoreleasedReturnValue. The
+ /// implementation of objc_autoreleaseReturnValue sniffs the
+ /// instruction stream following its return address to decide
+ /// whether it's a call to objc_retainAutoreleasedReturnValue.
+ /// This can be prohibitively expensive, depending on the
+ /// relocation model, and so on some targets it instead sniffs for
+ /// a particular instruction sequence.  This function returns
+ /// that instruction sequence in inline assembly, which will be
+ /// empty if none is required.
+ virtual StringRef getARCRetainAutoreleasedReturnValueMarker() const {
+ return "";
+ }
+
+ /// Return a constant used by UBSan as a signature to identify functions
+ /// possessing type information, or 0 if the platform is unsupported.
+ virtual llvm::Constant *
+ getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const {
+ return nullptr;
+ }
+
+ /// Determine whether a call to an unprototyped function under
+ /// the given calling convention should use the variadic
+ /// convention or the non-variadic convention.
+ ///
+ /// There's a good reason to make a platform's variadic calling
+ /// convention be different from its non-variadic calling
+ /// convention: the non-variadic arguments can be passed in
+ /// registers (better for performance), and the variadic arguments
+ /// can be passed on the stack (also better for performance). If
+ /// this is done, however, unprototyped functions *must* use the
+ /// non-variadic convention, because C99 states that a call
+ /// through an unprototyped function type must succeed if the
+ /// function was defined with a non-variadic prototype with
+ /// compatible parameters. Therefore, splitting the conventions
+ /// makes it impossible to call a variadic function through an
+ /// unprototyped type. Since function prototypes came out in the
+ /// late 1970s, this is probably an acceptable trade-off.
+ /// Nonetheless, not all platforms are willing to make it, and in
+ /// particular x86-64 bends over backwards to make the
+ /// conventions compatible.
+ ///
+ /// The default is false. This is correct whenever:
+ /// - the conventions are exactly the same, because it does not
+ /// matter and the resulting IR will be somewhat prettier in
+ /// certain cases; or
+ /// - the conventions are substantively different in how they pass
+ /// arguments, because in this case using the variadic convention
+ /// will lead to C99 violations.
+ ///
+ /// However, some platforms make the conventions identical except
+ /// for passing additional out-of-band information to a variadic
+ /// function: for example, x86-64 passes the number of SSE
+ /// arguments in %al. On these platforms, it is desirable to
+ /// call unprototyped functions using the variadic convention so
+ /// that unprototyped calls to varargs functions still succeed.
+ ///
+ /// Relatedly, platforms which pass the fixed arguments to this:
+ /// A foo(B, C, D);
+ /// differently than they would pass them to this:
+ /// A foo(B, C, D, ...);
+ /// may need to adjust the debugger-support code in Sema to do the
+ /// right thing when calling a function with no known signature.
+ virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args,
+ const FunctionNoProtoType *fnType) const;
+
+ /// Gets the linker options necessary to link a dependent library on this
+ /// platform.
+ virtual void getDependentLibraryOption(llvm::StringRef Lib,
+ llvm::SmallString<24> &Opt) const;
+
+ /// Gets the linker options necessary to detect object file mismatches on
+ /// this platform.
+ virtual void getDetectMismatchOption(llvm::StringRef Name,
+ llvm::StringRef Value,
+ llvm::SmallString<32> &Opt) const {}
+};
+} // namespace clang
+
+#endif // LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H
diff --git a/contrib/llvm/tools/clang/lib/Driver/Action.cpp b/contrib/llvm/tools/clang/lib/Driver/Action.cpp
new file mode 100644
index 0000000..49dccd2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Action.cpp
@@ -0,0 +1,165 @@
+//===--- Action.cpp - Abstract compilation steps --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Action.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+using namespace clang::driver;
+using namespace llvm::opt;
+
+Action::~Action() {
+ if (OwnsInputs) {
+ for (iterator it = begin(), ie = end(); it != ie; ++it)
+ delete *it;
+ }
+}
+
+const char *Action::getClassName(ActionClass AC) {
+ switch (AC) {
+ case InputClass: return "input";
+ case BindArchClass: return "bind-arch";
+ case CudaDeviceClass: return "cuda-device";
+ case CudaHostClass: return "cuda-host";
+ case PreprocessJobClass: return "preprocessor";
+ case PrecompileJobClass: return "precompiler";
+ case AnalyzeJobClass: return "analyzer";
+ case MigrateJobClass: return "migrator";
+ case CompileJobClass: return "compiler";
+ case BackendJobClass: return "backend";
+ case AssembleJobClass: return "assembler";
+ case LinkJobClass: return "linker";
+ case LipoJobClass: return "lipo";
+ case DsymutilJobClass: return "dsymutil";
+ case VerifyDebugInfoJobClass: return "verify-debug-info";
+ case VerifyPCHJobClass: return "verify-pch";
+ }
+
+ llvm_unreachable("invalid class");
+}
+
+void InputAction::anchor() {}
+
+InputAction::InputAction(const Arg &_Input, types::ID _Type)
+ : Action(InputClass, _Type), Input(_Input) {
+}
+
+void BindArchAction::anchor() {}
+
+BindArchAction::BindArchAction(std::unique_ptr<Action> Input,
+ const char *_ArchName)
+ : Action(BindArchClass, std::move(Input)), ArchName(_ArchName) {}
+
+void CudaDeviceAction::anchor() {}
+
+CudaDeviceAction::CudaDeviceAction(std::unique_ptr<Action> Input,
+ const char *ArchName, bool AtTopLevel)
+ : Action(CudaDeviceClass, std::move(Input)), GpuArchName(ArchName),
+ AtTopLevel(AtTopLevel) {}
+
+void CudaHostAction::anchor() {}
+
+CudaHostAction::CudaHostAction(std::unique_ptr<Action> Input,
+ const ActionList &DeviceActions)
+ : Action(CudaHostClass, std::move(Input)), DeviceActions(DeviceActions) {}
+
+CudaHostAction::~CudaHostAction() {
+ for (auto &DA : DeviceActions)
+ delete DA;
+}
+
+void JobAction::anchor() {}
+
+JobAction::JobAction(ActionClass Kind, std::unique_ptr<Action> Input,
+ types::ID Type)
+ : Action(Kind, std::move(Input), Type) {}
+
+JobAction::JobAction(ActionClass Kind, const ActionList &Inputs, types::ID Type)
+ : Action(Kind, Inputs, Type) {
+}
+
+void PreprocessJobAction::anchor() {}
+
+PreprocessJobAction::PreprocessJobAction(std::unique_ptr<Action> Input,
+ types::ID OutputType)
+ : JobAction(PreprocessJobClass, std::move(Input), OutputType) {}
+
+void PrecompileJobAction::anchor() {}
+
+PrecompileJobAction::PrecompileJobAction(std::unique_ptr<Action> Input,
+ types::ID OutputType)
+ : JobAction(PrecompileJobClass, std::move(Input), OutputType) {}
+
+void AnalyzeJobAction::anchor() {}
+
+AnalyzeJobAction::AnalyzeJobAction(std::unique_ptr<Action> Input,
+ types::ID OutputType)
+ : JobAction(AnalyzeJobClass, std::move(Input), OutputType) {}
+
+void MigrateJobAction::anchor() {}
+
+MigrateJobAction::MigrateJobAction(std::unique_ptr<Action> Input,
+ types::ID OutputType)
+ : JobAction(MigrateJobClass, std::move(Input), OutputType) {}
+
+void CompileJobAction::anchor() {}
+
+CompileJobAction::CompileJobAction(std::unique_ptr<Action> Input,
+ types::ID OutputType)
+ : JobAction(CompileJobClass, std::move(Input), OutputType) {}
+
+void BackendJobAction::anchor() {}
+
+BackendJobAction::BackendJobAction(std::unique_ptr<Action> Input,
+ types::ID OutputType)
+ : JobAction(BackendJobClass, std::move(Input), OutputType) {}
+
+void AssembleJobAction::anchor() {}
+
+AssembleJobAction::AssembleJobAction(std::unique_ptr<Action> Input,
+ types::ID OutputType)
+ : JobAction(AssembleJobClass, std::move(Input), OutputType) {}
+
+void LinkJobAction::anchor() {}
+
+LinkJobAction::LinkJobAction(ActionList &Inputs, types::ID Type)
+ : JobAction(LinkJobClass, Inputs, Type) {
+}
+
+void LipoJobAction::anchor() {}
+
+LipoJobAction::LipoJobAction(ActionList &Inputs, types::ID Type)
+ : JobAction(LipoJobClass, Inputs, Type) {
+}
+
+void DsymutilJobAction::anchor() {}
+
+DsymutilJobAction::DsymutilJobAction(ActionList &Inputs, types::ID Type)
+ : JobAction(DsymutilJobClass, Inputs, Type) {
+}
+
+void VerifyJobAction::anchor() {}
+
+VerifyJobAction::VerifyJobAction(ActionClass Kind,
+ std::unique_ptr<Action> Input, types::ID Type)
+ : JobAction(Kind, std::move(Input), Type) {
+ assert((Kind == VerifyDebugInfoJobClass || Kind == VerifyPCHJobClass) &&
+ "ActionClass is not a valid VerifyJobAction");
+}
+
+void VerifyDebugInfoJobAction::anchor() {}
+
+VerifyDebugInfoJobAction::VerifyDebugInfoJobAction(
+ std::unique_ptr<Action> Input, types::ID Type)
+ : VerifyJobAction(VerifyDebugInfoJobClass, std::move(Input), Type) {}
+
+void VerifyPCHJobAction::anchor() {}
+
+VerifyPCHJobAction::VerifyPCHJobAction(std::unique_ptr<Action> Input,
+ types::ID Type)
+ : VerifyJobAction(VerifyPCHJobClass, std::move(Input), Type) {}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp b/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp
new file mode 100644
index 0000000..e4af2a6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp
@@ -0,0 +1,238 @@
+//===--- Compilation.cpp - Compilation Task Implementation ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Action.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/ToolChain.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang::driver;
+using namespace clang;
+using namespace llvm::opt;
+
+Compilation::Compilation(const Driver &D, const ToolChain &_DefaultToolChain,
+ InputArgList *_Args, DerivedArgList *_TranslatedArgs)
+ : TheDriver(D), DefaultToolChain(_DefaultToolChain),
+ CudaHostToolChain(&DefaultToolChain), CudaDeviceToolChain(nullptr),
+ Args(_Args), TranslatedArgs(_TranslatedArgs), Redirects(nullptr),
+ ForDiagnostics(false) {}
+
+Compilation::~Compilation() {
+ delete TranslatedArgs;
+ delete Args;
+
+ // Free any derived arg lists.
+ for (llvm::DenseMap<std::pair<const ToolChain*, const char*>,
+ DerivedArgList*>::iterator it = TCArgs.begin(),
+ ie = TCArgs.end(); it != ie; ++it)
+ if (it->second != TranslatedArgs)
+ delete it->second;
+
+ // Free the actions, if built.
+ for (ActionList::iterator it = Actions.begin(), ie = Actions.end();
+ it != ie; ++it)
+ delete *it;
+
+ // Free redirections of stdout/stderr.
+ if (Redirects) {
+ delete Redirects[1];
+ delete Redirects[2];
+ delete [] Redirects;
+ }
+}
+
+const DerivedArgList &Compilation::getArgsForToolChain(const ToolChain *TC,
+ const char *BoundArch) {
+ if (!TC)
+ TC = &DefaultToolChain;
+
+ DerivedArgList *&Entry = TCArgs[std::make_pair(TC, BoundArch)];
+ if (!Entry) {
+ Entry = TC->TranslateArgs(*TranslatedArgs, BoundArch);
+ if (!Entry)
+ Entry = TranslatedArgs;
+ }
+
+ return *Entry;
+}
+
+bool Compilation::CleanupFile(const char *File, bool IssueErrors) const {
+ // FIXME: Why are we trying to remove files that we have not created? For
+ // example we should only try to remove a temporary assembly file if
+ // "clang -cc1" succeed in writing it. Was this a workaround for when
+ // clang was writing directly to a .s file and sometimes leaving it behind
+ // during a failure?
+
+ // FIXME: If this is necessary, we can still try to split
+ // llvm::sys::fs::remove into a removeFile and a removeDir and avoid the
+ // duplicated stat from is_regular_file.
+
+ // Don't try to remove files which we don't have write access to (but may be
+ // able to remove), or non-regular files. Underlying tools may have
+ // intentionally not overwritten them.
+ if (!llvm::sys::fs::can_write(File) || !llvm::sys::fs::is_regular_file(File))
+ return true;
+
+ if (std::error_code EC = llvm::sys::fs::remove(File)) {
+ // Failure is only failure if the file exists and is "regular". We checked
+ // for it being regular before, and llvm::sys::fs::remove ignores ENOENT,
+ // so we don't need to check again.
+
+ if (IssueErrors)
+ getDriver().Diag(clang::diag::err_drv_unable_to_remove_file)
+ << EC.message();
+ return false;
+ }
+ return true;
+}
+
+bool Compilation::CleanupFileList(const ArgStringList &Files,
+ bool IssueErrors) const {
+ bool Success = true;
+ for (ArgStringList::const_iterator
+ it = Files.begin(), ie = Files.end(); it != ie; ++it)
+ Success &= CleanupFile(*it, IssueErrors);
+ return Success;
+}
+
+bool Compilation::CleanupFileMap(const ArgStringMap &Files,
+ const JobAction *JA,
+ bool IssueErrors) const {
+ bool Success = true;
+ for (ArgStringMap::const_iterator
+ it = Files.begin(), ie = Files.end(); it != ie; ++it) {
+
+ // If specified, only delete the files associated with the JobAction.
+ // Otherwise, delete all files in the map.
+ if (JA && it->first != JA)
+ continue;
+ Success &= CleanupFile(it->second, IssueErrors);
+ }
+ return Success;
+}
+
+int Compilation::ExecuteCommand(const Command &C,
+ const Command *&FailingCommand) const {
+ if ((getDriver().CCPrintOptions ||
+ getArgs().hasArg(options::OPT_v)) && !getDriver().CCGenDiagnostics) {
+ raw_ostream *OS = &llvm::errs();
+
+ // Follow the gcc implementation of CC_PRINT_OPTIONS; we could also cache
+ // the output stream.
+ if (getDriver().CCPrintOptions && getDriver().CCPrintOptionsFilename) {
+ std::error_code EC;
+ OS = new llvm::raw_fd_ostream(getDriver().CCPrintOptionsFilename, EC,
+ llvm::sys::fs::F_Append |
+ llvm::sys::fs::F_Text);
+ if (EC) {
+ getDriver().Diag(clang::diag::err_drv_cc_print_options_failure)
+ << EC.message();
+ FailingCommand = &C;
+ delete OS;
+ return 1;
+ }
+ }
+
+ if (getDriver().CCPrintOptions)
+ *OS << "[Logging clang options]";
+
+ C.Print(*OS, "\n", /*Quote=*/getDriver().CCPrintOptions);
+
+ if (OS != &llvm::errs())
+ delete OS;
+ }
+
+ std::string Error;
+ bool ExecutionFailed;
+ int Res = C.Execute(Redirects, &Error, &ExecutionFailed);
+ if (!Error.empty()) {
+ assert(Res && "Error string set with 0 result code!");
+ getDriver().Diag(clang::diag::err_drv_command_failure) << Error;
+ }
+
+ if (Res)
+ FailingCommand = &C;
+
+ return ExecutionFailed ? 1 : Res;
+}
+
+typedef SmallVectorImpl< std::pair<int, const Command *> > FailingCommandList;
+
+static bool ActionFailed(const Action *A,
+ const FailingCommandList &FailingCommands) {
+
+ if (FailingCommands.empty())
+ return false;
+
+ for (FailingCommandList::const_iterator CI = FailingCommands.begin(),
+ CE = FailingCommands.end(); CI != CE; ++CI)
+ if (A == &(CI->second->getSource()))
+ return true;
+
+ for (Action::const_iterator AI = A->begin(), AE = A->end(); AI != AE; ++AI)
+ if (ActionFailed(*AI, FailingCommands))
+ return true;
+
+ return false;
+}
+
+static bool InputsOk(const Command &C,
+ const FailingCommandList &FailingCommands) {
+ return !ActionFailed(&C.getSource(), FailingCommands);
+}
+
+void Compilation::ExecuteJobs(const JobList &Jobs,
+ FailingCommandList &FailingCommands) const {
+ for (const auto &Job : Jobs) {
+ if (!InputsOk(Job, FailingCommands))
+ continue;
+ const Command *FailingCommand = nullptr;
+ if (int Res = ExecuteCommand(Job, FailingCommand))
+ FailingCommands.push_back(std::make_pair(Res, FailingCommand));
+ }
+}
+
+void Compilation::initCompilationForDiagnostics() {
+ ForDiagnostics = true;
+
+ // Free actions and jobs.
+ DeleteContainerPointers(Actions);
+ Jobs.clear();
+
+ // Clear temporary/results file lists.
+ TempFiles.clear();
+ ResultFiles.clear();
+ FailureResultFiles.clear();
+
+ // Remove any user-specified output.  Claim any unclaimed arguments, so as
+ // to avoid emitting warnings about unused args.
+ OptSpecifier OutputOpts[] = { options::OPT_o, options::OPT_MD,
+ options::OPT_MMD };
+ for (unsigned i = 0, e = llvm::array_lengthof(OutputOpts); i != e; ++i) {
+ if (TranslatedArgs->hasArg(OutputOpts[i]))
+ TranslatedArgs->eraseArg(OutputOpts[i]);
+ }
+ TranslatedArgs->ClaimAllArgs();
+
+ // Redirect stdout/stderr to /dev/null.
+ Redirects = new const StringRef*[3]();
+ Redirects[0] = nullptr;
+ Redirects[1] = new StringRef();
+ Redirects[2] = new StringRef();
+}
+
+StringRef Compilation::getSysRoot() const {
+ return getDriver().SysRoot;
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/CrossWindowsToolChain.cpp b/contrib/llvm/tools/clang/lib/Driver/CrossWindowsToolChain.cpp
new file mode 100644
index 0000000..57bf896
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/CrossWindowsToolChain.cpp
@@ -0,0 +1,122 @@
+//===--- CrossWindowsToolChain.cpp - Cross Windows Tool Chain -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ToolChains.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Options.h"
+#include "llvm/Option/ArgList.h"
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+
+CrossWindowsToolChain::CrossWindowsToolChain(const Driver &D,
+ const llvm::Triple &T,
+ const llvm::opt::ArgList &Args)
+ : Generic_GCC(D, T, Args) {
+ if (GetCXXStdlibType(Args) == ToolChain::CST_Libstdcxx) {
+ const std::string &SysRoot = D.SysRoot;
+
+ // libstdc++ resides in /usr/lib, but depends on libgcc which is placed in
+ // /usr/lib/gcc.
+ getFilePaths().push_back(SysRoot + "/usr/lib");
+ getFilePaths().push_back(SysRoot + "/usr/lib/gcc");
+ }
+}
+
+bool CrossWindowsToolChain::IsUnwindTablesDefault() const {
+ // FIXME: all non-x86 targets need unwind tables; however, LLVM currently does
+ // not know how to emit them.
+ return getArch() == llvm::Triple::x86_64;
+}
+
+bool CrossWindowsToolChain::isPICDefault() const {
+ return getArch() == llvm::Triple::x86_64;
+}
+
+bool CrossWindowsToolChain::isPIEDefault() const {
+ return getArch() == llvm::Triple::x86_64;
+}
+
+bool CrossWindowsToolChain::isPICDefaultForced() const {
+ return getArch() == llvm::Triple::x86_64;
+}
+
+void CrossWindowsToolChain::
+AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+ const std::string &SysRoot = D.SysRoot;
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ addSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/local/include");
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> ResourceDir(D.ResourceDir);
+ llvm::sys::path::append(ResourceDir, "include");
+ addSystemInclude(DriverArgs, CC1Args, ResourceDir);
+ }
+ addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/include");
+}
+
+void CrossWindowsToolChain::
+AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ const llvm::Triple &Triple = getTriple();
+ const std::string &SysRoot = getDriver().SysRoot;
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx:
+ addSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/include/c++/v1");
+ break;
+
+ case ToolChain::CST_Libstdcxx:
+ addSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/include/c++");
+ addSystemInclude(DriverArgs, CC1Args,
+ SysRoot + "/usr/include/c++/" + Triple.str());
+ addSystemInclude(DriverArgs, CC1Args,
+ SysRoot + "/usr/include/c++/backwards");
+ }
+}
+
+void CrossWindowsToolChain::
+AddCXXStdlibLibArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx:
+ CC1Args.push_back("-lc++");
+ break;
+ case ToolChain::CST_Libstdcxx:
+ CC1Args.push_back("-lstdc++");
+ CC1Args.push_back("-lmingw32");
+ CC1Args.push_back("-lmingwex");
+ CC1Args.push_back("-lgcc");
+ CC1Args.push_back("-lmoldname");
+ CC1Args.push_back("-lmingw32");
+ break;
+ }
+}
+
+clang::SanitizerMask CrossWindowsToolChain::getSupportedSanitizers() const {
+ SanitizerMask Res = ToolChain::getSupportedSanitizers();
+ Res |= SanitizerKind::Address;
+ return Res;
+}
+
+Tool *CrossWindowsToolChain::buildLinker() const {
+ return new tools::CrossWindows::Linker(*this);
+}
+
+Tool *CrossWindowsToolChain::buildAssembler() const {
+ return new tools::CrossWindows::Assembler(*this);
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Driver.cpp b/contrib/llvm/tools/clang/lib/Driver/Driver.cpp
new file mode 100644
index 0000000..85bbcb4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Driver.cpp
@@ -0,0 +1,2407 @@
+//===--- Driver.cpp - Clang GCC Compatible Driver -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Driver.h"
+#include "InputInfo.h"
+#include "ToolChains.h"
+#include "clang/Basic/Version.h"
+#include "clang/Basic/VirtualFileSystem.h"
+#include "clang/Config/config.h"
+#include "clang/Driver/Action.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Job.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/SanitizerArgs.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Option/OptSpecifier.h"
+#include "llvm/Option/OptTable.h"
+#include "llvm/Option/Option.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/raw_ostream.h"
+#include <map>
+#include <memory>
+
+using namespace clang::driver;
+using namespace clang;
+using namespace llvm::opt;
+
+Driver::Driver(StringRef ClangExecutable, StringRef DefaultTargetTriple,
+ DiagnosticsEngine &Diags,
+ IntrusiveRefCntPtr<vfs::FileSystem> VFS)
+ : Opts(createDriverOptTable()), Diags(Diags), VFS(VFS), Mode(GCCMode),
+ SaveTemps(SaveTempsNone), LTOMode(LTOK_None),
+ ClangExecutable(ClangExecutable),
+ SysRoot(DEFAULT_SYSROOT), UseStdLib(true),
+ DefaultTargetTriple(DefaultTargetTriple),
+ DriverTitle("clang LLVM compiler"), CCPrintOptionsFilename(nullptr),
+ CCPrintHeadersFilename(nullptr), CCLogDiagnosticsFilename(nullptr),
+ CCCPrintBindings(false), CCPrintHeaders(false), CCLogDiagnostics(false),
+ CCGenDiagnostics(false), CCCGenericGCCName(""), CheckInputsExist(true),
+ CCCUsePCH(true), SuppressMissingInputWarning(false) {
+
+ // Provide a sane fallback if no VFS is specified.
+ if (!this->VFS)
+ this->VFS = vfs::getRealFileSystem();
+
+ Name = llvm::sys::path::filename(ClangExecutable);
+ Dir = llvm::sys::path::parent_path(ClangExecutable);
+ InstalledDir = Dir; // Provide a sensible default installed dir.
+
+ // Compute the path to the resource directory.
+ StringRef ClangResourceDir(CLANG_RESOURCE_DIR);
+ SmallString<128> P(Dir);
+ if (ClangResourceDir != "") {
+ llvm::sys::path::append(P, ClangResourceDir);
+ } else {
+ StringRef ClangLibdirSuffix(CLANG_LIBDIR_SUFFIX);
+ llvm::sys::path::append(P, "..", Twine("lib") + ClangLibdirSuffix, "clang",
+ CLANG_VERSION_STRING);
+ }
+ ResourceDir = P.str();
+}
+
+Driver::~Driver() {
+ delete Opts;
+
+ llvm::DeleteContainerSeconds(ToolChains);
+}
+
+void Driver::ParseDriverMode(ArrayRef<const char *> Args) {
+ const std::string OptName =
+ getOpts().getOption(options::OPT_driver_mode).getPrefixedName();
+
+ for (const char *ArgPtr : Args) {
+ // Ignore nullptrs; they are response files' EOL markers.
+ if (ArgPtr == nullptr)
+ continue;
+ const StringRef Arg = ArgPtr;
+ if (!Arg.startswith(OptName))
+ continue;
+
+ const StringRef Value = Arg.drop_front(OptName.size());
+ const unsigned M = llvm::StringSwitch<unsigned>(Value)
+ .Case("gcc", GCCMode)
+ .Case("g++", GXXMode)
+ .Case("cpp", CPPMode)
+ .Case("cl", CLMode)
+ .Default(~0U);
+
+ if (M != ~0U)
+ Mode = static_cast<DriverMode>(M);
+ else
+ Diag(diag::err_drv_unsupported_option_argument) << OptName << Value;
+ }
+}
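
The loop above compares each argument against the option's prefixed name and lets the last occurrence win, so for example --driver-mode=g++ selects GXXMode. A standalone sketch of the prefix match:

    // Standalone sketch, not part of this patch.
    #include <cstring>
    #include <iostream>
    int main() {
      const char *OptName = "--driver-mode=";
      const char *Arg = "--driver-mode=g++";
      if (std::strncmp(Arg, OptName, std::strlen(OptName)) == 0)
        std::cout << "mode value: " << (Arg + std::strlen(OptName)) << '\n'; // g++
    }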
+
+InputArgList Driver::ParseArgStrings(ArrayRef<const char *> ArgStrings) {
+ llvm::PrettyStackTraceString CrashInfo("Command line argument parsing");
+
+ unsigned IncludedFlagsBitmask;
+ unsigned ExcludedFlagsBitmask;
+ std::tie(IncludedFlagsBitmask, ExcludedFlagsBitmask) =
+ getIncludeExcludeOptionFlagMasks();
+
+ unsigned MissingArgIndex, MissingArgCount;
+ InputArgList Args =
+ getOpts().ParseArgs(ArgStrings, MissingArgIndex, MissingArgCount,
+ IncludedFlagsBitmask, ExcludedFlagsBitmask);
+
+ // Check for missing argument error.
+ if (MissingArgCount)
+ Diag(clang::diag::err_drv_missing_argument)
+ << Args.getArgString(MissingArgIndex) << MissingArgCount;
+
+ // Check for unsupported options.
+ for (const Arg *A : Args) {
+ if (A->getOption().hasFlag(options::Unsupported)) {
+ Diag(clang::diag::err_drv_unsupported_opt) << A->getAsString(Args);
+ continue;
+ }
+
+ // Warn about -mcpu= without an argument.
+ if (A->getOption().matches(options::OPT_mcpu_EQ) && A->containsValue("")) {
+ Diag(clang::diag::warn_drv_empty_joined_argument) << A->getAsString(Args);
+ }
+ }
+
+ for (const Arg *A : Args.filtered(options::OPT_UNKNOWN))
+ Diags.Report(diag::err_drv_unknown_argument) << A->getAsString(Args);
+
+ return Args;
+}
+
+// Determine which compilation mode we are in. We look for options which
+// affect the phase, starting with the earliest phases, and record which
+// option we used to determine the final phase.
+phases::ID Driver::getFinalPhase(const DerivedArgList &DAL,
+ Arg **FinalPhaseArg) const {
+ Arg *PhaseArg = nullptr;
+ phases::ID FinalPhase;
+
+ // -{E,EP,P,M,MM} only run the preprocessor.
+ if (CCCIsCPP() || (PhaseArg = DAL.getLastArg(options::OPT_E)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT__SLASH_EP)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_M, options::OPT_MM)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT__SLASH_P))) {
+ FinalPhase = phases::Preprocess;
+
+ // -{fsyntax-only,-analyze,emit-ast} only run up to the compiler.
+ } else if ((PhaseArg = DAL.getLastArg(options::OPT_fsyntax_only)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_module_file_info)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_verify_pch)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_rewrite_objc)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_rewrite_legacy_objc)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT__migrate)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT__analyze,
+ options::OPT__analyze_auto)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_emit_ast))) {
+ FinalPhase = phases::Compile;
+
+ // -S only runs up to the backend.
+ } else if ((PhaseArg = DAL.getLastArg(options::OPT_S))) {
+ FinalPhase = phases::Backend;
+
+ // -c compilation only runs up to the assembler.
+ } else if ((PhaseArg = DAL.getLastArg(options::OPT_c))) {
+ FinalPhase = phases::Assemble;
+
+ // Otherwise do everything.
+ } else
+ FinalPhase = phases::Link;
+
+ if (FinalPhaseArg)
+ *FinalPhaseArg = PhaseArg;
+
+ return FinalPhase;
+}
+
+static Arg *MakeInputArg(DerivedArgList &Args, OptTable *Opts,
+ StringRef Value) {
+ Arg *A = new Arg(Opts->getOption(options::OPT_INPUT), Value,
+ Args.getBaseArgs().MakeIndex(Value), Value.data());
+ Args.AddSynthesizedArg(A);
+ A->claim();
+ return A;
+}
+
+DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
+ DerivedArgList *DAL = new DerivedArgList(Args);
+
+ bool HasNostdlib = Args.hasArg(options::OPT_nostdlib);
+ bool HasNodefaultlib = Args.hasArg(options::OPT_nodefaultlibs);
+ for (Arg *A : Args) {
+ // Unfortunately, we have to parse some forwarding options (-Xassembler,
+ // -Xlinker, -Xpreprocessor) because we either integrate their functionality
+ // (assembler and preprocessor), or bypass a previous driver ('collect2').
+
+ // Rewrite linker options, to replace --no-demangle with a custom internal
+ // option.
+ if ((A->getOption().matches(options::OPT_Wl_COMMA) ||
+ A->getOption().matches(options::OPT_Xlinker)) &&
+ A->containsValue("--no-demangle")) {
+ // Add the rewritten no-demangle argument.
+ DAL->AddFlagArg(A, Opts->getOption(options::OPT_Z_Xlinker__no_demangle));
+
+ // Add the remaining values as Xlinker arguments.
+ for (StringRef Val : A->getValues())
+ if (Val != "--no-demangle")
+ DAL->AddSeparateArg(A, Opts->getOption(options::OPT_Xlinker), Val);
+
+ continue;
+ }
+
+ // Rewrite preprocessor options, to replace -Wp,-MD,FOO which is used by
+ // some build systems. We don't try to be complete here because we don't
+ // care to encourage this usage model.
+ if (A->getOption().matches(options::OPT_Wp_COMMA) &&
+ (A->getValue(0) == StringRef("-MD") ||
+ A->getValue(0) == StringRef("-MMD"))) {
+ // Rewrite to -MD/-MMD along with -MF.
+ if (A->getValue(0) == StringRef("-MD"))
+ DAL->AddFlagArg(A, Opts->getOption(options::OPT_MD));
+ else
+ DAL->AddFlagArg(A, Opts->getOption(options::OPT_MMD));
+ if (A->getNumValues() == 2)
+ DAL->AddSeparateArg(A, Opts->getOption(options::OPT_MF),
+ A->getValue(1));
+ continue;
+ }
+
+ // Rewrite reserved library names.
+ if (A->getOption().matches(options::OPT_l)) {
+ StringRef Value = A->getValue();
+
+ // Rewrite unless -nostdlib is present.
+ if (!HasNostdlib && !HasNodefaultlib && Value == "stdc++") {
+ DAL->AddFlagArg(A, Opts->getOption(options::OPT_Z_reserved_lib_stdcxx));
+ continue;
+ }
+
+ // Rewrite unconditionally.
+ if (Value == "cc_kext") {
+ DAL->AddFlagArg(A, Opts->getOption(options::OPT_Z_reserved_lib_cckext));
+ continue;
+ }
+ }
+
+ // Pick up inputs via the -- option.
+ if (A->getOption().matches(options::OPT__DASH_DASH)) {
+ A->claim();
+ for (StringRef Val : A->getValues())
+ DAL->append(MakeInputArg(*DAL, Opts, Val));
+ continue;
+ }
+
+ DAL->append(A);
+ }
+
+// Add a default value of -mlinker-version=, if the host linker's version was
+// detected at build time (HOST_LINK_VERSION) and the user didn't specify one.
+#if defined(HOST_LINK_VERSION)
+ if (!Args.hasArg(options::OPT_mlinker_version_EQ) &&
+ strlen(HOST_LINK_VERSION) > 0) {
+ DAL->AddJoinedArg(0, Opts->getOption(options::OPT_mlinker_version_EQ),
+ HOST_LINK_VERSION);
+ DAL->getLastArg(options::OPT_mlinker_version_EQ)->claim();
+ }
+#endif
+
+ return DAL;
+}
+
+/// \brief Compute target triple from args.
+///
+/// This routine provides the logic to compute a target triple from various
+/// args passed to the driver and the default triple string.
+static llvm::Triple computeTargetTriple(StringRef DefaultTargetTriple,
+ const ArgList &Args,
+ StringRef DarwinArchName = "") {
+ // FIXME: Already done in Compilation *Driver::BuildCompilation
+ if (const Arg *A = Args.getLastArg(options::OPT_target))
+ DefaultTargetTriple = A->getValue();
+
+ llvm::Triple Target(llvm::Triple::normalize(DefaultTargetTriple));
+
+ // Handle Apple-specific options available here.
+ if (Target.isOSBinFormatMachO()) {
+ // If an explicit Darwin arch name is given, that trumps all.
+ if (!DarwinArchName.empty()) {
+ tools::darwin::setTripleTypeForMachOArchName(Target, DarwinArchName);
+ return Target;
+ }
+
+ // Handle the Darwin '-arch' flag.
+ if (Arg *A = Args.getLastArg(options::OPT_arch)) {
+ StringRef ArchName = A->getValue();
+ tools::darwin::setTripleTypeForMachOArchName(Target, ArchName);
+ }
+ }
+
+ // Handle pseudo-target flags '-mlittle-endian'/'-EL' and
+ // '-mbig-endian'/'-EB'.
+ if (Arg *A = Args.getLastArg(options::OPT_mlittle_endian,
+ options::OPT_mbig_endian)) {
+ if (A->getOption().matches(options::OPT_mlittle_endian)) {
+ llvm::Triple LE = Target.getLittleEndianArchVariant();
+ if (LE.getArch() != llvm::Triple::UnknownArch)
+ Target = std::move(LE);
+ } else {
+ llvm::Triple BE = Target.getBigEndianArchVariant();
+ if (BE.getArch() != llvm::Triple::UnknownArch)
+ Target = std::move(BE);
+ }
+ }
+
+ // Skip further flag support on OSes which don't support '-m32' or '-m64'.
+ if (Target.getArch() == llvm::Triple::tce ||
+ Target.getOS() == llvm::Triple::Minix)
+ return Target;
+
+ // Handle pseudo-target flags '-m64', '-mx32', '-m32' and '-m16'.
+ if (Arg *A = Args.getLastArg(options::OPT_m64, options::OPT_mx32,
+ options::OPT_m32, options::OPT_m16)) {
+ llvm::Triple::ArchType AT = llvm::Triple::UnknownArch;
+
+ if (A->getOption().matches(options::OPT_m64)) {
+ AT = Target.get64BitArchVariant().getArch();
+ if (Target.getEnvironment() == llvm::Triple::GNUX32)
+ Target.setEnvironment(llvm::Triple::GNU);
+ } else if (A->getOption().matches(options::OPT_mx32) &&
+ Target.get64BitArchVariant().getArch() == llvm::Triple::x86_64) {
+ AT = llvm::Triple::x86_64;
+ Target.setEnvironment(llvm::Triple::GNUX32);
+ } else if (A->getOption().matches(options::OPT_m32)) {
+ AT = Target.get32BitArchVariant().getArch();
+ if (Target.getEnvironment() == llvm::Triple::GNUX32)
+ Target.setEnvironment(llvm::Triple::GNU);
+ } else if (A->getOption().matches(options::OPT_m16) &&
+ Target.get32BitArchVariant().getArch() == llvm::Triple::x86) {
+ AT = llvm::Triple::x86;
+ Target.setEnvironment(llvm::Triple::CODE16);
+ }
+
+ if (AT != llvm::Triple::UnknownArch && AT != Target.getArch())
+ Target.setArch(AT);
+ }
+
+ return Target;
+}
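
For example, combining -target x86_64-unknown-linux-gnu with -m32 should land on the i386 variant. A minimal sketch using only llvm::Triple, assuming it is compiled and linked against LLVM's Support library:

    // Standalone sketch, not part of this patch.
    #include "llvm/ADT/Triple.h"
    #include "llvm/Support/raw_ostream.h"
    int main() {
      llvm::Triple T(llvm::Triple::normalize("x86_64-unknown-linux-gnu"));
      llvm::Triple::ArchType AT = T.get32BitArchVariant().getArch(); // as for -m32
      if (AT != llvm::Triple::UnknownArch && AT != T.getArch())
        T.setArch(AT);
      llvm::outs() << T.str() << "\n"; // i386-unknown-linux-gnu
    }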
+
+/// \brief Parse the LTO options and record the type of LTO compilation
+/// based on which -f(no-)?lto(=.*)? option occurs last.
+void Driver::setLTOMode(const llvm::opt::ArgList &Args) {
+ LTOMode = LTOK_None;
+ if (!Args.hasFlag(options::OPT_flto, options::OPT_flto_EQ,
+ options::OPT_fno_lto, false))
+ return;
+
+ StringRef LTOName("full");
+
+ const Arg *A = Args.getLastArg(options::OPT_flto_EQ);
+ if (A)
+ LTOName = A->getValue();
+
+ LTOMode = llvm::StringSwitch<LTOKind>(LTOName)
+ .Case("full", LTOK_Full)
+ .Case("thin", LTOK_Thin)
+ .Default(LTOK_Unknown);
+
+ if (LTOMode == LTOK_Unknown) {
+ assert(A);
+ Diag(diag::err_drv_unsupported_option_argument) << A->getOption().getName()
+ << A->getValue();
+ }
+}
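
The parse above means a bare -flto selects full LTO, -flto=thin selects ThinLTO, and anything else is rejected. The StringSwitch can be exercised in isolation:

    // Standalone sketch, not part of this patch (links against LLVM).
    #include "llvm/ADT/StringSwitch.h"
    #include <iostream>
    enum LTOKind { LTOK_None, LTOK_Full, LTOK_Thin, LTOK_Unknown };
    int main() {
      for (const char *V : {"full", "thin", "bogus"}) {
        LTOKind K = llvm::StringSwitch<LTOKind>(V)
                        .Case("full", LTOK_Full)
                        .Case("thin", LTOK_Thin)
                        .Default(LTOK_Unknown);
        std::cout << V << " -> " << static_cast<int>(K) << '\n'; // 1, 2, 3
      }
    }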
+
+Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
+ llvm::PrettyStackTraceString CrashInfo("Compilation construction");
+
+ // FIXME: Handle environment options which affect driver behavior, somewhere
+ // (client?). GCC_EXEC_PREFIX, LPATH, CC_PRINT_OPTIONS.
+
+ if (char *env = ::getenv("COMPILER_PATH")) {
+ StringRef CompilerPath = env;
+ while (!CompilerPath.empty()) {
+ std::pair<StringRef, StringRef> Split =
+ CompilerPath.split(llvm::sys::EnvPathSeparator);
+ PrefixDirs.push_back(Split.first);
+ CompilerPath = Split.second;
+ }
+ }
+
+ // We look for the driver mode option early, because the mode can affect
+ // how other options are parsed.
+ ParseDriverMode(ArgList.slice(1));
+
+ // FIXME: What are we going to do with -V and -b?
+
+ // FIXME: This stuff needs to go into the Compilation, not the driver.
+ bool CCCPrintPhases;
+
+ InputArgList Args = ParseArgStrings(ArgList.slice(1));
+
+ // Silence driver warnings if requested
+ Diags.setIgnoreAllWarnings(Args.hasArg(options::OPT_w));
+
+ // -no-canonical-prefixes is used very early in main.
+ Args.ClaimAllArgs(options::OPT_no_canonical_prefixes);
+
+ // Ignore -pipe.
+ Args.ClaimAllArgs(options::OPT_pipe);
+
+ // Extract -ccc args.
+ //
+ // FIXME: We need to figure out where this behavior should live. Most of it
+ // should be outside in the client; the parts that aren't should have proper
+ // options, either by introducing new ones or by overloading gcc ones like -V
+ // or -b.
+ CCCPrintPhases = Args.hasArg(options::OPT_ccc_print_phases);
+ CCCPrintBindings = Args.hasArg(options::OPT_ccc_print_bindings);
+ if (const Arg *A = Args.getLastArg(options::OPT_ccc_gcc_name))
+ CCCGenericGCCName = A->getValue();
+ CCCUsePCH =
+ Args.hasFlag(options::OPT_ccc_pch_is_pch, options::OPT_ccc_pch_is_pth);
+ // FIXME: DefaultTargetTriple is used by the target-prefixed calls to as/ld
+ // and getToolChain is const.
+ if (IsCLMode()) {
+ // clang-cl targets MSVC-style Win32.
+ llvm::Triple T(DefaultTargetTriple);
+ T.setOS(llvm::Triple::Win32);
+ T.setVendor(llvm::Triple::PC);
+ T.setEnvironment(llvm::Triple::MSVC);
+ DefaultTargetTriple = T.str();
+ }
+ if (const Arg *A = Args.getLastArg(options::OPT_target))
+ DefaultTargetTriple = A->getValue();
+ if (const Arg *A = Args.getLastArg(options::OPT_ccc_install_dir))
+ Dir = InstalledDir = A->getValue();
+ for (const Arg *A : Args.filtered(options::OPT_B)) {
+ A->claim();
+ PrefixDirs.push_back(A->getValue(0));
+ }
+ if (const Arg *A = Args.getLastArg(options::OPT__sysroot_EQ))
+ SysRoot = A->getValue();
+ if (const Arg *A = Args.getLastArg(options::OPT__dyld_prefix_EQ))
+ DyldPrefix = A->getValue();
+ if (Args.hasArg(options::OPT_nostdlib))
+ UseStdLib = false;
+
+ if (const Arg *A = Args.getLastArg(options::OPT_resource_dir))
+ ResourceDir = A->getValue();
+
+ if (const Arg *A = Args.getLastArg(options::OPT_save_temps_EQ)) {
+ SaveTemps = llvm::StringSwitch<SaveTempsMode>(A->getValue())
+ .Case("cwd", SaveTempsCwd)
+ .Case("obj", SaveTempsObj)
+ .Default(SaveTempsCwd);
+ }
+
+ setLTOMode(Args);
+
+ std::unique_ptr<llvm::opt::InputArgList> UArgs =
+ llvm::make_unique<InputArgList>(std::move(Args));
+
+ // Perform the default argument translations.
+ DerivedArgList *TranslatedArgs = TranslateInputArgs(*UArgs);
+
+ // Owned by the host.
+ const ToolChain &TC =
+ getToolChain(*UArgs, computeTargetTriple(DefaultTargetTriple, *UArgs));
+
+ // The compilation takes ownership of Args.
+ Compilation *C = new Compilation(*this, TC, UArgs.release(), TranslatedArgs);
+
+ C->setCudaDeviceToolChain(
+ &getToolChain(C->getArgs(), llvm::Triple(TC.getTriple().isArch64Bit()
+ ? "nvptx64-nvidia-cuda"
+ : "nvptx-nvidia-cuda")));
+ if (!HandleImmediateArgs(*C))
+ return C;
+
+ // Construct the list of inputs.
+ InputList Inputs;
+ BuildInputs(C->getDefaultToolChain(), *TranslatedArgs, Inputs);
+
+ // Construct the list of abstract actions to perform for this compilation. On
+ // MachO targets this uses the driver-driver and universal actions.
+ if (TC.getTriple().isOSBinFormatMachO())
+ BuildUniversalActions(*C, C->getDefaultToolChain(), Inputs);
+ else
+ BuildActions(*C, C->getDefaultToolChain(), C->getArgs(), Inputs,
+ C->getActions());
+
+ if (CCCPrintPhases) {
+ PrintActions(*C);
+ return C;
+ }
+
+ BuildJobs(*C);
+
+ return C;
+}
+
+static void printArgList(raw_ostream &OS, const llvm::opt::ArgList &Args) {
+ llvm::opt::ArgStringList ASL;
+ for (const auto *A : Args)
+ A->render(Args, ASL);
+
+ for (auto I = ASL.begin(), E = ASL.end(); I != E; ++I) {
+ if (I != ASL.begin())
+ OS << ' ';
+ Command::printArg(OS, *I, true);
+ }
+ OS << '\n';
+}
+
+// When clang crashes, produce diagnostic information including the fully
+// preprocessed source file(s). Request that the developer attach the
+// diagnostic information to a bug report.
+void Driver::generateCompilationDiagnostics(Compilation &C,
+ const Command &FailingCommand) {
+ if (C.getArgs().hasArg(options::OPT_fno_crash_diagnostics))
+ return;
+
+ // Don't try to generate diagnostics for link or dsymutil jobs.
+ if (FailingCommand.getCreator().isLinkJob() ||
+ FailingCommand.getCreator().isDsymutilJob())
+ return;
+
+ // Print the version of the compiler.
+ PrintVersion(C, llvm::errs());
+
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "PLEASE submit a bug report to " BUG_REPORT_URL " and include the "
+ "crash backtrace, preprocessed source, and associated run script.";
+
+ // Suppress driver output and emit preprocessor output to temp file.
+ Mode = CPPMode;
+ CCGenDiagnostics = true;
+
+ // Save the original job command(s).
+ Command Cmd = FailingCommand;
+
+ // Keep track of whether we produce any errors while trying to produce
+ // preprocessed sources.
+ DiagnosticErrorTrap Trap(Diags);
+
+ // Suppress tool output.
+ C.initCompilationForDiagnostics();
+
+ // Construct the list of inputs.
+ InputList Inputs;
+ BuildInputs(C.getDefaultToolChain(), C.getArgs(), Inputs);
+
+ for (InputList::iterator it = Inputs.begin(), ie = Inputs.end(); it != ie;) {
+ bool IgnoreInput = false;
+
+ // Ignore input from stdin or any inputs that cannot be preprocessed.
+ // Check type first as not all linker inputs have a value.
+ if (types::getPreprocessedType(it->first) == types::TY_INVALID) {
+ IgnoreInput = true;
+ } else if (!strcmp(it->second->getValue(), "-")) {
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "Error generating preprocessed source(s) - "
+ "ignoring input from stdin.";
+ IgnoreInput = true;
+ }
+
+ if (IgnoreInput) {
+ it = Inputs.erase(it);
+ ie = Inputs.end();
+ } else {
+ ++it;
+ }
+ }
+
+ if (Inputs.empty()) {
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "Error generating preprocessed source(s) - "
+ "no preprocessable inputs.";
+ return;
+ }
+
+ // Don't attempt to generate preprocessed files if multiple -arch options are
+ // used, unless they're all duplicates.
+ llvm::StringSet<> ArchNames;
+ for (const Arg *A : C.getArgs()) {
+ if (A->getOption().matches(options::OPT_arch)) {
+ StringRef ArchName = A->getValue();
+ ArchNames.insert(ArchName);
+ }
+ }
+ if (ArchNames.size() > 1) {
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "Error generating preprocessed source(s) - cannot generate "
+ "preprocessed source with multiple -arch options.";
+ return;
+ }
+
+ // Construct the list of abstract actions to perform for this compilation. On
+ // Darwin OSes this uses the driver-driver and builds universal actions.
+ const ToolChain &TC = C.getDefaultToolChain();
+ if (TC.getTriple().isOSBinFormatMachO())
+ BuildUniversalActions(C, TC, Inputs);
+ else
+ BuildActions(C, TC, C.getArgs(), Inputs, C.getActions());
+
+ BuildJobs(C);
+
+ // If there were errors building the compilation, quit now.
+ if (Trap.hasErrorOccurred()) {
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "Error generating preprocessed source(s).";
+ return;
+ }
+
+ // Generate preprocessed output.
+ SmallVector<std::pair<int, const Command *>, 4> FailingCommands;
+ C.ExecuteJobs(C.getJobs(), FailingCommands);
+
+ // If any of the preprocessing commands failed, clean up and exit.
+ if (!FailingCommands.empty()) {
+ if (!isSaveTempsEnabled())
+ C.CleanupFileList(C.getTempFiles(), true);
+
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "Error generating preprocessed source(s).";
+ return;
+ }
+
+ const ArgStringList &TempFiles = C.getTempFiles();
+ if (TempFiles.empty()) {
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "Error generating preprocessed source(s).";
+ return;
+ }
+
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "\n********************\n\n"
+ "PLEASE ATTACH THE FOLLOWING FILES TO THE BUG REPORT:\n"
+ "Preprocessed source(s) and associated run script(s) are located at:";
+
+ SmallString<128> VFS;
+ for (const char *TempFile : TempFiles) {
+ Diag(clang::diag::note_drv_command_failed_diag_msg) << TempFile;
+ if (StringRef(TempFile).endswith(".cache")) {
+ // In some cases (modules) we'll dump extra data to help with reproducing
+ // the crash into a directory next to the output.
+ VFS = llvm::sys::path::filename(TempFile);
+ llvm::sys::path::append(VFS, "vfs", "vfs.yaml");
+ }
+ }
+
+ // Assume associated files are based on the first temporary file.
+ CrashReportInfo CrashInfo(TempFiles[0], VFS);
+
+ std::string Script = CrashInfo.Filename.rsplit('.').first.str() + ".sh";
+ std::error_code EC;
+ llvm::raw_fd_ostream ScriptOS(Script, EC, llvm::sys::fs::F_Excl);
+ if (EC) {
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "Error generating run script: " + Script + " " + EC.message();
+ } else {
+ ScriptOS << "# Crash reproducer for " << getClangFullVersion() << "\n"
+ << "# Driver args: ";
+ printArgList(ScriptOS, C.getInputArgs());
+ ScriptOS << "# Original command: ";
+ Cmd.Print(ScriptOS, "\n", /*Quote=*/true);
+ Cmd.Print(ScriptOS, "\n", /*Quote=*/true, &CrashInfo);
+ Diag(clang::diag::note_drv_command_failed_diag_msg) << Script;
+ }
+
+ for (const auto &A : C.getArgs().filtered(options::OPT_frewrite_map_file,
+ options::OPT_frewrite_map_file_EQ))
+ Diag(clang::diag::note_drv_command_failed_diag_msg) << A->getValue();
+
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "\n\n********************";
+}
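+
+// Illustrative sketch (an assumption, not part of this change): for a crash
+// while compiling foo.c, the reproducer script written above would look
+// roughly like:
+//
+//   # Crash reproducer for clang version X.Y.Z ...
+//   # Driver args: "clang" "-c" "foo.c"
+//   # Original command: "/path/to/clang" "-cc1" ... "foo.c"
+//   "/path/to/clang" "-cc1" ... "foo-<tmp>.c"
+//
+// with the preprocessed foo-<tmp>.c listed in the diagnostics above.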
+
+void Driver::setUpResponseFiles(Compilation &C, Command &Cmd) {
+ // Since commandLineFitsWithinSystemLimits() may underestimate the system's
+ // capacity if the tool does not support response files, there is a chance
+ // that things will just work without a response file, so we silently skip
+ // it in that case.
+ if (Cmd.getCreator().getResponseFilesSupport() == Tool::RF_None ||
+ llvm::sys::commandLineFitsWithinSystemLimits(Cmd.getExecutable(),
+ Cmd.getArguments()))
+ return;
+
+ std::string TmpName = GetTemporaryPath("response", "txt");
+ Cmd.setResponseFile(
+ C.addTempFile(C.getArgs().MakeArgString(TmpName.c_str())));
+}
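+
+// Illustrative example (assumed paths): when a command line exceeds the
+// system limit and the tool supports response files, the arguments are
+// later written to the temp file named here and the tool is invoked with
+// it instead, conceptually:
+//
+//   ld <thousands of object files>        (too long for the system)
+//   ld @/tmp/response-abc123.txt          (via the response file)
+//
+// The exact @file spelling depends on the tool's response-file support.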
+
+int Driver::ExecuteCompilation(
+ Compilation &C,
+ SmallVectorImpl<std::pair<int, const Command *>> &FailingCommands) {
+ // Just print if -### was present.
+ if (C.getArgs().hasArg(options::OPT__HASH_HASH_HASH)) {
+ C.getJobs().Print(llvm::errs(), "\n", true);
+ return 0;
+ }
+
+ // If there were errors building the compilation, quit now.
+ if (Diags.hasErrorOccurred())
+ return 1;
+
+ // Set up response file names for each command, if necessary.
+ for (auto &Job : C.getJobs())
+ setUpResponseFiles(C, Job);
+
+ C.ExecuteJobs(C.getJobs(), FailingCommands);
+
+ // Remove temp files.
+ C.CleanupFileList(C.getTempFiles());
+
+ // If the command succeeded, we are done.
+ if (FailingCommands.empty())
+ return 0;
+
+ // Otherwise, remove result files and print extra information about abnormal
+ // failures.
+ for (const auto &CmdPair : FailingCommands) {
+ int Res = CmdPair.first;
+ const Command *FailingCommand = CmdPair.second;
+
+ // Remove result files if we're not saving temps.
+ if (!isSaveTempsEnabled()) {
+ const JobAction *JA = cast<JobAction>(&FailingCommand->getSource());
+ C.CleanupFileMap(C.getResultFiles(), JA, true);
+
+ // Failure result files are valid unless we crashed.
+ if (Res < 0)
+ C.CleanupFileMap(C.getFailureResultFiles(), JA, true);
+ }
+
+ // Print extra information about abnormal failures, if possible.
+ //
+ // This is ad-hoc, but we don't want to be excessively noisy. If the result
+ // status was 1, assume the command failed normally. In particular, if it
+ // was the compiler then assume it gave a reasonable error code. Failures
+ // in other tools are less common, and they generally have worse
+ // diagnostics, so always print the diagnostic there.
+ const Tool &FailingTool = FailingCommand->getCreator();
+
+ if (!FailingCommand->getCreator().hasGoodDiagnostics() || Res != 1) {
+ // FIXME: See FIXME above regarding result code interpretation.
+ if (Res < 0)
+ Diag(clang::diag::err_drv_command_signalled)
+ << FailingTool.getShortName();
+ else
+ Diag(clang::diag::err_drv_command_failed) << FailingTool.getShortName()
+ << Res;
+ }
+ }
+ return 0;
+}
+
+void Driver::PrintHelp(bool ShowHidden) const {
+ unsigned IncludedFlagsBitmask;
+ unsigned ExcludedFlagsBitmask;
+ std::tie(IncludedFlagsBitmask, ExcludedFlagsBitmask) =
+ getIncludeExcludeOptionFlagMasks();
+
+ ExcludedFlagsBitmask |= options::NoDriverOption;
+ if (!ShowHidden)
+ ExcludedFlagsBitmask |= HelpHidden;
+
+ getOpts().PrintHelp(llvm::outs(), Name.c_str(), DriverTitle.c_str(),
+ IncludedFlagsBitmask, ExcludedFlagsBitmask);
+}
+
+void Driver::PrintVersion(const Compilation &C, raw_ostream &OS) const {
+ // FIXME: The following handlers should use a callback mechanism, we don't
+ // know what the client would like to do.
+ OS << getClangFullVersion() << '\n';
+ const ToolChain &TC = C.getDefaultToolChain();
+ OS << "Target: " << TC.getTripleString() << '\n';
+
+ // Print the threading model.
+ if (Arg *A = C.getArgs().getLastArg(options::OPT_mthread_model)) {
+ // Don't print if the ToolChain would have barfed on it already
+ if (TC.isThreadModelSupported(A->getValue()))
+ OS << "Thread model: " << A->getValue();
+ } else
+ OS << "Thread model: " << TC.getThreadModel();
+ OS << '\n';
+
+ // Print out the install directory.
+ OS << "InstalledDir: " << InstalledDir << '\n';
+}
+
+/// PrintDiagnosticCategories - Implement the --print-diagnostic-categories
+/// option.
+static void PrintDiagnosticCategories(raw_ostream &OS) {
+ // Skip the empty category.
+ for (unsigned i = 1, max = DiagnosticIDs::getNumberOfCategories(); i != max;
+ ++i)
+ OS << i << ',' << DiagnosticIDs::getCategoryNameFromID(i) << '\n';
+}
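+
+// For illustration, each line printed above has the form "<id>,<name>",
+// e.g. "2,Semantic Issue" (the exact category names are an assumption and
+// vary with the diagnostic tables).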
+
+bool Driver::HandleImmediateArgs(const Compilation &C) {
+ // The order these options are handled in gcc is all over the place, but we
+ // don't expect inconsistencies w.r.t. that to matter in practice.
+
+ if (C.getArgs().hasArg(options::OPT_dumpmachine)) {
+ llvm::outs() << C.getDefaultToolChain().getTripleString() << '\n';
+ return false;
+ }
+
+ if (C.getArgs().hasArg(options::OPT_dumpversion)) {
+ // Since -dumpversion is only implemented for pedantic GCC compatibility, we
+ // return an answer which matches our definition of __VERSION__.
+ //
+ // If we want to return a more correct answer some day, then we should
+ // introduce a non-pedantically GCC compatible mode to Clang in which we
+ // provide sensible definitions for -dumpversion, __VERSION__, etc.
+ llvm::outs() << "4.2.1\n";
+ return false;
+ }
+
+ if (C.getArgs().hasArg(options::OPT__print_diagnostic_categories)) {
+ PrintDiagnosticCategories(llvm::outs());
+ return false;
+ }
+
+ if (C.getArgs().hasArg(options::OPT_help) ||
+ C.getArgs().hasArg(options::OPT__help_hidden)) {
+ PrintHelp(C.getArgs().hasArg(options::OPT__help_hidden));
+ return false;
+ }
+
+ if (C.getArgs().hasArg(options::OPT__version)) {
+ // Follow gcc behavior and use stdout for --version and stderr for -v.
+ PrintVersion(C, llvm::outs());
+ return false;
+ }
+
+ if (C.getArgs().hasArg(options::OPT_v) ||
+ C.getArgs().hasArg(options::OPT__HASH_HASH_HASH)) {
+ PrintVersion(C, llvm::errs());
+ SuppressMissingInputWarning = true;
+ }
+
+ const ToolChain &TC = C.getDefaultToolChain();
+
+ if (C.getArgs().hasArg(options::OPT_v))
+ TC.printVerboseInfo(llvm::errs());
+
+ if (C.getArgs().hasArg(options::OPT_print_search_dirs)) {
+ llvm::outs() << "programs: =";
+ bool separator = false;
+ for (const std::string &Path : TC.getProgramPaths()) {
+ if (separator)
+ llvm::outs() << ':';
+ llvm::outs() << Path;
+ separator = true;
+ }
+ llvm::outs() << "\n";
+ llvm::outs() << "libraries: =" << ResourceDir;
+
+ StringRef sysroot = C.getSysRoot();
+
+ for (const std::string &Path : TC.getFilePaths()) {
+ // Always print a separator. ResourceDir was the first item shown.
+ llvm::outs() << ':';
+ // Interpretation of leading '=' is needed only for NetBSD.
+ if (Path[0] == '=')
+ llvm::outs() << sysroot << Path.substr(1);
+ else
+ llvm::outs() << Path;
+ }
+ llvm::outs() << "\n";
+ return false;
+ }
+
+ // FIXME: The following handlers should use a callback mechanism, we don't
+ // know what the client would like to do.
+ if (Arg *A = C.getArgs().getLastArg(options::OPT_print_file_name_EQ)) {
+ llvm::outs() << GetFilePath(A->getValue(), TC) << "\n";
+ return false;
+ }
+
+ if (Arg *A = C.getArgs().getLastArg(options::OPT_print_prog_name_EQ)) {
+ llvm::outs() << GetProgramPath(A->getValue(), TC) << "\n";
+ return false;
+ }
+
+ if (C.getArgs().hasArg(options::OPT_print_libgcc_file_name)) {
+ llvm::outs() << GetFilePath("libgcc.a", TC) << "\n";
+ return false;
+ }
+
+ if (C.getArgs().hasArg(options::OPT_print_multi_lib)) {
+ for (const Multilib &Multilib : TC.getMultilibs())
+ llvm::outs() << Multilib << "\n";
+ return false;
+ }
+
+ if (C.getArgs().hasArg(options::OPT_print_multi_directory)) {
+ for (const Multilib &Multilib : TC.getMultilibs()) {
+ if (Multilib.gccSuffix().empty())
+ llvm::outs() << ".\n";
+ else {
+ StringRef Suffix(Multilib.gccSuffix());
+ assert(Suffix.front() == '/');
+ llvm::outs() << Suffix.substr(1) << "\n";
+ }
+ }
+ return false;
+ }
+ return true;
+}
+
+// Display an action graph human-readably. Action A is the "sink" node
+// and latest-occurring action. Traversal is in pre-order, visiting the
+// inputs to each action before printing the action itself.
+static unsigned PrintActions1(const Compilation &C, Action *A,
+ std::map<Action *, unsigned> &Ids) {
+ if (Ids.count(A)) // A was already visited.
+ return Ids[A];
+
+ std::string str;
+ llvm::raw_string_ostream os(str);
+
+ os << Action::getClassName(A->getKind()) << ", ";
+ if (InputAction *IA = dyn_cast<InputAction>(A)) {
+ os << "\"" << IA->getInputArg().getValue() << "\"";
+ } else if (BindArchAction *BIA = dyn_cast<BindArchAction>(A)) {
+ os << '"' << BIA->getArchName() << '"' << ", {"
+ << PrintActions1(C, *BIA->begin(), Ids) << "}";
+ } else if (CudaDeviceAction *CDA = dyn_cast<CudaDeviceAction>(A)) {
+ os << '"' << CDA->getGpuArchName() << '"' << ", {"
+ << PrintActions1(C, *CDA->begin(), Ids) << "}";
+ } else {
+ const ActionList *AL;
+ if (CudaHostAction *CHA = dyn_cast<CudaHostAction>(A)) {
+ os << "{" << PrintActions1(C, *CHA->begin(), Ids) << "}"
+ << ", gpu binaries ";
+ AL = &CHA->getDeviceActions();
+ } else
+ AL = &A->getInputs();
+
+ if (AL->size()) {
+ const char *Prefix = "{";
+ for (Action *PreRequisite : *AL) {
+ os << Prefix << PrintActions1(C, PreRequisite, Ids);
+ Prefix = ", ";
+ }
+ os << "}";
+ } else
+ os << "{}";
+ }
+
+ unsigned Id = Ids.size();
+ Ids[A] = Id;
+ llvm::errs() << Id << ": " << os.str() << ", "
+ << types::getTypeName(A->getType()) << "\n";
+
+ return Id;
+}
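+
+// Illustrative output (assuming a plain C compile such as
+// "clang -ccc-print-phases -c t.c"; exact type names may differ):
+//
+//   0: input, "t.c", c
+//   1: preprocessor, {0}, cpp-output
+//   2: compiler, {1}, ir
+//   3: backend, {2}, assembler
+//   4: assembler, {3}, object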
+
+// Print the action graphs in a compilation C.
+// For example "clang -c file1.c file2.c" is composed of two subgraphs.
+void Driver::PrintActions(const Compilation &C) const {
+ std::map<Action *, unsigned> Ids;
+ for (Action *A : C.getActions())
+ PrintActions1(C, A, Ids);
+}
+
+/// \brief Check whether the given input tree contains any compilation or
+/// assembly actions.
+static bool ContainsCompileOrAssembleAction(const Action *A) {
+ if (isa<CompileJobAction>(A) || isa<BackendJobAction>(A) ||
+ isa<AssembleJobAction>(A))
+ return true;
+
+ for (const Action *Input : *A)
+ if (ContainsCompileOrAssembleAction(Input))
+ return true;
+
+ return false;
+}
+
+void Driver::BuildUniversalActions(Compilation &C, const ToolChain &TC,
+ const InputList &BAInputs) const {
+ DerivedArgList &Args = C.getArgs();
+ ActionList &Actions = C.getActions();
+ llvm::PrettyStackTraceString CrashInfo("Building universal build actions");
+ // Collect the list of architectures. Duplicates are allowed, but should only
+ // be handled once (in the order seen).
+ llvm::StringSet<> ArchNames;
+ SmallVector<const char *, 4> Archs;
+ for (Arg *A : Args) {
+ if (A->getOption().matches(options::OPT_arch)) {
+ // Validate the option here; we don't save the type here because its
+ // particular spelling may participate in other driver choices.
+ llvm::Triple::ArchType Arch =
+ tools::darwin::getArchTypeForMachOArchName(A->getValue());
+ if (Arch == llvm::Triple::UnknownArch) {
+ Diag(clang::diag::err_drv_invalid_arch_name) << A->getAsString(Args);
+ continue;
+ }
+
+ A->claim();
+ if (ArchNames.insert(A->getValue()).second)
+ Archs.push_back(A->getValue());
+ }
+ }
+
+ // When there is no explicit arch for this platform, make sure we still bind
+ // the architecture (to the default) so that -Xarch_ is handled correctly.
+ if (!Archs.size())
+ Archs.push_back(Args.MakeArgString(TC.getDefaultUniversalArchName()));
+
+ ActionList SingleActions;
+ BuildActions(C, TC, Args, BAInputs, SingleActions);
+
+ // Add in arch bindings for every top level action, as well as lipo and
+ // dsymutil steps if needed.
+ for (Action* Act : SingleActions) {
+ // Make sure we can lipo this kind of output. If not (and it is an actual
+ // output) then we disallow, since we can't create an output file with the
+ // right name without overwriting it. We could remove this oddity by just
+ // changing the output names to include the arch, which would also fix
+ // -save-temps. Compatibility wins for now.
+
+ if (Archs.size() > 1 && !types::canLipoType(Act->getType()))
+ Diag(clang::diag::err_drv_invalid_output_with_multiple_archs)
+ << types::getTypeName(Act->getType());
+
+ ActionList Inputs;
+ for (unsigned i = 0, e = Archs.size(); i != e; ++i) {
+ Inputs.push_back(
+ new BindArchAction(std::unique_ptr<Action>(Act), Archs[i]));
+ if (i != 0)
+ Inputs.back()->setOwnsInputs(false);
+ }
+
+ // Lipo if necessary; we do it this way because we need to set the arch
+ // flag so that -Xarch_ gets overwritten.
+ if (Inputs.size() == 1 || Act->getType() == types::TY_Nothing)
+ Actions.append(Inputs.begin(), Inputs.end());
+ else
+ Actions.push_back(new LipoJobAction(Inputs, Act->getType()));
+
+ // Handle debug info queries.
+ Arg *A = Args.getLastArg(options::OPT_g_Group);
+ if (A && !A->getOption().matches(options::OPT_g0) &&
+ !A->getOption().matches(options::OPT_gstabs) &&
+ ContainsCompileOrAssembleAction(Actions.back())) {
+
+ // Add a 'dsymutil' step if necessary, when debug info is enabled and we
+ // have a compile input. We need to run 'dsymutil' ourselves in such cases
+ // because the debug info will refer to a temporary object file which
+ // will be removed at the end of the compilation process.
+ if (Act->getType() == types::TY_Image) {
+ ActionList Inputs;
+ Inputs.push_back(Actions.back());
+ Actions.pop_back();
+ Actions.push_back(new DsymutilJobAction(Inputs, types::TY_dSYM));
+ }
+
+ // Verify the debug info output.
+ if (Args.hasArg(options::OPT_verify_debug_info)) {
+ std::unique_ptr<Action> VerifyInput(Actions.back());
+ Actions.pop_back();
+ Actions.push_back(new VerifyDebugInfoJobAction(std::move(VerifyInput),
+ types::TY_Nothing));
+ }
+ }
+ }
+}
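+
+// Sketch of the resulting graph (assumed example): for
+// "clang -arch i386 -arch x86_64 t.c", each top-level action is wrapped
+// in one BindArchAction per arch and a LipoJobAction merges the two
+// images; with -g and a compile input, a DsymutilJobAction is appended
+// when the output is an image.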
+
+/// \brief Check that the file referenced by Value exists. If it doesn't,
+/// issue a diagnostic and return false.
+static bool DiagnoseInputExistence(const Driver &D, const DerivedArgList &Args,
+ StringRef Value) {
+ if (!D.getCheckInputsExist())
+ return true;
+
+ // stdin always exists.
+ if (Value == "-")
+ return true;
+
+ SmallString<64> Path(Value);
+ if (Arg *WorkDir = Args.getLastArg(options::OPT_working_directory)) {
+ if (!llvm::sys::path::is_absolute(Path)) {
+ SmallString<64> Directory(WorkDir->getValue());
+ llvm::sys::path::append(Directory, Value);
+ Path.assign(Directory);
+ }
+ }
+
+ if (llvm::sys::fs::exists(Twine(Path)))
+ return true;
+
+ if (D.IsCLMode() && !llvm::sys::path::is_absolute(Twine(Path)) &&
+ llvm::sys::Process::FindInEnvPath("LIB", Value))
+ return true;
+
+ D.Diag(clang::diag::err_drv_no_such_file) << Path;
+ return false;
+}
+
+// Construct the list of inputs and their types.
+void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
+ InputList &Inputs) const {
+ // Track the current user specified (-x) input. We also explicitly track the
+ // argument used to set the type; we only want to claim the type when we
+ // actually use it, so we warn about unused -x arguments.
+ types::ID InputType = types::TY_Nothing;
+ Arg *InputTypeArg = nullptr;
+
+ // The last /TC or /TP option sets the input type to C or C++ globally.
+ if (Arg *TCTP = Args.getLastArgNoClaim(options::OPT__SLASH_TC,
+ options::OPT__SLASH_TP)) {
+ InputTypeArg = TCTP;
+ InputType = TCTP->getOption().matches(options::OPT__SLASH_TC)
+ ? types::TY_C
+ : types::TY_CXX;
+
+ arg_iterator it =
+ Args.filtered_begin(options::OPT__SLASH_TC, options::OPT__SLASH_TP);
+ const arg_iterator ie = Args.filtered_end();
+ Arg *Previous = *it++;
+ bool ShowNote = false;
+ while (it != ie) {
+ Diag(clang::diag::warn_drv_overriding_flag_option)
+ << Previous->getSpelling() << (*it)->getSpelling();
+ Previous = *it++;
+ ShowNote = true;
+ }
+ if (ShowNote)
+ Diag(clang::diag::note_drv_t_option_is_global);
+
+ // No driver mode exposes -x and /TC or /TP; we don't support mixing them.
+ assert(!Args.hasArg(options::OPT_x) && "-x and /TC or /TP is not allowed");
+ }
+
+ for (Arg *A : Args) {
+ if (A->getOption().getKind() == Option::InputClass) {
+ const char *Value = A->getValue();
+ types::ID Ty = types::TY_INVALID;
+
+ // Infer the input type if necessary.
+ if (InputType == types::TY_Nothing) {
+ // If there was an explicit arg for this, claim it.
+ if (InputTypeArg)
+ InputTypeArg->claim();
+
+ // stdin must be handled specially.
+ if (memcmp(Value, "-", 2) == 0) {
+ // If running with -E, treat as a C input (this changes the builtin
+ // macros, for example). This may be overridden by -ObjC below.
+ //
+ // Otherwise emit an error but still use a valid type to avoid
+ // spurious errors (e.g., no inputs).
+ if (!Args.hasArgNoClaim(options::OPT_E) && !CCCIsCPP())
+ Diag(IsCLMode() ? clang::diag::err_drv_unknown_stdin_type_clang_cl
+ : clang::diag::err_drv_unknown_stdin_type);
+ Ty = types::TY_C;
+ } else {
+ // Otherwise lookup by extension.
+ // Fallback is C if invoked as C preprocessor or Object otherwise.
+ // We use a host hook here because Darwin at least has its own
+ // idea of what .s is.
+ if (const char *Ext = strrchr(Value, '.'))
+ Ty = TC.LookupTypeForExtension(Ext + 1);
+
+ if (Ty == types::TY_INVALID) {
+ if (CCCIsCPP())
+ Ty = types::TY_C;
+ else
+ Ty = types::TY_Object;
+ }
+
+ // If the driver is invoked as C++ compiler (like clang++ or c++) it
+ // should autodetect some input files as C++ for g++ compatibility.
+ if (CCCIsCXX()) {
+ types::ID OldTy = Ty;
+ Ty = types::lookupCXXTypeForCType(Ty);
+
+ if (Ty != OldTy)
+ Diag(clang::diag::warn_drv_treating_input_as_cxx)
+ << getTypeName(OldTy) << getTypeName(Ty);
+ }
+ }
+
+ // -ObjC and -ObjC++ override the default language, but only for "source
+ // files". We just treat everything that isn't a linker input as a
+ // source file.
+ //
+ // FIXME: Clean this up if we move the phase sequence into the type.
+ if (Ty != types::TY_Object) {
+ if (Args.hasArg(options::OPT_ObjC))
+ Ty = types::TY_ObjC;
+ else if (Args.hasArg(options::OPT_ObjCXX))
+ Ty = types::TY_ObjCXX;
+ }
+ } else {
+ assert(InputTypeArg && "InputType set w/o InputTypeArg");
+ if (!InputTypeArg->getOption().matches(options::OPT_x)) {
+ // If emulating cl.exe, make sure that /TC and /TP don't affect input
+ // object files.
+ const char *Ext = strrchr(Value, '.');
+ if (Ext && TC.LookupTypeForExtension(Ext + 1) == types::TY_Object)
+ Ty = types::TY_Object;
+ }
+ if (Ty == types::TY_INVALID) {
+ Ty = InputType;
+ InputTypeArg->claim();
+ }
+ }
+
+ if (DiagnoseInputExistence(*this, Args, Value))
+ Inputs.push_back(std::make_pair(Ty, A));
+
+ } else if (A->getOption().matches(options::OPT__SLASH_Tc)) {
+ StringRef Value = A->getValue();
+ if (DiagnoseInputExistence(*this, Args, Value)) {
+ Arg *InputArg = MakeInputArg(Args, Opts, A->getValue());
+ Inputs.push_back(std::make_pair(types::TY_C, InputArg));
+ }
+ A->claim();
+ } else if (A->getOption().matches(options::OPT__SLASH_Tp)) {
+ StringRef Value = A->getValue();
+ if (DiagnoseInputExistence(*this, Args, Value)) {
+ Arg *InputArg = MakeInputArg(Args, Opts, A->getValue());
+ Inputs.push_back(std::make_pair(types::TY_CXX, InputArg));
+ }
+ A->claim();
+ } else if (A->getOption().hasFlag(options::LinkerInput)) {
+ // Just treat as object type, we could make a special type for this if
+ // necessary.
+ Inputs.push_back(std::make_pair(types::TY_Object, A));
+
+ } else if (A->getOption().matches(options::OPT_x)) {
+ InputTypeArg = A;
+ InputType = types::lookupTypeForTypeSpecifier(A->getValue());
+ A->claim();
+
+ // Follow gcc behavior and treat as linker input for invalid -x
+ // options. It's not clear why we shouldn't just revert to unknown, but
+ // this isn't very important; we might as well be bug compatible.
+ if (!InputType) {
+ Diag(clang::diag::err_drv_unknown_language) << A->getValue();
+ InputType = types::TY_Object;
+ }
+ }
+ }
+ if (CCCIsCPP() && Inputs.empty()) {
+ // If called as standalone preprocessor, stdin is processed
+ // if no other input is present.
+ Arg *A = MakeInputArg(Args, Opts, "-");
+ Inputs.push_back(std::make_pair(types::TY_C, A));
+ }
+}
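+
+// Example of the -x handling above (illustrative): "clang -x c++ foo.c"
+// records InputType = TY_CXX from the -x argument, so foo.c is pushed as
+// a (TY_CXX, foo.c) pair instead of being typed by its .c extension.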
+
+// For each unique --cuda-gpu-arch= argument, creates a TY_CUDA_DEVICE
+// input action and then wraps each one in a CudaDeviceAction paired with
+// the appropriate GPU arch name. In case of partial (i.e. preprocessing
+// only) or device-only compilation, each device action is added to \p
+// Actions and \p Current is released. Otherwise the function creates
+// and returns a new CudaHostAction which wraps \p Current and the
+// device-side actions.
+static std::unique_ptr<Action>
+buildCudaActions(Compilation &C, DerivedArgList &Args, const Arg *InputArg,
+ std::unique_ptr<Action> HostAction, ActionList &Actions) {
+ Arg *PartialCompilationArg = Args.getLastArg(options::OPT_cuda_host_only,
+ options::OPT_cuda_device_only);
+ // Host-only compilation case.
+ if (PartialCompilationArg &&
+ PartialCompilationArg->getOption().matches(options::OPT_cuda_host_only))
+ return std::unique_ptr<Action>(
+ new CudaHostAction(std::move(HostAction), {}));
+
+ // Collect all cuda_gpu_arch parameters, removing duplicates.
+ SmallVector<const char *, 4> GpuArchList;
+ llvm::StringSet<> GpuArchNames;
+ for (Arg *A : Args) {
+ if (A->getOption().matches(options::OPT_cuda_gpu_arch_EQ)) {
+ A->claim();
+ if (GpuArchNames.insert(A->getValue()).second)
+ GpuArchList.push_back(A->getValue());
+ }
+ }
+
+ // Default to sm_20 which is the lowest common denominator for supported GPUs.
+ // sm_20 code should work correctly, if suboptimally, on all newer GPUs.
+ if (GpuArchList.empty())
+ GpuArchList.push_back("sm_20");
+
+ // Replicate inputs for each GPU architecture.
+ Driver::InputList CudaDeviceInputs;
+ for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I)
+ CudaDeviceInputs.push_back(std::make_pair(types::TY_CUDA_DEVICE, InputArg));
+
+ // Build actions for all device inputs.
+ assert(C.getCudaDeviceToolChain() &&
+ "Missing toolchain for device-side compilation.");
+ ActionList CudaDeviceActions;
+ C.getDriver().BuildActions(C, *C.getCudaDeviceToolChain(), Args,
+ CudaDeviceInputs, CudaDeviceActions);
+ assert(GpuArchList.size() == CudaDeviceActions.size() &&
+ "Failed to create actions for all devices");
+
+ // Check whether any of the device actions stopped before they could
+ // generate PTX.
+ bool PartialCompilation = false;
+ for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
+ if (CudaDeviceActions[I]->getKind() != Action::BackendJobClass) {
+ PartialCompilation = true;
+ break;
+ }
+ }
+
+ // Figure out what to do with device actions -- pass them as inputs to the
+ // host action or run each of them independently.
+ bool DeviceOnlyCompilation = PartialCompilationArg != nullptr;
+ if (PartialCompilation || DeviceOnlyCompilation) {
+ // In the case of partial or device-only compilation, the results of the
+ // device actions are not consumed by the host action, so the device
+ // actions have to be added to the top-level actions list with
+ // AtTopLevel=true and run independently.
+
+ // -o is ambiguous if we have more than one top-level action.
+ if (Args.hasArg(options::OPT_o) &&
+ (!DeviceOnlyCompilation || GpuArchList.size() > 1)) {
+ C.getDriver().Diag(
+ clang::diag::err_drv_output_argument_with_multiple_files);
+ return nullptr;
+ }
+
+ for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I)
+ Actions.push_back(new CudaDeviceAction(
+ std::unique_ptr<Action>(CudaDeviceActions[I]), GpuArchList[I],
+ /* AtTopLevel */ true));
+ // Kill host action in case of device-only compilation.
+ if (DeviceOnlyCompilation)
+ HostAction.reset(nullptr);
+ return HostAction;
+ }
+
+ // Outputs of device actions during complete CUDA compilation get created
+ // with AtTopLevel=false and become inputs for the host action.
+ ActionList DeviceActions;
+ for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I)
+ DeviceActions.push_back(new CudaDeviceAction(
+ std::unique_ptr<Action>(CudaDeviceActions[I]), GpuArchList[I],
+ /* AtTopLevel */ false));
+ // Return a new host action that incorporates original host action and all
+ // device actions.
+ return std::unique_ptr<Action>(
+ new CudaHostAction(std::move(HostAction), DeviceActions));
+}
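+
+// Illustrative shape of the result (assumed flags): for
+// "clang -c --cuda-gpu-arch=sm_35 --cuda-gpu-arch=sm_50 a.cu", two
+// CudaDeviceActions (sm_35, sm_50) are built; in a full compilation they
+// become inputs of a single CudaHostAction, while with --cuda-device-only
+// they are pushed as two separate top-level actions instead.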
+
+void Driver::BuildActions(Compilation &C, const ToolChain &TC,
+ DerivedArgList &Args, const InputList &Inputs,
+ ActionList &Actions) const {
+ llvm::PrettyStackTraceString CrashInfo("Building compilation actions");
+
+ if (!SuppressMissingInputWarning && Inputs.empty()) {
+ Diag(clang::diag::err_drv_no_input_files);
+ return;
+ }
+
+ Arg *FinalPhaseArg;
+ phases::ID FinalPhase = getFinalPhase(Args, &FinalPhaseArg);
+
+ if (FinalPhase == phases::Link && Args.hasArg(options::OPT_emit_llvm)) {
+ Diag(clang::diag::err_drv_emit_llvm_link);
+ }
+
+ // Reject -Z* at the top level, these options should never have been exposed
+ // by gcc.
+ if (Arg *A = Args.getLastArg(options::OPT_Z_Joined))
+ Diag(clang::diag::err_drv_use_of_Z_option) << A->getAsString(Args);
+
+ // Diagnose misuse of /Fo.
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_Fo)) {
+ StringRef V = A->getValue();
+ if (Inputs.size() > 1 && !V.empty() &&
+ !llvm::sys::path::is_separator(V.back())) {
+ // Check whether /Fo tries to name an output file for multiple inputs.
+ Diag(clang::diag::err_drv_out_file_argument_with_multiple_sources)
+ << A->getSpelling() << V;
+ Args.eraseArg(options::OPT__SLASH_Fo);
+ }
+ }
+
+ // Diagnose misuse of /Fa.
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_Fa)) {
+ StringRef V = A->getValue();
+ if (Inputs.size() > 1 && !V.empty() &&
+ !llvm::sys::path::is_separator(V.back())) {
+ // Check whether /Fa tries to name an asm file for multiple inputs.
+ Diag(clang::diag::err_drv_out_file_argument_with_multiple_sources)
+ << A->getSpelling() << V;
+ Args.eraseArg(options::OPT__SLASH_Fa);
+ }
+ }
+
+ // Diagnose misuse of /o.
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_o)) {
+ if (A->getValue()[0] == '\0') {
+ // It has to have a value.
+ Diag(clang::diag::err_drv_missing_argument) << A->getSpelling() << 1;
+ Args.eraseArg(options::OPT__SLASH_o);
+ }
+ }
+
+ // Construct the actions to perform.
+ ActionList LinkerInputs;
+
+ llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PL;
+ for (auto &I : Inputs) {
+ types::ID InputType = I.first;
+ const Arg *InputArg = I.second;
+
+ PL.clear();
+ types::getCompilationPhases(InputType, PL);
+
+ // If the first step comes after the final phase we are doing as part of
+ // this compilation, warn the user about it.
+ phases::ID InitialPhase = PL[0];
+ if (InitialPhase > FinalPhase) {
+ // Claim here to avoid the more general unused warning.
+ InputArg->claim();
+
+ // Suppress all unused style warnings with -Qunused-arguments
+ if (Args.hasArg(options::OPT_Qunused_arguments))
+ continue;
+
+ // Special case when the final phase is determined by the binary name,
+ // rather than by a command-line argument with a corresponding Arg.
+ if (CCCIsCPP())
+ Diag(clang::diag::warn_drv_input_file_unused_by_cpp)
+ << InputArg->getAsString(Args) << getPhaseName(InitialPhase);
+ // Special case '-E' warning on a previously preprocessed file to make
+ // more sense.
+ else if (InitialPhase == phases::Compile &&
+ FinalPhase == phases::Preprocess &&
+ getPreprocessedType(InputType) == types::TY_INVALID)
+ Diag(clang::diag::warn_drv_preprocessed_input_file_unused)
+ << InputArg->getAsString(Args) << !!FinalPhaseArg
+ << (FinalPhaseArg ? FinalPhaseArg->getOption().getName() : "");
+ else
+ Diag(clang::diag::warn_drv_input_file_unused)
+ << InputArg->getAsString(Args) << getPhaseName(InitialPhase)
+ << !!FinalPhaseArg
+ << (FinalPhaseArg ? FinalPhaseArg->getOption().getName() : "");
+ continue;
+ }
+
+ phases::ID CudaInjectionPhase = FinalPhase;
+ for (const auto &Phase : PL)
+ if (Phase <= FinalPhase && Phase == phases::Compile) {
+ CudaInjectionPhase = Phase;
+ break;
+ }
+
+ // Build the pipeline for this file.
+ std::unique_ptr<Action> Current(new InputAction(*InputArg, InputType));
+ for (SmallVectorImpl<phases::ID>::iterator i = PL.begin(), e = PL.end();
+ i != e; ++i) {
+ phases::ID Phase = *i;
+
+ // We are done if this step is past what the user requested.
+ if (Phase > FinalPhase)
+ break;
+
+ // Queue linker inputs.
+ if (Phase == phases::Link) {
+ assert((i + 1) == e && "linking must be final compilation step.");
+ LinkerInputs.push_back(Current.release());
+ break;
+ }
+
+ // Some types skip the assembler phase (e.g., llvm-bc), but we can't
+ // encode this in the steps because the intermediate type depends on
+ // arguments. Just special case here.
+ if (Phase == phases::Assemble && Current->getType() != types::TY_PP_Asm)
+ continue;
+
+ // Otherwise construct the appropriate action.
+ Current = ConstructPhaseAction(TC, Args, Phase, std::move(Current));
+
+ if (InputType == types::TY_CUDA && Phase == CudaInjectionPhase) {
+ Current =
+ buildCudaActions(C, Args, InputArg, std::move(Current), Actions);
+ if (!Current)
+ break;
+ }
+
+ if (Current->getType() == types::TY_Nothing)
+ break;
+ }
+
+ // If we ended with something, add to the output list.
+ if (Current)
+ Actions.push_back(Current.release());
+ }
+
+ // Add a link action if necessary.
+ if (!LinkerInputs.empty())
+ Actions.push_back(new LinkJobAction(LinkerInputs, types::TY_Image));
+
+ // If we are linking, claim any options which are obviously only used for
+ // compilation.
+ if (FinalPhase == phases::Link && PL.size() == 1) {
+ Args.ClaimAllArgs(options::OPT_CompileOnly_Group);
+ Args.ClaimAllArgs(options::OPT_cl_compile_Group);
+ }
+
+ // Claim ignored clang-cl options.
+ Args.ClaimAllArgs(options::OPT_cl_ignored_Group);
+
+ // Claim --cuda-host-only arg which may be passed to non-CUDA
+ // compilations and should not trigger warnings there.
+ Args.ClaimAllArgs(options::OPT_cuda_host_only);
+}
+
+std::unique_ptr<Action>
+Driver::ConstructPhaseAction(const ToolChain &TC, const ArgList &Args,
+ phases::ID Phase,
+ std::unique_ptr<Action> Input) const {
+ llvm::PrettyStackTraceString CrashInfo("Constructing phase actions");
+ // Build the appropriate action.
+ switch (Phase) {
+ case phases::Link:
+ llvm_unreachable("link action invalid here.");
+ case phases::Preprocess: {
+ types::ID OutputTy;
+ // -{M, MM} alter the output type.
+ if (Args.hasArg(options::OPT_M, options::OPT_MM)) {
+ OutputTy = types::TY_Dependencies;
+ } else {
+ OutputTy = Input->getType();
+ if (!Args.hasFlag(options::OPT_frewrite_includes,
+ options::OPT_fno_rewrite_includes, false) &&
+ !CCGenDiagnostics)
+ OutputTy = types::getPreprocessedType(OutputTy);
+ assert(OutputTy != types::TY_INVALID &&
+ "Cannot preprocess this input type!");
+ }
+ return llvm::make_unique<PreprocessJobAction>(std::move(Input), OutputTy);
+ }
+ case phases::Precompile: {
+ types::ID OutputTy = types::TY_PCH;
+ if (Args.hasArg(options::OPT_fsyntax_only)) {
+ // Syntax checks should not emit a PCH file
+ OutputTy = types::TY_Nothing;
+ }
+ return llvm::make_unique<PrecompileJobAction>(std::move(Input), OutputTy);
+ }
+ case phases::Compile: {
+ if (Args.hasArg(options::OPT_fsyntax_only))
+ return llvm::make_unique<CompileJobAction>(std::move(Input),
+ types::TY_Nothing);
+ if (Args.hasArg(options::OPT_rewrite_objc))
+ return llvm::make_unique<CompileJobAction>(std::move(Input),
+ types::TY_RewrittenObjC);
+ if (Args.hasArg(options::OPT_rewrite_legacy_objc))
+ return llvm::make_unique<CompileJobAction>(std::move(Input),
+ types::TY_RewrittenLegacyObjC);
+ if (Args.hasArg(options::OPT__analyze, options::OPT__analyze_auto))
+ return llvm::make_unique<AnalyzeJobAction>(std::move(Input),
+ types::TY_Plist);
+ if (Args.hasArg(options::OPT__migrate))
+ return llvm::make_unique<MigrateJobAction>(std::move(Input),
+ types::TY_Remap);
+ if (Args.hasArg(options::OPT_emit_ast))
+ return llvm::make_unique<CompileJobAction>(std::move(Input),
+ types::TY_AST);
+ if (Args.hasArg(options::OPT_module_file_info))
+ return llvm::make_unique<CompileJobAction>(std::move(Input),
+ types::TY_ModuleFile);
+ if (Args.hasArg(options::OPT_verify_pch))
+ return llvm::make_unique<VerifyPCHJobAction>(std::move(Input),
+ types::TY_Nothing);
+ return llvm::make_unique<CompileJobAction>(std::move(Input),
+ types::TY_LLVM_BC);
+ }
+ case phases::Backend: {
+ if (isUsingLTO()) {
+ types::ID Output =
+ Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC;
+ return llvm::make_unique<BackendJobAction>(std::move(Input), Output);
+ }
+ if (Args.hasArg(options::OPT_emit_llvm)) {
+ types::ID Output =
+ Args.hasArg(options::OPT_S) ? types::TY_LLVM_IR : types::TY_LLVM_BC;
+ return llvm::make_unique<BackendJobAction>(std::move(Input), Output);
+ }
+ return llvm::make_unique<BackendJobAction>(std::move(Input),
+ types::TY_PP_Asm);
+ }
+ case phases::Assemble:
+ return llvm::make_unique<AssembleJobAction>(std::move(Input),
+ types::TY_Object);
+ }
+
+ llvm_unreachable("invalid phase in ConstructPhaseAction");
+}
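+
+// Quick reference for the mapping above (illustrative): "-E" stops at
+// PreprocessJobAction, "-fsyntax-only" yields a TY_Nothing compile,
+// "-S -emit-llvm" makes the backend emit TY_LLVM_IR, and a plain "-c"
+// runs through the backend (TY_PP_Asm) into an AssembleJobAction
+// producing TY_Object.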
+
+void Driver::BuildJobs(Compilation &C) const {
+ llvm::PrettyStackTraceString CrashInfo("Building compilation jobs");
+
+ Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o);
+
+ // It is an error to provide a -o option if we are making multiple output
+ // files.
+ if (FinalOutput) {
+ unsigned NumOutputs = 0;
+ for (const Action *A : C.getActions())
+ if (A->getType() != types::TY_Nothing)
+ ++NumOutputs;
+
+ if (NumOutputs > 1) {
+ Diag(clang::diag::err_drv_output_argument_with_multiple_files);
+ FinalOutput = nullptr;
+ }
+ }
+
+ // Collect the list of architectures.
+ llvm::StringSet<> ArchNames;
+ if (C.getDefaultToolChain().getTriple().isOSBinFormatMachO())
+ for (const Arg *A : C.getArgs())
+ if (A->getOption().matches(options::OPT_arch))
+ ArchNames.insert(A->getValue());
+
+ for (Action *A : C.getActions()) {
+ // If we are linking an image for multiple archs then the linker wants
+ // -arch_multiple and -final_output <final image name>. Unfortunately, this
+ // doesn't fit in cleanly because we have to pass this information down.
+ //
+ // FIXME: This is a hack; find a cleaner way to integrate this into the
+ // process.
+ const char *LinkingOutput = nullptr;
+ if (isa<LipoJobAction>(A)) {
+ if (FinalOutput)
+ LinkingOutput = FinalOutput->getValue();
+ else
+ LinkingOutput = getDefaultImageName();
+ }
+
+ InputInfo II;
+ BuildJobsForAction(C, A, &C.getDefaultToolChain(),
+ /*BoundArch*/ nullptr,
+ /*AtTopLevel*/ true,
+ /*MultipleArchs*/ ArchNames.size() > 1,
+ /*LinkingOutput*/ LinkingOutput, II);
+ }
+
+ // If the user passed -Qunused-arguments or there were errors, don't warn
+ // about any unused arguments.
+ if (Diags.hasErrorOccurred() ||
+ C.getArgs().hasArg(options::OPT_Qunused_arguments))
+ return;
+
+ // Claim -### here.
+ (void)C.getArgs().hasArg(options::OPT__HASH_HASH_HASH);
+
+ // Claim --driver-mode, it was handled earlier.
+ (void)C.getArgs().hasArg(options::OPT_driver_mode);
+
+ for (Arg *A : C.getArgs()) {
+ // FIXME: It would be nice to be able to send the argument to the
+ // DiagnosticsEngine, so that extra values, position, and so on could be
+ // printed.
+ if (!A->isClaimed()) {
+ if (A->getOption().hasFlag(options::NoArgumentUnused))
+ continue;
+
+ // Suppress the warning automatically if this is just a flag, and it is an
+ // instance of an argument we already claimed.
+ const Option &Opt = A->getOption();
+ if (Opt.getKind() == Option::FlagClass) {
+ bool DuplicateClaimed = false;
+
+ for (const Arg *AA : C.getArgs().filtered(&Opt)) {
+ if (AA->isClaimed()) {
+ DuplicateClaimed = true;
+ break;
+ }
+ }
+
+ if (DuplicateClaimed)
+ continue;
+ }
+
+ Diag(clang::diag::warn_drv_unused_argument)
+ << A->getAsString(C.getArgs());
+ }
+ }
+}
+
+// Returns a Tool for a given JobAction. In case the action and its
+// predecessors can be combined, updates Inputs with the inputs of the
+// first combined action. If one of the collapsed actions is a
+// CudaHostAction, updates CollapsedCHA with a pointer to it so the
+// caller can deal with the extra handling such an action requires.
+static const Tool *selectToolForJob(Compilation &C, bool SaveTemps,
+ const ToolChain *TC, const JobAction *JA,
+ const ActionList *&Inputs,
+ const CudaHostAction *&CollapsedCHA) {
+ const Tool *ToolForJob = nullptr;
+ CollapsedCHA = nullptr;
+
+ // See if we should look for a compiler with an integrated assembler. We match
+ // bottom up, so what we are actually looking for is an assembler job with a
+ // compiler input.
+
+ if (TC->useIntegratedAs() && !SaveTemps &&
+ !C.getArgs().hasArg(options::OPT_via_file_asm) &&
+ !C.getArgs().hasArg(options::OPT__SLASH_FA) &&
+ !C.getArgs().hasArg(options::OPT__SLASH_Fa) &&
+ isa<AssembleJobAction>(JA) && Inputs->size() == 1 &&
+ isa<BackendJobAction>(*Inputs->begin())) {
+ // A BackendJob is always preceded by a CompileJob, and without
+ // -save-temps they will always get combined together, so instead of
+ // checking the backend tool, check if the tool for the CompileJob
+ // has an integrated assembler.
+ const ActionList *BackendInputs = &(*Inputs)[0]->getInputs();
+ // Compile job may be wrapped in CudaHostAction, extract it if
+ // that's the case and update CollapsedCHA if we combine phases.
+ CudaHostAction *CHA = dyn_cast<CudaHostAction>(*BackendInputs->begin());
+ JobAction *CompileJA =
+ cast<CompileJobAction>(CHA ? *CHA->begin() : *BackendInputs->begin());
+ assert(CompileJA && "Backend job is not preceded by compile job.");
+ const Tool *Compiler = TC->SelectTool(*CompileJA);
+ if (!Compiler)
+ return nullptr;
+ if (Compiler->hasIntegratedAssembler()) {
+ Inputs = &CompileJA->getInputs();
+ ToolForJob = Compiler;
+ CollapsedCHA = CHA;
+ }
+ }
+
+ // A backend job should always be combined with the preceding compile job
+ // unless OPT_save_temps is enabled and the compiler is capable of emitting
+ // LLVM IR as an intermediate output.
+ if (isa<BackendJobAction>(JA)) {
+ // Check if the compiler supports emitting LLVM IR.
+ assert(Inputs->size() == 1);
+ // Compile job may be wrapped in CudaHostAction, extract it if
+ // that's the case and update CollapsedCHA if we combine phases.
+ CudaHostAction *CHA = dyn_cast<CudaHostAction>(*Inputs->begin());
+ JobAction *CompileJA =
+ cast<CompileJobAction>(CHA ? *CHA->begin() : *Inputs->begin());
+ assert(CompileJA && "Backend job is not preceded by compile job.");
+ const Tool *Compiler = TC->SelectTool(*CompileJA);
+ if (!Compiler)
+ return nullptr;
+ if (!Compiler->canEmitIR() || !SaveTemps) {
+ Inputs = &CompileJA->getInputs();
+ ToolForJob = Compiler;
+ CollapsedCHA = CHA;
+ }
+ }
+
+ // Otherwise use the tool for the current job.
+ if (!ToolForJob)
+ ToolForJob = TC->SelectTool(*JA);
+
+ // See if we should use an integrated preprocessor. We do so when we have
+ // exactly one input, since this is the only use case we care about
+ // (irrelevant since we don't support combine yet).
+ if (Inputs->size() == 1 && isa<PreprocessJobAction>(*Inputs->begin()) &&
+ !C.getArgs().hasArg(options::OPT_no_integrated_cpp) &&
+ !C.getArgs().hasArg(options::OPT_traditional_cpp) && !SaveTemps &&
+ !C.getArgs().hasArg(options::OPT_rewrite_objc) &&
+ ToolForJob->hasIntegratedCPP())
+ Inputs = &(*Inputs)[0]->getInputs();
+
+ return ToolForJob;
+}
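+
+// Net effect (illustrative): with an integrated assembler and integrated
+// preprocessor, the preprocess -> compile -> backend -> assemble chain
+// for a single input collapses into one clang job, whereas -save-temps
+// or the /FA and via-file-asm options above keep the steps as separate
+// commands.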
+
+void Driver::BuildJobsForAction(Compilation &C, const Action *A,
+ const ToolChain *TC, const char *BoundArch,
+ bool AtTopLevel, bool MultipleArchs,
+ const char *LinkingOutput,
+ InputInfo &Result) const {
+ llvm::PrettyStackTraceString CrashInfo("Building compilation jobs");
+
+ InputInfoList CudaDeviceInputInfos;
+ if (const CudaHostAction *CHA = dyn_cast<CudaHostAction>(A)) {
+ InputInfo II;
+ // Append outputs of device jobs to the input list.
+ for (const Action *DA : CHA->getDeviceActions()) {
+ BuildJobsForAction(C, DA, TC, nullptr, AtTopLevel,
+ /*MultipleArchs*/ false, LinkingOutput, II);
+ CudaDeviceInputInfos.push_back(II);
+ }
+ // Override current action with a real host compile action and continue
+ // processing it.
+ A = *CHA->begin();
+ }
+
+ if (const InputAction *IA = dyn_cast<InputAction>(A)) {
+ // FIXME: It would be nice to not claim this here; maybe the old scheme of
+ // just using Args was better?
+ const Arg &Input = IA->getInputArg();
+ Input.claim();
+ if (Input.getOption().matches(options::OPT_INPUT)) {
+ const char *Name = Input.getValue();
+ Result = InputInfo(Name, A->getType(), Name);
+ } else {
+ Result = InputInfo(&Input, A->getType(), "");
+ }
+ return;
+ }
+
+ if (const BindArchAction *BAA = dyn_cast<BindArchAction>(A)) {
+ const ToolChain *TC;
+ const char *ArchName = BAA->getArchName();
+
+ if (ArchName)
+ TC = &getToolChain(
+ C.getArgs(),
+ computeTargetTriple(DefaultTargetTriple, C.getArgs(), ArchName));
+ else
+ TC = &C.getDefaultToolChain();
+
+ BuildJobsForAction(C, *BAA->begin(), TC, ArchName, AtTopLevel,
+ MultipleArchs, LinkingOutput, Result);
+ return;
+ }
+
+ if (const CudaDeviceAction *CDA = dyn_cast<CudaDeviceAction>(A)) {
+ // Initial processing of CudaDeviceAction carries host params.
+ // Call BuildJobsForAction() again, now with correct device parameters.
+ assert(CDA->getGpuArchName() && "No GPU name in device action.");
+ BuildJobsForAction(C, *CDA->begin(), C.getCudaDeviceToolChain(),
+ CDA->getGpuArchName(), CDA->isAtTopLevel(),
+ /*MultipleArchs*/ true, LinkingOutput, Result);
+ return;
+ }
+
+ const ActionList *Inputs = &A->getInputs();
+
+ const JobAction *JA = cast<JobAction>(A);
+ const CudaHostAction *CollapsedCHA = nullptr;
+ const Tool *T =
+ selectToolForJob(C, isSaveTempsEnabled(), TC, JA, Inputs, CollapsedCHA);
+ if (!T)
+ return;
+
+ // If we've collapsed an action list that contained a CudaHostAction, we
+ // need to build jobs for the device-side inputs it may have held.
+ if (CollapsedCHA) {
+ InputInfo II;
+ for (const Action *DA : CollapsedCHA->getDeviceActions()) {
+ BuildJobsForAction(C, DA, TC, "", AtTopLevel,
+ /*MultipleArchs*/ false, LinkingOutput, II);
+ CudaDeviceInputInfos.push_back(II);
+ }
+ }
+
+ // Only use pipes when there is exactly one input.
+ InputInfoList InputInfos;
+ for (const Action *Input : *Inputs) {
+ // Treat dsymutil and verify sub-jobs as being at the top-level too; they
+ // shouldn't get temporary output names.
+ // FIXME: Clean this up.
+ bool SubJobAtTopLevel = false;
+ if (AtTopLevel && (isa<DsymutilJobAction>(A) || isa<VerifyJobAction>(A)))
+ SubJobAtTopLevel = true;
+
+ InputInfo II;
+ BuildJobsForAction(C, Input, TC, BoundArch, SubJobAtTopLevel, MultipleArchs,
+ LinkingOutput, II);
+ InputInfos.push_back(II);
+ }
+
+ // Always use the first input as the base input.
+ const char *BaseInput = InputInfos[0].getBaseInput();
+
+ // ... except dsymutil actions, which use their actual input as the base
+ // input.
+ if (JA->getType() == types::TY_dSYM)
+ BaseInput = InputInfos[0].getFilename();
+
+ // Append outputs of cuda device jobs to the input list
+ if (CudaDeviceInputInfos.size())
+ InputInfos.append(CudaDeviceInputInfos.begin(), CudaDeviceInputInfos.end());
+
+ // Determine the place to write output to, if any.
+ if (JA->getType() == types::TY_Nothing)
+ Result = InputInfo(A->getType(), BaseInput);
+ else
+ Result = InputInfo(GetNamedOutputPath(C, *JA, BaseInput, BoundArch,
+ AtTopLevel, MultipleArchs),
+ A->getType(), BaseInput);
+
+ if (CCCPrintBindings && !CCGenDiagnostics) {
+ llvm::errs() << "# \"" << T->getToolChain().getTripleString() << '"'
+ << " - \"" << T->getName() << "\", inputs: [";
+ for (unsigned i = 0, e = InputInfos.size(); i != e; ++i) {
+ llvm::errs() << InputInfos[i].getAsString();
+ if (i + 1 != e)
+ llvm::errs() << ", ";
+ }
+ llvm::errs() << "], output: " << Result.getAsString() << "\n";
+ } else {
+ T->ConstructJob(C, *JA, Result, InputInfos,
+ C.getArgsForToolChain(TC, BoundArch), LinkingOutput);
+ }
+}
+
+const char *Driver::getDefaultImageName() const {
+ llvm::Triple Target(llvm::Triple::normalize(DefaultTargetTriple));
+ return Target.isOSWindows() ? "a.exe" : "a.out";
+}
+
+/// \brief Create an output filename based on ArgValue, which could be a
+/// full filename, a filename without an extension, or a directory. If
+/// ArgValue does not provide a filename, then use BaseName, and use the
+/// extension suitable for FileType.
+static const char *MakeCLOutputFilename(const ArgList &Args, StringRef ArgValue,
+ StringRef BaseName,
+ types::ID FileType) {
+ SmallString<128> Filename = ArgValue;
+
+ if (ArgValue.empty()) {
+ // If the argument is empty, output to BaseName in the current dir.
+ Filename = BaseName;
+ } else if (llvm::sys::path::is_separator(Filename.back())) {
+ // If the argument is a directory, output to BaseName in that dir.
+ llvm::sys::path::append(Filename, BaseName);
+ }
+
+ if (!llvm::sys::path::has_extension(ArgValue)) {
+ // If the argument didn't provide an extension, then set it.
+ const char *Extension = types::getTypeTempSuffix(FileType, true);
+
+ if (FileType == types::TY_Image &&
+ Args.hasArg(options::OPT__SLASH_LD, options::OPT__SLASH_LDd)) {
+ // The output file is a dll.
+ Extension = "dll";
+ }
+
+ llvm::sys::path::replace_extension(Filename, Extension);
+ }
+
+ return Args.MakeArgString(Filename.c_str());
+}
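+
+// Behavior sketch (assumed inputs): with BaseName "a.c" and FileType
+// TY_Object, ArgValue "" yields "a.obj", ArgValue "out\" yields
+// "out\a.obj", and ArgValue "foo" yields "foo.obj"; an explicit
+// extension in ArgValue is preserved.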
+
+const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
+ const char *BaseInput,
+ const char *BoundArch, bool AtTopLevel,
+ bool MultipleArchs) const {
+ llvm::PrettyStackTraceString CrashInfo("Computing output path");
+ // Output to a user requested destination?
+ if (AtTopLevel && !isa<DsymutilJobAction>(JA) && !isa<VerifyJobAction>(JA)) {
+ if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o))
+ return C.addResultFile(FinalOutput->getValue(), &JA);
+ }
+
+ // For /P, preprocess to file named after BaseInput.
+ if (C.getArgs().hasArg(options::OPT__SLASH_P)) {
+ assert(AtTopLevel && isa<PreprocessJobAction>(JA));
+ StringRef BaseName = llvm::sys::path::filename(BaseInput);
+ StringRef NameArg;
+ if (Arg *A = C.getArgs().getLastArg(options::OPT__SLASH_Fi))
+ NameArg = A->getValue();
+ return C.addResultFile(
+ MakeCLOutputFilename(C.getArgs(), NameArg, BaseName, types::TY_PP_C),
+ &JA);
+ }
+
+ // Default to writing to stdout?
+ if (AtTopLevel && !CCGenDiagnostics &&
+ (isa<PreprocessJobAction>(JA) || JA.getType() == types::TY_ModuleFile))
+ return "-";
+
+ // Is this the assembly listing for /FA?
+ if (JA.getType() == types::TY_PP_Asm &&
+ (C.getArgs().hasArg(options::OPT__SLASH_FA) ||
+ C.getArgs().hasArg(options::OPT__SLASH_Fa))) {
+ // Use /Fa and the input filename to determine the asm file name.
+ StringRef BaseName = llvm::sys::path::filename(BaseInput);
+ StringRef FaValue = C.getArgs().getLastArgValue(options::OPT__SLASH_Fa);
+ return C.addResultFile(
+ MakeCLOutputFilename(C.getArgs(), FaValue, BaseName, JA.getType()),
+ &JA);
+ }
+
+ // Output to a temporary file?
+ if ((!AtTopLevel && !isSaveTempsEnabled() &&
+ !C.getArgs().hasArg(options::OPT__SLASH_Fo)) ||
+ CCGenDiagnostics) {
+ StringRef Name = llvm::sys::path::filename(BaseInput);
+ std::pair<StringRef, StringRef> Split = Name.split('.');
+ std::string TmpName = GetTemporaryPath(
+ Split.first, types::getTypeTempSuffix(JA.getType(), IsCLMode()));
+ return C.addTempFile(C.getArgs().MakeArgString(TmpName.c_str()));
+ }
+
+ SmallString<128> BasePath(BaseInput);
+ StringRef BaseName;
+
+ // Dsymutil actions should use the full path.
+ if (isa<DsymutilJobAction>(JA) || isa<VerifyJobAction>(JA))
+ BaseName = BasePath;
+ else
+ BaseName = llvm::sys::path::filename(BasePath);
+
+ // Determine what the derived output name should be.
+ const char *NamedOutput;
+
+ if (JA.getType() == types::TY_Object &&
+ C.getArgs().hasArg(options::OPT__SLASH_Fo, options::OPT__SLASH_o)) {
+ // The /Fo or /o flag decides the object filename.
+ StringRef Val =
+ C.getArgs()
+ .getLastArg(options::OPT__SLASH_Fo, options::OPT__SLASH_o)
+ ->getValue();
+ NamedOutput =
+ MakeCLOutputFilename(C.getArgs(), Val, BaseName, types::TY_Object);
+ } else if (JA.getType() == types::TY_Image &&
+ C.getArgs().hasArg(options::OPT__SLASH_Fe,
+ options::OPT__SLASH_o)) {
+ // The /Fe or /o flag names the linked file.
+ StringRef Val =
+ C.getArgs()
+ .getLastArg(options::OPT__SLASH_Fe, options::OPT__SLASH_o)
+ ->getValue();
+ NamedOutput =
+ MakeCLOutputFilename(C.getArgs(), Val, BaseName, types::TY_Image);
+ } else if (JA.getType() == types::TY_Image) {
+ if (IsCLMode()) {
+ // clang-cl uses BaseName for the executable name.
+ NamedOutput =
+ MakeCLOutputFilename(C.getArgs(), "", BaseName, types::TY_Image);
+ } else if (MultipleArchs && BoundArch) {
+ SmallString<128> Output(getDefaultImageName());
+ Output += "-";
+ Output.append(BoundArch);
+ NamedOutput = C.getArgs().MakeArgString(Output.c_str());
+ } else
+ NamedOutput = getDefaultImageName();
+ } else {
+ const char *Suffix = types::getTypeTempSuffix(JA.getType(), IsCLMode());
+ assert(Suffix && "All types used for output should have a suffix.");
+
+ std::string::size_type End = std::string::npos;
+ if (!types::appendSuffixForType(JA.getType()))
+ End = BaseName.rfind('.');
+ SmallString<128> Suffixed(BaseName.substr(0, End));
+ if (MultipleArchs && BoundArch) {
+ Suffixed += "-";
+ Suffixed.append(BoundArch);
+ }
+ // When using both -save-temps and -emit-llvm, use a ".tmp.bc" suffix for
+ // the unoptimized bitcode so that it does not get overwritten by the ".bc"
+ // optimized bitcode output.
+ if (!AtTopLevel && C.getArgs().hasArg(options::OPT_emit_llvm) &&
+ JA.getType() == types::TY_LLVM_BC)
+ Suffixed += ".tmp";
+ Suffixed += '.';
+ Suffixed += Suffix;
+ NamedOutput = C.getArgs().MakeArgString(Suffixed.c_str());
+ }
+
+ // Prepend object file path if -save-temps=obj
+ if (!AtTopLevel && isSaveTempsObj() && C.getArgs().hasArg(options::OPT_o) &&
+ JA.getType() != types::TY_PCH) {
+ Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o);
+ SmallString<128> TempPath(FinalOutput->getValue());
+ llvm::sys::path::remove_filename(TempPath);
+ StringRef OutputFileName = llvm::sys::path::filename(NamedOutput);
+ llvm::sys::path::append(TempPath, OutputFileName);
+ NamedOutput = C.getArgs().MakeArgString(TempPath.c_str());
+ }
+
+ // If we're saving temps and the temp file conflicts with the input file,
+ // then avoid overwriting input file.
+ if (!AtTopLevel && isSaveTempsEnabled() && NamedOutput == BaseName) {
+ bool SameFile = false;
+ SmallString<256> Result;
+ llvm::sys::fs::current_path(Result);
+ llvm::sys::path::append(Result, BaseName);
+ llvm::sys::fs::equivalent(BaseInput, Result.c_str(), SameFile);
+ // Must share the same path to conflict.
+ if (SameFile) {
+ StringRef Name = llvm::sys::path::filename(BaseInput);
+ std::pair<StringRef, StringRef> Split = Name.split('.');
+ std::string TmpName = GetTemporaryPath(
+ Split.first, types::getTypeTempSuffix(JA.getType(), IsCLMode()));
+ return C.addTempFile(C.getArgs().MakeArgString(TmpName.c_str()));
+ }
+ }
+
+ // As an annoying special case, PCH generation doesn't strip the pathname.
+ if (JA.getType() == types::TY_PCH) {
+ llvm::sys::path::remove_filename(BasePath);
+ if (BasePath.empty())
+ BasePath = NamedOutput;
+ else
+ llvm::sys::path::append(BasePath, NamedOutput);
+ return C.addResultFile(C.getArgs().MakeArgString(BasePath.c_str()), &JA);
+ } else {
+ return C.addResultFile(NamedOutput, &JA);
+ }
+}
+
+std::string Driver::GetFilePath(const char *Name, const ToolChain &TC) const {
+ // Respect a limited subset of the '-Bprefix' functionality in GCC by
+ // attempting to use this prefix when looking for file paths.
+ for (const std::string &Dir : PrefixDirs) {
+ if (Dir.empty())
+ continue;
+ SmallString<128> P(Dir[0] == '=' ? SysRoot + Dir.substr(1) : Dir);
+ llvm::sys::path::append(P, Name);
+ if (llvm::sys::fs::exists(Twine(P)))
+ return P.str();
+ }
+
+ SmallString<128> P(ResourceDir);
+ llvm::sys::path::append(P, Name);
+ if (llvm::sys::fs::exists(Twine(P)))
+ return P.str();
+
+ for (const std::string &Dir : TC.getFilePaths()) {
+ if (Dir.empty())
+ continue;
+ SmallString<128> P(Dir[0] == '=' ? SysRoot + Dir.substr(1) : Dir);
+ llvm::sys::path::append(P, Name);
+ if (llvm::sys::fs::exists(Twine(P)))
+ return P.str();
+ }
+
+ return Name;
+}
+
+void Driver::generatePrefixedToolNames(
+ const char *Tool, const ToolChain &TC,
+ SmallVectorImpl<std::string> &Names) const {
+ // FIXME: Needs a better variable than DefaultTargetTriple
+ Names.emplace_back(DefaultTargetTriple + "-" + Tool);
+ Names.emplace_back(Tool);
+
+ // Allow the discovery of tools prefixed with LLVM's default target triple.
+ std::string LLVMDefaultTargetTriple = llvm::sys::getDefaultTargetTriple();
+ if (LLVMDefaultTargetTriple != DefaultTargetTriple)
+ Names.emplace_back(LLVMDefaultTargetTriple + "-" + Tool);
+}
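+
+// Example lookup order (hypothetical triple): for Tool "ld" with a
+// default target triple of x86_64-unknown-linux-gnu, the candidates are
+// "x86_64-unknown-linux-gnu-ld", then "ld", plus a variant prefixed with
+// LLVM's default triple when it differs.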
+
+static bool ScanDirForExecutable(SmallString<128> &Dir,
+ ArrayRef<std::string> Names) {
+ for (const auto &Name : Names) {
+ llvm::sys::path::append(Dir, Name);
+ if (llvm::sys::fs::can_execute(Twine(Dir)))
+ return true;
+ llvm::sys::path::remove_filename(Dir);
+ }
+ return false;
+}
+
+std::string Driver::GetProgramPath(const char *Name,
+ const ToolChain &TC) const {
+ SmallVector<std::string, 2> TargetSpecificExecutables;
+ generatePrefixedToolNames(Name, TC, TargetSpecificExecutables);
+
+ // Respect a limited subset of the '-Bprefix' functionality in GCC by
+ // attempting to use this prefix when looking for program paths.
+ for (const auto &PrefixDir : PrefixDirs) {
+ if (llvm::sys::fs::is_directory(PrefixDir)) {
+ SmallString<128> P(PrefixDir);
+ if (ScanDirForExecutable(P, TargetSpecificExecutables))
+ return P.str();
+ } else {
+ SmallString<128> P(PrefixDir + Name);
+ if (llvm::sys::fs::can_execute(Twine(P)))
+ return P.str();
+ }
+ }
+
+ const ToolChain::path_list &List = TC.getProgramPaths();
+ for (const auto &Path : List) {
+ SmallString<128> P(Path);
+ if (ScanDirForExecutable(P, TargetSpecificExecutables))
+ return P.str();
+ }
+
+ // If all else failed, search the path.
+ for (const auto &TargetSpecificExecutable : TargetSpecificExecutables)
+ if (llvm::ErrorOr<std::string> P =
+ llvm::sys::findProgramByName(TargetSpecificExecutable))
+ return *P;
+
+ return Name;
+}
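GetProgramPath's lookup order is: -B prefix dirs, then the toolchain's program dirs, then PATH, in each case trying the target-prefixed tool name before the bare one. A minimal standalone sketch of that search order, using std::filesystem in place of llvm::sys (the triple, tool, and directory names are illustrative):

```cpp
#include <cstdio>
#include <filesystem>
#include <optional>
#include <string>
#include <vector>

namespace fs = std::filesystem;

// Analogue of ScanDirForExecutable: return the first candidate name present
// in Dir (a plain existence check stands in for can_execute here).
static std::optional<fs::path> scanDir(const fs::path &Dir,
                                       const std::vector<std::string> &Names) {
  for (const auto &Name : Names) {
    fs::path P = Dir / Name;
    std::error_code EC;
    if (fs::exists(P, EC))
      return P;
  }
  return std::nullopt;
}

static std::optional<fs::path>
findTool(const std::string &Triple, const std::string &Tool,
         const std::vector<fs::path> &SearchDirs) {
  // Prefer the target-prefixed name, then fall back to the bare name.
  std::vector<std::string> Names = {Triple + "-" + Tool, Tool};
  for (const auto &Dir : SearchDirs)
    if (auto P = scanDir(Dir, Names))
      return P;
  return std::nullopt;
}

int main() {
  if (auto P = findTool("x86_64-unknown-linux-gnu", "ld", {"/usr/bin"}))
    std::puts(P->string().c_str());
}
```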
+
+std::string Driver::GetTemporaryPath(StringRef Prefix,
+ const char *Suffix) const {
+ SmallString<128> Path;
+ std::error_code EC = llvm::sys::fs::createTemporaryFile(Prefix, Suffix, Path);
+ if (EC) {
+ Diag(clang::diag::err_unable_to_make_temp) << EC.message();
+ return "";
+ }
+
+ return Path.str();
+}
+
+const ToolChain &Driver::getToolChain(const ArgList &Args,
+ const llvm::Triple &Target) const {
+
+ ToolChain *&TC = ToolChains[Target.str()];
+ if (!TC) {
+ switch (Target.getOS()) {
+ case llvm::Triple::CloudABI:
+ TC = new toolchains::CloudABI(*this, Target, Args);
+ break;
+ case llvm::Triple::Darwin:
+ case llvm::Triple::MacOSX:
+ case llvm::Triple::IOS:
+ case llvm::Triple::TvOS:
+ case llvm::Triple::WatchOS:
+ TC = new toolchains::DarwinClang(*this, Target, Args);
+ break;
+ case llvm::Triple::DragonFly:
+ TC = new toolchains::DragonFly(*this, Target, Args);
+ break;
+ case llvm::Triple::OpenBSD:
+ TC = new toolchains::OpenBSD(*this, Target, Args);
+ break;
+ case llvm::Triple::Bitrig:
+ TC = new toolchains::Bitrig(*this, Target, Args);
+ break;
+ case llvm::Triple::NetBSD:
+ TC = new toolchains::NetBSD(*this, Target, Args);
+ break;
+ case llvm::Triple::FreeBSD:
+ TC = new toolchains::FreeBSD(*this, Target, Args);
+ break;
+ case llvm::Triple::Minix:
+ TC = new toolchains::Minix(*this, Target, Args);
+ break;
+ case llvm::Triple::Linux:
+ if (Target.getArch() == llvm::Triple::hexagon)
+ TC = new toolchains::HexagonToolChain(*this, Target, Args);
+ else if ((Target.getVendor() == llvm::Triple::MipsTechnologies) &&
+ !Target.hasEnvironment())
+ TC = new toolchains::MipsLLVMToolChain(*this, Target, Args);
+ else
+ TC = new toolchains::Linux(*this, Target, Args);
+ break;
+ case llvm::Triple::NaCl:
+ TC = new toolchains::NaClToolChain(*this, Target, Args);
+ break;
+ case llvm::Triple::Solaris:
+ TC = new toolchains::Solaris(*this, Target, Args);
+ break;
+ case llvm::Triple::AMDHSA:
+ TC = new toolchains::AMDGPUToolChain(*this, Target, Args);
+ break;
+ case llvm::Triple::Win32:
+ switch (Target.getEnvironment()) {
+ default:
+ if (Target.isOSBinFormatELF())
+ TC = new toolchains::Generic_ELF(*this, Target, Args);
+ else if (Target.isOSBinFormatMachO())
+ TC = new toolchains::MachO(*this, Target, Args);
+ else
+ TC = new toolchains::Generic_GCC(*this, Target, Args);
+ break;
+ case llvm::Triple::GNU:
+ TC = new toolchains::MinGW(*this, Target, Args);
+ break;
+ case llvm::Triple::Itanium:
+ TC = new toolchains::CrossWindowsToolChain(*this, Target, Args);
+ break;
+ case llvm::Triple::MSVC:
+ case llvm::Triple::UnknownEnvironment:
+ TC = new toolchains::MSVCToolChain(*this, Target, Args);
+ break;
+ }
+ break;
+ case llvm::Triple::CUDA:
+ TC = new toolchains::CudaToolChain(*this, Target, Args);
+ break;
+ case llvm::Triple::PS4:
+ TC = new toolchains::PS4CPU(*this, Target, Args);
+ break;
+ default:
+    // Of these targets, Hexagon is the only one that might have
+    // an OS of Linux, in which case it was already handled above.
+ switch (Target.getArch()) {
+ case llvm::Triple::tce:
+ TC = new toolchains::TCEToolChain(*this, Target, Args);
+ break;
+ case llvm::Triple::hexagon:
+ TC = new toolchains::HexagonToolChain(*this, Target, Args);
+ break;
+ case llvm::Triple::xcore:
+ TC = new toolchains::XCoreToolChain(*this, Target, Args);
+ break;
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ TC = new toolchains::WebAssembly(*this, Target, Args);
+ break;
+ default:
+ if (Target.getVendor() == llvm::Triple::Myriad)
+ TC = new toolchains::MyriadToolChain(*this, Target, Args);
+ else if (Target.isOSBinFormatELF())
+ TC = new toolchains::Generic_ELF(*this, Target, Args);
+ else if (Target.isOSBinFormatMachO())
+ TC = new toolchains::MachO(*this, Target, Args);
+ else
+ TC = new toolchains::Generic_GCC(*this, Target, Args);
+ }
+ }
+ }
+ return *TC;
+}
+
+bool Driver::ShouldUseClangCompiler(const JobAction &JA) const {
+ // Say "no" if there is not exactly one input of a type clang understands.
+ if (JA.size() != 1 || !types::isAcceptedByClang((*JA.begin())->getType()))
+ return false;
+
+ // And say "no" if this is not a kind of action clang understands.
+ if (!isa<PreprocessJobAction>(JA) && !isa<PrecompileJobAction>(JA) &&
+ !isa<CompileJobAction>(JA) && !isa<BackendJobAction>(JA))
+ return false;
+
+ return true;
+}
+
+/// GetReleaseVersion - Parse ([0-9]+)(\.([0-9]+)(\.([0-9]+))?)? and return the
+/// grouped values as integers. Numbers which are not provided are set to 0.
+///
+/// \return True if the entire string was parsed (e.g. "9.2"), or if all three
+/// groups were parsed and only trailing text remains ("10.3.5extrastuff").
+bool Driver::GetReleaseVersion(const char *Str, unsigned &Major,
+ unsigned &Minor, unsigned &Micro,
+ bool &HadExtra) {
+ HadExtra = false;
+
+ Major = Minor = Micro = 0;
+ if (*Str == '\0')
+ return false;
+
+ char *End;
+ Major = (unsigned)strtol(Str, &End, 10);
+ if (*Str != '\0' && *End == '\0')
+ return true;
+ if (*End != '.')
+ return false;
+
+ Str = End + 1;
+ Minor = (unsigned)strtol(Str, &End, 10);
+ if (*Str != '\0' && *End == '\0')
+ return true;
+ if (*End != '.')
+ return false;
+
+ Str = End + 1;
+ Micro = (unsigned)strtol(Str, &End, 10);
+ if (*Str != '\0' && *End == '\0')
+ return true;
+ if (Str == End)
+ return false;
+ HadExtra = true;
+ return true;
+}
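The parser accepts "Major", "Major.Minor", or "Major.Minor.Micro", and sets HadExtra only when text trails a complete three-part version. A self-contained copy of the logic with a few sanity checks (parseVersion is just an out-of-class stand-in for the method above):

```cpp
#include <cassert>
#include <cstdlib>

static bool parseVersion(const char *Str, unsigned &Major, unsigned &Minor,
                         unsigned &Micro, bool &HadExtra) {
  HadExtra = false;
  Major = Minor = Micro = 0;
  if (*Str == '\0')
    return false;

  char *End;
  Major = (unsigned)strtol(Str, &End, 10);
  if (*Str != '\0' && *End == '\0')
    return true;              // "9"
  if (*End != '.')
    return false;

  Str = End + 1;
  Minor = (unsigned)strtol(Str, &End, 10);
  if (*Str != '\0' && *End == '\0')
    return true;              // "9.2"
  if (*End != '.')
    return false;

  Str = End + 1;
  Micro = (unsigned)strtol(Str, &End, 10);
  if (*Str != '\0' && *End == '\0')
    return true;              // "10.3.5"
  if (Str == End)
    return false;             // "10.3." with nothing after the dot
  HadExtra = true;            // "10.3.5extrastuff"
  return true;
}

int main() {
  unsigned Maj, Min, Mic;
  bool Extra;
  assert(parseVersion("9.2", Maj, Min, Mic, Extra) && Maj == 9 && Min == 2);
  assert(parseVersion("10.3.5extrastuff", Maj, Min, Mic, Extra) && Extra);
  assert(!parseVersion("9.", Maj, Min, Mic, Extra));
}
```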
+
+std::pair<unsigned, unsigned> Driver::getIncludeExcludeOptionFlagMasks() const {
+ unsigned IncludedFlagsBitmask = 0;
+ unsigned ExcludedFlagsBitmask = options::NoDriverOption;
+
+ if (Mode == CLMode) {
+ // Include CL and Core options.
+ IncludedFlagsBitmask |= options::CLOption;
+ IncludedFlagsBitmask |= options::CoreOption;
+ } else {
+ ExcludedFlagsBitmask |= options::CLOption;
+ }
+
+ return std::make_pair(IncludedFlagsBitmask, ExcludedFlagsBitmask);
+}
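In CL mode the driver narrows the visible option set to CL and Core options while always excluding cc1-only options. A toy bitmask filter showing the include/exclude semantics these masks feed into (the flag values and the isVisible helper are invented for the sketch; llvm::opt applies an equivalent rule):

```cpp
#include <cassert>

// Hypothetical stand-ins for the options::* flag bits.
enum OptionFlags : unsigned {
  NoDriverOption = 1u << 0,
  CLOption       = 1u << 1,
  CoreOption     = 1u << 2,
};

// An option is visible if it has at least one included flag (when an include
// mask is set) and none of the excluded flags.
static bool isVisible(unsigned OptFlags, unsigned Included, unsigned Excluded) {
  if (Included && !(OptFlags & Included))
    return false;
  return !(OptFlags & Excluded);
}

int main() {
  // clang-cl mode: include CL and Core options, always exclude NoDriverOption.
  unsigned Inc = CLOption | CoreOption, Exc = NoDriverOption;
  assert(isVisible(CLOption, Inc, Exc));        // /W4-style option: shown
  assert(!isVisible(0, Inc, Exc));              // GCC-only option: hidden
  assert(!isVisible(NoDriverOption, Inc, Exc)); // cc1-only option: hidden
}
```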
+
+bool clang::driver::isOptimizationLevelFast(const ArgList &Args) {
+ return Args.hasFlag(options::OPT_Ofast, options::OPT_O_Group, false);
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp b/contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp
new file mode 100644
index 0000000..8d5332b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp
@@ -0,0 +1,44 @@
+//===--- DriverOptions.cpp - Driver Options Table -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Options.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Option/OptTable.h"
+#include "llvm/Option/Option.h"
+
+using namespace clang::driver;
+using namespace clang::driver::options;
+using namespace llvm::opt;
+
+#define PREFIX(NAME, VALUE) static const char *const NAME[] = VALUE;
+#include "clang/Driver/Options.inc"
+#undef PREFIX
+
+static const OptTable::Info InfoTable[] = {
+#define OPTION(PREFIX, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR) \
+ { PREFIX, NAME, HELPTEXT, METAVAR, OPT_##ID, Option::KIND##Class, PARAM, \
+ FLAGS, OPT_##GROUP, OPT_##ALIAS, ALIASARGS },
+#include "clang/Driver/Options.inc"
+#undef OPTION
+};
+
+namespace {
+
+class DriverOptTable : public OptTable {
+public:
+ DriverOptTable()
+ : OptTable(InfoTable) {}
+};
+
+}
+
+OptTable *clang::driver::createDriverOptTable() {
+ return new DriverOptTable();
+}
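This is the usual tablegen X-macro idiom: Options.inc is included twice, once with PREFIX defined to emit the prefix arrays and once with OPTION defined to emit the info records. A reduced illustration of the trick, with a hand-written FAKE_OPTIONS macro standing in for the generated file:

```cpp
#include <cstdio>

// Normally this list lives in a generated Options.inc file.
#define FAKE_OPTIONS(OPTION)                                                   \
  OPTION("-", "help", "Display available options")                             \
  OPTION("-", "o", "Write output to <file>")

struct Info {
  const char *Prefix;
  const char *Name;
  const char *HelpText;
};

// "Include" the list once, with OPTION expanding to an initializer.
static const Info InfoTable[] = {
#define OPTION(PREFIX, NAME, HELP) {PREFIX, NAME, HELP},
    FAKE_OPTIONS(OPTION)
#undef OPTION
};

int main() {
  for (const Info &I : InfoTable)
    std::printf("%s%s\t%s\n", I.Prefix, I.Name, I.HelpText);
}
```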
diff --git a/contrib/llvm/tools/clang/lib/Driver/InputInfo.h b/contrib/llvm/tools/clang/lib/Driver/InputInfo.h
new file mode 100644
index 0000000..b23ba57
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/InputInfo.h
@@ -0,0 +1,89 @@
+//===--- InputInfo.h - Input Source & Type Information ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_INPUTINFO_H
+#define LLVM_CLANG_LIB_DRIVER_INPUTINFO_H
+
+#include "clang/Driver/Types.h"
+#include "llvm/Option/Arg.h"
+#include <cassert>
+#include <string>
+
+namespace clang {
+namespace driver {
+
+/// InputInfo - Wrapper for information about an input source.
+class InputInfo {
+ // FIXME: The distinction between filenames and inputarg here is
+ // gross; we should probably drop the idea of a "linker
+ // input". Doing so means tweaking pipelining to still create link
+ // steps when it sees linker inputs (but not treat them as
+ // arguments), and making sure that arguments get rendered
+ // correctly.
+ enum Class {
+ Nothing,
+ Filename,
+ InputArg,
+ Pipe
+ };
+
+ union {
+ const char *Filename;
+ const llvm::opt::Arg *InputArg;
+ } Data;
+ Class Kind;
+ types::ID Type;
+ const char *BaseInput;
+
+public:
+ InputInfo() {}
+ InputInfo(types::ID _Type, const char *_BaseInput)
+ : Kind(Nothing), Type(_Type), BaseInput(_BaseInput) {
+ }
+ InputInfo(const char *_Filename, types::ID _Type, const char *_BaseInput)
+ : Kind(Filename), Type(_Type), BaseInput(_BaseInput) {
+ Data.Filename = _Filename;
+ }
+ InputInfo(const llvm::opt::Arg *_InputArg, types::ID _Type,
+ const char *_BaseInput)
+ : Kind(InputArg), Type(_Type), BaseInput(_BaseInput) {
+ Data.InputArg = _InputArg;
+ }
+
+ bool isNothing() const { return Kind == Nothing; }
+ bool isFilename() const { return Kind == Filename; }
+ bool isInputArg() const { return Kind == InputArg; }
+ types::ID getType() const { return Type; }
+ const char *getBaseInput() const { return BaseInput; }
+
+ const char *getFilename() const {
+ assert(isFilename() && "Invalid accessor.");
+ return Data.Filename;
+ }
+ const llvm::opt::Arg &getInputArg() const {
+ assert(isInputArg() && "Invalid accessor.");
+ return *Data.InputArg;
+ }
+
+ /// getAsString - Return a string name for this input, for
+ /// debugging.
+ std::string getAsString() const {
+ if (isFilename())
+ return std::string("\"") + getFilename() + '"';
+ else if (isInputArg())
+ return "(input arg)";
+ else
+ return "(nothing)";
+ }
+};
+
+} // end namespace driver
+} // end namespace clang
+
+#endif
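InputInfo is a hand-rolled tagged union: the Kind field records which union member is live and the asserting accessors make misuse fail fast in debug builds. The same pattern in miniature (a made-up Slot class; std::variant would be the modern alternative, but an explicit discriminator keeps the header dependency-free):

```cpp
#include <cassert>
#include <string>

class Slot {
  enum Class { Nothing, Filename, Number };
  union {
    const char *Name; // live when Kind == Filename
    int Value;        // live when Kind == Number
  } Data;
  Class Kind;

public:
  Slot() : Kind(Nothing) {}
  explicit Slot(const char *N) : Kind(Filename) { Data.Name = N; }
  explicit Slot(int V) : Kind(Number) { Data.Value = V; }

  bool isFilename() const { return Kind == Filename; }
  const char *getFilename() const {
    assert(isFilename() && "Invalid accessor."); // same guard style as above
    return Data.Name;
  }

  std::string getAsString() const {
    switch (Kind) {
    case Filename: return std::string("\"") + Data.Name + '"';
    case Number:   return std::to_string(Data.Value);
    default:       return "(nothing)";
    }
  }
};

int main() {
  Slot S("a.c");
  assert(S.isFilename() && S.getAsString() == "\"a.c\"");
}
```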
diff --git a/contrib/llvm/tools/clang/lib/Driver/Job.cpp b/contrib/llvm/tools/clang/lib/Driver/Job.cpp
new file mode 100644
index 0000000..22904e5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Job.cpp
@@ -0,0 +1,306 @@
+//===--- Job.cpp - Command to Execute -------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "InputInfo.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Job.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+using namespace clang::driver;
+using llvm::raw_ostream;
+using llvm::StringRef;
+using llvm::ArrayRef;
+
+Command::Command(const Action &Source, const Tool &Creator,
+ const char *Executable, const ArgStringList &Arguments,
+ ArrayRef<InputInfo> Inputs)
+ : Source(Source), Creator(Creator), Executable(Executable),
+ Arguments(Arguments), ResponseFile(nullptr) {
+ for (const auto &II : Inputs)
+ if (II.isFilename())
+ InputFilenames.push_back(II.getFilename());
+}
+
+static int skipArgs(const char *Flag, bool HaveCrashVFS) {
+ // These flags are all of the form -Flag <Arg> and are treated as two
+ // arguments. Therefore, we need to skip the flag and the next argument.
+ bool Res = llvm::StringSwitch<bool>(Flag)
+ .Cases("-I", "-MF", "-MT", "-MQ", true)
+ .Cases("-o", "-coverage-file", "-dependency-file", true)
+ .Cases("-fdebug-compilation-dir", "-idirafter", true)
+ .Cases("-include", "-include-pch", "-internal-isystem", true)
+ .Cases("-internal-externc-isystem", "-iprefix", "-iwithprefix", true)
+ .Cases("-iwithprefixbefore", "-isystem", "-iquote", true)
+ .Cases("-resource-dir", "-serialize-diagnostic-file", true)
+ .Cases("-dwarf-debug-flags", "-ivfsoverlay", true)
+ .Cases("-header-include-file", "-diagnostic-log-file", true)
+ // Some include flags shouldn't be skipped if we have a crash VFS
+ .Case("-isysroot", !HaveCrashVFS)
+ .Default(false);
+
+ // Match found.
+ if (Res)
+ return 2;
+
+ // The remaining flags are treated as a single argument.
+
+ // These flags are all of the form -Flag and have no second argument.
+ Res = llvm::StringSwitch<bool>(Flag)
+ .Cases("-M", "-MM", "-MG", "-MP", "-MD", true)
+ .Case("-MMD", true)
+ .Default(false);
+
+ // Match found.
+ if (Res)
+ return 1;
+
+ // These flags are treated as a single argument (e.g., -F<Dir>).
+ StringRef FlagRef(Flag);
+ if (FlagRef.startswith("-F") || FlagRef.startswith("-I") ||
+ FlagRef.startswith("-fmodules-cache-path="))
+ return 1;
+
+ return 0;
+}
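skipArgs reports how many argv slots a flag occupies -- 2 for "-Flag <arg>" pairs, 1 for standalone or joined flags, 0 to keep -- and the caller in Command::Print advances by that count while rebuilding the crash-report command line. The same arity-driven filtering in a standalone form (the flag tables here are abbreviated stand-ins):

```cpp
#include <set>
#include <string>
#include <vector>

// Returns the number of argv entries the flag consumes, or 0 to keep it.
static int skipCount(const std::string &Flag) {
  static const std::set<std::string> TakesArg = {"-o", "-I", "-MF",
                                                 "-include"};
  static const std::set<std::string> NoArg = {"-M", "-MM", "-MD"};
  if (TakesArg.count(Flag))
    return 2;
  if (NoArg.count(Flag))
    return 1;
  if (Flag.rfind("-I", 0) == 0 || Flag.rfind("-F", 0) == 0)
    return 1; // joined forms like -Idir
  return 0;
}

static std::vector<std::string>
filterArgs(const std::vector<std::string> &Args) {
  std::vector<std::string> Out;
  for (size_t I = 0; I < Args.size(); ++I) {
    if (int Skip = skipCount(Args[I])) {
      I += Skip - 1; // mirror 'i += Skip - 1; continue;' in Command::Print
      continue;
    }
    Out.push_back(Args[I]);
  }
  return Out;
}

int main() {
  auto Kept = filterArgs({"-g", "-o", "a.out", "-Iinc", "main.c"});
  return Kept.size() == 2 ? 0 : 1; // keeps "-g" and "main.c"
}
```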
+
+void Command::printArg(raw_ostream &OS, const char *Arg, bool Quote) {
+ const bool Escape = std::strpbrk(Arg, "\"\\$");
+
+ if (!Quote && !Escape) {
+ OS << Arg;
+ return;
+ }
+
+ // Quote and escape. This isn't really complete, but good enough.
+ OS << '"';
+ while (const char c = *Arg++) {
+ if (c == '"' || c == '\\' || c == '$')
+ OS << '\\';
+ OS << c;
+ }
+ OS << '"';
+}
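printArg quotes only when asked to or when the argument contains one of " \ $, and inside quotes it backslash-escapes those same three characters. A mirror of the routine against std::ostream makes the behaviour easy to try:

```cpp
#include <cstring>
#include <iostream>
#include <ostream>

// Mirror of Command::printArg against std::ostream, for illustration.
static void printArg(std::ostream &OS, const char *Arg, bool Quote) {
  const bool Escape = std::strpbrk(Arg, "\"\\$") != nullptr;
  if (!Quote && !Escape) {
    OS << Arg;
    return;
  }
  OS << '"';
  while (const char C = *Arg++) {
    if (C == '"' || C == '\\' || C == '$')
      OS << '\\';
    OS << C;
  }
  OS << '"';
}

int main() {
  printArg(std::cout, "plain", false);    // -> plain
  std::cout << '\n';
  printArg(std::cout, "has space", true); // -> "has space"
  std::cout << '\n';
  printArg(std::cout, "a\"b$c", false);   // -> "a\"b\$c"
  std::cout << '\n';
}
```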
+
+void Command::writeResponseFile(raw_ostream &OS) const {
+ // In a file list, we only write the set of inputs to the response file
+ if (Creator.getResponseFilesSupport() == Tool::RF_FileList) {
+ for (const char *Arg : InputFileList) {
+ OS << Arg << '\n';
+ }
+ return;
+ }
+
+ // In regular response files, we send all arguments to the response file.
+ // Wrapping all arguments in double quotes ensures that both Unix tools and
+ // Windows tools understand the response file.
+ for (const char *Arg : Arguments) {
+ OS << '"';
+
+ for (; *Arg != '\0'; Arg++) {
+ if (*Arg == '\"' || *Arg == '\\') {
+ OS << '\\';
+ }
+ OS << *Arg;
+ }
+
+ OS << "\" ";
+ }
+}
+
+void Command::buildArgvForResponseFile(
+ llvm::SmallVectorImpl<const char *> &Out) const {
+  // When not a file list, all arguments are sent to the response file, so the
+  // argv needs only the executable plus the single flag that tells the tool
+  // to read the response file.
+ if (Creator.getResponseFilesSupport() != Tool::RF_FileList) {
+ Out.push_back(Executable);
+ Out.push_back(ResponseFileFlag.c_str());
+ return;
+ }
+
+ llvm::StringSet<> Inputs;
+ for (const char *InputName : InputFileList)
+ Inputs.insert(InputName);
+ Out.push_back(Executable);
+ // In a file list, build args vector ignoring parameters that will go in the
+ // response file (elements of the InputFileList vector)
+ bool FirstInput = true;
+ for (const char *Arg : Arguments) {
+ if (Inputs.count(Arg) == 0) {
+ Out.push_back(Arg);
+ } else if (FirstInput) {
+ FirstInput = false;
+ Out.push_back(Creator.getResponseFileFlag());
+ Out.push_back(ResponseFile);
+ }
+ }
+}
+
+void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
+ CrashReportInfo *CrashInfo) const {
+ // Always quote the exe.
+ OS << ' ';
+ printArg(OS, Executable, /*Quote=*/true);
+
+ llvm::ArrayRef<const char *> Args = Arguments;
+ llvm::SmallVector<const char *, 128> ArgsRespFile;
+ if (ResponseFile != nullptr) {
+ buildArgvForResponseFile(ArgsRespFile);
+ Args = ArrayRef<const char *>(ArgsRespFile).slice(1); // no executable name
+ }
+
+ bool HaveCrashVFS = CrashInfo && !CrashInfo->VFSPath.empty();
+ for (size_t i = 0, e = Args.size(); i < e; ++i) {
+ const char *const Arg = Args[i];
+
+ if (CrashInfo) {
+ if (int Skip = skipArgs(Arg, HaveCrashVFS)) {
+ i += Skip - 1;
+ continue;
+ }
+ auto Found = std::find_if(InputFilenames.begin(), InputFilenames.end(),
+ [&Arg](StringRef IF) { return IF == Arg; });
+ if (Found != InputFilenames.end() &&
+ (i == 0 || StringRef(Args[i - 1]) != "-main-file-name")) {
+ // Replace the input file name with the crashinfo's file name.
+ OS << ' ';
+ StringRef ShortName = llvm::sys::path::filename(CrashInfo->Filename);
+ printArg(OS, ShortName.str().c_str(), Quote);
+ continue;
+ }
+ }
+
+ OS << ' ';
+ printArg(OS, Arg, Quote);
+ }
+
+ if (CrashInfo && HaveCrashVFS) {
+ OS << ' ';
+ printArg(OS, "-ivfsoverlay", Quote);
+ OS << ' ';
+ printArg(OS, CrashInfo->VFSPath.str().c_str(), Quote);
+ }
+
+ if (ResponseFile != nullptr) {
+ OS << "\n Arguments passed via response file:\n";
+ writeResponseFile(OS);
+    // Avoid a duplicated newline terminator, since FileLists are
+    // newline-separated.
+ if (Creator.getResponseFilesSupport() != Tool::RF_FileList)
+ OS << "\n";
+ OS << " (end of response file)";
+ }
+
+ OS << Terminator;
+}
+
+void Command::setResponseFile(const char *FileName) {
+ ResponseFile = FileName;
+ ResponseFileFlag = Creator.getResponseFileFlag();
+ ResponseFileFlag += FileName;
+}
+
+int Command::Execute(const StringRef **Redirects, std::string *ErrMsg,
+ bool *ExecutionFailed) const {
+ SmallVector<const char*, 128> Argv;
+
+ if (ResponseFile == nullptr) {
+ Argv.push_back(Executable);
+ Argv.append(Arguments.begin(), Arguments.end());
+ Argv.push_back(nullptr);
+
+ return llvm::sys::ExecuteAndWait(Executable, Argv.data(), /*env*/ nullptr,
+ Redirects, /*secondsToWait*/ 0,
+ /*memoryLimit*/ 0, ErrMsg,
+ ExecutionFailed);
+ }
+
+  // We need to put the arguments in a response file (the command line is too
+  // large). Open a stream to store the response file contents.
+ std::string RespContents;
+ llvm::raw_string_ostream SS(RespContents);
+
+ // Write file contents and build the Argv vector
+ writeResponseFile(SS);
+ buildArgvForResponseFile(Argv);
+ Argv.push_back(nullptr);
+ SS.flush();
+
+ // Save the response file in the appropriate encoding
+ if (std::error_code EC = writeFileWithEncoding(
+ ResponseFile, RespContents, Creator.getResponseFileEncoding())) {
+ if (ErrMsg)
+ *ErrMsg = EC.message();
+ if (ExecutionFailed)
+ *ExecutionFailed = true;
+ return -1;
+ }
+
+ return llvm::sys::ExecuteAndWait(Executable, Argv.data(), /*env*/ nullptr,
+ Redirects, /*secondsToWait*/ 0,
+ /*memoryLimit*/ 0, ErrMsg, ExecutionFailed);
+}
+
+FallbackCommand::FallbackCommand(const Action &Source_, const Tool &Creator_,
+ const char *Executable_,
+ const ArgStringList &Arguments_,
+ ArrayRef<InputInfo> Inputs,
+ std::unique_ptr<Command> Fallback_)
+ : Command(Source_, Creator_, Executable_, Arguments_, Inputs),
+ Fallback(std::move(Fallback_)) {}
+
+void FallbackCommand::Print(raw_ostream &OS, const char *Terminator,
+ bool Quote, CrashReportInfo *CrashInfo) const {
+ Command::Print(OS, "", Quote, CrashInfo);
+ OS << " ||";
+ Fallback->Print(OS, Terminator, Quote, CrashInfo);
+}
+
+static bool ShouldFallback(int ExitCode) {
+ // FIXME: We really just want to fall back for internal errors, such
+ // as when some symbol cannot be mangled, when we should be able to
+ // parse something but can't, etc.
+ return ExitCode != 0;
+}
+
+int FallbackCommand::Execute(const StringRef **Redirects, std::string *ErrMsg,
+ bool *ExecutionFailed) const {
+ int PrimaryStatus = Command::Execute(Redirects, ErrMsg, ExecutionFailed);
+ if (!ShouldFallback(PrimaryStatus))
+ return PrimaryStatus;
+
+ // Clear ExecutionFailed and ErrMsg before falling back.
+ if (ErrMsg)
+ ErrMsg->clear();
+ if (ExecutionFailed)
+ *ExecutionFailed = false;
+
+ const Driver &D = getCreator().getToolChain().getDriver();
+ D.Diag(diag::warn_drv_invoking_fallback) << Fallback->getExecutable();
+
+ int SecondaryStatus = Fallback->Execute(Redirects, ErrMsg, ExecutionFailed);
+ return SecondaryStatus;
+}
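FallbackCommand implements a plain try-then-fallback policy: run the primary command, and on any nonzero exit clear the error state, warn, and run the alternate. The control flow with the driver types stripped away (Runner is an invented std::function stand-in for Command::Execute):

```cpp
#include <functional>
#include <iostream>
#include <string>

using Runner = std::function<int(std::string &ErrMsg, bool &Failed)>;

static int runWithFallback(const Runner &Primary, const Runner &Fallback,
                           std::string &ErrMsg, bool &Failed) {
  int Status = Primary(ErrMsg, Failed);
  if (Status == 0) // same policy as ShouldFallback: any nonzero exit falls back
    return Status;

  // Clear error state before the second attempt, as the driver does.
  ErrMsg.clear();
  Failed = false;
  std::cerr << "warning: falling back to alternate command\n";
  return Fallback(ErrMsg, Failed);
}

int main() {
  std::string Err;
  bool Failed = false;
  return runWithFallback(
      [](std::string &, bool &) { return 1; }, // primary "fails"
      [](std::string &, bool &) { return 0; }, // fallback succeeds
      Err, Failed);                            // exits 0
}
```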
+
+void JobList::Print(raw_ostream &OS, const char *Terminator, bool Quote,
+ CrashReportInfo *CrashInfo) const {
+ for (const auto &Job : *this)
+ Job.Print(OS, Terminator, Quote, CrashInfo);
+}
+
+void JobList::clear() { Jobs.clear(); }
diff --git a/contrib/llvm/tools/clang/lib/Driver/MSVCToolChain.cpp b/contrib/llvm/tools/clang/lib/Driver/MSVCToolChain.cpp
new file mode 100644
index 0000000..b7e576e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/MSVCToolChain.cpp
@@ -0,0 +1,744 @@
+//===--- MSVCToolChain.cpp - MSVCToolChain Implementation ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ToolChains.h"
+#include "Tools.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/Version.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Options.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Process.h"
+#include <cstdio>
+
+// Include the necessary headers to interface with the Windows registry and
+// environment.
+#if defined(LLVM_ON_WIN32)
+#define USE_WIN32
+#endif
+
+#ifdef USE_WIN32
+ #define WIN32_LEAN_AND_MEAN
+ #define NOGDI
+ #ifndef NOMINMAX
+ #define NOMINMAX
+ #endif
+ #include <windows.h>
+#endif
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang;
+using namespace llvm::opt;
+
+MSVCToolChain::MSVCToolChain(const Driver &D, const llvm::Triple& Triple,
+ const ArgList &Args)
+ : ToolChain(D, Triple, Args) {
+ getProgramPaths().push_back(getDriver().getInstalledDir());
+ if (getDriver().getInstalledDir() != getDriver().Dir)
+ getProgramPaths().push_back(getDriver().Dir);
+}
+
+Tool *MSVCToolChain::buildLinker() const {
+ return new tools::visualstudio::Linker(*this);
+}
+
+Tool *MSVCToolChain::buildAssembler() const {
+ if (getTriple().isOSBinFormatMachO())
+ return new tools::darwin::Assembler(*this);
+ getDriver().Diag(clang::diag::err_no_external_assembler);
+ return nullptr;
+}
+
+bool MSVCToolChain::IsIntegratedAssemblerDefault() const {
+ return true;
+}
+
+bool MSVCToolChain::IsUnwindTablesDefault() const {
+ // Emit unwind tables by default on Win64. All non-x86_32 Windows platforms
+ // such as ARM and PPC actually require unwind tables, but LLVM doesn't know
+ // how to generate them yet.
+ return getArch() == llvm::Triple::x86_64;
+}
+
+bool MSVCToolChain::isPICDefault() const {
+ return getArch() == llvm::Triple::x86_64;
+}
+
+bool MSVCToolChain::isPIEDefault() const {
+ return false;
+}
+
+bool MSVCToolChain::isPICDefaultForced() const {
+ return getArch() == llvm::Triple::x86_64;
+}
+
+#ifdef USE_WIN32
+static bool readFullStringValue(HKEY hkey, const char *valueName,
+ std::string &value) {
+ // FIXME: We should be using the W versions of the registry functions, but
+ // doing so requires UTF8 / UTF16 conversions similar to how we handle command
+ // line arguments. The UTF8 conversion functions are not exposed publicly
+ // from LLVM though, so in order to do this we will probably need to create
+ // a registry abstraction in LLVMSupport that is Windows only.
+ DWORD result = 0;
+ DWORD valueSize = 0;
+ DWORD type = 0;
+ // First just query for the required size.
+ result = RegQueryValueEx(hkey, valueName, NULL, &type, NULL, &valueSize);
+ if (result != ERROR_SUCCESS || type != REG_SZ)
+ return false;
+ std::vector<BYTE> buffer(valueSize);
+ result = RegQueryValueEx(hkey, valueName, NULL, NULL, &buffer[0], &valueSize);
+ if (result == ERROR_SUCCESS)
+ value.assign(reinterpret_cast<const char *>(buffer.data()));
+  return result == ERROR_SUCCESS;
+}
+#endif
+
+/// \brief Read registry string.
+/// This also supports a means to look for high-versioned keys by use
+/// of a $VERSION placeholder in the key path.
+/// $VERSION in the key path is a placeholder for the version number,
+/// causing the highest value path to be searched for and used.
+/// I.e. "SOFTWARE\\Microsoft\\VisualStudio\\$VERSION".
+/// There can be additional characters in the component. Only the numeric
+/// characters are compared. This function only searches HKLM.
+static bool getSystemRegistryString(const char *keyPath, const char *valueName,
+ std::string &value, std::string *phValue) {
+#ifndef USE_WIN32
+ return false;
+#else
+ HKEY hRootKey = HKEY_LOCAL_MACHINE;
+ HKEY hKey = NULL;
+ long lResult;
+ bool returnValue = false;
+
+ const char *placeHolder = strstr(keyPath, "$VERSION");
+ std::string bestName;
+ // If we have a $VERSION placeholder, do the highest-version search.
+ if (placeHolder) {
+ const char *keyEnd = placeHolder - 1;
+ const char *nextKey = placeHolder;
+ // Find end of previous key.
+ while ((keyEnd > keyPath) && (*keyEnd != '\\'))
+ keyEnd--;
+ // Find end of key containing $VERSION.
+ while (*nextKey && (*nextKey != '\\'))
+ nextKey++;
+ size_t partialKeyLength = keyEnd - keyPath;
+ char partialKey[256];
+    if (partialKeyLength >= sizeof(partialKey))
+      partialKeyLength = sizeof(partialKey) - 1;
+ strncpy(partialKey, keyPath, partialKeyLength);
+ partialKey[partialKeyLength] = '\0';
+ HKEY hTopKey = NULL;
+ lResult = RegOpenKeyEx(hRootKey, partialKey, 0, KEY_READ | KEY_WOW64_32KEY,
+ &hTopKey);
+ if (lResult == ERROR_SUCCESS) {
+ char keyName[256];
+ double bestValue = 0.0;
+ DWORD index, size = sizeof(keyName) - 1;
+ for (index = 0; RegEnumKeyEx(hTopKey, index, keyName, &size, NULL,
+ NULL, NULL, NULL) == ERROR_SUCCESS; index++) {
+ const char *sp = keyName;
+ while (*sp && !isDigit(*sp))
+ sp++;
+ if (!*sp)
+ continue;
+ const char *ep = sp + 1;
+ while (*ep && (isDigit(*ep) || (*ep == '.')))
+ ep++;
+ char numBuf[32];
+ strncpy(numBuf, sp, sizeof(numBuf) - 1);
+ numBuf[sizeof(numBuf) - 1] = '\0';
+ double dvalue = strtod(numBuf, NULL);
+ if (dvalue > bestValue) {
+ // Test that InstallDir is indeed there before keeping this index.
+ // Open the chosen key path remainder.
+ bestName = keyName;
+ // Append rest of key.
+ bestName.append(nextKey);
+ lResult = RegOpenKeyEx(hTopKey, bestName.c_str(), 0,
+ KEY_READ | KEY_WOW64_32KEY, &hKey);
+ if (lResult == ERROR_SUCCESS) {
+            if (readFullStringValue(hKey, valueName, value)) {
+ bestValue = dvalue;
+ if (phValue)
+ *phValue = bestName;
+ returnValue = true;
+ }
+ RegCloseKey(hKey);
+ }
+ }
+ size = sizeof(keyName) - 1;
+ }
+ RegCloseKey(hTopKey);
+ }
+ } else {
+ lResult =
+ RegOpenKeyEx(hRootKey, keyPath, 0, KEY_READ | KEY_WOW64_32KEY, &hKey);
+ if (lResult == ERROR_SUCCESS) {
+      if (readFullStringValue(hKey, valueName, value))
+ returnValue = true;
+ if (phValue)
+ phValue->clear();
+ RegCloseKey(hKey);
+ }
+ }
+ return returnValue;
+#endif // USE_WIN32
+}
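The $VERSION search reduces to: enumerate sibling key names, take the leading numeric run of each, and keep the name with the largest strtod value. That core comparison, separated from the registry plumbing (the key names are hard-coded stand-ins for what RegEnumKeyEx would return):

```cpp
#include <cctype>
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

// Extract the numeric portion of a key name ("VisualStudio 12.0" -> 12.0);
// returns 0.0 when no digits are present.
static double versionOf(const std::string &Name) {
  size_t I = 0;
  while (I < Name.size() && !std::isdigit((unsigned char)Name[I]))
    ++I;
  return I == Name.size() ? 0.0 : std::strtod(Name.c_str() + I, nullptr);
}

int main() {
  // Stand-ins for the subkey names RegEnumKeyEx would return.
  std::vector<std::string> Keys = {"9.0", "12.0", "Helper", "11.0"};
  std::string Best;
  double BestValue = 0.0;
  for (const auto &K : Keys) {
    double V = versionOf(K);
    if (V > BestValue) { // keep the highest-versioned key, as above
      BestValue = V;
      Best = K;
    }
  }
  std::cout << Best << "\n"; // prints 12.0
}
```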
+
+// Convert LLVM's ArchType to the name of the corresponding Windows SDK
+// libraries subfolder.
+static StringRef getWindowsSDKArch(llvm::Triple::ArchType Arch) {
+ switch (Arch) {
+ case llvm::Triple::x86:
+ return "x86";
+ case llvm::Triple::x86_64:
+ return "x64";
+ case llvm::Triple::arm:
+ return "arm";
+ default:
+ return "";
+ }
+}
+
+// Find the most recent version of Universal CRT or Windows 10 SDK.
+// vcvarsqueryregistry.bat from Visual Studio 2015 sorts entries in the include
+// directory by name and uses the last one of the list.
+// So we compare entry names lexicographically to find the greatest one.
+static bool getWindows10SDKVersion(const std::string &SDKPath,
+ std::string &SDKVersion) {
+ SDKVersion.clear();
+
+ std::error_code EC;
+ llvm::SmallString<128> IncludePath(SDKPath);
+ llvm::sys::path::append(IncludePath, "Include");
+ for (llvm::sys::fs::directory_iterator DirIt(IncludePath, EC), DirEnd;
+ DirIt != DirEnd && !EC; DirIt.increment(EC)) {
+ if (!llvm::sys::fs::is_directory(DirIt->path()))
+ continue;
+ StringRef CandidateName = llvm::sys::path::filename(DirIt->path());
+ // If WDK is installed, there could be subfolders like "wdf" in the
+ // "Include" directory.
+    // Allow only directories whose names start with "10.".
+ if (!CandidateName.startswith("10."))
+ continue;
+ if (CandidateName > SDKVersion)
+ SDKVersion = CandidateName;
+ }
+
+ return !SDKVersion.empty();
+}
+
+/// \brief Get Windows SDK installation directory.
+bool MSVCToolChain::getWindowsSDKDir(std::string &Path, int &Major,
+ std::string &WindowsSDKIncludeVersion,
+ std::string &WindowsSDKLibVersion) const {
+ std::string RegistrySDKVersion;
+ // Try the Windows registry.
+ if (!getSystemRegistryString(
+ "SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\$VERSION",
+ "InstallationFolder", Path, &RegistrySDKVersion))
+ return false;
+ if (Path.empty() || RegistrySDKVersion.empty())
+ return false;
+
+ WindowsSDKIncludeVersion.clear();
+ WindowsSDKLibVersion.clear();
+ Major = 0;
+ std::sscanf(RegistrySDKVersion.c_str(), "v%d.", &Major);
+ if (Major <= 7)
+ return true;
+ if (Major == 8) {
+    // Windows SDK 8.x installs libraries in a folder whose name depends on
+    // the version of the OS you're targeting. By default choose the newest,
+    // which usually corresponds to the version of the OS you've installed the
+    // SDK on.
+ const char *Tests[] = {"winv6.3", "win8", "win7"};
+ for (const char *Test : Tests) {
+ llvm::SmallString<128> TestPath(Path);
+ llvm::sys::path::append(TestPath, "Lib", Test);
+ if (llvm::sys::fs::exists(TestPath.c_str())) {
+ WindowsSDKLibVersion = Test;
+ break;
+ }
+ }
+ return !WindowsSDKLibVersion.empty();
+ }
+ if (Major == 10) {
+ if (!getWindows10SDKVersion(Path, WindowsSDKIncludeVersion))
+ return false;
+ WindowsSDKLibVersion = WindowsSDKIncludeVersion;
+ return true;
+ }
+ // Unsupported SDK version
+ return false;
+}
+
+// Gets the library path required to link against the Windows SDK.
+bool MSVCToolChain::getWindowsSDKLibraryPath(std::string &path) const {
+ std::string sdkPath;
+ int sdkMajor = 0;
+ std::string windowsSDKIncludeVersion;
+ std::string windowsSDKLibVersion;
+
+ path.clear();
+ if (!getWindowsSDKDir(sdkPath, sdkMajor, windowsSDKIncludeVersion,
+ windowsSDKLibVersion))
+ return false;
+
+ llvm::SmallString<128> libPath(sdkPath);
+ llvm::sys::path::append(libPath, "Lib");
+ if (sdkMajor <= 7) {
+ switch (getArch()) {
+ // In Windows SDK 7.x, x86 libraries are directly in the Lib folder.
+ case llvm::Triple::x86:
+ break;
+ case llvm::Triple::x86_64:
+ llvm::sys::path::append(libPath, "x64");
+ break;
+ case llvm::Triple::arm:
+ // It is not necessary to link against Windows SDK 7.x when targeting ARM.
+ return false;
+ default:
+ return false;
+ }
+ } else {
+ const StringRef archName = getWindowsSDKArch(getArch());
+ if (archName.empty())
+ return false;
+ llvm::sys::path::append(libPath, windowsSDKLibVersion, "um", archName);
+ }
+
+ path = libPath.str();
+ return true;
+}
+
+// Check if the Include path of a specified version of Visual Studio contains
+// specific header files. If not, they are probably shipped with Universal CRT.
+bool clang::driver::toolchains::MSVCToolChain::useUniversalCRT(
+ std::string &VisualStudioDir) const {
+ llvm::SmallString<128> TestPath(VisualStudioDir);
+ llvm::sys::path::append(TestPath, "VC\\include\\stdlib.h");
+
+ return !llvm::sys::fs::exists(TestPath);
+}
+
+bool MSVCToolChain::getUniversalCRTSdkDir(std::string &Path,
+ std::string &UCRTVersion) const {
+ // vcvarsqueryregistry.bat for Visual Studio 2015 queries the registry
+ // for the specific key "KitsRoot10". So do we.
+ if (!getSystemRegistryString(
+ "SOFTWARE\\Microsoft\\Windows Kits\\Installed Roots", "KitsRoot10",
+ Path, nullptr))
+ return false;
+
+ return getWindows10SDKVersion(Path, UCRTVersion);
+}
+
+bool MSVCToolChain::getUniversalCRTLibraryPath(std::string &Path) const {
+ std::string UniversalCRTSdkPath;
+ std::string UCRTVersion;
+
+ Path.clear();
+ if (!getUniversalCRTSdkDir(UniversalCRTSdkPath, UCRTVersion))
+ return false;
+
+ StringRef ArchName = getWindowsSDKArch(getArch());
+ if (ArchName.empty())
+ return false;
+
+ llvm::SmallString<128> LibPath(UniversalCRTSdkPath);
+ llvm::sys::path::append(LibPath, "Lib", UCRTVersion, "ucrt", ArchName);
+
+ Path = LibPath.str();
+ return true;
+}
+
+// Get the location to use for Visual Studio binaries. The location priority
+// is: %VCINSTALLDIR% > %PATH% > newest copy of Visual Studio installed on the
+// system (as reported by the registry).
+bool MSVCToolChain::getVisualStudioBinariesFolder(const char *clangProgramPath,
+ std::string &path) const {
+ path.clear();
+
+ SmallString<128> BinDir;
+
+ // First check the environment variables that vsvars32.bat sets.
+ llvm::Optional<std::string> VcInstallDir =
+ llvm::sys::Process::GetEnv("VCINSTALLDIR");
+ if (VcInstallDir.hasValue()) {
+ BinDir = VcInstallDir.getValue();
+ llvm::sys::path::append(BinDir, "bin");
+ } else {
+ // Next walk the PATH, trying to find a cl.exe in the path. If we find one,
+ // use that. However, make sure it's not clang's cl.exe.
+ llvm::Optional<std::string> OptPath = llvm::sys::Process::GetEnv("PATH");
+ if (OptPath.hasValue()) {
+ const char EnvPathSeparatorStr[] = {llvm::sys::EnvPathSeparator, '\0'};
+ SmallVector<StringRef, 8> PathSegments;
+ llvm::SplitString(OptPath.getValue(), PathSegments, EnvPathSeparatorStr);
+
+ for (StringRef PathSegment : PathSegments) {
+ if (PathSegment.empty())
+ continue;
+
+ SmallString<128> FilePath(PathSegment);
+ llvm::sys::path::append(FilePath, "cl.exe");
+ if (llvm::sys::fs::can_execute(FilePath.c_str()) &&
+ !llvm::sys::fs::equivalent(FilePath.c_str(), clangProgramPath)) {
+ // If we found it on the PATH, use it exactly as is with no
+ // modifications.
+ path = PathSegment;
+ return true;
+ }
+ }
+ }
+
+ std::string installDir;
+ // With no VCINSTALLDIR and nothing on the PATH, if we can't find it in the
+ // registry then we have no choice but to fail.
+ if (!getVisualStudioInstallDir(installDir))
+ return false;
+
+ // Regardless of what binary we're ultimately trying to find, we make sure
+ // that this is a Visual Studio directory by checking for cl.exe. We use
+ // cl.exe instead of other binaries like link.exe because programs such as
+ // GnuWin32 also have a utility called link.exe, so cl.exe is the least
+ // ambiguous.
+ BinDir = installDir;
+ llvm::sys::path::append(BinDir, "VC", "bin");
+ SmallString<128> ClPath(BinDir);
+ llvm::sys::path::append(ClPath, "cl.exe");
+
+ if (!llvm::sys::fs::can_execute(ClPath.c_str()))
+ return false;
+ }
+
+ if (BinDir.empty())
+ return false;
+
+ switch (getArch()) {
+ case llvm::Triple::x86:
+ break;
+ case llvm::Triple::x86_64:
+ llvm::sys::path::append(BinDir, "amd64");
+ break;
+ case llvm::Triple::arm:
+ llvm::sys::path::append(BinDir, "arm");
+ break;
+ default:
+ // Whatever this is, Visual Studio doesn't have a toolchain for it.
+ return false;
+ }
+ path = BinDir.str();
+ return true;
+}
+
+// Get Visual Studio installation directory.
+bool MSVCToolChain::getVisualStudioInstallDir(std::string &path) const {
+ // First check the environment variables that vsvars32.bat sets.
+ const char *vcinstalldir = getenv("VCINSTALLDIR");
+ if (vcinstalldir) {
+ path = vcinstalldir;
+ path = path.substr(0, path.find("\\VC"));
+ return true;
+ }
+
+ std::string vsIDEInstallDir;
+ std::string vsExpressIDEInstallDir;
+ // Then try the windows registry.
+ bool hasVCDir =
+ getSystemRegistryString("SOFTWARE\\Microsoft\\VisualStudio\\$VERSION",
+ "InstallDir", vsIDEInstallDir, nullptr);
+ if (hasVCDir && !vsIDEInstallDir.empty()) {
+ path = vsIDEInstallDir.substr(0, vsIDEInstallDir.find("\\Common7\\IDE"));
+ return true;
+ }
+
+ bool hasVCExpressDir =
+ getSystemRegistryString("SOFTWARE\\Microsoft\\VCExpress\\$VERSION",
+ "InstallDir", vsExpressIDEInstallDir, nullptr);
+ if (hasVCExpressDir && !vsExpressIDEInstallDir.empty()) {
+    path = vsExpressIDEInstallDir.substr(
+        0, vsExpressIDEInstallDir.find("\\Common7\\IDE"));
+ return true;
+ }
+
+ // Try the environment.
+ const char *vs120comntools = getenv("VS120COMNTOOLS");
+ const char *vs100comntools = getenv("VS100COMNTOOLS");
+ const char *vs90comntools = getenv("VS90COMNTOOLS");
+ const char *vs80comntools = getenv("VS80COMNTOOLS");
+
+ const char *vscomntools = nullptr;
+
+ // Find any version we can
+ if (vs120comntools)
+ vscomntools = vs120comntools;
+ else if (vs100comntools)
+ vscomntools = vs100comntools;
+ else if (vs90comntools)
+ vscomntools = vs90comntools;
+ else if (vs80comntools)
+ vscomntools = vs80comntools;
+
+ if (vscomntools && *vscomntools) {
+ const char *p = strstr(vscomntools, "\\Common7\\Tools");
+ path = p ? std::string(vscomntools, p) : vscomntools;
+ return true;
+ }
+ return false;
+}
+
+void MSVCToolChain::AddSystemIncludeWithSubfolder(
+ const ArgList &DriverArgs, ArgStringList &CC1Args,
+ const std::string &folder, const Twine &subfolder1, const Twine &subfolder2,
+ const Twine &subfolder3) const {
+ llvm::SmallString<128> path(folder);
+ llvm::sys::path::append(path, subfolder1, subfolder2, subfolder3);
+ addSystemInclude(DriverArgs, CC1Args, path);
+}
+
+void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, getDriver().ResourceDir,
+ "include");
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+  // Honor %INCLUDE%. After vcvarsall.bat has run, it holds the essential
+  // search paths.
+ if (const char *cl_include_dir = getenv("INCLUDE")) {
+ SmallVector<StringRef, 8> Dirs;
+ StringRef(cl_include_dir)
+ .split(Dirs, ";", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
+ for (StringRef Dir : Dirs)
+ addSystemInclude(DriverArgs, CC1Args, Dir);
+ if (!Dirs.empty())
+ return;
+ }
+
+ std::string VSDir;
+
+ // When built with access to the proper Windows APIs, try to actually find
+ // the correct include paths first.
+ if (getVisualStudioInstallDir(VSDir)) {
+ AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, VSDir, "VC\\include");
+
+ if (useUniversalCRT(VSDir)) {
+ std::string UniversalCRTSdkPath;
+ std::string UCRTVersion;
+ if (getUniversalCRTSdkDir(UniversalCRTSdkPath, UCRTVersion)) {
+ AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, UniversalCRTSdkPath,
+ "Include", UCRTVersion, "ucrt");
+ }
+ }
+
+ std::string WindowsSDKDir;
+ int major;
+ std::string windowsSDKIncludeVersion;
+ std::string windowsSDKLibVersion;
+ if (getWindowsSDKDir(WindowsSDKDir, major, windowsSDKIncludeVersion,
+ windowsSDKLibVersion)) {
+ if (major >= 8) {
+        // Note: windowsSDKIncludeVersion is empty for SDKs prior to v10;
+        // llvm::sys::path::append handles the empty component gracefully.
+ AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
+ "include", windowsSDKIncludeVersion,
+ "shared");
+ AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
+ "include", windowsSDKIncludeVersion,
+ "um");
+ AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
+ "include", windowsSDKIncludeVersion,
+ "winrt");
+ } else {
+ AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
+ "include");
+ }
+ } else {
+ addSystemInclude(DriverArgs, CC1Args, VSDir);
+ }
+ return;
+ }
+
+ // As a fallback, select default install paths.
+ // FIXME: Don't guess drives and paths like this on Windows.
+ const StringRef Paths[] = {
+ "C:/Program Files/Microsoft Visual Studio 10.0/VC/include",
+ "C:/Program Files/Microsoft Visual Studio 9.0/VC/include",
+ "C:/Program Files/Microsoft Visual Studio 9.0/VC/PlatformSDK/Include",
+ "C:/Program Files/Microsoft Visual Studio 8/VC/include",
+ "C:/Program Files/Microsoft Visual Studio 8/VC/PlatformSDK/Include"
+ };
+ addSystemIncludes(DriverArgs, CC1Args, Paths);
+}
+
+void MSVCToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ // FIXME: There should probably be logic here to find libc++ on Windows.
+}
+
+std::string
+MSVCToolChain::ComputeEffectiveClangTriple(const ArgList &Args,
+ types::ID InputType) const {
+ std::string TripleStr =
+ ToolChain::ComputeEffectiveClangTriple(Args, InputType);
+ llvm::Triple Triple(TripleStr);
+ VersionTuple MSVT =
+ tools::visualstudio::getMSVCVersion(/*D=*/nullptr, Triple, Args,
+ /*IsWindowsMSVC=*/true);
+ if (MSVT.empty())
+ return TripleStr;
+
+ MSVT = VersionTuple(MSVT.getMajor(), MSVT.getMinor().getValueOr(0),
+ MSVT.getSubminor().getValueOr(0));
+
+ if (Triple.getEnvironment() == llvm::Triple::MSVC) {
+ StringRef ObjFmt = Triple.getEnvironmentName().split('-').second;
+ if (ObjFmt.empty())
+ Triple.setEnvironmentName((Twine("msvc") + MSVT.getAsString()).str());
+ else
+ Triple.setEnvironmentName(
+ (Twine("msvc") + MSVT.getAsString() + Twine('-') + ObjFmt).str());
+ }
+ return Triple.getTriple();
+}
+
+SanitizerMask MSVCToolChain::getSupportedSanitizers() const {
+ SanitizerMask Res = ToolChain::getSupportedSanitizers();
+ Res |= SanitizerKind::Address;
+ return Res;
+}
+
+llvm::opt::DerivedArgList *
+MSVCToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
+ const char *BoundArch) const {
+ DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
+ const OptTable &Opts = getDriver().getOpts();
+
+  // /Oy and /Oy- only have an effect under X86-32.
+ bool SupportsForcingFramePointer = getArch() == llvm::Triple::x86;
+
+ // The -O[12xd] flag actually expands to several flags. We must desugar the
+ // flags so that options embedded can be negated. For example, the '-O2' flag
+ // enables '-Oy'. Expanding '-O2' into its constituent flags allows us to
+ // correctly handle '-O2 -Oy-' where the trailing '-Oy-' disables a single
+ // aspect of '-O2'.
+ //
+ // Note that this expansion logic only applies to the *last* of '[12xd]'.
+
+ // First step is to search for the character we'd like to expand.
+ const char *ExpandChar = nullptr;
+ for (Arg *A : Args) {
+ if (!A->getOption().matches(options::OPT__SLASH_O))
+ continue;
+ StringRef OptStr = A->getValue();
+ for (size_t I = 0, E = OptStr.size(); I != E; ++I) {
+ const char &OptChar = *(OptStr.data() + I);
+ if (OptChar == '1' || OptChar == '2' || OptChar == 'x' || OptChar == 'd')
+ ExpandChar = OptStr.data() + I;
+ }
+ }
+
+ // The -O flag actually takes an amalgam of other options. For example,
+ // '/Ogyb2' is equivalent to '/Og' '/Oy' '/Ob2'.
+ for (Arg *A : Args) {
+ if (!A->getOption().matches(options::OPT__SLASH_O)) {
+ DAL->append(A);
+ continue;
+ }
+
+ StringRef OptStr = A->getValue();
+ for (size_t I = 0, E = OptStr.size(); I != E; ++I) {
+ const char &OptChar = *(OptStr.data() + I);
+ switch (OptChar) {
+ default:
+ break;
+ case '1':
+ case '2':
+ case 'x':
+ case 'd':
+ if (&OptChar == ExpandChar) {
+ if (OptChar == 'd') {
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_O0));
+ } else {
+ if (OptChar == '1') {
+ DAL->AddJoinedArg(A, Opts.getOption(options::OPT_O), "s");
+ } else if (OptChar == '2' || OptChar == 'x') {
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_fbuiltin));
+ DAL->AddJoinedArg(A, Opts.getOption(options::OPT_O), "2");
+ }
+ if (SupportsForcingFramePointer)
+ DAL->AddFlagArg(A,
+ Opts.getOption(options::OPT_fomit_frame_pointer));
+ if (OptChar == '1' || OptChar == '2')
+ DAL->AddFlagArg(A,
+ Opts.getOption(options::OPT_ffunction_sections));
+ }
+ }
+ break;
+ case 'b':
+ if (I + 1 != E && isdigit(OptStr[I + 1]))
+ ++I;
+ break;
+ case 'g':
+ break;
+ case 'i':
+ if (I + 1 != E && OptStr[I + 1] == '-') {
+ ++I;
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_fno_builtin));
+ } else {
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_fbuiltin));
+ }
+ break;
+ case 's':
+ DAL->AddJoinedArg(A, Opts.getOption(options::OPT_O), "s");
+ break;
+ case 't':
+ DAL->AddJoinedArg(A, Opts.getOption(options::OPT_O), "2");
+ break;
+ case 'y': {
+ bool OmitFramePointer = true;
+ if (I + 1 != E && OptStr[I + 1] == '-') {
+ OmitFramePointer = false;
+ ++I;
+ }
+ if (SupportsForcingFramePointer) {
+ if (OmitFramePointer)
+ DAL->AddFlagArg(A,
+ Opts.getOption(options::OPT_fomit_frame_pointer));
+ else
+ DAL->AddFlagArg(
+ A, Opts.getOption(options::OPT_fno_omit_frame_pointer));
+ }
+ break;
+ }
+ }
+ }
+ }
+ return DAL;
+}
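TranslateArgs walks each /O option string character by character so that later characters and later /O arguments can override earlier ones; only the last of [12xd] is expanded into its constituent flags. A much-reduced model of the per-character walk (it maps characters straight to GCC-style spellings and deliberately ignores the ExpandChar subtlety):

```cpp
#include <cctype>
#include <iostream>
#include <string>
#include <vector>

// Toy desugaring of a /O option string such as "gyb2" or "2y-".
static std::vector<std::string> expandSlashO(const std::string &OptStr) {
  std::vector<std::string> Out;
  for (size_t I = 0, E = OptStr.size(); I != E; ++I) {
    switch (OptStr[I]) {
    case '1': Out.push_back("-Os"); break;
    case '2': Out.push_back("-O2"); break;
    case 'd': Out.push_back("-O0"); break;
    case 'b': // /Ob takes a digit argument: consume it too.
      if (I + 1 != E && std::isdigit((unsigned char)OptStr[I + 1]))
        ++I;
      break;
    case 'y': { // /Oy[-]: a trailing '-' negates the flag.
      bool Omit = true;
      if (I + 1 != E && OptStr[I + 1] == '-') {
        Omit = false;
        ++I;
      }
      Out.push_back(Omit ? "-fomit-frame-pointer" : "-fno-omit-frame-pointer");
      break;
    }
    default: break; // characters g, i, s, t, x are not modelled here
    }
  }
  return Out;
}

int main() {
  for (const auto &F : expandSlashO("2y-"))
    std::cout << F << ' '; // -O2 -fno-omit-frame-pointer
  std::cout << '\n';
}
```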
diff --git a/contrib/llvm/tools/clang/lib/Driver/MinGWToolChain.cpp b/contrib/llvm/tools/clang/lib/Driver/MinGWToolChain.cpp
new file mode 100644
index 0000000..c5287bb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/MinGWToolChain.cpp
@@ -0,0 +1,244 @@
+//===--- MinGWToolChain.cpp - MinGWToolChain Implementation ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ToolChains.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Options.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+
+using namespace clang::diag;
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang;
+using namespace llvm::opt;
+
+namespace {
+// Simplified from Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple.
+bool findGccVersion(StringRef LibDir, std::string &GccLibDir,
+ std::string &Ver) {
+ Generic_GCC::GCCVersion Version = Generic_GCC::GCCVersion::Parse("0.0.0");
+ std::error_code EC;
+ for (llvm::sys::fs::directory_iterator LI(LibDir, EC), LE; !EC && LI != LE;
+ LI = LI.increment(EC)) {
+ StringRef VersionText = llvm::sys::path::filename(LI->path());
+ Generic_GCC::GCCVersion CandidateVersion =
+ Generic_GCC::GCCVersion::Parse(VersionText);
+ if (CandidateVersion.Major == -1)
+ continue;
+ if (CandidateVersion <= Version)
+ continue;
+ Ver = VersionText;
+ GccLibDir = LI->path();
+ }
+  return !Ver.empty();
+}
+}
+
+void MinGW::findGccLibDir() {
+ llvm::SmallVector<llvm::SmallString<32>, 2> Archs;
+ Archs.emplace_back(getTriple().getArchName());
+ Archs[0] += "-w64-mingw32";
+ Archs.emplace_back("mingw32");
+ Arch = Archs[0].str();
+ // lib: Arch Linux, Ubuntu, Windows
+ // lib64: openSUSE Linux
+ for (StringRef CandidateLib : {"lib", "lib64"}) {
+ for (StringRef CandidateArch : Archs) {
+ llvm::SmallString<1024> LibDir(Base);
+ llvm::sys::path::append(LibDir, CandidateLib, "gcc", CandidateArch);
+ if (findGccVersion(LibDir, GccLibDir, Ver)) {
+ Arch = CandidateArch;
+ return;
+ }
+ }
+ }
+}
+
+MinGW::MinGW(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
+ : ToolChain(D, Triple, Args) {
+ getProgramPaths().push_back(getDriver().getInstalledDir());
+
+  // On Windows, if there is no sysroot, we search for gcc on the PATH.
+ if (getDriver().SysRoot.size())
+ Base = getDriver().SysRoot;
+#ifdef LLVM_ON_WIN32
+ else if (llvm::ErrorOr<std::string> GPPName =
+ llvm::sys::findProgramByName("gcc"))
+ Base = llvm::sys::path::parent_path(
+ llvm::sys::path::parent_path(GPPName.get()));
+#endif
+ if (!Base.size())
+ Base = llvm::sys::path::parent_path(getDriver().getInstalledDir());
+
+ Base += llvm::sys::path::get_separator();
+ findGccLibDir();
+  // GccLibDir must precede Base/lib so that the
+  // correct crtbegin.o and crtend.o are found.
+ getFilePaths().push_back(GccLibDir);
+ getFilePaths().push_back(
+ (Base + Arch + llvm::sys::path::get_separator() + "lib").str());
+ getFilePaths().push_back(Base + "lib");
+ // openSUSE
+ getFilePaths().push_back(Base + Arch + "/sys-root/mingw/lib");
+}
+
+bool MinGW::IsIntegratedAssemblerDefault() const { return true; }
+
+Tool *MinGW::getTool(Action::ActionClass AC) const {
+ switch (AC) {
+ case Action::PreprocessJobClass:
+ if (!Preprocessor)
+ Preprocessor.reset(new tools::gcc::Preprocessor(*this));
+ return Preprocessor.get();
+ case Action::CompileJobClass:
+ if (!Compiler)
+ Compiler.reset(new tools::gcc::Compiler(*this));
+ return Compiler.get();
+ default:
+ return ToolChain::getTool(AC);
+ }
+}
+
+Tool *MinGW::buildAssembler() const {
+ return new tools::MinGW::Assembler(*this);
+}
+
+Tool *MinGW::buildLinker() const { return new tools::MinGW::Linker(*this); }
+
+bool MinGW::IsUnwindTablesDefault() const {
+ return getArch() == llvm::Triple::x86_64;
+}
+
+bool MinGW::isPICDefault() const { return getArch() == llvm::Triple::x86_64; }
+
+bool MinGW::isPIEDefault() const { return false; }
+
+bool MinGW::isPICDefaultForced() const {
+ return getArch() == llvm::Triple::x86_64;
+}
+
+bool MinGW::UseSEHExceptions() const {
+ return getArch() == llvm::Triple::x86_64;
+}
+
+// Include directories for various hosts:
+
+// Windows, mingw.org
+// c:\mingw\lib\gcc\mingw32\4.8.1\include\c++
+// c:\mingw\lib\gcc\mingw32\4.8.1\include\c++\mingw32
+// c:\mingw\lib\gcc\mingw32\4.8.1\include\c++\backward
+// c:\mingw\lib\gcc\mingw32\4.8.1\include
+// c:\mingw\include
+// c:\mingw\lib\gcc\mingw32\4.8.1\include-fixed
+// c:\mingw\mingw32\include
+
+// Windows, mingw-w64 mingw-builds
+// c:\mingw32\lib\gcc\i686-w64-mingw32\4.9.1\include
+// c:\mingw32\lib\gcc\i686-w64-mingw32\4.9.1\include-fixed
+// c:\mingw32\i686-w64-mingw32\include
+// c:\mingw32\i686-w64-mingw32\include\c++
+// c:\mingw32\i686-w64-mingw32\include\c++\i686-w64-mingw32
+// c:\mingw32\i686-w64-mingw32\include\c++\backward
+
+// Windows, mingw-w64 msys2
+// c:\msys64\mingw32\lib\gcc\i686-w64-mingw32\4.9.2\include
+// c:\msys64\mingw32\include
+// c:\msys64\mingw32\lib\gcc\i686-w64-mingw32\4.9.2\include-fixed
+// c:\msys64\mingw32\i686-w64-mingw32\include
+// c:\msys64\mingw32\include\c++\4.9.2
+// c:\msys64\mingw32\include\c++\4.9.2\i686-w64-mingw32
+// c:\msys64\mingw32\include\c++\4.9.2\backward
+
+// openSUSE
+// /usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++
+// /usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++/x86_64-w64-mingw32
+// /usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include/c++/backward
+// /usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include
+// /usr/lib64/gcc/x86_64-w64-mingw32/5.1.0/include-fixed
+// /usr/x86_64-w64-mingw32/sys-root/mingw/include
+
+// Arch Linux
+// /usr/i686-w64-mingw32/include/c++/5.1.0
+// /usr/i686-w64-mingw32/include/c++/5.1.0/i686-w64-mingw32
+// /usr/i686-w64-mingw32/include/c++/5.1.0/backward
+// /usr/lib/gcc/i686-w64-mingw32/5.1.0/include
+// /usr/lib/gcc/i686-w64-mingw32/5.1.0/include-fixed
+// /usr/i686-w64-mingw32/include
+
+// Ubuntu
+// /usr/include/c++/4.8
+// /usr/include/c++/4.8/x86_64-w64-mingw32
+// /usr/include/c++/4.8/backward
+// /usr/lib/gcc/x86_64-w64-mingw32/4.8/include
+// /usr/lib/gcc/x86_64-w64-mingw32/4.8/include-fixed
+// /usr/x86_64-w64-mingw32/include
+
+void MinGW::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<1024> P(getDriver().ResourceDir);
+ llvm::sys::path::append(P, "include");
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ if (GetRuntimeLibType(DriverArgs) == ToolChain::RLT_Libgcc) {
+ llvm::SmallString<1024> IncludeDir(GccLibDir);
+ llvm::sys::path::append(IncludeDir, "include");
+ addSystemInclude(DriverArgs, CC1Args, IncludeDir.c_str());
+ IncludeDir += "-fixed";
+ // openSUSE
+ addSystemInclude(DriverArgs, CC1Args,
+ Base + Arch + "/sys-root/mingw/include");
+ addSystemInclude(DriverArgs, CC1Args, IncludeDir.c_str());
+ }
+ addSystemInclude(DriverArgs, CC1Args,
+ Base + Arch + llvm::sys::path::get_separator() + "include");
+ addSystemInclude(DriverArgs, CC1Args, Base + "include");
+}
+
+void MinGW::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx:
+ addSystemInclude(DriverArgs, CC1Args,
+ Base + "include" + llvm::sys::path::get_separator() +
+ "c++" + llvm::sys::path::get_separator() + "v1");
+ break;
+
+ case ToolChain::CST_Libstdcxx:
+ llvm::SmallVector<llvm::SmallString<1024>, 4> CppIncludeBases;
+ CppIncludeBases.emplace_back(Base);
+ llvm::sys::path::append(CppIncludeBases[0], Arch, "include", "c++");
+ CppIncludeBases.emplace_back(Base);
+ llvm::sys::path::append(CppIncludeBases[1], Arch, "include", "c++", Ver);
+ CppIncludeBases.emplace_back(Base);
+ llvm::sys::path::append(CppIncludeBases[2], "include", "c++", Ver);
+ CppIncludeBases.emplace_back(GccLibDir);
+ llvm::sys::path::append(CppIncludeBases[3], "include", "c++");
+ for (auto &CppIncludeBase : CppIncludeBases) {
+ addSystemInclude(DriverArgs, CC1Args, CppIncludeBase);
+ CppIncludeBase += llvm::sys::path::get_separator();
+ addSystemInclude(DriverArgs, CC1Args, CppIncludeBase + Arch);
+ addSystemInclude(DriverArgs, CC1Args, CppIncludeBase + "backward");
+ }
+ break;
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Multilib.cpp b/contrib/llvm/tools/clang/lib/Driver/Multilib.cpp
new file mode 100644
index 0000000..34ad6a7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Multilib.cpp
@@ -0,0 +1,294 @@
+//===--- Multilib.cpp - Multilib Implementation ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Multilib.h"
+#include "Tools.h"
+#include "clang/Driver/Options.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Option/OptTable.h"
+#include "llvm/Option/Option.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/YAMLParser.h"
+#include "llvm/Support/YAMLTraits.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+
+using namespace clang::driver;
+using namespace clang;
+using namespace llvm::opt;
+using namespace llvm::sys;
+
+/// Normalize Segment to "/foo/bar" or "".
+static void normalizePathSegment(std::string &Segment) {
+ StringRef seg = Segment;
+
+ // Prune trailing "/" or "./"
+ while (1) {
+ StringRef last = path::filename(seg);
+ if (last != ".")
+ break;
+ seg = path::parent_path(seg);
+ }
+
+ if (seg.empty() || seg == "/") {
+ Segment = "";
+ return;
+ }
+
+ // Add leading '/'
+ if (seg.front() != '/') {
+ Segment = "/" + seg.str();
+ } else {
+ Segment = seg;
+ }
+}
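normalizePathSegment canonicalizes a multilib suffix to either the empty string or a '/'-prefixed path with trailing '/' and '.' components pruned. An equivalent sketch in plain std::string terms (intended to match the llvm::sys::path version on ordinary inputs, not guaranteed for exotic ones):

```cpp
#include <cassert>
#include <string>

// Standalone take on normalizePathSegment: "" stays "", everything else
// becomes "/foo/bar" with trailing "/" and "/." components pruned.
static void normalizeSegment(std::string &Segment) {
  std::string S = Segment;
  while (!S.empty()) {
    if (S.back() == '/')
      S.pop_back();
    else if (S.size() >= 2 && S.compare(S.size() - 2, 2, "/.") == 0)
      S.erase(S.size() - 2);
    else if (S == ".")
      S.clear();
    else
      break;
  }
  if (S.empty()) {
    Segment = "";
    return;
  }
  Segment = (S.front() == '/') ? S : "/" + S; // ensure the leading '/'
}

int main() {
  std::string A = "foo/bar/./";
  normalizeSegment(A);
  assert(A == "/foo/bar");
  std::string B = "/";
  normalizeSegment(B);
  assert(B == "");
}
```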
+
+Multilib::Multilib(StringRef GCCSuffix, StringRef OSSuffix,
+ StringRef IncludeSuffix)
+ : GCCSuffix(GCCSuffix), OSSuffix(OSSuffix), IncludeSuffix(IncludeSuffix) {
+ normalizePathSegment(this->GCCSuffix);
+ normalizePathSegment(this->OSSuffix);
+ normalizePathSegment(this->IncludeSuffix);
+}
+
+Multilib &Multilib::gccSuffix(StringRef S) {
+ GCCSuffix = S;
+ normalizePathSegment(GCCSuffix);
+ return *this;
+}
+
+Multilib &Multilib::osSuffix(StringRef S) {
+ OSSuffix = S;
+ normalizePathSegment(OSSuffix);
+ return *this;
+}
+
+Multilib &Multilib::includeSuffix(StringRef S) {
+ IncludeSuffix = S;
+ normalizePathSegment(IncludeSuffix);
+ return *this;
+}
+
+void Multilib::print(raw_ostream &OS) const {
+ assert(GCCSuffix.empty() || (StringRef(GCCSuffix).front() == '/'));
+ if (GCCSuffix.empty())
+ OS << ".";
+ else {
+ OS << StringRef(GCCSuffix).drop_front();
+ }
+ OS << ";";
+ for (StringRef Flag : Flags) {
+ if (Flag.front() == '+')
+ OS << "@" << Flag.substr(1);
+ }
+}
+
+bool Multilib::isValid() const {
+ llvm::StringMap<int> FlagSet;
+ for (unsigned I = 0, N = Flags.size(); I != N; ++I) {
+ StringRef Flag(Flags[I]);
+ llvm::StringMap<int>::iterator SI = FlagSet.find(Flag.substr(1));
+
+ assert(StringRef(Flag).front() == '+' || StringRef(Flag).front() == '-');
+
+ if (SI == FlagSet.end())
+ FlagSet[Flag.substr(1)] = I;
+ else if (Flags[I] != Flags[SI->getValue()])
+ return false;
+ }
+ return true;
+}
+
+bool Multilib::operator==(const Multilib &Other) const {
+  // Check whether the flag sets match, allowing the comparison to be
+  // order-invariant.
+ llvm::StringSet<> MyFlags;
+ for (const auto &Flag : Flags)
+ MyFlags.insert(Flag);
+
+ for (const auto &Flag : Other.Flags)
+ if (MyFlags.find(Flag) == MyFlags.end())
+ return false;
+
+ if (osSuffix() != Other.osSuffix())
+ return false;
+
+ if (gccSuffix() != Other.gccSuffix())
+ return false;
+
+ if (includeSuffix() != Other.includeSuffix())
+ return false;
+
+ return true;
+}
+
+raw_ostream &clang::driver::operator<<(raw_ostream &OS, const Multilib &M) {
+ M.print(OS);
+ return OS;
+}
+
+MultilibSet &MultilibSet::Maybe(const Multilib &M) {
+ Multilib Opposite;
+ // Negate any '+' flags
+ for (StringRef Flag : M.flags()) {
+ if (Flag.front() == '+')
+ Opposite.flags().push_back(("-" + Flag.substr(1)).str());
+ }
+ return Either(M, Opposite);
+}
+
+MultilibSet &MultilibSet::Either(const Multilib &M1, const Multilib &M2) {
+ return Either({M1, M2});
+}
+
+MultilibSet &MultilibSet::Either(const Multilib &M1, const Multilib &M2,
+ const Multilib &M3) {
+ return Either({M1, M2, M3});
+}
+
+MultilibSet &MultilibSet::Either(const Multilib &M1, const Multilib &M2,
+ const Multilib &M3, const Multilib &M4) {
+ return Either({M1, M2, M3, M4});
+}
+
+MultilibSet &MultilibSet::Either(const Multilib &M1, const Multilib &M2,
+ const Multilib &M3, const Multilib &M4,
+ const Multilib &M5) {
+ return Either({M1, M2, M3, M4, M5});
+}
+
+static Multilib compose(const Multilib &Base, const Multilib &New) {
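+ // Concatenate the path suffixes and flag lists of Base and New. No flag
+ // checking happens here; conflicting compositions are rejected by the
+ // caller via Multilib::isValid().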
+ SmallString<128> GCCSuffix;
+ llvm::sys::path::append(GCCSuffix, "/", Base.gccSuffix(), New.gccSuffix());
+ SmallString<128> OSSuffix;
+ llvm::sys::path::append(OSSuffix, "/", Base.osSuffix(), New.osSuffix());
+ SmallString<128> IncludeSuffix;
+ llvm::sys::path::append(IncludeSuffix, "/", Base.includeSuffix(),
+ New.includeSuffix());
+
+ Multilib Composed(GCCSuffix, OSSuffix, IncludeSuffix);
+
+ Multilib::flags_list &Flags = Composed.flags();
+
+ Flags.insert(Flags.end(), Base.flags().begin(), Base.flags().end());
+ Flags.insert(Flags.end(), New.flags().begin(), New.flags().end());
+
+ return Composed;
+}
+
+MultilibSet &MultilibSet::Either(ArrayRef<Multilib> MultilibSegments) {
+ multilib_list Composed;
+
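+ // Seed an empty set with the segments; otherwise form the cross product of
+ // the existing multilibs and the new segments, keeping only compositions
+ // whose flags do not conflict.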
+ if (Multilibs.empty())
+ Multilibs.insert(Multilibs.end(), MultilibSegments.begin(),
+ MultilibSegments.end());
+ else {
+ for (const Multilib &New : MultilibSegments) {
+ for (const Multilib &Base : *this) {
+ Multilib MO = compose(Base, New);
+ if (MO.isValid())
+ Composed.push_back(MO);
+ }
+ }
+
+ Multilibs = Composed;
+ }
+
+ return *this;
+}
+
+MultilibSet &MultilibSet::FilterOut(FilterCallback F) {
+ filterInPlace(F, Multilibs);
+ return *this;
+}
+
+MultilibSet &MultilibSet::FilterOut(const char *Regex) {
+ llvm::Regex R(Regex);
+#ifndef NDEBUG
+ std::string Error;
+ if (!R.isValid(Error)) {
+ llvm::errs() << Error;
+ llvm_unreachable("Invalid regex!");
+ }
+#endif
+
+ filterInPlace([&R](const Multilib &M) { return R.match(M.gccSuffix()); },
+ Multilibs);
+ return *this;
+}
+
+void MultilibSet::push_back(const Multilib &M) { Multilibs.push_back(M); }
+
+void MultilibSet::combineWith(const MultilibSet &Other) {
+ Multilibs.insert(Multilibs.end(), Other.begin(), Other.end());
+}
+
+static bool isFlagEnabled(StringRef Flag) {
+ char Indicator = Flag.front();
+ assert(Indicator == '+' || Indicator == '-');
+ return Indicator == '+';
+}
+
+bool MultilibSet::select(const Multilib::flags_list &Flags, Multilib &M) const {
+ llvm::StringMap<bool> FlagSet;
+
+ // Stuff all of the flags into the FlagSet such that a true mapped value
+ // indicates the flag was enabled, and a false mapped value indicates the
+ // flag was disabled.
+ for (StringRef Flag : Flags)
+ FlagSet[Flag.substr(1)] = isFlagEnabled(Flag);
+
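+ // Filter out any multilib whose flags contradict the requested flag set;
+ // flags that FlagSet does not mention are treated as "don't care".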
+ multilib_list Filtered = filterCopy([&FlagSet](const Multilib &M) {
+ for (StringRef Flag : M.flags()) {
+ llvm::StringMap<bool>::const_iterator SI = FlagSet.find(Flag.substr(1));
+ if (SI != FlagSet.end())
+ if (SI->getValue() != isFlagEnabled(Flag))
+ return true;
+ }
+ return false;
+ }, Multilibs);
+
+ if (Filtered.size() == 0)
+ return false;
+ if (Filtered.size() == 1) {
+ M = Filtered[0];
+ return true;
+ }
+
+ // TODO: pick the "best" multlib when more than one is suitable
+ assert(false);
+ return false;
+}
+
+void MultilibSet::print(raw_ostream &OS) const {
+ for (const Multilib &M : *this)
+ OS << M << "\n";
+}
+
+MultilibSet::multilib_list MultilibSet::filterCopy(FilterCallback F,
+ const multilib_list &Ms) {
+ multilib_list Copy(Ms);
+ filterInPlace(F, Copy);
+ return Copy;
+}
+
+void MultilibSet::filterInPlace(FilterCallback F, multilib_list &Ms) {
+ Ms.erase(std::remove_if(Ms.begin(), Ms.end(), F), Ms.end());
+}
+
+raw_ostream &clang::driver::operator<<(raw_ostream &OS, const MultilibSet &MS) {
+ MS.print(OS);
+ return OS;
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Phases.cpp b/contrib/llvm/tools/clang/lib/Driver/Phases.cpp
new file mode 100644
index 0000000..7ae2708
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Phases.cpp
@@ -0,0 +1,27 @@
+//===--- Phases.cpp - Transformations on Driver Types ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Phases.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+
+using namespace clang::driver;
+
+const char *phases::getPhaseName(ID Id) {
+ switch (Id) {
+ case Preprocess: return "preprocessor";
+ case Precompile: return "precompiler";
+ case Compile: return "compiler";
+ case Backend: return "backend";
+ case Assemble: return "assembler";
+ case Link: return "linker";
+ }
+
+ llvm_unreachable("Invalid phase id.");
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.cpp b/contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.cpp
new file mode 100644
index 0000000..2fded1c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.cpp
@@ -0,0 +1,730 @@
+//===--- SanitizerArgs.cpp - Arguments for sanitizer tools ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Driver/SanitizerArgs.h"
+#include "Tools.h"
+#include "clang/Basic/Sanitizers.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/ToolChain.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/SpecialCaseList.h"
+#include <memory>
+
+using namespace clang;
+using namespace clang::SanitizerKind;
+using namespace clang::driver;
+using namespace llvm::opt;
+
+enum : SanitizerMask {
+ NeedsUbsanRt = Undefined | Integer | CFI,
+ NeedsUbsanCxxRt = Vptr | CFI,
+ NotAllowedWithTrap = Vptr,
+ RequiresPIE = DataFlow,
+ NeedsUnwindTables = Address | Thread | Memory | DataFlow,
+ SupportsCoverage = Address | Memory | Leak | Undefined | Integer | DataFlow,
+ RecoverableByDefault = Undefined | Integer,
+ Unrecoverable = Unreachable | Return,
+ LegacyFsanitizeRecoverMask = Undefined | Integer,
+ NeedsLTO = CFI,
+ TrappingSupported =
+ (Undefined & ~Vptr) | UnsignedIntegerOverflow | LocalBounds | CFI,
+ TrappingDefault = CFI,
+};
+
+enum CoverageFeature {
+ CoverageFunc = 1 << 0,
+ CoverageBB = 1 << 1,
+ CoverageEdge = 1 << 2,
+ CoverageIndirCall = 1 << 3,
+ CoverageTraceBB = 1 << 4,
+ CoverageTraceCmp = 1 << 5,
+ Coverage8bitCounters = 1 << 6,
+};
+
+/// Parse a -fsanitize= or -fno-sanitize= argument's values, diagnosing any
+/// invalid components. Returns a SanitizerMask.
+static SanitizerMask parseArgValues(const Driver &D, const llvm::opt::Arg *A,
+ bool DiagnoseErrors);
+
+/// Parse -f(no-)?sanitize-coverage= flag values, diagnosing any invalid
+/// components. Returns OR of members of \c CoverageFeature enumeration.
+static int parseCoverageFeatures(const Driver &D, const llvm::opt::Arg *A);
+
+/// Produce an argument string from ArgList \p Args, which shows how it
+/// provides some sanitizer kind from \p Mask. For example, the argument list
+/// "-fsanitize=thread,vptr -fsanitize=address" with mask \c NeedsUbsanRt
+/// would produce "-fsanitize=vptr".
+static std::string lastArgumentForMask(const Driver &D,
+ const llvm::opt::ArgList &Args,
+ SanitizerMask Mask);
+
+/// Produce an argument string from argument \p A, which shows how it provides
+/// a value in \p Mask. For instance, the argument
+/// "-fsanitize=address,alignment" with mask \c NeedsUbsanRt would produce
+/// "-fsanitize=alignment".
+static std::string describeSanitizeArg(const llvm::opt::Arg *A,
+ SanitizerMask Mask);
+
+/// Produce a string containing comma-separated names of sanitizers in \p
+/// Sanitizers set.
+static std::string toString(const clang::SanitizerSet &Sanitizers);
+
+static bool getDefaultBlacklist(const Driver &D, SanitizerMask Kinds,
+ std::string &BLPath) {
+ const char *BlacklistFile = nullptr;
+ if (Kinds & Address)
+ BlacklistFile = "asan_blacklist.txt";
+ else if (Kinds & Memory)
+ BlacklistFile = "msan_blacklist.txt";
+ else if (Kinds & Thread)
+ BlacklistFile = "tsan_blacklist.txt";
+ else if (Kinds & DataFlow)
+ BlacklistFile = "dfsan_abilist.txt";
+ else if (Kinds & CFI)
+ BlacklistFile = "cfi_blacklist.txt";
+
+ if (BlacklistFile) {
+ clang::SmallString<64> Path(D.ResourceDir);
+ llvm::sys::path::append(Path, BlacklistFile);
+ BLPath = Path.str();
+ return true;
+ }
+ return false;
+}
+
+/// Sets group bits for every group that has at least one representative already
+/// enabled in \p Kinds.
+static SanitizerMask setGroupBits(SanitizerMask Kinds) {
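+ // For every group listed in Sanitizers.def, set the group bit (e.g.
+ // UndefinedGroup) whenever any individual member of that group is present.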
+#define SANITIZER(NAME, ID)
+#define SANITIZER_GROUP(NAME, ID, ALIAS) \
+ if (Kinds & SanitizerKind::ID) \
+ Kinds |= SanitizerKind::ID##Group;
+#include "clang/Basic/Sanitizers.def"
+ return Kinds;
+}
+
+static SanitizerMask parseSanitizeTrapArgs(const Driver &D,
+ const llvm::opt::ArgList &Args) {
+ SanitizerMask TrapRemove = 0; // During the loop below, the accumulated set of
+ // sanitizers disabled by the current sanitizer
+ // argument or any argument after it.
+ SanitizerMask TrappingKinds = 0;
+ SanitizerMask TrappingSupportedWithGroups = setGroupBits(TrappingSupported);
+
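+ // Walk the arguments in reverse: kinds removed by a later
+ // -fno-sanitize-trap= accumulate in TrapRemove and mask out any earlier
+ // -fsanitize-trap= additions.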
+ for (ArgList::const_reverse_iterator I = Args.rbegin(), E = Args.rend();
+ I != E; ++I) {
+ const auto *Arg = *I;
+ if (Arg->getOption().matches(options::OPT_fsanitize_trap_EQ)) {
+ Arg->claim();
+ SanitizerMask Add = parseArgValues(D, Arg, true);
+ Add &= ~TrapRemove;
+ if (SanitizerMask InvalidValues = Add & ~TrappingSupportedWithGroups) {
+ SanitizerSet S;
+ S.Mask = InvalidValues;
+ D.Diag(diag::err_drv_unsupported_option_argument) << "-fsanitize-trap"
+ << toString(S);
+ }
+ TrappingKinds |= expandSanitizerGroups(Add) & ~TrapRemove;
+ } else if (Arg->getOption().matches(options::OPT_fno_sanitize_trap_EQ)) {
+ Arg->claim();
+ TrapRemove |= expandSanitizerGroups(parseArgValues(D, Arg, true));
+ } else if (Arg->getOption().matches(
+ options::OPT_fsanitize_undefined_trap_on_error)) {
+ Arg->claim();
+ TrappingKinds |=
+ expandSanitizerGroups(UndefinedGroup & ~TrapRemove) & ~TrapRemove;
+ } else if (Arg->getOption().matches(
+ options::OPT_fno_sanitize_undefined_trap_on_error)) {
+ Arg->claim();
+ TrapRemove |= expandSanitizerGroups(UndefinedGroup);
+ }
+ }
+
+ // Apply default trapping behavior.
+ TrappingKinds |= TrappingDefault & ~TrapRemove;
+
+ return TrappingKinds;
+}
+
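+// The ASan, MSan and TSan runtimes already bundle the UBSan checks, so the
+// standalone UBSan runtime is only needed when none of them is linked and
+// some non-trapping UBSan check remains enabled.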
+bool SanitizerArgs::needsUbsanRt() const {
+ return (Sanitizers.Mask & NeedsUbsanRt & ~TrapSanitizers.Mask) &&
+ !Sanitizers.has(Address) &&
+ !Sanitizers.has(Memory) &&
+ !Sanitizers.has(Thread) &&
+ !CfiCrossDso;
+}
+
+bool SanitizerArgs::needsCfiRt() const {
+ return !(Sanitizers.Mask & CFI & ~TrapSanitizers.Mask) && CfiCrossDso;
+}
+
+bool SanitizerArgs::needsCfiDiagRt() const {
+ return (Sanitizers.Mask & CFI & ~TrapSanitizers.Mask) && CfiCrossDso;
+}
+
+bool SanitizerArgs::requiresPIE() const {
+ return NeedPIE || (Sanitizers.Mask & RequiresPIE);
+}
+
+bool SanitizerArgs::needsUnwindTables() const {
+ return Sanitizers.Mask & NeedsUnwindTables;
+}
+
+void SanitizerArgs::clear() {
+ Sanitizers.clear();
+ RecoverableSanitizers.clear();
+ TrapSanitizers.clear();
+ BlacklistFiles.clear();
+ ExtraDeps.clear();
+ CoverageFeatures = 0;
+ MsanTrackOrigins = 0;
+ MsanUseAfterDtor = false;
+ NeedPIE = false;
+ AsanFieldPadding = 0;
+ AsanSharedRuntime = false;
+ LinkCXXRuntimes = false;
+ CfiCrossDso = false;
+}
+
+SanitizerArgs::SanitizerArgs(const ToolChain &TC,
+ const llvm::opt::ArgList &Args) {
+ clear();
+ SanitizerMask AllRemove = 0; // During the loop below, the accumulated set of
+ // sanitizers disabled by the current sanitizer
+ // argument or any argument after it.
+ SanitizerMask AllAddedKinds = 0; // Mask of all sanitizers ever enabled by
+ // -fsanitize= flags (directly or via group
+ // expansion), some of which may be disabled
+ // later. Used to carefully prune
+ // unused-argument diagnostics.
+ SanitizerMask DiagnosedKinds = 0; // All Kinds we have diagnosed up to now.
+ // Used to deduplicate diagnostics.
+ SanitizerMask Kinds = 0;
+ const SanitizerMask Supported = setGroupBits(TC.getSupportedSanitizers());
+ ToolChain::RTTIMode RTTIMode = TC.getRTTIMode();
+
+ const Driver &D = TC.getDriver();
+ SanitizerMask TrappingKinds = parseSanitizeTrapArgs(D, Args);
+ SanitizerMask InvalidTrappingKinds = TrappingKinds & NotAllowedWithTrap;
+
+ for (ArgList::const_reverse_iterator I = Args.rbegin(), E = Args.rend();
+ I != E; ++I) {
+ const auto *Arg = *I;
+ if (Arg->getOption().matches(options::OPT_fsanitize_EQ)) {
+ Arg->claim();
+ SanitizerMask Add = parseArgValues(D, Arg, true);
+ AllAddedKinds |= expandSanitizerGroups(Add);
+
+ // Avoid diagnosing any sanitizer which is disabled later.
+ Add &= ~AllRemove;
+ // At this point we have not expanded groups, so any unsupported
+ // sanitizers in Add are those which have been explicitly enabled.
+ // Diagnose them.
+ if (SanitizerMask KindsToDiagnose =
+ Add & InvalidTrappingKinds & ~DiagnosedKinds) {
+ std::string Desc = describeSanitizeArg(*I, KindsToDiagnose);
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << Desc << "-fsanitize-trap=undefined";
+ DiagnosedKinds |= KindsToDiagnose;
+ }
+ Add &= ~InvalidTrappingKinds;
+ if (SanitizerMask KindsToDiagnose = Add & ~Supported & ~DiagnosedKinds) {
+ std::string Desc = describeSanitizeArg(*I, KindsToDiagnose);
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << Desc << TC.getTriple().str();
+ DiagnosedKinds |= KindsToDiagnose;
+ }
+ Add &= Supported;
+
+ // Test for -fno-rtti + explicit -fsanitize=vptr before expanding groups
+ // so we don't error out if -fno-rtti and -fsanitize=undefined were
+ // passed.
+ if (Add & Vptr &&
+ (RTTIMode == ToolChain::RM_DisabledImplicitly ||
+ RTTIMode == ToolChain::RM_DisabledExplicitly)) {
+ if (RTTIMode == ToolChain::RM_DisabledImplicitly)
+ // Warn about not having rtti enabled if the vptr sanitizer is
+ // explicitly enabled
+ D.Diag(diag::warn_drv_disabling_vptr_no_rtti_default);
+ else {
+ const llvm::opt::Arg *NoRTTIArg = TC.getRTTIArg();
+ assert(NoRTTIArg &&
+ "RTTI disabled explicitly but we have no argument!");
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << "-fsanitize=vptr" << NoRTTIArg->getAsString(Args);
+ }
+
+ // Take out the Vptr sanitizer from the enabled sanitizers
+ AllRemove |= Vptr;
+ }
+
+ Add = expandSanitizerGroups(Add);
+ // Group expansion may have enabled a sanitizer which is disabled later.
+ Add &= ~AllRemove;
+ // Silently discard any unsupported sanitizers implicitly enabled through
+ // group expansion.
+ Add &= ~InvalidTrappingKinds;
+ Add &= Supported;
+
+ Kinds |= Add;
+ } else if (Arg->getOption().matches(options::OPT_fno_sanitize_EQ)) {
+ Arg->claim();
+ SanitizerMask Remove = parseArgValues(D, Arg, true);
+ AllRemove |= expandSanitizerGroups(Remove);
+ }
+ }
+
+ // We disable the vptr sanitizer if it was enabled by group expansion but RTTI
+ // is disabled.
+ if ((Kinds & Vptr) &&
+ (RTTIMode == ToolChain::RM_DisabledImplicitly ||
+ RTTIMode == ToolChain::RM_DisabledExplicitly)) {
+ Kinds &= ~Vptr;
+ }
+
+ // Check that LTO is enabled if we need it.
+ if ((Kinds & NeedsLTO) && !D.isUsingLTO()) {
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << lastArgumentForMask(D, Args, Kinds & NeedsLTO) << "-flto";
+ }
+
+ // Report an error if there are non-trapping sanitizers that require the
+ // C++-ABI-specific parts of the UBSan runtime, and they are not provided
+ // by the toolchain. We don't have a good way to check the latter, so we
+ // just check whether the toolchain supports vptr.
+ if (~Supported & Vptr) {
+ SanitizerMask KindsToDiagnose = Kinds & ~TrappingKinds & NeedsUbsanCxxRt;
+ // The runtime library supports the Microsoft C++ ABI, but only well enough
+ // for CFI. FIXME: Remove this once we support vptr on Windows.
+ if (TC.getTriple().isOSWindows())
+ KindsToDiagnose &= ~CFI;
+ if (KindsToDiagnose) {
+ SanitizerSet S;
+ S.Mask = KindsToDiagnose;
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << ("-fno-sanitize-trap=" + toString(S)) << TC.getTriple().str();
+ Kinds &= ~KindsToDiagnose;
+ }
+ }
+
+ // Warn about incompatible groups of sanitizers.
+ std::pair<SanitizerMask, SanitizerMask> IncompatibleGroups[] = {
+ std::make_pair(Address, Thread), std::make_pair(Address, Memory),
+ std::make_pair(Thread, Memory), std::make_pair(Leak, Thread),
+ std::make_pair(Leak, Memory), std::make_pair(KernelAddress, Address),
+ std::make_pair(KernelAddress, Leak),
+ std::make_pair(KernelAddress, Thread),
+ std::make_pair(KernelAddress, Memory)};
+ for (auto G : IncompatibleGroups) {
+ SanitizerMask Group = G.first;
+ if (Kinds & Group) {
+ if (SanitizerMask Incompatible = Kinds & G.second) {
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << lastArgumentForMask(D, Args, Group)
+ << lastArgumentForMask(D, Args, Incompatible);
+ Kinds &= ~Incompatible;
+ }
+ }
+ }
+ // FIXME: Currently -fsanitize=leak is silently ignored in the presence of
+ // -fsanitize=address. Perhaps it should print an error, or perhaps
+ // -f(no-)sanitize=leak should change whether leak detection is enabled by
+ // default in ASan?
+
+ // Parse -f(no-)?sanitize-recover flags.
+ SanitizerMask RecoverableKinds = RecoverableByDefault;
+ SanitizerMask DiagnosedUnrecoverableKinds = 0;
+ for (const auto *Arg : Args) {
+ const char *DeprecatedReplacement = nullptr;
+ if (Arg->getOption().matches(options::OPT_fsanitize_recover)) {
+ DeprecatedReplacement = "-fsanitize-recover=undefined,integer";
+ RecoverableKinds |= expandSanitizerGroups(LegacyFsanitizeRecoverMask);
+ Arg->claim();
+ } else if (Arg->getOption().matches(options::OPT_fno_sanitize_recover)) {
+ DeprecatedReplacement = "-fno-sanitize-recover=undefined,integer";
+ RecoverableKinds &= ~expandSanitizerGroups(LegacyFsanitizeRecoverMask);
+ Arg->claim();
+ } else if (Arg->getOption().matches(options::OPT_fsanitize_recover_EQ)) {
+ SanitizerMask Add = parseArgValues(D, Arg, true);
+ // Report error if user explicitly tries to recover from unrecoverable
+ // sanitizer.
+ if (SanitizerMask KindsToDiagnose =
+ Add & Unrecoverable & ~DiagnosedUnrecoverableKinds) {
+ SanitizerSet SetToDiagnose;
+ SetToDiagnose.Mask |= KindsToDiagnose;
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << Arg->getOption().getName() << toString(SetToDiagnose);
+ DiagnosedUnrecoverableKinds |= KindsToDiagnose;
+ }
+ RecoverableKinds |= expandSanitizerGroups(Add);
+ Arg->claim();
+ } else if (Arg->getOption().matches(options::OPT_fno_sanitize_recover_EQ)) {
+ RecoverableKinds &= ~expandSanitizerGroups(parseArgValues(D, Arg, true));
+ Arg->claim();
+ }
+ if (DeprecatedReplacement) {
+ D.Diag(diag::warn_drv_deprecated_arg) << Arg->getAsString(Args)
+ << DeprecatedReplacement;
+ }
+ }
+ RecoverableKinds &= Kinds;
+ RecoverableKinds &= ~Unrecoverable;
+
+ TrappingKinds &= Kinds;
+
+ // Setup blacklist files.
+ // Add default blacklist from resource directory.
+ {
+ std::string BLPath;
+ if (getDefaultBlacklist(D, Kinds, BLPath) && llvm::sys::fs::exists(BLPath))
+ BlacklistFiles.push_back(BLPath);
+ }
+ // Parse -f(no-)sanitize-blacklist options.
+ for (const auto *Arg : Args) {
+ if (Arg->getOption().matches(options::OPT_fsanitize_blacklist)) {
+ Arg->claim();
+ std::string BLPath = Arg->getValue();
+ if (llvm::sys::fs::exists(BLPath)) {
+ BlacklistFiles.push_back(BLPath);
+ ExtraDeps.push_back(BLPath);
+ } else
+ D.Diag(clang::diag::err_drv_no_such_file) << BLPath;
+
+ } else if (Arg->getOption().matches(options::OPT_fno_sanitize_blacklist)) {
+ Arg->claim();
+ BlacklistFiles.clear();
+ ExtraDeps.clear();
+ }
+ }
+ // Validate blacklists format.
+ {
+ std::string BLError;
+ std::unique_ptr<llvm::SpecialCaseList> SCL(
+ llvm::SpecialCaseList::create(BlacklistFiles, BLError));
+ if (!SCL.get())
+ D.Diag(clang::diag::err_drv_malformed_sanitizer_blacklist) << BLError;
+ }
+
+ // Parse -f[no-]sanitize-memory-track-origins[=level] options.
+ if (AllAddedKinds & Memory) {
+ if (Arg *A =
+ Args.getLastArg(options::OPT_fsanitize_memory_track_origins_EQ,
+ options::OPT_fsanitize_memory_track_origins,
+ options::OPT_fno_sanitize_memory_track_origins)) {
+ if (A->getOption().matches(options::OPT_fsanitize_memory_track_origins)) {
+ MsanTrackOrigins = 2;
+ } else if (A->getOption().matches(
+ options::OPT_fno_sanitize_memory_track_origins)) {
+ MsanTrackOrigins = 0;
+ } else {
+ StringRef S = A->getValue();
+ if (S.getAsInteger(0, MsanTrackOrigins) || MsanTrackOrigins < 0 ||
+ MsanTrackOrigins > 2) {
+ D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ }
+ }
+ }
+ MsanUseAfterDtor =
+ Args.hasArg(options::OPT_fsanitize_memory_use_after_dtor);
+ NeedPIE |= !(TC.getTriple().isOSLinux() &&
+ TC.getTriple().getArch() == llvm::Triple::x86_64);
+ }
+
+ if (AllAddedKinds & CFI) {
+ CfiCrossDso = Args.hasFlag(options::OPT_fsanitize_cfi_cross_dso,
+ options::OPT_fno_sanitize_cfi_cross_dso, false);
+ // Without PIE, an external function address may resolve to a PLT record,
+ // which cannot be verified by the target module.
+ NeedPIE |= CfiCrossDso;
+ }
+
+ // Parse -f(no-)?sanitize-coverage flags if coverage is supported by the
+ // enabled sanitizers.
+ if (AllAddedKinds & SupportsCoverage) {
+ for (const auto *Arg : Args) {
+ if (Arg->getOption().matches(options::OPT_fsanitize_coverage)) {
+ Arg->claim();
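+ // Legacy form: -fsanitize-coverage=<N> with N in [0, 4], mapped onto the
+ // corresponding feature bits below.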
+ int LegacySanitizeCoverage;
+ if (Arg->getNumValues() == 1 &&
+ !StringRef(Arg->getValue(0))
+ .getAsInteger(0, LegacySanitizeCoverage) &&
+ LegacySanitizeCoverage >= 0 && LegacySanitizeCoverage <= 4) {
+ // TODO: Add deprecation notice for this form.
+ switch (LegacySanitizeCoverage) {
+ case 0:
+ CoverageFeatures = 0;
+ break;
+ case 1:
+ CoverageFeatures = CoverageFunc;
+ break;
+ case 2:
+ CoverageFeatures = CoverageBB;
+ break;
+ case 3:
+ CoverageFeatures = CoverageEdge;
+ break;
+ case 4:
+ CoverageFeatures = CoverageEdge | CoverageIndirCall;
+ break;
+ }
+ continue;
+ }
+ CoverageFeatures |= parseCoverageFeatures(D, Arg);
+ } else if (Arg->getOption().matches(options::OPT_fno_sanitize_coverage)) {
+ Arg->claim();
+ CoverageFeatures &= ~parseCoverageFeatures(D, Arg);
+ }
+ }
+ }
+ // Choose at most one coverage type: function, bb, or edge.
+ if ((CoverageFeatures & CoverageFunc) && (CoverageFeatures & CoverageBB))
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << "-fsanitize-coverage=func"
+ << "-fsanitize-coverage=bb";
+ if ((CoverageFeatures & CoverageFunc) && (CoverageFeatures & CoverageEdge))
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << "-fsanitize-coverage=func"
+ << "-fsanitize-coverage=edge";
+ if ((CoverageFeatures & CoverageBB) && (CoverageFeatures & CoverageEdge))
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << "-fsanitize-coverage=bb"
+ << "-fsanitize-coverage=edge";
+ // Basic block tracing and 8-bit counters require some type of coverage
+ // enabled.
+ int CoverageTypes = CoverageFunc | CoverageBB | CoverageEdge;
+ if ((CoverageFeatures & CoverageTraceBB) &&
+ !(CoverageFeatures & CoverageTypes))
+ D.Diag(clang::diag::err_drv_argument_only_allowed_with)
+ << "-fsanitize-coverage=trace-bb"
+ << "-fsanitize-coverage=(func|bb|edge)";
+ if ((CoverageFeatures & Coverage8bitCounters) &&
+ !(CoverageFeatures & CoverageTypes))
+ D.Diag(clang::diag::err_drv_argument_only_allowed_with)
+ << "-fsanitize-coverage=8bit-counters"
+ << "-fsanitize-coverage=(func|bb|edge)";
+
+ if (AllAddedKinds & Address) {
+ AsanSharedRuntime =
+ Args.hasArg(options::OPT_shared_libasan) || TC.getTriple().isAndroid();
+ NeedPIE |= TC.getTriple().isAndroid();
+ if (Arg *A =
+ Args.getLastArg(options::OPT_fsanitize_address_field_padding)) {
+ StringRef S = A->getValue();
+ // Legal values are 0, 1, and 2, but more levels may be added in the future.
+ if (S.getAsInteger(0, AsanFieldPadding) || AsanFieldPadding < 0 ||
+ AsanFieldPadding > 2) {
+ D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ }
+ }
+
+ if (Arg *WindowsDebugRTArg =
+ Args.getLastArg(options::OPT__SLASH_MTd, options::OPT__SLASH_MT,
+ options::OPT__SLASH_MDd, options::OPT__SLASH_MD,
+ options::OPT__SLASH_LDd, options::OPT__SLASH_LD)) {
+ switch (WindowsDebugRTArg->getOption().getID()) {
+ case options::OPT__SLASH_MTd:
+ case options::OPT__SLASH_MDd:
+ case options::OPT__SLASH_LDd:
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << WindowsDebugRTArg->getAsString(Args)
+ << lastArgumentForMask(D, Args, Address);
+ D.Diag(clang::diag::note_drv_address_sanitizer_debug_runtime);
+ }
+ }
+ }
+
+ // Parse the -fsanitize-link-c++-runtime flag; linking the sanitizer C++
+ // runtime is also implied when the driver is invoked as a C++ compiler.
+ LinkCXXRuntimes =
+ Args.hasArg(options::OPT_fsanitize_link_cxx_runtime) || D.CCCIsCXX();
+
+ // Finally, initialize the set of available and recoverable sanitizers.
+ Sanitizers.Mask |= Kinds;
+ RecoverableSanitizers.Mask |= RecoverableKinds;
+ TrapSanitizers.Mask |= TrappingKinds;
+}
+
+static std::string toString(const clang::SanitizerSet &Sanitizers) {
+ std::string Res;
+#define SANITIZER(NAME, ID) \
+ if (Sanitizers.has(ID)) { \
+ if (!Res.empty()) \
+ Res += ","; \
+ Res += NAME; \
+ }
+#include "clang/Basic/Sanitizers.def"
+ return Res;
+}
+
+void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ types::ID InputType) const {
+ if (Sanitizers.empty())
+ return;
+ CmdArgs.push_back(Args.MakeArgString("-fsanitize=" + toString(Sanitizers)));
+
+ if (!RecoverableSanitizers.empty())
+ CmdArgs.push_back(Args.MakeArgString("-fsanitize-recover=" +
+ toString(RecoverableSanitizers)));
+
+ if (!TrapSanitizers.empty())
+ CmdArgs.push_back(
+ Args.MakeArgString("-fsanitize-trap=" + toString(TrapSanitizers)));
+
+ for (const auto &BLPath : BlacklistFiles) {
+ SmallString<64> BlacklistOpt("-fsanitize-blacklist=");
+ BlacklistOpt += BLPath;
+ CmdArgs.push_back(Args.MakeArgString(BlacklistOpt));
+ }
+ for (const auto &Dep : ExtraDeps) {
+ SmallString<64> ExtraDepOpt("-fdepfile-entry=");
+ ExtraDepOpt += Dep;
+ CmdArgs.push_back(Args.MakeArgString(ExtraDepOpt));
+ }
+
+ if (MsanTrackOrigins)
+ CmdArgs.push_back(Args.MakeArgString("-fsanitize-memory-track-origins=" +
+ llvm::utostr(MsanTrackOrigins)));
+
+ if (MsanUseAfterDtor)
+ CmdArgs.push_back(Args.MakeArgString("-fsanitize-memory-use-after-dtor"));
+
+ if (CfiCrossDso)
+ CmdArgs.push_back(Args.MakeArgString("-fsanitize-cfi-cross-dso"));
+
+ if (AsanFieldPadding)
+ CmdArgs.push_back(Args.MakeArgString("-fsanitize-address-field-padding=" +
+ llvm::utostr(AsanFieldPadding)));
+ // Translate available CoverageFeatures to corresponding clang-cc1 flags.
+ std::pair<int, const char *> CoverageFlags[] = {
+ std::make_pair(CoverageFunc, "-fsanitize-coverage-type=1"),
+ std::make_pair(CoverageBB, "-fsanitize-coverage-type=2"),
+ std::make_pair(CoverageEdge, "-fsanitize-coverage-type=3"),
+ std::make_pair(CoverageIndirCall, "-fsanitize-coverage-indirect-calls"),
+ std::make_pair(CoverageTraceBB, "-fsanitize-coverage-trace-bb"),
+ std::make_pair(CoverageTraceCmp, "-fsanitize-coverage-trace-cmp"),
+ std::make_pair(Coverage8bitCounters, "-fsanitize-coverage-8bit-counters")};
+ for (auto F : CoverageFlags) {
+ if (CoverageFeatures & F.first)
+ CmdArgs.push_back(Args.MakeArgString(F.second));
+ }
+
+ // MSan: Workaround for PR16386.
+ // ASan: This is mainly to help LSan with cases such as
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=373
+ // We can't make this conditional on -fsanitize=leak, as that flag shouldn't
+ // affect compilation.
+ if (Sanitizers.has(Memory) || Sanitizers.has(Address))
+ CmdArgs.push_back(Args.MakeArgString("-fno-assume-sane-operator-new"));
+
+ if (TC.getTriple().isOSWindows() && needsUbsanRt()) {
+ // Instruct the code generator to embed linker directives in the object file
+ // that cause the required runtime libraries to be linked.
+ CmdArgs.push_back(Args.MakeArgString(
+ "--dependent-lib=" + TC.getCompilerRT(Args, "ubsan_standalone")));
+ if (types::isCXX(InputType))
+ CmdArgs.push_back(Args.MakeArgString(
+ "--dependent-lib=" + TC.getCompilerRT(Args, "ubsan_standalone_cxx")));
+ }
+}
+
+SanitizerMask parseArgValues(const Driver &D, const llvm::opt::Arg *A,
+ bool DiagnoseErrors) {
+ assert((A->getOption().matches(options::OPT_fsanitize_EQ) ||
+ A->getOption().matches(options::OPT_fno_sanitize_EQ) ||
+ A->getOption().matches(options::OPT_fsanitize_recover_EQ) ||
+ A->getOption().matches(options::OPT_fno_sanitize_recover_EQ) ||
+ A->getOption().matches(options::OPT_fsanitize_trap_EQ) ||
+ A->getOption().matches(options::OPT_fno_sanitize_trap_EQ)) &&
+ "Invalid argument in parseArgValues!");
+ SanitizerMask Kinds = 0;
+ for (int i = 0, n = A->getNumValues(); i != n; ++i) {
+ const char *Value = A->getValue(i);
+ SanitizerMask Kind;
+ // Special case: don't accept -fsanitize=all.
+ if (A->getOption().matches(options::OPT_fsanitize_EQ) &&
+ 0 == strcmp("all", Value))
+ Kind = 0;
+ else
+ Kind = parseSanitizerValue(Value, /*AllowGroups=*/true);
+
+ if (Kind)
+ Kinds |= Kind;
+ else if (DiagnoseErrors)
+ D.Diag(clang::diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << Value;
+ }
+ return Kinds;
+}
+
+int parseCoverageFeatures(const Driver &D, const llvm::opt::Arg *A) {
+ assert(A->getOption().matches(options::OPT_fsanitize_coverage) ||
+ A->getOption().matches(options::OPT_fno_sanitize_coverage));
+ int Features = 0;
+ for (int i = 0, n = A->getNumValues(); i != n; ++i) {
+ const char *Value = A->getValue(i);
+ int F = llvm::StringSwitch<int>(Value)
+ .Case("func", CoverageFunc)
+ .Case("bb", CoverageBB)
+ .Case("edge", CoverageEdge)
+ .Case("indirect-calls", CoverageIndirCall)
+ .Case("trace-bb", CoverageTraceBB)
+ .Case("trace-cmp", CoverageTraceCmp)
+ .Case("8bit-counters", Coverage8bitCounters)
+ .Default(0);
+ if (F == 0)
+ D.Diag(clang::diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << Value;
+ Features |= F;
+ }
+ return Features;
+}
+
+std::string lastArgumentForMask(const Driver &D, const llvm::opt::ArgList &Args,
+ SanitizerMask Mask) {
+ for (llvm::opt::ArgList::const_reverse_iterator I = Args.rbegin(),
+ E = Args.rend();
+ I != E; ++I) {
+ const auto *Arg = *I;
+ if (Arg->getOption().matches(options::OPT_fsanitize_EQ)) {
+ SanitizerMask AddKinds =
+ expandSanitizerGroups(parseArgValues(D, Arg, false));
+ if (AddKinds & Mask)
+ return describeSanitizeArg(Arg, Mask);
+ } else if (Arg->getOption().matches(options::OPT_fno_sanitize_EQ)) {
+ SanitizerMask RemoveKinds =
+ expandSanitizerGroups(parseArgValues(D, Arg, false));
+ Mask &= ~RemoveKinds;
+ }
+ }
+ llvm_unreachable("arg list didn't provide expected value");
+}
+
+std::string describeSanitizeArg(const llvm::opt::Arg *A, SanitizerMask Mask) {
+ assert(A->getOption().matches(options::OPT_fsanitize_EQ)
+ && "Invalid argument in describeSanitizerArg!");
+
+ std::string Sanitizers;
+ for (int i = 0, n = A->getNumValues(); i != n; ++i) {
+ if (expandSanitizerGroups(
+ parseSanitizerValue(A->getValue(i), /*AllowGroups=*/true)) &
+ Mask) {
+ if (!Sanitizers.empty())
+ Sanitizers += ",";
+ Sanitizers += A->getValue(i);
+ }
+ }
+
+ assert(!Sanitizers.empty() && "arg didn't provide expected value");
+ return "-fsanitize=" + Sanitizers;
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Tool.cpp b/contrib/llvm/tools/clang/lib/Driver/Tool.cpp
new file mode 100644
index 0000000..7142e82
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Tool.cpp
@@ -0,0 +1,23 @@
+//===--- Tool.cpp - Compilation Tools -------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Tool.h"
+
+using namespace clang::driver;
+
+Tool::Tool(const char *_Name, const char *_ShortName, const ToolChain &TC,
+ ResponseFileSupport _ResponseSupport,
+ llvm::sys::WindowsEncodingMethod _ResponseEncoding,
+ const char *_ResponseFlag)
+ : Name(_Name), ShortName(_ShortName), TheToolChain(TC),
+ ResponseSupport(_ResponseSupport), ResponseEncoding(_ResponseEncoding),
+ ResponseFlag(_ResponseFlag) {}
+
+Tool::~Tool() {
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp
new file mode 100644
index 0000000..cbbd485
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp
@@ -0,0 +1,668 @@
+//===--- ToolChain.cpp - Collections of tools for one platform ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Tools.h"
+#include "clang/Basic/ObjCRuntime.h"
+#include "clang/Driver/Action.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/SanitizerArgs.h"
+#include "clang/Driver/ToolChain.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Option/Option.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/TargetParser.h"
+
+using namespace clang::driver;
+using namespace clang::driver::tools;
+using namespace clang;
+using namespace llvm;
+using namespace llvm::opt;
+
+static llvm::opt::Arg *GetRTTIArgument(const ArgList &Args) {
+ return Args.getLastArg(options::OPT_mkernel, options::OPT_fapple_kext,
+ options::OPT_fno_rtti, options::OPT_frtti);
+}
+
+static ToolChain::RTTIMode CalculateRTTIMode(const ArgList &Args,
+ const llvm::Triple &Triple,
+ const Arg *CachedRTTIArg) {
+ // Explicit rtti/no-rtti args
+ if (CachedRTTIArg) {
+ if (CachedRTTIArg->getOption().matches(options::OPT_frtti))
+ return ToolChain::RM_EnabledExplicitly;
+ else
+ return ToolChain::RM_DisabledExplicitly;
+ }
+
+ // -frtti is default, except for the PS4 CPU.
+ if (!Triple.isPS4CPU())
+ return ToolChain::RM_EnabledImplicitly;
+
+ // On the PS4, turning on c++ exceptions turns on rtti.
+ // We're assuming that, if we see -fexceptions, rtti gets turned on.
+ Arg *Exceptions = Args.getLastArgNoClaim(
+ options::OPT_fcxx_exceptions, options::OPT_fno_cxx_exceptions,
+ options::OPT_fexceptions, options::OPT_fno_exceptions);
+ if (Exceptions &&
+ (Exceptions->getOption().matches(options::OPT_fexceptions) ||
+ Exceptions->getOption().matches(options::OPT_fcxx_exceptions)))
+ return ToolChain::RM_EnabledImplicitly;
+
+ return ToolChain::RM_DisabledImplicitly;
+}
+
+ToolChain::ToolChain(const Driver &D, const llvm::Triple &T,
+ const ArgList &Args)
+ : D(D), Triple(T), Args(Args), CachedRTTIArg(GetRTTIArgument(Args)),
+ CachedRTTIMode(CalculateRTTIMode(Args, Triple, CachedRTTIArg)) {
+ if (Arg *A = Args.getLastArg(options::OPT_mthread_model))
+ if (!isThreadModelSupported(A->getValue()))
+ D.Diag(diag::err_drv_invalid_thread_model_for_target)
+ << A->getValue() << A->getAsString(Args);
+}
+
+ToolChain::~ToolChain() {
+}
+
+vfs::FileSystem &ToolChain::getVFS() const { return getDriver().getVFS(); }
+
+bool ToolChain::useIntegratedAs() const {
+ return Args.hasFlag(options::OPT_fintegrated_as,
+ options::OPT_fno_integrated_as,
+ IsIntegratedAssemblerDefault());
+}
+
+const SanitizerArgs& ToolChain::getSanitizerArgs() const {
+ if (!SanitizerArguments.get())
+ SanitizerArguments.reset(new SanitizerArgs(*this, Args));
+ return *SanitizerArguments.get();
+}
+
+namespace {
+struct DriverSuffix {
+ const char *Suffix;
+ const char *ModeFlag;
+};
+
+const DriverSuffix *FindDriverSuffix(StringRef ProgName) {
+ // A list of known driver suffixes. Suffixes are compared against the
+ // program name in order. If there is a match, the frontend type is updated as
+ // necessary by applying the ModeFlag.
+ static const DriverSuffix DriverSuffixes[] = {
+ {"clang", nullptr},
+ {"clang++", "--driver-mode=g++"},
+ {"clang-c++", "--driver-mode=g++"},
+ {"clang-cc", nullptr},
+ {"clang-cpp", "--driver-mode=cpp"},
+ {"clang-g++", "--driver-mode=g++"},
+ {"clang-gcc", nullptr},
+ {"clang-cl", "--driver-mode=cl"},
+ {"cc", nullptr},
+ {"cpp", "--driver-mode=cpp"},
+ {"cl", "--driver-mode=cl"},
+ {"++", "--driver-mode=g++"},
+ };
+
+ for (size_t i = 0; i < llvm::array_lengthof(DriverSuffixes); ++i)
+ if (ProgName.endswith(DriverSuffixes[i].Suffix))
+ return &DriverSuffixes[i];
+ return nullptr;
+}
+
+/// Normalize the program name from argv[0] by stripping the file extension if
+/// present and lower-casing the string on Windows.
+std::string normalizeProgramName(llvm::StringRef Argv0) {
+ std::string ProgName = llvm::sys::path::stem(Argv0);
+#ifdef LLVM_ON_WIN32
+ // Transform to lowercase for case insensitive file systems.
+ std::transform(ProgName.begin(), ProgName.end(), ProgName.begin(), ::tolower);
+#endif
+ return ProgName;
+}
+
+const DriverSuffix *parseDriverSuffix(StringRef ProgName) {
+ // Try to infer frontend type and default target from the program name by
+ // comparing it against DriverSuffixes in order.
+
+ // If there is a match, the function tries to identify a target prefix.
+ // E.g. "x86_64-linux-clang" is interpreted as the suffix "clang" with the
+ // target prefix "x86_64-linux". If such a target prefix is found, it may
+ // be added via -target as an implicit first argument.
+ const DriverSuffix *DS = FindDriverSuffix(ProgName);
+
+ if (!DS) {
+ // Try again after stripping any trailing version number:
+ // clang++3.5 -> clang++
+ ProgName = ProgName.rtrim("0123456789.");
+ DS = FindDriverSuffix(ProgName);
+ }
+
+ if (!DS) {
+ // Try again after stripping trailing -component.
+ // clang++-tot -> clang++
+ ProgName = ProgName.slice(0, ProgName.rfind('-'));
+ DS = FindDriverSuffix(ProgName);
+ }
+ return DS;
+}
+} // anonymous namespace
+
+std::pair<std::string, std::string>
+ToolChain::getTargetAndModeFromProgramName(StringRef PN) {
+ std::string ProgName = normalizeProgramName(PN);
+ const DriverSuffix *DS = parseDriverSuffix(ProgName);
+ if (!DS)
+ return std::make_pair("", "");
+ std::string ModeFlag = DS->ModeFlag == nullptr ? "" : DS->ModeFlag;
+
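+ // Everything before the last '-' preceding the suffix is a candidate
+ // target prefix; it is only used if the target registry recognizes it.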
+ std::string::size_type LastComponent =
+ ProgName.rfind('-', ProgName.size() - strlen(DS->Suffix));
+ if (LastComponent == std::string::npos)
+ return std::make_pair("", ModeFlag);
+
+ // Infer target from the prefix.
+ StringRef Prefix(ProgName);
+ Prefix = Prefix.slice(0, LastComponent);
+ std::string IgnoredError;
+ std::string Target;
+ if (llvm::TargetRegistry::lookupTarget(Prefix, IgnoredError)) {
+ Target = Prefix;
+ }
+ return std::make_pair(Target, ModeFlag);
+}
+
+StringRef ToolChain::getDefaultUniversalArchName() const {
+ // In universal driver terms, the arch name accepted by -arch isn't exactly
+ // the same as the ones that appear in the triple. Roughly speaking, this is
+ // an inverse of the darwin::getArchTypeForDarwinArchName() function, but the
+ // only interesting special case is powerpc.
+ switch (Triple.getArch()) {
+ case llvm::Triple::ppc:
+ return "ppc";
+ case llvm::Triple::ppc64:
+ return "ppc64";
+ case llvm::Triple::ppc64le:
+ return "ppc64le";
+ default:
+ return Triple.getArchName();
+ }
+}
+
+bool ToolChain::IsUnwindTablesDefault() const {
+ return false;
+}
+
+Tool *ToolChain::getClang() const {
+ if (!Clang)
+ Clang.reset(new tools::Clang(*this));
+ return Clang.get();
+}
+
+Tool *ToolChain::buildAssembler() const {
+ return new tools::ClangAs(*this);
+}
+
+Tool *ToolChain::buildLinker() const {
+ llvm_unreachable("Linking is not supported by this toolchain");
+}
+
+Tool *ToolChain::getAssemble() const {
+ if (!Assemble)
+ Assemble.reset(buildAssembler());
+ return Assemble.get();
+}
+
+Tool *ToolChain::getClangAs() const {
+ if (!Assemble)
+ Assemble.reset(new tools::ClangAs(*this));
+ return Assemble.get();
+}
+
+Tool *ToolChain::getLink() const {
+ if (!Link)
+ Link.reset(buildLinker());
+ return Link.get();
+}
+
+Tool *ToolChain::getTool(Action::ActionClass AC) const {
+ switch (AC) {
+ case Action::AssembleJobClass:
+ return getAssemble();
+
+ case Action::LinkJobClass:
+ return getLink();
+
+ case Action::InputClass:
+ case Action::BindArchClass:
+ case Action::CudaDeviceClass:
+ case Action::CudaHostClass:
+ case Action::LipoJobClass:
+ case Action::DsymutilJobClass:
+ case Action::VerifyDebugInfoJobClass:
+ llvm_unreachable("Invalid tool kind.");
+
+ case Action::CompileJobClass:
+ case Action::PrecompileJobClass:
+ case Action::PreprocessJobClass:
+ case Action::AnalyzeJobClass:
+ case Action::MigrateJobClass:
+ case Action::VerifyPCHJobClass:
+ case Action::BackendJobClass:
+ return getClang();
+ }
+
+ llvm_unreachable("Invalid tool kind.");
+}
+
+static StringRef getArchNameForCompilerRTLib(const ToolChain &TC,
+ const ArgList &Args) {
+ const llvm::Triple &Triple = TC.getTriple();
+ bool IsWindows = Triple.isOSWindows();
+
+ if (Triple.isWindowsMSVCEnvironment() && TC.getArch() == llvm::Triple::x86)
+ return "i386";
+
+ if (TC.getArch() == llvm::Triple::arm || TC.getArch() == llvm::Triple::armeb)
+ return (arm::getARMFloatABI(TC, Args) == arm::FloatABI::Hard && !IsWindows)
+ ? "armhf"
+ : "arm";
+
+ return TC.getArchName();
+}
+
+std::string ToolChain::getCompilerRT(const ArgList &Args, StringRef Component,
+ bool Shared) const {
+ const llvm::Triple &TT = getTriple();
+ const char *Env = TT.isAndroid() ? "-android" : "";
+ bool IsITANMSVCWindows =
+ TT.isWindowsMSVCEnvironment() || TT.isWindowsItaniumEnvironment();
+
+ StringRef Arch = getArchNameForCompilerRTLib(*this, Args);
+ const char *Prefix = IsITANMSVCWindows ? "" : "lib";
+ const char *Suffix = Shared ? (Triple.isOSWindows() ? ".dll" : ".so")
+ : (IsITANMSVCWindows ? ".lib" : ".a");
+
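+ // Runtimes live under <resource-dir>/lib/<os> and are named, e.g.,
+ // libclang_rt.<component>-<arch>.a ("lib" and the extension differ on
+ // MSVC-style environments).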
+ SmallString<128> Path(getDriver().ResourceDir);
+ StringRef OSLibName = Triple.isOSFreeBSD() ? "freebsd" : getOS();
+ llvm::sys::path::append(Path, "lib", OSLibName);
+ llvm::sys::path::append(Path, Prefix + Twine("clang_rt.") + Component + "-" +
+ Arch + Env + Suffix);
+ return Path.str();
+}
+
+const char *ToolChain::getCompilerRTArgString(const llvm::opt::ArgList &Args,
+ StringRef Component,
+ bool Shared) const {
+ return Args.MakeArgString(getCompilerRT(Args, Component, Shared));
+}
+
+bool ToolChain::needsProfileRT(const ArgList &Args) {
+ if (Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
+ false) ||
+ Args.hasArg(options::OPT_fprofile_generate) ||
+ Args.hasArg(options::OPT_fprofile_generate_EQ) ||
+ Args.hasArg(options::OPT_fprofile_instr_generate) ||
+ Args.hasArg(options::OPT_fprofile_instr_generate_EQ) ||
+ Args.hasArg(options::OPT_fcreate_profile) ||
+ Args.hasArg(options::OPT_coverage))
+ return true;
+
+ return false;
+}
+
+Tool *ToolChain::SelectTool(const JobAction &JA) const {
+ if (getDriver().ShouldUseClangCompiler(JA)) return getClang();
+ Action::ActionClass AC = JA.getKind();
+ if (AC == Action::AssembleJobClass && useIntegratedAs())
+ return getClangAs();
+ return getTool(AC);
+}
+
+std::string ToolChain::GetFilePath(const char *Name) const {
+ return D.GetFilePath(Name, *this);
+}
+
+std::string ToolChain::GetProgramPath(const char *Name) const {
+ return D.GetProgramPath(Name, *this);
+}
+
+std::string ToolChain::GetLinkerPath() const {
+ if (Arg *A = Args.getLastArg(options::OPT_fuse_ld_EQ)) {
+ StringRef Suffix = A->getValue();
+
+ // If we're passed -fuse-ld= with no argument, or with the argument ld,
+ // then use whatever the default system linker is.
+ if (Suffix.empty() || Suffix == "ld")
+ return GetProgramPath("ld");
+
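+ // Otherwise look up "ld.<suffix>" (e.g. ld.bfd, ld.gold) on the program
+ // path, diagnosing an invalid linker name if it does not exist.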
+ llvm::SmallString<8> LinkerName("ld.");
+ LinkerName.append(Suffix);
+
+ std::string LinkerPath(GetProgramPath(LinkerName.c_str()));
+ if (llvm::sys::fs::exists(LinkerPath))
+ return LinkerPath;
+
+ getDriver().Diag(diag::err_drv_invalid_linker_name) << A->getAsString(Args);
+ return "";
+ }
+
+ return GetProgramPath(DefaultLinker);
+}
+
+types::ID ToolChain::LookupTypeForExtension(const char *Ext) const {
+ return types::lookupTypeForExtension(Ext);
+}
+
+bool ToolChain::HasNativeLLVMSupport() const {
+ return false;
+}
+
+bool ToolChain::isCrossCompiling() const {
+ llvm::Triple HostTriple(LLVM_HOST_TRIPLE);
+ switch (HostTriple.getArch()) {
+ // The A32/T32/T16 instruction sets are not separate architectures in this
+ // context.
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ return getArch() != llvm::Triple::arm && getArch() != llvm::Triple::thumb &&
+ getArch() != llvm::Triple::armeb && getArch() != llvm::Triple::thumbeb;
+ default:
+ return HostTriple.getArch() != getArch();
+ }
+}
+
+ObjCRuntime ToolChain::getDefaultObjCRuntime(bool isNonFragile) const {
+ return ObjCRuntime(isNonFragile ? ObjCRuntime::GNUstep : ObjCRuntime::GCC,
+ VersionTuple());
+}
+
+bool ToolChain::isThreadModelSupported(const StringRef Model) const {
+ if (Model == "single") {
+ // FIXME: 'single' is only supported on ARM and WebAssembly so far.
+ return Triple.getArch() == llvm::Triple::arm ||
+ Triple.getArch() == llvm::Triple::armeb ||
+ Triple.getArch() == llvm::Triple::thumb ||
+ Triple.getArch() == llvm::Triple::thumbeb ||
+ Triple.getArch() == llvm::Triple::wasm32 ||
+ Triple.getArch() == llvm::Triple::wasm64;
+ } else if (Model == "posix")
+ return true;
+
+ return false;
+}
+
+std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
+ types::ID InputType) const {
+ switch (getTriple().getArch()) {
+ default:
+ return getTripleString();
+
+ case llvm::Triple::x86_64: {
+ llvm::Triple Triple = getTriple();
+ if (!Triple.isOSBinFormatMachO())
+ return getTripleString();
+
+ if (Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
+ // x86_64h goes in the triple. Other -march options just use the
+ // vanilla triple we already have.
+ StringRef MArch = A->getValue();
+ if (MArch == "x86_64h")
+ Triple.setArchName(MArch);
+ }
+ return Triple.getTriple();
+ }
+ case llvm::Triple::aarch64: {
+ llvm::Triple Triple = getTriple();
+ if (!Triple.isOSBinFormatMachO())
+ return getTripleString();
+
+ // FIXME: older versions of ld64 expect the "arm64" component in the actual
+ // triple string and query it to determine whether an LTO file can be
+ // handled. Remove this when we don't care any more.
+ Triple.setArchName("arm64");
+ return Triple.getTriple();
+ }
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb: {
+ // FIXME: Factor into subclasses.
+ llvm::Triple Triple = getTriple();
+ bool IsBigEndian = getTriple().getArch() == llvm::Triple::armeb ||
+ getTriple().getArch() == llvm::Triple::thumbeb;
+
+ // Handle pseudo-target flags '-mlittle-endian'/'-EL' and
+ // '-mbig-endian'/'-EB'.
+ if (Arg *A = Args.getLastArg(options::OPT_mlittle_endian,
+ options::OPT_mbig_endian)) {
+ IsBigEndian = !A->getOption().matches(options::OPT_mlittle_endian);
+ }
+
+ // Thumb2 is the default for V7 on Darwin.
+ //
+ // FIXME: Thumb should just be another -target-feature, not in the triple.
+ StringRef MCPU, MArch;
+ if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
+ MCPU = A->getValue();
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
+ MArch = A->getValue();
+ std::string CPU =
+ Triple.isOSBinFormatMachO()
+ ? tools::arm::getARMCPUForMArch(MArch, Triple).str()
+ : tools::arm::getARMTargetCPU(MCPU, MArch, Triple);
+ StringRef Suffix =
+ tools::arm::getLLVMArchSuffixForARM(CPU, MArch, Triple);
+ bool IsMProfile = ARM::parseArchProfile(Suffix) == ARM::PK_M;
+ bool ThumbDefault = IsMProfile || (ARM::parseArchVersion(Suffix) == 7 &&
+ getTriple().isOSBinFormatMachO());
+ // FIXME: this is invalid for WindowsCE
+ if (getTriple().isOSWindows())
+ ThumbDefault = true;
+ std::string ArchName;
+ if (IsBigEndian)
+ ArchName = "armeb";
+ else
+ ArchName = "arm";
+
+ // Assembly files should start in ARM mode, unless arch is M-profile.
+ if ((InputType != types::TY_PP_Asm && Args.hasFlag(options::OPT_mthumb,
+ options::OPT_mno_thumb, ThumbDefault)) || IsMProfile) {
+ if (IsBigEndian)
+ ArchName = "thumbeb";
+ else
+ ArchName = "thumb";
+ }
+ Triple.setArchName(ArchName + Suffix.str());
+
+ return Triple.getTriple();
+ }
+ }
+}
+
+std::string ToolChain::ComputeEffectiveClangTriple(const ArgList &Args,
+ types::ID InputType) const {
+ return ComputeLLVMTriple(Args, InputType);
+}
+
+void ToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ // Each toolchain should provide the appropriate include flags.
+}
+
+void ToolChain::addClangTargetOptions(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+}
+
+void ToolChain::addClangWarningOptions(ArgStringList &CC1Args) const {}
+
+void ToolChain::addProfileRTLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const {
+ if (!needsProfileRT(Args)) return;
+
+ CmdArgs.push_back(getCompilerRTArgString(Args, "profile"));
+}
+
+ToolChain::RuntimeLibType ToolChain::GetRuntimeLibType(
+ const ArgList &Args) const {
+ if (Arg *A = Args.getLastArg(options::OPT_rtlib_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value == "compiler-rt")
+ return ToolChain::RLT_CompilerRT;
+ if (Value == "libgcc")
+ return ToolChain::RLT_Libgcc;
+ getDriver().Diag(diag::err_drv_invalid_rtlib_name)
+ << A->getAsString(Args);
+ }
+
+ return GetDefaultRuntimeLibType();
+}
+
+ToolChain::CXXStdlibType ToolChain::GetCXXStdlibType(const ArgList &Args) const{
+ if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value == "libc++")
+ return ToolChain::CST_Libcxx;
+ if (Value == "libstdc++")
+ return ToolChain::CST_Libstdcxx;
+ getDriver().Diag(diag::err_drv_invalid_stdlib_name)
+ << A->getAsString(Args);
+ }
+
+ return ToolChain::CST_Libstdcxx;
+}
+
+/// \brief Utility function to add a system include directory to CC1 arguments.
+/*static*/ void ToolChain::addSystemInclude(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ const Twine &Path) {
+ CC1Args.push_back("-internal-isystem");
+ CC1Args.push_back(DriverArgs.MakeArgString(Path));
+}
+
+/// \brief Utility function to add a system include directory with extern "C"
+/// semantics to CC1 arguments.
+///
+/// Note that this should be used rarely, and only for directories that
+/// historically and for legacy reasons are treated as having implicit extern
+/// "C" semantics. These semantics are *ignored* by and large today, but its
+/// important to preserve the preprocessor changes resulting from the
+/// classification.
+/*static*/ void ToolChain::addExternCSystemInclude(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ const Twine &Path) {
+ CC1Args.push_back("-internal-externc-isystem");
+ CC1Args.push_back(DriverArgs.MakeArgString(Path));
+}
+
+void ToolChain::addExternCSystemIncludeIfExists(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ const Twine &Path) {
+ if (llvm::sys::fs::exists(Path))
+ addExternCSystemInclude(DriverArgs, CC1Args, Path);
+}
+
+/// \brief Utility function to add a list of system include directories to CC1.
+/*static*/ void ToolChain::addSystemIncludes(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ ArrayRef<StringRef> Paths) {
+ for (StringRef Path : Paths) {
+ CC1Args.push_back("-internal-isystem");
+ CC1Args.push_back(DriverArgs.MakeArgString(Path));
+ }
+}
+
+void ToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ // Header search paths should be handled by each of the subclasses.
+ // Historically, they have not been, and instead have been handled inside of
+ // the CC1-layer frontend. As the logic is hoisted out, this generic function
+ // will slowly stop being called.
+ //
+ // While it is being called, replicate a bit of a hack to propagate the
+ // '-stdlib=' flag down to CC1 so that it can in turn customize the C++
+ // header search paths with it. Once all systems are overriding this
+ // function, the CC1 flag and this line can be removed.
+ DriverArgs.AddAllArgs(CC1Args, options::OPT_stdlib_EQ);
+}
+
+void ToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CXXStdlibType Type = GetCXXStdlibType(Args);
+
+ switch (Type) {
+ case ToolChain::CST_Libcxx:
+ CmdArgs.push_back("-lc++");
+ break;
+
+ case ToolChain::CST_Libstdcxx:
+ CmdArgs.push_back("-lstdc++");
+ break;
+ }
+}
+
+void ToolChain::AddFilePathLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ for (const auto &LibPath : getFilePaths())
+ if (LibPath.length() > 0)
+ CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + LibPath));
+}
+
+void ToolChain::AddCCKextLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CmdArgs.push_back("-lcc_kext");
+}
+
+bool ToolChain::AddFastMathRuntimeIfAvailable(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // Do not check for -fno-fast-math or -fno-unsafe-math when -Ofast is passed
+ // (to keep the linker options consistent with gcc and clang itself).
+ if (!isOptimizationLevelFast(Args)) {
+ // Check whether -ffast-math or -funsafe-math-optimizations is in effect.
+ Arg *A =
+ Args.getLastArg(options::OPT_ffast_math, options::OPT_fno_fast_math,
+ options::OPT_funsafe_math_optimizations,
+ options::OPT_fno_unsafe_math_optimizations);
+
+ if (!A || A->getOption().getID() == options::OPT_fno_fast_math ||
+ A->getOption().getID() == options::OPT_fno_unsafe_math_optimizations)
+ return false;
+ }
+ // If crtfastmath.o exists add it to the arguments.
+ std::string Path = GetFilePath("crtfastmath.o");
+ if (Path == "crtfastmath.o") // Not found.
+ return false;
+
+ CmdArgs.push_back(Args.MakeArgString(Path));
+ return true;
+}
+
+SanitizerMask ToolChain::getSupportedSanitizers() const {
+ // Return sanitizers which don't require runtime support and are not
+ // platform dependent.
+ using namespace SanitizerKind;
+ SanitizerMask Res = (Undefined & ~Vptr & ~Function) | (CFI & ~CFIICall) |
+ CFICastStrict | UnsignedIntegerOverflow | LocalBounds;
+ if (getTriple().getArch() == llvm::Triple::x86 ||
+ getTriple().getArch() == llvm::Triple::x86_64)
+ Res |= CFIICall;
+ return Res;
+}
+
+void ToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {}
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp
new file mode 100644
index 0000000..7ece321
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp
@@ -0,0 +1,4533 @@
+//===--- ToolChains.cpp - ToolChain Implementations -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ToolChains.h"
+#include "clang/Basic/ObjCRuntime.h"
+#include "clang/Basic/Version.h"
+#include "clang/Basic/VirtualFileSystem.h"
+#include "clang/Config/config.h" // for GCC_INSTALL_PREFIX
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/SanitizerArgs.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Option/OptTable.h"
+#include "llvm/Option/Option.h"
+#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdlib> // ::getenv
+#include <system_error>
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang;
+using namespace llvm::opt;
+
+MachO::MachO(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
+ : ToolChain(D, Triple, Args) {
+ // We expect 'as', 'ld', etc. to be adjacent to our install dir.
+ getProgramPaths().push_back(getDriver().getInstalledDir());
+ if (getDriver().getInstalledDir() != getDriver().Dir)
+ getProgramPaths().push_back(getDriver().Dir);
+}
+
+/// Darwin - Darwin tool chain for i386 and x86_64.
+Darwin::Darwin(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
+ : MachO(D, Triple, Args), TargetInitialized(false) {}
+
+types::ID MachO::LookupTypeForExtension(const char *Ext) const {
+ types::ID Ty = types::lookupTypeForExtension(Ext);
+
+ // Darwin always preprocesses assembly files (unless -x is used explicitly).
+ if (Ty == types::TY_PP_Asm)
+ return types::TY_Asm;
+
+ return Ty;
+}
+
+bool MachO::HasNativeLLVMSupport() const { return true; }
+
+/// Darwin provides an ARC runtime starting in MacOS X 10.7 and iOS 5.0.
+ObjCRuntime Darwin::getDefaultObjCRuntime(bool isNonFragile) const {
+ if (isTargetWatchOSBased())
+ return ObjCRuntime(ObjCRuntime::WatchOS, TargetVersion);
+ if (isTargetIOSBased())
+ return ObjCRuntime(ObjCRuntime::iOS, TargetVersion);
+ if (isNonFragile)
+ return ObjCRuntime(ObjCRuntime::MacOSX, TargetVersion);
+ return ObjCRuntime(ObjCRuntime::FragileMacOSX, TargetVersion);
+}
+
+/// Darwin provides a blocks runtime starting in MacOS X 10.6 and iOS 3.2.
+bool Darwin::hasBlocksRuntime() const {
+ if (isTargetWatchOSBased())
+ return true;
+ else if (isTargetIOSBased())
+ return !isIPhoneOSVersionLT(3, 2);
+ else {
+ assert(isTargetMacOS() && "unexpected darwin target");
+ return !isMacosxVersionLT(10, 6);
+ }
+}
+
+// This is just a MachO name translation routine; there's no way to fold it
+// into ARMTargetParser without breaking all the other assumptions. Maybe
+// MachO should consider standardising its nomenclature.
+static const char *ArmMachOArchName(StringRef Arch) {
+ return llvm::StringSwitch<const char *>(Arch)
+ .Case("armv6k", "armv6")
+ .Case("armv6m", "armv6m")
+ .Case("armv5tej", "armv5")
+ .Case("xscale", "xscale")
+ .Case("armv4t", "armv4t")
+ .Case("armv7", "armv7")
+ .Cases("armv7a", "armv7-a", "armv7")
+ .Cases("armv7r", "armv7-r", "armv7")
+ .Cases("armv7em", "armv7e-m", "armv7em")
+ .Cases("armv7k", "armv7-k", "armv7k")
+ .Cases("armv7m", "armv7-m", "armv7m")
+ .Cases("armv7s", "armv7-s", "armv7s")
+ .Default(nullptr);
+}
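For readers unfamiliar with llvm::StringSwitch, the table above behaves like the following hypothetical plain-C++ rewrite: the first matching case wins, and unmatched names fall through to nullptr so the caller can try -mcpu next.

#include <cstring>

// Hedged sketch with an abbreviated table; the real switch covers more names.
static const char *armMachOArchNamePlain(const char *Arch) {
  static const struct { const char *From, *To; } Map[] = {
      {"armv6k", "armv6"},  {"armv5tej", "armv5"}, {"armv7a", "armv7"},
      {"armv7-a", "armv7"}, {"armv7k", "armv7k"},  {"armv7s", "armv7s"},
  };
  for (const auto &E : Map)
    if (std::strcmp(Arch, E.From) == 0)
      return E.To;
  return nullptr; // not recognized; caller falls back to -mcpu or "arm"
}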
+
+static const char *ArmMachOArchNameCPU(StringRef CPU) {
+ unsigned ArchKind = llvm::ARM::parseCPUArch(CPU);
+ if (ArchKind == llvm::ARM::AK_INVALID)
+ return nullptr;
+ StringRef Arch = llvm::ARM::getArchName(ArchKind);
+
+ // FIXME: Make sure this MachO triple mangling is really necessary.
+ // ARMv5* normalises to ARMv5.
+ if (Arch.startswith("armv5"))
+ Arch = Arch.substr(0, 5);
+ // ARMv6*, except ARMv6M, normalises to ARMv6.
+ else if (Arch.startswith("armv6") && !Arch.endswith("6m"))
+ Arch = Arch.substr(0, 5);
+ // ARMv7A normalises to ARMv7.
+ else if (Arch.endswith("v7a"))
+ Arch = Arch.substr(0, 5);
+ return Arch.data();
+}
+
+static bool isSoftFloatABI(const ArgList &Args) {
+ Arg *A = Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float,
+ options::OPT_mfloat_abi_EQ);
+ if (!A)
+ return false;
+
+ return A->getOption().matches(options::OPT_msoft_float) ||
+ (A->getOption().matches(options::OPT_mfloat_abi_EQ) &&
+ A->getValue() == StringRef("soft"));
+}
+
+StringRef MachO::getMachOArchName(const ArgList &Args) const {
+ switch (getTriple().getArch()) {
+ default:
+ return getDefaultUniversalArchName();
+
+ case llvm::Triple::aarch64:
+ return "arm64";
+
+ case llvm::Triple::thumb:
+ case llvm::Triple::arm:
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
+ if (const char *Arch = ArmMachOArchName(A->getValue()))
+ return Arch;
+
+ if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
+ if (const char *Arch = ArmMachOArchNameCPU(A->getValue()))
+ return Arch;
+
+ return "arm";
+ }
+}
+
+Darwin::~Darwin() {}
+
+MachO::~MachO() {}
+
+std::string MachO::ComputeEffectiveClangTriple(const ArgList &Args,
+ types::ID InputType) const {
+ llvm::Triple Triple(ComputeLLVMTriple(Args, InputType));
+
+ return Triple.getTriple();
+}
+
+std::string Darwin::ComputeEffectiveClangTriple(const ArgList &Args,
+ types::ID InputType) const {
+ llvm::Triple Triple(ComputeLLVMTriple(Args, InputType));
+
+  // If the target isn't initialized (e.g., an unknown Darwin platform),
+  // return the default triple.
+ if (!isTargetInitialized())
+ return Triple.getTriple();
+
+ SmallString<16> Str;
+ if (isTargetWatchOSBased())
+ Str += "watchos";
+ else if (isTargetTvOSBased())
+ Str += "tvos";
+ else if (isTargetIOSBased())
+ Str += "ios";
+ else
+ Str += "macosx";
+ Str += getTargetVersion().getAsString();
+ Triple.setOSName(Str);
+
+ return Triple.getTriple();
+}
+
+void Generic_ELF::anchor() {}
+
+Tool *MachO::getTool(Action::ActionClass AC) const {
+ switch (AC) {
+ case Action::LipoJobClass:
+ if (!Lipo)
+ Lipo.reset(new tools::darwin::Lipo(*this));
+ return Lipo.get();
+ case Action::DsymutilJobClass:
+ if (!Dsymutil)
+ Dsymutil.reset(new tools::darwin::Dsymutil(*this));
+ return Dsymutil.get();
+ case Action::VerifyDebugInfoJobClass:
+ if (!VerifyDebug)
+ VerifyDebug.reset(new tools::darwin::VerifyDebug(*this));
+ return VerifyDebug.get();
+ default:
+ return ToolChain::getTool(AC);
+ }
+}
+
+Tool *MachO::buildLinker() const { return new tools::darwin::Linker(*this); }
+
+Tool *MachO::buildAssembler() const {
+ return new tools::darwin::Assembler(*this);
+}
+
+DarwinClang::DarwinClang(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Darwin(D, Triple, Args) {}
+
+void DarwinClang::addClangWarningOptions(ArgStringList &CC1Args) const {
+ // For modern targets, promote certain warnings to errors.
+ if (isTargetWatchOSBased() || getTriple().isArch64Bit()) {
+ // Always enable -Wdeprecated-objc-isa-usage and promote it
+ // to an error.
+ CC1Args.push_back("-Wdeprecated-objc-isa-usage");
+ CC1Args.push_back("-Werror=deprecated-objc-isa-usage");
+
+ // For iOS and watchOS, also error about implicit function declarations,
+ // as that can impact calling conventions.
+ if (!isTargetMacOS())
+ CC1Args.push_back("-Werror=implicit-function-declaration");
+ }
+}
+
+/// \brief Determine whether Objective-C automated reference counting is
+/// enabled.
+static bool isObjCAutoRefCount(const ArgList &Args) {
+ return Args.hasFlag(options::OPT_fobjc_arc, options::OPT_fno_objc_arc, false);
+}
+
+void DarwinClang::AddLinkARCArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // Avoid linking compatibility stubs on i386 mac.
+ if (isTargetMacOS() && getArch() == llvm::Triple::x86)
+ return;
+
+ ObjCRuntime runtime = getDefaultObjCRuntime(/*nonfragile*/ true);
+
+ if ((runtime.hasNativeARC() || !isObjCAutoRefCount(Args)) &&
+ runtime.hasSubscripting())
+ return;
+
+ CmdArgs.push_back("-force_load");
+ SmallString<128> P(getDriver().ClangExecutable);
+ llvm::sys::path::remove_filename(P); // 'clang'
+ llvm::sys::path::remove_filename(P); // 'bin'
+ llvm::sys::path::append(P, "lib", "arc", "libarclite_");
+ // Mash in the platform.
+ if (isTargetWatchOSSimulator())
+ P += "watchsimulator";
+ else if (isTargetWatchOS())
+ P += "watchos";
+ else if (isTargetTvOSSimulator())
+ P += "appletvsimulator";
+ else if (isTargetTvOS())
+ P += "appletvos";
+ else if (isTargetIOSSimulator())
+ P += "iphonesimulator";
+ else if (isTargetIPhoneOS())
+ P += "iphoneos";
+ else
+ P += "macosx";
+ P += ".a";
+
+ CmdArgs.push_back(Args.MakeArgString(P));
+}
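A minimal sketch of the path construction above, using std::filesystem instead of llvm::sys::path (names hypothetical): a driver at /usr/bin/clang targeting the iOS simulator would link /usr/lib/arc/libarclite_iphonesimulator.a.

#include <filesystem>
#include <string>

namespace fs = std::filesystem;

// Hedged sketch: strip 'clang' and 'bin', then descend into lib/arc.
static fs::path arcliteFor(const fs::path &ClangExe,
                           const std::string &Platform) {
  fs::path P = ClangExe.parent_path().parent_path();
  return P / "lib" / "arc" / ("libarclite_" + Platform + ".a");
}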
+
+void MachO::AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs,
+ StringRef DarwinLibName, bool AlwaysLink,
+ bool IsEmbedded, bool AddRPath) const {
+ SmallString<128> Dir(getDriver().ResourceDir);
+ llvm::sys::path::append(Dir, "lib", IsEmbedded ? "macho_embedded" : "darwin");
+
+ SmallString<128> P(Dir);
+ llvm::sys::path::append(P, DarwinLibName);
+
+ // For now, allow missing resource libraries to support developers who may
+ // not have compiler-rt checked out or integrated into their build (unless
+ // we explicitly force linking with this library).
+ if (AlwaysLink || getVFS().exists(P))
+ CmdArgs.push_back(Args.MakeArgString(P));
+
+  // Adding the rpaths might negatively interact with other rpaths, so make
+  // sure we add them last, after all user-specified rpaths. That is currently
+  // true at this call site, but we need to be careful if this function is
+  // ever called before the user's rpaths are emitted.
+ if (AddRPath) {
+ assert(DarwinLibName.endswith(".dylib") && "must be a dynamic library");
+
+ // Add @executable_path to rpath to support having the dylib copied with
+ // the executable.
+ CmdArgs.push_back("-rpath");
+ CmdArgs.push_back("@executable_path");
+
+ // Add the path to the resource dir to rpath to support using the dylib
+ // from the default location without copying.
+ CmdArgs.push_back("-rpath");
+ CmdArgs.push_back(Args.MakeArgString(Dir));
+ }
+}
+
+void Darwin::addProfileRTLibs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ if (!needsProfileRT(Args)) return;
+
+ // TODO: Clean this up once autoconf is gone
+ SmallString<128> P(getDriver().ResourceDir);
+ llvm::sys::path::append(P, "lib", "darwin");
+ const char *Library = "libclang_rt.profile_osx.a";
+
+ // Select the appropriate runtime library for the target.
+ if (isTargetWatchOS()) {
+ Library = "libclang_rt.profile_watchos.a";
+ } else if (isTargetWatchOSSimulator()) {
+ llvm::sys::path::append(P, "libclang_rt.profile_watchossim.a");
+ Library = getVFS().exists(P) ? "libclang_rt.profile_watchossim.a"
+ : "libclang_rt.profile_watchos.a";
+ } else if (isTargetTvOS()) {
+ Library = "libclang_rt.profile_tvos.a";
+ } else if (isTargetTvOSSimulator()) {
+ llvm::sys::path::append(P, "libclang_rt.profile_tvossim.a");
+ Library = getVFS().exists(P) ? "libclang_rt.profile_tvossim.a"
+ : "libclang_rt.profile_tvos.a";
+ } else if (isTargetIPhoneOS()) {
+ Library = "libclang_rt.profile_ios.a";
+ } else if (isTargetIOSSimulator()) {
+ llvm::sys::path::append(P, "libclang_rt.profile_iossim.a");
+ Library = getVFS().exists(P) ? "libclang_rt.profile_iossim.a"
+ : "libclang_rt.profile_ios.a";
+ } else {
+ assert(isTargetMacOS() && "unexpected non MacOS platform");
+ }
+ AddLinkRuntimeLib(Args, CmdArgs, Library,
+ /*AlwaysLink*/ true);
+ return;
+}
+
+void DarwinClang::AddLinkSanitizerLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs,
+ StringRef Sanitizer) const {
+ if (!Args.hasArg(options::OPT_dynamiclib) &&
+ !Args.hasArg(options::OPT_bundle)) {
+    // Sanitizer runtime libraries require C++.
+ AddCXXStdlibLibArgs(Args, CmdArgs);
+ }
+ // ASan is not supported on watchOS.
+ assert(isTargetMacOS() || isTargetIOSSimulator());
+ StringRef OS = isTargetMacOS() ? "osx" : "iossim";
+ AddLinkRuntimeLib(
+ Args, CmdArgs,
+ (Twine("libclang_rt.") + Sanitizer + "_" + OS + "_dynamic.dylib").str(),
+ /*AlwaysLink*/ true, /*IsEmbedded*/ false,
+ /*AddRPath*/ true);
+
+ if (GetCXXStdlibType(Args) == ToolChain::CST_Libcxx) {
+    // Add an explicit dependency on -lc++abi, as -lc++ doesn't re-export
+    // all of the RTTI-related symbols that UBSan uses.
+ CmdArgs.push_back("-lc++abi");
+ }
+}
+
+void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // Darwin only supports the compiler-rt based runtime libraries.
+ switch (GetRuntimeLibType(Args)) {
+ case ToolChain::RLT_CompilerRT:
+ break;
+ default:
+ getDriver().Diag(diag::err_drv_unsupported_rtlib_for_platform)
+ << Args.getLastArg(options::OPT_rtlib_EQ)->getValue() << "darwin";
+ return;
+ }
+
+  // Darwin doesn't support real static executables; don't link any runtime
+  // libraries with -static.
+ if (Args.hasArg(options::OPT_static) ||
+ Args.hasArg(options::OPT_fapple_kext) ||
+ Args.hasArg(options::OPT_mkernel))
+ return;
+
+  // Reject -static-libgcc for now; we can deal with this when and if someone
+  // cares. This is useful in situations where someone wants to statically link
+  // something like libstdc++ and needs its runtime support routines.
+ if (const Arg *A = Args.getLastArg(options::OPT_static_libgcc)) {
+ getDriver().Diag(diag::err_drv_unsupported_opt) << A->getAsString(Args);
+ return;
+ }
+
+ const SanitizerArgs &Sanitize = getSanitizerArgs();
+ if (Sanitize.needsAsanRt())
+ AddLinkSanitizerLibArgs(Args, CmdArgs, "asan");
+ if (Sanitize.needsUbsanRt())
+ AddLinkSanitizerLibArgs(Args, CmdArgs, "ubsan");
+ if (Sanitize.needsTsanRt())
+ AddLinkSanitizerLibArgs(Args, CmdArgs, "tsan");
+
+  // Link libSystem, then the dynamic runtime library, and finally any
+  // target-specific static runtime library.
+ CmdArgs.push_back("-lSystem");
+
+ // Select the dynamic runtime library and the target specific static library.
+ if (isTargetWatchOSBased()) {
+ // We currently always need a static runtime library for watchOS.
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.watchos.a");
+ } else if (isTargetTvOSBased()) {
+ // We currently always need a static runtime library for tvOS.
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.tvos.a");
+ } else if (isTargetIOSBased()) {
+    // Don't attempt to link libgcc_s.1 for the iOS simulator (it never went
+    // into the SDK), and linking against it isn't needed for iOS 5.0+.
+ if (isIPhoneOSVersionLT(5, 0) && !isTargetIOSSimulator() &&
+ getTriple().getArch() != llvm::Triple::aarch64)
+ CmdArgs.push_back("-lgcc_s.1");
+
+ // We currently always need a static runtime library for iOS.
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.ios.a");
+ } else {
+ assert(isTargetMacOS() && "unexpected non MacOS platform");
+ // The dynamic runtime library was merged with libSystem for 10.6 and
+ // beyond; only 10.4 and 10.5 need an additional runtime library.
+ if (isMacosxVersionLT(10, 5))
+ CmdArgs.push_back("-lgcc_s.10.4");
+ else if (isMacosxVersionLT(10, 6))
+ CmdArgs.push_back("-lgcc_s.10.5");
+
+ // For OS X, we thought we would only need a static runtime library when
+ // targeting 10.4, to provide versions of the static functions which were
+ // omitted from 10.4.dylib.
+ //
+ // Unfortunately, that turned out to not be true, because Darwin system
+ // headers can still use eprintf on i386, and it is not exported from
+ // libSystem. Therefore, we still must provide a runtime library just for
+ // the tiny tiny handful of projects that *might* use that symbol.
+ if (isMacosxVersionLT(10, 5)) {
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.10.4.a");
+ } else {
+ if (getTriple().getArch() == llvm::Triple::x86)
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.eprintf.a");
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.osx.a");
+ }
+ }
+}
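A worked example of the branches above, under stated assumptions: for x86_64-apple-macosx10.7 with no sanitizers enabled, the runtime portion of the link line reduces to

  -lSystem <resource-dir>/lib/darwin/libclang_rt.osx.a

while a 10.4 deployment target instead gets -lgcc_s.10.4 followed by libclang_rt.10.4.a.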
+
+void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
+ const OptTable &Opts = getDriver().getOpts();
+
+ // Support allowing the SDKROOT environment variable used by xcrun and other
+ // Xcode tools to define the default sysroot, by making it the default for
+ // isysroot.
+ if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
+ // Warn if the path does not exist.
+ if (!getVFS().exists(A->getValue()))
+ getDriver().Diag(clang::diag::warn_missing_sysroot) << A->getValue();
+ } else {
+ if (char *env = ::getenv("SDKROOT")) {
+ // We only use this value as the default if it is an absolute path,
+ // exists, and it is not the root path.
+ if (llvm::sys::path::is_absolute(env) && getVFS().exists(env) &&
+ StringRef(env) != "/") {
+ Args.append(Args.MakeSeparateArg(
+ nullptr, Opts.getOption(options::OPT_isysroot), env));
+ }
+ }
+ }
+
+ Arg *OSXVersion = Args.getLastArg(options::OPT_mmacosx_version_min_EQ);
+ Arg *iOSVersion = Args.getLastArg(options::OPT_miphoneos_version_min_EQ);
+ Arg *TvOSVersion = Args.getLastArg(options::OPT_mtvos_version_min_EQ);
+ Arg *WatchOSVersion = Args.getLastArg(options::OPT_mwatchos_version_min_EQ);
+
+ if (OSXVersion && (iOSVersion || TvOSVersion || WatchOSVersion)) {
+ getDriver().Diag(diag::err_drv_argument_not_allowed_with)
+ << OSXVersion->getAsString(Args)
+ << (iOSVersion ? iOSVersion :
+ TvOSVersion ? TvOSVersion : WatchOSVersion)->getAsString(Args);
+ iOSVersion = TvOSVersion = WatchOSVersion = nullptr;
+ } else if (iOSVersion && (TvOSVersion || WatchOSVersion)) {
+ getDriver().Diag(diag::err_drv_argument_not_allowed_with)
+ << iOSVersion->getAsString(Args)
+ << (TvOSVersion ? TvOSVersion : WatchOSVersion)->getAsString(Args);
+ TvOSVersion = WatchOSVersion = nullptr;
+ } else if (TvOSVersion && WatchOSVersion) {
+ getDriver().Diag(diag::err_drv_argument_not_allowed_with)
+ << TvOSVersion->getAsString(Args)
+ << WatchOSVersion->getAsString(Args);
+ WatchOSVersion = nullptr;
+ } else if (!OSXVersion && !iOSVersion && !TvOSVersion && !WatchOSVersion) {
+ // If no deployment target was specified on the command line, check for
+ // environment defines.
+ std::string OSXTarget;
+ std::string iOSTarget;
+ std::string TvOSTarget;
+ std::string WatchOSTarget;
+
+ if (char *env = ::getenv("MACOSX_DEPLOYMENT_TARGET"))
+ OSXTarget = env;
+ if (char *env = ::getenv("IPHONEOS_DEPLOYMENT_TARGET"))
+ iOSTarget = env;
+ if (char *env = ::getenv("TVOS_DEPLOYMENT_TARGET"))
+ TvOSTarget = env;
+ if (char *env = ::getenv("WATCHOS_DEPLOYMENT_TARGET"))
+ WatchOSTarget = env;
+
+    // If there is no command-line argument to specify the target version and
+    // no environment variable is defined, see if we can set the default based
+    // on -isysroot.
+ if (OSXTarget.empty() && iOSTarget.empty() && WatchOSTarget.empty() &&
+ Args.hasArg(options::OPT_isysroot)) {
+ if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
+ StringRef isysroot = A->getValue();
+        // Assume the SDK has a path of the form SOME_PATH/SDKs/PlatformXX.YY.sdk.
+ size_t BeginSDK = isysroot.rfind("SDKs/");
+ size_t EndSDK = isysroot.rfind(".sdk");
+ if (BeginSDK != StringRef::npos && EndSDK != StringRef::npos) {
+ StringRef SDK = isysroot.slice(BeginSDK + 5, EndSDK);
+          // Slice the version number out; it spans from the first digit to
+          // the last digit.
+ size_t StartVer = SDK.find_first_of("0123456789");
+ size_t EndVer = SDK.find_last_of("0123456789");
+ if (StartVer != StringRef::npos && EndVer > StartVer) {
+ StringRef Version = SDK.slice(StartVer, EndVer + 1);
+ if (SDK.startswith("iPhoneOS") ||
+ SDK.startswith("iPhoneSimulator"))
+ iOSTarget = Version;
+ else if (SDK.startswith("MacOSX"))
+ OSXTarget = Version;
+ else if (SDK.startswith("WatchOS") ||
+ SDK.startswith("WatchSimulator"))
+ WatchOSTarget = Version;
+ else if (SDK.startswith("AppleTVOS") ||
+ SDK.startswith("AppleTVSimulator"))
+ TvOSTarget = Version;
+ }
+ }
+ }
+ }
+
+    // If no deployment target has been specified, try to guess the platform
+    // from the arch name and compute the version from the triple.
+ if (OSXTarget.empty() && iOSTarget.empty() && TvOSTarget.empty() &&
+ WatchOSTarget.empty()) {
+ StringRef MachOArchName = getMachOArchName(Args);
+ unsigned Major, Minor, Micro;
+ if (MachOArchName == "armv7" || MachOArchName == "armv7s" ||
+ MachOArchName == "arm64") {
+ getTriple().getiOSVersion(Major, Minor, Micro);
+ llvm::raw_string_ostream(iOSTarget) << Major << '.' << Minor << '.'
+ << Micro;
+ } else if (MachOArchName == "armv7k") {
+ getTriple().getWatchOSVersion(Major, Minor, Micro);
+ llvm::raw_string_ostream(WatchOSTarget) << Major << '.' << Minor << '.'
+ << Micro;
+ } else if (MachOArchName != "armv6m" && MachOArchName != "armv7m" &&
+ MachOArchName != "armv7em") {
+ if (!getTriple().getMacOSXVersion(Major, Minor, Micro)) {
+ getDriver().Diag(diag::err_drv_invalid_darwin_version)
+ << getTriple().getOSName();
+ }
+ llvm::raw_string_ostream(OSXTarget) << Major << '.' << Minor << '.'
+ << Micro;
+ }
+ }
+
+ // Do not allow conflicts with the watchOS target.
+ if (!WatchOSTarget.empty() && (!iOSTarget.empty() || !TvOSTarget.empty())) {
+ getDriver().Diag(diag::err_drv_conflicting_deployment_targets)
+ << "WATCHOS_DEPLOYMENT_TARGET"
+ << (!iOSTarget.empty() ? "IPHONEOS_DEPLOYMENT_TARGET" :
+ "TVOS_DEPLOYMENT_TARGET");
+ }
+
+ // Do not allow conflicts with the tvOS target.
+ if (!TvOSTarget.empty() && !iOSTarget.empty()) {
+ getDriver().Diag(diag::err_drv_conflicting_deployment_targets)
+ << "TVOS_DEPLOYMENT_TARGET"
+ << "IPHONEOS_DEPLOYMENT_TARGET";
+ }
+
+    // Allow conflicts between OS X and the other platforms for historical
+    // reasons, but choose the default platform.
+ if (!OSXTarget.empty() && (!iOSTarget.empty() ||
+ !WatchOSTarget.empty() ||
+ !TvOSTarget.empty())) {
+ if (getTriple().getArch() == llvm::Triple::arm ||
+ getTriple().getArch() == llvm::Triple::aarch64 ||
+ getTriple().getArch() == llvm::Triple::thumb)
+ OSXTarget = "";
+ else
+ iOSTarget = WatchOSTarget = TvOSTarget = "";
+ }
+
+ if (!OSXTarget.empty()) {
+ const Option O = Opts.getOption(options::OPT_mmacosx_version_min_EQ);
+ OSXVersion = Args.MakeJoinedArg(nullptr, O, OSXTarget);
+ Args.append(OSXVersion);
+ } else if (!iOSTarget.empty()) {
+ const Option O = Opts.getOption(options::OPT_miphoneos_version_min_EQ);
+ iOSVersion = Args.MakeJoinedArg(nullptr, O, iOSTarget);
+ Args.append(iOSVersion);
+ } else if (!TvOSTarget.empty()) {
+ const Option O = Opts.getOption(options::OPT_mtvos_version_min_EQ);
+ TvOSVersion = Args.MakeJoinedArg(nullptr, O, TvOSTarget);
+ Args.append(TvOSVersion);
+ } else if (!WatchOSTarget.empty()) {
+ const Option O = Opts.getOption(options::OPT_mwatchos_version_min_EQ);
+ WatchOSVersion = Args.MakeJoinedArg(nullptr, O, WatchOSTarget);
+ Args.append(WatchOSVersion);
+ }
+ }
+
+ DarwinPlatformKind Platform;
+ if (OSXVersion)
+ Platform = MacOS;
+ else if (iOSVersion)
+ Platform = IPhoneOS;
+ else if (TvOSVersion)
+ Platform = TvOS;
+ else if (WatchOSVersion)
+ Platform = WatchOS;
+ else
+ llvm_unreachable("Unable to infer Darwin variant");
+
+ // Set the tool chain target information.
+ unsigned Major, Minor, Micro;
+ bool HadExtra;
+ if (Platform == MacOS) {
+ assert((!iOSVersion && !TvOSVersion && !WatchOSVersion) &&
+ "Unknown target platform!");
+ if (!Driver::GetReleaseVersion(OSXVersion->getValue(), Major, Minor, Micro,
+ HadExtra) ||
+ HadExtra || Major != 10 || Minor >= 100 || Micro >= 100)
+ getDriver().Diag(diag::err_drv_invalid_version_number)
+ << OSXVersion->getAsString(Args);
+ } else if (Platform == IPhoneOS) {
+ assert(iOSVersion && "Unknown target platform!");
+ if (!Driver::GetReleaseVersion(iOSVersion->getValue(), Major, Minor, Micro,
+ HadExtra) ||
+ HadExtra || Major >= 10 || Minor >= 100 || Micro >= 100)
+ getDriver().Diag(diag::err_drv_invalid_version_number)
+ << iOSVersion->getAsString(Args);
+ } else if (Platform == TvOS) {
+ if (!Driver::GetReleaseVersion(TvOSVersion->getValue(), Major, Minor,
+ Micro, HadExtra) || HadExtra ||
+ Major >= 10 || Minor >= 100 || Micro >= 100)
+ getDriver().Diag(diag::err_drv_invalid_version_number)
+ << TvOSVersion->getAsString(Args);
+ } else if (Platform == WatchOS) {
+ if (!Driver::GetReleaseVersion(WatchOSVersion->getValue(), Major, Minor,
+ Micro, HadExtra) || HadExtra ||
+ Major >= 10 || Minor >= 100 || Micro >= 100)
+ getDriver().Diag(diag::err_drv_invalid_version_number)
+ << WatchOSVersion->getAsString(Args);
+ } else
+ llvm_unreachable("unknown kind of Darwin platform");
+
+ // Recognize iOS targets with an x86 architecture as the iOS simulator.
+ if (iOSVersion && (getTriple().getArch() == llvm::Triple::x86 ||
+ getTriple().getArch() == llvm::Triple::x86_64))
+ Platform = IPhoneOSSimulator;
+ if (TvOSVersion && (getTriple().getArch() == llvm::Triple::x86 ||
+ getTriple().getArch() == llvm::Triple::x86_64))
+ Platform = TvOSSimulator;
+ if (WatchOSVersion && (getTriple().getArch() == llvm::Triple::x86 ||
+ getTriple().getArch() == llvm::Triple::x86_64))
+ Platform = WatchOSSimulator;
+
+ setTarget(Platform, Major, Minor, Micro);
+}
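The -isysroot parsing above can be isolated into a small helper. This hedged sketch (hypothetical name, std::string in place of StringRef) maps "/X/SDKs/iPhoneOS9.2.sdk" to platform "iPhoneOS" and version "9.2", and rejects single-digit versions just as the code above does.

#include <string>

static bool parseSDKName(const std::string &Sysroot, std::string &Platform,
                         std::string &Version) {
  size_t Begin = Sysroot.rfind("SDKs/");
  size_t End = Sysroot.rfind(".sdk");
  if (Begin == std::string::npos || End == std::string::npos || End <= Begin)
    return false;
  std::string SDK = Sysroot.substr(Begin + 5, End - Begin - 5);
  size_t StartVer = SDK.find_first_of("0123456789");
  size_t EndVer = SDK.find_last_of("0123456789");
  if (StartVer == std::string::npos || EndVer <= StartVer)
    return false;
  Platform = SDK.substr(0, StartVer);
  Version = SDK.substr(StartVer, EndVer - StartVer + 1);
  return true;
}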
+
+void DarwinClang::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CXXStdlibType Type = GetCXXStdlibType(Args);
+
+ switch (Type) {
+ case ToolChain::CST_Libcxx:
+ CmdArgs.push_back("-lc++");
+ break;
+
+ case ToolChain::CST_Libstdcxx:
+    // Unfortunately, -lstdc++ doesn't always exist in the standard search
+    // path; it was previously found in the gcc lib dir. However, for all the
+    // Darwin platforms we care about it was libstdc++.6, so we search for
+    // that explicitly if we can't see an obvious -lstdc++ candidate.
+
+ // Check in the sysroot first.
+ if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
+ SmallString<128> P(A->getValue());
+ llvm::sys::path::append(P, "usr", "lib", "libstdc++.dylib");
+
+ if (!getVFS().exists(P)) {
+ llvm::sys::path::remove_filename(P);
+ llvm::sys::path::append(P, "libstdc++.6.dylib");
+ if (getVFS().exists(P)) {
+ CmdArgs.push_back(Args.MakeArgString(P));
+ return;
+ }
+ }
+ }
+
+ // Otherwise, look in the root.
+ // FIXME: This should be removed someday when we don't have to care about
+ // 10.6 and earlier, where /usr/lib/libstdc++.dylib does not exist.
+ if (!getVFS().exists("/usr/lib/libstdc++.dylib") &&
+ getVFS().exists("/usr/lib/libstdc++.6.dylib")) {
+ CmdArgs.push_back("/usr/lib/libstdc++.6.dylib");
+ return;
+ }
+
+ // Otherwise, let the linker search.
+ CmdArgs.push_back("-lstdc++");
+ break;
+ }
+}
+
+void DarwinClang::AddCCKextLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+
+  // For Darwin platforms, use the compiler-rt-based support library instead
+  // of the gcc-provided one (which, incidentally, is only present in the gcc
+  // lib dir, making it hard to find).
+
+ SmallString<128> P(getDriver().ResourceDir);
+ llvm::sys::path::append(P, "lib", "darwin");
+
+ // Use the newer cc_kext for iOS ARM after 6.0.
+ if (isTargetWatchOS()) {
+ llvm::sys::path::append(P, "libclang_rt.cc_kext_watchos.a");
+ } else if (isTargetTvOS()) {
+ llvm::sys::path::append(P, "libclang_rt.cc_kext_tvos.a");
+ } else if (isTargetIPhoneOS()) {
+ llvm::sys::path::append(P, "libclang_rt.cc_kext_ios.a");
+ } else {
+ llvm::sys::path::append(P, "libclang_rt.cc_kext.a");
+ }
+
+ // For now, allow missing resource libraries to support developers who may
+ // not have compiler-rt checked out or integrated into their build.
+ if (getVFS().exists(P))
+ CmdArgs.push_back(Args.MakeArgString(P));
+}
+
+DerivedArgList *MachO::TranslateArgs(const DerivedArgList &Args,
+ const char *BoundArch) const {
+ DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
+ const OptTable &Opts = getDriver().getOpts();
+
+ // FIXME: We really want to get out of the tool chain level argument
+ // translation business, as it makes the driver functionality much
+ // more opaque. For now, we follow gcc closely solely for the
+ // purpose of easily achieving feature parity & testability. Once we
+ // have something that works, we should reevaluate each translation
+ // and try to push it down into tool specific logic.
+
+ for (Arg *A : Args) {
+ if (A->getOption().matches(options::OPT_Xarch__)) {
+      // Skip this argument unless the architecture matches either the
+      // toolchain triple arch or the arch being bound.
+ llvm::Triple::ArchType XarchArch =
+ tools::darwin::getArchTypeForMachOArchName(A->getValue(0));
+ if (!(XarchArch == getArch() ||
+ (BoundArch &&
+ XarchArch ==
+ tools::darwin::getArchTypeForMachOArchName(BoundArch))))
+ continue;
+
+ Arg *OriginalArg = A;
+ unsigned Index = Args.getBaseArgs().MakeIndex(A->getValue(1));
+ unsigned Prev = Index;
+ std::unique_ptr<Arg> XarchArg(Opts.ParseOneArg(Args, Index));
+
+ // If the argument parsing failed or more than one argument was
+ // consumed, the -Xarch_ argument's parameter tried to consume
+ // extra arguments. Emit an error and ignore.
+ //
+ // We also want to disallow any options which would alter the
+ // driver behavior; that isn't going to work in our model. We
+ // use isDriverOption() as an approximation, although things
+ // like -O4 are going to slip through.
+ if (!XarchArg || Index > Prev + 1) {
+ getDriver().Diag(diag::err_drv_invalid_Xarch_argument_with_args)
+ << A->getAsString(Args);
+ continue;
+ } else if (XarchArg->getOption().hasFlag(options::DriverOption)) {
+ getDriver().Diag(diag::err_drv_invalid_Xarch_argument_isdriver)
+ << A->getAsString(Args);
+ continue;
+ }
+
+ XarchArg->setBaseArg(A);
+
+ A = XarchArg.release();
+ DAL->AddSynthesizedArg(A);
+
+      // Linker input arguments require custom handling. The problem is that
+      // we have already constructed the phase actions, so we cannot treat
+      // them as "input arguments".
+ if (A->getOption().hasFlag(options::LinkerInput)) {
+ // Convert the argument into individual Zlinker_input_args.
+ for (const char *Value : A->getValues()) {
+ DAL->AddSeparateArg(
+ OriginalArg, Opts.getOption(options::OPT_Zlinker_input), Value);
+ }
+ continue;
+ }
+ }
+
+    // Sob. This is strictly gcc-compatible for the time being. Apple gcc
+    // translates options twice, which means that self-expanding options add
+    // duplicates.
+ switch ((options::ID)A->getOption().getID()) {
+ default:
+ DAL->append(A);
+ break;
+
+ case options::OPT_mkernel:
+ case options::OPT_fapple_kext:
+ DAL->append(A);
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_static));
+ break;
+
+ case options::OPT_dependency_file:
+ DAL->AddSeparateArg(A, Opts.getOption(options::OPT_MF), A->getValue());
+ break;
+
+ case options::OPT_gfull:
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_g_Flag));
+ DAL->AddFlagArg(
+ A, Opts.getOption(options::OPT_fno_eliminate_unused_debug_symbols));
+ break;
+
+ case options::OPT_gused:
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_g_Flag));
+ DAL->AddFlagArg(
+ A, Opts.getOption(options::OPT_feliminate_unused_debug_symbols));
+ break;
+
+ case options::OPT_shared:
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_dynamiclib));
+ break;
+
+ case options::OPT_fconstant_cfstrings:
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_mconstant_cfstrings));
+ break;
+
+ case options::OPT_fno_constant_cfstrings:
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_mno_constant_cfstrings));
+ break;
+
+ case options::OPT_Wnonportable_cfstrings:
+ DAL->AddFlagArg(A,
+ Opts.getOption(options::OPT_mwarn_nonportable_cfstrings));
+ break;
+
+ case options::OPT_Wno_nonportable_cfstrings:
+ DAL->AddFlagArg(
+ A, Opts.getOption(options::OPT_mno_warn_nonportable_cfstrings));
+ break;
+
+ case options::OPT_fpascal_strings:
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_mpascal_strings));
+ break;
+
+ case options::OPT_fno_pascal_strings:
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_mno_pascal_strings));
+ break;
+ }
+ }
+
+ if (getTriple().getArch() == llvm::Triple::x86 ||
+ getTriple().getArch() == llvm::Triple::x86_64)
+ if (!Args.hasArgNoClaim(options::OPT_mtune_EQ))
+ DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_mtune_EQ),
+ "core2");
+
+ // Add the arch options based on the particular spelling of -arch, to match
+ // how the driver driver works.
+ if (BoundArch) {
+ StringRef Name = BoundArch;
+ const Option MCpu = Opts.getOption(options::OPT_mcpu_EQ);
+ const Option MArch = Opts.getOption(options::OPT_march_EQ);
+
+    // This code must be kept in sync with LLVM's getArchTypeForDarwinArch,
+    // which defines the list of architectures we accept.
+ if (Name == "ppc")
+ ;
+ else if (Name == "ppc601")
+ DAL->AddJoinedArg(nullptr, MCpu, "601");
+ else if (Name == "ppc603")
+ DAL->AddJoinedArg(nullptr, MCpu, "603");
+ else if (Name == "ppc604")
+ DAL->AddJoinedArg(nullptr, MCpu, "604");
+ else if (Name == "ppc604e")
+ DAL->AddJoinedArg(nullptr, MCpu, "604e");
+ else if (Name == "ppc750")
+ DAL->AddJoinedArg(nullptr, MCpu, "750");
+ else if (Name == "ppc7400")
+ DAL->AddJoinedArg(nullptr, MCpu, "7400");
+ else if (Name == "ppc7450")
+ DAL->AddJoinedArg(nullptr, MCpu, "7450");
+ else if (Name == "ppc970")
+ DAL->AddJoinedArg(nullptr, MCpu, "970");
+
+ else if (Name == "ppc64" || Name == "ppc64le")
+ DAL->AddFlagArg(nullptr, Opts.getOption(options::OPT_m64));
+
+ else if (Name == "i386")
+ ;
+ else if (Name == "i486")
+ DAL->AddJoinedArg(nullptr, MArch, "i486");
+ else if (Name == "i586")
+ DAL->AddJoinedArg(nullptr, MArch, "i586");
+ else if (Name == "i686")
+ DAL->AddJoinedArg(nullptr, MArch, "i686");
+ else if (Name == "pentium")
+ DAL->AddJoinedArg(nullptr, MArch, "pentium");
+ else if (Name == "pentium2")
+ DAL->AddJoinedArg(nullptr, MArch, "pentium2");
+ else if (Name == "pentpro")
+ DAL->AddJoinedArg(nullptr, MArch, "pentiumpro");
+ else if (Name == "pentIIm3")
+ DAL->AddJoinedArg(nullptr, MArch, "pentium2");
+
+ else if (Name == "x86_64")
+ DAL->AddFlagArg(nullptr, Opts.getOption(options::OPT_m64));
+ else if (Name == "x86_64h") {
+ DAL->AddFlagArg(nullptr, Opts.getOption(options::OPT_m64));
+ DAL->AddJoinedArg(nullptr, MArch, "x86_64h");
+ }
+
+ else if (Name == "arm")
+ DAL->AddJoinedArg(nullptr, MArch, "armv4t");
+ else if (Name == "armv4t")
+ DAL->AddJoinedArg(nullptr, MArch, "armv4t");
+ else if (Name == "armv5")
+ DAL->AddJoinedArg(nullptr, MArch, "armv5tej");
+ else if (Name == "xscale")
+ DAL->AddJoinedArg(nullptr, MArch, "xscale");
+ else if (Name == "armv6")
+ DAL->AddJoinedArg(nullptr, MArch, "armv6k");
+ else if (Name == "armv6m")
+ DAL->AddJoinedArg(nullptr, MArch, "armv6m");
+ else if (Name == "armv7")
+ DAL->AddJoinedArg(nullptr, MArch, "armv7a");
+ else if (Name == "armv7em")
+ DAL->AddJoinedArg(nullptr, MArch, "armv7em");
+ else if (Name == "armv7k")
+ DAL->AddJoinedArg(nullptr, MArch, "armv7k");
+ else if (Name == "armv7m")
+ DAL->AddJoinedArg(nullptr, MArch, "armv7m");
+ else if (Name == "armv7s")
+ DAL->AddJoinedArg(nullptr, MArch, "armv7s");
+ }
+
+ return DAL;
+}
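A usage illustration of the -Xarch_ and -arch translations above (hypothetical invocation):

// Hypothetical invocation illustrating the translation above:
//   clang -arch i386 -arch x86_64 -Xarch_x86_64 -DWIDE=1 foo.c
// The -DWIDE=1 define survives TranslateArgs only for the x86_64 binding,
// and a spelling such as "-arch ppc7400" is additionally translated into an
// implicit "-mcpu=7400" by the table at the end of the function.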
+
+void MachO::AddLinkRuntimeLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+  // Embedded targets are simple at the moment: they don't support sanitizers
+  // and use a different library for each member of the product
+  // { static, PIC } x { hard-float, soft-float }.
+ llvm::SmallString<32> CompilerRT = StringRef("libclang_rt.");
+ CompilerRT +=
+ (tools::arm::getARMFloatABI(*this, Args) == tools::arm::FloatABI::Hard)
+ ? "hard"
+ : "soft";
+ CompilerRT += Args.hasArg(options::OPT_fPIC) ? "_pic.a" : "_static.a";
+
+ AddLinkRuntimeLib(Args, CmdArgs, CompilerRT, false, true);
+}
+
+DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
+ const char *BoundArch) const {
+ // First get the generic Apple args, before moving onto Darwin-specific ones.
+ DerivedArgList *DAL = MachO::TranslateArgs(Args, BoundArch);
+ const OptTable &Opts = getDriver().getOpts();
+
+ // If no architecture is bound, none of the translations here are relevant.
+ if (!BoundArch)
+ return DAL;
+
+ // Add an explicit version min argument for the deployment target. We do this
+ // after argument translation because -Xarch_ arguments may add a version min
+ // argument.
+ AddDeploymentTarget(*DAL);
+
+ // For iOS 6, undo the translation to add -static for -mkernel/-fapple-kext.
+ // FIXME: It would be far better to avoid inserting those -static arguments,
+ // but we can't check the deployment target in the translation code until
+ // it is set here.
+ if (isTargetWatchOSBased() ||
+ (isTargetIOSBased() && !isIPhoneOSVersionLT(6, 0))) {
+ for (ArgList::iterator it = DAL->begin(), ie = DAL->end(); it != ie; ) {
+ Arg *A = *it;
+ ++it;
+ if (A->getOption().getID() != options::OPT_mkernel &&
+ A->getOption().getID() != options::OPT_fapple_kext)
+ continue;
+ assert(it != ie && "unexpected argument translation");
+ A = *it;
+ assert(A->getOption().getID() == options::OPT_static &&
+ "missing expected -static argument");
+ it = DAL->getArgs().erase(it);
+ }
+ }
+
+  // Default to using libc++ on OS X 10.9+, iOS 7+, and watchOS.
+ if (((isTargetMacOS() && !isMacosxVersionLT(10, 9)) ||
+ (isTargetIOSBased() && !isIPhoneOSVersionLT(7, 0)) ||
+ isTargetWatchOSBased()) &&
+ !Args.getLastArg(options::OPT_stdlib_EQ))
+ DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_stdlib_EQ),
+ "libc++");
+
+ // Validate the C++ standard library choice.
+ CXXStdlibType Type = GetCXXStdlibType(*DAL);
+ if (Type == ToolChain::CST_Libcxx) {
+ // Check whether the target provides libc++.
+ StringRef where;
+
+ // Complain about targeting iOS < 5.0 in any way.
+ if (isTargetIOSBased() && isIPhoneOSVersionLT(5, 0))
+ where = "iOS 5.0";
+
+ if (where != StringRef()) {
+ getDriver().Diag(clang::diag::err_drv_invalid_libcxx_deployment) << where;
+ }
+ }
+
+ return DAL;
+}
+
+bool MachO::IsUnwindTablesDefault() const {
+ return getArch() == llvm::Triple::x86_64;
+}
+
+bool MachO::UseDwarfDebugFlags() const {
+ if (const char *S = ::getenv("RC_DEBUG_OPTIONS"))
+ return S[0] != '\0';
+ return false;
+}
+
+bool Darwin::UseSjLjExceptions(const ArgList &Args) const {
+ // Darwin uses SjLj exceptions on ARM.
+ if (getTriple().getArch() != llvm::Triple::arm &&
+ getTriple().getArch() != llvm::Triple::thumb)
+ return false;
+
+ // Only watchOS uses the new DWARF/Compact unwinding method.
+ return !isTargetWatchOS();
+}
+
+bool MachO::isPICDefault() const { return true; }
+
+bool MachO::isPIEDefault() const { return false; }
+
+bool MachO::isPICDefaultForced() const {
+ return (getArch() == llvm::Triple::x86_64 ||
+ getArch() == llvm::Triple::aarch64);
+}
+
+bool MachO::SupportsProfiling() const {
+ // Profiling instrumentation is only supported on x86.
+ return getArch() == llvm::Triple::x86 || getArch() == llvm::Triple::x86_64;
+}
+
+void Darwin::addMinVersionArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ VersionTuple TargetVersion = getTargetVersion();
+
+ if (isTargetWatchOS())
+ CmdArgs.push_back("-watchos_version_min");
+ else if (isTargetWatchOSSimulator())
+ CmdArgs.push_back("-watchos_simulator_version_min");
+ else if (isTargetTvOS())
+ CmdArgs.push_back("-tvos_version_min");
+ else if (isTargetTvOSSimulator())
+ CmdArgs.push_back("-tvos_simulator_version_min");
+ else if (isTargetIOSSimulator())
+ CmdArgs.push_back("-ios_simulator_version_min");
+ else if (isTargetIOSBased())
+ CmdArgs.push_back("-iphoneos_version_min");
+ else {
+ assert(isTargetMacOS() && "unexpected target");
+ CmdArgs.push_back("-macosx_version_min");
+ }
+
+ CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
+}
+
+void Darwin::addStartObjectFileArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // Derived from startfile spec.
+ if (Args.hasArg(options::OPT_dynamiclib)) {
+ // Derived from darwin_dylib1 spec.
+ if (isTargetWatchOSBased()) {
+ ; // watchOS does not need dylib1.o.
+ } else if (isTargetIOSSimulator()) {
+ ; // iOS simulator does not need dylib1.o.
+ } else if (isTargetIPhoneOS()) {
+ if (isIPhoneOSVersionLT(3, 1))
+ CmdArgs.push_back("-ldylib1.o");
+ } else {
+ if (isMacosxVersionLT(10, 5))
+ CmdArgs.push_back("-ldylib1.o");
+ else if (isMacosxVersionLT(10, 6))
+ CmdArgs.push_back("-ldylib1.10.5.o");
+ }
+ } else {
+ if (Args.hasArg(options::OPT_bundle)) {
+ if (!Args.hasArg(options::OPT_static)) {
+ // Derived from darwin_bundle1 spec.
+ if (isTargetWatchOSBased()) {
+ ; // watchOS does not need bundle1.o.
+ } else if (isTargetIOSSimulator()) {
+ ; // iOS simulator does not need bundle1.o.
+ } else if (isTargetIPhoneOS()) {
+ if (isIPhoneOSVersionLT(3, 1))
+ CmdArgs.push_back("-lbundle1.o");
+ } else {
+ if (isMacosxVersionLT(10, 6))
+ CmdArgs.push_back("-lbundle1.o");
+ }
+ }
+ } else {
+ if (Args.hasArg(options::OPT_pg) && SupportsProfiling()) {
+ if (Args.hasArg(options::OPT_static) ||
+ Args.hasArg(options::OPT_object) ||
+ Args.hasArg(options::OPT_preload)) {
+ CmdArgs.push_back("-lgcrt0.o");
+ } else {
+ CmdArgs.push_back("-lgcrt1.o");
+
+ // darwin_crt2 spec is empty.
+ }
+ // By default on OS X 10.8 and later, we don't link with a crt1.o
+ // file and the linker knows to use _main as the entry point. But,
+ // when compiling with -pg, we need to link with the gcrt1.o file,
+ // so pass the -no_new_main option to tell the linker to use the
+ // "start" symbol as the entry point.
+ if (isTargetMacOS() && !isMacosxVersionLT(10, 8))
+ CmdArgs.push_back("-no_new_main");
+ } else {
+ if (Args.hasArg(options::OPT_static) ||
+ Args.hasArg(options::OPT_object) ||
+ Args.hasArg(options::OPT_preload)) {
+ CmdArgs.push_back("-lcrt0.o");
+ } else {
+ // Derived from darwin_crt1 spec.
+ if (isTargetWatchOSBased()) {
+ ; // watchOS does not need crt1.o.
+ } else if (isTargetIOSSimulator()) {
+ ; // iOS simulator does not need crt1.o.
+ } else if (isTargetIPhoneOS()) {
+ if (getArch() == llvm::Triple::aarch64)
+ ; // iOS does not need any crt1 files for arm64
+ else if (isIPhoneOSVersionLT(3, 1))
+ CmdArgs.push_back("-lcrt1.o");
+ else if (isIPhoneOSVersionLT(6, 0))
+ CmdArgs.push_back("-lcrt1.3.1.o");
+ } else {
+ if (isMacosxVersionLT(10, 5))
+ CmdArgs.push_back("-lcrt1.o");
+ else if (isMacosxVersionLT(10, 6))
+ CmdArgs.push_back("-lcrt1.10.5.o");
+ else if (isMacosxVersionLT(10, 8))
+ CmdArgs.push_back("-lcrt1.10.6.o");
+
+ // darwin_crt2 spec is empty.
+ }
+ }
+ }
+ }
+ }
+
+ if (!isTargetIPhoneOS() && Args.hasArg(options::OPT_shared_libgcc) &&
+ !isTargetWatchOS() &&
+ isMacosxVersionLT(10, 5)) {
+ const char *Str = Args.MakeArgString(GetFilePath("crt3.o"));
+ CmdArgs.push_back(Str);
+ }
+}
+
+bool Darwin::SupportsObjCGC() const { return isTargetMacOS(); }
+
+void Darwin::CheckObjCARC() const {
+ if (isTargetIOSBased() || isTargetWatchOSBased() ||
+ (isTargetMacOS() && !isMacosxVersionLT(10, 6)))
+ return;
+ getDriver().Diag(diag::err_arc_unsupported_on_toolchain);
+}
+
+SanitizerMask Darwin::getSupportedSanitizers() const {
+ SanitizerMask Res = ToolChain::getSupportedSanitizers();
+ if (isTargetMacOS() || isTargetIOSSimulator())
+ Res |= SanitizerKind::Address;
+ if (isTargetMacOS()) {
+ if (!isMacosxVersionLT(10, 9))
+ Res |= SanitizerKind::Vptr;
+ Res |= SanitizerKind::SafeStack;
+ Res |= SanitizerKind::Thread;
+ }
+ return Res;
+}
+
+/// Generic_GCC - A tool chain using the 'gcc' command to perform
+/// all subcommands; this relies on gcc translating the majority of
+/// command line options.
+
+/// \brief Parse a GCCVersion object out of a string of text.
+///
+/// This is the primary means of forming GCCVersion objects.
+/*static*/
+Generic_GCC::GCCVersion Linux::GCCVersion::Parse(StringRef VersionText) {
+ const GCCVersion BadVersion = {VersionText.str(), -1, -1, -1, "", "", ""};
+ std::pair<StringRef, StringRef> First = VersionText.split('.');
+ std::pair<StringRef, StringRef> Second = First.second.split('.');
+
+ GCCVersion GoodVersion = {VersionText.str(), -1, -1, -1, "", "", ""};
+ if (First.first.getAsInteger(10, GoodVersion.Major) || GoodVersion.Major < 0)
+ return BadVersion;
+ GoodVersion.MajorStr = First.first.str();
+ if (Second.first.getAsInteger(10, GoodVersion.Minor) || GoodVersion.Minor < 0)
+ return BadVersion;
+ GoodVersion.MinorStr = Second.first.str();
+
+ // First look for a number prefix and parse that if present. Otherwise just
+ // stash the entire patch string in the suffix, and leave the number
+  // unspecified. This covers version strings such as:
+ // 4.4
+ // 4.4.0
+ // 4.4.x
+ // 4.4.2-rc4
+ // 4.4.x-patched
+ // And retains any patch number it finds.
+ StringRef PatchText = GoodVersion.PatchSuffix = Second.second.str();
+ if (!PatchText.empty()) {
+ if (size_t EndNumber = PatchText.find_first_not_of("0123456789")) {
+ // Try to parse the number and any suffix.
+ if (PatchText.slice(0, EndNumber).getAsInteger(10, GoodVersion.Patch) ||
+ GoodVersion.Patch < 0)
+ return BadVersion;
+ GoodVersion.PatchSuffix = PatchText.substr(EndNumber);
+ }
+ }
+
+ return GoodVersion;
+}
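A hedged sketch of just the patch-component split, with std::string in place of StringRef: "2-rc4" yields Patch 2 and suffix "-rc4", while "x" leaves Patch at -1 with suffix "x".

#include <cstdlib>
#include <string>

static void splitPatch(const std::string &Text, int &Patch,
                       std::string &Suffix) {
  Patch = -1;
  Suffix = Text;
  if (Text.empty())
    return;
  size_t End = Text.find_first_not_of("0123456789");
  if (End == 0)
    return; // no leading digits: the whole text is the suffix
  Patch = std::atoi(Text.substr(0, End).c_str()); // End == npos takes all
  Suffix = (End == std::string::npos) ? "" : Text.substr(End);
}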
+
+/// \brief Less-than for GCCVersion, implementing a Strict Weak Ordering.
+bool Generic_GCC::GCCVersion::isOlderThan(int RHSMajor, int RHSMinor,
+ int RHSPatch,
+ StringRef RHSPatchSuffix) const {
+ if (Major != RHSMajor)
+ return Major < RHSMajor;
+ if (Minor != RHSMinor)
+ return Minor < RHSMinor;
+ if (Patch != RHSPatch) {
+ // Note that versions without a specified patch sort higher than those with
+ // a patch.
+ if (RHSPatch == -1)
+ return true;
+ if (Patch == -1)
+ return false;
+
+ // Otherwise just sort on the patch itself.
+ return Patch < RHSPatch;
+ }
+ if (PatchSuffix != RHSPatchSuffix) {
+ // Sort empty suffixes higher.
+ if (RHSPatchSuffix.empty())
+ return true;
+ if (PatchSuffix.empty())
+ return false;
+
+ // Provide a lexicographic sort to make this a total ordering.
+ return PatchSuffix < RHSPatchSuffix;
+ }
+
+ // The versions are equal.
+ return false;
+}
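The resulting order can be checked with plain std::tuple comparisons. This hedged sketch maps a missing patch (-1) and an empty suffix to "newest" sentinel values so that, oldest first, 4.4.2-rc4 < 4.4.2 < 4.4 < 4.5.

#include <cassert>
#include <string>
#include <tuple>

// Hedged sketch: encode "missing patch" and "empty suffix" as maximal values.
static std::tuple<int, int, int, std::string>
versionKey(int Major, int Minor, int Patch, const std::string &Suffix) {
  return std::make_tuple(Major, Minor, Patch == -1 ? 1 << 30 : Patch,
                         Suffix.empty() ? std::string("\x7f") : Suffix);
}

int main() {
  assert(versionKey(4, 4, 2, "-rc4") < versionKey(4, 4, 2, "")); // suffixed is older
  assert(versionKey(4, 4, 2, "") < versionKey(4, 4, -1, ""));    // no patch is newer
  assert(versionKey(4, 4, -1, "") < versionKey(4, 5, 0, ""));
  return 0;
}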
+
+static llvm::StringRef getGCCToolchainDir(const ArgList &Args) {
+ const Arg *A = Args.getLastArg(options::OPT_gcc_toolchain);
+ if (A)
+ return A->getValue();
+ return GCC_INSTALL_PREFIX;
+}
+
+/// \brief Initialize a GCCInstallationDetector from the driver.
+///
+/// This performs all of the autodetection and sets up the various paths.
+/// Once constructed, a GCCInstallationDetector is essentially immutable.
+///
+/// FIXME: We shouldn't need an explicit TargetTriple parameter here, and
+/// should instead pull the target out of the driver. This is currently
+/// necessary because the driver doesn't store the final version of the target
+/// triple.
+void Generic_GCC::GCCInstallationDetector::init(
+ const llvm::Triple &TargetTriple, const ArgList &Args,
+ ArrayRef<std::string> ExtraTripleAliases) {
+ llvm::Triple BiarchVariantTriple = TargetTriple.isArch32Bit()
+ ? TargetTriple.get64BitArchVariant()
+ : TargetTriple.get32BitArchVariant();
+ // The library directories which may contain GCC installations.
+ SmallVector<StringRef, 4> CandidateLibDirs, CandidateBiarchLibDirs;
+ // The compatible GCC triples for this particular architecture.
+ SmallVector<StringRef, 16> CandidateTripleAliases;
+ SmallVector<StringRef, 16> CandidateBiarchTripleAliases;
+ CollectLibDirsAndTriples(TargetTriple, BiarchVariantTriple, CandidateLibDirs,
+ CandidateTripleAliases, CandidateBiarchLibDirs,
+ CandidateBiarchTripleAliases);
+
+ // Compute the set of prefixes for our search.
+ SmallVector<std::string, 8> Prefixes(D.PrefixDirs.begin(),
+ D.PrefixDirs.end());
+
+ StringRef GCCToolchainDir = getGCCToolchainDir(Args);
+ if (GCCToolchainDir != "") {
+ if (GCCToolchainDir.back() == '/')
+ GCCToolchainDir = GCCToolchainDir.drop_back(); // remove the /
+
+ Prefixes.push_back(GCCToolchainDir);
+ } else {
+ // If we have a SysRoot, try that first.
+ if (!D.SysRoot.empty()) {
+ Prefixes.push_back(D.SysRoot);
+ Prefixes.push_back(D.SysRoot + "/usr");
+ }
+
+ // Then look for gcc installed alongside clang.
+ Prefixes.push_back(D.InstalledDir + "/..");
+
+ // And finally in /usr.
+ if (D.SysRoot.empty())
+ Prefixes.push_back("/usr");
+ }
+
+ // Loop over the various components which exist and select the best GCC
+ // installation available. GCC installs are ranked by version number.
+ Version = GCCVersion::Parse("0.0.0");
+ for (const std::string &Prefix : Prefixes) {
+ if (!D.getVFS().exists(Prefix))
+ continue;
+ for (StringRef Suffix : CandidateLibDirs) {
+ const std::string LibDir = Prefix + Suffix.str();
+ if (!D.getVFS().exists(LibDir))
+ continue;
+ for (StringRef Candidate : ExtraTripleAliases) // Try these first.
+ ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, Candidate);
+ for (StringRef Candidate : CandidateTripleAliases)
+ ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, Candidate);
+ }
+ for (StringRef Suffix : CandidateBiarchLibDirs) {
+ const std::string LibDir = Prefix + Suffix.str();
+ if (!D.getVFS().exists(LibDir))
+ continue;
+ for (StringRef Candidate : CandidateBiarchTripleAliases)
+ ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, Candidate,
+ /*NeedsBiarchSuffix=*/ true);
+ }
+ }
+}
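Under stated assumptions (ignoring the driver's -B prefix dirs), the search-prefix assembly above reduces to this hedged sketch; for a driver installed in /opt/clang/bin with --sysroot=/sr and no --gcc-toolchain it yields /sr, /sr/usr, and /opt/clang/bin/..

#include <string>
#include <vector>

static std::vector<std::string>
gccSearchPrefixes(const std::string &GCCToolchainDir,
                  const std::string &SysRoot,
                  const std::string &InstalledDir) {
  std::vector<std::string> Prefixes;
  if (!GCCToolchainDir.empty()) { // --gcc-toolchain wins outright
    Prefixes.push_back(GCCToolchainDir);
    return Prefixes;
  }
  if (!SysRoot.empty()) { // a sysroot is tried first...
    Prefixes.push_back(SysRoot);
    Prefixes.push_back(SysRoot + "/usr");
  }
  Prefixes.push_back(InstalledDir + "/.."); // ...then gcc next to clang...
  if (SysRoot.empty())
    Prefixes.push_back("/usr"); // ...and finally /usr, sans sysroot
  return Prefixes;
}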
+
+void Generic_GCC::GCCInstallationDetector::print(raw_ostream &OS) const {
+ for (const auto &InstallPath : CandidateGCCInstallPaths)
+ OS << "Found candidate GCC installation: " << InstallPath << "\n";
+
+ if (!GCCInstallPath.empty())
+ OS << "Selected GCC installation: " << GCCInstallPath << "\n";
+
+ for (const auto &Multilib : Multilibs)
+ OS << "Candidate multilib: " << Multilib << "\n";
+
+ if (Multilibs.size() != 0 || !SelectedMultilib.isDefault())
+ OS << "Selected multilib: " << SelectedMultilib << "\n";
+}
+
+bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
+ if (BiarchSibling.hasValue()) {
+ M = BiarchSibling.getValue();
+ return true;
+ }
+ return false;
+}
+
+/*static*/ void Generic_GCC::GCCInstallationDetector::CollectLibDirsAndTriples(
+ const llvm::Triple &TargetTriple, const llvm::Triple &BiarchTriple,
+ SmallVectorImpl<StringRef> &LibDirs,
+ SmallVectorImpl<StringRef> &TripleAliases,
+ SmallVectorImpl<StringRef> &BiarchLibDirs,
+ SmallVectorImpl<StringRef> &BiarchTripleAliases) {
+ // Declare a bunch of static data sets that we'll select between below. These
+ // are specifically designed to always refer to string literals to avoid any
+ // lifetime or initialization issues.
+ static const char *const AArch64LibDirs[] = {"/lib64", "/lib"};
+ static const char *const AArch64Triples[] = {
+ "aarch64-none-linux-gnu", "aarch64-linux-gnu", "aarch64-linux-android",
+ "aarch64-redhat-linux"};
+ static const char *const AArch64beLibDirs[] = {"/lib"};
+ static const char *const AArch64beTriples[] = {"aarch64_be-none-linux-gnu",
+ "aarch64_be-linux-gnu"};
+
+ static const char *const ARMLibDirs[] = {"/lib"};
+ static const char *const ARMTriples[] = {"arm-linux-gnueabi",
+ "arm-linux-androideabi"};
+ static const char *const ARMHFTriples[] = {"arm-linux-gnueabihf",
+ "armv7hl-redhat-linux-gnueabi"};
+ static const char *const ARMebLibDirs[] = {"/lib"};
+ static const char *const ARMebTriples[] = {"armeb-linux-gnueabi",
+ "armeb-linux-androideabi"};
+ static const char *const ARMebHFTriples[] = {
+ "armeb-linux-gnueabihf", "armebv7hl-redhat-linux-gnueabi"};
+
+ static const char *const X86_64LibDirs[] = {"/lib64", "/lib"};
+ static const char *const X86_64Triples[] = {
+ "x86_64-linux-gnu", "x86_64-unknown-linux-gnu",
+ "x86_64-pc-linux-gnu", "x86_64-redhat-linux6E",
+ "x86_64-redhat-linux", "x86_64-suse-linux",
+ "x86_64-manbo-linux-gnu", "x86_64-linux-gnu",
+ "x86_64-slackware-linux", "x86_64-linux-android",
+ "x86_64-unknown-linux"};
+ static const char *const X32LibDirs[] = {"/libx32"};
+ static const char *const X86LibDirs[] = {"/lib32", "/lib"};
+ static const char *const X86Triples[] = {
+ "i686-linux-gnu", "i686-pc-linux-gnu", "i486-linux-gnu",
+ "i386-linux-gnu", "i386-redhat-linux6E", "i686-redhat-linux",
+ "i586-redhat-linux", "i386-redhat-linux", "i586-suse-linux",
+ "i486-slackware-linux", "i686-montavista-linux", "i686-linux-android",
+ "i586-linux-gnu"};
+
+ static const char *const MIPSLibDirs[] = {"/lib"};
+ static const char *const MIPSTriples[] = {"mips-linux-gnu", "mips-mti-linux",
+ "mips-mti-linux-gnu",
+ "mips-img-linux-gnu"};
+ static const char *const MIPSELLibDirs[] = {"/lib"};
+ static const char *const MIPSELTriples[] = {
+ "mipsel-linux-gnu", "mipsel-linux-android", "mips-img-linux-gnu"};
+
+ static const char *const MIPS64LibDirs[] = {"/lib64", "/lib"};
+ static const char *const MIPS64Triples[] = {
+ "mips64-linux-gnu", "mips-mti-linux-gnu", "mips-img-linux-gnu",
+ "mips64-linux-gnuabi64"};
+ static const char *const MIPS64ELLibDirs[] = {"/lib64", "/lib"};
+ static const char *const MIPS64ELTriples[] = {
+ "mips64el-linux-gnu", "mips-mti-linux-gnu", "mips-img-linux-gnu",
+ "mips64el-linux-android", "mips64el-linux-gnuabi64"};
+
+ static const char *const PPCLibDirs[] = {"/lib32", "/lib"};
+ static const char *const PPCTriples[] = {
+ "powerpc-linux-gnu", "powerpc-unknown-linux-gnu", "powerpc-linux-gnuspe",
+ "powerpc-suse-linux", "powerpc-montavista-linuxspe"};
+ static const char *const PPC64LibDirs[] = {"/lib64", "/lib"};
+ static const char *const PPC64Triples[] = {
+ "powerpc64-linux-gnu", "powerpc64-unknown-linux-gnu",
+ "powerpc64-suse-linux", "ppc64-redhat-linux"};
+ static const char *const PPC64LELibDirs[] = {"/lib64", "/lib"};
+ static const char *const PPC64LETriples[] = {
+ "powerpc64le-linux-gnu", "powerpc64le-unknown-linux-gnu",
+ "powerpc64le-suse-linux", "ppc64le-redhat-linux"};
+
+ static const char *const SPARCv8LibDirs[] = {"/lib32", "/lib"};
+ static const char *const SPARCv8Triples[] = {"sparc-linux-gnu",
+ "sparcv8-linux-gnu"};
+ static const char *const SPARCv9LibDirs[] = {"/lib64", "/lib"};
+ static const char *const SPARCv9Triples[] = {"sparc64-linux-gnu",
+ "sparcv9-linux-gnu"};
+
+ static const char *const SystemZLibDirs[] = {"/lib64", "/lib"};
+ static const char *const SystemZTriples[] = {
+ "s390x-linux-gnu", "s390x-unknown-linux-gnu", "s390x-ibm-linux-gnu",
+ "s390x-suse-linux", "s390x-redhat-linux"};
+
+ // Solaris.
+ static const char *const SolarisSPARCLibDirs[] = {"/gcc"};
+ static const char *const SolarisSPARCTriples[] = {"sparc-sun-solaris2.11",
+ "i386-pc-solaris2.11"};
+
+ using std::begin;
+ using std::end;
+
+ if (TargetTriple.getOS() == llvm::Triple::Solaris) {
+ LibDirs.append(begin(SolarisSPARCLibDirs), end(SolarisSPARCLibDirs));
+ TripleAliases.append(begin(SolarisSPARCTriples), end(SolarisSPARCTriples));
+ return;
+ }
+
+ switch (TargetTriple.getArch()) {
+ case llvm::Triple::aarch64:
+ LibDirs.append(begin(AArch64LibDirs), end(AArch64LibDirs));
+ TripleAliases.append(begin(AArch64Triples), end(AArch64Triples));
+ BiarchLibDirs.append(begin(AArch64LibDirs), end(AArch64LibDirs));
+ BiarchTripleAliases.append(begin(AArch64Triples), end(AArch64Triples));
+ break;
+ case llvm::Triple::aarch64_be:
+ LibDirs.append(begin(AArch64beLibDirs), end(AArch64beLibDirs));
+ TripleAliases.append(begin(AArch64beTriples), end(AArch64beTriples));
+ BiarchLibDirs.append(begin(AArch64beLibDirs), end(AArch64beLibDirs));
+ BiarchTripleAliases.append(begin(AArch64beTriples), end(AArch64beTriples));
+ break;
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ LibDirs.append(begin(ARMLibDirs), end(ARMLibDirs));
+ if (TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHF) {
+ TripleAliases.append(begin(ARMHFTriples), end(ARMHFTriples));
+ } else {
+ TripleAliases.append(begin(ARMTriples), end(ARMTriples));
+ }
+ break;
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumbeb:
+ LibDirs.append(begin(ARMebLibDirs), end(ARMebLibDirs));
+ if (TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHF) {
+ TripleAliases.append(begin(ARMebHFTriples), end(ARMebHFTriples));
+ } else {
+ TripleAliases.append(begin(ARMebTriples), end(ARMebTriples));
+ }
+ break;
+ case llvm::Triple::x86_64:
+ LibDirs.append(begin(X86_64LibDirs), end(X86_64LibDirs));
+ TripleAliases.append(begin(X86_64Triples), end(X86_64Triples));
+    // x32 is always available when x86_64 is, so add it as a secondary arch
+    // with the x86_64 triples.
+ if (TargetTriple.getEnvironment() == llvm::Triple::GNUX32) {
+ BiarchLibDirs.append(begin(X32LibDirs), end(X32LibDirs));
+ BiarchTripleAliases.append(begin(X86_64Triples), end(X86_64Triples));
+ } else {
+ BiarchLibDirs.append(begin(X86LibDirs), end(X86LibDirs));
+ BiarchTripleAliases.append(begin(X86Triples), end(X86Triples));
+ }
+ break;
+ case llvm::Triple::x86:
+ LibDirs.append(begin(X86LibDirs), end(X86LibDirs));
+ TripleAliases.append(begin(X86Triples), end(X86Triples));
+ BiarchLibDirs.append(begin(X86_64LibDirs), end(X86_64LibDirs));
+ BiarchTripleAliases.append(begin(X86_64Triples), end(X86_64Triples));
+ break;
+ case llvm::Triple::mips:
+ LibDirs.append(begin(MIPSLibDirs), end(MIPSLibDirs));
+ TripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
+ BiarchLibDirs.append(begin(MIPS64LibDirs), end(MIPS64LibDirs));
+ BiarchTripleAliases.append(begin(MIPS64Triples), end(MIPS64Triples));
+ break;
+ case llvm::Triple::mipsel:
+ LibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
+ TripleAliases.append(begin(MIPSELTriples), end(MIPSELTriples));
+ TripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
+ BiarchLibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
+ BiarchTripleAliases.append(begin(MIPS64ELTriples), end(MIPS64ELTriples));
+ break;
+ case llvm::Triple::mips64:
+ LibDirs.append(begin(MIPS64LibDirs), end(MIPS64LibDirs));
+ TripleAliases.append(begin(MIPS64Triples), end(MIPS64Triples));
+ BiarchLibDirs.append(begin(MIPSLibDirs), end(MIPSLibDirs));
+ BiarchTripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
+ break;
+ case llvm::Triple::mips64el:
+ LibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
+ TripleAliases.append(begin(MIPS64ELTriples), end(MIPS64ELTriples));
+ BiarchLibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
+ BiarchTripleAliases.append(begin(MIPSELTriples), end(MIPSELTriples));
+ BiarchTripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
+ break;
+ case llvm::Triple::ppc:
+ LibDirs.append(begin(PPCLibDirs), end(PPCLibDirs));
+ TripleAliases.append(begin(PPCTriples), end(PPCTriples));
+ BiarchLibDirs.append(begin(PPC64LibDirs), end(PPC64LibDirs));
+ BiarchTripleAliases.append(begin(PPC64Triples), end(PPC64Triples));
+ break;
+ case llvm::Triple::ppc64:
+ LibDirs.append(begin(PPC64LibDirs), end(PPC64LibDirs));
+ TripleAliases.append(begin(PPC64Triples), end(PPC64Triples));
+ BiarchLibDirs.append(begin(PPCLibDirs), end(PPCLibDirs));
+ BiarchTripleAliases.append(begin(PPCTriples), end(PPCTriples));
+ break;
+ case llvm::Triple::ppc64le:
+ LibDirs.append(begin(PPC64LELibDirs), end(PPC64LELibDirs));
+ TripleAliases.append(begin(PPC64LETriples), end(PPC64LETriples));
+ break;
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcel:
+ LibDirs.append(begin(SPARCv8LibDirs), end(SPARCv8LibDirs));
+ TripleAliases.append(begin(SPARCv8Triples), end(SPARCv8Triples));
+ BiarchLibDirs.append(begin(SPARCv9LibDirs), end(SPARCv9LibDirs));
+ BiarchTripleAliases.append(begin(SPARCv9Triples), end(SPARCv9Triples));
+ break;
+ case llvm::Triple::sparcv9:
+ LibDirs.append(begin(SPARCv9LibDirs), end(SPARCv9LibDirs));
+ TripleAliases.append(begin(SPARCv9Triples), end(SPARCv9Triples));
+ BiarchLibDirs.append(begin(SPARCv8LibDirs), end(SPARCv8LibDirs));
+ BiarchTripleAliases.append(begin(SPARCv8Triples), end(SPARCv8Triples));
+ break;
+ case llvm::Triple::systemz:
+ LibDirs.append(begin(SystemZLibDirs), end(SystemZLibDirs));
+ TripleAliases.append(begin(SystemZTriples), end(SystemZTriples));
+ break;
+ default:
+ // By default, just rely on the standard lib directories and the original
+ // triple.
+ break;
+ }
+
+  // Always append the driver's target triple to the end, in case it doesn't
+  // match any of our aliases.
+ TripleAliases.push_back(TargetTriple.str());
+
+ // Also include the multiarch variant if it's different.
+ if (TargetTriple.str() != BiarchTriple.str())
+ BiarchTripleAliases.push_back(BiarchTriple.str());
+}
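+
+// For illustration only: for an x86_64 target with the GNUX32 environment,
+// the switch above yields LibDirs == X86_64LibDirs, TripleAliases ==
+// X86_64Triples, BiarchLibDirs == X32LibDirs and BiarchTripleAliases ==
+// X86_64Triples; the target triple itself (and BiarchTriple, if distinct)
+// is appended afterwards as a final alias.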
+
+/// \brief Try common CUDA installation paths, looking for the files we need
+/// for CUDA compilation.
+
+void Generic_GCC::CudaInstallationDetector::init(
+ const llvm::Triple &TargetTriple, const llvm::opt::ArgList &Args) {
+ SmallVector<std::string, 4> CudaPathCandidates;
+
+ if (Args.hasArg(options::OPT_cuda_path_EQ))
+ CudaPathCandidates.push_back(
+ Args.getLastArgValue(options::OPT_cuda_path_EQ));
+ else {
+ CudaPathCandidates.push_back(D.SysRoot + "/usr/local/cuda");
+ CudaPathCandidates.push_back(D.SysRoot + "/usr/local/cuda-7.5");
+ CudaPathCandidates.push_back(D.SysRoot + "/usr/local/cuda-7.0");
+ }
+
+ for (const auto &CudaPath : CudaPathCandidates) {
+ if (CudaPath.empty() || !D.getVFS().exists(CudaPath))
+ continue;
+
+ CudaInstallPath = CudaPath;
+ CudaIncludePath = CudaInstallPath + "/include";
+ CudaLibDevicePath = CudaInstallPath + "/nvvm/libdevice";
+ CudaLibPath =
+ CudaInstallPath + (TargetTriple.isArch64Bit() ? "/lib64" : "/lib");
+
+ if (!(D.getVFS().exists(CudaIncludePath) &&
+ D.getVFS().exists(CudaLibPath) &&
+ D.getVFS().exists(CudaLibDevicePath)))
+ continue;
+
+ std::error_code EC;
+ for (llvm::sys::fs::directory_iterator LI(CudaLibDevicePath, EC), LE;
+ !EC && LI != LE; LI = LI.increment(EC)) {
+ StringRef FilePath = LI->path();
+ StringRef FileName = llvm::sys::path::filename(FilePath);
+ // Process all bitcode filenames that look like libdevice.compute_XX.YY.bc
+ const StringRef LibDeviceName = "libdevice.";
+ if (!(FileName.startswith(LibDeviceName) && FileName.endswith(".bc")))
+ continue;
+ StringRef GpuArch = FileName.slice(
+ LibDeviceName.size(), FileName.find('.', LibDeviceName.size()));
+ CudaLibDeviceMap[GpuArch] = FilePath.str();
+      // Insert map entries for specific devices with this compute capability.
+      if (GpuArch == "compute_20") {
+        CudaLibDeviceMap["sm_20"] = FilePath.str();
+        CudaLibDeviceMap["sm_21"] = FilePath.str();
+      } else if (GpuArch == "compute_30") {
+        CudaLibDeviceMap["sm_30"] = FilePath.str();
+        CudaLibDeviceMap["sm_32"] = FilePath.str();
+      } else if (GpuArch == "compute_35") {
+        CudaLibDeviceMap["sm_35"] = FilePath.str();
+        CudaLibDeviceMap["sm_37"] = FilePath.str();
+      }
+ }
+
+ IsValid = true;
+ break;
+ }
+}
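+
+// Usage sketch: callers run init() and then query the detector, e.g.
+//   CudaInstallation.init(TargetTriple, Args);
+//   if (CudaInstallation.isValid())
+//     CudaInstallation.print(llvm::errs());
+// Once valid, CudaLibDeviceMap maps a GPU arch such as "sm_35" to the
+// libdevice.compute_35.*.bc file discovered above.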
+
+void Generic_GCC::CudaInstallationDetector::print(raw_ostream &OS) const {
+ if (isValid())
+ OS << "Found CUDA installation: " << CudaInstallPath << "\n";
+}
+
+namespace {
+// Filter to remove Multilibs that don't exist as a suffix to Path
+class FilterNonExistent {
+ StringRef Base;
+ vfs::FileSystem &VFS;
+
+public:
+ FilterNonExistent(StringRef Base, vfs::FileSystem &VFS)
+ : Base(Base), VFS(VFS) {}
+ bool operator()(const Multilib &M) {
+ return !VFS.exists(Base + M.gccSuffix() + "/crtbegin.o");
+ }
+};
+} // end anonymous namespace
+
+static void addMultilibFlag(bool Enabled, const char *const Flag,
+ std::vector<std::string> &Flags) {
+ if (Enabled)
+ Flags.push_back(std::string("+") + Flag);
+ else
+ Flags.push_back(std::string("-") + Flag);
+}
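+
+// For example, addMultilibFlag(true, "m64", Flags) appends "+m64" and
+// addMultilibFlag(false, "m32", Flags) appends "-m32"; multilib selection
+// below matches these "+"/"-" entries against each Multilib's flags.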
+
+static bool isMipsArch(llvm::Triple::ArchType Arch) {
+ return Arch == llvm::Triple::mips || Arch == llvm::Triple::mipsel ||
+ Arch == llvm::Triple::mips64 || Arch == llvm::Triple::mips64el;
+}
+
+static bool isMips32(llvm::Triple::ArchType Arch) {
+ return Arch == llvm::Triple::mips || Arch == llvm::Triple::mipsel;
+}
+
+static bool isMips64(llvm::Triple::ArchType Arch) {
+ return Arch == llvm::Triple::mips64 || Arch == llvm::Triple::mips64el;
+}
+
+static bool isMipsEL(llvm::Triple::ArchType Arch) {
+ return Arch == llvm::Triple::mipsel || Arch == llvm::Triple::mips64el;
+}
+
+static bool isMips16(const ArgList &Args) {
+ Arg *A = Args.getLastArg(options::OPT_mips16, options::OPT_mno_mips16);
+ return A && A->getOption().matches(options::OPT_mips16);
+}
+
+static bool isMicroMips(const ArgList &Args) {
+ Arg *A = Args.getLastArg(options::OPT_mmicromips, options::OPT_mno_micromips);
+ return A && A->getOption().matches(options::OPT_mmicromips);
+}
+
+namespace {
+struct DetectedMultilibs {
+ /// The set of multilibs that the detected installation supports.
+ MultilibSet Multilibs;
+
+ /// The primary multilib appropriate for the given flags.
+ Multilib SelectedMultilib;
+
+ /// On Biarch systems, this corresponds to the default multilib when
+ /// targeting the non-default multilib. Otherwise, it is empty.
+ llvm::Optional<Multilib> BiarchSibling;
+};
+} // end anonymous namespace
+
+static Multilib makeMultilib(StringRef commonSuffix) {
+ return Multilib(commonSuffix, commonSuffix, commonSuffix);
+}
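+
+// makeMultilib("/mips16") builds a Multilib whose GCC, OS and include
+// suffixes are all "/mips16"; the call sites below override individual
+// suffixes via gccSuffix()/osSuffix()/includeSuffix() where needed.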
+
+static bool findMIPSMultilibs(const Driver &D, const llvm::Triple &TargetTriple,
+ StringRef Path, const ArgList &Args,
+ DetectedMultilibs &Result) {
+  // Some MIPS toolchains put libraries and object files compiled
+  // using different options into subdirectories whose names reflect
+  // the flags used for compilation. For example, a sysroot directory
+  // might look like one of the following:
+ //
+ // /usr
+ // /lib <= crt*.o files compiled with '-mips32'
+ // /mips16
+ // /usr
+ // /lib <= crt*.o files compiled with '-mips16'
+ // /el
+ // /usr
+ // /lib <= crt*.o files compiled with '-mips16 -EL'
+ //
+ // or
+ //
+ // /usr
+ // /lib <= crt*.o files compiled with '-mips32r2'
+ // /mips16
+ // /usr
+ // /lib <= crt*.o files compiled with '-mips32r2 -mips16'
+ // /mips32
+ // /usr
+ // /lib <= crt*.o files compiled with '-mips32'
+
+ FilterNonExistent NonExistent(Path, D.getVFS());
+
+ // Check for FSF toolchain multilibs
+ MultilibSet FSFMipsMultilibs;
+ {
+ auto MArchMips32 = makeMultilib("/mips32")
+ .flag("+m32")
+ .flag("-m64")
+ .flag("-mmicromips")
+ .flag("+march=mips32");
+
+ auto MArchMicroMips = makeMultilib("/micromips")
+ .flag("+m32")
+ .flag("-m64")
+ .flag("+mmicromips");
+
+ auto MArchMips64r2 = makeMultilib("/mips64r2")
+ .flag("-m32")
+ .flag("+m64")
+ .flag("+march=mips64r2");
+
+ auto MArchMips64 = makeMultilib("/mips64").flag("-m32").flag("+m64").flag(
+ "-march=mips64r2");
+
+ auto MArchDefault = makeMultilib("")
+ .flag("+m32")
+ .flag("-m64")
+ .flag("-mmicromips")
+ .flag("+march=mips32r2");
+
+ auto Mips16 = makeMultilib("/mips16").flag("+mips16");
+
+ auto UCLibc = makeMultilib("/uclibc").flag("+muclibc");
+
+ auto MAbi64 =
+ makeMultilib("/64").flag("+mabi=n64").flag("-mabi=n32").flag("-m32");
+
+ auto BigEndian = makeMultilib("").flag("+EB").flag("-EL");
+
+ auto LittleEndian = makeMultilib("/el").flag("+EL").flag("-EB");
+
+ auto SoftFloat = makeMultilib("/sof").flag("+msoft-float");
+
+ auto Nan2008 = makeMultilib("/nan2008").flag("+mnan=2008");
+
+ FSFMipsMultilibs =
+ MultilibSet()
+ .Either(MArchMips32, MArchMicroMips, MArchMips64r2, MArchMips64,
+ MArchDefault)
+ .Maybe(UCLibc)
+ .Maybe(Mips16)
+ .FilterOut("/mips64/mips16")
+ .FilterOut("/mips64r2/mips16")
+ .FilterOut("/micromips/mips16")
+ .Maybe(MAbi64)
+ .FilterOut("/micromips/64")
+ .FilterOut("/mips32/64")
+ .FilterOut("^/64")
+ .FilterOut("/mips16/64")
+ .Either(BigEndian, LittleEndian)
+ .Maybe(SoftFloat)
+ .Maybe(Nan2008)
+ .FilterOut(".*sof/nan2008")
+ .FilterOut(NonExistent)
+ .setIncludeDirsCallback([](StringRef InstallDir,
+ StringRef TripleStr, const Multilib &M) {
+ std::vector<std::string> Dirs;
+ Dirs.push_back((InstallDir + "/include").str());
+ std::string SysRootInc =
+ InstallDir.str() + "/../../../../sysroot";
+ if (StringRef(M.includeSuffix()).startswith("/uclibc"))
+ Dirs.push_back(SysRootInc + "/uclibc/usr/include");
+ else
+ Dirs.push_back(SysRootInc + "/usr/include");
+ return Dirs;
+ });
+ }
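+
+  // Selection sketch: a flag list such as {+m32, -m64, +mips16, +EL,
+  // +mnan=2008} would resolve FSFMipsMultilibs to a suffix like
+  // "/mips16/el/nan2008", provided that directory survived the
+  // FilterOut(NonExistent) pass.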
+
+ // Check for Musl toolchain multilibs
+ MultilibSet MuslMipsMultilibs;
+ {
+ auto MArchMipsR2 = makeMultilib("")
+ .osSuffix("/mips-r2-hard-musl")
+ .flag("+EB")
+ .flag("-EL")
+ .flag("+march=mips32r2");
+
+ auto MArchMipselR2 = makeMultilib("/mipsel-r2-hard-musl")
+ .flag("-EB")
+ .flag("+EL")
+ .flag("+march=mips32r2");
+
+ MuslMipsMultilibs = MultilibSet().Either(MArchMipsR2, MArchMipselR2);
+
+ // Specify the callback that computes the include directories.
+ MuslMipsMultilibs.setIncludeDirsCallback([](
+ StringRef InstallDir, StringRef TripleStr, const Multilib &M) {
+ std::vector<std::string> Dirs;
+ Dirs.push_back(
+ (InstallDir + "/../sysroot" + M.osSuffix() + "/usr/include").str());
+ return Dirs;
+ });
+ }
+
+ // Check for Code Sourcery toolchain multilibs
+ MultilibSet CSMipsMultilibs;
+ {
+ auto MArchMips16 = makeMultilib("/mips16").flag("+m32").flag("+mips16");
+
+ auto MArchMicroMips =
+ makeMultilib("/micromips").flag("+m32").flag("+mmicromips");
+
+ auto MArchDefault = makeMultilib("").flag("-mips16").flag("-mmicromips");
+
+ auto UCLibc = makeMultilib("/uclibc").flag("+muclibc");
+
+ auto SoftFloat = makeMultilib("/soft-float").flag("+msoft-float");
+
+ auto Nan2008 = makeMultilib("/nan2008").flag("+mnan=2008");
+
+ auto DefaultFloat =
+ makeMultilib("").flag("-msoft-float").flag("-mnan=2008");
+
+ auto BigEndian = makeMultilib("").flag("+EB").flag("-EL");
+
+ auto LittleEndian = makeMultilib("/el").flag("+EL").flag("-EB");
+
+ // Note that this one's osSuffix is ""
+ auto MAbi64 = makeMultilib("")
+ .gccSuffix("/64")
+ .includeSuffix("/64")
+ .flag("+mabi=n64")
+ .flag("-mabi=n32")
+ .flag("-m32");
+
+ CSMipsMultilibs =
+ MultilibSet()
+ .Either(MArchMips16, MArchMicroMips, MArchDefault)
+ .Maybe(UCLibc)
+ .Either(SoftFloat, Nan2008, DefaultFloat)
+ .FilterOut("/micromips/nan2008")
+ .FilterOut("/mips16/nan2008")
+ .Either(BigEndian, LittleEndian)
+ .Maybe(MAbi64)
+ .FilterOut("/mips16.*/64")
+ .FilterOut("/micromips.*/64")
+ .FilterOut(NonExistent)
+ .setIncludeDirsCallback([](StringRef InstallDir,
+ StringRef TripleStr, const Multilib &M) {
+ std::vector<std::string> Dirs;
+ Dirs.push_back((InstallDir + "/include").str());
+ std::string SysRootInc =
+ InstallDir.str() + "/../../../../" + TripleStr.str();
+ if (StringRef(M.includeSuffix()).startswith("/uclibc"))
+ Dirs.push_back(SysRootInc + "/libc/uclibc/usr/include");
+ else
+ Dirs.push_back(SysRootInc + "/libc/usr/include");
+ return Dirs;
+ });
+ }
+
+ MultilibSet AndroidMipsMultilibs =
+ MultilibSet()
+ .Maybe(Multilib("/mips-r2").flag("+march=mips32r2"))
+ .Maybe(Multilib("/mips-r6").flag("+march=mips32r6"))
+ .FilterOut(NonExistent);
+
+ MultilibSet DebianMipsMultilibs;
+ {
+ Multilib MAbiN32 =
+ Multilib().gccSuffix("/n32").includeSuffix("/n32").flag("+mabi=n32");
+
+ Multilib M64 = Multilib()
+ .gccSuffix("/64")
+ .includeSuffix("/64")
+ .flag("+m64")
+ .flag("-m32")
+ .flag("-mabi=n32");
+
+ Multilib M32 = Multilib().flag("-m64").flag("+m32").flag("-mabi=n32");
+
+ DebianMipsMultilibs =
+ MultilibSet().Either(M32, M64, MAbiN32).FilterOut(NonExistent);
+ }
+
+ MultilibSet ImgMultilibs;
+ {
+ auto Mips64r6 = makeMultilib("/mips64r6").flag("+m64").flag("-m32");
+
+ auto LittleEndian = makeMultilib("/el").flag("+EL").flag("-EB");
+
+ auto MAbi64 =
+ makeMultilib("/64").flag("+mabi=n64").flag("-mabi=n32").flag("-m32");
+
+ ImgMultilibs =
+ MultilibSet()
+ .Maybe(Mips64r6)
+ .Maybe(MAbi64)
+ .Maybe(LittleEndian)
+ .FilterOut(NonExistent)
+ .setIncludeDirsCallback([](StringRef InstallDir,
+ StringRef TripleStr, const Multilib &M) {
+ std::vector<std::string> Dirs;
+ Dirs.push_back((InstallDir + "/include").str());
+ Dirs.push_back(
+ (InstallDir + "/../../../../sysroot/usr/include").str());
+ return Dirs;
+ });
+ }
+
+ StringRef CPUName;
+ StringRef ABIName;
+ tools::mips::getMipsCPUAndABI(Args, TargetTriple, CPUName, ABIName);
+
+ llvm::Triple::ArchType TargetArch = TargetTriple.getArch();
+
+ Multilib::flags_list Flags;
+ addMultilibFlag(isMips32(TargetArch), "m32", Flags);
+ addMultilibFlag(isMips64(TargetArch), "m64", Flags);
+ addMultilibFlag(isMips16(Args), "mips16", Flags);
+ addMultilibFlag(CPUName == "mips32", "march=mips32", Flags);
+ addMultilibFlag(CPUName == "mips32r2" || CPUName == "mips32r3" ||
+ CPUName == "mips32r5" || CPUName == "p5600",
+ "march=mips32r2", Flags);
+ addMultilibFlag(CPUName == "mips32r6", "march=mips32r6", Flags);
+ addMultilibFlag(CPUName == "mips64", "march=mips64", Flags);
+ addMultilibFlag(CPUName == "mips64r2" || CPUName == "mips64r3" ||
+ CPUName == "mips64r5" || CPUName == "octeon",
+ "march=mips64r2", Flags);
+ addMultilibFlag(isMicroMips(Args), "mmicromips", Flags);
+ addMultilibFlag(tools::mips::isUCLibc(Args), "muclibc", Flags);
+ addMultilibFlag(tools::mips::isNaN2008(Args, TargetTriple), "mnan=2008",
+ Flags);
+ addMultilibFlag(ABIName == "n32", "mabi=n32", Flags);
+ addMultilibFlag(ABIName == "n64", "mabi=n64", Flags);
+ addMultilibFlag(isSoftFloatABI(Args), "msoft-float", Flags);
+ addMultilibFlag(!isSoftFloatABI(Args), "mhard-float", Flags);
+ addMultilibFlag(isMipsEL(TargetArch), "EL", Flags);
+ addMultilibFlag(!isMipsEL(TargetArch), "EB", Flags);
+
+ if (TargetTriple.isAndroid()) {
+ // Select Android toolchain. It's the only choice in that case.
+ if (AndroidMipsMultilibs.select(Flags, Result.SelectedMultilib)) {
+ Result.Multilibs = AndroidMipsMultilibs;
+ return true;
+ }
+ return false;
+ }
+
+ if (TargetTriple.getVendor() == llvm::Triple::MipsTechnologies &&
+ TargetTriple.getOS() == llvm::Triple::Linux &&
+ TargetTriple.getEnvironment() == llvm::Triple::UnknownEnvironment) {
+ if (MuslMipsMultilibs.select(Flags, Result.SelectedMultilib)) {
+ Result.Multilibs = MuslMipsMultilibs;
+ return true;
+ }
+ return false;
+ }
+
+ if (TargetTriple.getVendor() == llvm::Triple::ImaginationTechnologies &&
+ TargetTriple.getOS() == llvm::Triple::Linux &&
+ TargetTriple.getEnvironment() == llvm::Triple::GNU) {
+ // Select mips-img-linux-gnu toolchain.
+ if (ImgMultilibs.select(Flags, Result.SelectedMultilib)) {
+ Result.Multilibs = ImgMultilibs;
+ return true;
+ }
+ return false;
+ }
+
+  // Sort the candidates. The toolchain that covers the most directories goes
+  // first. Then select the first toolchain that matches the command-line
+  // flags.
+ MultilibSet *candidates[] = {&DebianMipsMultilibs, &FSFMipsMultilibs,
+ &CSMipsMultilibs};
+ std::sort(
+ std::begin(candidates), std::end(candidates),
+ [](MultilibSet *a, MultilibSet *b) { return a->size() > b->size(); });
+ for (const auto &candidate : candidates) {
+ if (candidate->select(Flags, Result.SelectedMultilib)) {
+ if (candidate == &DebianMipsMultilibs)
+ Result.BiarchSibling = Multilib();
+ Result.Multilibs = *candidate;
+ return true;
+ }
+ }
+
+ {
+    // Fall back to the regular toolchain-tree structure.
+ Multilib Default;
+ Result.Multilibs.push_back(Default);
+ Result.Multilibs.FilterOut(NonExistent);
+
+ if (Result.Multilibs.select(Flags, Result.SelectedMultilib)) {
+ Result.BiarchSibling = Multilib();
+ return true;
+ }
+ }
+
+ return false;
+}
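+
+// Usage sketch (see MipsLLVMToolChain below for a real call site):
+//   DetectedMultilibs Detected;
+//   if (findMIPSMultilibs(D, TargetTriple, Path, Args, Detected)) {
+//     Multilibs = Detected.Multilibs;
+//     SelectedMultilib = Detected.SelectedMultilib;
+//   }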
+
+static bool findBiarchMultilibs(const Driver &D,
+ const llvm::Triple &TargetTriple,
+ StringRef Path, const ArgList &Args,
+ bool NeedsBiarchSuffix,
+ DetectedMultilibs &Result) {
+ // Some versions of SUSE and Fedora on ppc64 put 32-bit libs
+ // in what would normally be GCCInstallPath and put the 64-bit
+ // libs in a subdirectory named 64. The simple logic we follow is that
+ // *if* there is a subdirectory of the right name with crtbegin.o in it,
+ // we use that. If not, and if not a biarch triple alias, we look for
+ // crtbegin.o without the subdirectory.
+
+ Multilib Default;
+ Multilib Alt64 = Multilib()
+ .gccSuffix("/64")
+ .includeSuffix("/64")
+ .flag("-m32")
+ .flag("+m64")
+ .flag("-mx32");
+ Multilib Alt32 = Multilib()
+ .gccSuffix("/32")
+ .includeSuffix("/32")
+ .flag("+m32")
+ .flag("-m64")
+ .flag("-mx32");
+ Multilib Altx32 = Multilib()
+ .gccSuffix("/x32")
+ .includeSuffix("/x32")
+ .flag("-m32")
+ .flag("-m64")
+ .flag("+mx32");
+
+ FilterNonExistent NonExistent(Path, D.getVFS());
+
+ // Determine default multilib from: 32, 64, x32
+ // Also handle cases such as 64 on 32, 32 on 64, etc.
+ enum { UNKNOWN, WANT32, WANT64, WANTX32 } Want = UNKNOWN;
+ const bool IsX32 = TargetTriple.getEnvironment() == llvm::Triple::GNUX32;
+ if (TargetTriple.isArch32Bit() && !NonExistent(Alt32))
+ Want = WANT64;
+ else if (TargetTriple.isArch64Bit() && IsX32 && !NonExistent(Altx32))
+ Want = WANT64;
+ else if (TargetTriple.isArch64Bit() && !IsX32 && !NonExistent(Alt64))
+ Want = WANT32;
+ else {
+ if (TargetTriple.isArch32Bit())
+ Want = NeedsBiarchSuffix ? WANT64 : WANT32;
+ else if (IsX32)
+ Want = NeedsBiarchSuffix ? WANT64 : WANTX32;
+ else
+ Want = NeedsBiarchSuffix ? WANT32 : WANT64;
+ }
+
+ if (Want == WANT32)
+ Default.flag("+m32").flag("-m64").flag("-mx32");
+ else if (Want == WANT64)
+ Default.flag("-m32").flag("+m64").flag("-mx32");
+ else if (Want == WANTX32)
+ Default.flag("-m32").flag("-m64").flag("+mx32");
+ else
+ return false;
+
+ Result.Multilibs.push_back(Default);
+ Result.Multilibs.push_back(Alt64);
+ Result.Multilibs.push_back(Alt32);
+ Result.Multilibs.push_back(Altx32);
+
+ Result.Multilibs.FilterOut(NonExistent);
+
+ Multilib::flags_list Flags;
+ addMultilibFlag(TargetTriple.isArch64Bit() && !IsX32, "m64", Flags);
+ addMultilibFlag(TargetTriple.isArch32Bit(), "m32", Flags);
+ addMultilibFlag(TargetTriple.isArch64Bit() && IsX32, "mx32", Flags);
+
+ if (!Result.Multilibs.select(Flags, Result.SelectedMultilib))
+ return false;
+
+ if (Result.SelectedMultilib == Alt64 || Result.SelectedMultilib == Alt32 ||
+ Result.SelectedMultilib == Altx32)
+ Result.BiarchSibling = Default;
+
+ return true;
+}
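+
+// For example, an i686-linux-gnu target against a 64-bit installation that
+// provides a "32" subdirectory: Alt32 exists, so Want == WANT64, the flag
+// list carries "+m32", Alt32 is selected, and BiarchSibling is set to the
+// 64-bit Default multilib.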
+
+void Generic_GCC::GCCInstallationDetector::scanLibDirForGCCTripleSolaris(
+ const llvm::Triple &TargetArch, const llvm::opt::ArgList &Args,
+ const std::string &LibDir, StringRef CandidateTriple,
+ bool NeedsBiarchSuffix) {
+ // Solaris is a special case. The GCC installation is under
+ // /usr/gcc/<major>.<minor>/lib/gcc/<triple>/<major>.<minor>.<patch>/, so we
+ // need to iterate twice.
+ std::error_code EC;
+ for (vfs::directory_iterator LI = D.getVFS().dir_begin(LibDir, EC), LE;
+ !EC && LI != LE; LI = LI.increment(EC)) {
+ StringRef VersionText = llvm::sys::path::filename(LI->getName());
+ GCCVersion CandidateVersion = GCCVersion::Parse(VersionText);
+
+ if (CandidateVersion.Major != -1) // Filter obviously bad entries.
+ if (!CandidateGCCInstallPaths.insert(LI->getName()).second)
+ continue; // Saw this path before; no need to look at it again.
+ if (CandidateVersion.isOlderThan(4, 1, 1))
+ continue;
+ if (CandidateVersion <= Version)
+ continue;
+
+ GCCInstallPath =
+ LibDir + "/" + VersionText.str() + "/lib/gcc/" + CandidateTriple.str();
+ if (!D.getVFS().exists(GCCInstallPath))
+ continue;
+
+    // If we make it here, there has to be at least one GCC version; just use
+    // the latest one.
+ std::error_code EEC;
+ for (vfs::directory_iterator
+ LLI = D.getVFS().dir_begin(GCCInstallPath, EEC),
+ LLE;
+ !EEC && LLI != LLE; LLI = LLI.increment(EEC)) {
+
+ StringRef SubVersionText = llvm::sys::path::filename(LLI->getName());
+ GCCVersion CandidateSubVersion = GCCVersion::Parse(SubVersionText);
+
+ if (CandidateSubVersion > Version)
+ Version = CandidateSubVersion;
+ }
+
+ GCCTriple.setTriple(CandidateTriple);
+
+ GCCInstallPath += "/" + Version.Text;
+ GCCParentLibPath = GCCInstallPath + "/../../../../";
+
+ IsValid = true;
+ }
+}
+
+void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
+ const llvm::Triple &TargetTriple, const ArgList &Args,
+ const std::string &LibDir, StringRef CandidateTriple,
+ bool NeedsBiarchSuffix) {
+ llvm::Triple::ArchType TargetArch = TargetTriple.getArch();
+  // There are various suffixes involving the triple that we
+ // check for. We also record what is necessary to walk from each back
+ // up to the lib directory. Specifically, the number of "up" steps
+ // in the second half of each row is 1 + the number of path separators
+ // in the first half.
+ const std::string LibAndInstallSuffixes[][2] = {
+ {"/gcc/" + CandidateTriple.str(), "/../../.."},
+
+ // Debian puts cross-compilers in gcc-cross
+ {"/gcc-cross/" + CandidateTriple.str(), "/../../.."},
+
+ {"/" + CandidateTriple.str() + "/gcc/" + CandidateTriple.str(),
+ "/../../../.."},
+
+ // The Freescale PPC SDK has the gcc libraries in
+ // <sysroot>/usr/lib/<triple>/x.y.z so have a look there as well.
+ {"/" + CandidateTriple.str(), "/../.."},
+
+      // Ubuntu has a strange mismatched pair of triples that this happens to
+      // match.
+ // FIXME: It may be worthwhile to generalize this and look for a second
+ // triple.
+ {"/i386-linux-gnu/gcc/" + CandidateTriple.str(), "/../../../.."}};
+
+ if (TargetTriple.getOS() == llvm::Triple::Solaris) {
+ scanLibDirForGCCTripleSolaris(TargetTriple, Args, LibDir, CandidateTriple,
+ NeedsBiarchSuffix);
+ return;
+ }
+
+ // Only look at the final, weird Ubuntu suffix for i386-linux-gnu.
+ const unsigned NumLibSuffixes = (llvm::array_lengthof(LibAndInstallSuffixes) -
+ (TargetArch != llvm::Triple::x86));
+ for (unsigned i = 0; i < NumLibSuffixes; ++i) {
+ StringRef LibSuffix = LibAndInstallSuffixes[i][0];
+ std::error_code EC;
+ for (vfs::directory_iterator
+ LI = D.getVFS().dir_begin(LibDir + LibSuffix, EC),
+ LE;
+ !EC && LI != LE; LI = LI.increment(EC)) {
+ StringRef VersionText = llvm::sys::path::filename(LI->getName());
+ GCCVersion CandidateVersion = GCCVersion::Parse(VersionText);
+ if (CandidateVersion.Major != -1) // Filter obviously bad entries.
+ if (!CandidateGCCInstallPaths.insert(LI->getName()).second)
+ continue; // Saw this path before; no need to look at it again.
+ if (CandidateVersion.isOlderThan(4, 1, 1))
+ continue;
+ if (CandidateVersion <= Version)
+ continue;
+
+ DetectedMultilibs Detected;
+
+ // Debian mips multilibs behave more like the rest of the biarch ones,
+      // so handle them there.
+ if (isMipsArch(TargetArch)) {
+ if (!findMIPSMultilibs(D, TargetTriple, LI->getName(), Args, Detected))
+ continue;
+ } else if (!findBiarchMultilibs(D, TargetTriple, LI->getName(), Args,
+ NeedsBiarchSuffix, Detected)) {
+ continue;
+ }
+
+ Multilibs = Detected.Multilibs;
+ SelectedMultilib = Detected.SelectedMultilib;
+ BiarchSibling = Detected.BiarchSibling;
+ Version = CandidateVersion;
+ GCCTriple.setTriple(CandidateTriple);
+ // FIXME: We hack together the directory name here instead of
+ // using LI to ensure stable path separators across Windows and
+ // Linux.
+ GCCInstallPath =
+ LibDir + LibAndInstallSuffixes[i][0] + "/" + VersionText.str();
+ GCCParentLibPath = GCCInstallPath + LibAndInstallSuffixes[i][1];
+ IsValid = true;
+ }
+ }
+}
+
+Generic_GCC::Generic_GCC(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : ToolChain(D, Triple, Args), GCCInstallation(D), CudaInstallation(D) {
+ getProgramPaths().push_back(getDriver().getInstalledDir());
+ if (getDriver().getInstalledDir() != getDriver().Dir)
+ getProgramPaths().push_back(getDriver().Dir);
+}
+
+Generic_GCC::~Generic_GCC() {}
+
+Tool *Generic_GCC::getTool(Action::ActionClass AC) const {
+ switch (AC) {
+ case Action::PreprocessJobClass:
+ if (!Preprocess)
+ Preprocess.reset(new tools::gcc::Preprocessor(*this));
+ return Preprocess.get();
+ case Action::CompileJobClass:
+ if (!Compile)
+ Compile.reset(new tools::gcc::Compiler(*this));
+ return Compile.get();
+ default:
+ return ToolChain::getTool(AC);
+ }
+}
+
+Tool *Generic_GCC::buildAssembler() const {
+ return new tools::gnutools::Assembler(*this);
+}
+
+Tool *Generic_GCC::buildLinker() const { return new tools::gcc::Linker(*this); }
+
+void Generic_GCC::printVerboseInfo(raw_ostream &OS) const {
+ // Print the information about how we detected the GCC installation.
+ GCCInstallation.print(OS);
+ CudaInstallation.print(OS);
+}
+
+bool Generic_GCC::IsUnwindTablesDefault() const {
+ return getArch() == llvm::Triple::x86_64;
+}
+
+bool Generic_GCC::isPICDefault() const {
+ return getArch() == llvm::Triple::x86_64 && getTriple().isOSWindows();
+}
+
+bool Generic_GCC::isPIEDefault() const { return false; }
+
+bool Generic_GCC::isPICDefaultForced() const {
+ return getArch() == llvm::Triple::x86_64 && getTriple().isOSWindows();
+}
+
+bool Generic_GCC::IsIntegratedAssemblerDefault() const {
+ switch (getTriple().getArch()) {
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be:
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::bpfel:
+ case llvm::Triple::bpfeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ case llvm::Triple::systemz:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/// \brief Helper to add the variant paths of a libstdc++ installation.
+bool Generic_GCC::addLibStdCXXIncludePaths(
+ Twine Base, Twine Suffix, StringRef GCCTriple, StringRef GCCMultiarchTriple,
+ StringRef TargetMultiarchTriple, Twine IncludeSuffix,
+ const ArgList &DriverArgs, ArgStringList &CC1Args) const {
+ if (!getVFS().exists(Base + Suffix))
+ return false;
+
+ addSystemInclude(DriverArgs, CC1Args, Base + Suffix);
+
+ // The vanilla GCC layout of libstdc++ headers uses a triple subdirectory. If
+ // that path exists or we have neither a GCC nor target multiarch triple, use
+ // this vanilla search path.
+ if ((GCCMultiarchTriple.empty() && TargetMultiarchTriple.empty()) ||
+ getVFS().exists(Base + Suffix + "/" + GCCTriple + IncludeSuffix)) {
+ addSystemInclude(DriverArgs, CC1Args,
+ Base + Suffix + "/" + GCCTriple + IncludeSuffix);
+ } else {
+ // Otherwise try to use multiarch naming schemes which have normalized the
+ // triples and put the triple before the suffix.
+ //
+ // GCC surprisingly uses *both* the GCC triple with a multilib suffix and
+ // the target triple, so we support that here.
+ addSystemInclude(DriverArgs, CC1Args,
+ Base + "/" + GCCMultiarchTriple + Suffix + IncludeSuffix);
+ addSystemInclude(DriverArgs, CC1Args,
+ Base + "/" + TargetMultiarchTriple + Suffix);
+ }
+
+ addSystemInclude(DriverArgs, CC1Args, Base + Suffix + "/backward");
+ return true;
+}
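+
+// For instance, with Base = "/usr/include/c++", Suffix = "/4.8" and
+// GCCTriple = "x86_64-linux-gnu" (illustrative values), the vanilla layout
+// adds:
+//   /usr/include/c++/4.8
+//   /usr/include/c++/4.8/x86_64-linux-gnu
+//   /usr/include/c++/4.8/backward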
+
+void Generic_ELF::addClangTargetOptions(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ const Generic_GCC::GCCVersion &V = GCCInstallation.getVersion();
+ bool UseInitArrayDefault =
+ getTriple().getArch() == llvm::Triple::aarch64 ||
+ getTriple().getArch() == llvm::Triple::aarch64_be ||
+ (getTriple().getOS() == llvm::Triple::Linux &&
+ (!V.isOlderThan(4, 7, 0) || getTriple().isAndroid())) ||
+ getTriple().getOS() == llvm::Triple::NaCl ||
+ (getTriple().getVendor() == llvm::Triple::MipsTechnologies &&
+ !getTriple().hasEnvironment());
+
+ if (DriverArgs.hasFlag(options::OPT_fuse_init_array,
+ options::OPT_fno_use_init_array, UseInitArrayDefault))
+ CC1Args.push_back("-fuse-init-array");
+}
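+
+// E.g. passing -fno-use-init-array overrides UseInitArrayDefault and keeps
+// -fuse-init-array out of the cc1 invocation even on targets (aarch64,
+// NaCl, ...) where .init_array is the default above.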
+
+/// Mips Toolchain
+MipsLLVMToolChain::MipsLLVMToolChain(const Driver &D,
+ const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Linux(D, Triple, Args) {
+ // Select the correct multilib according to the given arguments.
+ DetectedMultilibs Result;
+ findMIPSMultilibs(D, Triple, "", Args, Result);
+ Multilibs = Result.Multilibs;
+ SelectedMultilib = Result.SelectedMultilib;
+
+ // Find out the library suffix based on the ABI.
+ LibSuffix = tools::mips::getMipsABILibSuffix(Args, Triple);
+ getFilePaths().clear();
+ getFilePaths().push_back(computeSysRoot() + "/usr/lib" + LibSuffix);
+
+ // Use LLD by default.
+ DefaultLinker = "lld";
+}
+
+void MipsLLVMToolChain::AddClangSystemIncludeArgs(
+ const ArgList &DriverArgs, ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ const Driver &D = getDriver();
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "include");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ const auto &Callback = Multilibs.includeDirsCallback();
+ if (Callback) {
+ const auto IncludePaths =
+ Callback(D.getInstalledDir(), getTripleString(), SelectedMultilib);
+ for (const auto &Path : IncludePaths)
+ addExternCSystemIncludeIfExists(DriverArgs, CC1Args, Path);
+ }
+}
+
+Tool *MipsLLVMToolChain::buildLinker() const {
+ return new tools::gnutools::Linker(*this);
+}
+
+std::string MipsLLVMToolChain::computeSysRoot() const {
+ if (!getDriver().SysRoot.empty())
+ return getDriver().SysRoot + SelectedMultilib.osSuffix();
+
+ const std::string InstalledDir(getDriver().getInstalledDir());
+ std::string SysRootPath =
+ InstalledDir + "/../sysroot" + SelectedMultilib.osSuffix();
+ if (llvm::sys::fs::exists(SysRootPath))
+ return SysRootPath;
+
+ return std::string();
+}
+
+ToolChain::CXXStdlibType
+MipsLLVMToolChain::GetCXXStdlibType(const ArgList &Args) const {
+ Arg *A = Args.getLastArg(options::OPT_stdlib_EQ);
+ if (A) {
+ StringRef Value = A->getValue();
+ if (Value != "libc++")
+ getDriver().Diag(diag::err_drv_invalid_stdlib_name)
+ << A->getAsString(Args);
+ }
+
+ return ToolChain::CST_Libcxx;
+}
+
+void MipsLLVMToolChain::AddClangCXXStdlibIncludeArgs(
+ const ArgList &DriverArgs, ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ assert((GetCXXStdlibType(DriverArgs) == ToolChain::CST_Libcxx) &&
+         "Only -lc++ (aka libcxx) is supported in this toolchain.");
+
+ const auto &Callback = Multilibs.includeDirsCallback();
+ if (Callback) {
+ const auto IncludePaths = Callback(getDriver().getInstalledDir(),
+ getTripleString(), SelectedMultilib);
+ for (const auto &Path : IncludePaths) {
+ if (llvm::sys::fs::exists(Path + "/c++/v1")) {
+ addSystemInclude(DriverArgs, CC1Args, Path + "/c++/v1");
+ break;
+ }
+ }
+ }
+}
+
+void MipsLLVMToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ assert((GetCXXStdlibType(Args) == ToolChain::CST_Libcxx) &&
+         "Only -lc++ (aka libcxx) is supported in this toolchain.");
+
+ CmdArgs.push_back("-lc++");
+ CmdArgs.push_back("-lc++abi");
+ CmdArgs.push_back("-lunwind");
+}
+
+std::string MipsLLVMToolChain::getCompilerRT(const ArgList &Args,
+ StringRef Component,
+ bool Shared) const {
+ SmallString<128> Path(getDriver().ResourceDir);
+ llvm::sys::path::append(Path, SelectedMultilib.osSuffix(), "lib" + LibSuffix,
+ getOS());
+ llvm::sys::path::append(Path, Twine("libclang_rt." + Component + "-" +
+ "mips" + (Shared ? ".so" : ".a")));
+ return Path.str();
+}
+
+/// Hexagon Toolchain
+
+std::string HexagonToolChain::getHexagonTargetDir(
+ const std::string &InstalledDir,
+ const SmallVectorImpl<std::string> &PrefixDirs) const {
+ std::string InstallRelDir;
+ const Driver &D = getDriver();
+
+ // Locate the rest of the toolchain ...
+ for (auto &I : PrefixDirs)
+ if (D.getVFS().exists(I))
+ return I;
+
+ if (getVFS().exists(InstallRelDir = InstalledDir + "/../target"))
+ return InstallRelDir;
+
+ std::string PrefixRelDir = std::string(LLVM_PREFIX) + "/target";
+ if (getVFS().exists(PrefixRelDir))
+ return PrefixRelDir;
+
+ return InstallRelDir;
+}
+
+Optional<unsigned> HexagonToolChain::getSmallDataThreshold(
+ const ArgList &Args) {
+ StringRef Gn = "";
+ if (Arg *A = Args.getLastArg(options::OPT_G, options::OPT_G_EQ,
+ options::OPT_msmall_data_threshold_EQ)) {
+ Gn = A->getValue();
+ } else if (Args.getLastArg(options::OPT_shared, options::OPT_fpic,
+ options::OPT_fPIC)) {
+ Gn = "0";
+ }
+
+ unsigned G;
+ if (!Gn.getAsInteger(10, G))
+ return G;
+
+ return None;
+}
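+
+// Examples: "-G8" (or "-msmall-data-threshold=8") yields 8; "-shared",
+// "-fpic" or "-fPIC" without an explicit -G yields 0; with neither, None is
+// returned and callers fall back to their own default.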
+
+void HexagonToolChain::getHexagonLibraryPaths(const ArgList &Args,
+ ToolChain::path_list &LibPaths) const {
+ const Driver &D = getDriver();
+
+ //----------------------------------------------------------------------------
+ // -L Args
+ //----------------------------------------------------------------------------
+ for (Arg *A : Args.filtered(options::OPT_L))
+ for (const char *Value : A->getValues())
+ LibPaths.push_back(Value);
+
+ //----------------------------------------------------------------------------
+ // Other standard paths
+ //----------------------------------------------------------------------------
+ std::vector<std::string> RootDirs;
+ std::copy(D.PrefixDirs.begin(), D.PrefixDirs.end(),
+ std::back_inserter(RootDirs));
+
+ std::string TargetDir = getHexagonTargetDir(D.getInstalledDir(),
+ D.PrefixDirs);
+ if (std::find(RootDirs.begin(), RootDirs.end(), TargetDir) == RootDirs.end())
+ RootDirs.push_back(TargetDir);
+
+ bool HasPIC = Args.hasArg(options::OPT_fpic, options::OPT_fPIC);
+ // Assume G0 with -shared.
+ bool HasG0 = Args.hasArg(options::OPT_shared);
+ if (auto G = getSmallDataThreshold(Args))
+ HasG0 = G.getValue() == 0;
+
+ const std::string CpuVer = GetTargetCPUVersion(Args).str();
+ for (auto &Dir : RootDirs) {
+ std::string LibDir = Dir + "/hexagon/lib";
+ std::string LibDirCpu = LibDir + '/' + CpuVer;
+ if (HasG0) {
+ if (HasPIC)
+ LibPaths.push_back(LibDirCpu + "/G0/pic");
+ LibPaths.push_back(LibDirCpu + "/G0");
+ }
+ LibPaths.push_back(LibDirCpu);
+ LibPaths.push_back(LibDir);
+ }
+}
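+
+// Result sketch for a single root directory R, CPU "v60", with -fpic and a
+// G0 threshold in effect:
+//   R/hexagon/lib/v60/G0/pic
+//   R/hexagon/lib/v60/G0
+//   R/hexagon/lib/v60
+//   R/hexagon/lib
+// (any -L paths precede these).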
+
+HexagonToolChain::HexagonToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args)
+ : Linux(D, Triple, Args) {
+ const std::string TargetDir = getHexagonTargetDir(D.getInstalledDir(),
+ D.PrefixDirs);
+
+ // Note: Generic_GCC::Generic_GCC adds InstalledDir and getDriver().Dir to
+ // program paths
+ const std::string BinDir(TargetDir + "/bin");
+ if (D.getVFS().exists(BinDir))
+ getProgramPaths().push_back(BinDir);
+
+ ToolChain::path_list &LibPaths = getFilePaths();
+
+  // Remove paths added by the Linux toolchain. Currently Hexagon_TC really
+  // targets the 'elf' OS type, so the Linux paths are not appropriate. When
+  // we actually support 'linux' we'll need to fix this up.
+ LibPaths.clear();
+ getHexagonLibraryPaths(Args, LibPaths);
+}
+
+HexagonToolChain::~HexagonToolChain() {}
+
+Tool *HexagonToolChain::buildAssembler() const {
+ return new tools::hexagon::Assembler(*this);
+}
+
+Tool *HexagonToolChain::buildLinker() const {
+ return new tools::hexagon::Linker(*this);
+}
+
+void HexagonToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdinc) ||
+ DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ const Driver &D = getDriver();
+ std::string TargetDir = getHexagonTargetDir(D.getInstalledDir(),
+ D.PrefixDirs);
+ addExternCSystemInclude(DriverArgs, CC1Args, TargetDir + "/hexagon/include");
+}
+
+void HexagonToolChain::AddClangCXXStdlibIncludeArgs(
+ const ArgList &DriverArgs, ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ const Driver &D = getDriver();
+ std::string TargetDir = getHexagonTargetDir(D.InstalledDir, D.PrefixDirs);
+ addSystemInclude(DriverArgs, CC1Args, TargetDir + "/hexagon/include/c++");
+}
+
+ToolChain::CXXStdlibType
+HexagonToolChain::GetCXXStdlibType(const ArgList &Args) const {
+ Arg *A = Args.getLastArg(options::OPT_stdlib_EQ);
+ if (!A)
+ return ToolChain::CST_Libstdcxx;
+
+ StringRef Value = A->getValue();
+ if (Value != "libstdc++")
+ getDriver().Diag(diag::err_drv_invalid_stdlib_name) << A->getAsString(Args);
+
+ return ToolChain::CST_Libstdcxx;
+}
+
+//
+// Returns the default CPU for Hexagon. This is the default compilation target
+// if no Hexagon processor is selected on the command line.
+//
+const StringRef HexagonToolChain::GetDefaultCPU() {
+ return "hexagonv60";
+}
+
+const StringRef HexagonToolChain::GetTargetCPUVersion(const ArgList &Args) {
+ Arg *CpuArg = nullptr;
+
+ for (auto &A : Args) {
+ if (A->getOption().matches(options::OPT_mcpu_EQ)) {
+ CpuArg = A;
+ A->claim();
+ }
+ }
+
+ StringRef CPU = CpuArg ? CpuArg->getValue() : GetDefaultCPU();
+ if (CPU.startswith("hexagon"))
+ return CPU.substr(sizeof("hexagon") - 1);
+ return CPU;
+}
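+
+// Examples: "-mcpu=hexagonv55" yields "v55"; with no -mcpu the default
+// "hexagonv60" yields "v60"; a value that does not start with "hexagon" is
+// returned unchanged.
+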
+// End Hexagon
+
+/// AMDGPU Toolchain
+AMDGPUToolChain::AMDGPUToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) { }
+
+Tool *AMDGPUToolChain::buildLinker() const {
+ return new tools::amdgpu::Linker(*this);
+}
+// End AMDGPU
+
+/// NaCl Toolchain
+NaClToolChain::NaClToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+
+ // Remove paths added by Generic_GCC. NaCl Toolchain cannot use the
+ // default paths, and must instead only use the paths provided
+ // with this toolchain based on architecture.
+ path_list &file_paths = getFilePaths();
+ path_list &prog_paths = getProgramPaths();
+
+ file_paths.clear();
+ prog_paths.clear();
+
+ // Path for library files (libc.a, ...)
+ std::string FilePath(getDriver().Dir + "/../");
+
+ // Path for tools (clang, ld, etc..)
+ std::string ProgPath(getDriver().Dir + "/../");
+
+ // Path for toolchain libraries (libgcc.a, ...)
+ std::string ToolPath(getDriver().ResourceDir + "/lib/");
+
+ switch (Triple.getArch()) {
+ case llvm::Triple::x86:
+ file_paths.push_back(FilePath + "x86_64-nacl/lib32");
+ file_paths.push_back(FilePath + "i686-nacl/usr/lib");
+ prog_paths.push_back(ProgPath + "x86_64-nacl/bin");
+ file_paths.push_back(ToolPath + "i686-nacl");
+ break;
+ case llvm::Triple::x86_64:
+ file_paths.push_back(FilePath + "x86_64-nacl/lib");
+ file_paths.push_back(FilePath + "x86_64-nacl/usr/lib");
+ prog_paths.push_back(ProgPath + "x86_64-nacl/bin");
+ file_paths.push_back(ToolPath + "x86_64-nacl");
+ break;
+ case llvm::Triple::arm:
+ file_paths.push_back(FilePath + "arm-nacl/lib");
+ file_paths.push_back(FilePath + "arm-nacl/usr/lib");
+ prog_paths.push_back(ProgPath + "arm-nacl/bin");
+ file_paths.push_back(ToolPath + "arm-nacl");
+ break;
+ case llvm::Triple::mipsel:
+ file_paths.push_back(FilePath + "mipsel-nacl/lib");
+ file_paths.push_back(FilePath + "mipsel-nacl/usr/lib");
+ prog_paths.push_back(ProgPath + "bin");
+ file_paths.push_back(ToolPath + "mipsel-nacl");
+ break;
+ default:
+ break;
+ }
+
+ NaClArmMacrosPath = GetFilePath("nacl-arm-macros.s");
+}
+
+void NaClToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "include");
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ SmallString<128> P(D.Dir + "/../");
+ switch (getTriple().getArch()) {
+ case llvm::Triple::x86:
+    // x86 is special because the multilib style uses x86_64-nacl/include for
+    // libc headers but the SDK wants i686-nacl/usr/include. The other
+    // architectures use the same triple for both.
+ llvm::sys::path::append(P, "i686-nacl/usr/include");
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ llvm::sys::path::remove_filename(P);
+ llvm::sys::path::remove_filename(P);
+ llvm::sys::path::remove_filename(P);
+ llvm::sys::path::append(P, "x86_64-nacl/include");
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ return;
+ case llvm::Triple::arm:
+ llvm::sys::path::append(P, "arm-nacl/usr/include");
+ break;
+ case llvm::Triple::x86_64:
+ llvm::sys::path::append(P, "x86_64-nacl/usr/include");
+ break;
+ case llvm::Triple::mipsel:
+ llvm::sys::path::append(P, "mipsel-nacl/usr/include");
+ break;
+ default:
+ return;
+ }
+
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ llvm::sys::path::remove_filename(P);
+ llvm::sys::path::remove_filename(P);
+ llvm::sys::path::append(P, "include");
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+}
+
+void NaClToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // Check for -stdlib= flags. We only support libc++ but this consumes the arg
+ // if the value is libc++, and emits an error for other values.
+ GetCXXStdlibType(Args);
+ CmdArgs.push_back("-lc++");
+}
+
+void NaClToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ // Check for -stdlib= flags. We only support libc++ but this consumes the arg
+ // if the value is libc++, and emits an error for other values.
+ GetCXXStdlibType(DriverArgs);
+
+ SmallString<128> P(D.Dir + "/../");
+ switch (getTriple().getArch()) {
+ case llvm::Triple::arm:
+ llvm::sys::path::append(P, "arm-nacl/include/c++/v1");
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ break;
+ case llvm::Triple::x86:
+ llvm::sys::path::append(P, "x86_64-nacl/include/c++/v1");
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ break;
+ case llvm::Triple::x86_64:
+ llvm::sys::path::append(P, "x86_64-nacl/include/c++/v1");
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ break;
+ case llvm::Triple::mipsel:
+ llvm::sys::path::append(P, "mipsel-nacl/include/c++/v1");
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ break;
+ default:
+ break;
+ }
+}
+
+ToolChain::CXXStdlibType
+NaClToolChain::GetCXXStdlibType(const ArgList &Args) const {
+ if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value == "libc++")
+ return ToolChain::CST_Libcxx;
+ getDriver().Diag(diag::err_drv_invalid_stdlib_name) << A->getAsString(Args);
+ }
+
+ return ToolChain::CST_Libcxx;
+}
+
+std::string
+NaClToolChain::ComputeEffectiveClangTriple(const ArgList &Args,
+ types::ID InputType) const {
+ llvm::Triple TheTriple(ComputeLLVMTriple(Args, InputType));
+ if (TheTriple.getArch() == llvm::Triple::arm &&
+ TheTriple.getEnvironment() == llvm::Triple::UnknownEnvironment)
+ TheTriple.setEnvironment(llvm::Triple::GNUEABIHF);
+ return TheTriple.getTriple();
+}
+
+Tool *NaClToolChain::buildLinker() const {
+ return new tools::nacltools::Linker(*this);
+}
+
+Tool *NaClToolChain::buildAssembler() const {
+ if (getTriple().getArch() == llvm::Triple::arm)
+ return new tools::nacltools::AssemblerARM(*this);
+ return new tools::gnutools::Assembler(*this);
+}
+// End NaCl
+
+/// TCEToolChain - A tool chain using the llvm bitcode tools to perform
+/// all subcommands. See http://tce.cs.tut.fi for our peculiar target.
+/// Currently does not support anything but compilation.
+
+TCEToolChain::TCEToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : ToolChain(D, Triple, Args) {
+ // Path mangling to find libexec
+ std::string Path(getDriver().Dir);
+
+ Path += "/../libexec";
+ getProgramPaths().push_back(Path);
+}
+
+TCEToolChain::~TCEToolChain() {}
+
+bool TCEToolChain::IsMathErrnoDefault() const { return true; }
+
+bool TCEToolChain::isPICDefault() const { return false; }
+
+bool TCEToolChain::isPIEDefault() const { return false; }
+
+bool TCEToolChain::isPICDefaultForced() const { return false; }
+
+// CloudABI - CloudABI tool chain which can call ld(1) directly.
+
+CloudABI::CloudABI(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+ SmallString<128> P(getDriver().Dir);
+ llvm::sys::path::append(P, "..", getTriple().str(), "lib");
+ getFilePaths().push_back(P.str());
+}
+
+void CloudABI::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+  if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+      DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ SmallString<128> P(getDriver().Dir);
+ llvm::sys::path::append(P, "..", getTriple().str(), "include/c++/v1");
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+}
+
+void CloudABI::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CmdArgs.push_back("-lc++");
+ CmdArgs.push_back("-lc++abi");
+ CmdArgs.push_back("-lunwind");
+}
+
+Tool *CloudABI::buildLinker() const {
+ return new tools::cloudabi::Linker(*this);
+}
+
+/// OpenBSD - OpenBSD tool chain which can call as(1) and ld(1) directly.
+
+OpenBSD::OpenBSD(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+ getFilePaths().push_back(getDriver().Dir + "/../lib");
+ getFilePaths().push_back("/usr/lib");
+}
+
+Tool *OpenBSD::buildAssembler() const {
+ return new tools::openbsd::Assembler(*this);
+}
+
+Tool *OpenBSD::buildLinker() const { return new tools::openbsd::Linker(*this); }
+
+/// Bitrig - Bitrig tool chain which can call as(1) and ld(1) directly.
+
+Bitrig::Bitrig(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+ getFilePaths().push_back(getDriver().Dir + "/../lib");
+ getFilePaths().push_back("/usr/lib");
+}
+
+Tool *Bitrig::buildAssembler() const {
+ return new tools::bitrig::Assembler(*this);
+}
+
+Tool *Bitrig::buildLinker() const { return new tools::bitrig::Linker(*this); }
+
+ToolChain::CXXStdlibType Bitrig::GetCXXStdlibType(const ArgList &Args) const {
+ if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value == "libstdc++")
+ return ToolChain::CST_Libstdcxx;
+ if (Value == "libc++")
+ return ToolChain::CST_Libcxx;
+
+ getDriver().Diag(diag::err_drv_invalid_stdlib_name) << A->getAsString(Args);
+ }
+ return ToolChain::CST_Libcxx;
+}
+
+void Bitrig::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx:
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/c++/v1");
+ break;
+ case ToolChain::CST_Libstdcxx:
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/c++/stdc++");
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/c++/stdc++/backward");
+
+ StringRef Triple = getTriple().str();
+ if (Triple.startswith("amd64"))
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/c++/stdc++/x86_64" +
+ Triple.substr(5));
+ else
+ addSystemInclude(DriverArgs, CC1Args, getDriver().SysRoot +
+ "/usr/include/c++/stdc++/" +
+ Triple);
+ break;
+ }
+}
+
+void Bitrig::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ switch (GetCXXStdlibType(Args)) {
+ case ToolChain::CST_Libcxx:
+ CmdArgs.push_back("-lc++");
+ CmdArgs.push_back("-lc++abi");
+ CmdArgs.push_back("-lpthread");
+ break;
+ case ToolChain::CST_Libstdcxx:
+ CmdArgs.push_back("-lstdc++");
+ break;
+ }
+}
+
+/// FreeBSD - FreeBSD tool chain which can call as(1) and ld(1) directly.
+
+FreeBSD::FreeBSD(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+
+ // When targeting 32-bit platforms, look for '/usr/lib32/crt1.o' and fall
+ // back to '/usr/lib' if it doesn't exist.
+ if ((Triple.getArch() == llvm::Triple::x86 ||
+ Triple.getArch() == llvm::Triple::ppc) &&
+ D.getVFS().exists(getDriver().SysRoot + "/usr/lib32/crt1.o"))
+ getFilePaths().push_back(getDriver().SysRoot + "/usr/lib32");
+ else
+ getFilePaths().push_back(getDriver().SysRoot + "/usr/lib");
+}
+
+ToolChain::CXXStdlibType FreeBSD::GetCXXStdlibType(const ArgList &Args) const {
+ if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value == "libstdc++")
+ return ToolChain::CST_Libstdcxx;
+ if (Value == "libc++")
+ return ToolChain::CST_Libcxx;
+
+ getDriver().Diag(diag::err_drv_invalid_stdlib_name) << A->getAsString(Args);
+ }
+ if (getTriple().getOSMajorVersion() >= 10)
+ return ToolChain::CST_Libcxx;
+ return ToolChain::CST_Libstdcxx;
+}
+
+void FreeBSD::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx:
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/c++/v1");
+ break;
+ case ToolChain::CST_Libstdcxx:
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/c++/4.2");
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/c++/4.2/backward");
+ break;
+ }
+}
+
+Tool *FreeBSD::buildAssembler() const {
+ return new tools::freebsd::Assembler(*this);
+}
+
+Tool *FreeBSD::buildLinker() const { return new tools::freebsd::Linker(*this); }
+
+bool FreeBSD::UseSjLjExceptions(const ArgList &Args) const {
+ // FreeBSD uses SjLj exceptions on ARM oabi.
+ switch (getTriple().getEnvironment()) {
+ case llvm::Triple::GNUEABIHF:
+ case llvm::Triple::GNUEABI:
+ case llvm::Triple::EABI:
+ return false;
+
+ default:
+ return (getTriple().getArch() == llvm::Triple::arm ||
+ getTriple().getArch() == llvm::Triple::thumb);
+ }
+}
+
+bool FreeBSD::HasNativeLLVMSupport() const { return true; }
+
+bool FreeBSD::isPIEDefault() const { return getSanitizerArgs().requiresPIE(); }
+
+SanitizerMask FreeBSD::getSupportedSanitizers() const {
+ const bool IsX86 = getTriple().getArch() == llvm::Triple::x86;
+ const bool IsX86_64 = getTriple().getArch() == llvm::Triple::x86_64;
+ const bool IsMIPS64 = getTriple().getArch() == llvm::Triple::mips64 ||
+ getTriple().getArch() == llvm::Triple::mips64el;
+ SanitizerMask Res = ToolChain::getSupportedSanitizers();
+ Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::Vptr;
+ if (IsX86_64 || IsMIPS64) {
+ Res |= SanitizerKind::Leak;
+ Res |= SanitizerKind::Thread;
+ }
+ if (IsX86 || IsX86_64) {
+ Res |= SanitizerKind::SafeStack;
+ }
+ return Res;
+}
+
+/// NetBSD - NetBSD tool chain which can call as(1) and ld(1) directly.
+
+NetBSD::NetBSD(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+
+ if (getDriver().UseStdLib) {
+ // When targeting a 32-bit platform, try the special directory used on
+ // 64-bit hosts, and only fall back to the main library directory if that
+ // doesn't work.
+ // FIXME: It'd be nicer to test if this directory exists, but I'm not sure
+ // what all logic is needed to emulate the '=' prefix here.
+ switch (Triple.getArch()) {
+ case llvm::Triple::x86:
+ getFilePaths().push_back("=/usr/lib/i386");
+ break;
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ switch (Triple.getEnvironment()) {
+ case llvm::Triple::EABI:
+ case llvm::Triple::GNUEABI:
+ getFilePaths().push_back("=/usr/lib/eabi");
+ break;
+ case llvm::Triple::EABIHF:
+ case llvm::Triple::GNUEABIHF:
+ getFilePaths().push_back("=/usr/lib/eabihf");
+ break;
+ default:
+ getFilePaths().push_back("=/usr/lib/oabi");
+ break;
+ }
+ break;
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ if (tools::mips::hasMipsAbiArg(Args, "o32"))
+ getFilePaths().push_back("=/usr/lib/o32");
+ else if (tools::mips::hasMipsAbiArg(Args, "64"))
+ getFilePaths().push_back("=/usr/lib/64");
+ break;
+ case llvm::Triple::ppc:
+ getFilePaths().push_back("=/usr/lib/powerpc");
+ break;
+ case llvm::Triple::sparc:
+ getFilePaths().push_back("=/usr/lib/sparc");
+ break;
+ default:
+ break;
+ }
+
+ getFilePaths().push_back("=/usr/lib");
+ }
+}
+
+Tool *NetBSD::buildAssembler() const {
+ return new tools::netbsd::Assembler(*this);
+}
+
+Tool *NetBSD::buildLinker() const { return new tools::netbsd::Linker(*this); }
+
+ToolChain::CXXStdlibType NetBSD::GetCXXStdlibType(const ArgList &Args) const {
+ if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value == "libstdc++")
+ return ToolChain::CST_Libstdcxx;
+ if (Value == "libc++")
+ return ToolChain::CST_Libcxx;
+
+ getDriver().Diag(diag::err_drv_invalid_stdlib_name) << A->getAsString(Args);
+ }
+
+ unsigned Major, Minor, Micro;
+ getTriple().getOSVersion(Major, Minor, Micro);
+ if (Major >= 7 || (Major == 6 && Minor == 99 && Micro >= 49) || Major == 0) {
+ switch (getArch()) {
+ case llvm::Triple::aarch64:
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ return ToolChain::CST_Libcxx;
+ default:
+ break;
+ }
+ }
+ return ToolChain::CST_Libstdcxx;
+}
+
+void NetBSD::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx:
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/c++/");
+ break;
+ case ToolChain::CST_Libstdcxx:
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/g++");
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/g++/backward");
+ break;
+ }
+}
+
+/// Minix - Minix tool chain which can call as(1) and ld(1) directly.
+
+Minix::Minix(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+ getFilePaths().push_back(getDriver().Dir + "/../lib");
+ getFilePaths().push_back("/usr/lib");
+}
+
+Tool *Minix::buildAssembler() const {
+ return new tools::minix::Assembler(*this);
+}
+
+Tool *Minix::buildLinker() const { return new tools::minix::Linker(*this); }
+
+static void addPathIfExists(const Driver &D, const Twine &Path,
+ ToolChain::path_list &Paths) {
+ if (D.getVFS().exists(Path))
+ Paths.push_back(Path.str());
+}
+
+/// Solaris - Solaris tool chain which can call as(1) and ld(1) directly.
+
+Solaris::Solaris(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Generic_GCC(D, Triple, Args) {
+
+ GCCInstallation.init(Triple, Args);
+
+ path_list &Paths = getFilePaths();
+ if (GCCInstallation.isValid())
+ addPathIfExists(D, GCCInstallation.getInstallPath(), Paths);
+
+ addPathIfExists(D, getDriver().getInstalledDir(), Paths);
+ if (getDriver().getInstalledDir() != getDriver().Dir)
+ addPathIfExists(D, getDriver().Dir, Paths);
+
+ addPathIfExists(D, getDriver().SysRoot + getDriver().Dir + "/../lib", Paths);
+
+ std::string LibPath = "/usr/lib/";
+ switch (Triple.getArch()) {
+ case llvm::Triple::x86:
+ case llvm::Triple::sparc:
+ break;
+ case llvm::Triple::x86_64:
+ LibPath += "amd64/";
+ break;
+ case llvm::Triple::sparcv9:
+ LibPath += "sparcv9/";
+ break;
+ default:
+ llvm_unreachable("Unsupported architecture");
+ }
+
+ addPathIfExists(D, getDriver().SysRoot + LibPath, Paths);
+}
+
+Tool *Solaris::buildAssembler() const {
+ return new tools::solaris::Assembler(*this);
+}
+
+Tool *Solaris::buildLinker() const { return new tools::solaris::Linker(*this); }
+
+void Solaris::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ // Include the support directory for things like xlocale and fudged system
+ // headers.
+ addSystemInclude(DriverArgs, CC1Args, "/usr/include/c++/v1/support/solaris");
+
+ if (GCCInstallation.isValid()) {
+ GCCVersion Version = GCCInstallation.getVersion();
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/gcc/" +
+ Version.MajorStr + "." +
+ Version.MinorStr +
+ "/include/c++/" + Version.Text);
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/gcc/" + Version.MajorStr +
+ "." + Version.MinorStr + "/include/c++/" +
+ Version.Text + "/" +
+ GCCInstallation.getTriple().str());
+ }
+}
+
+/// Distribution (very bare-bones at the moment).
+
+enum Distro {
+ // NB: Releases of a particular Linux distro should be kept together
+ // in this enum, because some tests are done by integer comparison against
+ // the first and last known member in the family, e.g. IsRedHat().
+ ArchLinux,
+ DebianLenny,
+ DebianSqueeze,
+ DebianWheezy,
+ DebianJessie,
+ DebianStretch,
+ Exherbo,
+ RHEL4,
+ RHEL5,
+ RHEL6,
+ RHEL7,
+ Fedora,
+ OpenSUSE,
+ UbuntuHardy,
+ UbuntuIntrepid,
+ UbuntuJaunty,
+ UbuntuKarmic,
+ UbuntuLucid,
+ UbuntuMaverick,
+ UbuntuNatty,
+ UbuntuOneiric,
+ UbuntuPrecise,
+ UbuntuQuantal,
+ UbuntuRaring,
+ UbuntuSaucy,
+ UbuntuTrusty,
+ UbuntuUtopic,
+ UbuntuVivid,
+ UbuntuWily,
+ UbuntuXenial,
+ UnknownDistro
+};
+
+static bool IsRedhat(enum Distro Distro) {
+ return Distro == Fedora || (Distro >= RHEL4 && Distro <= RHEL7);
+}
+
+static bool IsOpenSUSE(enum Distro Distro) { return Distro == OpenSUSE; }
+
+static bool IsDebian(enum Distro Distro) {
+ return Distro >= DebianLenny && Distro <= DebianStretch;
+}
+
+static bool IsUbuntu(enum Distro Distro) {
+ return Distro >= UbuntuHardy && Distro <= UbuntuXenial;
+}
+
+static Distro DetectDistro(const Driver &D, llvm::Triple::ArchType Arch) {
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> File =
+ llvm::MemoryBuffer::getFile("/etc/lsb-release");
+ if (File) {
+ StringRef Data = File.get()->getBuffer();
+ SmallVector<StringRef, 16> Lines;
+ Data.split(Lines, "\n");
+ Distro Version = UnknownDistro;
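+    // A typical /etc/lsb-release contains a line of the form
+    // "DISTRIB_CODENAME=xenial"; substr(17) skips past the '=' of that key.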
+ for (StringRef Line : Lines)
+ if (Version == UnknownDistro && Line.startswith("DISTRIB_CODENAME="))
+ Version = llvm::StringSwitch<Distro>(Line.substr(17))
+ .Case("hardy", UbuntuHardy)
+ .Case("intrepid", UbuntuIntrepid)
+ .Case("jaunty", UbuntuJaunty)
+ .Case("karmic", UbuntuKarmic)
+ .Case("lucid", UbuntuLucid)
+ .Case("maverick", UbuntuMaverick)
+ .Case("natty", UbuntuNatty)
+ .Case("oneiric", UbuntuOneiric)
+ .Case("precise", UbuntuPrecise)
+ .Case("quantal", UbuntuQuantal)
+ .Case("raring", UbuntuRaring)
+ .Case("saucy", UbuntuSaucy)
+ .Case("trusty", UbuntuTrusty)
+ .Case("utopic", UbuntuUtopic)
+ .Case("vivid", UbuntuVivid)
+ .Case("wily", UbuntuWily)
+ .Case("xenial", UbuntuXenial)
+ .Default(UnknownDistro);
+ return Version;
+ }
+
+ File = llvm::MemoryBuffer::getFile("/etc/redhat-release");
+ if (File) {
+ StringRef Data = File.get()->getBuffer();
+ if (Data.startswith("Fedora release"))
+ return Fedora;
+ if (Data.startswith("Red Hat Enterprise Linux") ||
+ Data.startswith("CentOS")) {
+ if (Data.find("release 7") != StringRef::npos)
+ return RHEL7;
+ else if (Data.find("release 6") != StringRef::npos)
+ return RHEL6;
+ else if (Data.find("release 5") != StringRef::npos)
+ return RHEL5;
+ else if (Data.find("release 4") != StringRef::npos)
+ return RHEL4;
+ }
+ return UnknownDistro;
+ }
+
+ File = llvm::MemoryBuffer::getFile("/etc/debian_version");
+ if (File) {
+ StringRef Data = File.get()->getBuffer();
+ if (Data[0] == '5')
+ return DebianLenny;
+ else if (Data.startswith("squeeze/sid") || Data[0] == '6')
+ return DebianSqueeze;
+ else if (Data.startswith("wheezy/sid") || Data[0] == '7')
+ return DebianWheezy;
+ else if (Data.startswith("jessie/sid") || Data[0] == '8')
+ return DebianJessie;
+ else if (Data.startswith("stretch/sid") || Data[0] == '9')
+ return DebianStretch;
+ return UnknownDistro;
+ }
+
+ if (D.getVFS().exists("/etc/SuSE-release"))
+ return OpenSUSE;
+
+ if (D.getVFS().exists("/etc/exherbo-release"))
+ return Exherbo;
+
+ if (D.getVFS().exists("/etc/arch-release"))
+ return ArchLinux;
+
+ return UnknownDistro;
+}
+
+/// \brief Get our best guess at the multiarch triple for a target.
+///
+/// Debian-based systems are starting to use a multiarch setup where they use
+/// a target-triple directory in the library and header search paths.
+/// Unfortunately, this triple does not align with the vanilla target triple,
+/// so we provide a rough mapping here.
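+///
+/// For example, on a Debian-style sysroot an x86_64 target typically maps to
+/// the "x86_64-linux-gnu" directory name when <sysroot>/lib/x86_64-linux-gnu
+/// exists.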
+static std::string getMultiarchTriple(const Driver &D,
+ const llvm::Triple &TargetTriple,
+ StringRef SysRoot) {
+ llvm::Triple::EnvironmentType TargetEnvironment =
+ TargetTriple.getEnvironment();
+
+ // For most architectures, just use whatever we have rather than trying to be
+ // clever.
+ switch (TargetTriple.getArch()) {
+ default:
+ break;
+
+ // We use the existence of '/lib/<triple>' as a directory to detect some
+  // common Linux triples that don't quite match the Clang triple for both
+ // 32-bit and 64-bit targets. Multiarch fixes its install triples to these
+ // regardless of what the actual target triple is.
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ if (TargetEnvironment == llvm::Triple::GNUEABIHF) {
+ if (D.getVFS().exists(SysRoot + "/lib/arm-linux-gnueabihf"))
+ return "arm-linux-gnueabihf";
+ } else {
+ if (D.getVFS().exists(SysRoot + "/lib/arm-linux-gnueabi"))
+ return "arm-linux-gnueabi";
+ }
+ break;
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumbeb:
+ if (TargetEnvironment == llvm::Triple::GNUEABIHF) {
+ if (D.getVFS().exists(SysRoot + "/lib/armeb-linux-gnueabihf"))
+ return "armeb-linux-gnueabihf";
+ } else {
+ if (D.getVFS().exists(SysRoot + "/lib/armeb-linux-gnueabi"))
+ return "armeb-linux-gnueabi";
+ }
+ break;
+ case llvm::Triple::x86:
+ if (D.getVFS().exists(SysRoot + "/lib/i386-linux-gnu"))
+ return "i386-linux-gnu";
+ break;
+ case llvm::Triple::x86_64:
+ // We don't want this for x32, otherwise it will match x86_64 libs
+ if (TargetEnvironment != llvm::Triple::GNUX32 &&
+ D.getVFS().exists(SysRoot + "/lib/x86_64-linux-gnu"))
+ return "x86_64-linux-gnu";
+ break;
+ case llvm::Triple::aarch64:
+ if (D.getVFS().exists(SysRoot + "/lib/aarch64-linux-gnu"))
+ return "aarch64-linux-gnu";
+ break;
+ case llvm::Triple::aarch64_be:
+ if (D.getVFS().exists(SysRoot + "/lib/aarch64_be-linux-gnu"))
+ return "aarch64_be-linux-gnu";
+ break;
+ case llvm::Triple::mips:
+ if (D.getVFS().exists(SysRoot + "/lib/mips-linux-gnu"))
+ return "mips-linux-gnu";
+ break;
+ case llvm::Triple::mipsel:
+ if (D.getVFS().exists(SysRoot + "/lib/mipsel-linux-gnu"))
+ return "mipsel-linux-gnu";
+ break;
+ case llvm::Triple::mips64:
+ if (D.getVFS().exists(SysRoot + "/lib/mips64-linux-gnu"))
+ return "mips64-linux-gnu";
+ if (D.getVFS().exists(SysRoot + "/lib/mips64-linux-gnuabi64"))
+ return "mips64-linux-gnuabi64";
+ break;
+ case llvm::Triple::mips64el:
+ if (D.getVFS().exists(SysRoot + "/lib/mips64el-linux-gnu"))
+ return "mips64el-linux-gnu";
+ if (D.getVFS().exists(SysRoot + "/lib/mips64el-linux-gnuabi64"))
+ return "mips64el-linux-gnuabi64";
+ break;
+ case llvm::Triple::ppc:
+ if (D.getVFS().exists(SysRoot + "/lib/powerpc-linux-gnuspe"))
+ return "powerpc-linux-gnuspe";
+ if (D.getVFS().exists(SysRoot + "/lib/powerpc-linux-gnu"))
+ return "powerpc-linux-gnu";
+ break;
+ case llvm::Triple::ppc64:
+ if (D.getVFS().exists(SysRoot + "/lib/powerpc64-linux-gnu"))
+ return "powerpc64-linux-gnu";
+ break;
+ case llvm::Triple::ppc64le:
+ if (D.getVFS().exists(SysRoot + "/lib/powerpc64le-linux-gnu"))
+ return "powerpc64le-linux-gnu";
+ break;
+ case llvm::Triple::sparc:
+ if (D.getVFS().exists(SysRoot + "/lib/sparc-linux-gnu"))
+ return "sparc-linux-gnu";
+ break;
+ case llvm::Triple::sparcv9:
+ if (D.getVFS().exists(SysRoot + "/lib/sparc64-linux-gnu"))
+ return "sparc64-linux-gnu";
+ break;
+ case llvm::Triple::systemz:
+ if (D.getVFS().exists(SysRoot + "/lib/s390x-linux-gnu"))
+ return "s390x-linux-gnu";
+ break;
+ }
+ return TargetTriple.str();
+}
+
+static StringRef getOSLibDir(const llvm::Triple &Triple, const ArgList &Args) {
+ if (isMipsArch(Triple.getArch())) {
+    // The lib32 directory has a special meaning on MIPS targets: it contains
+    // N32 ABI binaries. Use this directory only when producing code for the
+    // N32 ABI.
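+    // For example, "-mabi=n32" selects lib32 here.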
+ if (tools::mips::hasMipsAbiArg(Args, "n32"))
+ return "lib32";
+ return Triple.isArch32Bit() ? "lib" : "lib64";
+ }
+
+ // It happens that only x86 and PPC use the 'lib32' variant of oslibdir, and
+ // using that variant while targeting other architectures causes problems
+ // because the libraries are laid out in shared system roots that can't cope
+ // with a 'lib32' library search path being considered. So we only enable
+  // it when we know we may need it.
+ //
+ // FIXME: This is a bit of a hack. We should really unify this code for
+ // reasoning about oslibdir spellings with the lib dir spellings in the
+ // GCCInstallationDetector, but that is a more significant refactoring.
+ if (Triple.getArch() == llvm::Triple::x86 ||
+ Triple.getArch() == llvm::Triple::ppc)
+ return "lib32";
+
+ if (Triple.getArch() == llvm::Triple::x86_64 &&
+ Triple.getEnvironment() == llvm::Triple::GNUX32)
+ return "libx32";
+
+ return Triple.isArch32Bit() ? "lib" : "lib64";
+}
+
+Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+ GCCInstallation.init(Triple, Args);
+ CudaInstallation.init(Triple, Args);
+ Multilibs = GCCInstallation.getMultilibs();
+ llvm::Triple::ArchType Arch = Triple.getArch();
+ std::string SysRoot = computeSysRoot();
+
+ // Cross-compiling binutils and GCC installations (vanilla and openSUSE at
+ // least) put various tools in a triple-prefixed directory off of the parent
+ // of the GCC installation. We use the GCC triple here to ensure that we end
+ // up with tools that support the same amount of cross compiling as the
+ // detected GCC installation. For example, if we find a GCC installation
+ // targeting x86_64, but it is a bi-arch GCC installation, it can also be
+ // used to target i386.
+ // FIXME: This seems unlikely to be Linux-specific.
+ ToolChain::path_list &PPaths = getProgramPaths();
+ PPaths.push_back(Twine(GCCInstallation.getParentLibPath() + "/../" +
+ GCCInstallation.getTriple().str() + "/bin")
+ .str());
+
+ Distro Distro = DetectDistro(D, Arch);
+
+ if (IsOpenSUSE(Distro) || IsUbuntu(Distro)) {
+ ExtraOpts.push_back("-z");
+ ExtraOpts.push_back("relro");
+ }
+
+ if (Arch == llvm::Triple::arm || Arch == llvm::Triple::thumb)
+ ExtraOpts.push_back("-X");
+
+ const bool IsAndroid = Triple.isAndroid();
+ const bool IsMips = isMipsArch(Arch);
+
+ if (IsMips && !SysRoot.empty())
+ ExtraOpts.push_back("--sysroot=" + SysRoot);
+
+ // Do not use 'gnu' hash style for Mips targets because .gnu.hash
+ // and the MIPS ABI require .dynsym to be sorted in different ways.
+ // .gnu.hash needs symbols to be grouped by hash code whereas the MIPS
+ // ABI requires a mapping between the GOT and the symbol table.
+  // The Android loader does not support .gnu.hash.
+ if (!IsMips && !IsAndroid) {
+ if (IsRedhat(Distro) || IsOpenSUSE(Distro) ||
+ (IsUbuntu(Distro) && Distro >= UbuntuMaverick))
+ ExtraOpts.push_back("--hash-style=gnu");
+
+ if (IsDebian(Distro) || IsOpenSUSE(Distro) || Distro == UbuntuLucid ||
+ Distro == UbuntuJaunty || Distro == UbuntuKarmic)
+ ExtraOpts.push_back("--hash-style=both");
+ }
+
+ if (IsRedhat(Distro))
+ ExtraOpts.push_back("--no-add-needed");
+
+ if ((IsDebian(Distro) && Distro >= DebianSqueeze) || IsOpenSUSE(Distro) ||
+ (IsRedhat(Distro) && Distro != RHEL4 && Distro != RHEL5) ||
+ (IsUbuntu(Distro) && Distro >= UbuntuKarmic))
+ ExtraOpts.push_back("--build-id");
+
+ if (IsOpenSUSE(Distro))
+ ExtraOpts.push_back("--enable-new-dtags");
+
+ // The selection of paths to try here is designed to match the patterns which
+ // the GCC driver itself uses, as this is part of the GCC-compatible driver.
+ // This was determined by running GCC in a fake filesystem, creating all
+ // possible permutations of these directories, and seeing which ones it added
+ // to the link paths.
+ path_list &Paths = getFilePaths();
+
+ const std::string OSLibDir = getOSLibDir(Triple, Args);
+ const std::string MultiarchTriple = getMultiarchTriple(D, Triple, SysRoot);
+
+ // Add the multilib suffixed paths where they are available.
+ if (GCCInstallation.isValid()) {
+ const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
+ const std::string &LibPath = GCCInstallation.getParentLibPath();
+ const Multilib &Multilib = GCCInstallation.getMultilib();
+
+ // Sourcery CodeBench MIPS toolchain holds some libraries under
+ // a biarch-like suffix of the GCC installation.
+ addPathIfExists(D, GCCInstallation.getInstallPath() + Multilib.gccSuffix(),
+ Paths);
+
+ // GCC cross compiling toolchains will install target libraries which ship
+ // as part of the toolchain under <prefix>/<triple>/<libdir> rather than as
+ // any part of the GCC installation in
+ // <prefix>/<libdir>/gcc/<triple>/<version>. This decision is somewhat
+ // debatable, but is the reality today. We need to search this tree even
+ // when we have a sysroot somewhere else. It is the responsibility of
+ // whomever is doing the cross build targeting a sysroot using a GCC
+ // installation that is *not* within the system root to ensure two things:
+ //
+ // 1) Any DSOs that are linked in from this tree or from the install path
+ // above must be present on the system root and found via an
+ // appropriate rpath.
+ // 2) There must not be libraries installed into
+ // <prefix>/<triple>/<libdir> unless they should be preferred over
+ // those within the system root.
+ //
+ // Note that this matches the GCC behavior. See the below comment for where
+ // Clang diverges from GCC's behavior.
+ addPathIfExists(D, LibPath + "/../" + GCCTriple.str() + "/lib/../" +
+ OSLibDir + Multilib.osSuffix(),
+ Paths);
+
+ // If the GCC installation we found is inside of the sysroot, we want to
+ // prefer libraries installed in the parent prefix of the GCC installation.
+ // It is important to *not* use these paths when the GCC installation is
+ // outside of the system root as that can pick up unintended libraries.
+ // This usually happens when there is an external cross compiler on the
+ // host system, and a more minimal sysroot available that is the target of
+ // the cross. Note that GCC does include some of these directories in some
+ // configurations but this seems somewhere between questionable and simply
+ // a bug.
+ if (StringRef(LibPath).startswith(SysRoot)) {
+ addPathIfExists(D, LibPath + "/" + MultiarchTriple, Paths);
+ addPathIfExists(D, LibPath + "/../" + OSLibDir, Paths);
+ }
+ }
+
+  // Similar to the logic for GCC above, if we are currently running Clang
+  // inside of the requested system root, add its parent library paths to
+  // those searched.
+ // FIXME: It's not clear whether we should use the driver's installed
+ // directory ('Dir' below) or the ResourceDir.
+ if (StringRef(D.Dir).startswith(SysRoot)) {
+ addPathIfExists(D, D.Dir + "/../lib/" + MultiarchTriple, Paths);
+ addPathIfExists(D, D.Dir + "/../" + OSLibDir, Paths);
+ }
+
+ addPathIfExists(D, SysRoot + "/lib/" + MultiarchTriple, Paths);
+ addPathIfExists(D, SysRoot + "/lib/../" + OSLibDir, Paths);
+ addPathIfExists(D, SysRoot + "/usr/lib/" + MultiarchTriple, Paths);
+ addPathIfExists(D, SysRoot + "/usr/lib/../" + OSLibDir, Paths);
+
+ // Try walking via the GCC triple path in case of biarch or multiarch GCC
+ // installations with strange symlinks.
+ if (GCCInstallation.isValid()) {
+ addPathIfExists(D,
+ SysRoot + "/usr/lib/" + GCCInstallation.getTriple().str() +
+ "/../../" + OSLibDir,
+ Paths);
+
+ // Add the 'other' biarch variant path
+ Multilib BiarchSibling;
+ if (GCCInstallation.getBiarchSibling(BiarchSibling)) {
+ addPathIfExists(D, GCCInstallation.getInstallPath() +
+ BiarchSibling.gccSuffix(),
+ Paths);
+ }
+
+ // See comments above on the multilib variant for details of why this is
+ // included even from outside the sysroot.
+ const std::string &LibPath = GCCInstallation.getParentLibPath();
+ const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
+ const Multilib &Multilib = GCCInstallation.getMultilib();
+ addPathIfExists(D, LibPath + "/../" + GCCTriple.str() + "/lib" +
+ Multilib.osSuffix(),
+ Paths);
+
+ // See comments above on the multilib variant for details of why this is
+ // only included from within the sysroot.
+ if (StringRef(LibPath).startswith(SysRoot))
+ addPathIfExists(D, LibPath, Paths);
+ }
+
+ // Similar to the logic for GCC above, if we are currently running Clang
+ // inside of the requested system root, add its parent library path to those
+ // searched.
+ // FIXME: It's not clear whether we should use the driver's installed
+ // directory ('Dir' below) or the ResourceDir.
+ if (StringRef(D.Dir).startswith(SysRoot))
+ addPathIfExists(D, D.Dir + "/../lib", Paths);
+
+ addPathIfExists(D, SysRoot + "/lib", Paths);
+ addPathIfExists(D, SysRoot + "/usr/lib", Paths);
+}
+
+bool Linux::HasNativeLLVMSupport() const { return true; }
+
+Tool *Linux::buildLinker() const { return new tools::gnutools::Linker(*this); }
+
+Tool *Linux::buildAssembler() const {
+ return new tools::gnutools::Assembler(*this);
+}
+
+std::string Linux::computeSysRoot() const {
+ if (!getDriver().SysRoot.empty())
+ return getDriver().SysRoot;
+
+ if (!GCCInstallation.isValid() || !isMipsArch(getTriple().getArch()))
+ return std::string();
+
+  // Standalone MIPS toolchains use different names for the sysroot folder
+  // and put it in different places. Here we try to check some known
+  // variants.
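+  // For example, with a hypothetical GCC installed under
+  // <prefix>/lib/gcc/mips-linux-gnu/4.9, the first candidate below resolves
+  // to <prefix>/mips-linux-gnu/libc.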
+
+ const StringRef InstallDir = GCCInstallation.getInstallPath();
+ const StringRef TripleStr = GCCInstallation.getTriple().str();
+ const Multilib &Multilib = GCCInstallation.getMultilib();
+
+ std::string Path =
+ (InstallDir + "/../../../../" + TripleStr + "/libc" + Multilib.osSuffix())
+ .str();
+
+ if (getVFS().exists(Path))
+ return Path;
+
+ Path = (InstallDir + "/../../../../sysroot" + Multilib.osSuffix()).str();
+
+ if (getVFS().exists(Path))
+ return Path;
+
+ return std::string();
+}
+
+void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+ std::string SysRoot = computeSysRoot();
+
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nostdlibinc))
+ addSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/local/include");
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "include");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ // Check for configure-time C include directories.
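+  // C_INCLUDE_DIRS is a ':'-separated list fixed when Clang was configured,
+  // e.g. "/usr/local/include:/usr/include" (illustrative value).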
+ StringRef CIncludeDirs(C_INCLUDE_DIRS);
+ if (CIncludeDirs != "") {
+ SmallVector<StringRef, 5> dirs;
+ CIncludeDirs.split(dirs, ":");
+ for (StringRef dir : dirs) {
+ StringRef Prefix =
+ llvm::sys::path::is_absolute(dir) ? StringRef(SysRoot) : "";
+ addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
+ }
+ return;
+ }
+
+ // Lacking those, try to detect the correct set of system includes for the
+ // target triple.
+
+ // Add include directories specific to the selected multilib set and multilib.
+ if (GCCInstallation.isValid()) {
+ const auto &Callback = Multilibs.includeDirsCallback();
+ if (Callback) {
+ const auto IncludePaths = Callback(GCCInstallation.getInstallPath(),
+ GCCInstallation.getTriple().str(),
+ GCCInstallation.getMultilib());
+ for (const auto &Path : IncludePaths)
+ addExternCSystemIncludeIfExists(DriverArgs, CC1Args, Path);
+ }
+ }
+
+ // Implement generic Debian multiarch support.
+ const StringRef X86_64MultiarchIncludeDirs[] = {
+ "/usr/include/x86_64-linux-gnu",
+
+ // FIXME: These are older forms of multiarch. It's not clear that they're
+ // in use in any released version of Debian, so we should consider
+ // removing them.
+ "/usr/include/i686-linux-gnu/64", "/usr/include/i486-linux-gnu/64"};
+ const StringRef X86MultiarchIncludeDirs[] = {
+ "/usr/include/i386-linux-gnu",
+
+ // FIXME: These are older forms of multiarch. It's not clear that they're
+ // in use in any released version of Debian, so we should consider
+ // removing them.
+ "/usr/include/x86_64-linux-gnu/32", "/usr/include/i686-linux-gnu",
+ "/usr/include/i486-linux-gnu"};
+ const StringRef AArch64MultiarchIncludeDirs[] = {
+ "/usr/include/aarch64-linux-gnu"};
+ const StringRef ARMMultiarchIncludeDirs[] = {
+ "/usr/include/arm-linux-gnueabi"};
+ const StringRef ARMHFMultiarchIncludeDirs[] = {
+ "/usr/include/arm-linux-gnueabihf"};
+ const StringRef ARMEBMultiarchIncludeDirs[] = {
+ "/usr/include/armeb-linux-gnueabi"};
+ const StringRef ARMEBHFMultiarchIncludeDirs[] = {
+ "/usr/include/armeb-linux-gnueabihf"};
+ const StringRef MIPSMultiarchIncludeDirs[] = {"/usr/include/mips-linux-gnu"};
+ const StringRef MIPSELMultiarchIncludeDirs[] = {
+ "/usr/include/mipsel-linux-gnu"};
+ const StringRef MIPS64MultiarchIncludeDirs[] = {
+ "/usr/include/mips64-linux-gnu", "/usr/include/mips64-linux-gnuabi64"};
+ const StringRef MIPS64ELMultiarchIncludeDirs[] = {
+ "/usr/include/mips64el-linux-gnu",
+ "/usr/include/mips64el-linux-gnuabi64"};
+ const StringRef PPCMultiarchIncludeDirs[] = {
+ "/usr/include/powerpc-linux-gnu"};
+ const StringRef PPC64MultiarchIncludeDirs[] = {
+ "/usr/include/powerpc64-linux-gnu"};
+ const StringRef PPC64LEMultiarchIncludeDirs[] = {
+ "/usr/include/powerpc64le-linux-gnu"};
+ const StringRef SparcMultiarchIncludeDirs[] = {
+ "/usr/include/sparc-linux-gnu"};
+ const StringRef Sparc64MultiarchIncludeDirs[] = {
+ "/usr/include/sparc64-linux-gnu"};
+ const StringRef SYSTEMZMultiarchIncludeDirs[] = {
+ "/usr/include/s390x-linux-gnu"};
+ ArrayRef<StringRef> MultiarchIncludeDirs;
+ switch (getTriple().getArch()) {
+ case llvm::Triple::x86_64:
+ MultiarchIncludeDirs = X86_64MultiarchIncludeDirs;
+ break;
+ case llvm::Triple::x86:
+ MultiarchIncludeDirs = X86MultiarchIncludeDirs;
+ break;
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be:
+ MultiarchIncludeDirs = AArch64MultiarchIncludeDirs;
+ break;
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ if (getTriple().getEnvironment() == llvm::Triple::GNUEABIHF)
+ MultiarchIncludeDirs = ARMHFMultiarchIncludeDirs;
+ else
+ MultiarchIncludeDirs = ARMMultiarchIncludeDirs;
+ break;
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumbeb:
+ if (getTriple().getEnvironment() == llvm::Triple::GNUEABIHF)
+ MultiarchIncludeDirs = ARMEBHFMultiarchIncludeDirs;
+ else
+ MultiarchIncludeDirs = ARMEBMultiarchIncludeDirs;
+ break;
+ case llvm::Triple::mips:
+ MultiarchIncludeDirs = MIPSMultiarchIncludeDirs;
+ break;
+ case llvm::Triple::mipsel:
+ MultiarchIncludeDirs = MIPSELMultiarchIncludeDirs;
+ break;
+ case llvm::Triple::mips64:
+ MultiarchIncludeDirs = MIPS64MultiarchIncludeDirs;
+ break;
+ case llvm::Triple::mips64el:
+ MultiarchIncludeDirs = MIPS64ELMultiarchIncludeDirs;
+ break;
+ case llvm::Triple::ppc:
+ MultiarchIncludeDirs = PPCMultiarchIncludeDirs;
+ break;
+ case llvm::Triple::ppc64:
+ MultiarchIncludeDirs = PPC64MultiarchIncludeDirs;
+ break;
+ case llvm::Triple::ppc64le:
+ MultiarchIncludeDirs = PPC64LEMultiarchIncludeDirs;
+ break;
+ case llvm::Triple::sparc:
+ MultiarchIncludeDirs = SparcMultiarchIncludeDirs;
+ break;
+ case llvm::Triple::sparcv9:
+ MultiarchIncludeDirs = Sparc64MultiarchIncludeDirs;
+ break;
+ case llvm::Triple::systemz:
+ MultiarchIncludeDirs = SYSTEMZMultiarchIncludeDirs;
+ break;
+ default:
+ break;
+ }
+ for (StringRef Dir : MultiarchIncludeDirs) {
+ if (D.getVFS().exists(SysRoot + Dir)) {
+ addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + Dir);
+ break;
+ }
+ }
+
+ if (getTriple().getOS() == llvm::Triple::RTEMS)
+ return;
+
+  // Add an include of '/include' directly. This isn't provided by default by
+  // system GCCs, but is often used with cross-compiling GCCs, and is harmless
+  // to add even when Clang is acting as if it were a system compiler.
+ addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/include");
+
+ addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/include");
+}
+
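+// Scan the given base directory for subdirectories named "v<N>" (libc++
+// installs its headers under, e.g., <base>/v1) and return the
+// highest-versioned one, or "" if none exists.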
+static std::string DetectLibcxxIncludePath(StringRef base) {
+ std::error_code EC;
+ int MaxVersion = 0;
+ std::string MaxVersionString = "";
+ for (llvm::sys::fs::directory_iterator LI(base, EC), LE; !EC && LI != LE;
+ LI = LI.increment(EC)) {
+ StringRef VersionText = llvm::sys::path::filename(LI->path());
+ int Version;
+ if (VersionText[0] == 'v' &&
+ !VersionText.slice(1, StringRef::npos).getAsInteger(10, Version)) {
+ if (Version > MaxVersion) {
+ MaxVersion = Version;
+ MaxVersionString = VersionText;
+ }
+ }
+ }
+ return MaxVersion ? (base + "/" + MaxVersionString).str() : "";
+}
+
+void Linux::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ // Check if libc++ has been enabled and provide its include paths if so.
+ if (GetCXXStdlibType(DriverArgs) == ToolChain::CST_Libcxx) {
+ const std::string LibCXXIncludePathCandidates[] = {
+ DetectLibcxxIncludePath(getDriver().Dir + "/../include/c++"),
+
+      // We also check the system path because for a long time this was the
+      // only place Clang looked.
+ // FIXME: We should really remove this. It doesn't make any sense.
+ DetectLibcxxIncludePath(getDriver().SysRoot + "/usr/include/c++")};
+ for (const auto &IncludePath : LibCXXIncludePathCandidates) {
+ if (IncludePath.empty() || !getVFS().exists(IncludePath))
+ continue;
+ // Add the first candidate that exists.
+ addSystemInclude(DriverArgs, CC1Args, IncludePath);
+ break;
+ }
+ return;
+ }
+
+ // We need a detected GCC installation on Linux to provide libstdc++'s
+ // headers. We handled the libc++ case above.
+ if (!GCCInstallation.isValid())
+ return;
+
+ // By default, look for the C++ headers in an include directory adjacent to
+  // the lib directory of the GCC installation. Note that this is expected to
+  // be equivalent to '/usr/include/c++/X.Y' in almost all cases.
+ StringRef LibDir = GCCInstallation.getParentLibPath();
+ StringRef InstallDir = GCCInstallation.getInstallPath();
+ StringRef TripleStr = GCCInstallation.getTriple().str();
+ const Multilib &Multilib = GCCInstallation.getMultilib();
+ const std::string GCCMultiarchTriple = getMultiarchTriple(
+ getDriver(), GCCInstallation.getTriple(), getDriver().SysRoot);
+ const std::string TargetMultiarchTriple =
+ getMultiarchTriple(getDriver(), getTriple(), getDriver().SysRoot);
+ const GCCVersion &Version = GCCInstallation.getVersion();
+
+ // The primary search for libstdc++ supports multiarch variants.
+ if (addLibStdCXXIncludePaths(LibDir.str() + "/../include",
+ "/c++/" + Version.Text, TripleStr,
+ GCCMultiarchTriple, TargetMultiarchTriple,
+ Multilib.includeSuffix(), DriverArgs, CC1Args))
+ return;
+
+ // Otherwise, fall back on a bunch of options which don't use multiarch
+ // layouts for simplicity.
+ const std::string LibStdCXXIncludePathCandidates[] = {
+ // Gentoo is weird and places its headers inside the GCC install,
+ // so if the first attempt to find the headers fails, try these patterns.
+ InstallDir.str() + "/include/g++-v" + Version.MajorStr + "." +
+ Version.MinorStr,
+ InstallDir.str() + "/include/g++-v" + Version.MajorStr,
+ // Android standalone toolchain has C++ headers in yet another place.
+ LibDir.str() + "/../" + TripleStr.str() + "/include/c++/" + Version.Text,
+ // Freescale SDK C++ headers are directly in <sysroot>/usr/include/c++,
+ // without a subdirectory corresponding to the gcc version.
+ LibDir.str() + "/../include/c++",
+ };
+
+ for (const auto &IncludePath : LibStdCXXIncludePathCandidates) {
+ if (addLibStdCXXIncludePaths(IncludePath, /*Suffix*/ "", TripleStr,
+ /*GCCMultiarchTriple*/ "",
+ /*TargetMultiarchTriple*/ "",
+ Multilib.includeSuffix(), DriverArgs, CC1Args))
+ break;
+ }
+}
+
+void Linux::AddCudaIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nocudainc))
+ return;
+
+ if (CudaInstallation.isValid()) {
+ addSystemInclude(DriverArgs, CC1Args, CudaInstallation.getIncludePath());
+ CC1Args.push_back("-include");
+ CC1Args.push_back("__clang_cuda_runtime_wrapper.h");
+ }
+}
+
+bool Linux::isPIEDefault() const { return getSanitizerArgs().requiresPIE(); }
+
+SanitizerMask Linux::getSupportedSanitizers() const {
+ const bool IsX86 = getTriple().getArch() == llvm::Triple::x86;
+ const bool IsX86_64 = getTriple().getArch() == llvm::Triple::x86_64;
+ const bool IsMIPS64 = getTriple().getArch() == llvm::Triple::mips64 ||
+ getTriple().getArch() == llvm::Triple::mips64el;
+ const bool IsPowerPC64 = getTriple().getArch() == llvm::Triple::ppc64 ||
+ getTriple().getArch() == llvm::Triple::ppc64le;
+ const bool IsAArch64 = getTriple().getArch() == llvm::Triple::aarch64 ||
+ getTriple().getArch() == llvm::Triple::aarch64_be;
+ SanitizerMask Res = ToolChain::getSupportedSanitizers();
+ Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::KernelAddress;
+ Res |= SanitizerKind::Vptr;
+ Res |= SanitizerKind::SafeStack;
+ if (IsX86_64 || IsMIPS64 || IsAArch64)
+ Res |= SanitizerKind::DataFlow;
+ if (IsX86_64 || IsMIPS64 || IsAArch64)
+ Res |= SanitizerKind::Leak;
+ if (IsX86_64 || IsMIPS64 || IsAArch64 || IsPowerPC64)
+ Res |= SanitizerKind::Thread;
+ if (IsX86_64 || IsMIPS64 || IsPowerPC64 || IsAArch64)
+ Res |= SanitizerKind::Memory;
+ if (IsX86 || IsX86_64) {
+ Res |= SanitizerKind::Function;
+ }
+ return Res;
+}
+
+void Linux::addProfileRTLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const {
+ if (!needsProfileRT(Args)) return;
+
+  // Add the linker option -u__llvm_runtime_variable to cause the runtime
+  // initialization module to be linked in.
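+  // (With the default instrumentation, the hook variable is typically
+  // "__llvm_profile_runtime", so this expands to "-u__llvm_profile_runtime".)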
+ if (!Args.hasArg(options::OPT_coverage))
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("-u", llvm::getInstrProfRuntimeHookVarName())));
+ ToolChain::addProfileRTLibs(Args, CmdArgs);
+}
+
+/// DragonFly - DragonFly tool chain which can call as(1) and ld(1) directly.
+
+DragonFly::DragonFly(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+
+ // Path mangling to find libexec
+ getProgramPaths().push_back(getDriver().getInstalledDir());
+ if (getDriver().getInstalledDir() != getDriver().Dir)
+ getProgramPaths().push_back(getDriver().Dir);
+
+ getFilePaths().push_back(getDriver().Dir + "/../lib");
+ getFilePaths().push_back("/usr/lib");
+ getFilePaths().push_back("/usr/lib/gcc50");
+}
+
+Tool *DragonFly::buildAssembler() const {
+ return new tools::dragonfly::Assembler(*this);
+}
+
+Tool *DragonFly::buildLinker() const {
+ return new tools::dragonfly::Linker(*this);
+}
+
+/// Stub for the CUDA toolchain. At the moment we don't have an assembler or
+/// linker and need the toolchain mainly to propagate device-side options
+/// to CC1.
+
+CudaToolChain::CudaToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Linux(D, Triple, Args) {}
+
+void
+CudaToolChain::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ Linux::addClangTargetOptions(DriverArgs, CC1Args);
+ CC1Args.push_back("-fcuda-is-device");
+
+ if (DriverArgs.hasArg(options::OPT_nocudalib))
+ return;
+
+ std::string LibDeviceFile = CudaInstallation.getLibDeviceFile(
+ DriverArgs.getLastArgValue(options::OPT_march_EQ));
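+  // With CUDA 7.0, -march=sm_35 typically resolves to a bitcode file named
+  // along the lines of "libdevice.compute_35.10.bc" (name varies by release).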
+ if (!LibDeviceFile.empty()) {
+ CC1Args.push_back("-mlink-cuda-bitcode");
+ CC1Args.push_back(DriverArgs.MakeArgString(LibDeviceFile));
+
+    // Libdevice in CUDA 7.0 requires a PTX version more recent than the one
+    // LLVM defaults to. Use PTX 4.2, which is the PTX version that came
+    // with CUDA 7.0.
+ CC1Args.push_back("-target-feature");
+ CC1Args.push_back("+ptx42");
+ }
+}
+
+llvm::opt::DerivedArgList *
+CudaToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
+ const char *BoundArch) const {
+ DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
+ const OptTable &Opts = getDriver().getOpts();
+
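+  // Note: for CUDA compilations BoundArch is the GPU arch, so, e.g.,
+  // "-Xarch_sm_35 -O2" (illustrative values) applies -O2 only to the sm_35
+  // device-side compilation.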
+ for (Arg *A : Args) {
+ if (A->getOption().matches(options::OPT_Xarch__)) {
+ // Skip this argument unless the architecture matches BoundArch
+ if (A->getValue(0) != StringRef(BoundArch))
+ continue;
+
+ unsigned Index = Args.getBaseArgs().MakeIndex(A->getValue(1));
+ unsigned Prev = Index;
+ std::unique_ptr<Arg> XarchArg(Opts.ParseOneArg(Args, Index));
+
+ // If the argument parsing failed or more than one argument was
+ // consumed, the -Xarch_ argument's parameter tried to consume
+ // extra arguments. Emit an error and ignore.
+ //
+ // We also want to disallow any options which would alter the
+ // driver behavior; that isn't going to work in our model. We
+ // use isDriverOption() as an approximation, although things
+ // like -O4 are going to slip through.
+ if (!XarchArg || Index > Prev + 1) {
+ getDriver().Diag(diag::err_drv_invalid_Xarch_argument_with_args)
+ << A->getAsString(Args);
+ continue;
+ } else if (XarchArg->getOption().hasFlag(options::DriverOption)) {
+ getDriver().Diag(diag::err_drv_invalid_Xarch_argument_isdriver)
+ << A->getAsString(Args);
+ continue;
+ }
+ XarchArg->setBaseArg(A);
+ A = XarchArg.release();
+ DAL->AddSynthesizedArg(A);
+ }
+ DAL->append(A);
+ }
+
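+  // Pin the device-side compilation to its bound GPU arch via -march.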
+ DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ), BoundArch);
+ return DAL;
+}
+
+/// XCore tool chain
+XCoreToolChain::XCoreToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : ToolChain(D, Triple, Args) {
+ // ProgramPaths are found via 'PATH' environment variable.
+}
+
+Tool *XCoreToolChain::buildAssembler() const {
+ return new tools::XCore::Assembler(*this);
+}
+
+Tool *XCoreToolChain::buildLinker() const {
+ return new tools::XCore::Linker(*this);
+}
+
+bool XCoreToolChain::isPICDefault() const { return false; }
+
+bool XCoreToolChain::isPIEDefault() const { return false; }
+
+bool XCoreToolChain::isPICDefaultForced() const { return false; }
+
+bool XCoreToolChain::SupportsProfiling() const { return false; }
+
+bool XCoreToolChain::hasBlocksRuntime() const { return false; }
+
+void XCoreToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdinc) ||
+ DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
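+  // XCC_C_INCLUDE_PATH holds directories separated by the host's environment
+  // path separator (':' on Unix, ';' on Windows).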
+ if (const char *cl_include_dir = getenv("XCC_C_INCLUDE_PATH")) {
+ SmallVector<StringRef, 4> Dirs;
+ const char EnvPathSeparatorStr[] = {llvm::sys::EnvPathSeparator, '\0'};
+ StringRef(cl_include_dir).split(Dirs, StringRef(EnvPathSeparatorStr));
+ ArrayRef<StringRef> DirVec(Dirs);
+ addSystemIncludes(DriverArgs, CC1Args, DirVec);
+ }
+}
+
+void XCoreToolChain::addClangTargetOptions(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ CC1Args.push_back("-nostdsysteminc");
+}
+
+void XCoreToolChain::AddClangCXXStdlibIncludeArgs(
+ const ArgList &DriverArgs, ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdinc) ||
+ DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+ if (const char *cl_include_dir = getenv("XCC_CPLUS_INCLUDE_PATH")) {
+ SmallVector<StringRef, 4> Dirs;
+ const char EnvPathSeparatorStr[] = {llvm::sys::EnvPathSeparator, '\0'};
+ StringRef(cl_include_dir).split(Dirs, StringRef(EnvPathSeparatorStr));
+ ArrayRef<StringRef> DirVec(Dirs);
+ addSystemIncludes(DriverArgs, CC1Args, DirVec);
+ }
+}
+
+void XCoreToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // We don't output any lib args. This is handled by xcc.
+}
+
+MyriadToolChain::MyriadToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Generic_GCC(D, Triple, Args) {
+ // If a target of 'sparc-myriad-elf' is specified to clang, it wants to use
+ // 'sparc-myriad--elf' (note the unknown OS) as the canonical triple.
+ // This won't work to find gcc. Instead we give the installation detector an
+ // extra triple, which is preferable to further hacks of the logic that at
+ // present is based solely on getArch(). In particular, it would be wrong to
+ // choose the myriad installation when targeting a non-myriad sparc install.
+ switch (Triple.getArch()) {
+ default:
+ D.Diag(diag::err_target_unsupported_arch) << Triple.getArchName()
+ << "myriad";
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcel:
+ case llvm::Triple::shave:
+ GCCInstallation.init(Triple, Args, {"sparc-myriad-elf"});
+ }
+
+ if (GCCInstallation.isValid()) {
+ // The contents of LibDir are independent of the version of gcc.
+ // This contains libc, libg (a superset of libc), libm, libstdc++, libssp.
+ SmallString<128> LibDir(GCCInstallation.getParentLibPath());
+ if (Triple.getArch() == llvm::Triple::sparcel)
+ llvm::sys::path::append(LibDir, "../sparc-myriad-elf/lib/le");
+ else
+ llvm::sys::path::append(LibDir, "../sparc-myriad-elf/lib");
+ addPathIfExists(D, LibDir, getFilePaths());
+
+ // This directory contains crt{i,n,begin,end}.o as well as libgcc.
+ // These files are tied to a particular version of gcc.
+ SmallString<128> CompilerSupportDir(GCCInstallation.getInstallPath());
+ // There are actually 4 choices: {le,be} x {fpu,nofpu}
+ // but as this toolchain is for LEON sparc, it can assume FPU.
+ if (Triple.getArch() == llvm::Triple::sparcel)
+ llvm::sys::path::append(CompilerSupportDir, "le");
+ addPathIfExists(D, CompilerSupportDir, getFilePaths());
+ }
+}
+
+MyriadToolChain::~MyriadToolChain() {}
+
+void MyriadToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (!DriverArgs.hasArg(options::OPT_nostdinc))
+ addSystemInclude(DriverArgs, CC1Args, getDriver().SysRoot + "/include");
+}
+
+void MyriadToolChain::AddClangCXXStdlibIncludeArgs(
+ const ArgList &DriverArgs, ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ // Only libstdc++, for now.
+ StringRef LibDir = GCCInstallation.getParentLibPath();
+ const GCCVersion &Version = GCCInstallation.getVersion();
+ StringRef TripleStr = GCCInstallation.getTriple().str();
+ const Multilib &Multilib = GCCInstallation.getMultilib();
+
+ addLibStdCXXIncludePaths(
+ LibDir.str() + "/../" + TripleStr.str() + "/include/c++/" + Version.Text,
+ "", TripleStr, "", "", Multilib.includeSuffix(), DriverArgs, CC1Args);
+}
+
+// MyriadToolChain handles several triples:
+// {shave,sparc{,el}}-myriad-{rtems,unknown}-elf
+Tool *MyriadToolChain::SelectTool(const JobAction &JA) const {
+ // The inherited method works fine if not targeting the SHAVE.
+ if (!isShaveCompilation(getTriple()))
+ return ToolChain::SelectTool(JA);
+ switch (JA.getKind()) {
+ case Action::PreprocessJobClass:
+ case Action::CompileJobClass:
+ if (!Compiler)
+ Compiler.reset(new tools::SHAVE::Compiler(*this));
+ return Compiler.get();
+ case Action::AssembleJobClass:
+ if (!Assembler)
+ Assembler.reset(new tools::SHAVE::Assembler(*this));
+ return Assembler.get();
+ default:
+ return ToolChain::getTool(JA.getKind());
+ }
+}
+
+Tool *MyriadToolChain::buildLinker() const {
+ return new tools::Myriad::Linker(*this);
+}
+
+WebAssembly::WebAssembly(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args)
+ : ToolChain(D, Triple, Args) {
+ // Use LLD by default.
+ DefaultLinker = "lld";
+}
+
+bool WebAssembly::IsMathErrnoDefault() const { return false; }
+
+bool WebAssembly::IsObjCNonFragileABIDefault() const { return true; }
+
+bool WebAssembly::UseObjCMixedDispatch() const { return true; }
+
+bool WebAssembly::isPICDefault() const { return false; }
+
+bool WebAssembly::isPIEDefault() const { return false; }
+
+bool WebAssembly::isPICDefaultForced() const { return false; }
+
+bool WebAssembly::IsIntegratedAssemblerDefault() const { return true; }
+
+// TODO: Support Objective-C.
+bool WebAssembly::SupportsObjCGC() const { return false; }
+
+bool WebAssembly::hasBlocksRuntime() const { return false; }
+
+// TODO: Support profiling.
+bool WebAssembly::SupportsProfiling() const { return false; }
+
+bool WebAssembly::HasNativeLLVMSupport() const { return true; }
+
+void WebAssembly::addClangTargetOptions(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasFlag(options::OPT_fuse_init_array,
+ options::OPT_fno_use_init_array, true))
+ CC1Args.push_back("-fuse-init-array");
+}
+
+Tool *WebAssembly::buildLinker() const {
+ return new tools::wasm::Linker(*this);
+}
+
+PS4CPU::PS4CPU(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+ if (Args.hasArg(options::OPT_static))
+ D.Diag(diag::err_drv_unsupported_opt_for_target) << "-static" << "PS4";
+
+ // Determine where to find the PS4 libraries. We use SCE_PS4_SDK_DIR
+ // if it exists; otherwise use the driver's installation path, which
+ // should be <SDK_DIR>/host_tools/bin.
+
+ SmallString<512> PS4SDKDir;
+ if (const char *EnvValue = getenv("SCE_PS4_SDK_DIR")) {
+ if (!llvm::sys::fs::exists(EnvValue))
+ getDriver().Diag(clang::diag::warn_drv_ps4_sdk_dir) << EnvValue;
+ PS4SDKDir = EnvValue;
+ } else {
+ PS4SDKDir = getDriver().Dir;
+ llvm::sys::path::append(PS4SDKDir, "/../../");
+ }
+
+ // By default, the driver won't report a warning if it can't find
+ // PS4's include or lib directories. This behavior could be changed if
+ // -Weverything or -Winvalid-or-nonexistent-directory options are passed.
+ // If -isysroot was passed, use that as the SDK base path.
+ std::string PrefixDir;
+ if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
+ PrefixDir = A->getValue();
+ if (!llvm::sys::fs::exists(PrefixDir))
+ getDriver().Diag(clang::diag::warn_missing_sysroot) << PrefixDir;
+ } else
+ PrefixDir = PS4SDKDir.str();
+
+ SmallString<512> PS4SDKIncludeDir(PrefixDir);
+ llvm::sys::path::append(PS4SDKIncludeDir, "target/include");
+ if (!Args.hasArg(options::OPT_nostdinc) &&
+ !Args.hasArg(options::OPT_nostdlibinc) &&
+ !Args.hasArg(options::OPT_isysroot) &&
+ !Args.hasArg(options::OPT__sysroot_EQ) &&
+ !llvm::sys::fs::exists(PS4SDKIncludeDir)) {
+ getDriver().Diag(clang::diag::warn_drv_unable_to_find_directory_expected)
+ << "PS4 system headers" << PS4SDKIncludeDir;
+ }
+
+ SmallString<512> PS4SDKLibDir(PS4SDKDir);
+ llvm::sys::path::append(PS4SDKLibDir, "target/lib");
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs) &&
+ !Args.hasArg(options::OPT__sysroot_EQ) && !Args.hasArg(options::OPT_E) &&
+ !Args.hasArg(options::OPT_c) && !Args.hasArg(options::OPT_S) &&
+ !Args.hasArg(options::OPT_emit_ast) &&
+ !llvm::sys::fs::exists(PS4SDKLibDir)) {
+ getDriver().Diag(clang::diag::warn_drv_unable_to_find_directory_expected)
+ << "PS4 system libraries" << PS4SDKLibDir;
+ return;
+ }
+ getFilePaths().push_back(PS4SDKLibDir.str());
+}
+
+Tool *PS4CPU::buildAssembler() const {
+ return new tools::PS4cpu::Assemble(*this);
+}
+
+Tool *PS4CPU::buildLinker() const { return new tools::PS4cpu::Link(*this); }
+
+bool PS4CPU::isPICDefault() const { return true; }
+
+bool PS4CPU::HasNativeLLVMSupport() const { return true; }
+
+SanitizerMask PS4CPU::getSupportedSanitizers() const {
+ SanitizerMask Res = ToolChain::getSupportedSanitizers();
+ Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::Vptr;
+ return Res;
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains.h b/contrib/llvm/tools/clang/lib/Driver/ToolChains.h
new file mode 100644
index 0000000..f4b6b15
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains.h
@@ -0,0 +1,1146 @@
+//===--- ToolChains.h - ToolChain Implementations ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_H
+
+#include "Tools.h"
+#include "clang/Basic/VersionTuple.h"
+#include "clang/Driver/Action.h"
+#include "clang/Driver/Multilib.h"
+#include "clang/Driver/ToolChain.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/Compiler.h"
+#include <set>
+#include <vector>
+
+namespace clang {
+namespace driver {
+namespace toolchains {
+
+/// Generic_GCC - A tool chain using the 'gcc' command to perform
+/// all subcommands; this relies on gcc translating the majority of
+/// command line options.
+class LLVM_LIBRARY_VISIBILITY Generic_GCC : public ToolChain {
+public:
+ /// \brief Struct to store and manipulate GCC versions.
+ ///
+ /// We rely on assumptions about the form and structure of GCC version
+ /// numbers: they consist of at most three '.'-separated components, and each
+ /// component is a non-negative integer except for the last component. For
+ /// the last component we are very flexible in order to tolerate release
+ /// candidates or 'x' wildcards.
+ ///
+ /// Note that the ordering established among GCCVersions is based on the
+ /// preferred version string to use. For example we prefer versions without
+  /// a hard-coded patch number to those with a hard-coded patch number.
+ ///
+ /// Currently this doesn't provide any logic for textual suffixes to patches
+ /// in the way that (for example) Debian's version format does. If that ever
+ /// becomes necessary, it can be added.
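+  ///
+  /// For example, "4.9.2" parses to Major = 4, Minor = 9, Patch = 2, with
+  /// MajorStr == "4" and MinorStr == "9".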
+ struct GCCVersion {
+ /// \brief The unparsed text of the version.
+ std::string Text;
+
+ /// \brief The parsed major, minor, and patch numbers.
+ int Major, Minor, Patch;
+
+    /// \brief The text of the parsed major and minor version numbers.
+ std::string MajorStr, MinorStr;
+
+ /// \brief Any textual suffix on the patch number.
+ std::string PatchSuffix;
+
+ static GCCVersion Parse(StringRef VersionText);
+ bool isOlderThan(int RHSMajor, int RHSMinor, int RHSPatch,
+ StringRef RHSPatchSuffix = StringRef()) const;
+ bool operator<(const GCCVersion &RHS) const {
+ return isOlderThan(RHS.Major, RHS.Minor, RHS.Patch, RHS.PatchSuffix);
+ }
+ bool operator>(const GCCVersion &RHS) const { return RHS < *this; }
+ bool operator<=(const GCCVersion &RHS) const { return !(*this > RHS); }
+ bool operator>=(const GCCVersion &RHS) const { return !(*this < RHS); }
+ };
+
+ /// \brief This is a class to find a viable GCC installation for Clang to
+ /// use.
+ ///
+ /// This class tries to find a GCC installation on the system, and report
+ /// information about it. It starts from the host information provided to the
+ /// Driver, and has logic for fuzzing that where appropriate.
+ class GCCInstallationDetector {
+ bool IsValid;
+ llvm::Triple GCCTriple;
+ const Driver &D;
+
+ // FIXME: These might be better as path objects.
+ std::string GCCInstallPath;
+ std::string GCCParentLibPath;
+
+ /// The primary multilib appropriate for the given flags.
+ Multilib SelectedMultilib;
+ /// On Biarch systems, this corresponds to the default multilib when
+ /// targeting the non-default multilib. Otherwise, it is empty.
+ llvm::Optional<Multilib> BiarchSibling;
+
+ GCCVersion Version;
+
+ // We retain the list of install paths that were considered and rejected in
+ // order to print out detailed information in verbose mode.
+ std::set<std::string> CandidateGCCInstallPaths;
+
+ /// The set of multilibs that the detected installation supports.
+ MultilibSet Multilibs;
+
+ public:
+ explicit GCCInstallationDetector(const Driver &D) : IsValid(false), D(D) {}
+ void init(const llvm::Triple &TargetTriple, const llvm::opt::ArgList &Args,
+ ArrayRef<std::string> ExtraTripleAliases = None);
+
+ /// \brief Check whether we detected a valid GCC install.
+ bool isValid() const { return IsValid; }
+
+ /// \brief Get the GCC triple for the detected install.
+ const llvm::Triple &getTriple() const { return GCCTriple; }
+
+ /// \brief Get the detected GCC installation path.
+ StringRef getInstallPath() const { return GCCInstallPath; }
+
+ /// \brief Get the detected GCC parent lib path.
+ StringRef getParentLibPath() const { return GCCParentLibPath; }
+
+ /// \brief Get the detected Multilib
+ const Multilib &getMultilib() const { return SelectedMultilib; }
+
+ /// \brief Get the whole MultilibSet
+ const MultilibSet &getMultilibs() const { return Multilibs; }
+
+ /// Get the biarch sibling multilib (if it exists).
+ /// \return true iff such a sibling exists
+ bool getBiarchSibling(Multilib &M) const;
+
+ /// \brief Get the detected GCC version string.
+ const GCCVersion &getVersion() const { return Version; }
+
+ /// \brief Print information about the detected GCC installation.
+ void print(raw_ostream &OS) const;
+
+ private:
+ static void
+ CollectLibDirsAndTriples(const llvm::Triple &TargetTriple,
+ const llvm::Triple &BiarchTriple,
+ SmallVectorImpl<StringRef> &LibDirs,
+ SmallVectorImpl<StringRef> &TripleAliases,
+ SmallVectorImpl<StringRef> &BiarchLibDirs,
+ SmallVectorImpl<StringRef> &BiarchTripleAliases);
+
+ void ScanLibDirForGCCTriple(const llvm::Triple &TargetArch,
+ const llvm::opt::ArgList &Args,
+ const std::string &LibDir,
+ StringRef CandidateTriple,
+ bool NeedsBiarchSuffix = false);
+
+ void scanLibDirForGCCTripleSolaris(const llvm::Triple &TargetArch,
+ const llvm::opt::ArgList &Args,
+ const std::string &LibDir,
+ StringRef CandidateTriple,
+ bool NeedsBiarchSuffix = false);
+ };
+
+protected:
+ GCCInstallationDetector GCCInstallation;
+
+  /// \brief A class to find a viable CUDA installation.
+
+ class CudaInstallationDetector {
+ bool IsValid;
+ const Driver &D;
+ std::string CudaInstallPath;
+ std::string CudaLibPath;
+ std::string CudaLibDevicePath;
+ std::string CudaIncludePath;
+ llvm::StringMap<std::string> CudaLibDeviceMap;
+
+ public:
+ CudaInstallationDetector(const Driver &D) : IsValid(false), D(D) {}
+ void init(const llvm::Triple &TargetTriple, const llvm::opt::ArgList &Args);
+
+ /// \brief Check whether we detected a valid Cuda install.
+ bool isValid() const { return IsValid; }
+ /// \brief Print information about the detected CUDA installation.
+ void print(raw_ostream &OS) const;
+
+ /// \brief Get the detected Cuda installation path.
+ StringRef getInstallPath() const { return CudaInstallPath; }
+ /// \brief Get the detected Cuda Include path.
+ StringRef getIncludePath() const { return CudaIncludePath; }
+ /// \brief Get the detected Cuda library path.
+ StringRef getLibPath() const { return CudaLibPath; }
+ /// \brief Get the detected Cuda device library path.
+ StringRef getLibDevicePath() const { return CudaLibDevicePath; }
+    /// \brief Get the libdevice file for the given architecture.
+ std::string getLibDeviceFile(StringRef Gpu) const {
+ return CudaLibDeviceMap.lookup(Gpu);
+ }
+ };
+
+ CudaInstallationDetector CudaInstallation;
+
+public:
+ Generic_GCC(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ ~Generic_GCC() override;
+
+ void printVerboseInfo(raw_ostream &OS) const override;
+
+ bool IsUnwindTablesDefault() const override;
+ bool isPICDefault() const override;
+ bool isPIEDefault() const override;
+ bool isPICDefaultForced() const override;
+ bool IsIntegratedAssemblerDefault() const override;
+
+protected:
+ Tool *getTool(Action::ActionClass AC) const override;
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
+
+ /// \name ToolChain Implementation Helper Functions
+ /// @{
+
+ /// \brief Check whether the target triple's architecture is 64-bits.
+ bool isTarget64Bit() const { return getTriple().isArch64Bit(); }
+
+ /// \brief Check whether the target triple's architecture is 32-bits.
+ bool isTarget32Bit() const { return getTriple().isArch32Bit(); }
+
+ bool addLibStdCXXIncludePaths(Twine Base, Twine Suffix, StringRef GCCTriple,
+ StringRef GCCMultiarchTriple,
+ StringRef TargetMultiarchTriple,
+ Twine IncludeSuffix,
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const;
+
+ /// @}
+
+private:
+ mutable std::unique_ptr<tools::gcc::Preprocessor> Preprocess;
+ mutable std::unique_ptr<tools::gcc::Compiler> Compile;
+};
+
+class LLVM_LIBRARY_VISIBILITY MachO : public ToolChain {
+protected:
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
+ Tool *getTool(Action::ActionClass AC) const override;
+
+private:
+ mutable std::unique_ptr<tools::darwin::Lipo> Lipo;
+ mutable std::unique_ptr<tools::darwin::Dsymutil> Dsymutil;
+ mutable std::unique_ptr<tools::darwin::VerifyDebug> VerifyDebug;
+
+public:
+ MachO(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ ~MachO() override;
+
+ /// @name MachO specific toolchain API
+ /// {
+
+ /// Get the "MachO" arch name for a particular compiler invocation. For
+ /// example, Apple treats different ARM variations as distinct architectures.
+ StringRef getMachOArchName(const llvm::opt::ArgList &Args) const;
+
+ /// Add the linker arguments to link the ARC runtime library.
+ virtual void AddLinkARCArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const {}
+
+ /// Add the linker arguments to link the compiler runtime library.
+ virtual void AddLinkRuntimeLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+
+ virtual void addStartObjectFileArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const {
+ }
+
+ virtual void addMinVersionArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const {}
+
+  /// On some iOS platforms, the kernel and kernel modules were built
+  /// statically. Is this such a target?
+ virtual bool isKernelStatic() const { return false; }
+
+ /// Is the target either iOS or an iOS simulator?
+ bool isTargetIOSBased() const { return false; }
+
+ void AddLinkRuntimeLib(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ StringRef DarwinLibName, bool AlwaysLink = false,
+ bool IsEmbedded = false, bool AddRPath = false) const;
+
+ /// Add any profiling runtime libraries that are needed. This is essentially a
+  /// MachO-specific version of addProfileRT in Tools.cpp.
+ void addProfileRTLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override {
+ // There aren't any profiling libs for embedded targets currently.
+ }
+
+ /// }
+ /// @name ToolChain Implementation
+ /// {
+
+ std::string ComputeEffectiveClangTriple(const llvm::opt::ArgList &Args,
+ types::ID InputType) const override;
+
+ types::ID LookupTypeForExtension(const char *Ext) const override;
+
+ bool HasNativeLLVMSupport() const override;
+
+ llvm::opt::DerivedArgList *
+ TranslateArgs(const llvm::opt::DerivedArgList &Args,
+ const char *BoundArch) const override;
+
+ bool IsBlocksDefault() const override {
+ // Always allow blocks on Apple; users interested in versioning are
+ // expected to use /usr/include/Block.h.
+ return true;
+ }
+ bool IsIntegratedAssemblerDefault() const override {
+ // Default integrated assembler to on for Apple's MachO targets.
+ return true;
+ }
+
+ bool IsMathErrnoDefault() const override { return false; }
+
+ bool IsEncodeExtendedBlockSignatureDefault() const override { return true; }
+
+ bool IsObjCNonFragileABIDefault() const override {
+    // The non-fragile ABI is the default for everything but i386.
+ return getTriple().getArch() != llvm::Triple::x86;
+ }
+
+ bool UseObjCMixedDispatch() const override { return true; }
+
+ bool IsUnwindTablesDefault() const override;
+
+ RuntimeLibType GetDefaultRuntimeLibType() const override {
+ return ToolChain::RLT_CompilerRT;
+ }
+
+ bool isPICDefault() const override;
+ bool isPIEDefault() const override;
+ bool isPICDefaultForced() const override;
+
+ bool SupportsProfiling() const override;
+
+ bool SupportsObjCGC() const override { return false; }
+
+ bool UseDwarfDebugFlags() const override;
+
+ bool UseSjLjExceptions(const llvm::opt::ArgList &Args) const override {
+ return false;
+ }
+
+ /// }
+};
+
+/// Darwin - The base Darwin tool chain.
+class LLVM_LIBRARY_VISIBILITY Darwin : public MachO {
+public:
+ /// Whether the information on the target has been initialized.
+ //
+ // FIXME: This should be eliminated. What we want to do is make this part of
+ // the "default target for arguments" selection process, once we get out of
+ // the argument translation business.
+ mutable bool TargetInitialized;
+
+ enum DarwinPlatformKind {
+ MacOS,
+ IPhoneOS,
+ IPhoneOSSimulator,
+ TvOS,
+ TvOSSimulator,
+ WatchOS,
+ WatchOSSimulator
+ };
+
+ mutable DarwinPlatformKind TargetPlatform;
+
+ /// The OS version we are targeting.
+ mutable VersionTuple TargetVersion;
+
+private:
+ void AddDeploymentTarget(llvm::opt::DerivedArgList &Args) const;
+
+public:
+ Darwin(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ ~Darwin() override;
+
+ std::string ComputeEffectiveClangTriple(const llvm::opt::ArgList &Args,
+ types::ID InputType) const override;
+
+ /// @name Apple Specific Toolchain Implementation
+ /// {
+
+ void addMinVersionArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+ void addStartObjectFileArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+ bool isKernelStatic() const override {
+ return (!(isTargetIPhoneOS() && !isIPhoneOSVersionLT(6, 0)) &&
+ !isTargetWatchOS());
+ }
+
+ void addProfileRTLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+protected:
+ /// }
+ /// @name Darwin specific Toolchain functions
+ /// {
+
+ // FIXME: Eliminate these ...Target functions and derive separate tool chains
+ // for these targets and put version in constructor.
+ void setTarget(DarwinPlatformKind Platform, unsigned Major, unsigned Minor,
+ unsigned Micro) const {
+ // FIXME: For now, allow reinitialization as long as values don't
+ // change. This will go away when we move away from argument translation.
+ if (TargetInitialized && TargetPlatform == Platform &&
+ TargetVersion == VersionTuple(Major, Minor, Micro))
+ return;
+
+ assert(!TargetInitialized && "Target already initialized!");
+ TargetInitialized = true;
+ TargetPlatform = Platform;
+ TargetVersion = VersionTuple(Major, Minor, Micro);
+ }
+
+ bool isTargetIPhoneOS() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetPlatform == IPhoneOS || TargetPlatform == TvOS;
+ }
+
+ bool isTargetIOSSimulator() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetPlatform == IPhoneOSSimulator ||
+ TargetPlatform == TvOSSimulator;
+ }
+
+ bool isTargetIOSBased() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return isTargetIPhoneOS() || isTargetIOSSimulator();
+ }
+
+ bool isTargetTvOS() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetPlatform == TvOS;
+ }
+
+ bool isTargetTvOSSimulator() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetPlatform == TvOSSimulator;
+ }
+
+ bool isTargetTvOSBased() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetPlatform == TvOS || TargetPlatform == TvOSSimulator;
+ }
+
+ bool isTargetWatchOS() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetPlatform == WatchOS;
+ }
+
+ bool isTargetWatchOSSimulator() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetPlatform == WatchOSSimulator;
+ }
+
+ bool isTargetWatchOSBased() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetPlatform == WatchOS || TargetPlatform == WatchOSSimulator;
+ }
+
+ bool isTargetMacOS() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetPlatform == MacOS;
+ }
+
+ bool isTargetInitialized() const { return TargetInitialized; }
+
+ VersionTuple getTargetVersion() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetVersion;
+ }
+
+ bool isIPhoneOSVersionLT(unsigned V0, unsigned V1 = 0,
+ unsigned V2 = 0) const {
+ assert(isTargetIOSBased() && "Unexpected call for non iOS target!");
+ return TargetVersion < VersionTuple(V0, V1, V2);
+ }
+
+ bool isMacosxVersionLT(unsigned V0, unsigned V1 = 0, unsigned V2 = 0) const {
+ assert(isTargetMacOS() && "Unexpected call for non OS X target!");
+ return TargetVersion < VersionTuple(V0, V1, V2);
+ }
+
+public:
+ /// }
+ /// @name ToolChain Implementation
+ /// {
+
+ // Darwin tools support multiple architectures (e.g., i386 and x86_64) and
+ // most development is done against SDKs, so compiling for a different
+ // architecture should not get any special treatment.
+ bool isCrossCompiling() const override { return false; }
+
+ llvm::opt::DerivedArgList *
+ TranslateArgs(const llvm::opt::DerivedArgList &Args,
+ const char *BoundArch) const override;
+
+ ObjCRuntime getDefaultObjCRuntime(bool isNonFragile) const override;
+ bool hasBlocksRuntime() const override;
+
+ bool UseObjCMixedDispatch() const override {
+ // This is only used with the non-fragile ABI and non-legacy dispatch.
+
+ // Mixed dispatch is used everywhere except OS X before 10.6.
+ return !(isTargetMacOS() && isMacosxVersionLT(10, 6));
+ }
+
+ unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
+ // Stack protectors default to on for user code on 10.5, and for
+ // everything in 10.6 and beyond.
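+ // Illustratively, targeting macOS 10.7 yields level 1 (-fstack-protector)
+ // while kernel code on macOS 10.4 yields 0; iOS and watchOS always get 1.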
+ if (isTargetIOSBased() || isTargetWatchOSBased())
+ return 1;
+ else if (isTargetMacOS() && !isMacosxVersionLT(10, 6))
+ return 1;
+ else if (isTargetMacOS() && !isMacosxVersionLT(10, 5) && !KernelOrKext)
+ return 1;
+
+ return 0;
+ }
+
+ bool SupportsObjCGC() const override;
+
+ void CheckObjCARC() const override;
+
+ bool UseSjLjExceptions(const llvm::opt::ArgList &Args) const override;
+
+ SanitizerMask getSupportedSanitizers() const override;
+};
+
+/// DarwinClang - The Darwin toolchain used by Clang.
+class LLVM_LIBRARY_VISIBILITY DarwinClang : public Darwin {
+public:
+ DarwinClang(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ /// @name Apple ToolChain Implementation
+ /// {
+
+ void AddLinkRuntimeLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+ void AddCCKextLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+ void addClangWarningOptions(llvm::opt::ArgStringList &CC1Args) const override;
+
+ void AddLinkARCArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+ unsigned GetDefaultDwarfVersion() const override { return 2; }
+ // Until dtrace (via CTF) and LLDB can deal with distributed debug info,
+ // Darwin defaults to standalone/full debug info.
+ bool GetDefaultStandaloneDebug() const override { return true; }
+ llvm::DebuggerKind getDefaultDebuggerTuning() const override {
+ return llvm::DebuggerKind::LLDB;
+ }
+
+ /// }
+
+private:
+ void AddLinkSanitizerLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ StringRef Sanitizer) const;
+};
+
+class LLVM_LIBRARY_VISIBILITY Generic_ELF : public Generic_GCC {
+ virtual void anchor();
+
+public:
+ Generic_ELF(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args)
+ : Generic_GCC(D, Triple, Args) {}
+
+ void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY CloudABI : public Generic_ELF {
+public:
+ CloudABI(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ bool HasNativeLLVMSupport() const override { return true; }
+
+ bool IsMathErrnoDefault() const override { return false; }
+ bool IsObjCNonFragileABIDefault() const override { return true; }
+
+ CXXStdlibType
+ GetCXXStdlibType(const llvm::opt::ArgList &Args) const override {
+ return ToolChain::CST_Libcxx;
+ }
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+ bool isPIEDefault() const override { return false; }
+
+protected:
+ Tool *buildLinker() const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Solaris : public Generic_GCC {
+public:
+ Solaris(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ bool IsIntegratedAssemblerDefault() const override { return true; }
+
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ unsigned GetDefaultDwarfVersion() const override { return 2; }
+
+protected:
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY MinGW : public ToolChain {
+public:
+ MinGW(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ bool IsIntegratedAssemblerDefault() const override;
+ bool IsUnwindTablesDefault() const override;
+ bool isPICDefault() const override;
+ bool isPIEDefault() const override;
+ bool isPICDefaultForced() const override;
+ bool UseSEHExceptions() const;
+
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+protected:
+ Tool *getTool(Action::ActionClass AC) const override;
+ Tool *buildLinker() const override;
+ Tool *buildAssembler() const override;
+
+private:
+ std::string Base;
+ std::string GccLibDir;
+ std::string Ver;
+ std::string Arch;
+ mutable std::unique_ptr<tools::gcc::Preprocessor> Preprocessor;
+ mutable std::unique_ptr<tools::gcc::Compiler> Compiler;
+ void findGccLibDir();
+};
+
+class LLVM_LIBRARY_VISIBILITY OpenBSD : public Generic_ELF {
+public:
+ OpenBSD(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ bool IsMathErrnoDefault() const override { return false; }
+ bool IsObjCNonFragileABIDefault() const override { return true; }
+ bool isPIEDefault() const override { return true; }
+
+ unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
+ return 2;
+ }
+ unsigned GetDefaultDwarfVersion() const override { return 2; }
+
+protected:
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Bitrig : public Generic_ELF {
+public:
+ Bitrig(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ bool IsMathErrnoDefault() const override { return false; }
+ bool IsObjCNonFragileABIDefault() const override { return true; }
+
+ CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+ unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
+ return 1;
+ }
+
+protected:
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY FreeBSD : public Generic_ELF {
+public:
+ FreeBSD(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ bool HasNativeLLVMSupport() const override;
+
+ bool IsMathErrnoDefault() const override { return false; }
+ bool IsObjCNonFragileABIDefault() const override { return true; }
+
+ CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ bool UseSjLjExceptions(const llvm::opt::ArgList &Args) const override;
+ bool isPIEDefault() const override;
+ SanitizerMask getSupportedSanitizers() const override;
+ unsigned GetDefaultDwarfVersion() const override { return 2; }
+ // Until dtrace (via CTF) and LLDB can deal with distributed debug info,
+ // FreeBSD defaults to standalone/full debug info.
+ bool GetDefaultStandaloneDebug() const override { return true; }
+ llvm::DebuggerKind getDefaultDebuggerTuning() const override {
+ return llvm::DebuggerKind::LLDB;
+ }
+
+protected:
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY NetBSD : public Generic_ELF {
+public:
+ NetBSD(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ bool IsMathErrnoDefault() const override { return false; }
+ bool IsObjCNonFragileABIDefault() const override { return true; }
+
+ CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ bool IsUnwindTablesDefault() const override { return true; }
+
+protected:
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Minix : public Generic_ELF {
+public:
+ Minix(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+protected:
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY DragonFly : public Generic_ELF {
+public:
+ DragonFly(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ bool IsMathErrnoDefault() const override { return false; }
+
+protected:
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linux : public Generic_ELF {
+public:
+ Linux(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ bool HasNativeLLVMSupport() const override;
+
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ bool isPIEDefault() const override;
+ SanitizerMask getSupportedSanitizers() const override;
+ void addProfileRTLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+ virtual std::string computeSysRoot() const;
+
+ std::vector<std::string> ExtraOpts;
+
+protected:
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY CudaToolChain : public Linux {
+public:
+ CudaToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ llvm::opt::DerivedArgList *
+ TranslateArgs(const llvm::opt::DerivedArgList &Args,
+ const char *BoundArch) const override;
+ void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY MipsLLVMToolChain : public Linux {
+protected:
+ Tool *buildLinker() const override;
+
+public:
+ MipsLLVMToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+ std::string getCompilerRT(const llvm::opt::ArgList &Args, StringRef Component,
+ bool Shared = false) const override;
+
+ std::string computeSysRoot() const override;
+
+ RuntimeLibType GetDefaultRuntimeLibType() const override {
+ return GCCInstallation.isValid() ? RuntimeLibType::RLT_Libgcc
+ : RuntimeLibType::RLT_CompilerRT;
+ }
+
+private:
+ Multilib SelectedMultilib;
+ std::string LibSuffix;
+};
+
+class LLVM_LIBRARY_VISIBILITY HexagonToolChain : public Linux {
+protected:
+ GCCVersion GCCLibAndIncVersion;
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
+
+public:
+ HexagonToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ ~HexagonToolChain() override;
+
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+
+ StringRef GetGCCLibAndIncVersion() const { return GCCLibAndIncVersion.Text; }
+ bool IsIntegratedAssemblerDefault() const override {
+ return true;
+ }
+
+ std::string getHexagonTargetDir(
+ const std::string &InstalledDir,
+ const SmallVectorImpl<std::string> &PrefixDirs) const;
+ void getHexagonLibraryPaths(const llvm::opt::ArgList &Args,
+ ToolChain::path_list &LibPaths) const;
+
+ static const StringRef GetDefaultCPU();
+ static const StringRef GetTargetCPUVersion(const llvm::opt::ArgList &Args);
+
+ static Optional<unsigned> getSmallDataThreshold(
+ const llvm::opt::ArgList &Args);
+};
+
+class LLVM_LIBRARY_VISIBILITY AMDGPUToolChain : public Generic_ELF {
+protected:
+ Tool *buildLinker() const override;
+
+public:
+ AMDGPUToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ bool IsIntegratedAssemblerDefault() const override { return true; }
+};
+
+class LLVM_LIBRARY_VISIBILITY NaClToolChain : public Generic_ELF {
+public:
+ NaClToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+ bool IsIntegratedAssemblerDefault() const override {
+ return getTriple().getArch() == llvm::Triple::mipsel;
+ }
+
+ // Get the path to the file containing NaCl's ARM macros.
+ // It lives in NaClToolChain because the ARMAssembler tool needs a
+ // const char * that it can pass around.
+ const char *GetNaClArmMacrosPath() const { return NaClArmMacrosPath.c_str(); }
+
+ std::string ComputeEffectiveClangTriple(const llvm::opt::ArgList &Args,
+ types::ID InputType) const override;
+
+protected:
+ Tool *buildLinker() const override;
+ Tool *buildAssembler() const override;
+
+private:
+ std::string NaClArmMacrosPath;
+};
+
+/// TCEToolChain - A tool chain using the llvm bitcode tools to perform
+/// all subcommands. See http://tce.cs.tut.fi for our peculiar target.
+class LLVM_LIBRARY_VISIBILITY TCEToolChain : public ToolChain {
+public:
+ TCEToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ ~TCEToolChain() override;
+
+ bool IsMathErrnoDefault() const override;
+ bool isPICDefault() const override;
+ bool isPIEDefault() const override;
+ bool isPICDefaultForced() const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY MSVCToolChain : public ToolChain {
+public:
+ MSVCToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ llvm::opt::DerivedArgList *
+ TranslateArgs(const llvm::opt::DerivedArgList &Args,
+ const char *BoundArch) const override;
+
+ bool IsIntegratedAssemblerDefault() const override;
+ bool IsUnwindTablesDefault() const override;
+ bool isPICDefault() const override;
+ bool isPIEDefault() const override;
+ bool isPICDefaultForced() const override;
+
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ bool getWindowsSDKDir(std::string &path, int &major,
+ std::string &windowsSDKIncludeVersion,
+ std::string &windowsSDKLibVersion) const;
+ bool getWindowsSDKLibraryPath(std::string &path) const;
+ /// \brief Check if Universal CRT should be used if available
+ bool useUniversalCRT(std::string &visualStudioDir) const;
+ bool getUniversalCRTSdkDir(std::string &path, std::string &ucrtVersion) const;
+ bool getUniversalCRTLibraryPath(std::string &path) const;
+ bool getVisualStudioInstallDir(std::string &path) const;
+ bool getVisualStudioBinariesFolder(const char *clangProgramPath,
+ std::string &path) const;
+
+ std::string ComputeEffectiveClangTriple(const llvm::opt::ArgList &Args,
+ types::ID InputType) const override;
+ SanitizerMask getSupportedSanitizers() const override;
+
+protected:
+ void AddSystemIncludeWithSubfolder(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ const std::string &folder,
+ const Twine &subfolder1,
+ const Twine &subfolder2 = "",
+ const Twine &subfolder3 = "") const;
+
+ Tool *buildLinker() const override;
+ Tool *buildAssembler() const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY CrossWindowsToolChain : public Generic_GCC {
+public:
+ CrossWindowsToolChain(const Driver &D, const llvm::Triple &T,
+ const llvm::opt::ArgList &Args);
+
+ bool IsIntegratedAssemblerDefault() const override { return true; }
+ bool IsUnwindTablesDefault() const override;
+ bool isPICDefault() const override;
+ bool isPIEDefault() const override;
+ bool isPICDefaultForced() const override;
+
+ unsigned int GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
+ return 0;
+ }
+
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+ SanitizerMask getSupportedSanitizers() const override;
+
+protected:
+ Tool *buildLinker() const override;
+ Tool *buildAssembler() const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY XCoreToolChain : public ToolChain {
+public:
+ XCoreToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+protected:
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
+
+public:
+ bool isPICDefault() const override;
+ bool isPIEDefault() const override;
+ bool isPICDefaultForced() const override;
+ bool SupportsProfiling() const override;
+ bool hasBlocksRuntime() const override;
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+};
+
+/// MyriadToolChain - A tool chain using either clang or the external compiler
+/// installed by the Movidius SDK to perform all subcommands.
+class LLVM_LIBRARY_VISIBILITY MyriadToolChain : public Generic_GCC {
+public:
+ MyriadToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ ~MyriadToolChain() override;
+
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ Tool *SelectTool(const JobAction &JA) const override;
+ unsigned GetDefaultDwarfVersion() const override { return 2; }
+
+protected:
+ Tool *buildLinker() const override;
+ bool isShaveCompilation(const llvm::Triple &T) const {
+ return T.getArch() == llvm::Triple::shave;
+ }
+
+private:
+ mutable std::unique_ptr<Tool> Compiler;
+ mutable std::unique_ptr<Tool> Assembler;
+};
+
+class LLVM_LIBRARY_VISIBILITY WebAssembly final : public ToolChain {
+public:
+ WebAssembly(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+private:
+ bool IsMathErrnoDefault() const override;
+ bool IsObjCNonFragileABIDefault() const override;
+ bool UseObjCMixedDispatch() const override;
+ bool isPICDefault() const override;
+ bool isPIEDefault() const override;
+ bool isPICDefaultForced() const override;
+ bool IsIntegratedAssemblerDefault() const override;
+ bool hasBlocksRuntime() const override;
+ bool SupportsObjCGC() const override;
+ bool SupportsProfiling() const override;
+ bool HasNativeLLVMSupport() const override;
+ void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ Tool *buildLinker() const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY PS4CPU : public Generic_ELF {
+public:
+ PS4CPU(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ bool IsMathErrnoDefault() const override { return false; }
+ bool IsObjCNonFragileABIDefault() const override { return true; }
+ bool HasNativeLLVMSupport() const override;
+ bool isPICDefault() const override;
+
+ unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
+ return 2; // SSPStrong
+ }
+
+ llvm::DebuggerKind getDefaultDebuggerTuning() const override {
+ return llvm::DebuggerKind::SCE;
+ }
+
+ SanitizerMask getSupportedSanitizers() const override;
+
+protected:
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
+};
+
+} // end namespace toolchains
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_H
diff --git a/contrib/llvm/tools/clang/lib/Driver/Tools.cpp b/contrib/llvm/tools/clang/lib/Driver/Tools.cpp
new file mode 100644
index 0000000..8468105
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Tools.cpp
@@ -0,0 +1,10580 @@
+//===--- Tools.cpp - Tools Implementations ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Tools.h"
+#include "InputInfo.h"
+#include "ToolChains.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/ObjCRuntime.h"
+#include "clang/Basic/Version.h"
+#include "clang/Config/config.h"
+#include "clang/Driver/Action.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Job.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/SanitizerArgs.h"
+#include "clang/Driver/ToolChain.h"
+#include "clang/Driver/Util.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Option/Option.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/Compression.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/TargetParser.h"
+
+#ifdef LLVM_ON_UNIX
+#include <unistd.h> // For getuid().
+#endif
+
+using namespace clang::driver;
+using namespace clang::driver::tools;
+using namespace clang;
+using namespace llvm::opt;
+
+static void handleTargetFeaturesGroup(const ArgList &Args,
+ std::vector<const char *> &Features,
+ OptSpecifier Group) {
+ for (const Arg *A : Args.filtered(Group)) {
+ StringRef Name = A->getOption().getName();
+ A->claim();
+
+ // Skip over "-m".
+ assert(Name.startswith("m") && "Invalid feature name.");
+ Name = Name.substr(1);
+
+ bool IsNegative = Name.startswith("no-");
+ if (IsNegative)
+ Name = Name.substr(3);
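+ // For example, -mavx becomes the backend feature "+avx" here and
+ // -mno-avx becomes "-avx" (illustrative names; the actual set comes
+ // from the option table).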
+ Features.push_back(Args.MakeArgString((IsNegative ? "-" : "+") + Name));
+ }
+}
+
+static const char *getSparcAsmModeForCPU(StringRef Name,
+ const llvm::Triple &Triple) {
+ if (Triple.getArch() == llvm::Triple::sparcv9) {
+ return llvm::StringSwitch<const char *>(Name)
+ .Case("niagara", "-Av9b")
+ .Case("niagara2", "-Av9b")
+ .Case("niagara3", "-Av9d")
+ .Case("niagara4", "-Av9d")
+ .Default("-Av9");
+ } else {
+ return llvm::StringSwitch<const char *>(Name)
+ .Case("v8", "-Av8")
+ .Case("supersparc", "-Av8")
+ .Case("sparclite", "-Asparclite")
+ .Case("f934", "-Asparclite")
+ .Case("hypersparc", "-Av8")
+ .Case("sparclite86x", "-Asparclite")
+ .Case("sparclet", "-Asparclet")
+ .Case("tsc701", "-Asparclet")
+ .Case("v9", "-Av8plus")
+ .Case("ultrasparc", "-Av8plus")
+ .Case("ultrasparc3", "-Av8plus")
+ .Case("niagara", "-Av8plusb")
+ .Case("niagara2", "-Av8plusb")
+ .Case("niagara3", "-Av8plusd")
+ .Case("niagara4", "-Av8plusd")
+ .Default("-Av8");
+ }
+}
+
+/// CheckPreprocessingOptions - Perform some validation of preprocessing
+/// arguments that is shared with gcc.
+static void CheckPreprocessingOptions(const Driver &D, const ArgList &Args) {
+ if (Arg *A = Args.getLastArg(options::OPT_C, options::OPT_CC)) {
+ if (!Args.hasArg(options::OPT_E) && !Args.hasArg(options::OPT__SLASH_P) &&
+ !Args.hasArg(options::OPT__SLASH_EP) && !D.CCCIsCPP()) {
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << A->getBaseArg().getAsString(Args)
+ << (D.IsCLMode() ? "/E, /P or /EP" : "-E");
+ }
+ }
+}
+
+/// CheckCodeGenerationOptions - Perform some validation of code generation
+/// arguments that is shared with gcc.
+static void CheckCodeGenerationOptions(const Driver &D, const ArgList &Args) {
+ // In gcc, only ARM checks this, but it seems reasonable to check universally.
+ if (Args.hasArg(options::OPT_static))
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_dynamic, options::OPT_mdynamic_no_pic))
+ D.Diag(diag::err_drv_argument_not_allowed_with) << A->getAsString(Args)
+ << "-static";
+}
+
+// Add backslashes to escape spaces and other backslashes.
+// This is used for the space-separated argument list specified with
+// the -dwarf-debug-flags option.
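+// For example, the argument `a b\c` is rendered as `a\ b\\c`.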
+static void EscapeSpacesAndBackslashes(const char *Arg,
+ SmallVectorImpl<char> &Res) {
+ for (; *Arg; ++Arg) {
+ switch (*Arg) {
+ default:
+ break;
+ case ' ':
+ case '\\':
+ Res.push_back('\\');
+ break;
+ }
+ Res.push_back(*Arg);
+ }
+}
+
+// Quote target names for inclusion in GNU Make dependency files.
+// Only the characters '$', '#', ' ', '\t' are quoted.
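+// For example, `foo bar.o` becomes `foo\ bar.o`, `$out` becomes `$$out`,
+// and `a#b` becomes `a\#b`.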
+static void QuoteTarget(StringRef Target, SmallVectorImpl<char> &Res) {
+ for (unsigned i = 0, e = Target.size(); i != e; ++i) {
+ switch (Target[i]) {
+ case ' ':
+ case '\t':
+ // Escape the preceding backslashes
+ for (int j = i - 1; j >= 0 && Target[j] == '\\'; --j)
+ Res.push_back('\\');
+
+ // Escape the space/tab
+ Res.push_back('\\');
+ break;
+ case '$':
+ Res.push_back('$');
+ break;
+ case '#':
+ Res.push_back('\\');
+ break;
+ default:
+ break;
+ }
+
+ Res.push_back(Target[i]);
+ }
+}
+
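+// Translate a PATH-style environment variable into command-line arguments.
+// As a sketch (assuming ':' as the separator, as on Unix): CPATH="foo::bar"
+// with ArgName "-I" emits -Ifoo -I. -Ibar, while a non-combined flag such
+// as "-c-isystem" is emitted as separate flag/path arguments.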
+static void addDirectoryList(const ArgList &Args, ArgStringList &CmdArgs,
+ const char *ArgName, const char *EnvVar) {
+ const char *DirList = ::getenv(EnvVar);
+ bool CombinedArg = false;
+
+ if (!DirList)
+ return; // Nothing to do.
+
+ StringRef Name(ArgName);
+ if (Name.equals("-I") || Name.equals("-L"))
+ CombinedArg = true;
+
+ StringRef Dirs(DirList);
+ if (Dirs.empty()) // Empty string should not add '.'.
+ return;
+
+ StringRef::size_type Delim;
+ while ((Delim = Dirs.find(llvm::sys::EnvPathSeparator)) != StringRef::npos) {
+ if (Delim == 0) { // Leading colon.
+ if (CombinedArg) {
+ CmdArgs.push_back(Args.MakeArgString(std::string(ArgName) + "."));
+ } else {
+ CmdArgs.push_back(ArgName);
+ CmdArgs.push_back(".");
+ }
+ } else {
+ if (CombinedArg) {
+ CmdArgs.push_back(
+ Args.MakeArgString(std::string(ArgName) + Dirs.substr(0, Delim)));
+ } else {
+ CmdArgs.push_back(ArgName);
+ CmdArgs.push_back(Args.MakeArgString(Dirs.substr(0, Delim)));
+ }
+ }
+ Dirs = Dirs.substr(Delim + 1);
+ }
+
+ if (Dirs.empty()) { // Trailing colon.
+ if (CombinedArg) {
+ CmdArgs.push_back(Args.MakeArgString(std::string(ArgName) + "."));
+ } else {
+ CmdArgs.push_back(ArgName);
+ CmdArgs.push_back(".");
+ }
+ } else { // Add the last path.
+ if (CombinedArg) {
+ CmdArgs.push_back(Args.MakeArgString(std::string(ArgName) + Dirs));
+ } else {
+ CmdArgs.push_back(ArgName);
+ CmdArgs.push_back(Args.MakeArgString(Dirs));
+ }
+ }
+}
+
+static void AddLinkerInputs(const ToolChain &TC, const InputInfoList &Inputs,
+ const ArgList &Args, ArgStringList &CmdArgs) {
+ const Driver &D = TC.getDriver();
+
+ // Add extra linker input arguments which are not treated as inputs
+ // (constructed via -Xarch_).
+ Args.AddAllArgValues(CmdArgs, options::OPT_Zlinker_input);
+
+ for (const auto &II : Inputs) {
+ if (!TC.HasNativeLLVMSupport() && types::isLLVMIR(II.getType()))
+ // Don't try to pass LLVM inputs unless we have native support.
+ D.Diag(diag::err_drv_no_linker_llvm_support) << TC.getTripleString();
+
+ // Add filenames immediately.
+ if (II.isFilename()) {
+ CmdArgs.push_back(II.getFilename());
+ continue;
+ }
+
+ // Otherwise, this is a linker input argument.
+ const Arg &A = II.getInputArg();
+
+ // Handle reserved library options.
+ if (A.getOption().matches(options::OPT_Z_reserved_lib_stdcxx))
+ TC.AddCXXStdlibLibArgs(Args, CmdArgs);
+ else if (A.getOption().matches(options::OPT_Z_reserved_lib_cckext))
+ TC.AddCCKextLibArgs(Args, CmdArgs);
+ else if (A.getOption().matches(options::OPT_z)) {
+ // Pass -z prefix for gcc linker compatibility.
+ A.claim();
+ A.render(Args, CmdArgs);
+ } else {
+ A.renderAsInput(Args, CmdArgs);
+ }
+ }
+
+ // LIBRARY_PATH - included following the user-specified library paths,
+ // and only supported on native toolchains.
+ if (!TC.isCrossCompiling())
+ addDirectoryList(Args, CmdArgs, "-L", "LIBRARY_PATH");
+}
+
+/// \brief Determine whether Objective-C automatic reference counting is
+/// enabled.
+static bool isObjCAutoRefCount(const ArgList &Args) {
+ return Args.hasFlag(options::OPT_fobjc_arc, options::OPT_fno_objc_arc, false);
+}
+
+/// \brief Determine whether we are linking the ObjC runtime.
+static bool isObjCRuntimeLinked(const ArgList &Args) {
+ if (isObjCAutoRefCount(Args)) {
+ Args.ClaimAllArgs(options::OPT_fobjc_link_runtime);
+ return true;
+ }
+ return Args.hasArg(options::OPT_fobjc_link_runtime);
+}
+
+static bool forwardToGCC(const Option &O) {
+ // Don't forward inputs from the original command line. They are added from
+ // InputInfoList.
+ return O.getKind() != Option::InputClass &&
+ !O.hasFlag(options::DriverOption) && !O.hasFlag(options::LinkerInput);
+}
+
+void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
+ const Driver &D, const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ToolChain *AuxToolChain) const {
+ Arg *A;
+
+ CheckPreprocessingOptions(D, Args);
+
+ Args.AddLastArg(CmdArgs, options::OPT_C);
+ Args.AddLastArg(CmdArgs, options::OPT_CC);
+
+ // Handle dependency file generation.
+ if ((A = Args.getLastArg(options::OPT_M, options::OPT_MM)) ||
+ (A = Args.getLastArg(options::OPT_MD)) ||
+ (A = Args.getLastArg(options::OPT_MMD))) {
+ // Determine the output location.
+ const char *DepFile;
+ if (Arg *MF = Args.getLastArg(options::OPT_MF)) {
+ DepFile = MF->getValue();
+ C.addFailureResultFile(DepFile, &JA);
+ } else if (Output.getType() == types::TY_Dependencies) {
+ DepFile = Output.getFilename();
+ } else if (A->getOption().matches(options::OPT_M) ||
+ A->getOption().matches(options::OPT_MM)) {
+ DepFile = "-";
+ } else {
+ DepFile = getDependencyFileName(Args, Inputs);
+ C.addFailureResultFile(DepFile, &JA);
+ }
+ CmdArgs.push_back("-dependency-file");
+ CmdArgs.push_back(DepFile);
+
+ // Add a default target if one wasn't specified.
+ if (!Args.hasArg(options::OPT_MT) && !Args.hasArg(options::OPT_MQ)) {
+ const char *DepTarget;
+
+ // If the user provided -o, that is the dependency target, except
+ // when we are only generating a dependency file.
+ Arg *OutputOpt = Args.getLastArg(options::OPT_o);
+ if (OutputOpt && Output.getType() != types::TY_Dependencies) {
+ DepTarget = OutputOpt->getValue();
+ } else {
+ // Otherwise derive from the base input.
+ //
+ // FIXME: This should use the computed output file location.
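+ // For example, with no -o and a base input of "subdir/foo.c", the
+ // dependency target computed below is "foo.o".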
+ SmallString<128> P(Inputs[0].getBaseInput());
+ llvm::sys::path::replace_extension(P, "o");
+ DepTarget = Args.MakeArgString(llvm::sys::path::filename(P));
+ }
+
+ CmdArgs.push_back("-MT");
+ SmallString<128> Quoted;
+ QuoteTarget(DepTarget, Quoted);
+ CmdArgs.push_back(Args.MakeArgString(Quoted));
+ }
+
+ if (A->getOption().matches(options::OPT_M) ||
+ A->getOption().matches(options::OPT_MD))
+ CmdArgs.push_back("-sys-header-deps");
+ if ((isa<PrecompileJobAction>(JA) &&
+ !Args.hasArg(options::OPT_fno_module_file_deps)) ||
+ Args.hasArg(options::OPT_fmodule_file_deps))
+ CmdArgs.push_back("-module-file-deps");
+ }
+
+ if (Args.hasArg(options::OPT_MG)) {
+ if (!A || A->getOption().matches(options::OPT_MD) ||
+ A->getOption().matches(options::OPT_MMD))
+ D.Diag(diag::err_drv_mg_requires_m_or_mm);
+ CmdArgs.push_back("-MG");
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_MP);
+ Args.AddLastArg(CmdArgs, options::OPT_MV);
+
+ // Convert all -MQ <target> args to -MT <quoted target>
+ for (const Arg *A : Args.filtered(options::OPT_MT, options::OPT_MQ)) {
+ A->claim();
+
+ if (A->getOption().matches(options::OPT_MQ)) {
+ CmdArgs.push_back("-MT");
+ SmallString<128> Quoted;
+ QuoteTarget(A->getValue(), Quoted);
+ CmdArgs.push_back(Args.MakeArgString(Quoted));
+
+ // -MT flag - no change
+ } else {
+ A->render(Args, CmdArgs);
+ }
+ }
+
+ // Add -i* options, and automatically translate to
+ // -include-pch/-include-pth for transparent PCH support. It's
+ // wonky, but we also look for .gch files so we can support seamless
+ // replacement in a build system already set up to generate
+ // .gch files.
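+ // For example, given -include foo.h we probe for foo.h.pch, then
+ // foo.h.pth, then foo.h.gch, and use the first one that exists.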
+ bool RenderedImplicitInclude = false;
+ for (const Arg *A : Args.filtered(options::OPT_clang_i_Group)) {
+ if (A->getOption().matches(options::OPT_include)) {
+ bool IsFirstImplicitInclude = !RenderedImplicitInclude;
+ RenderedImplicitInclude = true;
+
+ // Use PCH if the user requested it.
+ bool UsePCH = D.CCCUsePCH;
+
+ bool FoundPTH = false;
+ bool FoundPCH = false;
+ SmallString<128> P(A->getValue());
+ // We want the files to have a name like foo.h.pch. Add a dummy extension
+ // so that replace_extension does the right thing.
+ P += ".dummy";
+ if (UsePCH) {
+ llvm::sys::path::replace_extension(P, "pch");
+ if (llvm::sys::fs::exists(P))
+ FoundPCH = true;
+ }
+
+ if (!FoundPCH) {
+ llvm::sys::path::replace_extension(P, "pth");
+ if (llvm::sys::fs::exists(P))
+ FoundPTH = true;
+ }
+
+ if (!FoundPCH && !FoundPTH) {
+ llvm::sys::path::replace_extension(P, "gch");
+ if (llvm::sys::fs::exists(P)) {
+ FoundPCH = UsePCH;
+ FoundPTH = !UsePCH;
+ }
+ }
+
+ if (FoundPCH || FoundPTH) {
+ if (IsFirstImplicitInclude) {
+ A->claim();
+ if (UsePCH)
+ CmdArgs.push_back("-include-pch");
+ else
+ CmdArgs.push_back("-include-pth");
+ CmdArgs.push_back(Args.MakeArgString(P));
+ continue;
+ } else {
+ // Ignore the PCH if not first on command line and emit warning.
+ D.Diag(diag::warn_drv_pch_not_first_include) << P
+ << A->getAsString(Args);
+ }
+ }
+ }
+
+ // Not translated, render as usual.
+ A->claim();
+ A->render(Args, CmdArgs);
+ }
+
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_D, options::OPT_U, options::OPT_I_Group,
+ options::OPT_F, options::OPT_index_header_map});
+
+ // Add -Wp, and -Xpreprocessor if using the preprocessor.
+
+ // FIXME: There is a very unfortunate problem here: some troubled
+ // souls abuse -Wp, to pass preprocessor options in gcc syntax. To
+ // really support that we would have to parse and then translate
+ // those options. :(
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wp_COMMA,
+ options::OPT_Xpreprocessor);
+
+ // -I- is a deprecated GCC feature; reject it.
+ if (Arg *A = Args.getLastArg(options::OPT_I_))
+ D.Diag(diag::err_drv_I_dash_not_supported) << A->getAsString(Args);
+
+ // If we have a --sysroot, and don't have an explicit -isysroot flag, add an
+ // -isysroot to the CC1 invocation.
+ StringRef sysroot = C.getSysRoot();
+ if (sysroot != "") {
+ if (!Args.hasArg(options::OPT_isysroot)) {
+ CmdArgs.push_back("-isysroot");
+ CmdArgs.push_back(C.getArgs().MakeArgString(sysroot));
+ }
+ }
+
+ // Parse additional include paths from environment variables.
+ // FIXME: We should probably sink the logic for handling these from the
+ // frontend into the driver. It will allow deleting 4 otherwise unused flags.
+ // CPATH - included following the user specified includes (but prior to
+ // builtin and standard includes).
+ addDirectoryList(Args, CmdArgs, "-I", "CPATH");
+ // C_INCLUDE_PATH - system includes enabled when compiling C.
+ addDirectoryList(Args, CmdArgs, "-c-isystem", "C_INCLUDE_PATH");
+ // CPLUS_INCLUDE_PATH - system includes enabled when compiling C++.
+ addDirectoryList(Args, CmdArgs, "-cxx-isystem", "CPLUS_INCLUDE_PATH");
+ // OBJC_INCLUDE_PATH - system includes enabled when compiling ObjC.
+ addDirectoryList(Args, CmdArgs, "-objc-isystem", "OBJC_INCLUDE_PATH");
+ // OBJCPLUS_INCLUDE_PATH - system includes enabled when compiling ObjC++.
+ addDirectoryList(Args, CmdArgs, "-objcxx-isystem", "OBJCPLUS_INCLUDE_PATH");
+
+ // Optional AuxToolChain indicates that we need to include headers
+ // for more than one target. If that's the case, add include paths
+ // from AuxToolChain right after include paths of the same kind for
+ // the current target.
+
+ // Add C++ include arguments, if needed.
+ if (types::isCXX(Inputs[0].getType())) {
+ getToolChain().AddClangCXXStdlibIncludeArgs(Args, CmdArgs);
+ if (AuxToolChain)
+ AuxToolChain->AddClangCXXStdlibIncludeArgs(Args, CmdArgs);
+ }
+
+ // Add system include arguments.
+ getToolChain().AddClangSystemIncludeArgs(Args, CmdArgs);
+ if (AuxToolChain)
+ AuxToolChain->AddClangSystemIncludeArgs(Args, CmdArgs);
+
+ // Add CUDA include arguments, if needed.
+ if (types::isCuda(Inputs[0].getType()))
+ getToolChain().AddCudaIncludeArgs(Args, CmdArgs);
+}
+
+// FIXME: Move to target hook.
+static bool isSignedCharDefault(const llvm::Triple &Triple) {
+ switch (Triple.getArch()) {
+ default:
+ return true;
+
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be:
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ if (Triple.isOSDarwin() || Triple.isOSWindows())
+ return true;
+ return false;
+
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ if (Triple.isOSDarwin())
+ return true;
+ return false;
+
+ case llvm::Triple::hexagon:
+ case llvm::Triple::ppc64le:
+ case llvm::Triple::systemz:
+ case llvm::Triple::xcore:
+ return false;
+ }
+}
+
+static bool isNoCommonDefault(const llvm::Triple &Triple) {
+ switch (Triple.getArch()) {
+ default:
+ return false;
+
+ case llvm::Triple::xcore:
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ return true;
+ }
+}
+
+// ARM tools start.
+
+// Get SubArch (vN).
+static int getARMSubArchVersionNumber(const llvm::Triple &Triple) {
+ llvm::StringRef Arch = Triple.getArchName();
+ return llvm::ARM::parseArchVersion(Arch);
+}
+
+// True if M-profile.
+static bool isARMMProfile(const llvm::Triple &Triple) {
+ llvm::StringRef Arch = Triple.getArchName();
+ unsigned Profile = llvm::ARM::parseArchProfile(Arch);
+ return Profile == llvm::ARM::PK_M;
+}
+
+// Get Arch/CPU from args.
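+// When invoked on behalf of the assembler (FromAs), -Wa,/-Xassembler values
+// take precedence; e.g. -Wa,-mcpu=cortex-a9 overrides a driver-level -mcpu=.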
+static void getARMArchCPUFromArgs(const ArgList &Args, llvm::StringRef &Arch,
+ llvm::StringRef &CPU, bool FromAs = false) {
+ if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
+ CPU = A->getValue();
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
+ Arch = A->getValue();
+ if (!FromAs)
+ return;
+
+ for (const Arg *A :
+ Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) {
+ StringRef Value = A->getValue();
+ if (Value.startswith("-mcpu="))
+ CPU = Value.substr(6);
+ if (Value.startswith("-march="))
+ Arch = Value.substr(7);
+ }
+}
+
+// Handle -mhwdiv=.
+// FIXME: Use ARMTargetParser.
+static void getARMHWDivFeatures(const Driver &D, const Arg *A,
+ const ArgList &Args, StringRef HWDiv,
+ std::vector<const char *> &Features) {
+ unsigned HWDivID = llvm::ARM::parseHWDiv(HWDiv);
+ if (!llvm::ARM::getHWDivFeatures(HWDivID, Features))
+ D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
+}
+
+// Handle -mfpu=.
+static void getARMFPUFeatures(const Driver &D, const Arg *A,
+ const ArgList &Args, StringRef FPU,
+ std::vector<const char *> &Features) {
+ unsigned FPUID = llvm::ARM::parseFPU(FPU);
+ if (!llvm::ARM::getFPUFeatures(FPUID, Features))
+ D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
+}
+
+// Decode ARM features from string like +[no]featureA+[no]featureB+...
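+// For example, "crc+nocrypto" would typically decode to {"+crc", "-crypto"},
+// assuming getArchExtFeature maps "no"-prefixed names to negative features.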
+static bool DecodeARMFeatures(const Driver &D, StringRef text,
+ std::vector<const char *> &Features) {
+ SmallVector<StringRef, 8> Split;
+ text.split(Split, StringRef("+"), -1, false);
+
+ for (StringRef Feature : Split) {
+ const char *FeatureName = llvm::ARM::getArchExtFeature(Feature);
+ if (FeatureName)
+ Features.push_back(FeatureName);
+ else
+ return false;
+ }
+ return true;
+}
+
+// Check if -march is valid by checking if it can be canonicalised and parsed.
+// getARMArch is used here instead of just checking the -march value in order
+// to handle -march=native correctly.
+static void checkARMArchName(const Driver &D, const Arg *A, const ArgList &Args,
+ llvm::StringRef ArchName,
+ std::vector<const char *> &Features,
+ const llvm::Triple &Triple) {
+ std::pair<StringRef, StringRef> Split = ArchName.split("+");
+
+ std::string MArch = arm::getARMArch(ArchName, Triple);
+ if (llvm::ARM::parseArch(MArch) == llvm::ARM::AK_INVALID ||
+ (Split.second.size() && !DecodeARMFeatures(D, Split.second, Features)))
+ D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
+}
+
+// Check -mcpu=. Needs ArchName to handle -mcpu=generic.
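+// For example, -mcpu=cortex-a57+crypto splits into the CPU "cortex-a57" and
+// the extension string "crypto", which is decoded into backend features.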
+static void checkARMCPUName(const Driver &D, const Arg *A, const ArgList &Args,
+ llvm::StringRef CPUName, llvm::StringRef ArchName,
+ std::vector<const char *> &Features,
+ const llvm::Triple &Triple) {
+ std::pair<StringRef, StringRef> Split = CPUName.split("+");
+
+ std::string CPU = arm::getARMTargetCPU(CPUName, ArchName, Triple);
+ if (arm::getLLVMArchSuffixForARM(CPU, ArchName, Triple).empty() ||
+ (Split.second.size() && !DecodeARMFeatures(D, Split.second, Features)))
+ D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
+}
+
+static bool useAAPCSForMachO(const llvm::Triple &T) {
+ // The backend is hardwired to assume AAPCS for M-class processors, ensure
+ // the frontend matches that.
+ return T.getEnvironment() == llvm::Triple::EABI ||
+ T.getOS() == llvm::Triple::UnknownOS || isARMMProfile(T);
+}
+
+// Select the float ABI as determined by -msoft-float, -mhard-float, and
+// -mfloat-abi=.
+arm::FloatABI arm::getARMFloatABI(const ToolChain &TC, const ArgList &Args) {
+ const Driver &D = TC.getDriver();
+ const llvm::Triple Triple(TC.ComputeEffectiveClangTriple(Args));
+ auto SubArch = getARMSubArchVersionNumber(Triple);
+ arm::FloatABI ABI = FloatABI::Invalid;
+ if (Arg *A =
+ Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float,
+ options::OPT_mfloat_abi_EQ)) {
+ if (A->getOption().matches(options::OPT_msoft_float)) {
+ ABI = FloatABI::Soft;
+ } else if (A->getOption().matches(options::OPT_mhard_float)) {
+ ABI = FloatABI::Hard;
+ } else {
+ ABI = llvm::StringSwitch<arm::FloatABI>(A->getValue())
+ .Case("soft", FloatABI::Soft)
+ .Case("softfp", FloatABI::SoftFP)
+ .Case("hard", FloatABI::Hard)
+ .Default(FloatABI::Invalid);
+ if (ABI == FloatABI::Invalid && !StringRef(A->getValue()).empty()) {
+ D.Diag(diag::err_drv_invalid_mfloat_abi) << A->getAsString(Args);
+ ABI = FloatABI::Soft;
+ }
+ }
+
+ // It is incorrect to select hard float ABI on MachO platforms if the ABI is
+ // "apcs-gnu".
+ if (Triple.isOSBinFormatMachO() && !useAAPCSForMachO(Triple) &&
+ ABI == FloatABI::Hard) {
+ D.Diag(diag::err_drv_unsupported_opt_for_target) << A->getAsString(Args)
+ << Triple.getArchName();
+ }
+ }
+
+ // If unspecified, choose the default based on the platform.
+ if (ABI == FloatABI::Invalid) {
+ switch (Triple.getOS()) {
+ case llvm::Triple::Darwin:
+ case llvm::Triple::MacOSX:
+ case llvm::Triple::IOS:
+ case llvm::Triple::TvOS: {
+ // Darwin defaults to "softfp" for v6 and v7.
+ ABI = (SubArch == 6 || SubArch == 7) ? FloatABI::SoftFP : FloatABI::Soft;
+ break;
+ }
+ case llvm::Triple::WatchOS:
+ ABI = FloatABI::Hard;
+ break;
+
+ // FIXME: this is invalid for WindowsCE
+ case llvm::Triple::Win32:
+ ABI = FloatABI::Hard;
+ break;
+
+ case llvm::Triple::FreeBSD:
+ switch (Triple.getEnvironment()) {
+ case llvm::Triple::GNUEABIHF:
+ ABI = FloatABI::Hard;
+ break;
+ default:
+ // FreeBSD defaults to soft float
+ ABI = FloatABI::Soft;
+ break;
+ }
+ break;
+
+ default:
+ switch (Triple.getEnvironment()) {
+ case llvm::Triple::GNUEABIHF:
+ case llvm::Triple::EABIHF:
+ ABI = FloatABI::Hard;
+ break;
+ case llvm::Triple::GNUEABI:
+ case llvm::Triple::EABI:
+ // EABI is always AAPCS, and if it was not marked 'hard', it's softfp
+ ABI = FloatABI::SoftFP;
+ break;
+ case llvm::Triple::Android:
+ ABI = (SubArch == 7) ? FloatABI::SoftFP : FloatABI::Soft;
+ break;
+ default:
+ // Assume "soft", but warn the user we are guessing.
+ ABI = FloatABI::Soft;
+ if (Triple.getOS() != llvm::Triple::UnknownOS ||
+ !Triple.isOSBinFormatMachO())
+ D.Diag(diag::warn_drv_assuming_mfloat_abi_is) << "soft";
+ break;
+ }
+ }
+ }
+
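+ // Illustrative outcomes of the defaulting above: armv7-apple-darwin picks
+ // softfp, arm-linux-gnueabihf picks hard, and an unknown bare-metal ELF
+ // triple falls back to soft with a warning.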
+ assert(ABI != FloatABI::Invalid && "must select an ABI");
+ return ABI;
+}
+
+static void getARMTargetFeatures(const ToolChain &TC,
+ const llvm::Triple &Triple,
+ const ArgList &Args,
+ std::vector<const char *> &Features,
+ bool ForAS) {
+ const Driver &D = TC.getDriver();
+
+ bool KernelOrKext =
+ Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
+ arm::FloatABI ABI = arm::getARMFloatABI(TC, Args);
+ const Arg *WaCPU = nullptr, *WaFPU = nullptr;
+ const Arg *WaHDiv = nullptr, *WaArch = nullptr;
+
+ if (!ForAS) {
+ // FIXME: This is a hack; the LLVM backend doesn't actually use these
+ // yet (it uses the -mfloat-abi and -msoft-float options), and it is
+ // stripped out by the ARM target. We should probably pass this a new
+ // -target-option, which is handled by the -cc1/-cc1as invocation.
+ //
+ // FIXME2: For consistency, it would be ideal if we set up the target
+ // machine state the same when using the frontend or the assembler. We don't
+ // currently do that for the assembler, we pass the options directly to the
+ // backend and never even instantiate the frontend TargetInfo. If we did,
+ // and used its handleTargetFeatures hook, then we could ensure the
+ // assembler and the frontend behave the same.
+
+ // Use software floating point operations?
+ if (ABI == arm::FloatABI::Soft)
+ Features.push_back("+soft-float");
+
+ // Use software floating point argument passing?
+ if (ABI != arm::FloatABI::Hard)
+ Features.push_back("+soft-float-abi");
+ } else {
+ // Here, we make sure that -Wa,-mfpu/cpu/arch/hwdiv will be passed down
+ // to the assembler correctly.
+ for (const Arg *A :
+ Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) {
+ StringRef Value = A->getValue();
+ if (Value.startswith("-mfpu=")) {
+ WaFPU = A;
+ } else if (Value.startswith("-mcpu=")) {
+ WaCPU = A;
+ } else if (Value.startswith("-mhwdiv=")) {
+ WaHDiv = A;
+ } else if (Value.startswith("-march=")) {
+ WaArch = A;
+ }
+ }
+ }
+
+ // Check -march. ClangAs gives preference to -Wa,-march=.
+ const Arg *ArchArg = Args.getLastArg(options::OPT_march_EQ);
+ StringRef ArchName;
+ if (WaArch) {
+ if (ArchArg)
+ D.Diag(clang::diag::warn_drv_unused_argument)
+ << ArchArg->getAsString(Args);
+ ArchName = StringRef(WaArch->getValue()).substr(7);
+ checkARMArchName(D, WaArch, Args, ArchName, Features, Triple);
+ // FIXME: Set Arch.
+ D.Diag(clang::diag::warn_drv_unused_argument) << WaArch->getAsString(Args);
+ } else if (ArchArg) {
+ ArchName = ArchArg->getValue();
+ checkARMArchName(D, ArchArg, Args, ArchName, Features, Triple);
+ }
+
+ // Check -mcpu. ClangAs gives preference to -Wa,-mcpu=.
+ const Arg *CPUArg = Args.getLastArg(options::OPT_mcpu_EQ);
+ StringRef CPUName;
+ if (WaCPU) {
+ if (CPUArg)
+ D.Diag(clang::diag::warn_drv_unused_argument)
+ << CPUArg->getAsString(Args);
+ CPUName = StringRef(WaCPU->getValue()).substr(6);
+ checkARMCPUName(D, WaCPU, Args, CPUName, ArchName, Features, Triple);
+ } else if (CPUArg) {
+ CPUName = CPUArg->getValue();
+ checkARMCPUName(D, CPUArg, Args, CPUName, ArchName, Features, Triple);
+ }
+
+ // For -mcpu=native, add the features of the host CPU.
+ if (CPUName == "native") {
+ llvm::StringMap<bool> HostFeatures;
+ if (llvm::sys::getHostCPUFeatures(HostFeatures))
+ for (auto &F : HostFeatures)
+ Features.push_back(
+ Args.MakeArgString((F.second ? "+" : "-") + F.first()));
+ }
+
+ // Honor -mfpu=. ClangAs gives preference to -Wa,-mfpu=.
+ const Arg *FPUArg = Args.getLastArg(options::OPT_mfpu_EQ);
+ if (WaFPU) {
+ if (FPUArg)
+ D.Diag(clang::diag::warn_drv_unused_argument)
+ << FPUArg->getAsString(Args);
+ getARMFPUFeatures(D, WaFPU, Args, StringRef(WaFPU->getValue()).substr(6),
+ Features);
+ } else if (FPUArg) {
+ getARMFPUFeatures(D, FPUArg, Args, FPUArg->getValue(), Features);
+ }
+
+ // Honor -mhwdiv=. ClangAs gives preference to -Wa,-mhwdiv=.
+ const Arg *HDivArg = Args.getLastArg(options::OPT_mhwdiv_EQ);
+ if (WaHDiv) {
+ if (HDivArg)
+ D.Diag(clang::diag::warn_drv_unused_argument)
+ << HDivArg->getAsString(Args);
+ getARMHWDivFeatures(D, WaHDiv, Args,
+ StringRef(WaHDiv->getValue()).substr(8), Features);
+ } else if (HDivArg)
+ getARMHWDivFeatures(D, HDivArg, Args, HDivArg->getValue(), Features);
+
+ // Setting -msoft-float effectively disables NEON because of the GCC
+ // implementation, although the same isn't true of VFP or VFP3.
+ if (ABI == arm::FloatABI::Soft) {
+ Features.push_back("-neon");
+ // Also need to explicitly disable features which imply NEON.
+ Features.push_back("-crypto");
+ }
+
+ // En/disable crc code generation.
+ if (Arg *A = Args.getLastArg(options::OPT_mcrc, options::OPT_mnocrc)) {
+ if (A->getOption().matches(options::OPT_mcrc))
+ Features.push_back("+crc");
+ else
+ Features.push_back("-crc");
+ }
+
+ if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v8_1a) {
+ Features.insert(Features.begin(), "+v8.1a");
+ }
+
+ // Look for the last occurrence of -mlong-calls or -mno-long-calls. If
+ // neither option is specified, see if we are compiling for kernel/kext and
+ // decide whether to pass "+long-calls" based on the OS and its version.
+ if (Arg *A = Args.getLastArg(options::OPT_mlong_calls,
+ options::OPT_mno_long_calls)) {
+ if (A->getOption().matches(options::OPT_mlong_calls))
+ Features.push_back("+long-calls");
+ } else if (KernelOrKext && (!Triple.isiOS() || Triple.isOSVersionLT(6)) &&
+ !Triple.isWatchOS()) {
+ Features.push_back("+long-calls");
+ }
+
+ // Kernel code has more strict alignment requirements.
+ if (KernelOrKext)
+ Features.push_back("+strict-align");
+ else if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
+ options::OPT_munaligned_access)) {
+ if (A->getOption().matches(options::OPT_munaligned_access)) {
+ // No v6M core supports unaligned memory access (v6M ARM ARM A3.2).
+ if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m)
+ D.Diag(diag::err_target_unsupported_unaligned) << "v6m";
+ } else
+ Features.push_back("+strict-align");
+ } else {
+ // Assume pre-ARMv6 doesn't support unaligned accesses.
+ //
+ // ARMv6 may or may not support unaligned accesses depending on the
+ // SCTLR.U bit, which is architecture-specific. We assume ARMv6
+ // Darwin and NetBSD targets support unaligned accesses, and others don't.
+ //
+ // ARMv7 always has SCTLR.U set to 1, but it has a new SCTLR.A bit
+ // which raises an alignment fault on unaligned accesses. Linux
+ // defaults this bit to 0 and handles it as a system-wide (not
+ // per-process) setting. It is therefore safe to assume that ARMv7+
+ // Linux targets support unaligned accesses. The same goes for NaCl.
+ //
+ // The above behavior is consistent with GCC.
+ int VersionNum = getARMSubArchVersionNumber(Triple);
+ if (Triple.isOSDarwin() || Triple.isOSNetBSD()) {
+ if (VersionNum < 6 ||
+ Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m)
+ Features.push_back("+strict-align");
+ } else if (Triple.isOSLinux() || Triple.isOSNaCl()) {
+ if (VersionNum < 7)
+ Features.push_back("+strict-align");
+ } else
+ Features.push_back("+strict-align");
+ }
+
+ // llvm does not support reserving registers in general. There is support
+ // for reserving r9 on ARM though (defined as a platform-specific register
+ // in ARM EABI).
+ if (Args.hasArg(options::OPT_ffixed_r9))
+ Features.push_back("+reserve-r9");
+
+ // The kext linker doesn't know how to deal with movw/movt.
+ if (KernelOrKext || Args.hasArg(options::OPT_mno_movt))
+ Features.push_back("+no-movt");
+}
+
+void Clang::AddARMTargetArgs(const llvm::Triple &Triple, const ArgList &Args,
+ ArgStringList &CmdArgs, bool KernelOrKext) const {
+ // Select the ABI to use.
+ // FIXME: Support -meabi.
+ // FIXME: Parts of this are duplicated in the backend, unify this somehow.
+ const char *ABIName = nullptr;
+ if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ)) {
+ ABIName = A->getValue();
+ } else if (Triple.isOSBinFormatMachO()) {
+ if (useAAPCSForMachO(Triple)) {
+ ABIName = "aapcs";
+ } else if (Triple.isWatchOS()) {
+ ABIName = "aapcs16";
+ } else {
+ ABIName = "apcs-gnu";
+ }
+ } else if (Triple.isOSWindows()) {
+ // FIXME: this is invalid for WindowsCE
+ ABIName = "aapcs";
+ } else {
+ // Select the default based on the platform.
+ switch (Triple.getEnvironment()) {
+ case llvm::Triple::Android:
+ case llvm::Triple::GNUEABI:
+ case llvm::Triple::GNUEABIHF:
+ ABIName = "aapcs-linux";
+ break;
+ case llvm::Triple::EABIHF:
+ case llvm::Triple::EABI:
+ ABIName = "aapcs";
+ break;
+ default:
+ if (Triple.getOS() == llvm::Triple::NetBSD)
+ ABIName = "apcs-gnu";
+ else
+ ABIName = "aapcs";
+ break;
+ }
+ }
+ CmdArgs.push_back("-target-abi");
+ CmdArgs.push_back(ABIName);
+
+ // Determine floating point ABI from the options & target defaults.
+ arm::FloatABI ABI = arm::getARMFloatABI(getToolChain(), Args);
+ if (ABI == arm::FloatABI::Soft) {
+ // Floating point operations and argument passing are soft.
+ // FIXME: This changes CPP defines, we need -target-soft-float.
+ CmdArgs.push_back("-msoft-float");
+ CmdArgs.push_back("-mfloat-abi");
+ CmdArgs.push_back("soft");
+ } else if (ABI == arm::FloatABI::SoftFP) {
+ // Floating point operations are hard, but argument passing is soft.
+ CmdArgs.push_back("-mfloat-abi");
+ CmdArgs.push_back("soft");
+ } else {
+ // Floating point operations and argument passing are hard.
+ assert(ABI == arm::FloatABI::Hard && "Invalid float abi!");
+ CmdArgs.push_back("-mfloat-abi");
+ CmdArgs.push_back("hard");
+ }
+
+ // Forward the -mglobal-merge option for explicit control over the pass.
+ if (Arg *A = Args.getLastArg(options::OPT_mglobal_merge,
+ options::OPT_mno_global_merge)) {
+ CmdArgs.push_back("-backend-option");
+ if (A->getOption().matches(options::OPT_mno_global_merge))
+ CmdArgs.push_back("-arm-global-merge=false");
+ else
+ CmdArgs.push_back("-arm-global-merge=true");
+ }
+
+ if (!Args.hasFlag(options::OPT_mimplicit_float,
+ options::OPT_mno_implicit_float, true))
+ CmdArgs.push_back("-no-implicit-float");
+}
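+
+// For reference, the float-ABI selection above emits the following cc1 flags
+// (a sketch keyed on the arm::FloatABI result, not an exhaustive list):
+//   Soft   -> -msoft-float -mfloat-abi soft
+//   SoftFP -> -mfloat-abi soft
+//   Hard   -> -mfloat-abi hard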
+// ARM tools end.
+
+/// getAArch64TargetCPU - Get the (LLVM) name of the AArch64 cpu we are
+/// targeting.
+static std::string getAArch64TargetCPU(const ArgList &Args) {
+ Arg *A;
+ std::string CPU;
+ // If we have -mtune or -mcpu, use that.
+ if ((A = Args.getLastArg(options::OPT_mtune_EQ))) {
+ CPU = StringRef(A->getValue()).lower();
+ } else if ((A = Args.getLastArg(options::OPT_mcpu_EQ))) {
+ StringRef Mcpu = A->getValue();
+ CPU = Mcpu.split("+").first.lower();
+ }
+
+ // Handle the case where the CPU name is 'native'.
+ if (CPU == "native")
+ return llvm::sys::getHostCPUName();
+ else if (CPU.size())
+ return CPU;
+
+ // Make sure we pick "cyclone" if -arch is used.
+ // FIXME: Should this be picked by checking the target triple instead?
+ if (Args.getLastArg(options::OPT_arch))
+ return "cyclone";
+
+ return "generic";
+}
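+
+// Examples of the AArch64 CPU selection above (hypothetical command lines):
+//   -mcpu=cortex-a57+crypto      -> "cortex-a57" (modifiers dropped by split("+"))
+//   -mtune=cyclone -mcpu=generic -> "cyclone" (-mtune takes precedence)
+//   -mcpu=native                 -> host CPU name via llvm::sys::getHostCPUName()
+//   -arch arm64, no -mcpu        -> "cyclone"
+//   no flags at all              -> "generic"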
+
+void Clang::AddAArch64TargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
+ llvm::Triple Triple(TripleStr);
+
+ if (!Args.hasFlag(options::OPT_mred_zone, options::OPT_mno_red_zone, true) ||
+ Args.hasArg(options::OPT_mkernel) ||
+ Args.hasArg(options::OPT_fapple_kext))
+ CmdArgs.push_back("-disable-red-zone");
+
+ if (!Args.hasFlag(options::OPT_mimplicit_float,
+ options::OPT_mno_implicit_float, true))
+ CmdArgs.push_back("-no-implicit-float");
+
+ const char *ABIName = nullptr;
+ if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ))
+ ABIName = A->getValue();
+ else if (Triple.isOSDarwin())
+ ABIName = "darwinpcs";
+ else
+ ABIName = "aapcs";
+
+ CmdArgs.push_back("-target-abi");
+ CmdArgs.push_back(ABIName);
+
+ if (Arg *A = Args.getLastArg(options::OPT_mfix_cortex_a53_835769,
+ options::OPT_mno_fix_cortex_a53_835769)) {
+ CmdArgs.push_back("-backend-option");
+ if (A->getOption().matches(options::OPT_mfix_cortex_a53_835769))
+ CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=1");
+ else
+ CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=0");
+ } else if (Triple.isAndroid()) {
+ // Enable the Cortex-A53 erratum 835769 workaround by default on Android.
+ CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=1");
+ }
+
+ // Forward the -mglobal-merge option for explicit control over the pass.
+ if (Arg *A = Args.getLastArg(options::OPT_mglobal_merge,
+ options::OPT_mno_global_merge)) {
+ CmdArgs.push_back("-backend-option");
+ if (A->getOption().matches(options::OPT_mno_global_merge))
+ CmdArgs.push_back("-aarch64-global-merge=false");
+ else
+ CmdArgs.push_back("-aarch64-global-merge=true");
+ }
+}
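+
+// For reference, the erratum handling above produces (illustrative):
+//   -mfix-cortex-a53-835769    -> -backend-option -aarch64-fix-cortex-a53-835769=1
+//   -mno-fix-cortex-a53-835769 -> -backend-option -aarch64-fix-cortex-a53-835769=0
+//   neither flag, *-android    -> -backend-option -aarch64-fix-cortex-a53-835769=1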
+
+// Get CPU and ABI names. They are not independent
+// so we have to calculate them together.
+void mips::getMipsCPUAndABI(const ArgList &Args, const llvm::Triple &Triple,
+ StringRef &CPUName, StringRef &ABIName) {
+ const char *DefMips32CPU = "mips32r2";
+ const char *DefMips64CPU = "mips64r2";
+
+ // MIPS32r6 is the default for mips(el)?-img-linux-gnu and MIPS64r6 is the
+ // default for mips64(el)?-img-linux-gnu.
+ if (Triple.getVendor() == llvm::Triple::ImaginationTechnologies &&
+ Triple.getEnvironment() == llvm::Triple::GNU) {
+ DefMips32CPU = "mips32r6";
+ DefMips64CPU = "mips64r6";
+ }
+
+ // MIPS64r6 is the default for Android MIPS64 (mips64el-linux-android).
+ if (Triple.isAndroid())
+ DefMips64CPU = "mips64r6";
+
+ // MIPS3 is the default for mips64*-unknown-openbsd.
+ if (Triple.getOS() == llvm::Triple::OpenBSD)
+ DefMips64CPU = "mips3";
+
+ if (Arg *A = Args.getLastArg(options::OPT_march_EQ, options::OPT_mcpu_EQ))
+ CPUName = A->getValue();
+
+ if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ)) {
+ ABIName = A->getValue();
+ // Convert a GNU-style MIPS ABI name to the name
+ // accepted by the LLVM MIPS backend.
+ ABIName = llvm::StringSwitch<llvm::StringRef>(ABIName)
+ .Case("32", "o32")
+ .Case("64", "n64")
+ .Default(ABIName);
+ }
+
+ // Setup default CPU and ABI names.
+ if (CPUName.empty() && ABIName.empty()) {
+ switch (Triple.getArch()) {
+ default:
+ llvm_unreachable("Unexpected triple arch name");
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ CPUName = DefMips32CPU;
+ break;
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ CPUName = DefMips64CPU;
+ break;
+ }
+ }
+
+ if (ABIName.empty()) {
+ // Deduce ABI name from the target triple.
+ if (Triple.getArch() == llvm::Triple::mips ||
+ Triple.getArch() == llvm::Triple::mipsel)
+ ABIName = "o32";
+ else
+ ABIName = "n64";
+ }
+
+ if (CPUName.empty()) {
+ // Deduce CPU name from ABI name.
+ CPUName = llvm::StringSwitch<const char *>(ABIName)
+ .Cases("o32", "eabi", DefMips32CPU)
+ .Cases("n32", "n64", DefMips64CPU)
+ .Default("");
+ }
+
+ // FIXME: Warn on inconsistent use of -march and -mabi.
+}
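+
+// Default CPU/ABI pairs chosen above for a few triples when no -march/-mcpu/
+// -mabi is given (illustrative):
+//   mips-unknown-linux-gnu     -> CPU mips32r2, ABI o32
+//   mips64el-unknown-linux-gnu -> CPU mips64r2, ABI n64
+//   mipsel-img-linux-gnu       -> CPU mips32r6, ABI o32
+//   mips64el-linux-android     -> CPU mips64r6, ABI n64
+//   mips64-unknown-openbsd     -> CPU mips3,    ABI n64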
+
+std::string mips::getMipsABILibSuffix(const ArgList &Args,
+ const llvm::Triple &Triple) {
+ StringRef CPUName, ABIName;
+ tools::mips::getMipsCPUAndABI(Args, Triple, CPUName, ABIName);
+ return llvm::StringSwitch<std::string>(ABIName)
+ .Case("o32", "")
+ .Case("n32", "32")
+ .Case("n64", "64")
+ // Fall back to no suffix for any other ABI name (e.g. "eabi").
+ .Default("");
+}
+
+// Convert an ABI name to the variant accepted by GNU tools.
+static StringRef getGnuCompatibleMipsABIName(StringRef ABI) {
+ return llvm::StringSwitch<llvm::StringRef>(ABI)
+ .Case("o32", "32")
+ .Case("n64", "64")
+ .Default(ABI);
+}
+
+// Select the MIPS float ABI as determined by -msoft-float, -mhard-float,
+// and -mfloat-abi=.
+static mips::FloatABI getMipsFloatABI(const Driver &D, const ArgList &Args) {
+ mips::FloatABI ABI = mips::FloatABI::Invalid;
+ if (Arg *A =
+ Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float,
+ options::OPT_mfloat_abi_EQ)) {
+ if (A->getOption().matches(options::OPT_msoft_float))
+ ABI = mips::FloatABI::Soft;
+ else if (A->getOption().matches(options::OPT_mhard_float))
+ ABI = mips::FloatABI::Hard;
+ else {
+ ABI = llvm::StringSwitch<mips::FloatABI>(A->getValue())
+ .Case("soft", mips::FloatABI::Soft)
+ .Case("hard", mips::FloatABI::Hard)
+ .Default(mips::FloatABI::Invalid);
+ if (ABI == mips::FloatABI::Invalid && !StringRef(A->getValue()).empty()) {
+ D.Diag(diag::err_drv_invalid_mfloat_abi) << A->getAsString(Args);
+ ABI = mips::FloatABI::Hard;
+ }
+ }
+ }
+
+ // If unspecified, choose the default based on the platform.
+ if (ABI == mips::FloatABI::Invalid) {
+ // Assume "hard", because it's a default value used by gcc.
+ // When we start to recognize specific target MIPS processors,
+ // we will be able to select the default more correctly.
+ ABI = mips::FloatABI::Hard;
+ }
+
+ assert(ABI != mips::FloatABI::Invalid && "must select an ABI");
+ return ABI;
+}
+
+static void AddTargetFeature(const ArgList &Args,
+ std::vector<const char *> &Features,
+ OptSpecifier OnOpt, OptSpecifier OffOpt,
+ StringRef FeatureName) {
+ if (Arg *A = Args.getLastArg(OnOpt, OffOpt)) {
+ if (A->getOption().matches(OnOpt))
+ Features.push_back(Args.MakeArgString("+" + FeatureName));
+ else
+ Features.push_back(Args.MakeArgString("-" + FeatureName));
+ }
+}
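+
+// Example use of the helper above (this exact call appears in
+// getMIPSTargetFeatures below): the call
+//   AddTargetFeature(Args, Features, options::OPT_mdsp, options::OPT_mno_dsp,
+//                    "dsp");
+// appends "+dsp" when -mdsp wins, "-dsp" when -mno-dsp wins, and leaves the
+// feature list untouched when neither flag is present.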
+
+static void getMIPSTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args,
+ std::vector<const char *> &Features) {
+ StringRef CPUName;
+ StringRef ABIName;
+ mips::getMipsCPUAndABI(Args, Triple, CPUName, ABIName);
+ ABIName = getGnuCompatibleMipsABIName(ABIName);
+
+ AddTargetFeature(Args, Features, options::OPT_mno_abicalls,
+ options::OPT_mabicalls, "noabicalls");
+
+ mips::FloatABI FloatABI = getMipsFloatABI(D, Args);
+ if (FloatABI == mips::FloatABI::Soft) {
+ // FIXME: Note, this is a hack. We need to pass the selected float
+ // mode to the MipsTargetInfoBase to define appropriate macros there.
+ // For now this is the only way to do it.
+ Features.push_back("+soft-float");
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_mnan_EQ)) {
+ StringRef Val = StringRef(A->getValue());
+ if (Val == "2008") {
+ if (mips::getSupportedNanEncoding(CPUName) & mips::Nan2008)
+ Features.push_back("+nan2008");
+ else {
+ Features.push_back("-nan2008");
+ D.Diag(diag::warn_target_unsupported_nan2008) << CPUName;
+ }
+ } else if (Val == "legacy") {
+ if (mips::getSupportedNanEncoding(CPUName) & mips::NanLegacy)
+ Features.push_back("-nan2008");
+ else {
+ Features.push_back("+nan2008");
+ D.Diag(diag::warn_target_unsupported_nanlegacy) << CPUName;
+ }
+ } else
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << Val;
+ }
+
+ AddTargetFeature(Args, Features, options::OPT_msingle_float,
+ options::OPT_mdouble_float, "single-float");
+ AddTargetFeature(Args, Features, options::OPT_mips16, options::OPT_mno_mips16,
+ "mips16");
+ AddTargetFeature(Args, Features, options::OPT_mmicromips,
+ options::OPT_mno_micromips, "micromips");
+ AddTargetFeature(Args, Features, options::OPT_mdsp, options::OPT_mno_dsp,
+ "dsp");
+ AddTargetFeature(Args, Features, options::OPT_mdspr2, options::OPT_mno_dspr2,
+ "dspr2");
+ AddTargetFeature(Args, Features, options::OPT_mmsa, options::OPT_mno_msa,
+ "msa");
+
+ // Add the last of -mfp32/-mfpxx/-mfp64. If none are given and the ABI is
+ // O32, pass -mfpxx.
+ if (Arg *A = Args.getLastArg(options::OPT_mfp32, options::OPT_mfpxx,
+ options::OPT_mfp64)) {
+ if (A->getOption().matches(options::OPT_mfp32))
+ Features.push_back(Args.MakeArgString("-fp64"));
+ else if (A->getOption().matches(options::OPT_mfpxx)) {
+ Features.push_back(Args.MakeArgString("+fpxx"));
+ Features.push_back(Args.MakeArgString("+nooddspreg"));
+ } else
+ Features.push_back(Args.MakeArgString("+fp64"));
+ } else if (mips::shouldUseFPXX(Args, Triple, CPUName, ABIName, FloatABI)) {
+ Features.push_back(Args.MakeArgString("+fpxx"));
+ Features.push_back(Args.MakeArgString("+nooddspreg"));
+ }
+
+ AddTargetFeature(Args, Features, options::OPT_mno_odd_spreg,
+ options::OPT_modd_spreg, "nooddspreg");
+}
+
+void Clang::AddMIPSTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ const Driver &D = getToolChain().getDriver();
+ StringRef CPUName;
+ StringRef ABIName;
+ const llvm::Triple &Triple = getToolChain().getTriple();
+ mips::getMipsCPUAndABI(Args, Triple, CPUName, ABIName);
+
+ CmdArgs.push_back("-target-abi");
+ CmdArgs.push_back(ABIName.data());
+
+ mips::FloatABI ABI = getMipsFloatABI(D, Args);
+ if (ABI == mips::FloatABI::Soft) {
+ // Floating point operations and argument passing are soft.
+ CmdArgs.push_back("-msoft-float");
+ CmdArgs.push_back("-mfloat-abi");
+ CmdArgs.push_back("soft");
+ } else {
+ // Floating point operations and argument passing are hard.
+ assert(ABI == mips::FloatABI::Hard && "Invalid float abi!");
+ CmdArgs.push_back("-mfloat-abi");
+ CmdArgs.push_back("hard");
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_mxgot, options::OPT_mno_xgot)) {
+ if (A->getOption().matches(options::OPT_mxgot)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-mxgot");
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_mldc1_sdc1,
+ options::OPT_mno_ldc1_sdc1)) {
+ if (A->getOption().matches(options::OPT_mno_ldc1_sdc1)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-mno-ldc1-sdc1");
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_mcheck_zero_division,
+ options::OPT_mno_check_zero_division)) {
+ if (A->getOption().matches(options::OPT_mno_check_zero_division)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-mno-check-zero-division");
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_G)) {
+ StringRef v = A->getValue();
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back(Args.MakeArgString("-mips-ssection-threshold=" + v));
+ A->claim();
+ }
+}
+
+/// getPPCTargetCPU - Get the (LLVM) name of the PowerPC cpu we are targeting.
+static std::string getPPCTargetCPU(const ArgList &Args) {
+ if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
+ StringRef CPUName = A->getValue();
+
+ if (CPUName == "native") {
+ std::string CPU = llvm::sys::getHostCPUName();
+ if (!CPU.empty() && CPU != "generic")
+ return CPU;
+ else
+ return "";
+ }
+
+ return llvm::StringSwitch<const char *>(CPUName)
+ .Case("common", "generic")
+ .Case("440", "440")
+ .Case("440fp", "440")
+ .Case("450", "450")
+ .Case("601", "601")
+ .Case("602", "602")
+ .Case("603", "603")
+ .Case("603e", "603e")
+ .Case("603ev", "603ev")
+ .Case("604", "604")
+ .Case("604e", "604e")
+ .Case("620", "620")
+ .Case("630", "pwr3")
+ .Case("G3", "g3")
+ .Case("7400", "7400")
+ .Case("G4", "g4")
+ .Case("7450", "7450")
+ .Case("G4+", "g4+")
+ .Case("750", "750")
+ .Case("970", "970")
+ .Case("G5", "g5")
+ .Case("a2", "a2")
+ .Case("a2q", "a2q")
+ .Case("e500mc", "e500mc")
+ .Case("e5500", "e5500")
+ .Case("power3", "pwr3")
+ .Case("power4", "pwr4")
+ .Case("power5", "pwr5")
+ .Case("power5x", "pwr5x")
+ .Case("power6", "pwr6")
+ .Case("power6x", "pwr6x")
+ .Case("power7", "pwr7")
+ .Case("power8", "pwr8")
+ .Case("pwr3", "pwr3")
+ .Case("pwr4", "pwr4")
+ .Case("pwr5", "pwr5")
+ .Case("pwr5x", "pwr5x")
+ .Case("pwr6", "pwr6")
+ .Case("pwr6x", "pwr6x")
+ .Case("pwr7", "pwr7")
+ .Case("pwr8", "pwr8")
+ .Case("powerpc", "ppc")
+ .Case("powerpc64", "ppc64")
+ .Case("powerpc64le", "ppc64le")
+ .Default("");
+ }
+
+ return "";
+}
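+
+// Sample mappings from the table above (GCC-style names to LLVM CPU names):
+//   -mcpu=common -> generic      -mcpu=G5     -> g5
+//   -mcpu=630    -> pwr3         -mcpu=power8 -> pwr8
+//   -mcpu=native -> the host CPU name, or "" if detection fails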
+
+static void getPPCTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args,
+ std::vector<const char *> &Features) {
+ handleTargetFeaturesGroup(Args, Features, options::OPT_m_ppc_Features_Group);
+
+ ppc::FloatABI FloatABI = ppc::getPPCFloatABI(D, Args);
+ if (FloatABI == ppc::FloatABI::Soft &&
+ !(Triple.getArch() == llvm::Triple::ppc64 ||
+ Triple.getArch() == llvm::Triple::ppc64le))
+ Features.push_back("+soft-float");
+ else if (FloatABI == ppc::FloatABI::Soft &&
+ (Triple.getArch() == llvm::Triple::ppc64 ||
+ Triple.getArch() == llvm::Triple::ppc64le))
+ D.Diag(diag::err_drv_invalid_mfloat_abi)
+ << "soft float is not supported for ppc64";
+
+ // Altivec is a bit weird; allow overriding of the Altivec feature here.
+ AddTargetFeature(Args, Features, options::OPT_faltivec,
+ options::OPT_fno_altivec, "altivec");
+}
+
+ppc::FloatABI ppc::getPPCFloatABI(const Driver &D, const ArgList &Args) {
+ ppc::FloatABI ABI = ppc::FloatABI::Invalid;
+ if (Arg *A =
+ Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float,
+ options::OPT_mfloat_abi_EQ)) {
+ if (A->getOption().matches(options::OPT_msoft_float))
+ ABI = ppc::FloatABI::Soft;
+ else if (A->getOption().matches(options::OPT_mhard_float))
+ ABI = ppc::FloatABI::Hard;
+ else {
+ ABI = llvm::StringSwitch<ppc::FloatABI>(A->getValue())
+ .Case("soft", ppc::FloatABI::Soft)
+ .Case("hard", ppc::FloatABI::Hard)
+ .Default(ppc::FloatABI::Invalid);
+ if (ABI == ppc::FloatABI::Invalid && !StringRef(A->getValue()).empty()) {
+ D.Diag(diag::err_drv_invalid_mfloat_abi) << A->getAsString(Args);
+ ABI = ppc::FloatABI::Hard;
+ }
+ }
+ }
+
+ // If unspecified, choose the default based on the platform.
+ if (ABI == ppc::FloatABI::Invalid) {
+ ABI = ppc::FloatABI::Hard;
+ }
+
+ return ABI;
+}
+
+void Clang::AddPPCTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // Select the ABI to use.
+ const char *ABIName = nullptr;
+ if (getToolChain().getTriple().isOSLinux())
+ switch (getToolChain().getArch()) {
+ case llvm::Triple::ppc64: {
+ // When targeting a processor that supports QPX, or if QPX is
+ // specifically enabled, default to using the ABI that supports QPX (so
+ // long as it is not specifically disabled).
+ bool HasQPX = false;
+ if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
+ HasQPX = A->getValue() == StringRef("a2q");
+ HasQPX = Args.hasFlag(options::OPT_mqpx, options::OPT_mno_qpx, HasQPX);
+ if (HasQPX) {
+ ABIName = "elfv1-qpx";
+ break;
+ }
+
+ ABIName = "elfv1";
+ break;
+ }
+ case llvm::Triple::ppc64le:
+ ABIName = "elfv2";
+ break;
+ default:
+ break;
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ))
+ // The ppc64 Linux ABIs are all "altivec" ABIs by default. Accept and ignore
+ // the option if given, as we don't have backend support for any targets
+ // that don't use the AltiVec ABI.
+ if (StringRef(A->getValue()) != "altivec")
+ ABIName = A->getValue();
+
+ ppc::FloatABI FloatABI =
+ ppc::getPPCFloatABI(getToolChain().getDriver(), Args);
+
+ if (FloatABI == ppc::FloatABI::Soft) {
+ // Floating point operations and argument passing are soft.
+ CmdArgs.push_back("-msoft-float");
+ CmdArgs.push_back("-mfloat-abi");
+ CmdArgs.push_back("soft");
+ } else {
+ // Floating point operations and argument passing are hard.
+ assert(FloatABI == ppc::FloatABI::Hard && "Invalid float abi!");
+ CmdArgs.push_back("-mfloat-abi");
+ CmdArgs.push_back("hard");
+ }
+
+ if (ABIName) {
+ CmdArgs.push_back("-target-abi");
+ CmdArgs.push_back(ABIName);
+ }
+}
+
+bool ppc::hasPPCAbiArg(const ArgList &Args, const char *Value) {
+ Arg *A = Args.getLastArg(options::OPT_mabi_EQ);
+ return A && (A->getValue() == StringRef(Value));
+}
+
+/// Get the (LLVM) name of the R600 gpu we are targeting.
+static std::string getR600TargetGPU(const ArgList &Args) {
+ if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
+ const char *GPUName = A->getValue();
+ return llvm::StringSwitch<const char *>(GPUName)
+ .Cases("rv630", "rv635", "r600")
+ .Cases("rv610", "rv620", "rs780", "rs880")
+ .Case("rv740", "rv770")
+ .Case("palm", "cedar")
+ .Cases("sumo", "sumo2", "sumo")
+ .Case("hemlock", "cypress")
+ .Case("aruba", "cayman")
+ .Default(GPUName);
+ }
+ return "";
+}
+
+void Clang::AddSparcTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ const Driver &D = getToolChain().getDriver();
+ std::string Triple = getToolChain().ComputeEffectiveClangTriple(Args);
+
+ bool SoftFloatABI = false;
+ if (Arg *A =
+ Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float)) {
+ if (A->getOption().matches(options::OPT_msoft_float))
+ SoftFloatABI = true;
+ }
+
+ // Only the hard-float ABI on SPARC is standardized, and it is the
+ // default. GCC also supports a nonstandard soft-float ABI mode, and
+ // perhaps LLVM should implement that, too. However, since LLVM
+ // currently does not support SPARC soft-float at all, display an
+ // error if it's requested.
+ if (SoftFloatABI) {
+ D.Diag(diag::err_drv_unsupported_opt_for_target) << "-msoft-float"
+ << Triple;
+ }
+}
+
+static const char *getSystemZTargetCPU(const ArgList &Args) {
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
+ return A->getValue();
+ return "z10";
+}
+
+static void getSystemZTargetFeatures(const ArgList &Args,
+ std::vector<const char *> &Features) {
+ // -m(no-)htm overrides use of the transactional-execution facility.
+ if (Arg *A = Args.getLastArg(options::OPT_mhtm, options::OPT_mno_htm)) {
+ if (A->getOption().matches(options::OPT_mhtm))
+ Features.push_back("+transactional-execution");
+ else
+ Features.push_back("-transactional-execution");
+ }
+ // -m(no-)vx overrides use of the vector facility.
+ if (Arg *A = Args.getLastArg(options::OPT_mvx, options::OPT_mno_vx)) {
+ if (A->getOption().matches(options::OPT_mvx))
+ Features.push_back("+vector");
+ else
+ Features.push_back("-vector");
+ }
+}
+
+static const char *getX86TargetCPU(const ArgList &Args,
+ const llvm::Triple &Triple) {
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
+ if (StringRef(A->getValue()) != "native") {
+ if (Triple.isOSDarwin() && Triple.getArchName() == "x86_64h")
+ return "core-avx2";
+
+ return A->getValue();
+ }
+
+ // FIXME: Reject attempts to use -march=native unless the target matches
+ // the host.
+ //
+ // FIXME: We should also incorporate the detected target features for use
+ // with -march=native.
+ std::string CPU = llvm::sys::getHostCPUName();
+ if (!CPU.empty() && CPU != "generic")
+ return Args.MakeArgString(CPU);
+ }
+
+ if (const Arg *A = Args.getLastArg(options::OPT__SLASH_arch)) {
+ // Mapping built by referring to X86TargetInfo::getDefaultFeatures().
+ StringRef Arch = A->getValue();
+ const char *CPU;
+ if (Triple.getArch() == llvm::Triple::x86) {
+ CPU = llvm::StringSwitch<const char *>(Arch)
+ .Case("IA32", "i386")
+ .Case("SSE", "pentium3")
+ .Case("SSE2", "pentium4")
+ .Case("AVX", "sandybridge")
+ .Case("AVX2", "haswell")
+ .Default(nullptr);
+ } else {
+ CPU = llvm::StringSwitch<const char *>(Arch)
+ .Case("AVX", "sandybridge")
+ .Case("AVX2", "haswell")
+ .Default(nullptr);
+ }
+ if (CPU)
+ return CPU;
+ }
+
+ // Select the default CPU if none was given (or detection failed).
+
+ if (Triple.getArch() != llvm::Triple::x86_64 &&
+ Triple.getArch() != llvm::Triple::x86)
+ return nullptr; // This routine is only handling x86 targets.
+
+ bool Is64Bit = Triple.getArch() == llvm::Triple::x86_64;
+
+ // FIXME: Need target hooks.
+ if (Triple.isOSDarwin()) {
+ if (Triple.getArchName() == "x86_64h")
+ return "core-avx2";
+ return Is64Bit ? "core2" : "yonah";
+ }
+
+ // Set up default CPU name for PS4 compilers.
+ if (Triple.isPS4CPU())
+ return "btver2";
+
+ // On Android, use targets compatible with GCC.
+ if (Triple.isAndroid())
+ return Is64Bit ? "x86-64" : "i686";
+
+ // Everything else goes to x86-64 in 64-bit mode.
+ if (Is64Bit)
+ return "x86-64";
+
+ switch (Triple.getOS()) {
+ case llvm::Triple::FreeBSD:
+ case llvm::Triple::NetBSD:
+ case llvm::Triple::OpenBSD:
+ return "i486";
+ case llvm::Triple::Haiku:
+ return "i586";
+ case llvm::Triple::Bitrig:
+ return "i686";
+ default:
+ // Fall back to pentium4.
+ return "pentium4";
+ }
+}
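+
+// Default x86 CPUs chosen above when no -march or /arch is given
+// (illustrative triples):
+//   x86_64-apple-darwin  -> core2     i386-apple-darwin    -> yonah
+//   x86_64-scei-ps4      -> btver2    i686-linux-android   -> i686
+//   x86_64-unknown-linux -> x86-64    i386-unknown-freebsd -> i486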
+
+/// Get the (LLVM) name of the WebAssembly cpu we are targeting.
+static StringRef getWebAssemblyTargetCPU(const ArgList &Args) {
+ // If we have -mcpu=, use that.
+ if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
+ StringRef CPU = A->getValue();
+
+#ifdef __wasm__
+ // Handle "native" by examining the host. "native" isn't meaningful when
+ // cross compiling, so only support this when the host is also WebAssembly.
+ if (CPU == "native")
+ return llvm::sys::getHostCPUName();
+#endif
+
+ return CPU;
+ }
+
+ return "generic";
+}
+
+static std::string getCPUName(const ArgList &Args, const llvm::Triple &T,
+ bool FromAs = false) {
+ switch (T.getArch()) {
+ default:
+ return "";
+
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be:
+ return getAArch64TargetCPU(Args);
+
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb: {
+ StringRef MArch, MCPU;
+ getARMArchCPUFromArgs(Args, MArch, MCPU, FromAs);
+ return arm::getARMTargetCPU(MCPU, MArch, T);
+ }
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el: {
+ StringRef CPUName;
+ StringRef ABIName;
+ mips::getMipsCPUAndABI(Args, T, CPUName, ABIName);
+ return CPUName;
+ }
+
+ case llvm::Triple::nvptx:
+ case llvm::Triple::nvptx64:
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
+ return A->getValue();
+ return "";
+
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le: {
+ std::string TargetCPUName = getPPCTargetCPU(Args);
+ // LLVM may default to generating code for the native CPU, but, like GCC,
+ // we default to a more generic option for each architecture, except on
+ // Darwin.
+ if (TargetCPUName.empty() && !T.isOSDarwin()) {
+ if (T.getArch() == llvm::Triple::ppc64)
+ TargetCPUName = "ppc64";
+ else if (T.getArch() == llvm::Triple::ppc64le)
+ TargetCPUName = "ppc64le";
+ else
+ TargetCPUName = "ppc";
+ }
+ return TargetCPUName;
+ }
+
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcel:
+ case llvm::Triple::sparcv9:
+ if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
+ return A->getValue();
+ return "";
+
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ return getX86TargetCPU(Args, T);
+
+ case llvm::Triple::hexagon:
+ return "hexagon" +
+ toolchains::HexagonToolChain::GetTargetCPUVersion(Args).str();
+
+ case llvm::Triple::systemz:
+ return getSystemZTargetCPU(Args);
+
+ case llvm::Triple::r600:
+ case llvm::Triple::amdgcn:
+ return getR600TargetGPU(Args);
+
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ return getWebAssemblyTargetCPU(Args);
+ }
+}
+
+static void AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
+ ArgStringList &CmdArgs, bool IsThinLTO) {
+ // Tell the linker to load the plugin. This has to come before AddLinkerInputs
+ // as gold requires -plugin to come before any -plugin-opt that -Wl might
+ // forward.
+ CmdArgs.push_back("-plugin");
+ std::string Plugin =
+ ToolChain.getDriver().Dir + "/../lib" CLANG_LIBDIR_SUFFIX "/LLVMgold.so";
+ CmdArgs.push_back(Args.MakeArgString(Plugin));
+
+ // Try to pass driver level flags relevant to LTO code generation down to
+ // the plugin.
+
+ // Handle flags for selecting CPU variants.
+ std::string CPU = getCPUName(Args, ToolChain.getTriple());
+ if (!CPU.empty())
+ CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=mcpu=") + CPU));
+
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
+ StringRef OOpt;
+ if (A->getOption().matches(options::OPT_O4) ||
+ A->getOption().matches(options::OPT_Ofast))
+ OOpt = "3";
+ else if (A->getOption().matches(options::OPT_O))
+ OOpt = A->getValue();
+ else if (A->getOption().matches(options::OPT_O0))
+ OOpt = "0";
+ if (!OOpt.empty())
+ CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=O") + OOpt));
+ }
+
+ if (IsThinLTO)
+ CmdArgs.push_back("-plugin-opt=thinlto");
+}
+
+/// This is a helper function for validating the optional refinement step
+/// parameter in reciprocal argument strings. Return false if there is an error
+/// parsing the refinement step. Otherwise, return true and set Position to
+/// the location of the refinement step in the input string.
+static bool getRefinementStep(StringRef In, const Driver &D,
+ const Arg &A, size_t &Position) {
+ const char RefinementStepToken = ':';
+ Position = In.find(RefinementStepToken);
+ if (Position != StringRef::npos) {
+ StringRef Option = A.getOption().getName();
+ StringRef RefStep = In.substr(Position + 1);
+ // Allow exactly one numeric character for the additional refinement
+ // step parameter. This is reasonable for all currently-supported
+ // operations and architectures because we would expect that a larger value
+ // of refinement steps would cause the estimate "optimization" to
+ // under-perform the native operation. Also, if the estimate does not
+ // converge quickly, it probably will not ever converge, so further
+ // refinement steps will not produce a better answer.
+ if (RefStep.size() != 1) {
+ D.Diag(diag::err_drv_invalid_value) << Option << RefStep;
+ return false;
+ }
+ char RefStepChar = RefStep[0];
+ if (RefStepChar < '0' || RefStepChar > '9') {
+ D.Diag(diag::err_drv_invalid_value) << Option << RefStep;
+ return false;
+ }
+ }
+ return true;
+}
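+
+// Examples for the parser above (hypothetical -mrecip values):
+//   "vec-divf:2" -> Position = 8, single-digit step "2" accepted
+//   "sqrtd"      -> no ':' found, Position = npos, returns true
+//   "divf:12"    -> rejected: the refinement step must be a single digit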
+
+/// The -mrecip flag requires processing of many optional parameters.
+static void ParseMRecip(const Driver &D, const ArgList &Args,
+ ArgStringList &OutStrings) {
+ StringRef DisabledPrefixIn = "!";
+ StringRef DisabledPrefixOut = "!";
+ StringRef EnabledPrefixOut = "";
+ StringRef Out = "-mrecip=";
+
+ Arg *A = Args.getLastArg(options::OPT_mrecip, options::OPT_mrecip_EQ);
+ if (!A)
+ return;
+
+ unsigned NumOptions = A->getNumValues();
+ if (NumOptions == 0) {
+ // Specifying no options is the same as specifying "all".
+ OutStrings.push_back(Args.MakeArgString(Out + "all"));
+ return;
+ }
+
+ // Pass through "all", "none", or "default" with an optional refinement step.
+ if (NumOptions == 1) {
+ StringRef Val = A->getValue(0);
+ size_t RefStepLoc;
+ if (!getRefinementStep(Val, D, *A, RefStepLoc))
+ return;
+ StringRef ValBase = Val.slice(0, RefStepLoc);
+ if (ValBase == "all" || ValBase == "none" || ValBase == "default") {
+ OutStrings.push_back(Args.MakeArgString(Out + Val));
+ return;
+ }
+ }
+
+ // Each reciprocal type may be enabled or disabled individually.
+ // Check each input value for validity, concatenate them all back together,
+ // and pass through.
+
+ llvm::StringMap<bool> OptionStrings;
+ OptionStrings.insert(std::make_pair("divd", false));
+ OptionStrings.insert(std::make_pair("divf", false));
+ OptionStrings.insert(std::make_pair("vec-divd", false));
+ OptionStrings.insert(std::make_pair("vec-divf", false));
+ OptionStrings.insert(std::make_pair("sqrtd", false));
+ OptionStrings.insert(std::make_pair("sqrtf", false));
+ OptionStrings.insert(std::make_pair("vec-sqrtd", false));
+ OptionStrings.insert(std::make_pair("vec-sqrtf", false));
+
+ for (unsigned i = 0; i != NumOptions; ++i) {
+ StringRef Val = A->getValue(i);
+
+ bool IsDisabled = Val.startswith(DisabledPrefixIn);
+ // Ignore the disablement token for string matching.
+ if (IsDisabled)
+ Val = Val.substr(1);
+
+ size_t RefStep;
+ if (!getRefinementStep(Val, D, *A, RefStep))
+ return;
+
+ StringRef ValBase = Val.slice(0, RefStep);
+ llvm::StringMap<bool>::iterator OptionIter = OptionStrings.find(ValBase);
+ if (OptionIter == OptionStrings.end()) {
+ // Try again specifying float suffix.
+ OptionIter = OptionStrings.find(ValBase.str() + 'f');
+ if (OptionIter == OptionStrings.end()) {
+ // The input name did not match any known option string.
+ D.Diag(diag::err_drv_unknown_argument) << Val;
+ return;
+ }
+ // The option was specified without a float or double suffix.
+ // Make sure that the double entry was not already specified.
+ // The float entry will be checked below.
+ if (OptionStrings[ValBase.str() + 'd']) {
+ D.Diag(diag::err_drv_invalid_value) << A->getOption().getName() << Val;
+ return;
+ }
+ }
+
+ if (OptionIter->second == true) {
+ // Duplicate option specified.
+ D.Diag(diag::err_drv_invalid_value) << A->getOption().getName() << Val;
+ return;
+ }
+
+ // Mark the matched option as found. Do not allow duplicate specifiers.
+ OptionIter->second = true;
+
+ // If the precision was not specified, also mark the double entry as found.
+ if (ValBase.back() != 'f' && ValBase.back() != 'd')
+ OptionStrings[ValBase.str() + 'd'] = true;
+
+ // Build the output string.
+ StringRef Prefix = IsDisabled ? DisabledPrefixOut : EnabledPrefixOut;
+ Out = Args.MakeArgString(Out + Prefix + Val);
+ if (i != NumOptions - 1)
+ Out = Args.MakeArgString(Out + ",");
+ }
+
+ OutStrings.push_back(Args.MakeArgString(Out));
+}
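+
+// End-to-end examples of the -mrecip processing above (illustrative):
+//   -mrecip             -> -mrecip=all
+//   -mrecip=all:1       -> -mrecip=all:1
+//   -mrecip=divd,!sqrtf -> -mrecip=divd,!sqrtf
+//   -mrecip=div         -> -mrecip=div (marks both the divf and divd entries)
+//   -mrecip=divf,divf   -> error: duplicate specifier
+//   -mrecip=foo         -> error: unknown argument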
+
+static void getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args,
+ std::vector<const char *> &Features) {
+ // If -march=native, autodetect the feature list.
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
+ if (StringRef(A->getValue()) == "native") {
+ llvm::StringMap<bool> HostFeatures;
+ if (llvm::sys::getHostCPUFeatures(HostFeatures))
+ for (auto &F : HostFeatures)
+ Features.push_back(
+ Args.MakeArgString((F.second ? "+" : "-") + F.first()));
+ }
+ }
+
+ if (Triple.getArchName() == "x86_64h") {
+ // x86_64h implies quite a few of the more modern subtarget features
+ // for Haswell-class CPUs, but not all of them. Opt out of a few.
+ Features.push_back("-rdrnd");
+ Features.push_back("-aes");
+ Features.push_back("-pclmul");
+ Features.push_back("-rtm");
+ Features.push_back("-hle");
+ Features.push_back("-fsgsbase");
+ }
+
+ const llvm::Triple::ArchType ArchType = Triple.getArch();
+ // Add features to be compatible with gcc for Android.
+ if (Triple.isAndroid()) {
+ if (ArchType == llvm::Triple::x86_64) {
+ Features.push_back("+sse4.2");
+ Features.push_back("+popcnt");
+ } else
+ Features.push_back("+ssse3");
+ }
+
+ // Set features according to the -arch flag on MSVC.
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_arch)) {
+ StringRef Arch = A->getValue();
+ bool ArchUsed = false;
+ // First, look for flags that are shared in x86 and x86-64.
+ if (ArchType == llvm::Triple::x86_64 || ArchType == llvm::Triple::x86) {
+ if (Arch == "AVX" || Arch == "AVX2") {
+ ArchUsed = true;
+ Features.push_back(Args.MakeArgString("+" + Arch.lower()));
+ }
+ }
+ // Then, look for x86-specific flags.
+ if (ArchType == llvm::Triple::x86) {
+ if (Arch == "IA32") {
+ ArchUsed = true;
+ } else if (Arch == "SSE" || Arch == "SSE2") {
+ ArchUsed = true;
+ Features.push_back(Args.MakeArgString("+" + Arch.lower()));
+ }
+ }
+ if (!ArchUsed)
+ D.Diag(clang::diag::warn_drv_unused_argument) << A->getAsString(Args);
+ }
+
+ // Now add any that the user explicitly requested on the command line,
+ // which may override the defaults.
+ handleTargetFeaturesGroup(Args, Features, options::OPT_m_x86_Features_Group);
+}
+
+void Clang::AddX86TargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ if (!Args.hasFlag(options::OPT_mred_zone, options::OPT_mno_red_zone, true) ||
+ Args.hasArg(options::OPT_mkernel) ||
+ Args.hasArg(options::OPT_fapple_kext))
+ CmdArgs.push_back("-disable-red-zone");
+
+ // Default to avoid implicit floating-point for kernel/kext code, but allow
+ // that to be overridden with -mno-soft-float.
+ bool NoImplicitFloat = (Args.hasArg(options::OPT_mkernel) ||
+ Args.hasArg(options::OPT_fapple_kext));
+ if (Arg *A = Args.getLastArg(
+ options::OPT_msoft_float, options::OPT_mno_soft_float,
+ options::OPT_mimplicit_float, options::OPT_mno_implicit_float)) {
+ const Option &O = A->getOption();
+ NoImplicitFloat = (O.matches(options::OPT_mno_implicit_float) ||
+ O.matches(options::OPT_msoft_float));
+ }
+ if (NoImplicitFloat)
+ CmdArgs.push_back("-no-implicit-float");
+
+ if (Arg *A = Args.getLastArg(options::OPT_masm_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value == "intel" || Value == "att") {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back(Args.MakeArgString("-x86-asm-syntax=" + Value));
+ } else {
+ getToolChain().getDriver().Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << Value;
+ }
+ }
+}
+
+void Clang::AddHexagonTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CmdArgs.push_back("-mqdsp6-compat");
+ CmdArgs.push_back("-Wreturn-type");
+
+ if (auto G = toolchains::HexagonToolChain::getSmallDataThreshold(Args)) {
+ std::string N = llvm::utostr(G.getValue());
+ std::string Opt = std::string("-hexagon-small-data-threshold=") + N;
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back(Args.MakeArgString(Opt));
+ }
+
+ if (!Args.hasArg(options::OPT_fno_short_enums))
+ CmdArgs.push_back("-fshort-enums");
+ if (Args.getLastArg(options::OPT_mieee_rnd_near)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-enable-hexagon-ieee-rnd-near");
+ }
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-machine-sink-split=0");
+}
+
+// Decode AArch64 features from a string like +[no]featureA+[no]featureB+...
+static bool DecodeAArch64Features(const Driver &D, StringRef text,
+ std::vector<const char *> &Features) {
+ SmallVector<StringRef, 8> Split;
+ text.split(Split, StringRef("+"), -1, false);
+
+ for (StringRef Feature : Split) {
+ const char *result = llvm::StringSwitch<const char *>(Feature)
+ .Case("fp", "+fp-armv8")
+ .Case("simd", "+neon")
+ .Case("crc", "+crc")
+ .Case("crypto", "+crypto")
+ .Case("fp16", "+fullfp16")
+ .Case("profile", "+spe")
+ .Case("nofp", "-fp-armv8")
+ .Case("nosimd", "-neon")
+ .Case("nocrc", "-crc")
+ .Case("nocrypto", "-crypto")
+ .Case("nofp16", "-fullfp16")
+ .Case("noprofile", "-spe")
+ .Default(nullptr);
+ if (result)
+ Features.push_back(result);
+ else if (Feature == "neon" || Feature == "noneon")
+ D.Diag(diag::err_drv_no_neon_modifier);
+ else
+ return false;
+ }
+ return true;
+}
+
+// Check if the CPU name and feature modifiers in -mcpu are legal. If so,
+// decode the CPU and its features.
+static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU,
+ std::vector<const char *> &Features) {
+ std::pair<StringRef, StringRef> Split = Mcpu.split("+");
+ CPU = Split.first;
+ if (CPU == "cyclone" || CPU == "cortex-a53" || CPU == "cortex-a57" ||
+ CPU == "cortex-a72" || CPU == "cortex-a35" || CPU == "exynos-m1") {
+ Features.push_back("+neon");
+ Features.push_back("+crc");
+ Features.push_back("+crypto");
+ } else if (CPU == "generic") {
+ Features.push_back("+neon");
+ } else {
+ return false;
+ }
+
+ if (Split.second.size() && !DecodeAArch64Features(D, Split.second, Features))
+ return false;
+
+ return true;
+}
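+
+// Examples for the -mcpu decoding above (hypothetical values):
+//   cortex-a57       -> CPU "cortex-a57", +neon +crc +crypto
+//   generic+nocrypto -> CPU "generic",    +neon -crypto
+//   cortex-a57+nofp  -> CPU "cortex-a57", +neon +crc +crypto -fp-armv8
+//   kryo             -> rejected (not in the accepted CPU list above)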
+
+static bool
+getAArch64ArchFeaturesFromMarch(const Driver &D, StringRef March,
+ const ArgList &Args,
+ std::vector<const char *> &Features) {
+ std::string MarchLowerCase = March.lower();
+ std::pair<StringRef, StringRef> Split = StringRef(MarchLowerCase).split("+");
+
+ if (Split.first == "armv8-a" || Split.first == "armv8a") {
+ // ok, no additional features.
+ } else if (Split.first == "armv8.1-a" || Split.first == "armv8.1a") {
+ Features.push_back("+v8.1a");
+ } else if (Split.first == "armv8.2-a" || Split.first == "armv8.2a" ) {
+ Features.push_back("+v8.2a");
+ } else {
+ return false;
+ }
+
+ if (Split.second.size() && !DecodeAArch64Features(D, Split.second, Features))
+ return false;
+
+ return true;
+}
+
+static bool
+getAArch64ArchFeaturesFromMcpu(const Driver &D, StringRef Mcpu,
+ const ArgList &Args,
+ std::vector<const char *> &Features) {
+ StringRef CPU;
+ std::string McpuLowerCase = Mcpu.lower();
+ if (!DecodeAArch64Mcpu(D, McpuLowerCase, CPU, Features))
+ return false;
+
+ return true;
+}
+
+static bool
+getAArch64MicroArchFeaturesFromMtune(const Driver &D, StringRef Mtune,
+ const ArgList &Args,
+ std::vector<const char *> &Features) {
+ std::string MtuneLowerCase = Mtune.lower();
+ // Handle the case where the CPU name is 'native'.
+ if (MtuneLowerCase == "native")
+ MtuneLowerCase = llvm::sys::getHostCPUName();
+ if (MtuneLowerCase == "cyclone") {
+ Features.push_back("+zcm");
+ Features.push_back("+zcz");
+ }
+ return true;
+}
+
+static bool
+getAArch64MicroArchFeaturesFromMcpu(const Driver &D, StringRef Mcpu,
+ const ArgList &Args,
+ std::vector<const char *> &Features) {
+ StringRef CPU;
+ std::vector<const char *> DecodedFeature;
+ std::string McpuLowerCase = Mcpu.lower();
+ if (!DecodeAArch64Mcpu(D, McpuLowerCase, CPU, DecodedFeature))
+ return false;
+
+ return getAArch64MicroArchFeaturesFromMtune(D, CPU, Args, Features);
+}
+
+static void getAArch64TargetFeatures(const Driver &D, const ArgList &Args,
+ std::vector<const char *> &Features) {
+ Arg *A;
+ bool success = true;
+ // Enable NEON by default.
+ Features.push_back("+neon");
+ if ((A = Args.getLastArg(options::OPT_march_EQ)))
+ success = getAArch64ArchFeaturesFromMarch(D, A->getValue(), Args, Features);
+ else if ((A = Args.getLastArg(options::OPT_mcpu_EQ)))
+ success = getAArch64ArchFeaturesFromMcpu(D, A->getValue(), Args, Features);
+ else if (Args.hasArg(options::OPT_arch))
+ success = getAArch64ArchFeaturesFromMcpu(D, getAArch64TargetCPU(Args), Args,
+ Features);
+
+ if (success && (A = Args.getLastArg(options::OPT_mtune_EQ)))
+ success =
+ getAArch64MicroArchFeaturesFromMtune(D, A->getValue(), Args, Features);
+ else if (success && (A = Args.getLastArg(options::OPT_mcpu_EQ)))
+ success =
+ getAArch64MicroArchFeaturesFromMcpu(D, A->getValue(), Args, Features);
+ else if (Args.hasArg(options::OPT_arch))
+ success = getAArch64MicroArchFeaturesFromMcpu(D, getAArch64TargetCPU(Args),
+ Args, Features);
+
+ if (!success)
+ D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
+
+ if (Args.getLastArg(options::OPT_mgeneral_regs_only)) {
+ Features.push_back("-fp-armv8");
+ Features.push_back("-crypto");
+ Features.push_back("-neon");
+ }
+
+ // Enable or disable CRC.
+ if (Arg *A = Args.getLastArg(options::OPT_mcrc, options::OPT_mnocrc)) {
+ if (A->getOption().matches(options::OPT_mcrc))
+ Features.push_back("+crc");
+ else
+ Features.push_back("-crc");
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
+ options::OPT_munaligned_access))
+ if (A->getOption().matches(options::OPT_mno_unaligned_access))
+ Features.push_back("+strict-align");
+
+ if (Args.hasArg(options::OPT_ffixed_x18))
+ Features.push_back("+reserve-x18");
+}
+
+static void getHexagonTargetFeatures(const ArgList &Args,
+ std::vector<const char *> &Features) {
+ bool HasHVX = false, HasHVXD = false;
+
+ // FIXME: This should be able to use handleTargetFeaturesGroup except it is
+ // doing dependent option handling here rather than in initFeatureMap or a
+ // similar handler.
+ for (auto &A : Args) {
+ auto &Opt = A->getOption();
+ if (Opt.matches(options::OPT_mhexagon_hvx))
+ HasHVX = true;
+ else if (Opt.matches(options::OPT_mno_hexagon_hvx))
+ HasHVXD = HasHVX = false;
+ else if (Opt.matches(options::OPT_mhexagon_hvx_double))
+ HasHVXD = HasHVX = true;
+ else if (Opt.matches(options::OPT_mno_hexagon_hvx_double))
+ HasHVXD = false;
+ else
+ continue;
+ A->claim();
+ }
+
+ Features.push_back(HasHVX ? "+hvx" : "-hvx");
+ Features.push_back(HasHVXD ? "+hvx-double" : "-hvx-double");
+}
+
+static void getWebAssemblyTargetFeatures(const ArgList &Args,
+ std::vector<const char *> &Features) {
+ handleTargetFeaturesGroup(Args, Features, options::OPT_m_wasm_Features_Group);
+}
+
+static void getTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
+ const ArgList &Args, ArgStringList &CmdArgs,
+ bool ForAS) {
+ const Driver &D = TC.getDriver();
+ std::vector<const char *> Features;
+ switch (Triple.getArch()) {
+ default:
+ break;
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ getMIPSTargetFeatures(D, Triple, Args, Features);
+ break;
+
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ getARMTargetFeatures(TC, Triple, Args, Features, ForAS);
+ break;
+
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ getPPCTargetFeatures(D, Triple, Args, Features);
+ break;
+ case llvm::Triple::systemz:
+ getSystemZTargetFeatures(Args, Features);
+ break;
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be:
+ getAArch64TargetFeatures(D, Args, Features);
+ break;
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ getX86TargetFeatures(D, Triple, Args, Features);
+ break;
+ case llvm::Triple::hexagon:
+ getHexagonTargetFeatures(Args, Features);
+ break;
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ getWebAssemblyTargetFeatures(Args, Features);
+ break;
+ }
+
+ // Find the last of each feature.
+ llvm::StringMap<unsigned> LastOpt;
+ for (unsigned I = 0, N = Features.size(); I < N; ++I) {
+ const char *Name = Features[I];
+ assert(Name[0] == '-' || Name[0] == '+');
+ LastOpt[Name + 1] = I;
+ }
+
+ for (unsigned I = 0, N = Features.size(); I < N; ++I) {
+ // If this feature was overridden, ignore it.
+ const char *Name = Features[I];
+ llvm::StringMap<unsigned>::iterator LastI = LastOpt.find(Name + 1);
+ assert(LastI != LastOpt.end());
+ unsigned Last = LastI->second;
+ if (Last != I)
+ continue;
+
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back(Name);
+ }
+}
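+
+// The de-duplication above keeps only the last occurrence of each feature
+// name, so a list such as {"+neon", "+crc", "-neon"} is emitted as
+// "-target-feature +crc -target-feature -neon" (illustrative).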
+
+static bool
+shouldUseExceptionTablesForObjCExceptions(const ObjCRuntime &runtime,
+ const llvm::Triple &Triple) {
+ // We use the zero-cost exception tables for Objective-C if the non-fragile
+ // ABI is enabled or when compiling for x86_64 and ARM on Snow Leopard and
+ // later.
+ if (runtime.isNonFragile())
+ return true;
+
+ if (!Triple.isMacOSX())
+ return false;
+
+ return (!Triple.isMacOSXVersionLT(10, 5) &&
+ (Triple.getArch() == llvm::Triple::x86_64 ||
+ Triple.getArch() == llvm::Triple::arm));
+}
+
+/// Adds exception-related arguments to the driver command arguments. There is
+/// a master flag, -fexceptions, and also language-specific flags to enable or
+/// disable C++ and Objective-C exceptions. This makes it possible, for
+/// example, to disable C++ exceptions but enable Objective-C exceptions.
+static void addExceptionArgs(const ArgList &Args, types::ID InputType,
+ const ToolChain &TC, bool KernelOrKext,
+ const ObjCRuntime &objcRuntime,
+ ArgStringList &CmdArgs) {
+ const Driver &D = TC.getDriver();
+ const llvm::Triple &Triple = TC.getTriple();
+
+ if (KernelOrKext) {
+ // -mkernel and -fapple-kext imply no exceptions, so claim exception-related
+ // arguments now to avoid warnings about unused arguments.
+ Args.ClaimAllArgs(options::OPT_fexceptions);
+ Args.ClaimAllArgs(options::OPT_fno_exceptions);
+ Args.ClaimAllArgs(options::OPT_fobjc_exceptions);
+ Args.ClaimAllArgs(options::OPT_fno_objc_exceptions);
+ Args.ClaimAllArgs(options::OPT_fcxx_exceptions);
+ Args.ClaimAllArgs(options::OPT_fno_cxx_exceptions);
+ return;
+ }
+
+ // See if the user explicitly enabled exceptions.
+ bool EH = Args.hasFlag(options::OPT_fexceptions, options::OPT_fno_exceptions,
+ false);
+
+ // Obj-C exceptions are enabled by default, regardless of -fexceptions. This
+ // is not necessarily sensible, but follows GCC.
+ if (types::isObjC(InputType) &&
+ Args.hasFlag(options::OPT_fobjc_exceptions,
+ options::OPT_fno_objc_exceptions, true)) {
+ CmdArgs.push_back("-fobjc-exceptions");
+
+ EH |= shouldUseExceptionTablesForObjCExceptions(objcRuntime, Triple);
+ }
+
+ if (types::isCXX(InputType)) {
+ // Disable C++ EH by default on XCore, PS4, and MSVC.
+ // FIXME: Remove MSVC from this list once things work.
+ bool CXXExceptionsEnabled = Triple.getArch() != llvm::Triple::xcore &&
+ !Triple.isPS4CPU() &&
+ !Triple.isWindowsMSVCEnvironment();
+ Arg *ExceptionArg = Args.getLastArg(
+ options::OPT_fcxx_exceptions, options::OPT_fno_cxx_exceptions,
+ options::OPT_fexceptions, options::OPT_fno_exceptions);
+ if (ExceptionArg)
+ CXXExceptionsEnabled =
+ ExceptionArg->getOption().matches(options::OPT_fcxx_exceptions) ||
+ ExceptionArg->getOption().matches(options::OPT_fexceptions);
+
+ if (CXXExceptionsEnabled) {
+ if (Triple.isPS4CPU()) {
+ ToolChain::RTTIMode RTTIMode = TC.getRTTIMode();
+ assert(ExceptionArg &&
+ "On the PS4 exceptions should only be enabled if passing "
+ "an argument");
+ if (RTTIMode == ToolChain::RM_DisabledExplicitly) {
+ const Arg *RTTIArg = TC.getRTTIArg();
+ assert(RTTIArg && "RTTI disabled explicitly but no RTTIArg!");
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << RTTIArg->getAsString(Args) << ExceptionArg->getAsString(Args);
+ } else if (RTTIMode == ToolChain::RM_EnabledImplicitly)
+ D.Diag(diag::warn_drv_enabling_rtti_with_exceptions);
+ } else
+ assert(TC.getRTTIMode() != ToolChain::RM_DisabledImplicitly);
+
+ CmdArgs.push_back("-fcxx-exceptions");
+
+ EH = true;
+ }
+ }
+
+ if (EH)
+ CmdArgs.push_back("-fexceptions");
+}
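+
+// Illustrative outcomes of the exception logic above (non-kernel code):
+//   C++ input, no flags, x86_64-linux  -> -fcxx-exceptions -fexceptions
+//   C++ input, -fno-cxx-exceptions     -> no exception flags
+//   ObjC input, default runtime flags  -> -fobjc-exceptions, plus
+//     -fexceptions when zero-cost exception tables are in use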
+
+static bool ShouldDisableAutolink(const ArgList &Args, const ToolChain &TC) {
+ bool Default = true;
+ if (TC.getTriple().isOSDarwin()) {
+ // The native Darwin assembler doesn't support the linker_option directives,
+ // so we disable them if we think the .s file will be passed to it.
+ Default = TC.useIntegratedAs();
+ }
+ return !Args.hasFlag(options::OPT_fautolink, options::OPT_fno_autolink,
+ Default);
+}
+
+static bool ShouldDisableDwarfDirectory(const ArgList &Args,
+ const ToolChain &TC) {
+ bool UseDwarfDirectory =
+ Args.hasFlag(options::OPT_fdwarf_directory_asm,
+ options::OPT_fno_dwarf_directory_asm, TC.useIntegratedAs());
+ return !UseDwarfDirectory;
+}
+
+/// \brief Check whether the given input tree contains any compilation actions.
+static bool ContainsCompileAction(const Action *A) {
+ if (isa<CompileJobAction>(A) || isa<BackendJobAction>(A))
+ return true;
+
+ for (const auto &Act : *A)
+ if (ContainsCompileAction(Act))
+ return true;
+
+ return false;
+}
+
+/// \brief Check if -relax-all should be passed to the internal assembler.
+/// This is done by default when compiling non-assembler source with -O0.
+static bool UseRelaxAll(Compilation &C, const ArgList &Args) {
+ bool RelaxDefault = true;
+
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group))
+ RelaxDefault = A->getOption().matches(options::OPT_O0);
+
+ if (RelaxDefault) {
+ RelaxDefault = false;
+ for (const auto &Act : C.getActions()) {
+ if (ContainsCompileAction(Act)) {
+ RelaxDefault = true;
+ break;
+ }
+ }
+ }
+
+ return Args.hasFlag(options::OPT_mrelax_all, options::OPT_mno_relax_all,
+ RelaxDefault);
+}
+
+// Convert an arg of the form "-gN" or "-ggdbN" or one of their aliases
+// to the corresponding DebugInfoKind.
+static CodeGenOptions::DebugInfoKind DebugLevelToInfoKind(const Arg &A) {
+ assert(A.getOption().matches(options::OPT_gN_Group) &&
+ "Not a -g option that specifies a debug-info level");
+ if (A.getOption().matches(options::OPT_g0) ||
+ A.getOption().matches(options::OPT_ggdb0))
+ return CodeGenOptions::NoDebugInfo;
+ if (A.getOption().matches(options::OPT_gline_tables_only) ||
+ A.getOption().matches(options::OPT_ggdb1))
+ return CodeGenOptions::DebugLineTablesOnly;
+ return CodeGenOptions::LimitedDebugInfo;
+}
+
+// Extract the integer N from a string spelled "-gdwarf-N", returning 0
+// on mismatch. The StringRef input (rather than an Arg) allows
+// for use by the "-Xassembler" option parser.
+static unsigned DwarfVersionNum(StringRef ArgValue) {
+ return llvm::StringSwitch<unsigned>(ArgValue)
+ .Case("-gdwarf-2", 2)
+ .Case("-gdwarf-3", 3)
+ .Case("-gdwarf-4", 4)
+ .Case("-gdwarf-5", 5)
+ .Default(0);
+}
+
+static void RenderDebugEnablingArgs(const ArgList &Args, ArgStringList &CmdArgs,
+ CodeGenOptions::DebugInfoKind DebugInfoKind,
+ unsigned DwarfVersion,
+ llvm::DebuggerKind DebuggerTuning) {
+ switch (DebugInfoKind) {
+ case CodeGenOptions::DebugLineTablesOnly:
+ CmdArgs.push_back("-debug-info-kind=line-tables-only");
+ break;
+ case CodeGenOptions::LimitedDebugInfo:
+ CmdArgs.push_back("-debug-info-kind=limited");
+ break;
+ case CodeGenOptions::FullDebugInfo:
+ CmdArgs.push_back("-debug-info-kind=standalone");
+ break;
+ default:
+ break;
+ }
+ if (DwarfVersion > 0)
+ CmdArgs.push_back(
+ Args.MakeArgString("-dwarf-version=" + Twine(DwarfVersion)));
+ switch (DebuggerTuning) {
+ case llvm::DebuggerKind::GDB:
+ CmdArgs.push_back("-debugger-tuning=gdb");
+ break;
+ case llvm::DebuggerKind::LLDB:
+ CmdArgs.push_back("-debugger-tuning=lldb");
+ break;
+ case llvm::DebuggerKind::SCE:
+ CmdArgs.push_back("-debugger-tuning=sce");
+ break;
+ default:
+ break;
+ }
+}
+
+static void CollectArgsForIntegratedAssembler(Compilation &C,
+ const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const Driver &D) {
+ if (UseRelaxAll(C, Args))
+ CmdArgs.push_back("-mrelax-all");
+
+ // Only default to -mincremental-linker-compatible if we think we are
+ // targeting the MSVC linker.
+ bool DefaultIncrementalLinkerCompatible =
+ C.getDefaultToolChain().getTriple().isWindowsMSVCEnvironment();
+ if (Args.hasFlag(options::OPT_mincremental_linker_compatible,
+ options::OPT_mno_incremental_linker_compatible,
+ DefaultIncrementalLinkerCompatible))
+ CmdArgs.push_back("-mincremental-linker-compatible");
+
+ // When passing -I arguments to the assembler we sometimes need to
+ // unconditionally take the next argument. For example, when parsing
+ // '-Wa,-I -Wa,foo' we need to accept the -Wa,foo arg after seeing the
+ // -Wa,-I arg and when parsing '-Wa,-I,foo' we need to accept the 'foo'
+ // arg after parsing the '-I' arg.
+ bool TakeNextArg = false;
+
+ // When using an integrated assembler, translate -Wa, and -Xassembler
+ // options.
+ bool CompressDebugSections = false;
+ for (const Arg *A :
+ Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) {
+ A->claim();
+
+ for (StringRef Value : A->getValues()) {
+ if (TakeNextArg) {
+ CmdArgs.push_back(Value.data());
+ TakeNextArg = false;
+ continue;
+ }
+
+ switch (C.getDefaultToolChain().getArch()) {
+ default:
+ break;
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ if (Value == "--trap") {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+use-tcc-in-div");
+ continue;
+ }
+ if (Value == "--break") {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-use-tcc-in-div");
+ continue;
+ }
+ if (Value.startswith("-msoft-float")) {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+soft-float");
+ continue;
+ }
+ if (Value.startswith("-mhard-float")) {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-soft-float");
+ continue;
+ }
+ break;
+ }
+
+ if (Value == "-force_cpusubtype_ALL") {
+ // Do nothing, this is the default and we don't support anything else.
+ } else if (Value == "-L") {
+ CmdArgs.push_back("-msave-temp-labels");
+ } else if (Value == "--fatal-warnings") {
+ CmdArgs.push_back("-massembler-fatal-warnings");
+ } else if (Value == "--noexecstack") {
+ CmdArgs.push_back("-mnoexecstack");
+ } else if (Value == "-compress-debug-sections" ||
+ Value == "--compress-debug-sections") {
+ CompressDebugSections = true;
+ } else if (Value == "-nocompress-debug-sections" ||
+ Value == "--nocompress-debug-sections") {
+ CompressDebugSections = false;
+ } else if (Value.startswith("-I")) {
+ CmdArgs.push_back(Value.data());
+ // We need to consume the next argument if the current arg is a plain
+ // -I. The next arg will be the include directory.
+ if (Value == "-I")
+ TakeNextArg = true;
+ } else if (Value.startswith("-gdwarf-")) {
+ // "-gdwarf-N" options are not cc1as options.
+ unsigned DwarfVersion = DwarfVersionNum(Value);
+ if (DwarfVersion == 0) { // Send it onward, and let cc1as complain.
+ CmdArgs.push_back(Value.data());
+ } else {
+ RenderDebugEnablingArgs(
+ Args, CmdArgs, CodeGenOptions::LimitedDebugInfo, DwarfVersion,
+ llvm::DebuggerKind::Default);
+ }
+ } else if (Value.startswith("-mcpu") || Value.startswith("-mfpu") ||
+ Value.startswith("-mhwdiv") || Value.startswith("-march")) {
+ // Do nothing, we'll validate it later.
+ } else {
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << Value;
+ }
+ }
+ }
+ if (CompressDebugSections) {
+ if (llvm::zlib::isAvailable())
+ CmdArgs.push_back("-compress-debug-sections");
+ else
+ D.Diag(diag::warn_debug_compression_unavailable);
+ }
+}
+
+// This adds the static libclang_rt.builtins-arch.a archive directly to the
+// command line.
+// FIXME: Make sure we can also emit shared objects if they're requested
+// and available, check for possible errors, etc.
+static void addClangRT(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ CmdArgs.push_back(TC.getCompilerRTArgString(Args, "builtins"));
+}
+
+namespace {
+enum OpenMPRuntimeKind {
+ /// An unknown OpenMP runtime. We can't generate effective OpenMP code
+ /// without knowing what runtime to target.
+ OMPRT_Unknown,
+
+ /// The LLVM OpenMP runtime. When completed and integrated, this will become
+ /// the default for Clang.
+ OMPRT_OMP,
+
+ /// The GNU OpenMP runtime. Clang doesn't support generating OpenMP code for
+ /// this runtime but can swallow the pragmas, and find and link against the
+ /// runtime library itself.
+ OMPRT_GOMP,
+
+ /// The legacy name for the LLVM OpenMP runtime from when it was the Intel
+ /// OpenMP runtime. We support this mode for users with existing dependencies
+ /// on this runtime library name.
+ OMPRT_IOMP5
+};
+}
+
+/// Compute the desired OpenMP runtime from the flag provided.
+static OpenMPRuntimeKind getOpenMPRuntime(const ToolChain &TC,
+ const ArgList &Args) {
+ StringRef RuntimeName(CLANG_DEFAULT_OPENMP_RUNTIME);
+
+ const Arg *A = Args.getLastArg(options::OPT_fopenmp_EQ);
+ if (A)
+ RuntimeName = A->getValue();
+
+ auto RT = llvm::StringSwitch<OpenMPRuntimeKind>(RuntimeName)
+ .Case("libomp", OMPRT_OMP)
+ .Case("libgomp", OMPRT_GOMP)
+ .Case("libiomp5", OMPRT_IOMP5)
+ .Default(OMPRT_Unknown);
+
+ if (RT == OMPRT_Unknown) {
+ if (A)
+ TC.getDriver().Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << A->getValue();
+ else
+ // FIXME: We could use a nicer diagnostic here.
+ TC.getDriver().Diag(diag::err_drv_unsupported_opt) << "-fopenmp";
+ }
+
+ return RT;
+}
+
+static void addOpenMPRuntime(ArgStringList &CmdArgs, const ToolChain &TC,
+ const ArgList &Args) {
+ if (!Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
+ options::OPT_fno_openmp, false))
+ return;
+
+ switch (getOpenMPRuntime(TC, Args)) {
+ case OMPRT_OMP:
+ CmdArgs.push_back("-lomp");
+ break;
+ case OMPRT_GOMP:
+ CmdArgs.push_back("-lgomp");
+ break;
+ case OMPRT_IOMP5:
+ CmdArgs.push_back("-liomp5");
+ break;
+ case OMPRT_Unknown:
+ // Already diagnosed.
+ break;
+ }
+}
+
+static void addSanitizerRuntime(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs, StringRef Sanitizer,
+ bool IsShared) {
+  // Static runtimes must be forced into the executable, so we wrap them in
+  // whole-archive.
+ if (!IsShared) CmdArgs.push_back("-whole-archive");
+ CmdArgs.push_back(TC.getCompilerRTArgString(Args, Sanitizer, IsShared));
+ if (!IsShared) CmdArgs.push_back("-no-whole-archive");
+}
+
+// Tries to use a file with the list of dynamic symbols that need to be exported
+// from the runtime library. Returns true if the file was found.
+static bool addSanitizerDynamicList(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs,
+ StringRef Sanitizer) {
+ SmallString<128> SanRT(TC.getCompilerRT(Args, Sanitizer));
+ if (llvm::sys::fs::exists(SanRT + ".syms")) {
+ CmdArgs.push_back(Args.MakeArgString("--dynamic-list=" + SanRT + ".syms"));
+ return true;
+ }
+ return false;
+}
+
+static void linkSanitizerRuntimeDeps(const ToolChain &TC,
+ ArgStringList &CmdArgs) {
+  // Force linking against the system libraries that sanitizers depend on
+  // (see PR15823 for why this is necessary).
+ CmdArgs.push_back("--no-as-needed");
+ CmdArgs.push_back("-lpthread");
+ CmdArgs.push_back("-lrt");
+ CmdArgs.push_back("-lm");
+ // There's no libdl on FreeBSD.
+ if (TC.getTriple().getOS() != llvm::Triple::FreeBSD)
+ CmdArgs.push_back("-ldl");
+}
+
+static void
+collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
+ SmallVectorImpl<StringRef> &SharedRuntimes,
+ SmallVectorImpl<StringRef> &StaticRuntimes,
+ SmallVectorImpl<StringRef> &HelperStaticRuntimes) {
+ const SanitizerArgs &SanArgs = TC.getSanitizerArgs();
+ // Collect shared runtimes.
+ if (SanArgs.needsAsanRt() && SanArgs.needsSharedAsanRt()) {
+ SharedRuntimes.push_back("asan");
+ }
+
+ // Collect static runtimes.
+ if (Args.hasArg(options::OPT_shared) || TC.getTriple().isAndroid()) {
+ // Don't link static runtimes into DSOs or if compiling for Android.
+ return;
+ }
+ if (SanArgs.needsAsanRt()) {
+ if (SanArgs.needsSharedAsanRt()) {
+ HelperStaticRuntimes.push_back("asan-preinit");
+ } else {
+ StaticRuntimes.push_back("asan");
+ if (SanArgs.linkCXXRuntimes())
+ StaticRuntimes.push_back("asan_cxx");
+ }
+ }
+ if (SanArgs.needsDfsanRt())
+ StaticRuntimes.push_back("dfsan");
+ if (SanArgs.needsLsanRt())
+ StaticRuntimes.push_back("lsan");
+ if (SanArgs.needsMsanRt()) {
+ StaticRuntimes.push_back("msan");
+ if (SanArgs.linkCXXRuntimes())
+ StaticRuntimes.push_back("msan_cxx");
+ }
+ if (SanArgs.needsTsanRt()) {
+ StaticRuntimes.push_back("tsan");
+ if (SanArgs.linkCXXRuntimes())
+ StaticRuntimes.push_back("tsan_cxx");
+ }
+ if (SanArgs.needsUbsanRt()) {
+ StaticRuntimes.push_back("ubsan_standalone");
+ if (SanArgs.linkCXXRuntimes())
+ StaticRuntimes.push_back("ubsan_standalone_cxx");
+ }
+ if (SanArgs.needsSafeStackRt())
+ StaticRuntimes.push_back("safestack");
+ if (SanArgs.needsCfiRt())
+ StaticRuntimes.push_back("cfi");
+ if (SanArgs.needsCfiDiagRt())
+ StaticRuntimes.push_back("cfi_diag");
+}
+
+// Should be called before we add system libraries (C++ ABI, libstdc++/libc++,
+// C runtime, etc). Returns true if sanitizer system deps need to be linked in.
+static bool addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ SmallVector<StringRef, 4> SharedRuntimes, StaticRuntimes,
+ HelperStaticRuntimes;
+ collectSanitizerRuntimes(TC, Args, SharedRuntimes, StaticRuntimes,
+ HelperStaticRuntimes);
+ for (auto RT : SharedRuntimes)
+ addSanitizerRuntime(TC, Args, CmdArgs, RT, true);
+ for (auto RT : HelperStaticRuntimes)
+ addSanitizerRuntime(TC, Args, CmdArgs, RT, false);
+ bool AddExportDynamic = false;
+ for (auto RT : StaticRuntimes) {
+ addSanitizerRuntime(TC, Args, CmdArgs, RT, false);
+ AddExportDynamic |= !addSanitizerDynamicList(TC, Args, CmdArgs, RT);
+ }
+ // If there is a static runtime with no dynamic list, force all the symbols
+ // to be dynamic to be sure we export sanitizer interface functions.
+ if (AddExportDynamic)
+ CmdArgs.push_back("-export-dynamic");
+ return !StaticRuntimes.empty();
+}
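+
+// As a rough sketch, a static ASan link on Linux x86_64 contributes arguments
+// along the lines of:
+//   -whole-archive <resource-dir>/lib/linux/libclang_rt.asan-x86_64.a
+//   -no-whole-archive --dynamic-list=<that archive>.syms
+// falling back to "-export-dynamic" when no .syms file sits next to the
+// archive.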
+
+static bool areOptimizationsEnabled(const ArgList &Args) {
+ // Find the last -O arg and see if it is non-zero.
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group))
+ return !A->getOption().matches(options::OPT_O0);
+ // Defaults to -O0.
+ return false;
+}
+
+static bool shouldUseFramePointerForTarget(const ArgList &Args,
+ const llvm::Triple &Triple) {
+ switch (Triple.getArch()) {
+ case llvm::Triple::xcore:
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ // XCore never wants frame pointers, regardless of OS.
+ // WebAssembly never wants frame pointers.
+ return false;
+ default:
+ break;
+ }
+
+ if (Triple.isOSLinux()) {
+ switch (Triple.getArch()) {
+    // On Linux, don't use a frame pointer for certain targets when optimizing.
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::systemz:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ return !areOptimizationsEnabled(Args);
+ default:
+ return true;
+ }
+ }
+
+ if (Triple.isOSWindows()) {
+ switch (Triple.getArch()) {
+ case llvm::Triple::x86:
+ return !areOptimizationsEnabled(Args);
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ // Windows on ARM builds with FPO disabled to aid fast stack walking
+ return true;
+ default:
+ // All other supported Windows ISAs use xdata unwind information, so frame
+ // pointers are not generally useful.
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool shouldUseFramePointer(const ArgList &Args,
+ const llvm::Triple &Triple) {
+ if (Arg *A = Args.getLastArg(options::OPT_fno_omit_frame_pointer,
+ options::OPT_fomit_frame_pointer))
+ return A->getOption().matches(options::OPT_fno_omit_frame_pointer);
+ if (Args.hasArg(options::OPT_pg))
+ return true;
+
+ return shouldUseFramePointerForTarget(Args, Triple);
+}
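+
+// E.g. on Linux x86_64, "-O2" alone lets the frame pointer be omitted while
+// "-O0" (or no -O flag) keeps it; an explicit -f(no-)omit-frame-pointer or
+// "-pg" overrides the target heuristic.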
+
+static bool shouldUseLeafFramePointer(const ArgList &Args,
+ const llvm::Triple &Triple) {
+ if (Arg *A = Args.getLastArg(options::OPT_mno_omit_leaf_frame_pointer,
+ options::OPT_momit_leaf_frame_pointer))
+ return A->getOption().matches(options::OPT_mno_omit_leaf_frame_pointer);
+ if (Args.hasArg(options::OPT_pg))
+ return true;
+
+ if (Triple.isPS4CPU())
+ return false;
+
+ return shouldUseFramePointerForTarget(Args, Triple);
+}
+
+/// Add a CC1 option to specify the debug compilation directory.
+static void addDebugCompDirArg(const ArgList &Args, ArgStringList &CmdArgs) {
+ SmallString<128> cwd;
+ if (!llvm::sys::fs::current_path(cwd)) {
+ CmdArgs.push_back("-fdebug-compilation-dir");
+ CmdArgs.push_back(Args.MakeArgString(cwd));
+ }
+}
+
+static const char *SplitDebugName(const ArgList &Args, const InputInfo &Input) {
+ Arg *FinalOutput = Args.getLastArg(options::OPT_o);
+ if (FinalOutput && Args.hasArg(options::OPT_c)) {
+ SmallString<128> T(FinalOutput->getValue());
+ llvm::sys::path::replace_extension(T, "dwo");
+ return Args.MakeArgString(T);
+ } else {
+ // Use the compilation dir.
+ SmallString<128> T(
+ Args.getLastArgValue(options::OPT_fdebug_compilation_dir));
+ SmallString<128> F(llvm::sys::path::stem(Input.getBaseInput()));
+ llvm::sys::path::replace_extension(F, "dwo");
+    T += F;
+    return Args.MakeArgString(T);
+ }
+}
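+
+// For example, "clang -c -gsplit-dwarf foo.c -o bar.o" names the split debug
+// file "bar.dwo"; without both "-c" and "-o", the name is derived from the
+// stem of the input ("foo.dwo"), prefixed with -fdebug-compilation-dir if one
+// was given.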
+
+static void SplitDebugInfo(const ToolChain &TC, Compilation &C, const Tool &T,
+ const JobAction &JA, const ArgList &Args,
+ const InputInfo &Output, const char *OutFile) {
+ ArgStringList ExtractArgs;
+ ExtractArgs.push_back("--extract-dwo");
+
+ ArgStringList StripArgs;
+ StripArgs.push_back("--strip-dwo");
+
+  // Grab the output of the earlier compile step.
+ StripArgs.push_back(Output.getFilename());
+ ExtractArgs.push_back(Output.getFilename());
+ ExtractArgs.push_back(OutFile);
+
+ const char *Exec = Args.MakeArgString(TC.GetProgramPath("objcopy"));
+ InputInfo II(Output.getFilename(), types::TY_Object, Output.getFilename());
+
+ // First extract the dwo sections.
+ C.addCommand(llvm::make_unique<Command>(JA, T, Exec, ExtractArgs, II));
+
+ // Then remove them from the original .o file.
+ C.addCommand(llvm::make_unique<Command>(JA, T, Exec, StripArgs, II));
+}
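+
+// The two commands queued above amount to, roughly:
+//   objcopy --extract-dwo foo.o foo.dwo
+//   objcopy --strip-dwo foo.o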
+
+/// \brief Vectorize at all optimization levels greater than 1 except for -Oz.
+/// For -Oz the loop vectorizer is disabled, while the SLP vectorizer is enabled.
+static bool shouldEnableVectorizerAtOLevel(const ArgList &Args, bool isSlpVec) {
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
+ if (A->getOption().matches(options::OPT_O4) ||
+ A->getOption().matches(options::OPT_Ofast))
+ return true;
+
+ if (A->getOption().matches(options::OPT_O0))
+ return false;
+
+ assert(A->getOption().matches(options::OPT_O) && "Must have a -O flag");
+
+ // Vectorize -Os.
+ StringRef S(A->getValue());
+ if (S == "s")
+ return true;
+
+ // Don't vectorize -Oz, unless it's the slp vectorizer.
+ if (S == "z")
+ return isSlpVec;
+
+ unsigned OptLevel = 0;
+ if (S.getAsInteger(10, OptLevel))
+ return false;
+
+ return OptLevel > 1;
+ }
+
+ return false;
+}
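+
+// Worked examples: -O2, -O3, -O4, -Ofast and -Os enable both vectorizers;
+// -Oz enables only the SLP vectorizer; -O0, -O1, or no -O flag at all enable
+// neither.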
+
+/// Add -x lang to \p CmdArgs for \p Input.
+static void addDashXForInput(const ArgList &Args, const InputInfo &Input,
+ ArgStringList &CmdArgs) {
+ // When using -verify-pch, we don't want to provide the type
+ // 'precompiled-header' if it was inferred from the file extension
+ if (Args.hasArg(options::OPT_verify_pch) && Input.getType() == types::TY_PCH)
+ return;
+
+ CmdArgs.push_back("-x");
+ if (Args.hasArg(options::OPT_rewrite_objc))
+ CmdArgs.push_back(types::getTypeName(types::TY_PP_ObjCXX));
+ else
+ CmdArgs.push_back(types::getTypeName(Input.getType()));
+}
+
+static VersionTuple getMSCompatibilityVersion(unsigned Version) {
+ if (Version < 100)
+ return VersionTuple(Version);
+
+ if (Version < 10000)
+ return VersionTuple(Version / 100, Version % 100);
+
+ unsigned Build = 0, Factor = 1;
+ for (; Version > 10000; Version = Version / 10, Factor = Factor * 10)
+ Build = Build + (Version % 10) * Factor;
+ return VersionTuple(Version / 100, Version % 100, Build);
+}
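+
+// Worked examples of the digit splitting above: 18 -> 18; 1800 -> 18.0;
+// 180030714 -> 18.0.30714 (every digit past the leading four becomes part of
+// the build number).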
+
+// Claim options we don't want to warn about if they are unused. We do this for
+// options that build systems might add but that are unused when, for example,
+// we are only assembling or running the preprocessor.
+static void claimNoWarnArgs(const ArgList &Args) {
+ // Don't warn about unused -f(no-)?lto. This can happen when we're
+ // preprocessing, precompiling or assembling.
+ Args.ClaimAllArgs(options::OPT_flto_EQ);
+ Args.ClaimAllArgs(options::OPT_flto);
+ Args.ClaimAllArgs(options::OPT_fno_lto);
+}
+
+static void appendUserToPath(SmallVectorImpl<char> &Result) {
+#ifdef LLVM_ON_UNIX
+ const char *Username = getenv("LOGNAME");
+#else
+ const char *Username = getenv("USERNAME");
+#endif
+ if (Username) {
+    // Validate that the username can be used in a path, and get its length.
+ size_t Len = 0;
+ for (const char *P = Username; *P; ++P, ++Len) {
+ if (!isAlphanumeric(*P) && *P != '_') {
+ Username = nullptr;
+ break;
+ }
+ }
+
+ if (Username && Len > 0) {
+ Result.append(Username, Username + Len);
+ return;
+ }
+ }
+
+// Fall back to the user id.
+#ifdef LLVM_ON_UNIX
+ std::string UID = llvm::utostr(getuid());
+#else
+ // FIXME: Windows seems to have an 'SID' that might work.
+ std::string UID = "9999";
+#endif
+ Result.append(UID.begin(), UID.end());
+}
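+
+// For example, LOGNAME=jdoe appends "jdoe"; a login name containing anything
+// outside [A-Za-z0-9_] (or an unset LOGNAME/USERNAME) falls back to the
+// numeric user id.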
+
+VersionTuple visualstudio::getMSVCVersion(const Driver *D,
+ const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
+ bool IsWindowsMSVC) {
+ if (Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
+ IsWindowsMSVC) ||
+ Args.hasArg(options::OPT_fmsc_version) ||
+ Args.hasArg(options::OPT_fms_compatibility_version)) {
+ const Arg *MSCVersion = Args.getLastArg(options::OPT_fmsc_version);
+ const Arg *MSCompatibilityVersion =
+ Args.getLastArg(options::OPT_fms_compatibility_version);
+
+ if (MSCVersion && MSCompatibilityVersion) {
+ if (D)
+ D->Diag(diag::err_drv_argument_not_allowed_with)
+ << MSCVersion->getAsString(Args)
+ << MSCompatibilityVersion->getAsString(Args);
+ return VersionTuple();
+ }
+
+ if (MSCompatibilityVersion) {
+ VersionTuple MSVT;
+ if (MSVT.tryParse(MSCompatibilityVersion->getValue()) && D)
+ D->Diag(diag::err_drv_invalid_value)
+ << MSCompatibilityVersion->getAsString(Args)
+ << MSCompatibilityVersion->getValue();
+ return MSVT;
+ }
+
+ if (MSCVersion) {
+ unsigned Version = 0;
+ if (StringRef(MSCVersion->getValue()).getAsInteger(10, Version) && D)
+ D->Diag(diag::err_drv_invalid_value) << MSCVersion->getAsString(Args)
+ << MSCVersion->getValue();
+ return getMSCompatibilityVersion(Version);
+ }
+
+ unsigned Major, Minor, Micro;
+ Triple.getEnvironmentVersion(Major, Minor, Micro);
+ if (Major || Minor || Micro)
+ return VersionTuple(Major, Minor, Micro);
+
+ return VersionTuple(18);
+ }
+ return VersionTuple();
+}
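+
+// Illustrative mappings: "-fms-compatibility-version=19.00.23026" parses
+// directly to 19.0.23026, "-fmsc-version=1800" becomes 18.0 via
+// getMSCompatibilityVersion(), and with neither flag (but MS extensions in
+// effect) the triple's environment version, or failing that 18, is used.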
+
+static void addPGOAndCoverageFlags(Compilation &C, const Driver &D,
+ const InputInfo &Output, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ auto *ProfileGenerateArg = Args.getLastArg(
+ options::OPT_fprofile_instr_generate,
+ options::OPT_fprofile_instr_generate_EQ, options::OPT_fprofile_generate,
+ options::OPT_fprofile_generate_EQ,
+ options::OPT_fno_profile_instr_generate);
+ if (ProfileGenerateArg &&
+ ProfileGenerateArg->getOption().matches(
+ options::OPT_fno_profile_instr_generate))
+ ProfileGenerateArg = nullptr;
+
+ auto *ProfileUseArg = Args.getLastArg(
+ options::OPT_fprofile_instr_use, options::OPT_fprofile_instr_use_EQ,
+ options::OPT_fprofile_use, options::OPT_fprofile_use_EQ,
+ options::OPT_fno_profile_instr_use);
+ if (ProfileUseArg &&
+ ProfileUseArg->getOption().matches(options::OPT_fno_profile_instr_use))
+ ProfileUseArg = nullptr;
+
+ if (ProfileGenerateArg && ProfileUseArg)
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << ProfileGenerateArg->getSpelling() << ProfileUseArg->getSpelling();
+
+ if (ProfileGenerateArg) {
+ if (ProfileGenerateArg->getOption().matches(
+ options::OPT_fprofile_instr_generate_EQ))
+ ProfileGenerateArg->render(Args, CmdArgs);
+ else if (ProfileGenerateArg->getOption().matches(
+ options::OPT_fprofile_generate_EQ)) {
+ SmallString<128> Path(ProfileGenerateArg->getValue());
+ llvm::sys::path::append(Path, "default.profraw");
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-fprofile-instr-generate=") + Path));
+ } else
+ Args.AddAllArgs(CmdArgs, options::OPT_fprofile_instr_generate);
+ }
+
+ if (ProfileUseArg) {
+ if (ProfileUseArg->getOption().matches(options::OPT_fprofile_instr_use_EQ))
+ ProfileUseArg->render(Args, CmdArgs);
+ else if ((ProfileUseArg->getOption().matches(
+ options::OPT_fprofile_use_EQ) ||
+ ProfileUseArg->getOption().matches(
+ options::OPT_fprofile_instr_use))) {
+ SmallString<128> Path(
+ ProfileUseArg->getNumValues() == 0 ? "" : ProfileUseArg->getValue());
+ if (Path.empty() || llvm::sys::fs::is_directory(Path))
+ llvm::sys::path::append(Path, "default.profdata");
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-fprofile-instr-use=") + Path));
+ }
+ }
+
+ if (Args.hasArg(options::OPT_ftest_coverage) ||
+ Args.hasArg(options::OPT_coverage))
+ CmdArgs.push_back("-femit-coverage-notes");
+ if (Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
+ false) ||
+ Args.hasArg(options::OPT_coverage))
+ CmdArgs.push_back("-femit-coverage-data");
+
+ if (Args.hasFlag(options::OPT_fcoverage_mapping,
+ options::OPT_fno_coverage_mapping, false) &&
+ !ProfileGenerateArg)
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << "-fcoverage-mapping"
+ << "-fprofile-instr-generate";
+
+ if (Args.hasFlag(options::OPT_fcoverage_mapping,
+ options::OPT_fno_coverage_mapping, false))
+ CmdArgs.push_back("-fcoverage-mapping");
+
+ if (C.getArgs().hasArg(options::OPT_c) ||
+ C.getArgs().hasArg(options::OPT_S)) {
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-coverage-file");
+ SmallString<128> CoverageFilename;
+ if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o)) {
+ CoverageFilename = FinalOutput->getValue();
+ } else {
+ CoverageFilename = llvm::sys::path::filename(Output.getBaseInput());
+ }
+ if (llvm::sys::path::is_relative(CoverageFilename)) {
+ SmallString<128> Pwd;
+ if (!llvm::sys::fs::current_path(Pwd)) {
+ llvm::sys::path::append(Pwd, CoverageFilename);
+ CoverageFilename.swap(Pwd);
+ }
+ }
+ CmdArgs.push_back(Args.MakeArgString(CoverageFilename));
+ }
+ }
+}
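+
+// For instance, "-fprofile-generate=/tmp/pgo" is lowered to
+// "-fprofile-instr-generate=/tmp/pgo/default.profraw", and
+// "-fprofile-use=/tmp/pgo" (naming a directory) becomes
+// "-fprofile-instr-use=/tmp/pgo/default.profdata".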
+
+static void addPS4ProfileRTArgs(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ if ((Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
+ false) ||
+ Args.hasFlag(options::OPT_fprofile_generate,
+ options::OPT_fno_profile_instr_generate, false) ||
+ Args.hasFlag(options::OPT_fprofile_generate_EQ,
+ options::OPT_fno_profile_instr_generate, false) ||
+ Args.hasFlag(options::OPT_fprofile_instr_generate,
+ options::OPT_fno_profile_instr_generate, false) ||
+ Args.hasFlag(options::OPT_fprofile_instr_generate_EQ,
+ options::OPT_fno_profile_instr_generate, false) ||
+ Args.hasArg(options::OPT_fcreate_profile) ||
+ Args.hasArg(options::OPT_coverage)))
+ CmdArgs.push_back("--dependent-lib=libclang_rt.profile-x86_64.a");
+}
+
+/// Parses the various -fpic/-fPIC/-fpie/-fPIE arguments. Then,
+/// smooshes them together with platform defaults, to decide whether
+/// this compile should be using PIC mode or not. Returns a tuple of
+/// (RelocationModel, PICLevel, IsPIE).
+static std::tuple<llvm::Reloc::Model, unsigned, bool>
+ParsePICArgs(const ToolChain &ToolChain, const llvm::Triple &Triple,
+ const ArgList &Args) {
+  // FIXME: Why does this code, and so much else everywhere, use both
+  // ToolChain.getTriple() and Triple?
+ bool PIE = ToolChain.isPIEDefault();
+ bool PIC = PIE || ToolChain.isPICDefault();
+ // The Darwin default to use PIC does not apply when using -static.
+ if (ToolChain.getTriple().isOSDarwin() && Args.hasArg(options::OPT_static))
+ PIE = PIC = false;
+ bool IsPICLevelTwo = PIC;
+
+ bool KernelOrKext =
+ Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
+
+ // Android-specific defaults for PIC/PIE
+ if (ToolChain.getTriple().isAndroid()) {
+ switch (ToolChain.getArch()) {
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ case llvm::Triple::aarch64:
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ PIC = true; // "-fpic"
+ break;
+
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ PIC = true; // "-fPIC"
+ IsPICLevelTwo = true;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ // OpenBSD-specific defaults for PIE
+ if (ToolChain.getTriple().getOS() == llvm::Triple::OpenBSD) {
+ switch (ToolChain.getArch()) {
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ case llvm::Triple::sparcel:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ IsPICLevelTwo = false; // "-fpie"
+ break;
+
+ case llvm::Triple::ppc:
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcv9:
+ IsPICLevelTwo = true; // "-fPIE"
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ // The last argument relating to either PIC or PIE wins, and no
+ // other argument is used. If the last argument is any flavor of the
+ // '-fno-...' arguments, both PIC and PIE are disabled. Any PIE
+ // option implicitly enables PIC at the same level.
+ Arg *LastPICArg = Args.getLastArg(options::OPT_fPIC, options::OPT_fno_PIC,
+ options::OPT_fpic, options::OPT_fno_pic,
+ options::OPT_fPIE, options::OPT_fno_PIE,
+ options::OPT_fpie, options::OPT_fno_pie);
+  // Check whether the tool chain trumps the PIC-ness decision. If the PIC-ness
+  // is forced, then neither the PIC nor the PIE flags will have any effect.
+ if (!ToolChain.isPICDefaultForced()) {
+ if (LastPICArg) {
+ Option O = LastPICArg->getOption();
+ if (O.matches(options::OPT_fPIC) || O.matches(options::OPT_fpic) ||
+ O.matches(options::OPT_fPIE) || O.matches(options::OPT_fpie)) {
+ PIE = O.matches(options::OPT_fPIE) || O.matches(options::OPT_fpie);
+ PIC =
+ PIE || O.matches(options::OPT_fPIC) || O.matches(options::OPT_fpic);
+ IsPICLevelTwo =
+ O.matches(options::OPT_fPIE) || O.matches(options::OPT_fPIC);
+ } else {
+ PIE = PIC = false;
+ if (Triple.isPS4CPU()) {
+ Arg *ModelArg = Args.getLastArg(options::OPT_mcmodel_EQ);
+ StringRef Model = ModelArg ? ModelArg->getValue() : "";
+ if (Model != "kernel") {
+ PIC = true;
+ ToolChain.getDriver().Diag(diag::warn_drv_ps4_force_pic)
+ << LastPICArg->getSpelling();
+ }
+ }
+ }
+ }
+ }
+
+ // Introduce a Darwin and PS4-specific hack. If the default is PIC, but the
+ // PIC level would've been set to level 1, force it back to level 2 PIC
+ // instead.
+ if (PIC && (ToolChain.getTriple().isOSDarwin() || Triple.isPS4CPU()))
+ IsPICLevelTwo |= ToolChain.isPICDefault();
+
+  // These kernel flags are a trump card: they disable PIC/PIE generation
+  // independent of the argument order.
+ if (KernelOrKext && ((!Triple.isiOS() || Triple.isOSVersionLT(6)) &&
+ !Triple.isWatchOS()))
+ PIC = PIE = false;
+
+ if (Arg *A = Args.getLastArg(options::OPT_mdynamic_no_pic)) {
+ // This is a very special mode. It trumps the other modes, almost no one
+ // uses it, and it isn't even valid on any OS but Darwin.
+ if (!ToolChain.getTriple().isOSDarwin())
+ ToolChain.getDriver().Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << ToolChain.getTriple().str();
+
+ // FIXME: Warn when this flag trumps some other PIC or PIE flag.
+
+    // Only a forced PIC mode can cause the actual compile to have PIC defines
+    // etc.; no flags are sufficient. This behavior was selected to closely
+    // match that of llvm-gcc and Apple GCC before that.
+ PIC = ToolChain.isPICDefault() && ToolChain.isPICDefaultForced();
+
+ return std::make_tuple(llvm::Reloc::DynamicNoPIC, PIC ? 2 : 0, false);
+ }
+
+ if (PIC)
+ return std::make_tuple(llvm::Reloc::PIC_, IsPICLevelTwo ? 2 : 1, PIE);
+
+ return std::make_tuple(llvm::Reloc::Static, 0, false);
+}
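+
+// Sketching the common cases on a toolchain that is neither PIC- nor
+// PIE-by-default: "-fPIC" yields (PIC_, 2, false), "-fpie" yields
+// (PIC_, 1, true), and no flag at all yields (Static, 0, false).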
+
+static const char *RelocationModelName(llvm::Reloc::Model Model) {
+ switch (Model) {
+ case llvm::Reloc::Default:
+ return nullptr;
+ case llvm::Reloc::Static:
+ return "static";
+ case llvm::Reloc::PIC_:
+ return "pic";
+ case llvm::Reloc::DynamicNoPIC:
+ return "dynamic-no-pic";
+ }
+ llvm_unreachable("Unknown Reloc::Model kind");
+}
+
+static void AddAssemblerKPIC(const ToolChain &ToolChain, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ llvm::Reloc::Model RelocationModel;
+ unsigned PICLevel;
+ bool IsPIE;
+ std::tie(RelocationModel, PICLevel, IsPIE) =
+ ParsePICArgs(ToolChain, ToolChain.getTriple(), Args);
+
+ if (RelocationModel != llvm::Reloc::Static)
+ CmdArgs.push_back("-KPIC");
+}
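+
+// That is, any relocation model other than static (whether from an explicit
+// -fPIC or a PIC-by-default toolchain) is forwarded to the assembler as
+// "-KPIC", the spelling GNU as uses on targets such as MIPS and Sparc.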
+
+void Clang::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const ArgList &Args, const char *LinkingOutput) const {
+ std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
+ const llvm::Triple Triple(TripleStr);
+
+ bool KernelOrKext =
+ Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ bool IsWindowsGNU = getToolChain().getTriple().isWindowsGNUEnvironment();
+ bool IsWindowsCygnus =
+ getToolChain().getTriple().isWindowsCygwinEnvironment();
+ bool IsWindowsMSVC = getToolChain().getTriple().isWindowsMSVCEnvironment();
+ bool IsPS4CPU = getToolChain().getTriple().isPS4CPU();
+
+ // Check number of inputs for sanity. We need at least one input.
+ assert(Inputs.size() >= 1 && "Must have at least one input.");
+ const InputInfo &Input = Inputs[0];
+ // CUDA compilation may have multiple inputs (source file + results of
+ // device-side compilations). All other jobs are expected to have exactly one
+ // input.
+ bool IsCuda = types::isCuda(Input.getType());
+ assert((IsCuda || Inputs.size() == 1) && "Unable to handle multiple inputs.");
+
+ // Invoke ourselves in -cc1 mode.
+ //
+ // FIXME: Implement custom jobs for internal actions.
+ CmdArgs.push_back("-cc1");
+
+ // Add the "effective" target triple.
+ CmdArgs.push_back("-triple");
+ CmdArgs.push_back(Args.MakeArgString(TripleStr));
+
+ const ToolChain *AuxToolChain = nullptr;
+ if (IsCuda) {
+    // FIXME: We need a (better) way to pass information about the
+    // particular compilation pass we're constructing here. For now we
+    // can check which toolchain we're using and pick the other one to
+    // extract the triple.
+ if (&getToolChain() == C.getCudaDeviceToolChain())
+ AuxToolChain = C.getCudaHostToolChain();
+ else if (&getToolChain() == C.getCudaHostToolChain())
+ AuxToolChain = C.getCudaDeviceToolChain();
+ else
+ llvm_unreachable("Can't figure out CUDA compilation mode.");
+ assert(AuxToolChain != nullptr && "No aux toolchain.");
+ CmdArgs.push_back("-aux-triple");
+ CmdArgs.push_back(Args.MakeArgString(AuxToolChain->getTriple().str()));
+ CmdArgs.push_back("-fcuda-target-overloads");
+ CmdArgs.push_back("-fcuda-disable-target-call-checks");
+ }
+
+ if (Triple.isOSWindows() && (Triple.getArch() == llvm::Triple::arm ||
+ Triple.getArch() == llvm::Triple::thumb)) {
+ unsigned Offset = Triple.getArch() == llvm::Triple::arm ? 4 : 6;
+    unsigned Version = 0;
+ Triple.getArchName().substr(Offset).getAsInteger(10, Version);
+ if (Version < 7)
+ D.Diag(diag::err_target_unsupported_arch) << Triple.getArchName()
+ << TripleStr;
+ }
+
+  // Push all default warning arguments that are specific to
+  // the given target. These come before any user-provided warning options.
+ getToolChain().addClangWarningOptions(CmdArgs);
+
+ // Select the appropriate action.
+ RewriteKind rewriteKind = RK_None;
+
+ if (isa<AnalyzeJobAction>(JA)) {
+ assert(JA.getType() == types::TY_Plist && "Invalid output type.");
+ CmdArgs.push_back("-analyze");
+ } else if (isa<MigrateJobAction>(JA)) {
+ CmdArgs.push_back("-migrate");
+ } else if (isa<PreprocessJobAction>(JA)) {
+ if (Output.getType() == types::TY_Dependencies)
+ CmdArgs.push_back("-Eonly");
+ else {
+ CmdArgs.push_back("-E");
+ if (Args.hasArg(options::OPT_rewrite_objc) &&
+ !Args.hasArg(options::OPT_g_Group))
+ CmdArgs.push_back("-P");
+ }
+ } else if (isa<AssembleJobAction>(JA)) {
+ CmdArgs.push_back("-emit-obj");
+
+ CollectArgsForIntegratedAssembler(C, Args, CmdArgs, D);
+
+ // Also ignore explicit -force_cpusubtype_ALL option.
+ (void)Args.hasArg(options::OPT_force__cpusubtype__ALL);
+ } else if (isa<PrecompileJobAction>(JA)) {
+ // Use PCH if the user requested it.
+ bool UsePCH = D.CCCUsePCH;
+
+ if (JA.getType() == types::TY_Nothing)
+ CmdArgs.push_back("-fsyntax-only");
+ else if (UsePCH)
+ CmdArgs.push_back("-emit-pch");
+ else
+ CmdArgs.push_back("-emit-pth");
+ } else if (isa<VerifyPCHJobAction>(JA)) {
+ CmdArgs.push_back("-verify-pch");
+ } else {
+ assert((isa<CompileJobAction>(JA) || isa<BackendJobAction>(JA)) &&
+ "Invalid action for clang tool.");
+ if (JA.getType() == types::TY_Nothing) {
+ CmdArgs.push_back("-fsyntax-only");
+ } else if (JA.getType() == types::TY_LLVM_IR ||
+ JA.getType() == types::TY_LTO_IR) {
+ CmdArgs.push_back("-emit-llvm");
+ } else if (JA.getType() == types::TY_LLVM_BC ||
+ JA.getType() == types::TY_LTO_BC) {
+ CmdArgs.push_back("-emit-llvm-bc");
+ } else if (JA.getType() == types::TY_PP_Asm) {
+ CmdArgs.push_back("-S");
+ } else if (JA.getType() == types::TY_AST) {
+ CmdArgs.push_back("-emit-pch");
+ } else if (JA.getType() == types::TY_ModuleFile) {
+ CmdArgs.push_back("-module-file-info");
+ } else if (JA.getType() == types::TY_RewrittenObjC) {
+ CmdArgs.push_back("-rewrite-objc");
+ rewriteKind = RK_NonFragile;
+ } else if (JA.getType() == types::TY_RewrittenLegacyObjC) {
+ CmdArgs.push_back("-rewrite-objc");
+ rewriteKind = RK_Fragile;
+ } else {
+ assert(JA.getType() == types::TY_PP_Asm && "Unexpected output type!");
+ }
+
+ // Preserve use-list order by default when emitting bitcode, so that
+ // loading the bitcode up in 'opt' or 'llc' and running passes gives the
+ // same result as running passes here. For LTO, we don't need to preserve
+ // the use-list order, since serialization to bitcode is part of the flow.
+ if (JA.getType() == types::TY_LLVM_BC)
+ CmdArgs.push_back("-emit-llvm-uselists");
+
+ if (D.isUsingLTO())
+ Args.AddLastArg(CmdArgs, options::OPT_flto, options::OPT_flto_EQ);
+ }
+
+ if (const Arg *A = Args.getLastArg(options::OPT_fthinlto_index_EQ)) {
+ if (!types::isLLVMIR(Input.getType()))
+ D.Diag(diag::err_drv_argument_only_allowed_with) << A->getAsString(Args)
+ << "-x ir";
+ Args.AddLastArg(CmdArgs, options::OPT_fthinlto_index_EQ);
+ }
+
+ // We normally speed up the clang process a bit by skipping destructors at
+ // exit, but when we're generating diagnostics we can rely on some of the
+ // cleanup.
+ if (!C.isForDiagnostics())
+ CmdArgs.push_back("-disable-free");
+
+// Disable the verification pass in -asserts builds.
+#ifdef NDEBUG
+ CmdArgs.push_back("-disable-llvm-verifier");
+#endif
+
+ // Set the main file name, so that debug info works even with
+ // -save-temps.
+ CmdArgs.push_back("-main-file-name");
+ CmdArgs.push_back(getBaseInputName(Args, Input));
+
+ // Some flags which affect the language (via preprocessor
+ // defines).
+ if (Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("-static-define");
+
+ if (isa<AnalyzeJobAction>(JA)) {
+ // Enable region store model by default.
+ CmdArgs.push_back("-analyzer-store=region");
+
+ // Treat blocks as analysis entry points.
+ CmdArgs.push_back("-analyzer-opt-analyze-nested-blocks");
+
+ CmdArgs.push_back("-analyzer-eagerly-assume");
+
+ // Add default argument set.
+ if (!Args.hasArg(options::OPT__analyzer_no_default_checks)) {
+ CmdArgs.push_back("-analyzer-checker=core");
+
+ if (!IsWindowsMSVC)
+ CmdArgs.push_back("-analyzer-checker=unix");
+
+ // Disable some unix checkers for PS4.
+ if (IsPS4CPU) {
+ CmdArgs.push_back("-analyzer-disable-checker=unix.API");
+ CmdArgs.push_back("-analyzer-disable-checker=unix.Vfork");
+ }
+
+ if (getToolChain().getTriple().getVendor() == llvm::Triple::Apple)
+ CmdArgs.push_back("-analyzer-checker=osx");
+
+ CmdArgs.push_back("-analyzer-checker=deadcode");
+
+ if (types::isCXX(Input.getType()))
+ CmdArgs.push_back("-analyzer-checker=cplusplus");
+
+ if (!IsPS4CPU) {
+ CmdArgs.push_back(
+ "-analyzer-checker=security.insecureAPI.UncheckedReturn");
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.getpw");
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.gets");
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.mktemp");
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.mkstemp");
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.vfork");
+ }
+
+ // Default nullability checks.
+ CmdArgs.push_back("-analyzer-checker=nullability.NullPassedToNonnull");
+ CmdArgs.push_back(
+ "-analyzer-checker=nullability.NullReturnedFromNonnull");
+ }
+
+ // Set the output format. The default is plist, for (lame) historical
+ // reasons.
+ CmdArgs.push_back("-analyzer-output");
+ if (Arg *A = Args.getLastArg(options::OPT__analyzer_output))
+ CmdArgs.push_back(A->getValue());
+ else
+ CmdArgs.push_back("plist");
+
+ // Disable the presentation of standard compiler warnings when
+ // using --analyze. We only want to show static analyzer diagnostics
+ // or frontend errors.
+ CmdArgs.push_back("-w");
+
+ // Add -Xanalyzer arguments when running as analyzer.
+ Args.AddAllArgValues(CmdArgs, options::OPT_Xanalyzer);
+ }
+
+ CheckCodeGenerationOptions(D, Args);
+
+ llvm::Reloc::Model RelocationModel;
+ unsigned PICLevel;
+ bool IsPIE;
+ std::tie(RelocationModel, PICLevel, IsPIE) =
+ ParsePICArgs(getToolChain(), Triple, Args);
+
+ const char *RMName = RelocationModelName(RelocationModel);
+ if (RMName) {
+ CmdArgs.push_back("-mrelocation-model");
+ CmdArgs.push_back(RMName);
+ }
+ if (PICLevel > 0) {
+ CmdArgs.push_back("-pic-level");
+ CmdArgs.push_back(PICLevel == 1 ? "1" : "2");
+ if (IsPIE) {
+ CmdArgs.push_back("-pie-level");
+ CmdArgs.push_back(PICLevel == 1 ? "1" : "2");
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_meabi)) {
+ CmdArgs.push_back("-meabi");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ CmdArgs.push_back("-mthread-model");
+ if (Arg *A = Args.getLastArg(options::OPT_mthread_model))
+ CmdArgs.push_back(A->getValue());
+ else
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().getThreadModel()));
+
+ Args.AddLastArg(CmdArgs, options::OPT_fveclib);
+
+ if (!Args.hasFlag(options::OPT_fmerge_all_constants,
+ options::OPT_fno_merge_all_constants))
+ CmdArgs.push_back("-fno-merge-all-constants");
+
+ // LLVM Code Generator Options.
+
+ if (Args.hasArg(options::OPT_frewrite_map_file) ||
+ Args.hasArg(options::OPT_frewrite_map_file_EQ)) {
+ for (const Arg *A : Args.filtered(options::OPT_frewrite_map_file,
+ options::OPT_frewrite_map_file_EQ)) {
+ CmdArgs.push_back("-frewrite-map-file");
+ CmdArgs.push_back(A->getValue());
+ A->claim();
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_Wframe_larger_than_EQ)) {
+ StringRef v = A->getValue();
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back(Args.MakeArgString("-warn-stack-size=" + v));
+ A->claim();
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_mregparm_EQ)) {
+ CmdArgs.push_back("-mregparm");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_fpcc_struct_return,
+ options::OPT_freg_struct_return)) {
+ if (getToolChain().getArch() != llvm::Triple::x86) {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << getToolChain().getTriple().str();
+ } else if (A->getOption().matches(options::OPT_fpcc_struct_return)) {
+ CmdArgs.push_back("-fpcc-struct-return");
+ } else {
+ assert(A->getOption().matches(options::OPT_freg_struct_return));
+ CmdArgs.push_back("-freg-struct-return");
+ }
+ }
+
+ if (Args.hasFlag(options::OPT_mrtd, options::OPT_mno_rtd, false))
+ CmdArgs.push_back("-mrtd");
+
+ if (shouldUseFramePointer(Args, getToolChain().getTriple()))
+ CmdArgs.push_back("-mdisable-fp-elim");
+ if (!Args.hasFlag(options::OPT_fzero_initialized_in_bss,
+ options::OPT_fno_zero_initialized_in_bss))
+ CmdArgs.push_back("-mno-zero-initialized-in-bss");
+
+ bool OFastEnabled = isOptimizationLevelFast(Args);
+ // If -Ofast is the optimization level, then -fstrict-aliasing should be
+ // enabled. This alias option is being used to simplify the hasFlag logic.
+ OptSpecifier StrictAliasingAliasOption =
+ OFastEnabled ? options::OPT_Ofast : options::OPT_fstrict_aliasing;
+ // We turn strict aliasing off by default if we're in CL mode, since MSVC
+ // doesn't do any TBAA.
+ bool TBAAOnByDefault = !getToolChain().getDriver().IsCLMode();
+ if (!Args.hasFlag(options::OPT_fstrict_aliasing, StrictAliasingAliasOption,
+ options::OPT_fno_strict_aliasing, TBAAOnByDefault))
+ CmdArgs.push_back("-relaxed-aliasing");
+ if (!Args.hasFlag(options::OPT_fstruct_path_tbaa,
+ options::OPT_fno_struct_path_tbaa))
+ CmdArgs.push_back("-no-struct-path-tbaa");
+ if (Args.hasFlag(options::OPT_fstrict_enums, options::OPT_fno_strict_enums,
+ false))
+ CmdArgs.push_back("-fstrict-enums");
+ if (Args.hasFlag(options::OPT_fstrict_vtable_pointers,
+ options::OPT_fno_strict_vtable_pointers,
+ false))
+ CmdArgs.push_back("-fstrict-vtable-pointers");
+ if (!Args.hasFlag(options::OPT_foptimize_sibling_calls,
+ options::OPT_fno_optimize_sibling_calls))
+ CmdArgs.push_back("-mdisable-tail-calls");
+
+ // Handle segmented stacks.
+ if (Args.hasArg(options::OPT_fsplit_stack))
+ CmdArgs.push_back("-split-stacks");
+
+ // If -Ofast is the optimization level, then -ffast-math should be enabled.
+ // This alias option is being used to simplify the getLastArg logic.
+ OptSpecifier FastMathAliasOption =
+ OFastEnabled ? options::OPT_Ofast : options::OPT_ffast_math;
+
+ // Handle various floating point optimization flags, mapping them to the
+ // appropriate LLVM code generation flags. The pattern for all of these is to
+ // default off the codegen optimizations, and if any flag enables them and no
+ // flag disables them after the flag enabling them, enable the codegen
+ // optimization. This is complicated by several "umbrella" flags.
+ if (Arg *A = Args.getLastArg(
+ options::OPT_ffast_math, FastMathAliasOption,
+ options::OPT_fno_fast_math, options::OPT_ffinite_math_only,
+ options::OPT_fno_finite_math_only, options::OPT_fhonor_infinities,
+ options::OPT_fno_honor_infinities))
+ if (A->getOption().getID() != options::OPT_fno_fast_math &&
+ A->getOption().getID() != options::OPT_fno_finite_math_only &&
+ A->getOption().getID() != options::OPT_fhonor_infinities)
+ CmdArgs.push_back("-menable-no-infs");
+ if (Arg *A = Args.getLastArg(
+ options::OPT_ffast_math, FastMathAliasOption,
+ options::OPT_fno_fast_math, options::OPT_ffinite_math_only,
+ options::OPT_fno_finite_math_only, options::OPT_fhonor_nans,
+ options::OPT_fno_honor_nans))
+ if (A->getOption().getID() != options::OPT_fno_fast_math &&
+ A->getOption().getID() != options::OPT_fno_finite_math_only &&
+ A->getOption().getID() != options::OPT_fhonor_nans)
+ CmdArgs.push_back("-menable-no-nans");
+
+ // -fmath-errno is the default on some platforms, e.g. BSD-derived OSes.
+ bool MathErrno = getToolChain().IsMathErrnoDefault();
+ if (Arg *A =
+ Args.getLastArg(options::OPT_ffast_math, FastMathAliasOption,
+ options::OPT_fno_fast_math, options::OPT_fmath_errno,
+ options::OPT_fno_math_errno)) {
+    // Turning on -ffast-math (with either flag) removes the need for MathErrno.
+    // However, turning *off* -ffast-math merely restores the toolchain default
+    // (which may be false).
+ if (A->getOption().getID() == options::OPT_fno_math_errno ||
+ A->getOption().getID() == options::OPT_ffast_math ||
+ A->getOption().getID() == options::OPT_Ofast)
+ MathErrno = false;
+ else if (A->getOption().getID() == options::OPT_fmath_errno)
+ MathErrno = true;
+ }
+ if (MathErrno)
+ CmdArgs.push_back("-fmath-errno");
+
+ // There are several flags which require disabling very specific
+ // optimizations. Any of these being disabled forces us to turn off the
+ // entire set of LLVM optimizations, so collect them through all the flag
+ // madness.
+ bool AssociativeMath = false;
+ if (Arg *A = Args.getLastArg(
+ options::OPT_ffast_math, FastMathAliasOption,
+ options::OPT_fno_fast_math, options::OPT_funsafe_math_optimizations,
+ options::OPT_fno_unsafe_math_optimizations,
+ options::OPT_fassociative_math, options::OPT_fno_associative_math))
+ if (A->getOption().getID() != options::OPT_fno_fast_math &&
+ A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
+ A->getOption().getID() != options::OPT_fno_associative_math)
+ AssociativeMath = true;
+ bool ReciprocalMath = false;
+ if (Arg *A = Args.getLastArg(
+ options::OPT_ffast_math, FastMathAliasOption,
+ options::OPT_fno_fast_math, options::OPT_funsafe_math_optimizations,
+ options::OPT_fno_unsafe_math_optimizations,
+ options::OPT_freciprocal_math, options::OPT_fno_reciprocal_math))
+ if (A->getOption().getID() != options::OPT_fno_fast_math &&
+ A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
+ A->getOption().getID() != options::OPT_fno_reciprocal_math)
+ ReciprocalMath = true;
+ bool SignedZeros = true;
+ if (Arg *A = Args.getLastArg(
+ options::OPT_ffast_math, FastMathAliasOption,
+ options::OPT_fno_fast_math, options::OPT_funsafe_math_optimizations,
+ options::OPT_fno_unsafe_math_optimizations,
+ options::OPT_fsigned_zeros, options::OPT_fno_signed_zeros))
+ if (A->getOption().getID() != options::OPT_fno_fast_math &&
+ A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
+ A->getOption().getID() != options::OPT_fsigned_zeros)
+ SignedZeros = false;
+ bool TrappingMath = true;
+ if (Arg *A = Args.getLastArg(
+ options::OPT_ffast_math, FastMathAliasOption,
+ options::OPT_fno_fast_math, options::OPT_funsafe_math_optimizations,
+ options::OPT_fno_unsafe_math_optimizations,
+ options::OPT_ftrapping_math, options::OPT_fno_trapping_math))
+ if (A->getOption().getID() != options::OPT_fno_fast_math &&
+ A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
+ A->getOption().getID() != options::OPT_ftrapping_math)
+ TrappingMath = false;
+ if (!MathErrno && AssociativeMath && ReciprocalMath && !SignedZeros &&
+ !TrappingMath)
+ CmdArgs.push_back("-menable-unsafe-fp-math");
+
+ if (!SignedZeros)
+ CmdArgs.push_back("-fno-signed-zeros");
+
+ if (ReciprocalMath)
+ CmdArgs.push_back("-freciprocal-math");
+
+ // Validate and pass through -fp-contract option.
+ if (Arg *A = Args.getLastArg(options::OPT_ffast_math, FastMathAliasOption,
+ options::OPT_fno_fast_math,
+ options::OPT_ffp_contract)) {
+ if (A->getOption().getID() == options::OPT_ffp_contract) {
+ StringRef Val = A->getValue();
+ if (Val == "fast" || Val == "on" || Val == "off") {
+ CmdArgs.push_back(Args.MakeArgString("-ffp-contract=" + Val));
+ } else {
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << Val;
+ }
+ } else if (A->getOption().matches(options::OPT_ffast_math) ||
+ (OFastEnabled && A->getOption().matches(options::OPT_Ofast))) {
+ // If fast-math is set then set the fp-contract mode to fast.
+ CmdArgs.push_back(Args.MakeArgString("-ffp-contract=fast"));
+ }
+ }
+
+ ParseMRecip(getToolChain().getDriver(), Args, CmdArgs);
+
+ // We separately look for the '-ffast-math' and '-ffinite-math-only' flags,
+ // and if we find them, tell the frontend to provide the appropriate
+ // preprocessor macros. This is distinct from enabling any optimizations as
+ // these options induce language changes which must survive serialization
+ // and deserialization, etc.
+ if (Arg *A = Args.getLastArg(options::OPT_ffast_math, FastMathAliasOption,
+ options::OPT_fno_fast_math))
+ if (!A->getOption().matches(options::OPT_fno_fast_math))
+ CmdArgs.push_back("-ffast-math");
+ if (Arg *A = Args.getLastArg(options::OPT_ffinite_math_only,
+ options::OPT_fno_fast_math))
+ if (A->getOption().matches(options::OPT_ffinite_math_only))
+ CmdArgs.push_back("-ffinite-math-only");
+
+ // Decide whether to use verbose asm. Verbose assembly is the default on
+ // toolchains which have the integrated assembler on by default.
+ bool IsIntegratedAssemblerDefault =
+ getToolChain().IsIntegratedAssemblerDefault();
+ if (Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm,
+ IsIntegratedAssemblerDefault) ||
+ Args.hasArg(options::OPT_dA))
+ CmdArgs.push_back("-masm-verbose");
+
+ if (!Args.hasFlag(options::OPT_fintegrated_as, options::OPT_fno_integrated_as,
+ IsIntegratedAssemblerDefault))
+ CmdArgs.push_back("-no-integrated-as");
+
+ if (Args.hasArg(options::OPT_fdebug_pass_structure)) {
+ CmdArgs.push_back("-mdebug-pass");
+ CmdArgs.push_back("Structure");
+ }
+ if (Args.hasArg(options::OPT_fdebug_pass_arguments)) {
+ CmdArgs.push_back("-mdebug-pass");
+ CmdArgs.push_back("Arguments");
+ }
+
+ // Enable -mconstructor-aliases except on darwin, where we have to
+ // work around a linker bug; see <rdar://problem/7651567>.
+ if (!getToolChain().getTriple().isOSDarwin())
+ CmdArgs.push_back("-mconstructor-aliases");
+
+ // Darwin's kernel doesn't support guard variables; just die if we
+ // try to use them.
+ if (KernelOrKext && getToolChain().getTriple().isOSDarwin())
+ CmdArgs.push_back("-fforbid-guard-variables");
+
+ if (Args.hasFlag(options::OPT_mms_bitfields, options::OPT_mno_ms_bitfields,
+ false)) {
+ CmdArgs.push_back("-mms-bitfields");
+ }
+
+ // This is a coarse approximation of what llvm-gcc actually does, both
+ // -fasynchronous-unwind-tables and -fnon-call-exceptions interact in more
+ // complicated ways.
+ bool AsynchronousUnwindTables =
+ Args.hasFlag(options::OPT_fasynchronous_unwind_tables,
+ options::OPT_fno_asynchronous_unwind_tables,
+ (getToolChain().IsUnwindTablesDefault() ||
+ getToolChain().getSanitizerArgs().needsUnwindTables()) &&
+ !KernelOrKext);
+ if (Args.hasFlag(options::OPT_funwind_tables, options::OPT_fno_unwind_tables,
+ AsynchronousUnwindTables))
+ CmdArgs.push_back("-munwind-tables");
+
+ getToolChain().addClangTargetOptions(Args, CmdArgs);
+
+ if (Arg *A = Args.getLastArg(options::OPT_flimited_precision_EQ)) {
+ CmdArgs.push_back("-mlimit-float-precision");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ // FIXME: Handle -mtune=.
+ (void)Args.hasArg(options::OPT_mtune_EQ);
+
+ if (Arg *A = Args.getLastArg(options::OPT_mcmodel_EQ)) {
+ CmdArgs.push_back("-mcode-model");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ // Add the target cpu
+ std::string CPU = getCPUName(Args, Triple, /*FromAs*/ false);
+ if (!CPU.empty()) {
+ CmdArgs.push_back("-target-cpu");
+ CmdArgs.push_back(Args.MakeArgString(CPU));
+ }
+
+ if (const Arg *A = Args.getLastArg(options::OPT_mfpmath_EQ)) {
+ CmdArgs.push_back("-mfpmath");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ // Add the target features
+ getTargetFeatures(getToolChain(), Triple, Args, CmdArgs, false);
+
+ // Add target specific flags.
+ switch (getToolChain().getArch()) {
+ default:
+ break;
+
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ // Use the effective triple, which takes into account the deployment target.
+ AddARMTargetArgs(Triple, Args, CmdArgs, KernelOrKext);
+ break;
+
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be:
+ AddAArch64TargetArgs(Args, CmdArgs);
+ break;
+
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ AddMIPSTargetArgs(Args, CmdArgs);
+ break;
+
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ AddPPCTargetArgs(Args, CmdArgs);
+ break;
+
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcel:
+ case llvm::Triple::sparcv9:
+ AddSparcTargetArgs(Args, CmdArgs);
+ break;
+
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ AddX86TargetArgs(Args, CmdArgs);
+ break;
+
+ case llvm::Triple::hexagon:
+ AddHexagonTargetArgs(Args, CmdArgs);
+ break;
+ }
+
+ // The 'g' groups options involve a somewhat intricate sequence of decisions
+ // about what to pass from the driver to the frontend, but by the time they
+ // reach cc1 they've been factored into three well-defined orthogonal choices:
+ // * what level of debug info to generate
+ // * what dwarf version to write
+ // * what debugger tuning to use
+ // This avoids having to monkey around further in cc1 other than to disable
+ // codeview if not running in a Windows environment. Perhaps even that
+ // decision should be made in the driver as well though.
+ unsigned DwarfVersion = 0;
+ llvm::DebuggerKind DebuggerTuning = getToolChain().getDefaultDebuggerTuning();
+ // These two are potentially updated by AddClangCLArgs.
+ enum CodeGenOptions::DebugInfoKind DebugInfoKind =
+ CodeGenOptions::NoDebugInfo;
+ bool EmitCodeView = false;
+
+ // Add clang-cl arguments.
+ if (getToolChain().getDriver().IsCLMode())
+ AddClangCLArgs(Args, CmdArgs, &DebugInfoKind, &EmitCodeView);
+
+ // Pass the linker version in use.
+ if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ)) {
+ CmdArgs.push_back("-target-linker-version");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ if (!shouldUseLeafFramePointer(Args, getToolChain().getTriple()))
+ CmdArgs.push_back("-momit-leaf-frame-pointer");
+
+ // Explicitly error on some things we know we don't support and can't just
+ // ignore.
+ types::ID InputType = Input.getType();
+ if (!Args.hasArg(options::OPT_fallow_unsupported)) {
+ Arg *Unsupported;
+ if (types::isCXX(InputType) && getToolChain().getTriple().isOSDarwin() &&
+ getToolChain().getArch() == llvm::Triple::x86) {
+ if ((Unsupported = Args.getLastArg(options::OPT_fapple_kext)) ||
+ (Unsupported = Args.getLastArg(options::OPT_mkernel)))
+ D.Diag(diag::err_drv_clang_unsupported_opt_cxx_darwin_i386)
+ << Unsupported->getOption().getName();
+ }
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_v);
+ Args.AddLastArg(CmdArgs, options::OPT_H);
+ if (D.CCPrintHeaders && !D.CCGenDiagnostics) {
+ CmdArgs.push_back("-header-include-file");
+ CmdArgs.push_back(D.CCPrintHeadersFilename ? D.CCPrintHeadersFilename
+ : "-");
+ }
+ Args.AddLastArg(CmdArgs, options::OPT_P);
+ Args.AddLastArg(CmdArgs, options::OPT_print_ivar_layout);
+
+ if (D.CCLogDiagnostics && !D.CCGenDiagnostics) {
+ CmdArgs.push_back("-diagnostic-log-file");
+ CmdArgs.push_back(D.CCLogDiagnosticsFilename ? D.CCLogDiagnosticsFilename
+ : "-");
+ }
+
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ Arg *SplitDwarfArg = Args.getLastArg(options::OPT_gsplit_dwarf);
+ if (Arg *A = Args.getLastArg(options::OPT_g_Group)) {
+ // If the last option explicitly specified a debug-info level, use it.
+ if (A->getOption().matches(options::OPT_gN_Group)) {
+ DebugInfoKind = DebugLevelToInfoKind(*A);
+ // If you say "-gsplit-dwarf -gline-tables-only", -gsplit-dwarf loses.
+ // But -gsplit-dwarf is not a g_group option, hence we have to check the
+ // order explicitly. (If -gsplit-dwarf wins, we fix DebugInfoKind later.)
+ if (SplitDwarfArg && DebugInfoKind < CodeGenOptions::LimitedDebugInfo &&
+ A->getIndex() > SplitDwarfArg->getIndex())
+ SplitDwarfArg = nullptr;
+ } else
+ // For any other 'g' option, use Limited.
+ DebugInfoKind = CodeGenOptions::LimitedDebugInfo;
+ }
+
+ // If a debugger tuning argument appeared, remember it.
+ if (Arg *A = Args.getLastArg(options::OPT_gTune_Group,
+ options::OPT_ggdbN_Group)) {
+ if (A->getOption().matches(options::OPT_glldb))
+ DebuggerTuning = llvm::DebuggerKind::LLDB;
+ else if (A->getOption().matches(options::OPT_gsce))
+ DebuggerTuning = llvm::DebuggerKind::SCE;
+ else
+ DebuggerTuning = llvm::DebuggerKind::GDB;
+ }
+
+ // If a -gdwarf argument appeared, remember it.
+ if (Arg *A = Args.getLastArg(options::OPT_gdwarf_2, options::OPT_gdwarf_3,
+ options::OPT_gdwarf_4, options::OPT_gdwarf_5))
+ DwarfVersion = DwarfVersionNum(A->getSpelling());
+
+ // Forward -gcodeview.
+  // EmitCodeView might have been set by CL-compatibility argument parsing.
+ if (Args.hasArg(options::OPT_gcodeview) || EmitCodeView) {
+ // DwarfVersion remains at 0 if no explicit choice was made.
+ CmdArgs.push_back("-gcodeview");
+ } else if (DwarfVersion == 0 &&
+ DebugInfoKind != CodeGenOptions::NoDebugInfo) {
+ DwarfVersion = getToolChain().GetDefaultDwarfVersion();
+ }
+
+ // We ignore flags -gstrict-dwarf and -grecord-gcc-switches for now.
+ Args.ClaimAllArgs(options::OPT_g_flags_Group);
+
+ // PS4 defaults to no column info
+ if (Args.hasFlag(options::OPT_gcolumn_info, options::OPT_gno_column_info,
+ /*Default=*/ !IsPS4CPU))
+ CmdArgs.push_back("-dwarf-column-info");
+
+ // FIXME: Move backend command line options to the module.
+ if (Args.hasArg(options::OPT_gmodules)) {
+ DebugInfoKind = CodeGenOptions::LimitedDebugInfo;
+ CmdArgs.push_back("-dwarf-ext-refs");
+ CmdArgs.push_back("-fmodule-format=obj");
+ }
+
+ // -gsplit-dwarf should turn on -g and enable the backend dwarf
+ // splitting and extraction.
+ // FIXME: Currently only works on Linux.
+ if (getToolChain().getTriple().isOSLinux() && SplitDwarfArg) {
+ DebugInfoKind = CodeGenOptions::LimitedDebugInfo;
+ CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-split-dwarf=Enable");
+ }
+
+ // After we've dealt with all combinations of things that could
+ // make DebugInfoKind be other than None or DebugLineTablesOnly,
+ // figure out if we need to "upgrade" it to standalone debug info.
+ // We parse these two '-f' options whether or not they will be used,
+ // to claim them even if you wrote "-fstandalone-debug -gline-tables-only"
+ bool NeedFullDebug = Args.hasFlag(options::OPT_fstandalone_debug,
+ options::OPT_fno_standalone_debug,
+ getToolChain().GetDefaultStandaloneDebug());
+ if (DebugInfoKind == CodeGenOptions::LimitedDebugInfo && NeedFullDebug)
+ DebugInfoKind = CodeGenOptions::FullDebugInfo;
+ RenderDebugEnablingArgs(Args, CmdArgs, DebugInfoKind, DwarfVersion,
+ DebuggerTuning);
+
+ // -ggnu-pubnames turns on gnu style pubnames in the backend.
+ if (Args.hasArg(options::OPT_ggnu_pubnames)) {
+ CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-generate-gnu-dwarf-pub-sections");
+ }
+
+ // -gdwarf-aranges turns on the emission of the aranges section in the
+ // backend.
+ // Always enabled on the PS4.
+ if (Args.hasArg(options::OPT_gdwarf_aranges) || IsPS4CPU) {
+ CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-generate-arange-section");
+ }
+
+ if (Args.hasFlag(options::OPT_fdebug_types_section,
+ options::OPT_fno_debug_types_section, false)) {
+ CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-generate-type-units");
+ }
+
+ // CloudABI uses -ffunction-sections and -fdata-sections by default.
+ bool UseSeparateSections = Triple.getOS() == llvm::Triple::CloudABI;
+
+ if (Args.hasFlag(options::OPT_ffunction_sections,
+ options::OPT_fno_function_sections, UseSeparateSections)) {
+ CmdArgs.push_back("-ffunction-sections");
+ }
+
+ if (Args.hasFlag(options::OPT_fdata_sections, options::OPT_fno_data_sections,
+ UseSeparateSections)) {
+ CmdArgs.push_back("-fdata-sections");
+ }
+
+ if (!Args.hasFlag(options::OPT_funique_section_names,
+ options::OPT_fno_unique_section_names, true))
+ CmdArgs.push_back("-fno-unique-section-names");
+
+ Args.AddAllArgs(CmdArgs, options::OPT_finstrument_functions);
+
+ addPGOAndCoverageFlags(C, D, Output, Args, CmdArgs);
+
+ // Add runtime flag for PS4 when PGO or Coverage are enabled.
+ if (getToolChain().getTriple().isPS4CPU())
+ addPS4ProfileRTArgs(getToolChain(), Args, CmdArgs);
+
+ // Pass options for controlling the default header search paths.
+ if (Args.hasArg(options::OPT_nostdinc)) {
+ CmdArgs.push_back("-nostdsysteminc");
+ CmdArgs.push_back("-nobuiltininc");
+ } else {
+ if (Args.hasArg(options::OPT_nostdlibinc))
+ CmdArgs.push_back("-nostdsysteminc");
+ Args.AddLastArg(CmdArgs, options::OPT_nostdincxx);
+ Args.AddLastArg(CmdArgs, options::OPT_nobuiltininc);
+ }
+
+ // Pass the path to compiler resource files.
+ CmdArgs.push_back("-resource-dir");
+ CmdArgs.push_back(D.ResourceDir.c_str());
+
+ Args.AddLastArg(CmdArgs, options::OPT_working_directory);
+
+ bool ARCMTEnabled = false;
+ if (!Args.hasArg(options::OPT_fno_objc_arc, options::OPT_fobjc_arc)) {
+ if (const Arg *A = Args.getLastArg(options::OPT_ccc_arcmt_check,
+ options::OPT_ccc_arcmt_modify,
+ options::OPT_ccc_arcmt_migrate)) {
+ ARCMTEnabled = true;
+ switch (A->getOption().getID()) {
+ default:
+ llvm_unreachable("missed a case");
+ case options::OPT_ccc_arcmt_check:
+ CmdArgs.push_back("-arcmt-check");
+ break;
+ case options::OPT_ccc_arcmt_modify:
+ CmdArgs.push_back("-arcmt-modify");
+ break;
+ case options::OPT_ccc_arcmt_migrate:
+ CmdArgs.push_back("-arcmt-migrate");
+ CmdArgs.push_back("-mt-migrate-directory");
+ CmdArgs.push_back(A->getValue());
+
+ Args.AddLastArg(CmdArgs, options::OPT_arcmt_migrate_report_output);
+ Args.AddLastArg(CmdArgs, options::OPT_arcmt_migrate_emit_arc_errors);
+ break;
+ }
+ }
+ } else {
+ Args.ClaimAllArgs(options::OPT_ccc_arcmt_check);
+ Args.ClaimAllArgs(options::OPT_ccc_arcmt_modify);
+ Args.ClaimAllArgs(options::OPT_ccc_arcmt_migrate);
+ }
+
+ if (const Arg *A = Args.getLastArg(options::OPT_ccc_objcmt_migrate)) {
+ if (ARCMTEnabled) {
+ D.Diag(diag::err_drv_argument_not_allowed_with) << A->getAsString(Args)
+ << "-ccc-arcmt-migrate";
+ }
+ CmdArgs.push_back("-mt-migrate-directory");
+ CmdArgs.push_back(A->getValue());
+
+ if (!Args.hasArg(options::OPT_objcmt_migrate_literals,
+ options::OPT_objcmt_migrate_subscripting,
+ options::OPT_objcmt_migrate_property)) {
+ // None specified, means enable them all.
+ CmdArgs.push_back("-objcmt-migrate-literals");
+ CmdArgs.push_back("-objcmt-migrate-subscripting");
+ CmdArgs.push_back("-objcmt-migrate-property");
+ } else {
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_literals);
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_subscripting);
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_property);
+ }
+ } else {
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_literals);
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_subscripting);
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_property);
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_all);
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_readonly_property);
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_readwrite_property);
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_property_dot_syntax);
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_annotation);
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_instancetype);
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_nsmacros);
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_protocol_conformance);
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_atomic_property);
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_returns_innerpointer_property);
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_ns_nonatomic_iosonly);
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_designated_init);
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_whitelist_dir_path);
+ }
+
+ // Add preprocessing options like -I, -D, etc. if we are using the
+ // preprocessor.
+ //
+ // FIXME: Support -fpreprocessed
+ if (types::getPreprocessedType(InputType) != types::TY_INVALID)
+ AddPreprocessingOptions(C, JA, D, Args, CmdArgs, Output, Inputs,
+ AuxToolChain);
+
+ // Don't warn about "clang -c -DPIC -fPIC test.i" because libtool.m4 assumes
+ // that "The compiler can only warn and ignore the option if not recognized".
+ // When building with ccache, it will pass -D options to clang even on
+ // preprocessed inputs and configure concludes that -fPIC is not supported.
+ Args.ClaimAllArgs(options::OPT_D);
+
+ // Manually translate -O4 to -O3; let clang reject others.
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
+ if (A->getOption().matches(options::OPT_O4)) {
+ CmdArgs.push_back("-O3");
+ D.Diag(diag::warn_O4_is_O3);
+ } else {
+ A->render(Args, CmdArgs);
+ }
+ }
+
+ // Warn about ignored options to clang.
+ for (const Arg *A :
+ Args.filtered(options::OPT_clang_ignored_gcc_optimization_f_Group)) {
+ D.Diag(diag::warn_ignored_gcc_optimization) << A->getAsString(Args);
+ A->claim();
+ }
+
+ claimNoWarnArgs(Args);
+
+ Args.AddAllArgs(CmdArgs, options::OPT_R_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_W_Group);
+ if (Args.hasFlag(options::OPT_pedantic, options::OPT_no_pedantic, false))
+ CmdArgs.push_back("-pedantic");
+ Args.AddLastArg(CmdArgs, options::OPT_pedantic_errors);
+ Args.AddLastArg(CmdArgs, options::OPT_w);
+
+ // Handle -{std, ansi, trigraphs} -- take the last of -{std, ansi}
+ // (-ansi is equivalent to -std=c89 or -std=c++98).
+ //
+ // If a std is supplied, only add -trigraphs if it follows the
+ // option.
+ bool ImplyVCPPCXXVer = false;
+ if (Arg *Std = Args.getLastArg(options::OPT_std_EQ, options::OPT_ansi)) {
+ if (Std->getOption().matches(options::OPT_ansi))
+ if (types::isCXX(InputType))
+ CmdArgs.push_back("-std=c++98");
+ else
+ CmdArgs.push_back("-std=c89");
+ else
+ Std->render(Args, CmdArgs);
+
+ // If -f(no-)trigraphs appears after the language standard flag, honor it.
+ if (Arg *A = Args.getLastArg(options::OPT_std_EQ, options::OPT_ansi,
+ options::OPT_ftrigraphs,
+ options::OPT_fno_trigraphs))
+ if (A != Std)
+ A->render(Args, CmdArgs);
+ } else {
+ // Honor -std-default.
+ //
+ // FIXME: Clang doesn't correctly handle -std= when the input language
+ // doesn't match. For the time being just ignore this for C++ inputs;
+ // eventually we want to do all the standard defaulting here instead of
+ // splitting it between the driver and clang -cc1.
+ if (!types::isCXX(InputType))
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_std_default_EQ, "-std=",
+ /*Joined=*/true);
+ else if (IsWindowsMSVC)
+ ImplyVCPPCXXVer = true;
+
+ Args.AddLastArg(CmdArgs, options::OPT_ftrigraphs,
+ options::OPT_fno_trigraphs);
+ }
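+ // For illustration: "clang -ansi foo.cpp" is rendered as "-std=c++98" at the
+ // -cc1 level and "clang -ansi foo.c" as "-std=c89", while with
+ // "clang -ftrigraphs -std=c99 foo.c" the -ftrigraphs is dropped because it
+ // precedes the standard flag.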
+
+ // GCC's behavior for -Wwrite-strings is a bit strange:
+ // * In C, this "warning flag" changes the types of string literals from
+ // 'char[N]' to 'const char[N]', and thus triggers an unrelated warning
+ // for the discarded qualifier.
+ // * In C++, this is just a normal warning flag.
+ //
+ // Implementing this warning correctly in C is hard, so we follow GCC's
+ // behavior for now. FIXME: Directly diagnose uses of a string literal as
+ // a non-const char* in C, rather than using this crude hack.
+ if (!types::isCXX(InputType)) {
+ // FIXME: This should behave just like a warning flag, and thus should also
+ // respect -Weverything, -Wno-everything, -Werror=write-strings, and so on.
+ Arg *WriteStrings =
+ Args.getLastArg(options::OPT_Wwrite_strings,
+ options::OPT_Wno_write_strings, options::OPT_w);
+ if (WriteStrings &&
+ WriteStrings->getOption().matches(options::OPT_Wwrite_strings))
+ CmdArgs.push_back("-fconst-strings");
+ }
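+ // For illustration: "clang -Wwrite-strings -c foo.c" is translated to the
+ // -cc1 flag "-fconst-strings", which gives string literals the type
+ // 'const char[N]'; for C++ inputs the flag is forwarded as an ordinary
+ // warning option by the -W group handling above.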
+
+ // GCC provides a macro definition '__DEPRECATED' when -Wdeprecated is active
+ // during C++ compilation, which it is by default. GCC keeps this define even
+ // in the presence of '-w'; match this behavior bug-for-bug.
+ if (types::isCXX(InputType) &&
+ Args.hasFlag(options::OPT_Wdeprecated, options::OPT_Wno_deprecated,
+ true)) {
+ CmdArgs.push_back("-fdeprecated-macro");
+ }
+
+ // Translate GCC's misnamed '-fasm' arguments to '-fgnu-keywords'.
+ if (Arg *Asm = Args.getLastArg(options::OPT_fasm, options::OPT_fno_asm)) {
+ if (Asm->getOption().matches(options::OPT_fasm))
+ CmdArgs.push_back("-fgnu-keywords");
+ else
+ CmdArgs.push_back("-fno-gnu-keywords");
+ }
+
+ if (ShouldDisableDwarfDirectory(Args, getToolChain()))
+ CmdArgs.push_back("-fno-dwarf-directory-asm");
+
+ if (ShouldDisableAutolink(Args, getToolChain()))
+ CmdArgs.push_back("-fno-autolink");
+
+ // Add in -fdebug-compilation-dir if necessary.
+ addDebugCompDirArg(Args, CmdArgs);
+
+ for (const Arg *A : Args.filtered(options::OPT_fdebug_prefix_map_EQ)) {
+ StringRef Map = A->getValue();
+ if (Map.find('=') == StringRef::npos)
+ D.Diag(diag::err_drv_invalid_argument_to_fdebug_prefix_map) << Map;
+ else
+ CmdArgs.push_back(Args.MakeArgString("-fdebug-prefix-map=" + Map));
+ A->claim();
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_ftemplate_depth_,
+ options::OPT_ftemplate_depth_EQ)) {
+ CmdArgs.push_back("-ftemplate-depth");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_foperator_arrow_depth_EQ)) {
+ CmdArgs.push_back("-foperator-arrow-depth");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_fconstexpr_depth_EQ)) {
+ CmdArgs.push_back("-fconstexpr-depth");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_fconstexpr_steps_EQ)) {
+ CmdArgs.push_back("-fconstexpr-steps");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_fbracket_depth_EQ)) {
+ CmdArgs.push_back("-fbracket-depth");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_Wlarge_by_value_copy_EQ,
+ options::OPT_Wlarge_by_value_copy_def)) {
+ if (A->getNumValues()) {
+ StringRef bytes = A->getValue();
+ CmdArgs.push_back(Args.MakeArgString("-Wlarge-by-value-copy=" + bytes));
+ } else
+ CmdArgs.push_back("-Wlarge-by-value-copy=64"); // default value
+ }
+
+ if (Args.hasArg(options::OPT_relocatable_pch))
+ CmdArgs.push_back("-relocatable-pch");
+
+ if (Arg *A = Args.getLastArg(options::OPT_fconstant_string_class_EQ)) {
+ CmdArgs.push_back("-fconstant-string-class");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_ftabstop_EQ)) {
+ CmdArgs.push_back("-ftabstop");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ CmdArgs.push_back("-ferror-limit");
+ if (Arg *A = Args.getLastArg(options::OPT_ferror_limit_EQ))
+ CmdArgs.push_back(A->getValue());
+ else
+ CmdArgs.push_back("19");
+
+ if (Arg *A = Args.getLastArg(options::OPT_fmacro_backtrace_limit_EQ)) {
+ CmdArgs.push_back("-fmacro-backtrace-limit");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_ftemplate_backtrace_limit_EQ)) {
+ CmdArgs.push_back("-ftemplate-backtrace-limit");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_fconstexpr_backtrace_limit_EQ)) {
+ CmdArgs.push_back("-fconstexpr-backtrace-limit");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_fspell_checking_limit_EQ)) {
+ CmdArgs.push_back("-fspell-checking-limit");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ // Pass -fmessage-length=.
+ CmdArgs.push_back("-fmessage-length");
+ if (Arg *A = Args.getLastArg(options::OPT_fmessage_length_EQ)) {
+ CmdArgs.push_back(A->getValue());
+ } else {
+ // If -fmessage-length=N was not specified, determine whether this is a
+ // terminal and, if so, implicitly define -fmessage-length appropriately.
+ unsigned N = llvm::sys::Process::StandardErrColumns();
+ CmdArgs.push_back(Args.MakeArgString(Twine(N)));
+ }
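+ // For illustration: "clang -fmessage-length=72 foo.c" forwards
+ // "-fmessage-length 72"; with no flag and stderr on an 80-column terminal,
+ // StandardErrColumns() yields "-fmessage-length 80", and a non-terminal
+ // stderr yields 0, i.e. no line wrapping.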
+
+ // -fvisibility= and -fvisibility-ms-compat are of a piece.
+ if (const Arg *A = Args.getLastArg(options::OPT_fvisibility_EQ,
+ options::OPT_fvisibility_ms_compat)) {
+ if (A->getOption().matches(options::OPT_fvisibility_EQ)) {
+ CmdArgs.push_back("-fvisibility");
+ CmdArgs.push_back(A->getValue());
+ } else {
+ assert(A->getOption().matches(options::OPT_fvisibility_ms_compat));
+ CmdArgs.push_back("-fvisibility");
+ CmdArgs.push_back("hidden");
+ CmdArgs.push_back("-ftype-visibility");
+ CmdArgs.push_back("default");
+ }
+ }
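+ // For illustration: "-fvisibility=hidden" becomes "-fvisibility hidden" at
+ // the -cc1 level, while "-fvisibility-ms-compat" expands to the pair
+ // "-fvisibility hidden -ftype-visibility default".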
+
+ Args.AddLastArg(CmdArgs, options::OPT_fvisibility_inlines_hidden);
+
+ Args.AddLastArg(CmdArgs, options::OPT_ftlsmodel_EQ);
+
+ // -fhosted is default.
+ if (Args.hasFlag(options::OPT_ffreestanding, options::OPT_fhosted, false) ||
+ KernelOrKext)
+ CmdArgs.push_back("-ffreestanding");
+
+ // Forward -f (flag) options which we can pass directly.
+ Args.AddLastArg(CmdArgs, options::OPT_femit_all_decls);
+ Args.AddLastArg(CmdArgs, options::OPT_fheinous_gnu_extensions);
+ Args.AddLastArg(CmdArgs, options::OPT_fno_operator_names);
+ // Emulated TLS is enabled by default on Android, and can be enabled manually
+ // with -femulated-tls.
+ bool EmulatedTLSDefault = Triple.isAndroid();
+ if (Args.hasFlag(options::OPT_femulated_tls, options::OPT_fno_emulated_tls,
+ EmulatedTLSDefault))
+ CmdArgs.push_back("-femulated-tls");
+ // AltiVec-like language extensions aren't relevant for assembling.
+ if (!isa<PreprocessJobAction>(JA) || Output.getType() != types::TY_PP_Asm) {
+ Args.AddLastArg(CmdArgs, options::OPT_faltivec);
+ Args.AddLastArg(CmdArgs, options::OPT_fzvector);
+ }
+ Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_show_template_tree);
+ Args.AddLastArg(CmdArgs, options::OPT_fno_elide_type);
+
+ // Forward flags for OpenMP
+ if (Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
+ options::OPT_fno_openmp, false))
+ switch (getOpenMPRuntime(getToolChain(), Args)) {
+ case OMPRT_OMP:
+ case OMPRT_IOMP5:
+ // Clang can generate useful OpenMP code for these two runtime libraries.
+ CmdArgs.push_back("-fopenmp");
+
+ // If no option regarding the use of TLS in OpenMP code generation is
+ // given, decide a default based on the target. Otherwise rely on the
+ // options and pass the right information to the frontend.
+ if (!Args.hasFlag(options::OPT_fopenmp_use_tls,
+ options::OPT_fnoopenmp_use_tls, /*Default=*/true))
+ CmdArgs.push_back("-fnoopenmp-use-tls");
+ break;
+ default:
+ // By default, if Clang doesn't know how to generate useful OpenMP code
+ // for a specific runtime library, we just don't pass the '-fopenmp' flag
+ // down to the actual compilation.
+ // FIXME: It would be better to have a mode which *only* omits IR
+ // generation based on the OpenMP support so that we get consistent
+ // semantic analysis, etc.
+ break;
+ }
+
+ const SanitizerArgs &Sanitize = getToolChain().getSanitizerArgs();
+ Sanitize.addArgs(getToolChain(), Args, CmdArgs, InputType);
+
+ // Report an error for -faltivec on anything other than PowerPC.
+ if (const Arg *A = Args.getLastArg(options::OPT_faltivec)) {
+ const llvm::Triple::ArchType Arch = getToolChain().getArch();
+ if (!(Arch == llvm::Triple::ppc || Arch == llvm::Triple::ppc64 ||
+ Arch == llvm::Triple::ppc64le))
+ D.Diag(diag::err_drv_argument_only_allowed_with) << A->getAsString(Args)
+ << "ppc/ppc64/ppc64le";
+ }
+
+ // -fzvector is incompatible with -faltivec.
+ if (Arg *A = Args.getLastArg(options::OPT_fzvector))
+ if (Args.hasArg(options::OPT_faltivec))
+ D.Diag(diag::err_drv_argument_not_allowed_with) << A->getAsString(Args)
+ << "-faltivec";
+
+ if (getToolChain().SupportsProfiling())
+ Args.AddLastArg(CmdArgs, options::OPT_pg);
+
+ // -flax-vector-conversions is default.
+ if (!Args.hasFlag(options::OPT_flax_vector_conversions,
+ options::OPT_fno_lax_vector_conversions))
+ CmdArgs.push_back("-fno-lax-vector-conversions");
+
+ if (Args.getLastArg(options::OPT_fapple_kext) ||
+ (Args.hasArg(options::OPT_mkernel) && types::isCXX(InputType)))
+ CmdArgs.push_back("-fapple-kext");
+
+ Args.AddLastArg(CmdArgs, options::OPT_fobjc_sender_dependent_dispatch);
+ Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_print_source_range_info);
+ Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_parseable_fixits);
+ Args.AddLastArg(CmdArgs, options::OPT_ftime_report);
+ Args.AddLastArg(CmdArgs, options::OPT_ftrapv);
+
+ if (Arg *A = Args.getLastArg(options::OPT_ftrapv_handler_EQ)) {
+ CmdArgs.push_back("-ftrapv-handler");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_ftrap_function_EQ);
+
+ // -fno-strict-overflow implies -fwrapv if it isn't disabled, but
+ // -fstrict-overflow won't turn off an explicitly enabled -fwrapv.
+ if (Arg *A = Args.getLastArg(options::OPT_fwrapv, options::OPT_fno_wrapv)) {
+ if (A->getOption().matches(options::OPT_fwrapv))
+ CmdArgs.push_back("-fwrapv");
+ } else if (Arg *A = Args.getLastArg(options::OPT_fstrict_overflow,
+ options::OPT_fno_strict_overflow)) {
+ if (A->getOption().matches(options::OPT_fno_strict_overflow))
+ CmdArgs.push_back("-fwrapv");
+ }
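+ // For illustration: "clang -fno-strict-overflow foo.c" implies "-fwrapv"
+ // here, while "clang -fwrapv -fstrict-overflow foo.c" still passes "-fwrapv"
+ // because the explicit -fwrapv is handled by the first branch.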
+
+ if (Arg *A = Args.getLastArg(options::OPT_freroll_loops,
+ options::OPT_fno_reroll_loops))
+ if (A->getOption().matches(options::OPT_freroll_loops))
+ CmdArgs.push_back("-freroll-loops");
+
+ Args.AddLastArg(CmdArgs, options::OPT_fwritable_strings);
+ Args.AddLastArg(CmdArgs, options::OPT_funroll_loops,
+ options::OPT_fno_unroll_loops);
+
+ Args.AddLastArg(CmdArgs, options::OPT_pthread);
+
+ // -stack-protector=0 is default.
+ unsigned StackProtectorLevel = 0;
+ if (getToolChain().getSanitizerArgs().needsSafeStackRt()) {
+ Args.ClaimAllArgs(options::OPT_fno_stack_protector);
+ Args.ClaimAllArgs(options::OPT_fstack_protector_all);
+ Args.ClaimAllArgs(options::OPT_fstack_protector_strong);
+ Args.ClaimAllArgs(options::OPT_fstack_protector);
+ } else if (Arg *A = Args.getLastArg(options::OPT_fno_stack_protector,
+ options::OPT_fstack_protector_all,
+ options::OPT_fstack_protector_strong,
+ options::OPT_fstack_protector)) {
+ if (A->getOption().matches(options::OPT_fstack_protector)) {
+ StackProtectorLevel = std::max<unsigned>(
+ LangOptions::SSPOn,
+ getToolChain().GetDefaultStackProtectorLevel(KernelOrKext));
+ } else if (A->getOption().matches(options::OPT_fstack_protector_strong))
+ StackProtectorLevel = LangOptions::SSPStrong;
+ else if (A->getOption().matches(options::OPT_fstack_protector_all))
+ StackProtectorLevel = LangOptions::SSPReq;
+ } else {
+ StackProtectorLevel =
+ getToolChain().GetDefaultStackProtectorLevel(KernelOrKext);
+ }
+ if (StackProtectorLevel) {
+ CmdArgs.push_back("-stack-protector");
+ CmdArgs.push_back(Args.MakeArgString(Twine(StackProtectorLevel)));
+ }
+
+ // --param ssp-buffer-size=
+ for (const Arg *A : Args.filtered(options::OPT__param)) {
+ StringRef Str(A->getValue());
+ if (Str.startswith("ssp-buffer-size=")) {
+ if (StackProtectorLevel) {
+ CmdArgs.push_back("-stack-protector-buffer-size");
+ // FIXME: Verify the argument is a valid integer.
+ CmdArgs.push_back(Args.MakeArgString(Str.drop_front(16)));
+ }
+ A->claim();
+ }
+ }
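+ // For illustration: on a toolchain whose default protector level is off,
+ // "clang -fstack-protector --param ssp-buffer-size=4 foo.c" is translated to
+ // "-stack-protector 1 -stack-protector-buffer-size 4"; the buffer size is
+ // dropped when no protector level is active.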
+
+ // Translate -mstackrealign
+ if (Args.hasFlag(options::OPT_mstackrealign, options::OPT_mno_stackrealign,
+ false))
+ CmdArgs.push_back(Args.MakeArgString("-mstackrealign"));
+
+ if (Args.hasArg(options::OPT_mstack_alignment)) {
+ StringRef alignment = Args.getLastArgValue(options::OPT_mstack_alignment);
+ CmdArgs.push_back(Args.MakeArgString("-mstack-alignment=" + alignment));
+ }
+
+ if (Args.hasArg(options::OPT_mstack_probe_size)) {
+ StringRef Size = Args.getLastArgValue(options::OPT_mstack_probe_size);
+
+ if (!Size.empty())
+ CmdArgs.push_back(Args.MakeArgString("-mstack-probe-size=" + Size));
+ else
+ CmdArgs.push_back("-mstack-probe-size=0");
+ }
+
+ switch (getToolChain().getArch()) {
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be:
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ CmdArgs.push_back("-fallow-half-arguments-and-returns");
+ break;
+
+ default:
+ break;
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_mrestrict_it,
+ options::OPT_mno_restrict_it)) {
+ if (A->getOption().matches(options::OPT_mrestrict_it)) {
+ CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-arm-restrict-it");
+ } else {
+ CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-arm-no-restrict-it");
+ }
+ } else if (Triple.isOSWindows() &&
+ (Triple.getArch() == llvm::Triple::arm ||
+ Triple.getArch() == llvm::Triple::thumb)) {
+ // Windows on ARM expects restricted IT blocks
+ CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-arm-restrict-it");
+ }
+
+ // Forward -f options with positive and negative forms; we translate
+ // these by hand.
+ if (Arg *A = Args.getLastArg(options::OPT_fprofile_sample_use_EQ)) {
+ StringRef fname = A->getValue();
+ if (!llvm::sys::fs::exists(fname))
+ D.Diag(diag::err_drv_no_such_file) << fname;
+ else
+ A->render(Args, CmdArgs);
+ }
+
+ // -fbuiltin is default unless -mkernel is used.
+ bool UseBuiltins =
+ Args.hasFlag(options::OPT_fbuiltin, options::OPT_fno_builtin,
+ !Args.hasArg(options::OPT_mkernel));
+ if (!UseBuiltins)
+ CmdArgs.push_back("-fno-builtin");
+
+ // -ffreestanding implies -fno-builtin.
+ if (Args.hasArg(options::OPT_ffreestanding))
+ UseBuiltins = false;
+
+ // Process the -fno-builtin-* options.
+ for (const auto &Arg : Args) {
+ const Option &O = Arg->getOption();
+ if (!O.matches(options::OPT_fno_builtin_))
+ continue;
+
+ Arg->claim();
+ // If -fno-builtin is specified, then there's no need to pass the option to
+ // the frontend.
+ if (!UseBuiltins)
+ continue;
+
+ StringRef FuncName = Arg->getValue();
+ CmdArgs.push_back(Args.MakeArgString("-fno-builtin-" + FuncName));
+ }
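+ // For illustration: "clang -fno-builtin-memcpy foo.c" forwards
+ // "-fno-builtin-memcpy" to -cc1, but combined with -fno-builtin or
+ // -ffreestanding the per-function form is claimed and dropped, since the
+ // blanket option already covers it.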
+
+ // -fblocks=0 is default.
+ if (Args.hasFlag(options::OPT_fblocks, options::OPT_fno_blocks,
+ getToolChain().IsBlocksDefault()) ||
+ (Args.hasArg(options::OPT_fgnu_runtime) &&
+ Args.hasArg(options::OPT_fobjc_nonfragile_abi) &&
+ !Args.hasArg(options::OPT_fno_blocks))) {
+ CmdArgs.push_back("-fblocks");
+
+ if (!Args.hasArg(options::OPT_fgnu_runtime) &&
+ !getToolChain().hasBlocksRuntime())
+ CmdArgs.push_back("-fblocks-runtime-optional");
+ }
+
+ // -fmodules enables the use of precompiled modules (off by default).
+ // Users can pass -fno-cxx-modules to turn off modules support for
+ // C++/Objective-C++ programs.
+ bool HaveModules = false;
+ if (Args.hasFlag(options::OPT_fmodules, options::OPT_fno_modules, false)) {
+ bool AllowedInCXX = Args.hasFlag(options::OPT_fcxx_modules,
+ options::OPT_fno_cxx_modules, true);
+ if (AllowedInCXX || !types::isCXX(InputType)) {
+ CmdArgs.push_back("-fmodules");
+ HaveModules = true;
+ }
+ }
+
+ // -fmodule-maps enables implicit reading of module map files. By default,
+ // this is enabled if we are using precompiled modules.
+ if (Args.hasFlag(options::OPT_fimplicit_module_maps,
+ options::OPT_fno_implicit_module_maps, HaveModules)) {
+ CmdArgs.push_back("-fimplicit-module-maps");
+ }
+
+ // -fmodules-decluse checks that modules used are declared so (off by
+ // default).
+ if (Args.hasFlag(options::OPT_fmodules_decluse,
+ options::OPT_fno_modules_decluse, false)) {
+ CmdArgs.push_back("-fmodules-decluse");
+ }
+
+ // -fmodules-strict-decluse is like -fmodules-decluse, but also checks that
+ // all #included headers are part of modules.
+ if (Args.hasFlag(options::OPT_fmodules_strict_decluse,
+ options::OPT_fno_modules_strict_decluse, false)) {
+ CmdArgs.push_back("-fmodules-strict-decluse");
+ }
+
+ // -fno-implicit-modules turns off implicitly compiling modules on demand.
+ if (!Args.hasFlag(options::OPT_fimplicit_modules,
+ options::OPT_fno_implicit_modules)) {
+ CmdArgs.push_back("-fno-implicit-modules");
+ }
+
+ // -fmodule-name specifies the module that is currently being built (or
+ // used for header checking by -fmodule-maps).
+ Args.AddLastArg(CmdArgs, options::OPT_fmodule_name);
+
+ // -fmodule-map-file can be used to specify files containing module
+ // definitions.
+ Args.AddAllArgs(CmdArgs, options::OPT_fmodule_map_file);
+
+ // -fmodule-file can be used to specify files containing precompiled modules.
+ if (HaveModules)
+ Args.AddAllArgs(CmdArgs, options::OPT_fmodule_file);
+ else
+ Args.ClaimAllArgs(options::OPT_fmodule_file);
+
+ // -fmodule-cache-path specifies where our implicitly-built module files
+ // should be written.
+ SmallString<128> Path;
+ if (Arg *A = Args.getLastArg(options::OPT_fmodules_cache_path))
+ Path = A->getValue();
+ if (HaveModules) {
+ if (C.isForDiagnostics()) {
+ // When generating crash reports, we want to emit the modules along with
+ // the reproduction sources, so we ignore any provided module path.
+ Path = Output.getFilename();
+ llvm::sys::path::replace_extension(Path, ".cache");
+ llvm::sys::path::append(Path, "modules");
+ } else if (Path.empty()) {
+ // No module path was provided: use the default.
+ llvm::sys::path::system_temp_directory(/*erasedOnReboot=*/false, Path);
+ llvm::sys::path::append(Path, "org.llvm.clang.");
+ appendUserToPath(Path);
+ llvm::sys::path::append(Path, "ModuleCache");
+ }
+ const char Arg[] = "-fmodules-cache-path=";
+ Path.insert(Path.begin(), Arg, Arg + strlen(Arg));
+ CmdArgs.push_back(Args.MakeArgString(Path));
+ }
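+ // For illustration: with no explicit -fmodules-cache-path, the default
+ // works out to something like
+ // "<system temp dir>/org.llvm.clang.<user>/ModuleCache", passed as a single
+ // joined "-fmodules-cache-path=" argument.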
+
+ // When building modules and generating crashdumps, we need to dump a module
+ // dependency VFS alongside the output.
+ if (HaveModules && C.isForDiagnostics()) {
+ SmallString<128> VFSDir(Output.getFilename());
+ llvm::sys::path::replace_extension(VFSDir, ".cache");
+ // Add the cache directory as a temp so the crash diagnostics pick it up.
+ C.addTempFile(Args.MakeArgString(VFSDir));
+
+ llvm::sys::path::append(VFSDir, "vfs");
+ CmdArgs.push_back("-module-dependency-dir");
+ CmdArgs.push_back(Args.MakeArgString(VFSDir));
+ }
+
+ if (HaveModules)
+ Args.AddLastArg(CmdArgs, options::OPT_fmodules_user_build_path);
+
+ // Pass through all -fmodules-ignore-macro arguments.
+ Args.AddAllArgs(CmdArgs, options::OPT_fmodules_ignore_macro);
+ Args.AddLastArg(CmdArgs, options::OPT_fmodules_prune_interval);
+ Args.AddLastArg(CmdArgs, options::OPT_fmodules_prune_after);
+
+ Args.AddLastArg(CmdArgs, options::OPT_fbuild_session_timestamp);
+
+ if (Arg *A = Args.getLastArg(options::OPT_fbuild_session_file)) {
+ if (Args.hasArg(options::OPT_fbuild_session_timestamp))
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "-fbuild-session-timestamp";
+
+ llvm::sys::fs::file_status Status;
+ if (llvm::sys::fs::status(A->getValue(), Status))
+ D.Diag(diag::err_drv_no_such_file) << A->getValue();
+ CmdArgs.push_back(Args.MakeArgString(
+ "-fbuild-session-timestamp=" +
+ Twine((uint64_t)Status.getLastModificationTime().toEpochTime())));
+ }
+
+ if (Args.getLastArg(options::OPT_fmodules_validate_once_per_build_session)) {
+ if (!Args.getLastArg(options::OPT_fbuild_session_timestamp,
+ options::OPT_fbuild_session_file))
+ D.Diag(diag::err_drv_modules_validate_once_requires_timestamp);
+
+ Args.AddLastArg(CmdArgs,
+ options::OPT_fmodules_validate_once_per_build_session);
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_fmodules_validate_system_headers);
+
+ // -faccess-control is default.
+ if (Args.hasFlag(options::OPT_fno_access_control,
+ options::OPT_faccess_control, false))
+ CmdArgs.push_back("-fno-access-control");
+
+ // -felide-constructors is the default.
+ if (Args.hasFlag(options::OPT_fno_elide_constructors,
+ options::OPT_felide_constructors, false))
+ CmdArgs.push_back("-fno-elide-constructors");
+
+ ToolChain::RTTIMode RTTIMode = getToolChain().getRTTIMode();
+
+ if (KernelOrKext || (types::isCXX(InputType) &&
+ (RTTIMode == ToolChain::RM_DisabledExplicitly ||
+ RTTIMode == ToolChain::RM_DisabledImplicitly)))
+ CmdArgs.push_back("-fno-rtti");
+
+ // -fshort-enums=0 is default for all architectures except Hexagon.
+ if (Args.hasFlag(options::OPT_fshort_enums, options::OPT_fno_short_enums,
+ getToolChain().getArch() == llvm::Triple::hexagon))
+ CmdArgs.push_back("-fshort-enums");
+
+ // -fsigned-char is default.
+ if (Arg *A = Args.getLastArg(
+ options::OPT_fsigned_char, options::OPT_fno_signed_char,
+ options::OPT_funsigned_char, options::OPT_fno_unsigned_char)) {
+ if (A->getOption().matches(options::OPT_funsigned_char) ||
+ A->getOption().matches(options::OPT_fno_signed_char)) {
+ CmdArgs.push_back("-fno-signed-char");
+ }
+ } else if (!isSignedCharDefault(getToolChain().getTriple())) {
+ CmdArgs.push_back("-fno-signed-char");
+ }
+
+ // -fuse-cxa-atexit is default.
+ if (!Args.hasFlag(
+ options::OPT_fuse_cxa_atexit, options::OPT_fno_use_cxa_atexit,
+ !IsWindowsCygnus && !IsWindowsGNU &&
+ getToolChain().getTriple().getOS() != llvm::Triple::Solaris &&
+ getToolChain().getArch() != llvm::Triple::hexagon &&
+ getToolChain().getArch() != llvm::Triple::xcore &&
+ ((getToolChain().getTriple().getVendor() !=
+ llvm::Triple::MipsTechnologies) ||
+ getToolChain().getTriple().hasEnvironment())) ||
+ KernelOrKext)
+ CmdArgs.push_back("-fno-use-cxa-atexit");
+
+ // -fms-extensions=0 is default.
+ if (Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
+ IsWindowsMSVC))
+ CmdArgs.push_back("-fms-extensions");
+
+ // -fno-use-line-directives is default.
+ if (Args.hasFlag(options::OPT_fuse_line_directives,
+ options::OPT_fno_use_line_directives, false))
+ CmdArgs.push_back("-fuse-line-directives");
+
+ // -fms-compatibility=0 is default.
+ if (Args.hasFlag(options::OPT_fms_compatibility,
+ options::OPT_fno_ms_compatibility,
+ (IsWindowsMSVC &&
+ Args.hasFlag(options::OPT_fms_extensions,
+ options::OPT_fno_ms_extensions, true))))
+ CmdArgs.push_back("-fms-compatibility");
+
+ // -fms-compatibility-version=18.00 is default.
+ VersionTuple MSVT = visualstudio::getMSVCVersion(
+ &D, getToolChain().getTriple(), Args, IsWindowsMSVC);
+ if (!MSVT.empty())
+ CmdArgs.push_back(
+ Args.MakeArgString("-fms-compatibility-version=" + MSVT.getAsString()));
+
+ bool IsMSVC2015Compatible = MSVT.getMajor() >= 19;
+ if (ImplyVCPPCXXVer) {
+ if (IsMSVC2015Compatible)
+ CmdArgs.push_back("-std=c++14");
+ else
+ CmdArgs.push_back("-std=c++11");
+ }
+
+ // -fno-borland-extensions is default.
+ if (Args.hasFlag(options::OPT_fborland_extensions,
+ options::OPT_fno_borland_extensions, false))
+ CmdArgs.push_back("-fborland-extensions");
+
+ // -fno-declspec is default, except for PS4.
+ if (Args.hasFlag(options::OPT_fdeclspec, options::OPT_fno_declspec,
+ getToolChain().getTriple().isPS4()))
+ CmdArgs.push_back("-fdeclspec");
+ else if (Args.hasArg(options::OPT_fno_declspec))
+ CmdArgs.push_back("-fno-declspec"); // Explicitly disabling __declspec.
+
+ // -fthreadsafe-statics is default, except for MSVC compatibility versions less
+ // than 19.
+ if (!Args.hasFlag(options::OPT_fthreadsafe_statics,
+ options::OPT_fno_threadsafe_statics,
+ !IsWindowsMSVC || IsMSVC2015Compatible))
+ CmdArgs.push_back("-fno-threadsafe-statics");
+
+ // -fno-delayed-template-parsing is default, except for Windows where MSVC STL
+ // needs it.
+ if (Args.hasFlag(options::OPT_fdelayed_template_parsing,
+ options::OPT_fno_delayed_template_parsing, IsWindowsMSVC))
+ CmdArgs.push_back("-fdelayed-template-parsing");
+
+ // -fgnu-keywords default varies depending on language; only pass if
+ // specified.
+ if (Arg *A = Args.getLastArg(options::OPT_fgnu_keywords,
+ options::OPT_fno_gnu_keywords))
+ A->render(Args, CmdArgs);
+
+ if (Args.hasFlag(options::OPT_fgnu89_inline, options::OPT_fno_gnu89_inline,
+ false))
+ CmdArgs.push_back("-fgnu89-inline");
+
+ if (Args.hasArg(options::OPT_fno_inline))
+ CmdArgs.push_back("-fno-inline");
+
+ if (Args.hasArg(options::OPT_fno_inline_functions))
+ CmdArgs.push_back("-fno-inline-functions");
+
+ ObjCRuntime objcRuntime = AddObjCRuntimeArgs(Args, CmdArgs, rewriteKind);
+
+ // -fobjc-dispatch-method is only relevant with the nonfragile-abi, and
+ // legacy is the default. Except for a deployment target of 10.5, the
+ // NeXT runtime always uses legacy dispatch and -fno-objc-legacy-dispatch
+ // is silently ignored.
+ if (objcRuntime.isNonFragile()) {
+ if (!Args.hasFlag(options::OPT_fobjc_legacy_dispatch,
+ options::OPT_fno_objc_legacy_dispatch,
+ objcRuntime.isLegacyDispatchDefaultForArch(
+ getToolChain().getArch()))) {
+ if (getToolChain().UseObjCMixedDispatch())
+ CmdArgs.push_back("-fobjc-dispatch-method=mixed");
+ else
+ CmdArgs.push_back("-fobjc-dispatch-method=non-legacy");
+ }
+ }
+
+ // When the Objective-C legacy runtime is in effect on Mac OS X, turn on
+ // Array/Dictionary subscripting by default.
+ if (getToolChain().getArch() == llvm::Triple::x86 &&
+ getToolChain().getTriple().isMacOSX() &&
+ !getToolChain().getTriple().isMacOSXVersionLT(10, 7) &&
+ objcRuntime.getKind() == ObjCRuntime::FragileMacOSX &&
+ objcRuntime.isNeXTFamily())
+ CmdArgs.push_back("-fobjc-subscripting-legacy-runtime");
+
+ // -fencode-extended-block-signature=1 is default.
+ if (getToolChain().IsEncodeExtendedBlockSignatureDefault()) {
+ CmdArgs.push_back("-fencode-extended-block-signature");
+ }
+
+ // Allow -fno-objc-arr to trump -fobjc-arr/-fobjc-arc.
+ // NOTE: This logic is duplicated in ToolChains.cpp.
+ bool ARC = isObjCAutoRefCount(Args);
+ if (ARC) {
+ getToolChain().CheckObjCARC();
+
+ CmdArgs.push_back("-fobjc-arc");
+
+ // FIXME: It seems like this entire block, and several around it should be
+ // wrapped in isObjC, but for now we just use it here as this is where it
+ // was being used previously.
+ if (types::isCXX(InputType) && types::isObjC(InputType)) {
+ if (getToolChain().GetCXXStdlibType(Args) == ToolChain::CST_Libcxx)
+ CmdArgs.push_back("-fobjc-arc-cxxlib=libc++");
+ else
+ CmdArgs.push_back("-fobjc-arc-cxxlib=libstdc++");
+ }
+
+ // Allow the user to enable full exceptions code emission.
+ // The default is off for Objective-C, on for Objective-C++.
+ if (Args.hasFlag(options::OPT_fobjc_arc_exceptions,
+ options::OPT_fno_objc_arc_exceptions,
+ /*default*/ types::isCXX(InputType)))
+ CmdArgs.push_back("-fobjc-arc-exceptions");
+
+ }
+
+ // -fobjc-infer-related-result-type is the default, except in the Objective-C
+ // rewriter.
+ if (rewriteKind != RK_None)
+ CmdArgs.push_back("-fno-objc-infer-related-result-type");
+
+ // Handle -fobjc-gc and -fobjc-gc-only. They are exclusive, and -fobjc-gc-only
+ // takes precedence.
+ const Arg *GCArg = Args.getLastArg(options::OPT_fobjc_gc_only);
+ if (!GCArg)
+ GCArg = Args.getLastArg(options::OPT_fobjc_gc);
+ if (GCArg) {
+ if (ARC) {
+ D.Diag(diag::err_drv_objc_gc_arr) << GCArg->getAsString(Args);
+ } else if (getToolChain().SupportsObjCGC()) {
+ GCArg->render(Args, CmdArgs);
+ } else {
+ // FIXME: We should move this to a hard error.
+ D.Diag(diag::warn_drv_objc_gc_unsupported) << GCArg->getAsString(Args);
+ }
+ }
+
+ // Pass down -fobjc-weak or -fno-objc-weak if present.
+ if (types::isObjC(InputType)) {
+ auto WeakArg = Args.getLastArg(options::OPT_fobjc_weak,
+ options::OPT_fno_objc_weak);
+ if (!WeakArg) {
+ // nothing to do
+ } else if (GCArg) {
+ if (WeakArg->getOption().matches(options::OPT_fobjc_weak))
+ D.Diag(diag::err_objc_weak_with_gc);
+ } else if (!objcRuntime.allowsWeak()) {
+ if (WeakArg->getOption().matches(options::OPT_fobjc_weak))
+ D.Diag(diag::err_objc_weak_unsupported);
+ } else {
+ WeakArg->render(Args, CmdArgs);
+ }
+ }
+
+ if (Args.hasFlag(options::OPT_fapplication_extension,
+ options::OPT_fno_application_extension, false))
+ CmdArgs.push_back("-fapplication-extension");
+
+ // Handle GCC-style exception args.
+ if (!C.getDriver().IsCLMode())
+ addExceptionArgs(Args, InputType, getToolChain(), KernelOrKext, objcRuntime,
+ CmdArgs);
+
+ if (getToolChain().UseSjLjExceptions(Args))
+ CmdArgs.push_back("-fsjlj-exceptions");
+
+ // C++ "sane" operator new.
+ if (!Args.hasFlag(options::OPT_fassume_sane_operator_new,
+ options::OPT_fno_assume_sane_operator_new))
+ CmdArgs.push_back("-fno-assume-sane-operator-new");
+
+ // -fsized-deallocation is off by default, as it is an ABI-breaking change for
+ // most platforms.
+ if (Args.hasFlag(options::OPT_fsized_deallocation,
+ options::OPT_fno_sized_deallocation, false))
+ CmdArgs.push_back("-fsized-deallocation");
+
+ // -fconstant-cfstrings is default, and may be subject to argument translation
+ // on Darwin.
+ if (!Args.hasFlag(options::OPT_fconstant_cfstrings,
+ options::OPT_fno_constant_cfstrings) ||
+ !Args.hasFlag(options::OPT_mconstant_cfstrings,
+ options::OPT_mno_constant_cfstrings))
+ CmdArgs.push_back("-fno-constant-cfstrings");
+
+ // -fshort-wchar default varies depending on platform; only
+ // pass if specified.
+ if (Arg *A = Args.getLastArg(options::OPT_fshort_wchar,
+ options::OPT_fno_short_wchar))
+ A->render(Args, CmdArgs);
+
+ // -fno-pascal-strings is default, only pass non-default.
+ if (Args.hasFlag(options::OPT_fpascal_strings,
+ options::OPT_fno_pascal_strings, false))
+ CmdArgs.push_back("-fpascal-strings");
+
+ // Honor -fpack-struct= and -fpack-struct, if given. Note that
+ // -fno-pack-struct doesn't apply to -fpack-struct=.
+ if (Arg *A = Args.getLastArg(options::OPT_fpack_struct_EQ)) {
+ std::string PackStructStr = "-fpack-struct=";
+ PackStructStr += A->getValue();
+ CmdArgs.push_back(Args.MakeArgString(PackStructStr));
+ } else if (Args.hasFlag(options::OPT_fpack_struct,
+ options::OPT_fno_pack_struct, false)) {
+ CmdArgs.push_back("-fpack-struct=1");
+ }
+
+ // Handle -fmax-type-align=N and -fno-max-type-align
+ bool SkipMaxTypeAlign = Args.hasArg(options::OPT_fno_max_type_align);
+ if (Arg *A = Args.getLastArg(options::OPT_fmax_type_align_EQ)) {
+ if (!SkipMaxTypeAlign) {
+ std::string MaxTypeAlignStr = "-fmax-type-align=";
+ MaxTypeAlignStr += A->getValue();
+ CmdArgs.push_back(Args.MakeArgString(MaxTypeAlignStr));
+ }
+ } else if (getToolChain().getTriple().isOSDarwin()) {
+ if (!SkipMaxTypeAlign) {
+ std::string MaxTypeAlignStr = "-fmax-type-align=16";
+ CmdArgs.push_back(Args.MakeArgString(MaxTypeAlignStr));
+ }
+ }
+
+ // -fcommon is the default unless compiling kernel code or the target opts out.
+ bool NoCommonDefault =
+ KernelOrKext || isNoCommonDefault(getToolChain().getTriple());
+ if (!Args.hasFlag(options::OPT_fcommon, options::OPT_fno_common,
+ !NoCommonDefault))
+ CmdArgs.push_back("-fno-common");
+
+ // -fsigned-bitfields is default, and clang doesn't yet support
+ // -funsigned-bitfields.
+ if (!Args.hasFlag(options::OPT_fsigned_bitfields,
+ options::OPT_funsigned_bitfields))
+ D.Diag(diag::warn_drv_clang_unsupported)
+ << Args.getLastArg(options::OPT_funsigned_bitfields)->getAsString(Args);
+
+ // -ffor-scope is default, and clang doesn't support -fno-for-scope.
+ if (!Args.hasFlag(options::OPT_ffor_scope, options::OPT_fno_for_scope))
+ D.Diag(diag::err_drv_clang_unsupported)
+ << Args.getLastArg(options::OPT_fno_for_scope)->getAsString(Args);
+
+ // -finput-charset=UTF-8 is the default; reject others.
+ if (Arg *inputCharset = Args.getLastArg(options::OPT_finput_charset_EQ)) {
+ StringRef value = inputCharset->getValue();
+ if (value != "UTF-8")
+ D.Diag(diag::err_drv_invalid_value) << inputCharset->getAsString(Args)
+ << value;
+ }
+
+ // -fexec-charset=UTF-8 is the default; reject others.
+ if (Arg *execCharset = Args.getLastArg(options::OPT_fexec_charset_EQ)) {
+ StringRef value = execCharset->getValue();
+ if (value != "UTF-8")
+ D.Diag(diag::err_drv_invalid_value) << execCharset->getAsString(Args)
+ << value;
+ }
+
+ // -fcaret-diagnostics is default.
+ if (!Args.hasFlag(options::OPT_fcaret_diagnostics,
+ options::OPT_fno_caret_diagnostics, true))
+ CmdArgs.push_back("-fno-caret-diagnostics");
+
+ // -fdiagnostics-fixit-info is default, only pass non-default.
+ if (!Args.hasFlag(options::OPT_fdiagnostics_fixit_info,
+ options::OPT_fno_diagnostics_fixit_info))
+ CmdArgs.push_back("-fno-diagnostics-fixit-info");
+
+ // Enable -fdiagnostics-show-option by default.
+ if (Args.hasFlag(options::OPT_fdiagnostics_show_option,
+ options::OPT_fno_diagnostics_show_option))
+ CmdArgs.push_back("-fdiagnostics-show-option");
+
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fdiagnostics_show_category_EQ)) {
+ CmdArgs.push_back("-fdiagnostics-show-category");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ if (const Arg *A = Args.getLastArg(options::OPT_fdiagnostics_format_EQ)) {
+ CmdArgs.push_back("-fdiagnostics-format");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ if (Arg *A = Args.getLastArg(
+ options::OPT_fdiagnostics_show_note_include_stack,
+ options::OPT_fno_diagnostics_show_note_include_stack)) {
+ if (A->getOption().matches(
+ options::OPT_fdiagnostics_show_note_include_stack))
+ CmdArgs.push_back("-fdiagnostics-show-note-include-stack");
+ else
+ CmdArgs.push_back("-fno-diagnostics-show-note-include-stack");
+ }
+
+ // Color diagnostics are the default, unless the terminal doesn't support
+ // them.
+ // Support both clang's -f[no-]color-diagnostics and gcc's
+ // -f[no-]diagnostics-colors[=never|always|auto].
+ enum { Colors_On, Colors_Off, Colors_Auto } ShowColors = Colors_Auto;
+ for (const auto &Arg : Args) {
+ const Option &O = Arg->getOption();
+ if (!O.matches(options::OPT_fcolor_diagnostics) &&
+ !O.matches(options::OPT_fdiagnostics_color) &&
+ !O.matches(options::OPT_fno_color_diagnostics) &&
+ !O.matches(options::OPT_fno_diagnostics_color) &&
+ !O.matches(options::OPT_fdiagnostics_color_EQ))
+ continue;
+
+ Arg->claim();
+ if (O.matches(options::OPT_fcolor_diagnostics) ||
+ O.matches(options::OPT_fdiagnostics_color)) {
+ ShowColors = Colors_On;
+ } else if (O.matches(options::OPT_fno_color_diagnostics) ||
+ O.matches(options::OPT_fno_diagnostics_color)) {
+ ShowColors = Colors_Off;
+ } else {
+ assert(O.matches(options::OPT_fdiagnostics_color_EQ));
+ StringRef value(Arg->getValue());
+ if (value == "always")
+ ShowColors = Colors_On;
+ else if (value == "never")
+ ShowColors = Colors_Off;
+ else if (value == "auto")
+ ShowColors = Colors_Auto;
+ else
+ getToolChain().getDriver().Diag(diag::err_drv_clang_unsupported)
+ << ("-fdiagnostics-color=" + value).str();
+ }
+ }
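+ // For illustration: "-fdiagnostics-color=auto" leaves ShowColors at
+ // Colors_Auto, so "-fcolor-diagnostics" is emitted below only when stderr is
+ // a color-capable terminal; "=always" and "=never" force it on and off.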
+ if (ShowColors == Colors_On ||
+ (ShowColors == Colors_Auto && llvm::sys::Process::StandardErrHasColors()))
+ CmdArgs.push_back("-fcolor-diagnostics");
+
+ if (Args.hasArg(options::OPT_fansi_escape_codes))
+ CmdArgs.push_back("-fansi-escape-codes");
+
+ if (!Args.hasFlag(options::OPT_fshow_source_location,
+ options::OPT_fno_show_source_location))
+ CmdArgs.push_back("-fno-show-source-location");
+
+ if (!Args.hasFlag(options::OPT_fshow_column, options::OPT_fno_show_column,
+ true))
+ CmdArgs.push_back("-fno-show-column");
+
+ if (!Args.hasFlag(options::OPT_fspell_checking,
+ options::OPT_fno_spell_checking))
+ CmdArgs.push_back("-fno-spell-checking");
+
+ // -fno-asm-blocks is default.
+ if (Args.hasFlag(options::OPT_fasm_blocks, options::OPT_fno_asm_blocks,
+ false))
+ CmdArgs.push_back("-fasm-blocks");
+
+ // -fgnu-inline-asm is default.
+ if (!Args.hasFlag(options::OPT_fgnu_inline_asm,
+ options::OPT_fno_gnu_inline_asm, true))
+ CmdArgs.push_back("-fno-gnu-inline-asm");
+
+ // Enable vectorization by default according to the optimization level
+ // selected. For optimization levels that want vectorization we use the alias
+ // option to simplify the hasFlag logic.
+ bool EnableVec = shouldEnableVectorizerAtOLevel(Args, false);
+ OptSpecifier VectorizeAliasOption =
+ EnableVec ? options::OPT_O_Group : options::OPT_fvectorize;
+ if (Args.hasFlag(options::OPT_fvectorize, VectorizeAliasOption,
+ options::OPT_fno_vectorize, EnableVec))
+ CmdArgs.push_back("-vectorize-loops");
+
+ // -fslp-vectorize is enabled based on the optimization level selected.
+ bool EnableSLPVec = shouldEnableVectorizerAtOLevel(Args, true);
+ OptSpecifier SLPVectAliasOption =
+ EnableSLPVec ? options::OPT_O_Group : options::OPT_fslp_vectorize;
+ if (Args.hasFlag(options::OPT_fslp_vectorize, SLPVectAliasOption,
+ options::OPT_fno_slp_vectorize, EnableSLPVec))
+ CmdArgs.push_back("-vectorize-slp");
+
+ // -fno-slp-vectorize-aggressive is default.
+ if (Args.hasFlag(options::OPT_fslp_vectorize_aggressive,
+ options::OPT_fno_slp_vectorize_aggressive, false))
+ CmdArgs.push_back("-vectorize-slp-aggressive");
+
+ if (Arg *A = Args.getLastArg(options::OPT_fshow_overloads_EQ))
+ A->render(Args, CmdArgs);
+
+ // -fdollars-in-identifiers default varies depending on platform and
+ // language; only pass if specified.
+ if (Arg *A = Args.getLastArg(options::OPT_fdollars_in_identifiers,
+ options::OPT_fno_dollars_in_identifiers)) {
+ if (A->getOption().matches(options::OPT_fdollars_in_identifiers))
+ CmdArgs.push_back("-fdollars-in-identifiers");
+ else
+ CmdArgs.push_back("-fno-dollars-in-identifiers");
+ }
+
+ // -funit-at-a-time is default, and we don't support -fno-unit-at-a-time for
+ // practical purposes.
+ if (Arg *A = Args.getLastArg(options::OPT_funit_at_a_time,
+ options::OPT_fno_unit_at_a_time)) {
+ if (A->getOption().matches(options::OPT_fno_unit_at_a_time))
+ D.Diag(diag::warn_drv_clang_unsupported) << A->getAsString(Args);
+ }
+
+ if (Args.hasFlag(options::OPT_fapple_pragma_pack,
+ options::OPT_fno_apple_pragma_pack, false))
+ CmdArgs.push_back("-fapple-pragma-pack");
+
+ // le32-specific flags:
+ // -fno-math-builtin: clang should not convert math builtins to intrinsics
+ // by default.
+ if (getToolChain().getArch() == llvm::Triple::le32) {
+ CmdArgs.push_back("-fno-math-builtin");
+ }
+
+// Default to -fno-builtin-str{cat,cpy} on Darwin for ARM.
+//
+// FIXME: This is disabled until clang -cc1 supports -fno-builtin-foo. PR4941.
+#if 0
+ if (getToolChain().getTriple().isOSDarwin() &&
+ (getToolChain().getArch() == llvm::Triple::arm ||
+ getToolChain().getArch() == llvm::Triple::thumb)) {
+ if (!Args.hasArg(options::OPT_fbuiltin_strcat))
+ CmdArgs.push_back("-fno-builtin-strcat");
+ if (!Args.hasArg(options::OPT_fbuiltin_strcpy))
+ CmdArgs.push_back("-fno-builtin-strcpy");
+ }
+#endif
+
+ // Enable rewrite includes if the user's asked for it or if we're generating
+ // diagnostics.
+ // TODO: Once -module-dependency-dir works with -frewrite-includes it'd be
+ // nice to enable this when doing a crashdump for modules as well.
+ if (Args.hasFlag(options::OPT_frewrite_includes,
+ options::OPT_fno_rewrite_includes, false) ||
+ (C.isForDiagnostics() && !HaveModules))
+ CmdArgs.push_back("-frewrite-includes");
+
+ // Only allow -traditional or -traditional-cpp in preprocessing modes.
+ if (Arg *A = Args.getLastArg(options::OPT_traditional,
+ options::OPT_traditional_cpp)) {
+ if (isa<PreprocessJobAction>(JA))
+ CmdArgs.push_back("-traditional-cpp");
+ else
+ D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_dM);
+ Args.AddLastArg(CmdArgs, options::OPT_dD);
+
+ // Handle serialized diagnostics.
+ if (Arg *A = Args.getLastArg(options::OPT__serialize_diags)) {
+ CmdArgs.push_back("-serialize-diagnostic-file");
+ CmdArgs.push_back(Args.MakeArgString(A->getValue()));
+ }
+
+ if (Args.hasArg(options::OPT_fretain_comments_from_system_headers))
+ CmdArgs.push_back("-fretain-comments-from-system-headers");
+
+ // Forward -fcomment-block-commands to -cc1.
+ Args.AddAllArgs(CmdArgs, options::OPT_fcomment_block_commands);
+ // Forward -fparse-all-comments to -cc1.
+ Args.AddAllArgs(CmdArgs, options::OPT_fparse_all_comments);
+
+ // Turn -fplugin=name.so into -load name.so
+ for (const Arg *A : Args.filtered(options::OPT_fplugin_EQ)) {
+ CmdArgs.push_back("-load");
+ CmdArgs.push_back(A->getValue());
+ A->claim();
+ }
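+ // For illustration: "clang -fplugin=libMyPlugin.so foo.c" (a hypothetical
+ // plugin name) becomes "-load libMyPlugin.so" at the -cc1 level.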
+
+ // Forward -Xclang arguments to -cc1, and -mllvm arguments to the LLVM option
+ // parser.
+ Args.AddAllArgValues(CmdArgs, options::OPT_Xclang);
+ for (const Arg *A : Args.filtered(options::OPT_mllvm)) {
+ A->claim();
+
+ // We translate this by hand to the -cc1 argument, since the nightly test
+ // suite uses it and developers have been trained to spell it with -mllvm.
+ if (StringRef(A->getValue(0)) == "-disable-llvm-optzns") {
+ CmdArgs.push_back("-disable-llvm-optzns");
+ } else
+ A->render(Args, CmdArgs);
+ }
+
+ // With -save-temps, we want to save the unoptimized bitcode output from the
+ // CompileJobAction; use -disable-llvm-passes to get pristine IR generated
+ // by the frontend.
+ if (C.getDriver().isSaveTempsEnabled() && isa<CompileJobAction>(JA))
+ CmdArgs.push_back("-disable-llvm-passes");
+
+ if (Output.getType() == types::TY_Dependencies) {
+ // Handled with other dependency code.
+ } else if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ addDashXForInput(Args, Input, CmdArgs);
+
+ if (Input.isFilename())
+ CmdArgs.push_back(Input.getFilename());
+ else
+ Input.getInputArg().renderAsInput(Args, CmdArgs);
+
+ Args.AddAllArgs(CmdArgs, options::OPT_undef);
+
+ const char *Exec = getToolChain().getDriver().getClangProgramPath();
+
+ // Optionally embed the -cc1 level arguments into the debug info, for build
+ // analysis.
+ if (getToolChain().UseDwarfDebugFlags()) {
+ ArgStringList OriginalArgs;
+ for (const auto &Arg : Args)
+ Arg->render(Args, OriginalArgs);
+
+ SmallString<256> Flags;
+ Flags += Exec;
+ for (const char *OriginalArg : OriginalArgs) {
+ SmallString<128> EscapedArg;
+ EscapeSpacesAndBackslashes(OriginalArg, EscapedArg);
+ Flags += " ";
+ Flags += EscapedArg;
+ }
+ CmdArgs.push_back("-dwarf-debug-flags");
+ CmdArgs.push_back(Args.MakeArgString(Flags));
+ }
+
+ // Add the split debug info name to the command lines here so we
+ // can propagate it to the backend.
+ bool SplitDwarf = SplitDwarfArg && getToolChain().getTriple().isOSLinux() &&
+ (isa<AssembleJobAction>(JA) || isa<CompileJobAction>(JA) ||
+ isa<BackendJobAction>(JA));
+ const char *SplitDwarfOut;
+ if (SplitDwarf) {
+ CmdArgs.push_back("-split-dwarf-file");
+ SplitDwarfOut = SplitDebugName(Args, Input);
+ CmdArgs.push_back(SplitDwarfOut);
+ }
+
+ // Host-side CUDA compilation receives device-side outputs as Inputs[1...].
+ // Include them with -fcuda-include-gpubinary.
+ if (IsCuda && Inputs.size() > 1)
+ for (auto I = std::next(Inputs.begin()), E = Inputs.end(); I != E; ++I) {
+ CmdArgs.push_back("-fcuda-include-gpubinary");
+ CmdArgs.push_back(I->getFilename());
+ }
+
+ // Finally add the compile command to the compilation.
+ if (Args.hasArg(options::OPT__SLASH_fallback) &&
+ Output.getType() == types::TY_Object &&
+ (InputType == types::TY_C || InputType == types::TY_CXX)) {
+ auto CLCommand =
+ getCLFallback()->GetCommand(C, JA, Output, Inputs, Args, LinkingOutput);
+ C.addCommand(llvm::make_unique<FallbackCommand>(
+ JA, *this, Exec, CmdArgs, Inputs, std::move(CLCommand)));
+ } else {
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ }
+
+ // Handle the debug info splitting at object creation time if we're
+ // creating an object.
+ // TODO: Currently only works on linux with newer objcopy.
+ if (SplitDwarf && !isa<CompileJobAction>(JA) && !isa<BackendJobAction>(JA))
+ SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output, SplitDwarfOut);
+
+ if (Arg *A = Args.getLastArg(options::OPT_pg))
+ if (Args.hasArg(options::OPT_fomit_frame_pointer))
+ D.Diag(diag::err_drv_argument_not_allowed_with) << "-fomit-frame-pointer"
+ << A->getAsString(Args);
+
+ // Claim some arguments which clang supports automatically.
+
+ // -fpch-preprocess is used with gcc to add a special marker in the output to
+ // include the PCH file. Clang's PTH solution is completely transparent, so we
+ // do not need to deal with it at all.
+ Args.ClaimAllArgs(options::OPT_fpch_preprocess);
+
+ // Claim some arguments which clang doesn't support, but we don't
+ // care to warn the user about.
+ Args.ClaimAllArgs(options::OPT_clang_ignored_f_Group);
+ Args.ClaimAllArgs(options::OPT_clang_ignored_m_Group);
+
+ // Disable warnings for clang -E -emit-llvm foo.c
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+}
+
+/// Add options related to the Objective-C runtime/ABI.
+///
+/// Returns the Objective-C runtime that was selected.
+ObjCRuntime Clang::AddObjCRuntimeArgs(const ArgList &args,
+ ArgStringList &cmdArgs,
+ RewriteKind rewriteKind) const {
+ // Look for the controlling runtime option.
+ Arg *runtimeArg =
+ args.getLastArg(options::OPT_fnext_runtime, options::OPT_fgnu_runtime,
+ options::OPT_fobjc_runtime_EQ);
+
+ // Just forward -fobjc-runtime= to the frontend. This supersedes
+ // options about fragility.
+ if (runtimeArg &&
+ runtimeArg->getOption().matches(options::OPT_fobjc_runtime_EQ)) {
+ ObjCRuntime runtime;
+ StringRef value = runtimeArg->getValue();
+ if (runtime.tryParse(value)) {
+ getToolChain().getDriver().Diag(diag::err_drv_unknown_objc_runtime)
+ << value;
+ }
+
+ runtimeArg->render(args, cmdArgs);
+ return runtime;
+ }
+
+ // Otherwise, we'll need the ABI "version". Version numbers are
+ // slightly confusing for historical reasons:
+ // 1 - Traditional "fragile" ABI
+ // 2 - Non-fragile ABI, version 1
+ // 3 - Non-fragile ABI, version 2
+ unsigned objcABIVersion = 1;
+ // If -fobjc-abi-version= is present, use that to set the version.
+ if (Arg *abiArg = args.getLastArg(options::OPT_fobjc_abi_version_EQ)) {
+ StringRef value = abiArg->getValue();
+ if (value == "1")
+ objcABIVersion = 1;
+ else if (value == "2")
+ objcABIVersion = 2;
+ else if (value == "3")
+ objcABIVersion = 3;
+ else
+ getToolChain().getDriver().Diag(diag::err_drv_clang_unsupported) << value;
+ } else {
+ // Otherwise, determine if we are using the non-fragile ABI.
+ bool nonFragileABIIsDefault =
+ (rewriteKind == RK_NonFragile ||
+ (rewriteKind == RK_None &&
+ getToolChain().IsObjCNonFragileABIDefault()));
+ if (args.hasFlag(options::OPT_fobjc_nonfragile_abi,
+ options::OPT_fno_objc_nonfragile_abi,
+ nonFragileABIIsDefault)) {
+// Determine the non-fragile ABI version to use.
+#ifdef DISABLE_DEFAULT_NONFRAGILEABI_TWO
+ unsigned nonFragileABIVersion = 1;
+#else
+ unsigned nonFragileABIVersion = 2;
+#endif
+
+ if (Arg *abiArg =
+ args.getLastArg(options::OPT_fobjc_nonfragile_abi_version_EQ)) {
+ StringRef value = abiArg->getValue();
+ if (value == "1")
+ nonFragileABIVersion = 1;
+ else if (value == "2")
+ nonFragileABIVersion = 2;
+ else
+ getToolChain().getDriver().Diag(diag::err_drv_clang_unsupported)
+ << value;
+ }
+
+ objcABIVersion = 1 + nonFragileABIVersion;
+ } else {
+ objcABIVersion = 1;
+ }
+ }
+
+ // We don't actually care about the ABI version other than whether
+ // it's non-fragile.
+ bool isNonFragile = objcABIVersion != 1;
+
+ // If we have no runtime argument, ask the toolchain for its default runtime.
+ // However, the rewriter only really supports the Mac runtime, so assume that.
+ ObjCRuntime runtime;
+ if (!runtimeArg) {
+ switch (rewriteKind) {
+ case RK_None:
+ runtime = getToolChain().getDefaultObjCRuntime(isNonFragile);
+ break;
+ case RK_Fragile:
+ runtime = ObjCRuntime(ObjCRuntime::FragileMacOSX, VersionTuple());
+ break;
+ case RK_NonFragile:
+ runtime = ObjCRuntime(ObjCRuntime::MacOSX, VersionTuple());
+ break;
+ }
+
+ // -fnext-runtime
+ } else if (runtimeArg->getOption().matches(options::OPT_fnext_runtime)) {
+ // On Darwin, make this use the default behavior for the toolchain.
+ if (getToolChain().getTriple().isOSDarwin()) {
+ runtime = getToolChain().getDefaultObjCRuntime(isNonFragile);
+
+ // Otherwise, build for a generic Mac OS X port.
+ } else {
+ runtime = ObjCRuntime(ObjCRuntime::MacOSX, VersionTuple());
+ }
+
+ // -fgnu-runtime
+ } else {
+ assert(runtimeArg->getOption().matches(options::OPT_fgnu_runtime));
+ // Legacy behaviour is to target the GNUstep runtime if we are in
+ // non-fragile mode or the GCC runtime in fragile mode.
+ if (isNonFragile)
+ runtime = ObjCRuntime(ObjCRuntime::GNUstep, VersionTuple(1, 6));
+ else
+ runtime = ObjCRuntime(ObjCRuntime::GCC, VersionTuple());
+ }
+
+ cmdArgs.push_back(
+ args.MakeArgString("-fobjc-runtime=" + runtime.getAsString()));
+ return runtime;
+}
+
+static bool maybeConsumeDash(const std::string &EH, size_t &I) {
+ bool HaveDash = (I + 1 < EH.size() && EH[I + 1] == '-');
+ I += HaveDash;
+ return !HaveDash;
+}
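+// For illustration: for the /EH value "s-c-", maybeConsumeDash consumes the
+// '-' following 's' and the one following 'c' and returns false both times;
+// for "sc" (from /EHsc) there is no dash to consume, so it returns true.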
+
+namespace {
+struct EHFlags {
+ EHFlags() : Synch(false), Asynch(false), NoExceptC(false) {}
+ bool Synch;
+ bool Asynch;
+ bool NoExceptC;
+};
+} // end anonymous namespace
+
+/// /EH controls whether to run destructor cleanups when exceptions are
+/// thrown. There are three modifiers:
+/// - s: Cleanup after "synchronous" exceptions, aka C++ exceptions.
+/// - a: Cleanup after "asynchronous" exceptions, aka structured exceptions.
+/// The 'a' modifier is unimplemented and fundamentally hard in LLVM IR.
+/// - c: Assume that extern "C" functions are implicitly noexcept. This
+/// modifier is an optimization, so we ignore it for now.
+/// The default is /EHs-c-, meaning cleanups are disabled.
+static EHFlags parseClangCLEHFlags(const Driver &D, const ArgList &Args) {
+ EHFlags EH;
+
+ std::vector<std::string> EHArgs =
+ Args.getAllArgValues(options::OPT__SLASH_EH);
+ for (auto EHVal : EHArgs) {
+ for (size_t I = 0, E = EHVal.size(); I != E; ++I) {
+ switch (EHVal[I]) {
+ case 'a':
+ EH.Asynch = maybeConsumeDash(EHVal, I);
+ continue;
+ case 'c':
+ EH.NoExceptC = maybeConsumeDash(EHVal, I);
+ continue;
+ case 's':
+ EH.Synch = maybeConsumeDash(EHVal, I);
+ continue;
+ default:
+ break;
+ }
+ D.Diag(clang::diag::err_drv_invalid_value) << "/EH" << EHVal;
+ break;
+ }
+ }
+
+ return EH;
+}
+
+void Clang::AddClangCLArgs(const ArgList &Args, ArgStringList &CmdArgs,
+ enum CodeGenOptions::DebugInfoKind *DebugInfoKind,
+ bool *EmitCodeView) const {
+ unsigned RTOptionID = options::OPT__SLASH_MT;
+
+ if (Args.hasArg(options::OPT__SLASH_LDd))
+ // The /LDd option implies /MTd. The dependent lib part can be overridden,
+ // but defining _DEBUG is sticky.
+ RTOptionID = options::OPT__SLASH_MTd;
+
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_M_Group))
+ RTOptionID = A->getOption().getID();
+
+ StringRef FlagForCRT;
+ switch (RTOptionID) {
+ case options::OPT__SLASH_MD:
+ if (Args.hasArg(options::OPT__SLASH_LDd))
+ CmdArgs.push_back("-D_DEBUG");
+ CmdArgs.push_back("-D_MT");
+ CmdArgs.push_back("-D_DLL");
+ FlagForCRT = "--dependent-lib=msvcrt";
+ break;
+ case options::OPT__SLASH_MDd:
+ CmdArgs.push_back("-D_DEBUG");
+ CmdArgs.push_back("-D_MT");
+ CmdArgs.push_back("-D_DLL");
+ FlagForCRT = "--dependent-lib=msvcrtd";
+ break;
+ case options::OPT__SLASH_MT:
+ if (Args.hasArg(options::OPT__SLASH_LDd))
+ CmdArgs.push_back("-D_DEBUG");
+ CmdArgs.push_back("-D_MT");
+ FlagForCRT = "--dependent-lib=libcmt";
+ break;
+ case options::OPT__SLASH_MTd:
+ CmdArgs.push_back("-D_DEBUG");
+ CmdArgs.push_back("-D_MT");
+ FlagForCRT = "--dependent-lib=libcmtd";
+ break;
+ default:
+ llvm_unreachable("Unexpected option ID.");
+ }
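+ // For illustration: "clang-cl /MDd foo.c" defines _DEBUG, _MT, and _DLL and
+ // selects "--dependent-lib=msvcrtd" (added below unless /Zl suppresses the
+ // default-library directives); plain /MT defines only _MT and selects libcmt.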
+
+ if (Args.hasArg(options::OPT__SLASH_Zl)) {
+ CmdArgs.push_back("-D_VC_NODEFAULTLIB");
+ } else {
+ CmdArgs.push_back(FlagForCRT.data());
+
+ // This provides POSIX compatibility (maps 'open' to '_open'), which most
+ // users want. The /Za flag to cl.exe turns this off, but it's not
+ // implemented in clang.
+ CmdArgs.push_back("--dependent-lib=oldnames");
+ }
+
+ // Both /showIncludes and /E (and /EP) write to stdout. Allowing both
+ // would produce interleaved output, so ignore /showIncludes in such cases.
+ if (!Args.hasArg(options::OPT_E) && !Args.hasArg(options::OPT__SLASH_EP))
+ if (Arg *A = Args.getLastArg(options::OPT_show_includes))
+ A->render(Args, CmdArgs);
+
+ // This controls whether or not we emit RTTI data for polymorphic types.
+ if (Args.hasFlag(options::OPT__SLASH_GR_, options::OPT__SLASH_GR,
+ /*default=*/false))
+ CmdArgs.push_back("-fno-rtti-data");
+
+ // Emit CodeView if -Z7 is present.
+ *EmitCodeView = Args.hasArg(options::OPT__SLASH_Z7);
+ bool EmitDwarf = Args.hasArg(options::OPT_gdwarf);
+ // If we are emitting CV but not DWARF, don't build information that LLVM
+ // can't yet process.
+ if (*EmitCodeView && !EmitDwarf)
+ *DebugInfoKind = CodeGenOptions::DebugLineTablesOnly;
+ if (*EmitCodeView)
+ CmdArgs.push_back("-gcodeview");
+
+ const Driver &D = getToolChain().getDriver();
+ EHFlags EH = parseClangCLEHFlags(D, Args);
+ // FIXME: Do something with NoExceptC.
+ if (EH.Synch || EH.Asynch) {
+ CmdArgs.push_back("-fcxx-exceptions");
+ CmdArgs.push_back("-fexceptions");
+ }
+
+ // /EP should expand to -E -P.
+ if (Args.hasArg(options::OPT__SLASH_EP)) {
+ CmdArgs.push_back("-E");
+ CmdArgs.push_back("-P");
+ }
+
+ unsigned VolatileOptionID;
+ if (getToolChain().getArch() == llvm::Triple::x86_64 ||
+ getToolChain().getArch() == llvm::Triple::x86)
+ VolatileOptionID = options::OPT__SLASH_volatile_ms;
+ else
+ VolatileOptionID = options::OPT__SLASH_volatile_iso;
+
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_volatile_Group))
+ VolatileOptionID = A->getOption().getID();
+
+ if (VolatileOptionID == options::OPT__SLASH_volatile_ms)
+ CmdArgs.push_back("-fms-volatile");
+
+ Arg *MostGeneralArg = Args.getLastArg(options::OPT__SLASH_vmg);
+ Arg *BestCaseArg = Args.getLastArg(options::OPT__SLASH_vmb);
+ if (MostGeneralArg && BestCaseArg)
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << MostGeneralArg->getAsString(Args) << BestCaseArg->getAsString(Args);
+
+ if (MostGeneralArg) {
+ Arg *SingleArg = Args.getLastArg(options::OPT__SLASH_vms);
+ Arg *MultipleArg = Args.getLastArg(options::OPT__SLASH_vmm);
+ Arg *VirtualArg = Args.getLastArg(options::OPT__SLASH_vmv);
+
+ Arg *FirstConflict = SingleArg ? SingleArg : MultipleArg;
+ Arg *SecondConflict = VirtualArg ? VirtualArg : MultipleArg;
+ if (FirstConflict && SecondConflict && FirstConflict != SecondConflict)
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << FirstConflict->getAsString(Args)
+ << SecondConflict->getAsString(Args);
+
+ if (SingleArg)
+ CmdArgs.push_back("-fms-memptr-rep=single");
+ else if (MultipleArg)
+ CmdArgs.push_back("-fms-memptr-rep=multiple");
+ else
+ CmdArgs.push_back("-fms-memptr-rep=virtual");
+ }
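+
+ // For example, "/vmg /vmv" selects the most general representation and
+ // yields -fms-memptr-rep=virtual.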
+
+ if (Arg *A = Args.getLastArg(options::OPT_vtordisp_mode_EQ))
+ A->render(Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_fdiagnostics_format_EQ)) {
+ CmdArgs.push_back("-fdiagnostics-format");
+ if (Args.hasArg(options::OPT__SLASH_fallback))
+ CmdArgs.push_back("msvc-fallback");
+ else
+ CmdArgs.push_back("msvc");
+ }
+}
+
+visualstudio::Compiler *Clang::getCLFallback() const {
+ if (!CLFallback)
+ CLFallback.reset(new visualstudio::Compiler(getToolChain()));
+ return CLFallback.get();
+}
+
+void ClangAs::AddMIPSTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ StringRef CPUName;
+ StringRef ABIName;
+ const llvm::Triple &Triple = getToolChain().getTriple();
+ mips::getMipsCPUAndABI(Args, Triple, CPUName, ABIName);
+
+ CmdArgs.push_back("-target-abi");
+ CmdArgs.push_back(ABIName.data());
+}
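+
+// For a mips64 triple this typically yields "-target-abi n64" unless -mabi=
+// overrides it; a sketch of the common case, not an exhaustive mapping.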
+
+void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ assert(Inputs.size() == 1 && "Unexpected number of inputs.");
+ const InputInfo &Input = Inputs[0];
+
+ std::string TripleStr =
+ getToolChain().ComputeEffectiveClangTriple(Args, Input.getType());
+ const llvm::Triple Triple(TripleStr);
+
+ // Don't warn about "clang -w -c foo.s"
+ Args.ClaimAllArgs(options::OPT_w);
+ // and "clang -emit-llvm -c foo.s"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+
+ claimNoWarnArgs(Args);
+
+ // Invoke ourselves in -cc1as mode.
+ //
+ // FIXME: Implement custom jobs for internal actions.
+ CmdArgs.push_back("-cc1as");
+
+ // Add the "effective" target triple.
+ CmdArgs.push_back("-triple");
+ CmdArgs.push_back(Args.MakeArgString(TripleStr));
+
+ // Set the output mode; we currently only expect to be used as a real
+ // assembler.
+ CmdArgs.push_back("-filetype");
+ CmdArgs.push_back("obj");
+
+ // Set the main file name, so that debug info works even with
+ // -save-temps or preprocessed assembly.
+ CmdArgs.push_back("-main-file-name");
+ CmdArgs.push_back(Clang::getBaseInputName(Args, Input));
+
+ // Add the target cpu
+ std::string CPU = getCPUName(Args, Triple, /*FromAs*/ true);
+ if (!CPU.empty()) {
+ CmdArgs.push_back("-target-cpu");
+ CmdArgs.push_back(Args.MakeArgString(CPU));
+ }
+
+ // Add the target features
+ getTargetFeatures(getToolChain(), Triple, Args, CmdArgs, true);
+
+ // Ignore explicit -force_cpusubtype_ALL option.
+ (void)Args.hasArg(options::OPT_force__cpusubtype__ALL);
+
+ // Pass along any -I options so we get proper .include search paths.
+ Args.AddAllArgs(CmdArgs, options::OPT_I_Group);
+
+ // Determine the original source input.
+ const Action *SourceAction = &JA;
+ while (SourceAction->getKind() != Action::InputClass) {
+ assert(!SourceAction->getInputs().empty() && "unexpected root action!");
+ SourceAction = SourceAction->getInputs()[0];
+ }
+
+ // Forward -g and handle debug info related flags, assuming we are dealing
+ // with an actual assembly file.
+ if (SourceAction->getType() == types::TY_Asm ||
+ SourceAction->getType() == types::TY_PP_Asm) {
+ bool WantDebug = false;
+ unsigned DwarfVersion = 0;
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ if (Arg *A = Args.getLastArg(options::OPT_g_Group)) {
+ WantDebug = !A->getOption().matches(options::OPT_g0) &&
+ !A->getOption().matches(options::OPT_ggdb0);
+ if (WantDebug)
+ DwarfVersion = DwarfVersionNum(A->getSpelling());
+ }
+ if (DwarfVersion == 0)
+ DwarfVersion = getToolChain().GetDefaultDwarfVersion();
+ RenderDebugEnablingArgs(Args, CmdArgs,
+ (WantDebug ? CodeGenOptions::LimitedDebugInfo
+ : CodeGenOptions::NoDebugInfo),
+ DwarfVersion, llvm::DebuggerKind::Default);
+
+ // Add the -fdebug-compilation-dir flag if needed.
+ addDebugCompDirArg(Args, CmdArgs);
+
+ // Set the AT_producer to the clang version when using the integrated
+ // assembler on assembly source files.
+ CmdArgs.push_back("-dwarf-debug-producer");
+ CmdArgs.push_back(Args.MakeArgString(getClangFullVersion()));
+
+ // And pass along -I options
+ Args.AddAllArgs(CmdArgs, options::OPT_I);
+ }
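+
+ // So "clang -g -c foo.s" gets limited debug info at the toolchain's default
+ // DWARF version, while -g0 (or no -g at all) suppresses it.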
+
+ // Handle -fPIC et al -- the relocation-model affects the assembler
+ // for some targets.
+ llvm::Reloc::Model RelocationModel;
+ unsigned PICLevel;
+ bool IsPIE;
+ std::tie(RelocationModel, PICLevel, IsPIE) =
+ ParsePICArgs(getToolChain(), Triple, Args);
+
+ const char *RMName = RelocationModelName(RelocationModel);
+ if (RMName) {
+ CmdArgs.push_back("-mrelocation-model");
+ CmdArgs.push_back(RMName);
+ }
+
+ // Optionally embed the -cc1as level arguments into the debug info, for build
+ // analysis.
+ if (getToolChain().UseDwarfDebugFlags()) {
+ ArgStringList OriginalArgs;
+ for (const auto &Arg : Args)
+ Arg->render(Args, OriginalArgs);
+
+ SmallString<256> Flags;
+ const char *Exec = getToolChain().getDriver().getClangProgramPath();
+ Flags += Exec;
+ for (const char *OriginalArg : OriginalArgs) {
+ SmallString<128> EscapedArg;
+ EscapeSpacesAndBackslashes(OriginalArg, EscapedArg);
+ Flags += " ";
+ Flags += EscapedArg;
+ }
+ CmdArgs.push_back("-dwarf-debug-flags");
+ CmdArgs.push_back(Args.MakeArgString(Flags));
+ }
+
+ // FIXME: Add -static support, once we have it.
+
+ // Add target specific flags.
+ switch (getToolChain().getArch()) {
+ default:
+ break;
+
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ AddMIPSTargetArgs(Args, CmdArgs);
+ break;
+ }
+
+ // Consume all the warning flags. Usually this would be handled more
+ // gracefully by -cc1 (warning about unknown warning flags, etc.), but -cc1as
+ // doesn't handle that, so rather than warning about unused flags that are
+ // actually used, we lie by omission instead.
+ // FIXME: Stop lying and consume only the appropriate driver flags.
+ for (const Arg *A : Args.filtered(options::OPT_W_Group))
+ A->claim();
+
+ CollectArgsForIntegratedAssembler(C, Args, CmdArgs,
+ getToolChain().getDriver());
+
+ Args.AddAllArgs(CmdArgs, options::OPT_mllvm);
+
+ assert(Output.isFilename() && "Unexpected lipo output.");
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ assert(Input.isFilename() && "Invalid input.");
+ CmdArgs.push_back(Input.getFilename());
+
+ const char *Exec = getToolChain().getDriver().getClangProgramPath();
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+
+ // Handle the debug info splitting at object creation time if we're
+ // creating an object.
+ // TODO: Currently only works on linux with newer objcopy.
+ if (Args.hasArg(options::OPT_gsplit_dwarf) &&
+ getToolChain().getTriple().isOSLinux())
+ SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output,
+ SplitDebugName(Args, Input));
+}
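+
+// A typical resulting job looks roughly like
+//   clang -cc1as -triple x86_64-unknown-linux-gnu -filetype obj
+//         -main-file-name foo.s -target-cpu x86-64 -o foo.o foo.s
+// with the exact triple, CPU and debug flags depending on the configuration.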
+
+void GnuTool::anchor() {}
+
+void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs, const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ for (const auto &A : Args) {
+ if (forwardToGCC(A->getOption())) {
+ // Don't forward any -g arguments to assembly steps.
+ if (isa<AssembleJobAction>(JA) &&
+ A->getOption().matches(options::OPT_g_Group))
+ continue;
+
+ // Don't forward any -W arguments to assembly and link steps.
+ if ((isa<AssembleJobAction>(JA) || isa<LinkJobAction>(JA)) &&
+ A->getOption().matches(options::OPT_W_Group))
+ continue;
+
+ // It is unfortunate that we have to claim here, as this means
+ // we will basically never report anything interesting for
+ // platforms using a generic gcc, even if we are just using gcc
+ // to get to the assembler.
+ A->claim();
+ A->render(Args, CmdArgs);
+ }
+ }
+
+ RenderExtraToolArgs(JA, CmdArgs);
+
+ // If using a driver driver, force the arch.
+ if (getToolChain().getTriple().isOSDarwin()) {
+ CmdArgs.push_back("-arch");
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().getDefaultUniversalArchName()));
+ }
+
+ // Try to force gcc to match the tool chain we want, if we recognize
+ // the arch.
+ //
+ // FIXME: The triple class should directly provide the information we want
+ // here.
+ switch (getToolChain().getArch()) {
+ default:
+ break;
+ case llvm::Triple::x86:
+ case llvm::Triple::ppc:
+ CmdArgs.push_back("-m32");
+ break;
+ case llvm::Triple::x86_64:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ CmdArgs.push_back("-m64");
+ break;
+ case llvm::Triple::sparcel:
+ CmdArgs.push_back("-EL");
+ break;
+ }
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Unexpected output");
+ CmdArgs.push_back("-fsyntax-only");
+ }
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+
+ // Only pass -x if gcc will understand it; otherwise hope gcc infers the
+ // type from the suffix. The main case where this goes wrong is linker
+ // inputs that happen to have an odd suffix; really the only way to
+ // trigger that is a command like '-x foobar a.c', which treats a.c as a
+ // linker input.
+ //
+ // FIXME: For the linker case specifically, can we safely convert
+ // inputs into '-Wl,' options?
+ for (const auto &II : Inputs) {
+ // Don't try to pass LLVM or AST inputs to a generic gcc.
+ if (types::isLLVMIR(II.getType()))
+ D.Diag(diag::err_drv_no_linker_llvm_support)
+ << getToolChain().getTripleString();
+ else if (II.getType() == types::TY_AST)
+ D.Diag(diag::err_drv_no_ast_support) << getToolChain().getTripleString();
+ else if (II.getType() == types::TY_ModuleFile)
+ D.Diag(diag::err_drv_no_module_support)
+ << getToolChain().getTripleString();
+
+ if (types::canTypeBeUserSpecified(II.getType())) {
+ CmdArgs.push_back("-x");
+ CmdArgs.push_back(types::getTypeName(II.getType()));
+ }
+
+ if (II.isFilename())
+ CmdArgs.push_back(II.getFilename());
+ else {
+ const Arg &A = II.getInputArg();
+
+ // Reverse translate some rewritten options.
+ if (A.getOption().matches(options::OPT_Z_reserved_lib_stdcxx)) {
+ CmdArgs.push_back("-lstdc++");
+ continue;
+ }
+
+ // Don't render as input, we need gcc to do the translations.
+ A.render(Args, CmdArgs);
+ }
+ }
+
+ const std::string customGCCName = D.getCCCGenericGCCName();
+ const char *GCCName;
+ if (!customGCCName.empty())
+ GCCName = customGCCName.c_str();
+ else if (D.CCCIsCXX())
+ GCCName = "g++";
+ else
+ GCCName = "gcc";
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath(GCCName));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+void gcc::Preprocessor::RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const {
+ CmdArgs.push_back("-E");
+}
+
+void gcc::Compiler::RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const {
+ const Driver &D = getToolChain().getDriver();
+
+ switch (JA.getType()) {
+ // If -flto, etc. are present then make sure not to force assembly output.
+ case types::TY_LLVM_IR:
+ case types::TY_LTO_IR:
+ case types::TY_LLVM_BC:
+ case types::TY_LTO_BC:
+ CmdArgs.push_back("-c");
+ break;
+ // We assume we've got an "integrated" assembler in that gcc will produce an
+ // object file itself.
+ case types::TY_Object:
+ CmdArgs.push_back("-c");
+ break;
+ case types::TY_PP_Asm:
+ CmdArgs.push_back("-S");
+ break;
+ case types::TY_Nothing:
+ CmdArgs.push_back("-fsyntax-only");
+ break;
+ default:
+ D.Diag(diag::err_drv_invalid_gcc_output_type) << getTypeName(JA.getType());
+ }
+}
+
+void gcc::Linker::RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const {
+ // The types are (hopefully) good enough.
+}
+
+// Hexagon tools start.
+void hexagon::Assembler::RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const {
+}
+
+void hexagon::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
+
+ auto &HTC = static_cast<const toolchains::HexagonToolChain&>(getToolChain());
+ const Driver &D = HTC.getDriver();
+ ArgStringList CmdArgs;
+
+ std::string MArchString = "-march=hexagon";
+ CmdArgs.push_back(Args.MakeArgString(MArchString));
+
+ RenderExtraToolArgs(JA, CmdArgs);
+
+ std::string AsName = "hexagon-llvm-mc";
+ std::string MCpuString = "-mcpu=hexagon" +
+ toolchains::HexagonToolChain::GetTargetCPUVersion(Args).str();
+ CmdArgs.push_back("-filetype=obj");
+ CmdArgs.push_back(Args.MakeArgString(MCpuString));
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Unexpected output");
+ CmdArgs.push_back("-fsyntax-only");
+ }
+
+ if (auto G = toolchains::HexagonToolChain::getSmallDataThreshold(Args)) {
+ std::string N = llvm::utostr(G.getValue());
+ CmdArgs.push_back(Args.MakeArgString(std::string("-gpsize=") + N));
+ }
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+
+ // Mirrors the input handling in the generic gcc tool above, except that -x
+ // is never passed here; we rely on the assembler inferring the input type
+ // from the suffix.
+ for (const auto &II : Inputs) {
+ // Don't try to pass LLVM or AST inputs to a generic gcc.
+ if (types::isLLVMIR(II.getType()))
+ D.Diag(clang::diag::err_drv_no_linker_llvm_support)
+ << HTC.getTripleString();
+ else if (II.getType() == types::TY_AST)
+ D.Diag(clang::diag::err_drv_no_ast_support)
+ << HTC.getTripleString();
+ else if (II.getType() == types::TY_ModuleFile)
+ D.Diag(diag::err_drv_no_module_support)
+ << HTC.getTripleString();
+
+ if (II.isFilename())
+ CmdArgs.push_back(II.getFilename());
+ else
+ // Don't render as input, we need gcc to do the translations.
+ // FIXME: What is this?
+ II.getInputArg().render(Args, CmdArgs);
+ }
+
+ auto *Exec = Args.MakeArgString(HTC.GetProgramPath(AsName.c_str()));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
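+
+// An illustrative assembler invocation, assuming a V60 target:
+//   hexagon-llvm-mc -march=hexagon -mcpu=hexagonv60 -filetype=obj -o a.o a.s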
+
+void hexagon::Linker::RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const {
+}
+
+static void
+constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
+ const toolchains::HexagonToolChain &HTC,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const ArgList &Args, ArgStringList &CmdArgs,
+ const char *LinkingOutput) {
+
+ const Driver &D = HTC.getDriver();
+
+ //----------------------------------------------------------------------------
+ // Link mode flags
+ //----------------------------------------------------------------------------
+ bool IsStatic = Args.hasArg(options::OPT_static);
+ bool IsShared = Args.hasArg(options::OPT_shared);
+ bool IsPIE = Args.hasArg(options::OPT_pie);
+ bool IncStdLib = !Args.hasArg(options::OPT_nostdlib);
+ bool IncStartFiles = !Args.hasArg(options::OPT_nostartfiles);
+ bool IncDefLibs = !Args.hasArg(options::OPT_nodefaultlibs);
+ bool UseG0 = false;
+ bool UseShared = IsShared && !IsStatic;
+
+ //----------------------------------------------------------------------------
+ // Silence warnings for various options
+ //----------------------------------------------------------------------------
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ Args.ClaimAllArgs(options::OPT_w); // Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_static_libgcc);
+
+ //----------------------------------------------------------------------------
+ // Basic linker arguments
+ //----------------------------------------------------------------------------
+ if (Args.hasArg(options::OPT_s))
+ CmdArgs.push_back("-s");
+
+ if (Args.hasArg(options::OPT_r))
+ CmdArgs.push_back("-r");
+
+ for (const auto &Opt : HTC.ExtraOpts)
+ CmdArgs.push_back(Opt.c_str());
+
+ CmdArgs.push_back("-march=hexagon");
+ std::string CpuVer =
+ toolchains::HexagonToolChain::GetTargetCPUVersion(Args).str();
+ std::string MCpuString = "-mcpu=hexagon" + CpuVer;
+ CmdArgs.push_back(Args.MakeArgString(MCpuString));
+
+ if (IsShared) {
+ CmdArgs.push_back("-shared");
+ // This should be the default, but we do what hexagon-gcc does.
+ CmdArgs.push_back("-call_shared");
+ }
+
+ if (IsStatic)
+ CmdArgs.push_back("-static");
+
+ if (IsPIE && !IsShared)
+ CmdArgs.push_back("-pie");
+
+ if (auto G = toolchains::HexagonToolChain::getSmallDataThreshold(Args)) {
+ std::string N = llvm::utostr(G.getValue());
+ CmdArgs.push_back(Args.MakeArgString(std::string("-G") + N));
+ UseG0 = G.getValue() == 0;
+ }
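+
+ // Note that -G0 also selects the .../G0 variants of the start files and
+ // libraries below, via UseG0.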
+
+ //----------------------------------------------------------------------------
+ // Output file
+ //----------------------------------------------------------------------------
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ //----------------------------------------------------------------------------
+ // OS libraries (-moslib=)
+ //----------------------------------------------------------------------------
+ std::vector<std::string> OsLibs;
+ bool HasStandalone = false;
+
+ for (const Arg *A : Args.filtered(options::OPT_moslib_EQ)) {
+ A->claim();
+ OsLibs.emplace_back(A->getValue());
+ HasStandalone = HasStandalone || (OsLibs.back() == "standalone");
+ }
+ if (OsLibs.empty()) {
+ OsLibs.push_back("standalone");
+ HasStandalone = true;
+ }
+
+ //----------------------------------------------------------------------------
+ // Start Files
+ //----------------------------------------------------------------------------
+ const std::string MCpuSuffix = "/" + CpuVer;
+ const std::string MCpuG0Suffix = MCpuSuffix + "/G0";
+ const std::string RootDir =
+ HTC.getHexagonTargetDir(D.InstalledDir, D.PrefixDirs) + "/";
+ const std::string StartSubDir =
+ "hexagon/lib" + (UseG0 ? MCpuG0Suffix : MCpuSuffix);
+
+ auto Find = [&HTC] (const std::string &RootDir, const std::string &SubDir,
+ const char *Name) -> std::string {
+ std::string RelName = SubDir + Name;
+ std::string P = HTC.GetFilePath(RelName.c_str());
+ if (llvm::sys::fs::exists(P))
+ return P;
+ return RootDir + RelName;
+ };
+
+ if (IncStdLib && IncStartFiles) {
+ if (!IsShared) {
+ if (HasStandalone) {
+ std::string Crt0SA = Find(RootDir, StartSubDir, "/crt0_standalone.o");
+ CmdArgs.push_back(Args.MakeArgString(Crt0SA));
+ }
+ std::string Crt0 = Find(RootDir, StartSubDir, "/crt0.o");
+ CmdArgs.push_back(Args.MakeArgString(Crt0));
+ }
+ std::string Init = UseShared
+ ? Find(RootDir, StartSubDir + "/pic", "/initS.o")
+ : Find(RootDir, StartSubDir, "/init.o");
+ CmdArgs.push_back(Args.MakeArgString(Init));
+ }
+
+ //----------------------------------------------------------------------------
+ // Library Search Paths
+ //----------------------------------------------------------------------------
+ const ToolChain::path_list &LibPaths = HTC.getFilePaths();
+ for (const auto &LibPath : LibPaths)
+ CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + LibPath));
+
+ //----------------------------------------------------------------------------
+ // Linker scripts and inputs
+ //----------------------------------------------------------------------------
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_T_Group, options::OPT_e, options::OPT_s,
+ options::OPT_t, options::OPT_u_Group});
+
+ AddLinkerInputs(HTC, Inputs, Args, CmdArgs);
+
+ //----------------------------------------------------------------------------
+ // Libraries
+ //----------------------------------------------------------------------------
+ if (IncStdLib && IncDefLibs) {
+ if (D.CCCIsCXX()) {
+ HTC.AddCXXStdlibLibArgs(Args, CmdArgs);
+ CmdArgs.push_back("-lm");
+ }
+
+ CmdArgs.push_back("--start-group");
+
+ if (!IsShared) {
+ for (const std::string &Lib : OsLibs)
+ CmdArgs.push_back(Args.MakeArgString("-l" + Lib));
+ CmdArgs.push_back("-lc");
+ }
+ CmdArgs.push_back("-lgcc");
+
+ CmdArgs.push_back("--end-group");
+ }
+
+ //----------------------------------------------------------------------------
+ // End files
+ //----------------------------------------------------------------------------
+ if (IncStdLib && IncStartFiles) {
+ std::string Fini = UseShared
+ ? Find(RootDir, StartSubDir + "/pic", "/finiS.o")
+ : Find(RootDir, StartSubDir, "/fini.o");
+ CmdArgs.push_back(Args.MakeArgString(Fini));
+ }
+}
+
+void hexagon::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ auto &HTC = static_cast<const toolchains::HexagonToolChain&>(getToolChain());
+
+ ArgStringList CmdArgs;
+ constructHexagonLinkArgs(C, JA, HTC, Output, Inputs, Args, CmdArgs,
+ LinkingOutput);
+
+ std::string Linker = HTC.GetProgramPath("hexagon-link");
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Linker),
+ CmdArgs, Inputs));
+}
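+
+// A sketch of a default (non-shared) link, again assuming a V60 target:
+//   hexagon-link -march=hexagon -mcpu=hexagonv60 -o a.out crt0_standalone.o
+//   crt0.o init.o ... --start-group -lstandalone -lc -lgcc --end-group fini.o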
+// Hexagon tools end.
+
+void amdgpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+
+ std::string Linker = getToolChain().GetProgramPath(getShortName());
+ ArgStringList CmdArgs;
+ CmdArgs.push_back("-flavor");
+ CmdArgs.push_back("old-gnu");
+ CmdArgs.push_back("-target");
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().getTripleString()));
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Linker),
+ CmdArgs, Inputs));
+}
+// AMDGPU tools end.
+
+wasm::Linker::Linker(const ToolChain &TC)
+ : GnuTool("wasm::Linker", "lld", TC) {}
+
+bool wasm::Linker::isLinkJob() const {
+ return true;
+}
+
+bool wasm::Linker::hasIntegratedCPP() const {
+ return false;
+}
+
+void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const char *Linker = Args.MakeArgString(getToolChain().GetLinkerPath());
+ ArgStringList CmdArgs;
+ CmdArgs.push_back("-flavor");
+ CmdArgs.push_back("ld");
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Linker, CmdArgs, Inputs));
+}
+
+const std::string arm::getARMArch(StringRef Arch, const llvm::Triple &Triple) {
+ std::string MArch;
+ if (!Arch.empty())
+ MArch = Arch;
+ else
+ MArch = Triple.getArchName();
+ MArch = StringRef(MArch).split("+").first.lower();
+
+ // Handle -march=native.
+ if (MArch == "native") {
+ std::string CPU = llvm::sys::getHostCPUName();
+ if (CPU != "generic") {
+ // Translate the native cpu into the architecture suffix for that CPU.
+ StringRef Suffix = arm::getLLVMArchSuffixForARM(CPU, MArch, Triple);
+ // If there is no valid architecture suffix for this CPU we don't know how
+ // to handle it, so return no architecture.
+ if (Suffix.empty())
+ MArch = "";
+ else
+ MArch = std::string("arm") + Suffix.str();
+ }
+ }
+
+ return MArch;
+}
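+
+// For instance, "armv7-a+neon" is trimmed to "armv7-a", while -march=native
+// on a Cortex-A9 host would resolve to "armv7" (assuming LLVM recognizes the
+// host CPU).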
+
+/// Get the (LLVM) name of the minimum ARM CPU for the arch we are targeting.
+StringRef arm::getARMCPUForMArch(StringRef Arch, const llvm::Triple &Triple) {
+ std::string MArch = getARMArch(Arch, Triple);
+ // getARMCPUForArch defaults to the triple if MArch is empty, but empty MArch
+ // here means an -march=native that we can't handle, so instead return no CPU.
+ if (MArch.empty())
+ return StringRef();
+
+ // We need to return an empty string here on invalid MArch values as the
+ // various places that call this function can't cope with a null result.
+ return Triple.getARMCPUForArch(MArch);
+}
+
+/// getARMTargetCPU - Get the (LLVM) name of the ARM cpu we are targeting.
+std::string arm::getARMTargetCPU(StringRef CPU, StringRef Arch,
+ const llvm::Triple &Triple) {
+ // FIXME: Warn on inconsistent use of -mcpu and -march.
+ // If we have -mcpu=, use that.
+ if (!CPU.empty()) {
+ std::string MCPU = StringRef(CPU).split("+").first.lower();
+ // Handle -mcpu=native.
+ if (MCPU == "native")
+ return llvm::sys::getHostCPUName();
+ else
+ return MCPU;
+ }
+
+ return getARMCPUForMArch(Arch, Triple);
+}
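+
+// E.g. -mcpu=cortex-a53+crc is trimmed to "cortex-a53", and -mcpu=native
+// resolves to the host CPU name.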
+
+/// getLLVMArchSuffixForARM - Get the LLVM arch name to use for a particular
+/// CPU (or Arch, if CPU is generic).
+// FIXME: This is redundant with -mcpu; why does LLVM use this?
+StringRef arm::getLLVMArchSuffixForARM(StringRef CPU, StringRef Arch,
+ const llvm::Triple &Triple) {
+ unsigned ArchKind;
+ if (CPU == "generic") {
+ std::string ARMArch = tools::arm::getARMArch(Arch, Triple);
+ ArchKind = llvm::ARM::parseArch(ARMArch);
+ if (ArchKind == llvm::ARM::AK_INVALID)
+ // In case of generic Arch, i.e. "arm",
+ // extract arch from default cpu of the Triple
+ ArchKind = llvm::ARM::parseCPUArch(Triple.getARMCPUForArch(ARMArch));
+ } else {
+ // FIXME: horrible hack to get around the fact that Cortex-A7 is only an
+ // armv7k triple if it's actually been specified via "-arch armv7k".
+ ArchKind = (Arch == "armv7k" || Arch == "thumbv7k")
+ ? (unsigned)llvm::ARM::AK_ARMV7K
+ : llvm::ARM::parseCPUArch(CPU);
+ }
+ if (ArchKind == llvm::ARM::AK_INVALID)
+ return "";
+ return llvm::ARM::getSubArch(ArchKind);
+}
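+
+// Note the armv7k special case above: with Arch "armv7k" or "thumbv7k" the
+// result is the v7k suffix regardless of which CPU was named.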
+
+void arm::appendEBLinkFlags(const ArgList &Args, ArgStringList &CmdArgs,
+ const llvm::Triple &Triple) {
+ if (Args.hasArg(options::OPT_r))
+ return;
+
+ // ARMv7 (and later) and ARMv6-M do not support BE-32, so instruct the linker
+ // to generate BE-8 executables.
+ if (getARMSubArchVersionNumber(Triple) >= 7 || isARMMProfile(Triple))
+ CmdArgs.push_back("--be8");
+}
+
+mips::NanEncoding mips::getSupportedNanEncoding(StringRef &CPU) {
+ // Strictly speaking, mips32r2 and mips64r2 are NanLegacy-only since Nan2008
+ // was first introduced in Release 3. However, other compilers have
+ // traditionally allowed it for Release 2 so we should do the same.
+ return (NanEncoding)llvm::StringSwitch<int>(CPU)
+ .Case("mips1", NanLegacy)
+ .Case("mips2", NanLegacy)
+ .Case("mips3", NanLegacy)
+ .Case("mips4", NanLegacy)
+ .Case("mips5", NanLegacy)
+ .Case("mips32", NanLegacy)
+ .Case("mips32r2", NanLegacy | Nan2008)
+ .Case("mips32r3", NanLegacy | Nan2008)
+ .Case("mips32r5", NanLegacy | Nan2008)
+ .Case("mips32r6", Nan2008)
+ .Case("mips64", NanLegacy)
+ .Case("mips64r2", NanLegacy | Nan2008)
+ .Case("mips64r3", NanLegacy | Nan2008)
+ .Case("mips64r5", NanLegacy | Nan2008)
+ .Case("mips64r6", Nan2008)
+ .Default(NanLegacy);
+}
+
+bool mips::hasMipsAbiArg(const ArgList &Args, const char *Value) {
+ Arg *A = Args.getLastArg(options::OPT_mabi_EQ);
+ return A && (A->getValue() == StringRef(Value));
+}
+
+bool mips::isUCLibc(const ArgList &Args) {
+ Arg *A = Args.getLastArg(options::OPT_m_libc_Group);
+ return A && A->getOption().matches(options::OPT_muclibc);
+}
+
+bool mips::isNaN2008(const ArgList &Args, const llvm::Triple &Triple) {
+ if (Arg *NaNArg = Args.getLastArg(options::OPT_mnan_EQ))
+ return llvm::StringSwitch<bool>(NaNArg->getValue())
+ .Case("2008", true)
+ .Case("legacy", false)
+ .Default(false);
+
+ // NaN2008 is the default for MIPS32r6/MIPS64r6.
+ return llvm::StringSwitch<bool>(getCPUName(Args, Triple))
+ .Cases("mips32r6", "mips64r6", true)
+ .Default(false);
+}
+
+bool mips::isFPXXDefault(const llvm::Triple &Triple, StringRef CPUName,
+ StringRef ABIName, mips::FloatABI FloatABI) {
+ if (Triple.getVendor() != llvm::Triple::ImaginationTechnologies &&
+ Triple.getVendor() != llvm::Triple::MipsTechnologies)
+ return false;
+
+ if (ABIName != "32")
+ return false;
+
+ // FPXX shouldn't be used if either -msoft-float or -mfloat-abi=soft is
+ // present.
+ if (FloatABI == mips::FloatABI::Soft)
+ return false;
+
+ return llvm::StringSwitch<bool>(CPUName)
+ .Cases("mips2", "mips3", "mips4", "mips5", true)
+ .Cases("mips32", "mips32r2", "mips32r3", "mips32r5", true)
+ .Cases("mips64", "mips64r2", "mips64r3", "mips64r5", true)
+ .Default(false);
+}
+
+bool mips::shouldUseFPXX(const ArgList &Args, const llvm::Triple &Triple,
+ StringRef CPUName, StringRef ABIName,
+ mips::FloatABI FloatABI) {
+ bool UseFPXX = isFPXXDefault(Triple, CPUName, ABIName, FloatABI);
+
+ // FPXX shouldn't be used if -msingle-float is present.
+ if (Arg *A = Args.getLastArg(options::OPT_msingle_float,
+ options::OPT_mdouble_float))
+ if (A->getOption().matches(options::OPT_msingle_float))
+ UseFPXX = false;
+
+ return UseFPXX;
+}
+
+llvm::Triple::ArchType darwin::getArchTypeForMachOArchName(StringRef Str) {
+ // See arch(3) and llvm-gcc's driver-driver.c. We don't implement support for
+ // archs which Darwin doesn't use.
+
+ // The matching this routine does is fairly pointless, since it is neither the
+ // complete architecture list, nor a reasonable subset. The problem is that
+ // historically the driver driver accepts this and also ties its -march=
+ // handling to the architecture name, so we need to be careful before removing
+ // support for it.
+
+ // This code must be kept in sync with Clang's Darwin specific argument
+ // translation.
+
+ return llvm::StringSwitch<llvm::Triple::ArchType>(Str)
+ .Cases("ppc", "ppc601", "ppc603", "ppc604", "ppc604e", llvm::Triple::ppc)
+ .Cases("ppc750", "ppc7400", "ppc7450", "ppc970", llvm::Triple::ppc)
+ .Case("ppc64", llvm::Triple::ppc64)
+ .Cases("i386", "i486", "i486SX", "i586", "i686", llvm::Triple::x86)
+ .Cases("pentium", "pentpro", "pentIIm3", "pentIIm5", "pentium4",
+ llvm::Triple::x86)
+ .Cases("x86_64", "x86_64h", llvm::Triple::x86_64)
+ // This is derived from the driver driver.
+ .Cases("arm", "armv4t", "armv5", "armv6", "armv6m", llvm::Triple::arm)
+ .Cases("armv7", "armv7em", "armv7k", "armv7m", llvm::Triple::arm)
+ .Cases("armv7s", "xscale", llvm::Triple::arm)
+ .Case("arm64", llvm::Triple::aarch64)
+ .Case("r600", llvm::Triple::r600)
+ .Case("amdgcn", llvm::Triple::amdgcn)
+ .Case("nvptx", llvm::Triple::nvptx)
+ .Case("nvptx64", llvm::Triple::nvptx64)
+ .Case("amdil", llvm::Triple::amdil)
+ .Case("spir", llvm::Triple::spir)
+ .Default(llvm::Triple::UnknownArch);
+}
+
+void darwin::setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str) {
+ const llvm::Triple::ArchType Arch = getArchTypeForMachOArchName(Str);
+ T.setArch(Arch);
+
+ if (Str == "x86_64h")
+ T.setArchName(Str);
+ else if (Str == "armv6m" || Str == "armv7m" || Str == "armv7em") {
+ T.setOS(llvm::Triple::UnknownOS);
+ T.setObjectFormat(llvm::Triple::MachO);
+ }
+}
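+
+// E.g. "armv7m" yields an arm triple with an unknown OS and a Mach-O object
+// format, while "x86_64h" keeps its literal arch name in the triple.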
+
+const char *Clang::getBaseInputName(const ArgList &Args,
+ const InputInfo &Input) {
+ return Args.MakeArgString(llvm::sys::path::filename(Input.getBaseInput()));
+}
+
+const char *Clang::getBaseInputStem(const ArgList &Args,
+ const InputInfoList &Inputs) {
+ const char *Str = getBaseInputName(Args, Inputs[0]);
+
+ if (const char *End = strrchr(Str, '.'))
+ return Args.MakeArgString(std::string(Str, End));
+
+ return Str;
+}
+
+const char *Clang::getDependencyFileName(const ArgList &Args,
+ const InputInfoList &Inputs) {
+ // FIXME: Think about this more.
+ std::string Res;
+
+ if (Arg *OutputOpt = Args.getLastArg(options::OPT_o)) {
+ std::string Str(OutputOpt->getValue());
+ Res = Str.substr(0, Str.rfind('.'));
+ } else {
+ Res = getBaseInputStem(Args, Inputs);
+ }
+ return Args.MakeArgString(Res + ".d");
+}
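+
+// E.g. with "-o build/foo.o" the dependency file is "build/foo.d"; without
+// -o it is derived from the input stem, so foo.c produces foo.d.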
+
+void cloudabi::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const ToolChain &ToolChain = getToolChain();
+ const Driver &D = ToolChain.getDriver();
+ ArgStringList CmdArgs;
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo". Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ // CloudABI only supports static linkage.
+ CmdArgs.push_back("-Bstatic");
+ CmdArgs.push_back("--eh-frame-hdr");
+ CmdArgs.push_back("--gc-sections");
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtbegin.o")));
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_T_Group, options::OPT_e, options::OPT_s,
+ options::OPT_t, options::OPT_Z_Flag, options::OPT_r});
+
+ if (D.isUsingLTO())
+ AddGoldPlugin(ToolChain, Args, CmdArgs, D.getLTOMode() == LTOK_Thin);
+
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ if (D.CCCIsCXX())
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+ CmdArgs.push_back("-lc");
+ CmdArgs.push_back("-lcompiler_rt");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles))
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
+
+ const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+void darwin::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ assert(Inputs.size() == 1 && "Unexpected number of inputs.");
+ const InputInfo &Input = Inputs[0];
+
+ // Determine the original source input.
+ const Action *SourceAction = &JA;
+ while (SourceAction->getKind() != Action::InputClass) {
+ assert(!SourceAction->getInputs().empty() && "unexpected root action!");
+ SourceAction = SourceAction->getInputs()[0];
+ }
+
+ // If -fno-integrated-as is used, add -Q to the Darwin assembler driver to
+ // make sure it runs its system assembler, not clang's integrated assembler.
+ // Applicable to darwin11+ and Xcode 4+; darwin<10 lacked integrated-as.
+ // FIXME: at run-time detect assembler capabilities or rely on version
+ // information forwarded by -target-assembler-version.
+ if (Args.hasArg(options::OPT_fno_integrated_as)) {
+ const llvm::Triple &T(getToolChain().getTriple());
+ if (!(T.isMacOSX() && T.isMacOSXVersionLT(10, 7)))
+ CmdArgs.push_back("-Q");
+ }
+
+ // Forward -g, assuming we are dealing with an actual assembly file.
+ if (SourceAction->getType() == types::TY_Asm ||
+ SourceAction->getType() == types::TY_PP_Asm) {
+ if (Args.hasArg(options::OPT_gstabs))
+ CmdArgs.push_back("--gstabs");
+ else if (Args.hasArg(options::OPT_g_Group))
+ CmdArgs.push_back("-g");
+ }
+
+ // Derived from asm spec.
+ AddMachOArch(Args, CmdArgs);
+
+ // Use -force_cpusubtype_ALL on x86 by default.
+ if (getToolChain().getArch() == llvm::Triple::x86 ||
+ getToolChain().getArch() == llvm::Triple::x86_64 ||
+ Args.hasArg(options::OPT_force__cpusubtype__ALL))
+ CmdArgs.push_back("-force_cpusubtype_ALL");
+
+ if (getToolChain().getArch() != llvm::Triple::x86_64 &&
+ (((Args.hasArg(options::OPT_mkernel) ||
+ Args.hasArg(options::OPT_fapple_kext)) &&
+ getMachOToolChain().isKernelStatic()) ||
+ Args.hasArg(options::OPT_static)))
+ CmdArgs.push_back("-static");
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+
+ assert(Output.isFilename() && "Unexpected lipo output.");
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ assert(Input.isFilename() && "Invalid input.");
+ CmdArgs.push_back(Input.getFilename());
+
+ // asm_final spec is empty.
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+void darwin::MachOTool::anchor() {}
+
+void darwin::MachOTool::AddMachOArch(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ StringRef ArchName = getMachOToolChain().getMachOArchName(Args);
+
+ // Derived from darwin_arch spec.
+ CmdArgs.push_back("-arch");
+ CmdArgs.push_back(Args.MakeArgString(ArchName));
+
+ // FIXME: Is this needed anymore?
+ if (ArchName == "arm")
+ CmdArgs.push_back("-force_cpusubtype_ALL");
+}
+
+bool darwin::Linker::NeedsTempPath(const InputInfoList &Inputs) const {
+ // We only need to generate a temp path for LTO if we aren't compiling object
+ // files. When compiling source files, we run 'dsymutil' after linking. We
+ // don't run 'dsymutil' when compiling object files.
+ for (const auto &Input : Inputs)
+ if (Input.getType() != types::TY_Object)
+ return true;
+
+ return false;
+}
+
+void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const InputInfoList &Inputs) const {
+ const Driver &D = getToolChain().getDriver();
+ const toolchains::MachO &MachOTC = getMachOToolChain();
+
+ unsigned Version[3] = {0, 0, 0};
+ if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ)) {
+ bool HadExtra;
+ if (!Driver::GetReleaseVersion(A->getValue(), Version[0], Version[1],
+ Version[2], HadExtra) ||
+ HadExtra)
+ D.Diag(diag::err_drv_invalid_version_number) << A->getAsString(Args);
+ }
+
+ // Newer linkers support -demangle. Pass it if supported and not disabled by
+ // the user.
+ if (Version[0] >= 100 && !Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
+ CmdArgs.push_back("-demangle");
+
+ if (Args.hasArg(options::OPT_rdynamic) && Version[0] >= 137)
+ CmdArgs.push_back("-export_dynamic");
+
+ // If we are using App Extension restrictions, pass a flag to the linker
+ // telling it that the compiled code has been audited.
+ if (Args.hasFlag(options::OPT_fapplication_extension,
+ options::OPT_fno_application_extension, false))
+ CmdArgs.push_back("-application_extension");
+
+ if (D.isUsingLTO()) {
+ // If we are using LTO, then automatically create a temporary file path for
+ // the linker to use, so that its lifetime will extend past a possible
+ // dsymutil step.
+ if (Version[0] >= 116 && NeedsTempPath(Inputs)) {
+ const char *TmpPath = C.getArgs().MakeArgString(
+ D.GetTemporaryPath("cc", types::getTypeTempSuffix(types::TY_Object)));
+ C.addTempFile(TmpPath);
+ CmdArgs.push_back("-object_path_lto");
+ CmdArgs.push_back(TmpPath);
+ }
+
+ // Use -lto_library option to specify the libLTO.dylib path. Try to find
+ // it in clang installed libraries. If not found, the option is not used
+ // and 'ld' will use its default mechanism to search for libLTO.dylib.
+ if (Version[0] >= 133) {
+ // Search for libLTO in <InstalledDir>/../lib/libLTO.dylib
+ StringRef P = llvm::sys::path::parent_path(D.getInstalledDir());
+ SmallString<128> LibLTOPath(P);
+ llvm::sys::path::append(LibLTOPath, "lib");
+ llvm::sys::path::append(LibLTOPath, "libLTO.dylib");
+ if (llvm::sys::fs::exists(LibLTOPath)) {
+ CmdArgs.push_back("-lto_library");
+ CmdArgs.push_back(C.getArgs().MakeArgString(LibLTOPath));
+ } else {
+ D.Diag(diag::warn_drv_lto_libpath);
+ }
+ }
+ }
+
+ // Derived from the "link" spec.
+ Args.AddAllArgs(CmdArgs, options::OPT_static);
+ if (!Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("-dynamic");
+ if (Args.hasArg(options::OPT_fgnu_runtime)) {
+ // FIXME: gcc replaces -lobjc in forward args with -lobjc-gnu
+ // here. How do we wish to handle such things?
+ }
+
+ if (!Args.hasArg(options::OPT_dynamiclib)) {
+ AddMachOArch(Args, CmdArgs);
+ // FIXME: Why do this only on this path?
+ Args.AddLastArg(CmdArgs, options::OPT_force__cpusubtype__ALL);
+
+ Args.AddLastArg(CmdArgs, options::OPT_bundle);
+ Args.AddAllArgs(CmdArgs, options::OPT_bundle__loader);
+ Args.AddAllArgs(CmdArgs, options::OPT_client__name);
+
+ Arg *A;
+ if ((A = Args.getLastArg(options::OPT_compatibility__version)) ||
+ (A = Args.getLastArg(options::OPT_current__version)) ||
+ (A = Args.getLastArg(options::OPT_install__name)))
+ D.Diag(diag::err_drv_argument_only_allowed_with) << A->getAsString(Args)
+ << "-dynamiclib";
+
+ Args.AddLastArg(CmdArgs, options::OPT_force__flat__namespace);
+ Args.AddLastArg(CmdArgs, options::OPT_keep__private__externs);
+ Args.AddLastArg(CmdArgs, options::OPT_private__bundle);
+ } else {
+ CmdArgs.push_back("-dylib");
+
+ Arg *A;
+ if ((A = Args.getLastArg(options::OPT_bundle)) ||
+ (A = Args.getLastArg(options::OPT_bundle__loader)) ||
+ (A = Args.getLastArg(options::OPT_client__name)) ||
+ (A = Args.getLastArg(options::OPT_force__flat__namespace)) ||
+ (A = Args.getLastArg(options::OPT_keep__private__externs)) ||
+ (A = Args.getLastArg(options::OPT_private__bundle)))
+ D.Diag(diag::err_drv_argument_not_allowed_with) << A->getAsString(Args)
+ << "-dynamiclib";
+
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_compatibility__version,
+ "-dylib_compatibility_version");
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_current__version,
+ "-dylib_current_version");
+
+ AddMachOArch(Args, CmdArgs);
+
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_install__name,
+ "-dylib_install_name");
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_all__load);
+ Args.AddAllArgs(CmdArgs, options::OPT_allowable__client);
+ Args.AddLastArg(CmdArgs, options::OPT_bind__at__load);
+ if (MachOTC.isTargetIOSBased())
+ Args.AddLastArg(CmdArgs, options::OPT_arch__errors__fatal);
+ Args.AddLastArg(CmdArgs, options::OPT_dead__strip);
+ Args.AddLastArg(CmdArgs, options::OPT_no__dead__strip__inits__and__terms);
+ Args.AddAllArgs(CmdArgs, options::OPT_dylib__file);
+ Args.AddLastArg(CmdArgs, options::OPT_dynamic);
+ Args.AddAllArgs(CmdArgs, options::OPT_exported__symbols__list);
+ Args.AddLastArg(CmdArgs, options::OPT_flat__namespace);
+ Args.AddAllArgs(CmdArgs, options::OPT_force__load);
+ Args.AddAllArgs(CmdArgs, options::OPT_headerpad__max__install__names);
+ Args.AddAllArgs(CmdArgs, options::OPT_image__base);
+ Args.AddAllArgs(CmdArgs, options::OPT_init);
+
+ // Add the deployment target.
+ MachOTC.addMinVersionArgs(Args, CmdArgs);
+
+ Args.AddLastArg(CmdArgs, options::OPT_nomultidefs);
+ Args.AddLastArg(CmdArgs, options::OPT_multi__module);
+ Args.AddLastArg(CmdArgs, options::OPT_single__module);
+ Args.AddAllArgs(CmdArgs, options::OPT_multiply__defined);
+ Args.AddAllArgs(CmdArgs, options::OPT_multiply__defined__unused);
+
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fpie, options::OPT_fPIE,
+ options::OPT_fno_pie, options::OPT_fno_PIE)) {
+ if (A->getOption().matches(options::OPT_fpie) ||
+ A->getOption().matches(options::OPT_fPIE))
+ CmdArgs.push_back("-pie");
+ else
+ CmdArgs.push_back("-no_pie");
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_prebind);
+ Args.AddLastArg(CmdArgs, options::OPT_noprebind);
+ Args.AddLastArg(CmdArgs, options::OPT_nofixprebinding);
+ Args.AddLastArg(CmdArgs, options::OPT_prebind__all__twolevel__modules);
+ Args.AddLastArg(CmdArgs, options::OPT_read__only__relocs);
+ Args.AddAllArgs(CmdArgs, options::OPT_sectcreate);
+ Args.AddAllArgs(CmdArgs, options::OPT_sectorder);
+ Args.AddAllArgs(CmdArgs, options::OPT_seg1addr);
+ Args.AddAllArgs(CmdArgs, options::OPT_segprot);
+ Args.AddAllArgs(CmdArgs, options::OPT_segaddr);
+ Args.AddAllArgs(CmdArgs, options::OPT_segs__read__only__addr);
+ Args.AddAllArgs(CmdArgs, options::OPT_segs__read__write__addr);
+ Args.AddAllArgs(CmdArgs, options::OPT_seg__addr__table);
+ Args.AddAllArgs(CmdArgs, options::OPT_seg__addr__table__filename);
+ Args.AddAllArgs(CmdArgs, options::OPT_sub__library);
+ Args.AddAllArgs(CmdArgs, options::OPT_sub__umbrella);
+
+ // Give --sysroot= preference, over the Apple specific behavior to also use
+ // --isysroot as the syslibroot.
+ StringRef sysroot = C.getSysRoot();
+ if (sysroot != "") {
+ CmdArgs.push_back("-syslibroot");
+ CmdArgs.push_back(C.getArgs().MakeArgString(sysroot));
+ } else if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
+ CmdArgs.push_back("-syslibroot");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_twolevel__namespace);
+ Args.AddLastArg(CmdArgs, options::OPT_twolevel__namespace__hints);
+ Args.AddAllArgs(CmdArgs, options::OPT_umbrella);
+ Args.AddAllArgs(CmdArgs, options::OPT_undefined);
+ Args.AddAllArgs(CmdArgs, options::OPT_unexported__symbols__list);
+ Args.AddAllArgs(CmdArgs, options::OPT_weak__reference__mismatches);
+ Args.AddLastArg(CmdArgs, options::OPT_X_Flag);
+ Args.AddAllArgs(CmdArgs, options::OPT_y);
+ Args.AddLastArg(CmdArgs, options::OPT_w);
+ Args.AddAllArgs(CmdArgs, options::OPT_pagezero__size);
+ Args.AddAllArgs(CmdArgs, options::OPT_segs__read__);
+ Args.AddLastArg(CmdArgs, options::OPT_seglinkedit);
+ Args.AddLastArg(CmdArgs, options::OPT_noseglinkedit);
+ Args.AddAllArgs(CmdArgs, options::OPT_sectalign);
+ Args.AddAllArgs(CmdArgs, options::OPT_sectobjectsymbols);
+ Args.AddAllArgs(CmdArgs, options::OPT_segcreate);
+ Args.AddLastArg(CmdArgs, options::OPT_whyload);
+ Args.AddLastArg(CmdArgs, options::OPT_whatsloaded);
+ Args.AddAllArgs(CmdArgs, options::OPT_dylinker__install__name);
+ Args.AddLastArg(CmdArgs, options::OPT_dylinker);
+ Args.AddLastArg(CmdArgs, options::OPT_Mach);
+}
+
+void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ assert(Output.getType() == types::TY_Image && "Invalid linker output type.");
+
+ // If the number of arguments exceeds the system limit, we encode the
+ // input files in a separate file, shortening the command line. To this end,
+ // build a list of input file names that can be passed via a file with the
+ // -filelist linker option.
+ llvm::opt::ArgStringList InputFileList;
+
+ // The logic here is derived from gcc's behavior, most of which
+ // comes from its specs (starting with link_command). Consult gcc for
+ // more information.
+ ArgStringList CmdArgs;
+
+ // Hack(tm) to ignore linking errors when we are doing ARC migration.
+ if (Args.hasArg(options::OPT_ccc_arcmt_check,
+ options::OPT_ccc_arcmt_migrate)) {
+ for (const auto &Arg : Args)
+ Arg->claim();
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("touch"));
+ CmdArgs.push_back(Output.getFilename());
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, None));
+ return;
+ }
+
+ // I'm not sure why this particular decomposition exists in gcc, but
+ // we follow suit for ease of comparison.
+ AddLinkArgs(C, Args, CmdArgs, Inputs);
+
+ // It seems that the 'e' option is completely ignored for dynamic executables
+ // (the default), and with static executables, the last one wins, as expected.
+ Args.AddAllArgs(CmdArgs, {options::OPT_d_Flag, options::OPT_s, options::OPT_t,
+ options::OPT_Z_Flag, options::OPT_u_Group,
+ options::OPT_e, options::OPT_r});
+
+ // Forward -ObjC when either -ObjC or -ObjC++ is used, to force loading
+ // members of static archive libraries which implement Objective-C classes or
+ // categories.
+ if (Args.hasArg(options::OPT_ObjC) || Args.hasArg(options::OPT_ObjCXX))
+ CmdArgs.push_back("-ObjC");
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles))
+ getMachOToolChain().addStartObjectFileArgs(Args, CmdArgs);
+
+ // SafeStack requires its own runtime libraries. These libraries should be
+ // linked first, to make sure the __safestack_init constructor executes
+ // before everything else.
+ if (getToolChain().getSanitizerArgs().needsSafeStackRt()) {
+ getMachOToolChain().AddLinkRuntimeLib(Args, CmdArgs,
+ "libclang_rt.safestack_osx.a",
+ /*AlwaysLink=*/true);
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+ // Build the input file for -filelist (list of linker input files) in case we
+ // need it later
+ for (const auto &II : Inputs) {
+ if (!II.isFilename()) {
+ // This is a linker input argument.
+ // We cannot mix input arguments and file names in a -filelist input, thus
+ // we prematurely stop our list (remaining files shall be passed as
+ // arguments).
+ if (InputFileList.size() > 0)
+ break;
+
+ continue;
+ }
+
+ InputFileList.push_back(II.getFilename());
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs))
+ addOpenMPRuntime(CmdArgs, getToolChain(), Args);
+
+ if (isObjCRuntimeLinked(Args) &&
+ !Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ // We use arclite library for both ARC and subscripting support.
+ getMachOToolChain().AddLinkARCArgs(Args, CmdArgs);
+
+ CmdArgs.push_back("-framework");
+ CmdArgs.push_back("Foundation");
+ // Link libobjc.
+ CmdArgs.push_back("-lobjc");
+ }
+
+ if (LinkingOutput) {
+ CmdArgs.push_back("-arch_multiple");
+ CmdArgs.push_back("-final_output");
+ CmdArgs.push_back(LinkingOutput);
+ }
+
+ if (Args.hasArg(options::OPT_fnested_functions))
+ CmdArgs.push_back("-allow_stack_execute");
+
+ getMachOToolChain().addProfileRTLibs(Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ if (getToolChain().getDriver().CCCIsCXX())
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+
+ // link_ssp spec is empty.
+
+ // Let the tool chain choose which runtime library to link.
+ getMachOToolChain().AddLinkRuntimeLibArgs(Args, CmdArgs);
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ // endfile_spec is empty.
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_F);
+
+ // -iframework should be forwarded as -F.
+ for (const Arg *A : Args.filtered(options::OPT_iframework))
+ CmdArgs.push_back(Args.MakeArgString(std::string("-F") + A->getValue()));
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ if (Arg *A = Args.getLastArg(options::OPT_fveclib)) {
+ if (A->getValue() == StringRef("Accelerate")) {
+ CmdArgs.push_back("-framework");
+ CmdArgs.push_back("Accelerate");
+ }
+ }
+ }
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
+ std::unique_ptr<Command> Cmd =
+ llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs);
+ Cmd->setInputFileList(std::move(InputFileList));
+ C.addCommand(std::move(Cmd));
+}
+
+void darwin::Lipo::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ CmdArgs.push_back("-create");
+ assert(Output.isFilename() && "Unexpected lipo output.");
+
+ CmdArgs.push_back("-output");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (const auto &II : Inputs) {
+ assert(II.isFilename() && "Unexpected lipo input.");
+ CmdArgs.push_back(II.getFilename());
+ }
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("lipo"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+void darwin::Dsymutil::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ assert(Inputs.size() == 1 && "Unable to handle multiple inputs.");
+ const InputInfo &Input = Inputs[0];
+ assert(Input.isFilename() && "Unexpected dsymutil input.");
+ CmdArgs.push_back(Input.getFilename());
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("dsymutil"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+void darwin::VerifyDebug::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+ CmdArgs.push_back("--verify");
+ CmdArgs.push_back("--debug-info");
+ CmdArgs.push_back("--eh-frame");
+ CmdArgs.push_back("--quiet");
+
+ assert(Inputs.size() == 1 && "Unable to handle multiple inputs.");
+ const InputInfo &Input = Inputs[0];
+ assert(Input.isFilename() && "Unexpected verify input");
+
+ // Grab the output of the earlier dsymutil run.
+ CmdArgs.push_back(Input.getFilename());
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("dwarfdump"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+void solaris::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
+ ArgStringList CmdArgs;
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (const auto &II : Inputs)
+ CmdArgs.push_back(II.getFilename());
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ // Demangle C++ names in errors
+ CmdArgs.push_back("-C");
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_shared)) {
+ CmdArgs.push_back("-e");
+ CmdArgs.push_back("_start");
+ }
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-Bstatic");
+ CmdArgs.push_back("-dn");
+ } else {
+ CmdArgs.push_back("-Bdynamic");
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-shared");
+ } else {
+ CmdArgs.push_back("--dynamic-linker");
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("ld.so.1")));
+ }
+ }
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crt1.o")));
+
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("values-Xa.o")));
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
+ }
+
+ getToolChain().AddFilePathLibArgs(Args, CmdArgs);
+
+ Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
+ options::OPT_e, options::OPT_r});
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ if (getToolChain().getDriver().CCCIsCXX())
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ CmdArgs.push_back("-lgcc_s");
+ CmdArgs.push_back("-lc");
+ if (!Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-lgcc");
+ CmdArgs.push_back("-lm");
+ }
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
+ }
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crtn.o")));
+
+ getToolChain().addProfileRTLibs(Args, CmdArgs);
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+void openbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
+ ArgStringList CmdArgs;
+
+ switch (getToolChain().getArch()) {
+ case llvm::Triple::x86:
+ // When building 32-bit code on OpenBSD/amd64, we have to explicitly
+ // instruct the base system's 'as' to assemble 32-bit code.
+ CmdArgs.push_back("--32");
+ break;
+
+ case llvm::Triple::ppc:
+ CmdArgs.push_back("-mppc");
+ CmdArgs.push_back("-many");
+ break;
+
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcel: {
+ CmdArgs.push_back("-32");
+ std::string CPU = getCPUName(Args, getToolChain().getTriple());
+ CmdArgs.push_back(getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
+ break;
+ }
+
+ case llvm::Triple::sparcv9: {
+ CmdArgs.push_back("-64");
+ std::string CPU = getCPUName(Args, getToolChain().getTriple());
+ CmdArgs.push_back(getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
+ break;
+ }
+
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el: {
+ StringRef CPUName;
+ StringRef ABIName;
+ mips::getMipsCPUAndABI(Args, getToolChain().getTriple(), CPUName, ABIName);
+
+ CmdArgs.push_back("-mabi");
+ CmdArgs.push_back(getGnuCompatibleMipsABIName(ABIName).data());
+
+ if (getToolChain().getArch() == llvm::Triple::mips64)
+ CmdArgs.push_back("-EB");
+ else
+ CmdArgs.push_back("-EL");
+
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (const auto &II : Inputs)
+ CmdArgs.push_back(II.getFilename());
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo". Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+
+ if (getToolChain().getArch() == llvm::Triple::mips64)
+ CmdArgs.push_back("-EB");
+ else if (getToolChain().getArch() == llvm::Triple::mips64el)
+ CmdArgs.push_back("-EL");
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_shared)) {
+ CmdArgs.push_back("-e");
+ CmdArgs.push_back("__start");
+ }
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-Bstatic");
+ } else {
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+ CmdArgs.push_back("--eh-frame-hdr");
+ CmdArgs.push_back("-Bdynamic");
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-shared");
+ } else {
+ CmdArgs.push_back("-dynamic-linker");
+ CmdArgs.push_back("/usr/libexec/ld.so");
+ }
+ }
+
+ if (Args.hasArg(options::OPT_nopie))
+ CmdArgs.push_back("-nopie");
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared)) {
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("gcrt0.o")));
+ else
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crt0.o")));
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
+ } else {
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtbeginS.o")));
+ }
+ }
+
+ std::string Triple = getToolChain().getTripleString();
+ if (Triple.substr(0, 6) == "x86_64")
+ Triple.replace(0, 6, "amd64");
+ CmdArgs.push_back(
+ Args.MakeArgString("-L/usr/lib/gcc-lib/" + Triple + "/4.2.1"));
+
+ Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
+ options::OPT_e, options::OPT_s, options::OPT_t,
+ options::OPT_Z_Flag, options::OPT_r});
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ if (D.CCCIsCXX()) {
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lm_p");
+ else
+ CmdArgs.push_back("-lm");
+ }
+
+ // FIXME: For some reason GCC passes -lgcc before adding
+ // the default system libraries. Just mimic this for now.
+ CmdArgs.push_back("-lgcc");
+
+ if (Args.hasArg(options::OPT_pthread)) {
+ if (!Args.hasArg(options::OPT_shared) && Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lpthread_p");
+ else
+ CmdArgs.push_back("-lpthread");
+ }
+
+ if (!Args.hasArg(options::OPT_shared)) {
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lc_p");
+ else
+ CmdArgs.push_back("-lc");
+ }
+
+ CmdArgs.push_back("-lgcc");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
+ else
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtendS.o")));
+ }
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
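+
+// Illustrative sketch (not part of this change): a default dynamic
+// `clang hello.o` on OpenBSD/amd64 comes out approximately as:
+//
+//   ld -e __start --eh-frame-hdr -Bdynamic -dynamic-linker \
+//      /usr/libexec/ld.so -o a.out crt0.o crtbegin.o \
+//      -L/usr/lib/gcc-lib/<amd64-triple>/4.2.1 hello.o \
+//      -lgcc -lc -lgcc crtend.o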
+
+void bitrig::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
+ ArgStringList CmdArgs;
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (const auto &II : Inputs)
+ CmdArgs.push_back(II.getFilename());
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+void bitrig::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_shared)) {
+ CmdArgs.push_back("-e");
+ CmdArgs.push_back("__start");
+ }
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-Bstatic");
+ } else {
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+ CmdArgs.push_back("--eh-frame-hdr");
+ CmdArgs.push_back("-Bdynamic");
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-shared");
+ } else {
+ CmdArgs.push_back("-dynamic-linker");
+ CmdArgs.push_back("/usr/libexec/ld.so");
+ }
+ }
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared)) {
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("gcrt0.o")));
+ else
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crt0.o")));
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
+ } else {
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtbeginS.o")));
+ }
+ }
+
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_L, options::OPT_T_Group, options::OPT_e});
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ if (D.CCCIsCXX()) {
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lm_p");
+ else
+ CmdArgs.push_back("-lm");
+ }
+
+ if (Args.hasArg(options::OPT_pthread)) {
+ if (!Args.hasArg(options::OPT_shared) && Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lpthread_p");
+ else
+ CmdArgs.push_back("-lpthread");
+ }
+
+ if (!Args.hasArg(options::OPT_shared)) {
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lc_p");
+ else
+ CmdArgs.push_back("-lc");
+ }
+
+ StringRef MyArch;
+ switch (getToolChain().getArch()) {
+ case llvm::Triple::arm:
+ MyArch = "arm";
+ break;
+ case llvm::Triple::x86:
+ MyArch = "i386";
+ break;
+ case llvm::Triple::x86_64:
+ MyArch = "amd64";
+ break;
+ default:
+ llvm_unreachable("Unsupported architecture");
+ }
+ CmdArgs.push_back(Args.MakeArgString("-lclang_rt." + MyArch));
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
+ else
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtendS.o")));
+ }
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
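+
+// Illustrative sketch (not part of this change): Bitrig links the compiler-rt
+// builtins instead of libgcc, so a plain C link on x86_64 ends roughly with:
+//
+//   ... hello.o -lc -lclang_rt.amd64 crtend.o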
+
+void freebsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
+ ArgStringList CmdArgs;
+
+ // When building 32-bit code on FreeBSD/amd64, we have to explicitly
+ // instruct as(1) in the base system to assemble 32-bit code.
+ switch (getToolChain().getArch()) {
+ default:
+ break;
+ case llvm::Triple::x86:
+ CmdArgs.push_back("--32");
+ break;
+ case llvm::Triple::ppc:
+ CmdArgs.push_back("-a32");
+ break;
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el: {
+ StringRef CPUName;
+ StringRef ABIName;
+ mips::getMipsCPUAndABI(Args, getToolChain().getTriple(), CPUName, ABIName);
+
+ CmdArgs.push_back("-march");
+ CmdArgs.push_back(CPUName.data());
+
+ CmdArgs.push_back("-mabi");
+ CmdArgs.push_back(getGnuCompatibleMipsABIName(ABIName).data());
+
+ if (getToolChain().getArch() == llvm::Triple::mips ||
+ getToolChain().getArch() == llvm::Triple::mips64)
+ CmdArgs.push_back("-EB");
+ else
+ CmdArgs.push_back("-EL");
+
+ if (Arg *A = Args.getLastArg(options::OPT_G)) {
+ StringRef v = A->getValue();
+ CmdArgs.push_back(Args.MakeArgString("-G" + v));
+ A->claim();
+ }
+
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
+ break;
+ }
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb: {
+ arm::FloatABI ABI = arm::getARMFloatABI(getToolChain(), Args);
+
+ if (ABI == arm::FloatABI::Hard)
+ CmdArgs.push_back("-mfpu=vfp");
+ else
+ CmdArgs.push_back("-mfpu=softvfp");
+
+ switch (getToolChain().getTriple().getEnvironment()) {
+ case llvm::Triple::GNUEABIHF:
+ case llvm::Triple::GNUEABI:
+ case llvm::Triple::EABI:
+ CmdArgs.push_back("-meabi=5");
+ break;
+
+ default:
+ CmdArgs.push_back("-matpcs");
+ }
+ break;
+ }
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcel:
+ case llvm::Triple::sparcv9: {
+ std::string CPU = getCPUName(Args, getToolChain().getTriple());
+ CmdArgs.push_back(getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
+ break;
+ }
+ }
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (const auto &II : Inputs)
+ CmdArgs.push_back(II.getFilename());
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const toolchains::FreeBSD &ToolChain =
+ static_cast<const toolchains::FreeBSD &>(getToolChain());
+ const Driver &D = ToolChain.getDriver();
+ const llvm::Triple::ArchType Arch = ToolChain.getArch();
+ const bool IsPIE =
+ !Args.hasArg(options::OPT_shared) &&
+ (Args.hasArg(options::OPT_pie) || ToolChain.isPIEDefault());
+ ArgStringList CmdArgs;
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo". Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ if (IsPIE)
+ CmdArgs.push_back("-pie");
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-Bstatic");
+ } else {
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+ CmdArgs.push_back("--eh-frame-hdr");
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-Bshareable");
+ } else {
+ CmdArgs.push_back("-dynamic-linker");
+ CmdArgs.push_back("/libexec/ld-elf.so.1");
+ }
+ if (ToolChain.getTriple().getOSMajorVersion() >= 9) {
+ if (Arch == llvm::Triple::arm || Arch == llvm::Triple::sparc ||
+ Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64) {
+ CmdArgs.push_back("--hash-style=both");
+ }
+ }
+ CmdArgs.push_back("--enable-new-dtags");
+ }
+
+ // When building 32-bit code on FreeBSD/amd64, we have to explicitly
+ // instruct ld(1) in the base system to link 32-bit code.
+ if (Arch == llvm::Triple::x86) {
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf_i386_fbsd");
+ }
+
+ if (Arch == llvm::Triple::ppc) {
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf32ppc_fbsd");
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_G)) {
+ if (ToolChain.getArch() == llvm::Triple::mips ||
+ ToolChain.getArch() == llvm::Triple::mipsel ||
+ ToolChain.getArch() == llvm::Triple::mips64 ||
+ ToolChain.getArch() == llvm::Triple::mips64el) {
+ StringRef v = A->getValue();
+ CmdArgs.push_back(Args.MakeArgString("-G" + v));
+ A->claim();
+ }
+ }
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ const char *crt1 = nullptr;
+ if (!Args.hasArg(options::OPT_shared)) {
+ if (Args.hasArg(options::OPT_pg))
+ crt1 = "gcrt1.o";
+ else if (IsPIE)
+ crt1 = "Scrt1.o";
+ else
+ crt1 = "crt1.o";
+ }
+ if (crt1)
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crt1)));
+
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
+
+ const char *crtbegin = nullptr;
+ if (Args.hasArg(options::OPT_static))
+ crtbegin = "crtbeginT.o";
+ else if (Args.hasArg(options::OPT_shared) || IsPIE)
+ crtbegin = "crtbeginS.o";
+ else
+ crtbegin = "crtbegin.o";
+
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_e);
+ Args.AddAllArgs(CmdArgs, options::OPT_s);
+ Args.AddAllArgs(CmdArgs, options::OPT_t);
+ Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
+ Args.AddAllArgs(CmdArgs, options::OPT_r);
+
+ if (D.isUsingLTO())
+ AddGoldPlugin(ToolChain, Args, CmdArgs, D.getLTOMode() == LTOK_Thin);
+
+ bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ addOpenMPRuntime(CmdArgs, ToolChain, Args);
+ if (D.CCCIsCXX()) {
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lm_p");
+ else
+ CmdArgs.push_back("-lm");
+ }
+ if (NeedsSanitizerDeps)
+ linkSanitizerRuntimeDeps(ToolChain, CmdArgs);
+ // FIXME: For some reason GCC passes -lgcc and -lgcc_s before adding
+ // the default system libraries. Just mimic this for now.
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lgcc_p");
+ else
+ CmdArgs.push_back("-lgcc");
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-lgcc_eh");
+ } else if (Args.hasArg(options::OPT_pg)) {
+ CmdArgs.push_back("-lgcc_eh_p");
+ } else {
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lgcc_s");
+ CmdArgs.push_back("--no-as-needed");
+ }
+
+ if (Args.hasArg(options::OPT_pthread)) {
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lpthread_p");
+ else
+ CmdArgs.push_back("-lpthread");
+ }
+
+ if (Args.hasArg(options::OPT_pg)) {
+ if (Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back("-lc");
+ else
+ CmdArgs.push_back("-lc_p");
+ CmdArgs.push_back("-lgcc_p");
+ } else {
+ CmdArgs.push_back("-lc");
+ CmdArgs.push_back("-lgcc");
+ }
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-lgcc_eh");
+ } else if (Args.hasArg(options::OPT_pg)) {
+ CmdArgs.push_back("-lgcc_eh_p");
+ } else {
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lgcc_s");
+ CmdArgs.push_back("--no-as-needed");
+ }
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (Args.hasArg(options::OPT_shared) || IsPIE)
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtendS.o")));
+ else
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
+ }
+
+ ToolChain.addProfileRTLibs(Args, CmdArgs);
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
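+
+// Illustrative sketch (not part of this change): on FreeBSD >= 9 for x86_64,
+// a default dynamic `clang hello.o` link resembles:
+//
+//   ld --eh-frame-hdr -dynamic-linker /libexec/ld-elf.so.1 --hash-style=both \
+//      --enable-new-dtags -o a.out crt1.o crti.o crtbegin.o hello.o \
+//      -lgcc --as-needed -lgcc_s --no-as-needed -lc -lgcc \
+//      --as-needed -lgcc_s --no-as-needed crtend.o crtn.o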
+
+void netbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
+ ArgStringList CmdArgs;
+
+ // GNU as needs different flags for creating the correct output format
+ // on architectures with different ABIs or optional feature sets.
+ switch (getToolChain().getArch()) {
+ case llvm::Triple::x86:
+ CmdArgs.push_back("--32");
+ break;
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb: {
+ StringRef MArch, MCPU;
+ getARMArchCPUFromArgs(Args, MArch, MCPU, /*FromAs*/ true);
+ std::string Arch =
+ arm::getARMTargetCPU(MCPU, MArch, getToolChain().getTriple());
+ CmdArgs.push_back(Args.MakeArgString("-mcpu=" + Arch));
+ break;
+ }
+
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el: {
+ StringRef CPUName;
+ StringRef ABIName;
+ mips::getMipsCPUAndABI(Args, getToolChain().getTriple(), CPUName, ABIName);
+
+ CmdArgs.push_back("-march");
+ CmdArgs.push_back(CPUName.data());
+
+ CmdArgs.push_back("-mabi");
+ CmdArgs.push_back(getGnuCompatibleMipsABIName(ABIName).data());
+
+ if (getToolChain().getArch() == llvm::Triple::mips ||
+ getToolChain().getArch() == llvm::Triple::mips64)
+ CmdArgs.push_back("-EB");
+ else
+ CmdArgs.push_back("-EL");
+
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
+ break;
+ }
+
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcel: {
+ CmdArgs.push_back("-32");
+ std::string CPU = getCPUName(Args, getToolChain().getTriple());
+ CmdArgs.push_back(getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
+ break;
+ }
+
+ case llvm::Triple::sparcv9: {
+ CmdArgs.push_back("-64");
+ std::string CPU = getCPUName(Args, getToolChain().getTriple());
+ CmdArgs.push_back(getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (const auto &II : Inputs)
+ CmdArgs.push_back(II.getFilename());
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ CmdArgs.push_back("--eh-frame-hdr");
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-Bstatic");
+ } else {
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-Bshareable");
+ } else {
+ CmdArgs.push_back("-dynamic-linker");
+ CmdArgs.push_back("/libexec/ld.elf_so");
+ }
+ }
+
+ // Many NetBSD architectures support more than one ABI.
+ // Determine the correct emulation for ld.
+ switch (getToolChain().getArch()) {
+ case llvm::Triple::x86:
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf_i386");
+ break;
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ CmdArgs.push_back("-m");
+ switch (getToolChain().getTriple().getEnvironment()) {
+ case llvm::Triple::EABI:
+ case llvm::Triple::GNUEABI:
+ CmdArgs.push_back("armelf_nbsd_eabi");
+ break;
+ case llvm::Triple::EABIHF:
+ case llvm::Triple::GNUEABIHF:
+ CmdArgs.push_back("armelf_nbsd_eabihf");
+ break;
+ default:
+ CmdArgs.push_back("armelf_nbsd");
+ break;
+ }
+ break;
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumbeb:
+ arm::appendEBLinkFlags(
+ Args, CmdArgs,
+ llvm::Triple(getToolChain().ComputeEffectiveClangTriple(Args)));
+ CmdArgs.push_back("-m");
+ switch (getToolChain().getTriple().getEnvironment()) {
+ case llvm::Triple::EABI:
+ case llvm::Triple::GNUEABI:
+ CmdArgs.push_back("armelfb_nbsd_eabi");
+ break;
+ case llvm::Triple::EABIHF:
+ case llvm::Triple::GNUEABIHF:
+ CmdArgs.push_back("armelfb_nbsd_eabihf");
+ break;
+ default:
+ CmdArgs.push_back("armelfb_nbsd");
+ break;
+ }
+ break;
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ if (mips::hasMipsAbiArg(Args, "32")) {
+ CmdArgs.push_back("-m");
+ if (getToolChain().getArch() == llvm::Triple::mips64)
+ CmdArgs.push_back("elf32btsmip");
+ else
+ CmdArgs.push_back("elf32ltsmip");
+ } else if (mips::hasMipsAbiArg(Args, "64")) {
+ CmdArgs.push_back("-m");
+ if (getToolChain().getArch() == llvm::Triple::mips64)
+ CmdArgs.push_back("elf64btsmip");
+ else
+ CmdArgs.push_back("elf64ltsmip");
+ }
+ break;
+ case llvm::Triple::ppc:
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf32ppc_nbsd");
+ break;
+
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf64ppc");
+ break;
+
+ case llvm::Triple::sparc:
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf32_sparc");
+ break;
+
+ case llvm::Triple::sparcv9:
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf64_sparc");
+ break;
+
+ default:
+ break;
+ }
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crt0.o")));
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
+ } else {
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtbeginS.o")));
+ }
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_e);
+ Args.AddAllArgs(CmdArgs, options::OPT_s);
+ Args.AddAllArgs(CmdArgs, options::OPT_t);
+ Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
+ Args.AddAllArgs(CmdArgs, options::OPT_r);
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ unsigned Major, Minor, Micro;
+ getToolChain().getTriple().getOSVersion(Major, Minor, Micro);
+ bool useLibgcc = true;
+ if (Major >= 7 || (Major == 6 && Minor == 99 && Micro >= 49) || Major == 0) {
+ switch (getToolChain().getArch()) {
+ case llvm::Triple::aarch64:
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ useLibgcc = false;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ addOpenMPRuntime(CmdArgs, getToolChain(), Args);
+ if (D.CCCIsCXX()) {
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ CmdArgs.push_back("-lm");
+ }
+ if (Args.hasArg(options::OPT_pthread))
+ CmdArgs.push_back("-lpthread");
+ CmdArgs.push_back("-lc");
+
+ if (useLibgcc) {
+ if (Args.hasArg(options::OPT_static)) {
+ // libgcc_eh depends on libc, so resolve as much as possible,
+ // pull in any new requirements from libc and then get the rest
+ // of libgcc.
+ CmdArgs.push_back("-lgcc_eh");
+ CmdArgs.push_back("-lc");
+ CmdArgs.push_back("-lgcc");
+ } else {
+ CmdArgs.push_back("-lgcc");
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lgcc_s");
+ CmdArgs.push_back("--no-as-needed");
+ }
+ }
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
+ else
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtendS.o")));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crtn.o")));
+ }
+
+ getToolChain().addProfileRTLibs(Args, CmdArgs);
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
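+
+// Illustrative sketch (not part of this change): on NetBSD 7 or newer for the
+// architectures listed above, libgcc is skipped entirely, so a plain dynamic
+// C link reduces to roughly:
+//
+//   ld --eh-frame-hdr -dynamic-linker /libexec/ld.elf_so -o a.out \
+//      crt0.o crti.o crtbegin.o hello.o -lc crtend.o crtn.o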
+
+void gnutools::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
+
+ std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
+ llvm::Triple Triple = llvm::Triple(TripleStr);
+
+ ArgStringList CmdArgs;
+
+ llvm::Reloc::Model RelocationModel;
+ unsigned PICLevel;
+ bool IsPIE;
+ std::tie(RelocationModel, PICLevel, IsPIE) =
+ ParsePICArgs(getToolChain(), Triple, Args);
+
+ switch (getToolChain().getArch()) {
+ default:
+ break;
+ // Add --32/--64 to make sure we get the format we want.
+ // This is incomplete; not every architecture is handled yet.
+ case llvm::Triple::x86:
+ CmdArgs.push_back("--32");
+ break;
+ case llvm::Triple::x86_64:
+ if (getToolChain().getTriple().getEnvironment() == llvm::Triple::GNUX32)
+ CmdArgs.push_back("--x32");
+ else
+ CmdArgs.push_back("--64");
+ break;
+ case llvm::Triple::ppc:
+ CmdArgs.push_back("-a32");
+ CmdArgs.push_back("-mppc");
+ CmdArgs.push_back("-many");
+ break;
+ case llvm::Triple::ppc64:
+ CmdArgs.push_back("-a64");
+ CmdArgs.push_back("-mppc64");
+ CmdArgs.push_back("-many");
+ break;
+ case llvm::Triple::ppc64le:
+ CmdArgs.push_back("-a64");
+ CmdArgs.push_back("-mppc64");
+ CmdArgs.push_back("-many");
+ CmdArgs.push_back("-mlittle-endian");
+ break;
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcel: {
+ CmdArgs.push_back("-32");
+ std::string CPU = getCPUName(Args, getToolChain().getTriple());
+ CmdArgs.push_back(getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
+ break;
+ }
+ case llvm::Triple::sparcv9: {
+ CmdArgs.push_back("-64");
+ std::string CPU = getCPUName(Args, getToolChain().getTriple());
+ CmdArgs.push_back(getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
+ break;
+ }
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb: {
+ const llvm::Triple &Triple2 = getToolChain().getTriple();
+ switch (Triple2.getSubArch()) {
+ case llvm::Triple::ARMSubArch_v7:
+ CmdArgs.push_back("-mfpu=neon");
+ break;
+ case llvm::Triple::ARMSubArch_v8:
+ CmdArgs.push_back("-mfpu=crypto-neon-fp-armv8");
+ break;
+ default:
+ break;
+ }
+
+ switch (arm::getARMFloatABI(getToolChain(), Args)) {
+ case arm::FloatABI::Invalid: llvm_unreachable("must have an ABI!");
+ case arm::FloatABI::Soft:
+ CmdArgs.push_back(Args.MakeArgString("-mfloat-abi=soft"));
+ break;
+ case arm::FloatABI::SoftFP:
+ CmdArgs.push_back(Args.MakeArgString("-mfloat-abi=softfp"));
+ break;
+ case arm::FloatABI::Hard:
+ CmdArgs.push_back(Args.MakeArgString("-mfloat-abi=hard"));
+ break;
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_march_EQ);
+
+ // FIXME: Remove the krait check when GNU tools support the krait CPU.
+ // For now, replace it with -march=armv7-a to avoid a lower -march value
+ // from being picked in the absence of a CPU flag.
+ Arg *A;
+ if ((A = Args.getLastArg(options::OPT_mcpu_EQ)) &&
+ StringRef(A->getValue()).lower() == "krait")
+ CmdArgs.push_back("-march=armv7-a");
+ else
+ Args.AddLastArg(CmdArgs, options::OPT_mcpu_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_mfpu_EQ);
+ break;
+ }
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el: {
+ StringRef CPUName;
+ StringRef ABIName;
+ mips::getMipsCPUAndABI(Args, getToolChain().getTriple(), CPUName, ABIName);
+ ABIName = getGnuCompatibleMipsABIName(ABIName);
+
+ CmdArgs.push_back("-march");
+ CmdArgs.push_back(CPUName.data());
+
+ CmdArgs.push_back("-mabi");
+ CmdArgs.push_back(ABIName.data());
+
+ // -mno-shared should be emitted unless -fpic, -fpie, -fPIC, -fPIE,
+ // or -mshared (not implemented) is in effect.
+ if (RelocationModel == llvm::Reloc::Static)
+ CmdArgs.push_back("-mno-shared");
+
+ // LLVM doesn't support -mplt yet and acts as if it is always given.
+ // However, -mplt has no effect with the N64 ABI.
+ CmdArgs.push_back(ABIName == "64" ? "-KPIC" : "-call_nonpic");
+
+ if (getToolChain().getArch() == llvm::Triple::mips ||
+ getToolChain().getArch() == llvm::Triple::mips64)
+ CmdArgs.push_back("-EB");
+ else
+ CmdArgs.push_back("-EL");
+
+ if (Arg *A = Args.getLastArg(options::OPT_mnan_EQ)) {
+ if (StringRef(A->getValue()) == "2008")
+ CmdArgs.push_back(Args.MakeArgString("-mnan=2008"));
+ }
+
+ // Add the last -mfp32/-mfpxx/-mfp64 or -mfpxx if it is enabled by default.
+ if (Arg *A = Args.getLastArg(options::OPT_mfp32, options::OPT_mfpxx,
+ options::OPT_mfp64)) {
+ A->claim();
+ A->render(Args, CmdArgs);
+ } else if (mips::shouldUseFPXX(
+ Args, getToolChain().getTriple(), CPUName, ABIName,
+ getMipsFloatABI(getToolChain().getDriver(), Args)))
+ CmdArgs.push_back("-mfpxx");
+
+ // Pass on -mmips16 or -mno-mips16. However, the assembler equivalent of
+ // -mno-mips16 is actually -no-mips16.
+ if (Arg *A =
+ Args.getLastArg(options::OPT_mips16, options::OPT_mno_mips16)) {
+ if (A->getOption().matches(options::OPT_mips16)) {
+ A->claim();
+ A->render(Args, CmdArgs);
+ } else {
+ A->claim();
+ CmdArgs.push_back("-no-mips16");
+ }
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_mmicromips,
+ options::OPT_mno_micromips);
+ Args.AddLastArg(CmdArgs, options::OPT_mdsp, options::OPT_mno_dsp);
+ Args.AddLastArg(CmdArgs, options::OPT_mdspr2, options::OPT_mno_dspr2);
+
+ if (Arg *A = Args.getLastArg(options::OPT_mmsa, options::OPT_mno_msa)) {
+ // Do not use AddLastArg because not all versions of the MIPS assembler
+ // support the -mmsa / -mno-msa options.
+ if (A->getOption().matches(options::OPT_mmsa))
+ CmdArgs.push_back(Args.MakeArgString("-mmsa"));
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_mhard_float,
+ options::OPT_msoft_float);
+
+ Args.AddLastArg(CmdArgs, options::OPT_mdouble_float,
+ options::OPT_msingle_float);
+
+ Args.AddLastArg(CmdArgs, options::OPT_modd_spreg,
+ options::OPT_mno_odd_spreg);
+
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
+ break;
+ }
+ case llvm::Triple::systemz: {
+ // Always pass an -march option, since our default of z10 is later
+ // than the GNU assembler's default.
+ StringRef CPUName = getSystemZTargetCPU(Args);
+ CmdArgs.push_back(Args.MakeArgString("-march=" + CPUName));
+ break;
+ }
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_I);
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (const auto &II : Inputs)
+ CmdArgs.push_back(II.getFilename());
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+
+ // Handle the debug info splitting at object creation time if we're
+ // creating an object.
+ // TODO: Currently this only works on Linux with a newer objcopy.
+ if (Args.hasArg(options::OPT_gsplit_dwarf) &&
+ getToolChain().getTriple().isOSLinux())
+ SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output,
+ SplitDebugName(Args, Inputs[0]));
+}
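+
+// Illustrative sketch (not part of this change): -Wa, and -Xassembler values
+// are forwarded verbatim after the target flags, so on x86_64-linux-gnu
+//
+//   clang -c -Wa,--fatal-warnings foo.s
+//
+// runs approximately: as --64 --fatal-warnings -o foo.o foo.s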
+
+static void AddLibgcc(const llvm::Triple &Triple, const Driver &D,
+ ArgStringList &CmdArgs, const ArgList &Args) {
+ bool isAndroid = Triple.isAndroid();
+ bool isCygMing = Triple.isOSCygMing();
+ bool StaticLibgcc = Args.hasArg(options::OPT_static_libgcc) ||
+ Args.hasArg(options::OPT_static);
+ if (!D.CCCIsCXX())
+ CmdArgs.push_back("-lgcc");
+
+ if (StaticLibgcc || isAndroid) {
+ if (D.CCCIsCXX())
+ CmdArgs.push_back("-lgcc");
+ } else {
+ if (!D.CCCIsCXX() && !isCygMing)
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lgcc_s");
+ if (!D.CCCIsCXX() && !isCygMing)
+ CmdArgs.push_back("--no-as-needed");
+ }
+
+ if (StaticLibgcc && !isAndroid)
+ CmdArgs.push_back("-lgcc_eh");
+ else if (!Args.hasArg(options::OPT_shared) && D.CCCIsCXX())
+ CmdArgs.push_back("-lgcc");
+
+ // According to Android ABI, we have to link with libdl if we are
+ // linking with non-static libgcc.
+ //
+ // NOTE: This fixes a link error on Android MIPS as well. The non-static
+ // libgcc for MIPS relies on _Unwind_Find_FDE and dl_iterate_phdr from libdl.
+ if (isAndroid && !StaticLibgcc)
+ CmdArgs.push_back("-ldl");
+}
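+
+// Illustrative sketch (not part of this change): for a C link, the cases
+// above produce these sequences:
+//
+//   default (shared libgcc):   -lgcc --as-needed -lgcc_s --no-as-needed
+//   -static/-static-libgcc:    -lgcc -lgcc_eh
+//   Android, non-static:       -lgcc -ldl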
+
+static std::string getLinuxDynamicLinker(const ArgList &Args,
+ const toolchains::Linux &ToolChain) {
+ const llvm::Triple::ArchType Arch = ToolChain.getArch();
+
+ if (ToolChain.getTriple().isAndroid()) {
+ if (ToolChain.getTriple().isArch64Bit())
+ return "/system/bin/linker64";
+ else
+ return "/system/bin/linker";
+ } else if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::sparc ||
+ Arch == llvm::Triple::sparcel)
+ return "/lib/ld-linux.so.2";
+ else if (Arch == llvm::Triple::aarch64)
+ return "/lib/ld-linux-aarch64.so.1";
+ else if (Arch == llvm::Triple::aarch64_be)
+ return "/lib/ld-linux-aarch64_be.so.1";
+ else if (Arch == llvm::Triple::arm || Arch == llvm::Triple::thumb) {
+ if (ToolChain.getTriple().getEnvironment() == llvm::Triple::GNUEABIHF ||
+ arm::getARMFloatABI(ToolChain, Args) == arm::FloatABI::Hard)
+ return "/lib/ld-linux-armhf.so.3";
+ else
+ return "/lib/ld-linux.so.3";
+ } else if (Arch == llvm::Triple::armeb || Arch == llvm::Triple::thumbeb) {
+ // TODO: check which dynamic linker name.
+ if (ToolChain.getTriple().getEnvironment() == llvm::Triple::GNUEABIHF ||
+ arm::getARMFloatABI(ToolChain, Args) == arm::FloatABI::Hard)
+ return "/lib/ld-linux-armhf.so.3";
+ else
+ return "/lib/ld-linux.so.3";
+ } else if (Arch == llvm::Triple::mips || Arch == llvm::Triple::mipsel ||
+ Arch == llvm::Triple::mips64 || Arch == llvm::Triple::mips64el) {
+ std::string LibDir =
+ "/lib" + mips::getMipsABILibSuffix(Args, ToolChain.getTriple());
+ StringRef LibName;
+ bool IsNaN2008 = mips::isNaN2008(Args, ToolChain.getTriple());
+ if (mips::isUCLibc(Args))
+ LibName = IsNaN2008 ? "ld-uClibc-mipsn8.so.0" : "ld-uClibc.so.0";
+ else if (!ToolChain.getTriple().hasEnvironment()) {
+ bool LE = (ToolChain.getTriple().getArch() == llvm::Triple::mipsel) ||
+ (ToolChain.getTriple().getArch() == llvm::Triple::mips64el);
+ LibName = LE ? "ld-musl-mipsel.so.1" : "ld-musl-mips.so.1";
+ } else
+ LibName = IsNaN2008 ? "ld-linux-mipsn8.so.1" : "ld.so.1";
+
+ return (LibDir + "/" + LibName).str();
+ } else if (Arch == llvm::Triple::ppc)
+ return "/lib/ld.so.1";
+ else if (Arch == llvm::Triple::ppc64) {
+ if (ppc::hasPPCAbiArg(Args, "elfv2"))
+ return "/lib64/ld64.so.2";
+ return "/lib64/ld64.so.1";
+ } else if (Arch == llvm::Triple::ppc64le) {
+ if (ppc::hasPPCAbiArg(Args, "elfv1"))
+ return "/lib64/ld64.so.1";
+ return "/lib64/ld64.so.2";
+ } else if (Arch == llvm::Triple::systemz)
+ return "/lib/ld64.so.1";
+ else if (Arch == llvm::Triple::sparcv9)
+ return "/lib64/ld-linux.so.2";
+ else if (Arch == llvm::Triple::x86_64 &&
+ ToolChain.getTriple().getEnvironment() == llvm::Triple::GNUX32)
+ return "/libx32/ld-linux-x32.so.2";
+ else
+ return "/lib64/ld-linux-x86-64.so.2";
+}
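+
+// Illustrative sketch (not part of this change): sample results of the
+// mapping above:
+//
+//   x86_64-unknown-linux-gnu      -> /lib64/ld-linux-x86-64.so.2
+//   x86_64-unknown-linux-gnux32   -> /libx32/ld-linux-x32.so.2
+//   armv7-unknown-linux-gnueabihf -> /lib/ld-linux-armhf.so.3
+//   aarch64-unknown-linux-android -> /system/bin/linker64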
+
+static void AddRunTimeLibs(const ToolChain &TC, const Driver &D,
+ ArgStringList &CmdArgs, const ArgList &Args) {
+ // Pick the runtime library (compiler-rt or libgcc) based on --rtlib.
+ ToolChain::RuntimeLibType RLT = TC.GetRuntimeLibType(Args);
+
+ switch (RLT) {
+ case ToolChain::RLT_CompilerRT:
+ switch (TC.getTriple().getOS()) {
+ default:
+ llvm_unreachable("unsupported OS");
+ case llvm::Triple::Win32:
+ case llvm::Triple::Linux:
+ addClangRT(TC, Args, CmdArgs);
+ break;
+ }
+ break;
+ case ToolChain::RLT_Libgcc:
+ AddLibgcc(TC.getTriple(), D, CmdArgs, Args);
+ break;
+ }
+}
+
+static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
+ switch (T.getArch()) {
+ case llvm::Triple::x86:
+ return "elf_i386";
+ case llvm::Triple::aarch64:
+ return "aarch64linux";
+ case llvm::Triple::aarch64_be:
+ return "aarch64_be_linux";
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ return "armelf_linux_eabi";
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumbeb:
+ return "armebelf_linux_eabi"; /* TODO: check which NAME. */
+ case llvm::Triple::ppc:
+ return "elf32ppclinux";
+ case llvm::Triple::ppc64:
+ return "elf64ppc";
+ case llvm::Triple::ppc64le:
+ return "elf64lppc";
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcel:
+ return "elf32_sparc";
+ case llvm::Triple::sparcv9:
+ return "elf64_sparc";
+ case llvm::Triple::mips:
+ return "elf32btsmip";
+ case llvm::Triple::mipsel:
+ return "elf32ltsmip";
+ case llvm::Triple::mips64:
+ if (mips::hasMipsAbiArg(Args, "n32"))
+ return "elf32btsmipn32";
+ return "elf64btsmip";
+ case llvm::Triple::mips64el:
+ if (mips::hasMipsAbiArg(Args, "n32"))
+ return "elf32ltsmipn32";
+ return "elf64ltsmip";
+ case llvm::Triple::systemz:
+ return "elf64_s390";
+ case llvm::Triple::x86_64:
+ if (T.getEnvironment() == llvm::Triple::GNUX32)
+ return "elf32_x86_64";
+ return "elf_x86_64";
+ default:
+ llvm_unreachable("Unexpected arch");
+ }
+}
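+
+// Illustrative sketch (not part of this change): sample emulations selected
+// by getLDMOption:
+//
+//   x86_64-unknown-linux-gnu             -> elf_x86_64
+//   x86_64-unknown-linux-gnux32          -> elf32_x86_64
+//   mips64el-unknown-linux (-mabi=n32)   -> elf32ltsmipn32
+//   sparcv9-unknown-linux-gnu            -> elf64_sparc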
+
+void gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const toolchains::Linux &ToolChain =
+ static_cast<const toolchains::Linux &>(getToolChain());
+ const Driver &D = ToolChain.getDriver();
+
+ std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
+ llvm::Triple Triple = llvm::Triple(TripleStr);
+
+ const llvm::Triple::ArchType Arch = ToolChain.getArch();
+ const bool isAndroid = ToolChain.getTriple().isAndroid();
+ const bool IsPIE =
+ !Args.hasArg(options::OPT_shared) && !Args.hasArg(options::OPT_static) &&
+ (Args.hasArg(options::OPT_pie) || ToolChain.isPIEDefault());
+ const bool HasCRTBeginEndFiles =
+ ToolChain.getTriple().hasEnvironment() ||
+ (ToolChain.getTriple().getVendor() != llvm::Triple::MipsTechnologies);
+
+ ArgStringList CmdArgs;
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo". Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+
+ const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
+ if (llvm::sys::path::filename(Exec) == "lld") {
+ CmdArgs.push_back("-flavor");
+ CmdArgs.push_back("old-gnu");
+ CmdArgs.push_back("-target");
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().getTripleString()));
+ }
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ if (IsPIE)
+ CmdArgs.push_back("-pie");
+
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+
+ if (Args.hasArg(options::OPT_s))
+ CmdArgs.push_back("-s");
+
+ if (Arch == llvm::Triple::armeb || Arch == llvm::Triple::thumbeb)
+ arm::appendEBLinkFlags(Args, CmdArgs, Triple);
+
+ for (const auto &Opt : ToolChain.ExtraOpts)
+ CmdArgs.push_back(Opt.c_str());
+
+ if (!Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("--eh-frame-hdr");
+ }
+
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back(getLDMOption(ToolChain.getTriple(), Args));
+
+ if (Args.hasArg(options::OPT_static)) {
+ if (Arch == llvm::Triple::arm || Arch == llvm::Triple::armeb ||
+ Arch == llvm::Triple::thumb || Arch == llvm::Triple::thumbeb)
+ CmdArgs.push_back("-Bstatic");
+ else
+ CmdArgs.push_back("-static");
+ } else if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-shared");
+ }
+
+ if (Arch == llvm::Triple::arm || Arch == llvm::Triple::armeb ||
+ Arch == llvm::Triple::thumb || Arch == llvm::Triple::thumbeb ||
+ (!Args.hasArg(options::OPT_static) &&
+ !Args.hasArg(options::OPT_shared))) {
+ CmdArgs.push_back("-dynamic-linker");
+ CmdArgs.push_back(Args.MakeArgString(
+ D.DyldPrefix + getLinuxDynamicLinker(Args, ToolChain)));
+ }
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!isAndroid) {
+ const char *crt1 = nullptr;
+ if (!Args.hasArg(options::OPT_shared)) {
+ if (Args.hasArg(options::OPT_pg))
+ crt1 = "gcrt1.o";
+ else if (IsPIE)
+ crt1 = "Scrt1.o";
+ else
+ crt1 = "crt1.o";
+ }
+ if (crt1)
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crt1)));
+
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
+ }
+
+ const char *crtbegin;
+ if (Args.hasArg(options::OPT_static))
+ crtbegin = isAndroid ? "crtbegin_static.o" : "crtbeginT.o";
+ else if (Args.hasArg(options::OPT_shared))
+ crtbegin = isAndroid ? "crtbegin_so.o" : "crtbeginS.o";
+ else if (IsPIE)
+ crtbegin = isAndroid ? "crtbegin_dynamic.o" : "crtbeginS.o";
+ else
+ crtbegin = isAndroid ? "crtbegin_dynamic.o" : "crtbegin.o";
+
+ if (HasCRTBeginEndFiles)
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
+
+ // Add crtfastmath.o if available and fast math is enabled.
+ ToolChain.AddFastMathRuntimeIfAvailable(Args, CmdArgs);
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ Args.AddAllArgs(CmdArgs, options::OPT_u);
+
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+
+ if (D.isUsingLTO())
+ AddGoldPlugin(ToolChain, Args, CmdArgs, D.getLTOMode() == LTOK_Thin);
+
+ if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
+ CmdArgs.push_back("--no-demangle");
+
+ bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
+ // The profile runtime also needs access to system libraries.
+ getToolChain().addProfileRTLibs(Args, CmdArgs);
+
+ if (D.CCCIsCXX() &&
+ !Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ bool OnlyLibstdcxxStatic = Args.hasArg(options::OPT_static_libstdcxx) &&
+ !Args.hasArg(options::OPT_static);
+ if (OnlyLibstdcxxStatic)
+ CmdArgs.push_back("-Bstatic");
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (OnlyLibstdcxxStatic)
+ CmdArgs.push_back("-Bdynamic");
+ CmdArgs.push_back("-lm");
+ }
+ // Silence warnings when linking C code with a C++ '-stdlib' argument.
+ Args.ClaimAllArgs(options::OPT_stdlib_EQ);
+
+ if (!Args.hasArg(options::OPT_nostdlib)) {
+ if (!Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("--start-group");
+
+ if (NeedsSanitizerDeps)
+ linkSanitizerRuntimeDeps(ToolChain, CmdArgs);
+
+ bool WantPthread = Args.hasArg(options::OPT_pthread) ||
+ Args.hasArg(options::OPT_pthreads);
+
+ if (Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
+ options::OPT_fno_openmp, false)) {
+ // The OpenMP runtimes imply pthreads when using the GNU toolchain.
+ // FIXME: Does this really make sense for all GNU toolchains?
+ WantPthread = true;
+
+ // Also link the particular OpenMP runtimes.
+ switch (getOpenMPRuntime(ToolChain, Args)) {
+ case OMPRT_OMP:
+ CmdArgs.push_back("-lomp");
+ break;
+ case OMPRT_GOMP:
+ CmdArgs.push_back("-lgomp");
+
+ // FIXME: Exclude this for platforms with libgomp that don't require
+ // librt. Most modern Linux platforms require it, but some may not.
+ CmdArgs.push_back("-lrt");
+ break;
+ case OMPRT_IOMP5:
+ CmdArgs.push_back("-liomp5");
+ break;
+ case OMPRT_Unknown:
+ // Already diagnosed.
+ break;
+ }
+ }
+
+ AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
+
+ if (WantPthread && !isAndroid)
+ CmdArgs.push_back("-lpthread");
+
+ CmdArgs.push_back("-lc");
+
+ if (Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("--end-group");
+ else
+ AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
+ }
+
+ if (!Args.hasArg(options::OPT_nostartfiles)) {
+ const char *crtend;
+ if (Args.hasArg(options::OPT_shared))
+ crtend = isAndroid ? "crtend_so.o" : "crtendS.o";
+ else if (IsPIE)
+ crtend = isAndroid ? "crtend_android.o" : "crtendS.o";
+ else
+ crtend = isAndroid ? "crtend_android.o" : "crtend.o";
+
+ if (HasCRTBeginEndFiles)
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend)));
+ if (!isAndroid)
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
+ }
+ }
+
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
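+
+// Illustrative sketch (not part of this change): a default non-PIE dynamic C
+// link on x86_64-linux-gnu comes out approximately as:
+//
+//   ld --eh-frame-hdr -m elf_x86_64 -dynamic-linker \
+//      /lib64/ld-linux-x86-64.so.2 -o a.out crt1.o crti.o crtbegin.o \
+//      hello.o -lgcc --as-needed -lgcc_s --no-as-needed -lc \
+//      -lgcc --as-needed -lgcc_s --no-as-needed crtend.o crtn.o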
+
+// NaCl ARM assembly (inline or standalone) can be written with a set of macros
+// for the various SFI requirements like register masking. The assembly tool
+// inserts the file containing the macros as an input into all the assembly
+// jobs.
+void nacltools::AssemblerARM::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const toolchains::NaClToolChain &ToolChain =
+ static_cast<const toolchains::NaClToolChain &>(getToolChain());
+ InputInfo NaClMacros(ToolChain.GetNaClArmMacrosPath(), types::TY_PP_Asm,
+ "nacl-arm-macros.s");
+ InputInfoList NewInputs;
+ NewInputs.push_back(NaClMacros);
+ NewInputs.append(Inputs.begin(), Inputs.end());
+ gnutools::Assembler::ConstructJob(C, JA, Output, NewInputs, Args,
+ LinkingOutput);
+}
+
+// This is quite similar to gnutools::Linker::ConstructJob, with the changes
+// that we link statically by default, do not yet support sanitizers or LTO,
+// and a few others. Eventually we can support more of that and hopefully
+// migrate back to gnutools::Linker.
+void nacltools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+
+ const toolchains::NaClToolChain &ToolChain =
+ static_cast<const toolchains::NaClToolChain &>(getToolChain());
+ const Driver &D = ToolChain.getDriver();
+ const llvm::Triple::ArchType Arch = ToolChain.getArch();
+ const bool IsStatic =
+ !Args.hasArg(options::OPT_dynamic) && !Args.hasArg(options::OPT_shared);
+
+ ArgStringList CmdArgs;
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo". Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+
+ if (Args.hasArg(options::OPT_s))
+ CmdArgs.push_back("-s");
+
+ // NaClToolChain doesn't have ExtraOpts like Linux; the only relevant flag
+ // from there is --build-id, which we do want.
+ CmdArgs.push_back("--build-id");
+
+ if (!IsStatic)
+ CmdArgs.push_back("--eh-frame-hdr");
+
+ CmdArgs.push_back("-m");
+ if (Arch == llvm::Triple::x86)
+ CmdArgs.push_back("elf_i386_nacl");
+ else if (Arch == llvm::Triple::arm)
+ CmdArgs.push_back("armelf_nacl");
+ else if (Arch == llvm::Triple::x86_64)
+ CmdArgs.push_back("elf_x86_64_nacl");
+ else if (Arch == llvm::Triple::mipsel)
+ CmdArgs.push_back("mipselelf_nacl");
+ else
+ D.Diag(diag::err_target_unsupported_arch) << ToolChain.getArchName()
+ << "Native Client";
+
+ if (IsStatic)
+ CmdArgs.push_back("-static");
+ else if (Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back("-shared");
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt1.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
+
+ const char *crtbegin;
+ if (IsStatic)
+ crtbegin = "crtbeginT.o";
+ else if (Args.hasArg(options::OPT_shared))
+ crtbegin = "crtbeginS.o";
+ else
+ crtbegin = "crtbegin.o";
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ Args.AddAllArgs(CmdArgs, options::OPT_u);
+
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+
+ if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
+ CmdArgs.push_back("--no-demangle");
+
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
+
+ if (D.CCCIsCXX() &&
+ !Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ bool OnlyLibstdcxxStatic =
+ Args.hasArg(options::OPT_static_libstdcxx) && !IsStatic;
+ if (OnlyLibstdcxxStatic)
+ CmdArgs.push_back("-Bstatic");
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (OnlyLibstdcxxStatic)
+ CmdArgs.push_back("-Bdynamic");
+ CmdArgs.push_back("-lm");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib)) {
+ if (!Args.hasArg(options::OPT_nodefaultlibs)) {
+ // Always use groups, since it has no effect on dynamic libraries.
+ CmdArgs.push_back("--start-group");
+ CmdArgs.push_back("-lc");
+ // NaCl's libc++ currently requires libpthread, so just always include it
+ // in the group for C++.
+ if (Args.hasArg(options::OPT_pthread) ||
+ Args.hasArg(options::OPT_pthreads) || D.CCCIsCXX()) {
+ // Gold, used by Mips, handles nested groups differently than ld, and
+ // without '-lnacl' it prefers symbols from libpthread.a over libnacl.a,
+ // which is not the desired behaviour here.
+ // See https://sourceware.org/ml/binutils/2015-03/msg00034.html
+ if (getToolChain().getArch() == llvm::Triple::mipsel)
+ CmdArgs.push_back("-lnacl");
+
+ CmdArgs.push_back("-lpthread");
+ }
+
+ CmdArgs.push_back("-lgcc");
+ CmdArgs.push_back("--as-needed");
+ if (IsStatic)
+ CmdArgs.push_back("-lgcc_eh");
+ else
+ CmdArgs.push_back("-lgcc_s");
+ CmdArgs.push_back("--no-as-needed");
+
+ // Mips needs to create and use a pnacl_legacy library that contains
+ // definitions from bitcode/pnaclmm.c and definitions for
+ // __nacl_tp_tls_offset() and __nacl_tp_tdb_offset().
+ if (getToolChain().getArch() == llvm::Triple::mipsel)
+ CmdArgs.push_back("-lpnacl_legacy");
+
+ CmdArgs.push_back("--end-group");
+ }
+
+ if (!Args.hasArg(options::OPT_nostartfiles)) {
+ const char *crtend;
+ if (Args.hasArg(options::OPT_shared))
+ crtend = "crtendS.o";
+ else
+ crtend = "crtend.o";
+
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend)));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
+ }
+ }
+
+ const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
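+
+// Illustrative sketch (not part of this change): NaCl links statically by
+// default, so `clang hello.o` for x86_64 NaCl is roughly:
+//
+//   ld --build-id -m elf_x86_64_nacl -static -o a.out crt1.o crti.o \
+//      crtbeginT.o hello.o --start-group -lc -lgcc --as-needed -lgcc_eh \
+//      --no-as-needed --end-group crtend.o crtn.o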
+
+void minix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
+ ArgStringList CmdArgs;
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (const auto &II : Inputs)
+ CmdArgs.push_back(II.getFilename());
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+void minix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crt1.o")));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crtn.o")));
+ }
+
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_L, options::OPT_T_Group, options::OPT_e});
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ getToolChain().addProfileRTLibs(Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ if (D.CCCIsCXX()) {
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ CmdArgs.push_back("-lm");
+ }
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (Args.hasArg(options::OPT_pthread))
+ CmdArgs.push_back("-lpthread");
+ CmdArgs.push_back("-lc");
+ CmdArgs.push_back("-lCompilerRT-Generic");
+ CmdArgs.push_back("-L/usr/pkg/compiler-rt/lib");
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
+ }
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+/// DragonFly Tools
+
+// For now, the DragonFly Assembler does just about the same as the
+// FreeBSD one, but this may change soon.
+void dragonfly::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
+ ArgStringList CmdArgs;
+
+ // When building 32-bit code on DragonFly/pc64, we have to explicitly
+ // instruct as(1) in the base system to assemble 32-bit code.
+ if (getToolChain().getArch() == llvm::Triple::x86)
+ CmdArgs.push_back("--32");
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (const auto &II : Inputs)
+ CmdArgs.push_back(II.getFilename());
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ CmdArgs.push_back("--eh-frame-hdr");
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-Bstatic");
+ } else {
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+ if (Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back("-Bshareable");
+ else {
+ CmdArgs.push_back("-dynamic-linker");
+ CmdArgs.push_back("/usr/libexec/ld-elf.so.2");
+ }
+ CmdArgs.push_back("--hash-style=gnu");
+ CmdArgs.push_back("--enable-new-dtags");
+ }
+
+ // When building 32-bit code on DragonFly/pc64, we have to explicitly
+ // instruct ld(1) in the base system to link 32-bit code.
+ if (getToolChain().getArch() == llvm::Triple::x86) {
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf_i386");
+ }
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared)) {
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("gcrt1.o")));
+ else {
+ if (Args.hasArg(options::OPT_pie))
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("Scrt1.o")));
+ else
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crt1.o")));
+ }
+ }
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crti.o")));
+ if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtbeginS.o")));
+ else
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
+ }
+
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_L, options::OPT_T_Group, options::OPT_e});
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ CmdArgs.push_back("-L/usr/lib/gcc50");
+
+ if (!Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-rpath");
+ CmdArgs.push_back("/usr/lib/gcc50");
+ }
+
+ if (D.CCCIsCXX()) {
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ CmdArgs.push_back("-lm");
+ }
+
+ if (Args.hasArg(options::OPT_pthread))
+ CmdArgs.push_back("-lpthread");
+
+ if (!Args.hasArg(options::OPT_nolibc)) {
+ CmdArgs.push_back("-lc");
+ }
+
+ if (Args.hasArg(options::OPT_static) ||
+ Args.hasArg(options::OPT_static_libgcc)) {
+ CmdArgs.push_back("-lgcc");
+ CmdArgs.push_back("-lgcc_eh");
+ } else {
+ if (Args.hasArg(options::OPT_shared_libgcc)) {
+ CmdArgs.push_back("-lgcc_pic");
+ if (!Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back("-lgcc");
+ } else {
+ CmdArgs.push_back("-lgcc");
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lgcc_pic");
+ CmdArgs.push_back("--no-as-needed");
+ }
+ }
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtendS.o")));
+ else
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crtn.o")));
+ }
+
+ getToolChain().addProfileRTLibs(Args, CmdArgs);
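+ // Illustrative (sketch): a plain `clang foo.o` link on DragonFly resembles
+ //   ld --eh-frame-hdr -dynamic-linker /usr/libexec/ld-elf.so.2
+ //      --hash-style=gnu --enable-new-dtags -o a.out crt1.o crti.o crtbegin.o
+ //      foo.o -L/usr/lib/gcc50 -rpath /usr/lib/gcc50 -lc -lgcc --as-needed
+ //      -lgcc_pic --no-as-needed crtend.o crtn.o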
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+// Try to find Exe from a Visual Studio distribution. This first tries to find
+// an installed copy of Visual Studio and, failing that, looks for Exe in the
+// PATH, making sure that whatever executable is found is not a same-named exe
+// from clang itself, to prevent clang from falling back to itself.
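+// Illustrative (sketch): FindVisualStudioExecutable(TC, "link.exe", ClangPath)
+// yields "<VSBinDir>\link.exe" when an installation is found and the file is
+// executable, and otherwise returns plain "link.exe" for later PATH lookup.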
+static std::string FindVisualStudioExecutable(const ToolChain &TC,
+ const char *Exe,
+ const char *ClangProgramPath) {
+ const auto &MSVC = static_cast<const toolchains::MSVCToolChain &>(TC);
+ std::string visualStudioBinDir;
+ if (MSVC.getVisualStudioBinariesFolder(ClangProgramPath,
+ visualStudioBinDir)) {
+ SmallString<128> FilePath(visualStudioBinDir);
+ llvm::sys::path::append(FilePath, Exe);
+ if (llvm::sys::fs::can_execute(FilePath.c_str()))
+ return FilePath.str();
+ }
+
+ return Exe;
+}
+
+void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+ const ToolChain &TC = getToolChain();
+
+ assert((Output.isFilename() || Output.isNothing()) && "invalid output");
+ if (Output.isFilename())
+ CmdArgs.push_back(
+ Args.MakeArgString(std::string("-out:") + Output.getFilename()));
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles) &&
+ !C.getDriver().IsCLMode())
+ CmdArgs.push_back("-defaultlib:libcmt");
+
+ if (!llvm::sys::Process::GetEnv("LIB")) {
+ // If the VC environment hasn't been configured (perhaps because the user
+ // did not run vcvarsall), try to build a consistent link environment. If
+ // the environment variable is set however, assume the user knows what
+ // they're doing.
+ std::string VisualStudioDir;
+ const auto &MSVC = static_cast<const toolchains::MSVCToolChain &>(TC);
+ if (MSVC.getVisualStudioInstallDir(VisualStudioDir)) {
+ SmallString<128> LibDir(VisualStudioDir);
+ llvm::sys::path::append(LibDir, "VC", "lib");
+ switch (MSVC.getArch()) {
+ case llvm::Triple::x86:
+ // x86 just puts the libraries directly in lib
+ break;
+ case llvm::Triple::x86_64:
+ llvm::sys::path::append(LibDir, "amd64");
+ break;
+ case llvm::Triple::arm:
+ llvm::sys::path::append(LibDir, "arm");
+ break;
+ default:
+ break;
+ }
+ CmdArgs.push_back(
+ Args.MakeArgString(std::string("-libpath:") + LibDir.c_str()));
+
+ if (MSVC.useUniversalCRT(VisualStudioDir)) {
+ std::string UniversalCRTLibPath;
+ if (MSVC.getUniversalCRTLibraryPath(UniversalCRTLibPath))
+ CmdArgs.push_back(Args.MakeArgString(std::string("-libpath:") +
+ UniversalCRTLibPath.c_str()));
+ }
+ }
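+ // Illustrative (sketch): for x64 this contributes
+ // -libpath:<VSDir>\VC\lib\amd64 and, when the universal CRT is in use, an
+ // additional -libpath: for its library directory.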
+
+ std::string WindowsSdkLibPath;
+ if (MSVC.getWindowsSDKLibraryPath(WindowsSdkLibPath))
+ CmdArgs.push_back(Args.MakeArgString(std::string("-libpath:") +
+ WindowsSdkLibPath.c_str()));
+ }
+
+ CmdArgs.push_back("-nologo");
+
+ if (Args.hasArg(options::OPT_g_Group, options::OPT__SLASH_Z7))
+ CmdArgs.push_back("-debug");
+
+ bool DLL = Args.hasArg(options::OPT__SLASH_LD, options::OPT__SLASH_LDd,
+ options::OPT_shared);
+ if (DLL) {
+ CmdArgs.push_back(Args.MakeArgString("-dll"));
+
+ SmallString<128> ImplibName(Output.getFilename());
+ llvm::sys::path::replace_extension(ImplibName, "lib");
+ CmdArgs.push_back(Args.MakeArgString(std::string("-implib:") + ImplibName));
+ }
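+ // Illustrative (sketch): linking foo.dll in this mode adds -dll and
+ // -implib:foo.lib alongside -out:foo.dll.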
+
+ if (TC.getSanitizerArgs().needsAsanRt()) {
+ CmdArgs.push_back(Args.MakeArgString("-debug"));
+ CmdArgs.push_back(Args.MakeArgString("-incremental:no"));
+ if (Args.hasArg(options::OPT__SLASH_MD, options::OPT__SLASH_MDd)) {
+ for (const auto &Lib : {"asan_dynamic", "asan_dynamic_runtime_thunk"})
+ CmdArgs.push_back(TC.getCompilerRTArgString(Args, Lib));
+ // Make sure the dynamic runtime thunk is not optimized out at link time
+ // to ensure proper SEH handling.
+ CmdArgs.push_back(Args.MakeArgString("-include:___asan_seh_interceptor"));
+ } else if (DLL) {
+ CmdArgs.push_back(TC.getCompilerRTArgString(Args, "asan_dll_thunk"));
+ } else {
+ for (const auto &Lib : {"asan", "asan_cxx"})
+ CmdArgs.push_back(TC.getCompilerRTArgString(Args, Lib));
+ }
+ }
+
+ Args.AddAllArgValues(CmdArgs, options::OPT__SLASH_link);
+
+ if (Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
+ options::OPT_fno_openmp, false)) {
+ CmdArgs.push_back("-nodefaultlib:vcomp.lib");
+ CmdArgs.push_back("-nodefaultlib:vcompd.lib");
+ CmdArgs.push_back(Args.MakeArgString(std::string("-libpath:") +
+ TC.getDriver().Dir + "/../lib"));
+ switch (getOpenMPRuntime(getToolChain(), Args)) {
+ case OMPRT_OMP:
+ CmdArgs.push_back("-defaultlib:libomp.lib");
+ break;
+ case OMPRT_IOMP5:
+ CmdArgs.push_back("-defaultlib:libiomp5md.lib");
+ break;
+ case OMPRT_GOMP:
+ break;
+ case OMPRT_Unknown:
+ // Already diagnosed.
+ break;
+ }
+ }
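+ // Illustrative (sketch, assuming the runtime resolves to libomp): -fopenmp
+ // adds -nodefaultlib:vcomp.lib -nodefaultlib:vcompd.lib, a -libpath: next
+ // to the driver, and -defaultlib:libomp.lib.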
+
+ // Add filenames, libraries, and other linker inputs.
+ for (const auto &Input : Inputs) {
+ if (Input.isFilename()) {
+ CmdArgs.push_back(Input.getFilename());
+ continue;
+ }
+
+ const Arg &A = Input.getInputArg();
+
+ // Render -l options differently for the MSVC linker.
+ if (A.getOption().matches(options::OPT_l)) {
+ StringRef Lib = A.getValue();
+ const char *LinkLibArg;
+ if (Lib.endswith(".lib"))
+ LinkLibArg = Args.MakeArgString(Lib);
+ else
+ LinkLibArg = Args.MakeArgString(Lib + ".lib");
+ CmdArgs.push_back(LinkLibArg);
+ continue;
+ }
+
+ // Otherwise, this is some other kind of linker input option like -Wl, -z,
+ // or -L. Render it, even if MSVC doesn't understand it.
+ A.renderAsInput(Args, CmdArgs);
+ }
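+ // Illustrative (sketch): an input of -lfoo is rendered as foo.lib, while
+ // -lfoo.lib passes through unchanged.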
+
+ TC.addProfileRTLibs(Args, CmdArgs);
+
+ // We need to special case some linker paths. In the case of lld, we need to
+ // translate 'lld' into 'lld-link', and in the case of the regular msvc
+ // linker, we need to use a special search algorithm.
+ llvm::SmallString<128> linkPath;
+ StringRef Linker = Args.getLastArgValue(options::OPT_fuse_ld_EQ, "link");
+ if (Linker.equals_lower("lld"))
+ Linker = "lld-link";
+
+ if (Linker.equals_lower("link")) {
+ // If we're using the MSVC linker, it's not sufficient to just use link
+ // from the program PATH, because other environments like GnuWin32 install
+ // their own link.exe which may come first.
+ linkPath = FindVisualStudioExecutable(TC, "link.exe",
+ C.getDriver().getClangProgramPath());
+ } else {
+ linkPath = Linker;
+ llvm::sys::path::replace_extension(linkPath, "exe");
+ linkPath = TC.GetProgramPath(linkPath.c_str());
+ }
+
+ const char *Exec = Args.MakeArgString(linkPath);
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+void visualstudio::Compiler::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ C.addCommand(GetCommand(C, JA, Output, Inputs, Args, LinkingOutput));
+}
+
+std::unique_ptr<Command> visualstudio::Compiler::GetCommand(
+ Compilation &C, const JobAction &JA, const InputInfo &Output,
+ const InputInfoList &Inputs, const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+ CmdArgs.push_back("/nologo");
+ CmdArgs.push_back("/c"); // Compile only.
+ CmdArgs.push_back("/W0"); // No warnings.
+
+ // The goal is to be able to invoke this tool correctly based on
+ // any flag accepted by clang-cl.
+
+ // These are spelled the same way in clang and cl.exe.
+ Args.AddAllArgs(CmdArgs, {options::OPT_D, options::OPT_U, options::OPT_I});
+
+ // Optimization level.
+ if (Arg *A = Args.getLastArg(options::OPT_fbuiltin, options::OPT_fno_builtin))
+ CmdArgs.push_back(A->getOption().getID() == options::OPT_fbuiltin ? "/Oi"
+ : "/Oi-");
+ if (Arg *A = Args.getLastArg(options::OPT_O, options::OPT_O0)) {
+ if (A->getOption().getID() == options::OPT_O0) {
+ CmdArgs.push_back("/Od");
+ } else {
+ CmdArgs.push_back("/Og");
+
+ StringRef OptLevel = A->getValue();
+ if (OptLevel == "s" || OptLevel == "z")
+ CmdArgs.push_back("/Os");
+ else
+ CmdArgs.push_back("/Ot");
+
+ CmdArgs.push_back("/Ob2");
+ }
+ }
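+ // Illustrative mapping (sketch): -O0 -> /Od; -O2 -> /Og /Ot /Ob2;
+ // -Os -> /Og /Os /Ob2.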
+ if (Arg *A = Args.getLastArg(options::OPT_fomit_frame_pointer,
+ options::OPT_fno_omit_frame_pointer))
+ CmdArgs.push_back(A->getOption().getID() == options::OPT_fomit_frame_pointer
+ ? "/Oy"
+ : "/Oy-");
+ if (!Args.hasArg(options::OPT_fwritable_strings))
+ CmdArgs.push_back("/GF");
+
+ // Flags for which clang-cl has an alias.
+ // FIXME: How can we ensure this stays in sync with relevant clang-cl options?
+
+ if (Args.hasFlag(options::OPT__SLASH_GR_, options::OPT__SLASH_GR,
+ /*default=*/false))
+ CmdArgs.push_back("/GR-");
+ if (Arg *A = Args.getLastArg(options::OPT_ffunction_sections,
+ options::OPT_fno_function_sections))
+ CmdArgs.push_back(A->getOption().getID() == options::OPT_ffunction_sections
+ ? "/Gy"
+ : "/Gy-");
+ if (Arg *A = Args.getLastArg(options::OPT_fdata_sections,
+ options::OPT_fno_data_sections))
+ CmdArgs.push_back(
+ A->getOption().getID() == options::OPT_fdata_sections ? "/Gw" : "/Gw-");
+ if (Args.hasArg(options::OPT_fsyntax_only))
+ CmdArgs.push_back("/Zs");
+ if (Args.hasArg(options::OPT_g_Flag, options::OPT_gline_tables_only,
+ options::OPT__SLASH_Z7))
+ CmdArgs.push_back("/Z7");
+
+ std::vector<std::string> Includes =
+ Args.getAllArgValues(options::OPT_include);
+ for (const auto &Include : Includes)
+ CmdArgs.push_back(Args.MakeArgString(std::string("/FI") + Include));
+
+ // Flags that can simply be passed through.
+ Args.AddAllArgs(CmdArgs, options::OPT__SLASH_LD);
+ Args.AddAllArgs(CmdArgs, options::OPT__SLASH_LDd);
+ Args.AddAllArgs(CmdArgs, options::OPT__SLASH_EH);
+ Args.AddAllArgs(CmdArgs, options::OPT__SLASH_Zl);
+
+ // The order of these flags is relevant, so pick the last one.
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_MD, options::OPT__SLASH_MDd,
+ options::OPT__SLASH_MT, options::OPT__SLASH_MTd))
+ A->render(Args, CmdArgs);
+
+ // Input filename.
+ assert(Inputs.size() == 1);
+ const InputInfo &II = Inputs[0];
+ assert(II.getType() == types::TY_C || II.getType() == types::TY_CXX);
+ CmdArgs.push_back(II.getType() == types::TY_C ? "/Tc" : "/Tp");
+ if (II.isFilename())
+ CmdArgs.push_back(II.getFilename());
+ else
+ II.getInputArg().renderAsInput(Args, CmdArgs);
+
+ // Output filename.
+ assert(Output.getType() == types::TY_Object);
+ const char *Fo =
+ Args.MakeArgString(std::string("/Fo") + Output.getFilename());
+ CmdArgs.push_back(Fo);
+
+ const Driver &D = getToolChain().getDriver();
+ std::string Exec = FindVisualStudioExecutable(getToolChain(), "cl.exe",
+ D.getClangProgramPath());
+ return llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Exec),
+ CmdArgs, Inputs);
+}
+
+/// MinGW Tools
+void MinGW::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
+ ArgStringList CmdArgs;
+
+ if (getToolChain().getArch() == llvm::Triple::x86) {
+ CmdArgs.push_back("--32");
+ } else if (getToolChain().getArch() == llvm::Triple::x86_64) {
+ CmdArgs.push_back("--64");
+ }
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (const auto &II : Inputs)
+ CmdArgs.push_back(II.getFilename());
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+
+ if (Args.hasArg(options::OPT_gsplit_dwarf))
+ SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output,
+ SplitDebugName(Args, Inputs[0]));
+}
+
+void MinGW::Linker::AddLibGCC(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ if (Args.hasArg(options::OPT_mthreads))
+ CmdArgs.push_back("-lmingwthrd");
+ CmdArgs.push_back("-lmingw32");
+
+ // Make use of compiler-rt if the --rtlib option selects it.
+ ToolChain::RuntimeLibType RLT = getToolChain().GetRuntimeLibType(Args);
+ if (RLT == ToolChain::RLT_Libgcc) {
+ bool Static = Args.hasArg(options::OPT_static_libgcc) ||
+ Args.hasArg(options::OPT_static);
+ bool Shared = Args.hasArg(options::OPT_shared);
+ bool CXX = getToolChain().getDriver().CCCIsCXX();
+
+ if (Static || (!CXX && !Shared)) {
+ CmdArgs.push_back("-lgcc");
+ CmdArgs.push_back("-lgcc_eh");
+ } else {
+ CmdArgs.push_back("-lgcc_s");
+ CmdArgs.push_back("-lgcc");
+ }
+ } else {
+ AddRunTimeLibs(getToolChain(), getToolChain().getDriver(), CmdArgs, Args);
+ }
+
+ CmdArgs.push_back("-lmoldname");
+ CmdArgs.push_back("-lmingwex");
+ CmdArgs.push_back("-lmsvcrt");
+}
+
+void MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const ToolChain &TC = getToolChain();
+ const Driver &D = TC.getDriver();
+ // const SanitizerArgs &Sanitize = TC.getSanitizerArgs();
+
+ ArgStringList CmdArgs;
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo". Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+
+ StringRef LinkerName = Args.getLastArgValue(options::OPT_fuse_ld_EQ, "ld");
+ if (LinkerName.equals_lower("lld")) {
+ CmdArgs.push_back("-flavor");
+ CmdArgs.push_back("gnu");
+ } else if (!LinkerName.equals_lower("ld")) {
+ D.Diag(diag::err_drv_unsupported_linker) << LinkerName;
+ }
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ if (Args.hasArg(options::OPT_s))
+ CmdArgs.push_back("-s");
+
+ CmdArgs.push_back("-m");
+ if (TC.getArch() == llvm::Triple::x86)
+ CmdArgs.push_back("i386pe");
+ if (TC.getArch() == llvm::Triple::x86_64)
+ CmdArgs.push_back("i386pep");
+ if (TC.getArch() == llvm::Triple::arm)
+ CmdArgs.push_back("thumb2pe");
+
+ if (Args.hasArg(options::OPT_mwindows)) {
+ CmdArgs.push_back("--subsystem");
+ CmdArgs.push_back("windows");
+ } else if (Args.hasArg(options::OPT_mconsole)) {
+ CmdArgs.push_back("--subsystem");
+ CmdArgs.push_back("console");
+ }
+
+ if (Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("-Bstatic");
+ else {
+ if (Args.hasArg(options::OPT_mdll))
+ CmdArgs.push_back("--dll");
+ else if (Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back("--shared");
+ CmdArgs.push_back("-Bdynamic");
+ if (Args.hasArg(options::OPT_mdll) || Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-e");
+ if (TC.getArch() == llvm::Triple::x86)
+ CmdArgs.push_back("_DllMainCRTStartup@12");
+ else
+ CmdArgs.push_back("DllMainCRTStartup");
+ CmdArgs.push_back("--enable-auto-image-base");
+ }
+ }
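+ // Illustrative (sketch): `clang -shared` for i686 MinGW adds --shared
+ // -Bdynamic -e _DllMainCRTStartup@12 --enable-auto-image-base.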
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ Args.AddAllArgs(CmdArgs, options::OPT_e);
+ // FIXME: add -N, -n flags
+ Args.AddLastArg(CmdArgs, options::OPT_r);
+ Args.AddLastArg(CmdArgs, options::OPT_s);
+ Args.AddLastArg(CmdArgs, options::OPT_t);
+ Args.AddAllArgs(CmdArgs, options::OPT_u_Group);
+ Args.AddLastArg(CmdArgs, options::OPT_Z_Flag);
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_mdll)) {
+ CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("dllcrt2.o")));
+ } else {
+ if (Args.hasArg(options::OPT_municode))
+ CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crt2u.o")));
+ else
+ CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crt2.o")));
+ }
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("gcrt2.o")));
+ CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crtbegin.o")));
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ TC.AddFilePathLibArgs(Args, CmdArgs);
+ AddLinkerInputs(TC, Inputs, Args, CmdArgs);
+
+ // TODO: Add ASan stuff here
+
+ // TODO: Add profile stuff here
+
+ if (D.CCCIsCXX() &&
+ !Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ bool OnlyLibstdcxxStatic = Args.hasArg(options::OPT_static_libstdcxx) &&
+ !Args.hasArg(options::OPT_static);
+ if (OnlyLibstdcxxStatic)
+ CmdArgs.push_back("-Bstatic");
+ TC.AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (OnlyLibstdcxxStatic)
+ CmdArgs.push_back("-Bdynamic");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib)) {
+ if (!Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("--start-group");
+
+ if (Args.hasArg(options::OPT_fstack_protector) ||
+ Args.hasArg(options::OPT_fstack_protector_strong) ||
+ Args.hasArg(options::OPT_fstack_protector_all)) {
+ CmdArgs.push_back("-lssp_nonshared");
+ CmdArgs.push_back("-lssp");
+ }
+ if (Args.hasArg(options::OPT_fopenmp))
+ CmdArgs.push_back("-lgomp");
+
+ AddLibGCC(Args, CmdArgs);
+
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lgmon");
+
+ if (Args.hasArg(options::OPT_pthread))
+ CmdArgs.push_back("-lpthread");
+
+ // Add system libraries.
+ if (Args.hasArg(options::OPT_mwindows)) {
+ CmdArgs.push_back("-lgdi32");
+ CmdArgs.push_back("-lcomdlg32");
+ }
+ CmdArgs.push_back("-ladvapi32");
+ CmdArgs.push_back("-lshell32");
+ CmdArgs.push_back("-luser32");
+ CmdArgs.push_back("-lkernel32");
+
+ if (Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("--end-group");
+ else if (!LinkerName.equals_lower("lld"))
+ AddLibGCC(Args, CmdArgs);
+ }
+
+ if (!Args.hasArg(options::OPT_nostartfiles)) {
+ // Add crtfastmath.o if available and fast math is enabled.
+ TC.AddFastMathRuntimeIfAvailable(Args, CmdArgs);
+
+ CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crtend.o")));
+ }
+ }
+ const char *Exec = Args.MakeArgString(TC.GetProgramPath(LinkerName.data()));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+/// XCore Tools
+// We pass assemble and link construction to the xcc tool.
+
+void XCore::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
+ ArgStringList CmdArgs;
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ CmdArgs.push_back("-c");
+
+ if (Args.hasArg(options::OPT_v))
+ CmdArgs.push_back("-v");
+
+ if (Arg *A = Args.getLastArg(options::OPT_g_Group))
+ if (!A->getOption().matches(options::OPT_g0))
+ CmdArgs.push_back("-g");
+
+ if (Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm,
+ false))
+ CmdArgs.push_back("-fverbose-asm");
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+
+ for (const auto &II : Inputs)
+ CmdArgs.push_back(II.getFilename());
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("xcc"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+void XCore::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (Args.hasArg(options::OPT_v))
+ CmdArgs.push_back("-v");
+
+ // Pass -fexceptions through to the linker if it was present.
+ if (Args.hasFlag(options::OPT_fexceptions, options::OPT_fno_exceptions,
+ false))
+ CmdArgs.push_back("-fexceptions");
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("xcc"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+void CrossWindows::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
+ const auto &TC =
+ static_cast<const toolchains::CrossWindowsToolChain &>(getToolChain());
+ ArgStringList CmdArgs;
+ const char *Exec;
+
+ switch (TC.getArch()) {
+ default:
+ llvm_unreachable("unsupported architecture");
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ break;
+ case llvm::Triple::x86:
+ CmdArgs.push_back("--32");
+ break;
+ case llvm::Triple::x86_64:
+ CmdArgs.push_back("--64");
+ break;
+ }
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (const auto &Input : Inputs)
+ CmdArgs.push_back(Input.getFilename());
+
+ const std::string Assembler = TC.GetProgramPath("as");
+ Exec = Args.MakeArgString(Assembler);
+
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+void CrossWindows::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const auto &TC =
+ static_cast<const toolchains::CrossWindowsToolChain &>(getToolChain());
+ const llvm::Triple &T = TC.getTriple();
+ const Driver &D = TC.getDriver();
+ SmallString<128> EntryPoint;
+ ArgStringList CmdArgs;
+ const char *Exec;
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_w);
+ // Other warning options are already handled somewhere else.
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ if (Args.hasArg(options::OPT_pie))
+ CmdArgs.push_back("-pie");
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+ if (Args.hasArg(options::OPT_s))
+ CmdArgs.push_back("--strip-all");
+
+ CmdArgs.push_back("-m");
+ switch (TC.getArch()) {
+ default:
+ llvm_unreachable("unsupported architecture");
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ // FIXME: this is incorrect for WinCE
+ CmdArgs.push_back("thumb2pe");
+ break;
+ case llvm::Triple::x86:
+ CmdArgs.push_back("i386pe");
+ EntryPoint.append("_");
+ break;
+ case llvm::Triple::x86_64:
+ CmdArgs.push_back("i386pep");
+ break;
+ }
+
+ if (Args.hasArg(options::OPT_shared)) {
+ switch (T.getArch()) {
+ default:
+ llvm_unreachable("unsupported architecture");
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ case llvm::Triple::x86_64:
+ EntryPoint.append("_DllMainCRTStartup");
+ break;
+ case llvm::Triple::x86:
+ EntryPoint.append("_DllMainCRTStartup@12");
+ break;
+ }
+
+ CmdArgs.push_back("-shared");
+ CmdArgs.push_back("-Bdynamic");
+
+ CmdArgs.push_back("--enable-auto-image-base");
+
+ CmdArgs.push_back("--entry");
+ CmdArgs.push_back(Args.MakeArgString(EntryPoint));
+ } else {
+ EntryPoint.append("mainCRTStartup");
+
+ CmdArgs.push_back(Args.hasArg(options::OPT_static) ? "-Bstatic"
+ : "-Bdynamic");
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ CmdArgs.push_back("--entry");
+ CmdArgs.push_back(Args.MakeArgString(EntryPoint));
+ }
+
+ // FIXME: handle subsystem
+ }
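+ // Illustrative (sketch): a non-shared x86 link (with startfiles) passes
+ // --entry _mainCRTStartup, since EntryPoint already begins with "_".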
+
+ // NOTE: deal with multiple definitions on Windows (e.g. COMDAT)
+ CmdArgs.push_back("--allow-multiple-definition");
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_rdynamic)) {
+ SmallString<261> ImpLib(Output.getFilename());
+ llvm::sys::path::replace_extension(ImpLib, ".lib");
+
+ CmdArgs.push_back("--out-implib");
+ CmdArgs.push_back(Args.MakeArgString(ImpLib));
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ const std::string CRTPath(D.SysRoot + "/usr/lib/");
+ const char *CRTBegin;
+
+ CRTBegin =
+ Args.hasArg(options::OPT_shared) ? "crtbeginS.obj" : "crtbegin.obj";
+ CmdArgs.push_back(Args.MakeArgString(CRTPath + CRTBegin));
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ TC.AddFilePathLibArgs(Args, CmdArgs);
+ AddLinkerInputs(TC, Inputs, Args, CmdArgs);
+
+ if (D.CCCIsCXX() && !Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ bool StaticCXX = Args.hasArg(options::OPT_static_libstdcxx) &&
+ !Args.hasArg(options::OPT_static);
+ if (StaticCXX)
+ CmdArgs.push_back("-Bstatic");
+ TC.AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (StaticCXX)
+ CmdArgs.push_back("-Bdynamic");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib)) {
+ if (!Args.hasArg(options::OPT_nodefaultlibs)) {
+ // TODO handle /MT[d] /MD[d]
+ CmdArgs.push_back("-lmsvcrt");
+ AddRunTimeLibs(TC, D, CmdArgs, Args);
+ }
+ }
+
+ if (TC.getSanitizerArgs().needsAsanRt()) {
+ // TODO handle /MT[d] /MD[d]
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back(TC.getCompilerRTArgString(Args, "asan_dll_thunk"));
+ } else {
+ for (const auto &Lib : {"asan_dynamic", "asan_dynamic_runtime_thunk"})
+ CmdArgs.push_back(TC.getCompilerRTArgString(Args, Lib));
+ // Make sure the dynamic runtime thunk is not optimized out at link time
+ // to ensure proper SEH handling.
+ CmdArgs.push_back(Args.MakeArgString("--undefined"));
+ CmdArgs.push_back(Args.MakeArgString(TC.getArch() == llvm::Triple::x86
+ ? "___asan_seh_interceptor"
+ : "__asan_seh_interceptor"));
+ }
+ }
+
+ Exec = Args.MakeArgString(TC.GetLinkerPath());
+
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+void tools::SHAVE::Compiler::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+ assert(Inputs.size() == 1);
+ const InputInfo &II = Inputs[0];
+ assert(II.getType() == types::TY_C || II.getType() == types::TY_CXX ||
+ II.getType() == types::TY_PP_CXX);
+
+ if (JA.getKind() == Action::PreprocessJobClass) {
+ Args.ClaimAllArgs();
+ CmdArgs.push_back("-E");
+ } else {
+ assert(Output.getType() == types::TY_PP_Asm); // Require preprocessed asm.
+ CmdArgs.push_back("-S");
+ CmdArgs.push_back("-fno-exceptions"); // Always do this even if unspecified.
+ }
+ CmdArgs.push_back("-mcpu=myriad2");
+ CmdArgs.push_back("-DMYRIAD2");
+
+ // Append all -I, -iquote, -isystem paths, defines/undefines,
+ // 'f' flags, optimize flags, and warning options.
+ // These are spelled the same way in clang and moviCompile.
+ Args.AddAllArgs(CmdArgs, {options::OPT_I_Group, options::OPT_clang_i_Group,
+ options::OPT_std_EQ, options::OPT_D, options::OPT_U,
+ options::OPT_f_Group, options::OPT_f_clang_Group,
+ options::OPT_g_Group, options::OPT_M_Group,
+ options::OPT_O_Group, options::OPT_W_Group});
+
+ // If we're producing a dependency file, and assembly is the final action,
+ // then the name of the target in the dependency file should be the '.o'
+ // file, not the '.s' file produced by this step. For example, instead of
+ // /tmp/mumble.s: mumble.c .../someheader.h
+ // the filename on the left-hand side should be "mumble.o".
+ if (Args.getLastArg(options::OPT_MF) && !Args.getLastArg(options::OPT_MT) &&
+ C.getActions().size() == 1 &&
+ C.getActions()[0]->getKind() == Action::AssembleJobClass) {
+ Arg *A = Args.getLastArg(options::OPT_o);
+ if (A) {
+ CmdArgs.push_back("-MT");
+ CmdArgs.push_back(Args.MakeArgString(A->getValue()));
+ }
+ }
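+ // Illustrative (sketch): `... -MF deps.d -o mumble.o` adds -MT mumble.o so
+ // the dependency target names the final object rather than the .s file.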
+
+ CmdArgs.push_back(II.getFilename());
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ std::string Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("moviCompile"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Exec),
+ CmdArgs, Inputs));
+}
+
+void tools::SHAVE::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ assert(Inputs.size() == 1);
+ const InputInfo &II = Inputs[0];
+ assert(II.getType() == types::TY_PP_Asm); // Require preprocessed asm input.
+ assert(Output.getType() == types::TY_Object);
+
+ CmdArgs.push_back("-no6thSlotCompression");
+ CmdArgs.push_back("-cv:myriad2"); // Chip Version
+ CmdArgs.push_back("-noSPrefixing");
+ CmdArgs.push_back("-a"); // Mystery option.
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+ for (const Arg *A : Args.filtered(options::OPT_I, options::OPT_isystem)) {
+ A->claim();
+ CmdArgs.push_back(
+ Args.MakeArgString(std::string("-i:") + A->getValue(0)));
+ }
+ CmdArgs.push_back("-elf"); // Output format.
+ CmdArgs.push_back(II.getFilename());
+ CmdArgs.push_back(
+ Args.MakeArgString(std::string("-o:") + Output.getFilename()));
+
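+ // Illustrative moviAsm invocation (sketch): moviAsm -no6thSlotCompression
+ // -cv:myriad2 -noSPrefixing -a -elf foo.s -o:foo.o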
+ std::string Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("moviAsm"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Exec),
+ CmdArgs, Inputs));
+}
+
+void tools::Myriad::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const auto &TC =
+ static_cast<const toolchains::MyriadToolChain &>(getToolChain());
+ const llvm::Triple &T = TC.getTriple();
+ ArgStringList CmdArgs;
+ bool UseStartfiles =
+ !Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles);
+ bool UseDefaultLibs =
+ !Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs);
+
+ if (T.getArch() == llvm::Triple::sparc)
+ CmdArgs.push_back("-EB");
+ else // SHAVE assumes little-endian, and sparcel is expressly so.
+ CmdArgs.push_back("-EL");
+
+ // The remaining logic is mostly like gnutools::Linker::ConstructJob,
+ // but we never pass through a --sysroot option and various other bits.
+ // For example, there are no sanitizers (yet) nor gold linker.
+
+ // Eat some arguments that may be present but have no effect.
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ Args.ClaimAllArgs(options::OPT_w);
+ Args.ClaimAllArgs(options::OPT_static_libgcc);
+
+ if (Args.hasArg(options::OPT_s)) // Pass the 'strip' option.
+ CmdArgs.push_back("-s");
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ if (UseStartfiles) {
+ // If you want startfiles, it means you want the builtin crti and crtbegin,
+ // but not crt0. Myriad link commands provide their own crt0.o as needed.
+ CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crtbegin.o")));
+ }
+
+ Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
+ options::OPT_e, options::OPT_s, options::OPT_t,
+ options::OPT_Z_Flag, options::OPT_r});
+
+ TC.AddFilePathLibArgs(Args, CmdArgs);
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ if (UseDefaultLibs) {
+ if (C.getDriver().CCCIsCXX())
+ CmdArgs.push_back("-lstdc++");
+ if (T.getOS() == llvm::Triple::RTEMS) {
+ CmdArgs.push_back("--start-group");
+ CmdArgs.push_back("-lc");
+ // You must provide your own "-L" option to enable finding these.
+ CmdArgs.push_back("-lrtemscpu");
+ CmdArgs.push_back("-lrtemsbsp");
+ CmdArgs.push_back("--end-group");
+ } else {
+ CmdArgs.push_back("-lc");
+ }
+ CmdArgs.push_back("-lgcc");
+ }
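+ // Illustrative (sketch): a default C++ link for RTEMS gains -lstdc++
+ // --start-group -lc -lrtemscpu -lrtemsbsp --end-group -lgcc.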
+ if (UseStartfiles) {
+ CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crtend.o")));
+ CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crtn.o")));
+ }
+
+ std::string Exec =
+ Args.MakeArgString(TC.GetProgramPath("sparc-myriad-elf-ld"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Exec),
+ CmdArgs, Inputs));
+}
+
+void PS4cpu::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
+ ArgStringList CmdArgs;
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ assert(Inputs.size() == 1 && "Unexpected number of inputs.");
+ const InputInfo &Input = Inputs[0];
+ assert(Input.isFilename() && "Invalid input.");
+ CmdArgs.push_back(Input.getFilename());
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("ps4-as"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+static void AddPS4SanitizerArgs(const ToolChain &TC, ArgStringList &CmdArgs) {
+ const SanitizerArgs &SanArgs = TC.getSanitizerArgs();
+ if (SanArgs.needsUbsanRt()) {
+ CmdArgs.push_back("-lSceDbgUBSanitizer_stub_weak");
+ }
+ if (SanArgs.needsAsanRt()) {
+ CmdArgs.push_back("-lSceDbgAddressSanitizer_stub_weak");
+ }
+}
+
+static void ConstructPS4LinkJob(const Tool &T, Compilation &C,
+ const JobAction &JA, const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) {
+ const toolchains::FreeBSD &ToolChain =
+ static_cast<const toolchains::FreeBSD &>(T.getToolChain());
+ const Driver &D = ToolChain.getDriver();
+ ArgStringList CmdArgs;
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo". Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ if (Args.hasArg(options::OPT_pie))
+ CmdArgs.push_back("-pie");
+
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+ if (Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back("--oformat=so");
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ AddPS4SanitizerArgs(ToolChain, CmdArgs);
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_e);
+ Args.AddAllArgs(CmdArgs, options::OPT_s);
+ Args.AddAllArgs(CmdArgs, options::OPT_t);
+ Args.AddAllArgs(CmdArgs, options::OPT_r);
+
+ if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
+ CmdArgs.push_back("--no-demangle");
+
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
+
+ if (Args.hasArg(options::OPT_pthread)) {
+ CmdArgs.push_back("-lpthread");
+ }
+
+ const char *Exec = Args.MakeArgString(ToolChain.GetProgramPath("ps4-ld"));
+
+ C.addCommand(llvm::make_unique<Command>(JA, T, Exec, CmdArgs, Inputs));
+}
+
+static void ConstructGoldLinkJob(const Tool &T, Compilation &C,
+ const JobAction &JA, const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) {
+ const toolchains::FreeBSD &ToolChain =
+ static_cast<const toolchains::FreeBSD &>(T.getToolChain());
+ const Driver &D = ToolChain.getDriver();
+ ArgStringList CmdArgs;
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo". Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ if (Args.hasArg(options::OPT_pie))
+ CmdArgs.push_back("-pie");
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-Bstatic");
+ } else {
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+ CmdArgs.push_back("--eh-frame-hdr");
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-Bshareable");
+ } else {
+ CmdArgs.push_back("-dynamic-linker");
+ CmdArgs.push_back("/libexec/ld-elf.so.1");
+ }
+ CmdArgs.push_back("--enable-new-dtags");
+ }
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ AddPS4SanitizerArgs(ToolChain, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ const char *crt1 = nullptr;
+ if (!Args.hasArg(options::OPT_shared)) {
+ if (Args.hasArg(options::OPT_pg))
+ crt1 = "gcrt1.o";
+ else if (Args.hasArg(options::OPT_pie))
+ crt1 = "Scrt1.o";
+ else
+ crt1 = "crt1.o";
+ }
+ if (crt1)
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crt1)));
+
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
+
+ const char *crtbegin = nullptr;
+ if (Args.hasArg(options::OPT_static))
+ crtbegin = "crtbeginT.o";
+ else if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
+ crtbegin = "crtbeginS.o";
+ else
+ crtbegin = "crtbegin.o";
+
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_e);
+ Args.AddAllArgs(CmdArgs, options::OPT_s);
+ Args.AddAllArgs(CmdArgs, options::OPT_t);
+ Args.AddAllArgs(CmdArgs, options::OPT_r);
+
+ if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
+ CmdArgs.push_back("--no-demangle");
+
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ // For PS4, we always want to pass libm, libstdc++ and libkernel
+ // libraries for both C and C++ compilations.
+ CmdArgs.push_back("-lkernel");
+ if (D.CCCIsCXX()) {
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lm_p");
+ else
+ CmdArgs.push_back("-lm");
+ }
+ // FIXME: For some reason GCC passes -lgcc and -lgcc_s before adding
+ // the default system libraries. Just mimic this for now.
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lgcc_p");
+ else
+ CmdArgs.push_back("-lcompiler_rt");
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-lstdc++");
+ } else if (Args.hasArg(options::OPT_pg)) {
+ CmdArgs.push_back("-lgcc_eh_p");
+ } else {
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lstdc++");
+ CmdArgs.push_back("--no-as-needed");
+ }
+
+ if (Args.hasArg(options::OPT_pthread)) {
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lpthread_p");
+ else
+ CmdArgs.push_back("-lpthread");
+ }
+
+ if (Args.hasArg(options::OPT_pg)) {
+ if (Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back("-lc");
+ else {
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("--start-group");
+ CmdArgs.push_back("-lc_p");
+ CmdArgs.push_back("-lpthread_p");
+ CmdArgs.push_back("--end-group");
+ } else {
+ CmdArgs.push_back("-lc_p");
+ }
+ }
+ CmdArgs.push_back("-lgcc_p");
+ } else {
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("--start-group");
+ CmdArgs.push_back("-lc");
+ CmdArgs.push_back("-lpthread");
+ CmdArgs.push_back("--end-group");
+ } else {
+ CmdArgs.push_back("-lc");
+ }
+ CmdArgs.push_back("-lcompiler_rt");
+ }
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-lstdc++");
+ } else if (Args.hasArg(options::OPT_pg)) {
+ CmdArgs.push_back("-lgcc_eh_p");
+ } else {
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lstdc++");
+ CmdArgs.push_back("--no-as-needed");
+ }
+ }
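+ // Illustrative (sketch): a plain dynamic C link thus gains -lkernel
+ // -lcompiler_rt --as-needed -lstdc++ --no-as-needed -lc -lcompiler_rt
+ // --as-needed -lstdc++ --no-as-needed; the repetition mirrors the
+ // branches above.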
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtendS.o")));
+ else
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
+ }
+
+ const char *Exec =
+#ifdef LLVM_ON_WIN32
+ Args.MakeArgString(ToolChain.GetProgramPath("ps4-ld.gold"));
+#else
+ Args.MakeArgString(ToolChain.GetProgramPath("ps4-ld"));
+#endif
+
+ C.addCommand(llvm::make_unique<Command>(JA, T, Exec, CmdArgs, Inputs));
+}
+
+void PS4cpu::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const toolchains::FreeBSD &ToolChain =
+ static_cast<const toolchains::FreeBSD &>(getToolChain());
+ const Driver &D = ToolChain.getDriver();
+ bool PS4Linker;
+ StringRef LinkerOptName;
+ if (const Arg *A = Args.getLastArg(options::OPT_fuse_ld_EQ)) {
+ LinkerOptName = A->getValue();
+ if (LinkerOptName != "ps4" && LinkerOptName != "gold")
+ D.Diag(diag::err_drv_unsupported_linker) << LinkerOptName;
+ }
+
+ if (LinkerOptName == "gold")
+ PS4Linker = false;
+ else if (LinkerOptName == "ps4")
+ PS4Linker = true;
+ else
+ PS4Linker = !Args.hasArg(options::OPT_shared);
+
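+ // Thus, absent an explicit -fuse-ld=, shared links default to the gold
+ // flavor and all other links to the PS4 linker.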
+ if (PS4Linker)
+ ConstructPS4LinkJob(*this, C, JA, Output, Inputs, Args, LinkingOutput);
+ else
+ ConstructGoldLinkJob(*this, C, JA, Output, Inputs, Args, LinkingOutput);
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Tools.h b/contrib/llvm/tools/clang/lib/Driver/Tools.h
new file mode 100644
index 0000000..168662f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Tools.h
@@ -0,0 +1,908 @@
+//===--- Tools.h - Tool Implementations -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLS_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLS_H
+
+#include "clang/Basic/VersionTuple.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/Types.h"
+#include "clang/Driver/Util.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Option/Option.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+class ObjCRuntime;
+
+namespace driver {
+class Command;
+class Driver;
+
+namespace toolchains {
+class MachO;
+}
+
+namespace tools {
+
+namespace visualstudio {
+class Compiler;
+}
+
+using llvm::opt::ArgStringList;
+
+SmallString<128> getCompilerRT(const ToolChain &TC,
+ const llvm::opt::ArgList &Args,
+ StringRef Component, bool Shared = false);
+
+/// \brief Clang compiler tool.
+class LLVM_LIBRARY_VISIBILITY Clang : public Tool {
+public:
+ static const char *getBaseInputName(const llvm::opt::ArgList &Args,
+ const InputInfo &Input);
+ static const char *getBaseInputStem(const llvm::opt::ArgList &Args,
+ const InputInfoList &Inputs);
+ static const char *getDependencyFileName(const llvm::opt::ArgList &Args,
+ const InputInfoList &Inputs);
+
+private:
+ void AddPreprocessingOptions(Compilation &C, const JobAction &JA,
+ const Driver &D, const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ToolChain *AuxToolChain) const;
+
+ void AddAArch64TargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ void AddARMTargetArgs(const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ bool KernelOrKext) const;
+ void AddARM64TargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ void AddMIPSTargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ void AddPPCTargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ void AddR600TargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ void AddSparcTargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ void AddSystemZTargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ void AddX86TargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ void AddHexagonTargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+
+ enum RewriteKind { RK_None, RK_Fragile, RK_NonFragile };
+
+ ObjCRuntime AddObjCRuntimeArgs(const llvm::opt::ArgList &args,
+ llvm::opt::ArgStringList &cmdArgs,
+ RewriteKind rewrite) const;
+
+ void AddClangCLArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ enum CodeGenOptions::DebugInfoKind *DebugInfoKind,
+ bool *EmitCodeView) const;
+
+ visualstudio::Compiler *getCLFallback() const;
+
+ mutable std::unique_ptr<visualstudio::Compiler> CLFallback;
+
+public:
+ // CAUTION! The first constructor argument ("clang") is not arbitrary,
+ // as it is for other tools. Some operations on a Tool actually test
+ // whether that tool is Clang based on the Tool's Name as a string.
+ Clang(const ToolChain &TC) : Tool("clang", "clang frontend", TC, RF_Full) {}
+
+ bool hasGoodDiagnostics() const override { return true; }
+ bool hasIntegratedAssembler() const override { return true; }
+ bool hasIntegratedCPP() const override { return true; }
+ bool canEmitIR() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+/// \brief Clang integrated assembler tool.
+class LLVM_LIBRARY_VISIBILITY ClangAs : public Tool {
+public:
+ ClangAs(const ToolChain &TC)
+ : Tool("clang::as", "clang integrated assembler", TC, RF_Full) {}
+ void AddMIPSTargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ bool hasGoodDiagnostics() const override { return true; }
+ bool hasIntegratedAssembler() const override { return false; }
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+/// \brief Base class for all GNU tools that provide the same behavior when
+/// it comes to response file support.
+class LLVM_LIBRARY_VISIBILITY GnuTool : public Tool {
+ virtual void anchor();
+
+public:
+ GnuTool(const char *Name, const char *ShortName, const ToolChain &TC)
+ : Tool(Name, ShortName, TC, RF_Full, llvm::sys::WEM_CurrentCodePage) {}
+};
+
+/// gcc - Generic GCC tool implementations.
+namespace gcc {
+class LLVM_LIBRARY_VISIBILITY Common : public GnuTool {
+public:
+ Common(const char *Name, const char *ShortName, const ToolChain &TC)
+ : GnuTool(Name, ShortName, TC) {}
+
+ // A gcc tool has an "integrated" assembler that it will call to produce an
+ // object. Let it use that assembler so that we don't have to deal with
+ // assembly syntax incompatibilities.
+ bool hasIntegratedAssembler() const override { return true; }
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+
+ /// RenderExtraToolArgs - Render any arguments necessary to force
+ /// the particular tool mode.
+ virtual void RenderExtraToolArgs(const JobAction &JA,
+ llvm::opt::ArgStringList &CmdArgs) const = 0;
+};
+
+class LLVM_LIBRARY_VISIBILITY Preprocessor : public Common {
+public:
+ Preprocessor(const ToolChain &TC)
+ : Common("gcc::Preprocessor", "gcc preprocessor", TC) {}
+
+ bool hasGoodDiagnostics() const override { return true; }
+ bool hasIntegratedCPP() const override { return false; }
+
+ void RenderExtraToolArgs(const JobAction &JA,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Compiler : public Common {
+public:
+ Compiler(const ToolChain &TC) : Common("gcc::Compiler", "gcc frontend", TC) {}
+
+ bool hasGoodDiagnostics() const override { return true; }
+ bool hasIntegratedCPP() const override { return true; }
+
+ void RenderExtraToolArgs(const JobAction &JA,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker : public Common {
+public:
+ Linker(const ToolChain &TC) : Common("gcc::Linker", "linker (via gcc)", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void RenderExtraToolArgs(const JobAction &JA,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+};
+} // end namespace gcc
+
+namespace hexagon {
+// For Hexagon, we do not need to instantiate tools for PreProcess, PreCompile,
+// and Compile; we simply use "clang -cc1" for those actions.
+class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+public:
+ Assembler(const ToolChain &TC)
+ : GnuTool("hexagon::Assembler", "hexagon-as", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void RenderExtraToolArgs(const JobAction &JA,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+public:
+ Linker(const ToolChain &TC) : GnuTool("hexagon::Linker", "hexagon-ld", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ virtual void RenderExtraToolArgs(const JobAction &JA,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace hexagon
+
+namespace amdgpu {
+
+class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+public:
+ Linker(const ToolChain &TC) : GnuTool("amdgpu::Linker", "lld", TC) {}
+ bool isLinkJob() const override { return true; }
+ bool hasIntegratedCPP() const override { return false; }
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+} // end namespace amdgpu
+
+namespace wasm {
+
+class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+public:
+ explicit Linker(const ToolChain &TC);
+ bool isLinkJob() const override;
+ bool hasIntegratedCPP() const override;
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+} // end namespace wasm
+
+namespace arm {
+std::string getARMTargetCPU(StringRef CPU, StringRef Arch,
+ const llvm::Triple &Triple);
+const std::string getARMArch(StringRef Arch, const llvm::Triple &Triple);
+StringRef getARMCPUForMArch(StringRef Arch, const llvm::Triple &Triple);
+StringRef getLLVMArchSuffixForARM(StringRef CPU, StringRef Arch,
+ const llvm::Triple &Triple);
+
+void appendEBLinkFlags(const llvm::opt::ArgList &Args, ArgStringList &CmdArgs,
+ const llvm::Triple &Triple);
+} // end namespace arm
+
+namespace mips {
+typedef enum { NanLegacy = 1, Nan2008 = 2 } NanEncoding;
+
+enum class FloatABI {
+ Invalid,
+ Soft,
+ Hard,
+};
+
+NanEncoding getSupportedNanEncoding(StringRef &CPU);
+void getMipsCPUAndABI(const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple, StringRef &CPUName,
+ StringRef &ABIName);
+std::string getMipsABILibSuffix(const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple);
+bool hasMipsAbiArg(const llvm::opt::ArgList &Args, const char *Value);
+bool isUCLibc(const llvm::opt::ArgList &Args);
+bool isNaN2008(const llvm::opt::ArgList &Args, const llvm::Triple &Triple);
+bool isFPXXDefault(const llvm::Triple &Triple, StringRef CPUName,
+ StringRef ABIName, mips::FloatABI FloatABI);
+bool shouldUseFPXX(const llvm::opt::ArgList &Args, const llvm::Triple &Triple,
+ StringRef CPUName, StringRef ABIName,
+ mips::FloatABI FloatABI);
+} // end namespace mips
+
+namespace ppc {
+bool hasPPCAbiArg(const llvm::opt::ArgList &Args, const char *Value);
+} // end namespace ppc
+
+/// cloudabi -- Directly call GNU Binutils linker
+namespace cloudabi {
+class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+public:
+ Linker(const ToolChain &TC) : GnuTool("cloudabi::Linker", "linker", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace cloudabi
+
+namespace darwin {
+llvm::Triple::ArchType getArchTypeForMachOArchName(StringRef Str);
+void setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str);
+
+class LLVM_LIBRARY_VISIBILITY MachOTool : public Tool {
+ virtual void anchor();
+
+protected:
+ void AddMachOArch(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+
+ const toolchains::MachO &getMachOToolChain() const {
+ return reinterpret_cast<const toolchains::MachO &>(getToolChain());
+ }
+
+public:
+ MachOTool(
+ const char *Name, const char *ShortName, const ToolChain &TC,
+ ResponseFileSupport ResponseSupport = RF_None,
+ llvm::sys::WindowsEncodingMethod ResponseEncoding = llvm::sys::WEM_UTF8,
+ const char *ResponseFlag = "@")
+ : Tool(Name, ShortName, TC, ResponseSupport, ResponseEncoding,
+ ResponseFlag) {}
+};
+
+class LLVM_LIBRARY_VISIBILITY Assembler : public MachOTool {
+public:
+ Assembler(const ToolChain &TC)
+ : MachOTool("darwin::Assembler", "assembler", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker : public MachOTool {
+ bool NeedsTempPath(const InputInfoList &Inputs) const;
+ void AddLinkArgs(Compilation &C, const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ const InputInfoList &Inputs) const;
+
+public:
+ Linker(const ToolChain &TC)
+ : MachOTool("darwin::Linker", "linker", TC, RF_FileList,
+ llvm::sys::WEM_UTF8, "-filelist") {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Lipo : public MachOTool {
+public:
+ Lipo(const ToolChain &TC) : MachOTool("darwin::Lipo", "lipo", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Dsymutil : public MachOTool {
+public:
+ Dsymutil(const ToolChain &TC)
+ : MachOTool("darwin::Dsymutil", "dsymutil", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isDsymutilJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY VerifyDebug : public MachOTool {
+public:
+ VerifyDebug(const ToolChain &TC)
+ : MachOTool("darwin::VerifyDebug", "dwarfdump", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace darwin
+
+/// openbsd -- Directly call GNU Binutils assembler and linker
+namespace openbsd {
+class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+public:
+ Assembler(const ToolChain &TC)
+ : GnuTool("openbsd::Assembler", "assembler", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+public:
+ Linker(const ToolChain &TC) : GnuTool("openbsd::Linker", "linker", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace openbsd
+
+/// bitrig -- Directly call GNU Binutils assembler and linker
+namespace bitrig {
+class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+public:
+ Assembler(const ToolChain &TC)
+ : GnuTool("bitrig::Assembler", "assembler", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+public:
+ Linker(const ToolChain &TC) : GnuTool("bitrig::Linker", "linker", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace bitrig
+
+/// freebsd -- Directly call GNU Binutils assembler and linker
+namespace freebsd {
+class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+public:
+ Assembler(const ToolChain &TC)
+ : GnuTool("freebsd::Assembler", "assembler", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+public:
+ Linker(const ToolChain &TC) : GnuTool("freebsd::Linker", "linker", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace freebsd
+
+/// netbsd -- Directly call GNU Binutils assembler and linker
+namespace netbsd {
+class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+public:
+ Assembler(const ToolChain &TC)
+ : GnuTool("netbsd::Assembler", "assembler", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+public:
+ Linker(const ToolChain &TC) : GnuTool("netbsd::Linker", "linker", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace netbsd
+
+/// Directly call GNU Binutils' assembler and linker.
+namespace gnutools {
+class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+public:
+ Assembler(const ToolChain &TC) : GnuTool("GNU::Assembler", "assembler", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+public:
+ Linker(const ToolChain &TC) : GnuTool("GNU::Linker", "linker", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace gnutools
+
+namespace nacltools {
+class LLVM_LIBRARY_VISIBILITY AssemblerARM : public gnutools::Assembler {
+public:
+ AssemblerARM(const ToolChain &TC) : gnutools::Assembler(TC) {}
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+public:
+ Linker(const ToolChain &TC) : GnuTool("NaCl::Linker", "linker", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace nacltools
+
+/// minix -- Directly call GNU Binutils assembler and linker
+namespace minix {
+class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+public:
+ Assembler(const ToolChain &TC)
+ : GnuTool("minix::Assembler", "assembler", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+public:
+ Linker(const ToolChain &TC) : GnuTool("minix::Linker", "linker", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace minix
+
+/// solaris -- Directly call Solaris assembler and linker
+namespace solaris {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
+public:
+ Assembler(const ToolChain &TC)
+ : Tool("solaris::Assembler", "assembler", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+public:
+ Linker(const ToolChain &TC) : Tool("solaris::Linker", "linker", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace solaris
+
+/// dragonfly -- Directly call GNU Binutils assembler and linker
+namespace dragonfly {
+class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+public:
+ Assembler(const ToolChain &TC)
+ : GnuTool("dragonfly::Assembler", "assembler", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+public:
+ Linker(const ToolChain &TC) : GnuTool("dragonfly::Linker", "linker", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace dragonfly
+
+/// Visual Studio tools.
+namespace visualstudio {
+VersionTuple getMSVCVersion(const Driver *D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args, bool IsWindowsMSVC);
+
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+public:
+ Linker(const ToolChain &TC)
+ : Tool("visualstudio::Linker", "linker", TC, RF_Full,
+ llvm::sys::WEM_UTF16) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Compiler : public Tool {
+public:
+ Compiler(const ToolChain &TC)
+ : Tool("visualstudio::Compiler", "compiler", TC, RF_Full,
+ llvm::sys::WEM_UTF16) {}
+
+ bool hasIntegratedAssembler() const override { return true; }
+ bool hasIntegratedCPP() const override { return true; }
+ bool isLinkJob() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+
+ std::unique_ptr<Command> GetCommand(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const;
+};
+} // end namespace visualstudio
+
+/// MinGW -- Directly call GNU Binutils assembler and linker
+namespace MinGW {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
+public:
+ Assembler(const ToolChain &TC) : Tool("MinGW::Assemble", "assembler", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+public:
+ Linker(const ToolChain &TC) : Tool("MinGW::Linker", "linker", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+
+private:
+ void AddLibGCC(const llvm::opt::ArgList &Args, ArgStringList &CmdArgs) const;
+};
+} // end namespace MinGW
+
+namespace arm {
+enum class FloatABI {
+ Invalid,
+ Soft,
+ SoftFP,
+ Hard,
+};
+
+FloatABI getARMFloatABI(const ToolChain &TC, const llvm::opt::ArgList &Args);
+} // end namespace arm
+
+namespace ppc {
+enum class FloatABI {
+ Invalid,
+ Soft,
+ Hard,
+};
+
+FloatABI getPPCFloatABI(const Driver &D, const llvm::opt::ArgList &Args);
+} // end namespace ppc
+
+namespace XCore {
+// For XCore, we do not need to instantiate tools for PreProcess, PreCompile
+// and Compile; we simply use "clang -cc1" for those actions.
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
+public:
+ Assembler(const ToolChain &TC) : Tool("XCore::Assembler", "XCore-as", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+public:
+ Linker(const ToolChain &TC) : Tool("XCore::Linker", "XCore-ld", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace XCore
+
+namespace CrossWindows {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
+public:
+ Assembler(const ToolChain &TC) : Tool("CrossWindows::Assembler", "as", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+public:
+ Linker(const ToolChain &TC)
+ : Tool("CrossWindows::Linker", "ld", TC, RF_Full) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace CrossWindows
+
+/// SHAVE tools -- Directly call moviCompile and moviAsm
+namespace SHAVE {
+class LLVM_LIBRARY_VISIBILITY Compiler : public Tool {
+public:
+ Compiler(const ToolChain &TC) : Tool("moviCompile", "movicompile", TC) {}
+
+ bool hasIntegratedCPP() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
+public:
+ Assembler(const ToolChain &TC) : Tool("moviAsm", "moviAsm", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; } // not sure.
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace SHAVE
+
+/// The Myriad toolchain uses tools that are in two different namespaces.
+/// The Compiler and Assembler as defined above are in the SHAVE namespace,
+/// whereas the linker, which accepts code for a mixture of Sparc and SHAVE,
+/// is in the Myriad namespace.
+namespace Myriad {
+class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+public:
+ Linker(const ToolChain &TC) : GnuTool("shave::Linker", "ld", TC) {}
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace Myriad
+
+namespace PS4cpu {
+class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+public:
+ Assemble(const ToolChain &TC)
+ : Tool("PS4cpu::Assemble", "assembler", TC, RF_Full) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+public:
+ Link(const ToolChain &TC) : Tool("PS4cpu::Link", "linker", TC, RF_Full) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace PS4cpu
+
+} // end namespace tools
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLS_H
diff --git a/contrib/llvm/tools/clang/lib/Driver/Types.cpp b/contrib/llvm/tools/clang/lib/Driver/Types.cpp
new file mode 100644
index 0000000..c29ce94
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Types.cpp
@@ -0,0 +1,262 @@
+//===--- Types.cpp - Driver input & temporary type information ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Types.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include <cassert>
+#include <string.h>
+
+using namespace clang::driver;
+using namespace clang::driver::types;
+
+struct TypeInfo {
+ const char *Name;
+ const char *Flags;
+ const char *TempSuffix;
+ ID PreprocessedType;
+};
+
+static const TypeInfo TypeInfos[] = {
+#define TYPE(NAME, ID, PP_TYPE, TEMP_SUFFIX, FLAGS) \
+ { NAME, FLAGS, TEMP_SUFFIX, TY_##PP_TYPE, },
+#include "clang/Driver/Types.def"
+#undef TYPE
+};
+static const unsigned numTypes = llvm::array_lengthof(TypeInfos);
+
+static const TypeInfo &getInfo(unsigned id) {
+ assert(id > 0 && id - 1 < numTypes && "Invalid Type ID.");
+ return TypeInfos[id - 1];
+}
+
+const char *types::getTypeName(ID Id) {
+ return getInfo(Id).Name;
+}
+
+types::ID types::getPreprocessedType(ID Id) {
+ return getInfo(Id).PreprocessedType;
+}
+
+const char *types::getTypeTempSuffix(ID Id, bool CLMode) {
+ if (Id == TY_Object && CLMode)
+ return "obj";
+ if (Id == TY_Image && CLMode)
+ return "exe";
+ if (Id == TY_PP_Asm && CLMode)
+ return "asm";
+ return getInfo(Id).TempSuffix;
+}
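+
+// For illustration (not part of the original file): in clang-cl mode the
+// temporaries use MSVC-style suffixes, so getTypeTempSuffix(TY_Object,
+// /*CLMode=*/true) returns "obj" rather than the usual "o" from the table.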
+
+bool types::onlyAssembleType(ID Id) {
+ return strchr(getInfo(Id).Flags, 'a');
+}
+
+bool types::onlyPrecompileType(ID Id) {
+ return strchr(getInfo(Id).Flags, 'p');
+}
+
+bool types::canTypeBeUserSpecified(ID Id) {
+ return strchr(getInfo(Id).Flags, 'u');
+}
+
+bool types::appendSuffixForType(ID Id) {
+ return strchr(getInfo(Id).Flags, 'A');
+}
+
+bool types::canLipoType(ID Id) {
+ return (Id == TY_Nothing ||
+ Id == TY_Image ||
+ Id == TY_Object ||
+ Id == TY_LTO_BC);
+}
+
+bool types::isAcceptedByClang(ID Id) {
+ switch (Id) {
+ default:
+ return false;
+
+ case TY_Asm:
+ case TY_C: case TY_PP_C:
+ case TY_CL:
+ case TY_CUDA: case TY_PP_CUDA:
+ case TY_CUDA_DEVICE:
+ case TY_ObjC: case TY_PP_ObjC: case TY_PP_ObjC_Alias:
+ case TY_CXX: case TY_PP_CXX:
+ case TY_ObjCXX: case TY_PP_ObjCXX: case TY_PP_ObjCXX_Alias:
+ case TY_CHeader: case TY_PP_CHeader:
+ case TY_CLHeader:
+ case TY_ObjCHeader: case TY_PP_ObjCHeader:
+ case TY_CXXHeader: case TY_PP_CXXHeader:
+ case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader:
+ case TY_AST: case TY_ModuleFile:
+ case TY_LLVM_IR: case TY_LLVM_BC:
+ return true;
+ }
+}
+
+bool types::isObjC(ID Id) {
+ switch (Id) {
+ default:
+ return false;
+
+ case TY_ObjC: case TY_PP_ObjC: case TY_PP_ObjC_Alias:
+ case TY_ObjCXX: case TY_PP_ObjCXX:
+ case TY_ObjCHeader: case TY_PP_ObjCHeader:
+ case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader: case TY_PP_ObjCXX_Alias:
+ return true;
+ }
+}
+
+bool types::isCXX(ID Id) {
+ switch (Id) {
+ default:
+ return false;
+
+ case TY_CXX: case TY_PP_CXX:
+ case TY_ObjCXX: case TY_PP_ObjCXX: case TY_PP_ObjCXX_Alias:
+ case TY_CXXHeader: case TY_PP_CXXHeader:
+ case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader:
+ case TY_CUDA: case TY_PP_CUDA: case TY_CUDA_DEVICE:
+ return true;
+ }
+}
+
+bool types::isLLVMIR(ID Id) {
+ switch (Id) {
+ default:
+ return false;
+
+ case TY_LLVM_IR:
+ case TY_LLVM_BC:
+ case TY_LTO_IR:
+ case TY_LTO_BC:
+ return true;
+ }
+}
+
+bool types::isCuda(ID Id) {
+ switch (Id) {
+ default:
+ return false;
+
+ case TY_CUDA:
+ case TY_PP_CUDA:
+ case TY_CUDA_DEVICE:
+ return true;
+ }
+}
+
+types::ID types::lookupTypeForExtension(const char *Ext) {
+ return llvm::StringSwitch<types::ID>(Ext)
+ .Case("c", TY_C)
+ .Case("i", TY_PP_C)
+ .Case("m", TY_ObjC)
+ .Case("M", TY_ObjCXX)
+ .Case("h", TY_CHeader)
+ .Case("C", TY_CXX)
+ .Case("H", TY_CXXHeader)
+ .Case("f", TY_PP_Fortran)
+ .Case("F", TY_Fortran)
+ .Case("s", TY_PP_Asm)
+ .Case("asm", TY_PP_Asm)
+ .Case("S", TY_Asm)
+ .Case("o", TY_Object)
+ .Case("obj", TY_Object)
+ .Case("lib", TY_Object)
+ .Case("ii", TY_PP_CXX)
+ .Case("mi", TY_PP_ObjC)
+ .Case("mm", TY_ObjCXX)
+ .Case("bc", TY_LLVM_BC)
+ .Case("cc", TY_CXX)
+ .Case("CC", TY_CXX)
+ .Case("cl", TY_CL)
+ .Case("cp", TY_CXX)
+ .Case("cu", TY_CUDA)
+ .Case("cui", TY_PP_CUDA)
+ .Case("hh", TY_CXXHeader)
+ .Case("ll", TY_LLVM_IR)
+ .Case("hpp", TY_CXXHeader)
+ .Case("ads", TY_Ada)
+ .Case("adb", TY_Ada)
+ .Case("ast", TY_AST)
+ .Case("c++", TY_CXX)
+ .Case("C++", TY_CXX)
+ .Case("cxx", TY_CXX)
+ .Case("cpp", TY_CXX)
+ .Case("CPP", TY_CXX)
+ .Case("CXX", TY_CXX)
+ .Case("for", TY_PP_Fortran)
+ .Case("FOR", TY_PP_Fortran)
+ .Case("fpp", TY_Fortran)
+ .Case("FPP", TY_Fortran)
+ .Case("f90", TY_PP_Fortran)
+ .Case("f95", TY_PP_Fortran)
+ .Case("F90", TY_Fortran)
+ .Case("F95", TY_Fortran)
+ .Case("mii", TY_PP_ObjCXX)
+ .Case("pcm", TY_ModuleFile)
+ .Case("pch", TY_PCH)
+ .Case("gch", TY_PCH)
+ .Default(TY_INVALID);
+}
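+
+// For illustration, a few of the mappings above in action (hypothetical call
+// sites, not part of the original file):
+//   lookupTypeForExtension("cpp") == TY_CXX
+//   lookupTypeForExtension("s")   == TY_PP_Asm  // already preprocessed
+//   lookupTypeForExtension("S")   == TY_Asm     // still needs the preprocessor
+//   lookupTypeForExtension("zzz") == TY_INVALID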
+
+types::ID types::lookupTypeForTypeSpecifier(const char *Name) {
+ for (unsigned i=0; i<numTypes; ++i) {
+ types::ID Id = (types::ID) (i + 1);
+ if (canTypeBeUserSpecified(Id) &&
+ strcmp(Name, getInfo(Id).Name) == 0)
+ return Id;
+ }
+
+ return TY_INVALID;
+}
+
+// FIXME: Why don't we just put this list in the defs file, eh.
+void types::getCompilationPhases(ID Id, llvm::SmallVectorImpl<phases::ID> &P) {
+ if (Id != TY_Object) {
+ if (getPreprocessedType(Id) != TY_INVALID) {
+ P.push_back(phases::Preprocess);
+ }
+
+ if (onlyPrecompileType(Id)) {
+ P.push_back(phases::Precompile);
+ } else {
+ if (!onlyAssembleType(Id)) {
+ P.push_back(phases::Compile);
+ P.push_back(phases::Backend);
+ }
+ if (Id != TY_CUDA_DEVICE)
+ P.push_back(phases::Assemble);
+ }
+ }
+
+ if (!onlyPrecompileType(Id) && Id != TY_CUDA_DEVICE) {
+ P.push_back(phases::Link);
+ }
+ assert(0 < P.size() && "Not enough phases in list");
+ assert(P.size() <= phases::MaxNumberOfPhases && "Too many phases in list");
+ return;
+}
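+
+// Sketch of the results (derived from the logic above, not from the original
+// file): a plain C source, TY_C, collects the full pipeline
+//   { Preprocess, Compile, Backend, Assemble, Link }
+// while a type flagged only-precompile (e.g. TY_CHeader, assuming the usual
+// flags in Types.def) collects { Preprocess, Precompile }, and TY_Object
+// contributes only { Link }.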
+
+ID types::lookupCXXTypeForCType(ID Id) {
+ switch (Id) {
+ default:
+ return Id;
+
+ case types::TY_C:
+ return types::TY_CXX;
+ case types::TY_PP_C:
+ return types::TY_PP_CXX;
+ case types::TY_CHeader:
+ return types::TY_CXXHeader;
+ case types::TY_PP_CHeader:
+ return types::TY_PP_CXXHeader;
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Edit/Commit.cpp b/contrib/llvm/tools/clang/lib/Edit/Commit.cpp
new file mode 100644
index 0000000..cb7a784
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Edit/Commit.cpp
@@ -0,0 +1,346 @@
+//===----- Commit.cpp - A unit of edits -----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Edit/Commit.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Edit/EditedSource.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/PPConditionalDirectiveRecord.h"
+
+using namespace clang;
+using namespace edit;
+
+SourceLocation Commit::Edit::getFileLocation(SourceManager &SM) const {
+ SourceLocation Loc = SM.getLocForStartOfFile(Offset.getFID());
+ Loc = Loc.getLocWithOffset(Offset.getOffset());
+ assert(Loc.isFileID());
+ return Loc;
+}
+
+CharSourceRange Commit::Edit::getFileRange(SourceManager &SM) const {
+ SourceLocation Loc = getFileLocation(SM);
+ return CharSourceRange::getCharRange(Loc, Loc.getLocWithOffset(Length));
+}
+
+CharSourceRange Commit::Edit::getInsertFromRange(SourceManager &SM) const {
+ SourceLocation Loc = SM.getLocForStartOfFile(InsertFromRangeOffs.getFID());
+ Loc = Loc.getLocWithOffset(InsertFromRangeOffs.getOffset());
+ assert(Loc.isFileID());
+ return CharSourceRange::getCharRange(Loc, Loc.getLocWithOffset(Length));
+}
+
+Commit::Commit(EditedSource &Editor)
+ : SourceMgr(Editor.getSourceManager()), LangOpts(Editor.getLangOpts()),
+ PPRec(Editor.getPPCondDirectiveRecord()),
+ Editor(&Editor), IsCommitable(true) { }
+
+bool Commit::insert(SourceLocation loc, StringRef text,
+ bool afterToken, bool beforePreviousInsertions) {
+ if (text.empty())
+ return true;
+
+ FileOffset Offs;
+ if ((!afterToken && !canInsert(loc, Offs)) ||
+ ( afterToken && !canInsertAfterToken(loc, Offs, loc))) {
+ IsCommitable = false;
+ return false;
+ }
+
+ addInsert(loc, Offs, text, beforePreviousInsertions);
+ return true;
+}
+
+bool Commit::insertFromRange(SourceLocation loc,
+ CharSourceRange range,
+ bool afterToken, bool beforePreviousInsertions) {
+ FileOffset RangeOffs;
+ unsigned RangeLen;
+ if (!canRemoveRange(range, RangeOffs, RangeLen)) {
+ IsCommitable = false;
+ return false;
+ }
+
+ FileOffset Offs;
+ if ((!afterToken && !canInsert(loc, Offs)) ||
+ ( afterToken && !canInsertAfterToken(loc, Offs, loc))) {
+ IsCommitable = false;
+ return false;
+ }
+
+ if (PPRec &&
+ PPRec->areInDifferentConditionalDirectiveRegion(loc, range.getBegin())) {
+ IsCommitable = false;
+ return false;
+ }
+
+ addInsertFromRange(loc, Offs, RangeOffs, RangeLen, beforePreviousInsertions);
+ return true;
+}
+
+bool Commit::remove(CharSourceRange range) {
+ FileOffset Offs;
+ unsigned Len;
+ if (!canRemoveRange(range, Offs, Len)) {
+ IsCommitable = false;
+ return false;
+ }
+
+ addRemove(range.getBegin(), Offs, Len);
+ return true;
+}
+
+bool Commit::insertWrap(StringRef before, CharSourceRange range,
+ StringRef after) {
+ bool commitableBefore = insert(range.getBegin(), before, /*afterToken=*/false,
+ /*beforePreviousInsertions=*/true);
+ bool commitableAfter;
+ if (range.isTokenRange())
+ commitableAfter = insertAfterToken(range.getEnd(), after);
+ else
+ commitableAfter = insert(range.getEnd(), after);
+
+ return commitableBefore && commitableAfter;
+}
+
+bool Commit::replace(CharSourceRange range, StringRef text) {
+ if (text.empty())
+ return remove(range);
+
+ FileOffset Offs;
+ unsigned Len;
+ if (!canInsert(range.getBegin(), Offs) || !canRemoveRange(range, Offs, Len)) {
+ IsCommitable = false;
+ return false;
+ }
+
+ addRemove(range.getBegin(), Offs, Len);
+ addInsert(range.getBegin(), Offs, text, false);
+ return true;
+}
+
+bool Commit::replaceWithInner(CharSourceRange range,
+ CharSourceRange replacementRange) {
+ FileOffset OuterBegin;
+ unsigned OuterLen;
+ if (!canRemoveRange(range, OuterBegin, OuterLen)) {
+ IsCommitable = false;
+ return false;
+ }
+
+ FileOffset InnerBegin;
+ unsigned InnerLen;
+ if (!canRemoveRange(replacementRange, InnerBegin, InnerLen)) {
+ IsCommitable = false;
+ return false;
+ }
+
+ FileOffset OuterEnd = OuterBegin.getWithOffset(OuterLen);
+ FileOffset InnerEnd = InnerBegin.getWithOffset(InnerLen);
+ if (OuterBegin.getFID() != InnerBegin.getFID() ||
+ InnerBegin < OuterBegin ||
+ InnerBegin > OuterEnd ||
+ InnerEnd > OuterEnd) {
+ IsCommitable = false;
+ return false;
+ }
+
+ addRemove(range.getBegin(),
+ OuterBegin, InnerBegin.getOffset() - OuterBegin.getOffset());
+ addRemove(replacementRange.getEnd(),
+ InnerEnd, OuterEnd.getOffset() - InnerEnd.getOffset());
+ return true;
+}
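+
+// Sketch of the effect (hypothetical source text): given the outer range
+// "foo(bar)" and the inner range "bar", the two addRemove() calls above
+// delete "foo(" and ")", leaving just the inner text in place.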
+
+bool Commit::replaceText(SourceLocation loc, StringRef text,
+ StringRef replacementText) {
+ if (text.empty() || replacementText.empty())
+ return true;
+
+ FileOffset Offs;
+ unsigned Len;
+ if (!canReplaceText(loc, replacementText, Offs, Len)) {
+ IsCommitable = false;
+ return false;
+ }
+
+ addRemove(loc, Offs, Len);
+ addInsert(loc, Offs, text, false);
+ return true;
+}
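+
+// For illustration (hypothetical buffer): replaceText(loc, "newText",
+// "oldText") only records edits when the source at 'loc' literally starts
+// with "oldText"; otherwise the commit is marked as not committable.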
+
+void Commit::addInsert(SourceLocation OrigLoc, FileOffset Offs, StringRef text,
+ bool beforePreviousInsertions) {
+ if (text.empty())
+ return;
+
+ Edit data;
+ data.Kind = Act_Insert;
+ data.OrigLoc = OrigLoc;
+ data.Offset = Offs;
+ data.Text = text.copy(StrAlloc);
+ data.BeforePrev = beforePreviousInsertions;
+ CachedEdits.push_back(data);
+}
+
+void Commit::addInsertFromRange(SourceLocation OrigLoc, FileOffset Offs,
+ FileOffset RangeOffs, unsigned RangeLen,
+ bool beforePreviousInsertions) {
+ if (RangeLen == 0)
+ return;
+
+ Edit data;
+ data.Kind = Act_InsertFromRange;
+ data.OrigLoc = OrigLoc;
+ data.Offset = Offs;
+ data.InsertFromRangeOffs = RangeOffs;
+ data.Length = RangeLen;
+ data.BeforePrev = beforePreviousInsertions;
+ CachedEdits.push_back(data);
+}
+
+void Commit::addRemove(SourceLocation OrigLoc,
+ FileOffset Offs, unsigned Len) {
+ if (Len == 0)
+ return;
+
+ Edit data;
+ data.Kind = Act_Remove;
+ data.OrigLoc = OrigLoc;
+ data.Offset = Offs;
+ data.Length = Len;
+ CachedEdits.push_back(data);
+}
+
+bool Commit::canInsert(SourceLocation loc, FileOffset &offs) {
+ if (loc.isInvalid())
+ return false;
+
+ if (loc.isMacroID())
+ isAtStartOfMacroExpansion(loc, &loc);
+
+ const SourceManager &SM = SourceMgr;
+ while (SM.isMacroArgExpansion(loc))
+ loc = SM.getImmediateSpellingLoc(loc);
+
+ if (loc.isMacroID())
+ if (!isAtStartOfMacroExpansion(loc, &loc))
+ return false;
+
+ if (SM.isInSystemHeader(loc))
+ return false;
+
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);
+ if (locInfo.first.isInvalid())
+ return false;
+ offs = FileOffset(locInfo.first, locInfo.second);
+ return canInsertInOffset(loc, offs);
+}
+
+bool Commit::canInsertAfterToken(SourceLocation loc, FileOffset &offs,
+ SourceLocation &AfterLoc) {
+ if (loc.isInvalid())
+    return false;
+
+ SourceLocation spellLoc = SourceMgr.getSpellingLoc(loc);
+ unsigned tokLen = Lexer::MeasureTokenLength(spellLoc, SourceMgr, LangOpts);
+ AfterLoc = loc.getLocWithOffset(tokLen);
+
+ if (loc.isMacroID())
+ isAtEndOfMacroExpansion(loc, &loc);
+
+ const SourceManager &SM = SourceMgr;
+ while (SM.isMacroArgExpansion(loc))
+ loc = SM.getImmediateSpellingLoc(loc);
+
+ if (loc.isMacroID())
+ if (!isAtEndOfMacroExpansion(loc, &loc))
+ return false;
+
+ if (SM.isInSystemHeader(loc))
+ return false;
+
+ loc = Lexer::getLocForEndOfToken(loc, 0, SourceMgr, LangOpts);
+ if (loc.isInvalid())
+ return false;
+
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);
+ if (locInfo.first.isInvalid())
+ return false;
+ offs = FileOffset(locInfo.first, locInfo.second);
+ return canInsertInOffset(loc, offs);
+}
+
+bool Commit::canInsertInOffset(SourceLocation OrigLoc, FileOffset Offs) {
+ for (unsigned i = 0, e = CachedEdits.size(); i != e; ++i) {
+ Edit &act = CachedEdits[i];
+ if (act.Kind == Act_Remove) {
+ if (act.Offset.getFID() == Offs.getFID() &&
+ Offs > act.Offset && Offs < act.Offset.getWithOffset(act.Length))
+ return false; // position has been removed.
+ }
+ }
+
+ if (!Editor)
+ return true;
+ return Editor->canInsertInOffset(OrigLoc, Offs);
+}
+
+bool Commit::canRemoveRange(CharSourceRange range,
+ FileOffset &Offs, unsigned &Len) {
+ const SourceManager &SM = SourceMgr;
+ range = Lexer::makeFileCharRange(range, SM, LangOpts);
+ if (range.isInvalid())
+ return false;
+
+ if (range.getBegin().isMacroID() || range.getEnd().isMacroID())
+ return false;
+ if (SM.isInSystemHeader(range.getBegin()) ||
+ SM.isInSystemHeader(range.getEnd()))
+ return false;
+
+ if (PPRec && PPRec->rangeIntersectsConditionalDirective(range.getAsRange()))
+ return false;
+
+ std::pair<FileID, unsigned> beginInfo = SM.getDecomposedLoc(range.getBegin());
+ std::pair<FileID, unsigned> endInfo = SM.getDecomposedLoc(range.getEnd());
+ if (beginInfo.first != endInfo.first ||
+ beginInfo.second > endInfo.second)
+ return false;
+
+ Offs = FileOffset(beginInfo.first, beginInfo.second);
+ Len = endInfo.second - beginInfo.second;
+ return true;
+}
+
+bool Commit::canReplaceText(SourceLocation loc, StringRef text,
+ FileOffset &Offs, unsigned &Len) {
+ assert(!text.empty());
+
+ if (!canInsert(loc, Offs))
+ return false;
+
+ // Try to load the file buffer.
+ bool invalidTemp = false;
+ StringRef file = SourceMgr.getBufferData(Offs.getFID(), &invalidTemp);
+ if (invalidTemp)
+ return false;
+
+ Len = text.size();
+ return file.substr(Offs.getOffset()).startswith(text);
+}
+
+bool Commit::isAtStartOfMacroExpansion(SourceLocation loc,
+ SourceLocation *MacroBegin) const {
+ return Lexer::isAtStartOfMacroExpansion(loc, SourceMgr, LangOpts, MacroBegin);
+}
+bool Commit::isAtEndOfMacroExpansion(SourceLocation loc,
+ SourceLocation *MacroEnd) const {
+ return Lexer::isAtEndOfMacroExpansion(loc, SourceMgr, LangOpts, MacroEnd);
+}
diff --git a/contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp b/contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp
new file mode 100644
index 0000000..5292a58
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp
@@ -0,0 +1,458 @@
+//===----- EditedSource.cpp - Collection of source edits ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Edit/EditedSource.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Edit/Commit.h"
+#include "clang/Edit/EditsReceiver.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Twine.h"
+
+using namespace clang;
+using namespace edit;
+
+void EditsReceiver::remove(CharSourceRange range) {
+ replace(range, StringRef());
+}
+
+void EditedSource::deconstructMacroArgLoc(SourceLocation Loc,
+ SourceLocation &ExpansionLoc,
+ IdentifierInfo *&II) {
+ assert(SourceMgr.isMacroArgExpansion(Loc));
+ SourceLocation DefArgLoc = SourceMgr.getImmediateExpansionRange(Loc).first;
+ ExpansionLoc = SourceMgr.getImmediateExpansionRange(DefArgLoc).first;
+ SmallString<20> Buf;
+ StringRef ArgName = Lexer::getSpelling(SourceMgr.getSpellingLoc(DefArgLoc),
+ Buf, SourceMgr, LangOpts);
+ II = nullptr;
+ if (!ArgName.empty()) {
+ II = &IdentTable.get(ArgName);
+ }
+}
+
+void EditedSource::startingCommit() {}
+
+void EditedSource::finishedCommit() {
+ for (auto &ExpArg : CurrCommitMacroArgExps) {
+ SourceLocation ExpLoc;
+ IdentifierInfo *II;
+ std::tie(ExpLoc, II) = ExpArg;
+ auto &ArgNames = ExpansionToArgMap[ExpLoc.getRawEncoding()];
+ if (std::find(ArgNames.begin(), ArgNames.end(), II) == ArgNames.end()) {
+ ArgNames.push_back(II);
+ }
+ }
+ CurrCommitMacroArgExps.clear();
+}
+
+StringRef EditedSource::copyString(const Twine &twine) {
+ SmallString<128> Data;
+ return copyString(twine.toStringRef(Data));
+}
+
+bool EditedSource::canInsertInOffset(SourceLocation OrigLoc, FileOffset Offs) {
+ FileEditsTy::iterator FA = getActionForOffset(Offs);
+ if (FA != FileEdits.end()) {
+ if (FA->first != Offs)
+ return false; // position has been removed.
+ }
+
+ if (SourceMgr.isMacroArgExpansion(OrigLoc)) {
+ IdentifierInfo *II;
+ SourceLocation ExpLoc;
+ deconstructMacroArgLoc(OrigLoc, ExpLoc, II);
+ auto I = ExpansionToArgMap.find(ExpLoc.getRawEncoding());
+ if (I != ExpansionToArgMap.end() &&
+ std::find(I->second.begin(), I->second.end(), II) != I->second.end()) {
+ // Trying to write in a macro argument input that has already been
+ // written by a previous commit for another expansion of the same macro
+ // argument name. For example:
+ //
+ // \code
+ // #define MAC(x) ((x)+(x))
+ // MAC(a)
+ // \endcode
+ //
+ // A commit modified the macro argument 'a' due to the first '(x)'
+ // expansion inside the macro definition, and a subsequent commit tried
+ // to modify 'a' again for the second '(x)' expansion. The edits of the
+ // second commit will be rejected.
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool EditedSource::commitInsert(SourceLocation OrigLoc,
+ FileOffset Offs, StringRef text,
+ bool beforePreviousInsertions) {
+ if (!canInsertInOffset(OrigLoc, Offs))
+ return false;
+ if (text.empty())
+ return true;
+
+ if (SourceMgr.isMacroArgExpansion(OrigLoc)) {
+ IdentifierInfo *II;
+ SourceLocation ExpLoc;
+ deconstructMacroArgLoc(OrigLoc, ExpLoc, II);
+ if (II)
+ CurrCommitMacroArgExps.emplace_back(ExpLoc, II);
+ }
+
+ FileEdit &FA = FileEdits[Offs];
+ if (FA.Text.empty()) {
+ FA.Text = copyString(text);
+ return true;
+ }
+
+ if (beforePreviousInsertions)
+ FA.Text = copyString(Twine(text) + FA.Text);
+ else
+ FA.Text = copyString(Twine(FA.Text) + text);
+
+ return true;
+}
+
+bool EditedSource::commitInsertFromRange(SourceLocation OrigLoc,
+ FileOffset Offs,
+ FileOffset InsertFromRangeOffs, unsigned Len,
+ bool beforePreviousInsertions) {
+ if (Len == 0)
+ return true;
+
+ SmallString<128> StrVec;
+ FileOffset BeginOffs = InsertFromRangeOffs;
+ FileOffset EndOffs = BeginOffs.getWithOffset(Len);
+ FileEditsTy::iterator I = FileEdits.upper_bound(BeginOffs);
+ if (I != FileEdits.begin())
+ --I;
+
+ for (; I != FileEdits.end(); ++I) {
+ FileEdit &FA = I->second;
+ FileOffset B = I->first;
+ FileOffset E = B.getWithOffset(FA.RemoveLen);
+
+ if (BeginOffs == B)
+ break;
+
+ if (BeginOffs < E) {
+ if (BeginOffs > B) {
+ BeginOffs = E;
+ ++I;
+ }
+ break;
+ }
+ }
+
+ for (; I != FileEdits.end() && EndOffs > I->first; ++I) {
+ FileEdit &FA = I->second;
+ FileOffset B = I->first;
+ FileOffset E = B.getWithOffset(FA.RemoveLen);
+
+ if (BeginOffs < B) {
+ bool Invalid = false;
+ StringRef text = getSourceText(BeginOffs, B, Invalid);
+ if (Invalid)
+ return false;
+ StrVec += text;
+ }
+ StrVec += FA.Text;
+ BeginOffs = E;
+ }
+
+ if (BeginOffs < EndOffs) {
+ bool Invalid = false;
+ StringRef text = getSourceText(BeginOffs, EndOffs, Invalid);
+ if (Invalid)
+ return false;
+ StrVec += text;
+ }
+
+ return commitInsert(OrigLoc, Offs, StrVec, beforePreviousInsertions);
+}
+
+void EditedSource::commitRemove(SourceLocation OrigLoc,
+ FileOffset BeginOffs, unsigned Len) {
+ if (Len == 0)
+ return;
+
+ FileOffset EndOffs = BeginOffs.getWithOffset(Len);
+ FileEditsTy::iterator I = FileEdits.upper_bound(BeginOffs);
+ if (I != FileEdits.begin())
+ --I;
+
+ for (; I != FileEdits.end(); ++I) {
+ FileEdit &FA = I->second;
+ FileOffset B = I->first;
+ FileOffset E = B.getWithOffset(FA.RemoveLen);
+
+ if (BeginOffs < E)
+ break;
+ }
+
+ FileOffset TopBegin, TopEnd;
+ FileEdit *TopFA = nullptr;
+
+ if (I == FileEdits.end()) {
+ FileEditsTy::iterator
+ NewI = FileEdits.insert(I, std::make_pair(BeginOffs, FileEdit()));
+ NewI->second.RemoveLen = Len;
+ return;
+ }
+
+ FileEdit &FA = I->second;
+ FileOffset B = I->first;
+ FileOffset E = B.getWithOffset(FA.RemoveLen);
+ if (BeginOffs < B) {
+ FileEditsTy::iterator
+ NewI = FileEdits.insert(I, std::make_pair(BeginOffs, FileEdit()));
+ TopBegin = BeginOffs;
+ TopEnd = EndOffs;
+ TopFA = &NewI->second;
+ TopFA->RemoveLen = Len;
+ } else {
+ TopBegin = B;
+ TopEnd = E;
+ TopFA = &I->second;
+ if (TopEnd >= EndOffs)
+ return;
+ unsigned diff = EndOffs.getOffset() - TopEnd.getOffset();
+ TopEnd = EndOffs;
+ TopFA->RemoveLen += diff;
+ if (B == BeginOffs)
+ TopFA->Text = StringRef();
+ ++I;
+ }
+
+ while (I != FileEdits.end()) {
+ FileEdit &FA = I->second;
+ FileOffset B = I->first;
+ FileOffset E = B.getWithOffset(FA.RemoveLen);
+
+ if (B >= TopEnd)
+ break;
+
+ if (E <= TopEnd) {
+ FileEdits.erase(I++);
+ continue;
+ }
+
+ if (B < TopEnd) {
+ unsigned diff = E.getOffset() - TopEnd.getOffset();
+ TopEnd = E;
+ TopFA->RemoveLen += diff;
+ FileEdits.erase(I);
+ }
+
+ break;
+ }
+}
+
+bool EditedSource::commit(const Commit &commit) {
+ if (!commit.isCommitable())
+ return false;
+
+ struct CommitRAII {
+ EditedSource &Editor;
+ CommitRAII(EditedSource &Editor) : Editor(Editor) {
+ Editor.startingCommit();
+ }
+ ~CommitRAII() {
+ Editor.finishedCommit();
+ }
+ } CommitRAII(*this);
+
+ for (edit::Commit::edit_iterator
+ I = commit.edit_begin(), E = commit.edit_end(); I != E; ++I) {
+ const edit::Commit::Edit &edit = *I;
+ switch (edit.Kind) {
+ case edit::Commit::Act_Insert:
+ commitInsert(edit.OrigLoc, edit.Offset, edit.Text, edit.BeforePrev);
+ break;
+ case edit::Commit::Act_InsertFromRange:
+ commitInsertFromRange(edit.OrigLoc, edit.Offset,
+ edit.InsertFromRangeOffs, edit.Length,
+ edit.BeforePrev);
+ break;
+ case edit::Commit::Act_Remove:
+ commitRemove(edit.OrigLoc, edit.Offset, edit.Length);
+ break;
+ }
+ }
+
+ return true;
+}
+
+/// \brief Returns true if it is ok to make the two given characters adjacent.
+static bool canBeJoined(char left, char right, const LangOptions &LangOpts) {
+ // FIXME: Should use TokenConcatenation to make sure we don't allow stuff like
+ // making two '<' adjacent.
+ return !(Lexer::isIdentifierBodyChar(left, LangOpts) &&
+ Lexer::isIdentifierBodyChar(right, LangOpts));
+}
+
+/// \brief Returns true if it is ok to eliminate the trailing whitespace between
+/// the given characters.
+static bool canRemoveWhitespace(char left, char beforeWSpace, char right,
+ const LangOptions &LangOpts) {
+ if (!canBeJoined(left, right, LangOpts))
+ return false;
+ if (isWhitespace(left) || isWhitespace(right))
+ return true;
+ if (canBeJoined(beforeWSpace, right, LangOpts))
+ return false; // the whitespace was intentional, keep it.
+ return true;
+}
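+
+// For illustration (assumed inputs, not from the original file):
+//   canBeJoined('x', 'y', Opts) is false, since gluing two identifier
+//   characters would fuse tokens, while canBeJoined(')', ';', Opts) is true.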
+
+/// \brief Check the range that we are going to remove and:
+/// -Remove any trailing whitespace if possible.
+/// -Insert a space if removing the range is going to mess up the source tokens.
+static void adjustRemoval(const SourceManager &SM, const LangOptions &LangOpts,
+ SourceLocation Loc, FileOffset offs,
+ unsigned &len, StringRef &text) {
+ assert(len && text.empty());
+ SourceLocation BeginTokLoc = Lexer::GetBeginningOfToken(Loc, SM, LangOpts);
+ if (BeginTokLoc != Loc)
+ return; // the range is not at the beginning of a token, keep the range.
+
+ bool Invalid = false;
+ StringRef buffer = SM.getBufferData(offs.getFID(), &Invalid);
+ if (Invalid)
+ return;
+
+ unsigned begin = offs.getOffset();
+ unsigned end = begin + len;
+
+ // Do not try to extend the removal if we're at the end of the buffer already.
+ if (end == buffer.size())
+ return;
+
+ assert(begin < buffer.size() && end < buffer.size() && "Invalid range!");
+
+ // FIXME: Remove newline.
+
+ if (begin == 0) {
+ if (buffer[end] == ' ')
+ ++len;
+ return;
+ }
+
+ if (buffer[end] == ' ') {
+ assert((end + 1 != buffer.size() || buffer.data()[end + 1] == 0) &&
+ "buffer not zero-terminated!");
+ if (canRemoveWhitespace(/*left=*/buffer[begin-1],
+ /*beforeWSpace=*/buffer[end-1],
+ /*right=*/buffer.data()[end + 1], // zero-terminated
+ LangOpts))
+ ++len;
+ return;
+ }
+
+ if (!canBeJoined(buffer[begin-1], buffer[end], LangOpts))
+ text = " ";
+}
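+
+// Sketch of the adjustment (assumed buffer contents): deleting "b" from
+// "a+b c" gives left='+', beforeWSpace='b', right='c'; the space only
+// separated 'b' from 'c', so it is deleted as well, yielding "a+c".
+// Conversely, deleting "+" from "a+b" would fuse two identifiers, so a
+// single space is inserted instead.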
+
+static void applyRewrite(EditsReceiver &receiver,
+ StringRef text, FileOffset offs, unsigned len,
+ const SourceManager &SM, const LangOptions &LangOpts) {
+ assert(offs.getFID().isValid());
+ SourceLocation Loc = SM.getLocForStartOfFile(offs.getFID());
+ Loc = Loc.getLocWithOffset(offs.getOffset());
+ assert(Loc.isFileID());
+
+ if (text.empty())
+ adjustRemoval(SM, LangOpts, Loc, offs, len, text);
+
+ CharSourceRange range = CharSourceRange::getCharRange(Loc,
+ Loc.getLocWithOffset(len));
+
+ if (text.empty()) {
+ assert(len);
+ receiver.remove(range);
+ return;
+ }
+
+ if (len)
+ receiver.replace(range, text);
+ else
+ receiver.insert(Loc, text);
+}
+
+void EditedSource::applyRewrites(EditsReceiver &receiver) {
+ SmallString<128> StrVec;
+ FileOffset CurOffs, CurEnd;
+ unsigned CurLen;
+
+ if (FileEdits.empty())
+ return;
+
+ FileEditsTy::iterator I = FileEdits.begin();
+ CurOffs = I->first;
+ StrVec = I->second.Text;
+ CurLen = I->second.RemoveLen;
+ CurEnd = CurOffs.getWithOffset(CurLen);
+ ++I;
+
+ for (FileEditsTy::iterator E = FileEdits.end(); I != E; ++I) {
+ FileOffset offs = I->first;
+ FileEdit act = I->second;
+ assert(offs >= CurEnd);
+
+ if (offs == CurEnd) {
+ StrVec += act.Text;
+ CurLen += act.RemoveLen;
+      CurEnd = CurEnd.getWithOffset(act.RemoveLen);
+ continue;
+ }
+
+ applyRewrite(receiver, StrVec, CurOffs, CurLen, SourceMgr, LangOpts);
+ CurOffs = offs;
+ StrVec = act.Text;
+ CurLen = act.RemoveLen;
+ CurEnd = CurOffs.getWithOffset(CurLen);
+ }
+
+ applyRewrite(receiver, StrVec, CurOffs, CurLen, SourceMgr, LangOpts);
+}
+
+void EditedSource::clearRewrites() {
+ FileEdits.clear();
+ StrAlloc.Reset();
+}
+
+StringRef EditedSource::getSourceText(FileOffset BeginOffs, FileOffset EndOffs,
+ bool &Invalid) {
+ assert(BeginOffs.getFID() == EndOffs.getFID());
+ assert(BeginOffs <= EndOffs);
+ SourceLocation BLoc = SourceMgr.getLocForStartOfFile(BeginOffs.getFID());
+ BLoc = BLoc.getLocWithOffset(BeginOffs.getOffset());
+ assert(BLoc.isFileID());
+ SourceLocation
+ ELoc = BLoc.getLocWithOffset(EndOffs.getOffset() - BeginOffs.getOffset());
+ return Lexer::getSourceText(CharSourceRange::getCharRange(BLoc, ELoc),
+ SourceMgr, LangOpts, &Invalid);
+}
+
+EditedSource::FileEditsTy::iterator
+EditedSource::getActionForOffset(FileOffset Offs) {
+ FileEditsTy::iterator I = FileEdits.upper_bound(Offs);
+ if (I == FileEdits.begin())
+ return FileEdits.end();
+ --I;
+ FileEdit &FA = I->second;
+ FileOffset B = I->first;
+ FileOffset E = B.getWithOffset(FA.RemoveLen);
+ if (Offs >= B && Offs < E)
+ return I;
+
+ return FileEdits.end();
+}
diff --git a/contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp b/contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
new file mode 100644
index 0000000..9f71168
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
@@ -0,0 +1,1170 @@
+//===--- RewriteObjCFoundationAPI.cpp - Foundation API Rewriter -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Rewrites legacy method calls to modern syntax.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Edit/Rewriters.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/NSAPI.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Edit/Commit.h"
+#include "clang/Lex/Lexer.h"
+
+using namespace clang;
+using namespace edit;
+
+static bool checkForLiteralCreation(const ObjCMessageExpr *Msg,
+ IdentifierInfo *&ClassId,
+ const LangOptions &LangOpts) {
+ if (!Msg || Msg->isImplicit() || !Msg->getMethodDecl())
+ return false;
+
+ const ObjCInterfaceDecl *Receiver = Msg->getReceiverInterface();
+ if (!Receiver)
+ return false;
+ ClassId = Receiver->getIdentifier();
+
+ if (Msg->getReceiverKind() == ObjCMessageExpr::Class)
+ return true;
+
+ // When in ARC mode we also convert "[[.. alloc] init]" messages to literals,
+ // since the change from +1 to +0 will be handled fine by ARC.
+ if (LangOpts.ObjCAutoRefCount) {
+ if (Msg->getReceiverKind() == ObjCMessageExpr::Instance) {
+ if (const ObjCMessageExpr *Rec = dyn_cast<ObjCMessageExpr>(
+ Msg->getInstanceReceiver()->IgnoreParenImpCasts())) {
+ if (Rec->getMethodFamily() == OMF_alloc)
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// rewriteObjCRedundantCallWithLiteral.
+//===----------------------------------------------------------------------===//
+
+bool edit::rewriteObjCRedundantCallWithLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ IdentifierInfo *II = nullptr;
+ if (!checkForLiteralCreation(Msg, II, NS.getASTContext().getLangOpts()))
+ return false;
+ if (Msg->getNumArgs() != 1)
+ return false;
+
+ const Expr *Arg = Msg->getArg(0)->IgnoreParenImpCasts();
+ Selector Sel = Msg->getSelector();
+
+ if ((isa<ObjCStringLiteral>(Arg) &&
+ NS.getNSClassId(NSAPI::ClassId_NSString) == II &&
+ (NS.getNSStringSelector(NSAPI::NSStr_stringWithString) == Sel ||
+ NS.getNSStringSelector(NSAPI::NSStr_initWithString) == Sel)) ||
+
+ (isa<ObjCArrayLiteral>(Arg) &&
+ NS.getNSClassId(NSAPI::ClassId_NSArray) == II &&
+ (NS.getNSArraySelector(NSAPI::NSArr_arrayWithArray) == Sel ||
+ NS.getNSArraySelector(NSAPI::NSArr_initWithArray) == Sel)) ||
+
+ (isa<ObjCDictionaryLiteral>(Arg) &&
+ NS.getNSClassId(NSAPI::ClassId_NSDictionary) == II &&
+ (NS.getNSDictionarySelector(
+ NSAPI::NSDict_dictionaryWithDictionary) == Sel ||
+ NS.getNSDictionarySelector(NSAPI::NSDict_initWithDictionary) == Sel))) {
+
+ commit.replaceWithInner(Msg->getSourceRange(),
+ Msg->getArg(0)->getSourceRange());
+ return true;
+ }
+
+ return false;
+}
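+
+// Example of the rewrite performed above (hypothetical input):
+//   [NSArray arrayWithArray:@[@1, @2]]  -->  @[@1, @2]
+// i.e. the redundant wrapper message is dropped, keeping only the literal.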
+
+//===----------------------------------------------------------------------===//
+// rewriteToObjCSubscriptSyntax.
+//===----------------------------------------------------------------------===//
+
+/// \brief Check for classes that accept 'objectForKey:' (or the other selectors
+/// that the migrator handles) but return their instances as 'id', resulting
+/// in the compiler resolving 'objectForKey:' as the method from NSDictionary.
+///
+/// When checking if we can convert to subscripting syntax, check whether
+/// the receiver is a result of a class method from a hardcoded list of
+/// such classes. In such a case return the specific class as the interface
+/// of the receiver.
+///
+/// FIXME: Remove this when these classes start using 'instancetype'.
+static const ObjCInterfaceDecl *
+maybeAdjustInterfaceForSubscriptingCheck(const ObjCInterfaceDecl *IFace,
+ const Expr *Receiver,
+ ASTContext &Ctx) {
+ assert(IFace && Receiver);
+
+ // If the receiver has type 'id'...
+ if (!Ctx.isObjCIdType(Receiver->getType().getUnqualifiedType()))
+ return IFace;
+
+ const ObjCMessageExpr *
+ InnerMsg = dyn_cast<ObjCMessageExpr>(Receiver->IgnoreParenCasts());
+ if (!InnerMsg)
+ return IFace;
+
+ QualType ClassRec;
+ switch (InnerMsg->getReceiverKind()) {
+ case ObjCMessageExpr::Instance:
+ case ObjCMessageExpr::SuperInstance:
+ return IFace;
+
+ case ObjCMessageExpr::Class:
+ ClassRec = InnerMsg->getClassReceiver();
+ break;
+ case ObjCMessageExpr::SuperClass:
+ ClassRec = InnerMsg->getSuperType();
+ break;
+ }
+
+ if (ClassRec.isNull())
+ return IFace;
+
+ // ...and it is the result of a class message...
+
+ const ObjCObjectType *ObjTy = ClassRec->getAs<ObjCObjectType>();
+ if (!ObjTy)
+ return IFace;
+ const ObjCInterfaceDecl *OID = ObjTy->getInterface();
+
+ // ...and the receiving class is NSMapTable or NSLocale, return that
+ // class as the receiving interface.
+ if (OID->getName() == "NSMapTable" ||
+ OID->getName() == "NSLocale")
+ return OID;
+
+ return IFace;
+}
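+
+// For illustration (hypothetical code): in
+//   [[NSLocale someClassMethodReturningId] objectForKey:key]
+// the receiver is typed 'id', so without the adjustment above the
+// subscripting check would run against NSDictionary instead of NSLocale.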
+
+static bool canRewriteToSubscriptSyntax(const ObjCInterfaceDecl *&IFace,
+ const ObjCMessageExpr *Msg,
+ ASTContext &Ctx,
+ Selector subscriptSel) {
+ const Expr *Rec = Msg->getInstanceReceiver();
+ if (!Rec)
+ return false;
+ IFace = maybeAdjustInterfaceForSubscriptingCheck(IFace, Rec, Ctx);
+
+ if (const ObjCMethodDecl *MD = IFace->lookupInstanceMethod(subscriptSel)) {
+ if (!MD->isUnavailable())
+ return true;
+ }
+ return false;
+}
+
+static bool subscriptOperatorNeedsParens(const Expr *FullExpr);
+
+static void maybePutParensOnReceiver(const Expr *Receiver, Commit &commit) {
+ if (subscriptOperatorNeedsParens(Receiver)) {
+ SourceRange RecRange = Receiver->getSourceRange();
+ commit.insertWrap("(", RecRange, ")");
+ }
+}
+
+static bool rewriteToSubscriptGetCommon(const ObjCMessageExpr *Msg,
+ Commit &commit) {
+ if (Msg->getNumArgs() != 1)
+ return false;
+ const Expr *Rec = Msg->getInstanceReceiver();
+ if (!Rec)
+ return false;
+
+ SourceRange MsgRange = Msg->getSourceRange();
+ SourceRange RecRange = Rec->getSourceRange();
+ SourceRange ArgRange = Msg->getArg(0)->getSourceRange();
+
+ commit.replaceWithInner(CharSourceRange::getCharRange(MsgRange.getBegin(),
+ ArgRange.getBegin()),
+ CharSourceRange::getTokenRange(RecRange));
+ commit.replaceWithInner(SourceRange(ArgRange.getBegin(), MsgRange.getEnd()),
+ ArgRange);
+ commit.insertWrap("[", ArgRange, "]");
+ maybePutParensOnReceiver(Rec, commit);
+ return true;
+}
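+
+// Example of the getter rewrite (hypothetical input):
+//   [dict objectForKey:key]   -->  dict[key]
+//   [array objectAtIndex:idx] -->  array[idx]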
+
+static bool rewriteToArraySubscriptGet(const ObjCInterfaceDecl *IFace,
+ const ObjCMessageExpr *Msg,
+ const NSAPI &NS,
+ Commit &commit) {
+ if (!canRewriteToSubscriptSyntax(IFace, Msg, NS.getASTContext(),
+ NS.getObjectAtIndexedSubscriptSelector()))
+ return false;
+ return rewriteToSubscriptGetCommon(Msg, commit);
+}
+
+static bool rewriteToDictionarySubscriptGet(const ObjCInterfaceDecl *IFace,
+ const ObjCMessageExpr *Msg,
+ const NSAPI &NS,
+ Commit &commit) {
+ if (!canRewriteToSubscriptSyntax(IFace, Msg, NS.getASTContext(),
+ NS.getObjectForKeyedSubscriptSelector()))
+ return false;
+ return rewriteToSubscriptGetCommon(Msg, commit);
+}
+
+static bool rewriteToArraySubscriptSet(const ObjCInterfaceDecl *IFace,
+ const ObjCMessageExpr *Msg,
+ const NSAPI &NS,
+ Commit &commit) {
+ if (!canRewriteToSubscriptSyntax(IFace, Msg, NS.getASTContext(),
+ NS.getSetObjectAtIndexedSubscriptSelector()))
+ return false;
+
+ if (Msg->getNumArgs() != 2)
+ return false;
+ const Expr *Rec = Msg->getInstanceReceiver();
+ if (!Rec)
+ return false;
+
+ SourceRange MsgRange = Msg->getSourceRange();
+ SourceRange RecRange = Rec->getSourceRange();
+ SourceRange Arg0Range = Msg->getArg(0)->getSourceRange();
+ SourceRange Arg1Range = Msg->getArg(1)->getSourceRange();
+
+ commit.replaceWithInner(CharSourceRange::getCharRange(MsgRange.getBegin(),
+ Arg0Range.getBegin()),
+ CharSourceRange::getTokenRange(RecRange));
+ commit.replaceWithInner(CharSourceRange::getCharRange(Arg0Range.getBegin(),
+ Arg1Range.getBegin()),
+ CharSourceRange::getTokenRange(Arg0Range));
+ commit.replaceWithInner(SourceRange(Arg1Range.getBegin(), MsgRange.getEnd()),
+ Arg1Range);
+ commit.insertWrap("[", CharSourceRange::getCharRange(Arg0Range.getBegin(),
+ Arg1Range.getBegin()),
+ "] = ");
+ maybePutParensOnReceiver(Rec, commit);
+ return true;
+}
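+
+// Example of the array setter rewrite (hypothetical input):
+//   [array replaceObjectAtIndex:idx withObject:obj]  -->  array[idx] = obj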
+
+static bool rewriteToDictionarySubscriptSet(const ObjCInterfaceDecl *IFace,
+ const ObjCMessageExpr *Msg,
+ const NSAPI &NS,
+ Commit &commit) {
+ if (!canRewriteToSubscriptSyntax(IFace, Msg, NS.getASTContext(),
+ NS.getSetObjectForKeyedSubscriptSelector()))
+ return false;
+
+ if (Msg->getNumArgs() != 2)
+ return false;
+ const Expr *Rec = Msg->getInstanceReceiver();
+ if (!Rec)
+ return false;
+
+ SourceRange MsgRange = Msg->getSourceRange();
+ SourceRange RecRange = Rec->getSourceRange();
+ SourceRange Arg0Range = Msg->getArg(0)->getSourceRange();
+ SourceRange Arg1Range = Msg->getArg(1)->getSourceRange();
+
+ SourceLocation LocBeforeVal = Arg0Range.getBegin();
+ commit.insertBefore(LocBeforeVal, "] = ");
+ commit.insertFromRange(LocBeforeVal, Arg1Range, /*afterToken=*/false,
+ /*beforePreviousInsertions=*/true);
+ commit.insertBefore(LocBeforeVal, "[");
+ commit.replaceWithInner(CharSourceRange::getCharRange(MsgRange.getBegin(),
+ Arg0Range.getBegin()),
+ CharSourceRange::getTokenRange(RecRange));
+ commit.replaceWithInner(SourceRange(Arg0Range.getBegin(), MsgRange.getEnd()),
+ Arg0Range);
+ maybePutParensOnReceiver(Rec, commit);
+ return true;
+}
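+
+// Example of the dictionary setter rewrite (hypothetical input):
+//   [dict setObject:obj forKey:key]  -->  dict[key] = obj
+// Note the argument order swap: the key moves in front of the value.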
+
+bool edit::rewriteToObjCSubscriptSyntax(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ if (!Msg || Msg->isImplicit() ||
+ Msg->getReceiverKind() != ObjCMessageExpr::Instance)
+ return false;
+ const ObjCMethodDecl *Method = Msg->getMethodDecl();
+ if (!Method)
+ return false;
+
+ const ObjCInterfaceDecl *IFace =
+ NS.getASTContext().getObjContainingInterface(Method);
+ if (!IFace)
+ return false;
+ Selector Sel = Msg->getSelector();
+
+ if (Sel == NS.getNSArraySelector(NSAPI::NSArr_objectAtIndex))
+ return rewriteToArraySubscriptGet(IFace, Msg, NS, commit);
+
+ if (Sel == NS.getNSDictionarySelector(NSAPI::NSDict_objectForKey))
+ return rewriteToDictionarySubscriptGet(IFace, Msg, NS, commit);
+
+ if (Msg->getNumArgs() != 2)
+ return false;
+
+ if (Sel == NS.getNSArraySelector(NSAPI::NSMutableArr_replaceObjectAtIndex))
+ return rewriteToArraySubscriptSet(IFace, Msg, NS, commit);
+
+ if (Sel == NS.getNSDictionarySelector(NSAPI::NSMutableDict_setObjectForKey))
+ return rewriteToDictionarySubscriptSet(IFace, Msg, NS, commit);
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// rewriteToObjCLiteralSyntax.
+//===----------------------------------------------------------------------===//
+
+static bool rewriteToArrayLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit,
+ const ParentMap *PMap);
+static bool rewriteToDictionaryLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit);
+static bool rewriteToNumberLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit);
+static bool rewriteToNumericBoxedExpression(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit);
+static bool rewriteToStringBoxedExpression(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit);
+
+bool edit::rewriteToObjCLiteralSyntax(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit,
+ const ParentMap *PMap) {
+ IdentifierInfo *II = nullptr;
+ if (!checkForLiteralCreation(Msg, II, NS.getASTContext().getLangOpts()))
+ return false;
+
+ if (II == NS.getNSClassId(NSAPI::ClassId_NSArray))
+ return rewriteToArrayLiteral(Msg, NS, commit, PMap);
+ if (II == NS.getNSClassId(NSAPI::ClassId_NSDictionary))
+ return rewriteToDictionaryLiteral(Msg, NS, commit);
+ if (II == NS.getNSClassId(NSAPI::ClassId_NSNumber))
+ return rewriteToNumberLiteral(Msg, NS, commit);
+ if (II == NS.getNSClassId(NSAPI::ClassId_NSString))
+ return rewriteToStringBoxedExpression(Msg, NS, commit);
+
+ return false;
+}
+
+/// \brief Returns true if the immediate message arguments of \c Msg should not
+/// be rewritten, because doing so would interfere with the rewrite of the
+/// parent message expression. e.g.
+/// \code
+/// [NSDictionary dictionaryWithObjects:
+/// [NSArray arrayWithObjects:@"1", @"2", nil]
+/// forKeys:[NSArray arrayWithObjects:@"A", @"B", nil]];
+/// \endcode
+/// It returns true for this example because we are going to rewrite it
+/// directly to a dictionary literal, without any intermediate array literals.
+static bool shouldNotRewriteImmediateMessageArgs(const ObjCMessageExpr *Msg,
+ const NSAPI &NS);
+
+//===----------------------------------------------------------------------===//
+// rewriteToArrayLiteral.
+//===----------------------------------------------------------------------===//
+
+/// \brief Adds an explicit cast to 'id' if the type is not an ObjC object.
+static void objectifyExpr(const Expr *E, Commit &commit);
+
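+// For illustration (hypothetical names): [NSArray array] becomes @[], and
+// [NSArray arrayWithObjects:a, b, nil] becomes @[a, b]. The nil sentinel is
+// required for the variadic forms to be rewritten.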
+static bool rewriteToArrayLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit,
+ const ParentMap *PMap) {
+ if (PMap) {
+ const ObjCMessageExpr *ParentMsg =
+ dyn_cast_or_null<ObjCMessageExpr>(PMap->getParentIgnoreParenCasts(Msg));
+ if (shouldNotRewriteImmediateMessageArgs(ParentMsg, NS))
+ return false;
+ }
+
+ Selector Sel = Msg->getSelector();
+ SourceRange MsgRange = Msg->getSourceRange();
+
+ if (Sel == NS.getNSArraySelector(NSAPI::NSArr_array)) {
+ if (Msg->getNumArgs() != 0)
+ return false;
+ commit.replace(MsgRange, "@[]");
+ return true;
+ }
+
+ if (Sel == NS.getNSArraySelector(NSAPI::NSArr_arrayWithObject)) {
+ if (Msg->getNumArgs() != 1)
+ return false;
+ objectifyExpr(Msg->getArg(0), commit);
+ SourceRange ArgRange = Msg->getArg(0)->getSourceRange();
+ commit.replaceWithInner(MsgRange, ArgRange);
+ commit.insertWrap("@[", ArgRange, "]");
+ return true;
+ }
+
+ if (Sel == NS.getNSArraySelector(NSAPI::NSArr_arrayWithObjects) ||
+ Sel == NS.getNSArraySelector(NSAPI::NSArr_initWithObjects)) {
+ if (Msg->getNumArgs() == 0)
+ return false;
+ const Expr *SentinelExpr = Msg->getArg(Msg->getNumArgs() - 1);
+ if (!NS.getASTContext().isSentinelNullExpr(SentinelExpr))
+ return false;
+
+ for (unsigned i = 0, e = Msg->getNumArgs() - 1; i != e; ++i)
+ objectifyExpr(Msg->getArg(i), commit);
+
+ if (Msg->getNumArgs() == 1) {
+ commit.replace(MsgRange, "@[]");
+ return true;
+ }
+ SourceRange ArgRange(Msg->getArg(0)->getLocStart(),
+ Msg->getArg(Msg->getNumArgs()-2)->getLocEnd());
+ commit.replaceWithInner(MsgRange, ArgRange);
+ commit.insertWrap("@[", ArgRange, "]");
+ return true;
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// rewriteToDictionaryLiteral.
+//===----------------------------------------------------------------------===//
+
+/// \brief If \c E is an NSArray creation message or literal, this gets the
+/// objects that were used to create it.
+/// \returns true if it is an NSArray and we retrieved its objects, false
+/// otherwise.
+static bool getNSArrayObjects(const Expr *E, const NSAPI &NS,
+ SmallVectorImpl<const Expr *> &Objs) {
+ if (!E)
+ return false;
+
+ E = E->IgnoreParenCasts();
+ if (!E)
+ return false;
+
+ if (const ObjCMessageExpr *Msg = dyn_cast<ObjCMessageExpr>(E)) {
+ IdentifierInfo *Cls = nullptr;
+ if (!checkForLiteralCreation(Msg, Cls, NS.getASTContext().getLangOpts()))
+ return false;
+
+ if (Cls != NS.getNSClassId(NSAPI::ClassId_NSArray))
+ return false;
+
+ Selector Sel = Msg->getSelector();
+ if (Sel == NS.getNSArraySelector(NSAPI::NSArr_array))
+ return true; // empty array.
+
+ if (Sel == NS.getNSArraySelector(NSAPI::NSArr_arrayWithObject)) {
+ if (Msg->getNumArgs() != 1)
+ return false;
+ Objs.push_back(Msg->getArg(0));
+ return true;
+ }
+
+ if (Sel == NS.getNSArraySelector(NSAPI::NSArr_arrayWithObjects) ||
+ Sel == NS.getNSArraySelector(NSAPI::NSArr_initWithObjects)) {
+ if (Msg->getNumArgs() == 0)
+ return false;
+ const Expr *SentinelExpr = Msg->getArg(Msg->getNumArgs() - 1);
+ if (!NS.getASTContext().isSentinelNullExpr(SentinelExpr))
+ return false;
+
+ for (unsigned i = 0, e = Msg->getNumArgs() - 1; i != e; ++i)
+ Objs.push_back(Msg->getArg(i));
+ return true;
+ }
+
+ } else if (const ObjCArrayLiteral *ArrLit = dyn_cast<ObjCArrayLiteral>(E)) {
+ for (unsigned i = 0, e = ArrLit->getNumElements(); i != e; ++i)
+ Objs.push_back(ArrLit->getElement(i));
+ return true;
+ }
+
+ return false;
+}
+
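+// For illustration (hypothetical names):
+//   [NSDictionary dictionaryWithObjectsAndKeys:v1, k1, v2, k2, nil]
+// becomes
+//   @{k1: v1, k2: v2}
+// Note that each value precedes its key in the original message.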
+static bool rewriteToDictionaryLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ Selector Sel = Msg->getSelector();
+ SourceRange MsgRange = Msg->getSourceRange();
+
+ if (Sel == NS.getNSDictionarySelector(NSAPI::NSDict_dictionary)) {
+ if (Msg->getNumArgs() != 0)
+ return false;
+ commit.replace(MsgRange, "@{}");
+ return true;
+ }
+
+ if (Sel == NS.getNSDictionarySelector(
+ NSAPI::NSDict_dictionaryWithObjectForKey)) {
+ if (Msg->getNumArgs() != 2)
+ return false;
+
+ objectifyExpr(Msg->getArg(0), commit);
+ objectifyExpr(Msg->getArg(1), commit);
+
+ SourceRange ValRange = Msg->getArg(0)->getSourceRange();
+ SourceRange KeyRange = Msg->getArg(1)->getSourceRange();
+ // Insert key before the value.
+ commit.insertBefore(ValRange.getBegin(), ": ");
+ commit.insertFromRange(ValRange.getBegin(),
+ CharSourceRange::getTokenRange(KeyRange),
+ /*afterToken=*/false, /*beforePreviousInsertions=*/true);
+ commit.insertBefore(ValRange.getBegin(), "@{");
+ commit.insertAfterToken(ValRange.getEnd(), "}");
+ commit.replaceWithInner(MsgRange, ValRange);
+ return true;
+ }
+
+ if (Sel == NS.getNSDictionarySelector(
+ NSAPI::NSDict_dictionaryWithObjectsAndKeys) ||
+ Sel == NS.getNSDictionarySelector(NSAPI::NSDict_initWithObjectsAndKeys)) {
+ if (Msg->getNumArgs() % 2 != 1)
+ return false;
+ unsigned SentinelIdx = Msg->getNumArgs() - 1;
+ const Expr *SentinelExpr = Msg->getArg(SentinelIdx);
+ if (!NS.getASTContext().isSentinelNullExpr(SentinelExpr))
+ return false;
+
+ if (Msg->getNumArgs() == 1) {
+ commit.replace(MsgRange, "@{}");
+ return true;
+ }
+
+ for (unsigned i = 0; i < SentinelIdx; i += 2) {
+ objectifyExpr(Msg->getArg(i), commit);
+ objectifyExpr(Msg->getArg(i+1), commit);
+
+ SourceRange ValRange = Msg->getArg(i)->getSourceRange();
+ SourceRange KeyRange = Msg->getArg(i+1)->getSourceRange();
+ // Insert value after key.
+ commit.insertAfterToken(KeyRange.getEnd(), ": ");
+ commit.insertFromRange(KeyRange.getEnd(), ValRange, /*afterToken=*/true);
+ commit.remove(CharSourceRange::getCharRange(ValRange.getBegin(),
+ KeyRange.getBegin()));
+ }
+ // Range of arguments up to and including the last key.
+ // The sentinel and the first value are cut off; each value is moved to
+ // after its key.
+ SourceRange ArgRange(Msg->getArg(1)->getLocStart(),
+ Msg->getArg(SentinelIdx-1)->getLocEnd());
+ commit.insertWrap("@{", ArgRange, "}");
+ commit.replaceWithInner(MsgRange, ArgRange);
+ return true;
+ }
+
+ if (Sel == NS.getNSDictionarySelector(
+ NSAPI::NSDict_dictionaryWithObjectsForKeys) ||
+ Sel == NS.getNSDictionarySelector(NSAPI::NSDict_initWithObjectsForKeys)) {
+ if (Msg->getNumArgs() != 2)
+ return false;
+
+ SmallVector<const Expr *, 8> Vals;
+ if (!getNSArrayObjects(Msg->getArg(0), NS, Vals))
+ return false;
+
+ SmallVector<const Expr *, 8> Keys;
+ if (!getNSArrayObjects(Msg->getArg(1), NS, Keys))
+ return false;
+
+ if (Vals.size() != Keys.size())
+ return false;
+
+ if (Vals.empty()) {
+ commit.replace(MsgRange, "@{}");
+ return true;
+ }
+
+ for (unsigned i = 0, n = Vals.size(); i < n; ++i) {
+ objectifyExpr(Vals[i], commit);
+ objectifyExpr(Keys[i], commit);
+
+ SourceRange ValRange = Vals[i]->getSourceRange();
+ SourceRange KeyRange = Keys[i]->getSourceRange();
+ // Insert value after key.
+ commit.insertAfterToken(KeyRange.getEnd(), ": ");
+ commit.insertFromRange(KeyRange.getEnd(), ValRange, /*afterToken=*/true);
+ }
+ // Range of arguments up to and including the last key.
+ // The first value is cut off; each value is moved to after its key.
+ SourceRange ArgRange(Keys.front()->getLocStart(),
+ Keys.back()->getLocEnd());
+ commit.insertWrap("@{", ArgRange, "}");
+ commit.replaceWithInner(MsgRange, ArgRange);
+ return true;
+ }
+
+ return false;
+}
+
+static bool shouldNotRewriteImmediateMessageArgs(const ObjCMessageExpr *Msg,
+ const NSAPI &NS) {
+ if (!Msg)
+ return false;
+
+ IdentifierInfo *II = nullptr;
+ if (!checkForLiteralCreation(Msg, II, NS.getASTContext().getLangOpts()))
+ return false;
+
+ if (II != NS.getNSClassId(NSAPI::ClassId_NSDictionary))
+ return false;
+
+ Selector Sel = Msg->getSelector();
+ if (Sel == NS.getNSDictionarySelector(
+ NSAPI::NSDict_dictionaryWithObjectsForKeys) ||
+ Sel == NS.getNSDictionarySelector(NSAPI::NSDict_initWithObjectsForKeys)) {
+ if (Msg->getNumArgs() != 2)
+ return false;
+
+ SmallVector<const Expr *, 8> Vals;
+ if (!getNSArrayObjects(Msg->getArg(0), NS, Vals))
+ return false;
+
+ SmallVector<const Expr *, 8> Keys;
+ if (!getNSArrayObjects(Msg->getArg(1), NS, Keys))
+ return false;
+
+ if (Vals.size() != Keys.size())
+ return false;
+
+ return true;
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// rewriteToNumberLiteral.
+//===----------------------------------------------------------------------===//
+
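+// For illustration: [NSNumber numberWithChar:'a'] becomes @'a'. Non-ASCII
+// character literals are left alone; other selectors fall back to a boxed
+// expression.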
+static bool rewriteToCharLiteral(const ObjCMessageExpr *Msg,
+ const CharacterLiteral *Arg,
+ const NSAPI &NS, Commit &commit) {
+ if (Arg->getKind() != CharacterLiteral::Ascii)
+ return false;
+ if (NS.isNSNumberLiteralSelector(NSAPI::NSNumberWithChar,
+ Msg->getSelector())) {
+ SourceRange ArgRange = Arg->getSourceRange();
+ commit.replaceWithInner(Msg->getSourceRange(), ArgRange);
+ commit.insert(ArgRange.getBegin(), "@");
+ return true;
+ }
+
+ return rewriteToNumericBoxedExpression(Msg, NS, commit);
+}
+
+static bool rewriteToBoolLiteral(const ObjCMessageExpr *Msg,
+ const Expr *Arg,
+ const NSAPI &NS, Commit &commit) {
+ if (NS.isNSNumberLiteralSelector(NSAPI::NSNumberWithBool,
+ Msg->getSelector())) {
+ SourceRange ArgRange = Arg->getSourceRange();
+ commit.replaceWithInner(Msg->getSourceRange(), ArgRange);
+ commit.insert(ArgRange.getBegin(), "@");
+ return true;
+ }
+
+ return rewriteToNumericBoxedExpression(Msg, NS, commit);
+}
+
+namespace {
+
+struct LiteralInfo {
+ bool Hex, Octal;
+ StringRef U, F, L, LL;
+ CharSourceRange WithoutSuffRange;
+};
+
+}
+
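+// For illustration: for a literal token such as "0x2Aull" this records
+// Hex = true, remembers the suffix spellings (U = "u", LL = "ll"), and sets
+// WithoutSuffRange to cover just "0x2A".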
+static bool getLiteralInfo(SourceRange literalRange,
+ bool isFloat, bool isIntZero,
+ ASTContext &Ctx, LiteralInfo &Info) {
+ if (literalRange.getBegin().isMacroID() ||
+ literalRange.getEnd().isMacroID())
+ return false;
+ StringRef text = Lexer::getSourceText(
+ CharSourceRange::getTokenRange(literalRange),
+ Ctx.getSourceManager(), Ctx.getLangOpts());
+ if (text.empty())
+ return false;
+
+ Optional<bool> UpperU, UpperL;
+ bool UpperF = false;
+
+ struct Suff {
+ static bool has(StringRef suff, StringRef &text) {
+ if (text.endswith(suff)) {
+ text = text.substr(0, text.size()-suff.size());
+ return true;
+ }
+ return false;
+ }
+ };
+
+ while (1) {
+ if (Suff::has("u", text)) {
+ UpperU = false;
+ } else if (Suff::has("U", text)) {
+ UpperU = true;
+ } else if (Suff::has("ll", text)) {
+ UpperL = false;
+ } else if (Suff::has("LL", text)) {
+ UpperL = true;
+ } else if (Suff::has("l", text)) {
+ UpperL = false;
+ } else if (Suff::has("L", text)) {
+ UpperL = true;
+ } else if (isFloat && Suff::has("f", text)) {
+ UpperF = false;
+ } else if (isFloat && Suff::has("F", text)) {
+ UpperF = true;
+ } else
+ break;
+ }
+
+ if (!UpperU.hasValue() && !UpperL.hasValue())
+ UpperU = UpperL = true;
+ else if (UpperU.hasValue() && !UpperL.hasValue())
+ UpperL = UpperU;
+ else if (UpperL.hasValue() && !UpperU.hasValue())
+ UpperU = UpperL;
+
+ Info.U = *UpperU ? "U" : "u";
+ Info.L = *UpperL ? "L" : "l";
+ Info.LL = *UpperL ? "LL" : "ll";
+ Info.F = UpperF ? "F" : "f";
+
+ Info.Hex = Info.Octal = false;
+ if (text.startswith("0x"))
+ Info.Hex = true;
+ else if (!isFloat && !isIntZero && text.startswith("0"))
+ Info.Octal = true;
+
+ SourceLocation B = literalRange.getBegin();
+ Info.WithoutSuffRange =
+ CharSourceRange::getCharRange(B, B.getLocWithOffset(text.size()));
+ return true;
+}
+
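+// For illustration: [NSNumber numberWithInt:42] becomes @42, and
+// [NSNumber numberWithDouble:1] becomes @1.0; suffixes and a decimal point
+// are added as needed so the literal keeps the type expected by the call.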
+static bool rewriteToNumberLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ if (Msg->getNumArgs() != 1)
+ return false;
+
+ const Expr *Arg = Msg->getArg(0)->IgnoreParenImpCasts();
+ if (const CharacterLiteral *CharE = dyn_cast<CharacterLiteral>(Arg))
+ return rewriteToCharLiteral(Msg, CharE, NS, commit);
+ if (const ObjCBoolLiteralExpr *BE = dyn_cast<ObjCBoolLiteralExpr>(Arg))
+ return rewriteToBoolLiteral(Msg, BE, NS, commit);
+ if (const CXXBoolLiteralExpr *BE = dyn_cast<CXXBoolLiteralExpr>(Arg))
+ return rewriteToBoolLiteral(Msg, BE, NS, commit);
+
+ const Expr *literalE = Arg;
+ if (const UnaryOperator *UOE = dyn_cast<UnaryOperator>(literalE)) {
+ if (UOE->getOpcode() == UO_Plus || UOE->getOpcode() == UO_Minus)
+ literalE = UOE->getSubExpr();
+ }
+
+ // Only handle integer and floating literals; otherwise try to rewrite to a
+ // boxed expression.
+ if (!isa<IntegerLiteral>(literalE) && !isa<FloatingLiteral>(literalE))
+ return rewriteToNumericBoxedExpression(Msg, NS, commit);
+
+ ASTContext &Ctx = NS.getASTContext();
+ Selector Sel = Msg->getSelector();
+ Optional<NSAPI::NSNumberLiteralMethodKind>
+ MKOpt = NS.getNSNumberLiteralMethodKind(Sel);
+ if (!MKOpt)
+ return false;
+ NSAPI::NSNumberLiteralMethodKind MK = *MKOpt;
+
+ bool CallIsUnsigned = false, CallIsLong = false, CallIsLongLong = false;
+ bool CallIsFloating = false, CallIsDouble = false;
+
+ switch (MK) {
+ // We cannot have these calls with int/float literals.
+ case NSAPI::NSNumberWithChar:
+ case NSAPI::NSNumberWithUnsignedChar:
+ case NSAPI::NSNumberWithShort:
+ case NSAPI::NSNumberWithUnsignedShort:
+ case NSAPI::NSNumberWithBool:
+ return rewriteToNumericBoxedExpression(Msg, NS, commit);
+
+ case NSAPI::NSNumberWithUnsignedInt:
+ case NSAPI::NSNumberWithUnsignedInteger:
+ CallIsUnsigned = true;
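+ // fall through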
+ case NSAPI::NSNumberWithInt:
+ case NSAPI::NSNumberWithInteger:
+ break;
+
+ case NSAPI::NSNumberWithUnsignedLong:
+ CallIsUnsigned = true;
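+ // fall through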
+ case NSAPI::NSNumberWithLong:
+ CallIsLong = true;
+ break;
+
+ case NSAPI::NSNumberWithUnsignedLongLong:
+ CallIsUnsigned = true;
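+ // fall through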
+ case NSAPI::NSNumberWithLongLong:
+ CallIsLongLong = true;
+ break;
+
+ case NSAPI::NSNumberWithDouble:
+ CallIsDouble = true;
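+ // fall through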
+ case NSAPI::NSNumberWithFloat:
+ CallIsFloating = true;
+ break;
+ }
+
+ SourceRange ArgRange = Arg->getSourceRange();
+ QualType ArgTy = Arg->getType();
+ QualType CallTy = Msg->getArg(0)->getType();
+
+ // Check for the easy case: the literal maps directly to the call.
+ if (Ctx.hasSameType(ArgTy, CallTy)) {
+ commit.replaceWithInner(Msg->getSourceRange(), ArgRange);
+ commit.insert(ArgRange.getBegin(), "@");
+ return true;
+ }
+
+ // We will need to modify the literal suffix to get the same type as the
+ // call. Try a boxed expression instead if the literal came from a macro.
+ if (ArgRange.getBegin().isMacroID())
+ return rewriteToNumericBoxedExpression(Msg, NS, commit);
+
+ bool LitIsFloat = ArgTy->isFloatingType();
+ // For a float passed to an integer call, don't try rewriting to an ObjC
+ // literal; that is difficult and a very uncommon case anyway. Try a boxed
+ // expression instead.
+ if (LitIsFloat && !CallIsFloating)
+ return rewriteToNumericBoxedExpression(Msg, NS, commit);
+
+ // Try to modify the literal to make it the same type as the method call:
+ // - modify the suffix, and/or
+ // - change an integer literal to a float.
+
+ LiteralInfo LitInfo;
+ bool isIntZero = false;
+ if (const IntegerLiteral *IntE = dyn_cast<IntegerLiteral>(literalE))
+ isIntZero = !IntE->getValue().getBoolValue();
+ if (!getLiteralInfo(ArgRange, LitIsFloat, isIntZero, Ctx, LitInfo))
+ return rewriteToNumericBoxedExpression(Msg, NS, commit);
+
+ // An int -> float conversion is hard to do with hex/octal literals and is
+ // uncommon anyway, so fall back to a boxed expression.
+ if (!LitIsFloat && CallIsFloating && (LitInfo.Hex || LitInfo.Octal))
+ return rewriteToNumericBoxedExpression(Msg, NS, commit);
+
+ SourceLocation LitB = LitInfo.WithoutSuffRange.getBegin();
+ SourceLocation LitE = LitInfo.WithoutSuffRange.getEnd();
+
+ commit.replaceWithInner(CharSourceRange::getTokenRange(Msg->getSourceRange()),
+ LitInfo.WithoutSuffRange);
+ commit.insert(LitB, "@");
+
+ if (!LitIsFloat && CallIsFloating)
+ commit.insert(LitE, ".0");
+
+ if (CallIsFloating) {
+ if (!CallIsDouble)
+ commit.insert(LitE, LitInfo.F);
+ } else {
+ if (CallIsUnsigned)
+ commit.insert(LitE, LitInfo.U);
+
+ if (CallIsLong)
+ commit.insert(LitE, LitInfo.L);
+ else if (CallIsLongLong)
+ commit.insert(LitE, LitInfo.LL);
+ }
+ return true;
+}
+
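+// For illustration: a receiver expression that is not in the list below, such
+// as a ternary, gets parenthesized before subscripting, e.g.
+//   (cond ? arr1 : arr2)[idx]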
+// FIXME: Make determination of operator precedence more general and
+// make it broadly available.
+static bool subscriptOperatorNeedsParens(const Expr *FullExpr) {
+ const Expr* Expr = FullExpr->IgnoreImpCasts();
+ if (isa<ArraySubscriptExpr>(Expr) ||
+ isa<CallExpr>(Expr) ||
+ isa<DeclRefExpr>(Expr) ||
+ isa<CXXNamedCastExpr>(Expr) ||
+ isa<CXXConstructExpr>(Expr) ||
+ isa<CXXThisExpr>(Expr) ||
+ isa<CXXTypeidExpr>(Expr) ||
+ isa<CXXUnresolvedConstructExpr>(Expr) ||
+ isa<ObjCMessageExpr>(Expr) ||
+ isa<ObjCPropertyRefExpr>(Expr) ||
+ isa<ObjCProtocolExpr>(Expr) ||
+ isa<MemberExpr>(Expr) ||
+ isa<ObjCIvarRefExpr>(Expr) ||
+ isa<ParenExpr>(FullExpr) ||
+ isa<ParenListExpr>(Expr) ||
+ isa<SizeOfPackExpr>(Expr))
+ return false;
+
+ return true;
+}
+static bool castOperatorNeedsParens(const Expr *FullExpr) {
+ const Expr* Expr = FullExpr->IgnoreImpCasts();
+ if (isa<ArraySubscriptExpr>(Expr) ||
+ isa<CallExpr>(Expr) ||
+ isa<DeclRefExpr>(Expr) ||
+ isa<CastExpr>(Expr) ||
+ isa<CXXNewExpr>(Expr) ||
+ isa<CXXConstructExpr>(Expr) ||
+ isa<CXXDeleteExpr>(Expr) ||
+ isa<CXXNoexceptExpr>(Expr) ||
+ isa<CXXPseudoDestructorExpr>(Expr) ||
+ isa<CXXScalarValueInitExpr>(Expr) ||
+ isa<CXXThisExpr>(Expr) ||
+ isa<CXXTypeidExpr>(Expr) ||
+ isa<CXXUnresolvedConstructExpr>(Expr) ||
+ isa<ObjCMessageExpr>(Expr) ||
+ isa<ObjCPropertyRefExpr>(Expr) ||
+ isa<ObjCProtocolExpr>(Expr) ||
+ isa<MemberExpr>(Expr) ||
+ isa<ObjCIvarRefExpr>(Expr) ||
+ isa<ParenExpr>(FullExpr) ||
+ isa<ParenListExpr>(Expr) ||
+ isa<SizeOfPackExpr>(Expr) ||
+ isa<UnaryOperator>(Expr))
+ return false;
+
+ return true;
+}
+
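+// For illustration (hypothetical names): given an argument 'p' of type
+// 'void *', this inserts an explicit cast, yielding '(id)p' (with extra
+// parentheses around the expression when precedence requires them).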
+static void objectifyExpr(const Expr *E, Commit &commit) {
+ if (!E) return;
+
+ QualType T = E->getType();
+ if (T->isObjCObjectPointerType()) {
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() != CK_CPointerToObjCPointerCast)
+ return;
+ } else {
+ return;
+ }
+ } else if (!T->isPointerType()) {
+ return;
+ }
+
+ SourceRange Range = E->getSourceRange();
+ if (castOperatorNeedsParens(E))
+ commit.insertWrap("(", Range, ")");
+ commit.insertBefore(Range.getBegin(), "(id)");
+}
+
+//===----------------------------------------------------------------------===//
+// rewriteToNumericBoxedExpression.
+//===----------------------------------------------------------------------===//
+
+static bool isEnumConstant(const Expr *E) {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
+ if (const ValueDecl *VD = DRE->getDecl())
+ return isa<EnumConstantDecl>(VD);
+
+ return false;
+}
+
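+// For illustration (hypothetical names): [NSNumber numberWithInt:x + y]
+// becomes @(x + y). An implicit cast that could change the value (e.g. a
+// truncation) emits a warning and blocks the rewrite instead.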
+static bool rewriteToNumericBoxedExpression(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ if (Msg->getNumArgs() != 1)
+ return false;
+
+ const Expr *Arg = Msg->getArg(0);
+ if (Arg->isTypeDependent())
+ return false;
+
+ ASTContext &Ctx = NS.getASTContext();
+ Selector Sel = Msg->getSelector();
+ Optional<NSAPI::NSNumberLiteralMethodKind>
+ MKOpt = NS.getNSNumberLiteralMethodKind(Sel);
+ if (!MKOpt)
+ return false;
+ NSAPI::NSNumberLiteralMethodKind MK = *MKOpt;
+
+ const Expr *OrigArg = Arg->IgnoreImpCasts();
+ QualType FinalTy = Arg->getType();
+ QualType OrigTy = OrigArg->getType();
+ uint64_t FinalTySize = Ctx.getTypeSize(FinalTy);
+ uint64_t OrigTySize = Ctx.getTypeSize(OrigTy);
+
+ bool isTruncated = FinalTySize < OrigTySize;
+ bool needsCast = false;
+
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
+ switch (ICE->getCastKind()) {
+ case CK_LValueToRValue:
+ case CK_NoOp:
+ case CK_UserDefinedConversion:
+ break;
+
+ case CK_IntegralCast: {
+ if (MK == NSAPI::NSNumberWithBool && OrigTy->isBooleanType())
+ break;
+ // Be more liberal with Integer/UnsignedInteger, which are very commonly
+ // used.
+ if ((MK == NSAPI::NSNumberWithInteger ||
+ MK == NSAPI::NSNumberWithUnsignedInteger) &&
+ !isTruncated) {
+ if (OrigTy->getAs<EnumType>() || isEnumConstant(OrigArg))
+ break;
+ if ((MK==NSAPI::NSNumberWithInteger) == OrigTy->isSignedIntegerType() &&
+ OrigTySize >= Ctx.getTypeSize(Ctx.IntTy))
+ break;
+ }
+
+ needsCast = true;
+ break;
+ }
+
+ case CK_PointerToBoolean:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_AtomicToNonAtomic:
+ case CK_AddressSpaceConversion:
+ needsCast = true;
+ break;
+
+ case CK_Dependent:
+ case CK_BitCast:
+ case CK_LValueBitCast:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_NonAtomicToAtomic:
+ case CK_CopyAndAutoreleaseBlockObject:
+ case CK_BuiltinFnToFnPtr:
+ case CK_ZeroToOCLEvent:
+ return false;
+ }
+ }
+
+ if (needsCast) {
+ DiagnosticsEngine &Diags = Ctx.getDiagnostics();
+ // FIXME: Use a custom category name to distinguish migration diagnostics.
+ unsigned diagID = Diags.getCustomDiagID(DiagnosticsEngine::Warning,
+ "converting to boxing syntax requires casting %0 to %1");
+ Diags.Report(Msg->getExprLoc(), diagID) << OrigTy << FinalTy
+ << Msg->getSourceRange();
+ return false;
+ }
+
+ SourceRange ArgRange = OrigArg->getSourceRange();
+ commit.replaceWithInner(Msg->getSourceRange(), ArgRange);
+
+ if (isa<ParenExpr>(OrigArg) || isa<IntegerLiteral>(OrigArg))
+ commit.insertBefore(ArgRange.getBegin(), "@");
+ else
+ commit.insertWrap("@(", ArgRange, ")");
+
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// rewriteToStringBoxedExpression.
+//===----------------------------------------------------------------------===//
+
+static bool doRewriteToUTF8StringBoxedExpressionHelper(
+ const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ const Expr *Arg = Msg->getArg(0);
+ if (Arg->isTypeDependent())
+ return false;
+
+ ASTContext &Ctx = NS.getASTContext();
+
+ const Expr *OrigArg = Arg->IgnoreImpCasts();
+ QualType OrigTy = OrigArg->getType();
+ if (OrigTy->isArrayType())
+ OrigTy = Ctx.getArrayDecayedType(OrigTy);
+
+ if (const StringLiteral *
+ StrE = dyn_cast<StringLiteral>(OrigArg->IgnoreParens())) {
+ commit.replaceWithInner(Msg->getSourceRange(), StrE->getSourceRange());
+ commit.insert(StrE->getLocStart(), "@");
+ return true;
+ }
+
+ if (const PointerType *PT = OrigTy->getAs<PointerType>()) {
+ QualType PointeeType = PT->getPointeeType();
+ if (Ctx.hasSameUnqualifiedType(PointeeType, Ctx.CharTy)) {
+ SourceRange ArgRange = OrigArg->getSourceRange();
+ commit.replaceWithInner(Msg->getSourceRange(), ArgRange);
+
+ if (isa<ParenExpr>(OrigArg) || isa<IntegerLiteral>(OrigArg))
+ commit.insertBefore(ArgRange.getBegin(), "@");
+ else
+ commit.insertWrap("@(", ArgRange, ")");
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
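+// For illustration (hypothetical names): [NSString stringWithUTF8String:buf]
+// becomes @(buf), and a plain "literal" argument becomes an @"literal" string
+// via the helper above.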
+static bool rewriteToStringBoxedExpression(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ Selector Sel = Msg->getSelector();
+
+ if (Sel == NS.getNSStringSelector(NSAPI::NSStr_stringWithUTF8String) ||
+ Sel == NS.getNSStringSelector(NSAPI::NSStr_stringWithCString) ||
+ Sel == NS.getNSStringSelector(NSAPI::NSStr_initWithUTF8String)) {
+ if (Msg->getNumArgs() != 1)
+ return false;
+ return doRewriteToUTF8StringBoxedExpressionHelper(Msg, NS, commit);
+ }
+
+ if (Sel == NS.getNSStringSelector(NSAPI::NSStr_stringWithCStringEncoding)) {
+ if (Msg->getNumArgs() != 2)
+ return false;
+
+ const Expr *encodingArg = Msg->getArg(1);
+ if (NS.isNSUTF8StringEncodingConstant(encodingArg) ||
+ NS.isNSASCIIStringEncodingConstant(encodingArg))
+ return doRewriteToUTF8StringBoxedExpressionHelper(Msg, NS, commit);
+ }
+
+ return false;
+}
diff --git a/contrib/llvm/tools/clang/lib/Format/BreakableToken.cpp b/contrib/llvm/tools/clang/lib/Format/BreakableToken.cpp
new file mode 100644
index 0000000..36a8c4d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Format/BreakableToken.cpp
@@ -0,0 +1,460 @@
+//===--- BreakableToken.cpp - Format C++ code -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Contains the implementation of the BreakableToken class and the
+/// classes derived from it.
+///
+//===----------------------------------------------------------------------===//
+
+#include "BreakableToken.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Format/Format.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Debug.h"
+#include <algorithm>
+
+#define DEBUG_TYPE "format-token-breaker"
+
+namespace clang {
+namespace format {
+
+static const char *const Blanks = " \t\v\f\r";
+static bool IsBlank(char C) {
+ switch (C) {
+ case ' ':
+ case '\t':
+ case '\v':
+ case '\f':
+ case '\r':
+ return true;
+ default:
+ return false;
+ }
+}
+
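+// For illustration: given comment text "foo bar baz" and a column limit that
+// only fits "foo bar", the returned split removes the blank after "bar", so
+// the comment is broken into "foo bar" and "baz".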
+static BreakableToken::Split getCommentSplit(StringRef Text,
+ unsigned ContentStartColumn,
+ unsigned ColumnLimit,
+ unsigned TabWidth,
+ encoding::Encoding Encoding) {
+ if (ColumnLimit <= ContentStartColumn + 1)
+ return BreakableToken::Split(StringRef::npos, 0);
+
+ unsigned MaxSplit = ColumnLimit - ContentStartColumn + 1;
+ unsigned MaxSplitBytes = 0;
+
+ for (unsigned NumChars = 0;
+ NumChars < MaxSplit && MaxSplitBytes < Text.size();) {
+ unsigned BytesInChar =
+ encoding::getCodePointNumBytes(Text[MaxSplitBytes], Encoding);
+ NumChars +=
+ encoding::columnWidthWithTabs(Text.substr(MaxSplitBytes, BytesInChar),
+ ContentStartColumn, TabWidth, Encoding);
+ MaxSplitBytes += BytesInChar;
+ }
+
+ StringRef::size_type SpaceOffset = Text.find_last_of(Blanks, MaxSplitBytes);
+ if (SpaceOffset == StringRef::npos ||
+ // Don't break at leading whitespace.
+ Text.find_last_not_of(Blanks, SpaceOffset) == StringRef::npos) {
+ // Make sure that we don't break at leading whitespace that
+ // reaches past MaxSplit.
+ StringRef::size_type FirstNonWhitespace = Text.find_first_not_of(Blanks);
+ if (FirstNonWhitespace == StringRef::npos)
+ // If the comment is only whitespace, we cannot split.
+ return BreakableToken::Split(StringRef::npos, 0);
+ SpaceOffset = Text.find_first_of(
+ Blanks, std::max<unsigned>(MaxSplitBytes, FirstNonWhitespace));
+ }
+ if (SpaceOffset != StringRef::npos && SpaceOffset != 0) {
+ StringRef BeforeCut = Text.substr(0, SpaceOffset).rtrim(Blanks);
+ StringRef AfterCut = Text.substr(SpaceOffset).ltrim(Blanks);
+ return BreakableToken::Split(BeforeCut.size(),
+ AfterCut.begin() - BeforeCut.end());
+ }
+ return BreakableToken::Split(StringRef::npos, 0);
+}
+
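+// For illustration: when breaking a string literal, a position after a blank
+// is preferred, then one after a '/', then one after any other
+// non-alphanumeric character, before falling back to a hard split at the
+// column limit.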
+static BreakableToken::Split
+getStringSplit(StringRef Text, unsigned UsedColumns, unsigned ColumnLimit,
+ unsigned TabWidth, encoding::Encoding Encoding) {
+ // FIXME: Reduce unit test case.
+ if (Text.empty())
+ return BreakableToken::Split(StringRef::npos, 0);
+ if (ColumnLimit <= UsedColumns)
+ return BreakableToken::Split(StringRef::npos, 0);
+ unsigned MaxSplit = ColumnLimit - UsedColumns;
+ StringRef::size_type SpaceOffset = 0;
+ StringRef::size_type SlashOffset = 0;
+ StringRef::size_type WordStartOffset = 0;
+ StringRef::size_type SplitPoint = 0;
+ for (unsigned Chars = 0;;) {
+ unsigned Advance;
+ if (Text[0] == '\\') {
+ Advance = encoding::getEscapeSequenceLength(Text);
+ Chars += Advance;
+ } else {
+ Advance = encoding::getCodePointNumBytes(Text[0], Encoding);
+ Chars += encoding::columnWidthWithTabs(
+ Text.substr(0, Advance), UsedColumns + Chars, TabWidth, Encoding);
+ }
+
+ if (Chars > MaxSplit || Text.size() <= Advance)
+ break;
+
+ if (IsBlank(Text[0]))
+ SpaceOffset = SplitPoint;
+ if (Text[0] == '/')
+ SlashOffset = SplitPoint;
+ if (Advance == 1 && !isAlphanumeric(Text[0]))
+ WordStartOffset = SplitPoint;
+
+ SplitPoint += Advance;
+ Text = Text.substr(Advance);
+ }
+
+ if (SpaceOffset != 0)
+ return BreakableToken::Split(SpaceOffset + 1, 0);
+ if (SlashOffset != 0)
+ return BreakableToken::Split(SlashOffset + 1, 0);
+ if (WordStartOffset != 0)
+ return BreakableToken::Split(WordStartOffset + 1, 0);
+ if (SplitPoint != 0)
+ return BreakableToken::Split(SplitPoint, 0);
+ return BreakableToken::Split(StringRef::npos, 0);
+}
+
+unsigned BreakableSingleLineToken::getLineCount() const { return 1; }
+
+unsigned BreakableSingleLineToken::getLineLengthAfterSplit(
+ unsigned LineIndex, unsigned Offset, StringRef::size_type Length) const {
+ return StartColumn + Prefix.size() + Postfix.size() +
+ encoding::columnWidthWithTabs(Line.substr(Offset, Length),
+ StartColumn + Prefix.size(),
+ Style.TabWidth, Encoding);
+}
+
+BreakableSingleLineToken::BreakableSingleLineToken(
+ const FormatToken &Tok, unsigned IndentLevel, unsigned StartColumn,
+ StringRef Prefix, StringRef Postfix, bool InPPDirective,
+ encoding::Encoding Encoding, const FormatStyle &Style)
+ : BreakableToken(Tok, IndentLevel, InPPDirective, Encoding, Style),
+ StartColumn(StartColumn), Prefix(Prefix), Postfix(Postfix) {
+ assert(Tok.TokenText.endswith(Postfix));
+ Line = Tok.TokenText.substr(
+ Prefix.size(), Tok.TokenText.size() - Prefix.size() - Postfix.size());
+}
+
+BreakableStringLiteral::BreakableStringLiteral(
+ const FormatToken &Tok, unsigned IndentLevel, unsigned StartColumn,
+ StringRef Prefix, StringRef Postfix, bool InPPDirective,
+ encoding::Encoding Encoding, const FormatStyle &Style)
+ : BreakableSingleLineToken(Tok, IndentLevel, StartColumn, Prefix, Postfix,
+ InPPDirective, Encoding, Style) {}
+
+BreakableToken::Split
+BreakableStringLiteral::getSplit(unsigned LineIndex, unsigned TailOffset,
+ unsigned ColumnLimit) const {
+ return getStringSplit(Line.substr(TailOffset),
+ StartColumn + Prefix.size() + Postfix.size(),
+ ColumnLimit, Style.TabWidth, Encoding);
+}
+
+void BreakableStringLiteral::insertBreak(unsigned LineIndex,
+ unsigned TailOffset, Split Split,
+ WhitespaceManager &Whitespaces) {
+ unsigned LeadingSpaces = StartColumn;
+ // The '@' of an ObjC string literal (@"Test") does not become part of the
+ // string token.
+ // FIXME: It might be a cleaner solution to merge the tokens as a
+ // precomputation step.
+ if (Prefix.startswith("@"))
+ --LeadingSpaces;
+ Whitespaces.replaceWhitespaceInToken(
+ Tok, Prefix.size() + TailOffset + Split.first, Split.second, Postfix,
+ Prefix, InPPDirective, 1, IndentLevel, LeadingSpaces);
+}
+
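+// For illustration: for the token "//   indented comment" this returns
+// "//   ", i.e. the known prefix plus its trailing spaces.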
+static StringRef getLineCommentIndentPrefix(StringRef Comment) {
+ static const char *const KnownPrefixes[] = {"///", "//", "//!"};
+ StringRef LongestPrefix;
+ for (StringRef KnownPrefix : KnownPrefixes) {
+ if (Comment.startswith(KnownPrefix)) {
+ size_t PrefixLength = KnownPrefix.size();
+ while (PrefixLength < Comment.size() && Comment[PrefixLength] == ' ')
+ ++PrefixLength;
+ if (PrefixLength > LongestPrefix.size())
+ LongestPrefix = Comment.substr(0, PrefixLength);
+ }
+ }
+ return LongestPrefix;
+}
+
+BreakableLineComment::BreakableLineComment(
+ const FormatToken &Token, unsigned IndentLevel, unsigned StartColumn,
+ bool InPPDirective, encoding::Encoding Encoding, const FormatStyle &Style)
+ : BreakableSingleLineToken(Token, IndentLevel, StartColumn,
+ getLineCommentIndentPrefix(Token.TokenText), "",
+ InPPDirective, Encoding, Style) {
+ OriginalPrefix = Prefix;
+ if (Token.TokenText.size() > Prefix.size() &&
+ isAlphanumeric(Token.TokenText[Prefix.size()])) {
+ if (Prefix == "//")
+ Prefix = "// ";
+ else if (Prefix == "///")
+ Prefix = "/// ";
+ else if (Prefix == "//!")
+ Prefix = "//! ";
+ }
+}
+
+BreakableToken::Split
+BreakableLineComment::getSplit(unsigned LineIndex, unsigned TailOffset,
+ unsigned ColumnLimit) const {
+ return getCommentSplit(Line.substr(TailOffset), StartColumn + Prefix.size(),
+ ColumnLimit, Style.TabWidth, Encoding);
+}
+
+void BreakableLineComment::insertBreak(unsigned LineIndex, unsigned TailOffset,
+ Split Split,
+ WhitespaceManager &Whitespaces) {
+ Whitespaces.replaceWhitespaceInToken(
+ Tok, OriginalPrefix.size() + TailOffset + Split.first, Split.second,
+ Postfix, Prefix, InPPDirective, /*Newlines=*/1, IndentLevel, StartColumn);
+}
+
+void BreakableLineComment::replaceWhitespace(unsigned LineIndex,
+ unsigned TailOffset, Split Split,
+ WhitespaceManager &Whitespaces) {
+ Whitespaces.replaceWhitespaceInToken(
+ Tok, OriginalPrefix.size() + TailOffset + Split.first, Split.second, "",
+ "", /*InPPDirective=*/false, /*Newlines=*/0, /*IndentLevel=*/0,
+ /*Spaces=*/1);
+}
+
+void BreakableLineComment::replaceWhitespaceBefore(
+ unsigned LineIndex, WhitespaceManager &Whitespaces) {
+ if (OriginalPrefix != Prefix) {
+ Whitespaces.replaceWhitespaceInToken(Tok, OriginalPrefix.size(), 0, "", "",
+ /*InPPDirective=*/false,
+ /*Newlines=*/0, /*IndentLevel=*/0,
+ /*Spaces=*/1);
+ }
+}
+
+BreakableBlockComment::BreakableBlockComment(
+ const FormatToken &Token, unsigned IndentLevel, unsigned StartColumn,
+ unsigned OriginalStartColumn, bool FirstInLine, bool InPPDirective,
+ encoding::Encoding Encoding, const FormatStyle &Style)
+ : BreakableToken(Token, IndentLevel, InPPDirective, Encoding, Style) {
+ StringRef TokenText(Token.TokenText);
+ assert(TokenText.startswith("/*") && TokenText.endswith("*/"));
+ TokenText.substr(2, TokenText.size() - 4).split(Lines, "\n");
+
+ int IndentDelta = StartColumn - OriginalStartColumn;
+ LeadingWhitespace.resize(Lines.size());
+ StartOfLineColumn.resize(Lines.size());
+ StartOfLineColumn[0] = StartColumn + 2;
+ for (size_t i = 1; i < Lines.size(); ++i)
+ adjustWhitespace(i, IndentDelta);
+
+ Decoration = "* ";
+ if (Lines.size() == 1 && !FirstInLine) {
+ // Comments for which FirstInLine is false can start at an arbitrary column,
+ // and the available horizontal space can be too small to align consecutive
+ // lines with the first one.
+ // FIXME: We could, probably, align them to current indentation level, but
+ // now we just wrap them without stars.
+ Decoration = "";
+ }
+ for (size_t i = 1, e = Lines.size(); i < e && !Decoration.empty(); ++i) {
+ // If the last line is empty, the closing "*/" will have a star.
+ if (i + 1 == e && Lines[i].empty())
+ break;
+ if (!Lines[i].empty() && i + 1 != e && Decoration.startswith(Lines[i]))
+ continue;
+ while (!Lines[i].startswith(Decoration))
+ Decoration = Decoration.substr(0, Decoration.size() - 1);
+ }
+
+ LastLineNeedsDecoration = true;
+ IndentAtLineBreak = StartOfLineColumn[0] + 1;
+ for (size_t i = 1; i < Lines.size(); ++i) {
+ if (Lines[i].empty()) {
+ if (i + 1 == Lines.size()) {
+ // An empty last line means that we already have a star as part of the
+ // trailing */. We also need to preserve whitespace, so that */ is
+ // correctly indented.
+ LastLineNeedsDecoration = false;
+ } else if (Decoration.empty()) {
+ // For all other lines, set the start column to 0 if they're empty, so
+ // we do not insert trailing whitespace anywhere.
+ StartOfLineColumn[i] = 0;
+ }
+ continue;
+ }
+
+ // The first line already excludes the star.
+ // For all other lines, adjust the line to exclude the star and
+ // (optionally) the first whitespace.
+ unsigned DecorationSize =
+ Decoration.startswith(Lines[i]) ? Lines[i].size() : Decoration.size();
+ StartOfLineColumn[i] += DecorationSize;
+ Lines[i] = Lines[i].substr(DecorationSize);
+ LeadingWhitespace[i] += DecorationSize;
+ if (!Decoration.startswith(Lines[i]))
+ IndentAtLineBreak =
+ std::min<int>(IndentAtLineBreak, std::max(0, StartOfLineColumn[i]));
+ }
+ IndentAtLineBreak = std::max<unsigned>(IndentAtLineBreak, Decoration.size());
+ DEBUG({
+ llvm::dbgs() << "IndentAtLineBreak " << IndentAtLineBreak << "\n";
+ for (size_t i = 0; i < Lines.size(); ++i) {
+ llvm::dbgs() << i << " |" << Lines[i] << "| " << LeadingWhitespace[i]
+ << "\n";
+ }
+ });
+}
+
+void BreakableBlockComment::adjustWhitespace(unsigned LineIndex,
+ int IndentDelta) {
+ // When in a preprocessor directive, the trailing backslash in a block
+ // comment is not needed, but it can serve the purpose of uniformity with
+ // the necessary escaped newlines outside the comment. In that case we
+ // remove it here, before trimming the trailing whitespace; the backslash
+ // will be re-added later, when inserting a line break.
+ size_t EndOfPreviousLine = Lines[LineIndex - 1].size();
+ if (InPPDirective && Lines[LineIndex - 1].endswith("\\"))
+ --EndOfPreviousLine;
+
+ // Calculate the end of the non-whitespace text in the previous line.
+ EndOfPreviousLine =
+ Lines[LineIndex - 1].find_last_not_of(Blanks, EndOfPreviousLine);
+ if (EndOfPreviousLine == StringRef::npos)
+ EndOfPreviousLine = 0;
+ else
+ ++EndOfPreviousLine;
+ // Calculate the start of the non-whitespace text in the current line.
+ size_t StartOfLine = Lines[LineIndex].find_first_not_of(Blanks);
+ if (StartOfLine == StringRef::npos)
+ StartOfLine = Lines[LineIndex].rtrim("\r\n").size();
+
+ StringRef Whitespace = Lines[LineIndex].substr(0, StartOfLine);
+ // Adjust Lines to only contain relevant text.
+ Lines[LineIndex - 1] = Lines[LineIndex - 1].substr(0, EndOfPreviousLine);
+ Lines[LineIndex] = Lines[LineIndex].substr(StartOfLine);
+ // Adjust LeadingWhitespace to account for all whitespace between the lines,
+ // attributing it to the current line.
+ LeadingWhitespace[LineIndex] =
+ Lines[LineIndex].begin() - Lines[LineIndex - 1].end();
+
+ // Adjust the start column uniformly across all lines.
+ StartOfLineColumn[LineIndex] =
+ encoding::columnWidthWithTabs(Whitespace, 0, Style.TabWidth, Encoding) +
+ IndentDelta;
+}
+
+unsigned BreakableBlockComment::getLineCount() const { return Lines.size(); }
+
+unsigned BreakableBlockComment::getLineLengthAfterSplit(
+ unsigned LineIndex, unsigned Offset, StringRef::size_type Length) const {
+ unsigned ContentStartColumn = getContentStartColumn(LineIndex, Offset);
+ return ContentStartColumn +
+ encoding::columnWidthWithTabs(Lines[LineIndex].substr(Offset, Length),
+ ContentStartColumn, Style.TabWidth,
+ Encoding) +
+ // The last line gets a "*/" postfix.
+ (LineIndex + 1 == Lines.size() ? 2 : 0);
+}
+
+BreakableToken::Split
+BreakableBlockComment::getSplit(unsigned LineIndex, unsigned TailOffset,
+ unsigned ColumnLimit) const {
+ return getCommentSplit(Lines[LineIndex].substr(TailOffset),
+ getContentStartColumn(LineIndex, TailOffset),
+ ColumnLimit, Style.TabWidth, Encoding);
+}
+
+void BreakableBlockComment::insertBreak(unsigned LineIndex, unsigned TailOffset,
+ Split Split,
+ WhitespaceManager &Whitespaces) {
+ StringRef Text = Lines[LineIndex].substr(TailOffset);
+ StringRef Prefix = Decoration;
+ if (LineIndex + 1 == Lines.size() &&
+ Text.size() == Split.first + Split.second) {
+ // For the last line we need to break before "*/", but not to add "* ".
+ Prefix = "";
+ }
+
+ unsigned BreakOffsetInToken =
+ Text.data() - Tok.TokenText.data() + Split.first;
+ unsigned CharsToRemove = Split.second;
+ assert(IndentAtLineBreak >= Decoration.size());
+ Whitespaces.replaceWhitespaceInToken(
+ Tok, BreakOffsetInToken, CharsToRemove, "", Prefix, InPPDirective, 1,
+ IndentLevel, IndentAtLineBreak - Decoration.size());
+}
+
+void BreakableBlockComment::replaceWhitespace(unsigned LineIndex,
+ unsigned TailOffset, Split Split,
+ WhitespaceManager &Whitespaces) {
+ StringRef Text = Lines[LineIndex].substr(TailOffset);
+ unsigned BreakOffsetInToken =
+ Text.data() - Tok.TokenText.data() + Split.first;
+ unsigned CharsToRemove = Split.second;
+ Whitespaces.replaceWhitespaceInToken(
+ Tok, BreakOffsetInToken, CharsToRemove, "", "", /*InPPDirective=*/false,
+ /*Newlines=*/0, /*IndentLevel=*/0, /*Spaces=*/1);
+}
+
+void BreakableBlockComment::replaceWhitespaceBefore(
+ unsigned LineIndex, WhitespaceManager &Whitespaces) {
+ if (LineIndex == 0)
+ return;
+ StringRef Prefix = Decoration;
+ if (Lines[LineIndex].empty()) {
+ if (LineIndex + 1 == Lines.size()) {
+ if (!LastLineNeedsDecoration) {
+ // If the last line was empty, we don't need a prefix, as the */ will
+ // line up with the decoration (if it exists).
+ Prefix = "";
+ }
+ } else if (!Decoration.empty()) {
+ // For other empty lines, if we do have a decoration, adapt it so that it
+ // does not contain trailing whitespace.
+ Prefix = Prefix.substr(0, 1);
+ }
+ } else {
+ if (StartOfLineColumn[LineIndex] == 1) {
+ // This line starts immediately after the decorating *.
+ Prefix = Prefix.substr(0, 1);
+ }
+ }
+
+ unsigned WhitespaceOffsetInToken = Lines[LineIndex].data() -
+ Tok.TokenText.data() -
+ LeadingWhitespace[LineIndex];
+ Whitespaces.replaceWhitespaceInToken(
+ Tok, WhitespaceOffsetInToken, LeadingWhitespace[LineIndex], "", Prefix,
+ InPPDirective, 1, IndentLevel,
+ StartOfLineColumn[LineIndex] - Prefix.size());
+}
+
+unsigned
+BreakableBlockComment::getContentStartColumn(unsigned LineIndex,
+ unsigned TailOffset) const {
+ // If we break, we always break at the predefined indent.
+ if (TailOffset != 0)
+ return IndentAtLineBreak;
+ return std::max(0, StartOfLineColumn[LineIndex]);
+}
+
+} // namespace format
+} // namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Format/BreakableToken.h b/contrib/llvm/tools/clang/lib/Format/BreakableToken.h
new file mode 100644
index 0000000..eb1f9fd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Format/BreakableToken.h
@@ -0,0 +1,245 @@
+//===--- BreakableToken.h - Format C++ code -------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Declares the BreakableToken, BreakableStringLiteral, and
+/// BreakableBlockComment classes, which contain token-type-specific logic to
+/// break long lines in tokens.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_FORMAT_BREAKABLETOKEN_H
+#define LLVM_CLANG_LIB_FORMAT_BREAKABLETOKEN_H
+
+#include "Encoding.h"
+#include "TokenAnnotator.h"
+#include "WhitespaceManager.h"
+#include <utility>
+
+namespace clang {
+namespace format {
+
+struct FormatStyle;
+
+/// \brief Base class for strategies on how to break tokens.
+///
+/// FIXME: The interface seems set in stone, so we might want to just pull the
+/// strategy into the class, instead of controlling it from the outside.
+class BreakableToken {
+public:
+ /// \brief Contains starting character index and length of split.
+ typedef std::pair<StringRef::size_type, unsigned> Split;
+
+ virtual ~BreakableToken() {}
+
+ /// \brief Returns the number of lines in this token in the original code.
+ virtual unsigned getLineCount() const = 0;
+
+ /// \brief Returns the number of columns required to format the piece of line
+ /// at \p LineIndex, from byte offset \p Offset with length \p Length.
+ ///
+ /// Note that previous breaks are not taken into account. \p Offset is always
+ /// specified from the start of the (original) line.
+ /// \p Length can be set to StringRef::npos, which means "to the end of the
+ /// line".
+ virtual unsigned
+ getLineLengthAfterSplit(unsigned LineIndex, unsigned Offset,
+ StringRef::size_type Length) const = 0;
+
+ /// \brief Returns a range (offset, length) at which to break the line at
+ /// \p LineIndex, if previously broken at \p TailOffset. If possible, do not
+ /// violate \p ColumnLimit.
+ virtual Split getSplit(unsigned LineIndex, unsigned TailOffset,
+ unsigned ColumnLimit) const = 0;
+
+ /// \brief Emits the previously retrieved \p Split via \p Whitespaces.
+ virtual void insertBreak(unsigned LineIndex, unsigned TailOffset, Split Split,
+ WhitespaceManager &Whitespaces) = 0;
+
+ /// \brief Replaces the whitespace range described by \p Split with a single
+ /// space.
+ virtual void replaceWhitespace(unsigned LineIndex, unsigned TailOffset,
+ Split Split,
+ WhitespaceManager &Whitespaces) = 0;
+
+ /// \brief Replaces the whitespace between \p LineIndex-1 and \p LineIndex.
+ virtual void replaceWhitespaceBefore(unsigned LineIndex,
+ WhitespaceManager &Whitespaces) {}
+
+protected:
+ BreakableToken(const FormatToken &Tok, unsigned IndentLevel,
+ bool InPPDirective, encoding::Encoding Encoding,
+ const FormatStyle &Style)
+ : Tok(Tok), IndentLevel(IndentLevel), InPPDirective(InPPDirective),
+ Encoding(Encoding), Style(Style) {}
+
+ const FormatToken &Tok;
+ const unsigned IndentLevel;
+ const bool InPPDirective;
+ const encoding::Encoding Encoding;
+ const FormatStyle &Style;
+};
+
+/// \brief Base class for single line tokens that can be broken.
+///
+/// \c getSplit() needs to be implemented by child classes.
+class BreakableSingleLineToken : public BreakableToken {
+public:
+ unsigned getLineCount() const override;
+ unsigned getLineLengthAfterSplit(unsigned LineIndex, unsigned TailOffset,
+ StringRef::size_type Length) const override;
+
+protected:
+ BreakableSingleLineToken(const FormatToken &Tok, unsigned IndentLevel,
+ unsigned StartColumn, StringRef Prefix,
+ StringRef Postfix, bool InPPDirective,
+ encoding::Encoding Encoding,
+ const FormatStyle &Style);
+
+ // The column in which the token starts.
+ unsigned StartColumn;
+ // The prefix a line needs after a break in the token.
+ StringRef Prefix;
+ // The postfix a line needs before introducing a break.
+ StringRef Postfix;
+ // The token text excluding the prefix and postfix.
+ StringRef Line;
+};
+
+class BreakableStringLiteral : public BreakableSingleLineToken {
+public:
+ /// \brief Creates a breakable token for a single line string literal.
+ ///
+ /// \p StartColumn specifies the column in which the token will start
+ /// after formatting.
+ BreakableStringLiteral(const FormatToken &Tok, unsigned IndentLevel,
+ unsigned StartColumn, StringRef Prefix,
+ StringRef Postfix, bool InPPDirective,
+ encoding::Encoding Encoding, const FormatStyle &Style);
+
+ Split getSplit(unsigned LineIndex, unsigned TailOffset,
+ unsigned ColumnLimit) const override;
+ void insertBreak(unsigned LineIndex, unsigned TailOffset, Split Split,
+ WhitespaceManager &Whitespaces) override;
+ void replaceWhitespace(unsigned LineIndex, unsigned TailOffset, Split Split,
+ WhitespaceManager &Whitespaces) override {}
+};
+
+class BreakableLineComment : public BreakableSingleLineToken {
+public:
+ /// \brief Creates a breakable token for a line comment.
+ ///
+ /// \p StartColumn specifies the column in which the comment will start
+ /// after formatting.
+ BreakableLineComment(const FormatToken &Token, unsigned IndentLevel,
+ unsigned StartColumn, bool InPPDirective,
+ encoding::Encoding Encoding, const FormatStyle &Style);
+
+ Split getSplit(unsigned LineIndex, unsigned TailOffset,
+ unsigned ColumnLimit) const override;
+ void insertBreak(unsigned LineIndex, unsigned TailOffset, Split Split,
+ WhitespaceManager &Whitespaces) override;
+ void replaceWhitespace(unsigned LineIndex, unsigned TailOffset, Split Split,
+ WhitespaceManager &Whitespaces) override;
+ void replaceWhitespaceBefore(unsigned LineIndex,
+ WhitespaceManager &Whitespaces) override;
+
+private:
+ // The prefix without an additional space if one was added.
+ StringRef OriginalPrefix;
+};
+
+class BreakableBlockComment : public BreakableToken {
+public:
+ /// \brief Creates a breakable token for a block comment.
+ ///
+ /// \p StartColumn specifies the column in which the comment will start
+ /// after formatting, while \p OriginalStartColumn specifies in which
+ /// column the comment started before formatting.
+ /// If the comment starts a line after formatting, set \p FirstInLine to true.
+ BreakableBlockComment(const FormatToken &Token, unsigned IndentLevel,
+ unsigned StartColumn, unsigned OriginalStartColumn,
+ bool FirstInLine, bool InPPDirective,
+ encoding::Encoding Encoding, const FormatStyle &Style);
+
+ unsigned getLineCount() const override;
+ unsigned getLineLengthAfterSplit(unsigned LineIndex, unsigned TailOffset,
+ StringRef::size_type Length) const override;
+ Split getSplit(unsigned LineIndex, unsigned TailOffset,
+ unsigned ColumnLimit) const override;
+ void insertBreak(unsigned LineIndex, unsigned TailOffset, Split Split,
+ WhitespaceManager &Whitespaces) override;
+ void replaceWhitespace(unsigned LineIndex, unsigned TailOffset, Split Split,
+ WhitespaceManager &Whitespaces) override;
+ void replaceWhitespaceBefore(unsigned LineIndex,
+ WhitespaceManager &Whitespaces) override;
+
+private:
+ // Rearranges the whitespace between Lines[LineIndex-1] and Lines[LineIndex],
+ // so that all whitespace between the lines is attributed to Lines[LineIndex]
+ // as leading whitespace:
+ // - Lines[LineIndex] points to the text after that whitespace
+ // - Lines[LineIndex-1] shrinks by its trailing whitespace
+ // - LeadingWhitespace[LineIndex] is updated with the complete whitespace
+ // between the end of the text of Lines[LineIndex-1] and Lines[LineIndex]
+ //
+ // Sets StartOfLineColumn to the intended column in which the text at
+ // Lines[LineIndex] starts (note that the decoration, if present, is not
+ // considered part of the text).
+ void adjustWhitespace(unsigned LineIndex, int IndentDelta);
+
+ // Returns the column at which the text in line LineIndex starts, when broken
+ // at TailOffset. Note that the decoration (if present) is not considered part
+ // of the text.
+ unsigned getContentStartColumn(unsigned LineIndex, unsigned TailOffset) const;
+
+ // Contains the text of the lines of the block comment, excluding the leading
+ // /* in the first line and trailing */ in the last line, and excluding all
+ // trailing whitespace between the lines. Note that the decoration (if
+ // present) is also not considered part of the text.
+ SmallVector<StringRef, 16> Lines;
+
+ // LeadingWhitespace[i] is the number of characters regarded as whitespace in
+ // front of Lines[i]. Note that this can include "* " sequences, which we
+ // regard as whitespace when all lines have a "*" prefix.
+ SmallVector<unsigned, 16> LeadingWhitespace;
+
+ // StartOfLineColumn[i] is the target column at which Line[i] should be.
+ // Note that this excludes a leading "* " or "*" in case all lines have
+ // a "*" prefix.
+ // The first line's target column is always positive. The remaining lines'
+ // target columns are relative to the first line to allow correct indentation
+ // of comments in \c WhitespaceManager. Thus they can also be negative (when
+ // the first line needs to be unindented by more than the actual whitespace
+ // present on another line).
+ SmallVector<int, 16> StartOfLineColumn;
+
+ // The column at which the text of a broken line should start.
+ // Note that an optional decoration would go before that column.
+ // IndentAtLineBreak is a uniform position for all lines in a block comment,
+ // regardless of their relative position.
+ // FIXME: Revisit the decision to do this; the main reason was to support
+ // patterns like
+ // /**************//**
+ // * Comment
+ // We could also support such patterns by special casing the first line
+ // instead.
+ unsigned IndentAtLineBreak;
+
+ // This is to distinguish between the case when the last line was empty and
+ // the case when it started with a decoration ("*" or "* ").
+ bool LastLineNeedsDecoration;
+
+ // Either "* " if all lines begin with a "*", or empty.
+ StringRef Decoration;
+};
+
+} // namespace format
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Format/ContinuationIndenter.cpp b/contrib/llvm/tools/clang/lib/Format/ContinuationIndenter.cpp
new file mode 100644
index 0000000..8faab28
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Format/ContinuationIndenter.cpp
@@ -0,0 +1,1215 @@
+//===--- ContinuationIndenter.cpp - Format C++ code -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements the continuation indenter.
+///
+//===----------------------------------------------------------------------===//
+
+#include "BreakableToken.h"
+#include "ContinuationIndenter.h"
+#include "WhitespaceManager.h"
+#include "clang/Basic/OperatorPrecedence.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Format/Format.h"
+#include "llvm/Support/Debug.h"
+#include <string>
+
+#define DEBUG_TYPE "format-formatter"
+
+namespace clang {
+namespace format {
+
+// Returns the length of everything up to the first possible line break after
+// the ), ], } or > matching \c Tok.
+static unsigned getLengthToMatchingParen(const FormatToken &Tok) {
+ if (!Tok.MatchingParen)
+ return 0;
+ FormatToken *End = Tok.MatchingParen;
+ while (End->Next && !End->Next->CanBreakBefore) {
+ End = End->Next;
+ }
+ return End->TotalLength - Tok.TotalLength + 1;
+}
+
+static unsigned getLengthToNextOperator(const FormatToken &Tok) {
+ if (!Tok.NextOperator)
+ return 0;
+ return Tok.NextOperator->TotalLength - Tok.TotalLength;
+}
+
+// Returns \c true if \c Tok is the "." or "->" of a call and starts the next
+// segment of a builder type call.
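+// For illustration (hypothetical names): in builder().setX(1).setY(2), each
+// "." that follows a ')' starts a new segment of the builder-type call.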
+static bool startsSegmentOfBuilderTypeCall(const FormatToken &Tok) {
+ return Tok.isMemberAccess() && Tok.Previous && Tok.Previous->closesScope();
+}
+
+// Returns \c true if \c Current starts a new parameter.
+static bool startsNextParameter(const FormatToken &Current,
+ const FormatStyle &Style) {
+ const FormatToken &Previous = *Current.Previous;
+ if (Current.is(TT_CtorInitializerComma) &&
+ Style.BreakConstructorInitializersBeforeComma)
+ return true;
+ return Previous.is(tok::comma) && !Current.isTrailingComment() &&
+ (Previous.isNot(TT_CtorInitializerComma) ||
+ !Style.BreakConstructorInitializersBeforeComma);
+}
+
+ContinuationIndenter::ContinuationIndenter(const FormatStyle &Style,
+ const AdditionalKeywords &Keywords,
+ SourceManager &SourceMgr,
+ WhitespaceManager &Whitespaces,
+ encoding::Encoding Encoding,
+ bool BinPackInconclusiveFunctions)
+ : Style(Style), Keywords(Keywords), SourceMgr(SourceMgr),
+ Whitespaces(Whitespaces), Encoding(Encoding),
+ BinPackInconclusiveFunctions(BinPackInconclusiveFunctions),
+ CommentPragmasRegex(Style.CommentPragmas) {}
+
+LineState ContinuationIndenter::getInitialState(unsigned FirstIndent,
+ const AnnotatedLine *Line,
+ bool DryRun) {
+ LineState State;
+ State.FirstIndent = FirstIndent;
+ State.Column = FirstIndent;
+ State.Line = Line;
+ State.NextToken = Line->First;
+ State.Stack.push_back(ParenState(FirstIndent, Line->Level, FirstIndent,
+ /*AvoidBinPacking=*/false,
+ /*NoLineBreak=*/false));
+ State.LineContainsContinuedForLoopSection = false;
+ State.StartOfStringLiteral = 0;
+ State.StartOfLineLevel = 0;
+ State.LowestLevelOnLine = 0;
+ State.IgnoreStackForComparison = false;
+
+ // The first token has already been indented and thus consumed.
+ moveStateToNextToken(State, DryRun, /*Newline=*/false);
+ return State;
+}
+
+bool ContinuationIndenter::canBreak(const LineState &State) {
+ const FormatToken &Current = *State.NextToken;
+ const FormatToken &Previous = *Current.Previous;
+ assert(&Previous == Current.Previous);
+ if (!Current.CanBreakBefore &&
+ !(State.Stack.back().BreakBeforeClosingBrace &&
+ Current.closesBlockOrBlockTypeList(Style)))
+ return false;
+ // The opening "{" of a braced list has to be on the same line as the first
+ // element if it is nested in another braced init list or function call.
+ if (!Current.MustBreakBefore && Previous.is(tok::l_brace) &&
+ Previous.isNot(TT_DictLiteral) && Previous.BlockKind == BK_BracedInit &&
+ Previous.Previous &&
+ Previous.Previous->isOneOf(tok::l_brace, tok::l_paren, tok::comma))
+ return false;
+ // This prevents breaks like:
+ // ...
+ // SomeParameter, OtherParameter).DoSomething(
+ // ...
+ // As they hide "DoSomething" and are generally bad for readability.
+ if (Previous.opensScope() && Previous.isNot(tok::l_brace) &&
+ State.LowestLevelOnLine < State.StartOfLineLevel &&
+ State.LowestLevelOnLine < Current.NestingLevel)
+ return false;
+ if (Current.isMemberAccess() && State.Stack.back().ContainsUnwrappedBuilder)
+ return false;
+
+ // Don't create a 'hanging' indent if there are multiple blocks in a single
+ // statement.
+ if (Previous.is(tok::l_brace) && State.Stack.size() > 1 &&
+ State.Stack[State.Stack.size() - 2].NestedBlockInlined &&
+ State.Stack[State.Stack.size() - 2].HasMultipleNestedBlocks)
+ return false;
+
+ // Don't break after very short return types (e.g. "void") as that is often
+ // unexpected.
+ if (Current.is(TT_FunctionDeclarationName) && State.Column < 6) {
+ if (Style.AlwaysBreakAfterReturnType == FormatStyle::RTBS_None)
+ return false;
+ }
+
+ return !State.Stack.back().NoLineBreak;
+}
+
+bool ContinuationIndenter::mustBreak(const LineState &State) {
+ const FormatToken &Current = *State.NextToken;
+ const FormatToken &Previous = *Current.Previous;
+ if (Current.MustBreakBefore || Current.is(TT_InlineASMColon))
+ return true;
+ if (State.Stack.back().BreakBeforeClosingBrace &&
+ Current.closesBlockOrBlockTypeList(Style))
+ return true;
+ if (Previous.is(tok::semi) && State.LineContainsContinuedForLoopSection)
+ return true;
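+ // Break before the next parameter/operand if an earlier decision in this
+ // scope already forces one-per-line breaking, e.g.:
+ //   f(aaaaaaaaaaaaaaaaaaaa,
+ //     bbbbbbbbbbbbbbbbbbbb);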
+ if ((startsNextParameter(Current, Style) || Previous.is(tok::semi) ||
+ (Previous.is(TT_TemplateCloser) && Current.is(TT_StartOfName)) ||
+ (Style.BreakBeforeTernaryOperators && Current.is(TT_ConditionalExpr) &&
+ Previous.isNot(tok::question)) ||
+ (!Style.BreakBeforeTernaryOperators &&
+ Previous.is(TT_ConditionalExpr))) &&
+ State.Stack.back().BreakBeforeParameter && !Current.isTrailingComment() &&
+ !Current.isOneOf(tok::r_paren, tok::r_brace))
+ return true;
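+ // Break after the opening brace/bracket of a dict literal or a
+ // multi-parameter array initializer whose contents cannot fit on the
+ // current line, e.g.:
+ //   NSDictionary *D = @{
+ //     @"aaaaaaaa" : @1,
+ //   };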
+ if (((Previous.is(TT_DictLiteral) && Previous.is(tok::l_brace)) ||
+ (Previous.is(TT_ArrayInitializerLSquare) &&
+ Previous.ParameterCount > 1)) &&
+ Style.ColumnLimit > 0 &&
+ getLengthToMatchingParen(Previous) + State.Column - 1 >
+ getColumnLimit(State))
+ return true;
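+ // Break before a constructor initializer colon if the initializers won't
+ // fit on the current line anyway, e.g.:
+ //   Constructor()
+ //       : aaaaaaaaaaaaaaaaaaaa(1), bbbbbbbbbbbbbbbbbbbb(2) {}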
+ if (Current.is(TT_CtorInitializerColon) &&
+ (State.Column + State.Line->Last->TotalLength - Current.TotalLength + 2 >
+ getColumnLimit(State) ||
+ State.Stack.back().BreakBeforeParameter) &&
+ ((Style.AllowShortFunctionsOnASingleLine != FormatStyle::SFS_All) ||
+ Style.BreakConstructorInitializersBeforeComma || Style.ColumnLimit != 0))
+ return true;
+ if (Current.is(TT_SelectorName) && State.Stack.back().ObjCSelectorNameFound &&
+ State.Stack.back().BreakBeforeParameter)
+ return true;
+
+ unsigned NewLineColumn = getNewLineColumn(State);
+ if (State.Column <= NewLineColumn)
+ return false;
+
+ if (Current.isMemberAccess() &&
+ State.Column + getLengthToNextOperator(Current) > Style.ColumnLimit)
+ return true;
+
+ if (Style.AlwaysBreakBeforeMultilineStrings &&
+ (NewLineColumn == State.FirstIndent + Style.ContinuationIndentWidth ||
+ Previous.is(tok::comma) || Current.NestingLevel < 2) &&
+ !Previous.isOneOf(tok::kw_return, tok::lessless, tok::at) &&
+ !Previous.isOneOf(TT_InlineASMColon, TT_ConditionalExpr) &&
+ nextIsMultilineString(State))
+ return true;
+
+ // Using CanBreakBefore here and below takes care of the decision whether the
+ // current style uses wrapping before or after operators for the given
+ // operator.
+ if (Previous.is(TT_BinaryOperator) && Current.CanBreakBefore) {
+ // If we need to break somewhere inside the LHS of a binary expression, we
+ // should also break after the operator. Otherwise, the formatting would
+ // hide the operator precedence, e.g. in:
+ // if (aaaaaaaaaaaaaa ==
+ // bbbbbbbbbbbbbb && c) {..
+ // For comparisons, we only apply this rule if the LHS is itself a binary
+ // expression, as otherwise the line breaks seem superfluous.
+ // We need special cases for ">>" which we have split into two ">" while
+ // lexing in order to make template parsing easier.
+ bool IsComparison = (Previous.getPrecedence() == prec::Relational ||
+ Previous.getPrecedence() == prec::Equality) &&
+ Previous.Previous &&
+ Previous.Previous->isNot(TT_BinaryOperator); // For >>.
+ bool LHSIsBinaryExpr =
+ Previous.Previous && Previous.Previous->EndsBinaryExpression;
+ if ((!IsComparison || LHSIsBinaryExpr) && !Current.isTrailingComment() &&
+ Previous.getPrecedence() != prec::Assignment &&
+ State.Stack.back().BreakBeforeParameter)
+ return true;
+ } else if (Current.is(TT_BinaryOperator) && Current.CanBreakBefore &&
+ State.Stack.back().BreakBeforeParameter) {
+ return true;
+ }
+
+ // Same as above, but for the first "<<" operator.
+ if (Current.is(tok::lessless) && Current.isNot(TT_OverloadedOperator) &&
+ State.Stack.back().BreakBeforeParameter &&
+ State.Stack.back().FirstLessLess == 0)
+ return true;
+
+ if (Current.NestingLevel == 0 && !Current.isTrailingComment()) {
+ // Always break after "template <...>" and leading annotations. This is only
+ // for cases where the entire line does not fit on a single line, as a
+ // different LineFormatter would be used otherwise.
+ if (Previous.ClosesTemplateDeclaration)
+ return true;
+ if (Previous.is(TT_FunctionAnnotationRParen))
+ return true;
+ if (Previous.is(TT_LeadingJavaAnnotation) && Current.isNot(tok::l_paren) &&
+ Current.isNot(TT_LeadingJavaAnnotation))
+ return true;
+ }
+
+ // If the return type spans multiple lines, wrap before the function name.
+ if ((Current.is(TT_FunctionDeclarationName) ||
+ (Current.is(tok::kw_operator) && !Previous.is(tok::coloncolon))) &&
+ State.Stack.back().BreakBeforeParameter)
+ return true;
+
+ if (startsSegmentOfBuilderTypeCall(Current) &&
+ (State.Stack.back().CallContinuation != 0 ||
+ State.Stack.back().BreakBeforeParameter))
+ return true;
+
+ // The following could be precomputed as they do not depend on the state.
+ // However, as they should take effect only if the UnwrappedLine does not fit
+ // into the ColumnLimit, they are checked here in the ContinuationIndenter.
+ if (Style.ColumnLimit != 0 && Previous.BlockKind == BK_Block &&
+ Previous.is(tok::l_brace) && !Current.isOneOf(tok::r_brace, tok::comment))
+ return true;
+
+ if (Current.is(tok::lessless) &&
+ ((Previous.is(tok::identifier) && Previous.TokenText == "endl") ||
+ (Previous.Tok.isLiteral() && (Previous.TokenText.endswith("\\n\"") ||
+ Previous.TokenText == "\'\\n\'"))))
+ return true;
+
+ return false;
+}
+
+unsigned ContinuationIndenter::addTokenToState(LineState &State, bool Newline,
+ bool DryRun,
+ unsigned ExtraSpaces) {
+ const FormatToken &Current = *State.NextToken;
+
+ assert(!State.Stack.empty());
+ if ((Current.is(TT_ImplicitStringLiteral) &&
+ (Current.Previous->Tok.getIdentifierInfo() == nullptr ||
+ Current.Previous->Tok.getIdentifierInfo()->getPPKeywordID() ==
+ tok::pp_not_keyword))) {
+ unsigned EndColumn =
+ SourceMgr.getSpellingColumnNumber(Current.WhitespaceRange.getEnd());
+ if (Current.LastNewlineOffset != 0) {
+ // If there is a newline within this token, the final column will be
+ // determined solely by the current end column.
+ State.Column = EndColumn;
+ } else {
+ unsigned StartColumn =
+ SourceMgr.getSpellingColumnNumber(Current.WhitespaceRange.getBegin());
+ assert(EndColumn >= StartColumn);
+ State.Column += EndColumn - StartColumn;
+ }
+ moveStateToNextToken(State, DryRun, /*Newline=*/false);
+ return 0;
+ }
+
+ unsigned Penalty = 0;
+ if (Newline)
+ Penalty = addTokenOnNewLine(State, DryRun);
+ else
+ addTokenOnCurrentLine(State, DryRun, ExtraSpaces);
+
+ return moveStateToNextToken(State, DryRun, Newline) + Penalty;
+}
+
+void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
+ unsigned ExtraSpaces) {
+ FormatToken &Current = *State.NextToken;
+ const FormatToken &Previous = *State.NextToken->Previous;
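+ // Record the column of the variable name in a declaration or for-loop init
+ // so that further declarators can be aligned to it (see
+ // ParenState::VariablePos).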
+ if (Current.is(tok::equal) &&
+ (State.Line->First->is(tok::kw_for) || Current.NestingLevel == 0) &&
+ State.Stack.back().VariablePos == 0) {
+ State.Stack.back().VariablePos = State.Column;
+ // Move over * and & if they are bound to the variable name.
+ const FormatToken *Tok = &Previous;
+ while (Tok && State.Stack.back().VariablePos >= Tok->ColumnWidth) {
+ State.Stack.back().VariablePos -= Tok->ColumnWidth;
+ if (Tok->SpacesRequiredBefore != 0)
+ break;
+ Tok = Tok->Previous;
+ }
+ if (Previous.PartOfMultiVariableDeclStmt)
+ State.Stack.back().LastSpace = State.Stack.back().VariablePos;
+ }
+
+ unsigned Spaces = Current.SpacesRequiredBefore + ExtraSpaces;
+
+ if (!DryRun)
+ Whitespaces.replaceWhitespace(Current, /*Newlines=*/0, /*IndentLevel=*/0,
+ Spaces, State.Column + Spaces);
+
+ if (Current.is(TT_SelectorName) &&
+ !State.Stack.back().ObjCSelectorNameFound) {
+ unsigned MinIndent =
+ std::max(State.FirstIndent + Style.ContinuationIndentWidth,
+ State.Stack.back().Indent);
+ unsigned FirstColonPos = State.Column + Spaces + Current.ColumnWidth;
+ if (Current.LongestObjCSelectorName == 0)
+ State.Stack.back().AlignColons = false;
+ else if (MinIndent + Current.LongestObjCSelectorName > FirstColonPos)
+ State.Stack.back().ColonPos = MinIndent + Current.LongestObjCSelectorName;
+ else
+ State.Stack.back().ColonPos = FirstColonPos;
+ }
+
+ // In "AlwaysBreak" mode, enforce wrapping directly after the parenthesis by
+ // disallowing any further line breaks if there is no line break after the
+ // opening parenthesis. Don't break if it doesn't conserve columns.
+ if (Style.AlignAfterOpenBracket == FormatStyle::BAS_AlwaysBreak &&
+ Previous.is(tok::l_paren) && State.Column > getNewLineColumn(State) &&
+ (!Previous.Previous ||
+ !Previous.Previous->isOneOf(tok::kw_for, tok::kw_while, tok::kw_switch)))
+ State.Stack.back().NoLineBreak = true;
+
+ if (Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign &&
+ Previous.opensScope() && Previous.isNot(TT_ObjCMethodExpr) &&
+ (Current.isNot(TT_LineComment) || Previous.BlockKind == BK_BracedInit))
+ State.Stack.back().Indent = State.Column + Spaces;
+ if (State.Stack.back().AvoidBinPacking && startsNextParameter(Current, Style))
+ State.Stack.back().NoLineBreak = true;
+ if (startsSegmentOfBuilderTypeCall(Current) &&
+ State.Column > getNewLineColumn(State))
+ State.Stack.back().ContainsUnwrappedBuilder = true;
+
+ if (Current.is(TT_LambdaArrow) && Style.Language == FormatStyle::LK_Java)
+ State.Stack.back().NoLineBreak = true;
+ if (Current.isMemberAccess() && Previous.is(tok::r_paren) &&
+ (Previous.MatchingParen &&
+ (Previous.TotalLength - Previous.MatchingParen->TotalLength > 10))) {
+ // If there is a function call with long parameters, break before trailing
+ // calls. This prevents things like:
+ // EXPECT_CALL(SomeLongParameter).Times(
+ // 2);
+ // We don't want to do this for short parameters as they can just be
+ // indexes.
+ State.Stack.back().NoLineBreak = true;
+ }
+
+ State.Column += Spaces;
+ if (Current.isNot(tok::comment) && Previous.is(tok::l_paren) &&
+ Previous.Previous &&
+ Previous.Previous->isOneOf(tok::kw_if, tok::kw_for)) {
+ // Treat the condition inside an if as if it was a second function
+ // parameter, i.e. let nested calls have a continuation indent.
+ State.Stack.back().LastSpace = State.Column;
+ State.Stack.back().NestedBlockIndent = State.Column;
+ } else if (!Current.isOneOf(tok::comment, tok::caret) &&
+ (Previous.is(tok::comma) ||
+ (Previous.is(tok::colon) && Previous.is(TT_ObjCMethodExpr)))) {
+ State.Stack.back().LastSpace = State.Column;
+ } else if ((Previous.isOneOf(TT_BinaryOperator, TT_ConditionalExpr,
+ TT_CtorInitializerColon)) &&
+ ((Previous.getPrecedence() != prec::Assignment &&
+ (Previous.isNot(tok::lessless) || Previous.OperatorIndex != 0 ||
+ Previous.NextOperator)) ||
+ Current.StartsBinaryExpression)) {
+ // Always indent relative to the RHS of the expression unless this is a
+ // simple assignment without binary expression on the RHS. Also indent
+ // relative to unary operators and the colons of constructor initializers.
+ State.Stack.back().LastSpace = State.Column;
+ } else if (Previous.is(TT_InheritanceColon)) {
+ State.Stack.back().Indent = State.Column;
+ State.Stack.back().LastSpace = State.Column;
+ } else if (Previous.opensScope()) {
+ // If a function has a trailing call, indent all parameters from the
+ // opening parenthesis. This avoids confusing indents like:
+ // OuterFunction(InnerFunctionCall( // break
+ // ParameterToInnerFunction)) // break
+ // .SecondInnerFunctionCall();
+ bool HasTrailingCall = false;
+ if (Previous.MatchingParen) {
+ const FormatToken *Next = Previous.MatchingParen->getNextNonComment();
+ HasTrailingCall = Next && Next->isMemberAccess();
+ }
+ if (HasTrailingCall && State.Stack.size() > 1 &&
+ State.Stack[State.Stack.size() - 2].CallContinuation == 0)
+ State.Stack.back().LastSpace = State.Column;
+ }
+}
+
+unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
+ bool DryRun) {
+ FormatToken &Current = *State.NextToken;
+ const FormatToken &Previous = *State.NextToken->Previous;
+
+ // Extra penalty that needs to be added because of the way certain line
+ // breaks are chosen.
+ unsigned Penalty = 0;
+
+ const FormatToken *PreviousNonComment = Current.getPreviousNonComment();
+ const FormatToken *NextNonComment = Previous.getNextNonComment();
+ if (!NextNonComment)
+ NextNonComment = &Current;
+ // The first line break on any NestingLevel causes an extra penalty in order
+ // to prefer similar line breaks.
+ if (!State.Stack.back().ContainsLineBreak)
+ Penalty += 15;
+ State.Stack.back().ContainsLineBreak = true;
+
+ Penalty += State.NextToken->SplitPenalty;
+
+ // Breaking before the first "<<" is generally not desirable if the LHS is
+ // short. Also always add the penalty if the LHS is split over multiple lines
+ // to avoid unnecessary line breaks that just work around this penalty.
+ if (NextNonComment->is(tok::lessless) &&
+ State.Stack.back().FirstLessLess == 0 &&
+ (State.Column <= Style.ColumnLimit / 3 ||
+ State.Stack.back().BreakBeforeParameter))
+ Penalty += Style.PenaltyBreakFirstLessLess;
+
+ State.Column = getNewLineColumn(State);
+
+ // Indent nested blocks relative to this column, unless in a very specific
+ // JavaScript special case where:
+ //
+ // var loooooong_name =
+ // function() {
+ // // code
+ // }
+ //
+ // is common and should be formatted like a free-standing function.
+ if (Style.Language != FormatStyle::LK_JavaScript ||
+ Current.NestingLevel != 0 || !PreviousNonComment->is(tok::equal) ||
+ !Current.is(Keywords.kw_function))
+ State.Stack.back().NestedBlockIndent = State.Column;
+
+ if (NextNonComment->isMemberAccess()) {
+ if (State.Stack.back().CallContinuation == 0)
+ State.Stack.back().CallContinuation = State.Column;
+ } else if (NextNonComment->is(TT_SelectorName)) {
+ if (!State.Stack.back().ObjCSelectorNameFound) {
+ if (NextNonComment->LongestObjCSelectorName == 0) {
+ State.Stack.back().AlignColons = false;
+ } else {
+ State.Stack.back().ColonPos =
+ (Style.IndentWrappedFunctionNames
+ ? std::max(State.Stack.back().Indent,
+ State.FirstIndent + Style.ContinuationIndentWidth)
+ : State.Stack.back().Indent) +
+ NextNonComment->LongestObjCSelectorName;
+ }
+ } else if (State.Stack.back().AlignColons &&
+ State.Stack.back().ColonPos <= NextNonComment->ColumnWidth) {
+ State.Stack.back().ColonPos = State.Column + NextNonComment->ColumnWidth;
+ }
+ } else if (PreviousNonComment && PreviousNonComment->is(tok::colon) &&
+ PreviousNonComment->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral)) {
+ // FIXME: This is hacky, find a better way. The problem is that in an ObjC
+ // method expression, the block should be aligned to the line starting it,
+ // e.g.:
+ // [aaaaaaaaaaaaaaa aaaaaaaaa: // break for some reason
+ // ^(int *i) {
+ // // ...
+ // }];
+ // Thus, we set LastSpace of the next higher NestingLevel, to which we move
+ // when we consume all of the "}"'s FakeRParens at the "{".
+ if (State.Stack.size() > 1)
+ State.Stack[State.Stack.size() - 2].LastSpace =
+ std::max(State.Stack.back().LastSpace, State.Stack.back().Indent) +
+ Style.ContinuationIndentWidth;
+ }
+
+ if ((Previous.isOneOf(tok::comma, tok::semi) &&
+ !State.Stack.back().AvoidBinPacking) ||
+ Previous.is(TT_BinaryOperator))
+ State.Stack.back().BreakBeforeParameter = false;
+ if (Previous.isOneOf(TT_TemplateCloser, TT_JavaAnnotation) &&
+ Current.NestingLevel == 0)
+ State.Stack.back().BreakBeforeParameter = false;
+ if (NextNonComment->is(tok::question) ||
+ (PreviousNonComment && PreviousNonComment->is(tok::question)))
+ State.Stack.back().BreakBeforeParameter = true;
+ if (Current.is(TT_BinaryOperator) && Current.CanBreakBefore)
+ State.Stack.back().BreakBeforeParameter = false;
+
+ if (!DryRun) {
+ unsigned Newlines = std::max(
+ 1u, std::min(Current.NewlinesBefore, Style.MaxEmptyLinesToKeep + 1));
+ Whitespaces.replaceWhitespace(Current, Newlines,
+ State.Stack.back().IndentLevel, State.Column,
+ State.Column, State.Line->InPPDirective);
+ }
+
+ if (!Current.isTrailingComment())
+ State.Stack.back().LastSpace = State.Column;
+ State.StartOfLineLevel = Current.NestingLevel;
+ State.LowestLevelOnLine = Current.NestingLevel;
+
+ // Any break on this level means that the parent level has been broken
+ // and we need to avoid bin packing there.
+ bool NestedBlockSpecialCase =
+ Style.Language != FormatStyle::LK_Cpp &&
+ Current.is(tok::r_brace) && State.Stack.size() > 1 &&
+ State.Stack[State.Stack.size() - 2].NestedBlockInlined;
+ if (!NestedBlockSpecialCase)
+ for (unsigned i = 0, e = State.Stack.size() - 1; i != e; ++i)
+ State.Stack[i].BreakBeforeParameter = true;
+
+ if (PreviousNonComment &&
+ !PreviousNonComment->isOneOf(tok::comma, tok::semi) &&
+ (PreviousNonComment->isNot(TT_TemplateCloser) ||
+ Current.NestingLevel != 0) &&
+ !PreviousNonComment->isOneOf(
+ TT_BinaryOperator, TT_FunctionAnnotationRParen, TT_JavaAnnotation,
+ TT_LeadingJavaAnnotation) &&
+ Current.isNot(TT_BinaryOperator) && !PreviousNonComment->opensScope())
+ State.Stack.back().BreakBeforeParameter = true;
+
+ // If we break after { or the [ of an array initializer, we should also break
+ // before the corresponding } or ].
+ if (PreviousNonComment &&
+ (PreviousNonComment->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare)))
+ State.Stack.back().BreakBeforeClosingBrace = true;
+
+ if (State.Stack.back().AvoidBinPacking) {
+ // If we are breaking after '(', '{', '<', this is not bin packing
+ // unless AllowAllParametersOfDeclarationOnNextLine is false or this is a
+ // dict/object literal.
+ if (!Previous.isOneOf(tok::l_paren, tok::l_brace, TT_BinaryOperator) ||
+ (!Style.AllowAllParametersOfDeclarationOnNextLine &&
+ State.Line->MustBeDeclaration) ||
+ Previous.is(TT_DictLiteral))
+ State.Stack.back().BreakBeforeParameter = true;
+ }
+
+ return Penalty;
+}
+
+unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
+ if (!State.NextToken || !State.NextToken->Previous)
+ return 0;
+ FormatToken &Current = *State.NextToken;
+ const FormatToken &Previous = *Current.Previous;
+ // If we are continuing an expression, we want to use the continuation indent.
+ unsigned ContinuationIndent =
+ std::max(State.Stack.back().LastSpace, State.Stack.back().Indent) +
+ Style.ContinuationIndentWidth;
+ const FormatToken *PreviousNonComment = Current.getPreviousNonComment();
+ const FormatToken *NextNonComment = Previous.getNextNonComment();
+ if (!NextNonComment)
+ NextNonComment = &Current;
+
+ // Java specific bits.
+ if (Style.Language == FormatStyle::LK_Java &&
+ Current.isOneOf(Keywords.kw_implements, Keywords.kw_extends))
+ return std::max(State.Stack.back().LastSpace,
+ State.Stack.back().Indent + Style.ContinuationIndentWidth);
+
+ if (NextNonComment->is(tok::l_brace) && NextNonComment->BlockKind == BK_Block)
+ return Current.NestingLevel == 0 ? State.FirstIndent
+ : State.Stack.back().Indent;
+ if (Current.isOneOf(tok::r_brace, tok::r_square) && State.Stack.size() > 1) {
+ if (Current.closesBlockOrBlockTypeList(Style))
+ return State.Stack[State.Stack.size() - 2].NestedBlockIndent;
+ if (Current.MatchingParen &&
+ Current.MatchingParen->BlockKind == BK_BracedInit)
+ return State.Stack[State.Stack.size() - 2].LastSpace;
+ return State.FirstIndent;
+ }
+ if (Current.is(tok::identifier) && Current.Next &&
+ Current.Next->is(TT_DictLiteral))
+ return State.Stack.back().Indent;
+ if (NextNonComment->isStringLiteral() && State.StartOfStringLiteral != 0)
+ return State.StartOfStringLiteral;
+ if (NextNonComment->is(TT_ObjCStringLiteral) &&
+ State.StartOfStringLiteral != 0)
+ return State.StartOfStringLiteral - 1;
+ if (NextNonComment->is(tok::lessless) &&
+ State.Stack.back().FirstLessLess != 0)
+ return State.Stack.back().FirstLessLess;
+ if (NextNonComment->isMemberAccess()) {
+ if (State.Stack.back().CallContinuation == 0)
+ return ContinuationIndent;
+ return State.Stack.back().CallContinuation;
+ }
+ if (State.Stack.back().QuestionColumn != 0 &&
+ ((NextNonComment->is(tok::colon) &&
+ NextNonComment->is(TT_ConditionalExpr)) ||
+ Previous.is(TT_ConditionalExpr)))
+ return State.Stack.back().QuestionColumn;
+ if (Previous.is(tok::comma) && State.Stack.back().VariablePos != 0)
+ return State.Stack.back().VariablePos;
+ if ((PreviousNonComment &&
+ (PreviousNonComment->ClosesTemplateDeclaration ||
+ PreviousNonComment->isOneOf(
+ TT_AttributeParen, TT_FunctionAnnotationRParen, TT_JavaAnnotation,
+ TT_LeadingJavaAnnotation))) ||
+ (!Style.IndentWrappedFunctionNames &&
+ NextNonComment->isOneOf(tok::kw_operator, TT_FunctionDeclarationName)))
+ return std::max(State.Stack.back().LastSpace, State.Stack.back().Indent);
+ if (NextNonComment->is(TT_SelectorName)) {
+ if (!State.Stack.back().ObjCSelectorNameFound) {
+ if (NextNonComment->LongestObjCSelectorName == 0)
+ return State.Stack.back().Indent;
+ return (Style.IndentWrappedFunctionNames
+ ? std::max(State.Stack.back().Indent,
+ State.FirstIndent + Style.ContinuationIndentWidth)
+ : State.Stack.back().Indent) +
+ NextNonComment->LongestObjCSelectorName -
+ NextNonComment->ColumnWidth;
+ }
+ if (!State.Stack.back().AlignColons)
+ return State.Stack.back().Indent;
+ if (State.Stack.back().ColonPos > NextNonComment->ColumnWidth)
+ return State.Stack.back().ColonPos - NextNonComment->ColumnWidth;
+ return State.Stack.back().Indent;
+ }
+ if (NextNonComment->is(TT_ArraySubscriptLSquare)) {
+ if (State.Stack.back().StartOfArraySubscripts != 0)
+ return State.Stack.back().StartOfArraySubscripts;
+ return ContinuationIndent;
+ }
+
+ // This ensures that we correctly format ObjC method calls without arguments,
+ // i.e. where the last element isn't a selector, like: [callee method];
+ if (NextNonComment->is(tok::identifier) && NextNonComment->FakeRParens == 0 &&
+ NextNonComment->Next && NextNonComment->Next->is(TT_ObjCMethodExpr))
+ return State.Stack.back().Indent;
+
+ if (NextNonComment->isOneOf(TT_StartOfName, TT_PointerOrReference) ||
+ Previous.isOneOf(tok::coloncolon, tok::equal, TT_JsTypeColon))
+ return ContinuationIndent;
+ if (PreviousNonComment && PreviousNonComment->is(tok::colon) &&
+ PreviousNonComment->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral))
+ return ContinuationIndent;
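+ // A constructor initializer colon is indented relative to the line start,
+ // e.g. with ConstructorInitializerIndentWidth = 4:
+ //   Constructor()
+ //       : Initializer(1) {}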
+ if (NextNonComment->is(TT_CtorInitializerColon))
+ return State.FirstIndent + Style.ConstructorInitializerIndentWidth;
+ if (NextNonComment->is(TT_CtorInitializerComma))
+ return State.Stack.back().Indent;
+ if (Previous.is(tok::r_paren) && !Current.isBinaryOperator() &&
+ !Current.isOneOf(tok::colon, tok::comment))
+ return ContinuationIndent;
+ if (State.Stack.back().Indent == State.FirstIndent && PreviousNonComment &&
+ PreviousNonComment->isNot(tok::r_brace))
+ // Ensure that we fall back to the continuation indent width instead of
+ // just flushing continuations left.
+ return State.Stack.back().Indent + Style.ContinuationIndentWidth;
+ return State.Stack.back().Indent;
+}
+
+unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
+ bool DryRun, bool Newline) {
+ assert(State.Stack.size());
+ const FormatToken &Current = *State.NextToken;
+
+ if (Current.is(TT_InheritanceColon))
+ State.Stack.back().AvoidBinPacking = true;
+ if (Current.is(tok::lessless) && Current.isNot(TT_OverloadedOperator)) {
+ if (State.Stack.back().FirstLessLess == 0)
+ State.Stack.back().FirstLessLess = State.Column;
+ else
+ State.Stack.back().LastOperatorWrapped = Newline;
+ }
+ if ((Current.is(TT_BinaryOperator) && Current.isNot(tok::lessless)) ||
+ Current.is(TT_ConditionalExpr))
+ State.Stack.back().LastOperatorWrapped = Newline;
+ if (Current.is(TT_ArraySubscriptLSquare) &&
+ State.Stack.back().StartOfArraySubscripts == 0)
+ State.Stack.back().StartOfArraySubscripts = State.Column;
+ if ((Current.is(tok::question) && Style.BreakBeforeTernaryOperators) ||
+ (Current.getPreviousNonComment() && Current.isNot(tok::colon) &&
+ Current.getPreviousNonComment()->is(tok::question) &&
+ !Style.BreakBeforeTernaryOperators))
+ State.Stack.back().QuestionColumn = State.Column;
+ if (!Current.opensScope() && !Current.closesScope())
+ State.LowestLevelOnLine =
+ std::min(State.LowestLevelOnLine, Current.NestingLevel);
+ if (Current.isMemberAccess())
+ State.Stack.back().StartOfFunctionCall =
+ !Current.NextOperator ? 0 : State.Column;
+ if (Current.is(TT_SelectorName)) {
+ State.Stack.back().ObjCSelectorNameFound = true;
+ if (Style.IndentWrappedFunctionNames) {
+ State.Stack.back().Indent =
+ State.FirstIndent + Style.ContinuationIndentWidth;
+ }
+ }
+ if (Current.is(TT_CtorInitializerColon)) {
+ // Indent 2 from the column, so:
+ // SomeClass::SomeClass()
+ // : First(...), ...
+ // Next(...)
+ // ^ line up here.
+ State.Stack.back().Indent =
+ State.Column + (Style.BreakConstructorInitializersBeforeComma ? 0 : 2);
+ State.Stack.back().NestedBlockIndent = State.Stack.back().Indent;
+ if (Style.ConstructorInitializerAllOnOneLineOrOnePerLine)
+ State.Stack.back().AvoidBinPacking = true;
+ State.Stack.back().BreakBeforeParameter = false;
+ }
+ if (Current.isOneOf(TT_BinaryOperator, TT_ConditionalExpr) && Newline)
+ State.Stack.back().NestedBlockIndent =
+ State.Column + Current.ColumnWidth + 1;
+
+ // Insert scopes created by fake parentheses.
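+ // ("Fake" parentheses are not present in the source code; the token
+ // annotator inserts them around operator precedence levels so that operands
+ // can be indented like parenthesized subexpressions.)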
+ const FormatToken *Previous = Current.getPreviousNonComment();
+
+ // Add special behavior to support a format commonly used for JavaScript
+ // closures:
+ // SomeFunction(function() {
+ // foo();
+ // bar();
+ // }, a, b, c);
+ if (Current.isNot(tok::comment) && Previous &&
+ Previous->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) &&
+ !Previous->is(TT_DictLiteral) && State.Stack.size() > 1) {
+ if (State.Stack[State.Stack.size() - 2].NestedBlockInlined && Newline)
+ for (unsigned i = 0, e = State.Stack.size() - 1; i != e; ++i)
+ State.Stack[i].NoLineBreak = true;
+ State.Stack[State.Stack.size() - 2].NestedBlockInlined = false;
+ }
+ if (Previous && (Previous->isOneOf(tok::l_paren, tok::comma, tok::colon) ||
+ Previous->isOneOf(TT_BinaryOperator, TT_ConditionalExpr)) &&
+ !Previous->isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)) {
+ State.Stack.back().NestedBlockInlined =
+ !Newline &&
+ (Previous->isNot(tok::l_paren) || Previous->ParameterCount > 1);
+ }
+
+ moveStatePastFakeLParens(State, Newline);
+ moveStatePastScopeOpener(State, Newline);
+ moveStatePastScopeCloser(State);
+ moveStatePastFakeRParens(State);
+
+ if (Current.isStringLiteral() && State.StartOfStringLiteral == 0)
+ State.StartOfStringLiteral = State.Column;
+ if (Current.is(TT_ObjCStringLiteral) && State.StartOfStringLiteral == 0)
+ State.StartOfStringLiteral = State.Column + 1;
+ else if (!Current.isOneOf(tok::comment, tok::identifier, tok::hash) &&
+ !Current.isStringLiteral())
+ State.StartOfStringLiteral = 0;
+
+ State.Column += Current.ColumnWidth;
+ State.NextToken = State.NextToken->Next;
+ unsigned Penalty = breakProtrudingToken(Current, State, DryRun);
+ if (State.Column > getColumnLimit(State)) {
+ unsigned ExcessCharacters = State.Column - getColumnLimit(State);
+ Penalty += Style.PenaltyExcessCharacter * ExcessCharacters;
+ }
+
+ if (Current.Role)
+ Current.Role->formatFromToken(State, this, DryRun);
+ // If the previous has a special role, let it consume tokens as appropriate.
+ // It is necessary to start at the previous token for the only implemented
+ // role (comma separated list). That way, the decision whether or not to break
+ // after the "{" is already done and both options are tried and evaluated.
+ // FIXME: This is ugly, find a better way.
+ if (Previous && Previous->Role)
+ Penalty += Previous->Role->formatAfterToken(State, this, DryRun);
+
+ return Penalty;
+}
+
+void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
+ bool Newline) {
+ const FormatToken &Current = *State.NextToken;
+ const FormatToken *Previous = Current.getPreviousNonComment();
+
+ // Don't add extra indentation for the first fake parenthesis after
+ // 'return', assignments or opening <({[. The indentation for these cases
+ // is special cased.
+ bool SkipFirstExtraIndent =
+ (Previous && (Previous->opensScope() ||
+ Previous->isOneOf(tok::semi, tok::kw_return) ||
+ (Previous->getPrecedence() == prec::Assignment &&
+ Style.AlignOperands) ||
+ Previous->is(TT_ObjCMethodExpr)));
+ for (SmallVectorImpl<prec::Level>::const_reverse_iterator
+ I = Current.FakeLParens.rbegin(),
+ E = Current.FakeLParens.rend();
+ I != E; ++I) {
+ ParenState NewParenState = State.Stack.back();
+ NewParenState.ContainsLineBreak = false;
+
+ // Indent from 'LastSpace' unless these are fake parentheses encapsulating
+ // a builder type call after 'return', or if the alignment after opening
+ // brackets is disabled.
+ if (!Current.isTrailingComment() &&
+ (Style.AlignOperands || *I < prec::Assignment) &&
+ (!Previous || Previous->isNot(tok::kw_return) ||
+ (Style.Language != FormatStyle::LK_Java && *I > 0)) &&
+ (Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign ||
+ *I != prec::Comma || Current.NestingLevel == 0))
+ NewParenState.Indent =
+ std::max(std::max(State.Column, NewParenState.Indent),
+ State.Stack.back().LastSpace);
+
+ // Don't allow the RHS of an operator to be split over multiple lines unless
+ // there is a line-break right after the operator.
+ // Exclude relational operators, as there, it is always more desirable to
+ // have the LHS 'left' of the RHS.
+ if (Previous && Previous->getPrecedence() > prec::Assignment &&
+ Previous->isOneOf(TT_BinaryOperator, TT_ConditionalExpr) &&
+ Previous->getPrecedence() != prec::Relational) {
+ bool BreakBeforeOperator =
+ Previous->is(tok::lessless) ||
+ (Previous->is(TT_BinaryOperator) &&
+ Style.BreakBeforeBinaryOperators != FormatStyle::BOS_None) ||
+ (Previous->is(TT_ConditionalExpr) &&
+ Style.BreakBeforeTernaryOperators);
+ if ((!Newline && !BreakBeforeOperator) ||
+ (!State.Stack.back().LastOperatorWrapped && BreakBeforeOperator))
+ NewParenState.NoLineBreak = true;
+ }
+
+ // Do not indent relative to the fake parentheses inserted for "." or "->".
+ // This is a special case to make the following two statements consistent:
+ // OuterFunction(InnerFunctionCall( // break
+ // ParameterToInnerFunction));
+ // OuterFunction(SomeObject.InnerFunctionCall( // break
+ // ParameterToInnerFunction));
+ if (*I > prec::Unknown)
+ NewParenState.LastSpace = std::max(NewParenState.LastSpace, State.Column);
+ if (*I != prec::Conditional && !Current.is(TT_UnaryOperator))
+ NewParenState.StartOfFunctionCall = State.Column;
+
+ // Always indent conditional expressions. Never indent expressions where
+ // the 'operator' is ',', ';' or an assignment (i.e. *I <=
+ // prec::Assignment) as those have different indentation rules. Indent
+ // other expressions, unless the indentation needs to be skipped.
+ if (*I == prec::Conditional ||
+ (!SkipFirstExtraIndent && *I > prec::Assignment &&
+ !Current.isTrailingComment()))
+ NewParenState.Indent += Style.ContinuationIndentWidth;
+ if ((Previous && !Previous->opensScope()) || *I > prec::Comma)
+ NewParenState.BreakBeforeParameter = false;
+ State.Stack.push_back(NewParenState);
+ SkipFirstExtraIndent = false;
+ }
+}
+
+void ContinuationIndenter::moveStatePastFakeRParens(LineState &State) {
+ for (unsigned i = 0, e = State.NextToken->FakeRParens; i != e; ++i) {
+ unsigned VariablePos = State.Stack.back().VariablePos;
+ if (State.Stack.size() == 1) {
+ // Do not pop the last element.
+ break;
+ }
+ State.Stack.pop_back();
+ State.Stack.back().VariablePos = VariablePos;
+ }
+}
+
+void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
+ bool Newline) {
+ const FormatToken &Current = *State.NextToken;
+ if (!Current.opensScope())
+ return;
+
+ if (Current.MatchingParen && Current.BlockKind == BK_Block) {
+ moveStateToNewBlock(State);
+ return;
+ }
+
+ unsigned NewIndent;
+ unsigned NewIndentLevel = State.Stack.back().IndentLevel;
+ unsigned LastSpace = State.Stack.back().LastSpace;
+ bool AvoidBinPacking;
+ bool BreakBeforeParameter = false;
+ unsigned NestedBlockIndent = std::max(State.Stack.back().StartOfFunctionCall,
+ State.Stack.back().NestedBlockIndent);
+ if (Current.isOneOf(tok::l_brace, TT_ArrayInitializerLSquare)) {
+ if (Current.opensBlockOrBlockTypeList(Style)) {
+ NewIndent = State.Stack.back().NestedBlockIndent + Style.IndentWidth;
+ NewIndent = std::min(State.Column + 2, NewIndent);
+ ++NewIndentLevel;
+ } else {
+ NewIndent = State.Stack.back().LastSpace + Style.ContinuationIndentWidth;
+ }
+ const FormatToken *NextNoComment = Current.getNextNonComment();
+ AvoidBinPacking =
+ Current.isOneOf(TT_ArrayInitializerLSquare, TT_DictLiteral) ||
+ Style.Language == FormatStyle::LK_Proto || !Style.BinPackArguments ||
+ (NextNoComment && NextNoComment->is(TT_DesignatedInitializerPeriod));
+ if (Current.ParameterCount > 1)
+ NestedBlockIndent = std::max(NestedBlockIndent, State.Column + 1);
+ } else {
+ NewIndent = Style.ContinuationIndentWidth +
+ std::max(State.Stack.back().LastSpace,
+ State.Stack.back().StartOfFunctionCall);
+
+ // Ensure that different brackets force relative alignment, e.g.:
+ // void SomeFunction(vector< // break
+ // int> v);
+ // FIXME: We likely want to do this for more combinations of brackets.
+ // Verify that it is wanted for ObjC, too.
+ if (Current.Tok.getKind() == tok::less &&
+ Current.ParentBracket == tok::l_paren) {
+ NewIndent = std::max(NewIndent, State.Stack.back().Indent);
+ LastSpace = std::max(LastSpace, State.Stack.back().Indent);
+ }
+
+ AvoidBinPacking =
+ (State.Line->MustBeDeclaration && !Style.BinPackParameters) ||
+ (!State.Line->MustBeDeclaration && !Style.BinPackArguments) ||
+ (Style.ExperimentalAutoDetectBinPacking &&
+ (Current.PackingKind == PPK_OnePerLine ||
+ (!BinPackInconclusiveFunctions &&
+ Current.PackingKind == PPK_Inconclusive)));
+ if (Current.is(TT_ObjCMethodExpr) && Current.MatchingParen) {
+ if (Style.ColumnLimit) {
+ // If this '[' opens an ObjC call, determine whether all parameters fit
+ // into one line and put one per line if they don't.
+ if (getLengthToMatchingParen(Current) + State.Column >
+ getColumnLimit(State))
+ BreakBeforeParameter = true;
+ } else {
+ // For ColumnLimit = 0, we have to figure out whether there is or has to
+ // be a line break within this call.
+ for (const FormatToken *Tok = &Current;
+ Tok && Tok != Current.MatchingParen; Tok = Tok->Next) {
+ if (Tok->MustBreakBefore ||
+ (Tok->CanBreakBefore && Tok->NewlinesBefore > 0)) {
+ BreakBeforeParameter = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+ // Generally inherit NoLineBreak from the current scope to the nested scope.
+ // However, don't do this for non-empty nested blocks, dict literals and
+ // array literals as these follow different indentation rules.
+ bool NoLineBreak =
+ Current.Children.empty() &&
+ !Current.isOneOf(TT_DictLiteral, TT_ArrayInitializerLSquare) &&
+ (State.Stack.back().NoLineBreak ||
+ (Current.is(TT_TemplateOpener) &&
+ State.Stack.back().ContainsUnwrappedBuilder));
+ State.Stack.push_back(ParenState(NewIndent, NewIndentLevel, LastSpace,
+ AvoidBinPacking, NoLineBreak));
+ State.Stack.back().NestedBlockIndent = NestedBlockIndent;
+ State.Stack.back().BreakBeforeParameter = BreakBeforeParameter;
+ State.Stack.back().HasMultipleNestedBlocks = Current.BlockParameterCount > 1;
+}
+
+void ContinuationIndenter::moveStatePastScopeCloser(LineState &State) {
+ const FormatToken &Current = *State.NextToken;
+ if (!Current.closesScope())
+ return;
+
+ // If we encounter a closing ), ], } or >, we can remove a level from our
+ // stacks.
+ if (State.Stack.size() > 1 &&
+ (Current.isOneOf(tok::r_paren, tok::r_square) ||
+ (Current.is(tok::r_brace) && State.NextToken != State.Line->First) ||
+ State.NextToken->is(TT_TemplateCloser)))
+ State.Stack.pop_back();
+
+ if (Current.is(tok::r_square)) {
+ // If this ends the array subscript expr, reset the corresponding value.
+ const FormatToken *NextNonComment = Current.getNextNonComment();
+ if (NextNonComment && NextNonComment->isNot(tok::l_square))
+ State.Stack.back().StartOfArraySubscripts = 0;
+ }
+}
+
+void ContinuationIndenter::moveStateToNewBlock(LineState &State) {
+ unsigned NestedBlockIndent = State.Stack.back().NestedBlockIndent;
+ // ObjC blocks sometimes follow special indentation rules.
+ unsigned NewIndent =
+ NestedBlockIndent + (State.NextToken->is(TT_ObjCBlockLBrace)
+ ? Style.ObjCBlockIndentWidth
+ : Style.IndentWidth);
+ State.Stack.push_back(ParenState(
+ NewIndent, /*NewIndentLevel=*/State.Stack.back().IndentLevel + 1,
+ State.Stack.back().LastSpace, /*AvoidBinPacking=*/true,
+ /*NoLineBreak=*/false));
+ State.Stack.back().NestedBlockIndent = NestedBlockIndent;
+ State.Stack.back().BreakBeforeParameter = true;
+}
+
+unsigned ContinuationIndenter::addMultilineToken(const FormatToken &Current,
+ LineState &State) {
+ // Break before further function parameters on all levels.
+ for (unsigned i = 0, e = State.Stack.size(); i != e; ++i)
+ State.Stack[i].BreakBeforeParameter = true;
+
+ unsigned ColumnsUsed = State.Column;
+ // We can only affect layout of the first and the last line, so the penalty
+ // for all other lines is constant, and we ignore it.
+ State.Column = Current.LastLineColumnWidth;
+
+ if (ColumnsUsed > getColumnLimit(State))
+ return Style.PenaltyExcessCharacter * (ColumnsUsed - getColumnLimit(State));
+ return 0;
+}
+
+unsigned ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
+ LineState &State,
+ bool DryRun) {
+ // Don't break multi-line tokens other than block comments. Instead, just
+ // update the state.
+ if (Current.isNot(TT_BlockComment) && Current.IsMultiline)
+ return addMultilineToken(Current, State);
+
+ // Don't break implicit string literals or import statements.
+ if (Current.is(TT_ImplicitStringLiteral) ||
+ State.Line->Type == LT_ImportStatement)
+ return 0;
+
+ if (!Current.isStringLiteral() && !Current.is(tok::comment))
+ return 0;
+
+ std::unique_ptr<BreakableToken> Token;
+ unsigned StartColumn = State.Column - Current.ColumnWidth;
+ unsigned ColumnLimit = getColumnLimit(State);
+
+ if (Current.isStringLiteral()) {
+ // FIXME: String literal breaking is currently disabled for Java and JS, as
+ // it requires strings to be merged using "+" which we don't support.
+ if (Style.Language == FormatStyle::LK_Java ||
+ Style.Language == FormatStyle::LK_JavaScript)
+ return 0;
+
+ // Don't break string literals inside preprocessor directives (except for
+ // #define directives, as their contents are stored in separate lines and
+ // are not affected by this check).
+ // This way we avoid breaking code with line directives and unknown
+ // preprocessor directives that contain long string literals.
+ if (State.Line->Type == LT_PreprocessorDirective)
+ return 0;
+ // Exempts unterminated string literals from line breaking. The user will
+ // likely want to terminate the string before any line breaking is done.
+ if (Current.IsUnterminatedLiteral)
+ return 0;
+
+ StringRef Text = Current.TokenText;
+ StringRef Prefix;
+ StringRef Postfix;
+ bool IsNSStringLiteral = false;
+ // FIXME: Handle whitespace between '_T', '(', '"..."', and ')'.
+ // FIXME: Store Prefix and Suffix (or PrefixLength and SuffixLength to
+ // reduce the overhead) for each FormatToken, which is a string, so that we
+ // don't run multiple checks here on the hot path.
+ if (Text.startswith("\"") && Current.Previous &&
+ Current.Previous->is(tok::at)) {
+ IsNSStringLiteral = true;
+ Prefix = "@\"";
+ }
+ if ((Text.endswith(Postfix = "\"") &&
+ (IsNSStringLiteral || Text.startswith(Prefix = "\"") ||
+ Text.startswith(Prefix = "u\"") || Text.startswith(Prefix = "U\"") ||
+ Text.startswith(Prefix = "u8\"") ||
+ Text.startswith(Prefix = "L\""))) ||
+ (Text.startswith(Prefix = "_T(\"") && Text.endswith(Postfix = "\")"))) {
+ Token.reset(new BreakableStringLiteral(
+ Current, State.Line->Level, StartColumn, Prefix, Postfix,
+ State.Line->InPPDirective, Encoding, Style));
+ } else {
+ return 0;
+ }
+ } else if (Current.is(TT_BlockComment) && Current.isTrailingComment()) {
+ if (!Style.ReflowComments ||
+ CommentPragmasRegex.match(Current.TokenText.substr(2)))
+ return 0;
+ Token.reset(new BreakableBlockComment(
+ Current, State.Line->Level, StartColumn, Current.OriginalColumn,
+ !Current.Previous, State.Line->InPPDirective, Encoding, Style));
+ } else if (Current.is(TT_LineComment) &&
+ (Current.Previous == nullptr ||
+ Current.Previous->isNot(TT_ImplicitStringLiteral))) {
+ if (!Style.ReflowComments ||
+ CommentPragmasRegex.match(Current.TokenText.substr(2)))
+ return 0;
+ Token.reset(new BreakableLineComment(Current, State.Line->Level,
+ StartColumn, /*InPPDirective=*/false,
+ Encoding, Style));
+ // We don't insert backslashes when breaking line comments.
+ ColumnLimit = Style.ColumnLimit;
+ } else {
+ return 0;
+ }
+ if (Current.UnbreakableTailLength >= ColumnLimit)
+ return 0;
+
+ unsigned RemainingSpace = ColumnLimit - Current.UnbreakableTailLength;
+ bool BreakInserted = false;
+ unsigned Penalty = 0;
+ unsigned RemainingTokenColumns = 0;
+ for (unsigned LineIndex = 0, EndIndex = Token->getLineCount();
+ LineIndex != EndIndex; ++LineIndex) {
+ if (!DryRun)
+ Token->replaceWhitespaceBefore(LineIndex, Whitespaces);
+ unsigned TailOffset = 0;
+ RemainingTokenColumns =
+ Token->getLineLengthAfterSplit(LineIndex, TailOffset, StringRef::npos);
+ while (RemainingTokenColumns > RemainingSpace) {
+ BreakableToken::Split Split =
+ Token->getSplit(LineIndex, TailOffset, ColumnLimit);
+ if (Split.first == StringRef::npos) {
+ // The last line's penalty is handled in addNextStateToQueue().
+ if (LineIndex < EndIndex - 1)
+ Penalty += Style.PenaltyExcessCharacter *
+ (RemainingTokenColumns - RemainingSpace);
+ break;
+ }
+ assert(Split.first != 0);
+ unsigned NewRemainingTokenColumns = Token->getLineLengthAfterSplit(
+ LineIndex, TailOffset + Split.first + Split.second, StringRef::npos);
+
+ // We can remove extra whitespace instead of breaking the line.
+ if (RemainingTokenColumns + 1 - Split.second <= RemainingSpace) {
+ RemainingTokenColumns = 0;
+ if (!DryRun)
+ Token->replaceWhitespace(LineIndex, TailOffset, Split, Whitespaces);
+ break;
+ }
+
+ // When breaking before a tab character, it may be moved by a few columns,
+ // but will still be expanded to the next tab stop, so we don't save any
+ // columns.
+ if (NewRemainingTokenColumns == RemainingTokenColumns)
+ break;
+
+ assert(NewRemainingTokenColumns < RemainingTokenColumns);
+ if (!DryRun)
+ Token->insertBreak(LineIndex, TailOffset, Split, Whitespaces);
+ Penalty += Current.SplitPenalty;
+ unsigned ColumnsUsed =
+ Token->getLineLengthAfterSplit(LineIndex, TailOffset, Split.first);
+ if (ColumnsUsed > ColumnLimit) {
+ Penalty += Style.PenaltyExcessCharacter * (ColumnsUsed - ColumnLimit);
+ }
+ TailOffset += Split.first + Split.second;
+ RemainingTokenColumns = NewRemainingTokenColumns;
+ BreakInserted = true;
+ }
+ }
+
+ State.Column = RemainingTokenColumns;
+
+ if (BreakInserted) {
+ // If we break the token inside a parameter list, we need to break before
+ // the next parameter on all levels, so that the next parameter is clearly
+ // visible. Line comments already introduce a break.
+ if (Current.isNot(TT_LineComment)) {
+ for (unsigned i = 0, e = State.Stack.size(); i != e; ++i)
+ State.Stack[i].BreakBeforeParameter = true;
+ }
+
+ Penalty += Current.isStringLiteral() ? Style.PenaltyBreakString
+ : Style.PenaltyBreakComment;
+
+ State.Stack.back().LastSpace = StartColumn;
+ }
+ return Penalty;
+}
+
+unsigned ContinuationIndenter::getColumnLimit(const LineState &State) const {
+ // In preprocessor directives reserve two chars for trailing " \"
+ return Style.ColumnLimit - (State.Line->InPPDirective ? 2 : 0);
+}
+
+bool ContinuationIndenter::nextIsMultilineString(const LineState &State) {
+ const FormatToken &Current = *State.NextToken;
+ if (!Current.isStringLiteral() || Current.is(TT_ImplicitStringLiteral))
+ return false;
+ // We never consider raw string literals "multiline" for the purpose of the
+ // AlwaysBreakBeforeMultilineStrings implementation, as they are special-cased
+ // (see TokenAnnotator::mustBreakBefore()).
+ if (Current.TokenText.startswith("R\""))
+ return false;
+ if (Current.IsMultiline)
+ return true;
+ if (Current.getNextNonComment() &&
+ Current.getNextNonComment()->isStringLiteral())
+ return true; // Implicit concatenation.
+ if (Style.ColumnLimit != 0 &&
+ State.Column + Current.ColumnWidth + Current.UnbreakableTailLength >
+ Style.ColumnLimit)
+ return true; // String will be split.
+ return false;
+}
+
+} // namespace format
+} // namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Format/ContinuationIndenter.h b/contrib/llvm/tools/clang/lib/Format/ContinuationIndenter.h
new file mode 100644
index 0000000..9b9154e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Format/ContinuationIndenter.h
@@ -0,0 +1,380 @@
+//===--- ContinuationIndenter.h - Format C++ code ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements an indenter that manages the indentation of
+/// continuations.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_FORMAT_CONTINUATIONINDENTER_H
+#define LLVM_CLANG_LIB_FORMAT_CONTINUATIONINDENTER_H
+
+#include "Encoding.h"
+#include "FormatToken.h"
+#include "clang/Format/Format.h"
+#include "llvm/Support/Regex.h"
+
+namespace clang {
+class SourceManager;
+
+namespace format {
+
+class AnnotatedLine;
+struct FormatToken;
+struct LineState;
+struct ParenState;
+class WhitespaceManager;
+
+class ContinuationIndenter {
+public:
+ /// \brief Constructs a \c ContinuationIndenter to format a line starting in
+ /// the column given to \c getInitialState.
+ ContinuationIndenter(const FormatStyle &Style,
+ const AdditionalKeywords &Keywords,
+ SourceManager &SourceMgr, WhitespaceManager &Whitespaces,
+ encoding::Encoding Encoding,
+ bool BinPackInconclusiveFunctions);
+
+ /// \brief Get the initial state, i.e. the state after placing \p Line's
+ /// first token at \p FirstIndent.
+ LineState getInitialState(unsigned FirstIndent, const AnnotatedLine *Line,
+ bool DryRun);
+
+ // FIXME: canBreak and mustBreak aren't strictly indentation-related. Find a
+ // better home.
+ /// \brief Returns \c true if a line break after \p State is allowed.
+ bool canBreak(const LineState &State);
+
+ /// \brief Returns \c true if a line break after \p State is mandatory.
+ bool mustBreak(const LineState &State);
+
+ /// \brief Appends the next token to \p State and updates information
+ /// necessary for indentation.
+ ///
+ /// Puts the token on the current line if \p Newline is \c false and adds a
+ /// line break and necessary indentation otherwise.
+ ///
+ /// If \p DryRun is \c false, also creates and stores the required
+ /// \c Replacement.
+ unsigned addTokenToState(LineState &State, bool Newline, bool DryRun,
+ unsigned ExtraSpaces = 0);
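+
+ // A minimal sketch of how a caller typically drives these methods (the
+ // real drivers live in the line formatters; the names below are
+ // illustrative only):
+ //   LineState State = Indenter.getInitialState(Indent, &Line, DryRun);
+ //   while (State.NextToken) {
+ //     bool Newline = Indenter.mustBreak(State) ||
+ //                    (WantBreak && Indenter.canBreak(State));
+ //     Penalty += Indenter.addTokenToState(State, Newline, DryRun);
+ //   }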
+
+ /// \brief Get the column limit for this line. This is the style's column
+ /// limit, potentially reduced for preprocessor definitions.
+ unsigned getColumnLimit(const LineState &State) const;
+
+private:
+ /// \brief Mark the next token as consumed in \p State and modify its stacks
+ /// accordingly.
+ unsigned moveStateToNextToken(LineState &State, bool DryRun, bool Newline);
+
+ /// \brief Update 'State' according to the next token's fake left parentheses.
+ void moveStatePastFakeLParens(LineState &State, bool Newline);
+ /// \brief Update 'State' according to the next token's fake r_parens.
+ void moveStatePastFakeRParens(LineState &State);
+
+ /// \brief Update 'State' according to the next token being one of "(<{[".
+ void moveStatePastScopeOpener(LineState &State, bool Newline);
+ /// \brief Update 'State' according to the next token being one of ")>}]".
+ void moveStatePastScopeCloser(LineState &State);
+ /// \brief Update 'State' with the next token opening a nested block.
+ void moveStateToNewBlock(LineState &State);
+
+ /// \brief If the current token sticks out over the end of the line, break
+ /// it if possible.
+ ///
+ /// \returns An extra penalty if a token was broken, otherwise 0.
+ ///
+ /// The returned penalty will cover the cost of the additional line breaks and
+ /// column limit violation in all lines except for the last one. The penalty
+ /// for the column limit violation in the last line (and in single line
+ /// tokens) is handled in \c addNextStateToQueue.
+ unsigned breakProtrudingToken(const FormatToken &Current, LineState &State,
+ bool DryRun);
+
+ /// \brief Appends the next token to \p State and updates information
+ /// necessary for indentation.
+ ///
+ /// Puts the token on the current line.
+ ///
+ /// If \p DryRun is \c false, also creates and stores the required
+ /// \c Replacement.
+ void addTokenOnCurrentLine(LineState &State, bool DryRun,
+ unsigned ExtraSpaces);
+
+ /// \brief Appends the next token to \p State and updates information
+ /// necessary for indentation.
+ ///
+ /// Adds a line break and necessary indentation.
+ ///
+ /// If \p DryRun is \c false, also creates and stores the required
+ /// \c Replacement.
+ unsigned addTokenOnNewLine(LineState &State, bool DryRun);
+
+ /// \brief Calculate the new column for a line wrap before the next token.
+ unsigned getNewLineColumn(const LineState &State);
+
+ /// \brief Adds a multiline token to the \p State.
+ ///
+ /// \returns Extra penalty for the first line of the literal: the last line
+ /// is handled in \c addNextStateToQueue, and the penalty for other lines
+ /// doesn't matter, as we don't change them.
+ unsigned addMultilineToken(const FormatToken &Current, LineState &State);
+
+ /// \brief Returns \c true if the next token starts a multiline string
+ /// literal.
+ ///
+ /// This includes implicitly concatenated strings, strings that will be broken
+ /// by clang-format and string literals with escaped newlines.
+ bool nextIsMultilineString(const LineState &State);
+
+ FormatStyle Style;
+ const AdditionalKeywords &Keywords;
+ SourceManager &SourceMgr;
+ WhitespaceManager &Whitespaces;
+ encoding::Encoding Encoding;
+ bool BinPackInconclusiveFunctions;
+ llvm::Regex CommentPragmasRegex;
+};
+
+struct ParenState {
+ ParenState(unsigned Indent, unsigned IndentLevel, unsigned LastSpace,
+ bool AvoidBinPacking, bool NoLineBreak)
+ : Indent(Indent), IndentLevel(IndentLevel), LastSpace(LastSpace),
+ NestedBlockIndent(Indent), BreakBeforeClosingBrace(false),
+ AvoidBinPacking(AvoidBinPacking), BreakBeforeParameter(false),
+ NoLineBreak(NoLineBreak), LastOperatorWrapped(true),
+ ContainsLineBreak(false), ContainsUnwrappedBuilder(false),
+ AlignColons(true), ObjCSelectorNameFound(false),
+ HasMultipleNestedBlocks(false), NestedBlockInlined(false) {}
+
+ /// \brief The position to which a specific parenthesis level needs to be
+ /// indented.
+ unsigned Indent;
+
+ /// \brief The number of indentation levels of the block.
+ unsigned IndentLevel;
+
+ /// \brief The position of the last space on each level.
+ ///
+ /// Used e.g. to break like:
+ /// functionCall(Parameter, otherCall(
+ /// OtherParameter));
+ unsigned LastSpace;
+
+ /// \brief If a block relative to this parenthesis level gets wrapped, indent
+ /// it this much.
+ unsigned NestedBlockIndent;
+
+ /// \brief The position of the first "<<" operator encountered on each level.
+ ///
+ /// Used to align "<<" operators. 0 if no such operator has been encountered
+ /// on a level.
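+ ///
+ /// e.g. used to produce:
+ ///   llvm::errs() << aaaaaaaa
+ ///                << bbbbbbbb;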
+ unsigned FirstLessLess = 0;
+
+ /// \brief The column of a \c ? in a conditional expression.
+ unsigned QuestionColumn = 0;
+
+ /// \brief The position of the colon in an ObjC method declaration/call.
+ unsigned ColonPos = 0;
+
+ /// \brief The start of the most recent function in a builder-type call.
+ unsigned StartOfFunctionCall = 0;
+
+ /// \brief Contains the start of array subscript expressions, so that they
+ /// can be aligned.
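+ ///
+ /// e.g.:
+ ///   aaaaaaaaaaaaaaaa[bbbbbbbbbbbbbbbb]
+ ///                   [cccccccccccccccc];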
+ unsigned StartOfArraySubscripts = 0;
+
+ /// \brief If a nested name specifier was broken over multiple lines, this
+ /// contains the start column of the second line. Otherwise 0.
+ unsigned NestedNameSpecifierContinuation = 0;
+
+ /// \brief If a call expression was broken over multiple lines, this
+ /// contains the start column of the second line. Otherwise 0.
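+ ///
+ /// e.g. used to align the continuation of a broken call chain:
+ ///   Aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.bbbbbbbbbbb()
+ ///       .ccccccccccc();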
+ unsigned CallContinuation = 0;
+
+ /// \brief The column of the first variable name in a variable declaration.
+ ///
+ /// Used to align further variables if necessary.
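+ ///
+ /// e.g.:
+ ///   int aaaaaaaaaaaaaaaaaaaaaaaa = 1,
+ ///       bbbb = 2;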
+ unsigned VariablePos = 0;
+
+ /// \brief Whether a newline needs to be inserted before the block's closing
+ /// brace.
+ ///
+ /// We only want to insert a newline before the closing brace if there also
+ /// was a newline after the beginning left brace.
+ bool BreakBeforeClosingBrace : 1;
+
+ /// \brief Avoid bin packing, i.e. multiple parameters/elements on multiple
+ /// lines, in this context.
+ bool AvoidBinPacking : 1;
+
+ /// \brief Break after the next comma (or all the commas in this context if
+ /// \c AvoidBinPacking is \c true).
+ bool BreakBeforeParameter : 1;
+
+ /// \brief Line breaking in this context would break a formatting rule.
+ bool NoLineBreak : 1;
+
+ /// \brief True if the last binary operator on this level was wrapped to the
+ /// next line.
+ bool LastOperatorWrapped : 1;
+
+ /// \brief \c true if this \c ParenState already contains a line-break.
+ ///
+ /// The first line break in a certain \c ParenState causes extra penalty so
+ /// that clang-format prefers similar breaks, i.e. breaks in the same
+ /// parenthesis.
+ bool ContainsLineBreak : 1;
+
+ /// \brief \c true if this \c ParenState contains multiple segments of a
+ /// builder-type call on one line.
+ bool ContainsUnwrappedBuilder : 1;
+
+ /// \brief \c true if the colons of the current ObjC method expression should
+ /// be aligned.
+ ///
+ /// Not considered for memoization as it will always have the same value at
+ /// the same token.
+ bool AlignColons : 1;
+
+ /// \brief \c true if at least one selector name was found in the current
+ /// ObjC method expression.
+ ///
+ /// Not considered for memoization as it will always have the same value at
+ /// the same token.
+ bool ObjCSelectorNameFound : 1;
+
+ /// \brief \c true if there are multiple nested blocks inside these parens.
+ ///
+ /// Not considered for memoization as it will always have the same value at
+ /// the same token.
+ bool HasMultipleNestedBlocks : 1;
+
+ /// \brief \c true if the start of a nested block (e.g. a lambda introducer
+ /// in C++ or "function" in JavaScript) was not wrapped to a new line.
+ bool NestedBlockInlined : 1;
+
+ bool operator<(const ParenState &Other) const {
+ if (Indent != Other.Indent)
+ return Indent < Other.Indent;
+ if (LastSpace != Other.LastSpace)
+ return LastSpace < Other.LastSpace;
+ if (NestedBlockIndent != Other.NestedBlockIndent)
+ return NestedBlockIndent < Other.NestedBlockIndent;
+ if (FirstLessLess != Other.FirstLessLess)
+ return FirstLessLess < Other.FirstLessLess;
+ if (BreakBeforeClosingBrace != Other.BreakBeforeClosingBrace)
+ return BreakBeforeClosingBrace;
+ if (QuestionColumn != Other.QuestionColumn)
+ return QuestionColumn < Other.QuestionColumn;
+ if (AvoidBinPacking != Other.AvoidBinPacking)
+ return AvoidBinPacking;
+ if (BreakBeforeParameter != Other.BreakBeforeParameter)
+ return BreakBeforeParameter;
+ if (NoLineBreak != Other.NoLineBreak)
+ return NoLineBreak;
+ if (LastOperatorWrapped != Other.LastOperatorWrapped)
+ return LastOperatorWrapped;
+ if (ColonPos != Other.ColonPos)
+ return ColonPos < Other.ColonPos;
+ if (StartOfFunctionCall != Other.StartOfFunctionCall)
+ return StartOfFunctionCall < Other.StartOfFunctionCall;
+ if (StartOfArraySubscripts != Other.StartOfArraySubscripts)
+ return StartOfArraySubscripts < Other.StartOfArraySubscripts;
+ if (CallContinuation != Other.CallContinuation)
+ return CallContinuation < Other.CallContinuation;
+ if (VariablePos != Other.VariablePos)
+ return VariablePos < Other.VariablePos;
+ if (ContainsLineBreak != Other.ContainsLineBreak)
+ return ContainsLineBreak;
+ if (ContainsUnwrappedBuilder != Other.ContainsUnwrappedBuilder)
+ return ContainsUnwrappedBuilder;
+ if (NestedBlockInlined != Other.NestedBlockInlined)
+ return NestedBlockInlined;
+ return false;
+ }
+};
+
+/// \brief The current state when indenting an unwrapped line.
+///
+/// As the indenter tries different combinations, this is copied by value.
+struct LineState {
+ /// \brief The number of used columns in the current line.
+ unsigned Column;
+
+ /// \brief The next token to be formatted.
+ FormatToken *NextToken;
+
+ /// \brief \c true if this line contains a continued for-loop section.
+ bool LineContainsContinuedForLoopSection;
+
+ /// \brief The \c NestingLevel at the start of this line.
+ unsigned StartOfLineLevel;
+
+ /// \brief The lowest \c NestingLevel on the current line.
+ unsigned LowestLevelOnLine;
+
+ /// \brief The start column of the string literal, if we're in a string
+ /// literal sequence, 0 otherwise.
+ unsigned StartOfStringLiteral;
+
+ /// \brief A stack keeping track of properties applying to parenthesis
+ /// levels.
+ std::vector<ParenState> Stack;
+
+ /// \brief Ignore the stack of \c ParenStates for state comparison.
+ ///
+ /// In long and deeply nested unwrapped lines, the current algorithm can
+ /// be insufficient for finding the best formatting with a reasonable amount
+ /// of time and memory. Setting this flag will effectively lead to the
+ /// algorithm not analyzing some combinations. However, these combinations
+ /// rarely contain the optimal solution: In short, accepting a higher
+ /// penalty early would need to lead to different values in the \c
+ /// ParenState stack (in an otherwise identical state) and these different
+ /// values would need to lead to a significant amount of avoided penalty
+ /// later.
+ ///
+ /// FIXME: Come up with a better algorithm instead.
+ bool IgnoreStackForComparison;
+
+ /// \brief The indent of the first token.
+ unsigned FirstIndent;
+
+ /// \brief The line that is being formatted.
+ ///
+ /// Does not need to be considered for memoization because it doesn't change.
+ const AnnotatedLine *Line;
+
+ /// \brief Comparison operator so that \c LineState can be used in a \c map.
+ bool operator<(const LineState &Other) const {
+ if (NextToken != Other.NextToken)
+ return NextToken < Other.NextToken;
+ if (Column != Other.Column)
+ return Column < Other.Column;
+ if (LineContainsContinuedForLoopSection !=
+ Other.LineContainsContinuedForLoopSection)
+ return LineContainsContinuedForLoopSection;
+ if (StartOfLineLevel != Other.StartOfLineLevel)
+ return StartOfLineLevel < Other.StartOfLineLevel;
+ if (LowestLevelOnLine != Other.LowestLevelOnLine)
+ return LowestLevelOnLine < Other.LowestLevelOnLine;
+ if (StartOfStringLiteral != Other.StartOfStringLiteral)
+ return StartOfStringLiteral < Other.StartOfStringLiteral;
+ if (IgnoreStackForComparison || Other.IgnoreStackForComparison)
+ return false;
+ return Stack < Other.Stack;
+ }
+};
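+
+// Both structs above define \c operator< so that states can be used as keys
+// in ordered containers while searching for the best formatting, e.g. (a
+// hypothetical sketch): std::map<LineState, unsigned> SeenStates;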
+
+} // end namespace format
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Format/Encoding.h b/contrib/llvm/tools/clang/lib/Format/Encoding.h
new file mode 100644
index 0000000..592d720
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Format/Encoding.h
@@ -0,0 +1,146 @@
+//===--- Encoding.h - Format C++ code -------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Contains functions for text encoding manipulation. Supports UTF-8,
+/// 8-bit encodings and escape sequences in C++ string literals.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_FORMAT_ENCODING_H
+#define LLVM_CLANG_LIB_FORMAT_ENCODING_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/Unicode.h"
+
+namespace clang {
+namespace format {
+namespace encoding {
+
+enum Encoding {
+ Encoding_UTF8,
+ Encoding_Unknown // We treat all other encodings as 8-bit encodings.
+};
+
+/// \brief Detects the encoding of the Text. If the Text can be decoded as
+/// UTF-8, it is considered UTF-8; otherwise we treat it as some 8-bit
+/// encoding.
+inline Encoding detectEncoding(StringRef Text) {
+ const UTF8 *Ptr = reinterpret_cast<const UTF8 *>(Text.begin());
+ const UTF8 *BufEnd = reinterpret_cast<const UTF8 *>(Text.end());
+ if (::isLegalUTF8String(&Ptr, BufEnd))
+ return Encoding_UTF8;
+ return Encoding_Unknown;
+}
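+
+// A minimal usage sketch (illustrative values only):
+//   detectEncoding("abc");  // Encoding_UTF8: ASCII is always legal UTF-8.
+//   detectEncoding("\xff"); // Encoding_Unknown: 0xFF cannot start a UTF-8
+//                           // sequence, so treat it as an 8-bit encoding.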
+
+inline unsigned getCodePointCountUTF8(StringRef Text) {
+ unsigned CodePoints = 0;
+ for (size_t i = 0, e = Text.size(); i < e; i += getNumBytesForUTF8(Text[i])) {
+ ++CodePoints;
+ }
+ return CodePoints;
+}
+
+/// \brief Gets the number of code points in the Text using the specified
+/// Encoding.
+inline unsigned getCodePointCount(StringRef Text, Encoding Encoding) {
+ switch (Encoding) {
+ case Encoding_UTF8:
+ return getCodePointCountUTF8(Text);
+ default:
+ return Text.size();
+ }
+}
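+
+// Illustrative example: "naïve" ("na\xc3\xafve") is 6 bytes but 5 code points.
+//   getCodePointCount("na\xc3\xafve", Encoding_UTF8);    // 5
+//   getCodePointCount("na\xc3\xafve", Encoding_Unknown); // 6 (byte count)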
+
+/// \brief Returns the number of columns required to display the \p Text on a
+/// generic Unicode-capable terminal. Text is assumed to use the specified
+/// \p Encoding.
+inline unsigned columnWidth(StringRef Text, Encoding Encoding) {
+ if (Encoding == Encoding_UTF8) {
+ int ContentWidth = llvm::sys::unicode::columnWidthUTF8(Text);
+ // FIXME: Figure out the correct way to handle this in the presence of both
+ // printable and unprintable multi-byte UTF-8 characters. Falling back to
+ // returning the number of bytes may cause problems, as columnWidth suddenly
+ // becomes non-additive.
+ if (ContentWidth >= 0)
+ return ContentWidth;
+ }
+ return Text.size();
+}
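+
+// Illustrative example: byte count and display width differ for multi-byte
+// UTF-8 text.
+//   columnWidth("\xc3\xa4", Encoding_UTF8);    // 1 column ("ä" is 2 bytes)
+//   columnWidth("\xc3\xa4", Encoding_Unknown); // 2 columns (byte fallback)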
+
+/// \brief Returns the number of columns required to display the \p Text,
+/// starting from the \p StartColumn on a terminal with the \p TabWidth. The
+/// text is assumed to use the specified \p Encoding.
+inline unsigned columnWidthWithTabs(StringRef Text, unsigned StartColumn,
+ unsigned TabWidth, Encoding Encoding) {
+ unsigned TotalWidth = 0;
+ StringRef Tail = Text;
+ for (;;) {
+ StringRef::size_type TabPos = Tail.find('\t');
+ if (TabPos == StringRef::npos)
+ return TotalWidth + columnWidth(Tail, Encoding);
+ TotalWidth += columnWidth(Tail.substr(0, TabPos), Encoding);
+ TotalWidth += TabWidth - (TotalWidth + StartColumn) % TabWidth;
+ Tail = Tail.substr(TabPos + 1);
+ }
+}
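+
+// Illustrative example, assuming a start column of 0 and a tab width of 8:
+//   columnWidthWithTabs("a\tb", 0, 8, Encoding_UTF8); // 9
+// 'a' occupies column 0, the tab advances to column 8, and 'b' occupies
+// column 8, so the text consumes 9 columns in total.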
+
+/// \brief Gets the number of bytes in a sequence representing a single
+/// codepoint and starting with FirstChar in the specified Encoding.
+inline unsigned getCodePointNumBytes(char FirstChar, Encoding Encoding) {
+ switch (Encoding) {
+ case Encoding_UTF8:
+ return getNumBytesForUTF8(FirstChar);
+ default:
+ return 1;
+ }
+}
+
+inline bool isOctDigit(char c) { return '0' <= c && c <= '7'; }
+
+inline bool isHexDigit(char c) {
+ return ('0' <= c && c <= '9') || ('a' <= c && c <= 'f') ||
+ ('A' <= c && c <= 'F');
+}
+
+/// \brief Gets the length of an escape sequence inside a C++ string literal.
+/// Text should span from the beginning of the escape sequence (starting with a
+/// backslash) to the end of the string literal.
+inline unsigned getEscapeSequenceLength(StringRef Text) {
+ assert(Text[0] == '\\');
+ if (Text.size() < 2)
+ return 1;
+
+ switch (Text[1]) {
+ case 'u':
+ return 6;
+ case 'U':
+ return 10;
+ case 'x': {
+ unsigned I = 2; // Point after '\x'.
+ while (I < Text.size() && isHexDigit(Text[I]))
+ ++I;
+ return I;
+ }
+ default:
+ if (isOctDigit(Text[1])) {
+ unsigned I = 1;
+ while (I < Text.size() && I < 4 && isOctDigit(Text[I]))
+ ++I;
+ return I;
+ }
+ return 1 + getNumBytesForUTF8(Text[1]);
+ }
+}
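+
+// Illustrative examples (each Text starts at the backslash):
+//   getEscapeSequenceLength("\\n");     // 2: '\' plus one character
+//   getEscapeSequenceLength("\\x1b[");  // 4: '\x' plus two hex digits
+//   getEscapeSequenceLength("\\101");   // 4: '\' plus up to three octal digits
+//   getEscapeSequenceLength("\\u00e9"); // 6: fixed-length universal name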
+
+} // namespace encoding
+} // namespace format
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Format/Format.cpp b/contrib/llvm/tools/clang/lib/Format/Format.cpp
new file mode 100644
index 0000000..5068fca
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Format/Format.cpp
@@ -0,0 +1,2041 @@
+//===--- Format.cpp - Format C++ code -------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements functions declared in Format.h. This will be
+/// split into separate files as we go.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/Format/Format.h"
+#include "ContinuationIndenter.h"
+#include "TokenAnnotator.h"
+#include "UnwrappedLineFormatter.h"
+#include "UnwrappedLineParser.h"
+#include "WhitespaceManager.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/YAMLTraits.h"
+#include <queue>
+#include <string>
+
+#define DEBUG_TYPE "format-formatter"
+
+using clang::format::FormatStyle;
+
+LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(std::string)
+LLVM_YAML_IS_SEQUENCE_VECTOR(clang::format::FormatStyle::IncludeCategory)
+
+namespace llvm {
+namespace yaml {
+template <> struct ScalarEnumerationTraits<FormatStyle::LanguageKind> {
+ static void enumeration(IO &IO, FormatStyle::LanguageKind &Value) {
+ IO.enumCase(Value, "Cpp", FormatStyle::LK_Cpp);
+ IO.enumCase(Value, "Java", FormatStyle::LK_Java);
+ IO.enumCase(Value, "JavaScript", FormatStyle::LK_JavaScript);
+ IO.enumCase(Value, "Proto", FormatStyle::LK_Proto);
+ IO.enumCase(Value, "TableGen", FormatStyle::LK_TableGen);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::LanguageStandard> {
+ static void enumeration(IO &IO, FormatStyle::LanguageStandard &Value) {
+ IO.enumCase(Value, "Cpp03", FormatStyle::LS_Cpp03);
+ IO.enumCase(Value, "C++03", FormatStyle::LS_Cpp03);
+ IO.enumCase(Value, "Cpp11", FormatStyle::LS_Cpp11);
+ IO.enumCase(Value, "C++11", FormatStyle::LS_Cpp11);
+ IO.enumCase(Value, "Auto", FormatStyle::LS_Auto);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::UseTabStyle> {
+ static void enumeration(IO &IO, FormatStyle::UseTabStyle &Value) {
+ IO.enumCase(Value, "Never", FormatStyle::UT_Never);
+ IO.enumCase(Value, "false", FormatStyle::UT_Never);
+ IO.enumCase(Value, "Always", FormatStyle::UT_Always);
+ IO.enumCase(Value, "true", FormatStyle::UT_Always);
+ IO.enumCase(Value, "ForIndentation", FormatStyle::UT_ForIndentation);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::ShortFunctionStyle> {
+ static void enumeration(IO &IO, FormatStyle::ShortFunctionStyle &Value) {
+ IO.enumCase(Value, "None", FormatStyle::SFS_None);
+ IO.enumCase(Value, "false", FormatStyle::SFS_None);
+ IO.enumCase(Value, "All", FormatStyle::SFS_All);
+ IO.enumCase(Value, "true", FormatStyle::SFS_All);
+ IO.enumCase(Value, "Inline", FormatStyle::SFS_Inline);
+ IO.enumCase(Value, "Empty", FormatStyle::SFS_Empty);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::BinaryOperatorStyle> {
+ static void enumeration(IO &IO, FormatStyle::BinaryOperatorStyle &Value) {
+ IO.enumCase(Value, "All", FormatStyle::BOS_All);
+ IO.enumCase(Value, "true", FormatStyle::BOS_All);
+ IO.enumCase(Value, "None", FormatStyle::BOS_None);
+ IO.enumCase(Value, "false", FormatStyle::BOS_None);
+ IO.enumCase(Value, "NonAssignment", FormatStyle::BOS_NonAssignment);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::BraceBreakingStyle> {
+ static void enumeration(IO &IO, FormatStyle::BraceBreakingStyle &Value) {
+ IO.enumCase(Value, "Attach", FormatStyle::BS_Attach);
+ IO.enumCase(Value, "Linux", FormatStyle::BS_Linux);
+ IO.enumCase(Value, "Mozilla", FormatStyle::BS_Mozilla);
+ IO.enumCase(Value, "Stroustrup", FormatStyle::BS_Stroustrup);
+ IO.enumCase(Value, "Allman", FormatStyle::BS_Allman);
+ IO.enumCase(Value, "GNU", FormatStyle::BS_GNU);
+ IO.enumCase(Value, "WebKit", FormatStyle::BS_WebKit);
+ IO.enumCase(Value, "Custom", FormatStyle::BS_Custom);
+ }
+};
+
+template <>
+struct ScalarEnumerationTraits<FormatStyle::ReturnTypeBreakingStyle> {
+ static void enumeration(IO &IO, FormatStyle::ReturnTypeBreakingStyle &Value) {
+ IO.enumCase(Value, "None", FormatStyle::RTBS_None);
+ IO.enumCase(Value, "All", FormatStyle::RTBS_All);
+ IO.enumCase(Value, "TopLevel", FormatStyle::RTBS_TopLevel);
+ IO.enumCase(Value, "TopLevelDefinitions",
+ FormatStyle::RTBS_TopLevelDefinitions);
+ IO.enumCase(Value, "AllDefinitions", FormatStyle::RTBS_AllDefinitions);
+ }
+};
+
+template <>
+struct ScalarEnumerationTraits<FormatStyle::DefinitionReturnTypeBreakingStyle> {
+ static void
+ enumeration(IO &IO, FormatStyle::DefinitionReturnTypeBreakingStyle &Value) {
+ IO.enumCase(Value, "None", FormatStyle::DRTBS_None);
+ IO.enumCase(Value, "All", FormatStyle::DRTBS_All);
+ IO.enumCase(Value, "TopLevel", FormatStyle::DRTBS_TopLevel);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "false", FormatStyle::DRTBS_None);
+ IO.enumCase(Value, "true", FormatStyle::DRTBS_All);
+ }
+};
+
+template <>
+struct ScalarEnumerationTraits<FormatStyle::NamespaceIndentationKind> {
+ static void enumeration(IO &IO,
+ FormatStyle::NamespaceIndentationKind &Value) {
+ IO.enumCase(Value, "None", FormatStyle::NI_None);
+ IO.enumCase(Value, "Inner", FormatStyle::NI_Inner);
+ IO.enumCase(Value, "All", FormatStyle::NI_All);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::BracketAlignmentStyle> {
+ static void enumeration(IO &IO, FormatStyle::BracketAlignmentStyle &Value) {
+ IO.enumCase(Value, "Align", FormatStyle::BAS_Align);
+ IO.enumCase(Value, "DontAlign", FormatStyle::BAS_DontAlign);
+ IO.enumCase(Value, "AlwaysBreak", FormatStyle::BAS_AlwaysBreak);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "true", FormatStyle::BAS_Align);
+ IO.enumCase(Value, "false", FormatStyle::BAS_DontAlign);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::PointerAlignmentStyle> {
+ static void enumeration(IO &IO, FormatStyle::PointerAlignmentStyle &Value) {
+ IO.enumCase(Value, "Middle", FormatStyle::PAS_Middle);
+ IO.enumCase(Value, "Left", FormatStyle::PAS_Left);
+ IO.enumCase(Value, "Right", FormatStyle::PAS_Right);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "true", FormatStyle::PAS_Left);
+ IO.enumCase(Value, "false", FormatStyle::PAS_Right);
+ }
+};
+
+template <>
+struct ScalarEnumerationTraits<FormatStyle::SpaceBeforeParensOptions> {
+ static void enumeration(IO &IO,
+ FormatStyle::SpaceBeforeParensOptions &Value) {
+ IO.enumCase(Value, "Never", FormatStyle::SBPO_Never);
+ IO.enumCase(Value, "ControlStatements",
+ FormatStyle::SBPO_ControlStatements);
+ IO.enumCase(Value, "Always", FormatStyle::SBPO_Always);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "false", FormatStyle::SBPO_Never);
+ IO.enumCase(Value, "true", FormatStyle::SBPO_ControlStatements);
+ }
+};
+
+template <> struct MappingTraits<FormatStyle> {
+ static void mapping(IO &IO, FormatStyle &Style) {
+ // When reading, read the language first; we need it for getPredefinedStyle.
+ IO.mapOptional("Language", Style.Language);
+
+ if (IO.outputting()) {
+ StringRef StylesArray[] = {"LLVM", "Google", "Chromium",
+ "Mozilla", "WebKit", "GNU"};
+ ArrayRef<StringRef> Styles(StylesArray);
+ for (size_t i = 0, e = Styles.size(); i < e; ++i) {
+ StringRef StyleName(Styles[i]);
+ FormatStyle PredefinedStyle;
+ if (getPredefinedStyle(StyleName, Style.Language, &PredefinedStyle) &&
+ Style == PredefinedStyle) {
+ IO.mapOptional("# BasedOnStyle", StyleName);
+ break;
+ }
+ }
+ } else {
+ StringRef BasedOnStyle;
+ IO.mapOptional("BasedOnStyle", BasedOnStyle);
+ if (!BasedOnStyle.empty()) {
+ FormatStyle::LanguageKind OldLanguage = Style.Language;
+ FormatStyle::LanguageKind Language =
+ ((FormatStyle *)IO.getContext())->Language;
+ if (!getPredefinedStyle(BasedOnStyle, Language, &Style)) {
+ IO.setError(Twine("Unknown value for BasedOnStyle: ", BasedOnStyle));
+ return;
+ }
+ Style.Language = OldLanguage;
+ }
+ }
+
+ // For backward compatibility.
+ if (!IO.outputting()) {
+ IO.mapOptional("DerivePointerBinding", Style.DerivePointerAlignment);
+ IO.mapOptional("IndentFunctionDeclarationAfterType",
+ Style.IndentWrappedFunctionNames);
+ IO.mapOptional("PointerBindsToType", Style.PointerAlignment);
+ IO.mapOptional("SpaceAfterControlStatementKeyword",
+ Style.SpaceBeforeParens);
+ }
+
+ IO.mapOptional("AccessModifierOffset", Style.AccessModifierOffset);
+ IO.mapOptional("AlignAfterOpenBracket", Style.AlignAfterOpenBracket);
+ IO.mapOptional("AlignConsecutiveAssignments",
+ Style.AlignConsecutiveAssignments);
+ IO.mapOptional("AlignConsecutiveDeclarations",
+ Style.AlignConsecutiveDeclarations);
+ IO.mapOptional("AlignEscapedNewlinesLeft", Style.AlignEscapedNewlinesLeft);
+ IO.mapOptional("AlignOperands", Style.AlignOperands);
+ IO.mapOptional("AlignTrailingComments", Style.AlignTrailingComments);
+ IO.mapOptional("AllowAllParametersOfDeclarationOnNextLine",
+ Style.AllowAllParametersOfDeclarationOnNextLine);
+ IO.mapOptional("AllowShortBlocksOnASingleLine",
+ Style.AllowShortBlocksOnASingleLine);
+ IO.mapOptional("AllowShortCaseLabelsOnASingleLine",
+ Style.AllowShortCaseLabelsOnASingleLine);
+ IO.mapOptional("AllowShortFunctionsOnASingleLine",
+ Style.AllowShortFunctionsOnASingleLine);
+ IO.mapOptional("AllowShortIfStatementsOnASingleLine",
+ Style.AllowShortIfStatementsOnASingleLine);
+ IO.mapOptional("AllowShortLoopsOnASingleLine",
+ Style.AllowShortLoopsOnASingleLine);
+ IO.mapOptional("AlwaysBreakAfterDefinitionReturnType",
+ Style.AlwaysBreakAfterDefinitionReturnType);
+ IO.mapOptional("AlwaysBreakAfterReturnType",
+ Style.AlwaysBreakAfterReturnType);
+ // If AlwaysBreakAfterDefinitionReturnType was specified but
+ // AlwaysBreakAfterReturnType was not, initialize the latter from the
+ // former for backwards compatibility.
+ if (Style.AlwaysBreakAfterDefinitionReturnType != FormatStyle::DRTBS_None &&
+ Style.AlwaysBreakAfterReturnType == FormatStyle::RTBS_None) {
+ if (Style.AlwaysBreakAfterDefinitionReturnType == FormatStyle::DRTBS_All)
+ Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_AllDefinitions;
+ else if (Style.AlwaysBreakAfterDefinitionReturnType ==
+ FormatStyle::DRTBS_TopLevel)
+ Style.AlwaysBreakAfterReturnType =
+ FormatStyle::RTBS_TopLevelDefinitions;
+ }
+
+ IO.mapOptional("AlwaysBreakBeforeMultilineStrings",
+ Style.AlwaysBreakBeforeMultilineStrings);
+ IO.mapOptional("AlwaysBreakTemplateDeclarations",
+ Style.AlwaysBreakTemplateDeclarations);
+ IO.mapOptional("BinPackArguments", Style.BinPackArguments);
+ IO.mapOptional("BinPackParameters", Style.BinPackParameters);
+ IO.mapOptional("BraceWrapping", Style.BraceWrapping);
+ IO.mapOptional("BreakBeforeBinaryOperators",
+ Style.BreakBeforeBinaryOperators);
+ IO.mapOptional("BreakBeforeBraces", Style.BreakBeforeBraces);
+ IO.mapOptional("BreakBeforeTernaryOperators",
+ Style.BreakBeforeTernaryOperators);
+ IO.mapOptional("BreakConstructorInitializersBeforeComma",
+ Style.BreakConstructorInitializersBeforeComma);
+ IO.mapOptional("ColumnLimit", Style.ColumnLimit);
+ IO.mapOptional("CommentPragmas", Style.CommentPragmas);
+ IO.mapOptional("ConstructorInitializerAllOnOneLineOrOnePerLine",
+ Style.ConstructorInitializerAllOnOneLineOrOnePerLine);
+ IO.mapOptional("ConstructorInitializerIndentWidth",
+ Style.ConstructorInitializerIndentWidth);
+ IO.mapOptional("ContinuationIndentWidth", Style.ContinuationIndentWidth);
+ IO.mapOptional("Cpp11BracedListStyle", Style.Cpp11BracedListStyle);
+ IO.mapOptional("DerivePointerAlignment", Style.DerivePointerAlignment);
+ IO.mapOptional("DisableFormat", Style.DisableFormat);
+ IO.mapOptional("ExperimentalAutoDetectBinPacking",
+ Style.ExperimentalAutoDetectBinPacking);
+ IO.mapOptional("ForEachMacros", Style.ForEachMacros);
+ IO.mapOptional("IncludeCategories", Style.IncludeCategories);
+ IO.mapOptional("IndentCaseLabels", Style.IndentCaseLabels);
+ IO.mapOptional("IndentWidth", Style.IndentWidth);
+ IO.mapOptional("IndentWrappedFunctionNames",
+ Style.IndentWrappedFunctionNames);
+ IO.mapOptional("KeepEmptyLinesAtTheStartOfBlocks",
+ Style.KeepEmptyLinesAtTheStartOfBlocks);
+ IO.mapOptional("MacroBlockBegin", Style.MacroBlockBegin);
+ IO.mapOptional("MacroBlockEnd", Style.MacroBlockEnd);
+ IO.mapOptional("MaxEmptyLinesToKeep", Style.MaxEmptyLinesToKeep);
+ IO.mapOptional("NamespaceIndentation", Style.NamespaceIndentation);
+ IO.mapOptional("ObjCBlockIndentWidth", Style.ObjCBlockIndentWidth);
+ IO.mapOptional("ObjCSpaceAfterProperty", Style.ObjCSpaceAfterProperty);
+ IO.mapOptional("ObjCSpaceBeforeProtocolList",
+ Style.ObjCSpaceBeforeProtocolList);
+ IO.mapOptional("PenaltyBreakBeforeFirstCallParameter",
+ Style.PenaltyBreakBeforeFirstCallParameter);
+ IO.mapOptional("PenaltyBreakComment", Style.PenaltyBreakComment);
+ IO.mapOptional("PenaltyBreakFirstLessLess",
+ Style.PenaltyBreakFirstLessLess);
+ IO.mapOptional("PenaltyBreakString", Style.PenaltyBreakString);
+ IO.mapOptional("PenaltyExcessCharacter", Style.PenaltyExcessCharacter);
+ IO.mapOptional("PenaltyReturnTypeOnItsOwnLine",
+ Style.PenaltyReturnTypeOnItsOwnLine);
+ IO.mapOptional("PointerAlignment", Style.PointerAlignment);
+ IO.mapOptional("ReflowComments", Style.ReflowComments);
+ IO.mapOptional("SortIncludes", Style.SortIncludes);
+ IO.mapOptional("SpaceAfterCStyleCast", Style.SpaceAfterCStyleCast);
+ IO.mapOptional("SpaceBeforeAssignmentOperators",
+ Style.SpaceBeforeAssignmentOperators);
+ IO.mapOptional("SpaceBeforeParens", Style.SpaceBeforeParens);
+ IO.mapOptional("SpaceInEmptyParentheses", Style.SpaceInEmptyParentheses);
+ IO.mapOptional("SpacesBeforeTrailingComments",
+ Style.SpacesBeforeTrailingComments);
+ IO.mapOptional("SpacesInAngles", Style.SpacesInAngles);
+ IO.mapOptional("SpacesInContainerLiterals",
+ Style.SpacesInContainerLiterals);
+ IO.mapOptional("SpacesInCStyleCastParentheses",
+ Style.SpacesInCStyleCastParentheses);
+ IO.mapOptional("SpacesInParentheses", Style.SpacesInParentheses);
+ IO.mapOptional("SpacesInSquareBrackets", Style.SpacesInSquareBrackets);
+ IO.mapOptional("Standard", Style.Standard);
+ IO.mapOptional("TabWidth", Style.TabWidth);
+ IO.mapOptional("UseTab", Style.UseTab);
+ }
+};
+
+template <> struct MappingTraits<FormatStyle::BraceWrappingFlags> {
+ static void mapping(IO &IO, FormatStyle::BraceWrappingFlags &Wrapping) {
+ IO.mapOptional("AfterClass", Wrapping.AfterClass);
+ IO.mapOptional("AfterControlStatement", Wrapping.AfterControlStatement);
+ IO.mapOptional("AfterEnum", Wrapping.AfterEnum);
+ IO.mapOptional("AfterFunction", Wrapping.AfterFunction);
+ IO.mapOptional("AfterNamespace", Wrapping.AfterNamespace);
+ IO.mapOptional("AfterObjCDeclaration", Wrapping.AfterObjCDeclaration);
+ IO.mapOptional("AfterStruct", Wrapping.AfterStruct);
+ IO.mapOptional("AfterUnion", Wrapping.AfterUnion);
+ IO.mapOptional("BeforeCatch", Wrapping.BeforeCatch);
+ IO.mapOptional("BeforeElse", Wrapping.BeforeElse);
+ IO.mapOptional("IndentBraces", Wrapping.IndentBraces);
+ }
+};
+
+template <> struct MappingTraits<FormatStyle::IncludeCategory> {
+ static void mapping(IO &IO, FormatStyle::IncludeCategory &Category) {
+ IO.mapOptional("Regex", Category.Regex);
+ IO.mapOptional("Priority", Category.Priority);
+ }
+};
+
+// Allows reading a vector<FormatStyle> while keeping default values.
+// IO.getContext() should contain a pointer to the FormatStyle structure, which
+// is used to get default values for missing keys.
+// If the first element has no Language specified, it is treated as the
+// default for the following elements.
+template <> struct DocumentListTraits<std::vector<FormatStyle>> {
+ static size_t size(IO &IO, std::vector<FormatStyle> &Seq) {
+ return Seq.size();
+ }
+ static FormatStyle &element(IO &IO, std::vector<FormatStyle> &Seq,
+ size_t Index) {
+ if (Index >= Seq.size()) {
+ assert(Index == Seq.size());
+ FormatStyle Template;
+ if (Seq.size() > 0 && Seq[0].Language == FormatStyle::LK_None) {
+ Template = Seq[0];
+ } else {
+ Template = *((const FormatStyle *)IO.getContext());
+ Template.Language = FormatStyle::LK_None;
+ }
+ Seq.resize(Index + 1, Template);
+ }
+ return Seq[Index];
+ }
+};
+} // namespace yaml
+} // namespace llvm
+
+namespace clang {
+namespace format {
+
+const std::error_category &getParseCategory() {
+ static ParseErrorCategory C;
+ return C;
+}
+std::error_code make_error_code(ParseError e) {
+ return std::error_code(static_cast<int>(e), getParseCategory());
+}
+
+const char *ParseErrorCategory::name() const LLVM_NOEXCEPT {
+ return "clang-format.parse_error";
+}
+
+std::string ParseErrorCategory::message(int EV) const {
+ switch (static_cast<ParseError>(EV)) {
+ case ParseError::Success:
+ return "Success";
+ case ParseError::Error:
+ return "Invalid argument";
+ case ParseError::Unsuitable:
+ return "Unsuitable";
+ }
+ llvm_unreachable("unexpected parse error");
+}
+
+static FormatStyle expandPresets(const FormatStyle &Style) {
+ if (Style.BreakBeforeBraces == FormatStyle::BS_Custom)
+ return Style;
+ FormatStyle Expanded = Style;
+ Expanded.BraceWrapping = {false, false, false, false, false, false,
+ false, false, false, false, false};
+ switch (Style.BreakBeforeBraces) {
+ case FormatStyle::BS_Linux:
+ Expanded.BraceWrapping.AfterClass = true;
+ Expanded.BraceWrapping.AfterFunction = true;
+ Expanded.BraceWrapping.AfterNamespace = true;
+ break;
+ case FormatStyle::BS_Mozilla:
+ Expanded.BraceWrapping.AfterClass = true;
+ Expanded.BraceWrapping.AfterEnum = true;
+ Expanded.BraceWrapping.AfterFunction = true;
+ Expanded.BraceWrapping.AfterStruct = true;
+ Expanded.BraceWrapping.AfterUnion = true;
+ break;
+ case FormatStyle::BS_Stroustrup:
+ Expanded.BraceWrapping.AfterFunction = true;
+ Expanded.BraceWrapping.BeforeCatch = true;
+ Expanded.BraceWrapping.BeforeElse = true;
+ break;
+ case FormatStyle::BS_Allman:
+ Expanded.BraceWrapping.AfterClass = true;
+ Expanded.BraceWrapping.AfterControlStatement = true;
+ Expanded.BraceWrapping.AfterEnum = true;
+ Expanded.BraceWrapping.AfterFunction = true;
+ Expanded.BraceWrapping.AfterNamespace = true;
+ Expanded.BraceWrapping.AfterObjCDeclaration = true;
+ Expanded.BraceWrapping.AfterStruct = true;
+ Expanded.BraceWrapping.BeforeCatch = true;
+ Expanded.BraceWrapping.BeforeElse = true;
+ break;
+ case FormatStyle::BS_GNU:
+ Expanded.BraceWrapping = {true, true, true, true, true, true,
+ true, true, true, true, true};
+ break;
+ case FormatStyle::BS_WebKit:
+ Expanded.BraceWrapping.AfterFunction = true;
+ break;
+ default:
+ break;
+ }
+ return Expanded;
+}
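+
+// Illustrative expansion (a sketch; these variables are hypothetical):
+//   FormatStyle S = getLLVMStyle();
+//   S.BreakBeforeBraces = FormatStyle::BS_Linux;
+//   FormatStyle E = expandPresets(S);
+//   // In E.BraceWrapping only AfterClass, AfterFunction and AfterNamespace
+//   // are true; every other wrapping flag is false.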
+
+FormatStyle getLLVMStyle() {
+ FormatStyle LLVMStyle;
+ LLVMStyle.Language = FormatStyle::LK_Cpp;
+ LLVMStyle.AccessModifierOffset = -2;
+ LLVMStyle.AlignEscapedNewlinesLeft = false;
+ LLVMStyle.AlignAfterOpenBracket = FormatStyle::BAS_Align;
+ LLVMStyle.AlignOperands = true;
+ LLVMStyle.AlignTrailingComments = true;
+ LLVMStyle.AlignConsecutiveAssignments = false;
+ LLVMStyle.AlignConsecutiveDeclarations = false;
+ LLVMStyle.AllowAllParametersOfDeclarationOnNextLine = true;
+ LLVMStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_All;
+ LLVMStyle.AllowShortBlocksOnASingleLine = false;
+ LLVMStyle.AllowShortCaseLabelsOnASingleLine = false;
+ LLVMStyle.AllowShortIfStatementsOnASingleLine = false;
+ LLVMStyle.AllowShortLoopsOnASingleLine = false;
+ LLVMStyle.AlwaysBreakAfterReturnType = FormatStyle::RTBS_None;
+ LLVMStyle.AlwaysBreakAfterDefinitionReturnType = FormatStyle::DRTBS_None;
+ LLVMStyle.AlwaysBreakBeforeMultilineStrings = false;
+ LLVMStyle.AlwaysBreakTemplateDeclarations = false;
+ LLVMStyle.BinPackParameters = true;
+ LLVMStyle.BinPackArguments = true;
+ LLVMStyle.BreakBeforeBinaryOperators = FormatStyle::BOS_None;
+ LLVMStyle.BreakBeforeTernaryOperators = true;
+ LLVMStyle.BreakBeforeBraces = FormatStyle::BS_Attach;
+ LLVMStyle.BraceWrapping = {false, false, false, false, false, false,
+ false, false, false, false, false};
+ LLVMStyle.BreakConstructorInitializersBeforeComma = false;
+ LLVMStyle.BreakAfterJavaFieldAnnotations = false;
+ LLVMStyle.ColumnLimit = 80;
+ LLVMStyle.CommentPragmas = "^ IWYU pragma:";
+ LLVMStyle.ConstructorInitializerAllOnOneLineOrOnePerLine = false;
+ LLVMStyle.ConstructorInitializerIndentWidth = 4;
+ LLVMStyle.ContinuationIndentWidth = 4;
+ LLVMStyle.Cpp11BracedListStyle = true;
+ LLVMStyle.DerivePointerAlignment = false;
+ LLVMStyle.ExperimentalAutoDetectBinPacking = false;
+ LLVMStyle.ForEachMacros.push_back("foreach");
+ LLVMStyle.ForEachMacros.push_back("Q_FOREACH");
+ LLVMStyle.ForEachMacros.push_back("BOOST_FOREACH");
+ LLVMStyle.IncludeCategories = {{"^\"(llvm|llvm-c|clang|clang-c)/", 2},
+ {"^(<|\"(gtest|isl|json)/)", 3},
+ {".*", 1}};
+ LLVMStyle.IndentCaseLabels = false;
+ LLVMStyle.IndentWrappedFunctionNames = false;
+ LLVMStyle.IndentWidth = 2;
+ LLVMStyle.TabWidth = 8;
+ LLVMStyle.MaxEmptyLinesToKeep = 1;
+ LLVMStyle.KeepEmptyLinesAtTheStartOfBlocks = true;
+ LLVMStyle.NamespaceIndentation = FormatStyle::NI_None;
+ LLVMStyle.ObjCBlockIndentWidth = 2;
+ LLVMStyle.ObjCSpaceAfterProperty = false;
+ LLVMStyle.ObjCSpaceBeforeProtocolList = true;
+ LLVMStyle.PointerAlignment = FormatStyle::PAS_Right;
+ LLVMStyle.SpacesBeforeTrailingComments = 1;
+ LLVMStyle.Standard = FormatStyle::LS_Cpp11;
+ LLVMStyle.UseTab = FormatStyle::UT_Never;
+ LLVMStyle.ReflowComments = true;
+ LLVMStyle.SpacesInParentheses = false;
+ LLVMStyle.SpacesInSquareBrackets = false;
+ LLVMStyle.SpaceInEmptyParentheses = false;
+ LLVMStyle.SpacesInContainerLiterals = true;
+ LLVMStyle.SpacesInCStyleCastParentheses = false;
+ LLVMStyle.SpaceAfterCStyleCast = false;
+ LLVMStyle.SpaceBeforeParens = FormatStyle::SBPO_ControlStatements;
+ LLVMStyle.SpaceBeforeAssignmentOperators = true;
+ LLVMStyle.SpacesInAngles = false;
+
+ LLVMStyle.PenaltyBreakComment = 300;
+ LLVMStyle.PenaltyBreakFirstLessLess = 120;
+ LLVMStyle.PenaltyBreakString = 1000;
+ LLVMStyle.PenaltyExcessCharacter = 1000000;
+ LLVMStyle.PenaltyReturnTypeOnItsOwnLine = 60;
+ LLVMStyle.PenaltyBreakBeforeFirstCallParameter = 19;
+
+ LLVMStyle.DisableFormat = false;
+ LLVMStyle.SortIncludes = true;
+
+ return LLVMStyle;
+}
+
+FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
+ FormatStyle GoogleStyle = getLLVMStyle();
+ GoogleStyle.Language = Language;
+
+ GoogleStyle.AccessModifierOffset = -1;
+ GoogleStyle.AlignEscapedNewlinesLeft = true;
+ GoogleStyle.AllowShortIfStatementsOnASingleLine = true;
+ GoogleStyle.AllowShortLoopsOnASingleLine = true;
+ GoogleStyle.AlwaysBreakBeforeMultilineStrings = true;
+ GoogleStyle.AlwaysBreakTemplateDeclarations = true;
+ GoogleStyle.ConstructorInitializerAllOnOneLineOrOnePerLine = true;
+ GoogleStyle.DerivePointerAlignment = true;
+ GoogleStyle.IncludeCategories = {{"^<.*\\.h>", 1}, {"^<.*", 2}, {".*", 3}};
+ GoogleStyle.IndentCaseLabels = true;
+ GoogleStyle.KeepEmptyLinesAtTheStartOfBlocks = false;
+ GoogleStyle.ObjCSpaceAfterProperty = false;
+ GoogleStyle.ObjCSpaceBeforeProtocolList = false;
+ GoogleStyle.PointerAlignment = FormatStyle::PAS_Left;
+ GoogleStyle.SpacesBeforeTrailingComments = 2;
+ GoogleStyle.Standard = FormatStyle::LS_Auto;
+
+ GoogleStyle.PenaltyReturnTypeOnItsOwnLine = 200;
+ GoogleStyle.PenaltyBreakBeforeFirstCallParameter = 1;
+
+ if (Language == FormatStyle::LK_Java) {
+ GoogleStyle.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
+ GoogleStyle.AlignOperands = false;
+ GoogleStyle.AlignTrailingComments = false;
+ GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Empty;
+ GoogleStyle.AllowShortIfStatementsOnASingleLine = false;
+ GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
+ GoogleStyle.BreakBeforeBinaryOperators = FormatStyle::BOS_NonAssignment;
+ GoogleStyle.ColumnLimit = 100;
+ GoogleStyle.SpaceAfterCStyleCast = true;
+ GoogleStyle.SpacesBeforeTrailingComments = 1;
+ } else if (Language == FormatStyle::LK_JavaScript) {
+ GoogleStyle.AlignAfterOpenBracket = FormatStyle::BAS_AlwaysBreak;
+ GoogleStyle.AlignOperands = false;
+ GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Inline;
+ GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
+ GoogleStyle.BreakBeforeTernaryOperators = false;
+ GoogleStyle.MaxEmptyLinesToKeep = 3;
+ GoogleStyle.SpacesInContainerLiterals = false;
+ } else if (Language == FormatStyle::LK_Proto) {
+ GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_None;
+ GoogleStyle.SpacesInContainerLiterals = false;
+ }
+
+ return GoogleStyle;
+}
+
+FormatStyle getChromiumStyle(FormatStyle::LanguageKind Language) {
+ FormatStyle ChromiumStyle = getGoogleStyle(Language);
+ if (Language == FormatStyle::LK_Java) {
+ ChromiumStyle.AllowShortIfStatementsOnASingleLine = true;
+ ChromiumStyle.BreakAfterJavaFieldAnnotations = true;
+ ChromiumStyle.ContinuationIndentWidth = 8;
+ ChromiumStyle.IndentWidth = 4;
+ } else {
+ ChromiumStyle.AllowAllParametersOfDeclarationOnNextLine = false;
+ ChromiumStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Inline;
+ ChromiumStyle.AllowShortIfStatementsOnASingleLine = false;
+ ChromiumStyle.AllowShortLoopsOnASingleLine = false;
+ ChromiumStyle.BinPackParameters = false;
+ ChromiumStyle.DerivePointerAlignment = false;
+ }
+ ChromiumStyle.SortIncludes = false;
+ return ChromiumStyle;
+}
+
+FormatStyle getMozillaStyle() {
+ FormatStyle MozillaStyle = getLLVMStyle();
+ MozillaStyle.AllowAllParametersOfDeclarationOnNextLine = false;
+ MozillaStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Inline;
+ MozillaStyle.AlwaysBreakAfterReturnType =
+ FormatStyle::RTBS_TopLevelDefinitions;
+ MozillaStyle.AlwaysBreakAfterDefinitionReturnType =
+ FormatStyle::DRTBS_TopLevel;
+ MozillaStyle.AlwaysBreakTemplateDeclarations = true;
+ MozillaStyle.BreakBeforeBraces = FormatStyle::BS_Mozilla;
+ MozillaStyle.BreakConstructorInitializersBeforeComma = true;
+ MozillaStyle.ConstructorInitializerIndentWidth = 2;
+ MozillaStyle.ContinuationIndentWidth = 2;
+ MozillaStyle.Cpp11BracedListStyle = false;
+ MozillaStyle.IndentCaseLabels = true;
+ MozillaStyle.ObjCSpaceAfterProperty = true;
+ MozillaStyle.ObjCSpaceBeforeProtocolList = false;
+ MozillaStyle.PenaltyReturnTypeOnItsOwnLine = 200;
+ MozillaStyle.PointerAlignment = FormatStyle::PAS_Left;
+ return MozillaStyle;
+}
+
+FormatStyle getWebKitStyle() {
+ FormatStyle Style = getLLVMStyle();
+ Style.AccessModifierOffset = -4;
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
+ Style.AlignOperands = false;
+ Style.AlignTrailingComments = false;
+ Style.BreakBeforeBinaryOperators = FormatStyle::BOS_All;
+ Style.BreakBeforeBraces = FormatStyle::BS_WebKit;
+ Style.BreakConstructorInitializersBeforeComma = true;
+ Style.Cpp11BracedListStyle = false;
+ Style.ColumnLimit = 0;
+ Style.IndentWidth = 4;
+ Style.NamespaceIndentation = FormatStyle::NI_Inner;
+ Style.ObjCBlockIndentWidth = 4;
+ Style.ObjCSpaceAfterProperty = true;
+ Style.PointerAlignment = FormatStyle::PAS_Left;
+ Style.Standard = FormatStyle::LS_Cpp03;
+ return Style;
+}
+
+FormatStyle getGNUStyle() {
+ FormatStyle Style = getLLVMStyle();
+ Style.AlwaysBreakAfterDefinitionReturnType = FormatStyle::DRTBS_All;
+ Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_AllDefinitions;
+ Style.BreakBeforeBinaryOperators = FormatStyle::BOS_All;
+ Style.BreakBeforeBraces = FormatStyle::BS_GNU;
+ Style.BreakBeforeTernaryOperators = true;
+ Style.Cpp11BracedListStyle = false;
+ Style.ColumnLimit = 79;
+ Style.SpaceBeforeParens = FormatStyle::SBPO_Always;
+ Style.Standard = FormatStyle::LS_Cpp03;
+ return Style;
+}
+
+FormatStyle getNoStyle() {
+ FormatStyle NoStyle = getLLVMStyle();
+ NoStyle.DisableFormat = true;
+ NoStyle.SortIncludes = false;
+ return NoStyle;
+}
+
+bool getPredefinedStyle(StringRef Name, FormatStyle::LanguageKind Language,
+ FormatStyle *Style) {
+ if (Name.equals_lower("llvm")) {
+ *Style = getLLVMStyle();
+ } else if (Name.equals_lower("chromium")) {
+ *Style = getChromiumStyle(Language);
+ } else if (Name.equals_lower("mozilla")) {
+ *Style = getMozillaStyle();
+ } else if (Name.equals_lower("google")) {
+ *Style = getGoogleStyle(Language);
+ } else if (Name.equals_lower("webkit")) {
+ *Style = getWebKitStyle();
+ } else if (Name.equals_lower("gnu")) {
+ *Style = getGNUStyle();
+ } else if (Name.equals_lower("none")) {
+ *Style = getNoStyle();
+ } else {
+ return false;
+ }
+
+ Style->Language = Language;
+ return true;
+}
+
+std::error_code parseConfiguration(StringRef Text, FormatStyle *Style) {
+ assert(Style);
+ FormatStyle::LanguageKind Language = Style->Language;
+ assert(Language != FormatStyle::LK_None);
+ if (Text.trim().empty())
+ return make_error_code(ParseError::Error);
+
+ std::vector<FormatStyle> Styles;
+ llvm::yaml::Input Input(Text);
+ // DocumentListTraits<vector<FormatStyle>> uses the context to get default
+ // values for the fields, keys for which are missing from the configuration.
+ // Mapping also uses the context to get the language to find the correct
+ // base style.
+ Input.setContext(Style);
+ Input >> Styles;
+ if (Input.error())
+ return Input.error();
+
+ for (unsigned i = 0; i < Styles.size(); ++i) {
+ // Ensure that only the first configuration can skip the Language option.
+ if (Styles[i].Language == FormatStyle::LK_None && i != 0)
+ return make_error_code(ParseError::Error);
+ // Ensure that each language is configured at most once.
+ for (unsigned j = 0; j < i; ++j) {
+ if (Styles[i].Language == Styles[j].Language) {
+ DEBUG(llvm::dbgs()
+ << "Duplicate languages in the config file on positions " << j
+ << " and " << i << "\n");
+ return make_error_code(ParseError::Error);
+ }
+ }
+ }
+ // Look for a suitable configuration starting from the end, so we can
+ // find the configuration for the specific language first, and the default
+ // configuration (which can only be at slot 0) after it.
+ for (int i = Styles.size() - 1; i >= 0; --i) {
+ if (Styles[i].Language == Language ||
+ Styles[i].Language == FormatStyle::LK_None) {
+ *Style = Styles[i];
+ Style->Language = Language;
+ return make_error_code(ParseError::Success);
+ }
+ }
+ return make_error_code(ParseError::Unsuitable);
+}
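+
+// A minimal usage sketch (hypothetical configuration text):
+//   FormatStyle Style = getLLVMStyle(); // sets Style.Language to LK_Cpp
+//   parseConfiguration("---\n"
+//                      "BasedOnStyle: Google\n"
+//                      "ColumnLimit: 100\n"
+//                      "---\n"
+//                      "Language: JavaScript\n"
+//                      "ColumnLimit: 120\n", &Style);
+//   // The first document has no Language, so it acts as the default; the
+//   // backwards search selects it for LK_Cpp and Style.ColumnLimit == 100.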
+
+std::string configurationAsText(const FormatStyle &Style) {
+ std::string Text;
+ llvm::raw_string_ostream Stream(Text);
+ llvm::yaml::Output Output(Stream);
+ // We use the same mapping method for input and output, so we need a non-const
+ // reference here.
+ FormatStyle NonConstStyle = expandPresets(Style);
+ Output << NonConstStyle;
+ return Stream.str();
+}
+
+namespace {
+
+class FormatTokenLexer {
+public:
+ FormatTokenLexer(SourceManager &SourceMgr, FileID ID, FormatStyle &Style,
+ encoding::Encoding Encoding)
+ : FormatTok(nullptr), IsFirstToken(true), GreaterStashed(false),
+ LessStashed(false), Column(0), TrailingWhitespace(0),
+ SourceMgr(SourceMgr), ID(ID), Style(Style),
+ IdentTable(getFormattingLangOpts(Style)), Keywords(IdentTable),
+ Encoding(Encoding), FirstInLineIndex(0), FormattingDisabled(false),
+ MacroBlockBeginRegex(Style.MacroBlockBegin),
+ MacroBlockEndRegex(Style.MacroBlockEnd) {
+ Lex.reset(new Lexer(ID, SourceMgr.getBuffer(ID), SourceMgr,
+ getFormattingLangOpts(Style)));
+ Lex->SetKeepWhitespaceMode(true);
+
+ for (const std::string &ForEachMacro : Style.ForEachMacros)
+ ForEachMacros.push_back(&IdentTable.get(ForEachMacro));
+ std::sort(ForEachMacros.begin(), ForEachMacros.end());
+ }
+
+ ArrayRef<FormatToken *> lex() {
+ assert(Tokens.empty());
+ assert(FirstInLineIndex == 0);
+ do {
+ Tokens.push_back(getNextToken());
+ if (Style.Language == FormatStyle::LK_JavaScript)
+ tryParseJSRegexLiteral();
+ tryMergePreviousTokens();
+ if (Tokens.back()->NewlinesBefore > 0 || Tokens.back()->IsMultiline)
+ FirstInLineIndex = Tokens.size() - 1;
+ } while (Tokens.back()->Tok.isNot(tok::eof));
+ return Tokens;
+ }
+
+ const AdditionalKeywords &getKeywords() { return Keywords; }
+
+private:
+ void tryMergePreviousTokens() {
+ if (tryMerge_TMacro())
+ return;
+ if (tryMergeConflictMarkers())
+ return;
+ if (tryMergeLessLess())
+ return;
+
+ if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (tryMergeTemplateString())
+ return;
+
+ static const tok::TokenKind JSIdentity[] = {tok::equalequal, tok::equal};
+ static const tok::TokenKind JSNotIdentity[] = {tok::exclaimequal,
+ tok::equal};
+ static const tok::TokenKind JSShiftEqual[] = {tok::greater, tok::greater,
+ tok::greaterequal};
+ static const tok::TokenKind JSRightArrow[] = {tok::equal, tok::greater};
+ // FIXME: Investigate what token type gives the correct operator priority.
+ if (tryMergeTokens(JSIdentity, TT_BinaryOperator))
+ return;
+ if (tryMergeTokens(JSNotIdentity, TT_BinaryOperator))
+ return;
+ if (tryMergeTokens(JSShiftEqual, TT_BinaryOperator))
+ return;
+ if (tryMergeTokens(JSRightArrow, TT_JsFatArrow))
+ return;
+ }
+ }
+
+ bool tryMergeLessLess() {
+ // Merge X,less,less,Y into X,lessless,Y unless X or Y is less.
+ if (Tokens.size() < 3)
+ return false;
+
+ bool FourthTokenIsLess = false;
+ if (Tokens.size() > 3)
+ FourthTokenIsLess = (Tokens.end() - 4)[0]->is(tok::less);
+
+ auto First = Tokens.end() - 3;
+ if (First[2]->is(tok::less) || First[1]->isNot(tok::less) ||
+ First[0]->isNot(tok::less) || FourthTokenIsLess)
+ return false;
+
+ // Only merge if there currently is no whitespace between the two "<".
+ if (First[1]->WhitespaceRange.getBegin() !=
+ First[1]->WhitespaceRange.getEnd())
+ return false;
+
+ First[0]->Tok.setKind(tok::lessless);
+ First[0]->TokenText = "<<";
+ First[0]->ColumnWidth += 1;
+ Tokens.erase(Tokens.end() - 2);
+ return true;
+ }
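+
+ // Illustrative behavior: getNextToken below splits every "<<" into two '<'
+ // tokens, and tryMergeLessLess re-merges the pair once the following token
+ // is known. "cin << x" therefore keeps its "<<", while a run of three or
+ // more '<' (as in "A<<<B") is left as individual '<' tokens.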
+
+ bool tryMergeTokens(ArrayRef<tok::TokenKind> Kinds, TokenType NewType) {
+ if (Tokens.size() < Kinds.size())
+ return false;
+
+ SmallVectorImpl<FormatToken *>::const_iterator First =
+ Tokens.end() - Kinds.size();
+ if (!First[0]->is(Kinds[0]))
+ return false;
+ unsigned AddLength = 0;
+ for (unsigned i = 1; i < Kinds.size(); ++i) {
+ if (!First[i]->is(Kinds[i]) ||
+ First[i]->WhitespaceRange.getBegin() !=
+ First[i]->WhitespaceRange.getEnd())
+ return false;
+ AddLength += First[i]->TokenText.size();
+ }
+ Tokens.resize(Tokens.size() - Kinds.size() + 1);
+ First[0]->TokenText = StringRef(First[0]->TokenText.data(),
+ First[0]->TokenText.size() + AddLength);
+ First[0]->ColumnWidth += AddLength;
+ First[0]->Type = NewType;
+ return true;
+ }
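+
+ // Illustrative example, assuming JavaScript input: "a === b" is lexed as
+ // identifier, tok::equalequal, tok::equal, identifier. With Kinds set to
+ // JSIdentity above, the two operator tokens are merged into one
+ // three-character TT_BinaryOperator token, provided no whitespace separates
+ // them.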
+
+ // Returns \c true if \p Tok can only be followed by an operand in JavaScript.
+ bool precedesOperand(FormatToken *Tok) {
+ // NB: This is not entirely correct, as an r_paren can introduce an operand
+ // location in e.g. `if (foo) /bar/.exec(...);`. That is a rare enough
+ // corner case to not matter in practice, though.
+ return Tok->isOneOf(tok::period, tok::l_paren, tok::comma, tok::l_brace,
+ tok::r_brace, tok::l_square, tok::semi, tok::exclaim,
+ tok::colon, tok::question, tok::tilde) ||
+ Tok->isOneOf(tok::kw_return, tok::kw_do, tok::kw_case, tok::kw_throw,
+ tok::kw_else, tok::kw_new, tok::kw_delete, tok::kw_void,
+ tok::kw_typeof, Keywords.kw_instanceof,
+ Keywords.kw_in) ||
+ Tok->isBinaryOperator();
+ }
+
+ bool canPrecedeRegexLiteral(FormatToken *Prev) {
+ if (!Prev)
+ return true;
+
+ // Regex literals can only follow after prefix unary operators, not after
+ // postfix unary operators. If the '++' is followed by a non-operand
+ // introducing token, the slash here is the operand and not the start of a
+ // regex.
+ if (Prev->isOneOf(tok::plusplus, tok::minusminus))
+ return (Tokens.size() < 3 || precedesOperand(Tokens[Tokens.size() - 3]));
+
+ // The previous token must introduce an operand location where regex
+ // literals can occur.
+ if (!precedesOperand(Prev))
+ return false;
+
+ return true;
+ }
+
+ // Tries to parse a JavaScript Regex literal starting at the current token,
+ // if that begins with a slash and is in a location where JavaScript allows
+ // regex literals. Changes the current token to a regex literal and updates
+ // its text if successful.
+ void tryParseJSRegexLiteral() {
+ FormatToken *RegexToken = Tokens.back();
+ if (!RegexToken->isOneOf(tok::slash, tok::slashequal))
+ return;
+
+ FormatToken *Prev = nullptr;
+ for (auto I = Tokens.rbegin() + 1, E = Tokens.rend(); I != E; ++I) {
+ // NB: Because previous pointers are not initialized yet, this cannot use
+ // Token.getPreviousNonComment.
+ if ((*I)->isNot(tok::comment)) {
+ Prev = *I;
+ break;
+ }
+ }
+
+ if (!canPrecedeRegexLiteral(Prev))
+ return;
+
+ // 'Manually' lex ahead in the current file buffer.
+ const char *Offset = Lex->getBufferLocation();
+ const char *RegexBegin = Offset - RegexToken->TokenText.size();
+ StringRef Buffer = Lex->getBuffer();
+ bool InCharacterClass = false;
+ bool HaveClosingSlash = false;
+ for (; !HaveClosingSlash && Offset != Buffer.end(); ++Offset) {
+ // Regular expressions are terminated with a '/', which can only be
+ // escaped using '\' or a character class between '[' and ']'.
+ // See http://www.ecma-international.org/ecma-262/5.1/#sec-7.8.5.
+ switch (*Offset) {
+ case '\\':
+ // Skip the escaped character.
+ ++Offset;
+ break;
+ case '[':
+ InCharacterClass = true;
+ break;
+ case ']':
+ InCharacterClass = false;
+ break;
+ case '/':
+ if (!InCharacterClass)
+ HaveClosingSlash = true;
+ break;
+ }
+ }
+
+ RegexToken->Type = TT_RegexLiteral;
+ // Treat regex literals like other string_literals.
+ RegexToken->Tok.setKind(tok::string_literal);
+ RegexToken->TokenText = StringRef(RegexBegin, Offset - RegexBegin);
+ RegexToken->ColumnWidth = RegexToken->TokenText.size();
+
+ resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset)));
+ }
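+
+ // Illustrative example: in "x = /a[/]b/;" the '/' after '=' sits in an
+ // operand position, so the scan above runs to the closing '/'; the slash
+ // inside "[/]" does not terminate the literal because it is within a
+ // character class. In "x / y" the '/' follows an identifier and is left as
+ // a division operator.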
+
+ bool tryMergeTemplateString() {
+ if (Tokens.size() < 2)
+ return false;
+
+ FormatToken *EndBacktick = Tokens.back();
+ // Backticks get lexed as tok::unknown tokens. If a template string contains
+ // a comment start, it gets lexed as a tok::comment, or tok::unknown if
+ // unterminated.
+ if (!EndBacktick->isOneOf(tok::comment, tok::string_literal,
+ tok::char_constant, tok::unknown))
+ return false;
+ size_t CommentBacktickPos = EndBacktick->TokenText.find('`');
+ // Unknown token that's not actually a backtick, or a comment that doesn't
+ // contain a backtick.
+ if (CommentBacktickPos == StringRef::npos)
+ return false;
+
+ unsigned TokenCount = 0;
+ bool IsMultiline = false;
+ unsigned EndColumnInFirstLine =
+ EndBacktick->OriginalColumn + EndBacktick->ColumnWidth;
+ for (auto I = Tokens.rbegin() + 1, E = Tokens.rend(); I != E; I++) {
+ ++TokenCount;
+ if (I[0]->IsMultiline)
+ IsMultiline = true;
+
+ // If there was a preceding template string, this must be the start of a
+ // template string, not the end.
+ if (I[0]->is(TT_TemplateString))
+ return false;
+
+ if (I[0]->isNot(tok::unknown) || I[0]->TokenText != "`") {
+ // Keep track of the rhs offset of the last token to wrap across lines;
+ // it is the rhs offset of the first line of the template string, used to
+ // determine its width.
+ if (I[0]->IsMultiline)
+ EndColumnInFirstLine = I[0]->OriginalColumn + I[0]->ColumnWidth;
+ // If the token has newlines, the token before it (if it exists) is the
+ // rhs end of the previous line.
+ if (I[0]->NewlinesBefore > 0 && (I + 1 != E)) {
+ EndColumnInFirstLine = I[1]->OriginalColumn + I[1]->ColumnWidth;
+ IsMultiline = true;
+ }
+ continue;
+ }
+
+ Tokens.resize(Tokens.size() - TokenCount);
+ Tokens.back()->Type = TT_TemplateString;
+ const char *EndOffset =
+ EndBacktick->TokenText.data() + 1 + CommentBacktickPos;
+ if (CommentBacktickPos != 0) {
+ // If the backtick was not the first character (e.g. in a comment),
+ // re-lex after the backtick position.
+ SourceLocation Loc = EndBacktick->Tok.getLocation();
+ resetLexer(SourceMgr.getFileOffset(Loc) + CommentBacktickPos + 1);
+ }
+ Tokens.back()->TokenText =
+ StringRef(Tokens.back()->TokenText.data(),
+ EndOffset - Tokens.back()->TokenText.data());
+
+ unsigned EndOriginalColumn = EndBacktick->OriginalColumn;
+ if (EndOriginalColumn == 0) {
+ SourceLocation Loc = EndBacktick->Tok.getLocation();
+ EndOriginalColumn = SourceMgr.getSpellingColumnNumber(Loc);
+ }
+ // If the ` is further down within the token (e.g. in a comment).
+ EndOriginalColumn += CommentBacktickPos;
+
+ if (IsMultiline) {
+ // ColumnWidth is from backtick to last token in line.
+ // LastLineColumnWidth is 0 to backtick.
+ // x = `some content
+ // until here`;
+ Tokens.back()->ColumnWidth =
+ EndColumnInFirstLine - Tokens.back()->OriginalColumn;
+ // +1 for the ` itself.
+ Tokens.back()->LastLineColumnWidth = EndOriginalColumn + 1;
+ Tokens.back()->IsMultiline = true;
+ } else {
+ // Token simply spans from start to end, +1 for the ` itself.
+ Tokens.back()->ColumnWidth =
+ EndOriginalColumn - Tokens.back()->OriginalColumn + 1;
+ }
+ return true;
+ }
+ return false;
+ }
+
+ bool tryMerge_TMacro() {
+ if (Tokens.size() < 4)
+ return false;
+ FormatToken *Last = Tokens.back();
+ if (!Last->is(tok::r_paren))
+ return false;
+
+ FormatToken *String = Tokens[Tokens.size() - 2];
+ if (!String->is(tok::string_literal) || String->IsMultiline)
+ return false;
+
+ if (!Tokens[Tokens.size() - 3]->is(tok::l_paren))
+ return false;
+
+ FormatToken *Macro = Tokens[Tokens.size() - 4];
+ if (Macro->TokenText != "_T")
+ return false;
+
+ const char *Start = Macro->TokenText.data();
+ const char *End = Last->TokenText.data() + Last->TokenText.size();
+ String->TokenText = StringRef(Start, End - Start);
+ String->IsFirst = Macro->IsFirst;
+ String->LastNewlineOffset = Macro->LastNewlineOffset;
+ String->WhitespaceRange = Macro->WhitespaceRange;
+ String->OriginalColumn = Macro->OriginalColumn;
+ String->ColumnWidth = encoding::columnWidthWithTabs(
+ String->TokenText, String->OriginalColumn, Style.TabWidth, Encoding);
+ String->NewlinesBefore = Macro->NewlinesBefore;
+ String->HasUnescapedNewline = Macro->HasUnescapedNewline;
+
+ Tokens.pop_back();
+ Tokens.pop_back();
+ Tokens.pop_back();
+ Tokens.back() = String;
+ return true;
+ }
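+
+ // Illustrative example: for _T("Hello") the four tokens _T, '(', the string
+ // literal and ')' are collapsed into a single string token whose text spans
+ // the whole macro invocation, so later stages treat it as one wide string
+ // literal.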
+
+ bool tryMergeConflictMarkers() {
+ if (Tokens.back()->NewlinesBefore == 0 && Tokens.back()->isNot(tok::eof))
+ return false;
+
+ // Conflict lines look like:
+ // <marker> <text from the vcs>
+ // For example:
+ // >>>>>>> /file/in/file/system at revision 1234
+ //
+ // We merge all tokens in a line that starts with a conflict marker
+ // into a single token with a special token type that the unwrapped line
+ // parser will use to correctly rebuild the underlying code.
+
+ FileID ID;
+ // Get the position of the first token in the line.
+ unsigned FirstInLineOffset;
+ std::tie(ID, FirstInLineOffset) = SourceMgr.getDecomposedLoc(
+ Tokens[FirstInLineIndex]->getStartOfNonWhitespace());
+ StringRef Buffer = SourceMgr.getBuffer(ID)->getBuffer();
+ // Calculate the offset of the start of the current line.
+ auto LineOffset = Buffer.rfind('\n', FirstInLineOffset);
+ if (LineOffset == StringRef::npos) {
+ LineOffset = 0;
+ } else {
+ ++LineOffset;
+ }
+
+ auto FirstSpace = Buffer.find_first_of(" \n", LineOffset);
+ StringRef LineStart;
+ if (FirstSpace == StringRef::npos) {
+ LineStart = Buffer.substr(LineOffset);
+ } else {
+ LineStart = Buffer.substr(LineOffset, FirstSpace - LineOffset);
+ }
+
+ TokenType Type = TT_Unknown;
+ if (LineStart == "<<<<<<<" || LineStart == ">>>>") {
+ Type = TT_ConflictStart;
+ } else if (LineStart == "|||||||" || LineStart == "=======" ||
+ LineStart == "====") {
+ Type = TT_ConflictAlternative;
+ } else if (LineStart == ">>>>>>>" || LineStart == "<<<<") {
+ Type = TT_ConflictEnd;
+ }
+
+ if (Type != TT_Unknown) {
+ FormatToken *Next = Tokens.back();
+
+ Tokens.resize(FirstInLineIndex + 1);
+ // We do not need to build a complete token here, as we will skip it
+ // during parsing anyway (as we must not touch whitespace around conflict
+ // markers).
+ Tokens.back()->Type = Type;
+ Tokens.back()->Tok.setKind(tok::kw___unknown_anytype);
+
+ Tokens.push_back(Next);
+ return true;
+ }
+
+ return false;
+ }
+
+ FormatToken *getStashedToken() {
+ // Create a synthesized second '>' or '<' token.
+ Token Tok = FormatTok->Tok;
+ StringRef TokenText = FormatTok->TokenText;
+
+ unsigned OriginalColumn = FormatTok->OriginalColumn;
+ FormatTok = new (Allocator.Allocate()) FormatToken;
+ FormatTok->Tok = Tok;
+ SourceLocation TokLocation =
+ FormatTok->Tok.getLocation().getLocWithOffset(Tok.getLength() - 1);
+ FormatTok->Tok.setLocation(TokLocation);
+ FormatTok->WhitespaceRange = SourceRange(TokLocation, TokLocation);
+ FormatTok->TokenText = TokenText;
+ FormatTok->ColumnWidth = 1;
+ FormatTok->OriginalColumn = OriginalColumn + 1;
+
+ return FormatTok;
+ }
+
+ FormatToken *getNextToken() {
+ if (GreaterStashed) {
+ GreaterStashed = false;
+ return getStashedToken();
+ }
+ if (LessStashed) {
+ LessStashed = false;
+ return getStashedToken();
+ }
+
+ FormatTok = new (Allocator.Allocate()) FormatToken;
+ readRawToken(*FormatTok);
+ SourceLocation WhitespaceStart =
+ FormatTok->Tok.getLocation().getLocWithOffset(-TrailingWhitespace);
+ FormatTok->IsFirst = IsFirstToken;
+ IsFirstToken = false;
+
+ // Consume and record whitespace until we find a significant token.
+ unsigned WhitespaceLength = TrailingWhitespace;
+ while (FormatTok->Tok.is(tok::unknown)) {
+ StringRef Text = FormatTok->TokenText;
+ auto EscapesNewline = [&](int pos) {
+ // A '\r' here is just part of '\r\n'. Skip it.
+ if (pos >= 0 && Text[pos] == '\r')
+ --pos;
+ // See whether there is an odd number of '\' before this.
+ unsigned count = 0;
+ for (; pos >= 0; --pos, ++count)
+ if (Text[pos] != '\\')
+ break;
+ return count & 1;
+ };
+ // FIXME: This miscounts tok::unknown tokens that are not just
+ // whitespace, e.g. a '`' character.
+ for (int i = 0, e = Text.size(); i != e; ++i) {
+ switch (Text[i]) {
+ case '\n':
+ ++FormatTok->NewlinesBefore;
+ FormatTok->HasUnescapedNewline = !EscapesNewline(i - 1);
+ FormatTok->LastNewlineOffset = WhitespaceLength + i + 1;
+ Column = 0;
+ break;
+ case '\r':
+ FormatTok->LastNewlineOffset = WhitespaceLength + i + 1;
+ Column = 0;
+ break;
+ case '\f':
+ case '\v':
+ Column = 0;
+ break;
+ case ' ':
+ ++Column;
+ break;
+ case '\t':
+ Column += Style.TabWidth - Column % Style.TabWidth;
+ break;
+ case '\\':
+ if (i + 1 == e || (Text[i + 1] != '\r' && Text[i + 1] != '\n'))
+ FormatTok->Type = TT_ImplicitStringLiteral;
+ break;
+ default:
+ FormatTok->Type = TT_ImplicitStringLiteral;
+ break;
+ }
+ }
+
+ if (FormatTok->is(TT_ImplicitStringLiteral))
+ break;
+ WhitespaceLength += FormatTok->Tok.getLength();
+
+ readRawToken(*FormatTok);
+ }
+
+ // In case the token starts with escaped newlines, we want to
+ // take them into account as whitespace - this pattern is quite frequent
+ // in macro definitions.
+ // FIXME: Add a more explicit test.
+ while (FormatTok->TokenText.size() > 1 && FormatTok->TokenText[0] == '\\' &&
+ FormatTok->TokenText[1] == '\n') {
+ ++FormatTok->NewlinesBefore;
+ WhitespaceLength += 2;
+ FormatTok->LastNewlineOffset = 2;
+ Column = 0;
+ FormatTok->TokenText = FormatTok->TokenText.substr(2);
+ }
+
+ FormatTok->WhitespaceRange = SourceRange(
+ WhitespaceStart, WhitespaceStart.getLocWithOffset(WhitespaceLength));
+
+ FormatTok->OriginalColumn = Column;
+
+ TrailingWhitespace = 0;
+ if (FormatTok->Tok.is(tok::comment)) {
+ // FIXME: Add the trimmed whitespace to Column.
+ StringRef UntrimmedText = FormatTok->TokenText;
+ FormatTok->TokenText = FormatTok->TokenText.rtrim(" \t\v\f");
+ TrailingWhitespace = UntrimmedText.size() - FormatTok->TokenText.size();
+ } else if (FormatTok->Tok.is(tok::raw_identifier)) {
+ IdentifierInfo &Info = IdentTable.get(FormatTok->TokenText);
+ FormatTok->Tok.setIdentifierInfo(&Info);
+ FormatTok->Tok.setKind(Info.getTokenID());
+ if (Style.Language == FormatStyle::LK_Java &&
+ FormatTok->isOneOf(tok::kw_struct, tok::kw_union, tok::kw_delete,
+ tok::kw_operator)) {
+ FormatTok->Tok.setKind(tok::identifier);
+ FormatTok->Tok.setIdentifierInfo(nullptr);
+ } else if (Style.Language == FormatStyle::LK_JavaScript &&
+ FormatTok->isOneOf(tok::kw_struct, tok::kw_union,
+ tok::kw_operator)) {
+ FormatTok->Tok.setKind(tok::identifier);
+ FormatTok->Tok.setIdentifierInfo(nullptr);
+ }
+ } else if (FormatTok->Tok.is(tok::greatergreater)) {
+ FormatTok->Tok.setKind(tok::greater);
+ FormatTok->TokenText = FormatTok->TokenText.substr(0, 1);
+ GreaterStashed = true;
+ } else if (FormatTok->Tok.is(tok::lessless)) {
+ FormatTok->Tok.setKind(tok::less);
+ FormatTok->TokenText = FormatTok->TokenText.substr(0, 1);
+ LessStashed = true;
+ }
+
+ // Now FormatTok is the next non-whitespace token.
+
+ StringRef Text = FormatTok->TokenText;
+ size_t FirstNewlinePos = Text.find('\n');
+ if (FirstNewlinePos == StringRef::npos) {
+ // FIXME: ColumnWidth actually depends on the start column, we need to
+ // take this into account when the token is moved.
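+      // E.g. "a\tb" starting at column 0 with a TabWidth of 8 spans 9 columns.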
+ FormatTok->ColumnWidth =
+ encoding::columnWidthWithTabs(Text, Column, Style.TabWidth, Encoding);
+ Column += FormatTok->ColumnWidth;
+ } else {
+ FormatTok->IsMultiline = true;
+ // FIXME: ColumnWidth actually depends on the start column, we need to
+ // take this into account when the token is moved.
+ FormatTok->ColumnWidth = encoding::columnWidthWithTabs(
+ Text.substr(0, FirstNewlinePos), Column, Style.TabWidth, Encoding);
+
+ // The last line of the token always starts in column 0.
+ // Thus, the length can be precomputed even in the presence of tabs.
+ FormatTok->LastLineColumnWidth = encoding::columnWidthWithTabs(
+ Text.substr(Text.find_last_of('\n') + 1), 0, Style.TabWidth,
+ Encoding);
+ Column = FormatTok->LastLineColumnWidth;
+ }
+
+ if (Style.Language == FormatStyle::LK_Cpp) {
+ if (!(Tokens.size() > 0 && Tokens.back()->Tok.getIdentifierInfo() &&
+ Tokens.back()->Tok.getIdentifierInfo()->getPPKeywordID() ==
+ tok::pp_define) &&
+ std::find(ForEachMacros.begin(), ForEachMacros.end(),
+ FormatTok->Tok.getIdentifierInfo()) != ForEachMacros.end()) {
+ FormatTok->Type = TT_ForEachMacro;
+ } else if (FormatTok->is(tok::identifier)) {
+ if (MacroBlockBeginRegex.match(Text)) {
+ FormatTok->Type = TT_MacroBlockBegin;
+ } else if (MacroBlockEndRegex.match(Text)) {
+ FormatTok->Type = TT_MacroBlockEnd;
+ }
+ }
+ }
+
+ return FormatTok;
+ }
+
+ FormatToken *FormatTok;
+ bool IsFirstToken;
+ bool GreaterStashed, LessStashed;
+ unsigned Column;
+ unsigned TrailingWhitespace;
+ std::unique_ptr<Lexer> Lex;
+ SourceManager &SourceMgr;
+ FileID ID;
+ FormatStyle &Style;
+ IdentifierTable IdentTable;
+ AdditionalKeywords Keywords;
+ encoding::Encoding Encoding;
+ llvm::SpecificBumpPtrAllocator<FormatToken> Allocator;
+ // Index (in 'Tokens') of the last token that starts a new line.
+ unsigned FirstInLineIndex;
+ SmallVector<FormatToken *, 16> Tokens;
+ SmallVector<IdentifierInfo *, 8> ForEachMacros;
+
+ bool FormattingDisabled;
+
+ llvm::Regex MacroBlockBeginRegex;
+ llvm::Regex MacroBlockEndRegex;
+
+ void readRawToken(FormatToken &Tok) {
+ Lex->LexFromRawLexer(Tok.Tok);
+ Tok.TokenText = StringRef(SourceMgr.getCharacterData(Tok.Tok.getLocation()),
+ Tok.Tok.getLength());
+ // For formatting, treat unterminated string literals like normal string
+ // literals.
+ if (Tok.is(tok::unknown)) {
+ if (!Tok.TokenText.empty() && Tok.TokenText[0] == '"') {
+ Tok.Tok.setKind(tok::string_literal);
+ Tok.IsUnterminatedLiteral = true;
+ } else if (Style.Language == FormatStyle::LK_JavaScript &&
+ Tok.TokenText == "''") {
+ Tok.Tok.setKind(tok::char_constant);
+ }
+ }
+
+ if (Tok.is(tok::comment) && (Tok.TokenText == "// clang-format on" ||
+ Tok.TokenText == "/* clang-format on */")) {
+ FormattingDisabled = false;
+ }
+
+ Tok.Finalized = FormattingDisabled;
+
+ if (Tok.is(tok::comment) && (Tok.TokenText == "// clang-format off" ||
+ Tok.TokenText == "/* clang-format off */")) {
+ FormattingDisabled = true;
+ }
+ }
+
+ void resetLexer(unsigned Offset) {
+ StringRef Buffer = SourceMgr.getBufferData(ID);
+ Lex.reset(new Lexer(SourceMgr.getLocForStartOfFile(ID),
+ getFormattingLangOpts(Style), Buffer.begin(),
+ Buffer.begin() + Offset, Buffer.end()));
+ Lex->SetKeepWhitespaceMode(true);
+ TrailingWhitespace = 0;
+ }
+};
+
+static StringRef getLanguageName(FormatStyle::LanguageKind Language) {
+ switch (Language) {
+ case FormatStyle::LK_Cpp:
+ return "C++";
+ case FormatStyle::LK_Java:
+ return "Java";
+ case FormatStyle::LK_JavaScript:
+ return "JavaScript";
+ case FormatStyle::LK_Proto:
+ return "Proto";
+ default:
+ return "Unknown";
+ }
+}
+
+class Formatter : public UnwrappedLineConsumer {
+public:
+ Formatter(const FormatStyle &Style, SourceManager &SourceMgr, FileID ID,
+ ArrayRef<CharSourceRange> Ranges)
+ : Style(Style), ID(ID), SourceMgr(SourceMgr),
+ Whitespaces(SourceMgr, Style,
+ inputUsesCRLF(SourceMgr.getBufferData(ID))),
+ Ranges(Ranges.begin(), Ranges.end()), UnwrappedLines(1),
+ Encoding(encoding::detectEncoding(SourceMgr.getBufferData(ID))) {
+ DEBUG(llvm::dbgs() << "File encoding: "
+ << (Encoding == encoding::Encoding_UTF8 ? "UTF8"
+ : "unknown")
+ << "\n");
+ DEBUG(llvm::dbgs() << "Language: " << getLanguageName(Style.Language)
+ << "\n");
+ }
+
+ tooling::Replacements format(bool *IncompleteFormat) {
+ tooling::Replacements Result;
+ FormatTokenLexer Tokens(SourceMgr, ID, Style, Encoding);
+
+ UnwrappedLineParser Parser(Style, Tokens.getKeywords(), Tokens.lex(),
+ *this);
+ Parser.parse();
+ assert(UnwrappedLines.rbegin()->empty());
+ for (unsigned Run = 0, RunE = UnwrappedLines.size(); Run + 1 != RunE;
+ ++Run) {
+ DEBUG(llvm::dbgs() << "Run " << Run << "...\n");
+ SmallVector<AnnotatedLine *, 16> AnnotatedLines;
+ for (unsigned i = 0, e = UnwrappedLines[Run].size(); i != e; ++i) {
+ AnnotatedLines.push_back(new AnnotatedLine(UnwrappedLines[Run][i]));
+ }
+ tooling::Replacements RunResult =
+ format(AnnotatedLines, Tokens, IncompleteFormat);
+ DEBUG({
+ llvm::dbgs() << "Replacements for run " << Run << ":\n";
+ for (tooling::Replacements::iterator I = RunResult.begin(),
+ E = RunResult.end();
+ I != E; ++I) {
+ llvm::dbgs() << I->toString() << "\n";
+ }
+ });
+ for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
+ delete AnnotatedLines[i];
+ }
+ Result.insert(RunResult.begin(), RunResult.end());
+ Whitespaces.reset();
+ }
+ return Result;
+ }
+
+ tooling::Replacements format(SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens,
+ bool *IncompleteFormat) {
+ TokenAnnotator Annotator(Style, Tokens.getKeywords());
+ for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
+ Annotator.annotate(*AnnotatedLines[i]);
+ }
+ deriveLocalStyle(AnnotatedLines);
+ for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
+ Annotator.calculateFormattingInformation(*AnnotatedLines[i]);
+ }
+ computeAffectedLines(AnnotatedLines.begin(), AnnotatedLines.end());
+
+ Annotator.setCommentLineLevels(AnnotatedLines);
+ ContinuationIndenter Indenter(Style, Tokens.getKeywords(), SourceMgr,
+ Whitespaces, Encoding,
+ BinPackInconclusiveFunctions);
+ UnwrappedLineFormatter(&Indenter, &Whitespaces, Style, Tokens.getKeywords(),
+ IncompleteFormat)
+ .format(AnnotatedLines);
+ return Whitespaces.generateReplacements();
+ }
+
+private:
+ // Determines which lines are affected by the SourceRanges given as input.
+ // Returns \c true if at least one line between I and E or one of their
+ // children is affected.
+ bool computeAffectedLines(SmallVectorImpl<AnnotatedLine *>::iterator I,
+ SmallVectorImpl<AnnotatedLine *>::iterator E) {
+ bool SomeLineAffected = false;
+ const AnnotatedLine *PreviousLine = nullptr;
+ while (I != E) {
+ AnnotatedLine *Line = *I;
+ Line->LeadingEmptyLinesAffected = affectsLeadingEmptyLines(*Line->First);
+
+ // If a line is part of a preprocessor directive, it needs to be formatted
+ // if any token within the directive is affected.
+ if (Line->InPPDirective) {
+ FormatToken *Last = Line->Last;
+ SmallVectorImpl<AnnotatedLine *>::iterator PPEnd = I + 1;
+ while (PPEnd != E && !(*PPEnd)->First->HasUnescapedNewline) {
+ Last = (*PPEnd)->Last;
+ ++PPEnd;
+ }
+
+ if (affectsTokenRange(*Line->First, *Last,
+ /*IncludeLeadingNewlines=*/false)) {
+ SomeLineAffected = true;
+ markAllAsAffected(I, PPEnd);
+ }
+ I = PPEnd;
+ continue;
+ }
+
+ if (nonPPLineAffected(Line, PreviousLine))
+ SomeLineAffected = true;
+
+ PreviousLine = Line;
+ ++I;
+ }
+ return SomeLineAffected;
+ }
+
+ // Determines whether 'Line' is affected by the SourceRanges given as input.
+  // Returns \c true if the line or one of its children is affected.
+ bool nonPPLineAffected(AnnotatedLine *Line,
+ const AnnotatedLine *PreviousLine) {
+ bool SomeLineAffected = false;
+ Line->ChildrenAffected =
+ computeAffectedLines(Line->Children.begin(), Line->Children.end());
+ if (Line->ChildrenAffected)
+ SomeLineAffected = true;
+
+ // Stores whether one of the line's tokens is directly affected.
+ bool SomeTokenAffected = false;
+ // Stores whether we need to look at the leading newlines of the next token
+ // in order to determine whether it was affected.
+ bool IncludeLeadingNewlines = false;
+
+ // Stores whether the first child line of any of this line's tokens is
+ // affected.
+ bool SomeFirstChildAffected = false;
+
+ for (FormatToken *Tok = Line->First; Tok; Tok = Tok->Next) {
+ // Determine whether 'Tok' was affected.
+ if (affectsTokenRange(*Tok, *Tok, IncludeLeadingNewlines))
+ SomeTokenAffected = true;
+
+ // Determine whether the first child of 'Tok' was affected.
+ if (!Tok->Children.empty() && Tok->Children.front()->Affected)
+ SomeFirstChildAffected = true;
+
+ IncludeLeadingNewlines = Tok->Children.empty();
+ }
+
+ // Was this line moved, i.e. has it previously been on the same line as an
+ // affected line?
+ bool LineMoved = PreviousLine && PreviousLine->Affected &&
+ Line->First->NewlinesBefore == 0;
+
+ bool IsContinuedComment =
+ Line->First->is(tok::comment) && Line->First->Next == nullptr &&
+ Line->First->NewlinesBefore < 2 && PreviousLine &&
+ PreviousLine->Affected && PreviousLine->Last->is(tok::comment);
+
+ if (SomeTokenAffected || SomeFirstChildAffected || LineMoved ||
+ IsContinuedComment) {
+ Line->Affected = true;
+ SomeLineAffected = true;
+ }
+ return SomeLineAffected;
+ }
+
+ // Marks all lines between I and E as well as all their children as affected.
+ void markAllAsAffected(SmallVectorImpl<AnnotatedLine *>::iterator I,
+ SmallVectorImpl<AnnotatedLine *>::iterator E) {
+ while (I != E) {
+ (*I)->Affected = true;
+ markAllAsAffected((*I)->Children.begin(), (*I)->Children.end());
+ ++I;
+ }
+ }
+
+ // Returns true if the range from 'First' to 'Last' intersects with one of the
+ // input ranges.
+ bool affectsTokenRange(const FormatToken &First, const FormatToken &Last,
+ bool IncludeLeadingNewlines) {
+ SourceLocation Start = First.WhitespaceRange.getBegin();
+ if (!IncludeLeadingNewlines)
+ Start = Start.getLocWithOffset(First.LastNewlineOffset);
+ SourceLocation End = Last.getStartOfNonWhitespace();
+ End = End.getLocWithOffset(Last.TokenText.size());
+ CharSourceRange Range = CharSourceRange::getCharRange(Start, End);
+ return affectsCharSourceRange(Range);
+ }
+
+  // Returns true if one of the input ranges intersects the leading empty
+  // lines before 'Tok'.
+ bool affectsLeadingEmptyLines(const FormatToken &Tok) {
+ CharSourceRange EmptyLineRange = CharSourceRange::getCharRange(
+ Tok.WhitespaceRange.getBegin(),
+ Tok.WhitespaceRange.getBegin().getLocWithOffset(Tok.LastNewlineOffset));
+ return affectsCharSourceRange(EmptyLineRange);
+ }
+
+ // Returns true if 'Range' intersects with one of the input ranges.
+ bool affectsCharSourceRange(const CharSourceRange &Range) {
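+    // Two character ranges intersect iff neither one ends before the other
+    // begins.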
+ for (SmallVectorImpl<CharSourceRange>::const_iterator I = Ranges.begin(),
+ E = Ranges.end();
+ I != E; ++I) {
+ if (!SourceMgr.isBeforeInTranslationUnit(Range.getEnd(), I->getBegin()) &&
+ !SourceMgr.isBeforeInTranslationUnit(I->getEnd(), Range.getBegin()))
+ return true;
+ }
+ return false;
+ }
+
+ static bool inputUsesCRLF(StringRef Text) {
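+    // Heuristic: assume CRLF line endings if more than half of the '\n'
+    // characters are preceded by '\r'.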
+ return Text.count('\r') * 2 > Text.count('\n');
+ }
+
+ bool
+ hasCpp03IncompatibleFormat(const SmallVectorImpl<AnnotatedLine *> &Lines) {
+ for (const AnnotatedLine* Line : Lines) {
+ if (hasCpp03IncompatibleFormat(Line->Children))
+ return true;
+ for (FormatToken *Tok = Line->First->Next; Tok; Tok = Tok->Next) {
+ if (Tok->WhitespaceRange.getBegin() == Tok->WhitespaceRange.getEnd()) {
+ if (Tok->is(tok::coloncolon) && Tok->Previous->is(TT_TemplateOpener))
+ return true;
+ if (Tok->is(TT_TemplateCloser) &&
+ Tok->Previous->is(TT_TemplateCloser))
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ int countVariableAlignments(const SmallVectorImpl<AnnotatedLine *> &Lines) {
+ int AlignmentDiff = 0;
+ for (const AnnotatedLine* Line : Lines) {
+ AlignmentDiff += countVariableAlignments(Line->Children);
+ for (FormatToken *Tok = Line->First; Tok && Tok->Next; Tok = Tok->Next) {
+ if (!Tok->is(TT_PointerOrReference))
+ continue;
+ bool SpaceBefore =
+ Tok->WhitespaceRange.getBegin() != Tok->WhitespaceRange.getEnd();
+ bool SpaceAfter = Tok->Next->WhitespaceRange.getBegin() !=
+ Tok->Next->WhitespaceRange.getEnd();
+ if (SpaceBefore && !SpaceAfter)
+ ++AlignmentDiff;
+ if (!SpaceBefore && SpaceAfter)
+ --AlignmentDiff;
+ }
+ }
+ return AlignmentDiff;
+ }
+
+ void
+ deriveLocalStyle(const SmallVectorImpl<AnnotatedLine *> &AnnotatedLines) {
+ bool HasBinPackedFunction = false;
+ bool HasOnePerLineFunction = false;
+ for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
+ if (!AnnotatedLines[i]->First->Next)
+ continue;
+ FormatToken *Tok = AnnotatedLines[i]->First->Next;
+ while (Tok->Next) {
+ if (Tok->PackingKind == PPK_BinPacked)
+ HasBinPackedFunction = true;
+ if (Tok->PackingKind == PPK_OnePerLine)
+ HasOnePerLineFunction = true;
+
+ Tok = Tok->Next;
+ }
+ }
+ if (Style.DerivePointerAlignment)
+ Style.PointerAlignment = countVariableAlignments(AnnotatedLines) <= 0
+ ? FormatStyle::PAS_Left
+ : FormatStyle::PAS_Right;
+ if (Style.Standard == FormatStyle::LS_Auto)
+ Style.Standard = hasCpp03IncompatibleFormat(AnnotatedLines)
+ ? FormatStyle::LS_Cpp11
+ : FormatStyle::LS_Cpp03;
+ BinPackInconclusiveFunctions =
+ HasBinPackedFunction || !HasOnePerLineFunction;
+ }
+
+ void consumeUnwrappedLine(const UnwrappedLine &TheLine) override {
+ assert(!UnwrappedLines.empty());
+ UnwrappedLines.back().push_back(TheLine);
+ }
+
+ void finishRun() override {
+ UnwrappedLines.push_back(SmallVector<UnwrappedLine, 16>());
+ }
+
+ FormatStyle Style;
+ FileID ID;
+ SourceManager &SourceMgr;
+ WhitespaceManager Whitespaces;
+ SmallVector<CharSourceRange, 8> Ranges;
+ SmallVector<SmallVector<UnwrappedLine, 16>, 2> UnwrappedLines;
+
+ encoding::Encoding Encoding;
+ bool BinPackInconclusiveFunctions;
+};
+
+struct IncludeDirective {
+ StringRef Filename;
+ StringRef Text;
+ unsigned Offset;
+ int Category;
+};
+
+} // end anonymous namespace
+
+// Determines whether 'Ranges' intersects with the half-open interval
+// [Start, End).
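+// Overlap test on half-open intervals: [Offset, Offset + Length) and
+// [Start, End) intersect iff each one begins before the other ends.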
+static bool affectsRange(ArrayRef<tooling::Range> Ranges, unsigned Start,
+ unsigned End) {
+ for (auto Range : Ranges) {
+ if (Range.getOffset() < End &&
+ Range.getOffset() + Range.getLength() > Start)
+ return true;
+ }
+ return false;
+}
+
+// Sorts a block of includes given by 'Includes' alphabetically, adding the
+// necessary replacement to 'Replaces'. 'Includes' must be in strict source
+// order.
+static void sortIncludes(const FormatStyle &Style,
+ const SmallVectorImpl<IncludeDirective> &Includes,
+ ArrayRef<tooling::Range> Ranges, StringRef FileName,
+ tooling::Replacements &Replaces, unsigned *Cursor) {
+ if (!affectsRange(Ranges, Includes.front().Offset,
+ Includes.back().Offset + Includes.back().Text.size()))
+ return;
+ SmallVector<unsigned, 16> Indices;
+ for (unsigned i = 0, e = Includes.size(); i != e; ++i)
+ Indices.push_back(i);
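+  // Sort by (Category, Filename); e.g. the main #include (Category 0) sorts
+  // before all other includes, and filenames only break ties within a
+  // category.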
+ std::sort(Indices.begin(), Indices.end(), [&](unsigned LHSI, unsigned RHSI) {
+ return std::tie(Includes[LHSI].Category, Includes[LHSI].Filename) <
+ std::tie(Includes[RHSI].Category, Includes[RHSI].Filename);
+ });
+
+ // If the #includes are out of order, we generate a single replacement fixing
+ // the entire block. Otherwise, no replacement is generated.
+ bool OutOfOrder = false;
+ for (unsigned i = 1, e = Indices.size(); i != e; ++i) {
+ if (Indices[i] != i) {
+ OutOfOrder = true;
+ break;
+ }
+ }
+ if (!OutOfOrder)
+ return;
+
+ std::string result;
+ bool CursorMoved = false;
+ for (unsigned Index : Indices) {
+ if (!result.empty())
+ result += "\n";
+ result += Includes[Index].Text;
+
+ if (Cursor && !CursorMoved) {
+ unsigned Start = Includes[Index].Offset;
+ unsigned End = Start + Includes[Index].Text.size();
+ if (*Cursor >= Start && *Cursor < End) {
+ *Cursor = Includes.front().Offset + result.size() + *Cursor - End;
+ CursorMoved = true;
+ }
+ }
+ }
+
+ // Sorting #includes shouldn't change their total number of characters.
+ // This would otherwise mess up 'Ranges'.
+ assert(result.size() ==
+ Includes.back().Offset + Includes.back().Text.size() -
+ Includes.front().Offset);
+
+ Replaces.insert(tooling::Replacement(FileName, Includes.front().Offset,
+ result.size(), result));
+}
+
+tooling::Replacements sortIncludes(const FormatStyle &Style, StringRef Code,
+ ArrayRef<tooling::Range> Ranges,
+ StringRef FileName, unsigned *Cursor) {
+ tooling::Replacements Replaces;
+ if (!Style.SortIncludes)
+ return Replaces;
+
+ unsigned Prev = 0;
+ unsigned SearchFrom = 0;
+ llvm::Regex IncludeRegex(
+ R"(^[\t\ ]*#[\t\ ]*(import|include)[^"<]*(["<][^">]*[">]))");
+ SmallVector<StringRef, 4> Matches;
+ SmallVector<IncludeDirective, 16> IncludesInBlock;
+
+  // In source files (as opposed to headers), consider the first #include to
+  // be the main #include of the file if it is not a system #include. This
+  // ensures that the header doesn't have hidden dependencies
+  // (http://llvm.org/docs/CodingStandards.html#include-style).
+ //
+ // FIXME: Do some sanity checking, e.g. edit distance of the base name, to fix
+ // cases where the first #include is unlikely to be the main header.
+ bool IsSource = FileName.endswith(".c") || FileName.endswith(".cc") ||
+ FileName.endswith(".cpp") || FileName.endswith(".c++") ||
+ FileName.endswith(".cxx") || FileName.endswith(".m") ||
+ FileName.endswith(".mm");
+ StringRef FileStem = llvm::sys::path::stem(FileName);
+ bool FirstIncludeBlock = true;
+ bool MainIncludeFound = false;
+
+ // Create pre-compiled regular expressions for the #include categories.
+ SmallVector<llvm::Regex, 4> CategoryRegexs;
+ for (const auto &Category : Style.IncludeCategories)
+ CategoryRegexs.emplace_back(Category.Regex);
+
+ bool FormattingOff = false;
+
+ for (;;) {
+ auto Pos = Code.find('\n', SearchFrom);
+ StringRef Line =
+ Code.substr(Prev, (Pos != StringRef::npos ? Pos : Code.size()) - Prev);
+
+ StringRef Trimmed = Line.trim();
+ if (Trimmed == "// clang-format off")
+ FormattingOff = true;
+ else if (Trimmed == "// clang-format on")
+ FormattingOff = false;
+
+ if (!FormattingOff && !Line.endswith("\\")) {
+ if (IncludeRegex.match(Line, &Matches)) {
+ StringRef IncludeName = Matches[2];
+ int Category = INT_MAX;
+ for (unsigned i = 0, e = CategoryRegexs.size(); i != e; ++i) {
+ if (CategoryRegexs[i].match(IncludeName)) {
+ Category = Style.IncludeCategories[i].Priority;
+ break;
+ }
+ }
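+        // E.g. when formatting foo.cpp, '#include "foo.h"' is recognized as
+        // the main include and moved into Category 0, so it sorts first.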
+ if (IsSource && !MainIncludeFound && Category > 0 &&
+ FirstIncludeBlock && IncludeName.startswith("\"")) {
+ StringRef HeaderStem =
+ llvm::sys::path::stem(IncludeName.drop_front(1).drop_back(1));
+ if (FileStem.startswith(HeaderStem)) {
+ Category = 0;
+ MainIncludeFound = true;
+ }
+ }
+ IncludesInBlock.push_back({IncludeName, Line, Prev, Category});
+ } else if (!IncludesInBlock.empty()) {
+ sortIncludes(Style, IncludesInBlock, Ranges, FileName, Replaces,
+ Cursor);
+ IncludesInBlock.clear();
+ FirstIncludeBlock = false;
+ }
+ Prev = Pos + 1;
+ }
+ if (Pos == StringRef::npos || Pos + 1 == Code.size())
+ break;
+ SearchFrom = Pos + 1;
+ }
+ if (!IncludesInBlock.empty())
+ sortIncludes(Style, IncludesInBlock, Ranges, FileName, Replaces, Cursor);
+ return Replaces;
+}
+
+tooling::Replacements reformat(const FormatStyle &Style,
+ SourceManager &SourceMgr, FileID ID,
+ ArrayRef<CharSourceRange> Ranges,
+ bool *IncompleteFormat) {
+ FormatStyle Expanded = expandPresets(Style);
+ if (Expanded.DisableFormat)
+ return tooling::Replacements();
+ Formatter formatter(Expanded, SourceMgr, ID, Ranges);
+ return formatter.format(IncompleteFormat);
+}
+
+tooling::Replacements reformat(const FormatStyle &Style, StringRef Code,
+ ArrayRef<tooling::Range> Ranges,
+ StringRef FileName, bool *IncompleteFormat) {
+ if (Style.DisableFormat)
+ return tooling::Replacements();
+
+ IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
+ new vfs::InMemoryFileSystem);
+ FileManager Files(FileSystemOptions(), InMemoryFileSystem);
+ DiagnosticsEngine Diagnostics(
+ IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs),
+ new DiagnosticOptions);
+ SourceManager SourceMgr(Diagnostics, Files);
+ InMemoryFileSystem->addFile(FileName, 0,
+ llvm::MemoryBuffer::getMemBuffer(Code, FileName));
+ FileID ID = SourceMgr.createFileID(Files.getFile(FileName), SourceLocation(),
+ clang::SrcMgr::C_User);
+ SourceLocation StartOfFile = SourceMgr.getLocForStartOfFile(ID);
+ std::vector<CharSourceRange> CharRanges;
+ for (const tooling::Range &Range : Ranges) {
+ SourceLocation Start = StartOfFile.getLocWithOffset(Range.getOffset());
+ SourceLocation End = Start.getLocWithOffset(Range.getLength());
+ CharRanges.push_back(CharSourceRange::getCharRange(Start, End));
+ }
+ return reformat(Style, SourceMgr, ID, CharRanges, IncompleteFormat);
+}
+
+LangOptions getFormattingLangOpts(const FormatStyle &Style) {
+ LangOptions LangOpts;
+ LangOpts.CPlusPlus = 1;
+ LangOpts.CPlusPlus11 = Style.Standard == FormatStyle::LS_Cpp03 ? 0 : 1;
+ LangOpts.CPlusPlus14 = Style.Standard == FormatStyle::LS_Cpp03 ? 0 : 1;
+ LangOpts.LineComment = 1;
+ bool AlternativeOperators = Style.Language == FormatStyle::LK_Cpp;
+ LangOpts.CXXOperatorNames = AlternativeOperators ? 1 : 0;
+ LangOpts.Bool = 1;
+ LangOpts.ObjC1 = 1;
+ LangOpts.ObjC2 = 1;
+ LangOpts.MicrosoftExt = 1; // To get kw___try, kw___finally.
+ LangOpts.DeclSpecKeyword = 1; // To get __declspec.
+ return LangOpts;
+}
+
+const char *StyleOptionHelpDescription =
+ "Coding style, currently supports:\n"
+ " LLVM, Google, Chromium, Mozilla, WebKit.\n"
+ "Use -style=file to load style configuration from\n"
+ ".clang-format file located in one of the parent\n"
+ "directories of the source file (or current\n"
+ "directory for stdin).\n"
+ "Use -style=\"{key: value, ...}\" to set specific\n"
+ "parameters, e.g.:\n"
+ " -style=\"{BasedOnStyle: llvm, IndentWidth: 8}\"";
+
+static FormatStyle::LanguageKind getLanguageByFileName(StringRef FileName) {
+ if (FileName.endswith(".java"))
+ return FormatStyle::LK_Java;
+ if (FileName.endswith_lower(".js") || FileName.endswith_lower(".ts"))
+ return FormatStyle::LK_JavaScript; // JavaScript or TypeScript.
+ if (FileName.endswith_lower(".proto") ||
+ FileName.endswith_lower(".protodevel"))
+ return FormatStyle::LK_Proto;
+ if (FileName.endswith_lower(".td"))
+ return FormatStyle::LK_TableGen;
+ return FormatStyle::LK_Cpp;
+}
+
+FormatStyle getStyle(StringRef StyleName, StringRef FileName,
+ StringRef FallbackStyle) {
+ FormatStyle Style = getLLVMStyle();
+ Style.Language = getLanguageByFileName(FileName);
+ if (!getPredefinedStyle(FallbackStyle, Style.Language, &Style)) {
+ llvm::errs() << "Invalid fallback style \"" << FallbackStyle
+ << "\" using LLVM style\n";
+ return Style;
+ }
+
+ if (StyleName.startswith("{")) {
+ // Parse YAML/JSON style from the command line.
+ if (std::error_code ec = parseConfiguration(StyleName, &Style)) {
+ llvm::errs() << "Error parsing -style: " << ec.message() << ", using "
+ << FallbackStyle << " style\n";
+ }
+ return Style;
+ }
+
+ if (!StyleName.equals_lower("file")) {
+ if (!getPredefinedStyle(StyleName, Style.Language, &Style))
+ llvm::errs() << "Invalid value for -style, using " << FallbackStyle
+ << " style\n";
+ return Style;
+ }
+
+ // Look for .clang-format/_clang-format file in the file's parent directories.
+ SmallString<128> UnsuitableConfigFiles;
+ SmallString<128> Path(FileName);
+ llvm::sys::fs::make_absolute(Path);
+ for (StringRef Directory = Path; !Directory.empty();
+ Directory = llvm::sys::path::parent_path(Directory)) {
+ if (!llvm::sys::fs::is_directory(Directory))
+ continue;
+ SmallString<128> ConfigFile(Directory);
+
+ llvm::sys::path::append(ConfigFile, ".clang-format");
+ DEBUG(llvm::dbgs() << "Trying " << ConfigFile << "...\n");
+ bool IsFile = false;
+ // Ignore errors from is_regular_file: we only need to know if we can read
+ // the file or not.
+ llvm::sys::fs::is_regular_file(Twine(ConfigFile), IsFile);
+
+ if (!IsFile) {
+ // Try _clang-format too, since dotfiles are not commonly used on Windows.
+ ConfigFile = Directory;
+ llvm::sys::path::append(ConfigFile, "_clang-format");
+ DEBUG(llvm::dbgs() << "Trying " << ConfigFile << "...\n");
+ llvm::sys::fs::is_regular_file(Twine(ConfigFile), IsFile);
+ }
+
+ if (IsFile) {
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Text =
+ llvm::MemoryBuffer::getFile(ConfigFile.c_str());
+ if (std::error_code EC = Text.getError()) {
+ llvm::errs() << EC.message() << "\n";
+ break;
+ }
+ if (std::error_code ec =
+ parseConfiguration(Text.get()->getBuffer(), &Style)) {
+ if (ec == ParseError::Unsuitable) {
+ if (!UnsuitableConfigFiles.empty())
+ UnsuitableConfigFiles.append(", ");
+ UnsuitableConfigFiles.append(ConfigFile);
+ continue;
+ }
+ llvm::errs() << "Error reading " << ConfigFile << ": " << ec.message()
+ << "\n";
+ break;
+ }
+ DEBUG(llvm::dbgs() << "Using configuration file " << ConfigFile << "\n");
+ return Style;
+ }
+ }
+ if (!UnsuitableConfigFiles.empty()) {
+ llvm::errs() << "Configuration file(s) do(es) not support "
+ << getLanguageName(Style.Language) << ": "
+ << UnsuitableConfigFiles << "\n";
+ }
+ return Style;
+}
+
+} // namespace format
+} // namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Format/FormatToken.cpp b/contrib/llvm/tools/clang/lib/Format/FormatToken.cpp
new file mode 100644
index 0000000..d6cd450
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Format/FormatToken.cpp
@@ -0,0 +1,300 @@
+//===--- FormatToken.cpp - Format C++ code --------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements specific functions of \c FormatTokens and their
+/// roles.
+///
+//===----------------------------------------------------------------------===//
+
+#include "ContinuationIndenter.h"
+#include "FormatToken.h"
+#include "clang/Format/Format.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Debug.h"
+#include <climits>
+
+namespace clang {
+namespace format {
+
+const char *getTokenTypeName(TokenType Type) {
+ static const char *const TokNames[] = {
+#define TYPE(X) #X,
+LIST_TOKEN_TYPES
+#undef TYPE
+ nullptr
+ };
+
+ if (Type < NUM_TOKEN_TYPES)
+ return TokNames[Type];
+ llvm_unreachable("unknown TokenType");
+ return nullptr;
+}
+
+// FIXME: This is copy&pasted from Sema. Put it in a common place and remove
+// duplication.
+bool FormatToken::isSimpleTypeSpecifier() const {
+ switch (Tok.getKind()) {
+ case tok::kw_short:
+ case tok::kw_long:
+ case tok::kw___int64:
+ case tok::kw___int128:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw_void:
+ case tok::kw_char:
+ case tok::kw_int:
+ case tok::kw_half:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_wchar_t:
+ case tok::kw_bool:
+ case tok::kw___underlying_type:
+ case tok::annot_typename:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+ case tok::kw_typeof:
+ case tok::kw_decltype:
+ return true;
+ default:
+ return false;
+ }
+}
+
+TokenRole::~TokenRole() {}
+
+void TokenRole::precomputeFormattingInfos(const FormatToken *Token) {}
+
+unsigned CommaSeparatedList::formatAfterToken(LineState &State,
+ ContinuationIndenter *Indenter,
+ bool DryRun) {
+ if (State.NextToken == nullptr || !State.NextToken->Previous)
+ return 0;
+
+ // Ensure that we start on the opening brace.
+ const FormatToken *LBrace =
+ State.NextToken->Previous->getPreviousNonComment();
+ if (!LBrace || !LBrace->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) ||
+ LBrace->BlockKind == BK_Block || LBrace->Type == TT_DictLiteral ||
+ LBrace->Next->Type == TT_DesignatedInitializerPeriod)
+ return 0;
+
+ // Calculate the number of code points we have to format this list. As the
+ // first token is already placed, we have to subtract it.
+ unsigned RemainingCodePoints =
+ Style.ColumnLimit - State.Column + State.NextToken->Previous->ColumnWidth;
+
+ // Find the best ColumnFormat, i.e. the best number of columns to use.
+ const ColumnFormat *Format = getColumnFormat(RemainingCodePoints);
+ // If no ColumnFormat can be used, the braced list would generally be
+ // bin-packed. Add a severe penalty to this so that column layouts are
+ // preferred if possible.
+ if (!Format)
+ return 10000;
+
+ // Format the entire list.
+ unsigned Penalty = 0;
+ unsigned Column = 0;
+ unsigned Item = 0;
+ while (State.NextToken != LBrace->MatchingParen) {
+ bool NewLine = false;
+ unsigned ExtraSpaces = 0;
+
+ // If the previous token was one of our commas, we are now on the next item.
+ if (Item < Commas.size() && State.NextToken->Previous == Commas[Item]) {
+ if (!State.NextToken->isTrailingComment()) {
+ ExtraSpaces += Format->ColumnSizes[Column] - ItemLengths[Item];
+ ++Column;
+ }
+ ++Item;
+ }
+
+ if (Column == Format->Columns || State.NextToken->MustBreakBefore) {
+ Column = 0;
+ NewLine = true;
+ }
+
+ // Place token using the continuation indenter and store the penalty.
+ Penalty += Indenter->addTokenToState(State, NewLine, DryRun, ExtraSpaces);
+ }
+ return Penalty;
+}
+
+unsigned CommaSeparatedList::formatFromToken(LineState &State,
+ ContinuationIndenter *Indenter,
+ bool DryRun) {
+ if (HasNestedBracedList)
+ State.Stack.back().AvoidBinPacking = true;
+ return 0;
+}
+
+// Returns the length in code points between Begin and End (both included),
+// assuming that the entire sequence is put on a single line.
+static unsigned CodePointsBetween(const FormatToken *Begin,
+ const FormatToken *End) {
+ assert(End->TotalLength >= Begin->TotalLength);
+ return End->TotalLength - Begin->TotalLength + Begin->ColumnWidth;
+}
+
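+// Precomputes the possible column layouts for a braced list. For example,
+// {aaaa, bb, cccccc, dd, e, ffff} may be laid out in three columns whose
+// widths are the per-column maxima computed below.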
+void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
+ // FIXME: At some point we might want to do this for other lists, too.
+ if (!Token->MatchingParen ||
+ !Token->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare))
+ return;
+
+ // In C++11 braced list style, we should not format in columns unless they
+ // have many items (20 or more) or we allow bin-packing of function call
+ // arguments.
+ if (Style.Cpp11BracedListStyle && !Style.BinPackArguments &&
+ Commas.size() < 19)
+ return;
+
+  // For now, limit column layout for JavaScript array initializers to 20 or
+  // more items in order to introduce it carefully. We can become more
+  // aggressive if this proves necessary.
+ if (Token->is(TT_ArrayInitializerLSquare) && Commas.size() < 19)
+ return;
+
+ // Column format doesn't really make sense if we don't align after brackets.
+ if (Style.AlignAfterOpenBracket == FormatStyle::BAS_DontAlign)
+ return;
+
+ FormatToken *ItemBegin = Token->Next;
+ while (ItemBegin->isTrailingComment())
+ ItemBegin = ItemBegin->Next;
+ SmallVector<bool, 8> MustBreakBeforeItem;
+
+  // The length of each item if it is put at the end of the line. This includes
+  // trailing comments, which are otherwise ignored for column alignment.
+ SmallVector<unsigned, 8> EndOfLineItemLength;
+
+ bool HasSeparatingComment = false;
+ for (unsigned i = 0, e = Commas.size() + 1; i != e; ++i) {
+ // Skip comments on their own line.
+ while (ItemBegin->HasUnescapedNewline && ItemBegin->isTrailingComment()) {
+ ItemBegin = ItemBegin->Next;
+ HasSeparatingComment = i > 0;
+ }
+
+ MustBreakBeforeItem.push_back(ItemBegin->MustBreakBefore);
+ if (ItemBegin->is(tok::l_brace))
+ HasNestedBracedList = true;
+ const FormatToken *ItemEnd = nullptr;
+ if (i == Commas.size()) {
+ ItemEnd = Token->MatchingParen;
+ const FormatToken *NonCommentEnd = ItemEnd->getPreviousNonComment();
+ ItemLengths.push_back(CodePointsBetween(ItemBegin, NonCommentEnd));
+ if (Style.Cpp11BracedListStyle &&
+ !ItemEnd->Previous->isTrailingComment()) {
+ // In Cpp11 braced list style, the } and possibly other subsequent
+ // tokens will need to stay on a line with the last element.
+ while (ItemEnd->Next && !ItemEnd->Next->CanBreakBefore)
+ ItemEnd = ItemEnd->Next;
+ } else {
+      // In other braced list styles, the "}" can be wrapped to a new line.
+ ItemEnd = Token->MatchingParen->Previous;
+ }
+ } else {
+ ItemEnd = Commas[i];
+ // The comma is counted as part of the item when calculating the length.
+ ItemLengths.push_back(CodePointsBetween(ItemBegin, ItemEnd));
+
+      // Consume trailing comments so they are included in EndOfLineItemLength.
+ if (ItemEnd->Next && !ItemEnd->Next->HasUnescapedNewline &&
+ ItemEnd->Next->isTrailingComment())
+ ItemEnd = ItemEnd->Next;
+ }
+ EndOfLineItemLength.push_back(CodePointsBetween(ItemBegin, ItemEnd));
+ // If there is a trailing comma in the list, the next item will start at the
+ // closing brace. Don't create an extra item for this.
+ if (ItemEnd->getNextNonComment() == Token->MatchingParen)
+ break;
+ ItemBegin = ItemEnd->Next;
+ }
+
+  // Don't use column layout for lists with few elements or in the presence
+  // of separating comments.
+ if (Commas.size() < 5 || HasSeparatingComment)
+ return;
+
+ if (Token->NestingLevel != 0 && Token->is(tok::l_brace) && Commas.size() < 19)
+ return;
+
+ // We can never place more than ColumnLimit / 3 items in a row (because of the
+ // spaces and the comma).
+ unsigned MaxItems = Style.ColumnLimit / 3;
+ std::vector<unsigned> MinSizeInColumn;
+ MinSizeInColumn.reserve(MaxItems);
+ for (unsigned Columns = 1; Columns <= MaxItems; ++Columns) {
+ ColumnFormat Format;
+ Format.Columns = Columns;
+ Format.ColumnSizes.resize(Columns);
+ MinSizeInColumn.assign(Columns, UINT_MAX);
+ Format.LineCount = 1;
+ bool HasRowWithSufficientColumns = false;
+ unsigned Column = 0;
+ for (unsigned i = 0, e = ItemLengths.size(); i != e; ++i) {
+ assert(i < MustBreakBeforeItem.size());
+ if (MustBreakBeforeItem[i] || Column == Columns) {
+ ++Format.LineCount;
+ Column = 0;
+ }
+ if (Column == Columns - 1)
+ HasRowWithSufficientColumns = true;
+ unsigned Length =
+ (Column == Columns - 1) ? EndOfLineItemLength[i] : ItemLengths[i];
+ Format.ColumnSizes[Column] = std::max(Format.ColumnSizes[Column], Length);
+ MinSizeInColumn[Column] = std::min(MinSizeInColumn[Column], Length);
+ ++Column;
+ }
+ // If all rows are terminated early (e.g. by trailing comments), we don't
+ // need to look further.
+ if (!HasRowWithSufficientColumns)
+ break;
+ Format.TotalWidth = Columns - 1; // Width of the N-1 spaces.
+
+ for (unsigned i = 0; i < Columns; ++i)
+ Format.TotalWidth += Format.ColumnSizes[i];
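+    // E.g. three columns of widths {4, 2, 6} give a TotalWidth of
+    // 4 + 2 + 6 + 2 = 14, as the item widths already include the trailing
+    // comma and two single spaces separate the three columns.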
+
+    // Don't use this Format if the difference between the longest and shortest
+    // element in a column exceeds a threshold, to avoid excessive spaces.
+ if ([&] {
+ for (unsigned i = 0; i < Columns - 1; ++i)
+ if (Format.ColumnSizes[i] - MinSizeInColumn[i] > 10)
+ return true;
+ return false;
+ }())
+ continue;
+
+ // Ignore layouts that are bound to violate the column limit.
+ if (Format.TotalWidth > Style.ColumnLimit)
+ continue;
+
+ Formats.push_back(Format);
+ }
+}
+
+const CommaSeparatedList::ColumnFormat *
+CommaSeparatedList::getColumnFormat(unsigned RemainingCharacters) const {
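+  // Scan from the format with the most columns down to the fewest, preferring
+  // a narrower fitting format as long as it does not need more lines; stop as
+  // soon as a fitting candidate would need more lines than the best so far.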
+ const ColumnFormat *BestFormat = nullptr;
+ for (SmallVector<ColumnFormat, 4>::const_reverse_iterator
+ I = Formats.rbegin(),
+ E = Formats.rend();
+ I != E; ++I) {
+ if (I->TotalWidth <= RemainingCharacters) {
+ if (BestFormat && I->LineCount > BestFormat->LineCount)
+ break;
+ BestFormat = &*I;
+ }
+ }
+ return BestFormat;
+}
+
+} // namespace format
+} // namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Format/FormatToken.h b/contrib/llvm/tools/clang/lib/Format/FormatToken.h
new file mode 100644
index 0000000..b683660
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Format/FormatToken.h
@@ -0,0 +1,621 @@
+//===--- FormatToken.h - Format C++ code ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file contains the declaration of the FormatToken, a wrapper
+/// around Token with additional information related to formatting.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_FORMAT_FORMATTOKEN_H
+#define LLVM_CLANG_LIB_FORMAT_FORMATTOKEN_H
+
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/OperatorPrecedence.h"
+#include "clang/Format/Format.h"
+#include "clang/Lex/Lexer.h"
+#include <memory>
+
+namespace clang {
+namespace format {
+
+#define LIST_TOKEN_TYPES \
+ TYPE(ArrayInitializerLSquare) \
+ TYPE(ArraySubscriptLSquare) \
+ TYPE(AttributeParen) \
+ TYPE(BinaryOperator) \
+ TYPE(BitFieldColon) \
+ TYPE(BlockComment) \
+ TYPE(CastRParen) \
+ TYPE(ConditionalExpr) \
+ TYPE(ConflictAlternative) \
+ TYPE(ConflictEnd) \
+ TYPE(ConflictStart) \
+ TYPE(CtorInitializerColon) \
+ TYPE(CtorInitializerComma) \
+ TYPE(DesignatedInitializerPeriod) \
+ TYPE(DictLiteral) \
+ TYPE(ForEachMacro) \
+ TYPE(FunctionAnnotationRParen) \
+ TYPE(FunctionDeclarationName) \
+ TYPE(FunctionLBrace) \
+ TYPE(FunctionTypeLParen) \
+ TYPE(ImplicitStringLiteral) \
+ TYPE(InheritanceColon) \
+ TYPE(InlineASMBrace) \
+ TYPE(InlineASMColon) \
+ TYPE(JavaAnnotation) \
+ TYPE(JsComputedPropertyName) \
+ TYPE(JsFatArrow) \
+ TYPE(JsTypeColon) \
+ TYPE(JsTypeOptionalQuestion) \
+ TYPE(LambdaArrow) \
+ TYPE(LambdaLSquare) \
+ TYPE(LeadingJavaAnnotation) \
+ TYPE(LineComment) \
+ TYPE(MacroBlockBegin) \
+ TYPE(MacroBlockEnd) \
+ TYPE(ObjCBlockLBrace) \
+ TYPE(ObjCBlockLParen) \
+ TYPE(ObjCDecl) \
+ TYPE(ObjCForIn) \
+ TYPE(ObjCMethodExpr) \
+ TYPE(ObjCMethodSpecifier) \
+ TYPE(ObjCProperty) \
+ TYPE(ObjCStringLiteral) \
+ TYPE(OverloadedOperator) \
+ TYPE(OverloadedOperatorLParen) \
+ TYPE(PointerOrReference) \
+ TYPE(PureVirtualSpecifier) \
+ TYPE(RangeBasedForLoopColon) \
+ TYPE(RegexLiteral) \
+ TYPE(SelectorName) \
+ TYPE(StartOfName) \
+ TYPE(TemplateCloser) \
+ TYPE(TemplateOpener) \
+ TYPE(TemplateString) \
+ TYPE(TrailingAnnotation) \
+ TYPE(TrailingReturnArrow) \
+ TYPE(TrailingUnaryOperator) \
+ TYPE(UnaryOperator) \
+ TYPE(Unknown)
+
+enum TokenType {
+#define TYPE(X) TT_##X,
+LIST_TOKEN_TYPES
+#undef TYPE
+ NUM_TOKEN_TYPES
+};
+
+/// \brief Determines the name of a token type.
+const char *getTokenTypeName(TokenType Type);
+
+// Represents what type of block a set of braces opens.
+enum BraceBlockKind { BK_Unknown, BK_Block, BK_BracedInit };
+
+// The packing kind of a function's parameters.
+enum ParameterPackingKind { PPK_BinPacked, PPK_OnePerLine, PPK_Inconclusive };
+
+enum FormatDecision { FD_Unformatted, FD_Continue, FD_Break };
+
+class TokenRole;
+class AnnotatedLine;
+
+/// \brief A wrapper around a \c Token storing information about the
+/// whitespace characters preceding it.
+struct FormatToken {
+ FormatToken() {}
+
+ /// \brief The \c Token.
+ Token Tok;
+
+ /// \brief The number of newlines immediately before the \c Token.
+ ///
+ /// This can be used to determine what the user wrote in the original code
+ /// and thereby e.g. leave an empty line between two function definitions.
+ unsigned NewlinesBefore = 0;
+
+ /// \brief Whether there is at least one unescaped newline before the \c
+ /// Token.
+ bool HasUnescapedNewline = false;
+
+ /// \brief The range of the whitespace immediately preceding the \c Token.
+ SourceRange WhitespaceRange;
+
+ /// \brief The offset just past the last '\n' in this token's leading
+  /// whitespace (relative to the start of \c WhitespaceRange). 0 if there is
+  /// no '\n'.
+ unsigned LastNewlineOffset = 0;
+
+ /// \brief The width of the non-whitespace parts of the token (or its first
+ /// line for multi-line tokens) in columns.
+ /// We need this to correctly measure number of columns a token spans.
+ unsigned ColumnWidth = 0;
+
+ /// \brief Contains the width in columns of the last line of a multi-line
+ /// token.
+ unsigned LastLineColumnWidth = 0;
+
+ /// \brief Whether the token text contains newlines (escaped or not).
+ bool IsMultiline = false;
+
+ /// \brief Indicates that this is the first token.
+ bool IsFirst = false;
+
+ /// \brief Whether there must be a line break before this token.
+ ///
+ /// This happens for example when a preprocessor directive ended directly
+ /// before the token.
+ bool MustBreakBefore = false;
+
+ /// \brief The raw text of the token.
+ ///
+ /// Contains the raw token text without leading whitespace and without leading
+ /// escaped newlines.
+ StringRef TokenText;
+
+ /// \brief Set to \c true if this token is an unterminated literal.
+  bool IsUnterminatedLiteral = false;
+
+ /// \brief Contains the kind of block if this token is a brace.
+ BraceBlockKind BlockKind = BK_Unknown;
+
+ TokenType Type = TT_Unknown;
+
+ /// \brief The number of spaces that should be inserted before this token.
+ unsigned SpacesRequiredBefore = 0;
+
+ /// \brief \c true if it is allowed to break before this token.
+ bool CanBreakBefore = false;
+
+ /// \brief \c true if this is the ">" of "template<..>".
+ bool ClosesTemplateDeclaration = false;
+
+ /// \brief Number of parameters, if this is "(", "[" or "<".
+ ///
+  /// This is set to 1 once the first token of the list is seen and is then
+  /// incremented for each comma, so for non-empty lists it equals the number
+  /// of parameters.
+ unsigned ParameterCount = 0;
+
+ /// \brief Number of parameters that are nested blocks,
+ /// if this is "(", "[" or "<".
+ unsigned BlockParameterCount = 0;
+
+ /// \brief If this is a bracket ("<", "(", "[" or "{"), contains the kind of
+ /// the surrounding bracket.
+ tok::TokenKind ParentBracket = tok::unknown;
+
+ /// \brief A token can have a special role that can carry extra information
+ /// about the token's formatting.
+ std::unique_ptr<TokenRole> Role;
+
+ /// \brief If this is an opening parenthesis, how are the parameters packed?
+ ParameterPackingKind PackingKind = PPK_Inconclusive;
+
+ /// \brief The total length of the unwrapped line up to and including this
+ /// token.
+ unsigned TotalLength = 0;
+
+ /// \brief The original 0-based column of this token, including expanded tabs.
+ /// The configured TabWidth is used as tab width.
+ unsigned OriginalColumn = 0;
+
+ /// \brief The length of following tokens until the next natural split point,
+ /// or the next token that can be broken.
+ unsigned UnbreakableTailLength = 0;
+
+ // FIXME: Come up with a 'cleaner' concept.
+ /// \brief The binding strength of a token. This is a combined value of
+ /// operator precedence, parenthesis nesting, etc.
+ unsigned BindingStrength = 0;
+
+ /// \brief The nesting level of this token, i.e. the number of surrounding (),
+ /// [], {} or <>.
+ unsigned NestingLevel = 0;
+
+ /// \brief Penalty for inserting a line break before this token.
+ unsigned SplitPenalty = 0;
+
+ /// \brief If this is the first ObjC selector name in an ObjC method
+ /// definition or call, this contains the length of the longest name.
+ ///
+ /// This being set to 0 means that the selectors should not be colon-aligned,
+ /// e.g. because several of them are block-type.
+ unsigned LongestObjCSelectorName = 0;
+
+ /// \brief Stores the number of required fake parentheses and the
+ /// corresponding operator precedence.
+ ///
+ /// If multiple fake parentheses start at a token, this vector stores them in
+ /// reverse order, i.e. inner fake parenthesis first.
+ SmallVector<prec::Level, 4> FakeLParens;
+ /// \brief Insert this many fake ) after this token for correct indentation.
+ unsigned FakeRParens = 0;
+
+ /// \brief \c true if this token starts a binary expression, i.e. has at least
+ /// one fake l_paren with a precedence greater than prec::Unknown.
+ bool StartsBinaryExpression = false;
+ /// \brief \c true if this token ends a binary expression.
+ bool EndsBinaryExpression = false;
+
+  /// \brief If this is an operator (or "."/"->") in a sequence of operators
+ /// with the same precedence, contains the 0-based operator index.
+ unsigned OperatorIndex = 0;
+
+ /// \brief If this is an operator (or "."/"->") in a sequence of operators
+ /// with the same precedence, points to the next operator.
+ FormatToken *NextOperator = nullptr;
+
+ /// \brief Is this token part of a \c DeclStmt defining multiple variables?
+ ///
+ /// Only set if \c Type == \c TT_StartOfName.
+ bool PartOfMultiVariableDeclStmt = false;
+
+ /// \brief If this is a bracket, this points to the matching one.
+ FormatToken *MatchingParen = nullptr;
+
+ /// \brief The previous token in the unwrapped line.
+ FormatToken *Previous = nullptr;
+
+ /// \brief The next token in the unwrapped line.
+ FormatToken *Next = nullptr;
+
+ /// \brief If this token starts a block, this contains all the unwrapped lines
+ /// in it.
+ SmallVector<AnnotatedLine *, 1> Children;
+
+ /// \brief Stores the formatting decision for the token once it was made.
+ FormatDecision Decision = FD_Unformatted;
+
+ /// \brief If \c true, this token has been fully formatted (indented and
+ /// potentially re-formatted inside), and we do not allow further formatting
+ /// changes.
+ bool Finalized = false;
+
+ bool is(tok::TokenKind Kind) const { return Tok.is(Kind); }
+ bool is(TokenType TT) const { return Type == TT; }
+ bool is(const IdentifierInfo *II) const {
+ return II && II == Tok.getIdentifierInfo();
+ }
+ bool is(tok::PPKeywordKind Kind) const {
+ return Tok.getIdentifierInfo() &&
+ Tok.getIdentifierInfo()->getPPKeywordID() == Kind;
+ }
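+  // E.g. Tok.isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) mixes token
+  // kinds and token types in a single check.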
+ template <typename A, typename B> bool isOneOf(A K1, B K2) const {
+ return is(K1) || is(K2);
+ }
+ template <typename A, typename B, typename... Ts>
+ bool isOneOf(A K1, B K2, Ts... Ks) const {
+ return is(K1) || isOneOf(K2, Ks...);
+ }
+ template <typename T> bool isNot(T Kind) const { return !is(Kind); }
+
+ bool isStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); }
+
+ bool isObjCAtKeyword(tok::ObjCKeywordKind Kind) const {
+ return Tok.isObjCAtKeyword(Kind);
+ }
+
+ bool isAccessSpecifier(bool ColonRequired = true) const {
+ return isOneOf(tok::kw_public, tok::kw_protected, tok::kw_private) &&
+ (!ColonRequired || (Next && Next->is(tok::colon)));
+ }
+
+ /// \brief Determine whether the token is a simple-type-specifier.
+ bool isSimpleTypeSpecifier() const;
+
+ bool isObjCAccessSpecifier() const {
+ return is(tok::at) && Next && (Next->isObjCAtKeyword(tok::objc_public) ||
+ Next->isObjCAtKeyword(tok::objc_protected) ||
+ Next->isObjCAtKeyword(tok::objc_package) ||
+ Next->isObjCAtKeyword(tok::objc_private));
+ }
+
+  /// \brief Returns whether this token is ([{ or a template opening <.
+ bool opensScope() const {
+ return isOneOf(tok::l_paren, tok::l_brace, tok::l_square,
+ TT_TemplateOpener);
+ }
+  /// \brief Returns whether this token is )]} or a template closing >.
+ bool closesScope() const {
+ return isOneOf(tok::r_paren, tok::r_brace, tok::r_square,
+ TT_TemplateCloser);
+ }
+
+ /// \brief Returns \c true if this is a "." or "->" accessing a member.
+ bool isMemberAccess() const {
+ return isOneOf(tok::arrow, tok::period, tok::arrowstar) &&
+ !isOneOf(TT_DesignatedInitializerPeriod, TT_TrailingReturnArrow,
+ TT_LambdaArrow);
+ }
+
+ bool isUnaryOperator() const {
+ switch (Tok.getKind()) {
+ case tok::plus:
+ case tok::plusplus:
+ case tok::minus:
+ case tok::minusminus:
+ case tok::exclaim:
+ case tok::tilde:
+ case tok::kw_sizeof:
+ case tok::kw_alignof:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isBinaryOperator() const {
+ // Comma is a binary operator, but does not behave as such wrt. formatting.
+ return getPrecedence() > prec::Comma;
+ }
+
+ bool isTrailingComment() const {
+ return is(tok::comment) &&
+ (is(TT_LineComment) || !Next || Next->NewlinesBefore > 0);
+ }
+
+ /// \brief Returns \c true if this is a keyword that can be used
+ /// like a function call (e.g. sizeof, typeid, ...).
+ bool isFunctionLikeKeyword() const {
+ switch (Tok.getKind()) {
+ case tok::kw_throw:
+ case tok::kw_typeid:
+ case tok::kw_return:
+ case tok::kw_sizeof:
+ case tok::kw_alignof:
+ case tok::kw_alignas:
+ case tok::kw_decltype:
+ case tok::kw_noexcept:
+ case tok::kw_static_assert:
+ case tok::kw___attribute:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /// \brief Returns actual token start location without leading escaped
+ /// newlines and whitespace.
+ ///
+  /// This can be different from Tok.getLocation(), which includes leading
+  /// escaped newlines.
+ SourceLocation getStartOfNonWhitespace() const {
+ return WhitespaceRange.getEnd();
+ }
+
+ prec::Level getPrecedence() const {
+ return getBinOpPrecedence(Tok.getKind(), true, true);
+ }
+
+ /// \brief Returns the previous token ignoring comments.
+ FormatToken *getPreviousNonComment() const {
+ FormatToken *Tok = Previous;
+ while (Tok && Tok->is(tok::comment))
+ Tok = Tok->Previous;
+ return Tok;
+ }
+
+ /// \brief Returns the next token ignoring comments.
+ const FormatToken *getNextNonComment() const {
+ const FormatToken *Tok = Next;
+ while (Tok && Tok->is(tok::comment))
+ Tok = Tok->Next;
+ return Tok;
+ }
+
+  /// \brief Returns \c true if this token starts a block-type list, i.e. a
+ /// list that should be indented with a block indent.
+ bool opensBlockOrBlockTypeList(const FormatStyle &Style) const {
+ return is(TT_ArrayInitializerLSquare) ||
+ (is(tok::l_brace) &&
+ (BlockKind == BK_Block || is(TT_DictLiteral) ||
+ (!Style.Cpp11BracedListStyle && NestingLevel == 0)));
+ }
+
+ /// \brief Same as opensBlockOrBlockTypeList, but for the closing token.
+ bool closesBlockOrBlockTypeList(const FormatStyle &Style) const {
+ return MatchingParen && MatchingParen->opensBlockOrBlockTypeList(Style);
+ }
+
+private:
+ // Disallow copying.
+ FormatToken(const FormatToken &) = delete;
+ void operator=(const FormatToken &) = delete;
+};
+
+class ContinuationIndenter;
+struct LineState;
+
+class TokenRole {
+public:
+ TokenRole(const FormatStyle &Style) : Style(Style) {}
+ virtual ~TokenRole();
+
+ /// \brief After the \c TokenAnnotator has finished annotating all the tokens,
+ /// this function precomputes required information for formatting.
+ virtual void precomputeFormattingInfos(const FormatToken *Token);
+
+ /// \brief Apply the special formatting that the given role demands.
+ ///
+ /// Assumes that the token having this role is already formatted.
+ ///
+ /// Continues formatting from \p State leaving indentation to \p Indenter and
+ /// returns the total penalty that this formatting incurs.
+ virtual unsigned formatFromToken(LineState &State,
+ ContinuationIndenter *Indenter,
+ bool DryRun) {
+ return 0;
+ }
+
+ /// \brief Same as \c formatFromToken, but assumes that the first token has
+ /// already been set thereby deciding on the first line break.
+ virtual unsigned formatAfterToken(LineState &State,
+ ContinuationIndenter *Indenter,
+ bool DryRun) {
+ return 0;
+ }
+
+ /// \brief Notifies the \c Role that a comma was found.
+ virtual void CommaFound(const FormatToken *Token) {}
+
+protected:
+ const FormatStyle &Style;
+};
+
+class CommaSeparatedList : public TokenRole {
+public:
+ CommaSeparatedList(const FormatStyle &Style)
+ : TokenRole(Style), HasNestedBracedList(false) {}
+
+ void precomputeFormattingInfos(const FormatToken *Token) override;
+
+ unsigned formatAfterToken(LineState &State, ContinuationIndenter *Indenter,
+ bool DryRun) override;
+
+ unsigned formatFromToken(LineState &State, ContinuationIndenter *Indenter,
+ bool DryRun) override;
+
+ /// \brief Adds \p Token as the next comma to the \c CommaSeparated list.
+ void CommaFound(const FormatToken *Token) override {
+ Commas.push_back(Token);
+ }
+
+private:
+ /// \brief A struct that holds information on how to format a given list with
+ /// a specific number of columns.
+ struct ColumnFormat {
+ /// \brief The number of columns to use.
+ unsigned Columns;
+
+ /// \brief The total width in characters.
+ unsigned TotalWidth;
+
+ /// \brief The number of lines required for this format.
+ unsigned LineCount;
+
+ /// \brief The size of each column in characters.
+ SmallVector<unsigned, 8> ColumnSizes;
+ };
+
+ /// \brief Calculate which \c ColumnFormat fits best into
+ /// \p RemainingCharacters.
+ const ColumnFormat *getColumnFormat(unsigned RemainingCharacters) const;
+
+ /// \brief The ordered \c FormatTokens making up the commas of this list.
+ SmallVector<const FormatToken *, 8> Commas;
+
+ /// \brief The length of each of the list's items in characters including the
+ /// trailing comma.
+ SmallVector<unsigned, 8> ItemLengths;
+
+ /// \brief Precomputed formats that can be used for this list.
+ SmallVector<ColumnFormat, 4> Formats;
+
+ bool HasNestedBracedList;
+};
+
+/// \brief Encapsulates keywords that are context-sensitive or for languages not
+/// properly supported by Clang's lexer.
+struct AdditionalKeywords {
+ AdditionalKeywords(IdentifierTable &IdentTable) {
+ kw_final = &IdentTable.get("final");
+ kw_override = &IdentTable.get("override");
+ kw_in = &IdentTable.get("in");
+ kw_CF_ENUM = &IdentTable.get("CF_ENUM");
+ kw_CF_OPTIONS = &IdentTable.get("CF_OPTIONS");
+ kw_NS_ENUM = &IdentTable.get("NS_ENUM");
+ kw_NS_OPTIONS = &IdentTable.get("NS_OPTIONS");
+
+ kw_finally = &IdentTable.get("finally");
+ kw_function = &IdentTable.get("function");
+ kw_import = &IdentTable.get("import");
+ kw_is = &IdentTable.get("is");
+ kw_let = &IdentTable.get("let");
+ kw_var = &IdentTable.get("var");
+
+ kw_abstract = &IdentTable.get("abstract");
+ kw_assert = &IdentTable.get("assert");
+ kw_extends = &IdentTable.get("extends");
+ kw_implements = &IdentTable.get("implements");
+ kw_instanceof = &IdentTable.get("instanceof");
+ kw_interface = &IdentTable.get("interface");
+ kw_native = &IdentTable.get("native");
+ kw_package = &IdentTable.get("package");
+ kw_synchronized = &IdentTable.get("synchronized");
+ kw_throws = &IdentTable.get("throws");
+ kw___except = &IdentTable.get("__except");
+
+ kw_mark = &IdentTable.get("mark");
+
+ kw_extend = &IdentTable.get("extend");
+ kw_option = &IdentTable.get("option");
+ kw_optional = &IdentTable.get("optional");
+ kw_repeated = &IdentTable.get("repeated");
+ kw_required = &IdentTable.get("required");
+ kw_returns = &IdentTable.get("returns");
+
+ kw_signals = &IdentTable.get("signals");
+ kw_qsignals = &IdentTable.get("Q_SIGNALS");
+ kw_slots = &IdentTable.get("slots");
+ kw_qslots = &IdentTable.get("Q_SLOTS");
+ }
+
+ // Context sensitive keywords.
+ IdentifierInfo *kw_final;
+ IdentifierInfo *kw_override;
+ IdentifierInfo *kw_in;
+ IdentifierInfo *kw_CF_ENUM;
+ IdentifierInfo *kw_CF_OPTIONS;
+ IdentifierInfo *kw_NS_ENUM;
+ IdentifierInfo *kw_NS_OPTIONS;
+ IdentifierInfo *kw___except;
+
+ // JavaScript keywords.
+ IdentifierInfo *kw_finally;
+ IdentifierInfo *kw_function;
+ IdentifierInfo *kw_import;
+ IdentifierInfo *kw_is;
+ IdentifierInfo *kw_let;
+ IdentifierInfo *kw_var;
+
+ // Java keywords.
+ IdentifierInfo *kw_abstract;
+ IdentifierInfo *kw_assert;
+ IdentifierInfo *kw_extends;
+ IdentifierInfo *kw_implements;
+ IdentifierInfo *kw_instanceof;
+ IdentifierInfo *kw_interface;
+ IdentifierInfo *kw_native;
+ IdentifierInfo *kw_package;
+ IdentifierInfo *kw_synchronized;
+ IdentifierInfo *kw_throws;
+
+ // Pragma keywords.
+ IdentifierInfo *kw_mark;
+
+ // Proto keywords.
+ IdentifierInfo *kw_extend;
+ IdentifierInfo *kw_option;
+ IdentifierInfo *kw_optional;
+ IdentifierInfo *kw_repeated;
+ IdentifierInfo *kw_required;
+ IdentifierInfo *kw_returns;
+
+  // Qt keywords.
+ IdentifierInfo *kw_signals;
+ IdentifierInfo *kw_qsignals;
+ IdentifierInfo *kw_slots;
+ IdentifierInfo *kw_qslots;
+};
+
+} // namespace format
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Format/TokenAnnotator.cpp b/contrib/llvm/tools/clang/lib/Format/TokenAnnotator.cpp
new file mode 100644
index 0000000..caff131
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Format/TokenAnnotator.cpp
@@ -0,0 +1,2412 @@
+//===--- TokenAnnotator.cpp - Format C++ code -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements a token annotator, i.e. creates
+/// \c AnnotatedTokens out of \c FormatTokens with required extra information.
+///
+//===----------------------------------------------------------------------===//
+
+#include "TokenAnnotator.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "format-token-annotator"
+
+namespace clang {
+namespace format {
+
+namespace {
+
+/// \brief A parser that gathers additional information about tokens.
+///
+/// The \c TokenAnnotator tries to match parentheses and square brackets and
+/// stores parenthesis levels. It also tries to resolve matching "<" and ">"
+/// into template parameter lists.
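+///
+/// For example, it decides that the "<" in "vector<int>" opens a template
+/// parameter list, while the "<" in "a < b && c > d" is a comparison.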
+class AnnotatingParser {
+public:
+ AnnotatingParser(const FormatStyle &Style, AnnotatedLine &Line,
+ const AdditionalKeywords &Keywords)
+ : Style(Style), Line(Line), CurrentToken(Line.First), AutoFound(false),
+ Keywords(Keywords) {
+ Contexts.push_back(Context(tok::unknown, 1, /*IsExpression=*/false));
+ resetTokenMetadata(CurrentToken);
+ }
+
+private:
+ bool parseAngle() {
+ if (!CurrentToken)
+ return false;
+ FormatToken *Left = CurrentToken->Previous;
+ Left->ParentBracket = Contexts.back().ContextKind;
+ ScopedContextCreator ContextCreator(*this, tok::less, 10);
+
+ // If this angle is in the context of an expression, we need to be more
+ // hesitant to detect it as opening template parameters.
+ bool InExprContext = Contexts.back().IsExpression;
+
+ Contexts.back().IsExpression = false;
+ // If there's a template keyword before the opening angle bracket, this is a
+ // template parameter, not an argument.
+ Contexts.back().InTemplateArgument =
+ Left->Previous && Left->Previous->Tok.isNot(tok::kw_template);
+
+ if (Style.Language == FormatStyle::LK_Java &&
+ CurrentToken->is(tok::question))
+ next();
+
+ while (CurrentToken) {
+ if (CurrentToken->is(tok::greater)) {
+ Left->MatchingParen = CurrentToken;
+ CurrentToken->MatchingParen = Left;
+ CurrentToken->Type = TT_TemplateCloser;
+ next();
+ return true;
+ }
+ if (CurrentToken->is(tok::question) &&
+ Style.Language == FormatStyle::LK_Java) {
+ next();
+ continue;
+ }
+ if (CurrentToken->isOneOf(tok::r_paren, tok::r_square, tok::r_brace) ||
+ (CurrentToken->isOneOf(tok::colon, tok::question) && InExprContext))
+ return false;
+ // If a && or || is found and interpreted as a binary operator, this set
+ // of angles is likely part of something like "a < b && c > d". If the
+ // angles are inside an expression, the ||/&& might also be a binary
+ // operator that was misinterpreted because we are parsing template
+ // parameters.
+ // FIXME: This is getting out of hand, write a decent parser.
+ if (CurrentToken->Previous->isOneOf(tok::pipepipe, tok::ampamp) &&
+ CurrentToken->Previous->is(TT_BinaryOperator) &&
+ Contexts[Contexts.size() - 2].IsExpression &&
+ !Line.startsWith(tok::kw_template))
+ return false;
+ updateParameterCount(Left, CurrentToken);
+ if (!consumeToken())
+ return false;
+ }
+ return false;
+ }
+
+ bool parseParens(bool LookForDecls = false) {
+ if (!CurrentToken)
+ return false;
+ FormatToken *Left = CurrentToken->Previous;
+ Left->ParentBracket = Contexts.back().ContextKind;
+ ScopedContextCreator ContextCreator(*this, tok::l_paren, 1);
+
+ // FIXME: This is a bit of a hack. Do better.
+ Contexts.back().ColonIsForRangeExpr =
+ Contexts.size() == 2 && Contexts[0].ColonIsForRangeExpr;
+
+ bool StartsObjCMethodExpr = false;
+ if (CurrentToken->is(tok::caret)) {
+ // (^ can start a block type.
+ Left->Type = TT_ObjCBlockLParen;
+ } else if (FormatToken *MaybeSel = Left->Previous) {
+ // @selector( starts a selector.
+ if (MaybeSel->isObjCAtKeyword(tok::objc_selector) && MaybeSel->Previous &&
+ MaybeSel->Previous->is(tok::at)) {
+ StartsObjCMethodExpr = true;
+ }
+ }
+
+ if (Left->Previous &&
+ (Left->Previous->isOneOf(tok::kw_static_assert, tok::kw_decltype,
+ tok::kw_if, tok::kw_while, tok::l_paren,
+ tok::comma) ||
+ Left->Previous->is(TT_BinaryOperator))) {
+ // static_assert, if and while usually contain expressions.
+ Contexts.back().IsExpression = true;
+ } else if (Left->Previous && Left->Previous->is(tok::r_square) &&
+ Left->Previous->MatchingParen &&
+ Left->Previous->MatchingParen->is(TT_LambdaLSquare)) {
+ // This is a parameter list of a lambda expression.
+ Contexts.back().IsExpression = false;
+ } else if (Line.InPPDirective &&
+ (!Left->Previous ||
+ !Left->Previous->isOneOf(tok::identifier,
+ TT_OverloadedOperator))) {
+ Contexts.back().IsExpression = true;
+ } else if (Contexts[Contexts.size() - 2].CaretFound) {
+ // This is the parameter list of an ObjC block.
+ Contexts.back().IsExpression = false;
+ } else if (Left->Previous && Left->Previous->is(tok::kw___attribute)) {
+ Left->Type = TT_AttributeParen;
+ } else if (Left->Previous && Left->Previous->is(TT_ForEachMacro)) {
+ // The first argument to a foreach macro is a declaration.
+ Contexts.back().IsForEachMacro = true;
+ Contexts.back().IsExpression = false;
+ } else if (Left->Previous && Left->Previous->MatchingParen &&
+ Left->Previous->MatchingParen->is(TT_ObjCBlockLParen)) {
+ Contexts.back().IsExpression = false;
+ } else if (!Line.MustBeDeclaration && !Line.InPPDirective) {
+ bool IsForOrCatch =
+ Left->Previous && Left->Previous->isOneOf(tok::kw_for, tok::kw_catch);
+ Contexts.back().IsExpression = !IsForOrCatch;
+ }
+
+ if (StartsObjCMethodExpr) {
+ Contexts.back().ColonIsObjCMethodExpr = true;
+ Left->Type = TT_ObjCMethodExpr;
+ }
+
+ bool MightBeFunctionType = CurrentToken->isOneOf(tok::star, tok::amp) &&
+ !Contexts[Contexts.size() - 2].IsExpression;
+ bool HasMultipleLines = false;
+ bool HasMultipleParametersOnALine = false;
+ bool MightBeObjCForRangeLoop =
+ Left->Previous && Left->Previous->is(tok::kw_for);
+ while (CurrentToken) {
+      // LookForDecls is set when "if (" has been seen. Check for
+      // 'identifier' '*' 'identifier' followed by a token other than '=' --
+      // this '*' has to be a binary operator, but determineStarAmpUsage()
+      // would categorize it as a unary operator, so set the right type here.
+ if (LookForDecls && CurrentToken->Next) {
+ FormatToken *Prev = CurrentToken->getPreviousNonComment();
+ if (Prev) {
+ FormatToken *PrevPrev = Prev->getPreviousNonComment();
+ FormatToken *Next = CurrentToken->Next;
+ if (PrevPrev && PrevPrev->is(tok::identifier) &&
+ Prev->isOneOf(tok::star, tok::amp, tok::ampamp) &&
+ CurrentToken->is(tok::identifier) && Next->isNot(tok::equal)) {
+ Prev->Type = TT_BinaryOperator;
+ LookForDecls = false;
+ }
+ }
+ }
+
+ if (CurrentToken->Previous->is(TT_PointerOrReference) &&
+ CurrentToken->Previous->Previous->isOneOf(tok::l_paren,
+ tok::coloncolon))
+ MightBeFunctionType = true;
+ if (CurrentToken->Previous->is(TT_BinaryOperator))
+ Contexts.back().IsExpression = true;
+ if (CurrentToken->is(tok::r_paren)) {
+ if (MightBeFunctionType && CurrentToken->Next &&
+ (CurrentToken->Next->is(tok::l_paren) ||
+ (CurrentToken->Next->is(tok::l_square) &&
+ Line.MustBeDeclaration)))
+ Left->Type = TT_FunctionTypeLParen;
+ Left->MatchingParen = CurrentToken;
+ CurrentToken->MatchingParen = Left;
+
+ if (StartsObjCMethodExpr) {
+ CurrentToken->Type = TT_ObjCMethodExpr;
+ if (Contexts.back().FirstObjCSelectorName) {
+ Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName =
+ Contexts.back().LongestObjCSelectorName;
+ }
+ }
+
+ if (Left->is(TT_AttributeParen))
+ CurrentToken->Type = TT_AttributeParen;
+ if (Left->Previous && Left->Previous->is(TT_JavaAnnotation))
+ CurrentToken->Type = TT_JavaAnnotation;
+ if (Left->Previous && Left->Previous->is(TT_LeadingJavaAnnotation))
+ CurrentToken->Type = TT_LeadingJavaAnnotation;
+
+ if (!HasMultipleLines)
+ Left->PackingKind = PPK_Inconclusive;
+ else if (HasMultipleParametersOnALine)
+ Left->PackingKind = PPK_BinPacked;
+ else
+ Left->PackingKind = PPK_OnePerLine;
+
+ next();
+ return true;
+ }
+ if (CurrentToken->isOneOf(tok::r_square, tok::r_brace))
+ return false;
+
+ if (CurrentToken->is(tok::l_brace))
+ Left->Type = TT_Unknown; // Not TT_ObjCBlockLParen
+ if (CurrentToken->is(tok::comma) && CurrentToken->Next &&
+ !CurrentToken->Next->HasUnescapedNewline &&
+ !CurrentToken->Next->isTrailingComment())
+ HasMultipleParametersOnALine = true;
+ if (CurrentToken->isOneOf(tok::kw_const, tok::kw_auto) ||
+ CurrentToken->isSimpleTypeSpecifier())
+ Contexts.back().IsExpression = false;
+ if (CurrentToken->isOneOf(tok::semi, tok::colon))
+ MightBeObjCForRangeLoop = false;
+ if (MightBeObjCForRangeLoop && CurrentToken->is(Keywords.kw_in))
+ CurrentToken->Type = TT_ObjCForIn;
+ // When we discover a 'new', we set CanBeExpression to 'false' in order to
+ // parse the type correctly. Reset that after a comma.
+ if (CurrentToken->is(tok::comma))
+ Contexts.back().CanBeExpression = true;
+
+ FormatToken *Tok = CurrentToken;
+ if (!consumeToken())
+ return false;
+ updateParameterCount(Left, Tok);
+ if (CurrentToken && CurrentToken->HasUnescapedNewline)
+ HasMultipleLines = true;
+ }
+ return false;
+ }
+
+ bool parseSquare() {
+ if (!CurrentToken)
+ return false;
+
+ // A '[' could be an index subscript (after an identifier or after
+ // ')' or ']'), it could be the start of an Objective-C method
+    // expression, or it could be the start of an Objective-C array literal.
+ FormatToken *Left = CurrentToken->Previous;
+ Left->ParentBracket = Contexts.back().ContextKind;
+ FormatToken *Parent = Left->getPreviousNonComment();
+ bool StartsObjCMethodExpr =
+ Style.Language == FormatStyle::LK_Cpp &&
+ Contexts.back().CanBeExpression && Left->isNot(TT_LambdaLSquare) &&
+ CurrentToken->isNot(tok::l_brace) &&
+ (!Parent ||
+ Parent->isOneOf(tok::colon, tok::l_square, tok::l_paren,
+ tok::kw_return, tok::kw_throw) ||
+ Parent->isUnaryOperator() ||
+ Parent->isOneOf(TT_ObjCForIn, TT_CastRParen) ||
+ getBinOpPrecedence(Parent->Tok.getKind(), true, true) > prec::Unknown);
+ bool ColonFound = false;
+
+ unsigned BindingIncrease = 1;
+ if (Left->is(TT_Unknown)) {
+ if (StartsObjCMethodExpr) {
+ Left->Type = TT_ObjCMethodExpr;
+ } else if (Style.Language == FormatStyle::LK_JavaScript && Parent &&
+ Contexts.back().ContextKind == tok::l_brace &&
+ Parent->isOneOf(tok::l_brace, tok::comma)) {
+ Left->Type = TT_JsComputedPropertyName;
+ } else if (Style.Language == FormatStyle::LK_Proto ||
+ (Parent &&
+ Parent->isOneOf(TT_BinaryOperator, tok::at, tok::comma,
+ tok::l_paren, tok::l_square, tok::question,
+ tok::colon, tok::kw_return,
+ // Should only be relevant to JavaScript:
+ tok::kw_default))) {
+ Left->Type = TT_ArrayInitializerLSquare;
+ } else {
+ BindingIncrease = 10;
+ Left->Type = TT_ArraySubscriptLSquare;
+ }
+ }
+
+ ScopedContextCreator ContextCreator(*this, tok::l_square, BindingIncrease);
+ Contexts.back().IsExpression = true;
+ Contexts.back().ColonIsObjCMethodExpr = StartsObjCMethodExpr;
+
+ while (CurrentToken) {
+ if (CurrentToken->is(tok::r_square)) {
+ if (CurrentToken->Next && CurrentToken->Next->is(tok::l_paren) &&
+ Left->is(TT_ObjCMethodExpr)) {
+ // An ObjC method call is rarely followed by an open parenthesis.
+ // FIXME: Do we incorrectly label ":" with this?
+ StartsObjCMethodExpr = false;
+ Left->Type = TT_Unknown;
+ }
+ if (StartsObjCMethodExpr && CurrentToken->Previous != Left) {
+ CurrentToken->Type = TT_ObjCMethodExpr;
+ // determineStarAmpUsage() thinks that '*' '[' is allocating an
+ // array of pointers, but if '[' starts a selector then '*' is a
+ // binary operator.
+ if (Parent && Parent->is(TT_PointerOrReference))
+ Parent->Type = TT_BinaryOperator;
+ }
+ Left->MatchingParen = CurrentToken;
+ CurrentToken->MatchingParen = Left;
+ if (Contexts.back().FirstObjCSelectorName) {
+ Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName =
+ Contexts.back().LongestObjCSelectorName;
+ if (Left->BlockParameterCount > 1)
+ Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName = 0;
+ }
+ next();
+ return true;
+ }
+ if (CurrentToken->isOneOf(tok::r_paren, tok::r_brace))
+ return false;
+ if (CurrentToken->is(tok::colon)) {
+ if (Left->is(TT_ArraySubscriptLSquare)) {
+ Left->Type = TT_ObjCMethodExpr;
+ StartsObjCMethodExpr = true;
+ Contexts.back().ColonIsObjCMethodExpr = true;
+ if (Parent && Parent->is(tok::r_paren))
+ Parent->Type = TT_CastRParen;
+ }
+ ColonFound = true;
+ }
+ if (CurrentToken->is(tok::comma) && Left->is(TT_ObjCMethodExpr) &&
+ !ColonFound)
+ Left->Type = TT_ArrayInitializerLSquare;
+ FormatToken *Tok = CurrentToken;
+ if (!consumeToken())
+ return false;
+ updateParameterCount(Left, Tok);
+ }
+ return false;
+ }
+
+ bool parseBrace() {
+ if (CurrentToken) {
+ FormatToken *Left = CurrentToken->Previous;
+ Left->ParentBracket = Contexts.back().ContextKind;
+
+ if (Contexts.back().CaretFound)
+ Left->Type = TT_ObjCBlockLBrace;
+ Contexts.back().CaretFound = false;
+
+ ScopedContextCreator ContextCreator(*this, tok::l_brace, 1);
+ Contexts.back().ColonIsDictLiteral = true;
+ if (Left->BlockKind == BK_BracedInit)
+ Contexts.back().IsExpression = true;
+
+ while (CurrentToken) {
+ if (CurrentToken->is(tok::r_brace)) {
+ Left->MatchingParen = CurrentToken;
+ CurrentToken->MatchingParen = Left;
+ next();
+ return true;
+ }
+ if (CurrentToken->isOneOf(tok::r_paren, tok::r_square))
+ return false;
+ updateParameterCount(Left, CurrentToken);
+ if (CurrentToken->isOneOf(tok::colon, tok::l_brace)) {
+ FormatToken *Previous = CurrentToken->getPreviousNonComment();
+ if (((CurrentToken->is(tok::colon) &&
+ (!Contexts.back().ColonIsDictLiteral ||
+ Style.Language != FormatStyle::LK_Cpp)) ||
+ Style.Language == FormatStyle::LK_Proto) &&
+ Previous->Tok.getIdentifierInfo())
+ Previous->Type = TT_SelectorName;
+ if (CurrentToken->is(tok::colon) ||
+ Style.Language == FormatStyle::LK_JavaScript)
+ Left->Type = TT_DictLiteral;
+ }
+ if (!consumeToken())
+ return false;
+ }
+ }
+ return true;
+ }
+
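+  /// \brief Updates the parameter count of \p Left, the token opening the
+  /// current scope, as \p Current is consumed: every comma delimits another
+  /// parameter, and an l_brace that does not start a dictionary literal
+  /// counts as a block parameter.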
+ void updateParameterCount(FormatToken *Left, FormatToken *Current) {
+ if (Current->is(tok::l_brace) && !Current->is(TT_DictLiteral))
+ ++Left->BlockParameterCount;
+ if (Current->is(tok::comma)) {
+ ++Left->ParameterCount;
+ if (!Left->Role)
+ Left->Role.reset(new CommaSeparatedList(Style));
+ Left->Role->CommaFound(Current);
+ } else if (Left->ParameterCount == 0 && Current->isNot(tok::comment)) {
+ Left->ParameterCount = 1;
+ }
+ }
+
+ bool parseConditional() {
+ while (CurrentToken) {
+ if (CurrentToken->is(tok::colon)) {
+ CurrentToken->Type = TT_ConditionalExpr;
+ next();
+ return true;
+ }
+ if (!consumeToken())
+ return false;
+ }
+ return false;
+ }
+
+ bool parseTemplateDeclaration() {
+ if (CurrentToken && CurrentToken->is(tok::less)) {
+ CurrentToken->Type = TT_TemplateOpener;
+ next();
+ if (!parseAngle())
+ return false;
+ if (CurrentToken)
+ CurrentToken->Previous->ClosesTemplateDeclaration = true;
+ return true;
+ }
+ return false;
+ }
+
+ bool consumeToken() {
+ FormatToken *Tok = CurrentToken;
+ next();
+ switch (Tok->Tok.getKind()) {
+ case tok::plus:
+ case tok::minus:
+ if (!Tok->Previous && Line.MustBeDeclaration)
+ Tok->Type = TT_ObjCMethodSpecifier;
+ break;
+ case tok::colon:
+ if (!Tok->Previous)
+ return false;
+ // Colons from ?: are handled in parseConditional().
+ if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Contexts.back().ColonIsForRangeExpr || // colon in for loop
+ (Contexts.size() == 1 && // switch/case labels
+ !Line.First->isOneOf(tok::kw_enum, tok::kw_case)) ||
+ Contexts.back().ContextKind == tok::l_paren || // function params
+ Contexts.back().ContextKind == tok::l_square || // array type
+ (Contexts.size() == 1 &&
+ Line.MustBeDeclaration)) { // method/property declaration
+ Tok->Type = TT_JsTypeColon;
+ break;
+ }
+ }
+ if (Contexts.back().ColonIsDictLiteral ||
+ Style.Language == FormatStyle::LK_Proto) {
+ Tok->Type = TT_DictLiteral;
+ } else if (Contexts.back().ColonIsObjCMethodExpr ||
+ Line.startsWith(TT_ObjCMethodSpecifier)) {
+ Tok->Type = TT_ObjCMethodExpr;
+ Tok->Previous->Type = TT_SelectorName;
+ if (Tok->Previous->ColumnWidth >
+ Contexts.back().LongestObjCSelectorName)
+ Contexts.back().LongestObjCSelectorName = Tok->Previous->ColumnWidth;
+ if (!Contexts.back().FirstObjCSelectorName)
+ Contexts.back().FirstObjCSelectorName = Tok->Previous;
+ } else if (Contexts.back().ColonIsForRangeExpr) {
+ Tok->Type = TT_RangeBasedForLoopColon;
+ } else if (CurrentToken && CurrentToken->is(tok::numeric_constant)) {
+ Tok->Type = TT_BitFieldColon;
+ } else if (Contexts.size() == 1 &&
+ !Line.First->isOneOf(tok::kw_enum, tok::kw_case)) {
+ if (Tok->Previous->is(tok::r_paren))
+ Tok->Type = TT_CtorInitializerColon;
+ else
+ Tok->Type = TT_InheritanceColon;
+ } else if (Tok->Previous->is(tok::identifier) && Tok->Next &&
+ Tok->Next->isOneOf(tok::r_paren, tok::comma)) {
+ // This handles a special macro in ObjC code where selectors including
+ // the colon are passed as macro arguments.
+ Tok->Type = TT_ObjCMethodExpr;
+ } else if (Contexts.back().ContextKind == tok::l_paren) {
+ Tok->Type = TT_InlineASMColon;
+ }
+ break;
+ case tok::kw_if:
+ case tok::kw_while:
+ if (CurrentToken && CurrentToken->is(tok::l_paren)) {
+ next();
+ if (!parseParens(/*LookForDecls=*/true))
+ return false;
+ }
+ break;
+ case tok::kw_for:
+ Contexts.back().ColonIsForRangeExpr = true;
+ next();
+ if (!parseParens())
+ return false;
+ break;
+ case tok::l_paren:
+ // When faced with 'operator()()', the kw_operator handler incorrectly
+      // marks the first l_paren as an OverloadedOperatorLParen. Here, we make
+ // the first two parens OverloadedOperators and the second l_paren an
+ // OverloadedOperatorLParen.
+ if (Tok->Previous &&
+ Tok->Previous->is(tok::r_paren) &&
+ Tok->Previous->MatchingParen &&
+ Tok->Previous->MatchingParen->is(TT_OverloadedOperatorLParen)) {
+ Tok->Previous->Type = TT_OverloadedOperator;
+ Tok->Previous->MatchingParen->Type = TT_OverloadedOperator;
+ Tok->Type = TT_OverloadedOperatorLParen;
+ }
+
+ if (!parseParens())
+ return false;
+ if (Line.MustBeDeclaration && Contexts.size() == 1 &&
+ !Contexts.back().IsExpression && !Line.startsWith(TT_ObjCProperty) &&
+ (!Tok->Previous ||
+ !Tok->Previous->isOneOf(tok::kw_decltype, tok::kw___attribute,
+ TT_LeadingJavaAnnotation)))
+ Line.MightBeFunctionDecl = true;
+ break;
+ case tok::l_square:
+ if (!parseSquare())
+ return false;
+ break;
+ case tok::l_brace:
+ if (!parseBrace())
+ return false;
+ break;
+ case tok::less:
+ if (!NonTemplateLess.count(Tok) &&
+ (!Tok->Previous ||
+ (!Tok->Previous->Tok.isLiteral() &&
+ !(Tok->Previous->is(tok::r_paren) && Contexts.size() > 1))) &&
+ parseAngle()) {
+ Tok->Type = TT_TemplateOpener;
+ } else {
+ Tok->Type = TT_BinaryOperator;
+ NonTemplateLess.insert(Tok);
+ CurrentToken = Tok;
+ next();
+ }
+ break;
+ case tok::r_paren:
+ case tok::r_square:
+ return false;
+ case tok::r_brace:
+ // Lines can start with '}'.
+ if (Tok->Previous)
+ return false;
+ break;
+ case tok::greater:
+ Tok->Type = TT_BinaryOperator;
+ break;
+ case tok::kw_operator:
+ while (CurrentToken &&
+ !CurrentToken->isOneOf(tok::l_paren, tok::semi, tok::r_paren)) {
+ if (CurrentToken->isOneOf(tok::star, tok::amp))
+ CurrentToken->Type = TT_PointerOrReference;
+ consumeToken();
+ if (CurrentToken && CurrentToken->Previous->is(TT_BinaryOperator))
+ CurrentToken->Previous->Type = TT_OverloadedOperator;
+ }
+ if (CurrentToken) {
+ CurrentToken->Type = TT_OverloadedOperatorLParen;
+ if (CurrentToken->Previous->is(TT_BinaryOperator))
+ CurrentToken->Previous->Type = TT_OverloadedOperator;
+ }
+ break;
+ case tok::question:
+ if (Style.Language == FormatStyle::LK_JavaScript && Tok->Next &&
+ Tok->Next->isOneOf(tok::semi, tok::comma, tok::colon, tok::r_paren,
+ tok::r_brace)) {
+ // Question marks before semicolons, colons, etc. indicate optional
+ // types (fields, parameters), e.g.
+ // function(x?: string, y?) {...}
+ // class X { y?; }
+ Tok->Type = TT_JsTypeOptionalQuestion;
+ break;
+ }
+      // Declarations cannot be conditional expressions; this can only be part
+ // of a type declaration.
+ if (Line.MustBeDeclaration &&
+ Style.Language == FormatStyle::LK_JavaScript)
+ break;
+ parseConditional();
+ break;
+ case tok::kw_template:
+ parseTemplateDeclaration();
+ break;
+ case tok::comma:
+ if (Contexts.back().InCtorInitializer)
+ Tok->Type = TT_CtorInitializerComma;
+ else if (Contexts.back().FirstStartOfName &&
+ (Contexts.size() == 1 || Line.startsWith(tok::kw_for))) {
+ Contexts.back().FirstStartOfName->PartOfMultiVariableDeclStmt = true;
+ Line.IsMultiVariableDeclStmt = true;
+ }
+ if (Contexts.back().IsForEachMacro)
+ Contexts.back().IsExpression = true;
+ break;
+ default:
+ break;
+ }
+ return true;
+ }
+
+ void parseIncludeDirective() {
+ if (CurrentToken && CurrentToken->is(tok::less)) {
+ next();
+ while (CurrentToken) {
+ if (CurrentToken->isNot(tok::comment) || CurrentToken->Next)
+ CurrentToken->Type = TT_ImplicitStringLiteral;
+ next();
+ }
+ }
+ }
+
+ void parseWarningOrError() {
+ next();
+ // We still want to format the whitespace left of the first token of the
+ // warning or error.
+ next();
+ while (CurrentToken) {
+ CurrentToken->Type = TT_ImplicitStringLiteral;
+ next();
+ }
+ }
+
+ void parsePragma() {
+ next(); // Consume "pragma".
+ if (CurrentToken &&
+ CurrentToken->isOneOf(Keywords.kw_mark, Keywords.kw_option)) {
+ bool IsMark = CurrentToken->is(Keywords.kw_mark);
+      next(); // Consume "mark" or "option".
+ next(); // Consume first token (so we fix leading whitespace).
+ while (CurrentToken) {
+ if (IsMark || CurrentToken->Previous->is(TT_BinaryOperator))
+ CurrentToken->Type = TT_ImplicitStringLiteral;
+ next();
+ }
+ }
+ }
+
+ LineType parsePreprocessorDirective() {
+ LineType Type = LT_PreprocessorDirective;
+ next();
+ if (!CurrentToken)
+ return Type;
+ if (CurrentToken->Tok.is(tok::numeric_constant)) {
+ CurrentToken->SpacesRequiredBefore = 1;
+ return Type;
+ }
+ // Hashes in the middle of a line can lead to any strange token
+ // sequence.
+ if (!CurrentToken->Tok.getIdentifierInfo())
+ return Type;
+ switch (CurrentToken->Tok.getIdentifierInfo()->getPPKeywordID()) {
+ case tok::pp_include:
+ case tok::pp_include_next:
+ case tok::pp_import:
+ next();
+ parseIncludeDirective();
+ Type = LT_ImportStatement;
+ break;
+ case tok::pp_error:
+ case tok::pp_warning:
+ parseWarningOrError();
+ break;
+ case tok::pp_pragma:
+ parsePragma();
+ break;
+ case tok::pp_if:
+ case tok::pp_elif:
+ Contexts.back().IsExpression = true;
+ parseLine();
+ break;
+ default:
+ break;
+ }
+ while (CurrentToken)
+ next();
+ return Type;
+ }
+
+public:
+ LineType parseLine() {
+ NonTemplateLess.clear();
+ if (CurrentToken->is(tok::hash))
+ return parsePreprocessorDirective();
+
+    // Directly allow 'import <string-literal>' to support protocol buffer
+    // definitions (code.google.com/p/protobuf) or a missing "#" (either way we
+ // should not break the line).
+ IdentifierInfo *Info = CurrentToken->Tok.getIdentifierInfo();
+ if ((Style.Language == FormatStyle::LK_Java &&
+ CurrentToken->is(Keywords.kw_package)) ||
+ (Info && Info->getPPKeywordID() == tok::pp_import &&
+ CurrentToken->Next &&
+ CurrentToken->Next->isOneOf(tok::string_literal, tok::identifier,
+ tok::kw_static))) {
+ next();
+ parseIncludeDirective();
+ return LT_ImportStatement;
+ }
+
+ // If this line starts and ends in '<' and '>', respectively, it is likely
+ // part of "#define <a/b.h>".
+ if (CurrentToken->is(tok::less) && Line.Last->is(tok::greater)) {
+ parseIncludeDirective();
+ return LT_ImportStatement;
+ }
+
+ // In .proto files, top-level options are very similar to import statements
+ // and should not be line-wrapped.
+ if (Style.Language == FormatStyle::LK_Proto && Line.Level == 0 &&
+ CurrentToken->is(Keywords.kw_option)) {
+ next();
+ if (CurrentToken && CurrentToken->is(tok::identifier))
+ return LT_ImportStatement;
+ }
+
+ bool KeywordVirtualFound = false;
+ bool ImportStatement = false;
+ while (CurrentToken) {
+ if (CurrentToken->is(tok::kw_virtual))
+ KeywordVirtualFound = true;
+ if (isImportStatement(*CurrentToken))
+ ImportStatement = true;
+ if (!consumeToken())
+ return LT_Invalid;
+ }
+ if (KeywordVirtualFound)
+ return LT_VirtualFunctionDecl;
+ if (ImportStatement)
+ return LT_ImportStatement;
+
+ if (Line.startsWith(TT_ObjCMethodSpecifier)) {
+ if (Contexts.back().FirstObjCSelectorName)
+ Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName =
+ Contexts.back().LongestObjCSelectorName;
+ return LT_ObjCMethodDecl;
+ }
+
+ return LT_Other;
+ }
+
+private:
+ bool isImportStatement(const FormatToken &Tok) {
+ // FIXME: Closure-library specific stuff should not be hard-coded but be
+ // configurable.
+ return Style.Language == FormatStyle::LK_JavaScript &&
+ Tok.TokenText == "goog" && Tok.Next && Tok.Next->is(tok::period) &&
+ Tok.Next->Next && (Tok.Next->Next->TokenText == "module" ||
+ Tok.Next->Next->TokenText == "provide" ||
+ Tok.Next->Next->TokenText == "require" ||
+ Tok.Next->Next->TokenText == "setTestOnly") &&
+ Tok.Next->Next->Next && Tok.Next->Next->Next->is(tok::l_paren);
+ }
+
+ void resetTokenMetadata(FormatToken *Token) {
+ if (!Token)
+ return;
+
+ // Reset token type in case we have already looked at it and then
+ // recovered from an error (e.g. failure to find the matching >).
+ if (!CurrentToken->isOneOf(TT_LambdaLSquare, TT_ForEachMacro,
+ TT_FunctionLBrace, TT_ImplicitStringLiteral,
+ TT_InlineASMBrace, TT_JsFatArrow, TT_LambdaArrow,
+ TT_RegexLiteral))
+ CurrentToken->Type = TT_Unknown;
+ CurrentToken->Role.reset();
+ CurrentToken->MatchingParen = nullptr;
+ CurrentToken->FakeLParens.clear();
+ CurrentToken->FakeRParens = 0;
+ }
+
+ void next() {
+ if (CurrentToken) {
+ CurrentToken->NestingLevel = Contexts.size() - 1;
+ CurrentToken->BindingStrength = Contexts.back().BindingStrength;
+ modifyContext(*CurrentToken);
+ determineTokenType(*CurrentToken);
+ CurrentToken = CurrentToken->Next;
+ }
+
+ resetTokenMetadata(CurrentToken);
+ }
+
+ /// \brief A struct to hold information valid in a specific context, e.g.
+  /// a pair of parentheses.
+ struct Context {
+ Context(tok::TokenKind ContextKind, unsigned BindingStrength,
+ bool IsExpression)
+ : ContextKind(ContextKind), BindingStrength(BindingStrength),
+ IsExpression(IsExpression) {}
+
+ tok::TokenKind ContextKind;
+ unsigned BindingStrength;
+ bool IsExpression;
+ unsigned LongestObjCSelectorName = 0;
+ bool ColonIsForRangeExpr = false;
+ bool ColonIsDictLiteral = false;
+ bool ColonIsObjCMethodExpr = false;
+ FormatToken *FirstObjCSelectorName = nullptr;
+ FormatToken *FirstStartOfName = nullptr;
+ bool CanBeExpression = true;
+ bool InTemplateArgument = false;
+ bool InCtorInitializer = false;
+ bool CaretFound = false;
+ bool IsForEachMacro = false;
+ };
+
+ /// \brief Puts a new \c Context onto the stack \c Contexts for the lifetime
+ /// of each instance.
+ struct ScopedContextCreator {
+ AnnotatingParser &P;
+
+ ScopedContextCreator(AnnotatingParser &P, tok::TokenKind ContextKind,
+ unsigned Increase)
+ : P(P) {
+ P.Contexts.push_back(Context(ContextKind,
+ P.Contexts.back().BindingStrength + Increase,
+ P.Contexts.back().IsExpression));
+ }
+
+ ~ScopedContextCreator() { P.Contexts.pop_back(); }
+ };
+
+ void modifyContext(const FormatToken &Current) {
+ if (Current.getPrecedence() == prec::Assignment &&
+ !Line.First->isOneOf(tok::kw_template, tok::kw_using, tok::kw_return) &&
+ (!Current.Previous || Current.Previous->isNot(tok::kw_operator))) {
+ Contexts.back().IsExpression = true;
+ if (!Line.startsWith(TT_UnaryOperator)) {
+ for (FormatToken *Previous = Current.Previous;
+ Previous && Previous->Previous &&
+ !Previous->Previous->isOneOf(tok::comma, tok::semi);
+ Previous = Previous->Previous) {
+ if (Previous->isOneOf(tok::r_square, tok::r_paren)) {
+ Previous = Previous->MatchingParen;
+ if (!Previous)
+ break;
+ }
+ if (Previous->opensScope())
+ break;
+ if (Previous->isOneOf(TT_BinaryOperator, TT_UnaryOperator) &&
+ Previous->isOneOf(tok::star, tok::amp, tok::ampamp) &&
+ Previous->Previous && Previous->Previous->isNot(tok::equal))
+ Previous->Type = TT_PointerOrReference;
+ }
+ }
+ } else if (Current.is(tok::lessless) &&
+ (!Current.Previous || !Current.Previous->is(tok::kw_operator))) {
+ Contexts.back().IsExpression = true;
+ } else if (Current.isOneOf(tok::kw_return, tok::kw_throw)) {
+ Contexts.back().IsExpression = true;
+ } else if (Current.is(TT_TrailingReturnArrow)) {
+ Contexts.back().IsExpression = false;
+ } else if (Current.is(TT_LambdaArrow) || Current.is(Keywords.kw_assert)) {
+ Contexts.back().IsExpression = Style.Language == FormatStyle::LK_Java;
+ } else if (Current.isOneOf(tok::r_paren, tok::greater, tok::comma)) {
+ for (FormatToken *Previous = Current.Previous;
+ Previous && Previous->isOneOf(tok::star, tok::amp);
+ Previous = Previous->Previous)
+ Previous->Type = TT_PointerOrReference;
+ if (Line.MustBeDeclaration)
+ Contexts.back().IsExpression = Contexts.front().InCtorInitializer;
+ } else if (Current.Previous &&
+ Current.Previous->is(TT_CtorInitializerColon)) {
+ Contexts.back().IsExpression = true;
+ Contexts.back().InCtorInitializer = true;
+ } else if (Current.is(tok::kw_new)) {
+ Contexts.back().CanBeExpression = false;
+ } else if (Current.isOneOf(tok::semi, tok::exclaim)) {
+ // This should be the condition or increment in a for-loop.
+ Contexts.back().IsExpression = true;
+ }
+ }
+
+ void determineTokenType(FormatToken &Current) {
+ if (!Current.is(TT_Unknown))
+ // The token type is already known.
+ return;
+
+ // Line.MightBeFunctionDecl can only be true after the parentheses of a
+ // function declaration have been found. In this case, 'Current' is a
+ // trailing token of this declaration and thus cannot be a name.
+ if (Current.is(Keywords.kw_instanceof)) {
+ Current.Type = TT_BinaryOperator;
+ } else if (isStartOfName(Current) &&
+ (!Line.MightBeFunctionDecl || Current.NestingLevel != 0)) {
+ Contexts.back().FirstStartOfName = &Current;
+ Current.Type = TT_StartOfName;
+ } else if (Current.isOneOf(tok::kw_auto, tok::kw___auto_type)) {
+ AutoFound = true;
+ } else if (Current.is(tok::arrow) &&
+ Style.Language == FormatStyle::LK_Java) {
+ Current.Type = TT_LambdaArrow;
+ } else if (Current.is(tok::arrow) && AutoFound && Line.MustBeDeclaration &&
+ Current.NestingLevel == 0) {
+ Current.Type = TT_TrailingReturnArrow;
+ } else if (Current.isOneOf(tok::star, tok::amp, tok::ampamp)) {
+ Current.Type =
+ determineStarAmpUsage(Current, Contexts.back().CanBeExpression &&
+ Contexts.back().IsExpression,
+ Contexts.back().InTemplateArgument);
+ } else if (Current.isOneOf(tok::minus, tok::plus, tok::caret)) {
+ Current.Type = determinePlusMinusCaretUsage(Current);
+ if (Current.is(TT_UnaryOperator) && Current.is(tok::caret))
+ Contexts.back().CaretFound = true;
+ } else if (Current.isOneOf(tok::minusminus, tok::plusplus)) {
+ Current.Type = determineIncrementUsage(Current);
+ } else if (Current.isOneOf(tok::exclaim, tok::tilde)) {
+ Current.Type = TT_UnaryOperator;
+ } else if (Current.is(tok::question)) {
+ if (Style.Language == FormatStyle::LK_JavaScript &&
+ Line.MustBeDeclaration) {
+ // In JavaScript, `interface X { foo?(): bar; }` is an optional method
+ // on the interface, not a ternary expression.
+ Current.Type = TT_JsTypeOptionalQuestion;
+ } else {
+ Current.Type = TT_ConditionalExpr;
+ }
+ } else if (Current.isBinaryOperator() &&
+ (!Current.Previous || Current.Previous->isNot(tok::l_square))) {
+ Current.Type = TT_BinaryOperator;
+ } else if (Current.is(tok::comment)) {
+ if (Current.TokenText.startswith("/*")) {
+ if (Current.TokenText.endswith("*/"))
+ Current.Type = TT_BlockComment;
+ else
+          // The lexer has for some reason determined a comment here, but we
+          // cannot really handle it if it isn't properly terminated.
+ Current.Tok.setKind(tok::unknown);
+ } else {
+ Current.Type = TT_LineComment;
+ }
+ } else if (Current.is(tok::r_paren)) {
+ if (rParenEndsCast(Current))
+ Current.Type = TT_CastRParen;
+ if (Current.MatchingParen && Current.Next &&
+ !Current.Next->isBinaryOperator() &&
+ !Current.Next->isOneOf(tok::semi, tok::colon, tok::l_brace))
+ if (FormatToken *BeforeParen = Current.MatchingParen->Previous)
+ if (BeforeParen->is(tok::identifier) &&
+ BeforeParen->TokenText == BeforeParen->TokenText.upper() &&
+ (!BeforeParen->Previous ||
+ BeforeParen->Previous->ClosesTemplateDeclaration))
+ Current.Type = TT_FunctionAnnotationRParen;
+ } else if (Current.is(tok::at) && Current.Next) {
+ if (Current.Next->isStringLiteral()) {
+ Current.Type = TT_ObjCStringLiteral;
+ } else {
+ switch (Current.Next->Tok.getObjCKeywordID()) {
+ case tok::objc_interface:
+ case tok::objc_implementation:
+ case tok::objc_protocol:
+ Current.Type = TT_ObjCDecl;
+ break;
+ case tok::objc_property:
+ Current.Type = TT_ObjCProperty;
+ break;
+ default:
+ break;
+ }
+ }
+ } else if (Current.is(tok::period)) {
+ FormatToken *PreviousNoComment = Current.getPreviousNonComment();
+ if (PreviousNoComment &&
+ PreviousNoComment->isOneOf(tok::comma, tok::l_brace))
+ Current.Type = TT_DesignatedInitializerPeriod;
+ else if (Style.Language == FormatStyle::LK_Java && Current.Previous &&
+ Current.Previous->isOneOf(TT_JavaAnnotation,
+ TT_LeadingJavaAnnotation)) {
+ Current.Type = Current.Previous->Type;
+ }
+ } else if (Current.isOneOf(tok::identifier, tok::kw_const) &&
+ Current.Previous &&
+ !Current.Previous->isOneOf(tok::equal, tok::at) &&
+ Line.MightBeFunctionDecl && Contexts.size() == 1) {
+ // Line.MightBeFunctionDecl can only be true after the parentheses of a
+ // function declaration have been found.
+ Current.Type = TT_TrailingAnnotation;
+ } else if ((Style.Language == FormatStyle::LK_Java ||
+ Style.Language == FormatStyle::LK_JavaScript) &&
+ Current.Previous) {
+ if (Current.Previous->is(tok::at) &&
+ Current.isNot(Keywords.kw_interface)) {
+ const FormatToken &AtToken = *Current.Previous;
+ const FormatToken *Previous = AtToken.getPreviousNonComment();
+ if (!Previous || Previous->is(TT_LeadingJavaAnnotation))
+ Current.Type = TT_LeadingJavaAnnotation;
+ else
+ Current.Type = TT_JavaAnnotation;
+ } else if (Current.Previous->is(tok::period) &&
+ Current.Previous->isOneOf(TT_JavaAnnotation,
+ TT_LeadingJavaAnnotation)) {
+ Current.Type = Current.Previous->Type;
+ }
+ }
+ }
+
+  /// \brief Take a guess at whether \p Tok starts the name of a function or
+ /// variable declaration.
+ ///
+ /// This is a heuristic based on whether \p Tok is an identifier following
+ /// something that is likely a type.
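+  ///
+  /// For example, the "variable" in "MyType *variable" follows a pointer and
+  /// likely starts a name, while the "b" in "a * b" does not, since the "*"
+  /// there is a binary operator.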
+ bool isStartOfName(const FormatToken &Tok) {
+ if (Tok.isNot(tok::identifier) || !Tok.Previous)
+ return false;
+
+ if (Tok.Previous->isOneOf(TT_LeadingJavaAnnotation, Keywords.kw_instanceof))
+ return false;
+
+    // Skip "const" as it has no influence on whether this is a name.
+ FormatToken *PreviousNotConst = Tok.Previous;
+ while (PreviousNotConst && PreviousNotConst->is(tok::kw_const))
+ PreviousNotConst = PreviousNotConst->Previous;
+
+ if (!PreviousNotConst)
+ return false;
+
+ bool IsPPKeyword = PreviousNotConst->is(tok::identifier) &&
+ PreviousNotConst->Previous &&
+ PreviousNotConst->Previous->is(tok::hash);
+
+ if (PreviousNotConst->is(TT_TemplateCloser))
+ return PreviousNotConst && PreviousNotConst->MatchingParen &&
+ PreviousNotConst->MatchingParen->Previous &&
+ PreviousNotConst->MatchingParen->Previous->isNot(tok::period) &&
+ PreviousNotConst->MatchingParen->Previous->isNot(tok::kw_template);
+
+ if (PreviousNotConst->is(tok::r_paren) && PreviousNotConst->MatchingParen &&
+ PreviousNotConst->MatchingParen->Previous &&
+ PreviousNotConst->MatchingParen->Previous->is(tok::kw_decltype))
+ return true;
+
+ return (!IsPPKeyword &&
+ PreviousNotConst->isOneOf(tok::identifier, tok::kw_auto)) ||
+ PreviousNotConst->is(TT_PointerOrReference) ||
+ PreviousNotConst->isSimpleTypeSpecifier();
+ }
+
+ /// \brief Determine whether ')' is ending a cast.
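+  ///
+  /// For example, the ")" in "(int)x" ends a cast, while the ")" in "f(x)"
+  /// does not.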
+ bool rParenEndsCast(const FormatToken &Tok) {
+ // C-style casts are only used in C++ and Java.
+ if (Style.Language != FormatStyle::LK_Cpp &&
+ Style.Language != FormatStyle::LK_Java)
+ return false;
+
+ // Empty parens aren't casts and there are no casts at the end of the line.
+ if (Tok.Previous == Tok.MatchingParen || !Tok.Next || !Tok.MatchingParen)
+ return false;
+
+ FormatToken *LeftOfParens = Tok.MatchingParen->getPreviousNonComment();
+ if (LeftOfParens) {
+ // If there is an opening parenthesis left of the current parentheses,
+ // look past it as these might be chained casts.
+ if (LeftOfParens->is(tok::r_paren)) {
+ if (!LeftOfParens->MatchingParen ||
+ !LeftOfParens->MatchingParen->Previous)
+ return false;
+ LeftOfParens = LeftOfParens->MatchingParen->Previous;
+ }
+
+      // If there is an identifier (or, with a few exceptions, a keyword)
+      // right before the parentheses, this is unlikely to be a cast.
+ if (LeftOfParens->Tok.getIdentifierInfo() &&
+ !LeftOfParens->isOneOf(Keywords.kw_in, tok::kw_return, tok::kw_case,
+ tok::kw_delete))
+ return false;
+
+ // Certain other tokens right before the parentheses are also signals that
+ // this cannot be a cast.
+ if (LeftOfParens->isOneOf(tok::at, tok::r_square, TT_OverloadedOperator,
+ TT_TemplateCloser))
+ return false;
+ }
+
+ if (Tok.Next->is(tok::question))
+ return false;
+
+ // As Java has no function types, a "(" after the ")" likely means that this
+ // is a cast.
+ if (Style.Language == FormatStyle::LK_Java && Tok.Next->is(tok::l_paren))
+ return true;
+
+ // If a (non-string) literal follows, this is likely a cast.
+ if (Tok.Next->isNot(tok::string_literal) &&
+ (Tok.Next->Tok.isLiteral() ||
+ Tok.Next->isOneOf(tok::kw_sizeof, tok::kw_alignof)))
+ return true;
+
+ // Heuristically try to determine whether the parentheses contain a type.
+ bool ParensAreType =
+ !Tok.Previous ||
+ Tok.Previous->isOneOf(TT_PointerOrReference, TT_TemplateCloser) ||
+ Tok.Previous->isSimpleTypeSpecifier();
+ bool ParensCouldEndDecl =
+ Tok.Next->isOneOf(tok::equal, tok::semi, tok::l_brace, tok::greater);
+ if (ParensAreType && !ParensCouldEndDecl)
+ return true;
+
+    // At this point, we heuristically assume that there are no casts at the
+    // start of the line. The logic above should already have handled most
+    // cases where there are, e.g. "(void)x;".
+ if (!LeftOfParens)
+ return false;
+
+ // If the following token is an identifier, this is a cast. All cases where
+ // this can be something else are handled above.
+ if (Tok.Next->is(tok::identifier))
+ return true;
+
+ if (!Tok.Next->Next)
+ return false;
+
+    // If the next token after the parentheses is a unary operator, assume
+    // that this is a cast, unless there are unexpected tokens inside the
+    // parentheses.
+ bool NextIsUnary =
+ Tok.Next->isUnaryOperator() || Tok.Next->isOneOf(tok::amp, tok::star);
+ if (!NextIsUnary || Tok.Next->is(tok::plus) ||
+ !Tok.Next->Next->isOneOf(tok::identifier, tok::numeric_constant))
+ return false;
+ // Search for unexpected tokens.
+ for (FormatToken *Prev = Tok.Previous; Prev != Tok.MatchingParen;
+ Prev = Prev->Previous) {
+ if (!Prev->isOneOf(tok::kw_const, tok::identifier, tok::coloncolon))
+ return false;
+ }
+ return true;
+ }
+
+ /// \brief Return the type of the given token assuming it is * or &.
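+  ///
+  /// For example, the "*" in "int *p" is a pointer declarator, the "*" in
+  /// "a * b" is a binary operator, and the "*" in "*p = 1" is a unary
+  /// dereference.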
+ TokenType determineStarAmpUsage(const FormatToken &Tok, bool IsExpression,
+ bool InTemplateArgument) {
+ if (Style.Language == FormatStyle::LK_JavaScript)
+ return TT_BinaryOperator;
+
+ const FormatToken *PrevToken = Tok.getPreviousNonComment();
+ if (!PrevToken)
+ return TT_UnaryOperator;
+
+ const FormatToken *NextToken = Tok.getNextNonComment();
+ if (!NextToken ||
+ NextToken->isOneOf(tok::arrow, Keywords.kw_final,
+ Keywords.kw_override) ||
+ (NextToken->is(tok::l_brace) && !NextToken->getNextNonComment()))
+ return TT_PointerOrReference;
+
+ if (PrevToken->is(tok::coloncolon))
+ return TT_PointerOrReference;
+
+ if (PrevToken->isOneOf(tok::l_paren, tok::l_square, tok::l_brace,
+ tok::comma, tok::semi, tok::kw_return, tok::colon,
+ tok::equal, tok::kw_delete, tok::kw_sizeof) ||
+ PrevToken->isOneOf(TT_BinaryOperator, TT_ConditionalExpr,
+ TT_UnaryOperator, TT_CastRParen))
+ return TT_UnaryOperator;
+
+ if (NextToken->is(tok::l_square) && NextToken->isNot(TT_LambdaLSquare))
+ return TT_PointerOrReference;
+ if (NextToken->is(tok::kw_operator) && !IsExpression)
+ return TT_PointerOrReference;
+ if (NextToken->isOneOf(tok::comma, tok::semi))
+ return TT_PointerOrReference;
+
+ if (PrevToken->is(tok::r_paren) && PrevToken->MatchingParen &&
+ PrevToken->MatchingParen->Previous &&
+ PrevToken->MatchingParen->Previous->isOneOf(tok::kw_typeof,
+ tok::kw_decltype))
+ return TT_PointerOrReference;
+
+ if (PrevToken->Tok.isLiteral() ||
+ PrevToken->isOneOf(tok::r_paren, tok::r_square, tok::kw_true,
+ tok::kw_false, tok::r_brace) ||
+ NextToken->Tok.isLiteral() ||
+ NextToken->isOneOf(tok::kw_true, tok::kw_false) ||
+ NextToken->isUnaryOperator() ||
+ // If we know we're in a template argument, there are no named
+ // declarations. Thus, having an identifier on the right-hand side
+ // indicates a binary operator.
+ (InTemplateArgument && NextToken->Tok.isAnyIdentifier()))
+ return TT_BinaryOperator;
+
+ // "&&(" is quite unlikely to be two successive unary "&".
+ if (Tok.is(tok::ampamp) && NextToken && NextToken->is(tok::l_paren))
+ return TT_BinaryOperator;
+
+ // This catches some cases where evaluation order is used as control flow:
+ // aaa && aaa->f();
+ const FormatToken *NextNextToken = NextToken->getNextNonComment();
+ if (NextNextToken && NextNextToken->is(tok::arrow))
+ return TT_BinaryOperator;
+
+ // It is very unlikely that we are going to find a pointer or reference type
+ // definition on the RHS of an assignment.
+ if (IsExpression && !Contexts.back().CaretFound)
+ return TT_BinaryOperator;
+
+ return TT_PointerOrReference;
+ }
+
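+  /// \brief Return the type of the given token assuming it is +, - or ^;
+  /// e.g. the "-" in "return -x;" is a unary operator, while the "-" in
+  /// "a - b" is a binary operator.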
+ TokenType determinePlusMinusCaretUsage(const FormatToken &Tok) {
+ const FormatToken *PrevToken = Tok.getPreviousNonComment();
+ if (!PrevToken || PrevToken->is(TT_CastRParen))
+ return TT_UnaryOperator;
+
+ // Use heuristics to recognize unary operators.
+ if (PrevToken->isOneOf(tok::equal, tok::l_paren, tok::comma, tok::l_square,
+ tok::question, tok::colon, tok::kw_return,
+ tok::kw_case, tok::at, tok::l_brace))
+ return TT_UnaryOperator;
+
+ // There can't be two consecutive binary operators.
+ if (PrevToken->is(TT_BinaryOperator))
+ return TT_UnaryOperator;
+
+    // Fall back to marking the token as a binary operator.
+ return TT_BinaryOperator;
+ }
+
+ /// \brief Determine whether ++/-- are pre- or post-increments/-decrements.
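+  ///
+  /// For example, the "++" in "++i" is a unary operator, while the "++" in
+  /// "i++" is a trailing unary operator.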
+ TokenType determineIncrementUsage(const FormatToken &Tok) {
+ const FormatToken *PrevToken = Tok.getPreviousNonComment();
+ if (!PrevToken || PrevToken->is(TT_CastRParen))
+ return TT_UnaryOperator;
+ if (PrevToken->isOneOf(tok::r_paren, tok::r_square, tok::identifier))
+ return TT_TrailingUnaryOperator;
+
+ return TT_UnaryOperator;
+ }
+
+ SmallVector<Context, 8> Contexts;
+
+ const FormatStyle &Style;
+ AnnotatedLine &Line;
+ FormatToken *CurrentToken;
+ bool AutoFound;
+ const AdditionalKeywords &Keywords;
+
+ // Set of "<" tokens that do not open a template parameter list. If parseAngle
+  // determines that a specific token can't be a template opener, it will make
+  // the same decision irrespective of the decisions for tokens leading up to
+  // it. Store this information to prevent this from causing exponential
+  // runtime.
+ llvm::SmallPtrSet<FormatToken *, 16> NonTemplateLess;
+};
+
+static const int PrecedenceUnaryOperator = prec::PointerToMember + 1;
+static const int PrecedenceArrowAndPeriod = prec::PointerToMember + 2;
+
+/// \brief Parses binary expressions by inserting fake parentheses based on
+/// operator precedence.
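+///
+/// For example, "a = b + c * d" is conceptually wrapped as "a = (b + (c * d))";
+/// the fake parentheses record where subexpressions start and end so later
+/// formatting stages can pick sensible line breaks.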
+class ExpressionParser {
+public:
+ ExpressionParser(const FormatStyle &Style, const AdditionalKeywords &Keywords,
+ AnnotatedLine &Line)
+ : Style(Style), Keywords(Keywords), Current(Line.First) {}
+
+  /// \brief Parse expressions with the given operator precedence.
+ void parse(int Precedence = 0) {
+ // Skip 'return' and ObjC selector colons as they are not part of a binary
+ // expression.
+ while (Current && (Current->is(tok::kw_return) ||
+ (Current->is(tok::colon) &&
+ Current->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral))))
+ next();
+
+ if (!Current || Precedence > PrecedenceArrowAndPeriod)
+ return;
+
+ // Conditional expressions need to be parsed separately for proper nesting.
+ if (Precedence == prec::Conditional) {
+ parseConditionalExpr();
+ return;
+ }
+
+ // Parse unary operators, which all have a higher precedence than binary
+ // operators.
+ if (Precedence == PrecedenceUnaryOperator) {
+ parseUnaryOperator();
+ return;
+ }
+
+ FormatToken *Start = Current;
+ FormatToken *LatestOperator = nullptr;
+ unsigned OperatorIndex = 0;
+
+ while (Current) {
+ // Consume operators with higher precedence.
+ parse(Precedence + 1);
+
+ int CurrentPrecedence = getCurrentPrecedence();
+
+ if (Current && Current->is(TT_SelectorName) &&
+ Precedence == CurrentPrecedence) {
+ if (LatestOperator)
+ addFakeParenthesis(Start, prec::Level(Precedence));
+ Start = Current;
+ }
+
+      // At the end of the line or when an operator with lower precedence is
+      // found, insert fake parentheses and return.
+ if (!Current || (Current->closesScope() && Current->MatchingParen) ||
+ (CurrentPrecedence != -1 && CurrentPrecedence < Precedence) ||
+ (CurrentPrecedence == prec::Conditional &&
+ Precedence == prec::Assignment && Current->is(tok::colon))) {
+ break;
+ }
+
+ // Consume scopes: (), [], <> and {}
+ if (Current->opensScope()) {
+ while (Current && !Current->closesScope()) {
+ next();
+ parse();
+ }
+ next();
+ } else {
+ // Operator found.
+ if (CurrentPrecedence == Precedence) {
+ if (LatestOperator)
+ LatestOperator->NextOperator = Current;
+ LatestOperator = Current;
+ Current->OperatorIndex = OperatorIndex;
+ ++OperatorIndex;
+ }
+ next(/*SkipPastLeadingComments=*/Precedence > 0);
+ }
+ }
+
+ if (LatestOperator && (Current || Precedence > 0)) {
+ // LatestOperator->LastOperator = true;
+ if (Precedence == PrecedenceArrowAndPeriod) {
+ // Call expressions don't have a binary operator precedence.
+ addFakeParenthesis(Start, prec::Unknown);
+ } else {
+ addFakeParenthesis(Start, prec::Level(Precedence));
+ }
+ }
+ }
+
+private:
+ /// \brief Gets the precedence (+1) of the given token for binary operators
+ /// and other tokens that we treat like binary operators.
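+  ///
+  /// Returns 0 for tokens that end the current expression (e.g. ";" or a
+  /// selector name) and -1 when no binary-operator-like precedence applies.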
+ int getCurrentPrecedence() {
+ if (Current) {
+ const FormatToken *NextNonComment = Current->getNextNonComment();
+ if (Current->is(TT_ConditionalExpr))
+ return prec::Conditional;
+ if (NextNonComment && NextNonComment->is(tok::colon) &&
+ NextNonComment->is(TT_DictLiteral))
+ return prec::Comma;
+ if (Current->is(TT_LambdaArrow))
+ return prec::Comma;
+ if (Current->is(TT_JsFatArrow))
+ return prec::Assignment;
+ if (Current->isOneOf(tok::semi, TT_InlineASMColon, TT_SelectorName,
+ TT_JsComputedPropertyName) ||
+ (Current->is(tok::comment) && NextNonComment &&
+ NextNonComment->is(TT_SelectorName)))
+ return 0;
+ if (Current->is(TT_RangeBasedForLoopColon))
+ return prec::Comma;
+ if ((Style.Language == FormatStyle::LK_Java ||
+ Style.Language == FormatStyle::LK_JavaScript) &&
+ Current->is(Keywords.kw_instanceof))
+ return prec::Relational;
+ if (Current->is(TT_BinaryOperator) || Current->is(tok::comma))
+ return Current->getPrecedence();
+ if (Current->isOneOf(tok::period, tok::arrow))
+ return PrecedenceArrowAndPeriod;
+ if (Style.Language == FormatStyle::LK_Java &&
+ Current->isOneOf(Keywords.kw_extends, Keywords.kw_implements,
+ Keywords.kw_throws))
+ return 0;
+ }
+ return -1;
+ }
+
+ void addFakeParenthesis(FormatToken *Start, prec::Level Precedence) {
+ Start->FakeLParens.push_back(Precedence);
+ if (Precedence > prec::Unknown)
+ Start->StartsBinaryExpression = true;
+ if (Current) {
+ FormatToken *Previous = Current->Previous;
+ while (Previous->is(tok::comment) && Previous->Previous)
+ Previous = Previous->Previous;
+ ++Previous->FakeRParens;
+ if (Precedence > prec::Unknown)
+ Previous->EndsBinaryExpression = true;
+ }
+ }
+
+ /// \brief Parse unary operator expressions and surround them with fake
+ /// parentheses if appropriate.
+ void parseUnaryOperator() {
+ if (!Current || Current->isNot(TT_UnaryOperator)) {
+ parse(PrecedenceArrowAndPeriod);
+ return;
+ }
+
+ FormatToken *Start = Current;
+ next();
+ parseUnaryOperator();
+
+ // The actual precedence doesn't matter.
+ addFakeParenthesis(Start, prec::Unknown);
+ }
+
+ void parseConditionalExpr() {
+ while (Current && Current->isTrailingComment()) {
+ next();
+ }
+ FormatToken *Start = Current;
+ parse(prec::LogicalOr);
+ if (!Current || !Current->is(tok::question))
+ return;
+ next();
+ parse(prec::Assignment);
+ if (!Current || Current->isNot(TT_ConditionalExpr))
+ return;
+ next();
+ parse(prec::Assignment);
+ addFakeParenthesis(Start, prec::Conditional);
+ }
+
+ void next(bool SkipPastLeadingComments = true) {
+ if (Current)
+ Current = Current->Next;
+ while (Current &&
+ (Current->NewlinesBefore == 0 || SkipPastLeadingComments) &&
+ Current->isTrailingComment())
+ Current = Current->Next;
+ }
+
+ const FormatStyle &Style;
+ const AdditionalKeywords &Keywords;
+ FormatToken *Current;
+};
+
+} // end anonymous namespace
+
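+// Comment-only lines take their indentation level from the next non-comment
+// line below them, so comment blocks stay aligned with the code they precede.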
+void TokenAnnotator::setCommentLineLevels(
+ SmallVectorImpl<AnnotatedLine *> &Lines) {
+ const AnnotatedLine *NextNonCommentLine = nullptr;
+ for (SmallVectorImpl<AnnotatedLine *>::reverse_iterator I = Lines.rbegin(),
+ E = Lines.rend();
+ I != E; ++I) {
+ if (NextNonCommentLine && (*I)->First->is(tok::comment) &&
+ (*I)->First->Next == nullptr)
+ (*I)->Level = NextNonCommentLine->Level;
+ else
+ NextNonCommentLine = (*I)->First->isNot(tok::r_brace) ? (*I) : nullptr;
+
+ setCommentLineLevels((*I)->Children);
+ }
+}
+
+void TokenAnnotator::annotate(AnnotatedLine &Line) {
+ for (SmallVectorImpl<AnnotatedLine *>::iterator I = Line.Children.begin(),
+ E = Line.Children.end();
+ I != E; ++I) {
+ annotate(**I);
+ }
+ AnnotatingParser Parser(Style, Line, Keywords);
+ Line.Type = Parser.parseLine();
+ if (Line.Type == LT_Invalid)
+ return;
+
+ ExpressionParser ExprParser(Style, Keywords, Line);
+ ExprParser.parse();
+
+ if (Line.startsWith(TT_ObjCMethodSpecifier))
+ Line.Type = LT_ObjCMethodDecl;
+ else if (Line.startsWith(TT_ObjCDecl))
+ Line.Type = LT_ObjCDecl;
+ else if (Line.startsWith(TT_ObjCProperty))
+ Line.Type = LT_ObjCProperty;
+
+ Line.First->SpacesRequiredBefore = 1;
+ Line.First->CanBreakBefore = Line.First->MustBreakBefore;
+}
+
+// This function heuristically determines whether 'Current' starts the name of a
+// function declaration.
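+// For example, "f" in "void f(int a);" starts such a name, while "f" in
+// "int x = f(a);" is a call and does not.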
+static bool isFunctionDeclarationName(const FormatToken &Current) {
+ auto skipOperatorName = [](const FormatToken* Next) -> const FormatToken* {
+ for (; Next; Next = Next->Next) {
+ if (Next->is(TT_OverloadedOperatorLParen))
+ return Next;
+ if (Next->is(TT_OverloadedOperator))
+ continue;
+ if (Next->isOneOf(tok::kw_new, tok::kw_delete)) {
+ // For 'new[]' and 'delete[]'.
+ if (Next->Next && Next->Next->is(tok::l_square) &&
+ Next->Next->Next && Next->Next->Next->is(tok::r_square))
+ Next = Next->Next->Next;
+ continue;
+ }
+
+ break;
+ }
+ return nullptr;
+ };
+
+ const FormatToken *Next = Current.Next;
+ if (Current.is(tok::kw_operator)) {
+ if (Current.Previous && Current.Previous->is(tok::coloncolon))
+ return false;
+ Next = skipOperatorName(Next);
+ } else {
+ if (!Current.is(TT_StartOfName) || Current.NestingLevel != 0)
+ return false;
+ for (; Next; Next = Next->Next) {
+ if (Next->is(TT_TemplateOpener)) {
+ Next = Next->MatchingParen;
+ } else if (Next->is(tok::coloncolon)) {
+ Next = Next->Next;
+ if (!Next)
+ return false;
+ if (Next->is(tok::kw_operator)) {
+ Next = skipOperatorName(Next->Next);
+ break;
+ }
+ if (!Next->is(tok::identifier))
+ return false;
+ } else if (Next->is(tok::l_paren)) {
+ break;
+ } else {
+ return false;
+ }
+ }
+ }
+
+ if (!Next || !Next->is(tok::l_paren))
+ return false;
+ if (Next->Next == Next->MatchingParen)
+ return true;
+ for (const FormatToken *Tok = Next->Next; Tok && Tok != Next->MatchingParen;
+ Tok = Tok->Next) {
+ if (Tok->is(tok::kw_const) || Tok->isSimpleTypeSpecifier() ||
+ Tok->isOneOf(TT_PointerOrReference, TT_StartOfName))
+ return true;
+ if (Tok->isOneOf(tok::l_brace, tok::string_literal, TT_ObjCMethodExpr) ||
+ Tok->Tok.isLiteral())
+ return false;
+ }
+ return false;
+}
+
+bool TokenAnnotator::mustBreakForReturnType(const AnnotatedLine &Line) const {
+ assert(Line.MightBeFunctionDecl);
+
+ if ((Style.AlwaysBreakAfterReturnType == FormatStyle::RTBS_TopLevel ||
+ Style.AlwaysBreakAfterReturnType ==
+ FormatStyle::RTBS_TopLevelDefinitions) &&
+ Line.Level > 0)
+ return false;
+
+ switch (Style.AlwaysBreakAfterReturnType) {
+ case FormatStyle::RTBS_None:
+ return false;
+ case FormatStyle::RTBS_All:
+ case FormatStyle::RTBS_TopLevel:
+ return true;
+ case FormatStyle::RTBS_AllDefinitions:
+ case FormatStyle::RTBS_TopLevelDefinitions:
+ return Line.mightBeFunctionDefinition();
+ }
+
+ return false;
+}
+
+void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
+ for (SmallVectorImpl<AnnotatedLine *>::iterator I = Line.Children.begin(),
+ E = Line.Children.end();
+ I != E; ++I) {
+ calculateFormattingInformation(**I);
+ }
+
+ Line.First->TotalLength =
+ Line.First->IsMultiline ? Style.ColumnLimit : Line.First->ColumnWidth;
+ if (!Line.First->Next)
+ return;
+ FormatToken *Current = Line.First->Next;
+ bool InFunctionDecl = Line.MightBeFunctionDecl;
+ while (Current) {
+ if (isFunctionDeclarationName(*Current))
+ Current->Type = TT_FunctionDeclarationName;
+ if (Current->is(TT_LineComment)) {
+ if (Current->Previous->BlockKind == BK_BracedInit &&
+ Current->Previous->opensScope())
+ Current->SpacesRequiredBefore = Style.Cpp11BracedListStyle ? 0 : 1;
+ else
+ Current->SpacesRequiredBefore = Style.SpacesBeforeTrailingComments;
+
+ // If we find a trailing comment, iterate backwards to determine whether
+ // it seems to relate to a specific parameter. If so, break before that
+ // parameter to avoid changing the comment's meaning. E.g. don't move 'b'
+ // to the previous line in:
+ // SomeFunction(a,
+ // b, // comment
+ // c);
+ if (!Current->HasUnescapedNewline) {
+ for (FormatToken *Parameter = Current->Previous; Parameter;
+ Parameter = Parameter->Previous) {
+ if (Parameter->isOneOf(tok::comment, tok::r_brace))
+ break;
+ if (Parameter->Previous && Parameter->Previous->is(tok::comma)) {
+ if (!Parameter->Previous->is(TT_CtorInitializerComma) &&
+ Parameter->HasUnescapedNewline)
+ Parameter->MustBreakBefore = true;
+ break;
+ }
+ }
+ }
+ } else if (Current->SpacesRequiredBefore == 0 &&
+ spaceRequiredBefore(Line, *Current)) {
+ Current->SpacesRequiredBefore = 1;
+ }
+
+ Current->MustBreakBefore =
+ Current->MustBreakBefore || mustBreakBefore(Line, *Current);
+
+ if (!Current->MustBreakBefore && InFunctionDecl &&
+ Current->is(TT_FunctionDeclarationName))
+ Current->MustBreakBefore = mustBreakForReturnType(Line);
+
+ Current->CanBreakBefore =
+ Current->MustBreakBefore || canBreakBefore(Line, *Current);
+ unsigned ChildSize = 0;
+ if (Current->Previous->Children.size() == 1) {
+ FormatToken &LastOfChild = *Current->Previous->Children[0]->Last;
+ ChildSize = LastOfChild.isTrailingComment() ? Style.ColumnLimit
+ : LastOfChild.TotalLength + 1;
+ }
+ const FormatToken *Prev = Current->Previous;
+ if (Current->MustBreakBefore || Prev->Children.size() > 1 ||
+ (Prev->Children.size() == 1 &&
+ Prev->Children[0]->First->MustBreakBefore) ||
+ Current->IsMultiline)
+ Current->TotalLength = Prev->TotalLength + Style.ColumnLimit;
+ else
+ Current->TotalLength = Prev->TotalLength + Current->ColumnWidth +
+ ChildSize + Current->SpacesRequiredBefore;
+
+ if (Current->is(TT_CtorInitializerColon))
+ InFunctionDecl = false;
+
+ // FIXME: Only calculate this if CanBreakBefore is true once static
+ // initializers etc. are sorted out.
+ // FIXME: Move magic numbers to a better place.
+ Current->SplitPenalty = 20 * Current->BindingStrength +
+ splitPenalty(Line, *Current, InFunctionDecl);
+
+ Current = Current->Next;
+ }
+
+ calculateUnbreakableTailLengths(Line);
+ for (Current = Line.First; Current != nullptr; Current = Current->Next) {
+ if (Current->Role)
+ Current->Role->precomputeFormattingInfos(Current);
+ }
+
+ DEBUG({ printDebugInfo(Line); });
+}
+
+void TokenAnnotator::calculateUnbreakableTailLengths(AnnotatedLine &Line) {
+ unsigned UnbreakableTailLength = 0;
+ FormatToken *Current = Line.Last;
+ while (Current) {
+ Current->UnbreakableTailLength = UnbreakableTailLength;
+ if (Current->CanBreakBefore ||
+ Current->isOneOf(tok::comment, tok::string_literal)) {
+ UnbreakableTailLength = 0;
+ } else {
+ UnbreakableTailLength +=
+ Current->ColumnWidth + Current->SpacesRequiredBefore;
+ }
+ Current = Current->Previous;
+ }
+}
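+
+// Example (sketch): in "a.bbb;" no break is allowed before "bbb" or ";", so
+// the "." carries an UnbreakableTailLength spanning the width of both.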
+
+unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
+ const FormatToken &Tok,
+ bool InFunctionDecl) {
+ const FormatToken &Left = *Tok.Previous;
+ const FormatToken &Right = Tok;
+
+ if (Left.is(tok::semi))
+ return 0;
+
+ if (Style.Language == FormatStyle::LK_Java) {
+ if (Right.isOneOf(Keywords.kw_extends, Keywords.kw_throws))
+ return 1;
+ if (Right.is(Keywords.kw_implements))
+ return 2;
+ if (Left.is(tok::comma) && Left.NestingLevel == 0)
+ return 3;
+ } else if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Right.is(Keywords.kw_function) && Left.isNot(tok::comma))
+ return 100;
+ if (Left.is(TT_JsTypeColon))
+ return 100;
+ }
+
+ if (Left.is(tok::comma) || (Right.is(tok::identifier) && Right.Next &&
+ Right.Next->is(TT_DictLiteral)))
+ return 1;
+ if (Right.is(tok::l_square)) {
+ if (Style.Language == FormatStyle::LK_Proto)
+ return 1;
+ if (Left.is(tok::r_square))
+ return 25;
+ // Slightly prefer formatting local lambda definitions like functions.
+ if (Right.is(TT_LambdaLSquare) && Left.is(tok::equal))
+ return 35;
+ if (!Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
+ TT_ArrayInitializerLSquare))
+ return 500;
+ }
+
+ if (Right.isOneOf(TT_StartOfName, TT_FunctionDeclarationName) ||
+ Right.is(tok::kw_operator)) {
+ if (Line.startsWith(tok::kw_for) && Right.PartOfMultiVariableDeclStmt)
+ return 3;
+ if (Left.is(TT_StartOfName))
+ return 110;
+ if (InFunctionDecl && Right.NestingLevel == 0)
+ return Style.PenaltyReturnTypeOnItsOwnLine;
+ return 200;
+ }
+ if (Right.is(TT_PointerOrReference))
+ return 190;
+ if (Right.is(TT_LambdaArrow))
+ return 110;
+ if (Left.is(tok::equal) && Right.is(tok::l_brace))
+ return 150;
+ if (Left.is(TT_CastRParen))
+ return 100;
+ if (Left.is(tok::coloncolon) ||
+ (Right.is(tok::period) && Style.Language == FormatStyle::LK_Proto))
+ return 500;
+ if (Left.isOneOf(tok::kw_class, tok::kw_struct))
+ return 5000;
+
+ if (Left.isOneOf(TT_RangeBasedForLoopColon, TT_InheritanceColon))
+ return 2;
+
+ if (Right.isMemberAccess()) {
+ // Breaking before the "./->" of a chained call/member access is reasonably
+ // cheap, as formatting those with one call per line is generally
+ // desirable. In particular, it should be cheaper to break before the call
+ // than it is to break inside a call's parameters, which could lead to weird
+ // "hanging" indents. The exception is the very last "./->" to support this
+ // frequent pattern:
+ //
+ // aaaaaaaa.aaaaaaaa.bbbbbbb().ccccccccccccccccccccc(
+ // dddddddd);
+ //
+ // which might otherwise be blown up onto many lines. Here, clang-format
+ // won't produce "hanging" indents anyway as there is no other trailing
+ // call.
+ //
+    // Also apply a higher penalty if the member access is not a call, as that
+    // might lead to a wrapping like:
+ //
+ // aaaaaaa
+ // .aaaaaaaaa.bbbbbbbb(cccccccc);
+ return !Right.NextOperator || !Right.NextOperator->Previous->closesScope()
+ ? 150
+ : 35;
+ }
+
+ if (Right.is(TT_TrailingAnnotation) &&
+ (!Right.Next || Right.Next->isNot(tok::l_paren))) {
+ // Moving trailing annotations to the next line is fine for ObjC method
+ // declarations.
+ if (Line.startsWith(TT_ObjCMethodSpecifier))
+ return 10;
+ // Generally, breaking before a trailing annotation is bad unless it is
+ // function-like. It seems to be especially preferable to keep standard
+ // annotations (i.e. "const", "final" and "override") on the same line.
+ // Use a slightly higher penalty after ")" so that annotations like
+ // "const override" are kept together.
+    bool IsShortAnnotation = Right.TokenText.size() < 10;
+    return (Left.is(tok::r_paren) ? 100 : 120) + (IsShortAnnotation ? 50 : 0);
+ }
+
+ // In for-loops, prefer breaking at ',' and ';'.
+ if (Line.startsWith(tok::kw_for) && Left.is(tok::equal))
+ return 4;
+
+ // In Objective-C method expressions, prefer breaking before "param:" over
+ // breaking after it.
+ if (Right.is(TT_SelectorName))
+ return 0;
+ if (Left.is(tok::colon) && Left.is(TT_ObjCMethodExpr))
+ return Line.MightBeFunctionDecl ? 50 : 500;
+
+ if (Left.is(tok::l_paren) && InFunctionDecl &&
+ Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign)
+ return 100;
+ if (Left.is(tok::l_paren) && Left.Previous &&
+ Left.Previous->isOneOf(tok::kw_if, tok::kw_for))
+ return 1000;
+ if (Left.is(tok::equal) && InFunctionDecl)
+ return 110;
+ if (Right.is(tok::r_brace))
+ return 1;
+ if (Left.is(TT_TemplateOpener))
+ return 100;
+ if (Left.opensScope()) {
+ if (Style.AlignAfterOpenBracket == FormatStyle::BAS_DontAlign)
+ return 0;
+ return Left.ParameterCount > 1 ? Style.PenaltyBreakBeforeFirstCallParameter
+ : 19;
+ }
+ if (Left.is(TT_JavaAnnotation))
+ return 50;
+
+ if (Right.is(tok::lessless)) {
+ if (Left.is(tok::string_literal) &&
+ (Right.NextOperator || Right.OperatorIndex != 1)) {
+ StringRef Content = Left.TokenText;
+ if (Content.startswith("\""))
+ Content = Content.drop_front(1);
+ if (Content.endswith("\""))
+ Content = Content.drop_back(1);
+ Content = Content.trim();
+ if (Content.size() > 1 &&
+ (Content.back() == ':' || Content.back() == '='))
+ return 25;
+ }
+ return 1; // Breaking at a << is really cheap.
+ }
+ if (Left.is(TT_ConditionalExpr))
+ return prec::Conditional;
+ prec::Level Level = Left.getPrecedence();
+ if (Level != prec::Unknown)
+ return Level;
+ Level = Right.getPrecedence();
+ if (Level != prec::Unknown)
+ return Level;
+
+ return 3;
+}
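+
+// Example (sketch): in "llvm::errs() << aaaa << bbbb;" a break at "<<" costs
+// only 1 while a break after "llvm::" costs 500, so the optimizer strongly
+// prefers
+//
+//   llvm::errs() << aaaa
+//                << bbbb;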
+
+bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
+ const FormatToken &Left,
+ const FormatToken &Right) {
+ if (Left.is(tok::kw_return) && Right.isNot(tok::semi))
+ return true;
+ if (Style.ObjCSpaceAfterProperty && Line.Type == LT_ObjCProperty &&
+ Left.Tok.getObjCKeywordID() == tok::objc_property)
+ return true;
+ if (Right.is(tok::hashhash))
+ return Left.is(tok::hash);
+ if (Left.isOneOf(tok::hashhash, tok::hash))
+ return Right.is(tok::hash);
+ if (Left.is(tok::l_paren) && Right.is(tok::r_paren))
+ return Style.SpaceInEmptyParentheses;
+ if (Left.is(tok::l_paren) || Right.is(tok::r_paren))
+ return (Right.is(TT_CastRParen) ||
+ (Left.MatchingParen && Left.MatchingParen->is(TT_CastRParen)))
+ ? Style.SpacesInCStyleCastParentheses
+ : Style.SpacesInParentheses;
+ if (Right.isOneOf(tok::semi, tok::comma))
+ return false;
+ if (Right.is(tok::less) &&
+ (Left.is(tok::kw_template) ||
+ (Line.Type == LT_ObjCDecl && Style.ObjCSpaceBeforeProtocolList)))
+ return true;
+ if (Left.isOneOf(tok::exclaim, tok::tilde))
+ return false;
+ if (Left.is(tok::at) &&
+ Right.isOneOf(tok::identifier, tok::string_literal, tok::char_constant,
+ tok::numeric_constant, tok::l_paren, tok::l_brace,
+ tok::kw_true, tok::kw_false))
+ return false;
+ if (Left.is(tok::colon))
+ return !Left.is(TT_ObjCMethodExpr);
+ if (Left.is(tok::coloncolon))
+ return false;
+ if (Left.is(tok::less) || Right.isOneOf(tok::greater, tok::less))
+ return false;
+ if (Right.is(tok::ellipsis))
+ return Left.Tok.isLiteral();
+ if (Left.is(tok::l_square) && Right.is(tok::amp))
+ return false;
+ if (Right.is(TT_PointerOrReference))
+ return (Left.is(tok::r_paren) && Left.MatchingParen &&
+ (Left.MatchingParen->is(TT_OverloadedOperatorLParen) ||
+ (Left.MatchingParen->Previous &&
+ Left.MatchingParen->Previous->is(TT_FunctionDeclarationName)))) ||
+ (Left.Tok.isLiteral() ||
+ (!Left.isOneOf(TT_PointerOrReference, tok::l_paren) &&
+ (Style.PointerAlignment != FormatStyle::PAS_Left ||
+ Line.IsMultiVariableDeclStmt)));
+ if (Right.is(TT_FunctionTypeLParen) && Left.isNot(tok::l_paren) &&
+ (!Left.is(TT_PointerOrReference) ||
+ (Style.PointerAlignment != FormatStyle::PAS_Right &&
+ !Line.IsMultiVariableDeclStmt)))
+ return true;
+ if (Left.is(TT_PointerOrReference))
+ return Right.Tok.isLiteral() ||
+ Right.isOneOf(TT_BlockComment, Keywords.kw_final,
+ Keywords.kw_override) ||
+ (Right.is(tok::l_brace) && Right.BlockKind == BK_Block) ||
+ (!Right.isOneOf(TT_PointerOrReference, TT_ArraySubscriptLSquare,
+ tok::l_paren) &&
+ (Style.PointerAlignment != FormatStyle::PAS_Right &&
+ !Line.IsMultiVariableDeclStmt) &&
+ Left.Previous &&
+ !Left.Previous->isOneOf(tok::l_paren, tok::coloncolon));
+ if (Right.is(tok::star) && Left.is(tok::l_paren))
+ return false;
+ if (Left.is(tok::l_square))
+ return (Left.is(TT_ArrayInitializerLSquare) &&
+ Style.SpacesInContainerLiterals && Right.isNot(tok::r_square)) ||
+ (Left.is(TT_ArraySubscriptLSquare) && Style.SpacesInSquareBrackets &&
+ Right.isNot(tok::r_square));
+ if (Right.is(tok::r_square))
+ return Right.MatchingParen &&
+ ((Style.SpacesInContainerLiterals &&
+ Right.MatchingParen->is(TT_ArrayInitializerLSquare)) ||
+ (Style.SpacesInSquareBrackets &&
+ Right.MatchingParen->is(TT_ArraySubscriptLSquare)));
+ if (Right.is(tok::l_square) &&
+ !Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare) &&
+ !Left.isOneOf(tok::numeric_constant, TT_DictLiteral))
+ return false;
+ if (Left.is(tok::l_brace) && Right.is(tok::r_brace))
+ return !Left.Children.empty(); // No spaces in "{}".
+ if ((Left.is(tok::l_brace) && Left.BlockKind != BK_Block) ||
+ (Right.is(tok::r_brace) && Right.MatchingParen &&
+ Right.MatchingParen->BlockKind != BK_Block))
+ return !Style.Cpp11BracedListStyle;
+ if (Left.is(TT_BlockComment))
+ return !Left.TokenText.endswith("=*/");
+ if (Right.is(tok::l_paren)) {
+ if (Left.is(tok::r_paren) && Left.is(TT_AttributeParen))
+ return true;
+ return Line.Type == LT_ObjCDecl || Left.is(tok::semi) ||
+ (Style.SpaceBeforeParens != FormatStyle::SBPO_Never &&
+ (Left.isOneOf(tok::kw_if, tok::pp_elif, tok::kw_for, tok::kw_while,
+ tok::kw_switch, tok::kw_case, TT_ForEachMacro,
+ TT_ObjCForIn) ||
+ (Left.isOneOf(tok::kw_try, Keywords.kw___except, tok::kw_catch,
+ tok::kw_new, tok::kw_delete) &&
+ (!Left.Previous || Left.Previous->isNot(tok::period))))) ||
+ (Style.SpaceBeforeParens == FormatStyle::SBPO_Always &&
+ (Left.is(tok::identifier) || Left.isFunctionLikeKeyword() ||
+ Left.is(tok::r_paren)) &&
+ Line.Type != LT_PreprocessorDirective);
+ }
+ if (Left.is(tok::at) && Right.Tok.getObjCKeywordID() != tok::objc_not_keyword)
+ return false;
+ if (Right.is(TT_UnaryOperator))
+ return !Left.isOneOf(tok::l_paren, tok::l_square, tok::at) &&
+ (Left.isNot(tok::colon) || Left.isNot(TT_ObjCMethodExpr));
+ if ((Left.isOneOf(tok::identifier, tok::greater, tok::r_square,
+ tok::r_paren) ||
+ Left.isSimpleTypeSpecifier()) &&
+ Right.is(tok::l_brace) && Right.getNextNonComment() &&
+ Right.BlockKind != BK_Block)
+ return false;
+ if (Left.is(tok::period) || Right.is(tok::period))
+ return false;
+ if (Right.is(tok::hash) && Left.is(tok::identifier) && Left.TokenText == "L")
+ return false;
+ if (Left.is(TT_TemplateCloser) && Left.MatchingParen &&
+ Left.MatchingParen->Previous &&
+ Left.MatchingParen->Previous->is(tok::period))
+ // A.<B>DoSomething();
+ return false;
+ if (Left.is(TT_TemplateCloser) && Right.is(tok::l_square))
+ return false;
+ return true;
+}
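+
+// Example (sketch): with SpaceBeforeParens == SBPO_ControlStatements the
+// rules above yield "if (x)" but "f(x)"; SBPO_Always additionally yields
+// "f (x)".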
+
+bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
+ const FormatToken &Right) {
+ const FormatToken &Left = *Right.Previous;
+ if (Right.Tok.getIdentifierInfo() && Left.Tok.getIdentifierInfo())
+ return true; // Never ever merge two identifiers.
+ if (Style.Language == FormatStyle::LK_Cpp) {
+ if (Left.is(tok::kw_operator))
+ return Right.is(tok::coloncolon);
+ } else if (Style.Language == FormatStyle::LK_Proto) {
+ if (Right.is(tok::period) &&
+ Left.isOneOf(Keywords.kw_optional, Keywords.kw_required,
+ Keywords.kw_repeated, Keywords.kw_extend))
+ return true;
+ if (Right.is(tok::l_paren) &&
+ Left.isOneOf(Keywords.kw_returns, Keywords.kw_option))
+ return true;
+ } else if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Left.isOneOf(Keywords.kw_let, Keywords.kw_var, TT_JsFatArrow,
+ Keywords.kw_in))
+ return true;
+ if (Left.is(tok::kw_default) && Left.Previous &&
+ Left.Previous->is(tok::kw_export))
+ return true;
+ if (Left.is(Keywords.kw_is) && Right.is(tok::l_brace))
+ return true;
+ if (Right.isOneOf(TT_JsTypeColon, TT_JsTypeOptionalQuestion))
+ return false;
+ if ((Left.is(tok::l_brace) || Right.is(tok::r_brace)) &&
+ Line.First->isOneOf(Keywords.kw_import, tok::kw_export))
+ return false;
+ if (Left.is(tok::ellipsis))
+ return false;
+ if (Left.is(TT_TemplateCloser) &&
+ !Right.isOneOf(tok::equal, tok::l_brace, tok::comma, tok::l_square,
+ Keywords.kw_implements, Keywords.kw_extends))
+ // Type assertions ('<type>expr') are not followed by whitespace. Other
+ // locations that should have whitespace following are identified by the
+ // above set of follower tokens.
+ return false;
+ } else if (Style.Language == FormatStyle::LK_Java) {
+ if (Left.is(tok::r_square) && Right.is(tok::l_brace))
+ return true;
+ if (Left.is(Keywords.kw_synchronized) && Right.is(tok::l_paren))
+ return Style.SpaceBeforeParens != FormatStyle::SBPO_Never;
+ if ((Left.isOneOf(tok::kw_static, tok::kw_public, tok::kw_private,
+ tok::kw_protected) ||
+ Left.isOneOf(Keywords.kw_final, Keywords.kw_abstract,
+ Keywords.kw_native)) &&
+ Right.is(TT_TemplateOpener))
+ return true;
+ }
+ if (Left.is(TT_ImplicitStringLiteral))
+ return Right.WhitespaceRange.getBegin() != Right.WhitespaceRange.getEnd();
+ if (Line.Type == LT_ObjCMethodDecl) {
+ if (Left.is(TT_ObjCMethodSpecifier))
+ return true;
+ if (Left.is(tok::r_paren) && Right.is(tok::identifier))
+      // Don't add a space between ')' and <id>.
+ return false;
+ }
+ if (Line.Type == LT_ObjCProperty &&
+ (Right.is(tok::equal) || Left.is(tok::equal)))
+ return false;
+
+ if (Right.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow) ||
+ Left.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow))
+ return true;
+ if (Left.is(tok::comma))
+ return true;
+ if (Right.is(tok::comma))
+ return false;
+ if (Right.isOneOf(TT_CtorInitializerColon, TT_ObjCBlockLParen))
+ return true;
+ if (Right.is(TT_OverloadedOperatorLParen))
+ return Style.SpaceBeforeParens == FormatStyle::SBPO_Always;
+ if (Right.is(tok::colon)) {
+ if (Line.First->isOneOf(tok::kw_case, tok::kw_default) ||
+ !Right.getNextNonComment() || Right.getNextNonComment()->is(tok::semi))
+ return false;
+ if (Right.is(TT_ObjCMethodExpr))
+ return false;
+ if (Left.is(tok::question))
+ return false;
+ if (Right.is(TT_InlineASMColon) && Left.is(tok::coloncolon))
+ return false;
+ if (Right.is(TT_DictLiteral))
+ return Style.SpacesInContainerLiterals;
+ return true;
+ }
+ if (Left.is(TT_UnaryOperator))
+ return Right.is(TT_BinaryOperator);
+
+ // If the next token is a binary operator or a selector name, we have
+ // incorrectly classified the parenthesis as a cast. FIXME: Detect correctly.
+ if (Left.is(TT_CastRParen))
+ return Style.SpaceAfterCStyleCast ||
+ Right.isOneOf(TT_BinaryOperator, TT_SelectorName);
+
+ if (Left.is(tok::greater) && Right.is(tok::greater))
+ return Right.is(TT_TemplateCloser) && Left.is(TT_TemplateCloser) &&
+ (Style.Standard != FormatStyle::LS_Cpp11 || Style.SpacesInAngles);
+ if (Right.isOneOf(tok::arrow, tok::period, tok::arrowstar, tok::periodstar) ||
+ Left.isOneOf(tok::arrow, tok::period, tok::arrowstar, tok::periodstar))
+ return false;
+ if (!Style.SpaceBeforeAssignmentOperators &&
+ Right.getPrecedence() == prec::Assignment)
+ return false;
+ if (Right.is(tok::coloncolon) && Left.isNot(tok::l_brace))
+ return (Left.is(TT_TemplateOpener) &&
+ Style.Standard == FormatStyle::LS_Cpp03) ||
+ !(Left.isOneOf(tok::identifier, tok::l_paren, tok::r_paren) ||
+ Left.isOneOf(TT_TemplateCloser, TT_TemplateOpener));
+ if ((Left.is(TT_TemplateOpener)) != (Right.is(TT_TemplateCloser)))
+ return Style.SpacesInAngles;
+ if ((Right.is(TT_BinaryOperator) && !Left.is(tok::l_paren)) ||
+ (Left.isOneOf(TT_BinaryOperator, TT_ConditionalExpr) &&
+ !Right.is(tok::r_paren)))
+ return true;
+ if (Left.is(TT_TemplateCloser) && Right.is(tok::l_paren) &&
+ Right.isNot(TT_FunctionTypeLParen))
+ return Style.SpaceBeforeParens == FormatStyle::SBPO_Always;
+ if (Right.is(TT_TemplateOpener) && Left.is(tok::r_paren) &&
+ Left.MatchingParen && Left.MatchingParen->is(TT_OverloadedOperatorLParen))
+ return false;
+ if (Right.is(tok::less) && Left.isNot(tok::l_paren) &&
+ Line.startsWith(tok::hash))
+ return true;
+ if (Right.is(TT_TrailingUnaryOperator))
+ return false;
+ if (Left.is(TT_RegexLiteral))
+ return false;
+ return spaceRequiredBetween(Line, Left, Right);
+}
+
+// Returns 'true' if 'Tok' is a brace we'd want to break before in Allman style.
+static bool isAllmanBrace(const FormatToken &Tok) {
+ return Tok.is(tok::l_brace) && Tok.BlockKind == BK_Block &&
+ !Tok.isOneOf(TT_ObjCBlockLBrace, TT_DictLiteral);
+}
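+
+// For example (sketch), Allman style wraps such a brace onto its own line:
+//
+//   void f()
+//   {
+//     ...
+//   }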
+
+bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
+ const FormatToken &Right) {
+ const FormatToken &Left = *Right.Previous;
+ if (Right.NewlinesBefore > 1 && Style.MaxEmptyLinesToKeep > 0)
+ return true;
+
+ if (Style.Language == FormatStyle::LK_JavaScript) {
+ // FIXME: This might apply to other languages and token kinds.
+ if (Right.is(tok::char_constant) && Left.is(tok::plus) && Left.Previous &&
+ Left.Previous->is(tok::char_constant))
+ return true;
+ if (Left.is(TT_DictLiteral) && Left.is(tok::l_brace) && Line.Level == 0 &&
+ Left.Previous && Left.Previous->is(tok::equal) &&
+ Line.First->isOneOf(tok::identifier, Keywords.kw_import, tok::kw_export,
+ tok::kw_const) &&
+        // kw_var/kw_let are pseudo-tokens that are tok::identifier, so they
+        // match the tok::identifier check above.
+ !Line.First->isOneOf(Keywords.kw_var, Keywords.kw_let))
+ // Object literals on the top level of a file are treated as "enum-style".
+ // Each key/value pair is put on a separate line, instead of bin-packing.
+ return true;
+ if (Left.is(tok::l_brace) && Line.Level == 0 &&
+ (Line.startsWith(tok::kw_enum) ||
+ Line.startsWith(tok::kw_export, tok::kw_enum)))
+ // JavaScript top-level enum key/value pairs are put on separate lines
+ // instead of bin-packing.
+ return true;
+ if (Right.is(tok::r_brace) && Left.is(tok::l_brace) &&
+ !Left.Children.empty())
+ // Support AllowShortFunctionsOnASingleLine for JavaScript.
+ return Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_None ||
+ Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_Empty ||
+ (Left.NestingLevel == 0 && Line.Level == 0 &&
+ Style.AllowShortFunctionsOnASingleLine ==
+ FormatStyle::SFS_Inline);
+ } else if (Style.Language == FormatStyle::LK_Java) {
+ if (Right.is(tok::plus) && Left.is(tok::string_literal) && Right.Next &&
+ Right.Next->is(tok::string_literal))
+ return true;
+ }
+
+ // If the last token before a '}' is a comma or a trailing comment, the
+ // intention is to insert a line break after it in order to make shuffling
+ // around entries easier.
+ const FormatToken *BeforeClosingBrace = nullptr;
+ if (Left.isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) &&
+ Left.BlockKind != BK_Block && Left.MatchingParen)
+ BeforeClosingBrace = Left.MatchingParen->Previous;
+ else if (Right.MatchingParen &&
+ Right.MatchingParen->isOneOf(tok::l_brace,
+ TT_ArrayInitializerLSquare))
+ BeforeClosingBrace = &Left;
+ if (BeforeClosingBrace && (BeforeClosingBrace->is(tok::comma) ||
+ BeforeClosingBrace->isTrailingComment()))
+ return true;
+
+ if (Right.is(tok::comment))
+ return Left.BlockKind != BK_BracedInit &&
+ Left.isNot(TT_CtorInitializerColon) &&
+ (Right.NewlinesBefore > 0 && Right.HasUnescapedNewline);
+ if (Left.isTrailingComment())
+ return true;
+ if (Left.isStringLiteral() &&
+ (Right.isStringLiteral() || Right.is(TT_ObjCStringLiteral)))
+ return true;
+ if (Right.Previous->IsUnterminatedLiteral)
+ return true;
+ if (Right.is(tok::lessless) && Right.Next &&
+ Right.Previous->is(tok::string_literal) &&
+ Right.Next->is(tok::string_literal))
+ return true;
+ if (Right.Previous->ClosesTemplateDeclaration &&
+ Right.Previous->MatchingParen &&
+ Right.Previous->MatchingParen->NestingLevel == 0 &&
+ Style.AlwaysBreakTemplateDeclarations)
+ return true;
+ if ((Right.isOneOf(TT_CtorInitializerComma, TT_CtorInitializerColon)) &&
+ Style.BreakConstructorInitializersBeforeComma &&
+ !Style.ConstructorInitializerAllOnOneLineOrOnePerLine)
+ return true;
+ if (Right.is(tok::string_literal) && Right.TokenText.startswith("R\""))
+ // Raw string literals are special wrt. line breaks. The author has made a
+ // deliberate choice and might have aligned the contents of the string
+    // literal accordingly. Thus, we try to keep existing line breaks.
+ return Right.NewlinesBefore > 0;
+ if (Right.Previous->is(tok::l_brace) && Right.NestingLevel == 1 &&
+ Style.Language == FormatStyle::LK_Proto)
+ // Don't put enums onto single lines in protocol buffers.
+ return true;
+ if (Right.is(TT_InlineASMBrace))
+ return Right.HasUnescapedNewline;
+ if (isAllmanBrace(Left) || isAllmanBrace(Right))
+ return (Line.startsWith(tok::kw_enum) && Style.BraceWrapping.AfterEnum) ||
+ (Line.startsWith(tok::kw_class) && Style.BraceWrapping.AfterClass) ||
+ (Line.startsWith(tok::kw_struct) && Style.BraceWrapping.AfterStruct);
+ if (Style.Language == FormatStyle::LK_Proto && Left.isNot(tok::l_brace) &&
+ Right.is(TT_SelectorName))
+ return true;
+ if (Left.is(TT_ObjCBlockLBrace) && !Style.AllowShortBlocksOnASingleLine)
+ return true;
+
+ if ((Style.Language == FormatStyle::LK_Java ||
+ Style.Language == FormatStyle::LK_JavaScript) &&
+ Left.is(TT_LeadingJavaAnnotation) &&
+ Right.isNot(TT_LeadingJavaAnnotation) && Right.isNot(tok::l_paren) &&
+ (Line.Last->is(tok::l_brace) || Style.BreakAfterJavaFieldAnnotations))
+ return true;
+
+ return false;
+}
+
+bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
+ const FormatToken &Right) {
+ const FormatToken &Left = *Right.Previous;
+
+ // Language-specific stuff.
+ if (Style.Language == FormatStyle::LK_Java) {
+ if (Left.isOneOf(Keywords.kw_throws, Keywords.kw_extends,
+ Keywords.kw_implements))
+ return false;
+ if (Right.isOneOf(Keywords.kw_throws, Keywords.kw_extends,
+ Keywords.kw_implements))
+ return true;
+ } else if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Left.is(TT_JsFatArrow) && Right.is(tok::l_brace))
+ return false;
+ if (Left.is(TT_JsTypeColon))
+ return true;
+ if (Right.NestingLevel == 0 && Right.is(Keywords.kw_is))
+ return false;
+ }
+
+ if (Left.is(tok::at))
+ return false;
+ if (Left.Tok.getObjCKeywordID() == tok::objc_interface)
+ return false;
+ if (Left.isOneOf(TT_JavaAnnotation, TT_LeadingJavaAnnotation))
+ return !Right.is(tok::l_paren);
+ if (Right.is(TT_PointerOrReference))
+ return Line.IsMultiVariableDeclStmt ||
+ (Style.PointerAlignment == FormatStyle::PAS_Right &&
+ (!Right.Next || Right.Next->isNot(TT_FunctionDeclarationName)));
+ if (Right.isOneOf(TT_StartOfName, TT_FunctionDeclarationName) ||
+ Right.is(tok::kw_operator))
+ return true;
+ if (Left.is(TT_PointerOrReference))
+ return false;
+ if (Right.isTrailingComment())
+ // We rely on MustBreakBefore being set correctly here as we should not
+ // change the "binding" behavior of a comment.
+    // The first comment in a braced list is always interpreted as belonging to
+ // the first list element. Otherwise, it should be placed outside of the
+ // list.
+ return Left.BlockKind == BK_BracedInit;
+ if (Left.is(tok::question) && Right.is(tok::colon))
+ return false;
+ if (Right.is(TT_ConditionalExpr) || Right.is(tok::question))
+ return Style.BreakBeforeTernaryOperators;
+ if (Left.is(TT_ConditionalExpr) || Left.is(tok::question))
+ return !Style.BreakBeforeTernaryOperators;
+ if (Right.is(TT_InheritanceColon))
+ return true;
+ if (Right.is(tok::colon) &&
+ !Right.isOneOf(TT_CtorInitializerColon, TT_InlineASMColon))
+ return false;
+ if (Left.is(tok::colon) && (Left.isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)))
+ return true;
+ if (Right.is(TT_SelectorName) || (Right.is(tok::identifier) && Right.Next &&
+ Right.Next->is(TT_ObjCMethodExpr)))
+ return Left.isNot(tok::period); // FIXME: Properly parse ObjC calls.
+ if (Left.is(tok::r_paren) && Line.Type == LT_ObjCProperty)
+ return true;
+ if (Left.ClosesTemplateDeclaration || Left.is(TT_FunctionAnnotationRParen))
+ return true;
+ if (Right.isOneOf(TT_RangeBasedForLoopColon, TT_OverloadedOperatorLParen,
+ TT_OverloadedOperator))
+ return false;
+ if (Left.is(TT_RangeBasedForLoopColon))
+ return true;
+ if (Right.is(TT_RangeBasedForLoopColon))
+ return false;
+ if (Left.isOneOf(TT_TemplateCloser, TT_UnaryOperator) ||
+ Left.is(tok::kw_operator))
+ return false;
+ if (Left.is(tok::equal) && !Right.isOneOf(tok::kw_default, tok::kw_delete) &&
+ Line.Type == LT_VirtualFunctionDecl && Left.NestingLevel == 0)
+ return false;
+ if (Left.is(tok::l_paren) && Left.is(TT_AttributeParen))
+ return false;
+ if (Left.is(tok::l_paren) && Left.Previous &&
+ (Left.Previous->isOneOf(TT_BinaryOperator, TT_CastRParen)))
+ return false;
+ if (Right.is(TT_ImplicitStringLiteral))
+ return false;
+
+ if (Right.is(tok::r_paren) || Right.is(TT_TemplateCloser))
+ return false;
+ if (Right.is(tok::r_square) && Right.MatchingParen &&
+ Right.MatchingParen->is(TT_LambdaLSquare))
+ return false;
+
+ // We only break before r_brace if there was a corresponding break before
+ // the l_brace, which is tracked by BreakBeforeClosingBrace.
+ if (Right.is(tok::r_brace))
+ return Right.MatchingParen && Right.MatchingParen->BlockKind == BK_Block;
+
+ // Allow breaking after a trailing annotation, e.g. after a method
+ // declaration.
+ if (Left.is(TT_TrailingAnnotation))
+ return !Right.isOneOf(tok::l_brace, tok::semi, tok::equal, tok::l_paren,
+ tok::less, tok::coloncolon);
+
+ if (Right.is(tok::kw___attribute))
+ return true;
+
+ if (Left.is(tok::identifier) && Right.is(tok::string_literal))
+ return true;
+
+ if (Right.is(tok::identifier) && Right.Next && Right.Next->is(TT_DictLiteral))
+ return true;
+
+ if (Left.is(TT_CtorInitializerComma) &&
+ Style.BreakConstructorInitializersBeforeComma)
+ return false;
+ if (Right.is(TT_CtorInitializerComma) &&
+ Style.BreakConstructorInitializersBeforeComma)
+ return true;
+ if ((Left.is(tok::greater) && Right.is(tok::greater)) ||
+ (Left.is(tok::less) && Right.is(tok::less)))
+ return false;
+ if (Right.is(TT_BinaryOperator) &&
+ Style.BreakBeforeBinaryOperators != FormatStyle::BOS_None &&
+ (Style.BreakBeforeBinaryOperators == FormatStyle::BOS_All ||
+ Right.getPrecedence() != prec::Assignment))
+ return true;
+ if (Left.is(TT_ArrayInitializerLSquare))
+ return true;
+ if (Right.is(tok::kw_typename) && Left.isNot(tok::kw_const))
+ return true;
+ if ((Left.isBinaryOperator() || Left.is(TT_BinaryOperator)) &&
+ !Left.isOneOf(tok::arrowstar, tok::lessless) &&
+ Style.BreakBeforeBinaryOperators != FormatStyle::BOS_All &&
+ (Style.BreakBeforeBinaryOperators == FormatStyle::BOS_None ||
+ Left.getPrecedence() == prec::Assignment))
+ return true;
+ return Left.isOneOf(tok::comma, tok::coloncolon, tok::semi, tok::l_brace,
+ tok::kw_class, tok::kw_struct) ||
+ Right.isMemberAccess() ||
+ Right.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow, tok::lessless,
+ tok::colon, tok::l_square, tok::at) ||
+ (Left.is(tok::r_paren) &&
+ Right.isOneOf(tok::identifier, tok::kw_const)) ||
+ (Left.is(tok::l_paren) && !Right.is(tok::r_paren));
+}
+
+void TokenAnnotator::printDebugInfo(const AnnotatedLine &Line) {
+ llvm::errs() << "AnnotatedTokens:\n";
+ const FormatToken *Tok = Line.First;
+ while (Tok) {
+ llvm::errs() << " M=" << Tok->MustBreakBefore
+ << " C=" << Tok->CanBreakBefore
+ << " T=" << getTokenTypeName(Tok->Type)
+ << " S=" << Tok->SpacesRequiredBefore
+ << " B=" << Tok->BlockParameterCount
+ << " P=" << Tok->SplitPenalty << " Name=" << Tok->Tok.getName()
+ << " L=" << Tok->TotalLength << " PPK=" << Tok->PackingKind
+ << " FakeLParens=";
+ for (unsigned i = 0, e = Tok->FakeLParens.size(); i != e; ++i)
+ llvm::errs() << Tok->FakeLParens[i] << "/";
+ llvm::errs() << " FakeRParens=" << Tok->FakeRParens << "\n";
+ if (!Tok->Next)
+ assert(Tok == Line.Last);
+ Tok = Tok->Next;
+ }
+ llvm::errs() << "----\n";
+}
+
+} // namespace format
+} // namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Format/TokenAnnotator.h b/contrib/llvm/tools/clang/lib/Format/TokenAnnotator.h
new file mode 100644
index 0000000..5329f1f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Format/TokenAnnotator.h
@@ -0,0 +1,182 @@
+//===--- TokenAnnotator.h - Format C++ code ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements a token annotator, i.e. creates
+/// \c AnnotatedTokens out of \c FormatTokens with required extra information.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_FORMAT_TOKENANNOTATOR_H
+#define LLVM_CLANG_LIB_FORMAT_TOKENANNOTATOR_H
+
+#include "UnwrappedLineParser.h"
+#include "clang/Format/Format.h"
+#include <string>
+
+namespace clang {
+class SourceManager;
+
+namespace format {
+
+enum LineType {
+ LT_Invalid,
+ LT_ImportStatement,
+ LT_ObjCDecl, // An @interface, @implementation, or @protocol line.
+ LT_ObjCMethodDecl,
+ LT_ObjCProperty, // An @property line.
+ LT_Other,
+ LT_PreprocessorDirective,
+ LT_VirtualFunctionDecl
+};
+
+class AnnotatedLine {
+public:
+ AnnotatedLine(const UnwrappedLine &Line)
+ : First(Line.Tokens.front().Tok), Level(Line.Level),
+ InPPDirective(Line.InPPDirective),
+ MustBeDeclaration(Line.MustBeDeclaration), MightBeFunctionDecl(false),
+ IsMultiVariableDeclStmt(false), Affected(false),
+ LeadingEmptyLinesAffected(false), ChildrenAffected(false) {
+ assert(!Line.Tokens.empty());
+
+ // Calculate Next and Previous for all tokens. Note that we must overwrite
+ // Next and Previous for every token, as previous formatting runs might have
+ // left them in a different state.
+ First->Previous = nullptr;
+ FormatToken *Current = First;
+ for (std::list<UnwrappedLineNode>::const_iterator I = ++Line.Tokens.begin(),
+ E = Line.Tokens.end();
+ I != E; ++I) {
+ const UnwrappedLineNode &Node = *I;
+ Current->Next = I->Tok;
+ I->Tok->Previous = Current;
+ Current = Current->Next;
+ Current->Children.clear();
+ for (const auto &Child : Node.Children) {
+ Children.push_back(new AnnotatedLine(Child));
+ Current->Children.push_back(Children.back());
+ }
+ }
+ Last = Current;
+ Last->Next = nullptr;
+ }
+
+ ~AnnotatedLine() {
+ for (unsigned i = 0, e = Children.size(); i != e; ++i) {
+ delete Children[i];
+ }
+ FormatToken *Current = First;
+ while (Current) {
+ Current->Children.clear();
+ Current->Role.reset();
+ Current = Current->Next;
+ }
+ }
+
+ /// \c true if this line starts with the given tokens in order, ignoring
+ /// comments.
+ template <typename... Ts> bool startsWith(Ts... Tokens) const {
+ return startsWith(First, Tokens...);
+ }
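+
+  // Example (sketch): for a line "export enum Foo {",
+  // startsWith(tok::kw_export, tok::kw_enum) is true; comments interleaved
+  // between the tokens are skipped.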
+
+ /// \c true if this line looks like a function definition instead of a
+ /// function declaration. Asserts MightBeFunctionDecl.
+ bool mightBeFunctionDefinition() const {
+ assert(MightBeFunctionDecl);
+    // FIXME: Line.Last can point to tokens other than tok::semi
+    // and tok::l_brace.
+ return !Last->isOneOf(tok::semi, tok::comment);
+ }
+
+ FormatToken *First;
+ FormatToken *Last;
+
+ SmallVector<AnnotatedLine *, 0> Children;
+
+ LineType Type;
+ unsigned Level;
+ bool InPPDirective;
+ bool MustBeDeclaration;
+ bool MightBeFunctionDecl;
+ bool IsMultiVariableDeclStmt;
+
+ /// \c True if this line should be formatted, i.e. intersects directly or
+ /// indirectly with one of the input ranges.
+ bool Affected;
+
+ /// \c True if the leading empty lines of this line intersect with one of the
+ /// input ranges.
+ bool LeadingEmptyLinesAffected;
+
+  /// \c True if one of this line's children intersects with an input range.
+ bool ChildrenAffected;
+
+private:
+ // Disallow copying.
+ AnnotatedLine(const AnnotatedLine &) = delete;
+ void operator=(const AnnotatedLine &) = delete;
+
+ template <typename A, typename... Ts>
+ bool startsWith(FormatToken *Tok, A K1) const {
+ while (Tok && Tok->is(tok::comment))
+ Tok = Tok->Next;
+ return Tok && Tok->is(K1);
+ }
+
+ template <typename A, typename... Ts>
+ bool startsWith(FormatToken *Tok, A K1, Ts... Tokens) const {
+ return startsWith(Tok, K1) && startsWith(Tok->Next, Tokens...);
+ }
+};
+
+/// \brief Determines extra information about the tokens comprising an
+/// \c UnwrappedLine.
+class TokenAnnotator {
+public:
+ TokenAnnotator(const FormatStyle &Style, const AdditionalKeywords &Keywords)
+ : Style(Style), Keywords(Keywords) {}
+
+ /// \brief Adapts the indent levels of comment lines to the indent of the
+ /// subsequent line.
+ // FIXME: Can/should this be done in the UnwrappedLineParser?
+ void setCommentLineLevels(SmallVectorImpl<AnnotatedLine *> &Lines);
+
+ void annotate(AnnotatedLine &Line);
+ void calculateFormattingInformation(AnnotatedLine &Line);
+
+private:
+ /// \brief Calculate the penalty for splitting before \c Tok.
+ unsigned splitPenalty(const AnnotatedLine &Line, const FormatToken &Tok,
+ bool InFunctionDecl);
+
+ bool spaceRequiredBetween(const AnnotatedLine &Line, const FormatToken &Left,
+ const FormatToken &Right);
+
+ bool spaceRequiredBefore(const AnnotatedLine &Line, const FormatToken &Tok);
+
+ bool mustBreakBefore(const AnnotatedLine &Line, const FormatToken &Right);
+
+ bool canBreakBefore(const AnnotatedLine &Line, const FormatToken &Right);
+
+ bool mustBreakForReturnType(const AnnotatedLine &Line) const;
+
+ void printDebugInfo(const AnnotatedLine &Line);
+
+ void calculateUnbreakableTailLengths(AnnotatedLine &Line);
+
+ const FormatStyle &Style;
+
+ const AdditionalKeywords &Keywords;
+};
+
+} // end namespace format
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Format/UnwrappedLineFormatter.cpp b/contrib/llvm/tools/clang/lib/Format/UnwrappedLineFormatter.cpp
new file mode 100644
index 0000000..f650569
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Format/UnwrappedLineFormatter.cpp
@@ -0,0 +1,965 @@
+//===--- UnwrappedLineFormatter.cpp - Format C++ code ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "UnwrappedLineFormatter.h"
+#include "WhitespaceManager.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "format-formatter"
+
+namespace clang {
+namespace format {
+
+namespace {
+
+bool startsExternCBlock(const AnnotatedLine &Line) {
+ const FormatToken *Next = Line.First->getNextNonComment();
+ const FormatToken *NextNext = Next ? Next->getNextNonComment() : nullptr;
+ return Line.startsWith(tok::kw_extern) && Next && Next->isStringLiteral() &&
+ NextNext && NextNext->is(tok::l_brace);
+}
+
+/// \brief Tracks the indent level of \c AnnotatedLines across levels.
+///
+/// \c nextLine must be called for each \c AnnotatedLine, after which \c
+/// getIndent() will return the indent for the last line \c nextLine was called
+/// with.
+/// If the line is not formatted (and thus the indent does not change), calling
+/// \c adjustToUnmodifiedLine after the call to \c nextLine will cause
+/// subsequent lines on the same level to be indented at the same level as the
+/// given line.
+class LevelIndentTracker {
+public:
+ LevelIndentTracker(const FormatStyle &Style,
+ const AdditionalKeywords &Keywords, unsigned StartLevel,
+ int AdditionalIndent)
+ : Style(Style), Keywords(Keywords), AdditionalIndent(AdditionalIndent) {
+ for (unsigned i = 0; i != StartLevel; ++i)
+ IndentForLevel.push_back(Style.IndentWidth * i + AdditionalIndent);
+ }
+
+ /// \brief Returns the indent for the current line.
+ unsigned getIndent() const { return Indent; }
+
+ /// \brief Update the indent state given that \p Line is going to be formatted
+ /// next.
+ void nextLine(const AnnotatedLine &Line) {
+ Offset = getIndentOffset(*Line.First);
+ // Update the indent level cache size so that we can rely on it
+    // having the right size in adjustToUnmodifiedLine.
+ while (IndentForLevel.size() <= Line.Level)
+ IndentForLevel.push_back(-1);
+ if (Line.InPPDirective) {
+ Indent = Line.Level * Style.IndentWidth + AdditionalIndent;
+ } else {
+ IndentForLevel.resize(Line.Level + 1);
+ Indent = getIndent(IndentForLevel, Line.Level);
+ }
+ if (static_cast<int>(Indent) + Offset >= 0)
+ Indent += Offset;
+ }
+
+ /// \brief Update the level indent to adapt to the given \p Line.
+ ///
+ /// When a line is not formatted, we move the subsequent lines on the same
+ /// level to the same indent.
+ /// Note that \c nextLine must have been called before this method.
+ void adjustToUnmodifiedLine(const AnnotatedLine &Line) {
+ unsigned LevelIndent = Line.First->OriginalColumn;
+ if (static_cast<int>(LevelIndent) - Offset >= 0)
+ LevelIndent -= Offset;
+ if ((!Line.First->is(tok::comment) || IndentForLevel[Line.Level] == -1) &&
+ !Line.InPPDirective)
+ IndentForLevel[Line.Level] = LevelIndent;
+ }
+
+private:
+ /// \brief Get the offset of the line relatively to the level.
+ ///
+ /// For example, 'public:' labels in classes are offset by 1 or 2
+ /// characters to the left from their level.
+ int getIndentOffset(const FormatToken &RootToken) {
+ if (Style.Language == FormatStyle::LK_Java ||
+ Style.Language == FormatStyle::LK_JavaScript)
+ return 0;
+ if (RootToken.isAccessSpecifier(false) ||
+ RootToken.isObjCAccessSpecifier() ||
+ (RootToken.isOneOf(Keywords.kw_signals, Keywords.kw_qsignals) &&
+ RootToken.Next && RootToken.Next->is(tok::colon)))
+ return Style.AccessModifierOffset;
+ return 0;
+ }
+
+ /// \brief Get the indent of \p Level from \p IndentForLevel.
+ ///
+ /// \p IndentForLevel must contain the indent for the level \c l
+ /// at \p IndentForLevel[l], or a value < 0 if the indent for
+ /// that level is unknown.
+ unsigned getIndent(ArrayRef<int> IndentForLevel, unsigned Level) {
+ if (IndentForLevel[Level] != -1)
+ return IndentForLevel[Level];
+ if (Level == 0)
+ return 0;
+ return getIndent(IndentForLevel, Level - 1) + Style.IndentWidth;
+ }
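+
+  // Example (sketch): with IndentForLevel == {0, -1, -1} and IndentWidth == 2,
+  // getIndent(IndentForLevel, 2) recurses down to level 0 and yields 4.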
+
+ const FormatStyle &Style;
+ const AdditionalKeywords &Keywords;
+ const unsigned AdditionalIndent;
+
+ /// \brief The indent in characters for each level.
+ std::vector<int> IndentForLevel;
+
+ /// \brief Offset of the current line relative to the indent level.
+ ///
+  /// For example, the 'public' keyword is often indented with a negative
+ /// offset.
+ int Offset = 0;
+
+ /// \brief The current line's indent.
+ unsigned Indent = 0;
+};
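+
+// Usage sketch for the tracker above:
+//
+//   LevelIndentTracker Tracker(Style, Keywords, /*StartLevel=*/0,
+//                              /*AdditionalIndent=*/0);
+//   Tracker.nextLine(Line);
+//   unsigned Indent = Tracker.getIndent();
+//   // If Line is left unformatted:
+//   Tracker.adjustToUnmodifiedLine(Line);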
+
+class LineJoiner {
+public:
+ LineJoiner(const FormatStyle &Style, const AdditionalKeywords &Keywords,
+ const SmallVectorImpl<AnnotatedLine *> &Lines)
+ : Style(Style), Keywords(Keywords), End(Lines.end()),
+ Next(Lines.begin()) {}
+
+ /// \brief Returns the next line, merging multiple lines into one if possible.
+ const AnnotatedLine *getNextMergedLine(bool DryRun,
+ LevelIndentTracker &IndentTracker) {
+ if (Next == End)
+ return nullptr;
+ const AnnotatedLine *Current = *Next;
+ IndentTracker.nextLine(*Current);
+ unsigned MergedLines =
+ tryFitMultipleLinesInOne(IndentTracker.getIndent(), Next, End);
+ if (MergedLines > 0 && Style.ColumnLimit == 0)
+ // Disallow line merging if there is a break at the start of one of the
+ // input lines.
+ for (unsigned i = 0; i < MergedLines; ++i)
+ if (Next[i + 1]->First->NewlinesBefore > 0)
+ MergedLines = 0;
+ if (!DryRun)
+ for (unsigned i = 0; i < MergedLines; ++i)
+ join(*Next[i], *Next[i + 1]);
+ Next = Next + MergedLines + 1;
+ return Current;
+ }
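+
+  // Example (sketch): given "if (a)" followed by "return;", the joiner may
+  // hand back a single merged line "if (a) return;" (depending on style
+  // flags) and advances Next past every line it consumed.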
+
+private:
+ /// \brief Calculates how many lines can be merged into 1 starting at \p I.
+ unsigned
+ tryFitMultipleLinesInOne(unsigned Indent,
+ SmallVectorImpl<AnnotatedLine *>::const_iterator I,
+ SmallVectorImpl<AnnotatedLine *>::const_iterator E) {
+ // Can't join the last line with anything.
+ if (I + 1 == E)
+ return 0;
+ // We can never merge stuff if there are trailing line comments.
+ const AnnotatedLine *TheLine = *I;
+ if (TheLine->Last->is(TT_LineComment))
+ return 0;
+ if (I[1]->Type == LT_Invalid || I[1]->First->MustBreakBefore)
+ return 0;
+ if (TheLine->InPPDirective &&
+ (!I[1]->InPPDirective || I[1]->First->HasUnescapedNewline))
+ return 0;
+
+ if (Style.ColumnLimit > 0 && Indent > Style.ColumnLimit)
+ return 0;
+
+ unsigned Limit =
+ Style.ColumnLimit == 0 ? UINT_MAX : Style.ColumnLimit - Indent;
+ // If we already exceed the column limit, we set 'Limit' to 0. The different
+ // tryMerge..() functions can then decide whether to still do merging.
+ Limit = TheLine->Last->TotalLength > Limit
+ ? 0
+ : Limit - TheLine->Last->TotalLength;
+
+ // FIXME: TheLine->Level != 0 might or might not be the right check to do.
+ // If necessary, change to something smarter.
+ bool MergeShortFunctions =
+ Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_All ||
+ (Style.AllowShortFunctionsOnASingleLine >= FormatStyle::SFS_Empty &&
+ I[1]->First->is(tok::r_brace)) ||
+ (Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_Inline &&
+ TheLine->Level != 0);
+
+ if (TheLine->Last->is(TT_FunctionLBrace) &&
+ TheLine->First != TheLine->Last) {
+ return MergeShortFunctions ? tryMergeSimpleBlock(I, E, Limit) : 0;
+ }
+ if (TheLine->Last->is(tok::l_brace)) {
+ return !Style.BraceWrapping.AfterFunction
+ ? tryMergeSimpleBlock(I, E, Limit)
+ : 0;
+ }
+ if (I[1]->First->is(TT_FunctionLBrace) &&
+ Style.BraceWrapping.AfterFunction) {
+ if (I[1]->Last->is(TT_LineComment))
+ return 0;
+
+ // Check for Limit <= 2 to account for the " {".
+ if (Limit <= 2 || (Style.ColumnLimit == 0 && containsMustBreak(TheLine)))
+ return 0;
+ Limit -= 2;
+
+ unsigned MergedLines = 0;
+ if (MergeShortFunctions) {
+ MergedLines = tryMergeSimpleBlock(I + 1, E, Limit);
+ // If we managed to merge the block, count the function header, which is
+ // on a separate line.
+ if (MergedLines > 0)
+ ++MergedLines;
+ }
+ return MergedLines;
+ }
+ if (TheLine->First->is(tok::kw_if)) {
+ return Style.AllowShortIfStatementsOnASingleLine
+ ? tryMergeSimpleControlStatement(I, E, Limit)
+ : 0;
+ }
+ if (TheLine->First->isOneOf(tok::kw_for, tok::kw_while)) {
+ return Style.AllowShortLoopsOnASingleLine
+ ? tryMergeSimpleControlStatement(I, E, Limit)
+ : 0;
+ }
+ if (TheLine->First->isOneOf(tok::kw_case, tok::kw_default)) {
+ return Style.AllowShortCaseLabelsOnASingleLine
+ ? tryMergeShortCaseLabels(I, E, Limit)
+ : 0;
+ }
+ if (TheLine->InPPDirective &&
+ (TheLine->First->HasUnescapedNewline || TheLine->First->IsFirst)) {
+ return tryMergeSimplePPDirective(I, E, Limit);
+ }
+ return 0;
+ }
+
+ unsigned
+ tryMergeSimplePPDirective(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
+ SmallVectorImpl<AnnotatedLine *>::const_iterator E,
+ unsigned Limit) {
+ if (Limit == 0)
+ return 0;
+ if (I + 2 != E && I[2]->InPPDirective && !I[2]->First->HasUnescapedNewline)
+ return 0;
+ if (1 + I[1]->Last->TotalLength > Limit)
+ return 0;
+ return 1;
+ }
+
+ unsigned tryMergeSimpleControlStatement(
+ SmallVectorImpl<AnnotatedLine *>::const_iterator I,
+ SmallVectorImpl<AnnotatedLine *>::const_iterator E, unsigned Limit) {
+ if (Limit == 0)
+ return 0;
+ if (Style.BraceWrapping.AfterControlStatement &&
+ (I[1]->First->is(tok::l_brace) && !Style.AllowShortBlocksOnASingleLine))
+ return 0;
+ if (I[1]->InPPDirective != (*I)->InPPDirective ||
+ (I[1]->InPPDirective && I[1]->First->HasUnescapedNewline))
+ return 0;
+ Limit = limitConsideringMacros(I + 1, E, Limit);
+ AnnotatedLine &Line = **I;
+ if (Line.Last->isNot(tok::r_paren))
+ return 0;
+ if (1 + I[1]->Last->TotalLength > Limit)
+ return 0;
+ if (I[1]->First->isOneOf(tok::semi, tok::kw_if, tok::kw_for, tok::kw_while,
+ TT_LineComment))
+ return 0;
+ // Only inline simple if's (no nested if or else).
+ if (I + 2 != E && Line.startsWith(tok::kw_if) &&
+ I[2]->First->is(tok::kw_else))
+ return 0;
+ return 1;
+ }
+
+ unsigned
+ tryMergeShortCaseLabels(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
+ SmallVectorImpl<AnnotatedLine *>::const_iterator E,
+ unsigned Limit) {
+ if (Limit == 0 || I + 1 == E ||
+ I[1]->First->isOneOf(tok::kw_case, tok::kw_default))
+ return 0;
+ unsigned NumStmts = 0;
+ unsigned Length = 0;
+ bool InPPDirective = I[0]->InPPDirective;
+ for (; NumStmts < 3; ++NumStmts) {
+ if (I + 1 + NumStmts == E)
+ break;
+ const AnnotatedLine *Line = I[1 + NumStmts];
+ if (Line->InPPDirective != InPPDirective)
+ break;
+ if (Line->First->isOneOf(tok::kw_case, tok::kw_default, tok::r_brace))
+ break;
+ if (Line->First->isOneOf(tok::kw_if, tok::kw_for, tok::kw_switch,
+ tok::kw_while, tok::comment) ||
+ Line->Last->is(tok::comment))
+ return 0;
+ Length += I[1 + NumStmts]->Last->TotalLength + 1; // 1 for the space.
+ }
+ if (NumStmts == 0 || NumStmts == 3 || Length > Limit)
+ return 0;
+ return NumStmts;
+ }
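+
+  // Example (sketch): with AllowShortCaseLabelsOnASingleLine, at most two
+  // statements are merged, so
+  //
+  //   case 1:
+  //     x = 1;
+  //     break;
+  //
+  // can become "case 1: x = 1; break;" if it fits the column limit.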
+
+ unsigned
+ tryMergeSimpleBlock(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
+ SmallVectorImpl<AnnotatedLine *>::const_iterator E,
+ unsigned Limit) {
+ AnnotatedLine &Line = **I;
+
+ // Don't merge ObjC @ keywords and methods.
+ // FIXME: If an option to allow short exception handling clauses on a single
+ // line is added, change this to not return for @try and friends.
+ if (Style.Language != FormatStyle::LK_Java &&
+ Line.First->isOneOf(tok::at, tok::minus, tok::plus))
+ return 0;
+
+ // Check that the current line allows merging. This depends on whether we
+    // are in a control flow statement as well as on several style flags.
+ if (Line.First->isOneOf(tok::kw_else, tok::kw_case) ||
+ (Line.First->Next && Line.First->Next->is(tok::kw_else)))
+ return 0;
+ if (Line.First->isOneOf(tok::kw_if, tok::kw_while, tok::kw_do, tok::kw_try,
+ tok::kw___try, tok::kw_catch, tok::kw___finally,
+ tok::kw_for, tok::r_brace, Keywords.kw___except)) {
+ if (!Style.AllowShortBlocksOnASingleLine)
+ return 0;
+ if (!Style.AllowShortIfStatementsOnASingleLine &&
+ Line.startsWith(tok::kw_if))
+ return 0;
+ if (!Style.AllowShortLoopsOnASingleLine &&
+ Line.First->isOneOf(tok::kw_while, tok::kw_do, tok::kw_for))
+ return 0;
+ // FIXME: Consider an option to allow short exception handling clauses on
+ // a single line.
+ // FIXME: This isn't covered by tests.
+ // FIXME: For catch, __except, __finally the first token on the line
+ // is '}', so this isn't correct here.
+ if (Line.First->isOneOf(tok::kw_try, tok::kw___try, tok::kw_catch,
+ Keywords.kw___except, tok::kw___finally))
+ return 0;
+ }
+
+ FormatToken *Tok = I[1]->First;
+ if (Tok->is(tok::r_brace) && !Tok->MustBreakBefore &&
+ (Tok->getNextNonComment() == nullptr ||
+ Tok->getNextNonComment()->is(tok::semi))) {
+ // We merge empty blocks even if the line exceeds the column limit.
+ Tok->SpacesRequiredBefore = 0;
+ Tok->CanBreakBefore = true;
+ return 1;
+ } else if (Limit != 0 && !Line.startsWith(tok::kw_namespace) &&
+ !startsExternCBlock(Line)) {
+ // We don't merge short records.
+ if (Line.First->isOneOf(tok::kw_class, tok::kw_union, tok::kw_struct,
+ Keywords.kw_interface))
+ return 0;
+
+ // Check that we still have three lines and they fit into the limit.
+ if (I + 2 == E || I[2]->Type == LT_Invalid)
+ return 0;
+ Limit = limitConsideringMacros(I + 2, E, Limit);
+
+ if (!nextTwoLinesFitInto(I, Limit))
+ return 0;
+
+ // Second, check that the next line does not contain any braces - if it
+ // does, readability declines when putting it into a single line.
+ if (I[1]->Last->is(TT_LineComment))
+ return 0;
+ do {
+ if (Tok->is(tok::l_brace) && Tok->BlockKind != BK_BracedInit)
+ return 0;
+ Tok = Tok->Next;
+ } while (Tok);
+
+ // Last, check that the third line starts with a closing brace.
+ Tok = I[2]->First;
+ if (Tok->isNot(tok::r_brace))
+ return 0;
+
+ // Don't merge "if (a) { .. } else {".
+ if (Tok->Next && Tok->Next->is(tok::kw_else))
+ return 0;
+
+ return 2;
+ }
+ return 0;
+ }
+
+ /// Returns the modified column limit for \p I if it is inside a macro and
+ /// needs a trailing '\'.
+ unsigned
+ limitConsideringMacros(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
+ SmallVectorImpl<AnnotatedLine *>::const_iterator E,
+ unsigned Limit) {
+ if (I[0]->InPPDirective && I + 1 != E &&
+ !I[1]->First->HasUnescapedNewline && !I[1]->First->is(tok::eof)) {
+ return Limit < 2 ? 0 : Limit - 2;
+ }
+ return Limit;
+ }
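+
+  // Example (sketch): inside a "#define" body every line must leave room for
+  // the trailing " \", hence the limit is reduced by 2.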
+
+ bool nextTwoLinesFitInto(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
+ unsigned Limit) {
+ if (I[1]->First->MustBreakBefore || I[2]->First->MustBreakBefore)
+ return false;
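+    // Each "1 +" accounts for the space that will join the merged lines.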
+ return 1 + I[1]->Last->TotalLength + 1 + I[2]->Last->TotalLength <= Limit;
+ }
+
+ bool containsMustBreak(const AnnotatedLine *Line) {
+ for (const FormatToken *Tok = Line->First; Tok; Tok = Tok->Next) {
+ if (Tok->MustBreakBefore)
+ return true;
+ }
+ return false;
+ }
+
+ void join(AnnotatedLine &A, const AnnotatedLine &B) {
+ assert(!A.Last->Next);
+ assert(!B.First->Previous);
+ if (B.Affected)
+ A.Affected = true;
+ A.Last->Next = B.First;
+ B.First->Previous = A.Last;
+ B.First->CanBreakBefore = true;
+ unsigned LengthA = A.Last->TotalLength + B.First->SpacesRequiredBefore;
+ for (FormatToken *Tok = B.First; Tok; Tok = Tok->Next) {
+ Tok->TotalLength += LengthA;
+ A.Last = Tok;
+ }
+ }
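+
+  // Example (sketch): joining "if (a)" and "return;" links B's tokens onto A
+  // and shifts each of B's TotalLength values by A's length plus the
+  // separating space, keeping column accounting consistent.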
+
+ const FormatStyle &Style;
+ const AdditionalKeywords &Keywords;
+ const SmallVectorImpl<AnnotatedLine *>::const_iterator End;
+
+ SmallVectorImpl<AnnotatedLine *>::const_iterator Next;
+};
+
+static void markFinalized(FormatToken *Tok) {
+ for (; Tok; Tok = Tok->Next) {
+ Tok->Finalized = true;
+ for (AnnotatedLine *Child : Tok->Children)
+ markFinalized(Child->First);
+ }
+}
+
+#ifndef NDEBUG
+static void printLineState(const LineState &State) {
+ llvm::dbgs() << "State: ";
+ for (const ParenState &P : State.Stack) {
+ llvm::dbgs() << P.Indent << "|" << P.LastSpace << "|" << P.NestedBlockIndent
+ << " ";
+ }
+ llvm::dbgs() << State.NextToken->TokenText << "\n";
+}
+#endif
+
+/// \brief Base class for classes that format one \c AnnotatedLine.
+class LineFormatter {
+public:
+ LineFormatter(ContinuationIndenter *Indenter, WhitespaceManager *Whitespaces,
+ const FormatStyle &Style,
+ UnwrappedLineFormatter *BlockFormatter)
+ : Indenter(Indenter), Whitespaces(Whitespaces), Style(Style),
+ BlockFormatter(BlockFormatter) {}
+ virtual ~LineFormatter() {}
+
+ /// \brief Formats an \c AnnotatedLine and returns the penalty.
+ ///
+ /// If \p DryRun is \c false, directly applies the changes.
+ virtual unsigned formatLine(const AnnotatedLine &Line, unsigned FirstIndent,
+ bool DryRun) = 0;
+
+protected:
+ /// \brief If the \p State's next token is an r_brace closing a nested block,
+ /// format the nested block before it.
+ ///
+ /// Returns \c true if all children could be placed successfully and adapts
+ /// \p Penalty as well as \p State. If \p DryRun is false, also directly
+ /// creates changes using \c Whitespaces.
+ ///
+ /// The crucial idea here is that children always get formatted upon
+ /// encountering the closing brace right after the nested block. Now, if we
+ /// are currently trying to keep the "}" on the same line (i.e. \p NewLine is
+ /// \c false), the entire block has to be kept on the same line (which is only
+  /// possible if it fits on the line, only contains a single statement, etc.).
+ ///
+ /// If \p NewLine is true, we format the nested block on separate lines, i.e.
+  /// break after the "{", format all lines with correct indentation and then put
+ /// the closing "}" on yet another new line.
+ ///
+ /// This enables us to keep the simple structure of the
+ /// \c UnwrappedLineFormatter, where we only have two options for each token:
+ /// break or don't break.
+ bool formatChildren(LineState &State, bool NewLine, bool DryRun,
+ unsigned &Penalty) {
+ const FormatToken *LBrace = State.NextToken->getPreviousNonComment();
+ FormatToken &Previous = *State.NextToken->Previous;
+ if (!LBrace || LBrace->isNot(tok::l_brace) ||
+ LBrace->BlockKind != BK_Block || Previous.Children.size() == 0)
+ // The previous token does not open a block. Nothing to do. We don't
+ // assert so that we can simply call this function for all tokens.
+ return true;
+
+ if (NewLine) {
+ int AdditionalIndent = State.Stack.back().Indent -
+ Previous.Children[0]->Level * Style.IndentWidth;
+
+ Penalty +=
+ BlockFormatter->format(Previous.Children, DryRun, AdditionalIndent,
+ /*FixBadIndentation=*/true);
+ return true;
+ }
+
+ if (Previous.Children[0]->First->MustBreakBefore)
+ return false;
+
+ // Cannot merge multiple statements into a single line.
+ if (Previous.Children.size() > 1)
+ return false;
+
+ // Cannot merge into one line if this line ends on a comment.
+ if (Previous.is(tok::comment))
+ return false;
+
+ // We can't put the closing "}" on a line with a trailing comment.
+ if (Previous.Children[0]->Last->isTrailingComment())
+ return false;
+
+ // If the child line exceeds the column limit, we wouldn't want to merge it.
+ // We add +2 for the trailing " }".
+ if (Style.ColumnLimit > 0 &&
+ Previous.Children[0]->Last->TotalLength + State.Column + 2 >
+ Style.ColumnLimit)
+ return false;
+
+ if (!DryRun) {
+ Whitespaces->replaceWhitespace(
+ *Previous.Children[0]->First,
+ /*Newlines=*/0, /*IndentLevel=*/0, /*Spaces=*/1,
+ /*StartOfTokenColumn=*/State.Column, State.Line->InPPDirective);
+ }
+ Penalty += formatLine(*Previous.Children[0], State.Column + 1, DryRun);
+
+ State.Column += 1 + Previous.Children[0]->Last->TotalLength;
+ return true;
+ }
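+
+  // Example (sketch): in "DEBUG({ printDebugInfo(Line); });" the nested block
+  // is formatted when its closing "}" is reached; it may stay on one line
+  // only because it holds a single short statement that fits the limit.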
+
+ ContinuationIndenter *Indenter;
+
+private:
+ WhitespaceManager *Whitespaces;
+ const FormatStyle &Style;
+ UnwrappedLineFormatter *BlockFormatter;
+};
+
+/// \brief Formatter that keeps the existing line breaks.
+class NoColumnLimitLineFormatter : public LineFormatter {
+public:
+ NoColumnLimitLineFormatter(ContinuationIndenter *Indenter,
+ WhitespaceManager *Whitespaces,
+ const FormatStyle &Style,
+ UnwrappedLineFormatter *BlockFormatter)
+ : LineFormatter(Indenter, Whitespaces, Style, BlockFormatter) {}
+
+ /// \brief Formats the line, simply keeping all of the input's line breaking
+ /// decisions.
+ unsigned formatLine(const AnnotatedLine &Line, unsigned FirstIndent,
+ bool DryRun) override {
+ assert(!DryRun);
+ LineState State =
+ Indenter->getInitialState(FirstIndent, &Line, /*DryRun=*/false);
+ while (State.NextToken) {
+ bool Newline =
+ Indenter->mustBreak(State) ||
+ (Indenter->canBreak(State) && State.NextToken->NewlinesBefore > 0);
+ unsigned Penalty = 0;
+ formatChildren(State, Newline, /*DryRun=*/false, Penalty);
+ Indenter->addTokenToState(State, Newline, /*DryRun=*/false);
+ }
+ return 0;
+ }
+};
+
+/// \brief Formatter that puts all tokens into a single line without breaks.
+class NoLineBreakFormatter : public LineFormatter {
+public:
+ NoLineBreakFormatter(ContinuationIndenter *Indenter,
+ WhitespaceManager *Whitespaces, const FormatStyle &Style,
+ UnwrappedLineFormatter *BlockFormatter)
+ : LineFormatter(Indenter, Whitespaces, Style, BlockFormatter) {}
+
+ /// \brief Puts all tokens into a single line.
+ unsigned formatLine(const AnnotatedLine &Line, unsigned FirstIndent,
+ bool DryRun) override {
+ unsigned Penalty = 0;
+ LineState State = Indenter->getInitialState(FirstIndent, &Line, DryRun);
+ while (State.NextToken) {
+ formatChildren(State, /*Newline=*/false, DryRun, Penalty);
+ Indenter->addTokenToState(State, /*Newline=*/false, DryRun);
+ }
+ return Penalty;
+ }
+};
+
+/// \brief Finds the best way to break lines.
+class OptimizingLineFormatter : public LineFormatter {
+public:
+ OptimizingLineFormatter(ContinuationIndenter *Indenter,
+ WhitespaceManager *Whitespaces,
+ const FormatStyle &Style,
+ UnwrappedLineFormatter *BlockFormatter)
+ : LineFormatter(Indenter, Whitespaces, Style, BlockFormatter) {}
+
+ /// \brief Formats the line by finding the best line breaks with line lengths
+ /// below the column limit.
+ unsigned formatLine(const AnnotatedLine &Line, unsigned FirstIndent,
+ bool DryRun) override {
+ LineState State = Indenter->getInitialState(FirstIndent, &Line, DryRun);
+
+ // If the ObjC method declaration does not fit on a line, we should format
+ // it with one arg per line.
+ if (State.Line->Type == LT_ObjCMethodDecl)
+ State.Stack.back().BreakBeforeParameter = true;
+
+ // Find best solution in solution space.
+ return analyzeSolutionSpace(State, DryRun);
+ }
+
+private:
+ struct CompareLineStatePointers {
+ bool operator()(LineState *obj1, LineState *obj2) const {
+ return *obj1 < *obj2;
+ }
+ };
+
+  /// \brief A pair of <penalty, count> used to prioritize states in the BFS.
+ ///
+ /// In case of equal penalties, we want to prefer states that were inserted
+ /// first. During state generation we make sure that we insert states first
+ /// that break the line as late as possible.
+ typedef std::pair<unsigned, unsigned> OrderedPenalty;
+
+ /// \brief An edge in the solution space from \c Previous->State to \c State,
+  /// inserting a newline depending on \c NewLine.
+ struct StateNode {
+ StateNode(const LineState &State, bool NewLine, StateNode *Previous)
+ : State(State), NewLine(NewLine), Previous(Previous) {}
+ LineState State;
+ bool NewLine;
+ StateNode *Previous;
+ };
+
+ /// \brief An item in the prioritized BFS search queue. The \c StateNode's
+ /// \c State has the given \c OrderedPenalty.
+ typedef std::pair<OrderedPenalty, StateNode *> QueueItem;
+
+ /// \brief The BFS queue type.
+ typedef std::priority_queue<QueueItem, std::vector<QueueItem>,
+ std::greater<QueueItem>> QueueType;
+
+ /// \brief Analyze the entire solution space starting from \p InitialState.
+ ///
+ /// This implements a variant of Dijkstra's algorithm on the graph that spans
+ /// the solution space (\c LineStates are the nodes). The algorithm tries to
+ /// find the shortest path (the one with lowest penalty) from \p InitialState
+ /// to a state where all tokens are placed. Returns the penalty.
+ ///
+ /// If \p DryRun is \c false, directly applies the changes.
+ unsigned analyzeSolutionSpace(LineState &InitialState, bool DryRun) {
+ std::set<LineState *, CompareLineStatePointers> Seen;
+
+ // Increasing count of \c StateNode items we have created. This is used to
+ // create a deterministic order independent of the container.
+ unsigned Count = 0;
+ QueueType Queue;
+
+ // Insert start element into queue.
+ StateNode *Node =
+ new (Allocator.Allocate()) StateNode(InitialState, false, nullptr);
+ Queue.push(QueueItem(OrderedPenalty(0, Count), Node));
+ ++Count;
+
+ unsigned Penalty = 0;
+
+ // While not empty, take first element and follow edges.
+ while (!Queue.empty()) {
+ Penalty = Queue.top().first.first;
+ StateNode *Node = Queue.top().second;
+ if (!Node->State.NextToken) {
+ DEBUG(llvm::dbgs() << "\n---\nPenalty for line: " << Penalty << "\n");
+ break;
+ }
+ Queue.pop();
+
+ // Cut off the analysis of certain solutions if the analysis gets too
+ // complex. See description of IgnoreStackForComparison.
+ if (Count > 50000)
+ Node->State.IgnoreStackForComparison = true;
+
+ if (!Seen.insert(&Node->State).second)
+ // State already examined with lower penalty.
+ continue;
+
+ FormatDecision LastFormat = Node->State.NextToken->Decision;
+ if (LastFormat == FD_Unformatted || LastFormat == FD_Continue)
+ addNextStateToQueue(Penalty, Node, /*NewLine=*/false, &Count, &Queue);
+ if (LastFormat == FD_Unformatted || LastFormat == FD_Break)
+ addNextStateToQueue(Penalty, Node, /*NewLine=*/true, &Count, &Queue);
+ }
+
+ if (Queue.empty()) {
+ // We were unable to find a solution, do nothing.
+ // FIXME: Add diagnostic?
+ DEBUG(llvm::dbgs() << "Could not find a solution.\n");
+ return 0;
+ }
+
+ // Reconstruct the solution.
+ if (!DryRun)
+ reconstructPath(InitialState, Queue.top().second);
+
+ DEBUG(llvm::dbgs() << "Total number of analyzed states: " << Count << "\n");
+ DEBUG(llvm::dbgs() << "---\n");
+
+ return Penalty;
+ }
+
+ /// \brief Add the following state to the analysis queue \c Queue.
+ ///
+ /// Assume the current state is \p PreviousNode and has been reached with a
+ /// penalty of \p Penalty. Insert a line break if \p NewLine is \c true.
+ void addNextStateToQueue(unsigned Penalty, StateNode *PreviousNode,
+ bool NewLine, unsigned *Count, QueueType *Queue) {
+ if (NewLine && !Indenter->canBreak(PreviousNode->State))
+ return;
+ if (!NewLine && Indenter->mustBreak(PreviousNode->State))
+ return;
+
+ StateNode *Node = new (Allocator.Allocate())
+ StateNode(PreviousNode->State, NewLine, PreviousNode);
+ if (!formatChildren(Node->State, NewLine, /*DryRun=*/true, Penalty))
+ return;
+
+ Penalty += Indenter->addTokenToState(Node->State, NewLine, true);
+
+ Queue->push(QueueItem(OrderedPenalty(Penalty, *Count), Node));
+ ++(*Count);
+ }
+
+ /// \brief Applies the best formatting by reconstructing the path in the
+ /// solution space that leads to \c Best.
+ void reconstructPath(LineState &State, StateNode *Best) {
+ std::deque<StateNode *> Path;
+ // We do not need a break before the initial token.
+ while (Best->Previous) {
+ Path.push_front(Best);
+ Best = Best->Previous;
+ }
+ for (std::deque<StateNode *>::iterator I = Path.begin(), E = Path.end();
+ I != E; ++I) {
+ unsigned Penalty = 0;
+ formatChildren(State, (*I)->NewLine, /*DryRun=*/false, Penalty);
+ Penalty += Indenter->addTokenToState(State, (*I)->NewLine, false);
+
+ DEBUG({
+ printLineState((*I)->Previous->State);
+ if ((*I)->NewLine) {
+ llvm::dbgs() << "Penalty for placing "
+ << (*I)->Previous->State.NextToken->Tok.getName() << ": "
+ << Penalty << "\n";
+ }
+ });
+ }
+ }
+
+ llvm::SpecificBumpPtrAllocator<StateNode> Allocator;
+};
+
+} // anonymous namespace
+
+unsigned
+UnwrappedLineFormatter::format(const SmallVectorImpl<AnnotatedLine *> &Lines,
+ bool DryRun, int AdditionalIndent,
+ bool FixBadIndentation) {
+ LineJoiner Joiner(Style, Keywords, Lines);
+
+  // In DryRun mode, try to look up an already computed penalty.
+ std::pair<const SmallVectorImpl<AnnotatedLine *> *, unsigned> CacheKey(
+ &Lines, AdditionalIndent);
+ auto CacheIt = PenaltyCache.find(CacheKey);
+ if (DryRun && CacheIt != PenaltyCache.end())
+ return CacheIt->second;
+
+ assert(!Lines.empty());
+ unsigned Penalty = 0;
+ LevelIndentTracker IndentTracker(Style, Keywords, Lines[0]->Level,
+ AdditionalIndent);
+ const AnnotatedLine *PreviousLine = nullptr;
+ const AnnotatedLine *NextLine = nullptr;
+
+ // The minimum level of consecutive lines that have been formatted.
+ unsigned RangeMinLevel = UINT_MAX;
+
+ for (const AnnotatedLine *Line =
+ Joiner.getNextMergedLine(DryRun, IndentTracker);
+ Line; Line = NextLine) {
+ const AnnotatedLine &TheLine = *Line;
+ unsigned Indent = IndentTracker.getIndent();
+
+ // We continue formatting unchanged lines to adjust their indent, e.g. if a
+ // scope was added. However, we need to carefully stop doing this when we
+    // exit the scope of affected lines to prevent indenting the entire
+    // remaining file if it is currently missing a closing brace.
+ bool ContinueFormatting =
+ TheLine.Level > RangeMinLevel ||
+ (TheLine.Level == RangeMinLevel && !TheLine.startsWith(tok::r_brace));
+
+ bool FixIndentation = (FixBadIndentation || ContinueFormatting) &&
+ Indent != TheLine.First->OriginalColumn;
+ bool ShouldFormat = TheLine.Affected || FixIndentation;
+ // We cannot format this line; if the reason is that the line had a
+ // parsing error, remember that.
+ if (ShouldFormat && TheLine.Type == LT_Invalid && IncompleteFormat)
+ *IncompleteFormat = true;
+
+ if (ShouldFormat && TheLine.Type != LT_Invalid) {
+ if (!DryRun)
+ formatFirstToken(*TheLine.First, PreviousLine, TheLine.Level, Indent,
+ TheLine.InPPDirective);
+
+ NextLine = Joiner.getNextMergedLine(DryRun, IndentTracker);
+ unsigned ColumnLimit = getColumnLimit(TheLine.InPPDirective, NextLine);
+ bool FitsIntoOneLine =
+ TheLine.Last->TotalLength + Indent <= ColumnLimit ||
+ TheLine.Type == LT_ImportStatement;
+
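+      // Pick a formatting strategy: with no column limit, only the existing
+      // line breaks are kept; a line that already fits is put on a single
+      // line; everything else goes through the optimizing formatter.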
+ if (Style.ColumnLimit == 0)
+ NoColumnLimitLineFormatter(Indenter, Whitespaces, Style, this)
+ .formatLine(TheLine, Indent, DryRun);
+ else if (FitsIntoOneLine)
+ Penalty += NoLineBreakFormatter(Indenter, Whitespaces, Style, this)
+ .formatLine(TheLine, Indent, DryRun);
+ else
+ Penalty += OptimizingLineFormatter(Indenter, Whitespaces, Style, this)
+ .formatLine(TheLine, Indent, DryRun);
+ RangeMinLevel = std::min(RangeMinLevel, TheLine.Level);
+ } else {
+ // If no token in the current line is affected, we still need to format
+ // affected children.
+ if (TheLine.ChildrenAffected)
+ format(TheLine.Children, DryRun);
+
+      // Adapt following lines on the current indent level to the same level,
+      // but only if the current \c AnnotatedLine is at the beginning of a line.
+ bool StartsNewLine =
+ TheLine.First->NewlinesBefore > 0 || TheLine.First->IsFirst;
+ if (StartsNewLine)
+ IndentTracker.adjustToUnmodifiedLine(TheLine);
+ if (!DryRun) {
+ bool ReformatLeadingWhitespace =
+ StartsNewLine && ((PreviousLine && PreviousLine->Affected) ||
+ TheLine.LeadingEmptyLinesAffected);
+ // Format the first token.
+ if (ReformatLeadingWhitespace)
+ formatFirstToken(*TheLine.First, PreviousLine, TheLine.Level,
+ TheLine.First->OriginalColumn,
+ TheLine.InPPDirective);
+ else
+ Whitespaces->addUntouchableToken(*TheLine.First,
+ TheLine.InPPDirective);
+
+ // Notify the WhitespaceManager about the unchanged whitespace.
+ for (FormatToken *Tok = TheLine.First->Next; Tok; Tok = Tok->Next)
+ Whitespaces->addUntouchableToken(*Tok, TheLine.InPPDirective);
+ }
+ NextLine = Joiner.getNextMergedLine(DryRun, IndentTracker);
+ RangeMinLevel = UINT_MAX;
+ }
+ if (!DryRun)
+ markFinalized(TheLine.First);
+ PreviousLine = &TheLine;
+ }
+ PenaltyCache[CacheKey] = Penalty;
+ return Penalty;
+}
+
+void UnwrappedLineFormatter::formatFirstToken(FormatToken &RootToken,
+ const AnnotatedLine *PreviousLine,
+ unsigned IndentLevel,
+ unsigned Indent,
+ bool InPPDirective) {
+ if (RootToken.is(tok::eof)) {
+ unsigned Newlines = std::min(RootToken.NewlinesBefore, 1u);
+ Whitespaces->replaceWhitespace(RootToken, Newlines, /*IndentLevel=*/0,
+ /*Spaces=*/0, /*TargetColumn=*/0);
+ return;
+ }
+ unsigned Newlines =
+ std::min(RootToken.NewlinesBefore, Style.MaxEmptyLinesToKeep + 1);
+ // Remove empty lines before "}" where applicable.
+ if (RootToken.is(tok::r_brace) &&
+ (!RootToken.Next ||
+ (RootToken.Next->is(tok::semi) && !RootToken.Next->Next)))
+ Newlines = std::min(Newlines, 1u);
+ if (Newlines == 0 && !RootToken.IsFirst)
+ Newlines = 1;
+ if (RootToken.IsFirst && !RootToken.HasUnescapedNewline)
+ Newlines = 0;
+
+ // Remove empty lines after "{".
+ if (!Style.KeepEmptyLinesAtTheStartOfBlocks && PreviousLine &&
+ PreviousLine->Last->is(tok::l_brace) &&
+ PreviousLine->First->isNot(tok::kw_namespace) &&
+ !startsExternCBlock(*PreviousLine))
+ Newlines = 1;
+
+ // Insert extra new line before access specifiers.
+ if (PreviousLine && PreviousLine->Last->isOneOf(tok::semi, tok::r_brace) &&
+ RootToken.isAccessSpecifier() && RootToken.NewlinesBefore == 1)
+ ++Newlines;
+
+ // Remove empty lines after access specifiers.
+ if (PreviousLine && PreviousLine->First->isAccessSpecifier() &&
+ (!PreviousLine->InPPDirective || !RootToken.HasUnescapedNewline))
+ Newlines = std::min(1u, Newlines);
+
+ Whitespaces->replaceWhitespace(RootToken, Newlines, IndentLevel, Indent,
+ Indent, InPPDirective &&
+ !RootToken.HasUnescapedNewline);
+}
+
+unsigned
+UnwrappedLineFormatter::getColumnLimit(bool InPPDirective,
+ const AnnotatedLine *NextLine) const {
+ // In preprocessor directives reserve two chars for trailing " \" if the
+ // next line continues the preprocessor directive.
+ bool ContinuesPPDirective =
+ InPPDirective &&
+ // If there is no next line, this is likely a child line and the parent
+ // continues the preprocessor directive.
+ (!NextLine ||
+ (NextLine->InPPDirective &&
+ // If there is an unescaped newline between this line and the next, the
+ // next line starts a new preprocessor directive.
+ !NextLine->First->HasUnescapedNewline));
+ return Style.ColumnLimit - (ContinuesPPDirective ? 2 : 0);
+}
+
+} // namespace format
+} // namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Format/UnwrappedLineFormatter.h b/contrib/llvm/tools/clang/lib/Format/UnwrappedLineFormatter.h
new file mode 100644
index 0000000..478617d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Format/UnwrappedLineFormatter.h
@@ -0,0 +1,73 @@
+//===--- UnwrappedLineFormatter.h - Format C++ code -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Implements a combinatorial exploration of all the different
+/// line breaks with which unwrapped lines can be formatted.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEFORMATTER_H
+#define LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEFORMATTER_H
+
+#include "ContinuationIndenter.h"
+#include "clang/Format/Format.h"
+#include <map>
+#include <queue>
+#include <string>
+
+namespace clang {
+namespace format {
+
+class ContinuationIndenter;
+class WhitespaceManager;
+
+class UnwrappedLineFormatter {
+public:
+ UnwrappedLineFormatter(ContinuationIndenter *Indenter,
+ WhitespaceManager *Whitespaces,
+ const FormatStyle &Style,
+ const AdditionalKeywords &Keywords,
+ bool *IncompleteFormat)
+ : Indenter(Indenter), Whitespaces(Whitespaces), Style(Style),
+ Keywords(Keywords), IncompleteFormat(IncompleteFormat) {}
+
+ /// \brief Format the current block and return the penalty.
+ unsigned format(const SmallVectorImpl<AnnotatedLine *> &Lines,
+ bool DryRun = false, int AdditionalIndent = 0,
+ bool FixBadIndentation = false);
+
+private:
+ /// \brief Add a new line and the required indent before the first Token
+ /// of the \c UnwrappedLine if there was no structural parsing error.
+ void formatFirstToken(FormatToken &RootToken,
+ const AnnotatedLine *PreviousLine, unsigned IndentLevel,
+ unsigned Indent, bool InPPDirective);
+
+ /// \brief Returns the column limit for a line, taking into account whether we
+ /// need an escaped newline due to a continued preprocessor directive.
+ unsigned getColumnLimit(bool InPPDirective,
+ const AnnotatedLine *NextLine) const;
+
+ // Cache to store the penalty of formatting a vector of AnnotatedLines
+ // starting from a specific additional offset. Improves performance if there
+ // are many nested blocks.
+ std::map<std::pair<const SmallVectorImpl<AnnotatedLine *> *, unsigned>,
+ unsigned> PenaltyCache;
+
+ ContinuationIndenter *Indenter;
+ WhitespaceManager *Whitespaces;
+ const FormatStyle &Style;
+ const AdditionalKeywords &Keywords;
+ bool *IncompleteFormat;
+};
+} // end namespace format
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEFORMATTER_H
diff --git a/contrib/llvm/tools/clang/lib/Format/UnwrappedLineParser.cpp b/contrib/llvm/tools/clang/lib/Format/UnwrappedLineParser.cpp
new file mode 100644
index 0000000..94b8498
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Format/UnwrappedLineParser.cpp
@@ -0,0 +1,1938 @@
+//===--- UnwrappedLineParser.cpp - Format C++ code ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file contains the implementation of the UnwrappedLineParser,
+/// which turns a stream of tokens into UnwrappedLines.
+///
+//===----------------------------------------------------------------------===//
+
+#include "UnwrappedLineParser.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "format-parser"
+
+namespace clang {
+namespace format {
+
+class FormatTokenSource {
+public:
+ virtual ~FormatTokenSource() {}
+ virtual FormatToken *getNextToken() = 0;
+
+ virtual unsigned getPosition() = 0;
+ virtual FormatToken *setPosition(unsigned Position) = 0;
+};
+
+namespace {
+
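+// An RAII object that sets the "must be a declaration" context of a line and
+// pushes it onto a stack; the enclosing context is restored on destruction.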
+class ScopedDeclarationState {
+public:
+ ScopedDeclarationState(UnwrappedLine &Line, std::vector<bool> &Stack,
+ bool MustBeDeclaration)
+ : Line(Line), Stack(Stack) {
+ Line.MustBeDeclaration = MustBeDeclaration;
+ Stack.push_back(MustBeDeclaration);
+ }
+ ~ScopedDeclarationState() {
+ Stack.pop_back();
+ if (!Stack.empty())
+ Line.MustBeDeclaration = Stack.back();
+ else
+ Line.MustBeDeclaration = true;
+ }
+
+private:
+ UnwrappedLine &Line;
+ std::vector<bool> &Stack;
+};
+
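+// Wraps the token source while a preprocessor directive is parsed so that
+// the directive forms its own unwrapped line at level 0. An unescaped
+// newline ends the directive; a fake eof token is returned past that point.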
+class ScopedMacroState : public FormatTokenSource {
+public:
+ ScopedMacroState(UnwrappedLine &Line, FormatTokenSource *&TokenSource,
+ FormatToken *&ResetToken)
+ : Line(Line), TokenSource(TokenSource), ResetToken(ResetToken),
+ PreviousLineLevel(Line.Level), PreviousTokenSource(TokenSource),
+ Token(nullptr) {
+ TokenSource = this;
+ Line.Level = 0;
+ Line.InPPDirective = true;
+ }
+
+ ~ScopedMacroState() override {
+ TokenSource = PreviousTokenSource;
+ ResetToken = Token;
+ Line.InPPDirective = false;
+ Line.Level = PreviousLineLevel;
+ }
+
+ FormatToken *getNextToken() override {
+ // The \c UnwrappedLineParser guards against this by never calling
+ // \c getNextToken() after it has encountered the first eof token.
+ assert(!eof());
+ Token = PreviousTokenSource->getNextToken();
+ if (eof())
+ return getFakeEOF();
+ return Token;
+ }
+
+ unsigned getPosition() override { return PreviousTokenSource->getPosition(); }
+
+ FormatToken *setPosition(unsigned Position) override {
+ Token = PreviousTokenSource->setPosition(Position);
+ return Token;
+ }
+
+private:
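+  // Within a preprocessor directive, a token with an unescaped newline in
+  // front of it marks the end of the directive.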
+ bool eof() { return Token && Token->HasUnescapedNewline; }
+
+ FormatToken *getFakeEOF() {
+ static bool EOFInitialized = false;
+ static FormatToken FormatTok;
+ if (!EOFInitialized) {
+ FormatTok.Tok.startToken();
+ FormatTok.Tok.setKind(tok::eof);
+ EOFInitialized = true;
+ }
+ return &FormatTok;
+ }
+
+ UnwrappedLine &Line;
+ FormatTokenSource *&TokenSource;
+ FormatToken *&ResetToken;
+ unsigned PreviousLineLevel;
+ FormatTokenSource *PreviousTokenSource;
+
+ FormatToken *Token;
+};
+
+} // end anonymous namespace
+
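+// Temporarily switches the parser to a different list of lines, either the
+// preprocessor directive lines or the children of the last token, and
+// restores the previous state on destruction.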
+class ScopedLineState {
+public:
+ ScopedLineState(UnwrappedLineParser &Parser,
+ bool SwitchToPreprocessorLines = false)
+ : Parser(Parser), OriginalLines(Parser.CurrentLines) {
+ if (SwitchToPreprocessorLines)
+ Parser.CurrentLines = &Parser.PreprocessorDirectives;
+ else if (!Parser.Line->Tokens.empty())
+ Parser.CurrentLines = &Parser.Line->Tokens.back().Children;
+ PreBlockLine = std::move(Parser.Line);
+ Parser.Line = llvm::make_unique<UnwrappedLine>();
+ Parser.Line->Level = PreBlockLine->Level;
+ Parser.Line->InPPDirective = PreBlockLine->InPPDirective;
+ }
+
+ ~ScopedLineState() {
+ if (!Parser.Line->Tokens.empty()) {
+ Parser.addUnwrappedLine();
+ }
+ assert(Parser.Line->Tokens.empty());
+ Parser.Line = std::move(PreBlockLine);
+ if (Parser.CurrentLines == &Parser.PreprocessorDirectives)
+ Parser.MustBreakBeforeNextToken = true;
+ Parser.CurrentLines = OriginalLines;
+ }
+
+private:
+ UnwrappedLineParser &Parser;
+
+ std::unique_ptr<UnwrappedLine> PreBlockLine;
+ SmallVectorImpl<UnwrappedLine> *OriginalLines;
+};
+
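+// Applies the brace wrapping style to a compound statement: optionally
+// breaks before the opening brace and indents the braces, restoring the
+// previous line level when destroyed.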
+class CompoundStatementIndenter {
+public:
+ CompoundStatementIndenter(UnwrappedLineParser *Parser,
+ const FormatStyle &Style, unsigned &LineLevel)
+ : LineLevel(LineLevel), OldLineLevel(LineLevel) {
+ if (Style.BraceWrapping.AfterControlStatement)
+ Parser->addUnwrappedLine();
+ if (Style.BraceWrapping.IndentBraces)
+ ++LineLevel;
+ }
+ ~CompoundStatementIndenter() { LineLevel = OldLineLevel; }
+
+private:
+ unsigned &LineLevel;
+ unsigned OldLineLevel;
+};
+
+namespace {
+
+class IndexedTokenSource : public FormatTokenSource {
+public:
+ IndexedTokenSource(ArrayRef<FormatToken *> Tokens)
+ : Tokens(Tokens), Position(-1) {}
+
+ FormatToken *getNextToken() override {
+ ++Position;
+ return Tokens[Position];
+ }
+
+ unsigned getPosition() override {
+ assert(Position >= 0);
+ return Position;
+ }
+
+ FormatToken *setPosition(unsigned P) override {
+ Position = P;
+ return Tokens[Position];
+ }
+
+ void reset() { Position = -1; }
+
+private:
+ ArrayRef<FormatToken *> Tokens;
+ int Position;
+};
+
+} // end anonymous namespace
+
+UnwrappedLineParser::UnwrappedLineParser(const FormatStyle &Style,
+ const AdditionalKeywords &Keywords,
+ ArrayRef<FormatToken *> Tokens,
+ UnwrappedLineConsumer &Callback)
+ : Line(new UnwrappedLine), MustBreakBeforeNextToken(false),
+ CurrentLines(&Lines), Style(Style), Keywords(Keywords), Tokens(nullptr),
+ Callback(Callback), AllTokens(Tokens), PPBranchLevel(-1) {}
+
+void UnwrappedLineParser::reset() {
+ PPBranchLevel = -1;
+ Line.reset(new UnwrappedLine);
+ CommentsBeforeNextToken.clear();
+ FormatTok = nullptr;
+ MustBreakBeforeNextToken = false;
+ PreprocessorDirectives.clear();
+ CurrentLines = &Lines;
+ DeclarationScopeStack.clear();
+ PPStack.clear();
+}
+
+void UnwrappedLineParser::parse() {
+ IndexedTokenSource TokenSource(AllTokens);
+ do {
+ DEBUG(llvm::dbgs() << "----\n");
+ reset();
+ Tokens = &TokenSource;
+ TokenSource.reset();
+
+ readToken();
+ parseFile();
+ // Create line with eof token.
+ pushToken(FormatTok);
+ addUnwrappedLine();
+
+ for (SmallVectorImpl<UnwrappedLine>::iterator I = Lines.begin(),
+ E = Lines.end();
+ I != E; ++I) {
+ Callback.consumeUnwrappedLine(*I);
+ }
+ Callback.finishRun();
+ Lines.clear();
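+    // Advance to the next combination of preprocessor branches; the token
+    // stream is re-parsed once per #if/#elif/#else alternative so that all
+    // conditional branches get their own formatting pass.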
+ while (!PPLevelBranchIndex.empty() &&
+ PPLevelBranchIndex.back() + 1 >= PPLevelBranchCount.back()) {
+ PPLevelBranchIndex.resize(PPLevelBranchIndex.size() - 1);
+ PPLevelBranchCount.resize(PPLevelBranchCount.size() - 1);
+ }
+ if (!PPLevelBranchIndex.empty()) {
+ ++PPLevelBranchIndex.back();
+ assert(PPLevelBranchIndex.size() == PPLevelBranchCount.size());
+ assert(PPLevelBranchIndex.back() <= PPLevelBranchCount.back());
+ }
+ } while (!PPLevelBranchIndex.empty());
+}
+
+void UnwrappedLineParser::parseFile() {
+ // The top-level context in a file always has declarations, except for pre-
+ // processor directives and JavaScript files.
+ bool MustBeDeclaration =
+ !Line->InPPDirective && Style.Language != FormatStyle::LK_JavaScript;
+ ScopedDeclarationState DeclarationState(*Line, DeclarationScopeStack,
+ MustBeDeclaration);
+ parseLevel(/*HasOpeningBrace=*/false);
+ // Make sure to format the remaining tokens.
+ flushComments(true);
+ addUnwrappedLine();
+}
+
+void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) {
+ bool SwitchLabelEncountered = false;
+ do {
+ tok::TokenKind kind = FormatTok->Tok.getKind();
+ if (FormatTok->Type == TT_MacroBlockBegin) {
+ kind = tok::l_brace;
+ } else if (FormatTok->Type == TT_MacroBlockEnd) {
+ kind = tok::r_brace;
+ }
+
+ switch (kind) {
+ case tok::comment:
+ nextToken();
+ addUnwrappedLine();
+ break;
+ case tok::l_brace:
+ // FIXME: Add parameter whether this can happen - if this happens, we must
+ // be in a non-declaration context.
+ if (!FormatTok->is(TT_MacroBlockBegin) && tryToParseBracedList())
+ continue;
+ parseBlock(/*MustBeDeclaration=*/false);
+ addUnwrappedLine();
+ break;
+ case tok::r_brace:
+ if (HasOpeningBrace)
+ return;
+ nextToken();
+ addUnwrappedLine();
+ break;
+ case tok::kw_default:
+ case tok::kw_case:
+ if (!SwitchLabelEncountered &&
+ (Style.IndentCaseLabels || (Line->InPPDirective && Line->Level == 1)))
+ ++Line->Level;
+ SwitchLabelEncountered = true;
+ parseStructuralElement();
+ break;
+ default:
+ parseStructuralElement();
+ break;
+ }
+ } while (!eof());
+}
+
+void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
+ // We'll parse forward through the tokens until we hit
+ // a closing brace or eof - note that getNextToken() will
+ // parse macros, so this will magically work inside macro
+ // definitions, too.
+ unsigned StoredPosition = Tokens->getPosition();
+ FormatToken *Tok = FormatTok;
+ // Keep a stack of positions of lbrace tokens. We will
+ // update information about whether an lbrace starts a
+ // braced init list or a different block during the loop.
+ SmallVector<FormatToken *, 8> LBraceStack;
+ assert(Tok->Tok.is(tok::l_brace));
+ do {
+ // Get next non-comment token.
+ FormatToken *NextTok;
+ unsigned ReadTokens = 0;
+ do {
+ NextTok = Tokens->getNextToken();
+ ++ReadTokens;
+ } while (NextTok->is(tok::comment));
+
+ switch (Tok->Tok.getKind()) {
+ case tok::l_brace:
+ Tok->BlockKind = BK_Unknown;
+ LBraceStack.push_back(Tok);
+ break;
+ case tok::r_brace:
+ if (!LBraceStack.empty()) {
+ if (LBraceStack.back()->BlockKind == BK_Unknown) {
+ bool ProbablyBracedList = false;
+ if (Style.Language == FormatStyle::LK_Proto) {
+ ProbablyBracedList = NextTok->isOneOf(tok::comma, tok::r_square);
+ } else {
+ // Using OriginalColumn to distinguish between ObjC methods and
+ // binary operators is a bit hacky.
+ bool NextIsObjCMethod = NextTok->isOneOf(tok::plus, tok::minus) &&
+ NextTok->OriginalColumn == 0;
+
+ // If there is a comma, semicolon or right paren after the closing
+ // brace, we assume this is a braced initializer list. Note that
+          // regardless of how we mark inner braces here, we will overwrite the
+ // BlockKind later if we parse a braced list (where all blocks
+ // inside are by default braced lists), or when we explicitly detect
+ // blocks (for example while parsing lambdas).
+ //
+ // We exclude + and - as they can be ObjC visibility modifiers.
+ ProbablyBracedList =
+ NextTok->isOneOf(tok::comma, tok::period, tok::colon,
+ tok::r_paren, tok::r_square, tok::l_brace,
+ tok::l_square, tok::l_paren, tok::ellipsis) ||
+ (NextTok->is(tok::semi) &&
+ (!ExpectClassBody || LBraceStack.size() != 1)) ||
+ (NextTok->isBinaryOperator() && !NextIsObjCMethod);
+ }
+ if (ProbablyBracedList) {
+ Tok->BlockKind = BK_BracedInit;
+ LBraceStack.back()->BlockKind = BK_BracedInit;
+ } else {
+ Tok->BlockKind = BK_Block;
+ LBraceStack.back()->BlockKind = BK_Block;
+ }
+ }
+ LBraceStack.pop_back();
+ }
+ break;
+ case tok::at:
+ case tok::semi:
+ case tok::kw_if:
+ case tok::kw_while:
+ case tok::kw_for:
+ case tok::kw_switch:
+ case tok::kw_try:
+ case tok::kw___try:
+ if (!LBraceStack.empty())
+ LBraceStack.back()->BlockKind = BK_Block;
+ break;
+ default:
+ break;
+ }
+ Tok = NextTok;
+ } while (Tok->Tok.isNot(tok::eof) && !LBraceStack.empty());
+ // Assume other blocks for all unclosed opening braces.
+ for (unsigned i = 0, e = LBraceStack.size(); i != e; ++i) {
+ if (LBraceStack[i]->BlockKind == BK_Unknown)
+ LBraceStack[i]->BlockKind = BK_Block;
+ }
+
+ FormatTok = Tokens->setPosition(StoredPosition);
+}
+
+void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, bool AddLevel,
+ bool MunchSemi) {
+ assert(FormatTok->isOneOf(tok::l_brace, TT_MacroBlockBegin) &&
+ "'{' or macro block token expected");
+ const bool MacroBlock = FormatTok->is(TT_MacroBlockBegin);
+ FormatTok->BlockKind = BK_Block;
+
+ unsigned InitialLevel = Line->Level;
+ nextToken();
+
+ if (MacroBlock && FormatTok->is(tok::l_paren))
+ parseParens();
+
+ addUnwrappedLine();
+
+ ScopedDeclarationState DeclarationState(*Line, DeclarationScopeStack,
+ MustBeDeclaration);
+ if (AddLevel)
+ ++Line->Level;
+ parseLevel(/*HasOpeningBrace=*/true);
+
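+  // The block was not closed by the matching kind of token (or eof was
+  // reached); reset the indent level and give up on this block.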
+ if (MacroBlock ? !FormatTok->is(TT_MacroBlockEnd)
+ : !FormatTok->is(tok::r_brace)) {
+ Line->Level = InitialLevel;
+ FormatTok->BlockKind = BK_Block;
+ return;
+ }
+
+ nextToken(); // Munch the closing brace.
+
+ if (MacroBlock && FormatTok->is(tok::l_paren))
+ parseParens();
+
+ if (MunchSemi && FormatTok->Tok.is(tok::semi))
+ nextToken();
+ Line->Level = InitialLevel;
+}
+
+static bool isGoogScope(const UnwrappedLine &Line) {
+ // FIXME: Closure-library specific stuff should not be hard-coded but be
+ // configurable.
+ if (Line.Tokens.size() < 4)
+ return false;
+ auto I = Line.Tokens.begin();
+ if (I->Tok->TokenText != "goog")
+ return false;
+ ++I;
+ if (I->Tok->isNot(tok::period))
+ return false;
+ ++I;
+ if (I->Tok->TokenText != "scope")
+ return false;
+ ++I;
+ return I->Tok->is(tok::l_paren);
+}
+
+static bool ShouldBreakBeforeBrace(const FormatStyle &Style,
+ const FormatToken &InitialToken) {
+ if (InitialToken.is(tok::kw_namespace))
+ return Style.BraceWrapping.AfterNamespace;
+ if (InitialToken.is(tok::kw_class))
+ return Style.BraceWrapping.AfterClass;
+ if (InitialToken.is(tok::kw_union))
+ return Style.BraceWrapping.AfterUnion;
+ if (InitialToken.is(tok::kw_struct))
+ return Style.BraceWrapping.AfterStruct;
+ return false;
+}
+
+void UnwrappedLineParser::parseChildBlock() {
+ FormatTok->BlockKind = BK_Block;
+ nextToken();
+ {
+ bool GoogScope =
+ Style.Language == FormatStyle::LK_JavaScript && isGoogScope(*Line);
+ ScopedLineState LineState(*this);
+ ScopedDeclarationState DeclarationState(*Line, DeclarationScopeStack,
+ /*MustBeDeclaration=*/false);
+ Line->Level += GoogScope ? 0 : 1;
+ parseLevel(/*HasOpeningBrace=*/true);
+ flushComments(isOnNewLine(*FormatTok));
+ Line->Level -= GoogScope ? 0 : 1;
+ }
+ nextToken();
+}
+
+void UnwrappedLineParser::parsePPDirective() {
+ assert(FormatTok->Tok.is(tok::hash) && "'#' expected");
+ ScopedMacroState MacroState(*Line, Tokens, FormatTok);
+ nextToken();
+
+ if (!FormatTok->Tok.getIdentifierInfo()) {
+ parsePPUnknown();
+ return;
+ }
+
+ switch (FormatTok->Tok.getIdentifierInfo()->getPPKeywordID()) {
+ case tok::pp_define:
+ parsePPDefine();
+ return;
+ case tok::pp_if:
+ parsePPIf(/*IfDef=*/false);
+ break;
+ case tok::pp_ifdef:
+ case tok::pp_ifndef:
+ parsePPIf(/*IfDef=*/true);
+ break;
+ case tok::pp_else:
+ parsePPElse();
+ break;
+ case tok::pp_elif:
+ parsePPElIf();
+ break;
+ case tok::pp_endif:
+ parsePPEndIf();
+ break;
+ default:
+ parsePPUnknown();
+ break;
+ }
+}
+
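+// The conditionalCompilation* functions track preprocessor conditionals:
+// PPStack records whether the current branch is reachable, while
+// PPLevelBranchIndex and PPLevelBranchCount select which branch of each
+// conditional chain is followed during the current pass of parse().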
+void UnwrappedLineParser::conditionalCompilationCondition(bool Unreachable) {
+ if (Unreachable || (!PPStack.empty() && PPStack.back() == PP_Unreachable))
+ PPStack.push_back(PP_Unreachable);
+ else
+ PPStack.push_back(PP_Conditional);
+}
+
+void UnwrappedLineParser::conditionalCompilationStart(bool Unreachable) {
+ ++PPBranchLevel;
+ assert(PPBranchLevel >= 0 && PPBranchLevel <= (int)PPLevelBranchIndex.size());
+ if (PPBranchLevel == (int)PPLevelBranchIndex.size()) {
+ PPLevelBranchIndex.push_back(0);
+ PPLevelBranchCount.push_back(0);
+ }
+ PPChainBranchIndex.push(0);
+ bool Skip = PPLevelBranchIndex[PPBranchLevel] > 0;
+ conditionalCompilationCondition(Unreachable || Skip);
+}
+
+void UnwrappedLineParser::conditionalCompilationAlternative() {
+ if (!PPStack.empty())
+ PPStack.pop_back();
+ assert(PPBranchLevel < (int)PPLevelBranchIndex.size());
+ if (!PPChainBranchIndex.empty())
+ ++PPChainBranchIndex.top();
+ conditionalCompilationCondition(
+ PPBranchLevel >= 0 && !PPChainBranchIndex.empty() &&
+ PPLevelBranchIndex[PPBranchLevel] != PPChainBranchIndex.top());
+}
+
+void UnwrappedLineParser::conditionalCompilationEnd() {
+ assert(PPBranchLevel < (int)PPLevelBranchIndex.size());
+ if (PPBranchLevel >= 0 && !PPChainBranchIndex.empty()) {
+ if (PPChainBranchIndex.top() + 1 > PPLevelBranchCount[PPBranchLevel]) {
+ PPLevelBranchCount[PPBranchLevel] = PPChainBranchIndex.top() + 1;
+ }
+ }
+ // Guard against #endif's without #if.
+ if (PPBranchLevel > 0)
+ --PPBranchLevel;
+ if (!PPChainBranchIndex.empty())
+ PPChainBranchIndex.pop();
+ if (!PPStack.empty())
+ PPStack.pop_back();
+}
+
+void UnwrappedLineParser::parsePPIf(bool IfDef) {
+ nextToken();
+ bool IsLiteralFalse = (FormatTok->Tok.isLiteral() &&
+ FormatTok->Tok.getLiteralData() != nullptr &&
+ StringRef(FormatTok->Tok.getLiteralData(),
+ FormatTok->Tok.getLength()) == "0") ||
+ FormatTok->Tok.is(tok::kw_false);
+ conditionalCompilationStart(!IfDef && IsLiteralFalse);
+ parsePPUnknown();
+}
+
+void UnwrappedLineParser::parsePPElse() {
+ conditionalCompilationAlternative();
+ parsePPUnknown();
+}
+
+void UnwrappedLineParser::parsePPElIf() { parsePPElse(); }
+
+void UnwrappedLineParser::parsePPEndIf() {
+ conditionalCompilationEnd();
+ parsePPUnknown();
+}
+
+void UnwrappedLineParser::parsePPDefine() {
+ nextToken();
+
+ if (FormatTok->Tok.getKind() != tok::identifier) {
+ parsePPUnknown();
+ return;
+ }
+ nextToken();
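+  // An opening parenthesis immediately following the macro name (with no
+  // whitespace in between) introduces a function-like macro's parameter list.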
+ if (FormatTok->Tok.getKind() == tok::l_paren &&
+ FormatTok->WhitespaceRange.getBegin() ==
+ FormatTok->WhitespaceRange.getEnd()) {
+ parseParens();
+ }
+ addUnwrappedLine();
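+  // The macro body is indented one level relative to the #define itself.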
+ Line->Level = 1;
+
+ // Errors during a preprocessor directive can only affect the layout of the
+ // preprocessor directive, and thus we ignore them. An alternative approach
+ // would be to use the same approach we use on the file level (no
+ // re-indentation if there was a structural error) within the macro
+ // definition.
+ parseFile();
+}
+
+void UnwrappedLineParser::parsePPUnknown() {
+ do {
+ nextToken();
+ } while (!eof());
+ addUnwrappedLine();
+}
+
+// Here we blacklist certain tokens that are not usually the first token in an
+// unwrapped line. This is used in an attempt to distinguish macro calls
+// without trailing semicolons from other constructs split across several
+// lines.
+static bool tokenCanStartNewLine(const clang::Token &Tok) {
+  // A semicolon can be a null-statement, and l_square can be the start of a
+  // macro or a C++11 attribute, but this doesn't seem to be common.
+ return Tok.isNot(tok::semi) && Tok.isNot(tok::l_brace) &&
+ Tok.isNot(tok::l_square) &&
+ // Tokens that can only be used as binary operators and a part of
+ // overloaded operator names.
+ Tok.isNot(tok::period) && Tok.isNot(tok::periodstar) &&
+ Tok.isNot(tok::arrow) && Tok.isNot(tok::arrowstar) &&
+ Tok.isNot(tok::less) && Tok.isNot(tok::greater) &&
+ Tok.isNot(tok::slash) && Tok.isNot(tok::percent) &&
+ Tok.isNot(tok::lessless) && Tok.isNot(tok::greatergreater) &&
+ Tok.isNot(tok::equal) && Tok.isNot(tok::plusequal) &&
+ Tok.isNot(tok::minusequal) && Tok.isNot(tok::starequal) &&
+ Tok.isNot(tok::slashequal) && Tok.isNot(tok::percentequal) &&
+ Tok.isNot(tok::ampequal) && Tok.isNot(tok::pipeequal) &&
+ Tok.isNot(tok::caretequal) && Tok.isNot(tok::greatergreaterequal) &&
+ Tok.isNot(tok::lesslessequal) &&
+ // Colon is used in labels, base class lists, initializer lists,
+ // range-based for loops, ternary operator, but should never be the
+ // first token in an unwrapped line.
+ Tok.isNot(tok::colon) &&
+ // 'noexcept' is a trailing annotation.
+ Tok.isNot(tok::kw_noexcept);
+}
+
+void UnwrappedLineParser::parseStructuralElement() {
+ assert(!FormatTok->is(tok::l_brace));
+ if (Style.Language == FormatStyle::LK_TableGen &&
+ FormatTok->is(tok::pp_include)) {
+ nextToken();
+ if (FormatTok->is(tok::string_literal))
+ nextToken();
+ addUnwrappedLine();
+ return;
+ }
+ switch (FormatTok->Tok.getKind()) {
+ case tok::at:
+ nextToken();
+ if (FormatTok->Tok.is(tok::l_brace)) {
+ parseBracedList();
+ break;
+ }
+ switch (FormatTok->Tok.getObjCKeywordID()) {
+ case tok::objc_public:
+ case tok::objc_protected:
+ case tok::objc_package:
+ case tok::objc_private:
+ return parseAccessSpecifier();
+ case tok::objc_interface:
+ case tok::objc_implementation:
+ return parseObjCInterfaceOrImplementation();
+ case tok::objc_protocol:
+ return parseObjCProtocol();
+ case tok::objc_end:
+ return; // Handled by the caller.
+ case tok::objc_optional:
+ case tok::objc_required:
+ nextToken();
+ addUnwrappedLine();
+ return;
+ case tok::objc_autoreleasepool:
+ nextToken();
+ if (FormatTok->Tok.is(tok::l_brace)) {
+ if (Style.BraceWrapping.AfterObjCDeclaration)
+ addUnwrappedLine();
+ parseBlock(/*MustBeDeclaration=*/false);
+ }
+ addUnwrappedLine();
+ return;
+ case tok::objc_try:
+ // This branch isn't strictly necessary (the kw_try case below would
+ // do this too after the tok::at is parsed above). But be explicit.
+ parseTryCatch();
+ return;
+ default:
+ break;
+ }
+ break;
+ case tok::kw_asm:
+ nextToken();
+ if (FormatTok->is(tok::l_brace)) {
+ FormatTok->Type = TT_InlineASMBrace;
+ nextToken();
+ while (FormatTok && FormatTok->isNot(tok::eof)) {
+ if (FormatTok->is(tok::r_brace)) {
+ FormatTok->Type = TT_InlineASMBrace;
+ nextToken();
+ addUnwrappedLine();
+ break;
+ }
+ FormatTok->Finalized = true;
+ nextToken();
+ }
+ }
+ break;
+ case tok::kw_namespace:
+ parseNamespace();
+ return;
+ case tok::kw_inline:
+ nextToken();
+ if (FormatTok->Tok.is(tok::kw_namespace)) {
+ parseNamespace();
+ return;
+ }
+ break;
+ case tok::kw_public:
+ case tok::kw_protected:
+ case tok::kw_private:
+ if (Style.Language == FormatStyle::LK_Java ||
+ Style.Language == FormatStyle::LK_JavaScript)
+ nextToken();
+ else
+ parseAccessSpecifier();
+ return;
+ case tok::kw_if:
+ parseIfThenElse();
+ return;
+ case tok::kw_for:
+ case tok::kw_while:
+ parseForOrWhileLoop();
+ return;
+ case tok::kw_do:
+ parseDoWhile();
+ return;
+ case tok::kw_switch:
+ parseSwitch();
+ return;
+ case tok::kw_default:
+ nextToken();
+ parseLabel();
+ return;
+ case tok::kw_case:
+ parseCaseLabel();
+ return;
+ case tok::kw_try:
+ case tok::kw___try:
+ parseTryCatch();
+ return;
+ case tok::kw_extern:
+ nextToken();
+ if (FormatTok->Tok.is(tok::string_literal)) {
+ nextToken();
+ if (FormatTok->Tok.is(tok::l_brace)) {
+ parseBlock(/*MustBeDeclaration=*/true, /*AddLevel=*/false);
+ addUnwrappedLine();
+ return;
+ }
+ }
+ break;
+ case tok::kw_export:
+ if (Style.Language == FormatStyle::LK_JavaScript) {
+ parseJavaScriptEs6ImportExport();
+ return;
+ }
+ break;
+ case tok::identifier:
+ if (FormatTok->is(TT_ForEachMacro)) {
+ parseForOrWhileLoop();
+ return;
+ }
+ if (FormatTok->is(TT_MacroBlockBegin)) {
+ parseBlock(/*MustBeDeclaration=*/false, /*AddLevel=*/true,
+ /*MunchSemi=*/false);
+ return;
+ }
+ if (Style.Language == FormatStyle::LK_JavaScript &&
+ FormatTok->is(Keywords.kw_import)) {
+ parseJavaScriptEs6ImportExport();
+ return;
+ }
+ if (FormatTok->isOneOf(Keywords.kw_signals, Keywords.kw_qsignals,
+ Keywords.kw_slots, Keywords.kw_qslots)) {
+ nextToken();
+ if (FormatTok->is(tok::colon)) {
+ nextToken();
+ addUnwrappedLine();
+ }
+ return;
+ }
+ // In all other cases, parse the declaration.
+ break;
+ default:
+ break;
+ }
+ do {
+ switch (FormatTok->Tok.getKind()) {
+ case tok::at:
+ nextToken();
+ if (FormatTok->Tok.is(tok::l_brace))
+ parseBracedList();
+ break;
+ case tok::kw_enum:
+ // parseEnum falls through and does not yet add an unwrapped line as an
+ // enum definition can start a structural element.
+ if (!parseEnum())
+ break;
+ // This only applies for C++.
+ if (Style.Language != FormatStyle::LK_Cpp) {
+ addUnwrappedLine();
+ return;
+ }
+ break;
+ case tok::kw_typedef:
+ nextToken();
+ if (FormatTok->isOneOf(Keywords.kw_NS_ENUM, Keywords.kw_NS_OPTIONS,
+ Keywords.kw_CF_ENUM, Keywords.kw_CF_OPTIONS))
+ parseEnum();
+ break;
+ case tok::kw_struct:
+ case tok::kw_union:
+ case tok::kw_class:
+ // parseRecord falls through and does not yet add an unwrapped line as a
+ // record declaration or definition can start a structural element.
+ parseRecord();
+ // This does not apply for Java and JavaScript.
+ if (Style.Language == FormatStyle::LK_Java ||
+ Style.Language == FormatStyle::LK_JavaScript) {
+ addUnwrappedLine();
+ return;
+ }
+ break;
+ case tok::period:
+ nextToken();
+ // In Java, classes have an implicit static member "class".
+ if (Style.Language == FormatStyle::LK_Java && FormatTok &&
+ FormatTok->is(tok::kw_class))
+ nextToken();
+ if (Style.Language == FormatStyle::LK_JavaScript && FormatTok &&
+ FormatTok->Tok.getIdentifierInfo())
+        // JavaScript only has pseudo keywords; all keywords are allowed to
+        // appear in "IdentifierName" positions. See http://es5.github.io/#x7.6
+ nextToken();
+ break;
+ case tok::semi:
+ nextToken();
+ addUnwrappedLine();
+ return;
+ case tok::r_brace:
+ addUnwrappedLine();
+ return;
+ case tok::l_paren:
+ parseParens();
+ break;
+ case tok::kw_operator:
+ nextToken();
+ if (FormatTok->isBinaryOperator())
+ nextToken();
+ break;
+ case tok::caret:
+ nextToken();
+ if (FormatTok->Tok.isAnyIdentifier() ||
+ FormatTok->isSimpleTypeSpecifier())
+ nextToken();
+ if (FormatTok->is(tok::l_paren))
+ parseParens();
+ if (FormatTok->is(tok::l_brace))
+ parseChildBlock();
+ break;
+ case tok::l_brace:
+ if (!tryToParseBracedList()) {
+ // A block outside of parentheses must be the last part of a
+ // structural element.
+        // FIXME: Figure out cases where this is not true, and add projections
+        // for them (the ones we know are missing are lambdas).
+ if (Style.BraceWrapping.AfterFunction)
+ addUnwrappedLine();
+ FormatTok->Type = TT_FunctionLBrace;
+ parseBlock(/*MustBeDeclaration=*/false);
+ addUnwrappedLine();
+ return;
+ }
+ // Otherwise this was a braced init list, and the structural
+ // element continues.
+ break;
+ case tok::kw_try:
+ // We arrive here when parsing function-try blocks.
+ parseTryCatch();
+ return;
+ case tok::identifier: {
+ if (FormatTok->is(TT_MacroBlockEnd)) {
+ addUnwrappedLine();
+ return;
+ }
+
+      // Parse function literal unless 'function' is the first token in a line,
+      // in which case this should be treated as a free-standing function.
+ if (Style.Language == FormatStyle::LK_JavaScript &&
+ FormatTok->is(Keywords.kw_function) && Line->Tokens.size() > 0) {
+ tryToParseJSFunction();
+ break;
+ }
+ if ((Style.Language == FormatStyle::LK_JavaScript ||
+ Style.Language == FormatStyle::LK_Java) &&
+ FormatTok->is(Keywords.kw_interface)) {
+ parseRecord();
+ addUnwrappedLine();
+ return;
+ }
+
+ StringRef Text = FormatTok->TokenText;
+ nextToken();
+ if (Line->Tokens.size() == 1 &&
+ // JS doesn't have macros, and within classes colons indicate fields,
+ // not labels.
+ Style.Language != FormatStyle::LK_JavaScript) {
+ if (FormatTok->Tok.is(tok::colon) && !Line->MustBeDeclaration) {
+ parseLabel();
+ return;
+ }
+ // Recognize function-like macro usages without trailing semicolon as
+ // well as free-standing macros like Q_OBJECT.
+ bool FunctionLike = FormatTok->is(tok::l_paren);
+ if (FunctionLike)
+ parseParens();
+
+ bool FollowedByNewline =
+ CommentsBeforeNextToken.empty()
+ ? FormatTok->NewlinesBefore > 0
+ : CommentsBeforeNextToken.front()->NewlinesBefore > 0;
+
+ if (FollowedByNewline && (Text.size() >= 5 || FunctionLike) &&
+ tokenCanStartNewLine(FormatTok->Tok) && Text == Text.upper()) {
+ addUnwrappedLine();
+ return;
+ }
+ }
+ break;
+ }
+ case tok::equal:
+      // Fat arrows (=>) have tok::TokenKind tok::equal but TokenType
+      // TT_JsFatArrow. They always start an expression or a child block if
+      // followed by a curly brace.
+ if (FormatTok->is(TT_JsFatArrow)) {
+ nextToken();
+ if (FormatTok->is(tok::l_brace))
+ parseChildBlock();
+ break;
+ }
+
+ nextToken();
+ if (FormatTok->Tok.is(tok::l_brace)) {
+ parseBracedList();
+ }
+ break;
+ case tok::l_square:
+ parseSquare();
+ break;
+ case tok::kw_new:
+ parseNew();
+ break;
+ default:
+ nextToken();
+ break;
+ }
+ } while (!eof());
+}
+
+bool UnwrappedLineParser::tryToParseLambda() {
+ if (Style.Language != FormatStyle::LK_Cpp) {
+ nextToken();
+ return false;
+ }
+ // FIXME: This is a dirty way to access the previous token. Find a better
+ // solution.
+ if (!Line->Tokens.empty() &&
+ (Line->Tokens.back().Tok->isOneOf(tok::identifier, tok::kw_operator,
+ tok::kw_new, tok::kw_delete) ||
+ Line->Tokens.back().Tok->closesScope() ||
+ Line->Tokens.back().Tok->isSimpleTypeSpecifier())) {
+ nextToken();
+ return false;
+ }
+ assert(FormatTok->is(tok::l_square));
+ FormatToken &LSquare = *FormatTok;
+ if (!tryToParseLambdaIntroducer())
+ return false;
+
+ while (FormatTok->isNot(tok::l_brace)) {
+ if (FormatTok->isSimpleTypeSpecifier()) {
+ nextToken();
+ continue;
+ }
+ switch (FormatTok->Tok.getKind()) {
+ case tok::l_brace:
+ break;
+ case tok::l_paren:
+ parseParens();
+ break;
+ case tok::amp:
+ case tok::star:
+ case tok::kw_const:
+ case tok::comma:
+ case tok::less:
+ case tok::greater:
+ case tok::identifier:
+ case tok::numeric_constant:
+ case tok::coloncolon:
+ case tok::kw_mutable:
+ nextToken();
+ break;
+ case tok::arrow:
+ FormatTok->Type = TT_LambdaArrow;
+ nextToken();
+ break;
+ default:
+ return true;
+ }
+ }
+ LSquare.Type = TT_LambdaLSquare;
+ parseChildBlock();
+ return true;
+}
+
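+// Parses a lambda introducer such as [], [=], [&] or an explicit capture
+// list, consuming the closing "]". Returns false if the tokens do not form
+// a valid introducer.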
+bool UnwrappedLineParser::tryToParseLambdaIntroducer() {
+ nextToken();
+ if (FormatTok->is(tok::equal)) {
+ nextToken();
+ if (FormatTok->is(tok::r_square)) {
+ nextToken();
+ return true;
+ }
+ if (FormatTok->isNot(tok::comma))
+ return false;
+ nextToken();
+ } else if (FormatTok->is(tok::amp)) {
+ nextToken();
+ if (FormatTok->is(tok::r_square)) {
+ nextToken();
+ return true;
+ }
+ if (!FormatTok->isOneOf(tok::comma, tok::identifier)) {
+ return false;
+ }
+ if (FormatTok->is(tok::comma))
+ nextToken();
+ } else if (FormatTok->is(tok::r_square)) {
+ nextToken();
+ return true;
+ }
+ do {
+ if (FormatTok->is(tok::amp))
+ nextToken();
+ if (!FormatTok->isOneOf(tok::identifier, tok::kw_this))
+ return false;
+ nextToken();
+ if (FormatTok->is(tok::ellipsis))
+ nextToken();
+ if (FormatTok->is(tok::comma)) {
+ nextToken();
+ } else if (FormatTok->is(tok::r_square)) {
+ nextToken();
+ return true;
+ } else {
+ return false;
+ }
+ } while (!eof());
+ return false;
+}
+
+void UnwrappedLineParser::tryToParseJSFunction() {
+ nextToken();
+
+ // Consume function name.
+ if (FormatTok->is(tok::identifier))
+ nextToken();
+
+ if (FormatTok->isNot(tok::l_paren))
+ return;
+
+ // Parse formal parameter list.
+ parseParens();
+
+ if (FormatTok->is(tok::colon)) {
+ // Parse a type definition.
+ nextToken();
+
+    // Eat the type declaration. For braced inline object types, balance
+    // braces; otherwise just parse until finding an l_brace for the function
+    // body.
+ if (FormatTok->is(tok::l_brace))
+ tryToParseBracedList();
+ else
+ while (FormatTok->isNot(tok::l_brace) && !eof())
+ nextToken();
+ }
+
+ parseChildBlock();
+}
+
+bool UnwrappedLineParser::tryToParseBracedList() {
+ if (FormatTok->BlockKind == BK_Unknown)
+ calculateBraceTypes();
+ assert(FormatTok->BlockKind != BK_Unknown);
+ if (FormatTok->BlockKind == BK_Block)
+ return false;
+ parseBracedList();
+ return true;
+}
+
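+// Parses a braced list; returns true if the list was terminated by a
+// matching "}" without errors. A semicolon inside the list is treated as an
+// error.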
+bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons) {
+ bool HasError = false;
+ nextToken();
+
+  // FIXME: Once we have an expression parser in the UnwrappedLineParser,
+  // replace this by using parseAssignmentExpression() inside.
+ do {
+ if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (FormatTok->is(Keywords.kw_function)) {
+ tryToParseJSFunction();
+ continue;
+ }
+ if (FormatTok->is(TT_JsFatArrow)) {
+ nextToken();
+ // Fat arrows can be followed by simple expressions or by child blocks
+ // in curly braces.
+ if (FormatTok->is(tok::l_brace)) {
+ parseChildBlock();
+ continue;
+ }
+ }
+ }
+ switch (FormatTok->Tok.getKind()) {
+ case tok::caret:
+ nextToken();
+ if (FormatTok->is(tok::l_brace)) {
+ parseChildBlock();
+ }
+ break;
+ case tok::l_square:
+ tryToParseLambda();
+ break;
+ case tok::l_brace:
+ // Assume there are no blocks inside a braced init list apart
+ // from the ones we explicitly parse out (like lambdas).
+ FormatTok->BlockKind = BK_BracedInit;
+ parseBracedList();
+ break;
+ case tok::l_paren:
+ parseParens();
+      // JavaScript can just have free-standing methods and getters/setters in
+      // object literals. Detect them by a "{" following ")".
+ if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (FormatTok->is(tok::l_brace))
+ parseChildBlock();
+ break;
+ }
+ break;
+ case tok::r_brace:
+ nextToken();
+ return !HasError;
+ case tok::semi:
+ HasError = true;
+ if (!ContinueOnSemicolons)
+ return !HasError;
+ nextToken();
+ break;
+ case tok::comma:
+ nextToken();
+ break;
+ default:
+ nextToken();
+ break;
+ }
+ } while (!eof());
+ return false;
+}
+
+void UnwrappedLineParser::parseParens() {
+ assert(FormatTok->Tok.is(tok::l_paren) && "'(' expected.");
+ nextToken();
+ do {
+ switch (FormatTok->Tok.getKind()) {
+ case tok::l_paren:
+ parseParens();
+ if (Style.Language == FormatStyle::LK_Java && FormatTok->is(tok::l_brace))
+ parseChildBlock();
+ break;
+ case tok::r_paren:
+ nextToken();
+ return;
+ case tok::r_brace:
+      // A "}" inside parentheses is an error if there wasn't a matching "{".
+ return;
+ case tok::l_square:
+ tryToParseLambda();
+ break;
+ case tok::l_brace:
+ if (!tryToParseBracedList())
+ parseChildBlock();
+ break;
+ case tok::at:
+ nextToken();
+ if (FormatTok->Tok.is(tok::l_brace))
+ parseBracedList();
+ break;
+ case tok::identifier:
+ if (Style.Language == FormatStyle::LK_JavaScript &&
+ FormatTok->is(Keywords.kw_function))
+ tryToParseJSFunction();
+ else
+ nextToken();
+ break;
+ default:
+ nextToken();
+ break;
+ }
+ } while (!eof());
+}
+
+void UnwrappedLineParser::parseSquare() {
+ assert(FormatTok->Tok.is(tok::l_square) && "'[' expected.");
+ if (tryToParseLambda())
+ return;
+ do {
+ switch (FormatTok->Tok.getKind()) {
+ case tok::l_paren:
+ parseParens();
+ break;
+ case tok::r_square:
+ nextToken();
+ return;
+ case tok::r_brace:
+      // A "}" inside square brackets is an error if there wasn't a matching "{".
+ return;
+ case tok::l_square:
+ parseSquare();
+ break;
+ case tok::l_brace: {
+ if (!tryToParseBracedList())
+ parseChildBlock();
+ break;
+ }
+ case tok::at:
+ nextToken();
+ if (FormatTok->Tok.is(tok::l_brace))
+ parseBracedList();
+ break;
+ default:
+ nextToken();
+ break;
+ }
+ } while (!eof());
+}
+
+void UnwrappedLineParser::parseIfThenElse() {
+ assert(FormatTok->Tok.is(tok::kw_if) && "'if' expected");
+ nextToken();
+ if (FormatTok->Tok.is(tok::l_paren))
+ parseParens();
+ bool NeedsUnwrappedLine = false;
+ if (FormatTok->Tok.is(tok::l_brace)) {
+ CompoundStatementIndenter Indenter(this, Style, Line->Level);
+ parseBlock(/*MustBeDeclaration=*/false);
+ if (Style.BraceWrapping.BeforeElse)
+ addUnwrappedLine();
+ else
+ NeedsUnwrappedLine = true;
+ } else {
+ addUnwrappedLine();
+ ++Line->Level;
+ parseStructuralElement();
+ --Line->Level;
+ }
+ if (FormatTok->Tok.is(tok::kw_else)) {
+ nextToken();
+ if (FormatTok->Tok.is(tok::l_brace)) {
+ CompoundStatementIndenter Indenter(this, Style, Line->Level);
+ parseBlock(/*MustBeDeclaration=*/false);
+ addUnwrappedLine();
+ } else if (FormatTok->Tok.is(tok::kw_if)) {
+ parseIfThenElse();
+ } else {
+ addUnwrappedLine();
+ ++Line->Level;
+ parseStructuralElement();
+ --Line->Level;
+ }
+ } else if (NeedsUnwrappedLine) {
+ addUnwrappedLine();
+ }
+}
+
+void UnwrappedLineParser::parseTryCatch() {
+ assert(FormatTok->isOneOf(tok::kw_try, tok::kw___try) && "'try' expected");
+ nextToken();
+ bool NeedsUnwrappedLine = false;
+ if (FormatTok->is(tok::colon)) {
+    // We are in a function try block; what follows is an initializer list.
+ nextToken();
+ while (FormatTok->is(tok::identifier)) {
+ nextToken();
+ if (FormatTok->is(tok::l_paren))
+ parseParens();
+ if (FormatTok->is(tok::comma))
+ nextToken();
+ }
+ }
+  // Parse Java's try-with-resources specification.
+ if (Style.Language == FormatStyle::LK_Java && FormatTok->is(tok::l_paren)) {
+ parseParens();
+ }
+ if (FormatTok->is(tok::l_brace)) {
+ CompoundStatementIndenter Indenter(this, Style, Line->Level);
+ parseBlock(/*MustBeDeclaration=*/false);
+ if (Style.BraceWrapping.BeforeCatch) {
+ addUnwrappedLine();
+ } else {
+ NeedsUnwrappedLine = true;
+ }
+ } else if (!FormatTok->is(tok::kw_catch)) {
+    // The C++ standard requires a compound-statement after a try.
+    // If there's none, we assume there's a structural element
+    // and try to continue.
+ addUnwrappedLine();
+ ++Line->Level;
+ parseStructuralElement();
+ --Line->Level;
+ }
+ while (1) {
+ if (FormatTok->is(tok::at))
+ nextToken();
+ if (!(FormatTok->isOneOf(tok::kw_catch, Keywords.kw___except,
+ tok::kw___finally) ||
+ ((Style.Language == FormatStyle::LK_Java ||
+ Style.Language == FormatStyle::LK_JavaScript) &&
+ FormatTok->is(Keywords.kw_finally)) ||
+ (FormatTok->Tok.isObjCAtKeyword(tok::objc_catch) ||
+ FormatTok->Tok.isObjCAtKeyword(tok::objc_finally))))
+ break;
+ nextToken();
+ while (FormatTok->isNot(tok::l_brace)) {
+ if (FormatTok->is(tok::l_paren)) {
+ parseParens();
+ continue;
+ }
+ if (FormatTok->isOneOf(tok::semi, tok::r_brace, tok::eof))
+ return;
+ nextToken();
+ }
+ NeedsUnwrappedLine = false;
+ CompoundStatementIndenter Indenter(this, Style, Line->Level);
+ parseBlock(/*MustBeDeclaration=*/false);
+ if (Style.BraceWrapping.BeforeCatch)
+ addUnwrappedLine();
+ else
+ NeedsUnwrappedLine = true;
+ }
+ if (NeedsUnwrappedLine)
+ addUnwrappedLine();
+}
+
+void UnwrappedLineParser::parseNamespace() {
+ assert(FormatTok->Tok.is(tok::kw_namespace) && "'namespace' expected");
+
+ const FormatToken &InitialToken = *FormatTok;
+ nextToken();
+ while (FormatTok->isOneOf(tok::identifier, tok::coloncolon))
+ nextToken();
+ if (FormatTok->Tok.is(tok::l_brace)) {
+ if (ShouldBreakBeforeBrace(Style, InitialToken))
+ addUnwrappedLine();
+
+ bool AddLevel = Style.NamespaceIndentation == FormatStyle::NI_All ||
+ (Style.NamespaceIndentation == FormatStyle::NI_Inner &&
+ DeclarationScopeStack.size() > 1);
+ parseBlock(/*MustBeDeclaration=*/true, AddLevel);
+    // Munch the semicolon after a namespace. This is more common than one
+    // would think. Putting the semicolon into its own line is very ugly.
+ if (FormatTok->Tok.is(tok::semi))
+ nextToken();
+ addUnwrappedLine();
+ }
+ // FIXME: Add error handling.
+}
+
+void UnwrappedLineParser::parseNew() {
+ assert(FormatTok->is(tok::kw_new) && "'new' expected");
+ nextToken();
+ if (Style.Language != FormatStyle::LK_Java)
+ return;
+
+ // In Java, we can parse everything up to the parens, which aren't optional.
+ do {
+    // There should not be a ;, { or } before the new expression's opening paren.
+ if (FormatTok->isOneOf(tok::semi, tok::l_brace, tok::r_brace))
+ return;
+
+ // Consume the parens.
+ if (FormatTok->is(tok::l_paren)) {
+ parseParens();
+
+    // If there is a class body of an anonymous class, consume it as a
+    // child block.
+ if (FormatTok->is(tok::l_brace))
+ parseChildBlock();
+ return;
+ }
+ nextToken();
+ } while (!eof());
+}
+
+void UnwrappedLineParser::parseForOrWhileLoop() {
+ assert(FormatTok->isOneOf(tok::kw_for, tok::kw_while, TT_ForEachMacro) &&
+ "'for', 'while' or foreach macro expected");
+ nextToken();
+ if (FormatTok->Tok.is(tok::l_paren))
+ parseParens();
+ if (FormatTok->Tok.is(tok::l_brace)) {
+ CompoundStatementIndenter Indenter(this, Style, Line->Level);
+ parseBlock(/*MustBeDeclaration=*/false);
+ addUnwrappedLine();
+ } else {
+ addUnwrappedLine();
+ ++Line->Level;
+ parseStructuralElement();
+ --Line->Level;
+ }
+}
+
+void UnwrappedLineParser::parseDoWhile() {
+ assert(FormatTok->Tok.is(tok::kw_do) && "'do' expected");
+ nextToken();
+ if (FormatTok->Tok.is(tok::l_brace)) {
+ CompoundStatementIndenter Indenter(this, Style, Line->Level);
+ parseBlock(/*MustBeDeclaration=*/false);
+ if (Style.BraceWrapping.IndentBraces)
+ addUnwrappedLine();
+ } else {
+ addUnwrappedLine();
+ ++Line->Level;
+ parseStructuralElement();
+ --Line->Level;
+ }
+
+ // FIXME: Add error handling.
+ if (!FormatTok->Tok.is(tok::kw_while)) {
+ addUnwrappedLine();
+ return;
+ }
+
+ nextToken();
+ parseStructuralElement();
+}
+
+void UnwrappedLineParser::parseLabel() {
+ nextToken();
+ unsigned OldLineLevel = Line->Level;
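+  // Labels are indented one level less than the surrounding statements, but
+  // never below level 1 inside a preprocessor directive and never below 0
+  // otherwise.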
+ if (Line->Level > 1 || (!Line->InPPDirective && Line->Level > 0))
+ --Line->Level;
+ if (CommentsBeforeNextToken.empty() && FormatTok->Tok.is(tok::l_brace)) {
+ CompoundStatementIndenter Indenter(this, Style, Line->Level);
+ parseBlock(/*MustBeDeclaration=*/false);
+ if (FormatTok->Tok.is(tok::kw_break)) {
+ if (Style.BraceWrapping.AfterControlStatement)
+ addUnwrappedLine();
+ parseStructuralElement();
+ }
+ addUnwrappedLine();
+ } else {
+ if (FormatTok->is(tok::semi))
+ nextToken();
+ addUnwrappedLine();
+ }
+ Line->Level = OldLineLevel;
+}
+
+void UnwrappedLineParser::parseCaseLabel() {
+ assert(FormatTok->Tok.is(tok::kw_case) && "'case' expected");
+ // FIXME: fix handling of complex expressions here.
+ do {
+ nextToken();
+ } while (!eof() && !FormatTok->Tok.is(tok::colon));
+ parseLabel();
+}
+
+void UnwrappedLineParser::parseSwitch() {
+ assert(FormatTok->Tok.is(tok::kw_switch) && "'switch' expected");
+ nextToken();
+ if (FormatTok->Tok.is(tok::l_paren))
+ parseParens();
+ if (FormatTok->Tok.is(tok::l_brace)) {
+ CompoundStatementIndenter Indenter(this, Style, Line->Level);
+ parseBlock(/*MustBeDeclaration=*/false);
+ addUnwrappedLine();
+ } else {
+ addUnwrappedLine();
+ ++Line->Level;
+ parseStructuralElement();
+ --Line->Level;
+ }
+}
+
+void UnwrappedLineParser::parseAccessSpecifier() {
+ nextToken();
+ // Understand Qt's slots.
+ if (FormatTok->isOneOf(Keywords.kw_slots, Keywords.kw_qslots))
+ nextToken();
+ // Otherwise, we don't know what it is, and we'd better keep the next token.
+ if (FormatTok->Tok.is(tok::colon))
+ nextToken();
+ addUnwrappedLine();
+}
+
+bool UnwrappedLineParser::parseEnum() {
+ // Won't be 'enum' for NS_ENUMs.
+ if (FormatTok->Tok.is(tok::kw_enum))
+ nextToken();
+
+  // In TypeScript, "enum" can also be used as a property name, e.g. in
+  // interface declarations. An "enum" keyword followed by a colon would be a
+  // syntax error, so we assume it is just an identifier here.
+ if (Style.Language == FormatStyle::LK_JavaScript && FormatTok->is(tok::colon))
+ return false;
+
+ // Eat up enum class ...
+ if (FormatTok->Tok.is(tok::kw_class) || FormatTok->Tok.is(tok::kw_struct))
+ nextToken();
+
+ while (FormatTok->Tok.getIdentifierInfo() ||
+ FormatTok->isOneOf(tok::colon, tok::coloncolon, tok::less,
+ tok::greater, tok::comma, tok::question)) {
+ nextToken();
+ // We can have macros or attributes in between 'enum' and the enum name.
+ if (FormatTok->is(tok::l_paren))
+ parseParens();
+ if (FormatTok->is(tok::identifier)) {
+ nextToken();
+ // If there are two identifiers in a row, this is likely an elaborate
+ // return type. In Java, this can be "implements", etc.
+ if (Style.Language == FormatStyle::LK_Cpp &&
+ FormatTok->is(tok::identifier))
+ return false;
+ }
+ }
+
+  // No opening brace: this is just a declaration, or something is wrong.
+ if (FormatTok->isNot(tok::l_brace))
+ return true;
+ FormatTok->BlockKind = BK_Block;
+
+ if (Style.Language == FormatStyle::LK_Java) {
+ // Java enums are different.
+ parseJavaEnumBody();
+ return true;
+ }
+ if (Style.Language == FormatStyle::LK_Proto) {
+ parseBlock(/*MustBeDeclaration=*/true);
+ return true;
+ }
+
+ // Parse enum body.
+ bool HasError = !parseBracedList(/*ContinueOnSemicolons=*/true);
+ if (HasError) {
+ if (FormatTok->is(tok::semi))
+ nextToken();
+ addUnwrappedLine();
+ }
+
+  // There is no addUnwrappedLine() here so that we fall through to parsing a
+  // structural element afterwards. Thus, in "enum A {} n, m;",
+  // "} n, m;" will end up in one unwrapped line.
+  return true;
+}
+
+void UnwrappedLineParser::parseJavaEnumBody() {
+ // Determine whether the enum is simple, i.e. does not have a semicolon or
+ // constants with class bodies. Simple enums can be formatted like braced
+ // lists, contracted to a single line, etc.
+ unsigned StoredPosition = Tokens->getPosition();
+ bool IsSimple = true;
+ FormatToken *Tok = Tokens->getNextToken();
+ while (Tok) {
+ if (Tok->is(tok::r_brace))
+ break;
+ if (Tok->isOneOf(tok::l_brace, tok::semi)) {
+ IsSimple = false;
+ break;
+ }
+ // FIXME: This will also mark enums with braces in the arguments to enum
+ // constants as "not simple". This is probably fine in practice, though.
+ Tok = Tokens->getNextToken();
+ }
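+  // Rewind to where we started; the look-ahead must not consume tokens.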
+ FormatTok = Tokens->setPosition(StoredPosition);
+
+ if (IsSimple) {
+ parseBracedList();
+ addUnwrappedLine();
+ return;
+ }
+
+ // Parse the body of a more complex enum.
+ // First add a line for everything up to the "{".
+ nextToken();
+ addUnwrappedLine();
+ ++Line->Level;
+
+ // Parse the enum constants.
+ while (FormatTok) {
+ if (FormatTok->is(tok::l_brace)) {
+ // Parse the constant's class body.
+ parseBlock(/*MustBeDeclaration=*/true, /*AddLevel=*/true,
+ /*MunchSemi=*/false);
+ } else if (FormatTok->is(tok::l_paren)) {
+ parseParens();
+ } else if (FormatTok->is(tok::comma)) {
+ nextToken();
+ addUnwrappedLine();
+ } else if (FormatTok->is(tok::semi)) {
+ nextToken();
+ addUnwrappedLine();
+ break;
+ } else if (FormatTok->is(tok::r_brace)) {
+ addUnwrappedLine();
+ break;
+ } else {
+ nextToken();
+ }
+ }
+
+ // Parse the class body after the enum's ";" if any.
+ parseLevel(/*HasOpeningBrace=*/true);
+ nextToken();
+ --Line->Level;
+ addUnwrappedLine();
+}
+
+void UnwrappedLineParser::parseRecord() {
+ const FormatToken &InitialToken = *FormatTok;
+ nextToken();
+
+ // The actual identifier can be a nested name specifier, and in macros
+ // it is often token-pasted.
+ while (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::hashhash,
+ tok::kw___attribute, tok::kw___declspec,
+ tok::kw_alignas) ||
+ ((Style.Language == FormatStyle::LK_Java ||
+ Style.Language == FormatStyle::LK_JavaScript) &&
+ FormatTok->isOneOf(tok::period, tok::comma))) {
+ bool IsNonMacroIdentifier =
+ FormatTok->is(tok::identifier) &&
+ FormatTok->TokenText != FormatTok->TokenText.upper();
+ nextToken();
+ // We can have macros or attributes in between 'class' and the class name.
+ if (!IsNonMacroIdentifier && FormatTok->Tok.is(tok::l_paren))
+ parseParens();
+ }
+
+ // Note that parsing away template declarations here leads to incorrectly
+ // accepting function declarations as record declarations.
+ // In general, we cannot solve this problem. Consider:
+ // class A<int> B() {}
+ // which can be a function definition or a class definition when B() is a
+ // macro. If we find enough real-world cases where this is a problem, we
+ // can parse for the 'template' keyword in the beginning of the statement,
+ // and thus rule out the record production in case there is no template
+ // (this would still leave us with an ambiguity between template function
+ // and class declarations).
+ if (FormatTok->isOneOf(tok::colon, tok::less)) {
+ while (!eof()) {
+ if (FormatTok->is(tok::l_brace)) {
+ calculateBraceTypes(/*ExpectClassBody=*/true);
+ if (!tryToParseBracedList())
+ break;
+ }
+ if (FormatTok->Tok.is(tok::semi))
+ return;
+ nextToken();
+ }
+ }
+ if (FormatTok->Tok.is(tok::l_brace)) {
+ if (ShouldBreakBeforeBrace(Style, InitialToken))
+ addUnwrappedLine();
+
+ parseBlock(/*MustBeDeclaration=*/true, /*AddLevel=*/true,
+ /*MunchSemi=*/false);
+ }
+ // There is no addUnwrappedLine() here so that we fall through to parsing a
+ // structural element afterwards. Thus, in "class A {} n, m;",
+ // "} n, m;" will end up in one unwrapped line.
+}
+
+void UnwrappedLineParser::parseObjCProtocolList() {
+ assert(FormatTok->Tok.is(tok::less) && "'<' expected.");
+ do
+ nextToken();
+ while (!eof() && FormatTok->Tok.isNot(tok::greater));
+ nextToken(); // Skip '>'.
+}
+
+void UnwrappedLineParser::parseObjCUntilAtEnd() {
+ do {
+ if (FormatTok->Tok.isObjCAtKeyword(tok::objc_end)) {
+ nextToken();
+ addUnwrappedLine();
+ break;
+ }
+ if (FormatTok->is(tok::l_brace)) {
+ parseBlock(/*MustBeDeclaration=*/false);
+      // In ObjC interfaces, nothing should follow the "}".
+ addUnwrappedLine();
+ } else if (FormatTok->is(tok::r_brace)) {
+ // Ignore stray "}". parseStructuralElement doesn't consume them.
+ nextToken();
+ addUnwrappedLine();
+ } else {
+ parseStructuralElement();
+ }
+ } while (!eof());
+}
+
+void UnwrappedLineParser::parseObjCInterfaceOrImplementation() {
+ nextToken();
+ nextToken(); // interface name
+
+ // @interface can be followed by either a base class, or a category.
+ if (FormatTok->Tok.is(tok::colon)) {
+ nextToken();
+ nextToken(); // base class name
+ } else if (FormatTok->Tok.is(tok::l_paren))
+ // Skip category, if present.
+ parseParens();
+
+ if (FormatTok->Tok.is(tok::less))
+ parseObjCProtocolList();
+
+ if (FormatTok->Tok.is(tok::l_brace)) {
+ if (Style.BraceWrapping.AfterObjCDeclaration)
+ addUnwrappedLine();
+ parseBlock(/*MustBeDeclaration=*/true);
+ }
+
+ // With instance variables, this puts '}' on its own line. Without instance
+ // variables, this ends the @interface line.
+ addUnwrappedLine();
+
+ parseObjCUntilAtEnd();
+}
+
+void UnwrappedLineParser::parseObjCProtocol() {
+ nextToken();
+ nextToken(); // protocol name
+
+ if (FormatTok->Tok.is(tok::less))
+ parseObjCProtocolList();
+
+ // Check for protocol declaration.
+ if (FormatTok->Tok.is(tok::semi)) {
+ nextToken();
+ return addUnwrappedLine();
+ }
+
+ addUnwrappedLine();
+ parseObjCUntilAtEnd();
+}
+
+void UnwrappedLineParser::parseJavaScriptEs6ImportExport() {
+ assert(FormatTok->isOneOf(Keywords.kw_import, tok::kw_export));
+ nextToken();
+
+ // Consume the "default" in "export default class/function".
+ if (FormatTok->is(tok::kw_default))
+ nextToken();
+
+ // Consume "function" and "default function", so that these get parsed as
+ // free-standing JS functions, i.e. do not require a trailing semicolon.
+ if (FormatTok->is(Keywords.kw_function)) {
+ nextToken();
+ return;
+ }
+
+ if (FormatTok->isOneOf(tok::kw_const, tok::kw_class, tok::kw_enum,
+ Keywords.kw_let, Keywords.kw_var))
+ return; // Fall through to parsing the corresponding structure.
+
+ if (FormatTok->is(tok::l_brace)) {
+ FormatTok->BlockKind = BK_Block;
+ parseBracedList();
+ }
+
+ while (!eof() && FormatTok->isNot(tok::semi) &&
+ FormatTok->isNot(tok::l_brace)) {
+ nextToken();
+ }
+}
+
+LLVM_ATTRIBUTE_UNUSED static void printDebugInfo(const UnwrappedLine &Line,
+ StringRef Prefix = "") {
+ llvm::dbgs() << Prefix << "Line(" << Line.Level << ")"
+ << (Line.InPPDirective ? " MACRO" : "") << ": ";
+ for (std::list<UnwrappedLineNode>::const_iterator I = Line.Tokens.begin(),
+ E = Line.Tokens.end();
+ I != E; ++I) {
+ llvm::dbgs() << I->Tok->Tok.getName() << "[" << I->Tok->Type << "] ";
+ }
+ for (std::list<UnwrappedLineNode>::const_iterator I = Line.Tokens.begin(),
+ E = Line.Tokens.end();
+ I != E; ++I) {
+ const UnwrappedLineNode &Node = *I;
+ for (SmallVectorImpl<UnwrappedLine>::const_iterator
+ I = Node.Children.begin(),
+ E = Node.Children.end();
+ I != E; ++I) {
+ printDebugInfo(*I, "\nChild: ");
+ }
+ }
+ llvm::dbgs() << "\n";
+}
+
+void UnwrappedLineParser::addUnwrappedLine() {
+ if (Line->Tokens.empty())
+ return;
+ DEBUG({
+ if (CurrentLines == &Lines)
+ printDebugInfo(*Line);
+ });
+ CurrentLines->push_back(std::move(*Line));
+ Line->Tokens.clear();
+ if (CurrentLines == &Lines && !PreprocessorDirectives.empty()) {
+ CurrentLines->append(
+ std::make_move_iterator(PreprocessorDirectives.begin()),
+ std::make_move_iterator(PreprocessorDirectives.end()));
+ PreprocessorDirectives.clear();
+ }
+}
+
+bool UnwrappedLineParser::eof() const { return FormatTok->Tok.is(tok::eof); }
+
+bool UnwrappedLineParser::isOnNewLine(const FormatToken &FormatTok) {
+ return (Line->InPPDirective || FormatTok.HasUnescapedNewline) &&
+ FormatTok.NewlinesBefore > 0;
+}
+
+void UnwrappedLineParser::flushComments(bool NewlineBeforeNext) {
+ bool JustComments = Line->Tokens.empty();
+ for (SmallVectorImpl<FormatToken *>::const_iterator
+ I = CommentsBeforeNextToken.begin(),
+ E = CommentsBeforeNextToken.end();
+ I != E; ++I) {
+ if (isOnNewLine(**I) && JustComments)
+ addUnwrappedLine();
+ pushToken(*I);
+ }
+ if (NewlineBeforeNext && JustComments)
+ addUnwrappedLine();
+ CommentsBeforeNextToken.clear();
+}
+
+void UnwrappedLineParser::nextToken() {
+ if (eof())
+ return;
+ flushComments(isOnNewLine(*FormatTok));
+ pushToken(FormatTok);
+ readToken();
+}
+
+void UnwrappedLineParser::readToken() {
+ bool CommentsInCurrentLine = true;
+ do {
+ FormatTok = Tokens->getNextToken();
+ assert(FormatTok);
+ while (!Line->InPPDirective && FormatTok->Tok.is(tok::hash) &&
+ (FormatTok->HasUnescapedNewline || FormatTok->IsFirst)) {
+      // If there is an unfinished unwrapped line, we flush the preprocessor
+      // directives only after that unwrapped line has been finished.
+ bool SwitchToPreprocessorLines = !Line->Tokens.empty();
+ ScopedLineState BlockState(*this, SwitchToPreprocessorLines);
+ // Comments stored before the preprocessor directive need to be output
+ // before the preprocessor directive, at the same level as the
+ // preprocessor directive, as we consider them to apply to the directive.
+ flushComments(isOnNewLine(*FormatTok));
+ parsePPDirective();
+ }
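+    // Handle version control conflict markers like preprocessor branches:
+    // each side of the conflict is treated as a separate branch.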
+ while (FormatTok->Type == TT_ConflictStart ||
+ FormatTok->Type == TT_ConflictEnd ||
+ FormatTok->Type == TT_ConflictAlternative) {
+ if (FormatTok->Type == TT_ConflictStart) {
+ conditionalCompilationStart(/*Unreachable=*/false);
+ } else if (FormatTok->Type == TT_ConflictAlternative) {
+ conditionalCompilationAlternative();
+ } else if (FormatTok->Type == TT_ConflictEnd) {
+ conditionalCompilationEnd();
+ }
+ FormatTok = Tokens->getNextToken();
+ FormatTok->MustBreakBefore = true;
+ }
+
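+    // Skip the tokens of preprocessor branches that are not taken in this
+    // pass over the code.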
+ if (!PPStack.empty() && (PPStack.back() == PP_Unreachable) &&
+ !Line->InPPDirective) {
+ continue;
+ }
+
+ if (!FormatTok->Tok.is(tok::comment))
+ return;
+ if (isOnNewLine(*FormatTok) || FormatTok->IsFirst) {
+ CommentsInCurrentLine = false;
+ }
+ if (CommentsInCurrentLine) {
+ pushToken(FormatTok);
+ } else {
+ CommentsBeforeNextToken.push_back(FormatTok);
+ }
+ } while (!eof());
+}
+
+void UnwrappedLineParser::pushToken(FormatToken *Tok) {
+ Line->Tokens.push_back(UnwrappedLineNode(Tok));
+ if (MustBreakBeforeNextToken) {
+ Line->Tokens.back().Tok->MustBreakBefore = true;
+ MustBreakBeforeNextToken = false;
+ }
+}
+
+} // end namespace format
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Format/UnwrappedLineParser.h b/contrib/llvm/tools/clang/lib/Format/UnwrappedLineParser.h
new file mode 100644
index 0000000..a13c03f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Format/UnwrappedLineParser.h
@@ -0,0 +1,220 @@
+//===--- UnwrappedLineParser.h - Format C++ code ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file contains the declaration of the UnwrappedLineParser,
+/// which turns a stream of tokens into UnwrappedLines.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEPARSER_H
+#define LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEPARSER_H
+
+#include "FormatToken.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Format/Format.h"
+#include <list>
+#include <stack>
+
+namespace clang {
+namespace format {
+
+struct UnwrappedLineNode;
+
+/// \brief An unwrapped line is a sequence of \c Token objects that we would
+/// like to put on a single line if there were no column limit.
+///
+/// This is used as a main interface between the \c UnwrappedLineParser and the
+/// \c UnwrappedLineFormatter. The key property is that changing the formatting
+/// within an unwrapped line does not affect any other unwrapped lines.
+struct UnwrappedLine {
+ UnwrappedLine();
+
+ // FIXME: Don't use std::list here.
+ /// \brief The \c Tokens comprising this \c UnwrappedLine.
+ std::list<UnwrappedLineNode> Tokens;
+
+ /// \brief The indent level of the \c UnwrappedLine.
+ unsigned Level;
+
+ /// \brief Whether this \c UnwrappedLine is part of a preprocessor directive.
+ bool InPPDirective;
+
+ bool MustBeDeclaration;
+};
+
+class UnwrappedLineConsumer {
+public:
+ virtual ~UnwrappedLineConsumer() {}
+ virtual void consumeUnwrappedLine(const UnwrappedLine &Line) = 0;
+ virtual void finishRun() = 0;
+};
+
+class FormatTokenSource;
+
+class UnwrappedLineParser {
+public:
+ UnwrappedLineParser(const FormatStyle &Style,
+ const AdditionalKeywords &Keywords,
+ ArrayRef<FormatToken *> Tokens,
+ UnwrappedLineConsumer &Callback);
+
+ void parse();
+
+private:
+ void reset();
+ void parseFile();
+ void parseLevel(bool HasOpeningBrace);
+ void parseBlock(bool MustBeDeclaration, bool AddLevel = true,
+ bool MunchSemi = true);
+ void parseChildBlock();
+ void parsePPDirective();
+ void parsePPDefine();
+ void parsePPIf(bool IfDef);
+ void parsePPElIf();
+ void parsePPElse();
+ void parsePPEndIf();
+ void parsePPUnknown();
+ void parseStructuralElement();
+ bool tryToParseBracedList();
+ bool parseBracedList(bool ContinueOnSemicolons = false);
+ void parseParens();
+ void parseSquare();
+ void parseIfThenElse();
+ void parseTryCatch();
+ void parseForOrWhileLoop();
+ void parseDoWhile();
+ void parseLabel();
+ void parseCaseLabel();
+ void parseSwitch();
+ void parseNamespace();
+ void parseNew();
+ void parseAccessSpecifier();
+ bool parseEnum();
+ void parseJavaEnumBody();
+ void parseRecord();
+ void parseObjCProtocolList();
+ void parseObjCUntilAtEnd();
+ void parseObjCInterfaceOrImplementation();
+ void parseObjCProtocol();
+ void parseJavaScriptEs6ImportExport();
+ bool tryToParseLambda();
+ bool tryToParseLambdaIntroducer();
+ void tryToParseJSFunction();
+ void addUnwrappedLine();
+ bool eof() const;
+ void nextToken();
+ void readToken();
+ void flushComments(bool NewlineBeforeNext);
+ void pushToken(FormatToken *Tok);
+ void calculateBraceTypes(bool ExpectClassBody = false);
+
+ // Marks a conditional compilation edge (for example, an '#if', '#ifdef',
+ // '#else' or merge conflict marker). If 'Unreachable' is true, assumes
+ // this branch either cannot be taken (for example '#if false'), or should
+ // not be taken in this round.
+ void conditionalCompilationCondition(bool Unreachable);
+ void conditionalCompilationStart(bool Unreachable);
+ void conditionalCompilationAlternative();
+ void conditionalCompilationEnd();
+
+ bool isOnNewLine(const FormatToken &FormatTok);
+
+ // FIXME: We are constantly running into bugs where Line.Level is incorrectly
+ // subtracted from beyond 0. Introduce a method to subtract from Line.Level
+ // and use that everywhere in the Parser.
+ std::unique_ptr<UnwrappedLine> Line;
+
+ // Comments are sorted into unwrapped lines by whether they are in the same
+ // line as the previous token, or not. If not, they belong to the next token.
+ // Since the next token might already be in a new unwrapped line, we need to
+ // store the comments belonging to that token.
+ SmallVector<FormatToken *, 1> CommentsBeforeNextToken;
+ FormatToken *FormatTok;
+ bool MustBreakBeforeNextToken;
+
+ // The parsed lines. Only added to through \c CurrentLines.
+ SmallVector<UnwrappedLine, 8> Lines;
+
+ // Preprocessor directives are parsed out-of-order from other unwrapped lines.
+ // Thus, we need to keep a list of preprocessor directives to be reported
+  // after an unwrapped line that has been started is finished.
+ SmallVector<UnwrappedLine, 4> PreprocessorDirectives;
+
+ // New unwrapped lines are added via CurrentLines.
+ // Usually points to \c &Lines. While parsing a preprocessor directive when
+ // there is an unfinished previous unwrapped line, will point to
+ // \c &PreprocessorDirectives.
+ SmallVectorImpl<UnwrappedLine> *CurrentLines;
+
+ // We store for each line whether it must be a declaration depending on
+ // whether we are in a compound statement or not.
+ std::vector<bool> DeclarationScopeStack;
+
+ const FormatStyle &Style;
+ const AdditionalKeywords &Keywords;
+
+ FormatTokenSource *Tokens;
+ UnwrappedLineConsumer &Callback;
+
+ // FIXME: This is a temporary measure until we have reworked the ownership
+ // of the format tokens. The goal is to have the actual tokens created and
+ // owned outside of and handed into the UnwrappedLineParser.
+ ArrayRef<FormatToken *> AllTokens;
+
+ // Represents preprocessor branch type, so we can find matching
+ // #if/#else/#endif directives.
+ enum PPBranchKind {
+ PP_Conditional, // Any #if, #ifdef, #ifndef, #elif, block outside #if 0
+ PP_Unreachable // #if 0 or a conditional preprocessor block inside #if 0
+ };
+
+ // Keeps a stack of currently active preprocessor branching directives.
+ SmallVector<PPBranchKind, 16> PPStack;
+
+ // The \c UnwrappedLineParser re-parses the code for each combination
+ // of preprocessor branches that can be taken.
+ // To that end, we take the same branch (#if, #else, or one of the #elif
+ // branches) for each nesting level of preprocessor branches.
+ // \c PPBranchLevel stores the current nesting level of preprocessor
+ // branches during one pass over the code.
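+  // For example, "#if A ... #else ... #endif" is parsed twice, once taking
+  // the #if branch and once taking the #else branch.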
+ int PPBranchLevel;
+
+ // Contains the current branch (#if, #else or one of the #elif branches)
+ // for each nesting level.
+ SmallVector<int, 8> PPLevelBranchIndex;
+
+ // Contains the maximum number of branches at each nesting level.
+ SmallVector<int, 8> PPLevelBranchCount;
+
+ // Contains the number of branches per nesting level we are currently
+ // in while parsing a preprocessor branch sequence.
+ // This is used to update PPLevelBranchCount at the end of a branch
+ // sequence.
+ std::stack<int> PPChainBranchIndex;
+
+ friend class ScopedLineState;
+ friend class CompoundStatementIndenter;
+};
+
+struct UnwrappedLineNode {
+ UnwrappedLineNode() : Tok(nullptr) {}
+ UnwrappedLineNode(FormatToken *Tok) : Tok(Tok) {}
+
+ FormatToken *Tok;
+ SmallVector<UnwrappedLine, 0> Children;
+};
+
+inline UnwrappedLine::UnwrappedLine()
+ : Level(0), InPPDirective(false), MustBeDeclaration(false) {}
+
+} // end namespace format
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Format/WhitespaceManager.cpp b/contrib/llvm/tools/clang/lib/Format/WhitespaceManager.cpp
new file mode 100644
index 0000000..725f05b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Format/WhitespaceManager.cpp
@@ -0,0 +1,543 @@
+//===--- WhitespaceManager.cpp - Format C++ code --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements WhitespaceManager class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WhitespaceManager.h"
+#include "llvm/ADT/STLExtras.h"
+
+namespace clang {
+namespace format {
+
+bool WhitespaceManager::Change::IsBeforeInFile::
+operator()(const Change &C1, const Change &C2) const {
+ return SourceMgr.isBeforeInTranslationUnit(
+ C1.OriginalWhitespaceRange.getBegin(),
+ C2.OriginalWhitespaceRange.getBegin());
+}
+
+WhitespaceManager::Change::Change(
+ bool CreateReplacement, SourceRange OriginalWhitespaceRange,
+ unsigned IndentLevel, int Spaces, unsigned StartOfTokenColumn,
+ unsigned NewlinesBefore, StringRef PreviousLinePostfix,
+ StringRef CurrentLinePrefix, tok::TokenKind Kind, bool ContinuesPPDirective,
+ bool IsStartOfDeclName)
+ : CreateReplacement(CreateReplacement),
+ OriginalWhitespaceRange(OriginalWhitespaceRange),
+ StartOfTokenColumn(StartOfTokenColumn), NewlinesBefore(NewlinesBefore),
+ PreviousLinePostfix(PreviousLinePostfix),
+ CurrentLinePrefix(CurrentLinePrefix), Kind(Kind),
+ ContinuesPPDirective(ContinuesPPDirective),
+ IsStartOfDeclName(IsStartOfDeclName), IndentLevel(IndentLevel),
+ Spaces(Spaces), IsTrailingComment(false), TokenLength(0),
+ PreviousEndOfTokenColumn(0), EscapedNewlineColumn(0),
+ StartOfBlockComment(nullptr), IndentationOffset(0) {}
+
+void WhitespaceManager::reset() {
+ Changes.clear();
+ Replaces.clear();
+}
+
+void WhitespaceManager::replaceWhitespace(FormatToken &Tok, unsigned Newlines,
+ unsigned IndentLevel, unsigned Spaces,
+ unsigned StartOfTokenColumn,
+ bool InPPDirective) {
+ if (Tok.Finalized)
+ return;
+ Tok.Decision = (Newlines > 0) ? FD_Break : FD_Continue;
+ Changes.push_back(
+ Change(true, Tok.WhitespaceRange, IndentLevel, Spaces, StartOfTokenColumn,
+ Newlines, "", "", Tok.Tok.getKind(), InPPDirective && !Tok.IsFirst,
+ Tok.is(TT_StartOfName) || Tok.is(TT_FunctionDeclarationName)));
+}
+
+void WhitespaceManager::addUntouchableToken(const FormatToken &Tok,
+ bool InPPDirective) {
+ if (Tok.Finalized)
+ return;
+ Changes.push_back(
+ Change(false, Tok.WhitespaceRange, /*IndentLevel=*/0,
+ /*Spaces=*/0, Tok.OriginalColumn, Tok.NewlinesBefore, "", "",
+ Tok.Tok.getKind(), InPPDirective && !Tok.IsFirst,
+ Tok.is(TT_StartOfName) || Tok.is(TT_FunctionDeclarationName)));
+}
+
+void WhitespaceManager::replaceWhitespaceInToken(
+ const FormatToken &Tok, unsigned Offset, unsigned ReplaceChars,
+ StringRef PreviousPostfix, StringRef CurrentPrefix, bool InPPDirective,
+ unsigned Newlines, unsigned IndentLevel, int Spaces) {
+ if (Tok.Finalized)
+ return;
+ SourceLocation Start = Tok.getStartOfNonWhitespace().getLocWithOffset(Offset);
+ Changes.push_back(Change(
+ true, SourceRange(Start, Start.getLocWithOffset(ReplaceChars)),
+ IndentLevel, Spaces, std::max(0, Spaces), Newlines, PreviousPostfix,
+ CurrentPrefix,
+ // If we don't add a newline this change doesn't start a comment. Thus,
+ // when we align line comments, we don't need to treat this change as one.
+ // FIXME: We still need to take this change in account to properly
+ // calculate the new length of the comment and to calculate the changes
+ // for which to do the alignment when aligning comments.
+ Tok.is(TT_LineComment) && Newlines > 0 ? tok::comment : tok::unknown,
+ InPPDirective && !Tok.IsFirst,
+ Tok.is(TT_StartOfName) || Tok.is(TT_FunctionDeclarationName)));
+}
+
+const tooling::Replacements &WhitespaceManager::generateReplacements() {
+ if (Changes.empty())
+ return Replaces;
+
+ std::sort(Changes.begin(), Changes.end(), Change::IsBeforeInFile(SourceMgr));
+ calculateLineBreakInformation();
+ alignConsecutiveDeclarations();
+ alignConsecutiveAssignments();
+ alignTrailingComments();
+ alignEscapedNewlines();
+ generateChanges();
+
+ return Replaces;
+}
+
+void WhitespaceManager::calculateLineBreakInformation() {
+ Changes[0].PreviousEndOfTokenColumn = 0;
+ for (unsigned i = 1, e = Changes.size(); i != e; ++i) {
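+    // The length of token i - 1 is the gap between its whitespace range and
+    // the next one, plus any text inserted around the intervening line break.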
+ unsigned OriginalWhitespaceStart =
+ SourceMgr.getFileOffset(Changes[i].OriginalWhitespaceRange.getBegin());
+ unsigned PreviousOriginalWhitespaceEnd = SourceMgr.getFileOffset(
+ Changes[i - 1].OriginalWhitespaceRange.getEnd());
+ Changes[i - 1].TokenLength = OriginalWhitespaceStart -
+ PreviousOriginalWhitespaceEnd +
+ Changes[i].PreviousLinePostfix.size() +
+ Changes[i - 1].CurrentLinePrefix.size();
+
+ Changes[i].PreviousEndOfTokenColumn =
+ Changes[i - 1].StartOfTokenColumn + Changes[i - 1].TokenLength;
+
+ Changes[i - 1].IsTrailingComment =
+ (Changes[i].NewlinesBefore > 0 || Changes[i].Kind == tok::eof) &&
+ Changes[i - 1].Kind == tok::comment;
+ }
+ // FIXME: The last token is currently not always an eof token; in those
+ // cases, setting TokenLength of the last token to 0 is wrong.
+ Changes.back().TokenLength = 0;
+ Changes.back().IsTrailingComment = Changes.back().Kind == tok::comment;
+
+ const WhitespaceManager::Change *LastBlockComment = nullptr;
+ for (auto &Change : Changes) {
+ Change.StartOfBlockComment = nullptr;
+ Change.IndentationOffset = 0;
+ if (Change.Kind == tok::comment) {
+ LastBlockComment = &Change;
+ } else if (Change.Kind == tok::unknown) {
+ if ((Change.StartOfBlockComment = LastBlockComment))
+ Change.IndentationOffset =
+ Change.StartOfTokenColumn -
+ Change.StartOfBlockComment->StartOfTokenColumn;
+ } else {
+ LastBlockComment = nullptr;
+ }
+ }
+}
+
+// Align a single sequence of tokens, see AlignTokens below.
+template <typename F>
+static void
+AlignTokenSequence(unsigned Start, unsigned End, unsigned Column, F &&Matches,
+ SmallVector<WhitespaceManager::Change, 16> &Changes) {
+ bool FoundMatchOnLine = false;
+ int Shift = 0;
+ for (unsigned i = Start; i != End; ++i) {
+ if (Changes[i].NewlinesBefore > 0) {
+ FoundMatchOnLine = false;
+ Shift = 0;
+ }
+
+ // If this is the first matching token to be aligned, remember by how many
+ // spaces it has to be shifted, so the rest of the changes on the line are
+    // shifted by the same amount.
+ if (!FoundMatchOnLine && Matches(Changes[i])) {
+ FoundMatchOnLine = true;
+ Shift = Column - Changes[i].StartOfTokenColumn;
+ Changes[i].Spaces += Shift;
+ }
+
+ assert(Shift >= 0);
+ Changes[i].StartOfTokenColumn += Shift;
+ if (i + 1 != Changes.size())
+ Changes[i + 1].PreviousEndOfTokenColumn += Shift;
+ }
+}
+
+// Walk through all of the changes and find sequences of matching tokens to
+// align. To do so, keep track of the lines and whether or not a matching token
+// was found on a line. If a matching token is found, extend the current
+// sequence. If the current line cannot be part of a sequence, e.g. because
+// there is an empty line before it or it contains only non-matching tokens,
+// finalize the previous sequence.
+template <typename F>
+static void AlignTokens(const FormatStyle &Style, F &&Matches,
+ SmallVector<WhitespaceManager::Change, 16> &Changes) {
+ unsigned MinColumn = 0;
+ unsigned MaxColumn = UINT_MAX;
+
+ // Line number of the start and the end of the current token sequence.
+ unsigned StartOfSequence = 0;
+ unsigned EndOfSequence = 0;
+
+ // Keep track of the nesting level of matching tokens, i.e. the number of
+ // surrounding (), [], or {}. We will only align a sequence of matching
+  // tokens that share the same scope depth.
+ //
+ // FIXME: This could use FormatToken::NestingLevel information, but there is
+ // an outstanding issue wrt the brace scopes.
+ unsigned NestingLevelOfLastMatch = 0;
+ unsigned NestingLevel = 0;
+
+  // Keep track of the number of commas before the matching tokens; we will only
+ // align a sequence of matching tokens if they are preceded by the same number
+ // of commas.
+ unsigned CommasBeforeLastMatch = 0;
+ unsigned CommasBeforeMatch = 0;
+
+ // Whether a matching token has been found on the current line.
+ bool FoundMatchOnLine = false;
+
+  // Aligns a sequence of matching tokens at column MinColumn.
+ //
+ // Sequences start from the first matching token to align, and end at the
+ // first token of the first line that doesn't need to be aligned.
+ //
+ // We need to adjust the StartOfTokenColumn of each Change that is on a line
+ // containing any matching token to be aligned and located after such token.
+ auto AlignCurrentSequence = [&] {
+ if (StartOfSequence > 0 && StartOfSequence < EndOfSequence)
+ AlignTokenSequence(StartOfSequence, EndOfSequence, MinColumn, Matches,
+ Changes);
+ MinColumn = 0;
+ MaxColumn = UINT_MAX;
+ StartOfSequence = 0;
+ EndOfSequence = 0;
+ };
+
+ for (unsigned i = 0, e = Changes.size(); i != e; ++i) {
+ if (Changes[i].NewlinesBefore != 0) {
+ CommasBeforeMatch = 0;
+ EndOfSequence = i;
+ // If there is a blank line, or if the last line didn't contain any
+ // matching token, the sequence ends here.
+ if (Changes[i].NewlinesBefore > 1 || !FoundMatchOnLine)
+ AlignCurrentSequence();
+
+ FoundMatchOnLine = false;
+ }
+
+ if (Changes[i].Kind == tok::comma) {
+ ++CommasBeforeMatch;
+ } else if (Changes[i].Kind == tok::r_brace ||
+ Changes[i].Kind == tok::r_paren ||
+ Changes[i].Kind == tok::r_square) {
+ --NestingLevel;
+ } else if (Changes[i].Kind == tok::l_brace ||
+ Changes[i].Kind == tok::l_paren ||
+ Changes[i].Kind == tok::l_square) {
+ // We want sequences to skip over child scopes if possible, but not the
+ // other way around.
+ NestingLevelOfLastMatch = std::min(NestingLevelOfLastMatch, NestingLevel);
+ ++NestingLevel;
+ }
+
+ if (!Matches(Changes[i]))
+ continue;
+
+    // If there is more than one matching token per line, or if the number of
+    // preceding commas or the scope depth no longer matches, end the sequence.
+ if (FoundMatchOnLine || CommasBeforeMatch != CommasBeforeLastMatch ||
+ NestingLevel != NestingLevelOfLastMatch)
+ AlignCurrentSequence();
+
+ CommasBeforeLastMatch = CommasBeforeMatch;
+ NestingLevelOfLastMatch = NestingLevel;
+ FoundMatchOnLine = true;
+
+ if (StartOfSequence == 0)
+ StartOfSequence = i;
+
+ unsigned ChangeMinColumn = Changes[i].StartOfTokenColumn;
+ int LineLengthAfter = -Changes[i].Spaces;
+ for (unsigned j = i; j != e && Changes[j].NewlinesBefore == 0; ++j)
+ LineLengthAfter += Changes[j].Spaces + Changes[j].TokenLength;
+ unsigned ChangeMaxColumn = Style.ColumnLimit - LineLengthAfter;
+
+ // If we are restricted by the maximum column width, end the sequence.
+ if (ChangeMinColumn > MaxColumn || ChangeMaxColumn < MinColumn ||
+ CommasBeforeLastMatch != CommasBeforeMatch) {
+ AlignCurrentSequence();
+ StartOfSequence = i;
+ }
+
+ MinColumn = std::max(MinColumn, ChangeMinColumn);
+ MaxColumn = std::min(MaxColumn, ChangeMaxColumn);
+ }
+
+ EndOfSequence = Changes.size();
+ AlignCurrentSequence();
+}
+
+void WhitespaceManager::alignConsecutiveAssignments() {
+ if (!Style.AlignConsecutiveAssignments)
+ return;
+
+ AlignTokens(Style,
+ [&](const Change &C) {
+ // Do not align on equal signs that are first on a line.
+ if (C.NewlinesBefore > 0)
+ return false;
+
+ // Do not align on equal signs that are last on a line.
+ if (&C != &Changes.back() && (&C + 1)->NewlinesBefore > 0)
+ return false;
+
+ return C.Kind == tok::equal;
+ },
+ Changes);
+}
+
+void WhitespaceManager::alignConsecutiveDeclarations() {
+ if (!Style.AlignConsecutiveDeclarations)
+ return;
+
+  // FIXME: Currently we don't properly handle PointerAlignment: Right.
+  // The * and & are not aligned and are left dangling. Something has to be
+  // done about it, but it raises the question of how to align code like:
+ // const char* const* v1;
+ // float const* v2;
+ // SomeVeryLongType const& v3;
+
+ AlignTokens(Style, [](Change const &C) { return C.IsStartOfDeclName; },
+ Changes);
+}
+
+void WhitespaceManager::alignTrailingComments() {
+ unsigned MinColumn = 0;
+ unsigned MaxColumn = UINT_MAX;
+ unsigned StartOfSequence = 0;
+ bool BreakBeforeNext = false;
+ unsigned Newlines = 0;
+ for (unsigned i = 0, e = Changes.size(); i != e; ++i) {
+ if (Changes[i].StartOfBlockComment)
+ continue;
+ Newlines += Changes[i].NewlinesBefore;
+ if (!Changes[i].IsTrailingComment)
+ continue;
+
+ unsigned ChangeMinColumn = Changes[i].StartOfTokenColumn;
+ unsigned ChangeMaxColumn = Style.ColumnLimit - Changes[i].TokenLength;
+ if (i + 1 != e && Changes[i + 1].ContinuesPPDirective)
+ ChangeMaxColumn -= 2;
+    // If this comment follows a "}" in column 0, it probably documents the
+ // closing of a namespace and we don't want to align it.
+ bool FollowsRBraceInColumn0 = i > 0 && Changes[i].NewlinesBefore == 0 &&
+ Changes[i - 1].Kind == tok::r_brace &&
+ Changes[i - 1].StartOfTokenColumn == 0;
+ bool WasAlignedWithStartOfNextLine = false;
+ if (Changes[i].NewlinesBefore == 1) { // A comment on its own line.
+ unsigned CommentColumn = SourceMgr.getSpellingColumnNumber(
+ Changes[i].OriginalWhitespaceRange.getEnd());
+ for (unsigned j = i + 1; j != e; ++j) {
+ if (Changes[j].Kind != tok::comment) { // Skip over comments.
+ unsigned NextColumn = SourceMgr.getSpellingColumnNumber(
+ Changes[j].OriginalWhitespaceRange.getEnd());
+ // The start of the next token was previously aligned with the
+ // start of this comment.
+ WasAlignedWithStartOfNextLine =
+ CommentColumn == NextColumn ||
+ CommentColumn == NextColumn + Style.IndentWidth;
+ break;
+ }
+ }
+ }
+ if (!Style.AlignTrailingComments || FollowsRBraceInColumn0) {
+ alignTrailingComments(StartOfSequence, i, MinColumn);
+ MinColumn = ChangeMinColumn;
+ MaxColumn = ChangeMinColumn;
+ StartOfSequence = i;
+ } else if (BreakBeforeNext || Newlines > 1 ||
+ (ChangeMinColumn > MaxColumn || ChangeMaxColumn < MinColumn) ||
+ // Break the comment sequence if the previous line did not end
+ // in a trailing comment.
+ (Changes[i].NewlinesBefore == 1 && i > 0 &&
+ !Changes[i - 1].IsTrailingComment) ||
+ WasAlignedWithStartOfNextLine) {
+ alignTrailingComments(StartOfSequence, i, MinColumn);
+ MinColumn = ChangeMinColumn;
+ MaxColumn = ChangeMaxColumn;
+ StartOfSequence = i;
+ } else {
+ MinColumn = std::max(MinColumn, ChangeMinColumn);
+ MaxColumn = std::min(MaxColumn, ChangeMaxColumn);
+ }
+ BreakBeforeNext =
+ (i == 0) || (Changes[i].NewlinesBefore > 1) ||
+ // Never start a sequence with a comment at the beginning of
+ // the line.
+ (Changes[i].NewlinesBefore == 1 && StartOfSequence == i);
+ Newlines = 0;
+ }
+ alignTrailingComments(StartOfSequence, Changes.size(), MinColumn);
+}
+
+void WhitespaceManager::alignTrailingComments(unsigned Start, unsigned End,
+ unsigned Column) {
+ for (unsigned i = Start; i != End; ++i) {
+ int Shift = 0;
+ if (Changes[i].IsTrailingComment) {
+ Shift = Column - Changes[i].StartOfTokenColumn;
+ }
+ if (Changes[i].StartOfBlockComment) {
+ Shift = Changes[i].IndentationOffset +
+ Changes[i].StartOfBlockComment->StartOfTokenColumn -
+ Changes[i].StartOfTokenColumn;
+ }
+ assert(Shift >= 0);
+ Changes[i].Spaces += Shift;
+ if (i + 1 != End)
+ Changes[i + 1].PreviousEndOfTokenColumn += Shift;
+ Changes[i].StartOfTokenColumn += Shift;
+ }
+}
+
+void WhitespaceManager::alignEscapedNewlines() {
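+  // When aligning left, grow MaxEndOfLine to the longest line of each macro;
+  // otherwise, place the backslashes at the column limit.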
+ unsigned MaxEndOfLine =
+ Style.AlignEscapedNewlinesLeft ? 0 : Style.ColumnLimit;
+ unsigned StartOfMacro = 0;
+ for (unsigned i = 1, e = Changes.size(); i < e; ++i) {
+ Change &C = Changes[i];
+ if (C.NewlinesBefore > 0) {
+ if (C.ContinuesPPDirective) {
+ MaxEndOfLine = std::max(C.PreviousEndOfTokenColumn + 2, MaxEndOfLine);
+ } else {
+ alignEscapedNewlines(StartOfMacro + 1, i, MaxEndOfLine);
+ MaxEndOfLine = Style.AlignEscapedNewlinesLeft ? 0 : Style.ColumnLimit;
+ StartOfMacro = i;
+ }
+ }
+ }
+ alignEscapedNewlines(StartOfMacro + 1, Changes.size(), MaxEndOfLine);
+}
+
+void WhitespaceManager::alignEscapedNewlines(unsigned Start, unsigned End,
+ unsigned Column) {
+ for (unsigned i = Start; i < End; ++i) {
+ Change &C = Changes[i];
+ if (C.NewlinesBefore > 0) {
+ assert(C.ContinuesPPDirective);
+ if (C.PreviousEndOfTokenColumn + 1 > Column)
+ C.EscapedNewlineColumn = 0;
+ else
+ C.EscapedNewlineColumn = Column;
+ }
+ }
+}
+
+void WhitespaceManager::generateChanges() {
+ for (unsigned i = 0, e = Changes.size(); i != e; ++i) {
+ const Change &C = Changes[i];
+ if (i > 0) {
+ assert(Changes[i - 1].OriginalWhitespaceRange.getBegin() !=
+ C.OriginalWhitespaceRange.getBegin() &&
+ "Generating two replacements for the same location");
+ }
+ if (C.CreateReplacement) {
+ std::string ReplacementText = C.PreviousLinePostfix;
+ if (C.ContinuesPPDirective)
+ appendNewlineText(ReplacementText, C.NewlinesBefore,
+ C.PreviousEndOfTokenColumn, C.EscapedNewlineColumn);
+ else
+ appendNewlineText(ReplacementText, C.NewlinesBefore);
+ appendIndentText(ReplacementText, C.IndentLevel, std::max(0, C.Spaces),
+ C.StartOfTokenColumn - std::max(0, C.Spaces));
+ ReplacementText.append(C.CurrentLinePrefix);
+ storeReplacement(C.OriginalWhitespaceRange, ReplacementText);
+ }
+ }
+}
+
+void WhitespaceManager::storeReplacement(SourceRange Range,
+ StringRef Text) {
+ unsigned WhitespaceLength = SourceMgr.getFileOffset(Range.getEnd()) -
+ SourceMgr.getFileOffset(Range.getBegin());
+ // Don't create a replacement, if it does not change anything.
+ if (StringRef(SourceMgr.getCharacterData(Range.getBegin()),
+ WhitespaceLength) == Text)
+ return;
+ Replaces.insert(tooling::Replacement(
+ SourceMgr, CharSourceRange::getCharRange(Range), Text));
+}
+
+void WhitespaceManager::appendNewlineText(std::string &Text,
+ unsigned Newlines) {
+ for (unsigned i = 0; i < Newlines; ++i)
+ Text.append(UseCRLF ? "\r\n" : "\n");
+}
+
+void WhitespaceManager::appendNewlineText(std::string &Text, unsigned Newlines,
+ unsigned PreviousEndOfTokenColumn,
+ unsigned EscapedNewlineColumn) {
+ if (Newlines > 0) {
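+    // Pad so that each backslash lands in column EscapedNewlineColumn; only
+    // the first line already carries text, so later lines pad from column 0.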
+ unsigned Offset =
+ std::min<int>(EscapedNewlineColumn - 1, PreviousEndOfTokenColumn);
+ for (unsigned i = 0; i < Newlines; ++i) {
+ Text.append(EscapedNewlineColumn - Offset - 1, ' ');
+ Text.append(UseCRLF ? "\\\r\n" : "\\\n");
+ Offset = 0;
+ }
+ }
+}
+
+void WhitespaceManager::appendIndentText(std::string &Text,
+ unsigned IndentLevel, unsigned Spaces,
+ unsigned WhitespaceStartColumn) {
+ switch (Style.UseTab) {
+ case FormatStyle::UT_Never:
+ Text.append(Spaces, ' ');
+ break;
+ case FormatStyle::UT_Always: {
+ unsigned FirstTabWidth =
+ Style.TabWidth - WhitespaceStartColumn % Style.TabWidth;
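+    // E.g. at column 3 with TabWidth 4, the first tab advances only 1 column.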
+ // Indent with tabs only when there's at least one full tab.
+ if (FirstTabWidth + Style.TabWidth <= Spaces) {
+ Spaces -= FirstTabWidth;
+ Text.append("\t");
+ }
+ Text.append(Spaces / Style.TabWidth, '\t');
+ Text.append(Spaces % Style.TabWidth, ' ');
+ break;
+ }
+ case FormatStyle::UT_ForIndentation:
+ if (WhitespaceStartColumn == 0) {
+ unsigned Indentation = IndentLevel * Style.IndentWidth;
+ // This happens, e.g. when a line in a block comment is indented less than
+ // the first one.
+ if (Indentation > Spaces)
+ Indentation = Spaces;
+ unsigned Tabs = Indentation / Style.TabWidth;
+ Text.append(Tabs, '\t');
+ Spaces -= Tabs * Style.TabWidth;
+ }
+ Text.append(Spaces, ' ');
+ break;
+ }
+}
+
+} // namespace format
+} // namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Format/WhitespaceManager.h b/contrib/llvm/tools/clang/lib/Format/WhitespaceManager.h
new file mode 100644
index 0000000..f83971b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Format/WhitespaceManager.h
@@ -0,0 +1,210 @@
+//===--- WhitespaceManager.h - Format C++ code ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief WhitespaceManager class manages whitespace around tokens and their
+/// replacements.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_FORMAT_WHITESPACEMANAGER_H
+#define LLVM_CLANG_LIB_FORMAT_WHITESPACEMANAGER_H
+
+#include "TokenAnnotator.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Format/Format.h"
+#include <string>
+
+namespace clang {
+namespace format {
+
+/// \brief Manages the whitespaces around tokens and their replacements.
+///
+/// This includes special handling for certain constructs, e.g. the alignment of
+/// trailing line comments.
+///
+/// To guarantee correctness of alignment operations, the \c WhitespaceManager
+/// must be informed about every token in the source file; for each token, there
+/// must be exactly one call to either \c replaceWhitespace or
+/// \c addUntouchableToken.
+///
+/// There may be multiple calls to \c breakToken for a given token.
+class WhitespaceManager {
+public:
+ WhitespaceManager(SourceManager &SourceMgr, const FormatStyle &Style,
+ bool UseCRLF)
+ : SourceMgr(SourceMgr), Style(Style), UseCRLF(UseCRLF) {}
+
+ /// \brief Prepares the \c WhitespaceManager for another run.
+ void reset();
+
+ /// \brief Replaces the whitespace in front of \p Tok. Only call once for
+ /// each \c AnnotatedToken.
+ void replaceWhitespace(FormatToken &Tok, unsigned Newlines,
+ unsigned IndentLevel, unsigned Spaces,
+ unsigned StartOfTokenColumn,
+ bool InPPDirective = false);
+
+ /// \brief Adds information about an unchangeable token's whitespace.
+ ///
+ /// Needs to be called for every token for which \c replaceWhitespace
+ /// was not called.
+ void addUntouchableToken(const FormatToken &Tok, bool InPPDirective);
+
+ /// \brief Inserts or replaces whitespace in the middle of a token.
+ ///
+ /// Inserts \p PreviousPostfix, \p Newlines, \p Spaces and \p CurrentPrefix
+ /// (in this order) at \p Offset inside \p Tok, replacing \p ReplaceChars
+ /// characters.
+ ///
+  /// Note: \p Spaces can be negative to retain information about the initial
+  /// relative column offset between a line of a block comment and the start
+  /// of the comment. This negative offset may be compensated by trailing
+  /// comment alignment here. In all other cases, negative \p Spaces will be
+  /// truncated to 0.
+ ///
+ /// When \p InPPDirective is true, escaped newlines are inserted. \p Spaces is
+ /// used to align backslashes correctly.
+ void replaceWhitespaceInToken(const FormatToken &Tok, unsigned Offset,
+ unsigned ReplaceChars,
+ StringRef PreviousPostfix,
+ StringRef CurrentPrefix, bool InPPDirective,
+ unsigned Newlines, unsigned IndentLevel,
+ int Spaces);
+
+ /// \brief Returns all the \c Replacements created during formatting.
+ const tooling::Replacements &generateReplacements();
+
+ /// \brief Represents a change before a token, a break inside a token,
+ /// or the layout of an unchanged token (or whitespace within).
+ struct Change {
+ /// \brief Functor to sort changes in original source order.
+ class IsBeforeInFile {
+ public:
+ IsBeforeInFile(const SourceManager &SourceMgr) : SourceMgr(SourceMgr) {}
+ bool operator()(const Change &C1, const Change &C2) const;
+
+ private:
+ const SourceManager &SourceMgr;
+ };
+
+ Change() {}
+
+ /// \brief Creates a \c Change.
+ ///
+ /// The generated \c Change will replace the characters at
+ /// \p OriginalWhitespaceRange with a concatenation of
+ /// \p PreviousLinePostfix, \p NewlinesBefore line breaks, \p Spaces spaces
+ /// and \p CurrentLinePrefix.
+ ///
+ /// \p StartOfTokenColumn and \p InPPDirective will be used to lay out
+ /// trailing comments and escaped newlines.
+ Change(bool CreateReplacement, SourceRange OriginalWhitespaceRange,
+ unsigned IndentLevel, int Spaces, unsigned StartOfTokenColumn,
+ unsigned NewlinesBefore, StringRef PreviousLinePostfix,
+ StringRef CurrentLinePrefix, tok::TokenKind Kind,
+ bool ContinuesPPDirective, bool IsStartOfDeclName);
+
+ bool CreateReplacement;
+ // Changes might be in the middle of a token, so we cannot just keep the
+ // FormatToken around to query its information.
+ SourceRange OriginalWhitespaceRange;
+ unsigned StartOfTokenColumn;
+ unsigned NewlinesBefore;
+ std::string PreviousLinePostfix;
+ std::string CurrentLinePrefix;
+ // The kind of the token whose whitespace this change replaces, or in which
+ // this change inserts whitespace.
+ // FIXME: Currently this is not set correctly for breaks inside comments, as
+ // the \c BreakableToken is still doing its own alignment.
+ tok::TokenKind Kind;
+ bool ContinuesPPDirective;
+ bool IsStartOfDeclName;
+
+ // The number of nested blocks the token is in. This is used to add tabs
+ // only for the indentation, and not for alignment, when
+ // UseTab = US_ForIndentation.
+ unsigned IndentLevel;
+
+ // The number of spaces in front of the token or broken part of the token.
+ // This will be adapted when aligning tokens.
+ // Can be negative to retain information about the initial relative offset
+ // of the lines in a block comment. This is used when aligning trailing
+ // comments. Uncompensated negative offset is truncated to 0.
+ int Spaces;
+
+ // \c IsTrailingComment, \c TokenLength, \c PreviousEndOfTokenColumn and
+ // \c EscapedNewlineColumn will be calculated in
+ // \c calculateLineBreakInformation.
+ bool IsTrailingComment;
+ unsigned TokenLength;
+ unsigned PreviousEndOfTokenColumn;
+ unsigned EscapedNewlineColumn;
+
+ // These fields are used to retain correct relative line indentation in a
+ // block comment when aligning trailing comments.
+ //
+ // If this Change represents a continuation of a block comment,
+ // \c StartOfBlockComment is pointer to the first Change in the block
+ // comment. \c IndentationOffset is a relative column offset to this
+ // change, so that the correct column can be reconstructed at the end of
+ // the alignment process.
+ const Change *StartOfBlockComment;
+ int IndentationOffset;
+ };
+
+private:
+ /// \brief Calculate \c IsTrailingComment, \c TokenLength for the last tokens
+ /// or token parts in a line and \c PreviousEndOfTokenColumn and
+ /// \c EscapedNewlineColumn for the first tokens or token parts in a line.
+ void calculateLineBreakInformation();
+
+ /// \brief Align consecutive assignments over all \c Changes.
+ void alignConsecutiveAssignments();
+
+ /// \brief Align consecutive declarations over all \c Changes.
+ void alignConsecutiveDeclarations();
+
+ /// \brief Align trailing comments over all \c Changes.
+ void alignTrailingComments();
+
+ /// \brief Align trailing comments from change \p Start to change \p End at
+ /// the specified \p Column.
+ void alignTrailingComments(unsigned Start, unsigned End, unsigned Column);
+
+ /// \brief Align escaped newlines over all \c Changes.
+ void alignEscapedNewlines();
+
+ /// \brief Align escaped newlines from change \p Start to change \p End at
+ /// the specified \p Column.
+ void alignEscapedNewlines(unsigned Start, unsigned End, unsigned Column);
+
+ /// \brief Fill \c Replaces with the replacements for all effective changes.
+ void generateChanges();
+
+ /// \brief Stores \p Text as the replacement for the whitespace in \p Range.
+ void storeReplacement(SourceRange Range, StringRef Text);
+ void appendNewlineText(std::string &Text, unsigned Newlines);
+ void appendNewlineText(std::string &Text, unsigned Newlines,
+ unsigned PreviousEndOfTokenColumn,
+ unsigned EscapedNewlineColumn);
+ void appendIndentText(std::string &Text, unsigned IndentLevel,
+ unsigned Spaces, unsigned WhitespaceStartColumn);
+
+ SmallVector<Change, 16> Changes;
+ SourceManager &SourceMgr;
+ tooling::Replacements Replaces;
+ const FormatStyle &Style;
+ bool UseCRLF;
+};
+
+} // namespace format
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp
new file mode 100644
index 0000000..52776b6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp
@@ -0,0 +1,489 @@
+//===--- ASTConsumers.cpp - ASTConsumer implementations -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// AST Consumer Implementations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/ASTConsumers.h"
+#include "clang/AST/AST.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+/// ASTPrinter - Pretty-printer and dumper of ASTs
+
+namespace {
+ class ASTPrinter : public ASTConsumer,
+ public RecursiveASTVisitor<ASTPrinter> {
+ typedef RecursiveASTVisitor<ASTPrinter> base;
+
+ public:
+ ASTPrinter(raw_ostream *Out = nullptr, bool Dump = false,
+ StringRef FilterString = "", bool DumpLookups = false)
+ : Out(Out ? *Out : llvm::outs()), Dump(Dump),
+ FilterString(FilterString), DumpLookups(DumpLookups) {}
+
+ void HandleTranslationUnit(ASTContext &Context) override {
+ TranslationUnitDecl *D = Context.getTranslationUnitDecl();
+
+ if (FilterString.empty())
+ return print(D);
+
+ TraverseDecl(D);
+ }
+
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool TraverseDecl(Decl *D) {
+ if (D && filterMatches(D)) {
+ bool ShowColors = Out.has_colors();
+ if (ShowColors)
+ Out.changeColor(raw_ostream::BLUE);
+ Out << ((Dump || DumpLookups) ? "Dumping " : "Printing ") << getName(D)
+ << ":\n";
+ if (ShowColors)
+ Out.resetColor();
+ print(D);
+ Out << "\n";
+ // Don't traverse child nodes to avoid output duplication.
+ return true;
+ }
+ return base::TraverseDecl(D);
+ }
+
+ private:
+ std::string getName(Decl *D) {
+ if (isa<NamedDecl>(D))
+ return cast<NamedDecl>(D)->getQualifiedNameAsString();
+ return "";
+ }
+ bool filterMatches(Decl *D) {
+ return getName(D).find(FilterString) != std::string::npos;
+ }
+ void print(Decl *D) {
+ if (DumpLookups) {
+ if (DeclContext *DC = dyn_cast<DeclContext>(D)) {
+ if (DC == DC->getPrimaryContext())
+ DC->dumpLookups(Out, Dump);
+ else
+ Out << "Lookup map is in primary DeclContext "
+ << DC->getPrimaryContext() << "\n";
+ } else
+ Out << "Not a DeclContext\n";
+ } else if (Dump)
+ D->dump(Out);
+ else
+ D->print(Out, /*Indentation=*/0, /*PrintInstantiation=*/true);
+ }
+
+ raw_ostream &Out;
+ bool Dump;
+ std::string FilterString;
+ bool DumpLookups;
+ };
+
+ class ASTDeclNodeLister : public ASTConsumer,
+ public RecursiveASTVisitor<ASTDeclNodeLister> {
+ public:
+ ASTDeclNodeLister(raw_ostream *Out = nullptr)
+ : Out(Out ? *Out : llvm::outs()) {}
+
+ void HandleTranslationUnit(ASTContext &Context) override {
+ TraverseDecl(Context.getTranslationUnitDecl());
+ }
+
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool VisitNamedDecl(NamedDecl *D) {
+ D->printQualifiedName(Out);
+ Out << '\n';
+ return true;
+ }
+
+ private:
+ raw_ostream &Out;
+ };
+} // end anonymous namespace
+
+std::unique_ptr<ASTConsumer> clang::CreateASTPrinter(raw_ostream *Out,
+ StringRef FilterString) {
+ return llvm::make_unique<ASTPrinter>(Out, /*Dump=*/false, FilterString);
+}
+
+std::unique_ptr<ASTConsumer> clang::CreateASTDumper(StringRef FilterString,
+ bool DumpDecls,
+ bool DumpLookups) {
+ assert((DumpDecls || DumpLookups) && "nothing to dump");
+ return llvm::make_unique<ASTPrinter>(nullptr, DumpDecls, FilterString,
+ DumpLookups);
+}
+
+std::unique_ptr<ASTConsumer> clang::CreateASTDeclNodeLister() {
+ return llvm::make_unique<ASTDeclNodeLister>(nullptr);
+}
+
+//===----------------------------------------------------------------------===//
+/// ASTViewer - AST Visualization
+
+namespace {
+ class ASTViewer : public ASTConsumer {
+ ASTContext *Context;
+ public:
+ void Initialize(ASTContext &Context) override {
+ this->Context = &Context;
+ }
+
+ bool HandleTopLevelDecl(DeclGroupRef D) override {
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I)
+ HandleTopLevelSingleDecl(*I);
+ return true;
+ }
+
+ void HandleTopLevelSingleDecl(Decl *D);
+ };
+}
+
+void ASTViewer::HandleTopLevelSingleDecl(Decl *D) {
+ if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
+ D->print(llvm::errs());
+
+ if (Stmt *Body = D->getBody()) {
+ llvm::errs() << '\n';
+ Body->viewAST();
+ llvm::errs() << '\n';
+ }
+ }
+}
+
+std::unique_ptr<ASTConsumer> clang::CreateASTViewer() {
+ return llvm::make_unique<ASTViewer>();
+}
+
+//===----------------------------------------------------------------------===//
+/// DeclContextPrinter - Decl and DeclContext Visualization
+
+namespace {
+
+class DeclContextPrinter : public ASTConsumer {
+ raw_ostream& Out;
+public:
+ DeclContextPrinter() : Out(llvm::errs()) {}
+
+ void HandleTranslationUnit(ASTContext &C) override {
+ PrintDeclContext(C.getTranslationUnitDecl(), 4);
+ }
+
+ void PrintDeclContext(const DeclContext* DC, unsigned Indentation);
+};
+} // end anonymous namespace
+
+void DeclContextPrinter::PrintDeclContext(const DeclContext* DC,
+ unsigned Indentation) {
+ // Print DeclContext name.
+ switch (DC->getDeclKind()) {
+ case Decl::TranslationUnit:
+ Out << "[translation unit] " << DC;
+ break;
+ case Decl::Namespace: {
+ Out << "[namespace] ";
+ const NamespaceDecl* ND = cast<NamespaceDecl>(DC);
+ Out << *ND;
+ break;
+ }
+ case Decl::Enum: {
+ const EnumDecl* ED = cast<EnumDecl>(DC);
+ if (ED->isCompleteDefinition())
+ Out << "[enum] ";
+ else
+ Out << "<enum> ";
+ Out << *ED;
+ break;
+ }
+ case Decl::Record: {
+ const RecordDecl* RD = cast<RecordDecl>(DC);
+ if (RD->isCompleteDefinition())
+ Out << "[struct] ";
+ else
+ Out << "<struct> ";
+ Out << *RD;
+ break;
+ }
+ case Decl::CXXRecord: {
+ const CXXRecordDecl* RD = cast<CXXRecordDecl>(DC);
+ if (RD->isCompleteDefinition())
+ Out << "[class] ";
+ else
+ Out << "<class> ";
+ Out << *RD << ' ' << DC;
+ break;
+ }
+ case Decl::ObjCMethod:
+ Out << "[objc method]";
+ break;
+ case Decl::ObjCInterface:
+ Out << "[objc interface]";
+ break;
+ case Decl::ObjCCategory:
+ Out << "[objc category]";
+ break;
+ case Decl::ObjCProtocol:
+ Out << "[objc protocol]";
+ break;
+ case Decl::ObjCImplementation:
+ Out << "[objc implementation]";
+ break;
+ case Decl::ObjCCategoryImpl:
+ Out << "[objc categoryimpl]";
+ break;
+ case Decl::LinkageSpec:
+ Out << "[linkage spec]";
+ break;
+ case Decl::Block:
+ Out << "[block]";
+ break;
+ case Decl::Function: {
+ const FunctionDecl* FD = cast<FunctionDecl>(DC);
+ if (FD->doesThisDeclarationHaveABody())
+ Out << "[function] ";
+ else
+ Out << "<function> ";
+ Out << *FD;
+ // Print the parameters.
+ Out << "(";
+ bool PrintComma = false;
+ for (auto I : FD->params()) {
+ if (PrintComma)
+ Out << ", ";
+ else
+ PrintComma = true;
+ Out << *I;
+ }
+ Out << ")";
+ break;
+ }
+ case Decl::CXXMethod: {
+ const CXXMethodDecl* D = cast<CXXMethodDecl>(DC);
+ if (D->isOutOfLine())
+ Out << "[c++ method] ";
+ else if (D->isImplicit())
+ Out << "(c++ method) ";
+ else
+ Out << "<c++ method> ";
+ Out << *D;
+ // Print the parameters.
+ Out << "(";
+ bool PrintComma = false;
+ for (FunctionDecl::param_const_iterator I = D->param_begin(),
+ E = D->param_end(); I != E; ++I) {
+ if (PrintComma)
+ Out << ", ";
+ else
+ PrintComma = true;
+ Out << **I;
+ }
+ Out << ")";
+
+ // Check the semantic DeclContext.
+ const DeclContext* SemaDC = D->getDeclContext();
+ const DeclContext* LexicalDC = D->getLexicalDeclContext();
+ if (SemaDC != LexicalDC)
+ Out << " [[" << SemaDC << "]]";
+
+ break;
+ }
+ case Decl::CXXConstructor: {
+ const CXXConstructorDecl* D = cast<CXXConstructorDecl>(DC);
+ if (D->isOutOfLine())
+ Out << "[c++ ctor] ";
+ else if (D->isImplicit())
+ Out << "(c++ ctor) ";
+ else
+ Out << "<c++ ctor> ";
+ Out << *D;
+ // Print the parameters.
+ Out << "(";
+ bool PrintComma = false;
+ for (FunctionDecl::param_const_iterator I = D->param_begin(),
+ E = D->param_end(); I != E; ++I) {
+ if (PrintComma)
+ Out << ", ";
+ else
+ PrintComma = true;
+ Out << **I;
+ }
+ Out << ")";
+
+ // Check the semantic DC.
+ const DeclContext* SemaDC = D->getDeclContext();
+ const DeclContext* LexicalDC = D->getLexicalDeclContext();
+ if (SemaDC != LexicalDC)
+ Out << " [[" << SemaDC << "]]";
+ break;
+ }
+ case Decl::CXXDestructor: {
+ const CXXDestructorDecl* D = cast<CXXDestructorDecl>(DC);
+ if (D->isOutOfLine())
+ Out << "[c++ dtor] ";
+ else if (D->isImplicit())
+ Out << "(c++ dtor) ";
+ else
+ Out << "<c++ dtor> ";
+ Out << *D;
+ // Check the semantic DC.
+ const DeclContext* SemaDC = D->getDeclContext();
+ const DeclContext* LexicalDC = D->getLexicalDeclContext();
+ if (SemaDC != LexicalDC)
+ Out << " [[" << SemaDC << "]]";
+ break;
+ }
+ case Decl::CXXConversion: {
+ const CXXConversionDecl* D = cast<CXXConversionDecl>(DC);
+ if (D->isOutOfLine())
+ Out << "[c++ conversion] ";
+ else if (D->isImplicit())
+ Out << "(c++ conversion) ";
+ else
+ Out << "<c++ conversion> ";
+ Out << *D;
+ // Check the semantic DC.
+ const DeclContext* SemaDC = D->getDeclContext();
+ const DeclContext* LexicalDC = D->getLexicalDeclContext();
+ if (SemaDC != LexicalDC)
+ Out << " [[" << SemaDC << "]]";
+ break;
+ }
+
+ default:
+ llvm_unreachable("a decl that inherits DeclContext isn't handled");
+ }
+
+ Out << "\n";
+
+ // Print decls in the DeclContext.
+ for (auto *I : DC->decls()) {
+ for (unsigned i = 0; i < Indentation; ++i)
+ Out << " ";
+
+ Decl::Kind DK = I->getKind();
+ switch (DK) {
+ case Decl::Namespace:
+ case Decl::Enum:
+ case Decl::Record:
+ case Decl::CXXRecord:
+ case Decl::ObjCMethod:
+ case Decl::ObjCInterface:
+ case Decl::ObjCCategory:
+ case Decl::ObjCProtocol:
+ case Decl::ObjCImplementation:
+ case Decl::ObjCCategoryImpl:
+ case Decl::LinkageSpec:
+ case Decl::Block:
+ case Decl::Function:
+ case Decl::CXXMethod:
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor:
+ case Decl::CXXConversion:
+ {
+ DeclContext* DC = cast<DeclContext>(I);
+ PrintDeclContext(DC, Indentation+2);
+ break;
+ }
+ case Decl::IndirectField: {
+ IndirectFieldDecl* IFD = cast<IndirectFieldDecl>(I);
+ Out << "<IndirectField> " << *IFD << '\n';
+ break;
+ }
+ case Decl::Label: {
+ LabelDecl *LD = cast<LabelDecl>(I);
+ Out << "<Label> " << *LD << '\n';
+ break;
+ }
+ case Decl::Field: {
+ FieldDecl *FD = cast<FieldDecl>(I);
+ Out << "<field> " << *FD << '\n';
+ break;
+ }
+ case Decl::Typedef:
+ case Decl::TypeAlias: {
+ TypedefNameDecl* TD = cast<TypedefNameDecl>(I);
+ Out << "<typedef> " << *TD << '\n';
+ break;
+ }
+ case Decl::EnumConstant: {
+ EnumConstantDecl* ECD = cast<EnumConstantDecl>(I);
+ Out << "<enum constant> " << *ECD << '\n';
+ break;
+ }
+ case Decl::Var: {
+ VarDecl* VD = cast<VarDecl>(I);
+ Out << "<var> " << *VD << '\n';
+ break;
+ }
+ case Decl::ImplicitParam: {
+ ImplicitParamDecl* IPD = cast<ImplicitParamDecl>(I);
+ Out << "<implicit parameter> " << *IPD << '\n';
+ break;
+ }
+ case Decl::ParmVar: {
+ ParmVarDecl* PVD = cast<ParmVarDecl>(I);
+ Out << "<parameter> " << *PVD << '\n';
+ break;
+ }
+ case Decl::ObjCProperty: {
+ ObjCPropertyDecl* OPD = cast<ObjCPropertyDecl>(I);
+ Out << "<objc property> " << *OPD << '\n';
+ break;
+ }
+ case Decl::FunctionTemplate: {
+ FunctionTemplateDecl* FTD = cast<FunctionTemplateDecl>(I);
+ Out << "<function template> " << *FTD << '\n';
+ break;
+ }
+ case Decl::FileScopeAsm: {
+ Out << "<file-scope asm>\n";
+ break;
+ }
+ case Decl::UsingDirective: {
+ Out << "<using directive>\n";
+ break;
+ }
+ case Decl::NamespaceAlias: {
+ NamespaceAliasDecl* NAD = cast<NamespaceAliasDecl>(I);
+ Out << "<namespace alias> " << *NAD << '\n';
+ break;
+ }
+ case Decl::ClassTemplate: {
+ ClassTemplateDecl *CTD = cast<ClassTemplateDecl>(I);
+ Out << "<class template> " << *CTD << '\n';
+ break;
+ }
+ case Decl::OMPThreadPrivate: {
+ Out << "<omp threadprivate> " << '"' << I << "\"\n";
+ break;
+ }
+ default:
+ Out << "DeclKind: " << DK << '"' << I << "\"\n";
+ llvm_unreachable("decl unhandled");
+ }
+ }
+}
+
+std::unique_ptr<ASTConsumer> clang::CreateDeclContextPrinter() {
+ return llvm::make_unique<DeclContextPrinter>();
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp
new file mode 100644
index 0000000..b499fa2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp
@@ -0,0 +1,114 @@
+//===-- ASTMerge.cpp - AST Merging Frontend Action --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Frontend/ASTUnit.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/ASTImporter.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendActions.h"
+
+using namespace clang;
+
+std::unique_ptr<ASTConsumer>
+ASTMergeAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ return AdaptedAction->CreateASTConsumer(CI, InFile);
+}
+
+bool ASTMergeAction::BeginSourceFileAction(CompilerInstance &CI,
+ StringRef Filename) {
+ // FIXME: This is a hack. We need a better way to communicate the
+ // AST file, compiler instance, and file name than member variables
+ // of FrontendAction.
+ AdaptedAction->setCurrentInput(getCurrentInput(), takeCurrentASTUnit());
+ AdaptedAction->setCompilerInstance(&CI);
+ return AdaptedAction->BeginSourceFileAction(CI, Filename);
+}
+
+void ASTMergeAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+ CI.getDiagnostics().getClient()->BeginSourceFile(
+ CI.getASTContext().getLangOpts());
+ CI.getDiagnostics().SetArgToStringFn(&FormatASTNodeDiagnosticArgument,
+ &CI.getASTContext());
+ IntrusiveRefCntPtr<DiagnosticIDs>
+ DiagIDs(CI.getDiagnostics().getDiagnosticIDs());
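+  // Load each AST file in turn and import its top-level declarations into
+  // the context of the current translation unit.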
+ for (unsigned I = 0, N = ASTFiles.size(); I != N; ++I) {
+ IntrusiveRefCntPtr<DiagnosticsEngine>
+ Diags(new DiagnosticsEngine(DiagIDs, &CI.getDiagnosticOpts(),
+ new ForwardingDiagnosticConsumer(
+ *CI.getDiagnostics().getClient()),
+ /*ShouldOwnClient=*/true));
+ std::unique_ptr<ASTUnit> Unit =
+ ASTUnit::LoadFromASTFile(ASTFiles[I], CI.getPCHContainerReader(),
+ Diags, CI.getFileSystemOpts(), false);
+
+ if (!Unit)
+ continue;
+
+ ASTImporter Importer(CI.getASTContext(),
+ CI.getFileManager(),
+ Unit->getASTContext(),
+ Unit->getFileManager(),
+ /*MinimalImport=*/false);
+
+ TranslationUnitDecl *TU = Unit->getASTContext().getTranslationUnitDecl();
+ for (auto *D : TU->decls()) {
+ // Don't re-import __va_list_tag, __builtin_va_list.
+ if (const auto *ND = dyn_cast<NamedDecl>(D))
+ if (IdentifierInfo *II = ND->getIdentifier())
+ if (II->isStr("__va_list_tag") || II->isStr("__builtin_va_list"))
+ continue;
+
+ Decl *ToD = Importer.Import(D);
+
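+      // Importing can fail (Import returns null), e.g. when a declaration
+      // conflicts with one already present in the destination context; only
+      // forward successfully imported declarations to the consumer.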
+ if (ToD) {
+ DeclGroupRef DGR(ToD);
+ CI.getASTConsumer().HandleTopLevelDecl(DGR);
+ }
+ }
+ }
+
+ AdaptedAction->ExecuteAction();
+ CI.getDiagnostics().getClient()->EndSourceFile();
+}
+
+void ASTMergeAction::EndSourceFileAction() {
+ return AdaptedAction->EndSourceFileAction();
+}
+
+ASTMergeAction::ASTMergeAction(FrontendAction *AdaptedAction,
+ ArrayRef<std::string> ASTFiles)
+ : AdaptedAction(AdaptedAction), ASTFiles(ASTFiles.begin(), ASTFiles.end()) {
+ assert(AdaptedAction && "ASTMergeAction needs an action to adapt");
+}
+
+ASTMergeAction::~ASTMergeAction() {
+ delete AdaptedAction;
+}
+
+bool ASTMergeAction::usesPreprocessorOnly() const {
+ return AdaptedAction->usesPreprocessorOnly();
+}
+
+TranslationUnitKind ASTMergeAction::getTranslationUnitKind() {
+ return AdaptedAction->getTranslationUnitKind();
+}
+
+bool ASTMergeAction::hasPCHSupport() const {
+ return AdaptedAction->hasPCHSupport();
+}
+
+bool ASTMergeAction::hasASTFileSupport() const {
+ return AdaptedAction->hasASTFileSupport();
+}
+
+bool ASTMergeAction::hasCodeCompletionSupport() const {
+ return AdaptedAction->hasCodeCompletionSupport();
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp
new file mode 100644
index 0000000..e6ba292
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp
@@ -0,0 +1,2856 @@
+//===--- ASTUnit.cpp - ASTUnit utility --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// ASTUnit Implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/ASTUnit.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TypeOrdering.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
+#include "clang/Basic/VirtualFileSystem.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/FrontendOptions.h"
+#include "clang/Frontend/MultiplexConsumer.h"
+#include "clang/Frontend/Utils.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorOptions.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Serialization/ASTReader.h"
+#include "clang/Serialization/ASTWriter.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/CrashRecoveryContext.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/MutexGuard.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <atomic>
+#include <cstdio>
+#include <cstdlib>
+
+using namespace clang;
+
+using llvm::TimeRecord;
+
+namespace {
+ class SimpleTimer {
+ bool WantTiming;
+ TimeRecord Start;
+ std::string Output;
+
+ public:
+ explicit SimpleTimer(bool WantTiming) : WantTiming(WantTiming) {
+ if (WantTiming)
+ Start = TimeRecord::getCurrentTime();
+ }
+
+ void setOutput(const Twine &Output) {
+ if (WantTiming)
+ this->Output = Output.str();
+ }
+
+ ~SimpleTimer() {
+ if (WantTiming) {
+ TimeRecord Elapsed = TimeRecord::getCurrentTime();
+ Elapsed -= Start;
+ llvm::errs() << Output << ':';
+ Elapsed.print(Elapsed, llvm::errs());
+ llvm::errs() << '\n';
+ }
+ }
+ };
+
+ struct OnDiskData {
+ /// \brief The file in which the precompiled preamble is stored.
+ std::string PreambleFile;
+
+ /// \brief Temporary files that should be removed when the ASTUnit is
+ /// destroyed.
+ SmallVector<std::string, 4> TemporaryFiles;
+
+ /// \brief Erase temporary files.
+ void CleanTemporaryFiles();
+
+ /// \brief Erase the preamble file.
+ void CleanPreambleFile();
+
+ /// \brief Erase temporary files and the preamble file.
+ void Cleanup();
+ };
+}
+
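+// Guards the on-disk data map below; an ASTUnit may be destroyed on one
+// thread while the at-exit cleanup runs on another.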
+static llvm::sys::SmartMutex<false> &getOnDiskMutex() {
+ static llvm::sys::SmartMutex<false> M(/* recursive = */ true);
+ return M;
+}
+
+static void cleanupOnDiskMapAtExit();
+
+typedef llvm::DenseMap<const ASTUnit *,
+ std::unique_ptr<OnDiskData>> OnDiskDataMap;
+static OnDiskDataMap &getOnDiskDataMap() {
+ static OnDiskDataMap M;
+ static bool hasRegisteredAtExit = false;
+ if (!hasRegisteredAtExit) {
+ hasRegisteredAtExit = true;
+ atexit(cleanupOnDiskMapAtExit);
+ }
+ return M;
+}
+
+static void cleanupOnDiskMapAtExit() {
+ // Use the mutex because there can be an alive thread destroying an ASTUnit.
+ llvm::MutexGuard Guard(getOnDiskMutex());
+ for (const auto &I : getOnDiskDataMap()) {
+ // We don't worry about freeing the memory associated with OnDiskDataMap.
+ // All we care about is erasing stale files.
+ I.second->Cleanup();
+ }
+}
+
+static OnDiskData &getOnDiskData(const ASTUnit *AU) {
+ // We require the mutex since we are modifying the structure of the
+ // DenseMap.
+ llvm::MutexGuard Guard(getOnDiskMutex());
+ OnDiskDataMap &M = getOnDiskDataMap();
+ auto &D = M[AU];
+ if (!D)
+ D = llvm::make_unique<OnDiskData>();
+ return *D;
+}
+
+static void erasePreambleFile(const ASTUnit *AU) {
+ getOnDiskData(AU).CleanPreambleFile();
+}
+
+static void removeOnDiskEntry(const ASTUnit *AU) {
+ // We require the mutex since we are modifying the structure of the
+ // DenseMap.
+ llvm::MutexGuard Guard(getOnDiskMutex());
+ OnDiskDataMap &M = getOnDiskDataMap();
+ OnDiskDataMap::iterator I = M.find(AU);
+ if (I != M.end()) {
+ I->second->Cleanup();
+ M.erase(I);
+ }
+}
+
+static void setPreambleFile(const ASTUnit *AU, StringRef preambleFile) {
+ getOnDiskData(AU).PreambleFile = preambleFile;
+}
+
+static const std::string &getPreambleFile(const ASTUnit *AU) {
+ return getOnDiskData(AU).PreambleFile;
+}
+
+void OnDiskData::CleanTemporaryFiles() {
+ for (StringRef File : TemporaryFiles)
+ llvm::sys::fs::remove(File);
+ TemporaryFiles.clear();
+}
+
+void OnDiskData::CleanPreambleFile() {
+ if (!PreambleFile.empty()) {
+ llvm::sys::fs::remove(PreambleFile);
+ PreambleFile.clear();
+ }
+}
+
+void OnDiskData::Cleanup() {
+ CleanTemporaryFiles();
+ CleanPreambleFile();
+}
+
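+/// \brief Bitstream buffer and writer state used when serializing this
+/// ASTUnit back out to an AST file.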
+struct ASTUnit::ASTWriterData {
+ SmallString<128> Buffer;
+ llvm::BitstreamWriter Stream;
+ ASTWriter Writer;
+
+ ASTWriterData() : Stream(Buffer), Writer(Stream, { }) { }
+};
+
+void ASTUnit::clearFileLevelDecls() {
+ llvm::DeleteContainerSeconds(FileDecls);
+}
+
+void ASTUnit::CleanTemporaryFiles() {
+ getOnDiskData(this).CleanTemporaryFiles();
+}
+
+void ASTUnit::addTemporaryFile(StringRef TempFile) {
+ getOnDiskData(this).TemporaryFiles.push_back(TempFile);
+}
+
+/// \brief After failing to build a precompiled preamble (due to
+/// errors in the source that occur in the preamble), the number of
+/// reparses during which we'll skip even trying to precompile the
+/// preamble.
+const unsigned DefaultPreambleRebuildInterval = 5;
+
+/// \brief Tracks the number of ASTUnit objects that are currently active.
+///
+/// Used for debugging purposes only.
+static std::atomic<unsigned> ActiveASTUnitObjects;
+
+ASTUnit::ASTUnit(bool _MainFileIsAST)
+ : Reader(nullptr), HadModuleLoaderFatalFailure(false),
+ OnlyLocalDecls(false), CaptureDiagnostics(false),
+ MainFileIsAST(_MainFileIsAST),
+ TUKind(TU_Complete), WantTiming(getenv("LIBCLANG_TIMING")),
+ OwnsRemappedFileBuffers(true),
+ NumStoredDiagnosticsFromDriver(0),
+ PreambleRebuildCounter(0),
+ NumWarningsInPreamble(0),
+ ShouldCacheCodeCompletionResults(false),
+ IncludeBriefCommentsInCodeCompletion(false), UserFilesAreVolatile(false),
+ CompletionCacheTopLevelHashValue(0),
+ PreambleTopLevelHashValue(0),
+ CurrentTopLevelHashValue(0),
+ UnsafeToFree(false) {
+ if (getenv("LIBCLANG_OBJTRACKING"))
+ fprintf(stderr, "+++ %u translation units\n", ++ActiveASTUnitObjects);
+}
+
+ASTUnit::~ASTUnit() {
+ // If we loaded from an AST file, balance out the BeginSourceFile call.
+ if (MainFileIsAST && getDiagnostics().getClient()) {
+ getDiagnostics().getClient()->EndSourceFile();
+ }
+
+ clearFileLevelDecls();
+
+ // Clean up the temporary files and the preamble file.
+ removeOnDiskEntry(this);
+
+ // Free the buffers associated with remapped files. We are required to
+ // perform this operation here because we explicitly request that the
+ // compiler instance *not* free these buffers for each invocation of the
+ // parser.
+ if (Invocation.get() && OwnsRemappedFileBuffers) {
+ PreprocessorOptions &PPOpts = Invocation->getPreprocessorOpts();
+ for (const auto &RB : PPOpts.RemappedFileBuffers)
+ delete RB.second;
+ }
+
+ ClearCachedCompletionResults();
+
+ if (getenv("LIBCLANG_OBJTRACKING"))
+ fprintf(stderr, "--- %u translation units\n", --ActiveASTUnitObjects);
+}
+
+void ASTUnit::setPreprocessor(Preprocessor *pp) { PP = pp; }
+
+/// \brief Determine the set of code-completion contexts in which this
+/// declaration should be shown.
+static unsigned getDeclShowContexts(const NamedDecl *ND,
+ const LangOptions &LangOpts,
+ bool &IsNestedNameSpecifier) {
+ IsNestedNameSpecifier = false;
+
+ if (isa<UsingShadowDecl>(ND))
+ ND = dyn_cast<NamedDecl>(ND->getUnderlyingDecl());
+ if (!ND)
+ return 0;
+
+ uint64_t Contexts = 0;
+ if (isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND) ||
+ isa<ClassTemplateDecl>(ND) || isa<TemplateTemplateParmDecl>(ND)) {
+ // Types can appear in these contexts.
+ if (LangOpts.CPlusPlus || !isa<TagDecl>(ND))
+ Contexts |= (1LL << CodeCompletionContext::CCC_TopLevel)
+ | (1LL << CodeCompletionContext::CCC_ObjCIvarList)
+ | (1LL << CodeCompletionContext::CCC_ClassStructUnion)
+ | (1LL << CodeCompletionContext::CCC_Statement)
+ | (1LL << CodeCompletionContext::CCC_Type)
+ | (1LL << CodeCompletionContext::CCC_ParenthesizedExpression);
+
+ // In C++, types can appear in expressions contexts (for functional casts).
+ if (LangOpts.CPlusPlus)
+ Contexts |= (1LL << CodeCompletionContext::CCC_Expression);
+
+ // In Objective-C, message sends can send interfaces. In Objective-C++,
+ // all types are available due to functional casts.
+ if (LangOpts.CPlusPlus || isa<ObjCInterfaceDecl>(ND))
+ Contexts |= (1LL << CodeCompletionContext::CCC_ObjCMessageReceiver);
+
+ // In Objective-C, you can only be a subclass of another Objective-C class
+ if (isa<ObjCInterfaceDecl>(ND))
+ Contexts |= (1LL << CodeCompletionContext::CCC_ObjCInterfaceName);
+
+ // Deal with tag names.
+ if (isa<EnumDecl>(ND)) {
+ Contexts |= (1LL << CodeCompletionContext::CCC_EnumTag);
+
+ // Part of the nested-name-specifier in C++0x.
+ if (LangOpts.CPlusPlus11)
+ IsNestedNameSpecifier = true;
+ } else if (const RecordDecl *Record = dyn_cast<RecordDecl>(ND)) {
+ if (Record->isUnion())
+ Contexts |= (1LL << CodeCompletionContext::CCC_UnionTag);
+ else
+ Contexts |= (1LL << CodeCompletionContext::CCC_ClassOrStructTag);
+
+ if (LangOpts.CPlusPlus)
+ IsNestedNameSpecifier = true;
+ } else if (isa<ClassTemplateDecl>(ND))
+ IsNestedNameSpecifier = true;
+ } else if (isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND)) {
+ // Values can appear in these contexts.
+ Contexts = (1LL << CodeCompletionContext::CCC_Statement)
+ | (1LL << CodeCompletionContext::CCC_Expression)
+ | (1LL << CodeCompletionContext::CCC_ParenthesizedExpression)
+ | (1LL << CodeCompletionContext::CCC_ObjCMessageReceiver);
+ } else if (isa<ObjCProtocolDecl>(ND)) {
+ Contexts = (1LL << CodeCompletionContext::CCC_ObjCProtocolName);
+ } else if (isa<ObjCCategoryDecl>(ND)) {
+ Contexts = (1LL << CodeCompletionContext::CCC_ObjCCategoryName);
+ } else if (isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND)) {
+ Contexts = (1LL << CodeCompletionContext::CCC_Namespace);
+
+ // Part of the nested-name-specifier.
+ IsNestedNameSpecifier = true;
+ }
+
+ return Contexts;
+}
+
+void ASTUnit::CacheCodeCompletionResults() {
+ if (!TheSema)
+ return;
+
+ SimpleTimer Timer(WantTiming);
+ Timer.setOutput("Cache global code completions for " + getMainFileName());
+
+ // Clear out the previous results.
+ ClearCachedCompletionResults();
+
+ // Gather the set of global code completions.
+ typedef CodeCompletionResult Result;
+ SmallVector<Result, 8> Results;
+ CachedCompletionAllocator = new GlobalCodeCompletionAllocator;
+ CodeCompletionTUInfo CCTUInfo(CachedCompletionAllocator);
+ TheSema->GatherGlobalCodeCompletions(*CachedCompletionAllocator,
+ CCTUInfo, Results);
+
+ // Translate global code completions into cached completions.
+ llvm::DenseMap<CanQualType, unsigned> CompletionTypes;
+ CodeCompletionContext CCContext(CodeCompletionContext::CCC_TopLevel);
+
+ for (Result &R : Results) {
+ switch (R.Kind) {
+ case Result::RK_Declaration: {
+ bool IsNestedNameSpecifier = false;
+ CachedCodeCompletionResult CachedResult;
+ CachedResult.Completion = R.CreateCodeCompletionString(
+ *TheSema, CCContext, *CachedCompletionAllocator, CCTUInfo,
+ IncludeBriefCommentsInCodeCompletion);
+ CachedResult.ShowInContexts = getDeclShowContexts(
+ R.Declaration, Ctx->getLangOpts(), IsNestedNameSpecifier);
+ CachedResult.Priority = R.Priority;
+ CachedResult.Kind = R.CursorKind;
+ CachedResult.Availability = R.Availability;
+
+ // Keep track of the type of this completion in an ASTContext-agnostic
+ // way.
+ QualType UsageType = getDeclUsageType(*Ctx, R.Declaration);
+ if (UsageType.isNull()) {
+ CachedResult.TypeClass = STC_Void;
+ CachedResult.Type = 0;
+ } else {
+ CanQualType CanUsageType
+ = Ctx->getCanonicalType(UsageType.getUnqualifiedType());
+ CachedResult.TypeClass = getSimplifiedTypeClass(CanUsageType);
+
+ // Determine whether we have already seen this type. If so, we save
+ // ourselves the work of formatting the type string by using the
+ // temporary, CanQualType-based hash table to find the associated value.
+ unsigned &TypeValue = CompletionTypes[CanUsageType];
+ if (TypeValue == 0) {
+ TypeValue = CompletionTypes.size();
+ CachedCompletionTypes[QualType(CanUsageType).getAsString()]
+ = TypeValue;
+ }
+
+ CachedResult.Type = TypeValue;
+ }
+
+ CachedCompletionResults.push_back(CachedResult);
+
+      // Handle nested-name-specifiers in C++.
+ if (TheSema->Context.getLangOpts().CPlusPlus && IsNestedNameSpecifier &&
+ !R.StartsNestedNameSpecifier) {
+ // The contexts in which a nested-name-specifier can appear in C++.
+ uint64_t NNSContexts
+ = (1LL << CodeCompletionContext::CCC_TopLevel)
+ | (1LL << CodeCompletionContext::CCC_ObjCIvarList)
+ | (1LL << CodeCompletionContext::CCC_ClassStructUnion)
+ | (1LL << CodeCompletionContext::CCC_Statement)
+ | (1LL << CodeCompletionContext::CCC_Expression)
+ | (1LL << CodeCompletionContext::CCC_ObjCMessageReceiver)
+ | (1LL << CodeCompletionContext::CCC_EnumTag)
+ | (1LL << CodeCompletionContext::CCC_UnionTag)
+ | (1LL << CodeCompletionContext::CCC_ClassOrStructTag)
+ | (1LL << CodeCompletionContext::CCC_Type)
+ | (1LL << CodeCompletionContext::CCC_PotentiallyQualifiedName)
+ | (1LL << CodeCompletionContext::CCC_ParenthesizedExpression);
+
+ if (isa<NamespaceDecl>(R.Declaration) ||
+ isa<NamespaceAliasDecl>(R.Declaration))
+ NNSContexts |= (1LL << CodeCompletionContext::CCC_Namespace);
+
+ if (unsigned RemainingContexts
+ = NNSContexts & ~CachedResult.ShowInContexts) {
+          // If there are any contexts where this completion can be a
+ // nested-name-specifier but isn't already an option, create a
+ // nested-name-specifier completion.
+ R.StartsNestedNameSpecifier = true;
+ CachedResult.Completion = R.CreateCodeCompletionString(
+ *TheSema, CCContext, *CachedCompletionAllocator, CCTUInfo,
+ IncludeBriefCommentsInCodeCompletion);
+ CachedResult.ShowInContexts = RemainingContexts;
+ CachedResult.Priority = CCP_NestedNameSpecifier;
+ CachedResult.TypeClass = STC_Void;
+ CachedResult.Type = 0;
+ CachedCompletionResults.push_back(CachedResult);
+ }
+ }
+ break;
+ }
+
+ case Result::RK_Keyword:
+ case Result::RK_Pattern:
+ // Ignore keywords and patterns; we don't care, since they are so
+ // easily regenerated.
+ break;
+
+ case Result::RK_Macro: {
+ CachedCodeCompletionResult CachedResult;
+ CachedResult.Completion = R.CreateCodeCompletionString(
+ *TheSema, CCContext, *CachedCompletionAllocator, CCTUInfo,
+ IncludeBriefCommentsInCodeCompletion);
+ CachedResult.ShowInContexts
+ = (1LL << CodeCompletionContext::CCC_TopLevel)
+ | (1LL << CodeCompletionContext::CCC_ObjCInterface)
+ | (1LL << CodeCompletionContext::CCC_ObjCImplementation)
+ | (1LL << CodeCompletionContext::CCC_ObjCIvarList)
+ | (1LL << CodeCompletionContext::CCC_ClassStructUnion)
+ | (1LL << CodeCompletionContext::CCC_Statement)
+ | (1LL << CodeCompletionContext::CCC_Expression)
+ | (1LL << CodeCompletionContext::CCC_ObjCMessageReceiver)
+ | (1LL << CodeCompletionContext::CCC_MacroNameUse)
+ | (1LL << CodeCompletionContext::CCC_PreprocessorExpression)
+ | (1LL << CodeCompletionContext::CCC_ParenthesizedExpression)
+ | (1LL << CodeCompletionContext::CCC_OtherWithMacros);
+
+ CachedResult.Priority = R.Priority;
+ CachedResult.Kind = R.CursorKind;
+ CachedResult.Availability = R.Availability;
+ CachedResult.TypeClass = STC_Void;
+ CachedResult.Type = 0;
+ CachedCompletionResults.push_back(CachedResult);
+ break;
+ }
+ }
+ }
+
+ // Save the current top-level hash value.
+ CompletionCacheTopLevelHashValue = CurrentTopLevelHashValue;
+}
+
+void ASTUnit::ClearCachedCompletionResults() {
+ CachedCompletionResults.clear();
+ CachedCompletionTypes.clear();
+ CachedCompletionAllocator = nullptr;
+}
+
+namespace {
+
+/// \brief Gathers information from ASTReader that will be used to initialize
+/// a Preprocessor.
+class ASTInfoCollector : public ASTReaderListener {
+ Preprocessor &PP;
+ ASTContext &Context;
+ LangOptions &LangOpt;
+ std::shared_ptr<TargetOptions> &TargetOpts;
+ IntrusiveRefCntPtr<TargetInfo> &Target;
+ unsigned &Counter;
+
+ bool InitializedLanguage;
+public:
+ ASTInfoCollector(Preprocessor &PP, ASTContext &Context, LangOptions &LangOpt,
+ std::shared_ptr<TargetOptions> &TargetOpts,
+ IntrusiveRefCntPtr<TargetInfo> &Target, unsigned &Counter)
+ : PP(PP), Context(Context), LangOpt(LangOpt), TargetOpts(TargetOpts),
+ Target(Target), Counter(Counter), InitializedLanguage(false) {}
+
+ bool ReadLanguageOptions(const LangOptions &LangOpts, bool Complain,
+ bool AllowCompatibleDifferences) override {
+ if (InitializedLanguage)
+ return false;
+
+ LangOpt = LangOpts;
+ InitializedLanguage = true;
+
+ updated();
+ return false;
+ }
+
+ bool ReadTargetOptions(const TargetOptions &TargetOpts, bool Complain,
+ bool AllowCompatibleDifferences) override {
+ // If we've already initialized the target, don't do it again.
+ if (Target)
+ return false;
+
+ this->TargetOpts = std::make_shared<TargetOptions>(TargetOpts);
+ Target =
+ TargetInfo::CreateTargetInfo(PP.getDiagnostics(), this->TargetOpts);
+
+ updated();
+ return false;
+ }
+
+ void ReadCounter(const serialization::ModuleFile &M,
+ unsigned Value) override {
+ Counter = Value;
+ }
+
+private:
+ void updated() {
+ if (!Target || !InitializedLanguage)
+ return;
+
+ // Inform the target of the language options.
+ //
+ // FIXME: We shouldn't need to do this, the target should be immutable once
+ // created. This complexity should be lifted elsewhere.
+ Target->adjust(LangOpt);
+
+ // Initialize the preprocessor.
+ PP.Initialize(*Target);
+
+ // Initialize the ASTContext
+ Context.InitBuiltinTypes(*Target);
+
+ // We didn't have access to the comment options when the ASTContext was
+ // constructed, so register them now.
+ Context.getCommentCommandTraits().registerCommentOptions(
+ LangOpt.CommentOpts);
+ }
+};
+
+/// \brief Diagnostic consumer that saves each diagnostic it is given.
+class StoredDiagnosticConsumer : public DiagnosticConsumer {
+ SmallVectorImpl<StoredDiagnostic> &StoredDiags;
+ SourceManager *SourceMgr;
+
+public:
+ explicit StoredDiagnosticConsumer(
+ SmallVectorImpl<StoredDiagnostic> &StoredDiags)
+ : StoredDiags(StoredDiags), SourceMgr(nullptr) {}
+
+ void BeginSourceFile(const LangOptions &LangOpts,
+ const Preprocessor *PP = nullptr) override {
+ if (PP)
+ SourceMgr = &PP->getSourceManager();
+ }
+
+ void HandleDiagnostic(DiagnosticsEngine::Level Level,
+ const Diagnostic &Info) override;
+};
+
+/// \brief RAII object that optionally captures diagnostics, if
+/// there is no diagnostic client to capture them already.
+class CaptureDroppedDiagnostics {
+ DiagnosticsEngine &Diags;
+ StoredDiagnosticConsumer Client;
+ DiagnosticConsumer *PreviousClient;
+ std::unique_ptr<DiagnosticConsumer> OwningPreviousClient;
+
+public:
+ CaptureDroppedDiagnostics(bool RequestCapture, DiagnosticsEngine &Diags,
+ SmallVectorImpl<StoredDiagnostic> &StoredDiags)
+ : Diags(Diags), Client(StoredDiags), PreviousClient(nullptr)
+ {
+ if (RequestCapture || Diags.getClient() == nullptr) {
+ OwningPreviousClient = Diags.takeClient();
+ PreviousClient = Diags.getClient();
+ Diags.setClient(&Client, false);
+ }
+ }
+
+ ~CaptureDroppedDiagnostics() {
+ if (Diags.getClient() == &Client)
+ Diags.setClient(PreviousClient, !!OwningPreviousClient.release());
+ }
+};
+
+} // anonymous namespace
+
+void StoredDiagnosticConsumer::HandleDiagnostic(DiagnosticsEngine::Level Level,
+ const Diagnostic &Info) {
+ // Default implementation (Warnings/errors count).
+ DiagnosticConsumer::HandleDiagnostic(Level, Info);
+
+ // Only record the diagnostic if it's part of the source manager we know
+ // about. This effectively drops diagnostics from modules we're building.
+  // FIXME: In the long run, we don't want to drop source managers from modules.
+ if (!Info.hasSourceManager() || &Info.getSourceManager() == SourceMgr)
+ StoredDiags.emplace_back(Level, Info);
+}
+
+ASTMutationListener *ASTUnit::getASTMutationListener() {
+ if (WriterData)
+ return &WriterData->Writer;
+ return nullptr;
+}
+
+ASTDeserializationListener *ASTUnit::getDeserializationListener() {
+ if (WriterData)
+ return &WriterData->Writer;
+ return nullptr;
+}
+
+std::unique_ptr<llvm::MemoryBuffer>
+ASTUnit::getBufferForFile(StringRef Filename, std::string *ErrorStr) {
+ assert(FileMgr);
+ auto Buffer = FileMgr->getBufferForFile(Filename);
+ if (Buffer)
+ return std::move(*Buffer);
+ if (ErrorStr)
+ *ErrorStr = Buffer.getError().message();
+ return nullptr;
+}
+
+/// \brief Configure the diagnostics object for use with ASTUnit.
+void ASTUnit::ConfigureDiags(IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ ASTUnit &AST, bool CaptureDiagnostics) {
+ assert(Diags.get() && "no DiagnosticsEngine was provided");
+ if (CaptureDiagnostics)
+ Diags->setClient(new StoredDiagnosticConsumer(AST.StoredDiagnostics));
+}
+
+std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
+ const std::string &Filename, const PCHContainerReader &PCHContainerRdr,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ const FileSystemOptions &FileSystemOpts, bool UseDebugInfo,
+ bool OnlyLocalDecls, ArrayRef<RemappedFile> RemappedFiles,
+ bool CaptureDiagnostics, bool AllowPCHWithCompilerErrors,
+ bool UserFilesAreVolatile) {
+ std::unique_ptr<ASTUnit> AST(new ASTUnit(true));
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
+ ASTUnitCleanup(AST.get());
+ llvm::CrashRecoveryContextCleanupRegistrar<DiagnosticsEngine,
+ llvm::CrashRecoveryContextReleaseRefCleanup<DiagnosticsEngine> >
+ DiagCleanup(Diags.get());
+
+ ConfigureDiags(Diags, *AST, CaptureDiagnostics);
+
+ AST->OnlyLocalDecls = OnlyLocalDecls;
+ AST->CaptureDiagnostics = CaptureDiagnostics;
+ AST->Diagnostics = Diags;
+ IntrusiveRefCntPtr<vfs::FileSystem> VFS = vfs::getRealFileSystem();
+ AST->FileMgr = new FileManager(FileSystemOpts, VFS);
+ AST->UserFilesAreVolatile = UserFilesAreVolatile;
+ AST->SourceMgr = new SourceManager(AST->getDiagnostics(),
+ AST->getFileManager(),
+ UserFilesAreVolatile);
+ AST->HSOpts = new HeaderSearchOptions();
+ AST->HSOpts->ModuleFormat = PCHContainerRdr.getFormat();
+ AST->HeaderInfo.reset(new HeaderSearch(AST->HSOpts,
+ AST->getSourceManager(),
+ AST->getDiagnostics(),
+ AST->ASTFileLangOpts,
+ /*Target=*/nullptr));
+
+ PreprocessorOptions *PPOpts = new PreprocessorOptions();
+
+ for (const auto &RemappedFile : RemappedFiles)
+ PPOpts->addRemappedFile(RemappedFile.first, RemappedFile.second);
+
+ // Gather Info for preprocessor construction later on.
+
+ HeaderSearch &HeaderInfo = *AST->HeaderInfo;
+ unsigned Counter;
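+  // Receives the preprocessor counter value recorded in the AST file; the
+  // ASTInfoCollector listener fills it in while the file is read, and it is
+  // handed to the new Preprocessor once reading is done.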
+
+ AST->PP =
+ new Preprocessor(PPOpts, AST->getDiagnostics(), AST->ASTFileLangOpts,
+ AST->getSourceManager(), HeaderInfo, *AST,
+ /*IILookup=*/nullptr,
+ /*OwnsHeaderSearch=*/false);
+ Preprocessor &PP = *AST->PP;
+
+ AST->Ctx = new ASTContext(AST->ASTFileLangOpts, AST->getSourceManager(),
+ PP.getIdentifierTable(), PP.getSelectorTable(),
+ PP.getBuiltinInfo());
+ ASTContext &Context = *AST->Ctx;
+
+ bool disableValid = false;
+ if (::getenv("LIBCLANG_DISABLE_PCH_VALIDATION"))
+ disableValid = true;
+ AST->Reader = new ASTReader(PP, Context, PCHContainerRdr, { },
+ /*isysroot=*/"",
+ /*DisableValidation=*/disableValid,
+ AllowPCHWithCompilerErrors);
+
+ AST->Reader->setListener(llvm::make_unique<ASTInfoCollector>(
+ *AST->PP, Context, AST->ASTFileLangOpts, AST->TargetOpts, AST->Target,
+ Counter));
+
+ // Attach the AST reader to the AST context as an external AST
+ // source, so that declarations will be deserialized from the
+ // AST file as needed.
+ // We need the external source to be set up before we read the AST, because
+ // eagerly-deserialized declarations may use it.
+ Context.setExternalSource(AST->Reader);
+
+ switch (AST->Reader->ReadAST(Filename, serialization::MK_MainFile,
+ SourceLocation(), ASTReader::ARR_None)) {
+ case ASTReader::Success:
+ break;
+
+ case ASTReader::Failure:
+ case ASTReader::Missing:
+ case ASTReader::OutOfDate:
+ case ASTReader::VersionMismatch:
+ case ASTReader::ConfigurationMismatch:
+ case ASTReader::HadErrors:
+ AST->getDiagnostics().Report(diag::err_fe_unable_to_load_pch);
+ return nullptr;
+ }
+
+ AST->OriginalSourceFile = AST->Reader->getOriginalSourceFile();
+
+ PP.setCounterValue(Counter);
+
+ // Create an AST consumer, even though it isn't used.
+ AST->Consumer.reset(new ASTConsumer);
+
+ // Create a semantic analysis object and tell the AST reader about it.
+ AST->TheSema.reset(new Sema(PP, Context, *AST->Consumer));
+ AST->TheSema->Initialize();
+ AST->Reader->InitializeSema(*AST->TheSema);
+
+ // Tell the diagnostic client that we have started a source file.
+ AST->getDiagnostics().getClient()->BeginSourceFile(Context.getLangOpts(),&PP);
+
+ return AST;
+}
+
+namespace {
+
+/// \brief Preprocessor callback class that updates a hash value with the names
+/// of all macros that have been defined by the translation unit.
+class MacroDefinitionTrackerPPCallbacks : public PPCallbacks {
+ unsigned &Hash;
+
+public:
+ explicit MacroDefinitionTrackerPPCallbacks(unsigned &Hash) : Hash(Hash) { }
+
+ void MacroDefined(const Token &MacroNameTok,
+ const MacroDirective *MD) override {
+ Hash = llvm::HashString(MacroNameTok.getIdentifierInfo()->getName(), Hash);
+ }
+};
+
+/// \brief Add the given declaration to the hash of all top-level entities.
+void AddTopLevelDeclarationToHash(Decl *D, unsigned &Hash) {
+ if (!D)
+ return;
+
+ DeclContext *DC = D->getDeclContext();
+ if (!DC)
+ return;
+
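+  // Only declarations whose context is (or looks through to) the translation
+  // unit contribute to the top-level hash.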
+ if (!(DC->isTranslationUnit() || DC->getLookupParent()->isTranslationUnit()))
+ return;
+
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
+ if (EnumDecl *EnumD = dyn_cast<EnumDecl>(D)) {
+ // For an unscoped enum include the enumerators in the hash since they
+ // enter the top-level namespace.
+ if (!EnumD->isScoped()) {
+ for (const auto *EI : EnumD->enumerators()) {
+ if (EI->getIdentifier())
+ Hash = llvm::HashString(EI->getIdentifier()->getName(), Hash);
+ }
+ }
+ }
+
+ if (ND->getIdentifier())
+ Hash = llvm::HashString(ND->getIdentifier()->getName(), Hash);
+ else if (DeclarationName Name = ND->getDeclName()) {
+ std::string NameStr = Name.getAsString();
+ Hash = llvm::HashString(NameStr, Hash);
+ }
+ return;
+ }
+
+ if (ImportDecl *ImportD = dyn_cast<ImportDecl>(D)) {
+ if (Module *Mod = ImportD->getImportedModule()) {
+ std::string ModName = Mod->getFullModuleName();
+ Hash = llvm::HashString(ModName, Hash);
+ }
+ return;
+ }
+}
+
+class TopLevelDeclTrackerConsumer : public ASTConsumer {
+ ASTUnit &Unit;
+ unsigned &Hash;
+
+public:
+ TopLevelDeclTrackerConsumer(ASTUnit &_Unit, unsigned &Hash)
+ : Unit(_Unit), Hash(Hash) {
+ Hash = 0;
+ }
+
+ void handleTopLevelDecl(Decl *D) {
+ if (!D)
+ return;
+
+ // FIXME: Currently ObjC method declarations are incorrectly being
+ // reported as top-level declarations, even though their DeclContext
+ // is the containing ObjC @interface/@implementation. This is a
+ // fundamental problem in the parser right now.
+ if (isa<ObjCMethodDecl>(D))
+ return;
+
+ AddTopLevelDeclarationToHash(D, Hash);
+ Unit.addTopLevelDecl(D);
+
+ handleFileLevelDecl(D);
+ }
+
+ void handleFileLevelDecl(Decl *D) {
+ Unit.addFileLevelDecl(D);
+ if (NamespaceDecl *NSD = dyn_cast<NamespaceDecl>(D)) {
+ for (auto *I : NSD->decls())
+ handleFileLevelDecl(I);
+ }
+ }
+
+ bool HandleTopLevelDecl(DeclGroupRef D) override {
+ for (Decl *TopLevelDecl : D)
+ handleTopLevelDecl(TopLevelDecl);
+ return true;
+ }
+
+ // We're not interested in "interesting" decls.
+ void HandleInterestingDecl(DeclGroupRef) override {}
+
+ void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) override {
+ for (Decl *TopLevelDecl : D)
+ handleTopLevelDecl(TopLevelDecl);
+ }
+
+ ASTMutationListener *GetASTMutationListener() override {
+ return Unit.getASTMutationListener();
+ }
+
+ ASTDeserializationListener *GetASTDeserializationListener() override {
+ return Unit.getDeserializationListener();
+ }
+};
+
+class TopLevelDeclTrackerAction : public ASTFrontendAction {
+public:
+ ASTUnit &Unit;
+
+ std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) override {
+ CI.getPreprocessor().addPPCallbacks(
+ llvm::make_unique<MacroDefinitionTrackerPPCallbacks>(
+ Unit.getCurrentTopLevelHashValue()));
+ return llvm::make_unique<TopLevelDeclTrackerConsumer>(
+ Unit, Unit.getCurrentTopLevelHashValue());
+ }
+
+public:
+ TopLevelDeclTrackerAction(ASTUnit &_Unit) : Unit(_Unit) {}
+
+ bool hasCodeCompletionSupport() const override { return false; }
+ TranslationUnitKind getTranslationUnitKind() override {
+ return Unit.getTranslationUnitKind();
+ }
+};
+
+class PrecompilePreambleAction : public ASTFrontendAction {
+ ASTUnit &Unit;
+ bool HasEmittedPreamblePCH;
+
+public:
+ explicit PrecompilePreambleAction(ASTUnit &Unit)
+ : Unit(Unit), HasEmittedPreamblePCH(false) {}
+
+ std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) override;
+ bool hasEmittedPreamblePCH() const { return HasEmittedPreamblePCH; }
+ void setHasEmittedPreamblePCH() { HasEmittedPreamblePCH = true; }
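+  // If the PCH was never emitted, have the compiler instance erase the
+  // half-written output file.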
+ bool shouldEraseOutputFiles() override { return !hasEmittedPreamblePCH(); }
+
+ bool hasCodeCompletionSupport() const override { return false; }
+ bool hasASTFileSupport() const override { return false; }
+ TranslationUnitKind getTranslationUnitKind() override { return TU_Prefix; }
+};
+
+class PrecompilePreambleConsumer : public PCHGenerator {
+ ASTUnit &Unit;
+ unsigned &Hash;
+ std::vector<Decl *> TopLevelDecls;
+ PrecompilePreambleAction *Action;
+ raw_ostream *Out;
+
+public:
+ PrecompilePreambleConsumer(ASTUnit &Unit, PrecompilePreambleAction *Action,
+ const Preprocessor &PP, StringRef isysroot,
+ raw_ostream *Out)
+ : PCHGenerator(PP, "", nullptr, isysroot, std::make_shared<PCHBuffer>(),
+ ArrayRef<llvm::IntrusiveRefCntPtr<ModuleFileExtension>>(),
+ /*AllowASTWithErrors=*/true),
+ Unit(Unit), Hash(Unit.getCurrentTopLevelHashValue()), Action(Action),
+ Out(Out) {
+ Hash = 0;
+ }
+
+ bool HandleTopLevelDecl(DeclGroupRef DG) override {
+ for (Decl *D : DG) {
+ // FIXME: Currently ObjC method declarations are incorrectly being
+ // reported as top-level declarations, even though their DeclContext
+ // is the containing ObjC @interface/@implementation. This is a
+ // fundamental problem in the parser right now.
+ if (isa<ObjCMethodDecl>(D))
+ continue;
+ AddTopLevelDeclarationToHash(D, Hash);
+ TopLevelDecls.push_back(D);
+ }
+ return true;
+ }
+
+ void HandleTranslationUnit(ASTContext &Ctx) override {
+ PCHGenerator::HandleTranslationUnit(Ctx);
+ if (hasEmittedPCH()) {
+ // Write the generated bitstream to "Out".
+ *Out << getPCH();
+ // Make sure it hits disk now.
+ Out->flush();
+ // Free the buffer.
+ llvm::SmallVector<char, 0> Empty;
+ getPCH() = std::move(Empty);
+
+ // Translate the top-level declarations we captured during
+ // parsing into declaration IDs in the precompiled
+ // preamble. This will allow us to deserialize those top-level
+ // declarations when requested.
+ for (Decl *D : TopLevelDecls) {
+ // Invalid top-level decls may not have been serialized.
+ if (D->isInvalidDecl())
+ continue;
+ Unit.addTopLevelDeclFromPreamble(getWriter().getDeclID(D));
+ }
+
+ Action->setHasEmittedPreamblePCH();
+ }
+ }
+};
+
+} // anonymous namespace
+
+std::unique_ptr<ASTConsumer>
+PrecompilePreambleAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ std::string Sysroot;
+ std::string OutputFile;
+ raw_ostream *OS = GeneratePCHAction::ComputeASTConsumerArguments(
+ CI, InFile, Sysroot, OutputFile);
+ if (!OS)
+ return nullptr;
+
+ if (!CI.getFrontendOpts().RelocatablePCH)
+ Sysroot.clear();
+
+ CI.getPreprocessor().addPPCallbacks(
+ llvm::make_unique<MacroDefinitionTrackerPPCallbacks>(
+ Unit.getCurrentTopLevelHashValue()));
+ return llvm::make_unique<PrecompilePreambleConsumer>(
+ Unit, this, CI.getPreprocessor(), Sysroot, OS);
+}
+
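+// Driver diagnostics are the only stored diagnostics that lack a valid
+// source location; the helpers below use that to tell the two kinds apart.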
+static bool isNonDriverDiag(const StoredDiagnostic &StoredDiag) {
+ return StoredDiag.getLocation().isValid();
+}
+
+static void
+checkAndRemoveNonDriverDiags(SmallVectorImpl<StoredDiagnostic> &StoredDiags) {
+ // Get rid of stored diagnostics except the ones from the driver which do not
+ // have a source location.
+ StoredDiags.erase(
+ std::remove_if(StoredDiags.begin(), StoredDiags.end(), isNonDriverDiag),
+ StoredDiags.end());
+}
+
+static void
+checkAndSanitizeDiags(SmallVectorImpl<StoredDiagnostic> &StoredDiagnostics,
+                      SourceManager &SM) {
+  // The stored diagnostics have the old source manager in them; update
+  // the locations to refer into the new source manager. We've been careful
+  // to make sure that the source manager's state before and after is
+  // identical, so we can reuse the source locations themselves.
+ for (StoredDiagnostic &SD : StoredDiagnostics) {
+ if (SD.getLocation().isValid()) {
+ FullSourceLoc Loc(SD.getLocation(), SM);
+ SD.setLocation(Loc);
+ }
+ }
+}
+
+/// Parse the source file into a translation unit using the given compiler
+/// invocation, replacing the current translation unit.
+///
+/// \returns True if a failure occurred that causes the ASTUnit not to
+/// contain any translation-unit information, false otherwise.
+bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ std::unique_ptr<llvm::MemoryBuffer> OverrideMainBuffer) {
+ SavedMainFileBuffer.reset();
+
+ if (!Invocation)
+ return true;
+
+ // Create the compiler instance to use for building the AST.
+ std::unique_ptr<CompilerInstance> Clang(
+ new CompilerInstance(PCHContainerOps));
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
+ CICleanup(Clang.get());
+
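+  // Work from a copy of the stored invocation so that reparses never mutate
+  // the original.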
+ IntrusiveRefCntPtr<CompilerInvocation>
+ CCInvocation(new CompilerInvocation(*Invocation));
+
+ Clang->setInvocation(CCInvocation.get());
+ OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
+
+ // Set up diagnostics, capturing any diagnostics that would
+ // otherwise be dropped.
+ Clang->setDiagnostics(&getDiagnostics());
+
+ // Create the target instance.
+ Clang->setTarget(TargetInfo::CreateTargetInfo(
+ Clang->getDiagnostics(), Clang->getInvocation().TargetOpts));
+ if (!Clang->hasTarget())
+ return true;
+
+ // Inform the target of the language options.
+ //
+ // FIXME: We shouldn't need to do this, the target should be immutable once
+ // created. This complexity should be lifted elsewhere.
+ Clang->getTarget().adjust(Clang->getLangOpts());
+
+ assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
+ "Invocation must have exactly one source file!");
+ assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_AST &&
+ "FIXME: AST inputs not yet supported here!");
+ assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_LLVM_IR &&
+         "IR inputs not supported here!");
+
+ // Configure the various subsystems.
+ LangOpts = Clang->getInvocation().LangOpts;
+ FileSystemOpts = Clang->getFileSystemOpts();
+ if (!FileMgr) {
+ Clang->createFileManager();
+ FileMgr = &Clang->getFileManager();
+ }
+ SourceMgr = new SourceManager(getDiagnostics(), *FileMgr,
+ UserFilesAreVolatile);
+ TheSema.reset();
+ Ctx = nullptr;
+ PP = nullptr;
+ Reader = nullptr;
+
+ // Clear out old caches and data.
+ TopLevelDecls.clear();
+ clearFileLevelDecls();
+ CleanTemporaryFiles();
+
+ if (!OverrideMainBuffer) {
+ checkAndRemoveNonDriverDiags(StoredDiagnostics);
+ TopLevelDeclsInPreamble.clear();
+ }
+
+ // Create a file manager object to provide access to and cache the filesystem.
+ Clang->setFileManager(&getFileManager());
+
+ // Create the source manager.
+ Clang->setSourceManager(&getSourceManager());
+
+ // If the main file has been overridden due to the use of a preamble,
+ // make that override happen and introduce the preamble.
+ PreprocessorOptions &PreprocessorOpts = Clang->getPreprocessorOpts();
+ if (OverrideMainBuffer) {
+ PreprocessorOpts.addRemappedFile(OriginalSourceFile,
+ OverrideMainBuffer.get());
+ PreprocessorOpts.PrecompiledPreambleBytes.first = Preamble.size();
+ PreprocessorOpts.PrecompiledPreambleBytes.second
+ = PreambleEndsAtStartOfLine;
+ PreprocessorOpts.ImplicitPCHInclude = getPreambleFile(this);
+ PreprocessorOpts.DisablePCHValidation = true;
+
+    // The stored diagnostics have the old source manager in them; update
+    // the locations to refer into the new source manager. We've been careful
+    // to make sure that the source manager's state before and after is
+    // identical, so we can reuse the source locations themselves.
+ checkAndSanitizeDiags(StoredDiagnostics, getSourceManager());
+
+    // Keep track of the override buffer.
+ SavedMainFileBuffer = std::move(OverrideMainBuffer);
+ }
+
+ std::unique_ptr<TopLevelDeclTrackerAction> Act(
+ new TopLevelDeclTrackerAction(*this));
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<TopLevelDeclTrackerAction>
+ ActCleanup(Act.get());
+
+ if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0]))
+ goto error;
+
+ if (SavedMainFileBuffer) {
+ std::string ModName = getPreambleFile(this);
+ TranslateStoredDiagnostics(getFileManager(), getSourceManager(),
+ PreambleDiagnostics, StoredDiagnostics);
+ }
+
+ if (!Act->Execute())
+ goto error;
+
+ transferASTDataFromCompilerInstance(*Clang);
+
+ Act->EndSourceFile();
+
+ FailedParseDiagnostics.clear();
+
+ return false;
+
+error:
+ // Remove the overridden buffer we used for the preamble.
+ SavedMainFileBuffer = nullptr;
+
+ // Keep the ownership of the data in the ASTUnit because the client may
+ // want to see the diagnostics.
+ transferASTDataFromCompilerInstance(*Clang);
+ FailedParseDiagnostics.swap(StoredDiagnostics);
+ StoredDiagnostics.clear();
+ NumStoredDiagnosticsFromDriver = 0;
+ return true;
+}
+
+/// \brief Simple function to retrieve a path for a preamble precompiled header.
+static std::string GetPreamblePCHPath() {
+ // FIXME: This is a hack so that we can override the preamble file during
+ // crash-recovery testing, which is the only case where the preamble files
+ // are not necessarily cleaned up.
+ const char *TmpFile = ::getenv("CINDEXTEST_PREAMBLE_FILE");
+ if (TmpFile)
+ return TmpFile;
+
+ SmallString<128> Path;
+ llvm::sys::fs::createTemporaryFile("preamble", "pch", Path);
+
+ return Path.str();
+}
+
+/// \brief Compute the preamble for the main file, providing the source buffer
+/// that corresponds to the main file along with a pair (bytes, start-of-line)
+/// that describes the preamble.
+ASTUnit::ComputedPreamble
+ASTUnit::ComputePreamble(CompilerInvocation &Invocation, unsigned MaxLines) {
+ FrontendOptions &FrontendOpts = Invocation.getFrontendOpts();
+ PreprocessorOptions &PreprocessorOpts = Invocation.getPreprocessorOpts();
+
+ // Try to determine if the main file has been remapped, either from the
+ // command line (to another file) or directly through the compiler invocation
+ // (to a memory buffer).
+ llvm::MemoryBuffer *Buffer = nullptr;
+ std::unique_ptr<llvm::MemoryBuffer> BufferOwner;
+ std::string MainFilePath(FrontendOpts.Inputs[0].getFile());
+ llvm::sys::fs::UniqueID MainFileID;
+ if (!llvm::sys::fs::getUniqueID(MainFilePath, MainFileID)) {
+ // Check whether there is a file-file remapping of the main file
+ for (const auto &RF : PreprocessorOpts.RemappedFiles) {
+ std::string MPath(RF.first);
+ llvm::sys::fs::UniqueID MID;
+ if (!llvm::sys::fs::getUniqueID(MPath, MID)) {
+ if (MainFileID == MID) {
+ // We found a remapping. Try to load the resulting, remapped source.
+ BufferOwner = getBufferForFile(RF.second);
+ if (!BufferOwner)
+ return ComputedPreamble(nullptr, nullptr, 0, true);
+ }
+ }
+ }
+
+    // Check whether there is a file-buffer remapping. It supersedes the
+ // file-file remapping.
+ for (const auto &RB : PreprocessorOpts.RemappedFileBuffers) {
+ std::string MPath(RB.first);
+ llvm::sys::fs::UniqueID MID;
+ if (!llvm::sys::fs::getUniqueID(MPath, MID)) {
+ if (MainFileID == MID) {
+ // We found a remapping.
+ BufferOwner.reset();
+ Buffer = const_cast<llvm::MemoryBuffer *>(RB.second);
+ }
+ }
+ }
+ }
+
+ // If the main source file was not remapped, load it now.
+ if (!Buffer && !BufferOwner) {
+ BufferOwner = getBufferForFile(FrontendOpts.Inputs[0].getFile());
+ if (!BufferOwner)
+ return ComputedPreamble(nullptr, nullptr, 0, true);
+ }
+
+ if (!Buffer)
+ Buffer = BufferOwner.get();
+ auto Pre = Lexer::ComputePreamble(Buffer->getBuffer(),
+ *Invocation.getLangOpts(), MaxLines);
+ return ComputedPreamble(Buffer, std::move(BufferOwner), Pre.first,
+ Pre.second);
+}
+
+ASTUnit::PreambleFileHash
+ASTUnit::PreambleFileHash::createForFile(off_t Size, time_t ModTime) {
+ PreambleFileHash Result;
+ Result.Size = Size;
+ Result.ModTime = ModTime;
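+  // On-disk files are identified by (size, mtime); the MD5 field is only
+  // meaningful for memory buffers, so zero it here.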
+ memset(Result.MD5, 0, sizeof(Result.MD5));
+ return Result;
+}
+
+ASTUnit::PreambleFileHash ASTUnit::PreambleFileHash::createForMemoryBuffer(
+ const llvm::MemoryBuffer *Buffer) {
+ PreambleFileHash Result;
+ Result.Size = Buffer->getBufferSize();
+ Result.ModTime = 0;
+
+ llvm::MD5 MD5Ctx;
+ MD5Ctx.update(Buffer->getBuffer().data());
+ MD5Ctx.final(Result.MD5);
+
+ return Result;
+}
+
+namespace clang {
+bool operator==(const ASTUnit::PreambleFileHash &LHS,
+ const ASTUnit::PreambleFileHash &RHS) {
+ return LHS.Size == RHS.Size && LHS.ModTime == RHS.ModTime &&
+ memcmp(LHS.MD5, RHS.MD5, sizeof(LHS.MD5)) == 0;
+}
+} // namespace clang
+
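+// Translate source ranges and fix-its into plain file offsets so that the
+// resulting "standalone" diagnostics can outlive the SourceManager that
+// produced them.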
+static std::pair<unsigned, unsigned>
+makeStandaloneRange(CharSourceRange Range, const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ CharSourceRange FileRange = Lexer::makeFileCharRange(Range, SM, LangOpts);
+ unsigned Offset = SM.getFileOffset(FileRange.getBegin());
+ unsigned EndOffset = SM.getFileOffset(FileRange.getEnd());
+ return std::make_pair(Offset, EndOffset);
+}
+
+static ASTUnit::StandaloneFixIt makeStandaloneFixIt(const SourceManager &SM,
+ const LangOptions &LangOpts,
+ const FixItHint &InFix) {
+ ASTUnit::StandaloneFixIt OutFix;
+ OutFix.RemoveRange = makeStandaloneRange(InFix.RemoveRange, SM, LangOpts);
+ OutFix.InsertFromRange = makeStandaloneRange(InFix.InsertFromRange, SM,
+ LangOpts);
+ OutFix.CodeToInsert = InFix.CodeToInsert;
+ OutFix.BeforePreviousInsertions = InFix.BeforePreviousInsertions;
+ return OutFix;
+}
+
+static ASTUnit::StandaloneDiagnostic
+makeStandaloneDiagnostic(const LangOptions &LangOpts,
+ const StoredDiagnostic &InDiag) {
+ ASTUnit::StandaloneDiagnostic OutDiag;
+ OutDiag.ID = InDiag.getID();
+ OutDiag.Level = InDiag.getLevel();
+ OutDiag.Message = InDiag.getMessage();
+ OutDiag.LocOffset = 0;
+ if (InDiag.getLocation().isInvalid())
+ return OutDiag;
+ const SourceManager &SM = InDiag.getLocation().getManager();
+ SourceLocation FileLoc = SM.getFileLoc(InDiag.getLocation());
+ OutDiag.Filename = SM.getFilename(FileLoc);
+ if (OutDiag.Filename.empty())
+ return OutDiag;
+ OutDiag.LocOffset = SM.getFileOffset(FileLoc);
+ for (const CharSourceRange &Range : InDiag.getRanges())
+ OutDiag.Ranges.push_back(makeStandaloneRange(Range, SM, LangOpts));
+ for (const FixItHint &FixIt : InDiag.getFixIts())
+ OutDiag.FixIts.push_back(makeStandaloneFixIt(SM, LangOpts, FixIt));
+
+ return OutDiag;
+}
+
+/// \brief Attempt to build or re-use a precompiled preamble when (re-)parsing
+/// the source file.
+///
+/// This routine will compute the preamble of the main source file. If a
+/// non-trivial preamble is found, it will precompile that preamble into a
+/// precompiled header so that the precompiled preamble can be used to reduce
+/// reparsing time. If a precompiled preamble has already been constructed,
+/// this routine will determine if it is still valid and, if so, avoid
+/// rebuilding the precompiled preamble.
+///
+/// \param AllowRebuild When true (the default), this routine is
+/// allowed to rebuild the precompiled preamble if it is found to be
+/// out-of-date.
+///
+/// \param MaxLines When non-zero, the maximum number of lines that
+/// can occur within the preamble.
+///
+/// \returns If the precompiled preamble can be used, returns a newly-allocated
+/// buffer that should be used in place of the main file when doing so.
+/// Otherwise, returns a NULL pointer.
+std::unique_ptr<llvm::MemoryBuffer>
+ASTUnit::getMainBufferWithPrecompiledPreamble(
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ const CompilerInvocation &PreambleInvocationIn, bool AllowRebuild,
+ unsigned MaxLines) {
+
+ IntrusiveRefCntPtr<CompilerInvocation>
+ PreambleInvocation(new CompilerInvocation(PreambleInvocationIn));
+ FrontendOptions &FrontendOpts = PreambleInvocation->getFrontendOpts();
+ PreprocessorOptions &PreprocessorOpts
+ = PreambleInvocation->getPreprocessorOpts();
+
+ ComputedPreamble NewPreamble = ComputePreamble(*PreambleInvocation, MaxLines);
+
+ if (!NewPreamble.Size) {
+ // We couldn't find a preamble in the main source. Clear out the current
+ // preamble, if we have one. It's obviously no good any more.
+ Preamble.clear();
+ erasePreambleFile(this);
+
+ // The next time we actually see a preamble, precompile it.
+ PreambleRebuildCounter = 1;
+ return nullptr;
+ }
+
+ if (!Preamble.empty()) {
+ // We've previously computed a preamble. Check whether we have the same
+ // preamble now that we did before, and that there's enough space in
+ // the main-file buffer within the precompiled preamble to fit the
+ // new main file.
+ if (Preamble.size() == NewPreamble.Size &&
+ PreambleEndsAtStartOfLine == NewPreamble.PreambleEndsAtStartOfLine &&
+ memcmp(Preamble.getBufferStart(), NewPreamble.Buffer->getBufferStart(),
+ NewPreamble.Size) == 0) {
+ // The preamble has not changed. We may be able to re-use the precompiled
+ // preamble.
+
+ // Check that none of the files used by the preamble have changed.
+ bool AnyFileChanged = false;
+
+ // First, make a record of those files that have been overridden via
+ // remapping or unsaved_files.
+ llvm::StringMap<PreambleFileHash> OverriddenFiles;
+ for (const auto &R : PreprocessorOpts.RemappedFiles) {
+ if (AnyFileChanged)
+ break;
+
+ vfs::Status Status;
+ if (FileMgr->getNoncachedStatValue(R.second, Status)) {
+ // If we can't stat the file we're remapping to, assume that something
+ // horrible happened.
+ AnyFileChanged = true;
+ break;
+ }
+
+ OverriddenFiles[R.first] = PreambleFileHash::createForFile(
+ Status.getSize(), Status.getLastModificationTime().toEpochTime());
+ }
+
+ for (const auto &RB : PreprocessorOpts.RemappedFileBuffers) {
+ if (AnyFileChanged)
+ break;
+ OverriddenFiles[RB.first] =
+ PreambleFileHash::createForMemoryBuffer(RB.second);
+ }
+
+ // Check whether anything has changed.
+ for (llvm::StringMap<PreambleFileHash>::iterator
+ F = FilesInPreamble.begin(), FEnd = FilesInPreamble.end();
+ !AnyFileChanged && F != FEnd;
+ ++F) {
+ llvm::StringMap<PreambleFileHash>::iterator Overridden
+ = OverriddenFiles.find(F->first());
+ if (Overridden != OverriddenFiles.end()) {
+ // This file was remapped; check whether the newly-mapped file
+ // matches up with the previous mapping.
+ if (Overridden->second != F->second)
+ AnyFileChanged = true;
+ continue;
+ }
+
+ // The file was not remapped; check whether it has changed on disk.
+ vfs::Status Status;
+ if (FileMgr->getNoncachedStatValue(F->first(), Status)) {
+ // If we can't stat the file, assume that something horrible happened.
+ AnyFileChanged = true;
+ } else if (Status.getSize() != uint64_t(F->second.Size) ||
+ Status.getLastModificationTime().toEpochTime() !=
+ uint64_t(F->second.ModTime))
+ AnyFileChanged = true;
+ }
+
+ if (!AnyFileChanged) {
+ // Okay! We can re-use the precompiled preamble.
+
+ // Set the state of the diagnostic object to mimic its state
+ // after parsing the preamble.
+ getDiagnostics().Reset();
+ ProcessWarningOptions(getDiagnostics(),
+ PreambleInvocation->getDiagnosticOpts());
+ getDiagnostics().setNumWarnings(NumWarningsInPreamble);
+
+ return llvm::MemoryBuffer::getMemBufferCopy(
+ NewPreamble.Buffer->getBuffer(), FrontendOpts.Inputs[0].getFile());
+ }
+ }
+
+ // If we aren't allowed to rebuild the precompiled preamble, just
+ // return now.
+ if (!AllowRebuild)
+ return nullptr;
+
+ // We can't reuse the previously-computed preamble. Build a new one.
+ Preamble.clear();
+ PreambleDiagnostics.clear();
+ erasePreambleFile(this);
+ PreambleRebuildCounter = 1;
+ } else if (!AllowRebuild) {
+ // We aren't allowed to rebuild the precompiled preamble; just
+ // return now.
+ return nullptr;
+ }
+
+ // If the preamble rebuild counter > 1, it's because we previously
+ // failed to build a preamble and we're not yet ready to try
+ // again. Decrement the counter and return a failure.
+ if (PreambleRebuildCounter > 1) {
+ --PreambleRebuildCounter;
+ return nullptr;
+ }
+
+ // Create a temporary file for the precompiled preamble. In rare
+ // circumstances, this can fail.
+ std::string PreamblePCHPath = GetPreamblePCHPath();
+ if (PreamblePCHPath.empty()) {
+ // Try again next time.
+ PreambleRebuildCounter = 1;
+ return nullptr;
+ }
+
+ // We did not previously compute a preamble, or it can't be reused anyway.
+ SimpleTimer PreambleTimer(WantTiming);
+ PreambleTimer.setOutput("Precompiling preamble");
+
+ // Save the preamble text for later; we'll need to compare against it for
+ // subsequent reparses.
+ StringRef MainFilename = FrontendOpts.Inputs[0].getFile();
+ Preamble.assign(FileMgr->getFile(MainFilename),
+ NewPreamble.Buffer->getBufferStart(),
+ NewPreamble.Buffer->getBufferStart() + NewPreamble.Size);
+ PreambleEndsAtStartOfLine = NewPreamble.PreambleEndsAtStartOfLine;
+
+ PreambleBuffer = llvm::MemoryBuffer::getMemBufferCopy(
+ NewPreamble.Buffer->getBuffer().slice(0, Preamble.size()), MainFilename);
+
+ // Remap the main source file to the preamble buffer.
+ StringRef MainFilePath = FrontendOpts.Inputs[0].getFile();
+ PreprocessorOpts.addRemappedFile(MainFilePath, PreambleBuffer.get());
+
+ // Tell the compiler invocation to generate a temporary precompiled header.
+ FrontendOpts.ProgramAction = frontend::GeneratePCH;
+ // FIXME: Generate the precompiled header into memory?
+ FrontendOpts.OutputFile = PreamblePCHPath;
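+ // While building the preamble PCH itself, no bytes of the main file are
+ // covered by an existing precompiled preamble, so zero these out.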
+ PreprocessorOpts.PrecompiledPreambleBytes.first = 0;
+ PreprocessorOpts.PrecompiledPreambleBytes.second = false;
+
+ // Create the compiler instance to use for building the precompiled preamble.
+ std::unique_ptr<CompilerInstance> Clang(
+ new CompilerInstance(PCHContainerOps));
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
+ CICleanup(Clang.get());
+
+ Clang->setInvocation(&*PreambleInvocation);
+ OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
+
+ // Set up diagnostics, capturing all of the diagnostics produced.
+ Clang->setDiagnostics(&getDiagnostics());
+
+ // Create the target instance.
+ Clang->setTarget(TargetInfo::CreateTargetInfo(
+ Clang->getDiagnostics(), Clang->getInvocation().TargetOpts));
+ if (!Clang->hasTarget()) {
+ llvm::sys::fs::remove(FrontendOpts.OutputFile);
+ Preamble.clear();
+ PreambleRebuildCounter = DefaultPreambleRebuildInterval;
+ PreprocessorOpts.RemappedFileBuffers.pop_back();
+ return nullptr;
+ }
+
+ // Inform the target of the language options.
+ //
+ // FIXME: We shouldn't need to do this, the target should be immutable once
+ // created. This complexity should be lifted elsewhere.
+ Clang->getTarget().adjust(Clang->getLangOpts());
+
+ assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
+ "Invocation must have exactly one source file!");
+ assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_AST &&
+ "FIXME: AST inputs not yet supported here!");
+ assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_LLVM_IR &&
+ "IR inputs not support here!");
+
+ // Clear out old caches and data.
+ getDiagnostics().Reset();
+ ProcessWarningOptions(getDiagnostics(), Clang->getDiagnosticOpts());
+ checkAndRemoveNonDriverDiags(StoredDiagnostics);
+ TopLevelDecls.clear();
+ TopLevelDeclsInPreamble.clear();
+ PreambleDiagnostics.clear();
+
+ IntrusiveRefCntPtr<vfs::FileSystem> VFS =
+ createVFSFromCompilerInvocation(Clang->getInvocation(), getDiagnostics());
+ if (!VFS)
+ return nullptr;
+
+ // Create a file manager object to provide access to and cache the filesystem.
+ Clang->setFileManager(new FileManager(Clang->getFileSystemOpts(), VFS));
+
+ // Create the source manager.
+ Clang->setSourceManager(new SourceManager(getDiagnostics(),
+ Clang->getFileManager()));
+
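+ // Record every file the preamble reads so that FilesInPreamble (filled in
+ // below) can be used to validate the preamble on later reparses.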
+ auto PreambleDepCollector = std::make_shared<DependencyCollector>();
+ Clang->addDependencyCollector(PreambleDepCollector);
+
+ std::unique_ptr<PrecompilePreambleAction> Act;
+ Act.reset(new PrecompilePreambleAction(*this));
+ if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0])) {
+ llvm::sys::fs::remove(FrontendOpts.OutputFile);
+ Preamble.clear();
+ PreambleRebuildCounter = DefaultPreambleRebuildInterval;
+ PreprocessorOpts.RemappedFileBuffers.pop_back();
+ return nullptr;
+ }
+
+ Act->Execute();
+
+ // Transfer any diagnostics generated when parsing the preamble into the set
+ // of preamble diagnostics.
+ for (stored_diag_iterator I = stored_diag_afterDriver_begin(),
+ E = stored_diag_end();
+ I != E; ++I)
+ PreambleDiagnostics.push_back(
+ makeStandaloneDiagnostic(Clang->getLangOpts(), *I));
+
+ Act->EndSourceFile();
+
+ checkAndRemoveNonDriverDiags(StoredDiagnostics);
+
+ if (!Act->hasEmittedPreamblePCH()) {
+ // The preamble PCH failed (e.g. there was a module loading fatal error),
+ // so no precompiled header was generated. Forget that we even tried.
+ // FIXME: Should we leave a note for ourselves to try again?
+ llvm::sys::fs::remove(FrontendOpts.OutputFile);
+ Preamble.clear();
+ TopLevelDeclsInPreamble.clear();
+ PreambleRebuildCounter = DefaultPreambleRebuildInterval;
+ PreprocessorOpts.RemappedFileBuffers.pop_back();
+ return nullptr;
+ }
+
+ // Keep track of the preamble we precompiled.
+ setPreambleFile(this, FrontendOpts.OutputFile);
+ NumWarningsInPreamble = getDiagnostics().getNumWarnings();
+
+ // Keep track of all of the files that the source manager knows about,
+ // so we can verify whether they have changed or not.
+ FilesInPreamble.clear();
+ SourceManager &SourceMgr = Clang->getSourceManager();
+ for (auto &Filename : PreambleDepCollector->getDependencies()) {
+ const FileEntry *File = Clang->getFileManager().getFile(Filename);
+ if (!File || File == SourceMgr.getFileEntryForID(SourceMgr.getMainFileID()))
+ continue;
+ if (time_t ModTime = File->getModificationTime()) {
+ FilesInPreamble[File->getName()] = PreambleFileHash::createForFile(
+ File->getSize(), ModTime);
+ } else {
+ llvm::MemoryBuffer *Buffer = SourceMgr.getMemoryBufferForFile(File);
+ FilesInPreamble[File->getName()] =
+ PreambleFileHash::createForMemoryBuffer(Buffer);
+ }
+ }
+
+ PreambleRebuildCounter = 1;
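+ // Drop the main-file-to-preamble-buffer remapping installed above.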
+ PreprocessorOpts.RemappedFileBuffers.pop_back();
+
+ // If the hash of top-level entities differs from the hash of the top-level
+ // entities the last time we rebuilt the preamble, clear out the completion
+ // cache.
+ if (CurrentTopLevelHashValue != PreambleTopLevelHashValue) {
+ CompletionCacheTopLevelHashValue = 0;
+ PreambleTopLevelHashValue = CurrentTopLevelHashValue;
+ }
+
+ return llvm::MemoryBuffer::getMemBufferCopy(NewPreamble.Buffer->getBuffer(),
+ MainFilename);
+}
+
+void ASTUnit::RealizeTopLevelDeclsFromPreamble() {
+ std::vector<Decl *> Resolved;
+ Resolved.reserve(TopLevelDeclsInPreamble.size());
+ ExternalASTSource &Source = *getASTContext().getExternalSource();
+ for (serialization::DeclID TopLevelDecl : TopLevelDeclsInPreamble) {
+ // Resolve the declaration ID to an actual declaration, possibly
+ // deserializing the declaration in the process.
+ if (Decl *D = Source.GetExternalDecl(TopLevelDecl))
+ Resolved.push_back(D);
+ }
+ TopLevelDeclsInPreamble.clear();
+ TopLevelDecls.insert(TopLevelDecls.begin(), Resolved.begin(), Resolved.end());
+}
+
+void ASTUnit::transferASTDataFromCompilerInstance(CompilerInstance &CI) {
+ // Steal the target, context, and preprocessor, if they have been created.
+ assert(CI.hasInvocation() && "missing invocation");
+ LangOpts = CI.getInvocation().LangOpts;
+ TheSema = CI.takeSema();
+ Consumer = CI.takeASTConsumer();
+ if (CI.hasASTContext())
+ Ctx = &CI.getASTContext();
+ if (CI.hasPreprocessor())
+ PP = &CI.getPreprocessor();
+ CI.setSourceManager(nullptr);
+ CI.setFileManager(nullptr);
+ if (CI.hasTarget())
+ Target = &CI.getTarget();
+ Reader = CI.getModuleManager();
+ HadModuleLoaderFatalFailure = CI.hadModuleLoaderFatalFailure();
+}
+
+StringRef ASTUnit::getMainFileName() const {
+ if (Invocation && !Invocation->getFrontendOpts().Inputs.empty()) {
+ const FrontendInputFile &Input = Invocation->getFrontendOpts().Inputs[0];
+ if (Input.isFile())
+ return Input.getFile();
+ else
+ return Input.getBuffer()->getBufferIdentifier();
+ }
+
+ if (SourceMgr) {
+ if (const FileEntry *
+ FE = SourceMgr->getFileEntryForID(SourceMgr->getMainFileID()))
+ return FE->getName();
+ }
+
+ return StringRef();
+}
+
+StringRef ASTUnit::getASTFileName() const {
+ if (!isMainFileAST())
+ return StringRef();
+
+ serialization::ModuleFile &
+ Mod = Reader->getModuleManager().getPrimaryModule();
+ return Mod.FileName;
+}
+
+ASTUnit *ASTUnit::create(CompilerInvocation *CI,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ bool CaptureDiagnostics,
+ bool UserFilesAreVolatile) {
+ std::unique_ptr<ASTUnit> AST;
+ AST.reset(new ASTUnit(false));
+ ConfigureDiags(Diags, *AST, CaptureDiagnostics);
+ AST->Diagnostics = Diags;
+ AST->Invocation = CI;
+ AST->FileSystemOpts = CI->getFileSystemOpts();
+ IntrusiveRefCntPtr<vfs::FileSystem> VFS =
+ createVFSFromCompilerInvocation(*CI, *Diags);
+ if (!VFS)
+ return nullptr;
+ AST->FileMgr = new FileManager(AST->FileSystemOpts, VFS);
+ AST->UserFilesAreVolatile = UserFilesAreVolatile;
+ AST->SourceMgr = new SourceManager(AST->getDiagnostics(), *AST->FileMgr,
+ UserFilesAreVolatile);
+
+ return AST.release();
+}
+
+ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
+ CompilerInvocation *CI,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags, ASTFrontendAction *Action,
+ ASTUnit *Unit, bool Persistent, StringRef ResourceFilesPath,
+ bool OnlyLocalDecls, bool CaptureDiagnostics,
+ unsigned PrecompilePreambleAfterNParses, bool CacheCodeCompletionResults,
+ bool IncludeBriefCommentsInCodeCompletion, bool UserFilesAreVolatile,
+ std::unique_ptr<ASTUnit> *ErrAST) {
+ assert(CI && "A CompilerInvocation is required");
+
+ std::unique_ptr<ASTUnit> OwnAST;
+ ASTUnit *AST = Unit;
+ if (!AST) {
+ // Create the AST unit.
+ OwnAST.reset(create(CI, Diags, CaptureDiagnostics, UserFilesAreVolatile));
+ AST = OwnAST.get();
+ if (!AST)
+ return nullptr;
+ }
+
+ if (!ResourceFilesPath.empty()) {
+ // Override the resources path.
+ CI->getHeaderSearchOpts().ResourceDir = ResourceFilesPath;
+ }
+ AST->OnlyLocalDecls = OnlyLocalDecls;
+ AST->CaptureDiagnostics = CaptureDiagnostics;
+ if (PrecompilePreambleAfterNParses > 0)
+ AST->PreambleRebuildCounter = PrecompilePreambleAfterNParses;
+ AST->TUKind = Action ? Action->getTranslationUnitKind() : TU_Complete;
+ AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults;
+ AST->IncludeBriefCommentsInCodeCompletion
+ = IncludeBriefCommentsInCodeCompletion;
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
+ ASTUnitCleanup(OwnAST.get());
+ llvm::CrashRecoveryContextCleanupRegistrar<DiagnosticsEngine,
+ llvm::CrashRecoveryContextReleaseRefCleanup<DiagnosticsEngine> >
+ DiagCleanup(Diags.get());
+
+ // We'll manage file buffers ourselves.
+ CI->getPreprocessorOpts().RetainRemappedFileBuffers = true;
+ CI->getFrontendOpts().DisableFree = false;
+ ProcessWarningOptions(AST->getDiagnostics(), CI->getDiagnosticOpts());
+
+ // Create the compiler instance to use for building the AST.
+ std::unique_ptr<CompilerInstance> Clang(
+ new CompilerInstance(PCHContainerOps));
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
+ CICleanup(Clang.get());
+
+ Clang->setInvocation(CI);
+ AST->OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
+
+ // Set up diagnostics, capturing any diagnostics that would
+ // otherwise be dropped.
+ Clang->setDiagnostics(&AST->getDiagnostics());
+
+ // Create the target instance.
+ Clang->setTarget(TargetInfo::CreateTargetInfo(
+ Clang->getDiagnostics(), Clang->getInvocation().TargetOpts));
+ if (!Clang->hasTarget())
+ return nullptr;
+
+ // Inform the target of the language options.
+ //
+ // FIXME: We shouldn't need to do this, the target should be immutable once
+ // created. This complexity should be lifted elsewhere.
+ Clang->getTarget().adjust(Clang->getLangOpts());
+
+ assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
+ "Invocation must have exactly one source file!");
+ assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_AST &&
+ "FIXME: AST inputs not yet supported here!");
+ assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_LLVM_IR &&
+ "IR inputs not supported here!");
+
+ // Configure the various subsystems.
+ AST->TheSema.reset();
+ AST->Ctx = nullptr;
+ AST->PP = nullptr;
+ AST->Reader = nullptr;
+
+ // Create a file manager object to provide access to and cache the filesystem.
+ Clang->setFileManager(&AST->getFileManager());
+
+ // Create the source manager.
+ Clang->setSourceManager(&AST->getSourceManager());
+
+ ASTFrontendAction *Act = Action;
+
+ std::unique_ptr<TopLevelDeclTrackerAction> TrackerAct;
+ if (!Act) {
+ TrackerAct.reset(new TopLevelDeclTrackerAction(*AST));
+ Act = TrackerAct.get();
+ }
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<TopLevelDeclTrackerAction>
+ ActCleanup(TrackerAct.get());
+
+ if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0])) {
+ AST->transferASTDataFromCompilerInstance(*Clang);
+ if (OwnAST && ErrAST)
+ ErrAST->swap(OwnAST);
+
+ return nullptr;
+ }
+
+ if (Persistent && !TrackerAct) {
+ Clang->getPreprocessor().addPPCallbacks(
+ llvm::make_unique<MacroDefinitionTrackerPPCallbacks>(
+ AST->getCurrentTopLevelHashValue()));
+ std::vector<std::unique_ptr<ASTConsumer>> Consumers;
+ if (Clang->hasASTConsumer())
+ Consumers.push_back(Clang->takeASTConsumer());
+ Consumers.push_back(llvm::make_unique<TopLevelDeclTrackerConsumer>(
+ *AST, AST->getCurrentTopLevelHashValue()));
+ Clang->setASTConsumer(
+ llvm::make_unique<MultiplexConsumer>(std::move(Consumers)));
+ }
+ if (!Act->Execute()) {
+ AST->transferASTDataFromCompilerInstance(*Clang);
+ if (OwnAST && ErrAST)
+ ErrAST->swap(OwnAST);
+
+ return nullptr;
+ }
+
+ // Steal the created target, context, and preprocessor.
+ AST->transferASTDataFromCompilerInstance(*Clang);
+
+ Act->EndSourceFile();
+
+ if (OwnAST)
+ return OwnAST.release();
+ else
+ return AST;
+}
+
+bool ASTUnit::LoadFromCompilerInvocation(
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ unsigned PrecompilePreambleAfterNParses) {
+ if (!Invocation)
+ return true;
+
+ // We'll manage file buffers ourselves.
+ Invocation->getPreprocessorOpts().RetainRemappedFileBuffers = true;
+ Invocation->getFrontendOpts().DisableFree = false;
+ ProcessWarningOptions(getDiagnostics(), Invocation->getDiagnosticOpts());
+
+ std::unique_ptr<llvm::MemoryBuffer> OverrideMainBuffer;
+ if (PrecompilePreambleAfterNParses > 0) {
+ PreambleRebuildCounter = PrecompilePreambleAfterNParses;
+ OverrideMainBuffer =
+ getMainBufferWithPrecompiledPreamble(PCHContainerOps, *Invocation);
+ }
+
+ SimpleTimer ParsingTimer(WantTiming);
+ ParsingTimer.setOutput("Parsing " + getMainFileName());
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<llvm::MemoryBuffer>
+ MemBufferCleanup(OverrideMainBuffer.get());
+
+ return Parse(PCHContainerOps, std::move(OverrideMainBuffer));
+}
+
+std::unique_ptr<ASTUnit> ASTUnit::LoadFromCompilerInvocation(
+ CompilerInvocation *CI,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags, FileManager *FileMgr,
+ bool OnlyLocalDecls, bool CaptureDiagnostics,
+ unsigned PrecompilePreambleAfterNParses, TranslationUnitKind TUKind,
+ bool CacheCodeCompletionResults, bool IncludeBriefCommentsInCodeCompletion,
+ bool UserFilesAreVolatile) {
+ // Create the AST unit.
+ std::unique_ptr<ASTUnit> AST(new ASTUnit(false));
+ ConfigureDiags(Diags, *AST, CaptureDiagnostics);
+ AST->Diagnostics = Diags;
+ AST->OnlyLocalDecls = OnlyLocalDecls;
+ AST->CaptureDiagnostics = CaptureDiagnostics;
+ AST->TUKind = TUKind;
+ AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults;
+ AST->IncludeBriefCommentsInCodeCompletion
+ = IncludeBriefCommentsInCodeCompletion;
+ AST->Invocation = CI;
+ AST->FileSystemOpts = FileMgr->getFileSystemOpts();
+ AST->FileMgr = FileMgr;
+ AST->UserFilesAreVolatile = UserFilesAreVolatile;
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
+ ASTUnitCleanup(AST.get());
+ llvm::CrashRecoveryContextCleanupRegistrar<DiagnosticsEngine,
+ llvm::CrashRecoveryContextReleaseRefCleanup<DiagnosticsEngine> >
+ DiagCleanup(Diags.get());
+
+ if (AST->LoadFromCompilerInvocation(PCHContainerOps,
+ PrecompilePreambleAfterNParses))
+ return nullptr;
+ return AST;
+}
+
+ASTUnit *ASTUnit::LoadFromCommandLine(
+ const char **ArgBegin, const char **ArgEnd,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags, StringRef ResourceFilesPath,
+ bool OnlyLocalDecls, bool CaptureDiagnostics,
+ ArrayRef<RemappedFile> RemappedFiles, bool RemappedFilesKeepOriginalName,
+ unsigned PrecompilePreambleAfterNParses, TranslationUnitKind TUKind,
+ bool CacheCodeCompletionResults, bool IncludeBriefCommentsInCodeCompletion,
+ bool AllowPCHWithCompilerErrors, bool SkipFunctionBodies,
+ bool UserFilesAreVolatile, bool ForSerialization,
+ llvm::Optional<StringRef> ModuleFormat, std::unique_ptr<ASTUnit> *ErrAST) {
+ assert(Diags.get() && "no DiagnosticsEngine was provided");
+
+ SmallVector<StoredDiagnostic, 4> StoredDiagnostics;
+
+ IntrusiveRefCntPtr<CompilerInvocation> CI;
+
+ {
+
+ CaptureDroppedDiagnostics Capture(CaptureDiagnostics, *Diags,
+ StoredDiagnostics);
+
+ CI = clang::createInvocationFromCommandLine(
+ llvm::makeArrayRef(ArgBegin, ArgEnd),
+ Diags);
+ if (!CI)
+ return nullptr;
+ }
+
+ // Override any files that need remapping
+ for (const auto &RemappedFile : RemappedFiles) {
+ CI->getPreprocessorOpts().addRemappedFile(RemappedFile.first,
+ RemappedFile.second);
+ }
+ PreprocessorOptions &PPOpts = CI->getPreprocessorOpts();
+ PPOpts.RemappedFilesKeepOriginalName = RemappedFilesKeepOriginalName;
+ PPOpts.AllowPCHWithCompilerErrors = AllowPCHWithCompilerErrors;
+
+ // Override the resources path.
+ CI->getHeaderSearchOpts().ResourceDir = ResourceFilesPath;
+
+ CI->getFrontendOpts().SkipFunctionBodies = SkipFunctionBodies;
+
+ if (ModuleFormat)
+ CI->getHeaderSearchOpts().ModuleFormat = ModuleFormat.getValue();
+
+ // Create the AST unit.
+ std::unique_ptr<ASTUnit> AST;
+ AST.reset(new ASTUnit(false));
+ ConfigureDiags(Diags, *AST, CaptureDiagnostics);
+ AST->Diagnostics = Diags;
+ AST->FileSystemOpts = CI->getFileSystemOpts();
+ IntrusiveRefCntPtr<vfs::FileSystem> VFS =
+ createVFSFromCompilerInvocation(*CI, *Diags);
+ if (!VFS)
+ return nullptr;
+ AST->FileMgr = new FileManager(AST->FileSystemOpts, VFS);
+ AST->OnlyLocalDecls = OnlyLocalDecls;
+ AST->CaptureDiagnostics = CaptureDiagnostics;
+ AST->TUKind = TUKind;
+ AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults;
+ AST->IncludeBriefCommentsInCodeCompletion
+ = IncludeBriefCommentsInCodeCompletion;
+ AST->UserFilesAreVolatile = UserFilesAreVolatile;
+ AST->NumStoredDiagnosticsFromDriver = StoredDiagnostics.size();
+ AST->StoredDiagnostics.swap(StoredDiagnostics);
+ AST->Invocation = CI;
+ if (ForSerialization)
+ AST->WriterData.reset(new ASTWriterData());
+ // Zero out now to ease cleanup during crash recovery.
+ CI = nullptr;
+ Diags = nullptr;
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
+ ASTUnitCleanup(AST.get());
+
+ if (AST->LoadFromCompilerInvocation(PCHContainerOps,
+ PrecompilePreambleAfterNParses)) {
+ // An error occurred. If the caller wants to examine the diagnostics, hand
+ // over the ASTUnit.
+ if (ErrAST) {
+ AST->StoredDiagnostics.swap(AST->FailedParseDiagnostics);
+ ErrAST->swap(AST);
+ }
+ return nullptr;
+ }
+
+ return AST.release();
+}
+
+bool ASTUnit::Reparse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ ArrayRef<RemappedFile> RemappedFiles) {
+ if (!Invocation)
+ return true;
+
+ clearFileLevelDecls();
+
+ SimpleTimer ParsingTimer(WantTiming);
+ ParsingTimer.setOutput("Reparsing " + getMainFileName());
+
+ // Remap files.
+ PreprocessorOptions &PPOpts = Invocation->getPreprocessorOpts();
+ for (const auto &RB : PPOpts.RemappedFileBuffers)
+ delete RB.second;
+
+ Invocation->getPreprocessorOpts().clearRemappedFiles();
+ for (const auto &RemappedFile : RemappedFiles) {
+ Invocation->getPreprocessorOpts().addRemappedFile(RemappedFile.first,
+ RemappedFile.second);
+ }
+
+ // If we have a preamble file lying around, or if we might try to
+ // build a precompiled preamble, do so now.
+ std::unique_ptr<llvm::MemoryBuffer> OverrideMainBuffer;
+ if (!getPreambleFile(this).empty() || PreambleRebuildCounter > 0)
+ OverrideMainBuffer =
+ getMainBufferWithPrecompiledPreamble(PCHContainerOps, *Invocation);
+
+ // Clear out the diagnostics state.
+ FileMgr.reset();
+ getDiagnostics().Reset();
+ ProcessWarningOptions(getDiagnostics(), Invocation->getDiagnosticOpts());
+ if (OverrideMainBuffer)
+ getDiagnostics().setNumWarnings(NumWarningsInPreamble);
+
+ // Parse the sources
+ bool Result = Parse(PCHContainerOps, std::move(OverrideMainBuffer));
+
+ // If we're caching global code-completion results, and the top-level
+ // declarations have changed, clear out the code-completion cache.
+ if (!Result && ShouldCacheCodeCompletionResults &&
+ CurrentTopLevelHashValue != CompletionCacheTopLevelHashValue)
+ CacheCodeCompletionResults();
+
+ // We now need to clear out the completion info related to this translation
+ // unit; it'll be recreated if necessary.
+ CCTUInfo.reset();
+
+ return Result;
+}
+
+//----------------------------------------------------------------------------//
+// Code completion
+//----------------------------------------------------------------------------//
+
+namespace {
+ /// \brief Code completion consumer that combines the cached code-completion
+ /// results from an ASTUnit with the code-completion results provided to it,
+ /// then passes the combined results on to the next consumer.
+ class AugmentedCodeCompleteConsumer : public CodeCompleteConsumer {
+ uint64_t NormalContexts;
+ ASTUnit &AST;
+ CodeCompleteConsumer &Next;
+
+ public:
+ AugmentedCodeCompleteConsumer(ASTUnit &AST, CodeCompleteConsumer &Next,
+ const CodeCompleteOptions &CodeCompleteOpts)
+ : CodeCompleteConsumer(CodeCompleteOpts, Next.isOutputBinary()),
+ AST(AST), Next(Next)
+ {
+ // Compute the set of contexts in which we will look when we don't have
+ // any information about the specific context.
+ NormalContexts
+ = (1LL << CodeCompletionContext::CCC_TopLevel)
+ | (1LL << CodeCompletionContext::CCC_ObjCInterface)
+ | (1LL << CodeCompletionContext::CCC_ObjCImplementation)
+ | (1LL << CodeCompletionContext::CCC_ObjCIvarList)
+ | (1LL << CodeCompletionContext::CCC_Statement)
+ | (1LL << CodeCompletionContext::CCC_Expression)
+ | (1LL << CodeCompletionContext::CCC_ObjCMessageReceiver)
+ | (1LL << CodeCompletionContext::CCC_DotMemberAccess)
+ | (1LL << CodeCompletionContext::CCC_ArrowMemberAccess)
+ | (1LL << CodeCompletionContext::CCC_ObjCPropertyAccess)
+ | (1LL << CodeCompletionContext::CCC_ObjCProtocolName)
+ | (1LL << CodeCompletionContext::CCC_ParenthesizedExpression)
+ | (1LL << CodeCompletionContext::CCC_Recovery);
+
+ if (AST.getASTContext().getLangOpts().CPlusPlus)
+ NormalContexts |= (1LL << CodeCompletionContext::CCC_EnumTag)
+ | (1LL << CodeCompletionContext::CCC_UnionTag)
+ | (1LL << CodeCompletionContext::CCC_ClassOrStructTag);
+ }
+
+ void ProcessCodeCompleteResults(Sema &S, CodeCompletionContext Context,
+ CodeCompletionResult *Results,
+ unsigned NumResults) override;
+
+ void ProcessOverloadCandidates(Sema &S, unsigned CurrentArg,
+ OverloadCandidate *Candidates,
+ unsigned NumCandidates) override {
+ Next.ProcessOverloadCandidates(S, CurrentArg, Candidates, NumCandidates);
+ }
+
+ CodeCompletionAllocator &getAllocator() override {
+ return Next.getAllocator();
+ }
+
+ CodeCompletionTUInfo &getCodeCompletionTUInfo() override {
+ return Next.getCodeCompletionTUInfo();
+ }
+ };
+} // anonymous namespace
+
+/// \brief Helper function that computes which global names are hidden by the
+/// local code-completion results.
+static void CalculateHiddenNames(const CodeCompletionContext &Context,
+ CodeCompletionResult *Results,
+ unsigned NumResults,
+ ASTContext &Ctx,
+ llvm::StringSet<llvm::BumpPtrAllocator> &HiddenNames){
+ bool OnlyTagNames = false;
+ switch (Context.getKind()) {
+ case CodeCompletionContext::CCC_Recovery:
+ case CodeCompletionContext::CCC_TopLevel:
+ case CodeCompletionContext::CCC_ObjCInterface:
+ case CodeCompletionContext::CCC_ObjCImplementation:
+ case CodeCompletionContext::CCC_ObjCIvarList:
+ case CodeCompletionContext::CCC_ClassStructUnion:
+ case CodeCompletionContext::CCC_Statement:
+ case CodeCompletionContext::CCC_Expression:
+ case CodeCompletionContext::CCC_ObjCMessageReceiver:
+ case CodeCompletionContext::CCC_DotMemberAccess:
+ case CodeCompletionContext::CCC_ArrowMemberAccess:
+ case CodeCompletionContext::CCC_ObjCPropertyAccess:
+ case CodeCompletionContext::CCC_Namespace:
+ case CodeCompletionContext::CCC_Type:
+ case CodeCompletionContext::CCC_Name:
+ case CodeCompletionContext::CCC_PotentiallyQualifiedName:
+ case CodeCompletionContext::CCC_ParenthesizedExpression:
+ case CodeCompletionContext::CCC_ObjCInterfaceName:
+ break;
+
+ case CodeCompletionContext::CCC_EnumTag:
+ case CodeCompletionContext::CCC_UnionTag:
+ case CodeCompletionContext::CCC_ClassOrStructTag:
+ OnlyTagNames = true;
+ break;
+
+ case CodeCompletionContext::CCC_ObjCProtocolName:
+ case CodeCompletionContext::CCC_MacroName:
+ case CodeCompletionContext::CCC_MacroNameUse:
+ case CodeCompletionContext::CCC_PreprocessorExpression:
+ case CodeCompletionContext::CCC_PreprocessorDirective:
+ case CodeCompletionContext::CCC_NaturalLanguage:
+ case CodeCompletionContext::CCC_SelectorName:
+ case CodeCompletionContext::CCC_TypeQualifiers:
+ case CodeCompletionContext::CCC_Other:
+ case CodeCompletionContext::CCC_OtherWithMacros:
+ case CodeCompletionContext::CCC_ObjCInstanceMessage:
+ case CodeCompletionContext::CCC_ObjCClassMessage:
+ case CodeCompletionContext::CCC_ObjCCategoryName:
+ // We're looking for nothing, or we're looking for names that cannot
+ // be hidden.
+ return;
+ }
+
+ typedef CodeCompletionResult Result;
+ for (unsigned I = 0; I != NumResults; ++I) {
+ if (Results[I].Kind != Result::RK_Declaration)
+ continue;
+
+ unsigned IDNS
+ = Results[I].Declaration->getUnderlyingDecl()->getIdentifierNamespace();
+
+ bool Hiding = false;
+ if (OnlyTagNames)
+ Hiding = (IDNS & Decl::IDNS_Tag);
+ else {
+ unsigned HiddenIDNS = (Decl::IDNS_Type | Decl::IDNS_Member |
+ Decl::IDNS_Namespace | Decl::IDNS_Ordinary |
+ Decl::IDNS_NonMemberOperator);
+ if (Ctx.getLangOpts().CPlusPlus)
+ HiddenIDNS |= Decl::IDNS_Tag;
+ Hiding = (IDNS & HiddenIDNS);
+ }
+
+ if (!Hiding)
+ continue;
+
+ DeclarationName Name = Results[I].Declaration->getDeclName();
+ if (IdentifierInfo *Identifier = Name.getAsIdentifierInfo())
+ HiddenNames.insert(Identifier->getName());
+ else
+ HiddenNames.insert(Name.getAsString());
+ }
+}
+
+void AugmentedCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &S,
+ CodeCompletionContext Context,
+ CodeCompletionResult *Results,
+ unsigned NumResults) {
+ // Merge the results we were given with the results we cached.
+ bool AddedResult = false;
+ uint64_t InContexts =
+ Context.getKind() == CodeCompletionContext::CCC_Recovery
+ ? NormalContexts : (1LL << Context.getKind());
+ // Contains the set of names that are hidden by "local" completion results.
+ llvm::StringSet<llvm::BumpPtrAllocator> HiddenNames;
+ typedef CodeCompletionResult Result;
+ SmallVector<Result, 8> AllResults;
+ for (ASTUnit::cached_completion_iterator
+ C = AST.cached_completion_begin(),
+ CEnd = AST.cached_completion_end();
+ C != CEnd; ++C) {
+ // If the context we are in matches any of the contexts we are
+ // interested in, we'll add this result.
+ if ((C->ShowInContexts & InContexts) == 0)
+ continue;
+
+ // If we haven't added any results previously, do so now.
+ if (!AddedResult) {
+ CalculateHiddenNames(Context, Results, NumResults, S.Context,
+ HiddenNames);
+ AllResults.insert(AllResults.end(), Results, Results + NumResults);
+ AddedResult = true;
+ }
+
+ // Determine whether this global completion result is hidden by a local
+ // completion result. If so, skip it.
+ if (C->Kind != CXCursor_MacroDefinition &&
+ HiddenNames.count(C->Completion->getTypedText()))
+ continue;
+
+ // Adjust priority based on similar type classes.
+ unsigned Priority = C->Priority;
+ CodeCompletionString *Completion = C->Completion;
+ if (!Context.getPreferredType().isNull()) {
+ if (C->Kind == CXCursor_MacroDefinition) {
+ Priority = getMacroUsagePriority(C->Completion->getTypedText(),
+ S.getLangOpts(),
+ Context.getPreferredType()->isAnyPointerType());
+ } else if (C->Type) {
+ CanQualType Expected
+ = S.Context.getCanonicalType(
+ Context.getPreferredType().getUnqualifiedType());
+ SimplifiedTypeClass ExpectedSTC = getSimplifiedTypeClass(Expected);
+ if (ExpectedSTC == C->TypeClass) {
+ // We know this type is similar; check for an exact match.
+ llvm::StringMap<unsigned> &CachedCompletionTypes
+ = AST.getCachedCompletionTypes();
+ llvm::StringMap<unsigned>::iterator Pos
+ = CachedCompletionTypes.find(QualType(Expected).getAsString());
+ if (Pos != CachedCompletionTypes.end() && Pos->second == C->Type)
+ Priority /= CCF_ExactTypeMatch;
+ else
+ Priority /= CCF_SimilarTypeMatch;
+ }
+ }
+ }
+
+ // Adjust the completion string, if required.
+ if (C->Kind == CXCursor_MacroDefinition &&
+ Context.getKind() == CodeCompletionContext::CCC_MacroNameUse) {
+ // Create a new code-completion string that just contains the
+ // macro name, without its arguments.
+ CodeCompletionBuilder Builder(getAllocator(), getCodeCompletionTUInfo(),
+ CCP_CodePattern, C->Availability);
+ Builder.AddTypedTextChunk(C->Completion->getTypedText());
+ Priority = CCP_CodePattern;
+ Completion = Builder.TakeString();
+ }
+
+ AllResults.push_back(Result(Completion, Priority, C->Kind,
+ C->Availability));
+ }
+
+ // If we did not add any cached completion results, just forward the
+ // results we were given to the next consumer.
+ if (!AddedResult) {
+ Next.ProcessCodeCompleteResults(S, Context, Results, NumResults);
+ return;
+ }
+
+ Next.ProcessCodeCompleteResults(S, Context, AllResults.data(),
+ AllResults.size());
+}
+
+void ASTUnit::CodeComplete(
+ StringRef File, unsigned Line, unsigned Column,
+ ArrayRef<RemappedFile> RemappedFiles, bool IncludeMacros,
+ bool IncludeCodePatterns, bool IncludeBriefComments,
+ CodeCompleteConsumer &Consumer,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ DiagnosticsEngine &Diag, LangOptions &LangOpts, SourceManager &SourceMgr,
+ FileManager &FileMgr, SmallVectorImpl<StoredDiagnostic> &StoredDiagnostics,
+ SmallVectorImpl<const llvm::MemoryBuffer *> &OwnedBuffers) {
+ if (!Invocation)
+ return;
+
+ SimpleTimer CompletionTimer(WantTiming);
+ CompletionTimer.setOutput("Code completion @ " + File + ":" +
+ Twine(Line) + ":" + Twine(Column));
+
+ IntrusiveRefCntPtr<CompilerInvocation>
+ CCInvocation(new CompilerInvocation(*Invocation));
+
+ FrontendOptions &FrontendOpts = CCInvocation->getFrontendOpts();
+ CodeCompleteOptions &CodeCompleteOpts = FrontendOpts.CodeCompleteOpts;
+ PreprocessorOptions &PreprocessorOpts = CCInvocation->getPreprocessorOpts();
+
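+ // If we already have cached global results, macros and globals come from
+ // the cache rather than from a fresh pass over the translation unit.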
+ CodeCompleteOpts.IncludeMacros = IncludeMacros &&
+ CachedCompletionResults.empty();
+ CodeCompleteOpts.IncludeCodePatterns = IncludeCodePatterns;
+ CodeCompleteOpts.IncludeGlobals = CachedCompletionResults.empty();
+ CodeCompleteOpts.IncludeBriefComments = IncludeBriefComments;
+
+ assert(IncludeBriefComments == this->IncludeBriefCommentsInCodeCompletion);
+
+ FrontendOpts.CodeCompletionAt.FileName = File;
+ FrontendOpts.CodeCompletionAt.Line = Line;
+ FrontendOpts.CodeCompletionAt.Column = Column;
+
+ // Set the language options appropriately.
+ LangOpts = *CCInvocation->getLangOpts();
+
+ // Spell-checking and warnings are wasteful during code-completion.
+ LangOpts.SpellChecking = false;
+ CCInvocation->getDiagnosticOpts().IgnoreWarnings = true;
+
+ std::unique_ptr<CompilerInstance> Clang(
+ new CompilerInstance(PCHContainerOps));
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
+ CICleanup(Clang.get());
+
+ Clang->setInvocation(&*CCInvocation);
+ OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
+
+ // Set up diagnostics, capturing any diagnostics produced.
+ Clang->setDiagnostics(&Diag);
+ CaptureDroppedDiagnostics Capture(true,
+ Clang->getDiagnostics(),
+ StoredDiagnostics);
+ ProcessWarningOptions(Diag, CCInvocation->getDiagnosticOpts());
+
+ // Create the target instance.
+ Clang->setTarget(TargetInfo::CreateTargetInfo(
+ Clang->getDiagnostics(), Clang->getInvocation().TargetOpts));
+ if (!Clang->hasTarget()) {
+ Clang->setInvocation(nullptr);
+ return;
+ }
+
+ // Inform the target of the language options.
+ //
+ // FIXME: We shouldn't need to do this, the target should be immutable once
+ // created. This complexity should be lifted elsewhere.
+ Clang->getTarget().adjust(Clang->getLangOpts());
+
+ assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
+ "Invocation must have exactly one source file!");
+ assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_AST &&
+ "FIXME: AST inputs not yet supported here!");
+ assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_LLVM_IR &&
+ "IR inputs not support here!");
+
+ // Use the source and file managers that we were given.
+ Clang->setFileManager(&FileMgr);
+ Clang->setSourceManager(&SourceMgr);
+
+ // Remap files.
+ PreprocessorOpts.clearRemappedFiles();
+ PreprocessorOpts.RetainRemappedFileBuffers = true;
+ for (const auto &RemappedFile : RemappedFiles) {
+ PreprocessorOpts.addRemappedFile(RemappedFile.first, RemappedFile.second);
+ OwnedBuffers.push_back(RemappedFile.second);
+ }
+
+ // Use the code-completion consumer we were given, augmenting it with any
+ // cached code-completion results.
+ AugmentedCodeCompleteConsumer *AugmentedConsumer
+ = new AugmentedCodeCompleteConsumer(*this, Consumer, CodeCompleteOpts);
+ Clang->setCodeCompletionConsumer(AugmentedConsumer);
+
+ // If we have a precompiled preamble, try to use it. We only allow
+ // the use of the precompiled preamble if the completion point is
+ // within the main file, after the end of the precompiled preamble.
+ std::unique_ptr<llvm::MemoryBuffer> OverrideMainBuffer;
+ if (!getPreambleFile(this).empty()) {
+ std::string CompleteFilePath(File);
+ llvm::sys::fs::UniqueID CompleteFileID;
+
+ if (!llvm::sys::fs::getUniqueID(CompleteFilePath, CompleteFileID)) {
+ std::string MainPath(OriginalSourceFile);
+ llvm::sys::fs::UniqueID MainID;
+ if (!llvm::sys::fs::getUniqueID(MainPath, MainID)) {
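+ // The preamble can only help for completion points past the first line;
+ // limit it to the lines strictly above the completion point.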
+ if (CompleteFileID == MainID && Line > 1)
+ OverrideMainBuffer = getMainBufferWithPrecompiledPreamble(
+ PCHContainerOps, *CCInvocation, false, Line - 1);
+ }
+ }
+ }
+
+ // If the main file has been overridden due to the use of a preamble,
+ // make that override happen and introduce the preamble.
+ if (OverrideMainBuffer) {
+ PreprocessorOpts.addRemappedFile(OriginalSourceFile,
+ OverrideMainBuffer.get());
+ PreprocessorOpts.PrecompiledPreambleBytes.first = Preamble.size();
+ PreprocessorOpts.PrecompiledPreambleBytes.second
+ = PreambleEndsAtStartOfLine;
+ PreprocessorOpts.ImplicitPCHInclude = getPreambleFile(this);
+ PreprocessorOpts.DisablePCHValidation = true;
+
+ OwnedBuffers.push_back(OverrideMainBuffer.release());
+ } else {
+ PreprocessorOpts.PrecompiledPreambleBytes.first = 0;
+ PreprocessorOpts.PrecompiledPreambleBytes.second = false;
+ }
+
+ // Disable the preprocessing record if modules are not enabled.
+ if (!Clang->getLangOpts().Modules)
+ PreprocessorOpts.DetailedRecord = false;
+
+ std::unique_ptr<SyntaxOnlyAction> Act;
+ Act.reset(new SyntaxOnlyAction);
+ if (Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0])) {
+ Act->Execute();
+ Act->EndSourceFile();
+ }
+}
+
+bool ASTUnit::Save(StringRef File) {
+ if (HadModuleLoaderFatalFailure)
+ return true;
+
+ // Write to a temporary file and later rename it to the actual file, to avoid
+ // possible race conditions.
+ SmallString<128> TempPath;
+ TempPath = File;
+ TempPath += "-%%%%%%%%";
+ int fd;
+ if (llvm::sys::fs::createUniqueFile(TempPath, fd, TempPath))
+ return true;
+
+ // FIXME: Can we somehow regenerate the stat cache here, or do we need to
+ // unconditionally create a stat cache when we parse the file?
+ llvm::raw_fd_ostream Out(fd, /*shouldClose=*/true);
+
+ serialize(Out);
+ Out.close();
+ if (Out.has_error()) {
+ Out.clear_error();
+ return true;
+ }
+
+ if (llvm::sys::fs::rename(TempPath, File)) {
+ llvm::sys::fs::remove(TempPath);
+ return true;
+ }
+
+ return false;
+}
+
+static bool serializeUnit(ASTWriter &Writer,
+ SmallVectorImpl<char> &Buffer,
+ Sema &S,
+ bool hasErrors,
+ raw_ostream &OS) {
+ Writer.WriteAST(S, std::string(), nullptr, "", hasErrors);
+
+ // Write the generated bitstream to the output stream.
+ if (!Buffer.empty())
+ OS.write(Buffer.data(), Buffer.size());
+
+ return false;
+}
+
+bool ASTUnit::serialize(raw_ostream &OS) {
+ bool hasErrors = getDiagnostics().hasErrorOccurred();
+
+ if (WriterData)
+ return serializeUnit(WriterData->Writer, WriterData->Buffer,
+ getSema(), hasErrors, OS);
+
+ SmallString<128> Buffer;
+ llvm::BitstreamWriter Stream(Buffer);
+ ASTWriter Writer(Stream, { });
+ return serializeUnit(Writer, Buffer, getSema(), hasErrors, OS);
+}
+
+typedef ContinuousRangeMap<unsigned, int, 2> SLocRemap;
+
+void ASTUnit::TranslateStoredDiagnostics(
+ FileManager &FileMgr,
+ SourceManager &SrcMgr,
+ const SmallVectorImpl<StandaloneDiagnostic> &Diags,
+ SmallVectorImpl<StoredDiagnostic> &Out) {
+ // Map the standalone diagnostic into the new source manager. We also need to
+ // remap all the locations to the new view. This includes the diag location,
+ // any associated source ranges, and the source ranges of associated fix-its.
+ // FIXME: There should be a cleaner way to do this.
+
+ SmallVector<StoredDiagnostic, 4> Result;
+ Result.reserve(Diags.size());
+ for (const StandaloneDiagnostic &SD : Diags) {
+ // Rebuild the StoredDiagnostic.
+ if (SD.Filename.empty())
+ continue;
+ const FileEntry *FE = FileMgr.getFile(SD.Filename);
+ if (!FE)
+ continue;
+ FileID FID = SrcMgr.translateFile(FE);
+ SourceLocation FileLoc = SrcMgr.getLocForStartOfFile(FID);
+ if (FileLoc.isInvalid())
+ continue;
+ SourceLocation L = FileLoc.getLocWithOffset(SD.LocOffset);
+ FullSourceLoc Loc(L, SrcMgr);
+
+ SmallVector<CharSourceRange, 4> Ranges;
+ Ranges.reserve(SD.Ranges.size());
+ for (const auto &Range : SD.Ranges) {
+ SourceLocation BL = FileLoc.getLocWithOffset(Range.first);
+ SourceLocation EL = FileLoc.getLocWithOffset(Range.second);
+ Ranges.push_back(CharSourceRange::getCharRange(BL, EL));
+ }
+
+ SmallVector<FixItHint, 2> FixIts;
+ FixIts.reserve(SD.FixIts.size());
+ for (const StandaloneFixIt &FixIt : SD.FixIts) {
+ FixIts.push_back(FixItHint());
+ FixItHint &FH = FixIts.back();
+ FH.CodeToInsert = FixIt.CodeToInsert;
+ SourceLocation BL = FileLoc.getLocWithOffset(FixIt.RemoveRange.first);
+ SourceLocation EL = FileLoc.getLocWithOffset(FixIt.RemoveRange.second);
+ FH.RemoveRange = CharSourceRange::getCharRange(BL, EL);
+ }
+
+ Result.push_back(StoredDiagnostic(SD.Level, SD.ID,
+ SD.Message, Loc, Ranges, FixIts));
+ }
+ Result.swap(Out);
+}
+
+void ASTUnit::addFileLevelDecl(Decl *D) {
+ assert(D);
+
+ // We only care about local declarations.
+ if (D->isFromASTFile())
+ return;
+
+ SourceManager &SM = *SourceMgr;
+ SourceLocation Loc = D->getLocation();
+ if (Loc.isInvalid() || !SM.isLocalSourceLocation(Loc))
+ return;
+
+ // We only keep track of the file-level declarations of each file.
+ if (!D->getLexicalDeclContext()->isFileContext())
+ return;
+
+ SourceLocation FileLoc = SM.getFileLoc(Loc);
+ assert(SM.isLocalSourceLocation(FileLoc));
+ FileID FID;
+ unsigned Offset;
+ std::tie(FID, Offset) = SM.getDecomposedLoc(FileLoc);
+ if (FID.isInvalid())
+ return;
+
+ LocDeclsTy *&Decls = FileDecls[FID];
+ if (!Decls)
+ Decls = new LocDeclsTy();
+
+ std::pair<unsigned, Decl *> LocDecl(Offset, D);
+
+ if (Decls->empty() || Decls->back().first <= Offset) {
+ Decls->push_back(LocDecl);
+ return;
+ }
+
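+ // Out-of-order declaration: binary-search for the slot that keeps the
+ // vector sorted by offset.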
+ LocDeclsTy::iterator I = std::upper_bound(Decls->begin(), Decls->end(),
+ LocDecl, llvm::less_first());
+
+ Decls->insert(I, LocDecl);
+}
+
+void ASTUnit::findFileRegionDecls(FileID File, unsigned Offset, unsigned Length,
+ SmallVectorImpl<Decl *> &Decls) {
+ if (File.isInvalid())
+ return;
+
+ if (SourceMgr->isLoadedFileID(File)) {
+ assert(Ctx->getExternalSource() && "No external source!");
+ return Ctx->getExternalSource()->FindFileRegionDecls(File, Offset, Length,
+ Decls);
+ }
+
+ FileDeclsTy::iterator I = FileDecls.find(File);
+ if (I == FileDecls.end())
+ return;
+
+ LocDeclsTy &LocDecls = *I->second;
+ if (LocDecls.empty())
+ return;
+
+ LocDeclsTy::iterator BeginIt =
+ std::lower_bound(LocDecls.begin(), LocDecls.end(),
+ std::make_pair(Offset, (Decl *)nullptr),
+ llvm::less_first());
+ if (BeginIt != LocDecls.begin())
+ --BeginIt;
+
+ // If we are pointing at a top-level decl inside an objc container, we need
+ // to backtrack until we find it; otherwise we will fail to report that the
+ // region overlaps with an objc container.
+ while (BeginIt != LocDecls.begin() &&
+ BeginIt->second->isTopLevelDeclInObjCContainer())
+ --BeginIt;
+
+ LocDeclsTy::iterator EndIt = std::upper_bound(
+ LocDecls.begin(), LocDecls.end(),
+ std::make_pair(Offset + Length, (Decl *)nullptr), llvm::less_first());
+ if (EndIt != LocDecls.end())
+ ++EndIt;
+
+ for (LocDeclsTy::iterator DIt = BeginIt; DIt != EndIt; ++DIt)
+ Decls.push_back(DIt->second);
+}
+
+SourceLocation ASTUnit::getLocation(const FileEntry *File,
+ unsigned Line, unsigned Col) const {
+ const SourceManager &SM = getSourceManager();
+ SourceLocation Loc = SM.translateFileLineCol(File, Line, Col);
+ return SM.getMacroArgExpandedLocation(Loc);
+}
+
+SourceLocation ASTUnit::getLocation(const FileEntry *File,
+ unsigned Offset) const {
+ const SourceManager &SM = getSourceManager();
+ SourceLocation FileLoc = SM.translateFileLineCol(File, 1, 1);
+ return SM.getMacroArgExpandedLocation(FileLoc.getLocWithOffset(Offset));
+}
+
+/// \brief If \arg Loc is a loaded location from the preamble, returns
+/// the corresponding local location of the main file, otherwise it returns
+/// \arg Loc.
+SourceLocation ASTUnit::mapLocationFromPreamble(SourceLocation Loc) {
+ FileID PreambleID;
+ if (SourceMgr)
+ PreambleID = SourceMgr->getPreambleFileID();
+
+ if (Loc.isInvalid() || Preamble.empty() || PreambleID.isInvalid())
+ return Loc;
+
+ unsigned Offs;
+ if (SourceMgr->isInFileID(Loc, PreambleID, &Offs) && Offs < Preamble.size()) {
+ SourceLocation FileLoc
+ = SourceMgr->getLocForStartOfFile(SourceMgr->getMainFileID());
+ return FileLoc.getLocWithOffset(Offs);
+ }
+
+ return Loc;
+}
+
+/// \brief If \arg Loc is a local location of the main file but inside the
+/// preamble chunk, returns the corresponding loaded location from the
+/// preamble, otherwise it returns \arg Loc.
+SourceLocation ASTUnit::mapLocationToPreamble(SourceLocation Loc) {
+ FileID PreambleID;
+ if (SourceMgr)
+ PreambleID = SourceMgr->getPreambleFileID();
+
+ if (Loc.isInvalid() || Preamble.empty() || PreambleID.isInvalid())
+ return Loc;
+
+ unsigned Offs;
+ if (SourceMgr->isInFileID(Loc, SourceMgr->getMainFileID(), &Offs) &&
+ Offs < Preamble.size()) {
+ SourceLocation FileLoc = SourceMgr->getLocForStartOfFile(PreambleID);
+ return FileLoc.getLocWithOffset(Offs);
+ }
+
+ return Loc;
+}
+
+bool ASTUnit::isInPreambleFileID(SourceLocation Loc) {
+ FileID FID;
+ if (SourceMgr)
+ FID = SourceMgr->getPreambleFileID();
+
+ if (Loc.isInvalid() || FID.isInvalid())
+ return false;
+
+ return SourceMgr->isInFileID(Loc, FID);
+}
+
+bool ASTUnit::isInMainFileID(SourceLocation Loc) {
+ FileID FID;
+ if (SourceMgr)
+ FID = SourceMgr->getMainFileID();
+
+ if (Loc.isInvalid() || FID.isInvalid())
+ return false;
+
+ return SourceMgr->isInFileID(Loc, FID);
+}
+
+SourceLocation ASTUnit::getEndOfPreambleFileID() {
+ FileID FID;
+ if (SourceMgr)
+ FID = SourceMgr->getPreambleFileID();
+
+ if (FID.isInvalid())
+ return SourceLocation();
+
+ return SourceMgr->getLocForEndOfFile(FID);
+}
+
+SourceLocation ASTUnit::getStartOfMainFileID() {
+ FileID FID;
+ if (SourceMgr)
+ FID = SourceMgr->getMainFileID();
+
+ if (FID.isInvalid())
+ return SourceLocation();
+
+ return SourceMgr->getLocForStartOfFile(FID);
+}
+
+llvm::iterator_range<PreprocessingRecord::iterator>
+ASTUnit::getLocalPreprocessingEntities() const {
+ if (isMainFileAST()) {
+ serialization::ModuleFile &
+ Mod = Reader->getModuleManager().getPrimaryModule();
+ return Reader->getModulePreprocessedEntities(Mod);
+ }
+
+ if (PreprocessingRecord *PPRec = PP->getPreprocessingRecord())
+ return llvm::make_range(PPRec->local_begin(), PPRec->local_end());
+
+ return llvm::make_range(PreprocessingRecord::iterator(),
+ PreprocessingRecord::iterator());
+}
+
+bool ASTUnit::visitLocalTopLevelDecls(void *context, DeclVisitorFn Fn) {
+ if (isMainFileAST()) {
+ serialization::ModuleFile &
+ Mod = Reader->getModuleManager().getPrimaryModule();
+ for (const Decl *D : Reader->getModuleFileLevelDecls(Mod)) {
+ if (!Fn(context, D))
+ return false;
+ }
+
+ return true;
+ }
+
+ for (ASTUnit::top_level_iterator TL = top_level_begin(),
+ TLEnd = top_level_end();
+ TL != TLEnd; ++TL) {
+ if (!Fn(context, *TL))
+ return false;
+ }
+
+ return true;
+}
+
+const FileEntry *ASTUnit::getPCHFile() {
+ if (!Reader)
+ return nullptr;
+
+ serialization::ModuleFile *Mod = nullptr;
+ Reader->getModuleManager().visit([&Mod](serialization::ModuleFile &M) {
+ switch (M.Kind) {
+ case serialization::MK_ImplicitModule:
+ case serialization::MK_ExplicitModule:
+ return true; // skip dependencies.
+ case serialization::MK_PCH:
+ Mod = &M;
+ return true; // found it.
+ case serialization::MK_Preamble:
+ return false; // look in dependencies.
+ case serialization::MK_MainFile:
+ return false; // look in dependencies.
+ }
+
+ return true;
+ });
+ if (Mod)
+ return Mod->File;
+
+ return nullptr;
+}
+
+bool ASTUnit::isModuleFile() {
+ return isMainFileAST() && !ASTFileLangOpts.CurrentModule.empty();
+}
+
+void ASTUnit::PreambleData::countLines() const {
+ NumLines = 0;
+ if (empty())
+ return;
+
+ NumLines = std::count(Buffer.begin(), Buffer.end(), '\n');
+
+ if (Buffer.back() != '\n')
+ ++NumLines;
+}
+
+#ifndef NDEBUG
+ASTUnit::ConcurrencyState::ConcurrencyState() {
+ Mutex = new llvm::sys::MutexImpl(/*recursive=*/true);
+}
+
+ASTUnit::ConcurrencyState::~ConcurrencyState() {
+ delete static_cast<llvm::sys::MutexImpl *>(Mutex);
+}
+
+void ASTUnit::ConcurrencyState::start() {
+ bool acquired = static_cast<llvm::sys::MutexImpl *>(Mutex)->tryacquire();
+ assert(acquired && "Concurrent access to ASTUnit!");
+}
+
+void ASTUnit::ConcurrencyState::finish() {
+ static_cast<llvm::sys::MutexImpl *>(Mutex)->release();
+}
+
+#else // NDEBUG
+
+ASTUnit::ConcurrencyState::ConcurrencyState() { Mutex = nullptr; }
+ASTUnit::ConcurrencyState::~ConcurrencyState() {}
+void ASTUnit::ConcurrencyState::start() {}
+void ASTUnit::ConcurrencyState::finish() {}
+
+#endif // NDEBUG
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp b/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp
new file mode 100644
index 0000000..87f3d17
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp
@@ -0,0 +1,694 @@
+//===--- CacheTokens.cpp - Caching of lexer tokens for PTH support --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides a possible implementation of PTH support for Clang that is
+// based on caching lexed tokens and identifiers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemStatCache.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/PTHManager.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/OnDiskHashTable.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+
+// FIXME: put this somewhere else?
+#ifndef S_ISDIR
+#define S_ISDIR(x) (((x)&_S_IFDIR)!=0)
+#endif
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// PTH-specific stuff.
+//===----------------------------------------------------------------------===//
+
+typedef uint32_t Offset;
+
+namespace {
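+/// A PTH file-table entry: the offsets within the PTH file of a file's
+/// cached token data and of its preprocessor-conditional (#if/#endif) table.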
+class PTHEntry {
+ Offset TokenData, PPCondData;
+
+public:
+ PTHEntry() {}
+
+ PTHEntry(Offset td, Offset ppcd)
+ : TokenData(td), PPCondData(ppcd) {}
+
+ Offset getTokenOffset() const { return TokenData; }
+ Offset getPPCondTableOffset() const { return PPCondData; }
+};
+
+
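+/// Key for the on-disk file table: a real FileEntry (IsFE), a path with
+/// cached stat data (IsDE), or a path that could not be stat'ed (IsNoExist).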
+class PTHEntryKeyVariant {
+ union { const FileEntry* FE; const char* Path; };
+ enum { IsFE = 0x1, IsDE = 0x2, IsNoExist = 0x0 } Kind;
+ FileData *Data;
+
+public:
+ PTHEntryKeyVariant(const FileEntry *fe) : FE(fe), Kind(IsFE), Data(nullptr) {}
+
+ PTHEntryKeyVariant(FileData *Data, const char *path)
+ : Path(path), Kind(IsDE), Data(new FileData(*Data)) {}
+
+ explicit PTHEntryKeyVariant(const char *path)
+ : Path(path), Kind(IsNoExist), Data(nullptr) {}
+
+ bool isFile() const { return Kind == IsFE; }
+
+ StringRef getString() const {
+ return Kind == IsFE ? FE->getName() : Path;
+ }
+
+ unsigned getKind() const { return (unsigned) Kind; }
+
+ void EmitData(raw_ostream& Out) {
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+ switch (Kind) {
+ case IsFE: {
+ // Emit stat information.
+ llvm::sys::fs::UniqueID UID = FE->getUniqueID();
+ LE.write<uint64_t>(UID.getFile());
+ LE.write<uint64_t>(UID.getDevice());
+ LE.write<uint64_t>(FE->getModificationTime());
+ LE.write<uint64_t>(FE->getSize());
+ } break;
+ case IsDE:
+ // Emit stat information.
+ LE.write<uint64_t>(Data->UniqueID.getFile());
+ LE.write<uint64_t>(Data->UniqueID.getDevice());
+ LE.write<uint64_t>(Data->ModTime);
+ LE.write<uint64_t>(Data->Size);
+ delete Data;
+ break;
+ default:
+ break;
+ }
+ }
+
+ unsigned getRepresentationLength() const {
+ return Kind == IsNoExist ? 0 : 4 * 8;
+ }
+};
+
+class FileEntryPTHEntryInfo {
+public:
+ typedef PTHEntryKeyVariant key_type;
+ typedef key_type key_type_ref;
+
+ typedef PTHEntry data_type;
+ typedef const PTHEntry& data_type_ref;
+
+ typedef unsigned hash_value_type;
+ typedef unsigned offset_type;
+
+ static hash_value_type ComputeHash(PTHEntryKeyVariant V) {
+ return llvm::HashString(V.getString());
+ }
+
+ static std::pair<unsigned,unsigned>
+ EmitKeyDataLength(raw_ostream& Out, PTHEntryKeyVariant V,
+ const PTHEntry& E) {
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+
+ unsigned n = V.getString().size() + 1 + 1;
+ LE.write<uint16_t>(n);
+
+ unsigned m = V.getRepresentationLength() + (V.isFile() ? 4 + 4 : 0);
+ LE.write<uint8_t>(m);
+
+ return std::make_pair(n, m);
+ }
+
+ static void EmitKey(raw_ostream& Out, PTHEntryKeyVariant V, unsigned n){
+ using namespace llvm::support;
+ // Emit the entry kind.
+ endian::Writer<little>(Out).write<uint8_t>((unsigned)V.getKind());
+ // Emit the string.
+ Out.write(V.getString().data(), n - 1);
+ }
+
+ static void EmitData(raw_ostream& Out, PTHEntryKeyVariant V,
+ const PTHEntry& E, unsigned) {
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+
+ // For file entries, emit the offsets into the PTH file for the token data
+ // and the preprocessor blocks table.
+ if (V.isFile()) {
+ LE.write<uint32_t>(E.getTokenOffset());
+ LE.write<uint32_t>(E.getPPCondTableOffset());
+ }
+
+ // Emit any other data associated with the key (i.e., stat information).
+ V.EmitData(Out);
+ }
+};
+
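+/// An Offset value that may not have been assigned yet.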
+class OffsetOpt {
+ bool valid;
+ Offset off;
+public:
+ OffsetOpt() : valid(false) {}
+ bool hasOffset() const { return valid; }
+ Offset getOffset() const { assert(valid); return off; }
+ void setOffset(Offset o) { off = o; valid = true; }
+};
+} // end anonymous namespace
+
+typedef llvm::OnDiskChainedHashTableGenerator<FileEntryPTHEntryInfo> PTHMap;
+
+namespace {
+class PTHWriter {
+ typedef llvm::DenseMap<const IdentifierInfo*,uint32_t> IDMap;
+ typedef llvm::StringMap<OffsetOpt, llvm::BumpPtrAllocator> CachedStrsTy;
+
+ IDMap IM;
+ raw_pwrite_stream &Out;
+ Preprocessor& PP;
+ uint32_t idcount;
+ PTHMap PM;
+ CachedStrsTy CachedStrs;
+ Offset CurStrOffset;
+ std::vector<llvm::StringMapEntry<OffsetOpt>*> StrEntries;
+
+ /// Get the persistent id for the given IdentifierInfo*.
+ uint32_t ResolveID(const IdentifierInfo* II);
+
+ /// Emit a token to the PTH file.
+ void EmitToken(const Token& T);
+
+ void Emit8(uint32_t V) {
+ using namespace llvm::support;
+ endian::Writer<little>(Out).write<uint8_t>(V);
+ }
+
+ void Emit16(uint32_t V) {
+ using namespace llvm::support;
+ endian::Writer<little>(Out).write<uint16_t>(V);
+ }
+
+ void Emit32(uint32_t V) {
+ using namespace llvm::support;
+ endian::Writer<little>(Out).write<uint32_t>(V);
+ }
+
+ void EmitBuf(const char *Ptr, unsigned NumBytes) {
+ Out.write(Ptr, NumBytes);
+ }
+
+ void EmitString(StringRef V) {
+ using namespace llvm::support;
+ endian::Writer<little>(Out).write<uint16_t>(V.size());
+ EmitBuf(V.data(), V.size());
+ }
+
+ /// EmitIdentifierTable - Emits two tables to the PTH file. The first is
+ /// a hashtable mapping from identifier strings to persistent IDs.
+ /// The second is a straight table mapping from persistent IDs to string data
+ /// (the keys of the first table).
+ std::pair<Offset, Offset> EmitIdentifierTable();
+
+ /// EmitFileTable - Emit a table mapping from file name strings to PTH
+ /// token data.
+ Offset EmitFileTable() { return PM.Emit(Out); }
+
+ PTHEntry LexTokens(Lexer& L);
+ Offset EmitCachedSpellings();
+
+public:
+ PTHWriter(raw_pwrite_stream &out, Preprocessor &pp)
+ : Out(out), PP(pp), idcount(0), CurStrOffset(0) {}
+
+ PTHMap &getPM() { return PM; }
+ void GeneratePTH(const std::string &MainFile);
+};
+} // end anonymous namespace
+
+uint32_t PTHWriter::ResolveID(const IdentifierInfo* II) {
+ // Null IdentifierInfos map to the persistent ID 0.
+ if (!II)
+ return 0;
+
+ IDMap::iterator I = IM.find(II);
+ if (I != IM.end())
+ return I->second; // We've already added 1.
+
+ IM[II] = ++idcount; // Pre-increment since '0' is reserved for NULL.
+ return idcount;
+}
+
+void PTHWriter::EmitToken(const Token& T) {
+ // Emit the token kind, flags, and length.
+ Emit32(((uint32_t) T.getKind()) | ((((uint32_t) T.getFlags())) << 8)|
+ (((uint32_t) T.getLength()) << 16));
+
+ if (!T.isLiteral()) {
+ Emit32(ResolveID(T.getIdentifierInfo()));
+ } else {
+ // We cache *un-cleaned* spellings. This gives us 100% fidelity with the
+ // source code.
+ StringRef s(T.getLiteralData(), T.getLength());
+
+ // Get the string entry.
+ auto &E = *CachedStrs.insert(std::make_pair(s, OffsetOpt())).first;
+
+ // If this is a new string entry, bump the PTH offset.
+ if (!E.second.hasOffset()) {
+ E.second.setOffset(CurStrOffset);
+ StrEntries.push_back(&E);
+ CurStrOffset += s.size() + 1;
+ }
+
+ // Emit the relative offset into the PTH file for the spelling string.
+ Emit32(E.second.getOffset());
+ }
+
+ // Emit the offset into the original source file of this token so that we
+ // can reconstruct its SourceLocation.
+ Emit32(PP.getSourceManager().getFileOffset(T.getLocation()));
+}
+
+PTHEntry PTHWriter::LexTokens(Lexer& L) {
+ // Pad with 0's so that we emit tokens at a 4-byte alignment.
+ // This speeds up reading them back in.
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+ uint32_t TokenOff = Out.tell();
+ for (uint64_t N = llvm::OffsetToAlignment(TokenOff, 4); N; --N, ++TokenOff)
+ LE.write<uint8_t>(0);
+
+ // Keep track of matching '#if' ... '#endif'.
+ typedef std::vector<std::pair<Offset, unsigned> > PPCondTable;
+ PPCondTable PPCond;
+ std::vector<unsigned> PPStartCond;
+ bool ParsingPreprocessorDirective = false;
+ Token Tok;
+
+ do {
+ L.LexFromRawLexer(Tok);
+ NextToken:
+
+ if ((Tok.isAtStartOfLine() || Tok.is(tok::eof)) &&
+ ParsingPreprocessorDirective) {
+ // Insert an eod token into the token cache. It has the same
+ // position as the next token that is not on the same line as the
+ // preprocessor directive. Observe that we continue processing
+ // 'Tok' when we exit this branch.
+ Token Tmp = Tok;
+ Tmp.setKind(tok::eod);
+ Tmp.clearFlag(Token::StartOfLine);
+ Tmp.setIdentifierInfo(nullptr);
+ EmitToken(Tmp);
+ ParsingPreprocessorDirective = false;
+ }
+
+ if (Tok.is(tok::raw_identifier)) {
+ PP.LookUpIdentifierInfo(Tok);
+ EmitToken(Tok);
+ continue;
+ }
+
+ if (Tok.is(tok::hash) && Tok.isAtStartOfLine()) {
+ // Special processing for #include. Store the '#' token and lex
+ // the next token.
+ assert(!ParsingPreprocessorDirective);
+ Offset HashOff = (Offset) Out.tell();
+
+ // Get the next token.
+ Token NextTok;
+ L.LexFromRawLexer(NextTok);
+
+ // If we see the start of line, then we had a null directive "#". In
+ // this case, discard both tokens.
+ if (NextTok.isAtStartOfLine())
+ goto NextToken;
+
+ // The token is the start of a directive. Emit it.
+ EmitToken(Tok);
+ Tok = NextTok;
+
+ // Did we see 'include'/'import'/'include_next'?
+ if (Tok.isNot(tok::raw_identifier)) {
+ EmitToken(Tok);
+ continue;
+ }
+
+ IdentifierInfo* II = PP.LookUpIdentifierInfo(Tok);
+ tok::PPKeywordKind K = II->getPPKeywordID();
+
+ ParsingPreprocessorDirective = true;
+
+ switch (K) {
+ case tok::pp_not_keyword:
+ // Invalid directives "#foo" can occur in #if 0 blocks etc, just pass
+ // them through.
+ default:
+ break;
+
+ case tok::pp_include:
+ case tok::pp_import:
+ case tok::pp_include_next: {
+ // Save the 'include' token.
+ EmitToken(Tok);
+ // Lex the next token as an include string.
+ L.setParsingPreprocessorDirective(true);
+ L.LexIncludeFilename(Tok);
+ L.setParsingPreprocessorDirective(false);
+ assert(!Tok.isAtStartOfLine());
+ if (Tok.is(tok::raw_identifier))
+ PP.LookUpIdentifierInfo(Tok);
+
+ break;
+ }
+ case tok::pp_if:
+ case tok::pp_ifdef:
+ case tok::pp_ifndef: {
+ // Add an entry for '#if' and friends. We initially set the target
+ // index to 0. This will get backpatched when we hit #endif.
+ PPStartCond.push_back(PPCond.size());
+ PPCond.push_back(std::make_pair(HashOff, 0U));
+ break;
+ }
+ case tok::pp_endif: {
+        // Add an entry for '#endif'. We set the target table index to itself;
+        // this will later be translated to zero when emitting the PTH file.
+        // In memory, 0 is kept for uninitialized indices (which makes missed
+        // backpatches easy to spot), so the placeholder here must be nonzero.
+ unsigned index = PPCond.size();
+ // Backpatch the opening '#if' entry.
+ assert(!PPStartCond.empty());
+ assert(PPCond.size() > PPStartCond.back());
+ assert(PPCond[PPStartCond.back()].second == 0);
+ PPCond[PPStartCond.back()].second = index;
+ PPStartCond.pop_back();
+ // Add the new entry to PPCond.
+ PPCond.push_back(std::make_pair(HashOff, index));
+ EmitToken(Tok);
+
+ // Some files have gibberish on the same line as '#endif'.
+ // Discard these tokens.
+ do
+ L.LexFromRawLexer(Tok);
+ while (Tok.isNot(tok::eof) && !Tok.isAtStartOfLine());
+ // We have the next token in hand.
+ // Don't immediately lex the next one.
+ goto NextToken;
+ }
+ case tok::pp_elif:
+ case tok::pp_else: {
+ // Add an entry for #elif or #else.
+ // This serves as both a closing and opening of a conditional block.
+ // This means that its entry will get backpatched later.
+ unsigned index = PPCond.size();
+ // Backpatch the previous '#if' entry.
+ assert(!PPStartCond.empty());
+ assert(PPCond.size() > PPStartCond.back());
+ assert(PPCond[PPStartCond.back()].second == 0);
+ PPCond[PPStartCond.back()].second = index;
+ PPStartCond.pop_back();
+ // Now add '#elif' as a new block opening.
+ PPCond.push_back(std::make_pair(HashOff, 0U));
+ PPStartCond.push_back(index);
+ break;
+ }
+ }
+ }
+
+ EmitToken(Tok);
+ }
+ while (Tok.isNot(tok::eof));
+
+  assert(PPStartCond.empty() && "Error: imbalanced preprocessor conditionals.");
+
+ // Next write out PPCond.
+ Offset PPCondOff = (Offset) Out.tell();
+
+  // Write out the size of PPCond so that clients can identify empty tables.
+ Emit32(PPCond.size());
+
+ for (unsigned i = 0, e = PPCond.size(); i!=e; ++i) {
+ Emit32(PPCond[i].first - TokenOff);
+ uint32_t x = PPCond[i].second;
+ assert(x != 0 && "PPCond entry not backpatched.");
+ // Emit zero for #endifs. This allows us to do checking when
+ // we read the PTH file back in.
+ Emit32(x == i ? 0 : x);
+ }
+
+ return PTHEntry(TokenOff, PPCondOff);
+}
+
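+// Illustrative sketch, not part of this change: each PPCond record is two
+// LE32 words, { HashOffset - TokenOff, Target }. After the emission loop
+// above, Target == 0 marks a '#endif', while any other value is the table
+// index of the entry that closes the block, so a reader can skip a failed
+// conditional by hopping from '#if' to its matching '#elif'/'#else'/'#endif'
+// entry without re-lexing the tokens in between.
+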
+Offset PTHWriter::EmitCachedSpellings() {
+  // Write each cached string to the PTH file.
+ Offset SpellingsOff = Out.tell();
+
+ for (std::vector<llvm::StringMapEntry<OffsetOpt>*>::iterator
+ I = StrEntries.begin(), E = StrEntries.end(); I!=E; ++I)
+ EmitBuf((*I)->getKeyData(), (*I)->getKeyLength()+1 /*nul included*/);
+
+ return SpellingsOff;
+}
+
+static uint32_t swap32le(uint32_t X) {
+ return llvm::support::endian::byte_swap<uint32_t, llvm::support::little>(X);
+}
+
+static void pwrite32le(raw_pwrite_stream &OS, uint32_t Val, uint64_t &Off) {
+ uint32_t LEVal = swap32le(Val);
+ OS.pwrite(reinterpret_cast<const char *>(&LEVal), 4, Off);
+ Off += 4;
+}
+
+void PTHWriter::GeneratePTH(const std::string &MainFile) {
+ // Generate the prologue.
+ Out << "cfe-pth" << '\0';
+ Emit32(PTHManager::Version);
+
+ // Leave 4 words for the prologue.
+ Offset PrologueOffset = Out.tell();
+ for (unsigned i = 0; i < 4; ++i)
+ Emit32(0);
+
+ // Write the name of the MainFile.
+ if (!MainFile.empty()) {
+ EmitString(MainFile);
+ } else {
+ // String with 0 bytes.
+ Emit16(0);
+ }
+ Emit8(0);
+
+ // Iterate over all the files in SourceManager. Create a lexer
+ // for each file and cache the tokens.
+ SourceManager &SM = PP.getSourceManager();
+ const LangOptions &LOpts = PP.getLangOpts();
+
+ for (SourceManager::fileinfo_iterator I = SM.fileinfo_begin(),
+ E = SM.fileinfo_end(); I != E; ++I) {
+ const SrcMgr::ContentCache &C = *I->second;
+ const FileEntry *FE = C.OrigEntry;
+
+ // FIXME: Handle files with non-absolute paths.
+ if (llvm::sys::path::is_relative(FE->getName()))
+ continue;
+
+ const llvm::MemoryBuffer *B = C.getBuffer(PP.getDiagnostics(), SM);
+ if (!B) continue;
+
+ FileID FID = SM.createFileID(FE, SourceLocation(), SrcMgr::C_User);
+ const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
+ Lexer L(FID, FromFile, SM, LOpts);
+ PM.insert(FE, LexTokens(L));
+ }
+
+ // Write out the identifier table.
+ const std::pair<Offset,Offset> &IdTableOff = EmitIdentifierTable();
+
+ // Write out the cached strings table.
+ Offset SpellingOff = EmitCachedSpellings();
+
+ // Write out the file table.
+ Offset FileTableOff = EmitFileTable();
+
+ // Finally, write the prologue.
+ uint64_t Off = PrologueOffset;
+ pwrite32le(Out, IdTableOff.first, Off);
+ pwrite32le(Out, IdTableOff.second, Off);
+ pwrite32le(Out, FileTableOff, Off);
+ pwrite32le(Out, SpellingOff, Off);
+}
+
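+// Illustrative sketch, not part of this change: the resulting file layout,
+// as emitted by GeneratePTH above:
+//
+//   "cfe-pth\0" , Version , prologue[4]:
+//     [0] offset of the persistent-ID -> string-offset table
+//     [1] offset of the identifier-string hashtable
+//     [2] offset of the file table
+//     [3] offset of the cached spellings
+//   then the main-file name, per-file token data, and the tables themselves.
+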
+namespace {
+/// StatListener - A simple "interpose" object used to monitor stat calls
+/// invoked by FileManager while processing the original sources used
+/// as input to PTH generation. StatListener populates the PTHWriter's
+/// file map with stat information for directories as well as negative stats.
+/// Stat information for files is populated elsewhere.
+class StatListener : public FileSystemStatCache {
+ PTHMap &PM;
+public:
+ StatListener(PTHMap &pm) : PM(pm) {}
+ ~StatListener() override {}
+
+ LookupResult getStat(const char *Path, FileData &Data, bool isFile,
+ std::unique_ptr<vfs::File> *F,
+ vfs::FileSystem &FS) override {
+ LookupResult Result = statChained(Path, Data, isFile, F, FS);
+
+ if (Result == CacheMissing) // Failed 'stat'.
+ PM.insert(PTHEntryKeyVariant(Path), PTHEntry());
+ else if (Data.IsDirectory) {
+ // Only cache directories with absolute paths.
+ if (llvm::sys::path::is_relative(Path))
+ return Result;
+
+ PM.insert(PTHEntryKeyVariant(&Data, Path), PTHEntry());
+ }
+
+ return Result;
+ }
+};
+} // end anonymous namespace
+
+void clang::CacheTokens(Preprocessor &PP, raw_pwrite_stream *OS) {
+ // Get the name of the main file.
+ const SourceManager &SrcMgr = PP.getSourceManager();
+ const FileEntry *MainFile = SrcMgr.getFileEntryForID(SrcMgr.getMainFileID());
+ SmallString<128> MainFilePath(MainFile->getName());
+
+ llvm::sys::fs::make_absolute(MainFilePath);
+
+ // Create the PTHWriter.
+ PTHWriter PW(*OS, PP);
+
+ // Install the 'stat' system call listener in the FileManager.
+ auto StatCacheOwner = llvm::make_unique<StatListener>(PW.getPM());
+ StatListener *StatCache = StatCacheOwner.get();
+ PP.getFileManager().addStatCache(std::move(StatCacheOwner),
+ /*AtBeginning=*/true);
+
+ // Lex through the entire file. This will populate SourceManager with
+ // all of the header information.
+ Token Tok;
+ PP.EnterMainSourceFile();
+ do { PP.Lex(Tok); } while (Tok.isNot(tok::eof));
+
+ // Generate the PTH file.
+ PP.getFileManager().removeStatCache(StatCache);
+ PW.GeneratePTH(MainFilePath.str());
+}
+
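+// Illustrative usage sketch, not part of this change ("tokens.pth" and CI
+// are assumed to exist in the caller):
+//
+//   std::error_code EC;
+//   llvm::raw_fd_ostream OS("tokens.pth", EC, llvm::sys::fs::F_None);
+//   if (!EC)
+//     clang::CacheTokens(CI.getPreprocessor(), &OS);
+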
+//===----------------------------------------------------------------------===//
+
+namespace {
+class PTHIdKey {
+public:
+ const IdentifierInfo* II;
+ uint32_t FileOffset;
+};
+
+class PTHIdentifierTableTrait {
+public:
+ typedef PTHIdKey* key_type;
+ typedef key_type key_type_ref;
+
+ typedef uint32_t data_type;
+ typedef data_type data_type_ref;
+
+ typedef unsigned hash_value_type;
+ typedef unsigned offset_type;
+
+ static hash_value_type ComputeHash(PTHIdKey* key) {
+ return llvm::HashString(key->II->getName());
+ }
+
+ static std::pair<unsigned,unsigned>
+ EmitKeyDataLength(raw_ostream& Out, const PTHIdKey* key, uint32_t) {
+ using namespace llvm::support;
+ unsigned n = key->II->getLength() + 1;
+ endian::Writer<little>(Out).write<uint16_t>(n);
+ return std::make_pair(n, sizeof(uint32_t));
+ }
+
+ static void EmitKey(raw_ostream& Out, PTHIdKey* key, unsigned n) {
+ // Record the location of the key data. This is used when generating
+ // the mapping from persistent IDs to strings.
+ key->FileOffset = Out.tell();
+ Out.write(key->II->getNameStart(), n);
+ }
+
+ static void EmitData(raw_ostream& Out, PTHIdKey*, uint32_t pID,
+ unsigned) {
+ using namespace llvm::support;
+ endian::Writer<little>(Out).write<uint32_t>(pID);
+ }
+};
+} // end anonymous namespace
+
+/// EmitIdentifierTable - Emits two tables to the PTH file. The first is
+/// a hashtable mapping from identifier strings to persistent IDs. The second
+/// is a straight table mapping from persistent IDs to string data (the
+/// keys of the first table).
+///
+std::pair<Offset,Offset> PTHWriter::EmitIdentifierTable() {
+ // Build two maps:
+ // (1) an inverse map from persistent IDs -> (IdentifierInfo*,Offset)
+ // (2) a map from (IdentifierInfo*, Offset)* -> persistent IDs
+
+ // Note that we use 'calloc', so all the bytes are 0.
+ PTHIdKey *IIDMap = (PTHIdKey*)calloc(idcount, sizeof(PTHIdKey));
+
+ // Create the hashtable.
+ llvm::OnDiskChainedHashTableGenerator<PTHIdentifierTableTrait> IIOffMap;
+
+ // Generate mapping from persistent IDs -> IdentifierInfo*.
+ for (IDMap::iterator I = IM.begin(), E = IM.end(); I != E; ++I) {
+ // Decrement by 1 because we are using a vector for the lookup and
+ // 0 is reserved for NULL.
+ assert(I->second > 0);
+ assert(I->second-1 < idcount);
+ unsigned idx = I->second-1;
+
+ // Store the mapping from persistent ID to IdentifierInfo*
+ IIDMap[idx].II = I->first;
+
+ // Store the reverse mapping in a hashtable.
+ IIOffMap.insert(&IIDMap[idx], I->second);
+ }
+
+  // Write out the inverse map first. This causes the PTHIdKey entries to
+ // record PTH file offsets for the string data. This is used to write
+ // the second table.
+ Offset StringTableOffset = IIOffMap.Emit(Out);
+
+ // Now emit the table mapping from persistent IDs to PTH file offsets.
+ Offset IDOff = Out.tell();
+ Emit32(idcount); // Emit the number of identifiers.
+ for (unsigned i = 0 ; i < idcount; ++i)
+ Emit32(IIDMap[i].FileOffset);
+
+ // Finally, release the inverse map.
+ free(IIDMap);
+
+ return std::make_pair(IDOff, StringTableOffset);
+}
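+
+// Illustrative sketch, not part of this change: consumers use the two tables
+// in opposite directions. The hashtable maps a spelling to its persistent
+// ID; the second table maps an ID back to the file offset of its
+// nul-terminated spelling (ReadLE32 is a hypothetical helper):
+//
+//   // Layout: LE32 count, then count LE32 string offsets.
+//   uint32_t StrOff = ReadLE32(IDTable + 4 + (PersistentID - 1) * 4);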
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ChainedDiagnosticConsumer.cpp b/contrib/llvm/tools/clang/lib/Frontend/ChainedDiagnosticConsumer.cpp
new file mode 100644
index 0000000..d77fd18
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/ChainedDiagnosticConsumer.cpp
@@ -0,0 +1,14 @@
+//===- ChainedDiagnosticConsumer.cpp - Chain Diagnostic Clients -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/ChainedDiagnosticConsumer.h"
+
+using namespace clang;
+
+void ChainedDiagnosticConsumer::anchor() { }
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp b/contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp
new file mode 100644
index 0000000..1c1081f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp
@@ -0,0 +1,298 @@
+//===- ChainedIncludesSource.cpp - Chained PCHs in Memory -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ChainedIncludesSource class, which converts headers
+// to chained PCHs in memory, mainly used for testing.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Frontend/ASTUnit.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Parse/ParseAST.h"
+#include "clang/Serialization/ASTReader.h"
+#include "clang/Serialization/ASTWriter.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+using namespace clang;
+
+namespace {
+class ChainedIncludesSource : public ExternalSemaSource {
+public:
+ ~ChainedIncludesSource() override;
+
+ ExternalSemaSource &getFinalReader() const { return *FinalReader; }
+
+ std::vector<CompilerInstance *> CIs;
+ IntrusiveRefCntPtr<ExternalSemaSource> FinalReader;
+
+protected:
+ //===----------------------------------------------------------------------===//
+ // ExternalASTSource interface.
+ //===----------------------------------------------------------------------===//
+
+ Decl *GetExternalDecl(uint32_t ID) override;
+ Selector GetExternalSelector(uint32_t ID) override;
+ uint32_t GetNumExternalSelectors() override;
+ Stmt *GetExternalDeclStmt(uint64_t Offset) override;
+ CXXCtorInitializer **GetExternalCXXCtorInitializers(uint64_t Offset) override;
+ CXXBaseSpecifier *GetExternalCXXBaseSpecifiers(uint64_t Offset) override;
+ bool FindExternalVisibleDeclsByName(const DeclContext *DC,
+ DeclarationName Name) override;
+ void
+ FindExternalLexicalDecls(const DeclContext *DC,
+ llvm::function_ref<bool(Decl::Kind)> IsKindWeWant,
+ SmallVectorImpl<Decl *> &Result) override;
+ void CompleteType(TagDecl *Tag) override;
+ void CompleteType(ObjCInterfaceDecl *Class) override;
+ void StartedDeserializing() override;
+ void FinishedDeserializing() override;
+ void StartTranslationUnit(ASTConsumer *Consumer) override;
+ void PrintStats() override;
+
+ /// Return the amount of memory used by memory buffers, breaking down
+ /// by heap-backed versus mmap'ed memory.
+ void getMemoryBufferSizes(MemoryBufferSizes &sizes) const override;
+
+ //===----------------------------------------------------------------------===//
+ // ExternalSemaSource interface.
+ //===----------------------------------------------------------------------===//
+
+ void InitializeSema(Sema &S) override;
+ void ForgetSema() override;
+ void ReadMethodPool(Selector Sel) override;
+ bool LookupUnqualified(LookupResult &R, Scope *S) override;
+};
+} // end anonymous namespace
+
+static ASTReader *
+createASTReader(CompilerInstance &CI, StringRef pchFile,
+ SmallVectorImpl<std::unique_ptr<llvm::MemoryBuffer>> &MemBufs,
+ SmallVectorImpl<std::string> &bufNames,
+ ASTDeserializationListener *deserialListener = nullptr) {
+ Preprocessor &PP = CI.getPreprocessor();
+ std::unique_ptr<ASTReader> Reader;
+ Reader.reset(new ASTReader(PP, CI.getASTContext(),
+ CI.getPCHContainerReader(),
+ /*Extensions=*/{ },
+ /*isysroot=*/"", /*DisableValidation=*/true));
+ for (unsigned ti = 0; ti < bufNames.size(); ++ti) {
+ StringRef sr(bufNames[ti]);
+ Reader->addInMemoryBuffer(sr, std::move(MemBufs[ti]));
+ }
+ Reader->setDeserializationListener(deserialListener);
+ switch (Reader->ReadAST(pchFile, serialization::MK_PCH, SourceLocation(),
+ ASTReader::ARR_None)) {
+ case ASTReader::Success:
+ // Set the predefines buffer as suggested by the PCH reader.
+ PP.setPredefines(Reader->getSuggestedPredefines());
+ return Reader.release();
+
+ case ASTReader::Failure:
+ case ASTReader::Missing:
+ case ASTReader::OutOfDate:
+ case ASTReader::VersionMismatch:
+ case ASTReader::ConfigurationMismatch:
+ case ASTReader::HadErrors:
+ break;
+ }
+ return nullptr;
+}
+
+ChainedIncludesSource::~ChainedIncludesSource() {
+ for (unsigned i = 0, e = CIs.size(); i != e; ++i)
+ delete CIs[i];
+}
+
+IntrusiveRefCntPtr<ExternalSemaSource> clang::createChainedIncludesSource(
+ CompilerInstance &CI, IntrusiveRefCntPtr<ExternalSemaSource> &Reader) {
+
+ std::vector<std::string> &includes = CI.getPreprocessorOpts().ChainedIncludes;
+ assert(!includes.empty() && "No '-chain-include' in options!");
+
+ IntrusiveRefCntPtr<ChainedIncludesSource> source(new ChainedIncludesSource());
+ InputKind IK = CI.getFrontendOpts().Inputs[0].getKind();
+
+ SmallVector<std::unique_ptr<llvm::MemoryBuffer>, 4> SerialBufs;
+ SmallVector<std::string, 4> serialBufNames;
+
+ for (unsigned i = 0, e = includes.size(); i != e; ++i) {
+ bool firstInclude = (i == 0);
+ std::unique_ptr<CompilerInvocation> CInvok;
+ CInvok.reset(new CompilerInvocation(CI.getInvocation()));
+
+ CInvok->getPreprocessorOpts().ChainedIncludes.clear();
+ CInvok->getPreprocessorOpts().ImplicitPCHInclude.clear();
+ CInvok->getPreprocessorOpts().ImplicitPTHInclude.clear();
+ CInvok->getPreprocessorOpts().DisablePCHValidation = true;
+ CInvok->getPreprocessorOpts().Includes.clear();
+ CInvok->getPreprocessorOpts().MacroIncludes.clear();
+ CInvok->getPreprocessorOpts().Macros.clear();
+
+ CInvok->getFrontendOpts().Inputs.clear();
+ FrontendInputFile InputFile(includes[i], IK);
+ CInvok->getFrontendOpts().Inputs.push_back(InputFile);
+
+ TextDiagnosticPrinter *DiagClient =
+ new TextDiagnosticPrinter(llvm::errs(), new DiagnosticOptions());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagID, &CI.getDiagnosticOpts(), DiagClient));
+
+ std::unique_ptr<CompilerInstance> Clang(
+ new CompilerInstance(CI.getPCHContainerOperations()));
+ Clang->setInvocation(CInvok.release());
+ Clang->setDiagnostics(Diags.get());
+ Clang->setTarget(TargetInfo::CreateTargetInfo(
+ Clang->getDiagnostics(), Clang->getInvocation().TargetOpts));
+ Clang->createFileManager();
+ Clang->createSourceManager(Clang->getFileManager());
+ Clang->createPreprocessor(TU_Prefix);
+ Clang->getDiagnosticClient().BeginSourceFile(Clang->getLangOpts(),
+ &Clang->getPreprocessor());
+ Clang->createASTContext();
+
+ auto Buffer = std::make_shared<PCHBuffer>();
+ ArrayRef<llvm::IntrusiveRefCntPtr<ModuleFileExtension>> Extensions;
+ auto consumer = llvm::make_unique<PCHGenerator>(
+ Clang->getPreprocessor(), "-", nullptr, /*isysroot=*/"", Buffer,
+ Extensions);
+ Clang->getASTContext().setASTMutationListener(
+ consumer->GetASTMutationListener());
+ Clang->setASTConsumer(std::move(consumer));
+ Clang->createSema(TU_Prefix, nullptr);
+
+ if (firstInclude) {
+ Preprocessor &PP = Clang->getPreprocessor();
+ PP.getBuiltinInfo().initializeBuiltins(PP.getIdentifierTable(),
+ PP.getLangOpts());
+ } else {
+ assert(!SerialBufs.empty());
+ SmallVector<std::unique_ptr<llvm::MemoryBuffer>, 4> Bufs;
+ // TODO: Pass through the existing MemoryBuffer instances instead of
+ // allocating new ones.
+ for (auto &SB : SerialBufs)
+ Bufs.push_back(llvm::MemoryBuffer::getMemBuffer(SB->getBuffer()));
+ std::string pchName = includes[i-1];
+ llvm::raw_string_ostream os(pchName);
+ os << ".pch" << i-1;
+ serialBufNames.push_back(os.str());
+
+ IntrusiveRefCntPtr<ASTReader> Reader;
+ Reader = createASTReader(
+ *Clang, pchName, Bufs, serialBufNames,
+ Clang->getASTConsumer().GetASTDeserializationListener());
+ if (!Reader)
+ return nullptr;
+ Clang->setModuleManager(Reader);
+ Clang->getASTContext().setExternalSource(Reader);
+ }
+
+ if (!Clang->InitializeSourceManager(InputFile))
+ return nullptr;
+
+ ParseAST(Clang->getSema());
+ Clang->getDiagnosticClient().EndSourceFile();
+ assert(Buffer->IsComplete && "serialization did not complete");
+ auto &serialAST = Buffer->Data;
+ SerialBufs.push_back(llvm::MemoryBuffer::getMemBufferCopy(
+ StringRef(serialAST.data(), serialAST.size())));
+ serialAST.clear();
+ source->CIs.push_back(Clang.release());
+ }
+
+ assert(!SerialBufs.empty());
+ std::string pchName = includes.back() + ".pch-final";
+ serialBufNames.push_back(pchName);
+ Reader = createASTReader(CI, pchName, SerialBufs, serialBufNames);
+ if (!Reader)
+ return nullptr;
+
+ source->FinalReader = Reader;
+ return source;
+}
+
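+// Illustrative usage sketch, not part of this change: this source is driven
+// by the cc1 option '-chain-include', e.g.
+//
+//   clang -cc1 -chain-include a.h -chain-include b.h main.c
+//
+// which builds a.h into an in-memory PCH, builds b.h chained on top of it,
+// and then parses main.c against the final reader.
+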
+//===----------------------------------------------------------------------===//
+// ExternalASTSource interface.
+//===----------------------------------------------------------------------===//
+
+Decl *ChainedIncludesSource::GetExternalDecl(uint32_t ID) {
+ return getFinalReader().GetExternalDecl(ID);
+}
+Selector ChainedIncludesSource::GetExternalSelector(uint32_t ID) {
+ return getFinalReader().GetExternalSelector(ID);
+}
+uint32_t ChainedIncludesSource::GetNumExternalSelectors() {
+ return getFinalReader().GetNumExternalSelectors();
+}
+Stmt *ChainedIncludesSource::GetExternalDeclStmt(uint64_t Offset) {
+ return getFinalReader().GetExternalDeclStmt(Offset);
+}
+CXXBaseSpecifier *
+ChainedIncludesSource::GetExternalCXXBaseSpecifiers(uint64_t Offset) {
+ return getFinalReader().GetExternalCXXBaseSpecifiers(Offset);
+}
+CXXCtorInitializer **
+ChainedIncludesSource::GetExternalCXXCtorInitializers(uint64_t Offset) {
+ return getFinalReader().GetExternalCXXCtorInitializers(Offset);
+}
+bool
+ChainedIncludesSource::FindExternalVisibleDeclsByName(const DeclContext *DC,
+ DeclarationName Name) {
+ return getFinalReader().FindExternalVisibleDeclsByName(DC, Name);
+}
+void ChainedIncludesSource::FindExternalLexicalDecls(
+ const DeclContext *DC, llvm::function_ref<bool(Decl::Kind)> IsKindWeWant,
+ SmallVectorImpl<Decl *> &Result) {
+ return getFinalReader().FindExternalLexicalDecls(DC, IsKindWeWant, Result);
+}
+void ChainedIncludesSource::CompleteType(TagDecl *Tag) {
+ return getFinalReader().CompleteType(Tag);
+}
+void ChainedIncludesSource::CompleteType(ObjCInterfaceDecl *Class) {
+ return getFinalReader().CompleteType(Class);
+}
+void ChainedIncludesSource::StartedDeserializing() {
+ return getFinalReader().StartedDeserializing();
+}
+void ChainedIncludesSource::FinishedDeserializing() {
+ return getFinalReader().FinishedDeserializing();
+}
+void ChainedIncludesSource::StartTranslationUnit(ASTConsumer *Consumer) {
+ return getFinalReader().StartTranslationUnit(Consumer);
+}
+void ChainedIncludesSource::PrintStats() {
+ return getFinalReader().PrintStats();
+}
+void ChainedIncludesSource::getMemoryBufferSizes(
+    MemoryBufferSizes &sizes) const {
+ for (unsigned i = 0, e = CIs.size(); i != e; ++i) {
+ if (const ExternalASTSource *eSrc =
+ CIs[i]->getASTContext().getExternalSource()) {
+ eSrc->getMemoryBufferSizes(sizes);
+ }
+ }
+
+ getFinalReader().getMemoryBufferSizes(sizes);
+}
+
+void ChainedIncludesSource::InitializeSema(Sema &S) {
+ return getFinalReader().InitializeSema(S);
+}
+void ChainedIncludesSource::ForgetSema() {
+ return getFinalReader().ForgetSema();
+}
+void ChainedIncludesSource::ReadMethodPool(Selector Sel) {
+ getFinalReader().ReadMethodPool(Sel);
+}
+bool ChainedIncludesSource::LookupUnqualified(LookupResult &R, Scope *S) {
+ return getFinalReader().LookupUnqualified(R, S);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CodeGenOptions.cpp b/contrib/llvm/tools/clang/lib/Frontend/CodeGenOptions.cpp
new file mode 100644
index 0000000..50bb9f9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/CodeGenOptions.cpp
@@ -0,0 +1,32 @@
+//===--- CodeGenOptions.cpp -----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/CodeGenOptions.h"
+#include <string.h>
+
+namespace clang {
+
+CodeGenOptions::CodeGenOptions() {
+#define CODEGENOPT(Name, Bits, Default) Name = Default;
+#define ENUM_CODEGENOPT(Name, Type, Bits, Default) set##Name(Default);
+#include "clang/Frontend/CodeGenOptions.def"
+
+ RelocationModel = "pic";
+ memcpy(CoverageVersion, "402*", 4);
+}
+
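+// Illustrative sketch, not part of this change: each CODEGENOPT(Name, Bits,
+// Default) record in CodeGenOptions.def expands to a default assignment in
+// the constructor above; for example, an entry such as
+//
+//   CODEGENOPT(OptimizationLevel, 2, 0)
+//
+// expands to 'OptimizationLevel = 0;'.
+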
+bool CodeGenOptions::isNoBuiltinFunc(const char *Name) const {
+ StringRef FuncName(Name);
+ for (unsigned i = 0, e = NoBuiltinFuncs.size(); i != e; ++i)
+ if (FuncName.equals(NoBuiltinFuncs[i]))
+ return true;
+ return false;
+}
+
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp b/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp
new file mode 100644
index 0000000..3edcf5d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp
@@ -0,0 +1,1732 @@
+//===--- CompilerInstance.cpp ---------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/Version.h"
+#include "clang/Config/config.h"
+#include "clang/Frontend/ChainedDiagnosticConsumer.h"
+#include "clang/Frontend/FrontendAction.h"
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/LogDiagnosticPrinter.h"
+#include "clang/Frontend/SerializedDiagnosticPrinter.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Frontend/Utils.h"
+#include "clang/Frontend/VerifyDiagnosticConsumer.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/PTHManager.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/CodeCompleteConsumer.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Serialization/ASTReader.h"
+#include "clang/Serialization/GlobalModuleIndex.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/CrashRecoveryContext.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/LockFileManager.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <sys/stat.h>
+#include <system_error>
+#include <time.h>
+
+using namespace clang;
+
+CompilerInstance::CompilerInstance(
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ bool BuildingModule)
+ : ModuleLoader(BuildingModule), Invocation(new CompilerInvocation()),
+ ModuleManager(nullptr), ThePCHContainerOperations(PCHContainerOps),
+ BuildGlobalModuleIndex(false), HaveFullGlobalModuleIndex(false),
+ ModuleBuildFailed(false) {}
+
+CompilerInstance::~CompilerInstance() {
+ assert(OutputFiles.empty() && "Still output files in flight?");
+}
+
+void CompilerInstance::setInvocation(CompilerInvocation *Value) {
+ Invocation = Value;
+}
+
+bool CompilerInstance::shouldBuildGlobalModuleIndex() const {
+ return (BuildGlobalModuleIndex ||
+ (ModuleManager && ModuleManager->isGlobalIndexUnavailable() &&
+ getFrontendOpts().GenerateGlobalModuleIndex)) &&
+ !ModuleBuildFailed;
+}
+
+void CompilerInstance::setDiagnostics(DiagnosticsEngine *Value) {
+ Diagnostics = Value;
+}
+
+void CompilerInstance::setTarget(TargetInfo *Value) { Target = Value; }
+void CompilerInstance::setAuxTarget(TargetInfo *Value) { AuxTarget = Value; }
+
+void CompilerInstance::setFileManager(FileManager *Value) {
+ FileMgr = Value;
+ if (Value)
+ VirtualFileSystem = Value->getVirtualFileSystem();
+ else
+ VirtualFileSystem.reset();
+}
+
+void CompilerInstance::setSourceManager(SourceManager *Value) {
+ SourceMgr = Value;
+}
+
+void CompilerInstance::setPreprocessor(Preprocessor *Value) { PP = Value; }
+
+void CompilerInstance::setASTContext(ASTContext *Value) {
+ Context = Value;
+
+ if (Context && Consumer)
+ getASTConsumer().Initialize(getASTContext());
+}
+
+void CompilerInstance::setSema(Sema *S) {
+ TheSema.reset(S);
+}
+
+void CompilerInstance::setASTConsumer(std::unique_ptr<ASTConsumer> Value) {
+ Consumer = std::move(Value);
+
+ if (Context && Consumer)
+ getASTConsumer().Initialize(getASTContext());
+}
+
+void CompilerInstance::setCodeCompletionConsumer(CodeCompleteConsumer *Value) {
+ CompletionConsumer.reset(Value);
+}
+
+std::unique_ptr<Sema> CompilerInstance::takeSema() {
+ return std::move(TheSema);
+}
+
+IntrusiveRefCntPtr<ASTReader> CompilerInstance::getModuleManager() const {
+ return ModuleManager;
+}
+void CompilerInstance::setModuleManager(IntrusiveRefCntPtr<ASTReader> Reader) {
+ ModuleManager = Reader;
+}
+
+std::shared_ptr<ModuleDependencyCollector>
+CompilerInstance::getModuleDepCollector() const {
+ return ModuleDepCollector;
+}
+
+void CompilerInstance::setModuleDepCollector(
+ std::shared_ptr<ModuleDependencyCollector> Collector) {
+ ModuleDepCollector = Collector;
+}
+
+// Diagnostics
+static void SetUpDiagnosticLog(DiagnosticOptions *DiagOpts,
+ const CodeGenOptions *CodeGenOpts,
+ DiagnosticsEngine &Diags) {
+ std::error_code EC;
+ std::unique_ptr<raw_ostream> StreamOwner;
+ raw_ostream *OS = &llvm::errs();
+ if (DiagOpts->DiagnosticLogFile != "-") {
+ // Create the output stream.
+ auto FileOS = llvm::make_unique<llvm::raw_fd_ostream>(
+ DiagOpts->DiagnosticLogFile, EC,
+ llvm::sys::fs::F_Append | llvm::sys::fs::F_Text);
+ if (EC) {
+ Diags.Report(diag::warn_fe_cc_log_diagnostics_failure)
+ << DiagOpts->DiagnosticLogFile << EC.message();
+ } else {
+ FileOS->SetUnbuffered();
+ OS = FileOS.get();
+ StreamOwner = std::move(FileOS);
+ }
+ }
+
+ // Chain in the diagnostic client which will log the diagnostics.
+ auto Logger = llvm::make_unique<LogDiagnosticPrinter>(*OS, DiagOpts,
+ std::move(StreamOwner));
+ if (CodeGenOpts)
+ Logger->setDwarfDebugFlags(CodeGenOpts->DwarfDebugFlags);
+ assert(Diags.ownsClient());
+ Diags.setClient(
+ new ChainedDiagnosticConsumer(Diags.takeClient(), std::move(Logger)));
+}
+
+static void SetupSerializedDiagnostics(DiagnosticOptions *DiagOpts,
+ DiagnosticsEngine &Diags,
+ StringRef OutputFile) {
+ auto SerializedConsumer =
+ clang::serialized_diags::create(OutputFile, DiagOpts);
+
+ if (Diags.ownsClient()) {
+ Diags.setClient(new ChainedDiagnosticConsumer(
+ Diags.takeClient(), std::move(SerializedConsumer)));
+ } else {
+ Diags.setClient(new ChainedDiagnosticConsumer(
+ Diags.getClient(), std::move(SerializedConsumer)));
+ }
+}
+
+void CompilerInstance::createDiagnostics(DiagnosticConsumer *Client,
+ bool ShouldOwnClient) {
+ Diagnostics = createDiagnostics(&getDiagnosticOpts(), Client,
+ ShouldOwnClient, &getCodeGenOpts());
+}
+
+IntrusiveRefCntPtr<DiagnosticsEngine>
+CompilerInstance::createDiagnostics(DiagnosticOptions *Opts,
+ DiagnosticConsumer *Client,
+ bool ShouldOwnClient,
+ const CodeGenOptions *CodeGenOpts) {
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine>
+ Diags(new DiagnosticsEngine(DiagID, Opts));
+
+ // Create the diagnostic client for reporting errors or for
+ // implementing -verify.
+ if (Client) {
+ Diags->setClient(Client, ShouldOwnClient);
+ } else
+ Diags->setClient(new TextDiagnosticPrinter(llvm::errs(), Opts));
+
+ // Chain in -verify checker, if requested.
+ if (Opts->VerifyDiagnostics)
+ Diags->setClient(new VerifyDiagnosticConsumer(*Diags));
+
+ // Chain in -diagnostic-log-file dumper, if requested.
+ if (!Opts->DiagnosticLogFile.empty())
+ SetUpDiagnosticLog(Opts, CodeGenOpts, *Diags);
+
+ if (!Opts->DiagnosticSerializationFile.empty())
+ SetupSerializedDiagnostics(Opts, *Diags,
+ Opts->DiagnosticSerializationFile);
+
+ // Configure our handling of diagnostics.
+ ProcessWarningOptions(*Diags, *Opts);
+
+ return Diags;
+}
+
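+// Illustrative usage sketch, not part of this change: a standalone tool can
+// build an engine through the static overload without a CompilerInstance:
+//
+//   IntrusiveRefCntPtr<DiagnosticsEngine> Diags =
+//       CompilerInstance::createDiagnostics(new DiagnosticOptions());
+//   Diags->Report(diag::err_fe_error_reading) << "input.c";
+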
+// File Manager
+
+void CompilerInstance::createFileManager() {
+ if (!hasVirtualFileSystem()) {
+ // TODO: choose the virtual file system based on the CompilerInvocation.
+ setVirtualFileSystem(vfs::getRealFileSystem());
+ }
+ FileMgr = new FileManager(getFileSystemOpts(), VirtualFileSystem);
+}
+
+// Source Manager
+
+void CompilerInstance::createSourceManager(FileManager &FileMgr) {
+ SourceMgr = new SourceManager(getDiagnostics(), FileMgr);
+}
+
+// Initialize the remapping of files to alternative contents, e.g.,
+// those specified through other files.
+static void InitializeFileRemapping(DiagnosticsEngine &Diags,
+ SourceManager &SourceMgr,
+ FileManager &FileMgr,
+ const PreprocessorOptions &InitOpts) {
+ // Remap files in the source manager (with buffers).
+ for (const auto &RB : InitOpts.RemappedFileBuffers) {
+ // Create the file entry for the file that we're mapping from.
+ const FileEntry *FromFile =
+ FileMgr.getVirtualFile(RB.first, RB.second->getBufferSize(), 0);
+ if (!FromFile) {
+ Diags.Report(diag::err_fe_remap_missing_from_file) << RB.first;
+ if (!InitOpts.RetainRemappedFileBuffers)
+ delete RB.second;
+ continue;
+ }
+
+ // Override the contents of the "from" file with the contents of
+ // the "to" file.
+ SourceMgr.overrideFileContents(FromFile, RB.second,
+ InitOpts.RetainRemappedFileBuffers);
+ }
+
+ // Remap files in the source manager (with other files).
+ for (const auto &RF : InitOpts.RemappedFiles) {
+ // Find the file that we're mapping to.
+ const FileEntry *ToFile = FileMgr.getFile(RF.second);
+ if (!ToFile) {
+ Diags.Report(diag::err_fe_remap_missing_to_file) << RF.first << RF.second;
+ continue;
+ }
+
+ // Create the file entry for the file that we're mapping from.
+ const FileEntry *FromFile =
+ FileMgr.getVirtualFile(RF.first, ToFile->getSize(), 0);
+ if (!FromFile) {
+ Diags.Report(diag::err_fe_remap_missing_from_file) << RF.first;
+ continue;
+ }
+
+ // Override the contents of the "from" file with the contents of
+ // the "to" file.
+ SourceMgr.overrideFileContents(FromFile, ToFile);
+ }
+
+ SourceMgr.setOverridenFilesKeepOriginalName(
+ InitOpts.RemappedFilesKeepOriginalName);
+}
+
+// Preprocessor
+
+void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
+ const PreprocessorOptions &PPOpts = getPreprocessorOpts();
+
+ // Create a PTH manager if we are using some form of a token cache.
+ PTHManager *PTHMgr = nullptr;
+ if (!PPOpts.TokenCache.empty())
+ PTHMgr = PTHManager::Create(PPOpts.TokenCache, getDiagnostics());
+
+ // Create the Preprocessor.
+ HeaderSearch *HeaderInfo = new HeaderSearch(&getHeaderSearchOpts(),
+ getSourceManager(),
+ getDiagnostics(),
+ getLangOpts(),
+ &getTarget());
+ PP = new Preprocessor(&getPreprocessorOpts(), getDiagnostics(), getLangOpts(),
+ getSourceManager(), *HeaderInfo, *this, PTHMgr,
+ /*OwnsHeaderSearch=*/true, TUKind);
+ PP->Initialize(getTarget(), getAuxTarget());
+
+  // Note that this is different from passing PTHMgr to Preprocessor's ctor.
+ // That argument is used as the IdentifierInfoLookup argument to
+ // IdentifierTable's ctor.
+ if (PTHMgr) {
+ PTHMgr->setPreprocessor(&*PP);
+ PP->setPTHManager(PTHMgr);
+ }
+
+ if (PPOpts.DetailedRecord)
+ PP->createPreprocessingRecord();
+
+ // Apply remappings to the source manager.
+ InitializeFileRemapping(PP->getDiagnostics(), PP->getSourceManager(),
+ PP->getFileManager(), PPOpts);
+
+ // Predefine macros and configure the preprocessor.
+ InitializePreprocessor(*PP, PPOpts, getPCHContainerReader(),
+ getFrontendOpts());
+
+ // Initialize the header search object.
+ ApplyHeaderSearchOptions(PP->getHeaderSearchInfo(), getHeaderSearchOpts(),
+ PP->getLangOpts(), PP->getTargetInfo().getTriple());
+
+ PP->setPreprocessedOutput(getPreprocessorOutputOpts().ShowCPP);
+
+ if (PP->getLangOpts().Modules && PP->getLangOpts().ImplicitModules)
+ PP->getHeaderSearchInfo().setModuleCachePath(getSpecificModuleCachePath());
+
+ // Handle generating dependencies, if requested.
+ const DependencyOutputOptions &DepOpts = getDependencyOutputOpts();
+ if (!DepOpts.OutputFile.empty())
+ TheDependencyFileGenerator.reset(
+ DependencyFileGenerator::CreateAndAttachToPreprocessor(*PP, DepOpts));
+ if (!DepOpts.DOTOutputFile.empty())
+ AttachDependencyGraphGen(*PP, DepOpts.DOTOutputFile,
+ getHeaderSearchOpts().Sysroot);
+
+ for (auto &Listener : DependencyCollectors)
+ Listener->attachToPreprocessor(*PP);
+
+ // If we don't have a collector, but we are collecting module dependencies,
+ // then we're the top level compiler instance and need to create one.
+ if (!ModuleDepCollector && !DepOpts.ModuleDependencyOutputDir.empty())
+ ModuleDepCollector = std::make_shared<ModuleDependencyCollector>(
+ DepOpts.ModuleDependencyOutputDir);
+
+ // Handle generating header include information, if requested.
+ if (DepOpts.ShowHeaderIncludes)
+ AttachHeaderIncludeGen(*PP, DepOpts.ExtraDeps);
+ if (!DepOpts.HeaderIncludeOutputFile.empty()) {
+ StringRef OutputPath = DepOpts.HeaderIncludeOutputFile;
+ if (OutputPath == "-")
+ OutputPath = "";
+ AttachHeaderIncludeGen(*PP, DepOpts.ExtraDeps,
+ /*ShowAllHeaders=*/true, OutputPath,
+ /*ShowDepth=*/false);
+ }
+
+ if (DepOpts.PrintShowIncludes) {
+ AttachHeaderIncludeGen(*PP, DepOpts.ExtraDeps,
+ /*ShowAllHeaders=*/false, /*OutputPath=*/"",
+ /*ShowDepth=*/true, /*MSStyle=*/true);
+ }
+}
+
+std::string CompilerInstance::getSpecificModuleCachePath() {
+ // Set up the module path, including the hash for the
+ // module-creation options.
+ SmallString<256> SpecificModuleCache(getHeaderSearchOpts().ModuleCachePath);
+ if (!SpecificModuleCache.empty() && !getHeaderSearchOpts().DisableModuleHash)
+ llvm::sys::path::append(SpecificModuleCache,
+ getInvocation().getModuleHash());
+ return SpecificModuleCache.str();
+}
+
+// ASTContext
+
+void CompilerInstance::createASTContext() {
+ Preprocessor &PP = getPreprocessor();
+ auto *Context = new ASTContext(getLangOpts(), PP.getSourceManager(),
+ PP.getIdentifierTable(), PP.getSelectorTable(),
+ PP.getBuiltinInfo());
+ Context->InitBuiltinTypes(getTarget(), getAuxTarget());
+ setASTContext(Context);
+}
+
+// ExternalASTSource
+
+void CompilerInstance::createPCHExternalASTSource(
+ StringRef Path, bool DisablePCHValidation, bool AllowPCHWithCompilerErrors,
+ void *DeserializationListener, bool OwnDeserializationListener) {
+ bool Preamble = getPreprocessorOpts().PrecompiledPreambleBytes.first != 0;
+ ModuleManager = createPCHExternalASTSource(
+ Path, getHeaderSearchOpts().Sysroot, DisablePCHValidation,
+ AllowPCHWithCompilerErrors, getPreprocessor(), getASTContext(),
+ getPCHContainerReader(),
+ getFrontendOpts().ModuleFileExtensions,
+ DeserializationListener,
+ OwnDeserializationListener, Preamble,
+ getFrontendOpts().UseGlobalModuleIndex);
+}
+
+IntrusiveRefCntPtr<ASTReader> CompilerInstance::createPCHExternalASTSource(
+ StringRef Path, StringRef Sysroot, bool DisablePCHValidation,
+ bool AllowPCHWithCompilerErrors, Preprocessor &PP, ASTContext &Context,
+ const PCHContainerReader &PCHContainerRdr,
+ ArrayRef<IntrusiveRefCntPtr<ModuleFileExtension>> Extensions,
+ void *DeserializationListener, bool OwnDeserializationListener,
+ bool Preamble, bool UseGlobalModuleIndex) {
+ HeaderSearchOptions &HSOpts = PP.getHeaderSearchInfo().getHeaderSearchOpts();
+
+ IntrusiveRefCntPtr<ASTReader> Reader(new ASTReader(
+ PP, Context, PCHContainerRdr, Extensions,
+ Sysroot.empty() ? "" : Sysroot.data(), DisablePCHValidation,
+ AllowPCHWithCompilerErrors, /*AllowConfigurationMismatch*/ false,
+ HSOpts.ModulesValidateSystemHeaders, UseGlobalModuleIndex));
+
+ // We need the external source to be set up before we read the AST, because
+ // eagerly-deserialized declarations may use it.
+ Context.setExternalSource(Reader.get());
+
+ Reader->setDeserializationListener(
+ static_cast<ASTDeserializationListener *>(DeserializationListener),
+ /*TakeOwnership=*/OwnDeserializationListener);
+ switch (Reader->ReadAST(Path,
+ Preamble ? serialization::MK_Preamble
+ : serialization::MK_PCH,
+ SourceLocation(),
+ ASTReader::ARR_None)) {
+ case ASTReader::Success:
+ // Set the predefines buffer as suggested by the PCH reader. Typically, the
+ // predefines buffer will be empty.
+ PP.setPredefines(Reader->getSuggestedPredefines());
+ return Reader;
+
+ case ASTReader::Failure:
+ // Unrecoverable failure: don't even try to process the input file.
+ break;
+
+ case ASTReader::Missing:
+ case ASTReader::OutOfDate:
+ case ASTReader::VersionMismatch:
+ case ASTReader::ConfigurationMismatch:
+ case ASTReader::HadErrors:
+ // No suitable PCH file could be found. Return an error.
+ break;
+ }
+
+ Context.setExternalSource(nullptr);
+ return nullptr;
+}
+
+// Code Completion
+
+static bool EnableCodeCompletion(Preprocessor &PP,
+ const std::string &Filename,
+ unsigned Line,
+ unsigned Column) {
+ // Tell the source manager to chop off the given file at a specific
+ // line and column.
+ const FileEntry *Entry = PP.getFileManager().getFile(Filename);
+ if (!Entry) {
+ PP.getDiagnostics().Report(diag::err_fe_invalid_code_complete_file)
+ << Filename;
+ return true;
+ }
+
+ // Truncate the named file at the given line/column.
+ PP.SetCodeCompletionPoint(Entry, Line, Column);
+ return false;
+}
+
+void CompilerInstance::createCodeCompletionConsumer() {
+ const ParsedSourceLocation &Loc = getFrontendOpts().CodeCompletionAt;
+ if (!CompletionConsumer) {
+ setCodeCompletionConsumer(
+ createCodeCompletionConsumer(getPreprocessor(),
+ Loc.FileName, Loc.Line, Loc.Column,
+ getFrontendOpts().CodeCompleteOpts,
+ llvm::outs()));
+ if (!CompletionConsumer)
+ return;
+ } else if (EnableCodeCompletion(getPreprocessor(), Loc.FileName,
+ Loc.Line, Loc.Column)) {
+ setCodeCompletionConsumer(nullptr);
+ return;
+ }
+
+ if (CompletionConsumer->isOutputBinary() &&
+ llvm::sys::ChangeStdoutToBinary()) {
+ getPreprocessor().getDiagnostics().Report(diag::err_fe_stdout_binary);
+ setCodeCompletionConsumer(nullptr);
+ }
+}
+
+void CompilerInstance::createFrontendTimer() {
+ FrontendTimerGroup.reset(new llvm::TimerGroup("Clang front-end time report"));
+ FrontendTimer.reset(
+ new llvm::Timer("Clang front-end timer", *FrontendTimerGroup));
+}
+
+CodeCompleteConsumer *
+CompilerInstance::createCodeCompletionConsumer(Preprocessor &PP,
+ StringRef Filename,
+ unsigned Line,
+ unsigned Column,
+ const CodeCompleteOptions &Opts,
+ raw_ostream &OS) {
+ if (EnableCodeCompletion(PP, Filename, Line, Column))
+ return nullptr;
+
+ // Set up the creation routine for code-completion.
+ return new PrintingCodeCompleteConsumer(Opts, OS);
+}
+
+void CompilerInstance::createSema(TranslationUnitKind TUKind,
+ CodeCompleteConsumer *CompletionConsumer) {
+ TheSema.reset(new Sema(getPreprocessor(), getASTContext(), getASTConsumer(),
+ TUKind, CompletionConsumer));
+}
+
+// Output Files
+
+void CompilerInstance::addOutputFile(OutputFile &&OutFile) {
+ assert(OutFile.OS && "Attempt to add empty stream to output list!");
+ OutputFiles.push_back(std::move(OutFile));
+}
+
+void CompilerInstance::clearOutputFiles(bool EraseFiles) {
+ for (OutputFile &OF : OutputFiles) {
+ // Manually close the stream before we rename it.
+ OF.OS.reset();
+
+ if (!OF.TempFilename.empty()) {
+ if (EraseFiles) {
+ llvm::sys::fs::remove(OF.TempFilename);
+ } else {
+ SmallString<128> NewOutFile(OF.Filename);
+
+ // If '-working-directory' was passed, the output filename should be
+ // relative to that.
+ FileMgr->FixupRelativePath(NewOutFile);
+ if (std::error_code ec =
+ llvm::sys::fs::rename(OF.TempFilename, NewOutFile)) {
+ getDiagnostics().Report(diag::err_unable_to_rename_temp)
+ << OF.TempFilename << OF.Filename << ec.message();
+
+ llvm::sys::fs::remove(OF.TempFilename);
+ }
+ }
+ } else if (!OF.Filename.empty() && EraseFiles)
+ llvm::sys::fs::remove(OF.Filename);
+
+ }
+ OutputFiles.clear();
+ NonSeekStream.reset();
+}
+
+raw_pwrite_stream *
+CompilerInstance::createDefaultOutputFile(bool Binary, StringRef InFile,
+ StringRef Extension) {
+ return createOutputFile(getFrontendOpts().OutputFile, Binary,
+ /*RemoveFileOnSignal=*/true, InFile, Extension,
+ /*UseTemporary=*/true);
+}
+
+llvm::raw_null_ostream *CompilerInstance::createNullOutputFile() {
+ auto OS = llvm::make_unique<llvm::raw_null_ostream>();
+ llvm::raw_null_ostream *Ret = OS.get();
+ addOutputFile(OutputFile("", "", std::move(OS)));
+ return Ret;
+}
+
+raw_pwrite_stream *
+CompilerInstance::createOutputFile(StringRef OutputPath, bool Binary,
+ bool RemoveFileOnSignal, StringRef InFile,
+ StringRef Extension, bool UseTemporary,
+ bool CreateMissingDirectories) {
+ std::string OutputPathName, TempPathName;
+ std::error_code EC;
+ std::unique_ptr<raw_pwrite_stream> OS = createOutputFile(
+ OutputPath, EC, Binary, RemoveFileOnSignal, InFile, Extension,
+ UseTemporary, CreateMissingDirectories, &OutputPathName, &TempPathName);
+ if (!OS) {
+ getDiagnostics().Report(diag::err_fe_unable_to_open_output) << OutputPath
+ << EC.message();
+ return nullptr;
+ }
+
+ raw_pwrite_stream *Ret = OS.get();
+  // Add the output file -- but don't try to remove "-", since this means we
+  // are writing to stdout.
+ addOutputFile(OutputFile((OutputPathName != "-") ? OutputPathName : "",
+ TempPathName, std::move(OS)));
+
+ return Ret;
+}
+
+std::unique_ptr<llvm::raw_pwrite_stream> CompilerInstance::createOutputFile(
+ StringRef OutputPath, std::error_code &Error, bool Binary,
+ bool RemoveFileOnSignal, StringRef InFile, StringRef Extension,
+ bool UseTemporary, bool CreateMissingDirectories,
+ std::string *ResultPathName, std::string *TempPathName) {
+ assert((!CreateMissingDirectories || UseTemporary) &&
+ "CreateMissingDirectories is only allowed when using temporary files");
+
+ std::string OutFile, TempFile;
+ if (!OutputPath.empty()) {
+ OutFile = OutputPath;
+ } else if (InFile == "-") {
+ OutFile = "-";
+ } else if (!Extension.empty()) {
+ SmallString<128> Path(InFile);
+ llvm::sys::path::replace_extension(Path, Extension);
+ OutFile = Path.str();
+ } else {
+ OutFile = "-";
+ }
+
+ std::unique_ptr<llvm::raw_fd_ostream> OS;
+ std::string OSFile;
+
+ if (UseTemporary) {
+ if (OutFile == "-")
+ UseTemporary = false;
+ else {
+ llvm::sys::fs::file_status Status;
+ llvm::sys::fs::status(OutputPath, Status);
+ if (llvm::sys::fs::exists(Status)) {
+ // Fail early if we can't write to the final destination.
+ if (!llvm::sys::fs::can_write(OutputPath)) {
+ Error = make_error_code(llvm::errc::operation_not_permitted);
+ return nullptr;
+ }
+
+ // Don't use a temporary if the output is a special file. This handles
+ // things like '-o /dev/null'
+ if (!llvm::sys::fs::is_regular_file(Status))
+ UseTemporary = false;
+ }
+ }
+ }
+
+ if (UseTemporary) {
+ // Create a temporary file.
+ SmallString<128> TempPath;
+ TempPath = OutFile;
+ TempPath += "-%%%%%%%%";
+ int fd;
+ std::error_code EC =
+ llvm::sys::fs::createUniqueFile(TempPath, fd, TempPath);
+
+ if (CreateMissingDirectories &&
+ EC == llvm::errc::no_such_file_or_directory) {
+ StringRef Parent = llvm::sys::path::parent_path(OutputPath);
+ EC = llvm::sys::fs::create_directories(Parent);
+ if (!EC) {
+ EC = llvm::sys::fs::createUniqueFile(TempPath, fd, TempPath);
+ }
+ }
+
+ if (!EC) {
+ OS.reset(new llvm::raw_fd_ostream(fd, /*shouldClose=*/true));
+ OSFile = TempFile = TempPath.str();
+ }
+    // If we failed to create the temporary, fall back to writing to the file
+ // directly. This handles the corner case where we cannot write to the
+ // directory, but can write to the file.
+ }
+
+ if (!OS) {
+ OSFile = OutFile;
+ OS.reset(new llvm::raw_fd_ostream(
+ OSFile, Error,
+ (Binary ? llvm::sys::fs::F_None : llvm::sys::fs::F_Text)));
+ if (Error)
+ return nullptr;
+ }
+
+ // Make sure the out stream file gets removed if we crash.
+ if (RemoveFileOnSignal)
+ llvm::sys::RemoveFileOnSignal(OSFile);
+
+ if (ResultPathName)
+ *ResultPathName = OutFile;
+ if (TempPathName)
+ *TempPathName = TempFile;
+
+ if (!Binary || OS->supportsSeeking())
+ return std::move(OS);
+
+ auto B = llvm::make_unique<llvm::buffer_ostream>(*OS);
+ assert(!NonSeekStream);
+ NonSeekStream = std::move(OS);
+ return std::move(B);
+}
+
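+// Illustrative sketch, not part of this change: the "-%%%%%%%%" model above
+// asks createUniqueFile to substitute random characters, so a hypothetical
+// output "out.o" is first written to something like "out.o-4f3a9c21":
+//
+//   SmallString<128> TempPath("out.o-%%%%%%%%");
+//   int FD;
+//   llvm::sys::fs::createUniqueFile(TempPath, FD, TempPath);
+//
+// clearOutputFiles() later renames the temporary over the real output (or
+// removes it on failure), so readers never observe a half-written file.
+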
+// Initialization Utilities
+
+bool CompilerInstance::InitializeSourceManager(
+    const FrontendInputFile &Input) {
+ return InitializeSourceManager(Input, getDiagnostics(),
+ getFileManager(), getSourceManager(),
+ getFrontendOpts());
+}
+
+bool CompilerInstance::InitializeSourceManager(const FrontendInputFile &Input,
+ DiagnosticsEngine &Diags,
+ FileManager &FileMgr,
+ SourceManager &SourceMgr,
+ const FrontendOptions &Opts) {
+ SrcMgr::CharacteristicKind
+ Kind = Input.isSystem() ? SrcMgr::C_System : SrcMgr::C_User;
+
+ if (Input.isBuffer()) {
+ SourceMgr.setMainFileID(SourceMgr.createFileID(
+ std::unique_ptr<llvm::MemoryBuffer>(Input.getBuffer()), Kind));
+ assert(SourceMgr.getMainFileID().isValid() &&
+ "Couldn't establish MainFileID!");
+ return true;
+ }
+
+ StringRef InputFile = Input.getFile();
+
+ // Figure out where to get and map in the main file.
+ if (InputFile != "-") {
+ const FileEntry *File = FileMgr.getFile(InputFile, /*OpenFile=*/true);
+ if (!File) {
+ Diags.Report(diag::err_fe_error_reading) << InputFile;
+ return false;
+ }
+
+ // The natural SourceManager infrastructure can't currently handle named
+ // pipes, but we would at least like to accept them for the main
+ // file. Detect them here, read them with the volatile flag so FileMgr will
+ // pick up the correct size, and simply override their contents as we do for
+ // STDIN.
+ if (File->isNamedPipe()) {
+ auto MB = FileMgr.getBufferForFile(File, /*isVolatile=*/true);
+ if (MB) {
+ // Create a new virtual file that will have the correct size.
+ File = FileMgr.getVirtualFile(InputFile, (*MB)->getBufferSize(), 0);
+ SourceMgr.overrideFileContents(File, std::move(*MB));
+ } else {
+ Diags.Report(diag::err_cannot_open_file) << InputFile
+ << MB.getError().message();
+ return false;
+ }
+ }
+
+ SourceMgr.setMainFileID(
+ SourceMgr.createFileID(File, SourceLocation(), Kind));
+ } else {
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> SBOrErr =
+ llvm::MemoryBuffer::getSTDIN();
+ if (std::error_code EC = SBOrErr.getError()) {
+ Diags.Report(diag::err_fe_error_reading_stdin) << EC.message();
+ return false;
+ }
+ std::unique_ptr<llvm::MemoryBuffer> SB = std::move(SBOrErr.get());
+
+ const FileEntry *File = FileMgr.getVirtualFile(SB->getBufferIdentifier(),
+ SB->getBufferSize(), 0);
+ SourceMgr.setMainFileID(
+ SourceMgr.createFileID(File, SourceLocation(), Kind));
+ SourceMgr.overrideFileContents(File, std::move(SB));
+ }
+
+ assert(SourceMgr.getMainFileID().isValid() &&
+ "Couldn't establish MainFileID!");
+ return true;
+}
+
+// High-Level Operations
+
+bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
+ assert(hasDiagnostics() && "Diagnostics engine is not initialized!");
+ assert(!getFrontendOpts().ShowHelp && "Client must handle '-help'!");
+ assert(!getFrontendOpts().ShowVersion && "Client must handle '-version'!");
+
+ // FIXME: Take this as an argument, once all the APIs we used have moved to
+ // taking it as an input instead of hard-coding llvm::errs.
+ raw_ostream &OS = llvm::errs();
+
+ // Create the target instance.
+ setTarget(TargetInfo::CreateTargetInfo(getDiagnostics(),
+ getInvocation().TargetOpts));
+ if (!hasTarget())
+ return false;
+
+ // Create TargetInfo for the other side of CUDA compilation.
+ if (getLangOpts().CUDA && !getFrontendOpts().AuxTriple.empty()) {
+ std::shared_ptr<TargetOptions> TO(new TargetOptions);
+ TO->Triple = getFrontendOpts().AuxTriple;
+ setAuxTarget(TargetInfo::CreateTargetInfo(getDiagnostics(), TO));
+ }
+
+ // Inform the target of the language options.
+ //
+ // FIXME: We shouldn't need to do this, the target should be immutable once
+ // created. This complexity should be lifted elsewhere.
+ getTarget().adjust(getLangOpts());
+
+  // The ObjC rewriter will change the target's built-in bool type from its
+  // default.
+ if (getFrontendOpts().ProgramAction == frontend::RewriteObjC)
+ getTarget().noSignedCharForObjCBool();
+
+ // Validate/process some options.
+ if (getHeaderSearchOpts().Verbose)
+ OS << "clang -cc1 version " CLANG_VERSION_STRING
+ << " based upon " << BACKEND_PACKAGE_STRING
+ << " default target " << llvm::sys::getDefaultTargetTriple() << "\n";
+
+ if (getFrontendOpts().ShowTimers)
+ createFrontendTimer();
+
+ if (getFrontendOpts().ShowStats)
+ llvm::EnableStatistics();
+
+ for (const FrontendInputFile &FIF : getFrontendOpts().Inputs) {
+ // Reset the ID tables if we are reusing the SourceManager and parsing
+ // regular files.
+ if (hasSourceManager() && !Act.isModelParsingAction())
+ getSourceManager().clearIDTables();
+
+ if (Act.BeginSourceFile(*this, FIF)) {
+ Act.Execute();
+ Act.EndSourceFile();
+ }
+ }
+
+ // Notify the diagnostic client that all files were processed.
+ getDiagnostics().getClient()->finish();
+
+ if (getDiagnosticOpts().ShowCarets) {
+ // We can have multiple diagnostics sharing one diagnostic client.
+ // Get the total number of warnings/errors from the client.
+ unsigned NumWarnings = getDiagnostics().getClient()->getNumWarnings();
+ unsigned NumErrors = getDiagnostics().getClient()->getNumErrors();
+
+ if (NumWarnings)
+ OS << NumWarnings << " warning" << (NumWarnings == 1 ? "" : "s");
+ if (NumWarnings && NumErrors)
+ OS << " and ";
+ if (NumErrors)
+ OS << NumErrors << " error" << (NumErrors == 1 ? "" : "s");
+ if (NumWarnings || NumErrors)
+ OS << " generated.\n";
+ }
+
+ if (getFrontendOpts().ShowStats && hasFileManager()) {
+ getFileManager().PrintStats();
+ OS << "\n";
+ }
+
+ return !getDiagnostics().getClient()->getNumErrors();
+}
+
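+// Illustrative usage sketch, not part of this change (CInvok is an assumed,
+// previously-built CompilerInvocation):
+//
+//   CompilerInstance Clang;
+//   Clang.setInvocation(CInvok.release());
+//   Clang.createDiagnostics();
+//   SyntaxOnlyAction Act;
+//   bool Success = Clang.ExecuteAction(Act);
+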
+/// \brief Determine the appropriate source input kind based on language
+/// options.
+static InputKind getSourceInputKindFromOptions(const LangOptions &LangOpts) {
+ if (LangOpts.OpenCL)
+ return IK_OpenCL;
+ if (LangOpts.CUDA)
+ return IK_CUDA;
+ if (LangOpts.ObjC1)
+ return LangOpts.CPlusPlus? IK_ObjCXX : IK_ObjC;
+ return LangOpts.CPlusPlus? IK_CXX : IK_C;
+}
+
+/// \brief Compile a module file for the given module, using the options
+/// provided by the importing compiler instance. Returns true if the module
+/// was built without errors.
+static bool compileModuleImpl(CompilerInstance &ImportingInstance,
+ SourceLocation ImportLoc,
+ Module *Module,
+ StringRef ModuleFileName) {
+ ModuleMap &ModMap
+ = ImportingInstance.getPreprocessor().getHeaderSearchInfo().getModuleMap();
+
+ // Construct a compiler invocation for creating this module.
+ IntrusiveRefCntPtr<CompilerInvocation> Invocation
+ (new CompilerInvocation(ImportingInstance.getInvocation()));
+
+ PreprocessorOptions &PPOpts = Invocation->getPreprocessorOpts();
+
+ // For any options that aren't intended to affect how a module is built,
+ // reset them to their default values.
+ Invocation->getLangOpts()->resetNonModularOptions();
+ PPOpts.resetNonModularOptions();
+
+ // Remove any macro definitions that are explicitly ignored by the module.
+ // They aren't supposed to affect how the module is built anyway.
+ const HeaderSearchOptions &HSOpts = Invocation->getHeaderSearchOpts();
+ PPOpts.Macros.erase(
+ std::remove_if(PPOpts.Macros.begin(), PPOpts.Macros.end(),
+ [&HSOpts](const std::pair<std::string, bool> &def) {
+ StringRef MacroDef = def.first;
+ return HSOpts.ModulesIgnoreMacros.count(MacroDef.split('=').first) > 0;
+ }),
+ PPOpts.Macros.end());
+
+ // Note the name of the module we're building.
+ Invocation->getLangOpts()->CurrentModule = Module->getTopLevelModuleName();
+
+ // Make sure that the failed-module structure has been allocated in
+ // the importing instance, and propagate the pointer to the newly-created
+ // instance.
+ PreprocessorOptions &ImportingPPOpts
+ = ImportingInstance.getInvocation().getPreprocessorOpts();
+ if (!ImportingPPOpts.FailedModules)
+ ImportingPPOpts.FailedModules = new PreprocessorOptions::FailedModulesSet;
+ PPOpts.FailedModules = ImportingPPOpts.FailedModules;
+
+ // If there is a module map file, build the module using the module map.
+ // Set up the inputs/outputs so that we build the module from its umbrella
+ // header.
+ FrontendOptions &FrontendOpts = Invocation->getFrontendOpts();
+ FrontendOpts.OutputFile = ModuleFileName.str();
+ FrontendOpts.DisableFree = false;
+ FrontendOpts.GenerateGlobalModuleIndex = false;
+ FrontendOpts.BuildingImplicitModule = true;
+ FrontendOpts.Inputs.clear();
+ InputKind IK = getSourceInputKindFromOptions(*Invocation->getLangOpts());
+
+ // Don't free the remapped file buffers; they are owned by our caller.
+ PPOpts.RetainRemappedFileBuffers = true;
+
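+ // Don't apply -verify to the module build; diagnostics are forwarded to
+ // the importing instance's diagnostic consumer below.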
+ Invocation->getDiagnosticOpts().VerifyDiagnostics = 0;
+ assert(ImportingInstance.getInvocation().getModuleHash() ==
+ Invocation->getModuleHash() && "Module hash mismatch!");
+
+ // Construct a compiler instance that will be used to actually create the
+ // module.
+ CompilerInstance Instance(ImportingInstance.getPCHContainerOperations(),
+ /*BuildingModule=*/true);
+ Instance.setInvocation(&*Invocation);
+
+ Instance.createDiagnostics(new ForwardingDiagnosticConsumer(
+ ImportingInstance.getDiagnosticClient()),
+ /*ShouldOwnClient=*/true);
+
+ Instance.setVirtualFileSystem(&ImportingInstance.getVirtualFileSystem());
+
+ // Note that this module is part of the module build stack, so that we
+ // can detect cycles in the module graph.
+ Instance.setFileManager(&ImportingInstance.getFileManager());
+ Instance.createSourceManager(Instance.getFileManager());
+ SourceManager &SourceMgr = Instance.getSourceManager();
+ SourceMgr.setModuleBuildStack(
+ ImportingInstance.getSourceManager().getModuleBuildStack());
+ SourceMgr.pushModuleBuildStack(Module->getTopLevelModuleName(),
+ FullSourceLoc(ImportLoc, ImportingInstance.getSourceManager()));
+
+ // If we're collecting module dependencies, we need to share a collector
+ // between all of the module CompilerInstances. Other than that, we don't
+ // want to produce any dependency output from the module build.
+ Instance.setModuleDepCollector(ImportingInstance.getModuleDepCollector());
+ Invocation->getDependencyOutputOpts() = DependencyOutputOptions();
+
+ // Get or create the module map that we'll use to build this module.
+ std::string InferredModuleMapContent;
+ if (const FileEntry *ModuleMapFile =
+ ModMap.getContainingModuleMapFile(Module)) {
+ // Use the module map where this module resides.
+ FrontendOpts.Inputs.emplace_back(ModuleMapFile->getName(), IK);
+ } else {
+ SmallString<128> FakeModuleMapFile(Module->Directory->getName());
+ llvm::sys::path::append(FakeModuleMapFile, "__inferred_module.map");
+ FrontendOpts.Inputs.emplace_back(FakeModuleMapFile, IK);
+
+ llvm::raw_string_ostream OS(InferredModuleMapContent);
+ Module->print(OS);
+ OS.flush();
+
+ std::unique_ptr<llvm::MemoryBuffer> ModuleMapBuffer =
+ llvm::MemoryBuffer::getMemBuffer(InferredModuleMapContent);
+ ModuleMapFile = Instance.getFileManager().getVirtualFile(
+ FakeModuleMapFile, InferredModuleMapContent.size(), 0);
+ SourceMgr.overrideFileContents(ModuleMapFile, std::move(ModuleMapBuffer));
+ }
+
+ // Construct a module-generating action. Passing through the module map is
+ // safe because the FileManager is shared between the compiler instances.
+ GenerateModuleAction CreateModuleAction(
+ ModMap.getModuleMapFileForUniquing(Module), Module->IsSystem);
+
+ ImportingInstance.getDiagnostics().Report(ImportLoc,
+ diag::remark_module_build)
+ << Module->Name << ModuleFileName;
+
+ // Execute the action to actually build the module in-place. Use a separate
+ // thread so that we get a stack large enough.
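+ // 8 << 20 bytes == 8 MiB, comfortably more than typical default thread
+ // stack sizes.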
+ const unsigned ThreadStackSize = 8 << 20;
+ llvm::CrashRecoveryContext CRC;
+ CRC.RunSafelyOnThread([&]() { Instance.ExecuteAction(CreateModuleAction); },
+ ThreadStackSize);
+
+ ImportingInstance.getDiagnostics().Report(ImportLoc,
+ diag::remark_module_build_done)
+ << Module->Name;
+
+ // Delete the temporary module map file.
+ // FIXME: Even though we're executing under crash protection, it would still
+ // be nice to do this with RemoveFileOnSignal when we can. However, that
+ // doesn't make sense for all clients, so clean this up manually.
+ Instance.clearOutputFiles(/*EraseFiles=*/true);
+
+ // We've rebuilt a module. If we're allowed to generate or update the global
+ // module index, record that fact in the importing compiler instance.
+ if (ImportingInstance.getFrontendOpts().GenerateGlobalModuleIndex) {
+ ImportingInstance.setBuildGlobalModuleIndex(true);
+ }
+
+ return !Instance.getDiagnostics().hasErrorOccurred();
+}
+
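+/// \brief Compile the given module, if necessary, and read the resulting
+/// module file back in. A lock file ensures that concurrent compiler
+/// instances do not build the same module simultaneously; instances that
+/// lose the race wait for the winner and then load its output.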
+static bool compileAndLoadModule(CompilerInstance &ImportingInstance,
+ SourceLocation ImportLoc,
+ SourceLocation ModuleNameLoc, Module *Module,
+ StringRef ModuleFileName) {
+ DiagnosticsEngine &Diags = ImportingInstance.getDiagnostics();
+
+ auto diagnoseBuildFailure = [&] {
+ Diags.Report(ModuleNameLoc, diag::err_module_not_built)
+ << Module->Name << SourceRange(ImportLoc, ModuleNameLoc);
+ };
+
+ // FIXME: have LockFileManager return an error_code so that we can
+ // avoid the mkdir when the directory already exists.
+ StringRef Dir = llvm::sys::path::parent_path(ModuleFileName);
+ llvm::sys::fs::create_directories(Dir);
+
+ while (true) {
+ unsigned ModuleLoadCapabilities = ASTReader::ARR_Missing;
+ llvm::LockFileManager Locked(ModuleFileName);
+ switch (Locked) {
+ case llvm::LockFileManager::LFS_Error:
+ Diags.Report(ModuleNameLoc, diag::err_module_lock_failure)
+ << Module->Name;
+ return false;
+
+ case llvm::LockFileManager::LFS_Owned:
+ // We're responsible for building the module ourselves.
+ if (!compileModuleImpl(ImportingInstance, ModuleNameLoc, Module,
+ ModuleFileName)) {
+ diagnoseBuildFailure();
+ return false;
+ }
+ break;
+
+ case llvm::LockFileManager::LFS_Shared:
+ // Someone else is responsible for building the module. Wait for them to
+ // finish.
+ switch (Locked.waitForUnlock()) {
+ case llvm::LockFileManager::Res_Success:
+ ModuleLoadCapabilities |= ASTReader::ARR_OutOfDate;
+ break;
+ case llvm::LockFileManager::Res_OwnerDied:
+ continue; // try again to get the lock.
+ case llvm::LockFileManager::Res_Timeout:
+ Diags.Report(ModuleNameLoc, diag::err_module_lock_timeout)
+ << Module->Name;
+ // Clear the lock file so that future invocations can make progress.
+ Locked.unsafeRemoveLockFile();
+ return false;
+ }
+ break;
+ }
+
+ // Try to read the module file, now that we've compiled it.
+ ASTReader::ASTReadResult ReadResult =
+ ImportingInstance.getModuleManager()->ReadAST(
+ ModuleFileName, serialization::MK_ImplicitModule, ImportLoc,
+ ModuleLoadCapabilities);
+
+ if (ReadResult == ASTReader::OutOfDate &&
+ Locked == llvm::LockFileManager::LFS_Shared) {
+ // The module may be out of date in the presence of file system races,
+ // or if one of its imports depends on header search paths that are not
+ // consistent with this ImportingInstance. Try again...
+ continue;
+ } else if (ReadResult == ASTReader::Missing) {
+ diagnoseBuildFailure();
+ } else if (ReadResult != ASTReader::Success &&
+ !Diags.hasErrorOccurred()) {
+ // The ASTReader didn't diagnose the error, so conservatively report it.
+ diagnoseBuildFailure();
+ }
+ return ReadResult == ASTReader::Success;
+ }
+}
+
+/// \brief Diagnose differences between the current definition of the given
+/// configuration macro and the definition provided on the command line.
+static void checkConfigMacro(Preprocessor &PP, StringRef ConfigMacro,
+ Module *Mod, SourceLocation ImportLoc) {
+ IdentifierInfo *Id = PP.getIdentifierInfo(ConfigMacro);
+ SourceManager &SourceMgr = PP.getSourceManager();
+
+ // If this identifier has never had a macro definition, then it could
+ // not have changed.
+ if (!Id->hadMacroDefinition())
+ return;
+ auto *LatestLocalMD = PP.getLocalMacroDirectiveHistory(Id);
+
+ // Find the macro definition from the command line.
+ MacroInfo *CmdLineDefinition = nullptr;
+ for (auto *MD = LatestLocalMD; MD; MD = MD->getPrevious()) {
+ // We only care about the predefines buffer.
+ FileID FID = SourceMgr.getFileID(MD->getLocation());
+ if (FID.isInvalid() || FID != PP.getPredefinesFileID())
+ continue;
+ if (auto *DMD = dyn_cast<DefMacroDirective>(MD))
+ CmdLineDefinition = DMD->getMacroInfo();
+ break;
+ }
+
+ auto *CurrentDefinition = PP.getMacroInfo(Id);
+ if (CurrentDefinition == CmdLineDefinition) {
+ // Macro matches. Nothing to do.
+ } else if (!CurrentDefinition) {
+ // This macro was defined on the command line, then #undef'd later.
+ // Complain.
+ PP.Diag(ImportLoc, diag::warn_module_config_macro_undef)
+ << true << ConfigMacro << Mod->getFullModuleName();
+ auto LatestDef = LatestLocalMD->getDefinition();
+ assert(LatestDef.isUndefined() &&
+ "predefined macro went away with no #undef?");
+ PP.Diag(LatestDef.getUndefLocation(), diag::note_module_def_undef_here)
+ << true;
+ return;
+ } else if (!CmdLineDefinition) {
+ // There was no definition for this macro in the predefines buffer,
+ // but there was a local definition. Complain.
+ PP.Diag(ImportLoc, diag::warn_module_config_macro_undef)
+ << false << ConfigMacro << Mod->getFullModuleName();
+ PP.Diag(CurrentDefinition->getDefinitionLoc(),
+ diag::note_module_def_undef_here)
+ << false;
+ } else if (!CurrentDefinition->isIdenticalTo(*CmdLineDefinition, PP,
+ /*Syntactically=*/true)) {
+ // The macro definitions differ.
+ PP.Diag(ImportLoc, diag::warn_module_config_macro_undef)
+ << false << ConfigMacro << Mod->getFullModuleName();
+ PP.Diag(CurrentDefinition->getDefinitionLoc(),
+ diag::note_module_def_undef_here)
+ << false;
+ }
+}
+
+/// \brief Write a new timestamp file with the given path.
+static void writeTimestampFile(StringRef TimestampFile) {
+ std::error_code EC;
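+ // Opening the stream creates (or truncates) the file; it is flushed and
+ // closed when Out goes out of scope, which is all a timestamp needs.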
+ llvm::raw_fd_ostream Out(TimestampFile.str(), EC, llvm::sys::fs::F_None);
+}
+
+/// \brief Prune the module cache of modules that haven't been accessed in
+/// a long time.
+static void pruneModuleCache(const HeaderSearchOptions &HSOpts) {
+ struct stat StatBuf;
+ llvm::SmallString<128> TimestampFile;
+ TimestampFile = HSOpts.ModuleCachePath;
+ assert(!TimestampFile.empty());
+ llvm::sys::path::append(TimestampFile, "modules.timestamp");
+
+ // Try to stat() the timestamp file.
+ if (::stat(TimestampFile.c_str(), &StatBuf)) {
+ // If the timestamp file wasn't there, create one now.
+ if (errno == ENOENT) {
+ writeTimestampFile(TimestampFile);
+ }
+ return;
+ }
+
+ // Check whether the time stamp is older than our pruning interval.
+ // If not, do nothing.
+ time_t TimeStampModTime = StatBuf.st_mtime;
+ time_t CurrentTime = time(nullptr);
+ if (CurrentTime - TimeStampModTime <= time_t(HSOpts.ModuleCachePruneInterval))
+ return;
+
+ // Write a new timestamp file so that nobody else attempts to prune.
+ // There is a benign race condition here, if two Clang instances happen to
+ // notice at the same time that the timestamp is out-of-date.
+ writeTimestampFile(TimestampFile);
+
+ // Walk the entire module cache, looking for unused module files and module
+ // indices.
+ std::error_code EC;
+ SmallString<128> ModuleCachePathNative;
+ llvm::sys::path::native(HSOpts.ModuleCachePath, ModuleCachePathNative);
+ for (llvm::sys::fs::directory_iterator Dir(ModuleCachePathNative, EC), DirEnd;
+ Dir != DirEnd && !EC; Dir.increment(EC)) {
+ // If we don't have a directory, there's nothing to look into.
+ if (!llvm::sys::fs::is_directory(Dir->path()))
+ continue;
+
+ // Walk all of the files within this directory.
+ for (llvm::sys::fs::directory_iterator File(Dir->path(), EC), FileEnd;
+ File != FileEnd && !EC; File.increment(EC)) {
+ // We only care about module and global module index files.
+ StringRef Extension = llvm::sys::path::extension(File->path());
+ if (Extension != ".pcm" && Extension != ".timestamp" &&
+ llvm::sys::path::filename(File->path()) != "modules.idx")
+ continue;
+
+ // Look at this file. If we can't stat it, there's nothing interesting
+ // there.
+ if (::stat(File->path().c_str(), &StatBuf))
+ continue;
+
+ // If the file has been used recently enough, leave it there.
+ time_t FileAccessTime = StatBuf.st_atime;
+ if (CurrentTime - FileAccessTime <=
+ time_t(HSOpts.ModuleCachePruneAfter)) {
+ continue;
+ }
+
+ // Remove the file.
+ llvm::sys::fs::remove(File->path());
+
+ // Remove the timestamp file.
+ std::string TimestampFilename = File->path() + ".timestamp";
+ llvm::sys::fs::remove(TimestampFilename);
+ }
+
+ // If we removed all of the files in the directory, remove the directory
+ // itself.
+ if (llvm::sys::fs::directory_iterator(Dir->path(), EC) ==
+ llvm::sys::fs::directory_iterator() && !EC)
+ llvm::sys::fs::remove(Dir->path());
+ }
+}
+
+void CompilerInstance::createModuleManager() {
+ if (!ModuleManager) {
+ if (!hasASTContext())
+ createASTContext();
+
+ // If we're implicitly building modules but not currently recursively
+ // building a module, check whether we need to prune the module cache.
+ if (getSourceManager().getModuleBuildStack().empty() &&
+ !getPreprocessor().getHeaderSearchInfo().getModuleCachePath().empty() &&
+ getHeaderSearchOpts().ModuleCachePruneInterval > 0 &&
+ getHeaderSearchOpts().ModuleCachePruneAfter > 0) {
+ pruneModuleCache(getHeaderSearchOpts());
+ }
+
+ HeaderSearchOptions &HSOpts = getHeaderSearchOpts();
+ std::string Sysroot = HSOpts.Sysroot;
+ const PreprocessorOptions &PPOpts = getPreprocessorOpts();
+ std::unique_ptr<llvm::Timer> ReadTimer;
+ if (FrontendTimerGroup)
+ ReadTimer = llvm::make_unique<llvm::Timer>("Reading modules",
+ *FrontendTimerGroup);
+ ModuleManager = new ASTReader(
+ getPreprocessor(), getASTContext(), getPCHContainerReader(),
+ getFrontendOpts().ModuleFileExtensions,
+ Sysroot.empty() ? "" : Sysroot.c_str(), PPOpts.DisablePCHValidation,
+ /*AllowASTWithCompilerErrors=*/false,
+ /*AllowConfigurationMismatch=*/false,
+ HSOpts.ModulesValidateSystemHeaders,
+ getFrontendOpts().UseGlobalModuleIndex,
+ std::move(ReadTimer));
+ if (hasASTConsumer()) {
+ ModuleManager->setDeserializationListener(
+ getASTConsumer().GetASTDeserializationListener());
+ getASTContext().setASTMutationListener(
+ getASTConsumer().GetASTMutationListener());
+ }
+ getASTContext().setExternalSource(ModuleManager);
+ if (hasSema())
+ ModuleManager->InitializeSema(getSema());
+ if (hasASTConsumer())
+ ModuleManager->StartTranslationUnit(&getASTConsumer());
+
+ if (TheDependencyFileGenerator)
+ TheDependencyFileGenerator->AttachToASTReader(*ModuleManager);
+ if (ModuleDepCollector)
+ ModuleDepCollector->attachToASTReader(*ModuleManager);
+ for (auto &Listener : DependencyCollectors)
+ Listener->attachToASTReader(*ModuleManager);
+ }
+}
+
+bool CompilerInstance::loadModuleFile(StringRef FileName) {
+ llvm::Timer Timer;
+ if (FrontendTimerGroup)
+ Timer.init("Preloading " + FileName.str(), *FrontendTimerGroup);
+ llvm::TimeRegion TimeLoading(FrontendTimerGroup ? &Timer : nullptr);
+
+ // Helper to recursively read the module names for all modules we're adding.
+ // We mark these as known and redirect any attempt to load those modules to
+ // the files we were handed.
+ struct ReadModuleNames : ASTReaderListener {
+ CompilerInstance &CI;
+ llvm::SmallVector<IdentifierInfo*, 8> LoadedModules;
+
+ ReadModuleNames(CompilerInstance &CI) : CI(CI) {}
+
+ void ReadModuleName(StringRef ModuleName) override {
+ LoadedModules.push_back(
+ CI.getPreprocessor().getIdentifierInfo(ModuleName));
+ }
+
+ void registerAll() {
+ for (auto *II : LoadedModules) {
+ CI.KnownModules[II] = CI.getPreprocessor()
+ .getHeaderSearchInfo()
+ .getModuleMap()
+ .findModule(II->getName());
+ }
+ LoadedModules.clear();
+ }
+
+ void markAllUnavailable() {
+ for (auto *II : LoadedModules) {
+ if (Module *M = CI.getPreprocessor()
+ .getHeaderSearchInfo()
+ .getModuleMap()
+ .findModule(II->getName()))
+ M->HasIncompatibleModuleFile = true;
+ }
+ LoadedModules.clear();
+ }
+ };
+
+ // If we don't already have an ASTReader, create one now.
+ if (!ModuleManager)
+ createModuleManager();
+
+ auto Listener = llvm::make_unique<ReadModuleNames>(*this);
+ auto &ListenerRef = *Listener;
+ ASTReader::ListenerScope ReadModuleNamesListener(*ModuleManager,
+ std::move(Listener));
+
+ // Try to load the module file.
+ switch (ModuleManager->ReadAST(FileName, serialization::MK_ExplicitModule,
+ SourceLocation(),
+ ASTReader::ARR_ConfigurationMismatch)) {
+ case ASTReader::Success:
+ // We successfully loaded the module file; remember the set of provided
+ // modules so that we don't try to load implicit modules for them.
+ ListenerRef.registerAll();
+ return true;
+
+ case ASTReader::ConfigurationMismatch:
+ // Ignore unusable module files.
+ getDiagnostics().Report(SourceLocation(), diag::warn_module_config_mismatch)
+ << FileName;
+ // All modules provided by any files we tried and failed to load are now
+ // unavailable; includes of those modules should now be handled textually.
+ ListenerRef.markAllUnavailable();
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+ModuleLoadResult
+CompilerInstance::loadModule(SourceLocation ImportLoc,
+ ModuleIdPath Path,
+ Module::NameVisibilityKind Visibility,
+ bool IsInclusionDirective) {
+ // Determine what file we're searching from.
+ StringRef ModuleName = Path[0].first->getName();
+ SourceLocation ModuleNameLoc = Path[0].second;
+
+ // If we've already handled this import, just return the cached result.
+ // This one-element cache is important to eliminate redundant diagnostics
+ // when both the preprocessor and parser see the same import declaration.
+ if (ImportLoc.isValid() && LastModuleImportLoc == ImportLoc) {
+ // Make the named module visible.
+ if (LastModuleImportResult && ModuleName != getLangOpts().CurrentModule &&
+ ModuleName != getLangOpts().ImplementationOfModule)
+ ModuleManager->makeModuleVisible(LastModuleImportResult, Visibility,
+ ImportLoc);
+ return LastModuleImportResult;
+ }
+
+ clang::Module *Module = nullptr;
+
+ // If we don't already have information on this module, load the module now.
+ llvm::DenseMap<const IdentifierInfo *, clang::Module *>::iterator Known
+ = KnownModules.find(Path[0].first);
+ if (Known != KnownModules.end()) {
+ // Retrieve the cached top-level module.
+ Module = Known->second;
+ } else if (ModuleName == getLangOpts().CurrentModule ||
+ ModuleName == getLangOpts().ImplementationOfModule) {
+ // This is the module we're building.
+ Module = PP->getHeaderSearchInfo().lookupModule(ModuleName);
+ Known = KnownModules.insert(std::make_pair(Path[0].first, Module)).first;
+ } else {
+ // Search for a module with the given name.
+ Module = PP->getHeaderSearchInfo().lookupModule(ModuleName);
+ if (!Module) {
+ getDiagnostics().Report(ModuleNameLoc, diag::err_module_not_found)
+ << ModuleName
+ << SourceRange(ImportLoc, ModuleNameLoc);
+ ModuleBuildFailed = true;
+ return ModuleLoadResult();
+ }
+
+ std::string ModuleFileName =
+ PP->getHeaderSearchInfo().getModuleFileName(Module);
+ if (ModuleFileName.empty()) {
+ if (Module->HasIncompatibleModuleFile) {
+ // We tried and failed to load a module file for this module. Fall
+ // back to textual inclusion for its headers.
+ return ModuleLoadResult(nullptr, /*missingExpected*/true);
+ }
+
+ getDiagnostics().Report(ModuleNameLoc, diag::err_module_build_disabled)
+ << ModuleName;
+ ModuleBuildFailed = true;
+ return ModuleLoadResult();
+ }
+
+ // If we don't already have an ASTReader, create one now.
+ if (!ModuleManager)
+ createModuleManager();
+
+ llvm::Timer Timer;
+ if (FrontendTimerGroup)
+ Timer.init("Loading " + ModuleFileName, *FrontendTimerGroup);
+ llvm::TimeRegion TimeLoading(FrontendTimerGroup ? &Timer : nullptr);
+
+ // Try to load the module file.
+ unsigned ARRFlags = ASTReader::ARR_OutOfDate | ASTReader::ARR_Missing;
+ switch (ModuleManager->ReadAST(ModuleFileName,
+ serialization::MK_ImplicitModule,
+ ImportLoc, ARRFlags)) {
+ case ASTReader::Success:
+ break;
+
+ case ASTReader::OutOfDate:
+ case ASTReader::Missing: {
+ // The module file is missing or out-of-date. Build it.
+ assert(Module && "missing module file");
+ // Check whether there is a cycle in the module graph.
+ ModuleBuildStack ModPath = getSourceManager().getModuleBuildStack();
+ ModuleBuildStack::iterator Pos = ModPath.begin(), PosEnd = ModPath.end();
+ for (; Pos != PosEnd; ++Pos) {
+ if (Pos->first == ModuleName)
+ break;
+ }
+
+ if (Pos != PosEnd) {
+ SmallString<256> CyclePath;
+ for (; Pos != PosEnd; ++Pos) {
+ CyclePath += Pos->first;
+ CyclePath += " -> ";
+ }
+ CyclePath += ModuleName;
+
+ getDiagnostics().Report(ModuleNameLoc, diag::err_module_cycle)
+ << ModuleName << CyclePath;
+ return ModuleLoadResult();
+ }
+
+ // Check whether we have already attempted to build this module (but
+ // failed).
+ if (getPreprocessorOpts().FailedModules &&
+ getPreprocessorOpts().FailedModules->hasAlreadyFailed(ModuleName)) {
+ getDiagnostics().Report(ModuleNameLoc, diag::err_module_not_built)
+ << ModuleName
+ << SourceRange(ImportLoc, ModuleNameLoc);
+ ModuleBuildFailed = true;
+ return ModuleLoadResult();
+ }
+
+ // Try to compile and then load the module.
+ if (!compileAndLoadModule(*this, ImportLoc, ModuleNameLoc, Module,
+ ModuleFileName)) {
+ assert(getDiagnostics().hasErrorOccurred() &&
+ "undiagnosed error in compileAndLoadModule");
+ if (getPreprocessorOpts().FailedModules)
+ getPreprocessorOpts().FailedModules->addFailed(ModuleName);
+ KnownModules[Path[0].first] = nullptr;
+ ModuleBuildFailed = true;
+ return ModuleLoadResult();
+ }
+
+ // Okay, we've rebuilt and now loaded the module.
+ break;
+ }
+
+ case ASTReader::VersionMismatch:
+ case ASTReader::ConfigurationMismatch:
+ case ASTReader::HadErrors:
+ ModuleLoader::HadFatalFailure = true;
+ // FIXME: The ASTReader will already have complained, but can we shoehorn
+ // that diagnostic information into a more useful form?
+ KnownModules[Path[0].first] = nullptr;
+ return ModuleLoadResult();
+
+ case ASTReader::Failure:
+ ModuleLoader::HadFatalFailure = true;
+ // Already complained, but note now that we failed.
+ KnownModules[Path[0].first] = nullptr;
+ ModuleBuildFailed = true;
+ return ModuleLoadResult();
+ }
+
+ // Cache the result of this top-level module lookup for later.
+ Known = KnownModules.insert(std::make_pair(Path[0].first, Module)).first;
+ }
+
+ // If we never found the module, fail.
+ if (!Module)
+ return ModuleLoadResult();
+
+ // Verify that the rest of the module path actually corresponds to
+ // a submodule.
+ if (Path.size() > 1) {
+ for (unsigned I = 1, N = Path.size(); I != N; ++I) {
+ StringRef Name = Path[I].first->getName();
+ clang::Module *Sub = Module->findSubmodule(Name);
+
+ if (!Sub) {
+ // Attempt to perform typo correction to find a module name that works.
+ SmallVector<StringRef, 2> Best;
+ unsigned BestEditDistance = (std::numeric_limits<unsigned>::max)();
+
+ for (clang::Module::submodule_iterator J = Module->submodule_begin(),
+ JEnd = Module->submodule_end();
+ J != JEnd; ++J) {
+ unsigned ED = Name.edit_distance((*J)->Name,
+ /*AllowReplacements=*/true,
+ BestEditDistance);
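+ // Keep every name tied for the best distance; a strictly better match
+ // resets the candidate list.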
+ if (ED <= BestEditDistance) {
+ if (ED < BestEditDistance) {
+ Best.clear();
+ BestEditDistance = ED;
+ }
+
+ Best.push_back((*J)->Name);
+ }
+ }
+
+ // If there was a clear winner, use it.
+ if (Best.size() == 1) {
+ getDiagnostics().Report(Path[I].second,
+ diag::err_no_submodule_suggest)
+ << Path[I].first << Module->getFullModuleName() << Best[0]
+ << SourceRange(Path[0].second, Path[I-1].second)
+ << FixItHint::CreateReplacement(SourceRange(Path[I].second),
+ Best[0]);
+
+ Sub = Module->findSubmodule(Best[0]);
+ }
+ }
+
+ if (!Sub) {
+ // No submodule by this name. Complain, and don't look for further
+ // submodules.
+ getDiagnostics().Report(Path[I].second, diag::err_no_submodule)
+ << Path[I].first << Module->getFullModuleName()
+ << SourceRange(Path[0].second, Path[I-1].second);
+ break;
+ }
+
+ Module = Sub;
+ }
+ }
+
+ // Don't make the module visible if we are in the implementation.
+ if (ModuleName == getLangOpts().ImplementationOfModule)
+ return ModuleLoadResult(Module, false);
+
+ // Make the named module visible, if it's not already part of the module
+ // we are parsing.
+ if (ModuleName != getLangOpts().CurrentModule) {
+ if (!Module->IsFromModuleFile) {
+ // We have an umbrella header or directory that doesn't actually include
+ // all of the headers within the directory it covers. Complain about
+ // this missing submodule and recover by forgetting that we ever saw
+ // this submodule.
+ // FIXME: Should we detect this at module load time? It seems fairly
+ // expensive (and rare).
+ getDiagnostics().Report(ImportLoc, diag::warn_missing_submodule)
+ << Module->getFullModuleName()
+ << SourceRange(Path.front().second, Path.back().second);
+
+ return ModuleLoadResult(nullptr, true);
+ }
+
+ // Check whether this module is available.
+ clang::Module::Requirement Requirement;
+ clang::Module::UnresolvedHeaderDirective MissingHeader;
+ if (!Module->isAvailable(getLangOpts(), getTarget(), Requirement,
+ MissingHeader)) {
+ if (MissingHeader.FileNameLoc.isValid()) {
+ getDiagnostics().Report(MissingHeader.FileNameLoc,
+ diag::err_module_header_missing)
+ << MissingHeader.IsUmbrella << MissingHeader.FileName;
+ } else {
+ getDiagnostics().Report(ImportLoc, diag::err_module_unavailable)
+ << Module->getFullModuleName()
+ << Requirement.second << Requirement.first
+ << SourceRange(Path.front().second, Path.back().second);
+ }
+ LastModuleImportLoc = ImportLoc;
+ LastModuleImportResult = ModuleLoadResult();
+ return ModuleLoadResult();
+ }
+
+ ModuleManager->makeModuleVisible(Module, Visibility, ImportLoc);
+ }
+
+ // Check for any configuration macros that have changed.
+ clang::Module *TopModule = Module->getTopLevelModule();
+ for (unsigned I = 0, N = TopModule->ConfigMacros.size(); I != N; ++I) {
+ checkConfigMacro(getPreprocessor(), TopModule->ConfigMacros[I],
+ Module, ImportLoc);
+ }
+
+ LastModuleImportLoc = ImportLoc;
+ LastModuleImportResult = ModuleLoadResult(Module, false);
+ return LastModuleImportResult;
+}
+
+void CompilerInstance::makeModuleVisible(Module *Mod,
+ Module::NameVisibilityKind Visibility,
+ SourceLocation ImportLoc) {
+ if (!ModuleManager)
+ createModuleManager();
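+ // If we still could not create a module manager, there is nothing to make
+ // visible.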
+ if (!ModuleManager)
+ return;
+
+ ModuleManager->makeModuleVisible(Mod, Visibility, ImportLoc);
+}
+
+GlobalModuleIndex *CompilerInstance::loadGlobalModuleIndex(
+ SourceLocation TriggerLoc) {
+ if (getPreprocessor().getHeaderSearchInfo().getModuleCachePath().empty())
+ return nullptr;
+ if (!ModuleManager)
+ createModuleManager();
+ // Can't do anything if we don't have the module manager.
+ if (!ModuleManager)
+ return nullptr;
+ // Get an existing global index. This loads it if not already
+ // loaded.
+ ModuleManager->loadGlobalIndex();
+ GlobalModuleIndex *GlobalIndex = ModuleManager->getGlobalIndex();
+ // If the global index doesn't exist, create it.
+ if (!GlobalIndex && shouldBuildGlobalModuleIndex() && hasFileManager() &&
+ hasPreprocessor()) {
+ llvm::sys::fs::create_directories(
+ getPreprocessor().getHeaderSearchInfo().getModuleCachePath());
+ GlobalModuleIndex::writeIndex(
+ getFileManager(), getPCHContainerReader(),
+ getPreprocessor().getHeaderSearchInfo().getModuleCachePath());
+ ModuleManager->resetForReload();
+ ModuleManager->loadGlobalIndex();
+ GlobalIndex = ModuleManager->getGlobalIndex();
+ }
+ // For fix-it messages that suggest a missing import, the global index must
+ // cover all modules, so we extend it to do so here.
+ if (!HaveFullGlobalModuleIndex && GlobalIndex && !buildingModule()) {
+ ModuleMap &MMap = getPreprocessor().getHeaderSearchInfo().getModuleMap();
+ bool RecreateIndex = false;
+ for (ModuleMap::module_iterator I = MMap.module_begin(),
+ E = MMap.module_end(); I != E; ++I) {
+ Module *TheModule = I->second;
+ const FileEntry *Entry = TheModule->getASTFile();
+ if (!Entry) {
+ SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Path;
+ Path.push_back(std::make_pair(
+ getPreprocessor().getIdentifierInfo(TheModule->Name), TriggerLoc));
+ std::reverse(Path.begin(), Path.end());
+ // Load a module as hidden. This also adds it to the global index.
+ loadModule(TheModule->DefinitionLoc, Path, Module::Hidden, false);
+ RecreateIndex = true;
+ }
+ }
+ if (RecreateIndex) {
+ GlobalModuleIndex::writeIndex(
+ getFileManager(), getPCHContainerReader(),
+ getPreprocessor().getHeaderSearchInfo().getModuleCachePath());
+ ModuleManager->resetForReload();
+ ModuleManager->loadGlobalIndex();
+ GlobalIndex = ModuleManager->getGlobalIndex();
+ }
+ HaveFullGlobalModuleIndex = true;
+ }
+ return GlobalIndex;
+}
+
+// Check global module index for missing imports.
+bool
+CompilerInstance::lookupMissingImports(StringRef Name,
+ SourceLocation TriggerLoc) {
+ // Look for the symbol in non-imported modules, but only when we are not
+ // in the middle of building a module ourselves.
+ if (!buildingModule()) {
+ // Load global module index, or retrieve a previously loaded one.
+ GlobalModuleIndex *GlobalIndex = loadGlobalModuleIndex(
+ TriggerLoc);
+
+ // We can only do the lookup if we have a global index.
+ if (GlobalIndex) {
+ GlobalModuleIndex::HitSet FoundModules;
+
+ // Find the modules that reference the identifier.
+ // Note that this only finds top-level modules.
+ // We'll let diagnoseTypo find the actual declaration module.
+ if (GlobalIndex->lookupIdentifier(Name, FoundModules))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void CompilerInstance::resetAndLeakSema() { BuryPointer(takeSema()); }
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
new file mode 100644
index 0000000..3a32f47
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
@@ -0,0 +1,2319 @@
+//===--- CompilerInvocation.cpp -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "TestModuleFileExtension.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/Version.h"
+#include "clang/Config/config.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/Util.h"
+#include "clang/Frontend/CompilerInvocation.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/LangStandard.h"
+#include "clang/Frontend/Utils.h"
+#include "clang/Lex/HeaderSearchOptions.h"
+#include "clang/Serialization/ASTReader.h"
+#include "clang/Serialization/ModuleFileExtension.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Linker/Linker.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Option/OptTable.h"
+#include "llvm/Option/Option.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Target/TargetOptions.h"
+#include <atomic>
+#include <memory>
+#include <sys/stat.h>
+#include <system_error>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Initialization.
+//===----------------------------------------------------------------------===//
+
+CompilerInvocationBase::CompilerInvocationBase()
+ : LangOpts(new LangOptions()), TargetOpts(new TargetOptions()),
+ DiagnosticOpts(new DiagnosticOptions()),
+ HeaderSearchOpts(new HeaderSearchOptions()),
+ PreprocessorOpts(new PreprocessorOptions()) {}
+
+CompilerInvocationBase::CompilerInvocationBase(const CompilerInvocationBase &X)
+ : RefCountedBase<CompilerInvocation>(),
+ LangOpts(new LangOptions(*X.getLangOpts())),
+ TargetOpts(new TargetOptions(X.getTargetOpts())),
+ DiagnosticOpts(new DiagnosticOptions(X.getDiagnosticOpts())),
+ HeaderSearchOpts(new HeaderSearchOptions(X.getHeaderSearchOpts())),
+ PreprocessorOpts(new PreprocessorOptions(X.getPreprocessorOpts())) {}
+
+CompilerInvocationBase::~CompilerInvocationBase() {}
+
+//===----------------------------------------------------------------------===//
+// Deserialization (from args)
+//===----------------------------------------------------------------------===//
+
+using namespace clang::driver;
+using namespace clang::driver::options;
+using namespace llvm::opt;
+
+static unsigned getOptimizationLevel(ArgList &Args, InputKind IK,
+ DiagnosticsEngine &Diags) {
+ unsigned DefaultOpt = 0;
+ if (IK == IK_OpenCL && !Args.hasArg(OPT_cl_opt_disable))
+ DefaultOpt = 2;
+
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
+ if (A->getOption().matches(options::OPT_O0))
+ return 0;
+
+ if (A->getOption().matches(options::OPT_Ofast))
+ return 3;
+
+ assert(A->getOption().matches(options::OPT_O));
+
+ StringRef S(A->getValue());
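+ // -Os and -Oz map to -O2 here; the size level itself is recovered
+ // separately by getOptimizationLevelSize().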
+ if (S == "s" || S == "z" || S.empty())
+ return 2;
+
+ return getLastArgIntValue(Args, OPT_O, DefaultOpt, Diags);
+ }
+
+ return DefaultOpt;
+}
+
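+/// Map -Os to size level 1 and -Oz to size level 2; any other optimization
+/// flag implies no size optimization.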
+static unsigned getOptimizationLevelSize(ArgList &Args) {
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
+ if (A->getOption().matches(options::OPT_O)) {
+ switch (A->getValue()[0]) {
+ default:
+ return 0;
+ case 's':
+ return 1;
+ case 'z':
+ return 2;
+ }
+ }
+ }
+ return 0;
+}
+
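+/// Collect the diagnostic group names requested through -W/-R style options
+/// in \p Group into \p Diagnostics.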
+static void addDiagnosticArgs(ArgList &Args, OptSpecifier Group,
+ OptSpecifier GroupWithValue,
+ std::vector<std::string> &Diagnostics) {
+ for (Arg *A : Args.filtered(Group)) {
+ if (A->getOption().getKind() == Option::FlagClass) {
+ // The argument is a pure flag (such as OPT_Wall or OPT_Wdeprecated). Add
+ // its name (minus the "W" or "R" at the beginning) to the warning list.
+ Diagnostics.push_back(A->getOption().getName().drop_front(1));
+ } else if (A->getOption().matches(GroupWithValue)) {
+ // This is -Wfoo= or -Rfoo=, where foo is the name of the diagnostic group.
+ Diagnostics.push_back(A->getOption().getName().drop_front(1).rtrim("=-"));
+ } else {
+ // Otherwise, add its value (for OPT_W_Joined and similar).
+ for (const char *Arg : A->getValues())
+ Diagnostics.emplace_back(Arg);
+ }
+ }
+}
+
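+/// Collect the values of all -fno-builtin-<name> options that name actual
+/// builtin functions.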
+static void getAllNoBuiltinFuncValues(ArgList &Args,
+ std::vector<std::string> &Funcs) {
+ SmallVector<const char *, 8> Values;
+ for (const auto &Arg : Args) {
+ const Option &O = Arg->getOption();
+ if (O.matches(options::OPT_fno_builtin_)) {
+ const char *FuncName = Arg->getValue();
+ if (Builtin::Context::isBuiltinFunc(FuncName))
+ Values.push_back(FuncName);
+ }
+ }
+ Funcs.insert(Funcs.end(), Values.begin(), Values.end());
+}
+
+static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
+ DiagnosticsEngine &Diags) {
+ using namespace options;
+ bool Success = true;
+ if (Arg *A = Args.getLastArg(OPT_analyzer_store)) {
+ StringRef Name = A->getValue();
+ AnalysisStores Value = llvm::StringSwitch<AnalysisStores>(Name)
+#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATFN) \
+ .Case(CMDFLAG, NAME##Model)
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+ .Default(NumStores);
+ if (Value == NumStores) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << Name;
+ Success = false;
+ } else {
+ Opts.AnalysisStoreOpt = Value;
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_analyzer_constraints)) {
+ StringRef Name = A->getValue();
+ AnalysisConstraints Value = llvm::StringSwitch<AnalysisConstraints>(Name)
+#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATFN) \
+ .Case(CMDFLAG, NAME##Model)
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+ .Default(NumConstraints);
+ if (Value == NumConstraints) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << Name;
+ Success = false;
+ } else {
+ Opts.AnalysisConstraintsOpt = Value;
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_analyzer_output)) {
+ StringRef Name = A->getValue();
+ AnalysisDiagClients Value = llvm::StringSwitch<AnalysisDiagClients>(Name)
+#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATFN) \
+ .Case(CMDFLAG, PD_##NAME)
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+ .Default(NUM_ANALYSIS_DIAG_CLIENTS);
+ if (Value == NUM_ANALYSIS_DIAG_CLIENTS) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << Name;
+ Success = false;
+ } else {
+ Opts.AnalysisDiagOpt = Value;
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_analyzer_purge)) {
+ StringRef Name = A->getValue();
+ AnalysisPurgeMode Value = llvm::StringSwitch<AnalysisPurgeMode>(Name)
+#define ANALYSIS_PURGE(NAME, CMDFLAG, DESC) \
+ .Case(CMDFLAG, NAME)
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+ .Default(NumPurgeModes);
+ if (Value == NumPurgeModes) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << Name;
+ Success = false;
+ } else {
+ Opts.AnalysisPurgeOpt = Value;
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_analyzer_inlining_mode)) {
+ StringRef Name = A->getValue();
+ AnalysisInliningMode Value = llvm::StringSwitch<AnalysisInliningMode>(Name)
+#define ANALYSIS_INLINING_MODE(NAME, CMDFLAG, DESC) \
+ .Case(CMDFLAG, NAME)
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+ .Default(NumInliningModes);
+ if (Value == NumInliningModes) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << Name;
+ Success = false;
+ } else {
+ Opts.InliningMode = Value;
+ }
+ }
+
+ Opts.ShowCheckerHelp = Args.hasArg(OPT_analyzer_checker_help);
+ Opts.DisableAllChecks = Args.hasArg(OPT_analyzer_disable_all_checks);
+
+ Opts.visualizeExplodedGraphWithGraphViz =
+ Args.hasArg(OPT_analyzer_viz_egraph_graphviz);
+ Opts.visualizeExplodedGraphWithUbiGraph =
+ Args.hasArg(OPT_analyzer_viz_egraph_ubigraph);
+ Opts.NoRetryExhausted = Args.hasArg(OPT_analyzer_disable_retry_exhausted);
+ Opts.AnalyzeAll = Args.hasArg(OPT_analyzer_opt_analyze_headers);
+ Opts.AnalyzerDisplayProgress = Args.hasArg(OPT_analyzer_display_progress);
+ Opts.AnalyzeNestedBlocks =
+ Args.hasArg(OPT_analyzer_opt_analyze_nested_blocks);
+ Opts.eagerlyAssumeBinOpBifurcation = Args.hasArg(OPT_analyzer_eagerly_assume);
+ Opts.AnalyzeSpecificFunction = Args.getLastArgValue(OPT_analyze_function);
+ Opts.UnoptimizedCFG = Args.hasArg(OPT_analysis_UnoptimizedCFG);
+ Opts.TrimGraph = Args.hasArg(OPT_trim_egraph);
+ Opts.maxBlockVisitOnPath =
+ getLastArgIntValue(Args, OPT_analyzer_max_loop, 4, Diags);
+ Opts.PrintStats = Args.hasArg(OPT_analyzer_stats);
+ Opts.InlineMaxStackDepth =
+ getLastArgIntValue(Args, OPT_analyzer_inline_max_stack_depth,
+ Opts.InlineMaxStackDepth, Diags);
+
+ Opts.CheckersControlList.clear();
+ for (const Arg *A :
+ Args.filtered(OPT_analyzer_checker, OPT_analyzer_disable_checker)) {
+ A->claim();
+ bool enable = (A->getOption().getID() == OPT_analyzer_checker);
+ // We can have a list of comma-separated checker names, e.g.:
+ // '-analyzer-checker=cocoa,unix'
+ StringRef checkerList = A->getValue();
+ SmallVector<StringRef, 4> checkers;
+ checkerList.split(checkers, ",");
+ for (StringRef checker : checkers)
+ Opts.CheckersControlList.emplace_back(checker, enable);
+ }
+
+ // Go through the analyzer configuration options.
+ for (const Arg *A : Args.filtered(OPT_analyzer_config)) {
+ A->claim();
+ // We can have a list of comma-separated config names, e.g.:
+ // '-analyzer-config key1=val1,key2=val2'
+ StringRef configList = A->getValue();
+ SmallVector<StringRef, 4> configVals;
+ configList.split(configVals, ",");
+ for (unsigned i = 0, e = configVals.size(); i != e; ++i) {
+ StringRef key, val;
+ std::tie(key, val) = configVals[i].split("=");
+ if (val.empty()) {
+ Diags.Report(SourceLocation(),
+ diag::err_analyzer_config_no_value) << configVals[i];
+ Success = false;
+ break;
+ }
+ if (val.find('=') != StringRef::npos) {
+ Diags.Report(SourceLocation(),
+ diag::err_analyzer_config_multiple_values)
+ << configVals[i];
+ Success = false;
+ break;
+ }
+ Opts.Config[key] = val;
+ }
+ }
+
+ return Success;
+}
+
+static bool ParseMigratorArgs(MigratorOptions &Opts, ArgList &Args) {
+ Opts.NoNSAllocReallocError = Args.hasArg(OPT_migrator_no_nsalloc_error);
+ Opts.NoFinalizeRemoval = Args.hasArg(OPT_migrator_no_finalize_removal);
+ return true;
+}
+
+static void ParseCommentArgs(CommentOptions &Opts, ArgList &Args) {
+ Opts.BlockCommandNames = Args.getAllArgValues(OPT_fcomment_block_commands);
+ Opts.ParseAllComments = Args.hasArg(OPT_fparse_all_comments);
+}
+
+static StringRef getCodeModel(ArgList &Args, DiagnosticsEngine &Diags) {
+ if (Arg *A = Args.getLastArg(OPT_mcode_model)) {
+ StringRef Value = A->getValue();
+ if (Value == "small" || Value == "kernel" || Value == "medium" ||
+ Value == "large")
+ return Value;
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Value;
+ }
+ return "default";
+}
+
+/// \brief Create a new Regex instance out of the string value in \p RpassArg.
+/// Returns the regex, or null (after issuing a diagnostic) if the pattern is
+/// invalid.
+static std::shared_ptr<llvm::Regex>
+GenerateOptimizationRemarkRegex(DiagnosticsEngine &Diags, ArgList &Args,
+ Arg *RpassArg) {
+ StringRef Val = RpassArg->getValue();
+ std::string RegexError;
+ std::shared_ptr<llvm::Regex> Pattern = std::make_shared<llvm::Regex>(Val);
+ if (!Pattern->isValid(RegexError)) {
+ Diags.Report(diag::err_drv_optimization_remark_pattern)
+ << RegexError << RpassArg->getAsString(Args);
+ Pattern.reset();
+ }
+ return Pattern;
+}
+
+static bool parseDiagnosticLevelMask(StringRef FlagName,
+ const std::vector<std::string> &Levels,
+ DiagnosticsEngine *Diags,
+ DiagnosticLevelMask &M) {
+ bool Success = true;
+ for (const auto &Level : Levels) {
+ DiagnosticLevelMask const PM =
+ llvm::StringSwitch<DiagnosticLevelMask>(Level)
+ .Case("note", DiagnosticLevelMask::Note)
+ .Case("remark", DiagnosticLevelMask::Remark)
+ .Case("warning", DiagnosticLevelMask::Warning)
+ .Case("error", DiagnosticLevelMask::Error)
+ .Default(DiagnosticLevelMask::None);
+ if (PM == DiagnosticLevelMask::None) {
+ Success = false;
+ if (Diags)
+ Diags->Report(diag::err_drv_invalid_value) << FlagName << Level;
+ }
+ M = M | PM;
+ }
+ return Success;
+}
+
+static void parseSanitizerKinds(StringRef FlagName,
+ const std::vector<std::string> &Sanitizers,
+ DiagnosticsEngine &Diags, SanitizerSet &S) {
+ for (const auto &Sanitizer : Sanitizers) {
+ SanitizerMask K = parseSanitizerValue(Sanitizer, /*AllowGroups=*/false);
+ if (K == 0)
+ Diags.Report(diag::err_drv_invalid_value) << FlagName << Sanitizer;
+ else
+ S.set(K, true);
+ }
+}
+
+static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
+ DiagnosticsEngine &Diags,
+ const TargetOptions &TargetOpts) {
+ using namespace options;
+ bool Success = true;
+ llvm::Triple Triple = llvm::Triple(TargetOpts.Triple);
+
+ unsigned OptimizationLevel = getOptimizationLevel(Args, IK, Diags);
+ // TODO: This could be done in Driver
+ unsigned MaxOptLevel = 3;
+ if (OptimizationLevel > MaxOptLevel) {
+ // If the optimization level is not supported, warn and fall back on the
+ // maximum supported level.
+ Diags.Report(diag::warn_drv_optimization_value)
+ << Args.getLastArg(OPT_O)->getAsString(Args) << "-O" << MaxOptLevel;
+ OptimizationLevel = MaxOptLevel;
+ }
+ Opts.OptimizationLevel = OptimizationLevel;
+
+ // We must always run at least the always inlining pass.
+ Opts.setInlining(
+ (Opts.OptimizationLevel > 1) ? CodeGenOptions::NormalInlining
+ : CodeGenOptions::OnlyAlwaysInlining);
+ // -fno-inline-functions overrides OptimizationLevel > 1.
+ Opts.NoInline = Args.hasArg(OPT_fno_inline);
+ Opts.setInlining(Args.hasArg(OPT_fno_inline_functions) ?
+ CodeGenOptions::OnlyAlwaysInlining : Opts.getInlining());
+
+ if (Arg *A = Args.getLastArg(OPT_fveclib)) {
+ StringRef Name = A->getValue();
+ if (Name == "Accelerate")
+ Opts.setVecLib(CodeGenOptions::Accelerate);
+ else if (Name == "none")
+ Opts.setVecLib(CodeGenOptions::NoLibrary);
+ else
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_debug_info_kind_EQ)) {
+ unsigned Val =
+ llvm::StringSwitch<unsigned>(A->getValue())
+ .Case("line-tables-only", CodeGenOptions::DebugLineTablesOnly)
+ .Case("limited", CodeGenOptions::LimitedDebugInfo)
+ .Case("standalone", CodeGenOptions::FullDebugInfo)
+ .Default(~0U);
+ if (Val == ~0U)
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args)
+ << A->getValue();
+ else
+ Opts.setDebugInfo(static_cast<CodeGenOptions::DebugInfoKind>(Val));
+ }
+ if (Arg *A = Args.getLastArg(OPT_debugger_tuning_EQ)) {
+ unsigned Val = llvm::StringSwitch<unsigned>(A->getValue())
+ .Case("gdb", CodeGenOptions::DebuggerKindGDB)
+ .Case("lldb", CodeGenOptions::DebuggerKindLLDB)
+ .Case("sce", CodeGenOptions::DebuggerKindSCE)
+ .Default(~0U);
+ if (Val == ~0U)
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args)
+ << A->getValue();
+ else
+ Opts.setDebuggerTuning(static_cast<CodeGenOptions::DebuggerKind>(Val));
+ }
+ Opts.DwarfVersion = getLastArgIntValue(Args, OPT_dwarf_version_EQ, 0, Diags);
+ Opts.DebugColumnInfo = Args.hasArg(OPT_dwarf_column_info);
+ Opts.EmitCodeView = Args.hasArg(OPT_gcodeview);
+ Opts.SplitDwarfFile = Args.getLastArgValue(OPT_split_dwarf_file);
+ Opts.DebugTypeExtRefs = Args.hasArg(OPT_dwarf_ext_refs);
+ Opts.DebugExplicitImport = Triple.isPS4CPU();
+
+ for (const auto &Arg : Args.getAllArgValues(OPT_fdebug_prefix_map_EQ))
+ Opts.DebugPrefixMap.insert(StringRef(Arg).split('='));
+
+ if (const Arg *A =
+ Args.getLastArg(OPT_emit_llvm_uselists, OPT_no_emit_llvm_uselists))
+ Opts.EmitLLVMUseLists = A->getOption().getID() == OPT_emit_llvm_uselists;
+
+ Opts.DisableLLVMOpts = Args.hasArg(OPT_disable_llvm_optzns);
+ Opts.DisableLLVMPasses = Args.hasArg(OPT_disable_llvm_passes);
+ Opts.DisableRedZone = Args.hasArg(OPT_disable_red_zone);
+ Opts.ForbidGuardVariables = Args.hasArg(OPT_fforbid_guard_variables);
+ Opts.UseRegisterSizedBitfieldAccess = Args.hasArg(
+ OPT_fuse_register_sized_bitfield_access);
+ Opts.RelaxedAliasing = Args.hasArg(OPT_relaxed_aliasing);
+ Opts.StructPathTBAA = !Args.hasArg(OPT_no_struct_path_tbaa);
+ Opts.DwarfDebugFlags = Args.getLastArgValue(OPT_dwarf_debug_flags);
+ Opts.MergeAllConstants = !Args.hasArg(OPT_fno_merge_all_constants);
+ Opts.NoCommon = Args.hasArg(OPT_fno_common);
+ Opts.NoImplicitFloat = Args.hasArg(OPT_no_implicit_float);
+ Opts.OptimizeSize = getOptimizationLevelSize(Args);
+ Opts.SimplifyLibCalls = !(Args.hasArg(OPT_fno_builtin) ||
+ Args.hasArg(OPT_ffreestanding));
+ if (Opts.SimplifyLibCalls)
+ getAllNoBuiltinFuncValues(Args, Opts.NoBuiltinFuncs);
+ Opts.UnrollLoops =
+ Args.hasFlag(OPT_funroll_loops, OPT_fno_unroll_loops,
+ (Opts.OptimizationLevel > 1 && !Opts.OptimizeSize));
+ Opts.RerollLoops = Args.hasArg(OPT_freroll_loops);
+
+ Opts.DisableIntegratedAS = Args.hasArg(OPT_fno_integrated_as);
+ Opts.Autolink = !Args.hasArg(OPT_fno_autolink);
+ Opts.SampleProfileFile = Args.getLastArgValue(OPT_fprofile_sample_use_EQ);
+ Opts.ProfileInstrGenerate = Args.hasArg(OPT_fprofile_instr_generate) ||
+ Args.hasArg(OPT_fprofile_instr_generate_EQ);
+ Opts.InstrProfileOutput = Args.getLastArgValue(OPT_fprofile_instr_generate_EQ);
+ Opts.InstrProfileInput = Args.getLastArgValue(OPT_fprofile_instr_use_EQ);
+ Opts.CoverageMapping =
+ Args.hasFlag(OPT_fcoverage_mapping, OPT_fno_coverage_mapping, false);
+ Opts.DumpCoverageMapping = Args.hasArg(OPT_dump_coverage_mapping);
+ Opts.AsmVerbose = Args.hasArg(OPT_masm_verbose);
+ Opts.ObjCAutoRefCountExceptions = Args.hasArg(OPT_fobjc_arc_exceptions);
+ Opts.CXAAtExit = !Args.hasArg(OPT_fno_use_cxa_atexit);
+ Opts.CXXCtorDtorAliases = Args.hasArg(OPT_mconstructor_aliases);
+ Opts.CodeModel = getCodeModel(Args, Diags);
+ Opts.DebugPass = Args.getLastArgValue(OPT_mdebug_pass);
+ Opts.DisableFPElim =
+ (Args.hasArg(OPT_mdisable_fp_elim) || Args.hasArg(OPT_pg));
+ Opts.DisableFree = Args.hasArg(OPT_disable_free);
+ Opts.DisableTailCalls = Args.hasArg(OPT_mdisable_tail_calls);
+ Opts.FloatABI = Args.getLastArgValue(OPT_mfloat_abi);
+ if (Arg *A = Args.getLastArg(OPT_meabi)) {
+ StringRef Value = A->getValue();
+ llvm::EABI EABIVersion = llvm::StringSwitch<llvm::EABI>(Value)
+ .Case("default", llvm::EABI::Default)
+ .Case("4", llvm::EABI::EABI4)
+ .Case("5", llvm::EABI::EABI5)
+ .Case("gnu", llvm::EABI::GNU)
+ .Default(llvm::EABI::Unknown);
+ if (EABIVersion == llvm::EABI::Unknown)
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args)
+ << Value;
+ else
+ Opts.EABIVersion = Value;
+ }
+ Opts.LessPreciseFPMAD = Args.hasArg(OPT_cl_mad_enable);
+ Opts.LimitFloatPrecision = Args.getLastArgValue(OPT_mlimit_float_precision);
+ Opts.NoInfsFPMath = (Args.hasArg(OPT_menable_no_infinities) ||
+ Args.hasArg(OPT_cl_finite_math_only) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math));
+ Opts.NoNaNsFPMath = (Args.hasArg(OPT_menable_no_nans) ||
+ Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
+ Args.hasArg(OPT_cl_finite_math_only) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math));
+ Opts.NoSignedZeros = Args.hasArg(OPT_fno_signed_zeros);
+ Opts.ReciprocalMath = Args.hasArg(OPT_freciprocal_math);
+ Opts.NoZeroInitializedInBSS = Args.hasArg(OPT_mno_zero_initialized_in_bss);
+ Opts.BackendOptions = Args.getAllArgValues(OPT_backend_option);
+ Opts.NumRegisterParameters = getLastArgIntValue(Args, OPT_mregparm, 0, Diags);
+ Opts.NoExecStack = Args.hasArg(OPT_mno_exec_stack);
+ Opts.FatalWarnings = Args.hasArg(OPT_massembler_fatal_warnings);
+ Opts.EnableSegmentedStacks = Args.hasArg(OPT_split_stacks);
+ Opts.RelaxAll = Args.hasArg(OPT_mrelax_all);
+ Opts.IncrementalLinkerCompatible =
+ Args.hasArg(OPT_mincremental_linker_compatible);
+ Opts.OmitLeafFramePointer = Args.hasArg(OPT_momit_leaf_frame_pointer);
+ Opts.SaveTempLabels = Args.hasArg(OPT_msave_temp_labels);
+ Opts.NoDwarfDirectoryAsm = Args.hasArg(OPT_fno_dwarf_directory_asm);
+ Opts.SoftFloat = Args.hasArg(OPT_msoft_float);
+ Opts.StrictEnums = Args.hasArg(OPT_fstrict_enums);
+ Opts.StrictVTablePointers = Args.hasArg(OPT_fstrict_vtable_pointers);
+ Opts.UnsafeFPMath = Args.hasArg(OPT_menable_unsafe_fp_math) ||
+ Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.UnwindTables = Args.hasArg(OPT_munwind_tables);
+ Opts.RelocationModel = Args.getLastArgValue(OPT_mrelocation_model, "pic");
+ Opts.ThreadModel = Args.getLastArgValue(OPT_mthread_model, "posix");
+ if (Opts.ThreadModel != "posix" && Opts.ThreadModel != "single")
+ Diags.Report(diag::err_drv_invalid_value)
+ << Args.getLastArg(OPT_mthread_model)->getAsString(Args)
+ << Opts.ThreadModel;
+ Opts.TrapFuncName = Args.getLastArgValue(OPT_ftrap_function_EQ);
+ Opts.UseInitArray = Args.hasArg(OPT_fuse_init_array);
+
+ Opts.FunctionSections = Args.hasFlag(OPT_ffunction_sections,
+ OPT_fno_function_sections, false);
+ Opts.DataSections = Args.hasFlag(OPT_fdata_sections,
+ OPT_fno_data_sections, false);
+ Opts.UniqueSectionNames = Args.hasFlag(OPT_funique_section_names,
+ OPT_fno_unique_section_names, true);
+
+ Opts.MergeFunctions = Args.hasArg(OPT_fmerge_functions);
+
+ Opts.PrepareForLTO = Args.hasArg(OPT_flto, OPT_flto_EQ);
+ const Arg *A = Args.getLastArg(OPT_flto, OPT_flto_EQ);
+ Opts.EmitFunctionSummary = A && A->containsValue("thin");
+ if (Arg *A = Args.getLastArg(OPT_fthinlto_index_EQ)) {
+ if (IK != IK_LLVM_IR)
+ Diags.Report(diag::err_drv_argument_only_allowed_with)
+ << A->getAsString(Args) << "-x ir";
+ Opts.ThinLTOIndexFile = Args.getLastArgValue(OPT_fthinlto_index_EQ);
+ }
+
+ Opts.MSVolatile = Args.hasArg(OPT_fms_volatile);
+
+ Opts.VectorizeBB = Args.hasArg(OPT_vectorize_slp_aggressive);
+ Opts.VectorizeLoop = Args.hasArg(OPT_vectorize_loops);
+ Opts.VectorizeSLP = Args.hasArg(OPT_vectorize_slp);
+
+ Opts.MainFileName = Args.getLastArgValue(OPT_main_file_name);
+ Opts.VerifyModule = !Args.hasArg(OPT_disable_llvm_verifier);
+
+ Opts.DisableGCov = Args.hasArg(OPT_test_coverage);
+ Opts.EmitGcovArcs = Args.hasArg(OPT_femit_coverage_data);
+ Opts.EmitGcovNotes = Args.hasArg(OPT_femit_coverage_notes);
+ if (Opts.EmitGcovArcs || Opts.EmitGcovNotes) {
+ Opts.CoverageFile = Args.getLastArgValue(OPT_coverage_file);
+ Opts.CoverageExtraChecksum = Args.hasArg(OPT_coverage_cfg_checksum);
+ Opts.CoverageNoFunctionNamesInData =
+ Args.hasArg(OPT_coverage_no_function_names_in_data);
+ Opts.CoverageExitBlockBeforeBody =
+ Args.hasArg(OPT_coverage_exit_block_before_body);
+ if (Args.hasArg(OPT_coverage_version_EQ)) {
+ StringRef CoverageVersion = Args.getLastArgValue(OPT_coverage_version_EQ);
+ if (CoverageVersion.size() != 4) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << Args.getLastArg(OPT_coverage_version_EQ)->getAsString(Args)
+ << CoverageVersion;
+ } else {
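+ // The gcov version tag is exactly four bytes.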
+ memcpy(Opts.CoverageVersion, CoverageVersion.data(), 4);
+ }
+ }
+ }
+
+ Opts.InstrumentFunctions = Args.hasArg(OPT_finstrument_functions);
+ Opts.InstrumentForProfiling = Args.hasArg(OPT_pg);
+ Opts.EmitOpenCLArgMetadata = Args.hasArg(OPT_cl_kernel_arg_info);
+ Opts.CompressDebugSections = Args.hasArg(OPT_compress_debug_sections);
+ Opts.DebugCompilationDir = Args.getLastArgValue(OPT_fdebug_compilation_dir);
+ for (auto A : Args.filtered(OPT_mlink_bitcode_file, OPT_mlink_cuda_bitcode)) {
+ unsigned LinkFlags = llvm::Linker::Flags::None;
+ if (A->getOption().matches(OPT_mlink_cuda_bitcode))
+ LinkFlags = llvm::Linker::Flags::LinkOnlyNeeded |
+ llvm::Linker::Flags::InternalizeLinkedSymbols;
+ Opts.LinkBitcodeFiles.push_back(std::make_pair(LinkFlags, A->getValue()));
+ }
+ Opts.SanitizeCoverageType =
+ getLastArgIntValue(Args, OPT_fsanitize_coverage_type, 0, Diags);
+ Opts.SanitizeCoverageIndirectCalls =
+ Args.hasArg(OPT_fsanitize_coverage_indirect_calls);
+ Opts.SanitizeCoverageTraceBB = Args.hasArg(OPT_fsanitize_coverage_trace_bb);
+ Opts.SanitizeCoverageTraceCmp = Args.hasArg(OPT_fsanitize_coverage_trace_cmp);
+ Opts.SanitizeCoverage8bitCounters =
+ Args.hasArg(OPT_fsanitize_coverage_8bit_counters);
+ Opts.SanitizeMemoryTrackOrigins =
+ getLastArgIntValue(Args, OPT_fsanitize_memory_track_origins_EQ, 0, Diags);
+ Opts.SanitizeMemoryUseAfterDtor =
+ Args.hasArg(OPT_fsanitize_memory_use_after_dtor);
+ Opts.SanitizeCfiCrossDso = Args.hasArg(OPT_fsanitize_cfi_cross_dso);
+ Opts.SSPBufferSize =
+ getLastArgIntValue(Args, OPT_stack_protector_buffer_size, 8, Diags);
+ Opts.StackRealignment = Args.hasArg(OPT_mstackrealign);
+ if (Arg *A = Args.getLastArg(OPT_mstack_alignment)) {
+ StringRef Val = A->getValue();
+ unsigned StackAlignment = Opts.StackAlignment;
+ Val.getAsInteger(10, StackAlignment);
+ Opts.StackAlignment = StackAlignment;
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_mstack_probe_size)) {
+ StringRef Val = A->getValue();
+ unsigned StackProbeSize = Opts.StackProbeSize;
+ Val.getAsInteger(0, StackProbeSize);
+ Opts.StackProbeSize = StackProbeSize;
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_fobjc_dispatch_method_EQ)) {
+ StringRef Name = A->getValue();
+ unsigned Method = llvm::StringSwitch<unsigned>(Name)
+ .Case("legacy", CodeGenOptions::Legacy)
+ .Case("non-legacy", CodeGenOptions::NonLegacy)
+ .Case("mixed", CodeGenOptions::Mixed)
+ .Default(~0U);
+ if (Method == ~0U) {
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
+ Success = false;
+ } else {
+ Opts.setObjCDispatchMethod(
+ static_cast<CodeGenOptions::ObjCDispatchMethodKind>(Method));
+ }
+ }
+
+ Opts.EmulatedTLS =
+ Args.hasFlag(OPT_femulated_tls, OPT_fno_emulated_tls, false);
+
+ if (Arg *A = Args.getLastArg(OPT_ftlsmodel_EQ)) {
+ StringRef Name = A->getValue();
+ unsigned Model = llvm::StringSwitch<unsigned>(Name)
+ .Case("global-dynamic", CodeGenOptions::GeneralDynamicTLSModel)
+ .Case("local-dynamic", CodeGenOptions::LocalDynamicTLSModel)
+ .Case("initial-exec", CodeGenOptions::InitialExecTLSModel)
+ .Case("local-exec", CodeGenOptions::LocalExecTLSModel)
+ .Default(~0U);
+ if (Model == ~0U) {
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
+ Success = false;
+ } else {
+ Opts.setDefaultTLSModel(static_cast<CodeGenOptions::TLSModel>(Model));
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_ffp_contract)) {
+ StringRef Val = A->getValue();
+ if (Val == "fast")
+ Opts.setFPContractMode(CodeGenOptions::FPC_Fast);
+ else if (Val == "on")
+ Opts.setFPContractMode(CodeGenOptions::FPC_On);
+ else if (Val == "off")
+ Opts.setFPContractMode(CodeGenOptions::FPC_Off);
+ else
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Val;
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_fpcc_struct_return, OPT_freg_struct_return)) {
+ if (A->getOption().matches(OPT_fpcc_struct_return)) {
+ Opts.setStructReturnConvention(CodeGenOptions::SRCK_OnStack);
+ } else {
+ assert(A->getOption().matches(OPT_freg_struct_return));
+ Opts.setStructReturnConvention(CodeGenOptions::SRCK_InRegs);
+ }
+ }
+
+ Opts.DependentLibraries = Args.getAllArgValues(OPT_dependent_lib);
+ bool NeedLocTracking = false;
+
+ if (Arg *A = Args.getLastArg(OPT_Rpass_EQ)) {
+ Opts.OptimizationRemarkPattern =
+ GenerateOptimizationRemarkRegex(Diags, Args, A);
+ NeedLocTracking = true;
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_Rpass_missed_EQ)) {
+ Opts.OptimizationRemarkMissedPattern =
+ GenerateOptimizationRemarkRegex(Diags, Args, A);
+ NeedLocTracking = true;
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_Rpass_analysis_EQ)) {
+ Opts.OptimizationRemarkAnalysisPattern =
+ GenerateOptimizationRemarkRegex(Diags, Args, A);
+ NeedLocTracking = true;
+ }
+
+ // If the user requested to use a sample profile for PGO, then the
+ // backend will need to track source location information so the profile
+ // can be incorporated into the IR.
+ if (!Opts.SampleProfileFile.empty())
+ NeedLocTracking = true;
+
+ // If the user requested a flag that requires source locations available in
+ // the backend, make sure that the backend tracks source location information.
+ if (NeedLocTracking && Opts.getDebugInfo() == CodeGenOptions::NoDebugInfo)
+ Opts.setDebugInfo(CodeGenOptions::LocTrackingOnly);
+
+ Opts.RewriteMapFiles = Args.getAllArgValues(OPT_frewrite_map_file);
+
+ // Parse -fsanitize-recover= arguments.
+ // FIXME: Report unrecoverable sanitizers that are incorrectly specified here.
+ parseSanitizerKinds("-fsanitize-recover=",
+ Args.getAllArgValues(OPT_fsanitize_recover_EQ), Diags,
+ Opts.SanitizeRecover);
+ parseSanitizerKinds("-fsanitize-trap=",
+ Args.getAllArgValues(OPT_fsanitize_trap_EQ), Diags,
+ Opts.SanitizeTrap);
+
+ Opts.CudaGpuBinaryFileNames =
+ Args.getAllArgValues(OPT_fcuda_include_gpubinary);
+
+ return Success;
+}
+
+static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
+ ArgList &Args) {
+ using namespace options;
+ Opts.OutputFile = Args.getLastArgValue(OPT_dependency_file);
+ Opts.Targets = Args.getAllArgValues(OPT_MT);
+ Opts.IncludeSystemHeaders = Args.hasArg(OPT_sys_header_deps);
+ Opts.IncludeModuleFiles = Args.hasArg(OPT_module_file_deps);
+ Opts.UsePhonyTargets = Args.hasArg(OPT_MP);
+ Opts.ShowHeaderIncludes = Args.hasArg(OPT_H);
+ Opts.HeaderIncludeOutputFile = Args.getLastArgValue(OPT_header_include_file);
+ Opts.AddMissingHeaderDeps = Args.hasArg(OPT_MG);
+ Opts.PrintShowIncludes = Args.hasArg(OPT_show_includes);
+ Opts.DOTOutputFile = Args.getLastArgValue(OPT_dependency_dot);
+ Opts.ModuleDependencyOutputDir =
+ Args.getLastArgValue(OPT_module_dependency_dir);
+ if (Args.hasArg(OPT_MV))
+ Opts.OutputFormat = DependencyOutputFormat::NMake;
+ // Add sanitizer blacklists as extra dependencies.
+ // They won't be discovered by the regular preprocessor, so
+ // we let make / ninja know about this implicit dependency.
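+ // For example (illustrative): -fdepfile-entry=asan.blacklist makes the
+ // blacklist file show up as an extra prerequisite in the emitted depfile.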
+ Opts.ExtraDeps = Args.getAllArgValues(OPT_fdepfile_entry);
+ auto ModuleFiles = Args.getAllArgValues(OPT_fmodule_file);
+ Opts.ExtraDeps.insert(Opts.ExtraDeps.end(), ModuleFiles.begin(),
+ ModuleFiles.end());
+}
+
+bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
+ DiagnosticsEngine *Diags) {
+ using namespace options;
+ bool Success = true;
+
+ Opts.DiagnosticLogFile = Args.getLastArgValue(OPT_diagnostic_log_file);
+ if (Arg *A =
+ Args.getLastArg(OPT_diagnostic_serialized_file, OPT__serialize_diags))
+ Opts.DiagnosticSerializationFile = A->getValue();
+ Opts.IgnoreWarnings = Args.hasArg(OPT_w);
+ Opts.NoRewriteMacros = Args.hasArg(OPT_Wno_rewrite_macros);
+ Opts.Pedantic = Args.hasArg(OPT_pedantic);
+ Opts.PedanticErrors = Args.hasArg(OPT_pedantic_errors);
+ Opts.ShowCarets = !Args.hasArg(OPT_fno_caret_diagnostics);
+ Opts.ShowColors = Args.hasArg(OPT_fcolor_diagnostics);
+ Opts.ShowColumn = Args.hasFlag(OPT_fshow_column,
+ OPT_fno_show_column,
+ /*Default=*/true);
+ Opts.ShowFixits = !Args.hasArg(OPT_fno_diagnostics_fixit_info);
+ Opts.ShowLocation = !Args.hasArg(OPT_fno_show_source_location);
+ Opts.ShowOptionNames = Args.hasArg(OPT_fdiagnostics_show_option);
+
+ llvm::sys::Process::UseANSIEscapeCodes(Args.hasArg(OPT_fansi_escape_codes));
+
+ // Default behavior is not to show note include stacks.
+ Opts.ShowNoteIncludeStack = false;
+ if (Arg *A = Args.getLastArg(OPT_fdiagnostics_show_note_include_stack,
+ OPT_fno_diagnostics_show_note_include_stack))
+ if (A->getOption().matches(OPT_fdiagnostics_show_note_include_stack))
+ Opts.ShowNoteIncludeStack = true;
+
+ StringRef ShowOverloads =
+ Args.getLastArgValue(OPT_fshow_overloads_EQ, "all");
+ if (ShowOverloads == "best")
+ Opts.setShowOverloads(Ovl_Best);
+ else if (ShowOverloads == "all")
+ Opts.setShowOverloads(Ovl_All);
+ else {
+ Success = false;
+ if (Diags)
+ Diags->Report(diag::err_drv_invalid_value)
+ << Args.getLastArg(OPT_fshow_overloads_EQ)->getAsString(Args)
+ << ShowOverloads;
+ }
+
+ StringRef ShowCategory =
+ Args.getLastArgValue(OPT_fdiagnostics_show_category, "none");
+ if (ShowCategory == "none")
+ Opts.ShowCategories = 0;
+ else if (ShowCategory == "id")
+ Opts.ShowCategories = 1;
+ else if (ShowCategory == "name")
+ Opts.ShowCategories = 2;
+ else {
+ Success = false;
+ if (Diags)
+ Diags->Report(diag::err_drv_invalid_value)
+ << Args.getLastArg(OPT_fdiagnostics_show_category)->getAsString(Args)
+ << ShowCategory;
+ }
+
+ StringRef Format =
+ Args.getLastArgValue(OPT_fdiagnostics_format, "clang");
+ if (Format == "clang")
+ Opts.setFormat(DiagnosticOptions::Clang);
+ else if (Format == "msvc")
+ Opts.setFormat(DiagnosticOptions::MSVC);
+ else if (Format == "msvc-fallback") {
+ Opts.setFormat(DiagnosticOptions::MSVC);
+ Opts.CLFallbackMode = true;
+ } else if (Format == "vi")
+ Opts.setFormat(DiagnosticOptions::Vi);
+ else {
+ Success = false;
+ if (Diags)
+ Diags->Report(diag::err_drv_invalid_value)
+ << Args.getLastArg(OPT_fdiagnostics_format)->getAsString(Args)
+ << Format;
+ }
+
+ Opts.ShowSourceRanges = Args.hasArg(OPT_fdiagnostics_print_source_range_info);
+ Opts.ShowParseableFixits = Args.hasArg(OPT_fdiagnostics_parseable_fixits);
+ Opts.ShowPresumedLoc = !Args.hasArg(OPT_fno_diagnostics_use_presumed_location);
+ Opts.VerifyDiagnostics = Args.hasArg(OPT_verify);
+ DiagnosticLevelMask DiagMask = DiagnosticLevelMask::None;
+ Success &= parseDiagnosticLevelMask("-verify-ignore-unexpected=",
+ Args.getAllArgValues(OPT_verify_ignore_unexpected_EQ),
+ Diags, DiagMask);
+ if (Args.hasArg(OPT_verify_ignore_unexpected))
+ DiagMask = DiagnosticLevelMask::All;
+ Opts.setVerifyIgnoreUnexpected(DiagMask);
+ Opts.ElideType = !Args.hasArg(OPT_fno_elide_type);
+ Opts.ShowTemplateTree = Args.hasArg(OPT_fdiagnostics_show_template_tree);
+ Opts.ErrorLimit = getLastArgIntValue(Args, OPT_ferror_limit, 0, Diags);
+ Opts.MacroBacktraceLimit =
+ getLastArgIntValue(Args, OPT_fmacro_backtrace_limit,
+ DiagnosticOptions::DefaultMacroBacktraceLimit, Diags);
+ Opts.TemplateBacktraceLimit = getLastArgIntValue(
+ Args, OPT_ftemplate_backtrace_limit,
+ DiagnosticOptions::DefaultTemplateBacktraceLimit, Diags);
+ Opts.ConstexprBacktraceLimit = getLastArgIntValue(
+ Args, OPT_fconstexpr_backtrace_limit,
+ DiagnosticOptions::DefaultConstexprBacktraceLimit, Diags);
+ Opts.SpellCheckingLimit = getLastArgIntValue(
+ Args, OPT_fspell_checking_limit,
+ DiagnosticOptions::DefaultSpellCheckingLimit, Diags);
+ Opts.TabStop = getLastArgIntValue(Args, OPT_ftabstop,
+ DiagnosticOptions::DefaultTabStop, Diags);
+ if (Opts.TabStop == 0 || Opts.TabStop > DiagnosticOptions::MaxTabStop) {
+ Opts.TabStop = DiagnosticOptions::DefaultTabStop;
+ if (Diags)
+ Diags->Report(diag::warn_ignoring_ftabstop_value)
+ << Opts.TabStop << DiagnosticOptions::DefaultTabStop;
+ }
+ Opts.MessageLength = getLastArgIntValue(Args, OPT_fmessage_length, 0, Diags);
+ addDiagnosticArgs(Args, OPT_W_Group, OPT_W_value_Group, Opts.Warnings);
+ addDiagnosticArgs(Args, OPT_R_Group, OPT_R_value_Group, Opts.Remarks);
+
+ return Success;
+}
+
+static void ParseFileSystemArgs(FileSystemOptions &Opts, ArgList &Args) {
+ Opts.WorkingDir = Args.getLastArgValue(OPT_working_directory);
+}
+
+/// Parse the argument to the -ftest-module-file-extension
+/// command-line argument.
+///
+/// \returns true on error, false on success.
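+///
+/// The argument is a colon-separated record of the form
+///   <block name>:<major version>:<minor version>:<hashed>:<user info>
+/// For example (an illustrative extension), "testext:1:0:0:info" parses to
+/// BlockName "testext", version 1.0, Hashed false, and UserInfo "info".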
+static bool parseTestModuleFileExtensionArg(StringRef Arg,
+ std::string &BlockName,
+ unsigned &MajorVersion,
+ unsigned &MinorVersion,
+ bool &Hashed,
+ std::string &UserInfo) {
+ SmallVector<StringRef, 5> Args;
+ Arg.split(Args, ':', 5);
+ if (Args.size() < 5)
+ return true;
+
+ BlockName = Args[0];
+ if (Args[1].getAsInteger(10, MajorVersion)) return true;
+ if (Args[2].getAsInteger(10, MinorVersion)) return true;
+ if (Args[3].getAsInteger(2, Hashed)) return true;
+ if (Args.size() > 4)
+ UserInfo = Args[4];
+ return false;
+}
+
+static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
+ DiagnosticsEngine &Diags) {
+ using namespace options;
+ Opts.ProgramAction = frontend::ParseSyntaxOnly;
+ if (const Arg *A = Args.getLastArg(OPT_Action_Group)) {
+ switch (A->getOption().getID()) {
+ default:
+ llvm_unreachable("Invalid option in group!");
+ case OPT_ast_list:
+ Opts.ProgramAction = frontend::ASTDeclList; break;
+ case OPT_ast_dump:
+ case OPT_ast_dump_lookups:
+ Opts.ProgramAction = frontend::ASTDump; break;
+ case OPT_ast_print:
+ Opts.ProgramAction = frontend::ASTPrint; break;
+ case OPT_ast_view:
+ Opts.ProgramAction = frontend::ASTView; break;
+ case OPT_dump_raw_tokens:
+ Opts.ProgramAction = frontend::DumpRawTokens; break;
+ case OPT_dump_tokens:
+ Opts.ProgramAction = frontend::DumpTokens; break;
+ case OPT_S:
+ Opts.ProgramAction = frontend::EmitAssembly; break;
+ case OPT_emit_llvm_bc:
+ Opts.ProgramAction = frontend::EmitBC; break;
+ case OPT_emit_html:
+ Opts.ProgramAction = frontend::EmitHTML; break;
+ case OPT_emit_llvm:
+ Opts.ProgramAction = frontend::EmitLLVM; break;
+ case OPT_emit_llvm_only:
+ Opts.ProgramAction = frontend::EmitLLVMOnly; break;
+ case OPT_emit_codegen_only:
+ Opts.ProgramAction = frontend::EmitCodeGenOnly; break;
+ case OPT_emit_obj:
+ Opts.ProgramAction = frontend::EmitObj; break;
+ case OPT_fixit_EQ:
+ Opts.FixItSuffix = A->getValue();
+ // fall-through!
+ case OPT_fixit:
+ Opts.ProgramAction = frontend::FixIt; break;
+ case OPT_emit_module:
+ Opts.ProgramAction = frontend::GenerateModule; break;
+ case OPT_emit_pch:
+ Opts.ProgramAction = frontend::GeneratePCH; break;
+ case OPT_emit_pth:
+ Opts.ProgramAction = frontend::GeneratePTH; break;
+ case OPT_init_only:
+ Opts.ProgramAction = frontend::InitOnly; break;
+ case OPT_fsyntax_only:
+ Opts.ProgramAction = frontend::ParseSyntaxOnly; break;
+ case OPT_module_file_info:
+ Opts.ProgramAction = frontend::ModuleFileInfo; break;
+ case OPT_verify_pch:
+ Opts.ProgramAction = frontend::VerifyPCH; break;
+ case OPT_print_decl_contexts:
+ Opts.ProgramAction = frontend::PrintDeclContext; break;
+ case OPT_print_preamble:
+ Opts.ProgramAction = frontend::PrintPreamble; break;
+ case OPT_E:
+ Opts.ProgramAction = frontend::PrintPreprocessedInput; break;
+ case OPT_rewrite_macros:
+ Opts.ProgramAction = frontend::RewriteMacros; break;
+ case OPT_rewrite_objc:
+ Opts.ProgramAction = frontend::RewriteObjC; break;
+ case OPT_rewrite_test:
+ Opts.ProgramAction = frontend::RewriteTest; break;
+ case OPT_analyze:
+ Opts.ProgramAction = frontend::RunAnalysis; break;
+ case OPT_migrate:
+ Opts.ProgramAction = frontend::MigrateSource; break;
+ case OPT_Eonly:
+ Opts.ProgramAction = frontend::RunPreprocessorOnly; break;
+ }
+ }
+
+ if (const Arg* A = Args.getLastArg(OPT_plugin)) {
+ Opts.Plugins.emplace_back(A->getValue(0));
+ Opts.ProgramAction = frontend::PluginAction;
+ Opts.ActionName = A->getValue();
+
+ for (const Arg *AA : Args.filtered(OPT_plugin_arg))
+ if (AA->getValue(0) == Opts.ActionName)
+ Opts.PluginArgs.emplace_back(AA->getValue(1));
+ }
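+ // For example (illustrative): -plugin print-fns -plugin-arg-print-fns -help
+ // runs the "print-fns" plugin action and forwards "-help" to it.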
+
+ Opts.AddPluginActions = Args.getAllArgValues(OPT_add_plugin);
+ Opts.AddPluginArgs.resize(Opts.AddPluginActions.size());
+ for (int i = 0, e = Opts.AddPluginActions.size(); i != e; ++i)
+ for (const Arg *A : Args.filtered(OPT_plugin_arg))
+ if (A->getValue(0) == Opts.AddPluginActions[i])
+ Opts.AddPluginArgs[i].emplace_back(A->getValue(1));
+
+ for (const std::string &Arg :
+ Args.getAllArgValues(OPT_ftest_module_file_extension_EQ)) {
+ std::string BlockName;
+ unsigned MajorVersion;
+ unsigned MinorVersion;
+ bool Hashed;
+ std::string UserInfo;
+ if (parseTestModuleFileExtensionArg(Arg, BlockName, MajorVersion,
+ MinorVersion, Hashed, UserInfo)) {
+ Diags.Report(diag::err_test_module_file_extension_format) << Arg;
+
+ continue;
+ }
+
+ // Add the testing module file extension.
+ Opts.ModuleFileExtensions.push_back(
+ new TestModuleFileExtension(BlockName, MajorVersion, MinorVersion,
+ Hashed, UserInfo));
+ }
+
+ if (const Arg *A = Args.getLastArg(OPT_code_completion_at)) {
+ Opts.CodeCompletionAt =
+ ParsedSourceLocation::FromString(A->getValue());
+ if (Opts.CodeCompletionAt.FileName.empty())
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ }
+ Opts.DisableFree = Args.hasArg(OPT_disable_free);
+
+ Opts.OutputFile = Args.getLastArgValue(OPT_o);
+ Opts.Plugins = Args.getAllArgValues(OPT_load);
+ Opts.RelocatablePCH = Args.hasArg(OPT_relocatable_pch);
+ Opts.ShowHelp = Args.hasArg(OPT_help);
+ Opts.ShowStats = Args.hasArg(OPT_print_stats);
+ Opts.ShowTimers = Args.hasArg(OPT_ftime_report);
+ Opts.ShowVersion = Args.hasArg(OPT_version);
+ Opts.ASTMergeFiles = Args.getAllArgValues(OPT_ast_merge);
+ Opts.LLVMArgs = Args.getAllArgValues(OPT_mllvm);
+ Opts.FixWhatYouCan = Args.hasArg(OPT_fix_what_you_can);
+ Opts.FixOnlyWarnings = Args.hasArg(OPT_fix_only_warnings);
+ Opts.FixAndRecompile = Args.hasArg(OPT_fixit_recompile);
+ Opts.FixToTemporaries = Args.hasArg(OPT_fixit_to_temp);
+ Opts.ASTDumpDecls = Args.hasArg(OPT_ast_dump);
+ Opts.ASTDumpFilter = Args.getLastArgValue(OPT_ast_dump_filter);
+ Opts.ASTDumpLookups = Args.hasArg(OPT_ast_dump_lookups);
+ Opts.UseGlobalModuleIndex = !Args.hasArg(OPT_fno_modules_global_index);
+ Opts.GenerateGlobalModuleIndex = Opts.UseGlobalModuleIndex;
+ Opts.ModuleMapFiles = Args.getAllArgValues(OPT_fmodule_map_file);
+ Opts.ModuleFiles = Args.getAllArgValues(OPT_fmodule_file);
+ Opts.ModulesEmbedFiles = Args.getAllArgValues(OPT_fmodules_embed_file_EQ);
+ Opts.ModulesEmbedAllFiles = Args.hasArg(OPT_fmodules_embed_all_files);
+
+ Opts.CodeCompleteOpts.IncludeMacros
+ = Args.hasArg(OPT_code_completion_macros);
+ Opts.CodeCompleteOpts.IncludeCodePatterns
+ = Args.hasArg(OPT_code_completion_patterns);
+ Opts.CodeCompleteOpts.IncludeGlobals
+ = !Args.hasArg(OPT_no_code_completion_globals);
+ Opts.CodeCompleteOpts.IncludeBriefComments
+ = Args.hasArg(OPT_code_completion_brief_comments);
+
+ Opts.OverrideRecordLayoutsFile
+ = Args.getLastArgValue(OPT_foverride_record_layout_EQ);
+ Opts.AuxTriple =
+ llvm::Triple::normalize(Args.getLastArgValue(OPT_aux_triple));
+
+ if (const Arg *A = Args.getLastArg(OPT_arcmt_check,
+ OPT_arcmt_modify,
+ OPT_arcmt_migrate)) {
+ switch (A->getOption().getID()) {
+ default:
+ llvm_unreachable("missed a case");
+ case OPT_arcmt_check:
+ Opts.ARCMTAction = FrontendOptions::ARCMT_Check;
+ break;
+ case OPT_arcmt_modify:
+ Opts.ARCMTAction = FrontendOptions::ARCMT_Modify;
+ break;
+ case OPT_arcmt_migrate:
+ Opts.ARCMTAction = FrontendOptions::ARCMT_Migrate;
+ break;
+ }
+ }
+ Opts.MTMigrateDir = Args.getLastArgValue(OPT_mt_migrate_directory);
+ Opts.ARCMTMigrateReportOut
+ = Args.getLastArgValue(OPT_arcmt_migrate_report_output);
+ Opts.ARCMTMigrateEmitARCErrors
+ = Args.hasArg(OPT_arcmt_migrate_emit_arc_errors);
+
+ if (Args.hasArg(OPT_objcmt_migrate_literals))
+ Opts.ObjCMTAction |= FrontendOptions::ObjCMT_Literals;
+ if (Args.hasArg(OPT_objcmt_migrate_subscripting))
+ Opts.ObjCMTAction |= FrontendOptions::ObjCMT_Subscripting;
+ if (Args.hasArg(OPT_objcmt_migrate_property_dot_syntax))
+ Opts.ObjCMTAction |= FrontendOptions::ObjCMT_PropertyDotSyntax;
+ if (Args.hasArg(OPT_objcmt_migrate_property))
+ Opts.ObjCMTAction |= FrontendOptions::ObjCMT_Property;
+ if (Args.hasArg(OPT_objcmt_migrate_readonly_property))
+ Opts.ObjCMTAction |= FrontendOptions::ObjCMT_ReadonlyProperty;
+ if (Args.hasArg(OPT_objcmt_migrate_readwrite_property))
+ Opts.ObjCMTAction |= FrontendOptions::ObjCMT_ReadwriteProperty;
+ if (Args.hasArg(OPT_objcmt_migrate_annotation))
+ Opts.ObjCMTAction |= FrontendOptions::ObjCMT_Annotation;
+ if (Args.hasArg(OPT_objcmt_returns_innerpointer_property))
+ Opts.ObjCMTAction |= FrontendOptions::ObjCMT_ReturnsInnerPointerProperty;
+ if (Args.hasArg(OPT_objcmt_migrate_instancetype))
+ Opts.ObjCMTAction |= FrontendOptions::ObjCMT_Instancetype;
+ if (Args.hasArg(OPT_objcmt_migrate_nsmacros))
+ Opts.ObjCMTAction |= FrontendOptions::ObjCMT_NsMacros;
+ if (Args.hasArg(OPT_objcmt_migrate_protocol_conformance))
+ Opts.ObjCMTAction |= FrontendOptions::ObjCMT_ProtocolConformance;
+ if (Args.hasArg(OPT_objcmt_atomic_property))
+ Opts.ObjCMTAction |= FrontendOptions::ObjCMT_AtomicProperty;
+ if (Args.hasArg(OPT_objcmt_ns_nonatomic_iosonly))
+ Opts.ObjCMTAction |= FrontendOptions::ObjCMT_NsAtomicIOSOnlyProperty;
+ if (Args.hasArg(OPT_objcmt_migrate_designated_init))
+ Opts.ObjCMTAction |= FrontendOptions::ObjCMT_DesignatedInitializer;
+ if (Args.hasArg(OPT_objcmt_migrate_all))
+ Opts.ObjCMTAction |= FrontendOptions::ObjCMT_MigrateDecls;
+
+ Opts.ObjCMTWhiteListPath = Args.getLastArgValue(OPT_objcmt_whitelist_dir_path);
+
+ if (Opts.ARCMTAction != FrontendOptions::ARCMT_None &&
+ Opts.ObjCMTAction != FrontendOptions::ObjCMT_None) {
+ Diags.Report(diag::err_drv_argument_not_allowed_with)
+ << "ARC migration" << "ObjC migration";
+ }
+
+ InputKind DashX = IK_None;
+ if (const Arg *A = Args.getLastArg(OPT_x)) {
+ DashX = llvm::StringSwitch<InputKind>(A->getValue())
+ .Case("c", IK_C)
+ .Case("cl", IK_OpenCL)
+ .Case("cuda", IK_CUDA)
+ .Case("c++", IK_CXX)
+ .Case("objective-c", IK_ObjC)
+ .Case("objective-c++", IK_ObjCXX)
+ .Case("cpp-output", IK_PreprocessedC)
+ .Case("assembler-with-cpp", IK_Asm)
+ .Case("c++-cpp-output", IK_PreprocessedCXX)
+ .Case("cuda-cpp-output", IK_PreprocessedCuda)
+ .Case("objective-c-cpp-output", IK_PreprocessedObjC)
+ .Case("objc-cpp-output", IK_PreprocessedObjC)
+ .Case("objective-c++-cpp-output", IK_PreprocessedObjCXX)
+ .Case("objc++-cpp-output", IK_PreprocessedObjCXX)
+ .Case("c-header", IK_C)
+ .Case("cl-header", IK_OpenCL)
+ .Case("objective-c-header", IK_ObjC)
+ .Case("c++-header", IK_CXX)
+ .Case("objective-c++-header", IK_ObjCXX)
+ .Cases("ast", "pcm", IK_AST)
+ .Case("ir", IK_LLVM_IR)
+ .Default(IK_None);
+ if (DashX == IK_None)
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ }
+
+ // '-' is the default input if none is given.
+ std::vector<std::string> Inputs = Args.getAllArgValues(OPT_INPUT);
+ Opts.Inputs.clear();
+ if (Inputs.empty())
+ Inputs.push_back("-");
+ for (unsigned i = 0, e = Inputs.size(); i != e; ++i) {
+ InputKind IK = DashX;
+ if (IK == IK_None) {
+ IK = FrontendOptions::getInputKindForExtension(
+ StringRef(Inputs[i]).rsplit('.').second);
+ // FIXME: Remove this hack.
+ if (i == 0)
+ DashX = IK;
+ }
+ Opts.Inputs.emplace_back(std::move(Inputs[i]), IK);
+ }
+
+ return DashX;
+}
+
+std::string CompilerInvocation::GetResourcesPath(const char *Argv0,
+ void *MainAddr) {
+ std::string ClangExecutable =
+ llvm::sys::fs::getMainExecutable(Argv0, MainAddr);
+ StringRef Dir = llvm::sys::path::parent_path(ClangExecutable);
+
+ // Compute the path to the resource directory.
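+ // For example (illustrative), with Argv0 "/usr/local/bin/clang" and no
+ // CLANG_RESOURCE_DIR configured, this computes
+ // "/usr/local/bin/../lib<CLANG_LIBDIR_SUFFIX>/clang/<CLANG_VERSION_STRING>".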
+ StringRef ClangResourceDir(CLANG_RESOURCE_DIR);
+ SmallString<128> P(Dir);
+ if (ClangResourceDir != "")
+ llvm::sys::path::append(P, ClangResourceDir);
+ else
+ llvm::sys::path::append(P, "..", Twine("lib") + CLANG_LIBDIR_SUFFIX,
+ "clang", CLANG_VERSION_STRING);
+
+ return P.str();
+}
+
+static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
+ using namespace options;
+ Opts.Sysroot = Args.getLastArgValue(OPT_isysroot, "/");
+ Opts.Verbose = Args.hasArg(OPT_v);
+ Opts.UseBuiltinIncludes = !Args.hasArg(OPT_nobuiltininc);
+ Opts.UseStandardSystemIncludes = !Args.hasArg(OPT_nostdsysteminc);
+ Opts.UseStandardCXXIncludes = !Args.hasArg(OPT_nostdincxx);
+ if (const Arg *A = Args.getLastArg(OPT_stdlib_EQ))
+ Opts.UseLibcxx = (strcmp(A->getValue(), "libc++") == 0);
+ Opts.ResourceDir = Args.getLastArgValue(OPT_resource_dir);
+ Opts.ModuleCachePath = Args.getLastArgValue(OPT_fmodules_cache_path);
+ Opts.ModuleUserBuildPath = Args.getLastArgValue(OPT_fmodules_user_build_path);
+ Opts.DisableModuleHash = Args.hasArg(OPT_fdisable_module_hash);
+ Opts.ImplicitModuleMaps = Args.hasArg(OPT_fimplicit_module_maps);
+ Opts.ModuleMapFileHomeIsCwd = Args.hasArg(OPT_fmodule_map_file_home_is_cwd);
+ Opts.ModuleCachePruneInterval =
+ getLastArgIntValue(Args, OPT_fmodules_prune_interval, 7 * 24 * 60 * 60);
+ Opts.ModuleCachePruneAfter =
+ getLastArgIntValue(Args, OPT_fmodules_prune_after, 31 * 24 * 60 * 60);
+ Opts.ModulesValidateOncePerBuildSession =
+ Args.hasArg(OPT_fmodules_validate_once_per_build_session);
+ Opts.BuildSessionTimestamp =
+ getLastArgUInt64Value(Args, OPT_fbuild_session_timestamp, 0);
+ Opts.ModulesValidateSystemHeaders =
+ Args.hasArg(OPT_fmodules_validate_system_headers);
+ if (const Arg *A = Args.getLastArg(OPT_fmodule_format_EQ))
+ Opts.ModuleFormat = A->getValue();
+
+ for (const Arg *A : Args.filtered(OPT_fmodules_ignore_macro)) {
+ StringRef MacroDef = A->getValue();
+ Opts.ModulesIgnoreMacros.insert(MacroDef.split('=').first);
+ }
+
+ // Add -I..., -F..., and -index-header-map options in order.
+ bool IsIndexHeaderMap = false;
+ for (const Arg *A : Args.filtered(OPT_I, OPT_F, OPT_index_header_map)) {
+ if (A->getOption().matches(OPT_index_header_map)) {
+ // -index-header-map applies to the next -I or -F.
+ IsIndexHeaderMap = true;
+ continue;
+ }
+
+ frontend::IncludeDirGroup Group =
+ IsIndexHeaderMap ? frontend::IndexHeaderMap : frontend::Angled;
+
+ Opts.AddPath(A->getValue(), Group,
+ /*IsFramework=*/A->getOption().matches(OPT_F), true);
+ IsIndexHeaderMap = false;
+ }
+
+ // Add -iprefix/-iwithprefix/-iwithprefixbefore options.
+ StringRef Prefix = ""; // FIXME: This isn't the correct default prefix.
+ for (const Arg *A :
+ Args.filtered(OPT_iprefix, OPT_iwithprefix, OPT_iwithprefixbefore)) {
+ if (A->getOption().matches(OPT_iprefix))
+ Prefix = A->getValue();
+ else if (A->getOption().matches(OPT_iwithprefix))
+ Opts.AddPath(Prefix.str() + A->getValue(), frontend::After, false, true);
+ else
+ Opts.AddPath(Prefix.str() + A->getValue(), frontend::Angled, false, true);
+ }
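+ // For example (illustrative): -iprefix /opt/gcc/ -iwithprefix include adds
+ // "/opt/gcc/include" to the After group, while -iwithprefixbefore include
+ // would add it to the Angled group instead.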
+
+ for (const Arg *A : Args.filtered(OPT_idirafter))
+ Opts.AddPath(A->getValue(), frontend::After, false, true);
+ for (const Arg *A : Args.filtered(OPT_iquote))
+ Opts.AddPath(A->getValue(), frontend::Quoted, false, true);
+ for (const Arg *A : Args.filtered(OPT_isystem, OPT_iwithsysroot))
+ Opts.AddPath(A->getValue(), frontend::System, false,
+ !A->getOption().matches(OPT_iwithsysroot));
+ for (const Arg *A : Args.filtered(OPT_iframework))
+ Opts.AddPath(A->getValue(), frontend::System, true, true);
+
+ // Add the paths for the various language specific isystem flags.
+ for (const Arg *A : Args.filtered(OPT_c_isystem))
+ Opts.AddPath(A->getValue(), frontend::CSystem, false, true);
+ for (const Arg *A : Args.filtered(OPT_cxx_isystem))
+ Opts.AddPath(A->getValue(), frontend::CXXSystem, false, true);
+ for (const Arg *A : Args.filtered(OPT_objc_isystem))
+ Opts.AddPath(A->getValue(), frontend::ObjCSystem, false, true);
+ for (const Arg *A : Args.filtered(OPT_objcxx_isystem))
+ Opts.AddPath(A->getValue(), frontend::ObjCXXSystem, false, true);
+
+ // Add the internal paths from a driver that detects standard include paths.
+ for (const Arg *A :
+ Args.filtered(OPT_internal_isystem, OPT_internal_externc_isystem)) {
+ frontend::IncludeDirGroup Group = frontend::System;
+ if (A->getOption().matches(OPT_internal_externc_isystem))
+ Group = frontend::ExternCSystem;
+ Opts.AddPath(A->getValue(), Group, false, true);
+ }
+
+ // Add the path prefixes which are implicitly treated as being system headers.
+ for (const Arg *A :
+ Args.filtered(OPT_system_header_prefix, OPT_no_system_header_prefix))
+ Opts.AddSystemHeaderPrefix(
+ A->getValue(), A->getOption().matches(OPT_system_header_prefix));
+
+ for (const Arg *A : Args.filtered(OPT_ivfsoverlay))
+ Opts.AddVFSOverlayFile(A->getValue());
+}
+
+void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
+ LangStandard::Kind LangStd) {
+ // Set some properties which depend solely on the input kind; it would be nice
+ // to move these to the language standard, and have the driver resolve the
+ // input kind + language standard.
+ if (IK == IK_Asm) {
+ Opts.AsmPreprocessor = 1;
+ } else if (IK == IK_ObjC ||
+ IK == IK_ObjCXX ||
+ IK == IK_PreprocessedObjC ||
+ IK == IK_PreprocessedObjCXX) {
+ Opts.ObjC1 = Opts.ObjC2 = 1;
+ }
+
+ if (LangStd == LangStandard::lang_unspecified) {
+ // Based on the base language, pick one.
+ switch (IK) {
+ case IK_None:
+ case IK_AST:
+ case IK_LLVM_IR:
+ llvm_unreachable("Invalid input kind!");
+ case IK_OpenCL:
+ LangStd = LangStandard::lang_opencl;
+ break;
+ case IK_CUDA:
+ case IK_PreprocessedCuda:
+ LangStd = LangStandard::lang_cuda;
+ break;
+ case IK_Asm:
+ case IK_C:
+ case IK_PreprocessedC:
+ case IK_ObjC:
+ case IK_PreprocessedObjC:
+ LangStd = LangStandard::lang_gnu11;
+ break;
+ case IK_CXX:
+ case IK_PreprocessedCXX:
+ case IK_ObjCXX:
+ case IK_PreprocessedObjCXX:
+ LangStd = LangStandard::lang_gnucxx98;
+ break;
+ }
+ }
+
+ const LangStandard &Std = LangStandard::getLangStandardForKind(LangStd);
+ Opts.LineComment = Std.hasLineComments();
+ Opts.C99 = Std.isC99();
+ Opts.C11 = Std.isC11();
+ Opts.CPlusPlus = Std.isCPlusPlus();
+ Opts.CPlusPlus11 = Std.isCPlusPlus11();
+ Opts.CPlusPlus14 = Std.isCPlusPlus14();
+ Opts.CPlusPlus1z = Std.isCPlusPlus1z();
+ Opts.Digraphs = Std.hasDigraphs();
+ Opts.GNUMode = Std.isGNUMode();
+ Opts.GNUInline = Std.isC89();
+ Opts.HexFloats = Std.hasHexFloats();
+ Opts.ImplicitInt = Std.hasImplicitInt();
+
+ // Set OpenCL Version.
+ Opts.OpenCL = LangStd == LangStandard::lang_opencl || IK == IK_OpenCL;
+ if (LangStd == LangStandard::lang_opencl)
+ Opts.OpenCLVersion = 100;
+ else if (LangStd == LangStandard::lang_opencl11)
+ Opts.OpenCLVersion = 110;
+ else if (LangStd == LangStandard::lang_opencl12)
+ Opts.OpenCLVersion = 120;
+ else if (LangStd == LangStandard::lang_opencl20)
+ Opts.OpenCLVersion = 200;
+
+ // OpenCL has some additional defaults.
+ if (Opts.OpenCL) {
+ Opts.AltiVec = 0;
+ Opts.ZVector = 0;
+ Opts.CXXOperatorNames = 1;
+ Opts.LaxVectorConversions = 0;
+ Opts.DefaultFPContract = 1;
+ Opts.NativeHalfType = 1;
+ }
+
+ Opts.CUDA = IK == IK_CUDA || IK == IK_PreprocessedCuda ||
+ LangStd == LangStandard::lang_cuda;
+
+ // OpenCL and C++ both have bool, true, false keywords.
+ Opts.Bool = Opts.OpenCL || Opts.CPlusPlus;
+
+ // OpenCL has the half keyword.
+ Opts.Half = Opts.OpenCL;
+
+ // C++ has wchar_t keyword.
+ Opts.WChar = Opts.CPlusPlus;
+
+ Opts.GNUKeywords = Opts.GNUMode;
+ Opts.CXXOperatorNames = Opts.CPlusPlus;
+
+ Opts.DollarIdents = !Opts.AsmPreprocessor;
+}
+
+/// Attempt to parse a visibility value out of the given argument.
+static Visibility parseVisibility(Arg *arg, ArgList &args,
+ DiagnosticsEngine &diags) {
+ StringRef value = arg->getValue();
+ if (value == "default") {
+ return DefaultVisibility;
+ } else if (value == "hidden" || value == "internal") {
+ return HiddenVisibility;
+ } else if (value == "protected") {
+ // FIXME: diagnose if target does not support protected visibility
+ return ProtectedVisibility;
+ }
+
+ diags.Report(diag::err_drv_invalid_value)
+ << arg->getAsString(args) << value;
+ return DefaultVisibility;
+}
+
+static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
+ DiagnosticsEngine &Diags) {
+ // FIXME: Cleanup per-file based stuff.
+ LangStandard::Kind LangStd = LangStandard::lang_unspecified;
+ if (const Arg *A = Args.getLastArg(OPT_std_EQ)) {
+ LangStd = llvm::StringSwitch<LangStandard::Kind>(A->getValue())
+#define LANGSTANDARD(id, name, desc, features) \
+ .Case(name, LangStandard::lang_##id)
+#include "clang/Frontend/LangStandards.def"
+ .Default(LangStandard::lang_unspecified);
+ if (LangStd == LangStandard::lang_unspecified)
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ else {
+ // Valid standard, check to make sure language and standard are
+ // compatible.
+ const LangStandard &Std = LangStandard::getLangStandardForKind(LangStd);
+ switch (IK) {
+ case IK_C:
+ case IK_ObjC:
+ case IK_PreprocessedC:
+ case IK_PreprocessedObjC:
+ if (!(Std.isC89() || Std.isC99()))
+ Diags.Report(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "C/ObjC";
+ break;
+ case IK_CXX:
+ case IK_ObjCXX:
+ case IK_PreprocessedCXX:
+ case IK_PreprocessedObjCXX:
+ if (!Std.isCPlusPlus())
+ Diags.Report(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "C++/ObjC++";
+ break;
+ case IK_OpenCL:
+ if (!Std.isC99())
+ Diags.Report(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "OpenCL";
+ break;
+ case IK_CUDA:
+ case IK_PreprocessedCuda:
+ if (!Std.isCPlusPlus())
+ Diags.Report(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "CUDA";
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ // -cl-std only applies for OpenCL language standards.
+ // Override the -std option in this case.
+ if (const Arg *A = Args.getLastArg(OPT_cl_std_EQ)) {
+ LangStandard::Kind OpenCLLangStd
+ = llvm::StringSwitch<LangStandard::Kind>(A->getValue())
+ .Case("CL", LangStandard::lang_opencl)
+ .Case("CL1.1", LangStandard::lang_opencl11)
+ .Case("CL1.2", LangStandard::lang_opencl12)
+ .Case("CL2.0", LangStandard::lang_opencl20)
+ .Default(LangStandard::lang_unspecified);
+
+ if (OpenCLLangStd == LangStandard::lang_unspecified) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ }
+ else
+ LangStd = OpenCLLangStd;
+ }
+
+ CompilerInvocation::setLangDefaults(Opts, IK, LangStd);
+
+ // We abuse '-f[no-]gnu-keywords' to force overriding all GNU-extension
+ // keywords. This behavior is provided by GCC's poorly named '-fasm' flag,
+ // while a subset (the non-C++ GNU keywords) is provided by GCC's
+ // '-fgnu-keywords'. Clang conflates the two for simplicity under the single
+ // name, as it doesn't seem a useful distinction.
+ Opts.GNUKeywords = Args.hasFlag(OPT_fgnu_keywords, OPT_fno_gnu_keywords,
+ Opts.GNUKeywords);
+
+ if (Args.hasArg(OPT_fno_operator_names))
+ Opts.CXXOperatorNames = 0;
+
+ if (Args.hasArg(OPT_fcuda_is_device))
+ Opts.CUDAIsDevice = 1;
+
+ if (Args.hasArg(OPT_fcuda_allow_host_calls_from_host_device))
+ Opts.CUDAAllowHostCallsFromHostDevice = 1;
+
+ if (Args.hasArg(OPT_fcuda_disable_target_call_checks))
+ Opts.CUDADisableTargetCallChecks = 1;
+
+ if (Args.hasArg(OPT_fcuda_target_overloads))
+ Opts.CUDATargetOverloads = 1;
+
+ if (Opts.ObjC1) {
+ if (Arg *arg = Args.getLastArg(OPT_fobjc_runtime_EQ)) {
+ StringRef value = arg->getValue();
+ if (Opts.ObjCRuntime.tryParse(value))
+ Diags.Report(diag::err_drv_unknown_objc_runtime) << value;
+ }
+
+ if (Args.hasArg(OPT_fobjc_gc_only))
+ Opts.setGC(LangOptions::GCOnly);
+ else if (Args.hasArg(OPT_fobjc_gc))
+ Opts.setGC(LangOptions::HybridGC);
+ else if (Args.hasArg(OPT_fobjc_arc)) {
+ Opts.ObjCAutoRefCount = 1;
+ if (!Opts.ObjCRuntime.allowsARC())
+ Diags.Report(diag::err_arc_unsupported_on_runtime);
+ }
+
+ // ObjCWeakRuntime tracks whether the runtime supports __weak, not
+ // whether the feature is actually enabled. This is predominantly
+ // determined by -fobjc-runtime, but we allow it to be overridden
+ // from the command line for testing purposes.
+ if (Args.hasArg(OPT_fobjc_runtime_has_weak))
+ Opts.ObjCWeakRuntime = 1;
+ else
+ Opts.ObjCWeakRuntime = Opts.ObjCRuntime.allowsWeak();
+
+ // ObjCWeak determines whether __weak is actually enabled.
+ // Note that we allow -fno-objc-weak to disable this even in ARC mode.
+ if (auto weakArg = Args.getLastArg(OPT_fobjc_weak, OPT_fno_objc_weak)) {
+ if (!weakArg->getOption().matches(OPT_fobjc_weak)) {
+ assert(!Opts.ObjCWeak);
+ } else if (Opts.getGC() != LangOptions::NonGC) {
+ Diags.Report(diag::err_objc_weak_with_gc);
+ } else if (!Opts.ObjCWeakRuntime) {
+ Diags.Report(diag::err_objc_weak_unsupported);
+ } else {
+ Opts.ObjCWeak = 1;
+ }
+ } else if (Opts.ObjCAutoRefCount) {
+ Opts.ObjCWeak = Opts.ObjCWeakRuntime;
+ }
+
+ if (Args.hasArg(OPT_fno_objc_infer_related_result_type))
+ Opts.ObjCInferRelatedResultType = 0;
+
+ if (Args.hasArg(OPT_fobjc_subscripting_legacy_runtime))
+ Opts.ObjCSubscriptingLegacyRuntime =
+ (Opts.ObjCRuntime.getKind() == ObjCRuntime::FragileMacOSX);
+ }
+
+ if (Args.hasArg(OPT_fgnu89_inline)) {
+ if (Opts.CPlusPlus)
+ Diags.Report(diag::err_drv_argument_not_allowed_with) << "-fgnu89-inline"
+ << "C++/ObjC++";
+ else
+ Opts.GNUInline = 1;
+ }
+
+ if (Args.hasArg(OPT_fapple_kext)) {
+ if (!Opts.CPlusPlus)
+ Diags.Report(diag::warn_c_kext);
+ else
+ Opts.AppleKext = 1;
+ }
+
+ if (Args.hasArg(OPT_print_ivar_layout))
+ Opts.ObjCGCBitmapPrint = 1;
+ if (Args.hasArg(OPT_fno_constant_cfstrings))
+ Opts.NoConstantCFStrings = 1;
+
+ if (Args.hasArg(OPT_faltivec))
+ Opts.AltiVec = 1;
+
+ if (Args.hasArg(OPT_fzvector))
+ Opts.ZVector = 1;
+
+ if (Args.hasArg(OPT_pthread))
+ Opts.POSIXThreads = 1;
+
+ // The value-visibility mode defaults to "default".
+ if (Arg *visOpt = Args.getLastArg(OPT_fvisibility)) {
+ Opts.setValueVisibilityMode(parseVisibility(visOpt, Args, Diags));
+ } else {
+ Opts.setValueVisibilityMode(DefaultVisibility);
+ }
+
+ // The type-visibility mode defaults to the value-visibility mode.
+ if (Arg *typeVisOpt = Args.getLastArg(OPT_ftype_visibility)) {
+ Opts.setTypeVisibilityMode(parseVisibility(typeVisOpt, Args, Diags));
+ } else {
+ Opts.setTypeVisibilityMode(Opts.getValueVisibilityMode());
+ }
+
+ if (Args.hasArg(OPT_fvisibility_inlines_hidden))
+ Opts.InlineVisibilityHidden = 1;
+
+ if (Args.hasArg(OPT_ftrapv)) {
+ Opts.setSignedOverflowBehavior(LangOptions::SOB_Trapping);
+ // Set the handler, if one is specified.
+ Opts.OverflowHandler =
+ Args.getLastArgValue(OPT_ftrapv_handler);
+ }
+ else if (Args.hasArg(OPT_fwrapv))
+ Opts.setSignedOverflowBehavior(LangOptions::SOB_Defined);
+
+ Opts.MSVCCompat = Args.hasArg(OPT_fms_compatibility);
+ Opts.MicrosoftExt = Opts.MSVCCompat || Args.hasArg(OPT_fms_extensions);
+ Opts.AsmBlocks = Args.hasArg(OPT_fasm_blocks) || Opts.MicrosoftExt;
+ Opts.MSCompatibilityVersion = 0;
+ if (const Arg *A = Args.getLastArg(OPT_fms_compatibility_version)) {
+ VersionTuple VT;
+ if (VT.tryParse(A->getValue()))
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args)
+ << A->getValue();
+ Opts.MSCompatibilityVersion = VT.getMajor() * 10000000 +
+ VT.getMinor().getValueOr(0) * 100000 +
+ VT.getSubminor().getValueOr(0);
+ }
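+ // For example, -fms-compatibility-version=19.00.23026 (value illustrative)
+ // is encoded as 19 * 10000000 + 0 * 100000 + 23026 == 190023026.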
+
+ // Mimicking GCC's behavior, trigraphs are only enabled if -trigraphs
+ // is specified, or -std is set to a conforming mode.
+ // Trigraphs are disabled by default in C++1z onwards.
+ Opts.Trigraphs = !Opts.GNUMode && !Opts.MSVCCompat && !Opts.CPlusPlus1z;
+ Opts.Trigraphs =
+ Args.hasFlag(OPT_ftrigraphs, OPT_fno_trigraphs, Opts.Trigraphs);
+
+ Opts.DollarIdents = Args.hasFlag(OPT_fdollars_in_identifiers,
+ OPT_fno_dollars_in_identifiers,
+ Opts.DollarIdents);
+ Opts.PascalStrings = Args.hasArg(OPT_fpascal_strings);
+ Opts.VtorDispMode = getLastArgIntValue(Args, OPT_vtordisp_mode_EQ, 1, Diags);
+ Opts.Borland = Args.hasArg(OPT_fborland_extensions);
+ Opts.WritableStrings = Args.hasArg(OPT_fwritable_strings);
+ Opts.ConstStrings = Args.hasFlag(OPT_fconst_strings, OPT_fno_const_strings,
+ Opts.ConstStrings);
+ if (Args.hasArg(OPT_fno_lax_vector_conversions))
+ Opts.LaxVectorConversions = 0;
+ if (Args.hasArg(OPT_fno_threadsafe_statics))
+ Opts.ThreadsafeStatics = 0;
+ Opts.Exceptions = Args.hasArg(OPT_fexceptions);
+ Opts.ObjCExceptions = Args.hasArg(OPT_fobjc_exceptions);
+ Opts.CXXExceptions = Args.hasArg(OPT_fcxx_exceptions);
+ Opts.SjLjExceptions = Args.hasArg(OPT_fsjlj_exceptions);
+ Opts.TraditionalCPP = Args.hasArg(OPT_traditional_cpp);
+
+ Opts.RTTI = !Args.hasArg(OPT_fno_rtti);
+ Opts.RTTIData = Opts.RTTI && !Args.hasArg(OPT_fno_rtti_data);
+ Opts.Blocks = Args.hasArg(OPT_fblocks);
+ Opts.BlocksRuntimeOptional = Args.hasArg(OPT_fblocks_runtime_optional);
+ Opts.Coroutines = Args.hasArg(OPT_fcoroutines);
+ Opts.Modules = Args.hasArg(OPT_fmodules);
+ Opts.ModulesStrictDeclUse = Args.hasArg(OPT_fmodules_strict_decluse);
+ Opts.ModulesDeclUse =
+ Args.hasArg(OPT_fmodules_decluse) || Opts.ModulesStrictDeclUse;
+ Opts.ModulesLocalVisibility =
+ Args.hasArg(OPT_fmodules_local_submodule_visibility);
+ Opts.ModulesSearchAll = Opts.Modules &&
+ !Args.hasArg(OPT_fno_modules_search_all) &&
+ Args.hasArg(OPT_fmodules_search_all);
+ Opts.ModulesErrorRecovery = !Args.hasArg(OPT_fno_modules_error_recovery);
+ Opts.ImplicitModules = !Args.hasArg(OPT_fno_implicit_modules);
+ Opts.CharIsSigned = Opts.OpenCL || !Args.hasArg(OPT_fno_signed_char);
+ Opts.WChar = Opts.CPlusPlus && !Args.hasArg(OPT_fno_wchar);
+ Opts.ShortWChar = Args.hasFlag(OPT_fshort_wchar, OPT_fno_short_wchar, false);
+ Opts.ShortEnums = Args.hasArg(OPT_fshort_enums);
+ Opts.Freestanding = Args.hasArg(OPT_ffreestanding);
+ Opts.NoBuiltin = Args.hasArg(OPT_fno_builtin) || Opts.Freestanding;
+ if (!Opts.NoBuiltin)
+ getAllNoBuiltinFuncValues(Args, Opts.NoBuiltinFuncs);
+ Opts.NoMathBuiltin = Args.hasArg(OPT_fno_math_builtin);
+ Opts.AssumeSaneOperatorNew = !Args.hasArg(OPT_fno_assume_sane_operator_new);
+ Opts.SizedDeallocation = Args.hasArg(OPT_fsized_deallocation);
+ Opts.ConceptsTS = Args.hasArg(OPT_fconcepts_ts);
+ Opts.HeinousExtensions = Args.hasArg(OPT_fheinous_gnu_extensions);
+ Opts.AccessControl = !Args.hasArg(OPT_fno_access_control);
+ Opts.ElideConstructors = !Args.hasArg(OPT_fno_elide_constructors);
+ Opts.MathErrno = !Opts.OpenCL && Args.hasArg(OPT_fmath_errno);
+ Opts.InstantiationDepth =
+ getLastArgIntValue(Args, OPT_ftemplate_depth, 256, Diags);
+ Opts.ArrowDepth =
+ getLastArgIntValue(Args, OPT_foperator_arrow_depth, 256, Diags);
+ Opts.ConstexprCallDepth =
+ getLastArgIntValue(Args, OPT_fconstexpr_depth, 512, Diags);
+ Opts.ConstexprStepLimit =
+ getLastArgIntValue(Args, OPT_fconstexpr_steps, 1048576, Diags);
+ Opts.BracketDepth = getLastArgIntValue(Args, OPT_fbracket_depth, 256, Diags);
+ Opts.DelayedTemplateParsing = Args.hasArg(OPT_fdelayed_template_parsing);
+ Opts.NumLargeByValueCopy =
+ getLastArgIntValue(Args, OPT_Wlarge_by_value_copy_EQ, 0, Diags);
+ Opts.MSBitfields = Args.hasArg(OPT_mms_bitfields);
+ Opts.ObjCConstantStringClass =
+ Args.getLastArgValue(OPT_fconstant_string_class);
+ Opts.ObjCDefaultSynthProperties =
+ !Args.hasArg(OPT_disable_objc_default_synthesize_properties);
+ Opts.EncodeExtendedBlockSig =
+ Args.hasArg(OPT_fencode_extended_block_signature);
+ Opts.EmitAllDecls = Args.hasArg(OPT_femit_all_decls);
+ Opts.PackStruct = getLastArgIntValue(Args, OPT_fpack_struct_EQ, 0, Diags);
+ Opts.MaxTypeAlign = getLastArgIntValue(Args, OPT_fmax_type_align_EQ, 0, Diags);
+ Opts.PICLevel = getLastArgIntValue(Args, OPT_pic_level, 0, Diags);
+ Opts.PIELevel = getLastArgIntValue(Args, OPT_pie_level, 0, Diags);
+ Opts.Static = Args.hasArg(OPT_static_define);
+ Opts.DumpRecordLayoutsSimple = Args.hasArg(OPT_fdump_record_layouts_simple);
+ Opts.DumpRecordLayouts = Opts.DumpRecordLayoutsSimple
+ || Args.hasArg(OPT_fdump_record_layouts);
+ Opts.DumpVTableLayouts = Args.hasArg(OPT_fdump_vtable_layouts);
+ Opts.SpellChecking = !Args.hasArg(OPT_fno_spell_checking);
+ Opts.NoBitFieldTypeAlign = Args.hasArg(OPT_fno_bitfield_type_align);
+ Opts.SinglePrecisionConstants = Args.hasArg(OPT_cl_single_precision_constant);
+ Opts.FastRelaxedMath = Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.MRTD = Args.hasArg(OPT_mrtd);
+ Opts.HexagonQdsp6Compat = Args.hasArg(OPT_mqdsp6_compat);
+ Opts.FakeAddressSpaceMap = Args.hasArg(OPT_ffake_address_space_map);
+ Opts.ParseUnknownAnytype = Args.hasArg(OPT_funknown_anytype);
+ Opts.DebuggerSupport = Args.hasArg(OPT_fdebugger_support);
+ Opts.DebuggerCastResultToId = Args.hasArg(OPT_fdebugger_cast_result_to_id);
+ Opts.DebuggerObjCLiteral = Args.hasArg(OPT_fdebugger_objc_literal);
+ Opts.ApplePragmaPack = Args.hasArg(OPT_fapple_pragma_pack);
+ Opts.CurrentModule = Args.getLastArgValue(OPT_fmodule_name);
+ Opts.AppExt = Args.hasArg(OPT_fapplication_extension);
+ Opts.ImplementationOfModule =
+ Args.getLastArgValue(OPT_fmodule_implementation_of);
+ Opts.ModuleFeatures = Args.getAllArgValues(OPT_fmodule_feature);
+ std::sort(Opts.ModuleFeatures.begin(), Opts.ModuleFeatures.end());
+ Opts.NativeHalfType |= Args.hasArg(OPT_fnative_half_type);
+ Opts.HalfArgsAndReturns = Args.hasArg(OPT_fallow_half_arguments_and_returns);
+ Opts.GNUAsm = !Args.hasArg(OPT_fno_gnu_inline_asm);
+
+ // __declspec is enabled by default for the PS4 by the driver, and is also
+ // enabled here for Microsoft Extensions or Borland Extensions.
+ //
+ // FIXME: __declspec is also currently enabled for CUDA, but isn't really a
+ // CUDA extension; however, it is required to support cuda_builtin_vars.h,
+ // which uses __declspec(property). Once that has been rewritten in terms of
+ // something more generic, remove the Opts.CUDA term here.
+ Opts.DeclSpecKeyword =
+ Args.hasFlag(OPT_fdeclspec, OPT_fno_declspec,
+ (Opts.MicrosoftExt || Opts.Borland || Opts.CUDA));
+
+ if (!Opts.CurrentModule.empty() && !Opts.ImplementationOfModule.empty() &&
+ Opts.CurrentModule != Opts.ImplementationOfModule) {
+ Diags.Report(diag::err_conflicting_module_names)
+ << Opts.CurrentModule << Opts.ImplementationOfModule;
+ }
+
+ // For now, we only support local submodule visibility in C++ (because we
+ // heavily depend on the ODR for merging redefinitions).
+ if (Opts.ModulesLocalVisibility && !Opts.CPlusPlus)
+ Diags.Report(diag::err_drv_argument_not_allowed_with)
+ << "-fmodules-local-submodule-visibility" << "C";
+
+ if (Arg *A = Args.getLastArg(OPT_faddress_space_map_mangling_EQ)) {
+ switch (llvm::StringSwitch<unsigned>(A->getValue())
+ .Case("target", LangOptions::ASMM_Target)
+ .Case("no", LangOptions::ASMM_Off)
+ .Case("yes", LangOptions::ASMM_On)
+ .Default(255)) {
+ default:
+ Diags.Report(diag::err_drv_invalid_value)
+ << "-faddress-space-map-mangling=" << A->getValue();
+ break;
+ case LangOptions::ASMM_Target:
+ Opts.setAddressSpaceMapMangling(LangOptions::ASMM_Target);
+ break;
+ case LangOptions::ASMM_On:
+ Opts.setAddressSpaceMapMangling(LangOptions::ASMM_On);
+ break;
+ case LangOptions::ASMM_Off:
+ Opts.setAddressSpaceMapMangling(LangOptions::ASMM_Off);
+ break;
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_fms_memptr_rep_EQ)) {
+ LangOptions::PragmaMSPointersToMembersKind InheritanceModel =
+ llvm::StringSwitch<LangOptions::PragmaMSPointersToMembersKind>(
+ A->getValue())
+ .Case("single",
+ LangOptions::PPTMK_FullGeneralitySingleInheritance)
+ .Case("multiple",
+ LangOptions::PPTMK_FullGeneralityMultipleInheritance)
+ .Case("virtual",
+ LangOptions::PPTMK_FullGeneralityVirtualInheritance)
+ .Default(LangOptions::PPTMK_BestCase);
+ if (InheritanceModel == LangOptions::PPTMK_BestCase)
+ Diags.Report(diag::err_drv_invalid_value)
+ << "-fms-memptr-rep=" << A->getValue();
+
+ Opts.setMSPointerToMemberRepresentationMethod(InheritanceModel);
+ }
+
+ // Check if -fopenmp is specified.
+ Opts.OpenMP = Args.hasArg(options::OPT_fopenmp);
+ Opts.OpenMPUseTLS =
+ Opts.OpenMP && !Args.hasArg(options::OPT_fnoopenmp_use_tls);
+ Opts.OpenMPIsDevice =
+ Opts.OpenMP && Args.hasArg(options::OPT_fopenmp_is_device);
+
+ // Get the OpenMP target triples if any.
+ if (Arg *A = Args.getLastArg(options::OPT_omptargets_EQ)) {
+
+ for (unsigned i = 0; i < A->getNumValues(); ++i) {
+ llvm::Triple TT(A->getValue(i));
+
+ if (TT.getArch() == llvm::Triple::UnknownArch)
+ Diags.Report(clang::diag::err_drv_invalid_omp_target) << A->getValue(i);
+ else
+ Opts.OMPTargetTriples.push_back(TT);
+ }
+ }
+
+ // Get the OpenMP host IR file path, if any, and report an error if the
+ // file does not exist.
+ if (Arg *A = Args.getLastArg(options::OPT_omp_host_ir_file_path)) {
+ Opts.OMPHostIRFile = A->getValue();
+ if (!llvm::sys::fs::exists(Opts.OMPHostIRFile))
+ Diags.Report(clang::diag::err_drv_omp_host_ir_file_not_found)
+ << Opts.OMPHostIRFile;
+ }
+
+ // Record whether the __DEPRECATED define was requested.
+ Opts.Deprecated = Args.hasFlag(OPT_fdeprecated_macro,
+ OPT_fno_deprecated_macro,
+ Opts.Deprecated);
+
+ // FIXME: Eliminate this dependency.
+ unsigned Opt = getOptimizationLevel(Args, IK, Diags),
+ OptSize = getOptimizationLevelSize(Args);
+ Opts.Optimize = Opt != 0;
+ Opts.OptimizeSize = OptSize != 0;
+
+ // This is the __NO_INLINE__ define, which just depends on things like the
+ // optimization level and -fno-inline, not actually whether the backend has
+ // inlining enabled.
+ Opts.NoInlineDefine = !Opt || Args.hasArg(OPT_fno_inline);
+
+ Opts.FastMath = Args.hasArg(OPT_ffast_math) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.FiniteMathOnly = Args.hasArg(OPT_ffinite_math_only) ||
+ Args.hasArg(OPT_cl_finite_math_only) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.UnsafeFPMath = Args.hasArg(OPT_menable_unsafe_fp_math) ||
+ Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
+
+ Opts.RetainCommentsFromSystemHeaders =
+ Args.hasArg(OPT_fretain_comments_from_system_headers);
+
+ unsigned SSP = getLastArgIntValue(Args, OPT_stack_protector, 0, Diags);
+ switch (SSP) {
+ default:
+ Diags.Report(diag::err_drv_invalid_value)
+ << Args.getLastArg(OPT_stack_protector)->getAsString(Args) << SSP;
+ break;
+ case 0: Opts.setStackProtector(LangOptions::SSPOff); break;
+ case 1: Opts.setStackProtector(LangOptions::SSPOn); break;
+ case 2: Opts.setStackProtector(LangOptions::SSPStrong); break;
+ case 3: Opts.setStackProtector(LangOptions::SSPReq); break;
+ }
+
+ // Parse -fsanitize= arguments.
+ parseSanitizerKinds("-fsanitize=", Args.getAllArgValues(OPT_fsanitize_EQ),
+ Diags, Opts.Sanitize);
+ // -fsanitize-address-field-padding=N has to be a LangOpt, parse it here.
+ Opts.SanitizeAddressFieldPadding =
+ getLastArgIntValue(Args, OPT_fsanitize_address_field_padding, 0, Diags);
+ Opts.SanitizerBlacklistFiles = Args.getAllArgValues(OPT_fsanitize_blacklist);
+}
+
+static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
+ FileManager &FileMgr,
+ DiagnosticsEngine &Diags) {
+ using namespace options;
+ Opts.ImplicitPCHInclude = Args.getLastArgValue(OPT_include_pch);
+ Opts.ImplicitPTHInclude = Args.getLastArgValue(OPT_include_pth);
+ if (const Arg *A = Args.getLastArg(OPT_token_cache))
+ Opts.TokenCache = A->getValue();
+ else
+ Opts.TokenCache = Opts.ImplicitPTHInclude;
+ Opts.UsePredefines = !Args.hasArg(OPT_undef);
+ Opts.DetailedRecord = Args.hasArg(OPT_detailed_preprocessing_record);
+ Opts.DisablePCHValidation = Args.hasArg(OPT_fno_validate_pch);
+
+ Opts.DumpDeserializedPCHDecls = Args.hasArg(OPT_dump_deserialized_pch_decls);
+ for (const Arg *A : Args.filtered(OPT_error_on_deserialized_pch_decl))
+ Opts.DeserializedPCHDeclsToErrorOn.insert(A->getValue());
+
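+ // The -preamble-bytes=<n>,<e> argument (values illustrative) declares that
+ // the first <n> bytes of the main file form the precompiled preamble, with
+ // <e> nonzero if the preamble ends at the start of a new line.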
+ if (const Arg *A = Args.getLastArg(OPT_preamble_bytes_EQ)) {
+ StringRef Value(A->getValue());
+ size_t Comma = Value.find(',');
+ unsigned Bytes = 0;
+ unsigned EndOfLine = 0;
+
+ if (Comma == StringRef::npos ||
+ Value.substr(0, Comma).getAsInteger(10, Bytes) ||
+ Value.substr(Comma + 1).getAsInteger(10, EndOfLine))
+ Diags.Report(diag::err_drv_preamble_format);
+ else {
+ Opts.PrecompiledPreambleBytes.first = Bytes;
+ Opts.PrecompiledPreambleBytes.second = (EndOfLine != 0);
+ }
+ }
+
+ // Add macros from the command line.
+ for (const Arg *A : Args.filtered(OPT_D, OPT_U)) {
+ if (A->getOption().matches(OPT_D))
+ Opts.addMacroDef(A->getValue());
+ else
+ Opts.addMacroUndef(A->getValue());
+ }
+
+ Opts.MacroIncludes = Args.getAllArgValues(OPT_imacros);
+
+ // Add the ordered list of -includes.
+ for (const Arg *A : Args.filtered(OPT_include))
+ Opts.Includes.emplace_back(A->getValue());
+
+ for (const Arg *A : Args.filtered(OPT_chain_include))
+ Opts.ChainedIncludes.emplace_back(A->getValue());
+
+ // Include 'altivec.h' if the -faltivec option is present.
+ if (Args.hasArg(OPT_faltivec))
+ Opts.Includes.emplace_back("altivec.h");
+
+ for (const Arg *A : Args.filtered(OPT_remap_file)) {
+ std::pair<StringRef, StringRef> Split = StringRef(A->getValue()).split(';');
+
+ if (Split.second.empty()) {
+ Diags.Report(diag::err_drv_invalid_remap_file) << A->getAsString(Args);
+ continue;
+ }
+
+ Opts.addRemappedFile(Split.first, Split.second);
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_fobjc_arc_cxxlib_EQ)) {
+ StringRef Name = A->getValue();
+ unsigned Library = llvm::StringSwitch<unsigned>(Name)
+ .Case("libc++", ARCXX_libcxx)
+ .Case("libstdc++", ARCXX_libstdcxx)
+ .Case("none", ARCXX_nolib)
+ .Default(~0U);
+ if (Library == ~0U)
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
+ else
+ Opts.ObjCXXARCStandardLibrary = (ObjCXXARCStandardLibraryKind)Library;
+ }
+}
+
+static void ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
+ ArgList &Args,
+ frontend::ActionKind Action) {
+ using namespace options;
+
+ switch (Action) {
+ case frontend::ASTDeclList:
+ case frontend::ASTDump:
+ case frontend::ASTPrint:
+ case frontend::ASTView:
+ case frontend::EmitAssembly:
+ case frontend::EmitBC:
+ case frontend::EmitHTML:
+ case frontend::EmitLLVM:
+ case frontend::EmitLLVMOnly:
+ case frontend::EmitCodeGenOnly:
+ case frontend::EmitObj:
+ case frontend::FixIt:
+ case frontend::GenerateModule:
+ case frontend::GeneratePCH:
+ case frontend::GeneratePTH:
+ case frontend::ParseSyntaxOnly:
+ case frontend::ModuleFileInfo:
+ case frontend::VerifyPCH:
+ case frontend::PluginAction:
+ case frontend::PrintDeclContext:
+ case frontend::RewriteObjC:
+ case frontend::RewriteTest:
+ case frontend::RunAnalysis:
+ case frontend::MigrateSource:
+ Opts.ShowCPP = 0;
+ break;
+
+ case frontend::DumpRawTokens:
+ case frontend::DumpTokens:
+ case frontend::InitOnly:
+ case frontend::PrintPreamble:
+ case frontend::PrintPreprocessedInput:
+ case frontend::RewriteMacros:
+ case frontend::RunPreprocessorOnly:
+ Opts.ShowCPP = !Args.hasArg(OPT_dM);
+ break;
+ }
+
+ Opts.ShowComments = Args.hasArg(OPT_C);
+ Opts.ShowLineMarkers = !Args.hasArg(OPT_P);
+ Opts.ShowMacroComments = Args.hasArg(OPT_CC);
+ Opts.ShowMacros = Args.hasArg(OPT_dM) || Args.hasArg(OPT_dD);
+ Opts.RewriteIncludes = Args.hasArg(OPT_frewrite_includes);
+ Opts.UseLineDirectives = Args.hasArg(OPT_fuse_line_directives);
+}
+
+static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args) {
+ using namespace options;
+ Opts.ABI = Args.getLastArgValue(OPT_target_abi);
+ Opts.CPU = Args.getLastArgValue(OPT_target_cpu);
+ Opts.FPMath = Args.getLastArgValue(OPT_mfpmath);
+ Opts.FeaturesAsWritten = Args.getAllArgValues(OPT_target_feature);
+ Opts.LinkerVersion = Args.getLastArgValue(OPT_target_linker_version);
+ Opts.Triple = llvm::Triple::normalize(Args.getLastArgValue(OPT_triple));
+ Opts.Reciprocals = Args.getAllArgValues(OPT_mrecip_EQ);
+ // Use the default target triple if unspecified.
+ if (Opts.Triple.empty())
+ Opts.Triple = llvm::sys::getDefaultTargetTriple();
+}
+
+bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
+ const char *const *ArgBegin,
+ const char *const *ArgEnd,
+ DiagnosticsEngine &Diags) {
+ bool Success = true;
+
+ // Parse the arguments.
+ std::unique_ptr<OptTable> Opts(createDriverOptTable());
+ const unsigned IncludedFlagsBitmask = options::CC1Option;
+ unsigned MissingArgIndex, MissingArgCount;
+ InputArgList Args =
+ Opts->ParseArgs(llvm::makeArrayRef(ArgBegin, ArgEnd), MissingArgIndex,
+ MissingArgCount, IncludedFlagsBitmask);
+
+ // Check for missing argument error.
+ if (MissingArgCount) {
+ Diags.Report(diag::err_drv_missing_argument)
+ << Args.getArgString(MissingArgIndex) << MissingArgCount;
+ Success = false;
+ }
+
+ // Issue errors on unknown arguments.
+ for (const Arg *A : Args.filtered(OPT_UNKNOWN)) {
+ Diags.Report(diag::err_drv_unknown_argument) << A->getAsString(Args);
+ Success = false;
+ }
+
+ Success &= ParseAnalyzerArgs(*Res.getAnalyzerOpts(), Args, Diags);
+ Success &= ParseMigratorArgs(Res.getMigratorOpts(), Args);
+ ParseDependencyOutputArgs(Res.getDependencyOutputOpts(), Args);
+ Success &= ParseDiagnosticArgs(Res.getDiagnosticOpts(), Args, &Diags);
+ ParseCommentArgs(Res.getLangOpts()->CommentOpts, Args);
+ ParseFileSystemArgs(Res.getFileSystemOpts(), Args);
+ // FIXME: We shouldn't have to pass the DashX option around here.
+ InputKind DashX = ParseFrontendArgs(Res.getFrontendOpts(), Args, Diags);
+ ParseTargetArgs(Res.getTargetOpts(), Args);
+ Success &= ParseCodeGenArgs(Res.getCodeGenOpts(), Args, DashX, Diags,
+ Res.getTargetOpts());
+ ParseHeaderSearchArgs(Res.getHeaderSearchOpts(), Args);
+ if (DashX == IK_AST || DashX == IK_LLVM_IR) {
+ // ObjCAutoRefCount and Sanitize LangOpts are used to set up the
+ // PassManager in BackendUtil.cpp. They need to be initialized no matter
+ // what the input type is.
+ if (Args.hasArg(OPT_fobjc_arc))
+ Res.getLangOpts()->ObjCAutoRefCount = 1;
+ parseSanitizerKinds("-fsanitize=", Args.getAllArgValues(OPT_fsanitize_EQ),
+ Diags, Res.getLangOpts()->Sanitize);
+ } else {
+ // Other LangOpts are only initialized when the input is not AST or LLVM IR.
+ ParseLangArgs(*Res.getLangOpts(), Args, DashX, Diags);
+ if (Res.getFrontendOpts().ProgramAction == frontend::RewriteObjC)
+ Res.getLangOpts()->ObjCExceptions = 1;
+ }
+ // FIXME: ParsePreprocessorArgs uses the FileManager to read the contents of
+ // the PCH file and find the original header name. Remove the need to do that in
+ // ParsePreprocessorArgs and remove the FileManager
+ // parameters from the function and the "FileManager.h" #include.
+ FileManager FileMgr(Res.getFileSystemOpts());
+ ParsePreprocessorArgs(Res.getPreprocessorOpts(), Args, FileMgr, Diags);
+ ParsePreprocessorOutputArgs(Res.getPreprocessorOutputOpts(), Args,
+ Res.getFrontendOpts().ProgramAction);
+ return Success;
+}
+
+namespace {
+
+ class ModuleSignature {
+ SmallVector<uint64_t, 16> Data;
+ unsigned CurBit;
+ uint64_t CurValue;
+
+ public:
+ ModuleSignature() : CurBit(0), CurValue(0) { }
+
+ void add(uint64_t Value, unsigned Bits);
+ void add(StringRef Value);
+ void flush();
+
+ llvm::APInt getAsInteger() const;
+ };
+}
+
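+ // ModuleSignature::add packs values LSB-first into 64-bit words. As an
+ // illustration, add(0x5, 3) followed by add(0x3, 2) leaves CurValue ==
+ // 0b11101 (0x1D); once 64 bits accumulate, the completed word is pushed
+ // onto Data and any leftover high bits of Value carry into the next word.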
+void ModuleSignature::add(uint64_t Value, unsigned int NumBits) {
+ CurValue |= Value << CurBit;
+ if (CurBit + NumBits < 64) {
+ CurBit += NumBits;
+ return;
+ }
+
+ // Add the current word.
+ Data.push_back(CurValue);
+
+ if (CurBit)
+ CurValue = Value >> (64-CurBit);
+ else
+ CurValue = 0;
+ CurBit = (CurBit+NumBits) & 63;
+}
+
+void ModuleSignature::flush() {
+ if (CurBit == 0)
+ return;
+
+ Data.push_back(CurValue);
+ CurBit = 0;
+ CurValue = 0;
+}
+
+void ModuleSignature::add(StringRef Value) {
+ for (auto &c : Value)
+ add(c, 8);
+}
+
+llvm::APInt ModuleSignature::getAsInteger() const {
+ return llvm::APInt(Data.size() * 64, Data);
+}
+
+std::string CompilerInvocation::getModuleHash() const {
+ // Note: For QoI reasons, the things we use as a hash here should all be
+ // dumped via the -module-info flag.
+ using llvm::hash_code;
+ using llvm::hash_value;
+ using llvm::hash_combine;
+
+ // Start the signature with the compiler version.
+ // FIXME: We'd rather use something more cryptographically sound than
+ // CityHash, but this will do for now.
+ hash_code code = hash_value(getClangFullRepositoryVersion());
+
+ // Extend the signature with the language options.
+#define LANGOPT(Name, Bits, Default, Description) \
+ code = hash_combine(code, LangOpts->Name);
+#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
+ code = hash_combine(code, static_cast<unsigned>(LangOpts->get##Name()));
+#define BENIGN_LANGOPT(Name, Bits, Default, Description)
+#define BENIGN_ENUM_LANGOPT(Name, Type, Bits, Default, Description)
+#include "clang/Basic/LangOptions.def"
+
+ for (StringRef Feature : LangOpts->ModuleFeatures)
+ code = hash_combine(code, Feature);
+
+ // Extend the signature with the target options.
+ code = hash_combine(code, TargetOpts->Triple, TargetOpts->CPU,
+ TargetOpts->ABI);
+ for (unsigned i = 0, n = TargetOpts->FeaturesAsWritten.size(); i != n; ++i)
+ code = hash_combine(code, TargetOpts->FeaturesAsWritten[i]);
+
+ // Extend the signature with preprocessor options.
+ const PreprocessorOptions &ppOpts = getPreprocessorOpts();
+ const HeaderSearchOptions &hsOpts = getHeaderSearchOpts();
+ code = hash_combine(code, ppOpts.UsePredefines, ppOpts.DetailedRecord);
+
+ for (std::vector<std::pair<std::string, bool/*isUndef*/>>::const_iterator
+ I = getPreprocessorOpts().Macros.begin(),
+ IEnd = getPreprocessorOpts().Macros.end();
+ I != IEnd; ++I) {
+ // If we're supposed to ignore this macro for the purposes of modules,
+ // don't put it into the hash.
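+ // For example, with -fmodules-ignore-macro=NDEBUG, a command-line
+ // -DNDEBUG=1 is skipped here and does not change the module hash.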
+ if (!hsOpts.ModulesIgnoreMacros.empty()) {
+ // Check whether we're ignoring this macro.
+ StringRef MacroDef = I->first;
+ if (hsOpts.ModulesIgnoreMacros.count(MacroDef.split('=').first))
+ continue;
+ }
+
+ code = hash_combine(code, I->first, I->second);
+ }
+
+ // Extend the signature with the sysroot.
+ code = hash_combine(code, hsOpts.Sysroot, hsOpts.UseBuiltinIncludes,
+ hsOpts.UseStandardSystemIncludes,
+ hsOpts.UseStandardCXXIncludes,
+ hsOpts.UseLibcxx);
+ code = hash_combine(code, hsOpts.ResourceDir);
+
+ // Extend the signature with the user build path.
+ code = hash_combine(code, hsOpts.ModuleUserBuildPath);
+
+ // Extend the signature with the module file extensions.
+ const FrontendOptions &frontendOpts = getFrontendOpts();
+ for (auto ext : frontendOpts.ModuleFileExtensions) {
+ code = ext->hashExtension(code);
+ }
+
+ // Darwin-specific hack: if we have a sysroot, use the contents and
+ // modification time of
+ // $sysroot/System/Library/CoreServices/SystemVersion.plist
+ // as part of the module hash.
+ if (!hsOpts.Sysroot.empty()) {
+ SmallString<128> systemVersionFile;
+ systemVersionFile += hsOpts.Sysroot;
+ llvm::sys::path::append(systemVersionFile, "System");
+ llvm::sys::path::append(systemVersionFile, "Library");
+ llvm::sys::path::append(systemVersionFile, "CoreServices");
+ llvm::sys::path::append(systemVersionFile, "SystemVersion.plist");
+
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> buffer =
+ llvm::MemoryBuffer::getFile(systemVersionFile);
+ if (buffer) {
+ code = hash_combine(code, buffer.get()->getBuffer());
+
+ struct stat statBuf;
+ if (stat(systemVersionFile.c_str(), &statBuf) == 0)
+ code = hash_combine(code, statBuf.st_mtime);
+ }
+ }
+
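+  // Render the final hash value as an unsigned base-36 string.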
+ return llvm::APInt(64, code).toString(36, /*Signed=*/false);
+}
+
+namespace clang {
+
+template<typename IntTy>
+static IntTy getLastArgIntValueImpl(const ArgList &Args, OptSpecifier Id,
+ IntTy Default,
+ DiagnosticsEngine *Diags) {
+ IntTy Res = Default;
+ if (Arg *A = Args.getLastArg(Id)) {
+ if (StringRef(A->getValue()).getAsInteger(10, Res)) {
+ if (Diags)
+ Diags->Report(diag::err_drv_invalid_int_value) << A->getAsString(Args)
+ << A->getValue();
+ }
+ }
+ return Res;
+}
+
+
+// Declared in clang/Frontend/Utils.h.
+int getLastArgIntValue(const ArgList &Args, OptSpecifier Id, int Default,
+ DiagnosticsEngine *Diags) {
+ return getLastArgIntValueImpl<int>(Args, Id, Default, Diags);
+}
+
+uint64_t getLastArgUInt64Value(const ArgList &Args, OptSpecifier Id,
+ uint64_t Default,
+ DiagnosticsEngine *Diags) {
+ return getLastArgIntValueImpl<uint64_t>(Args, Id, Default, Diags);
+}
+
+void BuryPointer(const void *Ptr) {
+  // This function should be called only a small, fixed number of times per
+  // invocation; otherwise we really do have a leak that we want reported.
+  // If it is called more than kGraveYardMaxSize times, the excess pointers
+  // will not be buried, and a leak detector will report the leak, which is
+  // exactly what we want in that case.
+ static const size_t kGraveYardMaxSize = 16;
+ LLVM_ATTRIBUTE_UNUSED static const void *GraveYard[kGraveYardMaxSize];
+ static std::atomic<unsigned> GraveYardSize;
+ unsigned Idx = GraveYardSize++;
+ if (Idx >= kGraveYardMaxSize)
+ return;
+ GraveYard[Idx] = Ptr;
+}
+
+IntrusiveRefCntPtr<vfs::FileSystem>
+createVFSFromCompilerInvocation(const CompilerInvocation &CI,
+ DiagnosticsEngine &Diags) {
+ if (CI.getHeaderSearchOpts().VFSOverlayFiles.empty())
+ return vfs::getRealFileSystem();
+
+ IntrusiveRefCntPtr<vfs::OverlayFileSystem>
+ Overlay(new vfs::OverlayFileSystem(vfs::getRealFileSystem()));
+  // Earlier VFS files sit at the bottom; later ones are layered on top.
+ for (const std::string &File : CI.getHeaderSearchOpts().VFSOverlayFiles) {
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Buffer =
+ llvm::MemoryBuffer::getFile(File);
+ if (!Buffer) {
+ Diags.Report(diag::err_missing_vfs_overlay_file) << File;
+ return IntrusiveRefCntPtr<vfs::FileSystem>();
+ }
+
+ IntrusiveRefCntPtr<vfs::FileSystem> FS =
+ vfs::getVFSFromYAML(std::move(Buffer.get()), /*DiagHandler*/ nullptr);
+ if (!FS.get()) {
+ Diags.Report(diag::err_invalid_vfs_overlay) << File;
+ return IntrusiveRefCntPtr<vfs::FileSystem>();
+ }
+ Overlay->pushOverlay(FS);
+ }
+ return Overlay;
+}
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp b/contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
new file mode 100644
index 0000000..3019164
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
@@ -0,0 +1,104 @@
+//===--- CreateInvocationFromCommandLine.cpp - CompilerInvocation from Args ==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Construct a compiler invocation object for command-line driver arguments.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Action.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Support/Host.h"
+using namespace clang;
+using namespace llvm::opt;
+
+/// createInvocationFromCommandLine - Construct a compiler invocation object for
+/// a command line argument vector.
+///
+/// \return A CompilerInvocation, or nullptr if none was built for the given
+/// argument vector.
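+///
+/// Example (hypothetical usage):
+/// \code
+///   const char *Args[] = { "clang", "-c", "t.cpp" };
+///   std::unique_ptr<CompilerInvocation> CI(
+///       createInvocationFromCommandLine(Args, Diags));
+/// \endcode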
+CompilerInvocation *
+clang::createInvocationFromCommandLine(ArrayRef<const char *> ArgList,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags) {
+ if (!Diags.get()) {
+ // No diagnostics engine was provided, so create our own diagnostics object
+ // with the default options.
+ Diags = CompilerInstance::createDiagnostics(new DiagnosticOptions);
+ }
+
+ SmallVector<const char *, 16> Args(ArgList.begin(), ArgList.end());
+
+ // FIXME: Find a cleaner way to force the driver into restricted modes.
+ Args.push_back("-fsyntax-only");
+
+ // FIXME: We shouldn't have to pass in the path info.
+ driver::Driver TheDriver(Args[0], llvm::sys::getDefaultTargetTriple(),
+ *Diags);
+
+ // Don't check that inputs exist, they may have been remapped.
+ TheDriver.setCheckInputsExist(false);
+
+ std::unique_ptr<driver::Compilation> C(TheDriver.BuildCompilation(Args));
+
+ // Just print the cc1 options if -### was present.
+ if (C->getArgs().hasArg(driver::options::OPT__HASH_HASH_HASH)) {
+ C->getJobs().Print(llvm::errs(), "\n", true);
+ return nullptr;
+ }
+
+  // We expect to get back exactly one command job; if we didn't, something
+  // failed. CUDA compilation is an exception, as it creates multiple jobs; in
+  // that case we proceed with the first job. If the caller needs a particular
+  // CUDA job, it should be selected via the --cuda-{host|device}-only options
+  // passed to the driver.
+ const driver::JobList &Jobs = C->getJobs();
+ bool CudaCompilation = false;
+ if (Jobs.size() > 1) {
+    for (auto &A : C->getActions()) {
+      // On Mac OS X, real actions may end up being wrapped in BindArchAction.
+ if (isa<driver::BindArchAction>(A))
+ A = *A->begin();
+ if (isa<driver::CudaDeviceAction>(A)) {
+ CudaCompilation = true;
+ break;
+ }
+ }
+ }
+ if (Jobs.size() == 0 || !isa<driver::Command>(*Jobs.begin()) ||
+ (Jobs.size() > 1 && !CudaCompilation)) {
+ SmallString<256> Msg;
+ llvm::raw_svector_ostream OS(Msg);
+ Jobs.Print(OS, "; ", true);
+ Diags->Report(diag::err_fe_expected_compiler_job) << OS.str();
+ return nullptr;
+ }
+
+ const driver::Command &Cmd = cast<driver::Command>(*Jobs.begin());
+ if (StringRef(Cmd.getCreator().getName()) != "clang") {
+ Diags->Report(diag::err_fe_expected_clang_command);
+ return nullptr;
+ }
+
+ const ArgStringList &CCArgs = Cmd.getArguments();
+ std::unique_ptr<CompilerInvocation> CI(new CompilerInvocation());
+ if (!CompilerInvocation::CreateFromArgs(*CI,
+ const_cast<const char **>(CCArgs.data()),
+ const_cast<const char **>(CCArgs.data()) +
+ CCArgs.size(),
+ *Diags))
+ return nullptr;
+ return CI.release();
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp b/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp
new file mode 100644
index 0000000..93d4a80
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp
@@ -0,0 +1,476 @@
+//===--- DependencyFile.cpp - Generate dependency file --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This code generates dependency files.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/DependencyOutputOptions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Lex/DirectoryLookup.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/ModuleMap.h"
+#include "clang/Lex/PPCallbacks.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Serialization/ASTReader.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+namespace {
+struct DepCollectorPPCallbacks : public PPCallbacks {
+ DependencyCollector &DepCollector;
+ SourceManager &SM;
+ DepCollectorPPCallbacks(DependencyCollector &L, SourceManager &SM)
+ : DepCollector(L), SM(SM) { }
+
+ void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID) override {
+ if (Reason != PPCallbacks::EnterFile)
+ return;
+
+ // Dependency generation really does want to go all the way to the
+ // file entry for a source location to find out what is depended on.
+ // We do not want #line markers to affect dependency generation!
+ const FileEntry *FE =
+ SM.getFileEntryForID(SM.getFileID(SM.getExpansionLoc(Loc)));
+ if (!FE)
+ return;
+
+ StringRef Filename =
+ llvm::sys::path::remove_leading_dotslash(FE->getName());
+
+ DepCollector.maybeAddDependency(Filename, /*FromModule*/false,
+ FileType != SrcMgr::C_User,
+ /*IsModuleFile*/false, /*IsMissing*/false);
+ }
+
+ void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
+ StringRef FileName, bool IsAngled,
+ CharSourceRange FilenameRange, const FileEntry *File,
+ StringRef SearchPath, StringRef RelativePath,
+ const Module *Imported) override {
+ if (!File)
+ DepCollector.maybeAddDependency(FileName, /*FromModule*/false,
+ /*IsSystem*/false, /*IsModuleFile*/false,
+ /*IsMissing*/true);
+ // Files that actually exist are handled by FileChanged.
+ }
+
+ void EndOfMainFile() override {
+ DepCollector.finishedMainFile();
+ }
+};
+
+struct DepCollectorMMCallbacks : public ModuleMapCallbacks {
+ DependencyCollector &DepCollector;
+ DepCollectorMMCallbacks(DependencyCollector &DC) : DepCollector(DC) {}
+
+ void moduleMapFileRead(SourceLocation Loc, const FileEntry &Entry,
+ bool IsSystem) override {
+ StringRef Filename = Entry.getName();
+ DepCollector.maybeAddDependency(Filename, /*FromModule*/false,
+ /*IsSystem*/IsSystem,
+ /*IsModuleFile*/false,
+ /*IsMissing*/false);
+ }
+};
+
+struct DepCollectorASTListener : public ASTReaderListener {
+ DependencyCollector &DepCollector;
+ DepCollectorASTListener(DependencyCollector &L) : DepCollector(L) { }
+ bool needsInputFileVisitation() override { return true; }
+ bool needsSystemInputFileVisitation() override {
+ return DepCollector.needSystemDependencies();
+ }
+ void visitModuleFile(StringRef Filename,
+ serialization::ModuleKind Kind) override {
+ DepCollector.maybeAddDependency(Filename, /*FromModule*/true,
+ /*IsSystem*/false, /*IsModuleFile*/true,
+ /*IsMissing*/false);
+ }
+ bool visitInputFile(StringRef Filename, bool IsSystem,
+ bool IsOverridden, bool IsExplicitModule) override {
+ if (IsOverridden || IsExplicitModule)
+ return true;
+
+ DepCollector.maybeAddDependency(Filename, /*FromModule*/true, IsSystem,
+ /*IsModuleFile*/false, /*IsMissing*/false);
+ return true;
+ }
+};
+} // end anonymous namespace
+
+void DependencyCollector::maybeAddDependency(StringRef Filename, bool FromModule,
+ bool IsSystem, bool IsModuleFile,
+ bool IsMissing) {
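+  // Seen.insert(...).second is true only the first time Filename is seen,
+  // so each dependency is recorded at most once, in first-seen order.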
+ if (Seen.insert(Filename).second &&
+ sawDependency(Filename, FromModule, IsSystem, IsModuleFile, IsMissing))
+ Dependencies.push_back(Filename);
+}
+
+static bool isSpecialFilename(StringRef Filename) {
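+  // These are synthetic buffer names rather than real files, so they must
+  // never appear in a dependency list.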
+ return llvm::StringSwitch<bool>(Filename)
+ .Case("<built-in>", true)
+ .Case("<stdin>", true)
+ .Default(false);
+}
+
+bool DependencyCollector::sawDependency(StringRef Filename, bool FromModule,
+ bool IsSystem, bool IsModuleFile,
+ bool IsMissing) {
+ return !isSpecialFilename(Filename) &&
+ (needSystemDependencies() || !IsSystem);
+}
+
+DependencyCollector::~DependencyCollector() { }
+void DependencyCollector::attachToPreprocessor(Preprocessor &PP) {
+ PP.addPPCallbacks(
+ llvm::make_unique<DepCollectorPPCallbacks>(*this, PP.getSourceManager()));
+ PP.getHeaderSearchInfo().getModuleMap().addModuleMapCallbacks(
+ llvm::make_unique<DepCollectorMMCallbacks>(*this));
+}
+void DependencyCollector::attachToASTReader(ASTReader &R) {
+ R.addListener(llvm::make_unique<DepCollectorASTListener>(*this));
+}
+
+namespace {
+/// Private implementation for DependencyFileGenerator
+class DFGImpl : public PPCallbacks {
+ std::vector<std::string> Files;
+ llvm::StringSet<> FilesSet;
+ const Preprocessor *PP;
+ std::string OutputFile;
+ std::vector<std::string> Targets;
+ bool IncludeSystemHeaders;
+ bool PhonyTarget;
+ bool AddMissingHeaderDeps;
+ bool SeenMissingHeader;
+ bool IncludeModuleFiles;
+ DependencyOutputFormat OutputFormat;
+
+private:
+ bool FileMatchesDepCriteria(const char *Filename,
+ SrcMgr::CharacteristicKind FileType);
+ void OutputDependencyFile();
+
+public:
+ DFGImpl(const Preprocessor *_PP, const DependencyOutputOptions &Opts)
+ : PP(_PP), OutputFile(Opts.OutputFile), Targets(Opts.Targets),
+ IncludeSystemHeaders(Opts.IncludeSystemHeaders),
+ PhonyTarget(Opts.UsePhonyTargets),
+ AddMissingHeaderDeps(Opts.AddMissingHeaderDeps),
+ SeenMissingHeader(false),
+ IncludeModuleFiles(Opts.IncludeModuleFiles),
+ OutputFormat(Opts.OutputFormat) {
+ for (auto ExtraDep : Opts.ExtraDeps) {
+ AddFilename(ExtraDep);
+ }
+ }
+
+ void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID) override;
+ void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
+ StringRef FileName, bool IsAngled,
+ CharSourceRange FilenameRange, const FileEntry *File,
+ StringRef SearchPath, StringRef RelativePath,
+ const Module *Imported) override;
+
+ void EndOfMainFile() override {
+ OutputDependencyFile();
+ }
+
+ void AddFilename(StringRef Filename);
+ bool includeSystemHeaders() const { return IncludeSystemHeaders; }
+ bool includeModuleFiles() const { return IncludeModuleFiles; }
+};
+
+class DFGMMCallback : public ModuleMapCallbacks {
+ DFGImpl &Parent;
+public:
+ DFGMMCallback(DFGImpl &Parent) : Parent(Parent) {}
+ void moduleMapFileRead(SourceLocation Loc, const FileEntry &Entry,
+ bool IsSystem) override {
+ if (!IsSystem || Parent.includeSystemHeaders())
+ Parent.AddFilename(Entry.getName());
+ }
+};
+
+class DFGASTReaderListener : public ASTReaderListener {
+ DFGImpl &Parent;
+public:
+ DFGASTReaderListener(DFGImpl &Parent)
+ : Parent(Parent) { }
+ bool needsInputFileVisitation() override { return true; }
+ bool needsSystemInputFileVisitation() override {
+ return Parent.includeSystemHeaders();
+ }
+ void visitModuleFile(StringRef Filename,
+ serialization::ModuleKind Kind) override;
+ bool visitInputFile(StringRef Filename, bool isSystem,
+ bool isOverridden, bool isExplicitModule) override;
+};
+}
+
+DependencyFileGenerator::DependencyFileGenerator(void *Impl)
+: Impl(Impl) { }
+
+DependencyFileGenerator *DependencyFileGenerator::CreateAndAttachToPreprocessor(
+ clang::Preprocessor &PP, const clang::DependencyOutputOptions &Opts) {
+
+ if (Opts.Targets.empty()) {
+ PP.getDiagnostics().Report(diag::err_fe_dependency_file_requires_MT);
+ return nullptr;
+ }
+
+ // Disable the "file not found" diagnostic if the -MG option was given.
+ if (Opts.AddMissingHeaderDeps)
+ PP.SetSuppressIncludeNotFoundError(true);
+
+ DFGImpl *Callback = new DFGImpl(&PP, Opts);
+ PP.addPPCallbacks(std::unique_ptr<PPCallbacks>(Callback));
+ PP.getHeaderSearchInfo().getModuleMap().addModuleMapCallbacks(
+ llvm::make_unique<DFGMMCallback>(*Callback));
+ return new DependencyFileGenerator(Callback);
+}
+
+void DependencyFileGenerator::AttachToASTReader(ASTReader &R) {
+ DFGImpl *I = reinterpret_cast<DFGImpl *>(Impl);
+ assert(I && "missing implementation");
+ R.addListener(llvm::make_unique<DFGASTReaderListener>(*I));
+}
+
+/// FileMatchesDepCriteria - Determine whether the given Filename should be
+/// considered as a dependency.
+bool DFGImpl::FileMatchesDepCriteria(const char *Filename,
+ SrcMgr::CharacteristicKind FileType) {
+ if (isSpecialFilename(Filename))
+ return false;
+
+ if (IncludeSystemHeaders)
+ return true;
+
+ return FileType == SrcMgr::C_User;
+}
+
+void DFGImpl::FileChanged(SourceLocation Loc,
+ FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID) {
+ if (Reason != PPCallbacks::EnterFile)
+ return;
+
+ // Dependency generation really does want to go all the way to the
+ // file entry for a source location to find out what is depended on.
+ // We do not want #line markers to affect dependency generation!
+ SourceManager &SM = PP->getSourceManager();
+
+ const FileEntry *FE =
+ SM.getFileEntryForID(SM.getFileID(SM.getExpansionLoc(Loc)));
+ if (!FE) return;
+
+ StringRef Filename = FE->getName();
+ if (!FileMatchesDepCriteria(Filename.data(), FileType))
+ return;
+
+ AddFilename(llvm::sys::path::remove_leading_dotslash(Filename));
+}
+
+void DFGImpl::InclusionDirective(SourceLocation HashLoc,
+ const Token &IncludeTok,
+ StringRef FileName,
+ bool IsAngled,
+ CharSourceRange FilenameRange,
+ const FileEntry *File,
+ StringRef SearchPath,
+ StringRef RelativePath,
+ const Module *Imported) {
+ if (!File) {
+ if (AddMissingHeaderDeps)
+ AddFilename(FileName);
+ else
+ SeenMissingHeader = true;
+ }
+}
+
+void DFGImpl::AddFilename(StringRef Filename) {
+ if (FilesSet.insert(Filename).second)
+ Files.push_back(Filename);
+}
+
+/// Print the filename, with escaping or quoting that accommodates the three
+/// most likely tools that use dependency files: GNU Make, BSD Make, and
+/// NMake/Jom.
+///
+/// BSD Make is the simplest case: It does no escaping at all. This means
+/// characters that are normally delimiters, i.e. space and # (the comment
+/// character) simply aren't supported in filenames.
+///
+/// GNU Make does allow space and # in filenames, but to avoid being treated
+/// as a delimiter or comment, these must be escaped with a backslash. Because
+/// backslash is itself the escape character, if a backslash appears in a
+/// filename, it should be escaped as well. (As a special case, $ is escaped
+/// as $$, which is the normal Make way to handle the $ character.)
+/// For compatibility with BSD Make and historical practice, if GNU Make
+/// un-escapes characters in a filename but doesn't find a match, it will
+/// retry with the unmodified original string.
+///
+/// GCC tries to accommodate both Make formats by escaping any space or #
+/// characters in the original filename, but not escaping backslashes. The
+/// apparent intent is so that filenames with backslashes will be handled
+/// correctly by BSD Make, and by GNU Make in its fallback mode of using the
+/// unmodified original string; filenames with # or space characters aren't
+/// supported by BSD Make at all, but will be handled correctly by GNU Make
+/// due to the escaping.
+///
+/// A corner case that GCC gets only partly right is when the original filename
+/// has a backslash immediately followed by space or #. GNU Make would expect
+/// this backslash to be escaped; however GCC escapes the original backslash
+/// only when followed by space, not #. It will therefore take a dependency
+/// from a directive such as
+/// #include "a\ b\#c.h"
+/// and emit it as
+/// a\\\ b\\#c.h
+/// which GNU Make will interpret as
+/// a\ b\
+/// followed by a comment. Failing to find this file, it will fall back to the
+/// original string, which probably doesn't exist either; in any case it won't
+/// find
+/// a\ b\#c.h
+/// which is the actual filename specified by the include directive.
+///
+/// Clang does what GCC does, rather than what GNU Make expects.
+///
+/// NMake/Jom has a different set of scary characters, but wraps filespecs in
+/// double-quotes to avoid misinterpreting them; see
+/// https://msdn.microsoft.com/en-us/library/dd9y37ha.aspx for NMake info,
+/// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
+/// for Windows file-naming info.
+static void PrintFilename(raw_ostream &OS, StringRef Filename,
+ DependencyOutputFormat OutputFormat) {
+ if (OutputFormat == DependencyOutputFormat::NMake) {
+ // Add quotes if needed. These are the characters listed as "special" to
+ // NMake, that are legal in a Windows filespec, and that could cause
+ // misinterpretation of the dependency string.
+ if (Filename.find_first_of(" #${}^!") != StringRef::npos)
+ OS << '\"' << Filename << '\"';
+ else
+ OS << Filename;
+ return;
+ }
+ assert(OutputFormat == DependencyOutputFormat::Make);
+ for (unsigned i = 0, e = Filename.size(); i != e; ++i) {
+ if (Filename[i] == '#') // Handle '#' the broken gcc way.
+ OS << '\\';
+ else if (Filename[i] == ' ') { // Handle space correctly.
+ OS << '\\';
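+      // GNU Make also requires each backslash immediately preceding the
+      // space to be escaped, so emit one extra backslash per such backslash.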
+ unsigned j = i;
+ while (j > 0 && Filename[--j] == '\\')
+ OS << '\\';
+ } else if (Filename[i] == '$') // $ is escaped by $$.
+ OS << '$';
+ OS << Filename[i];
+ }
+}
+
+void DFGImpl::OutputDependencyFile() {
+ if (SeenMissingHeader) {
+ llvm::sys::fs::remove(OutputFile);
+ return;
+ }
+
+ std::error_code EC;
+ llvm::raw_fd_ostream OS(OutputFile, EC, llvm::sys::fs::F_Text);
+ if (EC) {
+ PP->getDiagnostics().Report(diag::err_fe_error_opening) << OutputFile
+ << EC.message();
+ return;
+ }
+
+ // Write out the dependency targets, trying to avoid overly long
+ // lines when possible. We try our best to emit exactly the same
+ // dependency file as GCC (4.2), assuming the included files are the
+ // same.
+ const unsigned MaxColumns = 75;
+ unsigned Columns = 0;
+
+ for (std::vector<std::string>::iterator
+ I = Targets.begin(), E = Targets.end(); I != E; ++I) {
+ unsigned N = I->length();
+ if (Columns == 0) {
+ Columns += N;
+ } else if (Columns + N + 2 > MaxColumns) {
+ Columns = N + 2;
+ OS << " \\\n ";
+ } else {
+ Columns += N + 1;
+ OS << ' ';
+ }
+ // Targets already quoted as needed.
+ OS << *I;
+ }
+
+ OS << ':';
+ Columns += 1;
+
+ // Now add each dependency in the order it was seen, but avoiding
+ // duplicates.
+ for (std::vector<std::string>::iterator I = Files.begin(),
+ E = Files.end(); I != E; ++I) {
+ // Start a new line if this would exceed the column limit. Make
+ // sure to leave space for a trailing " \" in case we need to
+ // break the line on the next iteration.
+ unsigned N = I->length();
+ if (Columns + (N + 1) + 2 > MaxColumns) {
+ OS << " \\\n ";
+ Columns = 2;
+ }
+ OS << ' ';
+ PrintFilename(OS, *I, OutputFormat);
+ Columns += N + 1;
+ }
+ OS << '\n';
+
+ // Create phony targets if requested.
+ if (PhonyTarget && !Files.empty()) {
+ // Skip the first entry, this is always the input file itself.
+ for (std::vector<std::string>::iterator I = Files.begin() + 1,
+ E = Files.end(); I != E; ++I) {
+ OS << '\n';
+ PrintFilename(OS, *I, OutputFormat);
+ OS << ":\n";
+ }
+ }
+}
+
+bool DFGASTReaderListener::visitInputFile(llvm::StringRef Filename,
+ bool IsSystem, bool IsOverridden,
+ bool IsExplicitModule) {
+ assert(!IsSystem || needsSystemInputFileVisitation());
+ if (IsOverridden || IsExplicitModule)
+ return true;
+
+ Parent.AddFilename(Filename);
+ return true;
+}
+
+void DFGASTReaderListener::visitModuleFile(llvm::StringRef Filename,
+ serialization::ModuleKind Kind) {
+ if (Parent.includeModuleFiles())
+ Parent.AddFilename(Filename);
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/DependencyGraph.cpp b/contrib/llvm/tools/clang/lib/Frontend/DependencyGraph.cpp
new file mode 100644
index 0000000..67a977e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/DependencyGraph.cpp
@@ -0,0 +1,138 @@
+//===--- DependencyGraph.cpp - Generate dependency graph ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This code generates a header dependency graph in DOT format, for use
+// with, e.g., GraphViz.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Lex/PPCallbacks.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Support/GraphWriter.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+namespace DOT = llvm::DOT;
+
+namespace {
+class DependencyGraphCallback : public PPCallbacks {
+ const Preprocessor *PP;
+ std::string OutputFile;
+ std::string SysRoot;
+ llvm::SetVector<const FileEntry *> AllFiles;
+ typedef llvm::DenseMap<const FileEntry *,
+ SmallVector<const FileEntry *, 2> > DependencyMap;
+
+ DependencyMap Dependencies;
+
+private:
+ raw_ostream &writeNodeReference(raw_ostream &OS,
+ const FileEntry *Node);
+ void OutputGraphFile();
+
+public:
+ DependencyGraphCallback(const Preprocessor *_PP, StringRef OutputFile,
+ StringRef SysRoot)
+ : PP(_PP), OutputFile(OutputFile.str()), SysRoot(SysRoot.str()) { }
+
+ void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
+ StringRef FileName, bool IsAngled,
+ CharSourceRange FilenameRange, const FileEntry *File,
+ StringRef SearchPath, StringRef RelativePath,
+ const Module *Imported) override;
+
+ void EndOfMainFile() override {
+ OutputGraphFile();
+ }
+
+};
+}
+
+void clang::AttachDependencyGraphGen(Preprocessor &PP, StringRef OutputFile,
+ StringRef SysRoot) {
+ PP.addPPCallbacks(llvm::make_unique<DependencyGraphCallback>(&PP, OutputFile,
+ SysRoot));
+}
+
+void DependencyGraphCallback::InclusionDirective(SourceLocation HashLoc,
+ const Token &IncludeTok,
+ StringRef FileName,
+ bool IsAngled,
+ CharSourceRange FilenameRange,
+ const FileEntry *File,
+ StringRef SearchPath,
+ StringRef RelativePath,
+ const Module *Imported) {
+ if (!File)
+ return;
+
+ SourceManager &SM = PP->getSourceManager();
+ const FileEntry *FromFile
+ = SM.getFileEntryForID(SM.getFileID(SM.getExpansionLoc(HashLoc)));
+ if (!FromFile)
+ return;
+
+ Dependencies[FromFile].push_back(File);
+
+ AllFiles.insert(File);
+ AllFiles.insert(FromFile);
+}
+
+raw_ostream &
+DependencyGraphCallback::writeNodeReference(raw_ostream &OS,
+ const FileEntry *Node) {
+ OS << "header_" << Node->getUID();
+ return OS;
+}
+
+void DependencyGraphCallback::OutputGraphFile() {
+ std::error_code EC;
+ llvm::raw_fd_ostream OS(OutputFile, EC, llvm::sys::fs::F_Text);
+ if (EC) {
+ PP->getDiagnostics().Report(diag::err_fe_error_opening) << OutputFile
+ << EC.message();
+ return;
+ }
+
+ OS << "digraph \"dependencies\" {\n";
+
+ // Write the nodes
+ for (unsigned I = 0, N = AllFiles.size(); I != N; ++I) {
+ // Write the node itself.
+ OS.indent(2);
+ writeNodeReference(OS, AllFiles[I]);
+ OS << " [ shape=\"box\", label=\"";
+ StringRef FileName = AllFiles[I]->getName();
+ if (FileName.startswith(SysRoot))
+ FileName = FileName.substr(SysRoot.size());
+
+ OS << DOT::EscapeString(FileName)
+ << "\"];\n";
+ }
+
+ // Write the edges
+ for (DependencyMap::iterator F = Dependencies.begin(),
+ FEnd = Dependencies.end();
+ F != FEnd; ++F) {
+ for (unsigned I = 0, N = F->second.size(); I != N; ++I) {
+ OS.indent(2);
+ writeNodeReference(OS, F->first);
+ OS << " -> ";
+ writeNodeReference(OS, F->second[I]);
+ OS << ";\n";
+ }
+ }
+ OS << "}\n";
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp b/contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp
new file mode 100644
index 0000000..caf1f0d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp
@@ -0,0 +1,667 @@
+//===--- DiagnosticRenderer.cpp - Diagnostic Pretty-Printing --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/DiagnosticRenderer.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Edit/Commit.h"
+#include "clang/Edit/EditedSource.h"
+#include "clang/Edit/EditsReceiver.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+using namespace clang;
+
+/// \brief Retrieve the name of the immediate macro expansion.
+///
+/// This routine starts from a source location, and finds the name of the macro
+/// responsible for its immediate expansion. It looks through any intervening
+/// macro argument expansions to compute this. It returns a StringRef which
+/// refers to the SourceManager-owned buffer of the source where that macro
+/// name is spelled. Thus, the result shouldn't out-live that SourceManager.
+///
+/// This differs from Lexer::getImmediateMacroName in that any macro argument
+/// location will result in the topmost function macro that accepted it.
+/// e.g.
+/// \code
+/// MAC1( MAC2(foo) )
+/// \endcode
+/// for location of 'foo' token, this function will return "MAC1" while
+/// Lexer::getImmediateMacroName will return "MAC2".
+static StringRef getImmediateMacroName(SourceLocation Loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+  assert(Loc.isMacroID() && "Only reasonable to call this on macros");
+  // Walk past macro argument expansions.
+ while (SM.isMacroArgExpansion(Loc))
+ Loc = SM.getImmediateExpansionRange(Loc).first;
+
+ // If the macro's spelling has no FileID, then it's actually a token paste
+ // or stringization (or similar) and not a macro at all.
+ if (!SM.getFileEntryForID(SM.getFileID(SM.getSpellingLoc(Loc))))
+ return StringRef();
+
+ // Find the spelling location of the start of the non-argument expansion
+ // range. This is where the macro name was spelled in order to begin
+ // expanding this macro.
+ Loc = SM.getSpellingLoc(SM.getImmediateExpansionRange(Loc).first);
+
+ // Dig out the buffer where the macro name was spelled and the extents of the
+ // name so that we can render it into the expansion note.
+ std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
+ unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
+ StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
+ return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
+}
+
+DiagnosticRenderer::DiagnosticRenderer(const LangOptions &LangOpts,
+ DiagnosticOptions *DiagOpts)
+ : LangOpts(LangOpts), DiagOpts(DiagOpts), LastLevel() {}
+
+DiagnosticRenderer::~DiagnosticRenderer() {}
+
+namespace {
+
+class FixitReceiver : public edit::EditsReceiver {
+ SmallVectorImpl<FixItHint> &MergedFixits;
+
+public:
+ FixitReceiver(SmallVectorImpl<FixItHint> &MergedFixits)
+ : MergedFixits(MergedFixits) { }
+ void insert(SourceLocation loc, StringRef text) override {
+ MergedFixits.push_back(FixItHint::CreateInsertion(loc, text));
+ }
+ void replace(CharSourceRange range, StringRef text) override {
+ MergedFixits.push_back(FixItHint::CreateReplacement(range, text));
+ }
+};
+
+}
+
+static void mergeFixits(ArrayRef<FixItHint> FixItHints,
+ const SourceManager &SM, const LangOptions &LangOpts,
+ SmallVectorImpl<FixItHint> &MergedFixits) {
+ edit::Commit commit(SM, LangOpts);
+ for (ArrayRef<FixItHint>::const_iterator
+ I = FixItHints.begin(), E = FixItHints.end(); I != E; ++I) {
+ const FixItHint &Hint = *I;
+ if (Hint.CodeToInsert.empty()) {
+ if (Hint.InsertFromRange.isValid())
+ commit.insertFromRange(Hint.RemoveRange.getBegin(),
+ Hint.InsertFromRange, /*afterToken=*/false,
+ Hint.BeforePreviousInsertions);
+ else
+ commit.remove(Hint.RemoveRange);
+ } else {
+ if (Hint.RemoveRange.isTokenRange() ||
+ Hint.RemoveRange.getBegin() != Hint.RemoveRange.getEnd())
+ commit.replace(Hint.RemoveRange, Hint.CodeToInsert);
+ else
+ commit.insert(Hint.RemoveRange.getBegin(), Hint.CodeToInsert,
+ /*afterToken=*/false, Hint.BeforePreviousInsertions);
+ }
+ }
+
+ edit::EditedSource Editor(SM, LangOpts);
+ if (Editor.commit(commit)) {
+ FixitReceiver Rec(MergedFixits);
+ Editor.applyRewrites(Rec);
+ }
+}
+
+void DiagnosticRenderer::emitDiagnostic(SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ ArrayRef<CharSourceRange> Ranges,
+ ArrayRef<FixItHint> FixItHints,
+ const SourceManager *SM,
+ DiagOrStoredDiag D) {
+ assert(SM || Loc.isInvalid());
+
+ beginDiagnostic(D, Level);
+
+ if (!Loc.isValid())
+ // If we have no source location, just emit the diagnostic message.
+ emitDiagnosticMessage(Loc, PresumedLoc(), Level, Message, Ranges, SM, D);
+ else {
+ // Get the ranges into a local array we can hack on.
+ SmallVector<CharSourceRange, 20> MutableRanges(Ranges.begin(),
+ Ranges.end());
+
+ SmallVector<FixItHint, 8> MergedFixits;
+ if (!FixItHints.empty()) {
+ mergeFixits(FixItHints, *SM, LangOpts, MergedFixits);
+ FixItHints = MergedFixits;
+ }
+
+ for (ArrayRef<FixItHint>::const_iterator I = FixItHints.begin(),
+ E = FixItHints.end();
+ I != E; ++I)
+ if (I->RemoveRange.isValid())
+ MutableRanges.push_back(I->RemoveRange);
+
+ SourceLocation UnexpandedLoc = Loc;
+
+ // Find the ultimate expansion location for the diagnostic.
+ Loc = SM->getFileLoc(Loc);
+
+ PresumedLoc PLoc = SM->getPresumedLoc(Loc, DiagOpts->ShowPresumedLoc);
+
+ // First, if this diagnostic is not in the main file, print out the
+ // "included from" lines.
+ emitIncludeStack(Loc, PLoc, Level, *SM);
+
+ // Next, emit the actual diagnostic message and caret.
+ emitDiagnosticMessage(Loc, PLoc, Level, Message, Ranges, SM, D);
+ emitCaret(Loc, Level, MutableRanges, FixItHints, *SM);
+
+ // If this location is within a macro, walk from UnexpandedLoc up to Loc
+ // and produce a macro backtrace.
+ if (UnexpandedLoc.isValid() && UnexpandedLoc.isMacroID()) {
+ emitMacroExpansions(UnexpandedLoc, Level, MutableRanges, FixItHints, *SM);
+ }
+ }
+
+ LastLoc = Loc;
+ LastLevel = Level;
+
+ endDiagnostic(D, Level);
+}
+
+
+void DiagnosticRenderer::emitStoredDiagnostic(StoredDiagnostic &Diag) {
+ emitDiagnostic(Diag.getLocation(), Diag.getLevel(), Diag.getMessage(),
+ Diag.getRanges(), Diag.getFixIts(),
+ Diag.getLocation().isValid() ? &Diag.getLocation().getManager()
+ : nullptr,
+ &Diag);
+}
+
+void DiagnosticRenderer::emitBasicNote(StringRef Message) {
+ emitDiagnosticMessage(
+ SourceLocation(), PresumedLoc(), DiagnosticsEngine::Note, Message,
+ None, nullptr, DiagOrStoredDiag());
+}
+
+/// \brief Prints an include stack when appropriate for a particular
+/// diagnostic level and location.
+///
+/// This routine handles all the logic of suppressing particular include
+/// stacks (such as those for notes) and duplicate include stacks when
+/// repeated warnings occur within the same file. It also handles the logic
+/// of customizing the formatting and display of the include stack.
+///
+/// \param Loc The diagnostic location.
+/// \param PLoc The presumed location of the diagnostic location.
+/// \param Level The diagnostic level of the message this stack pertains to.
+void DiagnosticRenderer::emitIncludeStack(SourceLocation Loc,
+ PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ const SourceManager &SM) {
+ SourceLocation IncludeLoc = PLoc.getIncludeLoc();
+
+ // Skip redundant include stacks altogether.
+ if (LastIncludeLoc == IncludeLoc)
+ return;
+
+ LastIncludeLoc = IncludeLoc;
+
+ if (!DiagOpts->ShowNoteIncludeStack && Level == DiagnosticsEngine::Note)
+ return;
+
+ if (IncludeLoc.isValid())
+ emitIncludeStackRecursively(IncludeLoc, SM);
+ else {
+ emitModuleBuildStack(SM);
+ emitImportStack(Loc, SM);
+ }
+}
+
+/// \brief Helper to recursively walk up the include stack and print each layer
+/// on the way back down.
+void DiagnosticRenderer::emitIncludeStackRecursively(SourceLocation Loc,
+ const SourceManager &SM) {
+ if (Loc.isInvalid()) {
+ emitModuleBuildStack(SM);
+ return;
+ }
+
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc, DiagOpts->ShowPresumedLoc);
+ if (PLoc.isInvalid())
+ return;
+
+  // If this source location was imported from a module, print the module
+  // import stack rather than the include stack.
+  // FIXME: We want submodule granularity here.
+ std::pair<SourceLocation, StringRef> Imported = SM.getModuleImportLoc(Loc);
+ if (!Imported.second.empty()) {
+ // This location was imported by a module. Emit the module import stack.
+ emitImportStackRecursively(Imported.first, Imported.second, SM);
+ return;
+ }
+
+ // Emit the other include frames first.
+ emitIncludeStackRecursively(PLoc.getIncludeLoc(), SM);
+
+ // Emit the inclusion text/note.
+ emitIncludeLocation(Loc, PLoc, SM);
+}
+
+/// \brief Emit the module import stack associated with the current location.
+void DiagnosticRenderer::emitImportStack(SourceLocation Loc,
+ const SourceManager &SM) {
+ if (Loc.isInvalid()) {
+ emitModuleBuildStack(SM);
+ return;
+ }
+
+ std::pair<SourceLocation, StringRef> NextImportLoc
+ = SM.getModuleImportLoc(Loc);
+ emitImportStackRecursively(NextImportLoc.first, NextImportLoc.second, SM);
+}
+
+/// \brief Helper to recursively walk up the import stack and print each layer
+/// on the way back down.
+void DiagnosticRenderer::emitImportStackRecursively(SourceLocation Loc,
+ StringRef ModuleName,
+ const SourceManager &SM) {
+ if (ModuleName.empty()) {
+ return;
+ }
+
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc, DiagOpts->ShowPresumedLoc);
+
+ // Emit the other import frames first.
+ std::pair<SourceLocation, StringRef> NextImportLoc
+ = SM.getModuleImportLoc(Loc);
+ emitImportStackRecursively(NextImportLoc.first, NextImportLoc.second, SM);
+
+ // Emit the inclusion text/note.
+ emitImportLocation(Loc, PLoc, ModuleName, SM);
+}
+
+/// \brief Emit the module build stack, for cases where a module is (re-)built
+/// on demand.
+void DiagnosticRenderer::emitModuleBuildStack(const SourceManager &SM) {
+ ModuleBuildStack Stack = SM.getModuleBuildStack();
+ for (unsigned I = 0, N = Stack.size(); I != N; ++I) {
+ const SourceManager &CurSM = Stack[I].second.getManager();
+ SourceLocation CurLoc = Stack[I].second;
+ emitBuildingModuleLocation(CurLoc,
+ CurSM.getPresumedLoc(CurLoc,
+ DiagOpts->ShowPresumedLoc),
+ Stack[I].first,
+ CurSM);
+ }
+}
+
+/// A recursive function to trace all possible backtrace locations
+/// to match the \p CaretLocFileID.
+static SourceLocation
+retrieveMacroLocation(SourceLocation Loc, FileID MacroFileID,
+ FileID CaretFileID,
+ const SmallVectorImpl<FileID> &CommonArgExpansions,
+ bool IsBegin, const SourceManager *SM) {
+ assert(SM->getFileID(Loc) == MacroFileID);
+ if (MacroFileID == CaretFileID)
+ return Loc;
+ if (!Loc.isMacroID())
+ return SourceLocation();
+
+ SourceLocation MacroLocation, MacroArgLocation;
+
+ if (SM->isMacroArgExpansion(Loc)) {
+ // Only look at the immediate spelling location of this macro argument if
+ // the other location in the source range is also present in that expansion.
+ if (std::binary_search(CommonArgExpansions.begin(),
+ CommonArgExpansions.end(), MacroFileID))
+ MacroLocation = SM->getImmediateSpellingLoc(Loc);
+ MacroArgLocation = IsBegin ? SM->getImmediateExpansionRange(Loc).first
+ : SM->getImmediateExpansionRange(Loc).second;
+ } else {
+ MacroLocation = IsBegin ? SM->getImmediateExpansionRange(Loc).first
+ : SM->getImmediateExpansionRange(Loc).second;
+ MacroArgLocation = SM->getImmediateSpellingLoc(Loc);
+ }
+
+ if (MacroLocation.isValid()) {
+ MacroFileID = SM->getFileID(MacroLocation);
+ MacroLocation =
+ retrieveMacroLocation(MacroLocation, MacroFileID, CaretFileID,
+ CommonArgExpansions, IsBegin, SM);
+ if (MacroLocation.isValid())
+ return MacroLocation;
+ }
+
+ MacroFileID = SM->getFileID(MacroArgLocation);
+ return retrieveMacroLocation(MacroArgLocation, MacroFileID, CaretFileID,
+ CommonArgExpansions, IsBegin, SM);
+}
+
+/// Walk up the chain of macro expansions and collect the FileIDs identifying the
+/// expansions.
+static void getMacroArgExpansionFileIDs(SourceLocation Loc,
+ SmallVectorImpl<FileID> &IDs,
+ bool IsBegin, const SourceManager *SM) {
+ while (Loc.isMacroID()) {
+ if (SM->isMacroArgExpansion(Loc)) {
+ IDs.push_back(SM->getFileID(Loc));
+ Loc = SM->getImmediateSpellingLoc(Loc);
+ } else {
+ auto ExpRange = SM->getImmediateExpansionRange(Loc);
+ Loc = IsBegin ? ExpRange.first : ExpRange.second;
+ }
+ }
+}
+
+/// Collect the expansions of the begin and end locations and compute the set
+/// intersection. Produces a sorted vector of FileIDs in CommonArgExpansions.
+static void computeCommonMacroArgExpansionFileIDs(
+ SourceLocation Begin, SourceLocation End, const SourceManager *SM,
+ SmallVectorImpl<FileID> &CommonArgExpansions) {
+ SmallVector<FileID, 4> BeginArgExpansions;
+ SmallVector<FileID, 4> EndArgExpansions;
+ getMacroArgExpansionFileIDs(Begin, BeginArgExpansions, /*IsBegin=*/true, SM);
+ getMacroArgExpansionFileIDs(End, EndArgExpansions, /*IsBegin=*/false, SM);
+ std::sort(BeginArgExpansions.begin(), BeginArgExpansions.end());
+ std::sort(EndArgExpansions.begin(), EndArgExpansions.end());
+ std::set_intersection(BeginArgExpansions.begin(), BeginArgExpansions.end(),
+ EndArgExpansions.begin(), EndArgExpansions.end(),
+ std::back_inserter(CommonArgExpansions));
+}
+
+// Helper function to fix up source ranges. It takes in an array of ranges,
+// and outputs an array of ranges where we want to draw the range highlighting
+// around the location specified by CaretLoc.
+//
+// To find locations which correspond to the caret, we crawl the macro caller
+// chain for the beginning and end of each range. If the caret location
+// is in a macro expansion, we search each chain for a location
+// in the same expansion as the caret; otherwise, we crawl to the top of
+// each chain. Two locations are part of the same macro expansion
+// iff the FileID is the same.
+static void mapDiagnosticRanges(
+ SourceLocation CaretLoc,
+ ArrayRef<CharSourceRange> Ranges,
+ SmallVectorImpl<CharSourceRange> &SpellingRanges,
+ const SourceManager *SM) {
+ FileID CaretLocFileID = SM->getFileID(CaretLoc);
+
+ for (auto I = Ranges.begin(), E = Ranges.end(); I != E; ++I) {
+ if (I->isInvalid()) continue;
+
+ SourceLocation Begin = I->getBegin(), End = I->getEnd();
+ bool IsTokenRange = I->isTokenRange();
+
+ FileID BeginFileID = SM->getFileID(Begin);
+ FileID EndFileID = SM->getFileID(End);
+
+ // Find the common parent for the beginning and end of the range.
+
+ // First, crawl the expansion chain for the beginning of the range.
+ llvm::SmallDenseMap<FileID, SourceLocation> BeginLocsMap;
+ while (Begin.isMacroID() && BeginFileID != EndFileID) {
+ BeginLocsMap[BeginFileID] = Begin;
+ Begin = SM->getImmediateExpansionRange(Begin).first;
+ BeginFileID = SM->getFileID(Begin);
+ }
+
+ // Then, crawl the expansion chain for the end of the range.
+ if (BeginFileID != EndFileID) {
+ while (End.isMacroID() && !BeginLocsMap.count(EndFileID)) {
+ End = SM->getImmediateExpansionRange(End).second;
+ EndFileID = SM->getFileID(End);
+ }
+ if (End.isMacroID()) {
+ Begin = BeginLocsMap[EndFileID];
+ BeginFileID = EndFileID;
+ }
+ }
+
+ // Do the backtracking.
+ SmallVector<FileID, 4> CommonArgExpansions;
+ computeCommonMacroArgExpansionFileIDs(Begin, End, SM, CommonArgExpansions);
+ Begin = retrieveMacroLocation(Begin, BeginFileID, CaretLocFileID,
+ CommonArgExpansions, /*IsBegin=*/true, SM);
+ End = retrieveMacroLocation(End, BeginFileID, CaretLocFileID,
+ CommonArgExpansions, /*IsBegin=*/false, SM);
+ if (Begin.isInvalid() || End.isInvalid()) continue;
+
+ // Return the spelling location of the beginning and end of the range.
+ Begin = SM->getSpellingLoc(Begin);
+ End = SM->getSpellingLoc(End);
+
+ SpellingRanges.push_back(CharSourceRange(SourceRange(Begin, End),
+ IsTokenRange));
+ }
+}
+
+void DiagnosticRenderer::emitCaret(SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ ArrayRef<CharSourceRange> Ranges,
+ ArrayRef<FixItHint> Hints,
+ const SourceManager &SM) {
+ SmallVector<CharSourceRange, 4> SpellingRanges;
+ mapDiagnosticRanges(Loc, Ranges, SpellingRanges, &SM);
+ emitCodeContext(Loc, Level, SpellingRanges, Hints, SM);
+}
+
+/// \brief A helper function for emitMacroExpansion to print the
+/// macro expansion message
+void DiagnosticRenderer::emitSingleMacroExpansion(
+ SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ ArrayRef<CharSourceRange> Ranges,
+ const SourceManager &SM) {
+ // Find the spelling location for the macro definition. We must use the
+ // spelling location here to avoid emitting a macro backtrace for the note.
+ SourceLocation SpellingLoc = SM.getSpellingLoc(Loc);
+
+ // Map the ranges into the FileID of the diagnostic location.
+ SmallVector<CharSourceRange, 4> SpellingRanges;
+ mapDiagnosticRanges(Loc, Ranges, SpellingRanges, &SM);
+
+ SmallString<100> MessageStorage;
+ llvm::raw_svector_ostream Message(MessageStorage);
+ StringRef MacroName = getImmediateMacroName(Loc, SM, LangOpts);
+ if (MacroName.empty())
+ Message << "expanded from here";
+ else
+ Message << "expanded from macro '" << MacroName << "'";
+
+ emitDiagnostic(SpellingLoc, DiagnosticsEngine::Note, Message.str(),
+ SpellingRanges, None, &SM);
+}
+
+/// Check that the macro argument location of Loc starts with ArgumentLoc.
+/// The starting location of the macro expansion is used to differentiate
+/// between distinct macro expansions.
+static bool checkLocForMacroArgExpansion(SourceLocation Loc,
+ const SourceManager &SM,
+ SourceLocation ArgumentLoc) {
+ SourceLocation MacroLoc;
+ if (SM.isMacroArgExpansion(Loc, &MacroLoc)) {
+ if (ArgumentLoc == MacroLoc) return true;
+ }
+
+ return false;
+}
+
+/// Check if all the locations in the range have the same macro argument
+/// expansion, and that that expansion starts with ArgumentLoc.
+static bool checkRangeForMacroArgExpansion(CharSourceRange Range,
+ const SourceManager &SM,
+ SourceLocation ArgumentLoc) {
+ SourceLocation BegLoc = Range.getBegin(), EndLoc = Range.getEnd();
+ while (BegLoc != EndLoc) {
+ if (!checkLocForMacroArgExpansion(BegLoc, SM, ArgumentLoc))
+ return false;
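+    // Note: getLocWithOffset returns the advanced location; it does not
+    // modify BegLoc in place.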
+    BegLoc = BegLoc.getLocWithOffset(1);
+ }
+
+ return checkLocForMacroArgExpansion(BegLoc, SM, ArgumentLoc);
+}
+
+/// A helper function to check if the current ranges are all inside the same
+/// macro argument expansion as Loc.
+static bool checkRangesForMacroArgExpansion(SourceLocation Loc,
+ ArrayRef<CharSourceRange> Ranges,
+ const SourceManager &SM) {
+ assert(Loc.isMacroID() && "Must be a macro expansion!");
+
+ SmallVector<CharSourceRange, 4> SpellingRanges;
+ mapDiagnosticRanges(Loc, Ranges, SpellingRanges, &SM);
+
+  // Count all valid ranges.
+ unsigned ValidCount = 0;
+ for (auto I : Ranges)
+ if (I.isValid()) ValidCount++;
+
+ if (ValidCount > SpellingRanges.size())
+ return false;
+
+  // The source location where the macro argument expansion begins.
+  SourceLocation ArgumentLoc;
+
+  // Set ArgumentLoc to the beginning location of the expansion of Loc so we
+  // can check whether all the ranges expand from that same location.
+  if (!SM.isMacroArgExpansion(Loc, &ArgumentLoc))
+ return false;
+
+ for (auto I = SpellingRanges.begin(), E = SpellingRanges.end(); I != E; ++I) {
+ if (!checkRangeForMacroArgExpansion(*I, SM, ArgumentLoc))
+ return false;
+ }
+
+ return true;
+}
+
+/// \brief Recursively emit notes for each macro expansion and caret
+/// diagnostics where appropriate.
+///
+/// Walks up the macro expansion stack printing expansion notes, the code
+/// snippet, caret, underlines and FixItHint display as appropriate at each
+/// level.
+///
+/// \param Loc The location for this caret.
+/// \param Level The diagnostic level currently being emitted.
+/// \param Ranges The underlined ranges for this code snippet.
+/// \param Hints The FixIt hints active for this diagnostic.
+void DiagnosticRenderer::emitMacroExpansions(SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ ArrayRef<CharSourceRange> Ranges,
+ ArrayRef<FixItHint> Hints,
+ const SourceManager &SM) {
+ assert(Loc.isValid() && "must have a valid source location here");
+
+ // Produce a stack of macro backtraces.
+ SmallVector<SourceLocation, 8> LocationStack;
+ unsigned IgnoredEnd = 0;
+ while (Loc.isMacroID()) {
+ // If this is the expansion of a macro argument, point the caret at the
+ // use of the argument in the definition of the macro, not the expansion.
+ if (SM.isMacroArgExpansion(Loc))
+ LocationStack.push_back(SM.getImmediateExpansionRange(Loc).first);
+ else
+ LocationStack.push_back(Loc);
+
+ if (checkRangesForMacroArgExpansion(Loc, Ranges, SM))
+ IgnoredEnd = LocationStack.size();
+
+ Loc = SM.getImmediateMacroCallerLoc(Loc);
+
+ // Once the location no longer points into a macro, try stepping through
+ // the last found location. This sometimes produces additional useful
+ // backtraces.
+ if (Loc.isFileID())
+ Loc = SM.getImmediateMacroCallerLoc(LocationStack.back());
+ assert(Loc.isValid() && "must have a valid source location here");
+ }
+
+ LocationStack.erase(LocationStack.begin(),
+ LocationStack.begin() + IgnoredEnd);
+
+ unsigned MacroDepth = LocationStack.size();
+ unsigned MacroLimit = DiagOpts->MacroBacktraceLimit;
+ if (MacroDepth <= MacroLimit || MacroLimit == 0) {
+ for (auto I = LocationStack.rbegin(), E = LocationStack.rend();
+ I != E; ++I)
+ emitSingleMacroExpansion(*I, Level, Ranges, SM);
+ return;
+ }
+
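+  // The backtrace is too deep: print the first half and the last half of the
+  // stack (giving the odd message to the end) with a "skipping" note between.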
+ unsigned MacroStartMessages = MacroLimit / 2;
+ unsigned MacroEndMessages = MacroLimit / 2 + MacroLimit % 2;
+
+ for (auto I = LocationStack.rbegin(),
+ E = LocationStack.rbegin() + MacroStartMessages;
+ I != E; ++I)
+ emitSingleMacroExpansion(*I, Level, Ranges, SM);
+
+ SmallString<200> MessageStorage;
+ llvm::raw_svector_ostream Message(MessageStorage);
+ Message << "(skipping " << (MacroDepth - MacroLimit)
+ << " expansions in backtrace; use -fmacro-backtrace-limit=0 to "
+ "see all)";
+ emitBasicNote(Message.str());
+
+ for (auto I = LocationStack.rend() - MacroEndMessages,
+ E = LocationStack.rend();
+ I != E; ++I)
+ emitSingleMacroExpansion(*I, Level, Ranges, SM);
+}
+
+DiagnosticNoteRenderer::~DiagnosticNoteRenderer() {}
+
+void DiagnosticNoteRenderer::emitIncludeLocation(SourceLocation Loc,
+ PresumedLoc PLoc,
+ const SourceManager &SM) {
+ // Generate a note indicating the include location.
+ SmallString<200> MessageStorage;
+ llvm::raw_svector_ostream Message(MessageStorage);
+ Message << "in file included from " << PLoc.getFilename() << ':'
+ << PLoc.getLine() << ":";
+ emitNote(Loc, Message.str(), &SM);
+}
+
+void DiagnosticNoteRenderer::emitImportLocation(SourceLocation Loc,
+ PresumedLoc PLoc,
+ StringRef ModuleName,
+ const SourceManager &SM) {
+  // Generate a note indicating the module import location.
+ SmallString<200> MessageStorage;
+ llvm::raw_svector_ostream Message(MessageStorage);
+ Message << "in module '" << ModuleName;
+ if (PLoc.isValid())
+ Message << "' imported from " << PLoc.getFilename() << ':'
+ << PLoc.getLine();
+ Message << ":";
+ emitNote(Loc, Message.str(), &SM);
+}
+
+void
+DiagnosticNoteRenderer::emitBuildingModuleLocation(SourceLocation Loc,
+ PresumedLoc PLoc,
+ StringRef ModuleName,
+ const SourceManager &SM) {
+  // Generate a note indicating where the module is being built.
+ SmallString<200> MessageStorage;
+ llvm::raw_svector_ostream Message(MessageStorage);
+ if (PLoc.getFilename())
+ Message << "while building module '" << ModuleName << "' imported from "
+ << PLoc.getFilename() << ':' << PLoc.getLine() << ":";
+ else
+ Message << "while building module '" << ModuleName << "':";
+ emitNote(Loc, Message.str(), &SM);
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
new file mode 100644
index 0000000..ecef92e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
@@ -0,0 +1,592 @@
+//===--- FrontendAction.cpp -----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/FrontendAction.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/Frontend/ASTUnit.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/FrontendPluginRegistry.h"
+#include "clang/Frontend/LayoutOverrideSource.h"
+#include "clang/Frontend/MultiplexConsumer.h"
+#include "clang/Frontend/Utils.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Parse/ParseAST.h"
+#include "clang/Serialization/ASTDeserializationListener.h"
+#include "clang/Serialization/ASTReader.h"
+#include "clang/Serialization/GlobalModuleIndex.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <system_error>
+using namespace clang;
+
+template class llvm::Registry<clang::PluginASTAction>;
+
+namespace {
+
+class DelegatingDeserializationListener : public ASTDeserializationListener {
+ ASTDeserializationListener *Previous;
+ bool DeletePrevious;
+
+public:
+ explicit DelegatingDeserializationListener(
+ ASTDeserializationListener *Previous, bool DeletePrevious)
+ : Previous(Previous), DeletePrevious(DeletePrevious) {}
+ ~DelegatingDeserializationListener() override {
+ if (DeletePrevious)
+ delete Previous;
+ }
+
+ void ReaderInitialized(ASTReader *Reader) override {
+ if (Previous)
+ Previous->ReaderInitialized(Reader);
+ }
+ void IdentifierRead(serialization::IdentID ID,
+ IdentifierInfo *II) override {
+ if (Previous)
+ Previous->IdentifierRead(ID, II);
+ }
+ void TypeRead(serialization::TypeIdx Idx, QualType T) override {
+ if (Previous)
+ Previous->TypeRead(Idx, T);
+ }
+ void DeclRead(serialization::DeclID ID, const Decl *D) override {
+ if (Previous)
+ Previous->DeclRead(ID, D);
+ }
+ void SelectorRead(serialization::SelectorID ID, Selector Sel) override {
+ if (Previous)
+ Previous->SelectorRead(ID, Sel);
+ }
+ void MacroDefinitionRead(serialization::PreprocessedEntityID PPID,
+ MacroDefinitionRecord *MD) override {
+ if (Previous)
+ Previous->MacroDefinitionRead(PPID, MD);
+ }
+};
+
+/// \brief Dumps deserialized declarations.
+class DeserializedDeclsDumper : public DelegatingDeserializationListener {
+public:
+ explicit DeserializedDeclsDumper(ASTDeserializationListener *Previous,
+ bool DeletePrevious)
+ : DelegatingDeserializationListener(Previous, DeletePrevious) {}
+
+ void DeclRead(serialization::DeclID ID, const Decl *D) override {
+ llvm::outs() << "PCH DECL: " << D->getDeclKindName();
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ llvm::outs() << " - " << *ND;
+ llvm::outs() << "\n";
+
+ DelegatingDeserializationListener::DeclRead(ID, D);
+ }
+};
+
+/// \brief Checks deserialized declarations and emits error if a name
+/// matches one given in command-line using -error-on-deserialized-decl.
+class DeserializedDeclsChecker : public DelegatingDeserializationListener {
+ ASTContext &Ctx;
+ std::set<std::string> NamesToCheck;
+
+public:
+ DeserializedDeclsChecker(ASTContext &Ctx,
+ const std::set<std::string> &NamesToCheck,
+ ASTDeserializationListener *Previous,
+ bool DeletePrevious)
+ : DelegatingDeserializationListener(Previous, DeletePrevious), Ctx(Ctx),
+ NamesToCheck(NamesToCheck) {}
+
+ void DeclRead(serialization::DeclID ID, const Decl *D) override {
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ if (NamesToCheck.find(ND->getNameAsString()) != NamesToCheck.end()) {
+ unsigned DiagID
+ = Ctx.getDiagnostics().getCustomDiagID(DiagnosticsEngine::Error,
+ "%0 was deserialized");
+ Ctx.getDiagnostics().Report(Ctx.getFullLoc(D->getLocation()), DiagID)
+ << ND->getNameAsString();
+ }
+
+ DelegatingDeserializationListener::DeclRead(ID, D);
+ }
+};
+
+} // end anonymous namespace
+
+FrontendAction::FrontendAction() : Instance(nullptr) {}
+
+FrontendAction::~FrontendAction() {}
+
+void FrontendAction::setCurrentInput(const FrontendInputFile &CurrentInput,
+ std::unique_ptr<ASTUnit> AST) {
+ this->CurrentInput = CurrentInput;
+ CurrentASTUnit = std::move(AST);
+}
+
+std::unique_ptr<ASTConsumer>
+FrontendAction::CreateWrappedASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ std::unique_ptr<ASTConsumer> Consumer = CreateASTConsumer(CI, InFile);
+ if (!Consumer)
+ return nullptr;
+
+ if (CI.getFrontendOpts().AddPluginActions.size() == 0)
+ return Consumer;
+
+ // Make sure the non-plugin consumer is first, so that plugins can't
+ // modify the AST.
+ std::vector<std::unique_ptr<ASTConsumer>> Consumers;
+ Consumers.push_back(std::move(Consumer));
+
+ for (size_t i = 0, e = CI.getFrontendOpts().AddPluginActions.size();
+ i != e; ++i) {
+ // This is O(|plugins| * |add_plugins|), but since both numbers are
+ // way below 50 in practice, that's ok.
+ for (FrontendPluginRegistry::iterator
+ it = FrontendPluginRegistry::begin(),
+ ie = FrontendPluginRegistry::end();
+ it != ie; ++it) {
+ if (it->getName() != CI.getFrontendOpts().AddPluginActions[i])
+ continue;
+ std::unique_ptr<PluginASTAction> P = it->instantiate();
+ if (P->ParseArgs(CI, CI.getFrontendOpts().AddPluginArgs[i]))
+ Consumers.push_back(P->CreateASTConsumer(CI, InFile));
+ }
+ }
+
+ return llvm::make_unique<MultiplexConsumer>(std::move(Consumers));
+}
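+
+// Illustrative sketch (hypothetical plugin, not from the original sources):
+// a minimal PluginASTAction that the loop above would pick up when the user
+// passes -add-plugin example-plugin. Registration goes through the same
+// FrontendPluginRegistry that CreateWrappedASTConsumer iterates.
+//
+//   class ExamplePluginAction : public PluginASTAction {
+//   protected:
+//     std::unique_ptr<ASTConsumer>
+//     CreateASTConsumer(CompilerInstance &CI, StringRef InFile) override {
+//       return llvm::make_unique<ASTConsumer>(); // no-op consumer
+//     }
+//     bool ParseArgs(const CompilerInstance &CI,
+//                    const std::vector<std::string> &Args) override {
+//       return true; // accept any (or no) arguments
+//     }
+//   };
+//
+//   static FrontendPluginRegistry::Add<ExamplePluginAction>
+//       X("example-plugin", "no-op example plugin");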
+
+bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
+ const FrontendInputFile &Input) {
+ assert(!Instance && "Already processing a source file!");
+ assert(!Input.isEmpty() && "Unexpected empty filename!");
+ setCurrentInput(Input);
+ setCompilerInstance(&CI);
+
+ StringRef InputFile = Input.getFile();
+ bool HasBegunSourceFile = false;
+ if (!BeginInvocation(CI))
+ goto failure;
+
+ // AST files follow a very different path, since they share objects via the
+ // AST unit.
+ if (Input.getKind() == IK_AST) {
+ assert(!usesPreprocessorOnly() &&
+ "Attempt to pass AST file to preprocessor only action!");
+ assert(hasASTFileSupport() &&
+ "This action does not have AST file support!");
+
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(&CI.getDiagnostics());
+
+ std::unique_ptr<ASTUnit> AST = ASTUnit::LoadFromASTFile(
+ InputFile, CI.getPCHContainerReader(), Diags, CI.getFileSystemOpts(),
+ CI.getCodeGenOpts().DebugTypeExtRefs);
+
+ if (!AST)
+ goto failure;
+
+ // Inform the diagnostic client we are processing a source file.
+ CI.getDiagnosticClient().BeginSourceFile(CI.getLangOpts(), nullptr);
+ HasBegunSourceFile = true;
+
+ // Set the shared objects; these are reset when we finish processing the
+ // file, since otherwise the CompilerInstance would happily destroy them.
+ CI.setFileManager(&AST->getFileManager());
+ CI.setSourceManager(&AST->getSourceManager());
+ CI.setPreprocessor(&AST->getPreprocessor());
+ CI.setASTContext(&AST->getASTContext());
+
+ setCurrentInput(Input, std::move(AST));
+
+ // Initialize the action.
+ if (!BeginSourceFileAction(CI, InputFile))
+ goto failure;
+
+ // Create the AST consumer.
+ CI.setASTConsumer(CreateWrappedASTConsumer(CI, InputFile));
+ if (!CI.hasASTConsumer())
+ goto failure;
+
+ return true;
+ }
+
+ if (!CI.hasVirtualFileSystem()) {
+ if (IntrusiveRefCntPtr<vfs::FileSystem> VFS =
+ createVFSFromCompilerInvocation(CI.getInvocation(),
+ CI.getDiagnostics()))
+ CI.setVirtualFileSystem(VFS);
+ else
+ goto failure;
+ }
+
+ // Set up the file and source managers, if needed.
+ if (!CI.hasFileManager())
+ CI.createFileManager();
+ if (!CI.hasSourceManager())
+ CI.createSourceManager(CI.getFileManager());
+
+ // IR files bypass the rest of initialization.
+ if (Input.getKind() == IK_LLVM_IR) {
+ assert(hasIRSupport() &&
+ "This action does not have IR file support!");
+
+ // Inform the diagnostic client we are processing a source file.
+ CI.getDiagnosticClient().BeginSourceFile(CI.getLangOpts(), nullptr);
+ HasBegunSourceFile = true;
+
+ // Initialize the action.
+ if (!BeginSourceFileAction(CI, InputFile))
+ goto failure;
+
+ // Initialize the main file entry.
+ if (!CI.InitializeSourceManager(CurrentInput))
+ goto failure;
+
+ return true;
+ }
+
+ // If the implicit PCH include is actually a directory, rather than
+ // a single file, search for a suitable PCH file in that directory.
+ if (!CI.getPreprocessorOpts().ImplicitPCHInclude.empty()) {
+ FileManager &FileMgr = CI.getFileManager();
+ PreprocessorOptions &PPOpts = CI.getPreprocessorOpts();
+ StringRef PCHInclude = PPOpts.ImplicitPCHInclude;
+ std::string SpecificModuleCachePath = CI.getSpecificModuleCachePath();
+ if (const DirectoryEntry *PCHDir = FileMgr.getDirectory(PCHInclude)) {
+ std::error_code EC;
+ SmallString<128> DirNative;
+ llvm::sys::path::native(PCHDir->getName(), DirNative);
+ bool Found = false;
+ for (llvm::sys::fs::directory_iterator Dir(DirNative, EC), DirEnd;
+ Dir != DirEnd && !EC; Dir.increment(EC)) {
+ // Check whether this is an acceptable AST file.
+ if (ASTReader::isAcceptableASTFile(
+ Dir->path(), FileMgr, CI.getPCHContainerReader(),
+ CI.getLangOpts(), CI.getTargetOpts(), CI.getPreprocessorOpts(),
+ SpecificModuleCachePath)) {
+ PPOpts.ImplicitPCHInclude = Dir->path();
+ Found = true;
+ break;
+ }
+ }
+
+ if (!Found) {
+ CI.getDiagnostics().Report(diag::err_fe_no_pch_in_dir) << PCHInclude;
+ goto failure;
+ }
+ }
+ }
+
+ // Set up the preprocessor if needed. When parsing model files the
+ // preprocessor of the original source is reused.
+ if (!isModelParsingAction())
+ CI.createPreprocessor(getTranslationUnitKind());
+
+ // Inform the diagnostic client we are processing a source file.
+ CI.getDiagnosticClient().BeginSourceFile(CI.getLangOpts(),
+ &CI.getPreprocessor());
+ HasBegunSourceFile = true;
+
+ // Initialize the action.
+ if (!BeginSourceFileAction(CI, InputFile))
+ goto failure;
+
+ // Initialize the main file entry. It is important that this occurs after
+ // BeginSourceFileAction, which may change CurrentInput during module builds.
+ if (!CI.InitializeSourceManager(CurrentInput))
+ goto failure;
+
+ // Create the AST context and consumer unless this is a preprocessor only
+ // action.
+ if (!usesPreprocessorOnly()) {
+ // Parsing a model file should reuse the existing ASTContext.
+ if (!isModelParsingAction())
+ CI.createASTContext();
+
+ std::unique_ptr<ASTConsumer> Consumer =
+ CreateWrappedASTConsumer(CI, InputFile);
+ if (!Consumer)
+ goto failure;
+
+ // FIXME: should we avoid overwriting the ASTMutationListener when
+ // parsing model files?
+ if (!isModelParsingAction())
+ CI.getASTContext().setASTMutationListener(Consumer->GetASTMutationListener());
+
+ if (!CI.getPreprocessorOpts().ChainedIncludes.empty()) {
+ // Convert headers to PCH and chain them.
+ IntrusiveRefCntPtr<ExternalSemaSource> source, FinalReader;
+ source = createChainedIncludesSource(CI, FinalReader);
+ if (!source)
+ goto failure;
+ CI.setModuleManager(static_cast<ASTReader *>(FinalReader.get()));
+ CI.getASTContext().setExternalSource(source);
+ } else if (!CI.getPreprocessorOpts().ImplicitPCHInclude.empty()) {
+ // Use PCH.
+ assert(hasPCHSupport() && "This action does not have PCH support!");
+ ASTDeserializationListener *DeserialListener =
+ Consumer->GetASTDeserializationListener();
+ bool DeleteDeserialListener = false;
+ if (CI.getPreprocessorOpts().DumpDeserializedPCHDecls) {
+ DeserialListener = new DeserializedDeclsDumper(DeserialListener,
+ DeleteDeserialListener);
+ DeleteDeserialListener = true;
+ }
+ if (!CI.getPreprocessorOpts().DeserializedPCHDeclsToErrorOn.empty()) {
+ DeserialListener = new DeserializedDeclsChecker(
+ CI.getASTContext(),
+ CI.getPreprocessorOpts().DeserializedPCHDeclsToErrorOn,
+ DeserialListener, DeleteDeserialListener);
+ DeleteDeserialListener = true;
+ }
+ CI.createPCHExternalASTSource(
+ CI.getPreprocessorOpts().ImplicitPCHInclude,
+ CI.getPreprocessorOpts().DisablePCHValidation,
+ CI.getPreprocessorOpts().AllowPCHWithCompilerErrors, DeserialListener,
+ DeleteDeserialListener);
+ if (!CI.getASTContext().getExternalSource())
+ goto failure;
+ }
+
+ CI.setASTConsumer(std::move(Consumer));
+ if (!CI.hasASTConsumer())
+ goto failure;
+ }
+
+ // Initialize built-in info as long as we aren't using an external AST
+ // source.
+ if (!CI.hasASTContext() || !CI.getASTContext().getExternalSource()) {
+ Preprocessor &PP = CI.getPreprocessor();
+
+ // If modules are enabled, create the module manager before creating
+ // any builtins, so that all declarations know that they might be
+ // extended by an external source.
+ if (CI.getLangOpts().Modules)
+ CI.createModuleManager();
+
+ PP.getBuiltinInfo().initializeBuiltins(PP.getIdentifierTable(),
+ PP.getLangOpts());
+ } else {
+ // FIXME: If this is a problem, recover from it by creating a multiplex
+ // source.
+ assert((!CI.getLangOpts().Modules || CI.getModuleManager()) &&
+ "modules enabled but created an external source that "
+ "doesn't support modules");
+ }
+
+ // If we were asked to load any module map files, do so now.
+ for (const auto &Filename : CI.getFrontendOpts().ModuleMapFiles) {
+ if (auto *File = CI.getFileManager().getFile(Filename))
+ CI.getPreprocessor().getHeaderSearchInfo().loadModuleMapFile(
+ File, /*IsSystem*/false);
+ else
+ CI.getDiagnostics().Report(diag::err_module_map_not_found) << Filename;
+ }
+
+ // If we were asked to load any module files, do so now.
+ for (const auto &ModuleFile : CI.getFrontendOpts().ModuleFiles)
+ if (!CI.loadModuleFile(ModuleFile))
+ goto failure;
+
+ // If there is a layout overrides file, attach an external AST source that
+ // provides the layouts from that file.
+ if (!CI.getFrontendOpts().OverrideRecordLayoutsFile.empty() &&
+ CI.hasASTContext() && !CI.getASTContext().getExternalSource()) {
+ IntrusiveRefCntPtr<ExternalASTSource>
+ Override(new LayoutOverrideSource(
+ CI.getFrontendOpts().OverrideRecordLayoutsFile));
+ CI.getASTContext().setExternalSource(Override);
+ }
+
+ return true;
+
+ // If we failed, reset state since the client will not end up calling the
+ // matching EndSourceFile().
+ failure:
+ if (isCurrentFileAST()) {
+ CI.setASTContext(nullptr);
+ CI.setPreprocessor(nullptr);
+ CI.setSourceManager(nullptr);
+ CI.setFileManager(nullptr);
+ }
+
+ if (HasBegunSourceFile)
+ CI.getDiagnosticClient().EndSourceFile();
+ CI.clearOutputFiles(/*EraseFiles=*/true);
+ setCurrentInput(FrontendInputFile());
+ setCompilerInstance(nullptr);
+ return false;
+}
+
+bool FrontendAction::Execute() {
+ CompilerInstance &CI = getCompilerInstance();
+
+ if (CI.hasFrontendTimer()) {
+ llvm::TimeRegion Timer(CI.getFrontendTimer());
+ ExecuteAction();
+ } else
+ ExecuteAction();
+
+ // If we are supposed to rebuild the global module index, do so now unless
+ // there were any module-build failures.
+ if (CI.shouldBuildGlobalModuleIndex() && CI.hasFileManager() &&
+ CI.hasPreprocessor()) {
+ StringRef Cache =
+ CI.getPreprocessor().getHeaderSearchInfo().getModuleCachePath();
+ if (!Cache.empty())
+ GlobalModuleIndex::writeIndex(CI.getFileManager(),
+ CI.getPCHContainerReader(), Cache);
+ }
+
+ return true;
+}
+
+void FrontendAction::EndSourceFile() {
+ CompilerInstance &CI = getCompilerInstance();
+
+ // Inform the diagnostic client we are done with this source file.
+ CI.getDiagnosticClient().EndSourceFile();
+
+ // Inform the preprocessor we are done.
+ if (CI.hasPreprocessor())
+ CI.getPreprocessor().EndSourceFile();
+
+ // Finalize the action.
+ EndSourceFileAction();
+
+ // Sema references the AST consumer, so reset Sema first.
+ //
+ // FIXME: Is there more per-file state we could just drop here?
+ bool DisableFree = CI.getFrontendOpts().DisableFree;
+ if (DisableFree) {
+ CI.resetAndLeakSema();
+ CI.resetAndLeakASTContext();
+ BuryPointer(CI.takeASTConsumer().get());
+ } else {
+ CI.setSema(nullptr);
+ CI.setASTContext(nullptr);
+ CI.setASTConsumer(nullptr);
+ }
+
+ if (CI.getFrontendOpts().ShowStats) {
+ llvm::errs() << "\nSTATISTICS FOR '" << getCurrentFile() << "':\n";
+ CI.getPreprocessor().PrintStats();
+ CI.getPreprocessor().getIdentifierTable().PrintStats();
+ CI.getPreprocessor().getHeaderSearchInfo().PrintStats();
+ CI.getSourceManager().PrintStats();
+ llvm::errs() << "\n";
+ }
+
+ // Clean up the output streams, and erase the output files if instructed by
+ // the FrontendAction.
+ CI.clearOutputFiles(/*EraseFiles=*/shouldEraseOutputFiles());
+
+ if (isCurrentFileAST()) {
+ if (DisableFree) {
+ CI.resetAndLeakPreprocessor();
+ CI.resetAndLeakSourceManager();
+ CI.resetAndLeakFileManager();
+ } else {
+ CI.setPreprocessor(nullptr);
+ CI.setSourceManager(nullptr);
+ CI.setFileManager(nullptr);
+ }
+ }
+
+ setCompilerInstance(nullptr);
+ setCurrentInput(FrontendInputFile());
+}
+
+bool FrontendAction::shouldEraseOutputFiles() {
+ return getCompilerInstance().getDiagnostics().hasErrorOccurred();
+}
+
+//===----------------------------------------------------------------------===//
+// Utility Actions
+//===----------------------------------------------------------------------===//
+
+void ASTFrontendAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+ if (!CI.hasPreprocessor())
+ return;
+
+ // FIXME: Move the truncation aspect of this into Sema; we delayed this
+ // until here so the source manager would be initialized.
+ if (hasCodeCompletionSupport() &&
+ !CI.getFrontendOpts().CodeCompletionAt.FileName.empty())
+ CI.createCodeCompletionConsumer();
+
+ // Use a code completion consumer?
+ CodeCompleteConsumer *CompletionConsumer = nullptr;
+ if (CI.hasCodeCompletionConsumer())
+ CompletionConsumer = &CI.getCodeCompletionConsumer();
+
+ if (!CI.hasSema())
+ CI.createSema(getTranslationUnitKind(), CompletionConsumer);
+
+ ParseAST(CI.getSema(), CI.getFrontendOpts().ShowStats,
+ CI.getFrontendOpts().SkipFunctionBodies);
+}
+
+void PluginASTAction::anchor() { }
+
+std::unique_ptr<ASTConsumer>
+PreprocessorFrontendAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ llvm_unreachable("Invalid CreateASTConsumer on preprocessor action!");
+}
+
+std::unique_ptr<ASTConsumer>
+WrapperFrontendAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return WrappedAction->CreateASTConsumer(CI, InFile);
+}
+bool WrapperFrontendAction::BeginInvocation(CompilerInstance &CI) {
+ return WrappedAction->BeginInvocation(CI);
+}
+bool WrapperFrontendAction::BeginSourceFileAction(CompilerInstance &CI,
+ StringRef Filename) {
+ WrappedAction->setCurrentInput(getCurrentInput());
+ WrappedAction->setCompilerInstance(&CI);
+ return WrappedAction->BeginSourceFileAction(CI, Filename);
+}
+void WrapperFrontendAction::ExecuteAction() {
+ WrappedAction->ExecuteAction();
+}
+void WrapperFrontendAction::EndSourceFileAction() {
+ WrappedAction->EndSourceFileAction();
+}
+
+bool WrapperFrontendAction::usesPreprocessorOnly() const {
+ return WrappedAction->usesPreprocessorOnly();
+}
+TranslationUnitKind WrapperFrontendAction::getTranslationUnitKind() {
+ return WrappedAction->getTranslationUnitKind();
+}
+bool WrapperFrontendAction::hasPCHSupport() const {
+ return WrappedAction->hasPCHSupport();
+}
+bool WrapperFrontendAction::hasASTFileSupport() const {
+ return WrappedAction->hasASTFileSupport();
+}
+bool WrapperFrontendAction::hasIRSupport() const {
+ return WrappedAction->hasIRSupport();
+}
+bool WrapperFrontendAction::hasCodeCompletionSupport() const {
+ return WrappedAction->hasCodeCompletionSupport();
+}
+
+WrapperFrontendAction::WrapperFrontendAction(FrontendAction *WrappedAction)
+ : WrappedAction(WrappedAction) {}
+
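+// Illustrative sketch (hypothetical subclass, not from the original sources):
+// WrapperFrontendAction is meant to be subclassed so that one hook can be
+// customized while everything else forwards to the wrapped action, e.g.:
+//
+//   class LoggingAction : public WrapperFrontendAction {
+//   public:
+//     explicit LoggingAction(FrontendAction *Wrapped)
+//         : WrapperFrontendAction(Wrapped) {}
+//     void ExecuteAction() override {
+//       llvm::errs() << "executing " << getCurrentFile() << "\n";
+//       WrapperFrontendAction::ExecuteAction();
+//     }
+//   };
+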
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
new file mode 100644
index 0000000..d6c88d2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
@@ -0,0 +1,749 @@
+//===--- FrontendActions.cpp ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Frontend/ASTConsumers.h"
+#include "clang/Frontend/ASTUnit.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/MultiplexConsumer.h"
+#include "clang/Frontend/Utils.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/Pragma.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Serialization/ASTReader.h"
+#include "clang/Serialization/ASTWriter.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <memory>
+#include <system_error>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Custom Actions
+//===----------------------------------------------------------------------===//
+
+std::unique_ptr<ASTConsumer>
+InitOnlyAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ return llvm::make_unique<ASTConsumer>();
+}
+
+void InitOnlyAction::ExecuteAction() {
+}
+
+//===----------------------------------------------------------------------===//
+// AST Consumer Actions
+//===----------------------------------------------------------------------===//
+
+std::unique_ptr<ASTConsumer>
+ASTPrintAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ if (raw_ostream *OS = CI.createDefaultOutputFile(false, InFile))
+ return CreateASTPrinter(OS, CI.getFrontendOpts().ASTDumpFilter);
+ return nullptr;
+}
+
+std::unique_ptr<ASTConsumer>
+ASTDumpAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ return CreateASTDumper(CI.getFrontendOpts().ASTDumpFilter,
+ CI.getFrontendOpts().ASTDumpDecls,
+ CI.getFrontendOpts().ASTDumpLookups);
+}
+
+std::unique_ptr<ASTConsumer>
+ASTDeclListAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ return CreateASTDeclNodeLister();
+}
+
+std::unique_ptr<ASTConsumer>
+ASTViewAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ return CreateASTViewer();
+}
+
+std::unique_ptr<ASTConsumer>
+DeclContextPrintAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return CreateDeclContextPrinter();
+}
+
+std::unique_ptr<ASTConsumer>
+GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ std::string Sysroot;
+ std::string OutputFile;
+ raw_pwrite_stream *OS =
+ ComputeASTConsumerArguments(CI, InFile, Sysroot, OutputFile);
+ if (!OS)
+ return nullptr;
+
+ if (!CI.getFrontendOpts().RelocatablePCH)
+ Sysroot.clear();
+
+ auto Buffer = std::make_shared<PCHBuffer>();
+ std::vector<std::unique_ptr<ASTConsumer>> Consumers;
+ Consumers.push_back(llvm::make_unique<PCHGenerator>(
+ CI.getPreprocessor(), OutputFile, nullptr, Sysroot,
+ Buffer, CI.getFrontendOpts().ModuleFileExtensions));
+ Consumers.push_back(CI.getPCHContainerWriter().CreatePCHContainerGenerator(
+ CI, InFile, OutputFile, OS, Buffer));
+
+ return llvm::make_unique<MultiplexConsumer>(std::move(Consumers));
+}
+
+raw_pwrite_stream *GeneratePCHAction::ComputeASTConsumerArguments(
+ CompilerInstance &CI, StringRef InFile, std::string &Sysroot,
+ std::string &OutputFile) {
+ Sysroot = CI.getHeaderSearchOpts().Sysroot;
+ if (CI.getFrontendOpts().RelocatablePCH && Sysroot.empty()) {
+ CI.getDiagnostics().Report(diag::err_relocatable_without_isysroot);
+ return nullptr;
+ }
+
+ // We use createOutputFile here because this is exposed via libclang, and we
+ // must disable the RemoveFileOnSignal behavior.
+ // We use a temporary to avoid race conditions.
+ raw_pwrite_stream *OS =
+ CI.createOutputFile(CI.getFrontendOpts().OutputFile, /*Binary=*/true,
+ /*RemoveFileOnSignal=*/false, InFile,
+ /*Extension=*/"", /*useTemporary=*/true);
+ if (!OS)
+ return nullptr;
+
+ OutputFile = CI.getFrontendOpts().OutputFile;
+ return OS;
+}
+
+std::unique_ptr<ASTConsumer>
+GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ std::string Sysroot;
+ std::string OutputFile;
+ raw_pwrite_stream *OS =
+ ComputeASTConsumerArguments(CI, InFile, Sysroot, OutputFile);
+ if (!OS)
+ return nullptr;
+
+ auto Buffer = std::make_shared<PCHBuffer>();
+ std::vector<std::unique_ptr<ASTConsumer>> Consumers;
+
+ Consumers.push_back(llvm::make_unique<PCHGenerator>(
+ CI.getPreprocessor(), OutputFile, Module, Sysroot,
+ Buffer, CI.getFrontendOpts().ModuleFileExtensions,
+ /*AllowASTWithErrors=*/false,
+ /*IncludeTimestamps=*/
+ +CI.getFrontendOpts().BuildingImplicitModule));
+ Consumers.push_back(CI.getPCHContainerWriter().CreatePCHContainerGenerator(
+ CI, InFile, OutputFile, OS, Buffer));
+ return llvm::make_unique<MultiplexConsumer>(std::move(Consumers));
+}
+
+static SmallVectorImpl<char> &
+operator+=(SmallVectorImpl<char> &Includes, StringRef RHS) {
+ Includes.append(RHS.begin(), RHS.end());
+ return Includes;
+}
+
+static std::error_code addHeaderInclude(StringRef HeaderName,
+ SmallVectorImpl<char> &Includes,
+ const LangOptions &LangOpts,
+ bool IsExternC) {
+ if (IsExternC && LangOpts.CPlusPlus)
+ Includes += "extern \"C\" {\n";
+ if (LangOpts.ObjC1)
+ Includes += "#import \"";
+ else
+ Includes += "#include \"";
+
+ Includes += HeaderName;
+
+ Includes += "\"\n";
+ if (IsExternC && LangOpts.CPlusPlus)
+ Includes += "}\n";
+ return std::error_code();
+}
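+
+// Worked example (illustrative): for HeaderName "Foo.h" in an extern "C"
+// module being built as C++, the Includes buffer is appended with:
+//
+//   extern "C" {
+//   #include "Foo.h"
+//   }
+//
+// whereas an Objective-C build appends: #import "Foo.h"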
+
+/// \brief Collect the set of header includes needed to construct the given
+/// module and update the TopHeaders file set of the module.
+///
+/// \param Module The module we're collecting includes from.
+///
+/// \param Includes Will be augmented with the set of \#includes or \#imports
+/// needed to load all of the named headers.
+static std::error_code
+collectModuleHeaderIncludes(const LangOptions &LangOpts, FileManager &FileMgr,
+ ModuleMap &ModMap, clang::Module *Module,
+ SmallVectorImpl<char> &Includes) {
+ // Don't collect any headers for unavailable modules.
+ if (!Module->isAvailable())
+ return std::error_code();
+
+ // Add includes for each of these headers.
+ for (Module::Header &H : Module->Headers[Module::HK_Normal]) {
+ Module->addTopHeader(H.Entry);
+ // Use the path as specified in the module map file. We'll look for this
+ // file relative to the module build directory (the directory containing
+ // the module map file) so this will find the same file that we found
+ // while parsing the module map.
+ if (std::error_code Err = addHeaderInclude(H.NameAsWritten, Includes,
+ LangOpts, Module->IsExternC))
+ return Err;
+ }
+ // Note that Module->PrivateHeaders will not be a TopHeader.
+
+ if (Module::Header UmbrellaHeader = Module->getUmbrellaHeader()) {
+ Module->addTopHeader(UmbrellaHeader.Entry);
+ if (Module->Parent) {
+ // Include the umbrella header for submodules.
+ if (std::error_code Err = addHeaderInclude(UmbrellaHeader.NameAsWritten,
+ Includes, LangOpts,
+ Module->IsExternC))
+ return Err;
+ }
+ } else if (Module::DirectoryName UmbrellaDir = Module->getUmbrellaDir()) {
+ // Add all of the headers we find in this subdirectory.
+ std::error_code EC;
+ SmallString<128> DirNative;
+ llvm::sys::path::native(UmbrellaDir.Entry->getName(), DirNative);
+ for (llvm::sys::fs::recursive_directory_iterator Dir(DirNative, EC),
+ DirEnd;
+ Dir != DirEnd && !EC; Dir.increment(EC)) {
+ // Check whether this entry has an extension typically associated with
+ // headers.
+ if (!llvm::StringSwitch<bool>(llvm::sys::path::extension(Dir->path()))
+ .Cases(".h", ".H", ".hh", ".hpp", true)
+ .Default(false))
+ continue;
+
+ const FileEntry *Header = FileMgr.getFile(Dir->path());
+ // FIXME: This shouldn't happen unless there is a file system race. Is
+ // that worth diagnosing?
+ if (!Header)
+ continue;
+
+ // If this header is marked 'unavailable' in this module, don't include
+ // it.
+ if (ModMap.isHeaderUnavailableInModule(Header, Module))
+ continue;
+
+ // Compute the relative path from the directory to this file.
+ SmallVector<StringRef, 16> Components;
+ auto PathIt = llvm::sys::path::rbegin(Dir->path());
+ for (int I = 0; I != Dir.level() + 1; ++I, ++PathIt)
+ Components.push_back(*PathIt);
+ SmallString<128> RelativeHeader(UmbrellaDir.NameAsWritten);
+ for (auto It = Components.rbegin(), End = Components.rend(); It != End;
+ ++It)
+ llvm::sys::path::append(RelativeHeader, *It);
+
+ // Include this header as part of the umbrella directory.
+ Module->addTopHeader(Header);
+ if (std::error_code Err = addHeaderInclude(RelativeHeader, Includes,
+ LangOpts, Module->IsExternC))
+ return Err;
+ }
+
+ if (EC)
+ return EC;
+ }
+
+ // Recurse into submodules.
+ for (clang::Module::submodule_iterator Sub = Module->submodule_begin(),
+ SubEnd = Module->submodule_end();
+ Sub != SubEnd; ++Sub)
+ if (std::error_code Err = collectModuleHeaderIncludes(
+ LangOpts, FileMgr, ModMap, *Sub, Includes))
+ return Err;
+
+ return std::error_code();
+}
+
+bool GenerateModuleAction::BeginSourceFileAction(CompilerInstance &CI,
+ StringRef Filename) {
+ // Find the module map file.
+ const FileEntry *ModuleMap =
+ CI.getFileManager().getFile(Filename, /*openFile*/true);
+ if (!ModuleMap) {
+ CI.getDiagnostics().Report(diag::err_module_map_not_found)
+ << Filename;
+ return false;
+ }
+
+ // Set up embedding for any specified files. Do this before we load any
+ // source files, including the primary module map for the compilation.
+ for (const auto &F : CI.getFrontendOpts().ModulesEmbedFiles) {
+ if (const auto *FE = CI.getFileManager().getFile(F, /*openFile*/true))
+ CI.getSourceManager().setFileIsTransient(FE);
+ else
+ CI.getDiagnostics().Report(diag::err_modules_embed_file_not_found) << F;
+ }
+ if (CI.getFrontendOpts().ModulesEmbedAllFiles)
+ CI.getSourceManager().setAllFilesAreTransient(true);
+
+ // Parse the module map file.
+ HeaderSearch &HS = CI.getPreprocessor().getHeaderSearchInfo();
+ if (HS.loadModuleMapFile(ModuleMap, IsSystem))
+ return false;
+
+ if (CI.getLangOpts().CurrentModule.empty()) {
+ CI.getDiagnostics().Report(diag::err_missing_module_name);
+
+ // FIXME: Eventually, we could consider asking whether there was just
+ // a single module described in the module map, and use that as a
+ // default. Then it would be fairly trivial to just "compile" a module
+ // map with a single module (the common case).
+ return false;
+ }
+
+ // If we're being run from the command-line, the module build stack will not
+ // have been filled in yet, so complete it now in order to allow us to detect
+ // module cycles.
+ SourceManager &SourceMgr = CI.getSourceManager();
+ if (SourceMgr.getModuleBuildStack().empty())
+ SourceMgr.pushModuleBuildStack(CI.getLangOpts().CurrentModule,
+ FullSourceLoc(SourceLocation(), SourceMgr));
+
+ // Dig out the module definition.
+ Module = HS.lookupModule(CI.getLangOpts().CurrentModule,
+ /*AllowSearch=*/false);
+ if (!Module) {
+ CI.getDiagnostics().Report(diag::err_missing_module)
+ << CI.getLangOpts().CurrentModule << Filename;
+
+ return false;
+ }
+
+ // Check whether we can build this module at all.
+ clang::Module::Requirement Requirement;
+ clang::Module::UnresolvedHeaderDirective MissingHeader;
+ if (!Module->isAvailable(CI.getLangOpts(), CI.getTarget(), Requirement,
+ MissingHeader)) {
+ if (MissingHeader.FileNameLoc.isValid()) {
+ CI.getDiagnostics().Report(MissingHeader.FileNameLoc,
+ diag::err_module_header_missing)
+ << MissingHeader.IsUmbrella << MissingHeader.FileName;
+ } else {
+ CI.getDiagnostics().Report(diag::err_module_unavailable)
+ << Module->getFullModuleName()
+ << Requirement.second << Requirement.first;
+ }
+
+ return false;
+ }
+
+ if (ModuleMapForUniquing && ModuleMapForUniquing != ModuleMap) {
+ Module->IsInferred = true;
+ HS.getModuleMap().setInferredModuleAllowedBy(Module, ModuleMapForUniquing);
+ } else {
+ ModuleMapForUniquing = ModuleMap;
+ }
+
+ FileManager &FileMgr = CI.getFileManager();
+
+ // Collect the set of #includes we need to build the module.
+ SmallString<256> HeaderContents;
+ std::error_code Err = std::error_code();
+ if (Module::Header UmbrellaHeader = Module->getUmbrellaHeader())
+ Err = addHeaderInclude(UmbrellaHeader.NameAsWritten, HeaderContents,
+ CI.getLangOpts(), Module->IsExternC);
+ if (!Err)
+ Err = collectModuleHeaderIncludes(
+ CI.getLangOpts(), FileMgr,
+ CI.getPreprocessor().getHeaderSearchInfo().getModuleMap(), Module,
+ HeaderContents);
+
+ if (Err) {
+ CI.getDiagnostics().Report(diag::err_module_cannot_create_includes)
+ << Module->getFullModuleName() << Err.message();
+ return false;
+ }
+
+ // Inform the preprocessor that includes from within the input buffer should
+ // be resolved relative to the build directory of the module map file.
+ CI.getPreprocessor().setMainFileDir(Module->Directory);
+
+ std::unique_ptr<llvm::MemoryBuffer> InputBuffer =
+ llvm::MemoryBuffer::getMemBufferCopy(HeaderContents,
+ Module::getModuleInputBufferName());
+ // Ownership of InputBuffer will be transferred to the SourceManager.
+ setCurrentInput(FrontendInputFile(InputBuffer.release(), getCurrentFileKind(),
+ Module->IsSystem));
+ return true;
+}
+
+raw_pwrite_stream *GenerateModuleAction::ComputeASTConsumerArguments(
+ CompilerInstance &CI, StringRef InFile, std::string &Sysroot,
+ std::string &OutputFile) {
+ // If no output file was provided, figure out where this module would go
+ // in the module cache.
+ if (CI.getFrontendOpts().OutputFile.empty()) {
+ HeaderSearch &HS = CI.getPreprocessor().getHeaderSearchInfo();
+ CI.getFrontendOpts().OutputFile =
+ HS.getModuleFileName(CI.getLangOpts().CurrentModule,
+ ModuleMapForUniquing->getName());
+ }
+
+ // We use createOutputFile here because this is exposed via libclang, and we
+ // must disable the RemoveFileOnSignal behavior.
+ // We use a temporary to avoid race conditions.
+ raw_pwrite_stream *OS =
+ CI.createOutputFile(CI.getFrontendOpts().OutputFile, /*Binary=*/true,
+ /*RemoveFileOnSignal=*/false, InFile,
+ /*Extension=*/"", /*useTemporary=*/true,
+ /*CreateMissingDirectories=*/true);
+ if (!OS)
+ return nullptr;
+
+ OutputFile = CI.getFrontendOpts().OutputFile;
+ return OS;
+}
+
+std::unique_ptr<ASTConsumer>
+SyntaxOnlyAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ return llvm::make_unique<ASTConsumer>();
+}
+
+std::unique_ptr<ASTConsumer>
+DumpModuleInfoAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return llvm::make_unique<ASTConsumer>();
+}
+
+std::unique_ptr<ASTConsumer>
+VerifyPCHAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ return llvm::make_unique<ASTConsumer>();
+}
+
+void VerifyPCHAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+ bool Preamble = CI.getPreprocessorOpts().PrecompiledPreambleBytes.first != 0;
+ const std::string &Sysroot = CI.getHeaderSearchOpts().Sysroot;
+ std::unique_ptr<ASTReader> Reader(new ASTReader(
+ CI.getPreprocessor(), CI.getASTContext(), CI.getPCHContainerReader(),
+ CI.getFrontendOpts().ModuleFileExtensions,
+ Sysroot.empty() ? "" : Sysroot.c_str(),
+ /*DisableValidation*/ false,
+ /*AllowPCHWithCompilerErrors*/ false,
+ /*AllowConfigurationMismatch*/ true,
+ /*ValidateSystemInputs*/ true));
+
+ Reader->ReadAST(getCurrentFile(),
+ Preamble ? serialization::MK_Preamble
+ : serialization::MK_PCH,
+ SourceLocation(),
+ ASTReader::ARR_ConfigurationMismatch);
+}
+
+namespace {
+ /// \brief AST reader listener that dumps module information for a module
+ /// file.
+ class DumpModuleInfoListener : public ASTReaderListener {
+ llvm::raw_ostream &Out;
+
+ public:
+ DumpModuleInfoListener(llvm::raw_ostream &Out) : Out(Out) { }
+
+#define DUMP_BOOLEAN(Value, Text) \
+ Out.indent(4) << Text << ": " << (Value? "Yes" : "No") << "\n"
+
+ bool ReadFullVersionInformation(StringRef FullVersion) override {
+ Out.indent(2)
+ << "Generated by "
+ << (FullVersion == getClangFullRepositoryVersion()? "this"
+ : "a different")
+ << " Clang: " << FullVersion << "\n";
+ return ASTReaderListener::ReadFullVersionInformation(FullVersion);
+ }
+
+ void ReadModuleName(StringRef ModuleName) override {
+ Out.indent(2) << "Module name: " << ModuleName << "\n";
+ }
+ void ReadModuleMapFile(StringRef ModuleMapPath) override {
+ Out.indent(2) << "Module map file: " << ModuleMapPath << "\n";
+ }
+
+ bool ReadLanguageOptions(const LangOptions &LangOpts, bool Complain,
+ bool AllowCompatibleDifferences) override {
+ Out.indent(2) << "Language options:\n";
+#define LANGOPT(Name, Bits, Default, Description) \
+ DUMP_BOOLEAN(LangOpts.Name, Description);
+#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
+ Out.indent(4) << Description << ": " \
+ << static_cast<unsigned>(LangOpts.get##Name()) << "\n";
+#define VALUE_LANGOPT(Name, Bits, Default, Description) \
+ Out.indent(4) << Description << ": " << LangOpts.Name << "\n";
+#define BENIGN_LANGOPT(Name, Bits, Default, Description)
+#define BENIGN_ENUM_LANGOPT(Name, Type, Bits, Default, Description)
+#include "clang/Basic/LangOptions.def"
+
+ if (!LangOpts.ModuleFeatures.empty()) {
+ Out.indent(4) << "Module features:\n";
+ for (StringRef Feature : LangOpts.ModuleFeatures)
+ Out.indent(6) << Feature << "\n";
+ }
+
+ return false;
+ }
+
+ bool ReadTargetOptions(const TargetOptions &TargetOpts, bool Complain,
+ bool AllowCompatibleDifferences) override {
+ Out.indent(2) << "Target options:\n";
+ Out.indent(4) << " Triple: " << TargetOpts.Triple << "\n";
+ Out.indent(4) << " CPU: " << TargetOpts.CPU << "\n";
+ Out.indent(4) << " ABI: " << TargetOpts.ABI << "\n";
+
+ if (!TargetOpts.FeaturesAsWritten.empty()) {
+ Out.indent(4) << "Target features:\n";
+ for (unsigned I = 0, N = TargetOpts.FeaturesAsWritten.size();
+ I != N; ++I) {
+ Out.indent(6) << TargetOpts.FeaturesAsWritten[I] << "\n";
+ }
+ }
+
+ return false;
+ }
+
+ bool ReadDiagnosticOptions(IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts,
+ bool Complain) override {
+ Out.indent(2) << "Diagnostic options:\n";
+#define DIAGOPT(Name, Bits, Default) DUMP_BOOLEAN(DiagOpts->Name, #Name);
+#define ENUM_DIAGOPT(Name, Type, Bits, Default) \
+ Out.indent(4) << #Name << ": " << DiagOpts->get##Name() << "\n";
+#define VALUE_DIAGOPT(Name, Bits, Default) \
+ Out.indent(4) << #Name << ": " << DiagOpts->Name << "\n";
+#include "clang/Basic/DiagnosticOptions.def"
+
+ Out.indent(4) << "Diagnostic flags:\n";
+ for (const std::string &Warning : DiagOpts->Warnings)
+ Out.indent(6) << "-W" << Warning << "\n";
+ for (const std::string &Remark : DiagOpts->Remarks)
+ Out.indent(6) << "-R" << Remark << "\n";
+
+ return false;
+ }
+
+ bool ReadHeaderSearchOptions(const HeaderSearchOptions &HSOpts,
+ StringRef SpecificModuleCachePath,
+ bool Complain) override {
+ Out.indent(2) << "Header search options:\n";
+ Out.indent(4) << "System root [-isysroot=]: '" << HSOpts.Sysroot << "'\n";
+ Out.indent(4) << "Module Cache: '" << SpecificModuleCachePath << "'\n";
+ DUMP_BOOLEAN(HSOpts.UseBuiltinIncludes,
+ "Use builtin include directories [-nobuiltininc]");
+ DUMP_BOOLEAN(HSOpts.UseStandardSystemIncludes,
+ "Use standard system include directories [-nostdinc]");
+ DUMP_BOOLEAN(HSOpts.UseStandardCXXIncludes,
+ "Use standard C++ include directories [-nostdinc++]");
+ DUMP_BOOLEAN(HSOpts.UseLibcxx,
+ "Use libc++ (rather than libstdc++) [-stdlib=]");
+ return false;
+ }
+
+ bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
+ bool Complain,
+ std::string &SuggestedPredefines) override {
+ Out.indent(2) << "Preprocessor options:\n";
+ DUMP_BOOLEAN(PPOpts.UsePredefines,
+ "Uses compiler/target-specific predefines [-undef]");
+ DUMP_BOOLEAN(PPOpts.DetailedRecord,
+ "Uses detailed preprocessing record (for indexing)");
+
+ if (!PPOpts.Macros.empty()) {
+ Out.indent(4) << "Predefined macros:\n";
+ }
+
+ for (std::vector<std::pair<std::string, bool/*isUndef*/> >::const_iterator
+ I = PPOpts.Macros.begin(), IEnd = PPOpts.Macros.end();
+ I != IEnd; ++I) {
+ Out.indent(6);
+ if (I->second)
+ Out << "-U";
+ else
+ Out << "-D";
+ Out << I->first << "\n";
+ }
+ return false;
+ }
+
+ /// Indicates that a particular module file extension has been read.
+ void readModuleFileExtension(
+ const ModuleFileExtensionMetadata &Metadata) override {
+ Out.indent(2) << "Module file extension '"
+ << Metadata.BlockName << "' " << Metadata.MajorVersion
+ << "." << Metadata.MinorVersion;
+ if (!Metadata.UserInfo.empty()) {
+ Out << ": ";
+ Out.write_escaped(Metadata.UserInfo);
+ }
+
+ Out << "\n";
+ }
+#undef DUMP_BOOLEAN
+ };
+}
+
+void DumpModuleInfoAction::ExecuteAction() {
+ // Set up the output file.
+ std::unique_ptr<llvm::raw_fd_ostream> OutFile;
+ StringRef OutputFileName = getCompilerInstance().getFrontendOpts().OutputFile;
+ if (!OutputFileName.empty() && OutputFileName != "-") {
+ std::error_code EC;
+ OutFile.reset(new llvm::raw_fd_ostream(OutputFileName.str(), EC,
+ llvm::sys::fs::F_Text));
+ }
+ llvm::raw_ostream &Out = OutFile.get()? *OutFile.get() : llvm::outs();
+
+ Out << "Information for module file '" << getCurrentFile() << "':\n";
+ DumpModuleInfoListener Listener(Out);
+ ASTReader::readASTFileControlBlock(
+ getCurrentFile(), getCompilerInstance().getFileManager(),
+ getCompilerInstance().getPCHContainerReader(),
+ /*FindModuleFileExtensions=*/true, Listener);
+}
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Actions
+//===----------------------------------------------------------------------===//
+
+void DumpRawTokensAction::ExecuteAction() {
+ Preprocessor &PP = getCompilerInstance().getPreprocessor();
+ SourceManager &SM = PP.getSourceManager();
+
+ // Start lexing the specified input file.
+ const llvm::MemoryBuffer *FromFile = SM.getBuffer(SM.getMainFileID());
+ Lexer RawLex(SM.getMainFileID(), FromFile, SM, PP.getLangOpts());
+ RawLex.SetKeepWhitespaceMode(true);
+
+ Token RawTok;
+ RawLex.LexFromRawLexer(RawTok);
+ while (RawTok.isNot(tok::eof)) {
+ PP.DumpToken(RawTok, true);
+ llvm::errs() << "\n";
+ RawLex.LexFromRawLexer(RawTok);
+ }
+}
+
+void DumpTokensAction::ExecuteAction() {
+ Preprocessor &PP = getCompilerInstance().getPreprocessor();
+ // Start preprocessing the specified input file.
+ Token Tok;
+ PP.EnterMainSourceFile();
+ do {
+ PP.Lex(Tok);
+ PP.DumpToken(Tok, true);
+ llvm::errs() << "\n";
+ } while (Tok.isNot(tok::eof));
+}
+
+void GeneratePTHAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+ raw_pwrite_stream *OS = CI.createDefaultOutputFile(true, getCurrentFile());
+ if (!OS)
+ return;
+
+ CacheTokens(CI.getPreprocessor(), OS);
+}
+
+void PreprocessOnlyAction::ExecuteAction() {
+ Preprocessor &PP = getCompilerInstance().getPreprocessor();
+
+ // Ignore unknown pragmas.
+ PP.IgnorePragmas();
+
+ Token Tok;
+ // Start parsing the specified input file.
+ PP.EnterMainSourceFile();
+ do {
+ PP.Lex(Tok);
+ } while (Tok.isNot(tok::eof));
+}
+
+void PrintPreprocessedAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+ // Output file may need to be set to 'Binary', to avoid converting Unix style
+ // line feeds (<LF>) to Microsoft style line feeds (<CR><LF>).
+ //
+ // Look to see what type of line endings the file uses. If there's a
+ // CRLF, then we won't open the file up in binary mode. If there is
+ // just an LF or CR, then we will open the file up in binary mode.
+ // In this fashion, the output format should match the input format, unless
+ // the input format has inconsistent line endings.
+ //
+ // This should be a relatively fast operation since most files won't have
+ // all of their source code on a single line. However, that is still a
+ // concern, so if we scan for too long, we'll just assume the file should
+ // be opened in binary mode.
+ bool BinaryMode = true;
+ bool InvalidFile = false;
+ const SourceManager& SM = CI.getSourceManager();
+ const llvm::MemoryBuffer *Buffer = SM.getBuffer(SM.getMainFileID(),
+ &InvalidFile);
+ if (!InvalidFile) {
+ const char *cur = Buffer->getBufferStart();
+ const char *end = Buffer->getBufferEnd();
+ const char *next = (cur != end) ? cur + 1 : end;
+
+ // Limit ourselves to only scanning 256 characters into the source
+ // file. This is mostly a sanity check in case the file has no
+ // newlines whatsoever.
+ if (end - cur > 256) end = cur + 256;
+
+ while (next < end) {
+ if (*cur == 0x0D) { // CR
+ if (*next == 0x0A) // CRLF
+ BinaryMode = false;
+
+ break;
+ } else if (*cur == 0x0A) // LF
+ break;
+
+ ++cur, ++next;
+ }
+ }
+
+ raw_ostream *OS = CI.createDefaultOutputFile(BinaryMode, getCurrentFile());
+ if (!OS) return;
+
+ DoPrintPreprocessedInput(CI.getPreprocessor(), OS,
+ CI.getPreprocessorOutputOpts());
+}
+
+void PrintPreambleAction::ExecuteAction() {
+ switch (getCurrentFileKind()) {
+ case IK_C:
+ case IK_CXX:
+ case IK_ObjC:
+ case IK_ObjCXX:
+ case IK_OpenCL:
+ case IK_CUDA:
+ break;
+
+ case IK_None:
+ case IK_Asm:
+ case IK_PreprocessedC:
+ case IK_PreprocessedCuda:
+ case IK_PreprocessedCXX:
+ case IK_PreprocessedObjC:
+ case IK_PreprocessedObjCXX:
+ case IK_AST:
+ case IK_LLVM_IR:
+ // We can't do anything with these.
+ return;
+ }
+
+ CompilerInstance &CI = getCompilerInstance();
+ auto Buffer = CI.getFileManager().getBufferForFile(getCurrentFile());
+ if (Buffer) {
+ unsigned Preamble =
+ Lexer::ComputePreamble((*Buffer)->getBuffer(), CI.getLangOpts()).first;
+ llvm::outs().write((*Buffer)->getBufferStart(), Preamble);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp
new file mode 100644
index 0000000..9ede674
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp
@@ -0,0 +1,32 @@
+//===--- FrontendOptions.cpp ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/FrontendOptions.h"
+#include "llvm/ADT/StringSwitch.h"
+using namespace clang;
+
+InputKind FrontendOptions::getInputKindForExtension(StringRef Extension) {
+ return llvm::StringSwitch<InputKind>(Extension)
+ .Cases("ast", "pcm", IK_AST)
+ .Case("c", IK_C)
+ .Cases("S", "s", IK_Asm)
+ .Case("i", IK_PreprocessedC)
+ .Case("ii", IK_PreprocessedCXX)
+ .Case("cui", IK_PreprocessedCuda)
+ .Case("m", IK_ObjC)
+ .Case("mi", IK_PreprocessedObjC)
+ .Cases("mm", "M", IK_ObjCXX)
+ .Case("mii", IK_PreprocessedObjCXX)
+ .Cases("C", "cc", "cp", IK_CXX)
+ .Cases("cpp", "CPP", "c++", "cxx", "hpp", IK_CXX)
+ .Case("cl", IK_OpenCL)
+ .Case("cu", IK_CUDA)
+ .Cases("ll", "bc", IK_LLVM_IR)
+ .Default(IK_C);
+}
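+
+// Usage sketch (illustrative), following the table above:
+//   FrontendOptions::getInputKindForExtension("cpp") == IK_CXX
+//   FrontendOptions::getInputKindForExtension("ll")  == IK_LLVM_IR
+//   FrontendOptions::getInputKindForExtension("xyz") == IK_C  // the default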
diff --git a/contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp b/contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp
new file mode 100644
index 0000000..0bc1169
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp
@@ -0,0 +1,154 @@
+//===--- HeaderIncludeGen.cpp - Generate Header Includes ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+namespace {
+class HeaderIncludesCallback : public PPCallbacks {
+ SourceManager &SM;
+ raw_ostream *OutputFile;
+ unsigned CurrentIncludeDepth;
+ bool HasProcessedPredefines;
+ bool OwnsOutputFile;
+ bool ShowAllHeaders;
+ bool ShowDepth;
+ bool MSStyle;
+
+public:
+ HeaderIncludesCallback(const Preprocessor *PP, bool ShowAllHeaders_,
+ raw_ostream *OutputFile_, bool OwnsOutputFile_,
+ bool ShowDepth_, bool MSStyle_)
+ : SM(PP->getSourceManager()), OutputFile(OutputFile_),
+ CurrentIncludeDepth(0), HasProcessedPredefines(false),
+ OwnsOutputFile(OwnsOutputFile_), ShowAllHeaders(ShowAllHeaders_),
+ ShowDepth(ShowDepth_), MSStyle(MSStyle_) {}
+
+ ~HeaderIncludesCallback() override {
+ if (OwnsOutputFile)
+ delete OutputFile;
+ }
+
+ void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID) override;
+};
+}
+
+static void PrintHeaderInfo(raw_ostream *OutputFile, const char* Filename,
+ bool ShowDepth, unsigned CurrentIncludeDepth,
+ bool MSStyle) {
+ // Write to a temporary string to avoid unnecessary flushing on errs().
+ SmallString<512> Pathname(Filename);
+ if (!MSStyle)
+ Lexer::Stringify(Pathname);
+
+ SmallString<256> Msg;
+ if (MSStyle)
+ Msg += "Note: including file:";
+
+ if (ShowDepth) {
+ // The main source file is at depth 1, so skip one dot.
+ for (unsigned i = 1; i != CurrentIncludeDepth; ++i)
+ Msg += MSStyle ? ' ' : '.';
+
+ if (!MSStyle)
+ Msg += ' ';
+ }
+ Msg += Pathname;
+ Msg += '\n';
+
+ OutputFile->write(Msg.data(), Msg.size());
+ OutputFile->flush();
+}
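+
+// Worked example (illustrative): a header entered at include depth 3 prints
+// as
+//   .. /usr/include/stdio.h
+// in the default GCC -H style (one dot per level past the main file), and as
+//   Note: including file:  /usr/include/stdio.h
+// when MSStyle is set, matching cl.exe's /showIncludes format.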
+
+void clang::AttachHeaderIncludeGen(Preprocessor &PP,
+ const std::vector<std::string> &ExtraHeaders,
+ bool ShowAllHeaders,
+ StringRef OutputPath, bool ShowDepth,
+ bool MSStyle) {
+ raw_ostream *OutputFile = MSStyle ? &llvm::outs() : &llvm::errs();
+ bool OwnsOutputFile = false;
+
+ // Open the output file, if used.
+ if (!OutputPath.empty()) {
+ std::error_code EC;
+ llvm::raw_fd_ostream *OS = new llvm::raw_fd_ostream(
+ OutputPath.str(), EC, llvm::sys::fs::F_Append | llvm::sys::fs::F_Text);
+ if (EC) {
+ PP.getDiagnostics().Report(clang::diag::warn_fe_cc_print_header_failure)
+ << EC.message();
+ delete OS;
+ } else {
+ OS->SetUnbuffered();
+ OutputFile = OS;
+ OwnsOutputFile = true;
+ }
+ }
+
+ // Print header info for extra headers, pretending they were discovered by
+ // the regular preprocessor. The primary use case is to support proper
+ // generation of Make / Ninja file dependencies for implicit includes, such
+ // as sanitizer blacklists. This is only needed for cl.exe compatibility;
+ // the GNU way to generate rules is -M / -MM / -MD / -MMD.
+ for (auto Header : ExtraHeaders) {
+ PrintHeaderInfo(OutputFile, Header.c_str(), ShowDepth, 2, MSStyle);
+ }
+ PP.addPPCallbacks(llvm::make_unique<HeaderIncludesCallback>(&PP,
+ ShowAllHeaders,
+ OutputFile,
+ OwnsOutputFile,
+ ShowDepth,
+ MSStyle));
+}
+
+void HeaderIncludesCallback::FileChanged(SourceLocation Loc,
+ FileChangeReason Reason,
+ SrcMgr::CharacteristicKind NewFileType,
+ FileID PrevFID) {
+ // Unless we are exiting a #include, make sure to skip ahead to the line the
+ // #include directive was at.
+ PresumedLoc UserLoc = SM.getPresumedLoc(Loc);
+ if (UserLoc.isInvalid())
+ return;
+
+ // Adjust the current include depth.
+ if (Reason == PPCallbacks::EnterFile) {
+ ++CurrentIncludeDepth;
+ } else if (Reason == PPCallbacks::ExitFile) {
+ if (CurrentIncludeDepth)
+ --CurrentIncludeDepth;
+
+ // We track when we are done with the predefines by watching for the first
+ // place where we drop back to a nesting depth of 1.
+ if (CurrentIncludeDepth == 1 && !HasProcessedPredefines)
+ HasProcessedPredefines = true;
+
+ return;
+ } else
+ return;
+
+ // Show the header if we are (a) past the predefines, or (b) showing all
+ // headers and in the predefines at a depth past the initial file and command
+ // line buffers.
+ bool ShowHeader = (HasProcessedPredefines ||
+ (ShowAllHeaders && CurrentIncludeDepth > 2));
+
+ // Dump the header include information if we are past the predefines buffer
+ // or are showing all headers.
+ if (ShowHeader && Reason == PPCallbacks::EnterFile) {
+ PrintHeaderInfo(OutputFile, UserLoc.getFilename(),
+ ShowDepth, CurrentIncludeDepth, MSStyle);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp b/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
new file mode 100644
index 0000000..26bab0d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
@@ -0,0 +1,675 @@
+//===--- InitHeaderSearch.cpp - Initialize header search paths ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the InitHeaderSearch class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Config/config.h" // C_INCLUDE_DIRS
+#include "clang/Lex/HeaderMap.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/HeaderSearchOptions.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace clang::frontend;
+
+namespace {
+
+/// InitHeaderSearch - This class makes it easier to set the search paths of
+/// a HeaderSearch object. InitHeaderSearch stores several search path lists
+/// internally, which can be sent to a HeaderSearch object in one swoop.
+class InitHeaderSearch {
+ std::vector<std::pair<IncludeDirGroup, DirectoryLookup> > IncludePath;
+ typedef std::vector<std::pair<IncludeDirGroup,
+ DirectoryLookup> >::const_iterator path_iterator;
+ std::vector<std::pair<std::string, bool> > SystemHeaderPrefixes;
+ HeaderSearch &Headers;
+ bool Verbose;
+ std::string IncludeSysroot;
+ bool HasSysroot;
+
+public:
+
+ InitHeaderSearch(HeaderSearch &HS, bool verbose, StringRef sysroot)
+ : Headers(HS), Verbose(verbose), IncludeSysroot(sysroot),
+ HasSysroot(!(sysroot.empty() || sysroot == "/")) {
+ }
+
+ /// AddPath - Add the specified path to the specified group list, prefixing
+ /// the sysroot if used.
+ void AddPath(const Twine &Path, IncludeDirGroup Group, bool isFramework);
+
+ /// AddUnmappedPath - Add the specified path to the specified group list,
+ /// without performing any sysroot remapping.
+ void AddUnmappedPath(const Twine &Path, IncludeDirGroup Group,
+ bool isFramework);
+
+ /// AddSystemHeaderPrefix - Add the specified prefix to the system header
+ /// prefix list.
+ void AddSystemHeaderPrefix(StringRef Prefix, bool IsSystemHeader) {
+ SystemHeaderPrefixes.emplace_back(Prefix, IsSystemHeader);
+ }
+
+ /// AddGnuCPlusPlusIncludePaths - Add the necessary paths to support a gnu
+ /// libstdc++.
+ void AddGnuCPlusPlusIncludePaths(StringRef Base,
+ StringRef ArchDir,
+ StringRef Dir32,
+ StringRef Dir64,
+ const llvm::Triple &triple);
+
+ /// AddMinGWCPlusPlusIncludePaths - Add the necessary paths to support a MinGW
+ /// libstdc++.
+ void AddMinGWCPlusPlusIncludePaths(StringRef Base,
+ StringRef Arch,
+ StringRef Version);
+
+ /// AddDefaultCIncludePaths - Add paths that should always be searched.
+ void AddDefaultCIncludePaths(const llvm::Triple &triple,
+ const HeaderSearchOptions &HSOpts);
+
+ /// AddDefaultCPlusPlusIncludePaths - Add paths that should be searched when
+ /// compiling C++.
+ void AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple,
+ const HeaderSearchOptions &HSOpts);
+
+ /// AddDefaultIncludePaths - Adds the default system include paths so
+ /// that e.g. stdio.h is found.
+ void AddDefaultIncludePaths(const LangOptions &Lang,
+ const llvm::Triple &triple,
+ const HeaderSearchOptions &HSOpts);
+
+ /// Realize - Merges all search path lists into one list and sends it to
+ /// HeaderSearch.
+ void Realize(const LangOptions &Lang);
+};
+
+} // end anonymous namespace.
+
+static bool CanPrefixSysroot(StringRef Path) {
+#if defined(LLVM_ON_WIN32)
+ return !Path.empty() && llvm::sys::path::is_separator(Path[0]);
+#else
+ return llvm::sys::path::is_absolute(Path);
+#endif
+}
+
+void InitHeaderSearch::AddPath(const Twine &Path, IncludeDirGroup Group,
+ bool isFramework) {
+ // Add the path with the sysroot prepended, if a sysroot is in use and the
+ // path is one that can be prefixed.
+ if (HasSysroot) {
+ SmallString<256> MappedPathStorage;
+ StringRef MappedPathStr = Path.toStringRef(MappedPathStorage);
+ if (CanPrefixSysroot(MappedPathStr)) {
+ AddUnmappedPath(IncludeSysroot + Path, Group, isFramework);
+ return;
+ }
+ }
+
+ AddUnmappedPath(Path, Group, isFramework);
+}
+
+void InitHeaderSearch::AddUnmappedPath(const Twine &Path, IncludeDirGroup Group,
+ bool isFramework) {
+ assert(!Path.isTriviallyEmpty() && "can't handle empty path here");
+
+ FileManager &FM = Headers.getFileMgr();
+ SmallString<256> MappedPathStorage;
+ StringRef MappedPathStr = Path.toStringRef(MappedPathStorage);
+
+ // Compute the DirectoryLookup type.
+ SrcMgr::CharacteristicKind Type;
+ if (Group == Quoted || Group == Angled || Group == IndexHeaderMap) {
+ Type = SrcMgr::C_User;
+ } else if (Group == ExternCSystem) {
+ Type = SrcMgr::C_ExternCSystem;
+ } else {
+ Type = SrcMgr::C_System;
+ }
+
+ // If the directory exists, add it.
+ if (const DirectoryEntry *DE = FM.getDirectory(MappedPathStr)) {
+ IncludePath.push_back(
+ std::make_pair(Group, DirectoryLookup(DE, Type, isFramework)));
+ return;
+ }
+
+ // Check to see if this is an Apple-style headermap (headermaps are not
+ // allowed to be frameworks).
+ if (!isFramework) {
+ if (const FileEntry *FE = FM.getFile(MappedPathStr)) {
+ if (const HeaderMap *HM = Headers.CreateHeaderMap(FE)) {
+ // It is a headermap, add it to the search path.
+ IncludePath.push_back(
+ std::make_pair(Group,
+ DirectoryLookup(HM, Type, Group == IndexHeaderMap)));
+ return;
+ }
+ }
+ }
+
+ if (Verbose)
+ llvm::errs() << "ignoring nonexistent directory \""
+ << MappedPathStr << "\"\n";
+}
+
+void InitHeaderSearch::AddGnuCPlusPlusIncludePaths(StringRef Base,
+ StringRef ArchDir,
+ StringRef Dir32,
+ StringRef Dir64,
+ const llvm::Triple &triple) {
+ // Add the base dir
+ AddPath(Base, CXXSystem, false);
+
+ // Add the multilib dirs
+ llvm::Triple::ArchType arch = triple.getArch();
+ bool is64bit = arch == llvm::Triple::ppc64 || arch == llvm::Triple::x86_64;
+ if (is64bit)
+ AddPath(Base + "/" + ArchDir + "/" + Dir64, CXXSystem, false);
+ else
+ AddPath(Base + "/" + ArchDir + "/" + Dir32, CXXSystem, false);
+
+ // Add the backward dir
+ AddPath(Base + "/backward", CXXSystem, false);
+}
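+
+// Worked example (illustrative): on 32-bit x86 Darwin, the call below in
+// AddDefaultCPlusPlusIncludePaths,
+//   AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
+//                               "i686-apple-darwin10", "", "x86_64", triple);
+// adds /usr/include/c++/4.2.1, then /usr/include/c++/4.2.1/i686-apple-darwin10/
+// (Dir32 is empty here), then /usr/include/c++/4.2.1/backward.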
+
+void InitHeaderSearch::AddMinGWCPlusPlusIncludePaths(StringRef Base,
+ StringRef Arch,
+ StringRef Version) {
+ AddPath(Base + "/" + Arch + "/" + Version + "/include/c++",
+ CXXSystem, false);
+ AddPath(Base + "/" + Arch + "/" + Version + "/include/c++/" + Arch,
+ CXXSystem, false);
+ AddPath(Base + "/" + Arch + "/" + Version + "/include/c++/backward",
+ CXXSystem, false);
+}
+
+void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
+ const HeaderSearchOptions &HSOpts) {
+ llvm::Triple::OSType os = triple.getOS();
+
+ if (HSOpts.UseStandardSystemIncludes) {
+ switch (os) {
+ case llvm::Triple::CloudABI:
+ case llvm::Triple::FreeBSD:
+ case llvm::Triple::NetBSD:
+ case llvm::Triple::OpenBSD:
+ case llvm::Triple::Bitrig:
+ case llvm::Triple::NaCl:
+ case llvm::Triple::PS4:
+ case llvm::Triple::ELFIAMCU:
+ break;
+ case llvm::Triple::Win32:
+ if (triple.getEnvironment() != llvm::Triple::Cygnus)
+ break;
+ default:
+ // FIXME: temporary hack: hard-coded paths.
+ AddPath("/usr/local/include", System, false);
+ break;
+ }
+ }
+
+ // Builtin includes use #include_next directives and should be positioned
+ // just prior to the C include dirs.
+ if (HSOpts.UseBuiltinIncludes) {
+ // Ignore the sysroot; we *always* look for clang headers relative to the
+ // supplied path.
+ SmallString<128> P = StringRef(HSOpts.ResourceDir);
+ llvm::sys::path::append(P, "include");
+ AddUnmappedPath(P, ExternCSystem, false);
+ }
+
+ // All remaining additions are for system include directories, early exit if
+ // we aren't using them.
+ if (!HSOpts.UseStandardSystemIncludes)
+ return;
+
+ // Add dirs specified via 'configure --with-c-include-dirs'.
+ StringRef CIncludeDirs(C_INCLUDE_DIRS);
+ if (CIncludeDirs != "") {
+ SmallVector<StringRef, 5> dirs;
+ CIncludeDirs.split(dirs, ":");
+ for (StringRef dir : dirs)
+ AddPath(dir, ExternCSystem, false);
+ return;
+ }
+
+ switch (os) {
+ case llvm::Triple::Linux:
+ llvm_unreachable("Include management is handled in the driver.");
+
+ case llvm::Triple::CloudABI: {
+ // <sysroot>/<triple>/include
+ SmallString<128> P = StringRef(HSOpts.ResourceDir);
+ llvm::sys::path::append(P, "../../..", triple.str(), "include");
+ AddPath(P, System, false);
+ break;
+ }
+
+ case llvm::Triple::Haiku:
+ AddPath("/boot/common/include", System, false);
+ AddPath("/boot/develop/headers/os", System, false);
+ AddPath("/boot/develop/headers/os/app", System, false);
+ AddPath("/boot/develop/headers/os/arch", System, false);
+ AddPath("/boot/develop/headers/os/device", System, false);
+ AddPath("/boot/develop/headers/os/drivers", System, false);
+ AddPath("/boot/develop/headers/os/game", System, false);
+ AddPath("/boot/develop/headers/os/interface", System, false);
+ AddPath("/boot/develop/headers/os/kernel", System, false);
+ AddPath("/boot/develop/headers/os/locale", System, false);
+ AddPath("/boot/develop/headers/os/mail", System, false);
+ AddPath("/boot/develop/headers/os/media", System, false);
+ AddPath("/boot/develop/headers/os/midi", System, false);
+ AddPath("/boot/develop/headers/os/midi2", System, false);
+ AddPath("/boot/develop/headers/os/net", System, false);
+ AddPath("/boot/develop/headers/os/storage", System, false);
+ AddPath("/boot/develop/headers/os/support", System, false);
+ AddPath("/boot/develop/headers/os/translation", System, false);
+ AddPath("/boot/develop/headers/os/add-ons/graphics", System, false);
+ AddPath("/boot/develop/headers/os/add-ons/input_server", System, false);
+ AddPath("/boot/develop/headers/os/add-ons/screen_saver", System, false);
+ AddPath("/boot/develop/headers/os/add-ons/tracker", System, false);
+ AddPath("/boot/develop/headers/os/be_apps/Deskbar", System, false);
+ AddPath("/boot/develop/headers/os/be_apps/NetPositive", System, false);
+ AddPath("/boot/develop/headers/os/be_apps/Tracker", System, false);
+ AddPath("/boot/develop/headers/cpp", System, false);
+ AddPath("/boot/develop/headers/cpp/i586-pc-haiku", System, false);
+ AddPath("/boot/develop/headers/3rdparty", System, false);
+ AddPath("/boot/develop/headers/bsd", System, false);
+ AddPath("/boot/develop/headers/glibc", System, false);
+ AddPath("/boot/develop/headers/posix", System, false);
+ AddPath("/boot/develop/headers", System, false);
+ break;
+ case llvm::Triple::RTEMS:
+ break;
+ case llvm::Triple::Win32:
+ switch (triple.getEnvironment()) {
+ default: llvm_unreachable("Include management is handled in the driver.");
+ case llvm::Triple::Cygnus:
+ AddPath("/usr/include/w32api", System, false);
+ break;
+ case llvm::Triple::GNU:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch (os) {
+ case llvm::Triple::CloudABI:
+ case llvm::Triple::RTEMS:
+ case llvm::Triple::NaCl:
+ case llvm::Triple::ELFIAMCU:
+ break;
+ case llvm::Triple::PS4: {
+ // <isysroot> gets prepended later in AddPath().
+ std::string BaseSDKPath;
+ if (!HasSysroot) {
+ const char *envValue = getenv("SCE_PS4_SDK_DIR");
+ if (envValue)
+ BaseSDKPath = envValue;
+ else {
+ // The HSOpts.ResourceDir variable contains the location of Clang's
+ // resource files. Assuming that Clang was configured for the PS4
+ // without the --with-clang-resource-dir option, the resource files
+ // live in <SDK_DIR>/host_tools/lib/clang.
+ SmallString<128> P = StringRef(HSOpts.ResourceDir);
+ llvm::sys::path::append(P, "../../..");
+ BaseSDKPath = P.str();
+ }
+ }
+ AddPath(BaseSDKPath + "/target/include", System, false);
+ if (triple.isPS4CPU())
+ AddPath(BaseSDKPath + "/target/include_common", System, false);
+ }
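+ // Fall through: the PS4 also gets the default /usr/include entry below.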
+ default:
+ AddPath("/usr/include", ExternCSystem, false);
+ break;
+ }
+}
+
+void InitHeaderSearch::
+AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple, const HeaderSearchOptions &HSOpts) {
+ llvm::Triple::OSType os = triple.getOS();
+ // FIXME: temporary hack: hard-coded paths.
+
+ if (triple.isOSDarwin()) {
+ switch (triple.getArch()) {
+ default: break;
+
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
+ "powerpc-apple-darwin10", "", "ppc64",
+ triple);
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.0.0",
+ "powerpc-apple-darwin10", "", "ppc64",
+ triple);
+ break;
+
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
+ "i686-apple-darwin10", "", "x86_64", triple);
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.0.0",
+ "i686-apple-darwin8", "", "", triple);
+ break;
+
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
+ "arm-apple-darwin10", "v7", "", triple);
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
+ "arm-apple-darwin10", "v6", "", triple);
+ break;
+
+ case llvm::Triple::aarch64:
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
+ "arm64-apple-darwin10", "", "", triple);
+ break;
+ }
+ return;
+ }
+
+ switch (os) {
+ case llvm::Triple::Linux:
+ llvm_unreachable("Include management is handled in the driver.");
+ break;
+ case llvm::Triple::Win32:
+ switch (triple.getEnvironment()) {
+ default: llvm_unreachable("Include management is handled in the driver.");
+ case llvm::Triple::Cygnus:
+ // Cygwin-1.7
+ AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "4.7.3");
+ AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "4.5.3");
+ AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "4.3.4");
+ // g++-4 / Cygwin-1.5
+ AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "4.3.2");
+ break;
+ }
+ break;
+ case llvm::Triple::DragonFly:
+ AddPath("/usr/include/c++/5.0", CXXSystem, false);
+ break;
+ case llvm::Triple::OpenBSD: {
+ std::string t = triple.getTriple();
+ if (t.substr(0, 6) == "x86_64")
+ t.replace(0, 6, "amd64");
+ AddGnuCPlusPlusIncludePaths("/usr/include/g++",
+ t, "", "", triple);
+ break;
+ }
+ case llvm::Triple::Minix:
+ AddGnuCPlusPlusIncludePaths("/usr/gnu/include/c++/4.4.3",
+ "", "", "", triple);
+ break;
+ default:
+ break;
+ }
+}
+
+void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
+ const llvm::Triple &triple,
+ const HeaderSearchOptions &HSOpts) {
+ // NB: This code path is going away. All of the logic is moving into the
+ // driver which has the information necessary to do target-specific
+ // selections of default include paths. Each target which moves there will be
+ // exempted from this logic here until we can delete the entire pile of code.
+ switch (triple.getOS()) {
+ default:
+ break; // Everything else continues to use this routine's logic.
+
+ case llvm::Triple::Linux:
+ return;
+
+ case llvm::Triple::Win32:
+ if (triple.getEnvironment() != llvm::Triple::Cygnus ||
+ triple.isOSBinFormatMachO())
+ return;
+ break;
+ }
+
+ if (Lang.CPlusPlus && HSOpts.UseStandardCXXIncludes &&
+ HSOpts.UseStandardSystemIncludes) {
+ if (HSOpts.UseLibcxx) {
+ if (triple.isOSDarwin()) {
+ // On Darwin, libc++ may be installed alongside the compiler in
+ // include/c++/v1.
+ if (!HSOpts.ResourceDir.empty()) {
+ // Remove version from foo/lib/clang/version
+ StringRef NoVer = llvm::sys::path::parent_path(HSOpts.ResourceDir);
+ // Remove clang from foo/lib/clang
+ StringRef Lib = llvm::sys::path::parent_path(NoVer);
+ // Remove lib from foo/lib
+ SmallString<128> P = llvm::sys::path::parent_path(Lib);
+
+ // Get foo/include/c++/v1
+ llvm::sys::path::append(P, "include", "c++", "v1");
+ AddUnmappedPath(P, CXXSystem, false);
+ }
+ }
+ AddPath("/usr/include/c++/v1", CXXSystem, false);
+ } else {
+ AddDefaultCPlusPlusIncludePaths(triple, HSOpts);
+ }
+ }
+
+ AddDefaultCIncludePaths(triple, HSOpts);
+
+ // Add the default framework include paths on Darwin.
+ if (HSOpts.UseStandardSystemIncludes) {
+ if (triple.isOSDarwin()) {
+ AddPath("/System/Library/Frameworks", System, true);
+ AddPath("/Library/Frameworks", System, true);
+ }
+ }
+}
+
+/// RemoveDuplicates - If there are duplicate directory entries in the
+/// specified search list, remove the later (dead) ones. Returns the number of
+/// non-system directories removed, which is used to update NumAngled.
+static unsigned RemoveDuplicates(std::vector<DirectoryLookup> &SearchList,
+ unsigned First, bool Verbose) {
+ llvm::SmallPtrSet<const DirectoryEntry *, 8> SeenDirs;
+ llvm::SmallPtrSet<const DirectoryEntry *, 8> SeenFrameworkDirs;
+ llvm::SmallPtrSet<const HeaderMap *, 8> SeenHeaderMaps;
+ unsigned NonSystemRemoved = 0;
+ for (unsigned i = First; i != SearchList.size(); ++i) {
+ unsigned DirToRemove = i;
+
+ const DirectoryLookup &CurEntry = SearchList[i];
+
+ if (CurEntry.isNormalDir()) {
+ // If this isn't the first time we've seen this dir, remove it.
+ if (SeenDirs.insert(CurEntry.getDir()).second)
+ continue;
+ } else if (CurEntry.isFramework()) {
+ // If this isn't the first time we've seen this framework dir, remove it.
+ if (SeenFrameworkDirs.insert(CurEntry.getFrameworkDir()).second)
+ continue;
+ } else {
+ assert(CurEntry.isHeaderMap() && "Not a headermap or normal dir?");
+ // If this isn't the first time we've seen this headermap, remove it.
+ if (SeenHeaderMaps.insert(CurEntry.getHeaderMap()).second)
+ continue;
+ }
+
+ // If we have a normal #include dir/framework/headermap that is shadowed
+ // later in the chain by a system include location, we actually want to
+ // ignore the user's request and drop the user dir... keeping the system
+ // dir. This is weird, but required to emulate GCC's search path correctly.
+ //
+ // Since dupes of system dirs are rare, just rescan to find the original
+ // that we're nuking instead of using a DenseMap.
+ if (CurEntry.getDirCharacteristic() != SrcMgr::C_User) {
+ // Find the dir that this one duplicates.
+ unsigned FirstDir;
+ for (FirstDir = 0; ; ++FirstDir) {
+ assert(FirstDir != i && "Didn't find dupe?");
+
+ const DirectoryLookup &SearchEntry = SearchList[FirstDir];
+
+ // If these are different lookup types, then they can't be the dupe.
+ if (SearchEntry.getLookupType() != CurEntry.getLookupType())
+ continue;
+
+ bool isSame;
+ if (CurEntry.isNormalDir())
+ isSame = SearchEntry.getDir() == CurEntry.getDir();
+ else if (CurEntry.isFramework())
+ isSame = SearchEntry.getFrameworkDir() == CurEntry.getFrameworkDir();
+ else {
+ assert(CurEntry.isHeaderMap() && "Not a headermap or normal dir?");
+ isSame = SearchEntry.getHeaderMap() == CurEntry.getHeaderMap();
+ }
+
+ if (isSame)
+ break;
+ }
+
+ // If the first dir in the search path is a non-system dir, zap it
+ // instead of the system one.
+ if (SearchList[FirstDir].getDirCharacteristic() == SrcMgr::C_User)
+ DirToRemove = FirstDir;
+ }
+
+ if (Verbose) {
+ llvm::errs() << "ignoring duplicate directory \""
+ << CurEntry.getName() << "\"\n";
+ if (DirToRemove != i)
+ llvm::errs() << " as it is a non-system directory that duplicates "
+ << "a system directory\n";
+ }
+ if (DirToRemove != i)
+ ++NonSystemRemoved;
+
+ // This is reached if the current entry is a duplicate. Remove the
+ // DirToRemove (usually the current dir).
+ SearchList.erase(SearchList.begin()+DirToRemove);
+ --i;
+ }
+ return NonSystemRemoved;
+}
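+// For example, given the search list
+//   /usr/local/include (user), /usr/include (system), /usr/local/include (system)
+// the earlier *user* /usr/local/include entry is the one removed (keeping the
+// system copy, as GCC does), and the function returns 1.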
+
+
+void InitHeaderSearch::Realize(const LangOptions &Lang) {
+ // Concatenate ANGLE+SYSTEM+AFTER chains together into SearchList.
+ std::vector<DirectoryLookup> SearchList;
+ SearchList.reserve(IncludePath.size());
+
+ // Quoted arguments go first.
+ for (auto &Include : IncludePath)
+ if (Include.first == Quoted)
+ SearchList.push_back(Include.second);
+
+ // Deduplicate and remember index.
+ RemoveDuplicates(SearchList, 0, Verbose);
+ unsigned NumQuoted = SearchList.size();
+
+ for (auto &Include : IncludePath)
+ if (Include.first == Angled || Include.first == IndexHeaderMap)
+ SearchList.push_back(Include.second);
+
+ RemoveDuplicates(SearchList, NumQuoted, Verbose);
+ unsigned NumAngled = SearchList.size();
+
+ for (auto &Include : IncludePath)
+ if (Include.first == System || Include.first == ExternCSystem ||
+ (!Lang.ObjC1 && !Lang.CPlusPlus && Include.first == CSystem) ||
+ (/*FIXME !Lang.ObjC1 && */ Lang.CPlusPlus &&
+ Include.first == CXXSystem) ||
+ (Lang.ObjC1 && !Lang.CPlusPlus && Include.first == ObjCSystem) ||
+ (Lang.ObjC1 && Lang.CPlusPlus && Include.first == ObjCXXSystem))
+ SearchList.push_back(Include.second);
+
+ for (auto &Include : IncludePath)
+ if (Include.first == After)
+ SearchList.push_back(Include.second);
+
+ // Remove duplicates across both the Angled and System directories. GCC does
+ // this and failing to remove duplicates across these two groups breaks
+ // #include_next.
+ unsigned NonSystemRemoved = RemoveDuplicates(SearchList, NumQuoted, Verbose);
+ NumAngled -= NonSystemRemoved;
+
+ bool DontSearchCurDir = false; // TODO: set to true if -I- is set?
+ Headers.SetSearchPaths(SearchList, NumQuoted, NumAngled, DontSearchCurDir);
+
+ Headers.SetSystemHeaderPrefixes(SystemHeaderPrefixes);
+
+ // If verbose, print the list of directories that will be searched.
+ if (Verbose) {
+ llvm::errs() << "#include \"...\" search starts here:\n";
+ for (unsigned i = 0, e = SearchList.size(); i != e; ++i) {
+ if (i == NumQuoted)
+ llvm::errs() << "#include <...> search starts here:\n";
+ const char *Name = SearchList[i].getName();
+ const char *Suffix;
+ if (SearchList[i].isNormalDir())
+ Suffix = "";
+ else if (SearchList[i].isFramework())
+ Suffix = " (framework directory)";
+ else {
+ assert(SearchList[i].isHeaderMap() && "Unknown DirectoryLookup");
+ Suffix = " (headermap)";
+ }
+ llvm::errs() << " " << Name << Suffix << "\n";
+ }
+ llvm::errs() << "End of search list.\n";
+ }
+}
+
+void clang::ApplyHeaderSearchOptions(HeaderSearch &HS,
+ const HeaderSearchOptions &HSOpts,
+ const LangOptions &Lang,
+ const llvm::Triple &Triple) {
+ InitHeaderSearch Init(HS, HSOpts.Verbose, HSOpts.Sysroot);
+
+ // Add the user defined entries.
+ for (unsigned i = 0, e = HSOpts.UserEntries.size(); i != e; ++i) {
+ const HeaderSearchOptions::Entry &E = HSOpts.UserEntries[i];
+ if (E.IgnoreSysRoot) {
+ Init.AddUnmappedPath(E.Path, E.Group, E.IsFramework);
+ } else {
+ Init.AddPath(E.Path, E.Group, E.IsFramework);
+ }
+ }
+
+ Init.AddDefaultIncludePaths(Lang, Triple, HSOpts);
+
+ for (unsigned i = 0, e = HSOpts.SystemHeaderPrefixes.size(); i != e; ++i)
+ Init.AddSystemHeaderPrefix(HSOpts.SystemHeaderPrefixes[i].Prefix,
+ HSOpts.SystemHeaderPrefixes[i].IsSystemHeader);
+
+ if (HSOpts.UseBuiltinIncludes) {
+ // Set up the builtin include directory in the module map.
+ SmallString<128> P = StringRef(HSOpts.ResourceDir);
+ llvm::sys::path::append(P, "include");
+ if (const DirectoryEntry *Dir = HS.getFileMgr().getDirectory(P))
+ HS.getModuleMap().setBuiltinIncludeDir(Dir);
+ }
+
+ Init.Realize(Lang);
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp b/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp
new file mode 100644
index 0000000..15aa546
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp
@@ -0,0 +1,1003 @@
+//===--- InitPreprocessor.cpp - PP initialization code. ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the clang::InitializePreprocessor function.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/MacroBuilder.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/Version.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/FrontendOptions.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/PTHManager.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorOptions.h"
+#include "clang/Serialization/ASTReader.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+using namespace clang;
+
+static bool MacroBodyEndsInBackslash(StringRef MacroBody) {
+ while (!MacroBody.empty() && isWhitespace(MacroBody.back()))
+ MacroBody = MacroBody.drop_back();
+ return !MacroBody.empty() && MacroBody.back() == '\\';
+}
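+// E.g. returns true for "bar\" and "bar\ " (trailing whitespace is skipped)
+// and false for "bar" or an empty body.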
+
+// Append a #define line to Buf for Macro. Macro should be of the form XXX,
+// in which case we emit "#define XXX 1", or "XXX=Y z W", in which case we
+// emit "#define XXX Y z W". To get a #define with no value, use "XXX=".
+static void DefineBuiltinMacro(MacroBuilder &Builder, StringRef Macro,
+ DiagnosticsEngine &Diags) {
+ std::pair<StringRef, StringRef> MacroPair = Macro.split('=');
+ StringRef MacroName = MacroPair.first;
+ StringRef MacroBody = MacroPair.second;
+ if (MacroName.size() != Macro.size()) {
+ // Per GCC -D semantics, the macro ends at \n if it exists.
+ StringRef::size_type End = MacroBody.find_first_of("\n\r");
+ if (End != StringRef::npos)
+ Diags.Report(diag::warn_fe_macro_contains_embedded_newline)
+ << MacroName;
+ MacroBody = MacroBody.substr(0, End);
+ // We handle macro bodies which end in a backslash by appending an extra
+ // backslash+newline. This makes sure we don't accidentally treat the
+ // backslash as a line continuation marker.
+ if (MacroBodyEndsInBackslash(MacroBody))
+ Builder.defineMacro(MacroName, Twine(MacroBody) + "\\\n");
+ else
+ Builder.defineMacro(MacroName, MacroBody);
+ } else {
+ // Push "macroname 1".
+ Builder.defineMacro(Macro);
+ }
+}
+
+/// AddImplicitInclude - Add an implicit \#include of the specified file to the
+/// predefines buffer.
+/// Because these includes are generated by -include arguments, the header
+/// search logic resolves them relative to the current working directory.
+static void AddImplicitInclude(MacroBuilder &Builder, StringRef File) {
+ Builder.append(Twine("#include \"") + File + "\"");
+}
+
+static void AddImplicitIncludeMacros(MacroBuilder &Builder, StringRef File) {
+ Builder.append(Twine("#__include_macros \"") + File + "\"");
+ // Marker token to stop the __include_macros fetch loop.
+ Builder.append("##"); // ##?
+}
+
+/// AddImplicitIncludePTH - Add an implicit \#include using the original file
+/// used to generate a PTH cache.
+static void AddImplicitIncludePTH(MacroBuilder &Builder, Preprocessor &PP,
+ StringRef ImplicitIncludePTH) {
+ PTHManager *P = PP.getPTHManager();
+ // Null check 'P' in the corner case where it couldn't be created.
+ const char *OriginalFile = P ? P->getOriginalSourceFile() : nullptr;
+
+ if (!OriginalFile) {
+ PP.getDiagnostics().Report(diag::err_fe_pth_file_has_no_source_header)
+ << ImplicitIncludePTH;
+ return;
+ }
+
+ AddImplicitInclude(Builder, OriginalFile);
+}
+
+/// \brief Add an implicit \#include using the original file used to generate
+/// a PCH file.
+static void AddImplicitIncludePCH(MacroBuilder &Builder, Preprocessor &PP,
+ const PCHContainerReader &PCHContainerRdr,
+ StringRef ImplicitIncludePCH) {
+ std::string OriginalFile =
+ ASTReader::getOriginalSourceFile(ImplicitIncludePCH, PP.getFileManager(),
+ PCHContainerRdr, PP.getDiagnostics());
+ if (OriginalFile.empty())
+ return;
+
+ AddImplicitInclude(Builder, OriginalFile);
+}
+
+/// PickFP - This is used to pick a value based on the FP semantics of the
+/// specified FP model.
+template <typename T>
+static T PickFP(const llvm::fltSemantics *Sem, T IEEESingleVal,
+ T IEEEDoubleVal, T X87DoubleExtendedVal, T PPCDoubleDoubleVal,
+ T IEEEQuadVal) {
+ if (Sem == (const llvm::fltSemantics*)&llvm::APFloat::IEEEsingle)
+ return IEEESingleVal;
+ if (Sem == (const llvm::fltSemantics*)&llvm::APFloat::IEEEdouble)
+ return IEEEDoubleVal;
+ if (Sem == (const llvm::fltSemantics*)&llvm::APFloat::x87DoubleExtended)
+ return X87DoubleExtendedVal;
+ if (Sem == (const llvm::fltSemantics*)&llvm::APFloat::PPCDoubleDouble)
+ return PPCDoubleDoubleVal;
+ assert(Sem == (const llvm::fltSemantics*)&llvm::APFloat::IEEEquad);
+ return IEEEQuadVal;
+}
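+// For example, PickFP(&llvm::APFloat::IEEEdouble, 6, 15, 18, 31, 33) yields
+// 15, the IEEE-double entry; DefineFloatMacros below uses exactly this table
+// to emit __DBL_DIG__.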
+
+static void DefineFloatMacros(MacroBuilder &Builder, StringRef Prefix,
+ const llvm::fltSemantics *Sem, StringRef Ext) {
+ const char *DenormMin, *Epsilon, *Max, *Min;
+ DenormMin = PickFP(Sem, "1.40129846e-45", "4.9406564584124654e-324",
+ "3.64519953188247460253e-4951",
+ "4.94065645841246544176568792868221e-324",
+ "6.47517511943802511092443895822764655e-4966");
+ int Digits = PickFP(Sem, 6, 15, 18, 31, 33);
+ int DecimalDigits = PickFP(Sem, 9, 17, 21, 33, 36);
+ Epsilon = PickFP(Sem, "1.19209290e-7", "2.2204460492503131e-16",
+ "1.08420217248550443401e-19",
+ "4.94065645841246544176568792868221e-324",
+ "1.92592994438723585305597794258492732e-34");
+ int MantissaDigits = PickFP(Sem, 24, 53, 64, 106, 113);
+ int Min10Exp = PickFP(Sem, -37, -307, -4931, -291, -4931);
+ int Max10Exp = PickFP(Sem, 38, 308, 4932, 308, 4932);
+ int MinExp = PickFP(Sem, -125, -1021, -16381, -968, -16381);
+ int MaxExp = PickFP(Sem, 128, 1024, 16384, 1024, 16384);
+ Min = PickFP(Sem, "1.17549435e-38", "2.2250738585072014e-308",
+ "3.36210314311209350626e-4932",
+ "2.00416836000897277799610805135016e-292",
+ "3.36210314311209350626267781732175260e-4932");
+ Max = PickFP(Sem, "3.40282347e+38", "1.7976931348623157e+308",
+ "1.18973149535723176502e+4932",
+ "1.79769313486231580793728971405301e+308",
+ "1.18973149535723176508575932662800702e+4932");
+
+ SmallString<32> DefPrefix;
+ DefPrefix = "__";
+ DefPrefix += Prefix;
+ DefPrefix += "_";
+
+ Builder.defineMacro(DefPrefix + "DENORM_MIN__", Twine(DenormMin)+Ext);
+ Builder.defineMacro(DefPrefix + "HAS_DENORM__");
+ Builder.defineMacro(DefPrefix + "DIG__", Twine(Digits));
+ Builder.defineMacro(DefPrefix + "DECIMAL_DIG__", Twine(DecimalDigits));
+ Builder.defineMacro(DefPrefix + "EPSILON__", Twine(Epsilon)+Ext);
+ Builder.defineMacro(DefPrefix + "HAS_INFINITY__");
+ Builder.defineMacro(DefPrefix + "HAS_QUIET_NAN__");
+ Builder.defineMacro(DefPrefix + "MANT_DIG__", Twine(MantissaDigits));
+
+ Builder.defineMacro(DefPrefix + "MAX_10_EXP__", Twine(Max10Exp));
+ Builder.defineMacro(DefPrefix + "MAX_EXP__", Twine(MaxExp));
+ Builder.defineMacro(DefPrefix + "MAX__", Twine(Max)+Ext);
+
+ Builder.defineMacro(DefPrefix + "MIN_10_EXP__","("+Twine(Min10Exp)+")");
+ Builder.defineMacro(DefPrefix + "MIN_EXP__", "("+Twine(MinExp)+")");
+ Builder.defineMacro(DefPrefix + "MIN__", Twine(Min)+Ext);
+}
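+// For example, DefineFloatMacros(Builder, "FLT", &TI.getFloatFormat(), "F")
+// on an IEEE-single 'float' emits, among others:
+//   #define __FLT_DIG__ 6
+//   #define __FLT_MANT_DIG__ 24
+//   #define __FLT_MAX__ 3.40282347e+38F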
+
+
+/// DefineTypeSize - Emit a macro to the predefines buffer that declares a
+/// macro named MacroName with the max value for a type of width 'TypeWidth',
+/// signedness 'isSigned', and value suffix 'ValSuffix' (e.g. LL).
+static void DefineTypeSize(const Twine &MacroName, unsigned TypeWidth,
+ StringRef ValSuffix, bool isSigned,
+ MacroBuilder &Builder) {
+ llvm::APInt MaxVal = isSigned ? llvm::APInt::getSignedMaxValue(TypeWidth)
+ : llvm::APInt::getMaxValue(TypeWidth);
+ Builder.defineMacro(MacroName, MaxVal.toString(10, isSigned) + ValSuffix);
+}
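+// For example, DefineTypeSize("__INT_MAX__", 32, "", /*isSigned=*/true,
+// Builder) emits "#define __INT_MAX__ 2147483647".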
+
+/// DefineTypeSize - An overloaded helper that uses TargetInfo to determine
+/// the width, suffix, and signedness of the given type.
+static void DefineTypeSize(const Twine &MacroName, TargetInfo::IntType Ty,
+ const TargetInfo &TI, MacroBuilder &Builder) {
+ DefineTypeSize(MacroName, TI.getTypeWidth(Ty), TI.getTypeConstantSuffix(Ty),
+ TI.isTypeSigned(Ty), Builder);
+}
+
+static void DefineFmt(const Twine &Prefix, TargetInfo::IntType Ty,
+ const TargetInfo &TI, MacroBuilder &Builder) {
+ bool IsSigned = TI.isTypeSigned(Ty);
+ StringRef FmtModifier = TI.getTypeFormatModifier(Ty);
+ for (const char *Fmt = IsSigned ? "di" : "ouxX"; *Fmt; ++Fmt) {
+ Builder.defineMacro(Prefix + "_FMT" + Twine(*Fmt) + "__",
+ Twine("\"") + FmtModifier + Twine(*Fmt) + "\"");
+ }
+}
+
+static void DefineType(const Twine &MacroName, TargetInfo::IntType Ty,
+ MacroBuilder &Builder) {
+ Builder.defineMacro(MacroName, TargetInfo::getTypeName(Ty));
+}
+
+static void DefineTypeWidth(StringRef MacroName, TargetInfo::IntType Ty,
+ const TargetInfo &TI, MacroBuilder &Builder) {
+ Builder.defineMacro(MacroName, Twine(TI.getTypeWidth(Ty)));
+}
+
+static void DefineTypeSizeof(StringRef MacroName, unsigned BitWidth,
+ const TargetInfo &TI, MacroBuilder &Builder) {
+ Builder.defineMacro(MacroName,
+ Twine(BitWidth / TI.getCharWidth()));
+}
+
+static void DefineExactWidthIntType(TargetInfo::IntType Ty,
+ const TargetInfo &TI,
+ MacroBuilder &Builder) {
+ int TypeWidth = TI.getTypeWidth(Ty);
+ bool IsSigned = TI.isTypeSigned(Ty);
+
+ // Use the target-specified int64 type, when appropriate, so that [u]int64_t
+ // ends up being defined in terms of the correct type.
+ if (TypeWidth == 64)
+ Ty = IsSigned ? TI.getInt64Type() : TI.getUInt64Type();
+
+ const char *Prefix = IsSigned ? "__INT" : "__UINT";
+
+ DefineType(Prefix + Twine(TypeWidth) + "_TYPE__", Ty, Builder);
+ DefineFmt(Prefix + Twine(TypeWidth), Ty, TI, Builder);
+
+ StringRef ConstSuffix(TI.getTypeConstantSuffix(Ty));
+ Builder.defineMacro(Prefix + Twine(TypeWidth) + "_C_SUFFIX__", ConstSuffix);
+}
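+// For a 32-bit signed 'int' on a typical target this defines __INT32_TYPE__
+// as 'int', __INT32_FMTd__ and __INT32_FMTi__ as "d" and "i", and an empty
+// __INT32_C_SUFFIX__.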
+
+static void DefineExactWidthIntTypeSize(TargetInfo::IntType Ty,
+ const TargetInfo &TI,
+ MacroBuilder &Builder) {
+ int TypeWidth = TI.getTypeWidth(Ty);
+ bool IsSigned = TI.isTypeSigned(Ty);
+
+ // Use the target-specified int64 type, when appropriate, so that [u]int64_t
+ // ends up being defined in terms of the correct type.
+ if (TypeWidth == 64)
+ Ty = IsSigned ? TI.getInt64Type() : TI.getUInt64Type();
+
+ const char *Prefix = IsSigned ? "__INT" : "__UINT";
+ DefineTypeSize(Prefix + Twine(TypeWidth) + "_MAX__", Ty, TI, Builder);
+}
+
+static void DefineLeastWidthIntType(unsigned TypeWidth, bool IsSigned,
+ const TargetInfo &TI,
+ MacroBuilder &Builder) {
+ TargetInfo::IntType Ty = TI.getLeastIntTypeByWidth(TypeWidth, IsSigned);
+ if (Ty == TargetInfo::NoInt)
+ return;
+
+ const char *Prefix = IsSigned ? "__INT_LEAST" : "__UINT_LEAST";
+ DefineType(Prefix + Twine(TypeWidth) + "_TYPE__", Ty, Builder);
+ DefineTypeSize(Prefix + Twine(TypeWidth) + "_MAX__", Ty, TI, Builder);
+ DefineFmt(Prefix + Twine(TypeWidth), Ty, TI, Builder);
+}
+
+static void DefineFastIntType(unsigned TypeWidth, bool IsSigned,
+ const TargetInfo &TI, MacroBuilder &Builder) {
+ // stdint.h currently defines the fast int types as equivalent to the least
+ // types.
+ TargetInfo::IntType Ty = TI.getLeastIntTypeByWidth(TypeWidth, IsSigned);
+ if (Ty == TargetInfo::NoInt)
+ return;
+
+ const char *Prefix = IsSigned ? "__INT_FAST" : "__UINT_FAST";
+ DefineType(Prefix + Twine(TypeWidth) + "_TYPE__", Ty, Builder);
+ DefineTypeSize(Prefix + Twine(TypeWidth) + "_MAX__", Ty, TI, Builder);
+
+ DefineFmt(Prefix + Twine(TypeWidth), Ty, TI, Builder);
+}
+
+
+/// Get the value the ATOMIC_*_LOCK_FREE macro should have for a type with
+/// the specified properties.
+static const char *getLockFreeValue(unsigned TypeWidth, unsigned TypeAlign,
+ unsigned InlineWidth) {
+ // Fully-aligned, power-of-2 sizes no larger than the inline
+ // width will be inlined as lock-free operations.
+ if (TypeWidth == TypeAlign && (TypeWidth & (TypeWidth - 1)) == 0 &&
+ TypeWidth <= InlineWidth)
+ return "2"; // "always lock free"
+ // We cannot be certain what operations the lib calls might be
+ // able to implement as lock-free on future processors.
+ return "1"; // "sometimes lock free"
+}
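+// E.g. getLockFreeValue(32, 32, 64) is "2" (always lock free), while a width
+// that is misaligned, not a power of 2, or larger than InlineWidth yields "1".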
+
+/// \brief Add definitions required for a smooth interaction between
+/// Objective-C++ automated reference counting and libstdc++ (4.2).
+static void AddObjCXXARCLibstdcxxDefines(const LangOptions &LangOpts,
+ MacroBuilder &Builder) {
+ Builder.defineMacro("_GLIBCXX_PREDEFINED_OBJC_ARC_IS_SCALAR");
+
+ std::string Result;
+ {
+ // Provide specializations for the __is_scalar type trait so that
+ // lifetime-qualified objects are not considered "scalar" types, which
+ // libstdc++ uses as an indicator of the presence of trivial copy, assign,
+ // default-construct, and destruct semantics (none of which hold for
+ // lifetime-qualified objects in ARC).
+ llvm::raw_string_ostream Out(Result);
+
+ Out << "namespace std {\n"
+ << "\n"
+ << "struct __true_type;\n"
+ << "struct __false_type;\n"
+ << "\n";
+
+ Out << "template<typename _Tp> struct __is_scalar;\n"
+ << "\n";
+
+ if (LangOpts.ObjCAutoRefCount) {
+ Out << "template<typename _Tp>\n"
+ << "struct __is_scalar<__attribute__((objc_ownership(strong))) _Tp> {\n"
+ << " enum { __value = 0 };\n"
+ << " typedef __false_type __type;\n"
+ << "};\n"
+ << "\n";
+ }
+
+ if (LangOpts.ObjCWeak) {
+ Out << "template<typename _Tp>\n"
+ << "struct __is_scalar<__attribute__((objc_ownership(weak))) _Tp> {\n"
+ << " enum { __value = 0 };\n"
+ << " typedef __false_type __type;\n"
+ << "};\n"
+ << "\n";
+ }
+
+ if (LangOpts.ObjCAutoRefCount) {
+ Out << "template<typename _Tp>\n"
+ << "struct __is_scalar<__attribute__((objc_ownership(autoreleasing)))"
+ << " _Tp> {\n"
+ << " enum { __value = 0 };\n"
+ << " typedef __false_type __type;\n"
+ << "};\n"
+ << "\n";
+ }
+
+ Out << "}\n";
+ }
+ Builder.append(Result);
+}
+
+static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
+ const LangOptions &LangOpts,
+ const FrontendOptions &FEOpts,
+ MacroBuilder &Builder) {
+ if (!LangOpts.MSVCCompat && !LangOpts.TraditionalCPP)
+ Builder.defineMacro("__STDC__");
+ if (LangOpts.Freestanding)
+ Builder.defineMacro("__STDC_HOSTED__", "0");
+ else
+ Builder.defineMacro("__STDC_HOSTED__");
+
+ if (!LangOpts.CPlusPlus) {
+ if (LangOpts.C11)
+ Builder.defineMacro("__STDC_VERSION__", "201112L");
+ else if (LangOpts.C99)
+ Builder.defineMacro("__STDC_VERSION__", "199901L");
+ else if (!LangOpts.GNUMode && LangOpts.Digraphs)
+ Builder.defineMacro("__STDC_VERSION__", "199409L");
+ } else {
+ // FIXME: Use correct value for C++17.
+ if (LangOpts.CPlusPlus1z)
+ Builder.defineMacro("__cplusplus", "201406L");
+ // C++14 [cpp.predefined]p1:
+ // The name __cplusplus is defined to the value 201402L when compiling a
+ // C++ translation unit.
+ else if (LangOpts.CPlusPlus14)
+ Builder.defineMacro("__cplusplus", "201402L");
+ // C++11 [cpp.predefined]p1:
+ // The name __cplusplus is defined to the value 201103L when compiling a
+ // C++ translation unit.
+ else if (LangOpts.CPlusPlus11)
+ Builder.defineMacro("__cplusplus", "201103L");
+ // C++03 [cpp.predefined]p1:
+ // The name __cplusplus is defined to the value 199711L when compiling a
+ // C++ translation unit.
+ else
+ Builder.defineMacro("__cplusplus", "199711L");
+ }
+
+ // In C11 these are environment macros. In C++11 they are only defined
+ // as part of <cuchar>. To prevent breakage when mixing C and C++
+ // code, define these macros unconditionally. We can define them
+ // unconditionally, as Clang always uses UTF-16 and UTF-32 for 16-bit
+ // and 32-bit character literals.
+ Builder.defineMacro("__STDC_UTF_16__", "1");
+ Builder.defineMacro("__STDC_UTF_32__", "1");
+
+ if (LangOpts.ObjC1)
+ Builder.defineMacro("__OBJC__");
+
+ // Not "standard" per se, but available even with the -undef flag.
+ if (LangOpts.AsmPreprocessor)
+ Builder.defineMacro("__ASSEMBLER__");
+ if (LangOpts.CUDA)
+ Builder.defineMacro("__CUDA__");
+}
+
+/// Initialize the predefined C++ language feature test macros defined in
+/// ISO/IEC JTC1/SC22/WG21 (C++) SD-6: "SG10 Feature Test Recommendations".
+static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
+ MacroBuilder &Builder) {
+ // C++98 features.
+ if (LangOpts.RTTI)
+ Builder.defineMacro("__cpp_rtti", "199711");
+ if (LangOpts.CXXExceptions)
+ Builder.defineMacro("__cpp_exceptions", "199711");
+
+ // C++11 features.
+ if (LangOpts.CPlusPlus11) {
+ Builder.defineMacro("__cpp_unicode_characters", "200704");
+ Builder.defineMacro("__cpp_raw_strings", "200710");
+ Builder.defineMacro("__cpp_unicode_literals", "200710");
+ Builder.defineMacro("__cpp_user_defined_literals", "200809");
+ Builder.defineMacro("__cpp_lambdas", "200907");
+ Builder.defineMacro("__cpp_constexpr",
+ LangOpts.CPlusPlus14 ? "201304" : "200704");
+ Builder.defineMacro("__cpp_range_based_for", "200907");
+ Builder.defineMacro("__cpp_static_assert", "200410");
+ Builder.defineMacro("__cpp_decltype", "200707");
+ Builder.defineMacro("__cpp_attributes", "200809");
+ Builder.defineMacro("__cpp_rvalue_references", "200610");
+ Builder.defineMacro("__cpp_variadic_templates", "200704");
+ Builder.defineMacro("__cpp_initializer_lists", "200806");
+ Builder.defineMacro("__cpp_delegating_constructors", "200604");
+ Builder.defineMacro("__cpp_nsdmi", "200809");
+ Builder.defineMacro("__cpp_inheriting_constructors", "200802");
+ Builder.defineMacro("__cpp_ref_qualifiers", "200710");
+ Builder.defineMacro("__cpp_alias_templates", "200704");
+ }
+
+ // C++14 features.
+ if (LangOpts.CPlusPlus14) {
+ Builder.defineMacro("__cpp_binary_literals", "201304");
+ Builder.defineMacro("__cpp_digit_separators", "201309");
+ Builder.defineMacro("__cpp_init_captures", "201304");
+ Builder.defineMacro("__cpp_generic_lambdas", "201304");
+ Builder.defineMacro("__cpp_decltype_auto", "201304");
+ Builder.defineMacro("__cpp_return_type_deduction", "201304");
+ Builder.defineMacro("__cpp_aggregate_nsdmi", "201304");
+ Builder.defineMacro("__cpp_variable_templates", "201304");
+ }
+ if (LangOpts.SizedDeallocation)
+ Builder.defineMacro("__cpp_sized_deallocation", "201309");
+ if (LangOpts.ConceptsTS)
+ Builder.defineMacro("__cpp_experimental_concepts", "1");
+ if (LangOpts.Coroutines)
+ Builder.defineMacro("__cpp_coroutines", "1");
+}
+
+static void InitializePredefinedMacros(const TargetInfo &TI,
+ const LangOptions &LangOpts,
+ const FrontendOptions &FEOpts,
+ MacroBuilder &Builder) {
+ // Compiler version introspection macros.
+ Builder.defineMacro("__llvm__"); // LLVM Backend
+ Builder.defineMacro("__clang__"); // Clang Frontend
+#define TOSTR2(X) #X
+#define TOSTR(X) TOSTR2(X)
+ Builder.defineMacro("__clang_major__", TOSTR(CLANG_VERSION_MAJOR));
+ Builder.defineMacro("__clang_minor__", TOSTR(CLANG_VERSION_MINOR));
+#ifdef CLANG_VERSION_PATCHLEVEL
+ Builder.defineMacro("__clang_patchlevel__", TOSTR(CLANG_VERSION_PATCHLEVEL));
+#else
+ Builder.defineMacro("__clang_patchlevel__", "0");
+#endif
+ Builder.defineMacro("__clang_version__",
+ "\"" CLANG_VERSION_STRING " "
+ + getClangFullRepositoryVersion() + "\"");
+#undef TOSTR
+#undef TOSTR2
+ if (!LangOpts.MSVCCompat) {
+ // Currently claim to be compatible with GCC 4.2.1-5621, but only if we're
+ // not compiling for MSVC compatibility.
+ Builder.defineMacro("__GNUC_MINOR__", "2");
+ Builder.defineMacro("__GNUC_PATCHLEVEL__", "1");
+ Builder.defineMacro("__GNUC__", "4");
+ Builder.defineMacro("__GXX_ABI_VERSION", "1002");
+ }
+
+ // Define macros for the C11 / C++11 memory orderings
+ Builder.defineMacro("__ATOMIC_RELAXED", "0");
+ Builder.defineMacro("__ATOMIC_CONSUME", "1");
+ Builder.defineMacro("__ATOMIC_ACQUIRE", "2");
+ Builder.defineMacro("__ATOMIC_RELEASE", "3");
+ Builder.defineMacro("__ATOMIC_ACQ_REL", "4");
+ Builder.defineMacro("__ATOMIC_SEQ_CST", "5");
+
+ // Support for #pragma redefine_extname (Sun compatibility)
+ Builder.defineMacro("__PRAGMA_REDEFINE_EXTNAME", "1");
+
+ // As sad as it is, enough software depends on __VERSION__ for version
+ // checks that it is necessary to report 4.2.1 (the base GCC version we
+ // claim compatibility with) first.
+ Builder.defineMacro("__VERSION__", "\"4.2.1 Compatible " +
+ Twine(getClangFullCPPVersion()) + "\"");
+
+ // Initialize language-specific preprocessor defines.
+
+ // Standard conforming mode?
+ if (!LangOpts.GNUMode && !LangOpts.MSVCCompat)
+ Builder.defineMacro("__STRICT_ANSI__");
+
+ if (!LangOpts.MSVCCompat && LangOpts.CPlusPlus11)
+ Builder.defineMacro("__GXX_EXPERIMENTAL_CXX0X__");
+
+ if (LangOpts.ObjC1) {
+ if (LangOpts.ObjCRuntime.isNonFragile()) {
+ Builder.defineMacro("__OBJC2__");
+
+ if (LangOpts.ObjCExceptions)
+ Builder.defineMacro("OBJC_ZEROCOST_EXCEPTIONS");
+ }
+
+ if (LangOpts.getGC() != LangOptions::NonGC)
+ Builder.defineMacro("__OBJC_GC__");
+
+ if (LangOpts.ObjCRuntime.isNeXTFamily())
+ Builder.defineMacro("__NEXT_RUNTIME__");
+
+ if (LangOpts.ObjCRuntime.getKind() == ObjCRuntime::ObjFW) {
+ VersionTuple tuple = LangOpts.ObjCRuntime.getVersion();
+
+ unsigned minor = 0;
+ if (tuple.getMinor().hasValue())
+ minor = tuple.getMinor().getValue();
+
+ unsigned subminor = 0;
+ if (tuple.getSubminor().hasValue())
+ subminor = tuple.getSubminor().getValue();
+
+ Builder.defineMacro("__OBJFW_RUNTIME_ABI__",
+ Twine(tuple.getMajor() * 10000 + minor * 100 +
+ subminor));
+ }
+
+ Builder.defineMacro("IBOutlet", "__attribute__((iboutlet))");
+ Builder.defineMacro("IBOutletCollection(ClassName)",
+ "__attribute__((iboutletcollection(ClassName)))");
+ Builder.defineMacro("IBAction", "void)__attribute__((ibaction)");
+ Builder.defineMacro("IBInspectable", "");
+ Builder.defineMacro("IB_DESIGNABLE", "");
+ }
+
+ if (LangOpts.CPlusPlus)
+ InitializeCPlusPlusFeatureTestMacros(LangOpts, Builder);
+
+ // This is controlled by -f[no-]constant-cfstrings (GCC's
+ // darwin_constant_cfstrings) and likely also depends on the Objective-C
+ // runtime. It is set even for C code.
+ if (!LangOpts.NoConstantCFStrings)
+ Builder.defineMacro("__CONSTANT_CFSTRINGS__");
+
+ if (LangOpts.ObjC2)
+ Builder.defineMacro("OBJC_NEW_PROPERTIES");
+
+ if (LangOpts.PascalStrings)
+ Builder.defineMacro("__PASCAL_STRINGS__");
+
+ if (LangOpts.Blocks) {
+ Builder.defineMacro("__block", "__attribute__((__blocks__(byref)))");
+ Builder.defineMacro("__BLOCKS__");
+ }
+
+ if (!LangOpts.MSVCCompat && LangOpts.Exceptions)
+ Builder.defineMacro("__EXCEPTIONS");
+ if (!LangOpts.MSVCCompat && LangOpts.RTTI)
+ Builder.defineMacro("__GXX_RTTI");
+ if (LangOpts.SjLjExceptions)
+ Builder.defineMacro("__USING_SJLJ_EXCEPTIONS__");
+
+ if (LangOpts.Deprecated)
+ Builder.defineMacro("__DEPRECATED");
+
+ if (!LangOpts.MSVCCompat && LangOpts.CPlusPlus) {
+ Builder.defineMacro("__GNUG__", "4");
+ Builder.defineMacro("__GXX_WEAK__");
+ Builder.defineMacro("__private_extern__", "extern");
+ }
+
+ if (LangOpts.MicrosoftExt) {
+ if (LangOpts.WChar) {
+ // wchar_t supported as a keyword.
+ Builder.defineMacro("_WCHAR_T_DEFINED");
+ Builder.defineMacro("_NATIVE_WCHAR_T_DEFINED");
+ }
+ }
+
+ if (LangOpts.Optimize)
+ Builder.defineMacro("__OPTIMIZE__");
+ if (LangOpts.OptimizeSize)
+ Builder.defineMacro("__OPTIMIZE_SIZE__");
+
+ if (LangOpts.FastMath)
+ Builder.defineMacro("__FAST_MATH__");
+
+ // Initialize target-specific preprocessor defines.
+
+ // __BYTE_ORDER__ was added in GCC 4.6. It's analogous
+ // to the macro __BYTE_ORDER (no trailing underscores)
+ // from glibc's <endian.h> header.
+ // We don't support the PDP-11 as a target, but include
+ // the define so it can still be compared against.
+ Builder.defineMacro("__ORDER_LITTLE_ENDIAN__", "1234");
+ Builder.defineMacro("__ORDER_BIG_ENDIAN__", "4321");
+ Builder.defineMacro("__ORDER_PDP_ENDIAN__", "3412");
+ if (TI.isBigEndian()) {
+ Builder.defineMacro("__BYTE_ORDER__", "__ORDER_BIG_ENDIAN__");
+ Builder.defineMacro("__BIG_ENDIAN__");
+ } else {
+ Builder.defineMacro("__BYTE_ORDER__", "__ORDER_LITTLE_ENDIAN__");
+ Builder.defineMacro("__LITTLE_ENDIAN__");
+ }
+
+ if (TI.getPointerWidth(0) == 64 && TI.getLongWidth() == 64
+ && TI.getIntWidth() == 32) {
+ Builder.defineMacro("_LP64");
+ Builder.defineMacro("__LP64__");
+ }
+
+ if (TI.getPointerWidth(0) == 32 && TI.getLongWidth() == 32
+ && TI.getIntWidth() == 32) {
+ Builder.defineMacro("_ILP32");
+ Builder.defineMacro("__ILP32__");
+ }
+
+ // Define type sizing macros based on the target properties.
+ assert(TI.getCharWidth() == 8 && "Only support 8-bit char so far");
+ Builder.defineMacro("__CHAR_BIT__", "8");
+
+ DefineTypeSize("__SCHAR_MAX__", TargetInfo::SignedChar, TI, Builder);
+ DefineTypeSize("__SHRT_MAX__", TargetInfo::SignedShort, TI, Builder);
+ DefineTypeSize("__INT_MAX__", TargetInfo::SignedInt, TI, Builder);
+ DefineTypeSize("__LONG_MAX__", TargetInfo::SignedLong, TI, Builder);
+ DefineTypeSize("__LONG_LONG_MAX__", TargetInfo::SignedLongLong, TI, Builder);
+ DefineTypeSize("__WCHAR_MAX__", TI.getWCharType(), TI, Builder);
+ DefineTypeSize("__INTMAX_MAX__", TI.getIntMaxType(), TI, Builder);
+ DefineTypeSize("__SIZE_MAX__", TI.getSizeType(), TI, Builder);
+
+ DefineTypeSize("__UINTMAX_MAX__", TI.getUIntMaxType(), TI, Builder);
+ DefineTypeSize("__PTRDIFF_MAX__", TI.getPtrDiffType(0), TI, Builder);
+ DefineTypeSize("__INTPTR_MAX__", TI.getIntPtrType(), TI, Builder);
+ DefineTypeSize("__UINTPTR_MAX__", TI.getUIntPtrType(), TI, Builder);
+
+ DefineTypeSizeof("__SIZEOF_DOUBLE__", TI.getDoubleWidth(), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_FLOAT__", TI.getFloatWidth(), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_INT__", TI.getIntWidth(), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_LONG__", TI.getLongWidth(), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_LONG_DOUBLE__",TI.getLongDoubleWidth(),TI,Builder);
+ DefineTypeSizeof("__SIZEOF_LONG_LONG__", TI.getLongLongWidth(), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_POINTER__", TI.getPointerWidth(0), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_SHORT__", TI.getShortWidth(), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_PTRDIFF_T__",
+ TI.getTypeWidth(TI.getPtrDiffType(0)), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_SIZE_T__",
+ TI.getTypeWidth(TI.getSizeType()), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_WCHAR_T__",
+ TI.getTypeWidth(TI.getWCharType()), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_WINT_T__",
+ TI.getTypeWidth(TI.getWIntType()), TI, Builder);
+ if (TI.hasInt128Type())
+ DefineTypeSizeof("__SIZEOF_INT128__", 128, TI, Builder);
+
+ DefineType("__INTMAX_TYPE__", TI.getIntMaxType(), Builder);
+ DefineFmt("__INTMAX", TI.getIntMaxType(), TI, Builder);
+ Builder.defineMacro("__INTMAX_C_SUFFIX__",
+ TI.getTypeConstantSuffix(TI.getIntMaxType()));
+ DefineType("__UINTMAX_TYPE__", TI.getUIntMaxType(), Builder);
+ DefineFmt("__UINTMAX", TI.getUIntMaxType(), TI, Builder);
+ Builder.defineMacro("__UINTMAX_C_SUFFIX__",
+ TI.getTypeConstantSuffix(TI.getUIntMaxType()));
+ DefineTypeWidth("__INTMAX_WIDTH__", TI.getIntMaxType(), TI, Builder);
+ DefineType("__PTRDIFF_TYPE__", TI.getPtrDiffType(0), Builder);
+ DefineFmt("__PTRDIFF", TI.getPtrDiffType(0), TI, Builder);
+ DefineTypeWidth("__PTRDIFF_WIDTH__", TI.getPtrDiffType(0), TI, Builder);
+ DefineType("__INTPTR_TYPE__", TI.getIntPtrType(), Builder);
+ DefineFmt("__INTPTR", TI.getIntPtrType(), TI, Builder);
+ DefineTypeWidth("__INTPTR_WIDTH__", TI.getIntPtrType(), TI, Builder);
+ DefineType("__SIZE_TYPE__", TI.getSizeType(), Builder);
+ DefineFmt("__SIZE", TI.getSizeType(), TI, Builder);
+ DefineTypeWidth("__SIZE_WIDTH__", TI.getSizeType(), TI, Builder);
+ DefineType("__WCHAR_TYPE__", TI.getWCharType(), Builder);
+ DefineTypeWidth("__WCHAR_WIDTH__", TI.getWCharType(), TI, Builder);
+ DefineType("__WINT_TYPE__", TI.getWIntType(), Builder);
+ DefineTypeWidth("__WINT_WIDTH__", TI.getWIntType(), TI, Builder);
+ DefineTypeWidth("__SIG_ATOMIC_WIDTH__", TI.getSigAtomicType(), TI, Builder);
+ DefineTypeSize("__SIG_ATOMIC_MAX__", TI.getSigAtomicType(), TI, Builder);
+ DefineType("__CHAR16_TYPE__", TI.getChar16Type(), Builder);
+ DefineType("__CHAR32_TYPE__", TI.getChar32Type(), Builder);
+
+ DefineTypeWidth("__UINTMAX_WIDTH__", TI.getUIntMaxType(), TI, Builder);
+ DefineType("__UINTPTR_TYPE__", TI.getUIntPtrType(), Builder);
+ DefineFmt("__UINTPTR", TI.getUIntPtrType(), TI, Builder);
+ DefineTypeWidth("__UINTPTR_WIDTH__", TI.getUIntPtrType(), TI, Builder);
+
+ DefineFloatMacros(Builder, "FLT", &TI.getFloatFormat(), "F");
+ DefineFloatMacros(Builder, "DBL", &TI.getDoubleFormat(), "");
+ DefineFloatMacros(Builder, "LDBL", &TI.getLongDoubleFormat(), "L");
+
+ // Define a __POINTER_WIDTH__ macro for stdint.h.
+ Builder.defineMacro("__POINTER_WIDTH__",
+ Twine((int)TI.getPointerWidth(0)));
+
+ // Define __BIGGEST_ALIGNMENT__ to be compatible with gcc.
+ Builder.defineMacro("__BIGGEST_ALIGNMENT__",
+ Twine(TI.getSuitableAlign() / TI.getCharWidth()) );
+
+ if (!LangOpts.CharIsSigned)
+ Builder.defineMacro("__CHAR_UNSIGNED__");
+
+ if (!TargetInfo::isTypeSigned(TI.getWCharType()))
+ Builder.defineMacro("__WCHAR_UNSIGNED__");
+
+ if (!TargetInfo::isTypeSigned(TI.getWIntType()))
+ Builder.defineMacro("__WINT_UNSIGNED__");
+
+ // Define exact-width integer types for stdint.h
+ DefineExactWidthIntType(TargetInfo::SignedChar, TI, Builder);
+
+ if (TI.getShortWidth() > TI.getCharWidth())
+ DefineExactWidthIntType(TargetInfo::SignedShort, TI, Builder);
+
+ if (TI.getIntWidth() > TI.getShortWidth())
+ DefineExactWidthIntType(TargetInfo::SignedInt, TI, Builder);
+
+ if (TI.getLongWidth() > TI.getIntWidth())
+ DefineExactWidthIntType(TargetInfo::SignedLong, TI, Builder);
+
+ if (TI.getLongLongWidth() > TI.getLongWidth())
+ DefineExactWidthIntType(TargetInfo::SignedLongLong, TI, Builder);
+
+ DefineExactWidthIntType(TargetInfo::UnsignedChar, TI, Builder);
+ DefineExactWidthIntTypeSize(TargetInfo::UnsignedChar, TI, Builder);
+ DefineExactWidthIntTypeSize(TargetInfo::SignedChar, TI, Builder);
+
+ if (TI.getShortWidth() > TI.getCharWidth()) {
+ DefineExactWidthIntType(TargetInfo::UnsignedShort, TI, Builder);
+ DefineExactWidthIntTypeSize(TargetInfo::UnsignedShort, TI, Builder);
+ DefineExactWidthIntTypeSize(TargetInfo::SignedShort, TI, Builder);
+ }
+
+ if (TI.getIntWidth() > TI.getShortWidth()) {
+ DefineExactWidthIntType(TargetInfo::UnsignedInt, TI, Builder);
+ DefineExactWidthIntTypeSize(TargetInfo::UnsignedInt, TI, Builder);
+ DefineExactWidthIntTypeSize(TargetInfo::SignedInt, TI, Builder);
+ }
+
+ if (TI.getLongWidth() > TI.getIntWidth()) {
+ DefineExactWidthIntType(TargetInfo::UnsignedLong, TI, Builder);
+ DefineExactWidthIntTypeSize(TargetInfo::UnsignedLong, TI, Builder);
+ DefineExactWidthIntTypeSize(TargetInfo::SignedLong, TI, Builder);
+ }
+
+ if (TI.getLongLongWidth() > TI.getLongWidth()) {
+ DefineExactWidthIntType(TargetInfo::UnsignedLongLong, TI, Builder);
+ DefineExactWidthIntTypeSize(TargetInfo::UnsignedLongLong, TI, Builder);
+ DefineExactWidthIntTypeSize(TargetInfo::SignedLongLong, TI, Builder);
+ }
+
+ DefineLeastWidthIntType(8, true, TI, Builder);
+ DefineLeastWidthIntType(8, false, TI, Builder);
+ DefineLeastWidthIntType(16, true, TI, Builder);
+ DefineLeastWidthIntType(16, false, TI, Builder);
+ DefineLeastWidthIntType(32, true, TI, Builder);
+ DefineLeastWidthIntType(32, false, TI, Builder);
+ DefineLeastWidthIntType(64, true, TI, Builder);
+ DefineLeastWidthIntType(64, false, TI, Builder);
+
+ DefineFastIntType(8, true, TI, Builder);
+ DefineFastIntType(8, false, TI, Builder);
+ DefineFastIntType(16, true, TI, Builder);
+ DefineFastIntType(16, false, TI, Builder);
+ DefineFastIntType(32, true, TI, Builder);
+ DefineFastIntType(32, false, TI, Builder);
+ DefineFastIntType(64, true, TI, Builder);
+ DefineFastIntType(64, false, TI, Builder);
+
+ if (const char *Prefix = TI.getUserLabelPrefix())
+ Builder.defineMacro("__USER_LABEL_PREFIX__", Prefix);
+
+ if (LangOpts.FastMath || LangOpts.FiniteMathOnly)
+ Builder.defineMacro("__FINITE_MATH_ONLY__", "1");
+ else
+ Builder.defineMacro("__FINITE_MATH_ONLY__", "0");
+
+ if (!LangOpts.MSVCCompat) {
+ if (LangOpts.GNUInline || LangOpts.CPlusPlus)
+ Builder.defineMacro("__GNUC_GNU_INLINE__");
+ else
+ Builder.defineMacro("__GNUC_STDC_INLINE__");
+
+ // The value written by __atomic_test_and_set.
+ // FIXME: This is target-dependent.
+ Builder.defineMacro("__GCC_ATOMIC_TEST_AND_SET_TRUEVAL", "1");
+
+ // Used by libstdc++ to implement ATOMIC_<foo>_LOCK_FREE.
+ unsigned InlineWidthBits = TI.getMaxAtomicInlineWidth();
+#define DEFINE_LOCK_FREE_MACRO(TYPE, Type) \
+ Builder.defineMacro("__GCC_ATOMIC_" #TYPE "_LOCK_FREE", \
+ getLockFreeValue(TI.get##Type##Width(), \
+ TI.get##Type##Align(), \
+ InlineWidthBits));
+ DEFINE_LOCK_FREE_MACRO(BOOL, Bool);
+ DEFINE_LOCK_FREE_MACRO(CHAR, Char);
+ DEFINE_LOCK_FREE_MACRO(CHAR16_T, Char16);
+ DEFINE_LOCK_FREE_MACRO(CHAR32_T, Char32);
+ DEFINE_LOCK_FREE_MACRO(WCHAR_T, WChar);
+ DEFINE_LOCK_FREE_MACRO(SHORT, Short);
+ DEFINE_LOCK_FREE_MACRO(INT, Int);
+ DEFINE_LOCK_FREE_MACRO(LONG, Long);
+ DEFINE_LOCK_FREE_MACRO(LLONG, LongLong);
+ Builder.defineMacro("__GCC_ATOMIC_POINTER_LOCK_FREE",
+ getLockFreeValue(TI.getPointerWidth(0),
+ TI.getPointerAlign(0),
+ InlineWidthBits));
+#undef DEFINE_LOCK_FREE_MACRO
+ }
+
+ if (LangOpts.NoInlineDefine)
+ Builder.defineMacro("__NO_INLINE__");
+
+ if (unsigned PICLevel = LangOpts.PICLevel) {
+ Builder.defineMacro("__PIC__", Twine(PICLevel));
+ Builder.defineMacro("__pic__", Twine(PICLevel));
+ }
+ if (unsigned PIELevel = LangOpts.PIELevel) {
+ Builder.defineMacro("__PIE__", Twine(PIELevel));
+ Builder.defineMacro("__pie__", Twine(PIELevel));
+ }
+
+ // Macros to control C99 numerics and <float.h>
+ Builder.defineMacro("__FLT_EVAL_METHOD__", Twine(TI.getFloatEvalMethod()));
+ Builder.defineMacro("__FLT_RADIX__", "2");
+ Builder.defineMacro("__DECIMAL_DIG__", "__LDBL_DECIMAL_DIG__");
+
+ if (LangOpts.getStackProtector() == LangOptions::SSPOn)
+ Builder.defineMacro("__SSP__");
+ else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
+ Builder.defineMacro("__SSP_STRONG__", "2");
+ else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
+ Builder.defineMacro("__SSP_ALL__", "3");
+
+ // Define a macro that exists only when using the static analyzer.
+ if (FEOpts.ProgramAction == frontend::RunAnalysis)
+ Builder.defineMacro("__clang_analyzer__");
+
+ if (LangOpts.FastRelaxedMath)
+ Builder.defineMacro("__FAST_RELAXED_MATH__");
+
+ if (FEOpts.ProgramAction == frontend::RewriteObjC ||
+ LangOpts.getGC() != LangOptions::NonGC) {
+ Builder.defineMacro("__weak", "__attribute__((objc_gc(weak)))");
+ Builder.defineMacro("__strong", "__attribute__((objc_gc(strong)))");
+ Builder.defineMacro("__autoreleasing", "");
+ Builder.defineMacro("__unsafe_unretained", "");
+ } else if (LangOpts.ObjC1) {
+ Builder.defineMacro("__weak", "__attribute__((objc_ownership(weak)))");
+ Builder.defineMacro("__strong", "__attribute__((objc_ownership(strong)))");
+ Builder.defineMacro("__autoreleasing",
+ "__attribute__((objc_ownership(autoreleasing)))");
+ Builder.defineMacro("__unsafe_unretained",
+ "__attribute__((objc_ownership(none)))");
+ }
+
+ // On Darwin, there are __double_underscored variants of the type
+ // nullability qualifiers.
+ if (TI.getTriple().isOSDarwin()) {
+ Builder.defineMacro("__nonnull", "_Nonnull");
+ Builder.defineMacro("__null_unspecified", "_Null_unspecified");
+ Builder.defineMacro("__nullable", "_Nullable");
+ }
+
+ // OpenMP definition
+ if (LangOpts.OpenMP) {
+ // OpenMP 2.2:
+ // In implementations that support a preprocessor, the _OPENMP
+ // macro name is defined to have the decimal value yyyymm where
+ // yyyy and mm are the year and the month designations of the
+ // version of the OpenMP API that the implementation supports.
+ Builder.defineMacro("_OPENMP", "201307");
+ }
+
+ // CUDA device-path compilation.
+ if (LangOpts.CUDAIsDevice) {
+ // The __CUDA_ARCH__ value for the GPU being targeted is set in the NVPTX
+ // backend's target defines.
+ Builder.defineMacro("__CUDA_ARCH__");
+ }
+
+ // Get other target #defines.
+ TI.getTargetDefines(LangOpts, Builder);
+}
+
+/// InitializePreprocessor - Initialize the preprocessor, getting it and the
+/// environment ready to process a single file.
+///
+void clang::InitializePreprocessor(
+ Preprocessor &PP, const PreprocessorOptions &InitOpts,
+ const PCHContainerReader &PCHContainerRdr,
+ const FrontendOptions &FEOpts) {
+ const LangOptions &LangOpts = PP.getLangOpts();
+ std::string PredefineBuffer;
+ PredefineBuffer.reserve(4080);
+ llvm::raw_string_ostream Predefines(PredefineBuffer);
+ MacroBuilder Builder(Predefines);
+
+ // Emit line markers for various builtin sections of the file. We don't do
+ // this in asm preprocessor mode, because "# 4" is not a line marker directive
+ // in this mode.
+ if (!PP.getLangOpts().AsmPreprocessor)
+ Builder.append("# 1 \"<built-in>\" 3");
+
+ // Install things like __POWERPC__, __GNUC__, etc into the macro table.
+ if (InitOpts.UsePredefines) {
+ if (LangOpts.CUDA && PP.getAuxTargetInfo())
+ InitializePredefinedMacros(*PP.getAuxTargetInfo(), LangOpts, FEOpts,
+ Builder);
+
+ InitializePredefinedMacros(PP.getTargetInfo(), LangOpts, FEOpts, Builder);
+
+ // Install definitions to make Objective-C++ ARC work well with various
+ // C++ Standard Library implementations.
+ if (LangOpts.ObjC1 && LangOpts.CPlusPlus &&
+ (LangOpts.ObjCAutoRefCount || LangOpts.ObjCWeak)) {
+ switch (InitOpts.ObjCXXARCStandardLibrary) {
+ case ARCXX_nolib:
+ case ARCXX_libcxx:
+ break;
+
+ case ARCXX_libstdcxx:
+ AddObjCXXARCLibstdcxxDefines(LangOpts, Builder);
+ break;
+ }
+ }
+ }
+
+ // Even with predefines off, some macros are still predefined.
+ // These should all be defined in the preprocessor according to the
+ // current language configuration.
+ InitializeStandardPredefinedMacros(PP.getTargetInfo(), PP.getLangOpts(),
+ FEOpts, Builder);
+
+ // Add on the predefines from the driver. Wrap in a line marker directive to
+ // report that they come from the command line.
+ if (!PP.getLangOpts().AsmPreprocessor)
+ Builder.append("# 1 \"<command line>\" 1");
+
+ // Process #define's and #undef's in the order they are given.
+ for (unsigned i = 0, e = InitOpts.Macros.size(); i != e; ++i) {
+ if (InitOpts.Macros[i].second) // isUndef
+ Builder.undefineMacro(InitOpts.Macros[i].first);
+ else
+ DefineBuiltinMacro(Builder, InitOpts.Macros[i].first,
+ PP.getDiagnostics());
+ }
+
+ // If -imacros are specified, include them now. These are processed before
+ // any -include directives.
+ for (unsigned i = 0, e = InitOpts.MacroIncludes.size(); i != e; ++i)
+ AddImplicitIncludeMacros(Builder, InitOpts.MacroIncludes[i]);
+
+ // Process -include-pch/-include-pth directives.
+ if (!InitOpts.ImplicitPCHInclude.empty())
+ AddImplicitIncludePCH(Builder, PP, PCHContainerRdr,
+ InitOpts.ImplicitPCHInclude);
+ if (!InitOpts.ImplicitPTHInclude.empty())
+ AddImplicitIncludePTH(Builder, PP, InitOpts.ImplicitPTHInclude);
+
+ // Process -include directives.
+ for (unsigned i = 0, e = InitOpts.Includes.size(); i != e; ++i) {
+ const std::string &Path = InitOpts.Includes[i];
+ AddImplicitInclude(Builder, Path);
+ }
+
+ // Exit the command line and go back to <built-in> (2 is LC_LEAVE).
+ if (!PP.getLangOpts().AsmPreprocessor)
+ Builder.append("# 1 \"<built-in>\" 2");
+
+ // Instruct the preprocessor to skip the preamble.
+ PP.setSkipMainFilePreamble(InitOpts.PrecompiledPreambleBytes.first,
+ InitOpts.PrecompiledPreambleBytes.second);
+
+ // Copy PredefineBuffer into the preprocessor.
+ PP.setPredefines(Predefines.str());
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/LangStandards.cpp b/contrib/llvm/tools/clang/lib/Frontend/LangStandards.cpp
new file mode 100644
index 0000000..f133327
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/LangStandards.cpp
@@ -0,0 +1,43 @@
+//===--- LangStandards.cpp - Language Standard Definitions ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/LangStandard.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+using namespace clang::frontend;
+
+#define LANGSTANDARD(id, name, desc, features) \
+ static const LangStandard Lang_##id = { name, desc, features };
+#include "clang/Frontend/LangStandards.def"
+
+const LangStandard &LangStandard::getLangStandardForKind(Kind K) {
+ switch (K) {
+ case lang_unspecified:
+ llvm::report_fatal_error("getLangStandardForKind() on unspecified kind");
+#define LANGSTANDARD(id, name, desc, features) \
+ case lang_##id: return Lang_##id;
+#include "clang/Frontend/LangStandards.def"
+ }
+ llvm_unreachable("Invalid language kind!");
+}
+
+const LangStandard *LangStandard::getLangStandardForName(StringRef Name) {
+ Kind K = llvm::StringSwitch<Kind>(Name)
+#define LANGSTANDARD(id, name, desc, features) \
+ .Case(name, lang_##id)
+#include "clang/Frontend/LangStandards.def"
+ .Default(lang_unspecified);
+ if (K == lang_unspecified)
+ return nullptr;
+
+ return &getLangStandardForKind(K);
+}
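+// A typical lookup (assuming LangStandards.def spells the standard "c++11"):
+//   if (const LangStandard *Std = LangStandard::getLangStandardForName("c++11"))
+//     llvm::errs() << Std->getDescription() << "\n";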
+
+
diff --git a/contrib/llvm/tools/clang/lib/Frontend/LayoutOverrideSource.cpp b/contrib/llvm/tools/clang/lib/Frontend/LayoutOverrideSource.cpp
new file mode 100644
index 0000000..924a640
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/LayoutOverrideSource.cpp
@@ -0,0 +1,208 @@
+//===--- LayoutOverrideSource.cpp - Override Record Layouts ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Frontend/LayoutOverrideSource.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/CharInfo.h"
+#include "llvm/Support/raw_ostream.h"
+#include <fstream>
+#include <string>
+
+using namespace clang;
+
+/// \brief Parse a simple identifier.
+static std::string parseName(StringRef S) {
+ if (S.empty() || !isIdentifierHead(S[0]))
+ return "";
+
+ unsigned Offset = 1;
+ while (Offset < S.size() && isIdentifierBody(S[Offset]))
+ ++Offset;
+
+ return S.substr(0, Offset).str();
+}
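+// E.g. parseName("Foo<int>") yields "Foo", while parseName("123") yields "".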
+
+LayoutOverrideSource::LayoutOverrideSource(StringRef Filename) {
+ std::ifstream Input(Filename.str().c_str());
+ if (!Input.is_open())
+ return;
+
+ // Parse the output of -fdump-record-layouts.
+ std::string CurrentType;
+ Layout CurrentLayout;
+ bool ExpectingType = false;
+
+ while (Input.good()) {
+ std::string Line;
+ getline(Input, Line);
+
+ StringRef LineStr(Line);
+
+ // Determine whether the following line will start a new record layout dump.
+ if (LineStr.find("*** Dumping AST Record Layout") != StringRef::npos) {
+ // Flush the last type/layout, if there is one.
+ if (!CurrentType.empty())
+ Layouts[CurrentType] = CurrentLayout;
+ CurrentLayout = Layout();
+
+ ExpectingType = true;
+ continue;
+ }
+
+ // If we're expecting a type, grab it.
+ if (ExpectingType) {
+ ExpectingType = false;
+
+ StringRef::size_type Pos;
+ if ((Pos = LineStr.find("struct ")) != StringRef::npos)
+ LineStr = LineStr.substr(Pos + strlen("struct "));
+ else if ((Pos = LineStr.find("class ")) != StringRef::npos)
+ LineStr = LineStr.substr(Pos + strlen("class "));
+ else if ((Pos = LineStr.find("union ")) != StringRef::npos)
+ LineStr = LineStr.substr(Pos + strlen("union "));
+ else
+ continue;
+
+ // Find the name of the type.
+ CurrentType = parseName(LineStr);
+ CurrentLayout = Layout();
+ continue;
+ }
+
+ // Check for the size of the type.
+ StringRef::size_type Pos = LineStr.find(" Size:");
+ if (Pos != StringRef::npos) {
+ // Skip past the " Size:" prefix.
+ LineStr = LineStr.substr(Pos + strlen(" Size:"));
+
+ unsigned long long Size = 0;
+ (void)LineStr.getAsInteger(10, Size);
+ CurrentLayout.Size = Size;
+ continue;
+ }
+
+ // Check for the alignment of the type.
+ Pos = LineStr.find("Alignment:");
+ if (Pos != StringRef::npos) {
+ // Skip past the "Alignment:" prefix.
+ LineStr = LineStr.substr(Pos + strlen("Alignment:"));
+
+ unsigned long long Alignment = 0;
+ (void)LineStr.getAsInteger(10, Alignment);
+ CurrentLayout.Align = Alignment;
+ continue;
+ }
+
+ // Check for the size/alignment of the type.
+ Pos = LineStr.find("sizeof=");
+ if (Pos != StringRef::npos) {
+      // Skip past the "sizeof=" prefix.
+ LineStr = LineStr.substr(Pos + strlen("sizeof="));
+
+ // Parse size.
+ unsigned long long Size = 0;
+ (void)LineStr.getAsInteger(10, Size);
+ CurrentLayout.Size = Size;
+
+ Pos = LineStr.find("align=");
+ if (Pos != StringRef::npos) {
+        // Skip past the "align=" prefix.
+ LineStr = LineStr.substr(Pos + strlen("align="));
+
+ // Parse alignment.
+ unsigned long long Alignment = 0;
+ (void)LineStr.getAsInteger(10, Alignment);
+ CurrentLayout.Align = Alignment;
+ }
+
+ continue;
+ }
+
+ // Check for the field offsets of the type.
+ Pos = LineStr.find("FieldOffsets: [");
+ if (Pos == StringRef::npos)
+ continue;
+
+ LineStr = LineStr.substr(Pos + strlen("FieldOffsets: ["));
+ while (!LineStr.empty() && isDigit(LineStr[0])) {
+ // Parse this offset.
+ unsigned Idx = 1;
+ while (Idx < LineStr.size() && isDigit(LineStr[Idx]))
+ ++Idx;
+
+ unsigned long long Offset = 0;
+ (void)LineStr.substr(0, Idx).getAsInteger(10, Offset);
+
+ CurrentLayout.FieldOffsets.push_back(Offset);
+
+ // Skip over this offset, the following comma, and any spaces.
+ LineStr = LineStr.substr(Idx + 1);
+ while (!LineStr.empty() && isWhitespace(LineStr[0]))
+ LineStr = LineStr.substr(1);
+ }
+ }
+
+ // Flush the last type/layout, if there is one.
+ if (!CurrentType.empty())
+ Layouts[CurrentType] = CurrentLayout;
+}
+
+bool
+LayoutOverrideSource::layoutRecordType(const RecordDecl *Record,
+ uint64_t &Size, uint64_t &Alignment,
+ llvm::DenseMap<const FieldDecl *, uint64_t> &FieldOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &BaseOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &VirtualBaseOffsets)
+{
+ // We can't override unnamed declarations.
+ if (!Record->getIdentifier())
+ return false;
+
+ // Check whether we have a layout for this record.
+ llvm::StringMap<Layout>::iterator Known = Layouts.find(Record->getName());
+ if (Known == Layouts.end())
+ return false;
+
+ // Provide field layouts.
+ unsigned NumFields = 0;
+ for (RecordDecl::field_iterator F = Record->field_begin(),
+ FEnd = Record->field_end();
+ F != FEnd; ++F, ++NumFields) {
+ if (NumFields >= Known->second.FieldOffsets.size())
+ continue;
+
+ FieldOffsets[*F] = Known->second.FieldOffsets[NumFields];
+ }
+
+ // Wrong number of fields.
+ if (NumFields != Known->second.FieldOffsets.size())
+ return false;
+
+ Size = Known->second.Size;
+ Alignment = Known->second.Align;
+ return true;
+}
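+
+// Illustrative behavior: for the "struct S" sketch parsed above, this fills
+// Size = 64, Alignment = 32, and one FieldOffsets entry per field in
+// declaration order; BaseOffsets and VirtualBaseOffsets are left untouched.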
+
+void LayoutOverrideSource::dump() {
+ raw_ostream &OS = llvm::errs();
+ for (llvm::StringMap<Layout>::iterator L = Layouts.begin(),
+ LEnd = Layouts.end();
+ L != LEnd; ++L) {
+ OS << "Type: blah " << L->first() << '\n';
+ OS << " Size:" << L->second.Size << '\n';
+ OS << " Alignment:" << L->second.Align << '\n';
+ OS << " FieldOffsets: [";
+ for (unsigned I = 0, N = L->second.FieldOffsets.size(); I != N; ++I) {
+ if (I)
+ OS << ", ";
+ OS << L->second.FieldOffsets[I];
+ }
+ OS << "]\n";
+ }
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Frontend/LogDiagnosticPrinter.cpp b/contrib/llvm/tools/clang/lib/Frontend/LogDiagnosticPrinter.cpp
new file mode 100644
index 0000000..9998f65
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/LogDiagnosticPrinter.cpp
@@ -0,0 +1,165 @@
+//===--- LogDiagnosticPrinter.cpp - Log Diagnostic Printer ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/LogDiagnosticPrinter.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/PlistSupport.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace markup;
+
+LogDiagnosticPrinter::LogDiagnosticPrinter(
+ raw_ostream &os, DiagnosticOptions *diags,
+ std::unique_ptr<raw_ostream> StreamOwner)
+ : OS(os), StreamOwner(std::move(StreamOwner)), LangOpts(nullptr),
+ DiagOpts(diags) {}
+
+static StringRef getLevelName(DiagnosticsEngine::Level Level) {
+ switch (Level) {
+ case DiagnosticsEngine::Ignored: return "ignored";
+ case DiagnosticsEngine::Remark: return "remark";
+ case DiagnosticsEngine::Note: return "note";
+ case DiagnosticsEngine::Warning: return "warning";
+ case DiagnosticsEngine::Error: return "error";
+ case DiagnosticsEngine::Fatal: return "fatal error";
+ }
+ llvm_unreachable("Invalid DiagnosticsEngine level!");
+}
+
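+// A hedged sketch of the plist <dict> that EmitDiagEntry writes for one
+// diagnostic; all values below are illustrative:
+//
+//   <dict>
+//    <key>level</key>
+//    <string>warning</string>
+//    <key>filename</key>
+//    <string>t.c</string>
+//    <key>line</key>
+//    <integer>10</integer>
+//    <key>column</key>
+//    <integer>3</integer>
+//    <key>message</key>
+//    <string>unused variable 'x'</string>
+//    <key>ID</key>
+//    <integer>123</integer>
+//   </dict>
+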
+void
+LogDiagnosticPrinter::EmitDiagEntry(llvm::raw_ostream &OS,
+ const LogDiagnosticPrinter::DiagEntry &DE) {
+ OS << " <dict>\n";
+ OS << " <key>level</key>\n"
+ << " ";
+ EmitString(OS, getLevelName(DE.DiagnosticLevel)) << '\n';
+ if (!DE.Filename.empty()) {
+ OS << " <key>filename</key>\n"
+ << " ";
+ EmitString(OS, DE.Filename) << '\n';
+ }
+ if (DE.Line != 0) {
+ OS << " <key>line</key>\n"
+ << " ";
+ EmitInteger(OS, DE.Line) << '\n';
+ }
+ if (DE.Column != 0) {
+ OS << " <key>column</key>\n"
+ << " ";
+ EmitInteger(OS, DE.Column) << '\n';
+ }
+ if (!DE.Message.empty()) {
+ OS << " <key>message</key>\n"
+ << " ";
+ EmitString(OS, DE.Message) << '\n';
+ }
+ OS << " <key>ID</key>\n"
+ << " ";
+ EmitInteger(OS, DE.DiagnosticID) << '\n';
+ if (!DE.WarningOption.empty()) {
+ OS << " <key>WarningOption</key>\n"
+ << " ";
+ EmitString(OS, DE.WarningOption) << '\n';
+ }
+ OS << " </dict>\n";
+}
+
+void LogDiagnosticPrinter::EndSourceFile() {
+ // We emit all the diagnostics in EndSourceFile. However, we don't emit any
+ // entry if no diagnostics were present.
+ //
+ // Note that DiagnosticConsumer has no "end-of-compilation" callback, so we
+ // will miss any diagnostics which are emitted after and outside the
+ // translation unit processing.
+ if (Entries.empty())
+ return;
+
+ // Write to a temporary string to ensure atomic write of diagnostic object.
+ SmallString<512> Msg;
+ llvm::raw_svector_ostream OS(Msg);
+
+ OS << "<dict>\n";
+ if (!MainFilename.empty()) {
+ OS << " <key>main-file</key>\n"
+ << " ";
+ EmitString(OS, MainFilename) << '\n';
+ }
+ if (!DwarfDebugFlags.empty()) {
+ OS << " <key>dwarf-debug-flags</key>\n"
+ << " ";
+ EmitString(OS, DwarfDebugFlags) << '\n';
+ }
+ OS << " <key>diagnostics</key>\n";
+ OS << " <array>\n";
+ for (auto &DE : Entries)
+ EmitDiagEntry(OS, DE);
+ OS << " </array>\n";
+ OS << "</dict>\n";
+
+ this->OS << OS.str();
+}
+
+void LogDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
+ const Diagnostic &Info) {
+ // Default implementation (Warnings/errors count).
+ DiagnosticConsumer::HandleDiagnostic(Level, Info);
+
+ // Initialize the main file name, if we haven't already fetched it.
+ if (MainFilename.empty() && Info.hasSourceManager()) {
+ const SourceManager &SM = Info.getSourceManager();
+ FileID FID = SM.getMainFileID();
+ if (FID.isValid()) {
+ const FileEntry *FE = SM.getFileEntryForID(FID);
+ if (FE && FE->isValid())
+ MainFilename = FE->getName();
+ }
+ }
+
+ // Create the diag entry.
+ DiagEntry DE;
+ DE.DiagnosticID = Info.getID();
+ DE.DiagnosticLevel = Level;
+
+ DE.WarningOption = DiagnosticIDs::getWarningOptionForDiag(DE.DiagnosticID);
+
+ // Format the message.
+ SmallString<100> MessageStr;
+ Info.FormatDiagnostic(MessageStr);
+ DE.Message = MessageStr.str();
+
+ // Set the location information.
+ DE.Filename = "";
+ DE.Line = DE.Column = 0;
+ if (Info.getLocation().isValid() && Info.hasSourceManager()) {
+ const SourceManager &SM = Info.getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Info.getLocation());
+
+ if (PLoc.isInvalid()) {
+ // At least print the file name if available:
+ FileID FID = SM.getFileID(Info.getLocation());
+ if (FID.isValid()) {
+ const FileEntry *FE = SM.getFileEntryForID(FID);
+ if (FE && FE->isValid())
+ DE.Filename = FE->getName();
+ }
+ } else {
+ DE.Filename = PLoc.getFilename();
+ DE.Line = PLoc.getLine();
+ DE.Column = PLoc.getColumn();
+ }
+ }
+
+ // Record the diagnostic entry.
+ Entries.push_back(DE);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ModuleDependencyCollector.cpp b/contrib/llvm/tools/clang/lib/Frontend/ModuleDependencyCollector.cpp
new file mode 100644
index 0000000..9768a16
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/ModuleDependencyCollector.cpp
@@ -0,0 +1,94 @@
+//===--- ModuleDependencyCollector.cpp - Collect module dependencies ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Collect the dependencies of a set of modules.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Serialization/ASTReader.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+namespace {
+/// Private implementation for ModuleDependencyCollector
+class ModuleDependencyListener : public ASTReaderListener {
+ ModuleDependencyCollector &Collector;
+
+ std::error_code copyToRoot(StringRef Src);
+public:
+ ModuleDependencyListener(ModuleDependencyCollector &Collector)
+ : Collector(Collector) {}
+ bool needsInputFileVisitation() override { return true; }
+ bool needsSystemInputFileVisitation() override { return true; }
+ bool visitInputFile(StringRef Filename, bool IsSystem, bool IsOverridden,
+ bool IsExplicitModule) override;
+};
+}
+
+void ModuleDependencyCollector::attachToASTReader(ASTReader &R) {
+ R.addListener(llvm::make_unique<ModuleDependencyListener>(*this));
+}
+
+void ModuleDependencyCollector::writeFileMap() {
+ if (Seen.empty())
+ return;
+
+ SmallString<256> Dest = getDest();
+ llvm::sys::path::append(Dest, "vfs.yaml");
+
+ std::error_code EC;
+ llvm::raw_fd_ostream OS(Dest, EC, llvm::sys::fs::F_Text);
+ if (EC) {
+ setHasErrors();
+ return;
+ }
+ VFSWriter.write(OS);
+}
+
+std::error_code ModuleDependencyListener::copyToRoot(StringRef Src) {
+ using namespace llvm::sys;
+
+ // We need an absolute path to append to the root.
+ SmallString<256> AbsoluteSrc = Src;
+ fs::make_absolute(AbsoluteSrc);
+ // Canonicalize to a native path to avoid mixed separator styles.
+ path::native(AbsoluteSrc);
+ // TODO: We probably need to handle .. as well as . in order to have valid
+ // input to the YAMLVFSWriter.
+ path::remove_dots(AbsoluteSrc);
+
+ // Build the destination path.
+ SmallString<256> Dest = Collector.getDest();
+ path::append(Dest, path::relative_path(AbsoluteSrc));
+
+ // Copy the file into place.
+ if (std::error_code EC = fs::create_directories(path::parent_path(Dest),
+ /*IgnoreExisting=*/true))
+ return EC;
+ if (std::error_code EC = fs::copy_file(AbsoluteSrc, Dest))
+ return EC;
+ // Use the absolute path under the root for the file mapping.
+ Collector.addFileMapping(AbsoluteSrc, Dest);
+ return std::error_code();
+}
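+
+// Illustrative example, with hypothetical paths: given a collector
+// destination of /tmp/vfsroot, the dependency /usr/include/stdio.h is copied
+// to /tmp/vfsroot/usr/include/stdio.h, and writeFileMap() later records the
+// mapping from the original absolute path to that copy in vfs.yaml.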
+
+bool ModuleDependencyListener::visitInputFile(StringRef Filename, bool IsSystem,
+ bool IsOverridden,
+ bool IsExplicitModule) {
+ if (Collector.insertSeen(Filename))
+ if (copyToRoot(Filename))
+ Collector.setHasErrors();
+ return true;
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp b/contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp
new file mode 100644
index 0000000..12c8524
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp
@@ -0,0 +1,362 @@
+//===- MultiplexConsumer.cpp - AST Consumer for PCH Generation --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MultiplexConsumer class. It also declares and defines
+// MultiplexASTDeserializationListener and MultiplexASTMutationListener, which
+// are implementation details of MultiplexConsumer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/MultiplexConsumer.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/Serialization/ASTDeserializationListener.h"
+
+using namespace clang;
+
+namespace clang {
+
+// This ASTDeserializationListener forwards its notifications to a set of
+// child listeners.
+class MultiplexASTDeserializationListener
+ : public ASTDeserializationListener {
+public:
+ // Does NOT take ownership of the elements in L.
+ MultiplexASTDeserializationListener(
+ const std::vector<ASTDeserializationListener*>& L);
+ void ReaderInitialized(ASTReader *Reader) override;
+ void IdentifierRead(serialization::IdentID ID,
+ IdentifierInfo *II) override;
+ void MacroRead(serialization::MacroID ID, MacroInfo *MI) override;
+ void TypeRead(serialization::TypeIdx Idx, QualType T) override;
+ void DeclRead(serialization::DeclID ID, const Decl *D) override;
+ void SelectorRead(serialization::SelectorID iD, Selector Sel) override;
+ void MacroDefinitionRead(serialization::PreprocessedEntityID,
+ MacroDefinitionRecord *MD) override;
+ void ModuleRead(serialization::SubmoduleID ID, Module *Mod) override;
+
+private:
+ std::vector<ASTDeserializationListener *> Listeners;
+};
+
+MultiplexASTDeserializationListener::MultiplexASTDeserializationListener(
+ const std::vector<ASTDeserializationListener*>& L)
+ : Listeners(L) {
+}
+
+void MultiplexASTDeserializationListener::ReaderInitialized(
+ ASTReader *Reader) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->ReaderInitialized(Reader);
+}
+
+void MultiplexASTDeserializationListener::IdentifierRead(
+ serialization::IdentID ID, IdentifierInfo *II) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->IdentifierRead(ID, II);
+}
+
+void MultiplexASTDeserializationListener::MacroRead(
+ serialization::MacroID ID, MacroInfo *MI) {
+ for (auto &Listener : Listeners)
+ Listener->MacroRead(ID, MI);
+}
+
+void MultiplexASTDeserializationListener::TypeRead(
+ serialization::TypeIdx Idx, QualType T) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->TypeRead(Idx, T);
+}
+
+void MultiplexASTDeserializationListener::DeclRead(
+ serialization::DeclID ID, const Decl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->DeclRead(ID, D);
+}
+
+void MultiplexASTDeserializationListener::SelectorRead(
+ serialization::SelectorID ID, Selector Sel) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->SelectorRead(ID, Sel);
+}
+
+void MultiplexASTDeserializationListener::MacroDefinitionRead(
+ serialization::PreprocessedEntityID ID, MacroDefinitionRecord *MD) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->MacroDefinitionRead(ID, MD);
+}
+
+void MultiplexASTDeserializationListener::ModuleRead(
+ serialization::SubmoduleID ID, Module *Mod) {
+ for (auto &Listener : Listeners)
+ Listener->ModuleRead(ID, Mod);
+}
+
+// This ASTMutationListener forwards its notifications to a set of
+// child listeners.
+class MultiplexASTMutationListener : public ASTMutationListener {
+public:
+ // Does NOT take ownership of the elements in L.
+ MultiplexASTMutationListener(ArrayRef<ASTMutationListener*> L);
+ void CompletedTagDefinition(const TagDecl *D) override;
+ void AddedVisibleDecl(const DeclContext *DC, const Decl *D) override;
+ void AddedCXXImplicitMember(const CXXRecordDecl *RD, const Decl *D) override;
+ void AddedCXXTemplateSpecialization(const ClassTemplateDecl *TD,
+ const ClassTemplateSpecializationDecl *D) override;
+ void AddedCXXTemplateSpecialization(const VarTemplateDecl *TD,
+ const VarTemplateSpecializationDecl *D) override;
+ void AddedCXXTemplateSpecialization(const FunctionTemplateDecl *TD,
+ const FunctionDecl *D) override;
+ void ResolvedExceptionSpec(const FunctionDecl *FD) override;
+ void DeducedReturnType(const FunctionDecl *FD, QualType ReturnType) override;
+ void ResolvedOperatorDelete(const CXXDestructorDecl *DD,
+ const FunctionDecl *Delete) override;
+ void CompletedImplicitDefinition(const FunctionDecl *D) override;
+ void StaticDataMemberInstantiated(const VarDecl *D) override;
+ void AddedObjCCategoryToInterface(const ObjCCategoryDecl *CatD,
+ const ObjCInterfaceDecl *IFD) override;
+ void FunctionDefinitionInstantiated(const FunctionDecl *D) override;
+ void DeclarationMarkedUsed(const Decl *D) override;
+ void DeclarationMarkedOpenMPThreadPrivate(const Decl *D) override;
+ void RedefinedHiddenDefinition(const NamedDecl *D, Module *M) override;
+ void AddedAttributeToRecord(const Attr *Attr,
+ const RecordDecl *Record) override;
+
+private:
+ std::vector<ASTMutationListener*> Listeners;
+};
+
+MultiplexASTMutationListener::MultiplexASTMutationListener(
+ ArrayRef<ASTMutationListener*> L)
+ : Listeners(L.begin(), L.end()) {
+}
+
+void MultiplexASTMutationListener::CompletedTagDefinition(const TagDecl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->CompletedTagDefinition(D);
+}
+
+void MultiplexASTMutationListener::AddedVisibleDecl(
+ const DeclContext *DC, const Decl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->AddedVisibleDecl(DC, D);
+}
+
+void MultiplexASTMutationListener::AddedCXXImplicitMember(
+ const CXXRecordDecl *RD, const Decl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->AddedCXXImplicitMember(RD, D);
+}
+void MultiplexASTMutationListener::AddedCXXTemplateSpecialization(
+ const ClassTemplateDecl *TD, const ClassTemplateSpecializationDecl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->AddedCXXTemplateSpecialization(TD, D);
+}
+void MultiplexASTMutationListener::AddedCXXTemplateSpecialization(
+ const VarTemplateDecl *TD, const VarTemplateSpecializationDecl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->AddedCXXTemplateSpecialization(TD, D);
+}
+void MultiplexASTMutationListener::AddedCXXTemplateSpecialization(
+ const FunctionTemplateDecl *TD, const FunctionDecl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->AddedCXXTemplateSpecialization(TD, D);
+}
+void MultiplexASTMutationListener::ResolvedExceptionSpec(
+ const FunctionDecl *FD) {
+ for (auto &Listener : Listeners)
+ Listener->ResolvedExceptionSpec(FD);
+}
+void MultiplexASTMutationListener::DeducedReturnType(const FunctionDecl *FD,
+ QualType ReturnType) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->DeducedReturnType(FD, ReturnType);
+}
+void MultiplexASTMutationListener::ResolvedOperatorDelete(
+ const CXXDestructorDecl *DD, const FunctionDecl *Delete) {
+ for (auto *L : Listeners)
+ L->ResolvedOperatorDelete(DD, Delete);
+}
+void MultiplexASTMutationListener::CompletedImplicitDefinition(
+ const FunctionDecl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->CompletedImplicitDefinition(D);
+}
+void MultiplexASTMutationListener::StaticDataMemberInstantiated(
+ const VarDecl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->StaticDataMemberInstantiated(D);
+}
+void MultiplexASTMutationListener::AddedObjCCategoryToInterface(
+ const ObjCCategoryDecl *CatD,
+ const ObjCInterfaceDecl *IFD) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->AddedObjCCategoryToInterface(CatD, IFD);
+}
+void MultiplexASTMutationListener::FunctionDefinitionInstantiated(
+ const FunctionDecl *D) {
+ for (auto &Listener : Listeners)
+ Listener->FunctionDefinitionInstantiated(D);
+}
+void MultiplexASTMutationListener::DeclarationMarkedUsed(const Decl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->DeclarationMarkedUsed(D);
+}
+void MultiplexASTMutationListener::DeclarationMarkedOpenMPThreadPrivate(
+ const Decl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->DeclarationMarkedOpenMPThreadPrivate(D);
+}
+void MultiplexASTMutationListener::RedefinedHiddenDefinition(const NamedDecl *D,
+ Module *M) {
+ for (auto *L : Listeners)
+ L->RedefinedHiddenDefinition(D, M);
+}
+
+void MultiplexASTMutationListener::AddedAttributeToRecord(
+ const Attr *Attr,
+ const RecordDecl *Record) {
+ for (auto *L : Listeners)
+ L->AddedAttributeToRecord(Attr, Record);
+}
+
+} // end namespace clang
+
+MultiplexConsumer::MultiplexConsumer(
+ std::vector<std::unique_ptr<ASTConsumer>> C)
+ : Consumers(std::move(C)), MutationListener(), DeserializationListener() {
+  // Collect the mutation listeners and deserialization listeners of all
+  // children, and create a multiplex listener for each kind if any exist.
+ std::vector<ASTMutationListener*> mutationListeners;
+ std::vector<ASTDeserializationListener*> serializationListeners;
+ for (auto &Consumer : Consumers) {
+ if (auto *mutationListener = Consumer->GetASTMutationListener())
+ mutationListeners.push_back(mutationListener);
+ if (auto *serializationListener = Consumer->GetASTDeserializationListener())
+ serializationListeners.push_back(serializationListener);
+ }
+ if (!mutationListeners.empty()) {
+ MutationListener =
+ llvm::make_unique<MultiplexASTMutationListener>(mutationListeners);
+ }
+ if (!serializationListeners.empty()) {
+ DeserializationListener =
+ llvm::make_unique<MultiplexASTDeserializationListener>(
+ serializationListeners);
+ }
+}
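+
+// A hedged usage sketch; the consumer variables are hypothetical:
+//
+//   std::vector<std::unique_ptr<ASTConsumer>> Consumers;
+//   Consumers.push_back(std::move(PCHGenerator));
+//   Consumers.push_back(std::move(ASTPrinter));
+//   auto Multiplex =
+//       llvm::make_unique<MultiplexConsumer>(std::move(Consumers));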
+
+MultiplexConsumer::~MultiplexConsumer() {}
+
+void MultiplexConsumer::Initialize(ASTContext &Context) {
+ for (auto &Consumer : Consumers)
+ Consumer->Initialize(Context);
+}
+
+bool MultiplexConsumer::HandleTopLevelDecl(DeclGroupRef D) {
+ bool Continue = true;
+ for (auto &Consumer : Consumers)
+ Continue = Continue && Consumer->HandleTopLevelDecl(D);
+ return Continue;
+}
+
+void MultiplexConsumer::HandleInlineMethodDefinition(CXXMethodDecl *D) {
+ for (auto &Consumer : Consumers)
+ Consumer->HandleInlineMethodDefinition(D);
+}
+
+void MultiplexConsumer::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
+ for (auto &Consumer : Consumers)
+ Consumer->HandleCXXStaticMemberVarInstantiation(VD);
+}
+
+void MultiplexConsumer::HandleInterestingDecl(DeclGroupRef D) {
+ for (auto &Consumer : Consumers)
+ Consumer->HandleInterestingDecl(D);
+}
+
+void MultiplexConsumer::HandleTranslationUnit(ASTContext &Ctx) {
+ for (auto &Consumer : Consumers)
+ Consumer->HandleTranslationUnit(Ctx);
+}
+
+void MultiplexConsumer::HandleTagDeclDefinition(TagDecl *D) {
+ for (auto &Consumer : Consumers)
+ Consumer->HandleTagDeclDefinition(D);
+}
+
+void MultiplexConsumer::HandleTagDeclRequiredDefinition(const TagDecl *D) {
+ for (auto &Consumer : Consumers)
+ Consumer->HandleTagDeclRequiredDefinition(D);
+}
+
+void MultiplexConsumer::HandleCXXImplicitFunctionInstantiation(FunctionDecl *D){
+ for (auto &Consumer : Consumers)
+ Consumer->HandleCXXImplicitFunctionInstantiation(D);
+}
+
+void MultiplexConsumer::HandleTopLevelDeclInObjCContainer(DeclGroupRef D) {
+ for (auto &Consumer : Consumers)
+ Consumer->HandleTopLevelDeclInObjCContainer(D);
+}
+
+void MultiplexConsumer::HandleImplicitImportDecl(ImportDecl *D) {
+ for (auto &Consumer : Consumers)
+ Consumer->HandleImplicitImportDecl(D);
+}
+
+void MultiplexConsumer::HandleLinkerOptionPragma(llvm::StringRef Opts) {
+ for (auto &Consumer : Consumers)
+ Consumer->HandleLinkerOptionPragma(Opts);
+}
+
+void MultiplexConsumer::HandleDetectMismatch(llvm::StringRef Name,
+                                             llvm::StringRef Value) {
+ for (auto &Consumer : Consumers)
+ Consumer->HandleDetectMismatch(Name, Value);
+}
+
+void MultiplexConsumer::HandleDependentLibrary(llvm::StringRef Lib) {
+ for (auto &Consumer : Consumers)
+ Consumer->HandleDependentLibrary(Lib);
+}
+
+void MultiplexConsumer::CompleteTentativeDefinition(VarDecl *D) {
+ for (auto &Consumer : Consumers)
+ Consumer->CompleteTentativeDefinition(D);
+}
+
+void MultiplexConsumer::HandleVTable(CXXRecordDecl *RD) {
+ for (auto &Consumer : Consumers)
+ Consumer->HandleVTable(RD);
+}
+
+ASTMutationListener *MultiplexConsumer::GetASTMutationListener() {
+ return MutationListener.get();
+}
+
+ASTDeserializationListener *MultiplexConsumer::GetASTDeserializationListener() {
+ return DeserializationListener.get();
+}
+
+void MultiplexConsumer::PrintStats() {
+ for (auto &Consumer : Consumers)
+ Consumer->PrintStats();
+}
+
+void MultiplexConsumer::InitializeSema(Sema &S) {
+ for (auto &Consumer : Consumers)
+ if (SemaConsumer *SC = dyn_cast<SemaConsumer>(Consumer.get()))
+ SC->InitializeSema(S);
+}
+
+void MultiplexConsumer::ForgetSema() {
+ for (auto &Consumer : Consumers)
+ if (SemaConsumer *SC = dyn_cast<SemaConsumer>(Consumer.get()))
+ SC->ForgetSema();
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/PCHContainerOperations.cpp b/contrib/llvm/tools/clang/lib/Frontend/PCHContainerOperations.cpp
new file mode 100644
index 0000000..5e1d772
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/PCHContainerOperations.cpp
@@ -0,0 +1,66 @@
+//===--- Frontend/PCHContainerOperations.cpp - PCH Containers ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines PCHContainerOperations and RawPCHContainerOperation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/PCHContainerOperations.h"
+#include "clang/AST/ASTConsumer.h"
+#include "llvm/Bitcode/BitstreamReader.h"
+#include "llvm/Support/raw_ostream.h"
+#include "clang/Lex/ModuleLoader.h"
+
+using namespace clang;
+
+namespace {
+
+/// \brief A PCHContainerGenerator that writes out the PCH to a flat file.
+class RawPCHContainerGenerator : public ASTConsumer {
+ std::shared_ptr<PCHBuffer> Buffer;
+ raw_pwrite_stream *OS;
+
+public:
+ RawPCHContainerGenerator(llvm::raw_pwrite_stream *OS,
+ std::shared_ptr<PCHBuffer> Buffer)
+ : Buffer(Buffer), OS(OS) {}
+
+ ~RawPCHContainerGenerator() override = default;
+
+ void HandleTranslationUnit(ASTContext &Ctx) override {
+ if (Buffer->IsComplete) {
+ // Make sure it hits disk now.
+ *OS << Buffer->Data;
+ OS->flush();
+ }
+ // Free the space of the temporary buffer.
+ llvm::SmallVector<char, 0> Empty;
+ Buffer->Data = std::move(Empty);
+ }
+};
+
+} // anonymous namespace
+
+std::unique_ptr<ASTConsumer> RawPCHContainerWriter::CreatePCHContainerGenerator(
+ CompilerInstance &CI, const std::string &MainFileName,
+ const std::string &OutputFileName, llvm::raw_pwrite_stream *OS,
+ std::shared_ptr<PCHBuffer> Buffer) const {
+ return llvm::make_unique<RawPCHContainerGenerator>(OS, Buffer);
+}
+
+void RawPCHContainerReader::ExtractPCH(
+ llvm::MemoryBufferRef Buffer, llvm::BitstreamReader &StreamFile) const {
+ StreamFile.init((const unsigned char *)Buffer.getBufferStart(),
+ (const unsigned char *)Buffer.getBufferEnd());
+}
+
+PCHContainerOperations::PCHContainerOperations() {
+ registerWriter(llvm::make_unique<RawPCHContainerWriter>());
+ registerReader(llvm::make_unique<RawPCHContainerReader>());
+}
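+
+// A hedged usage sketch; CustomWriter and CustomReader are hypothetical
+// PCHContainerWriter/PCHContainerReader implementations:
+//
+//   PCHContainerOperations Ops;  // raw reader and writer pre-registered
+//   Ops.registerWriter(llvm::make_unique<CustomWriter>());
+//   Ops.registerReader(llvm::make_unique<CustomReader>());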
diff --git a/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp b/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
new file mode 100644
index 0000000..a58c935
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -0,0 +1,784 @@
+//===--- PrintPreprocessedOutput.cpp - Implement the -E mode --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This code simply runs the preprocessor on the input file and prints out the
+// result. This is the traditional behavior of the -E option.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/PreprocessorOutputOptions.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/PPCallbacks.h"
+#include "clang/Lex/Pragma.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/TokenConcatenation.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdio>
+using namespace clang;
+
+/// PrintMacroDefinition - Print a macro definition in a form that will be
+/// properly accepted back as a definition.
+static void PrintMacroDefinition(const IdentifierInfo &II, const MacroInfo &MI,
+ Preprocessor &PP, raw_ostream &OS) {
+ OS << "#define " << II.getName();
+
+ if (MI.isFunctionLike()) {
+ OS << '(';
+ if (!MI.arg_empty()) {
+ MacroInfo::arg_iterator AI = MI.arg_begin(), E = MI.arg_end();
+ for (; AI+1 != E; ++AI) {
+ OS << (*AI)->getName();
+ OS << ',';
+ }
+
+ // Last argument.
+ if ((*AI)->getName() == "__VA_ARGS__")
+ OS << "...";
+ else
+ OS << (*AI)->getName();
+ }
+
+ if (MI.isGNUVarargs())
+ OS << "..."; // #define foo(x...)
+
+ OS << ')';
+ }
+
+  // GCC always emits a space, even if the macro body is empty. However, we do
+  // not want to emit two spaces if the first token has a leading space.
+ if (MI.tokens_empty() || !MI.tokens_begin()->hasLeadingSpace())
+ OS << ' ';
+
+ SmallString<128> SpellingBuffer;
+ for (const auto &T : MI.tokens()) {
+ if (T.hasLeadingSpace())
+ OS << ' ';
+
+ OS << PP.getSpelling(T, SpellingBuffer);
+ }
+}
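+
+// Example (illustrative): for "#define ADD(a, b) ((a) + (b))" this prints
+//   #define ADD(a,b) ((a) + (b))
+// and a C99 variadic macro's trailing __VA_ARGS__ parameter prints as "...".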
+
+//===----------------------------------------------------------------------===//
+// Preprocessed token printer
+//===----------------------------------------------------------------------===//
+
+namespace {
+class PrintPPOutputPPCallbacks : public PPCallbacks {
+ Preprocessor &PP;
+ SourceManager &SM;
+ TokenConcatenation ConcatInfo;
+public:
+ raw_ostream &OS;
+private:
+ unsigned CurLine;
+
+ bool EmittedTokensOnThisLine;
+ bool EmittedDirectiveOnThisLine;
+ SrcMgr::CharacteristicKind FileType;
+ SmallString<512> CurFilename;
+ bool Initialized;
+ bool DisableLineMarkers;
+ bool DumpDefines;
+ bool UseLineDirectives;
+ bool IsFirstFileEntered;
+public:
+ PrintPPOutputPPCallbacks(Preprocessor &pp, raw_ostream &os, bool lineMarkers,
+ bool defines, bool UseLineDirectives)
+ : PP(pp), SM(PP.getSourceManager()), ConcatInfo(PP), OS(os),
+ DisableLineMarkers(lineMarkers), DumpDefines(defines),
+ UseLineDirectives(UseLineDirectives) {
+ CurLine = 0;
+ CurFilename += "<uninit>";
+ EmittedTokensOnThisLine = false;
+ EmittedDirectiveOnThisLine = false;
+ FileType = SrcMgr::C_User;
+ Initialized = false;
+ IsFirstFileEntered = false;
+ }
+
+ void setEmittedTokensOnThisLine() { EmittedTokensOnThisLine = true; }
+ bool hasEmittedTokensOnThisLine() const { return EmittedTokensOnThisLine; }
+
+ void setEmittedDirectiveOnThisLine() { EmittedDirectiveOnThisLine = true; }
+ bool hasEmittedDirectiveOnThisLine() const {
+ return EmittedDirectiveOnThisLine;
+ }
+
+ bool startNewLineIfNeeded(bool ShouldUpdateCurrentLine = true);
+
+ void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID) override;
+ void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
+ StringRef FileName, bool IsAngled,
+ CharSourceRange FilenameRange, const FileEntry *File,
+ StringRef SearchPath, StringRef RelativePath,
+ const Module *Imported) override;
+ void Ident(SourceLocation Loc, StringRef str) override;
+ void PragmaMessage(SourceLocation Loc, StringRef Namespace,
+ PragmaMessageKind Kind, StringRef Str) override;
+ void PragmaDebug(SourceLocation Loc, StringRef DebugType) override;
+ void PragmaDiagnosticPush(SourceLocation Loc, StringRef Namespace) override;
+ void PragmaDiagnosticPop(SourceLocation Loc, StringRef Namespace) override;
+ void PragmaDiagnostic(SourceLocation Loc, StringRef Namespace,
+ diag::Severity Map, StringRef Str) override;
+ void PragmaWarning(SourceLocation Loc, StringRef WarningSpec,
+ ArrayRef<int> Ids) override;
+ void PragmaWarningPush(SourceLocation Loc, int Level) override;
+ void PragmaWarningPop(SourceLocation Loc) override;
+
+ bool HandleFirstTokOnLine(Token &Tok);
+
+ /// Move to the line of the provided source location. This will
+ /// return true if the output stream required adjustment or if
+ /// the requested location is on the first line.
+ bool MoveToLine(SourceLocation Loc) {
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ if (PLoc.isInvalid())
+ return false;
+ return MoveToLine(PLoc.getLine()) || (PLoc.getLine() == 1);
+ }
+ bool MoveToLine(unsigned LineNo);
+
+ bool AvoidConcat(const Token &PrevPrevTok, const Token &PrevTok,
+ const Token &Tok) {
+ return ConcatInfo.AvoidConcat(PrevPrevTok, PrevTok, Tok);
+ }
+ void WriteLineInfo(unsigned LineNo, const char *Extra=nullptr,
+ unsigned ExtraLen=0);
+ bool LineMarkersAreDisabled() const { return DisableLineMarkers; }
+ void HandleNewlinesInToken(const char *TokStr, unsigned Len);
+
+ /// MacroDefined - This hook is called whenever a macro definition is seen.
+ void MacroDefined(const Token &MacroNameTok,
+ const MacroDirective *MD) override;
+
+ /// MacroUndefined - This hook is called whenever a macro #undef is seen.
+ void MacroUndefined(const Token &MacroNameTok,
+ const MacroDefinition &MD) override;
+};
+} // end anonymous namespace
+
+void PrintPPOutputPPCallbacks::WriteLineInfo(unsigned LineNo,
+ const char *Extra,
+ unsigned ExtraLen) {
+ startNewLineIfNeeded(/*ShouldUpdateCurrentLine=*/false);
+
+ // Emit #line directives or GNU line markers depending on what mode we're in.
+ if (UseLineDirectives) {
+ OS << "#line" << ' ' << LineNo << ' ' << '"';
+ OS.write_escaped(CurFilename);
+ OS << '"';
+ } else {
+ OS << '#' << ' ' << LineNo << ' ' << '"';
+ OS.write_escaped(CurFilename);
+ OS << '"';
+
+ if (ExtraLen)
+ OS.write(Extra, ExtraLen);
+
+ if (FileType == SrcMgr::C_System)
+ OS.write(" 3", 2);
+ else if (FileType == SrcMgr::C_ExternCSystem)
+ OS.write(" 3 4", 4);
+ }
+ OS << '\n';
+}
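+
+// Illustrative forms this can emit (file names and line numbers hypothetical):
+//   #line 42 "foo.h"       when UseLineDirectives is set
+//   # 42 "foo.h" 1         GNU marker; " 1" arrives via Extra on file entry
+//   # 42 "foo.h" 2 3 4     returning to an extern "C" system header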
+
+/// MoveToLine - Move the output to the source line specified by the location
+/// object. We can do this by emitting some number of \n's, or by emitting a
+/// #line directive. This returns false if already at the specified line, true
+/// if newlines or a line marker had to be emitted.
+bool PrintPPOutputPPCallbacks::MoveToLine(unsigned LineNo) {
+ // If this line is "close enough" to the original line, just print newlines,
+ // otherwise print a #line directive.
+ if (LineNo-CurLine <= 8) {
+ if (LineNo-CurLine == 1)
+ OS << '\n';
+ else if (LineNo == CurLine)
+ return false; // Spelling line moved, but expansion line didn't.
+ else {
+ const char *NewLines = "\n\n\n\n\n\n\n\n";
+ OS.write(NewLines, LineNo-CurLine);
+ }
+ } else if (!DisableLineMarkers) {
+ // Emit a #line or line marker.
+ WriteLineInfo(LineNo, nullptr, 0);
+ } else {
+ // Okay, we're in -P mode, which turns off line markers. However, we still
+ // need to emit a newline between tokens on different lines.
+ startNewLineIfNeeded(/*ShouldUpdateCurrentLine=*/false);
+ }
+
+ CurLine = LineNo;
+ return true;
+}
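+
+// For example (illustrative): with CurLine == 10, moving to line 12 emits two
+// newlines, while moving to line 100 emits a line marker (or, with markers
+// disabled, at most starts a new line).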
+
+bool
+PrintPPOutputPPCallbacks::startNewLineIfNeeded(bool ShouldUpdateCurrentLine) {
+ if (EmittedTokensOnThisLine || EmittedDirectiveOnThisLine) {
+ OS << '\n';
+ EmittedTokensOnThisLine = false;
+ EmittedDirectiveOnThisLine = false;
+ if (ShouldUpdateCurrentLine)
+ ++CurLine;
+ return true;
+ }
+
+ return false;
+}
+
+/// FileChanged - Whenever the preprocessor enters or exits a #include file
+/// it invokes this handler. Update our conception of the current source
+/// position.
+void PrintPPOutputPPCallbacks::FileChanged(SourceLocation Loc,
+ FileChangeReason Reason,
+ SrcMgr::CharacteristicKind NewFileType,
+ FileID PrevFID) {
+ // Unless we are exiting a #include, make sure to skip ahead to the line the
+ // #include directive was at.
+ SourceManager &SourceMgr = SM;
+
+ PresumedLoc UserLoc = SourceMgr.getPresumedLoc(Loc);
+ if (UserLoc.isInvalid())
+ return;
+
+ unsigned NewLine = UserLoc.getLine();
+
+ if (Reason == PPCallbacks::EnterFile) {
+ SourceLocation IncludeLoc = UserLoc.getIncludeLoc();
+ if (IncludeLoc.isValid())
+ MoveToLine(IncludeLoc);
+ } else if (Reason == PPCallbacks::SystemHeaderPragma) {
+ // GCC emits the # directive for this directive on the line AFTER the
+ // directive and emits a bunch of spaces that aren't needed. This is because
+ // otherwise we will emit a line marker for THIS line, which requires an
+ // extra blank line after the directive to avoid making all following lines
+ // off by one. We can do better by simply incrementing NewLine here.
+ NewLine += 1;
+ }
+
+ CurLine = NewLine;
+
+ CurFilename.clear();
+ CurFilename += UserLoc.getFilename();
+ FileType = NewFileType;
+
+ if (DisableLineMarkers) {
+ startNewLineIfNeeded(/*ShouldUpdateCurrentLine=*/false);
+ return;
+ }
+
+ if (!Initialized) {
+ WriteLineInfo(CurLine);
+ Initialized = true;
+ }
+
+ // Do not emit an enter marker for the main file (which we expect is the first
+ // entered file). This matches gcc, and improves compatibility with some tools
+ // which track the # line markers as a way to determine when the preprocessed
+ // output is in the context of the main file.
+ if (Reason == PPCallbacks::EnterFile && !IsFirstFileEntered) {
+ IsFirstFileEntered = true;
+ return;
+ }
+
+ switch (Reason) {
+ case PPCallbacks::EnterFile:
+ WriteLineInfo(CurLine, " 1", 2);
+ break;
+ case PPCallbacks::ExitFile:
+ WriteLineInfo(CurLine, " 2", 2);
+ break;
+ case PPCallbacks::SystemHeaderPragma:
+ case PPCallbacks::RenameFile:
+ WriteLineInfo(CurLine);
+ break;
+ }
+}
+
+void PrintPPOutputPPCallbacks::InclusionDirective(SourceLocation HashLoc,
+ const Token &IncludeTok,
+ StringRef FileName,
+ bool IsAngled,
+ CharSourceRange FilenameRange,
+ const FileEntry *File,
+ StringRef SearchPath,
+ StringRef RelativePath,
+ const Module *Imported) {
+ // When preprocessing, turn implicit imports into @imports.
+ // FIXME: This is a stop-gap until a more comprehensive "preprocessing with
+ // modules" solution is introduced.
+ if (Imported) {
+ startNewLineIfNeeded();
+ MoveToLine(HashLoc);
+ OS << "@import " << Imported->getFullModuleName() << ";"
+ << " /* clang -E: implicit import for \"" << File->getName() << "\" */";
+ // Since we want a newline after the @import, but not a #<line>, start a new
+ // line immediately.
+ EmittedTokensOnThisLine = true;
+ startNewLineIfNeeded();
+ }
+}
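+
+// Illustrative output, with a hypothetical module and path: an include that
+// was implicitly mapped to a module import prints as
+//   @import Foo; /* clang -E: implicit import for "/SDK/Foo/Foo.h" */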
+
+/// Ident - Handle #ident directives when read by the preprocessor.
+///
+void PrintPPOutputPPCallbacks::Ident(SourceLocation Loc, StringRef S) {
+ MoveToLine(Loc);
+
+ OS.write("#ident ", strlen("#ident "));
+ OS.write(S.begin(), S.size());
+ EmittedTokensOnThisLine = true;
+}
+
+/// MacroDefined - This hook is called whenever a macro definition is seen.
+void PrintPPOutputPPCallbacks::MacroDefined(const Token &MacroNameTok,
+ const MacroDirective *MD) {
+ const MacroInfo *MI = MD->getMacroInfo();
+ // Only print out macro definitions in -dD mode.
+ if (!DumpDefines ||
+ // Ignore __FILE__ etc.
+ MI->isBuiltinMacro()) return;
+
+ MoveToLine(MI->getDefinitionLoc());
+ PrintMacroDefinition(*MacroNameTok.getIdentifierInfo(), *MI, PP, OS);
+ setEmittedDirectiveOnThisLine();
+}
+
+void PrintPPOutputPPCallbacks::MacroUndefined(const Token &MacroNameTok,
+ const MacroDefinition &MD) {
+ // Only print out macro definitions in -dD mode.
+ if (!DumpDefines) return;
+
+ MoveToLine(MacroNameTok.getLocation());
+ OS << "#undef " << MacroNameTok.getIdentifierInfo()->getName();
+ setEmittedDirectiveOnThisLine();
+}
+
+static void outputPrintable(llvm::raw_ostream& OS,
+ const std::string &Str) {
+ for (unsigned i = 0, e = Str.size(); i != e; ++i) {
+ unsigned char Char = Str[i];
+ if (isPrintable(Char) && Char != '\\' && Char != '"')
+ OS << (char)Char;
+ else // Output anything hard as an octal escape.
+ OS << '\\'
+ << (char)('0'+ ((Char >> 6) & 7))
+ << (char)('0'+ ((Char >> 3) & 7))
+ << (char)('0'+ ((Char >> 0) & 7));
+ }
+}
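+
+// Example (illustrative): outputPrintable(OS, "say \"hi\"\n") writes
+//   say \042hi\042\012
+// since '"' (0x22) and '\n' (0x0A) are escaped as three-digit octal.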
+
+void PrintPPOutputPPCallbacks::PragmaMessage(SourceLocation Loc,
+ StringRef Namespace,
+ PragmaMessageKind Kind,
+ StringRef Str) {
+ startNewLineIfNeeded();
+ MoveToLine(Loc);
+ OS << "#pragma ";
+ if (!Namespace.empty())
+ OS << Namespace << ' ';
+ switch (Kind) {
+ case PMK_Message:
+ OS << "message(\"";
+ break;
+ case PMK_Warning:
+ OS << "warning \"";
+ break;
+ case PMK_Error:
+ OS << "error \"";
+ break;
+ }
+
+ outputPrintable(OS, Str);
+ OS << '"';
+ if (Kind == PMK_Message)
+ OS << ')';
+ setEmittedDirectiveOnThisLine();
+}
+
+void PrintPPOutputPPCallbacks::PragmaDebug(SourceLocation Loc,
+ StringRef DebugType) {
+ startNewLineIfNeeded();
+ MoveToLine(Loc);
+
+ OS << "#pragma clang __debug ";
+ OS << DebugType;
+
+ setEmittedDirectiveOnThisLine();
+}
+
+void PrintPPOutputPPCallbacks::
+PragmaDiagnosticPush(SourceLocation Loc, StringRef Namespace) {
+ startNewLineIfNeeded();
+ MoveToLine(Loc);
+ OS << "#pragma " << Namespace << " diagnostic push";
+ setEmittedDirectiveOnThisLine();
+}
+
+void PrintPPOutputPPCallbacks::
+PragmaDiagnosticPop(SourceLocation Loc, StringRef Namespace) {
+ startNewLineIfNeeded();
+ MoveToLine(Loc);
+ OS << "#pragma " << Namespace << " diagnostic pop";
+ setEmittedDirectiveOnThisLine();
+}
+
+void PrintPPOutputPPCallbacks::PragmaDiagnostic(SourceLocation Loc,
+ StringRef Namespace,
+ diag::Severity Map,
+ StringRef Str) {
+ startNewLineIfNeeded();
+ MoveToLine(Loc);
+ OS << "#pragma " << Namespace << " diagnostic ";
+ switch (Map) {
+ case diag::Severity::Remark:
+ OS << "remark";
+ break;
+ case diag::Severity::Warning:
+ OS << "warning";
+ break;
+ case diag::Severity::Error:
+ OS << "error";
+ break;
+ case diag::Severity::Ignored:
+ OS << "ignored";
+ break;
+ case diag::Severity::Fatal:
+ OS << "fatal";
+ break;
+ }
+ OS << " \"" << Str << '"';
+ setEmittedDirectiveOnThisLine();
+}
+
+void PrintPPOutputPPCallbacks::PragmaWarning(SourceLocation Loc,
+ StringRef WarningSpec,
+ ArrayRef<int> Ids) {
+ startNewLineIfNeeded();
+ MoveToLine(Loc);
+ OS << "#pragma warning(" << WarningSpec << ':';
+ for (ArrayRef<int>::iterator I = Ids.begin(), E = Ids.end(); I != E; ++I)
+ OS << ' ' << *I;
+ OS << ')';
+ setEmittedDirectiveOnThisLine();
+}
+
+void PrintPPOutputPPCallbacks::PragmaWarningPush(SourceLocation Loc,
+ int Level) {
+ startNewLineIfNeeded();
+ MoveToLine(Loc);
+ OS << "#pragma warning(push";
+ if (Level >= 0)
+ OS << ", " << Level;
+ OS << ')';
+ setEmittedDirectiveOnThisLine();
+}
+
+void PrintPPOutputPPCallbacks::PragmaWarningPop(SourceLocation Loc) {
+ startNewLineIfNeeded();
+ MoveToLine(Loc);
+ OS << "#pragma warning(pop)";
+ setEmittedDirectiveOnThisLine();
+}
+
+/// HandleFirstTokOnLine - When emitting a preprocessed file in -E mode, this
+/// is called for the first token on each new line. If this really is the start
+/// of a new logical line, handle it and return true, otherwise return false.
+/// This may not be the start of a logical line because the "start of line"
+/// marker is set for spelling lines, not expansion ones.
+bool PrintPPOutputPPCallbacks::HandleFirstTokOnLine(Token &Tok) {
+ // Figure out what line we went to and insert the appropriate number of
+ // newline characters.
+ if (!MoveToLine(Tok.getLocation()))
+ return false;
+
+ // Print out space characters so that the first token on a line is
+ // indented for easy reading.
+ unsigned ColNo = SM.getExpansionColumnNumber(Tok.getLocation());
+
+ // The first token on a line can have a column number of 1, yet still expect
+ // leading white space, if a macro expansion in column 1 starts with an empty
+ // macro argument, or an empty nested macro expansion. In this case, move the
+ // token to column 2.
+ if (ColNo == 1 && Tok.hasLeadingSpace())
+ ColNo = 2;
+
+ // This hack prevents stuff like:
+ // #define HASH #
+ // HASH define foo bar
+ // From having the # character end up at column 1, which makes it so it
+ // is not handled as a #define next time through the preprocessor if in
+ // -fpreprocessed mode.
+ if (ColNo <= 1 && Tok.is(tok::hash))
+ OS << ' ';
+
+ // Otherwise, indent the appropriate number of spaces.
+ for (; ColNo > 1; --ColNo)
+ OS << ' ';
+
+ return true;
+}
+
+void PrintPPOutputPPCallbacks::HandleNewlinesInToken(const char *TokStr,
+ unsigned Len) {
+ unsigned NumNewlines = 0;
+ for (; Len; --Len, ++TokStr) {
+ if (*TokStr != '\n' &&
+ *TokStr != '\r')
+ continue;
+
+ ++NumNewlines;
+
+ // If we have \n\r or \r\n, skip both and count as one line.
+ if (Len != 1 &&
+ (TokStr[1] == '\n' || TokStr[1] == '\r') &&
+ TokStr[0] != TokStr[1])
+ ++TokStr, --Len;
+ }
+
+ if (NumNewlines == 0) return;
+
+ CurLine += NumNewlines;
+}
+
+
+namespace {
+struct UnknownPragmaHandler : public PragmaHandler {
+ const char *Prefix;
+ PrintPPOutputPPCallbacks *Callbacks;
+
+ // Set to true if tokens should be expanded
+ bool ShouldExpandTokens;
+
+ UnknownPragmaHandler(const char *prefix, PrintPPOutputPPCallbacks *callbacks,
+ bool RequireTokenExpansion)
+ : Prefix(prefix), Callbacks(callbacks),
+ ShouldExpandTokens(RequireTokenExpansion) {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &PragmaTok) override {
+ // Figure out what line we went to and insert the appropriate number of
+ // newline characters.
+ Callbacks->startNewLineIfNeeded();
+ Callbacks->MoveToLine(PragmaTok.getLocation());
+ Callbacks->OS.write(Prefix, strlen(Prefix));
+
+ Token PrevToken;
+ Token PrevPrevToken;
+ PrevToken.startToken();
+ PrevPrevToken.startToken();
+
+ // Read and print all of the pragma tokens.
+ while (PragmaTok.isNot(tok::eod)) {
+ if (PragmaTok.hasLeadingSpace() ||
+ Callbacks->AvoidConcat(PrevPrevToken, PrevToken, PragmaTok))
+ Callbacks->OS << ' ';
+ std::string TokSpell = PP.getSpelling(PragmaTok);
+ Callbacks->OS.write(&TokSpell[0], TokSpell.size());
+
+ PrevPrevToken = PrevToken;
+ PrevToken = PragmaTok;
+
+ if (ShouldExpandTokens)
+ PP.Lex(PragmaTok);
+ else
+ PP.LexUnexpandedToken(PragmaTok);
+ }
+ Callbacks->setEmittedDirectiveOnThisLine();
+ }
+};
+} // end anonymous namespace
+
+
+static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
+ PrintPPOutputPPCallbacks *Callbacks,
+ raw_ostream &OS) {
+ bool DropComments = PP.getLangOpts().TraditionalCPP &&
+ !PP.getCommentRetentionState();
+
+ char Buffer[256];
+ Token PrevPrevTok, PrevTok;
+ PrevPrevTok.startToken();
+ PrevTok.startToken();
+ while (1) {
+ if (Callbacks->hasEmittedDirectiveOnThisLine()) {
+ Callbacks->startNewLineIfNeeded();
+ Callbacks->MoveToLine(Tok.getLocation());
+ }
+
+ // If this token is at the start of a line, emit newlines if needed.
+ if (Tok.isAtStartOfLine() && Callbacks->HandleFirstTokOnLine(Tok)) {
+ // done.
+ } else if (Tok.hasLeadingSpace() ||
+ // If we haven't emitted a token on this line yet, PrevTok isn't
+ // useful to look at and no concatenation could happen anyway.
+ (Callbacks->hasEmittedTokensOnThisLine() &&
+ // Don't print "-" next to "-", it would form "--".
+ Callbacks->AvoidConcat(PrevPrevTok, PrevTok, Tok))) {
+ OS << ' ';
+ }
+
+ if (DropComments && Tok.is(tok::comment)) {
+ // Skip comments. Normally the preprocessor does not generate
+ // tok::comment nodes at all when not keeping comments, but under
+ // -traditional-cpp the lexer keeps /all/ whitespace, including comments.
+ SourceLocation StartLoc = Tok.getLocation();
+ Callbacks->MoveToLine(StartLoc.getLocWithOffset(Tok.getLength()));
+ } else if (Tok.is(tok::annot_module_include) ||
+ Tok.is(tok::annot_module_begin) ||
+ Tok.is(tok::annot_module_end)) {
+ // PrintPPOutputPPCallbacks::InclusionDirective handles producing
+ // appropriate output here. Ignore this token entirely.
+ PP.Lex(Tok);
+ continue;
+ } else if (IdentifierInfo *II = Tok.getIdentifierInfo()) {
+ OS << II->getName();
+ } else if (Tok.isLiteral() && !Tok.needsCleaning() &&
+ Tok.getLiteralData()) {
+ OS.write(Tok.getLiteralData(), Tok.getLength());
+ } else if (Tok.getLength() < 256) {
+ const char *TokPtr = Buffer;
+ unsigned Len = PP.getSpelling(Tok, TokPtr);
+ OS.write(TokPtr, Len);
+
+ // Tokens that can contain embedded newlines need to adjust our current
+ // line number.
+ if (Tok.getKind() == tok::comment || Tok.getKind() == tok::unknown)
+ Callbacks->HandleNewlinesInToken(TokPtr, Len);
+ } else {
+ std::string S = PP.getSpelling(Tok);
+ OS.write(&S[0], S.size());
+
+ // Tokens that can contain embedded newlines need to adjust our current
+ // line number.
+ if (Tok.getKind() == tok::comment || Tok.getKind() == tok::unknown)
+ Callbacks->HandleNewlinesInToken(&S[0], S.size());
+ }
+ Callbacks->setEmittedTokensOnThisLine();
+
+ if (Tok.is(tok::eof)) break;
+
+ PrevPrevTok = PrevTok;
+ PrevTok = Tok;
+ PP.Lex(Tok);
+ }
+}
+
+typedef std::pair<const IdentifierInfo *, MacroInfo *> id_macro_pair;
+static int MacroIDCompare(const id_macro_pair *LHS, const id_macro_pair *RHS) {
+ return LHS->first->getName().compare(RHS->first->getName());
+}
+
+static void DoPrintMacros(Preprocessor &PP, raw_ostream *OS) {
+ // Ignore unknown pragmas.
+ PP.IgnorePragmas();
+
+ // -dM mode just scans and ignores all tokens in the files, then dumps out
+ // the macro table at the end.
+ PP.EnterMainSourceFile();
+
+ Token Tok;
+ do PP.Lex(Tok);
+ while (Tok.isNot(tok::eof));
+
+ SmallVector<id_macro_pair, 128> MacrosByID;
+ for (Preprocessor::macro_iterator I = PP.macro_begin(), E = PP.macro_end();
+ I != E; ++I) {
+ auto *MD = I->second.getLatest();
+ if (MD && MD->isDefined())
+ MacrosByID.push_back(id_macro_pair(I->first, MD->getMacroInfo()));
+ }
+ llvm::array_pod_sort(MacrosByID.begin(), MacrosByID.end(), MacroIDCompare);
+
+ for (unsigned i = 0, e = MacrosByID.size(); i != e; ++i) {
+ MacroInfo &MI = *MacrosByID[i].second;
+ // Ignore computed macros like __LINE__ and friends.
+ if (MI.isBuiltinMacro()) continue;
+
+ PrintMacroDefinition(*MacrosByID[i].first, MI, PP, *OS);
+ *OS << '\n';
+ }
+}
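+
+// Illustrative -dM output (the macro set is hypothetical); definitions are
+// printed sorted by macro name:
+//   #define BAR 2
+//   #define FOO 1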
+
+/// DoPrintPreprocessedInput - This implements -E mode.
+///
+void clang::DoPrintPreprocessedInput(Preprocessor &PP, raw_ostream *OS,
+ const PreprocessorOutputOptions &Opts) {
+  // Showing macros without preprocessed output (-dM) is handled specially.
+ if (!Opts.ShowCPP) {
+ assert(Opts.ShowMacros && "Not yet implemented!");
+ DoPrintMacros(PP, OS);
+ return;
+ }
+
+ // Inform the preprocessor whether we want it to retain comments or not, due
+ // to -C or -CC.
+ PP.SetCommentRetentionState(Opts.ShowComments, Opts.ShowMacroComments);
+
+ PrintPPOutputPPCallbacks *Callbacks = new PrintPPOutputPPCallbacks(
+ PP, *OS, !Opts.ShowLineMarkers, Opts.ShowMacros, Opts.UseLineDirectives);
+
+ // Expand macros in pragmas with -fms-extensions. The assumption is that
+ // the majority of pragmas in such a file will be Microsoft pragmas.
+ PP.AddPragmaHandler(new UnknownPragmaHandler(
+ "#pragma", Callbacks,
+ /*RequireTokenExpansion=*/PP.getLangOpts().MicrosoftExt));
+ PP.AddPragmaHandler(
+ "GCC", new UnknownPragmaHandler(
+ "#pragma GCC", Callbacks,
+ /*RequireTokenExpansion=*/PP.getLangOpts().MicrosoftExt));
+ PP.AddPragmaHandler(
+ "clang", new UnknownPragmaHandler(
+ "#pragma clang", Callbacks,
+ /*RequireTokenExpansion=*/PP.getLangOpts().MicrosoftExt));
+
+ // The tokens after pragma omp need to be expanded.
+ //
+ // OpenMP [2.1, Directive format]
+ // Preprocessing tokens following the #pragma omp are subject to macro
+ // replacement.
+ PP.AddPragmaHandler("omp",
+ new UnknownPragmaHandler("#pragma omp", Callbacks,
+ /*RequireTokenExpansion=*/true));
+
+ PP.addPPCallbacks(std::unique_ptr<PPCallbacks>(Callbacks));
+
+ // After we have configured the preprocessor, enter the main file.
+ PP.EnterMainSourceFile();
+
+ // Consume all of the tokens that come from the predefines buffer. Those
+ // should not be emitted into the output and are guaranteed to be at the
+ // start.
+ const SourceManager &SourceMgr = PP.getSourceManager();
+ Token Tok;
+ do {
+ PP.Lex(Tok);
+ if (Tok.is(tok::eof) || !Tok.getLocation().isFileID())
+ break;
+
+ PresumedLoc PLoc = SourceMgr.getPresumedLoc(Tok.getLocation());
+ if (PLoc.isInvalid())
+ break;
+
+ if (strcmp(PLoc.getFilename(), "<built-in>"))
+ break;
+ } while (true);
+
+ // Read all the preprocessed tokens, printing them out to the stream.
+ PrintPreprocessedTokens(PP, Tok, Callbacks, *OS);
+ *OS << '\n';
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/Rewrite/FixItRewriter.cpp b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/FixItRewriter.cpp
new file mode 100644
index 0000000..dc787ac
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/FixItRewriter.cpp
@@ -0,0 +1,203 @@
+//===--- FixItRewriter.cpp - Fix-It Rewriter Diagnostic Client --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a diagnostic client adaptor that performs rewrites as
+// suggested by code modification hints attached to diagnostics. It
+// then forwards any diagnostics to the adapted diagnostic client.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/Frontend/FixItRewriter.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Edit/Commit.h"
+#include "clang/Edit/EditsReceiver.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdio>
+#include <memory>
+
+using namespace clang;
+
+FixItRewriter::FixItRewriter(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
+ const LangOptions &LangOpts,
+ FixItOptions *FixItOpts)
+ : Diags(Diags),
+ Editor(SourceMgr, LangOpts),
+ Rewrite(SourceMgr, LangOpts),
+ FixItOpts(FixItOpts),
+ NumFailures(0),
+ PrevDiagSilenced(false) {
+ Owner = Diags.takeClient();
+ Client = Diags.getClient();
+ Diags.setClient(this, false);
+}
+
+FixItRewriter::~FixItRewriter() {
+ Diags.setClient(Client, Owner.release() != nullptr);
+}
+
+bool FixItRewriter::WriteFixedFile(FileID ID, raw_ostream &OS) {
+ const RewriteBuffer *RewriteBuf = Rewrite.getRewriteBufferFor(ID);
+ if (!RewriteBuf) return true;
+ RewriteBuf->write(OS);
+ OS.flush();
+ return false;
+}
+
+namespace {
+
+class RewritesReceiver : public edit::EditsReceiver {
+ Rewriter &Rewrite;
+
+public:
+ RewritesReceiver(Rewriter &Rewrite) : Rewrite(Rewrite) { }
+
+ void insert(SourceLocation loc, StringRef text) override {
+ Rewrite.InsertText(loc, text);
+ }
+ void replace(CharSourceRange range, StringRef text) override {
+ Rewrite.ReplaceText(range.getBegin(), Rewrite.getRangeSize(range), text);
+ }
+};
+
+}
+
+bool FixItRewriter::WriteFixedFiles(
+ std::vector<std::pair<std::string, std::string> > *RewrittenFiles) {
+ if (NumFailures > 0 && !FixItOpts->FixWhatYouCan) {
+ Diag(FullSourceLoc(), diag::warn_fixit_no_changes);
+ return true;
+ }
+
+ RewritesReceiver Rec(Rewrite);
+ Editor.applyRewrites(Rec);
+
+ if (FixItOpts->InPlace) {
+ // Overwriting open files on Windows is tricky, but the rewriter can do it
+ // for us.
+ Rewrite.overwriteChangedFiles();
+ return false;
+ }
+
+ for (iterator I = buffer_begin(), E = buffer_end(); I != E; ++I) {
+ const FileEntry *Entry = Rewrite.getSourceMgr().getFileEntryForID(I->first);
+ int fd;
+ std::string Filename = FixItOpts->RewriteFilename(Entry->getName(), fd);
+ std::error_code EC;
+ std::unique_ptr<llvm::raw_fd_ostream> OS;
+ if (fd != -1) {
+ OS.reset(new llvm::raw_fd_ostream(fd, /*shouldClose=*/true));
+ } else {
+ OS.reset(new llvm::raw_fd_ostream(Filename, EC, llvm::sys::fs::F_None));
+ }
+ if (EC) {
+ Diags.Report(clang::diag::err_fe_unable_to_open_output) << Filename
+ << EC.message();
+ continue;
+ }
+ RewriteBuffer &RewriteBuf = I->second;
+ RewriteBuf.write(*OS);
+ OS->flush();
+
+ if (RewrittenFiles)
+ RewrittenFiles->push_back(std::make_pair(Entry->getName(), Filename));
+ }
+
+ return false;
+}
+
+bool FixItRewriter::IncludeInDiagnosticCounts() const {
+ return Client ? Client->IncludeInDiagnosticCounts() : true;
+}
+
+void FixItRewriter::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info) {
+ // Default implementation (Warnings/errors count).
+ DiagnosticConsumer::HandleDiagnostic(DiagLevel, Info);
+
+ if (!FixItOpts->Silent ||
+ DiagLevel >= DiagnosticsEngine::Error ||
+ (DiagLevel == DiagnosticsEngine::Note && !PrevDiagSilenced) ||
+ (DiagLevel > DiagnosticsEngine::Note && Info.getNumFixItHints())) {
+ Client->HandleDiagnostic(DiagLevel, Info);
+ PrevDiagSilenced = false;
+ } else {
+ PrevDiagSilenced = true;
+ }
+
+ // Skip over any diagnostics that are ignored or notes.
+ if (DiagLevel <= DiagnosticsEngine::Note)
+ return;
+ // Skip over errors if we are only fixing warnings.
+ if (DiagLevel >= DiagnosticsEngine::Error && FixItOpts->FixOnlyWarnings) {
+ ++NumFailures;
+ return;
+ }
+
+  // Make sure that we can perform all of the modifications suggested
+  // in this diagnostic.
+ edit::Commit commit(Editor);
+ for (unsigned Idx = 0, Last = Info.getNumFixItHints();
+ Idx < Last; ++Idx) {
+ const FixItHint &Hint = Info.getFixItHint(Idx);
+
+ if (Hint.CodeToInsert.empty()) {
+ if (Hint.InsertFromRange.isValid())
+ commit.insertFromRange(Hint.RemoveRange.getBegin(),
+ Hint.InsertFromRange, /*afterToken=*/false,
+ Hint.BeforePreviousInsertions);
+ else
+ commit.remove(Hint.RemoveRange);
+ } else {
+ if (Hint.RemoveRange.isTokenRange() ||
+ Hint.RemoveRange.getBegin() != Hint.RemoveRange.getEnd())
+ commit.replace(Hint.RemoveRange, Hint.CodeToInsert);
+ else
+ commit.insert(Hint.RemoveRange.getBegin(), Hint.CodeToInsert,
+ /*afterToken=*/false, Hint.BeforePreviousInsertions);
+ }
+ }
+ bool CanRewrite = Info.getNumFixItHints() > 0 && commit.isCommitable();
+
+ if (!CanRewrite) {
+ if (Info.getNumFixItHints() > 0)
+ Diag(Info.getLocation(), diag::note_fixit_in_macro);
+
+ // If this was an error, refuse to perform any rewriting.
+ if (DiagLevel >= DiagnosticsEngine::Error) {
+ if (++NumFailures == 1)
+ Diag(Info.getLocation(), diag::note_fixit_unfixed_error);
+ }
+ return;
+ }
+
+ if (!Editor.commit(commit)) {
+ ++NumFailures;
+ Diag(Info.getLocation(), diag::note_fixit_failed);
+ return;
+ }
+
+ Diag(Info.getLocation(), diag::note_fixit_applied);
+}
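+
+// For illustration (not from the original source): a diagnostic carrying a
+// single fix-it hint, e.g. a warning whose hint replaces the spelled range
+// "x:" with ".x =", is turned by the loop above into a commit.replace() on
+// that range; Editor.commit() then stages the edit so WriteFixedFiles() can
+// write out the fixed buffer.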
+
+/// \brief Emit a diagnostic via the adapted diagnostic client.
+void FixItRewriter::Diag(SourceLocation Loc, unsigned DiagID) {
+ // When producing this diagnostic, we temporarily bypass ourselves,
+ // clear out any current diagnostic, and let the downstream client
+ // format the diagnostic.
+ Diags.setClient(Client, false);
+ Diags.Clear();
+ Diags.Report(Loc, DiagID);
+ Diags.setClient(this, false);
+}
+
+FixItOptions::~FixItOptions() {}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/Rewrite/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/FrontendActions.cpp
new file mode 100644
index 0000000..8cf8adf
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/FrontendActions.cpp
@@ -0,0 +1,197 @@
+//===--- FrontendActions.cpp ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/Frontend/FrontendActions.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/Utils.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Rewrite/Frontend/ASTConsumers.h"
+#include "clang/Rewrite/Frontend/FixItRewriter.h"
+#include "clang/Rewrite/Frontend/Rewriters.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include <memory>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// AST Consumer Actions
+//===----------------------------------------------------------------------===//
+
+std::unique_ptr<ASTConsumer>
+HTMLPrintAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ if (raw_ostream *OS = CI.createDefaultOutputFile(false, InFile))
+ return CreateHTMLPrinter(OS, CI.getPreprocessor());
+ return nullptr;
+}
+
+FixItAction::FixItAction() {}
+FixItAction::~FixItAction() {}
+
+std::unique_ptr<ASTConsumer>
+FixItAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ return llvm::make_unique<ASTConsumer>();
+}
+
+namespace {
+class FixItRewriteInPlace : public FixItOptions {
+public:
+ FixItRewriteInPlace() { InPlace = true; }
+
+ std::string RewriteFilename(const std::string &Filename, int &fd) override {
+ llvm_unreachable("don't call RewriteFilename for inplace rewrites");
+ }
+};
+
+class FixItActionSuffixInserter : public FixItOptions {
+ std::string NewSuffix;
+
+public:
+ FixItActionSuffixInserter(std::string NewSuffix, bool FixWhatYouCan)
+ : NewSuffix(NewSuffix) {
+ this->FixWhatYouCan = FixWhatYouCan;
+ }
+
+ std::string RewriteFilename(const std::string &Filename, int &fd) override {
+ fd = -1;
+ SmallString<128> Path(Filename);
+ llvm::sys::path::replace_extension(Path,
+ NewSuffix + llvm::sys::path::extension(Path));
+ return Path.str();
+ }
+};
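+
+// For illustration (not from the original source): with NewSuffix == ".fixed",
+// RewriteFilename("foo.cpp", fd) sets fd to -1 and returns "foo.fixed.cpp",
+// so the caller opens the output by name rather than by file descriptor.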
+
+class FixItRewriteToTemp : public FixItOptions {
+public:
+ std::string RewriteFilename(const std::string &Filename, int &fd) override {
+ SmallString<128> Path;
+ llvm::sys::fs::createTemporaryFile(llvm::sys::path::filename(Filename),
+ llvm::sys::path::extension(Filename).drop_front(), fd,
+ Path);
+ return Path.str();
+ }
+};
+} // end anonymous namespace
+
+bool FixItAction::BeginSourceFileAction(CompilerInstance &CI,
+ StringRef Filename) {
+ const FrontendOptions &FEOpts = getCompilerInstance().getFrontendOpts();
+ if (!FEOpts.FixItSuffix.empty()) {
+ FixItOpts.reset(new FixItActionSuffixInserter(FEOpts.FixItSuffix,
+ FEOpts.FixWhatYouCan));
+ } else {
+ FixItOpts.reset(new FixItRewriteInPlace);
+ FixItOpts->FixWhatYouCan = FEOpts.FixWhatYouCan;
+ }
+ Rewriter.reset(new FixItRewriter(CI.getDiagnostics(), CI.getSourceManager(),
+ CI.getLangOpts(), FixItOpts.get()));
+ return true;
+}
+
+void FixItAction::EndSourceFileAction() {
+  // Rewrite all files.
+ Rewriter->WriteFixedFiles();
+}
+
+bool FixItRecompile::BeginInvocation(CompilerInstance &CI) {
+
+ std::vector<std::pair<std::string, std::string> > RewrittenFiles;
+ bool err = false;
+ {
+ const FrontendOptions &FEOpts = CI.getFrontendOpts();
+ std::unique_ptr<FrontendAction> FixAction(new SyntaxOnlyAction());
+ if (FixAction->BeginSourceFile(CI, FEOpts.Inputs[0])) {
+ std::unique_ptr<FixItOptions> FixItOpts;
+ if (FEOpts.FixToTemporaries)
+ FixItOpts.reset(new FixItRewriteToTemp());
+ else
+ FixItOpts.reset(new FixItRewriteInPlace());
+ FixItOpts->Silent = true;
+ FixItOpts->FixWhatYouCan = FEOpts.FixWhatYouCan;
+ FixItOpts->FixOnlyWarnings = FEOpts.FixOnlyWarnings;
+ FixItRewriter Rewriter(CI.getDiagnostics(), CI.getSourceManager(),
+ CI.getLangOpts(), FixItOpts.get());
+ FixAction->Execute();
+
+ err = Rewriter.WriteFixedFiles(&RewrittenFiles);
+
+ FixAction->EndSourceFile();
+ CI.setSourceManager(nullptr);
+ CI.setFileManager(nullptr);
+ } else {
+ err = true;
+ }
+ }
+ if (err)
+ return false;
+ CI.getDiagnosticClient().clear();
+ CI.getDiagnostics().Reset();
+
+ PreprocessorOptions &PPOpts = CI.getPreprocessorOpts();
+ PPOpts.RemappedFiles.insert(PPOpts.RemappedFiles.end(),
+ RewrittenFiles.begin(), RewrittenFiles.end());
+ PPOpts.RemappedFilesKeepOriginalName = false;
+
+ return true;
+}
+
+#ifdef CLANG_ENABLE_OBJC_REWRITER
+
+std::unique_ptr<ASTConsumer>
+RewriteObjCAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ if (raw_ostream *OS = CI.createDefaultOutputFile(false, InFile, "cpp")) {
+ if (CI.getLangOpts().ObjCRuntime.isNonFragile())
+ return CreateModernObjCRewriter(InFile, OS,
+ CI.getDiagnostics(), CI.getLangOpts(),
+ CI.getDiagnosticOpts().NoRewriteMacros,
+ (CI.getCodeGenOpts().getDebugInfo() !=
+ CodeGenOptions::NoDebugInfo));
+ return CreateObjCRewriter(InFile, OS,
+ CI.getDiagnostics(), CI.getLangOpts(),
+ CI.getDiagnosticOpts().NoRewriteMacros);
+ }
+ return nullptr;
+}
+
+#endif
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Actions
+//===----------------------------------------------------------------------===//
+
+void RewriteMacrosAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+ raw_ostream *OS = CI.createDefaultOutputFile(true, getCurrentFile());
+ if (!OS) return;
+
+ RewriteMacrosInInput(CI.getPreprocessor(), OS);
+}
+
+void RewriteTestAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+ raw_ostream *OS = CI.createDefaultOutputFile(false, getCurrentFile());
+ if (!OS) return;
+
+ DoRewriteTest(CI.getPreprocessor(), OS);
+}
+
+void RewriteIncludesAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+ raw_ostream *OS = CI.createDefaultOutputFile(true, getCurrentFile());
+ if (!OS) return;
+
+ RewriteIncludesInInput(CI.getPreprocessor(), OS,
+ CI.getPreprocessorOutputOpts());
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/Rewrite/HTMLPrint.cpp b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/HTMLPrint.cpp
new file mode 100644
index 0000000..22ccfe6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/HTMLPrint.cpp
@@ -0,0 +1,95 @@
+//===--- HTMLPrint.cpp - Source code -> HTML pretty-printing --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Pretty-printing of source code to HTML.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/Frontend/ASTConsumers.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Rewrite/Core/HTMLRewrite.h"
+#include "clang/Rewrite/Core/Rewriter.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Functional HTML pretty-printing.
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class HTMLPrinter : public ASTConsumer {
+ Rewriter R;
+ raw_ostream *Out;
+ Preprocessor &PP;
+ bool SyntaxHighlight, HighlightMacros;
+
+ public:
+ HTMLPrinter(raw_ostream *OS, Preprocessor &pp,
+ bool _SyntaxHighlight, bool _HighlightMacros)
+ : Out(OS), PP(pp), SyntaxHighlight(_SyntaxHighlight),
+ HighlightMacros(_HighlightMacros) {}
+
+ void Initialize(ASTContext &context) override;
+ void HandleTranslationUnit(ASTContext &Ctx) override;
+ };
+} // end anonymous namespace
+
+std::unique_ptr<ASTConsumer> clang::CreateHTMLPrinter(raw_ostream *OS,
+ Preprocessor &PP,
+ bool SyntaxHighlight,
+ bool HighlightMacros) {
+ return llvm::make_unique<HTMLPrinter>(OS, PP, SyntaxHighlight,
+ HighlightMacros);
+}
+
+void HTMLPrinter::Initialize(ASTContext &context) {
+ R.setSourceMgr(context.getSourceManager(), context.getLangOpts());
+}
+
+void HTMLPrinter::HandleTranslationUnit(ASTContext &Ctx) {
+ if (PP.getDiagnostics().hasErrorOccurred())
+ return;
+
+ // Format the file.
+ FileID FID = R.getSourceMgr().getMainFileID();
+ const FileEntry* Entry = R.getSourceMgr().getFileEntryForID(FID);
+ const char* Name;
+ // In some cases, in particular the case where the input is from stdin,
+ // there is no entry. Fall back to the memory buffer for a name in those
+ // cases.
+ if (Entry)
+ Name = Entry->getName();
+ else
+ Name = R.getSourceMgr().getBuffer(FID)->getBufferIdentifier();
+
+ html::AddLineNumbers(R, FID);
+ html::AddHeaderFooterInternalBuiltinCSS(R, FID, Name);
+
+ // If we have a preprocessor, relex the file and syntax highlight.
+ // We might not have a preprocessor if we come from a deserialized AST file,
+ // for example.
+
+ if (SyntaxHighlight) html::SyntaxHighlight(R, FID, PP);
+ if (HighlightMacros) html::HighlightMacros(R, FID, PP);
+ html::EscapeText(R, FID, false, true);
+
+ // Emit the HTML.
+ const RewriteBuffer &RewriteBuf = R.getEditBuffer(FID);
+ char *Buffer = (char*)malloc(RewriteBuf.size());
+ std::copy(RewriteBuf.begin(), RewriteBuf.end(), Buffer);
+ Out->write(Buffer, RewriteBuf.size());
+ free(Buffer);
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp
new file mode 100644
index 0000000..ca82262
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp
@@ -0,0 +1,603 @@
+//===--- InclusionRewriter.cpp - Rewrite includes into their expansions ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This code rewrites include invocations into their expansions. This gives you
+// a file with all included files merged into it.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/Frontend/Rewriters.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/PreprocessorOutputOptions.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/Pragma.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace llvm;
+
+namespace {
+
+class InclusionRewriter : public PPCallbacks {
+ /// Information about which #includes were actually performed,
+ /// created by preprocessor callbacks.
+ struct IncludedFile {
+ FileID Id;
+ SrcMgr::CharacteristicKind FileType;
+ IncludedFile(FileID Id, SrcMgr::CharacteristicKind FileType)
+ : Id(Id), FileType(FileType) {}
+ };
+ Preprocessor &PP; ///< Used to find inclusion directives.
+ SourceManager &SM; ///< Used to read and manage source files.
+ raw_ostream &OS; ///< The destination stream for rewritten contents.
+ StringRef MainEOL; ///< The line ending marker to use.
+ const llvm::MemoryBuffer *PredefinesBuffer; ///< The preprocessor predefines.
+ bool ShowLineMarkers; ///< Show #line markers.
+  bool UseLineDirectives; ///< Use #line directives rather than GNU markers.
+ /// Tracks where inclusions that change the file are found.
+ std::map<unsigned, IncludedFile> FileIncludes;
+ /// Tracks where inclusions that import modules are found.
+ std::map<unsigned, const Module *> ModuleIncludes;
+ /// Used transitively for building up the FileIncludes mapping over the
+ /// various \c PPCallbacks callbacks.
+ SourceLocation LastInclusionLocation;
+public:
+ InclusionRewriter(Preprocessor &PP, raw_ostream &OS, bool ShowLineMarkers,
+ bool UseLineDirectives);
+ bool Process(FileID FileId, SrcMgr::CharacteristicKind FileType);
+ void setPredefinesBuffer(const llvm::MemoryBuffer *Buf) {
+ PredefinesBuffer = Buf;
+ }
+ void detectMainFileEOL();
+private:
+ void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID) override;
+ void FileSkipped(const FileEntry &SkippedFile, const Token &FilenameTok,
+ SrcMgr::CharacteristicKind FileType) override;
+ void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
+ StringRef FileName, bool IsAngled,
+ CharSourceRange FilenameRange, const FileEntry *File,
+ StringRef SearchPath, StringRef RelativePath,
+ const Module *Imported) override;
+ void WriteLineInfo(const char *Filename, int Line,
+ SrcMgr::CharacteristicKind FileType,
+ StringRef Extra = StringRef());
+ void WriteImplicitModuleImport(const Module *Mod);
+ void OutputContentUpTo(const MemoryBuffer &FromFile,
+ unsigned &WriteFrom, unsigned WriteTo,
+ StringRef EOL, int &lines,
+ bool EnsureNewline);
+ void CommentOutDirective(Lexer &DirectivesLex, const Token &StartToken,
+ const MemoryBuffer &FromFile, StringRef EOL,
+ unsigned &NextToWrite, int &Lines);
+ bool HandleHasInclude(FileID FileId, Lexer &RawLex,
+ const DirectoryLookup *Lookup, Token &Tok,
+ bool &FileExists);
+ const IncludedFile *FindIncludeAtLocation(SourceLocation Loc) const;
+ const Module *FindModuleAtLocation(SourceLocation Loc) const;
+ StringRef NextIdentifierName(Lexer &RawLex, Token &RawToken);
+};
+
+} // end anonymous namespace
+
+/// Initializes an InclusionRewriter with a \p PP source and \p OS destination.
+InclusionRewriter::InclusionRewriter(Preprocessor &PP, raw_ostream &OS,
+ bool ShowLineMarkers,
+ bool UseLineDirectives)
+ : PP(PP), SM(PP.getSourceManager()), OS(OS), MainEOL("\n"),
+ PredefinesBuffer(nullptr), ShowLineMarkers(ShowLineMarkers),
+ UseLineDirectives(UseLineDirectives),
+ LastInclusionLocation(SourceLocation()) {}
+
+/// Write appropriate line information as either #line directives or GNU line
+/// markers depending on what mode we're in, including the \p Filename and
+/// \p Line we are located at, using the main file's line separator, and
+/// any \p Extra context specifiers in GNU line directives.
+void InclusionRewriter::WriteLineInfo(const char *Filename, int Line,
+ SrcMgr::CharacteristicKind FileType,
+ StringRef Extra) {
+ if (!ShowLineMarkers)
+ return;
+ if (UseLineDirectives) {
+ OS << "#line" << ' ' << Line << ' ' << '"';
+ OS.write_escaped(Filename);
+ OS << '"';
+ } else {
+ // Use GNU linemarkers as described here:
+ // http://gcc.gnu.org/onlinedocs/cpp/Preprocessor-Output.html
+ OS << '#' << ' ' << Line << ' ' << '"';
+ OS.write_escaped(Filename);
+ OS << '"';
+ if (!Extra.empty())
+ OS << Extra;
+ if (FileType == SrcMgr::C_System)
+ // "`3' This indicates that the following text comes from a system header
+ // file, so certain warnings should be suppressed."
+ OS << " 3";
+ else if (FileType == SrcMgr::C_ExternCSystem)
+ // as above for `3', plus "`4' This indicates that the following text
+ // should be treated as being wrapped in an implicit extern "C" block."
+ OS << " 3 4";
+ }
+ OS << MainEOL;
+}
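+
+// For illustration (not from the original source): for Filename "foo.h" and
+// Line 42 in a C_System header, the two modes emit
+//   #line 42 "foo.h"
+//   # 42 "foo.h" 3
+// respectively, each terminated by MainEOL; any Extra flags appear between
+// the filename and the trailing system-header digit.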
+
+void InclusionRewriter::WriteImplicitModuleImport(const Module *Mod) {
+ OS << "@import " << Mod->getFullModuleName() << ";"
+ << " /* clang -frewrite-includes: implicit import */" << MainEOL;
+}
+
+/// FileChanged - Whenever the preprocessor enters or exits a #include file
+/// it invokes this handler.
+void InclusionRewriter::FileChanged(SourceLocation Loc,
+ FileChangeReason Reason,
+ SrcMgr::CharacteristicKind NewFileType,
+ FileID) {
+ if (Reason != EnterFile)
+ return;
+ if (LastInclusionLocation.isInvalid())
+    // we didn't reach this file (e.g. the main file) via an inclusion directive
+ return;
+ FileID Id = FullSourceLoc(Loc, SM).getFileID();
+ auto P = FileIncludes.insert(std::make_pair(
+ LastInclusionLocation.getRawEncoding(), IncludedFile(Id, NewFileType)));
+ (void)P;
+ assert(P.second && "Unexpected revisitation of the same include directive");
+ LastInclusionLocation = SourceLocation();
+}
+
+/// Called whenever an inclusion is skipped due to canonical header protection
+/// macros.
+void InclusionRewriter::FileSkipped(const FileEntry &/*SkippedFile*/,
+ const Token &/*FilenameTok*/,
+ SrcMgr::CharacteristicKind /*FileType*/) {
+ assert(LastInclusionLocation.isValid() &&
+ "A file, that wasn't found via an inclusion directive, was skipped");
+ LastInclusionLocation = SourceLocation();
+}
+
+/// This should be called whenever the preprocessor encounters include
+/// directives. It does not say whether the file has been included, but it
+/// provides more information about the directive (hash location instead
+/// of location inside the included file). It is assumed that the matching
+/// FileChanged() or FileSkipped() is called after this.
+void InclusionRewriter::InclusionDirective(SourceLocation HashLoc,
+ const Token &/*IncludeTok*/,
+ StringRef /*FileName*/,
+ bool /*IsAngled*/,
+ CharSourceRange /*FilenameRange*/,
+ const FileEntry * /*File*/,
+ StringRef /*SearchPath*/,
+ StringRef /*RelativePath*/,
+ const Module *Imported) {
+ assert(LastInclusionLocation.isInvalid() &&
+ "Another inclusion directive was found before the previous one "
+ "was processed");
+ if (Imported) {
+ auto P = ModuleIncludes.insert(
+ std::make_pair(HashLoc.getRawEncoding(), Imported));
+ (void)P;
+ assert(P.second && "Unexpected revisitation of the same include directive");
+ } else
+ LastInclusionLocation = HashLoc;
+}
+
+/// Simple lookup for a SourceLocation (specifically one denoting the hash in
+/// an inclusion directive) in the map of inclusion information, FileChanges.
+const InclusionRewriter::IncludedFile *
+InclusionRewriter::FindIncludeAtLocation(SourceLocation Loc) const {
+ const auto I = FileIncludes.find(Loc.getRawEncoding());
+ if (I != FileIncludes.end())
+ return &I->second;
+ return nullptr;
+}
+
+/// Simple lookup for a SourceLocation (specifically one denoting the hash in
+/// an inclusion directive) in the map of module inclusion information.
+const Module *
+InclusionRewriter::FindModuleAtLocation(SourceLocation Loc) const {
+ const auto I = ModuleIncludes.find(Loc.getRawEncoding());
+ if (I != ModuleIncludes.end())
+ return I->second;
+ return nullptr;
+}
+
+/// Detect the likely line ending style of \p FromFile by examining the first
+/// newline found within it.
+static StringRef DetectEOL(const MemoryBuffer &FromFile) {
+ // Detect what line endings the file uses, so that added content does not mix
+ // the style. We need to check for "\r\n" first because "\n\r" will match
+ // "\r\n\r\n".
+ const char *Pos = strchr(FromFile.getBufferStart(), '\n');
+ if (!Pos)
+ return "\n";
+ if (Pos - 1 >= FromFile.getBufferStart() && Pos[-1] == '\r')
+ return "\r\n";
+ if (Pos + 1 < FromFile.getBufferEnd() && Pos[1] == '\r')
+ return "\n\r";
+ return "\n";
+}
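+
+// For illustration (not from the original source):
+//   "int a;\r\nint b;\r\n" yields "\r\n",
+//   "int a;\nint b;\n"     yields "\n",
+// and a buffer containing no newline at all defaults to "\n".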
+
+void InclusionRewriter::detectMainFileEOL() {
+ bool Invalid;
+ const MemoryBuffer &FromFile = *SM.getBuffer(SM.getMainFileID(), &Invalid);
+ assert(!Invalid);
+ if (Invalid)
+ return; // Should never happen, but whatever.
+ MainEOL = DetectEOL(FromFile);
+}
+
+/// Writes out bytes from \p FromFile, starting at \p NextToWrite and ending at
+/// \p WriteTo - 1.
+void InclusionRewriter::OutputContentUpTo(const MemoryBuffer &FromFile,
+ unsigned &WriteFrom, unsigned WriteTo,
+ StringRef LocalEOL, int &Line,
+ bool EnsureNewline) {
+ if (WriteTo <= WriteFrom)
+ return;
+ if (&FromFile == PredefinesBuffer) {
+ // Ignore the #defines of the predefines buffer.
+ WriteFrom = WriteTo;
+ return;
+ }
+
+ // If we would output half of a line ending, advance one character to output
+ // the whole line ending. All buffers are null terminated, so looking ahead
+ // one byte is safe.
+ if (LocalEOL.size() == 2 &&
+ LocalEOL[0] == (FromFile.getBufferStart() + WriteTo)[-1] &&
+ LocalEOL[1] == (FromFile.getBufferStart() + WriteTo)[0])
+ WriteTo++;
+
+ StringRef TextToWrite(FromFile.getBufferStart() + WriteFrom,
+ WriteTo - WriteFrom);
+
+ if (MainEOL == LocalEOL) {
+ OS << TextToWrite;
+ // count lines manually, it's faster than getPresumedLoc()
+ Line += TextToWrite.count(LocalEOL);
+ if (EnsureNewline && !TextToWrite.endswith(LocalEOL))
+ OS << MainEOL;
+ } else {
+ // Output the file one line at a time, rewriting the line endings as we go.
+ StringRef Rest = TextToWrite;
+ while (!Rest.empty()) {
+ StringRef LineText;
+ std::tie(LineText, Rest) = Rest.split(LocalEOL);
+ OS << LineText;
+ Line++;
+ if (!Rest.empty())
+ OS << MainEOL;
+ }
+ if (TextToWrite.endswith(LocalEOL) || EnsureNewline)
+ OS << MainEOL;
+ }
+ WriteFrom = WriteTo;
+}
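+
+// For illustration (not from the original source): with LocalEOL == "\r\n"
+// and MainEOL == "\n", the span "a();\r\nb();\r\n" is emitted line by line as
+// "a();\nb();\n" and Line is advanced by two.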
+
+/// Print characters from \p FromFile starting at \p NextToWrite up until the
+/// inclusion directive at \p StartToken, then print out the inclusion
+/// directive disabled by a #if directive, updating \p NextToWrite
+/// and \p Line to track the number of source lines visited and the progress
+/// through the \p FromFile buffer.
+void InclusionRewriter::CommentOutDirective(Lexer &DirectiveLex,
+ const Token &StartToken,
+ const MemoryBuffer &FromFile,
+ StringRef LocalEOL,
+ unsigned &NextToWrite, int &Line) {
+ OutputContentUpTo(FromFile, NextToWrite,
+ SM.getFileOffset(StartToken.getLocation()), LocalEOL, Line,
+ false);
+ Token DirectiveToken;
+ do {
+ DirectiveLex.LexFromRawLexer(DirectiveToken);
+ } while (!DirectiveToken.is(tok::eod) && DirectiveToken.isNot(tok::eof));
+ if (&FromFile == PredefinesBuffer) {
+ // OutputContentUpTo() would not output anything anyway.
+ return;
+ }
+ OS << "#if 0 /* expanded by -frewrite-includes */" << MainEOL;
+ OutputContentUpTo(FromFile, NextToWrite,
+ SM.getFileOffset(DirectiveToken.getLocation()) +
+ DirectiveToken.getLength(),
+ LocalEOL, Line, true);
+ OS << "#endif /* expanded by -frewrite-includes */" << MainEOL;
+}
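+
+// For illustration (not from the original source): an input line
+//   #include <stdio.h>
+// stays in the output but is disabled and bracketed as
+//   #if 0 /* expanded by -frewrite-includes */
+//   #include <stdio.h>
+//   #endif /* expanded by -frewrite-includes */
+// after which the caller emits the included file's rewritten content.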
+
+/// Find the next identifier in the pragma directive specified by \p RawToken.
+StringRef InclusionRewriter::NextIdentifierName(Lexer &RawLex,
+ Token &RawToken) {
+ RawLex.LexFromRawLexer(RawToken);
+ if (RawToken.is(tok::raw_identifier))
+ PP.LookUpIdentifierInfo(RawToken);
+ if (RawToken.is(tok::identifier))
+ return RawToken.getIdentifierInfo()->getName();
+ return StringRef();
+}
+
+// Expand __has_include and __has_include_next if possible. If there's no
+// definitive answer return false.
+bool InclusionRewriter::HandleHasInclude(
+ FileID FileId, Lexer &RawLex, const DirectoryLookup *Lookup, Token &Tok,
+ bool &FileExists) {
+ // Lex the opening paren.
+ RawLex.LexFromRawLexer(Tok);
+ if (Tok.isNot(tok::l_paren))
+ return false;
+
+ RawLex.LexFromRawLexer(Tok);
+
+ SmallString<128> FilenameBuffer;
+ StringRef Filename;
+ // Since the raw lexer doesn't give us angle_literals we have to parse them
+ // ourselves.
+ // FIXME: What to do if the file name is a macro?
+ if (Tok.is(tok::less)) {
+ RawLex.LexFromRawLexer(Tok);
+
+ FilenameBuffer += '<';
+ do {
+ if (Tok.is(tok::eod)) // Sanity check.
+ return false;
+
+ if (Tok.is(tok::raw_identifier))
+ PP.LookUpIdentifierInfo(Tok);
+
+ // Get the string piece.
+ SmallVector<char, 128> TmpBuffer;
+ bool Invalid = false;
+ StringRef TmpName = PP.getSpelling(Tok, TmpBuffer, &Invalid);
+ if (Invalid)
+ return false;
+
+ FilenameBuffer += TmpName;
+
+ RawLex.LexFromRawLexer(Tok);
+ } while (Tok.isNot(tok::greater));
+
+ FilenameBuffer += '>';
+ Filename = FilenameBuffer;
+ } else {
+ if (Tok.isNot(tok::string_literal))
+ return false;
+
+ bool Invalid = false;
+ Filename = PP.getSpelling(Tok, FilenameBuffer, &Invalid);
+ if (Invalid)
+ return false;
+ }
+
+ // Lex the closing paren.
+ RawLex.LexFromRawLexer(Tok);
+ if (Tok.isNot(tok::r_paren))
+ return false;
+
+ // Now ask HeaderInfo if it knows about the header.
+ // FIXME: Subframeworks aren't handled here. Do we care?
+ bool isAngled = PP.GetIncludeFilenameSpelling(Tok.getLocation(), Filename);
+ const DirectoryLookup *CurDir;
+ const FileEntry *FileEnt = PP.getSourceManager().getFileEntryForID(FileId);
+ SmallVector<std::pair<const FileEntry *, const DirectoryEntry *>, 1>
+ Includers;
+ Includers.push_back(std::make_pair(FileEnt, FileEnt->getDir()));
+ // FIXME: Why don't we call PP.LookupFile here?
+ const FileEntry *File = PP.getHeaderSearchInfo().LookupFile(
+ Filename, SourceLocation(), isAngled, nullptr, CurDir, Includers, nullptr,
+ nullptr, nullptr, nullptr, false);
+
+ FileExists = File != nullptr;
+ return true;
+}
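+
+// For illustration (not from the original source): in the #if/#elif handling
+// below, a directive such as
+//   #if __has_include(<stdio.h>)
+// is rewritten, when the lookup above finds the header, to
+//   #if (1)/*__has_include(<stdio.h>)*/
+// so the output no longer depends on the host's header layout.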
+
+/// Use a raw lexer to analyze \p FileId, incrementally copying parts of it
+/// and including content of included files recursively.
+bool InclusionRewriter::Process(FileID FileId,
+ SrcMgr::CharacteristicKind FileType)
+{
+ bool Invalid;
+ const MemoryBuffer &FromFile = *SM.getBuffer(FileId, &Invalid);
+ assert(!Invalid && "Attempting to process invalid inclusion");
+ const char *FileName = FromFile.getBufferIdentifier();
+ Lexer RawLex(FileId, &FromFile, PP.getSourceManager(), PP.getLangOpts());
+ RawLex.SetCommentRetentionState(false);
+
+ StringRef LocalEOL = DetectEOL(FromFile);
+
+ // Per the GNU docs: "1" indicates entering a new file.
+ if (FileId == SM.getMainFileID() || FileId == PP.getPredefinesFileID())
+ WriteLineInfo(FileName, 1, FileType, "");
+ else
+ WriteLineInfo(FileName, 1, FileType, " 1");
+
+ if (SM.getFileIDSize(FileId) == 0)
+ return false;
+
+ // The next byte to be copied from the source file, which may be non-zero if
+ // the lexer handled a BOM.
+ unsigned NextToWrite = SM.getFileOffset(RawLex.getSourceLocation());
+ assert(SM.getLineNumber(FileId, NextToWrite) == 1);
+ int Line = 1; // The current input file line number.
+
+ Token RawToken;
+ RawLex.LexFromRawLexer(RawToken);
+
+ // TODO: Consider adding a switch that strips possibly unimportant content,
+ // such as comments, to reduce the size of repro files.
+ while (RawToken.isNot(tok::eof)) {
+ if (RawToken.is(tok::hash) && RawToken.isAtStartOfLine()) {
+ RawLex.setParsingPreprocessorDirective(true);
+ Token HashToken = RawToken;
+ RawLex.LexFromRawLexer(RawToken);
+ if (RawToken.is(tok::raw_identifier))
+ PP.LookUpIdentifierInfo(RawToken);
+ if (RawToken.getIdentifierInfo() != nullptr) {
+ switch (RawToken.getIdentifierInfo()->getPPKeywordID()) {
+ case tok::pp_include:
+ case tok::pp_include_next:
+ case tok::pp_import: {
+ CommentOutDirective(RawLex, HashToken, FromFile, LocalEOL, NextToWrite,
+ Line);
+ if (FileId != PP.getPredefinesFileID())
+ WriteLineInfo(FileName, Line - 1, FileType, "");
+ StringRef LineInfoExtra;
+ SourceLocation Loc = HashToken.getLocation();
+ if (const Module *Mod = FindModuleAtLocation(Loc))
+ WriteImplicitModuleImport(Mod);
+ else if (const IncludedFile *Inc = FindIncludeAtLocation(Loc)) {
+ // include and recursively process the file
+ if (Process(Inc->Id, Inc->FileType)) {
+ // and set lineinfo back to this file, if the nested one was
+ // actually included
+ // `2' indicates returning to a file (after having included
+                // another file).
+ LineInfoExtra = " 2";
+ }
+ }
+ // fix up lineinfo (since commented out directive changed line
+ // numbers) for inclusions that were skipped due to header guards
+ WriteLineInfo(FileName, Line, FileType, LineInfoExtra);
+ break;
+ }
+ case tok::pp_pragma: {
+ StringRef Identifier = NextIdentifierName(RawLex, RawToken);
+ if (Identifier == "clang" || Identifier == "GCC") {
+ if (NextIdentifierName(RawLex, RawToken) == "system_header") {
+ // keep the directive in, commented out
+ CommentOutDirective(RawLex, HashToken, FromFile, LocalEOL,
+ NextToWrite, Line);
+ // update our own type
+ FileType = SM.getFileCharacteristic(RawToken.getLocation());
+ WriteLineInfo(FileName, Line, FileType);
+ }
+ } else if (Identifier == "once") {
+ // keep the directive in, commented out
+ CommentOutDirective(RawLex, HashToken, FromFile, LocalEOL,
+ NextToWrite, Line);
+ WriteLineInfo(FileName, Line, FileType);
+ }
+ break;
+ }
+ case tok::pp_if:
+ case tok::pp_elif: {
+ bool elif = (RawToken.getIdentifierInfo()->getPPKeywordID() ==
+ tok::pp_elif);
+ // Rewrite special builtin macros to avoid pulling in host details.
+ do {
+ // Walk over the directive.
+ RawLex.LexFromRawLexer(RawToken);
+ if (RawToken.is(tok::raw_identifier))
+ PP.LookUpIdentifierInfo(RawToken);
+
+ if (RawToken.is(tok::identifier)) {
+ bool HasFile;
+ SourceLocation Loc = RawToken.getLocation();
+
+ // Rewrite __has_include(x)
+ if (RawToken.getIdentifierInfo()->isStr("__has_include")) {
+ if (!HandleHasInclude(FileId, RawLex, nullptr, RawToken,
+ HasFile))
+ continue;
+ // Rewrite __has_include_next(x)
+ } else if (RawToken.getIdentifierInfo()->isStr(
+ "__has_include_next")) {
+ const DirectoryLookup *Lookup = PP.GetCurDirLookup();
+ if (Lookup)
+ ++Lookup;
+
+ if (!HandleHasInclude(FileId, RawLex, Lookup, RawToken,
+ HasFile))
+ continue;
+ } else {
+ continue;
+ }
+ // Replace the macro with (0) or (1), followed by the commented
+ // out macro for reference.
+ OutputContentUpTo(FromFile, NextToWrite, SM.getFileOffset(Loc),
+ LocalEOL, Line, false);
+ OS << '(' << (int) HasFile << ")/*";
+ OutputContentUpTo(FromFile, NextToWrite,
+ SM.getFileOffset(RawToken.getLocation()) +
+ RawToken.getLength(),
+ LocalEOL, Line, false);
+ OS << "*/";
+ }
+ } while (RawToken.isNot(tok::eod));
+ if (elif) {
+ OutputContentUpTo(FromFile, NextToWrite,
+ SM.getFileOffset(RawToken.getLocation()) +
+ RawToken.getLength(),
+ LocalEOL, Line, /*EnsureNewline=*/ true);
+ WriteLineInfo(FileName, Line, FileType);
+ }
+ break;
+ }
+ case tok::pp_endif:
+ case tok::pp_else: {
+ // We surround every #include by #if 0 to comment it out, but that
+ // changes line numbers. These are fixed up right after that, but
+ // the whole #include could be inside a preprocessor conditional
+ // that is not processed. So it is necessary to fix the line
+          // numbers on the next line after each #else/#endif as well.
+ RawLex.SetKeepWhitespaceMode(true);
+ do {
+ RawLex.LexFromRawLexer(RawToken);
+ } while (RawToken.isNot(tok::eod) && RawToken.isNot(tok::eof));
+ OutputContentUpTo(FromFile, NextToWrite,
+ SM.getFileOffset(RawToken.getLocation()) +
+ RawToken.getLength(),
+ LocalEOL, Line, /*EnsureNewline=*/ true);
+ WriteLineInfo(FileName, Line, FileType);
+        RawLex.SetKeepWhitespaceMode(false);
+        break;
+      }
+ default:
+ break;
+ }
+ }
+ RawLex.setParsingPreprocessorDirective(false);
+ }
+ RawLex.LexFromRawLexer(RawToken);
+ }
+ OutputContentUpTo(FromFile, NextToWrite,
+ SM.getFileOffset(SM.getLocForEndOfFile(FileId)), LocalEOL,
+ Line, /*EnsureNewline=*/true);
+ return true;
+}
+
+/// RewriteIncludesInInput - Implement -frewrite-includes mode.
+void clang::RewriteIncludesInInput(Preprocessor &PP, raw_ostream *OS,
+ const PreprocessorOutputOptions &Opts) {
+ SourceManager &SM = PP.getSourceManager();
+ InclusionRewriter *Rewrite = new InclusionRewriter(
+ PP, *OS, Opts.ShowLineMarkers, Opts.UseLineDirectives);
+ Rewrite->detectMainFileEOL();
+
+ PP.addPPCallbacks(std::unique_ptr<PPCallbacks>(Rewrite));
+ PP.IgnorePragmas();
+
+ // First let the preprocessor process the entire file and call callbacks.
+ // Callbacks will record which #include's were actually performed.
+ PP.EnterMainSourceFile();
+ Token Tok;
+ // Only preprocessor directives matter here, so disable macro expansion
+ // everywhere else as an optimization.
+ // TODO: It would be even faster if the preprocessor could be switched
+ // to a mode where it would parse only preprocessor directives and comments,
+ // nothing else matters for parsing or processing.
+ PP.SetMacroExpansionOnlyInDirectives();
+ do {
+ PP.Lex(Tok);
+ } while (Tok.isNot(tok::eof));
+ Rewrite->setPredefinesBuffer(SM.getBuffer(PP.getPredefinesFileID()));
+ Rewrite->Process(PP.getPredefinesFileID(), SrcMgr::C_User);
+ Rewrite->Process(SM.getMainFileID(), SrcMgr::C_User);
+ OS->flush();
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteMacros.cpp b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteMacros.cpp
new file mode 100644
index 0000000..0d0a991
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteMacros.cpp
@@ -0,0 +1,217 @@
+//===--- RewriteMacros.cpp - Rewrite macros into their expansions ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This code rewrites macro invocations into their expansions. This gives you
+// a macro expanded file that retains comments and #includes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/Frontend/Rewriters.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Rewrite/Core/Rewriter.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdio>
+#include <memory>
+
+using namespace clang;
+
+/// isSameToken - Return true if the two specified tokens have the same
+/// content.
+static bool isSameToken(Token &RawTok, Token &PPTok) {
+ // If two tokens have the same kind and the same identifier info, they are
+ // obviously the same.
+ if (PPTok.getKind() == RawTok.getKind() &&
+ PPTok.getIdentifierInfo() == RawTok.getIdentifierInfo())
+ return true;
+
+ // Otherwise, if they are different but have the same identifier info, they
+ // are also considered to be the same. This allows keywords and raw lexed
+ // identifiers with the same name to be treated the same.
+ if (PPTok.getIdentifierInfo() &&
+ PPTok.getIdentifierInfo() == RawTok.getIdentifierInfo())
+ return true;
+
+ return false;
+}
+
+
+/// GetNextRawTok - Return the next raw token in the stream, skipping over
+/// comments if ReturnComment is false.
+static const Token &GetNextRawTok(const std::vector<Token> &RawTokens,
+ unsigned &CurTok, bool ReturnComment) {
+ assert(CurTok < RawTokens.size() && "Overran eof!");
+
+ // If the client doesn't want comments and we have one, skip it.
+ if (!ReturnComment && RawTokens[CurTok].is(tok::comment))
+ ++CurTok;
+
+ return RawTokens[CurTok++];
+}
+
+
+/// LexRawTokensFromMainFile - Lex all the raw tokens from the main file into
+/// the specified vector.
+static void LexRawTokensFromMainFile(Preprocessor &PP,
+ std::vector<Token> &RawTokens) {
+ SourceManager &SM = PP.getSourceManager();
+
+ // Create a lexer to lex all the tokens of the main file in raw mode. Even
+ // though it is in raw mode, it will not return comments.
+ const llvm::MemoryBuffer *FromFile = SM.getBuffer(SM.getMainFileID());
+ Lexer RawLex(SM.getMainFileID(), FromFile, SM, PP.getLangOpts());
+
+ // Switch on comment lexing because we really do want them.
+ RawLex.SetCommentRetentionState(true);
+
+ Token RawTok;
+ do {
+ RawLex.LexFromRawLexer(RawTok);
+
+ // If we have an identifier with no identifier info for our raw token, look
+    // up the identifier info. This is important for equality comparison of
+ // identifier tokens.
+ if (RawTok.is(tok::raw_identifier))
+ PP.LookUpIdentifierInfo(RawTok);
+
+ RawTokens.push_back(RawTok);
+ } while (RawTok.isNot(tok::eof));
+}
+
+
+/// RewriteMacrosInInput - Implement -rewrite-macros mode.
+void clang::RewriteMacrosInInput(Preprocessor &PP, raw_ostream *OS) {
+ SourceManager &SM = PP.getSourceManager();
+
+ Rewriter Rewrite;
+ Rewrite.setSourceMgr(SM, PP.getLangOpts());
+ RewriteBuffer &RB = Rewrite.getEditBuffer(SM.getMainFileID());
+
+ std::vector<Token> RawTokens;
+ LexRawTokensFromMainFile(PP, RawTokens);
+ unsigned CurRawTok = 0;
+ Token RawTok = GetNextRawTok(RawTokens, CurRawTok, false);
+
+
+ // Get the first preprocessing token.
+ PP.EnterMainSourceFile();
+ Token PPTok;
+ PP.Lex(PPTok);
+
+ // Preprocess the input file in parallel with raw lexing the main file. Ignore
+ // all tokens that are preprocessed from a file other than the main file (e.g.
+ // a header). If we see tokens that are in the preprocessed file but not the
+ // lexed file, we have a macro expansion. If we see tokens in the lexed file
+ // that aren't in the preprocessed view, we have macros that expand to no
+ // tokens, or macro arguments etc.
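+  // For illustration (not from the original source): given "#define N 4" and
+  // a later use "int a[N];", the raw stream sees the token N where the
+  // preprocessed stream sees 4; the loop below comments out the raw token and
+  // inserts the expansion, yielding roughly "int a[ 4 /*N*/];".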
+ while (RawTok.isNot(tok::eof) || PPTok.isNot(tok::eof)) {
+ SourceLocation PPLoc = SM.getExpansionLoc(PPTok.getLocation());
+
+ // If PPTok is from a different source file, ignore it.
+ if (!SM.isWrittenInMainFile(PPLoc)) {
+ PP.Lex(PPTok);
+ continue;
+ }
+
+    // If the raw file hits a preprocessor directive, there will be extra
+    // tokens in the raw file that don't exist in the preprocessed file.
+    // However, we choose to preserve them in the output file and otherwise
+    // handle them specially.
+ if (RawTok.is(tok::hash) && RawTok.isAtStartOfLine()) {
+ // If this is a #warning directive or #pragma mark (GNU extensions),
+ // comment the line out.
+ if (RawTokens[CurRawTok].is(tok::identifier)) {
+ const IdentifierInfo *II = RawTokens[CurRawTok].getIdentifierInfo();
+ if (II->getName() == "warning") {
+ // Comment out #warning.
+ RB.InsertTextAfter(SM.getFileOffset(RawTok.getLocation()), "//");
+ } else if (II->getName() == "pragma" &&
+ RawTokens[CurRawTok+1].is(tok::identifier) &&
+ (RawTokens[CurRawTok+1].getIdentifierInfo()->getName() ==
+ "mark")) {
+ // Comment out #pragma mark.
+ RB.InsertTextAfter(SM.getFileOffset(RawTok.getLocation()), "//");
+ }
+ }
+
+ // Otherwise, if this is a #include or some other directive, just leave it
+ // in the file by skipping over the line.
+ RawTok = GetNextRawTok(RawTokens, CurRawTok, false);
+ while (!RawTok.isAtStartOfLine() && RawTok.isNot(tok::eof))
+ RawTok = GetNextRawTok(RawTokens, CurRawTok, false);
+ continue;
+ }
+
+ // Okay, both tokens are from the same file. Get their offsets from the
+ // start of the file.
+ unsigned PPOffs = SM.getFileOffset(PPLoc);
+ unsigned RawOffs = SM.getFileOffset(RawTok.getLocation());
+
+ // If the offsets are the same and the token kind is the same, ignore them.
+ if (PPOffs == RawOffs && isSameToken(RawTok, PPTok)) {
+ RawTok = GetNextRawTok(RawTokens, CurRawTok, false);
+ PP.Lex(PPTok);
+ continue;
+ }
+
+ // If the PP token is farther along than the raw token, something was
+ // deleted. Comment out the raw token.
+ if (RawOffs <= PPOffs) {
+ // Comment out a whole run of tokens instead of bracketing each one with
+ // comments. Add a leading space if RawTok didn't have one.
+ bool HasSpace = RawTok.hasLeadingSpace();
+ RB.InsertTextAfter(RawOffs, &" /*"[HasSpace]);
+ unsigned EndPos;
+
+ do {
+ EndPos = RawOffs+RawTok.getLength();
+
+ RawTok = GetNextRawTok(RawTokens, CurRawTok, true);
+ RawOffs = SM.getFileOffset(RawTok.getLocation());
+
+ if (RawTok.is(tok::comment)) {
+ // Skip past the comment.
+ RawTok = GetNextRawTok(RawTokens, CurRawTok, false);
+ break;
+ }
+
+ } while (RawOffs <= PPOffs && !RawTok.isAtStartOfLine() &&
+ (PPOffs != RawOffs || !isSameToken(RawTok, PPTok)));
+
+ RB.InsertTextBefore(EndPos, "*/");
+ continue;
+ }
+
+    // Otherwise, there was a replacement or an expansion. Insert the new
+    // tokens in the output buffer. Insert the whole run of new tokens at once
+    // to get them in the right order.
+ unsigned InsertPos = PPOffs;
+ std::string Expansion;
+ while (PPOffs < RawOffs) {
+ Expansion += ' ' + PP.getSpelling(PPTok);
+ PP.Lex(PPTok);
+ PPLoc = SM.getExpansionLoc(PPTok.getLocation());
+ PPOffs = SM.getFileOffset(PPLoc);
+ }
+ Expansion += ' ';
+ RB.InsertTextBefore(InsertPos, Expansion);
+ }
+
+ // Get the buffer corresponding to MainFileID. If we haven't changed it, then
+ // we are done.
+ if (const RewriteBuffer *RewriteBuf =
+ Rewrite.getRewriteBufferFor(SM.getMainFileID())) {
+ //printf("Changed:\n");
+ *OS << std::string(RewriteBuf->begin(), RewriteBuf->end());
+ } else {
+ fprintf(stderr, "No changes\n");
+ }
+ OS->flush();
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
new file mode 100644
index 0000000..be68d42
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
@@ -0,0 +1,7708 @@
+//===--- RewriteModernObjC.cpp - Playground for the code rewriter ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Hacks and fun related to the code rewriter.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/Frontend/ASTConsumers.h"
+#include "clang/AST/AST.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Rewrite/Core/Rewriter.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <memory>
+
+#ifdef CLANG_ENABLE_OBJC_REWRITER
+
+using namespace clang;
+using llvm::utostr;
+
+namespace {
+ class RewriteModernObjC : public ASTConsumer {
+ protected:
+
+ enum {
+ BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)),
+ block, ... */
+ BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */
+ BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the
+ __block variable */
+ BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy
+ helpers */
+ BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
+ support routines */
+ BLOCK_BYREF_CURRENT_MAX = 256
+ };
+
+ enum {
+ BLOCK_NEEDS_FREE = (1 << 24),
+ BLOCK_HAS_COPY_DISPOSE = (1 << 25),
+ BLOCK_HAS_CXX_OBJ = (1 << 26),
+ BLOCK_IS_GC = (1 << 27),
+ BLOCK_IS_GLOBAL = (1 << 28),
+ BLOCK_HAS_DESCRIPTOR = (1 << 29)
+ };
+
+ Rewriter Rewrite;
+ DiagnosticsEngine &Diags;
+ const LangOptions &LangOpts;
+ ASTContext *Context;
+ SourceManager *SM;
+ TranslationUnitDecl *TUDecl;
+ FileID MainFileID;
+ const char *MainFileStart, *MainFileEnd;
+ Stmt *CurrentBody;
+ ParentMap *PropParentMap; // created lazily.
+ std::string InFileName;
+ raw_ostream* OutFile;
+ std::string Preamble;
+
+ TypeDecl *ProtocolTypeDecl;
+ VarDecl *GlobalVarDecl;
+ Expr *GlobalConstructionExp;
+ unsigned RewriteFailedDiag;
+ unsigned GlobalBlockRewriteFailedDiag;
+ // ObjC string constant support.
+ unsigned NumObjCStringLiterals;
+ VarDecl *ConstantStringClassReference;
+ RecordDecl *NSStringRecord;
+
+ // ObjC foreach break/continue generation support.
+ int BcLabelCount;
+
+ unsigned TryFinallyContainsReturnDiag;
+ // Needed for super.
+ ObjCMethodDecl *CurMethodDef;
+ RecordDecl *SuperStructDecl;
+ RecordDecl *ConstantStringDecl;
+
+ FunctionDecl *MsgSendFunctionDecl;
+ FunctionDecl *MsgSendSuperFunctionDecl;
+ FunctionDecl *MsgSendStretFunctionDecl;
+ FunctionDecl *MsgSendSuperStretFunctionDecl;
+ FunctionDecl *MsgSendFpretFunctionDecl;
+ FunctionDecl *GetClassFunctionDecl;
+ FunctionDecl *GetMetaClassFunctionDecl;
+ FunctionDecl *GetSuperClassFunctionDecl;
+ FunctionDecl *SelGetUidFunctionDecl;
+ FunctionDecl *CFStringFunctionDecl;
+ FunctionDecl *SuperConstructorFunctionDecl;
+ FunctionDecl *CurFunctionDef;
+
+ /* Misc. containers needed for meta-data rewrite. */
+ SmallVector<ObjCImplementationDecl *, 8> ClassImplementation;
+ SmallVector<ObjCCategoryImplDecl *, 8> CategoryImplementation;
+ llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCSynthesizedStructs;
+ llvm::SmallPtrSet<ObjCProtocolDecl*, 8> ObjCSynthesizedProtocols;
+ llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCWrittenInterfaces;
+ llvm::SmallPtrSet<TagDecl*, 32> GlobalDefinedTags;
+ SmallVector<ObjCInterfaceDecl*, 32> ObjCInterfacesSeen;
+ /// DefinedNonLazyClasses - List of defined "non-lazy" classes.
+ SmallVector<ObjCInterfaceDecl*, 8> DefinedNonLazyClasses;
+
+ /// DefinedNonLazyCategories - List of defined "non-lazy" categories.
+ SmallVector<ObjCCategoryDecl *, 8> DefinedNonLazyCategories;
+
+ SmallVector<Stmt *, 32> Stmts;
+ SmallVector<int, 8> ObjCBcLabelNo;
+ // Remember all the @protocol(<expr>) expressions.
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 32> ProtocolExprDecls;
+
+ llvm::DenseSet<uint64_t> CopyDestroyCache;
+
+ // Block expressions.
+ SmallVector<BlockExpr *, 32> Blocks;
+ SmallVector<int, 32> InnerDeclRefsCount;
+ SmallVector<DeclRefExpr *, 32> InnerDeclRefs;
+
+ SmallVector<DeclRefExpr *, 32> BlockDeclRefs;
+
+
+ // Block related declarations.
+ SmallVector<ValueDecl *, 8> BlockByCopyDecls;
+ llvm::SmallPtrSet<ValueDecl *, 8> BlockByCopyDeclsPtrSet;
+ SmallVector<ValueDecl *, 8> BlockByRefDecls;
+ llvm::SmallPtrSet<ValueDecl *, 8> BlockByRefDeclsPtrSet;
+ llvm::DenseMap<ValueDecl *, unsigned> BlockByRefDeclNo;
+ llvm::SmallPtrSet<ValueDecl *, 8> ImportedBlockDecls;
+ llvm::SmallPtrSet<VarDecl *, 8> ImportedLocalExternalDecls;
+
+ llvm::DenseMap<BlockExpr *, std::string> RewrittenBlockExprs;
+ llvm::DenseMap<ObjCInterfaceDecl *,
+ llvm::SmallPtrSet<ObjCIvarDecl *, 8> > ReferencedIvars;
+
+ // ivar bitfield grouping containers
+ llvm::DenseSet<const ObjCInterfaceDecl *> ObjCInterefaceHasBitfieldGroups;
+ llvm::DenseMap<const ObjCIvarDecl* , unsigned> IvarGroupNumber;
+ // This container maps an <class, group number for ivar> tuple to the type
+ // of the struct where the bitfield belongs.
+ llvm::DenseMap<std::pair<const ObjCInterfaceDecl*, unsigned>, QualType> GroupRecordType;
+ SmallVector<FunctionDecl*, 32> FunctionDefinitionsSeen;
+
+    // This maps an original source AST to its rewritten form. This allows
+ // us to avoid rewriting the same node twice (which is very uncommon).
+ // This is needed to support some of the exotic property rewriting.
+ llvm::DenseMap<Stmt *, Stmt *> ReplacedNodes;
+
+ // Needed for header files being rewritten
+ bool IsHeader;
+ bool SilenceRewriteMacroWarning;
+ bool GenerateLineInfo;
+ bool objc_impl_method;
+
+ bool DisableReplaceStmt;
+ class DisableReplaceStmtScope {
+ RewriteModernObjC &R;
+ bool SavedValue;
+
+ public:
+ DisableReplaceStmtScope(RewriteModernObjC &R)
+ : R(R), SavedValue(R.DisableReplaceStmt) {
+ R.DisableReplaceStmt = true;
+ }
+ ~DisableReplaceStmtScope() {
+ R.DisableReplaceStmt = SavedValue;
+ }
+ };
+ void InitializeCommon(ASTContext &context);
+
+ public:
+ llvm::DenseMap<ObjCMethodDecl*, std::string> MethodInternalNames;
+ // Top Level Driver code.
+ bool HandleTopLevelDecl(DeclGroupRef D) override {
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
+ if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(*I)) {
+ if (!Class->isThisDeclarationADefinition()) {
+ RewriteForwardClassDecl(D);
+ break;
+ } else {
+ // Keep track of all interface declarations seen.
+ ObjCInterfacesSeen.push_back(Class);
+ break;
+ }
+ }
+
+ if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(*I)) {
+ if (!Proto->isThisDeclarationADefinition()) {
+ RewriteForwardProtocolDecl(D);
+ break;
+ }
+ }
+
+ if (FunctionDecl *FDecl = dyn_cast<FunctionDecl>(*I)) {
+          // Under the modern ABI, we cannot translate the body of a function
+          // until all class extensions and their implementations are seen.
+ // This is because they may introduce new bitfields which must go
+ // into their grouping struct.
+ if (FDecl->isThisDeclarationADefinition() &&
+              // Not C functions defined inside an ObjC container.
+ !FDecl->isTopLevelDeclInObjCContainer()) {
+ FunctionDefinitionsSeen.push_back(FDecl);
+ break;
+ }
+ }
+ HandleTopLevelSingleDecl(*I);
+ }
+ return true;
+ }
+
+ void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) override {
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(*I)) {
+ if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
+ RewriteBlockPointerDecl(TD);
+ else if (TD->getUnderlyingType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
+ else
+ RewriteObjCQualifiedInterfaceTypes(TD);
+ }
+ }
+ return;
+ }
+
+ void HandleTopLevelSingleDecl(Decl *D);
+ void HandleDeclInMainFile(Decl *D);
+ RewriteModernObjC(std::string inFile, raw_ostream *OS,
+ DiagnosticsEngine &D, const LangOptions &LOpts,
+ bool silenceMacroWarn, bool LineInfo);
+
+ ~RewriteModernObjC() override {}
+
+ void HandleTranslationUnit(ASTContext &C) override;
+
+ void ReplaceStmt(Stmt *Old, Stmt *New) {
+ ReplaceStmtWithRange(Old, New, Old->getSourceRange());
+ }
+
+ void ReplaceStmtWithRange(Stmt *Old, Stmt *New, SourceRange SrcRange) {
+ assert(Old != nullptr && New != nullptr && "Expected non-null Stmt's");
+
+ Stmt *ReplacingStmt = ReplacedNodes[Old];
+ if (ReplacingStmt)
+ return; // We can't rewrite the same node twice.
+
+ if (DisableReplaceStmt)
+ return;
+
+ // Measure the old text.
+ int Size = Rewrite.getRangeSize(SrcRange);
+ if (Size == -1) {
+ Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
+ << Old->getSourceRange();
+ return;
+ }
+ // Get the new text.
+ std::string SStr;
+ llvm::raw_string_ostream S(SStr);
+ New->printPretty(S, nullptr, PrintingPolicy(LangOpts));
+ const std::string &Str = S.str();
+
+ // If replacement succeeded or warning disabled return with no warning.
+ if (!Rewrite.ReplaceText(SrcRange.getBegin(), Size, Str)) {
+ ReplacedNodes[Old] = New;
+ return;
+ }
+ if (SilenceRewriteMacroWarning)
+ return;
+ Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
+ << Old->getSourceRange();
+ }
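+
+  // For illustration (not from the original source): ReplaceStmt on an
+  // expression whose original text can be measured pretty-prints the new
+  // statement and hands it to Rewrite.ReplaceText(); when the range spans a
+  // macro expansion, getRangeSize() returns -1 and RewriteFailedDiag is
+  // emitted instead.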
+
+ void InsertText(SourceLocation Loc, StringRef Str,
+ bool InsertAfter = true) {
+ // If insertion succeeded or warning disabled return with no warning.
+ if (!Rewrite.InsertText(Loc, Str, InsertAfter) ||
+ SilenceRewriteMacroWarning)
+ return;
+
+ Diags.Report(Context->getFullLoc(Loc), RewriteFailedDiag);
+ }
+
+ void ReplaceText(SourceLocation Start, unsigned OrigLength,
+ StringRef Str) {
+ // If removal succeeded or warning disabled return with no warning.
+ if (!Rewrite.ReplaceText(Start, OrigLength, Str) ||
+ SilenceRewriteMacroWarning)
+ return;
+
+ Diags.Report(Context->getFullLoc(Start), RewriteFailedDiag);
+ }
+
+ // Syntactic Rewriting.
+ void RewriteRecordBody(RecordDecl *RD);
+ void RewriteInclude();
+ void RewriteLineDirective(const Decl *D);
+ void ConvertSourceLocationToLineDirective(SourceLocation Loc,
+ std::string &LineString);
+ void RewriteForwardClassDecl(DeclGroupRef D);
+ void RewriteForwardClassDecl(const SmallVectorImpl<Decl *> &DG);
+ void RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl,
+ const std::string &typedefString);
+ void RewriteImplementations();
+ void RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
+ ObjCImplementationDecl *IMD,
+ ObjCCategoryImplDecl *CID);
+ void RewriteInterfaceDecl(ObjCInterfaceDecl *Dcl);
+ void RewriteImplementationDecl(Decl *Dcl);
+ void RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl,
+ ObjCMethodDecl *MDecl, std::string &ResultStr);
+ void RewriteTypeIntoString(QualType T, std::string &ResultStr,
+ const FunctionType *&FPRetType);
+ void RewriteByRefString(std::string &ResultStr, const std::string &Name,
+ ValueDecl *VD, bool def=false);
+ void RewriteCategoryDecl(ObjCCategoryDecl *Dcl);
+ void RewriteProtocolDecl(ObjCProtocolDecl *Dcl);
+ void RewriteForwardProtocolDecl(DeclGroupRef D);
+ void RewriteForwardProtocolDecl(const SmallVectorImpl<Decl *> &DG);
+ void RewriteMethodDeclaration(ObjCMethodDecl *Method);
+ void RewriteProperty(ObjCPropertyDecl *prop);
+ void RewriteFunctionDecl(FunctionDecl *FD);
+ void RewriteBlockPointerType(std::string& Str, QualType Type);
+ void RewriteBlockPointerTypeVariable(std::string& Str, ValueDecl *VD);
+ void RewriteBlockLiteralFunctionDecl(FunctionDecl *FD);
+ void RewriteObjCQualifiedInterfaceTypes(Decl *Dcl);
+ void RewriteTypeOfDecl(VarDecl *VD);
+ void RewriteObjCQualifiedInterfaceTypes(Expr *E);
+
+ std::string getIvarAccessString(ObjCIvarDecl *D);
+
+ // Expression Rewriting.
+ Stmt *RewriteFunctionBodyOrGlobalInitializer(Stmt *S);
+ Stmt *RewriteAtEncode(ObjCEncodeExpr *Exp);
+ Stmt *RewritePropertyOrImplicitGetter(PseudoObjectExpr *Pseudo);
+ Stmt *RewritePropertyOrImplicitSetter(PseudoObjectExpr *Pseudo);
+ Stmt *RewriteAtSelector(ObjCSelectorExpr *Exp);
+ Stmt *RewriteMessageExpr(ObjCMessageExpr *Exp);
+ Stmt *RewriteObjCStringLiteral(ObjCStringLiteral *Exp);
+ Stmt *RewriteObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Exp);
+ Stmt *RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp);
+ Stmt *RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp);
+ Stmt *RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral *Exp);
+ Stmt *RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp);
+ Stmt *RewriteObjCTryStmt(ObjCAtTryStmt *S);
+ Stmt *RewriteObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S);
+ Stmt *RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S);
+ Stmt *RewriteObjCThrowStmt(ObjCAtThrowStmt *S);
+ Stmt *RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
+ SourceLocation OrigEnd);
+ Stmt *RewriteBreakStmt(BreakStmt *S);
+ Stmt *RewriteContinueStmt(ContinueStmt *S);
+ void RewriteCastExpr(CStyleCastExpr *CE);
+ void RewriteImplicitCastObjCExpr(CastExpr *IE);
+ void RewriteLinkageSpec(LinkageSpecDecl *LSD);
+
+  // Computes the ivar bitfield group number.
+ unsigned ObjCIvarBitfieldGroupNo(ObjCIvarDecl *IV);
+ // Names field decl. for ivar bitfield group.
+ void ObjCIvarBitfieldGroupDecl(ObjCIvarDecl *IV, std::string &Result);
+ // Names struct type for ivar bitfield group.
+ void ObjCIvarBitfieldGroupType(ObjCIvarDecl *IV, std::string &Result);
+ // Names symbol for ivar bitfield group field offset.
+ void ObjCIvarBitfieldGroupOffset(ObjCIvarDecl *IV, std::string &Result);
+ // Given an ivar bitfield, it builds (or finds) its group record type.
+ QualType GetGroupRecordTypeForObjCIvarBitfield(ObjCIvarDecl *IV);
+ QualType SynthesizeBitfieldGroupStructType(
+ ObjCIvarDecl *IV,
+ SmallVectorImpl<ObjCIvarDecl *> &IVars);
+
+ // Block rewriting.
+ void RewriteBlocksInFunctionProtoType(QualType funcType, NamedDecl *D);
+
+ // Block specific rewrite rules.
+ void RewriteBlockPointerDecl(NamedDecl *VD);
+ void RewriteByRefVar(VarDecl *VD, bool firstDecl, bool lastDecl);
+ Stmt *RewriteBlockDeclRefExpr(DeclRefExpr *VD);
+ Stmt *RewriteLocalVariableExternalStorage(DeclRefExpr *DRE);
+ void RewriteBlockPointerFunctionArgs(FunctionDecl *FD);
+
+ void RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
+ std::string &Result);
+
+ void RewriteObjCFieldDecl(FieldDecl *fieldDecl, std::string &Result);
+ bool IsTagDefinedInsideClass(ObjCContainerDecl *IDecl, TagDecl *Tag,
+ bool &IsNamedDefinition);
+ void RewriteLocallyDefinedNamedAggregates(FieldDecl *fieldDecl,
+ std::string &Result);
+
+ bool RewriteObjCFieldDeclType(QualType &Type, std::string &Result);
+
+ void RewriteIvarOffsetSymbols(ObjCInterfaceDecl *CDecl,
+ std::string &Result);
+
+ void Initialize(ASTContext &context) override;
+
+ // Misc. AST transformation routines. Sometimes they end up calling
+ // rewriting routines on the new ASTs.
+ CallExpr *SynthesizeCallToFunctionDecl(FunctionDecl *FD,
+ ArrayRef<Expr *> Args,
+ SourceLocation StartLoc=SourceLocation(),
+ SourceLocation EndLoc=SourceLocation());
+
+ Expr *SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor,
+ QualType returnType,
+ SmallVectorImpl<QualType> &ArgTypes,
+ SmallVectorImpl<Expr*> &MsgExprs,
+ ObjCMethodDecl *Method);
+
+ Stmt *SynthMessageExpr(ObjCMessageExpr *Exp,
+ SourceLocation StartLoc=SourceLocation(),
+ SourceLocation EndLoc=SourceLocation());
+
+ void SynthCountByEnumWithState(std::string &buf);
+ void SynthMsgSendFunctionDecl();
+ void SynthMsgSendSuperFunctionDecl();
+ void SynthMsgSendStretFunctionDecl();
+ void SynthMsgSendFpretFunctionDecl();
+ void SynthMsgSendSuperStretFunctionDecl();
+ void SynthGetClassFunctionDecl();
+ void SynthGetMetaClassFunctionDecl();
+ void SynthGetSuperClassFunctionDecl();
+ void SynthSelGetUidFunctionDecl();
+ void SynthSuperConstructorFunctionDecl();
+
+ // Rewriting metadata
+ template<typename MethodIterator>
+ void RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
+ MethodIterator MethodEnd,
+ bool IsInstanceMethod,
+ StringRef prefix,
+ StringRef ClassName,
+ std::string &Result);
+ void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol,
+ std::string &Result);
+ void RewriteObjCProtocolListMetaData(
+ const ObjCList<ObjCProtocolDecl> &Prots,
+ StringRef prefix, StringRef ClassName, std::string &Result);
+ void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
+ std::string &Result);
+ void RewriteClassSetupInitHook(std::string &Result);
+
+ void RewriteMetaDataIntoBuffer(std::string &Result);
+ void WriteImageInfo(std::string &Result);
+ void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl,
+ std::string &Result);
+ void RewriteCategorySetupInitHook(std::string &Result);
+
+ // Rewriting ivar
+ void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
+ std::string &Result);
+ Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV);
+
+ std::string SynthesizeByrefCopyDestroyHelper(VarDecl *VD, int flag);
+ std::string SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
+ StringRef funcName, std::string Tag);
+ std::string SynthesizeBlockFunc(BlockExpr *CE, int i,
+ StringRef funcName, std::string Tag);
+ std::string SynthesizeBlockImpl(BlockExpr *CE,
+ std::string Tag, std::string Desc);
+ std::string SynthesizeBlockDescriptor(std::string DescTag,
+ std::string ImplTag,
+ int i, StringRef funcName,
+ unsigned hasCopy);
+ Stmt *SynthesizeBlockCall(CallExpr *Exp, const Expr* BlockExp);
+ void SynthesizeBlockLiterals(SourceLocation FunLocStart,
+ StringRef FunName);
+ FunctionDecl *SynthBlockInitFunctionDecl(StringRef name);
+ Stmt *SynthBlockInitExpr(BlockExpr *Exp,
+ const SmallVectorImpl<DeclRefExpr *> &InnerBlockDeclRefs);
+
+ // Misc. helper routines.
+ QualType getProtocolType();
+ void WarnAboutReturnGotoStmts(Stmt *S);
+ void CheckFunctionPointerDecl(QualType dType, NamedDecl *ND);
+ void InsertBlockLiteralsWithinFunction(FunctionDecl *FD);
+ void InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD);
+
+ bool IsDeclStmtInForeachHeader(DeclStmt *DS);
+ void CollectBlockDeclRefInfo(BlockExpr *Exp);
+ void GetBlockDeclRefExprs(Stmt *S);
+ void GetInnerBlockDeclRefExprs(Stmt *S,
+ SmallVectorImpl<DeclRefExpr *> &InnerBlockDeclRefs,
+ llvm::SmallPtrSetImpl<const DeclContext *> &InnerContexts);
+
+ // We avoid calling Type::isBlockPointerType(), since it operates on the
+ // canonical type. We only care if the top-level type is a closure pointer.
+ bool isTopLevelBlockPointerType(QualType T) {
+ return isa<BlockPointerType>(T);
+ }
+
+  /// convertBlockPointerToFunctionPointer - Converts a block-pointer type
+  /// to a function-pointer type; returns true on success and false
+  /// otherwise.
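+  /// For example, the block-pointer type 'int (^)(int, char)' is rewritten
+  /// in place to the function-pointer type 'int (*)(int, char)'.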
+ bool convertBlockPointerToFunctionPointer(QualType &T) {
+ if (isTopLevelBlockPointerType(T)) {
+ const BlockPointerType *BPT = T->getAs<BlockPointerType>();
+ T = Context->getPointerType(BPT->getPointeeType());
+ return true;
+ }
+ return false;
+ }
+
+ bool convertObjCTypeToCStyleType(QualType &T);
+
+ bool needToScanForQualifiers(QualType T);
+ QualType getSuperStructType();
+ QualType getConstantStringStructType();
+ QualType convertFunctionTypeOfBlocks(const FunctionType *FT);
+ bool BufferContainsPPDirectives(const char *startBuf, const char *endBuf);
+
+ void convertToUnqualifiedObjCType(QualType &T) {
+ if (T->isObjCQualifiedIdType()) {
+ bool isConst = T.isConstQualified();
+ T = isConst ? Context->getObjCIdType().withConst()
+ : Context->getObjCIdType();
+ }
+ else if (T->isObjCQualifiedClassType())
+ T = Context->getObjCClassType();
+ else if (T->isObjCObjectPointerType() &&
+ T->getPointeeType()->isObjCQualifiedInterfaceType()) {
+ if (const ObjCObjectPointerType * OBJPT =
+ T->getAsObjCInterfacePointerType()) {
+ const ObjCInterfaceType *IFaceT = OBJPT->getInterfaceType();
+ T = QualType(IFaceT, 0);
+ T = Context->getPointerType(T);
+ }
+ }
+ }
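+
+  // For illustration, the routine above maps 'id<NSCopying>' to 'id'
+  // (preserving any const qualifier), 'Class<NSCopying>' to 'Class', and a
+  // qualified interface pointer such as 'NSObject<NSCopying> *' to
+  // 'NSObject *'.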
+
+ // FIXME: This predicate seems like it would be useful to add to ASTContext.
+ bool isObjCType(QualType T) {
+ if (!LangOpts.ObjC1 && !LangOpts.ObjC2)
+ return false;
+
+ QualType OCT = Context->getCanonicalType(T).getUnqualifiedType();
+
+ if (OCT == Context->getCanonicalType(Context->getObjCIdType()) ||
+ OCT == Context->getCanonicalType(Context->getObjCClassType()))
+ return true;
+
+ if (const PointerType *PT = OCT->getAs<PointerType>()) {
+ if (isa<ObjCInterfaceType>(PT->getPointeeType()) ||
+ PT->getPointeeType()->isObjCQualifiedIdType())
+ return true;
+ }
+ return false;
+ }
+ bool PointerTypeTakesAnyBlockArguments(QualType QT);
+ bool PointerTypeTakesAnyObjCQualifiedType(QualType QT);
+ void GetExtentOfArgList(const char *Name, const char *&LParen,
+ const char *&RParen);
+
+ void QuoteDoublequotes(std::string &From, std::string &To) {
+ for (unsigned i = 0; i < From.length(); i++) {
+ if (From[i] == '"')
+ To += "\\\"";
+ else
+ To += From[i];
+ }
+ }
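+
+  // For example, the input (say "hi") is appended to To as (say \"hi\"), so
+  // the result can be embedded safely in a generated C string literal.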
+
+ QualType getSimpleFunctionType(QualType result,
+ ArrayRef<QualType> args,
+ bool variadic = false) {
+ if (result == Context->getObjCInstanceType())
+ result = Context->getObjCIdType();
+ FunctionProtoType::ExtProtoInfo fpi;
+ fpi.Variadic = variadic;
+ return Context->getFunctionType(result, args, fpi);
+ }
+
+ // Helper function: create a CStyleCastExpr with trivial type source info.
+ CStyleCastExpr* NoTypeInfoCStyleCastExpr(ASTContext *Ctx, QualType Ty,
+ CastKind Kind, Expr *E) {
+ TypeSourceInfo *TInfo = Ctx->getTrivialTypeSourceInfo(Ty, SourceLocation());
+ return CStyleCastExpr::Create(*Ctx, Ty, VK_RValue, Kind, E, nullptr,
+ TInfo, SourceLocation(), SourceLocation());
+ }
+
+ bool ImplementationIsNonLazy(const ObjCImplDecl *OD) const {
+ IdentifierInfo* II = &Context->Idents.get("load");
+ Selector LoadSel = Context->Selectors.getSelector(0, &II);
+ return OD->getClassMethod(LoadSel) != nullptr;
+ }
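+
+  // A class or category implementing +load must be realized eagerly by the
+  // Objective-C runtime rather than on first use; that is what "non-lazy"
+  // refers to here.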
+
+ StringLiteral *getStringLiteral(StringRef Str) {
+ QualType StrType = Context->getConstantArrayType(
+ Context->CharTy, llvm::APInt(32, Str.size() + 1), ArrayType::Normal,
+ 0);
+ return StringLiteral::Create(*Context, Str, StringLiteral::Ascii,
+ /*Pascal=*/false, StrType, SourceLocation());
+ }
+ };
+
+}
+
+void RewriteModernObjC::RewriteBlocksInFunctionProtoType(QualType funcType,
+ NamedDecl *D) {
+ if (const FunctionProtoType *fproto
+ = dyn_cast<FunctionProtoType>(funcType.IgnoreParens())) {
+ for (const auto &I : fproto->param_types())
+ if (isTopLevelBlockPointerType(I)) {
+ // All the args are checked/rewritten. Don't call twice!
+ RewriteBlockPointerDecl(D);
+ break;
+ }
+ }
+}
+
+void RewriteModernObjC::CheckFunctionPointerDecl(QualType funcType, NamedDecl *ND) {
+ const PointerType *PT = funcType->getAs<PointerType>();
+ if (PT && PointerTypeTakesAnyBlockArguments(funcType))
+ RewriteBlocksInFunctionProtoType(PT->getPointeeType(), ND);
+}
+
+static bool IsHeaderFile(const std::string &Filename) {
+ std::string::size_type DotPos = Filename.rfind('.');
+
+ if (DotPos == std::string::npos) {
+ // no file extension
+ return false;
+ }
+
+ std::string Ext = std::string(Filename.begin()+DotPos+1, Filename.end());
+  // C header: .h
+  // C++ header: .hh or .H
+ return Ext == "h" || Ext == "hh" || Ext == "H";
+}
+
+RewriteModernObjC::RewriteModernObjC(std::string inFile, raw_ostream* OS,
+ DiagnosticsEngine &D, const LangOptions &LOpts,
+ bool silenceMacroWarn,
+ bool LineInfo)
+ : Diags(D), LangOpts(LOpts), InFileName(inFile), OutFile(OS),
+ SilenceRewriteMacroWarning(silenceMacroWarn), GenerateLineInfo(LineInfo) {
+ IsHeader = IsHeaderFile(inFile);
+ RewriteFailedDiag = Diags.getCustomDiagID(DiagnosticsEngine::Warning,
+ "rewriting sub-expression within a macro (may not be correct)");
+  // FIXME: This should be an error. But if the block is never called, it is
+  // OK; and making this an error may break the inclusion of some headers.
+ GlobalBlockRewriteFailedDiag = Diags.getCustomDiagID(DiagnosticsEngine::Warning,
+ "rewriting block literal declared in global scope is not implemented");
+
+ TryFinallyContainsReturnDiag = Diags.getCustomDiagID(
+ DiagnosticsEngine::Warning,
+ "rewriter doesn't support user-specified control flow semantics "
+ "for @try/@finally (code may not execute properly)");
+}
+
+std::unique_ptr<ASTConsumer> clang::CreateModernObjCRewriter(
+ const std::string &InFile, raw_ostream *OS, DiagnosticsEngine &Diags,
+ const LangOptions &LOpts, bool SilenceRewriteMacroWarning, bool LineInfo) {
+ return llvm::make_unique<RewriteModernObjC>(
+ InFile, OS, Diags, LOpts, SilenceRewriteMacroWarning, LineInfo);
+}
+
+void RewriteModernObjC::InitializeCommon(ASTContext &context) {
+ Context = &context;
+ SM = &Context->getSourceManager();
+ TUDecl = Context->getTranslationUnitDecl();
+ MsgSendFunctionDecl = nullptr;
+ MsgSendSuperFunctionDecl = nullptr;
+ MsgSendStretFunctionDecl = nullptr;
+ MsgSendSuperStretFunctionDecl = nullptr;
+ MsgSendFpretFunctionDecl = nullptr;
+ GetClassFunctionDecl = nullptr;
+ GetMetaClassFunctionDecl = nullptr;
+ GetSuperClassFunctionDecl = nullptr;
+ SelGetUidFunctionDecl = nullptr;
+ CFStringFunctionDecl = nullptr;
+ ConstantStringClassReference = nullptr;
+ NSStringRecord = nullptr;
+ CurMethodDef = nullptr;
+ CurFunctionDef = nullptr;
+ GlobalVarDecl = nullptr;
+ GlobalConstructionExp = nullptr;
+ SuperStructDecl = nullptr;
+ ProtocolTypeDecl = nullptr;
+ ConstantStringDecl = nullptr;
+ BcLabelCount = 0;
+ SuperConstructorFunctionDecl = nullptr;
+ NumObjCStringLiterals = 0;
+ PropParentMap = nullptr;
+ CurrentBody = nullptr;
+ DisableReplaceStmt = false;
+ objc_impl_method = false;
+
+ // Get the ID and start/end of the main file.
+ MainFileID = SM->getMainFileID();
+ const llvm::MemoryBuffer *MainBuf = SM->getBuffer(MainFileID);
+ MainFileStart = MainBuf->getBufferStart();
+ MainFileEnd = MainBuf->getBufferEnd();
+
+ Rewrite.setSourceMgr(Context->getSourceManager(), Context->getLangOpts());
+}
+
+//===----------------------------------------------------------------------===//
+// Top Level Driver Code
+//===----------------------------------------------------------------------===//
+
+void RewriteModernObjC::HandleTopLevelSingleDecl(Decl *D) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ // Two cases: either the decl could be in the main file, or it could be in a
+  // #included file. If the former, rewrite it now. If the latter, check to see
+ // if we rewrote the #include/#import.
+ SourceLocation Loc = D->getLocation();
+ Loc = SM->getExpansionLoc(Loc);
+
+ // If this is for a builtin, ignore it.
+ if (Loc.isInvalid()) return;
+
+  // Look for built-in declarations that we need to refer to during the rewrite.
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ RewriteFunctionDecl(FD);
+ } else if (VarDecl *FVD = dyn_cast<VarDecl>(D)) {
+ // declared in <Foundation/NSString.h>
+ if (FVD->getName() == "_NSConstantStringClassReference") {
+ ConstantStringClassReference = FVD;
+ return;
+ }
+ } else if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(D)) {
+ RewriteCategoryDecl(CD);
+ } else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) {
+ if (PD->isThisDeclarationADefinition())
+ RewriteProtocolDecl(PD);
+ } else if (LinkageSpecDecl *LSD = dyn_cast<LinkageSpecDecl>(D)) {
+    // FIXME: This will not work in all situations; leaving it out is harmless.
+ // RewriteLinkageSpec(LSD);
+
+ // Recurse into linkage specifications
+ for (DeclContext::decl_iterator DI = LSD->decls_begin(),
+ DIEnd = LSD->decls_end();
+ DI != DIEnd; ) {
+ if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>((*DI))) {
+ if (!IFace->isThisDeclarationADefinition()) {
+ SmallVector<Decl *, 8> DG;
+ SourceLocation StartLoc = IFace->getLocStart();
+ do {
+ if (isa<ObjCInterfaceDecl>(*DI) &&
+ !cast<ObjCInterfaceDecl>(*DI)->isThisDeclarationADefinition() &&
+ StartLoc == (*DI)->getLocStart())
+ DG.push_back(*DI);
+ else
+ break;
+
+ ++DI;
+ } while (DI != DIEnd);
+ RewriteForwardClassDecl(DG);
+ continue;
+ }
+ else {
+ // Keep track of all interface declarations seen.
+ ObjCInterfacesSeen.push_back(IFace);
+ ++DI;
+ continue;
+ }
+ }
+
+ if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>((*DI))) {
+ if (!Proto->isThisDeclarationADefinition()) {
+ SmallVector<Decl *, 8> DG;
+ SourceLocation StartLoc = Proto->getLocStart();
+ do {
+ if (isa<ObjCProtocolDecl>(*DI) &&
+ !cast<ObjCProtocolDecl>(*DI)->isThisDeclarationADefinition() &&
+ StartLoc == (*DI)->getLocStart())
+ DG.push_back(*DI);
+ else
+ break;
+
+ ++DI;
+ } while (DI != DIEnd);
+ RewriteForwardProtocolDecl(DG);
+ continue;
+ }
+ }
+
+ HandleTopLevelSingleDecl(*DI);
+ ++DI;
+ }
+ }
+ // If we have a decl in the main file, see if we should rewrite it.
+ if (SM->isWrittenInMainFile(Loc))
+ return HandleDeclInMainFile(D);
+}
+
+//===----------------------------------------------------------------------===//
+// Syntactic (non-AST) Rewriting Code
+//===----------------------------------------------------------------------===//
+
+void RewriteModernObjC::RewriteInclude() {
+ SourceLocation LocStart = SM->getLocForStartOfFile(MainFileID);
+ StringRef MainBuf = SM->getBufferData(MainFileID);
+ const char *MainBufStart = MainBuf.begin();
+ const char *MainBufEnd = MainBuf.end();
+ size_t ImportLen = strlen("import");
+
+ // Loop over the whole file, looking for includes.
+ for (const char *BufPtr = MainBufStart; BufPtr < MainBufEnd; ++BufPtr) {
+ if (*BufPtr == '#') {
+ if (++BufPtr == MainBufEnd)
+ return;
+ while (*BufPtr == ' ' || *BufPtr == '\t')
+ if (++BufPtr == MainBufEnd)
+ return;
+ if (!strncmp(BufPtr, "import", ImportLen)) {
+ // replace import with include
+ SourceLocation ImportLoc =
+ LocStart.getLocWithOffset(BufPtr-MainBufStart);
+ ReplaceText(ImportLoc, ImportLen, "include");
+ BufPtr += ImportLen;
+ }
+ }
+ }
+}
+
+static void WriteInternalIvarName(const ObjCInterfaceDecl *IDecl,
+ ObjCIvarDecl *IvarDecl, std::string &Result) {
+ Result += "OBJC_IVAR_$_";
+ Result += IDecl->getName();
+ Result += "$";
+ Result += IvarDecl->getName();
+}
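+
+// For a hypothetical ivar 'bar' of class 'Foo', this produces the symbol
+// name "OBJC_IVAR_$_Foo$bar".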
+
+std::string
+RewriteModernObjC::getIvarAccessString(ObjCIvarDecl *D) {
+ const ObjCInterfaceDecl *ClassDecl = D->getContainingInterface();
+
+ // Build name of symbol holding ivar offset.
+ std::string IvarOffsetName;
+ if (D->isBitField())
+ ObjCIvarBitfieldGroupOffset(D, IvarOffsetName);
+ else
+ WriteInternalIvarName(ClassDecl, D, IvarOffsetName);
+
+
+ std::string S = "(*(";
+ QualType IvarT = D->getType();
+ if (D->isBitField())
+ IvarT = GetGroupRecordTypeForObjCIvarBitfield(D);
+
+ if (!isa<TypedefType>(IvarT) && IvarT->isRecordType()) {
+ RecordDecl *RD = IvarT->getAs<RecordType>()->getDecl();
+ RD = RD->getDefinition();
+ if (RD && !RD->getDeclName().getAsIdentifierInfo()) {
+ // decltype(((Foo_IMPL*)0)->bar) *
+ ObjCContainerDecl *CDecl =
+ dyn_cast<ObjCContainerDecl>(D->getDeclContext());
+ // ivar in class extensions requires special treatment.
+ if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CDecl))
+ CDecl = CatDecl->getClassInterface();
+ std::string RecName = CDecl->getName();
+ RecName += "_IMPL";
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get(RecName.c_str()));
+ QualType PtrStructIMPL = Context->getPointerType(Context->getTagDeclType(RD));
+ unsigned UnsignedIntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
+ Expr *Zero = IntegerLiteral::Create(*Context,
+ llvm::APInt(UnsignedIntSize, 0),
+ Context->UnsignedIntTy, SourceLocation());
+ Zero = NoTypeInfoCStyleCastExpr(Context, PtrStructIMPL, CK_BitCast, Zero);
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ Zero);
+ FieldDecl *FD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get(D->getNameAsString()),
+ IvarT, nullptr,
+ /*BitWidth=*/nullptr, /*Mutable=*/true,
+ ICIS_NoInit);
+ MemberExpr *ME = new (Context)
+ MemberExpr(PE, true, SourceLocation(), FD, SourceLocation(),
+ FD->getType(), VK_LValue, OK_Ordinary);
+ IvarT = Context->getDecltypeType(ME, ME->getType());
+ }
+ }
+ convertObjCTypeToCStyleType(IvarT);
+ QualType castT = Context->getPointerType(IvarT);
+ std::string TypeString(castT.getAsString(Context->getPrintingPolicy()));
+ S += TypeString;
+ S += ")";
+
+ // ((char *)self + IVAR_OFFSET_SYMBOL_NAME)
+ S += "((char *)self + ";
+ S += IvarOffsetName;
+ S += "))";
+ if (D->isBitField()) {
+ S += ".";
+ S += D->getNameAsString();
+ }
+ ReferencedIvars[const_cast<ObjCInterfaceDecl *>(ClassDecl)].insert(D);
+ return S;
+}
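+
+// For a hypothetical non-bitfield ivar 'int bar;' of class 'Foo', the access
+// string built above reads:
+//   (*(int *)((char *)self + OBJC_IVAR_$_Foo$bar))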
+
+/// mustSynthesizeSetterGetterMethod - returns true if setter or getter has not
+/// been found in the class implementation. In this case, it must be synthesized.
+static bool mustSynthesizeSetterGetterMethod(ObjCImplementationDecl *IMP,
+ ObjCPropertyDecl *PD,
+ bool getter) {
+ return getter ? !IMP->getInstanceMethod(PD->getGetterName())
+ : !IMP->getInstanceMethod(PD->getSetterName());
+}
+
+void RewriteModernObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
+ ObjCImplementationDecl *IMD,
+ ObjCCategoryImplDecl *CID) {
+ static bool objcGetPropertyDefined = false;
+ static bool objcSetPropertyDefined = false;
+ SourceLocation startGetterSetterLoc;
+
+ if (PID->getLocStart().isValid()) {
+ SourceLocation startLoc = PID->getLocStart();
+ InsertText(startLoc, "// ");
+ const char *startBuf = SM->getCharacterData(startLoc);
+ assert((*startBuf == '@') && "bogus @synthesize location");
+ const char *semiBuf = strchr(startBuf, ';');
+ assert((*semiBuf == ';') && "@synthesize: can't find ';'");
+ startGetterSetterLoc = startLoc.getLocWithOffset(semiBuf-startBuf+1);
+ }
+ else
+ startGetterSetterLoc = IMD ? IMD->getLocEnd() : CID->getLocEnd();
+
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ return; // FIXME: is this correct?
+
+ // Generate the 'getter' function.
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCIvarDecl *OID = PID->getPropertyIvarDecl();
+ assert(IMD && OID && "Synthesized ivars must be attached to @implementation");
+
+ unsigned Attributes = PD->getPropertyAttributes();
+ if (mustSynthesizeSetterGetterMethod(IMD, PD, true /*getter*/)) {
+ bool GenGetProperty = !(Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
+ (Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_copy));
+ std::string Getr;
+ if (GenGetProperty && !objcGetPropertyDefined) {
+ objcGetPropertyDefined = true;
+ // FIXME. Is this attribute correct in all cases?
+ Getr = "\nextern \"C\" __declspec(dllimport) "
+ "id objc_getProperty(id, SEL, long, bool);\n";
+ }
+ RewriteObjCMethodDecl(OID->getContainingInterface(),
+ PD->getGetterMethodDecl(), Getr);
+ Getr += "{ ";
+ // Synthesize an explicit cast to gain access to the ivar.
+ // See objc-act.c:objc_synthesize_new_getter() for details.
+ if (GenGetProperty) {
+ // return objc_getProperty(self, _cmd, offsetof(ClassDecl, OID), 1)
+ Getr += "typedef ";
+ const FunctionType *FPRetType = nullptr;
+ RewriteTypeIntoString(PD->getGetterMethodDecl()->getReturnType(), Getr,
+ FPRetType);
+ Getr += " _TYPE";
+ if (FPRetType) {
+ Getr += ")"; // close the precedence "scope" for "*".
+
+ // Now, emit the argument types (if any).
+ if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)){
+ Getr += "(";
+ for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
+ if (i) Getr += ", ";
+ std::string ParamStr =
+ FT->getParamType(i).getAsString(Context->getPrintingPolicy());
+ Getr += ParamStr;
+ }
+ if (FT->isVariadic()) {
+ if (FT->getNumParams())
+ Getr += ", ";
+ Getr += "...";
+ }
+ Getr += ")";
+ } else
+ Getr += "()";
+ }
+ Getr += ";\n";
+ Getr += "return (_TYPE)";
+ Getr += "objc_getProperty(self, _cmd, ";
+ RewriteIvarOffsetComputation(OID, Getr);
+ Getr += ", 1)";
+ }
+ else
+ Getr += "return " + getIvarAccessString(OID);
+ Getr += "; }";
+ InsertText(startGetterSetterLoc, Getr);
+ }
+
+ if (PD->isReadOnly() ||
+ !mustSynthesizeSetterGetterMethod(IMD, PD, false /*setter*/))
+ return;
+
+ // Generate the 'setter' function.
+ std::string Setr;
+ bool GenSetProperty = Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_copy);
+ if (GenSetProperty && !objcSetPropertyDefined) {
+ objcSetPropertyDefined = true;
+ // FIXME. Is this attribute correct in all cases?
+ Setr = "\nextern \"C\" __declspec(dllimport) "
+ "void objc_setProperty (id, SEL, long, id, bool, bool);\n";
+ }
+
+ RewriteObjCMethodDecl(OID->getContainingInterface(),
+ PD->getSetterMethodDecl(), Setr);
+ Setr += "{ ";
+ // Synthesize an explicit cast to initialize the ivar.
+ // See objc-act.c:objc_synthesize_new_setter() for details.
+ if (GenSetProperty) {
+ Setr += "objc_setProperty (self, _cmd, ";
+ RewriteIvarOffsetComputation(OID, Setr);
+ Setr += ", (id)";
+ Setr += PD->getName();
+ Setr += ", ";
+ if (Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ Setr += "0, ";
+ else
+ Setr += "1, ";
+ if (Attributes & ObjCPropertyDecl::OBJC_PR_copy)
+ Setr += "1)";
+ else
+ Setr += "0)";
+ }
+ else {
+ Setr += getIvarAccessString(OID) + " = ";
+ Setr += PD->getName();
+ }
+ Setr += "; }\n";
+ InsertText(startGetterSetterLoc, Setr);
+}
+
+static void RewriteOneForwardClassDecl(ObjCInterfaceDecl *ForwardDecl,
+ std::string &typedefString) {
+ typedefString += "\n#ifndef _REWRITER_typedef_";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += "\n";
+ typedefString += "#define _REWRITER_typedef_";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += "\n";
+ typedefString += "typedef struct objc_object ";
+ typedefString += ForwardDecl->getNameAsString();
+ // typedef struct { } _objc_exc_Classname;
+ typedefString += ";\ntypedef struct {} _objc_exc_";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += ";\n#endif\n";
+}
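+
+// For a hypothetical '@class Foo;', the block emitted above expands to:
+//   #ifndef _REWRITER_typedef_Foo
+//   #define _REWRITER_typedef_Foo
+//   typedef struct objc_object Foo;
+//   typedef struct {} _objc_exc_Foo;
+//   #endif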
+
+void RewriteModernObjC::RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl,
+ const std::string &typedefString) {
+ SourceLocation startLoc = ClassDecl->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+ const char *semiPtr = strchr(startBuf, ';');
+ // Replace the @class with typedefs corresponding to the classes.
+ ReplaceText(startLoc, semiPtr-startBuf+1, typedefString);
+}
+
+void RewriteModernObjC::RewriteForwardClassDecl(DeclGroupRef D) {
+ std::string typedefString;
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
+ if (ObjCInterfaceDecl *ForwardDecl = dyn_cast<ObjCInterfaceDecl>(*I)) {
+ if (I == D.begin()) {
+        // Translate to typedefs that forward-reference structs with the same
+        // name as the class. As a convenience, we include the original
+        // declaration as a comment.
+ typedefString += "// @class ";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += ";";
+ }
+ RewriteOneForwardClassDecl(ForwardDecl, typedefString);
+ }
+ else
+ HandleTopLevelSingleDecl(*I);
+ }
+ DeclGroupRef::iterator I = D.begin();
+ RewriteForwardClassEpilogue(cast<ObjCInterfaceDecl>(*I), typedefString);
+}
+
+void RewriteModernObjC::RewriteForwardClassDecl(
+ const SmallVectorImpl<Decl *> &D) {
+ std::string typedefString;
+ for (unsigned i = 0; i < D.size(); i++) {
+ ObjCInterfaceDecl *ForwardDecl = cast<ObjCInterfaceDecl>(D[i]);
+ if (i == 0) {
+ typedefString += "// @class ";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += ";";
+ }
+ RewriteOneForwardClassDecl(ForwardDecl, typedefString);
+ }
+ RewriteForwardClassEpilogue(cast<ObjCInterfaceDecl>(D[0]), typedefString);
+}
+
+void RewriteModernObjC::RewriteMethodDeclaration(ObjCMethodDecl *Method) {
+  // When the method is a synthesized one, such as a getter/setter, there is
+  // nothing to rewrite.
+ if (Method->isImplicit())
+ return;
+ SourceLocation LocStart = Method->getLocStart();
+ SourceLocation LocEnd = Method->getLocEnd();
+
+ if (SM->getExpansionLineNumber(LocEnd) >
+ SM->getExpansionLineNumber(LocStart)) {
+ InsertText(LocStart, "#if 0\n");
+ ReplaceText(LocEnd, 1, ";\n#endif\n");
+ } else {
+ InsertText(LocStart, "// ");
+ }
+}
+
+void RewriteModernObjC::RewriteProperty(ObjCPropertyDecl *prop) {
+ SourceLocation Loc = prop->getAtLoc();
+
+ ReplaceText(Loc, 0, "// ");
+ // FIXME: handle properties that are declared across multiple lines.
+}
+
+void RewriteModernObjC::RewriteCategoryDecl(ObjCCategoryDecl *CatDecl) {
+ SourceLocation LocStart = CatDecl->getLocStart();
+
+ // FIXME: handle category headers that are declared across multiple lines.
+ if (CatDecl->getIvarRBraceLoc().isValid()) {
+ ReplaceText(LocStart, 1, "/** ");
+ ReplaceText(CatDecl->getIvarRBraceLoc(), 1, "**/ ");
+ }
+ else {
+ ReplaceText(LocStart, 0, "// ");
+ }
+
+ for (auto *I : CatDecl->properties())
+ RewriteProperty(I);
+
+ for (auto *I : CatDecl->instance_methods())
+ RewriteMethodDeclaration(I);
+ for (auto *I : CatDecl->class_methods())
+ RewriteMethodDeclaration(I);
+
+ // Lastly, comment out the @end.
+ ReplaceText(CatDecl->getAtEndRange().getBegin(),
+ strlen("@end"), "/* @end */\n");
+}
+
+void RewriteModernObjC::RewriteProtocolDecl(ObjCProtocolDecl *PDecl) {
+ SourceLocation LocStart = PDecl->getLocStart();
+ assert(PDecl->isThisDeclarationADefinition());
+
+ // FIXME: handle protocol headers that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ");
+
+ for (auto *I : PDecl->instance_methods())
+ RewriteMethodDeclaration(I);
+ for (auto *I : PDecl->class_methods())
+ RewriteMethodDeclaration(I);
+ for (auto *I : PDecl->properties())
+ RewriteProperty(I);
+
+ // Lastly, comment out the @end.
+ SourceLocation LocEnd = PDecl->getAtEndRange().getBegin();
+ ReplaceText(LocEnd, strlen("@end"), "/* @end */\n");
+
+ // Must comment out @optional/@required
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ for (const char *p = startBuf; p < endBuf; p++) {
+ if (*p == '@' && !strncmp(p+1, "optional", strlen("optional"))) {
+ SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf);
+ ReplaceText(OptionalLoc, strlen("@optional"), "/* @optional */");
+
+ }
+ else if (*p == '@' && !strncmp(p+1, "required", strlen("required"))) {
+ SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf);
+ ReplaceText(OptionalLoc, strlen("@required"), "/* @required */");
+
+ }
+ }
+}
+
+void RewriteModernObjC::RewriteForwardProtocolDecl(DeclGroupRef D) {
+ SourceLocation LocStart = (*D.begin())->getLocStart();
+ if (LocStart.isInvalid())
+ llvm_unreachable("Invalid SourceLocation");
+  // FIXME: handle forward protocols that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ");
+}
+
+void
+RewriteModernObjC::RewriteForwardProtocolDecl(const SmallVectorImpl<Decl *> &DG) {
+ SourceLocation LocStart = DG[0]->getLocStart();
+ if (LocStart.isInvalid())
+ llvm_unreachable("Invalid SourceLocation");
+  // FIXME: handle forward protocols that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ");
+}
+
+void
+RewriteModernObjC::RewriteLinkageSpec(LinkageSpecDecl *LSD) {
+ SourceLocation LocStart = LSD->getExternLoc();
+ if (LocStart.isInvalid())
+ llvm_unreachable("Invalid extern SourceLocation");
+
+ ReplaceText(LocStart, 0, "// ");
+ if (!LSD->hasBraces())
+ return;
+  // FIXME: We don't rewrite well if '{' is not on the same line as 'extern'.
+ SourceLocation LocRBrace = LSD->getRBraceLoc();
+ if (LocRBrace.isInvalid())
+ llvm_unreachable("Invalid rbrace SourceLocation");
+ ReplaceText(LocRBrace, 0, "// ");
+}
+
+void RewriteModernObjC::RewriteTypeIntoString(QualType T, std::string &ResultStr,
+ const FunctionType *&FPRetType) {
+ if (T->isObjCQualifiedIdType())
+ ResultStr += "id";
+ else if (T->isFunctionPointerType() ||
+ T->isBlockPointerType()) {
+    // needs special handling, since pointers to functions have special
+    // syntax (where a declaration models use).
+ QualType retType = T;
+ QualType PointeeTy;
+ if (const PointerType* PT = retType->getAs<PointerType>())
+ PointeeTy = PT->getPointeeType();
+ else if (const BlockPointerType *BPT = retType->getAs<BlockPointerType>())
+ PointeeTy = BPT->getPointeeType();
+ if ((FPRetType = PointeeTy->getAs<FunctionType>())) {
+ ResultStr +=
+ FPRetType->getReturnType().getAsString(Context->getPrintingPolicy());
+ ResultStr += "(*";
+ }
+ } else
+ ResultStr += T.getAsString(Context->getPrintingPolicy());
+}
+
+void RewriteModernObjC::RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl,
+ ObjCMethodDecl *OMD,
+ std::string &ResultStr) {
+ //fprintf(stderr,"In RewriteObjCMethodDecl\n");
+ const FunctionType *FPRetType = nullptr;
+ ResultStr += "\nstatic ";
+ RewriteTypeIntoString(OMD->getReturnType(), ResultStr, FPRetType);
+ ResultStr += " ";
+
+ // Unique method name
+ std::string NameStr;
+
+ if (OMD->isInstanceMethod())
+ NameStr += "_I_";
+ else
+ NameStr += "_C_";
+
+ NameStr += IDecl->getNameAsString();
+ NameStr += "_";
+
+ if (ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext())) {
+ NameStr += CID->getNameAsString();
+ NameStr += "_";
+ }
+ // Append selector names, replacing ':' with '_'
+ {
+ std::string selString = OMD->getSelector().getAsString();
+ int len = selString.size();
+ for (int i = 0; i < len; i++)
+ if (selString[i] == ':')
+ selString[i] = '_';
+ NameStr += selString;
+ }
+ // Remember this name for metadata emission
+ MethodInternalNames[OMD] = NameStr;
+ ResultStr += NameStr;
+
+ // Rewrite arguments
+ ResultStr += "(";
+
+ // invisible arguments
+ if (OMD->isInstanceMethod()) {
+ QualType selfTy = Context->getObjCInterfaceType(IDecl);
+ selfTy = Context->getPointerType(selfTy);
+ if (!LangOpts.MicrosoftExt) {
+ if (ObjCSynthesizedStructs.count(const_cast<ObjCInterfaceDecl*>(IDecl)))
+ ResultStr += "struct ";
+ }
+ // When rewriting for Microsoft, explicitly omit the structure name.
+ ResultStr += IDecl->getNameAsString();
+ ResultStr += " *";
+ }
+ else
+ ResultStr += Context->getObjCClassType().getAsString(
+ Context->getPrintingPolicy());
+
+ ResultStr += " self, ";
+ ResultStr += Context->getObjCSelType().getAsString(Context->getPrintingPolicy());
+ ResultStr += " _cmd";
+
+ // Method arguments.
+ for (const auto *PDecl : OMD->params()) {
+ ResultStr += ", ";
+ if (PDecl->getType()->isObjCQualifiedIdType()) {
+ ResultStr += "id ";
+ ResultStr += PDecl->getNameAsString();
+ } else {
+ std::string Name = PDecl->getNameAsString();
+ QualType QT = PDecl->getType();
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ (void)convertBlockPointerToFunctionPointer(QT);
+ QT.getAsStringInternal(Name, Context->getPrintingPolicy());
+ ResultStr += Name;
+ }
+ }
+ if (OMD->isVariadic())
+ ResultStr += ", ...";
+ ResultStr += ") ";
+
+ if (FPRetType) {
+ ResultStr += ")"; // close the precedence "scope" for "*".
+
+ // Now, emit the argument types (if any).
+ if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)) {
+ ResultStr += "(";
+ for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
+ if (i) ResultStr += ", ";
+ std::string ParamStr =
+ FT->getParamType(i).getAsString(Context->getPrintingPolicy());
+ ResultStr += ParamStr;
+ }
+ if (FT->isVariadic()) {
+ if (FT->getNumParams())
+ ResultStr += ", ";
+ ResultStr += "...";
+ }
+ ResultStr += ")";
+ } else {
+ ResultStr += "()";
+ }
+ }
+}
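+
+// For illustration, an instance method '- (void) setFoo: (int) f' of a
+// hypothetical class 'Bar' is lowered by the routine above to roughly:
+//   static void _I_Bar_setFoo_(Bar * self, SEL _cmd, int f)
+// Class methods use the "_C_" prefix instead of "_I_".
+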
+void RewriteModernObjC::RewriteImplementationDecl(Decl *OID) {
+ ObjCImplementationDecl *IMD = dyn_cast<ObjCImplementationDecl>(OID);
+ ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(OID);
+
+ if (IMD) {
+ if (IMD->getIvarRBraceLoc().isValid()) {
+ ReplaceText(IMD->getLocStart(), 1, "/** ");
+ ReplaceText(IMD->getIvarRBraceLoc(), 1, "**/ ");
+ }
+ else {
+ InsertText(IMD->getLocStart(), "// ");
+ }
+ }
+ else
+ InsertText(CID->getLocStart(), "// ");
+
+ for (auto *OMD : IMD ? IMD->instance_methods() : CID->instance_methods()) {
+ std::string ResultStr;
+ RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
+ SourceLocation LocStart = OMD->getLocStart();
+ SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ ReplaceText(LocStart, endBuf-startBuf, ResultStr);
+ }
+
+ for (auto *OMD : IMD ? IMD->class_methods() : CID->class_methods()) {
+ std::string ResultStr;
+ RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
+ SourceLocation LocStart = OMD->getLocStart();
+ SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ ReplaceText(LocStart, endBuf-startBuf, ResultStr);
+ }
+ for (auto *I : IMD ? IMD->property_impls() : CID->property_impls())
+ RewritePropertyImplDecl(I, IMD, CID);
+
+ InsertText(IMD ? IMD->getLocEnd() : CID->getLocEnd(), "// ");
+}
+
+void RewriteModernObjC::RewriteInterfaceDecl(ObjCInterfaceDecl *ClassDecl) {
+ // Do not synthesize more than once.
+ if (ObjCSynthesizedStructs.count(ClassDecl))
+ return;
+  // Make sure superclasses are written before the current class is written.
+ ObjCInterfaceDecl *SuperClass = ClassDecl->getSuperClass();
+ while (SuperClass) {
+ RewriteInterfaceDecl(SuperClass);
+ SuperClass = SuperClass->getSuperClass();
+ }
+ std::string ResultStr;
+ if (!ObjCWrittenInterfaces.count(ClassDecl->getCanonicalDecl())) {
+ // we haven't seen a forward decl - generate a typedef.
+ RewriteOneForwardClassDecl(ClassDecl, ResultStr);
+ RewriteIvarOffsetSymbols(ClassDecl, ResultStr);
+
+ RewriteObjCInternalStruct(ClassDecl, ResultStr);
+    // Mark this typedef as having been written into its C++ equivalent.
+ ObjCWrittenInterfaces.insert(ClassDecl->getCanonicalDecl());
+
+ for (auto *I : ClassDecl->properties())
+ RewriteProperty(I);
+ for (auto *I : ClassDecl->instance_methods())
+ RewriteMethodDeclaration(I);
+ for (auto *I : ClassDecl->class_methods())
+ RewriteMethodDeclaration(I);
+
+ // Lastly, comment out the @end.
+ ReplaceText(ClassDecl->getAtEndRange().getBegin(), strlen("@end"),
+ "/* @end */\n");
+ }
+}
+
+Stmt *RewriteModernObjC::RewritePropertyOrImplicitSetter(PseudoObjectExpr *PseudoOp) {
+ SourceRange OldRange = PseudoOp->getSourceRange();
+
+ // We just magically know some things about the structure of this
+ // expression.
+ ObjCMessageExpr *OldMsg =
+ cast<ObjCMessageExpr>(PseudoOp->getSemanticExpr(
+ PseudoOp->getNumSemanticExprs() - 1));
+
+ // Because the rewriter doesn't allow us to rewrite rewritten code,
+ // we need to suppress rewriting the sub-statements.
+ Expr *Base;
+ SmallVector<Expr*, 2> Args;
+ {
+ DisableReplaceStmtScope S(*this);
+
+ // Rebuild the base expression if we have one.
+ Base = nullptr;
+ if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) {
+ Base = OldMsg->getInstanceReceiver();
+ Base = cast<OpaqueValueExpr>(Base)->getSourceExpr();
+ Base = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Base));
+ }
+
+ unsigned numArgs = OldMsg->getNumArgs();
+ for (unsigned i = 0; i < numArgs; i++) {
+ Expr *Arg = OldMsg->getArg(i);
+ if (isa<OpaqueValueExpr>(Arg))
+ Arg = cast<OpaqueValueExpr>(Arg)->getSourceExpr();
+ Arg = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Arg));
+ Args.push_back(Arg);
+ }
+ }
+
+ // TODO: avoid this copy.
+ SmallVector<SourceLocation, 1> SelLocs;
+ OldMsg->getSelectorLocs(SelLocs);
+
+ ObjCMessageExpr *NewMsg = nullptr;
+ switch (OldMsg->getReceiverKind()) {
+ case ObjCMessageExpr::Class:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getClassReceiverTypeInfo(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::Instance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ Base,
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ case ObjCMessageExpr::SuperInstance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getSuperLoc(),
+ OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance,
+ OldMsg->getSuperType(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+ }
+
+ Stmt *Replacement = SynthMessageExpr(NewMsg);
+ ReplaceStmtWithRange(PseudoOp, Replacement, OldRange);
+ return Replacement;
+}
+
+Stmt *RewriteModernObjC::RewritePropertyOrImplicitGetter(PseudoObjectExpr *PseudoOp) {
+ SourceRange OldRange = PseudoOp->getSourceRange();
+
+ // We just magically know some things about the structure of this
+ // expression.
+ ObjCMessageExpr *OldMsg =
+ cast<ObjCMessageExpr>(PseudoOp->getResultExpr()->IgnoreImplicit());
+
+ // Because the rewriter doesn't allow us to rewrite rewritten code,
+ // we need to suppress rewriting the sub-statements.
+ Expr *Base = nullptr;
+ SmallVector<Expr*, 1> Args;
+ {
+ DisableReplaceStmtScope S(*this);
+ // Rebuild the base expression if we have one.
+ if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) {
+ Base = OldMsg->getInstanceReceiver();
+ Base = cast<OpaqueValueExpr>(Base)->getSourceExpr();
+ Base = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Base));
+ }
+ unsigned numArgs = OldMsg->getNumArgs();
+ for (unsigned i = 0; i < numArgs; i++) {
+ Expr *Arg = OldMsg->getArg(i);
+ if (isa<OpaqueValueExpr>(Arg))
+ Arg = cast<OpaqueValueExpr>(Arg)->getSourceExpr();
+ Arg = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Arg));
+ Args.push_back(Arg);
+ }
+ }
+
+ // Intentionally empty.
+ SmallVector<SourceLocation, 1> SelLocs;
+
+ ObjCMessageExpr *NewMsg = nullptr;
+ switch (OldMsg->getReceiverKind()) {
+ case ObjCMessageExpr::Class:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getClassReceiverTypeInfo(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::Instance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ Base,
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ case ObjCMessageExpr::SuperInstance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getSuperLoc(),
+ OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance,
+ OldMsg->getSuperType(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+ }
+
+ Stmt *Replacement = SynthMessageExpr(NewMsg);
+ ReplaceStmtWithRange(PseudoOp, Replacement, OldRange);
+ return Replacement;
+}
+
+/// SynthCountByEnumWithState - To print:
+/// ((NSUInteger (*)
+/// (id, SEL, struct __objcFastEnumerationState *, id *, NSUInteger))
+/// (void *)objc_msgSend)((id)l_collection,
+/// sel_registerName(
+/// "countByEnumeratingWithState:objects:count:"),
+/// &enumState,
+/// (id *)__rw_items, (NSUInteger)16)
+///
+void RewriteModernObjC::SynthCountByEnumWithState(std::string &buf) {
+ buf += "((_WIN_NSUInteger (*) (id, SEL, struct __objcFastEnumerationState *, "
+ "id *, _WIN_NSUInteger))(void *)objc_msgSend)";
+ buf += "\n\t\t";
+ buf += "((id)l_collection,\n\t\t";
+ buf += "sel_registerName(\"countByEnumeratingWithState:objects:count:\"),";
+ buf += "\n\t\t";
+ buf += "&enumState, "
+ "(id *)__rw_items, (_WIN_NSUInteger)16)";
+}
+
+/// RewriteBreakStmt - Rewrite a break-stmt inside an ObjC2 foreach statement
+/// so that it exits the enclosing synthesized loop.
+///
+Stmt *RewriteModernObjC::RewriteBreakStmt(BreakStmt *S) {
+ if (Stmts.empty() || !isa<ObjCForCollectionStmt>(Stmts.back()))
+ return S;
+ // replace break with goto __break_label
+ std::string buf;
+
+ SourceLocation startLoc = S->getLocStart();
+ buf = "goto __break_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ ReplaceText(startLoc, strlen("break"), buf);
+
+ return nullptr;
+}
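+
+// For example, a 'break;' directly inside a rewritten foreach loop becomes
+// 'goto __break_label_1;' when the enclosing loop was assigned label number 1.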
+
+void RewriteModernObjC::ConvertSourceLocationToLineDirective(
+ SourceLocation Loc,
+ std::string &LineString) {
+ if (Loc.isFileID() && GenerateLineInfo) {
+ LineString += "\n#line ";
+ PresumedLoc PLoc = SM->getPresumedLoc(Loc);
+ LineString += utostr(PLoc.getLine());
+ LineString += " \"";
+ LineString += Lexer::Stringify(PLoc.getFilename());
+ LineString += "\"\n";
+ }
+}
+
+/// RewriteContinueStmt - Rewrite a continue-stmt inside an ObjC2 foreach
+/// statement so that it continues with the inner synthesized loop.
+///
+Stmt *RewriteModernObjC::RewriteContinueStmt(ContinueStmt *S) {
+ if (Stmts.empty() || !isa<ObjCForCollectionStmt>(Stmts.back()))
+ return S;
+ // replace continue with goto __continue_label
+ std::string buf;
+
+ SourceLocation startLoc = S->getLocStart();
+ buf = "goto __continue_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ ReplaceText(startLoc, strlen("continue"), buf);
+
+ return nullptr;
+}
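+
+// Likewise, a 'continue;' in the same position becomes
+// 'goto __continue_label_1;'.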
+
+/// RewriteObjCForCollectionStmt - Rewriter for ObjC2's foreach statement.
+/// It rewrites:
+/// for ( type elem in collection) { stmts; }
+///
+/// Into:
+/// {
+/// type elem;
+/// struct __objcFastEnumerationState enumState = { 0 };
+/// id __rw_items[16];
+/// id l_collection = (id)collection;
+/// NSUInteger limit = [l_collection countByEnumeratingWithState:&enumState
+/// objects:__rw_items count:16];
+/// if (limit) {
+/// unsigned long startMutations = *enumState.mutationsPtr;
+/// do {
+/// unsigned long counter = 0;
+/// do {
+/// if (startMutations != *enumState.mutationsPtr)
+/// objc_enumerationMutation(l_collection);
+/// elem = (type)enumState.itemsPtr[counter++];
+/// stmts;
+/// __continue_label: ;
+/// } while (counter < limit);
+/// } while ((limit = [l_collection countByEnumeratingWithState:&enumState
+/// objects:__rw_items count:16]));
+/// elem = nil;
+/// __break_label: ;
+/// }
+/// else
+/// elem = nil;
+/// }
+///
+Stmt *RewriteModernObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
+ SourceLocation OrigEnd) {
+ assert(!Stmts.empty() && "ObjCForCollectionStmt - Statement stack empty");
+ assert(isa<ObjCForCollectionStmt>(Stmts.back()) &&
+ "ObjCForCollectionStmt Statement stack mismatch");
+ assert(!ObjCBcLabelNo.empty() &&
+ "ObjCForCollectionStmt - Label No stack empty");
+
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+ StringRef elementName;
+ std::string elementTypeAsString;
+ std::string buf;
+ // line directive first.
+ SourceLocation ForEachLoc = S->getForLoc();
+ ConvertSourceLocationToLineDirective(ForEachLoc, buf);
+ buf += "{\n\t";
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(S->getElement())) {
+ // type elem;
+ NamedDecl* D = cast<NamedDecl>(DS->getSingleDecl());
+ QualType ElementType = cast<ValueDecl>(D)->getType();
+ if (ElementType->isObjCQualifiedIdType() ||
+ ElementType->isObjCQualifiedInterfaceType())
+ // Simply use 'id' for all qualified types.
+ elementTypeAsString = "id";
+ else
+ elementTypeAsString = ElementType.getAsString(Context->getPrintingPolicy());
+ buf += elementTypeAsString;
+ buf += " ";
+ elementName = D->getName();
+ buf += elementName;
+ buf += ";\n\t";
+ }
+ else {
+ DeclRefExpr *DR = cast<DeclRefExpr>(S->getElement());
+ elementName = DR->getDecl()->getName();
+ ValueDecl *VD = cast<ValueDecl>(DR->getDecl());
+ if (VD->getType()->isObjCQualifiedIdType() ||
+ VD->getType()->isObjCQualifiedInterfaceType())
+ // Simply use 'id' for all qualified types.
+ elementTypeAsString = "id";
+ else
+ elementTypeAsString = VD->getType().getAsString(Context->getPrintingPolicy());
+ }
+
+ // struct __objcFastEnumerationState enumState = { 0 };
+ buf += "struct __objcFastEnumerationState enumState = { 0 };\n\t";
+ // id __rw_items[16];
+ buf += "id __rw_items[16];\n\t";
+ // id l_collection = (id)
+ buf += "id l_collection = (id)";
+ // Find start location of 'collection' the hard way!
+ const char *startCollectionBuf = startBuf;
+ startCollectionBuf += 3; // skip 'for'
+ startCollectionBuf = strchr(startCollectionBuf, '(');
+ startCollectionBuf++; // skip '('
+ // find 'in' and skip it.
+ while (*startCollectionBuf != ' ' ||
+ *(startCollectionBuf+1) != 'i' || *(startCollectionBuf+2) != 'n' ||
+ (*(startCollectionBuf+3) != ' ' &&
+ *(startCollectionBuf+3) != '[' && *(startCollectionBuf+3) != '('))
+ startCollectionBuf++;
+ startCollectionBuf += 3;
+
+ // Replace: "for (type element in" with string constructed thus far.
+ ReplaceText(startLoc, startCollectionBuf - startBuf, buf);
+ // Replace ')' in for '(' type elem in collection ')' with ';'
+ SourceLocation rightParenLoc = S->getRParenLoc();
+ const char *rparenBuf = SM->getCharacterData(rightParenLoc);
+ SourceLocation lparenLoc = startLoc.getLocWithOffset(rparenBuf-startBuf);
+ buf = ";\n\t";
+
+ // unsigned long limit = [l_collection countByEnumeratingWithState:&enumState
+ // objects:__rw_items count:16];
+ // which is synthesized into:
+ // NSUInteger limit =
+ // ((NSUInteger (*)
+ // (id, SEL, struct __objcFastEnumerationState *, id *, NSUInteger))
+ // (void *)objc_msgSend)((id)l_collection,
+ // sel_registerName(
+ // "countByEnumeratingWithState:objects:count:"),
+ // (struct __objcFastEnumerationState *)&state,
+ // (id *)__rw_items, (NSUInteger)16);
+ buf += "_WIN_NSUInteger limit =\n\t\t";
+ SynthCountByEnumWithState(buf);
+ buf += ";\n\t";
+ /// if (limit) {
+ /// unsigned long startMutations = *enumState.mutationsPtr;
+ /// do {
+ /// unsigned long counter = 0;
+ /// do {
+ /// if (startMutations != *enumState.mutationsPtr)
+ /// objc_enumerationMutation(l_collection);
+ /// elem = (type)enumState.itemsPtr[counter++];
+ buf += "if (limit) {\n\t";
+ buf += "unsigned long startMutations = *enumState.mutationsPtr;\n\t";
+ buf += "do {\n\t\t";
+ buf += "unsigned long counter = 0;\n\t\t";
+ buf += "do {\n\t\t\t";
+ buf += "if (startMutations != *enumState.mutationsPtr)\n\t\t\t\t";
+ buf += "objc_enumerationMutation(l_collection);\n\t\t\t";
+ buf += elementName;
+ buf += " = (";
+ buf += elementTypeAsString;
+ buf += ")enumState.itemsPtr[counter++];";
+ // Replace ')' in for '(' type elem in collection ')' with all of these.
+ ReplaceText(lparenLoc, 1, buf);
+
+ /// __continue_label: ;
+ /// } while (counter < limit);
+ /// } while ((limit = [l_collection countByEnumeratingWithState:&enumState
+ /// objects:__rw_items count:16]));
+ /// elem = nil;
+ /// __break_label: ;
+ /// }
+ /// else
+ /// elem = nil;
+ /// }
+ ///
+ buf = ";\n\t";
+ buf += "__continue_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ buf += ": ;";
+ buf += "\n\t\t";
+ buf += "} while (counter < limit);\n\t";
+ buf += "} while ((limit = ";
+ SynthCountByEnumWithState(buf);
+ buf += "));\n\t";
+ buf += elementName;
+ buf += " = ((";
+ buf += elementTypeAsString;
+ buf += ")0);\n\t";
+ buf += "__break_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ buf += ": ;\n\t";
+ buf += "}\n\t";
+ buf += "else\n\t\t";
+ buf += elementName;
+ buf += " = ((";
+ buf += elementTypeAsString;
+ buf += ")0);\n\t";
+ buf += "}\n";
+
+ // Insert all these *after* the statement body.
+ // FIXME: If this should support Obj-C++, support CXXTryStmt
+ if (isa<CompoundStmt>(S->getBody())) {
+ SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(1);
+ InsertText(endBodyLoc, buf);
+ } else {
+ /* Need to treat single statements specially. For example:
+ *
+ * for (A *a in b) if (stuff()) break;
+ * for (A *a in b) xxxyy;
+ *
+ * The following code simply scans ahead to the semi to find the actual end.
+ */
+ const char *stmtBuf = SM->getCharacterData(OrigEnd);
+ const char *semiBuf = strchr(stmtBuf, ';');
+ assert(semiBuf && "Can't find ';'");
+ SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(semiBuf-stmtBuf+1);
+ InsertText(endBodyLoc, buf);
+ }
+ Stmts.pop_back();
+ ObjCBcLabelNo.pop_back();
+ return nullptr;
+}
+
+static void Write_RethrowObject(std::string &buf) {
+ buf += "{ struct _FIN { _FIN(id reth) : rethrow(reth) {}\n";
+ buf += "\t~_FIN() { if (rethrow) objc_exception_throw(rethrow); }\n";
+ buf += "\tid rethrow;\n";
+  buf += "\t} _fin_force_rethrow(_rethrow);";
+}
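+
+// The struct emitted above rethrows a pending exception from its destructor,
+// so the rethrow runs only after the rest of the synthesized cleanup block
+// has finished executing.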
+
+/// RewriteObjCSynchronizedStmt -
+/// This routine rewrites @synchronized(expr) stmt;
+/// into:
+/// objc_sync_enter(expr);
+/// @try stmt @finally { objc_sync_exit(expr); }
+///
+Stmt *RewriteModernObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
+ // Get the start location and compute the semi location.
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @synchronized location");
+
+ std::string buf;
+ SourceLocation SynchLoc = S->getAtSynchronizedLoc();
+ ConvertSourceLocationToLineDirective(SynchLoc, buf);
+ buf += "{ id _rethrow = 0; id _sync_obj = (id)";
+
+ const char *lparenBuf = startBuf;
+ while (*lparenBuf != '(') lparenBuf++;
+ ReplaceText(startLoc, lparenBuf-startBuf+1, buf);
+
+ buf = "; objc_sync_enter(_sync_obj);\n";
+ buf += "try {\n\tstruct _SYNC_EXIT { _SYNC_EXIT(id arg) : sync_exit(arg) {}";
+ buf += "\n\t~_SYNC_EXIT() {objc_sync_exit(sync_exit);}";
+ buf += "\n\tid sync_exit;";
+ buf += "\n\t} _sync_exit(_sync_obj);\n";
+
+ // We can't use S->getSynchExpr()->getLocEnd() to find the end location, since
+ // the sync expression is typically a message expression that's already
+  // been rewritten! (which implies the SourceLocations are invalid).
+ SourceLocation RParenExprLoc = S->getSynchBody()->getLocStart();
+ const char *RParenExprLocBuf = SM->getCharacterData(RParenExprLoc);
+ while (*RParenExprLocBuf != ')') RParenExprLocBuf--;
+ RParenExprLoc = startLoc.getLocWithOffset(RParenExprLocBuf-startBuf);
+
+  SourceLocation LBraceLoc = S->getSynchBody()->getLocStart();
+  const char *LBraceLocBuf = SM->getCharacterData(LBraceLoc);
+ assert (*LBraceLocBuf == '{');
+ ReplaceText(RParenExprLoc, (LBraceLocBuf - SM->getCharacterData(RParenExprLoc) + 1), buf);
+
+ SourceLocation startRBraceLoc = S->getSynchBody()->getLocEnd();
+ assert((*SM->getCharacterData(startRBraceLoc) == '}') &&
+ "bogus @synchronized block");
+
+ buf = "} catch (id e) {_rethrow = e;}\n";
+ Write_RethrowObject(buf);
+ buf += "}\n";
+ buf += "}\n";
+
+ ReplaceText(startRBraceLoc, 1, buf);
+
+ return nullptr;
+}
+
+void RewriteModernObjC::WarnAboutReturnGotoStmts(Stmt *S)
+{
+ // Perform a bottom up traversal of all children.
+ for (Stmt *SubStmt : S->children())
+ if (SubStmt)
+ WarnAboutReturnGotoStmts(SubStmt);
+
+ if (isa<ReturnStmt>(S) || isa<GotoStmt>(S)) {
+ Diags.Report(Context->getFullLoc(S->getLocStart()),
+ TryFinallyContainsReturnDiag);
+ }
+ return;
+}
+
+Stmt *RewriteModernObjC::RewriteObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
+ SourceLocation startLoc = S->getAtLoc();
+ ReplaceText(startLoc, strlen("@autoreleasepool"), "/* @autoreleasepool */");
+ ReplaceText(S->getSubStmt()->getLocStart(), 1,
+ "{ __AtAutoreleasePool __autoreleasepool; ");
+
+ return nullptr;
+}
+
+Stmt *RewriteModernObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
+ ObjCAtFinallyStmt *finalStmt = S->getFinallyStmt();
+ bool noCatch = S->getNumCatchStmts() == 0;
+ std::string buf;
+ SourceLocation TryLocation = S->getAtTryLoc();
+ ConvertSourceLocationToLineDirective(TryLocation, buf);
+
+ if (finalStmt) {
+ if (noCatch)
+ buf += "{ id volatile _rethrow = 0;\n";
+ else {
+ buf += "{ id volatile _rethrow = 0;\ntry {\n";
+ }
+ }
+ // Get the start location and compute the semi location.
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @try location");
+ if (finalStmt)
+ ReplaceText(startLoc, 1, buf);
+ else
+ // @try -> try
+ ReplaceText(startLoc, 1, "");
+
+ for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) {
+ ObjCAtCatchStmt *Catch = S->getCatchStmt(I);
+ VarDecl *catchDecl = Catch->getCatchParamDecl();
+
+ startLoc = Catch->getLocStart();
+ bool AtRemoved = false;
+ if (catchDecl) {
+ QualType t = catchDecl->getType();
+ if (const ObjCObjectPointerType *Ptr = t->getAs<ObjCObjectPointerType>()) {
+ // Should be a pointer to a class.
+ ObjCInterfaceDecl *IDecl = Ptr->getObjectType()->getInterface();
+ if (IDecl) {
+ std::string Result;
+ ConvertSourceLocationToLineDirective(Catch->getLocStart(), Result);
+
+ startBuf = SM->getCharacterData(startLoc);
+ assert((*startBuf == '@') && "bogus @catch location");
+ SourceLocation rParenLoc = Catch->getRParenLoc();
+ const char *rParenBuf = SM->getCharacterData(rParenLoc);
+
+ // _objc_exc_Foo *_e as argument to catch.
+ Result += "catch (_objc_exc_"; Result += IDecl->getNameAsString();
+ Result += " *_"; Result += catchDecl->getNameAsString();
+ Result += ")";
+ ReplaceText(startLoc, rParenBuf-startBuf+1, Result);
+ // Foo *e = (Foo *)_e;
+ Result.clear();
+ Result = "{ ";
+ Result += IDecl->getNameAsString();
+ Result += " *"; Result += catchDecl->getNameAsString();
+ Result += " = ("; Result += IDecl->getNameAsString(); Result += "*)";
+ Result += "_"; Result += catchDecl->getNameAsString();
+
+ Result += "; ";
+ SourceLocation lBraceLoc = Catch->getCatchBody()->getLocStart();
+ ReplaceText(lBraceLoc, 1, Result);
+ AtRemoved = true;
+ }
+ }
+ }
+ if (!AtRemoved)
+ // @catch -> catch
+ ReplaceText(startLoc, 1, "");
+
+ }
+ if (finalStmt) {
+ buf.clear();
+ SourceLocation FinallyLoc = finalStmt->getLocStart();
+
+ if (noCatch) {
+ ConvertSourceLocationToLineDirective(FinallyLoc, buf);
+ buf += "catch (id e) {_rethrow = e;}\n";
+ }
+ else {
+ buf += "}\n";
+ ConvertSourceLocationToLineDirective(FinallyLoc, buf);
+ buf += "catch (id e) {_rethrow = e;}\n";
+ }
+
+ SourceLocation startFinalLoc = finalStmt->getLocStart();
+ ReplaceText(startFinalLoc, 8, buf);
+ Stmt *body = finalStmt->getFinallyBody();
+ SourceLocation startFinalBodyLoc = body->getLocStart();
+ buf.clear();
+ Write_RethrowObject(buf);
+ ReplaceText(startFinalBodyLoc, 1, buf);
+
+ SourceLocation endFinalBodyLoc = body->getLocEnd();
+ ReplaceText(endFinalBodyLoc, 1, "}\n}");
+ // Now check for any return/goto statements within the @try.
+ WarnAboutReturnGotoStmts(S->getTryBody());
+ }
+
+ return nullptr;
+}
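+
+// Net effect, as an editorial sketch (helper bodies elided): with a @finally
+// clause present,
+//   @try { B } @catch (Foo *e) { C } @finally { F }
+// is rewritten along the lines of
+//   { id volatile _rethrow = 0;
+//   try {
+//   try { B }
+//   catch (_objc_exc_Foo *_e) { Foo *e = (Foo *)_e; C }
+//   }
+//   catch (id e) {_rethrow = e;}
+//   { /* rethrow object emitted by Write_RethrowObject */ F }
+//   }
+// Without a @finally, only the @try/@catch keywords are erased, leaving
+// plain C++ try/catch.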
+
+// This can't be done with ReplaceStmt(S, ThrowExpr), since
+// the throw expression is typically a message expression that's already
+// been rewritten! (which implies the SourceLocation's are invalid).
+Stmt *RewriteModernObjC::RewriteObjCThrowStmt(ObjCAtThrowStmt *S) {
+ // Get the start location and compute the semi location.
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @throw location");
+
+ std::string buf;
+ /* void objc_exception_throw(id) __attribute__((noreturn)); */
+ if (S->getThrowExpr())
+ buf = "objc_exception_throw(";
+ else
+ buf = "throw";
+
+ // Handle "@ throw" (with whitespace after the '@') correctly.
+ const char *wBuf = strchr(startBuf, 'w');
+ assert((*wBuf == 'w') && "@throw: can't find 'w'");
+ ReplaceText(startLoc, wBuf-startBuf+1, buf);
+
+ SourceLocation endLoc = S->getLocEnd();
+ const char *endBuf = SM->getCharacterData(endLoc);
+ const char *semiBuf = strchr(endBuf, ';');
+ assert((*semiBuf == ';') && "@throw: can't find ';'");
+ SourceLocation semiLoc = startLoc.getLocWithOffset(semiBuf-startBuf);
+ if (S->getThrowExpr())
+ ReplaceText(semiLoc, 1, ");");
+ return nullptr;
+}
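+
+// Editorial sketch of the result:
+//   @throw expr;   ->   objc_exception_throw(expr);
+//   @throw;        ->   throw;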
+
+Stmt *RewriteModernObjC::RewriteAtEncode(ObjCEncodeExpr *Exp) {
+ // Create a new string expression.
+ std::string StrEncoding;
+ Context->getObjCEncodingForType(Exp->getEncodedType(), StrEncoding);
+ Expr *Replacement = getStringLiteral(StrEncoding);
+ ReplaceStmt(Exp, Replacement);
+
+ // Replace this subexpr in the parent.
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return Replacement;
+}
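+
+// By way of example (editorial): the expression is replaced by a C string
+// literal holding the Objective-C type encoding, e.g. @encode(int) -> "i";
+// the exact string comes from ASTContext::getObjCEncodingForType.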
+
+Stmt *RewriteModernObjC::RewriteAtSelector(ObjCSelectorExpr *Exp) {
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ assert(SelGetUidFunctionDecl && "Can't find sel_registerName() decl");
+ // Create a call to sel_registerName("selName").
+ SmallVector<Expr*, 8> SelExprs;
+ SelExprs.push_back(getStringLiteral(Exp->getSelector().getAsString()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ SelExprs);
+ ReplaceStmt(Exp, SelExp);
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return SelExp;
+}
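+
+// Editorial sketch: @selector(foo:bar:) is replaced by the call
+//   sel_registerName("foo:bar:")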
+
+CallExpr *
+RewriteModernObjC::SynthesizeCallToFunctionDecl(FunctionDecl *FD,
+ ArrayRef<Expr *> Args,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ // Get the type, we will need to reference it in a couple spots.
+ QualType msgSendType = FD->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE =
+ new (Context) DeclRefExpr(FD, false, msgSendType, VK_LValue, SourceLocation());
+
+ // Now, we cast the reference to a pointer to the objc_msgSend type.
+ QualType pToFunc = Context->getPointerType(msgSendType);
+ ImplicitCastExpr *ICE =
+ ImplicitCastExpr::Create(*Context, pToFunc, CK_FunctionToPointerDecay,
+ DRE, nullptr, VK_RValue);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+
+ CallExpr *Exp = new (Context) CallExpr(*Context, ICE, Args,
+ FT->getCallResultType(*Context),
+ VK_RValue, EndLoc);
+ return Exp;
+}
+
+static bool scanForProtocolRefs(const char *startBuf, const char *endBuf,
+ const char *&startRef, const char *&endRef) {
+ while (startBuf < endBuf) {
+ if (*startBuf == '<')
+ startRef = startBuf; // mark the start.
+ if (*startBuf == '>') {
+ if (startRef && *startRef == '<') {
+ endRef = startBuf; // mark the end.
+ return true;
+ }
+ return false;
+ }
+ startBuf++;
+ }
+ return false;
+}
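+
+// Minimal usage sketch (hypothetical buffer, editorial only):
+//   const char *buf = "id<NSCopying> x;";
+//   const char *startRef = nullptr, *endRef = nullptr;
+//   bool found = scanForProtocolRefs(buf, buf + strlen(buf), startRef, endRef);
+//   // found == true; startRef points at '<', endRef at '>'.
+// A buffer whose first '>' precedes any '<' (or that has no '>') yields false.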
+
+static void scanToNextArgument(const char *&argRef) {
+ int angle = 0;
+ while (*argRef != ')' && (*argRef != ',' || angle > 0)) {
+ if (*argRef == '<')
+ angle++;
+ else if (*argRef == '>')
+ angle--;
+ argRef++;
+ }
+ assert(angle == 0 && "scanToNextArgument - bad protocol type syntax");
+}
+
+bool RewriteModernObjC::needToScanForQualifiers(QualType T) {
+ if (T->isObjCQualifiedIdType())
+ return true;
+ if (const PointerType *PT = T->getAs<PointerType>()) {
+ if (PT->getPointeeType()->isObjCQualifiedIdType())
+ return true;
+ }
+ if (T->isObjCObjectPointerType()) {
+ T = T->getPointeeType();
+ return T->isObjCQualifiedInterfaceType();
+ }
+ if (T->isArrayType()) {
+ QualType ElemTy = Context->getBaseElementType(T);
+ return needToScanForQualifiers(ElemTy);
+ }
+ return false;
+}
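+
+// Editorial note: types answering true here include id<P>, id<P> *, C<P> *,
+// and arrays of any of these; plain id, Class, and unqualified object
+// pointers answer false.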
+
+void RewriteModernObjC::RewriteObjCQualifiedInterfaceTypes(Expr *E) {
+ QualType Type = E->getType();
+ if (needToScanForQualifiers(Type)) {
+ SourceLocation Loc, EndLoc;
+
+ if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E)) {
+ Loc = ECE->getLParenLoc();
+ EndLoc = ECE->getRParenLoc();
+ } else {
+ Loc = E->getLocStart();
+ EndLoc = E->getLocEnd();
+ }
+ // This will defend against trying to rewrite synthesized expressions.
+ if (Loc.isInvalid() || EndLoc.isInvalid())
+ return;
+
+ const char *startBuf = SM->getCharacterData(Loc);
+ const char *endBuf = SM->getCharacterData(EndLoc);
+ const char *startRef = nullptr, *endRef = nullptr;
+ if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
+ // Get the locations of the startRef, endRef.
+ SourceLocation LessLoc = Loc.getLocWithOffset(startRef-startBuf);
+ SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-startBuf+1);
+ // Comment out the protocol references.
+ InsertText(LessLoc, "/*");
+ InsertText(GreaterLoc, "*/");
+ }
+ }
+}
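+
+// Illustrative effect on an expression cast (editorial):
+//   (NSObject<NSCopying> *)expr   ->   (NSObject/*<NSCopying>*/ *)expr
+// i.e. the protocol qualifiers are commented out rather than deleted.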
+
+void RewriteModernObjC::RewriteObjCQualifiedInterfaceTypes(Decl *Dcl) {
+ SourceLocation Loc;
+ QualType Type;
+ const FunctionProtoType *proto = nullptr;
+ if (VarDecl *VD = dyn_cast<VarDecl>(Dcl)) {
+ Loc = VD->getLocation();
+ Type = VD->getType();
+ }
+ else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Dcl)) {
+ Loc = FD->getLocation();
+ // Check for ObjC 'id' and class types that have been adorned with protocol
+ // information (id<p>, C<p>*). The protocol references need to be rewritten!
+ const FunctionType *funcType = FD->getType()->getAs<FunctionType>();
+ assert(funcType && "missing function type");
+ proto = dyn_cast<FunctionProtoType>(funcType);
+ if (!proto)
+ return;
+ Type = proto->getReturnType();
+ }
+ else if (FieldDecl *FD = dyn_cast<FieldDecl>(Dcl)) {
+ Loc = FD->getLocation();
+ Type = FD->getType();
+ }
+ else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(Dcl)) {
+ Loc = TD->getLocation();
+ Type = TD->getUnderlyingType();
+ }
+ else
+ return;
+
+ if (needToScanForQualifiers(Type)) {
+ // Since types are unique, we need to scan the buffer.
+
+ const char *endBuf = SM->getCharacterData(Loc);
+ const char *startBuf = endBuf;
+ while (*startBuf != ';' && *startBuf != '<' && startBuf != MainFileStart)
+ startBuf--; // scan backward (from the decl location) for return type.
+ const char *startRef = nullptr, *endRef = nullptr;
+ if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
+ // Get the locations of the startRef, endRef.
+ SourceLocation LessLoc = Loc.getLocWithOffset(startRef-endBuf);
+ SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-endBuf+1);
+ // Comment out the protocol references.
+ InsertText(LessLoc, "/*");
+ InsertText(GreaterLoc, "*/");
+ }
+ }
+ if (!proto)
+ return; // most likely, was a variable
+ // Now check arguments.
+ const char *startBuf = SM->getCharacterData(Loc);
+ const char *startFuncBuf = startBuf;
+ for (unsigned i = 0; i < proto->getNumParams(); i++) {
+ if (needToScanForQualifiers(proto->getParamType(i))) {
+ // Since types are unique, we need to scan the buffer.
+
+ const char *endBuf = startBuf;
+ // scan forward (from the decl location) for argument types.
+ scanToNextArgument(endBuf);
+ const char *startRef = nullptr, *endRef = nullptr;
+ if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
+ // Get the locations of the startRef, endRef.
+ SourceLocation LessLoc =
+ Loc.getLocWithOffset(startRef-startFuncBuf);
+ SourceLocation GreaterLoc =
+ Loc.getLocWithOffset(endRef-startFuncBuf+1);
+ // Comment out the protocol references.
+ InsertText(LessLoc, "/*");
+ InsertText(GreaterLoc, "*/");
+ }
+ startBuf = ++endBuf;
+ }
+ else {
+ // If the function name is derived from a macro expansion, then the
+ // argument buffer will not follow the name. Need to speak with Chris.
+ while (*startBuf && *startBuf != ')' && *startBuf != ',')
+ startBuf++; // scan forward (from the decl location) for argument types.
+ startBuf++;
+ }
+ }
+}
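+
+// Editorial sketch for a declaration: "id<Proto> f(id<Proto> p);" comes out
+// as "id/*<Proto>*/ f(id/*<Proto>*/ p);" -- the return type is located by
+// the backward scan above, each parameter by scanToNextArgument().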
+
+void RewriteModernObjC::RewriteTypeOfDecl(VarDecl *ND) {
+ QualType QT = ND->getType();
+ const Type* TypePtr = QT->getAs<Type>();
+ if (!isa<TypeOfExprType>(TypePtr))
+ return;
+ while (isa<TypeOfExprType>(TypePtr)) {
+ const TypeOfExprType *TypeOfExprTypePtr = cast<TypeOfExprType>(TypePtr);
+ QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType();
+ TypePtr = QT->getAs<Type>();
+ }
+ // FIXME. This will not work for multiple declarators; as in:
+ // __typeof__(a) b,c,d;
+ std::string TypeAsString(QT.getAsString(Context->getPrintingPolicy()));
+ SourceLocation DeclLoc = ND->getTypeSpecStartLoc();
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ if (ND->getInit()) {
+ std::string Name(ND->getNameAsString());
+ TypeAsString += " " + Name + " = ";
+ Expr *E = ND->getInit();
+ SourceLocation startLoc;
+ if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E))
+ startLoc = ECE->getLParenLoc();
+ else
+ startLoc = E->getLocStart();
+ startLoc = SM->getExpansionLoc(startLoc);
+ const char *endBuf = SM->getCharacterData(startLoc);
+ ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString);
+ }
+ else {
+ SourceLocation X = ND->getLocEnd();
+ X = SM->getExpansionLoc(X);
+ const char *endBuf = SM->getCharacterData(X);
+ ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString);
+ }
+}
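+
+// Illustrative sketch (hypothetical declarations): given
+//   int a; __typeof__(a) x = a;
+// the second declaration is rewritten to "int x = a;". Per the FIXME above,
+// multiple declarators such as "__typeof__(a) b, c, d;" are not handled.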
+
+// SynthSelGetUidFunctionDecl - SEL sel_registerName(const char *str);
+void RewriteModernObjC::SynthSelGetUidFunctionDecl() {
+ IdentifierInfo *SelGetUidIdent = &Context->Idents.get("sel_registerName");
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
+ QualType getFuncType =
+ getSimpleFunctionType(Context->getObjCSelType(), ArgTys);
+ SelGetUidFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ SelGetUidIdent, getFuncType,
+ nullptr, SC_Extern);
+}
+
+void RewriteModernObjC::RewriteFunctionDecl(FunctionDecl *FD) {
+ // declared in <objc/objc.h>
+ if (FD->getIdentifier() &&
+ FD->getName() == "sel_registerName") {
+ SelGetUidFunctionDecl = FD;
+ return;
+ }
+ RewriteObjCQualifiedInterfaceTypes(FD);
+}
+
+void RewriteModernObjC::RewriteBlockPointerType(std::string& Str, QualType Type) {
+ std::string TypeString(Type.getAsString(Context->getPrintingPolicy()));
+ const char *argPtr = TypeString.c_str();
+ if (!strchr(argPtr, '^')) {
+ Str += TypeString;
+ return;
+ }
+ while (*argPtr) {
+ Str += (*argPtr == '^' ? '*' : *argPtr);
+ argPtr++;
+ }
+}
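+
+// Editorial sketch: "void (^)(int)" is appended as "void (*)(int)"; a type
+// string containing no '^' is appended unchanged.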
+
+// FIXME. Consolidate this routine with RewriteBlockPointerType.
+void RewriteModernObjC::RewriteBlockPointerTypeVariable(std::string& Str,
+ ValueDecl *VD) {
+ QualType Type = VD->getType();
+ std::string TypeString(Type.getAsString(Context->getPrintingPolicy()));
+ const char *argPtr = TypeString.c_str();
+ int paren = 0;
+ while (*argPtr) {
+ switch (*argPtr) {
+ case '(':
+ Str += *argPtr;
+ paren++;
+ break;
+ case ')':
+ Str += *argPtr;
+ paren--;
+ break;
+ case '^':
+ Str += '*';
+ if (paren == 1)
+ Str += VD->getNameAsString();
+ break;
+ default:
+ Str += *argPtr;
+ break;
+ }
+ argPtr++;
+ }
+}
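+
+// Editorial sketch (hypothetical names): for a ValueDecl named "cb" of type
+// "int (^)(float)", this appends "int (*cb)(float)" -- the declarator name
+// is spliced in at the outermost '^' (paren depth 1).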
+
+void RewriteModernObjC::RewriteBlockLiteralFunctionDecl(FunctionDecl *FD) {
+ SourceLocation FunLocStart = FD->getTypeSpecStartLoc();
+ const FunctionType *funcType = FD->getType()->getAs<FunctionType>();
+ const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(funcType);
+ if (!proto)
+ return;
+ QualType Type = proto->getReturnType();
+ std::string FdStr = Type.getAsString(Context->getPrintingPolicy());
+ FdStr += " ";
+ FdStr += FD->getName();
+ FdStr += "(";
+ unsigned numArgs = proto->getNumParams();
+ for (unsigned i = 0; i < numArgs; i++) {
+ QualType ArgType = proto->getParamType(i);
+ RewriteBlockPointerType(FdStr, ArgType);
+ if (i+1 < numArgs)
+ FdStr += ", ";
+ }
+ if (FD->isVariadic()) {
+ FdStr += (numArgs > 0) ? ", ...);\n" : "...);\n";
+ }
+ else
+ FdStr += ");\n";
+ InsertText(FunLocStart, FdStr);
+}
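+
+// For illustration (hypothetical function f): given a definition
+//   void f(int (^blk)(int)) { ... }
+// this inserts the forward declaration
+//   void f(int (*)(int));
+// ahead of the definition, with every block pointer in the parameter list
+// rewritten to a plain function pointer.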
+
+// SynthSuperConstructorFunctionDecl - id __rw_objc_super(id obj, id super);
+void RewriteModernObjC::SynthSuperConstructorFunctionDecl() {
+ if (SuperConstructorFunctionDecl)
+ return;
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("__rw_objc_super");
+ SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ ArgTys);
+ SuperConstructorFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType,
+ nullptr, SC_Extern);
+}
+
+// SynthMsgSendFunctionDecl - id objc_msgSend(id self, SEL op, ...);
+void RewriteModernObjC::SynthMsgSendFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend");
+ SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ ArgTys, /*isVariadic=*/true);
+ MsgSendFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType, nullptr,
+ SC_Extern);
+}
+
+// SynthMsgSendSuperFunctionDecl - id objc_msgSendSuper(void);
+void RewriteModernObjC::SynthMsgSendSuperFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSendSuper");
+ SmallVector<QualType, 2> ArgTys;
+ ArgTys.push_back(Context->VoidTy);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ ArgTys, /*isVariadic=*/true);
+ MsgSendSuperFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType,
+ nullptr, SC_Extern);
+}
+
+// SynthMsgSendStretFunctionDecl - id objc_msgSend_stret(id self, SEL op, ...);
+void RewriteModernObjC::SynthMsgSendStretFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_stret");
+ SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ ArgTys, /*isVariadic=*/true);
+ MsgSendStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType,
+ nullptr, SC_Extern);
+}
+
+// SynthMsgSendSuperStretFunctionDecl -
+// id objc_msgSendSuper_stret(void);
+void RewriteModernObjC::SynthMsgSendSuperStretFunctionDecl() {
+ IdentifierInfo *msgSendIdent =
+ &Context->Idents.get("objc_msgSendSuper_stret");
+ SmallVector<QualType, 2> ArgTys;
+ ArgTys.push_back(Context->VoidTy);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ ArgTys, /*isVariadic=*/true);
+ MsgSendSuperStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent,
+ msgSendType, nullptr,
+ SC_Extern);
+}
+
+// SynthMsgSendFpretFunctionDecl - double objc_msgSend_fpret(id self, SEL op, ...);
+void RewriteModernObjC::SynthMsgSendFpretFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_fpret");
+ SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->DoubleTy,
+ ArgTys, /*isVariadic=*/true);
+ MsgSendFpretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType,
+ nullptr, SC_Extern);
+}
+
+// SynthGetClassFunctionDecl - Class objc_getClass(const char *name);
+void RewriteModernObjC::SynthGetClassFunctionDecl() {
+ IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getClass");
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
+ QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(),
+ ArgTys);
+ GetClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ getClassIdent, getClassType,
+ nullptr, SC_Extern);
+}
+
+// SynthGetSuperClassFunctionDecl - Class class_getSuperclass(Class cls);
+void RewriteModernObjC::SynthGetSuperClassFunctionDecl() {
+ IdentifierInfo *getSuperClassIdent =
+ &Context->Idents.get("class_getSuperclass");
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getObjCClassType());
+ QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(),
+ ArgTys);
+ GetSuperClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ getSuperClassIdent,
+ getClassType, nullptr,
+ SC_Extern);
+}
+
+// SynthGetMetaClassFunctionDecl - Class objc_getMetaClass(const char *name);
+void RewriteModernObjC::SynthGetMetaClassFunctionDecl() {
+ IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getMetaClass");
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
+ QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(),
+ ArgTys);
+ GetMetaClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ getClassIdent, getClassType,
+ nullptr, SC_Extern);
+}
+
+Stmt *RewriteModernObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
+ assert(Exp != nullptr && "Expected non-null ObjCStringLiteral");
+ QualType strType = getConstantStringStructType();
+
+ std::string S = "__NSConstantStringImpl_";
+
+ std::string tmpName = InFileName;
+ unsigned i;
+ for (i=0; i < tmpName.length(); i++) {
+ char c = tmpName.at(i);
+ // replace any non-alphanumeric characters with '_'.
+ if (!isAlphanumeric(c))
+ tmpName[i] = '_';
+ }
+ S += tmpName;
+ S += "_";
+ S += utostr(NumObjCStringLiterals++);
+
+ Preamble += "static __NSConstantStringImpl " + S;
+ Preamble += " __attribute__ ((section (\"__DATA, __cfstring\"))) = {__CFConstantStringClassReference,";
+ Preamble += "0x000007c8,"; // utf8_str
+ // The pretty printer for StringLiteral handles escape characters properly.
+ std::string prettyBufS;
+ llvm::raw_string_ostream prettyBuf(prettyBufS);
+ Exp->getString()->printPretty(prettyBuf, nullptr, PrintingPolicy(LangOpts));
+ Preamble += prettyBuf.str();
+ Preamble += ",";
+ Preamble += utostr(Exp->getString()->getByteLength()) + "};\n";
+
+ VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
+ SourceLocation(), &Context->Idents.get(S),
+ strType, nullptr, SC_Static);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false, strType, VK_LValue,
+ SourceLocation());
+ Expr *Unop = new (Context) UnaryOperator(DRE, UO_AddrOf,
+ Context->getPointerType(DRE->getType()),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ // cast to NSConstantString *
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(),
+ CK_CPointerToObjCPointerCast, Unop);
+ ReplaceStmt(Exp, cast);
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return cast;
+}
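+
+// Editorial sketch (hypothetical file t.m): @"hi" appends to the preamble
+// roughly
+//   static __NSConstantStringImpl __NSConstantStringImpl_t_m_0
+//     __attribute__ ((section ("__DATA, __cfstring"))) =
+//       {__CFConstantStringClassReference,0x000007c8,"hi",2};
+// and the literal itself is replaced by a cast of the form
+//   (NSString *)&__NSConstantStringImpl_t_m_0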
+
+Stmt *RewriteModernObjC::RewriteObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Exp) {
+ unsigned IntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
+
+ Expr *FlagExp = IntegerLiteral::Create(*Context,
+ llvm::APInt(IntSize, Exp->getValue()),
+ Context->IntTy, Exp->getLocation());
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Context->ObjCBuiltinBoolTy,
+ CK_BitCast, FlagExp);
+ ParenExpr *PE = new (Context) ParenExpr(Exp->getLocation(), Exp->getExprLoc(),
+ cast);
+ ReplaceStmt(Exp, PE);
+ return PE;
+}
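+
+// Rough editorial sketch: __objc_yes / __objc_no come out as parenthesized
+// casts along the lines of ((BOOL)1) / ((BOOL)0), give or take the exact
+// spelling of the builtin BOOL type.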
+
+Stmt *RewriteModernObjC::RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp) {
+ // synthesize declaration of helper functions needed in this routine.
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ // use objc_msgSend() for all.
+ if (!MsgSendFunctionDecl)
+ SynthMsgSendFunctionDecl();
+ if (!GetClassFunctionDecl)
+ SynthGetClassFunctionDecl();
+
+ FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
+ SourceLocation StartLoc = Exp->getLocStart();
+ SourceLocation EndLoc = Exp->getLocEnd();
+
+ // Synthesize a call to objc_msgSend().
+ SmallVector<Expr*, 4> MsgExprs;
+ SmallVector<Expr*, 4> ClsExprs;
+
+ // Create a call to objc_getClass("<BoxingClass>"). It will be the 1st argument.
+ ObjCMethodDecl *BoxingMethod = Exp->getBoxingMethod();
+ ObjCInterfaceDecl *BoxingClass = BoxingMethod->getClassInterface();
+
+ IdentifierInfo *clsName = BoxingClass->getIdentifier();
+ ClsExprs.push_back(getStringLiteral(clsName->getName()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, ClsExprs,
+ StartLoc, EndLoc);
+ MsgExprs.push_back(Cls);
+
+ // Create a call to sel_registerName("<BoxingMethod>:"), etc.
+ // It will be the 2nd argument.
+ SmallVector<Expr*, 4> SelExprs;
+ SelExprs.push_back(
+ getStringLiteral(BoxingMethod->getSelector().getAsString()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ SelExprs, StartLoc, EndLoc);
+ MsgExprs.push_back(SelExp);
+
+ // The user-provided sub-expression is the 3rd, and last, argument.
+ Expr *subExpr = Exp->getSubExpr();
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(subExpr)) {
+ QualType type = ICE->getType();
+ const Expr *SubExpr = ICE->IgnoreParenImpCasts();
+ CastKind CK = CK_BitCast;
+ if (SubExpr->getType()->isIntegralType(*Context) && type->isBooleanType())
+ CK = CK_IntegralToBoolean;
+ subExpr = NoTypeInfoCStyleCastExpr(Context, type, CK, subExpr);
+ }
+ MsgExprs.push_back(subExpr);
+
+ SmallVector<QualType, 4> ArgTypes;
+ ArgTypes.push_back(Context->getObjCClassType());
+ ArgTypes.push_back(Context->getObjCSelType());
+ for (const auto PI : BoxingMethod->parameters())
+ ArgTypes.push_back(PI->getType());
+
+ QualType returnType = Exp->getType();
+ // Get the type, we will need to reference it in a couple spots.
+ QualType msgSendType = MsgSendFlavor->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
+ VK_LValue, SourceLocation());
+
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->VoidTy),
+ CK_BitCast, DRE);
+
+ // Now do the "normal" pointer to function cast.
+ QualType castType =
+ getSimpleFunctionType(returnType, ArgTypes, BoxingMethod->isVariadic());
+ castType = Context->getPointerType(castType);
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
+ cast);
+
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+ CallExpr *CE = new (Context)
+ CallExpr(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, EndLoc);
+ ReplaceStmt(Exp, CE);
+ return CE;
+}
+
+Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) {
+ // synthesize declaration of helper functions needed in this routine.
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ // use objc_msgSend() for all.
+ if (!MsgSendFunctionDecl)
+ SynthMsgSendFunctionDecl();
+ if (!GetClassFunctionDecl)
+ SynthGetClassFunctionDecl();
+
+ FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
+ SourceLocation StartLoc = Exp->getLocStart();
+ SourceLocation EndLoc = Exp->getLocEnd();
+
+ // Build the expression: __NSContainer_literal(int, ...).arr
+ QualType IntQT = Context->IntTy;
+ QualType NSArrayFType =
+ getSimpleFunctionType(Context->VoidTy, IntQT, true);
+ std::string NSArrayFName("__NSContainer_literal");
+ FunctionDecl *NSArrayFD = SynthBlockInitFunctionDecl(NSArrayFName);
+ DeclRefExpr *NSArrayDRE =
+ new (Context) DeclRefExpr(NSArrayFD, false, NSArrayFType, VK_RValue,
+ SourceLocation());
+
+ SmallVector<Expr*, 16> InitExprs;
+ unsigned NumElements = Exp->getNumElements();
+ unsigned UnsignedIntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
+ Expr *count = IntegerLiteral::Create(*Context,
+ llvm::APInt(UnsignedIntSize, NumElements),
+ Context->UnsignedIntTy, SourceLocation());
+ InitExprs.push_back(count);
+ for (unsigned i = 0; i < NumElements; i++)
+ InitExprs.push_back(Exp->getElement(i));
+ Expr *NSArrayCallExpr =
+ new (Context) CallExpr(*Context, NSArrayDRE, InitExprs,
+ NSArrayFType, VK_LValue, SourceLocation());
+
+ FieldDecl *ARRFD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get("arr"),
+ Context->getPointerType(Context->VoidPtrTy),
+ nullptr, /*BitWidth=*/nullptr,
+ /*Mutable=*/true, ICIS_NoInit);
+ MemberExpr *ArrayLiteralME = new (Context)
+ MemberExpr(NSArrayCallExpr, false, SourceLocation(), ARRFD,
+ SourceLocation(), ARRFD->getType(), VK_LValue, OK_Ordinary);
+ QualType ConstIdT = Context->getObjCIdType().withConst();
+ CStyleCastExpr * ArrayLiteralObjects =
+ NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(ConstIdT),
+ CK_BitCast,
+ ArrayLiteralME);
+
+ // Synthesize a call to objc_msgSend().
+ SmallVector<Expr*, 32> MsgExprs;
+ SmallVector<Expr*, 4> ClsExprs;
+ QualType expType = Exp->getType();
+
+ // Create a call to objc_getClass("NSArray"). It will be the 1st argument.
+ ObjCInterfaceDecl *Class =
+ expType->getPointeeType()->getAs<ObjCObjectType>()->getInterface();
+
+ IdentifierInfo *clsName = Class->getIdentifier();
+ ClsExprs.push_back(getStringLiteral(clsName->getName()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, ClsExprs,
+ StartLoc, EndLoc);
+ MsgExprs.push_back(Cls);
+
+ // Create a call to sel_registerName("arrayWithObjects:count:").
+ // It will be the 2nd argument.
+ SmallVector<Expr*, 4> SelExprs;
+ ObjCMethodDecl *ArrayMethod = Exp->getArrayWithObjectsMethod();
+ SelExprs.push_back(
+ getStringLiteral(ArrayMethod->getSelector().getAsString()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ SelExprs, StartLoc, EndLoc);
+ MsgExprs.push_back(SelExp);
+
+ // (const id [])objects
+ MsgExprs.push_back(ArrayLiteralObjects);
+
+ // (NSUInteger)cnt
+ Expr *cnt = IntegerLiteral::Create(*Context,
+ llvm::APInt(UnsignedIntSize, NumElements),
+ Context->UnsignedIntTy, SourceLocation());
+ MsgExprs.push_back(cnt);
+
+
+ SmallVector<QualType, 4> ArgTypes;
+ ArgTypes.push_back(Context->getObjCClassType());
+ ArgTypes.push_back(Context->getObjCSelType());
+ for (const auto *PI : ArrayMethod->params())
+ ArgTypes.push_back(PI->getType());
+
+ QualType returnType = Exp->getType();
+ // Get the type, we will need to reference it in a couple spots.
+ QualType msgSendType = MsgSendFlavor->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
+ VK_LValue, SourceLocation());
+
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->VoidTy),
+ CK_BitCast, DRE);
+
+ // Now do the "normal" pointer to function cast.
+ QualType castType =
+ getSimpleFunctionType(returnType, ArgTypes, ArrayMethod->isVariadic());
+ castType = Context->getPointerType(castType);
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
+ cast);
+
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+ CallExpr *CE = new (Context)
+ CallExpr(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, EndLoc);
+ ReplaceStmt(Exp, CE);
+ return CE;
+}
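+
+// Editorial sketch of the net result for @[ a, b ] (types approximate):
+//   ((NSArray *(*)(id, SEL, const id *, NSUInteger))(void *)objc_msgSend)(
+//       objc_getClass("NSArray"),
+//       sel_registerName("arrayWithObjects:count:"),
+//       (const id *)__NSContainer_literal(2, a, b).arr, 2);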
+
+Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral *Exp) {
+ // synthesize declaration of helper functions needed in this routine.
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ // use objc_msgSend() for all.
+ if (!MsgSendFunctionDecl)
+ SynthMsgSendFunctionDecl();
+ if (!GetClassFunctionDecl)
+ SynthGetClassFunctionDecl();
+
+ FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
+ SourceLocation StartLoc = Exp->getLocStart();
+ SourceLocation EndLoc = Exp->getLocEnd();
+
+ // Build the expression: __NSContainer_literal(int, ...).arr
+ QualType IntQT = Context->IntTy;
+ QualType NSDictFType =
+ getSimpleFunctionType(Context->VoidTy, IntQT, true);
+ std::string NSDictFName("__NSContainer_literal");
+ FunctionDecl *NSDictFD = SynthBlockInitFunctionDecl(NSDictFName);
+ DeclRefExpr *NSDictDRE =
+ new (Context) DeclRefExpr(NSDictFD, false, NSDictFType, VK_RValue,
+ SourceLocation());
+
+ SmallVector<Expr*, 16> KeyExprs;
+ SmallVector<Expr*, 16> ValueExprs;
+
+ unsigned NumElements = Exp->getNumElements();
+ unsigned UnsignedIntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
+ Expr *count = IntegerLiteral::Create(*Context,
+ llvm::APInt(UnsignedIntSize, NumElements),
+ Context->UnsignedIntTy, SourceLocation());
+ KeyExprs.push_back(count);
+ ValueExprs.push_back(count);
+ for (unsigned i = 0; i < NumElements; i++) {
+ ObjCDictionaryElement Element = Exp->getKeyValueElement(i);
+ KeyExprs.push_back(Element.Key);
+ ValueExprs.push_back(Element.Value);
+ }
+
+ // (const id [])objects
+ Expr *NSValueCallExpr =
+ new (Context) CallExpr(*Context, NSDictDRE, ValueExprs,
+ NSDictFType, VK_LValue, SourceLocation());
+
+ FieldDecl *ARRFD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get("arr"),
+ Context->getPointerType(Context->VoidPtrTy),
+ nullptr, /*BitWidth=*/nullptr,
+ /*Mutable=*/true, ICIS_NoInit);
+ MemberExpr *DictLiteralValueME = new (Context)
+ MemberExpr(NSValueCallExpr, false, SourceLocation(), ARRFD,
+ SourceLocation(), ARRFD->getType(), VK_LValue, OK_Ordinary);
+ QualType ConstIdT = Context->getObjCIdType().withConst();
+ CStyleCastExpr * DictValueObjects =
+ NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(ConstIdT),
+ CK_BitCast,
+ DictLiteralValueME);
+ // (const id <NSCopying> [])keys
+ Expr *NSKeyCallExpr =
+ new (Context) CallExpr(*Context, NSDictDRE, KeyExprs,
+ NSDictFType, VK_LValue, SourceLocation());
+
+ MemberExpr *DictLiteralKeyME = new (Context)
+ MemberExpr(NSKeyCallExpr, false, SourceLocation(), ARRFD,
+ SourceLocation(), ARRFD->getType(), VK_LValue, OK_Ordinary);
+
+ CStyleCastExpr * DictKeyObjects =
+ NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(ConstIdT),
+ CK_BitCast,
+ DictLiteralKeyME);
+
+
+
+ // Synthesize a call to objc_msgSend().
+ SmallVector<Expr*, 32> MsgExprs;
+ SmallVector<Expr*, 4> ClsExprs;
+ QualType expType = Exp->getType();
+
+ // Create a call to objc_getClass("NSDictionary"). It will be the 1st argument.
+ ObjCInterfaceDecl *Class =
+ expType->getPointeeType()->getAs<ObjCObjectType>()->getInterface();
+
+ IdentifierInfo *clsName = Class->getIdentifier();
+ ClsExprs.push_back(getStringLiteral(clsName->getName()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, ClsExprs,
+ StartLoc, EndLoc);
+ MsgExprs.push_back(Cls);
+
+ // Create a call to sel_registerName("dictionaryWithObjects:forKeys:count:").
+ // It will be the 2nd argument.
+ SmallVector<Expr*, 4> SelExprs;
+ ObjCMethodDecl *DictMethod = Exp->getDictWithObjectsMethod();
+ SelExprs.push_back(getStringLiteral(DictMethod->getSelector().getAsString()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ SelExprs, StartLoc, EndLoc);
+ MsgExprs.push_back(SelExp);
+
+ // (const id [])objects
+ MsgExprs.push_back(DictValueObjects);
+
+ // (const id <NSCopying> [])keys
+ MsgExprs.push_back(DictKeyObjects);
+
+ // (NSUInteger)cnt
+ Expr *cnt = IntegerLiteral::Create(*Context,
+ llvm::APInt(UnsignedIntSize, NumElements),
+ Context->UnsignedIntTy, SourceLocation());
+ MsgExprs.push_back(cnt);
+
+
+ SmallVector<QualType, 8> ArgTypes;
+ ArgTypes.push_back(Context->getObjCClassType());
+ ArgTypes.push_back(Context->getObjCSelType());
+ for (const auto *PI : DictMethod->params()) {
+ QualType T = PI->getType();
+ if (const PointerType* PT = T->getAs<PointerType>()) {
+ QualType PointeeTy = PT->getPointeeType();
+ convertToUnqualifiedObjCType(PointeeTy);
+ T = Context->getPointerType(PointeeTy);
+ }
+ ArgTypes.push_back(T);
+ }
+
+ QualType returnType = Exp->getType();
+ // Get the type, we will need to reference it in a couple spots.
+ QualType msgSendType = MsgSendFlavor->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
+ VK_LValue, SourceLocation());
+
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->VoidTy),
+ CK_BitCast, DRE);
+
+ // Now do the "normal" pointer to function cast.
+ QualType castType =
+ getSimpleFunctionType(returnType, ArgTypes, DictMethod->isVariadic());
+ castType = Context->getPointerType(castType);
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
+ cast);
+
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+ CallExpr *CE = new (Context)
+ CallExpr(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, EndLoc);
+ ReplaceStmt(Exp, CE);
+ return CE;
+}
+
+// struct __rw_objc_super {
+// struct objc_object *object; struct objc_object *superClass;
+// };
+QualType RewriteModernObjC::getSuperStructType() {
+ if (!SuperStructDecl) {
+ SuperStructDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("__rw_objc_super"));
+ QualType FieldTypes[2];
+
+ // struct objc_object *object;
+ FieldTypes[0] = Context->getObjCIdType();
+ // struct objc_object *superClass;
+ FieldTypes[1] = Context->getObjCIdType();
+
+ // Create fields
+ for (unsigned i = 0; i < 2; ++i) {
+ SuperStructDecl->addDecl(FieldDecl::Create(*Context, SuperStructDecl,
+ SourceLocation(),
+ SourceLocation(), nullptr,
+ FieldTypes[i], nullptr,
+ /*BitWidth=*/nullptr,
+ /*Mutable=*/false,
+ ICIS_NoInit));
+ }
+
+ SuperStructDecl->completeDefinition();
+ }
+ return Context->getTagDeclType(SuperStructDecl);
+}
+
+QualType RewriteModernObjC::getConstantStringStructType() {
+ if (!ConstantStringDecl) {
+ ConstantStringDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("__NSConstantStringImpl"));
+ QualType FieldTypes[4];
+
+ // struct objc_object *receiver;
+ FieldTypes[0] = Context->getObjCIdType();
+ // int flags;
+ FieldTypes[1] = Context->IntTy;
+ // char *str;
+ FieldTypes[2] = Context->getPointerType(Context->CharTy);
+ // long length;
+ FieldTypes[3] = Context->LongTy;
+
+ // Create fields
+ for (unsigned i = 0; i < 4; ++i) {
+ ConstantStringDecl->addDecl(FieldDecl::Create(*Context,
+ ConstantStringDecl,
+ SourceLocation(),
+ SourceLocation(), nullptr,
+ FieldTypes[i], nullptr,
+ /*BitWidth=*/nullptr,
+ /*Mutable=*/true,
+ ICIS_NoInit));
+ }
+
+ ConstantStringDecl->completeDefinition();
+ }
+ return Context->getTagDeclType(ConstantStringDecl);
+}
+
+/// getFunctionSourceLocation - returns the start location of a function
+/// definition. A complication arises when the function is declared
+/// extern "C", either directly or inside an extern "C" {...} block.
+static SourceLocation getFunctionSourceLocation (RewriteModernObjC &R,
+ FunctionDecl *FD) {
+ if (FD->isExternC() && !FD->isMain()) {
+ const DeclContext *DC = FD->getDeclContext();
+ if (const LinkageSpecDecl *LSD = dyn_cast<LinkageSpecDecl>(DC))
+ // For extern "C" <decl> (no braces), start at the 'extern' keyword;
+ // the braced extern "C" {...} form falls through to the decl's own location.
+ if (!LSD->getRBraceLoc().isValid())
+ return LSD->getExternLoc();
+ }
+ if (FD->getStorageClass() != SC_None)
+ R.RewriteBlockLiteralFunctionDecl(FD);
+ return FD->getTypeSpecStartLoc();
+}
+
+void RewriteModernObjC::RewriteLineDirective(const Decl *D) {
+
+ SourceLocation Location = D->getLocation();
+
+ if (Location.isFileID() && GenerateLineInfo) {
+ std::string LineString("\n#line ");
+ PresumedLoc PLoc = SM->getPresumedLoc(Location);
+ LineString += utostr(PLoc.getLine());
+ LineString += " \"";
+ LineString += Lexer::Stringify(PLoc.getFilename());
+ if (isa<ObjCMethodDecl>(D))
+ LineString += "\"";
+ else LineString += "\"\n";
+
+ Location = D->getLocStart();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isExternC() && !FD->isMain()) {
+ const DeclContext *DC = FD->getDeclContext();
+ if (const LinkageSpecDecl *LSD = dyn_cast<LinkageSpecDecl>(DC))
+ // For extern "C" <decl> (no braces), use the 'extern' keyword's
+ // location; the braced extern "C" {...} form keeps the decl's location.
+ if (!LSD->getRBraceLoc().isValid())
+ Location = LSD->getExternLoc();
+ }
+ }
+ InsertText(Location, LineString);
+ }
+}
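+
+// Editorial sketch (hypothetical file input.m): when GenerateLineInfo is on,
+// the text inserted before a definition at line 12 is
+//   #line 12 "input.m"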
+
+/// SynthMsgSendStretCallExpr - This routine translates a message expression
+/// into a call to the objc_msgSend_stret() entry point. The tricky part is
+/// that the nil check on the receiver must be performed before calling
+/// objc_msgSend_stret.
+/// MsgSendStretFlavor - function declaration objc_msgSend_stret(...)
+/// returnType - result type of the method being synthesized.
+/// ArgTypes - types of the arguments passed to objc_msgSend_stret, starting
+/// with the receiver type.
+/// MsgExprs - list of argument expressions passed to objc_msgSend_stret,
+/// starting with the receiver.
+/// Method - the method being rewritten.
+Expr *RewriteModernObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor,
+ QualType returnType,
+ SmallVectorImpl<QualType> &ArgTypes,
+ SmallVectorImpl<Expr*> &MsgExprs,
+ ObjCMethodDecl *Method) {
+ // Now do the "normal" pointer to function cast.
+ QualType castType = getSimpleFunctionType(returnType, ArgTypes,
+ Method ? Method->isVariadic()
+ : false);
+ castType = Context->getPointerType(castType);
+
+ // Build a struct type to contain the result of the objc_msgSend_stret call.
+ static unsigned stretCount=0;
+ std::string name = "__Stret"; name += utostr(stretCount);
+ std::string str =
+ "extern \"C\" void * __cdecl memset(void *_Dst, int _Val, size_t _Size);\n";
+ str += "namespace {\n";
+ str += "struct "; str += name;
+ str += " {\n\t";
+ str += name;
+ str += "(id receiver, SEL sel";
+ for (unsigned i = 2; i < ArgTypes.size(); i++) {
+ std::string ArgName = "arg"; ArgName += utostr(i);
+ ArgTypes[i].getAsStringInternal(ArgName, Context->getPrintingPolicy());
+ str += ", "; str += ArgName;
+ }
+ // could be vararg.
+ for (unsigned i = ArgTypes.size(); i < MsgExprs.size(); i++) {
+ std::string ArgName = "arg"; ArgName += utostr(i);
+ MsgExprs[i]->getType().getAsStringInternal(ArgName,
+ Context->getPrintingPolicy());
+ str += ", "; str += ArgName;
+ }
+
+ str += ") {\n";
+ str += "\t unsigned size = sizeof(";
+ str += returnType.getAsString(Context->getPrintingPolicy()); str += ");\n";
+
+ str += "\t if (size == 1 || size == 2 || size == 4 || size == 8)\n";
+
+ str += "\t s = (("; str += castType.getAsString(Context->getPrintingPolicy());
+ str += ")(void *)objc_msgSend)(receiver, sel";
+ for (unsigned i = 2; i < ArgTypes.size(); i++) {
+ str += ", arg"; str += utostr(i);
+ }
+ // could be vararg.
+ for (unsigned i = ArgTypes.size(); i < MsgExprs.size(); i++) {
+ str += ", arg"; str += utostr(i);
+ }
+ str+= ");\n";
+
+ str += "\t else if (receiver == 0)\n";
+ str += "\t memset((void*)&s, 0, sizeof(s));\n";
+ str += "\t else\n";
+
+
+ str += "\t s = (("; str += castType.getAsString(Context->getPrintingPolicy());
+ str += ")(void *)objc_msgSend_stret)(receiver, sel";
+ for (unsigned i = 2; i < ArgTypes.size(); i++) {
+ str += ", arg"; str += utostr(i);
+ }
+ // could be vararg.
+ for (unsigned i = ArgTypes.size(); i < MsgExprs.size(); i++) {
+ str += ", arg"; str += utostr(i);
+ }
+ str += ");\n";
+
+
+ str += "\t}\n";
+ str += "\t"; str += returnType.getAsString(Context->getPrintingPolicy());
+ str += " s;\n";
+ str += "};\n};\n\n";
+ SourceLocation FunLocStart;
+ if (CurFunctionDef)
+ FunLocStart = getFunctionSourceLocation(*this, CurFunctionDef);
+ else {
+ assert(CurMethodDef && "SynthMsgSendStretCallExpr - CurMethodDef is null");
+ FunLocStart = CurMethodDef->getLocStart();
+ }
+
+ InsertText(FunLocStart, str);
+ ++stretCount;
+
+ // AST for __Stretn(receiver, args).s;
+ IdentifierInfo *ID = &Context->Idents.get(name);
+ FunctionDecl *FD = FunctionDecl::Create(*Context, TUDecl, SourceLocation(),
+ SourceLocation(), ID, castType,
+ nullptr, SC_Extern, false, false);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, castType, VK_RValue,
+ SourceLocation());
+ CallExpr *STCE = new (Context) CallExpr(*Context, DRE, MsgExprs,
+ castType, VK_LValue, SourceLocation());
+
+ FieldDecl *FieldD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get("s"),
+ returnType, nullptr,
+ /*BitWidth=*/nullptr,
+ /*Mutable=*/true, ICIS_NoInit);
+ MemberExpr *ME = new (Context)
+ MemberExpr(STCE, false, SourceLocation(), FieldD, SourceLocation(),
+ FieldD->getType(), VK_LValue, OK_Ordinary);
+
+ return ME;
+}
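+
+// Editorial sketch of the helper assembled above, for one user argument of
+// (hypothetical) type T and return type R, with stretCount == 0:
+//   namespace {
+//   struct __Stret0 {
+//     __Stret0(id receiver, SEL sel, T arg2) {
+//       unsigned size = sizeof(R);
+//       if (size == 1 || size == 2 || size == 4 || size == 8)
+//         s = ((R (*)(id, SEL, T))(void *)objc_msgSend)(receiver, sel, arg2);
+//       else if (receiver == 0)
+//         memset((void*)&s, 0, sizeof(s));
+//       else
+//         s = ((R (*)(id, SEL, T))(void *)objc_msgSend_stret)(receiver, sel, arg2);
+//     }
+//     R s;
+//   };
+//   };
+// The message expression is then replaced by __Stret0(receiver, sel, ...).s.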
+
+Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ if (!MsgSendFunctionDecl)
+ SynthMsgSendFunctionDecl();
+ if (!MsgSendSuperFunctionDecl)
+ SynthMsgSendSuperFunctionDecl();
+ if (!MsgSendStretFunctionDecl)
+ SynthMsgSendStretFunctionDecl();
+ if (!MsgSendSuperStretFunctionDecl)
+ SynthMsgSendSuperStretFunctionDecl();
+ if (!MsgSendFpretFunctionDecl)
+ SynthMsgSendFpretFunctionDecl();
+ if (!GetClassFunctionDecl)
+ SynthGetClassFunctionDecl();
+ if (!GetSuperClassFunctionDecl)
+ SynthGetSuperClassFunctionDecl();
+ if (!GetMetaClassFunctionDecl)
+ SynthGetMetaClassFunctionDecl();
+
+ // default to objc_msgSend().
+ FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
+ // May need to use objc_msgSend_stret() as well.
+ FunctionDecl *MsgSendStretFlavor = nullptr;
+ if (ObjCMethodDecl *mDecl = Exp->getMethodDecl()) {
+ QualType resultType = mDecl->getReturnType();
+ if (resultType->isRecordType())
+ MsgSendStretFlavor = MsgSendStretFunctionDecl;
+ else if (resultType->isRealFloatingType())
+ MsgSendFlavor = MsgSendFpretFunctionDecl;
+ }
+
+ // Synthesize a call to objc_msgSend().
+ SmallVector<Expr*, 8> MsgExprs;
+ switch (Exp->getReceiverKind()) {
+ case ObjCMessageExpr::SuperClass: {
+ MsgSendFlavor = MsgSendSuperFunctionDecl;
+ if (MsgSendStretFlavor)
+ MsgSendStretFlavor = MsgSendSuperStretFunctionDecl;
+ assert(MsgSendFlavor && "MsgSendFlavor is NULL!");
+
+ ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface();
+
+ SmallVector<Expr*, 4> InitExprs;
+
+ // set the receiver to self, the first argument to all methods.
+ InitExprs.push_back(
+ NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK_BitCast,
+ new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
+ false,
+ Context->getObjCIdType(),
+ VK_RValue,
+ SourceLocation()))
+ ); // set the 'receiver'.
+
+ // (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
+ SmallVector<Expr*, 8> ClsExprs;
+ ClsExprs.push_back(getStringLiteral(ClassDecl->getIdentifier()->getName()));
+ // (Class)objc_getClass("CurrentClass")
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetMetaClassFunctionDecl,
+ ClsExprs, StartLoc, EndLoc);
+ ClsExprs.clear();
+ ClsExprs.push_back(Cls);
+ Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl, ClsExprs,
+ StartLoc, EndLoc);
+
+ // (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
+ // To turn off a warning, type-cast to 'id'
+ InitExprs.push_back( // set 'super class', using class_getSuperclass().
+ NoTypeInfoCStyleCastExpr(Context,
+ Context->getObjCIdType(),
+ CK_BitCast, Cls));
+ // struct __rw_objc_super
+ QualType superType = getSuperStructType();
+ Expr *SuperRep;
+
+ if (LangOpts.MicrosoftExt) {
+ SynthSuperConstructorFunctionDecl();
+ // Simulate a constructor call...
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperConstructorFunctionDecl,
+ false, superType, VK_LValue,
+ SourceLocation());
+ SuperRep = new (Context) CallExpr(*Context, DRE, InitExprs,
+ superType, VK_LValue,
+ SourceLocation());
+ // The code for super is a little tricky to prevent collision with
+ // the structure definition in the header. The rewriter has its own
+ // internal definition (__rw_objc_super) that it uses. This is why
+ // we need the cast below. For example:
+ // (struct __rw_objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
+ //
+ SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ SuperRep = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(superType),
+ CK_BitCast, SuperRep);
+ } else {
+ // (struct __rw_objc_super) { <exprs from above> }
+ InitListExpr *ILE =
+ new (Context) InitListExpr(*Context, SourceLocation(), InitExprs,
+ SourceLocation());
+ TypeSourceInfo *superTInfo
+ = Context->getTrivialTypeSourceInfo(superType);
+ SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo,
+ superType, VK_LValue,
+ ILE, false);
+ // struct __rw_objc_super *
+ SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ }
+ MsgExprs.push_back(SuperRep);
+ break;
+ }
+
+ case ObjCMessageExpr::Class: {
+ SmallVector<Expr*, 8> ClsExprs;
+ ObjCInterfaceDecl *Class
+ = Exp->getClassReceiver()->getAs<ObjCObjectType>()->getInterface();
+ IdentifierInfo *clsName = Class->getIdentifier();
+ ClsExprs.push_back(getStringLiteral(clsName->getName()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, ClsExprs,
+ StartLoc, EndLoc);
+ CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
+ Context->getObjCIdType(),
+ CK_BitCast, Cls);
+ MsgExprs.push_back(ArgExpr);
+ break;
+ }
+
+ case ObjCMessageExpr::SuperInstance:{
+ MsgSendFlavor = MsgSendSuperFunctionDecl;
+ if (MsgSendStretFlavor)
+ MsgSendStretFlavor = MsgSendSuperStretFunctionDecl;
+ assert(MsgSendFlavor && "MsgSendFlavor is NULL!");
+ ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface();
+ SmallVector<Expr*, 4> InitExprs;
+
+ InitExprs.push_back(
+ NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK_BitCast,
+ new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
+ false,
+ Context->getObjCIdType(),
+ VK_RValue, SourceLocation()))
+ ); // set the 'receiver'.
+
+ // (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
+ SmallVector<Expr*, 8> ClsExprs;
+ ClsExprs.push_back(getStringLiteral(ClassDecl->getIdentifier()->getName()));
+ // (Class)objc_getClass("CurrentClass")
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, ClsExprs,
+ StartLoc, EndLoc);
+ ClsExprs.clear();
+ ClsExprs.push_back(Cls);
+ Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl, ClsExprs,
+ StartLoc, EndLoc);
+
+ // (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
+ // To turn off a warning, type-cast to 'id'
+ InitExprs.push_back(
+ // set 'super class', using class_getSuperclass().
+ NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK_BitCast, Cls));
+ // struct __rw_objc_super
+ QualType superType = getSuperStructType();
+ Expr *SuperRep;
+
+ if (LangOpts.MicrosoftExt) {
+ SynthSuperConstructorFunctionDecl();
+ // Simulate a constructor call...
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperConstructorFunctionDecl,
+ false, superType, VK_LValue,
+ SourceLocation());
+ SuperRep = new (Context) CallExpr(*Context, DRE, InitExprs,
+ superType, VK_LValue, SourceLocation());
+ // The code for super is a little tricky to prevent collision with
+ // the structure definition in the header. The rewriter has its own
+ // internal definition (__rw_objc_super) that it uses. This is why
+ // we need the cast below. For example:
+ // (struct __rw_objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
+ //
+ SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ SuperRep = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(superType),
+ CK_BitCast, SuperRep);
+ } else {
+ // (struct __rw_objc_super) { <exprs from above> }
+ InitListExpr *ILE =
+ new (Context) InitListExpr(*Context, SourceLocation(), InitExprs,
+ SourceLocation());
+ TypeSourceInfo *superTInfo
+ = Context->getTrivialTypeSourceInfo(superType);
+ SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo,
+ superType, VK_RValue, ILE,
+ false);
+ }
+ MsgExprs.push_back(SuperRep);
+ break;
+ }
+
+ case ObjCMessageExpr::Instance: {
+ // Remove all type-casts because they may contain objc-style types; e.g.
+ // Foo<Proto> *.
+ Expr *recExpr = Exp->getInstanceReceiver();
+ while (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(recExpr))
+ recExpr = CE->getSubExpr();
+ CastKind CK = recExpr->getType()->isObjCObjectPointerType()
+ ? CK_BitCast : recExpr->getType()->isBlockPointerType()
+ ? CK_BlockPointerToObjCPointerCast
+ : CK_CPointerToObjCPointerCast;
+
+ recExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK, recExpr);
+ MsgExprs.push_back(recExpr);
+ break;
+ }
+ }
+
+ // Create a call to sel_registerName("selName"), it will be the 2nd argument.
+ SmallVector<Expr*, 8> SelExprs;
+ SelExprs.push_back(getStringLiteral(Exp->getSelector().getAsString()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ SelExprs, StartLoc, EndLoc);
+ MsgExprs.push_back(SelExp);
+
+ // Now push any user supplied arguments.
+ for (unsigned i = 0; i < Exp->getNumArgs(); i++) {
+ Expr *userExpr = Exp->getArg(i);
+ // Make all implicit casts explicit...ICE comes in handy:-)
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(userExpr)) {
+ // Reuse the ICE type, it is exactly what the doctor ordered.
+ QualType type = ICE->getType();
+ if (needToScanForQualifiers(type))
+ type = Context->getObjCIdType();
+ // Make sure we convert "type (^)(...)" to "type (*)(...)".
+ (void)convertBlockPointerToFunctionPointer(type);
+ const Expr *SubExpr = ICE->IgnoreParenImpCasts();
+ CastKind CK;
+ if (SubExpr->getType()->isIntegralType(*Context) &&
+ type->isBooleanType()) {
+ CK = CK_IntegralToBoolean;
+ } else if (type->isObjCObjectPointerType()) {
+ if (SubExpr->getType()->isBlockPointerType()) {
+ CK = CK_BlockPointerToObjCPointerCast;
+ } else if (SubExpr->getType()->isPointerType()) {
+ CK = CK_CPointerToObjCPointerCast;
+ } else {
+ CK = CK_BitCast;
+ }
+ } else {
+ CK = CK_BitCast;
+ }
+
+ userExpr = NoTypeInfoCStyleCastExpr(Context, type, CK, userExpr);
+ }
+ // Make id<P...> cast into an 'id' cast.
+ else if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(userExpr)) {
+ if (CE->getType()->isObjCQualifiedIdType()) {
+ while ((CE = dyn_cast<CStyleCastExpr>(userExpr)))
+ userExpr = CE->getSubExpr();
+ CastKind CK;
+ if (userExpr->getType()->isIntegralType(*Context)) {
+ CK = CK_IntegralToPointer;
+ } else if (userExpr->getType()->isBlockPointerType()) {
+ CK = CK_BlockPointerToObjCPointerCast;
+ } else if (userExpr->getType()->isPointerType()) {
+ CK = CK_CPointerToObjCPointerCast;
+ } else {
+ CK = CK_BitCast;
+ }
+ userExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK, userExpr);
+ }
+ }
+ MsgExprs.push_back(userExpr);
+ // We've transferred the ownership to MsgExprs. For now, we *don't* null
+ // out the argument in the original expression (since we aren't deleting
+ // the ObjCMessageExpr). See RewritePropertyOrImplicitSetter() usage for more info.
+ //Exp->setArg(i, 0);
+ }
+ // Generate the funky cast.
+ CastExpr *cast;
+ SmallVector<QualType, 8> ArgTypes;
+ QualType returnType;
+
+ // Push 'id' and 'SEL', the 2 implicit arguments.
+ if (MsgSendFlavor == MsgSendSuperFunctionDecl)
+ ArgTypes.push_back(Context->getPointerType(getSuperStructType()));
+ else
+ ArgTypes.push_back(Context->getObjCIdType());
+ ArgTypes.push_back(Context->getObjCSelType());
+ if (ObjCMethodDecl *OMD = Exp->getMethodDecl()) {
+ // Push any user argument types.
+ for (const auto *PI : OMD->params()) {
+ QualType t = PI->getType()->isObjCQualifiedIdType()
+ ? Context->getObjCIdType()
+ : PI->getType();
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ (void)convertBlockPointerToFunctionPointer(t);
+ ArgTypes.push_back(t);
+ }
+ returnType = Exp->getType();
+ convertToUnqualifiedObjCType(returnType);
+ (void)convertBlockPointerToFunctionPointer(returnType);
+ } else {
+ returnType = Context->getObjCIdType();
+ }
+ // Get the type, we will need to reference it in a couple spots.
+ QualType msgSendType = MsgSendFlavor->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
+ VK_LValue, SourceLocation());
+
+ // Need to cast objc_msgSend to "void *" (to work around a GCC bandaid).
+ // If we don't do this cast, we get the following bizarre warning/note:
+ // xx.m:13: warning: function called through a non-compatible type
+ // xx.m:13: note: if this code is reached, the program will abort
+ cast = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->VoidTy),
+ CK_BitCast, DRE);
+
+ // Now do the "normal" pointer to function cast.
+ // If we don't have a method decl, force a variadic cast.
+ const ObjCMethodDecl *MD = Exp->getMethodDecl();
+ QualType castType =
+ getSimpleFunctionType(returnType, ArgTypes, MD ? MD->isVariadic() : true);
+ castType = Context->getPointerType(castType);
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
+ cast);
+
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+ CallExpr *CE = new (Context)
+ CallExpr(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, EndLoc);
+ Stmt *ReplacingStmt = CE;
+ if (MsgSendStretFlavor) {
+ // The method returns a struct/union, so we must also generate a
+ // call to objc_msgSend_stret and hang both varieties on a conditional
+ // expression that dictates which one to invoke depending on the size
+ // of the method's return type.
+
+ Expr *STCE = SynthMsgSendStretCallExpr(MsgSendStretFlavor,
+ returnType,
+ ArgTypes, MsgExprs,
+ Exp->getMethodDecl());
+ ReplacingStmt = STCE;
+ }
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return ReplacingStmt;
+}
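+
+// Editorial sketch of the common case (hypothetical selector doWork:): an
+// instance message
+//   [receiver doWork:arg]
+// is replaced by a cast-and-call of the shape
+//   ((id (*)(id, SEL, ...))(void *)objc_msgSend)(
+//       (id)receiver, sel_registerName("doWork:"), arg);
+// with objc_msgSendSuper, objc_msgSend_fpret, or the __Stret helper
+// substituted according to the receiver kind and return type chosen above.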
+
+Stmt *RewriteModernObjC::RewriteMessageExpr(ObjCMessageExpr *Exp) {
+ Stmt *ReplacingStmt = SynthMessageExpr(Exp, Exp->getLocStart(),
+ Exp->getLocEnd());
+
+ // Now do the actual rewrite.
+ ReplaceStmt(Exp, ReplacingStmt);
+
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return ReplacingStmt;
+}
+
+// typedef struct objc_object Protocol;
+QualType RewriteModernObjC::getProtocolType() {
+ if (!ProtocolTypeDecl) {
+ TypeSourceInfo *TInfo
+ = Context->getTrivialTypeSourceInfo(Context->getObjCIdType());
+ ProtocolTypeDecl = TypedefDecl::Create(*Context, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("Protocol"),
+ TInfo);
+ }
+ return Context->getTypeDeclType(ProtocolTypeDecl);
+}
+
+/// RewriteObjCProtocolExpr - Rewrite a protocol expression into
+/// a synthesized/forward data reference (to the protocol's metadata).
+/// The forward references (and metadata) are generated in
+/// RewriteModernObjC::HandleTranslationUnit().
+Stmt *RewriteModernObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) {
+ std::string Name = "_OBJC_PROTOCOL_REFERENCE_$_" +
+ Exp->getProtocol()->getNameAsString();
+ IdentifierInfo *ID = &Context->Idents.get(Name);
+ VarDecl *VD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
+ SourceLocation(), ID, getProtocolType(),
+ nullptr, SC_Extern);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(VD, false, getProtocolType(),
+ VK_LValue, SourceLocation());
+ CastExpr *castExpr =
+ NoTypeInfoCStyleCastExpr(
+ Context, Context->getPointerType(DRE->getType()), CK_BitCast, DRE);
+ ReplaceStmt(Exp, castExpr);
+ ProtocolExprDecls.insert(Exp->getProtocol()->getCanonicalDecl());
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return castExpr;
+}
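+
+// Illustration (hypothetical protocol name): '@protocol(Foo)' is rewritten,
+// roughly, to a cast of the synthesized metadata reference, something like
+//   (Protocol *)&_OBJC_PROTOCOL_REFERENCE_$_Foo
+// with the extern variable declared as above and the metadata itself emitted
+// later in HandleTranslationUnit().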
+
+bool RewriteModernObjC::BufferContainsPPDirectives(const char *startBuf,
+ const char *endBuf) {
+ while (startBuf < endBuf) {
+ if (*startBuf == '#') {
+ // Skip whitespace.
+ for (++startBuf; startBuf[0] == ' ' || startBuf[0] == '\t'; ++startBuf)
+ ;
+ if (!strncmp(startBuf, "if", strlen("if")) ||
+ !strncmp(startBuf, "ifdef", strlen("ifdef")) ||
+ !strncmp(startBuf, "ifndef", strlen("ifndef")) ||
+ !strncmp(startBuf, "define", strlen("define")) ||
+ !strncmp(startBuf, "undef", strlen("undef")) ||
+ !strncmp(startBuf, "else", strlen("else")) ||
+ !strncmp(startBuf, "elif", strlen("elif")) ||
+ !strncmp(startBuf, "endif", strlen("endif")) ||
+ !strncmp(startBuf, "pragma", strlen("pragma")) ||
+ !strncmp(startBuf, "include", strlen("include")) ||
+ !strncmp(startBuf, "import", strlen("import")) ||
+ !strncmp(startBuf, "include_next", strlen("include_next")))
+ return true;
+ }
+ startBuf++;
+ }
+ return false;
+}
+
+/// IsTagDefinedInsideClass - This routine checks whether a named tagged type
+/// is defined inside an Objective-C class; if so, it returns true.
+bool RewriteModernObjC::IsTagDefinedInsideClass(ObjCContainerDecl *IDecl,
+ TagDecl *Tag,
+ bool &IsNamedDefinition) {
+ if (!IDecl)
+ return false;
+ SourceLocation TagLocation;
+ if (RecordDecl *RD = dyn_cast<RecordDecl>(Tag)) {
+ RD = RD->getDefinition();
+ if (!RD || !RD->getDeclName().getAsIdentifierInfo())
+ return false;
+ IsNamedDefinition = true;
+ TagLocation = RD->getLocation();
+ return Context->getSourceManager().isBeforeInTranslationUnit(
+ IDecl->getLocation(), TagLocation);
+ }
+ if (EnumDecl *ED = dyn_cast<EnumDecl>(Tag)) {
+ if (!ED->getDeclName().getAsIdentifierInfo())
+ return false;
+ IsNamedDefinition = true;
+ TagLocation = ED->getLocation();
+ return Context->getSourceManager().isBeforeInTranslationUnit(
+ IDecl->getLocation(), TagLocation);
+ }
+ return false;
+}
+
+/// RewriteObjCFieldDeclType - This routine rewrites a type into the buffer.
+/// It handles elaborated types, as well as enum types in the process.
+bool RewriteModernObjC::RewriteObjCFieldDeclType(QualType &Type,
+ std::string &Result) {
+ if (isa<TypedefType>(Type)) {
+ Result += "\t";
+ return false;
+ }
+
+ if (Type->isArrayType()) {
+ QualType ElemTy = Context->getBaseElementType(Type);
+ return RewriteObjCFieldDeclType(ElemTy, Result);
+ }
+ else if (Type->isRecordType()) {
+ RecordDecl *RD = Type->getAs<RecordType>()->getDecl();
+ if (RD->isCompleteDefinition()) {
+ if (RD->isStruct())
+ Result += "\n\tstruct ";
+ else if (RD->isUnion())
+ Result += "\n\tunion ";
+ else
+ assert(false && "class not allowed as an ivar type");
+
+ Result += RD->getName();
+ if (GlobalDefinedTags.count(RD)) {
+ // struct/union is defined globally, use it.
+ Result += " ";
+ return true;
+ }
+ Result += " {\n";
+ for (auto *FD : RD->fields())
+ RewriteObjCFieldDecl(FD, Result);
+ Result += "\t} ";
+ return true;
+ }
+ }
+ else if (Type->isEnumeralType()) {
+ EnumDecl *ED = Type->getAs<EnumType>()->getDecl();
+ if (ED->isCompleteDefinition()) {
+ Result += "\n\tenum ";
+ Result += ED->getName();
+ if (GlobalDefinedTags.count(ED)) {
+ // Enum is globally defined, use it.
+ Result += " ";
+ return true;
+ }
+
+ Result += " {\n";
+ for (const auto *EC : ED->enumerators()) {
+ Result += "\t"; Result += EC->getName(); Result += " = ";
+ llvm::APSInt Val = EC->getInitVal();
+ Result += Val.toString(10);
+ Result += ",\n";
+ }
+ Result += "\t} ";
+ return true;
+ }
+ }
+
+ Result += "\t";
+ convertObjCTypeToCStyleType(Type);
+ return false;
+}
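+
+// Illustration (hypothetical ivar): for an ivar declared with a local
+// definition such as 'struct S { int x; } s;', this routine writes, roughly,
+//   struct S {
+//     int x;
+//   }
+// into the buffer and returns true, leaving the declarator name 's' to be
+// appended by RewriteObjCFieldDecl; a globally defined tag instead emits
+// just 'struct S '.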
+
+/// RewriteObjCFieldDecl - This routine rewrites a field into the buffer.
+/// It handles elaborated types, as well as enum types in the process.
+void RewriteModernObjC::RewriteObjCFieldDecl(FieldDecl *fieldDecl,
+ std::string &Result) {
+ QualType Type = fieldDecl->getType();
+ std::string Name = fieldDecl->getNameAsString();
+
+ bool ElaboratedType = RewriteObjCFieldDeclType(Type, Result);
+ if (!ElaboratedType)
+ Type.getAsStringInternal(Name, Context->getPrintingPolicy());
+ Result += Name;
+ if (fieldDecl->isBitField()) {
+ Result += " : "; Result += utostr(fieldDecl->getBitWidthValue(*Context));
+ }
+ else if (ElaboratedType && Type->isArrayType()) {
+ const ArrayType *AT = Context->getAsArrayType(Type);
+ do {
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) {
+ Result += "[";
+ llvm::APInt Dim = CAT->getSize();
+ Result += utostr(Dim.getZExtValue());
+ Result += "]";
+ }
+ AT = Context->getAsArrayType(AT->getElementType());
+ } while (AT);
+ }
+
+ Result += ";\n";
+}
+
+/// RewriteLocallyDefinedNamedAggregates - This routine rewrites locally defined
+/// named aggregate types into the input buffer.
+void RewriteModernObjC::RewriteLocallyDefinedNamedAggregates(FieldDecl *fieldDecl,
+ std::string &Result) {
+ QualType Type = fieldDecl->getType();
+ if (isa<TypedefType>(Type))
+ return;
+ if (Type->isArrayType())
+ Type = Context->getBaseElementType(Type);
+ ObjCContainerDecl *IDecl =
+ dyn_cast<ObjCContainerDecl>(fieldDecl->getDeclContext());
+
+ TagDecl *TD = nullptr;
+ if (Type->isRecordType()) {
+ TD = Type->getAs<RecordType>()->getDecl();
+ }
+ else if (Type->isEnumeralType()) {
+ TD = Type->getAs<EnumType>()->getDecl();
+ }
+
+ if (TD) {
+ if (GlobalDefinedTags.count(TD))
+ return;
+
+ bool IsNamedDefinition = false;
+ if (IsTagDefinedInsideClass(IDecl, TD, IsNamedDefinition)) {
+ RewriteObjCFieldDeclType(Type, Result);
+ Result += ";";
+ }
+ if (IsNamedDefinition)
+ GlobalDefinedTags.insert(TD);
+ }
+}
+
+unsigned RewriteModernObjC::ObjCIvarBitfieldGroupNo(ObjCIvarDecl *IV) {
+ const ObjCInterfaceDecl *CDecl = IV->getContainingInterface();
+ if (ObjCInterefaceHasBitfieldGroups.count(CDecl)) {
+ return IvarGroupNumber[IV];
+ }
+ unsigned GroupNo = 0;
+ SmallVector<const ObjCIvarDecl *, 8> IVars;
+ for (const ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar())
+ IVars.push_back(IVD);
+
+ for (unsigned i = 0, e = IVars.size(); i < e; i++)
+ if (IVars[i]->isBitField()) {
+ IvarGroupNumber[IVars[i++]] = ++GroupNo;
+ while (i < e && IVars[i]->isBitField())
+ IvarGroupNumber[IVars[i++]] = GroupNo;
+ if (i < e)
+ --i;
+ }
+
+ ObjCInterefaceHasBitfieldGroups.insert(CDecl);
+ return IvarGroupNumber[IV];
+}
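+
+// Worked example (hypothetical ivar list): for
+//   { int a; int b : 1; int c : 1; int d; int e : 2; }
+// the contiguous bitfield runs are numbered so that b and c land in group 1
+// and e lands in group 2; non-bitfield ivars get no group number.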
+
+QualType RewriteModernObjC::SynthesizeBitfieldGroupStructType(
+ ObjCIvarDecl *IV,
+ SmallVectorImpl<ObjCIvarDecl *> &IVars) {
+ std::string StructTagName;
+ ObjCIvarBitfieldGroupType(IV, StructTagName);
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct,
+ Context->getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get(StructTagName));
+ for (unsigned i=0, e = IVars.size(); i < e; i++) {
+ ObjCIvarDecl *Ivar = IVars[i];
+ RD->addDecl(FieldDecl::Create(*Context, RD, SourceLocation(), SourceLocation(),
+ &Context->Idents.get(Ivar->getName()),
+ Ivar->getType(),
+ nullptr, /*Expr *BW */Ivar->getBitWidth(),
+ false, ICIS_NoInit));
+ }
+ RD->completeDefinition();
+ return Context->getTagDeclType(RD);
+}
+
+QualType RewriteModernObjC::GetGroupRecordTypeForObjCIvarBitfield(ObjCIvarDecl *IV) {
+ const ObjCInterfaceDecl *CDecl = IV->getContainingInterface();
+ unsigned GroupNo = ObjCIvarBitfieldGroupNo(IV);
+ std::pair<const ObjCInterfaceDecl*, unsigned> tuple = std::make_pair(CDecl, GroupNo);
+ if (GroupRecordType.count(tuple))
+ return GroupRecordType[tuple];
+
+ SmallVector<ObjCIvarDecl *, 8> IVars;
+ for (const ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar()) {
+ if (IVD->isBitField())
+ IVars.push_back(const_cast<ObjCIvarDecl *>(IVD));
+ else {
+ if (!IVars.empty()) {
+ unsigned GroupNo = ObjCIvarBitfieldGroupNo(IVars[0]);
+ // Generate the struct type for this group of bitfield ivars.
+ GroupRecordType[std::make_pair(CDecl, GroupNo)] =
+ SynthesizeBitfieldGroupStructType(IVars[0], IVars);
+ IVars.clear();
+ }
+ }
+ }
+ if (!IVars.empty()) {
+ // Do the last one.
+ unsigned GroupNo = ObjCIvarBitfieldGroupNo(IVars[0]);
+ GroupRecordType[std::make_pair(CDecl, GroupNo)] =
+ SynthesizeBitfieldGroupStructType(IVars[0], IVars);
+ }
+ QualType RetQT = GroupRecordType[tuple];
+ assert(!RetQT.isNull() && "GetGroupRecordTypeForObjCIvarBitfield struct type is NULL");
+
+ return RetQT;
+}
+
+/// ObjCIvarBitfieldGroupDecl - Names field decl. for ivar bitfield group.
+/// Name would be: classname__GRBF_n where n is the group number for this ivar.
+void RewriteModernObjC::ObjCIvarBitfieldGroupDecl(ObjCIvarDecl *IV,
+ std::string &Result) {
+ const ObjCInterfaceDecl *CDecl = IV->getContainingInterface();
+ Result += CDecl->getName();
+ Result += "__GRBF_";
+ unsigned GroupNo = ObjCIvarBitfieldGroupNo(IV);
+ Result += utostr(GroupNo);
+}
+
+/// ObjCIvarBitfieldGroupType - Names struct type for ivar bitfield group.
+/// Name of the struct would be: classname__T_n where n is the group number for
+/// this ivar.
+void RewriteModernObjC::ObjCIvarBitfieldGroupType(ObjCIvarDecl *IV,
+ std::string &Result) {
+ const ObjCInterfaceDecl *CDecl = IV->getContainingInterface();
+ Result += CDecl->getName();
+ Result += "__T_";
+ unsigned GroupNo = ObjCIvarBitfieldGroupNo(IV);
+ Result += utostr(GroupNo);
+}
+
+/// ObjCIvarBitfieldGroupOffset - Names symbol for ivar bitfield group field offset.
+/// Name would be: OBJC_IVAR_$_classname__GRBF_n where n is the group number for
+/// this ivar.
+void RewriteModernObjC::ObjCIvarBitfieldGroupOffset(ObjCIvarDecl *IV,
+ std::string &Result) {
+ Result += "OBJC_IVAR_$_";
+ ObjCIvarBitfieldGroupDecl(IV, Result);
+}
+
+#define SKIP_BITFIELDS(IX, ENDIX, VEC) { \
+ while ((IX < ENDIX) && VEC[IX]->isBitField()) \
+ ++IX; \
+ if (IX < ENDIX) \
+ --IX; \
+}
+
+/// RewriteObjCInternalStruct - Rewrite one internal struct corresponding to
+/// an objective-c class with ivars.
+void RewriteModernObjC::RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
+ std::string &Result) {
+ assert(CDecl && "Class missing in SynthesizeObjCInternalStruct");
+ assert(CDecl->getName() != "" &&
+ "Name missing in SynthesizeObjCInternalStruct");
+ ObjCInterfaceDecl *RCDecl = CDecl->getSuperClass();
+ SmallVector<ObjCIvarDecl *, 8> IVars;
+ for (ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar())
+ IVars.push_back(IVD);
+
+ SourceLocation LocStart = CDecl->getLocStart();
+ SourceLocation LocEnd = CDecl->getEndOfDefinitionLoc();
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+
+ // If the class has no ivars and either has no root or its root, directly
+ // or indirectly, has no ivars (and thus was not synthesized), there is no
+ // need to synthesize this class.
+ if ((!CDecl->isThisDeclarationADefinition() || IVars.size() == 0) &&
+ (!RCDecl || !ObjCSynthesizedStructs.count(RCDecl))) {
+ endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts);
+ ReplaceText(LocStart, endBuf-startBuf, Result);
+ return;
+ }
+
+ // Move named struct/union definitions found inside the class out to the
+ // outer scope. This follows the semantics of locally defined
+ // structs/unions in Objective-C classes.
+ for (unsigned i = 0, e = IVars.size(); i < e; i++)
+ RewriteLocallyDefinedNamedAggregates(IVars[i], Result);
+
+ // Insert named structs which are synthesized to group ivar bitfields
+ // into the outer scope as well.
+ for (unsigned i = 0, e = IVars.size(); i < e; i++)
+ if (IVars[i]->isBitField()) {
+ ObjCIvarDecl *IV = IVars[i];
+ QualType QT = GetGroupRecordTypeForObjCIvarBitfield(IV);
+ RewriteObjCFieldDeclType(QT, Result);
+ Result += ";";
+ // skip over ivar bitfields in this group.
+ SKIP_BITFIELDS(i, e, IVars);
+ }
+
+ Result += "\nstruct ";
+ Result += CDecl->getNameAsString();
+ Result += "_IMPL {\n";
+
+ if (RCDecl && ObjCSynthesizedStructs.count(RCDecl)) {
+ Result += "\tstruct "; Result += RCDecl->getNameAsString();
+ Result += "_IMPL "; Result += RCDecl->getNameAsString();
+ Result += "_IVARS;\n";
+ }
+
+ for (unsigned i = 0, e = IVars.size(); i < e; i++) {
+ if (IVars[i]->isBitField()) {
+ ObjCIvarDecl *IV = IVars[i];
+ Result += "\tstruct ";
+ ObjCIvarBitfieldGroupType(IV, Result); Result += " ";
+ ObjCIvarBitfieldGroupDecl(IV, Result); Result += ";\n";
+ // skip over ivar bitfields in this group.
+ SKIP_BITFIELDS(i, e, IVars);
+ }
+ else
+ RewriteObjCFieldDecl(IVars[i], Result);
+ }
+
+ Result += "};\n";
+ endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts);
+ ReplaceText(LocStart, endBuf-startBuf, Result);
+ // Mark this struct as having been generated.
+ if (!ObjCSynthesizedStructs.insert(CDecl).second)
+ llvm_unreachable("struct already synthesize- RewriteObjCInternalStruct");
+}
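+
+// Illustration (hypothetical class, assuming the superclass struct was also
+// synthesized): an interface
+//   @interface Foo : NSObject { int x; char y : 4; char z : 2; }
+// is rewritten, roughly, into
+//   struct Foo__T_1 { char y : 4; char z : 2; };
+//   struct Foo_IMPL {
+//     struct NSObject_IMPL NSObject_IVARS;
+//     int x;
+//     struct Foo__T_1 Foo__GRBF_1;
+//   };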
+
+/// RewriteIvarOffsetSymbols - Rewrite ivar offset symbols of those ivars which
+/// have been referenced in an ivar access expression.
+void RewriteModernObjC::RewriteIvarOffsetSymbols(ObjCInterfaceDecl *CDecl,
+ std::string &Result) {
+ // write out ivar offset symbols which have been referenced in an ivar
+ // access expression.
+ llvm::SmallPtrSet<ObjCIvarDecl *, 8> Ivars = ReferencedIvars[CDecl];
+ if (Ivars.empty())
+ return;
+
+ llvm::DenseSet<std::pair<const ObjCInterfaceDecl*, unsigned> > GroupSymbolOutput;
+ for (ObjCIvarDecl *IvarDecl : Ivars) {
+ const ObjCInterfaceDecl *IDecl = IvarDecl->getContainingInterface();
+ unsigned GroupNo = 0;
+ if (IvarDecl->isBitField()) {
+ GroupNo = ObjCIvarBitfieldGroupNo(IvarDecl);
+ if (GroupSymbolOutput.count(std::make_pair(IDecl, GroupNo)))
+ continue;
+ }
+ Result += "\n";
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_ivar$B\")) ";
+ Result += "extern \"C\" ";
+ if (LangOpts.MicrosoftExt &&
+ IvarDecl->getAccessControl() != ObjCIvarDecl::Private &&
+ IvarDecl->getAccessControl() != ObjCIvarDecl::Package)
+ Result += "__declspec(dllimport) ";
+
+ Result += "unsigned long ";
+ if (IvarDecl->isBitField()) {
+ ObjCIvarBitfieldGroupOffset(IvarDecl, Result);
+ GroupSymbolOutput.insert(std::make_pair(IDecl, GroupNo));
+ }
+ else
+ WriteInternalIvarName(CDecl, IvarDecl, Result);
+ Result += ";";
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Meta Data Emission
+//===----------------------------------------------------------------------===//
+
+/// RewriteImplementations - This routine rewrites all method implementations
+/// and emits meta-data.
+void RewriteModernObjC::RewriteImplementations() {
+ int ClsDefCount = ClassImplementation.size();
+ int CatDefCount = CategoryImplementation.size();
+
+ // Rewrite implemented methods
+ for (int i = 0; i < ClsDefCount; i++) {
+ ObjCImplementationDecl *OIMP = ClassImplementation[i];
+ ObjCInterfaceDecl *CDecl = OIMP->getClassInterface();
+ if (CDecl->isImplicitInterfaceDecl())
+ assert(false &&
+ "Legacy implicit interface rewriting not supported in moder abi");
+ RewriteImplementationDecl(OIMP);
+ }
+
+ for (int i = 0; i < CatDefCount; i++) {
+ ObjCCategoryImplDecl *CIMP = CategoryImplementation[i];
+ ObjCInterfaceDecl *CDecl = CIMP->getClassInterface();
+ if (CDecl->isImplicitInterfaceDecl())
+ assert(false &&
+ "Legacy implicit interface rewriting not supported in moder abi");
+ RewriteImplementationDecl(CIMP);
+ }
+}
+
+void RewriteModernObjC::RewriteByRefString(std::string &ResultStr,
+ const std::string &Name,
+ ValueDecl *VD, bool def) {
+ assert(BlockByRefDeclNo.count(VD) &&
+ "RewriteByRefString: ByRef decl missing");
+ if (def)
+ ResultStr += "struct ";
+ ResultStr += "__Block_byref_" + Name +
+ "_" + utostr(BlockByRefDeclNo[VD]) ;
+}
+
+static bool HasLocalVariableExternalStorage(ValueDecl *VD) {
+ if (VarDecl *Var = dyn_cast<VarDecl>(VD))
+ return (Var->isFunctionOrMethodVarDecl() && !Var->hasLocalStorage());
+ return false;
+}
+
+std::string RewriteModernObjC::SynthesizeBlockFunc(BlockExpr *CE, int i,
+ StringRef funcName,
+ std::string Tag) {
+ const FunctionType *AFT = CE->getFunctionType();
+ QualType RT = AFT->getReturnType();
+ std::string StructRef = "struct " + Tag;
+ SourceLocation BlockLoc = CE->getExprLoc();
+ std::string S;
+ ConvertSourceLocationToLineDirective(BlockLoc, S);
+
+ S += "static " + RT.getAsString(Context->getPrintingPolicy()) + " __" +
+ funcName.str() + "_block_func_" + utostr(i);
+
+ BlockDecl *BD = CE->getBlockDecl();
+
+ if (isa<FunctionNoProtoType>(AFT) || BD->param_empty()) {
+ // No user-supplied arguments. Still need to pass in a pointer to the
+ // block (to reference imported block decl refs).
+ S += "(" + StructRef + " *__cself)";
+ } else {
+ const FunctionProtoType *FT = cast<FunctionProtoType>(AFT);
+ assert(FT && "SynthesizeBlockFunc: No function proto");
+ S += '(';
+ // first add the implicit argument.
+ S += StructRef + " *__cself, ";
+ std::string ParamStr;
+ for (BlockDecl::param_iterator AI = BD->param_begin(),
+ E = BD->param_end(); AI != E; ++AI) {
+ if (AI != BD->param_begin()) S += ", ";
+ ParamStr = (*AI)->getNameAsString();
+ QualType QT = (*AI)->getType();
+ (void)convertBlockPointerToFunctionPointer(QT);
+ QT.getAsStringInternal(ParamStr, Context->getPrintingPolicy());
+ S += ParamStr;
+ }
+ if (FT->isVariadic()) {
+ if (!BD->param_empty()) S += ", ";
+ S += "...";
+ }
+ S += ')';
+ }
+ S += " {\n";
+
+ // Create local declarations to avoid rewriting all closure decl ref exprs.
+ // First, emit a declaration for all "by ref" decls.
+ for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string Name = (*I)->getNameAsString();
+ std::string TypeString;
+ RewriteByRefString(TypeString, Name, (*I));
+ TypeString += " *";
+ Name = TypeString + Name;
+ S += Name + " = __cself->" + (*I)->getNameAsString() + "; // bound by ref\n";
+ }
+ // Next, emit a declaration for all "by copy" declarations.
+ for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ S += " ";
+ // Handle nested closure invocation. For example:
+ //
+ // void (^myImportedClosure)(void);
+ // myImportedClosure = ^(void) { setGlobalInt(x + y); };
+ //
+ // void (^anotherClosure)(void);
+ // anotherClosure = ^(void) {
+ // myImportedClosure(); // import and invoke the closure
+ // };
+ //
+ if (isTopLevelBlockPointerType((*I)->getType())) {
+ RewriteBlockPointerTypeVariable(S, (*I));
+ S += " = (";
+ RewriteBlockPointerType(S, (*I)->getType());
+ S += ")";
+ S += "__cself->" + (*I)->getNameAsString() + "; // bound by copy\n";
+ }
+ else {
+ std::string Name = (*I)->getNameAsString();
+ QualType QT = (*I)->getType();
+ if (HasLocalVariableExternalStorage(*I))
+ QT = Context->getPointerType(QT);
+ QT.getAsStringInternal(Name, Context->getPrintingPolicy());
+ S += Name + " = __cself->" +
+ (*I)->getNameAsString() + "; // bound by copy\n";
+ }
+ }
+ // Emit the rewritten block body: skip past the prototype up to (and
+ // including) the opening '{', then append the remainder verbatim.
+ std::string RewrittenStr = RewrittenBlockExprs[CE];
+ const char *cstr = RewrittenStr.c_str();
+ while (*cstr++ != '{') ;
+ S += cstr;
+ S += "\n";
+ return S;
+}
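+
+// Illustrative output (hypothetical block in a function 'foo' capturing
+// '__block int i' by reference and 'int x' by copy):
+//   static void __foo_block_func_0(struct __foo_block_impl_0 *__cself) {
+//     __Block_byref_i_0 *i = __cself->i; // bound by ref
+//     int x = __cself->x; // bound by copy
+//     (i->__forwarding->i) = x;
+//   }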
+
+std::string RewriteModernObjC::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
+ StringRef funcName,
+ std::string Tag) {
+ std::string StructRef = "struct " + Tag;
+ std::string S = "static void __";
+
+ S += funcName;
+ S += "_block_copy_" + utostr(i);
+ S += "(" + StructRef;
+ S += "*dst, " + StructRef;
+ S += "*src) {";
+ for (ValueDecl *VD : ImportedBlockDecls) {
+ S += "_Block_object_assign((void*)&dst->";
+ S += VD->getNameAsString();
+ S += ", (void*)src->";
+ S += VD->getNameAsString();
+ if (BlockByRefDeclsPtrSet.count(VD))
+ S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);";
+ else if (VD->getType()->isBlockPointerType())
+ S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);";
+ else
+ S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);";
+ }
+ S += "}\n";
+
+ S += "\nstatic void __";
+ S += funcName;
+ S += "_block_dispose_" + utostr(i);
+ S += "(" + StructRef;
+ S += "*src) {";
+ for (ValueDecl *VD : ImportedBlockDecls) {
+ S += "_Block_object_dispose((void*)src->";
+ S += VD->getNameAsString();
+ if (BlockByRefDeclsPtrSet.count(VD))
+ S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);";
+ else if (VD->getType()->isBlockPointerType())
+ S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);";
+ else
+ S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);";
+ }
+ S += "}\n";
+ return S;
+}
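+
+// Illustrative output (same hypothetical block; 8 is BLOCK_FIELD_IS_BYREF):
+//   static void __foo_block_copy_0(struct __foo_block_impl_0 *dst,
+//                                  struct __foo_block_impl_0 *src) {
+//     _Block_object_assign((void*)&dst->i, (void*)src->i,
+//                          8/*BLOCK_FIELD_IS_BYREF*/);
+//   }
+//   static void __foo_block_dispose_0(struct __foo_block_impl_0 *src) {
+//     _Block_object_dispose((void*)src->i, 8/*BLOCK_FIELD_IS_BYREF*/);
+//   }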
+
+std::string RewriteModernObjC::SynthesizeBlockImpl(BlockExpr *CE, std::string Tag,
+ std::string Desc) {
+ std::string S = "\nstruct " + Tag;
+ std::string Constructor = " " + Tag;
+
+ S += " {\n struct __block_impl impl;\n";
+ S += " struct " + Desc;
+ S += "* Desc;\n";
+
+ Constructor += "(void *fp, "; // Invoke function pointer.
+ Constructor += "struct " + Desc; // Descriptor pointer.
+ Constructor += " *desc";
+
+ if (BlockDeclRefs.size()) {
+ // Output all "by copy" declarations.
+ for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string FieldName = (*I)->getNameAsString();
+ std::string ArgName = "_" + FieldName;
+ // Handle nested closure invocation. For example:
+ //
+ // void (^myImportedBlock)(void);
+ // myImportedBlock = ^(void) { setGlobalInt(x + y); };
+ //
+ // void (^anotherBlock)(void);
+ // anotherBlock = ^(void) {
+ // myImportedBlock(); // import and invoke the closure
+ // };
+ //
+ if (isTopLevelBlockPointerType((*I)->getType())) {
+ S += "struct __block_impl *";
+ Constructor += ", void *" + ArgName;
+ } else {
+ QualType QT = (*I)->getType();
+ if (HasLocalVariableExternalStorage(*I))
+ QT = Context->getPointerType(QT);
+ QT.getAsStringInternal(FieldName, Context->getPrintingPolicy());
+ QT.getAsStringInternal(ArgName, Context->getPrintingPolicy());
+ Constructor += ", " + ArgName;
+ }
+ S += FieldName + ";\n";
+ }
+ // Output all "by ref" declarations.
+ for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string FieldName = (*I)->getNameAsString();
+ std::string ArgName = "_" + FieldName;
+ {
+ std::string TypeString;
+ RewriteByRefString(TypeString, FieldName, (*I));
+ TypeString += " *";
+ FieldName = TypeString + FieldName;
+ ArgName = TypeString + ArgName;
+ Constructor += ", " + ArgName;
+ }
+ S += FieldName + "; // by ref\n";
+ }
+ // Finish writing the constructor.
+ Constructor += ", int flags=0)";
+ // Initialize all "by copy" arguments.
+ bool firstTime = true;
+ for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ std::string Name = (*I)->getNameAsString();
+ if (firstTime) {
+ Constructor += " : ";
+ firstTime = false;
+ }
+ else
+ Constructor += ", ";
+ if (isTopLevelBlockPointerType((*I)->getType()))
+ Constructor += Name + "((struct __block_impl *)_" + Name + ")";
+ else
+ Constructor += Name + "(_" + Name + ")";
+ }
+ // Initialize all "by ref" arguments.
+ for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ std::string Name = (*I)->getNameAsString();
+ if (firstTime) {
+ Constructor += " : ";
+ firstTime = false;
+ }
+ else
+ Constructor += ", ";
+ Constructor += Name + "(_" + Name + "->__forwarding)";
+ }
+
+ Constructor += " {\n";
+ if (GlobalVarDecl)
+ Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n";
+ else
+ Constructor += " impl.isa = &_NSConcreteStackBlock;\n";
+ Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n";
+
+ Constructor += " Desc = desc;\n";
+ } else {
+ // Finish writing the constructor.
+ Constructor += ", int flags=0) {\n";
+ if (GlobalVarDecl)
+ Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n";
+ else
+ Constructor += " impl.isa = &_NSConcreteStackBlock;\n";
+ Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n";
+ Constructor += " Desc = desc;\n";
+ }
+ Constructor += " ";
+ Constructor += "}\n";
+ S += Constructor;
+ S += "};\n";
+ return S;
+}
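+
+// Illustrative output (same hypothetical block):
+//   struct __foo_block_impl_0 {
+//     struct __block_impl impl;
+//     struct __foo_block_desc_0 *Desc;
+//     int x;
+//     __Block_byref_i_0 *i; // by ref
+//     __foo_block_impl_0(void *fp, struct __foo_block_desc_0 *desc, int _x,
+//                        __Block_byref_i_0 *_i, int flags=0)
+//         : x(_x), i(_i->__forwarding) {
+//       impl.isa = &_NSConcreteStackBlock;
+//       impl.Flags = flags;
+//       impl.FuncPtr = fp;
+//       Desc = desc;
+//     }
+//   };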
+
+std::string RewriteModernObjC::SynthesizeBlockDescriptor(std::string DescTag,
+ std::string ImplTag, int i,
+ StringRef FunName,
+ unsigned hasCopy) {
+ std::string S = "\nstatic struct " + DescTag;
+
+ S += " {\n size_t reserved;\n";
+ S += " size_t Block_size;\n";
+ if (hasCopy) {
+ S += " void (*copy)(struct ";
+ S += ImplTag; S += "*, struct ";
+ S += ImplTag; S += "*);\n";
+
+ S += " void (*dispose)(struct ";
+ S += ImplTag; S += "*);\n";
+ }
+ S += "} ";
+
+ S += DescTag + "_DATA = { 0, sizeof(struct ";
+ S += ImplTag + ")";
+ if (hasCopy) {
+ S += ", __" + FunName.str() + "_block_copy_" + utostr(i);
+ S += ", __" + FunName.str() + "_block_dispose_" + utostr(i);
+ }
+ S += "};\n";
+ return S;
+}
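+
+// Illustrative output (same hypothetical block, with copy/dispose helpers):
+//   static struct __foo_block_desc_0 {
+//     size_t reserved;
+//     size_t Block_size;
+//     void (*copy)(struct __foo_block_impl_0*, struct __foo_block_impl_0*);
+//     void (*dispose)(struct __foo_block_impl_0*);
+//   } __foo_block_desc_0_DATA = { 0, sizeof(struct __foo_block_impl_0),
+//                                 __foo_block_copy_0, __foo_block_dispose_0 };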
+
+void RewriteModernObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart,
+ StringRef FunName) {
+ bool RewriteSC = (GlobalVarDecl &&
+ !Blocks.empty() &&
+ GlobalVarDecl->getStorageClass() == SC_Static &&
+ GlobalVarDecl->getType().getCVRQualifiers());
+ if (RewriteSC) {
+ std::string SC(" void __");
+ SC += GlobalVarDecl->getNameAsString();
+ SC += "() {}";
+ InsertText(FunLocStart, SC);
+ }
+
+ // Insert closures that were part of the function.
+ for (unsigned i = 0, count=0; i < Blocks.size(); i++) {
+ CollectBlockDeclRefInfo(Blocks[i]);
+ // Need to copy in the inner copied-in variables that are not actually
+ // used in this block.
+ for (int j = 0; j < InnerDeclRefsCount[i]; j++) {
+ DeclRefExpr *Exp = InnerDeclRefs[count++];
+ ValueDecl *VD = Exp->getDecl();
+ BlockDeclRefs.push_back(Exp);
+ if (!VD->hasAttr<BlocksAttr>()) {
+ if (!BlockByCopyDeclsPtrSet.count(VD)) {
+ BlockByCopyDeclsPtrSet.insert(VD);
+ BlockByCopyDecls.push_back(VD);
+ }
+ continue;
+ }
+
+ if (!BlockByRefDeclsPtrSet.count(VD)) {
+ BlockByRefDeclsPtrSet.insert(VD);
+ BlockByRefDecls.push_back(VD);
+ }
+
+ // imported objects in the inner blocks not used in the outer
+ // blocks must be copied/disposed in the outer block as well.
+ if (VD->getType()->isObjCObjectPointerType() ||
+ VD->getType()->isBlockPointerType())
+ ImportedBlockDecls.insert(VD);
+ }
+
+ std::string ImplTag = "__" + FunName.str() + "_block_impl_" + utostr(i);
+ std::string DescTag = "__" + FunName.str() + "_block_desc_" + utostr(i);
+
+ std::string CI = SynthesizeBlockImpl(Blocks[i], ImplTag, DescTag);
+
+ InsertText(FunLocStart, CI);
+
+ std::string CF = SynthesizeBlockFunc(Blocks[i], i, FunName, ImplTag);
+
+ InsertText(FunLocStart, CF);
+
+ if (ImportedBlockDecls.size()) {
+ std::string HF = SynthesizeBlockHelperFuncs(Blocks[i], i, FunName, ImplTag);
+ InsertText(FunLocStart, HF);
+ }
+ std::string BD = SynthesizeBlockDescriptor(DescTag, ImplTag, i, FunName,
+ ImportedBlockDecls.size() > 0);
+ InsertText(FunLocStart, BD);
+
+ BlockDeclRefs.clear();
+ BlockByRefDecls.clear();
+ BlockByRefDeclsPtrSet.clear();
+ BlockByCopyDecls.clear();
+ BlockByCopyDeclsPtrSet.clear();
+ ImportedBlockDecls.clear();
+ }
+ if (RewriteSC) {
+ // Must insert any 'const'/'volatile'/'static' here, since they were
+ // removed as a result of rewriting the block literals.
+ std::string SC;
+ if (GlobalVarDecl->getStorageClass() == SC_Static)
+ SC = "static ";
+ if (GlobalVarDecl->getType().isConstQualified())
+ SC += "const ";
+ if (GlobalVarDecl->getType().isVolatileQualified())
+ SC += "volatile ";
+ if (GlobalVarDecl->getType().isRestrictQualified())
+ SC += "restrict ";
+ InsertText(FunLocStart, SC);
+ }
+ if (GlobalConstructionExp) {
+ // extra fancy dance for global literal expression.
+
+ // Always the latest block expression on the block stack.
+ std::string Tag = "__";
+ Tag += FunName;
+ Tag += "_block_impl_";
+ Tag += utostr(Blocks.size()-1);
+ std::string globalBuf = "static ";
+ globalBuf += Tag; globalBuf += " ";
+ std::string SStr;
+
+ llvm::raw_string_ostream constructorExprBuf(SStr);
+ GlobalConstructionExp->printPretty(constructorExprBuf, nullptr,
+ PrintingPolicy(LangOpts));
+ globalBuf += constructorExprBuf.str();
+ globalBuf += ";\n";
+ InsertText(FunLocStart, globalBuf);
+ GlobalConstructionExp = nullptr;
+ }
+
+ Blocks.clear();
+ InnerDeclRefsCount.clear();
+ InnerDeclRefs.clear();
+ RewrittenBlockExprs.clear();
+}
+
+void RewriteModernObjC::InsertBlockLiteralsWithinFunction(FunctionDecl *FD) {
+ SourceLocation FunLocStart =
+ (!Blocks.empty()) ? getFunctionSourceLocation(*this, FD)
+ : FD->getTypeSpecStartLoc();
+ StringRef FuncName = FD->getName();
+
+ SynthesizeBlockLiterals(FunLocStart, FuncName);
+}
+
+static void BuildUniqueMethodName(std::string &Name,
+ ObjCMethodDecl *MD) {
+ ObjCInterfaceDecl *IFace = MD->getClassInterface();
+ Name = IFace->getName();
+ Name += "__" + MD->getSelector().getAsString();
+ // Convert colons to underscores.
+ std::string::size_type loc = 0;
+ while ((loc = Name.find(":", loc)) != std::string::npos)
+ Name.replace(loc, 1, "_");
+}
+
+void RewriteModernObjC::InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD) {
+ //fprintf(stderr,"In InsertBlockLiteralsWitinMethod\n");
+ //SourceLocation FunLocStart = MD->getLocStart();
+ SourceLocation FunLocStart = MD->getLocStart();
+ std::string FuncName;
+ BuildUniqueMethodName(FuncName, MD);
+ SynthesizeBlockLiterals(FunLocStart, FuncName);
+}
+
+void RewriteModernObjC::GetBlockDeclRefExprs(Stmt *S) {
+ for (Stmt *SubStmt : S->children())
+ if (SubStmt) {
+ if (BlockExpr *CBE = dyn_cast<BlockExpr>(SubStmt))
+ GetBlockDeclRefExprs(CBE->getBody());
+ else
+ GetBlockDeclRefExprs(SubStmt);
+ }
+ // Record any DeclRefExpr that refers to an enclosing capture or to a
+ // local variable with external storage.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S))
+ if (DRE->refersToEnclosingVariableOrCapture() ||
+ HasLocalVariableExternalStorage(DRE->getDecl()))
+ // FIXME: Handle enums.
+ BlockDeclRefs.push_back(DRE);
+}
+
+void RewriteModernObjC::GetInnerBlockDeclRefExprs(Stmt *S,
+ SmallVectorImpl<DeclRefExpr *> &InnerBlockDeclRefs,
+ llvm::SmallPtrSetImpl<const DeclContext *> &InnerContexts) {
+ for (Stmt *SubStmt : S->children())
+ if (SubStmt) {
+ if (BlockExpr *CBE = dyn_cast<BlockExpr>(SubStmt)) {
+ InnerContexts.insert(cast<DeclContext>(CBE->getBlockDecl()));
+ GetInnerBlockDeclRefExprs(CBE->getBody(),
+ InnerBlockDeclRefs,
+ InnerContexts);
+ }
+ else
+ GetInnerBlockDeclRefExprs(SubStmt, InnerBlockDeclRefs, InnerContexts);
+ }
+ // Record any DeclRefExpr that refers to an enclosing capture or to a
+ // local variable with external storage.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
+ if (DRE->refersToEnclosingVariableOrCapture() ||
+ HasLocalVariableExternalStorage(DRE->getDecl())) {
+ if (!InnerContexts.count(DRE->getDecl()->getDeclContext()))
+ InnerBlockDeclRefs.push_back(DRE);
+ if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (Var->isFunctionOrMethodVarDecl())
+ ImportedLocalExternalDecls.insert(Var);
+ }
+ }
+}
+
+/// convertObjCTypeToCStyleType - This routine converts ObjC types, such as
+/// qualified objects and block pointers, to the closest C/C++ types it can.
+/// It returns true if the input type was modified.
+bool RewriteModernObjC::convertObjCTypeToCStyleType(QualType &T) {
+ QualType oldT = T;
+ convertBlockPointerToFunctionPointer(T);
+ if (T->isFunctionPointerType()) {
+ QualType PointeeTy;
+ if (const PointerType* PT = T->getAs<PointerType>()) {
+ PointeeTy = PT->getPointeeType();
+ if (const FunctionType *FT = PointeeTy->getAs<FunctionType>()) {
+ T = convertFunctionTypeOfBlocks(FT);
+ T = Context->getPointerType(T);
+ }
+ }
+ }
+
+ convertToUnqualifiedObjCType(T);
+ return T != oldT;
+}
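+
+// Example conversions (illustrative): 'id<NSCopying>' becomes 'id', and a
+// block pointer such as 'int (^)(id<NSCopying>)' becomes 'int (*)(id)'.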
+
+/// convertFunctionTypeOfBlocks - This routine converts a function type
+/// whose result type may be a block pointer or whose argument type(s)
+/// might be block pointers to an equivalent function type replacing
+/// all block pointers to function pointers.
+QualType RewriteModernObjC::convertFunctionTypeOfBlocks(const FunctionType *FT) {
+ const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FT);
+ // FTP will be null for closures that don't take arguments.
+ SmallVector<QualType, 8> ArgTypes;
+ QualType Res = FT->getReturnType();
+ bool modified = convertObjCTypeToCStyleType(Res);
+
+ if (FTP) {
+ for (auto &I : FTP->param_types()) {
+ QualType t = I;
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ if (convertObjCTypeToCStyleType(t))
+ modified = true;
+ ArgTypes.push_back(t);
+ }
+ }
+ QualType FuncType;
+ if (modified)
+ FuncType = getSimpleFunctionType(Res, ArgTypes);
+ else FuncType = QualType(FT, 0);
+ return FuncType;
+}
+
+Stmt *RewriteModernObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
+ // Navigate to relevant type information.
+ const BlockPointerType *CPT = nullptr;
+
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BlockExp)) {
+ CPT = DRE->getType()->getAs<BlockPointerType>();
+ } else if (const MemberExpr *MExpr = dyn_cast<MemberExpr>(BlockExp)) {
+ CPT = MExpr->getType()->getAs<BlockPointerType>();
+ } else if (const ParenExpr *PRE = dyn_cast<ParenExpr>(BlockExp)) {
+ return SynthesizeBlockCall(Exp, PRE->getSubExpr());
+ } else if (const ImplicitCastExpr *IEXPR = dyn_cast<ImplicitCastExpr>(BlockExp))
+ CPT = IEXPR->getType()->getAs<BlockPointerType>();
+ else if (const ConditionalOperator *CEXPR =
+ dyn_cast<ConditionalOperator>(BlockExp)) {
+ Expr *LHSExp = CEXPR->getLHS();
+ Stmt *LHSStmt = SynthesizeBlockCall(Exp, LHSExp);
+ Expr *RHSExp = CEXPR->getRHS();
+ Stmt *RHSStmt = SynthesizeBlockCall(Exp, RHSExp);
+ Expr *CONDExp = CEXPR->getCond();
+ ConditionalOperator *CondExpr =
+ new (Context) ConditionalOperator(CONDExp,
+ SourceLocation(), cast<Expr>(LHSStmt),
+ SourceLocation(), cast<Expr>(RHSStmt),
+ Exp->getType(), VK_RValue, OK_Ordinary);
+ return CondExpr;
+ } else if (const ObjCIvarRefExpr *IRE = dyn_cast<ObjCIvarRefExpr>(BlockExp)) {
+ CPT = IRE->getType()->getAs<BlockPointerType>();
+ } else if (const PseudoObjectExpr *POE
+ = dyn_cast<PseudoObjectExpr>(BlockExp)) {
+ CPT = POE->getType()->castAs<BlockPointerType>();
+ } else {
+ assert(1 && "RewriteBlockClass: Bad type");
+ }
+ assert(CPT && "RewriteBlockClass: Bad type");
+ const FunctionType *FT = CPT->getPointeeType()->getAs<FunctionType>();
+ assert(FT && "RewriteBlockClass: Bad type");
+ const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FT);
+ // FTP will be null for closures that don't take arguments.
+
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("__block_impl"));
+ QualType PtrBlock = Context->getPointerType(Context->getTagDeclType(RD));
+
+ // Generate a funky cast.
+ SmallVector<QualType, 8> ArgTypes;
+
+ // Push the block argument type.
+ ArgTypes.push_back(PtrBlock);
+ if (FTP) {
+ for (auto &I : FTP->param_types()) {
+ QualType t = I;
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ if (!convertBlockPointerToFunctionPointer(t))
+ convertToUnqualifiedObjCType(t);
+ ArgTypes.push_back(t);
+ }
+ }
+ // Now do the pointer to function cast.
+ QualType PtrToFuncCastType = getSimpleFunctionType(Exp->getType(), ArgTypes);
+
+ PtrToFuncCastType = Context->getPointerType(PtrToFuncCastType);
+
+ CastExpr *BlkCast = NoTypeInfoCStyleCastExpr(Context, PtrBlock,
+ CK_BitCast,
+ const_cast<Expr*>(BlockExp));
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ BlkCast);
+
+ FieldDecl *FD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get("FuncPtr"),
+ Context->VoidPtrTy, nullptr,
+ /*BitWidth=*/nullptr, /*Mutable=*/true,
+ ICIS_NoInit);
+ MemberExpr *ME =
+ new (Context) MemberExpr(PE, true, SourceLocation(), FD, SourceLocation(),
+ FD->getType(), VK_LValue, OK_Ordinary);
+
+ CastExpr *FunkCast = NoTypeInfoCStyleCastExpr(Context, PtrToFuncCastType,
+ CK_BitCast, ME);
+ PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), FunkCast);
+
+ SmallVector<Expr*, 8> BlkExprs;
+ // Add the implicit argument.
+ BlkExprs.push_back(BlkCast);
+ // Add the user arguments.
+ for (CallExpr::arg_iterator I = Exp->arg_begin(),
+ E = Exp->arg_end(); I != E; ++I) {
+ BlkExprs.push_back(*I);
+ }
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, BlkExprs,
+ Exp->getType(), VK_RValue,
+ SourceLocation());
+ return CE;
+}
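+
+// Illustration (hypothetical variable 'blk' of type 'void (^)(int)'): the
+// call 'blk(42)' is synthesized, roughly, as
+//   ((void (*)(struct __block_impl *, int))
+//       ((struct __block_impl *)blk)->FuncPtr)((struct __block_impl *)blk, 42);
+// i.e. the block is treated as a struct, its FuncPtr field is fetched, cast
+// to the right function-pointer type, and called with the block itself as
+// the implicit first argument.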
+
+// We need to return the rewritten expression to handle cases where the
+// DeclRefExpr is embedded in another expression being rewritten.
+// For example:
+//
+// int main() {
+// __block Foo *f;
+// __block int i;
+//
+// void (^myblock)() = ^() {
+// [f test]; // f is a DeclRefExpr embedded in a message (which is being rewritten).
+// i = 77;
+// };
+//}
+Stmt *RewriteModernObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) {
+ // Rewrite the byref variable into BYREFVAR->__forwarding->BYREFVAR
+ // for each DeclRefExpr, where BYREFVAR is the name of the variable.
+ ValueDecl *VD = DeclRefExp->getDecl();
+ bool isArrow = DeclRefExp->refersToEnclosingVariableOrCapture() ||
+ HasLocalVariableExternalStorage(DeclRefExp->getDecl());
+
+ FieldDecl *FD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get("__forwarding"),
+ Context->VoidPtrTy, nullptr,
+ /*BitWidth=*/nullptr, /*Mutable=*/true,
+ ICIS_NoInit);
+ MemberExpr *ME = new (Context)
+ MemberExpr(DeclRefExp, isArrow, SourceLocation(), FD, SourceLocation(),
+ FD->getType(), VK_LValue, OK_Ordinary);
+
+ StringRef Name = VD->getName();
+ FD = FieldDecl::Create(*Context, nullptr, SourceLocation(), SourceLocation(),
+ &Context->Idents.get(Name),
+ Context->VoidPtrTy, nullptr,
+ /*BitWidth=*/nullptr, /*Mutable=*/true,
+ ICIS_NoInit);
+ ME =
+ new (Context) MemberExpr(ME, true, SourceLocation(), FD, SourceLocation(),
+ DeclRefExp->getType(), VK_LValue, OK_Ordinary);
+
+ // Need parens to enforce precedence.
+ ParenExpr *PE = new (Context) ParenExpr(DeclRefExp->getExprLoc(),
+ DeclRefExp->getExprLoc(),
+ ME);
+ ReplaceStmt(DeclRefExp, PE);
+ return PE;
+}
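+
+// Illustration: with '__block int i;', a captured use such as 'i = 77;' is
+// rewritten as '(i->__forwarding->i) = 77;', going through __forwarding so
+// the stack and heap copies of the variable stay in sync.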
+
+// Rewrites the imported local variable V with external storage
+// (static, extern, etc.) as *V
+//
+Stmt *RewriteModernObjC::RewriteLocalVariableExternalStorage(DeclRefExpr *DRE) {
+ ValueDecl *VD = DRE->getDecl();
+ if (VarDecl *Var = dyn_cast<VarDecl>(VD))
+ if (!ImportedLocalExternalDecls.count(Var))
+ return DRE;
+ Expr *Exp = new (Context) UnaryOperator(DRE, UO_Deref, DRE->getType(),
+ VK_LValue, OK_Ordinary,
+ DRE->getLocation());
+ // Need parens to enforce precedence.
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ Exp);
+ ReplaceStmt(DRE, PE);
+ return PE;
+}
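+
+// Illustration (hypothetical): for a local 'static int count;' imported into
+// a block, a reference to 'count' is rewritten as '(*count)', since the
+// block stores the variable's address rather than a copy.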
+
+void RewriteModernObjC::RewriteCastExpr(CStyleCastExpr *CE) {
+ SourceLocation LocStart = CE->getLParenLoc();
+ SourceLocation LocEnd = CE->getRParenLoc();
+
+ // Need to avoid trying to rewrite synthesized casts.
+ if (LocStart.isInvalid())
+ return;
+ // Need to avoid trying to rewrite casts contained in macros.
+ if (!Rewriter::isRewritable(LocStart) || !Rewriter::isRewritable(LocEnd))
+ return;
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ QualType QT = CE->getType();
+ const Type* TypePtr = QT->getAs<Type>();
+ if (isa<TypeOfExprType>(TypePtr)) {
+ const TypeOfExprType *TypeOfExprTypePtr = cast<TypeOfExprType>(TypePtr);
+ QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType();
+ std::string TypeAsString = "(";
+ RewriteBlockPointerType(TypeAsString, QT);
+ TypeAsString += ")";
+ ReplaceText(LocStart, endBuf-startBuf+1, TypeAsString);
+ return;
+ }
+ // Scan the cast for block pointer carets ('^') and rewrite them as '*'.
+ const char *argPtr = startBuf;
+
+ while (*argPtr++ && (argPtr < endBuf)) {
+ switch (*argPtr) {
+ case '^':
+ // Replace the '^' with '*'.
+ LocStart = LocStart.getLocWithOffset(argPtr-startBuf);
+ ReplaceText(LocStart, 1, "*");
+ break;
+ }
+ }
+}
+
+void RewriteModernObjC::RewriteImplicitCastObjCExpr(CastExpr *IC) {
+ CastKind CastKind = IC->getCastKind();
+ if (CastKind != CK_BlockPointerToObjCPointerCast &&
+ CastKind != CK_AnyPointerToBlockPointerCast)
+ return;
+
+ QualType QT = IC->getType();
+ (void)convertBlockPointerToFunctionPointer(QT);
+ std::string TypeString(QT.getAsString(Context->getPrintingPolicy()));
+ std::string Str = "(";
+ Str += TypeString;
+ Str += ")";
+ InsertText(IC->getSubExpr()->getLocStart(), Str);
+}
+
+void RewriteModernObjC::RewriteBlockPointerFunctionArgs(FunctionDecl *FD) {
+ SourceLocation DeclLoc = FD->getLocation();
+ unsigned parenCount = 0;
+
+ // We have 1 or more arguments that have closure pointers.
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ const char *startArgList = strchr(startBuf, '(');
+
+ assert((*startArgList == '(') && "Rewriter fuzzy parser confused");
+
+ parenCount++;
+ // advance the location to startArgList.
+ DeclLoc = DeclLoc.getLocWithOffset(startArgList-startBuf);
+ assert((DeclLoc.isValid()) && "Invalid DeclLoc");
+
+ const char *argPtr = startArgList;
+
+ while (*argPtr++ && parenCount) {
+ switch (*argPtr) {
+ case '^':
+ // Replace the '^' with '*'.
+ DeclLoc = DeclLoc.getLocWithOffset(argPtr-startArgList);
+ ReplaceText(DeclLoc, 1, "*");
+ break;
+ case '(':
+ parenCount++;
+ break;
+ case ')':
+ parenCount--;
+ break;
+ }
+ }
+}
+
+bool RewriteModernObjC::PointerTypeTakesAnyBlockArguments(QualType QT) {
+ const FunctionProtoType *FTP;
+ const PointerType *PT = QT->getAs<PointerType>();
+ if (PT) {
+ FTP = PT->getPointeeType()->getAs<FunctionProtoType>();
+ } else {
+ const BlockPointerType *BPT = QT->getAs<BlockPointerType>();
+ assert(BPT && "BlockPointerTypeTakeAnyBlockArguments(): not a block pointer type");
+ FTP = BPT->getPointeeType()->getAs<FunctionProtoType>();
+ }
+ if (FTP) {
+ for (const auto &I : FTP->param_types())
+ if (isTopLevelBlockPointerType(I))
+ return true;
+ }
+ return false;
+}
+
+bool RewriteModernObjC::PointerTypeTakesAnyObjCQualifiedType(QualType QT) {
+ const FunctionProtoType *FTP;
+ const PointerType *PT = QT->getAs<PointerType>();
+ if (PT) {
+ FTP = PT->getPointeeType()->getAs<FunctionProtoType>();
+ } else {
+ const BlockPointerType *BPT = QT->getAs<BlockPointerType>();
+ assert(BPT && "BlockPointerTypeTakeAnyBlockArguments(): not a block pointer type");
+ FTP = BPT->getPointeeType()->getAs<FunctionProtoType>();
+ }
+ if (FTP) {
+ for (const auto &I : FTP->param_types()) {
+ if (I->isObjCQualifiedIdType())
+ return true;
+ if (I->isObjCObjectPointerType() &&
+ I->getPointeeType()->isObjCQualifiedInterfaceType())
+ return true;
+ }
+ }
+ return false;
+}
+
+void RewriteModernObjC::GetExtentOfArgList(const char *Name, const char *&LParen,
+ const char *&RParen) {
+ const char *argPtr = strchr(Name, '(');
+ assert((*argPtr == '(') && "Rewriter fuzzy parser confused");
+
+ LParen = argPtr; // output the start.
+ argPtr++; // skip past the left paren.
+ unsigned parenCount = 1;
+
+ while (*argPtr && parenCount) {
+ switch (*argPtr) {
+ case '(': parenCount++; break;
+ case ')': parenCount--; break;
+ default: break;
+ }
+ if (parenCount) argPtr++;
+ }
+ assert((*argPtr == ')') && "Rewriter fuzzy parser confused");
+ RParen = argPtr; // output the end
+}
+
+void RewriteModernObjC::RewriteBlockPointerDecl(NamedDecl *ND) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ RewriteBlockPointerFunctionArgs(FD);
+ return;
+ }
+ // Handle Variables and Typedefs.
+ SourceLocation DeclLoc = ND->getLocation();
+ QualType DeclT;
+ if (VarDecl *VD = dyn_cast<VarDecl>(ND))
+ DeclT = VD->getType();
+ else if (TypedefNameDecl *TDD = dyn_cast<TypedefNameDecl>(ND))
+ DeclT = TDD->getUnderlyingType();
+ else if (FieldDecl *FD = dyn_cast<FieldDecl>(ND))
+ DeclT = FD->getType();
+ else
+ llvm_unreachable("RewriteBlockPointerDecl(): Decl type not yet handled");
+
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ const char *endBuf = startBuf;
+ // scan backward (from the decl location) for the end of the previous decl.
+ while (*startBuf != '^' && *startBuf != ';' && startBuf != MainFileStart)
+ startBuf--;
+ SourceLocation Start = DeclLoc.getLocWithOffset(startBuf-endBuf);
+ std::string buf;
+ unsigned OrigLength=0;
+ // *startBuf != '^' if we are dealing with a pointer to function that
+ // may take block argument types (which will be handled below).
+ if (*startBuf == '^') {
+ // Replace the '^' with '*', computing a negative offset.
+ buf = '*';
+ startBuf++;
+ OrigLength++;
+ }
+ while (*startBuf != ')') {
+ buf += *startBuf;
+ startBuf++;
+ OrigLength++;
+ }
+ buf += ')';
+ OrigLength++;
+
+ if (PointerTypeTakesAnyBlockArguments(DeclT) ||
+ PointerTypeTakesAnyObjCQualifiedType(DeclT)) {
+ // Replace the '^' with '*' for arguments.
+ // Replace id<P> with id/*<>*/
+ DeclLoc = ND->getLocation();
+ startBuf = SM->getCharacterData(DeclLoc);
+ const char *argListBegin, *argListEnd;
+ GetExtentOfArgList(startBuf, argListBegin, argListEnd);
+ while (argListBegin < argListEnd) {
+ if (*argListBegin == '^')
+ buf += '*';
+ else if (*argListBegin == '<') {
+ buf += "/*";
+ buf += *argListBegin++;
+ OrigLength++;
+ while (*argListBegin != '>') {
+ buf += *argListBegin++;
+ OrigLength++;
+ }
+ buf += *argListBegin;
+ buf += "*/";
+ }
+ else
+ buf += *argListBegin;
+ argListBegin++;
+ OrigLength++;
+ }
+ buf += ')';
+ OrigLength++;
+ }
+ ReplaceText(Start, OrigLength, buf);
+}
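+
+// Illustration (hypothetical declaration): 'void (^cb)(id<P>);' is rewritten,
+// roughly, as 'void (*cb)(id/*<P>*/);': the '^' becomes '*' and protocol
+// qualifiers are commented out.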
+
+/// SynthesizeByrefCopyDestroyHelper - This routine synthesizes:
+/// void __Block_byref_id_object_copy(struct Block_byref_id_object *dst,
+/// struct Block_byref_id_object *src) {
+/// _Block_object_assign (&_dest->object, _src->object,
+/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT
+/// [|BLOCK_FIELD_IS_WEAK]) // object
+/// _Block_object_assign(&_dest->object, _src->object,
+/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK
+/// [|BLOCK_FIELD_IS_WEAK]) // block
+/// }
+/// And:
+/// void __Block_byref_id_object_dispose(struct Block_byref_id_object *_src) {
+/// _Block_object_dispose(_src->object,
+/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT
+/// [|BLOCK_FIELD_IS_WEAK]) // object
+/// _Block_object_dispose(_src->object,
+/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK
+/// [|BLOCK_FIELD_IS_WEAK]) // block
+/// }
+std::string RewriteModernObjC::SynthesizeByrefCopyDestroyHelper(VarDecl *VD,
+ int flag) {
+ std::string S;
+ if (CopyDestroyCache.count(flag))
+ return S;
+ CopyDestroyCache.insert(flag);
+ S = "static void __Block_byref_id_object_copy_";
+ S += utostr(flag);
+ S += "(void *dst, void *src) {\n";
+
+ // offset into the object pointer is computed as:
+ // void * + void* + int + int + void* + void *
+ unsigned IntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
+ unsigned VoidPtrSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->VoidPtrTy));
+
+ unsigned offset = (VoidPtrSize*4 + IntSize + IntSize)/Context->getCharWidth();
+ S += " _Block_object_assign((char*)dst + ";
+ S += utostr(offset);
+ S += ", *(void * *) ((char*)src + ";
+ S += utostr(offset);
+ S += "), ";
+ S += utostr(flag);
+ S += ");\n}\n";
+
+ S += "static void __Block_byref_id_object_dispose_";
+ S += utostr(flag);
+ S += "(void *src) {\n";
+ S += " _Block_object_dispose(*(void * *) ((char*)src + ";
+ S += utostr(offset);
+ S += "), ";
+ S += utostr(flag);
+ S += ");\n}\n";
+ return S;
+}
+
+/// RewriteByRefVar - For each __block typex ND variable this routine transforms
+/// the declaration into:
+/// struct __Block_byref_ND {
+/// void *__isa; // NULL for everything except __weak pointers
+/// struct __Block_byref_ND *__forwarding;
+/// int32_t __flags;
+/// int32_t __size;
+/// void *__Block_byref_id_object_copy; // If variable is __block ObjC object
+/// void *__Block_byref_id_object_dispose; // If variable is __block ObjC object
+/// typex ND;
+/// };
+///
+/// It then replaces declaration of ND variable with:
+/// struct __Block_byref_ND ND = {__isa=0B, __forwarding=&ND, __flags=some_flag,
+/// __size=sizeof(struct __Block_byref_ND),
+/// ND=initializer-if-any};
+///
+///
+void RewriteModernObjC::RewriteByRefVar(VarDecl *ND, bool firstDecl,
+ bool lastDecl) {
+ int flag = 0;
+ int isa = 0;
+ SourceLocation DeclLoc = ND->getTypeSpecStartLoc();
+ if (DeclLoc.isInvalid())
+ // If the type location is missing, it is because the type itself is
+ // missing (already diagnosed with a warning). Use the variable's
+ // location, which is good enough in this case.
+ DeclLoc = ND->getLocation();
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ SourceLocation X = ND->getLocEnd();
+ X = SM->getExpansionLoc(X);
+ const char *endBuf = SM->getCharacterData(X);
+ std::string Name(ND->getNameAsString());
+ std::string ByrefType;
+ RewriteByRefString(ByrefType, Name, ND, true);
+ ByrefType += " {\n";
+ ByrefType += " void *__isa;\n";
+ RewriteByRefString(ByrefType, Name, ND);
+ ByrefType += " *__forwarding;\n";
+ ByrefType += " int __flags;\n";
+ ByrefType += " int __size;\n";
+ // Add void *__Block_byref_id_object_copy;
+ // void *__Block_byref_id_object_dispose; if needed.
+ QualType Ty = ND->getType();
+ bool HasCopyAndDispose = Context->BlockRequiresCopying(Ty, ND);
+ if (HasCopyAndDispose) {
+ ByrefType += " void (*__Block_byref_id_object_copy)(void*, void*);\n";
+ ByrefType += " void (*__Block_byref_id_object_dispose)(void*);\n";
+ }
+
+ QualType T = Ty;
+ (void)convertBlockPointerToFunctionPointer(T);
+ T.getAsStringInternal(Name, Context->getPrintingPolicy());
+
+ ByrefType += " " + Name + ";\n";
+ ByrefType += "};\n";
+ // Insert this type in global scope. It is needed by helper function.
+ SourceLocation FunLocStart;
+ if (CurFunctionDef)
+ FunLocStart = getFunctionSourceLocation(*this, CurFunctionDef);
+ else {
+ assert(CurMethodDef && "RewriteByRefVar - CurMethodDef is null");
+ FunLocStart = CurMethodDef->getLocStart();
+ }
+ InsertText(FunLocStart, ByrefType);
+
+ if (Ty.isObjCGCWeak()) {
+ flag |= BLOCK_FIELD_IS_WEAK;
+ isa = 1;
+ }
+ if (HasCopyAndDispose) {
+ flag = BLOCK_BYREF_CALLER;
+ QualType Ty = ND->getType();
+ // FIXME. Handle __weak variable (BLOCK_FIELD_IS_WEAK) as well.
+ if (Ty->isBlockPointerType())
+ flag |= BLOCK_FIELD_IS_BLOCK;
+ else
+ flag |= BLOCK_FIELD_IS_OBJECT;
+ std::string HF = SynthesizeByrefCopyDestroyHelper(ND, flag);
+ if (!HF.empty())
+ Preamble += HF;
+ }
+
+ // struct __Block_byref_ND ND =
+ // {0, &ND, some_flag, __size=sizeof(struct __Block_byref_ND),
+ // initializer-if-any};
+ bool hasInit = (ND->getInit() != nullptr);
+ // FIXME. rewriter does not support __block c++ objects which
+ // require construction.
+ if (hasInit)
+ if (CXXConstructExpr *CExp = dyn_cast<CXXConstructExpr>(ND->getInit())) {
+ CXXConstructorDecl *CXXDecl = CExp->getConstructor();
+ if (CXXDecl && CXXDecl->isDefaultConstructor())
+ hasInit = false;
+ }
+
+ unsigned flags = 0;
+ if (HasCopyAndDispose)
+ flags |= BLOCK_HAS_COPY_DISPOSE;
+ Name = ND->getNameAsString();
+ ByrefType.clear();
+ RewriteByRefString(ByrefType, Name, ND);
+ std::string ForwardingCastType("(");
+ ForwardingCastType += ByrefType + " *)";
+ ByrefType += " " + Name + " = {(void*)";
+ ByrefType += utostr(isa);
+ ByrefType += "," + ForwardingCastType + "&" + Name + ", ";
+ ByrefType += utostr(flags);
+ ByrefType += ", ";
+ ByrefType += "sizeof(";
+ RewriteByRefString(ByrefType, Name, ND);
+ ByrefType += ")";
+ if (HasCopyAndDispose) {
+ ByrefType += ", __Block_byref_id_object_copy_";
+ ByrefType += utostr(flag);
+ ByrefType += ", __Block_byref_id_object_dispose_";
+ ByrefType += utostr(flag);
+ }
+
+ if (!firstDecl) {
+ // In a multiple __block declaration, for all but the first declarator,
+ // find the location of the separating comma. This is the start location
+ // where the new text is to be inserted.
+ DeclLoc = ND->getLocation();
+ const char *startDeclBuf = SM->getCharacterData(DeclLoc);
+ const char *commaBuf = startDeclBuf;
+ while (*commaBuf != ',')
+ commaBuf--;
+ assert((*commaBuf == ',') && "RewriteByRefVar: can't find ','");
+ DeclLoc = DeclLoc.getLocWithOffset(commaBuf - startDeclBuf);
+ startBuf = commaBuf;
+ }
+
+ if (!hasInit) {
+ ByrefType += "};\n";
+ unsigned nameSize = Name.size();
+ // For a block or function pointer declaration, the name is already
+ // part of the declaration.
+ if (Ty->isBlockPointerType() || Ty->isFunctionPointerType())
+ nameSize = 1;
+ ReplaceText(DeclLoc, endBuf-startBuf+nameSize, ByrefType);
+ }
+ else {
+ ByrefType += ", ";
+ SourceLocation startLoc;
+ Expr *E = ND->getInit();
+ if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E))
+ startLoc = ECE->getLParenLoc();
+ else
+ startLoc = E->getLocStart();
+ startLoc = SM->getExpansionLoc(startLoc);
+ endBuf = SM->getCharacterData(startLoc);
+ ReplaceText(DeclLoc, endBuf-startBuf, ByrefType);
+
+ const char separator = lastDecl ? ';' : ',';
+ const char *startInitializerBuf = SM->getCharacterData(startLoc);
+ const char *separatorBuf = strchr(startInitializerBuf, separator);
+ assert((*separatorBuf == separator) &&
+ "RewriteByRefVar: can't find ';' or ','");
+ SourceLocation separatorLoc =
+ startLoc.getLocWithOffset(separatorBuf-startInitializerBuf);
+
+ InsertText(separatorLoc, lastDecl ? "}" : "};\n");
+ }
+}
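+
+// Illustration (hypothetical): '__block int i = 10;' is rewritten, roughly,
+// into
+//   struct __Block_byref_i_0 {
+//     void *__isa;
+//     __Block_byref_i_0 *__forwarding;
+//     int __flags;
+//     int __size;
+//     int i;
+//   };
+//   struct __Block_byref_i_0 i = {(void*)0, (__Block_byref_i_0 *)&i, 0,
+//                                 sizeof(__Block_byref_i_0), 10};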
+
+void RewriteModernObjC::CollectBlockDeclRefInfo(BlockExpr *Exp) {
+ // Add initializers for any closure decl refs.
+ GetBlockDeclRefExprs(Exp->getBody());
+ if (BlockDeclRefs.size()) {
+ // Unique all "by copy" declarations.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (!BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>()) {
+ if (!BlockByCopyDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) {
+ BlockByCopyDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl());
+ BlockByCopyDecls.push_back(BlockDeclRefs[i]->getDecl());
+ }
+ }
+ // Unique all "by ref" declarations.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>()) {
+ if (!BlockByRefDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) {
+ BlockByRefDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl());
+ BlockByRefDecls.push_back(BlockDeclRefs[i]->getDecl());
+ }
+ }
+ // Find any imported blocks...they will need special attention.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>() ||
+ BlockDeclRefs[i]->getType()->isObjCObjectPointerType() ||
+ BlockDeclRefs[i]->getType()->isBlockPointerType())
+ ImportedBlockDecls.insert(BlockDeclRefs[i]->getDecl());
+ }
+}
+
+FunctionDecl *RewriteModernObjC::SynthBlockInitFunctionDecl(StringRef name) {
+ IdentifierInfo *ID = &Context->Idents.get(name);
+ QualType FType = Context->getFunctionNoProtoType(Context->VoidPtrTy);
+ return FunctionDecl::Create(*Context, TUDecl, SourceLocation(),
+ SourceLocation(), ID, FType, nullptr, SC_Extern,
+ false, false);
+}
+
+Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
+ const SmallVectorImpl<DeclRefExpr *> &InnerBlockDeclRefs) {
+
+ const BlockDecl *block = Exp->getBlockDecl();
+
+ Blocks.push_back(Exp);
+
+ CollectBlockDeclRefInfo(Exp);
+
+ // Add inner imported variables now used in current block.
+ int countOfInnerDecls = 0;
+ if (!InnerBlockDeclRefs.empty()) {
+ for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++) {
+ DeclRefExpr *Exp = InnerBlockDeclRefs[i];
+ ValueDecl *VD = Exp->getDecl();
+ if (!VD->hasAttr<BlocksAttr>() && !BlockByCopyDeclsPtrSet.count(VD)) {
+ // We need to save the copied-in variables in nested
+ // blocks because they are needed at the end for some of the API
+ // generation. See the SynthesizeBlockLiterals routine.
+ InnerDeclRefs.push_back(Exp); countOfInnerDecls++;
+ BlockDeclRefs.push_back(Exp);
+ BlockByCopyDeclsPtrSet.insert(VD);
+ BlockByCopyDecls.push_back(VD);
+ }
+ if (VD->hasAttr<BlocksAttr>() && !BlockByRefDeclsPtrSet.count(VD)) {
+ InnerDeclRefs.push_back(Exp); countOfInnerDecls++;
+ BlockDeclRefs.push_back(Exp);
+ BlockByRefDeclsPtrSet.insert(VD);
+ BlockByRefDecls.push_back(VD);
+ }
+ }
+ // Find any imported blocks...they will need special attention.
+ for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++)
+ if (InnerBlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>() ||
+ InnerBlockDeclRefs[i]->getType()->isObjCObjectPointerType() ||
+ InnerBlockDeclRefs[i]->getType()->isBlockPointerType())
+ ImportedBlockDecls.insert(InnerBlockDeclRefs[i]->getDecl());
+ }
+ InnerDeclRefsCount.push_back(countOfInnerDecls);
+
+ std::string FuncName;
+
+ if (CurFunctionDef)
+ FuncName = CurFunctionDef->getNameAsString();
+ else if (CurMethodDef)
+ BuildUniqueMethodName(FuncName, CurMethodDef);
+ else if (GlobalVarDecl)
+ FuncName = std::string(GlobalVarDecl->getNameAsString());
+
+ bool GlobalBlockExpr =
+ block->getDeclContext()->getRedeclContext()->isFileContext();
+
+ if (GlobalBlockExpr && !GlobalVarDecl) {
+ Diags.Report(block->getLocation(), GlobalBlockRewriteFailedDiag);
+ GlobalBlockExpr = false;
+ }
+
+ std::string BlockNumber = utostr(Blocks.size()-1);
+
+ std::string Func = "__" + FuncName + "_block_func_" + BlockNumber;
+
+ // Get a pointer to the function type so we can cast appropriately.
+ QualType BFT = convertFunctionTypeOfBlocks(Exp->getFunctionType());
+ QualType FType = Context->getPointerType(BFT);
+
+ FunctionDecl *FD;
+ Expr *NewRep;
+
+ // Simulate a constructor call...
+ std::string Tag;
+
+ if (GlobalBlockExpr)
+ Tag = "__global_";
+ else
+ Tag = "__";
+ Tag += FuncName + "_block_impl_" + BlockNumber;
+
+ FD = SynthBlockInitFunctionDecl(Tag);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, FType, VK_RValue,
+ SourceLocation());
+
+ SmallVector<Expr*, 4> InitExprs;
+
+ // Initialize the block function.
+ FD = SynthBlockInitFunctionDecl(Func);
+ DeclRefExpr *Arg = new (Context) DeclRefExpr(FD, false, FD->getType(),
+ VK_LValue, SourceLocation());
+ CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
+ CK_BitCast, Arg);
+ InitExprs.push_back(castExpr);
+
+ // Initialize the block descriptor.
+ std::string DescData = "__" + FuncName + "_block_desc_" + BlockNumber + "_DATA";
+
+ VarDecl *NewVD = VarDecl::Create(*Context, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get(DescData.c_str()),
+ Context->VoidPtrTy, nullptr,
+ SC_Static);
+ UnaryOperator *DescRefExpr =
+ new (Context) UnaryOperator(new (Context) DeclRefExpr(NewVD, false,
+ Context->VoidPtrTy,
+ VK_LValue,
+ SourceLocation()),
+ UO_AddrOf,
+ Context->getPointerType(Context->VoidPtrTy),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ InitExprs.push_back(DescRefExpr);
+
+ // Add initializers for any closure decl refs.
+ if (BlockDeclRefs.size()) {
+ Expr *Exp;
+ // Output all "by copy" declarations.
+ for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ if (isObjCType((*I)->getType())) {
+ // FIXME: Conform to ABI ([[obj retain] autorelease]).
+ FD = SynthBlockInitFunctionDecl((*I)->getName());
+ Exp = new (Context) DeclRefExpr(FD, false, FD->getType(),
+ VK_LValue, SourceLocation());
+ if (HasLocalVariableExternalStorage(*I)) {
+ QualType QT = (*I)->getType();
+ QT = Context->getPointerType(QT);
+ Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
+ OK_Ordinary, SourceLocation());
+ }
+ } else if (isTopLevelBlockPointerType((*I)->getType())) {
+ FD = SynthBlockInitFunctionDecl((*I)->getName());
+ Arg = new (Context) DeclRefExpr(FD, false, FD->getType(),
+ VK_LValue, SourceLocation());
+ Exp = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
+ CK_BitCast, Arg);
+ } else {
+ FD = SynthBlockInitFunctionDecl((*I)->getName());
+ Exp = new (Context) DeclRefExpr(FD, false, FD->getType(),
+ VK_LValue, SourceLocation());
+ if (HasLocalVariableExternalStorage(*I)) {
+ QualType QT = (*I)->getType();
+ QT = Context->getPointerType(QT);
+ Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
+ OK_Ordinary, SourceLocation());
+ }
+
+ }
+ InitExprs.push_back(Exp);
+ }
+ // Output all "by ref" declarations.
+ for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ ValueDecl *ND = (*I);
+ std::string Name(ND->getNameAsString());
+ std::string RecName;
+ RewriteByRefString(RecName, Name, ND, true);
+ IdentifierInfo *II = &Context->Idents.get(RecName.c_str()
+ + sizeof("struct"));
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ II);
+ assert(RD && "SynthBlockInitExpr(): Can't find RecordDecl");
+ QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+
+ FD = SynthBlockInitFunctionDecl((*I)->getName());
+ Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
+ SourceLocation());
+ bool isNestedCapturedVar = false;
+ if (block)
+ for (const auto &CI : block->captures()) {
+ const VarDecl *variable = CI.getVariable();
+ if (variable == ND && CI.isNested()) {
+ assert (CI.isByRef() &&
+ "SynthBlockInitExpr - captured block variable is not byref");
+ isNestedCapturedVar = true;
+ break;
+ }
+ }
+      // A captured nested byref variable has its address passed. Do not take
+      // its address again.
+ if (!isNestedCapturedVar)
+ Exp = new (Context) UnaryOperator(Exp, UO_AddrOf,
+ Context->getPointerType(Exp->getType()),
+ VK_RValue, OK_Ordinary, SourceLocation());
+ Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_BitCast, Exp);
+ InitExprs.push_back(Exp);
+ }
+ }
+ if (ImportedBlockDecls.size()) {
+    // Generate BLOCK_HAS_COPY_DISPOSE (has helper funcs) | BLOCK_HAS_DESCRIPTOR.
+ int flag = (BLOCK_HAS_COPY_DISPOSE | BLOCK_HAS_DESCRIPTOR);
+ unsigned IntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
+ Expr *FlagExp = IntegerLiteral::Create(*Context, llvm::APInt(IntSize, flag),
+ Context->IntTy, SourceLocation());
+ InitExprs.push_back(FlagExp);
+ }
+ NewRep = new (Context) CallExpr(*Context, DRE, InitExprs,
+ FType, VK_LValue, SourceLocation());
+
+ if (GlobalBlockExpr) {
+ assert (!GlobalConstructionExp &&
+ "SynthBlockInitExpr - GlobalConstructionExp must be null");
+ GlobalConstructionExp = NewRep;
+ NewRep = DRE;
+ }
+
+ NewRep = new (Context) UnaryOperator(NewRep, UO_AddrOf,
+ Context->getPointerType(NewRep->getType()),
+ VK_RValue, OK_Ordinary, SourceLocation());
+ NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast,
+ NewRep);
+ // Put Paren around the call.
+ NewRep = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ NewRep);
+
+ BlockDeclRefs.clear();
+ BlockByRefDecls.clear();
+ BlockByRefDeclsPtrSet.clear();
+ BlockByCopyDecls.clear();
+ BlockByCopyDeclsPtrSet.clear();
+ ImportedBlockDecls.clear();
+ return NewRep;
+}
+
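+/// IsDeclStmtInForeachHeader - Return true if this DeclStmt is the element
+/// declaration of the ObjCForCollectionStmt on top of the statement stack,
+/// e.g. the "id <FooProtocol> index" in "for (id <FooProtocol> index in arr)".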
+bool RewriteModernObjC::IsDeclStmtInForeachHeader(DeclStmt *DS) {
+ if (const ObjCForCollectionStmt * CS =
+ dyn_cast<ObjCForCollectionStmt>(Stmts.back()))
+ return CS->getElement() == DS;
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Function Body / Expression rewriting
+//===----------------------------------------------------------------------===//
+
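+/// RewriteFunctionBodyOrGlobalInitializer - Rewrite a statement tree bottom
+/// up: children are rewritten first, then the statement itself is dispatched
+/// to the matching Rewrite* routine. Loop statements are pushed on the
+/// statement stack so that break/continue and foreach rewriting can locate
+/// their enclosing construct.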
+Stmt *RewriteModernObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
+ if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
+ isa<DoStmt>(S) || isa<ForStmt>(S))
+ Stmts.push_back(S);
+ else if (isa<ObjCForCollectionStmt>(S)) {
+ Stmts.push_back(S);
+ ObjCBcLabelNo.push_back(++BcLabelCount);
+ }
+
+ // Pseudo-object operations and ivar references need special
+ // treatment because we're going to recursively rewrite them.
+ if (PseudoObjectExpr *PseudoOp = dyn_cast<PseudoObjectExpr>(S)) {
+ if (isa<BinaryOperator>(PseudoOp->getSyntacticForm())) {
+ return RewritePropertyOrImplicitSetter(PseudoOp);
+ } else {
+ return RewritePropertyOrImplicitGetter(PseudoOp);
+ }
+ } else if (ObjCIvarRefExpr *IvarRefExpr = dyn_cast<ObjCIvarRefExpr>(S)) {
+ return RewriteObjCIvarRefExpr(IvarRefExpr);
+ }
+ else if (isa<OpaqueValueExpr>(S))
+ S = cast<OpaqueValueExpr>(S)->getSourceExpr();
+
+ SourceRange OrigStmtRange = S->getSourceRange();
+
+ // Perform a bottom up rewrite of all children.
+ for (Stmt *&childStmt : S->children())
+ if (childStmt) {
+ Stmt *newStmt = RewriteFunctionBodyOrGlobalInitializer(childStmt);
+ if (newStmt) {
+ childStmt = newStmt;
+ }
+ }
+
+ if (BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
+ SmallVector<DeclRefExpr *, 8> InnerBlockDeclRefs;
+ llvm::SmallPtrSet<const DeclContext *, 8> InnerContexts;
+ InnerContexts.insert(BE->getBlockDecl());
+ ImportedLocalExternalDecls.clear();
+ GetInnerBlockDeclRefExprs(BE->getBody(),
+ InnerBlockDeclRefs, InnerContexts);
+ // Rewrite the block body in place.
+ Stmt *SaveCurrentBody = CurrentBody;
+ CurrentBody = BE->getBody();
+ PropParentMap = nullptr;
+    // A block literal on the RHS of a property-dot-syntax assignment
+    // must be replaced by its synthesized AST so that getRewrittenText
+    // works as expected. In this case, what actually ends up on the RHS
+    // is blockTranscribed, the synthesized expression for the
+    // block literal; as in: self.c = ^() {[ace ARR];};
+ bool saveDisableReplaceStmt = DisableReplaceStmt;
+ DisableReplaceStmt = false;
+ RewriteFunctionBodyOrGlobalInitializer(BE->getBody());
+ DisableReplaceStmt = saveDisableReplaceStmt;
+ CurrentBody = SaveCurrentBody;
+ PropParentMap = nullptr;
+ ImportedLocalExternalDecls.clear();
+ // Now we snarf the rewritten text and stash it away for later use.
+ std::string Str = Rewrite.getRewrittenText(BE->getSourceRange());
+ RewrittenBlockExprs[BE] = Str;
+
+ Stmt *blockTranscribed = SynthBlockInitExpr(BE, InnerBlockDeclRefs);
+
+ //blockTranscribed->dump();
+ ReplaceStmt(S, blockTranscribed);
+ return blockTranscribed;
+ }
+ // Handle specific things.
+ if (ObjCEncodeExpr *AtEncode = dyn_cast<ObjCEncodeExpr>(S))
+ return RewriteAtEncode(AtEncode);
+
+ if (ObjCSelectorExpr *AtSelector = dyn_cast<ObjCSelectorExpr>(S))
+ return RewriteAtSelector(AtSelector);
+
+ if (ObjCStringLiteral *AtString = dyn_cast<ObjCStringLiteral>(S))
+ return RewriteObjCStringLiteral(AtString);
+
+ if (ObjCBoolLiteralExpr *BoolLitExpr = dyn_cast<ObjCBoolLiteralExpr>(S))
+ return RewriteObjCBoolLiteralExpr(BoolLitExpr);
+
+ if (ObjCBoxedExpr *BoxedExpr = dyn_cast<ObjCBoxedExpr>(S))
+ return RewriteObjCBoxedExpr(BoxedExpr);
+
+ if (ObjCArrayLiteral *ArrayLitExpr = dyn_cast<ObjCArrayLiteral>(S))
+ return RewriteObjCArrayLiteralExpr(ArrayLitExpr);
+
+ if (ObjCDictionaryLiteral *DictionaryLitExpr =
+ dyn_cast<ObjCDictionaryLiteral>(S))
+ return RewriteObjCDictionaryLiteralExpr(DictionaryLitExpr);
+
+ if (ObjCMessageExpr *MessExpr = dyn_cast<ObjCMessageExpr>(S)) {
+#if 0
+ // Before we rewrite it, put the original message expression in a comment.
+ SourceLocation startLoc = MessExpr->getLocStart();
+ SourceLocation endLoc = MessExpr->getLocEnd();
+
+ const char *startBuf = SM->getCharacterData(startLoc);
+ const char *endBuf = SM->getCharacterData(endLoc);
+
+ std::string messString;
+ messString += "// ";
+ messString.append(startBuf, endBuf-startBuf+1);
+ messString += "\n";
+
+ // FIXME: Missing definition of
+ // InsertText(clang::SourceLocation, char const*, unsigned int).
+ // InsertText(startLoc, messString);
+ // Tried this, but it didn't work either...
+ // ReplaceText(startLoc, 0, messString.c_str(), messString.size());
+#endif
+ return RewriteMessageExpr(MessExpr);
+ }
+
+ if (ObjCAutoreleasePoolStmt *StmtAutoRelease =
+ dyn_cast<ObjCAutoreleasePoolStmt>(S)) {
+ return RewriteObjCAutoreleasePoolStmt(StmtAutoRelease);
+ }
+
+ if (ObjCAtTryStmt *StmtTry = dyn_cast<ObjCAtTryStmt>(S))
+ return RewriteObjCTryStmt(StmtTry);
+
+ if (ObjCAtSynchronizedStmt *StmtTry = dyn_cast<ObjCAtSynchronizedStmt>(S))
+ return RewriteObjCSynchronizedStmt(StmtTry);
+
+ if (ObjCAtThrowStmt *StmtThrow = dyn_cast<ObjCAtThrowStmt>(S))
+ return RewriteObjCThrowStmt(StmtThrow);
+
+ if (ObjCProtocolExpr *ProtocolExp = dyn_cast<ObjCProtocolExpr>(S))
+ return RewriteObjCProtocolExpr(ProtocolExp);
+
+ if (ObjCForCollectionStmt *StmtForCollection =
+ dyn_cast<ObjCForCollectionStmt>(S))
+ return RewriteObjCForCollectionStmt(StmtForCollection,
+ OrigStmtRange.getEnd());
+ if (BreakStmt *StmtBreakStmt =
+ dyn_cast<BreakStmt>(S))
+ return RewriteBreakStmt(StmtBreakStmt);
+ if (ContinueStmt *StmtContinueStmt =
+ dyn_cast<ContinueStmt>(S))
+ return RewriteContinueStmt(StmtContinueStmt);
+
+ // Need to check for protocol refs (id <P>, Foo <P> *) in variable decls
+ // and cast exprs.
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
+ // FIXME: What we're doing here is modifying the type-specifier that
+ // precedes the first Decl. In the future the DeclGroup should have
+ // a separate type-specifier that we can rewrite.
+ // NOTE: We need to avoid rewriting the DeclStmt if it is within
+ // the context of an ObjCForCollectionStmt. For example:
+ // NSArray *someArray;
+ // for (id <FooProtocol> index in someArray) ;
+ // This is because RewriteObjCForCollectionStmt() does textual rewriting
+ // and it depends on the original text locations/positions.
+ if (Stmts.empty() || !IsDeclStmtInForeachHeader(DS))
+ RewriteObjCQualifiedInterfaceTypes(*DS->decl_begin());
+
+ // Blocks rewrite rules.
+ for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end();
+ DI != DE; ++DI) {
+ Decl *SD = *DI;
+ if (ValueDecl *ND = dyn_cast<ValueDecl>(SD)) {
+ if (isTopLevelBlockPointerType(ND->getType()))
+ RewriteBlockPointerDecl(ND);
+ else if (ND->getType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(ND->getType(), ND);
+ if (VarDecl *VD = dyn_cast<VarDecl>(SD)) {
+ if (VD->hasAttr<BlocksAttr>()) {
+ static unsigned uniqueByrefDeclCount = 0;
+ assert(!BlockByRefDeclNo.count(ND) &&
+ "RewriteFunctionBodyOrGlobalInitializer: Duplicate byref decl");
+ BlockByRefDeclNo[ND] = uniqueByrefDeclCount++;
+ RewriteByRefVar(VD, (DI == DS->decl_begin()), ((DI+1) == DE));
+ }
+ else
+ RewriteTypeOfDecl(VD);
+ }
+ }
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(SD)) {
+ if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
+ RewriteBlockPointerDecl(TD);
+ else if (TD->getUnderlyingType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
+ }
+ }
+ }
+
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(S))
+ RewriteObjCQualifiedInterfaceTypes(CE);
+
+ if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
+ isa<DoStmt>(S) || isa<ForStmt>(S)) {
+ assert(!Stmts.empty() && "Statement stack is empty");
+ assert ((isa<SwitchStmt>(Stmts.back()) || isa<WhileStmt>(Stmts.back()) ||
+ isa<DoStmt>(Stmts.back()) || isa<ForStmt>(Stmts.back()))
+ && "Statement stack mismatch");
+ Stmts.pop_back();
+ }
+ // Handle blocks rewriting.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
+ ValueDecl *VD = DRE->getDecl();
+ if (VD->hasAttr<BlocksAttr>())
+ return RewriteBlockDeclRefExpr(DRE);
+ if (HasLocalVariableExternalStorage(VD))
+ return RewriteLocalVariableExternalStorage(DRE);
+ }
+
+ if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ if (CE->getCallee()->getType()->isBlockPointerType()) {
+ Stmt *BlockCall = SynthesizeBlockCall(CE, CE->getCallee());
+ ReplaceStmt(S, BlockCall);
+ return BlockCall;
+ }
+ }
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(S)) {
+ RewriteCastExpr(CE);
+ }
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(S)) {
+ RewriteImplicitCastObjCExpr(ICE);
+ }
+#if 0
+
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(S)) {
+ CastExpr *Replacement = new (Context) CastExpr(ICE->getType(),
+ ICE->getSubExpr(),
+ SourceLocation());
+ // Get the new text.
+ std::string SStr;
+ llvm::raw_string_ostream Buf(SStr);
+ Replacement->printPretty(Buf);
+ const std::string &Str = Buf.str();
+
+ printf("CAST = %s\n", &Str[0]);
+ InsertText(ICE->getSubExpr()->getLocStart(), Str);
+ delete S;
+ return Replacement;
+ }
+#endif
+ // Return this stmt unmodified.
+ return S;
+}
+
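+/// RewriteRecordBody - Rewrite the field types of a record definition:
+/// block-pointer fields and ObjC qualified-id/interface fields both need
+/// their types rewritten.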
+void RewriteModernObjC::RewriteRecordBody(RecordDecl *RD) {
+ for (auto *FD : RD->fields()) {
+ if (isTopLevelBlockPointerType(FD->getType()))
+ RewriteBlockPointerDecl(FD);
+ if (FD->getType()->isObjCQualifiedIdType() ||
+ FD->getType()->isObjCQualifiedInterfaceType())
+ RewriteObjCQualifiedInterfaceTypes(FD);
+ }
+}
+
+/// HandleDeclInMainFile - This is called for each top-level decl defined in the
+/// main file of the input.
+void RewriteModernObjC::HandleDeclInMainFile(Decl *D) {
+ switch (D->getKind()) {
+ case Decl::Function: {
+ FunctionDecl *FD = cast<FunctionDecl>(D);
+ if (FD->isOverloadedOperator())
+ return;
+
+ // Since function prototypes don't have ParmDecl's, we check the function
+ // prototype. This enables us to rewrite function declarations and
+ // definitions using the same code.
+ RewriteBlocksInFunctionProtoType(FD->getType(), FD);
+
+ if (!FD->isThisDeclarationADefinition())
+ break;
+
+ // FIXME: If this should support Obj-C++, support CXXTryStmt
+ if (CompoundStmt *Body = dyn_cast_or_null<CompoundStmt>(FD->getBody())) {
+ CurFunctionDef = FD;
+ CurrentBody = Body;
+ Body =
+ cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
+ FD->setBody(Body);
+ CurrentBody = nullptr;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = nullptr;
+ }
+ // This synthesizes and inserts the block "impl" struct, invoke function,
+ // and any copy/dispose helper functions.
+ InsertBlockLiteralsWithinFunction(FD);
+ RewriteLineDirective(D);
+ CurFunctionDef = nullptr;
+ }
+ break;
+ }
+ case Decl::ObjCMethod: {
+ ObjCMethodDecl *MD = cast<ObjCMethodDecl>(D);
+ if (CompoundStmt *Body = MD->getCompoundBody()) {
+ CurMethodDef = MD;
+ CurrentBody = Body;
+ Body =
+ cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
+ MD->setBody(Body);
+ CurrentBody = nullptr;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = nullptr;
+ }
+ InsertBlockLiteralsWithinMethod(MD);
+ RewriteLineDirective(D);
+ CurMethodDef = nullptr;
+ }
+ break;
+ }
+ case Decl::ObjCImplementation: {
+ ObjCImplementationDecl *CI = cast<ObjCImplementationDecl>(D);
+ ClassImplementation.push_back(CI);
+ break;
+ }
+ case Decl::ObjCCategoryImpl: {
+ ObjCCategoryImplDecl *CI = cast<ObjCCategoryImplDecl>(D);
+ CategoryImplementation.push_back(CI);
+ break;
+ }
+ case Decl::Var: {
+ VarDecl *VD = cast<VarDecl>(D);
+ RewriteObjCQualifiedInterfaceTypes(VD);
+ if (isTopLevelBlockPointerType(VD->getType()))
+ RewriteBlockPointerDecl(VD);
+ else if (VD->getType()->isFunctionPointerType()) {
+ CheckFunctionPointerDecl(VD->getType(), VD);
+ if (VD->getInit()) {
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(VD->getInit())) {
+ RewriteCastExpr(CE);
+ }
+ }
+ } else if (VD->getType()->isRecordType()) {
+ RecordDecl *RD = VD->getType()->getAs<RecordType>()->getDecl();
+ if (RD->isCompleteDefinition())
+ RewriteRecordBody(RD);
+ }
+ if (VD->getInit()) {
+ GlobalVarDecl = VD;
+ CurrentBody = VD->getInit();
+ RewriteFunctionBodyOrGlobalInitializer(VD->getInit());
+ CurrentBody = nullptr;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = nullptr;
+ }
+ SynthesizeBlockLiterals(VD->getTypeSpecStartLoc(), VD->getName());
+ GlobalVarDecl = nullptr;
+
+ // This is needed for blocks.
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(VD->getInit())) {
+ RewriteCastExpr(CE);
+ }
+ }
+ break;
+ }
+ case Decl::TypeAlias:
+ case Decl::Typedef: {
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+ if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
+ RewriteBlockPointerDecl(TD);
+ else if (TD->getUnderlyingType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
+ else
+ RewriteObjCQualifiedInterfaceTypes(TD);
+ }
+ break;
+ }
+ case Decl::CXXRecord:
+ case Decl::Record: {
+ RecordDecl *RD = cast<RecordDecl>(D);
+ if (RD->isCompleteDefinition())
+ RewriteRecordBody(RD);
+ break;
+ }
+ default:
+ break;
+ }
+ // Nothing yet.
+}
+
+/// Write_ProtocolExprReferencedMetadata - This routine writes out the
+/// protocol reference symbols in the form of:
+/// struct _protocol_t *PROTOCOL_REF = &PROTOCOL_METADATA.
+static void Write_ProtocolExprReferencedMetadata(ASTContext *Context,
+ ObjCProtocolDecl *PDecl,
+ std::string &Result) {
+ // Also output .objc_protorefs$B section and its meta-data.
+ if (Context->getLangOpts().MicrosoftExt)
+ Result += "static ";
+ Result += "struct _protocol_t *";
+ Result += "_OBJC_PROTOCOL_REFERENCE_$_";
+ Result += PDecl->getNameAsString();
+ Result += " = &";
+ Result += "_OBJC_PROTOCOL_"; Result += PDecl->getNameAsString();
+ Result += ";\n";
+}
+
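+/// HandleTranslationUnit - Called once the entire translation unit has been
+/// parsed. Performs the postponed rewrites (function bodies, interface
+/// declarations), inserts the preamble, and emits the class, category, and
+/// protocol metadata followed by the image info into the output file.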
+void RewriteModernObjC::HandleTranslationUnit(ASTContext &C) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ RewriteInclude();
+
+ for (unsigned i = 0, e = FunctionDefinitionsSeen.size(); i < e; i++) {
+    // Translation of function bodies was postponed until all classes and
+    // their extensions and implementations have been seen. This is because we
+    // cannot build the grouping structs for bitfields until they are all seen.
+ FunctionDecl *FDecl = FunctionDefinitionsSeen[i];
+ HandleTopLevelSingleDecl(FDecl);
+ }
+
+ // Here's a great place to add any extra declarations that may be needed.
+ // Write out meta data for each @protocol(<expr>).
+ for (ObjCProtocolDecl *ProtDecl : ProtocolExprDecls) {
+ RewriteObjCProtocolMetaData(ProtDecl, Preamble);
+ Write_ProtocolExprReferencedMetadata(Context, ProtDecl, Preamble);
+ }
+
+ InsertText(SM->getLocForStartOfFile(MainFileID), Preamble, false);
+
+ if (ClassImplementation.size() || CategoryImplementation.size())
+ RewriteImplementations();
+
+ for (unsigned i = 0, e = ObjCInterfacesSeen.size(); i < e; i++) {
+ ObjCInterfaceDecl *CDecl = ObjCInterfacesSeen[i];
+    // Write the struct declaration for the class, matching its ivar
+    // declarations. Note that for the modern ABI this is postponed until the
+    // end of the TU because class extensions and the implementation might
+    // declare their own private ivars.
+ RewriteInterfaceDecl(CDecl);
+ }
+
+ // Get the buffer corresponding to MainFileID. If we haven't changed it, then
+ // we are done.
+ if (const RewriteBuffer *RewriteBuf =
+ Rewrite.getRewriteBufferFor(MainFileID)) {
+ //printf("Changed:\n");
+ *OutFile << std::string(RewriteBuf->begin(), RewriteBuf->end());
+ } else {
+ llvm::errs() << "No changes\n";
+ }
+
+ if (ClassImplementation.size() || CategoryImplementation.size() ||
+ ProtocolExprDecls.size()) {
+    // Rewrite Objective-C metadata.
+ std::string ResultStr;
+ RewriteMetaDataIntoBuffer(ResultStr);
+ // Emit metadata.
+ *OutFile << ResultStr;
+ }
+  // Emit ImageInfo.
+ {
+ std::string ResultStr;
+ WriteImageInfo(ResultStr);
+ *OutFile << ResultStr;
+ }
+ OutFile->flush();
+}
+
+void RewriteModernObjC::Initialize(ASTContext &context) {
+ InitializeCommon(context);
+
+ Preamble += "#ifndef __OBJC2__\n";
+ Preamble += "#define __OBJC2__\n";
+ Preamble += "#endif\n";
+
+  // Declaring objc_selector outside the parameter list removes a silly
+  // scope-related warning...
+ if (IsHeader)
+ Preamble = "#pragma once\n";
+ Preamble += "struct objc_selector; struct objc_class;\n";
+ Preamble += "struct __rw_objc_super { \n\tstruct objc_object *object; ";
+ Preamble += "\n\tstruct objc_object *superClass; ";
+ // Add a constructor for creating temporary objects.
+ Preamble += "\n\t__rw_objc_super(struct objc_object *o, struct objc_object *s) ";
+ Preamble += ": object(o), superClass(s) {} ";
+ Preamble += "\n};\n";
+
+ if (LangOpts.MicrosoftExt) {
+ // Define all sections using syntax that makes sense.
+ // These are currently generated.
+ Preamble += "\n#pragma section(\".objc_classlist$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_catlist$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_imageinfo$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_nlclslist$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_nlcatlist$B\", long, read, write)\n";
+ // These are generated but not necessary for functionality.
+ Preamble += "#pragma section(\".cat_cls_meth$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".inst_meth$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".cls_meth$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_ivar$B\", long, read, write)\n";
+
+    // These would need to be generated for performance. Currently they are
+    // not; API calls are used instead.
+ Preamble += "#pragma section(\".objc_selrefs$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_classrefs$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_superrefs$B\", long, read, write)\n";
+
+ }
+ Preamble += "#ifndef _REWRITER_typedef_Protocol\n";
+ Preamble += "typedef struct objc_object Protocol;\n";
+ Preamble += "#define _REWRITER_typedef_Protocol\n";
+ Preamble += "#endif\n";
+ if (LangOpts.MicrosoftExt) {
+ Preamble += "#define __OBJC_RW_DLLIMPORT extern \"C\" __declspec(dllimport)\n";
+ Preamble += "#define __OBJC_RW_STATICIMPORT extern \"C\"\n";
+ }
+ else
+ Preamble += "#define __OBJC_RW_DLLIMPORT extern\n";
+
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSend(void);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSendSuper(void);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSend_stret(void);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSendSuper_stret(void);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSend_fpret(void);\n";
+
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *objc_getClass";
+ Preamble += "(const char *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *class_getSuperclass";
+ Preamble += "(struct objc_class *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *objc_getMetaClass";
+ Preamble += "(const char *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_throw( struct objc_object *);\n";
+ // @synchronized hooks.
+ Preamble += "__OBJC_RW_DLLIMPORT int objc_sync_enter( struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT int objc_sync_exit( struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT Protocol *objc_getProtocol(const char *);\n";
+ Preamble += "#ifdef _WIN64\n";
+ Preamble += "typedef unsigned long long _WIN_NSUInteger;\n";
+ Preamble += "#else\n";
+ Preamble += "typedef unsigned int _WIN_NSUInteger;\n";
+ Preamble += "#endif\n";
+ Preamble += "#ifndef __FASTENUMERATIONSTATE\n";
+ Preamble += "struct __objcFastEnumerationState {\n\t";
+ Preamble += "unsigned long state;\n\t";
+ Preamble += "void **itemsPtr;\n\t";
+ Preamble += "unsigned long *mutationsPtr;\n\t";
+ Preamble += "unsigned long extra[5];\n};\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_enumerationMutation(struct objc_object *);\n";
+ Preamble += "#define __FASTENUMERATIONSTATE\n";
+ Preamble += "#endif\n";
+ Preamble += "#ifndef __NSCONSTANTSTRINGIMPL\n";
+ Preamble += "struct __NSConstantStringImpl {\n";
+ Preamble += " int *isa;\n";
+ Preamble += " int flags;\n";
+ Preamble += " char *str;\n";
+ Preamble += "#if _WIN64\n";
+ Preamble += " long long length;\n";
+ Preamble += "#else\n";
+ Preamble += " long length;\n";
+ Preamble += "#endif\n";
+ Preamble += "};\n";
+ Preamble += "#ifdef CF_EXPORT_CONSTANT_STRING\n";
+ Preamble += "extern \"C\" __declspec(dllexport) int __CFConstantStringClassReference[];\n";
+ Preamble += "#else\n";
+ Preamble += "__OBJC_RW_DLLIMPORT int __CFConstantStringClassReference[];\n";
+ Preamble += "#endif\n";
+ Preamble += "#define __NSCONSTANTSTRINGIMPL\n";
+ Preamble += "#endif\n";
+ // Blocks preamble.
+ Preamble += "#ifndef BLOCK_IMPL\n";
+ Preamble += "#define BLOCK_IMPL\n";
+ Preamble += "struct __block_impl {\n";
+ Preamble += " void *isa;\n";
+ Preamble += " int Flags;\n";
+ Preamble += " int Reserved;\n";
+ Preamble += " void *FuncPtr;\n";
+ Preamble += "};\n";
+ Preamble += "// Runtime copy/destroy helper functions (from Block_private.h)\n";
+ Preamble += "#ifdef __OBJC_EXPORT_BLOCKS\n";
+ Preamble += "extern \"C\" __declspec(dllexport) "
+ "void _Block_object_assign(void *, const void *, const int);\n";
+ Preamble += "extern \"C\" __declspec(dllexport) void _Block_object_dispose(const void *, const int);\n";
+ Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteGlobalBlock[32];\n";
+ Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteStackBlock[32];\n";
+ Preamble += "#else\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_assign(void *, const void *, const int);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_dispose(const void *, const int);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteGlobalBlock[32];\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteStackBlock[32];\n";
+ Preamble += "#endif\n";
+ Preamble += "#endif\n";
+ if (LangOpts.MicrosoftExt) {
+ Preamble += "#undef __OBJC_RW_DLLIMPORT\n";
+ Preamble += "#undef __OBJC_RW_STATICIMPORT\n";
+ Preamble += "#ifndef KEEP_ATTRIBUTES\n"; // We use this for clang tests.
+ Preamble += "#define __attribute__(X)\n";
+ Preamble += "#endif\n";
+ Preamble += "#ifndef __weak\n";
+ Preamble += "#define __weak\n";
+ Preamble += "#endif\n";
+ Preamble += "#ifndef __block\n";
+ Preamble += "#define __block\n";
+ Preamble += "#endif\n";
+ }
+ else {
+ Preamble += "#define __block\n";
+ Preamble += "#define __weak\n";
+ }
+
+ // Declarations required for modern objective-c array and dictionary literals.
+ Preamble += "\n#include <stdarg.h>\n";
+ Preamble += "struct __NSContainer_literal {\n";
+ Preamble += " void * *arr;\n";
+ Preamble += " __NSContainer_literal (unsigned int count, ...) {\n";
+ Preamble += "\tva_list marker;\n";
+ Preamble += "\tva_start(marker, count);\n";
+ Preamble += "\tarr = new void *[count];\n";
+ Preamble += "\tfor (unsigned i = 0; i < count; i++)\n";
+ Preamble += "\t arr[i] = va_arg(marker, void *);\n";
+ Preamble += "\tva_end( marker );\n";
+ Preamble += " };\n";
+ Preamble += " ~__NSContainer_literal() {\n";
+ Preamble += "\tdelete[] arr;\n";
+ Preamble += " }\n";
+ Preamble += "};\n";
+
+ // Declaration required for implementation of @autoreleasepool statement.
+ Preamble += "extern \"C\" __declspec(dllimport) void * objc_autoreleasePoolPush(void);\n";
+ Preamble += "extern \"C\" __declspec(dllimport) void objc_autoreleasePoolPop(void *);\n\n";
+ Preamble += "struct __AtAutoreleasePool {\n";
+ Preamble += " __AtAutoreleasePool() {atautoreleasepoolobj = objc_autoreleasePoolPush();}\n";
+ Preamble += " ~__AtAutoreleasePool() {objc_autoreleasePoolPop(atautoreleasepoolobj);}\n";
+ Preamble += " void * atautoreleasepoolobj;\n";
+ Preamble += "};\n";
+
+  // NOTE: Windows uses LLP64 for 64-bit mode, so cast the pointer to
+  // long long; this avoids warnings in both the 64-bit and 32-bit
+  // compilation models.
+ Preamble += "\n#define __OFFSETOFIVAR__(TYPE, MEMBER) ((long long) &((TYPE *)0)->MEMBER)\n";
+}
+
+/// RewriteIvarOffsetComputation - This routine synthesizes the computation of
+/// an ivar offset.
+void RewriteModernObjC::RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
+ std::string &Result) {
+ Result += "__OFFSETOFIVAR__(struct ";
+ Result += ivar->getContainingInterface()->getNameAsString();
+ if (LangOpts.MicrosoftExt)
+ Result += "_IMPL";
+ Result += ", ";
+ if (ivar->isBitField())
+ ObjCIvarBitfieldGroupDecl(ivar, Result);
+ else
+ Result += ivar->getNameAsString();
+ Result += ")";
+}
+
+/// WriteModernMetadataDeclarations - Writes out metadata declarations for modern ABI.
+/// struct _prop_t {
+/// const char *name;
+/// char *attributes;
+/// }
+
+/// struct _prop_list_t {
+/// uint32_t entsize; // sizeof(struct _prop_t)
+/// uint32_t count_of_properties;
+/// struct _prop_t prop_list[count_of_properties];
+/// }
+
+/// struct _protocol_t;
+
+/// struct _protocol_list_t {
+/// long protocol_count; // Note, this is 32/64 bit
+/// struct _protocol_t * protocol_list[protocol_count];
+/// }
+
+/// struct _objc_method {
+/// SEL _cmd;
+/// const char *method_type;
+/// char *_imp;
+/// }
+
+/// struct _method_list_t {
+/// uint32_t entsize; // sizeof(struct _objc_method)
+/// uint32_t method_count;
+/// struct _objc_method method_list[method_count];
+/// }
+
+/// struct _protocol_t {
+/// id isa; // NULL
+/// const char *protocol_name;
+/// const struct _protocol_list_t * protocol_list; // super protocols
+/// const struct method_list_t *instance_methods;
+/// const struct method_list_t *class_methods;
+/// const struct method_list_t *optionalInstanceMethods;
+/// const struct method_list_t *optionalClassMethods;
+/// const struct _prop_list_t * properties;
+/// const uint32_t size; // sizeof(struct _protocol_t)
+/// const uint32_t flags; // = 0
+/// const char ** extendedMethodTypes;
+/// }
+
+/// struct _ivar_t {
+/// unsigned long int *offset; // pointer to ivar offset location
+/// const char *name;
+/// const char *type;
+/// uint32_t alignment;
+/// uint32_t size;
+/// }
+
+/// struct _ivar_list_t {
+/// uint32 entsize; // sizeof(struct _ivar_t)
+/// uint32 count;
+/// struct _ivar_t list[count];
+/// }
+
+/// struct _class_ro_t {
+/// uint32_t flags;
+/// uint32_t instanceStart;
+/// uint32_t instanceSize;
+/// uint32_t reserved; // only when building for 64bit targets
+/// const uint8_t *ivarLayout;
+/// const char *name;
+/// const struct _method_list_t *baseMethods;
+/// const struct _protocol_list_t *baseProtocols;
+/// const struct _ivar_list_t *ivars;
+/// const uint8_t *weakIvarLayout;
+/// const struct _prop_list_t *properties;
+/// }
+
+/// struct _class_t {
+/// struct _class_t *isa;
+/// struct _class_t *superclass;
+/// void *cache;
+/// IMP *vtable;
+/// struct _class_ro_t *ro;
+/// }
+
+/// struct _category_t {
+/// const char *name;
+/// struct _class_t *cls;
+/// const struct _method_list_t *instance_methods;
+/// const struct _method_list_t *class_methods;
+/// const struct _protocol_list_t *protocols;
+/// const struct _prop_list_t *properties;
+/// }
+
+/// MessageRefTy - LLVM for:
+/// struct _message_ref_t {
+/// IMP messenger;
+/// SEL name;
+/// };
+
+/// SuperMessageRefTy - LLVM for:
+/// struct _super_message_ref_t {
+/// SUPER_IMP messenger;
+/// SEL name;
+/// };
+
+static void WriteModernMetadataDeclarations(ASTContext *Context, std::string &Result) {
+ static bool meta_data_declared = false;
+ if (meta_data_declared)
+ return;
+
+ Result += "\nstruct _prop_t {\n";
+ Result += "\tconst char *name;\n";
+ Result += "\tconst char *attributes;\n";
+ Result += "};\n";
+
+ Result += "\nstruct _protocol_t;\n";
+
+ Result += "\nstruct _objc_method {\n";
+ Result += "\tstruct objc_selector * _cmd;\n";
+ Result += "\tconst char *method_type;\n";
+ Result += "\tvoid *_imp;\n";
+ Result += "};\n";
+
+ Result += "\nstruct _protocol_t {\n";
+ Result += "\tvoid * isa; // NULL\n";
+ Result += "\tconst char *protocol_name;\n";
+ Result += "\tconst struct _protocol_list_t * protocol_list; // super protocols\n";
+ Result += "\tconst struct method_list_t *instance_methods;\n";
+ Result += "\tconst struct method_list_t *class_methods;\n";
+ Result += "\tconst struct method_list_t *optionalInstanceMethods;\n";
+ Result += "\tconst struct method_list_t *optionalClassMethods;\n";
+ Result += "\tconst struct _prop_list_t * properties;\n";
+ Result += "\tconst unsigned int size; // sizeof(struct _protocol_t)\n";
+ Result += "\tconst unsigned int flags; // = 0\n";
+ Result += "\tconst char ** extendedMethodTypes;\n";
+ Result += "};\n";
+
+ Result += "\nstruct _ivar_t {\n";
+ Result += "\tunsigned long int *offset; // pointer to ivar offset location\n";
+ Result += "\tconst char *name;\n";
+ Result += "\tconst char *type;\n";
+ Result += "\tunsigned int alignment;\n";
+ Result += "\tunsigned int size;\n";
+ Result += "};\n";
+
+ Result += "\nstruct _class_ro_t {\n";
+ Result += "\tunsigned int flags;\n";
+ Result += "\tunsigned int instanceStart;\n";
+ Result += "\tunsigned int instanceSize;\n";
+ const llvm::Triple &Triple(Context->getTargetInfo().getTriple());
+ if (Triple.getArch() == llvm::Triple::x86_64)
+ Result += "\tunsigned int reserved;\n";
+ Result += "\tconst unsigned char *ivarLayout;\n";
+ Result += "\tconst char *name;\n";
+ Result += "\tconst struct _method_list_t *baseMethods;\n";
+ Result += "\tconst struct _objc_protocol_list *baseProtocols;\n";
+ Result += "\tconst struct _ivar_list_t *ivars;\n";
+ Result += "\tconst unsigned char *weakIvarLayout;\n";
+ Result += "\tconst struct _prop_list_t *properties;\n";
+ Result += "};\n";
+
+ Result += "\nstruct _class_t {\n";
+ Result += "\tstruct _class_t *isa;\n";
+ Result += "\tstruct _class_t *superclass;\n";
+ Result += "\tvoid *cache;\n";
+ Result += "\tvoid *vtable;\n";
+ Result += "\tstruct _class_ro_t *ro;\n";
+ Result += "};\n";
+
+ Result += "\nstruct _category_t {\n";
+ Result += "\tconst char *name;\n";
+ Result += "\tstruct _class_t *cls;\n";
+ Result += "\tconst struct _method_list_t *instance_methods;\n";
+ Result += "\tconst struct _method_list_t *class_methods;\n";
+ Result += "\tconst struct _protocol_list_t *protocols;\n";
+ Result += "\tconst struct _prop_list_t *properties;\n";
+ Result += "};\n";
+
+ Result += "extern \"C\" __declspec(dllimport) struct objc_cache _objc_empty_cache;\n";
+ Result += "#pragma warning(disable:4273)\n";
+ meta_data_declared = true;
+}
+
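+// For a protocol with two super protocols, Write_protocol_list_t_TypeDecl
+// emits (a sketch):
+//   struct /*_protocol_list_t*/ {
+//     long protocol_count; // Note, this is 32/64 bit
+//     struct _protocol_t *super_protocols[2];
+//   }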
+static void Write_protocol_list_t_TypeDecl(std::string &Result,
+ long super_protocol_count) {
+ Result += "struct /*_protocol_list_t*/"; Result += " {\n";
+ Result += "\tlong protocol_count; // Note, this is 32/64 bit\n";
+ Result += "\tstruct _protocol_t *super_protocols[";
+ Result += utostr(super_protocol_count); Result += "];\n";
+ Result += "}";
+}
+
+static void Write_method_list_t_TypeDecl(std::string &Result,
+ unsigned int method_count) {
+ Result += "struct /*_method_list_t*/"; Result += " {\n";
+ Result += "\tunsigned int entsize; // sizeof(struct _objc_method)\n";
+ Result += "\tunsigned int method_count;\n";
+ Result += "\tstruct _objc_method method_list[";
+ Result += utostr(method_count); Result += "];\n";
+ Result += "}";
+}
+
+static void Write__prop_list_t_TypeDecl(std::string &Result,
+ unsigned int property_count) {
+ Result += "struct /*_prop_list_t*/"; Result += " {\n";
+ Result += "\tunsigned int entsize; // sizeof(struct _prop_t)\n";
+ Result += "\tunsigned int count_of_properties;\n";
+ Result += "\tstruct _prop_t prop_list[";
+ Result += utostr(property_count); Result += "];\n";
+ Result += "}";
+}
+
+static void Write__ivar_list_t_TypeDecl(std::string &Result,
+ unsigned int ivar_count) {
+ Result += "struct /*_ivar_list_t*/"; Result += " {\n";
+ Result += "\tunsigned int entsize; // sizeof(struct _prop_t)\n";
+ Result += "\tunsigned int count;\n";
+ Result += "\tstruct _ivar_t ivar_list[";
+ Result += utostr(ivar_count); Result += "];\n";
+ Result += "}";
+}
+
+static void Write_protocol_list_initializer(ASTContext *Context, std::string &Result,
+ ArrayRef<ObjCProtocolDecl *> SuperProtocols,
+ StringRef VarName,
+ StringRef ProtocolName) {
+ if (SuperProtocols.size() > 0) {
+ Result += "\nstatic ";
+ Write_protocol_list_t_TypeDecl(Result, SuperProtocols.size());
+ Result += " "; Result += VarName;
+ Result += ProtocolName;
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n";
+ Result += "\t"; Result += utostr(SuperProtocols.size()); Result += ",\n";
+ for (unsigned i = 0, e = SuperProtocols.size(); i < e; i++) {
+ ObjCProtocolDecl *SuperPD = SuperProtocols[i];
+ Result += "\t&"; Result += "_OBJC_PROTOCOL_";
+ Result += SuperPD->getNameAsString();
+ if (i == e-1)
+ Result += "\n};\n";
+ else
+ Result += ",\n";
+ }
+ }
+}
+
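+// Write_method_list_t_initializer emits a static, fully initialized method
+// list. For a hypothetical protocol P with one required instance method
+// "foo:", the output is roughly:
+//   static struct /*_method_list_t*/ {
+//     unsigned int entsize;
+//     unsigned int method_count;
+//     struct _objc_method method_list[1];
+//   } _OBJC_PROTOCOL_INSTANCE_METHODS_P
+//   __attribute__ ((used, section ("__DATA,__objc_const"))) = {
+//     sizeof(_objc_method), 1,
+//     {{(struct objc_selector *)"foo:", "<type encoding>", 0}}
+//   };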
+static void Write_method_list_t_initializer(RewriteModernObjC &RewriteObj,
+ ASTContext *Context, std::string &Result,
+ ArrayRef<ObjCMethodDecl *> Methods,
+ StringRef VarName,
+ StringRef TopLevelDeclName,
+ bool MethodImpl) {
+ if (Methods.size() > 0) {
+ Result += "\nstatic ";
+ Write_method_list_t_TypeDecl(Result, Methods.size());
+ Result += " "; Result += VarName;
+ Result += TopLevelDeclName;
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n";
+ Result += "\t"; Result += "sizeof(_objc_method)"; Result += ",\n";
+ Result += "\t"; Result += utostr(Methods.size()); Result += ",\n";
+ for (unsigned i = 0, e = Methods.size(); i < e; i++) {
+ ObjCMethodDecl *MD = Methods[i];
+ if (i == 0)
+ Result += "\t{{(struct objc_selector *)\"";
+ else
+ Result += "\t{(struct objc_selector *)\"";
+ Result += (MD)->getSelector().getAsString(); Result += "\"";
+ Result += ", ";
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl(MD, MethodTypeString);
+ Result += "\""; Result += MethodTypeString; Result += "\"";
+ Result += ", ";
+ if (!MethodImpl)
+ Result += "0";
+ else {
+ Result += "(void *)";
+ Result += RewriteObj.MethodInternalNames[MD];
+ }
+ if (i == e-1)
+ Result += "}}\n";
+ else
+ Result += "},\n";
+ }
+ Result += "};\n";
+ }
+}
+
+static void Write_prop_list_t_initializer(RewriteModernObjC &RewriteObj,
+ ASTContext *Context, std::string &Result,
+ ArrayRef<ObjCPropertyDecl *> Properties,
+ const Decl *Container,
+ StringRef VarName,
+ StringRef ProtocolName) {
+ if (Properties.size() > 0) {
+ Result += "\nstatic ";
+ Write__prop_list_t_TypeDecl(Result, Properties.size());
+ Result += " "; Result += VarName;
+ Result += ProtocolName;
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n";
+ Result += "\t"; Result += "sizeof(_prop_t)"; Result += ",\n";
+ Result += "\t"; Result += utostr(Properties.size()); Result += ",\n";
+ for (unsigned i = 0, e = Properties.size(); i < e; i++) {
+ ObjCPropertyDecl *PropDecl = Properties[i];
+ if (i == 0)
+ Result += "\t{{\"";
+ else
+ Result += "\t{\"";
+ Result += PropDecl->getName(); Result += "\",";
+ std::string PropertyTypeString, QuotePropertyTypeString;
+ Context->getObjCEncodingForPropertyDecl(PropDecl, Container, PropertyTypeString);
+ RewriteObj.QuoteDoublequotes(PropertyTypeString, QuotePropertyTypeString);
+ Result += "\""; Result += QuotePropertyTypeString; Result += "\"";
+ if (i == e-1)
+ Result += "}}\n";
+ else
+ Result += "},\n";
+ }
+ Result += "};\n";
+ }
+}
+
+// Metadata flags
+enum MetaDataFlags {
+ CLS = 0x0,
+ CLS_META = 0x1,
+ CLS_ROOT = 0x2,
+ OBJC2_CLS_HIDDEN = 0x10,
+ CLS_EXCEPTION = 0x20,
+
+ /// (Obsolete) ARC-specific: this class has a .release_ivars method
+ CLS_HAS_IVAR_RELEASER = 0x40,
+ /// class was compiled with -fobjc-arr
+ CLS_COMPILED_BY_ARC = 0x80 // (1<<7)
+};
+
+static void Write__class_ro_t_initializer(ASTContext *Context, std::string &Result,
+ unsigned int flags,
+ const std::string &InstanceStart,
+ const std::string &InstanceSize,
+ ArrayRef<ObjCMethodDecl *>baseMethods,
+ ArrayRef<ObjCProtocolDecl *>baseProtocols,
+ ArrayRef<ObjCIvarDecl *>ivars,
+ ArrayRef<ObjCPropertyDecl *>Properties,
+ StringRef VarName,
+ StringRef ClassName) {
+ Result += "\nstatic struct _class_ro_t ";
+ Result += VarName; Result += ClassName;
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n";
+ Result += "\t";
+ Result += llvm::utostr(flags); Result += ", ";
+ Result += InstanceStart; Result += ", ";
+ Result += InstanceSize; Result += ", \n";
+ Result += "\t";
+ const llvm::Triple &Triple(Context->getTargetInfo().getTriple());
+ if (Triple.getArch() == llvm::Triple::x86_64)
+ // uint32_t const reserved; // only when building for 64bit targets
+ Result += "(unsigned int)0, \n\t";
+ // const uint8_t * const ivarLayout;
+ Result += "0, \n\t";
+ Result += "\""; Result += ClassName; Result += "\",\n\t";
+ bool metaclass = ((flags & CLS_META) != 0);
+ if (baseMethods.size() > 0) {
+ Result += "(const struct _method_list_t *)&";
+ if (metaclass)
+ Result += "_OBJC_$_CLASS_METHODS_";
+ else
+ Result += "_OBJC_$_INSTANCE_METHODS_";
+ Result += ClassName;
+ Result += ",\n\t";
+ }
+ else
+ Result += "0, \n\t";
+
+ if (!metaclass && baseProtocols.size() > 0) {
+ Result += "(const struct _objc_protocol_list *)&";
+ Result += "_OBJC_CLASS_PROTOCOLS_$_"; Result += ClassName;
+ Result += ",\n\t";
+ }
+ else
+ Result += "0, \n\t";
+
+ if (!metaclass && ivars.size() > 0) {
+ Result += "(const struct _ivar_list_t *)&";
+ Result += "_OBJC_$_INSTANCE_VARIABLES_"; Result += ClassName;
+ Result += ",\n\t";
+ }
+ else
+ Result += "0, \n\t";
+
+ // weakIvarLayout
+ Result += "0, \n\t";
+ if (!metaclass && Properties.size() > 0) {
+ Result += "(const struct _prop_list_t *)&";
+ Result += "_OBJC_$_PROP_LIST_"; Result += ClassName;
+ Result += ",\n";
+ }
+ else
+ Result += "0, \n";
+
+ Result += "};\n";
+}
+
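+// Write_class_t emits the static _class_t object for a class and, on a
+// second call, its metaclass. The isa/superclass/cache/vtable fields cannot
+// be statically initialized here, so they are written as 0 with a comment,
+// and a companion OBJC_CLASS_SETUP_$_<ClassName> function is emitted to
+// patch them in at startup.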
+static void Write_class_t(ASTContext *Context, std::string &Result,
+ StringRef VarName,
+ const ObjCInterfaceDecl *CDecl, bool metaclass) {
+ bool rootClass = (!CDecl->getSuperClass());
+ const ObjCInterfaceDecl *RootClass = CDecl;
+
+ if (!rootClass) {
+ // Find the Root class
+ RootClass = CDecl->getSuperClass();
+ while (RootClass->getSuperClass()) {
+ RootClass = RootClass->getSuperClass();
+ }
+ }
+
+ if (metaclass && rootClass) {
+ // Need to handle a case of use of forward declaration.
+ Result += "\n";
+ Result += "extern \"C\" ";
+ if (CDecl->getImplementation())
+ Result += "__declspec(dllexport) ";
+ else
+ Result += "__declspec(dllimport) ";
+
+ Result += "struct _class_t OBJC_CLASS_$_";
+ Result += CDecl->getNameAsString();
+ Result += ";\n";
+ }
+  // Also handle the possibility of the 'super' metadata class not having been
+  // defined yet.
+ if (!rootClass) {
+ ObjCInterfaceDecl *SuperClass = CDecl->getSuperClass();
+ Result += "\n";
+ Result += "extern \"C\" ";
+ if (SuperClass->getImplementation())
+ Result += "__declspec(dllexport) ";
+ else
+ Result += "__declspec(dllimport) ";
+
+ Result += "struct _class_t ";
+ Result += VarName;
+ Result += SuperClass->getNameAsString();
+ Result += ";\n";
+
+ if (metaclass && RootClass != SuperClass) {
+ Result += "extern \"C\" ";
+ if (RootClass->getImplementation())
+ Result += "__declspec(dllexport) ";
+ else
+ Result += "__declspec(dllimport) ";
+
+ Result += "struct _class_t ";
+ Result += VarName;
+ Result += RootClass->getNameAsString();
+ Result += ";\n";
+ }
+ }
+
+ Result += "\nextern \"C\" __declspec(dllexport) struct _class_t ";
+ Result += VarName; Result += CDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_data\"))) = {\n";
+ Result += "\t";
+ if (metaclass) {
+ if (!rootClass) {
+ Result += "0, // &"; Result += VarName;
+ Result += RootClass->getNameAsString();
+ Result += ",\n\t";
+ Result += "0, // &"; Result += VarName;
+ Result += CDecl->getSuperClass()->getNameAsString();
+ Result += ",\n\t";
+ }
+ else {
+ Result += "0, // &"; Result += VarName;
+ Result += CDecl->getNameAsString();
+ Result += ",\n\t";
+ Result += "0, // &OBJC_CLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ",\n\t";
+ }
+ }
+ else {
+ Result += "0, // &OBJC_METACLASS_$_";
+ Result += CDecl->getNameAsString();
+ Result += ",\n\t";
+ if (!rootClass) {
+ Result += "0, // &"; Result += VarName;
+ Result += CDecl->getSuperClass()->getNameAsString();
+ Result += ",\n\t";
+ }
+ else
+ Result += "0,\n\t";
+ }
+ Result += "0, // (void *)&_objc_empty_cache,\n\t";
+ Result += "0, // unused, was (void *)&_objc_empty_vtable,\n\t";
+ if (metaclass)
+ Result += "&_OBJC_METACLASS_RO_$_";
+ else
+ Result += "&_OBJC_CLASS_RO_$_";
+ Result += CDecl->getNameAsString();
+ Result += ",\n};\n";
+
+  // Add a static function to initialize some of the meta-data fields;
+  // avoid doing it twice.
+ if (metaclass)
+ return;
+
+ const ObjCInterfaceDecl *SuperClass =
+ rootClass ? CDecl : CDecl->getSuperClass();
+
+ Result += "static void OBJC_CLASS_SETUP_$_";
+ Result += CDecl->getNameAsString();
+ Result += "(void ) {\n";
+ Result += "\tOBJC_METACLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ".isa = "; Result += "&OBJC_METACLASS_$_";
+ Result += RootClass->getNameAsString(); Result += ";\n";
+
+ Result += "\tOBJC_METACLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ".superclass = ";
+ if (rootClass)
+ Result += "&OBJC_CLASS_$_";
+ else
+ Result += "&OBJC_METACLASS_$_";
+
+ Result += SuperClass->getNameAsString(); Result += ";\n";
+
+ Result += "\tOBJC_METACLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ".cache = "; Result += "&_objc_empty_cache"; Result += ";\n";
+
+ Result += "\tOBJC_CLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ".isa = "; Result += "&OBJC_METACLASS_$_";
+ Result += CDecl->getNameAsString(); Result += ";\n";
+
+ if (!rootClass) {
+ Result += "\tOBJC_CLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ".superclass = "; Result += "&OBJC_CLASS_$_";
+ Result += SuperClass->getNameAsString(); Result += ";\n";
+ }
+
+ Result += "\tOBJC_CLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ".cache = "; Result += "&_objc_empty_cache"; Result += ";\n";
+ Result += "}\n";
+}
+
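+// Write_category_t emits the static _category_t object for a category, plus
+// an OBJC_CATEGORY_SETUP_$_<Class>_$_<Category> function that fills in the
+// category's class pointer at startup, since the class may be implemented in
+// a different TU.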
+static void Write_category_t(RewriteModernObjC &RewriteObj, ASTContext *Context,
+ std::string &Result,
+ ObjCCategoryDecl *CatDecl,
+ ObjCInterfaceDecl *ClassDecl,
+ ArrayRef<ObjCMethodDecl *> InstanceMethods,
+ ArrayRef<ObjCMethodDecl *> ClassMethods,
+ ArrayRef<ObjCProtocolDecl *> RefedProtocols,
+ ArrayRef<ObjCPropertyDecl *> ClassProperties) {
+ StringRef CatName = CatDecl->getName();
+ StringRef ClassName = ClassDecl->getName();
+  // Must declare an extern class object in case this class is not implemented
+  // in this TU.
+ Result += "\n";
+ Result += "extern \"C\" ";
+ if (ClassDecl->getImplementation())
+ Result += "__declspec(dllexport) ";
+ else
+ Result += "__declspec(dllimport) ";
+
+ Result += "struct _class_t ";
+ Result += "OBJC_CLASS_$_"; Result += ClassName;
+ Result += ";\n";
+
+ Result += "\nstatic struct _category_t ";
+ Result += "_OBJC_$_CATEGORY_";
+ Result += ClassName; Result += "_$_"; Result += CatName;
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = \n";
+ Result += "{\n";
+ Result += "\t\""; Result += ClassName; Result += "\",\n";
+ Result += "\t0, // &"; Result += "OBJC_CLASS_$_"; Result += ClassName;
+ Result += ",\n";
+ if (InstanceMethods.size() > 0) {
+ Result += "\t(const struct _method_list_t *)&";
+ Result += "_OBJC_$_CATEGORY_INSTANCE_METHODS_";
+ Result += ClassName; Result += "_$_"; Result += CatName;
+ Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (ClassMethods.size() > 0) {
+ Result += "\t(const struct _method_list_t *)&";
+ Result += "_OBJC_$_CATEGORY_CLASS_METHODS_";
+ Result += ClassName; Result += "_$_"; Result += CatName;
+ Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (RefedProtocols.size() > 0) {
+ Result += "\t(const struct _protocol_list_t *)&";
+ Result += "_OBJC_CATEGORY_PROTOCOLS_$_";
+ Result += ClassName; Result += "_$_"; Result += CatName;
+ Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (ClassProperties.size() > 0) {
+ Result += "\t(const struct _prop_list_t *)&"; Result += "_OBJC_$_PROP_LIST_";
+ Result += ClassName; Result += "_$_"; Result += CatName;
+ Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ Result += "};\n";
+
+ // Add static function to initialize the class pointer in the category structure.
+ Result += "static void OBJC_CATEGORY_SETUP_$_";
+ Result += ClassDecl->getNameAsString();
+ Result += "_$_";
+ Result += CatName;
+ Result += "(void ) {\n";
+ Result += "\t_OBJC_$_CATEGORY_";
+ Result += ClassDecl->getNameAsString();
+ Result += "_$_";
+ Result += CatName;
+ Result += ".cls = "; Result += "&OBJC_CLASS_$_"; Result += ClassName;
+ Result += ";\n}\n";
+}
+
+static void Write__extendedMethodTypes_initializer(RewriteModernObjC &RewriteObj,
+ ASTContext *Context, std::string &Result,
+ ArrayRef<ObjCMethodDecl *> Methods,
+ StringRef VarName,
+ StringRef ProtocolName) {
+ if (Methods.size() == 0)
+ return;
+
+ Result += "\nstatic const char *";
+ Result += VarName; Result += ProtocolName;
+ Result += " [] __attribute__ ((used, section (\"__DATA,__objc_const\"))) = \n";
+ Result += "{\n";
+ for (unsigned i = 0, e = Methods.size(); i < e; i++) {
+ ObjCMethodDecl *MD = Methods[i];
+ std::string MethodTypeString, QuoteMethodTypeString;
+ Context->getObjCEncodingForMethodDecl(MD, MethodTypeString, true);
+ RewriteObj.QuoteDoublequotes(MethodTypeString, QuoteMethodTypeString);
+ Result += "\t\""; Result += QuoteMethodTypeString; Result += "\"";
+ if (i == e-1)
+ Result += "\n};\n";
+ else {
+ Result += ",\n";
+ }
+ }
+}
+
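+// Write_IvarOffsetVar emits one offset variable per ivar (each ivar bitfield
+// group shares a single variable). A sketch for a hypothetical ivar 'bar' of
+// class Foo on the non-Microsoft path (the symbol name is produced by
+// WriteInternalIvarName):
+//   extern "C" unsigned long int OBJC_IVAR_$_Foo$bar
+//     __attribute__ ((used, section ("__DATA,__objc_ivar")))
+//     = __OFFSETOFIVAR__(struct Foo, bar);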
+static void Write_IvarOffsetVar(RewriteModernObjC &RewriteObj,
+ ASTContext *Context,
+ std::string &Result,
+ ArrayRef<ObjCIvarDecl *> Ivars,
+ ObjCInterfaceDecl *CDecl) {
+  // FIXME: visibility of offset symbols may have to be set; for Darwin
+  // this is what happens:
+  /**
+   if (Ivar->getAccessControl() == ObjCIvarDecl::Private ||
+       Ivar->getAccessControl() == ObjCIvarDecl::Package ||
+       Class->getVisibility() == HiddenVisibility)
+     Visibility should be: HiddenVisibility;
+   else
+     Visibility should be: DefaultVisibility;
+   */
+
+ Result += "\n";
+ for (unsigned i =0, e = Ivars.size(); i < e; i++) {
+ ObjCIvarDecl *IvarDecl = Ivars[i];
+ if (Context->getLangOpts().MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_ivar$B\")) ";
+
+ if (!Context->getLangOpts().MicrosoftExt ||
+ IvarDecl->getAccessControl() == ObjCIvarDecl::Private ||
+ IvarDecl->getAccessControl() == ObjCIvarDecl::Package)
+ Result += "extern \"C\" unsigned long int ";
+ else
+ Result += "extern \"C\" __declspec(dllexport) unsigned long int ";
+ if (Ivars[i]->isBitField())
+ RewriteObj.ObjCIvarBitfieldGroupOffset(IvarDecl, Result);
+ else
+ WriteInternalIvarName(CDecl, IvarDecl, Result);
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_ivar\")))";
+ Result += " = ";
+ RewriteObj.RewriteIvarOffsetComputation(IvarDecl, Result);
+ Result += ";\n";
+ if (Ivars[i]->isBitField()) {
+ // skip over rest of the ivar bitfields.
+ SKIP_BITFIELDS(i , e, Ivars);
+ }
+ }
+}
+
+static void Write__ivar_list_t_initializer(RewriteModernObjC &RewriteObj,
+ ASTContext *Context, std::string &Result,
+ ArrayRef<ObjCIvarDecl *> OriginalIvars,
+ StringRef VarName,
+ ObjCInterfaceDecl *CDecl) {
+ if (OriginalIvars.size() > 0) {
+ Write_IvarOffsetVar(RewriteObj, Context, Result, OriginalIvars, CDecl);
+ SmallVector<ObjCIvarDecl *, 8> Ivars;
+    // Strip off all but the first ivar bitfield from each group of ivars;
+    // such ivars are represented in the ivar list table by their grouping
+    // struct 'ivar'.
+ for (unsigned i = 0, e = OriginalIvars.size(); i < e; i++) {
+ if (OriginalIvars[i]->isBitField()) {
+ Ivars.push_back(OriginalIvars[i]);
+ // skip over rest of the ivar bitfields.
+ SKIP_BITFIELDS(i , e, OriginalIvars);
+ }
+ else
+ Ivars.push_back(OriginalIvars[i]);
+ }
+
+ Result += "\nstatic ";
+ Write__ivar_list_t_TypeDecl(Result, Ivars.size());
+ Result += " "; Result += VarName;
+ Result += CDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n";
+ Result += "\t"; Result += "sizeof(_ivar_t)"; Result += ",\n";
+ Result += "\t"; Result += utostr(Ivars.size()); Result += ",\n";
+ for (unsigned i =0, e = Ivars.size(); i < e; i++) {
+ ObjCIvarDecl *IvarDecl = Ivars[i];
+ if (i == 0)
+ Result += "\t{{";
+ else
+ Result += "\t {";
+ Result += "(unsigned long int *)&";
+ if (Ivars[i]->isBitField())
+ RewriteObj.ObjCIvarBitfieldGroupOffset(IvarDecl, Result);
+ else
+ WriteInternalIvarName(CDecl, IvarDecl, Result);
+ Result += ", ";
+
+ Result += "\"";
+ if (Ivars[i]->isBitField())
+ RewriteObj.ObjCIvarBitfieldGroupDecl(Ivars[i], Result);
+ else
+ Result += IvarDecl->getName();
+ Result += "\", ";
+
+ QualType IVQT = IvarDecl->getType();
+ if (IvarDecl->isBitField())
+ IVQT = RewriteObj.GetGroupRecordTypeForObjCIvarBitfield(IvarDecl);
+
+ std::string IvarTypeString, QuoteIvarTypeString;
+ Context->getObjCEncodingForType(IVQT, IvarTypeString,
+ IvarDecl);
+ RewriteObj.QuoteDoublequotes(IvarTypeString, QuoteIvarTypeString);
+ Result += "\""; Result += QuoteIvarTypeString; Result += "\", ";
+
+      // FIXME: this alignment represents the host alignment and needs to be
+      // changed to represent the target alignment.
+ unsigned Align = Context->getTypeAlign(IVQT)/8;
+ Align = llvm::Log2_32(Align);
+ Result += llvm::utostr(Align); Result += ", ";
+ CharUnits Size = Context->getTypeSizeInChars(IVQT);
+ Result += llvm::utostr(Size.getQuantity());
+ if (i == e-1)
+ Result += "}}\n";
+ else
+ Result += "},\n";
+ }
+ Result += "};\n";
+ }
+}
+
+/// RewriteObjCProtocolMetaData - Rewrite a protocol's metadata.
+void RewriteModernObjC::RewriteObjCProtocolMetaData(ObjCProtocolDecl *PDecl,
+ std::string &Result) {
+
+ // Do not synthesize the protocol more than once.
+ if (ObjCSynthesizedProtocols.count(PDecl->getCanonicalDecl()))
+ return;
+ WriteModernMetadataDeclarations(Context, Result);
+
+ if (ObjCProtocolDecl *Def = PDecl->getDefinition())
+ PDecl = Def;
+  // Must write out all protocol definitions in the current qualifier list,
+  // and in their nested qualifiers, before writing out the current definition.
+ for (auto *I : PDecl->protocols())
+ RewriteObjCProtocolMetaData(I, Result);
+
+ // Construct method lists.
+ std::vector<ObjCMethodDecl *> InstanceMethods, ClassMethods;
+ std::vector<ObjCMethodDecl *> OptInstanceMethods, OptClassMethods;
+ for (auto *MD : PDecl->instance_methods()) {
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptInstanceMethods.push_back(MD);
+ } else {
+ InstanceMethods.push_back(MD);
+ }
+ }
+
+ for (auto *MD : PDecl->class_methods()) {
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptClassMethods.push_back(MD);
+ } else {
+ ClassMethods.push_back(MD);
+ }
+ }
+ std::vector<ObjCMethodDecl *> AllMethods;
+ for (unsigned i = 0, e = InstanceMethods.size(); i < e; i++)
+ AllMethods.push_back(InstanceMethods[i]);
+ for (unsigned i = 0, e = ClassMethods.size(); i < e; i++)
+ AllMethods.push_back(ClassMethods[i]);
+ for (unsigned i = 0, e = OptInstanceMethods.size(); i < e; i++)
+ AllMethods.push_back(OptInstanceMethods[i]);
+ for (unsigned i = 0, e = OptClassMethods.size(); i < e; i++)
+ AllMethods.push_back(OptClassMethods[i]);
+
+ Write__extendedMethodTypes_initializer(*this, Context, Result,
+ AllMethods,
+ "_OBJC_PROTOCOL_METHOD_TYPES_",
+ PDecl->getNameAsString());
+ // Protocol's super protocol list
+ SmallVector<ObjCProtocolDecl *, 8> SuperProtocols(PDecl->protocols());
+ Write_protocol_list_initializer(Context, Result, SuperProtocols,
+ "_OBJC_PROTOCOL_REFS_",
+ PDecl->getNameAsString());
+
+ Write_method_list_t_initializer(*this, Context, Result, InstanceMethods,
+ "_OBJC_PROTOCOL_INSTANCE_METHODS_",
+ PDecl->getNameAsString(), false);
+
+ Write_method_list_t_initializer(*this, Context, Result, ClassMethods,
+ "_OBJC_PROTOCOL_CLASS_METHODS_",
+ PDecl->getNameAsString(), false);
+
+ Write_method_list_t_initializer(*this, Context, Result, OptInstanceMethods,
+ "_OBJC_PROTOCOL_OPT_INSTANCE_METHODS_",
+ PDecl->getNameAsString(), false);
+
+ Write_method_list_t_initializer(*this, Context, Result, OptClassMethods,
+ "_OBJC_PROTOCOL_OPT_CLASS_METHODS_",
+ PDecl->getNameAsString(), false);
+
+ // Protocol's property metadata.
+ SmallVector<ObjCPropertyDecl *, 8> ProtocolProperties(PDecl->properties());
+ Write_prop_list_t_initializer(*this, Context, Result, ProtocolProperties,
+ /* Container */nullptr,
+ "_OBJC_PROTOCOL_PROPERTIES_",
+ PDecl->getNameAsString());
+
+ // Write out root meta-data for the current protocol: struct _protocol_t.
+ Result += "\n";
+ if (LangOpts.MicrosoftExt)
+ Result += "static ";
+ Result += "struct _protocol_t _OBJC_PROTOCOL_";
+ Result += PDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__DATA,__datacoal_nt,coalesced\"))) = {\n";
+ Result += "\t0,\n"; // id is; is null
+ Result += "\t\""; Result += PDecl->getNameAsString(); Result += "\",\n";
+ if (SuperProtocols.size() > 0) {
+ Result += "\t(const struct _protocol_list_t *)&"; Result += "_OBJC_PROTOCOL_REFS_";
+ Result += PDecl->getNameAsString(); Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+ if (InstanceMethods.size() > 0) {
+ Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_INSTANCE_METHODS_";
+ Result += PDecl->getNameAsString(); Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (ClassMethods.size() > 0) {
+ Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_CLASS_METHODS_";
+ Result += PDecl->getNameAsString(); Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (OptInstanceMethods.size() > 0) {
+ Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_OPT_INSTANCE_METHODS_";
+ Result += PDecl->getNameAsString(); Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (OptClassMethods.size() > 0) {
+ Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_OPT_CLASS_METHODS_";
+ Result += PDecl->getNameAsString(); Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (ProtocolProperties.size() > 0) {
+ Result += "\t(const struct _prop_list_t *)&_OBJC_PROTOCOL_PROPERTIES_";
+ Result += PDecl->getNameAsString(); Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ Result += "\t"; Result += "sizeof(_protocol_t)"; Result += ",\n";
+ Result += "\t0,\n";
+
+ if (AllMethods.size() > 0) {
+ Result += "\t(const char **)&"; Result += "_OBJC_PROTOCOL_METHOD_TYPES_";
+ Result += PDecl->getNameAsString();
+ Result += "\n};\n";
+ }
+ else
+ Result += "\t0\n};\n";
+
+ if (LangOpts.MicrosoftExt)
+ Result += "static ";
+ Result += "struct _protocol_t *";
+ Result += "_OBJC_LABEL_PROTOCOL_$_"; Result += PDecl->getNameAsString();
+ Result += " = &_OBJC_PROTOCOL_"; Result += PDecl->getNameAsString();
+ Result += ";\n";
+
+ // Mark this protocol as having been generated.
+ if (!ObjCSynthesizedProtocols.insert(PDecl->getCanonicalDecl()).second)
+ llvm_unreachable("protocol already synthesized");
+
+}
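+
+// Illustrative sketch (not emitted verbatim): for a hypothetical protocol 'P'
+// with a single required instance method, the tail of the output above reads
+// roughly:
+//
+//   static struct _protocol_t _OBJC_PROTOCOL_P
+//       __attribute__ ((used, section ("__DATA,__datacoal_nt,coalesced"))) = {
+//     0, "P",
+//     0,                      // no super protocols
+//     (const struct method_list_t *)&_OBJC_PROTOCOL_INSTANCE_METHODS_P,
+//     0, 0, 0, 0,             // empty class/optional/property lists
+//     sizeof(_protocol_t), 0,
+//     (const char **)&_OBJC_PROTOCOL_METHOD_TYPES_P
+//   };
+//   static struct _protocol_t *_OBJC_LABEL_PROTOCOL_$_P = &_OBJC_PROTOCOL_P;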
+
+void RewriteModernObjC::RewriteObjCProtocolListMetaData(
+ const ObjCList<ObjCProtocolDecl> &Protocols,
+ StringRef prefix, StringRef ClassName,
+ std::string &Result) {
+ if (Protocols.empty()) return;
+
+ for (unsigned i = 0; i != Protocols.size(); i++)
+ RewriteObjCProtocolMetaData(Protocols[i], Result);
+
+ // Output the top-level protocol meta-data for the class.
+ /* struct _objc_protocol_list {
+ struct _objc_protocol_list *next;
+ int protocol_count;
+ struct _objc_protocol *class_protocols[];
+ }
+ */
+ Result += "\n";
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".cat_cls_meth$B\")) ";
+ Result += "static struct {\n";
+ Result += "\tstruct _objc_protocol_list *next;\n";
+ Result += "\tint protocol_count;\n";
+ Result += "\tstruct _objc_protocol *class_protocols[";
+ Result += utostr(Protocols.size());
+ Result += "];\n} _OBJC_";
+ Result += prefix;
+ Result += "_PROTOCOLS_";
+ Result += ClassName;
+ Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= "
+ "{\n\t0, ";
+ Result += utostr(Protocols.size());
+ Result += "\n";
+
+ Result += "\t,{&_OBJC_PROTOCOL_";
+ Result += Protocols[0]->getNameAsString();
+ Result += " \n";
+
+ for (unsigned i = 1; i != Protocols.size(); i++) {
+ Result += "\t ,&_OBJC_PROTOCOL_";
+ Result += Protocols[i]->getNameAsString();
+ Result += "\n";
+ }
+ Result += "\t }\n};\n";
+}
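+
+// Illustrative sketch (not emitted verbatim): for a hypothetical class 'Foo'
+// adopting protocols P1 and P2, the list above expands to roughly:
+//
+//   static struct {
+//     struct _objc_protocol_list *next;
+//     int protocol_count;
+//     struct _objc_protocol *class_protocols[2];
+//   } _OBJC_<prefix>_PROTOCOLS_Foo
+//       __attribute__ ((used, section ("__OBJC, __cat_cls_meth")))= {
+//     0, 2
+//     ,{&_OBJC_PROTOCOL_P1
+//      ,&_OBJC_PROTOCOL_P2
+//     }
+//   };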
+
+/// hasObjCExceptionAttribute - Return true if this class or any super
+/// class has the __objc_exception__ attribute.
+/// FIXME. Move this to ASTContext.cpp as it is also used for IRGen.
+static bool hasObjCExceptionAttribute(ASTContext &Context,
+ const ObjCInterfaceDecl *OID) {
+ if (OID->hasAttr<ObjCExceptionAttr>())
+ return true;
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+ return hasObjCExceptionAttribute(Context, Super);
+ return false;
+}
+
+void RewriteModernObjC::RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
+ std::string &Result) {
+ ObjCInterfaceDecl *CDecl = IDecl->getClassInterface();
+
+ // Explicitly declared @interfaces are already synthesized.
+ if (CDecl->isImplicitInterfaceDecl())
+ assert(false &&
+ "Legacy implicit interface rewriting not supported in modern abi");
+
+ WriteModernMetadataDeclarations(Context, Result);
+ SmallVector<ObjCIvarDecl *, 8> IVars;
+
+ for (ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar()) {
+ // Ignore unnamed bit-fields.
+ if (!IVD->getDeclName())
+ continue;
+ IVars.push_back(IVD);
+ }
+
+ Write__ivar_list_t_initializer(*this, Context, Result, IVars,
+ "_OBJC_$_INSTANCE_VARIABLES_",
+ CDecl);
+
+ // Build _objc_method_list for class's instance methods if needed
+ SmallVector<ObjCMethodDecl *, 32> InstanceMethods(IDecl->instance_methods());
+
+ // If any of our property implementations have associated getters or
+ // setters, produce metadata for them as well.
+ for (const auto *Prop : IDecl->property_impls()) {
+ if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ continue;
+ if (!Prop->getPropertyIvarDecl())
+ continue;
+ ObjCPropertyDecl *PD = Prop->getPropertyDecl();
+ if (!PD)
+ continue;
+ if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
+ if (mustSynthesizeSetterGetterMethod(IDecl, PD, true /*getter*/))
+ InstanceMethods.push_back(Getter);
+ if (PD->isReadOnly())
+ continue;
+ if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
+ if (mustSynthesizeSetterGetterMethod(IDecl, PD, false /*setter*/))
+ InstanceMethods.push_back(Setter);
+ }
+
+ Write_method_list_t_initializer(*this, Context, Result, InstanceMethods,
+ "_OBJC_$_INSTANCE_METHODS_",
+ IDecl->getNameAsString(), true);
+
+ SmallVector<ObjCMethodDecl *, 32> ClassMethods(IDecl->class_methods());
+
+ Write_method_list_t_initializer(*this, Context, Result, ClassMethods,
+ "_OBJC_$_CLASS_METHODS_",
+ IDecl->getNameAsString(), true);
+
+ // Protocols referenced in the class declaration.
+ std::vector<ObjCProtocolDecl *> RefedProtocols;
+ const ObjCList<ObjCProtocolDecl> &Protocols = CDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end();
+ I != E; ++I) {
+ RefedProtocols.push_back(*I);
+ // Must write out all protocol definitions in current qualifier list,
+ // and in their nested qualifiers before writing out current definition.
+ RewriteObjCProtocolMetaData(*I, Result);
+ }
+
+ Write_protocol_list_initializer(Context, Result,
+ RefedProtocols,
+ "_OBJC_CLASS_PROTOCOLS_$_",
+ IDecl->getNameAsString());
+
+ // Class's property meta-data.
+ SmallVector<ObjCPropertyDecl *, 8> ClassProperties(CDecl->properties());
+ Write_prop_list_t_initializer(*this, Context, Result, ClassProperties,
+ /* Container */IDecl,
+ "_OBJC_$_PROP_LIST_",
+ CDecl->getNameAsString());
+
+ // Data for initializing _class_ro_t metaclass meta-data
+ uint32_t flags = CLS_META;
+ std::string InstanceSize;
+ std::string InstanceStart;
+
+ bool classIsHidden = CDecl->getVisibility() == HiddenVisibility;
+ if (classIsHidden)
+ flags |= OBJC2_CLS_HIDDEN;
+
+ if (!CDecl->getSuperClass())
+ // class is root
+ flags |= CLS_ROOT;
+ InstanceSize = "sizeof(struct _class_t)";
+ InstanceStart = InstanceSize;
+ Write__class_ro_t_initializer(Context, Result, flags,
+ InstanceStart, InstanceSize,
+ ClassMethods,
+ nullptr,
+ nullptr,
+ nullptr,
+ "_OBJC_METACLASS_RO_$_",
+ CDecl->getNameAsString());
+
+ // Data for initializing _class_ro_t meta-data
+ flags = CLS;
+ if (classIsHidden)
+ flags |= OBJC2_CLS_HIDDEN;
+
+ if (hasObjCExceptionAttribute(*Context, CDecl))
+ flags |= CLS_EXCEPTION;
+
+ if (!CDecl->getSuperClass())
+ // class is root
+ flags |= CLS_ROOT;
+
+ InstanceSize.clear();
+ InstanceStart.clear();
+ if (!ObjCSynthesizedStructs.count(CDecl)) {
+ InstanceSize = "0";
+ InstanceStart = "0";
+ }
+ else {
+ InstanceSize = "sizeof(struct ";
+ InstanceSize += CDecl->getNameAsString();
+ InstanceSize += "_IMPL)";
+
+ ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin();
+ if (IVD) {
+ RewriteIvarOffsetComputation(IVD, InstanceStart);
+ }
+ else
+ InstanceStart = InstanceSize;
+ }
+ Write__class_ro_t_initializer(Context, Result, flags,
+ InstanceStart, InstanceSize,
+ InstanceMethods,
+ RefedProtocols,
+ IVars,
+ ClassProperties,
+ "_OBJC_CLASS_RO_$_",
+ CDecl->getNameAsString());
+
+ Write_class_t(Context, Result,
+ "OBJC_METACLASS_$_",
+ CDecl, /*metaclass*/true);
+
+ Write_class_t(Context, Result,
+ "OBJC_CLASS_$_",
+ CDecl, /*metaclass*/false);
+
+ if (ImplementationIsNonLazy(IDecl))
+ DefinedNonLazyClasses.push_back(CDecl);
+
+}
+
+void RewriteModernObjC::RewriteClassSetupInitHook(std::string &Result) {
+ int ClsDefCount = ClassImplementation.size();
+ if (!ClsDefCount)
+ return;
+ Result += "#pragma section(\".objc_inithooks$B\", long, read, write)\n";
+ Result += "__declspec(allocate(\".objc_inithooks$B\")) ";
+ Result += "static void *OBJC_CLASS_SETUP[] = {\n";
+ for (int i = 0; i < ClsDefCount; i++) {
+ ObjCImplementationDecl *IDecl = ClassImplementation[i];
+ ObjCInterfaceDecl *CDecl = IDecl->getClassInterface();
+ Result += "\t(void *)&OBJC_CLASS_SETUP_$_";
+ Result += CDecl->getName(); Result += ",\n";
+ }
+ Result += "};\n";
+}
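+
+// Illustrative sketch (not emitted verbatim): with a single implemented class
+// 'Foo', the hook table above expands to:
+//
+//   #pragma section(".objc_inithooks$B", long, read, write)
+//   __declspec(allocate(".objc_inithooks$B")) static void *OBJC_CLASS_SETUP[] = {
+//     (void *)&OBJC_CLASS_SETUP_$_Foo,
+//   };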
+
+void RewriteModernObjC::RewriteMetaDataIntoBuffer(std::string &Result) {
+ int ClsDefCount = ClassImplementation.size();
+ int CatDefCount = CategoryImplementation.size();
+
+ // For each implemented class, write out all its meta data.
+ for (int i = 0; i < ClsDefCount; i++)
+ RewriteObjCClassMetaData(ClassImplementation[i], Result);
+
+ RewriteClassSetupInitHook(Result);
+
+ // For each implemented category, write out all its meta data.
+ for (int i = 0; i < CatDefCount; i++)
+ RewriteObjCCategoryImplDecl(CategoryImplementation[i], Result);
+
+ RewriteCategorySetupInitHook(Result);
+
+ if (ClsDefCount > 0) {
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_classlist$B\")) ";
+ Result += "static struct _class_t *L_OBJC_LABEL_CLASS_$ [";
+ Result += llvm::utostr(ClsDefCount); Result += "]";
+ Result +=
+ " __attribute__((used, section (\"__DATA, __objc_classlist,"
+ "regular,no_dead_strip\")))= {\n";
+ for (int i = 0; i < ClsDefCount; i++) {
+ Result += "\t&OBJC_CLASS_$_";
+ Result += ClassImplementation[i]->getNameAsString();
+ Result += ",\n";
+ }
+ Result += "};\n";
+
+ if (!DefinedNonLazyClasses.empty()) {
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_nlclslist$B\")) \n";
+ Result += "static struct _class_t *_OBJC_LABEL_NONLAZY_CLASS_$[] = {\n\t";
+ for (unsigned i = 0, e = DefinedNonLazyClasses.size(); i < e; i++) {
+ Result += "\t&OBJC_CLASS_$_"; Result += DefinedNonLazyClasses[i]->getNameAsString();
+ Result += ",\n";
+ }
+ Result += "};\n";
+ }
+ }
+
+ if (CatDefCount > 0) {
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_catlist$B\")) ";
+ Result += "static struct _category_t *L_OBJC_LABEL_CATEGORY_$ [";
+ Result += llvm::utostr(CatDefCount); Result += "]";
+ Result +=
+ " __attribute__((used, section (\"__DATA, __objc_catlist,"
+ "regular,no_dead_strip\")))= {\n";
+ for (int i = 0; i < CatDefCount; i++) {
+ Result += "\t&_OBJC_$_CATEGORY_";
+ Result +=
+ CategoryImplementation[i]->getClassInterface()->getNameAsString();
+ Result += "_$_";
+ Result += CategoryImplementation[i]->getNameAsString();
+ Result += ",\n";
+ }
+ Result += "};\n";
+ }
+
+ if (!DefinedNonLazyCategories.empty()) {
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_nlcatlist$B\")) \n";
+ Result += "static struct _category_t *_OBJC_LABEL_NONLAZY_CATEGORY_$[] = {\n\t";
+ for (unsigned i = 0, e = DefinedNonLazyCategories.size(); i < e; i++) {
+ Result += "\t&_OBJC_$_CATEGORY_";
+ Result +=
+ DefinedNonLazyCategories[i]->getClassInterface()->getNameAsString();
+ Result += "_$_";
+ Result += DefinedNonLazyCategories[i]->getNameAsString();
+ Result += ",\n";
+ }
+ Result += "};\n";
+ }
+}
+
+void RewriteModernObjC::WriteImageInfo(std::string &Result) {
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_imageinfo$B\")) \n";
+
+ Result += "static struct IMAGE_INFO { unsigned version; unsigned flag; } ";
+ // version 0, ObjCABI is 2
+ Result += "_OBJC_IMAGE_INFO = { 0, 2 };\n";
+}
+
+/// RewriteObjCCategoryImplDecl - Rewrite metadata for each category
+/// implementation.
+void RewriteModernObjC::RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *IDecl,
+ std::string &Result) {
+ WriteModernMetadataDeclarations(Context, Result);
+ ObjCInterfaceDecl *ClassDecl = IDecl->getClassInterface();
+ // Find category declaration for this implementation.
+ ObjCCategoryDecl *CDecl
+ = ClassDecl->FindCategoryDeclaration(IDecl->getIdentifier());
+
+ std::string FullCategoryName = ClassDecl->getNameAsString();
+ FullCategoryName += "_$_";
+ FullCategoryName += CDecl->getNameAsString();
+
+ // Build _objc_method_list for class's instance methods if needed
+ SmallVector<ObjCMethodDecl *, 32> InstanceMethods(IDecl->instance_methods());
+
+ // If any of our property implementations have associated getters or
+ // setters, produce metadata for them as well.
+ for (const auto *Prop : IDecl->property_impls()) {
+ if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ continue;
+ if (!Prop->getPropertyIvarDecl())
+ continue;
+ ObjCPropertyDecl *PD = Prop->getPropertyDecl();
+ if (!PD)
+ continue;
+ if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
+ InstanceMethods.push_back(Getter);
+ if (PD->isReadOnly())
+ continue;
+ if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
+ InstanceMethods.push_back(Setter);
+ }
+
+ Write_method_list_t_initializer(*this, Context, Result, InstanceMethods,
+ "_OBJC_$_CATEGORY_INSTANCE_METHODS_",
+ FullCategoryName, true);
+
+ SmallVector<ObjCMethodDecl *, 32> ClassMethods(IDecl->class_methods());
+
+ Write_method_list_t_initializer(*this, Context, Result, ClassMethods,
+ "_OBJC_$_CATEGORY_CLASS_METHODS_",
+ FullCategoryName, true);
+
+ // Protocols referenced in the category declaration.
+ SmallVector<ObjCProtocolDecl *, 8> RefedProtocols(CDecl->protocols());
+ for (auto *I : CDecl->protocols())
+ // Must write out all protocol definitions in current qualifier list,
+ // and in their nested qualifiers before writing out current definition.
+ RewriteObjCProtocolMetaData(I, Result);
+
+ Write_protocol_list_initializer(Context, Result,
+ RefedProtocols,
+ "_OBJC_CATEGORY_PROTOCOLS_$_",
+ FullCategoryName);
+
+ // Category's property meta-data.
+ SmallVector<ObjCPropertyDecl *, 8> ClassProperties(CDecl->properties());
+ Write_prop_list_t_initializer(*this, Context, Result, ClassProperties,
+ /* Container */IDecl,
+ "_OBJC_$_PROP_LIST_",
+ FullCategoryName);
+
+ Write_category_t(*this, Context, Result,
+ CDecl,
+ ClassDecl,
+ InstanceMethods,
+ ClassMethods,
+ RefedProtocols,
+ ClassProperties);
+
+ // Determine if this category is also "non-lazy".
+ if (ImplementationIsNonLazy(IDecl))
+ DefinedNonLazyCategories.push_back(CDecl);
+
+}
+
+void RewriteModernObjC::RewriteCategorySetupInitHook(std::string &Result) {
+ int CatDefCount = CategoryImplementation.size();
+ if (!CatDefCount)
+ return;
+ Result += "#pragma section(\".objc_inithooks$B\", long, read, write)\n";
+ Result += "__declspec(allocate(\".objc_inithooks$B\")) ";
+ Result += "static void *OBJC_CATEGORY_SETUP[] = {\n";
+ for (int i = 0; i < CatDefCount; i++) {
+ ObjCCategoryImplDecl *IDecl = CategoryImplementation[i];
+ ObjCCategoryDecl *CatDecl = IDecl->getCategoryDecl();
+ ObjCInterfaceDecl *ClassDecl = IDecl->getClassInterface();
+ Result += "\t(void *)&OBJC_CATEGORY_SETUP_$_";
+ Result += ClassDecl->getName();
+ Result += "_$_";
+ Result += CatDecl->getName();
+ Result += ",\n";
+ }
+ Result += "};\n";
+}
+
+/// RewriteObjCMethodsMetaData - Rewrite methods meta-data for instance or
+/// class methods.
+template<typename MethodIterator>
+void RewriteModernObjC::RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
+ MethodIterator MethodEnd,
+ bool IsInstanceMethod,
+ StringRef prefix,
+ StringRef ClassName,
+ std::string &Result) {
+ if (MethodBegin == MethodEnd) return;
+
+ if (!objc_impl_method) {
+ /* struct _objc_method {
+ SEL _cmd;
+ char *method_types;
+ void *_imp;
+ }
+ */
+ Result += "\nstruct _objc_method {\n";
+ Result += "\tSEL _cmd;\n";
+ Result += "\tchar *method_types;\n";
+ Result += "\tvoid *_imp;\n";
+ Result += "};\n";
+
+ objc_impl_method = true;
+ }
+
+ // Build _objc_method_list for class's methods if needed
+
+ /* struct {
+ struct _objc_method_list *next_method;
+ int method_count;
+ struct _objc_method method_list[];
+ }
+ */
+ unsigned NumMethods = std::distance(MethodBegin, MethodEnd);
+ Result += "\n";
+ if (LangOpts.MicrosoftExt) {
+ if (IsInstanceMethod)
+ Result += "__declspec(allocate(\".inst_meth$B\")) ";
+ else
+ Result += "__declspec(allocate(\".cls_meth$B\")) ";
+ }
+ Result += "static struct {\n";
+ Result += "\tstruct _objc_method_list *next_method;\n";
+ Result += "\tint method_count;\n";
+ Result += "\tstruct _objc_method method_list[";
+ Result += utostr(NumMethods);
+ Result += "];\n} _OBJC_";
+ Result += prefix;
+ Result += IsInstanceMethod ? "INSTANCE" : "CLASS";
+ Result += "_METHODS_";
+ Result += ClassName;
+ Result += " __attribute__ ((used, section (\"__OBJC, __";
+ Result += IsInstanceMethod ? "inst" : "cls";
+ Result += "_meth\")))= ";
+ Result += "{\n\t0, " + utostr(NumMethods) + "\n";
+
+ Result += "\t,{{(SEL)\"";
+ Result += (*MethodBegin)->getSelector().getAsString().c_str();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\", (void *)";
+ Result += MethodInternalNames[*MethodBegin];
+ Result += "}\n";
+ for (++MethodBegin; MethodBegin != MethodEnd; ++MethodBegin) {
+ Result += "\t ,{(SEL)\"";
+ Result += (*MethodBegin)->getSelector().getAsString().c_str();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\", (void *)";
+ Result += MethodInternalNames[*MethodBegin];
+ Result += "}\n";
+ }
+ Result += "\t }\n};\n";
+}
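+
+// Illustrative sketch (not emitted verbatim): for a hypothetical class 'Foo'
+// with instance methods -bar and -baz, the list above comes out roughly as:
+//
+//   static struct {
+//     struct _objc_method_list *next_method;
+//     int method_count;
+//     struct _objc_method method_list[2];
+//   } _OBJC_<prefix>INSTANCE_METHODS_Foo
+//       __attribute__ ((used, section ("__OBJC, __inst_meth")))= {
+//     0, 2
+//     ,{{(SEL)"bar", "v8@0:4", (void *)_I_Foo_bar}
+//      ,{(SEL)"baz", "v8@0:4", (void *)_I_Foo_baz}
+//     }
+//   };
+//
+// The internal names and type encodings here are assumptions; the real ones
+// come from MethodInternalNames and getObjCEncodingForMethodDecl.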
+
+Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
+ SourceRange OldRange = IV->getSourceRange();
+ Expr *BaseExpr = IV->getBase();
+
+ // Rewrite the base, but without actually doing replaces.
+ {
+ DisableReplaceStmtScope S(*this);
+ BaseExpr = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(BaseExpr));
+ IV->setBase(BaseExpr);
+ }
+
+ ObjCIvarDecl *D = IV->getDecl();
+
+ Expr *Replacement = IV;
+
+ if (BaseExpr->getType()->isObjCObjectPointerType()) {
+ const ObjCInterfaceType *iFaceDecl =
+ dyn_cast<ObjCInterfaceType>(BaseExpr->getType()->getPointeeType());
+ assert(iFaceDecl && "RewriteObjCIvarRefExpr - iFaceDecl is null");
+ // lookup which class implements the instance variable.
+ ObjCInterfaceDecl *clsDeclared = nullptr;
+ iFaceDecl->getDecl()->lookupInstanceVariable(D->getIdentifier(),
+ clsDeclared);
+ assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class");
+
+ // Build name of symbol holding ivar offset.
+ std::string IvarOffsetName;
+ if (D->isBitField())
+ ObjCIvarBitfieldGroupOffset(D, IvarOffsetName);
+ else
+ WriteInternalIvarName(clsDeclared, D, IvarOffsetName);
+
+ ReferencedIvars[clsDeclared].insert(D);
+
+ // cast offset to "char *".
+ CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->CharTy),
+ CK_BitCast,
+ BaseExpr);
+ VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
+ SourceLocation(), &Context->Idents.get(IvarOffsetName),
+ Context->UnsignedLongTy, nullptr,
+ SC_Extern);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false,
+ Context->UnsignedLongTy, VK_LValue,
+ SourceLocation());
+ BinaryOperator *addExpr =
+ new (Context) BinaryOperator(castExpr, DRE, BO_Add,
+ Context->getPointerType(Context->CharTy),
+ VK_RValue, OK_Ordinary, SourceLocation(), false);
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(),
+ SourceLocation(),
+ addExpr);
+ QualType IvarT = D->getType();
+ if (D->isBitField())
+ IvarT = GetGroupRecordTypeForObjCIvarBitfield(D);
+
+ if (!isa<TypedefType>(IvarT) && IvarT->isRecordType()) {
+ RecordDecl *RD = IvarT->getAs<RecordType>()->getDecl();
+ RD = RD->getDefinition();
+ if (RD && !RD->getDeclName().getAsIdentifierInfo()) {
+ // decltype(((Foo_IMPL*)0)->bar) *
+ ObjCContainerDecl *CDecl =
+ dyn_cast<ObjCContainerDecl>(D->getDeclContext());
+ // ivar in class extensions requires special treatment.
+ if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CDecl))
+ CDecl = CatDecl->getClassInterface();
+ std::string RecName = CDecl->getName();
+ RecName += "_IMPL";
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get(RecName.c_str()));
+ QualType PtrStructIMPL = Context->getPointerType(Context->getTagDeclType(RD));
+ unsigned UnsignedIntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
+ Expr *Zero = IntegerLiteral::Create(*Context,
+ llvm::APInt(UnsignedIntSize, 0),
+ Context->UnsignedIntTy, SourceLocation());
+ Zero = NoTypeInfoCStyleCastExpr(Context, PtrStructIMPL, CK_BitCast, Zero);
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ Zero);
+ FieldDecl *FD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get(D->getNameAsString()),
+ IvarT, nullptr,
+ /*BitWidth=*/nullptr,
+ /*Mutable=*/true, ICIS_NoInit);
+ MemberExpr *ME = new (Context)
+ MemberExpr(PE, true, SourceLocation(), FD, SourceLocation(),
+ FD->getType(), VK_LValue, OK_Ordinary);
+ IvarT = Context->getDecltypeType(ME, ME->getType());
+ }
+ }
+ convertObjCTypeToCStyleType(IvarT);
+ QualType castT = Context->getPointerType(IvarT);
+
+ castExpr = NoTypeInfoCStyleCastExpr(Context,
+ castT,
+ CK_BitCast,
+ PE);
+
+ Expr *Exp = new (Context) UnaryOperator(castExpr, UO_Deref, IvarT,
+ VK_LValue, OK_Ordinary,
+ SourceLocation());
+ PE = new (Context) ParenExpr(OldRange.getBegin(),
+ OldRange.getEnd(),
+ Exp);
+
+ if (D->isBitField()) {
+ FieldDecl *FD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get(D->getNameAsString()),
+ D->getType(), nullptr,
+ /*BitWidth=*/D->getBitWidth(),
+ /*Mutable=*/true, ICIS_NoInit);
+ MemberExpr *ME = new (Context)
+ MemberExpr(PE, /*isArrow*/ false, SourceLocation(), FD,
+ SourceLocation(), FD->getType(), VK_LValue, OK_Ordinary);
+ Replacement = ME;
+
+ }
+ else
+ Replacement = PE;
+ }
+
+ ReplaceStmtWithRange(IV, Replacement, OldRange);
+ return Replacement;
+}
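+
+// Illustrative sketch (not rewritten verbatim): an access such as 'obj->ivar'
+// (or 'self->ivar') is replaced by an offset-based load along the lines of
+//
+//   (*(IvarT *)((char *)obj + OBJC_IVAR_$_Foo$ivar))
+//
+// with a trailing member access appended when 'ivar' is a bitfield that was
+// grouped into a struct. 'Foo', 'ivar', and 'IvarT' are placeholder names.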
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteObjC.cpp b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
new file mode 100644
index 0000000..e0ddadb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
@@ -0,0 +1,5913 @@
+//===--- RewriteObjC.cpp - Playground for the code rewriter ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Hacks and fun related to the code rewriter.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/Frontend/ASTConsumers.h"
+#include "clang/AST/AST.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Rewrite/Core/Rewriter.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <memory>
+
+#ifdef CLANG_ENABLE_OBJC_REWRITER
+
+using namespace clang;
+using llvm::utostr;
+
+namespace {
+ class RewriteObjC : public ASTConsumer {
+ protected:
+
+ enum {
+ BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)),
+ block, ... */
+ BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */
+ BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the
+ __block variable */
+ BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy
+ helpers */
+ BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
+ support routines */
+ BLOCK_BYREF_CURRENT_MAX = 256
+ };
+
+ enum {
+ BLOCK_NEEDS_FREE = (1 << 24),
+ BLOCK_HAS_COPY_DISPOSE = (1 << 25),
+ BLOCK_HAS_CXX_OBJ = (1 << 26),
+ BLOCK_IS_GC = (1 << 27),
+ BLOCK_IS_GLOBAL = (1 << 28),
+ BLOCK_HAS_DESCRIPTOR = (1 << 29)
+ };
+ static const int OBJC_ABI_VERSION = 7;
+
+ Rewriter Rewrite;
+ DiagnosticsEngine &Diags;
+ const LangOptions &LangOpts;
+ ASTContext *Context;
+ SourceManager *SM;
+ TranslationUnitDecl *TUDecl;
+ FileID MainFileID;
+ const char *MainFileStart, *MainFileEnd;
+ Stmt *CurrentBody;
+ ParentMap *PropParentMap; // created lazily.
+ std::string InFileName;
+ raw_ostream* OutFile;
+ std::string Preamble;
+
+ TypeDecl *ProtocolTypeDecl;
+ VarDecl *GlobalVarDecl;
+ unsigned RewriteFailedDiag;
+ // ObjC string constant support.
+ unsigned NumObjCStringLiterals;
+ VarDecl *ConstantStringClassReference;
+ RecordDecl *NSStringRecord;
+
+ // ObjC foreach break/continue generation support.
+ int BcLabelCount;
+
+ unsigned TryFinallyContainsReturnDiag;
+ // Needed for super.
+ ObjCMethodDecl *CurMethodDef;
+ RecordDecl *SuperStructDecl;
+ RecordDecl *ConstantStringDecl;
+
+ FunctionDecl *MsgSendFunctionDecl;
+ FunctionDecl *MsgSendSuperFunctionDecl;
+ FunctionDecl *MsgSendStretFunctionDecl;
+ FunctionDecl *MsgSendSuperStretFunctionDecl;
+ FunctionDecl *MsgSendFpretFunctionDecl;
+ FunctionDecl *GetClassFunctionDecl;
+ FunctionDecl *GetMetaClassFunctionDecl;
+ FunctionDecl *GetSuperClassFunctionDecl;
+ FunctionDecl *SelGetUidFunctionDecl;
+ FunctionDecl *CFStringFunctionDecl;
+ FunctionDecl *SuperConstructorFunctionDecl;
+ FunctionDecl *CurFunctionDef;
+ FunctionDecl *CurFunctionDeclToDeclareForBlock;
+
+ /* Misc. containers needed for meta-data rewrite. */
+ SmallVector<ObjCImplementationDecl *, 8> ClassImplementation;
+ SmallVector<ObjCCategoryImplDecl *, 8> CategoryImplementation;
+ llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCSynthesizedStructs;
+ llvm::SmallPtrSet<ObjCProtocolDecl*, 8> ObjCSynthesizedProtocols;
+ llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCForwardDecls;
+ llvm::DenseMap<ObjCMethodDecl*, std::string> MethodInternalNames;
+ SmallVector<Stmt *, 32> Stmts;
+ SmallVector<int, 8> ObjCBcLabelNo;
+ // Remember all the @protocol(<expr>) expressions.
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 32> ProtocolExprDecls;
+
+ llvm::DenseSet<uint64_t> CopyDestroyCache;
+
+ // Block expressions.
+ SmallVector<BlockExpr *, 32> Blocks;
+ SmallVector<int, 32> InnerDeclRefsCount;
+ SmallVector<DeclRefExpr *, 32> InnerDeclRefs;
+
+ SmallVector<DeclRefExpr *, 32> BlockDeclRefs;
+
+ // Block related declarations.
+ SmallVector<ValueDecl *, 8> BlockByCopyDecls;
+ llvm::SmallPtrSet<ValueDecl *, 8> BlockByCopyDeclsPtrSet;
+ SmallVector<ValueDecl *, 8> BlockByRefDecls;
+ llvm::SmallPtrSet<ValueDecl *, 8> BlockByRefDeclsPtrSet;
+ llvm::DenseMap<ValueDecl *, unsigned> BlockByRefDeclNo;
+ llvm::SmallPtrSet<ValueDecl *, 8> ImportedBlockDecls;
+ llvm::SmallPtrSet<VarDecl *, 8> ImportedLocalExternalDecls;
+
+ llvm::DenseMap<BlockExpr *, std::string> RewrittenBlockExprs;
+
+ // This maps an original source AST to its rewritten form. This allows
+ // us to avoid rewriting the same node twice (which is very uncommon).
+ // This is needed to support some of the exotic property rewriting.
+ llvm::DenseMap<Stmt *, Stmt *> ReplacedNodes;
+
+ // Needed for header files being rewritten
+ bool IsHeader;
+ bool SilenceRewriteMacroWarning;
+ bool objc_impl_method;
+
+ bool DisableReplaceStmt;
+ class DisableReplaceStmtScope {
+ RewriteObjC &R;
+ bool SavedValue;
+
+ public:
+ DisableReplaceStmtScope(RewriteObjC &R)
+ : R(R), SavedValue(R.DisableReplaceStmt) {
+ R.DisableReplaceStmt = true;
+ }
+ ~DisableReplaceStmtScope() {
+ R.DisableReplaceStmt = SavedValue;
+ }
+ };
+ void InitializeCommon(ASTContext &context);
+
+ public:
+
+ // Top Level Driver code.
+ bool HandleTopLevelDecl(DeclGroupRef D) override {
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
+ if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(*I)) {
+ if (!Class->isThisDeclarationADefinition()) {
+ RewriteForwardClassDecl(D);
+ break;
+ }
+ }
+
+ if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(*I)) {
+ if (!Proto->isThisDeclarationADefinition()) {
+ RewriteForwardProtocolDecl(D);
+ break;
+ }
+ }
+
+ HandleTopLevelSingleDecl(*I);
+ }
+ return true;
+ }
+ void HandleTopLevelSingleDecl(Decl *D);
+ void HandleDeclInMainFile(Decl *D);
+ RewriteObjC(std::string inFile, raw_ostream *OS,
+ DiagnosticsEngine &D, const LangOptions &LOpts,
+ bool silenceMacroWarn);
+
+ ~RewriteObjC() override {}
+
+ void HandleTranslationUnit(ASTContext &C) override;
+
+ void ReplaceStmt(Stmt *Old, Stmt *New) {
+ ReplaceStmtWithRange(Old, New, Old->getSourceRange());
+ }
+
+ void ReplaceStmtWithRange(Stmt *Old, Stmt *New, SourceRange SrcRange) {
+ assert(Old != nullptr && New != nullptr && "Expected non-null Stmt's");
+
+ Stmt *ReplacingStmt = ReplacedNodes[Old];
+ if (ReplacingStmt)
+ return; // We can't rewrite the same node twice.
+
+ if (DisableReplaceStmt)
+ return;
+
+ // Measure the old text.
+ int Size = Rewrite.getRangeSize(SrcRange);
+ if (Size == -1) {
+ Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
+ << Old->getSourceRange();
+ return;
+ }
+ // Get the new text.
+ std::string SStr;
+ llvm::raw_string_ostream S(SStr);
+ New->printPretty(S, nullptr, PrintingPolicy(LangOpts));
+ const std::string &Str = S.str();
+
+ // If replacement succeeded or warning disabled return with no warning.
+ if (!Rewrite.ReplaceText(SrcRange.getBegin(), Size, Str)) {
+ ReplacedNodes[Old] = New;
+ return;
+ }
+ if (SilenceRewriteMacroWarning)
+ return;
+ Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
+ << Old->getSourceRange();
+ }
+
+ void InsertText(SourceLocation Loc, StringRef Str,
+ bool InsertAfter = true) {
+ // If insertion succeeded or warning disabled return with no warning.
+ if (!Rewrite.InsertText(Loc, Str, InsertAfter) ||
+ SilenceRewriteMacroWarning)
+ return;
+
+ Diags.Report(Context->getFullLoc(Loc), RewriteFailedDiag);
+ }
+
+ void ReplaceText(SourceLocation Start, unsigned OrigLength,
+ StringRef Str) {
+ // If removal succeeded or warning disabled return with no warning.
+ if (!Rewrite.ReplaceText(Start, OrigLength, Str) ||
+ SilenceRewriteMacroWarning)
+ return;
+
+ Diags.Report(Context->getFullLoc(Start), RewriteFailedDiag);
+ }
+
+ // Syntactic Rewriting.
+ void RewriteRecordBody(RecordDecl *RD);
+ void RewriteInclude();
+ void RewriteForwardClassDecl(DeclGroupRef D);
+ void RewriteForwardClassDecl(const SmallVectorImpl<Decl *> &DG);
+ void RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl,
+ const std::string &typedefString);
+ void RewriteImplementations();
+ void RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
+ ObjCImplementationDecl *IMD,
+ ObjCCategoryImplDecl *CID);
+ void RewriteInterfaceDecl(ObjCInterfaceDecl *Dcl);
+ void RewriteImplementationDecl(Decl *Dcl);
+ void RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl,
+ ObjCMethodDecl *MDecl, std::string &ResultStr);
+ void RewriteTypeIntoString(QualType T, std::string &ResultStr,
+ const FunctionType *&FPRetType);
+ void RewriteByRefString(std::string &ResultStr, const std::string &Name,
+ ValueDecl *VD, bool def=false);
+ void RewriteCategoryDecl(ObjCCategoryDecl *Dcl);
+ void RewriteProtocolDecl(ObjCProtocolDecl *Dcl);
+ void RewriteForwardProtocolDecl(DeclGroupRef D);
+ void RewriteForwardProtocolDecl(const SmallVectorImpl<Decl *> &DG);
+ void RewriteMethodDeclaration(ObjCMethodDecl *Method);
+ void RewriteProperty(ObjCPropertyDecl *prop);
+ void RewriteFunctionDecl(FunctionDecl *FD);
+ void RewriteBlockPointerType(std::string& Str, QualType Type);
+ void RewriteBlockPointerTypeVariable(std::string& Str, ValueDecl *VD);
+ void RewriteBlockLiteralFunctionDecl(FunctionDecl *FD);
+ void RewriteObjCQualifiedInterfaceTypes(Decl *Dcl);
+ void RewriteTypeOfDecl(VarDecl *VD);
+ void RewriteObjCQualifiedInterfaceTypes(Expr *E);
+
+ // Expression Rewriting.
+ Stmt *RewriteFunctionBodyOrGlobalInitializer(Stmt *S);
+ Stmt *RewriteAtEncode(ObjCEncodeExpr *Exp);
+ Stmt *RewritePropertyOrImplicitGetter(PseudoObjectExpr *Pseudo);
+ Stmt *RewritePropertyOrImplicitSetter(PseudoObjectExpr *Pseudo);
+ Stmt *RewriteAtSelector(ObjCSelectorExpr *Exp);
+ Stmt *RewriteMessageExpr(ObjCMessageExpr *Exp);
+ Stmt *RewriteObjCStringLiteral(ObjCStringLiteral *Exp);
+ Stmt *RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp);
+ void RewriteTryReturnStmts(Stmt *S);
+ void RewriteSyncReturnStmts(Stmt *S, std::string buf);
+ Stmt *RewriteObjCTryStmt(ObjCAtTryStmt *S);
+ Stmt *RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S);
+ Stmt *RewriteObjCThrowStmt(ObjCAtThrowStmt *S);
+ Stmt *RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
+ SourceLocation OrigEnd);
+ Stmt *RewriteBreakStmt(BreakStmt *S);
+ Stmt *RewriteContinueStmt(ContinueStmt *S);
+ void RewriteCastExpr(CStyleCastExpr *CE);
+
+ // Block rewriting.
+ void RewriteBlocksInFunctionProtoType(QualType funcType, NamedDecl *D);
+
+ // Block specific rewrite rules.
+ void RewriteBlockPointerDecl(NamedDecl *VD);
+ void RewriteByRefVar(VarDecl *VD);
+ Stmt *RewriteBlockDeclRefExpr(DeclRefExpr *VD);
+ Stmt *RewriteLocalVariableExternalStorage(DeclRefExpr *DRE);
+ void RewriteBlockPointerFunctionArgs(FunctionDecl *FD);
+
+ void RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
+ std::string &Result);
+
+ void Initialize(ASTContext &context) override = 0;
+
+ // Metadata Rewriting.
+ virtual void RewriteMetaDataIntoBuffer(std::string &Result) = 0;
+ virtual void RewriteObjCProtocolListMetaData(const ObjCList<ObjCProtocolDecl> &Prots,
+ StringRef prefix,
+ StringRef ClassName,
+ std::string &Result) = 0;
+ virtual void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl,
+ std::string &Result) = 0;
+ virtual void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol,
+ StringRef prefix,
+ StringRef ClassName,
+ std::string &Result) = 0;
+ virtual void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
+ std::string &Result) = 0;
+
+ // Rewriting ivar access
+ virtual Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) = 0;
+ virtual void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
+ std::string &Result) = 0;
+
+ // Misc. AST transformation routines. Sometimes they end up calling
+ // rewriting routines on the new ASTs.
+ CallExpr *SynthesizeCallToFunctionDecl(FunctionDecl *FD,
+ ArrayRef<Expr *> Args,
+ SourceLocation StartLoc=SourceLocation(),
+ SourceLocation EndLoc=SourceLocation());
+ CallExpr *SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor,
+ QualType msgSendType,
+ QualType returnType,
+ SmallVectorImpl<QualType> &ArgTypes,
+ SmallVectorImpl<Expr*> &MsgExprs,
+ ObjCMethodDecl *Method);
+ Stmt *SynthMessageExpr(ObjCMessageExpr *Exp,
+ SourceLocation StartLoc=SourceLocation(),
+ SourceLocation EndLoc=SourceLocation());
+
+ void SynthCountByEnumWithState(std::string &buf);
+ void SynthMsgSendFunctionDecl();
+ void SynthMsgSendSuperFunctionDecl();
+ void SynthMsgSendStretFunctionDecl();
+ void SynthMsgSendFpretFunctionDecl();
+ void SynthMsgSendSuperStretFunctionDecl();
+ void SynthGetClassFunctionDecl();
+ void SynthGetMetaClassFunctionDecl();
+ void SynthGetSuperClassFunctionDecl();
+ void SynthSelGetUidFunctionDecl();
+ void SynthSuperConstructorFunctionDecl();
+
+ std::string SynthesizeByrefCopyDestroyHelper(VarDecl *VD, int flag);
+ std::string SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
+ StringRef funcName, std::string Tag);
+ std::string SynthesizeBlockFunc(BlockExpr *CE, int i,
+ StringRef funcName, std::string Tag);
+ std::string SynthesizeBlockImpl(BlockExpr *CE,
+ std::string Tag, std::string Desc);
+ std::string SynthesizeBlockDescriptor(std::string DescTag,
+ std::string ImplTag,
+ int i, StringRef funcName,
+ unsigned hasCopy);
+ Stmt *SynthesizeBlockCall(CallExpr *Exp, const Expr* BlockExp);
+ void SynthesizeBlockLiterals(SourceLocation FunLocStart,
+ StringRef FunName);
+ FunctionDecl *SynthBlockInitFunctionDecl(StringRef name);
+ Stmt *SynthBlockInitExpr(BlockExpr *Exp,
+ const SmallVectorImpl<DeclRefExpr *> &InnerBlockDeclRefs);
+
+ // Misc. helper routines.
+ QualType getProtocolType();
+ void WarnAboutReturnGotoStmts(Stmt *S);
+ void HasReturnStmts(Stmt *S, bool &hasReturns);
+ void CheckFunctionPointerDecl(QualType dType, NamedDecl *ND);
+ void InsertBlockLiteralsWithinFunction(FunctionDecl *FD);
+ void InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD);
+
+ bool IsDeclStmtInForeachHeader(DeclStmt *DS);
+ void CollectBlockDeclRefInfo(BlockExpr *Exp);
+ void GetBlockDeclRefExprs(Stmt *S);
+ void GetInnerBlockDeclRefExprs(Stmt *S,
+ SmallVectorImpl<DeclRefExpr *> &InnerBlockDeclRefs,
+ llvm::SmallPtrSetImpl<const DeclContext *> &InnerContexts);
+
+ // We avoid calling Type::isBlockPointerType(), since it operates on the
+ // canonical type. We only care if the top-level type is a closure pointer.
+ bool isTopLevelBlockPointerType(QualType T) {
+ return isa<BlockPointerType>(T);
+ }
+
+ /// convertBlockPointerToFunctionPointer - Converts a block-pointer type
+ /// to a function pointer type, returning true on success and false
+ /// otherwise.
+ bool convertBlockPointerToFunctionPointer(QualType &T) {
+ if (isTopLevelBlockPointerType(T)) {
+ const BlockPointerType *BPT = T->getAs<BlockPointerType>();
+ T = Context->getPointerType(BPT->getPointeeType());
+ return true;
+ }
+ return false;
+ }
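+ // For example (illustrative): 'int (^)(float)' becomes 'int (*)(float)'.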
+
+ bool needToScanForQualifiers(QualType T);
+ QualType getSuperStructType();
+ QualType getConstantStringStructType();
+ QualType convertFunctionTypeOfBlocks(const FunctionType *FT);
+ bool BufferContainsPPDirectives(const char *startBuf, const char *endBuf);
+
+ void convertToUnqualifiedObjCType(QualType &T) {
+ if (T->isObjCQualifiedIdType())
+ T = Context->getObjCIdType();
+ else if (T->isObjCQualifiedClassType())
+ T = Context->getObjCClassType();
+ else if (T->isObjCObjectPointerType() &&
+ T->getPointeeType()->isObjCQualifiedInterfaceType()) {
+ if (const ObjCObjectPointerType * OBJPT =
+ T->getAsObjCInterfacePointerType()) {
+ const ObjCInterfaceType *IFaceT = OBJPT->getInterfaceType();
+ T = QualType(IFaceT, 0);
+ T = Context->getPointerType(T);
+ }
+ }
+ }
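+ // For example (illustrative): 'id<P>' becomes 'id', 'Class<P>' becomes
+ // 'Class', and 'Foo<P> *' becomes 'Foo *'.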
+
+ // FIXME: This predicate seems like it would be useful to add to ASTContext.
+ bool isObjCType(QualType T) {
+ if (!LangOpts.ObjC1 && !LangOpts.ObjC2)
+ return false;
+
+ QualType OCT = Context->getCanonicalType(T).getUnqualifiedType();
+
+ if (OCT == Context->getCanonicalType(Context->getObjCIdType()) ||
+ OCT == Context->getCanonicalType(Context->getObjCClassType()))
+ return true;
+
+ if (const PointerType *PT = OCT->getAs<PointerType>()) {
+ if (isa<ObjCInterfaceType>(PT->getPointeeType()) ||
+ PT->getPointeeType()->isObjCQualifiedIdType())
+ return true;
+ }
+ return false;
+ }
+ bool PointerTypeTakesAnyBlockArguments(QualType QT);
+ bool PointerTypeTakesAnyObjCQualifiedType(QualType QT);
+ void GetExtentOfArgList(const char *Name, const char *&LParen,
+ const char *&RParen);
+
+ void QuoteDoublequotes(std::string &From, std::string &To) {
+ for (unsigned i = 0; i < From.length(); i++) {
+ if (From[i] == '"')
+ To += "\\\"";
+ else
+ To += From[i];
+ }
+ }
+
+ QualType getSimpleFunctionType(QualType result,
+ ArrayRef<QualType> args,
+ bool variadic = false) {
+ if (result == Context->getObjCInstanceType())
+ result = Context->getObjCIdType();
+ FunctionProtoType::ExtProtoInfo fpi;
+ fpi.Variadic = variadic;
+ return Context->getFunctionType(result, args, fpi);
+ }
+
+ // Helper function: create a CStyleCastExpr with trivial type source info.
+ CStyleCastExpr* NoTypeInfoCStyleCastExpr(ASTContext *Ctx, QualType Ty,
+ CastKind Kind, Expr *E) {
+ TypeSourceInfo *TInfo = Ctx->getTrivialTypeSourceInfo(Ty, SourceLocation());
+ return CStyleCastExpr::Create(*Ctx, Ty, VK_RValue, Kind, E, nullptr,
+ TInfo, SourceLocation(), SourceLocation());
+ }
+
+ StringLiteral *getStringLiteral(StringRef Str) {
+ QualType StrType = Context->getConstantArrayType(
+ Context->CharTy, llvm::APInt(32, Str.size() + 1), ArrayType::Normal,
+ 0);
+ return StringLiteral::Create(*Context, Str, StringLiteral::Ascii,
+ /*Pascal=*/false, StrType, SourceLocation());
+ }
+ };
+
+ class RewriteObjCFragileABI : public RewriteObjC {
+ public:
+
+ RewriteObjCFragileABI(std::string inFile, raw_ostream *OS,
+ DiagnosticsEngine &D, const LangOptions &LOpts,
+ bool silenceMacroWarn) : RewriteObjC(inFile, OS,
+ D, LOpts,
+ silenceMacroWarn) {}
+
+ ~RewriteObjCFragileABI() override {}
+ void Initialize(ASTContext &context) override;
+
+ // Rewriting metadata
+ template<typename MethodIterator>
+ void RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
+ MethodIterator MethodEnd,
+ bool IsInstanceMethod,
+ StringRef prefix,
+ StringRef ClassName,
+ std::string &Result);
+ void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol,
+ StringRef prefix, StringRef ClassName,
+ std::string &Result) override;
+ void RewriteObjCProtocolListMetaData(
+ const ObjCList<ObjCProtocolDecl> &Prots,
+ StringRef prefix, StringRef ClassName, std::string &Result) override;
+ void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
+ std::string &Result) override;
+ void RewriteMetaDataIntoBuffer(std::string &Result) override;
+ void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl,
+ std::string &Result) override;
+
+ // Rewriting ivar
+ void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
+ std::string &Result) override;
+ Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) override;
+ };
+}
+
+void RewriteObjC::RewriteBlocksInFunctionProtoType(QualType funcType,
+ NamedDecl *D) {
+ if (const FunctionProtoType *fproto
+ = dyn_cast<FunctionProtoType>(funcType.IgnoreParens())) {
+ for (const auto &I : fproto->param_types())
+ if (isTopLevelBlockPointerType(I)) {
+ // All the args are checked/rewritten. Don't call twice!
+ RewriteBlockPointerDecl(D);
+ break;
+ }
+ }
+}
+
+void RewriteObjC::CheckFunctionPointerDecl(QualType funcType, NamedDecl *ND) {
+ const PointerType *PT = funcType->getAs<PointerType>();
+ if (PT && PointerTypeTakesAnyBlockArguments(funcType))
+ RewriteBlocksInFunctionProtoType(PT->getPointeeType(), ND);
+}
+
+static bool IsHeaderFile(const std::string &Filename) {
+ std::string::size_type DotPos = Filename.rfind('.');
+
+ if (DotPos == std::string::npos) {
+ // no file extension
+ return false;
+ }
+
+ std::string Ext = std::string(Filename.begin()+DotPos+1, Filename.end());
+ // C header: .h
+ // C++ header: .hh or .H;
+ return Ext == "h" || Ext == "hh" || Ext == "H";
+}
+
+RewriteObjC::RewriteObjC(std::string inFile, raw_ostream* OS,
+ DiagnosticsEngine &D, const LangOptions &LOpts,
+ bool silenceMacroWarn)
+ : Diags(D), LangOpts(LOpts), InFileName(inFile), OutFile(OS),
+ SilenceRewriteMacroWarning(silenceMacroWarn) {
+ IsHeader = IsHeaderFile(inFile);
+ RewriteFailedDiag = Diags.getCustomDiagID(DiagnosticsEngine::Warning,
+ "rewriting sub-expression within a macro (may not be correct)");
+ TryFinallyContainsReturnDiag = Diags.getCustomDiagID(
+ DiagnosticsEngine::Warning,
+ "rewriter doesn't support user-specified control flow semantics "
+ "for @try/@finally (code may not execute properly)");
+}
+
+std::unique_ptr<ASTConsumer>
+clang::CreateObjCRewriter(const std::string &InFile, raw_ostream *OS,
+ DiagnosticsEngine &Diags, const LangOptions &LOpts,
+ bool SilenceRewriteMacroWarning) {
+ return llvm::make_unique<RewriteObjCFragileABI>(InFile, OS, Diags, LOpts,
+ SilenceRewriteMacroWarning);
+}
+
+void RewriteObjC::InitializeCommon(ASTContext &context) {
+ Context = &context;
+ SM = &Context->getSourceManager();
+ TUDecl = Context->getTranslationUnitDecl();
+ MsgSendFunctionDecl = nullptr;
+ MsgSendSuperFunctionDecl = nullptr;
+ MsgSendStretFunctionDecl = nullptr;
+ MsgSendSuperStretFunctionDecl = nullptr;
+ MsgSendFpretFunctionDecl = nullptr;
+ GetClassFunctionDecl = nullptr;
+ GetMetaClassFunctionDecl = nullptr;
+ GetSuperClassFunctionDecl = nullptr;
+ SelGetUidFunctionDecl = nullptr;
+ CFStringFunctionDecl = nullptr;
+ ConstantStringClassReference = nullptr;
+ NSStringRecord = nullptr;
+ CurMethodDef = nullptr;
+ CurFunctionDef = nullptr;
+ CurFunctionDeclToDeclareForBlock = nullptr;
+ GlobalVarDecl = nullptr;
+ SuperStructDecl = nullptr;
+ ProtocolTypeDecl = nullptr;
+ ConstantStringDecl = nullptr;
+ BcLabelCount = 0;
+ SuperConstructorFunctionDecl = nullptr;
+ NumObjCStringLiterals = 0;
+ PropParentMap = nullptr;
+ CurrentBody = nullptr;
+ DisableReplaceStmt = false;
+ objc_impl_method = false;
+
+ // Get the ID and start/end of the main file.
+ MainFileID = SM->getMainFileID();
+ const llvm::MemoryBuffer *MainBuf = SM->getBuffer(MainFileID);
+ MainFileStart = MainBuf->getBufferStart();
+ MainFileEnd = MainBuf->getBufferEnd();
+
+ Rewrite.setSourceMgr(Context->getSourceManager(), Context->getLangOpts());
+}
+
+//===----------------------------------------------------------------------===//
+// Top Level Driver Code
+//===----------------------------------------------------------------------===//
+
+void RewriteObjC::HandleTopLevelSingleDecl(Decl *D) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ // Two cases: either the decl could be in the main file, or it could be in a
+ // #included file. If the former, rewrite it now. If the latter, check to see
+ // if we rewrote the #include/#import.
+ SourceLocation Loc = D->getLocation();
+ Loc = SM->getExpansionLoc(Loc);
+
+ // If this is for a builtin, ignore it.
+ if (Loc.isInvalid()) return;
+
+ // Look for built-in declarations that we need to refer to during the rewrite.
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ RewriteFunctionDecl(FD);
+ } else if (VarDecl *FVD = dyn_cast<VarDecl>(D)) {
+ // declared in <Foundation/NSString.h>
+ if (FVD->getName() == "_NSConstantStringClassReference") {
+ ConstantStringClassReference = FVD;
+ return;
+ }
+ } else if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
+ if (ID->isThisDeclarationADefinition())
+ RewriteInterfaceDecl(ID);
+ } else if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(D)) {
+ RewriteCategoryDecl(CD);
+ } else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) {
+ if (PD->isThisDeclarationADefinition())
+ RewriteProtocolDecl(PD);
+ } else if (LinkageSpecDecl *LSD = dyn_cast<LinkageSpecDecl>(D)) {
+ // Recurse into linkage specifications
+ for (DeclContext::decl_iterator DI = LSD->decls_begin(),
+ DIEnd = LSD->decls_end();
+ DI != DIEnd; ) {
+ if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>((*DI))) {
+ if (!IFace->isThisDeclarationADefinition()) {
+ SmallVector<Decl *, 8> DG;
+ SourceLocation StartLoc = IFace->getLocStart();
+ do {
+ if (isa<ObjCInterfaceDecl>(*DI) &&
+ !cast<ObjCInterfaceDecl>(*DI)->isThisDeclarationADefinition() &&
+ StartLoc == (*DI)->getLocStart())
+ DG.push_back(*DI);
+ else
+ break;
+
+ ++DI;
+ } while (DI != DIEnd);
+ RewriteForwardClassDecl(DG);
+ continue;
+ }
+ }
+
+ if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>((*DI))) {
+ if (!Proto->isThisDeclarationADefinition()) {
+ SmallVector<Decl *, 8> DG;
+ SourceLocation StartLoc = Proto->getLocStart();
+ do {
+ if (isa<ObjCProtocolDecl>(*DI) &&
+ !cast<ObjCProtocolDecl>(*DI)->isThisDeclarationADefinition() &&
+ StartLoc == (*DI)->getLocStart())
+ DG.push_back(*DI);
+ else
+ break;
+
+ ++DI;
+ } while (DI != DIEnd);
+ RewriteForwardProtocolDecl(DG);
+ continue;
+ }
+ }
+
+ HandleTopLevelSingleDecl(*DI);
+ ++DI;
+ }
+ }
+ // If we have a decl in the main file, see if we should rewrite it.
+ if (SM->isWrittenInMainFile(Loc))
+ return HandleDeclInMainFile(D);
+}
+
+//===----------------------------------------------------------------------===//
+// Syntactic (non-AST) Rewriting Code
+//===----------------------------------------------------------------------===//
+
+void RewriteObjC::RewriteInclude() {
+ SourceLocation LocStart = SM->getLocForStartOfFile(MainFileID);
+ StringRef MainBuf = SM->getBufferData(MainFileID);
+ const char *MainBufStart = MainBuf.begin();
+ const char *MainBufEnd = MainBuf.end();
+ size_t ImportLen = strlen("import");
+
+ // Loop over the whole file, looking for includes.
+ for (const char *BufPtr = MainBufStart; BufPtr < MainBufEnd; ++BufPtr) {
+ if (*BufPtr == '#') {
+ if (++BufPtr == MainBufEnd)
+ return;
+ while (*BufPtr == ' ' || *BufPtr == '\t')
+ if (++BufPtr == MainBufEnd)
+ return;
+ if (!strncmp(BufPtr, "import", ImportLen)) {
+ // replace import with include
+ SourceLocation ImportLoc =
+ LocStart.getLocWithOffset(BufPtr-MainBufStart);
+ ReplaceText(ImportLoc, ImportLen, "include");
+ BufPtr += ImportLen;
+ }
+ }
+ }
+}
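+
+// For example (illustrative): '#import <Foundation/Foundation.h>' in the main
+// file becomes '#include <Foundation/Foundation.h>'.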
+
+static std::string getIvarAccessString(ObjCIvarDecl *OID) {
+ const ObjCInterfaceDecl *ClassDecl = OID->getContainingInterface();
+ std::string S;
+ S = "((struct ";
+ S += ClassDecl->getIdentifier()->getName();
+ S += "_IMPL *)self)->";
+ S += OID->getName();
+ return S;
+}
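+
+// For example (illustrative): for an ivar 'bar' of a class 'Foo', this
+// returns "((struct Foo_IMPL *)self)->bar".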
+
+void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
+ ObjCImplementationDecl *IMD,
+ ObjCCategoryImplDecl *CID) {
+ static bool objcGetPropertyDefined = false;
+ static bool objcSetPropertyDefined = false;
+ SourceLocation startLoc = PID->getLocStart();
+ InsertText(startLoc, "// ");
+ const char *startBuf = SM->getCharacterData(startLoc);
+ assert((*startBuf == '@') && "bogus @synthesize location");
+ const char *semiBuf = strchr(startBuf, ';');
+ assert((*semiBuf == ';') && "@synthesize: can't find ';'");
+ SourceLocation onePastSemiLoc =
+ startLoc.getLocWithOffset(semiBuf-startBuf+1);
+
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ return; // FIXME: is this correct?
+
+ // Generate the 'getter' function.
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCIvarDecl *OID = PID->getPropertyIvarDecl();
+
+ if (!OID)
+ return;
+ unsigned Attributes = PD->getPropertyAttributes();
+ if (!PD->getGetterMethodDecl()->isDefined()) {
+ bool GenGetProperty = !(Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
+ (Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_copy));
+ std::string Getr;
+ if (GenGetProperty && !objcGetPropertyDefined) {
+ objcGetPropertyDefined = true;
+ // FIXME. Is this attribute correct in all cases?
+ Getr = "\nextern \"C\" __declspec(dllimport) "
+ "id objc_getProperty(id, SEL, long, bool);\n";
+ }
+ RewriteObjCMethodDecl(OID->getContainingInterface(),
+ PD->getGetterMethodDecl(), Getr);
+ Getr += "{ ";
+ // Synthesize an explicit cast to gain access to the ivar.
+ // See objc-act.c:objc_synthesize_new_getter() for details.
+ if (GenGetProperty) {
+ // return objc_getProperty(self, _cmd, offsetof(ClassDecl, OID), 1)
+ Getr += "typedef ";
+ const FunctionType *FPRetType = nullptr;
+ RewriteTypeIntoString(PD->getGetterMethodDecl()->getReturnType(), Getr,
+ FPRetType);
+ Getr += " _TYPE";
+ if (FPRetType) {
+ Getr += ")"; // close the precedence "scope" for "*".
+
+ // Now, emit the argument types (if any).
+ if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)){
+ Getr += "(";
+ for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
+ if (i) Getr += ", ";
+ std::string ParamStr =
+ FT->getParamType(i).getAsString(Context->getPrintingPolicy());
+ Getr += ParamStr;
+ }
+ if (FT->isVariadic()) {
+ if (FT->getNumParams())
+ Getr += ", ";
+ Getr += "...";
+ }
+ Getr += ")";
+ } else
+ Getr += "()";
+ }
+ Getr += ";\n";
+ Getr += "return (_TYPE)";
+ Getr += "objc_getProperty(self, _cmd, ";
+ RewriteIvarOffsetComputation(OID, Getr);
+ Getr += ", 1)";
+ }
+ else
+ Getr += "return " + getIvarAccessString(OID);
+ Getr += "; }";
+ InsertText(onePastSemiLoc, Getr);
+ }
+
+ if (PD->isReadOnly() || PD->getSetterMethodDecl()->isDefined())
+ return;
+
+ // Generate the 'setter' function.
+ std::string Setr;
+ bool GenSetProperty = Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_copy);
+ if (GenSetProperty && !objcSetPropertyDefined) {
+ objcSetPropertyDefined = true;
+ // FIXME. Is this attribute correct in all cases?
+ Setr = "\nextern \"C\" __declspec(dllimport) "
+ "void objc_setProperty (id, SEL, long, id, bool, bool);\n";
+ }
+
+ RewriteObjCMethodDecl(OID->getContainingInterface(),
+ PD->getSetterMethodDecl(), Setr);
+ Setr += "{ ";
+ // Synthesize an explicit cast to initialize the ivar.
+ // See objc-act.c:objc_synthesize_new_setter() for details.
+ if (GenSetProperty) {
+ Setr += "objc_setProperty (self, _cmd, ";
+ RewriteIvarOffsetComputation(OID, Setr);
+ Setr += ", (id)";
+ Setr += PD->getName();
+ Setr += ", ";
+ if (Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ Setr += "0, ";
+ else
+ Setr += "1, ";
+ if (Attributes & ObjCPropertyDecl::OBJC_PR_copy)
+ Setr += "1)";
+ else
+ Setr += "0)";
+ }
+ else {
+ Setr += getIvarAccessString(OID) + " = ";
+ Setr += PD->getName();
+ }
+ Setr += "; }";
+ InsertText(onePastSemiLoc, Setr);
+}
+
+static void RewriteOneForwardClassDecl(ObjCInterfaceDecl *ForwardDecl,
+ std::string &typedefString) {
+ typedefString += "#ifndef _REWRITER_typedef_";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += "\n";
+ typedefString += "#define _REWRITER_typedef_";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += "\n";
+ typedefString += "typedef struct objc_object ";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += ";\n#endif\n";
+}
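+
+// Illustrative output for a hypothetical '@class Foo;':
+//
+//   #ifndef _REWRITER_typedef_Foo
+//   #define _REWRITER_typedef_Foo
+//   typedef struct objc_object Foo;
+//   #endif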
+
+void RewriteObjC::RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl,
+ const std::string &typedefString) {
+ SourceLocation startLoc = ClassDecl->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+ const char *semiPtr = strchr(startBuf, ';');
+ // Replace the @class with typedefs corresponding to the classes.
+ ReplaceText(startLoc, semiPtr-startBuf+1, typedefString);
+}
+
+void RewriteObjC::RewriteForwardClassDecl(DeclGroupRef D) {
+ std::string typedefString;
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
+ ObjCInterfaceDecl *ForwardDecl = cast<ObjCInterfaceDecl>(*I);
+ if (I == D.begin()) {
+ // Translate to typedefs that forward-reference structs with the same name
+ // as the class. As a convenience, we include the original declaration
+ // as a comment.
+ typedefString += "// @class ";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += ";\n";
+ }
+ RewriteOneForwardClassDecl(ForwardDecl, typedefString);
+ }
+ DeclGroupRef::iterator I = D.begin();
+ RewriteForwardClassEpilogue(cast<ObjCInterfaceDecl>(*I), typedefString);
+}
+
+void RewriteObjC::RewriteForwardClassDecl(const SmallVectorImpl<Decl *> &D) {
+ std::string typedefString;
+ for (unsigned i = 0; i < D.size(); i++) {
+ ObjCInterfaceDecl *ForwardDecl = cast<ObjCInterfaceDecl>(D[i]);
+ if (i == 0) {
+ typedefString += "// @class ";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += ";\n";
+ }
+ RewriteOneForwardClassDecl(ForwardDecl, typedefString);
+ }
+ RewriteForwardClassEpilogue(cast<ObjCInterfaceDecl>(D[0]), typedefString);
+}
+
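+// Method declarations are not translated, only disabled. For example, a
+// one-line declaration such as '- (void)doSomething;' (hypothetical selector)
+// simply gains a '// ' prefix, while a declaration spanning several lines is
+// wrapped in '#if 0' ... '#endif' so the rewritten file still parses.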
+void RewriteObjC::RewriteMethodDeclaration(ObjCMethodDecl *Method) {
+ // When the method is a synthesized one, such as a getter/setter, there is
+ // nothing to rewrite.
+ if (Method->isImplicit())
+ return;
+ SourceLocation LocStart = Method->getLocStart();
+ SourceLocation LocEnd = Method->getLocEnd();
+
+ if (SM->getExpansionLineNumber(LocEnd) >
+ SM->getExpansionLineNumber(LocStart)) {
+ InsertText(LocStart, "#if 0\n");
+ ReplaceText(LocEnd, 1, ";\n#endif\n");
+ } else {
+ InsertText(LocStart, "// ");
+ }
+}
+
+void RewriteObjC::RewriteProperty(ObjCPropertyDecl *prop) {
+ SourceLocation Loc = prop->getAtLoc();
+
+ ReplaceText(Loc, 0, "// ");
+ // FIXME: handle properties that are declared across multiple lines.
+}
+
+void RewriteObjC::RewriteCategoryDecl(ObjCCategoryDecl *CatDecl) {
+ SourceLocation LocStart = CatDecl->getLocStart();
+
+ // FIXME: handle category headers that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ");
+
+ for (auto *I : CatDecl->properties())
+ RewriteProperty(I);
+ for (auto *I : CatDecl->instance_methods())
+ RewriteMethodDeclaration(I);
+ for (auto *I : CatDecl->class_methods())
+ RewriteMethodDeclaration(I);
+
+ // Lastly, comment out the @end.
+ ReplaceText(CatDecl->getAtEndRange().getBegin(),
+ strlen("@end"), "/* @end */");
+}
+
+void RewriteObjC::RewriteProtocolDecl(ObjCProtocolDecl *PDecl) {
+ SourceLocation LocStart = PDecl->getLocStart();
+ assert(PDecl->isThisDeclarationADefinition());
+
+ // FIXME: handle protocol headers that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ");
+
+ for (auto *I : PDecl->instance_methods())
+ RewriteMethodDeclaration(I);
+ for (auto *I : PDecl->class_methods())
+ RewriteMethodDeclaration(I);
+ for (auto *I : PDecl->properties())
+ RewriteProperty(I);
+
+ // Lastly, comment out the @end.
+ SourceLocation LocEnd = PDecl->getAtEndRange().getBegin();
+ ReplaceText(LocEnd, strlen("@end"), "/* @end */");
+
+ // Must comment out @optional/@required
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ for (const char *p = startBuf; p < endBuf; p++) {
+ if (*p == '@' && !strncmp(p+1, "optional", strlen("optional"))) {
+ SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf);
+ ReplaceText(OptionalLoc, strlen("@optional"), "/* @optional */");
+
+ }
+ else if (*p == '@' && !strncmp(p+1, "required", strlen("required"))) {
+ SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf);
+ ReplaceText(OptionalLoc, strlen("@required"), "/* @required */");
+
+ }
+ }
+}
+
+void RewriteObjC::RewriteForwardProtocolDecl(DeclGroupRef D) {
+ SourceLocation LocStart = (*D.begin())->getLocStart();
+ if (LocStart.isInvalid())
+ llvm_unreachable("Invalid SourceLocation");
+ // FIXME: handle forward protocol that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ");
+}
+
+void
+RewriteObjC::RewriteForwardProtocolDecl(const SmallVectorImpl<Decl *> &DG) {
+ SourceLocation LocStart = DG[0]->getLocStart();
+ if (LocStart.isInvalid())
+ llvm_unreachable("Invalid SourceLocation");
+ // FIXME: handle forward protocol that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ");
+}
+
+void RewriteObjC::RewriteTypeIntoString(QualType T, std::string &ResultStr,
+ const FunctionType *&FPRetType) {
+ if (T->isObjCQualifiedIdType())
+ ResultStr += "id";
+ else if (T->isFunctionPointerType() ||
+ T->isBlockPointerType()) {
+ // Needs special handling, since pointer-to-functions have special
+ // syntax (where a declaration models use).
+ QualType retType = T;
+ QualType PointeeTy;
+ if (const PointerType* PT = retType->getAs<PointerType>())
+ PointeeTy = PT->getPointeeType();
+ else if (const BlockPointerType *BPT = retType->getAs<BlockPointerType>())
+ PointeeTy = BPT->getPointeeType();
+ if ((FPRetType = PointeeTy->getAs<FunctionType>())) {
+ ResultStr +=
+ FPRetType->getReturnType().getAsString(Context->getPrintingPolicy());
+ ResultStr += "(*";
+ }
+ } else
+ ResultStr += T.getAsString(Context->getPrintingPolicy());
+}
+
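+// Name-mangling sketch (class and selector are hypothetical): the instance
+// method '- (void)setFoo:(int)x' of class MyClass is lowered to
+//
+//   static void _I_MyClass_setFoo_(MyClass *self, SEL _cmd, int x);
+//
+// Class methods use the '_C_' prefix, methods defined in a category splice
+// the category name in after the class name, and each ':' in the selector
+// becomes '_'.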
+void RewriteObjC::RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl,
+ ObjCMethodDecl *OMD,
+ std::string &ResultStr) {
+ const FunctionType *FPRetType = nullptr;
+ ResultStr += "\nstatic ";
+ RewriteTypeIntoString(OMD->getReturnType(), ResultStr, FPRetType);
+ ResultStr += " ";
+
+ // Unique method name
+ std::string NameStr;
+
+ if (OMD->isInstanceMethod())
+ NameStr += "_I_";
+ else
+ NameStr += "_C_";
+
+ NameStr += IDecl->getNameAsString();
+ NameStr += "_";
+
+ if (ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext())) {
+ NameStr += CID->getNameAsString();
+ NameStr += "_";
+ }
+ // Append selector names, replacing ':' with '_'
+ {
+ std::string selString = OMD->getSelector().getAsString();
+ int len = selString.size();
+ for (int i = 0; i < len; i++)
+ if (selString[i] == ':')
+ selString[i] = '_';
+ NameStr += selString;
+ }
+ // Remember this name for metadata emission
+ MethodInternalNames[OMD] = NameStr;
+ ResultStr += NameStr;
+
+ // Rewrite arguments
+ ResultStr += "(";
+
+ // invisible arguments
+ if (OMD->isInstanceMethod()) {
+ QualType selfTy = Context->getObjCInterfaceType(IDecl);
+ selfTy = Context->getPointerType(selfTy);
+ if (!LangOpts.MicrosoftExt) {
+ if (ObjCSynthesizedStructs.count(const_cast<ObjCInterfaceDecl*>(IDecl)))
+ ResultStr += "struct ";
+ }
+ // When rewriting for Microsoft, explicitly omit the structure name.
+ ResultStr += IDecl->getNameAsString();
+ ResultStr += " *";
+ }
+ else
+ ResultStr += Context->getObjCClassType().getAsString(
+ Context->getPrintingPolicy());
+
+ ResultStr += " self, ";
+ ResultStr += Context->getObjCSelType().getAsString(Context->getPrintingPolicy());
+ ResultStr += " _cmd";
+
+ // Method arguments.
+ for (const auto *PDecl : OMD->params()) {
+ ResultStr += ", ";
+ if (PDecl->getType()->isObjCQualifiedIdType()) {
+ ResultStr += "id ";
+ ResultStr += PDecl->getNameAsString();
+ } else {
+ std::string Name = PDecl->getNameAsString();
+ QualType QT = PDecl->getType();
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ (void)convertBlockPointerToFunctionPointer(QT);
+ QT.getAsStringInternal(Name, Context->getPrintingPolicy());
+ ResultStr += Name;
+ }
+ }
+ if (OMD->isVariadic())
+ ResultStr += ", ...";
+ ResultStr += ") ";
+
+ if (FPRetType) {
+ ResultStr += ")"; // close the precedence "scope" for "*".
+
+ // Now, emit the argument types (if any).
+ if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)) {
+ ResultStr += "(";
+ for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
+ if (i) ResultStr += ", ";
+ std::string ParamStr =
+ FT->getParamType(i).getAsString(Context->getPrintingPolicy());
+ ResultStr += ParamStr;
+ }
+ if (FT->isVariadic()) {
+ if (FT->getNumParams())
+ ResultStr += ", ";
+ ResultStr += "...";
+ }
+ ResultStr += ")";
+ } else {
+ ResultStr += "()";
+ }
+ }
+}
+
+void RewriteObjC::RewriteImplementationDecl(Decl *OID) {
+ ObjCImplementationDecl *IMD = dyn_cast<ObjCImplementationDecl>(OID);
+ ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(OID);
+
+ InsertText(IMD ? IMD->getLocStart() : CID->getLocStart(), "// ");
+
+ for (auto *OMD : IMD ? IMD->instance_methods() : CID->instance_methods()) {
+ std::string ResultStr;
+ RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
+ SourceLocation LocStart = OMD->getLocStart();
+ SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ ReplaceText(LocStart, endBuf-startBuf, ResultStr);
+ }
+
+ for (auto *OMD : IMD ? IMD->class_methods() : CID->class_methods()) {
+ std::string ResultStr;
+ RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
+ SourceLocation LocStart = OMD->getLocStart();
+ SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ ReplaceText(LocStart, endBuf-startBuf, ResultStr);
+ }
+ for (auto *I : IMD ? IMD->property_impls() : CID->property_impls())
+ RewritePropertyImplDecl(I, IMD, CID);
+
+ InsertText(IMD ? IMD->getLocEnd() : CID->getLocEnd(), "// ");
+}
+
+void RewriteObjC::RewriteInterfaceDecl(ObjCInterfaceDecl *ClassDecl) {
+ std::string ResultStr;
+ if (!ObjCForwardDecls.count(ClassDecl->getCanonicalDecl())) {
+ // we haven't seen a forward decl - generate a typedef.
+ ResultStr = "#ifndef _REWRITER_typedef_";
+ ResultStr += ClassDecl->getNameAsString();
+ ResultStr += "\n";
+ ResultStr += "#define _REWRITER_typedef_";
+ ResultStr += ClassDecl->getNameAsString();
+ ResultStr += "\n";
+ ResultStr += "typedef struct objc_object ";
+ ResultStr += ClassDecl->getNameAsString();
+ ResultStr += ";\n#endif\n";
+ // Mark this typedef as having been generated.
+ ObjCForwardDecls.insert(ClassDecl->getCanonicalDecl());
+ }
+ RewriteObjCInternalStruct(ClassDecl, ResultStr);
+
+ for (auto *I : ClassDecl->properties())
+ RewriteProperty(I);
+ for (auto *I : ClassDecl->instance_methods())
+ RewriteMethodDeclaration(I);
+ for (auto *I : ClassDecl->class_methods())
+ RewriteMethodDeclaration(I);
+
+ // Lastly, comment out the @end.
+ ReplaceText(ClassDecl->getAtEndRange().getBegin(), strlen("@end"),
+ "/* @end */");
+}
+
+Stmt *RewriteObjC::RewritePropertyOrImplicitSetter(PseudoObjectExpr *PseudoOp) {
+ SourceRange OldRange = PseudoOp->getSourceRange();
+
+ // We just magically know some things about the structure of this
+ // expression.
+ ObjCMessageExpr *OldMsg =
+ cast<ObjCMessageExpr>(PseudoOp->getSemanticExpr(
+ PseudoOp->getNumSemanticExprs() - 1));
+
+ // Because the rewriter doesn't allow us to rewrite rewritten code,
+ // we need to suppress rewriting the sub-statements.
+ Expr *Base, *RHS;
+ {
+ DisableReplaceStmtScope S(*this);
+
+ // Rebuild the base expression if we have one.
+ Base = nullptr;
+ if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) {
+ Base = OldMsg->getInstanceReceiver();
+ Base = cast<OpaqueValueExpr>(Base)->getSourceExpr();
+ Base = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Base));
+ }
+
+ // Rebuild the RHS.
+ RHS = cast<BinaryOperator>(PseudoOp->getSyntacticForm())->getRHS();
+ RHS = cast<OpaqueValueExpr>(RHS)->getSourceExpr();
+ RHS = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(RHS));
+ }
+
+ // TODO: avoid this copy.
+ SmallVector<SourceLocation, 1> SelLocs;
+ OldMsg->getSelectorLocs(SelLocs);
+
+ ObjCMessageExpr *NewMsg = nullptr;
+ switch (OldMsg->getReceiverKind()) {
+ case ObjCMessageExpr::Class:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getClassReceiverTypeInfo(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ RHS,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::Instance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ Base,
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ RHS,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ case ObjCMessageExpr::SuperInstance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getSuperLoc(),
+ OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance,
+ OldMsg->getSuperType(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ RHS,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+ }
+
+ Stmt *Replacement = SynthMessageExpr(NewMsg);
+ ReplaceStmtWithRange(PseudoOp, Replacement, OldRange);
+ return Replacement;
+}
+
+Stmt *RewriteObjC::RewritePropertyOrImplicitGetter(PseudoObjectExpr *PseudoOp) {
+ SourceRange OldRange = PseudoOp->getSourceRange();
+
+ // We just magically know some things about the structure of this
+ // expression.
+ ObjCMessageExpr *OldMsg =
+ cast<ObjCMessageExpr>(PseudoOp->getResultExpr()->IgnoreImplicit());
+
+ // Because the rewriter doesn't allow us to rewrite rewritten code,
+ // we need to suppress rewriting the sub-statements.
+ Expr *Base = nullptr;
+ {
+ DisableReplaceStmtScope S(*this);
+
+ // Rebuild the base expression if we have one.
+ if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) {
+ Base = OldMsg->getInstanceReceiver();
+ Base = cast<OpaqueValueExpr>(Base)->getSourceExpr();
+ Base = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Base));
+ }
+ }
+
+ // Intentionally empty.
+ SmallVector<SourceLocation, 1> SelLocs;
+ SmallVector<Expr*, 1> Args;
+
+ ObjCMessageExpr *NewMsg = nullptr;
+ switch (OldMsg->getReceiverKind()) {
+ case ObjCMessageExpr::Class:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getClassReceiverTypeInfo(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::Instance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ Base,
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ case ObjCMessageExpr::SuperInstance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getSuperLoc(),
+ OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance,
+ OldMsg->getSuperType(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+ }
+
+ Stmt *Replacement = SynthMessageExpr(NewMsg);
+ ReplaceStmtWithRange(PseudoOp, Replacement, OldRange);
+ return Replacement;
+}
+
+/// SynthCountByEnumWithState - To print:
+/// ((unsigned int (*)
+/// (id, SEL, struct __objcFastEnumerationState *, id *, unsigned int))
+/// (void *)objc_msgSend)((id)l_collection,
+/// sel_registerName(
+/// "countByEnumeratingWithState:objects:count:"),
+/// &enumState,
+/// (id *)__rw_items, (unsigned int)16)
+///
+void RewriteObjC::SynthCountByEnumWithState(std::string &buf) {
+ buf += "((unsigned int (*) (id, SEL, struct __objcFastEnumerationState *, "
+ "id *, unsigned int))(void *)objc_msgSend)";
+ buf += "\n\t\t";
+ buf += "((id)l_collection,\n\t\t";
+ buf += "sel_registerName(\"countByEnumeratingWithState:objects:count:\"),";
+ buf += "\n\t\t";
+ buf += "&enumState, "
+ "(id *)__rw_items, (unsigned int)16)";
+}
+
+/// RewriteBreakStmt - Rewrite for a break-stmt inside an ObjC2's foreach
+/// statement to exit to its outer synthesized loop.
+///
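+/// For example (the label number comes from ObjCBcLabelNo and is
+/// illustrative): 'break;' becomes 'goto __break_label_1;'.
+///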
+Stmt *RewriteObjC::RewriteBreakStmt(BreakStmt *S) {
+ if (Stmts.empty() || !isa<ObjCForCollectionStmt>(Stmts.back()))
+ return S;
+ // replace break with goto __break_label
+ std::string buf;
+
+ SourceLocation startLoc = S->getLocStart();
+ buf = "goto __break_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ ReplaceText(startLoc, strlen("break"), buf);
+
+ return nullptr;
+}
+
+/// RewriteContinueStmt - Rewrite for a continue-stmt inside an ObjC2's foreach
+/// statement to continue with its inner synthesized loop.
+///
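+/// For example (the label number comes from ObjCBcLabelNo and is
+/// illustrative): 'continue;' becomes 'goto __continue_label_1;'.
+///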
+Stmt *RewriteObjC::RewriteContinueStmt(ContinueStmt *S) {
+ if (Stmts.empty() || !isa<ObjCForCollectionStmt>(Stmts.back()))
+ return S;
+ // replace continue with goto __continue_label
+ std::string buf;
+
+ SourceLocation startLoc = S->getLocStart();
+ buf = "goto __continue_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ ReplaceText(startLoc, strlen("continue"), buf);
+
+ return nullptr;
+}
+
+/// RewriteObjCForCollectionStmt - Rewriter for ObjC2's foreach statement.
+/// It rewrites:
+/// for ( type elem in collection) { stmts; }
+///
+/// Into:
+/// {
+/// type elem;
+/// struct __objcFastEnumerationState enumState = { 0 };
+/// id __rw_items[16];
+/// id l_collection = (id)collection;
+/// unsigned long limit = [l_collection countByEnumeratingWithState:&enumState
+/// objects:__rw_items count:16];
+/// if (limit) {
+/// unsigned long startMutations = *enumState.mutationsPtr;
+/// do {
+/// unsigned long counter = 0;
+/// do {
+/// if (startMutations != *enumState.mutationsPtr)
+/// objc_enumerationMutation(l_collection);
+/// elem = (type)enumState.itemsPtr[counter++];
+/// stmts;
+/// __continue_label: ;
+/// } while (counter < limit);
+/// } while (limit = [l_collection countByEnumeratingWithState:&enumState
+/// objects:__rw_items count:16]);
+/// elem = nil;
+/// __break_label: ;
+/// }
+/// else
+/// elem = nil;
+/// }
+///
+Stmt *RewriteObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
+ SourceLocation OrigEnd) {
+ assert(!Stmts.empty() && "ObjCForCollectionStmt - Statement stack empty");
+ assert(isa<ObjCForCollectionStmt>(Stmts.back()) &&
+ "ObjCForCollectionStmt Statement stack mismatch");
+ assert(!ObjCBcLabelNo.empty() &&
+ "ObjCForCollectionStmt - Label No stack empty");
+
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+ StringRef elementName;
+ std::string elementTypeAsString;
+ std::string buf;
+ buf = "\n{\n\t";
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(S->getElement())) {
+ // type elem;
+ NamedDecl* D = cast<NamedDecl>(DS->getSingleDecl());
+ QualType ElementType = cast<ValueDecl>(D)->getType();
+ if (ElementType->isObjCQualifiedIdType() ||
+ ElementType->isObjCQualifiedInterfaceType())
+ // Simply use 'id' for all qualified types.
+ elementTypeAsString = "id";
+ else
+ elementTypeAsString = ElementType.getAsString(Context->getPrintingPolicy());
+ buf += elementTypeAsString;
+ buf += " ";
+ elementName = D->getName();
+ buf += elementName;
+ buf += ";\n\t";
+ }
+ else {
+ DeclRefExpr *DR = cast<DeclRefExpr>(S->getElement());
+ elementName = DR->getDecl()->getName();
+ ValueDecl *VD = cast<ValueDecl>(DR->getDecl());
+ if (VD->getType()->isObjCQualifiedIdType() ||
+ VD->getType()->isObjCQualifiedInterfaceType())
+ // Simply use 'id' for all qualified types.
+ elementTypeAsString = "id";
+ else
+ elementTypeAsString = VD->getType().getAsString(Context->getPrintingPolicy());
+ }
+
+ // struct __objcFastEnumerationState enumState = { 0 };
+ buf += "struct __objcFastEnumerationState enumState = { 0 };\n\t";
+ // id __rw_items[16];
+ buf += "id __rw_items[16];\n\t";
+ // id l_collection = (id)
+ buf += "id l_collection = (id)";
+ // Find start location of 'collection' the hard way!
+ const char *startCollectionBuf = startBuf;
+ startCollectionBuf += 3; // skip 'for'
+ startCollectionBuf = strchr(startCollectionBuf, '(');
+ startCollectionBuf++; // skip '('
+ // find 'in' and skip it.
+ while (*startCollectionBuf != ' ' ||
+ *(startCollectionBuf+1) != 'i' || *(startCollectionBuf+2) != 'n' ||
+ (*(startCollectionBuf+3) != ' ' &&
+ *(startCollectionBuf+3) != '[' && *(startCollectionBuf+3) != '('))
+ startCollectionBuf++;
+ startCollectionBuf += 3;
+
+ // Replace: "for (type element in" with string constructed thus far.
+ ReplaceText(startLoc, startCollectionBuf - startBuf, buf);
+ // Replace the ')' of 'for ( type elem in collection )' with ';'.
+ SourceLocation rightParenLoc = S->getRParenLoc();
+ const char *rparenBuf = SM->getCharacterData(rightParenLoc);
+ SourceLocation rparenLoc = startLoc.getLocWithOffset(rparenBuf-startBuf);
+ buf = ";\n\t";
+
+ // unsigned long limit = [l_collection countByEnumeratingWithState:&enumState
+ // objects:__rw_items count:16];
+ // which is synthesized into:
+ // unsigned long limit =
+ // ((unsigned int (*)
+ // (id, SEL, struct __objcFastEnumerationState *, id *, unsigned int))
+ // (void *)objc_msgSend)((id)l_collection,
+ // sel_registerName(
+ // "countByEnumeratingWithState:objects:count:"),
+ // &enumState,
+ // (id *)__rw_items, (unsigned int)16);
+ buf += "unsigned long limit =\n\t\t";
+ SynthCountByEnumWithState(buf);
+ buf += ";\n\t";
+ /// if (limit) {
+ /// unsigned long startMutations = *enumState.mutationsPtr;
+ /// do {
+ /// unsigned long counter = 0;
+ /// do {
+ /// if (startMutations != *enumState.mutationsPtr)
+ /// objc_enumerationMutation(l_collection);
+ /// elem = (type)enumState.itemsPtr[counter++];
+ buf += "if (limit) {\n\t";
+ buf += "unsigned long startMutations = *enumState.mutationsPtr;\n\t";
+ buf += "do {\n\t\t";
+ buf += "unsigned long counter = 0;\n\t\t";
+ buf += "do {\n\t\t\t";
+ buf += "if (startMutations != *enumState.mutationsPtr)\n\t\t\t\t";
+ buf += "objc_enumerationMutation(l_collection);\n\t\t\t";
+ buf += elementName;
+ buf += " = (";
+ buf += elementTypeAsString;
+ buf += ")enumState.itemsPtr[counter++];";
+ // Replace the ')' of 'for ( type elem in collection )' with all of these.
+ ReplaceText(rparenLoc, 1, buf);
+
+ /// __continue_label: ;
+ /// } while (counter < limit);
+ /// } while (limit = [l_collection countByEnumeratingWithState:&enumState
+ /// objects:__rw_items count:16]);
+ /// elem = nil;
+ /// __break_label: ;
+ /// }
+ /// else
+ /// elem = nil;
+ /// }
+ ///
+ buf = ";\n\t";
+ buf += "__continue_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ buf += ": ;";
+ buf += "\n\t\t";
+ buf += "} while (counter < limit);\n\t";
+ buf += "} while (limit = ";
+ SynthCountByEnumWithState(buf);
+ buf += ");\n\t";
+ buf += elementName;
+ buf += " = ((";
+ buf += elementTypeAsString;
+ buf += ")0);\n\t";
+ buf += "__break_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ buf += ": ;\n\t";
+ buf += "}\n\t";
+ buf += "else\n\t\t";
+ buf += elementName;
+ buf += " = ((";
+ buf += elementTypeAsString;
+ buf += ")0);\n\t";
+ buf += "}\n";
+
+ // Insert all these *after* the statement body.
+ // FIXME: If this should support Obj-C++, support CXXTryStmt
+ if (isa<CompoundStmt>(S->getBody())) {
+ SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(1);
+ InsertText(endBodyLoc, buf);
+ } else {
+ /* Need to treat single statements specially. For example:
+ *
+ * for (A *a in b) if (stuff()) break;
+ * for (A *a in b) xxxyy;
+ *
+ * The following code simply scans ahead to the semi to find the actual end.
+ */
+ const char *stmtBuf = SM->getCharacterData(OrigEnd);
+ const char *semiBuf = strchr(stmtBuf, ';');
+ assert(semiBuf && "Can't find ';'");
+ SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(semiBuf-stmtBuf+1);
+ InsertText(endBodyLoc, buf);
+ }
+ Stmts.pop_back();
+ ObjCBcLabelNo.pop_back();
+ return nullptr;
+}
+
+/// RewriteObjCSynchronizedStmt -
+/// This routine rewrites @synchronized(expr) stmt;
+/// into:
+/// objc_sync_enter(expr);
+/// @try stmt @finally { objc_sync_exit(expr); }
+///
+Stmt *RewriteObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
+ // Get the start location and compute the semi location.
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @synchronized location");
+
+ std::string buf;
+ buf = "objc_sync_enter((id)";
+ const char *lparenBuf = startBuf;
+ while (*lparenBuf != '(') lparenBuf++;
+ ReplaceText(startLoc, lparenBuf-startBuf+1, buf);
+ // We can't use S->getSynchExpr()->getLocEnd() to find the end location, since
+ // the sync expression is typically a message expression that's already
+ // been rewritten! (which implies the SourceLocations are invalid).
+ SourceLocation endLoc = S->getSynchBody()->getLocStart();
+ const char *endBuf = SM->getCharacterData(endLoc);
+ while (*endBuf != ')') endBuf--;
+ SourceLocation rparenLoc = startLoc.getLocWithOffset(endBuf-startBuf);
+ buf = ");\n";
+ // declare a new scope with two variables, _stack and _rethrow.
+ buf += "/* @try scope begin */ \n{ struct _objc_exception_data {\n";
+ buf += "int buf[18/*32-bit i386*/];\n";
+ buf += "char *pointers[4];} _stack;\n";
+ buf += "id volatile _rethrow = 0;\n";
+ buf += "objc_exception_try_enter(&_stack);\n";
+ buf += "if (!_setjmp(_stack.buf)) /* @try block continue */\n";
+ ReplaceText(rparenLoc, 1, buf);
+ startLoc = S->getSynchBody()->getLocEnd();
+ startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '}') && "bogus @synchronized block");
+ SourceLocation lastCurlyLoc = startLoc;
+ buf = "}\nelse {\n";
+ buf += " _rethrow = objc_exception_extract(&_stack);\n";
+ buf += "}\n";
+ buf += "{ /* implicit finally clause */\n";
+ buf += " if (!_rethrow) objc_exception_try_exit(&_stack);\n";
+
+ std::string syncBuf;
+ syncBuf += " objc_sync_exit(";
+
+ Expr *syncExpr = S->getSynchExpr();
+ CastKind CK = syncExpr->getType()->isObjCObjectPointerType()
+ ? CK_BitCast :
+ syncExpr->getType()->isBlockPointerType()
+ ? CK_BlockPointerToObjCPointerCast
+ : CK_CPointerToObjCPointerCast;
+ syncExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK, syncExpr);
+ std::string syncExprBufS;
+ llvm::raw_string_ostream syncExprBuf(syncExprBufS);
+ assert(syncExpr != nullptr && "Expected non-null Expr");
+ syncExpr->printPretty(syncExprBuf, nullptr, PrintingPolicy(LangOpts));
+ syncBuf += syncExprBuf.str();
+ syncBuf += ");";
+
+ buf += syncBuf;
+ buf += "\n if (_rethrow) objc_exception_throw(_rethrow);\n";
+ buf += "}\n";
+ buf += "}";
+
+ ReplaceText(lastCurlyLoc, 1, buf);
+
+ bool hasReturns = false;
+ HasReturnStmts(S->getSynchBody(), hasReturns);
+ if (hasReturns)
+ RewriteSyncReturnStmts(S->getSynchBody(), syncBuf);
+
+ return nullptr;
+}
+
+void RewriteObjC::WarnAboutReturnGotoStmts(Stmt *S) {
+ // Perform a bottom up traversal of all children.
+ for (Stmt *SubStmt : S->children())
+ if (SubStmt)
+ WarnAboutReturnGotoStmts(SubStmt);
+
+ if (isa<ReturnStmt>(S) || isa<GotoStmt>(S)) {
+ Diags.Report(Context->getFullLoc(S->getLocStart()),
+ TryFinallyContainsReturnDiag);
+ }
+ return;
+}
+
+void RewriteObjC::HasReturnStmts(Stmt *S, bool &hasReturns) {
+ // Perform a bottom up traversal of all children.
+ for (Stmt *SubStmt : S->children())
+ if (SubStmt)
+ HasReturnStmts(SubStmt, hasReturns);
+
+ if (isa<ReturnStmt>(S))
+ hasReturns = true;
+ return;
+}
+
+void RewriteObjC::RewriteTryReturnStmts(Stmt *S) {
+ // Perform a bottom up traversal of all children.
+ for (Stmt *SubStmt : S->children())
+ if (SubStmt) {
+ RewriteTryReturnStmts(SubStmt);
+ }
+ if (isa<ReturnStmt>(S)) {
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ const char *semiBuf = strchr(startBuf, ';');
+ assert((*semiBuf == ';') && "RewriteTryReturnStmts: can't find ';'");
+ SourceLocation onePastSemiLoc = startLoc.getLocWithOffset(semiBuf-startBuf+1);
+
+ std::string buf;
+ buf = "{ objc_exception_try_exit(&_stack); return";
+
+ ReplaceText(startLoc, 6, buf);
+ InsertText(onePastSemiLoc, "}");
+ }
+ return;
+}
+
+void RewriteObjC::RewriteSyncReturnStmts(Stmt *S, std::string syncExitBuf) {
+ // Perform a bottom up traversal of all children.
+ for (Stmt *SubStmt : S->children())
+ if (SubStmt) {
+ RewriteSyncReturnStmts(SubStmt, syncExitBuf);
+ }
+ if (isa<ReturnStmt>(S)) {
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ const char *semiBuf = strchr(startBuf, ';');
+ assert((*semiBuf == ';') && "RewriteSyncReturnStmts: can't find ';'");
+ SourceLocation onePastSemiLoc = startLoc.getLocWithOffset(semiBuf-startBuf+1);
+
+ std::string buf;
+ buf = "{ objc_exception_try_exit(&_stack);";
+ buf += syncExitBuf;
+ buf += " return";
+
+ ReplaceText(startLoc, 6, buf);
+ InsertText(onePastSemiLoc, "}");
+ }
+ return;
+}
+
+Stmt *RewriteObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
+ // Get the start location and compute the semi location.
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @try location");
+
+ std::string buf;
+ // declare a new scope with two variables, _stack and _rethrow.
+ buf = "/* @try scope begin */ { struct _objc_exception_data {\n";
+ buf += "int buf[18/*32-bit i386*/];\n";
+ buf += "char *pointers[4];} _stack;\n";
+ buf += "id volatile _rethrow = 0;\n";
+ buf += "objc_exception_try_enter(&_stack);\n";
+ buf += "if (!_setjmp(_stack.buf)) /* @try block continue */\n";
+
+ ReplaceText(startLoc, 4, buf);
+
+ startLoc = S->getTryBody()->getLocEnd();
+ startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '}') && "bogus @try block");
+
+ SourceLocation lastCurlyLoc = startLoc;
+ if (S->getNumCatchStmts()) {
+ startLoc = startLoc.getLocWithOffset(1);
+ buf = " /* @catch begin */ else {\n";
+ buf += " id _caught = objc_exception_extract(&_stack);\n";
+ buf += " objc_exception_try_enter (&_stack);\n";
+ buf += " if (_setjmp(_stack.buf))\n";
+ buf += " _rethrow = objc_exception_extract(&_stack);\n";
+ buf += " else { /* @catch continue */";
+
+ InsertText(startLoc, buf);
+ } else { /* no catch list */
+ buf = "}\nelse {\n";
+ buf += " _rethrow = objc_exception_extract(&_stack);\n";
+ buf += "}";
+ ReplaceText(lastCurlyLoc, 1, buf);
+ }
+ Stmt *lastCatchBody = nullptr;
+ for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) {
+ ObjCAtCatchStmt *Catch = S->getCatchStmt(I);
+ VarDecl *catchDecl = Catch->getCatchParamDecl();
+
+ if (I == 0)
+ buf = "if ("; // we are generating code for the first catch clause
+ else
+ buf = "else if (";
+ startLoc = Catch->getLocStart();
+ startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @catch location");
+
+ const char *lParenLoc = strchr(startBuf, '(');
+
+ if (Catch->hasEllipsis()) {
+ // Now rewrite the body...
+ lastCatchBody = Catch->getCatchBody();
+ SourceLocation bodyLoc = lastCatchBody->getLocStart();
+ const char *bodyBuf = SM->getCharacterData(bodyLoc);
+ assert(*SM->getCharacterData(Catch->getRParenLoc()) == ')' &&
+ "bogus @catch paren location");
+ assert((*bodyBuf == '{') && "bogus @catch body location");
+
+ buf += "1) { id _tmp = _caught;";
+ ReplaceText(startLoc, bodyBuf-startBuf+1, buf);
+ } else if (catchDecl) {
+ QualType t = catchDecl->getType();
+ if (t == Context->getObjCIdType()) {
+ buf += "1) { ";
+ ReplaceText(startLoc, lParenLoc-startBuf+1, buf);
+ } else if (const ObjCObjectPointerType *Ptr =
+ t->getAs<ObjCObjectPointerType>()) {
+ // Should be a pointer to a class.
+ ObjCInterfaceDecl *IDecl = Ptr->getObjectType()->getInterface();
+ if (IDecl) {
+ buf += "objc_exception_match((struct objc_class *)objc_getClass(\"";
+ buf += IDecl->getNameAsString();
+ buf += "\"), (struct objc_object *)_caught)) { ";
+ ReplaceText(startLoc, lParenLoc-startBuf+1, buf);
+ }
+ }
+ // Now rewrite the body...
+ lastCatchBody = Catch->getCatchBody();
+ SourceLocation rParenLoc = Catch->getRParenLoc();
+ SourceLocation bodyLoc = lastCatchBody->getLocStart();
+ const char *bodyBuf = SM->getCharacterData(bodyLoc);
+ const char *rParenBuf = SM->getCharacterData(rParenLoc);
+ assert((*rParenBuf == ')') && "bogus @catch paren location");
+ assert((*bodyBuf == '{') && "bogus @catch body location");
+
+ // Here we replace ") {" with "= _caught;" (which initializes and
+ // declares the @catch parameter).
+ ReplaceText(rParenLoc, bodyBuf-rParenBuf+1, " = _caught;");
+ } else {
+ llvm_unreachable("@catch rewrite bug");
+ }
+ }
+ // Complete the catch list...
+ if (lastCatchBody) {
+ SourceLocation bodyLoc = lastCatchBody->getLocEnd();
+ assert(*SM->getCharacterData(bodyLoc) == '}' &&
+ "bogus @catch body location");
+
+ // Insert the last (implicit) else clause *before* the right curly brace.
+ bodyLoc = bodyLoc.getLocWithOffset(-1);
+ buf = "} /* last catch end */\n";
+ buf += "else {\n";
+ buf += " _rethrow = _caught;\n";
+ buf += " objc_exception_try_exit(&_stack);\n";
+ buf += "} } /* @catch end */\n";
+ if (!S->getFinallyStmt())
+ buf += "}\n";
+ InsertText(bodyLoc, buf);
+
+ // Set lastCurlyLoc
+ lastCurlyLoc = lastCatchBody->getLocEnd();
+ }
+ if (ObjCAtFinallyStmt *finalStmt = S->getFinallyStmt()) {
+ startLoc = finalStmt->getLocStart();
+ startBuf = SM->getCharacterData(startLoc);
+ assert((*startBuf == '@') && "bogus @finally start");
+
+ ReplaceText(startLoc, 8, "/* @finally */");
+
+ Stmt *body = finalStmt->getFinallyBody();
+ SourceLocation startLoc = body->getLocStart();
+ SourceLocation endLoc = body->getLocEnd();
+ assert(*SM->getCharacterData(startLoc) == '{' &&
+ "bogus @finally body location");
+ assert(*SM->getCharacterData(endLoc) == '}' &&
+ "bogus @finally body location");
+
+ startLoc = startLoc.getLocWithOffset(1);
+ InsertText(startLoc, " if (!_rethrow) objc_exception_try_exit(&_stack);\n");
+ endLoc = endLoc.getLocWithOffset(-1);
+ InsertText(endLoc, " if (_rethrow) objc_exception_throw(_rethrow);\n");
+
+ // Set lastCurlyLoc
+ lastCurlyLoc = body->getLocEnd();
+
+ // Now check for any return/continue/goto statements within the @try.
+ WarnAboutReturnGotoStmts(S->getTryBody());
+ } else { /* no finally clause - make sure we synthesize an implicit one */
+ buf = "{ /* implicit finally clause */\n";
+ buf += " if (!_rethrow) objc_exception_try_exit(&_stack);\n";
+ buf += " if (_rethrow) objc_exception_throw(_rethrow);\n";
+ buf += "}";
+ ReplaceText(lastCurlyLoc, 1, buf);
+
+ // Now check for any return/continue/goto statements within the @try.
+ // The implicit finally clause won't be called if the @try contains any
+ // jump statements.
+ bool hasReturns = false;
+ HasReturnStmts(S->getTryBody(), hasReturns);
+ if (hasReturns)
+ RewriteTryReturnStmts(S->getTryBody());
+ }
+ // Now emit the final closing curly brace...
+ lastCurlyLoc = lastCurlyLoc.getLocWithOffset(1);
+ InsertText(lastCurlyLoc, " } /* @try scope end */\n");
+ return nullptr;
+}
+
+// This can't be done with ReplaceStmt(S, ThrowExpr), since
+// the throw expression is typically a message expression that's already
+// been rewritten! (which implies the SourceLocations are invalid).
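+//
+// For example (hypothetical expression): '@throw anException;' becomes
+// 'objc_exception_throw(anException);', and a bare '@throw;' inside a @catch
+// rethrows the caught exception as 'objc_exception_throw(_caught);'.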
+Stmt *RewriteObjC::RewriteObjCThrowStmt(ObjCAtThrowStmt *S) {
+ // Get the start location and compute the semi location.
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @throw location");
+
+ std::string buf;
+ /* void objc_exception_throw(id) __attribute__((noreturn)); */
+ if (S->getThrowExpr())
+ buf = "objc_exception_throw(";
+ else // add an implicit argument
+ buf = "objc_exception_throw(_caught";
+
+ // handle "@ throw" correctly.
+ const char *wBuf = strchr(startBuf, 'w');
+ assert((*wBuf == 'w') && "@throw: can't find 'w'");
+ ReplaceText(startLoc, wBuf-startBuf+1, buf);
+
+ const char *semiBuf = strchr(startBuf, ';');
+ assert((*semiBuf == ';') && "@throw: can't find ';'");
+ SourceLocation semiLoc = startLoc.getLocWithOffset(semiBuf-startBuf);
+ ReplaceText(semiLoc, 1, ");");
+ return nullptr;
+}
+
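+// For example: '@encode(int)' is rewritten to the string literal "i", the
+// Objective-C type encoding produced by getObjCEncodingForType().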
+Stmt *RewriteObjC::RewriteAtEncode(ObjCEncodeExpr *Exp) {
+ // Create a new string expression.
+ std::string StrEncoding;
+ Context->getObjCEncodingForType(Exp->getEncodedType(), StrEncoding);
+ Expr *Replacement = getStringLiteral(StrEncoding);
+ ReplaceStmt(Exp, Replacement);
+
+ // Replace this subexpr in the parent.
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return Replacement;
+}
+
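+// For example: '@selector(setFoo:)' is rewritten to
+// 'sel_registerName("setFoo:")'.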
+Stmt *RewriteObjC::RewriteAtSelector(ObjCSelectorExpr *Exp) {
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ assert(SelGetUidFunctionDecl && "Can't find sel_registerName() decl");
+ // Create a call to sel_registerName("selName").
+ SmallVector<Expr*, 8> SelExprs;
+ SelExprs.push_back(getStringLiteral(Exp->getSelector().getAsString()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ SelExprs);
+ ReplaceStmt(Exp, SelExp);
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return SelExp;
+}
+
+CallExpr *
+RewriteObjC::SynthesizeCallToFunctionDecl(FunctionDecl *FD,
+ ArrayRef<Expr *> Args,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ // Get the type, we will need to reference it in a couple spots.
+ QualType msgSendType = FD->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, msgSendType,
+ VK_LValue, SourceLocation());
+
+ // Now, we cast the reference to a pointer to the objc_msgSend type.
+ QualType pToFunc = Context->getPointerType(msgSendType);
+ ImplicitCastExpr *ICE =
+ ImplicitCastExpr::Create(*Context, pToFunc, CK_FunctionToPointerDecay,
+ DRE, nullptr, VK_RValue);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+
+ CallExpr *Exp = new (Context) CallExpr(*Context, ICE, Args,
+ FT->getCallResultType(*Context),
+ VK_RValue, EndLoc);
+ return Exp;
+}
+
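+// A sketch of what the scanners below do: in the source text
+// 'id<NSCopying> obj' (hypothetical), scanForProtocolRefs() points startRef
+// at the '<' and endRef at the '>', so callers can comment the protocol
+// list out, yielding 'id/*<NSCopying>*/ obj'.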
+static bool scanForProtocolRefs(const char *startBuf, const char *endBuf,
+ const char *&startRef, const char *&endRef) {
+ while (startBuf < endBuf) {
+ if (*startBuf == '<')
+ startRef = startBuf; // mark the start.
+ if (*startBuf == '>') {
+ if (startRef && *startRef == '<') {
+ endRef = startBuf; // mark the end.
+ return true;
+ }
+ return false;
+ }
+ startBuf++;
+ }
+ return false;
+}
+
+static void scanToNextArgument(const char *&argRef) {
+ int angle = 0;
+ while (*argRef != ')' && (*argRef != ',' || angle > 0)) {
+ if (*argRef == '<')
+ angle++;
+ else if (*argRef == '>')
+ angle--;
+ argRef++;
+ }
+ assert(angle == 0 && "scanToNextArgument - bad protocol type syntax");
+}
+
+bool RewriteObjC::needToScanForQualifiers(QualType T) {
+ if (T->isObjCQualifiedIdType())
+ return true;
+ if (const PointerType *PT = T->getAs<PointerType>()) {
+ if (PT->getPointeeType()->isObjCQualifiedIdType())
+ return true;
+ }
+ if (T->isObjCObjectPointerType()) {
+ T = T->getPointeeType();
+ return T->isObjCQualifiedInterfaceType();
+ }
+ if (T->isArrayType()) {
+ QualType ElemTy = Context->getBaseElementType(T);
+ return needToScanForQualifiers(ElemTy);
+ }
+ return false;
+}
+
+void RewriteObjC::RewriteObjCQualifiedInterfaceTypes(Expr *E) {
+ QualType Type = E->getType();
+ if (needToScanForQualifiers(Type)) {
+ SourceLocation Loc, EndLoc;
+
+ if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E)) {
+ Loc = ECE->getLParenLoc();
+ EndLoc = ECE->getRParenLoc();
+ } else {
+ Loc = E->getLocStart();
+ EndLoc = E->getLocEnd();
+ }
+ // This will defend against trying to rewrite synthesized expressions.
+ if (Loc.isInvalid() || EndLoc.isInvalid())
+ return;
+
+ const char *startBuf = SM->getCharacterData(Loc);
+ const char *endBuf = SM->getCharacterData(EndLoc);
+ const char *startRef = nullptr, *endRef = nullptr;
+ if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
+ // Get the locations of the startRef, endRef.
+ SourceLocation LessLoc = Loc.getLocWithOffset(startRef-startBuf);
+ SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-startBuf+1);
+ // Comment out the protocol references.
+ InsertText(LessLoc, "/*");
+ InsertText(GreaterLoc, "*/");
+ }
+ }
+}
+
+void RewriteObjC::RewriteObjCQualifiedInterfaceTypes(Decl *Dcl) {
+ SourceLocation Loc;
+ QualType Type;
+ const FunctionProtoType *proto = nullptr;
+ if (VarDecl *VD = dyn_cast<VarDecl>(Dcl)) {
+ Loc = VD->getLocation();
+ Type = VD->getType();
+ }
+ else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Dcl)) {
+ Loc = FD->getLocation();
+ // Check for ObjC 'id' and class types that have been adorned with protocol
+ // information (id<p>, C<p>*). The protocol references need to be rewritten!
+ const FunctionType *funcType = FD->getType()->getAs<FunctionType>();
+ assert(funcType && "missing function type");
+ proto = dyn_cast<FunctionProtoType>(funcType);
+ if (!proto)
+ return;
+ Type = proto->getReturnType();
+ }
+ else if (FieldDecl *FD = dyn_cast<FieldDecl>(Dcl)) {
+ Loc = FD->getLocation();
+ Type = FD->getType();
+ }
+ else
+ return;
+
+ if (needToScanForQualifiers(Type)) {
+ // Since types are unique, we need to scan the buffer.
+
+ const char *endBuf = SM->getCharacterData(Loc);
+ const char *startBuf = endBuf;
+ while (*startBuf != ';' && *startBuf != '<' && startBuf != MainFileStart)
+ startBuf--; // scan backward (from the decl location) for return type.
+ const char *startRef = nullptr, *endRef = nullptr;
+ if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
+ // Get the locations of the startRef, endRef.
+ SourceLocation LessLoc = Loc.getLocWithOffset(startRef-endBuf);
+ SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-endBuf+1);
+ // Comment out the protocol references.
+ InsertText(LessLoc, "/*");
+ InsertText(GreaterLoc, "*/");
+ }
+ }
+ if (!proto)
+ return; // most likely, was a variable
+ // Now check arguments.
+ const char *startBuf = SM->getCharacterData(Loc);
+ const char *startFuncBuf = startBuf;
+ for (unsigned i = 0; i < proto->getNumParams(); i++) {
+ if (needToScanForQualifiers(proto->getParamType(i))) {
+ // Since types are unique, we need to scan the buffer.
+
+ const char *endBuf = startBuf;
+ // scan forward (from the decl location) for argument types.
+ scanToNextArgument(endBuf);
+ const char *startRef = nullptr, *endRef = nullptr;
+ if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
+ // Get the locations of the startRef, endRef.
+ SourceLocation LessLoc =
+ Loc.getLocWithOffset(startRef-startFuncBuf);
+ SourceLocation GreaterLoc =
+ Loc.getLocWithOffset(endRef-startFuncBuf+1);
+ // Comment out the protocol references.
+ InsertText(LessLoc, "/*");
+ InsertText(GreaterLoc, "*/");
+ }
+ startBuf = ++endBuf;
+ }
+ else {
+ // If the function name is derived from a macro expansion, then the
+ // argument buffer will not follow the name. Need to speak with Chris.
+ while (*startBuf && *startBuf != ')' && *startBuf != ',')
+ startBuf++; // scan forward (from the decl location) for argument types.
+ startBuf++;
+ }
+ }
+}
+
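+// Illustrative example (hypothetical declarations): given 'int a;', the
+// declaration '__typeof__(a) b = a;' is rewritten to 'int b = a;' -- the
+// typeof specifier is replaced by the spelled-out underlying type.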
+void RewriteObjC::RewriteTypeOfDecl(VarDecl *ND) {
+ QualType QT = ND->getType();
+ const Type* TypePtr = QT->getAs<Type>();
+ if (!isa<TypeOfExprType>(TypePtr))
+ return;
+ while (isa<TypeOfExprType>(TypePtr)) {
+ const TypeOfExprType *TypeOfExprTypePtr = cast<TypeOfExprType>(TypePtr);
+ QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType();
+ TypePtr = QT->getAs<Type>();
+ }
+ // FIXME: This will not work for multiple declarators, as in:
+ // __typeof__(a) b,c,d;
+ std::string TypeAsString(QT.getAsString(Context->getPrintingPolicy()));
+ SourceLocation DeclLoc = ND->getTypeSpecStartLoc();
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ if (ND->getInit()) {
+ std::string Name(ND->getNameAsString());
+ TypeAsString += " " + Name + " = ";
+ Expr *E = ND->getInit();
+ SourceLocation startLoc;
+ if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E))
+ startLoc = ECE->getLParenLoc();
+ else
+ startLoc = E->getLocStart();
+ startLoc = SM->getExpansionLoc(startLoc);
+ const char *endBuf = SM->getCharacterData(startLoc);
+ ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString);
+ }
+ else {
+ SourceLocation X = ND->getLocEnd();
+ X = SM->getExpansionLoc(X);
+ const char *endBuf = SM->getCharacterData(X);
+ ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString);
+ }
+}
+
+// SynthSelGetUidFunctionDecl - SEL sel_registerName(const char *str);
+void RewriteObjC::SynthSelGetUidFunctionDecl() {
+ IdentifierInfo *SelGetUidIdent = &Context->Idents.get("sel_registerName");
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
+ QualType getFuncType =
+ getSimpleFunctionType(Context->getObjCSelType(), ArgTys);
+ SelGetUidFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ SelGetUidIdent, getFuncType,
+ nullptr, SC_Extern);
+}
+
+void RewriteObjC::RewriteFunctionDecl(FunctionDecl *FD) {
+ // declared in <objc/objc.h>
+ if (FD->getIdentifier() &&
+ FD->getName() == "sel_registerName") {
+ SelGetUidFunctionDecl = FD;
+ return;
+ }
+ RewriteObjCQualifiedInterfaceTypes(FD);
+}
+
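+// For example: the block type string 'int (^)(float)' is emitted as the
+// function-pointer type 'int (*)(float)' -- each '^' becomes '*'.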
+void RewriteObjC::RewriteBlockPointerType(std::string& Str, QualType Type) {
+ std::string TypeString(Type.getAsString(Context->getPrintingPolicy()));
+ const char *argPtr = TypeString.c_str();
+ if (!strchr(argPtr, '^')) {
+ Str += TypeString;
+ return;
+ }
+ while (*argPtr) {
+ Str += (*argPtr == '^' ? '*' : *argPtr);
+ argPtr++;
+ }
+}
+
+// FIXME. Consolidate this routine with RewriteBlockPointerType.
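+// For example: for a variable 'b' (hypothetical) of type 'int (^)(float)',
+// this emits 'int (*b)(float)' -- the declarator name is spliced in right
+// after the '*' at the outermost paren level.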
+void RewriteObjC::RewriteBlockPointerTypeVariable(std::string& Str,
+ ValueDecl *VD) {
+ QualType Type = VD->getType();
+ std::string TypeString(Type.getAsString(Context->getPrintingPolicy()));
+ const char *argPtr = TypeString.c_str();
+ int paren = 0;
+ while (*argPtr) {
+ switch (*argPtr) {
+ case '(':
+ Str += *argPtr;
+ paren++;
+ break;
+ case ')':
+ Str += *argPtr;
+ paren--;
+ break;
+ case '^':
+ Str += '*';
+ if (paren == 1)
+ Str += VD->getNameAsString();
+ break;
+ default:
+ Str += *argPtr;
+ break;
+ }
+ argPtr++;
+ }
+}
+
+void RewriteObjC::RewriteBlockLiteralFunctionDecl(FunctionDecl *FD) {
+ SourceLocation FunLocStart = FD->getTypeSpecStartLoc();
+ const FunctionType *funcType = FD->getType()->getAs<FunctionType>();
+ const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(funcType);
+ if (!proto)
+ return;
+ QualType Type = proto->getReturnType();
+ std::string FdStr = Type.getAsString(Context->getPrintingPolicy());
+ FdStr += " ";
+ FdStr += FD->getName();
+ FdStr += "(";
+ unsigned numArgs = proto->getNumParams();
+ for (unsigned i = 0; i < numArgs; i++) {
+ QualType ArgType = proto->getParamType(i);
+ RewriteBlockPointerType(FdStr, ArgType);
+ if (i+1 < numArgs)
+ FdStr += ", ";
+ }
+ FdStr += ");\n";
+ InsertText(FunLocStart, FdStr);
+ CurFunctionDeclToDeclareForBlock = nullptr;
+}
+
+// SynthSuperConstructorFunctionDecl - id __rw_objc_super(id obj, id super);
+void RewriteObjC::SynthSuperConstructorFunctionDecl() {
+ if (SuperConstructorFunctionDecl)
+ return;
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("__rw_objc_super");
+ SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ ArgTys);
+ SuperConstructorFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType,
+ nullptr, SC_Extern);
+}
+
+// SynthMsgSendFunctionDecl - id objc_msgSend(id self, SEL op, ...);
+void RewriteObjC::SynthMsgSendFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend");
+ SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ ArgTys, /*isVariadic=*/true);
+ MsgSendFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType,
+ nullptr, SC_Extern);
+}
+
+// SynthMsgSendSuperFunctionDecl - id objc_msgSendSuper(struct objc_super *, SEL op, ...);
+void RewriteObjC::SynthMsgSendSuperFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSendSuper");
+ SmallVector<QualType, 16> ArgTys;
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("objc_super"));
+ QualType argT = Context->getPointerType(Context->getTagDeclType(RD));
+ assert(!argT.isNull() && "Can't build 'struct objc_super *' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ ArgTys, /*isVariadic=*/true);
+ MsgSendSuperFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType,
+ nullptr, SC_Extern);
+}
+
+// SynthMsgSendStretFunctionDecl - id objc_msgSend_stret(id self, SEL op, ...);
+void RewriteObjC::SynthMsgSendStretFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_stret");
+ SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ ArgTys, /*isVariadic=*/true);
+ MsgSendStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType,
+ nullptr, SC_Extern);
+}
+
+// SynthMsgSendSuperStretFunctionDecl -
+// id objc_msgSendSuper_stret(struct objc_super *, SEL op, ...);
+void RewriteObjC::SynthMsgSendSuperStretFunctionDecl() {
+ IdentifierInfo *msgSendIdent =
+ &Context->Idents.get("objc_msgSendSuper_stret");
+ SmallVector<QualType, 16> ArgTys;
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("objc_super"));
+ QualType argT = Context->getPointerType(Context->getTagDeclType(RD));
+ assert(!argT.isNull() && "Can't build 'struct objc_super *' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ ArgTys, /*isVariadic=*/true);
+ MsgSendSuperStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent,
+ msgSendType, nullptr,
+ SC_Extern);
+}
+
+// SynthMsgSendFpretFunctionDecl - double objc_msgSend_fpret(id self, SEL op, ...);
+void RewriteObjC::SynthMsgSendFpretFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_fpret");
+ SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->DoubleTy,
+ ArgTys, /*isVariadic=*/true);
+ MsgSendFpretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType,
+ nullptr, SC_Extern);
+}
+
+// SynthGetClassFunctionDecl - id objc_getClass(const char *name);
+void RewriteObjC::SynthGetClassFunctionDecl() {
+ IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getClass");
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
+ QualType getClassType = getSimpleFunctionType(Context->getObjCIdType(),
+ ArgTys);
+ GetClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ getClassIdent, getClassType,
+ nullptr, SC_Extern);
+}
+
+// SynthGetSuperClassFunctionDecl - Class class_getSuperclass(Class cls);
+void RewriteObjC::SynthGetSuperClassFunctionDecl() {
+ IdentifierInfo *getSuperClassIdent =
+ &Context->Idents.get("class_getSuperclass");
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getObjCClassType());
+ QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(),
+ ArgTys);
+ GetSuperClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ getSuperClassIdent,
+ getClassType, nullptr,
+ SC_Extern);
+}
+
+// SynthGetMetaClassFunctionDecl - id objc_getMetaClass(const char *name);
+void RewriteObjC::SynthGetMetaClassFunctionDecl() {
+ IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getMetaClass");
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
+ QualType getClassType = getSimpleFunctionType(Context->getObjCIdType(),
+ ArgTys);
+ GetMetaClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ getClassIdent, getClassType,
+ nullptr, SC_Extern);
+}
+
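+// Sketch of the rewrite (file name and counter are illustrative): for
+// '@"hello"' in main.m, the preamble gains
+//
+//   static __NSConstantStringImpl __NSConstantStringImpl_main_m_0
+//     __attribute__ ((section ("__DATA, __cfstring"))) =
+//     {__CFConstantStringClassReference, 0x000007c8, "hello", 5};
+//
+// and the literal itself is replaced by a cast of
+// &__NSConstantStringImpl_main_m_0 to the literal's NSString type.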
+Stmt *RewriteObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
+ assert(Exp != nullptr && "Expected non-null ObjCStringLiteral");
+ QualType strType = getConstantStringStructType();
+
+ std::string S = "__NSConstantStringImpl_";
+
+ std::string tmpName = InFileName;
+ unsigned i;
+ for (i=0; i < tmpName.length(); i++) {
+ char c = tmpName.at(i);
+ // replace any non-alphanumeric characters with '_'.
+ if (!isAlphanumeric(c))
+ tmpName[i] = '_';
+ }
+ S += tmpName;
+ S += "_";
+ S += utostr(NumObjCStringLiterals++);
+
+ Preamble += "static __NSConstantStringImpl " + S;
+ Preamble += " __attribute__ ((section (\"__DATA, __cfstring\"))) = {__CFConstantStringClassReference,";
+ Preamble += "0x000007c8,"; // utf8_str
+ // The pretty printer for StringLiteral handles escape characters properly.
+ std::string prettyBufS;
+ llvm::raw_string_ostream prettyBuf(prettyBufS);
+ Exp->getString()->printPretty(prettyBuf, nullptr, PrintingPolicy(LangOpts));
+ Preamble += prettyBuf.str();
+ Preamble += ",";
+ Preamble += utostr(Exp->getString()->getByteLength()) + "};\n";
+
+ VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
+ SourceLocation(), &Context->Idents.get(S),
+ strType, nullptr, SC_Static);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false, strType, VK_LValue,
+ SourceLocation());
+ Expr *Unop = new (Context) UnaryOperator(DRE, UO_AddrOf,
+ Context->getPointerType(DRE->getType()),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ // cast to NSConstantString *
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(),
+ CK_CPointerToObjCPointerCast, Unop);
+ ReplaceStmt(Exp, cast);
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return cast;
+}
+
+// struct objc_super { struct objc_object *receiver; struct objc_class *super; };
+QualType RewriteObjC::getSuperStructType() {
+ if (!SuperStructDecl) {
+ SuperStructDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("objc_super"));
+ QualType FieldTypes[2];
+
+ // struct objc_object *receiver;
+ FieldTypes[0] = Context->getObjCIdType();
+ // struct objc_class *super;
+ FieldTypes[1] = Context->getObjCClassType();
+
+ // Create fields
+ for (unsigned i = 0; i < 2; ++i) {
+ SuperStructDecl->addDecl(FieldDecl::Create(*Context, SuperStructDecl,
+ SourceLocation(),
+ SourceLocation(), nullptr,
+ FieldTypes[i], nullptr,
+ /*BitWidth=*/nullptr,
+ /*Mutable=*/false,
+ ICIS_NoInit));
+ }
+
+ SuperStructDecl->completeDefinition();
+ }
+ return Context->getTagDeclType(SuperStructDecl);
+}
+
+QualType RewriteObjC::getConstantStringStructType() {
+ if (!ConstantStringDecl) {
+ ConstantStringDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("__NSConstantStringImpl"));
+ QualType FieldTypes[4];
+
+ // id isa; (points at __CFConstantStringClassReference)
+ FieldTypes[0] = Context->getObjCIdType();
+ // int flags;
+ FieldTypes[1] = Context->IntTy;
+ // char *str;
+ FieldTypes[2] = Context->getPointerType(Context->CharTy);
+ // long length;
+ FieldTypes[3] = Context->LongTy;
+
+ // Create fields
+ for (unsigned i = 0; i < 4; ++i) {
+ ConstantStringDecl->addDecl(FieldDecl::Create(*Context,
+ ConstantStringDecl,
+ SourceLocation(),
+ SourceLocation(), nullptr,
+ FieldTypes[i], nullptr,
+ /*BitWidth=*/nullptr,
+ /*Mutable=*/true,
+ ICIS_NoInit));
+ }
+
+ ConstantStringDecl->completeDefinition();
+ }
+ return Context->getTagDeclType(ConstantStringDecl);
+}
+
+CallExpr *RewriteObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor,
+ QualType msgSendType,
+ QualType returnType,
+ SmallVectorImpl<QualType> &ArgTypes,
+ SmallVectorImpl<Expr*> &MsgExprs,
+ ObjCMethodDecl *Method) {
+ // Create a reference to the objc_msgSend_stret() declaration.
+ DeclRefExpr *STDRE = new (Context) DeclRefExpr(MsgSendStretFlavor,
+ false, msgSendType,
+ VK_LValue, SourceLocation());
+ // Need to cast objc_msgSend_stret to "void *" (see above comment).
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->VoidTy),
+ CK_BitCast, STDRE);
+ // Now do the "normal" pointer to function cast.
+ QualType castType = getSimpleFunctionType(returnType, ArgTypes,
+ Method ? Method->isVariadic()
+ : false);
+ castType = Context->getPointerType(castType);
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
+ cast);
+
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), cast);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+ CallExpr *STCE = new (Context) CallExpr(
+ *Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, SourceLocation());
+  return STCE;
+}
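+
+// Schematic form of the call built above (names illustrative):
+//   ((returnType (*)(ArgTypes...))(void *)objc_msgSend_stret)(receiver, sel, args...)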
+
+Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ if (!MsgSendFunctionDecl)
+ SynthMsgSendFunctionDecl();
+ if (!MsgSendSuperFunctionDecl)
+ SynthMsgSendSuperFunctionDecl();
+ if (!MsgSendStretFunctionDecl)
+ SynthMsgSendStretFunctionDecl();
+ if (!MsgSendSuperStretFunctionDecl)
+ SynthMsgSendSuperStretFunctionDecl();
+ if (!MsgSendFpretFunctionDecl)
+ SynthMsgSendFpretFunctionDecl();
+ if (!GetClassFunctionDecl)
+ SynthGetClassFunctionDecl();
+ if (!GetSuperClassFunctionDecl)
+ SynthGetSuperClassFunctionDecl();
+ if (!GetMetaClassFunctionDecl)
+ SynthGetMetaClassFunctionDecl();
+
+ // default to objc_msgSend().
+ FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
+ // May need to use objc_msgSend_stret() as well.
+ FunctionDecl *MsgSendStretFlavor = nullptr;
+ if (ObjCMethodDecl *mDecl = Exp->getMethodDecl()) {
+ QualType resultType = mDecl->getReturnType();
+ if (resultType->isRecordType())
+ MsgSendStretFlavor = MsgSendStretFunctionDecl;
+ else if (resultType->isRealFloatingType())
+ MsgSendFlavor = MsgSendFpretFunctionDecl;
+ }
+
+ // Synthesize a call to objc_msgSend().
+ SmallVector<Expr*, 8> MsgExprs;
+ switch (Exp->getReceiverKind()) {
+ case ObjCMessageExpr::SuperClass: {
+ MsgSendFlavor = MsgSendSuperFunctionDecl;
+ if (MsgSendStretFlavor)
+ MsgSendStretFlavor = MsgSendSuperStretFunctionDecl;
+ assert(MsgSendFlavor && "MsgSendFlavor is NULL!");
+
+ ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface();
+
+ SmallVector<Expr*, 4> InitExprs;
+
+ // set the receiver to self, the first argument to all methods.
+ InitExprs.push_back(
+ NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK_BitCast,
+ new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
+ false,
+ Context->getObjCIdType(),
+ VK_RValue,
+ SourceLocation()))
+ ); // set the 'receiver'.
+
+ // (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
+ SmallVector<Expr*, 8> ClsExprs;
+ ClsExprs.push_back(getStringLiteral(ClassDecl->getIdentifier()->getName()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetMetaClassFunctionDecl,
+ ClsExprs, StartLoc, EndLoc);
+ // (Class)objc_getClass("CurrentClass")
+ CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
+ Context->getObjCClassType(),
+ CK_BitCast, Cls);
+ ClsExprs.clear();
+ ClsExprs.push_back(ArgExpr);
+ Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl, ClsExprs,
+ StartLoc, EndLoc);
+ // (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
+ // To turn off a warning, type-cast to 'id'
+ InitExprs.push_back( // set 'super class', using class_getSuperclass().
+ NoTypeInfoCStyleCastExpr(Context,
+ Context->getObjCIdType(),
+ CK_BitCast, Cls));
+ // struct objc_super
+ QualType superType = getSuperStructType();
+ Expr *SuperRep;
+
+ if (LangOpts.MicrosoftExt) {
+ SynthSuperConstructorFunctionDecl();
+ // Simulate a constructor call...
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperConstructorFunctionDecl,
+ false, superType, VK_LValue,
+ SourceLocation());
+ SuperRep = new (Context) CallExpr(*Context, DRE, InitExprs,
+ superType, VK_LValue,
+ SourceLocation());
+      // The code for super is a little tricky to prevent collision with
+      // the structure definition in the header. The rewriter has its own
+      // internal definition (__rw_objc_super) that it uses. This is why
+ // we need the cast below. For example:
+ // (struct objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
+ //
+ SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ SuperRep = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(superType),
+ CK_BitCast, SuperRep);
+ } else {
+ // (struct objc_super) { <exprs from above> }
+ InitListExpr *ILE =
+ new (Context) InitListExpr(*Context, SourceLocation(), InitExprs,
+ SourceLocation());
+ TypeSourceInfo *superTInfo
+ = Context->getTrivialTypeSourceInfo(superType);
+ SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo,
+ superType, VK_LValue,
+ ILE, false);
+ // struct objc_super *
+ SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ }
+ MsgExprs.push_back(SuperRep);
+ break;
+ }
+
+ case ObjCMessageExpr::Class: {
+ SmallVector<Expr*, 8> ClsExprs;
+ ObjCInterfaceDecl *Class
+ = Exp->getClassReceiver()->getAs<ObjCObjectType>()->getInterface();
+ IdentifierInfo *clsName = Class->getIdentifier();
+ ClsExprs.push_back(getStringLiteral(clsName->getName()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, ClsExprs,
+ StartLoc, EndLoc);
+ MsgExprs.push_back(Cls);
+ break;
+ }
+
+ case ObjCMessageExpr::SuperInstance:{
+ MsgSendFlavor = MsgSendSuperFunctionDecl;
+ if (MsgSendStretFlavor)
+ MsgSendStretFlavor = MsgSendSuperStretFunctionDecl;
+ assert(MsgSendFlavor && "MsgSendFlavor is NULL!");
+ ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface();
+ SmallVector<Expr*, 4> InitExprs;
+
+ InitExprs.push_back(
+ NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK_BitCast,
+ new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
+ false,
+ Context->getObjCIdType(),
+ VK_RValue, SourceLocation()))
+ ); // set the 'receiver'.
+
+ // (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
+ SmallVector<Expr*, 8> ClsExprs;
+ ClsExprs.push_back(getStringLiteral(ClassDecl->getIdentifier()->getName()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, ClsExprs,
+ StartLoc, EndLoc);
+ // (Class)objc_getClass("CurrentClass")
+ CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
+ Context->getObjCClassType(),
+ CK_BitCast, Cls);
+ ClsExprs.clear();
+ ClsExprs.push_back(ArgExpr);
+ Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl, ClsExprs,
+ StartLoc, EndLoc);
+
+ // (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
+ // To turn off a warning, type-cast to 'id'
+ InitExprs.push_back(
+ // set 'super class', using class_getSuperclass().
+ NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK_BitCast, Cls));
+ // struct objc_super
+ QualType superType = getSuperStructType();
+ Expr *SuperRep;
+
+ if (LangOpts.MicrosoftExt) {
+ SynthSuperConstructorFunctionDecl();
+ // Simulate a constructor call...
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperConstructorFunctionDecl,
+ false, superType, VK_LValue,
+ SourceLocation());
+ SuperRep = new (Context) CallExpr(*Context, DRE, InitExprs,
+ superType, VK_LValue, SourceLocation());
+      // The code for super is a little tricky to prevent collision with
+      // the structure definition in the header. The rewriter has its own
+      // internal definition (__rw_objc_super) that it uses. This is why
+ // we need the cast below. For example:
+ // (struct objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
+ //
+ SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ SuperRep = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(superType),
+ CK_BitCast, SuperRep);
+ } else {
+ // (struct objc_super) { <exprs from above> }
+ InitListExpr *ILE =
+ new (Context) InitListExpr(*Context, SourceLocation(), InitExprs,
+ SourceLocation());
+ TypeSourceInfo *superTInfo
+ = Context->getTrivialTypeSourceInfo(superType);
+ SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo,
+ superType, VK_RValue, ILE,
+ false);
+ }
+ MsgExprs.push_back(SuperRep);
+ break;
+ }
+
+ case ObjCMessageExpr::Instance: {
+    // Remove all type-casts because they may contain objc-style types; e.g.
+    // Foo<Proto> *.
+ Expr *recExpr = Exp->getInstanceReceiver();
+ while (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(recExpr))
+ recExpr = CE->getSubExpr();
+ CastKind CK = recExpr->getType()->isObjCObjectPointerType()
+ ? CK_BitCast : recExpr->getType()->isBlockPointerType()
+ ? CK_BlockPointerToObjCPointerCast
+ : CK_CPointerToObjCPointerCast;
+
+ recExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK, recExpr);
+ MsgExprs.push_back(recExpr);
+ break;
+ }
+ }
+
+  // Create a call to sel_registerName("selName"); it will be the 2nd argument.
+ SmallVector<Expr*, 8> SelExprs;
+ SelExprs.push_back(getStringLiteral(Exp->getSelector().getAsString()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ SelExprs, StartLoc, EndLoc);
+ MsgExprs.push_back(SelExp);
+
+ // Now push any user supplied arguments.
+ for (unsigned i = 0; i < Exp->getNumArgs(); i++) {
+ Expr *userExpr = Exp->getArg(i);
+ // Make all implicit casts explicit...ICE comes in handy:-)
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(userExpr)) {
+ // Reuse the ICE type, it is exactly what the doctor ordered.
+ QualType type = ICE->getType();
+ if (needToScanForQualifiers(type))
+ type = Context->getObjCIdType();
+ // Make sure we convert "type (^)(...)" to "type (*)(...)".
+ (void)convertBlockPointerToFunctionPointer(type);
+ const Expr *SubExpr = ICE->IgnoreParenImpCasts();
+ CastKind CK;
+ if (SubExpr->getType()->isIntegralType(*Context) &&
+ type->isBooleanType()) {
+ CK = CK_IntegralToBoolean;
+ } else if (type->isObjCObjectPointerType()) {
+ if (SubExpr->getType()->isBlockPointerType()) {
+ CK = CK_BlockPointerToObjCPointerCast;
+ } else if (SubExpr->getType()->isPointerType()) {
+ CK = CK_CPointerToObjCPointerCast;
+ } else {
+ CK = CK_BitCast;
+ }
+ } else {
+ CK = CK_BitCast;
+ }
+
+ userExpr = NoTypeInfoCStyleCastExpr(Context, type, CK, userExpr);
+ }
+ // Make id<P...> cast into an 'id' cast.
+ else if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(userExpr)) {
+ if (CE->getType()->isObjCQualifiedIdType()) {
+ while ((CE = dyn_cast<CStyleCastExpr>(userExpr)))
+ userExpr = CE->getSubExpr();
+ CastKind CK;
+ if (userExpr->getType()->isIntegralType(*Context)) {
+ CK = CK_IntegralToPointer;
+ } else if (userExpr->getType()->isBlockPointerType()) {
+ CK = CK_BlockPointerToObjCPointerCast;
+ } else if (userExpr->getType()->isPointerType()) {
+ CK = CK_CPointerToObjCPointerCast;
+ } else {
+ CK = CK_BitCast;
+ }
+ userExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK, userExpr);
+ }
+ }
+ MsgExprs.push_back(userExpr);
+ // We've transferred the ownership to MsgExprs. For now, we *don't* null
+ // out the argument in the original expression (since we aren't deleting
+ // the ObjCMessageExpr). See RewritePropertyOrImplicitSetter() usage for more info.
+ //Exp->setArg(i, 0);
+ }
+ // Generate the funky cast.
+ CastExpr *cast;
+ SmallVector<QualType, 8> ArgTypes;
+ QualType returnType;
+
+ // Push 'id' and 'SEL', the 2 implicit arguments.
+ if (MsgSendFlavor == MsgSendSuperFunctionDecl)
+ ArgTypes.push_back(Context->getPointerType(getSuperStructType()));
+ else
+ ArgTypes.push_back(Context->getObjCIdType());
+ ArgTypes.push_back(Context->getObjCSelType());
+ if (ObjCMethodDecl *OMD = Exp->getMethodDecl()) {
+ // Push any user argument types.
+ for (const auto *PI : OMD->params()) {
+ QualType t = PI->getType()->isObjCQualifiedIdType()
+ ? Context->getObjCIdType()
+ : PI->getType();
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ (void)convertBlockPointerToFunctionPointer(t);
+ ArgTypes.push_back(t);
+ }
+ returnType = Exp->getType();
+ convertToUnqualifiedObjCType(returnType);
+ (void)convertBlockPointerToFunctionPointer(returnType);
+ } else {
+ returnType = Context->getObjCIdType();
+ }
+  // Get the type; we will need to reference it in a couple of spots.
+ QualType msgSendType = MsgSendFlavor->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
+ VK_LValue, SourceLocation());
+
+  // Need to cast objc_msgSend to "void *" (to work around a GCC bandaid).
+ // If we don't do this cast, we get the following bizarre warning/note:
+ // xx.m:13: warning: function called through a non-compatible type
+ // xx.m:13: note: if this code is reached, the program will abort
+ cast = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->VoidTy),
+ CK_BitCast, DRE);
+
+ // Now do the "normal" pointer to function cast.
+ // If we don't have a method decl, force a variadic cast.
+ const ObjCMethodDecl *MD = Exp->getMethodDecl();
+ QualType castType =
+ getSimpleFunctionType(returnType, ArgTypes, MD ? MD->isVariadic() : true);
+ castType = Context->getPointerType(castType);
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
+ cast);
+
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+ CallExpr *CE = new (Context)
+ CallExpr(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, EndLoc);
+ Stmt *ReplacingStmt = CE;
+ if (MsgSendStretFlavor) {
+    // We have a method that returns a struct/union. We must also generate a
+    // call to objc_msgSend_stret and hang both varieties on a conditional
+    // expression that dictates which one to invoke, depending on the size of
+    // the method's return type.
+
+ CallExpr *STCE = SynthMsgSendStretCallExpr(MsgSendStretFlavor,
+ msgSendType, returnType,
+ ArgTypes, MsgExprs,
+ Exp->getMethodDecl());
+
+ // Build sizeof(returnType)
+ UnaryExprOrTypeTraitExpr *sizeofExpr =
+ new (Context) UnaryExprOrTypeTraitExpr(UETT_SizeOf,
+ Context->getTrivialTypeSourceInfo(returnType),
+ Context->getSizeType(), SourceLocation(),
+ SourceLocation());
+ // (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...))
+    // FIXME: The value of 8 is based on the ppc32/x86 ABI for the most common cases.
+ // For X86 it is more complicated and some kind of target specific routine
+ // is needed to decide what to do.
+ unsigned IntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
+ IntegerLiteral *limit = IntegerLiteral::Create(*Context,
+ llvm::APInt(IntSize, 8),
+ Context->IntTy,
+ SourceLocation());
+ BinaryOperator *lessThanExpr =
+ new (Context) BinaryOperator(sizeofExpr, limit, BO_LE, Context->IntTy,
+ VK_RValue, OK_Ordinary, SourceLocation(),
+ false);
+ // (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...))
+ ConditionalOperator *CondExpr =
+ new (Context) ConditionalOperator(lessThanExpr,
+ SourceLocation(), CE,
+ SourceLocation(), STCE,
+ returnType, VK_RValue, OK_Ordinary);
+ ReplacingStmt = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ CondExpr);
+ }
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return ReplacingStmt;
+}
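+
+// Putting the pieces together, a hedged sketch (not verbatim output):
+//   [receiver doSomething:x]
+// is rewritten to roughly
+//   ((id (*)(id, SEL, int))(void *)objc_msgSend)((id)receiver,
+//       sel_registerName("doSomething:"), x)
+// and, when the method returns a struct, the whole thing is wrapped as
+//   (sizeof(T) <= 8 ? <objc_msgSend form> : <objc_msgSend_stret form>)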
+
+Stmt *RewriteObjC::RewriteMessageExpr(ObjCMessageExpr *Exp) {
+ Stmt *ReplacingStmt = SynthMessageExpr(Exp, Exp->getLocStart(),
+ Exp->getLocEnd());
+
+ // Now do the actual rewrite.
+ ReplaceStmt(Exp, ReplacingStmt);
+
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return ReplacingStmt;
+}
+
+// typedef struct objc_object Protocol;
+QualType RewriteObjC::getProtocolType() {
+ if (!ProtocolTypeDecl) {
+ TypeSourceInfo *TInfo
+ = Context->getTrivialTypeSourceInfo(Context->getObjCIdType());
+ ProtocolTypeDecl = TypedefDecl::Create(*Context, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("Protocol"),
+ TInfo);
+ }
+ return Context->getTypeDeclType(ProtocolTypeDecl);
+}
+
+/// RewriteObjCProtocolExpr - Rewrite a protocol expression into
+/// a synthesized/forward data reference (to the protocol's metadata).
+/// The forward references (and metadata) are generated in
+/// RewriteObjC::HandleTranslationUnit().
+Stmt *RewriteObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) {
+ std::string Name = "_OBJC_PROTOCOL_" + Exp->getProtocol()->getNameAsString();
+ IdentifierInfo *ID = &Context->Idents.get(Name);
+ VarDecl *VD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
+ SourceLocation(), ID, getProtocolType(),
+ nullptr, SC_Extern);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(VD, false, getProtocolType(),
+ VK_LValue, SourceLocation());
+ Expr *DerefExpr = new (Context) UnaryOperator(DRE, UO_AddrOf,
+ Context->getPointerType(DRE->getType()),
+ VK_RValue, OK_Ordinary, SourceLocation());
+ CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, DerefExpr->getType(),
+ CK_BitCast,
+ DerefExpr);
+ ReplaceStmt(Exp, castExpr);
+ ProtocolExprDecls.insert(Exp->getProtocol()->getCanonicalDecl());
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+  return castExpr;
+}
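+
+// Schematically, @protocol(Foo) becomes roughly
+//   (Protocol *)&_OBJC_PROTOCOL_Foo
+// where the declaration of _OBJC_PROTOCOL_Foo (and its metadata) is emitted
+// later, in HandleTranslationUnit().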
+
+bool RewriteObjC::BufferContainsPPDirectives(const char *startBuf,
+ const char *endBuf) {
+ while (startBuf < endBuf) {
+ if (*startBuf == '#') {
+ // Skip whitespace.
+ for (++startBuf; startBuf[0] == ' ' || startBuf[0] == '\t'; ++startBuf)
+ ;
+ if (!strncmp(startBuf, "if", strlen("if")) ||
+ !strncmp(startBuf, "ifdef", strlen("ifdef")) ||
+ !strncmp(startBuf, "ifndef", strlen("ifndef")) ||
+ !strncmp(startBuf, "define", strlen("define")) ||
+ !strncmp(startBuf, "undef", strlen("undef")) ||
+ !strncmp(startBuf, "else", strlen("else")) ||
+ !strncmp(startBuf, "elif", strlen("elif")) ||
+ !strncmp(startBuf, "endif", strlen("endif")) ||
+ !strncmp(startBuf, "pragma", strlen("pragma")) ||
+ !strncmp(startBuf, "include", strlen("include")) ||
+ !strncmp(startBuf, "import", strlen("import")) ||
+ !strncmp(startBuf, "include_next", strlen("include_next")))
+ return true;
+ }
+ startBuf++;
+ }
+ return false;
+}
+
+/// RewriteObjCInternalStruct - Rewrite one internal struct corresponding to
+/// an objective-c class with ivars.
+void RewriteObjC::RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
+ std::string &Result) {
+ assert(CDecl && "Class missing in SynthesizeObjCInternalStruct");
+ assert(CDecl->getName() != "" &&
+ "Name missing in SynthesizeObjCInternalStruct");
+ // Do not synthesize more than once.
+ if (ObjCSynthesizedStructs.count(CDecl))
+ return;
+ ObjCInterfaceDecl *RCDecl = CDecl->getSuperClass();
+ int NumIvars = CDecl->ivar_size();
+ SourceLocation LocStart = CDecl->getLocStart();
+ SourceLocation LocEnd = CDecl->getEndOfDefinitionLoc();
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+
+  // If this class has no ivars and its root, directly or indirectly, has no
+  // ivars (and thus was not synthesized), there is no need to synthesize
+  // this class.
+ if ((!CDecl->isThisDeclarationADefinition() || NumIvars == 0) &&
+ (!RCDecl || !ObjCSynthesizedStructs.count(RCDecl))) {
+ endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts);
+ ReplaceText(LocStart, endBuf-startBuf, Result);
+ return;
+ }
+
+  // FIXME: This has the potential to cause problems if
+  // SynthesizeObjCInternalStruct is ever called recursively.
+ Result += "\nstruct ";
+ Result += CDecl->getNameAsString();
+ if (LangOpts.MicrosoftExt)
+ Result += "_IMPL";
+
+ if (NumIvars > 0) {
+ const char *cursor = strchr(startBuf, '{');
+ assert((cursor && endBuf)
+ && "SynthesizeObjCInternalStruct - malformed @interface");
+ // If the buffer contains preprocessor directives, we do more fine-grained
+ // rewrites. This is intended to fix code that looks like (which occurs in
+ // NSURL.h, for example):
+ //
+ // #ifdef XYZ
+ // @interface Foo : NSObject
+ // #else
+ // @interface FooBar : NSObject
+ // #endif
+ // {
+ // int i;
+ // }
+ // @end
+ //
+ // This clause is segregated to avoid breaking the common case.
+ if (BufferContainsPPDirectives(startBuf, cursor)) {
+ SourceLocation L = RCDecl ? CDecl->getSuperClassLoc() :
+ CDecl->getAtStartLoc();
+ const char *endHeader = SM->getCharacterData(L);
+ endHeader += Lexer::MeasureTokenLength(L, *SM, LangOpts);
+
+ if (CDecl->protocol_begin() != CDecl->protocol_end()) {
+ // advance to the end of the referenced protocols.
+ while (endHeader < cursor && *endHeader != '>') endHeader++;
+ endHeader++;
+ }
+ // rewrite the original header
+ ReplaceText(LocStart, endHeader-startBuf, Result);
+ } else {
+ // rewrite the original header *without* disturbing the '{'
+ ReplaceText(LocStart, cursor-startBuf, Result);
+ }
+ if (RCDecl && ObjCSynthesizedStructs.count(RCDecl)) {
+ Result = "\n struct ";
+ Result += RCDecl->getNameAsString();
+ Result += "_IMPL ";
+ Result += RCDecl->getNameAsString();
+ Result += "_IVARS;\n";
+
+ // insert the super class structure definition.
+ SourceLocation OnePastCurly =
+ LocStart.getLocWithOffset(cursor-startBuf+1);
+ InsertText(OnePastCurly, Result);
+ }
+ cursor++; // past '{'
+
+ // Now comment out any visibility specifiers.
+ while (cursor < endBuf) {
+ if (*cursor == '@') {
+ SourceLocation atLoc = LocStart.getLocWithOffset(cursor-startBuf);
+ // Skip whitespace.
+ for (++cursor; cursor[0] == ' ' || cursor[0] == '\t'; ++cursor)
+ /*scan*/;
+
+        // FIXME: the presence of @public, etc. inside a comment results in
+        // this transformation as well, which still produces correct C code.
+ if (!strncmp(cursor, "public", strlen("public")) ||
+ !strncmp(cursor, "private", strlen("private")) ||
+ !strncmp(cursor, "package", strlen("package")) ||
+ !strncmp(cursor, "protected", strlen("protected")))
+ InsertText(atLoc, "// ");
+ }
+ // FIXME: If there are cases where '<' is used in ivar declaration part
+ // of user code, then scan the ivar list and use needToScanForQualifiers
+ // for type checking.
+ else if (*cursor == '<') {
+ SourceLocation atLoc = LocStart.getLocWithOffset(cursor-startBuf);
+ InsertText(atLoc, "/* ");
+ cursor = strchr(cursor, '>');
+ cursor++;
+ atLoc = LocStart.getLocWithOffset(cursor-startBuf);
+ InsertText(atLoc, " */");
+ } else if (*cursor == '^') { // rewrite block specifier.
+ SourceLocation caretLoc = LocStart.getLocWithOffset(cursor-startBuf);
+ ReplaceText(caretLoc, 1, "*");
+ }
+ cursor++;
+ }
+ // Don't forget to add a ';'!!
+ InsertText(LocEnd.getLocWithOffset(1), ";");
+ } else { // we don't have any instance variables - insert super struct.
+ endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts);
+ Result += " {\n struct ";
+ Result += RCDecl->getNameAsString();
+ Result += "_IMPL ";
+ Result += RCDecl->getNameAsString();
+ Result += "_IVARS;\n};\n";
+ ReplaceText(LocStart, endBuf-startBuf, Result);
+ }
+ // Mark this struct as having been generated.
+ if (!ObjCSynthesizedStructs.insert(CDecl).second)
+ llvm_unreachable("struct already synthesize- SynthesizeObjCInternalStruct");
+}
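+
+// A hedged sketch of this rewrite (schematic):
+//   @interface Foo : Super { int ivar; } @end
+// becomes roughly
+//   struct Foo /* "Foo_IMPL" under -fms-extensions */ {
+//     struct Super_IMPL Super_IVARS; // only if Super itself was synthesized
+//     int ivar;
+//   };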
+
+//===----------------------------------------------------------------------===//
+// Meta Data Emission
+//===----------------------------------------------------------------------===//
+
+/// RewriteImplementations - This routine rewrites all method implementations
+/// and emits meta-data.
+void RewriteObjC::RewriteImplementations() {
+ int ClsDefCount = ClassImplementation.size();
+ int CatDefCount = CategoryImplementation.size();
+
+ // Rewrite implemented methods
+ for (int i = 0; i < ClsDefCount; i++)
+ RewriteImplementationDecl(ClassImplementation[i]);
+
+ for (int i = 0; i < CatDefCount; i++)
+ RewriteImplementationDecl(CategoryImplementation[i]);
+}
+
+void RewriteObjC::RewriteByRefString(std::string &ResultStr,
+ const std::string &Name,
+ ValueDecl *VD, bool def) {
+ assert(BlockByRefDeclNo.count(VD) &&
+ "RewriteByRefString: ByRef decl missing");
+ if (def)
+ ResultStr += "struct ";
+ ResultStr += "__Block_byref_" + Name +
+ "_" + utostr(BlockByRefDeclNo[VD]) ;
+}
+
+static bool HasLocalVariableExternalStorage(ValueDecl *VD) {
+ if (VarDecl *Var = dyn_cast<VarDecl>(VD))
+ return (Var->isFunctionOrMethodVarDecl() && !Var->hasLocalStorage());
+ return false;
+}
+
+std::string RewriteObjC::SynthesizeBlockFunc(BlockExpr *CE, int i,
+ StringRef funcName,
+ std::string Tag) {
+ const FunctionType *AFT = CE->getFunctionType();
+ QualType RT = AFT->getReturnType();
+ std::string StructRef = "struct " + Tag;
+ std::string S = "static " + RT.getAsString(Context->getPrintingPolicy()) + " __" +
+ funcName.str() + "_" + "block_func_" + utostr(i);
+
+ BlockDecl *BD = CE->getBlockDecl();
+
+ if (isa<FunctionNoProtoType>(AFT)) {
+ // No user-supplied arguments. Still need to pass in a pointer to the
+ // block (to reference imported block decl refs).
+ S += "(" + StructRef + " *__cself)";
+ } else if (BD->param_empty()) {
+ S += "(" + StructRef + " *__cself)";
+ } else {
+ const FunctionProtoType *FT = cast<FunctionProtoType>(AFT);
+ assert(FT && "SynthesizeBlockFunc: No function proto");
+ S += '(';
+ // first add the implicit argument.
+ S += StructRef + " *__cself, ";
+ std::string ParamStr;
+ for (BlockDecl::param_iterator AI = BD->param_begin(),
+ E = BD->param_end(); AI != E; ++AI) {
+ if (AI != BD->param_begin()) S += ", ";
+ ParamStr = (*AI)->getNameAsString();
+ QualType QT = (*AI)->getType();
+ (void)convertBlockPointerToFunctionPointer(QT);
+ QT.getAsStringInternal(ParamStr, Context->getPrintingPolicy());
+ S += ParamStr;
+ }
+ if (FT->isVariadic()) {
+ if (!BD->param_empty()) S += ", ";
+ S += "...";
+ }
+ S += ')';
+ }
+ S += " {\n";
+
+ // Create local declarations to avoid rewriting all closure decl ref exprs.
+ // First, emit a declaration for all "by ref" decls.
+ for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string Name = (*I)->getNameAsString();
+ std::string TypeString;
+ RewriteByRefString(TypeString, Name, (*I));
+ TypeString += " *";
+ Name = TypeString + Name;
+ S += Name + " = __cself->" + (*I)->getNameAsString() + "; // bound by ref\n";
+ }
+ // Next, emit a declaration for all "by copy" declarations.
+ for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ S += " ";
+ // Handle nested closure invocation. For example:
+ //
+ // void (^myImportedClosure)(void);
+ // myImportedClosure = ^(void) { setGlobalInt(x + y); };
+ //
+ // void (^anotherClosure)(void);
+ // anotherClosure = ^(void) {
+ // myImportedClosure(); // import and invoke the closure
+ // };
+ //
+ if (isTopLevelBlockPointerType((*I)->getType())) {
+ RewriteBlockPointerTypeVariable(S, (*I));
+ S += " = (";
+ RewriteBlockPointerType(S, (*I)->getType());
+ S += ")";
+ S += "__cself->" + (*I)->getNameAsString() + "; // bound by copy\n";
+ }
+ else {
+ std::string Name = (*I)->getNameAsString();
+ QualType QT = (*I)->getType();
+ if (HasLocalVariableExternalStorage(*I))
+ QT = Context->getPointerType(QT);
+ QT.getAsStringInternal(Name, Context->getPrintingPolicy());
+ S += Name + " = __cself->" +
+ (*I)->getNameAsString() + "; // bound by copy\n";
+ }
+ }
+ std::string RewrittenStr = RewrittenBlockExprs[CE];
+ const char *cstr = RewrittenStr.c_str();
+ while (*cstr++ != '{') ;
+ S += cstr;
+ S += "\n";
+ return S;
+}
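+
+// Sketch of a synthesized block function (schematic names):
+//   static void __func_block_func_0(struct __func_block_impl_0 *__cself) {
+//     __Block_byref_x_0 *x = __cself->x; // bound by ref
+//     int y = __cself->y; // bound by copy
+//     { /* original (rewritten) block body */ }
+//   }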
+
+std::string RewriteObjC::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
+ StringRef funcName,
+ std::string Tag) {
+ std::string StructRef = "struct " + Tag;
+ std::string S = "static void __";
+
+ S += funcName;
+ S += "_block_copy_" + utostr(i);
+ S += "(" + StructRef;
+ S += "*dst, " + StructRef;
+ S += "*src) {";
+ for (ValueDecl *VD : ImportedBlockDecls) {
+ S += "_Block_object_assign((void*)&dst->";
+ S += VD->getNameAsString();
+ S += ", (void*)src->";
+ S += VD->getNameAsString();
+ if (BlockByRefDeclsPtrSet.count(VD))
+ S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);";
+ else if (VD->getType()->isBlockPointerType())
+ S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);";
+ else
+ S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);";
+ }
+ S += "}\n";
+
+ S += "\nstatic void __";
+ S += funcName;
+ S += "_block_dispose_" + utostr(i);
+ S += "(" + StructRef;
+ S += "*src) {";
+ for (ValueDecl *VD : ImportedBlockDecls) {
+ S += "_Block_object_dispose((void*)src->";
+ S += VD->getNameAsString();
+ if (BlockByRefDeclsPtrSet.count(VD))
+ S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);";
+ else if (VD->getType()->isBlockPointerType())
+ S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);";
+ else
+ S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);";
+ }
+ S += "}\n";
+ return S;
+}
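+
+// Sketch of the synthesized helpers (schematic; 3 == BLOCK_FIELD_IS_OBJECT):
+//   static void __func_block_copy_0(struct __func_block_impl_0 *dst,
+//                                   struct __func_block_impl_0 *src) {
+//     _Block_object_assign((void*)&dst->obj, (void*)src->obj,
+//                          3/*BLOCK_FIELD_IS_OBJECT*/);
+//   }
+//   static void __func_block_dispose_0(struct __func_block_impl_0 *src) {
+//     _Block_object_dispose((void*)src->obj, 3/*BLOCK_FIELD_IS_OBJECT*/);
+//   }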
+
+std::string RewriteObjC::SynthesizeBlockImpl(BlockExpr *CE, std::string Tag,
+ std::string Desc) {
+ std::string S = "\nstruct " + Tag;
+ std::string Constructor = " " + Tag;
+
+ S += " {\n struct __block_impl impl;\n";
+ S += " struct " + Desc;
+ S += "* Desc;\n";
+
+ Constructor += "(void *fp, "; // Invoke function pointer.
+ Constructor += "struct " + Desc; // Descriptor pointer.
+ Constructor += " *desc";
+
+ if (BlockDeclRefs.size()) {
+ // Output all "by copy" declarations.
+ for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string FieldName = (*I)->getNameAsString();
+ std::string ArgName = "_" + FieldName;
+ // Handle nested closure invocation. For example:
+ //
+ // void (^myImportedBlock)(void);
+ // myImportedBlock = ^(void) { setGlobalInt(x + y); };
+ //
+ // void (^anotherBlock)(void);
+ // anotherBlock = ^(void) {
+ // myImportedBlock(); // import and invoke the closure
+ // };
+ //
+ if (isTopLevelBlockPointerType((*I)->getType())) {
+ S += "struct __block_impl *";
+ Constructor += ", void *" + ArgName;
+ } else {
+ QualType QT = (*I)->getType();
+ if (HasLocalVariableExternalStorage(*I))
+ QT = Context->getPointerType(QT);
+ QT.getAsStringInternal(FieldName, Context->getPrintingPolicy());
+ QT.getAsStringInternal(ArgName, Context->getPrintingPolicy());
+ Constructor += ", " + ArgName;
+ }
+ S += FieldName + ";\n";
+ }
+ // Output all "by ref" declarations.
+ for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string FieldName = (*I)->getNameAsString();
+ std::string ArgName = "_" + FieldName;
+ {
+ std::string TypeString;
+ RewriteByRefString(TypeString, FieldName, (*I));
+ TypeString += " *";
+ FieldName = TypeString + FieldName;
+ ArgName = TypeString + ArgName;
+ Constructor += ", " + ArgName;
+ }
+ S += FieldName + "; // by ref\n";
+ }
+ // Finish writing the constructor.
+ Constructor += ", int flags=0)";
+ // Initialize all "by copy" arguments.
+    bool firstTime = true;
+ for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ std::string Name = (*I)->getNameAsString();
+      if (firstTime) {
+ Constructor += " : ";
+        firstTime = false;
+ }
+ else
+ Constructor += ", ";
+ if (isTopLevelBlockPointerType((*I)->getType()))
+ Constructor += Name + "((struct __block_impl *)_" + Name + ")";
+ else
+ Constructor += Name + "(_" + Name + ")";
+ }
+ // Initialize all "by ref" arguments.
+ for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ std::string Name = (*I)->getNameAsString();
+      if (firstTime) {
+ Constructor += " : ";
+        firstTime = false;
+ }
+ else
+ Constructor += ", ";
+ Constructor += Name + "(_" + Name + "->__forwarding)";
+ }
+
+ Constructor += " {\n";
+ if (GlobalVarDecl)
+ Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n";
+ else
+ Constructor += " impl.isa = &_NSConcreteStackBlock;\n";
+ Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n";
+
+ Constructor += " Desc = desc;\n";
+ } else {
+ // Finish writing the constructor.
+ Constructor += ", int flags=0) {\n";
+ if (GlobalVarDecl)
+ Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n";
+ else
+ Constructor += " impl.isa = &_NSConcreteStackBlock;\n";
+ Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n";
+ Constructor += " Desc = desc;\n";
+ }
+ Constructor += " ";
+ Constructor += "}\n";
+ S += Constructor;
+ S += "};\n";
+ return S;
+}
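+
+// Sketch of the synthesized block impl struct (schematic names):
+//   struct __func_block_impl_0 {
+//     struct __block_impl impl;
+//     struct __func_block_desc_0 *Desc;
+//     int y;                // captured by copy
+//     __Block_byref_x_0 *x; // by ref
+//     __func_block_impl_0(void *fp, struct __func_block_desc_0 *desc,
+//                         int _y, __Block_byref_x_0 *_x, int flags=0)
+//         : y(_y), x(_x->__forwarding) {
+//       impl.isa = &_NSConcreteStackBlock;
+//       impl.Flags = flags;
+//       impl.FuncPtr = fp;
+//       Desc = desc;
+//     }
+//   };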
+
+std::string RewriteObjC::SynthesizeBlockDescriptor(std::string DescTag,
+ std::string ImplTag, int i,
+ StringRef FunName,
+ unsigned hasCopy) {
+ std::string S = "\nstatic struct " + DescTag;
+
+ S += " {\n unsigned long reserved;\n";
+ S += " unsigned long Block_size;\n";
+ if (hasCopy) {
+ S += " void (*copy)(struct ";
+ S += ImplTag; S += "*, struct ";
+ S += ImplTag; S += "*);\n";
+
+ S += " void (*dispose)(struct ";
+ S += ImplTag; S += "*);\n";
+ }
+ S += "} ";
+
+ S += DescTag + "_DATA = { 0, sizeof(struct ";
+ S += ImplTag + ")";
+ if (hasCopy) {
+ S += ", __" + FunName.str() + "_block_copy_" + utostr(i);
+ S += ", __" + FunName.str() + "_block_dispose_" + utostr(i);
+ }
+ S += "};\n";
+ return S;
+}
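+
+// Sketch of the synthesized descriptor (copy/dispose entries appear only when
+// the block imports objects):
+//   static struct __func_block_desc_0 {
+//     unsigned long reserved;
+//     unsigned long Block_size;
+//   } __func_block_desc_0_DATA = { 0, sizeof(struct __func_block_impl_0) };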
+
+void RewriteObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart,
+ StringRef FunName) {
+ // Insert declaration for the function in which block literal is used.
+ if (CurFunctionDeclToDeclareForBlock && !Blocks.empty())
+ RewriteBlockLiteralFunctionDecl(CurFunctionDeclToDeclareForBlock);
+ bool RewriteSC = (GlobalVarDecl &&
+ !Blocks.empty() &&
+ GlobalVarDecl->getStorageClass() == SC_Static &&
+ GlobalVarDecl->getType().getCVRQualifiers());
+ if (RewriteSC) {
+ std::string SC(" void __");
+ SC += GlobalVarDecl->getNameAsString();
+ SC += "() {}";
+ InsertText(FunLocStart, SC);
+ }
+
+ // Insert closures that were part of the function.
+ for (unsigned i = 0, count=0; i < Blocks.size(); i++) {
+ CollectBlockDeclRefInfo(Blocks[i]);
+ // Need to copy-in the inner copied-in variables not actually used in this
+ // block.
+ for (int j = 0; j < InnerDeclRefsCount[i]; j++) {
+ DeclRefExpr *Exp = InnerDeclRefs[count++];
+ ValueDecl *VD = Exp->getDecl();
+ BlockDeclRefs.push_back(Exp);
+ if (!VD->hasAttr<BlocksAttr>() && !BlockByCopyDeclsPtrSet.count(VD)) {
+ BlockByCopyDeclsPtrSet.insert(VD);
+ BlockByCopyDecls.push_back(VD);
+ }
+ if (VD->hasAttr<BlocksAttr>() && !BlockByRefDeclsPtrSet.count(VD)) {
+ BlockByRefDeclsPtrSet.insert(VD);
+ BlockByRefDecls.push_back(VD);
+ }
+ // imported objects in the inner blocks not used in the outer
+ // blocks must be copied/disposed in the outer block as well.
+ if (VD->hasAttr<BlocksAttr>() ||
+ VD->getType()->isObjCObjectPointerType() ||
+ VD->getType()->isBlockPointerType())
+ ImportedBlockDecls.insert(VD);
+ }
+
+ std::string ImplTag = "__" + FunName.str() + "_block_impl_" + utostr(i);
+ std::string DescTag = "__" + FunName.str() + "_block_desc_" + utostr(i);
+
+ std::string CI = SynthesizeBlockImpl(Blocks[i], ImplTag, DescTag);
+
+ InsertText(FunLocStart, CI);
+
+ std::string CF = SynthesizeBlockFunc(Blocks[i], i, FunName, ImplTag);
+
+ InsertText(FunLocStart, CF);
+
+ if (ImportedBlockDecls.size()) {
+ std::string HF = SynthesizeBlockHelperFuncs(Blocks[i], i, FunName, ImplTag);
+ InsertText(FunLocStart, HF);
+ }
+ std::string BD = SynthesizeBlockDescriptor(DescTag, ImplTag, i, FunName,
+ ImportedBlockDecls.size() > 0);
+ InsertText(FunLocStart, BD);
+
+ BlockDeclRefs.clear();
+ BlockByRefDecls.clear();
+ BlockByRefDeclsPtrSet.clear();
+ BlockByCopyDecls.clear();
+ BlockByCopyDeclsPtrSet.clear();
+ ImportedBlockDecls.clear();
+ }
+ if (RewriteSC) {
+    // Must re-insert any 'const'/'volatile'/'static' here, since it was
+    // removed as a result of rewriting the block literals.
+ std::string SC;
+ if (GlobalVarDecl->getStorageClass() == SC_Static)
+ SC = "static ";
+ if (GlobalVarDecl->getType().isConstQualified())
+ SC += "const ";
+ if (GlobalVarDecl->getType().isVolatileQualified())
+ SC += "volatile ";
+ if (GlobalVarDecl->getType().isRestrictQualified())
+ SC += "restrict ";
+ InsertText(FunLocStart, SC);
+ }
+
+ Blocks.clear();
+ InnerDeclRefsCount.clear();
+ InnerDeclRefs.clear();
+ RewrittenBlockExprs.clear();
+}
+
+void RewriteObjC::InsertBlockLiteralsWithinFunction(FunctionDecl *FD) {
+ SourceLocation FunLocStart = FD->getTypeSpecStartLoc();
+ StringRef FuncName = FD->getName();
+
+ SynthesizeBlockLiterals(FunLocStart, FuncName);
+}
+
+static void BuildUniqueMethodName(std::string &Name,
+ ObjCMethodDecl *MD) {
+ ObjCInterfaceDecl *IFace = MD->getClassInterface();
+ Name = IFace->getName();
+ Name += "__" + MD->getSelector().getAsString();
+ // Convert colons to underscores.
+ std::string::size_type loc = 0;
+ while ((loc = Name.find(":", loc)) != std::string::npos)
+ Name.replace(loc, 1, "_");
+}
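+
+// e.g. the method -[Foo doX:withY:] yields the name "Foo__doX_withY_"
+// (colons replaced by underscores).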
+
+void RewriteObjC::InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD) {
+ //fprintf(stderr,"In InsertBlockLiteralsWitinMethod\n");
+ //SourceLocation FunLocStart = MD->getLocStart();
+ SourceLocation FunLocStart = MD->getLocStart();
+ std::string FuncName;
+ BuildUniqueMethodName(FuncName, MD);
+ SynthesizeBlockLiterals(FunLocStart, FuncName);
+}
+
+void RewriteObjC::GetBlockDeclRefExprs(Stmt *S) {
+ for (Stmt *SubStmt : S->children())
+ if (SubStmt) {
+ if (BlockExpr *CBE = dyn_cast<BlockExpr>(SubStmt))
+ GetBlockDeclRefExprs(CBE->getBody());
+ else
+ GetBlockDeclRefExprs(SubStmt);
+ }
+ // Handle specific things.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S))
+ if (DRE->refersToEnclosingVariableOrCapture() ||
+ HasLocalVariableExternalStorage(DRE->getDecl()))
+ // FIXME: Handle enums.
+ BlockDeclRefs.push_back(DRE);
+
+ return;
+}
+
+void RewriteObjC::GetInnerBlockDeclRefExprs(Stmt *S,
+ SmallVectorImpl<DeclRefExpr *> &InnerBlockDeclRefs,
+ llvm::SmallPtrSetImpl<const DeclContext *> &InnerContexts) {
+ for (Stmt *SubStmt : S->children())
+ if (SubStmt) {
+ if (BlockExpr *CBE = dyn_cast<BlockExpr>(SubStmt)) {
+ InnerContexts.insert(cast<DeclContext>(CBE->getBlockDecl()));
+ GetInnerBlockDeclRefExprs(CBE->getBody(),
+ InnerBlockDeclRefs,
+ InnerContexts);
+ }
+ else
+ GetInnerBlockDeclRefExprs(SubStmt, InnerBlockDeclRefs, InnerContexts);
+ }
+ // Handle specific things.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
+ if (DRE->refersToEnclosingVariableOrCapture() ||
+ HasLocalVariableExternalStorage(DRE->getDecl())) {
+ if (!InnerContexts.count(DRE->getDecl()->getDeclContext()))
+ InnerBlockDeclRefs.push_back(DRE);
+      if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (Var->isFunctionOrMethodVarDecl())
+ ImportedLocalExternalDecls.insert(Var);
+ }
+ }
+
+ return;
+}
+
+/// convertFunctionTypeOfBlocks - This routine converts a function type
+/// whose result type may be a block pointer or whose argument type(s)
+/// might be block pointers to an equivalent function type replacing
+/// all block pointers to function pointers.
+QualType RewriteObjC::convertFunctionTypeOfBlocks(const FunctionType *FT) {
+ const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FT);
+ // FTP will be null for closures that don't take arguments.
+ // Generate a funky cast.
+ SmallVector<QualType, 8> ArgTypes;
+ QualType Res = FT->getReturnType();
+ bool HasBlockType = convertBlockPointerToFunctionPointer(Res);
+
+ if (FTP) {
+ for (auto &I : FTP->param_types()) {
+ QualType t = I;
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ if (convertBlockPointerToFunctionPointer(t))
+ HasBlockType = true;
+ ArgTypes.push_back(t);
+ }
+ }
+ QualType FuncType;
+ // FIXME. Does this work if block takes no argument but has a return type
+ // which is of block type?
+ if (HasBlockType)
+ FuncType = getSimpleFunctionType(Res, ArgTypes);
+ else FuncType = QualType(FT, 0);
+ return FuncType;
+}
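+
+// e.g. (sketch): the function type  void (int (^)(void))  is converted to
+//                void (int (*)(void))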
+
+Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
+ // Navigate to relevant type information.
+ const BlockPointerType *CPT = nullptr;
+
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BlockExp)) {
+ CPT = DRE->getType()->getAs<BlockPointerType>();
+ } else if (const MemberExpr *MExpr = dyn_cast<MemberExpr>(BlockExp)) {
+ CPT = MExpr->getType()->getAs<BlockPointerType>();
+ }
+ else if (const ParenExpr *PRE = dyn_cast<ParenExpr>(BlockExp)) {
+ return SynthesizeBlockCall(Exp, PRE->getSubExpr());
+ }
+ else if (const ImplicitCastExpr *IEXPR = dyn_cast<ImplicitCastExpr>(BlockExp))
+ CPT = IEXPR->getType()->getAs<BlockPointerType>();
+ else if (const ConditionalOperator *CEXPR =
+ dyn_cast<ConditionalOperator>(BlockExp)) {
+ Expr *LHSExp = CEXPR->getLHS();
+ Stmt *LHSStmt = SynthesizeBlockCall(Exp, LHSExp);
+ Expr *RHSExp = CEXPR->getRHS();
+ Stmt *RHSStmt = SynthesizeBlockCall(Exp, RHSExp);
+ Expr *CONDExp = CEXPR->getCond();
+ ConditionalOperator *CondExpr =
+ new (Context) ConditionalOperator(CONDExp,
+ SourceLocation(), cast<Expr>(LHSStmt),
+ SourceLocation(), cast<Expr>(RHSStmt),
+ Exp->getType(), VK_RValue, OK_Ordinary);
+ return CondExpr;
+ } else if (const ObjCIvarRefExpr *IRE = dyn_cast<ObjCIvarRefExpr>(BlockExp)) {
+ CPT = IRE->getType()->getAs<BlockPointerType>();
+ } else if (const PseudoObjectExpr *POE
+ = dyn_cast<PseudoObjectExpr>(BlockExp)) {
+ CPT = POE->getType()->castAs<BlockPointerType>();
+ } else {
+    assert(false && "RewriteBlockClass: Bad type");
+ }
+ assert(CPT && "RewriteBlockClass: Bad type");
+ const FunctionType *FT = CPT->getPointeeType()->getAs<FunctionType>();
+ assert(FT && "RewriteBlockClass: Bad type");
+ const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FT);
+ // FTP will be null for closures that don't take arguments.
+
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("__block_impl"));
+ QualType PtrBlock = Context->getPointerType(Context->getTagDeclType(RD));
+
+ // Generate a funky cast.
+ SmallVector<QualType, 8> ArgTypes;
+
+ // Push the block argument type.
+ ArgTypes.push_back(PtrBlock);
+ if (FTP) {
+ for (auto &I : FTP->param_types()) {
+ QualType t = I;
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ if (!convertBlockPointerToFunctionPointer(t))
+ convertToUnqualifiedObjCType(t);
+ ArgTypes.push_back(t);
+ }
+ }
+ // Now do the pointer to function cast.
+ QualType PtrToFuncCastType = getSimpleFunctionType(Exp->getType(), ArgTypes);
+
+ PtrToFuncCastType = Context->getPointerType(PtrToFuncCastType);
+
+ CastExpr *BlkCast = NoTypeInfoCStyleCastExpr(Context, PtrBlock,
+ CK_BitCast,
+ const_cast<Expr*>(BlockExp));
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ BlkCast);
+
+ FieldDecl *FD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get("FuncPtr"),
+ Context->VoidPtrTy, nullptr,
+ /*BitWidth=*/nullptr, /*Mutable=*/true,
+ ICIS_NoInit);
+ MemberExpr *ME =
+ new (Context) MemberExpr(PE, true, SourceLocation(), FD, SourceLocation(),
+ FD->getType(), VK_LValue, OK_Ordinary);
+
+ CastExpr *FunkCast = NoTypeInfoCStyleCastExpr(Context, PtrToFuncCastType,
+ CK_BitCast, ME);
+ PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), FunkCast);
+
+ SmallVector<Expr*, 8> BlkExprs;
+ // Add the implicit argument.
+ BlkExprs.push_back(BlkCast);
+ // Add the user arguments.
+ for (CallExpr::arg_iterator I = Exp->arg_begin(),
+ E = Exp->arg_end(); I != E; ++I) {
+ BlkExprs.push_back(*I);
+ }
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, BlkExprs,
+ Exp->getType(), VK_RValue,
+ SourceLocation());
+ return CE;
+}
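+
+// Sketch of the synthesized call (schematic): for  blk(a)  this builds roughly
+//   ((ReturnType (*)(struct __block_impl *, ArgType))
+//       ((struct __block_impl *)blk)->FuncPtr)((struct __block_impl *)blk, a)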
+
+// We need to return the rewritten expression to handle cases where the
+// BlockDeclRefExpr is embedded in another expression being rewritten.
+// For example:
+//
+// int main() {
+// __block Foo *f;
+// __block int i;
+//
+// void (^myblock)() = ^() {
+// [f test]; // f is a BlockDeclRefExpr embedded in a message (which is being rewritten).
+// i = 77;
+// };
+//}
+Stmt *RewriteObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) {
+ // Rewrite the byref variable into BYREFVAR->__forwarding->BYREFVAR
+ // for each DeclRefExp where BYREFVAR is name of the variable.
+ ValueDecl *VD = DeclRefExp->getDecl();
+ bool isArrow = DeclRefExp->refersToEnclosingVariableOrCapture() ||
+ HasLocalVariableExternalStorage(DeclRefExp->getDecl());
+
+ FieldDecl *FD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get("__forwarding"),
+ Context->VoidPtrTy, nullptr,
+ /*BitWidth=*/nullptr, /*Mutable=*/true,
+ ICIS_NoInit);
+ MemberExpr *ME = new (Context)
+ MemberExpr(DeclRefExp, isArrow, SourceLocation(), FD, SourceLocation(),
+ FD->getType(), VK_LValue, OK_Ordinary);
+
+ StringRef Name = VD->getName();
+ FD = FieldDecl::Create(*Context, nullptr, SourceLocation(), SourceLocation(),
+ &Context->Idents.get(Name),
+ Context->VoidPtrTy, nullptr,
+ /*BitWidth=*/nullptr, /*Mutable=*/true,
+ ICIS_NoInit);
+ ME =
+ new (Context) MemberExpr(ME, true, SourceLocation(), FD, SourceLocation(),
+ DeclRefExp->getType(), VK_LValue, OK_Ordinary);
+
+ // Need parens to enforce precedence.
+ ParenExpr *PE = new (Context) ParenExpr(DeclRefExp->getExprLoc(),
+ DeclRefExp->getExprLoc(),
+ ME);
+ ReplaceStmt(DeclRefExp, PE);
+ return PE;
+}
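+
+// e.g. a reference to the __block variable 'x' becomes roughly
+//   (x->__forwarding->x)  // inside a block (the capture is a pointer)
+// or
+//   (x.__forwarding->x)   // in the declaring scope (x is the byref struct)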
+
+// Rewrites the imported local variable V with external storage
+// (static, extern, etc.) as *V
+//
+Stmt *RewriteObjC::RewriteLocalVariableExternalStorage(DeclRefExpr *DRE) {
+ ValueDecl *VD = DRE->getDecl();
+ if (VarDecl *Var = dyn_cast<VarDecl>(VD))
+ if (!ImportedLocalExternalDecls.count(Var))
+ return DRE;
+ Expr *Exp = new (Context) UnaryOperator(DRE, UO_Deref, DRE->getType(),
+ VK_LValue, OK_Ordinary,
+ DRE->getLocation());
+ // Need parens to enforce precedence.
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ Exp);
+ ReplaceStmt(DRE, PE);
+ return PE;
+}
+
+void RewriteObjC::RewriteCastExpr(CStyleCastExpr *CE) {
+ SourceLocation LocStart = CE->getLParenLoc();
+ SourceLocation LocEnd = CE->getRParenLoc();
+
+ // Need to avoid trying to rewrite synthesized casts.
+ if (LocStart.isInvalid())
+ return;
+ // Need to avoid trying to rewrite casts contained in macros.
+ if (!Rewriter::isRewritable(LocStart) || !Rewriter::isRewritable(LocEnd))
+ return;
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ QualType QT = CE->getType();
+ const Type* TypePtr = QT->getAs<Type>();
+ if (isa<TypeOfExprType>(TypePtr)) {
+ const TypeOfExprType *TypeOfExprTypePtr = cast<TypeOfExprType>(TypePtr);
+ QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType();
+ std::string TypeAsString = "(";
+ RewriteBlockPointerType(TypeAsString, QT);
+ TypeAsString += ")";
+ ReplaceText(LocStart, endBuf-startBuf+1, TypeAsString);
+ return;
+ }
+ // advance the location to startArgList.
+ const char *argPtr = startBuf;
+
+ while (*argPtr++ && (argPtr < endBuf)) {
+ switch (*argPtr) {
+ case '^':
+ // Replace the '^' with '*'.
+ LocStart = LocStart.getLocWithOffset(argPtr-startBuf);
+ ReplaceText(LocStart, 1, "*");
+ break;
+ }
+ }
+ return;
+}
+
+void RewriteObjC::RewriteBlockPointerFunctionArgs(FunctionDecl *FD) {
+ SourceLocation DeclLoc = FD->getLocation();
+ unsigned parenCount = 0;
+
+ // We have 1 or more arguments that have closure pointers.
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ const char *startArgList = strchr(startBuf, '(');
+
+ assert((*startArgList == '(') && "Rewriter fuzzy parser confused");
+
+ parenCount++;
+ // advance the location to startArgList.
+ DeclLoc = DeclLoc.getLocWithOffset(startArgList-startBuf);
+ assert((DeclLoc.isValid()) && "Invalid DeclLoc");
+
+ const char *argPtr = startArgList;
+
+ while (*argPtr++ && parenCount) {
+ switch (*argPtr) {
+ case '^':
+ // Replace the '^' with '*'.
+ DeclLoc = DeclLoc.getLocWithOffset(argPtr-startArgList);
+ ReplaceText(DeclLoc, 1, "*");
+ break;
+ case '(':
+ parenCount++;
+ break;
+ case ')':
+ parenCount--;
+ break;
+ }
+ }
+ return;
+}
+
+bool RewriteObjC::PointerTypeTakesAnyBlockArguments(QualType QT) {
+ const FunctionProtoType *FTP;
+ const PointerType *PT = QT->getAs<PointerType>();
+ if (PT) {
+ FTP = PT->getPointeeType()->getAs<FunctionProtoType>();
+ } else {
+ const BlockPointerType *BPT = QT->getAs<BlockPointerType>();
+ assert(BPT && "BlockPointerTypeTakeAnyBlockArguments(): not a block pointer type");
+ FTP = BPT->getPointeeType()->getAs<FunctionProtoType>();
+ }
+ if (FTP) {
+ for (const auto &I : FTP->param_types())
+ if (isTopLevelBlockPointerType(I))
+ return true;
+ }
+ return false;
+}
+
+bool RewriteObjC::PointerTypeTakesAnyObjCQualifiedType(QualType QT) {
+ const FunctionProtoType *FTP;
+ const PointerType *PT = QT->getAs<PointerType>();
+ if (PT) {
+ FTP = PT->getPointeeType()->getAs<FunctionProtoType>();
+ } else {
+ const BlockPointerType *BPT = QT->getAs<BlockPointerType>();
+ assert(BPT && "BlockPointerTypeTakeAnyBlockArguments(): not a block pointer type");
+ FTP = BPT->getPointeeType()->getAs<FunctionProtoType>();
+ }
+ if (FTP) {
+ for (const auto &I : FTP->param_types()) {
+ if (I->isObjCQualifiedIdType())
+ return true;
+ if (I->isObjCObjectPointerType() &&
+ I->getPointeeType()->isObjCQualifiedInterfaceType())
+ return true;
+ }
+
+ }
+ return false;
+}
+
+void RewriteObjC::GetExtentOfArgList(const char *Name, const char *&LParen,
+ const char *&RParen) {
+ const char *argPtr = strchr(Name, '(');
+ assert((*argPtr == '(') && "Rewriter fuzzy parser confused");
+
+ LParen = argPtr; // output the start.
+ argPtr++; // skip past the left paren.
+ unsigned parenCount = 1;
+
+ while (*argPtr && parenCount) {
+ switch (*argPtr) {
+ case '(': parenCount++; break;
+ case ')': parenCount--; break;
+ default: break;
+ }
+ if (parenCount) argPtr++;
+ }
+ assert((*argPtr == ')') && "Rewriter fuzzy parser confused");
+ RParen = argPtr; // output the end
+}
+
+void RewriteObjC::RewriteBlockPointerDecl(NamedDecl *ND) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ RewriteBlockPointerFunctionArgs(FD);
+ return;
+ }
+ // Handle Variables and Typedefs.
+ SourceLocation DeclLoc = ND->getLocation();
+ QualType DeclT;
+ if (VarDecl *VD = dyn_cast<VarDecl>(ND))
+ DeclT = VD->getType();
+ else if (TypedefNameDecl *TDD = dyn_cast<TypedefNameDecl>(ND))
+ DeclT = TDD->getUnderlyingType();
+ else if (FieldDecl *FD = dyn_cast<FieldDecl>(ND))
+ DeclT = FD->getType();
+ else
+ llvm_unreachable("RewriteBlockPointerDecl(): Decl type not yet handled");
+
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ const char *endBuf = startBuf;
+ // scan backward (from the decl location) for the end of the previous decl.
+ while (*startBuf != '^' && *startBuf != ';' && startBuf != MainFileStart)
+ startBuf--;
+ SourceLocation Start = DeclLoc.getLocWithOffset(startBuf-endBuf);
+ std::string buf;
+ unsigned OrigLength=0;
+ // *startBuf != '^' if we are dealing with a pointer to function that
+ // may take block argument types (which will be handled below).
+ if (*startBuf == '^') {
+ // Replace the '^' with '*', computing a negative offset.
+ buf = '*';
+ startBuf++;
+ OrigLength++;
+ }
+ while (*startBuf != ')') {
+ buf += *startBuf;
+ startBuf++;
+ OrigLength++;
+ }
+ buf += ')';
+ OrigLength++;
+
+ if (PointerTypeTakesAnyBlockArguments(DeclT) ||
+ PointerTypeTakesAnyObjCQualifiedType(DeclT)) {
+ // Replace the '^' with '*' for arguments.
+ // Replace id<P> with id/*<>*/
+ DeclLoc = ND->getLocation();
+ startBuf = SM->getCharacterData(DeclLoc);
+ const char *argListBegin, *argListEnd;
+ GetExtentOfArgList(startBuf, argListBegin, argListEnd);
+ while (argListBegin < argListEnd) {
+ if (*argListBegin == '^')
+ buf += '*';
+ else if (*argListBegin == '<') {
+ buf += "/*";
+ buf += *argListBegin++;
+ OrigLength++;
+ while (*argListBegin != '>') {
+ buf += *argListBegin++;
+ OrigLength++;
+ }
+ buf += *argListBegin;
+ buf += "*/";
+ }
+ else
+ buf += *argListBegin;
+ argListBegin++;
+ OrigLength++;
+ }
+ buf += ')';
+ OrigLength++;
+ }
+ ReplaceText(Start, OrigLength, buf);
+
+ return;
+}
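+
+// e.g. (sketch):  void (^blk)(id<Proto>);  is rewritten in place to
+//                 void (*blk)(id/*<Proto>*/);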
+
+/// SynthesizeByrefCopyDestroyHelper - This routine synthesizes:
+/// void __Block_byref_id_object_copy(struct Block_byref_id_object *dst,
+/// struct Block_byref_id_object *src) {
+/// _Block_object_assign (&_dest->object, _src->object,
+/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT
+/// [|BLOCK_FIELD_IS_WEAK]) // object
+/// _Block_object_assign(&_dest->object, _src->object,
+/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK
+/// [|BLOCK_FIELD_IS_WEAK]) // block
+/// }
+/// And:
+/// void __Block_byref_id_object_dispose(struct Block_byref_id_object *_src) {
+/// _Block_object_dispose(_src->object,
+/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT
+/// [|BLOCK_FIELD_IS_WEAK]) // object
+/// _Block_object_dispose(_src->object,
+/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK
+/// [|BLOCK_FIELD_IS_WEAK]) // block
+/// }
+std::string RewriteObjC::SynthesizeByrefCopyDestroyHelper(VarDecl *VD,
+ int flag) {
+ std::string S;
+ if (CopyDestroyCache.count(flag))
+ return S;
+ CopyDestroyCache.insert(flag);
+ S = "static void __Block_byref_id_object_copy_";
+ S += utostr(flag);
+ S += "(void *dst, void *src) {\n";
+
+ // offset into the object pointer is computed as:
+ // void * + void* + int + int + void* + void *
+ unsigned IntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
+ unsigned VoidPtrSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->VoidPtrTy));
+
+ unsigned offset = (VoidPtrSize*4 + IntSize + IntSize)/Context->getCharWidth();
+ S += " _Block_object_assign((char*)dst + ";
+ S += utostr(offset);
+ S += ", *(void * *) ((char*)src + ";
+ S += utostr(offset);
+ S += "), ";
+ S += utostr(flag);
+ S += ");\n}\n";
+
+ S += "static void __Block_byref_id_object_dispose_";
+ S += utostr(flag);
+ S += "(void *src) {\n";
+ S += " _Block_object_dispose(*(void * *) ((char*)src + ";
+ S += utostr(offset);
+ S += "), ";
+ S += utostr(flag);
+ S += ");\n}\n";
+ return S;
+}
+
+/// RewriteByRefVar - For each __block typex ND variable this routine transforms
+/// the declaration into:
+/// struct __Block_byref_ND {
+/// void *__isa; // NULL for everything except __weak pointers
+/// struct __Block_byref_ND *__forwarding;
+/// int32_t __flags;
+/// int32_t __size;
+/// void *__Block_byref_id_object_copy; // If variable is __block ObjC object
+/// void *__Block_byref_id_object_dispose; // If variable is __block ObjC object
+/// typex ND;
+/// };
+///
+/// It then replaces declaration of ND variable with:
+/// struct __Block_byref_ND ND = {__isa=0B, __forwarding=&ND, __flags=some_flag,
+/// __size=sizeof(struct __Block_byref_ND),
+/// ND=initializer-if-any};
+///
+///
+void RewriteObjC::RewriteByRefVar(VarDecl *ND) {
+ // Insert declaration for the function in which block literal is
+ // used.
+ if (CurFunctionDeclToDeclareForBlock)
+ RewriteBlockLiteralFunctionDecl(CurFunctionDeclToDeclareForBlock);
+ int flag = 0;
+ int isa = 0;
+ SourceLocation DeclLoc = ND->getTypeSpecStartLoc();
+ if (DeclLoc.isInvalid())
+    // If the type location is missing, it is because the type itself is
+    // missing (which produced a warning). Use the variable's location, which
+    // is good for this case.
+ DeclLoc = ND->getLocation();
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ SourceLocation X = ND->getLocEnd();
+ X = SM->getExpansionLoc(X);
+ const char *endBuf = SM->getCharacterData(X);
+ std::string Name(ND->getNameAsString());
+ std::string ByrefType;
+ RewriteByRefString(ByrefType, Name, ND, true);
+ ByrefType += " {\n";
+ ByrefType += " void *__isa;\n";
+ RewriteByRefString(ByrefType, Name, ND);
+ ByrefType += " *__forwarding;\n";
+ ByrefType += " int __flags;\n";
+ ByrefType += " int __size;\n";
+ // Add void *__Block_byref_id_object_copy;
+ // void *__Block_byref_id_object_dispose; if needed.
+ QualType Ty = ND->getType();
+ bool HasCopyAndDispose = Context->BlockRequiresCopying(Ty, ND);
+ if (HasCopyAndDispose) {
+ ByrefType += " void (*__Block_byref_id_object_copy)(void*, void*);\n";
+ ByrefType += " void (*__Block_byref_id_object_dispose)(void*);\n";
+ }
+
+ QualType T = Ty;
+ (void)convertBlockPointerToFunctionPointer(T);
+ T.getAsStringInternal(Name, Context->getPrintingPolicy());
+
+ ByrefType += " " + Name + ";\n";
+ ByrefType += "};\n";
+  // Insert this type in global scope. It is needed by the helper functions.
+ SourceLocation FunLocStart;
+ if (CurFunctionDef)
+ FunLocStart = CurFunctionDef->getTypeSpecStartLoc();
+ else {
+ assert(CurMethodDef && "RewriteByRefVar - CurMethodDef is null");
+ FunLocStart = CurMethodDef->getLocStart();
+ }
+ InsertText(FunLocStart, ByrefType);
+ if (Ty.isObjCGCWeak()) {
+ flag |= BLOCK_FIELD_IS_WEAK;
+ isa = 1;
+ }
+
+ if (HasCopyAndDispose) {
+ flag = BLOCK_BYREF_CALLER;
+ QualType Ty = ND->getType();
+ // FIXME. Handle __weak variable (BLOCK_FIELD_IS_WEAK) as well.
+ if (Ty->isBlockPointerType())
+ flag |= BLOCK_FIELD_IS_BLOCK;
+ else
+ flag |= BLOCK_FIELD_IS_OBJECT;
+ std::string HF = SynthesizeByrefCopyDestroyHelper(ND, flag);
+ if (!HF.empty())
+ InsertText(FunLocStart, HF);
+ }
+
+ // struct __Block_byref_ND ND =
+ // {0, &ND, some_flag, __size=sizeof(struct __Block_byref_ND),
+ // initializer-if-any};
+ bool hasInit = (ND->getInit() != nullptr);
+ unsigned flags = 0;
+ if (HasCopyAndDispose)
+ flags |= BLOCK_HAS_COPY_DISPOSE;
+ Name = ND->getNameAsString();
+ ByrefType.clear();
+ RewriteByRefString(ByrefType, Name, ND);
+ std::string ForwardingCastType("(");
+ ForwardingCastType += ByrefType + " *)";
+ if (!hasInit) {
+ ByrefType += " " + Name + " = {(void*)";
+ ByrefType += utostr(isa);
+ ByrefType += "," + ForwardingCastType + "&" + Name + ", ";
+ ByrefType += utostr(flags);
+ ByrefType += ", ";
+ ByrefType += "sizeof(";
+ RewriteByRefString(ByrefType, Name, ND);
+ ByrefType += ")";
+ if (HasCopyAndDispose) {
+ ByrefType += ", __Block_byref_id_object_copy_";
+ ByrefType += utostr(flag);
+ ByrefType += ", __Block_byref_id_object_dispose_";
+ ByrefType += utostr(flag);
+ }
+ ByrefType += "};\n";
+ unsigned nameSize = Name.size();
+    // For block or function pointer declarations, the name is already
+    // part of the declaration.
+ if (Ty->isBlockPointerType() || Ty->isFunctionPointerType())
+ nameSize = 1;
+ ReplaceText(DeclLoc, endBuf-startBuf+nameSize, ByrefType);
+ }
+ else {
+ SourceLocation startLoc;
+ Expr *E = ND->getInit();
+ if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E))
+ startLoc = ECE->getLParenLoc();
+ else
+ startLoc = E->getLocStart();
+ startLoc = SM->getExpansionLoc(startLoc);
+ endBuf = SM->getCharacterData(startLoc);
+ ByrefType += " " + Name;
+ ByrefType += " = {(void*)";
+ ByrefType += utostr(isa);
+ ByrefType += "," + ForwardingCastType + "&" + Name + ", ";
+ ByrefType += utostr(flags);
+ ByrefType += ", ";
+ ByrefType += "sizeof(";
+ RewriteByRefString(ByrefType, Name, ND);
+ ByrefType += "), ";
+ if (HasCopyAndDispose) {
+ ByrefType += "__Block_byref_id_object_copy_";
+ ByrefType += utostr(flag);
+ ByrefType += ", __Block_byref_id_object_dispose_";
+ ByrefType += utostr(flag);
+ ByrefType += ", ";
+ }
+ ReplaceText(DeclLoc, endBuf-startBuf, ByrefType);
+
+ // Complete the newly synthesized compound expression by inserting a right
+ // curly brace before the end of the declaration.
+ // FIXME: This approach avoids rewriting the initializer expression. It
+ // also assumes there is only one declarator. For example, the following
+ // isn't currently supported by this routine (in general):
+ //
+ // double __block BYREFVAR = 1.34, BYREFVAR2 = 1.37;
+ //
+ const char *startInitializerBuf = SM->getCharacterData(startLoc);
+ const char *semiBuf = strchr(startInitializerBuf, ';');
+ assert((*semiBuf == ';') && "RewriteByRefVar: can't find ';'");
+ SourceLocation semiLoc =
+ startLoc.getLocWithOffset(semiBuf-startInitializerBuf);
+
+ InsertText(semiLoc, "}");
+ }
+ return;
+}
+
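+/// CollectBlockDeclRefInfo - Walk the block body, partitioning the referenced
+/// declarations into "by copy" and "by ref" lists, and record any imported
+/// blocks or Objective-C object pointers that will need copy/dispose helpers.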
+void RewriteObjC::CollectBlockDeclRefInfo(BlockExpr *Exp) {
+ // Add initializers for any closure decl refs.
+ GetBlockDeclRefExprs(Exp->getBody());
+ if (BlockDeclRefs.size()) {
+ // Unique all "by copy" declarations.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (!BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>()) {
+ if (!BlockByCopyDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) {
+ BlockByCopyDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl());
+ BlockByCopyDecls.push_back(BlockDeclRefs[i]->getDecl());
+ }
+ }
+ // Unique all "by ref" declarations.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>()) {
+ if (!BlockByRefDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) {
+ BlockByRefDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl());
+ BlockByRefDecls.push_back(BlockDeclRefs[i]->getDecl());
+ }
+ }
+ // Find any imported blocks...they will need special attention.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>() ||
+ BlockDeclRefs[i]->getType()->isObjCObjectPointerType() ||
+ BlockDeclRefs[i]->getType()->isBlockPointerType())
+ ImportedBlockDecls.insert(BlockDeclRefs[i]->getDecl());
+ }
+}
+
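+/// SynthBlockInitFunctionDecl - Synthesize an extern function declaration
+/// with no prototype, returning void *, used below to simulate constructor
+/// and helper calls by name.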
+FunctionDecl *RewriteObjC::SynthBlockInitFunctionDecl(StringRef name) {
+ IdentifierInfo *ID = &Context->Idents.get(name);
+ QualType FType = Context->getFunctionNoProtoType(Context->VoidPtrTy);
+ return FunctionDecl::Create(*Context, TUDecl, SourceLocation(),
+ SourceLocation(), ID, FType, nullptr, SC_Extern,
+ false, false);
+}
+
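+/// SynthBlockInitExpr - Synthesize the expression that replaces a block
+/// literal. As an illustrative sketch only (the mangled names below are
+/// hypothetical), a block numbered 0 in function "foo" becomes roughly:
+///   &__foo_block_impl_0((void *)__foo_block_func_0,
+///                       &__foo_block_desc_0_DATA, <captured decls...>)
+/// cast to a pointer to the block's function type.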
+Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
+ const SmallVectorImpl<DeclRefExpr *> &InnerBlockDeclRefs) {
+ const BlockDecl *block = Exp->getBlockDecl();
+ Blocks.push_back(Exp);
+
+ CollectBlockDeclRefInfo(Exp);
+
+ // Add inner imported variables now used in current block.
+ int countOfInnerDecls = 0;
+ if (!InnerBlockDeclRefs.empty()) {
+ for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++) {
+ DeclRefExpr *Exp = InnerBlockDeclRefs[i];
+ ValueDecl *VD = Exp->getDecl();
+ if (!VD->hasAttr<BlocksAttr>() && !BlockByCopyDeclsPtrSet.count(VD)) {
+        // Save the variables copied into nested blocks; they are needed
+        // later when some of the APIs are generated.
+        // See the SynthesizeBlockLiterals routine.
+ InnerDeclRefs.push_back(Exp); countOfInnerDecls++;
+ BlockDeclRefs.push_back(Exp);
+ BlockByCopyDeclsPtrSet.insert(VD);
+ BlockByCopyDecls.push_back(VD);
+ }
+ if (VD->hasAttr<BlocksAttr>() && !BlockByRefDeclsPtrSet.count(VD)) {
+ InnerDeclRefs.push_back(Exp); countOfInnerDecls++;
+ BlockDeclRefs.push_back(Exp);
+ BlockByRefDeclsPtrSet.insert(VD);
+ BlockByRefDecls.push_back(VD);
+ }
+ }
+ // Find any imported blocks...they will need special attention.
+ for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++)
+ if (InnerBlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>() ||
+ InnerBlockDeclRefs[i]->getType()->isObjCObjectPointerType() ||
+ InnerBlockDeclRefs[i]->getType()->isBlockPointerType())
+ ImportedBlockDecls.insert(InnerBlockDeclRefs[i]->getDecl());
+ }
+ InnerDeclRefsCount.push_back(countOfInnerDecls);
+
+ std::string FuncName;
+
+ if (CurFunctionDef)
+ FuncName = CurFunctionDef->getNameAsString();
+ else if (CurMethodDef)
+ BuildUniqueMethodName(FuncName, CurMethodDef);
+ else if (GlobalVarDecl)
+ FuncName = std::string(GlobalVarDecl->getNameAsString());
+
+ std::string BlockNumber = utostr(Blocks.size()-1);
+
+ std::string Tag = "__" + FuncName + "_block_impl_" + BlockNumber;
+ std::string Func = "__" + FuncName + "_block_func_" + BlockNumber;
+
+ // Get a pointer to the function type so we can cast appropriately.
+ QualType BFT = convertFunctionTypeOfBlocks(Exp->getFunctionType());
+ QualType FType = Context->getPointerType(BFT);
+
+ FunctionDecl *FD;
+ Expr *NewRep;
+
+ // Simulate a constructor call...
+ FD = SynthBlockInitFunctionDecl(Tag);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, FType, VK_RValue,
+ SourceLocation());
+
+ SmallVector<Expr*, 4> InitExprs;
+
+ // Initialize the block function.
+ FD = SynthBlockInitFunctionDecl(Func);
+ DeclRefExpr *Arg = new (Context) DeclRefExpr(FD, false, FD->getType(),
+ VK_LValue, SourceLocation());
+ CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
+ CK_BitCast, Arg);
+ InitExprs.push_back(castExpr);
+
+ // Initialize the block descriptor.
+ std::string DescData = "__" + FuncName + "_block_desc_" + BlockNumber + "_DATA";
+
+ VarDecl *NewVD = VarDecl::Create(*Context, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get(DescData.c_str()),
+ Context->VoidPtrTy, nullptr,
+ SC_Static);
+ UnaryOperator *DescRefExpr =
+ new (Context) UnaryOperator(new (Context) DeclRefExpr(NewVD, false,
+ Context->VoidPtrTy,
+ VK_LValue,
+ SourceLocation()),
+ UO_AddrOf,
+ Context->getPointerType(Context->VoidPtrTy),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ InitExprs.push_back(DescRefExpr);
+
+ // Add initializers for any closure decl refs.
+ if (BlockDeclRefs.size()) {
+ Expr *Exp;
+ // Output all "by copy" declarations.
+ for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ if (isObjCType((*I)->getType())) {
+ // FIXME: Conform to ABI ([[obj retain] autorelease]).
+ FD = SynthBlockInitFunctionDecl((*I)->getName());
+ Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
+ SourceLocation());
+ if (HasLocalVariableExternalStorage(*I)) {
+ QualType QT = (*I)->getType();
+ QT = Context->getPointerType(QT);
+ Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
+ OK_Ordinary, SourceLocation());
+ }
+ } else if (isTopLevelBlockPointerType((*I)->getType())) {
+ FD = SynthBlockInitFunctionDecl((*I)->getName());
+ Arg = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
+ SourceLocation());
+ Exp = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
+ CK_BitCast, Arg);
+ } else {
+ FD = SynthBlockInitFunctionDecl((*I)->getName());
+ Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
+ SourceLocation());
+ if (HasLocalVariableExternalStorage(*I)) {
+ QualType QT = (*I)->getType();
+ QT = Context->getPointerType(QT);
+ Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
+ OK_Ordinary, SourceLocation());
+ }
+
+ }
+ InitExprs.push_back(Exp);
+ }
+ // Output all "by ref" declarations.
+ for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ ValueDecl *ND = (*I);
+ std::string Name(ND->getNameAsString());
+ std::string RecName;
+ RewriteByRefString(RecName, Name, ND, true);
+ IdentifierInfo *II = &Context->Idents.get(RecName.c_str()
+ + sizeof("struct"));
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ II);
+ assert(RD && "SynthBlockInitExpr(): Can't find RecordDecl");
+ QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+
+ FD = SynthBlockInitFunctionDecl((*I)->getName());
+ Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
+ SourceLocation());
+ bool isNestedCapturedVar = false;
+ if (block)
+ for (const auto &CI : block->captures()) {
+ const VarDecl *variable = CI.getVariable();
+ if (variable == ND && CI.isNested()) {
+ assert (CI.isByRef() &&
+ "SynthBlockInitExpr - captured block variable is not byref");
+ isNestedCapturedVar = true;
+ break;
+ }
+ }
+      // A captured nested byref variable already has its address passed in.
+      // Do not take its address again.
+ if (!isNestedCapturedVar)
+ Exp = new (Context) UnaryOperator(Exp, UO_AddrOf,
+ Context->getPointerType(Exp->getType()),
+ VK_RValue, OK_Ordinary, SourceLocation());
+ Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_BitCast, Exp);
+ InitExprs.push_back(Exp);
+ }
+ }
+ if (ImportedBlockDecls.size()) {
+    // Generate BLOCK_HAS_COPY_DISPOSE (has helper funcs) | BLOCK_HAS_DESCRIPTOR.
+ int flag = (BLOCK_HAS_COPY_DISPOSE | BLOCK_HAS_DESCRIPTOR);
+ unsigned IntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
+ Expr *FlagExp = IntegerLiteral::Create(*Context, llvm::APInt(IntSize, flag),
+ Context->IntTy, SourceLocation());
+ InitExprs.push_back(FlagExp);
+ }
+ NewRep = new (Context) CallExpr(*Context, DRE, InitExprs,
+ FType, VK_LValue, SourceLocation());
+ NewRep = new (Context) UnaryOperator(NewRep, UO_AddrOf,
+ Context->getPointerType(NewRep->getType()),
+ VK_RValue, OK_Ordinary, SourceLocation());
+ NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast,
+ NewRep);
+ BlockDeclRefs.clear();
+ BlockByRefDecls.clear();
+ BlockByRefDeclsPtrSet.clear();
+ BlockByCopyDecls.clear();
+ BlockByCopyDeclsPtrSet.clear();
+ ImportedBlockDecls.clear();
+ return NewRep;
+}
+
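+/// IsDeclStmtInForeachHeader - Return true if this DeclStmt is the element
+/// declaration in the header of the innermost enclosing
+/// ObjCForCollectionStmt, e.g. the "id x" in "for (id x in collection)".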
+bool RewriteObjC::IsDeclStmtInForeachHeader(DeclStmt *DS) {
+ if (const ObjCForCollectionStmt * CS =
+ dyn_cast<ObjCForCollectionStmt>(Stmts.back()))
+ return CS->getElement() == DS;
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Function Body / Expression rewriting
+//===----------------------------------------------------------------------===//
+
+Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
+ if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
+ isa<DoStmt>(S) || isa<ForStmt>(S))
+ Stmts.push_back(S);
+ else if (isa<ObjCForCollectionStmt>(S)) {
+ Stmts.push_back(S);
+ ObjCBcLabelNo.push_back(++BcLabelCount);
+ }
+
+ // Pseudo-object operations and ivar references need special
+ // treatment because we're going to recursively rewrite them.
+ if (PseudoObjectExpr *PseudoOp = dyn_cast<PseudoObjectExpr>(S)) {
+ if (isa<BinaryOperator>(PseudoOp->getSyntacticForm())) {
+ return RewritePropertyOrImplicitSetter(PseudoOp);
+ } else {
+ return RewritePropertyOrImplicitGetter(PseudoOp);
+ }
+ } else if (ObjCIvarRefExpr *IvarRefExpr = dyn_cast<ObjCIvarRefExpr>(S)) {
+ return RewriteObjCIvarRefExpr(IvarRefExpr);
+ }
+
+ SourceRange OrigStmtRange = S->getSourceRange();
+
+ // Perform a bottom up rewrite of all children.
+ for (Stmt *&childStmt : S->children())
+ if (childStmt) {
+ Stmt *newStmt = RewriteFunctionBodyOrGlobalInitializer(childStmt);
+ if (newStmt) {
+ childStmt = newStmt;
+ }
+ }
+
+ if (BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
+ SmallVector<DeclRefExpr *, 8> InnerBlockDeclRefs;
+ llvm::SmallPtrSet<const DeclContext *, 8> InnerContexts;
+ InnerContexts.insert(BE->getBlockDecl());
+ ImportedLocalExternalDecls.clear();
+ GetInnerBlockDeclRefExprs(BE->getBody(),
+ InnerBlockDeclRefs, InnerContexts);
+ // Rewrite the block body in place.
+ Stmt *SaveCurrentBody = CurrentBody;
+ CurrentBody = BE->getBody();
+ PropParentMap = nullptr;
+    // A block literal on the RHS of a property-dot-syntax assignment
+    // must be replaced by its synthesized AST so that getRewrittenText
+    // works as expected. In this case, what actually ends up on the RHS
+    // is blockTranscribed, the helper function for the block literal;
+    // as in: self.c = ^() {[ace ARR];};
+ bool saveDisableReplaceStmt = DisableReplaceStmt;
+ DisableReplaceStmt = false;
+ RewriteFunctionBodyOrGlobalInitializer(BE->getBody());
+ DisableReplaceStmt = saveDisableReplaceStmt;
+ CurrentBody = SaveCurrentBody;
+ PropParentMap = nullptr;
+ ImportedLocalExternalDecls.clear();
+ // Now we snarf the rewritten text and stash it away for later use.
+ std::string Str = Rewrite.getRewrittenText(BE->getSourceRange());
+ RewrittenBlockExprs[BE] = Str;
+
+ Stmt *blockTranscribed = SynthBlockInitExpr(BE, InnerBlockDeclRefs);
+
+ //blockTranscribed->dump();
+ ReplaceStmt(S, blockTranscribed);
+ return blockTranscribed;
+ }
+ // Handle specific things.
+ if (ObjCEncodeExpr *AtEncode = dyn_cast<ObjCEncodeExpr>(S))
+ return RewriteAtEncode(AtEncode);
+
+ if (ObjCSelectorExpr *AtSelector = dyn_cast<ObjCSelectorExpr>(S))
+ return RewriteAtSelector(AtSelector);
+
+ if (ObjCStringLiteral *AtString = dyn_cast<ObjCStringLiteral>(S))
+ return RewriteObjCStringLiteral(AtString);
+
+ if (ObjCMessageExpr *MessExpr = dyn_cast<ObjCMessageExpr>(S)) {
+#if 0
+ // Before we rewrite it, put the original message expression in a comment.
+ SourceLocation startLoc = MessExpr->getLocStart();
+ SourceLocation endLoc = MessExpr->getLocEnd();
+
+ const char *startBuf = SM->getCharacterData(startLoc);
+ const char *endBuf = SM->getCharacterData(endLoc);
+
+ std::string messString;
+ messString += "// ";
+ messString.append(startBuf, endBuf-startBuf+1);
+ messString += "\n";
+
+ // FIXME: Missing definition of
+ // InsertText(clang::SourceLocation, char const*, unsigned int).
+ // InsertText(startLoc, messString);
+ // Tried this, but it didn't work either...
+ // ReplaceText(startLoc, 0, messString.c_str(), messString.size());
+#endif
+ return RewriteMessageExpr(MessExpr);
+ }
+
+ if (ObjCAtTryStmt *StmtTry = dyn_cast<ObjCAtTryStmt>(S))
+ return RewriteObjCTryStmt(StmtTry);
+
+ if (ObjCAtSynchronizedStmt *StmtTry = dyn_cast<ObjCAtSynchronizedStmt>(S))
+ return RewriteObjCSynchronizedStmt(StmtTry);
+
+ if (ObjCAtThrowStmt *StmtThrow = dyn_cast<ObjCAtThrowStmt>(S))
+ return RewriteObjCThrowStmt(StmtThrow);
+
+ if (ObjCProtocolExpr *ProtocolExp = dyn_cast<ObjCProtocolExpr>(S))
+ return RewriteObjCProtocolExpr(ProtocolExp);
+
+ if (ObjCForCollectionStmt *StmtForCollection =
+ dyn_cast<ObjCForCollectionStmt>(S))
+ return RewriteObjCForCollectionStmt(StmtForCollection,
+ OrigStmtRange.getEnd());
+ if (BreakStmt *StmtBreakStmt =
+ dyn_cast<BreakStmt>(S))
+ return RewriteBreakStmt(StmtBreakStmt);
+ if (ContinueStmt *StmtContinueStmt =
+ dyn_cast<ContinueStmt>(S))
+ return RewriteContinueStmt(StmtContinueStmt);
+
+ // Need to check for protocol refs (id <P>, Foo <P> *) in variable decls
+ // and cast exprs.
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
+ // FIXME: What we're doing here is modifying the type-specifier that
+ // precedes the first Decl. In the future the DeclGroup should have
+ // a separate type-specifier that we can rewrite.
+ // NOTE: We need to avoid rewriting the DeclStmt if it is within
+ // the context of an ObjCForCollectionStmt. For example:
+ // NSArray *someArray;
+ // for (id <FooProtocol> index in someArray) ;
+ // This is because RewriteObjCForCollectionStmt() does textual rewriting
+ // and it depends on the original text locations/positions.
+ if (Stmts.empty() || !IsDeclStmtInForeachHeader(DS))
+ RewriteObjCQualifiedInterfaceTypes(*DS->decl_begin());
+
+ // Blocks rewrite rules.
+ for (auto *SD : DS->decls()) {
+ if (ValueDecl *ND = dyn_cast<ValueDecl>(SD)) {
+ if (isTopLevelBlockPointerType(ND->getType()))
+ RewriteBlockPointerDecl(ND);
+ else if (ND->getType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(ND->getType(), ND);
+ if (VarDecl *VD = dyn_cast<VarDecl>(SD)) {
+ if (VD->hasAttr<BlocksAttr>()) {
+ static unsigned uniqueByrefDeclCount = 0;
+ assert(!BlockByRefDeclNo.count(ND) &&
+ "RewriteFunctionBodyOrGlobalInitializer: Duplicate byref decl");
+ BlockByRefDeclNo[ND] = uniqueByrefDeclCount++;
+ RewriteByRefVar(VD);
+ }
+ else
+ RewriteTypeOfDecl(VD);
+ }
+ }
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(SD)) {
+ if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
+ RewriteBlockPointerDecl(TD);
+ else if (TD->getUnderlyingType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
+ }
+ }
+ }
+
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(S))
+ RewriteObjCQualifiedInterfaceTypes(CE);
+
+ if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
+ isa<DoStmt>(S) || isa<ForStmt>(S)) {
+ assert(!Stmts.empty() && "Statement stack is empty");
+ assert ((isa<SwitchStmt>(Stmts.back()) || isa<WhileStmt>(Stmts.back()) ||
+ isa<DoStmt>(Stmts.back()) || isa<ForStmt>(Stmts.back()))
+ && "Statement stack mismatch");
+ Stmts.pop_back();
+ }
+ // Handle blocks rewriting.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
+ ValueDecl *VD = DRE->getDecl();
+ if (VD->hasAttr<BlocksAttr>())
+ return RewriteBlockDeclRefExpr(DRE);
+ if (HasLocalVariableExternalStorage(VD))
+ return RewriteLocalVariableExternalStorage(DRE);
+ }
+
+ if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ if (CE->getCallee()->getType()->isBlockPointerType()) {
+ Stmt *BlockCall = SynthesizeBlockCall(CE, CE->getCallee());
+ ReplaceStmt(S, BlockCall);
+ return BlockCall;
+ }
+ }
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(S)) {
+ RewriteCastExpr(CE);
+ }
+#if 0
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(S)) {
+ CastExpr *Replacement = new (Context) CastExpr(ICE->getType(),
+ ICE->getSubExpr(),
+ SourceLocation());
+ // Get the new text.
+ std::string SStr;
+ llvm::raw_string_ostream Buf(SStr);
+ Replacement->printPretty(Buf);
+ const std::string &Str = Buf.str();
+
+ printf("CAST = %s\n", &Str[0]);
+ InsertText(ICE->getSubExpr()->getLocStart(), Str);
+ delete S;
+ return Replacement;
+ }
+#endif
+ // Return this stmt unmodified.
+ return S;
+}
+
+void RewriteObjC::RewriteRecordBody(RecordDecl *RD) {
+ for (auto *FD : RD->fields()) {
+ if (isTopLevelBlockPointerType(FD->getType()))
+ RewriteBlockPointerDecl(FD);
+ if (FD->getType()->isObjCQualifiedIdType() ||
+ FD->getType()->isObjCQualifiedInterfaceType())
+ RewriteObjCQualifiedInterfaceTypes(FD);
+ }
+}
+
+/// HandleDeclInMainFile - This is called for each top-level decl defined in the
+/// main file of the input.
+void RewriteObjC::HandleDeclInMainFile(Decl *D) {
+ switch (D->getKind()) {
+ case Decl::Function: {
+ FunctionDecl *FD = cast<FunctionDecl>(D);
+ if (FD->isOverloadedOperator())
+ return;
+
+    // Since function prototypes don't have ParmDecls, we check the function
+    // prototype. This enables us to rewrite function declarations and
+    // definitions using the same code.
+ RewriteBlocksInFunctionProtoType(FD->getType(), FD);
+
+ if (!FD->isThisDeclarationADefinition())
+ break;
+
+ // FIXME: If this should support Obj-C++, support CXXTryStmt
+ if (CompoundStmt *Body = dyn_cast_or_null<CompoundStmt>(FD->getBody())) {
+ CurFunctionDef = FD;
+ CurFunctionDeclToDeclareForBlock = FD;
+ CurrentBody = Body;
+ Body =
+ cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
+ FD->setBody(Body);
+ CurrentBody = nullptr;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = nullptr;
+ }
+ // This synthesizes and inserts the block "impl" struct, invoke function,
+ // and any copy/dispose helper functions.
+ InsertBlockLiteralsWithinFunction(FD);
+ CurFunctionDef = nullptr;
+ CurFunctionDeclToDeclareForBlock = nullptr;
+ }
+ break;
+ }
+ case Decl::ObjCMethod: {
+ ObjCMethodDecl *MD = cast<ObjCMethodDecl>(D);
+ if (CompoundStmt *Body = MD->getCompoundBody()) {
+ CurMethodDef = MD;
+ CurrentBody = Body;
+ Body =
+ cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
+ MD->setBody(Body);
+ CurrentBody = nullptr;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = nullptr;
+ }
+ InsertBlockLiteralsWithinMethod(MD);
+ CurMethodDef = nullptr;
+ }
+ break;
+ }
+ case Decl::ObjCImplementation: {
+ ObjCImplementationDecl *CI = cast<ObjCImplementationDecl>(D);
+ ClassImplementation.push_back(CI);
+ break;
+ }
+ case Decl::ObjCCategoryImpl: {
+ ObjCCategoryImplDecl *CI = cast<ObjCCategoryImplDecl>(D);
+ CategoryImplementation.push_back(CI);
+ break;
+ }
+ case Decl::Var: {
+ VarDecl *VD = cast<VarDecl>(D);
+ RewriteObjCQualifiedInterfaceTypes(VD);
+ if (isTopLevelBlockPointerType(VD->getType()))
+ RewriteBlockPointerDecl(VD);
+ else if (VD->getType()->isFunctionPointerType()) {
+ CheckFunctionPointerDecl(VD->getType(), VD);
+ if (VD->getInit()) {
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(VD->getInit())) {
+ RewriteCastExpr(CE);
+ }
+ }
+ } else if (VD->getType()->isRecordType()) {
+ RecordDecl *RD = VD->getType()->getAs<RecordType>()->getDecl();
+ if (RD->isCompleteDefinition())
+ RewriteRecordBody(RD);
+ }
+ if (VD->getInit()) {
+ GlobalVarDecl = VD;
+ CurrentBody = VD->getInit();
+ RewriteFunctionBodyOrGlobalInitializer(VD->getInit());
+ CurrentBody = nullptr;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = nullptr;
+ }
+ SynthesizeBlockLiterals(VD->getTypeSpecStartLoc(), VD->getName());
+ GlobalVarDecl = nullptr;
+
+ // This is needed for blocks.
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(VD->getInit())) {
+ RewriteCastExpr(CE);
+ }
+ }
+ break;
+ }
+ case Decl::TypeAlias:
+ case Decl::Typedef: {
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+ if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
+ RewriteBlockPointerDecl(TD);
+ else if (TD->getUnderlyingType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
+ }
+ break;
+ }
+ case Decl::CXXRecord:
+ case Decl::Record: {
+ RecordDecl *RD = cast<RecordDecl>(D);
+ if (RD->isCompleteDefinition())
+ RewriteRecordBody(RD);
+ break;
+ }
+ default:
+ break;
+ }
+ // Nothing yet.
+}
+
+void RewriteObjC::HandleTranslationUnit(ASTContext &C) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ RewriteInclude();
+
+ // Here's a great place to add any extra declarations that may be needed.
+ // Write out meta data for each @protocol(<expr>).
+ for (ObjCProtocolDecl *ProtDecl : ProtocolExprDecls)
+ RewriteObjCProtocolMetaData(ProtDecl, "", "", Preamble);
+
+ InsertText(SM->getLocForStartOfFile(MainFileID), Preamble, false);
+ if (ClassImplementation.size() || CategoryImplementation.size())
+ RewriteImplementations();
+
+ // Get the buffer corresponding to MainFileID. If we haven't changed it, then
+ // we are done.
+ if (const RewriteBuffer *RewriteBuf =
+ Rewrite.getRewriteBufferFor(MainFileID)) {
+ //printf("Changed:\n");
+ *OutFile << std::string(RewriteBuf->begin(), RewriteBuf->end());
+ } else {
+ llvm::errs() << "No changes\n";
+ }
+
+ if (ClassImplementation.size() || CategoryImplementation.size() ||
+ ProtocolExprDecls.size()) {
+    // Rewrite Objective-C metadata.
+ std::string ResultStr;
+ RewriteMetaDataIntoBuffer(ResultStr);
+ // Emit metadata.
+ *OutFile << ResultStr;
+ }
+ OutFile->flush();
+}
+
+void RewriteObjCFragileABI::Initialize(ASTContext &context) {
+ InitializeCommon(context);
+
+  // Declaring objc_selector outside the parameter list removes a silly
+  // scope-related warning...
+ if (IsHeader)
+ Preamble = "#pragma once\n";
+ Preamble += "struct objc_selector; struct objc_class;\n";
+ Preamble += "struct __rw_objc_super { struct objc_object *object; ";
+ Preamble += "struct objc_object *superClass; ";
+ if (LangOpts.MicrosoftExt) {
+ // Add a constructor for creating temporary objects.
+ Preamble += "__rw_objc_super(struct objc_object *o, struct objc_object *s) "
+ ": ";
+ Preamble += "object(o), superClass(s) {} ";
+ }
+ Preamble += "};\n";
+ Preamble += "#ifndef _REWRITER_typedef_Protocol\n";
+ Preamble += "typedef struct objc_object Protocol;\n";
+ Preamble += "#define _REWRITER_typedef_Protocol\n";
+ Preamble += "#endif\n";
+ if (LangOpts.MicrosoftExt) {
+ Preamble += "#define __OBJC_RW_DLLIMPORT extern \"C\" __declspec(dllimport)\n";
+ Preamble += "#define __OBJC_RW_STATICIMPORT extern \"C\"\n";
+ } else
+ Preamble += "#define __OBJC_RW_DLLIMPORT extern\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_msgSend";
+ Preamble += "(struct objc_object *, struct objc_selector *, ...);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_msgSendSuper";
+ Preamble += "(struct objc_super *, struct objc_selector *, ...);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object* objc_msgSend_stret";
+ Preamble += "(struct objc_object *, struct objc_selector *, ...);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object* objc_msgSendSuper_stret";
+ Preamble += "(struct objc_super *, struct objc_selector *, ...);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT double objc_msgSend_fpret";
+ Preamble += "(struct objc_object *, struct objc_selector *, ...);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getClass";
+ Preamble += "(const char *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *class_getSuperclass";
+ Preamble += "(struct objc_class *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getMetaClass";
+ Preamble += "(const char *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_throw(struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_try_enter(void *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_try_exit(void *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_exception_extract(void *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT int objc_exception_match";
+ Preamble += "(struct objc_class *, struct objc_object *);\n";
+ // @synchronized hooks.
+ Preamble += "__OBJC_RW_DLLIMPORT int objc_sync_enter(struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT int objc_sync_exit(struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT Protocol *objc_getProtocol(const char *);\n";
+ Preamble += "#ifndef __FASTENUMERATIONSTATE\n";
+ Preamble += "struct __objcFastEnumerationState {\n\t";
+ Preamble += "unsigned long state;\n\t";
+ Preamble += "void **itemsPtr;\n\t";
+ Preamble += "unsigned long *mutationsPtr;\n\t";
+ Preamble += "unsigned long extra[5];\n};\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_enumerationMutation(struct objc_object *);\n";
+ Preamble += "#define __FASTENUMERATIONSTATE\n";
+ Preamble += "#endif\n";
+ Preamble += "#ifndef __NSCONSTANTSTRINGIMPL\n";
+ Preamble += "struct __NSConstantStringImpl {\n";
+ Preamble += " int *isa;\n";
+ Preamble += " int flags;\n";
+ Preamble += " char *str;\n";
+ Preamble += " long length;\n";
+ Preamble += "};\n";
+ Preamble += "#ifdef CF_EXPORT_CONSTANT_STRING\n";
+ Preamble += "extern \"C\" __declspec(dllexport) int __CFConstantStringClassReference[];\n";
+ Preamble += "#else\n";
+ Preamble += "__OBJC_RW_DLLIMPORT int __CFConstantStringClassReference[];\n";
+ Preamble += "#endif\n";
+ Preamble += "#define __NSCONSTANTSTRINGIMPL\n";
+ Preamble += "#endif\n";
+ // Blocks preamble.
+ Preamble += "#ifndef BLOCK_IMPL\n";
+ Preamble += "#define BLOCK_IMPL\n";
+ Preamble += "struct __block_impl {\n";
+ Preamble += " void *isa;\n";
+ Preamble += " int Flags;\n";
+ Preamble += " int Reserved;\n";
+ Preamble += " void *FuncPtr;\n";
+ Preamble += "};\n";
+ Preamble += "// Runtime copy/destroy helper functions (from Block_private.h)\n";
+ Preamble += "#ifdef __OBJC_EXPORT_BLOCKS\n";
+ Preamble += "extern \"C\" __declspec(dllexport) "
+ "void _Block_object_assign(void *, const void *, const int);\n";
+ Preamble += "extern \"C\" __declspec(dllexport) void _Block_object_dispose(const void *, const int);\n";
+ Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteGlobalBlock[32];\n";
+ Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteStackBlock[32];\n";
+ Preamble += "#else\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_assign(void *, const void *, const int);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_dispose(const void *, const int);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteGlobalBlock[32];\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteStackBlock[32];\n";
+ Preamble += "#endif\n";
+ Preamble += "#endif\n";
+ if (LangOpts.MicrosoftExt) {
+ Preamble += "#undef __OBJC_RW_DLLIMPORT\n";
+ Preamble += "#undef __OBJC_RW_STATICIMPORT\n";
+ Preamble += "#ifndef KEEP_ATTRIBUTES\n"; // We use this for clang tests.
+ Preamble += "#define __attribute__(X)\n";
+ Preamble += "#endif\n";
+ Preamble += "#define __weak\n";
+ }
+ else {
+ Preamble += "#define __block\n";
+ Preamble += "#define __weak\n";
+ }
+  // NOTE: Windows uses LLP64 in 64-bit mode, so cast pointers to long long;
+  // this avoids warnings in both the 64-bit and 32-bit compilation models.
+ Preamble += "\n#define __OFFSETOFIVAR__(TYPE, MEMBER) ((long long) &((TYPE *)0)->MEMBER)\n";
+}
+
+/// RewriteIvarOffsetComputation - This routine synthesizes the computation of
+/// an ivar's offset.
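+/// For a non-bitfield ivar this emits, illustratively (class and ivar names
+/// are hypothetical):
+///   __OFFSETOFIVAR__(struct MyClass, _ivar)
+/// with an "_IMPL" suffix appended to the struct name under Microsoft
+/// extensions.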
+void RewriteObjCFragileABI::RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
+ std::string &Result) {
+ if (ivar->isBitField()) {
+ // FIXME: The hack below doesn't work for bitfields. For now, we simply
+ // place all bitfields at offset 0.
+ Result += "0";
+ } else {
+ Result += "__OFFSETOFIVAR__(struct ";
+ Result += ivar->getContainingInterface()->getNameAsString();
+ if (LangOpts.MicrosoftExt)
+ Result += "_IMPL";
+ Result += ", ";
+ Result += ivar->getNameAsString();
+ Result += ")";
+ }
+}
+
+/// RewriteObjCProtocolMetaData - Rewrite protocols meta-data.
+void RewriteObjCFragileABI::RewriteObjCProtocolMetaData(
+ ObjCProtocolDecl *PDecl, StringRef prefix,
+ StringRef ClassName, std::string &Result) {
+ static bool objc_protocol_methods = false;
+
+  // Output the struct protocol_methods holder of method selectors and types.
+ if (!objc_protocol_methods && PDecl->hasDefinition()) {
+ /* struct protocol_methods {
+ SEL _cmd;
+ char *method_types;
+ }
+ */
+ Result += "\nstruct _protocol_methods {\n";
+ Result += "\tstruct objc_selector *_cmd;\n";
+ Result += "\tchar *method_types;\n";
+ Result += "};\n";
+
+ objc_protocol_methods = true;
+ }
+ // Do not synthesize the protocol more than once.
+ if (ObjCSynthesizedProtocols.count(PDecl->getCanonicalDecl()))
+ return;
+
+ if (ObjCProtocolDecl *Def = PDecl->getDefinition())
+ PDecl = Def;
+
+ if (PDecl->instmeth_begin() != PDecl->instmeth_end()) {
+ unsigned NumMethods = std::distance(PDecl->instmeth_begin(),
+ PDecl->instmeth_end());
+ /* struct _objc_protocol_method_list {
+ int protocol_method_count;
+ struct protocol_methods protocols[];
+ }
+ */
+ Result += "\nstatic struct {\n";
+ Result += "\tint protocol_method_count;\n";
+ Result += "\tstruct _protocol_methods protocol_methods[";
+ Result += utostr(NumMethods);
+ Result += "];\n} _OBJC_PROTOCOL_INSTANCE_METHODS_";
+ Result += PDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __cat_inst_meth\")))= "
+ "{\n\t" + utostr(NumMethods) + "\n";
+
+ // Output instance methods declared in this protocol.
+ for (ObjCProtocolDecl::instmeth_iterator
+ I = PDecl->instmeth_begin(), E = PDecl->instmeth_end();
+ I != E; ++I) {
+ if (I == PDecl->instmeth_begin())
+ Result += "\t ,{{(struct objc_selector *)\"";
+ else
+ Result += "\t ,{(struct objc_selector *)\"";
+ Result += (*I)->getSelector().getAsString();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl((*I), MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\"}\n";
+ }
+ Result += "\t }\n};\n";
+ }
+
+ // Output class methods declared in this protocol.
+ unsigned NumMethods = std::distance(PDecl->classmeth_begin(),
+ PDecl->classmeth_end());
+ if (NumMethods > 0) {
+ /* struct _objc_protocol_method_list {
+ int protocol_method_count;
+ struct protocol_methods protocols[];
+ }
+ */
+ Result += "\nstatic struct {\n";
+ Result += "\tint protocol_method_count;\n";
+ Result += "\tstruct _protocol_methods protocol_methods[";
+ Result += utostr(NumMethods);
+ Result += "];\n} _OBJC_PROTOCOL_CLASS_METHODS_";
+ Result += PDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= "
+ "{\n\t";
+ Result += utostr(NumMethods);
+ Result += "\n";
+
+    // Output class methods declared in this protocol.
+ for (ObjCProtocolDecl::classmeth_iterator
+ I = PDecl->classmeth_begin(), E = PDecl->classmeth_end();
+ I != E; ++I) {
+ if (I == PDecl->classmeth_begin())
+ Result += "\t ,{{(struct objc_selector *)\"";
+ else
+ Result += "\t ,{(struct objc_selector *)\"";
+ Result += (*I)->getSelector().getAsString();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl((*I), MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\"}\n";
+ }
+ Result += "\t }\n};\n";
+ }
+
+ // Output:
+ /* struct _objc_protocol {
+ // Objective-C 1.0 extensions
+ struct _objc_protocol_extension *isa;
+ char *protocol_name;
+ struct _objc_protocol **protocol_list;
+ struct _objc_protocol_method_list *instance_methods;
+ struct _objc_protocol_method_list *class_methods;
+ };
+ */
+ static bool objc_protocol = false;
+ if (!objc_protocol) {
+ Result += "\nstruct _objc_protocol {\n";
+ Result += "\tstruct _objc_protocol_extension *isa;\n";
+ Result += "\tchar *protocol_name;\n";
+ Result += "\tstruct _objc_protocol **protocol_list;\n";
+ Result += "\tstruct _objc_protocol_method_list *instance_methods;\n";
+ Result += "\tstruct _objc_protocol_method_list *class_methods;\n";
+ Result += "};\n";
+
+ objc_protocol = true;
+ }
+
+ Result += "\nstatic struct _objc_protocol _OBJC_PROTOCOL_";
+ Result += PDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __protocol\")))= "
+ "{\n\t0, \"";
+ Result += PDecl->getNameAsString();
+ Result += "\", 0, ";
+ if (PDecl->instmeth_begin() != PDecl->instmeth_end()) {
+ Result += "(struct _objc_protocol_method_list *)&_OBJC_PROTOCOL_INSTANCE_METHODS_";
+ Result += PDecl->getNameAsString();
+ Result += ", ";
+ }
+ else
+ Result += "0, ";
+ if (PDecl->classmeth_begin() != PDecl->classmeth_end()) {
+ Result += "(struct _objc_protocol_method_list *)&_OBJC_PROTOCOL_CLASS_METHODS_";
+ Result += PDecl->getNameAsString();
+ Result += "\n";
+ }
+ else
+ Result += "0\n";
+ Result += "};\n";
+
+ // Mark this protocol as having been generated.
+ if (!ObjCSynthesizedProtocols.insert(PDecl->getCanonicalDecl()).second)
+ llvm_unreachable("protocol already synthesized");
+
+}
+
+void RewriteObjCFragileABI::RewriteObjCProtocolListMetaData(
+ const ObjCList<ObjCProtocolDecl> &Protocols,
+ StringRef prefix, StringRef ClassName,
+ std::string &Result) {
+ if (Protocols.empty()) return;
+
+ for (unsigned i = 0; i != Protocols.size(); i++)
+ RewriteObjCProtocolMetaData(Protocols[i], prefix, ClassName, Result);
+
+  // Output the top-level protocol meta-data for the class.
+ /* struct _objc_protocol_list {
+ struct _objc_protocol_list *next;
+ int protocol_count;
+ struct _objc_protocol *class_protocols[];
+ }
+ */
+ Result += "\nstatic struct {\n";
+ Result += "\tstruct _objc_protocol_list *next;\n";
+ Result += "\tint protocol_count;\n";
+ Result += "\tstruct _objc_protocol *class_protocols[";
+ Result += utostr(Protocols.size());
+ Result += "];\n} _OBJC_";
+ Result += prefix;
+ Result += "_PROTOCOLS_";
+ Result += ClassName;
+ Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= "
+ "{\n\t0, ";
+ Result += utostr(Protocols.size());
+ Result += "\n";
+
+ Result += "\t,{&_OBJC_PROTOCOL_";
+ Result += Protocols[0]->getNameAsString();
+ Result += " \n";
+
+ for (unsigned i = 1; i != Protocols.size(); i++) {
+ Result += "\t ,&_OBJC_PROTOCOL_";
+ Result += Protocols[i]->getNameAsString();
+ Result += "\n";
+ }
+ Result += "\t }\n};\n";
+}
+
+void RewriteObjCFragileABI::RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
+ std::string &Result) {
+ ObjCInterfaceDecl *CDecl = IDecl->getClassInterface();
+
+  // Explicitly declared @interfaces are already synthesized.
+ if (CDecl->isImplicitInterfaceDecl()) {
+ // FIXME: Implementation of a class with no @interface (legacy) does not
+ // produce correct synthesis as yet.
+ RewriteObjCInternalStruct(CDecl, Result);
+ }
+
+  // Build _objc_ivar_list metadata for the class's ivars, if needed.
+ unsigned NumIvars = !IDecl->ivar_empty()
+ ? IDecl->ivar_size()
+ : (CDecl ? CDecl->ivar_size() : 0);
+ if (NumIvars > 0) {
+ static bool objc_ivar = false;
+ if (!objc_ivar) {
+ /* struct _objc_ivar {
+ char *ivar_name;
+ char *ivar_type;
+ int ivar_offset;
+ };
+ */
+ Result += "\nstruct _objc_ivar {\n";
+ Result += "\tchar *ivar_name;\n";
+ Result += "\tchar *ivar_type;\n";
+ Result += "\tint ivar_offset;\n";
+ Result += "};\n";
+
+ objc_ivar = true;
+ }
+
+ /* struct {
+ int ivar_count;
+ struct _objc_ivar ivar_list[nIvars];
+ };
+ */
+ Result += "\nstatic struct {\n";
+ Result += "\tint ivar_count;\n";
+ Result += "\tstruct _objc_ivar ivar_list[";
+ Result += utostr(NumIvars);
+ Result += "];\n} _OBJC_INSTANCE_VARIABLES_";
+ Result += IDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __instance_vars\")))= "
+ "{\n\t";
+ Result += utostr(NumIvars);
+ Result += "\n";
+
+ ObjCInterfaceDecl::ivar_iterator IVI, IVE;
+ SmallVector<ObjCIvarDecl *, 8> IVars;
+ if (!IDecl->ivar_empty()) {
+ for (auto *IV : IDecl->ivars())
+ IVars.push_back(IV);
+ IVI = IDecl->ivar_begin();
+ IVE = IDecl->ivar_end();
+ } else {
+ IVI = CDecl->ivar_begin();
+ IVE = CDecl->ivar_end();
+ }
+ Result += "\t,{{\"";
+ Result += IVI->getNameAsString();
+ Result += "\", \"";
+ std::string TmpString, StrEncoding;
+ Context->getObjCEncodingForType(IVI->getType(), TmpString, *IVI);
+ QuoteDoublequotes(TmpString, StrEncoding);
+ Result += StrEncoding;
+ Result += "\", ";
+ RewriteIvarOffsetComputation(*IVI, Result);
+ Result += "}\n";
+ for (++IVI; IVI != IVE; ++IVI) {
+ Result += "\t ,{\"";
+ Result += IVI->getNameAsString();
+ Result += "\", \"";
+ std::string TmpString, StrEncoding;
+ Context->getObjCEncodingForType(IVI->getType(), TmpString, *IVI);
+ QuoteDoublequotes(TmpString, StrEncoding);
+ Result += StrEncoding;
+ Result += "\", ";
+ RewriteIvarOffsetComputation(*IVI, Result);
+ Result += "}\n";
+ }
+
+ Result += "\t }\n};\n";
+ }
+
+ // Build _objc_method_list for class's instance methods if needed
+ SmallVector<ObjCMethodDecl *, 32> InstanceMethods(IDecl->instance_methods());
+
+ // If any of our property implementations have associated getters or
+ // setters, produce metadata for them as well.
+ for (const auto *Prop : IDecl->property_impls()) {
+ if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ continue;
+ if (!Prop->getPropertyIvarDecl())
+ continue;
+ ObjCPropertyDecl *PD = Prop->getPropertyDecl();
+ if (!PD)
+ continue;
+ if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
+ if (!Getter->isDefined())
+ InstanceMethods.push_back(Getter);
+ if (PD->isReadOnly())
+ continue;
+ if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
+ if (!Setter->isDefined())
+ InstanceMethods.push_back(Setter);
+ }
+ RewriteObjCMethodsMetaData(InstanceMethods.begin(), InstanceMethods.end(),
+ true, "", IDecl->getName(), Result);
+
+ // Build _objc_method_list for class's class methods if needed
+ RewriteObjCMethodsMetaData(IDecl->classmeth_begin(), IDecl->classmeth_end(),
+ false, "", IDecl->getName(), Result);
+
+ // Protocols referenced in class declaration?
+ RewriteObjCProtocolListMetaData(CDecl->getReferencedProtocols(),
+ "CLASS", CDecl->getName(), Result);
+
+ // Declaration of class/meta-class metadata
+ /* struct _objc_class {
+ struct _objc_class *isa; // or const char *root_class_name when metadata
+ const char *super_class_name;
+ char *name;
+ long version;
+ long info;
+ long instance_size;
+ struct _objc_ivar_list *ivars;
+ struct _objc_method_list *methods;
+ struct objc_cache *cache;
+ struct objc_protocol_list *protocols;
+ const char *ivar_layout;
+ struct _objc_class_ext *ext;
+ };
+ */
+ static bool objc_class = false;
+ if (!objc_class) {
+ Result += "\nstruct _objc_class {\n";
+ Result += "\tstruct _objc_class *isa;\n";
+ Result += "\tconst char *super_class_name;\n";
+ Result += "\tchar *name;\n";
+ Result += "\tlong version;\n";
+ Result += "\tlong info;\n";
+ Result += "\tlong instance_size;\n";
+ Result += "\tstruct _objc_ivar_list *ivars;\n";
+ Result += "\tstruct _objc_method_list *methods;\n";
+ Result += "\tstruct objc_cache *cache;\n";
+ Result += "\tstruct _objc_protocol_list *protocols;\n";
+ Result += "\tconst char *ivar_layout;\n";
+ Result += "\tstruct _objc_class_ext *ext;\n";
+ Result += "};\n";
+ objc_class = true;
+ }
+
+ // Meta-class metadata generation.
+ ObjCInterfaceDecl *RootClass = nullptr;
+ ObjCInterfaceDecl *SuperClass = CDecl->getSuperClass();
+ while (SuperClass) {
+ RootClass = SuperClass;
+ SuperClass = SuperClass->getSuperClass();
+ }
+ SuperClass = CDecl->getSuperClass();
+
+ Result += "\nstatic struct _objc_class _OBJC_METACLASS_";
+ Result += CDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __meta_class\")))= "
+ "{\n\t(struct _objc_class *)\"";
+ Result += (RootClass ? RootClass->getNameAsString() : CDecl->getNameAsString());
+ Result += "\"";
+
+ if (SuperClass) {
+ Result += ", \"";
+ Result += SuperClass->getNameAsString();
+ Result += "\", \"";
+ Result += CDecl->getNameAsString();
+ Result += "\"";
+ }
+ else {
+ Result += ", 0, \"";
+ Result += CDecl->getNameAsString();
+ Result += "\"";
+ }
+ // Set 'ivars' field for root class to 0. ObjC1 runtime does not use it.
+  // The 'info' field is initialized to CLS_META(2) for the metaclass.
+ Result += ", 0,2, sizeof(struct _objc_class), 0";
+ if (IDecl->classmeth_begin() != IDecl->classmeth_end()) {
+ Result += "\n\t, (struct _objc_method_list *)&_OBJC_CLASS_METHODS_";
+ Result += IDecl->getNameAsString();
+ Result += "\n";
+ }
+ else
+ Result += ", 0\n";
+ if (CDecl->protocol_begin() != CDecl->protocol_end()) {
+ Result += "\t,0, (struct _objc_protocol_list *)&_OBJC_CLASS_PROTOCOLS_";
+ Result += CDecl->getNameAsString();
+ Result += ",0,0\n";
+ }
+ else
+ Result += "\t,0,0,0,0\n";
+ Result += "};\n";
+
+  // Class metadata generation.
+ Result += "\nstatic struct _objc_class _OBJC_CLASS_";
+ Result += CDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __class\")))= "
+ "{\n\t&_OBJC_METACLASS_";
+ Result += CDecl->getNameAsString();
+ if (SuperClass) {
+ Result += ", \"";
+ Result += SuperClass->getNameAsString();
+ Result += "\", \"";
+ Result += CDecl->getNameAsString();
+ Result += "\"";
+ }
+ else {
+ Result += ", 0, \"";
+ Result += CDecl->getNameAsString();
+ Result += "\"";
+ }
+  // The 'info' field is initialized to CLS_CLASS(1) for the class.
+ Result += ", 0,1";
+ if (!ObjCSynthesizedStructs.count(CDecl))
+ Result += ",0";
+ else {
+    // The class has a size. Synthesize its size expression.
+ Result += ",sizeof(struct ";
+ Result += CDecl->getNameAsString();
+ if (LangOpts.MicrosoftExt)
+ Result += "_IMPL";
+ Result += ")";
+ }
+ if (NumIvars > 0) {
+ Result += ", (struct _objc_ivar_list *)&_OBJC_INSTANCE_VARIABLES_";
+ Result += CDecl->getNameAsString();
+ Result += "\n\t";
+ }
+ else
+ Result += ",0";
+ if (IDecl->instmeth_begin() != IDecl->instmeth_end()) {
+ Result += ", (struct _objc_method_list *)&_OBJC_INSTANCE_METHODS_";
+ Result += CDecl->getNameAsString();
+ Result += ", 0\n\t";
+ }
+ else
+ Result += ",0,0";
+ if (CDecl->protocol_begin() != CDecl->protocol_end()) {
+ Result += ", (struct _objc_protocol_list*)&_OBJC_CLASS_PROTOCOLS_";
+ Result += CDecl->getNameAsString();
+ Result += ", 0,0\n";
+ }
+ else
+ Result += ",0,0,0\n";
+ Result += "};\n";
+}
+
+void RewriteObjCFragileABI::RewriteMetaDataIntoBuffer(std::string &Result) {
+ int ClsDefCount = ClassImplementation.size();
+ int CatDefCount = CategoryImplementation.size();
+
+ // For each implemented class, write out all its meta data.
+ for (int i = 0; i < ClsDefCount; i++)
+ RewriteObjCClassMetaData(ClassImplementation[i], Result);
+
+ // For each implemented category, write out all its meta data.
+ for (int i = 0; i < CatDefCount; i++)
+ RewriteObjCCategoryImplDecl(CategoryImplementation[i], Result);
+
+ // Write objc_symtab metadata
+ /*
+ struct _objc_symtab
+ {
+ long sel_ref_cnt;
+ SEL *refs;
+ short cls_def_cnt;
+ short cat_def_cnt;
+ void *defs[cls_def_cnt + cat_def_cnt];
+ };
+ */
+
+ Result += "\nstruct _objc_symtab {\n";
+ Result += "\tlong sel_ref_cnt;\n";
+ Result += "\tSEL *refs;\n";
+ Result += "\tshort cls_def_cnt;\n";
+ Result += "\tshort cat_def_cnt;\n";
+ Result += "\tvoid *defs[" + utostr(ClsDefCount + CatDefCount)+ "];\n";
+ Result += "};\n\n";
+
+ Result += "static struct _objc_symtab "
+ "_OBJC_SYMBOLS __attribute__((used, section (\"__OBJC, __symbols\")))= {\n";
+ Result += "\t0, 0, " + utostr(ClsDefCount)
+ + ", " + utostr(CatDefCount) + "\n";
+ for (int i = 0; i < ClsDefCount; i++) {
+ Result += "\t,&_OBJC_CLASS_";
+ Result += ClassImplementation[i]->getNameAsString();
+ Result += "\n";
+ }
+
+ for (int i = 0; i < CatDefCount; i++) {
+ Result += "\t,&_OBJC_CATEGORY_";
+ Result += CategoryImplementation[i]->getClassInterface()->getNameAsString();
+ Result += "_";
+ Result += CategoryImplementation[i]->getNameAsString();
+ Result += "\n";
+ }
+
+ Result += "};\n\n";
+
+ // Write objc_module metadata
+
+ /*
+ struct _objc_module {
+ long version;
+ long size;
+ const char *name;
+ struct _objc_symtab *symtab;
+ }
+ */
+
+ Result += "\nstruct _objc_module {\n";
+ Result += "\tlong version;\n";
+ Result += "\tlong size;\n";
+ Result += "\tconst char *name;\n";
+ Result += "\tstruct _objc_symtab *symtab;\n";
+ Result += "};\n\n";
+ Result += "static struct _objc_module "
+ "_OBJC_MODULES __attribute__ ((used, section (\"__OBJC, __module_info\")))= {\n";
+ Result += "\t" + utostr(OBJC_ABI_VERSION) +
+ ", sizeof(struct _objc_module), \"\", &_OBJC_SYMBOLS\n";
+ Result += "};\n\n";
+
+ if (LangOpts.MicrosoftExt) {
+ if (ProtocolExprDecls.size()) {
+ Result += "#pragma section(\".objc_protocol$B\",long,read,write)\n";
+ Result += "#pragma data_seg(push, \".objc_protocol$B\")\n";
+ for (ObjCProtocolDecl *ProtDecl : ProtocolExprDecls) {
+ Result += "static struct _objc_protocol *_POINTER_OBJC_PROTOCOL_";
+ Result += ProtDecl->getNameAsString();
+ Result += " = &_OBJC_PROTOCOL_";
+ Result += ProtDecl->getNameAsString();
+ Result += ";\n";
+ }
+ Result += "#pragma data_seg(pop)\n\n";
+ }
+ Result += "#pragma section(\".objc_module_info$B\",long,read,write)\n";
+ Result += "#pragma data_seg(push, \".objc_module_info$B\")\n";
+ Result += "static struct _objc_module *_POINTER_OBJC_MODULES = ";
+ Result += "&_OBJC_MODULES;\n";
+ Result += "#pragma data_seg(pop)\n\n";
+ }
+}
+
+/// RewriteObjCCategoryImplDecl - Rewrite metadata for each category
+/// implementation.
+void RewriteObjCFragileABI::RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *IDecl,
+ std::string &Result) {
+ ObjCInterfaceDecl *ClassDecl = IDecl->getClassInterface();
+ // Find category declaration for this implementation.
+ ObjCCategoryDecl *CDecl
+ = ClassDecl->FindCategoryDeclaration(IDecl->getIdentifier());
+
+ std::string FullCategoryName = ClassDecl->getNameAsString();
+ FullCategoryName += '_';
+ FullCategoryName += IDecl->getNameAsString();
+
+ // Build _objc_method_list for class's instance methods if needed
+ SmallVector<ObjCMethodDecl *, 32> InstanceMethods(IDecl->instance_methods());
+
+ // If any of our property implementations have associated getters or
+ // setters, produce metadata for them as well.
+ for (const auto *Prop : IDecl->property_impls()) {
+ if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ continue;
+ if (!Prop->getPropertyIvarDecl())
+ continue;
+ ObjCPropertyDecl *PD = Prop->getPropertyDecl();
+ if (!PD)
+ continue;
+ if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
+ InstanceMethods.push_back(Getter);
+ if (PD->isReadOnly())
+ continue;
+ if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
+ InstanceMethods.push_back(Setter);
+ }
+ RewriteObjCMethodsMetaData(InstanceMethods.begin(), InstanceMethods.end(),
+ true, "CATEGORY_", FullCategoryName.c_str(),
+ Result);
+
+ // Build _objc_method_list for class's class methods if needed
+ RewriteObjCMethodsMetaData(IDecl->classmeth_begin(), IDecl->classmeth_end(),
+ false, "CATEGORY_", FullCategoryName.c_str(),
+ Result);
+
+ // Protocols referenced in class declaration?
+  // A null CDecl is the case of a category implementation with no category
+  // interface.
+ if (CDecl)
+ RewriteObjCProtocolListMetaData(CDecl->getReferencedProtocols(), "CATEGORY",
+ FullCategoryName, Result);
+ /* struct _objc_category {
+ char *category_name;
+ char *class_name;
+ struct _objc_method_list *instance_methods;
+ struct _objc_method_list *class_methods;
+ struct _objc_protocol_list *protocols;
+ // Objective-C 1.0 extensions
+ uint32_t size; // sizeof (struct _objc_category)
+ struct _objc_property_list *instance_properties; // category's own
+ // @property decl.
+ };
+ */
+
+ static bool objc_category = false;
+ if (!objc_category) {
+ Result += "\nstruct _objc_category {\n";
+ Result += "\tchar *category_name;\n";
+ Result += "\tchar *class_name;\n";
+ Result += "\tstruct _objc_method_list *instance_methods;\n";
+ Result += "\tstruct _objc_method_list *class_methods;\n";
+ Result += "\tstruct _objc_protocol_list *protocols;\n";
+ Result += "\tunsigned int size;\n";
+ Result += "\tstruct _objc_property_list *instance_properties;\n";
+ Result += "};\n";
+ objc_category = true;
+ }
+ Result += "\nstatic struct _objc_category _OBJC_CATEGORY_";
+ Result += FullCategoryName;
+ Result += " __attribute__ ((used, section (\"__OBJC, __category\")))= {\n\t\"";
+ Result += IDecl->getNameAsString();
+ Result += "\"\n\t, \"";
+ Result += ClassDecl->getNameAsString();
+ Result += "\"\n";
+
+ if (IDecl->instmeth_begin() != IDecl->instmeth_end()) {
+ Result += "\t, (struct _objc_method_list *)"
+ "&_OBJC_CATEGORY_INSTANCE_METHODS_";
+ Result += FullCategoryName;
+ Result += "\n";
+ }
+ else
+ Result += "\t, 0\n";
+ if (IDecl->classmeth_begin() != IDecl->classmeth_end()) {
+ Result += "\t, (struct _objc_method_list *)"
+ "&_OBJC_CATEGORY_CLASS_METHODS_";
+ Result += FullCategoryName;
+ Result += "\n";
+ }
+ else
+ Result += "\t, 0\n";
+
+ if (CDecl && CDecl->protocol_begin() != CDecl->protocol_end()) {
+ Result += "\t, (struct _objc_protocol_list *)&_OBJC_CATEGORY_PROTOCOLS_";
+ Result += FullCategoryName;
+ Result += "\n";
+ }
+ else
+ Result += "\t, 0\n";
+ Result += "\t, sizeof(struct _objc_category), 0\n};\n";
+}
+
+/// RewriteObjCMethodsMetaData - Rewrite methods metadata for instance or
+/// class methods.
+template<typename MethodIterator>
+void RewriteObjCFragileABI::RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
+ MethodIterator MethodEnd,
+ bool IsInstanceMethod,
+ StringRef prefix,
+ StringRef ClassName,
+ std::string &Result) {
+ if (MethodBegin == MethodEnd) return;
+
+ if (!objc_impl_method) {
+ /* struct _objc_method {
+ SEL _cmd;
+ char *method_types;
+ void *_imp;
+ }
+ */
+ Result += "\nstruct _objc_method {\n";
+ Result += "\tSEL _cmd;\n";
+ Result += "\tchar *method_types;\n";
+ Result += "\tvoid *_imp;\n";
+ Result += "};\n";
+
+ objc_impl_method = true;
+ }
+
+ // Build _objc_method_list for class's methods if needed
+
+ /* struct {
+ struct _objc_method_list *next_method;
+ int method_count;
+ struct _objc_method method_list[];
+ }
+ */
+ unsigned NumMethods = std::distance(MethodBegin, MethodEnd);
+ Result += "\nstatic struct {\n";
+ Result += "\tstruct _objc_method_list *next_method;\n";
+ Result += "\tint method_count;\n";
+ Result += "\tstruct _objc_method method_list[";
+ Result += utostr(NumMethods);
+ Result += "];\n} _OBJC_";
+ Result += prefix;
+ Result += IsInstanceMethod ? "INSTANCE" : "CLASS";
+ Result += "_METHODS_";
+ Result += ClassName;
+ Result += " __attribute__ ((used, section (\"__OBJC, __";
+ Result += IsInstanceMethod ? "inst" : "cls";
+ Result += "_meth\")))= ";
+ Result += "{\n\t0, " + utostr(NumMethods) + "\n";
+
+ Result += "\t,{{(SEL)\"";
+ Result += (*MethodBegin)->getSelector().getAsString().c_str();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\", (void *)";
+ Result += MethodInternalNames[*MethodBegin];
+ Result += "}\n";
+ for (++MethodBegin; MethodBegin != MethodEnd; ++MethodBegin) {
+ Result += "\t ,{(SEL)\"";
+ Result += (*MethodBegin)->getSelector().getAsString().c_str();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\", (void *)";
+ Result += MethodInternalNames[*MethodBegin];
+ Result += "}\n";
+ }
+ Result += "\t }\n};\n";
+}
+
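+/// RewriteObjCIvarRefExpr - Rewrite an ivar reference as an explicit member
+/// access through a cast to the class's implementation struct. Illustrative
+/// sketch only (names are hypothetical): inside a method of MyClass, a free
+/// ivar reference "ivar" becomes roughly
+///   ((struct MyClass_IMPL *)self)->ivar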
+Stmt *RewriteObjCFragileABI::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
+ SourceRange OldRange = IV->getSourceRange();
+ Expr *BaseExpr = IV->getBase();
+
+ // Rewrite the base, but without actually doing replaces.
+ {
+ DisableReplaceStmtScope S(*this);
+ BaseExpr = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(BaseExpr));
+ IV->setBase(BaseExpr);
+ }
+
+ ObjCIvarDecl *D = IV->getDecl();
+
+ Expr *Replacement = IV;
+ if (CurMethodDef) {
+ if (BaseExpr->getType()->isObjCObjectPointerType()) {
+ const ObjCInterfaceType *iFaceDecl =
+ dyn_cast<ObjCInterfaceType>(BaseExpr->getType()->getPointeeType());
+ assert(iFaceDecl && "RewriteObjCIvarRefExpr - iFaceDecl is null");
+      // Look up which class implements the instance variable.
+ ObjCInterfaceDecl *clsDeclared = nullptr;
+ iFaceDecl->getDecl()->lookupInstanceVariable(D->getIdentifier(),
+ clsDeclared);
+ assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class");
+
+ // Synthesize an explicit cast to gain access to the ivar.
+ std::string RecName = clsDeclared->getIdentifier()->getName();
+ RecName += "_IMPL";
+ IdentifierInfo *II = &Context->Idents.get(RecName);
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ II);
+ assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl");
+ QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+ CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT,
+ CK_BitCast,
+ IV->getBase());
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(OldRange.getBegin(),
+ OldRange.getEnd(),
+ castExpr);
+ if (IV->isFreeIvar() &&
+ declaresSameEntity(CurMethodDef->getClassInterface(), iFaceDecl->getDecl())) {
+ MemberExpr *ME = new (Context)
+ MemberExpr(PE, true, SourceLocation(), D, IV->getLocation(),
+ D->getType(), VK_LValue, OK_Ordinary);
+ Replacement = ME;
+ } else {
+ IV->setBase(PE);
+ }
+ }
+ } else { // we are outside a method.
+ assert(!IV->isFreeIvar() && "Cannot have a free standing ivar outside a method");
+
+ // Explicit ivar refs need to have a cast inserted.
+ // FIXME: consider sharing some of this code with the code above.
+ if (BaseExpr->getType()->isObjCObjectPointerType()) {
+ const ObjCInterfaceType *iFaceDecl =
+ dyn_cast<ObjCInterfaceType>(BaseExpr->getType()->getPointeeType());
+      // Look up which class implements the instance variable.
+ ObjCInterfaceDecl *clsDeclared = nullptr;
+ iFaceDecl->getDecl()->lookupInstanceVariable(D->getIdentifier(),
+ clsDeclared);
+ assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class");
+
+ // Synthesize an explicit cast to gain access to the ivar.
+ std::string RecName = clsDeclared->getIdentifier()->getName();
+ RecName += "_IMPL";
+ IdentifierInfo *II = &Context->Idents.get(RecName);
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ II);
+ assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl");
+ QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+ CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT,
+ CK_BitCast,
+ IV->getBase());
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(IV->getBase()->getLocStart(),
+ IV->getBase()->getLocEnd(), castExpr);
+ // Cannot delete IV->getBase(), since PE points to it.
+ // Replace the old base with the cast. This is important when doing
+ // embedded rewrites. For example, [newInv->_container addObject:0].
+ IV->setBase(PE);
+ }
+ }
+
+ ReplaceStmtWithRange(IV, Replacement, OldRange);
+ return Replacement;
+}
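+
+// Illustrative summary (not part of the original source): within a method, a
+// free ivar reference `ivar` is rewritten to a member access through a cast
+// of self, e.g. `((ClassName_IMPL *)self)->ivar`; outside a method, or for an
+// explicit base, only the base is cast: `((ClassName_IMPL *)obj)->ivar`.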
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteTest.cpp b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteTest.cpp
new file mode 100644
index 0000000..722c5e8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteTest.cpp
@@ -0,0 +1,39 @@
+//===--- RewriteTest.cpp - Rewriter playground ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a testbed.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/Frontend/Rewriters.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Rewrite/Core/TokenRewriter.h"
+#include "llvm/Support/raw_ostream.h"
+
+void clang::DoRewriteTest(Preprocessor &PP, raw_ostream* OS) {
+ SourceManager &SM = PP.getSourceManager();
+ const LangOptions &LangOpts = PP.getLangOpts();
+
+ TokenRewriter Rewriter(SM.getMainFileID(), SM, LangOpts);
+
+ // Throw <i> </i> tags around comments.
+ for (TokenRewriter::token_iterator I = Rewriter.token_begin(),
+ E = Rewriter.token_end(); I != E; ++I) {
+ if (I->isNot(tok::comment)) continue;
+
+ Rewriter.AddTokenBefore(I, "<i>");
+ Rewriter.AddTokenAfter(I, "</i>");
+ }
+
+
+ // Print out the output.
+ for (TokenRewriter::token_iterator I = Rewriter.token_begin(),
+ E = Rewriter.token_end(); I != E; ++I)
+ *OS << PP.getSpelling(*I);
+}
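+
+// Example effect (illustrative): for the input `int x; // note`, the loop
+// above makes the printed stream read `int x; <i>// note</i>`.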
diff --git a/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp b/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
new file mode 100644
index 0000000..1bf10d2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
@@ -0,0 +1,886 @@
+//===--- SerializedDiagnosticPrinter.cpp - Serializer for diagnostics -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/SerializedDiagnosticPrinter.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Version.h"
+#include "clang/Frontend/DiagnosticRenderer.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/SerializedDiagnosticReader.h"
+#include "clang/Frontend/SerializedDiagnostics.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/raw_ostream.h"
+#include <vector>
+
+using namespace clang;
+using namespace clang::serialized_diags;
+
+namespace {
+
+class AbbreviationMap {
+ llvm::DenseMap<unsigned, unsigned> Abbrevs;
+public:
+ AbbreviationMap() {}
+
+ void set(unsigned recordID, unsigned abbrevID) {
+ assert(Abbrevs.find(recordID) == Abbrevs.end()
+ && "Abbreviation already set.");
+ Abbrevs[recordID] = abbrevID;
+ }
+
+ unsigned get(unsigned recordID) {
+ assert(Abbrevs.find(recordID) != Abbrevs.end() &&
+ "Abbreviation not set.");
+ return Abbrevs[recordID];
+ }
+};
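+
+// Usage sketch (illustrative; mirrors how SDiagsWriter uses this below):
+// record IDs are mapped to abbreviation IDs once, then looked up at emit
+// time:
+//
+//   AbbreviationMap Abbrevs;
+//   Abbrevs.set(RECORD_DIAG, Stream.EmitBlockInfoAbbrev(BLOCK_DIAG, Abbrev));
+//   ...
+//   Stream.EmitRecordWithBlob(Abbrevs.get(RECORD_DIAG), Record, Message);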
+
+typedef SmallVector<uint64_t, 64> RecordData;
+typedef SmallVectorImpl<uint64_t> RecordDataImpl;
+typedef ArrayRef<uint64_t> RecordDataRef;
+
+class SDiagsWriter;
+
+class SDiagsRenderer : public DiagnosticNoteRenderer {
+ SDiagsWriter &Writer;
+public:
+ SDiagsRenderer(SDiagsWriter &Writer, const LangOptions &LangOpts,
+ DiagnosticOptions *DiagOpts)
+ : DiagnosticNoteRenderer(LangOpts, DiagOpts), Writer(Writer) {}
+
+ ~SDiagsRenderer() override {}
+
+protected:
+ void emitDiagnosticMessage(SourceLocation Loc,
+ PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ ArrayRef<CharSourceRange> Ranges,
+ const SourceManager *SM,
+ DiagOrStoredDiag D) override;
+
+ void emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ ArrayRef<CharSourceRange> Ranges,
+ const SourceManager &SM) override {}
+
+ void emitNote(SourceLocation Loc, StringRef Message,
+ const SourceManager *SM) override;
+
+ void emitCodeContext(SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange>& Ranges,
+ ArrayRef<FixItHint> Hints,
+ const SourceManager &SM) override;
+
+ void beginDiagnostic(DiagOrStoredDiag D,
+ DiagnosticsEngine::Level Level) override;
+ void endDiagnostic(DiagOrStoredDiag D,
+ DiagnosticsEngine::Level Level) override;
+};
+
+typedef llvm::DenseMap<unsigned, unsigned> AbbrevLookup;
+
+class SDiagsMerger : SerializedDiagnosticReader {
+ SDiagsWriter &Writer;
+ AbbrevLookup FileLookup;
+ AbbrevLookup CategoryLookup;
+ AbbrevLookup DiagFlagLookup;
+
+public:
+ SDiagsMerger(SDiagsWriter &Writer)
+ : SerializedDiagnosticReader(), Writer(Writer) {}
+
+ std::error_code mergeRecordsFromFile(const char *File) {
+ return readDiagnostics(File);
+ }
+
+protected:
+ std::error_code visitStartOfDiagnostic() override;
+ std::error_code visitEndOfDiagnostic() override;
+ std::error_code visitCategoryRecord(unsigned ID, StringRef Name) override;
+ std::error_code visitDiagFlagRecord(unsigned ID, StringRef Name) override;
+ std::error_code visitDiagnosticRecord(
+ unsigned Severity, const serialized_diags::Location &Location,
+ unsigned Category, unsigned Flag, StringRef Message) override;
+ std::error_code visitFilenameRecord(unsigned ID, unsigned Size,
+ unsigned Timestamp,
+ StringRef Name) override;
+ std::error_code visitFixitRecord(const serialized_diags::Location &Start,
+ const serialized_diags::Location &End,
+ StringRef CodeToInsert) override;
+ std::error_code
+ visitSourceRangeRecord(const serialized_diags::Location &Start,
+ const serialized_diags::Location &End) override;
+
+private:
+ std::error_code adjustSourceLocFilename(RecordData &Record,
+ unsigned int offset);
+
+ void adjustAbbrevID(RecordData &Record, AbbrevLookup &Lookup,
+ unsigned NewAbbrev);
+
+ void writeRecordWithAbbrev(unsigned ID, RecordData &Record);
+
+ void writeRecordWithBlob(unsigned ID, RecordData &Record, StringRef Blob);
+};
+
+class SDiagsWriter : public DiagnosticConsumer {
+ friend class SDiagsRenderer;
+ friend class SDiagsMerger;
+
+ struct SharedState;
+
+ explicit SDiagsWriter(IntrusiveRefCntPtr<SharedState> State)
+ : LangOpts(nullptr), OriginalInstance(false), MergeChildRecords(false),
+ State(State) {}
+
+public:
+ SDiagsWriter(StringRef File, DiagnosticOptions *Diags, bool MergeChildRecords)
+ : LangOpts(nullptr), OriginalInstance(true),
+ MergeChildRecords(MergeChildRecords),
+ State(new SharedState(File, Diags)) {
+ if (MergeChildRecords)
+ RemoveOldDiagnostics();
+ EmitPreamble();
+ }
+
+ ~SDiagsWriter() override {}
+
+ void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info) override;
+
+ void BeginSourceFile(const LangOptions &LO, const Preprocessor *PP) override {
+ LangOpts = &LO;
+ }
+
+ void finish() override;
+
+private:
+  /// \brief Build a DiagnosticsEngine to emit diagnostics about the diagnostics.
+ DiagnosticsEngine *getMetaDiags();
+
+ /// \brief Remove old copies of the serialized diagnostics. This is necessary
+ /// so that we can detect when subprocesses write diagnostics that we should
+ /// merge into our own.
+ void RemoveOldDiagnostics();
+
+ /// \brief Emit the preamble for the serialized diagnostics.
+ void EmitPreamble();
+
+ /// \brief Emit the BLOCKINFO block.
+ void EmitBlockInfoBlock();
+
+ /// \brief Emit the META data block.
+ void EmitMetaBlock();
+
+ /// \brief Start a DIAG block.
+ void EnterDiagBlock();
+
+ /// \brief End a DIAG block.
+ void ExitDiagBlock();
+
+ /// \brief Emit a DIAG record.
+ void EmitDiagnosticMessage(SourceLocation Loc,
+ PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ const SourceManager *SM,
+ DiagOrStoredDiag D);
+
+ /// \brief Emit FIXIT and SOURCE_RANGE records for a diagnostic.
+ void EmitCodeContext(SmallVectorImpl<CharSourceRange> &Ranges,
+ ArrayRef<FixItHint> Hints,
+ const SourceManager &SM);
+
+ /// \brief Emit a record for a CharSourceRange.
+ void EmitCharSourceRange(CharSourceRange R, const SourceManager &SM);
+
+ /// \brief Emit the string information for the category.
+ unsigned getEmitCategory(unsigned category = 0);
+
+ /// \brief Emit the string information for diagnostic flags.
+ unsigned getEmitDiagnosticFlag(DiagnosticsEngine::Level DiagLevel,
+ unsigned DiagID = 0);
+
+ unsigned getEmitDiagnosticFlag(StringRef DiagName);
+
+  /// \brief Emit (lazily) the file string and retrieve the file identifier.
+ unsigned getEmitFile(const char *Filename);
+
+  /// \brief Add SourceLocation information to the specified record.
+ void AddLocToRecord(SourceLocation Loc, const SourceManager *SM,
+ PresumedLoc PLoc, RecordDataImpl &Record,
+ unsigned TokSize = 0);
+
+  /// \brief Add SourceLocation information to the specified record.
+ void AddLocToRecord(SourceLocation Loc, RecordDataImpl &Record,
+ const SourceManager *SM,
+ unsigned TokSize = 0) {
+ AddLocToRecord(Loc, SM, SM ? SM->getPresumedLoc(Loc) : PresumedLoc(),
+ Record, TokSize);
+ }
+
+  /// \brief Add CharSourceRange information to the specified record.
+ void AddCharSourceRangeToRecord(CharSourceRange R, RecordDataImpl &Record,
+ const SourceManager &SM);
+
+ /// \brief Language options, which can differ from one clone of this client
+ /// to another.
+ const LangOptions *LangOpts;
+
+ /// \brief Whether this is the original instance (rather than one of its
+ /// clones), responsible for writing the file at the end.
+ bool OriginalInstance;
+
+ /// \brief Whether this instance should aggregate diagnostics that are
+ /// generated from child processes.
+ bool MergeChildRecords;
+
+ /// \brief State that is shared among the various clones of this diagnostic
+ /// consumer.
+ struct SharedState : RefCountedBase<SharedState> {
+ SharedState(StringRef File, DiagnosticOptions *Diags)
+ : DiagOpts(Diags), Stream(Buffer), OutputFile(File.str()),
+ EmittedAnyDiagBlocks(false) {}
+
+ /// \brief Diagnostic options.
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts;
+
+ /// \brief The byte buffer for the serialized content.
+ SmallString<1024> Buffer;
+
+ /// \brief The BitStreamWriter for the serialized diagnostics.
+ llvm::BitstreamWriter Stream;
+
+ /// \brief The name of the diagnostics file.
+ std::string OutputFile;
+
+ /// \brief The set of constructed record abbreviations.
+ AbbreviationMap Abbrevs;
+
+ /// \brief A utility buffer for constructing record content.
+ RecordData Record;
+
+ /// \brief A text buffer for rendering diagnostic text.
+ SmallString<256> diagBuf;
+
+ /// \brief The collection of diagnostic categories used.
+ llvm::DenseSet<unsigned> Categories;
+
+ /// \brief The collection of files used.
+ llvm::DenseMap<const char *, unsigned> Files;
+
+ typedef llvm::DenseMap<const void *, std::pair<unsigned, StringRef> >
+ DiagFlagsTy;
+
+ /// \brief Map for uniquing strings.
+ DiagFlagsTy DiagFlags;
+
+ /// \brief Whether we have already started emission of any DIAG blocks. Once
+ /// this becomes \c true, we never close a DIAG block until we know that we're
+ /// starting another one or we're done.
+ bool EmittedAnyDiagBlocks;
+
+ /// \brief Engine for emitting diagnostics about the diagnostics.
+ std::unique_ptr<DiagnosticsEngine> MetaDiagnostics;
+ };
+
+ /// \brief State shared among the various clones of this diagnostic consumer.
+ IntrusiveRefCntPtr<SharedState> State;
+};
+} // end anonymous namespace
+
+namespace clang {
+namespace serialized_diags {
+std::unique_ptr<DiagnosticConsumer>
+create(StringRef OutputFile, DiagnosticOptions *Diags, bool MergeChildRecords) {
+ return llvm::make_unique<SDiagsWriter>(OutputFile, Diags, MergeChildRecords);
+}
+
+} // end namespace serialized_diags
+} // end namespace clang
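+
+// Typical wiring (illustrative sketch; `Diags`, `DiagOpts`, and "out.dia"
+// are placeholders supplied by the caller):
+//
+//   DiagnosticsEngine &Diags = ...;
+//   Diags.setClient(serialized_diags::create("out.dia", &DiagOpts,
+//                                            /*MergeChildRecords=*/false)
+//                       .release());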
+
+//===----------------------------------------------------------------------===//
+// Serialization methods.
+//===----------------------------------------------------------------------===//
+
+/// \brief Emits a block ID in the BLOCKINFO block.
+static void EmitBlockID(unsigned ID, const char *Name,
+ llvm::BitstreamWriter &Stream,
+ RecordDataImpl &Record) {
+ Record.clear();
+ Record.push_back(ID);
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_SETBID, Record);
+
+ // Emit the block name if present.
+ if (!Name || Name[0] == 0)
+ return;
+
+ Record.clear();
+
+ while (*Name)
+ Record.push_back(*Name++);
+
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_BLOCKNAME, Record);
+}
+
+/// \brief Emits a record ID in the BLOCKINFO block.
+static void EmitRecordID(unsigned ID, const char *Name,
+ llvm::BitstreamWriter &Stream,
+                         RecordDataImpl &Record) {
+ Record.clear();
+ Record.push_back(ID);
+
+ while (*Name)
+ Record.push_back(*Name++);
+
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_SETRECORDNAME, Record);
+}
+
+void SDiagsWriter::AddLocToRecord(SourceLocation Loc,
+ const SourceManager *SM,
+ PresumedLoc PLoc,
+ RecordDataImpl &Record,
+ unsigned TokSize) {
+ if (PLoc.isInvalid()) {
+ // Emit a "sentinel" location.
+ Record.push_back((unsigned)0); // File.
+ Record.push_back((unsigned)0); // Line.
+ Record.push_back((unsigned)0); // Column.
+ Record.push_back((unsigned)0); // Offset.
+ return;
+ }
+
+ Record.push_back(getEmitFile(PLoc.getFilename()));
+ Record.push_back(PLoc.getLine());
+ Record.push_back(PLoc.getColumn()+TokSize);
+ Record.push_back(SM->getFileOffset(Loc));
+}
+
+void SDiagsWriter::AddCharSourceRangeToRecord(CharSourceRange Range,
+ RecordDataImpl &Record,
+ const SourceManager &SM) {
+ AddLocToRecord(Range.getBegin(), Record, &SM);
+ unsigned TokSize = 0;
+ if (Range.isTokenRange())
+ TokSize = Lexer::MeasureTokenLength(Range.getEnd(),
+ SM, *LangOpts);
+
+ AddLocToRecord(Range.getEnd(), Record, &SM, TokSize);
+}
+
+unsigned SDiagsWriter::getEmitFile(const char *FileName) {
+ if (!FileName)
+ return 0;
+
+ unsigned &entry = State->Files[FileName];
+ if (entry)
+ return entry;
+
+ // Lazily generate the record for the file.
+ entry = State->Files.size();
+ StringRef Name(FileName);
+ RecordData::value_type Record[] = {RECORD_FILENAME, entry, 0 /* For legacy */,
+ 0 /* For legacy */, Name.size()};
+ State->Stream.EmitRecordWithBlob(State->Abbrevs.get(RECORD_FILENAME), Record,
+ Name);
+
+ return entry;
+}
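+
+// Note (observational): the DenseMap entry is default-constructed to zero
+// before size() is taken, so the first file receives ID 1; ID 0 remains the
+// "no file" sentinel that AddLocToRecord emits for invalid locations.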
+
+void SDiagsWriter::EmitCharSourceRange(CharSourceRange R,
+ const SourceManager &SM) {
+ State->Record.clear();
+ State->Record.push_back(RECORD_SOURCE_RANGE);
+ AddCharSourceRangeToRecord(R, State->Record, SM);
+ State->Stream.EmitRecordWithAbbrev(State->Abbrevs.get(RECORD_SOURCE_RANGE),
+ State->Record);
+}
+
+/// \brief Emits the preamble of the diagnostics file.
+void SDiagsWriter::EmitPreamble() {
+ // Emit the file header.
+ State->Stream.Emit((unsigned)'D', 8);
+ State->Stream.Emit((unsigned)'I', 8);
+ State->Stream.Emit((unsigned)'A', 8);
+ State->Stream.Emit((unsigned)'G', 8);
+
+ EmitBlockInfoBlock();
+ EmitMetaBlock();
+}
+
+static void AddSourceLocationAbbrev(llvm::BitCodeAbbrev *Abbrev) {
+ using namespace llvm;
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10)); // File ID.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Line.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Column.
+  Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Offset.
+}
+
+static void AddRangeLocationAbbrev(llvm::BitCodeAbbrev *Abbrev) {
+ AddSourceLocationAbbrev(Abbrev);
+ AddSourceLocationAbbrev(Abbrev);
+}
+
+void SDiagsWriter::EmitBlockInfoBlock() {
+ State->Stream.EnterBlockInfoBlock(3);
+
+ using namespace llvm;
+ llvm::BitstreamWriter &Stream = State->Stream;
+ RecordData &Record = State->Record;
+ AbbreviationMap &Abbrevs = State->Abbrevs;
+
+ // ==---------------------------------------------------------------------==//
+ // The subsequent records and Abbrevs are for the "Meta" block.
+ // ==---------------------------------------------------------------------==//
+
+ EmitBlockID(BLOCK_META, "Meta", Stream, Record);
+ EmitRecordID(RECORD_VERSION, "Version", Stream, Record);
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_VERSION));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrevs.set(RECORD_VERSION, Stream.EmitBlockInfoAbbrev(BLOCK_META, Abbrev));
+
+ // ==---------------------------------------------------------------------==//
+ // The subsequent records and Abbrevs are for the "Diagnostic" block.
+ // ==---------------------------------------------------------------------==//
+
+ EmitBlockID(BLOCK_DIAG, "Diag", Stream, Record);
+ EmitRecordID(RECORD_DIAG, "DiagInfo", Stream, Record);
+ EmitRecordID(RECORD_SOURCE_RANGE, "SrcRange", Stream, Record);
+ EmitRecordID(RECORD_CATEGORY, "CatName", Stream, Record);
+ EmitRecordID(RECORD_DIAG_FLAG, "DiagFlag", Stream, Record);
+ EmitRecordID(RECORD_FILENAME, "FileName", Stream, Record);
+ EmitRecordID(RECORD_FIXIT, "FixIt", Stream, Record);
+
+ // Emit abbreviation for RECORD_DIAG.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_DIAG));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // Diag level.
+ AddSourceLocationAbbrev(Abbrev);
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10)); // Category.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10)); // Mapped Diag ID.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 16)); // Text size.
+  Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Diagnostic text.
+ Abbrevs.set(RECORD_DIAG, Stream.EmitBlockInfoAbbrev(BLOCK_DIAG, Abbrev));
+
+  // Emit abbreviation for RECORD_CATEGORY.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_CATEGORY));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Category ID.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); // Text size.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Category text.
+ Abbrevs.set(RECORD_CATEGORY, Stream.EmitBlockInfoAbbrev(BLOCK_DIAG, Abbrev));
+
+  // Emit abbreviation for RECORD_SOURCE_RANGE.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_SOURCE_RANGE));
+ AddRangeLocationAbbrev(Abbrev);
+ Abbrevs.set(RECORD_SOURCE_RANGE,
+ Stream.EmitBlockInfoAbbrev(BLOCK_DIAG, Abbrev));
+
+ // Emit the abbreviation for RECORD_DIAG_FLAG.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_DIAG_FLAG));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10)); // Mapped Diag ID.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Text size.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Flag name text.
+ Abbrevs.set(RECORD_DIAG_FLAG, Stream.EmitBlockInfoAbbrev(BLOCK_DIAG,
+ Abbrev));
+
+ // Emit the abbreviation for RECORD_FILENAME.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_FILENAME));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10)); // Mapped file ID.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Size.
+  Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Modification time.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Text size.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name text.
+ Abbrevs.set(RECORD_FILENAME, Stream.EmitBlockInfoAbbrev(BLOCK_DIAG,
+ Abbrev));
+
+ // Emit the abbreviation for RECORD_FIXIT.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_FIXIT));
+ AddRangeLocationAbbrev(Abbrev);
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Text size.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // FixIt text.
+ Abbrevs.set(RECORD_FIXIT, Stream.EmitBlockInfoAbbrev(BLOCK_DIAG,
+ Abbrev));
+
+ Stream.ExitBlock();
+}
+
+void SDiagsWriter::EmitMetaBlock() {
+ llvm::BitstreamWriter &Stream = State->Stream;
+ AbbreviationMap &Abbrevs = State->Abbrevs;
+
+ Stream.EnterSubblock(BLOCK_META, 3);
+ RecordData::value_type Record[] = {RECORD_VERSION, VersionNumber};
+ Stream.EmitRecordWithAbbrev(Abbrevs.get(RECORD_VERSION), Record);
+ Stream.ExitBlock();
+}
+
+unsigned SDiagsWriter::getEmitCategory(unsigned category) {
+ if (!State->Categories.insert(category).second)
+ return category;
+
+  // We use a local version of 'Record' because we may be in the middle of
+  // generating another record when we lazily emit the category entry.
+ StringRef catName = DiagnosticIDs::getCategoryNameFromID(category);
+ RecordData::value_type Record[] = {RECORD_CATEGORY, category, catName.size()};
+ State->Stream.EmitRecordWithBlob(State->Abbrevs.get(RECORD_CATEGORY), Record,
+ catName);
+
+ return category;
+}
+
+unsigned SDiagsWriter::getEmitDiagnosticFlag(DiagnosticsEngine::Level DiagLevel,
+ unsigned DiagID) {
+ if (DiagLevel == DiagnosticsEngine::Note)
+ return 0; // No flag for notes.
+
+ StringRef FlagName = DiagnosticIDs::getWarningOptionForDiag(DiagID);
+ return getEmitDiagnosticFlag(FlagName);
+}
+
+unsigned SDiagsWriter::getEmitDiagnosticFlag(StringRef FlagName) {
+ if (FlagName.empty())
+ return 0;
+
+ // Here we assume that FlagName points to static data whose pointer
+ // value is fixed. This allows us to unique by diagnostic groups.
+ const void *data = FlagName.data();
+ std::pair<unsigned, StringRef> &entry = State->DiagFlags[data];
+ if (entry.first == 0) {
+ entry.first = State->DiagFlags.size();
+ entry.second = FlagName;
+
+ // Lazily emit the string in a separate record.
+ RecordData::value_type Record[] = {RECORD_DIAG_FLAG, entry.first,
+ FlagName.size()};
+ State->Stream.EmitRecordWithBlob(State->Abbrevs.get(RECORD_DIAG_FLAG),
+ Record, FlagName);
+ }
+
+ return entry.first;
+}
+
+void SDiagsWriter::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info) {
+ // Enter the block for a non-note diagnostic immediately, rather than waiting
+ // for beginDiagnostic, in case associated notes are emitted before we get
+ // there.
+ if (DiagLevel != DiagnosticsEngine::Note) {
+ if (State->EmittedAnyDiagBlocks)
+ ExitDiagBlock();
+
+ EnterDiagBlock();
+ State->EmittedAnyDiagBlocks = true;
+ }
+
+ // Compute the diagnostic text.
+ State->diagBuf.clear();
+ Info.FormatDiagnostic(State->diagBuf);
+
+ if (Info.getLocation().isInvalid()) {
+ // Special-case diagnostics with no location. We may not have entered a
+    // source file in this case, so we can't use the normal DiagnosticRenderer
+ // machinery.
+
+ // Make sure we bracket all notes as "sub-diagnostics". This matches
+ // the behavior in SDiagsRenderer::emitDiagnostic().
+ if (DiagLevel == DiagnosticsEngine::Note)
+ EnterDiagBlock();
+
+ EmitDiagnosticMessage(SourceLocation(), PresumedLoc(), DiagLevel,
+ State->diagBuf, nullptr, &Info);
+
+ if (DiagLevel == DiagnosticsEngine::Note)
+ ExitDiagBlock();
+
+ return;
+ }
+
+ assert(Info.hasSourceManager() && LangOpts &&
+ "Unexpected diagnostic with valid location outside of a source file");
+ SDiagsRenderer Renderer(*this, *LangOpts, &*State->DiagOpts);
+ Renderer.emitDiagnostic(Info.getLocation(), DiagLevel,
+ State->diagBuf,
+ Info.getRanges(),
+ Info.getFixItHints(),
+ &Info.getSourceManager(),
+ &Info);
+}
+
+static serialized_diags::Level getStableLevel(DiagnosticsEngine::Level Level) {
+ switch (Level) {
+#define CASE(X) case DiagnosticsEngine::X: return serialized_diags::X;
+ CASE(Ignored)
+ CASE(Note)
+ CASE(Remark)
+ CASE(Warning)
+ CASE(Error)
+ CASE(Fatal)
+#undef CASE
+ }
+
+ llvm_unreachable("invalid diagnostic level");
+}
+
+void SDiagsWriter::EmitDiagnosticMessage(SourceLocation Loc,
+ PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ const SourceManager *SM,
+ DiagOrStoredDiag D) {
+ llvm::BitstreamWriter &Stream = State->Stream;
+ RecordData &Record = State->Record;
+ AbbreviationMap &Abbrevs = State->Abbrevs;
+
+ // Emit the RECORD_DIAG record.
+ Record.clear();
+ Record.push_back(RECORD_DIAG);
+ Record.push_back(getStableLevel(Level));
+ AddLocToRecord(Loc, SM, PLoc, Record);
+
+ if (const Diagnostic *Info = D.dyn_cast<const Diagnostic*>()) {
+ // Emit the category string lazily and get the category ID.
+    unsigned CategoryID =
+        DiagnosticIDs::getCategoryNumberForDiag(Info->getID());
+    Record.push_back(getEmitCategory(CategoryID));
+ // Emit the diagnostic flag string lazily and get the mapped ID.
+ Record.push_back(getEmitDiagnosticFlag(Level, Info->getID()));
+ } else {
+ Record.push_back(getEmitCategory());
+ Record.push_back(getEmitDiagnosticFlag(Level));
+ }
+
+ Record.push_back(Message.size());
+ Stream.EmitRecordWithBlob(Abbrevs.get(RECORD_DIAG), Record, Message);
+}
+
+void
+SDiagsRenderer::emitDiagnosticMessage(SourceLocation Loc,
+ PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ ArrayRef<clang::CharSourceRange> Ranges,
+ const SourceManager *SM,
+ DiagOrStoredDiag D) {
+ Writer.EmitDiagnosticMessage(Loc, PLoc, Level, Message, SM, D);
+}
+
+void SDiagsWriter::EnterDiagBlock() {
+ State->Stream.EnterSubblock(BLOCK_DIAG, 4);
+}
+
+void SDiagsWriter::ExitDiagBlock() {
+ State->Stream.ExitBlock();
+}
+
+void SDiagsRenderer::beginDiagnostic(DiagOrStoredDiag D,
+ DiagnosticsEngine::Level Level) {
+ if (Level == DiagnosticsEngine::Note)
+ Writer.EnterDiagBlock();
+}
+
+void SDiagsRenderer::endDiagnostic(DiagOrStoredDiag D,
+ DiagnosticsEngine::Level Level) {
+ // Only end note diagnostics here, because we can't be sure when we've seen
+ // the last note associated with a non-note diagnostic.
+ if (Level == DiagnosticsEngine::Note)
+ Writer.ExitDiagBlock();
+}
+
+void SDiagsWriter::EmitCodeContext(SmallVectorImpl<CharSourceRange> &Ranges,
+ ArrayRef<FixItHint> Hints,
+ const SourceManager &SM) {
+ llvm::BitstreamWriter &Stream = State->Stream;
+ RecordData &Record = State->Record;
+ AbbreviationMap &Abbrevs = State->Abbrevs;
+
+ // Emit Source Ranges.
+ for (ArrayRef<CharSourceRange>::iterator I = Ranges.begin(), E = Ranges.end();
+ I != E; ++I)
+ if (I->isValid())
+ EmitCharSourceRange(*I, SM);
+
+ // Emit FixIts.
+ for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
+ I != E; ++I) {
+ const FixItHint &Fix = *I;
+ if (Fix.isNull())
+ continue;
+ Record.clear();
+ Record.push_back(RECORD_FIXIT);
+ AddCharSourceRangeToRecord(Fix.RemoveRange, Record, SM);
+ Record.push_back(Fix.CodeToInsert.size());
+ Stream.EmitRecordWithBlob(Abbrevs.get(RECORD_FIXIT), Record,
+ Fix.CodeToInsert);
+ }
+}
+
+void SDiagsRenderer::emitCodeContext(SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange> &Ranges,
+ ArrayRef<FixItHint> Hints,
+ const SourceManager &SM) {
+ Writer.EmitCodeContext(Ranges, Hints, SM);
+}
+
+void SDiagsRenderer::emitNote(SourceLocation Loc, StringRef Message,
+ const SourceManager *SM) {
+ Writer.EnterDiagBlock();
+ PresumedLoc PLoc = SM ? SM->getPresumedLoc(Loc) : PresumedLoc();
+ Writer.EmitDiagnosticMessage(Loc, PLoc, DiagnosticsEngine::Note,
+ Message, SM, DiagOrStoredDiag());
+ Writer.ExitDiagBlock();
+}
+
+DiagnosticsEngine *SDiagsWriter::getMetaDiags() {
+ // FIXME: It's slightly absurd to create a new diagnostics engine here, but
+ // the other options that are available today are worse:
+ //
+ // 1. Teach DiagnosticsConsumers to emit diagnostics to the engine they are a
+ // part of. The DiagnosticsEngine would need to know not to send
+ // diagnostics back to the consumer that failed. This would require us to
+ // rework ChainedDiagnosticsConsumer and teach the engine about multiple
+ // consumers, which is difficult today because most APIs interface with
+ // consumers rather than the engine itself.
+ //
+ // 2. Pass a DiagnosticsEngine to SDiagsWriter on creation - this would need
+ // to be distinct from the engine the writer was being added to and would
+ // normally not be used.
+ if (!State->MetaDiagnostics) {
+ IntrusiveRefCntPtr<DiagnosticIDs> IDs(new DiagnosticIDs());
+ auto Client =
+ new TextDiagnosticPrinter(llvm::errs(), State->DiagOpts.get());
+ State->MetaDiagnostics = llvm::make_unique<DiagnosticsEngine>(
+ IDs, State->DiagOpts.get(), Client);
+ }
+ return State->MetaDiagnostics.get();
+}
+
+void SDiagsWriter::RemoveOldDiagnostics() {
+ if (!llvm::sys::fs::remove(State->OutputFile))
+ return;
+
+ getMetaDiags()->Report(diag::warn_fe_serialized_diag_merge_failure);
+ // Disable merging child records, as whatever is in this file may be
+ // misleading.
+ MergeChildRecords = false;
+}
+
+void SDiagsWriter::finish() {
+ // The original instance is responsible for writing the file.
+ if (!OriginalInstance)
+ return;
+
+ // Finish off any diagnostic we were in the process of emitting.
+ if (State->EmittedAnyDiagBlocks)
+ ExitDiagBlock();
+
+ if (MergeChildRecords) {
+ if (!State->EmittedAnyDiagBlocks)
+ // We have no diagnostics of our own, so we can just leave the child
+      // process' output alone.
+ return;
+
+ if (llvm::sys::fs::exists(State->OutputFile))
+ if (SDiagsMerger(*this).mergeRecordsFromFile(State->OutputFile.c_str()))
+ getMetaDiags()->Report(diag::warn_fe_serialized_diag_merge_failure);
+ }
+
+ std::error_code EC;
+ auto OS = llvm::make_unique<llvm::raw_fd_ostream>(State->OutputFile.c_str(),
+ EC, llvm::sys::fs::F_None);
+ if (EC) {
+ getMetaDiags()->Report(diag::warn_fe_serialized_diag_failure)
+ << State->OutputFile << EC.message();
+ return;
+ }
+
+ // Write the generated bitstream to "Out".
+ OS->write((char *)&State->Buffer.front(), State->Buffer.size());
+ OS->flush();
+}
+
+std::error_code SDiagsMerger::visitStartOfDiagnostic() {
+ Writer.EnterDiagBlock();
+ return std::error_code();
+}
+
+std::error_code SDiagsMerger::visitEndOfDiagnostic() {
+ Writer.ExitDiagBlock();
+ return std::error_code();
+}
+
+std::error_code
+SDiagsMerger::visitSourceRangeRecord(const serialized_diags::Location &Start,
+ const serialized_diags::Location &End) {
+ RecordData::value_type Record[] = {
+ RECORD_SOURCE_RANGE, FileLookup[Start.FileID], Start.Line, Start.Col,
+ Start.Offset, FileLookup[End.FileID], End.Line, End.Col, End.Offset};
+ Writer.State->Stream.EmitRecordWithAbbrev(
+ Writer.State->Abbrevs.get(RECORD_SOURCE_RANGE), Record);
+ return std::error_code();
+}
+
+std::error_code SDiagsMerger::visitDiagnosticRecord(
+ unsigned Severity, const serialized_diags::Location &Location,
+ unsigned Category, unsigned Flag, StringRef Message) {
+ RecordData::value_type Record[] = {
+ RECORD_DIAG, Severity, FileLookup[Location.FileID], Location.Line,
+ Location.Col, Location.Offset, CategoryLookup[Category],
+ Flag ? DiagFlagLookup[Flag] : 0, Message.size()};
+
+ Writer.State->Stream.EmitRecordWithBlob(
+ Writer.State->Abbrevs.get(RECORD_DIAG), Record, Message);
+ return std::error_code();
+}
+
+std::error_code
+SDiagsMerger::visitFixitRecord(const serialized_diags::Location &Start,
+ const serialized_diags::Location &End,
+ StringRef Text) {
+ RecordData::value_type Record[] = {RECORD_FIXIT, FileLookup[Start.FileID],
+ Start.Line, Start.Col, Start.Offset,
+ FileLookup[End.FileID], End.Line, End.Col,
+ End.Offset, Text.size()};
+
+ Writer.State->Stream.EmitRecordWithBlob(
+ Writer.State->Abbrevs.get(RECORD_FIXIT), Record, Text);
+ return std::error_code();
+}
+
+std::error_code SDiagsMerger::visitFilenameRecord(unsigned ID, unsigned Size,
+ unsigned Timestamp,
+ StringRef Name) {
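+  // Note (observational): getEmitFile uniques files by pointer identity and
+  // Name.str() creates a temporary, so identical filenames from different
+  // children may get duplicate RECORD_FILENAME entries rather than one ID.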
+ FileLookup[ID] = Writer.getEmitFile(Name.str().c_str());
+ return std::error_code();
+}
+
+std::error_code SDiagsMerger::visitCategoryRecord(unsigned ID, StringRef Name) {
+ CategoryLookup[ID] = Writer.getEmitCategory(ID);
+ return std::error_code();
+}
+
+std::error_code SDiagsMerger::visitDiagFlagRecord(unsigned ID, StringRef Name) {
+ DiagFlagLookup[ID] = Writer.getEmitDiagnosticFlag(Name);
+ return std::error_code();
+}
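+
+// Note (observational): the merger replays child records through the parent
+// writer, translating the child's file/category/flag IDs via the *Lookup
+// tables above so that IDs stay consistent within the merged stream.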
diff --git a/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticReader.cpp b/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticReader.cpp
new file mode 100644
index 0000000..0ebbd22
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticReader.cpp
@@ -0,0 +1,295 @@
+//===--- SerializedDiagnosticReader.cpp - Reads diagnostics ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/SerializedDiagnosticReader.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Frontend/SerializedDiagnostics.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+using namespace clang;
+using namespace clang::serialized_diags;
+
+std::error_code SerializedDiagnosticReader::readDiagnostics(StringRef File) {
+ // Open the diagnostics file.
+ FileSystemOptions FO;
+ FileManager FileMgr(FO);
+
+ auto Buffer = FileMgr.getBufferForFile(File);
+ if (!Buffer)
+ return SDError::CouldNotLoad;
+
+ llvm::BitstreamReader StreamFile;
+ StreamFile.init((const unsigned char *)(*Buffer)->getBufferStart(),
+ (const unsigned char *)(*Buffer)->getBufferEnd());
+
+ llvm::BitstreamCursor Stream(StreamFile);
+
+ // Sniff for the signature.
+ if (Stream.Read(8) != 'D' ||
+ Stream.Read(8) != 'I' ||
+ Stream.Read(8) != 'A' ||
+ Stream.Read(8) != 'G')
+ return SDError::InvalidSignature;
+
+ // Read the top level blocks.
+ while (!Stream.AtEndOfStream()) {
+ if (Stream.ReadCode() != llvm::bitc::ENTER_SUBBLOCK)
+ return SDError::InvalidDiagnostics;
+
+ std::error_code EC;
+ switch (Stream.ReadSubBlockID()) {
+ case llvm::bitc::BLOCKINFO_BLOCK_ID:
+ if (Stream.ReadBlockInfoBlock())
+ return SDError::MalformedBlockInfoBlock;
+ continue;
+ case BLOCK_META:
+ if ((EC = readMetaBlock(Stream)))
+ return EC;
+ continue;
+ case BLOCK_DIAG:
+ if ((EC = readDiagnosticBlock(Stream)))
+ return EC;
+ continue;
+ default:
+ if (!Stream.SkipBlock())
+ return SDError::MalformedTopLevelBlock;
+ continue;
+ }
+ }
+ return std::error_code();
+}
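+
+// File layout implied by the reader above (illustrative summary):
+//   'D' 'I' 'A' 'G'        -- four-byte signature
+//   BLOCKINFO block        -- abbreviation definitions
+//   Meta block             -- RECORD_VERSION
+//   Diag blocks            -- one per diagnostic; notes nest as sub-blocks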
+
+enum class SerializedDiagnosticReader::Cursor {
+ Record = 1,
+ BlockEnd,
+ BlockBegin
+};
+
+llvm::ErrorOr<SerializedDiagnosticReader::Cursor>
+SerializedDiagnosticReader::skipUntilRecordOrBlock(
+ llvm::BitstreamCursor &Stream, unsigned &BlockOrRecordID) {
+ BlockOrRecordID = 0;
+
+ while (!Stream.AtEndOfStream()) {
+ unsigned Code = Stream.ReadCode();
+
+ switch ((llvm::bitc::FixedAbbrevIDs)Code) {
+ case llvm::bitc::ENTER_SUBBLOCK:
+ BlockOrRecordID = Stream.ReadSubBlockID();
+ return Cursor::BlockBegin;
+
+ case llvm::bitc::END_BLOCK:
+ if (Stream.ReadBlockEnd())
+ return SDError::InvalidDiagnostics;
+ return Cursor::BlockEnd;
+
+ case llvm::bitc::DEFINE_ABBREV:
+ Stream.ReadAbbrevRecord();
+ continue;
+
+ case llvm::bitc::UNABBREV_RECORD:
+ return SDError::UnsupportedConstruct;
+
+ default:
+ // We found a record.
+ BlockOrRecordID = Code;
+ return Cursor::Record;
+ }
+ }
+
+ return SDError::InvalidDiagnostics;
+}
+
+std::error_code
+SerializedDiagnosticReader::readMetaBlock(llvm::BitstreamCursor &Stream) {
+ if (Stream.EnterSubBlock(clang::serialized_diags::BLOCK_META))
+ return SDError::MalformedMetadataBlock;
+
+ bool VersionChecked = false;
+
+ while (true) {
+ unsigned BlockOrCode = 0;
+ llvm::ErrorOr<Cursor> Res = skipUntilRecordOrBlock(Stream, BlockOrCode);
+ if (!Res)
+      return Res.getError();
+
+ switch (Res.get()) {
+ case Cursor::Record:
+ break;
+ case Cursor::BlockBegin:
+ if (Stream.SkipBlock())
+ return SDError::MalformedMetadataBlock;
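+      // A successfully skipped sub-block is treated as the end of the meta
+      // block: deliberately falls through to the BlockEnd case.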
+ case Cursor::BlockEnd:
+ if (!VersionChecked)
+ return SDError::MissingVersion;
+ return std::error_code();
+ }
+
+ SmallVector<uint64_t, 1> Record;
+ unsigned RecordID = Stream.readRecord(BlockOrCode, Record);
+
+ if (RecordID == RECORD_VERSION) {
+ if (Record.size() < 1)
+ return SDError::MissingVersion;
+ if (Record[0] > VersionNumber)
+ return SDError::VersionMismatch;
+ VersionChecked = true;
+ }
+ }
+}
+
+std::error_code
+SerializedDiagnosticReader::readDiagnosticBlock(llvm::BitstreamCursor &Stream) {
+ if (Stream.EnterSubBlock(clang::serialized_diags::BLOCK_DIAG))
+ return SDError::MalformedDiagnosticBlock;
+
+ std::error_code EC;
+ if ((EC = visitStartOfDiagnostic()))
+ return EC;
+
+ SmallVector<uint64_t, 16> Record;
+ while (true) {
+ unsigned BlockOrCode = 0;
+ llvm::ErrorOr<Cursor> Res = skipUntilRecordOrBlock(Stream, BlockOrCode);
+ if (!Res)
+      return Res.getError();
+
+ switch (Res.get()) {
+ case Cursor::BlockBegin:
+ // The only blocks we care about are subdiagnostics.
+ if (BlockOrCode == serialized_diags::BLOCK_DIAG) {
+ if ((EC = readDiagnosticBlock(Stream)))
+ return EC;
+ } else if (!Stream.SkipBlock())
+ return SDError::MalformedSubBlock;
+ continue;
+ case Cursor::BlockEnd:
+ if ((EC = visitEndOfDiagnostic()))
+ return EC;
+ return std::error_code();
+ case Cursor::Record:
+ break;
+ }
+
+ // Read the record.
+ Record.clear();
+ StringRef Blob;
+ unsigned RecID = Stream.readRecord(BlockOrCode, Record, &Blob);
+
+ if (RecID < serialized_diags::RECORD_FIRST ||
+ RecID > serialized_diags::RECORD_LAST)
+ continue;
+
+ switch ((RecordIDs)RecID) {
+ case RECORD_CATEGORY:
+ // A category has ID and name size.
+ if (Record.size() != 2)
+ return SDError::MalformedDiagnosticRecord;
+ if ((EC = visitCategoryRecord(Record[0], Blob)))
+ return EC;
+ continue;
+ case RECORD_DIAG:
+ // A diagnostic has severity, location (4), category, flag, and message
+ // size.
+ if (Record.size() != 8)
+ return SDError::MalformedDiagnosticRecord;
+ if ((EC = visitDiagnosticRecord(
+ Record[0], Location(Record[1], Record[2], Record[3], Record[4]),
+ Record[5], Record[6], Blob)))
+ return EC;
+ continue;
+ case RECORD_DIAG_FLAG:
+ // A diagnostic flag has ID and name size.
+ if (Record.size() != 2)
+ return SDError::MalformedDiagnosticRecord;
+ if ((EC = visitDiagFlagRecord(Record[0], Blob)))
+ return EC;
+ continue;
+ case RECORD_FILENAME:
+ // A filename has ID, size, timestamp, and name size. The size and
+ // timestamp are legacy fields that are always zero these days.
+ if (Record.size() != 4)
+ return SDError::MalformedDiagnosticRecord;
+ if ((EC = visitFilenameRecord(Record[0], Record[1], Record[2], Blob)))
+ return EC;
+ continue;
+ case RECORD_FIXIT:
+ // A fixit has two locations (4 each) and message size.
+ if (Record.size() != 9)
+ return SDError::MalformedDiagnosticRecord;
+ if ((EC = visitFixitRecord(
+ Location(Record[0], Record[1], Record[2], Record[3]),
+ Location(Record[4], Record[5], Record[6], Record[7]), Blob)))
+ return EC;
+ continue;
+ case RECORD_SOURCE_RANGE:
+ // A source range is two locations (4 each).
+ if (Record.size() != 8)
+ return SDError::MalformedDiagnosticRecord;
+ if ((EC = visitSourceRangeRecord(
+ Location(Record[0], Record[1], Record[2], Record[3]),
+ Location(Record[4], Record[5], Record[6], Record[7]))))
+ return EC;
+ continue;
+ case RECORD_VERSION:
+ // A version is just a number.
+ if (Record.size() != 1)
+ return SDError::MalformedDiagnosticRecord;
+ if ((EC = visitVersionRecord(Record[0])))
+ return EC;
+ continue;
+ }
+ }
+}
+
+namespace {
+class SDErrorCategoryType final : public std::error_category {
+ const char *name() const LLVM_NOEXCEPT override {
+ return "clang.serialized_diags";
+ }
+ std::string message(int IE) const override {
+ SDError E = static_cast<SDError>(IE);
+ switch (E) {
+ case SDError::CouldNotLoad:
+ return "Failed to open diagnostics file";
+ case SDError::InvalidSignature:
+ return "Invalid diagnostics signature";
+ case SDError::InvalidDiagnostics:
+ return "Parse error reading diagnostics";
+ case SDError::MalformedTopLevelBlock:
+ return "Malformed block at top-level of diagnostics";
+ case SDError::MalformedSubBlock:
+ return "Malformed sub-block in a diagnostic";
+ case SDError::MalformedBlockInfoBlock:
+ return "Malformed BlockInfo block";
+ case SDError::MalformedMetadataBlock:
+ return "Malformed Metadata block";
+ case SDError::MalformedDiagnosticBlock:
+ return "Malformed Diagnostic block";
+ case SDError::MalformedDiagnosticRecord:
+ return "Malformed Diagnostic record";
+ case SDError::MissingVersion:
+ return "No version provided in diagnostics";
+ case SDError::VersionMismatch:
+ return "Unsupported diagnostics version";
+ case SDError::UnsupportedConstruct:
+ return "Bitcode constructs that are not supported in diagnostics appear";
+ case SDError::HandlerFailed:
+ return "Generic error occurred while handling a record";
+ }
+ llvm_unreachable("Unknown error type!");
+ }
+};
+}
+
+static llvm::ManagedStatic<SDErrorCategoryType> ErrorCategory;
+const std::error_category &clang::serialized_diags::SDErrorCategory() {
+ return *ErrorCategory;
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/TestModuleFileExtension.cpp b/contrib/llvm/tools/clang/lib/Frontend/TestModuleFileExtension.cpp
new file mode 100644
index 0000000..d1b20c4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/TestModuleFileExtension.cpp
@@ -0,0 +1,123 @@
+//===-- TestModuleFileExtension.cpp - Module Extension Tester -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "TestModuleFileExtension.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Serialization/ASTReader.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdio>
+using namespace clang;
+using namespace clang::serialization;
+
+TestModuleFileExtension::Writer::~Writer() { }
+
+void TestModuleFileExtension::Writer::writeExtensionContents(
+ Sema &SemaRef,
+ llvm::BitstreamWriter &Stream) {
+ using namespace llvm;
+
+ // Write an abbreviation for this record.
+ BitCodeAbbrev *Abv = new llvm::BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(FIRST_EXTENSION_RECORD_ID));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // # of characters
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // message
+ auto Abbrev = Stream.EmitAbbrev(Abv);
+
+ // Write a message into the extension block.
+ SmallString<64> Message;
+ {
+ auto Ext = static_cast<TestModuleFileExtension *>(getExtension());
+ raw_svector_ostream OS(Message);
+ OS << "Hello from " << Ext->BlockName << " v" << Ext->MajorVersion << "."
+ << Ext->MinorVersion;
+ }
+ SmallVector<uint64_t, 4> Record;
+ Record.push_back(FIRST_EXTENSION_RECORD_ID);
+ Record.push_back(Message.size());
+ Stream.EmitRecordWithBlob(Abbrev, Record, Message);
+}
+
+TestModuleFileExtension::Reader::Reader(ModuleFileExtension *Ext,
+ const llvm::BitstreamCursor &InStream)
+ : ModuleFileExtensionReader(Ext), Stream(InStream)
+{
+ // Read the extension block.
+ SmallVector<uint64_t, 4> Record;
+ while (true) {
+ llvm::BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::SubBlock:
+ case llvm::BitstreamEntry::EndBlock:
+ case llvm::BitstreamEntry::Error:
+ return;
+
+ case llvm::BitstreamEntry::Record:
+ break;
+ }
+
+ Record.clear();
+ StringRef Blob;
+ unsigned RecCode = Stream.readRecord(Entry.ID, Record, &Blob);
+ switch (RecCode) {
+ case FIRST_EXTENSION_RECORD_ID: {
+ StringRef Message = Blob.substr(0, Record[0]);
+ fprintf(stderr, "Read extension block message: %s\n",
+ Message.str().c_str());
+ break;
+ }
+ }
+ }
+}
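+
+// Note: the test reader only echoes each message to stderr (above),
+// presumably so test harnesses can match on the output; it keeps no state.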
+
+TestModuleFileExtension::Reader::~Reader() { }
+
+TestModuleFileExtension::~TestModuleFileExtension() { }
+
+ModuleFileExtensionMetadata
+TestModuleFileExtension::getExtensionMetadata() const {
+ return { BlockName, MajorVersion, MinorVersion, UserInfo };
+}
+
+llvm::hash_code TestModuleFileExtension::hashExtension(
+ llvm::hash_code Code) const {
+ if (Hashed) {
+ Code = llvm::hash_combine(Code, BlockName);
+ Code = llvm::hash_combine(Code, MajorVersion);
+ Code = llvm::hash_combine(Code, MinorVersion);
+ Code = llvm::hash_combine(Code, UserInfo);
+ }
+
+ return Code;
+}
+
+std::unique_ptr<ModuleFileExtensionWriter>
+TestModuleFileExtension::createExtensionWriter(ASTWriter &) {
+ return std::unique_ptr<ModuleFileExtensionWriter>(new Writer(this));
+}
+
+std::unique_ptr<ModuleFileExtensionReader>
+TestModuleFileExtension::createExtensionReader(
+ const ModuleFileExtensionMetadata &Metadata,
+ ASTReader &Reader, serialization::ModuleFile &Mod,
+ const llvm::BitstreamCursor &Stream)
+{
+ assert(Metadata.BlockName == BlockName && "Wrong block name");
+ if (std::make_pair(Metadata.MajorVersion, Metadata.MinorVersion) !=
+ std::make_pair(MajorVersion, MinorVersion)) {
+ Reader.getDiags().Report(Mod.ImportLoc,
+ diag::err_test_module_file_extension_version)
+ << BlockName << Metadata.MajorVersion << Metadata.MinorVersion
+ << MajorVersion << MinorVersion;
+ return nullptr;
+ }
+
+ return std::unique_ptr<ModuleFileExtensionReader>(
+ new TestModuleFileExtension::Reader(this, Stream));
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/TestModuleFileExtension.h b/contrib/llvm/tools/clang/lib/Frontend/TestModuleFileExtension.h
new file mode 100644
index 0000000..41f3ca9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/TestModuleFileExtension.h
@@ -0,0 +1,72 @@
+//===-- TestModuleFileExtension.h - Module Extension Tester -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_FRONTEND_TESTMODULEFILEEXTENSION_H
+#define LLVM_CLANG_FRONTEND_TESTMODULEFILEEXTENSION_H
+
+#include "clang/Serialization/ModuleFileExtension.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Bitcode/BitstreamReader.h"
+#include <string>
+
+namespace clang {
+
+/// A module file extension used for testing purposes.
+class TestModuleFileExtension : public ModuleFileExtension {
+ std::string BlockName;
+ unsigned MajorVersion;
+ unsigned MinorVersion;
+ bool Hashed;
+ std::string UserInfo;
+
+ class Writer : public ModuleFileExtensionWriter {
+ public:
+ Writer(ModuleFileExtension *Ext) : ModuleFileExtensionWriter(Ext) { }
+ ~Writer() override;
+
+ void writeExtensionContents(Sema &SemaRef,
+ llvm::BitstreamWriter &Stream) override;
+ };
+
+ class Reader : public ModuleFileExtensionReader {
+ llvm::BitstreamCursor Stream;
+
+ public:
+ ~Reader() override;
+
+ Reader(ModuleFileExtension *Ext, const llvm::BitstreamCursor &InStream);
+ };
+
+public:
+ TestModuleFileExtension(StringRef BlockName,
+ unsigned MajorVersion,
+ unsigned MinorVersion,
+ bool Hashed,
+ StringRef UserInfo)
+ : BlockName(BlockName),
+ MajorVersion(MajorVersion), MinorVersion(MinorVersion),
+ Hashed(Hashed), UserInfo(UserInfo) { }
+ ~TestModuleFileExtension() override;
+
+ ModuleFileExtensionMetadata getExtensionMetadata() const override;
+
+ llvm::hash_code hashExtension(llvm::hash_code Code) const override;
+
+ std::unique_ptr<ModuleFileExtensionWriter>
+ createExtensionWriter(ASTWriter &Writer) override;
+
+ std::unique_ptr<ModuleFileExtensionReader>
+ createExtensionReader(const ModuleFileExtensionMetadata &Metadata,
+ ASTReader &Reader, serialization::ModuleFile &Mod,
+ const llvm::BitstreamCursor &Stream) override;
+};
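+
+// Illustrative note: this extension is normally instantiated from a cc1
+// testing flag of the form
+//   -ftest-module-file-extension=<block>:<major>:<minor>:<hashed>:<user info>
+// (flag spelling assumed here; see the frontend option parsing for details).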
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_FRONTEND_TESTMODULEFILEEXTENSION_H
diff --git a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp
new file mode 100644
index 0000000..d4e156d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp
@@ -0,0 +1,1261 @@
+//===--- TextDiagnostic.cpp - Text Diagnostic Pretty-Printing -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/TextDiagnostic.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Locale.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+
+using namespace clang;
+
+static const enum raw_ostream::Colors noteColor =
+ raw_ostream::BLACK;
+static const enum raw_ostream::Colors remarkColor =
+ raw_ostream::BLUE;
+static const enum raw_ostream::Colors fixitColor =
+ raw_ostream::GREEN;
+static const enum raw_ostream::Colors caretColor =
+ raw_ostream::GREEN;
+static const enum raw_ostream::Colors warningColor =
+ raw_ostream::MAGENTA;
+static const enum raw_ostream::Colors templateColor =
+ raw_ostream::CYAN;
+static const enum raw_ostream::Colors errorColor = raw_ostream::RED;
+static const enum raw_ostream::Colors fatalColor = raw_ostream::RED;
+// Used for changing only the bold attribute.
+static const enum raw_ostream::Colors savedColor =
+ raw_ostream::SAVEDCOLOR;
+
+/// \brief Add highlights to differences in template strings.
+static void applyTemplateHighlighting(raw_ostream &OS, StringRef Str,
+ bool &Normal, bool Bold) {
+ while (1) {
+ size_t Pos = Str.find(ToggleHighlight);
+ OS << Str.slice(0, Pos);
+ if (Pos == StringRef::npos)
+ break;
+
+ Str = Str.substr(Pos + 1);
+ if (Normal)
+ OS.changeColor(templateColor, true);
+ else {
+ OS.resetColor();
+ if (Bold)
+ OS.changeColor(savedColor, true);
+ }
+ Normal = !Normal;
+ }
+}
+
+/// \brief Number of spaces to indent when word-wrapping.
+const unsigned WordWrapIndentation = 6;
+
+static int bytesSincePreviousTabOrLineBegin(StringRef SourceLine, size_t i) {
+ int bytes = 0;
+ while (0<i) {
+ if (SourceLine[--i]=='\t')
+ break;
+ ++bytes;
+ }
+ return bytes;
+}
+
+/// \brief Returns a printable representation of the first item in the input
+/// range.
+///
+/// This function returns a printable representation of the next item in a line
+/// of source. If the next byte begins a valid and printable character, that
+/// character is returned along with 'true'.
+///
+/// Otherwise, if the next byte begins a valid, but unprintable character, a
+/// printable, escaped representation of the character is returned, along with
+/// 'false'. Otherwise a printable, escaped representation of the next byte
+/// is returned along with 'false'.
+///
+/// \note The index is updated to be used with a subsequent call to
+/// printableTextForNextCharacter.
+///
+/// \param SourceLine The line of source.
+/// \param i Pointer to the byte index.
+/// \param TabStop Used to expand tabs.
+/// \return pair(printable text, 'true' iff original text was printable)
+///
+static std::pair<SmallString<16>, bool>
+printableTextForNextCharacter(StringRef SourceLine, size_t *i,
+ unsigned TabStop) {
+ assert(i && "i must not be null");
+ assert(*i<SourceLine.size() && "must point to a valid index");
+
+ if (SourceLine[*i]=='\t') {
+ assert(0 < TabStop && TabStop <= DiagnosticOptions::MaxTabStop &&
+ "Invalid -ftabstop value");
+ unsigned col = bytesSincePreviousTabOrLineBegin(SourceLine, *i);
+ unsigned NumSpaces = TabStop - col%TabStop;
+ assert(0 < NumSpaces && NumSpaces <= TabStop
+ && "Invalid computation of space amt");
+ ++(*i);
+
+ SmallString<16> expandedTab;
+ expandedTab.assign(NumSpaces, ' ');
+ return std::make_pair(expandedTab, true);
+ }
+
+ unsigned char const *begin, *end;
+ begin = reinterpret_cast<unsigned char const *>(&*(SourceLine.begin() + *i));
+ end = begin + (SourceLine.size() - *i);
+
+ if (isLegalUTF8Sequence(begin, end)) {
+ UTF32 c;
+ UTF32 *cptr = &c;
+ unsigned char const *original_begin = begin;
+ unsigned char const *cp_end = begin+getNumBytesForUTF8(SourceLine[*i]);
+
+ ConversionResult res = ConvertUTF8toUTF32(&begin, cp_end, &cptr, cptr+1,
+ strictConversion);
+ (void)res;
+ assert(conversionOK==res);
+ assert(0 < begin-original_begin
+ && "we must be further along in the string now");
+ *i += begin-original_begin;
+
+ if (!llvm::sys::locale::isPrint(c)) {
+ // If next character is valid UTF-8, but not printable
+ SmallString<16> expandedCP("<U+>");
+ while (c) {
+ expandedCP.insert(expandedCP.begin()+3, llvm::hexdigit(c%16));
+ c/=16;
+ }
+ while (expandedCP.size() < 8)
+ expandedCP.insert(expandedCP.begin()+3, llvm::hexdigit(0));
+ return std::make_pair(expandedCP, false);
+ }
+
+ // If next character is valid UTF-8, and printable
+ return std::make_pair(SmallString<16>(original_begin, cp_end), true);
+
+ }
+
+ // If next byte is not valid UTF-8 (and therefore not printable)
+ SmallString<16> expandedByte("<XX>");
+ unsigned char byte = SourceLine[*i];
+ expandedByte[1] = llvm::hexdigit(byte / 16);
+ expandedByte[2] = llvm::hexdigit(byte % 16);
+ ++(*i);
+ return std::make_pair(expandedByte, false);
+}
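+
+// Usage sketch (illustrative; `Printable` is a placeholder): callers advance
+// through a line one printable item at a time, as expandTabs() and
+// byteToColumn() do below:
+//
+//   SmallString<256> Printable;
+//   size_t i = 0;
+//   while (i < SourceLine.size()) {
+//     auto Res = printableTextForNextCharacter(SourceLine, &i, TabStop);
+//     Printable += Res.first; // Res.second: was the original byte printable?
+//   }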
+
+static void expandTabs(std::string &SourceLine, unsigned TabStop) {
+ size_t i = SourceLine.size();
+ while (i>0) {
+ i--;
+ if (SourceLine[i]!='\t')
+ continue;
+ size_t tmp_i = i;
+ std::pair<SmallString<16>,bool> res
+ = printableTextForNextCharacter(SourceLine, &tmp_i, TabStop);
+ SourceLine.replace(i, 1, res.first.c_str());
+ }
+}
+
+/// This function takes a raw source line and produces a mapping from the bytes
+/// of the printable representation of the line to the columns those printable
+/// characters will appear at (numbering the first column as 0).
+///
+/// If a byte 'i' corresponds to multiple columns (e.g. the byte contains a tab
+/// character) then the array will map that byte to the first column the
+/// tab appears at and the next value in the map will have been incremented
+/// more than once.
+///
+/// If a byte is the first in a sequence of bytes that together map to a single
+/// entity in the output, then the array will map that byte to the appropriate
+/// column while the subsequent bytes will be -1.
+///
+/// The last element in the array does not correspond to any byte in the input
+/// and instead is the number of columns needed to display the source.
+///
+/// example: (given a tabstop of 8)
+///
+/// "a \t \u3042" -> {0,1,2,8,9,-1,-1,11}
+///
+/// (\\u3042 is represented in UTF-8 by three bytes and takes two columns to
+/// display)
+static void byteToColumn(StringRef SourceLine, unsigned TabStop,
+ SmallVectorImpl<int> &out) {
+ out.clear();
+
+ if (SourceLine.empty()) {
+ out.resize(1u,0);
+ return;
+ }
+
+ out.resize(SourceLine.size()+1, -1);
+
+ int columns = 0;
+ size_t i = 0;
+ while (i<SourceLine.size()) {
+ out[i] = columns;
+ std::pair<SmallString<16>,bool> res
+ = printableTextForNextCharacter(SourceLine, &i, TabStop);
+ columns += llvm::sys::locale::columnWidth(res.first);
+ }
+ out.back() = columns;
+}
+
+/// This function takes a raw source line and produces a mapping from columns
+/// to the byte of the source line that produced the character displaying at
+/// that column. This is the inverse of the mapping produced by byteToColumn()
+///
+/// The last element in the array is the number of bytes in the source string.
+///
+/// example: (given a tabstop of 8)
+///
+/// "a \t \u3042" -> {0,1,2,-1,-1,-1,-1,-1,3,4,-1,7}
+///
+/// (\\u3042 is represented in UTF-8 by three bytes and takes two columns to
+/// display)
+static void columnToByte(StringRef SourceLine, unsigned TabStop,
+ SmallVectorImpl<int> &out) {
+ out.clear();
+
+ if (SourceLine.empty()) {
+ out.resize(1u, 0);
+ return;
+ }
+
+ int columns = 0;
+ size_t i = 0;
+ while (i<SourceLine.size()) {
+ out.resize(columns+1, -1);
+ out.back() = i;
+ std::pair<SmallString<16>,bool> res
+ = printableTextForNextCharacter(SourceLine, &i, TabStop);
+ columns += llvm::sys::locale::columnWidth(res.first);
+ }
+ out.resize(columns+1, -1);
+ out.back() = i;
+}
+
+namespace {
+struct SourceColumnMap {
+ SourceColumnMap(StringRef SourceLine, unsigned TabStop)
+ : m_SourceLine(SourceLine) {
+
+ ::byteToColumn(SourceLine, TabStop, m_byteToColumn);
+ ::columnToByte(SourceLine, TabStop, m_columnToByte);
+
+ assert(m_byteToColumn.size()==SourceLine.size()+1);
+ assert(0 < m_byteToColumn.size() && 0 < m_columnToByte.size());
+ assert(m_byteToColumn.size()
+ == static_cast<unsigned>(m_columnToByte.back()+1));
+ assert(static_cast<unsigned>(m_byteToColumn.back()+1)
+ == m_columnToByte.size());
+ }
+ int columns() const { return m_byteToColumn.back(); }
+ int bytes() const { return m_columnToByte.back(); }
+
+ /// \brief Map a byte to the column which it is at the start of, or return -1
+ /// if it is not at the start of a column (for a UTF-8 trailing byte).
+ int byteToColumn(int n) const {
+ assert(0<=n && n<static_cast<int>(m_byteToColumn.size()));
+ return m_byteToColumn[n];
+ }
+
+ /// \brief Map a byte to the first column which contains it.
+ int byteToContainingColumn(int N) const {
+ assert(0 <= N && N < static_cast<int>(m_byteToColumn.size()));
+ while (m_byteToColumn[N] == -1)
+ --N;
+ return m_byteToColumn[N];
+ }
+
+ /// \brief Map a column to the byte which starts the column, or return -1 if
+ /// the column is the second or subsequent column of an expanded tab or
+ /// similar multi-column entity.
+ int columnToByte(int n) const {
+ assert(0<=n && n<static_cast<int>(m_columnToByte.size()));
+ return m_columnToByte[n];
+ }
+
+ /// \brief Map from a byte index to the next byte which starts a column.
+ int startOfNextColumn(int N) const {
+ assert(0 <= N && N < static_cast<int>(m_byteToColumn.size() - 1));
+ while (byteToColumn(++N) == -1) {}
+ return N;
+ }
+
+ /// \brief Map from a byte index to the previous byte which starts a column.
+ int startOfPreviousColumn(int N) const {
+ assert(0 < N && N < static_cast<int>(m_byteToColumn.size()));
+ while (byteToColumn(--N) == -1) {}
+ return N;
+ }
+
+ StringRef getSourceLine() const {
+ return m_SourceLine;
+ }
+
+private:
+ const std::string m_SourceLine;
+ SmallVector<int,200> m_byteToColumn;
+ SmallVector<int,200> m_columnToByte;
+};
+} // end anonymous namespace
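+
+// A hypothetical usage sketch of SourceColumnMap (illustrative values,
+// assuming a tab stop of 8):
+//   SourceColumnMap map("a\tb", 8);
+//   map.columns();        // 9: the tab occupies columns 1 through 7
+//   map.byteToColumn(2);  // 8: 'b' is displayed at column 8
+//   map.columnToByte(4);  // -1: an interior column of the expanded tab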
+
+/// \brief When the source code line we want to print is too long for
+/// the terminal, select the "interesting" region.
+static void selectInterestingSourceRegion(std::string &SourceLine,
+ std::string &CaretLine,
+ std::string &FixItInsertionLine,
+ unsigned Columns,
+ const SourceColumnMap &map) {
+ unsigned CaretColumns = CaretLine.size();
+ unsigned FixItColumns = llvm::sys::locale::columnWidth(FixItInsertionLine);
+ unsigned MaxColumns = std::max(static_cast<unsigned>(map.columns()),
+ std::max(CaretColumns, FixItColumns));
+ // If the number of columns is no more than the desired number, we're done.
+ if (MaxColumns <= Columns)
+ return;
+
+ // No special characters are allowed in CaretLine.
+ assert(CaretLine.end() ==
+ std::find_if(CaretLine.begin(), CaretLine.end(),
+ [](char c) { return c < ' ' || '~' < c; }));
+
+ // Find the slice that we need to display the full caret line
+ // correctly.
+ unsigned CaretStart = 0, CaretEnd = CaretLine.size();
+ for (; CaretStart != CaretEnd; ++CaretStart)
+ if (!isWhitespace(CaretLine[CaretStart]))
+ break;
+
+ for (; CaretEnd != CaretStart; --CaretEnd)
+ if (!isWhitespace(CaretLine[CaretEnd - 1]))
+ break;
+
+ // The caret has already been inserted into CaretLine, so the whitespace
+ // checks above are guaranteed to include it.
+
+ // If we have a fix-it line, make sure the slice includes all of the
+ // fix-it information.
+ if (!FixItInsertionLine.empty()) {
+ unsigned FixItStart = 0, FixItEnd = FixItInsertionLine.size();
+ for (; FixItStart != FixItEnd; ++FixItStart)
+ if (!isWhitespace(FixItInsertionLine[FixItStart]))
+ break;
+
+ for (; FixItEnd != FixItStart; --FixItEnd)
+ if (!isWhitespace(FixItInsertionLine[FixItEnd - 1]))
+ break;
+
+ // We can safely use the byte offset FixItStart as the column offset
+ // because the characters up until FixItStart are all ASCII whitespace
+ // characters.
+ unsigned FixItStartCol = FixItStart;
+ unsigned FixItEndCol
+ = llvm::sys::locale::columnWidth(FixItInsertionLine.substr(0, FixItEnd));
+
+ CaretStart = std::min(FixItStartCol, CaretStart);
+ CaretEnd = std::max(FixItEndCol, CaretEnd);
+ }
+
+ // CaretEnd may have been set in the middle of a character. If it's not at
+ // a character's first column, advance it past the current character.
+ while (static_cast<int>(CaretEnd) < map.columns() &&
+ -1 == map.columnToByte(CaretEnd))
+ ++CaretEnd;
+
+ assert((static_cast<int>(CaretStart) > map.columns() ||
+ -1!=map.columnToByte(CaretStart)) &&
+ "CaretStart must not point to a column in the middle of a source"
+ " line character");
+ assert((static_cast<int>(CaretEnd) > map.columns() ||
+ -1!=map.columnToByte(CaretEnd)) &&
+ "CaretEnd must not point to a column in the middle of a source line"
+ " character");
+
+ // CaretLine[CaretStart, CaretEnd) contains all of the interesting
+ // parts of the caret line. While this slice is smaller than the
+ // number of columns we have, try to grow the slice to encompass
+ // more context.
+
+ unsigned SourceStart = map.columnToByte(std::min<unsigned>(CaretStart,
+ map.columns()));
+ unsigned SourceEnd = map.columnToByte(std::min<unsigned>(CaretEnd,
+ map.columns()));
+
+ unsigned CaretColumnsOutsideSource = CaretEnd-CaretStart
+ - (map.byteToColumn(SourceEnd)-map.byteToColumn(SourceStart));
+
+ char const *front_ellipse = "  ...";
+ char const *front_space   = "     ";
+ char const *back_ellipse = "...";
+ unsigned ellipses_space = strlen(front_ellipse) + strlen(back_ellipse);
+
+ unsigned TargetColumns = Columns;
+ // Give us extra room for the ellipses
+ // and any of the caret line that extends past the source
+ if (TargetColumns > ellipses_space+CaretColumnsOutsideSource)
+ TargetColumns -= ellipses_space+CaretColumnsOutsideSource;
+
+ while (SourceStart>0 || SourceEnd<SourceLine.size()) {
+ bool ExpandedRegion = false;
+
+ if (SourceStart>0) {
+ unsigned NewStart = map.startOfPreviousColumn(SourceStart);
+
+ // Skip over any whitespace we see here; we're looking for
+ // another bit of interesting text.
+ // FIXME: Detect non-ASCII whitespace characters too.
+ while (NewStart && isWhitespace(SourceLine[NewStart]))
+ NewStart = map.startOfPreviousColumn(NewStart);
+
+ // Skip over this bit of "interesting" text.
+ while (NewStart) {
+ unsigned Prev = map.startOfPreviousColumn(NewStart);
+ if (isWhitespace(SourceLine[Prev]))
+ break;
+ NewStart = Prev;
+ }
+
+ assert(map.byteToColumn(NewStart) != -1);
+ unsigned NewColumns = map.byteToColumn(SourceEnd) -
+ map.byteToColumn(NewStart);
+ if (NewColumns <= TargetColumns) {
+ SourceStart = NewStart;
+ ExpandedRegion = true;
+ }
+ }
+
+ if (SourceEnd<SourceLine.size()) {
+ unsigned NewEnd = map.startOfNextColumn(SourceEnd);
+
+ // Skip over any whitespace we see here; we're looking for
+ // another bit of interesting text.
+ // FIXME: Detect non-ASCII whitespace characters too.
+ while (NewEnd < SourceLine.size() && isWhitespace(SourceLine[NewEnd]))
+ NewEnd = map.startOfNextColumn(NewEnd);
+
+ // Skip over this bit of "interesting" text.
+ while (NewEnd < SourceLine.size() && !isWhitespace(SourceLine[NewEnd]))
+ NewEnd = map.startOfNextColumn(NewEnd);
+
+ assert(map.byteToColumn(NewEnd) != -1);
+ unsigned NewColumns = map.byteToColumn(NewEnd) -
+ map.byteToColumn(SourceStart);
+ if (NewColumns <= TargetColumns) {
+ SourceEnd = NewEnd;
+ ExpandedRegion = true;
+ }
+ }
+
+ if (!ExpandedRegion)
+ break;
+ }
+
+ CaretStart = map.byteToColumn(SourceStart);
+ CaretEnd = map.byteToColumn(SourceEnd) + CaretColumnsOutsideSource;
+
+ // [CaretStart, CaretEnd) is the slice we want. Update the various
+ // output lines to show only this slice, with two-space padding
+ // before the lines so that it looks nicer.
+
+ assert(CaretStart!=(unsigned)-1 && CaretEnd!=(unsigned)-1 &&
+ SourceStart!=(unsigned)-1 && SourceEnd!=(unsigned)-1);
+ assert(SourceStart <= SourceEnd);
+ assert(CaretStart <= CaretEnd);
+
+ unsigned BackColumnsRemoved
+ = map.byteToColumn(SourceLine.size())-map.byteToColumn(SourceEnd);
+ unsigned FrontColumnsRemoved = CaretStart;
+ unsigned ColumnsKept = CaretEnd-CaretStart;
+
+ // We checked up front that the line needed truncation
+ assert(FrontColumnsRemoved+ColumnsKept+BackColumnsRemoved > Columns);
+
+ // The line needs some truncation, and we'd prefer to keep the front
+ // if possible, so remove the back
+ if (BackColumnsRemoved > strlen(back_ellipse))
+ SourceLine.replace(SourceEnd, std::string::npos, back_ellipse);
+
+ // If that's enough then we're done
+ if (FrontColumnsRemoved+ColumnsKept <= Columns)
+ return;
+
+ // Otherwise remove the front as well
+ if (FrontColumnsRemoved > strlen(front_ellipse)) {
+ SourceLine.replace(0, SourceStart, front_ellipse);
+ CaretLine.replace(0, CaretStart, front_space);
+ if (!FixItInsertionLine.empty())
+ FixItInsertionLine.replace(0, CaretStart, front_space);
+ }
+}
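+
+// Roughly, with a narrow terminal this windows a long line such as
+//   someVeryLongCall(arg1, arg2, interestingArg, arg4, arg5, arg6);
+//                                ^~~~~~~~~~~~~
+// down to something like
+//     ...arg2, interestingArg, arg4, ...
+//              ^~~~~~~~~~~~~
+// (an illustrative sketch; the exact slice depends on Columns and the map).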
+
+/// \brief Skip over whitespace in the string, starting at the given
+/// index.
+///
+/// \returns The index of the first non-whitespace character that is
+/// greater than or equal to Idx or, if no such character exists,
+/// returns the end of the string.
+static unsigned skipWhitespace(unsigned Idx, StringRef Str, unsigned Length) {
+ while (Idx < Length && isWhitespace(Str[Idx]))
+ ++Idx;
+ return Idx;
+}
+
+/// \brief If the given character is the start of some kind of
+/// balanced punctuation (e.g., quotes or parentheses), return the
+/// character that will terminate the punctuation.
+///
+/// \returns The ending punctuation character, if any, or the NULL
+/// character if the input character does not start any punctuation.
+static inline char findMatchingPunctuation(char c) {
+ switch (c) {
+ case '\'': return '\'';
+ case '`': return '\'';
+ case '"': return '"';
+ case '(': return ')';
+ case '[': return ']';
+ case '{': return '}';
+ default: break;
+ }
+
+ return 0;
+}
+
+/// \brief Find the end of the word starting at the given offset
+/// within a string.
+///
+/// \returns the index pointing one character past the end of the
+/// word.
+static unsigned findEndOfWord(unsigned Start, StringRef Str,
+ unsigned Length, unsigned Column,
+ unsigned Columns) {
+ assert(Start < Str.size() && "Invalid start position!");
+ unsigned End = Start + 1;
+
+ // If we are already at the end of the string, take that as the word.
+ if (End == Str.size())
+ return End;
+
+ // Determine if the start of the string is actually opening
+ // punctuation, e.g., a quote or parentheses.
+ char EndPunct = findMatchingPunctuation(Str[Start]);
+ if (!EndPunct) {
+ // This is a normal word. Just find the first space character.
+ while (End < Length && !isWhitespace(Str[End]))
+ ++End;
+ return End;
+ }
+
+ // We have the start of a balanced punctuation sequence (quotes,
+ // parentheses, etc.). Determine where the full sequence ends.
+ SmallString<16> PunctuationEndStack;
+ PunctuationEndStack.push_back(EndPunct);
+ while (End < Length && !PunctuationEndStack.empty()) {
+ if (Str[End] == PunctuationEndStack.back())
+ PunctuationEndStack.pop_back();
+ else if (char SubEndPunct = findMatchingPunctuation(Str[End]))
+ PunctuationEndStack.push_back(SubEndPunct);
+
+ ++End;
+ }
+
+ // Find the first space character after the punctuation ended.
+ while (End < Length && !isWhitespace(Str[End]))
+ ++End;
+
+ unsigned PunctWordLength = End - Start;
+ if (// If the word fits on this line
+ Column + PunctWordLength <= Columns ||
+ // ... or the word is "short enough" to take up the next line
+ // without too much ugly white space
+ PunctWordLength < Columns/3)
+ return End; // Take the whole thing as a single "word".
+
+ // The whole quoted/parenthesized string is too long to print as a
+ // single "word". Instead, find the "word" that starts just after
+ // the punctuation and use that end-point instead. This will recurse
+ // until it finds something small enough to consider a word.
+ return findEndOfWord(Start + 1, Str, Length, Column + 1, Columns);
+}
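+
+// For example, when wrapping "use of undeclared identifier 'my_function'",
+// the quoted token 'my_function' is treated as a single unbreakable "word";
+// likewise a parenthesized run such as (int, int). Only when such a
+// sequence is too wide does the search recurse past the opening punctuation.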
+
+/// \brief Print the given string to a stream, word-wrapping it to
+/// some number of columns in the process.
+///
+/// \param OS the stream to which the word-wrapping string will be
+/// emitted.
+/// \param Str the string to word-wrap and output.
+/// \param Columns the number of columns to word-wrap to.
+/// \param Column the column number at which the first character of \p
+/// Str will be printed. This will be non-zero when part of the first
+/// line has already been printed.
+/// \param Bold if the current text should be bold
+/// \param Indentation the number of spaces to indent any lines beyond
+/// the first line.
+/// \returns true if word-wrapping was required, or false if the
+/// string fit on the first line.
+static bool printWordWrapped(raw_ostream &OS, StringRef Str,
+ unsigned Columns,
+ unsigned Column = 0,
+ bool Bold = false,
+ unsigned Indentation = WordWrapIndentation) {
+ const unsigned Length = std::min(Str.find('\n'), Str.size());
+ bool TextNormal = true;
+
+ // The string used to indent each line.
+ SmallString<16> IndentStr;
+ IndentStr.assign(Indentation, ' ');
+ bool Wrapped = false;
+ for (unsigned WordStart = 0, WordEnd; WordStart < Length;
+ WordStart = WordEnd) {
+ // Find the beginning of the next word.
+ WordStart = skipWhitespace(WordStart, Str, Length);
+ if (WordStart == Length)
+ break;
+
+ // Find the end of this word.
+ WordEnd = findEndOfWord(WordStart, Str, Length, Column, Columns);
+
+ // Does this word fit on the current line?
+ unsigned WordLength = WordEnd - WordStart;
+ if (Column + WordLength < Columns) {
+ // This word fits on the current line; print it there.
+ if (WordStart) {
+ OS << ' ';
+ Column += 1;
+ }
+ applyTemplateHighlighting(OS, Str.substr(WordStart, WordLength),
+ TextNormal, Bold);
+ Column += WordLength;
+ continue;
+ }
+
+ // This word does not fit on the current line, so wrap to the next
+ // line.
+ OS << '\n';
+ OS.write(&IndentStr[0], Indentation);
+ applyTemplateHighlighting(OS, Str.substr(WordStart, WordLength),
+ TextNormal, Bold);
+ Column = Indentation + WordLength;
+ Wrapped = true;
+ }
+
+ // Append any remaining text from the message with its existing formatting.
+ applyTemplateHighlighting(OS, Str.substr(Length), TextNormal, Bold);
+
+ assert(TextNormal && "Text highlighted at end of diagnostic message.");
+
+ return Wrapped;
+}
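+
+// A rough sketch of the wrapping, with Columns = 26 and Column = 0:
+//   candidate function not
+//         viable: requires 2
+//         arguments
+// where continuation lines are indented by Indentation spaces.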
+
+TextDiagnostic::TextDiagnostic(raw_ostream &OS,
+ const LangOptions &LangOpts,
+ DiagnosticOptions *DiagOpts)
+ : DiagnosticRenderer(LangOpts, DiagOpts), OS(OS) {}
+
+TextDiagnostic::~TextDiagnostic() {}
+
+void
+TextDiagnostic::emitDiagnosticMessage(SourceLocation Loc,
+ PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ ArrayRef<clang::CharSourceRange> Ranges,
+ const SourceManager *SM,
+ DiagOrStoredDiag D) {
+ uint64_t StartOfLocationInfo = OS.tell();
+
+ // Emit the location of this particular diagnostic.
+ if (Loc.isValid())
+ emitDiagnosticLoc(Loc, PLoc, Level, Ranges, *SM);
+
+ if (DiagOpts->ShowColors)
+ OS.resetColor();
+
+ printDiagnosticLevel(OS, Level, DiagOpts->ShowColors,
+ DiagOpts->CLFallbackMode);
+ printDiagnosticMessage(OS,
+ /*IsSupplemental*/ Level == DiagnosticsEngine::Note,
+ Message, OS.tell() - StartOfLocationInfo,
+ DiagOpts->MessageLength, DiagOpts->ShowColors);
+}
+
+/*static*/ void
+TextDiagnostic::printDiagnosticLevel(raw_ostream &OS,
+ DiagnosticsEngine::Level Level,
+ bool ShowColors,
+ bool CLFallbackMode) {
+ if (ShowColors) {
+ // Print diagnostic category in bold and color
+ switch (Level) {
+ case DiagnosticsEngine::Ignored:
+ llvm_unreachable("Invalid diagnostic type");
+ case DiagnosticsEngine::Note: OS.changeColor(noteColor, true); break;
+ case DiagnosticsEngine::Remark: OS.changeColor(remarkColor, true); break;
+ case DiagnosticsEngine::Warning: OS.changeColor(warningColor, true); break;
+ case DiagnosticsEngine::Error: OS.changeColor(errorColor, true); break;
+ case DiagnosticsEngine::Fatal: OS.changeColor(fatalColor, true); break;
+ }
+ }
+
+ switch (Level) {
+ case DiagnosticsEngine::Ignored:
+ llvm_unreachable("Invalid diagnostic type");
+ case DiagnosticsEngine::Note: OS << "note"; break;
+ case DiagnosticsEngine::Remark: OS << "remark"; break;
+ case DiagnosticsEngine::Warning: OS << "warning"; break;
+ case DiagnosticsEngine::Error: OS << "error"; break;
+ case DiagnosticsEngine::Fatal: OS << "fatal error"; break;
+ }
+
+ // In clang-cl /fallback mode, print diagnostics as "error(clang):". This
+ // makes it more clear whether a message is coming from clang or cl.exe,
+ // and it prevents MSBuild from concluding that the build failed just because
+ // there is an "error:" in the output.
+ if (CLFallbackMode)
+ OS << "(clang)";
+
+ OS << ": ";
+
+ if (ShowColors)
+ OS.resetColor();
+}
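+
+// Sample prefixes produced above: "note: ", "remark: ", "warning: ",
+// "error: ", "fatal error: ", and, in clang-cl /fallback mode,
+// "error(clang): ".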
+
+/*static*/
+void TextDiagnostic::printDiagnosticMessage(raw_ostream &OS,
+ bool IsSupplemental,
+ StringRef Message,
+ unsigned CurrentColumn,
+ unsigned Columns, bool ShowColors) {
+ bool Bold = false;
+ if (ShowColors && !IsSupplemental) {
+ // Print primary diagnostic messages in bold and without color, to visually
+ // indicate the transition from continuation notes and other output.
+ OS.changeColor(savedColor, true);
+ Bold = true;
+ }
+
+ if (Columns)
+ printWordWrapped(OS, Message, Columns, CurrentColumn, Bold);
+ else {
+ bool Normal = true;
+ applyTemplateHighlighting(OS, Message, Normal, Bold);
+ assert(Normal && "Formatting should have returned to normal");
+ }
+
+ if (ShowColors)
+ OS.resetColor();
+ OS << '\n';
+}
+
+/// \brief Print out the file/line/column information and include trace.
+///
+/// This method handles the emission of the diagnostic location information.
+/// This includes extracting as much location information as is present for
+/// the diagnostic and printing it, as well as any include stack or source
+/// ranges necessary.
+void TextDiagnostic::emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ ArrayRef<CharSourceRange> Ranges,
+ const SourceManager &SM) {
+ if (PLoc.isInvalid()) {
+ // At least print the file name if available:
+ FileID FID = SM.getFileID(Loc);
+ if (FID.isValid()) {
+ const FileEntry* FE = SM.getFileEntryForID(FID);
+ if (FE && FE->isValid()) {
+ OS << FE->getName();
+ if (FE->isInPCH())
+ OS << " (in PCH)";
+ OS << ": ";
+ }
+ }
+ return;
+ }
+ unsigned LineNo = PLoc.getLine();
+
+ if (!DiagOpts->ShowLocation)
+ return;
+
+ if (DiagOpts->ShowColors)
+ OS.changeColor(savedColor, true);
+
+ OS << PLoc.getFilename();
+ switch (DiagOpts->getFormat()) {
+ case DiagnosticOptions::Clang: OS << ':' << LineNo; break;
+ case DiagnosticOptions::MSVC: OS << '(' << LineNo; break;
+ case DiagnosticOptions::Vi: OS << " +" << LineNo; break;
+ }
+
+ if (DiagOpts->ShowColumn)
+ // Compute the column number.
+ if (unsigned ColNo = PLoc.getColumn()) {
+ if (DiagOpts->getFormat() == DiagnosticOptions::MSVC) {
+ OS << ',';
+ // Visual Studio 2010 or earlier expects column number to be off by one
+ if (LangOpts.MSCompatibilityVersion &&
+ !LangOpts.isCompatibleWithMSVC(LangOptions::MSVC2012))
+ ColNo--;
+ } else
+ OS << ':';
+ OS << ColNo;
+ }
+ switch (DiagOpts->getFormat()) {
+ case DiagnosticOptions::Clang:
+ case DiagnosticOptions::Vi: OS << ':'; break;
+ case DiagnosticOptions::MSVC: OS << ") : "; break;
+ }
+
+ if (DiagOpts->ShowSourceRanges && !Ranges.empty()) {
+ FileID CaretFileID =
+ SM.getFileID(SM.getExpansionLoc(Loc));
+ bool PrintedRange = false;
+
+ for (ArrayRef<CharSourceRange>::const_iterator RI = Ranges.begin(),
+ RE = Ranges.end();
+ RI != RE; ++RI) {
+ // Ignore invalid ranges.
+ if (!RI->isValid()) continue;
+
+ SourceLocation B = SM.getExpansionLoc(RI->getBegin());
+ SourceLocation E = SM.getExpansionLoc(RI->getEnd());
+
+ // If the End location and the start location are the same and are a
+ // macro location, then the range was something that came from a
+ // macro expansion or _Pragma. If this is an object-like macro, the
+ // best we can do is to highlight the range. If this is a
+ // function-like macro, we'd also like to highlight the arguments.
+ if (B == E && RI->getEnd().isMacroID())
+ E = SM.getExpansionRange(RI->getEnd()).second;
+
+ std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(B);
+ std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(E);
+
+ // If the start or end of the range is in another file, just discard
+ // it.
+ if (BInfo.first != CaretFileID || EInfo.first != CaretFileID)
+ continue;
+
+ // Add in the length of the token, so that we cover multi-char
+ // tokens.
+ unsigned TokSize = 0;
+ if (RI->isTokenRange())
+ TokSize = Lexer::MeasureTokenLength(E, SM, LangOpts);
+
+ OS << '{' << SM.getLineNumber(BInfo.first, BInfo.second) << ':'
+ << SM.getColumnNumber(BInfo.first, BInfo.second) << '-'
+ << SM.getLineNumber(EInfo.first, EInfo.second) << ':'
+ << (SM.getColumnNumber(EInfo.first, EInfo.second)+TokSize)
+ << '}';
+ PrintedRange = true;
+ }
+
+ if (PrintedRange)
+ OS << ':';
+ }
+ OS << ' ';
+}
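+
+// Illustrative prefixes for line 10, column 4 of foo.c:
+//   Clang format: "foo.c:10:4: "
+//   MSVC format:  "foo.c(10,4) : "  (column decremented for VS 2010 and older)
+//   Vi format:    "foo.c +10:4: "
+// With -fdiagnostics-print-source-range-info, ranges such as
+// "{10:4-10:12}:" are emitted before the trailing space.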
+
+void TextDiagnostic::emitIncludeLocation(SourceLocation Loc,
+ PresumedLoc PLoc,
+ const SourceManager &SM) {
+ if (DiagOpts->ShowLocation && PLoc.getFilename())
+ OS << "In file included from " << PLoc.getFilename() << ':'
+ << PLoc.getLine() << ":\n";
+ else
+ OS << "In included file:\n";
+}
+
+void TextDiagnostic::emitImportLocation(SourceLocation Loc, PresumedLoc PLoc,
+ StringRef ModuleName,
+ const SourceManager &SM) {
+ if (DiagOpts->ShowLocation && PLoc.getFilename())
+ OS << "In module '" << ModuleName << "' imported from "
+ << PLoc.getFilename() << ':' << PLoc.getLine() << ":\n";
+ else
+ OS << "In module '" << ModuleName << "':\n";
+}
+
+void TextDiagnostic::emitBuildingModuleLocation(SourceLocation Loc,
+ PresumedLoc PLoc,
+ StringRef ModuleName,
+ const SourceManager &SM) {
+ if (DiagOpts->ShowLocation && PLoc.getFilename())
+ OS << "While building module '" << ModuleName << "' imported from "
+ << PLoc.getFilename() << ':' << PLoc.getLine() << ":\n";
+ else
+ OS << "While building module '" << ModuleName << "':\n";
+}
+
+/// \brief Highlight a SourceRange (with ~'s) for any characters on LineNo.
+static void highlightRange(const CharSourceRange &R,
+ unsigned LineNo, FileID FID,
+ const SourceColumnMap &map,
+ std::string &CaretLine,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ if (!R.isValid()) return;
+
+ SourceLocation Begin = R.getBegin();
+ SourceLocation End = R.getEnd();
+
+ unsigned StartLineNo = SM.getExpansionLineNumber(Begin);
+ if (StartLineNo > LineNo || SM.getFileID(Begin) != FID)
+ return; // No intersection.
+
+ unsigned EndLineNo = SM.getExpansionLineNumber(End);
+ if (EndLineNo < LineNo || SM.getFileID(End) != FID)
+ return; // No intersection.
+
+ // Compute the column number of the start.
+ unsigned StartColNo = 0;
+ if (StartLineNo == LineNo) {
+ StartColNo = SM.getExpansionColumnNumber(Begin);
+ if (StartColNo) --StartColNo; // Zero base the col #.
+ }
+
+ // Compute the column number of the end.
+ unsigned EndColNo = map.getSourceLine().size();
+ if (EndLineNo == LineNo) {
+ EndColNo = SM.getExpansionColumnNumber(End);
+ if (EndColNo) {
+ --EndColNo; // Zero base the col #.
+
+ // Add in the length of the token, so that we cover multi-char tokens if
+ // this is a token range.
+ if (R.isTokenRange())
+ EndColNo += Lexer::MeasureTokenLength(End, SM, LangOpts);
+ } else {
+ EndColNo = CaretLine.size();
+ }
+ }
+
+ assert(StartColNo <= EndColNo && "Invalid range!");
+
+ // Check that a token range does not highlight only whitespace.
+ if (R.isTokenRange()) {
+ // Pick the first non-whitespace column.
+ while (StartColNo < map.getSourceLine().size() &&
+ (map.getSourceLine()[StartColNo] == ' ' ||
+ map.getSourceLine()[StartColNo] == '\t'))
+ StartColNo = map.startOfNextColumn(StartColNo);
+
+ // Pick the last non-whitespace column.
+ if (EndColNo > map.getSourceLine().size())
+ EndColNo = map.getSourceLine().size();
+ while (EndColNo &&
+ (map.getSourceLine()[EndColNo-1] == ' ' ||
+ map.getSourceLine()[EndColNo-1] == '\t'))
+ EndColNo = map.startOfPreviousColumn(EndColNo);
+
+ // If the start/end passed each other, then we are trying to highlight a
+ // range that just exists in whitespace, which must be some sort of other
+ // bug.
+ assert(StartColNo <= EndColNo && "Trying to highlight whitespace??");
+ }
+
+ assert(StartColNo <= map.getSourceLine().size() && "Invalid range!");
+ assert(EndColNo <= map.getSourceLine().size() && "Invalid range!");
+
+ // Fill the range with ~'s.
+ StartColNo = map.byteToContainingColumn(StartColNo);
+ EndColNo = map.byteToContainingColumn(EndColNo);
+
+ assert(StartColNo <= EndColNo && "Invalid range!");
+ if (CaretLine.size() < EndColNo)
+ CaretLine.resize(EndColNo,' ');
+ std::fill(CaretLine.begin()+StartColNo,CaretLine.begin()+EndColNo,'~');
+}
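+
+// For example, a token range covering "bar + baz" on the line
+//   int x = bar + baz;
+// produces a caret line of
+//           ~~~~~~~~~
+// (measured in display columns, so multi-byte characters stay aligned).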
+
+static std::string buildFixItInsertionLine(unsigned LineNo,
+ const SourceColumnMap &map,
+ ArrayRef<FixItHint> Hints,
+ const SourceManager &SM,
+ const DiagnosticOptions *DiagOpts) {
+ std::string FixItInsertionLine;
+ if (Hints.empty() || !DiagOpts->ShowFixits)
+ return FixItInsertionLine;
+ unsigned PrevHintEndCol = 0;
+
+ for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
+ I != E; ++I) {
+ if (!I->CodeToInsert.empty()) {
+ // We have an insertion hint. Determine whether the inserted
+ // code contains no newlines and is on the same line as the caret.
+ std::pair<FileID, unsigned> HintLocInfo
+ = SM.getDecomposedExpansionLoc(I->RemoveRange.getBegin());
+ if (LineNo == SM.getLineNumber(HintLocInfo.first, HintLocInfo.second) &&
+ StringRef(I->CodeToInsert).find_first_of("\n\r") == StringRef::npos) {
+ // Insert the new code into the line just below the code
+ // that the user wrote.
+ // Note: When modifying this function, be very careful about what is a
+ // "column" (printed width, platform-dependent) and what is a
+ // "byte offset" (SourceManager "column").
+ unsigned HintByteOffset
+ = SM.getColumnNumber(HintLocInfo.first, HintLocInfo.second) - 1;
+
+ // The hint must start inside the source or right at the end
+ assert(HintByteOffset < static_cast<unsigned>(map.bytes())+1);
+ unsigned HintCol = map.byteToContainingColumn(HintByteOffset);
+
+ // If we inserted a long previous hint, push this one forwards, and add
+ // an extra space to show that this is not part of the previous
+ // completion. This is sort of the best we can do when two hints appear
+ // to overlap.
+ //
+ // Note that if this hint is located immediately after the previous
+ // hint, no space will be added, since the location is more important.
+ if (HintCol < PrevHintEndCol)
+ HintCol = PrevHintEndCol + 1;
+
+ // This should NOT use HintByteOffset, because the source might have
+ // Unicode characters in earlier columns.
+ unsigned NewFixItLineSize = FixItInsertionLine.size() +
+ (HintCol - PrevHintEndCol) + I->CodeToInsert.size();
+ if (NewFixItLineSize > FixItInsertionLine.size())
+ FixItInsertionLine.resize(NewFixItLineSize, ' ');
+
+ std::copy(I->CodeToInsert.begin(), I->CodeToInsert.end(),
+ FixItInsertionLine.end() - I->CodeToInsert.size());
+
+ PrevHintEndCol =
+ HintCol + llvm::sys::locale::columnWidth(I->CodeToInsert);
+ } else {
+ FixItInsertionLine.clear();
+ break;
+ }
+ }
+ }
+
+ expandTabs(FixItInsertionLine, DiagOpts->TabStop);
+
+ return FixItInsertionLine;
+}
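+
+// A hypothetical example: for the line "foo(a b)" with a hint inserting
+// "," before 'b', the returned line is mostly spaces with the insertion
+// aligned under its target byte:
+//   source line:  foo(a b)
+//   fix-it line:        ,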
+
+/// \brief Emit a code snippet and caret line.
+///
+/// This routine emits a single line's code snippet and caret line.
+///
+/// \param Loc The location for the caret.
+/// \param Ranges The underlined ranges for this code snippet.
+/// \param Hints The FixIt hints active for this diagnostic.
+void TextDiagnostic::emitSnippetAndCaret(
+ SourceLocation Loc, DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange>& Ranges,
+ ArrayRef<FixItHint> Hints,
+ const SourceManager &SM) {
+ assert(Loc.isValid() && "must have a valid source location here");
+ assert(Loc.isFileID() && "must have a file location here");
+
+ // If caret diagnostics are enabled and we have a location, we want to
+ // emit the caret. However, we only do this if the location moved
+ // from the last diagnostic, if the last diagnostic was a note that
+ // was part of a different warning or error diagnostic, or if the
+ // diagnostic has ranges. We don't want to emit the same caret
+ // multiple times if one loc has multiple diagnostics.
+ if (!DiagOpts->ShowCarets)
+ return;
+ if (Loc == LastLoc && Ranges.empty() && Hints.empty() &&
+ (LastLevel != DiagnosticsEngine::Note || Level == LastLevel))
+ return;
+
+ // Decompose the location into a FID/Offset pair.
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
+ FileID FID = LocInfo.first;
+ unsigned FileOffset = LocInfo.second;
+
+ // Get information about the buffer it points into.
+ bool Invalid = false;
+ const char *BufStart = SM.getBufferData(FID, &Invalid).data();
+ if (Invalid)
+ return;
+
+ unsigned LineNo = SM.getLineNumber(FID, FileOffset);
+ unsigned ColNo = SM.getColumnNumber(FID, FileOffset);
+
+ // Arbitrarily stop showing snippets when the line is too long.
+ static const size_t MaxLineLengthToPrint = 4096;
+ if (ColNo > MaxLineLengthToPrint)
+ return;
+
+ // Rewind from the current position to the start of the line.
+ const char *TokPtr = BufStart+FileOffset;
+ const char *LineStart = TokPtr-ColNo+1; // Column # is 1-based.
+
+ // Compute the line end. Scan forward from the error position to the end of
+ // the line.
+ const char *LineEnd = TokPtr;
+ while (*LineEnd != '\n' && *LineEnd != '\r' && *LineEnd != '\0')
+ ++LineEnd;
+
+ // Arbitrarily stop showing snippets when the line is too long.
+ if (size_t(LineEnd - LineStart) > MaxLineLengthToPrint)
+ return;
+
+ // Copy the line of code into an std::string for ease of manipulation.
+ std::string SourceLine(LineStart, LineEnd);
+
+ // Build the byte to column map.
+ const SourceColumnMap sourceColMap(SourceLine, DiagOpts->TabStop);
+
+ // Create a line for the caret, filled with spaces, spanning the same
+ // number of columns as the line of source code.
+ std::string CaretLine(sourceColMap.columns(), ' ');
+
+ // Highlight all of the characters covered by Ranges with ~ characters.
+ for (SmallVectorImpl<CharSourceRange>::iterator I = Ranges.begin(),
+ E = Ranges.end();
+ I != E; ++I)
+ highlightRange(*I, LineNo, FID, sourceColMap, CaretLine, SM, LangOpts);
+
+ // Next, insert the caret itself.
+ ColNo = sourceColMap.byteToContainingColumn(ColNo-1);
+ if (CaretLine.size()<ColNo+1)
+ CaretLine.resize(ColNo+1, ' ');
+ CaretLine[ColNo] = '^';
+
+ std::string FixItInsertionLine = buildFixItInsertionLine(LineNo,
+ sourceColMap,
+ Hints, SM,
+ DiagOpts.get());
+
+ // If the source line is too long for our terminal, select only the
+ // "interesting" source region within that line.
+ unsigned Columns = DiagOpts->MessageLength;
+ if (Columns)
+ selectInterestingSourceRegion(SourceLine, CaretLine, FixItInsertionLine,
+ Columns, sourceColMap);
+
+ // If we are in -fdiagnostics-print-source-range-info mode, we are trying
+ // to produce easily machine parsable output. Add a space before the
+ // source line and the caret to make it trivial to tell the main diagnostic
+ // line from what the user is intended to see.
+ if (DiagOpts->ShowSourceRanges) {
+ SourceLine = ' ' + SourceLine;
+ CaretLine = ' ' + CaretLine;
+ }
+
+ // Finally, remove any blank spaces from the end of CaretLine.
+ while (CaretLine[CaretLine.size()-1] == ' ')
+ CaretLine.erase(CaretLine.end()-1);
+
+ // Emit what we have computed.
+ emitSnippet(SourceLine);
+
+ if (DiagOpts->ShowColors)
+ OS.changeColor(caretColor, true);
+ OS << CaretLine << '\n';
+ if (DiagOpts->ShowColors)
+ OS.resetColor();
+
+ if (!FixItInsertionLine.empty()) {
+ if (DiagOpts->ShowColors)
+ // Print fixit line in color
+ OS.changeColor(fixitColor, false);
+ if (DiagOpts->ShowSourceRanges)
+ OS << ' ';
+ OS << FixItInsertionLine << '\n';
+ if (DiagOpts->ShowColors)
+ OS.resetColor();
+ }
+
+ // Print out any parseable fixit information requested by the options.
+ emitParseableFixits(Hints, SM);
+}
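+
+// Altogether the emitted block looks roughly like (illustrative):
+//   int x = bar + baz
+//           ~~~~~~~~~^
+//                    ;
+// i.e. the escaped snippet, the caret line with any highlighted ranges,
+// and then the fix-it insertion line, in that order.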
+
+void TextDiagnostic::emitSnippet(StringRef line) {
+ if (line.empty())
+ return;
+
+ size_t i = 0;
+
+ std::string to_print;
+ bool print_reversed = false;
+
+ while (i<line.size()) {
+ std::pair<SmallString<16>,bool> res
+ = printableTextForNextCharacter(line, &i, DiagOpts->TabStop);
+ bool was_printable = res.second;
+
+ if (DiagOpts->ShowColors && was_printable == print_reversed) {
+ if (print_reversed)
+ OS.reverseColor();
+ OS << to_print;
+ to_print.clear();
+ if (DiagOpts->ShowColors)
+ OS.resetColor();
+ }
+
+ print_reversed = !was_printable;
+ to_print += res.first.str();
+ }
+
+ if (print_reversed && DiagOpts->ShowColors)
+ OS.reverseColor();
+ OS << to_print;
+ if (print_reversed && DiagOpts->ShowColors)
+ OS.resetColor();
+
+ OS << '\n';
+}
+
+void TextDiagnostic::emitParseableFixits(ArrayRef<FixItHint> Hints,
+ const SourceManager &SM) {
+ if (!DiagOpts->ShowParseableFixits)
+ return;
+
+ // We follow FixItRewriter's example in not (yet) handling
+ // fix-its in macros.
+ for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
+ I != E; ++I) {
+ if (I->RemoveRange.isInvalid() ||
+ I->RemoveRange.getBegin().isMacroID() ||
+ I->RemoveRange.getEnd().isMacroID())
+ return;
+ }
+
+ for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
+ I != E; ++I) {
+ SourceLocation BLoc = I->RemoveRange.getBegin();
+ SourceLocation ELoc = I->RemoveRange.getEnd();
+
+ std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(BLoc);
+ std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(ELoc);
+
+ // Adjust for token ranges.
+ if (I->RemoveRange.isTokenRange())
+ EInfo.second += Lexer::MeasureTokenLength(ELoc, SM, LangOpts);
+
+ // We specifically do not do word-wrapping or tab-expansion here,
+ // because this is supposed to be easy to parse.
+ PresumedLoc PLoc = SM.getPresumedLoc(BLoc);
+ if (PLoc.isInvalid())
+ break;
+
+ OS << "fix-it:\"";
+ OS.write_escaped(PLoc.getFilename());
+ OS << "\":{" << SM.getLineNumber(BInfo.first, BInfo.second)
+ << ':' << SM.getColumnNumber(BInfo.first, BInfo.second)
+ << '-' << SM.getLineNumber(EInfo.first, EInfo.second)
+ << ':' << SM.getColumnNumber(EInfo.first, EInfo.second)
+ << "}:\"";
+ OS.write_escaped(I->CodeToInsert);
+ OS << "\"\n";
+ }
+}
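+
+// Each hint is emitted on a single machine-readable line, e.g.:
+//   fix-it:"t.cpp":{7:25-7:29}:"foo"
+// i.e. replace the half-open column range [25,29) on line 7 of t.cpp
+// with the text "foo" (file name and values here are hypothetical).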
diff --git a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticBuffer.cpp b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticBuffer.cpp
new file mode 100644
index 0000000..d49e983
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticBuffer.cpp
@@ -0,0 +1,63 @@
+//===--- TextDiagnosticBuffer.cpp - Buffer Text Diagnostics ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a concrete diagnostic client, which buffers the diagnostic messages.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/TextDiagnosticBuffer.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+
+/// HandleDiagnostic - Store the errors, warnings, and notes that are
+/// reported.
+///
+void TextDiagnosticBuffer::HandleDiagnostic(DiagnosticsEngine::Level Level,
+ const Diagnostic &Info) {
+ // Default implementation (Warnings/errors count).
+ DiagnosticConsumer::HandleDiagnostic(Level, Info);
+
+ SmallString<100> Buf;
+ Info.FormatDiagnostic(Buf);
+ switch (Level) {
+ default: llvm_unreachable(
+ "Diagnostic not handled during diagnostic buffering!");
+ case DiagnosticsEngine::Note:
+ Notes.emplace_back(Info.getLocation(), Buf.str());
+ break;
+ case DiagnosticsEngine::Warning:
+ Warnings.emplace_back(Info.getLocation(), Buf.str());
+ break;
+ case DiagnosticsEngine::Remark:
+ Remarks.emplace_back(Info.getLocation(), Buf.str());
+ break;
+ case DiagnosticsEngine::Error:
+ case DiagnosticsEngine::Fatal:
+ Errors.emplace_back(Info.getLocation(), Buf.str());
+ break;
+ }
+}
+
+void TextDiagnosticBuffer::FlushDiagnostics(DiagnosticsEngine &Diags) const {
+ // FIXME: Flush the diagnostics in order.
+ for (const_iterator it = err_begin(), ie = err_end(); it != ie; ++it)
+ Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Error, "%0"))
+ << it->second;
+ for (const_iterator it = warn_begin(), ie = warn_end(); it != ie; ++it)
+ Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Warning, "%0"))
+ << it->second;
+ for (const_iterator it = remark_begin(), ie = remark_end(); it != ie; ++it)
+ Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Remark, "%0"))
+ << it->second;
+ for (const_iterator it = note_begin(), ie = note_end(); it != ie; ++it)
+ Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Note, "%0"))
+ << it->second;
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp
new file mode 100644
index 0000000..66b46b7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp
@@ -0,0 +1,161 @@
+//===--- TextDiagnosticPrinter.cpp - Diagnostic Printer -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This diagnostic client prints out the diagnostic messages it receives.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/TextDiagnostic.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+using namespace clang;
+
+TextDiagnosticPrinter::TextDiagnosticPrinter(raw_ostream &os,
+ DiagnosticOptions *diags,
+ bool _OwnsOutputStream)
+ : OS(os), DiagOpts(diags),
+ OwnsOutputStream(_OwnsOutputStream) {
+}
+
+TextDiagnosticPrinter::~TextDiagnosticPrinter() {
+ if (OwnsOutputStream)
+ delete &OS;
+}
+
+void TextDiagnosticPrinter::BeginSourceFile(const LangOptions &LO,
+ const Preprocessor *PP) {
+ // Build the TextDiagnostic utility.
+ TextDiag.reset(new TextDiagnostic(OS, LO, &*DiagOpts));
+}
+
+void TextDiagnosticPrinter::EndSourceFile() {
+ TextDiag.reset();
+}
+
+/// \brief Print any diagnostic option information to a raw_ostream.
+///
+/// This implements all of the logic for adding diagnostic options to a message
+/// (via OS). Each relevant option is comma separated and all are enclosed in
+/// the standard bracketing: " [...]".
+static void printDiagnosticOptions(raw_ostream &OS,
+ DiagnosticsEngine::Level Level,
+ const Diagnostic &Info,
+ const DiagnosticOptions &DiagOpts) {
+ bool Started = false;
+ if (DiagOpts.ShowOptionNames) {
+ // Handle special cases for non-warnings early.
+ if (Info.getID() == diag::fatal_too_many_errors) {
+ OS << " [-ferror-limit=]";
+ return;
+ }
+
+ // The code below is somewhat fragile because we are essentially trying to
+ // report to the user what happened by inferring what the diagnostic engine
+ // did. Eventually it might make more sense to have the diagnostic engine
+ // include some "why" information in the diagnostic.
+
+ // If this is a warning which has been mapped to an error by the user (as
+ // inferred by checking whether the default mapping is to an error) then
+ // flag it as such. Note that diagnostics could also have been mapped by a
+ // pragma, but we don't currently have a way to distinguish this.
+ if (Level == DiagnosticsEngine::Error &&
+ DiagnosticIDs::isBuiltinWarningOrExtension(Info.getID()) &&
+ !DiagnosticIDs::isDefaultMappingAsError(Info.getID())) {
+ OS << " [-Werror";
+ Started = true;
+ }
+
+ StringRef Opt = DiagnosticIDs::getWarningOptionForDiag(Info.getID());
+ if (!Opt.empty()) {
+ OS << (Started ? "," : " [")
+ << (Level == DiagnosticsEngine::Remark ? "-R" : "-W") << Opt;
+ StringRef OptValue = Info.getDiags()->getFlagValue();
+ if (!OptValue.empty())
+ OS << "=" << OptValue;
+ Started = true;
+ }
+ }
+
+ // If the user wants to see category information, include it too.
+ if (DiagOpts.ShowCategories) {
+ unsigned DiagCategory =
+ DiagnosticIDs::getCategoryNumberForDiag(Info.getID());
+ if (DiagCategory) {
+ OS << (Started ? "," : " [");
+ Started = true;
+ if (DiagOpts.ShowCategories == 1)
+ OS << DiagCategory;
+ else {
+ assert(DiagOpts.ShowCategories == 2 && "Invalid ShowCategories value");
+ OS << DiagnosticIDs::getCategoryNameFromID(DiagCategory);
+ }
+ }
+ }
+ if (Started)
+ OS << ']';
+}
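+
+// Illustrative suffixes this appends to the rendered message:
+//   " [-Wunused-variable]"
+//   " [-Werror,-Wsign-compare]"   (a warning the user mapped to an error)
+//   " [-Wunused,Semantic Issue]"  (with ShowCategories set to 2)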
+
+void TextDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
+ const Diagnostic &Info) {
+ // Default implementation (Warnings/errors count).
+ DiagnosticConsumer::HandleDiagnostic(Level, Info);
+
+ // Render the diagnostic message into a temporary buffer eagerly. We'll use
+ // this later as we print out the diagnostic to the terminal.
+ SmallString<100> OutStr;
+ Info.FormatDiagnostic(OutStr);
+
+ llvm::raw_svector_ostream DiagMessageStream(OutStr);
+ printDiagnosticOptions(DiagMessageStream, Level, Info, *DiagOpts);
+
+ // Keeps track of the starting position of the location
+ // information (e.g., "foo.c:10:4:") that precedes the error
+ // message. We use this information to determine how long the
+ // file+line+column number prefix is.
+ uint64_t StartOfLocationInfo = OS.tell();
+
+ if (!Prefix.empty())
+ OS << Prefix << ": ";
+
+ // Use a dedicated, simpler path for diagnostics without a valid location.
+ // This is important as if the location is missing, we may be emitting
+ // diagnostics in a context that lacks language options, a source manager, or
+ // other infrastructure necessary when emitting more rich diagnostics.
+ if (!Info.getLocation().isValid()) {
+ TextDiagnostic::printDiagnosticLevel(OS, Level, DiagOpts->ShowColors,
+ DiagOpts->CLFallbackMode);
+ TextDiagnostic::printDiagnosticMessage(
+ OS, /*IsSupplemental=*/Level == DiagnosticsEngine::Note,
+ DiagMessageStream.str(), OS.tell() - StartOfLocationInfo,
+ DiagOpts->MessageLength, DiagOpts->ShowColors);
+ OS.flush();
+ return;
+ }
+
+ // Assert that the rest of our infrastructure is set up properly.
+ assert(DiagOpts && "Unexpected diagnostic without options set");
+ assert(Info.hasSourceManager() &&
+ "Unexpected diagnostic with no source manager");
+ assert(TextDiag && "Unexpected diagnostic outside source file processing");
+
+ TextDiag->emitDiagnostic(Info.getLocation(), Level, DiagMessageStream.str(),
+ Info.getRanges(),
+ Info.getFixItHints(),
+ &Info.getSourceManager());
+
+ OS.flush();
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp b/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
new file mode 100644
index 0000000..7331d77
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
@@ -0,0 +1,921 @@
+//===---- VerifyDiagnosticConsumer.cpp - Verifying Diagnostic Client ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a concrete diagnostic client which buffers diagnostics and
+// verifies them against expected-* directives embedded in the source.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/VerifyDiagnosticConsumer.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/TextDiagnosticBuffer.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+typedef VerifyDiagnosticConsumer::Directive Directive;
+typedef VerifyDiagnosticConsumer::DirectiveList DirectiveList;
+typedef VerifyDiagnosticConsumer::ExpectedData ExpectedData;
+
+VerifyDiagnosticConsumer::VerifyDiagnosticConsumer(DiagnosticsEngine &Diags_)
+ : Diags(Diags_),
+ PrimaryClient(Diags.getClient()), PrimaryClientOwner(Diags.takeClient()),
+ Buffer(new TextDiagnosticBuffer()), CurrentPreprocessor(nullptr),
+ LangOpts(nullptr), SrcManager(nullptr), ActiveSourceFiles(0),
+ Status(HasNoDirectives)
+{
+ if (Diags.hasSourceManager())
+ setSourceManager(Diags.getSourceManager());
+}
+
+VerifyDiagnosticConsumer::~VerifyDiagnosticConsumer() {
+ assert(!ActiveSourceFiles && "Incomplete parsing of source files!");
+ assert(!CurrentPreprocessor && "CurrentPreprocessor should be invalid!");
+ SrcManager = nullptr;
+ CheckDiagnostics();
+ Diags.takeClient().release();
+}
+
+#ifndef NDEBUG
+namespace {
+class VerifyFileTracker : public PPCallbacks {
+ VerifyDiagnosticConsumer &Verify;
+ SourceManager &SM;
+
+public:
+ VerifyFileTracker(VerifyDiagnosticConsumer &Verify, SourceManager &SM)
+ : Verify(Verify), SM(SM) { }
+
+ /// \brief Hook into the preprocessor and update the list of parsed
+ /// files when the preprocessor indicates a new file is entered.
+ void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID) override {
+ Verify.UpdateParsedFileStatus(SM, SM.getFileID(Loc),
+ VerifyDiagnosticConsumer::IsParsed);
+ }
+};
+} // End anonymous namespace.
+#endif
+
+// DiagnosticConsumer interface.
+
+void VerifyDiagnosticConsumer::BeginSourceFile(const LangOptions &LangOpts,
+ const Preprocessor *PP) {
+ // Attach comment handler on first invocation.
+ if (++ActiveSourceFiles == 1) {
+ if (PP) {
+ CurrentPreprocessor = PP;
+ this->LangOpts = &LangOpts;
+ setSourceManager(PP->getSourceManager());
+ const_cast<Preprocessor*>(PP)->addCommentHandler(this);
+#ifndef NDEBUG
+ // Debug build tracks parsed files.
+ const_cast<Preprocessor*>(PP)->addPPCallbacks(
+ llvm::make_unique<VerifyFileTracker>(*this, *SrcManager));
+#endif
+ }
+ }
+
+ assert((!PP || CurrentPreprocessor == PP) && "Preprocessor changed!");
+ PrimaryClient->BeginSourceFile(LangOpts, PP);
+}
+
+void VerifyDiagnosticConsumer::EndSourceFile() {
+ assert(ActiveSourceFiles && "No active source files!");
+ PrimaryClient->EndSourceFile();
+
+ // Detach comment handler once last active source file completed.
+ if (--ActiveSourceFiles == 0) {
+ if (CurrentPreprocessor)
+ const_cast<Preprocessor*>(CurrentPreprocessor)->removeCommentHandler(this);
+
+ // Check diagnostics once last file completed.
+ CheckDiagnostics();
+ CurrentPreprocessor = nullptr;
+ LangOpts = nullptr;
+ }
+}
+
+void VerifyDiagnosticConsumer::HandleDiagnostic(
+ DiagnosticsEngine::Level DiagLevel, const Diagnostic &Info) {
+ if (Info.hasSourceManager()) {
+ // If this diagnostic is for a different source manager, ignore it.
+ if (SrcManager && &Info.getSourceManager() != SrcManager)
+ return;
+
+ setSourceManager(Info.getSourceManager());
+ }
+
+#ifndef NDEBUG
+ // Debug build tracks unparsed files for possible
+ // unparsed expected-* directives.
+ if (SrcManager) {
+ SourceLocation Loc = Info.getLocation();
+ if (Loc.isValid()) {
+ ParsedStatus PS = IsUnparsed;
+
+ Loc = SrcManager->getExpansionLoc(Loc);
+ FileID FID = SrcManager->getFileID(Loc);
+
+ const FileEntry *FE = SrcManager->getFileEntryForID(FID);
+ if (FE && CurrentPreprocessor && SrcManager->isLoadedFileID(FID)) {
+ // If the file is a modules header file it shall not be parsed
+ // for expected-* directives.
+ HeaderSearch &HS = CurrentPreprocessor->getHeaderSearchInfo();
+ if (HS.findModuleForHeader(FE))
+ PS = IsUnparsedNoDirectives;
+ }
+
+ UpdateParsedFileStatus(*SrcManager, FID, PS);
+ }
+ }
+#endif
+
+ // Send the diagnostic to the buffer; we will check it once we reach the
+ // end of the source file (or are destructed).
+ Buffer->HandleDiagnostic(DiagLevel, Info);
+}
+
+//===----------------------------------------------------------------------===//
+// Checking diagnostics implementation.
+//===----------------------------------------------------------------------===//
+
+typedef TextDiagnosticBuffer::DiagList DiagList;
+typedef TextDiagnosticBuffer::const_iterator const_diag_iterator;
+
+namespace {
+
+/// StandardDirective - Directive with string matching.
+///
+class StandardDirective : public Directive {
+public:
+ StandardDirective(SourceLocation DirectiveLoc, SourceLocation DiagnosticLoc,
+ bool MatchAnyLine, StringRef Text, unsigned Min,
+ unsigned Max)
+ : Directive(DirectiveLoc, DiagnosticLoc, MatchAnyLine, Text, Min, Max) { }
+
+ bool isValid(std::string &Error) override {
+ // All strings are considered valid, even empty ones.
+ return true;
+ }
+
+ bool match(StringRef S) override {
+ return S.find(Text) != StringRef::npos;
+ }
+};
+
+/// RegexDirective - Directive with regular-expression matching.
+///
+class RegexDirective : public Directive {
+public:
+ RegexDirective(SourceLocation DirectiveLoc, SourceLocation DiagnosticLoc,
+ bool MatchAnyLine, StringRef Text, unsigned Min, unsigned Max,
+ StringRef RegexStr)
+ : Directive(DirectiveLoc, DiagnosticLoc, MatchAnyLine, Text, Min, Max),
+ Regex(RegexStr) { }
+
+ bool isValid(std::string &Error) override {
+ return Regex.isValid(Error);
+ }
+
+ bool match(StringRef S) override {
+ return Regex.match(S);
+ }
+
+private:
+ llvm::Regex Regex;
+};
+
+class ParseHelper
+{
+public:
+ ParseHelper(StringRef S)
+ : Begin(S.begin()), End(S.end()), C(Begin), P(Begin), PEnd(nullptr) {}
+
+ // Return true if string literal is next.
+ bool Next(StringRef S) {
+ P = C;
+ PEnd = C + S.size();
+ if (PEnd > End)
+ return false;
+ return !memcmp(P, S.data(), S.size());
+ }
+
+ // Return true if number is next.
+ // Output N only if number is next.
+ bool Next(unsigned &N) {
+ unsigned TMP = 0;
+ P = C;
+ for (; P < End && P[0] >= '0' && P[0] <= '9'; ++P) {
+ TMP *= 10;
+ TMP += P[0] - '0';
+ }
+ if (P == C)
+ return false;
+ PEnd = P;
+ N = TMP;
+ return true;
+ }
+
+ // Return true if string literal is found.
+ // When true, P marks begin-position of S in content.
+ bool Search(StringRef S, bool EnsureStartOfWord = false) {
+ do {
+ P = std::search(C, End, S.begin(), S.end());
+ PEnd = P + S.size();
+ if (P == End)
+ break;
+ if (!EnsureStartOfWord
+ // Check if string literal starts a new word.
+ || P == Begin || isWhitespace(P[-1])
+ // Or it could be preceded by the start of a comment.
+ || (P > (Begin + 1) && (P[-1] == '/' || P[-1] == '*')
+ && P[-2] == '/'))
+ return true;
+ // Otherwise, skip and search again.
+ } while (Advance());
+ return false;
+ }
+
+ // Return true if a CloseBrace that closes the OpenBrace at the current nest
+ // level is found. When true, P marks begin-position of CloseBrace.
+ bool SearchClosingBrace(StringRef OpenBrace, StringRef CloseBrace) {
+ unsigned Depth = 1;
+ P = C;
+ while (P < End) {
+ StringRef S(P, End - P);
+ if (S.startswith(OpenBrace)) {
+ ++Depth;
+ P += OpenBrace.size();
+ } else if (S.startswith(CloseBrace)) {
+ --Depth;
+ if (Depth == 0) {
+ PEnd = P + CloseBrace.size();
+ return true;
+ }
+ P += CloseBrace.size();
+ } else {
+ ++P;
+ }
+ }
+ return false;
+ }
+
+ // Advance 1-past previous next/search.
+ // Behavior is undefined if previous next/search failed.
+ bool Advance() {
+ C = PEnd;
+ return C < End;
+ }
+
+ // Skip zero or more whitespace.
+ void SkipWhitespace() {
+ for (; C < End && isWhitespace(*C); ++C)
+ ;
+ }
+
+ // Return true if EOF reached.
+ bool Done() {
+ return !(C < End);
+ }
+
+ const char * const Begin; // beginning of expected content
+ const char * const End; // end of expected content (1-past)
+ const char *C; // position of next char in content
+ const char *P;
+
+private:
+ const char *PEnd; // previous next/search subject end (1-past)
+};
+
+} // end anonymous namespace
+
+/// ParseDirective - Go through the comment and see if it indicates expected
+/// diagnostics. If so, then put them in the appropriate directive list.
+///
+/// Returns true if any valid directives were found.
+static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
+ Preprocessor *PP, SourceLocation Pos,
+ VerifyDiagnosticConsumer::DirectiveStatus &Status) {
+ DiagnosticsEngine &Diags = PP ? PP->getDiagnostics() : SM.getDiagnostics();
+
+ // A single comment may contain multiple directives.
+ bool FoundDirective = false;
+ for (ParseHelper PH(S); !PH.Done();) {
+ // Search for token: expected
+ if (!PH.Search("expected", true))
+ break;
+ PH.Advance();
+
+ // Next token: -
+ if (!PH.Next("-"))
+ continue;
+ PH.Advance();
+
+ // Next token: { error | warning | note }
+ DirectiveList *DL = nullptr;
+ if (PH.Next("error"))
+ DL = ED ? &ED->Errors : nullptr;
+ else if (PH.Next("warning"))
+ DL = ED ? &ED->Warnings : nullptr;
+ else if (PH.Next("remark"))
+ DL = ED ? &ED->Remarks : nullptr;
+ else if (PH.Next("note"))
+ DL = ED ? &ED->Notes : nullptr;
+ else if (PH.Next("no-diagnostics")) {
+ if (Status == VerifyDiagnosticConsumer::HasOtherExpectedDirectives)
+ Diags.Report(Pos, diag::err_verify_invalid_no_diags)
+ << /*IsExpectedNoDiagnostics=*/true;
+ else
+ Status = VerifyDiagnosticConsumer::HasExpectedNoDiagnostics;
+ continue;
+ } else
+ continue;
+ PH.Advance();
+
+ if (Status == VerifyDiagnosticConsumer::HasExpectedNoDiagnostics) {
+ Diags.Report(Pos, diag::err_verify_invalid_no_diags)
+ << /*IsExpectedNoDiagnostics=*/false;
+ continue;
+ }
+ Status = VerifyDiagnosticConsumer::HasOtherExpectedDirectives;
+
+ // If a directive has been found but we're not interested
+ // in storing the directive information, return now.
+ if (!DL)
+ return true;
+
+ // Default directive kind.
+ bool RegexKind = false;
+ const char* KindStr = "string";
+
+ // Next optional token: -
+ if (PH.Next("-re")) {
+ PH.Advance();
+ RegexKind = true;
+ KindStr = "regex";
+ }
+
+ // Next optional token: @
+ SourceLocation ExpectedLoc;
+ bool MatchAnyLine = false;
+ if (!PH.Next("@")) {
+ ExpectedLoc = Pos;
+ } else {
+ PH.Advance();
+ unsigned Line = 0;
+ bool FoundPlus = PH.Next("+");
+ if (FoundPlus || PH.Next("-")) {
+ // Relative to current line.
+ PH.Advance();
+ bool Invalid = false;
+ unsigned ExpectedLine = SM.getSpellingLineNumber(Pos, &Invalid);
+ if (!Invalid && PH.Next(Line) && (FoundPlus || Line < ExpectedLine)) {
+ if (FoundPlus) ExpectedLine += Line;
+ else ExpectedLine -= Line;
+ ExpectedLoc = SM.translateLineCol(SM.getFileID(Pos), ExpectedLine, 1);
+ }
+ } else if (PH.Next(Line)) {
+ // Absolute line number.
+ if (Line > 0)
+ ExpectedLoc = SM.translateLineCol(SM.getFileID(Pos), Line, 1);
+ } else if (PP && PH.Search(":")) {
+ // Specific source file.
+ StringRef Filename(PH.C, PH.P-PH.C);
+ PH.Advance();
+
+ // Lookup file via Preprocessor, like a #include.
+ const DirectoryLookup *CurDir;
+ const FileEntry *FE =
+ PP->LookupFile(Pos, Filename, false, nullptr, nullptr, CurDir,
+ nullptr, nullptr, nullptr);
+ if (!FE) {
+ Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
+ diag::err_verify_missing_file) << Filename << KindStr;
+ continue;
+ }
+
+ if (SM.translateFile(FE).isInvalid())
+ SM.createFileID(FE, Pos, SrcMgr::C_User);
+
+ if (PH.Next(Line) && Line > 0)
+ ExpectedLoc = SM.translateFileLineCol(FE, Line, 1);
+ else if (PH.Next("*")) {
+ MatchAnyLine = true;
+ ExpectedLoc = SM.translateFileLineCol(FE, 1, 1);
+ }
+ }
+
+ if (ExpectedLoc.isInvalid()) {
+ Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
+ diag::err_verify_missing_line) << KindStr;
+ continue;
+ }
+ PH.Advance();
+ }
+
+ // Skip optional whitespace.
+ PH.SkipWhitespace();
+
+ // Next optional token: positive integer or a '+'.
+ unsigned Min = 1;
+ unsigned Max = 1;
+ if (PH.Next(Min)) {
+ PH.Advance();
+ // A positive integer can be followed by a '+' meaning min
+ // or more, or by a '-' meaning a range from min to max.
+ if (PH.Next("+")) {
+ Max = Directive::MaxCount;
+ PH.Advance();
+ } else if (PH.Next("-")) {
+ PH.Advance();
+ if (!PH.Next(Max) || Max < Min) {
+ Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
+ diag::err_verify_invalid_range) << KindStr;
+ continue;
+ }
+ PH.Advance();
+ } else {
+ Max = Min;
+ }
+ } else if (PH.Next("+")) {
+ // '+' on its own means "1 or more".
+ Max = Directive::MaxCount;
+ PH.Advance();
+ }
+
+ // Skip optional whitespace.
+ PH.SkipWhitespace();
+
+ // Next token: {{
+ if (!PH.Next("{{")) {
+ Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
+ diag::err_verify_missing_start) << KindStr;
+ continue;
+ }
+ PH.Advance();
+ const char* const ContentBegin = PH.C; // mark content begin
+
+ // Search for token: }}
+ if (!PH.SearchClosingBrace("{{", "}}")) {
+ Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
+ diag::err_verify_missing_end) << KindStr;
+ continue;
+ }
+ const char* const ContentEnd = PH.P; // mark content end
+ PH.Advance();
+
+ // Build directive text; convert \n to newlines.
+ std::string Text;
+ StringRef NewlineStr = "\\n";
+ StringRef Content(ContentBegin, ContentEnd-ContentBegin);
+ size_t CPos = 0;
+ size_t FPos;
+ while ((FPos = Content.find(NewlineStr, CPos)) != StringRef::npos) {
+ Text += Content.substr(CPos, FPos-CPos);
+ Text += '\n';
+ CPos = FPos + NewlineStr.size();
+ }
+    if (Text.empty())
+      Text.assign(ContentBegin, ContentEnd);
+    else
+      Text += Content.substr(CPos); // keep any text after the last "\n"
+
+ // Check that regex directives contain at least one regex.
+ if (RegexKind && Text.find("{{") == StringRef::npos) {
+ Diags.Report(Pos.getLocWithOffset(ContentBegin-PH.Begin),
+ diag::err_verify_missing_regex) << Text;
+ return false;
+ }
+
+ // Construct new directive.
+ std::unique_ptr<Directive> D = Directive::create(
+ RegexKind, Pos, ExpectedLoc, MatchAnyLine, Text, Min, Max);
+
+ std::string Error;
+ if (D->isValid(Error)) {
+ DL->push_back(std::move(D));
+ FoundDirective = true;
+ } else {
+ Diags.Report(Pos.getLocWithOffset(ContentBegin-PH.Begin),
+ diag::err_verify_invalid_content)
+ << KindStr << Error;
+ }
+ }
+
+ return FoundDirective;
+}
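+
+// Editor's illustration (not part of the original source): examples of the
+// directive grammar accepted by ParseDirective above.
+//
+//   // expected-error {{use of undeclared identifier 'x'}}
+//   // expected-warning@+1 {{unused variable}}            (line relative to here)
+//   // expected-note@other.h:12 {{declared here}}         (named file and line)
+//   // expected-error-re {{invalid{{( or unknown)?}} token}}   (regex chunk)
+//   // expected-warning 2-4 {{deprecated}}                (between 2 and 4 times)
+//   // expected-no-diagnostics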
+
+/// HandleComment - Hook into the preprocessor and extract comments containing
+/// expected errors and warnings.
+bool VerifyDiagnosticConsumer::HandleComment(Preprocessor &PP,
+ SourceRange Comment) {
+ SourceManager &SM = PP.getSourceManager();
+
+ // If this comment is for a different source manager, ignore it.
+ if (SrcManager && &SM != SrcManager)
+ return false;
+
+ SourceLocation CommentBegin = Comment.getBegin();
+
+ const char *CommentRaw = SM.getCharacterData(CommentBegin);
+ StringRef C(CommentRaw, SM.getCharacterData(Comment.getEnd()) - CommentRaw);
+
+ if (C.empty())
+ return false;
+
+ // Fold any "\<EOL>" sequences
+ size_t loc = C.find('\\');
+ if (loc == StringRef::npos) {
+ ParseDirective(C, &ED, SM, &PP, CommentBegin, Status);
+ return false;
+ }
+
+ std::string C2;
+ C2.reserve(C.size());
+
+ for (size_t last = 0;; loc = C.find('\\', last)) {
+ if (loc == StringRef::npos || loc == C.size()) {
+ C2 += C.substr(last);
+ break;
+ }
+ C2 += C.substr(last, loc-last);
+ last = loc + 1;
+
+ if (C[last] == '\n' || C[last] == '\r') {
+ ++last;
+
+      // Skip the second char of a \r\n or \n\r pair, but not \n\n or \r\r.
+ if (last < C.size())
+ if (C[last] == '\n' || C[last] == '\r')
+ if (C[last] != C[last-1])
+ ++last;
+ } else {
+ // This was just a normal backslash.
+ C2 += '\\';
+ }
+ }
+
+ if (!C2.empty())
+ ParseDirective(C2, &ED, SM, &PP, CommentBegin, Status);
+ return false;
+}
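+
+// Editor's illustration (not part of the original source): the folding above
+// rejoins directives split with a trailing backslash, so
+//
+//   // expected-warn\
+//   ing {{unused variable}}
+//
+// is parsed as the single directive "expected-warning {{unused variable}}".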
+
+#ifndef NDEBUG
+/// \brief Lex the specified source file to determine whether it contains
+/// any expected-* directives. As a Lexer is used rather than a full-blown
+/// Preprocessor, directives inside skipped #if blocks will still be found.
+///
+/// \return true if any directives were found.
+static bool findDirectives(SourceManager &SM, FileID FID,
+ const LangOptions &LangOpts) {
+ // Create a raw lexer to pull all the comments out of FID.
+ if (FID.isInvalid())
+ return false;
+
+ // Create a lexer to lex all the tokens of the main file in raw mode.
+ const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
+ Lexer RawLex(FID, FromFile, SM, LangOpts);
+
+  // Return comments as tokens; this is how we find expected diagnostics.
+ RawLex.SetCommentRetentionState(true);
+
+ Token Tok;
+ Tok.setKind(tok::comment);
+ VerifyDiagnosticConsumer::DirectiveStatus Status =
+ VerifyDiagnosticConsumer::HasNoDirectives;
+ while (Tok.isNot(tok::eof)) {
+ RawLex.LexFromRawLexer(Tok);
+ if (!Tok.is(tok::comment)) continue;
+
+ std::string Comment = RawLex.getSpelling(Tok, SM, LangOpts);
+ if (Comment.empty()) continue;
+
+ // Find first directive.
+ if (ParseDirective(Comment, nullptr, SM, nullptr, Tok.getLocation(),
+ Status))
+ return true;
+ }
+ return false;
+}
+#endif // !NDEBUG
+
+/// \brief Takes a list of diagnostics that have been generated but not matched
+/// by an expected-* directive and produces a diagnostic to the user from this.
+static unsigned PrintUnexpected(DiagnosticsEngine &Diags,
+                                SourceManager *SourceMgr,
+ const_diag_iterator diag_begin,
+ const_diag_iterator diag_end,
+ const char *Kind) {
+ if (diag_begin == diag_end) return 0;
+
+ SmallString<256> Fmt;
+ llvm::raw_svector_ostream OS(Fmt);
+ for (const_diag_iterator I = diag_begin, E = diag_end; I != E; ++I) {
+ if (I->first.isInvalid() || !SourceMgr)
+ OS << "\n (frontend)";
+ else {
+ OS << "\n ";
+ if (const FileEntry *File = SourceMgr->getFileEntryForID(
+ SourceMgr->getFileID(I->first)))
+ OS << " File " << File->getName();
+ OS << " Line " << SourceMgr->getPresumedLineNumber(I->first);
+ }
+ OS << ": " << I->second;
+ }
+
+ Diags.Report(diag::err_verify_inconsistent_diags).setForceEmit()
+ << Kind << /*Unexpected=*/true << OS.str();
+ return std::distance(diag_begin, diag_end);
+}
+
+/// \brief Takes a list of diagnostics that were expected to have been generated
+/// but were not and produces a diagnostic to the user from this.
+static unsigned PrintExpected(DiagnosticsEngine &Diags,
+ SourceManager &SourceMgr,
+ std::vector<Directive *> &DL, const char *Kind) {
+ if (DL.empty())
+ return 0;
+
+ SmallString<256> Fmt;
+ llvm::raw_svector_ostream OS(Fmt);
+ for (auto *DirPtr : DL) {
+ Directive &D = *DirPtr;
+ OS << "\n File " << SourceMgr.getFilename(D.DiagnosticLoc);
+ if (D.MatchAnyLine)
+ OS << " Line *";
+ else
+ OS << " Line " << SourceMgr.getPresumedLineNumber(D.DiagnosticLoc);
+ if (D.DirectiveLoc != D.DiagnosticLoc)
+ OS << " (directive at "
+ << SourceMgr.getFilename(D.DirectiveLoc) << ':'
+ << SourceMgr.getPresumedLineNumber(D.DirectiveLoc) << ')';
+ OS << ": " << D.Text;
+ }
+
+ Diags.Report(diag::err_verify_inconsistent_diags).setForceEmit()
+ << Kind << /*Unexpected=*/false << OS.str();
+ return DL.size();
+}
+
+/// \brief Determine whether two source locations come from the same file.
+static bool IsFromSameFile(SourceManager &SM, SourceLocation DirectiveLoc,
+ SourceLocation DiagnosticLoc) {
+ while (DiagnosticLoc.isMacroID())
+ DiagnosticLoc = SM.getImmediateMacroCallerLoc(DiagnosticLoc);
+
+ if (SM.isWrittenInSameFile(DirectiveLoc, DiagnosticLoc))
+ return true;
+
+ const FileEntry *DiagFile = SM.getFileEntryForID(SM.getFileID(DiagnosticLoc));
+ if (!DiagFile && SM.isWrittenInMainFile(DirectiveLoc))
+ return true;
+
+ return (DiagFile == SM.getFileEntryForID(SM.getFileID(DirectiveLoc)));
+}
+
+/// CheckLists - Compare expected to seen diagnostic lists and return the
+/// difference between them.
+///
+static unsigned CheckLists(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
+ const char *Label,
+ DirectiveList &Left,
+ const_diag_iterator d2_begin,
+ const_diag_iterator d2_end,
+ bool IgnoreUnexpected) {
+ std::vector<Directive *> LeftOnly;
+ DiagList Right(d2_begin, d2_end);
+
+ for (auto &Owner : Left) {
+ Directive &D = *Owner;
+ unsigned LineNo1 = SourceMgr.getPresumedLineNumber(D.DiagnosticLoc);
+
+ for (unsigned i = 0; i < D.Max; ++i) {
+ DiagList::iterator II, IE;
+ for (II = Right.begin(), IE = Right.end(); II != IE; ++II) {
+ if (!D.MatchAnyLine) {
+ unsigned LineNo2 = SourceMgr.getPresumedLineNumber(II->first);
+ if (LineNo1 != LineNo2)
+ continue;
+ }
+
+ if (!IsFromSameFile(SourceMgr, D.DiagnosticLoc, II->first))
+ continue;
+
+ const std::string &RightText = II->second;
+ if (D.match(RightText))
+ break;
+ }
+ if (II == IE) {
+ // Not found.
+ if (i >= D.Min) break;
+ LeftOnly.push_back(&D);
+ } else {
+ // Found. The same cannot be found twice.
+ Right.erase(II);
+ }
+ }
+ }
+ // Now all that's left in Right are those that were not matched.
+ unsigned num = PrintExpected(Diags, SourceMgr, LeftOnly, Label);
+ if (!IgnoreUnexpected)
+ num += PrintUnexpected(Diags, &SourceMgr, Right.begin(), Right.end(), Label);
+ return num;
+}
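+
+// Worked example (editor's note): for a directive with Min == 2 and Max == 3,
+// the loop above consumes up to three matching entries from Right; every
+// missing occurrence below Min adds the directive to LeftOnly for
+// PrintExpected, and whatever remains in Right afterwards is reported by
+// PrintUnexpected (unless that diagnostic level is ignored).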
+
+/// CheckResults - This compares the expected results to those that
+/// were actually reported. It emits any discrepancies and returns the number
+/// of problems found.
+///
+static unsigned CheckResults(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
+ const TextDiagnosticBuffer &Buffer,
+ ExpectedData &ED) {
+ // We want to capture the delta between what was expected and what was
+ // seen.
+ //
+ // Expected \ Seen - set expected but not seen
+ // Seen \ Expected - set seen but not expected
+ unsigned NumProblems = 0;
+
+ const DiagnosticLevelMask DiagMask =
+ Diags.getDiagnosticOptions().getVerifyIgnoreUnexpected();
+
+ // See if there are error mismatches.
+ NumProblems += CheckLists(Diags, SourceMgr, "error", ED.Errors,
+ Buffer.err_begin(), Buffer.err_end(),
+ bool(DiagnosticLevelMask::Error & DiagMask));
+
+ // See if there are warning mismatches.
+ NumProblems += CheckLists(Diags, SourceMgr, "warning", ED.Warnings,
+ Buffer.warn_begin(), Buffer.warn_end(),
+ bool(DiagnosticLevelMask::Warning & DiagMask));
+
+ // See if there are remark mismatches.
+ NumProblems += CheckLists(Diags, SourceMgr, "remark", ED.Remarks,
+ Buffer.remark_begin(), Buffer.remark_end(),
+ bool(DiagnosticLevelMask::Remark & DiagMask));
+
+ // See if there are note mismatches.
+ NumProblems += CheckLists(Diags, SourceMgr, "note", ED.Notes,
+ Buffer.note_begin(), Buffer.note_end(),
+ bool(DiagnosticLevelMask::Note & DiagMask));
+
+ return NumProblems;
+}
+
+void VerifyDiagnosticConsumer::UpdateParsedFileStatus(SourceManager &SM,
+ FileID FID,
+ ParsedStatus PS) {
+ // Check SourceManager hasn't changed.
+ setSourceManager(SM);
+
+#ifndef NDEBUG
+ if (FID.isInvalid())
+ return;
+
+ const FileEntry *FE = SM.getFileEntryForID(FID);
+
+ if (PS == IsParsed) {
+ // Move the FileID from the unparsed set to the parsed set.
+ UnparsedFiles.erase(FID);
+ ParsedFiles.insert(std::make_pair(FID, FE));
+ } else if (!ParsedFiles.count(FID) && !UnparsedFiles.count(FID)) {
+ // Add the FileID to the unparsed set if we haven't seen it before.
+
+ // Check for directives.
+ bool FoundDirectives;
+ if (PS == IsUnparsedNoDirectives)
+ FoundDirectives = false;
+ else
+ FoundDirectives = !LangOpts || findDirectives(SM, FID, *LangOpts);
+
+ // Add the FileID to the unparsed set.
+ UnparsedFiles.insert(std::make_pair(FID,
+ UnparsedFileStatus(FE, FoundDirectives)));
+ }
+#endif
+}
+
+void VerifyDiagnosticConsumer::CheckDiagnostics() {
+ // Ensure any diagnostics go to the primary client.
+ DiagnosticConsumer *CurClient = Diags.getClient();
+ std::unique_ptr<DiagnosticConsumer> Owner = Diags.takeClient();
+ Diags.setClient(PrimaryClient, false);
+
+#ifndef NDEBUG
+ // In a debug build, scan through any files that may have been missed
+ // during parsing and issue a fatal error if directives are contained
+  // within these files. Such an error suggests that the file is being
+  // parsed separately from the main file; if so, consider moving the
+  // directives to a location that is parsed as part of the main file.
+ if (UnparsedFiles.size() > 0) {
+ // Generate a cache of parsed FileEntry pointers for alias lookups.
+ llvm::SmallPtrSet<const FileEntry *, 8> ParsedFileCache;
+ for (ParsedFilesMap::iterator I = ParsedFiles.begin(),
+ End = ParsedFiles.end(); I != End; ++I) {
+ if (const FileEntry *FE = I->second)
+ ParsedFileCache.insert(FE);
+ }
+
+ // Iterate through list of unparsed files.
+ for (UnparsedFilesMap::iterator I = UnparsedFiles.begin(),
+ End = UnparsedFiles.end(); I != End; ++I) {
+ const UnparsedFileStatus &Status = I->second;
+ const FileEntry *FE = Status.getFile();
+
+ // Skip files that have been parsed via an alias.
+ if (FE && ParsedFileCache.count(FE))
+ continue;
+
+ // Report a fatal error if this file contained directives.
+ if (Status.foundDirectives()) {
+ llvm::report_fatal_error(Twine("-verify directives found after rather"
+ " than during normal parsing of ",
+ StringRef(FE ? FE->getName() : "(unknown)")));
+ }
+ }
+
+ // UnparsedFiles has been processed now, so clear it.
+ UnparsedFiles.clear();
+ }
+#endif // !NDEBUG
+
+ if (SrcManager) {
+ // Produce an error if no expected-* directives could be found in the
+ // source file(s) processed.
+ if (Status == HasNoDirectives) {
+ Diags.Report(diag::err_verify_no_directives).setForceEmit();
+ ++NumErrors;
+ Status = HasNoDirectivesReported;
+ }
+
+ // Check that the expected diagnostics occurred.
+ NumErrors += CheckResults(Diags, *SrcManager, *Buffer, ED);
+ } else {
+ const DiagnosticLevelMask DiagMask =
+ ~Diags.getDiagnosticOptions().getVerifyIgnoreUnexpected();
+ if (bool(DiagnosticLevelMask::Error & DiagMask))
+ NumErrors += PrintUnexpected(Diags, nullptr, Buffer->err_begin(),
+ Buffer->err_end(), "error");
+ if (bool(DiagnosticLevelMask::Warning & DiagMask))
+ NumErrors += PrintUnexpected(Diags, nullptr, Buffer->warn_begin(),
+ Buffer->warn_end(), "warn");
+ if (bool(DiagnosticLevelMask::Remark & DiagMask))
+ NumErrors += PrintUnexpected(Diags, nullptr, Buffer->remark_begin(),
+ Buffer->remark_end(), "remark");
+ if (bool(DiagnosticLevelMask::Note & DiagMask))
+ NumErrors += PrintUnexpected(Diags, nullptr, Buffer->note_begin(),
+ Buffer->note_end(), "note");
+ }
+
+ Diags.setClient(CurClient, Owner.release() != nullptr);
+
+ // Reset the buffer, we have processed all the diagnostics in it.
+ Buffer.reset(new TextDiagnosticBuffer());
+ ED.Reset();
+}
+
+std::unique_ptr<Directive> Directive::create(bool RegexKind,
+ SourceLocation DirectiveLoc,
+ SourceLocation DiagnosticLoc,
+ bool MatchAnyLine, StringRef Text,
+ unsigned Min, unsigned Max) {
+ if (!RegexKind)
+ return llvm::make_unique<StandardDirective>(DirectiveLoc, DiagnosticLoc,
+ MatchAnyLine, Text, Min, Max);
+
+ // Parse the directive into a regular expression.
+ std::string RegexStr;
+ StringRef S = Text;
+ while (!S.empty()) {
+ if (S.startswith("{{")) {
+ S = S.drop_front(2);
+ size_t RegexMatchLength = S.find("}}");
+ assert(RegexMatchLength != StringRef::npos);
+ // Append the regex, enclosed in parentheses.
+ RegexStr += "(";
+ RegexStr.append(S.data(), RegexMatchLength);
+ RegexStr += ")";
+ S = S.drop_front(RegexMatchLength + 2);
+ } else {
+ size_t VerbatimMatchLength = S.find("{{");
+ if (VerbatimMatchLength == StringRef::npos)
+ VerbatimMatchLength = S.size();
+ // Escape and append the fixed string.
+ RegexStr += llvm::Regex::escape(S.substr(0, VerbatimMatchLength));
+ S = S.drop_front(VerbatimMatchLength);
+ }
+ }
+
+ return llvm::make_unique<RegexDirective>(
+ DirectiveLoc, DiagnosticLoc, MatchAnyLine, Text, Min, Max, RegexStr);
+}
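+
+// Editor's illustration (not part of the original source): for the directive
+// text "cannot convert {{'.+'}} to 'int'" the loop above produces the regex
+// "cannot convert ('.+') to 'int'": literal runs pass through
+// llvm::Regex::escape() and each {{...}} chunk is inserted unescaped as a
+// parenthesized sub-expression.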
diff --git a/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
new file mode 100644
index 0000000..79cf004
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -0,0 +1,226 @@
+//===--- ExecuteCompilerInvocation.cpp ------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file holds ExecuteCompilerInvocation(). It is split into its own file to
+// minimize the impact of pulling in essentially everything else in Clang.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/FrontendTool/Utils.h"
+#include "clang/ARCMigrate/ARCMTActions.h"
+#include "clang/CodeGen/CodeGenAction.h"
+#include "clang/Driver/Options.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/CompilerInvocation.h"
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/FrontendPluginRegistry.h"
+#include "clang/Frontend/Utils.h"
+#include "clang/Rewrite/Frontend/FrontendActions.h"
+#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
+#include "llvm/Option/OptTable.h"
+#include "llvm/Option/Option.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+using namespace llvm::opt;
+
+static FrontendAction *CreateFrontendBaseAction(CompilerInstance &CI) {
+ using namespace clang::frontend;
+ StringRef Action("unknown");
+ (void)Action;
+
+ switch (CI.getFrontendOpts().ProgramAction) {
+ case ASTDeclList: return new ASTDeclListAction();
+ case ASTDump: return new ASTDumpAction();
+ case ASTPrint: return new ASTPrintAction();
+ case ASTView: return new ASTViewAction();
+ case DumpRawTokens: return new DumpRawTokensAction();
+ case DumpTokens: return new DumpTokensAction();
+ case EmitAssembly: return new EmitAssemblyAction();
+ case EmitBC: return new EmitBCAction();
+ case EmitHTML: return new HTMLPrintAction();
+ case EmitLLVM: return new EmitLLVMAction();
+ case EmitLLVMOnly: return new EmitLLVMOnlyAction();
+ case EmitCodeGenOnly: return new EmitCodeGenOnlyAction();
+ case EmitObj: return new EmitObjAction();
+ case FixIt: return new FixItAction();
+ case GenerateModule: return new GenerateModuleAction;
+ case GeneratePCH: return new GeneratePCHAction;
+ case GeneratePTH: return new GeneratePTHAction();
+ case InitOnly: return new InitOnlyAction();
+ case ParseSyntaxOnly: return new SyntaxOnlyAction();
+ case ModuleFileInfo: return new DumpModuleInfoAction();
+ case VerifyPCH: return new VerifyPCHAction();
+
+ case PluginAction: {
+ for (FrontendPluginRegistry::iterator it =
+ FrontendPluginRegistry::begin(), ie = FrontendPluginRegistry::end();
+ it != ie; ++it) {
+ if (it->getName() == CI.getFrontendOpts().ActionName) {
+ std::unique_ptr<PluginASTAction> P(it->instantiate());
+ if (!P->ParseArgs(CI, CI.getFrontendOpts().PluginArgs))
+ return nullptr;
+ return P.release();
+ }
+ }
+
+ CI.getDiagnostics().Report(diag::err_fe_invalid_plugin_name)
+ << CI.getFrontendOpts().ActionName;
+ return nullptr;
+ }
+
+ case PrintDeclContext: return new DeclContextPrintAction();
+ case PrintPreamble: return new PrintPreambleAction();
+ case PrintPreprocessedInput: {
+ if (CI.getPreprocessorOutputOpts().RewriteIncludes)
+ return new RewriteIncludesAction();
+ return new PrintPreprocessedAction();
+ }
+
+ case RewriteMacros: return new RewriteMacrosAction();
+ case RewriteTest: return new RewriteTestAction();
+#ifdef CLANG_ENABLE_OBJC_REWRITER
+ case RewriteObjC: return new RewriteObjCAction();
+#else
+ case RewriteObjC: Action = "RewriteObjC"; break;
+#endif
+#ifdef CLANG_ENABLE_ARCMT
+ case MigrateSource: return new arcmt::MigrateSourceAction();
+#else
+ case MigrateSource: Action = "MigrateSource"; break;
+#endif
+#ifdef CLANG_ENABLE_STATIC_ANALYZER
+ case RunAnalysis: return new ento::AnalysisAction();
+#else
+ case RunAnalysis: Action = "RunAnalysis"; break;
+#endif
+ case RunPreprocessorOnly: return new PreprocessOnlyAction();
+ }
+
+#if !defined(CLANG_ENABLE_ARCMT) || !defined(CLANG_ENABLE_STATIC_ANALYZER) \
+ || !defined(CLANG_ENABLE_OBJC_REWRITER)
+ CI.getDiagnostics().Report(diag::err_fe_action_not_available) << Action;
+  return nullptr;
+#else
+ llvm_unreachable("Invalid program action!");
+#endif
+}
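+
+// For reference (editor's note, not part of this file): plugins enter the
+// registry iterated above by defining a static registration object in the
+// plugin library, e.g.
+//
+//   static FrontendPluginRegistry::Add<MyPluginAction>
+//       X("my-plugin", "example plugin description");
+//
+// where MyPluginAction (a hypothetical name) derives from PluginASTAction;
+// "my-plugin" is then matched against -plugin <name> via ActionName above.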
+
+static FrontendAction *CreateFrontendAction(CompilerInstance &CI) {
+ // Create the underlying action.
+ FrontendAction *Act = CreateFrontendBaseAction(CI);
+ if (!Act)
+ return nullptr;
+
+ const FrontendOptions &FEOpts = CI.getFrontendOpts();
+
+ if (FEOpts.FixAndRecompile) {
+ Act = new FixItRecompile(Act);
+ }
+
+#ifdef CLANG_ENABLE_ARCMT
+ if (CI.getFrontendOpts().ProgramAction != frontend::MigrateSource &&
+ CI.getFrontendOpts().ProgramAction != frontend::GeneratePCH) {
+ // Potentially wrap the base FE action in an ARC Migrate Tool action.
+ switch (FEOpts.ARCMTAction) {
+ case FrontendOptions::ARCMT_None:
+ break;
+ case FrontendOptions::ARCMT_Check:
+ Act = new arcmt::CheckAction(Act);
+ break;
+ case FrontendOptions::ARCMT_Modify:
+ Act = new arcmt::ModifyAction(Act);
+ break;
+ case FrontendOptions::ARCMT_Migrate:
+ Act = new arcmt::MigrateAction(Act,
+ FEOpts.MTMigrateDir,
+ FEOpts.ARCMTMigrateReportOut,
+ FEOpts.ARCMTMigrateEmitARCErrors);
+ break;
+ }
+
+ if (FEOpts.ObjCMTAction != FrontendOptions::ObjCMT_None) {
+ Act = new arcmt::ObjCMigrateAction(Act, FEOpts.MTMigrateDir,
+ FEOpts.ObjCMTAction);
+ }
+ }
+#endif
+
+ // If there are any AST files to merge, create a frontend action
+ // adaptor to perform the merge.
+ if (!FEOpts.ASTMergeFiles.empty())
+ Act = new ASTMergeAction(Act, FEOpts.ASTMergeFiles);
+
+ return Act;
+}
+
+bool clang::ExecuteCompilerInvocation(CompilerInstance *Clang) {
+ // Honor -help.
+ if (Clang->getFrontendOpts().ShowHelp) {
+ std::unique_ptr<OptTable> Opts(driver::createDriverOptTable());
+ Opts->PrintHelp(llvm::outs(), "clang -cc1",
+ "LLVM 'Clang' Compiler: http://clang.llvm.org",
+ /*Include=*/ driver::options::CC1Option, /*Exclude=*/ 0);
+ return true;
+ }
+
+ // Honor -version.
+ //
+ // FIXME: Use a better -version message?
+ if (Clang->getFrontendOpts().ShowVersion) {
+ llvm::cl::PrintVersionMessage();
+ return true;
+ }
+
+ // Load any requested plugins.
+ for (unsigned i = 0,
+ e = Clang->getFrontendOpts().Plugins.size(); i != e; ++i) {
+ const std::string &Path = Clang->getFrontendOpts().Plugins[i];
+ std::string Error;
+ if (llvm::sys::DynamicLibrary::LoadLibraryPermanently(Path.c_str(), &Error))
+ Clang->getDiagnostics().Report(diag::err_fe_unable_to_load_plugin)
+ << Path << Error;
+ }
+
+ // Honor -mllvm.
+ //
+ // FIXME: Remove this, one day.
+ // This should happen AFTER plugins have been loaded!
+ if (!Clang->getFrontendOpts().LLVMArgs.empty()) {
+ unsigned NumArgs = Clang->getFrontendOpts().LLVMArgs.size();
+ auto Args = llvm::make_unique<const char*[]>(NumArgs + 2);
+ Args[0] = "clang (LLVM option parsing)";
+ for (unsigned i = 0; i != NumArgs; ++i)
+ Args[i + 1] = Clang->getFrontendOpts().LLVMArgs[i].c_str();
+ Args[NumArgs + 1] = nullptr;
+ llvm::cl::ParseCommandLineOptions(NumArgs + 1, Args.get());
+ }
+
+#ifdef CLANG_ENABLE_STATIC_ANALYZER
+ // Honor -analyzer-checker-help.
+ // This should happen AFTER plugins have been loaded!
+ if (Clang->getAnalyzerOpts()->ShowCheckerHelp) {
+ ento::printCheckerHelp(llvm::outs(), Clang->getFrontendOpts().Plugins);
+ return true;
+ }
+#endif
+
+ // If there were errors in processing arguments, don't do anything else.
+ if (Clang->getDiagnostics().hasErrorOccurred())
+ return false;
+ // Create and execute the frontend action.
+ std::unique_ptr<FrontendAction> Act(CreateFrontendAction(*Clang));
+ if (!Act)
+ return false;
+ bool Success = Clang->ExecuteAction(*Act);
+ if (Clang->getFrontendOpts().DisableFree)
+ BuryPointer(std::move(Act));
+ return Success;
+}
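+
+// Example invocation (editor's illustration; MyPlugin.so is hypothetical)
+// exercising the paths above:
+//
+//   clang -cc1 -load MyPlugin.so -plugin my-plugin -mllvm -stats t.c
+//
+// -load feeds the plugin-loading loop, -plugin selects PluginAction by name,
+// and -mllvm forwards "-stats" to llvm::cl::ParseCommandLineOptions.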
diff --git a/contrib/llvm/tools/clang/lib/Headers/Intrin.h b/contrib/llvm/tools/clang/lib/Headers/Intrin.h
new file mode 100644
index 0000000..6c1d0d1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/Intrin.h
@@ -0,0 +1,958 @@
+/* ===-------- Intrin.h ---------------------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Only include this if we're compiling for the windows platform. */
+#ifndef _MSC_VER
+#include_next <Intrin.h>
+#else
+
+#ifndef __INTRIN_H
+#define __INTRIN_H
+
+/* First include the standard intrinsics. */
+#if defined(__i386__) || defined(__x86_64__)
+#include <x86intrin.h>
+#endif
+
+/* For the definition of jmp_buf. */
+#if __STDC_HOSTED__
+#include <setjmp.h>
+#endif
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(__MMX__)
+/* And the random ones that aren't in those files. */
+__m64 _m_from_float(float);
+float _m_to_float(__m64);
+#endif
+
+/* Other assorted instruction intrinsics. */
+void __addfsbyte(unsigned long, unsigned char);
+void __addfsdword(unsigned long, unsigned long);
+void __addfsword(unsigned long, unsigned short);
+void __code_seg(const char *);
+static __inline__
+void __cpuid(int[4], int);
+static __inline__
+void __cpuidex(int[4], int, int);
+void __debugbreak(void);
+__int64 __emul(int, int);
+unsigned __int64 __emulu(unsigned int, unsigned int);
+void __cdecl __fastfail(unsigned int);
+unsigned int __getcallerseflags(void);
+static __inline__
+void __halt(void);
+unsigned char __inbyte(unsigned short);
+void __inbytestring(unsigned short, unsigned char *, unsigned long);
+void __incfsbyte(unsigned long);
+void __incfsdword(unsigned long);
+void __incfsword(unsigned long);
+unsigned long __indword(unsigned short);
+void __indwordstring(unsigned short, unsigned long *, unsigned long);
+void __int2c(void);
+void __invlpg(void *);
+unsigned short __inword(unsigned short);
+void __inwordstring(unsigned short, unsigned short *, unsigned long);
+void __lidt(void *);
+unsigned __int64 __ll_lshift(unsigned __int64, int);
+__int64 __ll_rshift(__int64, int);
+void __llwpcb(void *);
+unsigned char __lwpins32(unsigned int, unsigned int, unsigned int);
+void __lwpval32(unsigned int, unsigned int, unsigned int);
+unsigned int __lzcnt(unsigned int);
+unsigned short __lzcnt16(unsigned short);
+static __inline__
+void __movsb(unsigned char *, unsigned char const *, size_t);
+static __inline__
+void __movsd(unsigned long *, unsigned long const *, size_t);
+static __inline__
+void __movsw(unsigned short *, unsigned short const *, size_t);
+void __nop(void);
+void __nvreg_restore_fence(void);
+void __nvreg_save_fence(void);
+void __outbyte(unsigned short, unsigned char);
+void __outbytestring(unsigned short, unsigned char *, unsigned long);
+void __outdword(unsigned short, unsigned long);
+void __outdwordstring(unsigned short, unsigned long *, unsigned long);
+void __outword(unsigned short, unsigned short);
+void __outwordstring(unsigned short, unsigned short *, unsigned long);
+static __inline__
+unsigned int __popcnt(unsigned int);
+static __inline__
+unsigned short __popcnt16(unsigned short);
+unsigned long __readcr0(void);
+unsigned long __readcr2(void);
+static __inline__
+unsigned long __readcr3(void);
+unsigned long __readcr4(void);
+unsigned long __readcr8(void);
+unsigned int __readdr(unsigned int);
+#ifdef __i386__
+static __inline__
+unsigned char __readfsbyte(unsigned long);
+static __inline__
+unsigned long __readfsdword(unsigned long);
+static __inline__
+unsigned __int64 __readfsqword(unsigned long);
+static __inline__
+unsigned short __readfsword(unsigned long);
+#endif
+static __inline__
+unsigned __int64 __readmsr(unsigned long);
+unsigned __int64 __readpmc(unsigned long);
+unsigned long __segmentlimit(unsigned long);
+void __sidt(void *);
+void *__slwpcb(void);
+static __inline__
+void __stosb(unsigned char *, unsigned char, size_t);
+static __inline__
+void __stosd(unsigned long *, unsigned long, size_t);
+static __inline__
+void __stosw(unsigned short *, unsigned short, size_t);
+void __svm_clgi(void);
+void __svm_invlpga(void *, int);
+void __svm_skinit(int);
+void __svm_stgi(void);
+void __svm_vmload(size_t);
+void __svm_vmrun(size_t);
+void __svm_vmsave(size_t);
+void __ud2(void);
+unsigned __int64 __ull_rshift(unsigned __int64, int);
+void __vmx_off(void);
+void __vmx_vmptrst(unsigned __int64 *);
+void __wbinvd(void);
+void __writecr0(unsigned int);
+static __inline__
+void __writecr3(unsigned int);
+void __writecr4(unsigned int);
+void __writecr8(unsigned int);
+void __writedr(unsigned int, unsigned int);
+void __writefsbyte(unsigned long, unsigned char);
+void __writefsdword(unsigned long, unsigned long);
+void __writefsqword(unsigned long, unsigned __int64);
+void __writefsword(unsigned long, unsigned short);
+void __writemsr(unsigned long, unsigned __int64);
+static __inline__
+void *_AddressOfReturnAddress(void);
+static __inline__
+unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
+static __inline__
+unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
+static __inline__
+unsigned char _bittest(long const *, long);
+static __inline__
+unsigned char _bittestandcomplement(long *, long);
+static __inline__
+unsigned char _bittestandreset(long *, long);
+static __inline__
+unsigned char _bittestandset(long *, long);
+unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);
+unsigned long __cdecl _byteswap_ulong(unsigned long);
+unsigned short __cdecl _byteswap_ushort(unsigned short);
+void __cdecl _disable(void);
+void __cdecl _enable(void);
+long _InterlockedAddLargeStatistic(__int64 volatile *_Addend, long _Value);
+static __inline__
+long _InterlockedAnd(long volatile *_Value, long _Mask);
+static __inline__
+short _InterlockedAnd16(short volatile *_Value, short _Mask);
+static __inline__
+char _InterlockedAnd8(char volatile *_Value, char _Mask);
+unsigned char _interlockedbittestandreset(long volatile *, long);
+static __inline__
+unsigned char _interlockedbittestandset(long volatile *, long);
+static __inline__
+long __cdecl _InterlockedCompareExchange(long volatile *_Destination,
+ long _Exchange, long _Comparand);
+long _InterlockedCompareExchange_HLEAcquire(long volatile *, long, long);
+long _InterlockedCompareExchange_HLERelease(long volatile *, long, long);
+static __inline__
+short _InterlockedCompareExchange16(short volatile *_Destination,
+ short _Exchange, short _Comparand);
+static __inline__
+__int64 _InterlockedCompareExchange64(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand);
+__int64 _InterlockedCompareExchange64_HLEAcquire(__int64 volatile *, __int64,
+ __int64);
+__int64 _InterlockedCompareExchange64_HLERelease(__int64 volatile *, __int64,
+ __int64);
+static __inline__
+char _InterlockedCompareExchange8(char volatile *_Destination, char _Exchange,
+ char _Comparand);
+void *_InterlockedCompareExchangePointer_HLEAcquire(void *volatile *, void *,
+ void *);
+void *_InterlockedCompareExchangePointer_HLERelease(void *volatile *, void *,
+ void *);
+static __inline__
+long __cdecl _InterlockedDecrement(long volatile *_Addend);
+static __inline__
+short _InterlockedDecrement16(short volatile *_Addend);
+long _InterlockedExchange(long volatile *_Target, long _Value);
+static __inline__
+short _InterlockedExchange16(short volatile *_Target, short _Value);
+static __inline__
+char _InterlockedExchange8(char volatile *_Target, char _Value);
+static __inline__
+long __cdecl _InterlockedExchangeAdd(long volatile *_Addend, long _Value);
+long _InterlockedExchangeAdd_HLEAcquire(long volatile *, long);
+long _InterlockedExchangeAdd_HLERelease(long volatile *, long);
+static __inline__
+short _InterlockedExchangeAdd16(short volatile *_Addend, short _Value);
+__int64 _InterlockedExchangeAdd64_HLEAcquire(__int64 volatile *, __int64);
+__int64 _InterlockedExchangeAdd64_HLERelease(__int64 volatile *, __int64);
+static __inline__
+char _InterlockedExchangeAdd8(char volatile *_Addend, char _Value);
+static __inline__
+long __cdecl _InterlockedIncrement(long volatile *_Addend);
+static __inline__
+short _InterlockedIncrement16(short volatile *_Addend);
+static __inline__
+long _InterlockedOr(long volatile *_Value, long _Mask);
+static __inline__
+short _InterlockedOr16(short volatile *_Value, short _Mask);
+static __inline__
+char _InterlockedOr8(char volatile *_Value, char _Mask);
+static __inline__
+long _InterlockedXor(long volatile *_Value, long _Mask);
+static __inline__
+short _InterlockedXor16(short volatile *_Value, short _Mask);
+static __inline__
+char _InterlockedXor8(char volatile *_Value, char _Mask);
+void __cdecl _invpcid(unsigned int, void *);
+static __inline__
+unsigned long __cdecl _lrotl(unsigned long, int);
+static __inline__
+unsigned long __cdecl _lrotr(unsigned long, int);
+static __inline__
+void _ReadBarrier(void);
+static __inline__
+void _ReadWriteBarrier(void);
+static __inline__
+void *_ReturnAddress(void);
+unsigned int _rorx_u32(unsigned int, const unsigned int);
+static __inline__
+unsigned int __cdecl _rotl(unsigned int _Value, int _Shift);
+static __inline__
+unsigned short _rotl16(unsigned short _Value, unsigned char _Shift);
+static __inline__
+unsigned __int64 __cdecl _rotl64(unsigned __int64 _Value, int _Shift);
+static __inline__
+unsigned char _rotl8(unsigned char _Value, unsigned char _Shift);
+static __inline__
+unsigned int __cdecl _rotr(unsigned int _Value, int _Shift);
+static __inline__
+unsigned short _rotr16(unsigned short _Value, unsigned char _Shift);
+static __inline__
+unsigned __int64 __cdecl _rotr64(unsigned __int64 _Value, int _Shift);
+static __inline__
+unsigned char _rotr8(unsigned char _Value, unsigned char _Shift);
+int _sarx_i32(int, unsigned int);
+#if __STDC_HOSTED__
+int __cdecl _setjmp(jmp_buf);
+#endif
+unsigned int _shlx_u32(unsigned int, unsigned int);
+unsigned int _shrx_u32(unsigned int, unsigned int);
+void _Store_HLERelease(long volatile *, long);
+void _Store64_HLERelease(__int64 volatile *, __int64);
+void _StorePointer_HLERelease(void *volatile *, void *);
+static __inline__
+void _WriteBarrier(void);
+unsigned __int32 _xbegin(void);
+void _xend(void);
+#define _XCR_XFEATURE_ENABLED_MASK 0
+static __inline__
+unsigned __int64 __cdecl _xgetbv(unsigned int);
+void __cdecl _xsetbv(unsigned int, unsigned __int64);
+
+/* These additional intrinsics are turned on in x64/amd64/x86_64 mode. */
+#ifdef __x86_64__
+void __addgsbyte(unsigned long, unsigned char);
+void __addgsdword(unsigned long, unsigned long);
+void __addgsqword(unsigned long, unsigned __int64);
+void __addgsword(unsigned long, unsigned short);
+static __inline__
+void __faststorefence(void);
+void __incgsbyte(unsigned long);
+void __incgsdword(unsigned long);
+void __incgsqword(unsigned long);
+void __incgsword(unsigned long);
+unsigned char __lwpins64(unsigned __int64, unsigned int, unsigned int);
+void __lwpval64(unsigned __int64, unsigned int, unsigned int);
+unsigned __int64 __lzcnt64(unsigned __int64);
+static __inline__
+void __movsq(unsigned long long *, unsigned long long const *, size_t);
+__int64 __mulh(__int64, __int64);
+static __inline__
+unsigned __int64 __popcnt64(unsigned __int64);
+static __inline__
+unsigned char __readgsbyte(unsigned long);
+static __inline__
+unsigned long __readgsdword(unsigned long);
+static __inline__
+unsigned __int64 __readgsqword(unsigned long);
+unsigned short __readgsword(unsigned long);
+unsigned __int64 __shiftleft128(unsigned __int64 _LowPart,
+ unsigned __int64 _HighPart,
+ unsigned char _Shift);
+unsigned __int64 __shiftright128(unsigned __int64 _LowPart,
+ unsigned __int64 _HighPart,
+ unsigned char _Shift);
+static __inline__
+void __stosq(unsigned __int64 *, unsigned __int64, size_t);
+unsigned char __vmx_on(unsigned __int64 *);
+unsigned char __vmx_vmclear(unsigned __int64 *);
+unsigned char __vmx_vmlaunch(void);
+unsigned char __vmx_vmptrld(unsigned __int64 *);
+unsigned char __vmx_vmread(size_t, size_t *);
+unsigned char __vmx_vmresume(void);
+unsigned char __vmx_vmwrite(size_t, size_t);
+void __writegsbyte(unsigned long, unsigned char);
+void __writegsdword(unsigned long, unsigned long);
+void __writegsqword(unsigned long, unsigned __int64);
+void __writegsword(unsigned long, unsigned short);
+static __inline__
+unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
+static __inline__
+unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
+static __inline__
+unsigned char _bittest64(__int64 const *, __int64);
+static __inline__
+unsigned char _bittestandcomplement64(__int64 *, __int64);
+static __inline__
+unsigned char _bittestandreset64(__int64 *, __int64);
+static __inline__
+unsigned char _bittestandset64(__int64 *, __int64);
+unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);
+long _InterlockedAnd_np(long volatile *_Value, long _Mask);
+short _InterlockedAnd16_np(short volatile *_Value, short _Mask);
+__int64 _InterlockedAnd64_np(__int64 volatile *_Value, __int64 _Mask);
+char _InterlockedAnd8_np(char volatile *_Value, char _Mask);
+unsigned char _interlockedbittestandreset64(__int64 volatile *, __int64);
+static __inline__
+unsigned char _interlockedbittestandset64(__int64 volatile *, __int64);
+long _InterlockedCompareExchange_np(long volatile *_Destination, long _Exchange,
+ long _Comparand);
+unsigned char _InterlockedCompareExchange128(__int64 volatile *_Destination,
+                                             __int64 _ExchangeHigh,
+                                             __int64 _ExchangeLow,
+                                             __int64 *_ComparandResult);
+unsigned char _InterlockedCompareExchange128_np(__int64 volatile *_Destination,
+ __int64 _ExchangeHigh,
+ __int64 _ExchangeLow,
+ __int64 *_ComparandResult);
+short _InterlockedCompareExchange16_np(short volatile *_Destination,
+ short _Exchange, short _Comparand);
+__int64 _InterlockedCompareExchange64_HLEAcquire(__int64 volatile *, __int64,
+ __int64);
+__int64 _InterlockedCompareExchange64_HLERelease(__int64 volatile *, __int64,
+ __int64);
+__int64 _InterlockedCompareExchange64_np(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand);
+void *_InterlockedCompareExchangePointer(void *volatile *_Destination,
+ void *_Exchange, void *_Comparand);
+void *_InterlockedCompareExchangePointer_np(void *volatile *_Destination,
+ void *_Exchange, void *_Comparand);
+static __inline__
+__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
+static __inline__
+__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value);
+static __inline__
+__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value);
+void *_InterlockedExchangePointer(void *volatile *_Target, void *_Value);
+static __inline__
+__int64 _InterlockedIncrement64(__int64 volatile *_Addend);
+long _InterlockedOr_np(long volatile *_Value, long _Mask);
+short _InterlockedOr16_np(short volatile *_Value, short _Mask);
+static __inline__
+__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedOr64_np(__int64 volatile *_Value, __int64 _Mask);
+char _InterlockedOr8_np(char volatile *_Value, char _Mask);
+long _InterlockedXor_np(long volatile *_Value, long _Mask);
+short _InterlockedXor16_np(short volatile *_Value, short _Mask);
+static __inline__
+__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedXor64_np(__int64 volatile *_Value, __int64 _Mask);
+char _InterlockedXor8_np(char volatile *_Value, char _Mask);
+static __inline__
+__int64 _mul128(__int64 _Multiplier, __int64 _Multiplicand,
+ __int64 *_HighProduct);
+unsigned __int64 _rorx_u64(unsigned __int64, const unsigned int);
+__int64 _sarx_i64(__int64, unsigned int);
+#if __STDC_HOSTED__
+int __cdecl _setjmpex(jmp_buf);
+#endif
+unsigned __int64 _shlx_u64(unsigned __int64, unsigned int);
+unsigned __int64 _shrx_u64(unsigned __int64, unsigned int);
+/*
+ * Multiply two 64-bit integers and obtain a 128-bit result.
+ * The low half is returned directly and the high half is written to the out
+ * parameter.
+ */
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+_umul128(unsigned __int64 _Multiplier, unsigned __int64 _Multiplicand,
+ unsigned __int64 *_HighProduct) {
+ unsigned __int128 _FullProduct =
+ (unsigned __int128)_Multiplier * (unsigned __int128)_Multiplicand;
+ *_HighProduct = _FullProduct >> 64;
+ return _FullProduct;
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__umulh(unsigned __int64 _Multiplier, unsigned __int64 _Multiplicand) {
+ unsigned __int128 _FullProduct =
+ (unsigned __int128)_Multiplier * (unsigned __int128)_Multiplicand;
+ return _FullProduct >> 64;
+}
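+
+/* Editor's illustration (not part of the original header):
+ *   unsigned __int64 __hi, __lo;
+ *   __lo = _umul128(0xFFFFFFFFFFFFFFFFULL, 2, &__hi);
+ * leaves __lo == 0xFFFFFFFFFFFFFFFEULL and __hi == 1, since
+ * (2^64 - 1) * 2 == 2^65 - 2. */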
+
+#endif /* __x86_64__ */
+
+/*----------------------------------------------------------------------------*\
+|* Multiplication
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+__emul(int __in1, int __in2) {
+ return (__int64)__in1 * (__int64)__in2;
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__emulu(unsigned int __in1, unsigned int __in2) {
+ return (unsigned __int64)__in1 * (unsigned __int64)__in2;
+}
+/*----------------------------------------------------------------------------*\
+|* Bit Twiddling
+\*----------------------------------------------------------------------------*/
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_rotl8(unsigned char _Value, unsigned char _Shift) {
+ _Shift &= 0x7;
+ return _Shift ? (_Value << _Shift) | (_Value >> (8 - _Shift)) : _Value;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_rotr8(unsigned char _Value, unsigned char _Shift) {
+ _Shift &= 0x7;
+ return _Shift ? (_Value >> _Shift) | (_Value << (8 - _Shift)) : _Value;
+}
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+_rotl16(unsigned short _Value, unsigned char _Shift) {
+ _Shift &= 0xf;
+ return _Shift ? (_Value << _Shift) | (_Value >> (16 - _Shift)) : _Value;
+}
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+_rotr16(unsigned short _Value, unsigned char _Shift) {
+ _Shift &= 0xf;
+ return _Shift ? (_Value >> _Shift) | (_Value << (16 - _Shift)) : _Value;
+}
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_rotl(unsigned int _Value, int _Shift) {
+ _Shift &= 0x1f;
+ return _Shift ? (_Value << _Shift) | (_Value >> (32 - _Shift)) : _Value;
+}
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_rotr(unsigned int _Value, int _Shift) {
+ _Shift &= 0x1f;
+ return _Shift ? (_Value >> _Shift) | (_Value << (32 - _Shift)) : _Value;
+}
+static __inline__ unsigned long __DEFAULT_FN_ATTRS
+_lrotl(unsigned long _Value, int _Shift) {
+ _Shift &= 0x1f;
+ return _Shift ? (_Value << _Shift) | (_Value >> (32 - _Shift)) : _Value;
+}
+static __inline__ unsigned long __DEFAULT_FN_ATTRS
+_lrotr(unsigned long _Value, int _Shift) {
+ _Shift &= 0x1f;
+ return _Shift ? (_Value >> _Shift) | (_Value << (32 - _Shift)) : _Value;
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+_rotl64(unsigned __int64 _Value, int _Shift) {
+ _Shift &= 0x3f;
+ return _Shift ? (_Value << _Shift) | (_Value >> (64 - _Shift)) : _Value;
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+_rotr64(unsigned __int64 _Value, int _Shift) {
+ _Shift &= 0x3f;
+ return _Shift ? (_Value >> _Shift) | (_Value << (64 - _Shift)) : _Value;
+}
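+/* Editor's illustration (not part of the original header): _rotl8(0x81, 1)
+ * yields 0x03 and _rotr8(0x81, 1) yields 0xC0. The masking above also makes a
+ * shift count of 0 well defined by returning _Value unchanged, avoiding an
+ * undefined shift by the full operand width. */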
+/*----------------------------------------------------------------------------*\
+|* Bit Counting and Testing
+\*----------------------------------------------------------------------------*/
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_BitScanForward(unsigned long *_Index, unsigned long _Mask) {
+ if (!_Mask)
+ return 0;
+ *_Index = __builtin_ctzl(_Mask);
+ return 1;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_BitScanReverse(unsigned long *_Index, unsigned long _Mask) {
+ if (!_Mask)
+ return 0;
+ *_Index = 31 - __builtin_clzl(_Mask);
+ return 1;
+}
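+/* Editor's illustration (not part of the original header): for _Mask == 0x12
+ * (bits 1 and 4 set), _BitScanForward stores 1 and _BitScanReverse stores 4
+ * in *_Index, each returning 1; with _Mask == 0 both return 0 and leave
+ * *_Index untouched. */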
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+__popcnt16(unsigned short _Value) {
+ return __builtin_popcount((int)_Value);
+}
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__popcnt(unsigned int _Value) {
+ return __builtin_popcount(_Value);
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittest(long const *_BitBase, long _BitPos) {
+ return (*_BitBase >> _BitPos) & 1;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandcomplement(long *_BitBase, long _BitPos) {
+ unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+ *_BitBase = *_BitBase ^ (1 << _BitPos);
+ return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandreset(long *_BitBase, long _BitPos) {
+ unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+ *_BitBase = *_BitBase & ~(1 << _BitPos);
+ return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandset(long *_BitBase, long _BitPos) {
+ unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+ *_BitBase = *_BitBase | (1 << _BitPos);
+ return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_interlockedbittestandset(long volatile *_BitBase, long _BitPos) {
+ long _PrevVal = __atomic_fetch_or(_BitBase, 1l << _BitPos, __ATOMIC_SEQ_CST);
+ return (_PrevVal >> _BitPos) & 1;
+}
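+/* Editor's illustration (not part of the original header): starting from
+ * *_BitBase == 0, _interlockedbittestandset(_BitBase, 3) returns 0 and leaves
+ * *_BitBase == 8; a second call returns 1, so the previous bit value can
+ * serve as a simple try-lock result. */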
+#ifdef __x86_64__
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask) {
+ if (!_Mask)
+ return 0;
+ *_Index = __builtin_ctzll(_Mask);
+ return 1;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask) {
+ if (!_Mask)
+ return 0;
+ *_Index = 63 - __builtin_clzll(_Mask);
+ return 1;
+}
+static __inline__
+unsigned __int64 __DEFAULT_FN_ATTRS
+__popcnt64(unsigned __int64 _Value) {
+ return __builtin_popcountll(_Value);
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittest64(__int64 const *_BitBase, __int64 _BitPos) {
+ return (*_BitBase >> _BitPos) & 1;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandcomplement64(__int64 *_BitBase, __int64 _BitPos) {
+ unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+ *_BitBase = *_BitBase ^ (1ll << _BitPos);
+ return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandreset64(__int64 *_BitBase, __int64 _BitPos) {
+ unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+ *_BitBase = *_BitBase & ~(1ll << _BitPos);
+ return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_bittestandset64(__int64 *_BitBase, __int64 _BitPos) {
+ unsigned char _Res = (*_BitBase >> _BitPos) & 1;
+ *_BitBase = *_BitBase | (1ll << _BitPos);
+ return _Res;
+}
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_interlockedbittestandset64(__int64 volatile *_BitBase, __int64 _BitPos) {
+ long long _PrevVal =
+ __atomic_fetch_or(_BitBase, 1ll << _BitPos, __ATOMIC_SEQ_CST);
+ return (_PrevVal >> _BitPos) & 1;
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange Add
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd8(char volatile *_Addend, char _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd16(short volatile *_Addend, short _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange Sub
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedExchangeSub8(char volatile *_Subend, char _Value) {
+ return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedExchangeSub16(short volatile *_Subend, short _Value) {
+ return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedExchangeSub(long volatile *_Subend, long _Value) {
+ return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value) {
+ return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Increment
+\*----------------------------------------------------------------------------*/
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedIncrement16(short volatile *_Value) {
+ return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedIncrement64(__int64 volatile *_Value) {
+ return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Decrement
+\*----------------------------------------------------------------------------*/
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedDecrement16(short volatile *_Value) {
+ return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedDecrement64(__int64 volatile *_Value) {
+ return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked And
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedAnd8(char volatile *_Value, char _Mask) {
+ return __atomic_and_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedAnd16(short volatile *_Value, short _Mask) {
+ return __atomic_and_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedAnd(long volatile *_Value, long _Mask) {
+ return __atomic_and_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_and_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Or
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedOr8(char volatile *_Value, char _Mask) {
+ return __atomic_or_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedOr16(short volatile *_Value, short _Mask) {
+ return __atomic_or_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedOr(long volatile *_Value, long _Mask) {
+ return __atomic_or_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedOr64(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_or_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Xor
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedXor8(char volatile *_Value, char _Mask) {
+ return __atomic_xor_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedXor16(short volatile *_Value, short _Mask) {
+ return __atomic_xor_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedXor(long volatile *_Value, long _Mask) {
+ return __atomic_xor_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedXor64(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_xor_fetch(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedExchange8(char volatile *_Target, char _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
+ return _Value;
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedExchange16(short volatile *_Target, short _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
+ return _Value;
+}
+#ifdef __x86_64__
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchange64(__int64 volatile *_Target, __int64 _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
+ return _Value;
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Compare Exchange
+\*----------------------------------------------------------------------------*/
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange8(char volatile *_Destination,
+ char _Exchange, char _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return _Comparand;
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange16(short volatile *_Destination,
+ short _Exchange, short _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return _Comparand;
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange64(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return _Comparand;
+}
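+/* Editor's illustration (not part of the original header): the usual retry
+ * loop built on the compare-exchange wrappers above, given some
+ * long volatile *__counter:
+ *
+ *   long __old, __desired;
+ *   do {
+ *     __old = *__counter;            // snapshot current value
+ *     __desired = __old * 2;         // compute the replacement
+ *   } while (_InterlockedCompareExchange(__counter, __desired, __old)
+ *            != __old);              // retry if another thread intervened
+ */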
+/*----------------------------------------------------------------------------*\
+|* Barriers
+\*----------------------------------------------------------------------------*/
+static __inline__ void __DEFAULT_FN_ATTRS
+__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
+_ReadWriteBarrier(void) {
+ __atomic_signal_fence(__ATOMIC_SEQ_CST);
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
+_ReadBarrier(void) {
+ __atomic_signal_fence(__ATOMIC_SEQ_CST);
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
+_WriteBarrier(void) {
+ __atomic_signal_fence(__ATOMIC_SEQ_CST);
+}
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+__faststorefence(void) {
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* readfs, readgs
+|* (Pointers in address space #256 and #257 are relative to the GS and FS
+|* segment registers, respectively.)
+\*----------------------------------------------------------------------------*/
+#define __ptr_to_addr_space(__addr_space_nbr, __type, __offset) \
+ ((volatile __type __attribute__((__address_space__(__addr_space_nbr)))*) \
+ (__offset))
+
+#ifdef __i386__
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+__readfsbyte(unsigned long __offset) {
+ return *__ptr_to_addr_space(257, unsigned char, __offset);
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__readfsqword(unsigned long __offset) {
+ return *__ptr_to_addr_space(257, unsigned __int64, __offset);
+}
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+__readfsword(unsigned long __offset) {
+ return *__ptr_to_addr_space(257, unsigned short, __offset);
+}
+#endif
+#ifdef __x86_64__
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+__readgsbyte(unsigned long __offset) {
+ return *__ptr_to_addr_space(256, unsigned char, __offset);
+}
+static __inline__ unsigned long __DEFAULT_FN_ATTRS
+__readgsdword(unsigned long __offset) {
+ return *__ptr_to_addr_space(256, unsigned long, __offset);
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__readgsqword(unsigned long __offset) {
+ return *__ptr_to_addr_space(256, unsigned __int64, __offset);
+}
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+__readgsword(unsigned long __offset) {
+ return *__ptr_to_addr_space(256, unsigned short, __offset);
+}
+#endif
+#undef __ptr_to_addr_space
+/*----------------------------------------------------------------------------*\
+|* movs, stos
+\*----------------------------------------------------------------------------*/
+#if defined(__i386__) || defined(__x86_64__)
+static __inline__ void __DEFAULT_FN_ATTRS
+__movsb(unsigned char *__dst, unsigned char const *__src, size_t __n) {
+ __asm__("rep movsb" : : "D"(__dst), "S"(__src), "c"(__n)
+ : "%edi", "%esi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__movsd(unsigned long *__dst, unsigned long const *__src, size_t __n) {
+ __asm__("rep movsl" : : "D"(__dst), "S"(__src), "c"(__n)
+ : "%edi", "%esi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__movsw(unsigned short *__dst, unsigned short const *__src, size_t __n) {
+ __asm__("rep movsw" : : "D"(__dst), "S"(__src), "c"(__n)
+ : "%edi", "%esi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__stosb(unsigned char *__dst, unsigned char __x, size_t __n) {
+ __asm__("rep stosb" : : "D"(__dst), "a"(__x), "c"(__n)
+ : "%edi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__stosd(unsigned long *__dst, unsigned long __x, size_t __n) {
+ __asm__("rep stosl" : : "D"(__dst), "a"(__x), "c"(__n)
+ : "%edi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__stosw(unsigned short *__dst, unsigned short __x, size_t __n) {
+ __asm__("rep stosw" : : "D"(__dst), "a"(__x), "c"(__n)
+ : "%edi", "%ecx");
+}
+#endif
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+__movsq(unsigned long long *__dst, unsigned long long const *__src, size_t __n) {
+ __asm__("rep movsq" : : "D"(__dst), "S"(__src), "c"(__n)
+ : "%edi", "%esi", "%ecx");
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__stosq(unsigned __int64 *__dst, unsigned __int64 __x, size_t __n) {
+ __asm__("rep stosq" : : "D"(__dst), "a"(__x), "c"(__n)
+ : "%edi", "%ecx");
+}
+#endif
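+/* Illustrative usage (editorial sketch, not part of the upstream header):
+ * the rep-string intrinsics behave like memset/memcpy, except that __n
+ * counts elements of the pointed-to type rather than bytes. */
+#if defined(__i386__) || defined(__x86_64__)
+static __inline__ void __DEFAULT_FN_ATTRS
+__example_fill_words(unsigned short *__dst, unsigned short __pattern,
+                     size_t __n) {
+  __stosw(__dst, __pattern, __n); /* a 16-bit-wide memset */
+}
+#endif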
+
+/*----------------------------------------------------------------------------*\
+|* Misc
+\*----------------------------------------------------------------------------*/
+static __inline__ void * __DEFAULT_FN_ATTRS
+_AddressOfReturnAddress(void) {
+ return (void*)((char*)__builtin_frame_address(0) + sizeof(void*));
+}
+static __inline__ void * __DEFAULT_FN_ATTRS
+_ReturnAddress(void) {
+ return __builtin_return_address(0);
+}
+#if defined(__i386__) || defined(__x86_64__)
+static __inline__ void __DEFAULT_FN_ATTRS
+__cpuid(int __info[4], int __level) {
+  __asm__ ("cpuid" : "=a"(__info[0]), "=b"(__info[1]), "=c"(__info[2]), "=d"(__info[3])
+ : "a"(__level));
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__cpuidex(int __info[4], int __level, int __ecx) {
+  __asm__ ("cpuid" : "=a"(__info[0]), "=b"(__info[1]), "=c"(__info[2]), "=d"(__info[3])
+ : "a"(__level), "c"(__ecx));
+}
+static __inline__ unsigned __int64 __cdecl __DEFAULT_FN_ATTRS
+_xgetbv(unsigned int __xcr_no) {
+ unsigned int __eax, __edx;
+ __asm__ ("xgetbv" : "=a" (__eax), "=d" (__edx) : "c" (__xcr_no));
+ return ((unsigned __int64)__edx << 32) | __eax;
+}
+static __inline__ void __DEFAULT_FN_ATTRS
+__halt(void) {
+ __asm__ volatile ("hlt");
+}
+#endif
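+/* Illustrative usage (editorial sketch, not part of the upstream header):
+ * CPUID leaf 0 returns the 12-byte vendor string in EBX, EDX, ECX --
+ * that is, __info[1], __info[3], __info[2] as filled in by __cpuid. */
+#if defined(__i386__) || defined(__x86_64__)
+static __inline__ void __DEFAULT_FN_ATTRS
+__example_cpu_vendor(char __vendor[13]) {
+  int __info[4];
+  __cpuid(__info, 0);
+  __builtin_memcpy(__vendor + 0, &__info[1], 4); /* EBX */
+  __builtin_memcpy(__vendor + 4, &__info[3], 4); /* EDX */
+  __builtin_memcpy(__vendor + 8, &__info[2], 4); /* ECX */
+  __vendor[12] = '\0';
+}
+#endif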
+
+/*----------------------------------------------------------------------------*\
+|* Privileged intrinsics
+\*----------------------------------------------------------------------------*/
+#if defined(__i386__) || defined(__x86_64__)
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__readmsr(unsigned long __register) {
+ // Loads the contents of a 64-bit model specific register (MSR) specified in
+ // the ECX register into registers EDX:EAX. The EDX register is loaded with
+ // the high-order 32 bits of the MSR and the EAX register is loaded with the
+ // low-order 32 bits. If less than 64 bits are implemented in the MSR being
+ // read, the values returned to EDX:EAX in unimplemented bit locations are
+ // undefined.
+ unsigned long __edx;
+ unsigned long __eax;
+ __asm__ ("rdmsr" : "=d"(__edx), "=a"(__eax) : "c"(__register));
+ return (((unsigned __int64)__edx) << 32) | (unsigned __int64)__eax;
+}
+
+static __inline__ unsigned long __DEFAULT_FN_ATTRS
+__readcr3(void) {
+ unsigned long __cr3_val;
+ __asm__ __volatile__ ("mov %%cr3, %0" : "=q"(__cr3_val) : : "memory");
+ return __cr3_val;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+__writecr3(unsigned int __cr3_val) {
+ __asm__ ("mov %0, %%cr3" : : "q"(__cr3_val) : "memory");
+}
+#endif
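+/* Illustrative usage (editorial sketch, not part of the upstream header):
+ * RDMSR/WRMSR and the CR3 accessors execute privileged instructions that
+ * raise #GP outside ring 0, so they are meaningful only in kernel-mode
+ * code. MSR 0x10 is the architectural IA32_TIME_STAMP_COUNTER. */
+#if defined(__i386__) || defined(__x86_64__)
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__example_read_tsc_msr(void) {
+  return __readmsr(0x10); /* requires CPL 0 */
+}
+#endif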
+
+#ifdef __cplusplus
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __INTRIN_H */
+#endif /* _MSC_VER */
diff --git a/contrib/llvm/tools/clang/lib/Headers/__clang_cuda_runtime_wrapper.h b/contrib/llvm/tools/clang/lib/Headers/__clang_cuda_runtime_wrapper.h
new file mode 100644
index 0000000..8e5f033
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/__clang_cuda_runtime_wrapper.h
@@ -0,0 +1,216 @@
+/*===---- __clang_cuda_runtime_wrapper.h - CUDA runtime support -------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/*
+ * WARNING: This header is intended to be directly -include'd by
+ * the compiler and is not supposed to be included by users.
+ *
+ * CUDA headers are implemented in a way that currently makes it
+ * impossible for user code to #include them directly when compiling
+ * with Clang. They present a different view of CUDA-supplied functions
+ * depending on where in NVCC's compilation pipeline the headers are
+ * included. Neither of these modes provides function definitions with
+ * correct attributes, so we use the preprocessor to force the headers
+ * into a form that Clang can use.
+ *
+ * Similarly to NVCC which -include's cuda_runtime.h, Clang -include's
+ * this file during every CUDA compilation.
+ */
+
+#ifndef __CLANG_CUDA_RUNTIME_WRAPPER_H__
+#define __CLANG_CUDA_RUNTIME_WRAPPER_H__
+
+#if defined(__CUDA__) && defined(__clang__)
+
+// Include some standard headers to avoid CUDA headers including them
+// while some required macros (like __THROW) are in a weird state.
+#include <stdlib.h>
+#include <cmath>
+
+// Preserve common macros that will be changed below by us or by CUDA
+// headers.
+#pragma push_macro("__THROW")
+#pragma push_macro("__CUDA_ARCH__")
+
+// WARNING: Preprocessor hacks below are based on specific details of
+// CUDA-7.x headers and are not expected to work with any other
+// version of CUDA headers.
+#include "cuda.h"
+#if !defined(CUDA_VERSION)
+#error "cuda.h did not define CUDA_VERSION"
+#elif CUDA_VERSION < 7000 || CUDA_VERSION > 7050
+#error "Unsupported CUDA version!"
+#endif
+
+// Make the largest subset of device functions available during host
+// compilation -- SM_35 for the time being.
+#ifndef __CUDA_ARCH__
+#define __CUDA_ARCH__ 350
+#endif
+
+#include "cuda_builtin_vars.h"
+
+// No need for device_launch_parameters.h as cuda_builtin_vars.h above
+// has taken care of the builtin variables declared in that file.
+#define __DEVICE_LAUNCH_PARAMETERS_H__
+
+// {math,device}_functions.h only have declarations of the
+// functions. We don't need them as we're going to pull in their
+// definitions from .hpp files.
+#define __DEVICE_FUNCTIONS_H__
+#define __MATH_FUNCTIONS_H__
+
+#undef __CUDACC__
+#define __CUDABE__
+// Disables definitions of device-side runtime support stubs in
+// cuda_device_runtime_api.h
+#define __CUDADEVRT_INTERNAL__
+#include "host_config.h"
+#include "host_defines.h"
+#include "driver_types.h"
+#include "common_functions.h"
+#undef __CUDADEVRT_INTERNAL__
+
+#undef __CUDABE__
+#define __CUDACC__
+#include "cuda_runtime.h"
+
+#undef __CUDACC__
+#define __CUDABE__
+
+// CUDA headers use __nvvm_memcpy and __nvvm_memset which Clang does
+// not have at the moment. Emulate them with a builtin memcpy/memset.
+#define __nvvm_memcpy(s,d,n,a) __builtin_memcpy(s,d,n)
+#define __nvvm_memset(d,c,n,a) __builtin_memset(d,c,n)
+
+#include "crt/host_runtime.h"
+#include "crt/device_runtime.h"
+// device_runtime.h defines __cxa_* macros that will conflict with
+// cxxabi.h.
+// FIXME: redefine these as __device__ functions.
+#undef __cxa_vec_ctor
+#undef __cxa_vec_cctor
+#undef __cxa_vec_dtor
+#undef __cxa_vec_new2
+#undef __cxa_vec_new3
+#undef __cxa_vec_delete2
+#undef __cxa_vec_delete
+#undef __cxa_vec_delete3
+#undef __cxa_pure_virtual
+
+// We need decls for functions in CUDA's libdevice with the __device__
+// attribute only. Alas, they come either as __host__ __device__ or
+// with no attributes at all. To work around that, define __CUDACC_RTC__
+// (which produces the HD variant) and define __host__ to nothing, which
+// gives us the desired decls with the __device__ attribute.
+#pragma push_macro("__host__")
+#define __host__
+#define __CUDACC_RTC__
+#include "device_functions_decls.h"
+#undef __CUDACC_RTC__
+
+// Temporarily poison __host__ macro to ensure it's not used by any of
+// the headers we're about to include.
+#define __host__ UNEXPECTED_HOST_ATTRIBUTE
+
+// device_functions.hpp and math_functions*.hpp use 'static
+// __forceinline__' (with no __device__) for definitions of device
+// functions. Temporarily redefine __forceinline__ to include
+// __device__.
+#pragma push_macro("__forceinline__")
+#define __forceinline__ __device__ __inline__ __attribute__((always_inline))
+#include "device_functions.hpp"
+#include "math_functions.hpp"
+#include "math_functions_dbl_ptx3.hpp"
+#pragma pop_macro("__forceinline__")
+
+// Pull in host-only functions that are only available when neither
+// __CUDACC__ nor __CUDABE__ is defined.
+#undef __MATH_FUNCTIONS_HPP__
+#undef __CUDABE__
+#include "math_functions.hpp"
+// Alas, additional overloads for these functions are hard to get to.
+// Considering that we only need these overloads for a few functions,
+// we can provide them here.
+static inline float rsqrt(float a) { return rsqrtf(a); }
+static inline float rcbrt(float a) { return rcbrtf(a); }
+static inline float sinpi(float a) { return sinpif(a); }
+static inline float cospi(float a) { return cospif(a); }
+static inline void sincospi(float a, float *b, float *c) {
+  return sincospif(a, b, c);
+}
+static inline float erfcinv(float a) { return erfcinvf(a); }
+static inline float normcdfinv(float a) { return normcdfinvf(a); }
+static inline float normcdf(float a) { return normcdff(a); }
+static inline float erfcx(float a) { return erfcxf(a); }
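+
+// Illustrative usage (editorial sketch, not part of the upstream
+// wrapper): with the overloads above, float arguments resolve to the
+// single-precision libdevice functions instead of being promoted to
+// double.
+static inline float __example_inv_norm(float __x, float __y) {
+  return rsqrt(__x * __x + __y * __y); // resolves to rsqrtf()
+}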
+
+// For some reason the single-argument variant is not always declared
+// by the CUDA headers. Alas, device_functions.hpp, included below,
+// needs it.
+static inline __device__ void __brkpt(int c) { __brkpt(); }
+
+// Now include *.hpp with definitions of various GPU functions. Alas,
+// a lot of things get declared/defined with the __host__ attribute,
+// which we don't want, so we have to define it away. We also have to
+// include {device,math}_functions.hpp again in order to extract the
+// other branch of the #if/else inside.
+
+#define __host__
+#undef __CUDABE__
+#define __CUDACC__
+#undef __DEVICE_FUNCTIONS_HPP__
+#include "device_functions.hpp"
+#include "device_atomic_functions.hpp"
+#include "sm_20_atomic_functions.hpp"
+#include "sm_32_atomic_functions.hpp"
+#include "sm_20_intrinsics.hpp"
+// sm_30_intrinsics.h has declarations that use default arguments, so
+// we have to include it; it will in turn include the corresponding .hpp.
+#include "sm_30_intrinsics.h"
+#include "sm_32_intrinsics.hpp"
+#undef __MATH_FUNCTIONS_HPP__
+#include "math_functions.hpp"
+#pragma pop_macro("__host__")
+
+#include "texture_indirect_functions.h"
+
+// Restore state of __CUDA_ARCH__ and __THROW we had on entry.
+#pragma pop_macro("__CUDA_ARCH__")
+#pragma pop_macro("__THROW")
+
+// Set up compiler macros expected to be seen during compilation.
+#undef __CUDABE__
+#define __CUDACC__
+#define __NVCC__
+
+#if defined(__CUDA_ARCH__)
+// We need to emit an IR declaration for the non-existent
+// __nvvm_reflect() to let the backend know that it should be treated
+// as a const nothrow function, which is what the NVVMReflect pass
+// expects to see.
+extern "C" __device__ __attribute__((const)) int __nvvm_reflect(const void *);
+static __device__ __attribute__((used)) int __nvvm_reflect_anchor() {
+ return __nvvm_reflect("NONE");
+}
+#endif
+
+#endif // __CUDA__
+#endif // __CLANG_CUDA_RUNTIME_WRAPPER_H__
diff --git a/contrib/llvm/tools/clang/lib/Headers/__stddef_max_align_t.h b/contrib/llvm/tools/clang/lib/Headers/__stddef_max_align_t.h
new file mode 100644
index 0000000..1e10ca9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/__stddef_max_align_t.h
@@ -0,0 +1,43 @@
+/*===---- __stddef_max_align_t.h - Definition of max_align_t for modules ---===
+ *
+ * Copyright (c) 2014 Chandler Carruth
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_MAX_ALIGN_T_DEFINED
+#define __CLANG_MAX_ALIGN_T_DEFINED
+
+#if defined(_MSC_VER)
+typedef double max_align_t;
+#elif defined(__APPLE__)
+typedef long double max_align_t;
+#else
+// Define 'max_align_t' to match the GCC definition.
+typedef struct {
+ long long __clang_max_align_nonce1
+ __attribute__((__aligned__(__alignof__(long long))));
+ long double __clang_max_align_nonce2
+ __attribute__((__aligned__(__alignof__(long double))));
+} max_align_t;
+#endif
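+
+/* Illustrative check (editorial sketch, not part of the upstream
+ * header): no standard scalar type is more strictly aligned than
+ * max_align_t, which C11 code can verify directly. */
+#if 0 /* requires C11 _Static_assert */
+_Static_assert(_Alignof(long long) <= _Alignof(max_align_t),
+               "max_align_t is at least as aligned as long long");
+_Static_assert(_Alignof(long double) <= _Alignof(max_align_t),
+               "max_align_t is at least as aligned as long double");
+#endif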
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Headers/__wmmintrin_aes.h b/contrib/llvm/tools/clang/lib/Headers/__wmmintrin_aes.h
new file mode 100644
index 0000000..100799e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/__wmmintrin_aes.h
@@ -0,0 +1,66 @@
+/*===---- __wmmintrin_aes.h - AES intrinsics -------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef _WMMINTRIN_AES_H
+#define _WMMINTRIN_AES_H
+
+#include <emmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("aes")))
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_aesenc_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesenc128(__V, __R);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_aesenclast_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesenclast128(__V, __R);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_aesdec_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesdec128(__V, __R);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_aesdeclast_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesdeclast128(__V, __R);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_aesimc_si128(__m128i __V)
+{
+ return (__m128i)__builtin_ia32_aesimc128(__V);
+}
+
+#define _mm_aeskeygenassist_si128(C, R) \
+ (__m128i)__builtin_ia32_aeskeygenassist128((__v2di)(__m128i)(C), (int)(R))
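+
+/* Illustrative usage (editorial sketch, not part of the upstream header):
+ * encrypting one block with an already-expanded AES-128 key schedule of
+ * 11 round keys -- initial whitening XOR, nine full rounds, then a final
+ * round without MixColumns. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+__example_aes128_encrypt_block(__m128i __block, const __m128i *__round_keys)
+{
+  int __i;
+  __block = _mm_xor_si128(__block, __round_keys[0]);
+  for (__i = 1; __i < 10; ++__i)
+    __block = _mm_aesenc_si128(__block, __round_keys[__i]);
+  return _mm_aesenclast_si128(__block, __round_keys[10]);
+}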
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* _WMMINTRIN_AES_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/__wmmintrin_pclmul.h b/contrib/llvm/tools/clang/lib/Headers/__wmmintrin_pclmul.h
new file mode 100644
index 0000000..68e944e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/__wmmintrin_pclmul.h
@@ -0,0 +1,30 @@
+/*===---- __wmmintrin_pclmul.h - PCLMUL intrinsics --------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef _WMMINTRIN_PCLMUL_H
+#define _WMMINTRIN_PCLMUL_H
+
+#define _mm_clmulepi64_si128(__X, __Y, __I) \
+ ((__m128i)__builtin_ia32_pclmulqdq128((__v2di)(__m128i)(__X), \
+ (__v2di)(__m128i)(__Y), (char)(__I)))
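+
+/* Illustrative usage (editorial sketch, not part of the upstream header):
+ * the immediate selects which 64-bit halves are multiplied (0x00 = low
+ * half of each operand, 0x11 = high halves). A full carry-less product
+ * of two 64-bit polynomials over GF(2): */
+static __inline__ __m128i
+__attribute__((__always_inline__, __nodebug__, __target__("pclmul")))
+__example_clmul64(unsigned long long __x, unsigned long long __y)
+{
+  return _mm_clmulepi64_si128(_mm_set_epi64x(0, (long long)__x),
+                              _mm_set_epi64x(0, (long long)__y), 0x00);
+}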
+
+#endif /* _WMMINTRIN_PCLMUL_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/adxintrin.h b/contrib/llvm/tools/clang/lib/Headers/adxintrin.h
new file mode 100644
index 0000000..ee34728
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/adxintrin.h
@@ -0,0 +1,86 @@
+/*===---- adxintrin.h - ADX intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <adxintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __ADXINTRIN_H
+#define __ADXINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+
+/* Intrinsics that are available only if __ADX__ is defined */
+static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx")))
+_addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
+ unsigned int *__p)
+{
+ return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p);
+}
+
+#ifdef __x86_64__
+static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx")))
+_addcarryx_u64(unsigned char __cf, unsigned long long __x,
+ unsigned long long __y, unsigned long long *__p)
+{
+ return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);
+}
+#endif
+
+/* Intrinsics that are also available if __ADX__ is undefined */
+static __inline unsigned char __DEFAULT_FN_ATTRS
+_addcarry_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
+ unsigned int *__p)
+{
+ return __builtin_ia32_addcarry_u32(__cf, __x, __y, __p);
+}
+
+#ifdef __x86_64__
+static __inline unsigned char __DEFAULT_FN_ATTRS
+_addcarry_u64(unsigned char __cf, unsigned long long __x,
+ unsigned long long __y, unsigned long long *__p)
+{
+ return __builtin_ia32_addcarry_u64(__cf, __x, __y, __p);
+}
+#endif
+
+static __inline unsigned char __DEFAULT_FN_ATTRS
+_subborrow_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
+ unsigned int *__p)
+{
+ return __builtin_ia32_subborrow_u32(__cf, __x, __y, __p);
+}
+
+#ifdef __x86_64__
+static __inline unsigned char __DEFAULT_FN_ATTRS
+_subborrow_u64(unsigned char __cf, unsigned long long __x,
+ unsigned long long __y, unsigned long long *__p)
+{
+ return __builtin_ia32_subborrow_u64(__cf, __x, __y, __p);
+}
+#endif
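+
+/* Illustrative usage (editorial sketch, not part of the upstream header):
+ * adding two 128-bit integers held as pairs of 64-bit limbs, threading
+ * the carry flag through the chain; the return value is the final
+ * carry-out. */
+#ifdef __x86_64__
+static __inline unsigned char __DEFAULT_FN_ATTRS
+__example_add128(const unsigned long long __a[2],
+                 const unsigned long long __b[2],
+                 unsigned long long __out[2])
+{
+  unsigned char __cf = _addcarry_u64(0, __a[0], __b[0], &__out[0]);
+  return _addcarry_u64(__cf, __a[1], __b[1], &__out[1]);
+}
+#endif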
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __ADXINTRIN_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/altivec.h b/contrib/llvm/tools/clang/lib/Headers/altivec.h
new file mode 100644
index 0000000..dc0dcbc
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/altivec.h
@@ -0,0 +1,13919 @@
+/*===---- altivec.h - Standard header for AltiVec intrinsics ---------------===*\
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __ALTIVEC_H
+#define __ALTIVEC_H
+
+#ifndef __ALTIVEC__
+#error "AltiVec support not enabled"
+#endif
+
+/* Constants for mapping CR6 bits to predicate result. */
+
+#define __CR6_EQ 0
+#define __CR6_EQ_REV 1
+#define __CR6_LT 2
+#define __CR6_LT_REV 3
+
+#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
+
+static vector signed char __ATTRS_o_ai vec_perm(vector signed char __a,
+ vector signed char __b,
+ vector unsigned char __c);
+
+static vector unsigned char __ATTRS_o_ai vec_perm(vector unsigned char __a,
+ vector unsigned char __b,
+ vector unsigned char __c);
+
+static vector bool char __ATTRS_o_ai vec_perm(vector bool char __a,
+ vector bool char __b,
+ vector unsigned char __c);
+
+static vector short __ATTRS_o_ai vec_perm(vector signed short __a,
+ vector signed short __b,
+ vector unsigned char __c);
+
+static vector unsigned short __ATTRS_o_ai vec_perm(vector unsigned short __a,
+ vector unsigned short __b,
+ vector unsigned char __c);
+
+static vector bool short __ATTRS_o_ai vec_perm(vector bool short __a,
+ vector bool short __b,
+ vector unsigned char __c);
+
+static vector pixel __ATTRS_o_ai vec_perm(vector pixel __a, vector pixel __b,
+ vector unsigned char __c);
+
+static vector int __ATTRS_o_ai vec_perm(vector signed int __a,
+ vector signed int __b,
+ vector unsigned char __c);
+
+static vector unsigned int __ATTRS_o_ai vec_perm(vector unsigned int __a,
+ vector unsigned int __b,
+ vector unsigned char __c);
+
+static vector bool int __ATTRS_o_ai vec_perm(vector bool int __a,
+ vector bool int __b,
+ vector unsigned char __c);
+
+static vector float __ATTRS_o_ai vec_perm(vector float __a, vector float __b,
+ vector unsigned char __c);
+
+#ifdef __VSX__
+static vector long long __ATTRS_o_ai vec_perm(vector signed long long __a,
+ vector signed long long __b,
+ vector unsigned char __c);
+
+static vector unsigned long long __ATTRS_o_ai
+vec_perm(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned char __c);
+
+static vector bool long long __ATTRS_o_ai
+vec_perm(vector bool long long __a, vector bool long long __b,
+ vector unsigned char __c);
+
+static vector double __ATTRS_o_ai vec_perm(vector double __a, vector double __b,
+ vector unsigned char __c);
+#endif
+
+static vector unsigned char __ATTRS_o_ai vec_xor(vector unsigned char __a,
+ vector unsigned char __b);
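+
+/* Illustrative usage (editorial sketch, not part of the upstream header):
+ * vec_perm picks bytes 0-31 from the concatenation of its first two
+ * operands, so reversing a vector's 16 bytes is a single permute with a
+ * descending index pattern (indices follow the traditional big-endian
+ * AltiVec numbering). */
+static vector unsigned char __attribute__((__always_inline__))
+__example_vec_reverse_bytes(vector unsigned char __v) {
+  vector unsigned char __idx = {15, 14, 13, 12, 11, 10, 9, 8,
+                                7,  6,  5,  4,  3,  2, 1, 0};
+  return vec_perm(__v, __v, __idx);
+}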
+
+/* vec_abs */
+
+#define __builtin_altivec_abs_v16qi vec_abs
+#define __builtin_altivec_abs_v8hi vec_abs
+#define __builtin_altivec_abs_v4si vec_abs
+
+static vector signed char __ATTRS_o_ai vec_abs(vector signed char __a) {
+ return __builtin_altivec_vmaxsb(__a, -__a);
+}
+
+static vector signed short __ATTRS_o_ai vec_abs(vector signed short __a) {
+ return __builtin_altivec_vmaxsh(__a, -__a);
+}
+
+static vector signed int __ATTRS_o_ai vec_abs(vector signed int __a) {
+ return __builtin_altivec_vmaxsw(__a, -__a);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed long long __ATTRS_o_ai
+vec_abs(vector signed long long __a) {
+ return __builtin_altivec_vmaxsd(__a, -__a);
+}
+#endif
+
+static vector float __ATTRS_o_ai vec_abs(vector float __a) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)(0x7FFFFFFF);
+ return (vector float)__res;
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector double __ATTRS_o_ai vec_abs(vector double __a) {
+ vector unsigned long long __res = { 0x7FFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFFF };
+  __res &= (vector unsigned long long)__a;
+ return (vector double)__res;
+}
+#endif
+
+/* vec_abss */
+#define __builtin_altivec_abss_v16qi vec_abss
+#define __builtin_altivec_abss_v8hi vec_abss
+#define __builtin_altivec_abss_v4si vec_abss
+
+static vector signed char __ATTRS_o_ai vec_abss(vector signed char __a) {
+ return __builtin_altivec_vmaxsb(
+ __a, __builtin_altivec_vsubsbs((vector signed char)(0), __a));
+}
+
+static vector signed short __ATTRS_o_ai vec_abss(vector signed short __a) {
+ return __builtin_altivec_vmaxsh(
+ __a, __builtin_altivec_vsubshs((vector signed short)(0), __a));
+}
+
+static vector signed int __ATTRS_o_ai vec_abss(vector signed int __a) {
+ return __builtin_altivec_vmaxsw(
+ __a, __builtin_altivec_vsubsws((vector signed int)(0), __a));
+}
+
+/* vec_add */
+
+static vector signed char __ATTRS_o_ai vec_add(vector signed char __a,
+ vector signed char __b) {
+ return __a + __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_add(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a + __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_add(vector signed char __a,
+ vector bool char __b) {
+ return __a + (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_add(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a + __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_add(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a + __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_add(vector unsigned char __a,
+ vector bool char __b) {
+ return __a + (vector unsigned char)__b;
+}
+
+static vector short __ATTRS_o_ai vec_add(vector short __a, vector short __b) {
+ return __a + __b;
+}
+
+static vector short __ATTRS_o_ai vec_add(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a + __b;
+}
+
+static vector short __ATTRS_o_ai vec_add(vector short __a,
+ vector bool short __b) {
+ return __a + (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_add(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a + __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_add(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__a + __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_add(vector unsigned short __a,
+ vector bool short __b) {
+ return __a + (vector unsigned short)__b;
+}
+
+static vector int __ATTRS_o_ai vec_add(vector int __a, vector int __b) {
+ return __a + __b;
+}
+
+static vector int __ATTRS_o_ai vec_add(vector bool int __a, vector int __b) {
+ return (vector int)__a + __b;
+}
+
+static vector int __ATTRS_o_ai vec_add(vector int __a, vector bool int __b) {
+ return __a + (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_add(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a + __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_add(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a + __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_add(vector unsigned int __a,
+ vector bool int __b) {
+ return __a + (vector unsigned int)__b;
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed long long __ATTRS_o_ai
+vec_add(vector signed long long __a, vector signed long long __b) {
+ return __a + __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_add(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a + __b;
+}
+
+static vector signed __int128 __ATTRS_o_ai vec_add(vector signed __int128 __a,
+ vector signed __int128 __b) {
+ return __a + __b;
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_add(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __a + __b;
+}
+#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+
+static vector float __ATTRS_o_ai vec_add(vector float __a, vector float __b) {
+ return __a + __b;
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_add(vector double __a, vector double __b) {
+ return __a + __b;
+}
+#endif // __VSX__
+
+/* vec_adde */
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed __int128 __ATTRS_o_ai
+vec_adde(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vaddeuqm(__a, __b, __c);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_adde(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vaddeuqm(__a, __b, __c);
+}
+#endif
+
+/* vec_addec */
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed __int128 __ATTRS_o_ai
+vec_addec(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vaddecuq(__a, __b, __c);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_addec(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vaddecuq(__a, __b, __c);
+}
+#endif
+
+/* vec_vaddubm */
+
+#define __builtin_altivec_vaddubm vec_vaddubm
+
+static vector signed char __ATTRS_o_ai vec_vaddubm(vector signed char __a,
+ vector signed char __b) {
+ return __a + __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vaddubm(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a + __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vaddubm(vector signed char __a,
+ vector bool char __b) {
+ return __a + (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vaddubm(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a + __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vaddubm(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a + __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vaddubm(vector unsigned char __a,
+ vector bool char __b) {
+ return __a + (vector unsigned char)__b;
+}
+
+/* vec_vadduhm */
+
+#define __builtin_altivec_vadduhm vec_vadduhm
+
+static vector short __ATTRS_o_ai vec_vadduhm(vector short __a,
+ vector short __b) {
+ return __a + __b;
+}
+
+static vector short __ATTRS_o_ai vec_vadduhm(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a + __b;
+}
+
+static vector short __ATTRS_o_ai vec_vadduhm(vector short __a,
+ vector bool short __b) {
+ return __a + (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vadduhm(vector unsigned short __a, vector unsigned short __b) {
+ return __a + __b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vadduhm(vector bool short __a, vector unsigned short __b) {
+ return (vector unsigned short)__a + __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vadduhm(vector unsigned short __a,
+ vector bool short __b) {
+ return __a + (vector unsigned short)__b;
+}
+
+/* vec_vadduwm */
+
+#define __builtin_altivec_vadduwm vec_vadduwm
+
+static vector int __ATTRS_o_ai vec_vadduwm(vector int __a, vector int __b) {
+ return __a + __b;
+}
+
+static vector int __ATTRS_o_ai vec_vadduwm(vector bool int __a,
+ vector int __b) {
+ return (vector int)__a + __b;
+}
+
+static vector int __ATTRS_o_ai vec_vadduwm(vector int __a,
+ vector bool int __b) {
+ return __a + (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vadduwm(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a + __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vadduwm(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a + __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vadduwm(vector unsigned int __a,
+ vector bool int __b) {
+ return __a + (vector unsigned int)__b;
+}
+
+/* vec_vaddfp */
+
+#define __builtin_altivec_vaddfp vec_vaddfp
+
+static vector float __attribute__((__always_inline__))
+vec_vaddfp(vector float __a, vector float __b) {
+ return __a + __b;
+}
+
+/* vec_addc */
+
+static vector signed int __ATTRS_o_ai vec_addc(vector signed int __a,
+ vector signed int __b) {
+ return (vector signed int)__builtin_altivec_vaddcuw((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_addc(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vaddcuw(__a, __b);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed __int128 __ATTRS_o_ai
+vec_addc(vector signed __int128 __a, vector signed __int128 __b) {
+ return (vector signed __int128)__builtin_altivec_vaddcuq(
+ (vector unsigned __int128)__a,
+ (vector unsigned __int128)__b);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_addc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __builtin_altivec_vaddcuq(__a, __b);
+}
+#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
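+
+/* Illustrative usage (editorial sketch, not part of the upstream header):
+ * vec_addc yields the per-element carry-out (0 or 1) of an unsigned
+ * 32-bit add, which pairs with vec_add when a widened sum is needed. */
+static vector unsigned int __attribute__((__always_inline__))
+__example_add_with_carry(vector unsigned int __a, vector unsigned int __b,
+                         vector unsigned int *__carry_out) {
+  *__carry_out = vec_addc(__a, __b); /* 1 where the 32-bit add wrapped */
+  return vec_add(__a, __b);          /* low 32 bits of each element sum */
+}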
+
+/* vec_vaddcuw */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vaddcuw(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vaddcuw(__a, __b);
+}
+
+/* vec_adds */
+
+static vector signed char __ATTRS_o_ai vec_adds(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vaddsbs(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_adds(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vaddsbs((vector signed char)__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_adds(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vaddsbs(__a, (vector signed char)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_adds(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vaddubs(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_adds(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vaddubs((vector unsigned char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_adds(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vaddubs(__a, (vector unsigned char)__b);
+}
+
+static vector short __ATTRS_o_ai vec_adds(vector short __a, vector short __b) {
+ return __builtin_altivec_vaddshs(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_adds(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vaddshs((vector short)__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_adds(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vaddshs(__a, (vector short)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_adds(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vadduhs(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_adds(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vadduhs((vector unsigned short)__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_adds(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vadduhs(__a, (vector unsigned short)__b);
+}
+
+static vector int __ATTRS_o_ai vec_adds(vector int __a, vector int __b) {
+ return __builtin_altivec_vaddsws(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_adds(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vaddsws((vector int)__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_adds(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vaddsws(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_adds(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vadduws(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_adds(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vadduws((vector unsigned int)__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_adds(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vadduws(__a, (vector unsigned int)__b);
+}
+
+/* vec_vaddsbs */
+
+static vector signed char __ATTRS_o_ai vec_vaddsbs(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vaddsbs(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vaddsbs(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vaddsbs((vector signed char)__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vaddsbs(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vaddsbs(__a, (vector signed char)__b);
+}
+
+/* vec_vaddubs */
+
+static vector unsigned char __ATTRS_o_ai vec_vaddubs(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vaddubs(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vaddubs(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vaddubs((vector unsigned char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vaddubs(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vaddubs(__a, (vector unsigned char)__b);
+}
+
+/* vec_vaddshs */
+
+static vector short __ATTRS_o_ai vec_vaddshs(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vaddshs(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_vaddshs(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vaddshs((vector short)__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_vaddshs(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vaddshs(__a, (vector short)__b);
+}
+
+/* vec_vadduhs */
+
+static vector unsigned short __ATTRS_o_ai
+vec_vadduhs(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vadduhs(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vadduhs(vector bool short __a, vector unsigned short __b) {
+ return __builtin_altivec_vadduhs((vector unsigned short)__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vadduhs(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vadduhs(__a, (vector unsigned short)__b);
+}
+
+/* vec_vaddsws */
+
+static vector int __ATTRS_o_ai vec_vaddsws(vector int __a, vector int __b) {
+ return __builtin_altivec_vaddsws(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_vaddsws(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vaddsws((vector int)__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_vaddsws(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vaddsws(__a, (vector int)__b);
+}
+
+/* vec_vadduws */
+
+static vector unsigned int __ATTRS_o_ai vec_vadduws(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vadduws(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vadduws(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vadduws((vector unsigned int)__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vadduws(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vadduws(__a, (vector unsigned int)__b);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+/* vec_vadduqm */
+
+static vector signed __int128 __ATTRS_o_ai
+vec_vadduqm(vector signed __int128 __a, vector signed __int128 __b) {
+ return __a + __b;
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_vadduqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __a + __b;
+}
+
+/* vec_vaddeuqm */
+
+static vector signed __int128 __ATTRS_o_ai
+vec_vaddeuqm(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vaddeuqm(__a, __b, __c);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_vaddeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vaddeuqm(__a, __b, __c);
+}
+
+/* vec_vaddcuq */
+
+static vector signed __int128 __ATTRS_o_ai
+vec_vaddcuq(vector signed __int128 __a, vector signed __int128 __b) {
+ return __builtin_altivec_vaddcuq(__a, __b);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_vaddcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __builtin_altivec_vaddcuq(__a, __b);
+}
+
+/* vec_vaddecuq */
+
+static vector signed __int128 __ATTRS_o_ai
+vec_vaddecuq(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vaddecuq(__a, __b, __c);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_vaddecuq(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vaddecuq(__a, __b, __c);
+}
+#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+
+/* vec_and */
+
+#define __builtin_altivec_vand vec_and
+
+static vector signed char __ATTRS_o_ai vec_and(vector signed char __a,
+ vector signed char __b) {
+ return __a & __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_and(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a & __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_and(vector signed char __a,
+ vector bool char __b) {
+ return __a & (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_and(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a & __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_and(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a & __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_and(vector unsigned char __a,
+ vector bool char __b) {
+ return __a & (vector unsigned char)__b;
+}
+
+static vector bool char __ATTRS_o_ai vec_and(vector bool char __a,
+ vector bool char __b) {
+ return __a & __b;
+}
+
+static vector short __ATTRS_o_ai vec_and(vector short __a, vector short __b) {
+ return __a & __b;
+}
+
+static vector short __ATTRS_o_ai vec_and(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a & __b;
+}
+
+static vector short __ATTRS_o_ai vec_and(vector short __a,
+ vector bool short __b) {
+ return __a & (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_and(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a & __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_and(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__a & __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_and(vector unsigned short __a,
+ vector bool short __b) {
+ return __a & (vector unsigned short)__b;
+}
+
+static vector bool short __ATTRS_o_ai vec_and(vector bool short __a,
+ vector bool short __b) {
+ return __a & __b;
+}
+
+static vector int __ATTRS_o_ai vec_and(vector int __a, vector int __b) {
+ return __a & __b;
+}
+
+static vector int __ATTRS_o_ai vec_and(vector bool int __a, vector int __b) {
+ return (vector int)__a & __b;
+}
+
+static vector int __ATTRS_o_ai vec_and(vector int __a, vector bool int __b) {
+ return __a & (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_and(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a & __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_and(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a & __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_and(vector unsigned int __a,
+ vector bool int __b) {
+ return __a & (vector unsigned int)__b;
+}
+
+static vector bool int __ATTRS_o_ai vec_and(vector bool int __a,
+ vector bool int __b) {
+ return __a & __b;
+}
+
+static vector float __ATTRS_o_ai vec_and(vector float __a, vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_and(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_and(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_and(vector bool long long __a, vector double __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & (vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static vector double __ATTRS_o_ai vec_and(vector double __a, vector bool long long __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & (vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static vector double __ATTRS_o_ai vec_and(vector double __a, vector double __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & (vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_and(vector signed long long __a, vector signed long long __b) {
+ return __a & __b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_and(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a & __b;
+}
+
+static vector signed long long __ATTRS_o_ai vec_and(vector signed long long __a,
+ vector bool long long __b) {
+ return __a & (vector signed long long)__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_and(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a & __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_and(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a & __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_and(vector unsigned long long __a, vector bool long long __b) {
+ return __a & (vector unsigned long long)__b;
+}
+
+static vector bool long long __ATTRS_o_ai vec_and(vector bool long long __a,
+ vector bool long long __b) {
+ return __a & __b;
+}
+#endif
+
+/* vec_vand */
+
+static vector signed char __ATTRS_o_ai vec_vand(vector signed char __a,
+ vector signed char __b) {
+ return __a & __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vand(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a & __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vand(vector signed char __a,
+ vector bool char __b) {
+ return __a & (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vand(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a & __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vand(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a & __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vand(vector unsigned char __a,
+ vector bool char __b) {
+ return __a & (vector unsigned char)__b;
+}
+
+static vector bool char __ATTRS_o_ai vec_vand(vector bool char __a,
+ vector bool char __b) {
+ return __a & __b;
+}
+
+static vector short __ATTRS_o_ai vec_vand(vector short __a, vector short __b) {
+ return __a & __b;
+}
+
+static vector short __ATTRS_o_ai vec_vand(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a & __b;
+}
+
+static vector short __ATTRS_o_ai vec_vand(vector short __a,
+ vector bool short __b) {
+ return __a & (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vand(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a & __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vand(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__a & __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vand(vector unsigned short __a,
+ vector bool short __b) {
+ return __a & (vector unsigned short)__b;
+}
+
+static vector bool short __ATTRS_o_ai vec_vand(vector bool short __a,
+ vector bool short __b) {
+ return __a & __b;
+}
+
+static vector int __ATTRS_o_ai vec_vand(vector int __a, vector int __b) {
+ return __a & __b;
+}
+
+static vector int __ATTRS_o_ai vec_vand(vector bool int __a, vector int __b) {
+ return (vector int)__a & __b;
+}
+
+static vector int __ATTRS_o_ai vec_vand(vector int __a, vector bool int __b) {
+ return __a & (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vand(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a & __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vand(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a & __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vand(vector unsigned int __a,
+ vector bool int __b) {
+ return __a & (vector unsigned int)__b;
+}
+
+static vector bool int __ATTRS_o_ai vec_vand(vector bool int __a,
+ vector bool int __b) {
+ return __a & __b;
+}
+
+static vector float __ATTRS_o_ai vec_vand(vector float __a, vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_vand(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_vand(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_vand(vector signed long long __a, vector signed long long __b) {
+ return __a & __b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_vand(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a & __b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_vand(vector signed long long __a, vector bool long long __b) {
+ return __a & (vector signed long long)__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vand(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a & __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vand(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a & __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vand(vector unsigned long long __a, vector bool long long __b) {
+ return __a & (vector unsigned long long)__b;
+}
+
+static vector bool long long __ATTRS_o_ai vec_vand(vector bool long long __a,
+ vector bool long long __b) {
+ return __a & __b;
+}
+#endif
+
+/* vec_andc */
+
+#define __builtin_altivec_vandc vec_andc
+
+static vector signed char __ATTRS_o_ai vec_andc(vector signed char __a,
+ vector signed char __b) {
+ return __a & ~__b;
+}
+
+static vector signed char __ATTRS_o_ai vec_andc(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a & ~__b;
+}
+
+static vector signed char __ATTRS_o_ai vec_andc(vector signed char __a,
+ vector bool char __b) {
+ return __a & ~(vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_andc(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a & ~__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_andc(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a & ~__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_andc(vector unsigned char __a,
+ vector bool char __b) {
+ return __a & ~(vector unsigned char)__b;
+}
+
+static vector bool char __ATTRS_o_ai vec_andc(vector bool char __a,
+ vector bool char __b) {
+ return __a & ~__b;
+}
+
+static vector short __ATTRS_o_ai vec_andc(vector short __a, vector short __b) {
+ return __a & ~__b;
+}
+
+static vector short __ATTRS_o_ai vec_andc(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a & ~__b;
+}
+
+static vector short __ATTRS_o_ai vec_andc(vector short __a,
+ vector bool short __b) {
+ return __a & ~(vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_andc(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a & ~__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_andc(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__a & ~__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_andc(vector unsigned short __a,
+ vector bool short __b) {
+ return __a & ~(vector unsigned short)__b;
+}
+
+static vector bool short __ATTRS_o_ai vec_andc(vector bool short __a,
+ vector bool short __b) {
+ return __a & ~__b;
+}
+
+static vector int __ATTRS_o_ai vec_andc(vector int __a, vector int __b) {
+ return __a & ~__b;
+}
+
+static vector int __ATTRS_o_ai vec_andc(vector bool int __a, vector int __b) {
+ return (vector int)__a & ~__b;
+}
+
+static vector int __ATTRS_o_ai vec_andc(vector int __a, vector bool int __b) {
+ return __a & ~(vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_andc(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a & ~__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_andc(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a & ~__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_andc(vector unsigned int __a,
+ vector bool int __b) {
+ return __a & ~(vector unsigned int)__b;
+}
+
+static vector bool int __ATTRS_o_ai vec_andc(vector bool int __a,
+ vector bool int __b) {
+ return __a & ~__b;
+}
+
+static vector float __ATTRS_o_ai vec_andc(vector float __a, vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & ~(vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_andc(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & ~(vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_andc(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & ~(vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_andc(vector bool long long __a, vector double __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & ~(vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static vector double __ATTRS_o_ai
+vec_andc(vector double __a, vector bool long long __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & ~(vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static vector double __ATTRS_o_ai vec_andc(vector double __a,
+                                           vector double __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & ~(vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_andc(vector signed long long __a, vector signed long long __b) {
+ return __a & ~__b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_andc(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a & ~__b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_andc(vector signed long long __a, vector bool long long __b) {
+ return __a & ~(vector signed long long)__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_andc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a & ~__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_andc(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a & ~__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_andc(vector unsigned long long __a, vector bool long long __b) {
+ return __a & ~(vector unsigned long long)__b;
+}
+
+static vector bool long long __ATTRS_o_ai vec_andc(vector bool long long __a,
+ vector bool long long __b) {
+ return __a & ~__b;
+}
+#endif
+
+/* vec_vandc */
+
+static vector signed char __ATTRS_o_ai vec_vandc(vector signed char __a,
+ vector signed char __b) {
+ return __a & ~__b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vandc(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a & ~__b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vandc(vector signed char __a,
+ vector bool char __b) {
+ return __a & ~(vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vandc(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a & ~__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vandc(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a & ~__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vandc(vector unsigned char __a,
+ vector bool char __b) {
+ return __a & ~(vector unsigned char)__b;
+}
+
+static vector bool char __ATTRS_o_ai vec_vandc(vector bool char __a,
+ vector bool char __b) {
+ return __a & ~__b;
+}
+
+static vector short __ATTRS_o_ai vec_vandc(vector short __a, vector short __b) {
+ return __a & ~__b;
+}
+
+static vector short __ATTRS_o_ai vec_vandc(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a & ~__b;
+}
+
+static vector short __ATTRS_o_ai vec_vandc(vector short __a,
+ vector bool short __b) {
+ return __a & ~(vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vandc(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a & ~__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vandc(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__a & ~__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vandc(vector unsigned short __a,
+ vector bool short __b) {
+ return __a & ~(vector unsigned short)__b;
+}
+
+static vector bool short __ATTRS_o_ai vec_vandc(vector bool short __a,
+ vector bool short __b) {
+ return __a & ~__b;
+}
+
+static vector int __ATTRS_o_ai vec_vandc(vector int __a, vector int __b) {
+ return __a & ~__b;
+}
+
+static vector int __ATTRS_o_ai vec_vandc(vector bool int __a, vector int __b) {
+ return (vector int)__a & ~__b;
+}
+
+static vector int __ATTRS_o_ai vec_vandc(vector int __a, vector bool int __b) {
+ return __a & ~(vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vandc(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a & ~__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vandc(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a & ~__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vandc(vector unsigned int __a,
+ vector bool int __b) {
+ return __a & ~(vector unsigned int)__b;
+}
+
+static vector bool int __ATTRS_o_ai vec_vandc(vector bool int __a,
+ vector bool int __b) {
+ return __a & ~__b;
+}
+
+static vector float __ATTRS_o_ai vec_vandc(vector float __a, vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & ~(vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_vandc(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & ~(vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_vandc(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a & ~(vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_vandc(vector signed long long __a, vector signed long long __b) {
+ return __a & ~__b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_vandc(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a & ~__b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_vandc(vector signed long long __a, vector bool long long __b) {
+ return __a & ~(vector signed long long)__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vandc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a & ~__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vandc(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a & ~__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vandc(vector unsigned long long __a, vector bool long long __b) {
+ return __a & ~(vector unsigned long long)__b;
+}
+
+static vector bool long long __ATTRS_o_ai vec_vandc(vector bool long long __a,
+ vector bool long long __b) {
+ return __a & ~__b;
+}
+#endif
+
+/* vec_avg */
+
+static vector signed char __ATTRS_o_ai vec_avg(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vavgsb(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_avg(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vavgub(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_avg(vector short __a, vector short __b) {
+ return __builtin_altivec_vavgsh(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_avg(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vavguh(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_avg(vector int __a, vector int __b) {
+ return __builtin_altivec_vavgsw(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_avg(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vavguw(__a, __b);
+}
+
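+/* Example (illustrative only): vec_avg is the rounded average
+   (__a + __b + 1) >> 1 per element, computed in extra precision so the
+   intermediate add cannot overflow:
+
+     vector unsigned char a = vec_splat_u8(9), b = vec_splat_u8(4);
+     vector unsigned char m = vec_avg(a, b);   // every lane == 7
+*/
+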
+/* vec_vavgsb */
+
+static vector signed char __attribute__((__always_inline__))
+vec_vavgsb(vector signed char __a, vector signed char __b) {
+ return __builtin_altivec_vavgsb(__a, __b);
+}
+
+/* vec_vavgub */
+
+static vector unsigned char __attribute__((__always_inline__))
+vec_vavgub(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_vavgub(__a, __b);
+}
+
+/* vec_vavgsh */
+
+static vector short __attribute__((__always_inline__))
+vec_vavgsh(vector short __a, vector short __b) {
+ return __builtin_altivec_vavgsh(__a, __b);
+}
+
+/* vec_vavguh */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_vavguh(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vavguh(__a, __b);
+}
+
+/* vec_vavgsw */
+
+static vector int __attribute__((__always_inline__))
+vec_vavgsw(vector int __a, vector int __b) {
+ return __builtin_altivec_vavgsw(__a, __b);
+}
+
+/* vec_vavguw */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vavguw(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vavguw(__a, __b);
+}
+
+/* vec_ceil */
+
+static vector float __ATTRS_o_ai vec_ceil(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvrspip(__a);
+#else
+ return __builtin_altivec_vrfip(__a);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_ceil(vector double __a) {
+ return __builtin_vsx_xvrdpip(__a);
+}
+#endif
+
+/* vec_vrfip */
+
+static vector float __attribute__((__always_inline__))
+vec_vrfip(vector float __a) {
+ return __builtin_altivec_vrfip(__a);
+}
+
+/* vec_cmpb */
+
+static vector int __attribute__((__always_inline__))
+vec_cmpb(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpbfp(__a, __b);
+}
+
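+/* Note (illustrative gloss): vec_cmpb is the bounds compare; the two
+   high bits of each result element flag __a > __b and __a < -__b
+   respectively, so an all-zero result means every element of __a lies
+   in [-__b, __b]. */
+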
+/* vec_vcmpbfp */
+
+static vector int __attribute__((__always_inline__))
+vec_vcmpbfp(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpbfp(__a, __b);
+}
+
+/* vec_cmpeq */
+
+static vector bool char __ATTRS_o_ai vec_cmpeq(vector signed char __a,
+ vector signed char __b) {
+ return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
+ (vector char)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_cmpeq(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
+ (vector char)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_cmpeq(vector short __a,
+ vector short __b) {
+ return (vector bool short)__builtin_altivec_vcmpequh(__a, __b);
+}
+
+static vector bool short __ATTRS_o_ai vec_cmpeq(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vcmpequh((vector short)__a,
+ (vector short)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_cmpeq(vector int __a, vector int __b) {
+ return (vector bool int)__builtin_altivec_vcmpequw(__a, __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_cmpeq(vector unsigned int __a,
+ vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vcmpequw((vector int)__a,
+ (vector int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static vector bool long long __ATTRS_o_ai
+vec_cmpeq(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)__builtin_altivec_vcmpequd(__a, __b);
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)__builtin_altivec_vcmpequd(
+ (vector long long)__a, (vector long long)__b);
+}
+#endif
+
+static vector bool int __ATTRS_o_ai vec_cmpeq(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return (vector bool int)__builtin_vsx_xvcmpeqsp(__a, __b);
+#else
+ return (vector bool int)__builtin_altivec_vcmpeqfp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static vector bool long long __ATTRS_o_ai
+vec_cmpeq(vector double __a, vector double __b) {
+ return (vector bool long long)__builtin_vsx_xvcmpeqdp(__a, __b);
+}
+#endif
+
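+/* Example (illustrative only): each comparison returns a "vector bool"
+   mask, all ones in lanes where the relation holds and all zeros
+   elsewhere; the mask is typically consumed by vec_sel or by the
+   vec_all_* / vec_any_* predicates defined later in this header:
+
+     vector int a = vec_splat_s32(3), b = vec_splat_s32(3);
+     vector bool int m = vec_cmpeq(a, b);   // all lanes 0xFFFFFFFF
+*/
+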
+/* vec_cmpgt */
+
+static vector bool char __ATTRS_o_ai vec_cmpgt(vector signed char __a,
+ vector signed char __b) {
+ return (vector bool char)__builtin_altivec_vcmpgtsb(__a, __b);
+}
+
+static vector bool char __ATTRS_o_ai vec_cmpgt(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vcmpgtub(__a, __b);
+}
+
+static vector bool short __ATTRS_o_ai vec_cmpgt(vector short __a,
+ vector short __b) {
+ return (vector bool short)__builtin_altivec_vcmpgtsh(__a, __b);
+}
+
+static vector bool short __ATTRS_o_ai vec_cmpgt(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vcmpgtuh(__a, __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_cmpgt(vector int __a, vector int __b) {
+ return (vector bool int)__builtin_altivec_vcmpgtsw(__a, __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_cmpgt(vector unsigned int __a,
+ vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b);
+}
+
+#ifdef __POWER8_VECTOR__
+static vector bool long long __ATTRS_o_ai
+vec_cmpgt(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)__builtin_altivec_vcmpgtsd(__a, __b);
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)__builtin_altivec_vcmpgtud(__a, __b);
+}
+#endif
+
+static vector bool int __ATTRS_o_ai vec_cmpgt(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return (vector bool int)__builtin_vsx_xvcmpgtsp(__a, __b);
+#else
+ return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static vector bool long long __ATTRS_o_ai
+vec_cmpgt(vector double __a, vector double __b) {
+ return (vector bool long long)__builtin_vsx_xvcmpgtdp(__a, __b);
+}
+#endif
+
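+/* Example (illustrative only): selecting with a compare mask gives a
+   branch-free per-lane maximum (vec_max below does this directly):
+
+     vector int a = vec_splat_s32(3), b = vec_splat_s32(5);
+     vector int mx = vec_sel(a, b, vec_cmpgt(b, a));   // all lanes == 5
+*/
+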
+/* vec_cmpge */
+
+static vector bool char __ATTRS_o_ai
+vec_cmpge(vector signed char __a, vector signed char __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_cmpge(vector unsigned char __a, vector unsigned char __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmpge(vector signed short __a, vector signed short __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmpge(vector unsigned short __a, vector unsigned short __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmpge(vector signed int __a, vector signed int __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmpge(vector unsigned int __a, vector unsigned int __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmpge(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return (vector bool int)__builtin_vsx_xvcmpgesp(__a, __b);
+#else
+ return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static vector bool long long __ATTRS_o_ai
+vec_cmpge(vector double __a, vector double __b) {
+ return (vector bool long long)__builtin_vsx_xvcmpgedp(__a, __b);
+}
+#endif
+
+#ifdef __POWER8_VECTOR__
+static vector bool long long __ATTRS_o_ai
+vec_cmpge(vector signed long long __a, vector signed long long __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+#endif
+
+/* vec_vcmpgefp */
+
+static vector bool int __attribute__((__always_inline__))
+vec_vcmpgefp(vector float __a, vector float __b) {
+ return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
+}
+
+/* vec_vcmpgtsb */
+
+static vector bool char __attribute__((__always_inline__))
+vec_vcmpgtsb(vector signed char __a, vector signed char __b) {
+ return (vector bool char)__builtin_altivec_vcmpgtsb(__a, __b);
+}
+
+/* vec_vcmpgtub */
+
+static vector bool char __attribute__((__always_inline__))
+vec_vcmpgtub(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vcmpgtub(__a, __b);
+}
+
+/* vec_vcmpgtsh */
+
+static vector bool short __attribute__((__always_inline__))
+vec_vcmpgtsh(vector short __a, vector short __b) {
+ return (vector bool short)__builtin_altivec_vcmpgtsh(__a, __b);
+}
+
+/* vec_vcmpgtuh */
+
+static vector bool short __attribute__((__always_inline__))
+vec_vcmpgtuh(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vcmpgtuh(__a, __b);
+}
+
+/* vec_vcmpgtsw */
+
+static vector bool int __attribute__((__always_inline__))
+vec_vcmpgtsw(vector int __a, vector int __b) {
+ return (vector bool int)__builtin_altivec_vcmpgtsw(__a, __b);
+}
+
+/* vec_vcmpgtuw */
+
+static vector bool int __attribute__((__always_inline__))
+vec_vcmpgtuw(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b);
+}
+
+/* vec_vcmpgtfp */
+
+static vector bool int __attribute__((__always_inline__))
+vec_vcmpgtfp(vector float __a, vector float __b) {
+ return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b);
+}
+
+/* vec_cmple */
+
+static vector bool char __ATTRS_o_ai
+vec_cmple(vector signed char __a, vector signed char __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_cmple(vector unsigned char __a, vector unsigned char __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmple(vector signed short __a, vector signed short __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmple(vector unsigned short __a, vector unsigned short __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmple(vector signed int __a, vector signed int __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmple(vector unsigned int __a, vector unsigned int __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmple(vector float __a, vector float __b) {
+ return vec_cmpge(__b, __a);
+}
+
+#ifdef __VSX__
+static vector bool long long __ATTRS_o_ai
+vec_cmple(vector double __a, vector double __b) {
+ return vec_cmpge(__b, __a);
+}
+#endif
+
+#ifdef __POWER8_VECTOR__
+static vector bool long long __ATTRS_o_ai
+vec_cmple(vector signed long long __a, vector signed long long __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_cmple(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_cmpge(__b, __a);
+}
+#endif
+
+/* vec_cmplt */
+
+static vector bool char __ATTRS_o_ai vec_cmplt(vector signed char __a,
+ vector signed char __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static vector bool char __ATTRS_o_ai vec_cmplt(vector unsigned char __a,
+ vector unsigned char __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static vector bool short __ATTRS_o_ai vec_cmplt(vector short __a,
+ vector short __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static vector bool short __ATTRS_o_ai vec_cmplt(vector unsigned short __a,
+ vector unsigned short __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static vector bool int __ATTRS_o_ai vec_cmplt(vector int __a, vector int __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static vector bool int __ATTRS_o_ai vec_cmplt(vector unsigned int __a,
+ vector unsigned int __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static vector bool int __ATTRS_o_ai vec_cmplt(vector float __a,
+ vector float __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+#ifdef __VSX__
+static vector bool long long __ATTRS_o_ai
+vec_cmplt(vector double __a, vector double __b) {
+ return vec_cmpgt(__b, __a);
+}
+#endif
+
+#ifdef __POWER8_VECTOR__
+static vector bool long long __ATTRS_o_ai
+vec_cmplt(vector signed long long __a, vector signed long long __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+/* vec_cntlz */
+
+static vector signed char __ATTRS_o_ai vec_cntlz(vector signed char __a) {
+ return __builtin_altivec_vclzb(__a);
+}
+static vector unsigned char __ATTRS_o_ai vec_cntlz(vector unsigned char __a) {
+ return __builtin_altivec_vclzb(__a);
+}
+static vector signed short __ATTRS_o_ai vec_cntlz(vector signed short __a) {
+ return __builtin_altivec_vclzh(__a);
+}
+static vector unsigned short __ATTRS_o_ai vec_cntlz(vector unsigned short __a) {
+ return __builtin_altivec_vclzh(__a);
+}
+static vector signed int __ATTRS_o_ai vec_cntlz(vector signed int __a) {
+ return __builtin_altivec_vclzw(__a);
+}
+static vector unsigned int __ATTRS_o_ai vec_cntlz(vector unsigned int __a) {
+ return __builtin_altivec_vclzw(__a);
+}
+static vector signed long long __ATTRS_o_ai
+vec_cntlz(vector signed long long __a) {
+ return __builtin_altivec_vclzd(__a);
+}
+static vector unsigned long long __ATTRS_o_ai
+vec_cntlz(vector unsigned long long __a) {
+ return __builtin_altivec_vclzd(__a);
+}
+#endif
+
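+/* Example (illustrative only, POWER8): vec_cntlz counts the leading
+   zero bits of each element independently:
+
+     vector unsigned int v = vec_splat_u32(1);
+     vector unsigned int n = vec_cntlz(v);   // every lane == 31
+*/
+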
+/* vec_cpsgn */
+
+#ifdef __VSX__
+static vector float __ATTRS_o_ai vec_cpsgn(vector float __a, vector float __b) {
+ return __builtin_vsx_xvcpsgnsp(__a, __b);
+}
+
+static vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcpsgndp(__a, __b);
+}
+#endif
+
+/* vec_ctf */
+
+static vector float __ATTRS_o_ai vec_ctf(vector int __a, int __b) {
+ return __builtin_altivec_vcfsx(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_ctf(vector unsigned int __a, int __b) {
+ return __builtin_altivec_vcfux((vector int)__a, __b);
+}
+
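+/* Example (illustrative only): the second argument is a scale factor;
+   each element is converted to float and divided by 2**__b, making
+   vec_ctf a fixed-point-to-float conversion:
+
+     vector int q = vec_splat_s32(12);
+     vector float f = vec_ctf(q, 2);   // every lane == 3.0f (12 / 4)
+*/
+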
+/* vec_vcfsx */
+
+static vector float __attribute__((__always_inline__))
+vec_vcfsx(vector int __a, int __b) {
+ return __builtin_altivec_vcfsx(__a, __b);
+}
+
+/* vec_vcfux */
+
+static vector float __attribute__((__always_inline__))
+vec_vcfux(vector unsigned int __a, int __b) {
+ return __builtin_altivec_vcfux((vector int)__a, __b);
+}
+
+/* vec_cts */
+
+static vector int __attribute__((__always_inline__))
+vec_cts(vector float __a, int __b) {
+ return __builtin_altivec_vctsxs(__a, __b);
+}
+
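+/* Example (illustrative only): the inverse of vec_ctf; each float is
+   multiplied by 2**__b and converted to a signed int with saturation:
+
+     vector float f = {1.5f, 1.5f, 1.5f, 1.5f};
+     vector int q = vec_cts(f, 1);   // every lane == 3 (1.5f * 2)
+*/
+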
+/* vec_vctsxs */
+
+static vector int __attribute__((__always_inline__))
+vec_vctsxs(vector float __a, int __b) {
+ return __builtin_altivec_vctsxs(__a, __b);
+}
+
+/* vec_ctu */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_ctu(vector float __a, int __b) {
+ return __builtin_altivec_vctuxs(__a, __b);
+}
+
+/* vec_vctuxs */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vctuxs(vector float __a, int __b) {
+ return __builtin_altivec_vctuxs(__a, __b);
+}
+
+/* vec_double */
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_double(vector signed long long __a) {
+ vector double __ret = { __a[0], __a[1] };
+ return __ret;
+}
+
+static vector double __ATTRS_o_ai vec_double(vector unsigned long long __a) {
+ vector double __ret = { __a[0], __a[1] };
+ return __ret;
+}
+#endif
+
+/* vec_div */
+
+/* Integer vector divides (the vectors are scalarized, the elements
+   divided, and the vectors reassembled). */
+static vector signed char __ATTRS_o_ai vec_div(vector signed char __a,
+ vector signed char __b) {
+ return __a / __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_div(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a / __b;
+}
+
+static vector signed short __ATTRS_o_ai vec_div(vector signed short __a,
+ vector signed short __b) {
+ return __a / __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_div(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a / __b;
+}
+
+static vector signed int __ATTRS_o_ai vec_div(vector signed int __a,
+ vector signed int __b) {
+ return __a / __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_div(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a / __b;
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_div(vector signed long long __a, vector signed long long __b) {
+ return __a / __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_div(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a / __b;
+}
+
+static vector float __ATTRS_o_ai vec_div(vector float __a, vector float __b) {
+ return __a / __b;
+}
+
+static vector double __ATTRS_o_ai vec_div(vector double __a,
+ vector double __b) {
+ return __a / __b;
+}
+#endif
+
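+/* Example (illustrative only): AltiVec has no integer divide
+   instruction, so the compiler lowers the element-wise / above to
+   scalar divides and reassembles the result:
+
+     vector signed int a = vec_splat_s32(10), b = vec_splat_s32(2);
+     vector signed int q = vec_div(a, b);   // every lane == 5
+*/
+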
+/* vec_dss */
+
+static void __attribute__((__always_inline__)) vec_dss(int __a) {
+ __builtin_altivec_dss(__a);
+}
+
+/* vec_dssall */
+
+static void __attribute__((__always_inline__)) vec_dssall(void) {
+ __builtin_altivec_dssall();
+}
+
+/* vec_dst */
+
+static void __attribute__((__always_inline__))
+vec_dst(const void *__a, int __b, int __c) {
+ __builtin_altivec_dst(__a, __b, __c);
+}
+
+/* vec_dstst */
+
+static void __attribute__((__always_inline__))
+vec_dstst(const void *__a, int __b, int __c) {
+ __builtin_altivec_dstst(__a, __b, __c);
+}
+
+/* vec_dststt */
+
+static void __attribute__((__always_inline__))
+vec_dststt(const void *__a, int __b, int __c) {
+ __builtin_altivec_dststt(__a, __b, __c);
+}
+
+/* vec_dstt */
+
+static void __attribute__((__always_inline__))
+vec_dstt(const void *__a, int __b, int __c) {
+ __builtin_altivec_dstt(__a, __b, __c);
+}
+
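+/* Note (illustrative gloss): vec_dst/vec_dstt/vec_dstst/vec_dststt
+   start software-directed prefetch of a data stream; __b packs the
+   block size, block count and stride into one control word, and __c
+   (0-3) selects one of four stream channels.  vec_dss(__c) stops one
+   channel and vec_dssall stops all four. */
+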
+/* vec_eqv */
+
+#ifdef __POWER8_VECTOR__
+static vector signed char __ATTRS_o_ai vec_eqv(vector signed char __a,
+ vector signed char __b) {
+ return (vector signed char)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_eqv(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_eqv(vector bool char __a,
+ vector bool char __b) {
+ return (vector bool char)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static vector signed short __ATTRS_o_ai vec_eqv(vector signed short __a,
+ vector signed short __b) {
+ return (vector signed short)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_eqv(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_eqv(vector bool short __a,
+ vector bool short __b) {
+ return (vector bool short)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static vector signed int __ATTRS_o_ai vec_eqv(vector signed int __a,
+ vector signed int __b) {
+ return (vector signed int)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_eqv(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_vsx_xxleqv(__a, __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_eqv(vector bool int __a,
+ vector bool int __b) {
+ return (vector bool int)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_eqv(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)
+ __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_eqv(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)
+ __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_eqv(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)
+ __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_eqv(vector float __a, vector float __b) {
+ return (vector float)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static vector double __ATTRS_o_ai vec_eqv(vector double __a,
+ vector double __b) {
+ return (vector double)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+#endif
+
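+/* Example (illustrative only): vec_eqv is bitwise equivalence,
+   ~(__a ^ __b), i.e. an XNOR of the two operands:
+
+     vector unsigned int a = vec_splat_u32(0);
+     vector unsigned int r = vec_eqv(a, a);   // all bits set
+*/
+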
+/* vec_expte */
+
+static vector float __attribute__((__always_inline__))
+vec_expte(vector float __a) {
+ return __builtin_altivec_vexptefp(__a);
+}
+
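+/* Note (illustrative gloss): vec_expte yields a hardware estimate of
+   2**x for each element, not a correctly rounded exp2. */
+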
+/* vec_vexptefp */
+
+static vector float __attribute__((__always_inline__))
+vec_vexptefp(vector float __a) {
+ return __builtin_altivec_vexptefp(__a);
+}
+
+/* vec_floor */
+
+static vector float __ATTRS_o_ai vec_floor(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvrspim(__a);
+#else
+ return __builtin_altivec_vrfim(__a);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_floor(vector double __a) {
+ return __builtin_vsx_xvrdpim(__a);
+}
+#endif
+
+/* vec_vrfim */
+
+static vector float __attribute__((__always_inline__))
+vec_vrfim(vector float __a) {
+ return __builtin_altivec_vrfim(__a);
+}
+
+/* vec_ld */
+
+static vector signed char __ATTRS_o_ai vec_ld(int __a,
+ const vector signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_ld(int __a, const signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_ld(int __a, const vector unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_ld(int __a,
+ const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector bool char __ATTRS_o_ai vec_ld(int __a,
+ const vector bool char *__b) {
+ return (vector bool char)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_ld(int __a, const vector short *__b) {
+ return (vector short)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_ld(int __a, const short *__b) {
+ return (vector short)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_ld(int __a, const vector unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_ld(int __a,
+ const unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector bool short __ATTRS_o_ai vec_ld(int __a,
+ const vector bool short *__b) {
+ return (vector bool short)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector pixel __ATTRS_o_ai vec_ld(int __a, const vector pixel *__b) {
+ return (vector pixel)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_ld(int __a, const vector int *__b) {
+ return (vector int)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_ld(int __a, const int *__b) {
+ return (vector int)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_ld(int __a,
+ const vector unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_ld(int __a,
+ const unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_ld(int __a,
+ const vector bool int *__b) {
+ return (vector bool int)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_ld(int __a, const vector float *__b) {
+ return (vector float)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_ld(int __a, const float *__b) {
+ return (vector float)__builtin_altivec_lvx(__a, __b);
+}
+
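+/* Example (illustrative only): vec_ld loads the 16-byte-aligned block
+   containing address (__a + __b); the low four bits of the effective
+   address are ignored, so it never performs an unaligned load:
+
+     float buf[8] __attribute__((aligned(16)));
+     vector float v = vec_ld(16, buf);   // loads buf[4] .. buf[7]
+*/
+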
+/* vec_lvx */
+
+static vector signed char __ATTRS_o_ai vec_lvx(int __a,
+ const vector signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_lvx(int __a,
+ const signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvx(int __a, const vector unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_lvx(int __a,
+ const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector bool char __ATTRS_o_ai vec_lvx(int __a,
+ const vector bool char *__b) {
+ return (vector bool char)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_lvx(int __a, const vector short *__b) {
+ return (vector short)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_lvx(int __a, const short *__b) {
+ return (vector short)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvx(int __a, const vector unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_lvx(int __a,
+ const unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector bool short __ATTRS_o_ai vec_lvx(int __a,
+ const vector bool short *__b) {
+ return (vector bool short)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector pixel __ATTRS_o_ai vec_lvx(int __a, const vector pixel *__b) {
+ return (vector pixel)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_lvx(int __a, const vector int *__b) {
+ return (vector int)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_lvx(int __a, const int *__b) {
+ return (vector int)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvx(int __a, const vector unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_lvx(int __a,
+ const unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_lvx(int __a,
+ const vector bool int *__b) {
+ return (vector bool int)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_lvx(int __a, const vector float *__b) {
+ return (vector float)__builtin_altivec_lvx(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_lvx(int __a, const float *__b) {
+ return (vector float)__builtin_altivec_lvx(__a, __b);
+}
+
+/* vec_lde */
+
+static vector signed char __ATTRS_o_ai vec_lde(int __a,
+ const signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvebx(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_lde(int __a,
+ const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvebx(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_lde(int __a, const short *__b) {
+ return (vector short)__builtin_altivec_lvehx(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_lde(int __a,
+ const unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvehx(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_lde(int __a, const int *__b) {
+ return (vector int)__builtin_altivec_lvewx(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_lde(int __a,
+ const unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvewx(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_lde(int __a, const float *__b) {
+ return (vector float)__builtin_altivec_lvewx(__a, __b);
+}
+
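+/* Example (illustrative only): vec_lde loads a single element into the
+   lane that corresponds to the address within its 16-byte block; the
+   remaining lanes are undefined:
+
+     int x = 42;
+     vector int v = vec_lde(0, &x);   // one lane holds 42
+*/
+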
+/* vec_lvebx */
+
+static vector signed char __ATTRS_o_ai vec_lvebx(int __a,
+ const signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvebx(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_lvebx(int __a,
+ const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvebx(__a, __b);
+}
+
+/* vec_lvehx */
+
+static vector short __ATTRS_o_ai vec_lvehx(int __a, const short *__b) {
+ return (vector short)__builtin_altivec_lvehx(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_lvehx(int __a,
+ const unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvehx(__a, __b);
+}
+
+/* vec_lvewx */
+
+static vector int __ATTRS_o_ai vec_lvewx(int __a, const int *__b) {
+ return (vector int)__builtin_altivec_lvewx(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_lvewx(int __a,
+ const unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvewx(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_lvewx(int __a, const float *__b) {
+ return (vector float)__builtin_altivec_lvewx(__a, __b);
+}
+
+/* vec_ldl */
+
+static vector signed char __ATTRS_o_ai vec_ldl(int __a,
+ const vector signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_ldl(int __a,
+ const signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_ldl(int __a, const vector unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_ldl(int __a,
+ const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector bool char __ATTRS_o_ai vec_ldl(int __a,
+ const vector bool char *__b) {
+ return (vector bool char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_ldl(int __a, const vector short *__b) {
+ return (vector short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_ldl(int __a, const short *__b) {
+ return (vector short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_ldl(int __a, const vector unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_ldl(int __a,
+ const unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector bool short __ATTRS_o_ai vec_ldl(int __a,
+ const vector bool short *__b) {
+ return (vector bool short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector pixel __ATTRS_o_ai vec_ldl(int __a, const vector pixel *__b) {
+  return (vector pixel)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_ldl(int __a, const vector int *__b) {
+ return (vector int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_ldl(int __a, const int *__b) {
+ return (vector int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_ldl(int __a, const vector unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_ldl(int __a,
+ const unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_ldl(int __a,
+ const vector bool int *__b) {
+ return (vector bool int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_ldl(int __a, const vector float *__b) {
+ return (vector float)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_ldl(int __a, const float *__b) {
+ return (vector float)__builtin_altivec_lvxl(__a, __b);
+}
+
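+/* Note (illustrative gloss): vec_ldl behaves like vec_ld but marks the
+   touched cache lines least-recently-used, hinting that the data is
+   transient and may be evicted first. */
+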
+/* vec_lvxl */
+
+static vector signed char __ATTRS_o_ai vec_lvxl(int __a,
+ const vector signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_lvxl(int __a,
+ const signed char *__b) {
+ return (vector signed char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvxl(int __a, const vector unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_lvxl(int __a,
+ const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector bool char __ATTRS_o_ai vec_lvxl(int __a,
+ const vector bool char *__b) {
+ return (vector bool char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_lvxl(int __a, const vector short *__b) {
+ return (vector short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_lvxl(int __a, const short *__b) {
+ return (vector short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvxl(int __a, const vector unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_lvxl(int __a,
+ const unsigned short *__b) {
+ return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector bool short __ATTRS_o_ai vec_lvxl(int __a,
+ const vector bool short *__b) {
+ return (vector bool short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector pixel __ATTRS_o_ai vec_lvxl(int __a, const vector pixel *__b) {
+ return (vector pixel)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_lvxl(int __a, const vector int *__b) {
+ return (vector int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_lvxl(int __a, const int *__b) {
+ return (vector int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvxl(int __a, const vector unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_lvxl(int __a,
+ const unsigned int *__b) {
+ return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_lvxl(int __a,
+ const vector bool int *__b) {
+ return (vector bool int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_lvxl(int __a, const vector float *__b) {
+ return (vector float)__builtin_altivec_lvxl(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_lvxl(int __a, const float *__b) {
+ return (vector float)__builtin_altivec_lvxl(__a, __b);
+}
+
+/* vec_loge */
+
+static vector float __attribute__((__always_inline__))
+vec_loge(vector float __a) {
+ return __builtin_altivec_vlogefp(__a);
+}
+
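+/* Note (illustrative gloss): vec_loge yields a hardware estimate of
+   log2(x) for each element, not a correctly rounded log2. */
+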
+/* vec_vlogefp */
+
+static vector float __attribute__((__always_inline__))
+vec_vlogefp(vector float __a) {
+ return __builtin_altivec_vlogefp(__a);
+}
+
+/* vec_lvsl */
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const signed char *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
+ const signed char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const unsigned char *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
+ const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const short *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a, const short *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const unsigned short *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
+ const unsigned short *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const int *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a, const int *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const unsigned int *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
+ const unsigned int *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsl(int __a, const float *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsl(int __a, const float *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+}
+#endif
+
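+/* Example (illustrative only, big endian): vec_lvsl feeds the classic
+   unaligned-load idiom of two aligned loads plus a permute; on little
+   endian this header deprecates it in favour of plain assignment.
+   Given a possibly unaligned const signed char *p:
+
+     vector unsigned char perm = vec_lvsl(0, p);
+     vector signed char lo = vec_ld(0, p);
+     vector signed char hi = vec_ld(15, p);
+     vector signed char v = vec_perm(lo, hi, perm);
+*/
+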
+/* vec_lvsr */
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const signed char *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
+ const signed char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const unsigned char *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
+ const unsigned char *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const short *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a, const short *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const unsigned short *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
+ const unsigned short *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const int *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a, const int *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const unsigned int *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
+ const unsigned int *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+ __attribute__((__deprecated__("use assignment for unaligned little endian \
+loads/stores"))) vec_lvsr(int __a, const float *__b) {
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
+static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a, const float *__b) {
+ return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+}
+#endif
+
+/* vec_madd */
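+
+/* vec_mladd is declared here and defined further down in this header;
+   the integer vec_madd overloads below simply forward to it (a modular
+   multiply-low and add, unlike the fused float/double forms). */
+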
+static vector signed short __ATTRS_o_ai
+vec_mladd(vector signed short, vector signed short, vector signed short);
+static vector signed short __ATTRS_o_ai
+vec_mladd(vector signed short, vector unsigned short, vector unsigned short);
+static vector signed short __ATTRS_o_ai
+vec_mladd(vector unsigned short, vector signed short, vector signed short);
+static vector unsigned short __ATTRS_o_ai
+vec_mladd(vector unsigned short, vector unsigned short, vector unsigned short);
+
+static vector signed short __ATTRS_o_ai
+vec_madd(vector signed short __a, vector signed short __b,
+ vector signed short __c) {
+ return vec_mladd(__a, __b, __c);
+}
+
+static vector signed short __ATTRS_o_ai
+vec_madd(vector signed short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return vec_mladd(__a, __b, __c);
+}
+
+static vector signed short __ATTRS_o_ai
+vec_madd(vector unsigned short __a, vector signed short __b,
+ vector signed short __c) {
+ return vec_mladd(__a, __b, __c);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_madd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return vec_mladd(__a, __b, __c);
+}
+
+static vector float __ATTRS_o_ai
+vec_madd(vector float __a, vector float __b, vector float __c) {
+#ifdef __VSX__
+ return __builtin_vsx_xvmaddasp(__a, __b, __c);
+#else
+ return __builtin_altivec_vmaddfp(__a, __b, __c);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_madd(vector double __a, vector double __b, vector double __c) {
+ return __builtin_vsx_xvmaddadp(__a, __b, __c);
+}
+#endif
+
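+/* Example (illustrative only): for float/double, vec_madd is a fused
+   multiply-add, __a * __b + __c with a single rounding:
+
+     vector float a = {2.0f, 2.0f, 2.0f, 2.0f};
+     vector float r = vec_madd(a, a, a);   // every lane == 6.0f
+*/
+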
+/* vec_vmaddfp */
+
+static vector float __attribute__((__always_inline__))
+vec_vmaddfp(vector float __a, vector float __b, vector float __c) {
+ return __builtin_altivec_vmaddfp(__a, __b, __c);
+}
+
+/* vec_madds */
+
+static vector signed short __attribute__((__always_inline__))
+vec_madds(vector signed short __a, vector signed short __b,
+ vector signed short __c) {
+ return __builtin_altivec_vmhaddshs(__a, __b, __c);
+}
+
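+/* Note (illustrative gloss): vec_madds is the saturating multiply-high
+   and add, ((__a * __b) >> 15) + __c with signed saturation. */
+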
+/* vec_vmhaddshs */
+
+static vector signed short __attribute__((__always_inline__))
+vec_vmhaddshs(vector signed short __a, vector signed short __b,
+ vector signed short __c) {
+ return __builtin_altivec_vmhaddshs(__a, __b, __c);
+}
+
+/* vec_msub */
+
+#ifdef __VSX__
+static vector float __ATTRS_o_ai
+vec_msub(vector float __a, vector float __b, vector float __c) {
+ return __builtin_vsx_xvmsubasp(__a, __b, __c);
+}
+
+static vector double __ATTRS_o_ai
+vec_msub(vector double __a, vector double __b, vector double __c) {
+ return __builtin_vsx_xvmsubadp(__a, __b, __c);
+}
+#endif
+
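+/* Note (illustrative gloss): vec_msub computes __a * __b - __c, the
+   fused counterpart of vec_madd with the addend subtracted. */
+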
+/* vec_max */
+
+static vector signed char __ATTRS_o_ai vec_max(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vmaxsb(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_max(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vmaxsb((vector signed char)__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_max(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vmaxsb(__a, (vector signed char)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_max(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vmaxub(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_max(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vmaxub((vector unsigned char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_max(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vmaxub(__a, (vector unsigned char)__b);
+}
+
+static vector short __ATTRS_o_ai vec_max(vector short __a, vector short __b) {
+ return __builtin_altivec_vmaxsh(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_max(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vmaxsh((vector short)__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_max(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vmaxsh(__a, (vector short)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_max(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vmaxuh(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_max(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vmaxuh((vector unsigned short)__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_max(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vmaxuh(__a, (vector unsigned short)__b);
+}
+
+static vector int __ATTRS_o_ai vec_max(vector int __a, vector int __b) {
+ return __builtin_altivec_vmaxsw(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_max(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vmaxsw((vector int)__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_max(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vmaxsw(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_max(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vmaxuw(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_max(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vmaxuw((vector unsigned int)__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_max(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static vector signed long long __ATTRS_o_ai
+vec_max(vector signed long long __a, vector signed long long __b) {
+ return __builtin_altivec_vmaxsd(__a, __b);
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_max(vector bool long long __a, vector signed long long __b) {
+ return __builtin_altivec_vmaxsd((vector signed long long)__a, __b);
+}
+
+static vector signed long long __ATTRS_o_ai vec_max(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vmaxsd(__a, (vector signed long long)__b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_max(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vmaxud(__a, __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_max(vector bool long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vmaxud((vector unsigned long long)__a, __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_max(vector unsigned long long __a, vector bool long long __b) {
+ return __builtin_altivec_vmaxud(__a, (vector unsigned long long)__b);
+}
+#endif
+
+static vector float __ATTRS_o_ai vec_max(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvmaxsp(__a, __b);
+#else
+ return __builtin_altivec_vmaxfp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_max(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvmaxdp(__a, __b);
+}
+#endif
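+
+/* Illustrative usage sketch (not part of the header): vec_max is a plain
+   element-wise maximum; in a translation unit compiled with -maltivec:
+
+     vector signed int a = {1, -2, 3, -4};
+     vector signed int b = {0, 5, -6, 7};
+     vector signed int m = vec_max(a, b);   // {1, 5, 3, 7}
+
+   The bool-vector overloads simply bitcast the mask operand first. */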
+
+/* vec_vmaxsb */
+
+static vector signed char __ATTRS_o_ai vec_vmaxsb(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vmaxsb(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vmaxsb(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vmaxsb((vector signed char)__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vmaxsb(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vmaxsb(__a, (vector signed char)__b);
+}
+
+/* vec_vmaxub */
+
+static vector unsigned char __ATTRS_o_ai vec_vmaxub(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vmaxub(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vmaxub(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vmaxub((vector unsigned char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vmaxub(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vmaxub(__a, (vector unsigned char)__b);
+}
+
+/* vec_vmaxsh */
+
+static vector short __ATTRS_o_ai vec_vmaxsh(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vmaxsh(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_vmaxsh(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vmaxsh((vector short)__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_vmaxsh(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vmaxsh(__a, (vector short)__b);
+}
+
+/* vec_vmaxuh */
+
+static vector unsigned short __ATTRS_o_ai
+vec_vmaxuh(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vmaxuh(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vmaxuh(vector bool short __a, vector unsigned short __b) {
+ return __builtin_altivec_vmaxuh((vector unsigned short)__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vmaxuh(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vmaxuh(__a, (vector unsigned short)__b);
+}
+
+/* vec_vmaxsw */
+
+static vector int __ATTRS_o_ai vec_vmaxsw(vector int __a, vector int __b) {
+ return __builtin_altivec_vmaxsw(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_vmaxsw(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vmaxsw((vector int)__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_vmaxsw(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vmaxsw(__a, (vector int)__b);
+}
+
+/* vec_vmaxuw */
+
+static vector unsigned int __ATTRS_o_ai vec_vmaxuw(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vmaxuw(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vmaxuw(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vmaxuw((vector unsigned int)__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vmaxuw(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b);
+}
+
+/* vec_vmaxfp */
+
+static vector float __attribute__((__always_inline__))
+vec_vmaxfp(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvmaxsp(__a, __b);
+#else
+ return __builtin_altivec_vmaxfp(__a, __b);
+#endif
+}
+
+/* vec_mergeh */
+
+static vector signed char __ATTRS_o_ai vec_mergeh(vector signed char __a,
+ vector signed char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
+ 0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
+ 0x06, 0x16, 0x07, 0x17));
+}
+
+static vector unsigned char __ATTRS_o_ai vec_mergeh(vector unsigned char __a,
+ vector unsigned char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
+ 0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
+ 0x06, 0x16, 0x07, 0x17));
+}
+
+static vector bool char __ATTRS_o_ai vec_mergeh(vector bool char __a,
+ vector bool char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
+ 0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
+ 0x06, 0x16, 0x07, 0x17));
+}
+
+static vector short __ATTRS_o_ai vec_mergeh(vector short __a,
+ vector short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_mergeh(vector unsigned short __a, vector unsigned short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector bool short __ATTRS_o_ai vec_mergeh(vector bool short __a,
+ vector bool short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector pixel __ATTRS_o_ai vec_mergeh(vector pixel __a,
+ vector pixel __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector int __ATTRS_o_ai vec_mergeh(vector int __a, vector int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector unsigned int __ATTRS_o_ai vec_mergeh(vector unsigned int __a,
+ vector unsigned int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector bool int __ATTRS_o_ai vec_mergeh(vector bool int __a,
+ vector bool int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector float __ATTRS_o_ai vec_mergeh(vector float __a,
+ vector float __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_mergeh(vector signed long long __a, vector signed long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_mergeh(vector signed long long __a, vector bool long long __b) {
+ return vec_perm(__a, (vector signed long long)__b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_mergeh(vector bool long long __a, vector signed long long __b) {
+ return vec_perm((vector signed long long)__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_mergeh(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_mergeh(vector unsigned long long __a, vector bool long long __b) {
+ return vec_perm(__a, (vector unsigned long long)__b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_mergeh(vector bool long long __a, vector unsigned long long __b) {
+ return vec_perm((vector unsigned long long)__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_mergeh(vector bool long long __a, vector bool long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector double __ATTRS_o_ai vec_mergeh(vector double __a,
+ vector double __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+static vector double __ATTRS_o_ai vec_mergeh(vector double __a,
+ vector bool long long __b) {
+ return vec_perm(__a, (vector double)__b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+static vector double __ATTRS_o_ai vec_mergeh(vector bool long long __a,
+ vector double __b) {
+ return vec_perm((vector double)__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+#endif
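+
+/* Illustrative sketch (not part of the header): vec_mergeh interleaves the
+   low-indexed halves of its operands, which is what the byte masks above
+   encode (vec_perm's little-endian correction keeps the element numbering
+   consistent on both endiannesses):
+
+     vector int a = {1, 2, 3, 4};
+     vector int b = {5, 6, 7, 8};
+     vector int hi = vec_mergeh(a, b);   // {1, 5, 2, 6}
+*/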
+
+/* vec_vmrghb */
+
+#define __builtin_altivec_vmrghb vec_vmrghb
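+/* Note: the merge is open-coded via vec_perm below rather than lowered to a
+   builtin, so this define makes uses of the builtin-style name resolve to the
+   overloaded function (the other vmrg and vmladduhm defines follow suit). */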
+
+static vector signed char __ATTRS_o_ai vec_vmrghb(vector signed char __a,
+ vector signed char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
+ 0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
+ 0x06, 0x16, 0x07, 0x17));
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vmrghb(vector unsigned char __a,
+ vector unsigned char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
+ 0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
+ 0x06, 0x16, 0x07, 0x17));
+}
+
+static vector bool char __ATTRS_o_ai vec_vmrghb(vector bool char __a,
+ vector bool char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
+ 0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
+ 0x06, 0x16, 0x07, 0x17));
+}
+
+/* vec_vmrghh */
+
+#define __builtin_altivec_vmrghh vec_vmrghh
+
+static vector short __ATTRS_o_ai vec_vmrghh(vector short __a,
+ vector short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vmrghh(vector unsigned short __a, vector unsigned short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector bool short __ATTRS_o_ai vec_vmrghh(vector bool short __a,
+ vector bool short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector pixel __ATTRS_o_ai vec_vmrghh(vector pixel __a,
+ vector pixel __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
+ 0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
+ 0x06, 0x07, 0x16, 0x17));
+}
+
+/* vec_vmrghw */
+
+#define __builtin_altivec_vmrghw vec_vmrghw
+
+static vector int __ATTRS_o_ai vec_vmrghw(vector int __a, vector int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vmrghw(vector unsigned int __a,
+ vector unsigned int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector bool int __ATTRS_o_ai vec_vmrghw(vector bool int __a,
+ vector bool int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector float __ATTRS_o_ai vec_vmrghw(vector float __a,
+ vector float __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
+/* vec_mergel */
+
+static vector signed char __ATTRS_o_ai vec_mergel(vector signed char __a,
+ vector signed char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
+ 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
+ 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static vector unsigned char __ATTRS_o_ai vec_mergel(vector unsigned char __a,
+ vector unsigned char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
+ 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
+ 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static vector bool char __ATTRS_o_ai vec_mergel(vector bool char __a,
+ vector bool char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
+ 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
+ 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static vector short __ATTRS_o_ai vec_mergel(vector short __a,
+ vector short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_mergel(vector unsigned short __a, vector unsigned short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector bool short __ATTRS_o_ai vec_mergel(vector bool short __a,
+ vector bool short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector pixel __ATTRS_o_ai vec_mergel(vector pixel __a,
+ vector pixel __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector int __ATTRS_o_ai vec_mergel(vector int __a, vector int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector unsigned int __ATTRS_o_ai vec_mergel(vector unsigned int __a,
+ vector unsigned int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector bool int __ATTRS_o_ai vec_mergel(vector bool int __a,
+ vector bool int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector float __ATTRS_o_ai vec_mergel(vector float __a,
+ vector float __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_mergel(vector signed long long __a, vector signed long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+                                         0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+static vector signed long long __ATTRS_o_ai
+vec_mergel(vector signed long long __a, vector bool long long __b) {
+ return vec_perm(__a, (vector signed long long)__b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+                                         0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+static vector signed long long __ATTRS_o_ai
+vec_mergel(vector bool long long __a, vector signed long long __b) {
+ return vec_perm((vector signed long long)__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+                                         0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+static vector unsigned long long __ATTRS_o_ai
+vec_mergel(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+                                         0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+static vector unsigned long long __ATTRS_o_ai
+vec_mergel(vector unsigned long long __a, vector bool long long __b) {
+ return vec_perm(__a, (vector unsigned long long)__b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+                                         0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+static vector unsigned long long __ATTRS_o_ai
+vec_mergel(vector bool long long __a, vector unsigned long long __b) {
+ return vec_perm((vector unsigned long long)__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+                                         0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+static vector bool long long __ATTRS_o_ai
+vec_mergel(vector bool long long __a, vector bool long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+                                         0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+static vector double __ATTRS_o_ai
+vec_mergel(vector double __a, vector double __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+                                         0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+static vector double __ATTRS_o_ai
+vec_mergel(vector double __a, vector bool long long __b) {
+ return vec_perm(__a, (vector double)__b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+                                         0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+static vector double __ATTRS_o_ai
+vec_mergel(vector bool long long __a, vector double __b) {
+ return vec_perm((vector double)__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+                                         0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+#endif
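+
+/* Sketch (not part of the header): vec_mergel is the high-indexed counterpart
+   of vec_mergeh; with the vectors from the vec_mergeh example above,
+   vec_mergel(a, b) == {3, 7, 4, 8}. */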
+
+/* vec_vmrglb */
+
+#define __builtin_altivec_vmrglb vec_vmrglb
+
+static vector signed char __ATTRS_o_ai vec_vmrglb(vector signed char __a,
+ vector signed char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
+ 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
+ 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vmrglb(vector unsigned char __a,
+ vector unsigned char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
+ 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
+ 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static vector bool char __ATTRS_o_ai vec_vmrglb(vector bool char __a,
+ vector bool char __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
+ 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
+ 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+/* vec_vmrglh */
+
+#define __builtin_altivec_vmrglh vec_vmrglh
+
+static vector short __ATTRS_o_ai vec_vmrglh(vector short __a,
+ vector short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vmrglh(vector unsigned short __a, vector unsigned short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector bool short __ATTRS_o_ai vec_vmrglh(vector bool short __a,
+ vector bool short __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector pixel __ATTRS_o_ai vec_vmrglh(vector pixel __a,
+ vector pixel __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
+ 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+/* vec_vmrglw */
+
+#define __builtin_altivec_vmrglw vec_vmrglw
+
+static vector int __ATTRS_o_ai vec_vmrglw(vector int __a, vector int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vmrglw(vector unsigned int __a,
+ vector unsigned int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector bool int __ATTRS_o_ai vec_vmrglw(vector bool int __a,
+ vector bool int __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector float __ATTRS_o_ai vec_vmrglw(vector float __a,
+ vector float __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
+ 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+#ifdef __POWER8_VECTOR__
+/* vec_mergee */
+
+static vector bool int __ATTRS_o_ai
+vec_mergee(vector bool int __a, vector bool int __b) {
+ return vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B));
+}
+
+static vector signed int __ATTRS_o_ai
+vec_mergee(vector signed int __a, vector signed int __b) {
+ return vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_mergee(vector unsigned int __a, vector unsigned int __b) {
+ return vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B));
+}
+
+/* vec_mergeo */
+
+static vector bool int __ATTRS_o_ai
+vec_mergeo(vector bool int __a, vector bool int __b) {
+ return vec_perm(__a, __b, (vector unsigned char)
+ (0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector signed int __ATTRS_o_ai
+vec_mergeo(vector signed int __a, vector signed int __b) {
+ return vec_perm(__a, __b, (vector unsigned char)
+ (0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_mergeo(vector unsigned int __a, vector unsigned int __b) {
+ return vec_perm(__a, __b, (vector unsigned char)
+ (0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+#endif
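+
+/* Sketch (not part of the header): vec_mergee keeps the even-numbered words
+   of each operand and vec_mergeo the odd-numbered ones:
+
+     vector signed int a = {10, 11, 12, 13};
+     vector signed int b = {20, 21, 22, 23};
+     vec_mergee(a, b);   // {10, 20, 12, 22}
+     vec_mergeo(a, b);   // {11, 21, 13, 23}
+*/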
+
+/* vec_mfvscr */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_mfvscr(void) {
+ return __builtin_altivec_mfvscr();
+}
+
+/* vec_min */
+
+static vector signed char __ATTRS_o_ai vec_min(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vminsb(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_min(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vminsb((vector signed char)__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_min(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vminsb(__a, (vector signed char)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_min(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vminub(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_min(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vminub((vector unsigned char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_min(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vminub(__a, (vector unsigned char)__b);
+}
+
+static vector short __ATTRS_o_ai vec_min(vector short __a, vector short __b) {
+ return __builtin_altivec_vminsh(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_min(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vminsh((vector short)__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_min(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vminsh(__a, (vector short)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_min(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vminuh(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_min(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vminuh((vector unsigned short)__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_min(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vminuh(__a, (vector unsigned short)__b);
+}
+
+static vector int __ATTRS_o_ai vec_min(vector int __a, vector int __b) {
+ return __builtin_altivec_vminsw(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_min(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vminsw((vector int)__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_min(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vminsw(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_min(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vminuw(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_min(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vminuw((vector unsigned int)__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_min(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vminuw(__a, (vector unsigned int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static vector signed long long __ATTRS_o_ai
+vec_min(vector signed long long __a, vector signed long long __b) {
+ return __builtin_altivec_vminsd(__a, __b);
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_min(vector bool long long __a, vector signed long long __b) {
+ return __builtin_altivec_vminsd((vector signed long long)__a, __b);
+}
+
+static vector signed long long __ATTRS_o_ai vec_min(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vminsd(__a, (vector signed long long)__b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_min(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vminud(__a, __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_min(vector bool long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vminud((vector unsigned long long)__a, __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_min(vector unsigned long long __a, vector bool long long __b) {
+ return __builtin_altivec_vminud(__a, (vector unsigned long long)__b);
+}
+#endif
+
+static vector float __ATTRS_o_ai vec_min(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvminsp(__a, __b);
+#else
+ return __builtin_altivec_vminfp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_min(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvmindp(__a, __b);
+}
+#endif
+
+/* vec_vminsb */
+
+static vector signed char __ATTRS_o_ai vec_vminsb(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vminsb(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vminsb(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vminsb((vector signed char)__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vminsb(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vminsb(__a, (vector signed char)__b);
+}
+
+/* vec_vminub */
+
+static vector unsigned char __ATTRS_o_ai vec_vminub(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vminub(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vminub(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vminub((vector unsigned char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vminub(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vminub(__a, (vector unsigned char)__b);
+}
+
+/* vec_vminsh */
+
+static vector short __ATTRS_o_ai vec_vminsh(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vminsh(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_vminsh(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vminsh((vector short)__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_vminsh(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vminsh(__a, (vector short)__b);
+}
+
+/* vec_vminuh */
+
+static vector unsigned short __ATTRS_o_ai
+vec_vminuh(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vminuh(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vminuh(vector bool short __a, vector unsigned short __b) {
+ return __builtin_altivec_vminuh((vector unsigned short)__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vminuh(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vminuh(__a, (vector unsigned short)__b);
+}
+
+/* vec_vminsw */
+
+static vector int __ATTRS_o_ai vec_vminsw(vector int __a, vector int __b) {
+ return __builtin_altivec_vminsw(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_vminsw(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vminsw((vector int)__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_vminsw(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vminsw(__a, (vector int)__b);
+}
+
+/* vec_vminuw */
+
+static vector unsigned int __ATTRS_o_ai vec_vminuw(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vminuw(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vminuw(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vminuw((vector unsigned int)__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vminuw(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vminuw(__a, (vector unsigned int)__b);
+}
+
+/* vec_vminfp */
+
+static vector float __attribute__((__always_inline__))
+vec_vminfp(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvminsp(__a, __b);
+#else
+ return __builtin_altivec_vminfp(__a, __b);
+#endif
+}
+
+/* vec_mladd */
+
+#define __builtin_altivec_vmladduhm vec_mladd
+
+static vector short __ATTRS_o_ai vec_mladd(vector short __a, vector short __b,
+ vector short __c) {
+ return __a * __b + __c;
+}
+
+static vector short __ATTRS_o_ai vec_mladd(vector short __a,
+ vector unsigned short __b,
+ vector unsigned short __c) {
+ return __a * (vector short)__b + (vector short)__c;
+}
+
+static vector short __ATTRS_o_ai vec_mladd(vector unsigned short __a,
+ vector short __b, vector short __c) {
+ return (vector short)__a * __b + __c;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_mladd(vector unsigned short __a,
+ vector unsigned short __b,
+ vector unsigned short __c) {
+ return __a * __b + __c;
+}
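+
+/* Sketch (not part of the header): vec_mladd is an element-wise halfword
+   multiply-add with products truncated modulo 2^16:
+
+     vector unsigned short a = {3, 0xFFFF, 0, 0, 0, 0, 0, 0};
+     vector unsigned short b = {4, 2, 0, 0, 0, 0, 0, 0};
+     vector unsigned short c = {1, 1, 0, 0, 0, 0, 0, 0};
+     vec_mladd(a, b, c);   // {13, 0xFFFF, 0, 0, 0, 0, 0, 0}
+*/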
+
+/* vec_vmladduhm */
+
+static vector short __ATTRS_o_ai vec_vmladduhm(vector short __a,
+ vector short __b,
+ vector short __c) {
+ return __a * __b + __c;
+}
+
+static vector short __ATTRS_o_ai vec_vmladduhm(vector short __a,
+ vector unsigned short __b,
+ vector unsigned short __c) {
+ return __a * (vector short)__b + (vector short)__c;
+}
+
+static vector short __ATTRS_o_ai vec_vmladduhm(vector unsigned short __a,
+ vector short __b,
+ vector short __c) {
+ return (vector short)__a * __b + __c;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vmladduhm(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __a * __b + __c;
+}
+
+/* vec_mradds */
+
+static vector short __attribute__((__always_inline__))
+vec_mradds(vector short __a, vector short __b, vector short __c) {
+ return __builtin_altivec_vmhraddshs(__a, __b, __c);
+}
+
+/* vec_vmhraddshs */
+
+static vector short __attribute__((__always_inline__))
+vec_vmhraddshs(vector short __a, vector short __b, vector short __c) {
+ return __builtin_altivec_vmhraddshs(__a, __b, __c);
+}
+
+/* vec_msum */
+
+static vector int __ATTRS_o_ai vec_msum(vector signed char __a,
+ vector unsigned char __b,
+ vector int __c) {
+ return __builtin_altivec_vmsummbm(__a, __b, __c);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_msum(vector unsigned char __a,
+ vector unsigned char __b,
+ vector unsigned int __c) {
+ return __builtin_altivec_vmsumubm(__a, __b, __c);
+}
+
+static vector int __ATTRS_o_ai vec_msum(vector short __a, vector short __b,
+ vector int __c) {
+ return __builtin_altivec_vmsumshm(__a, __b, __c);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_msum(vector unsigned short __a,
+ vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_altivec_vmsumuhm(__a, __b, __c);
+}
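+
+/* Sketch (not part of the header): vec_msum is a widening multiply-sum; for
+   the halfword forms, word i of the result is
+   __c[i] + __a[2i]*__b[2i] + __a[2i+1]*__b[2i+1] (four products per word for
+   the byte forms):
+
+     vector signed short a = {1, 2, 3, 4, 0, 0, 0, 0};
+     vector signed short b = {5, 6, 7, 8, 0, 0, 0, 0};
+     vector signed int c = {100, 200, 0, 0};
+     vec_msum(a, b, c);   // {117, 253, 0, 0}
+*/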
+
+/* vec_vmsummbm */
+
+static vector int __attribute__((__always_inline__))
+vec_vmsummbm(vector signed char __a, vector unsigned char __b, vector int __c) {
+ return __builtin_altivec_vmsummbm(__a, __b, __c);
+}
+
+/* vec_vmsumubm */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmsumubm(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned int __c) {
+ return __builtin_altivec_vmsumubm(__a, __b, __c);
+}
+
+/* vec_vmsumshm */
+
+static vector int __attribute__((__always_inline__))
+vec_vmsumshm(vector short __a, vector short __b, vector int __c) {
+ return __builtin_altivec_vmsumshm(__a, __b, __c);
+}
+
+/* vec_vmsumuhm */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmsumuhm(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_altivec_vmsumuhm(__a, __b, __c);
+}
+
+/* vec_msums */
+
+static vector int __ATTRS_o_ai vec_msums(vector short __a, vector short __b,
+ vector int __c) {
+ return __builtin_altivec_vmsumshs(__a, __b, __c);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_msums(vector unsigned short __a,
+ vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_altivec_vmsumuhs(__a, __b, __c);
+}
+
+/* vec_vmsumshs */
+
+static vector int __attribute__((__always_inline__))
+vec_vmsumshs(vector short __a, vector short __b, vector int __c) {
+ return __builtin_altivec_vmsumshs(__a, __b, __c);
+}
+
+/* vec_vmsumuhs */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmsumuhs(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_altivec_vmsumuhs(__a, __b, __c);
+}
+
+/* vec_mtvscr */
+
+static void __ATTRS_o_ai vec_mtvscr(vector signed char __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static void __ATTRS_o_ai vec_mtvscr(vector unsigned char __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static void __ATTRS_o_ai vec_mtvscr(vector bool char __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static void __ATTRS_o_ai vec_mtvscr(vector short __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static void __ATTRS_o_ai vec_mtvscr(vector unsigned short __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static void __ATTRS_o_ai vec_mtvscr(vector bool short __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static void __ATTRS_o_ai vec_mtvscr(vector pixel __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static void __ATTRS_o_ai vec_mtvscr(vector int __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static void __ATTRS_o_ai vec_mtvscr(vector unsigned int __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static void __ATTRS_o_ai vec_mtvscr(vector bool int __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
+
+static void __ATTRS_o_ai vec_mtvscr(vector float __a) {
+ __builtin_altivec_mtvscr((vector int)__a);
+}
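+
+/* Sketch (not part of the header): vec_mfvscr and vec_mtvscr pair up to save
+   and restore the vector status and control register:
+
+     vector unsigned short saved = vec_mfvscr();
+     ...                           // code that may change VSCR
+     vec_mtvscr(saved);
+*/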
+
+/* vec_mul */
+
+/* Integer vector multiplication multiplies the even and odd elements
+   separately, truncates each product to the element width, and merges the
+   truncated results back into the result vector.
+*/
+static vector signed char __ATTRS_o_ai vec_mul(vector signed char __a,
+ vector signed char __b) {
+ return __a * __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_mul(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a * __b;
+}
+
+static vector signed short __ATTRS_o_ai vec_mul(vector signed short __a,
+ vector signed short __b) {
+ return __a * __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_mul(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a * __b;
+}
+
+static vector signed int __ATTRS_o_ai vec_mul(vector signed int __a,
+ vector signed int __b) {
+ return __a * __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_mul(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a * __b;
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_mul(vector signed long long __a, vector signed long long __b) {
+ return __a * __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_mul(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a * __b;
+}
+#endif
+
+static vector float __ATTRS_o_ai vec_mul(vector float __a, vector float __b) {
+ return __a * __b;
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_mul(vector double __a, vector double __b) {
+ return __a * __b;
+}
+#endif
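+
+/* Sketch (not part of the header): vec_mul keeps only the low bits of each
+   product, exactly like ordinary C vector multiplication:
+
+     vector unsigned char x = {200, 1, 0, 0, 0, 0, 0, 0,
+                               0, 0, 0, 0, 0, 0, 0, 0};
+     vector unsigned char y = {2, 3, 0, 0, 0, 0, 0, 0,
+                               0, 0, 0, 0, 0, 0, 0, 0};
+     vec_mul(x, y);   // element 0 is 400 % 256 == 144
+*/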
+
+/* The vmulos* and vmules* instructions have a big endian bias, so
+ we must reverse the meaning of "even" and "odd" for little endian. */
+
+/* vec_mule */
+
+static vector short __ATTRS_o_ai vec_mule(vector signed char __a,
+ vector signed char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosb(__a, __b);
+#else
+ return __builtin_altivec_vmulesb(__a, __b);
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_mule(vector unsigned char __a,
+ vector unsigned char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuloub(__a, __b);
+#else
+ return __builtin_altivec_vmuleub(__a, __b);
+#endif
+}
+
+static vector int __ATTRS_o_ai vec_mule(vector short __a, vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosh(__a, __b);
+#else
+ return __builtin_altivec_vmulesh(__a, __b);
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai vec_mule(vector unsigned short __a,
+ vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulouh(__a, __b);
+#else
+ return __builtin_altivec_vmuleuh(__a, __b);
+#endif
+}
+
+#ifdef __POWER8_VECTOR__
+static vector signed long long __ATTRS_o_ai vec_mule(vector signed int __a,
+ vector signed int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosw(__a, __b);
+#else
+ return __builtin_altivec_vmulesw(__a, __b);
+#endif
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_mule(vector unsigned int __a, vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulouw(__a, __b);
+#else
+ return __builtin_altivec_vmuleuw(__a, __b);
+#endif
+}
+#endif
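+
+/* Sketch (not part of the header): vec_mule widens the products of the
+   even-numbered elements and vec_mulo those of the odd-numbered ones; the
+   builtin swap above keeps that numbering consistent on both endiannesses:
+
+     vector signed short a = {1, 2, 3, 4, 5, 6, 7, 8};
+     vec_mule(a, a);   // {1, 9, 25, 49}  (squares of elements 0, 2, 4, 6)
+*/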
+
+/* vec_vmulesb */
+
+static vector short __attribute__((__always_inline__))
+vec_vmulesb(vector signed char __a, vector signed char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosb(__a, __b);
+#else
+ return __builtin_altivec_vmulesb(__a, __b);
+#endif
+}
+
+/* vec_vmuleub */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_vmuleub(vector unsigned char __a, vector unsigned char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuloub(__a, __b);
+#else
+ return __builtin_altivec_vmuleub(__a, __b);
+#endif
+}
+
+/* vec_vmulesh */
+
+static vector int __attribute__((__always_inline__))
+vec_vmulesh(vector short __a, vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosh(__a, __b);
+#else
+ return __builtin_altivec_vmulesh(__a, __b);
+#endif
+}
+
+/* vec_vmuleuh */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmuleuh(vector unsigned short __a, vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulouh(__a, __b);
+#else
+ return __builtin_altivec_vmuleuh(__a, __b);
+#endif
+}
+
+/* vec_mulo */
+
+static vector short __ATTRS_o_ai vec_mulo(vector signed char __a,
+ vector signed char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesb(__a, __b);
+#else
+ return __builtin_altivec_vmulosb(__a, __b);
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_mulo(vector unsigned char __a,
+ vector unsigned char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleub(__a, __b);
+#else
+ return __builtin_altivec_vmuloub(__a, __b);
+#endif
+}
+
+static vector int __ATTRS_o_ai vec_mulo(vector short __a, vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesh(__a, __b);
+#else
+ return __builtin_altivec_vmulosh(__a, __b);
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai vec_mulo(vector unsigned short __a,
+ vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleuh(__a, __b);
+#else
+ return __builtin_altivec_vmulouh(__a, __b);
+#endif
+}
+
+#ifdef __POWER8_VECTOR__
+static vector signed long long __ATTRS_o_ai vec_mulo(vector signed int __a,
+ vector signed int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesw(__a, __b);
+#else
+ return __builtin_altivec_vmulosw(__a, __b);
+#endif
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_mulo(vector unsigned int __a, vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleuw(__a, __b);
+#else
+ return __builtin_altivec_vmulouw(__a, __b);
+#endif
+}
+#endif
+
+/* vec_vmulosb */
+
+static vector short __attribute__((__always_inline__))
+vec_vmulosb(vector signed char __a, vector signed char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesb(__a, __b);
+#else
+ return __builtin_altivec_vmulosb(__a, __b);
+#endif
+}
+
+/* vec_vmuloub */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_vmuloub(vector unsigned char __a, vector unsigned char __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleub(__a, __b);
+#else
+ return __builtin_altivec_vmuloub(__a, __b);
+#endif
+}
+
+/* vec_vmulosh */
+
+static vector int __attribute__((__always_inline__))
+vec_vmulosh(vector short __a, vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesh(__a, __b);
+#else
+ return __builtin_altivec_vmulosh(__a, __b);
+#endif
+}
+
+/* vec_vmulouh */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmulouh(vector unsigned short __a, vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleuh(__a, __b);
+#else
+ return __builtin_altivec_vmulouh(__a, __b);
+#endif
+}
+
+/* vec_nand */
+
+#ifdef __POWER8_VECTOR__
+static vector signed char __ATTRS_o_ai vec_nand(vector signed char __a,
+ vector signed char __b) {
+ return ~(__a & __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_nand(vector signed char __a,
+ vector bool char __b) {
+ return ~(__a & __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_nand(vector bool char __a,
+ vector signed char __b) {
+ return ~(__a & __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_nand(vector unsigned char __a,
+ vector unsigned char __b) {
+ return ~(__a & __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_nand(vector unsigned char __a,
+ vector bool char __b) {
+  return ~(__a & __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_nand(vector bool char __a,
+ vector unsigned char __b) {
+ return ~(__a & __b);
+}
+
+static vector bool char __ATTRS_o_ai vec_nand(vector bool char __a,
+ vector bool char __b) {
+ return ~(__a & __b);
+}
+
+static vector signed short __ATTRS_o_ai vec_nand(vector signed short __a,
+ vector signed short __b) {
+ return ~(__a & __b);
+}
+
+static vector signed short __ATTRS_o_ai vec_nand(vector signed short __a,
+ vector bool short __b) {
+ return ~(__a & __b);
+}
+
+static vector signed short __ATTRS_o_ai vec_nand(vector bool short __a,
+ vector signed short __b) {
+ return ~(__a & __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_nand(vector unsigned short __a,
+ vector unsigned short __b) {
+ return ~(__a & __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_nand(vector unsigned short __a,
+ vector bool short __b) {
+  return ~(__a & __b);
+}
+
+static vector bool short __ATTRS_o_ai vec_nand(vector bool short __a,
+ vector bool short __b) {
+  return ~(__a & __b);
+}
+
+static vector signed int __ATTRS_o_ai vec_nand(vector signed int __a,
+ vector signed int __b) {
+ return ~(__a & __b);
+}
+
+static vector signed int __ATTRS_o_ai vec_nand(vector signed int __a,
+ vector bool int __b) {
+ return ~(__a & __b);
+}
+
+static vector signed int __ATTRS_o_ai vec_nand(vector bool int __a,
+ vector signed int __b) {
+ return ~(__a & __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_nand(vector unsigned int __a,
+ vector unsigned int __b) {
+ return ~(__a & __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_nand(vector unsigned int __a,
+ vector bool int __b) {
+ return ~(__a & __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_nand(vector bool int __a,
+ vector unsigned int __b) {
+ return ~(__a & __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_nand(vector bool int __a,
+ vector bool int __b) {
+ return ~(__a & __b);
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_nand(vector signed long long __a, vector signed long long __b) {
+ return ~(__a & __b);
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_nand(vector signed long long __a, vector bool long long __b) {
+ return ~(__a & __b);
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_nand(vector bool long long __a, vector signed long long __b) {
+ return ~(__a & __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_nand(vector unsigned long long __a, vector unsigned long long __b) {
+ return ~(__a & __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_nand(vector unsigned long long __a, vector bool long long __b) {
+ return ~(__a & __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_nand(vector bool long long __a, vector unsigned long long __b) {
+ return ~(__a & __b);
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_nand(vector bool long long __a, vector bool long long __b) {
+ return ~(__a & __b);
+}
+
+#endif
+
+/* vec_nmadd */
+
+#ifdef __VSX__
+static vector float __ATTRS_o_ai
+vec_nmadd(vector float __a, vector float __b, vector float __c) {
+ return __builtin_vsx_xvnmaddasp(__a, __b, __c);
+}
+
+static vector double __ATTRS_o_ai
+vec_nmadd(vector double __a, vector double __b, vector double __c) {
+ return __builtin_vsx_xvnmaddadp(__a, __b, __c);
+}
+#endif
+
+/* vec_nmsub */
+
+static vector float __ATTRS_o_ai
+vec_nmsub(vector float __a, vector float __b, vector float __c) {
+#ifdef __VSX__
+ return __builtin_vsx_xvnmsubasp(__a, __b, __c);
+#else
+ return __builtin_altivec_vnmsubfp(__a, __b, __c);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_nmsub(vector double __a, vector double __b, vector double __c) {
+ return __builtin_vsx_xvnmsubadp(__a, __b, __c);
+}
+#endif
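+
+/* Note (not part of the header): following the usual fused negative
+   multiply-add semantics, vec_nmadd(a, b, c) computes -(a * b + c) and
+   vec_nmsub(a, b, c) computes -(a * b - c). */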
+
+/* vec_vnmsubfp */
+
+static vector float __attribute__((__always_inline__))
+vec_vnmsubfp(vector float __a, vector float __b, vector float __c) {
+ return __builtin_altivec_vnmsubfp(__a, __b, __c);
+}
+
+/* vec_nor */
+
+#define __builtin_altivec_vnor vec_nor
+
+static vector signed char __ATTRS_o_ai vec_nor(vector signed char __a,
+ vector signed char __b) {
+ return ~(__a | __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_nor(vector unsigned char __a,
+ vector unsigned char __b) {
+ return ~(__a | __b);
+}
+
+static vector bool char __ATTRS_o_ai vec_nor(vector bool char __a,
+ vector bool char __b) {
+ return ~(__a | __b);
+}
+
+static vector short __ATTRS_o_ai vec_nor(vector short __a, vector short __b) {
+ return ~(__a | __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_nor(vector unsigned short __a,
+ vector unsigned short __b) {
+ return ~(__a | __b);
+}
+
+static vector bool short __ATTRS_o_ai vec_nor(vector bool short __a,
+ vector bool short __b) {
+ return ~(__a | __b);
+}
+
+static vector int __ATTRS_o_ai vec_nor(vector int __a, vector int __b) {
+ return ~(__a | __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_nor(vector unsigned int __a,
+ vector unsigned int __b) {
+ return ~(__a | __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_nor(vector bool int __a,
+ vector bool int __b) {
+ return ~(__a | __b);
+}
+
+static vector float __ATTRS_o_ai vec_nor(vector float __a, vector float __b) {
+ vector unsigned int __res =
+ ~((vector unsigned int)__a | (vector unsigned int)__b);
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_nor(vector double __a, vector double __b) {
+ vector unsigned long long __res =
+ ~((vector unsigned long long)__a | (vector unsigned long long)__b);
+ return (vector double)__res;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_nor(vector signed long long __a, vector signed long long __b) {
+  return ~(__a | __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_nor(vector unsigned long long __a, vector unsigned long long __b) {
+  return ~(__a | __b);
+}
+
+static vector bool long long __ATTRS_o_ai vec_nor(vector bool long long __a,
+                                                  vector bool long long __b) {
+  return ~(__a | __b);
+}
+#endif
+
+/* vec_vnor */
+
+static vector signed char __ATTRS_o_ai vec_vnor(vector signed char __a,
+ vector signed char __b) {
+ return ~(__a | __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vnor(vector unsigned char __a,
+ vector unsigned char __b) {
+ return ~(__a | __b);
+}
+
+static vector bool char __ATTRS_o_ai vec_vnor(vector bool char __a,
+ vector bool char __b) {
+ return ~(__a | __b);
+}
+
+static vector short __ATTRS_o_ai vec_vnor(vector short __a, vector short __b) {
+ return ~(__a | __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vnor(vector unsigned short __a,
+ vector unsigned short __b) {
+ return ~(__a | __b);
+}
+
+static vector bool short __ATTRS_o_ai vec_vnor(vector bool short __a,
+ vector bool short __b) {
+ return ~(__a | __b);
+}
+
+static vector int __ATTRS_o_ai vec_vnor(vector int __a, vector int __b) {
+ return ~(__a | __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vnor(vector unsigned int __a,
+ vector unsigned int __b) {
+ return ~(__a | __b);
+}
+
+static vector bool int __ATTRS_o_ai vec_vnor(vector bool int __a,
+ vector bool int __b) {
+ return ~(__a | __b);
+}
+
+static vector float __ATTRS_o_ai vec_vnor(vector float __a, vector float __b) {
+ vector unsigned int __res =
+ ~((vector unsigned int)__a | (vector unsigned int)__b);
+ return (vector float)__res;
+}
+
+/* vec_or */
+
+#define __builtin_altivec_vor vec_or
+
+static vector signed char __ATTRS_o_ai vec_or(vector signed char __a,
+ vector signed char __b) {
+ return __a | __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_or(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a | __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_or(vector signed char __a,
+ vector bool char __b) {
+ return __a | (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_or(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a | __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_or(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a | __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_or(vector unsigned char __a,
+ vector bool char __b) {
+ return __a | (vector unsigned char)__b;
+}
+
+static vector bool char __ATTRS_o_ai vec_or(vector bool char __a,
+ vector bool char __b) {
+ return __a | __b;
+}
+
+static vector short __ATTRS_o_ai vec_or(vector short __a, vector short __b) {
+ return __a | __b;
+}
+
+static vector short __ATTRS_o_ai vec_or(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a | __b;
+}
+
+static vector short __ATTRS_o_ai vec_or(vector short __a,
+ vector bool short __b) {
+ return __a | (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_or(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a | __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_or(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__a | __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_or(vector unsigned short __a,
+ vector bool short __b) {
+ return __a | (vector unsigned short)__b;
+}
+
+static vector bool short __ATTRS_o_ai vec_or(vector bool short __a,
+ vector bool short __b) {
+ return __a | __b;
+}
+
+static vector int __ATTRS_o_ai vec_or(vector int __a, vector int __b) {
+ return __a | __b;
+}
+
+static vector int __ATTRS_o_ai vec_or(vector bool int __a, vector int __b) {
+ return (vector int)__a | __b;
+}
+
+static vector int __ATTRS_o_ai vec_or(vector int __a, vector bool int __b) {
+ return __a | (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_or(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a | __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_or(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a | __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_or(vector unsigned int __a,
+ vector bool int __b) {
+ return __a | (vector unsigned int)__b;
+}
+
+static vector bool int __ATTRS_o_ai vec_or(vector bool int __a,
+ vector bool int __b) {
+ return __a | __b;
+}
+
+static vector float __ATTRS_o_ai vec_or(vector float __a, vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a | (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_or(vector bool int __a, vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a | (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_or(vector float __a, vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a | (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_or(vector bool long long __a,
+ vector double __b) {
+ return (vector unsigned long long)__a | (vector unsigned long long)__b;
+}
+
+static vector double __ATTRS_o_ai vec_or(vector double __a,
+ vector bool long long __b) {
+ return (vector unsigned long long)__a | (vector unsigned long long)__b;
+}
+
+static vector double __ATTRS_o_ai vec_or(vector double __a, vector double __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a | (vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_or(vector signed long long __a, vector signed long long __b) {
+ return __a | __b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_or(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a | __b;
+}
+
+static vector signed long long __ATTRS_o_ai vec_or(vector signed long long __a,
+ vector bool long long __b) {
+ return __a | (vector signed long long)__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_or(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a | __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_or(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a | __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_or(vector unsigned long long __a, vector bool long long __b) {
+ return __a | (vector unsigned long long)__b;
+}
+
+static vector bool long long __ATTRS_o_ai vec_or(vector bool long long __a,
+ vector bool long long __b) {
+ return __a | __b;
+}
+#endif
+
+#ifdef __POWER8_VECTOR__
+static vector signed char __ATTRS_o_ai vec_orc(vector signed char __a,
+ vector signed char __b) {
+ return __a | ~__b;
+}
+
+static vector signed char __ATTRS_o_ai vec_orc(vector signed char __a,
+ vector bool char __b) {
+ return __a | ~__b;
+}
+
+static vector signed char __ATTRS_o_ai vec_orc(vector bool char __a,
+ vector signed char __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_orc(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_orc(vector unsigned char __a,
+ vector bool char __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_orc(vector bool char __a,
+ vector unsigned char __b) {
+ return __a | ~__b;
+}
+
+static vector bool char __ATTRS_o_ai vec_orc(vector bool char __a,
+ vector bool char __b) {
+ return __a | ~__b;
+}
+
+static vector signed short __ATTRS_o_ai vec_orc(vector signed short __a,
+ vector signed short __b) {
+ return __a | ~__b;
+}
+
+static vector signed short __ATTRS_o_ai vec_orc(vector signed short __a,
+ vector bool short __b) {
+ return __a | ~__b;
+}
+
+static vector signed short __ATTRS_o_ai vec_orc(vector bool short __a,
+ vector signed short __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_orc(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_orc(vector unsigned short __a,
+ vector bool short __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_orc(vector bool short __a, vector unsigned short __b) {
+ return __a | ~__b;
+}
+
+static vector bool short __ATTRS_o_ai vec_orc(vector bool short __a,
+ vector bool short __b) {
+ return __a | ~__b;
+}
+
+static vector signed int __ATTRS_o_ai vec_orc(vector signed int __a,
+ vector signed int __b) {
+ return __a | ~__b;
+}
+
+static vector signed int __ATTRS_o_ai vec_orc(vector signed int __a,
+ vector bool int __b) {
+ return __a | ~__b;
+}
+
+static vector signed int __ATTRS_o_ai vec_orc(vector bool int __a,
+ vector signed int __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_orc(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_orc(vector unsigned int __a,
+ vector bool int __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_orc(vector bool int __a,
+ vector unsigned int __b) {
+ return __a | ~__b;
+}
+
+static vector bool int __ATTRS_o_ai vec_orc(vector bool int __a,
+ vector bool int __b) {
+ return __a | ~__b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_orc(vector signed long long __a, vector signed long long __b) {
+ return __a | ~__b;
+}
+
+static vector signed long long __ATTRS_o_ai vec_orc(vector signed long long __a,
+ vector bool long long __b) {
+ return __a | ~__b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_orc(vector bool long long __a, vector signed long long __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_orc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_orc(vector unsigned long long __a, vector bool long long __b) {
+ return __a | ~__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_orc(vector bool long long __a, vector unsigned long long __b) {
+ return __a | ~__b;
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_orc(vector bool long long __a, vector bool long long __b) {
+ return __a | ~__b;
+}
+#endif
+
+/* vec_vor */
+
+static vector signed char __ATTRS_o_ai vec_vor(vector signed char __a,
+ vector signed char __b) {
+ return __a | __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vor(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a | __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vor(vector signed char __a,
+ vector bool char __b) {
+ return __a | (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vor(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a | __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vor(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a | __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vor(vector unsigned char __a,
+ vector bool char __b) {
+ return __a | (vector unsigned char)__b;
+}
+
+static vector bool char __ATTRS_o_ai vec_vor(vector bool char __a,
+ vector bool char __b) {
+ return __a | __b;
+}
+
+static vector short __ATTRS_o_ai vec_vor(vector short __a, vector short __b) {
+ return __a | __b;
+}
+
+static vector short __ATTRS_o_ai vec_vor(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a | __b;
+}
+
+static vector short __ATTRS_o_ai vec_vor(vector short __a,
+ vector bool short __b) {
+ return __a | (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vor(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a | __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vor(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__a | __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vor(vector unsigned short __a,
+ vector bool short __b) {
+ return __a | (vector unsigned short)__b;
+}
+
+static vector bool short __ATTRS_o_ai vec_vor(vector bool short __a,
+ vector bool short __b) {
+ return __a | __b;
+}
+
+static vector int __ATTRS_o_ai vec_vor(vector int __a, vector int __b) {
+ return __a | __b;
+}
+
+static vector int __ATTRS_o_ai vec_vor(vector bool int __a, vector int __b) {
+ return (vector int)__a | __b;
+}
+
+static vector int __ATTRS_o_ai vec_vor(vector int __a, vector bool int __b) {
+ return __a | (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vor(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a | __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vor(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a | __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vor(vector unsigned int __a,
+ vector bool int __b) {
+ return __a | (vector unsigned int)__b;
+}
+
+static vector bool int __ATTRS_o_ai vec_vor(vector bool int __a,
+ vector bool int __b) {
+ return __a | __b;
+}
+
+static vector float __ATTRS_o_ai vec_vor(vector float __a, vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a | (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_vor(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a | (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_vor(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a | (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_vor(vector signed long long __a, vector signed long long __b) {
+ return __a | __b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_vor(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a | __b;
+}
+
+static vector signed long long __ATTRS_o_ai vec_vor(vector signed long long __a,
+ vector bool long long __b) {
+ return __a | (vector signed long long)__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vor(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a | __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vor(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a | __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vor(vector unsigned long long __a, vector bool long long __b) {
+ return __a | (vector unsigned long long)__b;
+}
+
+static vector bool long long __ATTRS_o_ai vec_vor(vector bool long long __a,
+ vector bool long long __b) {
+ return __a | __b;
+}
+#endif
+
+/* vec_pack */
+
+/* The various vector pack instructions have a big-endian bias, so for
+ little endian we must handle reversed element numbering.  (An
+ illustrative sketch follows the vec_pack overloads below.) */
+
+static vector signed char __ATTRS_o_ai vec_pack(vector signed short __a,
+ vector signed short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector signed char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
+ return (vector signed char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
+}
+
+static vector unsigned char __ATTRS_o_ai vec_pack(vector unsigned short __a,
+ vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
+ return (vector unsigned char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
+}
+
+static vector bool char __ATTRS_o_ai vec_pack(vector bool short __a,
+ vector bool short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
+ return (vector bool char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
+}
+
+static vector short __ATTRS_o_ai vec_pack(vector int __a, vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
+ return (vector short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_pack(vector unsigned int __a,
+ vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
+ return (vector unsigned short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
+}
+
+static vector bool short __ATTRS_o_ai vec_pack(vector bool int __a,
+ vector bool int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
+ return (vector bool short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
+}
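+
+/* Editor's sketch (hypothetical helper, not part of the upstream header):
+ vec_pack keeps the low-order half of each source element, so the first
+ eight result elements come from __a and the last eight from __b on
+ either endianness; the permute masks above compensate for the reversed
+ numbering. */
+static vector signed char __ATTRS_o_ai
+__example_pack_mod(vector signed short __a, vector signed short __b) {
+ /* Truncation, not saturation: 0x0102 packs to 0x02 (contrast vec_packs
+ below). */
+ return vec_pack(__a, __b);
+}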
+
+#ifdef __VSX__
+static vector signed int __ATTRS_o_ai vec_pack(vector signed long long __a,
+ vector signed long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector signed int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
+#else
+ return (vector signed int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_pack(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
+#else
+ return (vector unsigned int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
+#endif
+}
+
+static vector bool int __ATTRS_o_ai vec_pack(vector bool long long __a,
+ vector bool long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
+#else
+ return (vector bool int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
+#endif
+}
+#endif
+
+/* vec_vpkuhum */
+
+#define __builtin_altivec_vpkuhum vec_vpkuhum
+
+static vector signed char __ATTRS_o_ai vec_vpkuhum(vector signed short __a,
+ vector signed short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector signed char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
+ return (vector signed char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vpkuhum(vector unsigned short __a, vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
+ return (vector unsigned char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
+}
+
+static vector bool char __ATTRS_o_ai vec_vpkuhum(vector bool short __a,
+ vector bool short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
+ return (vector bool char)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
+}
+
+/* vec_vpkuwum */
+
+#define __builtin_altivec_vpkuwum vec_vpkuwum
+
+static vector short __ATTRS_o_ai vec_vpkuwum(vector int __a, vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
+ return (vector short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vpkuwum(vector unsigned int __a,
+ vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
+ return (vector unsigned short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
+}
+
+static vector bool short __ATTRS_o_ai vec_vpkuwum(vector bool int __a,
+ vector bool int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
+ return (vector bool short)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
+}
+
+/* vec_vpkudum */
+
+#ifdef __POWER8_VECTOR__
+#define __builtin_altivec_vpkudum vec_vpkudum
+
+static vector int __ATTRS_o_ai vec_vpkudum(vector long long __a,
+ vector long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
+#else
+ return (vector int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vpkudum(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
+#else
+ return (vector unsigned int)vec_perm(
+ __a, __b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
+#endif
+}
+
+static vector bool int __ATTRS_o_ai vec_vpkudum(vector bool long long __a,
+ vector bool long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)vec_perm(
+ (vector long long)__a, (vector long long)__b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
+#else
+ return (vector bool int)vec_perm(
+ (vector long long)__a, (vector long long)__b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
+#endif
+}
+#endif
+
+/* vec_packpx */
+
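+/* Packs the four 8:8:8:8 32-bit pixels of each operand into eight
+ 1:5:5:5 16-bit pixels; the little-endian operand swap keeps the
+ elements of __a ahead of those of __b in the result. */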
+static vector pixel __attribute__((__always_inline__))
+vec_packpx(vector unsigned int __a, vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector pixel)__builtin_altivec_vpkpx(__b, __a);
+#else
+ return (vector pixel)__builtin_altivec_vpkpx(__a, __b);
+#endif
+}
+
+/* vec_vpkpx */
+
+static vector pixel __attribute__((__always_inline__))
+vec_vpkpx(vector unsigned int __a, vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector pixel)__builtin_altivec_vpkpx(__b, __a);
+#else
+ return (vector pixel)__builtin_altivec_vpkpx(__a, __b);
+#endif
+}
+
+/* vec_packs */
+
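+/* Saturating narrow: vpkshss/vpkswss/vpksdss clamp signed inputs to the
+ signed range of the narrower type, vpkuhus/vpkuwus/vpkudus clamp
+ unsigned inputs to the unsigned range; the little-endian operand swap
+ again compensates for reversed element numbering. */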
+static vector signed char __ATTRS_o_ai vec_packs(vector short __a,
+ vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkshss(__b, __a);
+#else
+ return __builtin_altivec_vpkshss(__a, __b);
+#endif
+}
+
+static vector unsigned char __ATTRS_o_ai vec_packs(vector unsigned short __a,
+ vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuhus(__b, __a);
+#else
+ return __builtin_altivec_vpkuhus(__a, __b);
+#endif
+}
+
+static vector signed short __ATTRS_o_ai vec_packs(vector int __a,
+ vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkswss(__b, __a);
+#else
+ return __builtin_altivec_vpkswss(__a, __b);
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_packs(vector unsigned int __a,
+ vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuwus(__b, __a);
+#else
+ return __builtin_altivec_vpkuwus(__a, __b);
+#endif
+}
+
+#ifdef __POWER8_VECTOR__
+static vector int __ATTRS_o_ai vec_packs(vector long long __a,
+ vector long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpksdss(__b, __a);
+#else
+ return __builtin_altivec_vpksdss(__a, __b);
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_packs(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkudus(__b, __a);
+#else
+ return __builtin_altivec_vpkudus(__a, __b);
+#endif
+}
+#endif
+
+/* vec_vpkshss */
+
+static vector signed char __attribute__((__always_inline__))
+vec_vpkshss(vector short __a, vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkshss(__b, __a);
+#else
+ return __builtin_altivec_vpkshss(__a, __b);
+#endif
+}
+
+/* vec_vpksdss */
+
+#ifdef __POWER8_VECTOR__
+static vector int __ATTRS_o_ai vec_vpksdss(vector long long __a,
+ vector long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpksdss(__b, __a);
+#else
+ return __builtin_altivec_vpksdss(__a, __b);
+#endif
+}
+#endif
+
+/* vec_vpkuhus */
+
+static vector unsigned char __attribute__((__always_inline__))
+vec_vpkuhus(vector unsigned short __a, vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuhus(__b, __a);
+#else
+ return __builtin_altivec_vpkuhus(__a, __b);
+#endif
+}
+
+/* vec_vpkudus */
+
+#ifdef __POWER8_VECTOR__
+static vector unsigned int __attribute__((__always_inline__))
+vec_vpkudus(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkudus(__b, __a);
+#else
+ return __builtin_altivec_vpkudus(__a, __b);
+#endif
+}
+#endif
+
+/* vec_vpkswss */
+
+static vector signed short __attribute__((__always_inline__))
+vec_vpkswss(vector int __a, vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkswss(__b, __a);
+#else
+ return __builtin_altivec_vpkswss(__a, __b);
+#endif
+}
+
+/* vec_vpkuwus */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_vpkuwus(vector unsigned int __a, vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuwus(__b, __a);
+#else
+ return __builtin_altivec_vpkuwus(__a, __b);
+#endif
+}
+
+/* vec_packsu */
+
+static vector unsigned char __ATTRS_o_ai vec_packsu(vector short __a,
+ vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkshus(__b, __a);
+#else
+ return __builtin_altivec_vpkshus(__a, __b);
+#endif
+}
+
+static vector unsigned char __ATTRS_o_ai vec_packsu(vector unsigned short __a,
+ vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuhus(__b, __a);
+#else
+ return __builtin_altivec_vpkuhus(__a, __b);
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_packsu(vector int __a,
+ vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkswus(__b, __a);
+#else
+ return __builtin_altivec_vpkswus(__a, __b);
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_packsu(vector unsigned int __a,
+ vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuwus(__b, __a);
+#else
+ return __builtin_altivec_vpkuwus(__a, __b);
+#endif
+}
+
+#ifdef __POWER8_VECTOR__
+static vector unsigned int __ATTRS_o_ai vec_packsu(vector long long __a,
+ vector long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpksdus(__b, __a);
+#else
+ return __builtin_altivec_vpksdus(__a, __b);
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_packsu(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkudus(__b, __a);
+#else
+ return __builtin_altivec_vpkudus(__a, __b);
+#endif
+}
+#endif
+
+/* vec_vpkshus */
+
+static vector unsigned char __ATTRS_o_ai vec_vpkshus(vector short __a,
+ vector short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkshus(__b, __a);
+#else
+ return __builtin_altivec_vpkshus(__a, __b);
+#endif
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vpkshus(vector unsigned short __a, vector unsigned short __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuhus(__b, __a);
+#else
+ return __builtin_altivec_vpkuhus(__a, __b);
+#endif
+}
+
+/* vec_vpkswus */
+
+static vector unsigned short __ATTRS_o_ai vec_vpkswus(vector int __a,
+ vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkswus(__b, __a);
+#else
+ return __builtin_altivec_vpkswus(__a, __b);
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vpkswus(vector unsigned int __a,
+ vector unsigned int __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuwus(__b, __a);
+#else
+ return __builtin_altivec_vpkuwus(__a, __b);
+#endif
+}
+
+/* vec_vpksdus */
+
+#ifdef __POWER8_VECTOR__
+static vector unsigned int __ATTRS_o_ai vec_vpksdus(vector long long __a,
+ vector long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpksdus(__b, __a);
+#else
+ return __builtin_altivec_vpksdus(__a, __b);
+#endif
+}
+#endif
+
+/* vec_perm */
+
+// The vperm instruction is defined architecturally with a big-endian bias.
+// For little endian, we swap the input operands and invert the permute
+// control vector. Only the rightmost 5 bits matter, so we could use
+// a vector of all 31s instead of all 255s to perform the inversion.
+// However, when the PCV is not a constant, using 255 has an advantage
+// in that the vec_xor can be recognized as a vec_nor (and for P8 and
+// later, possibly a vec_nand).
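+//
+// Editor's sketch (hypothetical helper, not part of the upstream header):
+// the inversion is the single vec_xor used in the little-endian paths
+// below.  Complementing a control byte also complements its low five
+// bits, so entry i becomes 31 - (i & 31), re-indexing the swapped
+// operands.
+static vector unsigned char __ATTRS_o_ai
+__example_invert_pcv(vector unsigned char __c) {
+ vector unsigned char __ones = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ return vec_xor(__c, __ones); /* ~__c; vperm reads only the low 5 bits */
+}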
+
+static vector signed char __ATTRS_o_ai vec_perm(vector signed char __a,
+ vector signed char __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector signed char)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector signed char)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static vector unsigned char __ATTRS_o_ai vec_perm(vector unsigned char __a,
+ vector unsigned char __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector unsigned char)__builtin_altivec_vperm_4si(
+ (vector int)__b, (vector int)__a, __d);
+#else
+ return (vector unsigned char)__builtin_altivec_vperm_4si(
+ (vector int)__a, (vector int)__b, __c);
+#endif
+}
+
+static vector bool char __ATTRS_o_ai vec_perm(vector bool char __a,
+ vector bool char __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector bool char)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector bool char)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static vector short __ATTRS_o_ai vec_perm(vector signed short __a,
+ vector signed short __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector signed short)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector signed short)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_perm(vector unsigned short __a,
+ vector unsigned short __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector unsigned short)__builtin_altivec_vperm_4si(
+ (vector int)__b, (vector int)__a, __d);
+#else
+ return (vector unsigned short)__builtin_altivec_vperm_4si(
+ (vector int)__a, (vector int)__b, __c);
+#endif
+}
+
+static vector bool short __ATTRS_o_ai vec_perm(vector bool short __a,
+ vector bool short __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector bool short)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector bool short)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static vector pixel __ATTRS_o_ai vec_perm(vector pixel __a, vector pixel __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector pixel)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector pixel)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static vector int __ATTRS_o_ai vec_perm(vector signed int __a,
+ vector signed int __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector signed int)__builtin_altivec_vperm_4si(__b, __a, __d);
+#else
+ return (vector signed int)__builtin_altivec_vperm_4si(__a, __b, __c);
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai vec_perm(vector unsigned int __a,
+ vector unsigned int __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static vector bool int __ATTRS_o_ai vec_perm(vector bool int __a,
+ vector bool int __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector bool int)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector bool int)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+static vector float __ATTRS_o_ai vec_perm(vector float __a, vector float __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector float)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector float)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+
+#ifdef __VSX__
+static vector long long __ATTRS_o_ai vec_perm(vector signed long long __a,
+ vector signed long long __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector signed long long)__builtin_altivec_vperm_4si(
+ (vector int)__b, (vector int)__a, __d);
+#else
+ return (vector signed long long)__builtin_altivec_vperm_4si(
+ (vector int)__a, (vector int)__b, __c);
+#endif
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_perm(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector unsigned long long)__builtin_altivec_vperm_4si(
+ (vector int)__b, (vector int)__a, __d);
+#else
+ return (vector unsigned long long)__builtin_altivec_vperm_4si(
+ (vector int)__a, (vector int)__b, __c);
+#endif
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_perm(vector bool long long __a, vector bool long long __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector bool long long)__builtin_altivec_vperm_4si(
+ (vector int)__b, (vector int)__a, __d);
+#else
+ return (vector bool long long)__builtin_altivec_vperm_4si(
+ (vector int)__a, (vector int)__b, __c);
+#endif
+}
+
+static vector double __ATTRS_o_ai vec_perm(vector double __a, vector double __b,
+ vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255};
+ __d = vec_xor(__c, __d);
+ return (vector double)__builtin_altivec_vperm_4si((vector int)__b,
+ (vector int)__a, __d);
+#else
+ return (vector double)__builtin_altivec_vperm_4si((vector int)__a,
+ (vector int)__b, __c);
+#endif
+}
+#endif
+
+/* vec_vperm */
+
+static vector signed char __ATTRS_o_ai vec_vperm(vector signed char __a,
+ vector signed char __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vperm(vector unsigned char __a,
+ vector unsigned char __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector bool char __ATTRS_o_ai vec_vperm(vector bool char __a,
+ vector bool char __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector short __ATTRS_o_ai vec_vperm(vector short __a, vector short __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vperm(vector unsigned short __a,
+ vector unsigned short __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector bool short __ATTRS_o_ai vec_vperm(vector bool short __a,
+ vector bool short __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector pixel __ATTRS_o_ai vec_vperm(vector pixel __a, vector pixel __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector int __ATTRS_o_ai vec_vperm(vector int __a, vector int __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vperm(vector unsigned int __a,
+ vector unsigned int __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector bool int __ATTRS_o_ai vec_vperm(vector bool int __a,
+ vector bool int __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector float __ATTRS_o_ai vec_vperm(vector float __a, vector float __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+#ifdef __VSX__
+static vector long long __ATTRS_o_ai vec_vperm(vector long long __a,
+ vector long long __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vperm(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+
+static vector double __ATTRS_o_ai vec_vperm(vector double __a,
+ vector double __b,
+ vector unsigned char __c) {
+ return vec_perm(__a, __b, __c);
+}
+#endif
+
+/* vec_re */
+
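+/* Reciprocal estimate: vrefp/xvresp return a hardware approximation of
+ 1.0/__a, not a correctly rounded quotient; refine with Newton-Raphson
+ where full precision is required. */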
+static vector float __ATTRS_o_ai vec_re(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvresp(__a);
+#else
+ return __builtin_altivec_vrefp(__a);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_re(vector double __a) {
+ return __builtin_vsx_xvredp(__a);
+}
+#endif
+
+/* vec_vrefp */
+
+static vector float __attribute__((__always_inline__))
+vec_vrefp(vector float __a) {
+ return __builtin_altivec_vrefp(__a);
+}
+
+/* vec_rl */
+
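+/* Rotate left: each element of __a is rotated by the corresponding
+ element of __b, the rotate count being taken modulo the element
+ width. */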
+static vector signed char __ATTRS_o_ai vec_rl(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vrlb((vector char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_rl(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vrlb((vector char)__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_rl(vector short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vrlh(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_rl(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vrlh((vector short)__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_rl(vector int __a, vector unsigned int __b) {
+ return __builtin_altivec_vrlw(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_rl(vector unsigned int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b);
+}
+
+#ifdef __POWER8_VECTOR__
+static vector signed long long __ATTRS_o_ai
+vec_rl(vector signed long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vrld(__a, __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vrld(__a, __b);
+}
+#endif
+
+/* vec_vrlb */
+
+static vector signed char __ATTRS_o_ai vec_vrlb(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vrlb((vector char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vrlb(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vrlb((vector char)__a, __b);
+}
+
+/* vec_vrlh */
+
+static vector short __ATTRS_o_ai vec_vrlh(vector short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vrlh(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vrlh(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vrlh((vector short)__a, __b);
+}
+
+/* vec_vrlw */
+
+static vector int __ATTRS_o_ai vec_vrlw(vector int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vrlw(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vrlw(vector unsigned int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b);
+}
+
+/* vec_round */
+
+static vector float __ATTRS_o_ai vec_round(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvrspi(__a);
+#else
+ return __builtin_altivec_vrfin(__a);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_round(vector double __a) {
+ return __builtin_vsx_xvrdpi(__a);
+}
+
+/* vec_rint */
+
+static vector float __ATTRS_o_ai vec_rint(vector float __a) {
+ return __builtin_vsx_xvrspic(__a);
+}
+
+static vector double __ATTRS_o_ai vec_rint(vector double __a) {
+ return __builtin_vsx_xvrdpic(__a);
+}
+
+/* vec_nearbyint */
+
+static vector float __ATTRS_o_ai vec_nearbyint(vector float __a) {
+ return __builtin_vsx_xvrspi(__a);
+}
+
+static vector double __ATTRS_o_ai vec_nearbyint(vector double __a) {
+ return __builtin_vsx_xvrdpi(__a);
+}
+#endif
+
+/* vec_vrfin */
+
+static vector float __attribute__((__always_inline__))
+vec_vrfin(vector float __a) {
+ return __builtin_altivec_vrfin(__a);
+}
+
+/* vec_sqrt */
+
+#ifdef __VSX__
+static vector float __ATTRS_o_ai vec_sqrt(vector float __a) {
+ return __builtin_vsx_xvsqrtsp(__a);
+}
+
+static vector double __ATTRS_o_ai vec_sqrt(vector double __a) {
+ return __builtin_vsx_xvsqrtdp(__a);
+}
+#endif
+
+/* vec_rsqrte */
+
+static vector float __ATTRS_o_ai vec_rsqrte(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvrsqrtesp(__a);
+#else
+ return __builtin_altivec_vrsqrtefp(__a);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_rsqrte(vector double __a) {
+ return __builtin_vsx_xvrsqrtedp(__a);
+}
+#endif
+
+/* vec_vrsqrtefp */
+
+static vector float __attribute__((__always_inline__))
+vec_vrsqrtefp(vector float __a) {
+ return __builtin_altivec_vrsqrtefp(__a);
+}
+
+/* vec_sel */
+
+#define __builtin_altivec_vsel_4si vec_sel
+
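+/* Bitwise select: result bits are taken from __b wherever the mask __c
+ has a 1-bit and from __a wherever it has a 0-bit, i.e.
+ (__a & ~__c) | (__b & __c). */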
+static vector signed char __ATTRS_o_ai vec_sel(vector signed char __a,
+ vector signed char __b,
+ vector unsigned char __c) {
+ return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
+}
+
+static vector signed char __ATTRS_o_ai vec_sel(vector signed char __a,
+ vector signed char __b,
+ vector bool char __c) {
+ return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sel(vector unsigned char __a,
+ vector unsigned char __b,
+ vector unsigned char __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sel(vector unsigned char __a,
+ vector unsigned char __b,
+ vector bool char __c) {
+ return (__a & ~(vector unsigned char)__c) | (__b & (vector unsigned char)__c);
+}
+
+static vector bool char __ATTRS_o_ai vec_sel(vector bool char __a,
+ vector bool char __b,
+ vector unsigned char __c) {
+ return (__a & ~(vector bool char)__c) | (__b & (vector bool char)__c);
+}
+
+static vector bool char __ATTRS_o_ai vec_sel(vector bool char __a,
+ vector bool char __b,
+ vector bool char __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector short __ATTRS_o_ai vec_sel(vector short __a, vector short __b,
+ vector unsigned short __c) {
+ return (__a & ~(vector short)__c) | (__b & (vector short)__c);
+}
+
+static vector short __ATTRS_o_ai vec_sel(vector short __a, vector short __b,
+ vector bool short __c) {
+ return (__a & ~(vector short)__c) | (__b & (vector short)__c);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sel(vector unsigned short __a,
+ vector unsigned short __b,
+ vector unsigned short __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sel(vector unsigned short __a,
+ vector unsigned short __b,
+ vector bool short __c) {
+ return (__a & ~(vector unsigned short)__c) |
+ (__b & (vector unsigned short)__c);
+}
+
+static vector bool short __ATTRS_o_ai vec_sel(vector bool short __a,
+ vector bool short __b,
+ vector unsigned short __c) {
+ return (__a & ~(vector bool short)__c) | (__b & (vector bool short)__c);
+}
+
+static vector bool short __ATTRS_o_ai vec_sel(vector bool short __a,
+ vector bool short __b,
+ vector bool short __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector int __ATTRS_o_ai vec_sel(vector int __a, vector int __b,
+ vector unsigned int __c) {
+ return (__a & ~(vector int)__c) | (__b & (vector int)__c);
+}
+
+static vector int __ATTRS_o_ai vec_sel(vector int __a, vector int __b,
+ vector bool int __c) {
+ return (__a & ~(vector int)__c) | (__b & (vector int)__c);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sel(vector unsigned int __a,
+ vector unsigned int __b,
+ vector unsigned int __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sel(vector unsigned int __a,
+ vector unsigned int __b,
+ vector bool int __c) {
+ return (__a & ~(vector unsigned int)__c) | (__b & (vector unsigned int)__c);
+}
+
+static vector bool int __ATTRS_o_ai vec_sel(vector bool int __a,
+ vector bool int __b,
+ vector unsigned int __c) {
+ return (__a & ~(vector bool int)__c) | (__b & (vector bool int)__c);
+}
+
+static vector bool int __ATTRS_o_ai vec_sel(vector bool int __a,
+ vector bool int __b,
+ vector bool int __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector float __ATTRS_o_ai vec_sel(vector float __a, vector float __b,
+ vector unsigned int __c) {
+ vector int __res = ((vector int)__a & ~(vector int)__c) |
+ ((vector int)__b & (vector int)__c);
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_sel(vector float __a, vector float __b,
+ vector bool int __c) {
+ vector int __res = ((vector int)__a & ~(vector int)__c) |
+ ((vector int)__b & (vector int)__c);
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_sel(vector double __a, vector double __b,
+ vector bool long long __c) {
+ vector long long __res = ((vector long long)__a & ~(vector long long)__c) |
+ ((vector long long)__b & (vector long long)__c);
+ return (vector double)__res;
+}
+
+static vector double __ATTRS_o_ai vec_sel(vector double __a, vector double __b,
+ vector unsigned long long __c) {
+ vector long long __res = ((vector long long)__a & ~(vector long long)__c) |
+ ((vector long long)__b & (vector long long)__c);
+ return (vector double)__res;
+}
+#endif
+
+/* vec_vsel */
+
+static vector signed char __ATTRS_o_ai vec_vsel(vector signed char __a,
+ vector signed char __b,
+ vector unsigned char __c) {
+ return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
+}
+
+static vector signed char __ATTRS_o_ai vec_vsel(vector signed char __a,
+ vector signed char __b,
+ vector bool char __c) {
+ return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsel(vector unsigned char __a,
+ vector unsigned char __b,
+ vector unsigned char __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsel(vector unsigned char __a,
+ vector unsigned char __b,
+ vector bool char __c) {
+ return (__a & ~(vector unsigned char)__c) | (__b & (vector unsigned char)__c);
+}
+
+static vector bool char __ATTRS_o_ai vec_vsel(vector bool char __a,
+ vector bool char __b,
+ vector unsigned char __c) {
+ return (__a & ~(vector bool char)__c) | (__b & (vector bool char)__c);
+}
+
+static vector bool char __ATTRS_o_ai vec_vsel(vector bool char __a,
+ vector bool char __b,
+ vector bool char __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector short __ATTRS_o_ai vec_vsel(vector short __a, vector short __b,
+ vector unsigned short __c) {
+ return (__a & ~(vector short)__c) | (__b & (vector short)__c);
+}
+
+static vector short __ATTRS_o_ai vec_vsel(vector short __a, vector short __b,
+ vector bool short __c) {
+ return (__a & ~(vector short)__c) | (__b & (vector short)__c);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsel(vector unsigned short __a,
+ vector unsigned short __b,
+ vector unsigned short __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsel(vector unsigned short __a,
+ vector unsigned short __b,
+ vector bool short __c) {
+ return (__a & ~(vector unsigned short)__c) |
+ (__b & (vector unsigned short)__c);
+}
+
+static vector bool short __ATTRS_o_ai vec_vsel(vector bool short __a,
+ vector bool short __b,
+ vector unsigned short __c) {
+ return (__a & ~(vector bool short)__c) | (__b & (vector bool short)__c);
+}
+
+static vector bool short __ATTRS_o_ai vec_vsel(vector bool short __a,
+ vector bool short __b,
+ vector bool short __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector int __ATTRS_o_ai vec_vsel(vector int __a, vector int __b,
+ vector unsigned int __c) {
+ return (__a & ~(vector int)__c) | (__b & (vector int)__c);
+}
+
+static vector int __ATTRS_o_ai vec_vsel(vector int __a, vector int __b,
+ vector bool int __c) {
+ return (__a & ~(vector int)__c) | (__b & (vector int)__c);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsel(vector unsigned int __a,
+ vector unsigned int __b,
+ vector unsigned int __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsel(vector unsigned int __a,
+ vector unsigned int __b,
+ vector bool int __c) {
+ return (__a & ~(vector unsigned int)__c) | (__b & (vector unsigned int)__c);
+}
+
+static vector bool int __ATTRS_o_ai vec_vsel(vector bool int __a,
+ vector bool int __b,
+ vector unsigned int __c) {
+ return (__a & ~(vector bool int)__c) | (__b & (vector bool int)__c);
+}
+
+static vector bool int __ATTRS_o_ai vec_vsel(vector bool int __a,
+ vector bool int __b,
+ vector bool int __c) {
+ return (__a & ~__c) | (__b & __c);
+}
+
+static vector float __ATTRS_o_ai vec_vsel(vector float __a, vector float __b,
+ vector unsigned int __c) {
+ vector int __res = ((vector int)__a & ~(vector int)__c) |
+ ((vector int)__b & (vector int)__c);
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_vsel(vector float __a, vector float __b,
+ vector bool int __c) {
+ vector int __res = ((vector int)__a & ~(vector int)__c) |
+ ((vector int)__b & (vector int)__c);
+ return (vector float)__res;
+}
+
+/* vec_sl */
+
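+/* Shift left: each element of __a is shifted by the corresponding
+ element of __b, the shift count being taken modulo the element width;
+ the casts give both operands of << the same vector type. */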
+static vector signed char __ATTRS_o_ai vec_sl(vector signed char __a,
+ vector unsigned char __b) {
+ return __a << (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sl(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a << __b;
+}
+
+static vector short __ATTRS_o_ai vec_sl(vector short __a,
+ vector unsigned short __b) {
+ return __a << (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sl(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a << __b;
+}
+
+static vector int __ATTRS_o_ai vec_sl(vector int __a, vector unsigned int __b) {
+ return __a << (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sl(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a << __b;
+}
+
+#ifdef __POWER8_VECTOR__
+static vector signed long long __ATTRS_o_ai
+vec_sl(vector signed long long __a, vector unsigned long long __b) {
+ return __a << (vector long long)__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_sl(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a << __b;
+}
+#endif
+
+/* vec_vslb */
+
+#define __builtin_altivec_vslb vec_vslb
+
+static vector signed char __ATTRS_o_ai vec_vslb(vector signed char __a,
+ vector unsigned char __b) {
+ return vec_sl(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vslb(vector unsigned char __a,
+ vector unsigned char __b) {
+ return vec_sl(__a, __b);
+}
+
+/* vec_vslh */
+
+#define __builtin_altivec_vslh vec_vslh
+
+static vector short __ATTRS_o_ai vec_vslh(vector short __a,
+ vector unsigned short __b) {
+ return vec_sl(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vslh(vector unsigned short __a,
+ vector unsigned short __b) {
+ return vec_sl(__a, __b);
+}
+
+/* vec_vslw */
+
+#define __builtin_altivec_vslw vec_vslw
+
+static vector int __ATTRS_o_ai vec_vslw(vector int __a,
+ vector unsigned int __b) {
+ return vec_sl(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vslw(vector unsigned int __a,
+ vector unsigned int __b) {
+ return vec_sl(__a, __b);
+}
+
+/* vec_sld */
+
+#define __builtin_altivec_vsldoi_4si vec_sld
+
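+/* Shift left double: conceptually concatenates __a:__b into 32 bytes and
+ returns the 16 bytes starting at byte offset __c; only the low four
+ bits of __c are meaningful, hence the & 0x0F.  The little-endian path
+ swaps the operands and reverses the permute indices. */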
+static vector signed char __ATTRS_o_ai vec_sld(vector signed char __a,
+ vector signed char __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sld(vector unsigned char __a,
+ vector unsigned char __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector bool char __ATTRS_o_ai vec_sld(vector bool char __a,
+ vector bool char __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector signed short __ATTRS_o_ai vec_sld(vector signed short __a,
+ vector signed short __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sld(vector unsigned short __a,
+ vector unsigned short __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector bool short __ATTRS_o_ai vec_sld(vector bool short __a,
+ vector bool short __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector pixel __ATTRS_o_ai vec_sld(vector pixel __a, vector pixel __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector signed int __ATTRS_o_ai vec_sld(vector signed int __a,
+ vector signed int __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sld(vector unsigned int __a,
+ vector unsigned int __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector bool int __ATTRS_o_ai vec_sld(vector bool int __a,
+ vector bool int __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector float __ATTRS_o_ai vec_sld(vector float __a, vector float __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+/* vec_vsldoi */
+
+static vector signed char __ATTRS_o_ai vec_vsldoi(vector signed char __a,
+ vector signed char __b,
+ unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsldoi(vector unsigned char __a,
+ vector unsigned char __b,
+ unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector short __ATTRS_o_ai vec_vsldoi(vector short __a, vector short __b,
+ unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsldoi(vector unsigned short __a,
+ vector unsigned short __b,
+ unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector pixel __ATTRS_o_ai vec_vsldoi(vector pixel __a, vector pixel __b,
+ unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector int __ATTRS_o_ai vec_vsldoi(vector int __a, vector int __b,
+ unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsldoi(vector unsigned int __a,
+ vector unsigned int __b,
+ unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static vector float __ATTRS_o_ai vec_vsldoi(vector float __a, vector float __b,
+ unsigned char __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a,
+ (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, 20 - __d,
+ 21 - __d, 22 - __d, 23 - __d, 24 - __d, 25 - __d,
+ 26 - __d, 27 - __d, 28 - __d, 29 - __d, 30 - __d,
+ 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+/* vec_sll */
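+/* vec_sll (vsl) shifts the full 128-bit contents of __a left by 0-7 bits;
+   the bit count is taken from the low-order three bits of __b, which the
+   ISA expects to be replicated into every byte of __b. */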
+
+static vector signed char __ATTRS_o_ai vec_sll(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_sll(vector signed char __a,
+ vector unsigned short __b) {
+ return (vector signed char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_sll(vector signed char __a,
+ vector unsigned int __b) {
+ return (vector signed char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sll(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sll(vector unsigned char __a,
+ vector unsigned short __b) {
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sll(vector unsigned char __a,
+ vector unsigned int __b) {
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_sll(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_sll(vector bool char __a,
+ vector unsigned short __b) {
+ return (vector bool char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_sll(vector bool char __a,
+ vector unsigned int __b) {
+ return (vector bool char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_sll(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_sll(vector short __a,
+ vector unsigned short __b) {
+ return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_sll(vector short __a,
+ vector unsigned int __b) {
+ return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sll(vector unsigned short __a,
+ vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sll(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sll(vector unsigned short __a,
+ vector unsigned int __b) {
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_sll(vector bool short __a,
+ vector unsigned char __b) {
+ return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_sll(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_sll(vector bool short __a,
+ vector unsigned int __b) {
+ return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
+ vector unsigned short __b) {
+ return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
+ vector unsigned int __b) {
+ return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_sll(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_sll(vector int __a,
+ vector unsigned short __b) {
+ return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_sll(vector int __a,
+ vector unsigned int __b) {
+ return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sll(vector unsigned int __a,
+ vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sll(vector unsigned int __a,
+ vector unsigned short __b) {
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sll(vector unsigned int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_sll(vector bool int __a,
+ vector unsigned char __b) {
+ return (vector bool int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_sll(vector bool int __a,
+ vector unsigned short __b) {
+ return (vector bool int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_sll(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+/* vec_vsl */
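+/* vec_vsl is the instruction-named spelling of vec_sll; the overloads
+   below are otherwise identical. */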
+
+static vector signed char __ATTRS_o_ai vec_vsl(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vsl(vector signed char __a,
+ vector unsigned short __b) {
+ return (vector signed char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vsl(vector signed char __a,
+ vector unsigned int __b) {
+ return (vector signed char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsl(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsl(vector unsigned char __a,
+ vector unsigned short __b) {
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsl(vector unsigned char __a,
+ vector unsigned int __b) {
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_vsl(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_vsl(vector bool char __a,
+ vector unsigned short __b) {
+ return (vector bool char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_vsl(vector bool char __a,
+ vector unsigned int __b) {
+ return (vector bool char)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_vsl(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_vsl(vector short __a,
+ vector unsigned short __b) {
+ return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_vsl(vector short __a,
+ vector unsigned int __b) {
+ return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsl(vector unsigned short __a,
+ vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsl(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsl(vector unsigned short __a,
+ vector unsigned int __b) {
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_vsl(vector bool short __a,
+ vector unsigned char __b) {
+ return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_vsl(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_vsl(vector bool short __a,
+ vector unsigned int __b) {
+ return (vector bool short)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
+ vector unsigned short __b) {
+ return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
+ vector unsigned int __b) {
+ return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_vsl(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_vsl(vector int __a,
+ vector unsigned short __b) {
+ return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_vsl(vector int __a,
+ vector unsigned int __b) {
+ return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsl(vector unsigned int __a,
+ vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsl(vector unsigned int __a,
+ vector unsigned short __b) {
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsl(vector unsigned int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_vsl(vector bool int __a,
+ vector unsigned char __b) {
+ return (vector bool int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_vsl(vector bool int __a,
+ vector unsigned short __b) {
+ return (vector bool int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_vsl(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+/* vec_slo */
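+/* vec_slo (vslo) shifts the full 128-bit contents of __a left by whole
+   octets (0-15 bytes), with the count taken from __b; it is commonly
+   paired with vec_sll to build an arbitrary 0-127 bit shift. */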
+
+static vector signed char __ATTRS_o_ai vec_slo(vector signed char __a,
+ vector signed char __b) {
+ return (vector signed char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_slo(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_slo(vector unsigned char __a,
+ vector signed char __b) {
+ return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_slo(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_slo(vector short __a,
+ vector signed char __b) {
+ return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_slo(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_slo(vector unsigned short __a,
+ vector signed char __b) {
+ return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_slo(vector unsigned short __a,
+ vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_slo(vector pixel __a,
+ vector signed char __b) {
+ return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_slo(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_slo(vector int __a, vector signed char __b) {
+ return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_slo(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_slo(vector unsigned int __a,
+ vector signed char __b) {
+ return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_slo(vector unsigned int __a,
+ vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_slo(vector float __a,
+ vector signed char __b) {
+ return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_slo(vector float __a,
+ vector unsigned char __b) {
+ return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+/* vec_vslo */
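+/* vec_vslo is the instruction-named spelling of vec_slo. */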
+
+static vector signed char __ATTRS_o_ai vec_vslo(vector signed char __a,
+ vector signed char __b) {
+ return (vector signed char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vslo(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vslo(vector unsigned char __a,
+ vector signed char __b) {
+ return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vslo(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_vslo(vector short __a,
+ vector signed char __b) {
+ return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_vslo(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vslo(vector unsigned short __a,
+ vector signed char __b) {
+ return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vslo(vector unsigned short __a,
+ vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a,
+ vector signed char __b) {
+ return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_vslo(vector int __a,
+ vector signed char __b) {
+ return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_vslo(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vslo(vector unsigned int __a,
+ vector signed char __b) {
+ return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vslo(vector unsigned int __a,
+ vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_vslo(vector float __a,
+ vector signed char __b) {
+ return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_vslo(vector float __a,
+ vector unsigned char __b) {
+ return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
+}
+
+/* vec_splat */
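+/* vec_splat replicates element (__b modulo the element count) of __a
+   across the whole result; each overload builds the corresponding byte
+   pattern and routes it through vec_perm. */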
+
+static vector signed char __ATTRS_o_ai vec_splat(vector signed char __a,
+ unsigned const int __b) {
+ return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
+}
+
+static vector unsigned char __ATTRS_o_ai vec_splat(vector unsigned char __a,
+ unsigned const int __b) {
+ return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
+}
+
+static vector bool char __ATTRS_o_ai vec_splat(vector bool char __a,
+ unsigned const int __b) {
+ return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
+}
+
+static vector signed short __ATTRS_o_ai vec_splat(vector signed short __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x07) * 2;
+ unsigned char b1 = b0 + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1,
+ b0, b1, b0, b1, b0, b1, b0, b1));
+}
+
+static vector unsigned short __ATTRS_o_ai vec_splat(vector unsigned short __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x07) * 2;
+ unsigned char b1 = b0 + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1,
+ b0, b1, b0, b1, b0, b1, b0, b1));
+}
+
+static vector bool short __ATTRS_o_ai vec_splat(vector bool short __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x07) * 2;
+ unsigned char b1 = b0 + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1,
+ b0, b1, b0, b1, b0, b1, b0, b1));
+}
+
+static vector pixel __ATTRS_o_ai vec_splat(vector pixel __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x07) * 2;
+ unsigned char b1 = b0 + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1,
+ b0, b1, b0, b1, b0, b1, b0, b1));
+}
+
+static vector signed int __ATTRS_o_ai vec_splat(vector signed int __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x03) * 4;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0,
+ b1, b2, b3, b0, b1, b2, b3));
+}
+
+static vector unsigned int __ATTRS_o_ai vec_splat(vector unsigned int __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x03) * 4;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0,
+ b1, b2, b3, b0, b1, b2, b3));
+}
+
+static vector bool int __ATTRS_o_ai vec_splat(vector bool int __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x03) * 4;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0,
+ b1, b2, b3, b0, b1, b2, b3));
+}
+
+static vector float __ATTRS_o_ai vec_splat(vector float __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x03) * 4;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0,
+ b1, b2, b3, b0, b1, b2, b3));
+}
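+
+/* Example (a sketch): vec_splat((vector signed int)(1, 2, 3, 4), 2)
+   yields (3, 3, 3, 3) in the program's element order on either
+   endianness, since vec_perm compensates for byte order. */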
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_splat(vector double __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x01) * 8;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4,
+ b5 = b0 + 5, b6 = b0 + 6, b7 = b0 + 7;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7,
+ b0, b1, b2, b3, b4, b5, b6, b7));
+}
+static vector bool long long __ATTRS_o_ai vec_splat(vector bool long long __a,
+ unsigned const int __b) {
+ unsigned char b0 = (__b & 0x01) * 8;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4,
+ b5 = b0 + 5, b6 = b0 + 6, b7 = b0 + 7;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7,
+ b0, b1, b2, b3, b4, b5, b6, b7));
+}
+static vector signed long long __ATTRS_o_ai
+vec_splat(vector signed long long __a, unsigned const int __b) {
+ unsigned char b0 = (__b & 0x01) * 8;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4,
+ b5 = b0 + 5, b6 = b0 + 6, b7 = b0 + 7;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7,
+ b0, b1, b2, b3, b4, b5, b6, b7));
+}
+static vector unsigned long long __ATTRS_o_ai
+vec_splat(vector unsigned long long __a, unsigned const int __b) {
+ unsigned char b0 = (__b & 0x01) * 8;
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4,
+ b5 = b0 + 5, b6 = b0 + 6, b7 = b0 + 7;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7,
+ b0, b1, b2, b3, b4, b5, b6, b7));
+}
+#endif
+
+/* vec_vspltb */
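+/* vec_vspltb replicates byte __b of __a into every byte of the result;
+   the #define below routes the builtin name to these typed overloads. */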
+
+#define __builtin_altivec_vspltb vec_vspltb
+
+static vector signed char __ATTRS_o_ai vec_vspltb(vector signed char __a,
+ unsigned char __b) {
+ return vec_perm(__a, __a, (vector unsigned char)(__b));
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vspltb(vector unsigned char __a,
+ unsigned char __b) {
+ return vec_perm(__a, __a, (vector unsigned char)(__b));
+}
+
+static vector bool char __ATTRS_o_ai vec_vspltb(vector bool char __a,
+ unsigned char __b) {
+ return vec_perm(__a, __a, (vector unsigned char)(__b));
+}
+
+/* vec_vsplth */
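+/* vec_vsplth replicates halfword __b of __a; the index is scaled to a
+   byte offset and expanded into a vec_perm control vector. */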
+
+#define __builtin_altivec_vsplth vec_vsplth
+
+static vector short __ATTRS_o_ai vec_vsplth(vector short __a,
+ unsigned char __b) {
+ __b *= 2;
+ unsigned char b1 = __b + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
+ __b, b1, __b, b1, __b, b1, __b, b1));
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsplth(vector unsigned short __a,
+ unsigned char __b) {
+ __b *= 2;
+ unsigned char b1 = __b + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
+ __b, b1, __b, b1, __b, b1, __b, b1));
+}
+
+static vector bool short __ATTRS_o_ai vec_vsplth(vector bool short __a,
+ unsigned char __b) {
+ __b *= 2;
+ unsigned char b1 = __b + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
+ __b, b1, __b, b1, __b, b1, __b, b1));
+}
+
+static vector pixel __ATTRS_o_ai vec_vsplth(vector pixel __a,
+ unsigned char __b) {
+ __b *= 2;
+ unsigned char b1 = __b + 1;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
+ __b, b1, __b, b1, __b, b1, __b, b1));
+}
+
+/* vec_vspltw */
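+/* vec_vspltw replicates word __b of __a, again via a vec_perm control
+   vector built from the scaled byte offset. */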
+
+#define __builtin_altivec_vspltw vec_vspltw
+
+static vector int __ATTRS_o_ai vec_vspltw(vector int __a, unsigned char __b) {
+ __b *= 4;
+ unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
+ b1, b2, b3, __b, b1, b2, b3));
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vspltw(vector unsigned int __a,
+ unsigned char __b) {
+ __b *= 4;
+ unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
+ b1, b2, b3, __b, b1, b2, b3));
+}
+
+static vector bool int __ATTRS_o_ai vec_vspltw(vector bool int __a,
+ unsigned char __b) {
+ __b *= 4;
+ unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
+ b1, b2, b3, __b, b1, b2, b3));
+}
+
+static vector float __ATTRS_o_ai vec_vspltw(vector float __a,
+ unsigned char __b) {
+ __b *= 4;
+ unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
+ return vec_perm(__a, __a,
+ (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
+ b1, b2, b3, __b, b1, b2, b3));
+}
+
+/* vec_splat_s8 */
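+/* The vec_splat_s* and vec_splat_u* families build a vector whose every
+   element is the given literal (vspltisb/vspltish/vspltisw); as the
+   FIXME comments below note, the argument should really be a 5-bit
+   signed literal, which these wrappers do not yet enforce. */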
+
+#define __builtin_altivec_vspltisb vec_splat_s8
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector signed char __ATTRS_o_ai vec_splat_s8(signed char __a) {
+ return (vector signed char)(__a);
+}
+
+/* vec_vspltisb */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector signed char __ATTRS_o_ai vec_vspltisb(signed char __a) {
+ return (vector signed char)(__a);
+}
+
+/* vec_splat_s16 */
+
+#define __builtin_altivec_vspltish vec_splat_s16
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector short __ATTRS_o_ai vec_splat_s16(signed char __a) {
+ return (vector short)(__a);
+}
+
+/* vec_vspltish */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector short __ATTRS_o_ai vec_vspltish(signed char __a) {
+ return (vector short)(__a);
+}
+
+/* vec_splat_s32 */
+
+#define __builtin_altivec_vspltisw vec_splat_s32
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector int __ATTRS_o_ai vec_splat_s32(signed char __a) {
+ return (vector int)(__a);
+}
+
+/* vec_vspltisw */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector int __ATTRS_o_ai vec_vspltisw(signed char __a) {
+ return (vector int)(__a);
+}
+
+/* vec_splat_u8 */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector unsigned char __ATTRS_o_ai vec_splat_u8(unsigned char __a) {
+ return (vector unsigned char)(__a);
+}
+
+/* vec_splat_u16 */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector unsigned short __ATTRS_o_ai vec_splat_u16(signed char __a) {
+ return (vector unsigned short)(__a);
+}
+
+/* vec_splat_u32 */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector unsigned int __ATTRS_o_ai vec_splat_u32(signed char __a) {
+ return (vector unsigned int)(__a);
+}
+
+/* vec_sr */
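+/* vec_sr is an element-wise logical shift right.  The signed overloads
+   cast through the matching unsigned type so that vacated bits are
+   zero-filled rather than sign-filled; the instruction-named forms
+   vec_vsrb/vec_vsrh/vec_vsrw follow below. */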
+
+static vector signed char __ATTRS_o_ai vec_sr(vector signed char __a,
+ vector unsigned char __b) {
+ vector unsigned char __res = (vector unsigned char)__a >> __b;
+ return (vector signed char)__res;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sr(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a >> __b;
+}
+
+static vector signed short __ATTRS_o_ai vec_sr(vector signed short __a,
+ vector unsigned short __b) {
+ vector unsigned short __res = (vector unsigned short)__a >> __b;
+ return (vector signed short)__res;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sr(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a >> __b;
+}
+
+static vector signed int __ATTRS_o_ai vec_sr(vector signed int __a,
+ vector unsigned int __b) {
+ vector unsigned int __res = (vector unsigned int)__a >> __b;
+ return (vector signed int)__res;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sr(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a >> __b;
+}
+
+#ifdef __POWER8_VECTOR__
+static vector signed long long __ATTRS_o_ai
+vec_sr(vector signed long long __a, vector unsigned long long __b) {
+ vector unsigned long long __res = (vector unsigned long long)__a >> __b;
+ return (vector signed long long)__res;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_sr(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a >> __b;
+}
+#endif
+
+/* vec_vsrb */
+
+#define __builtin_altivec_vsrb vec_vsrb
+
+static vector signed char __ATTRS_o_ai vec_vsrb(vector signed char __a,
+ vector unsigned char __b) {
+ return __a >> (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsrb(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a >> __b;
+}
+
+/* vec_vsrh */
+
+#define __builtin_altivec_vsrh vec_vsrh
+
+static vector short __ATTRS_o_ai vec_vsrh(vector short __a,
+ vector unsigned short __b) {
+ return __a >> (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsrh(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a >> __b;
+}
+
+/* vec_vsrw */
+
+#define __builtin_altivec_vsrw vec_vsrw
+
+static vector int __ATTRS_o_ai vec_vsrw(vector int __a,
+ vector unsigned int __b) {
+ return __a >> (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsrw(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a >> __b;
+}
+
+/* vec_sra */
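+/* vec_sra is the element-wise algebraic (arithmetic) shift right: vacated
+   bits are copies of the sign bit (vsrab/vsrah/vsraw).  The
+   instruction-named forms vec_vsrab/vec_vsrah/vec_vsraw follow below. */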
+
+static vector signed char __ATTRS_o_ai vec_sra(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsrab((vector char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sra(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsrab((vector char)__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_sra(vector short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vsrah(__a, (vector unsigned short)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sra(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vsrah((vector short)__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_sra(vector int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vsraw(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sra(vector unsigned int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b);
+}
+
+#ifdef __POWER8_VECTOR__
+static vector signed long long __ATTRS_o_ai
+vec_sra(vector signed long long __a, vector unsigned long long __b) {
+ return __a >> __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_sra(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)((vector signed long long)__a >> __b);
+}
+#endif
+
+/* vec_vsrab */
+
+static vector signed char __ATTRS_o_ai vec_vsrab(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsrab((vector char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsrab(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsrab((vector char)__a, __b);
+}
+
+/* vec_vsrah */
+
+static vector short __ATTRS_o_ai vec_vsrah(vector short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vsrah(__a, (vector unsigned short)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsrah(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vsrah((vector short)__a, __b);
+}
+
+/* vec_vsraw */
+
+static vector int __ATTRS_o_ai vec_vsraw(vector int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vsraw(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsraw(vector unsigned int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b);
+}
+
+/* vec_srl */
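+/* vec_srl (vsr) is the right-shift counterpart of vec_sll: it shifts the
+   full 128-bit contents of __a right by 0-7 bits taken from the
+   low-order three bits of __b. */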
+
+static vector signed char __ATTRS_o_ai vec_srl(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_srl(vector signed char __a,
+ vector unsigned short __b) {
+ return (vector signed char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_srl(vector signed char __a,
+ vector unsigned int __b) {
+ return (vector signed char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_srl(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_srl(vector unsigned char __a,
+ vector unsigned short __b) {
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_srl(vector unsigned char __a,
+ vector unsigned int __b) {
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_srl(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_srl(vector bool char __a,
+ vector unsigned short __b) {
+ return (vector bool char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_srl(vector bool char __a,
+ vector unsigned int __b) {
+ return (vector bool char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_srl(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_srl(vector short __a,
+ vector unsigned short __b) {
+ return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_srl(vector short __a,
+ vector unsigned int __b) {
+ return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_srl(vector unsigned short __a,
+ vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_srl(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_srl(vector unsigned short __a,
+ vector unsigned int __b) {
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_srl(vector bool short __a,
+ vector unsigned char __b) {
+ return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_srl(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_srl(vector bool short __a,
+ vector unsigned int __b) {
+ return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
+ vector unsigned short __b) {
+ return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
+ vector unsigned int __b) {
+ return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_srl(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_srl(vector int __a,
+ vector unsigned short __b) {
+ return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_srl(vector int __a,
+ vector unsigned int __b) {
+ return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_srl(vector unsigned int __a,
+ vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_srl(vector unsigned int __a,
+ vector unsigned short __b) {
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_srl(vector unsigned int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_srl(vector bool int __a,
+ vector unsigned char __b) {
+ return (vector bool int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_srl(vector bool int __a,
+ vector unsigned short __b) {
+ return (vector bool int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_srl(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+/* vec_vsr */
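+/* vec_vsr is the instruction-named spelling of vec_srl. */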
+
+static vector signed char __ATTRS_o_ai vec_vsr(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vsr(vector signed char __a,
+ vector unsigned short __b) {
+ return (vector signed char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vsr(vector signed char __a,
+ vector unsigned int __b) {
+ return (vector signed char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsr(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsr(vector unsigned char __a,
+ vector unsigned short __b) {
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsr(vector unsigned char __a,
+ vector unsigned int __b) {
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_vsr(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_vsr(vector bool char __a,
+ vector unsigned short __b) {
+ return (vector bool char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool char __ATTRS_o_ai vec_vsr(vector bool char __a,
+ vector unsigned int __b) {
+ return (vector bool char)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_vsr(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_vsr(vector short __a,
+ vector unsigned short __b) {
+ return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_vsr(vector short __a,
+ vector unsigned int __b) {
+ return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsr(vector unsigned short __a,
+ vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsr(vector unsigned short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsr(vector unsigned short __a,
+ vector unsigned int __b) {
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_vsr(vector bool short __a,
+ vector unsigned char __b) {
+ return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_vsr(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool short __ATTRS_o_ai vec_vsr(vector bool short __a,
+ vector unsigned int __b) {
+ return (vector bool short)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
+ vector unsigned short __b) {
+ return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
+ vector unsigned int __b) {
+ return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_vsr(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_vsr(vector int __a,
+ vector unsigned short __b) {
+ return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_vsr(vector int __a,
+ vector unsigned int __b) {
+ return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsr(vector unsigned int __a,
+ vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsr(vector unsigned int __a,
+ vector unsigned short __b) {
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsr(vector unsigned int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_vsr(vector bool int __a,
+ vector unsigned char __b) {
+ return (vector bool int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_vsr(vector bool int __a,
+ vector unsigned short __b) {
+ return (vector bool int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static vector bool int __ATTRS_o_ai vec_vsr(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+/* vec_sro */
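+/* vec_sro (vsro) shifts the full 128-bit contents of __a right by whole
+   octets taken from __b, mirroring vec_slo. */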
+
+static vector signed char __ATTRS_o_ai vec_sro(vector signed char __a,
+ vector signed char __b) {
+ return (vector signed char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_sro(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sro(vector unsigned char __a,
+ vector signed char __b) {
+ return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sro(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_sro(vector short __a,
+ vector signed char __b) {
+ return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_sro(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sro(vector unsigned short __a,
+ vector signed char __b) {
+ return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sro(vector unsigned short __a,
+ vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_sro(vector pixel __a,
+ vector signed char __b) {
+ return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_sro(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_sro(vector int __a, vector signed char __b) {
+ return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_sro(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sro(vector unsigned int __a,
+ vector signed char __b) {
+ return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sro(vector unsigned int __a,
+ vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_sro(vector float __a,
+ vector signed char __b) {
+ return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_sro(vector float __a,
+ vector unsigned char __b) {
+ return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+/* vec_vsro */
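+/* vec_vsro is the instruction-named spelling of vec_sro. */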
+
+static vector signed char __ATTRS_o_ai vec_vsro(vector signed char __a,
+ vector signed char __b) {
+ return (vector signed char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vsro(vector signed char __a,
+ vector unsigned char __b) {
+ return (vector signed char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsro(vector unsigned char __a,
+ vector signed char __b) {
+ return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsro(vector unsigned char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_vsro(vector short __a,
+ vector signed char __b) {
+ return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static vector short __ATTRS_o_ai vec_vsro(vector short __a,
+ vector unsigned char __b) {
+ return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsro(vector unsigned short __a,
+ vector signed char __b) {
+ return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsro(vector unsigned short __a,
+ vector unsigned char __b) {
+ return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a,
+ vector signed char __b) {
+ return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a,
+ vector unsigned char __b) {
+ return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_vsro(vector int __a,
+ vector signed char __b) {
+ return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
+}
+
+static vector int __ATTRS_o_ai vec_vsro(vector int __a,
+ vector unsigned char __b) {
+ return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsro(vector unsigned int __a,
+ vector signed char __b) {
+ return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsro(vector unsigned int __a,
+ vector unsigned char __b) {
+ return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_vsro(vector float __a,
+ vector signed char __b) {
+ return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_vsro(vector float __a,
+ vector unsigned char __b) {
+ return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
+}
+
+/* vec_st */
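+/* vec_st (stvx) stores the 16-byte vector __a to the effective address
+   formed from the byte offset __b and the pointer __c; stvx clears the
+   low four bits of that address, so the store always hits a 16-byte
+   aligned location.  Example (a sketch, with v a vector int value):
+
+     int buf[4] __attribute__((aligned(16)));
+     vec_st(v, 0, buf);
+*/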
+
+static void __ATTRS_o_ai vec_st(vector signed char __a, int __b,
+ vector signed char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector signed char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
+ vector bool char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector short __a, int __b, vector short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector short __a, int __b, short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector unsigned short __a, int __b,
+ vector unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector bool short __a, int __b, short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
+ vector bool short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector pixel __a, int __b, short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector pixel __a, int __b, vector pixel *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector int __a, int __b, vector int *__c) {
+ __builtin_altivec_stvx(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector int __a, int __b, int *__c) {
+ __builtin_altivec_stvx(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector bool int __a, int __b, int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
+ vector bool int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector float __a, int __b, vector float *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_st(vector float __a, int __b, float *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+/* vec_stvx */
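+/* vec_stvx is the instruction-named spelling of vec_st. */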
+
+static void __ATTRS_o_ai vec_stvx(vector signed char __a, int __b,
+ vector signed char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector signed char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
+ vector bool char *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector short __a, int __b,
+ vector short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector short __a, int __b, short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector unsigned short __a, int __b,
+ vector unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b, short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
+ vector bool short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b, short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
+ vector pixel *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector int __a, int __b, vector int *__c) {
+ __builtin_altivec_stvx(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector int __a, int __b, int *__c) {
+ __builtin_altivec_stvx(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b, int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
+ vector bool int *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector float __a, int __b,
+ vector float *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvx(vector float __a, int __b, float *__c) {
+ __builtin_altivec_stvx((vector int)__a, __b, __c);
+}
+
+/* vec_ste */
+
+static void __ATTRS_o_ai vec_ste(vector signed char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector bool char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector bool char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector short __a, int __b, short *__c) {
+ __builtin_altivec_stvehx(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector bool short __a, int __b, short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector bool short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector pixel __a, int __b, short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector pixel __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector int __a, int __b, int *__c) {
+ __builtin_altivec_stvewx(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector bool int __a, int __b, int *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector bool int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_ste(vector float __a, int __b, float *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
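+/* Illustrative usage (a sketch): unlike vec_st, vec_ste stores a single
+   element, selected by the effective address (__b + __c) modulo 16.
+
+     short s __attribute__((aligned(2)));
+     vector short v = (vector short)(10, 11, 12, 13, 14, 15, 16, 17);
+     vec_ste(v, 0, &s);            // stores only the element mapped to &s
+*/
+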
+/* vec_stvebx */
+
+static void __ATTRS_o_ai vec_stvebx(vector signed char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvebx(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvebx(vector bool char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvebx(vector bool char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvebx((vector char)__a, __b, __c);
+}
+
+/* vec_stvehx */
+
+static void __ATTRS_o_ai vec_stvehx(vector short __a, int __b, short *__c) {
+ __builtin_altivec_stvehx(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvehx(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvehx(vector bool short __a, int __b,
+ short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvehx(vector bool short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvehx(vector pixel __a, int __b, short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvehx(vector pixel __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvehx((vector short)__a, __b, __c);
+}
+
+/* vec_stvewx */
+
+static void __ATTRS_o_ai vec_stvewx(vector int __a, int __b, int *__c) {
+ __builtin_altivec_stvewx(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvewx(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvewx(vector bool int __a, int __b, int *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvewx(vector bool int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvewx(vector float __a, int __b, float *__c) {
+ __builtin_altivec_stvewx((vector int)__a, __b, __c);
+}
+
+/* vec_stl */
+
+static void __ATTRS_o_ai vec_stl(vector signed char __a, int __b,
+ vector signed char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector signed char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
+ vector bool char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector short __a, int __b, vector short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector short __a, int __b, short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b,
+ vector unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector bool short __a, int __b, short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
+ vector bool short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector pixel __a, int __b, short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector pixel __a, int __b, vector pixel *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector int __a, int __b, vector int *__c) {
+ __builtin_altivec_stvxl(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector int __a, int __b, int *__c) {
+ __builtin_altivec_stvxl(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector bool int __a, int __b, int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
+ vector bool int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector float __a, int __b, vector float *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stl(vector float __a, int __b, float *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
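+/* Note (illustrative): vec_stl behaves like vec_st, but the underlying stvxl
+   marks the touched cache block least-recently-used -- a hint that the data
+   will not be reused soon, e.g. for streaming stores:
+
+     float out[4] __attribute__((aligned(16)));
+     vec_stl((vector float)(0.0f), 0, out);   // store, then favor eviction
+*/
+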
+/* vec_stvxl */
+
+static void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b,
+ vector signed char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
+ signed char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
+ unsigned char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
+ vector bool char *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector short __a, int __b,
+ vector short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector short __a, int __b, short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector unsigned short __a, int __b,
+ vector unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b, short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
+ vector bool short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b, short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
+ unsigned short *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
+ vector pixel *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector int __a, int __b, vector int *__c) {
+ __builtin_altivec_stvxl(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector int __a, int __b, int *__c) {
+ __builtin_altivec_stvxl(__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b, int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
+ unsigned int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
+ vector bool int *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector float __a, int __b,
+ vector float *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvxl(vector float __a, int __b, float *__c) {
+ __builtin_altivec_stvxl((vector int)__a, __b, __c);
+}
+
+/* vec_sub */
+
+static vector signed char __ATTRS_o_ai vec_sub(vector signed char __a,
+ vector signed char __b) {
+ return __a - __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_sub(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a - __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_sub(vector signed char __a,
+ vector bool char __b) {
+ return __a - (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sub(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a - __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sub(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a - __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_sub(vector unsigned char __a,
+ vector bool char __b) {
+ return __a - (vector unsigned char)__b;
+}
+
+static vector short __ATTRS_o_ai vec_sub(vector short __a, vector short __b) {
+ return __a - __b;
+}
+
+static vector short __ATTRS_o_ai vec_sub(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a - __b;
+}
+
+static vector short __ATTRS_o_ai vec_sub(vector short __a,
+ vector bool short __b) {
+ return __a - (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sub(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a - __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sub(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__a - __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_sub(vector unsigned short __a,
+ vector bool short __b) {
+ return __a - (vector unsigned short)__b;
+}
+
+static vector int __ATTRS_o_ai vec_sub(vector int __a, vector int __b) {
+ return __a - __b;
+}
+
+static vector int __ATTRS_o_ai vec_sub(vector bool int __a, vector int __b) {
+ return (vector int)__a - __b;
+}
+
+static vector int __ATTRS_o_ai vec_sub(vector int __a, vector bool int __b) {
+ return __a - (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sub(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a - __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sub(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a - __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sub(vector unsigned int __a,
+ vector bool int __b) {
+ return __a - (vector unsigned int)__b;
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed __int128 __ATTRS_o_ai vec_sub(vector signed __int128 __a,
+ vector signed __int128 __b) {
+ return __a - __b;
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_sub(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __a - __b;
+}
+#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_sub(vector signed long long __a, vector signed long long __b) {
+ return __a - __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_sub(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a - __b;
+}
+
+static vector double __ATTRS_o_ai
+vec_sub(vector double __a, vector double __b) {
+ return __a - __b;
+}
+#endif
+
+static vector float __ATTRS_o_ai vec_sub(vector float __a, vector float __b) {
+ return __a - __b;
+}
+
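+/* Illustrative usage (a sketch): vec_sub is element-wise modular subtraction;
+   bool vector operands are simply reinterpreted as the arithmetic type.
+
+     vector unsigned char a = (vector unsigned char)(0);
+     vector unsigned char b = (vector unsigned char)(1);
+     vector unsigned char d = vec_sub(a, b);   // every element wraps to 0xFF
+*/
+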
+/* vec_vsububm */
+
+#define __builtin_altivec_vsububm vec_vsububm
+
+static vector signed char __ATTRS_o_ai vec_vsububm(vector signed char __a,
+ vector signed char __b) {
+ return __a - __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vsububm(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a - __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vsububm(vector signed char __a,
+ vector bool char __b) {
+ return __a - (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsububm(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a - __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsububm(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a - __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsububm(vector unsigned char __a,
+ vector bool char __b) {
+ return __a - (vector unsigned char)__b;
+}
+
+/* vec_vsubuhm */
+
+#define __builtin_altivec_vsubuhm vec_vsubuhm
+
+static vector short __ATTRS_o_ai vec_vsubuhm(vector short __a,
+ vector short __b) {
+ return __a - __b;
+}
+
+static vector short __ATTRS_o_ai vec_vsubuhm(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a - __b;
+}
+
+static vector short __ATTRS_o_ai vec_vsubuhm(vector short __a,
+ vector bool short __b) {
+ return __a - (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsubuhm(vector unsigned short __a, vector unsigned short __b) {
+ return __a - __b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsubuhm(vector bool short __a, vector unsigned short __b) {
+ return (vector unsigned short)__a - __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsubuhm(vector unsigned short __a,
+ vector bool short __b) {
+ return __a - (vector unsigned short)__b;
+}
+
+/* vec_vsubuwm */
+
+#define __builtin_altivec_vsubuwm vec_vsubuwm
+
+static vector int __ATTRS_o_ai vec_vsubuwm(vector int __a, vector int __b) {
+ return __a - __b;
+}
+
+static vector int __ATTRS_o_ai vec_vsubuwm(vector bool int __a,
+ vector int __b) {
+ return (vector int)__a - __b;
+}
+
+static vector int __ATTRS_o_ai vec_vsubuwm(vector int __a,
+ vector bool int __b) {
+ return __a - (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsubuwm(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a - __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsubuwm(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a - __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsubuwm(vector unsigned int __a,
+ vector bool int __b) {
+ return __a - (vector unsigned int)__b;
+}
+
+/* vec_vsubfp */
+
+#define __builtin_altivec_vsubfp vec_vsubfp
+
+static vector float __attribute__((__always_inline__))
+vec_vsubfp(vector float __a, vector float __b) {
+ return __a - __b;
+}
+
+/* vec_subc */
+
+static vector unsigned int __ATTRS_o_ai vec_subc(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vsubcuw(__a, __b);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector unsigned __int128 __ATTRS_o_ai
+vec_subc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __builtin_altivec_vsubcuq(__a, __b);
+}
+
+static vector signed __int128 __ATTRS_o_ai
+vec_subc(vector signed __int128 __a, vector signed __int128 __b) {
+ return __builtin_altivec_vsubcuq(__a, __b);
+}
+#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+
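+/* Worked example (illustrative): vec_subc yields, per element, the carry-out
+   of the unsigned subtraction __a - __b, i.e. 1 when __a >= __b (no borrow)
+   and 0 otherwise.
+
+     vector unsigned int a = (vector unsigned int)(5, 0, 7, 7);
+     vector unsigned int b = (vector unsigned int)(3, 1, 7, 9);
+     vector unsigned int c = vec_subc(a, b);   // {1, 0, 1, 0}
+*/
+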
+/* vec_vsubcuw */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vsubcuw(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vsubcuw(__a, __b);
+}
+
+/* vec_subs */
+
+static vector signed char __ATTRS_o_ai vec_subs(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vsubsbs(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_subs(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vsubsbs((vector signed char)__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_subs(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vsubsbs(__a, (vector signed char)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_subs(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vsububs(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_subs(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vsububs((vector unsigned char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_subs(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vsububs(__a, (vector unsigned char)__b);
+}
+
+static vector short __ATTRS_o_ai vec_subs(vector short __a, vector short __b) {
+ return __builtin_altivec_vsubshs(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_subs(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vsubshs((vector short)__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_subs(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vsubshs(__a, (vector short)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_subs(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vsubuhs(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_subs(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vsubuhs((vector unsigned short)__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_subs(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vsubuhs(__a, (vector unsigned short)__b);
+}
+
+static vector int __ATTRS_o_ai vec_subs(vector int __a, vector int __b) {
+ return __builtin_altivec_vsubsws(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_subs(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vsubsws((vector int)__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_subs(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vsubsws(__a, (vector int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_subs(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vsubuws(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_subs(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vsubuws((vector unsigned int)__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_subs(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b);
+}
+
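+/* Worked example (illustrative): vec_subs saturates instead of wrapping.
+
+     vector unsigned char a = (vector unsigned char)(5);
+     vector unsigned char b = (vector unsigned char)(10);
+     vector unsigned char d = vec_subs(a, b);   // every element clamps to 0
+
+   Signed forms clamp to the type's extremes, e.g. (-128) - 1 stays -128 for
+   vector signed char. */
+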
+/* vec_vsubsbs */
+
+static vector signed char __ATTRS_o_ai vec_vsubsbs(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vsubsbs(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vsubsbs(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vsubsbs((vector signed char)__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vsubsbs(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vsubsbs(__a, (vector signed char)__b);
+}
+
+/* vec_vsububs */
+
+static vector unsigned char __ATTRS_o_ai vec_vsububs(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vsububs(__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsububs(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vsububs((vector unsigned char)__a, __b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vsububs(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vsububs(__a, (vector unsigned char)__b);
+}
+
+/* vec_vsubshs */
+
+static vector short __ATTRS_o_ai vec_vsubshs(vector short __a,
+ vector short __b) {
+ return __builtin_altivec_vsubshs(__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_vsubshs(vector bool short __a,
+ vector short __b) {
+ return __builtin_altivec_vsubshs((vector short)__a, __b);
+}
+
+static vector short __ATTRS_o_ai vec_vsubshs(vector short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vsubshs(__a, (vector short)__b);
+}
+
+/* vec_vsubuhs */
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsubuhs(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vsubuhs(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsubuhs(vector bool short __a, vector unsigned short __b) {
+ return __builtin_altivec_vsubuhs((vector unsigned short)__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vsubuhs(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vsubuhs(__a, (vector unsigned short)__b);
+}
+
+/* vec_vsubsws */
+
+static vector int __ATTRS_o_ai vec_vsubsws(vector int __a, vector int __b) {
+ return __builtin_altivec_vsubsws(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_vsubsws(vector bool int __a,
+ vector int __b) {
+ return __builtin_altivec_vsubsws((vector int)__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_vsubsws(vector int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vsubsws(__a, (vector int)__b);
+}
+
+/* vec_vsubuws */
+
+static vector unsigned int __ATTRS_o_ai vec_vsubuws(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vsubuws(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsubuws(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vsubuws((vector unsigned int)__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vsubuws(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+/* vec_vsubuqm */
+
+static vector signed __int128 __ATTRS_o_ai
+vec_vsubuqm(vector signed __int128 __a, vector signed __int128 __b) {
+ return __a - __b;
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_vsubuqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __a - __b;
+}
+
+/* vec_vsubeuqm */
+
+static vector signed __int128 __ATTRS_o_ai
+vec_vsubeuqm(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vsubeuqm(__a, __b, __c);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_vsubeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vsubeuqm(__a, __b, __c);
+}
+
+/* vec_vsubcuq */
+
+static vector signed __int128 __ATTRS_o_ai
+vec_vsubcuq(vector signed __int128 __a, vector signed __int128 __b) {
+ return __builtin_altivec_vsubcuq(__a, __b);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_vsubcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+ return __builtin_altivec_vsubcuq(__a, __b);
+}
+
+/* vec_vsubecuq */
+
+static vector signed __int128 __ATTRS_o_ai
+vec_vsubecuq(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vsubecuq(__a, __b, __c);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_vsubecuq(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vsubecuq(__a, __b, __c);
+}
+#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+
+/* vec_sum4s */
+
+static vector int __ATTRS_o_ai vec_sum4s(vector signed char __a,
+ vector int __b) {
+ return __builtin_altivec_vsum4sbs(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_sum4s(vector unsigned char __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vsum4ubs(__a, __b);
+}
+
+static vector int __ATTRS_o_ai vec_sum4s(vector signed short __a,
+ vector int __b) {
+ return __builtin_altivec_vsum4shs(__a, __b);
+}
+
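+/* Worked example (illustrative): vec_sum4s forms saturated partial sums. For
+   the char forms, result word i is __b[i] plus the sum of the four bytes of
+   __a lying in word i; the short form sums two halfwords per word.
+
+     vector signed char a = (vector signed char)(1);   // all 16 bytes are 1
+     vector int b = (vector int)(10, 20, 30, 40);
+     vector int s = vec_sum4s(a, b);                   // {14, 24, 34, 44}
+*/
+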
+/* vec_vsum4sbs */
+
+static vector int __attribute__((__always_inline__))
+vec_vsum4sbs(vector signed char __a, vector int __b) {
+ return __builtin_altivec_vsum4sbs(__a, __b);
+}
+
+/* vec_vsum4ubs */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vsum4ubs(vector unsigned char __a, vector unsigned int __b) {
+ return __builtin_altivec_vsum4ubs(__a, __b);
+}
+
+/* vec_vsum4shs */
+
+static vector int __attribute__((__always_inline__))
+vec_vsum4shs(vector signed short __a, vector int __b) {
+ return __builtin_altivec_vsum4shs(__a, __b);
+}
+
+/* vec_sum2s */
+
+/* The vsum2sws instruction has a big-endian bias, so that the second
+   input vector and the result always reference big-endian elements
+   1 and 3 (little-endian elements 0 and 2). For ease of porting, the
+   programmer wants elements 1 and 3 in both cases, so for little
+   endian we must perform some permutes. */
+
+static vector signed int __attribute__((__always_inline__))
+vec_sum2s(vector int __a, vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ vector int __c = (vector signed int)vec_perm(
+ __b, __b, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
+ 8, 9, 10, 11));
+ __c = __builtin_altivec_vsum2sws(__a, __c);
+ return (vector signed int)vec_perm(
+ __c, __c, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
+ 8, 9, 10, 11));
+#else
+ return __builtin_altivec_vsum2sws(__a, __b);
+#endif
+}
+
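+/* Worked example (illustrative): after the endian fix-up above, vec_sum2s
+   leaves its two saturated partial sums in elements 1 and 3 on either
+   endianness: r[1] = a[0] + a[1] + b[1] and r[3] = a[2] + a[3] + b[3], with
+   the remaining elements zero.
+
+     vector int a = (vector int)(1, 2, 3, 4);
+     vector int b = (vector int)(0, 10, 0, 20);
+     vector int r = vec_sum2s(a, b);   // {0, 13, 0, 27}
+*/
+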
+/* vec_vsum2sws */
+
+static vector signed int __attribute__((__always_inline__))
+vec_vsum2sws(vector int __a, vector int __b) {
+#ifdef __LITTLE_ENDIAN__
+ vector int __c = (vector signed int)vec_perm(
+ __b, __b, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
+ 8, 9, 10, 11));
+ __c = __builtin_altivec_vsum2sws(__a, __c);
+ return (vector signed int)vec_perm(
+ __c, __c, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
+ 8, 9, 10, 11));
+#else
+ return __builtin_altivec_vsum2sws(__a, __b);
+#endif
+}
+
+/* vec_sums */
+
+/* The vsumsws instruction has a big-endian bias, so that the second
+   input vector and the result always reference big-endian element 3
+   (little-endian element 0). For ease of porting, the programmer
+   wants element 3 in both cases, so for little endian we must
+   perform some permutes. */
+
+static vector signed int __attribute__((__always_inline__))
+vec_sums(vector signed int __a, vector signed int __b) {
+#ifdef __LITTLE_ENDIAN__
+ __b = (vector signed int)vec_splat(__b, 3);
+ __b = __builtin_altivec_vsumsws(__a, __b);
+ return (vector signed int)(0, 0, 0, __b[0]);
+#else
+ return __builtin_altivec_vsumsws(__a, __b);
+#endif
+}
+
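+/* Worked example (illustrative): vec_sums leaves the saturated sum of all
+   four elements of __a plus __b[3] in element 3; the other elements are 0.
+
+     vector signed int a = (vector signed int)(1, 2, 3, 4);
+     vector signed int b = (vector signed int)(0, 0, 0, 100);
+     vector signed int r = vec_sums(a, b);   // {0, 0, 0, 110}
+*/
+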
+/* vec_vsumsws */
+
+static vector signed int __attribute__((__always_inline__))
+vec_vsumsws(vector signed int __a, vector signed int __b) {
+#ifdef __LITTLE_ENDIAN__
+ __b = (vector signed int)vec_splat(__b, 3);
+ __b = __builtin_altivec_vsumsws(__a, __b);
+ return (vector signed int)(0, 0, 0, __b[0]);
+#else
+ return __builtin_altivec_vsumsws(__a, __b);
+#endif
+}
+
+/* vec_trunc */
+
+static vector float __ATTRS_o_ai
+vec_trunc(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvrspiz(__a);
+#else
+ return __builtin_altivec_vrfiz(__a);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_trunc(vector double __a) {
+ return __builtin_vsx_xvrdpiz(__a);
+}
+#endif
+
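+/* Illustrative usage (a sketch): vec_trunc rounds each element toward zero.
+
+     vector float f = (vector float)(1.7f, -1.7f, 2.0f, -0.5f);
+     vector float t = vec_trunc(f);   // {1.0f, -1.0f, 2.0f, -0.0f}
+*/
+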
+/* vec_vrfiz */
+
+static vector float __attribute__((__always_inline__))
+vec_vrfiz(vector float __a) {
+ return __builtin_altivec_vrfiz(__a);
+}
+
+/* vec_unpackh */
+
+/* The vector unpack instructions all have a big-endian bias, so for
+ little endian we must reverse the meanings of "high" and "low." */
+
+static vector short __ATTRS_o_ai vec_unpackh(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsb((vector char)__a);
+#else
+ return __builtin_altivec_vupkhsb((vector char)__a);
+#endif
+}
+
+static vector bool short __ATTRS_o_ai vec_unpackh(vector bool char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
+#else
+ return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
+#endif
+}
+
+static vector int __ATTRS_o_ai vec_unpackh(vector short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsh(__a);
+#else
+ return __builtin_altivec_vupkhsh(__a);
+#endif
+}
+
+static vector bool int __ATTRS_o_ai vec_unpackh(vector bool short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
+#else
+ return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai vec_unpackh(vector pixel __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
+#else
+ return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
+#endif
+}
+
+#ifdef __POWER8_VECTOR__
+static vector long long __ATTRS_o_ai vec_unpackh(vector int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsw(__a);
+#else
+ return __builtin_altivec_vupkhsw(__a);
+#endif
+}
+
+static vector bool long long __ATTRS_o_ai vec_unpackh(vector bool int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
+#else
+ return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
+#endif
+}
+#endif
+
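+/* Illustrative usage (a sketch): thanks to the #ifdefs above, vec_unpackh
+   always sign-extends elements 0..7 of the source in program element order,
+   on big- and little-endian targets alike.
+
+     vector signed char c = (vector signed char)(-1, 2, -3, 4, 5, 6, 7, 8,
+                                                 9, 10, 11, 12, 13, 14, 15, 16);
+     vector short h = vec_unpackh(c);   // {-1, 2, -3, 4, 5, 6, 7, 8}
+*/
+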
+/* vec_vupkhsb */
+
+static vector short __ATTRS_o_ai vec_vupkhsb(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsb((vector char)__a);
+#else
+ return __builtin_altivec_vupkhsb((vector char)__a);
+#endif
+}
+
+static vector bool short __ATTRS_o_ai vec_vupkhsb(vector bool char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
+#else
+ return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
+#endif
+}
+
+/* vec_vupkhsh */
+
+static vector int __ATTRS_o_ai vec_vupkhsh(vector short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsh(__a);
+#else
+ return __builtin_altivec_vupkhsh(__a);
+#endif
+}
+
+static vector bool int __ATTRS_o_ai vec_vupkhsh(vector bool short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
+#else
+ return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vupkhsh(vector pixel __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
+#else
+ return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
+#endif
+}
+
+/* vec_vupkhsw */
+
+#ifdef __POWER8_VECTOR__
+static vector long long __ATTRS_o_ai vec_vupkhsw(vector int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsw(__a);
+#else
+ return __builtin_altivec_vupkhsw(__a);
+#endif
+}
+
+static vector bool long long __ATTRS_o_ai vec_vupkhsw(vector bool int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
+#else
+ return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
+#endif
+}
+#endif
+
+/* vec_unpackl */
+
+static vector short __ATTRS_o_ai vec_unpackl(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsb((vector char)__a);
+#else
+ return __builtin_altivec_vupklsb((vector char)__a);
+#endif
+}
+
+static vector bool short __ATTRS_o_ai vec_unpackl(vector bool char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
+#else
+ return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
+#endif
+}
+
+static vector int __ATTRS_o_ai vec_unpackl(vector short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsh(__a);
+#else
+ return __builtin_altivec_vupklsh(__a);
+#endif
+}
+
+static vector bool int __ATTRS_o_ai vec_unpackl(vector bool short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
+#else
+ return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai vec_unpackl(vector pixel __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
+#else
+ return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
+#endif
+}
+
+#ifdef __POWER8_VECTOR__
+static vector long long __ATTRS_o_ai vec_unpackl(vector int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsw(__a);
+#else
+ return __builtin_altivec_vupklsw(__a);
+#endif
+}
+
+static vector bool long long __ATTRS_o_ai vec_unpackl(vector bool int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
+#else
+ return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
+#endif
+}
+#endif
+
+/* vec_vupklsb */
+
+static vector short __ATTRS_o_ai vec_vupklsb(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsb((vector char)__a);
+#else
+ return __builtin_altivec_vupklsb((vector char)__a);
+#endif
+}
+
+static vector bool short __ATTRS_o_ai vec_vupklsb(vector bool char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
+#else
+ return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
+#endif
+}
+
+/* vec_vupklsh */
+
+static vector int __ATTRS_o_ai vec_vupklsh(vector short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsh(__a);
+#else
+ return __builtin_altivec_vupklsh(__a);
+#endif
+}
+
+static vector bool int __ATTRS_o_ai vec_vupklsh(vector bool short __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
+#else
+ return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
+#endif
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vupklsh(vector pixel __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
+#else
+ return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
+#endif
+}
+
+/* vec_vupklsw */
+
+#ifdef __POWER8_VECTOR__
+static vector long long __ATTRS_o_ai vec_vupklsw(vector int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsw(__a);
+#else
+ return __builtin_altivec_vupklsw(__a);
+#endif
+}
+
+static vector bool long long __ATTRS_o_ai vec_vupklsw(vector bool int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
+#else
+ return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
+#endif
+}
+#endif
+
+/* vec_vsx_ld */
+
+#ifdef __VSX__
+
+static vector signed int __ATTRS_o_ai vec_vsx_ld(int __a,
+ const vector signed int *__b) {
+ return (vector signed int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector unsigned int *__b) {
+ return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static vector float __ATTRS_o_ai vec_vsx_ld(int __a, const vector float *__b) {
+ return (vector float)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector signed long long *__b) {
+ return (vector signed long long)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector unsigned long long *__b) {
+ return (vector unsigned long long)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+static vector double __ATTRS_o_ai vec_vsx_ld(int __a,
+ const vector double *__b) {
+ return (vector double)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+#endif
+
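+/* Note (illustrative): unlike vec_ld, which truncates the effective address
+   to a 16-byte boundary, the VSX lxvw4x/lxvd2x loads behind vec_vsx_ld can
+   read from unaligned storage:
+
+     int data[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+     vector signed int v =
+         vec_vsx_ld(0, (const vector signed int *)(data + 1));   // {1, 2, 3, 4}
+*/
+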
+/* vec_vsx_st */
+
+#ifdef __VSX__
+
+static void __ATTRS_o_ai vec_vsx_st(vector signed int __a, int __b,
+ vector signed int *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_vsx_st(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_vsx_st(vector float __a, int __b,
+ vector float *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_vsx_st(vector signed long long __a, int __b,
+ vector signed long long *__c) {
+ __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_vsx_st(vector unsigned long long __a, int __b,
+ vector unsigned long long *__c) {
+ __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b,
+ vector double *__c) {
+ __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+#endif
+
+/* vec_xor */
+
+#define __builtin_altivec_vxor vec_xor
+
+static vector signed char __ATTRS_o_ai vec_xor(vector signed char __a,
+ vector signed char __b) {
+ return __a ^ __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_xor(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a ^ __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_xor(vector signed char __a,
+ vector bool char __b) {
+ return __a ^ (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_xor(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a ^ __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_xor(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a ^ __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_xor(vector unsigned char __a,
+ vector bool char __b) {
+ return __a ^ (vector unsigned char)__b;
+}
+
+static vector bool char __ATTRS_o_ai vec_xor(vector bool char __a,
+ vector bool char __b) {
+ return __a ^ __b;
+}
+
+static vector short __ATTRS_o_ai vec_xor(vector short __a, vector short __b) {
+ return __a ^ __b;
+}
+
+static vector short __ATTRS_o_ai vec_xor(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a ^ __b;
+}
+
+static vector short __ATTRS_o_ai vec_xor(vector short __a,
+ vector bool short __b) {
+ return __a ^ (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_xor(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a ^ __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_xor(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__a ^ __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_xor(vector unsigned short __a,
+ vector bool short __b) {
+ return __a ^ (vector unsigned short)__b;
+}
+
+static vector bool short __ATTRS_o_ai vec_xor(vector bool short __a,
+ vector bool short __b) {
+ return __a ^ __b;
+}
+
+static vector int __ATTRS_o_ai vec_xor(vector int __a, vector int __b) {
+ return __a ^ __b;
+}
+
+static vector int __ATTRS_o_ai vec_xor(vector bool int __a, vector int __b) {
+ return (vector int)__a ^ __b;
+}
+
+static vector int __ATTRS_o_ai vec_xor(vector int __a, vector bool int __b) {
+ return __a ^ (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_xor(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a ^ __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_xor(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a ^ __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_xor(vector unsigned int __a,
+ vector bool int __b) {
+ return __a ^ (vector unsigned int)__b;
+}
+
+static vector bool int __ATTRS_o_ai vec_xor(vector bool int __a,
+ vector bool int __b) {
+ return __a ^ __b;
+}
+
+static vector float __ATTRS_o_ai vec_xor(vector float __a, vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a ^ (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_xor(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a ^ (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_xor(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a ^ (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_xor(vector signed long long __a, vector signed long long __b) {
+ return __a ^ __b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_xor(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a ^ __b;
+}
+
+static vector signed long long __ATTRS_o_ai vec_xor(vector signed long long __a,
+ vector bool long long __b) {
+ return __a ^ (vector signed long long)__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_xor(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a ^ __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_xor(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a ^ __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_xor(vector unsigned long long __a, vector bool long long __b) {
+ return __a ^ (vector unsigned long long)__b;
+}
+
+static vector bool long long __ATTRS_o_ai vec_xor(vector bool long long __a,
+ vector bool long long __b) {
+ return __a ^ __b;
+}
+
+static vector double __ATTRS_o_ai
+vec_xor(vector double __a, vector double __b) {
+ return (vector double)((vector unsigned long long)__a ^
+ (vector unsigned long long)__b);
+}
+
+static vector double __ATTRS_o_ai
+vec_xor(vector double __a, vector bool long long __b) {
+ return (vector double)((vector unsigned long long)__a ^
+ (vector unsigned long long) __b);
+}
+
+static vector double __ATTRS_o_ai
+vec_xor(vector bool long long __a, vector double __b) {
+ return (vector double)((vector unsigned long long)__a ^
+ (vector unsigned long long)__b);
+}
+#endif
+
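+/* Illustrative usage (a sketch): the float overloads of vec_xor operate on
+   the bit patterns, which enables tricks such as flipping the sign bits:
+
+     vector float f = (vector float)(1.0f, -2.0f, 3.0f, -4.0f);
+     vector unsigned int sign = (vector unsigned int)(0x80000000);
+     vector float neg = vec_xor(f, (vector float)sign);   // {-1, 2, -3, 4}
+*/
+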
+/* vec_vxor */
+
+static vector signed char __ATTRS_o_ai vec_vxor(vector signed char __a,
+ vector signed char __b) {
+ return __a ^ __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vxor(vector bool char __a,
+ vector signed char __b) {
+ return (vector signed char)__a ^ __b;
+}
+
+static vector signed char __ATTRS_o_ai vec_vxor(vector signed char __a,
+ vector bool char __b) {
+ return __a ^ (vector signed char)__b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vxor(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __a ^ __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vxor(vector bool char __a,
+ vector unsigned char __b) {
+ return (vector unsigned char)__a ^ __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_vxor(vector unsigned char __a,
+ vector bool char __b) {
+ return __a ^ (vector unsigned char)__b;
+}
+
+static vector bool char __ATTRS_o_ai vec_vxor(vector bool char __a,
+ vector bool char __b) {
+ return __a ^ __b;
+}
+
+static vector short __ATTRS_o_ai vec_vxor(vector short __a, vector short __b) {
+ return __a ^ __b;
+}
+
+static vector short __ATTRS_o_ai vec_vxor(vector bool short __a,
+ vector short __b) {
+ return (vector short)__a ^ __b;
+}
+
+static vector short __ATTRS_o_ai vec_vxor(vector short __a,
+ vector bool short __b) {
+ return __a ^ (vector short)__b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vxor(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __a ^ __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vxor(vector bool short __a,
+ vector unsigned short __b) {
+ return (vector unsigned short)__a ^ __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_vxor(vector unsigned short __a,
+ vector bool short __b) {
+ return __a ^ (vector unsigned short)__b;
+}
+
+static vector bool short __ATTRS_o_ai vec_vxor(vector bool short __a,
+ vector bool short __b) {
+ return __a ^ __b;
+}
+
+static vector int __ATTRS_o_ai vec_vxor(vector int __a, vector int __b) {
+ return __a ^ __b;
+}
+
+static vector int __ATTRS_o_ai vec_vxor(vector bool int __a, vector int __b) {
+ return (vector int)__a ^ __b;
+}
+
+static vector int __ATTRS_o_ai vec_vxor(vector int __a, vector bool int __b) {
+ return __a ^ (vector int)__b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vxor(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __a ^ __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vxor(vector bool int __a,
+ vector unsigned int __b) {
+ return (vector unsigned int)__a ^ __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_vxor(vector unsigned int __a,
+ vector bool int __b) {
+ return __a ^ (vector unsigned int)__b;
+}
+
+static vector bool int __ATTRS_o_ai vec_vxor(vector bool int __a,
+ vector bool int __b) {
+ return __a ^ __b;
+}
+
+static vector float __ATTRS_o_ai vec_vxor(vector float __a, vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a ^ (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_vxor(vector bool int __a,
+ vector float __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a ^ (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+static vector float __ATTRS_o_ai vec_vxor(vector float __a,
+ vector bool int __b) {
+ vector unsigned int __res =
+ (vector unsigned int)__a ^ (vector unsigned int)__b;
+ return (vector float)__res;
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_vxor(vector signed long long __a, vector signed long long __b) {
+ return __a ^ __b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_vxor(vector bool long long __a, vector signed long long __b) {
+ return (vector signed long long)__a ^ __b;
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_vxor(vector signed long long __a, vector bool long long __b) {
+ return __a ^ (vector signed long long)__b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vxor(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a ^ __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vxor(vector bool long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__a ^ __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vxor(vector unsigned long long __a, vector bool long long __b) {
+ return __a ^ (vector unsigned long long)__b;
+}
+
+static vector bool long long __ATTRS_o_ai vec_vxor(vector bool long long __a,
+ vector bool long long __b) {
+ return __a ^ __b;
+}
+#endif
+
+/* ------------------ extensions for CBEA (Cell Broadband Engine) ----------- */
+
+/* vec_extract */
+
+static signed char __ATTRS_o_ai vec_extract(vector signed char __a, int __b) {
+ return __a[__b];
+}
+
+static unsigned char __ATTRS_o_ai vec_extract(vector unsigned char __a,
+ int __b) {
+ return __a[__b];
+}
+
+static unsigned char __ATTRS_o_ai vec_extract(vector bool char __a,
+ int __b) {
+ return __a[__b];
+}
+
+static signed short __ATTRS_o_ai vec_extract(vector signed short __a, int __b) {
+ return __a[__b];
+}
+
+static unsigned short __ATTRS_o_ai vec_extract(vector unsigned short __a,
+ int __b) {
+ return __a[__b];
+}
+
+static unsigned short __ATTRS_o_ai vec_extract(vector bool short __a,
+ int __b) {
+ return __a[__b];
+}
+
+static signed int __ATTRS_o_ai vec_extract(vector signed int __a, int __b) {
+ return __a[__b];
+}
+
+static unsigned int __ATTRS_o_ai vec_extract(vector unsigned int __a, int __b) {
+ return __a[__b];
+}
+
+static unsigned int __ATTRS_o_ai vec_extract(vector bool int __a, int __b) {
+ return __a[__b];
+}
+
+#ifdef __VSX__
+static signed long long __ATTRS_o_ai vec_extract(vector signed long long __a,
+ int __b) {
+ return __a[__b];
+}
+
+static unsigned long long __ATTRS_o_ai
+vec_extract(vector unsigned long long __a, int __b) {
+ return __a[__b];
+}
+
+static unsigned long long __ATTRS_o_ai vec_extract(vector bool long long __a,
+ int __b) {
+ return __a[__b];
+}
+
+static double __ATTRS_o_ai vec_extract(vector double __a, int __b) {
+ return __a[__b];
+}
+#endif
+
+static float __ATTRS_o_ai vec_extract(vector float __a, int __b) {
+ return __a[__b];
+}
+
+/* vec_insert */
+
+static vector signed char __ATTRS_o_ai vec_insert(signed char __a,
+ vector signed char __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_insert(unsigned char __a,
+ vector unsigned char __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector bool char __ATTRS_o_ai vec_insert(unsigned char __a,
+ vector bool char __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector signed short __ATTRS_o_ai vec_insert(signed short __a,
+ vector signed short __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_insert(unsigned short __a,
+ vector unsigned short __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector bool short __ATTRS_o_ai vec_insert(unsigned short __a,
+ vector bool short __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector signed int __ATTRS_o_ai vec_insert(signed int __a,
+ vector signed int __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_insert(unsigned int __a,
+ vector unsigned int __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector bool int __ATTRS_o_ai vec_insert(unsigned int __a,
+ vector bool int __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_insert(signed long long __a, vector signed long long __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_insert(unsigned long long __a, vector unsigned long long __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_insert(unsigned long long __a, vector bool long long __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector double __ATTRS_o_ai vec_insert(double __a, vector double __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+#endif
+
+static vector float __ATTRS_o_ai vec_insert(float __a, vector float __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
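+/* Illustrative usage (a sketch): vec_insert returns a copy of __b with
+   element __c replaced; the caller's vector is only updated by assignment.
+
+     vector signed int v = (vector signed int)(1, 2, 3, 4);
+     v = vec_insert(99, v, 2);   // v is now {1, 2, 99, 4}
+*/
+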
+/* vec_lvlx */
+
+static vector signed char __ATTRS_o_ai vec_lvlx(int __a,
+ const signed char *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector signed char)(0),
+ vec_lvsl(__a, __b));
+}
+
+static vector signed char __ATTRS_o_ai vec_lvlx(int __a,
+ const vector signed char *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector signed char)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned char __ATTRS_o_ai vec_lvlx(int __a,
+ const unsigned char *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector unsigned char)(0),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvlx(int __a, const vector unsigned char *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector unsigned char)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool char __ATTRS_o_ai vec_lvlx(int __a,
+ const vector bool char *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector bool char)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector short __ATTRS_o_ai vec_lvlx(int __a, const short *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector short)(0), vec_lvsl(__a, __b));
+}
+
+static vector short __ATTRS_o_ai vec_lvlx(int __a, const vector short *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector short)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned short __ATTRS_o_ai vec_lvlx(int __a,
+ const unsigned short *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector unsigned short)(0),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvlx(int __a, const vector unsigned short *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector unsigned short)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool short __ATTRS_o_ai vec_lvlx(int __a,
+ const vector bool short *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector bool short)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector pixel __ATTRS_o_ai vec_lvlx(int __a, const vector pixel *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector pixel)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector int __ATTRS_o_ai vec_lvlx(int __a, const int *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector int)(0), vec_lvsl(__a, __b));
+}
+
+static vector int __ATTRS_o_ai vec_lvlx(int __a, const vector int *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector int)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned int __ATTRS_o_ai vec_lvlx(int __a,
+ const unsigned int *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector unsigned int)(0),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvlx(int __a, const vector unsigned int *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector unsigned int)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool int __ATTRS_o_ai vec_lvlx(int __a,
+ const vector bool int *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector bool int)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector float __ATTRS_o_ai vec_lvlx(int __a, const float *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector float)(0), vec_lvsl(__a, __b));
+}
+
+static vector float __ATTRS_o_ai vec_lvlx(int __a, const vector float *__b) {
+ return vec_perm(vec_ld(__a, __b), (vector float)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
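+
+/* Sketch of the intended use: vec_lvlx yields the bytes of the 16-byte
+   block containing (__a + __b) from that address to the block's end,
+   left-justified and zero-filled; vec_lvrx (further below) yields the
+   bytes before the address, right-justified.  Merging the two loads a
+   misaligned vector:
+     vector unsigned char v = vec_or(vec_lvlx(0, p), vec_lvrx(16, p));
+   The ...xl variants use vec_ldl, marking the block least recently
+   used in the cache.  */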
+
+/* vec_lvlxl */
+
+static vector signed char __ATTRS_o_ai vec_lvlxl(int __a,
+ const signed char *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector signed char)(0),
+ vec_lvsl(__a, __b));
+}
+
+static vector signed char __ATTRS_o_ai
+vec_lvlxl(int __a, const vector signed char *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector signed char)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned char __ATTRS_o_ai vec_lvlxl(int __a,
+ const unsigned char *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector unsigned char)(0),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvlxl(int __a, const vector unsigned char *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector unsigned char)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool char __ATTRS_o_ai vec_lvlxl(int __a,
+ const vector bool char *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector bool char)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector short __ATTRS_o_ai vec_lvlxl(int __a, const short *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector short)(0), vec_lvsl(__a, __b));
+}
+
+static vector short __ATTRS_o_ai vec_lvlxl(int __a, const vector short *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector short)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned short __ATTRS_o_ai vec_lvlxl(int __a,
+ const unsigned short *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector unsigned short)(0),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvlxl(int __a, const vector unsigned short *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector unsigned short)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool short __ATTRS_o_ai vec_lvlxl(int __a,
+ const vector bool short *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector bool short)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector pixel __ATTRS_o_ai vec_lvlxl(int __a, const vector pixel *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector pixel)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector int __ATTRS_o_ai vec_lvlxl(int __a, const int *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector int)(0), vec_lvsl(__a, __b));
+}
+
+static vector int __ATTRS_o_ai vec_lvlxl(int __a, const vector int *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector int)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned int __ATTRS_o_ai vec_lvlxl(int __a,
+ const unsigned int *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector unsigned int)(0),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvlxl(int __a, const vector unsigned int *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector unsigned int)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool int __ATTRS_o_ai vec_lvlxl(int __a,
+ const vector bool int *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector bool int)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector float __ATTRS_o_ai vec_lvlxl(int __a, const float *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector float)(0), vec_lvsl(__a, __b));
+}
+
+static vector float __ATTRS_o_ai vec_lvlxl(int __a, const vector float *__b) {
+ return vec_perm(vec_ldl(__a, __b), (vector float)(0),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+/* vec_lvrx */
+
+static vector signed char __ATTRS_o_ai vec_lvrx(int __a,
+ const signed char *__b) {
+ return vec_perm((vector signed char)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static vector signed char __ATTRS_o_ai vec_lvrx(int __a,
+ const vector signed char *__b) {
+ return vec_perm((vector signed char)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned char __ATTRS_o_ai vec_lvrx(int __a,
+ const unsigned char *__b) {
+ return vec_perm((vector unsigned char)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvrx(int __a, const vector unsigned char *__b) {
+ return vec_perm((vector unsigned char)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool char __ATTRS_o_ai vec_lvrx(int __a,
+ const vector bool char *__b) {
+ return vec_perm((vector bool char)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector short __ATTRS_o_ai vec_lvrx(int __a, const short *__b) {
+ return vec_perm((vector short)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
+}
+
+static vector short __ATTRS_o_ai vec_lvrx(int __a, const vector short *__b) {
+ return vec_perm((vector short)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned short __ATTRS_o_ai vec_lvrx(int __a,
+ const unsigned short *__b) {
+ return vec_perm((vector unsigned short)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvrx(int __a, const vector unsigned short *__b) {
+ return vec_perm((vector unsigned short)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool short __ATTRS_o_ai vec_lvrx(int __a,
+ const vector bool short *__b) {
+ return vec_perm((vector bool short)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector pixel __ATTRS_o_ai vec_lvrx(int __a, const vector pixel *__b) {
+ return vec_perm((vector pixel)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector int __ATTRS_o_ai vec_lvrx(int __a, const int *__b) {
+ return vec_perm((vector int)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
+}
+
+static vector int __ATTRS_o_ai vec_lvrx(int __a, const vector int *__b) {
+ return vec_perm((vector int)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned int __ATTRS_o_ai vec_lvrx(int __a,
+ const unsigned int *__b) {
+ return vec_perm((vector unsigned int)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvrx(int __a, const vector unsigned int *__b) {
+ return vec_perm((vector unsigned int)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool int __ATTRS_o_ai vec_lvrx(int __a,
+ const vector bool int *__b) {
+ return vec_perm((vector bool int)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector float __ATTRS_o_ai vec_lvrx(int __a, const float *__b) {
+ return vec_perm((vector float)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
+}
+
+static vector float __ATTRS_o_ai vec_lvrx(int __a, const vector float *__b) {
+ return vec_perm((vector float)(0), vec_ld(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+/* vec_lvrxl */
+
+static vector signed char __ATTRS_o_ai vec_lvrxl(int __a,
+ const signed char *__b) {
+ return vec_perm((vector signed char)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static vector signed char __ATTRS_o_ai
+vec_lvrxl(int __a, const vector signed char *__b) {
+ return vec_perm((vector signed char)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned char __ATTRS_o_ai vec_lvrxl(int __a,
+ const unsigned char *__b) {
+ return vec_perm((vector unsigned char)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvrxl(int __a, const vector unsigned char *__b) {
+ return vec_perm((vector unsigned char)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool char __ATTRS_o_ai vec_lvrxl(int __a,
+ const vector bool char *__b) {
+ return vec_perm((vector bool char)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector short __ATTRS_o_ai vec_lvrxl(int __a, const short *__b) {
+ return vec_perm((vector short)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
+}
+
+static vector short __ATTRS_o_ai vec_lvrxl(int __a, const vector short *__b) {
+ return vec_perm((vector short)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned short __ATTRS_o_ai vec_lvrxl(int __a,
+ const unsigned short *__b) {
+ return vec_perm((vector unsigned short)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvrxl(int __a, const vector unsigned short *__b) {
+ return vec_perm((vector unsigned short)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool short __ATTRS_o_ai vec_lvrxl(int __a,
+ const vector bool short *__b) {
+ return vec_perm((vector bool short)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector pixel __ATTRS_o_ai vec_lvrxl(int __a, const vector pixel *__b) {
+ return vec_perm((vector pixel)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector int __ATTRS_o_ai vec_lvrxl(int __a, const int *__b) {
+ return vec_perm((vector int)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
+}
+
+static vector int __ATTRS_o_ai vec_lvrxl(int __a, const vector int *__b) {
+ return vec_perm((vector int)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector unsigned int __ATTRS_o_ai vec_lvrxl(int __a,
+ const unsigned int *__b) {
+ return vec_perm((vector unsigned int)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, __b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvrxl(int __a, const vector unsigned int *__b) {
+ return vec_perm((vector unsigned int)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector bool int __ATTRS_o_ai vec_lvrxl(int __a,
+ const vector bool int *__b) {
+ return vec_perm((vector bool int)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+static vector float __ATTRS_o_ai vec_lvrxl(int __a, const float *__b) {
+ return vec_perm((vector float)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
+}
+
+static vector float __ATTRS_o_ai vec_lvrxl(int __a, const vector float *__b) {
+ return vec_perm((vector float)(0), vec_ldl(__a, __b),
+ vec_lvsl(__a, (unsigned char *)__b));
+}
+
+/* vec_stvlx */
+
+static void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b,
+ signed char *__c) {
+ return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b,
+ vector signed char *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector bool char __a, int __b,
+ vector bool char *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector short __a, int __b, short *__c) {
+ return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector short __a, int __b,
+ vector short *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector unsigned short __a, int __b,
+ vector unsigned short *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector bool short __a, int __b,
+ vector bool short *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector pixel __a, int __b,
+ vector pixel *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector int __a, int __b, int *__c) {
+ return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector int __a, int __b, vector int *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector bool int __a, int __b,
+ vector bool int *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlx(vector float __a, int __b,
+ vector float *__c) {
+ return vec_st(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
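+
+/* Note: these stores are emulated by reloading the enclosing 16-byte
+   block with vec_lvrx, merging __a into it, and storing the whole
+   block back, i.e. a read-modify-write of the block rather than a
+   byte-granular store.  A full misaligned store pairs the left and
+   right halves (sketch):
+     vec_stvlx(v, 0, p);
+     vec_stvrx(v, 16, p);  */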
+
+/* vec_stvlxl */
+
+static void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b,
+ signed char *__c) {
+ return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b,
+ vector signed char *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector bool char __a, int __b,
+ vector bool char *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b, short *__c) {
+ return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b,
+ vector short *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a, int __b,
+ vector unsigned short *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector bool short __a, int __b,
+ vector bool short *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector pixel __a, int __b,
+ vector pixel *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b, int *__c) {
+ return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b, vector int *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector bool int __a, int __b,
+ vector bool int *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvlxl(vector float __a, int __b,
+ vector float *__c) {
+ return vec_stl(
+ vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+/* vec_stvrx */
+
+static void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b,
+ signed char *__c) {
+ return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b,
+ vector signed char *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector bool char __a, int __b,
+ vector bool char *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector short __a, int __b, short *__c) {
+ return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector short __a, int __b,
+ vector short *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector unsigned short __a, int __b,
+ vector unsigned short *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector bool short __a, int __b,
+ vector bool short *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector pixel __a, int __b,
+ vector pixel *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector int __a, int __b, int *__c) {
+ return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector int __a, int __b, vector int *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector bool int __a, int __b,
+ vector bool int *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrx(vector float __a, int __b,
+ vector float *__c) {
+ return vec_st(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+/* vec_stvrxl */
+
+static void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b,
+ signed char *__c) {
+ return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b,
+ vector signed char *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a, int __b,
+ unsigned char *__c) {
+ return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a, int __b,
+ vector unsigned char *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector bool char __a, int __b,
+ vector bool char *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b, short *__c) {
+ return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b,
+ vector short *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a, int __b,
+ unsigned short *__c) {
+ return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a, int __b,
+ vector unsigned short *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector bool short __a, int __b,
+ vector bool short *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector pixel __a, int __b,
+ vector pixel *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b, int *__c) {
+ return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b, vector int *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b,
+ unsigned int *__c) {
+ return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
+ __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b,
+ vector unsigned int *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector bool int __a, int __b,
+ vector bool int *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+static void __ATTRS_o_ai vec_stvrxl(vector float __a, int __b,
+ vector float *__c) {
+ return vec_stl(
+ vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+ __b, __c);
+}
+
+/* vec_promote */
+
+static vector signed char __ATTRS_o_ai vec_promote(signed char __a, int __b) {
+ vector signed char __res = (vector signed char)(0);
+ __res[__b] = __a;
+ return __res;
+}
+
+static vector unsigned char __ATTRS_o_ai vec_promote(unsigned char __a,
+ int __b) {
+ vector unsigned char __res = (vector unsigned char)(0);
+ __res[__b] = __a;
+ return __res;
+}
+
+static vector short __ATTRS_o_ai vec_promote(short __a, int __b) {
+ vector short __res = (vector short)(0);
+ __res[__b] = __a;
+ return __res;
+}
+
+static vector unsigned short __ATTRS_o_ai vec_promote(unsigned short __a,
+ int __b) {
+ vector unsigned short __res = (vector unsigned short)(0);
+ __res[__b] = __a;
+ return __res;
+}
+
+static vector int __ATTRS_o_ai vec_promote(int __a, int __b) {
+ vector int __res = (vector int)(0);
+ __res[__b] = __a;
+ return __res;
+}
+
+static vector unsigned int __ATTRS_o_ai vec_promote(unsigned int __a, int __b) {
+ vector unsigned int __res = (vector unsigned int)(0);
+ __res[__b] = __a;
+ return __res;
+}
+
+static vector float __ATTRS_o_ai vec_promote(float __a, int __b) {
+ vector float __res = (vector float)(0);
+ __res[__b] = __a;
+ return __res;
+}
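+
+/* Example (illustrative): vec_promote places __a at index __b of an
+   otherwise zeroed vector (this implementation zero-initializes the
+   result; the AltiVec interface leaves the other elements undefined):
+     vector int v = vec_promote(7, 0);   (v[0] == 7, v[1..3] == 0 here)  */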
+
+/* vec_splats */
+
+static vector signed char __ATTRS_o_ai vec_splats(signed char __a) {
+ return (vector signed char)(__a);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_splats(unsigned char __a) {
+ return (vector unsigned char)(__a);
+}
+
+static vector short __ATTRS_o_ai vec_splats(short __a) {
+ return (vector short)(__a);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_splats(unsigned short __a) {
+ return (vector unsigned short)(__a);
+}
+
+static vector int __ATTRS_o_ai vec_splats(int __a) { return (vector int)(__a); }
+
+static vector unsigned int __ATTRS_o_ai vec_splats(unsigned int __a) {
+ return (vector unsigned int)(__a);
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai vec_splats(signed long long __a) {
+ return (vector signed long long)(__a);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_splats(unsigned long long __a) {
+ return (vector unsigned long long)(__a);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed __int128 __ATTRS_o_ai vec_splats(signed __int128 __a) {
+ return (vector signed __int128)(__a);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_splats(unsigned __int128 __a) {
+ return (vector unsigned __int128)(__a);
+}
+#endif
+
+static vector double __ATTRS_o_ai vec_splats(double __a) {
+ return (vector double)(__a);
+}
+#endif
+
+static vector float __ATTRS_o_ai vec_splats(float __a) {
+ return (vector float)(__a);
+}
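+
+/* Example (illustrative): vec_splats replicates a scalar into every
+   element, e.g.
+     vector float v = vec_splats(2.0f);   (all four elements are 2.0f)  */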
+
+/* ----------------------------- predicates --------------------------------- */
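+
+/* The *_p builtins below return a bit of the CR6 field set by the
+   recording form of the vector compare: __CR6_LT is set when the
+   compare holds for all elements, __CR6_EQ when it holds for none.
+   Hence vec_all_eq tests __CR6_LT on an equality compare, while
+   vec_all_ge tests __CR6_EQ on the reversed greater-than compare
+   (no element with __b > __a).  */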
+
+/* vec_all_eq */
+
+static int __ATTRS_o_ai vec_all_eq(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, __a, (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector pixel __a, vector pixel __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, __a, (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
+ (vector int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_all_eq(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector signed long long __a,
+                                   vector bool long long __b) {
+  return __builtin_altivec_vcmpequd_p(__CR6_LT, __a,
+                                      (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
+ vector long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
+ (vector long long)__b);
+}
+#endif
+
+static int __ATTRS_o_ai vec_all_eq(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_LT, __a, __b);
+#else
+ return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_eq(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_LT, __a, __b);
+}
+#endif
+
+/* vec_all_ge */
+
+static int __ATTRS_o_ai vec_all_ge(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, (vector signed char)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __b, (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, (vector short)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, (vector int)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __b, (vector unsigned int)__a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_all_ge(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, (vector signed long long)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b,
+ (vector unsigned long long)__a);
+}
+
+static int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+#endif
+
+static int __ATTRS_o_ai vec_all_ge(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __a, __b);
+#else
+ return __builtin_altivec_vcmpgefp_p(__CR6_LT, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_ge(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __a, __b);
+}
+#endif
+
+/* vec_all_gt */
+
+static int __ATTRS_o_ai vec_all_gt(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __a, (vector signed char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, __a, (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __a, (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
+ __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __a, (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __a, (vector unsigned int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_all_gt(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a,
+ (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a,
+ (vector unsigned long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
+ __b);
+}
+
+static int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+#endif
+
+static int __ATTRS_o_ai vec_all_gt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __a, __b);
+#else
+ return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_gt(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __a, __b);
+}
+#endif
+
+/* vec_all_in */
+
+static int __attribute__((__always_inline__))
+vec_all_in(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpbfp_p(__CR6_EQ, __a, __b);
+}
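+
+/* vec_all_in returns 1 when every element of __a is bounded by the
+   corresponding element of __b, i.e. -__b[i] <= __a[i] <= __b[i]:
+   vcmpbfp flags out-of-bounds elements, so __CR6_EQ (nothing flagged)
+   means all are in range.  */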
+
+/* vec_all_le */
+
+static int __ATTRS_o_ai vec_all_le(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __a, (vector signed char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __a, (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __a, (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
+ __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __a, (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __a, (vector unsigned int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_all_le(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a,
+ (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a,
+ (vector unsigned long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
+ __b);
+}
+
+static int __ATTRS_o_ai vec_all_le(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+#endif
+
+static int __ATTRS_o_ai vec_all_le(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __b, __a);
+#else
+ return __builtin_altivec_vcmpgefp_p(__CR6_LT, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_le(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __b, __a);
+}
+#endif
+
+/* vec_all_lt */
+
+static int __ATTRS_o_ai vec_all_lt(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT, (vector signed char)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, __b, (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT, (vector short)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT, (vector int)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __b, (vector unsigned int)__a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_all_lt(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT, (vector signed long long)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b,
+ (vector unsigned long long)__a);
+}
+
+static int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+#endif
+
+static int __ATTRS_o_ai vec_all_lt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __b, __a);
+#else
+ return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_lt(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __b, __a);
+}
+#endif
+
+/* vec_all_nan */
+
+static int __ATTRS_o_ai vec_all_nan(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __a);
+#else
+ return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __a);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_nan(vector double __a) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __a);
+}
+#endif
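+
+/* vec_all_nan relies on NaN != NaN: the self-equality compare holds
+   for no element (__CR6_EQ) exactly when every element is a NaN.  */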
+
+/* vec_all_ne */
+
+static int __ATTRS_o_ai vec_all_ne(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, __a, (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector pixel __a, vector pixel __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, __a, (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
+ (vector int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_all_ne(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a,
+ (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
+ (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
+ (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
+ (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
+ (vector signed long long)__b);
+}
+#endif
+
+static int __ATTRS_o_ai vec_all_ne(vector float __a, vector float __b) {
+#ifdef __VSX__
+  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __b);
+#else
+ return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_ne(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __b);
+}
+#endif
+
+/* vec_all_nge */
+
+static int __ATTRS_o_ai
+vec_all_nge(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_EQ, __a, __b);
+#else
+ return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai
+vec_all_nge(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_EQ, __a, __b);
+}
+#endif
+
+/* vec_all_ngt */
+
+static int __ATTRS_o_ai
+vec_all_ngt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ, __a, __b);
+#else
+ return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai
+vec_all_ngt(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ, __a, __b);
+}
+#endif
+
+/* vec_all_nle */
+
+static int __attribute__((__always_inline__))
+vec_all_nle(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __b, __a);
+}
+
+/* vec_all_nlt */
+
+static int __attribute__((__always_inline__))
+vec_all_nlt(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __b, __a);
+}
+
+/* vec_all_numeric */
+
+static int __attribute__((__always_inline__))
+vec_all_numeric(vector float __a) {
+ return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __a);
+}
+
+/* vec_any_eq */
+
+static int __ATTRS_o_ai vec_any_eq(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, __a, (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector pixel __a, vector pixel __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, __a, (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_any_eq(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a,
+ (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+#endif
+
+static int __ATTRS_o_ai vec_any_eq(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ_REV, __a, __b);
+#else
+ return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_eq(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ_REV, __a, __b);
+}
+#endif
+
+/* vec_any_ge */
+
+static int __ATTRS_o_ai vec_any_ge(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, (vector signed char)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __b,
+ (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, (vector short)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, (vector int)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __b,
+ (vector unsigned int)__a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_any_ge(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV,
+ (vector signed long long)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
+ (vector unsigned long long)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
+ (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b,
+ (vector unsigned long long)__a);
+}
+
+static int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
+ (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+#endif
+
+static int __ATTRS_o_ai vec_any_ge(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __a, __b);
+#else
+ return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_ge(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __a, __b);
+}
+#endif
+
+/* vec_any_gt */
+
+static int __ATTRS_o_ai vec_any_gt(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __a,
+ (vector signed char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __a,
+ (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
+ __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __a, (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
+ __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __a, (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __a,
+ (vector unsigned int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
+ __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_any_gt(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a,
+ (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a,
+ (vector unsigned long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
+ (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
+ (vector unsigned long long)__a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
+ (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+#endif
+
+static int __ATTRS_o_ai vec_any_gt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __a, __b);
+#else
+ return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_gt(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __a, __b);
+}
+#endif
+
+/* vec_any_le */
+
+static int __ATTRS_o_ai vec_any_le(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __a,
+ (vector signed char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __a,
+ (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
+ __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __a, (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
+ __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __a, (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __a,
+ (vector unsigned int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
+ __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_any_le(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a,
+ (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a,
+ (vector unsigned long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
+ (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
+ (vector unsigned long long)__a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_le(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
+ (vector unsigned long long)__a,
+ (vector unsigned long long)__b);
+}
+#endif
+
+static int __ATTRS_o_ai vec_any_le(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __b, __a);
+#else
+ return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_le(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __b, __a);
+}
+#endif
+
+/* vec_any_lt */
+
+static int __ATTRS_o_ai vec_any_lt(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, (vector signed char)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __b,
+ (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
+ (vector unsigned char)__a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, (vector short)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
+ (vector unsigned short)__a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, (vector int)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
+ __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __b,
+ (vector unsigned int)__a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
+ (vector unsigned int)__a);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_any_lt(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV,
+ (vector signed long long)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
+ (vector unsigned long long)__b, __a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
+ (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b,
+ (vector unsigned long long)__a);
+}
+
+static int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
+ (vector unsigned long long)__b,
+ (vector unsigned long long)__a);
+}
+#endif
+
+static int __ATTRS_o_ai vec_any_lt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __b, __a);
+#else
+ return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_lt(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __b, __a);
+}
+#endif
+
+/* vec_any_nan */
+
+static int __attribute__((__always_inline__)) vec_any_nan(vector float __a) {
+ return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __a);
+}
+
+/* vec_any_ne */
+
+static int __ATTRS_o_ai vec_any_ne(vector signed char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector signed char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector unsigned char __a,
+ vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool char __a,
+ vector signed char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool char __a,
+ vector unsigned char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool char __a, vector bool char __b) {
+ return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
+ (vector char)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector short __a, vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector short __a, vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, __a, (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector unsigned short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector unsigned short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool short __a, vector short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool short __a,
+ vector unsigned short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool short __a,
+ vector bool short __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector pixel __a, vector pixel __b) {
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
+ (vector short)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector int __a, vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, __a, (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector unsigned int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector unsigned int __a,
+ vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool int __a, vector int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool int __a,
+ vector unsigned int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool int __a, vector bool int __b) {
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
+ (vector int)__b);
+}
+
+#ifdef __POWER8_VECTOR__
+static int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a, __b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, (vector long long)__a,
+ (vector long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a,
+ (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
+ vector signed long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
+ vector bool long long __b) {
+ return __builtin_altivec_vcmpequd_p(
+ __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+#endif
+
+static int __ATTRS_o_ai vec_any_ne(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_LT_REV, __a, __b);
+#else
+ return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_ne(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_LT_REV, __a, __b);
+}
+#endif
+
+/* vec_any_nge */
+
+static int __attribute__((__always_inline__))
+vec_any_nge(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __a, __b);
+}
+
+/* vec_any_ngt */
+
+static int __attribute__((__always_inline__))
+vec_any_ngt(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __a, __b);
+}
+
+/* vec_any_nle */
+
+static int __attribute__((__always_inline__))
+vec_any_nle(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __b, __a);
+}
+
+/* vec_any_nlt */
+
+static int __attribute__((__always_inline__))
+vec_any_nlt(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __b, __a);
+}
+
+/* vec_any_numeric */
+
+static int __attribute__((__always_inline__))
+vec_any_numeric(vector float __a) {
+ return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __a);
+}
+
+/* vec_any_out */
+
+static int __attribute__((__always_inline__))
+vec_any_out(vector float __a, vector float __b) {
+ return __builtin_altivec_vcmpbfp_p(__CR6_EQ_REV, __a, __b);
+}
+
+/* Power 8 Crypto functions
+Note: We diverge from the current GCC implementation with regard
+to cryptography and related functions as follows:
+- Only the SHA and AES instructions and builtins are disabled by -mno-crypto.
+- The remaining ones are only available on Power8 and up, so they
+  require -mpower8-vector.
+The justification is that export regulations make Category:Vector.Crypto
+optional (i.e. compliant hardware may not provide support), so we need to
+be able to turn off support for those instructions. The remaining ones
+(currently controlled by -mcrypto for GCC) must still be provided on
+compliant hardware even if Vector.Crypto is not.
+*/
+#ifdef __CRYPTO__
+#define vec_sbox_be __builtin_altivec_crypto_vsbox
+#define vec_cipher_be __builtin_altivec_crypto_vcipher
+#define vec_cipherlast_be __builtin_altivec_crypto_vcipherlast
+#define vec_ncipher_be __builtin_altivec_crypto_vncipher
+#define vec_ncipherlast_be __builtin_altivec_crypto_vncipherlast
+
+static vector unsigned long long __attribute__((__always_inline__))
+__builtin_crypto_vsbox(vector unsigned long long __a) {
+ return __builtin_altivec_crypto_vsbox(__a);
+}
+
+static vector unsigned long long __attribute__((__always_inline__))
+__builtin_crypto_vcipher(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_crypto_vcipher(__a, __b);
+}
+
+static vector unsigned long long __attribute__((__always_inline__))
+__builtin_crypto_vcipherlast(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_crypto_vcipherlast(__a, __b);
+}
+
+static vector unsigned long long __attribute__((__always_inline__))
+__builtin_crypto_vncipher(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_crypto_vncipher(__a, __b);
+}
+
+static vector unsigned long long __attribute__((__always_inline__))
+__builtin_crypto_vncipherlast(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_crypto_vncipherlast(__a, __b);
+}
+
+#define __builtin_crypto_vshasigmad __builtin_altivec_crypto_vshasigmad
+#define __builtin_crypto_vshasigmaw __builtin_altivec_crypto_vshasigmaw
+
+#define vec_shasigma_be(X, Y, Z) \
+ _Generic((X), vector unsigned int: __builtin_crypto_vshasigmaw, \
+ vector unsigned long long: __builtin_crypto_vshasigmad) \
+((X), (Y), (Z))
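+
+/* Illustrative usage sketch (editorial addition, not part of the header):
+ * with crypto support enabled, the _Generic dispatch above selects the word
+ * or doubleword SHA sigma builtin from the type of the first operand.  The
+ * function name and immediate operands are hypothetical example values. */
+static vector unsigned int __attribute__((__always_inline__))
+__example_sha_sigma_word(vector unsigned int __x) {
+  /* __x is vector unsigned int, so this expands to
+     __builtin_crypto_vshasigmaw(__x, 0, 0). */
+  return vec_shasigma_be(__x, 0, 0);
+}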
+#endif
+
+#ifdef __POWER8_VECTOR__
+static vector unsigned char __ATTRS_o_ai
+__builtin_crypto_vpermxor(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
+}
+
+static vector unsigned short __ATTRS_o_ai
+__builtin_crypto_vpermxor(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return (vector unsigned short)__builtin_altivec_crypto_vpermxor(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
+}
+
+static vector unsigned int __ATTRS_o_ai __builtin_crypto_vpermxor(
+ vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
+ return (vector unsigned int)__builtin_altivec_crypto_vpermxor(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
+}
+
+static vector unsigned long long __ATTRS_o_ai __builtin_crypto_vpermxor(
+ vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned long long __c) {
+ return (vector unsigned long long)__builtin_altivec_crypto_vpermxor(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
+}
+
+static vector unsigned char __ATTRS_o_ai
+__builtin_crypto_vpmsumb(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_crypto_vpmsumb(__a, __b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+__builtin_crypto_vpmsumb(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_crypto_vpmsumh(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+__builtin_crypto_vpmsumb(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_crypto_vpmsumw(__a, __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+__builtin_crypto_vpmsumb(vector unsigned long long __a,
+ vector unsigned long long __b) {
+ return __builtin_altivec_crypto_vpmsumd(__a, __b);
+}
+
+static vector signed char __ATTRS_o_ai vec_vgbbd(vector signed char __a) {
+  return __builtin_altivec_vgbbd((vector unsigned char)__a);
+}
+
+#define vec_pmsum_be __builtin_crypto_vpmsumb
+#define vec_gb __builtin_altivec_vgbbd
+
+static vector unsigned char __ATTRS_o_ai vec_vgbbd(vector unsigned char __a) {
+  return __builtin_altivec_vgbbd(__a);
+}
+
+static vector long long __ATTRS_o_ai vec_vbpermq(vector signed char __a,
+                                                 vector signed char __b) {
+  return __builtin_altivec_vbpermq((vector unsigned char)__a,
+                                   (vector unsigned char)__b);
+}
+
+static vector long long __ATTRS_o_ai vec_vbpermq(vector unsigned char __a,
+                                                 vector unsigned char __b) {
+  return __builtin_altivec_vbpermq(__a, __b);
+}
+
+#ifdef __powerpc64__
+static vector unsigned long long __attribute__((__always_inline__))
+vec_bperm(vector unsigned __int128 __a, vector unsigned char __b) {
+  return __builtin_altivec_vbpermq((vector unsigned char)__a,
+                                   (vector unsigned char)__b);
+}
+#endif
+#endif
+
+#undef __ATTRS_o_ai
+
+#endif /* __ALTIVEC_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/ammintrin.h b/contrib/llvm/tools/clang/lib/Headers/ammintrin.h
new file mode 100644
index 0000000..4880fd7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/ammintrin.h
@@ -0,0 +1,209 @@
+/*===---- ammintrin.h - SSE4a intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __AMMINTRIN_H
+#define __AMMINTRIN_H
+
+#include <pmmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4a")))
+
+/// \brief Extracts the specified bits from the lower 64 bits of the 128-bit
+///    integer vector operand, starting at the bit index idx and extending
+///    for len bits.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_extracti_si64(__m128i x, const int len, const int idx);
+/// \endcode
+///
+/// \code
+/// This intrinsic corresponds to the \c EXTRQ instruction.
+/// \endcode
+///
+/// \param x
+/// The value from which bits are extracted.
+/// \param len
+/// Bits [5:0] specify the length; the other bits are ignored. If bits [5:0]
+/// are zero, the length is interpreted as 64.
+/// \param idx
+/// Bits [5:0] specify the index of the least significant bit; the other
+/// bits are ignored. If the sum of the index and length is greater than
+/// 64, the result is undefined. If the length and index are both zero,
+/// bits [63:0] of parameter x are extracted. If the length is zero
+/// but the index is non-zero, the result is undefined.
+/// \returns A 128-bit integer vector whose lower 64 bits contain the bits
+/// extracted from the source operand.
+#define _mm_extracti_si64(x, len, idx) \
+ ((__m128i)__builtin_ia32_extrqi((__v2di)(__m128i)(x), \
+ (char)(len), (char)(idx)))
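+
+/* Illustrative example (editorial addition, not part of the header): extract
+ * the 16-bit field starting at bit 8 of the low quadword.  The function name
+ * is hypothetical. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+__example_extracti(__m128i __x)
+{
+  /* len = 16, idx = 8: for a low quadword of 0x1122334455667788 the result's
+     low 64 bits are 0x0000000000006677 (the field is zero-filled). */
+  return _mm_extracti_si64(__x, 16, 8);
+}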
+
+/// \brief Extracts the specified bits from the lower 64 bits of the 128-bit
+///    integer vector operand, at the bit index and length specified by __y.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// This intrinsic corresponds to the \c EXTRQ instruction.
+/// \endcode
+///
+/// \param __x
+/// The value from which bits are extracted.
+/// \param __y
+/// Specifies the index of the least significant bit at [13:8]
+/// and the length at [5:0]; all other bits are ignored.
+/// If bits [5:0] are zero, the length is interpreted as 64.
+/// If the sum of the index and length is greater than 64, the result is
+/// undefined. If the length and index are both zero, bits [63:0] of
+/// parameter __x are extracted. If the length is zero but the index is
+/// non-zero, the result is undefined.
+/// \returns A 128-bit vector whose lower 64 bits contain the bits extracted
+/// from the source operand.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_extract_si64(__m128i __x, __m128i __y)
+{
+ return (__m128i)__builtin_ia32_extrq((__v2di)__x, (__v16qi)__y);
+}
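+
+/* Illustrative example (editorial addition, not part of the header): the
+ * register form packs the length into bits [5:0] and the index into bits
+ * [13:8] of __y.  The function name is hypothetical. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+__example_extract_reg(__m128i __x)
+{
+  /* Same extraction as _mm_extracti_si64(__x, 16, 8). */
+  return _mm_extract_si64(__x, _mm_cvtsi32_si128(16 | (8 << 8)));
+}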
+
+/// \brief Inserts len bits from the source integer vector y into the lower
+///    64 bits of the destination integer vector x, starting at the bit
+///    index idx.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_inserti_si64(__m128i x, __m128i y, const int len,
+/// const int idx);
+/// \endcode
+///
+/// \code
+/// This intrinsic corresponds to the \c INSERTQ instruction.
+/// \endcode
+///
+/// \param x
+/// The destination operand where bits will be inserted. The inserted bits
+/// are defined by the length len and by the index idx specifying the least
+/// significant bit.
+/// \param y
+/// The source operand containing the bits to be extracted. The extracted
+/// bits are the least significant bits of operand y of length len.
+/// \param len
+/// Bits [5:0] specify the length; the other bits are ignored. If bits [5:0]
+/// are zero, the length is interpreted as 64.
+/// \param idx
+/// Bits [5:0] specify the index of the least significant bit; the other
+/// bits are ignored. If the sum of the index and length is greater than
+/// 64, the result is undefined. If the length and index are both zero,
+/// bits [63:0] of parameter y are inserted into parameter x. If the
+/// length is zero but the index is non-zero, the result is undefined.
+/// \returns A 128-bit integer vector containing the original lower 64-bits
+/// of destination operand x with the specified bitfields replaced by the
+/// lower bits of source operand y. The upper 64 bits of the return value
+/// are undefined.
+#define _mm_inserti_si64(x, y, len, idx) \
+ ((__m128i)__builtin_ia32_insertqi((__v2di)(__m128i)(x), \
+ (__v2di)(__m128i)(y), \
+ (char)(len), (char)(idx)))
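+
+/* Illustrative example (editorial addition, not part of the header): replace
+ * bits [23:8] of __x with the low 16 bits of __y.  The function name is
+ * hypothetical. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+__example_inserti(__m128i __x, __m128i __y)
+{
+  return _mm_inserti_si64(__x, __y, 16, 8);
+}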
+
+/// \brief Inserts bits from the source integer vector __y into the lower
+///    64 bits of the destination integer vector __x, at the bit index and
+///    length specified by __y.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// This intrinsic corresponds to the \c INSERTQ instruction.
+/// \endcode
+///
+/// \param __x
+/// The destination operand where bits will be inserted. The inserted bits
+/// are defined by the length and by the index of the least significant bit
+/// specified by operand __y.
+/// \param __y
+/// The source operand containing the bits to be extracted. The extracted
+/// bits are the least significant bits of operand __y with length specified
+/// by bits [69:64]. These are inserted into the destination at the index
+/// specified by bits [77:72]; all other bits are ignored.
+/// If bits [69:64] are zero, the length is interpreted as 64.
+/// If the sum of the index and length is greater than 64, the result is
+/// undefined. If the length and index are both zero, bits [63:0] of
+/// parameter __y are inserted into parameter __x. If the length
+/// is zero but the index is non-zero, the result is undefined.
+/// \returns A 128-bit integer vector containing the original lower 64-bits
+/// of destination operand __x with the specified bitfields replaced by the
+/// lower bits of source operand __y. The upper 64 bits of the return value
+/// are undefined.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_insert_si64(__m128i __x, __m128i __y)
+{
+ return (__m128i)__builtin_ia32_insertq((__v2di)__x, (__v2di)__y);
+}
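+
+/* Illustrative example (editorial addition, not part of the header): for the
+ * register form, the field to insert occupies bits [63:0] of __y, the length
+ * bits [69:64], and the index bits [77:72], i.e. the low bits of the second
+ * quadword.  Names are hypothetical. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+__example_insert_reg(__m128i __x, long long __field)
+{
+  /* Pack length 16 and index 8 into the upper quadword. */
+  return _mm_insert_si64(__x, _mm_set_epi64x(16 | (8 << 8), __field));
+}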
+
+/// \brief Stores a 64-bit double-precision value in a 64-bit memory location.
+/// To minimize caching, the data is flagged as non-temporal (unlikely to be
+/// used again soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// This intrinsic corresponds to the \c MOVNTSD instruction.
+/// \endcode
+///
+/// \param __p
+/// The 64-bit memory location used to store the register value.
+/// \param __a
+/// The 64-bit double-precision floating-point register value to
+/// be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_sd(double *__p, __m128d __a)
+{
+ __builtin_ia32_movntsd(__p, (__v2df)__a);
+}
+
+/// \brief Stores a 32-bit single-precision floating-point value in a 32-bit
+/// memory location. To minimize caching, the data is flagged as
+/// non-temporal (unlikely to be used again soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// This intrinsic corresponds to the \c MOVNTSS instruction.
+/// \endcode
+///
+/// \param __p
+/// The 32-bit memory location used to store the register value.
+/// \param __a
+/// The 32-bit single-precision floating-point register value to
+/// be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_ss(float *__p, __m128 __a)
+{
+ __builtin_ia32_movntss(__p, (__v4sf)__a);
+}
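+
+/* Illustrative example (editorial addition, not part of the header): copy an
+ * array with non-temporal stores so the destination does not evict useful
+ * cached data.  Function and parameter names are hypothetical. */
+static __inline__ void __DEFAULT_FN_ATTRS
+__example_stream_copy(double *__dst, const double *__src, int __n)
+{
+  int __i;
+  for (__i = 0; __i < __n; ++__i)
+    _mm_stream_sd(__dst + __i, _mm_load_sd(__src + __i));
+}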
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __AMMINTRIN_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/arm_acle.h b/contrib/llvm/tools/clang/lib/Headers/arm_acle.h
new file mode 100644
index 0000000..4be1d09
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/arm_acle.h
@@ -0,0 +1,308 @@
+/*===---- arm_acle.h - ARM Non-Neon intrinsics -----------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_ACLE_H
+#define __ARM_ACLE_H
+
+#ifndef __ARM_ACLE
+#error "ACLE intrinsics support not enabled."
+#endif
+
+#include <stdint.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* 8 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
+/* 8.3 Memory barriers */
+#if !defined(_MSC_VER)
+#define __dmb(i) __builtin_arm_dmb(i)
+#define __dsb(i) __builtin_arm_dsb(i)
+#define __isb(i) __builtin_arm_isb(i)
+#endif
+
+/* 8.4 Hints */
+
+#if !defined(_MSC_VER)
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfi(void) {
+ __builtin_arm_wfi();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfe(void) {
+ __builtin_arm_wfe();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sev(void) {
+ __builtin_arm_sev();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sevl(void) {
+ __builtin_arm_sevl();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(void) {
+ __builtin_arm_yield();
+}
+#endif
+
+#if __ARM_32BIT_STATE
+#define __dbg(t) __builtin_arm_dbg(t)
+#endif
+
+/* 8.5 Swap */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __swp(uint32_t x, volatile uint32_t *p) {
+ uint32_t v;
+ do v = __builtin_arm_ldrex(p); while (__builtin_arm_strex(x, p));
+ return v;
+}
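+
+/* Illustrative sketch (hypothetical usage): because __swp is an atomic
+ * exchange built from the exclusive load/store loop above, a minimal
+ * spinlock can be layered on it. A real lock would add memory barriers
+ * (e.g. __dmb) around the critical section; they are omitted for brevity.
+ *
+ *   static volatile uint32_t lock = 0;           // 0 == unlocked
+ *   static void spin_lock(void) {
+ *     while (__swp(1, &lock) != 0)               // old value 0 => acquired
+ *       ;
+ *   }
+ *   static void spin_unlock(void) { lock = 0; }
+ */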
+
+/* 8.6 Memory prefetch intrinsics */
+/* 8.6.1 Data prefetch */
+#define __pld(addr) __pldx(0, 0, 0, addr)
+
+#if __ARM_32BIT_STATE
+#define __pldx(access_kind, cache_level, retention_policy, addr) \
+ __builtin_arm_prefetch(addr, access_kind, 1)
+#else
+#define __pldx(access_kind, cache_level, retention_policy, addr) \
+ __builtin_arm_prefetch(addr, access_kind, cache_level, retention_policy, 1)
+#endif
+
+/* 8.6.2 Instruction prefetch */
+#define __pli(addr) __plix(0, 0, addr)
+
+#if __ARM_32BIT_STATE
+#define __plix(cache_level, retention_policy, addr) \
+ __builtin_arm_prefetch(addr, 0, 0)
+#else
+#define __plix(cache_level, retention_policy, addr) \
+ __builtin_arm_prefetch(addr, 0, cache_level, retention_policy, 0)
+#endif
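+
+/* Illustrative sketch (hypothetical usage): hint that data a fixed distance
+ * ahead of the current position will be read soon, while still processing
+ * the current element. The array, bound, and lookahead are hypothetical.
+ *
+ *   for (size_t i = 0; i < n; ++i) {
+ *     __pld(&a[i + 16]);   // prefetch for read, default level and policy
+ *     sum += a[i];
+ *   }
+ */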
+
+/* 8.7 NOP */
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) {
+ __builtin_arm_nop();
+}
+
+/* 9 DATA-PROCESSING INTRINSICS */
+/* 9.2 Miscellaneous data-processing intrinsics */
+/* ROR */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __ror(uint32_t x, uint32_t y) {
+ y %= 32;
+ if (y == 0) return x;
+ return (x >> y) | (x << (32 - y));
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+ __rorll(uint64_t x, uint32_t y) {
+ y %= 64;
+ if (y == 0) return x;
+ return (x >> y) | (x << (64 - y));
+}
+
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+ __rorl(unsigned long x, uint32_t y) {
+#if __SIZEOF_LONG__ == 4
+ return __ror(x, y);
+#else
+ return __rorll(x, y);
+#endif
+}
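+
+/* Worked examples (illustrative): a right rotation moves the low bits into
+ * the high positions, and the explicit y == 0 check above avoids an
+ * undefined full-width shift:
+ *
+ *   __ror(0x00000001U, 1) == 0x80000000U
+ *   __ror(0x12345678U, 8) == 0x78123456U
+ *   __ror(x, 32)          == x            // y %= 32 reduces 32 to 0
+ */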
+
+/* CLZ */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __clz(uint32_t t) {
+ return __builtin_clz(t);
+}
+
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+ __clzl(unsigned long t) {
+ return __builtin_clzl(t);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+ __clzll(uint64_t t) {
+ return __builtin_clzll(t);
+}
+
+/* REV */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __rev(uint32_t t) {
+ return __builtin_bswap32(t);
+}
+
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+ __revl(unsigned long t) {
+#if __SIZEOF_LONG__ == 4
+ return __builtin_bswap32(t);
+#else
+ return __builtin_bswap64(t);
+#endif
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+ __revll(uint64_t t) {
+ return __builtin_bswap64(t);
+}
+
+/* REV16 */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __rev16(uint32_t t) {
+ return __ror(__rev(t), 16);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+ __rev16ll(uint64_t t) {
+ return (((uint64_t)__rev16(t >> 32)) << 32) | __rev16(t);
+}
+
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+ __rev16l(unsigned long t) {
+#if __SIZEOF_LONG__ == 4
+ return __rev16(t);
+#else
+ return __rev16ll(t);
+#endif
+}
+
+/* REVSH */
+static __inline__ int16_t __attribute__((__always_inline__, __nodebug__))
+ __revsh(int16_t t) {
+ return __builtin_bswap16(t);
+}
+
+/* RBIT */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __rbit(uint32_t t) {
+ return __builtin_arm_rbit(t);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+ __rbitll(uint64_t t) {
+#if __ARM_32BIT_STATE
+ return (((uint64_t) __builtin_arm_rbit(t)) << 32) |
+ __builtin_arm_rbit(t >> 32);
+#else
+ return __builtin_arm_rbit64(t);
+#endif
+}
+
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+ __rbitl(unsigned long t) {
+#if __SIZEOF_LONG__ == 4
+ return __rbit(t);
+#else
+ return __rbitll(t);
+#endif
+}
+
+/*
+ * 9.4 Saturating intrinsics
+ *
+ * FIXME: Change the guard to the corresponding __ARM_FEATURE flag when the
+ * Q flag intrinsics are implemented and the flag is enabled.
+ */
+/* 9.4.1 Width-specified saturation intrinsics */
+#if __ARM_32BIT_STATE
+#define __ssat(x, y) __builtin_arm_ssat(x, y)
+#define __usat(x, y) __builtin_arm_usat(x, y)
+#endif
+
+/* 9.4.2 Saturating addition and subtraction intrinsics */
+#if __ARM_32BIT_STATE
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+ __qadd(int32_t t, int32_t v) {
+ return __builtin_arm_qadd(t, v);
+}
+
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+ __qsub(int32_t t, int32_t v) {
+ return __builtin_arm_qsub(t, v);
+}
+
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__qdbl(int32_t t) {
+ return __builtin_arm_qadd(t, t);
+}
+#endif
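+
+/* Worked examples (illustrative): unlike plain two's-complement addition,
+ * the saturating forms clamp at the int32_t bounds instead of wrapping:
+ *
+ *   __qadd(INT32_MAX, 1)  == INT32_MAX
+ *   __qsub(INT32_MIN, 1)  == INT32_MIN
+ *   __qdbl(0x40000000)    == INT32_MAX    // 2 * 2^30 saturates
+ */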
+
+/* 9.7 CRC32 intrinsics */
+#if __ARM_FEATURE_CRC32
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __crc32b(uint32_t a, uint8_t b) {
+ return __builtin_arm_crc32b(a, b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __crc32h(uint32_t a, uint16_t b) {
+ return __builtin_arm_crc32h(a, b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __crc32w(uint32_t a, uint32_t b) {
+ return __builtin_arm_crc32w(a, b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __crc32d(uint32_t a, uint64_t b) {
+ return __builtin_arm_crc32d(a, b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __crc32cb(uint32_t a, uint8_t b) {
+ return __builtin_arm_crc32cb(a, b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __crc32ch(uint32_t a, uint16_t b) {
+ return __builtin_arm_crc32ch(a, b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __crc32cw(uint32_t a, uint32_t b) {
+ return __builtin_arm_crc32cw(a, b);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+ __crc32cd(uint32_t a, uint64_t b) {
+ return __builtin_arm_crc32cd(a, b);
+}
+#endif
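+
+/* Illustrative sketch (hypothetical helper): fold a byte buffer into a
+ * running CRC with the byte-wide intrinsic. The 0xFFFFFFFF initial value
+ * and final inversion follow the common CRC-32 convention and are an
+ * assumption here, not something ACLE mandates.
+ *
+ *   static uint32_t crc32_buf(const uint8_t *p, size_t n) {
+ *     uint32_t crc = 0xFFFFFFFFU;
+ *     while (n--)
+ *       crc = __crc32b(crc, *p++);
+ *     return ~crc;
+ *   }
+ */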
+
+/* 10.1 Special register intrinsics */
+#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
+#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
+#define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg)
+#define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v)
+#define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v)
+#define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __ARM_ACLE_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h b/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h
new file mode 100644
index 0000000..f786572
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h
@@ -0,0 +1,1215 @@
+/*===---- avx2intrin.h - AVX2 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx2intrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX2INTRIN_H
+#define __AVX2INTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx2")))
+
+/* SSE4 Multiple Packed Sums of Absolute Difference. */
+#define _mm256_mpsadbw_epu8(X, Y, M) __builtin_ia32_mpsadbw256((X), (Y), (M))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_abs_epi8(__m256i __a)
+{
+ return (__m256i)__builtin_ia32_pabsb256((__v32qi)__a);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_abs_epi16(__m256i __a)
+{
+ return (__m256i)__builtin_ia32_pabsw256((__v16hi)__a);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_abs_epi32(__m256i __a)
+{
+ return (__m256i)__builtin_ia32_pabsd256((__v8si)__a);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_packs_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_packsswb256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_packs_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_packssdw256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_packus_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_packuswb256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_packus_epi32(__m256i __V1, __m256i __V2)
+{
+ return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_add_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v32qi)__a + (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_add_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v16hi)__a + (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_add_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8si)__a + (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_add_epi64(__m256i __a, __m256i __b)
+{
+ return __a + __b;
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_adds_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_paddsb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_adds_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_paddsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_adds_epu8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_paddusb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_adds_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_paddusw256((__v16hi)__a, (__v16hi)__b);
+}
+
+#define _mm256_alignr_epi8(a, b, n) __extension__ ({ \
+ (__m256i)__builtin_ia32_palignr256((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), (n)); })
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_and_si256(__m256i __a, __m256i __b)
+{
+ return __a & __b;
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_andnot_si256(__m256i __a, __m256i __b)
+{
+ return ~__a & __b;
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_avg_epu8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pavgb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_avg_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pavgw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_pblendvb256((__v32qi)__V1, (__v32qi)__V2,
+ (__v32qi)__M);
+}
+
+#define _mm256_blend_epi16(V1, V2, M) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v16hi)(__m256i)(V1), \
+ (__v16hi)(__m256i)(V2), \
+ (((M) & 0x01) ? 16 : 0), \
+ (((M) & 0x02) ? 17 : 1), \
+ (((M) & 0x04) ? 18 : 2), \
+ (((M) & 0x08) ? 19 : 3), \
+ (((M) & 0x10) ? 20 : 4), \
+ (((M) & 0x20) ? 21 : 5), \
+ (((M) & 0x40) ? 22 : 6), \
+ (((M) & 0x80) ? 23 : 7), \
+ (((M) & 0x01) ? 24 : 8), \
+ (((M) & 0x02) ? 25 : 9), \
+ (((M) & 0x04) ? 26 : 10), \
+ (((M) & 0x08) ? 27 : 11), \
+ (((M) & 0x10) ? 28 : 12), \
+ (((M) & 0x20) ? 29 : 13), \
+ (((M) & 0x40) ? 30 : 14), \
+ (((M) & 0x80) ? 31 : 15)); })
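+
+/* Worked example (illustrative): each bit of M selects the corresponding
+ * 16-bit element from V2 (bit set) or V1 (bit clear), and the same 8-bit
+ * mask is reused for both 128-bit lanes. With M == 0x0F, the low four words
+ * of each lane come from v2 and the high four from v1:
+ *
+ *   __m256i r = _mm256_blend_epi16(v1, v2, 0x0F);
+ */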
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v32qi)__a == (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v16hi)__a == (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8si)__a == (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)(__a == __b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epi8(__m256i __a, __m256i __b)
+{
+  /* This function always performs a signed comparison, but __v32qi is a
+     vector of char, whose signedness is implementation-defined, so use the
+     explicitly signed __v32qs instead. */
+ return (__m256i)((__v32qs)__a > (__v32qs)__b);
+}
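+
+/* Worked example (illustrative): the comparisons produce an all-ones
+ * element (0xFF) where the predicate holds and zero elsewhere, so the
+ * result doubles as a byte mask:
+ *
+ *   __m256i m = _mm256_cmpgt_epi8(a, b);    // signed per-byte a > b
+ *   __m256i r = _mm256_and_si256(m, a);     // keep bytes of a where a > b
+ */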
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v16hi)__a > (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8si)__a > (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)(__a > __b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_hadd_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phaddw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_hadd_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phaddd256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_hadds_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phaddsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_hsub_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phsubw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_hsub_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phsubd256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_hsubs_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phsubsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maddubs_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaddubsw256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_madd_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaddwd256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaxsb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaxsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaxsd256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epu8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaxub256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaxuw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epu32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaxud256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pminsb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pminsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pminsd256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epu8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pminub256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pminuw256 ((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epu32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pminud256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm256_movemask_epi8(__m256i __a)
+{
+ return __builtin_ia32_pmovmskb256((__v32qi)__a);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepi8_epi16(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxbw256((__v16qi)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepi8_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxbd256((__v16qi)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepi8_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxbq256((__v16qi)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepi16_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxwd256((__v8hi)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepi16_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxwq256((__v8hi)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepi32_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxdq256((__v4si)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepu8_epi16(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxbw256((__v16qi)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepu8_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxbd256((__v16qi)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepu8_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxbq256((__v16qi)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepu16_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxwd256((__v8hi)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepu16_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxwq256((__v8hi)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtepu32_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxdq256((__v4si)__V);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mul_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmuldq256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mulhrs_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mulhi_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mulhi_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmulhw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mullo_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v16hi)__a * (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mullo_epi32 (__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8si)__a * (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mul_epu32(__m256i __a, __m256i __b)
+{
+ return __builtin_ia32_pmuludq256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_or_si256(__m256i __a, __m256i __b)
+{
+ return __a | __b;
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sad_epu8(__m256i __a, __m256i __b)
+{
+ return __builtin_ia32_psadbw256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_shuffle_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pshufb256((__v32qi)__a, (__v32qi)__b);
+}
+
+#define _mm256_shuffle_epi32(a, imm) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v8si)(__m256i)(a), \
+ (__v8si)_mm256_setzero_si256(), \
+ (imm) & 0x3, ((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+ 4 + (((imm) & 0x03) >> 0), \
+ 4 + (((imm) & 0x0c) >> 2), \
+ 4 + (((imm) & 0x30) >> 4), \
+ 4 + (((imm) & 0xc0) >> 6)); })
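+
+/* Worked example (illustrative): the immediate packs four 2-bit source
+ * indices, applied independently to each 128-bit lane. 0x1B is 0b00011011,
+ * i.e. indices 3,2,1,0, which reverses the four dwords within each lane:
+ *
+ *   __m256i r = _mm256_shuffle_epi32(a, 0x1B);
+ */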
+
+#define _mm256_shufflehi_epi16(a, imm) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v16hi)(__m256i)(a), \
+ (__v16hi)_mm256_setzero_si256(), \
+ 0, 1, 2, 3, \
+ 4 + (((imm) & 0x03) >> 0), \
+ 4 + (((imm) & 0x0c) >> 2), \
+ 4 + (((imm) & 0x30) >> 4), \
+ 4 + (((imm) & 0xc0) >> 6), \
+ 8, 9, 10, 11, \
+ 12 + (((imm) & 0x03) >> 0), \
+ 12 + (((imm) & 0x0c) >> 2), \
+ 12 + (((imm) & 0x30) >> 4), \
+ 12 + (((imm) & 0xc0) >> 6)); })
+
+#define _mm256_shufflelo_epi16(a, imm) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v16hi)(__m256i)(a), \
+ (__v16hi)_mm256_setzero_si256(), \
+ (imm) & 0x3,((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+ 4, 5, 6, 7, \
+ 8 + (((imm) & 0x03) >> 0), \
+ 8 + (((imm) & 0x0c) >> 2), \
+ 8 + (((imm) & 0x30) >> 4), \
+ 8 + (((imm) & 0xc0) >> 6), \
+ 12, 13, 14, 15); })
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sign_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psignb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sign_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psignw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sign_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b);
+}
+
+#define _mm256_slli_si256(a, count) __extension__ ({ \
+ (__m256i)__builtin_ia32_pslldqi256((__m256i)(a), (count)*8); })
+
+#define _mm256_bslli_epi128(a, count) _mm256_slli_si256((a), (count))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_slli_epi16(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_psllwi256((__v16hi)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sll_epi16(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_psllw256((__v16hi)__a, (__v8hi)__count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_slli_epi32(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_pslldi256((__v8si)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sll_epi32(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_pslld256((__v8si)__a, (__v4si)__count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_slli_epi64(__m256i __a, int __count)
+{
+ return __builtin_ia32_psllqi256(__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sll_epi64(__m256i __a, __m128i __count)
+{
+ return __builtin_ia32_psllq256(__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srai_epi16(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_psrawi256((__v16hi)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sra_epi16(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_psraw256((__v16hi)__a, (__v8hi)__count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srai_epi32(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_psradi256((__v8si)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sra_epi32(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_psrad256((__v8si)__a, (__v4si)__count);
+}
+
+#define _mm256_srli_si256(a, count) __extension__ ({ \
+ (__m256i)__builtin_ia32_psrldqi256((__m256i)(a), (count)*8); })
+
+#define _mm256_bsrli_epi128(a, count) _mm256_srli_si256((a), (count))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srli_epi16(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_psrlwi256((__v16hi)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srl_epi16(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_psrlw256((__v16hi)__a, (__v8hi)__count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srli_epi32(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_psrldi256((__v8si)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srl_epi32(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_psrld256((__v8si)__a, (__v4si)__count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srli_epi64(__m256i __a, int __count)
+{
+ return __builtin_ia32_psrlqi256(__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srl_epi64(__m256i __a, __m128i __count)
+{
+ return __builtin_ia32_psrlq256(__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sub_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v32qi)__a - (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sub_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v16hi)__a - (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sub_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8si)__a - (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sub_epi64(__m256i __a, __m256i __b)
+{
+ return __a - __b;
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_subs_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psubsb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_subs_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psubsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_subs_epu8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psubusb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_subs_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psubusw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpackhi_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 8, 32+8, 9, 32+9, 10, 32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpackhi_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpackhi_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpackhi_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector(__a, __b, 1, 4+1, 3, 4+3);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpacklo_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpacklo_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpacklo_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_unpacklo_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector(__a, __b, 0, 4+0, 2, 4+2);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_xor_si256(__m256i __a, __m256i __b)
+{
+ return __a ^ __b;
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_stream_load_si256(__m256i const *__V)
+{
+ return (__m256i)__builtin_ia32_movntdqa256((const __v4di *)__V);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_broadcastss_ps(__m128 __X)
+{
+ return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_broadcastsd_pd(__m128d __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 0);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_broadcastss_ps(__m128 __X)
+{
+ return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_broadcastsd_pd(__m128d __X)
+{
+ return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcastsi128_si256(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector(__X, __X, 0, 1, 0, 1);
+}
+
+#define _mm_blend_epi32(V1, V2, M) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v4si)(__m128i)(V1), \
+ (__v4si)(__m128i)(V2), \
+ (((M) & 0x01) ? 4 : 0), \
+ (((M) & 0x02) ? 5 : 1), \
+ (((M) & 0x04) ? 6 : 2), \
+ (((M) & 0x08) ? 7 : 3)); })
+
+#define _mm256_blend_epi32(V1, V2, M) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v8si)(__m256i)(V1), \
+ (__v8si)(__m256i)(V2), \
+ (((M) & 0x01) ? 8 : 0), \
+ (((M) & 0x02) ? 9 : 1), \
+ (((M) & 0x04) ? 10 : 2), \
+ (((M) & 0x08) ? 11 : 3), \
+ (((M) & 0x10) ? 12 : 4), \
+ (((M) & 0x20) ? 13 : 5), \
+ (((M) & 0x40) ? 14 : 6), \
+ (((M) & 0x80) ? 15 : 7)); })
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcastb_epi8(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcastw_epi16(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcastd_epi32(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_broadcastq_epi64(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector(__X, __X, 0, 0, 0, 0);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_broadcastb_epi8(__m128i __X)
+{
+ return (__m128i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_broadcastw_epi16(__m128i __X)
+{
+ return (__m128i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_broadcastd_epi32(__m128i __X)
+{
+ return (__m128i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_broadcastq_epi64(__m128i __X)
+{
+ return (__m128i)__builtin_shufflevector(__X, __X, 0, 0);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutevar8x32_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_permvarsi256((__v8si)__a, (__v8si)__b);
+}
+
+#define _mm256_permute4x64_pd(V, M) __extension__ ({ \
+ (__m256d)__builtin_shufflevector((__v4df)(__m256d)(V), \
+ (__v4df)_mm256_setzero_pd(), \
+ (M) & 0x3, ((M) & 0xc) >> 2, \
+ ((M) & 0x30) >> 4, ((M) & 0xc0) >> 6); })
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_permutevar8x32_ps(__m256 __a, __m256i __b)
+{
+ return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b);
+}
+
+#define _mm256_permute4x64_epi64(V, M) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v4di)(__m256i)(V), \
+ (__v4di)_mm256_setzero_si256(), \
+ (M) & 0x3, ((M) & 0xc) >> 2, \
+ ((M) & 0x30) >> 4, ((M) & 0xc0) >> 6); })
+
+#define _mm256_permute2x128_si256(V1, V2, M) __extension__ ({ \
+ (__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (M)); })
+
+#define _mm256_extracti128_si256(V, M) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v4di)(__m256i)(V), \
+ (__v4di)_mm256_setzero_si256(), \
+ (((M) & 1) ? 2 : 0), \
+ (((M) & 1) ? 3 : 1) ); })
+
+#define _mm256_inserti128_si256(V1, V2, M) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v4di)(__m256i)(V1), \
+ (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \
+ (((M) & 1) ? 0 : 4), \
+ (((M) & 1) ? 1 : 5), \
+ (((M) & 1) ? 4 : 2), \
+ (((M) & 1) ? 5 : 3) ); })
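+
+/* Illustrative sketch (hypothetical usage): the immediate picks which
+ * 128-bit half is read or replaced, so a 256-bit vector can be split and
+ * reassembled:
+ *
+ *   __m128i lo = _mm256_extracti128_si256(v, 0);    // lower half
+ *   __m128i hi = _mm256_extracti128_si256(v, 1);    // upper half
+ *   __m256i w  = _mm256_inserti128_si256(v, lo, 1); // lo into upper half
+ */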
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskload_epi32(int const *__X, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskload_epi64(long long const *__X, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskload_epi32(int const *__X, __m128i __M)
+{
+ return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskload_epi64(long long const *__X, __m128i __M)
+{
+ return (__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y)
+{
+ __builtin_ia32_maskstored256((__v8si *)__X, (__v8si)__M, (__v8si)__Y);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y)
+{
+ __builtin_ia32_maskstoreq256((__v4di *)__X, __M, __Y);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y)
+{
+ __builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y)
+{
+ __builtin_ia32_maskstoreq(( __v2di *)__X, __M, __Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sllv_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sllv_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_sllv_epi64(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psllv4di(__X, __Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sllv_epi64(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psllv2di(__X, __Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srav_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srav_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srlv_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srlv_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srlv_epi64(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrlv4di(__X, __Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srlv_epi64(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrlv2di(__X, __Y);
+}
+
+#define _mm_mask_i32gather_pd(a, m, i, mask, s) __extension__ ({ \
+ (__m128d)__builtin_ia32_gatherd_pd((__v2df)(__m128i)(a), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2df)(__m128d)(mask), (s)); })
+
+#define _mm256_mask_i32gather_pd(a, m, i, mask, s) __extension__ ({ \
+ (__m256d)__builtin_ia32_gatherd_pd256((__v4df)(__m256d)(a), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4df)(__m256d)(mask), (s)); })
+
+#define _mm_mask_i64gather_pd(a, m, i, mask, s) __extension__ ({ \
+ (__m128d)__builtin_ia32_gatherq_pd((__v2df)(__m128d)(a), \
+ (double const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2df)(__m128d)(mask), (s)); })
+
+#define _mm256_mask_i64gather_pd(a, m, i, mask, s) __extension__ ({ \
+ (__m256d)__builtin_ia32_gatherq_pd256((__v4df)(__m256d)(a), \
+ (double const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4df)(__m256d)(mask), (s)); })
+
+#define _mm_mask_i32gather_ps(a, m, i, mask, s) __extension__ ({ \
+ (__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), \
+ (float const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4sf)(__m128)(mask), (s)); })
+
+#define _mm256_mask_i32gather_ps(a, m, i, mask, s) __extension__ ({ \
+ (__m256)__builtin_ia32_gatherd_ps256((__v8sf)(__m256)(a), \
+ (float const *)(m), \
+ (__v8si)(__m256i)(i), \
+ (__v8sf)(__m256)(mask), (s)); })
+
+#define _mm_mask_i64gather_ps(a, m, i, mask, s) __extension__ ({ \
+ (__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), \
+ (float const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v4sf)(__m128)(mask), (s)); })
+
+#define _mm256_mask_i64gather_ps(a, m, i, mask, s) __extension__ ({ \
+ (__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), \
+ (float const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4sf)(__m128)(mask), (s)); })
+
+#define _mm_mask_i32gather_epi32(a, m, i, mask, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherd_d((__v4si)(__m128i)(a), \
+ (int const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4si)(__m128i)(mask), (s)); })
+
+#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) __extension__ ({ \
+ (__m256i)__builtin_ia32_gatherd_d256((__v8si)(__m256i)(a), \
+ (int const *)(m), \
+ (__v8si)(__m256i)(i), \
+ (__v8si)(__m256i)(mask), (s)); })
+
+#define _mm_mask_i64gather_epi32(a, m, i, mask, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherq_d((__v4si)(__m128i)(a), \
+ (int const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v4si)(__m128i)(mask), (s)); })
+
+#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherq_d256((__v4si)(__m128i)(a), \
+ (int const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4si)(__m128i)(mask), (s)); })
+
+#define _mm_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherd_q((__v2di)(__m128i)(a), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2di)(__m128i)(mask), (s)); })
+
+#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \
+ (__m256i)__builtin_ia32_gatherd_q256((__v4di)(__m256i)(a), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4di)(__m256i)(mask), (s)); })
+
+#define _mm_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherq_q((__v2di)(__m128i)(a), \
+ (long long const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2di)(__m128i)(mask), (s)); })
+
+#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \
+ (__m256i)__builtin_ia32_gatherq_q256((__v4di)(__m256i)(a), \
+ (long long const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4di)(__m256i)(mask), (s)); })
+
+#define _mm_i32gather_pd(m, i, s) __extension__ ({ \
+ (__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_undefined_pd(), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
+ _mm_setzero_pd()), \
+ (s)); })
+
+#define _mm256_i32gather_pd(m, i, s) __extension__ ({ \
+ (__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_undefined_pd(), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
+ _mm256_setzero_pd(), \
+ _CMP_EQ_OQ), \
+ (s)); })
+
+#define _mm_i64gather_pd(m, i, s) __extension__ ({ \
+ (__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_undefined_pd(), \
+ (double const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
+ _mm_setzero_pd()), \
+ (s)); })
+
+#define _mm256_i64gather_pd(m, i, s) __extension__ ({ \
+ (__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_undefined_pd(), \
+ (double const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
+ _mm256_setzero_pd(), \
+ _CMP_EQ_OQ), \
+ (s)); })
+
+#define _mm_i32gather_ps(m, i, s) __extension__ ({ \
+ (__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \
+ (float const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
+ _mm_setzero_ps()), \
+ (s)); })
+
+#define _mm256_i32gather_ps(m, i, s) __extension__ ({ \
+ (__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_undefined_ps(), \
+ (float const *)(m), \
+ (__v8si)(__m256i)(i), \
+ (__v8sf)_mm256_cmp_ps(_mm256_setzero_ps(), \
+ _mm256_setzero_ps(), \
+ _CMP_EQ_OQ), \
+ (s)); })
+
+#define _mm_i64gather_ps(m, i, s) __extension__ ({ \
+ (__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_undefined_ps(), \
+ (float const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
+ _mm_setzero_ps()), \
+ (s)); })
+
+#define _mm256_i64gather_ps(m, i, s) __extension__ ({ \
+ (__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_undefined_ps(), \
+ (float const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
+ _mm_setzero_ps()), \
+ (s)); })
+
+#define _mm_i32gather_epi32(m, i, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_undefined_si128(), \
+ (int const *)(m), (__v4si)(__m128i)(i), \
+ (__v4si)_mm_set1_epi32(-1), (s)); })
+
+#define _mm256_i32gather_epi32(m, i, s) __extension__ ({ \
+ (__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_undefined_si256(), \
+ (int const *)(m), (__v8si)(__m256i)(i), \
+ (__v8si)_mm256_set1_epi32(-1), (s)); })
+
+#define _mm_i64gather_epi32(m, i, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_undefined_si128(), \
+ (int const *)(m), (__v2di)(__m128i)(i), \
+ (__v4si)_mm_set1_epi32(-1), (s)); })
+
+#define _mm256_i64gather_epi32(m, i, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_undefined_si128(), \
+ (int const *)(m), (__v4di)(__m256i)(i), \
+ (__v4si)_mm_set1_epi32(-1), (s)); })
+
+#define _mm_i32gather_epi64(m, i, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_undefined_si128(), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2di)_mm_set1_epi64x(-1), (s)); })
+
+#define _mm256_i32gather_epi64(m, i, s) __extension__ ({ \
+ (__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_undefined_si256(), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4di)_mm256_set1_epi64x(-1), (s)); })
+
+#define _mm_i64gather_epi64(m, i, s) __extension__ ({ \
+ (__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_undefined_si128(), \
+ (long long const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2di)_mm_set1_epi64x(-1), (s)); })
+
+#define _mm256_i64gather_epi64(m, i, s) __extension__ ({ \
+ (__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_undefined_si256(), \
+ (long long const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4di)_mm256_set1_epi64x(-1), (s)); })
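+
+/* Illustrative sketch (hypothetical usage): the unmasked gather forms load
+ * each element from base + index * scale, with the scale given in bytes as
+ * the last argument. The table and index values here are hypothetical.
+ *
+ *   int table[64];
+ *   __m256i idx = _mm256_setr_epi32(0, 3, 7, 9, 12, 20, 31, 40);
+ *   __m256i v = _mm256_i32gather_epi32(table, idx, 4); // v[k] = table[idx[k]]
+ */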
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __AVX2INTRIN_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/avx512bwintrin.h b/contrib/llvm/tools/clang/lib/Headers/avx512bwintrin.h
new file mode 100644
index 0000000..f289ed7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/avx512bwintrin.h
@@ -0,0 +1,1542 @@
+/*===------------- avx512bwintrin.h - AVX512BW intrinsics ------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512bwintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512BWINTRIN_H
+#define __AVX512BWINTRIN_H
+
+typedef unsigned int __mmask32;
+typedef unsigned long long __mmask64;
+typedef char __v64qi __attribute__ ((__vector_size__ (64)));
+typedef short __v32hi __attribute__ ((__vector_size__ (64)));
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512bw")))
+
+static __inline __v64qi __DEFAULT_FN_ATTRS
+_mm512_setzero_qi(void) {
+ return (__v64qi){ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
+static __inline __v32hi __DEFAULT_FN_ATTRS
+_mm512_setzero_hi(void) {
+ return (__v32hi){ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
+/* Integer compare */
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epi8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_pcmpeqb512_mask((__v64qi)__a, (__v64qi)__b,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_pcmpeqb512_mask((__v64qi)__a, (__v64qi)__b,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epu8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 0,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epi16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_pcmpeqw512_mask((__v32hi)__a, (__v32hi)__b,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_pcmpeqw512_mask((__v32hi)__a, (__v32hi)__b,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epu16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 0,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 0,
+ __u);
+}
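+
+/* Illustrative sketch (hypothetical usage): the result is an ordinary
+ * 64-bit (or 32-bit) integer with one bit per lane, so plain integer
+ * operations apply to it. Counting equal bytes, for example:
+ *
+ *   __mmask64 k = _mm512_cmpeq_epi8_mask(a, b);
+ *   int matches = __builtin_popcountll(k);
+ */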
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epi8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 5,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epu8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 5,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epi16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 5,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epu16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 5,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epi8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_pcmpgtb512_mask((__v64qi)__a, (__v64qi)__b,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_pcmpgtb512_mask((__v64qi)__a, (__v64qi)__b,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epu8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 6,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epi16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_pcmpgtw512_mask((__v32hi)__a, (__v32hi)__b,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_pcmpgtw512_mask((__v32hi)__a, (__v32hi)__b,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epu16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 6,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmple_epi8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 2,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmple_epu8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 2,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmple_epi16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 2,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmple_epu16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 2,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epi8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 1,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epu8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 1,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epi16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 1,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epu16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 1,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epi8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 4,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epu8_mask(__m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 4,
+ (__mmask64)-1);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) {
+ return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epi16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 4,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epu16_mask(__m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 4,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) {
+ return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 4,
+ __u);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_add_epi8 (__m512i __A, __m512i __B) {
+ return (__m512i) ((__v64qi) __A + (__v64qi) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_add_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_paddb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_add_epi8 (__mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_paddb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sub_epi8 (__m512i __A, __m512i __B) {
+ return (__m512i) ((__v64qi) __A - (__v64qi) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sub_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_psubb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sub_epi8 (__mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_psubb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_add_epi16 (__m512i __A, __m512i __B) {
+ return (__m512i) ((__v32hi) __A + (__v32hi) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_add_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_paddw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_add_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_paddw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sub_epi16 (__m512i __A, __m512i __B) {
+ return (__m512i) ((__v32hi) __A - (__v32hi) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sub_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_psubw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sub_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_psubw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mullo_epi16 (__m512i __A, __m512i __B) {
+ return (__m512i) ((__v32hi) __A * (__v32hi) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mullo_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_pmullw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mullo_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_pmullw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_blend_epi8 (__mmask64 __U, __m512i __A, __m512i __W)
+{
+ return (__m512i) __builtin_ia32_blendmb_512_mask ((__v64qi) __A,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_blend_epi16 (__mmask32 __U, __m512i __A, __m512i __W)
+{
+ return (__m512i) __builtin_ia32_blendmw_512_mask ((__v32hi) __A,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_abs_epi8 (__m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_abs_epi8 (__m512i __W, __mmask64 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_abs_epi8 (__mmask64 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_abs_epi16 (__m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_abs_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_abs_epi16 (__mmask32 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_packs_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_packs_epi32 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_packs_epi32 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi) __W,
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_packs_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packsswb512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_packs_epi16 (__m512i __W, __mmask64 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packsswb512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v64qi) __W,
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_packs_epi16 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packsswb512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_packus_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_packus_epi32 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_packus_epi32 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v32hi) __W,
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_packus_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packuswb512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_packus_epi16 (__m512i __W, __mmask64 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packuswb512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v64qi) __W,
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_packus_epi16 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_packuswb512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_adds_epi8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_adds_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_adds_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_adds_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_adds_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_adds_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_adds_epu8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_adds_epu8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_adds_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_adds_epu16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_adds_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_adds_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_avg_epu8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pavgb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_avg_epu8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pavgb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_avg_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pavgb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_avg_epu16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pavgw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_avg_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pavgw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_avg_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pavgw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epi8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_max_epi8 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_max_epi8 (__m512i __W, __mmask64 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_max_epi16 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_max_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epu8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxub512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_max_epu8 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxub512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_max_epu8 (__m512i __W, __mmask64 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxub512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epu16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_max_epu16 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_max_epu16 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epi8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_min_epi8 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_min_epi8 (__m512i __W, __mmask64 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_min_epi16 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_min_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epu8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminub512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_min_epu8 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminub512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_min_epu8 (__m512i __W, __mmask64 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminub512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epu16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_min_epu16 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_min_epu16 (__m512i __W, __mmask32 __M, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_shuffle_epi8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_shuffle_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_shuffle_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_subs_epi8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_subs_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_subs_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_subs_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_subs_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_subs_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_subs_epu8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_subs_epu8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_subs_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_subs_epu16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_subs_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_subs_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask2_permutex2var_epi16 (__m512i __A, __m512i __I,
+ __mmask32 __U, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermi2varhi512_mask ((__v32hi) __A,
+ (__v32hi) __I /* idx */ ,
+ (__v32hi) __B,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_permutex2var_epi16 (__m512i __A, __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2varhi512_mask ((__v32hi) __I /* idx */,
+ (__v32hi) __A,
+ (__v32hi) __B,
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_permutex2var_epi16 (__m512i __A, __mmask32 __U,
+ __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2varhi512_mask ((__v32hi) __I /* idx */,
+ (__v32hi) __A,
+ (__v32hi) __B,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_permutex2var_epi16 (__mmask32 __U, __m512i __A,
+ __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2varhi512_maskz ((__v32hi) __I /* idx */,
+ (__v32hi) __A,
+ (__v32hi) __B,
+ (__mmask32) __U);
+}
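+
+/* Note (editorial): in the permutex2var intrinsics, each 16-bit index lane
+ * of __I selects from the 64-entry table formed by {__A, __B}: indices
+ * 0-31 pick a word of __A, 32-63 the corresponding word of __B. For
+ * example, __I = {0, 32, 1, 33, ...} interleaves the low words of the
+ * two sources. */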
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mulhrs_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mulhrs_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mulhrs_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mulhi_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mulhi_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mulhi_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mulhi_epu16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mulhi_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mulhi_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maddubs_epi16 (__m512i __X, __m512i __Y) {
+ return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X,
+ (__v64qi) __Y,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_maddubs_epi16 (__m512i __W, __mmask32 __U, __m512i __X,
+ __m512i __Y) {
+ return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X,
+ (__v64qi) __Y,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_maddubs_epi16 (__mmask32 __U, __m512i __X, __m512i __Y) {
+ return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X,
+ (__v64qi) __Y,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_madd_epi16 (__m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v16si) _mm512_setzero_si512(),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_madd_epi16 (__m512i __W, __mmask16 __U, __m512i __A,
+ __m512i __B) {
+ return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_madd_epi16 (__mmask16 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v16si) _mm512_setzero_si512(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtsepi16_epi8 (__m512i __A) {
+ return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
+ (__v32qi)_mm256_setzero_si256(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
+ (__v32qi)__O,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtsepi16_epi8 (__mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
+ (__v32qi) _mm256_setzero_si256(),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtusepi16_epi8 (__m512i __A) {
+ return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
+ (__v32qi) _mm256_setzero_si256(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
+ (__v32qi) __O,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtusepi16_epi8 (__mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
+ (__v32qi) _mm256_setzero_si256(),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtepi16_epi8 (__m512i __A) {
+ return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
+ (__v32qi) _mm256_setzero_si256(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
+ (__v32qi) __O,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi16_epi8 (__mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
+ (__v32qi) _mm256_setzero_si256(),
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpackhi_epi8 (__m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpckhbw512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpackhi_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B) {
+ return (__m512i) __builtin_ia32_punpckhbw512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpackhi_epi8 (__mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpckhbw512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpackhi_epi16 (__m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpckhwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpackhi_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B) {
+ return (__m512i) __builtin_ia32_punpckhwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpackhi_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpckhwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpacklo_epi8 (__m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpcklbw512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpacklo_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B) {
+ return (__m512i) __builtin_ia32_punpcklbw512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpacklo_epi8 (__mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpcklbw512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpacklo_epi16 (__m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpcklwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpacklo_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B) {
+ return (__m512i) __builtin_ia32_punpcklwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpacklo_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpcklwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+#define _mm512_cmp_epi8_mask(a, b, p) __extension__ ({ \
+ (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
+ (__v64qi)(__m512i)(b), \
+ (p), (__mmask64)-1); })
+
+#define _mm512_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \
+ (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
+ (__v64qi)(__m512i)(b), \
+ (p), (__mmask64)(m)); })
+
+#define _mm512_cmp_epu8_mask(a, b, p) __extension__ ({ \
+ (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
+ (__v64qi)(__m512i)(b), \
+ (p), (__mmask64)-1); })
+
+#define _mm512_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \
+ (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
+ (__v64qi)(__m512i)(b), \
+ (p), (__mmask64)(m)); })
+
+#define _mm512_cmp_epi16_mask(a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
+ (__v32hi)(__m512i)(b), \
+ (p), (__mmask32)-1); })
+
+#define _mm512_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
+ (__v32hi)(__m512i)(b), \
+ (p), (__mmask32)(m)); })
+
+#define _mm512_cmp_epu16_mask(a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
+ (__v32hi)(__m512i)(b), \
+ (p), (__mmask32)-1); })
+
+#define _mm512_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
+ (__v32hi)(__m512i)(b), \
+ (p), (__mmask32)(m)); })
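+
+/* Note (editorial): the predicate argument p of the cmp macros uses the
+ * standard AVX-512 integer comparison encoding: 0=EQ, 1=LT, 2=LE, 4=NE,
+ * 5=NLT (>=), 6=NLE (>); where available, the _MM_CMPINT_* constants are
+ * the readable spelling of these values. */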
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
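
A minimal usage sketch for the AVX512BW intrinsics above (illustrative, not
part of the diff), assuming an AVX-512BW target (e.g. -mavx512bw);
add_where_less is a hypothetical name:

    #include <immintrin.h>

    /* Lanes where a < b (as signed bytes) become a + b; others keep a. */
    static __m512i add_where_less(__m512i a, __m512i b)
    {
      __mmask64 lt = _mm512_cmplt_epi8_mask(a, b);
      return _mm512_mask_add_epi8(a, lt, a, b);
    }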
diff --git a/contrib/llvm/tools/clang/lib/Headers/avx512cdintrin.h b/contrib/llvm/tools/clang/lib/Headers/avx512cdintrin.h
new file mode 100644
index 0000000..3894b29
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/avx512cdintrin.h
@@ -0,0 +1,131 @@
+/*===------------- avx512cdintrin.h - AVX512CD intrinsics ------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512cdintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512CDINTRIN_H
+#define __AVX512CDINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512cd")))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_conflict_epi64 (__m512i __A)
+{
+ return (__m512i) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A,
+ (__v8di) _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_conflict_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_conflict_epi64 (__mmask8 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A,
+ (__v8di) _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_conflict_epi32 (__m512i __A)
+{
+ return (__m512i) __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A,
+ (__v16si) _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_conflict_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_conflict_epi32 (__mmask16 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A,
+ (__v16si) _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_lzcnt_epi32 (__m512i __A)
+{
+ return (__m512i) __builtin_ia32_vplzcntd_512_mask ((__v16si) __A,
+ (__v16si) _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_lzcnt_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vplzcntd_512_mask ((__v16si) __A,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_lzcnt_epi32 (__mmask16 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vplzcntd_512_mask ((__v16si) __A,
+ (__v16si) _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_lzcnt_epi64 (__m512i __A)
+{
+ return (__m512i) __builtin_ia32_vplzcntq_512_mask ((__v8di) __A,
+ (__v8di) _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_lzcnt_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vplzcntq_512_mask ((__v8di) __A,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_lzcnt_epi64 (__mmask8 __U, __m512i __A)
+{
+ return (__m512i) __builtin_ia32_vplzcntq_512_mask ((__v8di) __A,
+ (__v8di) _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+#undef __DEFAULT_FN_ATTRS
+
+#endif
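
A minimal usage sketch for the AVX512CD intrinsics above (illustrative, not
part of the diff), assuming -mavx512f -mavx512cd; floor_log2_epi32 is a
hypothetical name, and the result is meaningful only for nonzero lanes
(lzcnt of 0 is defined as 32):

    #include <immintrin.h>

    /* Per-lane floor(log2(x)) = 31 - lzcnt(x), valid for x != 0. */
    static __m512i floor_log2_epi32(__m512i v)
    {
      __m512i lz = _mm512_lzcnt_epi32(v);
      return _mm512_sub_epi32(_mm512_set1_epi32(31), lz);
    }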
diff --git a/contrib/llvm/tools/clang/lib/Headers/avx512dqintrin.h b/contrib/llvm/tools/clang/lib/Headers/avx512dqintrin.h
new file mode 100644
index 0000000..afee490
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/avx512dqintrin.h
@@ -0,0 +1,778 @@
+/*===---- avx512dqintrin.h - AVX512DQ intrinsics ---------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512dqintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512DQINTRIN_H
+#define __AVX512DQINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512dq")))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mullo_epi64 (__m512i __A, __m512i __B) {
+ return (__m512i) ((__v8di) __A * (__v8di) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mullo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mullo_epi64 (__mmask8 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_xor_pd (__m512d __A, __m512d __B) {
+ return (__m512d) ((__v8di) __A ^ (__v8di) __B);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_xor_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_xor_pd (__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_xor_ps (__m512 __A, __m512 __B) {
+ return (__m512) ((__v16si) __A ^ (__v16si) __B);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_xor_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_xor_ps (__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_or_pd (__m512d __A, __m512d __B) {
+ return (__m512d) ((__v8di) __A | (__v8di) __B);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_or_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_or_pd (__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_or_ps (__m512 __A, __m512 __B) {
+ return (__m512) ((__v16si) __A | (__v16si) __B);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_or_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_or_ps (__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_and_pd (__m512d __A, __m512d __B) {
+ return (__m512d) ((__v8di) __A & (__v8di) __B);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_and_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_and_pd (__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_and_ps (__m512 __A, __m512 __B) {
+ return (__m512) ((__v16si) __A & (__v16si) __B);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_and_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_and_ps (__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_andnot_pd (__m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_andnot_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_andnot_pd (__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_andnot_ps (__m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_andnot_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_andnot_ps (__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtpd_epi64 (__m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtpd_epi64 (__mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundpd_epi64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundpd_epi64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundpd_epi64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R); })
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtpd_epu64 (__m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtpd_epu64 (__mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundpd_epu64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundpd_epu64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundpd_epu64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtps_epi64 (__m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtps_epi64 (__mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundps_epi64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundps_epi64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundps_epi64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtps_epu64 (__m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtps_epu64 (__mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundps_epu64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundps_epu64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundps_epu64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_cvtepi64_pd (__m512i __A) {
+ return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
+ (__v8df) _mm512_setzero_pd(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi64_pd (__m512d __W, __mmask8 __U, __m512i __A) {
+ return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi64_pd (__mmask8 __U, __m512i __A) {
+ return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
+ (__v8df) _mm512_setzero_pd(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepi64_pd(__A, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundepi64_pd(__W, __U, __A, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A, \
+ (__v8df) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundepi64_pd(__U, __A, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_cvtepi64_ps (__m512i __A) {
+ return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
+ (__v8sf) _mm256_setzero_ps(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi64_ps (__m256 __W, __mmask8 __U, __m512i __A) {
+ return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
+ (__v8sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi64_ps (__mmask8 __U, __m512i __A) {
+ return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
+ (__v8sf) _mm256_setzero_ps(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepi64_ps(__A, __R) __extension__ ({ \
+ (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundepi64_ps(__W, __U, __A, __R) __extension__ ({ \
+ (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, \
+ (__v8sf) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundepi64_ps(__U, __A, __R) __extension__ ({ \
+ (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) __U, __R);})
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttpd_epi64 (__m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttpd_epi64 (__mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundpd_epi64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvtt_roundpd_epi64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvtt_roundpd_epi64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttpd_epu64 (__m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttpd_epu64 (__mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundpd_epu64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvtt_roundpd_epu64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvtt_roundpd_epu64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttps_epi64 (__m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttps_epi64 (__mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundps_epi64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvtt_roundps_epi64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvtt_roundps_epi64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttps_epu64 (__m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttps_epu64 (__mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundps_epu64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvtt_roundps_epu64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvtt_roundps_epu64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
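+
+/* The _cvtt_ ("truncate") variants always round toward zero, while the plain
+ * _cvt_ variants honor the current rounding mode (or the explicit __R
+ * argument in the _round_ forms).  Illustrative sketch, assuming AVX512DQ
+ * hardware:
+ *   __m512d v = _mm512_set1_pd(2.7);
+ *   __m512i t = _mm512_cvttpd_epi64(v);  // 2 in every lane (truncated)
+ */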
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_cvtepu64_pd (__m512i __A) {
+ return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
+ (__v8df) _mm512_setzero_pd(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu64_pd (__m512d __W, __mmask8 __U, __m512i __A) {
+ return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu64_pd (__mmask8 __U, __m512i __A) {
+ return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
+ (__v8df) _mm512_setzero_pd(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepu64_pd(__A, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundepu64_pd(__W, __U, __A, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A, \
+ (__v8df) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundepu64_pd(__U, __A, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_cvtepu64_ps (__m512i __A) {
+ return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
+ (__v8sf) _mm256_setzero_ps(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu64_ps (__m256 __W, __mmask8 __U, __m512i __A) {
+ return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
+ (__v8sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu64_ps (__mmask8 __U, __m512i __A) {
+ return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
+ (__v8sf) _mm256_setzero_ps(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepu64_ps(__A, __R) __extension__ ({ \
+ (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundepu64_ps(__W, __U, __A, __R) __extension__ ({ \
+ (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, \
+ (__v8sf) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundepu64_ps(__U, __A, __R) __extension__ ({ \
+ (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) __U, __R);})
+
+#define _mm512_range_pd(__A, __B, __C) __extension__ ({ \
+ (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C,\
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, \
+ _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_mask_range_pd(__W, __U, __A, __B, __C) __extension__ ({ \
+ (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C,\
+ (__v8df) __W, (__mmask8) __U, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_maskz_range_pd(__U, __A, __B, __C) __extension__ ({ \
+ (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, \
+ _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_range_round_pd(__A, __B, __C, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_range_round_pd(__W, __U, __A, __B, __C, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C, \
+ (__v8df) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_range_round_pd(__U, __A, __B, __C, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+#define _mm512_range_ps(__A, __B, __C) __extension__ ({ \
+ (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B, __C, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16) -1, \
+ _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_mask_range_ps(__W, __U, __A, __B, __C) __extension__ ({ \
+ (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ __C, (__v16sf) __W, (__mmask16) __U, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_maskz_range_ps(__U, __A, __B, __C) __extension__ ({ \
+ (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ __C, (__v16sf) _mm512_setzero_ps(), (__mmask16) __U, \
+ _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_range_round_ps(__A, __B, __C, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ __C, (__v16sf) _mm512_setzero_ps(), (__mmask16) -1, __R);})
+
+#define _mm512_mask_range_round_ps(__W, __U, __A, __B, __C, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ __C, (__v16sf) __W, (__mmask16) __U, __R);})
+
+#define _mm512_maskz_range_round_ps(__U, __A, __B, __C, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ __C, (__v16sf) _mm512_setzero_ps(), (__mmask16) __U, __R);})
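+
+/* For the range family, the __C immediate selects the VRANGE operation:
+ * per Intel's documentation, imm[1:0] chooses min / max / absolute-min /
+ * absolute-max, and imm[3:2] controls the sign of the result. */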
+
+#define _mm512_reduce_pd(__A, __B) __extension__ ({ \
+ (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_mask_reduce_pd(__W, __U, __A, __B) __extension__ ({ \
+ (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
+ (__v8df) __W, (__mmask8) __U, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_maskz_reduce_pd(__U, __A, __B) __extension__ ({ \
+ (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_reduce_ps(__A, __B) __extension__ ({ \
+ (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16) -1, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_mask_reduce_ps(__W, __U, __A, __B) __extension__ ({ \
+ (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, \
+ (__v16sf) __W, (__mmask16) __U, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_maskz_reduce_ps(__U, __A, __B) __extension__ ({ \
+ (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16) __U, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_reduce_round_pd(__A, __B, __R) __extension__ ({\
+ (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_reduce_round_pd(__W, __U, __A, __B, __R) __extension__ ({\
+ (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
+ (__v8df) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_reduce_round_pd(__U, __A, __B, __R) __extension__ ({\
+ (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+#define _mm512_reduce_round_ps(__A, __B, __R) __extension__ ({\
+ (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16) -1, __R);})
+
+#define _mm512_mask_reduce_round_ps(__W, __U, __A, __B, __R) __extension__ ({\
+ (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, \
+ (__v16sf) __W, (__mmask16) __U, __R);})
+
+#define _mm512_maskz_reduce_round_ps(__U, __A, __B, __R) __extension__ ({\
+ (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16) __U, __R);})
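+
+/* Note: despite the name, VREDUCE is not a horizontal reduction; it returns
+ * the "reduced argument" x - RoundScale(x) at the scale encoded in the __B
+ * immediate, i.e. roughly the low-order fraction bits of each element. */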
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Headers/avx512erintrin.h b/contrib/llvm/tools/clang/lib/Headers/avx512erintrin.h
new file mode 100644
index 0000000..40a9121
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/avx512erintrin.h
@@ -0,0 +1,285 @@
+/*===---- avx512erintrin.h - AVX512ER intrinsics ---------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512erintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512ERINTRIN_H
+#define __AVX512ERINTRIN_H
+
+// exp2a23
+#define _mm512_exp2a23_round_pd(A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (R)); })
+
+#define _mm512_mask_exp2a23_round_pd(S, M, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(S), \
+ (__mmask8)(M), (R)); })
+
+#define _mm512_maskz_exp2a23_round_pd(M, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(M), (R)); })
+
+#define _mm512_exp2a23_pd(A) \
+ _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_exp2a23_pd(S, M, A) \
+ _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_exp2a23_pd(M, A) \
+ _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_exp2a23_round_ps(A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (R)); })
+
+#define _mm512_mask_exp2a23_round_ps(S, M, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(S), \
+ (__mmask16)(M), (R)); })
+
+#define _mm512_maskz_exp2a23_round_ps(M, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(M), (R)); })
+
+#define _mm512_exp2a23_ps(A) \
+ _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_exp2a23_ps(S, M, A) \
+ _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_exp2a23_ps(M, A) \
+ _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
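+
+/* exp2a23 approximates 2^x to roughly 23 bits of relative accuracy (hence
+ * the name).  Illustrative sketch, assuming AVX512ER hardware:
+ *   __m512 x = _mm512_set1_ps(3.0f);
+ *   __m512 y = _mm512_exp2a23_ps(x);  // each lane ~= 8.0f
+ */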
+
+// rsqrt28
+#define _mm512_rsqrt28_round_pd(A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (R)); })
+
+#define _mm512_mask_rsqrt28_round_pd(S, M, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(S), \
+ (__mmask8)(M), (R)); })
+
+#define _mm512_maskz_rsqrt28_round_pd(M, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(M), (R)); })
+
+#define _mm512_rsqrt28_pd(A) \
+ _mm512_rsqrt28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_rsqrt28_pd(S, M, A) \
+ _mm512_mask_rsqrt28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_rsqrt28_pd(M, A) \
+ _mm512_maskz_rsqrt28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_rsqrt28_round_ps(A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (R)); })
+
+#define _mm512_mask_rsqrt28_round_ps(S, M, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(S), \
+ (__mmask16)(M), (R)); })
+
+#define _mm512_maskz_rsqrt28_round_ps(M, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(M), (R)); })
+
+#define _mm512_rsqrt28_ps(A) \
+ _mm512_rsqrt28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_rsqrt28_ps(S, M, A) \
+ _mm512_mask_rsqrt28_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_rsqrt28_ps(M, A) \
+ _mm512_maskz_rsqrt28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_rsqrt28_round_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rsqrt28ss_round((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (R)); })
+
+#define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rsqrt28ss_round((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(S), \
+ (__mmask8)(M), (R)); })
+
+#define _mm_maskz_rsqrt28_round_ss(M, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rsqrt28ss_round((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(M), (R)); })
+
+#define _mm_rsqrt28_ss(A, B) \
+ _mm_rsqrt28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_rsqrt28_ss(S, M, A, B) \
+ _mm_mask_rsqrt28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_rsqrt28_ss(M, A, B) \
+ _mm_maskz_rsqrt28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_rsqrt28_round_sd(A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rsqrt28sd_round((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (R)); })
+
+#define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rsqrt28sd_round((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(S), \
+ (__mmask8)(M), (R)); })
+
+#define _mm_maskz_rsqrt28_round_sd(M, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rsqrt28sd_round((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(M), (R)); })
+
+#define _mm_rsqrt28_sd(A, B) \
+ _mm_rsqrt28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_rsqrt28_sd(S, M, A, B) \
+ _mm_mask_rsqrt28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_rsqrt28_sd(M, A, B) \
+ _mm_maskz_rsqrt28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
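+
+/* Per Intel's AVX512ER documentation, the rsqrt28/rcp28 approximations
+ * target a maximum relative error of about 2^-28, compared with the 2^-14
+ * rsqrt14/rcp14 forms provided by AVX512F. */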
+
+// rcp28
+#define _mm512_rcp28_round_pd(A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (R)); })
+
+#define _mm512_mask_rcp28_round_pd(S, M, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(S), \
+ (__mmask8)(M), (R)); })
+
+#define _mm512_maskz_rcp28_round_pd(M, A, R) __extension__ ({ \
+ (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(M), (R)); })
+
+#define _mm512_rcp28_pd(A) \
+ _mm512_rcp28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_rcp28_pd(S, M, A) \
+ _mm512_mask_rcp28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_rcp28_pd(M, A) \
+ _mm512_maskz_rcp28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_rcp28_round_ps(A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (R)); })
+
+#define _mm512_mask_rcp28_round_ps(S, M, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(S), \
+ (__mmask16)(M), (R)); })
+
+#define _mm512_maskz_rcp28_round_ps(M, A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(M), (R)); })
+
+#define _mm512_rcp28_ps(A) \
+ _mm512_rcp28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_rcp28_ps(S, M, A) \
+ _mm512_mask_rcp28_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_rcp28_ps(M, A) \
+ _mm512_maskz_rcp28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_rcp28_round_ss(A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rcp28ss_round((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (R)); })
+
+#define _mm_mask_rcp28_round_ss(S, M, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rcp28ss_round((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(S), \
+ (__mmask8)(M), (R)); })
+
+#define _mm_maskz_rcp28_round_ss(M, A, B, R) __extension__ ({ \
+ (__m128)__builtin_ia32_rcp28ss_round((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(M), (R)); })
+
+#define _mm_rcp28_ss(A, B) \
+ _mm_rcp28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_rcp28_ss(S, M, A, B) \
+ _mm_mask_rcp28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_rcp28_ss(M, A, B) \
+ _mm_maskz_rcp28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_rcp28_round_sd(A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rcp28sd_round((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (R)); })
+
+#define _mm_mask_rcp28_round_sd(S, M, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rcp28sd_round((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(S), \
+ (__mmask8)(M), (R)); })
+
+#define _mm_maskz_rcp28_round_sd(M, A, B, R) __extension__ ({ \
+ (__m128d)__builtin_ia32_rcp28sd_round((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(M), (R)); })
+
+#define _mm_rcp28_sd(A, B) \
+ _mm_rcp28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_rcp28_sd(S, M, A, B) \
+ _mm_mask_rcp28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_rcp28_sd(M, A, B) \
+ _mm_maskz_rcp28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#endif // __AVX512ERINTRIN_H
diff --git a/contrib/llvm/tools/clang/lib/Headers/avx512fintrin.h b/contrib/llvm/tools/clang/lib/Headers/avx512fintrin.h
new file mode 100644
index 0000000..8dcdc71
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/avx512fintrin.h
@@ -0,0 +1,3074 @@
+/*===---- avx512fintrin.h - AVX512F intrinsics -----------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512fintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512FINTRIN_H
+#define __AVX512FINTRIN_H
+
+typedef double __v8df __attribute__((__vector_size__(64)));
+typedef float __v16sf __attribute__((__vector_size__(64)));
+typedef long long __v8di __attribute__((__vector_size__(64)));
+typedef int __v16si __attribute__((__vector_size__(64)));
+
+typedef float __m512 __attribute__((__vector_size__(64)));
+typedef double __m512d __attribute__((__vector_size__(64)));
+typedef long long __m512i __attribute__((__vector_size__(64)));
+
+typedef unsigned char __mmask8;
+typedef unsigned short __mmask16;
+
+/* Rounding mode macros. */
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_NEG_INF 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_ZERO 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
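+
+/* These values select the explicit rounding override accepted by the
+ * *_round_* intrinsics below; _MM_FROUND_CUR_DIRECTION defers to MXCSR.
+ * Illustrative sketch (using an intrinsic defined later in this header):
+ *   __m512d s = _mm512_add_round_pd(a, b, _MM_FROUND_TO_ZERO);
+ */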
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f")))
+
+/* Create vectors with repeated elements */
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_setzero_si512(void)
+{
+ return (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_undefined_pd(void)
+{
+ return (__m512d)__builtin_ia32_undef512();
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_undefined(void)
+{
+ return (__m512)__builtin_ia32_undef512();
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_undefined_ps(void)
+{
+ return (__m512)__builtin_ia32_undef512();
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_undefined_epi32(void)
+{
+ return (__m512i)__builtin_ia32_undef512();
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_set1_epi32(__mmask16 __M, int __A)
+{
+ return (__m512i) __builtin_ia32_pbroadcastd512_gpr_mask (__A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_set1_epi64(__mmask8 __M, long long __A)
+{
+#ifdef __x86_64__
+ return (__m512i) __builtin_ia32_pbroadcastq512_gpr_mask (__A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ __M);
+#else
+ return (__m512i) __builtin_ia32_pbroadcastq512_mem_mask (__A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ __M);
+#endif
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_setzero_ps(void)
+{
+ return (__m512){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
+}
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_setzero_pd(void)
+{
+ return (__m512d){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_set1_ps(float __w)
+{
+ return (__m512){ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w };
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_set1_pd(double __w)
+{
+ return (__m512d){ __w, __w, __w, __w, __w, __w, __w, __w };
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_set1_epi32(int __s)
+{
+ return (__m512i)(__v16si){ __s, __s, __s, __s, __s, __s, __s, __s,
+ __s, __s, __s, __s, __s, __s, __s, __s };
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_set1_epi64(long long __d)
+{
+ return (__m512i)(__v8di){ __d, __d, __d, __d, __d, __d, __d, __d };
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_broadcastss_ps(__m128 __X)
+{
+ float __f = __X[0];
+ return (__v16sf){ __f, __f, __f, __f,
+ __f, __f, __f, __f,
+ __f, __f, __f, __f,
+ __f, __f, __f, __f };
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_broadcastsd_pd(__m128d __X)
+{
+ double __d = __X[0];
+ return (__v8df){ __d, __d, __d, __d,
+ __d, __d, __d, __d };
+}
+
+/* Cast between vector types */
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_castpd256_pd512(__m256d __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_castps256_ps512(__m256 __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7,
+ -1, -1, -1, -1, -1, -1, -1, -1);
+}
+
+static __inline __m128d __DEFAULT_FN_ATTRS
+_mm512_castpd512_pd128(__m512d __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1);
+}
+
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm512_castps512_ps128(__m512 __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
+}
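+
+/* These casts are bit-pattern reinterpretations and cost no instructions.
+ * In the widening direction (256 -> 512) the -1 shuffle indices leave the
+ * upper elements undefined, so callers must not assume they are zeroed. */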
+
+/* Bitwise operators */
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_and_epi32(__m512i __a, __m512i __b)
+{
+ return __a & __b;
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_and_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_pandd512_mask((__v16si) __a,
+ (__v16si) __b,
+ (__v16si) __src,
+ (__mmask16) __k);
+}
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_and_epi32(__mmask16 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_pandd512_mask((__v16si) __a,
+ (__v16si) __b,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __k);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_and_epi64(__m512i __a, __m512i __b)
+{
+ return __a & __b;
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_and_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_pandq512_mask ((__v8di) __a,
+ (__v8di) __b,
+ (__v8di) __src,
+ (__mmask8) __k);
+}
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_and_epi64(__mmask8 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_pandq512_mask ((__v8di) __a,
+ (__v8di) __b,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __k);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_andnot_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pandnd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_andnot_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pandnd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_andnot_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pandnd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_andnot_epi64 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pandnq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_andnot_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pandnq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di) __W, __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_andnot_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pandnq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ __U);
+}
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_or_epi32(__m512i __a, __m512i __b)
+{
+ return __a | __b;
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_or_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_pord512_mask((__v16si) __a,
+ (__v16si) __b,
+ (__v16si) __src,
+ (__mmask16) __k);
+}
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_or_epi32(__mmask16 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_pord512_mask((__v16si) __a,
+ (__v16si) __b,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __k);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_or_epi64(__m512i __a, __m512i __b)
+{
+ return __a | __b;
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_or_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_porq512_mask ((__v8di) __a,
+ (__v8di) __b,
+ (__v8di) __src,
+ (__mmask8) __k);
+}
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_or_epi64(__mmask8 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_porq512_mask ((__v8di) __a,
+ (__v8di) __b,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __k);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_xor_epi32(__m512i __a, __m512i __b)
+{
+ return __a ^ __b;
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_xor_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_pxord512_mask((__v16si) __a,
+ (__v16si) __b,
+ (__v16si) __src,
+ (__mmask16) __k);
+}
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_xor_epi32(__mmask16 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_pxord512_mask((__v16si) __a,
+ (__v16si) __b,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __k);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_xor_epi64(__m512i __a, __m512i __b)
+{
+ return __a ^ __b;
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_xor_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_pxorq512_mask ((__v8di) __a,
+ (__v8di) __b,
+ (__v8di) __src,
+ (__mmask8) __k);
+}
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_xor_epi64(__mmask8 __k, __m512i __a, __m512i __b)
+{
+ return (__m512i) __builtin_ia32_pxorq512_mask ((__v8di) __a,
+ (__v8di) __b,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __k);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_and_si512(__m512i __a, __m512i __b)
+{
+ return __a & __b;
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_or_si512(__m512i __a, __m512i __b)
+{
+ return __a | __b;
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_xor_si512(__m512i __a, __m512i __b)
+{
+ return __a ^ __b;
+}
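+
+/* Masking convention used throughout: for _mask_ variants, result elements
+ * whose mask bit is 0 are taken from the passthrough operand (__src/__W);
+ * for _maskz_ variants they are zeroed instead.  Illustrative sketch:
+ *   __m512i r = _mm512_maskz_and_epi32(0x00FF, a, b); // lanes 8-15 zeroed
+ */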
+/* Arithmetic */
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_add_pd(__m512d __a, __m512d __b)
+{
+ return __a + __b;
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_add_ps(__m512 __a, __m512 __b)
+{
+ return __a + __b;
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_mul_pd(__m512d __a, __m512d __b)
+{
+ return __a * __b;
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_mul_ps(__m512 __a, __m512 __b)
+{
+ return __a * __b;
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_sub_pd(__m512d __a, __m512d __b)
+{
+ return __a - __b;
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_sub_ps(__m512 __a, __m512 __b)
+{
+ return __a - __b;
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_add_epi64 (__m512i __A, __m512i __B)
+{
+ return (__m512i) ((__v8di) __A + (__v8di) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_add_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_add_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sub_epi64 (__m512i __A, __m512i __B)
+{
+ return (__m512i) ((__v8di) __A - (__v8di) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sub_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sub_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_add_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) ((__v16si) __A + (__v16si) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_add_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_add_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_paddd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_sub_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) ((__v16si) __A - (__v16si) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_sub_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_sub_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psubd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_max_pd(__m512d __A, __m512d __B)
+{
+ return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_max_ps(__m512 __A, __m512 __B)
+{
+ return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_max_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_maxss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_max_ss(__mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_maxss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_max_round_ss(__A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_maxss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+
+#define _mm_mask_max_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_maxss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_max_round_ss(__U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_maxss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U,__R); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_max_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_max_sd(__mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_max_round_sd(__A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm_mask_max_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_max_round_sd(__U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U,__R); })
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epi32(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epu32(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxud512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epi64(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxsq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_max_epu64(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmaxuq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_min_pd(__m512d __A, __m512d __B)
+{
+ return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_min_ps(__m512 __A, __m512 __B)
+{
+ return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_min_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_minss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_min_ss(__mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_minss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_min_round_ss(__A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_minss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+
+#define _mm_mask_min_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_minss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_min_round_ss(__U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_minss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U,__R); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_min_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_minsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_min_sd(__mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_minsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_min_round_sd(__A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_minsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm_mask_min_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_minsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_min_round_sd(__U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_minsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U,__R); })
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epi32(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsd512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epu32(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminud512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epi64(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminsq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_min_epu64(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pminuq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mul_epi32(__m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mul_epi32 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v8di) __W, __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mul_epi32 (__mmask8 __M, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mul_epu32(__m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mul_epu32 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v8di) __W, __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mul_epu32 (__mmask8 __M, __m512i __X, __m512i __Y)
+{
+ return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X,
+ (__v16si) __Y,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ __M);
+}
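+
+/* As with PMULDQ/PMULUDQ, mul_epi32/mul_epu32 multiply the low 32 bits of
+ * each 64-bit lane (sign-extended for epi32, zero-extended for epu32) into
+ * full 64-bit products, which is why the operands are __v16si but the
+ * result is __v8di. */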
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mullo_epi32 (__m512i __A, __m512i __B)
+{
+ return (__m512i) ((__v16si) __A * (__v16si) __B);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mullo_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulld512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ __M);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mullo_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulld512_mask ((__v16si) __A,
+ (__v16si) __B,
+ (__v16si) __W, __M);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_sqrt_pd(__m512d __a)
+{
+ return (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)__a,
+ (__v8df) _mm512_setzero_pd (),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_sqrt_ps(__m512 __a)
+{
+ return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)__a,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_rsqrt14_pd(__m512d __A)
+{
+ return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_rsqrt14_ps(__m512 __A)
+{
+ return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rsqrt14_ss(__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rsqrt14ss ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_rsqrt14_sd(__m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rsqrt14sd ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_rcp14_pd(__m512d __A)
+{
+ return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_rcp14_ps(__m512 __A)
+{
+ return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1);
+}
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rcp14_ss(__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_rcp14ss ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_rcp14_sd(__m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_rcp14sd ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
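+
+/* rsqrt14/rcp14 are fast approximations with a documented maximum relative
+ * error of about 2^-14; add Newton-Raphson refinement steps when more
+ * precision is required. */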
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_floor_ps(__m512 __A)
+{
+ return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
+ _MM_FROUND_FLOOR,
+ (__v16sf) __A, -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_floor_pd(__m512d __A)
+{
+ return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
+ _MM_FROUND_FLOOR,
+ (__v8df) __A, -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_ceil_ps(__m512 __A)
+{
+ return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
+ _MM_FROUND_CEIL,
+ (__v16sf) __A, -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_ceil_pd(__m512d __A)
+{
+ return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
+ _MM_FROUND_CEIL,
+ (__v8df) __A, -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
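+
+/* floor/ceil are built on VRNDSCALE: the _MM_FROUND_FLOOR / _MM_FROUND_CEIL
+ * immediates (from smmintrin.h) select the rounding direction, so the
+ * trailing rounding-mode argument stays _MM_FROUND_CUR_DIRECTION. */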
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_abs_epi64(__m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsq512_mask ((__v8di) __A,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) -1);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_abs_epi32(__m512i __A)
+{
+ return (__m512i) __builtin_ia32_pabsd512_mask ((__v16si) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_add_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_addss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_add_ss(__mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_addss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_add_round_ss(__A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_addss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+
+#define _mm_mask_add_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_addss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_add_round_ss(__U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_addss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U,__R); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_add_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_addsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_add_sd(__mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_addsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+#define _mm_add_round_sd(__A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_addsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm_mask_add_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_addsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_add_round_sd(__U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_addsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U,__R); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_add_pd(__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_add_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_add_round_pd(__A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm512_mask_add_round_pd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_addpd512_mask((__v8df) __A, (__v8df) __B, \
+ (__v8df) __W, (__mmask8) __U, __R); })
+
+#define _mm512_maskz_add_round_pd(__U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R); })
+
+#define _mm512_add_round_ps(__A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16) -1, __R); })
+
+#define _mm512_mask_add_round_ps(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) __W, (__mmask16)__U, __R); })
+
+#define _mm512_maskz_add_round_ps(__U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16)__U, __R); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_sub_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_subss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_sub_ss(__mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_subss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+#define _mm_sub_round_ss(__A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_subss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+
+#define _mm_mask_sub_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_subss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_sub_round_ss(__U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_subss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U,__R); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_sub_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_subsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_sub_sd(__mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_subsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_sub_round_sd(__A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_subsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm_mask_sub_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_subsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_sub_round_sd(__U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_subsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U,__R); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_sub_pd(__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_sub_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_sub_round_pd(__A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A, (__v8df) __B,\
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm512_mask_sub_round_pd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) __W, (__mmask8) __U, __R); })
+
+#define _mm512_maskz_sub_round_pd(__U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+#define _mm512_sub_round_ps(__A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps (), (__mmask16) -1, __R);})
+
+#define _mm512_mask_sub_round_ps(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) __W, (__mmask16) __U, __R); })
+
+#define _mm512_maskz_sub_round_ps(__U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps (), (__mmask16) __U, __R);})
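+
+/* Usage sketch (illustrative): the *_round_* forms take an explicit static
+ * rounding mode in place of _MM_FROUND_CUR_DIRECTION; suppressing exceptions
+ * (SAE) is requested by OR-ing in _MM_FROUND_NO_EXC. The _MM_FROUND_*
+ * constants come from <smmintrin.h>:
+ *
+ *   __m512 d = _mm512_sub_round_ps(x, y,
+ *                  _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ */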
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_mul_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_mulss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_mul_ss(__mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_mulss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mul_round_ss(__A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_mulss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+
+#define _mm_mask_mul_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_mulss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_mul_round_ss(__U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_mulss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U,__R); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_mul_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_mul_sd(__mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mul_round_sd(__A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm_mask_mul_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_mul_round_sd(__U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U,__R); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_mul_pd(__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_mul_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mul_round_pd(__A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A, (__v8df) __B,\
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm512_mask_mul_round_pd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) __W, (__mmask8) __U, __R); })
+
+#define _mm512_maskz_mul_round_pd(__U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+#define _mm512_mul_round_ps(__A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps (), (__mmask16) -1, __R);})
+
+#define _mm512_mask_mul_round_ps(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) __W, (__mmask16) __U, __R); })
+
+#define _mm512_maskz_mul_round_ps(__U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps (), (__mmask16) __U, __R);})
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_div_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_divss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_div_ss(__mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_divss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_div_round_ss(__A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_divss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+
+#define _mm_mask_div_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_divss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_div_round_ss(__U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_divss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U,__R); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_div_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_divsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_div_sd(__mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_divsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_div_round_sd(__A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_divsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm_mask_div_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_divsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_div_round_sd(__U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_divsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U,__R); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_div_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_div_pd(__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_div_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_div_round_pd(__A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A, (__v8df) __B,\
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm512_mask_div_round_pd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) __W, (__mmask8) __U, __R); })
+
+#define _mm512_maskz_div_round_pd(__U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+#define _mm512_div_round_ps(__A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps (), (__mmask16) -1, __R);})
+
+#define _mm512_mask_div_round_ps(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) __W, (__mmask16) __U, __R); })
+
+#define _mm512_maskz_div_round_ps(__U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps (), (__mmask16) __U, __R);})
+
+#define _mm512_roundscale_ps(A, B) __extension__ ({ \
+ (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(A), (B), (__v16sf)(A), \
+ -1, _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_roundscale_pd(A, B) __extension__ ({ \
+ (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(A), (B), (__v8df)(A), \
+ -1, _MM_FROUND_CUR_DIRECTION); })
+
+#define _mm512_fmadd_round_pd(A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) -1, (R)); })
+
+
+#define _mm512_mask_fmadd_round_pd(A, U, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_mask3 ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_fmsub_round_pd(A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) (A), \
+ (__v8df) (B), -(__v8df) (C), \
+ (__mmask8) -1, (R)); })
+
+
+#define _mm512_mask_fmsub_round_pd(A, U, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) (A), \
+ (__v8df) (B), -(__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) (A), \
+ (__v8df) (B), -(__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_fnmadd_round_pd(A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_mask (-(__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) -1, (R)); })
+
+
+#define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_mask3 (-(__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_fnmsub_round_pd(A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_mask (-(__v8df) (A), \
+ (__v8df) (B), -(__v8df) (C), \
+ (__mmask8) -1, (R)); })
+
+
+#define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) (A), \
+ (__v8df) (B), -(__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_fmadd_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_fmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask3_fmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_fmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_fmsub_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_fmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_fmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask (-(__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask3_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask3 (-(__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_fnmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask (-(__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
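+
+/* Note: the eight *_pd fused-multiply variants above all reduce to the
+ * vfmaddpd512 builtins; the sign conventions fall out of operand negation:
+ *
+ *   fmadd(A,B,C)  =   A*B + C
+ *   fmsub(A,B,C)  =   A*B - C   ==  fmadd(A, B, -C)
+ *   fnmadd(A,B,C) = -(A*B) + C  ==  fmadd(-A, B, C)
+ *   fnmsub(A,B,C) = -(A*B) - C  ==  fmadd(-A, B, -C)
+ *
+ * The product is not rounded before the add, so negating operands does not
+ * perturb the fused result.
+ */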
+
+#define _mm512_fmadd_round_ps(A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) -1, (R)); })
+
+
+#define _mm512_mask_fmadd_round_ps(A, U, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_mask3 ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_fmsub_round_ps(A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) (A), \
+ (__v16sf) (B), -(__v16sf) (C), \
+ (__mmask16) -1, (R)); })
+
+
+#define _mm512_mask_fmsub_round_ps(A, U, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) (A), \
+ (__v16sf) (B), -(__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_maskz_fmsub_round_ps(U, A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) (A), \
+ (__v16sf) (B), -(__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_fnmadd_round_ps(A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_mask (-(__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) -1, (R)); })
+
+
+#define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_mask3 (-(__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_fnmsub_round_ps(A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_mask (-(__v16sf) (A), \
+ (__v16sf) (B), -(__v16sf) (C), \
+ (__mmask16) -1, (R)); })
+
+
+#define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) (A), \
+ (__v16sf) (B), -(__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_fmadd_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_fmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask3_fmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_fmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_fmsub_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_fmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_fmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask (-(__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask3_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask3 (-(__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_fnmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_mask (-(__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_fnmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_fmaddsub_round_pd(A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) -1, (R)); })
+
+
+#define _mm512_mask_fmaddsub_round_pd(A, U, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_mask3_fmaddsub_round_pd(A, B, C, U, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddsubpd512_mask3 ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_maskz_fmaddsub_round_pd(U, A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_fmsubadd_round_pd(A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) (A), \
+ (__v8df) (B), -(__v8df) (C), \
+ (__mmask8) -1, (R)); })
+
+
+#define _mm512_mask_fmsubadd_round_pd(A, U, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) (A), \
+ (__v8df) (B), -(__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_maskz_fmsubadd_round_pd(U, A, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) (A), \
+ (__v8df) (B), -(__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_fmaddsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask3_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_fmaddsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_fmsubadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_fmsubadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
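+
+/* Semantics sketch: fmaddsub subtracts C in even-indexed elements and adds
+ * it in odd-indexed ones; fmsubadd is the mirror image, obtained above by
+ * negating C:
+ *
+ *   fmaddsub: dst[i] = (i % 2 == 0) ? A[i]*B[i] - C[i] : A[i]*B[i] + C[i]
+ *   fmsubadd: dst[i] = (i % 2 == 0) ? A[i]*B[i] + C[i] : A[i]*B[i] - C[i]
+ */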
+
+#define _mm512_fmaddsub_round_ps(A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) -1, (R)); })
+
+
+#define _mm512_mask_fmaddsub_round_ps(A, U, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_mask3_fmaddsub_round_ps(A, B, C, U, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddsubps512_mask3 ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_maskz_fmaddsub_round_ps(U, A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_fmsubadd_round_ps(A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) (A), \
+ (__v16sf) (B), -(__v16sf) (C), \
+ (__mmask16) -1, (R)); })
+
+
+#define _mm512_mask_fmsubadd_round_ps(A, U, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) (A), \
+ (__v16sf) (B), -(__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_maskz_fmsubadd_round_ps(U, A, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) (A), \
+ (__v16sf) (B), -(__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_fmaddsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask3_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_fmaddsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_fmsubadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_fmsubadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask3_fmsub_round_pd(A, B, C, U, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmsubpd512_mask3 ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask3_fmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
+{
+ return (__m512d) __builtin_ia32_vfmsubpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmsubps512_mask3 ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
+{
+ return (__m512) __builtin_ia32_vfmsubps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask3_fmsubadd_round_pd(A, B, C, U, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfmsubaddpd512_mask3 ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask3_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
+{
+ return (__m512d) __builtin_ia32_vfmsubaddpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask3_fmsubadd_round_ps(A, B, C, U, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfmsubaddps512_mask3 ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask3_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
+{
+ return (__m512) __builtin_ia32_vfmsubaddps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_fnmadd_round_pd(A, U, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfnmaddpd512_mask ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_fnmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfnmaddpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfnmaddps512_mask ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_fnmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfnmaddps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_fnmsub_round_pd(A, U, B, C, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfnmsubpd512_mask ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+#define _mm512_mask3_fnmsub_round_pd(A, B, C, U, R) __extension__ ({ \
+ (__m512d) __builtin_ia32_vfnmsubpd512_mask3 ((__v8df) (A), \
+ (__v8df) (B), (__v8df) (C), \
+ (__mmask8) (U), (R)); })
+
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_fnmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
+{
+ return (__m512d) __builtin_ia32_vfnmsubpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask3_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
+{
+ return (__m512d) __builtin_ia32_vfnmsubpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_fnmsub_round_ps(A, U, B, C, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfnmsubps512_mask ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+#define _mm512_mask3_fnmsub_round_ps(A, B, C, U, R) __extension__ ({ \
+ (__m512) __builtin_ia32_vfnmsubps512_mask3 ((__v16sf) (A), \
+ (__v16sf) (B), (__v16sf) (C), \
+ (__mmask16) (U), (R)); })
+
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_fnmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
+{
+ return (__m512) __builtin_ia32_vfnmsubps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask3_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
+{
+ return (__m512) __builtin_ia32_vfnmsubps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+
+
+/* Vector permutations */
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_permutex2var_epi32(__m512i __A, __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2vard512_mask ((__v16si) __I
+ /* idx */ ,
+ (__v16si) __A,
+ (__v16si) __B,
+ (__mmask16) -1);
+}
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_permutex2var_epi64(__m512i __A, __m512i __I, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_vpermt2varq512_mask ((__v8di) __I
+ /* idx */ ,
+ (__v8di) __A,
+ (__v8di) __B,
+ (__mmask8) -1);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_permutex2var_pd(__m512d __A, __m512i __I, __m512d __B)
+{
+ return (__m512d) __builtin_ia32_vpermt2varpd512_mask ((__v8di) __I
+ /* idx */ ,
+ (__v8df) __A,
+ (__v8df) __B,
+ (__mmask8) -1);
+}
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_permutex2var_ps(__m512 __A, __m512i __I, __m512 __B)
+{
+ return (__m512) __builtin_ia32_vpermt2varps512_mask ((__v16si) __I
+ /* idx */ ,
+ (__v16sf) __A,
+ (__v16sf) __B,
+ (__mmask16) -1);
+}
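+
+/* Usage sketch (illustrative): each index in __I selects from the
+ * concatenation of the two data sources; for the 16-element forms the low
+ * four bits pick the element and bit 4 picks __A (0) or __B (1). Assuming
+ * _mm512_set_epi32 (highest element listed first) is available:
+ *
+ *   __m512i idx = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7,
+ *                                  8, 9, 10, 11, 12, 13, 14, 15);
+ *   __m512i rev = _mm512_permutex2var_epi32(a, idx, a);  // rev[i] = a[15-i]
+ */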
+
+#define _mm512_alignr_epi64(A, B, I) __extension__ ({ \
+ (__m512i)__builtin_ia32_alignq512_mask((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), \
+ (I), (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1); })
+
+#define _mm512_alignr_epi32(A, B, I) __extension__ ({ \
+ (__m512i)__builtin_ia32_alignd512_mask((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), \
+ (I), (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1); })
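+
+/* Usage sketch: valign concatenates the sources with B in the low half,
+ * shifts right by I whole elements, and keeps the low half:
+ *
+ *   __m512i r = _mm512_alignr_epi64(a, b, 2);
+ *   // r = { b[2], b[3], b[4], b[5], b[6], b[7], a[0], a[1] }
+ */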
+
+/* Vector Extract */
+
+#define _mm512_extractf64x4_pd(A, I) __extension__ ({ \
+ (__m256d) \
+ __builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), \
+ (I), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8) -1); })
+
+#define _mm512_extractf32x4_ps(A, I) __extension__ ({ \
+ (__m128) \
+ __builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), \
+ (I), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8) -1); })
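+
+/* Usage sketch: the immediate selects one 128-bit (f32x4) or 256-bit (f64x4)
+ * lane, counted from the least significant end:
+ *
+ *   __m128  hi = _mm512_extractf32x4_ps(vf, 3);  // float elements 12..15
+ *   __m256d lo = _mm512_extractf64x4_pd(vd, 0);  // double elements 0..3
+ */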
+
+/* Vector Blend */
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_blend_pd(__mmask8 __U, __m512d __A, __m512d __W)
+{
+ return (__m512d) __builtin_ia32_blendmpd_512_mask ((__v8df) __A,
+ (__v8df) __W,
+ (__mmask8) __U);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_blend_ps(__mmask16 __U, __m512 __A, __m512 __W)
+{
+ return (__m512) __builtin_ia32_blendmps_512_mask ((__v16sf) __A,
+ (__v16sf) __W,
+ (__mmask16) __U);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_blend_epi64(__mmask8 __U, __m512i __A, __m512i __W)
+{
+ return (__m512i) __builtin_ia32_blendmq_512_mask ((__v8di) __A,
+ (__v8di) __W,
+ (__mmask8) __U);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
+{
+ return (__m512i) __builtin_ia32_blendmd_512_mask ((__v16si) __A,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
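+
+/* Semantics sketch: blendm copies __W where the mask bit is set and __A
+ * where it is clear, i.e. dst[i] = ((__U >> i) & 1) ? __W[i] : __A[i]:
+ *
+ *   __m512i r = _mm512_mask_blend_epi32(0x00FF, a, w);
+ *   // elements 0..7 come from w, elements 8..15 from a
+ */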
+
+/* Compare */
+
+#define _mm512_cmp_round_ps_mask(A, B, P, R) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (P), (__mmask16)-1, (R)); })
+
+#define _mm512_mask_cmp_round_ps_mask(U, A, B, P, R) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (P), (__mmask16)(U), (R)); })
+
+#define _mm512_cmp_ps_mask(A, B, P) \
+ _mm512_cmp_round_ps_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_cmp_ps_mask(U, A, B, P) \
+ _mm512_mask_cmp_round_ps_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_cmp_round_pd_mask(A, B, P, R) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (P), (__mmask8)-1, (R)); })
+
+#define _mm512_mask_cmp_round_pd_mask(U, A, B, P, R) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (P), (__mmask8)(U), (R)); })
+
+#define _mm512_cmp_pd_mask(A, B, P) \
+ _mm512_cmp_round_pd_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_cmp_pd_mask(U, A, B, P) \
+ _mm512_mask_cmp_round_pd_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION)
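+
+/* Usage sketch (illustrative): the predicate is an AVX _CMP_* constant and
+ * the result is one mask bit per element:
+ *
+ *   __mmask16 lt = _mm512_cmp_ps_mask(a, b, _CMP_LT_OS);
+ *   __m512 m = _mm512_mask_blend_ps(lt, b, a);  // per-element min, modulo NaNs
+ */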
+
+/* Conversion */
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttps_epu32(__m512 __A)
+{
+ return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepi32_ps(A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (R)); })
+
+#define _mm512_cvt_roundepu32_ps(A, R) __extension__ ({ \
+ (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (R)); })
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_cvtepi32_pd(__m256i __A)
+{
+ return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_cvtepu32_pd(__m256i __A)
+{
+ return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+
+#define _mm512_cvt_roundpd_ps(A, R) __extension__ ({ \
+ (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(A), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)-1, (R)); })
+
+#define _mm512_cvtps_ph(A, I) __extension__ ({ \
+ (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(A), (I), \
+ (__v16hi)_mm256_setzero_si256(), \
+ -1); })
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_cvtph_ps(__m256i __A)
+{
+ return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttps_epi32(__m512 __a)
+{
+ return (__m512i)
+ __builtin_ia32_cvttps2dq512_mask((__v16sf) __a,
+ (__v16si) _mm512_setzero_si512 (),
+ (__mmask16) -1, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm512_cvttpd_epi32(__m512d __a)
+{
+ return (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df) __a,
+ (__v8si)_mm256_setzero_si256(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundpd_epi32(A, R) __extension__ ({ \
+ (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)-1, (R)); })
+
+#define _mm512_cvtt_roundps_epi32(A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1, (R)); })
+
+#define _mm512_cvt_roundps_epi32(A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1, (R)); })
+
+#define _mm512_cvt_roundpd_epi32(A, R) __extension__ ({ \
+ (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)-1, (R)); })
+
+#define _mm512_cvt_roundps_epu32(A, R) __extension__ ({ \
+ (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1, (R)); })
+
+#define _mm512_cvt_roundpd_epu32(A, R) __extension__ ({ \
+ (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8) -1, (R)); })
+
+/* Unpack and Interleave */
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_unpackhi_pd(__m512d __a, __m512d __b)
+{
+ return __builtin_shufflevector(__a, __b, 1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_unpacklo_pd(__m512d __a, __m512d __b)
+{
+ return __builtin_shufflevector(__a, __b, 0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_unpackhi_ps(__m512 __a, __m512 __b)
+{
+ return __builtin_shufflevector(__a, __b,
+ 2, 18, 3, 19,
+ 2+4, 18+4, 3+4, 19+4,
+ 2+8, 18+8, 3+8, 19+8,
+ 2+12, 18+12, 3+12, 19+12);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_unpacklo_ps(__m512 __a, __m512 __b)
+{
+ return __builtin_shufflevector(__a, __b,
+ 0, 16, 1, 17,
+ 0+4, 16+4, 1+4, 17+4,
+ 0+8, 16+8, 1+8, 17+8,
+ 0+12, 16+12, 1+12, 17+12);
+}
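+
+/* Note on the shuffle indices: the 512-bit unpacks interleave within each
+ * 128-bit lane, replicating the SSE behavior across the four lanes; the
+ * "base + offset" spelling makes that per-lane structure explicit. For
+ * unpacklo_ps, lane 0 yields { a0, b0, a1, b1 }, lane 1 { a4, b4, a5, b5 },
+ * and so on.
+ */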
+
+/* Bit Test */
+
+static __inline __mmask16 __DEFAULT_FN_ATTRS
+_mm512_test_epi32_mask(__m512i __A, __m512i __B)
+{
+ return (__mmask16) __builtin_ia32_ptestmd512 ((__v16si) __A,
+ (__v16si) __B,
+ (__mmask16) -1);
+}
+
+static __inline __mmask8 __DEFAULT_FN_ATTRS
+_mm512_test_epi64_mask(__m512i __A, __m512i __B)
+{
+ return (__mmask8) __builtin_ia32_ptestmq512 ((__v8di) __A,
+ (__v8di) __B,
+ (__mmask8) -1);
+}
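+
+/* Semantics sketch: vptestm sets mask bit i when (__A[i] & __B[i]) != 0,
+ * i.e. a per-element "any common bits" test:
+ *
+ *   __mmask16 odd = _mm512_test_epi32_mask(v, _mm512_set1_epi32(1));
+ *   // bit i set iff element i of v has its low bit set
+ */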
+
+/* SIMD load ops */
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_loadu_epi32(__mmask16 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_loaddqusi512_mask ((const __v16si *)__P,
+ (__v16si)
+ _mm512_setzero_si512 (),
+ (__mmask16) __U);
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_loadu_epi64(__mmask8 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_loaddqudi512_mask ((const __v8di *)__P,
+ (__v8di)
+ _mm512_setzero_si512 (),
+ (__mmask8) __U);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_loadu_ps(__mmask16 __U, void const *__P)
+{
+ return (__m512) __builtin_ia32_loadups512_mask ((const __v16sf *)__P,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_loadu_pd(__mmask8 __U, void const *__P)
+{
+ return (__m512d) __builtin_ia32_loadupd512_mask ((const __v8df *)__P,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_load_ps(__mmask16 __U, void const *__P)
+{
+ return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__P,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_load_pd(__mmask8 __U, void const *__P)
+{
+ return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__P,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_loadu_pd(double const *__p)
+{
+ struct __loadu_pd {
+ __m512d __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_pd*)__p)->__v;
+}
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_loadu_ps(float const *__p)
+{
+ struct __loadu_ps {
+ __m512 __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_ps*)__p)->__v;
+}
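+
+/* Note: the packed/may_alias wrapper struct above is the usual idiom for an
+ * unaligned, strict-aliasing-safe load: __packed__ drops the struct's 64-byte
+ * alignment requirement to 1, and __may_alias__ permits __p to point into
+ * storage of any type, so the compiler emits a plain unaligned vector load.
+ */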
+
+static __inline __m512 __DEFAULT_FN_ATTRS
+_mm512_load_ps(float const *__p)
+{
+ return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__p,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1);
+}
+
+static __inline __m512d __DEFAULT_FN_ATTRS
+_mm512_load_pd(double const *__p)
+{
+ return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__p,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1);
+}
+
+/* SIMD store ops */
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_mask_storeu_epi64(void *__P, __mmask8 __U, __m512i __A)
+{
+ __builtin_ia32_storedqudi512_mask ((__v8di *)__P, (__v8di) __A,
+ (__mmask8) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A)
+{
+ __builtin_ia32_storedqusi512_mask ((__v16si *)__P, (__v16si) __A,
+ (__mmask16) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_mask_storeu_pd(void *__P, __mmask8 __U, __m512d __A)
+{
+ __builtin_ia32_storeupd512_mask ((__v8df *)__P, (__v8df) __A, (__mmask8) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_storeu_pd(void *__P, __m512d __A)
+{
+ __builtin_ia32_storeupd512_mask((__v8df *)__P, (__v8df)__A, (__mmask8)-1);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_mask_storeu_ps(void *__P, __mmask16 __U, __m512 __A)
+{
+ __builtin_ia32_storeups512_mask ((__v16sf *)__P, (__v16sf) __A,
+ (__mmask16) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_storeu_ps(void *__P, __m512 __A)
+{
+ __builtin_ia32_storeups512_mask((__v16sf *)__P, (__v16sf)__A, (__mmask16)-1);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_mask_store_pd(void *__P, __mmask8 __U, __m512d __A)
+{
+ __builtin_ia32_storeapd512_mask ((__v8df *)__P, (__v8df) __A, (__mmask8) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_store_pd(void *__P, __m512d __A)
+{
+ *(__m512d*)__P = __A;
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_mask_store_ps(void *__P, __mmask16 __U, __m512 __A)
+{
+ __builtin_ia32_storeaps512_mask ((__v16sf *)__P, (__v16sf) __A,
+ (__mmask16) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm512_store_ps(void *__P, __m512 __A)
+{
+ *(__m512*)__P = __A;
+}
+
+/* Mask ops */
+
+static __inline __mmask16 __DEFAULT_FN_ATTRS
+_mm512_knot(__mmask16 __M)
+{
+ return __builtin_ia32_knothi(__M);
+}
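+
+/* Usage sketch: knot complements all 16 mask bits, which gives the "else"
+ * side of a comparison without re-comparing:
+ *
+ *   __mmask16 ge = _mm512_knot(_mm512_cmplt_epi32_mask(a, b));  // a >= b
+ */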
+
+/* Integer compare */
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epi32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqd512_mask((__v16si)__a, (__v16si)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqd512_mask((__v16si)__a, (__v16si)__b,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epu32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 0,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq512_mask((__v8di)__a, (__v8di)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epi64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq512_mask((__v8di)__a, (__v8di)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpeq_epu64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 0,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpeq_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epi32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 5,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epu32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 5,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epi64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpge_epu64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpge_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epi32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_pcmpgtd512_mask((__v16si)__a, (__v16si)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_pcmpgtd512_mask((__v16si)__a, (__v16si)__b,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epu32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 6,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtq512_mask((__v8di)__a, (__v8di)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epi64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtq512_mask((__v8di)__a, (__v8di)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpgt_epu64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 6,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpgt_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmple_epi32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 2,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmple_epu32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 2,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmple_epi64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmple_epu64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmple_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epi32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 1,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epu32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 1,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epi64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmplt_epu64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmplt_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epi32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 4,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epu32_mask(__m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 4,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) {
+ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epi64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_cmpneq_epu64_mask(__m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm512_mask_cmpneq_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 4,
+ __u);
+}
+
+#define _mm512_cmp_epi32_mask(a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (p), \
+ (__mmask16)-1); })
+
+#define _mm512_cmp_epu32_mask(a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (p), \
+ (__mmask16)-1); })
+
+#define _mm512_cmp_epi64_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (p), \
+ (__mmask8)-1); })
+
+#define _mm512_cmp_epu64_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (p), \
+ (__mmask8)-1); })
+
+#define _mm512_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (p), \
+ (__mmask16)(m)); })
+
+#define _mm512_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (p), \
+ (__mmask16)(m)); })
+
+#define _mm512_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (p), \
+ (__mmask8)(m)); })
+
+#define _mm512_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (p), \
+ (__mmask8)(m)); })
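+
+/* Note: the generic _mm512[_mask]_cmp_* macros take the same predicate
+ * encoding used throughout this file: 0 = EQ, 1 = LT, 2 = LE, 4 = NE,
+ * 5 = NLT (>=), 6 = NLE (>). For example, _mm512_cmpge_epi32_mask(a, b)
+ * above is _mm512_cmp_epi32_mask(a, b, 5). */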
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif // __AVX512FINTRIN_H
diff --git a/contrib/llvm/tools/clang/lib/Headers/avx512vlbwintrin.h b/contrib/llvm/tools/clang/lib/Headers/avx512vlbwintrin.h
new file mode 100644
index 0000000..b4542d6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/avx512vlbwintrin.h
@@ -0,0 +1,2336 @@
+/*===---- avx512vlbwintrin.h - AVX512VL and AVX512BW intrinsics ------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vlbwintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VLBWINTRIN_H
+#define __AVX512VLBWINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bw")))
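+/* __always_inline__ forces the wrappers to be inlined even at -O0,
+ * __nodebug__ keeps them out of the debug info, and the __target__ attribute
+ * compiles each function with AVX512VL/AVX512BW enabled, so this header can
+ * be included from translation units that are not built with those features
+ * globally (calling the intrinsics still requires the features at the call
+ * site). */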
+
+/* Integer compare */
+
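+/* Each comparison returns one mask bit per element.  A minimal usage sketch
+ * (the values here are hypothetical, for illustration only):
+ *
+ *   __m128i a = _mm_set1_epi8(1), b = _mm_set1_epi8(1);
+ *   __mmask16 k  = _mm_cmpeq_epi8_mask(a, b);              // all 16 bits set
+ *   __mmask16 k2 = _mm_mask_cmpeq_epi8_mask(0x00FF, a, b); // low 8 bits set
+ *
+ * The _mask_ forms AND the result with the incoming mask __u, so lanes whose
+ * mask bit is clear always report 0. */
+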
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpeq_epi8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqb128_mask((__v16qi)__a, (__v16qi)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpeq_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqb128_mask((__v16qi)__a, (__v16qi)__b,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpeq_epu8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 0,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpeq_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epi8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_pcmpeqb256_mask((__v32qi)__a, (__v32qi)__b,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpeq_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_pcmpeqb256_mask((__v32qi)__a, (__v32qi)__b,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epu8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 0,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpeq_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpeq_epi16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqw128_mask((__v8hi)__a, (__v8hi)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpeq_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqw128_mask((__v8hi)__a, (__v8hi)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpeq_epu16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 0,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpeq_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epi16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqw256_mask((__v16hi)__a, (__v16hi)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpeq_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_pcmpeqw256_mask((__v16hi)__a, (__v16hi)__b,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epu16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 0,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpeq_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpge_epi8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 5,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpge_epu8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 5,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epi8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 5,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epu8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 5,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpge_epi16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpge_epu16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epi16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 5,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epu16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 5,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_pcmpgtb128_mask((__v16qi)__a, (__v16qi)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpgt_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_pcmpgtb128_mask((__v16qi)__a, (__v16qi)__b,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpgt_epu8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 6,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpgt_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epi8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_pcmpgtb256_mask((__v32qi)__a, (__v32qi)__b,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpgt_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_pcmpgtb256_mask((__v32qi)__a, (__v32qi)__b,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epu8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 6,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpgt_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtw128_mask((__v8hi)__a, (__v8hi)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpgt_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtw128_mask((__v8hi)__a, (__v8hi)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpgt_epu16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 6,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpgt_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epi16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_pcmpgtw256_mask((__v16hi)__a, (__v16hi)__b,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpgt_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_pcmpgtw256_mask((__v16hi)__a, (__v16hi)__b,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epu16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 6,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpgt_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmple_epi8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 2,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmple_epu8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 2,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmple_epi8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 2,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmple_epu8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 2,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmple_epi16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmple_epu16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmple_epi16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 2,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmple_epu16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 2,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmplt_epi8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 1,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmplt_epu8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 1,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epi8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 1,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epu8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 1,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmplt_epi16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmplt_epu16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epi16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 1,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epu16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 1,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epi8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 4,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epu8_mask(__m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 4,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
+ return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epi8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 4,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epu8_mask(__m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 4,
+ (__mmask32)-1);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) {
+ return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epi16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epu16_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epi16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 4,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epu16_mask(__m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 4,
+ (__mmask16)-1);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
+ return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 4,
+ __u);
+}
+
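+/* The masked arithmetic below follows the standard AVX-512 convention: the
+ * _mask_ forms take a pass-through vector __W and keep W's element wherever
+ * the corresponding bit of __U is clear, while the _maskz_ forms zero those
+ * elements instead.  Sketch (mask value hypothetical):
+ *
+ *   r = _mm256_mask_add_epi8(w, 0x0000FFFF, a, b);
+ *   // r[i] = (i < 16) ? a[i] + b[i] : w[i]
+ */
+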
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_add_epi8 (__m256i __W, __mmask32 __U, __m256i __A, __m256i __B){
+ return (__m256i) __builtin_ia32_paddb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_add_epi8 (__mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_paddb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi)
+ _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_add_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_paddw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_add_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_paddw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sub_epi8 (__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_psubb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sub_epi8 (__mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_psubb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi)
+ _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sub_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_psubw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sub_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_psubw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_add_epi8 (__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_paddb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_add_epi8 (__mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_paddb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi)
+ _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_add_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_paddw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_add_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_paddw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sub_epi8 (__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_psubb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sub_epi8 (__mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_psubb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi)
+ _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sub_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_psubw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sub_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_psubw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mullo_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmullw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mullo_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmullw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mullo_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmullw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mullo_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmullw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
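+/* Mask blend selects per element between the two vector operands:
+ * result[i] = (bit i of __U) ? __W[i] : __A[i]. */
+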
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_blend_epi8 (__mmask16 __U, __m128i __A, __m128i __W)
+{
+ return (__m128i) __builtin_ia32_blendmb_128_mask ((__v16qi) __A,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_blend_epi8 (__mmask32 __U, __m256i __A, __m256i __W)
+{
+ return (__m256i) __builtin_ia32_blendmb_256_mask ((__v32qi) __A,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_blend_epi16 (__mmask8 __U, __m128i __A, __m128i __W)
+{
+ return (__m128i) __builtin_ia32_blendmw_128_mask ((__v8hi) __A,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_blend_epi16 (__mmask16 __U, __m256i __A, __m256i __W)
+{
+ return (__m256i) __builtin_ia32_blendmw_256_mask ((__v16hi) __A,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_abs_epi8 (__m128i __W, __mmask16 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pabsb128_mask ((__v16qi) __A,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_abs_epi8 (__mmask16 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pabsb128_mask ((__v16qi) __A,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_abs_epi8 (__m256i __W, __mmask32 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_pabsb256_mask ((__v32qi) __A,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_abs_epi8 (__mmask32 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_pabsb256_mask ((__v32qi) __A,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_abs_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pabsw128_mask ((__v8hi) __A,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_abs_epi16 (__mmask8 __U, __m128i __A)
+{
+ return (__m128i) __builtin_ia32_pabsw128_mask ((__v8hi) __A,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_abs_epi16 (__m256i __W, __mmask16 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_pabsw256_mask ((__v16hi) __A,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_abs_epi16 (__mmask16 __U, __m256i __A)
+{
+ return (__m256i) __builtin_ia32_pabsw256_mask ((__v16hi) __A,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
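+/* The pack operations narrow each element to half its width and interleave
+ * the two sources 128-bit lane by 128-bit lane.  packs_* saturates as signed
+ * (e.g. words clamp to [-128, 127] when packed to bytes); packus_* saturates
+ * as unsigned (clamping to [0, 255] or [0, 65535]). */
+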
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_packs_epi32 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packssdw128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v8hi) _mm_setzero_si128 (), __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_packs_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packssdw128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v8hi) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_packs_epi32 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packssdw256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_packs_epi32 (__m256i __W, __mmask16 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packssdw256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v16hi) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_packs_epi16 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packsswb128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_packs_epi16 (__m128i __W, __mmask16 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packsswb128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v16qi) __W,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_packs_epi16 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packsswb256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_packs_epi16 (__m256i __W, __mmask32 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packsswb256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v32qi) __W,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_packus_epi32 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packusdw128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_packus_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packusdw128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v8hi) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_packus_epi32 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packusdw256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_packus_epi32 (__m256i __W, __mmask16 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packusdw256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v16hi) __W,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_packus_epi16 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packuswb128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_packus_epi16 (__m128i __W, __mmask16 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_packuswb128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v16qi) __W,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_packus_epi16 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packuswb256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_packus_epi16 (__m256i __W, __mmask32 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_packuswb256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v32qi) __W,
+ __M);
+}
+
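+/* The adds_/subs_ families (here and further below) are saturating: the
+ * epi8/epi16 forms clamp to the signed range of the element type, and the
+ * epu8/epu16 forms clamp to the unsigned range, so e.g. a saturating
+ * unsigned subtract never wraps below zero. */
+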
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_adds_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_adds_epi8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_adds_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_adds_epi8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_adds_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_adds_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_adds_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_adds_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_adds_epu8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddusb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_adds_epu8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddusb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_adds_epu8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddusb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_adds_epu8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddusb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_adds_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddusw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_adds_epu16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddusw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_adds_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddusw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_adds_epu16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddusw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
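+/* avg_epu8/avg_epu16 compute the unsigned rounding average of each pair of
+ * elements: (a + b + 1) >> 1, evaluated without intermediate overflow. */
+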
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_avg_epu8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pavgb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_avg_epu8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pavgb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_avg_epu8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pavgb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_avg_epu8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pavgb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_avg_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pavgw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_avg_epu16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pavgw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_avg_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pavgw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_avg_epu16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pavgw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epi8 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epi8 (__m128i __W, __mmask16 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epi8 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epi8 (__m256i __W, __mmask32 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epi16 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epi16 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epi16 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epu8 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxub128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epu8 (__m128i __W, __mmask16 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxub128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epu8 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxub256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epu8 (__m256i __W, __mmask32 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxub256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epu16 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxuw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epu16 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmaxuw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epu16 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxuw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epu16 (__m256i __W, __mmask16 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmaxuw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epi8 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epi8 (__m128i __W, __mmask16 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epi8 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epi8 (__m256i __W, __mmask32 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epi16 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epi16 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epi16 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epu8 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminub128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epu8 (__m128i __W, __mmask16 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminub128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epu8 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminub256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epu8 (__m256i __W, __mmask32 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminub256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epu16 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminuw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epu16 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pminuw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epu16 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminuw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epu16 (__m256i __W, __mmask16 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pminuw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __M);
+}
+
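+/* shuffle_epi8 has pshufb semantics within each 128-bit lane: each output
+ * byte is __A[__B[i] & 0x0f], or zero when the high bit of the control byte
+ * __B[i] is set.  The write-mask is then applied on top of that result. */
+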
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_shuffle_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pshufb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_shuffle_epi8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pshufb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_shuffle_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pshufb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_shuffle_epi8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pshufb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_subs_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_subs_epi8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubsb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_subs_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_subs_epi8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubsb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_subs_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_subs_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubsw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_subs_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_subs_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubsw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_subs_epu8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubusb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_subs_epu8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubusb128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_subs_epu8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubusb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_subs_epu8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubusb256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_subs_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubusw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_subs_epu16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubusw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_subs_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubusw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_subs_epu16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubusw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
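+/* permutex2var gathers elements from the concatenation of two vectors using
+ * a vector of indices: for N-element vectors, the low log2(N) index bits
+ * select the element and the next bit selects between the two sources.  The
+ * _mask_ form leaves __A in masked-off lanes, _mask2_ leaves the index
+ * vector __I, and _maskz_ zeroes them. */
+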
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_epi16 (__m128i __A, __m128i __I, __mmask8 __U,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpermi2varhi128_mask ((__v8hi) __A,
+ (__v8hi) __I /* idx */ ,
+ (__v8hi) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_epi16 (__m256i __A, __m256i __I,
+ __mmask16 __U, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpermi2varhi256_mask ((__v16hi) __A,
+ (__v16hi) __I /* idx */ ,
+ (__v16hi) __B,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_permutex2var_epi16 (__m128i __A, __m128i __I, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpermt2varhi128_mask ((__v8hi) __I/* idx */,
+ (__v8hi) __A,
+ (__v8hi) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_epi16 (__m128i __A, __mmask8 __U, __m128i __I,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpermt2varhi128_mask ((__v8hi) __I/* idx */,
+ (__v8hi) __A,
+ (__v8hi) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_epi16 (__mmask8 __U, __m128i __A, __m128i __I,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_vpermt2varhi128_maskz ((__v8hi) __I/* idx */,
+ (__v8hi) __A,
+ (__v8hi) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutex2var_epi16 (__m256i __A, __m256i __I, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpermt2varhi256_mask ((__v16hi) __I/* idx */,
+ (__v16hi) __A,
+ (__v16hi) __B,
+ (__mmask16) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_epi16 (__m256i __A, __mmask16 __U,
+ __m256i __I, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpermt2varhi256_mask ((__v16hi) __I/* idx */,
+ (__v16hi) __A,
+ (__v16hi) __B,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_epi16 (__mmask16 __U, __m256i __A,
+ __m256i __I, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_vpermt2varhi256_maskz ((__v16hi) __I/* idx */,
+ (__v16hi) __A,
+ (__v16hi) __B,
+ (__mmask16) __U);
+}
+
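+/* maddubs (pmaddubsw) multiplies unsigned bytes of __X by the corresponding
+ * signed bytes of __Y and adds adjacent pairs of the 16-bit products with
+ * signed saturation. */
+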
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_maddubs_epi16 (__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
+ return (__m128i) __builtin_ia32_pmaddubsw128_mask ((__v16qi) __X,
+ (__v16qi) __Y,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_maddubs_epi16 (__mmask8 __U, __m128i __X, __m128i __Y) {
+ return (__m128i) __builtin_ia32_pmaddubsw128_mask ((__v16qi) __X,
+ (__v16qi) __Y,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_maddubs_epi16 (__m256i __W, __mmask16 __U, __m256i __X,
+ __m256i __Y) {
+ return (__m256i) __builtin_ia32_pmaddubsw256_mask ((__v32qi) __X,
+ (__v32qi) __Y,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_maddubs_epi16 (__mmask16 __U, __m256i __X, __m256i __Y) {
+ return (__m256i) __builtin_ia32_pmaddubsw256_mask ((__v32qi) __X,
+ (__v32qi) __Y,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
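+/* madd (pmaddwd) multiplies corresponding signed 16-bit elements and adds
+ * adjacent pairs of the 32-bit products, yielding half as many 32-bit sums. */
+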
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_madd_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaddwd128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_madd_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaddwd128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v4si) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_madd_epi16 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaddwd256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_madd_epi16 (__mmask8 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaddwd256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v8si) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
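+/* Three word-to-byte narrowing families follow: cvtsepi16 saturates as
+ * signed (clamping to [-128, 127]), cvtusepi16 saturates as unsigned
+ * (clamping to [0, 255]), and cvtepi16 truncates to the low byte.  The
+ * 256-bit sources fill a whole __m128i result; the 128-bit forms zero the
+ * upper eight bytes of the result. */
+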
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtsepi16_epi8 (__m128i __A) {
+ return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtsepi16_epi8 (__mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtsepi16_epi8 (__m256i __A) {
+ return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtsepi16_epi8 (__mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtusepi16_epi8 (__m128i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtusepi16_epi8 (__mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtusepi16_epi8 (__m256i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtusepi16_epi8 (__mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi16_epi8 (__m128i __A) {
+ return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi16_epi8 (__mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtepi16_epi8 (__m256i __A) {
+ return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi16_epi8 (__mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mulhrs_epi16 (__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
+ return (__m128i) __builtin_ia32_pmulhrsw128_mask ((__v8hi) __X,
+ (__v8hi) __Y,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mulhrs_epi16 (__mmask8 __U, __m128i __X, __m128i __Y) {
+ return (__m128i) __builtin_ia32_pmulhrsw128_mask ((__v8hi) __X,
+ (__v8hi) __Y,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mulhrs_epi16 (__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) {
+ return (__m256i) __builtin_ia32_pmulhrsw256_mask ((__v16hi) __X,
+ (__v16hi) __Y,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mulhrs_epi16 (__mmask16 __U, __m256i __X, __m256i __Y) {
+ return (__m256i) __builtin_ia32_pmulhrsw256_mask ((__v16hi) __X,
+ (__v16hi) __Y,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mulhi_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmulhuw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mulhi_epu16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmulhuw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mulhi_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmulhuw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mulhi_epu16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmulhuw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mulhi_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmulhw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mulhi_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmulhw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mulhi_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmulhw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mulhi_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmulhw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpackhi_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_punpckhbw128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpackhi_epi8 (__mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_punpckhbw128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpackhi_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_punpckhbw256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpackhi_epi8 (__mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_punpckhbw256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpackhi_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_punpckhwd128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpackhi_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_punpckhwd128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpackhi_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_punpckhwd256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpackhi_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_punpckhwd256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpacklo_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_punpcklbw128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpacklo_epi8 (__mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_punpcklbw128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpacklo_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_punpcklbw256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpacklo_epi8 (__mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_punpcklbw256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpacklo_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_punpcklwd128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpacklo_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_punpcklwd128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpacklo_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_punpcklwd256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpacklo_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_punpcklwd256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
+#define _mm_cmp_epi8_mask(a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), \
+ (p), (__mmask16)-1); })
+
+#define _mm_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), \
+ (p), (__mmask16)(m)); })
+
+#define _mm_cmp_epu8_mask(a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), \
+ (p), (__mmask16)-1); })
+
+#define _mm_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), \
+ (p), (__mmask16)(m)); })
+
+#define _mm256_cmp_epi8_mask(a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), \
+ (p), (__mmask32)-1); })
+
+#define _mm256_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), \
+ (p), (__mmask32)(m)); })
+
+#define _mm256_cmp_epu8_mask(a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), \
+ (p), (__mmask32)-1); })
+
+#define _mm256_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \
+ (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), \
+ (p), (__mmask32)(m)); })
+
+#define _mm_cmp_epi16_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
+ (__v8hi)(__m128i)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
+ (__v8hi)(__m128i)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm_cmp_epu16_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
+ (__v8hi)(__m128i)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
+ (__v8hi)(__m128i)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm256_cmp_epi16_mask(a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
+ (__v16hi)(__m256i)(b), \
+ (p), (__mmask16)-1); })
+
+#define _mm256_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
+ (__v16hi)(__m256i)(b), \
+ (p), (__mmask16)(m)); })
+
+#define _mm256_cmp_epu16_mask(a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
+ (__v16hi)(__m256i)(b), \
+ (p), (__mmask16)-1); })
+
+#define _mm256_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \
+ (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
+ (__v16hi)(__m256i)(b), \
+ (p), (__mmask16)(m)); })
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __AVX512VLBWINTRIN_H */
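All of the masked forms in this header follow one convention: the _mm*_mask_* variants merge the result into the passthrough operand __W wherever the corresponding bit of __U is clear, while the _mm*_maskz_* variants zero those lanes instead. A minimal sketch of the difference, using the maddubs pair defined above (assumes a compiler and CPU with AVX512VL and AVX512BW, e.g. building with -mavx512vl -mavx512bw; the lane values are illustrative, not taken from this patch):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128i x = _mm_set1_epi8(10);   /* treated as 16 unsigned bytes   */
  __m128i y = _mm_set1_epi8(3);    /* treated as 16 signed bytes     */
  __m128i w = _mm_set1_epi16(-1);  /* passthrough for merge-masking  */
  __mmask8 u = 0x0F;               /* keep the low 4 of 8 word lanes */

  /* Each active word lane is 10*3 + 10*3 = 60. */
  __m128i merged = _mm_mask_maddubs_epi16(w, u, x, y); /* inactive lanes = -1 */
  __m128i zeroed = _mm_maskz_maddubs_epi16(u, x, y);   /* inactive lanes =  0 */

  short out[8];
  _mm_storeu_si128((__m128i *)out, merged);
  printf("merged: lane0=%d lane7=%d\n", out[0], out[7]); /* 60, -1 */
  _mm_storeu_si128((__m128i *)out, zeroed);
  printf("zeroed: lane0=%d lane7=%d\n", out[0], out[7]); /* 60, 0 */
  return 0;
}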
diff --git a/contrib/llvm/tools/clang/lib/Headers/avx512vldqintrin.h b/contrib/llvm/tools/clang/lib/Headers/avx512vldqintrin.h
new file mode 100644
index 0000000..dfd858e0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/avx512vldqintrin.h
@@ -0,0 +1,953 @@
+/*===---- avx512vldqintrin.h - AVX512VL and AVX512DQ intrinsics ------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vldqintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VLDQINTRIN_H
+#define __AVX512VLDQINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512dq")))
+
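+/* The unmasked mullo forms in this file use plain vector-extension
+ * arithmetic; the backend can select VPMULLQ when AVX512DQ is enabled. */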
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mullo_epi64 (__m256i __A, __m256i __B) {
+ return (__m256i) ((__v4di) __A * (__v4di) __B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mullo_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmullq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mullo_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmullq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mullo_epi64 (__m128i __A, __m128i __B) {
+ return (__m128i) ((__v2di) __A * (__v2di) __B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mullo_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmullq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mullo_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmullq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_andnot_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_andnpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_andnot_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_andnpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_andnot_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_andnpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_andnot_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_andnpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_andnot_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_andnps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_andnot_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_andnps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_andnot_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_andnot_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_and_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_andpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_and_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_andpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_and_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_andpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_and_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_andpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_and_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_andps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_and_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_andps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_and_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_and_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_xor_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_xorpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_xor_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_xorpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_xor_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_xorpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_xor_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_xorpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_xor_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_xorps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_xor_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_xorps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_xor_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_xorps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_xor_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_xorps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_or_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_orpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_or_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_orpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_or_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_orpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_or_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_orpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_or_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_orps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_or_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_orps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_or_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_orps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_or_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_orps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtpd_epi64 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtpd_epi64 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtpd_epi64 (__m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtpd_epi64 (__mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtpd_epu64 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtpd_epu64 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtpd_epu64 (__m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtpd_epu64 (__mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtps_epi64 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_epi64 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtps_epi64 (__m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_epi64 (__m256i __W, __mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtps_epu64 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_epu64 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtps_epu64 (__m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_epu64 (__m256i __W, __mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtepi64_pd (__m128i __A) {
+ return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A,
+ (__v2df) _mm_setzero_pd(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi64_pd (__m128d __W, __mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi64_pd (__mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A,
+ (__v2df) _mm_setzero_pd(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_cvtepi64_pd (__m256i __A) {
+ return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A,
+ (__v4df) _mm256_setzero_pd(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi64_pd (__m256d __W, __mmask8 __U, __m256i __A) {
+ return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi64_pd (__mmask8 __U, __m256i __A) {
+ return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A,
+ (__v4df) _mm256_setzero_pd(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtepi64_ps (__m128i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi64_ps (__mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_cvtepi64_ps (__m256i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m256i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi64_ps (__mmask8 __U, __m256i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttpd_epi64 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttpd_epi64 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttpd_epi64 (__m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttpd_epi64 (__mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttpd_epu64 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttpd_epu64 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttpd_epu64 (__m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttpd_epu64 (__mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttps_epi64 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttps_epi64 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttps_epi64 (__m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttps_epi64 (__m256i __W, __mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttps_epu64 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttps_epu64 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttps_epu64 (__m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttps_epu64 (__m256i __W, __mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtepu64_pd (__m128i __A) {
+ return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A,
+ (__v2df) _mm_setzero_pd(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu64_pd (__m128d __W, __mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu64_pd (__mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A,
+ (__v2df) _mm_setzero_pd(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_cvtepu64_pd (__m256i __A) {
+ return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A,
+ (__v4df) _mm256_setzero_pd(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu64_pd (__m256d __W, __mmask8 __U, __m256i __A) {
+ return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu64_pd (__mmask8 __U, __m256i __A) {
+ return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A,
+ (__v4df) _mm256_setzero_pd(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtepu64_ps (__m128i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu64_ps (__mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_cvtepu64_ps (__m256i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m256i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu64_ps (__mmask8 __U, __m256i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) __U);
+}
+
+#define _mm_range_pd(__A, __B, __C) __extension__ ({ \
+ (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A, (__v2df) __B, __C, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1); })
+
+#define _mm_mask_range_pd(__W, __U, __A, __B, __C) __extension__ ({ \
+ (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A, (__v2df) __B, __C, \
+ (__v2df) __W, (__mmask8) __U); })
+
+#define _mm_maskz_range_pd(__U, __A, __B, __C) __extension__ ({ \
+ (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A, (__v2df) __B, __C, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U); })
+
+#define _mm256_range_pd(__A, __B, __C) __extension__ ({ \
+ (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A, (__v4df) __B, __C, \
+ (__v4df) _mm256_setzero_pd(), (__mmask8) -1); })
+
+#define _mm256_mask_range_pd(__W, __U, __A, __B, __C) __extension__ ({ \
+ (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A, (__v4df) __B, __C, \
+ (__v4df) __W, (__mmask8) __U); })
+
+#define _mm256_maskz_range_pd(__U, __A, __B, __C) __extension__ ({ \
+ (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A, (__v4df) __B, __C, \
+ (__v4df) _mm256_setzero_pd(), (__mmask8) __U); })
+
+#define _mm_range_ps(__A, __B, __C) __extension__ ({ \
+ (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A, (__v4sf) __B, __C, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1); })
+
+#define _mm_mask_range_ps(__W, __U, __A, __B, __C) __extension__ ({ \
+ (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A, (__v4sf) __B, __C, \
+ (__v4sf) __W, (__mmask8) __U); })
+
+#define _mm_maskz_range_ps(__U, __A, __B, __C) __extension__ ({ \
+ (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A, (__v4sf) __B, __C, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U); })
+
+#define _mm256_range_ps(__A, __B, __C) __extension__ ({ \
+ (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A, (__v8sf) __B, __C, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) -1); })
+
+#define _mm256_mask_range_ps(__W, __U, __A, __B, __C) __extension__ ({ \
+ (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A, (__v8sf) __B, __C, \
+ (__v8sf) __W, (__mmask8) __U); })
+
+#define _mm256_maskz_range_ps(__U, __A, __B, __C) __extension__ ({ \
+ (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A, (__v8sf) __B, __C, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) __U); })
+
+#define _mm_reduce_pd(__A, __B) __extension__ ({ \
+ (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1); })
+
+#define _mm_mask_reduce_pd(__W, __U, __A, __B) __extension__ ({ \
+ (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B, \
+ (__v2df) __W, (__mmask8) __U); })
+
+#define _mm_maskz_reduce_pd(__U, __A, __B) __extension__ ({ \
+ (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U); })
+
+#define _mm256_reduce_pd(__A, __B) __extension__ ({ \
+ (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B, \
+ (__v4df) _mm256_setzero_pd(), (__mmask8) -1); })
+
+#define _mm256_mask_reduce_pd(__W, __U, __A, __B) __extension__ ({ \
+ (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B, \
+ (__v4df) __W, (__mmask8) __U); })
+
+#define _mm256_maskz_reduce_pd(__U, __A, __B) __extension__ ({ \
+ (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B, \
+ (__v4df) _mm256_setzero_pd(), (__mmask8) __U); })
+
+#define _mm_reduce_ps(__A, __B) __extension__ ({ \
+ (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1); })
+
+#define _mm_mask_reduce_ps(__W, __U, __A, __B) __extension__ ({ \
+ (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B, \
+ (__v4sf) __W, (__mmask8) __U); })
+
+#define _mm_maskz_reduce_ps(__U, __A, __B) __extension__ ({ \
+ (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U); })
+
+#define _mm256_reduce_ps(__A, __B) __extension__ ({ \
+ (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) -1); })
+
+#define _mm256_mask_reduce_ps(__W, __U, __A, __B) __extension__ ({ \
+ (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B, \
+ (__v8sf) __W, (__mmask8) __U); })
+
+#define _mm256_maskz_reduce_ps(__U, __A, __B) __extension__ ({ \
+ (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) __U); })
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
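The unmasked _mm*_mullo_epi64 above compiles to an ordinary lane-wise 64-bit multiply, and the masked forms route through the pmullq builtins. A short zero-masking sketch (assumes -mavx512vl -mavx512dq and matching hardware; values are illustrative):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m256i a = _mm256_set_epi64x(4, 3, 2, 1); /* lanes, low to high: 1 2 3 4 */
  __m256i b = _mm256_set1_epi64x(10);
  /* Mask 0b0101 keeps lanes 0 and 2; lanes 1 and 3 are zeroed. */
  __m256i r = _mm256_maskz_mullo_epi64(0x5, a, b);

  long long out[4];
  _mm256_storeu_si256((__m256i *)out, r);
  printf("%lld %lld %lld %lld\n", out[0], out[1], out[2], out[3]); /* 10 0 30 0 */
  return 0;
}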
diff --git a/contrib/llvm/tools/clang/lib/Headers/avx512vlintrin.h b/contrib/llvm/tools/clang/lib/Headers/avx512vlintrin.h
new file mode 100644
index 0000000..8f13536
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/avx512vlintrin.h
@@ -0,0 +1,4606 @@
+/*===---- avx512vlintrin.h - AVX512VL intrinsics ---------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vlintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VLINTRIN_H
+#define __AVX512VLINTRIN_H
+
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl")))
+#define __DEFAULT_FN_ATTRS_BOTH __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bw")))
+
+/* Integer compare */
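+/* The trailing immediate passed to the __builtin_ia32_[u]cmp{d,q}*_mask
+ * builtins below selects the predicate: 0 = EQ, 1 = LT, 2 = LE, 4 = NE,
+ * 5 = GE (not LT), 6 = GT (not LE). The cmp* builtins compare signed
+ * elements, the ucmp* builtins unsigned elements. */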
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm_cmpeq_epi32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqd128_mask((__v4si)__a, (__v4si)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm_mask_cmpeq_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqd128_mask((__v4si)__a, (__v4si)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpeq_epu32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 0,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpeq_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm256_cmpeq_epi32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqd256_mask((__v8si)__a, (__v8si)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm256_mask_cmpeq_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqd256_mask((__v8si)__a, (__v8si)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epu32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 0,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpeq_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm_cmpeq_epi64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq128_mask((__v2di)__a, (__v2di)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm_mask_cmpeq_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq128_mask((__v2di)__a, (__v2di)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpeq_epu64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 0,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpeq_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 0,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm256_cmpeq_epi64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq256_mask((__v4di)__a, (__v4di)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm256_mask_cmpeq_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpeqq256_mask((__v4di)__a, (__v4di)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpeq_epu64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 0,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpeq_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 0,
+ __u);
+}
+
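Each compare intrinsic returns a bitmask rather than a vector: bit i of the result reflects lane i. A tiny sketch decoding such a mask (assumes -mavx512vl -mavx512bw, since the pcmpeq forms above carry the _BOTH attribute; values are illustrative):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_set_epi32(7, 5, 3, 1); /* lanes, low to high: 1 3 5 7 */
  __m128i b = _mm_set_epi32(7, 4, 3, 0); /* lanes, low to high: 0 3 4 7 */
  /* Lanes 1 and 3 match, so bits 1 and 3 are set: mask == 0x0a. */
  __mmask8 m = _mm_cmpeq_epi32_mask(a, b);
  printf("mask = 0x%02x\n", (unsigned)m);
  return 0;
}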
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpge_epi32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpge_epu32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epi32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epu32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpge_epi64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpge_epu64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpge_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epi64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpge_epu64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 5,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpge_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 5,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm_cmpgt_epi32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtd128_mask((__v4si)__a, (__v4si)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm_mask_cmpgt_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtd128_mask((__v4si)__a, (__v4si)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpgt_epu32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 6,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpgt_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm256_cmpgt_epi32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtd256_mask((__v8si)__a, (__v8si)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm256_mask_cmpgt_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtd256_mask((__v8si)__a, (__v8si)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epu32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 6,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpgt_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm_cmpgt_epi64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtq128_mask((__v2di)__a, (__v2di)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm_mask_cmpgt_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtq128_mask((__v2di)__a, (__v2di)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpgt_epu64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 6,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpgt_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm256_cmpgt_epi64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtq256_mask((__v4di)__a, (__v4di)__b,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
+_mm256_mask_cmpgt_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_pcmpgtq256_mask((__v4di)__a, (__v4di)__b,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpgt_epu64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 6,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpgt_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 6,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmple_epi32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmple_epu32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmple_epi32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmple_epu32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmple_epi64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmple_epu64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmple_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmple_epi64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmple_epu64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 2,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmple_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 2,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmplt_epi32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmplt_epu32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epi32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epu32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmplt_epi64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmplt_epu64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmplt_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epi64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmplt_epu64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 1,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmplt_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 1,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epi32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epu32_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epi32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epu32_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epi64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_cmpneq_epu64_mask(__m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm_mask_cmpneq_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epi64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 4,
+ __u);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_cmpneq_epu64_mask(__m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 4,
+ (__mmask8)-1);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_mm256_mask_cmpneq_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
+ return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 4,
+ __u);
+}
+
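+/* Masked integer arithmetic: the mask_ forms copy lane i from __W
+ * when bit i of __U is clear, and the maskz_ forms zero it.  A
+ * minimal usage sketch (variable names are illustrative only):
+ *
+ *   __mmask8 __k = _mm256_cmpgt_epi64_mask(__x, __y);
+ *   __m256i  __s = _mm256_maskz_add_epi64(__k, __x, __y);
+ *   // __s[i] = __x[i] + __y[i] where __x[i] > __y[i], else 0
+ */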
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_add_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_add_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_add_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_add_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_paddq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sub_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sub_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_sub_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_sub_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_psubq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_add_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_add_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_add_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_add_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_paddq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sub_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sub_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_sub_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_sub_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_psubq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
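+/* As with the SSE4.1/AVX2 forms, mul_epi32/mul_epu32 multiply only
+ * the even-indexed 32-bit elements of __X and __Y, sign- or
+ * zero-extended, producing full 64-bit products; hence the results
+ * are typed as 64-bit element vectors. */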
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mul_epi32 (__m256i __W, __mmask8 __M, __m256i __X,
+ __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_pmuldq256_mask ((__v8si) __X,
+ (__v8si) __Y,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mul_epi32 (__mmask8 __M, __m256i __X, __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_pmuldq256_mask ((__v8si) __X,
+ (__v8si) __Y,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mul_epi32 (__m128i __W, __mmask8 __M, __m128i __X,
+ __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pmuldq128_mask ((__v4si) __X,
+ (__v4si) __Y,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mul_epi32 (__mmask8 __M, __m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pmuldq128_mask ((__v4si) __X,
+ (__v4si) __Y,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mul_epu32 (__m256i __W, __mmask8 __M, __m256i __X,
+ __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_pmuludq256_mask ((__v8si) __X,
+ (__v8si) __Y,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mul_epu32 (__mmask8 __M, __m256i __X, __m256i __Y)
+{
+ return (__m256i) __builtin_ia32_pmuludq256_mask ((__v8si) __X,
+ (__v8si) __Y,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mul_epu32 (__m128i __W, __mmask8 __M, __m128i __X,
+ __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pmuludq128_mask ((__v4si) __X,
+ (__v4si) __Y,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mul_epu32 (__mmask8 __M, __m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pmuludq128_mask ((__v4si) __X,
+ (__v4si) __Y,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mullo_epi32 (__mmask8 __M, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmulld256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mullo_epi32 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pmulld256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mullo_epi32 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmulld128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mullo_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pmulld128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W, __M);
+}
+
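+/* Masked bitwise logic; andnot computes (~__A) & __B per lane.  The
+ * *_epi32 and *_epi64 forms produce the same bit pattern and differ
+ * only in the granularity at which __U selects lanes. */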
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_and_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pandd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_and_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pandd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_and_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pandd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_and_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pandd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_andnot_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pandnd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_andnot_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pandnd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_andnot_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pandnd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_andnot_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pandnd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_or_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pord256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_or_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pord256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_or_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pord128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_or_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pord128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_xor_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pxord256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_xor_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pxord256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_xor_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pxord128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_xor_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pxord128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_and_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pandq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W, __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_and_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pandq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_and_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pandq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W, __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_and_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pandq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_andnot_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pandnq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W, __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_andnot_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pandnq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_andnot_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pandnq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W, __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_andnot_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pandnq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_or_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_porq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_or_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_porq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_or_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_porq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_or_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_porq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_xor_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+ __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pxorq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_xor_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i) __builtin_ia32_pxorq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_xor_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pxorq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_xor_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+ return (__m128i) __builtin_ia32_pxorq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
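+/* The general cmp forms are macros rather than inlines because the
+ * predicate must be an integer constant expression.  Usage sketch
+ * with the _MM_CMPINT_* names (defined in avx512fintrin.h, assumed
+ * to be included ahead of this header):
+ *
+ *   __mmask8 __k = _mm_cmp_epi32_mask(__a, __b, _MM_CMPINT_LT);
+ */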
+#define _mm_cmp_epi32_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
+ (__v4si)(__m128i)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
+ (__v4si)(__m128i)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm_cmp_epu32_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
+ (__v4si)(__m128i)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
+ (__v4si)(__m128i)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm256_cmp_epi32_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
+ (__v8si)(__m256i)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm256_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
+ (__v8si)(__m256i)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm256_cmp_epu32_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
+ (__v8si)(__m256i)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm256_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
+ (__v8si)(__m256i)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm_cmp_epi64_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
+ (__v2di)(__m128i)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
+ (__v2di)(__m128i)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm_cmp_epu64_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
+ (__v2di)(__m128i)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
+ (__v2di)(__m128i)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm256_cmp_epi64_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
+ (__v4di)(__m256i)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm256_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
+ (__v4di)(__m256i)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm256_cmp_epu64_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
+ (__v4di)(__m256i)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm256_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
+ (__v4di)(__m256i)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm256_cmp_ps_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
+ (__v8sf)(__m256)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm256_mask_cmp_ps_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
+ (__v8sf)(__m256)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm256_cmp_pd_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
+ (__v4df)(__m256d)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm256_mask_cmp_pd_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
+ (__v4df)(__m256d)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm_cmp_ps_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
+ (__v4sf)(__m128)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm_mask_cmp_ps_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
+ (__v4sf)(__m128)(b), \
+ (p), (__mmask8)(m)); })
+
+#define _mm_cmp_pd_mask(a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
+ (__v2df)(__m128d)(b), \
+ (p), (__mmask8)-1); })
+
+#define _mm_mask_cmp_pd_mask(m, a, b, p) __extension__ ({ \
+ (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
+ (__v2df)(__m128d)(b), \
+ (p), (__mmask8)(m)); })
+
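+/* Fused multiply-add with three masking flavors: mask_ keeps __A in
+ * inactive lanes, mask3_ keeps __C, and maskz_ zeroes them.  The
+ * fmsub/fnmadd/fnmsub variants are built by negating __C and/or __A
+ * before invoking the vfmadd builtins. */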
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_mask3 ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_maskz ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_maskz ((__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_mask3 (-(__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_maskz (-(__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddpd128_maskz (-(__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_mask3 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_maskz ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_maskz ((__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_mask3 (-(__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_maskz (-(__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddpd256_maskz (-(__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_mask3 ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_maskz ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_maskz ((__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_mask3 (-(__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_maskz (-(__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddps128_maskz (-(__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_mask3 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_maskz ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_maskz ((__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_mask3 (-(__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_maskz (-(__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddps256_maskz (-(__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C,
+ (__mmask8) __U);
+}
+
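+/* fmaddsub subtracts __C in the even-indexed lanes and adds it in
+ * the odd-indexed lanes; fmsubadd does the opposite and is again
+ * obtained by negating __C. */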
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fmaddsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddsubpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmaddsubpd128_mask3 ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fmaddsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddsubpd128_maskz ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fmsubadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddsubpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fmsubadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfmaddsubpd128_maskz ((__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_fmaddsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddsubpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask3_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
+{
+ return (__m256d) __builtin_ia32_vfmaddsubpd256_mask3 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_fmaddsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddsubpd256_maskz ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_fmsubadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddsubpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_fmsubadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfmaddsubpd256_maskz ((__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fmaddsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddsubps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmaddsubps128_mask3 ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fmaddsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddsubps128_maskz ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fmsubadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddsubps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fmsubadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfmaddsubps128_maskz ((__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_fmaddsub_ps(__m256 __A, __mmask8 __U, __m256 __B,
+ __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddsubps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask3_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
+{
+ return (__m256) __builtin_ia32_vfmaddsubps256_mask3 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_fmaddsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddsubps256_maskz ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_fmsubadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddsubps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfmaddsubps256_maskz ((__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C,
+ (__mmask8) __U);
+}
+
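+/* The mask3 variants of fmsub and fmsubadd need dedicated vfmsub
+ * and vfmsubadd builtins: __C doubles as the passthrough value for
+ * inactive lanes, so its sign cannot simply be flipped at the call
+ * site. */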
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmsubpd128_mask3 ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
+{
+ return (__m256d) __builtin_ia32_vfmsubpd256_mask3 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmsubps128_mask3 ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
+{
+ return (__m256) __builtin_ia32_vfmsubps256_mask3 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfmsubaddpd128_mask3 ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask3_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
+{
+ return (__m256d) __builtin_ia32_vfmsubaddpd256_mask3 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfmsubaddps128_mask3 ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
+{
+ return (__m256) __builtin_ia32_vfmsubaddps256_mask3 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
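+/* Likewise, the mask_ variants of fnmadd/fnmsub use dedicated vfnm
+ * builtins because __A is the passthrough operand here and cannot
+ * be negated before the call. */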
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfnmaddpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfnmaddpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfnmaddps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfnmaddps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
+{
+ return (__m128d) __builtin_ia32_vfnmsubpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
+{
+ return (__m128d) __builtin_ia32_vfnmsubpd128_mask3 ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
+{
+ return (__m256d) __builtin_ia32_vfnmsubpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
+{
+ return (__m256d) __builtin_ia32_vfnmsubpd256_mask3 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+ return (__m128) __builtin_ia32_vfnmsubps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+ return (__m128) __builtin_ia32_vfnmsubps128_mask3 ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
+{
+ return (__m256) __builtin_ia32_vfnmsubps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
+{
+ return (__m256) __builtin_ia32_vfnmsubps256_mask3 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C,
+ (__mmask8) __U);
+}
+
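+/* Masked floating-point add, following the same mask_/maskz_
+ * convention as the integer arithmetic above. */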
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_add_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_addpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_add_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_addpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_add_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_addpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_add_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_addpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_add_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_addps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_add_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_addps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_add_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_addps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_add_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_addps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
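+/* blend performs no arithmetic: lane i of the result is taken from
+ * __W when bit i of __U is set and from __A otherwise.  Sketch
+ * (illustrative names):
+ *
+ *   __r = _mm_mask_blend_epi32(__k, __fallback, __value);
+ */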
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_blend_epi32 (__mmask8 __U, __m128i __A, __m128i __W) {
+ return (__m128i) __builtin_ia32_blendmd_128_mask ((__v4si) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_blend_epi32 (__mmask8 __U, __m256i __A, __m256i __W) {
+ return (__m256i) __builtin_ia32_blendmd_256_mask ((__v8si) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_blend_pd (__mmask8 __U, __m128d __A, __m128d __W) {
+ return (__m128d) __builtin_ia32_blendmpd_128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_blend_pd (__mmask8 __U, __m256d __A, __m256d __W) {
+ return (__m256d) __builtin_ia32_blendmpd_256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_blend_ps (__mmask8 __U, __m128 __A, __m128 __W) {
+ return (__m128) __builtin_ia32_blendmps_128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_blend_ps (__mmask8 __U, __m256 __A, __m256 __W) {
+ return (__m256) __builtin_ia32_blendmps_256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_blend_epi64 (__mmask8 __U, __m128i __A, __m128i __W) {
+ return (__m128i) __builtin_ia32_blendmq_128_mask ((__v2di) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_blend_epi64 (__mmask8 __U, __m256i __A, __m256i __W) {
+ return (__m256i) __builtin_ia32_blendmq_256_mask ((__v4di) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
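+/* compress packs the lanes of __A selected by __U contiguously into
+ * the low lanes of the result; the remaining upper lanes are taken
+ * from __W (mask_) or zeroed (maskz_). */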
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_compress_pd (__m128d __W, __mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_compress_pd (__mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_compress_pd (__m256d __W, __mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_compress_pd (__mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_compress_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_compress_epi64 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_compress_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_compress_epi64 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_compress_ps (__m128 __W, __mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_compress_ps (__mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_compress_ps (__m256 __W, __mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_compress_ps (__mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_compress_epi32 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_compress_epi32 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_compress_epi32 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_compress_epi32 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
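+/* compressstoreu writes only the selected lanes of __A, packed
+ * contiguously, to the possibly unaligned address __P; lanes whose
+ * mask bit is clear consume no memory.  A stream-compaction sketch
+ * (illustrative names):
+ *
+ *   _mm256_mask_compressstoreu_epi32(__out, __keep, __v);
+ *   __out = (int *)__out + __builtin_popcount(__keep);
+ */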
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m128d __A) {
+ __builtin_ia32_compressstoredf128_mask ((__v2df *) __P,
+ (__v2df) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m256d __A) {
+ __builtin_ia32_compressstoredf256_mask ((__v4df *) __P,
+ (__v4df) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m128i __A) {
+ __builtin_ia32_compressstoredi128_mask ((__v2di *) __P,
+ (__v2di) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m256i __A) {
+ __builtin_ia32_compressstoredi256_mask ((__v4di *) __P,
+ (__v4di) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m128 __A) {
+ __builtin_ia32_compressstoresf128_mask ((__v4sf *) __P,
+ (__v4sf) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m256 __A) {
+ __builtin_ia32_compressstoresf256_mask ((__v8sf *) __P,
+ (__v8sf) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m128i __A) {
+ __builtin_ia32_compressstoresi128_mask ((__v4si *) __P,
+ (__v4si) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m256i __A) {
+ __builtin_ia32_compressstoresi256_mask ((__v8si *) __P,
+ (__v8si) __A,
+ (__mmask8) __U);
+}
+
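+/* Masked conversions.  The narrowing pd -> {epi32, ps, epu32} forms
+ * return a 128-bit vector whose upper half is zeroed when only two
+ * source doubles exist to convert. */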
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi32_pd (__m128d __W, __mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtdq2pd128_mask ((__v4si) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtdq2pd128_mask ((__v4si) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi32_pd (__m256d __W, __mmask8 __U, __m128i __A) {
+ return (__m256d) __builtin_ia32_cvtdq2pd256_mask ((__v4si) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) {
+ return (__m256d) __builtin_ia32_cvtdq2pd256_mask ((__v4si) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi32_ps (__m128 __W, __mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtdq2ps128_mask ((__v4si) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi32_ps (__mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtdq2ps128_mask ((__v4si) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi32_ps (__m256 __W, __mmask8 __U, __m256i __A) {
+ return (__m256) __builtin_ia32_cvtdq2ps256_mask ((__v8si) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi32_ps (__mmask8 __U, __m256i __A) {
+ return (__m256) __builtin_ia32_cvtdq2ps256_mask ((__v8si) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtpd_epi32 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2dq256_mask ((__v4df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtpd_epi32 (__mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2dq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m128d __A) {
+ return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtpd_ps (__mmask8 __U, __m128d __A) {
+ return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m256d __A) {
+ return (__m128) __builtin_ia32_cvtpd2ps256_mask ((__v4df) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtpd_ps (__mmask8 __U, __m256d __A) {
+ return (__m128) __builtin_ia32_cvtpd2ps256_mask ((__v4df) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
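+/* Conversions to unsigned 32-bit integers are new with AVX-512, so the
+   unmasked forms are defined here alongside the masked ones.  */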
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtpd_epu32 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtpd_epu32 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtpd_epu32 (__m256d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtpd_epu32 (__mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2dq128_mask ((__v4sf) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_epi32 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2dq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvtps2dq256_mask ((__v8sf) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_epi32 (__mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvtps2dq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_pd (__m128d __W, __mmask8 __U, __m128 __A) {
+ return (__m128d) __builtin_ia32_cvtps2pd128_mask ((__v4sf) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_pd (__mmask8 __U, __m128 __A) {
+ return (__m128d) __builtin_ia32_cvtps2pd128_mask ((__v4sf) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_pd (__m256d __W, __mmask8 __U, __m128 __A) {
+ return (__m256d) __builtin_ia32_cvtps2pd256_mask ((__v4sf) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_pd (__mmask8 __U, __m128 __A) {
+ return (__m256d) __builtin_ia32_cvtps2pd256_mask ((__v4sf) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtps_epu32 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_epu32 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtps_epu32 (__m256 __A) {
+ return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_epu32 (__mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
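+/* The cvtt* variants truncate toward zero instead of using the current
+   rounding mode.  */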
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttpd_epi32 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2dq256_mask ((__v4df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttpd_epi32 (__mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2dq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttpd_epu32 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttpd_epu32 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvttpd_epu32 (__m256d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttpd_epu32 (__mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2dq128_mask ((__v4sf) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttps_epi32 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2dq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvttps2dq256_mask ((__v8sf) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttps_epi32 (__mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvttps2dq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttps_epu32 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttps_epu32 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttps_epu32 (__m256 __A) {
+ return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttps_epu32 (__mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
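+/* Conversions from unsigned 32-bit integers, likewise new with AVX-512.  */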
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtepu32_pd (__m128i __A) {
+ return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu32_pd (__m128d __W, __mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_cvtepu32_pd (__m128i __A) {
+ return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu32_pd (__m256d __W, __mmask8 __U, __m128i __A) {
+ return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) {
+ return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtepu32_ps (__m128i __A) {
+ return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu32_ps (__m128 __W, __mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu32_ps (__mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_cvtepu32_ps (__m256i __A) {
+ return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu32_ps (__m256 __W, __mmask8 __U, __m256i __A) {
+ return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu32_ps (__mmask8 __U, __m256i __A) {
+ return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
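+/* Masked forms of existing SSE/AVX arithmetic (div, max, min, mul, and sub
+   further below); only _mask_ and _maskz_ variants are needed.  */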
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_div_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_divpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_div_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_divpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_div_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_divpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_div_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_divpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_div_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_divps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_div_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_divps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_div_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_divps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_div_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_divps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
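+/* expand moves the contiguous low elements of __A, in order, into the
+   result lanes whose mask bit is set; the other lanes are merged or
+   zeroed as usual.  */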
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_expand_pd (__m128d __W, __mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_expand_pd (__mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_expand_pd (__m256d __W, __mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_expand_pd (__mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_expand_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_expand_epi64 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_expand_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_expand_epi64 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
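+/* expandloadu performs the same expansion, but reads the contiguous
+   source elements from unaligned memory at __P.  */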
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_expandloadu_pd (__m128d __W, __mmask8 __U, void const *__P) {
+ return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P,
+ (__v2df) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_expandloadu_pd (__mmask8 __U, void const *__P) {
+ return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_expandloadu_pd (__m256d __W, __mmask8 __U, void const *__P) {
+ return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P,
+ (__v4df) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_expandloadu_pd (__mmask8 __U, void const *__P) {
+ return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_expandloadu_epi64 (__m128i __W, __mmask8 __U, void const *__P) {
+ return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P,
+ (__v2di) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) {
+ return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_expandloadu_epi64 (__m256i __W, __mmask8 __U,
+ void const *__P) {
+ return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P,
+ (__v4di) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) {
+ return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_expandloadu_ps (__m128 __W, __mmask8 __U, void const *__P) {
+ return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_expandloadu_ps (__mmask8 __U, void const *__P) {
+ return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_expandloadu_ps (__m256 __W, __mmask8 __U, void const *__P) {
+ return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_expandloadu_ps (__mmask8 __U, void const *__P) {
+ return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_expandloadu_epi32 (__m128i __W, __mmask8 __U, void const *__P) {
+ return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P,
+ (__v4si) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) {
+ return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_expandloadu_epi32 (__m256i __W, __mmask8 __U,
+ void const *__P) {
+ return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P,
+ (__v8si) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) {
+ return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_expand_ps (__m128 __W, __mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_expand_ps (__mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_expand_ps (__m256 __W, __mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_expand_ps (__mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_expand_epi32 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_expand_epi32 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_expand_epi32 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_expand_epi32 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
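+/* getexp extracts each element's exponent as a floating-point value,
+   approximately floor(log2(|x|)).  */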
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_getexp_pd (__m128d __A) {
+ return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_getexp_pd (__m128d __W, __mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_getexp_pd (__mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_getexp_pd (__m256d __A) {
+ return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_getexp_pd (__m256d __W, __mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_getexp_pd (__mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_getexp_ps (__m128 __A) {
+ return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_getexp_ps (__m128 __W, __mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_getexp_ps (__mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_getexp_ps (__m256 __A) {
+ return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_getexp_ps (__m256 __W, __mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_getexp_ps (__mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_max_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_maxpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_max_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_maxpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_max_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_maxpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_max_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_maxpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_max_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_maxps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_max_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_maxps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_max_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_maxps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_max_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_maxps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_min_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_minpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_min_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_minpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_min_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_minpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_min_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_minpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_min_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_minps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_min_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_minps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_min_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_minps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_min_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_minps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_mul_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_mulpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_mul_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_mulpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_mul_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_mulpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_mul_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_mulpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_mul_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_mulps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_mul_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_mulps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_mul_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_mulps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_mul_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_mulps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_abs_epi32 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_pabsd128_mask ((__v4si) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_abs_epi32 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_pabsd128_mask ((__v4si) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_abs_epi32 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_pabsd256_mask ((__v8si) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_abs_epi32 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_pabsd256_mask ((__v8si) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
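+/* 64-bit integer absolute value has no SSE/AVX counterpart, so unmasked
+   forms are provided as well.  */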
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_abs_epi64 (__m128i __A) {
+ return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_abs_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_abs_epi64 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_abs_epi64 (__m256i __A) {
+ return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_abs_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_abs_epi64 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
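+/* Masked integer max/min.  The 64-bit (epi64/epu64) operations are new
+   with AVX-512 and therefore also get unmasked forms.  */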
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epi32 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxsd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxsd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epi32 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxsd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epi32 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxsd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epi64 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epi64 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epi64 (__m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epi64 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epi64 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epi64 (__m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epu32 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxud128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epu32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxud128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epu32 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxud256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epu32 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxud256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epu64 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epu64 (__m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epu64 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epu64 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epu64 (__m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epu64 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epi32 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminsd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pminsd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epi32 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminsd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epi32 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pminsd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epi64 (__m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epi64 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epi64 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epi64 (__m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epi64 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epi64 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epu32 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminud128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epu32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pminud128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epu32 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminud256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epu32 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pminud256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epu64 (__m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epu64 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epu64 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epu64 (__m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epu64 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epu64 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
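+/* roundscale rounds each element to the number of fraction bits encoded in
+   __imm.  These are macros rather than functions because __imm must be an
+   integer constant expression (it becomes an instruction immediate).  */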
+#define _mm_roundscale_pd(__A, __imm) __extension__ ({ \
+ (__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df) __A, \
+ __imm, (__v2df) _mm_setzero_pd (), (__mmask8) -1); })
+
+
+#define _mm_mask_roundscale_pd(__W, __U, __A, __imm) __extension__ ({ \
+ (__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df) __A, __imm, \
+ (__v2df) __W, (__mmask8) __U); })
+
+
+#define _mm_maskz_roundscale_pd(__U, __A, __imm) __extension__ ({ \
+ (__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df) __A, __imm, \
+ (__v2df) _mm_setzero_pd (), (__mmask8) __U); })
+
+
+#define _mm256_roundscale_pd(__A, __imm) __extension__ ({ \
+ (__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df) __A, __imm, \
+ (__v4df) _mm256_setzero_pd (), (__mmask8) -1); })
+
+
+#define _mm256_mask_roundscale_pd(__W, __U, __A, __imm) __extension__ ({ \
+ (__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df) __A, __imm, \
+ (__v4df) __W, (__mmask8) __U); })
+
+
+#define _mm256_maskz_roundscale_pd(__U, __A, __imm) __extension__ ({ \
+ (__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df) __A, __imm, \
+ (__v4df) _mm256_setzero_pd(), (__mmask8) __U); })
+
+#define _mm_roundscale_ps(__A, __imm) __extension__ ({ \
+ (__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf) __A, __imm, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1); })
+
+
+#define _mm_mask_roundscale_ps(__W, __U, __A, __imm) __extension__ ({ \
+ (__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf) __A, __imm, \
+ (__v4sf) __W, (__mmask8) __U); })
+
+
+#define _mm_maskz_roundscale_ps(__U, __A, __imm) __extension__ ({ \
+ (__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf) __A, __imm, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U); })
+
+#define _mm256_roundscale_ps(__A, __imm) __extension__ ({ \
+ (__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf) __A, __imm, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) -1); })
+
+#define _mm256_mask_roundscale_ps(__W, __U, __A, __imm) __extension__ ({ \
+ (__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf) __A, __imm, \
+ (__v8sf) __W, (__mmask8) __U); })
+
+
+#define _mm256_maskz_roundscale_ps(__U, __A, __imm) __extension__ ({ \
+ (__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf) __A, __imm, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) __U); })
+
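+/* scalef computes __A * 2^floor(__B) for each element.  */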
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_scalef_pd (__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_scalef_pd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128d __B) {
+ return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_scalef_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_scalef_pd (__m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_scalef_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_scalef_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_scalef_ps (__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_scalef_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_scalef_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_scalef_ps (__m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_scalef_ps (__m256 __W, __mmask8 __U, __m256 __A,
+ __m256 __B) {
+ return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_scalef_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
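+/* Scatter stores: for each set mask bit i, element i of __v1 is written to
+   __addr + __index[i] * __scale.  Macros again, since __scale must be an
+   immediate (1, 2, 4, or 8).  */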
+#define _mm_i64scatter_pd(__addr,__index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv2df(__addr, (__mmask8) 0xFF, (__v2di) __index, \
+ (__v2df) __v1, __scale); })
+
+#define _mm_mask_i64scatter_pd(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv2df (__addr, __mask, (__v2di) __index, \
+ (__v2df) __v1, __scale); })
+
+
+#define _mm_i64scatter_epi64(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv2di (__addr, (__mmask8) 0xFF, \
+ (__v2di) __index, (__v2di) __v1, __scale); })
+
+#define _mm_mask_i64scatter_epi64(__addr, __mask, __index, __v1,\
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv2di (__addr, __mask, (__v2di) __index,\
+ (__v2di) __v1, __scale); })
+
+#define _mm256_i64scatter_pd(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4df (__addr, (__mmask8) 0xFF,\
+ (__v4di) __index, (__v4df) __v1, __scale); })
+
+#define _mm256_mask_i64scatter_pd(__addr, __mask, __index, __v1,\
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4df (__addr, __mask, (__v4di) __index,\
+ (__v4df) __v1, __scale); })
+
+#define _mm256_i64scatter_epi64(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4di (__addr, (__mmask8) 0xFF, (__v4di) __index,\
+ (__v4di) __v1, __scale); })
+
+#define _mm256_mask_i64scatter_epi64(__addr, __mask, __index, __v1,\
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4di (__addr, __mask, (__v4di) __index,\
+ (__v4di) __v1, __scale); })
+
+#define _mm_i64scatter_ps(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4sf (__addr, (__mmask8) 0xFF,\
+ (__v2di) __index, (__v4sf) __v1, __scale); })
+
+#define _mm_mask_i64scatter_ps(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4sf (__addr, __mask, (__v2di) __index,\
+ (__v4sf) __v1, __scale); })
+
+#define _mm_i64scatter_epi32(__addr, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4si (__addr, (__mmask8) 0xFF,\
+ (__v2di) __index, (__v4si) __v1, __scale); })
+
+#define _mm_mask_i64scatter_epi32(__addr, __mask, __index, __v1,\
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4si (__addr, __mask, (__v2di) __index,\
+ (__v4si) __v1, __scale); })
+
+#define _mm256_i64scatter_ps(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv8sf (__addr, (__mmask8) 0xFF, (__v4di) __index, \
+ (__v4sf) __v1, __scale); })
+
+#define _mm256_mask_i64scatter_ps(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv8sf (__addr, __mask, (__v4di) __index, \
+ (__v4sf) __v1, __scale); })
+
+#define _mm256_i64scatter_epi32(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv8si (__addr, (__mmask8) 0xFF, \
+ (__v4di) __index, (__v4si) __v1, __scale); })
+
+#define _mm256_mask_i64scatter_epi32(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv8si(__addr, __mask, (__v4di) __index, \
+ (__v4si) __v1, __scale); })
+
+#define _mm_i32scatter_pd(__addr, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv2df (__addr, (__mmask8) 0xFF, \
+ (__v4si) __index, (__v2df) __v1, __scale); })
+
+#define _mm_mask_i32scatter_pd(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv2df (__addr, __mask, (__v4si) __index,\
+ (__v2df) __v1, __scale); })
+
+#define _mm_i32scatter_epi64(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv2di (__addr, (__mmask8) 0xFF, \
+ (__v4si) __index, (__v2di) __v1, __scale); })
+
+#define _mm_mask_i32scatter_epi64(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv2di (__addr, __mask, (__v4si) __index, \
+ (__v2di) __v1, __scale); })
+
+#define _mm256_i32scatter_pd(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4df (__addr, (__mmask8) 0xFF, \
+ (__v4si) __index, (__v4df) __v1, __scale); })
+
+#define _mm256_mask_i32scatter_pd(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4df (__addr, __mask, (__v4si) __index, \
+ (__v4df) __v1, __scale); })
+
+#define _mm256_i32scatter_epi64(__addr, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4di (__addr, (__mmask8) 0xFF, \
+ (__v4si) __index, (__v4di) __v1, __scale); })
+
+#define _mm256_mask_i32scatter_epi64(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4di (__addr, __mask, (__v4si) __index, \
+ (__v4di) __v1, __scale); })
+
+#define _mm_i32scatter_ps(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4sf (__addr, (__mmask8) 0xFF, \
+ (__v4si) __index, (__v4sf) __v1, __scale); })
+
+#define _mm_mask_i32scatter_ps(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4sf (__addr, __mask, (__v4si) __index, \
+ (__v4sf) __v1, __scale); })
+
+#define _mm_i32scatter_epi32(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4si (__addr, (__mmask8) 0xFF, \
+ (__v4si) __index, (__v4si) __v1, __scale); })
+
+#define _mm_mask_i32scatter_epi32(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4si (__addr, __mask, (__v4si) __index,\
+ (__v4si) __v1, __scale); })
+
+#define _mm256_i32scatter_ps(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv8sf (__addr, (__mmask8) 0xFF, \
+ (__v8si) __index, (__v8sf) __v1, __scale); })
+
+#define _mm256_mask_i32scatter_ps(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv8sf (__addr, __mask, (__v8si) __index,\
+ (__v8sf) __v1, __scale); })
+
+#define _mm256_i32scatter_epi32(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv8si (__addr, (__mmask8) 0xFF, \
+ (__v8si) __index, (__v8si) __v1, __scale); })
+
+#define _mm256_mask_i32scatter_epi32(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv8si (__addr, __mask, (__v8si) __index, \
+ (__v8si) __v1, __scale); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_sqrt_pd (__m128d __W, __mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_sqrtpd128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_sqrt_pd (__mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_sqrtpd128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_sqrt_pd (__m256d __W, __mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_sqrtpd256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_sqrt_pd (__mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_sqrtpd256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_sqrt_ps (__m128 __W, __mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_sqrtps128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_sqrt_ps (__mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_sqrtps128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_sqrt_ps (__m256 __W, __mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_sqrtps256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_sqrt_ps (__mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_sqrtps256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_sub_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_subpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_sub_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_subpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_sub_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_subpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_sub_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_subpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_sub_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_subps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_sub_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_subps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_sub_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_subps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_sub_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_subps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
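+/* Two-source permutes: each index in __I selects an element from the
+   concatenation of __A and __B.  The mask2 forms merge masked-off lanes
+   from the index operand __I, while the plain mask forms merge from __A.  */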
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_epi32 (__m128i __A, __m128i __I, __mmask8 __U,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermi2vard128_mask ((__v4si) __A,
+ (__v4si) __I
+ /* idx */ ,
+ (__v4si) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_epi32 (__m256i __A, __m256i __I,
+ __mmask8 __U, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermi2vard256_mask ((__v8si) __A,
+ (__v8si) __I
+ /* idx */ ,
+ (__v8si) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_pd (__m128d __A, __m128i __I, __mmask8 __U,
+ __m128d __B) {
+ return (__m128d) __builtin_ia32_vpermi2varpd128_mask ((__v2df) __A,
+ (__v2di) __I
+ /* idx */ ,
+ (__v2df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_pd (__m256d __A, __m256i __I, __mmask8 __U,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_vpermi2varpd256_mask ((__v4df) __A,
+ (__v4di) __I
+ /* idx */ ,
+ (__v4df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_ps (__m128 __A, __m128i __I, __mmask8 __U,
+ __m128 __B) {
+ return (__m128) __builtin_ia32_vpermi2varps128_mask ((__v4sf) __A,
+ (__v4si) __I
+ /* idx */ ,
+ (__v4sf) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_ps (__m256 __A, __m256i __I, __mmask8 __U,
+ __m256 __B) {
+ return (__m256) __builtin_ia32_vpermi2varps256_mask ((__v8sf) __A,
+ (__v8si) __I
+ /* idx */ ,
+ (__v8sf) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_epi64 (__m128i __A, __m128i __I, __mmask8 __U,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermi2varq128_mask ((__v2di) __A,
+ (__v2di) __I
+ /* idx */ ,
+ (__v2di) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_epi64 (__m256i __A, __m256i __I,
+ __mmask8 __U, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermi2varq256_mask ((__v4di) __A,
+ (__v4di) __I
+ /* idx */ ,
+ (__v4di) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_permutex2var_epi32 (__m128i __A, __m128i __I, __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2vard128_mask ((__v4si) __I
+ /* idx */ ,
+ (__v4si) __A,
+ (__v4si) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_epi32 (__m128i __A, __mmask8 __U, __m128i __I,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2vard128_mask ((__v4si) __I
+ /* idx */ ,
+ (__v4si) __A,
+ (__v4si) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_epi32 (__mmask8 __U, __m128i __A, __m128i __I,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2vard128_maskz ((__v4si) __I
+ /* idx */ ,
+ (__v4si) __A,
+ (__v4si) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutex2var_epi32 (__m256i __A, __m256i __I, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2vard256_mask ((__v8si) __I
+ /* idx */ ,
+ (__v8si) __A,
+ (__v8si) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_epi32 (__m256i __A, __mmask8 __U, __m256i __I,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2vard256_mask ((__v8si) __I
+ /* idx */ ,
+ (__v8si) __A,
+ (__v8si) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_epi32 (__mmask8 __U, __m256i __A,
+ __m256i __I, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2vard256_maskz ((__v8si) __I
+ /* idx */ ,
+ (__v8si) __A,
+ (__v8si) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_permutex2var_pd (__m128d __A, __m128i __I, __m128d __B) {
+ return (__m128d) __builtin_ia32_vpermt2varpd128_mask ((__v2di) __I
+ /* idx */ ,
+ (__v2df) __A,
+ (__v2df) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_pd (__m128d __A, __mmask8 __U, __m128i __I,
+ __m128d __B) {
+ return (__m128d) __builtin_ia32_vpermt2varpd128_mask ((__v2di) __I
+ /* idx */ ,
+ (__v2df) __A,
+ (__v2df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_pd (__mmask8 __U, __m128d __A, __m128i __I,
+ __m128d __B) {
+ return (__m128d) __builtin_ia32_vpermt2varpd128_maskz ((__v2di) __I
+ /* idx */ ,
+ (__v2df) __A,
+ (__v2df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_permutex2var_pd (__m256d __A, __m256i __I, __m256d __B) {
+ return (__m256d) __builtin_ia32_vpermt2varpd256_mask ((__v4di) __I
+ /* idx */ ,
+ (__v4df) __A,
+ (__v4df) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_pd (__m256d __A, __mmask8 __U, __m256i __I,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_vpermt2varpd256_mask ((__v4di) __I
+ /* idx */ ,
+ (__v4df) __A,
+ (__v4df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_pd (__mmask8 __U, __m256d __A, __m256i __I,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_vpermt2varpd256_maskz ((__v4di) __I
+ /* idx */ ,
+ (__v4df) __A,
+ (__v4df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_permutex2var_ps (__m128 __A, __m128i __I, __m128 __B) {
+ return (__m128) __builtin_ia32_vpermt2varps128_mask ((__v4si) __I
+ /* idx */ ,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_ps (__m128 __A, __mmask8 __U, __m128i __I,
+ __m128 __B) {
+ return (__m128) __builtin_ia32_vpermt2varps128_mask ((__v4si) __I
+ /* idx */ ,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_ps (__mmask8 __U, __m128 __A, __m128i __I,
+ __m128 __B) {
+ return (__m128) __builtin_ia32_vpermt2varps128_maskz ((__v4si) __I
+ /* idx */ ,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_permutex2var_ps (__m256 __A, __m256i __I, __m256 __B) {
+ return (__m256) __builtin_ia32_vpermt2varps256_mask ((__v8si) __I
+ /* idx */ ,
+ (__v8sf) __A,
+ (__v8sf) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_ps (__m256 __A, __mmask8 __U, __m256i __I,
+ __m256 __B) {
+ return (__m256) __builtin_ia32_vpermt2varps256_mask ((__v8si) __I
+ /* idx */ ,
+ (__v8sf) __A,
+ (__v8sf) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_ps (__mmask8 __U, __m256 __A, __m256i __I,
+ __m256 __B) {
+ return (__m256) __builtin_ia32_vpermt2varps256_maskz ((__v8si) __I
+ /* idx */ ,
+ (__v8sf) __A,
+ (__v8sf) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_permutex2var_epi64 (__m128i __A, __m128i __I, __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2varq128_mask ((__v2di) __I
+ /* idx */ ,
+ (__v2di) __A,
+ (__v2di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_epi64 (__m128i __A, __mmask8 __U, __m128i __I,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2varq128_mask ((__v2di) __I
+ /* idx */ ,
+ (__v2di) __A,
+ (__v2di) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_epi64 (__mmask8 __U, __m128i __A, __m128i __I,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2varq128_maskz ((__v2di) __I
+ /* idx */ ,
+ (__v2di) __A,
+ (__v2di) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutex2var_epi64 (__m256i __A, __m256i __I, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2varq256_mask ((__v4di) __I
+ /* idx */ ,
+ (__v4di) __A,
+ (__v4di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_epi64 (__m256i __A, __mmask8 __U, __m256i __I,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2varq256_mask ((__v4di) __I
+ /* idx */ ,
+ (__v4di) __A,
+ (__v4di) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_epi64 (__mmask8 __U, __m256i __A,
+ __m256i __I, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2varq256_maskz ((__v4di) __I
+ /* idx */ ,
+ (__v4di) __A,
+ (__v4di) __B,
+ (__mmask8)
+ __U);
+}
+
+#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_BOTH
+
+#endif /* __AVX512VLINTRIN_H */
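A minimal sketch of the mask/maskz convention used throughout the header above (demo code of mine, assuming an AVX-512VL-capable compiler and -mavx512vl; not part of the patch):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m256 src = _mm256_set1_ps(-1.0f); /* pass-through lanes for the _mask_ form */
  __m256 a   = _mm256_set1_ps(4.0f);
  __mmask8 k = 0x0F;                  /* select the low four lanes */

  /* _mask_: unselected lanes keep src; _maskz_: unselected lanes become 0. */
  __m256 m = _mm256_mask_sqrt_ps(src, k, a);
  __m256 z = _mm256_maskz_sqrt_ps(k, a);

  float out[8];
  _mm256_storeu_ps(out, m);
  printf("mask:  lane0=%g lane7=%g\n", out[0], out[7]); /* 2 and -1 */
  _mm256_storeu_ps(out, z);
  printf("maskz: lane0=%g lane7=%g\n", out[0], out[7]); /* 2 and 0 */
  return 0;
}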
diff --git a/contrib/llvm/tools/clang/lib/Headers/avxintrin.h b/contrib/llvm/tools/clang/lib/Headers/avxintrin.h
new file mode 100644
index 0000000..6d1ca54
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/avxintrin.h
@@ -0,0 +1,1318 @@
+/*===---- avxintrin.h - AVX intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avxintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVXINTRIN_H
+#define __AVXINTRIN_H
+
+typedef double __v4df __attribute__ ((__vector_size__ (32)));
+typedef float __v8sf __attribute__ ((__vector_size__ (32)));
+typedef long long __v4di __attribute__ ((__vector_size__ (32)));
+typedef int __v8si __attribute__ ((__vector_size__ (32)));
+typedef short __v16hi __attribute__ ((__vector_size__ (32)));
+typedef char __v32qi __attribute__ ((__vector_size__ (32)));
+
+/* We need an explicitly signed variant for char. Note that this shouldn't
+ * appear in the interface though. */
+typedef signed char __v32qs __attribute__((__vector_size__(32)));
+
+typedef float __m256 __attribute__ ((__vector_size__ (32)));
+typedef double __m256d __attribute__((__vector_size__(32)));
+typedef long long __m256i __attribute__((__vector_size__(32)));
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx")))
+
+/* Arithmetic */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_add_pd(__m256d __a, __m256d __b)
+{
+ return __a+__b;
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_add_ps(__m256 __a, __m256 __b)
+{
+ return __a+__b;
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_sub_pd(__m256d __a, __m256d __b)
+{
+ return __a-__b;
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_sub_ps(__m256 __a, __m256 __b)
+{
+ return __a-__b;
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_addsub_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_addsub_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)__builtin_ia32_addsubps256((__v8sf)__a, (__v8sf)__b);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_div_pd(__m256d __a, __m256d __b)
+{
+ return __a / __b;
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_div_ps(__m256 __a, __m256 __b)
+{
+ return __a / __b;
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_max_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)__builtin_ia32_maxpd256((__v4df)__a, (__v4df)__b);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_max_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)__builtin_ia32_maxps256((__v8sf)__a, (__v8sf)__b);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_min_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)__builtin_ia32_minpd256((__v4df)__a, (__v4df)__b);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_min_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)__builtin_ia32_minps256((__v8sf)__a, (__v8sf)__b);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_mul_pd(__m256d __a, __m256d __b)
+{
+ return __a * __b;
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_mul_ps(__m256 __a, __m256 __b)
+{
+ return __a * __b;
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_sqrt_pd(__m256d __a)
+{
+ return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_sqrt_ps(__m256 __a)
+{
+ return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_rsqrt_ps(__m256 __a)
+{
+ return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__a);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_rcp_ps(__m256 __a)
+{
+ return (__m256)__builtin_ia32_rcpps256((__v8sf)__a);
+}
+
+#define _mm256_round_pd(V, M) __extension__ ({ \
+ (__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M)); })
+
+#define _mm256_round_ps(V, M) __extension__ ({ \
+ (__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M)); })
+
+#define _mm256_ceil_pd(V) _mm256_round_pd((V), _MM_FROUND_CEIL)
+#define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
+#define _mm256_ceil_ps(V) _mm256_round_ps((V), _MM_FROUND_CEIL)
+#define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR)
+
+/* Logical */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_and_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)((__v4di)__a & (__v4di)__b);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_and_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)((__v8si)__a & (__v8si)__b);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_andnot_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)(~(__v4di)__a & (__v4di)__b);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_andnot_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)(~(__v8si)__a & (__v8si)__b);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_or_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)((__v4di)__a | (__v4di)__b);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_or_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)((__v8si)__a | (__v8si)__b);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_xor_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)((__v4di)__a ^ (__v4di)__b);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_xor_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)((__v8si)__a ^ (__v8si)__b);
+}
+
+/* Horizontal arithmetic */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_hadd_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_hadd_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_hsub_pd(__m256d __a, __m256d __b)
+{
+ return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_hsub_ps(__m256 __a, __m256 __b)
+{
+ return (__m256)__builtin_ia32_hsubps256((__v8sf)__a, (__v8sf)__b);
+}
+
+/* Vector permutations */
+static __inline __m128d __DEFAULT_FN_ATTRS
+_mm_permutevar_pd(__m128d __a, __m128i __c)
+{
+ return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_permutevar_pd(__m256d __a, __m256i __c)
+{
+ return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__a, (__v4di)__c);
+}
+
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm_permutevar_ps(__m128 __a, __m128i __c)
+{
+ return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_permutevar_ps(__m256 __a, __m256i __c)
+{
+ return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c);
+}
+
+#define _mm_permute_pd(A, C) __extension__ ({ \
+ (__m128d)__builtin_shufflevector((__v2df)(__m128d)(A), \
+ (__v2df)_mm_setzero_pd(), \
+ (C) & 0x1, ((C) & 0x2) >> 1); })
+
+#define _mm256_permute_pd(A, C) __extension__ ({ \
+ (__m256d)__builtin_shufflevector((__v4df)(__m256d)(A), \
+ (__v4df)_mm256_setzero_pd(), \
+ (C) & 0x1, ((C) & 0x2) >> 1, \
+ 2 + (((C) & 0x4) >> 2), \
+ 2 + (((C) & 0x8) >> 3)); })
+
+#define _mm_permute_ps(A, C) __extension__ ({ \
+ (__m128)__builtin_shufflevector((__v4sf)(__m128)(A), \
+ (__v4sf)_mm_setzero_ps(), \
+ (C) & 0x3, ((C) & 0xc) >> 2, \
+ ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6); })
+
+#define _mm256_permute_ps(A, C) __extension__ ({ \
+ (__m256)__builtin_shufflevector((__v8sf)(__m256)(A), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (C) & 0x3, ((C) & 0xc) >> 2, \
+ ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6, \
+ 4 + (((C) & 0x03) >> 0), \
+ 4 + (((C) & 0x0c) >> 2), \
+ 4 + (((C) & 0x30) >> 4), \
+ 4 + (((C) & 0xc0) >> 6)); })
+
+#define _mm256_permute2f128_pd(V1, V2, M) __extension__ ({ \
+ (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(V1), \
+ (__v4df)(__m256d)(V2), (M)); })
+
+#define _mm256_permute2f128_ps(V1, V2, M) __extension__ ({ \
+ (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)(__m256)(V1), \
+ (__v8sf)(__m256)(V2), (M)); })
+
+#define _mm256_permute2f128_si256(V1, V2, M) __extension__ ({ \
+ (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)(__m256i)(V1), \
+ (__v8si)(__m256i)(V2), (M)); })
+
+/* Vector Blend */
+#define _mm256_blend_pd(V1, V2, M) __extension__ ({ \
+ (__m256d)__builtin_shufflevector((__v4df)(__m256d)(V1), \
+ (__v4df)(__m256d)(V2), \
+ (((M) & 0x01) ? 4 : 0), \
+ (((M) & 0x02) ? 5 : 1), \
+ (((M) & 0x04) ? 6 : 2), \
+ (((M) & 0x08) ? 7 : 3)); })
+
+#define _mm256_blend_ps(V1, V2, M) __extension__ ({ \
+ (__m256)__builtin_shufflevector((__v8sf)(__m256)(V1), \
+ (__v8sf)(__m256)(V2), \
+ (((M) & 0x01) ? 8 : 0), \
+ (((M) & 0x02) ? 9 : 1), \
+ (((M) & 0x04) ? 10 : 2), \
+ (((M) & 0x08) ? 11 : 3), \
+ (((M) & 0x10) ? 12 : 4), \
+ (((M) & 0x20) ? 13 : 5), \
+ (((M) & 0x40) ? 14 : 6), \
+ (((M) & 0x80) ? 15 : 7)); })
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c)
+{
+ return (__m256d)__builtin_ia32_blendvpd256(
+ (__v4df)__a, (__v4df)__b, (__v4df)__c);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
+{
+ return (__m256)__builtin_ia32_blendvps256(
+ (__v8sf)__a, (__v8sf)__b, (__v8sf)__c);
+}
+
+/* Vector Dot Product */
+#define _mm256_dp_ps(V1, V2, M) __extension__ ({ \
+ (__m256)__builtin_ia32_dpps256((__v8sf)(__m256)(V1), \
+ (__v8sf)(__m256)(V2), (M)); })
+
+/* Vector shuffle */
+#define _mm256_shuffle_ps(a, b, mask) __extension__ ({ \
+ (__m256)__builtin_shufflevector((__v8sf)(__m256)(a), \
+ (__v8sf)(__m256)(b), \
+ (mask) & 0x3, \
+ ((mask) & 0xc) >> 2, \
+ (((mask) & 0x30) >> 4) + 8, \
+ (((mask) & 0xc0) >> 6) + 8, \
+ ((mask) & 0x3) + 4, \
+ (((mask) & 0xc) >> 2) + 4, \
+ (((mask) & 0x30) >> 4) + 12, \
+ (((mask) & 0xc0) >> 6) + 12); })
+
+#define _mm256_shuffle_pd(a, b, mask) __extension__ ({ \
+ (__m256d)__builtin_shufflevector((__v4df)(__m256d)(a), \
+ (__v4df)(__m256d)(b), \
+ (mask) & 0x1, \
+ (((mask) & 0x2) >> 1) + 4, \
+ (((mask) & 0x4) >> 2) + 2, \
+ (((mask) & 0x8) >> 3) + 6); })
+
+/* Compare */
+#define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */
+#define _CMP_LT_OS 0x01 /* Less-than (ordered, signaling) */
+#define _CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */
+#define _CMP_UNORD_Q 0x03 /* Unordered (non-signaling) */
+#define _CMP_NEQ_UQ 0x04 /* Not-equal (unordered, non-signaling) */
+#define _CMP_NLT_US 0x05 /* Not-less-than (unordered, signaling) */
+#define _CMP_NLE_US 0x06 /* Not-less-than-or-equal (unordered, signaling) */
+#define _CMP_ORD_Q 0x07 /* Ordered (non-signaling) */
+#define _CMP_EQ_UQ 0x08 /* Equal (unordered, non-signaling) */
+#define _CMP_NGE_US 0x09 /* Not-greater-than-or-equal (unord, signaling) */
+#define _CMP_NGT_US 0x0a /* Not-greater-than (unordered, signaling) */
+#define _CMP_FALSE_OQ 0x0b /* False (ordered, non-signaling) */
+#define _CMP_NEQ_OQ 0x0c /* Not-equal (ordered, non-signaling) */
+#define _CMP_GE_OS 0x0d /* Greater-than-or-equal (ordered, signaling) */
+#define _CMP_GT_OS 0x0e /* Greater-than (ordered, signaling) */
+#define _CMP_TRUE_UQ 0x0f /* True (unordered, non-signaling) */
+#define _CMP_EQ_OS 0x10 /* Equal (ordered, signaling) */
+#define _CMP_LT_OQ 0x11 /* Less-than (ordered, non-signaling) */
+#define _CMP_LE_OQ 0x12 /* Less-than-or-equal (ordered, non-signaling) */
+#define _CMP_UNORD_S 0x13 /* Unordered (signaling) */
+#define _CMP_NEQ_US 0x14 /* Not-equal (unordered, signaling) */
+#define _CMP_NLT_UQ 0x15 /* Not-less-than (unordered, non-signaling) */
+#define _CMP_NLE_UQ 0x16 /* Not-less-than-or-equal (unord, non-signaling) */
+#define _CMP_ORD_S 0x17 /* Ordered (signaling) */
+#define _CMP_EQ_US 0x18 /* Equal (unordered, signaling) */
+#define _CMP_NGE_UQ 0x19 /* Not-greater-than-or-equal (unord, non-sign) */
+#define _CMP_NGT_UQ 0x1a /* Not-greater-than (unordered, non-signaling) */
+#define _CMP_FALSE_OS 0x1b /* False (ordered, signaling) */
+#define _CMP_NEQ_OS 0x1c /* Not-equal (ordered, signaling) */
+#define _CMP_GE_OQ 0x1d /* Greater-than-or-equal (ordered, non-signaling) */
+#define _CMP_GT_OQ 0x1e /* Greater-than (ordered, non-signaling) */
+#define _CMP_TRUE_US 0x1f /* True (unordered, signaling) */
+
+#define _mm_cmp_pd(a, b, c) __extension__ ({ \
+ (__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \
+ (__v2df)(__m128d)(b), (c)); })
+
+#define _mm_cmp_ps(a, b, c) __extension__ ({ \
+ (__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \
+ (__v4sf)(__m128)(b), (c)); })
+
+#define _mm256_cmp_pd(a, b, c) __extension__ ({ \
+ (__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \
+ (__v4df)(__m256d)(b), (c)); })
+
+#define _mm256_cmp_ps(a, b, c) __extension__ ({ \
+ (__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \
+ (__v8sf)(__m256)(b), (c)); })
+
+#define _mm_cmp_sd(a, b, c) __extension__ ({ \
+ (__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \
+ (__v2df)(__m128d)(b), (c)); })
+
+#define _mm_cmp_ss(a, b, c) __extension__ ({ \
+ (__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \
+ (__v4sf)(__m128)(b), (c)); })
+
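+/* Usage note (illustrative, not part of the original header): the predicate
+ * picks the relation and its quiet/signaling behavior, and each lane that
+ * satisfies it is set to all-ones, so the result works as a bitmask:
+ *
+ *   __m256d m = _mm256_cmp_pd(a, b, _CMP_LT_OQ);  // lane i: a<b ? ~0 : 0
+ *   __m256d r = _mm256_and_pd(m, a);              // keep a where a < b
+ */
+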
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_extract_epi32(__m256i __a, const int __imm)
+{
+ __v8si __b = (__v8si)__a;
+ return __b[__imm & 7];
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_extract_epi16(__m256i __a, const int __imm)
+{
+ __v16hi __b = (__v16hi)__a;
+ return __b[__imm & 15];
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_extract_epi8(__m256i __a, const int __imm)
+{
+ __v32qi __b = (__v32qi)__a;
+ return __b[__imm & 31];
+}
+
+#ifdef __x86_64__
+static __inline long long __DEFAULT_FN_ATTRS
+_mm256_extract_epi64(__m256i __a, const int __imm)
+{
+ __v4di __b = (__v4di)__a;
+ return __b[__imm & 3];
+}
+#endif
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_insert_epi32(__m256i __a, int __b, int const __imm)
+{
+ __v8si __c = (__v8si)__a;
+ __c[__imm & 7] = __b;
+ return (__m256i)__c;
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_insert_epi16(__m256i __a, int __b, int const __imm)
+{
+ __v16hi __c = (__v16hi)__a;
+ __c[__imm & 15] = __b;
+ return (__m256i)__c;
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_insert_epi8(__m256i __a, int __b, int const __imm)
+{
+ __v32qi __c = (__v32qi)__a;
+ __c[__imm & 31] = __b;
+ return (__m256i)__c;
+}
+
+#ifdef __x86_64__
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_insert_epi64(__m256i __a, long long __b, int const __imm)
+{
+ __v4di __c = (__v4di)__a;
+ __c[__imm & 3] = __b;
+ return (__m256i)__c;
+}
+#endif
+
+/* Conversion */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_cvtepi32_pd(__m128i __a)
+{
+ return (__m256d)__builtin_ia32_cvtdq2pd256((__v4si) __a);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_cvtepi32_ps(__m256i __a)
+{
+ return (__m256)__builtin_ia32_cvtdq2ps256((__v8si) __a);
+}
+
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm256_cvtpd_ps(__m256d __a)
+{
+ return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) __a);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtps_epi32(__m256 __a)
+{
+ return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) __a);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_cvtps_pd(__m128 __a)
+{
+ return (__m256d)__builtin_ia32_cvtps2pd256((__v4sf) __a);
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm256_cvttpd_epi32(__m256d __a)
+{
+ return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a);
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtpd_epi32(__m256d __a)
+{
+ return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttps_epi32(__m256 __a)
+{
+ return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf) __a);
+}
+
+/* Vector replicate */
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_movehdup_ps(__m256 __a)
+{
+ return __builtin_shufflevector(__a, __a, 1, 1, 3, 3, 5, 5, 7, 7);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_moveldup_ps(__m256 __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 0, 2, 2, 4, 4, 6, 6);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_movedup_pd(__m256d __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 0, 2, 2);
+}
+
+/* Unpack and Interleave */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_unpackhi_pd(__m256d __a, __m256d __b)
+{
+ return __builtin_shufflevector(__a, __b, 1, 5, 1+2, 5+2);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_unpacklo_pd(__m256d __a, __m256d __b)
+{
+ return __builtin_shufflevector(__a, __b, 0, 4, 0+2, 4+2);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_unpackhi_ps(__m256 __a, __m256 __b)
+{
+ return __builtin_shufflevector(__a, __b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_unpacklo_ps(__m256 __a, __m256 __b)
+{
+ return __builtin_shufflevector(__a, __b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1);
+}
+
+/* Bit Test */
+static __inline int __DEFAULT_FN_ATTRS
+_mm_testz_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm_testc_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm_testnzc_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm_testz_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm_testc_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm_testnzc_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_vtestnzcps((__v4sf)__a, (__v4sf)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testz_pd(__m256d __a, __m256d __b)
+{
+ return __builtin_ia32_vtestzpd256((__v4df)__a, (__v4df)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testc_pd(__m256d __a, __m256d __b)
+{
+ return __builtin_ia32_vtestcpd256((__v4df)__a, (__v4df)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testnzc_pd(__m256d __a, __m256d __b)
+{
+ return __builtin_ia32_vtestnzcpd256((__v4df)__a, (__v4df)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testz_ps(__m256 __a, __m256 __b)
+{
+ return __builtin_ia32_vtestzps256((__v8sf)__a, (__v8sf)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testc_ps(__m256 __a, __m256 __b)
+{
+ return __builtin_ia32_vtestcps256((__v8sf)__a, (__v8sf)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testnzc_ps(__m256 __a, __m256 __b)
+{
+ return __builtin_ia32_vtestnzcps256((__v8sf)__a, (__v8sf)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testz_si256(__m256i __a, __m256i __b)
+{
+ return __builtin_ia32_ptestz256((__v4di)__a, (__v4di)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testc_si256(__m256i __a, __m256i __b)
+{
+ return __builtin_ia32_ptestc256((__v4di)__a, (__v4di)__b);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_testnzc_si256(__m256i __a, __m256i __b)
+{
+ return __builtin_ia32_ptestnzc256((__v4di)__a, (__v4di)__b);
+}
+
+/* Vector extract sign mask */
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_movemask_pd(__m256d __a)
+{
+ return __builtin_ia32_movmskpd256((__v4df)__a);
+}
+
+static __inline int __DEFAULT_FN_ATTRS
+_mm256_movemask_ps(__m256 __a)
+{
+ return __builtin_ia32_movmskps256((__v8sf)__a);
+}
+
+/* Vector zero */
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_zeroall(void)
+{
+ __builtin_ia32_vzeroall();
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_zeroupper(void)
+{
+ __builtin_ia32_vzeroupper();
+}
+
+/* Vector load with broadcast */
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm_broadcast_ss(float const *__a)
+{
+ float __f = *__a;
+ return (__m128)(__v4sf){ __f, __f, __f, __f };
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_broadcast_sd(double const *__a)
+{
+ double __d = *__a;
+ return (__m256d)(__v4df){ __d, __d, __d, __d };
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_broadcast_ss(float const *__a)
+{
+ float __f = *__a;
+ return (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f };
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_broadcast_pd(__m128d const *__a)
+{
+ return (__m256d)__builtin_ia32_vbroadcastf128_pd256(__a);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_broadcast_ps(__m128 const *__a)
+{
+ return (__m256)__builtin_ia32_vbroadcastf128_ps256(__a);
+}
+
+/* SIMD load ops */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_load_pd(double const *__p)
+{
+ return *(__m256d *)__p;
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_load_ps(float const *__p)
+{
+ return *(__m256 *)__p;
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_loadu_pd(double const *__p)
+{
+ struct __loadu_pd {
+ __m256d __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_pd*)__p)->__v;
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_loadu_ps(float const *__p)
+{
+ struct __loadu_ps {
+ __m256 __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_ps*)__p)->__v;
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_load_si256(__m256i const *__p)
+{
+ return *__p;
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_loadu_si256(__m256i const *__p)
+{
+ struct __loadu_si256 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_si256*)__p)->__v;
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_lddqu_si256(__m256i const *__p)
+{
+ return (__m256i)__builtin_ia32_lddqu256((char const *)__p);
+}
+
+/* SIMD store ops */
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_store_pd(double *__p, __m256d __a)
+{
+ *(__m256d *)__p = __a;
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_store_ps(float *__p, __m256 __a)
+{
+ *(__m256 *)__p = __a;
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_storeu_pd(double *__p, __m256d __a)
+{
+ __builtin_ia32_storeupd256(__p, (__v4df)__a);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_storeu_ps(float *__p, __m256 __a)
+{
+ __builtin_ia32_storeups256(__p, (__v8sf)__a);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_store_si256(__m256i *__p, __m256i __a)
+{
+ *__p = __a;
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_storeu_si256(__m256i *__p, __m256i __a)
+{
+ __builtin_ia32_storedqu256((char *)__p, (__v32qi)__a);
+}
+
+/* Conditional load ops */
+static __inline __m128d __DEFAULT_FN_ATTRS
+_mm_maskload_pd(double const *__p, __m128i __m)
+{
+ return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2di)__m);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_maskload_pd(double const *__p, __m256i __m)
+{
+ return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)__p,
+ (__v4di)__m);
+}
+
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm_maskload_ps(float const *__p, __m128i __m)
+{
+ return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4si)__m);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_maskload_ps(float const *__p, __m256i __m)
+{
+ return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__p, (__v8si)__m);
+}
+
+/* Conditional store ops */
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_maskstore_ps(float *__p, __m256i __m, __m256 __a)
+{
+ __builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8si)__m, (__v8sf)__a);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm_maskstore_pd(double *__p, __m128i __m, __m128d __a)
+{
+ __builtin_ia32_maskstorepd((__v2df *)__p, (__v2di)__m, (__v2df)__a);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_maskstore_pd(double *__p, __m256i __m, __m256d __a)
+{
+ __builtin_ia32_maskstorepd256((__v4df *)__p, (__v4di)__m, (__v4df)__a);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm_maskstore_ps(float *__p, __m128i __m, __m128 __a)
+{
+ __builtin_ia32_maskstoreps((__v4sf *)__p, (__v4si)__m, (__v4sf)__a);
+}
+
+/* Cacheability support ops */
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_stream_si256(__m256i *__a, __m256i __b)
+{
+ __builtin_ia32_movntdq256((__v4di *)__a, (__v4di)__b);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_stream_pd(double *__a, __m256d __b)
+{
+ __builtin_ia32_movntpd256(__a, (__v4df)__b);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_stream_ps(float *__p, __m256 __a)
+{
+ __builtin_ia32_movntps256(__p, (__v8sf)__a);
+}
+
+/* Create vectors */
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_undefined_pd(void)
+{
+ return (__m256d)__builtin_ia32_undef256();
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_undefined_ps(void)
+{
+ return (__m256)__builtin_ia32_undef256();
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_undefined_si256(void)
+{
+ return (__m256i)__builtin_ia32_undef256();
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_set_pd(double __a, double __b, double __c, double __d)
+{
+ return (__m256d){ __d, __c, __b, __a };
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_set_ps(float __a, float __b, float __c, float __d,
+ float __e, float __f, float __g, float __h)
+{
+ return (__m256){ __h, __g, __f, __e, __d, __c, __b, __a };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set_epi32(int __i0, int __i1, int __i2, int __i3,
+ int __i4, int __i5, int __i6, int __i7)
+{
+ return (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set_epi16(short __w15, short __w14, short __w13, short __w12,
+ short __w11, short __w10, short __w09, short __w08,
+ short __w07, short __w06, short __w05, short __w04,
+ short __w03, short __w02, short __w01, short __w00)
+{
+ return (__m256i)(__v16hi){ __w00, __w01, __w02, __w03, __w04, __w05, __w06,
+ __w07, __w08, __w09, __w10, __w11, __w12, __w13, __w14, __w15 };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set_epi8(char __b31, char __b30, char __b29, char __b28,
+ char __b27, char __b26, char __b25, char __b24,
+ char __b23, char __b22, char __b21, char __b20,
+ char __b19, char __b18, char __b17, char __b16,
+ char __b15, char __b14, char __b13, char __b12,
+ char __b11, char __b10, char __b09, char __b08,
+ char __b07, char __b06, char __b05, char __b04,
+ char __b03, char __b02, char __b01, char __b00)
+{
+ return (__m256i)(__v32qi){
+ __b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07,
+ __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15,
+ __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23,
+ __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31
+ };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d)
+{
+ return (__m256i)(__v4di){ __d, __c, __b, __a };
+}
+
+/* Create vectors with elements in reverse order */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_setr_pd(double __a, double __b, double __c, double __d)
+{
+ return (__m256d){ __a, __b, __c, __d };
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_setr_ps(float __a, float __b, float __c, float __d,
+ float __e, float __f, float __g, float __h)
+{
+ return (__m256){ __a, __b, __c, __d, __e, __f, __g, __h };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3,
+ int __i4, int __i5, int __i6, int __i7)
+{
+ return (__m256i)(__v8si){ __i0, __i1, __i2, __i3, __i4, __i5, __i6, __i7 };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12,
+ short __w11, short __w10, short __w09, short __w08,
+ short __w07, short __w06, short __w05, short __w04,
+ short __w03, short __w02, short __w01, short __w00)
+{
+ return (__m256i)(__v16hi){ __w15, __w14, __w13, __w12, __w11, __w10, __w09,
+ __w08, __w07, __w06, __w05, __w04, __w03, __w02, __w01, __w00 };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28,
+ char __b27, char __b26, char __b25, char __b24,
+ char __b23, char __b22, char __b21, char __b20,
+ char __b19, char __b18, char __b17, char __b16,
+ char __b15, char __b14, char __b13, char __b12,
+ char __b11, char __b10, char __b09, char __b08,
+ char __b07, char __b06, char __b05, char __b04,
+ char __b03, char __b02, char __b01, char __b00)
+{
+ return (__m256i)(__v32qi){
+ __b31, __b30, __b29, __b28, __b27, __b26, __b25, __b24,
+ __b23, __b22, __b21, __b20, __b19, __b18, __b17, __b16,
+ __b15, __b14, __b13, __b12, __b11, __b10, __b09, __b08,
+ __b07, __b06, __b05, __b04, __b03, __b02, __b01, __b00 };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d)
+{
+ return (__m256i)(__v4di){ __a, __b, __c, __d };
+}
+
+/* Create vectors with repeated elements */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_set1_pd(double __w)
+{
+ return (__m256d){ __w, __w, __w, __w };
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_set1_ps(float __w)
+{
+ return (__m256){ __w, __w, __w, __w, __w, __w, __w, __w };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set1_epi32(int __i)
+{
+ return (__m256i)(__v8si){ __i, __i, __i, __i, __i, __i, __i, __i };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set1_epi16(short __w)
+{
+ return (__m256i)(__v16hi){ __w, __w, __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set1_epi8(char __b)
+{
+ return (__m256i)(__v32qi){ __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
+ __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
+ __b, __b, __b, __b, __b, __b, __b };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set1_epi64x(long long __q)
+{
+ return (__m256i)(__v4di){ __q, __q, __q, __q };
+}
+
+/* Create zeroed vectors */
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_setzero_pd(void)
+{
+ return (__m256d){ 0, 0, 0, 0 };
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_setzero_ps(void)
+{
+ return (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setzero_si256(void)
+{
+ return (__m256i){ 0LL, 0LL, 0LL, 0LL };
+}
+
+/* Cast between vector types */
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_castpd_ps(__m256d __a)
+{
+ return (__m256)__a;
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_castpd_si256(__m256d __a)
+{
+ return (__m256i)__a;
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_castps_pd(__m256 __a)
+{
+ return (__m256d)__a;
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_castps_si256(__m256 __a)
+{
+ return (__m256i)__a;
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_castsi256_ps(__m256i __a)
+{
+ return (__m256)__a;
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_castsi256_pd(__m256i __a)
+{
+ return (__m256d)__a;
+}
+
+static __inline __m128d __DEFAULT_FN_ATTRS
+_mm256_castpd256_pd128(__m256d __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1);
+}
+
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm256_castps256_ps128(__m256 __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS
+_mm256_castsi256_si128(__m256i __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_castpd128_pd256(__m128d __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_castps128_ps256(__m128 __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_castsi128_si256(__m128i __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
+}
+
+/*
+ Vector insert.
+ We use macros rather than inlines because we only want to accept
+ invocations where the immediate M is a constant expression.
+*/
+#define _mm256_insertf128_ps(V1, V2, M) __extension__ ({ \
+ (__m256)__builtin_shufflevector( \
+ (__v8sf)(__m256)(V1), \
+ (__v8sf)_mm256_castps128_ps256((__m128)(V2)), \
+ (((M) & 1) ? 0 : 8), \
+ (((M) & 1) ? 1 : 9), \
+ (((M) & 1) ? 2 : 10), \
+ (((M) & 1) ? 3 : 11), \
+ (((M) & 1) ? 8 : 4), \
+ (((M) & 1) ? 9 : 5), \
+ (((M) & 1) ? 10 : 6), \
+ (((M) & 1) ? 11 : 7) );})
+
+#define _mm256_insertf128_pd(V1, V2, M) __extension__ ({ \
+ (__m256d)__builtin_shufflevector( \
+ (__v4df)(__m256d)(V1), \
+ (__v4df)_mm256_castpd128_pd256((__m128d)(V2)), \
+ (((M) & 1) ? 0 : 4), \
+ (((M) & 1) ? 1 : 5), \
+ (((M) & 1) ? 4 : 2), \
+ (((M) & 1) ? 5 : 3) );})
+
+#define _mm256_insertf128_si256(V1, V2, M) __extension__ ({ \
+ (__m256i)__builtin_shufflevector( \
+ (__v4di)(__m256i)(V1), \
+ (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \
+ (((M) & 1) ? 0 : 4), \
+ (((M) & 1) ? 1 : 5), \
+ (((M) & 1) ? 4 : 2), \
+ (((M) & 1) ? 5 : 3) );})
+
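+/* Illustrative example (mine, not from the original header): when M & 1 is
+ * 1 the 128-bit argument replaces the upper half of V1, so
+ *
+ *   __m256 v = _mm256_insertf128_ps(_mm256_setzero_ps(), lo, 1);
+ *
+ * keeps elements 0..3 at 0.0f and takes elements 4..7 from lo (a
+ * hypothetical __m128 value). */
+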
+/*
+ Vector extract.
+ We use macros rather than inlines because we only want to accept
+ invocations where the immediate M is a constant expression.
+*/
+#define _mm256_extractf128_ps(V, M) __extension__ ({ \
+ (__m128)__builtin_shufflevector( \
+ (__v8sf)(__m256)(V), \
+ (__v8sf)(_mm256_setzero_ps()), \
+ (((M) & 1) ? 4 : 0), \
+ (((M) & 1) ? 5 : 1), \
+ (((M) & 1) ? 6 : 2), \
+ (((M) & 1) ? 7 : 3) );})
+
+#define _mm256_extractf128_pd(V, M) __extension__ ({ \
+ (__m128d)__builtin_shufflevector( \
+ (__v4df)(__m256d)(V), \
+ (__v4df)(_mm256_setzero_pd()), \
+ (((M) & 1) ? 2 : 0), \
+ (((M) & 1) ? 3 : 1) );})
+
+#define _mm256_extractf128_si256(V, M) __extension__ ({ \
+ (__m128i)__builtin_shufflevector( \
+ (__v4di)(__m256i)(V), \
+ (__v4di)(_mm256_setzero_si256()), \
+ (((M) & 1) ? 2 : 0), \
+ (((M) & 1) ? 3 : 1) );})
+
+/* SIMD load ops (unaligned) */
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
+{
+ struct __loadu_ps {
+ __m128 __v;
+ } __attribute__((__packed__, __may_alias__));
+
+ __m256 __v256 = _mm256_castps128_ps256(((struct __loadu_ps*)__addr_lo)->__v);
+ return _mm256_insertf128_ps(__v256, ((struct __loadu_ps*)__addr_hi)->__v, 1);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
+{
+ struct __loadu_pd {
+ __m128d __v;
+ } __attribute__((__packed__, __may_alias__));
+
+ __m256d __v256 = _mm256_castpd128_pd256(((struct __loadu_pd*)__addr_lo)->__v);
+ return _mm256_insertf128_pd(__v256, ((struct __loadu_pd*)__addr_hi)->__v, 1);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_loadu2_m128i(__m128i const *__addr_hi, __m128i const *__addr_lo)
+{
+ struct __loadu_si128 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ __m256i __v256 = _mm256_castsi128_si256(
+ ((struct __loadu_si128*)__addr_lo)->__v);
+ return _mm256_insertf128_si256(__v256,
+ ((struct __loadu_si128*)__addr_hi)->__v, 1);
+}
+
+/* SIMD store ops (unaligned) */
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a)
+{
+ __m128 __v128;
+
+ __v128 = _mm256_castps256_ps128(__a);
+ __builtin_ia32_storeups(__addr_lo, __v128);
+ __v128 = _mm256_extractf128_ps(__a, 1);
+ __builtin_ia32_storeups(__addr_hi, __v128);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a)
+{
+ __m128d __v128;
+
+ __v128 = _mm256_castpd256_pd128(__a);
+ __builtin_ia32_storeupd(__addr_lo, __v128);
+ __v128 = _mm256_extractf128_pd(__a, 1);
+ __builtin_ia32_storeupd(__addr_hi, __v128);
+}
+
+static __inline void __DEFAULT_FN_ATTRS
+_mm256_storeu2_m128i(__m128i *__addr_hi, __m128i *__addr_lo, __m256i __a)
+{
+ __m128i __v128;
+
+ __v128 = _mm256_castsi256_si128(__a);
+ __builtin_ia32_storedqu((char *)__addr_lo, (__v16qi)__v128);
+ __v128 = _mm256_extractf128_si256(__a, 1);
+ __builtin_ia32_storedqu((char *)__addr_hi, (__v16qi)__v128);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_set_m128 (__m128 __hi, __m128 __lo) {
+ return (__m256) __builtin_shufflevector(__lo, __hi, 0, 1, 2, 3, 4, 5, 6, 7);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_set_m128d (__m128d __hi, __m128d __lo) {
+ return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set_m128i (__m128i __hi, __m128i __lo) {
+ return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+}
+
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_setr_m128 (__m128 __lo, __m128 __hi) {
+ return _mm256_set_m128(__hi, __lo);
+}
+
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_setr_m128d (__m128d __lo, __m128d __hi) {
+ return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setr_m128i (__m128i __lo, __m128i __hi) {
+ return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __AVXINTRIN_H */
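A side note on the set/setr pairs defined above: a self-contained demo of mine (not part of the patch) showing that _mm256_set_pd takes its arguments from the highest element down, while _mm256_setr_pd takes them in memory order:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m256d s  = _mm256_set_pd(3.0, 2.0, 1.0, 0.0);  /* element 0 gets the last argument */
  __m256d sr = _mm256_setr_pd(0.0, 1.0, 2.0, 3.0); /* element 0 gets the first argument */
  double a[4], b[4];
  _mm256_storeu_pd(a, s);
  _mm256_storeu_pd(b, sr);
  printf("%g %g %g %g\n", a[0], a[1], a[2], a[3]); /* 0 1 2 3 */
  printf("%g %g %g %g\n", b[0], b[1], b[2], b[3]); /* 0 1 2 3 */
  return 0;
}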
diff --git a/contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h b/contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h
new file mode 100644
index 0000000..fdae82c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h
@@ -0,0 +1,95 @@
+/*===---- bmi2intrin.h - BMI2 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <bmi2intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __BMI2INTRIN_H
+#define __BMI2INTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi2")))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_bzhi_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_bzhi_si(__X, __Y);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_pdep_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_pdep_si(__X, __Y);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_pext_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_pext_si(__X, __Y);
+}
+
+#ifdef __x86_64__
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_bzhi_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_bzhi_di(__X, __Y);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_pdep_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_pdep_di(__X, __Y);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_pext_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_pext_di(__X, __Y);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mulx_u64 (unsigned long long __X, unsigned long long __Y,
+ unsigned long long *__P)
+{
+ unsigned __int128 __res = (unsigned __int128) __X * __Y;
+ *__P = (unsigned long long) (__res >> 64);
+ return (unsigned long long) __res;
+}
+
+#else /* !__x86_64__ */
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mulx_u32 (unsigned int __X, unsigned int __Y, unsigned int *__P)
+{
+ unsigned long long __res = (unsigned long long) __X * __Y;
+ *__P = (unsigned int) (__res >> 32);
+ return (unsigned int) __res;
+}
+
+#endif /* !__x86_64__ */
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __BMI2INTRIN_H */
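For reference, the bit-scatter/gather semantics behind _pdep_u32 and _pext_u32 above can be written as a portable sketch (helper names are mine, not part of the patch):

#include <stdint.h>

/* Reference model of _pdep_u32: deposit the low bits of src into the
 * positions where mask has 1-bits, from low to high. */
static uint32_t pdep32_ref(uint32_t src, uint32_t mask) {
  uint32_t result = 0;
  for (uint32_t bit = 1; mask; bit <<= 1) {
    uint32_t low = mask & -mask;  /* lowest set bit of the mask */
    if (src & bit)
      result |= low;
    mask &= mask - 1;             /* clear that mask bit */
  }
  return result;
}

/* Reference model of _pext_u32: gather the bits of src selected by mask
 * and pack them into the low bits of the result. */
static uint32_t pext32_ref(uint32_t src, uint32_t mask) {
  uint32_t result = 0;
  for (uint32_t bit = 1; mask; bit <<= 1) {
    uint32_t low = mask & -mask;
    if (src & low)
      result |= bit;
    mask &= mask - 1;
  }
  return result;
}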
diff --git a/contrib/llvm/tools/clang/lib/Headers/bmiintrin.h b/contrib/llvm/tools/clang/lib/Headers/bmiintrin.h
new file mode 100644
index 0000000..da98792
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/bmiintrin.h
@@ -0,0 +1,155 @@
+/*===---- bmiintrin.h - BMI intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <bmiintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __BMIINTRIN_H
+#define __BMIINTRIN_H
+
+#define _tzcnt_u16(a) (__tzcnt_u16((a)))
+#define _andn_u32(a, b) (__andn_u32((a), (b)))
+/* _bextr_u32 != __bextr_u32 */
+#define _blsi_u32(a) (__blsi_u32((a)))
+#define _blsmsk_u32(a) (__blsmsk_u32((a)))
+#define _blsr_u32(a) (__blsr_u32((a)))
+#define _tzcnt_u32(a) (__tzcnt_u32((a)))
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi")))
+
+/* Allow using the tzcnt intrinsics even for non-BMI targets. Since the TZCNT
+ instruction behaves as BSF on non-BMI targets, there is code that expects
+ to use it as a potentially faster version of BSF. */
+#define __RELAXED_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+
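+/* Illustrative note: unlike raw BSF, whose result is undefined for a zero
+ * input, these wrappers are total functions; e.g. __tzcnt_u32(0) == 32
+ * and __tzcnt_u32(0x18) == 3. */
+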
+static __inline__ unsigned short __RELAXED_FN_ATTRS
+__tzcnt_u16(unsigned short __X)
+{
+ return __X ? __builtin_ctzs(__X) : 16;
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__andn_u32(unsigned int __X, unsigned int __Y)
+{
+ return ~__X & __Y;
+}
+
+/* AMD-specified, double-leading-underscore version of BEXTR */
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__bextr_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_bextr_u32(__X, __Y);
+}
+
+/* Intel-specified, single-leading-underscore version of BEXTR */
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_bextr_u32(unsigned int __X, unsigned int __Y, unsigned int __Z)
+{
+ return __builtin_ia32_bextr_u32 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blsi_u32(unsigned int __X)
+{
+ return __X & -__X;
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blsmsk_u32(unsigned int __X)
+{
+ return __X ^ (__X - 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blsr_u32(unsigned int __X)
+{
+ return __X & (__X - 1);
+}
+
+static __inline__ unsigned int __RELAXED_FN_ATTRS
+__tzcnt_u32(unsigned int __X)
+{
+ return __X ? __builtin_ctz(__X) : 32;
+}
+
+#ifdef __x86_64__
+
+#define _andn_u64(a, b) (__andn_u64((a), (b)))
+/* _bextr_u64 != __bextr_u64 */
+#define _blsi_u64(a) (__blsi_u64((a)))
+#define _blsmsk_u64(a) (__blsmsk_u64((a)))
+#define _blsr_u64(a) (__blsr_u64((a)))
+#define _tzcnt_u64(a) (__tzcnt_u64((a)))
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__andn_u64 (unsigned long long __X, unsigned long long __Y)
+{
+ return ~__X & __Y;
+}
+
+/* AMD-specified, double-leading-underscore version of BEXTR */
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__bextr_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_bextr_u64(__X, __Y);
+}
+
+/* Intel-specified, single-leading-underscore version of BEXTR */
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_bextr_u64(unsigned long long __X, unsigned int __Y, unsigned int __Z)
+{
+ return __builtin_ia32_bextr_u64 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blsi_u64(unsigned long long __X)
+{
+ return __X & -__X;
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blsmsk_u64(unsigned long long __X)
+{
+ return __X ^ (__X - 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blsr_u64(unsigned long long __X)
+{
+ return __X & (__X - 1);
+}
+
+static __inline__ unsigned long long __RELAXED_FN_ATTRS
+__tzcnt_u64(unsigned long long __X)
+{
+ return __X ? __builtin_ctzll(__X) : 64;
+}
+
+#endif /* __x86_64__ */
+
+#undef __DEFAULT_FN_ATTRS
+#undef __RELAXED_FN_ATTRS
+
+#endif /* __BMIINTRIN_H */
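Finally, a quick demo of mine (assuming a BMI-capable target built with -mbmi; not part of the patch) of the identities the inlines above compute:

#include <stdio.h>
#include <x86intrin.h>

int main(void) {
  unsigned x = 0x58;               /* binary 0101 1000 */
  printf("%#x\n", _blsi_u32(x));   /* 0x8:  isolate the lowest set bit */
  printf("%#x\n", _blsmsk_u32(x)); /* 0xf:  mask up to the lowest set bit */
  printf("%#x\n", _blsr_u32(x));   /* 0x50: clear the lowest set bit */
  return 0;
}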
diff --git a/contrib/llvm/tools/clang/lib/Headers/cpuid.h b/contrib/llvm/tools/clang/lib/Headers/cpuid.h
new file mode 100644
index 0000000..5da02e0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/cpuid.h
@@ -0,0 +1,209 @@
+/*===---- cpuid.h - X86 cpu model detection --------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !(__x86_64__ || __i386__)
+#error this header is for x86 only
+#endif
+
+/* Responses to the CPU identification request made with %eax = 0 */
+/* AMD: "AuthenticAMD" */
+#define signature_AMD_ebx 0x68747541
+#define signature_AMD_edx 0x69746e65
+#define signature_AMD_ecx 0x444d4163
+/* CENTAUR: "CentaurHauls" */
+#define signature_CENTAUR_ebx 0x746e6543
+#define signature_CENTAUR_edx 0x48727561
+#define signature_CENTAUR_ecx 0x736c7561
+/* CYRIX: "CyrixInstead" */
+#define signature_CYRIX_ebx 0x69727943
+#define signature_CYRIX_edx 0x736e4978
+#define signature_CYRIX_ecx 0x64616574
+/* INTEL: "GenuineIntel" */
+#define signature_INTEL_ebx 0x756e6547
+#define signature_INTEL_edx 0x49656e69
+#define signature_INTEL_ecx 0x6c65746e
+/* TM1: "TransmetaCPU" */
+#define signature_TM1_ebx 0x6e617254
+#define signature_TM1_edx 0x74656d73
+#define signature_TM1_ecx 0x55504361
+/* TM2: "GenuineTMx86" */
+#define signature_TM2_ebx 0x756e6547
+#define signature_TM2_edx 0x54656e69
+#define signature_TM2_ecx 0x3638784d
+/* NSC: "Geode by NSC" */
+#define signature_NSC_ebx 0x646f6547
+#define signature_NSC_edx 0x43534e20
+#define signature_NSC_ecx 0x79622065
+/* NEXGEN: "NexGenDriven" */
+#define signature_NEXGEN_ebx 0x4778654e
+#define signature_NEXGEN_edx 0x72446e65
+#define signature_NEXGEN_ecx 0x6e657669
+/* RISE: "RiseRiseRise" */
+#define signature_RISE_ebx 0x65736952
+#define signature_RISE_edx 0x65736952
+#define signature_RISE_ecx 0x65736952
+/* SIS: "SiS SiS SiS " */
+#define signature_SIS_ebx 0x20536953
+#define signature_SIS_edx 0x20536953
+#define signature_SIS_ecx 0x20536953
+/* UMC: "UMC UMC UMC " */
+#define signature_UMC_ebx 0x20434d55
+#define signature_UMC_edx 0x20434d55
+#define signature_UMC_ecx 0x20434d55
+/* VIA: "VIA VIA VIA " */
+#define signature_VIA_ebx 0x20414956
+#define signature_VIA_edx 0x20414956
+#define signature_VIA_ecx 0x20414956
+/* VORTEX: "Vortex86 SoC" */
+#define signature_VORTEX_ebx 0x74726f56
+#define signature_VORTEX_edx 0x36387865
+#define signature_VORTEX_ecx 0x436f5320
+
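+/* Note that each register holds four bytes of the 12-byte vendor string,
+ * least significant byte first, in the order %ebx, %edx, %ecx; e.g.
+ * 0x756e6547 is "Genu". */
+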
+/* Features in %ecx for level 1 */
+#define bit_SSE3 0x00000001
+#define bit_PCLMULQDQ 0x00000002
+#define bit_DTES64 0x00000004
+#define bit_MONITOR 0x00000008
+#define bit_DSCPL 0x00000010
+#define bit_VMX 0x00000020
+#define bit_SMX 0x00000040
+#define bit_EIST 0x00000080
+#define bit_TM2 0x00000100
+#define bit_SSSE3 0x00000200
+#define bit_CNXTID 0x00000400
+#define bit_FMA 0x00001000
+#define bit_CMPXCHG16B 0x00002000
+#define bit_xTPR 0x00004000
+#define bit_PDCM 0x00008000
+#define bit_PCID 0x00020000
+#define bit_DCA 0x00040000
+#define bit_SSE41 0x00080000
+#define bit_SSE42 0x00100000
+#define bit_x2APIC 0x00200000
+#define bit_MOVBE 0x00400000
+#define bit_POPCNT 0x00800000
+#define bit_TSCDeadline 0x01000000
+#define bit_AESNI 0x02000000
+#define bit_XSAVE 0x04000000
+#define bit_OSXSAVE 0x08000000
+#define bit_AVX 0x10000000
+#define bit_RDRND 0x40000000
+
+/* Features in %edx for level 1 */
+#define bit_FPU 0x00000001
+#define bit_VME 0x00000002
+#define bit_DE 0x00000004
+#define bit_PSE 0x00000008
+#define bit_TSC 0x00000010
+#define bit_MSR 0x00000020
+#define bit_PAE 0x00000040
+#define bit_MCE 0x00000080
+#define bit_CX8 0x00000100
+#define bit_APIC 0x00000200
+#define bit_SEP 0x00000800
+#define bit_MTRR 0x00001000
+#define bit_PGE 0x00002000
+#define bit_MCA 0x00004000
+#define bit_CMOV 0x00008000
+#define bit_PAT 0x00010000
+#define bit_PSE36 0x00020000
+#define bit_PSN 0x00040000
+#define bit_CLFSH 0x00080000
+#define bit_DS 0x00200000
+#define bit_ACPI 0x00400000
+#define bit_MMX 0x00800000
+#define bit_FXSR 0x01000000
+#define bit_FXSAVE bit_FXSR /* for gcc compat */
+#define bit_SSE 0x02000000
+#define bit_SSE2 0x04000000
+#define bit_SS 0x08000000
+#define bit_HTT 0x10000000
+#define bit_TM 0x20000000
+#define bit_PBE 0x80000000
+
+/* Features in %ebx for level 7 sub-leaf 0 */
+#define bit_FSGSBASE 0x00000001
+#define bit_SMEP 0x00000080
+#define bit_ENH_MOVSB 0x00000200
+
+#if __i386__
+#define __cpuid(__level, __eax, __ebx, __ecx, __edx) \
+ __asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
+ : "0"(__level))
+
+#define __cpuid_count(__level, __count, __eax, __ebx, __ecx, __edx) \
+ __asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
+ : "0"(__level), "2"(__count))
+#else
+/* x86-64 uses %rbx as the base register, so preserve it. */
+#define __cpuid(__level, __eax, __ebx, __ecx, __edx) \
+ __asm(" xchgq %%rbx,%q1\n" \
+ " cpuid\n" \
+ " xchgq %%rbx,%q1" \
+ : "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \
+ : "0"(__level))
+
+#define __cpuid_count(__level, __count, __eax, __ebx, __ecx, __edx) \
+ __asm(" xchgq %%rbx,%q1\n" \
+ " cpuid\n" \
+ " xchgq %%rbx,%q1" \
+ : "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \
+ : "0"(__level), "2"(__count))
+#endif
+
+static __inline int __get_cpuid (unsigned int __level, unsigned int *__eax,
+ unsigned int *__ebx, unsigned int *__ecx,
+ unsigned int *__edx) {
+ __cpuid(__level, *__eax, *__ebx, *__ecx, *__edx);
+ return 1;
+}
+
+static __inline int __get_cpuid_max (unsigned int __level, unsigned int *__sig)
+{
+ unsigned int __eax, __ebx, __ecx, __edx;
+#if __i386__
+ int __cpuid_supported;
+
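+  /* CPUID exists iff bit 21 (the ID flag) of EFLAGS can be toggled; flip it
+     and check whether the change sticks. */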
+ __asm(" pushfl\n"
+ " popl %%eax\n"
+ " movl %%eax,%%ecx\n"
+ " xorl $0x00200000,%%eax\n"
+ " pushl %%eax\n"
+ " popfl\n"
+ " pushfl\n"
+ " popl %%eax\n"
+ " movl $0,%0\n"
+ " cmpl %%eax,%%ecx\n"
+ " je 1f\n"
+ " movl $1,%0\n"
+ "1:"
+ : "=r" (__cpuid_supported) : : "eax", "ecx");
+ if (!__cpuid_supported)
+ return 0;
+#endif
+
+ __cpuid(__level, __eax, __ebx, __ecx, __edx);
+ if (__sig)
+ *__sig = __ebx;
+ return __eax;
+}
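A minimal caller sketch using only the definitions above: probe the maximum basic leaf, match the vendor signature, then test feature bits (the OSXSAVE/XGETBV caveat for AVX is an editorial note, not part of this header):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void) {
        unsigned int sig, eax, ebx, ecx, edx;
        unsigned int max = __get_cpuid_max(0, &sig); /* highest basic leaf */
        if (max == 0)
            return 1;                     /* CPUID unsupported (i386 only) */
        if (sig == signature_INTEL_ebx)
            puts("GenuineIntel");
        if (__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
            if (edx & bit_SSE2)
                puts("SSE2");
            if ((ecx & bit_AVX) && (ecx & bit_OSXSAVE))
                puts("AVX (OS support still needs an XGETBV check)");
        }
        return 0;
    }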
diff --git a/contrib/llvm/tools/clang/lib/Headers/cuda_builtin_vars.h b/contrib/llvm/tools/clang/lib/Headers/cuda_builtin_vars.h
new file mode 100644
index 0000000..901356b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/cuda_builtin_vars.h
@@ -0,0 +1,110 @@
+/*===---- cuda_builtin_vars.h - CUDA built-in variables ---------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CUDA_BUILTIN_VARS_H
+#define __CUDA_BUILTIN_VARS_H
+
+// This file implements the built-in CUDA variables using __declspec(property).
+// https://msdn.microsoft.com/en-us/library/yhfk0thd.aspx
+// Every read access of a built-in variable's field is converted into a call to
+// a getter function, which in turn calls the appropriate builtin to fetch the
+// value.
+//
+// Example:
+// int x = threadIdx.x;
+// IR output:
+// %0 = call i32 @llvm.ptx.read.tid.x() #3
+// PTX output:
+// mov.u32 %r2, %tid.x;
+
+#define __CUDA_DEVICE_BUILTIN(FIELD, INTRINSIC) \
+ __declspec(property(get = __fetch_builtin_##FIELD)) unsigned int FIELD; \
+ static inline __attribute__((always_inline)) \
+ __attribute__((device)) unsigned int __fetch_builtin_##FIELD(void) { \
+ return INTRINSIC; \
+ }
+
+#if __cplusplus >= 201103L
+#define __DELETE =delete
+#else
+#define __DELETE
+#endif
+
+// Make sure nobody can create instances of the special variable types. nvcc
+// also disallows taking address of special variables, so we disable address-of
+// operator as well.
+#define __CUDA_DISALLOW_BUILTINVAR_ACCESS(TypeName) \
+ __attribute__((device)) TypeName() __DELETE; \
+ __attribute__((device)) TypeName(const TypeName &) __DELETE; \
+ __attribute__((device)) void operator=(const TypeName &) const __DELETE; \
+ __attribute__((device)) TypeName *operator&() const __DELETE
+
+struct __cuda_builtin_threadIdx_t {
+ __CUDA_DEVICE_BUILTIN(x,__builtin_ptx_read_tid_x());
+ __CUDA_DEVICE_BUILTIN(y,__builtin_ptx_read_tid_y());
+ __CUDA_DEVICE_BUILTIN(z,__builtin_ptx_read_tid_z());
+private:
+ __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_threadIdx_t);
+};
+
+struct __cuda_builtin_blockIdx_t {
+ __CUDA_DEVICE_BUILTIN(x,__builtin_ptx_read_ctaid_x());
+ __CUDA_DEVICE_BUILTIN(y,__builtin_ptx_read_ctaid_y());
+ __CUDA_DEVICE_BUILTIN(z,__builtin_ptx_read_ctaid_z());
+private:
+ __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockIdx_t);
+};
+
+struct __cuda_builtin_blockDim_t {
+ __CUDA_DEVICE_BUILTIN(x,__builtin_ptx_read_ntid_x());
+ __CUDA_DEVICE_BUILTIN(y,__builtin_ptx_read_ntid_y());
+ __CUDA_DEVICE_BUILTIN(z,__builtin_ptx_read_ntid_z());
+private:
+ __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockDim_t);
+};
+
+struct __cuda_builtin_gridDim_t {
+ __CUDA_DEVICE_BUILTIN(x,__builtin_ptx_read_nctaid_x());
+ __CUDA_DEVICE_BUILTIN(y,__builtin_ptx_read_nctaid_y());
+ __CUDA_DEVICE_BUILTIN(z,__builtin_ptx_read_nctaid_z());
+private:
+ __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_gridDim_t);
+};
+
+#define __CUDA_BUILTIN_VAR \
+ extern const __attribute__((device)) __attribute__((weak))
+__CUDA_BUILTIN_VAR __cuda_builtin_threadIdx_t threadIdx;
+__CUDA_BUILTIN_VAR __cuda_builtin_blockIdx_t blockIdx;
+__CUDA_BUILTIN_VAR __cuda_builtin_blockDim_t blockDim;
+__CUDA_BUILTIN_VAR __cuda_builtin_gridDim_t gridDim;
+
+// warpSize should translate to read of %WARP_SZ but there's currently no
+// builtin to do so. According to PTX v4.2 docs 'to date, all target
+// architectures have a WARP_SZ value of 32'.
+__attribute__((device)) const int warpSize = 32;
+
+#undef __CUDA_DEVICE_BUILTIN
+#undef __CUDA_BUILTIN_VAR
+#undef __CUDA_DISALLOW_BUILTINVAR_ACCESS
+
+#endif /* __CUDA_BUILTIN_VARS_H */
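As an illustration, a minimal device-side sketch that reads the variables defined above (assuming the usual CUDA compilation and launch machinery; __attribute__((global)) stands in for the __global__ macro, which lives in other CUDA headers):

    __attribute__((global)) void scale(float *v, float k, int n) {
      int i = blockIdx.x * blockDim.x + threadIdx.x; /* flat thread index */
      if (i < n)                                     /* guard the tail block */
        v[i] *= k;
    }

Each read of .x here compiles to the corresponding __builtin_ptx_read_* call via the property getters.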
diff --git a/contrib/llvm/tools/clang/lib/Headers/emmintrin.h b/contrib/llvm/tools/clang/lib/Headers/emmintrin.h
new file mode 100644
index 0000000..cfc2c71
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/emmintrin.h
@@ -0,0 +1,1491 @@
+/*===---- emmintrin.h - SSE2 intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __EMMINTRIN_H
+#define __EMMINTRIN_H
+
+#include <xmmintrin.h>
+
+typedef double __m128d __attribute__((__vector_size__(16)));
+typedef long long __m128i __attribute__((__vector_size__(16)));
+
+/* Type defines. */
+typedef double __v2df __attribute__ ((__vector_size__ (16)));
+typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+typedef short __v8hi __attribute__((__vector_size__(16)));
+typedef char __v16qi __attribute__((__vector_size__(16)));
+
+/* We need an explicitly signed variant for char. Note that this shouldn't
+ * appear in the interface though. */
+typedef signed char __v16qs __attribute__((__vector_size__(16)));
+
+#include <f16cintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_add_sd(__m128d __a, __m128d __b)
+{
+ __a[0] += __b[0];
+ return __a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_add_pd(__m128d __a, __m128d __b)
+{
+ return __a + __b;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_sub_sd(__m128d __a, __m128d __b)
+{
+ __a[0] -= __b[0];
+ return __a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_sub_pd(__m128d __a, __m128d __b)
+{
+ return __a - __b;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mul_sd(__m128d __a, __m128d __b)
+{
+ __a[0] *= __b[0];
+ return __a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mul_pd(__m128d __a, __m128d __b)
+{
+ return __a * __b;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_div_sd(__m128d __a, __m128d __b)
+{
+ __a[0] /= __b[0];
+ return __a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_div_pd(__m128d __a, __m128d __b)
+{
+ return __a / __b;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_sqrt_sd(__m128d __a, __m128d __b)
+{
+ __m128d __c = __builtin_ia32_sqrtsd(__b);
+ return (__m128d) { __c[0], __a[1] };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_sqrt_pd(__m128d __a)
+{
+ return __builtin_ia32_sqrtpd(__a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_min_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_minsd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_min_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_minpd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_max_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_maxsd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_max_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_maxpd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_and_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)((__v4si)__a & (__v4si)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_andnot_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)(~(__v4si)__a & (__v4si)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_or_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)((__v4si)__a | (__v4si)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_xor_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)((__v4si)__a ^ (__v4si)__b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpeq_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpeqpd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmplt_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpltpd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmple_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmplepd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpgt_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpltpd(__b, __a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpge_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmplepd(__b, __a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpord_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpordpd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpunord_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpunordpd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpneq_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpneqpd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpnlt_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpnltpd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpnle_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpnlepd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpngt_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpnltpd(__b, __a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpnge_pd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpnlepd(__b, __a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpeq_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpeqsd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmplt_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpltsd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmple_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmplesd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpgt_sd(__m128d __a, __m128d __b)
+{
+ __m128d __c = __builtin_ia32_cmpltsd(__b, __a);
+ return (__m128d) { __c[0], __a[1] };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpge_sd(__m128d __a, __m128d __b)
+{
+ __m128d __c = __builtin_ia32_cmplesd(__b, __a);
+ return (__m128d) { __c[0], __a[1] };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpord_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpordsd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpunord_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpunordsd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpneq_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpneqsd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpnlt_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpnltsd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpnle_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d)__builtin_ia32_cmpnlesd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpngt_sd(__m128d __a, __m128d __b)
+{
+ __m128d __c = __builtin_ia32_cmpnltsd(__b, __a);
+ return (__m128d) { __c[0], __a[1] };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cmpnge_sd(__m128d __a, __m128d __b)
+{
+ __m128d __c = __builtin_ia32_cmpnlesd(__b, __a);
+ return (__m128d) { __c[0], __a[1] };
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comieq_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_comisdeq(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comilt_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_comisdlt(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comile_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_comisdle(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comigt_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_comisdgt(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comige_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_comisdge(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comineq_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_comisdneq(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomieq_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_ucomisdeq(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomilt_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_ucomisdlt(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomile_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_ucomisdle(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomigt_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_ucomisdgt(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomige_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_ucomisdge(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomineq_sd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_ucomisdneq(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpd_ps(__m128d __a)
+{
+ return __builtin_ia32_cvtpd2ps(__a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtps_pd(__m128 __a)
+{
+ return __builtin_ia32_cvtps2pd(__a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtepi32_pd(__m128i __a)
+{
+ return __builtin_ia32_cvtdq2pd((__v4si)__a);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtpd_epi32(__m128d __a)
+{
+ return __builtin_ia32_cvtpd2dq(__a);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvtsd_si32(__m128d __a)
+{
+ return __builtin_ia32_cvtsd2si(__a);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtsd_ss(__m128 __a, __m128d __b)
+{
+ __a[0] = __b[0];
+ return __a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtsi32_sd(__m128d __a, int __b)
+{
+ __a[0] = __b;
+ return __a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtss_sd(__m128d __a, __m128 __b)
+{
+ __a[0] = __b[0];
+ return __a;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttpd_epi32(__m128d __a)
+{
+ return (__m128i)__builtin_ia32_cvttpd2dq(__a);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvttsd_si32(__m128d __a)
+{
+ return __a[0];
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtpd_pi32(__m128d __a)
+{
+ return (__m64)__builtin_ia32_cvtpd2pi(__a);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvttpd_pi32(__m128d __a)
+{
+ return (__m64)__builtin_ia32_cvttpd2pi(__a);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtpi32_pd(__m64 __a)
+{
+ return __builtin_ia32_cvtpi2pd((__v2si)__a);
+}
+
+static __inline__ double __DEFAULT_FN_ATTRS
+_mm_cvtsd_f64(__m128d __a)
+{
+ return __a[0];
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_load_pd(double const *__dp)
+{
+ return *(__m128d*)__dp;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_load1_pd(double const *__dp)
+{
+ struct __mm_load1_pd_struct {
+ double __u;
+ } __attribute__((__packed__, __may_alias__));
+ double __u = ((struct __mm_load1_pd_struct*)__dp)->__u;
+ return (__m128d){ __u, __u };
+}
+
+#define _mm_load_pd1(dp) _mm_load1_pd(dp)
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_loadr_pd(double const *__dp)
+{
+ __m128d __u = *(__m128d*)__dp;
+ return __builtin_shufflevector(__u, __u, 1, 0);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_loadu_pd(double const *__dp)
+{
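+  /* The __packed__, __may_alias__ wrapper makes this a well-defined
+     unaligned, aliasing load, so the compiler can emit movupd here. */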
+ struct __loadu_pd {
+ __m128d __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_pd*)__dp)->__v;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_load_sd(double const *__dp)
+{
+ struct __mm_load_sd_struct {
+ double __u;
+ } __attribute__((__packed__, __may_alias__));
+ double __u = ((struct __mm_load_sd_struct*)__dp)->__u;
+ return (__m128d){ __u, 0 };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_loadh_pd(__m128d __a, double const *__dp)
+{
+ struct __mm_loadh_pd_struct {
+ double __u;
+ } __attribute__((__packed__, __may_alias__));
+ double __u = ((struct __mm_loadh_pd_struct*)__dp)->__u;
+ return (__m128d){ __a[0], __u };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_loadl_pd(__m128d __a, double const *__dp)
+{
+ struct __mm_loadl_pd_struct {
+ double __u;
+ } __attribute__((__packed__, __may_alias__));
+ double __u = ((struct __mm_loadl_pd_struct*)__dp)->__u;
+ return (__m128d){ __u, __a[1] };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_undefined_pd(void)
+{
+ return (__m128d)__builtin_ia32_undef128();
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_set_sd(double __w)
+{
+ return (__m128d){ __w, 0 };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_set1_pd(double __w)
+{
+ return (__m128d){ __w, __w };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_set_pd(double __w, double __x)
+{
+ return (__m128d){ __x, __w };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_setr_pd(double __w, double __x)
+{
+ return (__m128d){ __w, __x };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_setzero_pd(void)
+{
+ return (__m128d){ 0, 0 };
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_move_sd(__m128d __a, __m128d __b)
+{
+ return (__m128d){ __b[0], __a[1] };
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_sd(double *__dp, __m128d __a)
+{
+ struct __mm_store_sd_struct {
+ double __u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_store_sd_struct*)__dp)->__u = __a[0];
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store1_pd(double *__dp, __m128d __a)
+{
+ struct __mm_store1_pd_struct {
+ double __u[2];
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_store1_pd_struct*)__dp)->__u[0] = __a[0];
+ ((struct __mm_store1_pd_struct*)__dp)->__u[1] = __a[0];
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_pd(double *__dp, __m128d __a)
+{
+ *(__m128d *)__dp = __a;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeu_pd(double *__dp, __m128d __a)
+{
+ __builtin_ia32_storeupd(__dp, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storer_pd(double *__dp, __m128d __a)
+{
+ __a = __builtin_shufflevector(__a, __a, 1, 0);
+ *(__m128d *)__dp = __a;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeh_pd(double *__dp, __m128d __a)
+{
+ struct __mm_storeh_pd_struct {
+ double __u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[1];
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storel_pd(double *__dp, __m128d __a)
+{
+ struct __mm_storeh_pd_struct {
+ double __u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[0];
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_add_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v16qi)__a + (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_add_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v8hi)__a + (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_add_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v4si)__a + (__v4si)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_add_si64(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_paddq(__a, __b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_add_epi64(__m128i __a, __m128i __b)
+{
+ return __a + __b;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_adds_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_paddsb128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_adds_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_paddsw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_adds_epu8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_paddusb128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_adds_epu16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_paddusw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_avg_epu8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_avg_epu16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pavgw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_madd_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epu8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmaxub128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pminsw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epu8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pminub128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mulhi_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mulhi_epu16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mullo_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v8hi)__a * (__v8hi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_mul_su32(__m64 __a, __m64 __b)
+{
+ return __builtin_ia32_pmuludq((__v2si)__a, (__v2si)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mul_epu32(__m128i __a, __m128i __b)
+{
+ return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sad_epu8(__m128i __a, __m128i __b)
+{
+ return __builtin_ia32_psadbw128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sub_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v16qi)__a - (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sub_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v8hi)__a - (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sub_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v4si)__a - (__v4si)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sub_si64(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_psubq(__a, __b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sub_epi64(__m128i __a, __m128i __b)
+{
+ return __a - __b;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_subs_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psubsb128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_subs_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psubsw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_subs_epu8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psubusb128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_subs_epu16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psubusw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_and_si128(__m128i __a, __m128i __b)
+{
+ return __a & __b;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_andnot_si128(__m128i __a, __m128i __b)
+{
+ return ~__a & __b;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_or_si128(__m128i __a, __m128i __b)
+{
+ return __a | __b;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_xor_si128(__m128i __a, __m128i __b)
+{
+ return __a ^ __b;
+}
+
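+/* _mm_slli_si128 shifts the whole vector left by imm bytes, shuffling the
+   shifted-in zeroes from a zero vector; any imm greater than 15 selects only
+   zero lanes and so yields all zeroes. */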
+#define _mm_slli_si128(a, imm) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v16qi)_mm_setzero_si128(), \
+ (__v16qi)(__m128i)(a), \
+ ((imm)&0xF0) ? 0 : 16 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 17 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 18 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 19 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 20 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 21 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 22 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 23 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 24 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 25 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 26 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 27 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 28 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 29 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 30 - ((imm)&0xF), \
+ ((imm)&0xF0) ? 0 : 31 - ((imm)&0xF)); })
+
+#define _mm_bslli_si128(a, imm) \
+ _mm_slli_si128((a), (imm))
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_slli_epi16(__m128i __a, int __count)
+{
+ return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sll_epi16(__m128i __a, __m128i __count)
+{
+ return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_slli_epi32(__m128i __a, int __count)
+{
+ return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sll_epi32(__m128i __a, __m128i __count)
+{
+ return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_slli_epi64(__m128i __a, int __count)
+{
+ return __builtin_ia32_psllqi128(__a, __count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sll_epi64(__m128i __a, __m128i __count)
+{
+ return __builtin_ia32_psllq128(__a, __count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srai_epi16(__m128i __a, int __count)
+{
+ return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sra_epi16(__m128i __a, __m128i __count)
+{
+ return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srai_epi32(__m128i __a, int __count)
+{
+ return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sra_epi32(__m128i __a, __m128i __count)
+{
+ return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count);
+}
+
+#define _mm_srli_si128(a, imm) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v16qi)(__m128i)(a), \
+ (__v16qi)_mm_setzero_si128(), \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 0, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 1, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 2, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 3, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 4, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 5, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 6, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 7, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 8, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 9, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 10, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 11, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 12, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 13, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 14, \
+ ((imm)&0xF0) ? 16 : ((imm)&0xF) + 15); })
+
+#define _mm_bsrli_si128(a, imm) \
+ _mm_srli_si128((a), (imm))
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srli_epi16(__m128i __a, int __count)
+{
+ return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srl_epi16(__m128i __a, __m128i __count)
+{
+ return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srli_epi32(__m128i __a, int __count)
+{
+ return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srl_epi32(__m128i __a, __m128i __count)
+{
+ return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srli_epi64(__m128i __a, int __count)
+{
+ return __builtin_ia32_psrlqi128(__a, __count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srl_epi64(__m128i __a, __m128i __count)
+{
+ return __builtin_ia32_psrlq128(__a, __count);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpeq_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v16qi)__a == (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpeq_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v8hi)__a == (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpeq_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v4si)__a == (__v4si)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi8(__m128i __a, __m128i __b)
+{
+  /* This function always performs a signed comparison, but __v16qi is a
+     vector of char, whose signedness is implementation-defined, so use the
+     explicitly signed __v16qs. */
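+  /* E.g. for bytes 0x80 and 0x01 the signed compare is false (-128 > 1);
+     with unsigned char semantics it would be true (128 > 1). */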
+ return (__m128i)((__v16qs)__a > (__v16qs)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v8hi)__a > (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v4si)__a > (__v4si)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmplt_epi8(__m128i __a, __m128i __b)
+{
+ return _mm_cmpgt_epi8(__b, __a);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmplt_epi16(__m128i __a, __m128i __b)
+{
+ return _mm_cmpgt_epi16(__b, __a);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmplt_epi32(__m128i __a, __m128i __b)
+{
+ return _mm_cmpgt_epi32(__b, __a);
+}
+
+#ifdef __x86_64__
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtsi64_sd(__m128d __a, long long __b)
+{
+ __a[0] = __b;
+ return __a;
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvtsd_si64(__m128d __a)
+{
+ return __builtin_ia32_cvtsd2si64(__a);
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvttsd_si64(__m128d __a)
+{
+ return __a[0];
+}
+#endif
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtepi32_ps(__m128i __a)
+{
+ return __builtin_ia32_cvtdq2ps((__v4si)__a);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtps_epi32(__m128 __a)
+{
+ return (__m128i)__builtin_ia32_cvtps2dq(__a);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttps_epi32(__m128 __a)
+{
+ return (__m128i)__builtin_ia32_cvttps2dq(__a);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtsi32_si128(int __a)
+{
+ return (__m128i)(__v4si){ __a, 0, 0, 0 };
+}
+
+#ifdef __x86_64__
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtsi64_si128(long long __a)
+{
+ return (__m128i){ __a, 0 };
+}
+#endif
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvtsi128_si32(__m128i __a)
+{
+ __v4si __b = (__v4si)__a;
+ return __b[0];
+}
+
+#ifdef __x86_64__
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvtsi128_si64(__m128i __a)
+{
+ return __a[0];
+}
+#endif
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_load_si128(__m128i const *__p)
+{
+ return *__p;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_loadu_si128(__m128i const *__p)
+{
+ struct __loadu_si128 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_si128*)__p)->__v;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_loadl_epi64(__m128i const *__p)
+{
+ struct __mm_loadl_epi64_struct {
+ long long __u;
+ } __attribute__((__packed__, __may_alias__));
+ return (__m128i) { ((struct __mm_loadl_epi64_struct*)__p)->__u, 0};
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_undefined_si128(void)
+{
+ return (__m128i)__builtin_ia32_undef128();
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set_epi64x(long long __q1, long long __q0)
+{
+ return (__m128i){ __q0, __q1 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set_epi64(__m64 __q1, __m64 __q0)
+{
+ return (__m128i){ (long long)__q0, (long long)__q1 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
+{
+ return (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short __w2, short __w1, short __w0)
+{
+ return (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0)
+{
+ return (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set1_epi64x(long long __q)
+{
+ return (__m128i){ __q, __q };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set1_epi64(__m64 __q)
+{
+ return (__m128i){ (long long)__q, (long long)__q };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set1_epi32(int __i)
+{
+ return (__m128i)(__v4si){ __i, __i, __i, __i };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set1_epi16(short __w)
+{
+ return (__m128i)(__v8hi){ __w, __w, __w, __w, __w, __w, __w, __w };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set1_epi8(char __b)
+{
+ return (__m128i)(__v16qi){ __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_setr_epi64(__m64 __q0, __m64 __q1)
+{
+ return (__m128i){ (long long)__q0, (long long)__q1 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
+{
+ return (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short __w5, short __w6, short __w7)
+{
+ return (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7, char __b8, char __b9, char __b10, char __b11, char __b12, char __b13, char __b14, char __b15)
+{
+ return (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_setzero_si128(void)
+{
+ return (__m128i){ 0LL, 0LL };
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_si128(__m128i *__p, __m128i __b)
+{
+ *__p = __b;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeu_si128(__m128i *__p, __m128i __b)
+{
+ __builtin_ia32_storedqu((char *)__p, (__v16qi)__b);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
+{
+ __builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, __p);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storel_epi64(__m128i *__p, __m128i __a)
+{
+ struct __mm_storel_epi64_struct {
+ long long __u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_storel_epi64_struct*)__p)->__u = __a[0];
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_pd(double *__p, __m128d __a)
+{
+ __builtin_ia32_movntpd(__p, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_si128(__m128i *__p, __m128i __a)
+{
+ __builtin_ia32_movntdq(__p, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_si32(int *__p, int __a)
+{
+ __builtin_ia32_movnti(__p, __a);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_si64(long long *__p, long long __a)
+{
+ __builtin_ia32_movnti64(__p, __a);
+}
+#endif
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_clflush(void const *__p)
+{
+ __builtin_ia32_clflush(__p);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_lfence(void)
+{
+ __builtin_ia32_lfence();
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mfence(void)
+{
+ __builtin_ia32_mfence();
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_packs_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_packs_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_packus_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_extract_epi16(__m128i __a, int __imm)
+{
+ __v8hi __b = (__v8hi)__a;
+ return (unsigned short)__b[__imm & 7];
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_insert_epi16(__m128i __a, int __b, int __imm)
+{
+ __v8hi __c = (__v8hi)__a;
+ __c[__imm & 7] = __b;
+ return (__m128i)__c;
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_movemask_epi8(__m128i __a)
+{
+ return __builtin_ia32_pmovmskb128((__v16qi)__a);
+}
+
+#define _mm_shuffle_epi32(a, imm) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v4si)(__m128i)(a), \
+ (__v4si)_mm_setzero_si128(), \
+ (imm) & 0x3, ((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6); })
+
+#define _mm_shufflelo_epi16(a, imm) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(a), \
+ (__v8hi)_mm_setzero_si128(), \
+ (imm) & 0x3, ((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+ 4, 5, 6, 7); })
+
+#define _mm_shufflehi_epi16(a, imm) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(a), \
+ (__v8hi)_mm_setzero_si128(), \
+ 0, 1, 2, 3, \
+ 4 + (((imm) & 0x03) >> 0), \
+ 4 + (((imm) & 0x0c) >> 2), \
+ 4 + (((imm) & 0x30) >> 4), \
+ 4 + (((imm) & 0xc0) >> 6)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpackhi_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpackhi_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpackhi_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4+2, 3, 4+3);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpackhi_epi64(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector(__a, __b, 1, 2+1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpacklo_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpacklo_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpacklo_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4+0, 1, 4+1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_unpacklo_epi64(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_shufflevector(__a, __b, 0, 2+0);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_movepi64_pi64(__m128i __a)
+{
+ return (__m64)__a[0];
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_movpi64_epi64(__m64 __a)
+{
+ return (__m128i){ (long long)__a, 0 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_move_epi64(__m128i __a)
+{
+ return __builtin_shufflevector(__a, (__m128i){ 0 }, 0, 2);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_unpackhi_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_shufflevector(__a, __b, 1, 2+1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_unpacklo_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_shufflevector(__a, __b, 0, 2+0);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_movemask_pd(__m128d __a)
+{
+ return __builtin_ia32_movmskpd(__a);
+}
+
+#define _mm_shuffle_pd(a, b, i) __extension__ ({ \
+ (__m128d)__builtin_shufflevector((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
+ (i) & 1, (((i) & 2) >> 1) + 2); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_castpd_ps(__m128d __a)
+{
+ return (__m128)__a;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_castpd_si128(__m128d __a)
+{
+ return (__m128i)__a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_castps_pd(__m128 __a)
+{
+ return (__m128d)__a;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_castps_si128(__m128 __a)
+{
+ return (__m128i)__a;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_castsi128_ps(__m128i __a)
+{
+ return (__m128)__a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_castsi128_pd(__m128i __a)
+{
+ return (__m128d)__a;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_pause(void)
+{
+ __builtin_ia32_pause();
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
+
+#endif /* __EMMINTRIN_H */
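A minimal usage sketch for the integer side of the header (illustrative only; build with SSE2 enabled):

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
        int a[4] = { 1, 2, 3, 4 };
        int b[4] = { 10, 20, 30, 40 };
        __m128i va = _mm_loadu_si128((const __m128i *)a);
        __m128i vb = _mm_loadu_si128((const __m128i *)b);
        __m128i vc = _mm_add_epi32(va, vb);       /* { 11, 22, 33, 44 } */
        int c[4];
        _mm_storeu_si128((__m128i *)c, vc);
        printf("%d %d %d %d\n", c[0], c[1], c[2], c[3]);
        return 0;
    }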
diff --git a/contrib/llvm/tools/clang/lib/Headers/f16cintrin.h b/contrib/llvm/tools/clang/lib/Headers/f16cintrin.h
new file mode 100644
index 0000000..c655d98
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/f16cintrin.h
@@ -0,0 +1,45 @@
+/*===---- f16cintrin.h - F16C intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __EMMINTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <f16cintrin.h> directly; include <emmintrin.h> instead."
+#endif
+
+#ifndef __F16CINTRIN_H
+#define __F16CINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("f16c")))
+
+#define _mm_cvtps_ph(a, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm)); })
+
+static __inline __m128 __DEFAULT_FN_ATTRS
+_mm_cvtph_ps(__m128i __a)
+{
+ return (__m128)__builtin_ia32_vcvtph2ps((__v8hi)__a);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __F16CINTRIN_H */
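A minimal round-trip sketch (assuming an F16C-capable build; imm 0 selects round-to-nearest-even for the narrowing conversion):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128 f = _mm_set_ps(0.0f, 1.0f, 2.5f, -8.0f);
        __m128i h = _mm_cvtps_ph(f, 0); /* four halves in the low 64 bits */
        __m128 g = _mm_cvtph_ps(h);     /* widen back to single precision */
        float out[4];
        _mm_storeu_ps(out, g);
        printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
        return 0;
    }

All four sample values are exactly representable as halves, so the round trip is lossless here.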
diff --git a/contrib/llvm/tools/clang/lib/Headers/float.h b/contrib/llvm/tools/clang/lib/Headers/float.h
new file mode 100644
index 0000000..238cf76
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/float.h
@@ -0,0 +1,124 @@
+/*===---- float.h - Characteristics of floating point types ----------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __FLOAT_H
+#define __FLOAT_H
+
+/* If we're on MinGW or MSVC, fall back to the system's float.h, which might
+ * have additional definitions provided for Windows.
+ * For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
+ */
+#if (defined(__MINGW32__) || defined(_MSC_VER)) && __STDC_HOSTED__ && \
+ __has_include_next(<float.h>)
+# include_next <float.h>
+
+/* Undefine anything that we'll be redefining below. */
+# undef FLT_EVAL_METHOD
+# undef FLT_ROUNDS
+# undef FLT_RADIX
+# undef FLT_MANT_DIG
+# undef DBL_MANT_DIG
+# undef LDBL_MANT_DIG
+# undef DECIMAL_DIG
+# undef FLT_DIG
+# undef DBL_DIG
+# undef LDBL_DIG
+# undef FLT_MIN_EXP
+# undef DBL_MIN_EXP
+# undef LDBL_MIN_EXP
+# undef FLT_MIN_10_EXP
+# undef DBL_MIN_10_EXP
+# undef LDBL_MIN_10_EXP
+# undef FLT_MAX_EXP
+# undef DBL_MAX_EXP
+# undef LDBL_MAX_EXP
+# undef FLT_MAX_10_EXP
+# undef DBL_MAX_10_EXP
+# undef LDBL_MAX_10_EXP
+# undef FLT_MAX
+# undef DBL_MAX
+# undef LDBL_MAX
+# undef FLT_EPSILON
+# undef DBL_EPSILON
+# undef LDBL_EPSILON
+# undef FLT_MIN
+# undef DBL_MIN
+# undef LDBL_MIN
+# if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__)
+# undef FLT_TRUE_MIN
+# undef DBL_TRUE_MIN
+# undef LDBL_TRUE_MIN
+# endif
+#endif
+
+/* Characteristics of floating point types, C99 5.2.4.2.2 */
+
+#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
+#define FLT_ROUNDS (__builtin_flt_rounds())
+#define FLT_RADIX __FLT_RADIX__
+
+#define FLT_MANT_DIG __FLT_MANT_DIG__
+#define DBL_MANT_DIG __DBL_MANT_DIG__
+#define LDBL_MANT_DIG __LDBL_MANT_DIG__
+
+#define DECIMAL_DIG __DECIMAL_DIG__
+
+#define FLT_DIG __FLT_DIG__
+#define DBL_DIG __DBL_DIG__
+#define LDBL_DIG __LDBL_DIG__
+
+#define FLT_MIN_EXP __FLT_MIN_EXP__
+#define DBL_MIN_EXP __DBL_MIN_EXP__
+#define LDBL_MIN_EXP __LDBL_MIN_EXP__
+
+#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__
+#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__
+#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__
+
+#define FLT_MAX_EXP __FLT_MAX_EXP__
+#define DBL_MAX_EXP __DBL_MAX_EXP__
+#define LDBL_MAX_EXP __LDBL_MAX_EXP__
+
+#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__
+#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__
+#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__
+
+#define FLT_MAX __FLT_MAX__
+#define DBL_MAX __DBL_MAX__
+#define LDBL_MAX __LDBL_MAX__
+
+#define FLT_EPSILON __FLT_EPSILON__
+#define DBL_EPSILON __DBL_EPSILON__
+#define LDBL_EPSILON __LDBL_EPSILON__
+
+#define FLT_MIN __FLT_MIN__
+#define DBL_MIN __DBL_MIN__
+#define LDBL_MIN __LDBL_MIN__
+
+#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__)
+# define FLT_TRUE_MIN __FLT_DENORM_MIN__
+# define DBL_TRUE_MIN __DBL_DENORM_MIN__
+# define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
+#endif
+
+#endif /* __FLOAT_H */
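A minimal sketch of inspecting these limits (FLT_TRUE_MIN and friends are only defined under C11 or non-strict-ANSI builds, per the guard above):

    #include <float.h>
    #include <stdio.h>

    int main(void) {
        printf("FLT_EPSILON  = %g\n", FLT_EPSILON);  /* 1.0f + eps != 1.0f */
        printf("DBL_MIN      = %g\n", DBL_MIN);      /* smallest normal */
    #ifdef DBL_TRUE_MIN
        printf("DBL_TRUE_MIN = %g\n", DBL_TRUE_MIN); /* smallest subnormal */
    #endif
        printf("DBL_DIG      = %d\n", DBL_DIG);      /* round-trip digits */
        return 0;
    }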
diff --git a/contrib/llvm/tools/clang/lib/Headers/fma4intrin.h b/contrib/llvm/tools/clang/lib/Headers/fma4intrin.h
new file mode 100644
index 0000000..f117887
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/fma4intrin.h
@@ -0,0 +1,230 @@
+/*===---- fma4intrin.h - FMA4 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <fma4intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __FMA4INTRIN_H
+#define __FMA4INTRIN_H
+
+#include <pmmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fma4")))
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_macc_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_macc_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_macc_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_msub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_msub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_msub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_msub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_nmacc_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_nmsub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_nmsub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maddsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maddsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_msubadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubaddpd(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmaddpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfnmaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfnmaddpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfnmsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfnmsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maddsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmaddsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maddsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmaddsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_msubadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmsubaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_msubadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmsubaddpd256(__A, __B, __C);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __FMA4INTRIN_H */
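
The _mm_macc_* family above maps to AMD's four-operand FMA4 encodings, computing a*b + c with a single rounding step. A hedged sketch of typical use, assuming an FMA4-capable target (e.g. compiled with -mfma4) and <x86intrin.h> as the entry point, per the guard at the top of the file:

    #include <x86intrin.h>

    /* a*b + c on four packed floats, rounded once (vfmaddps). */
    __m128 fma4_madd(__m128 a, __m128 b, __m128 c)
    {
        return _mm_macc_ps(a, b, c);
    }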
diff --git a/contrib/llvm/tools/clang/lib/Headers/fmaintrin.h b/contrib/llvm/tools/clang/lib/Headers/fmaintrin.h
new file mode 100644
index 0000000..114a143
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/fmaintrin.h
@@ -0,0 +1,228 @@
+/*===---- fmaintrin.h - FMA intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <fmaintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __FMAINTRIN_H
+#define __FMAINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fma")))
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubaddpd(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmaddpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfnmaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfnmaddpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfnmsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfnmsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmaddsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmaddsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmsubaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmsubaddpd256(__A, __B, __C);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __FMAINTRIN_H */
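
The FMA3 variants follow the same a*b + c contract under the _mm_fmadd_* names. A minimal sketch, assuming an FMA-capable target (-mfma) and inclusion via <immintrin.h>:

    #include <immintrin.h>

    /* a*b + c on four packed doubles, rounded once. */
    __m256d fma_madd(__m256d a, __m256d b, __m256d c)
    {
        return _mm256_fmadd_pd(a, b, c);
    }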
diff --git a/contrib/llvm/tools/clang/lib/Headers/fxsrintrin.h b/contrib/llvm/tools/clang/lib/Headers/fxsrintrin.h
new file mode 100644
index 0000000..ac6026a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/fxsrintrin.h
@@ -0,0 +1,55 @@
+/*===---- fxsrintrin.h - FXSR intrinsic ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <fxsrintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __FXSRINTRIN_H
+#define __FXSRINTRIN_H
+
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fxsr")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_fxsave(void *__p) {
+ return __builtin_ia32_fxsave(__p);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_fxsave64(void *__p) {
+ return __builtin_ia32_fxsave64(__p);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_fxrstor(void *__p) {
+ return __builtin_ia32_fxrstor(__p);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_fxrstor64(void *__p) {
+ return __builtin_ia32_fxrstor64(__p);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __FXSRINTRIN_H */
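
_fxsave and _fxrstor wrap the FXSAVE/FXRSTOR instructions, which snapshot and restore the x87/MMX/SSE state. The 512-byte size and 16-byte alignment in the sketch below come from the instruction's architectural definition, not from this header, so treat it as a hedged illustration:

    #include <immintrin.h>

    void around_fpu_clobber(void)
    {
        /* FXSAVE requires a 512-byte, 16-byte-aligned save area. */
        static _Alignas(16) char area[512];

        _fxsave(area);   /* snapshot x87/MMX/SSE state */
        /* ... code that may clobber the FPU/SSE state ... */
        _fxrstor(area);  /* restore the snapshot */
    }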
diff --git a/contrib/llvm/tools/clang/lib/Headers/htmintrin.h b/contrib/llvm/tools/clang/lib/Headers/htmintrin.h
new file mode 100644
index 0000000..0088c7c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/htmintrin.h
@@ -0,0 +1,226 @@
+/*===---- htmintrin.h - Standard header for PowerPC HTM ---------------===*\
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __HTMINTRIN_H
+#define __HTMINTRIN_H
+
+#ifndef __HTM__
+#error "HTM instruction set not enabled"
+#endif
+
+#ifdef __powerpc__
+
+#include <stdint.h>
+
+typedef uint64_t texasr_t;
+typedef uint32_t texasru_t;
+typedef uint32_t texasrl_t;
+typedef uintptr_t tfiar_t;
+typedef uintptr_t tfhar_t;
+
+#define _HTM_STATE(CR0) ((CR0 >> 1) & 0x3)
+#define _HTM_NONTRANSACTIONAL 0x0
+#define _HTM_SUSPENDED 0x1
+#define _HTM_TRANSACTIONAL 0x2
+
+#define _TEXASR_EXTRACT_BITS(TEXASR,BITNUM,SIZE) \
+ (((TEXASR) >> (63-(BITNUM))) & ((1<<(SIZE))-1))
+#define _TEXASRU_EXTRACT_BITS(TEXASR,BITNUM,SIZE) \
+ (((TEXASR) >> (31-(BITNUM))) & ((1<<(SIZE))-1))
+
+#define _TEXASR_FAILURE_CODE(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 7, 8)
+#define _TEXASRU_FAILURE_CODE(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 7, 8)
+
+#define _TEXASR_FAILURE_PERSISTENT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 7, 1)
+#define _TEXASRU_FAILURE_PERSISTENT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 7, 1)
+
+#define _TEXASR_DISALLOWED(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 8, 1)
+#define _TEXASRU_DISALLOWED(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 8, 1)
+
+#define _TEXASR_NESTING_OVERFLOW(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 9, 1)
+#define _TEXASRU_NESTING_OVERFLOW(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 9, 1)
+
+#define _TEXASR_FOOTPRINT_OVERFLOW(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 10, 1)
+#define _TEXASRU_FOOTPRINT_OVERFLOW(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 10, 1)
+
+#define _TEXASR_SELF_INDUCED_CONFLICT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 11, 1)
+#define _TEXASRU_SELF_INDUCED_CONFLICT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 11, 1)
+
+#define _TEXASR_NON_TRANSACTIONAL_CONFLICT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 12, 1)
+#define _TEXASRU_NON_TRANSACTIONAL_CONFLICT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 12, 1)
+
+#define _TEXASR_TRANSACTION_CONFLICT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 13, 1)
+#define _TEXASRU_TRANSACTION_CONFLICT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 13, 1)
+
+#define _TEXASR_TRANSLATION_INVALIDATION_CONFLICT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 14, 1)
+#define _TEXASRU_TRANSLATION_INVALIDATION_CONFLICT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 14, 1)
+
+#define _TEXASR_IMPLEMENTATION_SPECIFIC(TEXASR) \
+  _TEXASR_EXTRACT_BITS(TEXASR, 15, 1)
+#define _TEXASRU_IMPLEMENTATION_SPECIFIC(TEXASRU) \
+  _TEXASRU_EXTRACT_BITS(TEXASRU, 15, 1)
+
+#define _TEXASR_INSTRUCTION_FETCH_CONFLICT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 16, 1)
+#define _TEXASRU_INSTRUCTION_FETCH_CONFLICT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 16, 1)
+
+#define _TEXASR_ABORT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 31, 1)
+#define _TEXASRU_ABORT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 31, 1)
+
+
+#define _TEXASR_SUSPENDED(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 32, 1)
+
+#define _TEXASR_PRIVILEGE(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 35, 2)
+
+#define _TEXASR_FAILURE_SUMMARY(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 36, 1)
+
+#define _TEXASR_TFIAR_EXACT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 37, 1)
+
+#define _TEXASR_ROT(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 38, 1)
+
+#define _TEXASR_TRANSACTION_LEVEL(TEXASR) \
+ _TEXASR_EXTRACT_BITS(TEXASR, 63, 12)
+
+#endif /* __powerpc__ */
+
+#ifdef __s390__
+
+/* Condition codes generated by tbegin */
+#define _HTM_TBEGIN_STARTED 0
+#define _HTM_TBEGIN_INDETERMINATE 1
+#define _HTM_TBEGIN_TRANSIENT 2
+#define _HTM_TBEGIN_PERSISTENT 3
+
+/* The abort codes below this threshold are reserved for machine use. */
+#define _HTM_FIRST_USER_ABORT_CODE 256
+
+/* The transaction diagnostic block as it is defined in the Principles
+   of Operation, chapter 5-91. */
+
+struct __htm_tdb {
+ unsigned char format; /* 0 */
+ unsigned char flags;
+ unsigned char reserved1[4];
+ unsigned short nesting_depth;
+ unsigned long long abort_code; /* 8 */
+ unsigned long long conflict_token; /* 16 */
+ unsigned long long atia; /* 24 */
+ unsigned char eaid; /* 32 */
+ unsigned char dxc;
+ unsigned char reserved2[2];
+ unsigned int program_int_id;
+ unsigned long long exception_id; /* 40 */
+ unsigned long long bea; /* 48 */
+ unsigned char reserved3[72]; /* 56 */
+ unsigned long long gprs[16]; /* 128 */
+} __attribute__((__packed__, __aligned__ (8)));
+
+
+/* Helper intrinsics to retry tbegin in case of transient failure. */
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+__builtin_tbegin_retry_null (int retry)
+{
+ int cc, i = 0;
+
+ while ((cc = __builtin_tbegin(0)) == _HTM_TBEGIN_TRANSIENT
+ && i++ < retry)
+ __builtin_tx_assist(i);
+
+ return cc;
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+__builtin_tbegin_retry_tdb (void *tdb, int retry)
+{
+ int cc, i = 0;
+
+ while ((cc = __builtin_tbegin(tdb)) == _HTM_TBEGIN_TRANSIENT
+ && i++ < retry)
+ __builtin_tx_assist(i);
+
+ return cc;
+}
+
+#define __builtin_tbegin_retry(tdb, retry) \
+ (__builtin_constant_p(tdb == 0) && tdb == 0 ? \
+ __builtin_tbegin_retry_null(retry) : \
+ __builtin_tbegin_retry_tdb(tdb, retry))
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+__builtin_tbegin_retry_nofloat_null (int retry)
+{
+ int cc, i = 0;
+
+ while ((cc = __builtin_tbegin_nofloat(0)) == _HTM_TBEGIN_TRANSIENT
+ && i++ < retry)
+ __builtin_tx_assist(i);
+
+ return cc;
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+__builtin_tbegin_retry_nofloat_tdb (void *tdb, int retry)
+{
+ int cc, i = 0;
+
+ while ((cc = __builtin_tbegin_nofloat(tdb)) == _HTM_TBEGIN_TRANSIENT
+ && i++ < retry)
+ __builtin_tx_assist(i);
+
+ return cc;
+}
+
+#define __builtin_tbegin_retry_nofloat(tdb, retry) \
+ (__builtin_constant_p(tdb == 0) && tdb == 0 ? \
+ __builtin_tbegin_retry_nofloat_null(retry) : \
+ __builtin_tbegin_retry_nofloat_tdb(tdb, retry))
+
+#endif /* __s390__ */
+
+#endif /* __HTMINTRIN_H */
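
On PowerPC, the _TEXASR_* macros above extract diagnostic fields from the TEXASR register captured after a transaction fails. A hedged sketch of a failure handler consulting them (assumes a POWER target with HTM enabled, e.g. -mhtm):

    #include <htmintrin.h>

    /* Nonzero if a failed transaction is worth retrying: the failure
       is neither persistent nor a disallowed operation. */
    int worth_retrying(texasr_t texasr)
    {
        if (_TEXASR_FAILURE_PERSISTENT(texasr))
            return 0;
        if (_TEXASR_DISALLOWED(texasr))
            return 0;
        return 1;
    }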
diff --git a/contrib/llvm/tools/clang/lib/Headers/htmxlintrin.h b/contrib/llvm/tools/clang/lib/Headers/htmxlintrin.h
new file mode 100644
index 0000000..c7571ec
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/htmxlintrin.h
@@ -0,0 +1,363 @@
+/*===---- htmxlintrin.h - XL compiler HTM execution intrinsics-------------===*\
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __HTMXLINTRIN_H
+#define __HTMXLINTRIN_H
+
+#ifndef __HTM__
+#error "HTM instruction set not enabled"
+#endif
+
+#include <htmintrin.h>
+
+#ifdef __powerpc__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define _TEXASR_PTR(TM_BUF) \
+ ((texasr_t *)((TM_BUF)+0))
+#define _TEXASRU_PTR(TM_BUF) \
+ ((texasru_t *)((TM_BUF)+0))
+#define _TEXASRL_PTR(TM_BUF) \
+ ((texasrl_t *)((TM_BUF)+4))
+#define _TFIAR_PTR(TM_BUF) \
+ ((tfiar_t *)((TM_BUF)+8))
+
+typedef char TM_buff_type[16];
+
+/* This macro can be used to determine whether a transaction was successfully
+ started from the __TM_begin() and __TM_simple_begin() intrinsic functions
+ below. */
+#define _HTM_TBEGIN_STARTED 1
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_simple_begin (void)
+{
+ if (__builtin_expect (__builtin_tbegin (0), 1))
+ return _HTM_TBEGIN_STARTED;
+ return 0;
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_begin (void* const TM_buff)
+{
+ *_TEXASRL_PTR (TM_buff) = 0;
+ if (__builtin_expect (__builtin_tbegin (0), 1))
+ return _HTM_TBEGIN_STARTED;
+#ifdef __powerpc64__
+ *_TEXASR_PTR (TM_buff) = __builtin_get_texasr ();
+#else
+ *_TEXASRU_PTR (TM_buff) = __builtin_get_texasru ();
+ *_TEXASRL_PTR (TM_buff) = __builtin_get_texasr ();
+#endif
+ *_TFIAR_PTR (TM_buff) = __builtin_get_tfiar ();
+ return 0;
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_end (void)
+{
+ if (__builtin_expect (__builtin_tend (0), 1))
+ return 1;
+ return 0;
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_abort (void)
+{
+ __builtin_tabort (0);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_named_abort (unsigned char const code)
+{
+ __builtin_tabort (code);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_resume (void)
+{
+ __builtin_tresume ();
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_suspend (void)
+{
+ __builtin_tsuspend ();
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_user_abort (void* const TM_buff)
+{
+ texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+ return _TEXASRU_ABORT (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_named_user_abort (void* const TM_buff, unsigned char *code)
+{
+ texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+
+ *code = _TEXASRU_FAILURE_CODE (texasru);
+ return _TEXASRU_ABORT (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_illegal (void* const TM_buff)
+{
+ texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+ return _TEXASRU_DISALLOWED (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_footprint_exceeded (void* const TM_buff)
+{
+ texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+ return _TEXASRU_FOOTPRINT_OVERFLOW (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_nesting_depth (void* const TM_buff)
+{
+ texasrl_t texasrl;
+
+ if (_HTM_STATE (__builtin_ttest ()) == _HTM_NONTRANSACTIONAL)
+ {
+ texasrl = *_TEXASRL_PTR (TM_buff);
+ if (!_TEXASR_FAILURE_SUMMARY (texasrl))
+ texasrl = 0;
+ }
+ else
+ texasrl = (texasrl_t) __builtin_get_texasr ();
+
+ return _TEXASR_TRANSACTION_LEVEL (texasrl);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_nested_too_deep(void* const TM_buff)
+{
+ texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+ return _TEXASRU_NESTING_OVERFLOW (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_conflict(void* const TM_buff)
+{
+ texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+ /* Return TEXASR bits 11 (Self-Induced Conflict) through
+ 14 (Translation Invalidation Conflict). */
+ return (_TEXASRU_EXTRACT_BITS (texasru, 14, 4)) ? 1 : 0;
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_failure_persistent(void* const TM_buff)
+{
+ texasru_t texasru = *_TEXASRU_PTR (TM_buff);
+ return _TEXASRU_FAILURE_PERSISTENT (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_failure_address(void* const TM_buff)
+{
+ return *_TFIAR_PTR (TM_buff);
+}
+
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_failure_code(void* const TM_buff)
+{
+ return *_TEXASR_PTR (TM_buff);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __powerpc__ */
+
+#ifdef __s390__
+
+#include <stdint.h>
+
+/* These intrinsics are being made available for compatibility with
+   the IBM XL compiler. For documentation please see the "z/OS XL
+   C/C++ Programming Guide" publicly available on the web. */
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_simple_begin ()
+{
+ return __builtin_tbegin_nofloat (0);
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_begin (void* const tdb)
+{
+ return __builtin_tbegin_nofloat (tdb);
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_end ()
+{
+ return __builtin_tend ();
+}
+
+static __inline void __attribute__((__always_inline__))
+__TM_abort ()
+{
+ return __builtin_tabort (_HTM_FIRST_USER_ABORT_CODE);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+__TM_named_abort (unsigned char const code)
+{
+ return __builtin_tabort ((int)_HTM_FIRST_USER_ABORT_CODE + code);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+__TM_non_transactional_store (void* const addr, long long const value)
+{
+ __builtin_non_tx_store ((uint64_t*)addr, (uint64_t)value);
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_nesting_depth (void* const tdb_ptr)
+{
+ int depth = __builtin_tx_nesting_depth ();
+ struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+
+ if (depth != 0)
+ return depth;
+
+ if (tdb->format != 1)
+ return 0;
+ return tdb->nesting_depth;
+}
+
+/* Transaction failure diagnostics */
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_user_abort (void* const tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+
+ if (tdb->format != 1)
+ return 0;
+
+ return !!(tdb->abort_code >= _HTM_FIRST_USER_ABORT_CODE);
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_named_user_abort (void* const tdb_ptr, unsigned char* code)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+
+ if (tdb->format != 1)
+ return 0;
+
+ if (tdb->abort_code >= _HTM_FIRST_USER_ABORT_CODE)
+ {
+ *code = tdb->abort_code - _HTM_FIRST_USER_ABORT_CODE;
+ return 1;
+ }
+ return 0;
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_illegal (void* const tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+
+ return (tdb->format == 1
+ && (tdb->abort_code == 4 /* unfiltered program interruption */
+ || tdb->abort_code == 11 /* restricted instruction */));
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_footprint_exceeded (void* const tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+
+ return (tdb->format == 1
+ && (tdb->abort_code == 7 /* fetch overflow */
+ || tdb->abort_code == 8 /* store overflow */));
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_nested_too_deep (void* const tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+
+ return tdb->format == 1 && tdb->abort_code == 13; /* depth exceeded */
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_conflict (void* const tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+
+ return (tdb->format == 1
+ && (tdb->abort_code == 9 /* fetch conflict */
+ || tdb->abort_code == 10 /* store conflict */));
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_is_failure_persistent (long const result)
+{
+ return result == _HTM_TBEGIN_PERSISTENT;
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_failure_address (void* const tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+ return tdb->atia;
+}
+
+static __inline long __attribute__((__always_inline__, __nodebug__))
+__TM_failure_code (void* const tdb_ptr)
+{
+ struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
+
+ return tdb->abort_code;
+}
+
+#endif /* __s390__ */
+
+#endif /* __HTMXLINTRIN_H */
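
Putting the XL-style wrappers together, a hedged retry loop in the shape these intrinsics anticipate (PowerPC flavor shown; shared_counter is a hypothetical transactional variable):

    #include <htmxlintrin.h>

    extern long shared_counter;   /* hypothetical transactional data */

    void increment_transactionally(void)
    {
        TM_buff_type buf;

        for (;;) {
            if (__TM_begin(buf) == _HTM_TBEGIN_STARTED) {
                ++shared_counter;        /* transactional body */
                __TM_end();
                return;
            }
            if (__TM_is_failure_persistent(buf))
                break;                   /* retrying cannot help */
        }
        /* A fallback path (e.g. taking a lock) would go here. */
    }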
diff --git a/contrib/llvm/tools/clang/lib/Headers/ia32intrin.h b/contrib/llvm/tools/clang/lib/Headers/ia32intrin.h
new file mode 100644
index 0000000..b2f82bb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/ia32intrin.h
@@ -0,0 +1,77 @@
+/* ===-------- ia32intrin.h ---------------------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <ia32intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __IA32INTRIN_H
+#define __IA32INTRIN_H
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__readeflags(void)
+{
+ return __builtin_ia32_readeflags_u64();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__writeeflags(unsigned long long __f)
+{
+ __builtin_ia32_writeeflags_u64(__f);
+}
+
+#else /* !__x86_64__ */
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__readeflags(void)
+{
+ return __builtin_ia32_readeflags_u32();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__writeeflags(unsigned int __f)
+{
+ __builtin_ia32_writeeflags_u32(__f);
+}
+#endif /* !__x86_64__ */
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__rdpmc(int __A) {
+ return __builtin_ia32_rdpmc(__A);
+}
+
+/* __rdtsc */
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__rdtsc(void) {
+ return __builtin_ia32_rdtsc();
+}
+
+/* __rdtscp */
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__rdtscp(unsigned int *__A) {
+ return __builtin_ia32_rdtscp(__A);
+}
+
+#define _rdtsc() __rdtsc()
+
+#endif /* __IA32INTRIN_H */
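
__rdtsc reads the processor's timestamp counter. A common measurement sketch, hedged because the TSC is not serializing and its frequency is model-specific:

    #include <x86intrin.h>
    #include <stdio.h>

    int main(void)
    {
        volatile int sink = 0;
        unsigned long long start = __rdtsc();

        for (int i = 0; i < 1000; ++i)
            sink += i;                   /* work being timed */

        unsigned long long end = __rdtsc();
        printf("elapsed cycles: %llu (sink=%d)\n", end - start, sink);
        return 0;
    }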
diff --git a/contrib/llvm/tools/clang/lib/Headers/immintrin.h b/contrib/llvm/tools/clang/lib/Headers/immintrin.h
new file mode 100644
index 0000000..6376461
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/immintrin.h
@@ -0,0 +1,174 @@
+/*===---- immintrin.h - Intel intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#define __IMMINTRIN_H
+
+#include <mmintrin.h>
+
+#include <xmmintrin.h>
+
+#include <emmintrin.h>
+
+#include <pmmintrin.h>
+
+#include <tmmintrin.h>
+
+#include <smmintrin.h>
+
+#include <wmmintrin.h>
+
+#include <avxintrin.h>
+
+#include <avx2intrin.h>
+
+/* The 256-bit versions of functions in f16cintrin.h.
+ Intel documents these as being in immintrin.h, and
+ they depend on typedefs from avxintrin.h. */
+
+#define _mm256_cvtps_ph(a, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)(__m256)(a), (imm)); })
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__, __target__("f16c")))
+_mm256_cvtph_ps(__m128i __a)
+{
+ return (__m256)__builtin_ia32_vcvtph2ps256((__v8hi)__a);
+}
+
+#include <bmiintrin.h>
+
+#include <bmi2intrin.h>
+
+#include <lzcntintrin.h>
+
+#include <fmaintrin.h>
+
+#include <avx512fintrin.h>
+
+#include <avx512vlintrin.h>
+
+#include <avx512bwintrin.h>
+
+#include <avx512cdintrin.h>
+
+#include <avx512dqintrin.h>
+
+#include <avx512vlbwintrin.h>
+
+#include <avx512vldqintrin.h>
+
+#include <avx512erintrin.h>
+
+#include <pkuintrin.h>
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
+_rdrand16_step(unsigned short *__p)
+{
+ return __builtin_ia32_rdrand16_step(__p);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
+_rdrand32_step(unsigned int *__p)
+{
+ return __builtin_ia32_rdrand32_step(__p);
+}
+
+#ifdef __x86_64__
+static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
+_rdrand64_step(unsigned long long *__p)
+{
+ return __builtin_ia32_rdrand64_step(__p);
+}
+#endif
+
+#ifdef __x86_64__
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_readfsbase_u32(void)
+{
+ return __builtin_ia32_rdfsbase32();
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_readfsbase_u64(void)
+{
+ return __builtin_ia32_rdfsbase64();
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_readgsbase_u32(void)
+{
+ return __builtin_ia32_rdgsbase32();
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_readgsbase_u64(void)
+{
+ return __builtin_ia32_rdgsbase64();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_writefsbase_u32(unsigned int __V)
+{
+ return __builtin_ia32_wrfsbase32(__V);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_writefsbase_u64(unsigned long long __V)
+{
+ return __builtin_ia32_wrfsbase64(__V);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_writegsbase_u32(unsigned int __V)
+{
+ return __builtin_ia32_wrgsbase32(__V);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
+_writegsbase_u64(unsigned long long __V)
+{
+ return __builtin_ia32_wrgsbase64(__V);
+}
+#endif
+
+#include <rtmintrin.h>
+
+#include <xtestintrin.h>
+
+#include <shaintrin.h>
+
+#include <fxsrintrin.h>
+
+#include <xsaveintrin.h>
+
+#include <xsaveoptintrin.h>
+
+#include <xsavecintrin.h>
+
+#include <xsavesintrin.h>
+
+/* Some intrinsics inside adxintrin.h are available only on processors with ADX,
+ * whereas others are available at all times. */
+#include <adxintrin.h>
+
+#endif /* __IMMINTRIN_H */
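
Among the intrinsics defined above, _rdrand32_step illustrates the status-return convention: it yields 1 on success and 0 when the hardware DRNG is momentarily exhausted, so callers retry. A minimal sketch assuming an RDRAND-capable target (-mrdrnd):

    #include <immintrin.h>

    /* Loops until the DRNG delivers a value; production code would
       bound the retry count. */
    unsigned int hw_random32(void)
    {
        unsigned int value;
        while (!_rdrand32_step(&value))
            ;                            /* 0 means "retry" */
        return value;
    }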
diff --git a/contrib/llvm/tools/clang/lib/Headers/inttypes.h b/contrib/llvm/tools/clang/lib/Headers/inttypes.h
new file mode 100644
index 0000000..3d59d14
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/inttypes.h
@@ -0,0 +1,102 @@
+/*===---- inttypes.h - Standard header for integer printf macros ----------===*\
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __CLANG_INTTYPES_H
+#define __CLANG_INTTYPES_H
+
+#include_next <inttypes.h>
+
+#if defined(_MSC_VER) && _MSC_VER < 1900
+/* MSVC headers define int32_t as int, but PRIx32 as "lx" instead of "x".
+ * This triggers format warnings, so fix it up here. */
+#undef PRId32
+#undef PRIdLEAST32
+#undef PRIdFAST32
+#undef PRIi32
+#undef PRIiLEAST32
+#undef PRIiFAST32
+#undef PRIo32
+#undef PRIoLEAST32
+#undef PRIoFAST32
+#undef PRIu32
+#undef PRIuLEAST32
+#undef PRIuFAST32
+#undef PRIx32
+#undef PRIxLEAST32
+#undef PRIxFAST32
+#undef PRIX32
+#undef PRIXLEAST32
+#undef PRIXFAST32
+
+#undef SCNd32
+#undef SCNdLEAST32
+#undef SCNdFAST32
+#undef SCNi32
+#undef SCNiLEAST32
+#undef SCNiFAST32
+#undef SCNo32
+#undef SCNoLEAST32
+#undef SCNoFAST32
+#undef SCNu32
+#undef SCNuLEAST32
+#undef SCNuFAST32
+#undef SCNx32
+#undef SCNxLEAST32
+#undef SCNxFAST32
+
+#define PRId32 "d"
+#define PRIdLEAST32 "d"
+#define PRIdFAST32 "d"
+#define PRIi32 "i"
+#define PRIiLEAST32 "i"
+#define PRIiFAST32 "i"
+#define PRIo32 "o"
+#define PRIoLEAST32 "o"
+#define PRIoFAST32 "o"
+#define PRIu32 "u"
+#define PRIuLEAST32 "u"
+#define PRIuFAST32 "u"
+#define PRIx32 "x"
+#define PRIxLEAST32 "x"
+#define PRIxFAST32 "x"
+#define PRIX32 "X"
+#define PRIXLEAST32 "X"
+#define PRIXFAST32 "X"
+
+#define SCNd32 "d"
+#define SCNdLEAST32 "d"
+#define SCNdFAST32 "d"
+#define SCNi32 "i"
+#define SCNiLEAST32 "i"
+#define SCNiFAST32 "i"
+#define SCNo32 "o"
+#define SCNoLEAST32 "o"
+#define SCNoFAST32 "o"
+#define SCNu32 "u"
+#define SCNuLEAST32 "u"
+#define SCNuFAST32 "u"
+#define SCNx32 "x"
+#define SCNxLEAST32 "x"
+#define SCNxFAST32 "x"
+#endif
+
+#endif /* __CLANG_INTTYPES_H */
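
The PRI*/SCN* overrides above exist because MSVC's "lx"-style 32-bit macros trip format warnings when int32_t is int. The intended portable usage:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        int32_t v = 48879;
        /* PRIx32 expands to the right length modifier for int32_t
           on every platform, so this never warns. */
        printf("v = 0x%" PRIx32 "\n", v);
        return 0;
    }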
diff --git a/contrib/llvm/tools/clang/lib/Headers/iso646.h b/contrib/llvm/tools/clang/lib/Headers/iso646.h
new file mode 100644
index 0000000..dca13c5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/iso646.h
@@ -0,0 +1,43 @@
+/*===---- iso646.h - Standard header for alternate spellings of operators---===
+ *
+ * Copyright (c) 2008 Eli Friedman
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ISO646_H
+#define __ISO646_H
+
+#ifndef __cplusplus
+#define and &&
+#define and_eq &=
+#define bitand &
+#define bitor |
+#define compl ~
+#define not !
+#define not_eq !=
+#define or ||
+#define or_eq |=
+#define xor ^
+#define xor_eq ^=
+#endif
+
+#endif /* __ISO646_H */
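
A one-line illustration of the alternate spellings (C only; in C++ these are keywords, so the block above compiles away):

    #include <iso646.h>

    int in_range(int x)
    {
        return x >= 0 and x < 10;   /* same as: x >= 0 && x < 10 */
    }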
diff --git a/contrib/llvm/tools/clang/lib/Headers/limits.h b/contrib/llvm/tools/clang/lib/Headers/limits.h
new file mode 100644
index 0000000..f04187c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/limits.h
@@ -0,0 +1,118 @@
+/*===---- limits.h - Standard header for integer sizes --------------------===*\
+ *
+ * Copyright (c) 2009 Chris Lattner
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __CLANG_LIMITS_H
+#define __CLANG_LIMITS_H
+
+/* The system's limits.h may, in turn, try to #include_next GCC's limits.h.
+ Avert this #include_next madness. */
+#if defined __GNUC__ && !defined _GCC_LIMITS_H_
+#define _GCC_LIMITS_H_
+#endif
+
+/* System headers include a number of constants from POSIX in <limits.h>.
+ Include it if we're hosted. */
+#if __STDC_HOSTED__ && __has_include_next(<limits.h>)
+#include_next <limits.h>
+#endif
+
+/* Many system headers try to "help us out" by defining these. No really, we
+ know how big each datatype is. */
+#undef SCHAR_MIN
+#undef SCHAR_MAX
+#undef UCHAR_MAX
+#undef SHRT_MIN
+#undef SHRT_MAX
+#undef USHRT_MAX
+#undef INT_MIN
+#undef INT_MAX
+#undef UINT_MAX
+#undef LONG_MIN
+#undef LONG_MAX
+#undef ULONG_MAX
+
+#undef CHAR_BIT
+#undef CHAR_MIN
+#undef CHAR_MAX
+
+/* C90/99 5.2.4.2.1 */
+#define SCHAR_MAX __SCHAR_MAX__
+#define SHRT_MAX __SHRT_MAX__
+#define INT_MAX __INT_MAX__
+#define LONG_MAX __LONG_MAX__
+
+#define SCHAR_MIN (-__SCHAR_MAX__-1)
+#define SHRT_MIN (-__SHRT_MAX__ -1)
+#define INT_MIN (-__INT_MAX__ -1)
+#define LONG_MIN (-__LONG_MAX__ -1L)
+
+#define UCHAR_MAX (__SCHAR_MAX__*2 +1)
+#define USHRT_MAX (__SHRT_MAX__ *2 +1)
+#define UINT_MAX (__INT_MAX__ *2U +1U)
+#define ULONG_MAX (__LONG_MAX__ *2UL+1UL)
+
+#ifndef MB_LEN_MAX
+#define MB_LEN_MAX 1
+#endif
+
+#define CHAR_BIT __CHAR_BIT__
+
+#ifdef __CHAR_UNSIGNED__ /* -funsigned-char */
+#define CHAR_MIN 0
+#define CHAR_MAX UCHAR_MAX
+#else
+#define CHAR_MIN SCHAR_MIN
+#define CHAR_MAX __SCHAR_MAX__
+#endif
+
+/* C99 5.2.4.2.1: Added long long.
+ C++11 18.3.3.2: same contents as the Standard C Library header <limits.h>.
+ */
+#if __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L
+
+#undef LLONG_MIN
+#undef LLONG_MAX
+#undef ULLONG_MAX
+
+#define LLONG_MAX __LONG_LONG_MAX__
+#define LLONG_MIN (-__LONG_LONG_MAX__-1LL)
+#define ULLONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)
+#endif
+
+/* LONG_LONG_MIN/LONG_LONG_MAX/ULONG_LONG_MAX are a GNU extension. It's too bad
+ that we don't have something like #pragma poison that could be used to
+ deprecate a macro - the code should just use LLONG_MAX and friends.
+ */
+#if defined(__GNU_LIBRARY__) ? defined(__USE_GNU) : !defined(__STRICT_ANSI__)
+
+#undef LONG_LONG_MIN
+#undef LONG_LONG_MAX
+#undef ULONG_LONG_MAX
+
+#define LONG_LONG_MAX __LONG_LONG_MAX__
+#define LONG_LONG_MIN (-__LONG_LONG_MAX__-1LL)
+#define ULONG_LONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)
+#endif
+
+#endif /* __CLANG_LIMITS_H */
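
The unsigned maxima above are derived from the compiler's signed maxima rather than hard-coded, e.g. UINT_MAX as __INT_MAX__*2U+1U. A small check of that identity:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        printf("INT_MAX  = %d\n", INT_MAX);
        printf("UINT_MAX = %u\n", UINT_MAX);
        /* The header's derivation: UINT_MAX == INT_MAX * 2U + 1U. */
        printf("identity holds: %d\n", UINT_MAX == (unsigned)INT_MAX * 2U + 1U);
        return 0;
    }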
diff --git a/contrib/llvm/tools/clang/lib/Headers/lzcntintrin.h b/contrib/llvm/tools/clang/lib/Headers/lzcntintrin.h
new file mode 100644
index 0000000..4c00e42
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/lzcntintrin.h
@@ -0,0 +1,68 @@
+/*===---- lzcntintrin.h - LZCNT intrinsics ---------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <lzcntintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __LZCNTINTRIN_H
+#define __LZCNTINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("lzcnt")))
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS
+__lzcnt16(unsigned short __X)
+{
+ return __X ? __builtin_clzs(__X) : 16;
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__lzcnt32(unsigned int __X)
+{
+ return __X ? __builtin_clz(__X) : 32;
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_lzcnt_u32(unsigned int __X)
+{
+ return __X ? __builtin_clz(__X) : 32;
+}
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__lzcnt64(unsigned long long __X)
+{
+ return __X ? __builtin_clzll(__X) : 64;
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_lzcnt_u64(unsigned long long __X)
+{
+ return __X ? __builtin_clzll(__X) : 64;
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __LZCNTINTRIN_H */
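
Note the zero-input handling above: plain __builtin_clz is undefined for 0, so the wrappers return the full operand width instead, matching the LZCNT instruction. A hedged sketch (-mlzcnt assumed):

    #include <x86intrin.h>

    /* Index of the highest set bit, or -1 when x is zero,
       since _lzcnt_u32(0) is defined to return 32. */
    int highest_set_bit(unsigned int x)
    {
        return 31 - (int)_lzcnt_u32(x);
    }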
diff --git a/contrib/llvm/tools/clang/lib/Headers/mm3dnow.h b/contrib/llvm/tools/clang/lib/Headers/mm3dnow.h
new file mode 100644
index 0000000..cb93faf
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/mm3dnow.h
@@ -0,0 +1,171 @@
+/*===---- mm3dnow.h - 3DNow! intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _MM3DNOW_H_INCLUDED
+#define _MM3DNOW_H_INCLUDED
+
+#include <mmintrin.h>
+#include <prfchwintrin.h>
+
+typedef float __v2sf __attribute__((__vector_size__(8)));
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnow")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_m_femms() {
+ __builtin_ia32_femms();
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pavgusb(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pavgusb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pf2id(__m64 __m) {
+ return (__m64)__builtin_ia32_pf2id((__v2sf)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfacc(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfacc((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfadd(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfadd((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfcmpeq(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfcmpeq((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfcmpge(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfcmpge((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfcmpgt(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfcmpgt((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfmax(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfmax((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfmin(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfmin((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfmul(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfmul((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfrcp(__m64 __m) {
+ return (__m64)__builtin_ia32_pfrcp((__v2sf)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfrcpit1(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfrcpit1((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfrcpit2(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfrcpit2((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfrsqrt(__m64 __m) {
+ return (__m64)__builtin_ia32_pfrsqrt((__v2sf)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfrsqrtit1(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfrsqit1((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfsub(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfsub((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfsubr(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfsubr((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pi2fd(__m64 __m) {
+ return (__m64)__builtin_ia32_pi2fd((__v2si)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pmulhrw(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pmulhrw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Handle the 3dnowa instructions here. */
+#undef __DEFAULT_FN_ATTRS
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnowa")))
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pf2iw(__m64 __m) {
+ return (__m64)__builtin_ia32_pf2iw((__v2sf)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfnacc(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfnacc((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfpnacc(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfpnacc((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pi2fw(__m64 __m) {
+ return (__m64)__builtin_ia32_pi2fw((__v2si)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pswapdsf(__m64 __m) {
+ return (__m64)__builtin_ia32_pswapdsf((__v2sf)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pswapdsi(__m64 __m) {
+ return (__m64)__builtin_ia32_pswapdsi((__v2si)__m);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
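
3DNow! hardware is long retired, but the classic usage pattern the header supports is: do packed-float work in MMX registers, then _m_femms to leave MMX state before any x87 code runs. A hedged sketch (-m3dnow assumed):

    #include <mm3dnow.h>

    void add_packed_floats(const __m64 *a, const __m64 *b, __m64 *out)
    {
        *out = _m_pfadd(*a, *b);   /* two packed single-precision adds */
        _m_femms();                /* fast EMMS: leave MMX state */
    }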
diff --git a/contrib/llvm/tools/clang/lib/Headers/mm_malloc.h b/contrib/llvm/tools/clang/lib/Headers/mm_malloc.h
new file mode 100644
index 0000000..305afd3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/mm_malloc.h
@@ -0,0 +1,75 @@
+/*===---- mm_malloc.h - Allocating and Freeing Aligned Memory Blocks -------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __MM_MALLOC_H
+#define __MM_MALLOC_H
+
+#include <stdlib.h>
+
+#ifdef _WIN32
+#include <malloc.h>
+#else
+#ifndef __cplusplus
+extern int posix_memalign(void **__memptr, size_t __alignment, size_t __size);
+#else
+// Some systems (e.g. those with GNU libc) declare posix_memalign with an
+// exception specifier. Via an "egregious workaround" in
+// Sema::CheckEquivalentExceptionSpec, Clang accepts the following as a valid
+// redeclaration of glibc's declaration.
+extern "C" int posix_memalign(void **__memptr, size_t __alignment, size_t __size);
+#endif
+#endif
+
+#if !(defined(_WIN32) && defined(_mm_malloc))
+static __inline__ void *__attribute__((__always_inline__, __nodebug__,
+ __malloc__))
+_mm_malloc(size_t __size, size_t __align)
+{
+ if (__align == 1) {
+ return malloc(__size);
+ }
+
+ if (!(__align & (__align - 1)) && __align < sizeof(void *))
+ __align = sizeof(void *);
+
+ void *__mallocedMemory;
+#if defined(__MINGW32__)
+ __mallocedMemory = __mingw_aligned_malloc(__size, __align);
+#elif defined(_WIN32)
+ __mallocedMemory = _aligned_malloc(__size, __align);
+#else
+ if (posix_memalign(&__mallocedMemory, __align, __size))
+ return 0;
+#endif
+
+ return __mallocedMemory;
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_free(void *__p)
+{
+ free(__p);
+}
+#endif
+
+#endif /* __MM_MALLOC_H */
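
[Editorial sketch, not part of the diff: _mm_malloc routes to posix_memalign, __mingw_aligned_malloc, or _aligned_malloc depending on the platform, so allocations must be released with _mm_free rather than free().]

    #include <mm_malloc.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        /* 1 KiB aligned to a 64-byte cache line. */
        float *buf = (float *)_mm_malloc(1024, 64);
        if (!buf)
            return 1;
        printf("aligned: %d\n", (int)((uintptr_t)buf % 64 == 0));  /* prints 1 */
        _mm_free(buf);  /* pair with _mm_malloc; plain free() is wrong on Windows */
        return 0;
    }
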
diff --git a/contrib/llvm/tools/clang/lib/Headers/mmintrin.h b/contrib/llvm/tools/clang/lib/Headers/mmintrin.h
new file mode 100644
index 0000000..162cb1a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/mmintrin.h
@@ -0,0 +1,503 @@
+/*===---- mmintrin.h - MMX intrinsics --------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __MMINTRIN_H
+#define __MMINTRIN_H
+
+typedef long long __m64 __attribute__((__vector_size__(8)));
+
+typedef int __v2si __attribute__((__vector_size__(8)));
+typedef short __v4hi __attribute__((__vector_size__(8)));
+typedef char __v8qi __attribute__((__vector_size__(8)));
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mmx")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_empty(void)
+{
+ __builtin_ia32_emms();
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtsi32_si64(int __i)
+{
+ return (__m64)__builtin_ia32_vec_init_v2si(__i, 0);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvtsi64_si32(__m64 __m)
+{
+ return __builtin_ia32_vec_ext_v2si((__v2si)__m, 0);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtsi64_m64(long long __i)
+{
+ return (__m64)__i;
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvtm64_si64(__m64 __m)
+{
+ return (long long)__m;
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_packs_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_packsswb((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_packs_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_packssdw((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_packs_pu16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_packuswb((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_unpackhi_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpckhbw((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_unpackhi_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpckhwd((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_unpackhi_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpckhdq((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_unpacklo_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpcklbw((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_unpacklo_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpcklwd((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_unpacklo_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpckldq((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_add_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_add_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_add_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddd((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_adds_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddsb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_adds_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_adds_pu8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddusb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_adds_pu16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddusw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sub_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sub_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sub_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubd((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_subs_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubsb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_subs_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubsw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_subs_pu8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubusb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_subs_pu16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubusw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_madd_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pmaddwd((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_mulhi_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pmulhw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_mullo_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pmullw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sll_pi16(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psllw((__v4hi)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_slli_pi16(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psllwi((__v4hi)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sll_pi32(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_pslld((__v2si)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_slli_pi32(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_pslldi((__v2si)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sll_si64(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psllq(__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_slli_si64(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psllqi(__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sra_pi16(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psraw((__v4hi)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srai_pi16(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psrawi((__v4hi)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sra_pi32(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psrad((__v2si)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srai_pi32(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psradi((__v2si)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srl_pi16(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psrlw((__v4hi)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srli_pi16(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psrlwi((__v4hi)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srl_pi32(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psrld((__v2si)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srli_pi32(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psrldi((__v2si)__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srl_si64(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psrlq(__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_srli_si64(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psrlqi(__m, __count);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_and_si64(__m64 __m1, __m64 __m2)
+{
+ return __builtin_ia32_pand(__m1, __m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_andnot_si64(__m64 __m1, __m64 __m2)
+{
+ return __builtin_ia32_pandn(__m1, __m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_or_si64(__m64 __m1, __m64 __m2)
+{
+ return __builtin_ia32_por(__m1, __m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_xor_si64(__m64 __m1, __m64 __m2)
+{
+ return __builtin_ia32_pxor(__m1, __m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cmpeq_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpeqb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cmpeq_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpeqw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cmpeq_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpeqd((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cmpgt_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpgtb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cmpgt_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpgtw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cmpgt_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpgtd((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_setzero_si64(void)
+{
+ return (__m64){ 0LL };
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_set_pi32(int __i1, int __i0)
+{
+ return (__m64)__builtin_ia32_vec_init_v2si(__i0, __i1);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_set_pi16(short __s3, short __s2, short __s1, short __s0)
+{
+ return (__m64)__builtin_ia32_vec_init_v4hi(__s0, __s1, __s2, __s3);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2,
+ char __b1, char __b0)
+{
+ return (__m64)__builtin_ia32_vec_init_v8qi(__b0, __b1, __b2, __b3,
+ __b4, __b5, __b6, __b7);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_set1_pi32(int __i)
+{
+ return _mm_set_pi32(__i, __i);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_set1_pi16(short __w)
+{
+ return _mm_set_pi16(__w, __w, __w, __w);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_set1_pi8(char __b)
+{
+ return _mm_set_pi8(__b, __b, __b, __b, __b, __b, __b, __b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_setr_pi32(int __i0, int __i1)
+{
+ return _mm_set_pi32(__i1, __i0);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_setr_pi16(short __w0, short __w1, short __w2, short __w3)
+{
+ return _mm_set_pi16(__w3, __w2, __w1, __w0);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,
+ char __b6, char __b7)
+{
+ return _mm_set_pi8(__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+/* Aliases for compatibility. */
+#define _m_empty _mm_empty
+#define _m_from_int _mm_cvtsi32_si64
+#define _m_from_int64 _mm_cvtsi64_m64
+#define _m_to_int _mm_cvtsi64_si32
+#define _m_to_int64 _mm_cvtm64_si64
+#define _m_packsswb _mm_packs_pi16
+#define _m_packssdw _mm_packs_pi32
+#define _m_packuswb _mm_packs_pu16
+#define _m_punpckhbw _mm_unpackhi_pi8
+#define _m_punpckhwd _mm_unpackhi_pi16
+#define _m_punpckhdq _mm_unpackhi_pi32
+#define _m_punpcklbw _mm_unpacklo_pi8
+#define _m_punpcklwd _mm_unpacklo_pi16
+#define _m_punpckldq _mm_unpacklo_pi32
+#define _m_paddb _mm_add_pi8
+#define _m_paddw _mm_add_pi16
+#define _m_paddd _mm_add_pi32
+#define _m_paddsb _mm_adds_pi8
+#define _m_paddsw _mm_adds_pi16
+#define _m_paddusb _mm_adds_pu8
+#define _m_paddusw _mm_adds_pu16
+#define _m_psubb _mm_sub_pi8
+#define _m_psubw _mm_sub_pi16
+#define _m_psubd _mm_sub_pi32
+#define _m_psubsb _mm_subs_pi8
+#define _m_psubsw _mm_subs_pi16
+#define _m_psubusb _mm_subs_pu8
+#define _m_psubusw _mm_subs_pu16
+#define _m_pmaddwd _mm_madd_pi16
+#define _m_pmulhw _mm_mulhi_pi16
+#define _m_pmullw _mm_mullo_pi16
+#define _m_psllw _mm_sll_pi16
+#define _m_psllwi _mm_slli_pi16
+#define _m_pslld _mm_sll_pi32
+#define _m_pslldi _mm_slli_pi32
+#define _m_psllq _mm_sll_si64
+#define _m_psllqi _mm_slli_si64
+#define _m_psraw _mm_sra_pi16
+#define _m_psrawi _mm_srai_pi16
+#define _m_psrad _mm_sra_pi32
+#define _m_psradi _mm_srai_pi32
+#define _m_psrlw _mm_srl_pi16
+#define _m_psrlwi _mm_srli_pi16
+#define _m_psrld _mm_srl_pi32
+#define _m_psrldi _mm_srli_pi32
+#define _m_psrlq _mm_srl_si64
+#define _m_psrlqi _mm_srli_si64
+#define _m_pand _mm_and_si64
+#define _m_pandn _mm_andnot_si64
+#define _m_por _mm_or_si64
+#define _m_pxor _mm_xor_si64
+#define _m_pcmpeqb _mm_cmpeq_pi8
+#define _m_pcmpeqw _mm_cmpeq_pi16
+#define _m_pcmpeqd _mm_cmpeq_pi32
+#define _m_pcmpgtb _mm_cmpgt_pi8
+#define _m_pcmpgtw _mm_cmpgt_pi16
+#define _m_pcmpgtd _mm_cmpgt_pi32
+
+#endif /* __MMINTRIN_H */
+
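
[Editorial sketch, not part of the diff: note that _mm_set_pi16 takes its arguments high-element-first, and that _mm_empty must run before subsequent x87 code because MMX aliases the x87 registers. Uses the _m_paddw compatibility alias from the table above.]

    #include <mmintrin.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
        __m64 a = _mm_set_pi16(4, 3, 2, 1);  /* the last argument lands in element 0 */
        __m64 b = _mm_set1_pi16(10);
        __m64 s = _m_paddw(a, b);            /* alias for _mm_add_pi16 */
        short out[4];
        memcpy(out, &s, sizeof(out));
        _mm_empty();                         /* reset the x87 tag word after MMX use */
        printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  /* 11 12 13 14 */
        return 0;
    }
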
diff --git a/contrib/llvm/tools/clang/lib/Headers/module.modulemap b/contrib/llvm/tools/clang/lib/Headers/module.modulemap
new file mode 100644
index 0000000..b147e89
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/module.modulemap
@@ -0,0 +1,171 @@
+module _Builtin_intrinsics [system] [extern_c] {
+ explicit module altivec {
+ requires altivec
+ header "altivec.h"
+ }
+
+ explicit module arm {
+ requires arm
+
+ explicit module acle {
+ header "arm_acle.h"
+ export *
+ }
+
+ explicit module neon {
+ requires neon
+ header "arm_neon.h"
+ export *
+ }
+ }
+
+ explicit module intel {
+ requires x86
+ export *
+
+ header "immintrin.h"
+ header "x86intrin.h"
+
+ explicit module mm_malloc {
+ header "mm_malloc.h"
+ export * // note: for <stdlib.h> dependency
+ }
+
+ explicit module cpuid {
+ header "cpuid.h"
+ }
+
+ explicit module mmx {
+ header "mmintrin.h"
+ }
+
+ explicit module f16c {
+ header "f16cintrin.h"
+ }
+
+ explicit module sse {
+ export mmx
+ export sse2 // note: for hackish <emmintrin.h> dependency
+ header "xmmintrin.h"
+ }
+
+ explicit module sse2 {
+ export sse
+ header "emmintrin.h"
+ }
+
+ explicit module sse3 {
+ export sse2
+ header "pmmintrin.h"
+ }
+
+ explicit module ssse3 {
+ export sse3
+ header "tmmintrin.h"
+ }
+
+ explicit module sse4_1 {
+ export ssse3
+ header "smmintrin.h"
+ }
+
+ explicit module sse4_2 {
+ export sse4_1
+ header "nmmintrin.h"
+ }
+
+ explicit module sse4a {
+ export sse3
+ header "ammintrin.h"
+ }
+
+ explicit module avx {
+ export sse4_2
+ header "avxintrin.h"
+ }
+
+ explicit module avx2 {
+ export avx
+ header "avx2intrin.h"
+ }
+
+ explicit module avx512f {
+ export avx2
+ header "avx512fintrin.h"
+ }
+
+ explicit module avx512er {
+ header "avx512erintrin.h"
+ }
+
+ explicit module bmi {
+ header "bmiintrin.h"
+ }
+
+ explicit module bmi2 {
+ header "bmi2intrin.h"
+ }
+
+ explicit module fma {
+ header "fmaintrin.h"
+ }
+
+ explicit module fma4 {
+ export sse3
+ header "fma4intrin.h"
+ }
+
+ explicit module lzcnt {
+ header "lzcntintrin.h"
+ }
+
+ explicit module popcnt {
+ header "popcntintrin.h"
+ }
+
+ explicit module mm3dnow {
+ header "mm3dnow.h"
+ }
+
+ explicit module xop {
+ export fma4
+ header "xopintrin.h"
+ }
+
+ explicit module aes_pclmul {
+ header "wmmintrin.h"
+ export aes
+ export pclmul
+ }
+
+ explicit module aes {
+ header "__wmmintrin_aes.h"
+ }
+
+ explicit module pclmul {
+ header "__wmmintrin_pclmul.h"
+ }
+ }
+
+ explicit module systemz {
+ requires systemz
+ export *
+
+ header "s390intrin.h"
+
+ explicit module htm {
+ requires htm
+ header "htmintrin.h"
+ header "htmxlintrin.h"
+ }
+
+ explicit module zvector {
+ requires zvector, vx
+ header "vecintrin.h"
+ }
+ }
+}
+
+module _Builtin_stddef_max_align_t [system] [extern_c] {
+ header "__stddef_max_align_t.h"
+}
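
[Editorial sketch, not part of the diff: the `export` edges in the module map mirror the textual include graph, so a consumer built with -fmodules still sees the transitive declarations. A hedged C illustration of what the sse2 module's `export sse` edge means in practice.]

    /* With -fmodules this textual include maps onto the sse2 module above,
       whose "export sse" edge re-exports the SSE1 declarations as well. */
    #include <emmintrin.h>

    __m128d zd(void) { return _mm_setzero_pd(); }  /* declared by sse2 */
    __m128  zs(void) { return _mm_setzero_ps(); }  /* declared by sse, re-exported */
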
diff --git a/contrib/llvm/tools/clang/lib/Headers/nmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/nmmintrin.h
new file mode 100644
index 0000000..57fec15
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/nmmintrin.h
@@ -0,0 +1,30 @@
+/*===---- nmmintrin.h - SSE4 intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _NMMINTRIN_H
+#define _NMMINTRIN_H
+
+/* To match the expectations of gcc we put the sse4.2 definitions into
+   smmintrin.h, so just include that header here. */
REPLACED_BY_NEXT_EDIT
+#include <smmintrin.h>
+#endif /* _NMMINTRIN_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/pkuintrin.h b/contrib/llvm/tools/clang/lib/Headers/pkuintrin.h
new file mode 100644
index 0000000..ad12348
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/pkuintrin.h
@@ -0,0 +1,48 @@
+/*===---- pkuintrin.h - PKU intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <pkuintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __PKUINTRIN_H
+#define __PKUINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("pku")))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_rdpkru_u32(void)
+{
+ return __builtin_ia32_rdpkru();
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_wrpkru(unsigned int __val)
+{
+ __builtin_ia32_wrpkru(__val);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
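
[Editorial sketch, not part of the diff: PKRU holds two bits per protection key — bit 2*k disables access, bit 2*k+1 disables writes. A hedged example of write-protecting key 1, assuming a PKU-enabled CPU/OS (-mpku) and pages already tagged with that key, e.g. via pkey_mprotect(2).]

    #include <immintrin.h>

    void write_protect_key1(void) {
        unsigned int pkru = _rdpkru_u32();
        _wrpkru(pkru | (1u << 3));   /* set the write-disable bit for key 1 */
    }
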
diff --git a/contrib/llvm/tools/clang/lib/Headers/pmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/pmmintrin.h
new file mode 100644
index 0000000..0ff9409
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/pmmintrin.h
@@ -0,0 +1,116 @@
+/*===---- pmmintrin.h - SSE3 intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __PMMINTRIN_H
+#define __PMMINTRIN_H
+
+#include <emmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse3")))
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_lddqu_si128(__m128i const *__p)
+{
+ return (__m128i)__builtin_ia32_lddqu((char const *)__p);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_addsub_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_addsubps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_hadd_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_haddps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_hsub_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_hsubps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_movehdup_ps(__m128 __a)
+{
+ return __builtin_shufflevector(__a, __a, 1, 1, 3, 3);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_moveldup_ps(__m128 __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 0, 2, 2);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_addsub_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_addsubpd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_hadd_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_haddpd(__a, __b);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_hsub_pd(__m128d __a, __m128d __b)
+{
+ return __builtin_ia32_hsubpd(__a, __b);
+}
+
+#define _mm_loaddup_pd(dp) _mm_load1_pd(dp)
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_movedup_pd(__m128d __a)
+{
+ return __builtin_shufflevector(__a, __a, 0, 0);
+}
+
+#define _MM_DENORMALS_ZERO_ON (0x0040)
+#define _MM_DENORMALS_ZERO_OFF (0x0000)
+
+#define _MM_DENORMALS_ZERO_MASK (0x0040)
+
+#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
+#define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_monitor(void const *__p, unsigned __extensions, unsigned __hints)
+{
+ __builtin_ia32_monitor((void *)__p, __extensions, __hints);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mwait(unsigned __extensions, unsigned __hints)
+{
+ __builtin_ia32_mwait(__extensions, __hints);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __PMMINTRIN_H */
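
[Editorial sketch, not part of the diff: _mm_hadd_ps adds adjacent lane pairs, so applying it twice collapses a vector to its total. Assumes an SSE3 target (-msse3); _mm_cvtss_f32 comes in transitively through <emmintrin.h>.]

    #include <pmmintrin.h>

    float hsum_ps(__m128 v) {
        __m128 t = _mm_hadd_ps(v, v);   /* {v0+v1, v2+v3, v0+v1, v2+v3} */
        t = _mm_hadd_ps(t, t);          /* every lane = v0+v1+v2+v3 */
        return _mm_cvtss_f32(t);
    }
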
diff --git a/contrib/llvm/tools/clang/lib/Headers/popcntintrin.h b/contrib/llvm/tools/clang/lib/Headers/popcntintrin.h
new file mode 100644
index 0000000..6fcda65
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/popcntintrin.h
@@ -0,0 +1,58 @@
+/*===---- popcntintrin.h - POPCNT intrinsics -------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _POPCNTINTRIN_H
+#define _POPCNTINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("popcnt")))
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_popcnt_u32(unsigned int __A)
+{
+ return __builtin_popcount(__A);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_popcnt32(int __A)
+{
+ return __builtin_popcount(__A);
+}
+
+#ifdef __x86_64__
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_popcnt_u64(unsigned long long __A)
+{
+ return __builtin_popcountll(__A);
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_popcnt64(long long __A)
+{
+ return __builtin_popcountll(__A);
+}
+#endif /* __x86_64__ */
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* _POPCNTINTRIN_H */
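
[Editorial sketch, not part of the diff: both spellings lower to the same popcount builtin; _mm_popcnt_u32 takes an unsigned argument, _popcnt32 a signed one. Compile with -mpopcnt or a -march that implies it.]

    #include <popcntintrin.h>
    #include <stdio.h>

    int main(void) {
        printf("%d\n", _mm_popcnt_u32(0xF0u));  /* 4 */
        printf("%d\n", _popcnt32(255));         /* 8 */
        return 0;
    }
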
diff --git a/contrib/llvm/tools/clang/lib/Headers/prfchwintrin.h b/contrib/llvm/tools/clang/lib/Headers/prfchwintrin.h
new file mode 100644
index 0000000..ba02857
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/prfchwintrin.h
@@ -0,0 +1,45 @@
+/*===---- prfchwintrin.h - PREFETCHW intrinsic -----------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined(__X86INTRIN_H) && !defined(_MM3DNOW_H_INCLUDED)
+#error "Never use <prfchwintrin.h> directly; include <x86intrin.h> or <mm3dnow.h> instead."
+#endif
+
+#ifndef __PRFCHWINTRIN_H
+#define __PRFCHWINTRIN_H
+
+#if defined(__PRFCHW__) || defined(__3dNOW__)
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_m_prefetch(void *__P)
+{
+ __builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_m_prefetchw(void *__P)
+{
+ __builtin_prefetch (__P, 1, 3 /* _MM_HINT_T0 */);
+}
+#endif
+
+#endif /* __PRFCHWINTRIN_H */
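
[Editorial sketch, not part of the diff: _m_prefetchw hints that a line will be written, so it can be fetched in exclusive state ahead of a read-modify-write. Assumes the guard above is satisfied (compile with -mprfchw or -m3dnow); the look-ahead distance of 8 is an arbitrary illustration.]

    #include <x86intrin.h>

    void bump_all(long *counts, int n) {
        for (int i = 0; i < n; ++i) {
            if (i + 8 < n)
                _m_prefetchw(&counts[i + 8]);  /* prefetch upcoming line for write */
            counts[i]++;
        }
    }
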
diff --git a/contrib/llvm/tools/clang/lib/Headers/rdseedintrin.h b/contrib/llvm/tools/clang/lib/Headers/rdseedintrin.h
new file mode 100644
index 0000000..421f4ea
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/rdseedintrin.h
@@ -0,0 +1,56 @@
+/*===---- rdseedintrin.h - RDSEED intrinsics -------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <rdseedintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __RDSEEDINTRIN_H
+#define __RDSEEDINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rdseed")))
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_rdseed16_step(unsigned short *__p)
+{
+ return __builtin_ia32_rdseed16_step(__p);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_rdseed32_step(unsigned int *__p)
+{
+ return __builtin_ia32_rdseed32_step(__p);
+}
+
+#ifdef __x86_64__
+static __inline__ int __DEFAULT_FN_ATTRS
+_rdseed64_step(unsigned long long *__p)
+{
+ return __builtin_ia32_rdseed64_step(__p);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __RDSEEDINTRIN_H */
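
[Editorial sketch, not part of the diff: RDSEED can transiently run out of entropy, so the step intrinsics return a status and write the value through a pointer; callers are expected to retry. Assumes -mrdseed; the retry bound is an arbitrary illustration.]

    #include <x86intrin.h>

    int get_seed32(unsigned int *out) {
        for (int tries = 0; tries < 100; ++tries)
            if (_rdseed32_step(out))
                return 1;   /* *out now holds a hardware seed */
        return 0;           /* entropy not available right now */
    }
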
diff --git a/contrib/llvm/tools/clang/lib/Headers/rtmintrin.h b/contrib/llvm/tools/clang/lib/Headers/rtmintrin.h
new file mode 100644
index 0000000..e6a58d7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/rtmintrin.h
@@ -0,0 +1,59 @@
+/*===---- rtmintrin.h - RTM intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <rtmintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __RTMINTRIN_H
+#define __RTMINTRIN_H
+
+#define _XBEGIN_STARTED (~0u)
+#define _XABORT_EXPLICIT (1 << 0)
+#define _XABORT_RETRY (1 << 1)
+#define _XABORT_CONFLICT (1 << 2)
+#define _XABORT_CAPACITY (1 << 3)
+#define _XABORT_DEBUG (1 << 4)
+#define _XABORT_NESTED (1 << 5)
+#define _XABORT_CODE(x) (((x) >> 24) & 0xFF)
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rtm")))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_xbegin(void)
+{
+ return __builtin_ia32_xbegin();
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xend(void)
+{
+ __builtin_ia32_xend();
+}
+
+#define _xabort(imm) __builtin_ia32_xabort((imm))
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __RTMINTRIN_H */
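
[Editorial sketch, not part of the diff: the usual RTM pattern — retry on transient aborts (_XABORT_RETRY), otherwise take a non-transactional fallback path. Assumes -mrtm; fallback_lock()/fallback_unlock() are hypothetical stand-ins for a real lock.]

    #include <immintrin.h>

    extern void fallback_lock(void);    /* hypothetical lock for the abort path */
    extern void fallback_unlock(void);

    void increment_tx(long *counter) {
        for (int tries = 0; tries < 3; ++tries) {
            unsigned int status = _xbegin();
            if (status == _XBEGIN_STARTED) {
                ++*counter;
                _xend();                    /* commit the transaction */
                return;
            }
            if (!(status & _XABORT_RETRY))  /* abort was not transient */
                break;
        }
        fallback_lock();                    /* non-transactional fallback */
        ++*counter;
        fallback_unlock();
    }
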
diff --git a/contrib/llvm/tools/clang/lib/Headers/s390intrin.h b/contrib/llvm/tools/clang/lib/Headers/s390intrin.h
new file mode 100644
index 0000000..d51274c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/s390intrin.h
@@ -0,0 +1,39 @@
+/*===---- s390intrin.h - SystemZ intrinsics --------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __S390INTRIN_H
+#define __S390INTRIN_H
+
+#ifndef __s390__
+#error "<s390intrin.h> is for s390 only"
+#endif
+
+#ifdef __HTM__
+#include <htmintrin.h>
+#endif
+
+#ifdef __VEC__
+#include <vecintrin.h>
+#endif
+
+#endif /* __S390INTRIN_H*/
diff --git a/contrib/llvm/tools/clang/lib/Headers/shaintrin.h b/contrib/llvm/tools/clang/lib/Headers/shaintrin.h
new file mode 100644
index 0000000..9b5d218
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/shaintrin.h
@@ -0,0 +1,75 @@
+/*===---- shaintrin.h - SHA intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <shaintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __SHAINTRIN_H
+#define __SHAINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sha")))
+
+#define _mm_sha1rnds4_epu32(V1, V2, M) __extension__ ({ \
+ __builtin_ia32_sha1rnds4((__v4si)(__m128i)(V1), (__v4si)(__m128i)(V2), (M)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha1nexte_epu32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_sha1nexte((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha1msg1_epu32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_sha1msg1((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha1msg2_epu32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_sha1msg2((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha256rnds2_epu32(__m128i __X, __m128i __Y, __m128i __Z)
+{
+ return (__m128i)__builtin_ia32_sha256rnds2((__v4si)__X, (__v4si)__Y, (__v4si)__Z);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha256msg1_epu32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_sha256msg1((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha256msg2_epu32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_sha256msg2((__v4si)__X, (__v4si)__Y);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __SHAINTRIN_H */
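
[Editorial sketch, not part of the diff: the canonical call shape for the SHA-1 message schedule — msg1, an XOR of the middle words, then msg2 to expand four more dwords. A call-shape sketch only (the surrounding round/state handling is omitted); assumes -msha.]

    #include <immintrin.h>

    __m128i sha1_expand(__m128i w0, __m128i w1, __m128i w2, __m128i w3) {
        __m128i t = _mm_sha1msg1_epu32(w0, w1);  /* first schedule step */
        t = _mm_xor_si128(t, w2);                /* inject the middle words */
        return _mm_sha1msg2_epu32(t, w3);        /* final schedule step */
    }
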
diff --git a/contrib/llvm/tools/clang/lib/Headers/smmintrin.h b/contrib/llvm/tools/clang/lib/Headers/smmintrin.h
new file mode 100644
index 0000000..69ad07f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/smmintrin.h
@@ -0,0 +1,508 @@
+/*===---- smmintrin.h - SSE4 intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _SMMINTRIN_H
+#define _SMMINTRIN_H
+
+#include <tmmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.1")))
+
+/* SSE4 Rounding macros. */
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_NEG_INF 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_ZERO 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
+
+#define _MM_FROUND_RAISE_EXC 0x00
+#define _MM_FROUND_NO_EXC 0x08
+
+#define _MM_FROUND_NINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
+#define _MM_FROUND_FLOOR (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
+#define _MM_FROUND_CEIL (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
+#define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
+#define _MM_FROUND_RINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
+#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_ceil_ps(X) _mm_round_ps((X), _MM_FROUND_CEIL)
+#define _mm_ceil_pd(X) _mm_round_pd((X), _MM_FROUND_CEIL)
+#define _mm_ceil_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
+#define _mm_ceil_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_CEIL)
+
+#define _mm_floor_ps(X) _mm_round_ps((X), _MM_FROUND_FLOOR)
+#define _mm_floor_pd(X) _mm_round_pd((X), _MM_FROUND_FLOOR)
+#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
+#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
+
+#define _mm_round_ps(X, M) __extension__ ({ \
+ (__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M)); })
+
+#define _mm_round_ss(X, Y, M) __extension__ ({ \
+ (__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (M)); })
+
+#define _mm_round_pd(X, M) __extension__ ({ \
+ (__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)); })
+
+#define _mm_round_sd(X, Y, M) __extension__ ({ \
+ (__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (M)); })
+
+/* SSE4 Packed Blending Intrinsics. */
+#define _mm_blend_pd(V1, V2, M) __extension__ ({ \
+ (__m128d)__builtin_shufflevector((__v2df)(__m128d)(V1), \
+ (__v2df)(__m128d)(V2), \
+ (((M) & 0x01) ? 2 : 0), \
+ (((M) & 0x02) ? 3 : 1)); })
+
+#define _mm_blend_ps(V1, V2, M) __extension__ ({ \
+ (__m128)__builtin_shufflevector((__v4sf)(__m128)(V1), (__v4sf)(__m128)(V2), \
+ (((M) & 0x01) ? 4 : 0), \
+ (((M) & 0x02) ? 5 : 1), \
+ (((M) & 0x04) ? 6 : 2), \
+ (((M) & 0x08) ? 7 : 3)); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
+{
+ return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2,
+ (__v2df)__M);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
+{
+ return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2,
+ (__v4sf)__M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
+{
+ return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__V1, (__v16qi)__V2,
+ (__v16qi)__M);
+}
+
+#define _mm_blend_epi16(V1, V2, M) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(V1), \
+ (__v8hi)(__m128i)(V2), \
+ (((M) & 0x01) ? 8 : 0), \
+ (((M) & 0x02) ? 9 : 1), \
+ (((M) & 0x04) ? 10 : 2), \
+ (((M) & 0x08) ? 11 : 3), \
+ (((M) & 0x10) ? 12 : 4), \
+ (((M) & 0x20) ? 13 : 5), \
+ (((M) & 0x40) ? 14 : 6), \
+ (((M) & 0x80) ? 15 : 7)); })
+
+/* SSE4 Dword Multiply Instructions. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mullo_epi32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) ((__v4si)__V1 * (__v4si)__V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mul_epi32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__V1, (__v4si)__V2);
+}
+
+/* SSE4 Floating Point Dot Product Instructions. */
+#define _mm_dp_ps(X, Y, M) __extension__ ({ \
+ (__m128) __builtin_ia32_dpps((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (M)); })
+
+#define _mm_dp_pd(X, Y, M) __extension__ ({\
+ (__m128d) __builtin_ia32_dppd((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (M)); })
+
+/* SSE4 Streaming Load Hint Instruction. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_stream_load_si128 (__m128i const *__V)
+{
+ return (__m128i) __builtin_ia32_movntdqa ((const __v2di *) __V);
+}
+
+/* SSE4 Packed Integer Min/Max Instructions. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epi8 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epi8 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epu16 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epu16 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epi32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epi32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epu32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pminud128((__v4si) __V1, (__v4si) __V2);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epu32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pmaxud128((__v4si) __V1, (__v4si) __V2);
+}
+
+/* SSE4 Insertion and Extraction from XMM Register Instructions. */
+#define _mm_insert_ps(X, Y, N) __builtin_ia32_insertps128((X), (Y), (N))
+#define _mm_extract_ps(X, N) (__extension__ \
+ ({ union { int __i; float __f; } __t; \
+ __v4sf __a = (__v4sf)(__m128)(X); \
+ __t.__f = __a[(N) & 3]; \
+ __t.__i;}))
+
+/* Miscellaneous insert and extract macros. */
+/* Extract a single-precision float from X at index N into D. */
+#define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __v4sf __a = (__v4sf)(X); \
+ (D) = __a[N]; }))
+
+/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create
+ an index suitable for _mm_insert_ps. */
+#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))
+
+/* Extract a float from X at index N into the first index of the return. */
+#define _MM_PICK_OUT_PS(X, N) _mm_insert_ps (_mm_setzero_ps(), (X), \
+ _MM_MK_INSERTPS_NDX((N), 0, 0x0e))
+
+/* Insert int into packed integer array at index. */
+#define _mm_insert_epi8(X, I, N) (__extension__ \
+ ({ __v16qi __a = (__v16qi)(__m128i)(X); \
+ __a[(N) & 15] = (I); \
+ __a;}))
+#define _mm_insert_epi32(X, I, N) (__extension__ \
+ ({ __v4si __a = (__v4si)(__m128i)(X); \
+ __a[(N) & 3] = (I); \
+ __a;}))
+#ifdef __x86_64__
+#define _mm_insert_epi64(X, I, N) (__extension__ \
+ ({ __v2di __a = (__v2di)(__m128i)(X); \
+ __a[(N) & 1] = (I); \
+ __a;}))
+#endif /* __x86_64__ */
+
+/* Extract int from packed integer array at index. This returns the element
+ * as a zero extended value, so it is unsigned.
+ */
+#define _mm_extract_epi8(X, N) (__extension__ \
+ ({ __v16qi __a = (__v16qi)(__m128i)(X); \
+ (int)(unsigned char) __a[(N) & 15];}))
+#define _mm_extract_epi32(X, N) (__extension__ \
+ ({ __v4si __a = (__v4si)(__m128i)(X); \
+ (int)__a[(N) & 3];}))
+#ifdef __x86_64__
+#define _mm_extract_epi64(X, N) (__extension__ \
+ ({ __v2di __a = (__v2di)(__m128i)(X); \
+ (long long)__a[(N) & 1];}))
+#endif /* __x86_64 */
+
+/* SSE4 128-bit Packed Integer Comparisons. */
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_testz_si128(__m128i __M, __m128i __V)
+{
+ return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_testc_si128(__m128i __M, __m128i __V)
+{
+ return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_testnzc_si128(__m128i __M, __m128i __V)
+{
+ return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);
+}
+
+#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
+#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))
+#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
+
+/* SSE4 64-bit Packed Integer Comparisons. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
+{
+ return (__m128i)((__v2di)__V1 == (__v2di)__V2);
+}
+
+/* SSE4 Packed Integer Sign-Extension. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi8_epi16(__m128i __V)
+{
+ /* This function always performs a signed extension, but __v16qi is a char
+ which may be signed or unsigned, so use __v16qs. */
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi8_epi32(__m128i __V)
+{
+ /* This function always performs a signed extension, but __v16qi is a char
+ which may be signed or unsigned, so use __v16qs. */
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi8_epi64(__m128i __V)
+{
+ /* This function always performs a signed extension, but __v16qi is a char
+ which may be signed or unsigned, so use __v16qs. */
+ typedef signed char __v16qs __attribute__((__vector_size__(16)));
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi16_epi32(__m128i __V)
+{
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi16_epi64(__m128i __V)
+{
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi32_epi64(__m128i __V)
+{
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di);
+}
+
+/* SSE4 Packed Integer Zero-Extension. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepu8_epi16(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovzxbw128((__v16qi) __V);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepu8_epi32(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovzxbd128((__v16qi)__V);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepu8_epi64(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovzxbq128((__v16qi)__V);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepu16_epi32(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovzxwd128((__v8hi)__V);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepu16_epi64(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovzxwq128((__v8hi)__V);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepu32_epi64(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovzxdq128((__v4si)__V);
+}
+
+/* SSE4 Pack with Unsigned Saturation. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_packus_epi32(__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
+}
+
+/* SSE4 Multiple Packed Sums of Absolute Difference. */
+#define _mm_mpsadbw_epu8(X, Y, M) __extension__ ({ \
+ (__m128i) __builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X), \
+ (__v16qi)(__m128i)(Y), (M)); })
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_minpos_epu16(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
+}
+
+/* Handle the sse4.2 definitions here. */
+
+/* These definitions are normally in nmmintrin.h, but gcc puts them here,
+   so we do the same. */
+
+#undef __DEFAULT_FN_ATTRS
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+
+/* These specify the type of data that we're comparing. */
+#define _SIDD_UBYTE_OPS 0x00
+#define _SIDD_UWORD_OPS 0x01
+#define _SIDD_SBYTE_OPS 0x02
+#define _SIDD_SWORD_OPS 0x03
+
+/* These specify the type of comparison operation. */
+#define _SIDD_CMP_EQUAL_ANY 0x00
+#define _SIDD_CMP_RANGES 0x04
+#define _SIDD_CMP_EQUAL_EACH 0x08
+#define _SIDD_CMP_EQUAL_ORDERED 0x0c
+
+/* These macros specify the polarity of the operation. */
+#define _SIDD_POSITIVE_POLARITY 0x00
+#define _SIDD_NEGATIVE_POLARITY 0x10
+#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
+#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30
+
+/* These macros are used in _mm_cmpXstri() to specify the return. */
+#define _SIDD_LEAST_SIGNIFICANT 0x00
+#define _SIDD_MOST_SIGNIFICANT 0x40
+
+/* These macros are used in _mm_cmpXstri() to specify the return. */
+#define _SIDD_BIT_MASK 0x00
+#define _SIDD_UNIT_MASK 0x40
+
+/* SSE4.2 Packed Comparison Intrinsics. */
+#define _mm_cmpistrm(A, B, M) \
+ (__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistri(A, B, M) \
+ (int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+
+#define _mm_cmpestrm(A, LA, B, LB, M) \
+ (__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+#define _mm_cmpestri(A, LA, B, LB, M) \
+ (int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+
+/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */
+#define _mm_cmpistra(A, B, M) \
+ (int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistrc(A, B, M) \
+ (int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistro(A, B, M) \
+ (int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistrs(A, B, M) \
+ (int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistrz(A, B, M) \
+ (int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+
+#define _mm_cmpestra(A, LA, B, LB, M) \
+ (int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+#define _mm_cmpestrc(A, LA, B, LB, M) \
+ (int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+#define _mm_cmpestro(A, LA, B, LB, M) \
+ (int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+#define _mm_cmpestrs(A, LA, B, LB, M) \
+ (int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+#define _mm_cmpestrz(A, LA, B, LB, M) \
+ (int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+
+/* SSE4.2 Compare Packed Data -- Greater Than. */
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
+{
+ return (__m128i)((__v2di)__V1 > (__v2di)__V2);
+}
+
+/* SSE4.2 Accumulate CRC32. */
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_crc32_u8(unsigned int __C, unsigned char __D)
+{
+ return __builtin_ia32_crc32qi(__C, __D);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_crc32_u16(unsigned int __C, unsigned short __D)
+{
+ return __builtin_ia32_crc32hi(__C, __D);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_crc32_u32(unsigned int __C, unsigned int __D)
+{
+ return __builtin_ia32_crc32si(__C, __D);
+}
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mm_crc32_u64(unsigned long long __C, unsigned long long __D)
+{
+ return __builtin_ia32_crc32di(__C, __D);
+}
+#endif /* __x86_64__ */
+
+#undef __DEFAULT_FN_ATTRS
+
+#ifdef __POPCNT__
+#include <popcntintrin.h>
+#endif
+
+#endif /* _SMMINTRIN_H */
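
A usage sketch (illustrative, not part of the patch above): the SSE4.2 CRC32
accumulators defined in this header can be chained byte-by-byte over a buffer.
The helper name and sample input are hypothetical; compile with -msse4.2.

#include <smmintrin.h>
#include <stdio.h>

/* Accumulate a CRC-32C (Castagnoli) over n bytes, one byte per step. */
static unsigned int crc32c(const unsigned char *p, unsigned long n)
{
    unsigned int crc = ~0u;                /* conventional initial value */
    while (n--)
        crc = _mm_crc32_u8(crc, *p++);
    return ~crc;                           /* conventional final inversion */
}

int main(void)
{
    const unsigned char msg[] = "123456789";
    printf("crc32c = %08x\n", crc32c(msg, sizeof(msg) - 1));
    return 0;
}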
diff --git a/contrib/llvm/tools/clang/lib/Headers/stdalign.h b/contrib/llvm/tools/clang/lib/Headers/stdalign.h
new file mode 100644
index 0000000..3738d12
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/stdalign.h
@@ -0,0 +1,35 @@
+/*===---- stdalign.h - Standard header for alignment ------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDALIGN_H
+#define __STDALIGN_H
+
+#ifndef __cplusplus
+#define alignas _Alignas
+#define alignof _Alignof
+#endif
+
+#define __alignas_is_defined 1
+#define __alignof_is_defined 1
+
+#endif /* __STDALIGN_H */
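
A minimal usage sketch (not part of the patch): since the macros above simply
spell the C11 keywords, the following is plain C11 once <stdalign.h> is
included.

#include <stdalign.h>
#include <stdio.h>

int main(void)
{
    alignas(16) char buf[64];      /* storage with 16-byte alignment */
    printf("alignof(double) = %zu\n", alignof(double));
    printf("buf at %p\n", (void *)buf);
    return 0;
}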
diff --git a/contrib/llvm/tools/clang/lib/Headers/stdarg.h b/contrib/llvm/tools/clang/lib/Headers/stdarg.h
new file mode 100644
index 0000000..a57e183
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/stdarg.h
@@ -0,0 +1,52 @@
+/*===---- stdarg.h - Variable argument handling ----------------------------===
+ *
+ * Copyright (c) 2008 Eli Friedman
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDARG_H
+#define __STDARG_H
+
+#ifndef _VA_LIST
+typedef __builtin_va_list va_list;
+#define _VA_LIST
+#endif
+#define va_start(ap, param) __builtin_va_start(ap, param)
+#define va_end(ap) __builtin_va_end(ap)
+#define va_arg(ap, type) __builtin_va_arg(ap, type)
+
+/* GCC always defines __va_copy, but only defines va_copy in C99 mode or when
+ * -ansi is not specified, since va_copy was not part of C90.
+ */
+#define __va_copy(d,s) __builtin_va_copy(d,s)
+
+#if __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L || !defined(__STRICT_ANSI__)
+#define va_copy(dest, src) __builtin_va_copy(dest, src)
+#endif
+
+/* Hack required to make standard headers work, at least on Ubuntu */
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST 1
+#endif
+typedef __builtin_va_list __gnuc_va_list;
+
+#endif /* __STDARG_H */
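
A usage sketch (not part of the patch) exercising the macros above, including
va_copy, which yields an independent cursor so the argument list can be
traversed without disturbing the original va_list.

#include <stdarg.h>
#include <stdio.h>

static int sum(int count, ...)
{
    va_list ap, ap2;
    int total = 0;
    va_start(ap, count);
    va_copy(ap2, ap);              /* independent cursor over the same args */
    for (int i = 0; i < count; i++)
        total += va_arg(ap2, int);
    va_end(ap2);
    va_end(ap);
    return total;
}

int main(void) { printf("%d\n", sum(3, 1, 2, 3)); return 0; }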
diff --git a/contrib/llvm/tools/clang/lib/Headers/stdatomic.h b/contrib/llvm/tools/clang/lib/Headers/stdatomic.h
new file mode 100644
index 0000000..e037987
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/stdatomic.h
@@ -0,0 +1,190 @@
+/*===---- stdatomic.h - Standard header for atomic types and operations -----===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_STDATOMIC_H
+#define __CLANG_STDATOMIC_H
+
+/* If we're hosted, fall back to the system's stdatomic.h. FreeBSD, for
+ * example, already has a Clang-compatible stdatomic.h header.
+ */
+#if __STDC_HOSTED__ && __has_include_next(<stdatomic.h>)
+# include_next <stdatomic.h>
+#else
+
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* 7.17.1 Introduction */
+
+#define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE
+#define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE
+#define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE
+#define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE
+#define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE
+#define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
+#define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
+#define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
+#define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
+#define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE
+
+/* 7.17.2 Initialization */
+
+#define ATOMIC_VAR_INIT(value) (value)
+#define atomic_init __c11_atomic_init
+
+/* 7.17.3 Order and consistency */
+
+typedef enum memory_order {
+ memory_order_relaxed = __ATOMIC_RELAXED,
+ memory_order_consume = __ATOMIC_CONSUME,
+ memory_order_acquire = __ATOMIC_ACQUIRE,
+ memory_order_release = __ATOMIC_RELEASE,
+ memory_order_acq_rel = __ATOMIC_ACQ_REL,
+ memory_order_seq_cst = __ATOMIC_SEQ_CST
+} memory_order;
+
+#define kill_dependency(y) (y)
+
+/* 7.17.4 Fences */
+
+/* These should be provided by the libc implementation. */
+void atomic_thread_fence(memory_order);
+void atomic_signal_fence(memory_order);
+
+#define atomic_thread_fence(order) __c11_atomic_thread_fence(order)
+#define atomic_signal_fence(order) __c11_atomic_signal_fence(order)
+
+/* 7.17.5 Lock-free property */
+
+#define atomic_is_lock_free(obj) __c11_atomic_is_lock_free(sizeof(*(obj)))
+
+/* 7.17.6 Atomic integer types */
+
+#ifdef __cplusplus
+typedef _Atomic(bool) atomic_bool;
+#else
+typedef _Atomic(_Bool) atomic_bool;
+#endif
+typedef _Atomic(char) atomic_char;
+typedef _Atomic(signed char) atomic_schar;
+typedef _Atomic(unsigned char) atomic_uchar;
+typedef _Atomic(short) atomic_short;
+typedef _Atomic(unsigned short) atomic_ushort;
+typedef _Atomic(int) atomic_int;
+typedef _Atomic(unsigned int) atomic_uint;
+typedef _Atomic(long) atomic_long;
+typedef _Atomic(unsigned long) atomic_ulong;
+typedef _Atomic(long long) atomic_llong;
+typedef _Atomic(unsigned long long) atomic_ullong;
+typedef _Atomic(uint_least16_t) atomic_char16_t;
+typedef _Atomic(uint_least32_t) atomic_char32_t;
+typedef _Atomic(wchar_t) atomic_wchar_t;
+typedef _Atomic(int_least8_t) atomic_int_least8_t;
+typedef _Atomic(uint_least8_t) atomic_uint_least8_t;
+typedef _Atomic(int_least16_t) atomic_int_least16_t;
+typedef _Atomic(uint_least16_t) atomic_uint_least16_t;
+typedef _Atomic(int_least32_t) atomic_int_least32_t;
+typedef _Atomic(uint_least32_t) atomic_uint_least32_t;
+typedef _Atomic(int_least64_t) atomic_int_least64_t;
+typedef _Atomic(uint_least64_t) atomic_uint_least64_t;
+typedef _Atomic(int_fast8_t) atomic_int_fast8_t;
+typedef _Atomic(uint_fast8_t) atomic_uint_fast8_t;
+typedef _Atomic(int_fast16_t) atomic_int_fast16_t;
+typedef _Atomic(uint_fast16_t) atomic_uint_fast16_t;
+typedef _Atomic(int_fast32_t) atomic_int_fast32_t;
+typedef _Atomic(uint_fast32_t) atomic_uint_fast32_t;
+typedef _Atomic(int_fast64_t) atomic_int_fast64_t;
+typedef _Atomic(uint_fast64_t) atomic_uint_fast64_t;
+typedef _Atomic(intptr_t) atomic_intptr_t;
+typedef _Atomic(uintptr_t) atomic_uintptr_t;
+typedef _Atomic(size_t) atomic_size_t;
+typedef _Atomic(ptrdiff_t) atomic_ptrdiff_t;
+typedef _Atomic(intmax_t) atomic_intmax_t;
+typedef _Atomic(uintmax_t) atomic_uintmax_t;
+
+/* 7.17.7 Operations on atomic types */
+
+#define atomic_store(object, desired) __c11_atomic_store(object, desired, __ATOMIC_SEQ_CST)
+#define atomic_store_explicit __c11_atomic_store
+
+#define atomic_load(object) __c11_atomic_load(object, __ATOMIC_SEQ_CST)
+#define atomic_load_explicit __c11_atomic_load
+
+#define atomic_exchange(object, desired) __c11_atomic_exchange(object, desired, __ATOMIC_SEQ_CST)
+#define atomic_exchange_explicit __c11_atomic_exchange
+
+#define atomic_compare_exchange_strong(object, expected, desired) __c11_atomic_compare_exchange_strong(object, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+#define atomic_compare_exchange_strong_explicit __c11_atomic_compare_exchange_strong
+
+#define atomic_compare_exchange_weak(object, expected, desired) __c11_atomic_compare_exchange_weak(object, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+#define atomic_compare_exchange_weak_explicit __c11_atomic_compare_exchange_weak
+
+#define atomic_fetch_add(object, operand) __c11_atomic_fetch_add(object, operand, __ATOMIC_SEQ_CST)
+#define atomic_fetch_add_explicit __c11_atomic_fetch_add
+
+#define atomic_fetch_sub(object, operand) __c11_atomic_fetch_sub(object, operand, __ATOMIC_SEQ_CST)
+#define atomic_fetch_sub_explicit __c11_atomic_fetch_sub
+
+#define atomic_fetch_or(object, operand) __c11_atomic_fetch_or(object, operand, __ATOMIC_SEQ_CST)
+#define atomic_fetch_or_explicit __c11_atomic_fetch_or
+
+#define atomic_fetch_xor(object, operand) __c11_atomic_fetch_xor(object, operand, __ATOMIC_SEQ_CST)
+#define atomic_fetch_xor_explicit __c11_atomic_fetch_xor
+
+#define atomic_fetch_and(object, operand) __c11_atomic_fetch_and(object, operand, __ATOMIC_SEQ_CST)
+#define atomic_fetch_and_explicit __c11_atomic_fetch_and
+
+/* 7.17.8 Atomic flag type and operations */
+
+typedef struct atomic_flag { atomic_bool _Value; } atomic_flag;
+
+#define ATOMIC_FLAG_INIT { 0 }
+
+/* These should be provided by the libc implementation. */
+#ifdef __cplusplus
+bool atomic_flag_test_and_set(volatile atomic_flag *);
+bool atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order);
+#else
+_Bool atomic_flag_test_and_set(volatile atomic_flag *);
+_Bool atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order);
+#endif
+void atomic_flag_clear(volatile atomic_flag *);
+void atomic_flag_clear_explicit(volatile atomic_flag *, memory_order);
+
+#define atomic_flag_test_and_set(object) __c11_atomic_exchange(&(object)->_Value, 1, __ATOMIC_SEQ_CST)
+#define atomic_flag_test_and_set_explicit(object, order) __c11_atomic_exchange(&(object)->_Value, 1, order)
+
+#define atomic_flag_clear(object) __c11_atomic_store(&(object)->_Value, 0, __ATOMIC_SEQ_CST)
+#define atomic_flag_clear_explicit(object, order) __c11_atomic_store(&(object)->_Value, 0, order)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __STDC_HOSTED__ */
+#endif /* __CLANG_STDATOMIC_H */
+
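A usage sketch (not part of the patch): a sequentially consistent counter and
a minimal test-and-set lock built from the atomic_flag operations above. The
_explicit forms take one of the memory_order values defined earlier.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int counter = ATOMIC_VAR_INIT(0);
static atomic_flag lock = ATOMIC_FLAG_INIT;

int main(void)
{
    atomic_fetch_add(&counter, 1);                       /* seq_cst RMW */
    atomic_fetch_add_explicit(&counter, 1, memory_order_relaxed);

    while (atomic_flag_test_and_set_explicit(&lock, memory_order_acquire))
        ;                                                /* spin until free */
    printf("counter = %d\n", atomic_load(&counter));
    atomic_flag_clear_explicit(&lock, memory_order_release);
    return 0;
}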
diff --git a/contrib/llvm/tools/clang/lib/Headers/stdbool.h b/contrib/llvm/tools/clang/lib/Headers/stdbool.h
new file mode 100644
index 0000000..0467893
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/stdbool.h
@@ -0,0 +1,44 @@
+/*===---- stdbool.h - Standard header for booleans -------------------------===
+ *
+ * Copyright (c) 2008 Eli Friedman
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDBOOL_H
+#define __STDBOOL_H
+
+/* Don't define bool, true, and false in C++, except as a GNU extension. */
+#ifndef __cplusplus
+#define bool _Bool
+#define true 1
+#define false 0
+#elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
+/* Define _Bool, bool, false, true as a GNU extension. */
+#define _Bool bool
+#define bool bool
+#define false false
+#define true true
+#endif
+
+#define __bool_true_false_are_defined 1
+
+#endif /* __STDBOOL_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/stddef.h b/contrib/llvm/tools/clang/lib/Headers/stddef.h
new file mode 100644
index 0000000..7354996
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/stddef.h
@@ -0,0 +1,137 @@
+/*===---- stddef.h - Basic type definitions --------------------------------===
+ *
+ * Copyright (c) 2008 Eli Friedman
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined(__STDDEF_H) || defined(__need_ptrdiff_t) || \
+ defined(__need_size_t) || defined(__need_wchar_t) || \
+ defined(__need_NULL) || defined(__need_wint_t)
+
+#if !defined(__need_ptrdiff_t) && !defined(__need_size_t) && \
+ !defined(__need_wchar_t) && !defined(__need_NULL) && \
+ !defined(__need_wint_t)
+/* Always define miscellaneous pieces when modules are available. */
+#if !__has_feature(modules)
+#define __STDDEF_H
+#endif
+#define __need_ptrdiff_t
+#define __need_size_t
+#define __need_wchar_t
+#define __need_NULL
+#define __need_STDDEF_H_misc
+/* __need_wint_t is intentionally not defined here. */
+#endif
+
+#if defined(__need_ptrdiff_t)
+#if !defined(_PTRDIFF_T) || __has_feature(modules)
+/* Always define ptrdiff_t when modules are available. */
+#if !__has_feature(modules)
+#define _PTRDIFF_T
+#endif
+typedef __PTRDIFF_TYPE__ ptrdiff_t;
+#endif
+#undef __need_ptrdiff_t
+#endif /* defined(__need_ptrdiff_t) */
+
+#if defined(__need_size_t)
+#if !defined(_SIZE_T) || __has_feature(modules)
+/* Always define size_t when modules are available. */
+#if !__has_feature(modules)
+#define _SIZE_T
+#endif
+typedef __SIZE_TYPE__ size_t;
+#endif
+#undef __need_size_t
+#endif /* defined(__need_size_t) */
+
+#if defined(__need_STDDEF_H_misc)
+/* ISO9899:2011 7.20 (C11 Annex K): Define rsize_t if __STDC_WANT_LIB_EXT1__ is
+ * enabled. */
+#if (defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1 && \
+ !defined(_RSIZE_T)) || __has_feature(modules)
+/* Always define rsize_t when modules are available. */
+#if !__has_feature(modules)
+#define _RSIZE_T
+#endif
+typedef __SIZE_TYPE__ rsize_t;
+#endif
+#endif /* defined(__need_STDDEF_H_misc) */
+
+#if defined(__need_wchar_t)
+#ifndef __cplusplus
+/* Always define wchar_t when modules are available. */
+#if !defined(_WCHAR_T) || __has_feature(modules)
+#if !__has_feature(modules)
+#define _WCHAR_T
+#if defined(_MSC_EXTENSIONS)
+#define _WCHAR_T_DEFINED
+#endif
+#endif
+typedef __WCHAR_TYPE__ wchar_t;
+#endif
+#endif
+#undef __need_wchar_t
+#endif /* defined(__need_wchar_t) */
+
+#if defined(__need_NULL)
+#undef NULL
+#ifdef __cplusplus
+# if !defined(__MINGW32__) && !defined(_MSC_VER)
+# define NULL __null
+# else
+# define NULL 0
+# endif
+#else
+# define NULL ((void*)0)
+#endif
+#ifdef __cplusplus
+#if defined(_MSC_EXTENSIONS) && defined(_NATIVE_NULLPTR_SUPPORTED)
+namespace std { typedef decltype(nullptr) nullptr_t; }
+using ::std::nullptr_t;
+#endif
+#endif
+#undef __need_NULL
+#endif /* defined(__need_NULL) */
+
+#if defined(__need_STDDEF_H_misc)
+#if __STDC_VERSION__ >= 201112L || __cplusplus >= 201103L
+#include "__stddef_max_align_t.h"
+#endif
+#define offsetof(t, d) __builtin_offsetof(t, d)
+#undef __need_STDDEF_H_misc
+#endif /* defined(__need_STDDEF_H_misc) */
+
+/* Some C libraries expect to see a wint_t here. Others (notably MinGW) will
+ * use __WINT_TYPE__ directly; accommodate both by requiring __need_wint_t. */
+#if defined(__need_wint_t)
+/* Always define wint_t when modules are available. */
+#if !defined(_WINT_T) || __has_feature(modules)
+#if !__has_feature(modules)
+#define _WINT_T
+#endif
+typedef __WINT_TYPE__ wint_t;
+#endif
+#undef __need_wint_t
+#endif /* __need_wint_t */
+
+#endif
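
A usage sketch (not part of the patch): the __need_* protocol above lets a
C library header request a single definition instead of everything in
stddef.h. The function below is a hypothetical example.

#define __need_size_t
#include <stddef.h>            /* defines size_t and nothing else */

size_t my_strlen(const char *s)
{
    size_t n = 0;
    while (s[n])
        n++;
    return n;
}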
diff --git a/contrib/llvm/tools/clang/lib/Headers/stdint.h b/contrib/llvm/tools/clang/lib/Headers/stdint.h
new file mode 100644
index 0000000..3f2fcbc
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/stdint.h
@@ -0,0 +1,707 @@
+/*===---- stdint.h - Standard header for sized integer types --------------===*\
+ *
+ * Copyright (c) 2009 Chris Lattner
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __CLANG_STDINT_H
+#define __CLANG_STDINT_H
+
+/* If we're hosted, fall back to the system's stdint.h, which might have
+ * additional definitions.
+ */
+#if __STDC_HOSTED__ && __has_include_next(<stdint.h>)
+
+// C99 7.18.3 Limits of other integer types
+//
+// Footnote 219, 220: C++ implementations should define these macros only when
+// __STDC_LIMIT_MACROS is defined before <stdint.h> is included.
+//
+// Footnote 222: C++ implementations should define these macros only when
+// __STDC_CONSTANT_MACROS is defined before <stdint.h> is included.
+//
+// C++11 [cstdint.syn]p2:
+//
+// The macros defined by <cstdint> are provided unconditionally. In particular,
+// the symbols __STDC_LIMIT_MACROS and __STDC_CONSTANT_MACROS (mentioned in
+// footnotes 219, 220, and 222 in the C standard) play no role in C++.
+//
+// C11 removed the problematic footnotes.
+//
+// Work around this inconsistency by always defining those macros in C++ mode,
+// so that a C library implementation which follows the C99 standard can be
+// used in C++.
+# ifdef __cplusplus
+# if !defined(__STDC_LIMIT_MACROS)
+# define __STDC_LIMIT_MACROS
+# define __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
+# endif
+# if !defined(__STDC_CONSTANT_MACROS)
+# define __STDC_CONSTANT_MACROS
+# define __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
+# endif
+# endif
+
+# include_next <stdint.h>
+
+# ifdef __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
+# undef __STDC_LIMIT_MACROS
+# undef __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
+# endif
+# ifdef __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
+# undef __STDC_CONSTANT_MACROS
+# undef __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
+# endif
+
+#else
+
+/* C99 7.18.1.1 Exact-width integer types.
+ * C99 7.18.1.2 Minimum-width integer types.
+ * C99 7.18.1.3 Fastest minimum-width integer types.
+ *
+ * The standard requires that exact-width types be defined for 8-, 16-, 32-, and
+ * 64-bit widths if they are implemented. Other exact-width types are optional.
+ * This implementation defines an exact-width type for every integer width
+ * that is represented in the standard integer types.
+ *
+ * The standard also requires minimum-width types be defined for 8-, 16-, 32-,
+ * and 64-bit widths regardless of whether there are corresponding exact-width
+ * types.
+ *
+ * To accommodate targets that are missing types that are exactly 8, 16, 32, or
+ * 64 bits wide, this implementation takes an approach of cascading
+ * redefinitions, redefining __int_leastN_t to successively smaller exact-width
+ * types. It is therefore important that the types are defined in order of
+ * descending widths.
+ *
+ * We currently assume that the minimum-width types and the fastest
+ * minimum-width types are the same. This is allowed by the standard, but is
+ * suboptimal.
+ *
+ * In violation of the standard, some targets do not implement a type that is
+ * wide enough to represent all of the required widths (8-, 16-, 32-, 64-bit).
+ * To accommodate these targets, a required minimum-width type is only
+ * defined if there exists an exact-width type of equal or greater width.
+ */
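
Annotation (not part of the header): to make the cascade concrete, on a
hypothetical target providing only __INT64_TYPE__ and __INT16_TYPE__, the
64-bit block below sets __int_least32_t to int64_t and no narrower block ever
overrides it, while the 16-bit block re-points __int_least16_t and
__int_least8_t at int16_t. A portable program can observe the invariant:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Each least-width type is the narrowest available exact-width type
     * that is at least the requested width, so sizes are monotone. */
    printf("int_least8_t  : %zu bytes\n", sizeof(int_least8_t));
    printf("int_least16_t : %zu bytes\n", sizeof(int_least16_t));
    printf("int_least32_t : %zu bytes\n", sizeof(int_least32_t));
    printf("int_least64_t : %zu bytes\n", sizeof(int_least64_t));
    return 0;
}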
+
+#ifdef __INT64_TYPE__
+# ifndef __int8_t_defined /* glibc sys/types.h also defines int64_t*/
+typedef __INT64_TYPE__ int64_t;
+# endif /* __int8_t_defined */
+typedef __UINT64_TYPE__ uint64_t;
+# define __int_least64_t int64_t
+# define __uint_least64_t uint64_t
+# define __int_least32_t int64_t
+# define __uint_least32_t uint64_t
+# define __int_least16_t int64_t
+# define __uint_least16_t uint64_t
+# define __int_least8_t int64_t
+# define __uint_least8_t uint64_t
+#endif /* __INT64_TYPE__ */
+
+#ifdef __int_least64_t
+typedef __int_least64_t int_least64_t;
+typedef __uint_least64_t uint_least64_t;
+typedef __int_least64_t int_fast64_t;
+typedef __uint_least64_t uint_fast64_t;
+#endif /* __int_least64_t */
+
+#ifdef __INT56_TYPE__
+typedef __INT56_TYPE__ int56_t;
+typedef __UINT56_TYPE__ uint56_t;
+typedef int56_t int_least56_t;
+typedef uint56_t uint_least56_t;
+typedef int56_t int_fast56_t;
+typedef uint56_t uint_fast56_t;
+# define __int_least32_t int56_t
+# define __uint_least32_t uint56_t
+# define __int_least16_t int56_t
+# define __uint_least16_t uint56_t
+# define __int_least8_t int56_t
+# define __uint_least8_t uint56_t
+#endif /* __INT56_TYPE__ */
+
+
+#ifdef __INT48_TYPE__
+typedef __INT48_TYPE__ int48_t;
+typedef __UINT48_TYPE__ uint48_t;
+typedef int48_t int_least48_t;
+typedef uint48_t uint_least48_t;
+typedef int48_t int_fast48_t;
+typedef uint48_t uint_fast48_t;
+# define __int_least32_t int48_t
+# define __uint_least32_t uint48_t
+# define __int_least16_t int48_t
+# define __uint_least16_t uint48_t
+# define __int_least8_t int48_t
+# define __uint_least8_t uint48_t
+#endif /* __INT48_TYPE__ */
+
+
+#ifdef __INT40_TYPE__
+typedef __INT40_TYPE__ int40_t;
+typedef __UINT40_TYPE__ uint40_t;
+typedef int40_t int_least40_t;
+typedef uint40_t uint_least40_t;
+typedef int40_t int_fast40_t;
+typedef uint40_t uint_fast40_t;
+# define __int_least32_t int40_t
+# define __uint_least32_t uint40_t
+# define __int_least16_t int40_t
+# define __uint_least16_t uint40_t
+# define __int_least8_t int40_t
+# define __uint_least8_t uint40_t
+#endif /* __INT40_TYPE__ */
+
+
+#ifdef __INT32_TYPE__
+
+# ifndef __int8_t_defined /* glibc sys/types.h also defines int32_t*/
+typedef __INT32_TYPE__ int32_t;
+# endif /* __int8_t_defined */
+
+# ifndef __uint32_t_defined /* more glibc compatibility */
+# define __uint32_t_defined
+typedef __UINT32_TYPE__ uint32_t;
+# endif /* __uint32_t_defined */
+
+# define __int_least32_t int32_t
+# define __uint_least32_t uint32_t
+# define __int_least16_t int32_t
+# define __uint_least16_t uint32_t
+# define __int_least8_t int32_t
+# define __uint_least8_t uint32_t
+#endif /* __INT32_TYPE__ */
+
+#ifdef __int_least32_t
+typedef __int_least32_t int_least32_t;
+typedef __uint_least32_t uint_least32_t;
+typedef __int_least32_t int_fast32_t;
+typedef __uint_least32_t uint_fast32_t;
+#endif /* __int_least32_t */
+
+#ifdef __INT24_TYPE__
+typedef __INT24_TYPE__ int24_t;
+typedef __UINT24_TYPE__ uint24_t;
+typedef int24_t int_least24_t;
+typedef uint24_t uint_least24_t;
+typedef int24_t int_fast24_t;
+typedef uint24_t uint_fast24_t;
+# define __int_least16_t int24_t
+# define __uint_least16_t uint24_t
+# define __int_least8_t int24_t
+# define __uint_least8_t uint24_t
+#endif /* __INT24_TYPE__ */
+
+#ifdef __INT16_TYPE__
+#ifndef __int8_t_defined /* glibc sys/types.h also defines int16_t*/
+typedef __INT16_TYPE__ int16_t;
+#endif /* __int8_t_defined */
+typedef __UINT16_TYPE__ uint16_t;
+# define __int_least16_t int16_t
+# define __uint_least16_t uint16_t
+# define __int_least8_t int16_t
+# define __uint_least8_t uint16_t
+#endif /* __INT16_TYPE__ */
+
+#ifdef __int_least16_t
+typedef __int_least16_t int_least16_t;
+typedef __uint_least16_t uint_least16_t;
+typedef __int_least16_t int_fast16_t;
+typedef __uint_least16_t uint_fast16_t;
+#endif /* __int_least16_t */
+
+
+#ifdef __INT8_TYPE__
+#ifndef __int8_t_defined /* glibc sys/types.h also defines int8_t*/
+typedef __INT8_TYPE__ int8_t;
+#endif /* __int8_t_defined */
+typedef __UINT8_TYPE__ uint8_t;
+# define __int_least8_t int8_t
+# define __uint_least8_t uint8_t
+#endif /* __INT8_TYPE__ */
+
+#ifdef __int_least8_t
+typedef __int_least8_t int_least8_t;
+typedef __uint_least8_t uint_least8_t;
+typedef __int_least8_t int_fast8_t;
+typedef __uint_least8_t uint_fast8_t;
+#endif /* __int_least8_t */
+
+/* prevent glibc sys/types.h from defining conflicting types */
+#ifndef __int8_t_defined
+# define __int8_t_defined
+#endif /* __int8_t_defined */
+
+/* C99 7.18.1.4 Integer types capable of holding object pointers.
+ */
+#define __stdint_join3(a,b,c) a ## b ## c
+
+#define __intn_t(n) __stdint_join3( int, n, _t)
+#define __uintn_t(n) __stdint_join3(uint, n, _t)
+
+#ifndef _INTPTR_T
+#ifndef __intptr_t_defined
+typedef __intn_t(__INTPTR_WIDTH__) intptr_t;
+#define __intptr_t_defined
+#define _INTPTR_T
+#endif
+#endif
+
+#ifndef _UINTPTR_T
+typedef __uintn_t(__INTPTR_WIDTH__) uintptr_t;
+#define _UINTPTR_T
+#endif
+
+/* C99 7.18.1.5 Greatest-width integer types.
+ */
+typedef __INTMAX_TYPE__ intmax_t;
+typedef __UINTMAX_TYPE__ uintmax_t;
+
+/* C99 7.18.4 Macros for minimum-width integer constants.
+ *
+ * The standard requires that integer constant macros be defined for all the
+ * minimum-width types defined above. As 8-, 16-, 32-, and 64-bit minimum-width
+ * types are required, the corresponding integer constant macros are defined
+ * here. This implementation also defines minimum-width types for every other
+ * integer width that the target implements, so corresponding macros are
+ * defined below, too.
+ *
+ * These macros are defined using the same successive-shrinking approach as
+ * the type definitions above. It is likewise important that macros are defined
+ * in order of descending width.
+ *
+ * Note that C++ should not check __STDC_CONSTANT_MACROS here, contrary to the
+ * claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).
+ */
+
+#define __int_c_join(a, b) a ## b
+#define __int_c(v, suffix) __int_c_join(v, suffix)
+#define __uint_c(v, suffix) __int_c_join(v##U, suffix)
+
+
+#ifdef __INT64_TYPE__
+# ifdef __INT64_C_SUFFIX__
+# define __int64_c_suffix __INT64_C_SUFFIX__
+# define __int32_c_suffix __INT64_C_SUFFIX__
+# define __int16_c_suffix __INT64_C_SUFFIX__
+# define __int8_c_suffix __INT64_C_SUFFIX__
+# else
+# undef __int64_c_suffix
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT64_C_SUFFIX__ */
+#endif /* __INT64_TYPE__ */
+
+#ifdef __int_least64_t
+# ifdef __int64_c_suffix
+# define INT64_C(v) __int_c(v, __int64_c_suffix)
+# define UINT64_C(v) __uint_c(v, __int64_c_suffix)
+# else
+# define INT64_C(v) v
+# define UINT64_C(v) v ## U
+# endif /* __int64_c_suffix */
+#endif /* __int_least64_t */
+
+
+#ifdef __INT56_TYPE__
+# ifdef __INT56_C_SUFFIX__
+# define INT56_C(v) __int_c(v, __INT56_C_SUFFIX__)
+# define UINT56_C(v) __uint_c(v, __INT56_C_SUFFIX__)
+# define __int32_c_suffix __INT56_C_SUFFIX__
+# define __int16_c_suffix __INT56_C_SUFFIX__
+# define __int8_c_suffix __INT56_C_SUFFIX__
+# else
+# define INT56_C(v) v
+# define UINT56_C(v) v ## U
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT56_C_SUFFIX__ */
+#endif /* __INT56_TYPE__ */
+
+
+#ifdef __INT48_TYPE__
+# ifdef __INT48_C_SUFFIX__
+# define INT48_C(v) __int_c(v, __INT48_C_SUFFIX__)
+# define UINT48_C(v) __uint_c(v, __INT48_C_SUFFIX__)
+# define __int32_c_suffix __INT48_C_SUFFIX__
+# define __int16_c_suffix __INT48_C_SUFFIX__
+# define __int8_c_suffix __INT48_C_SUFFIX__
+# else
+# define INT48_C(v) v
+# define UINT48_C(v) v ## U
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT48_C_SUFFIX__ */
+#endif /* __INT48_TYPE__ */
+
+
+#ifdef __INT40_TYPE__
+# ifdef __INT40_C_SUFFIX__
+# define INT40_C(v) __int_c(v, __INT40_C_SUFFIX__)
+# define UINT40_C(v) __uint_c(v, __INT40_C_SUFFIX__)
+# define __int32_c_suffix __INT40_C_SUFFIX__
+# define __int16_c_suffix __INT40_C_SUFFIX__
+# define __int8_c_suffix __INT40_C_SUFFIX__
+# else
+# define INT40_C(v) v
+# define UINT40_C(v) v ## U
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT40_C_SUFFIX__ */
+#endif /* __INT40_TYPE__ */
+
+
+#ifdef __INT32_TYPE__
+# ifdef __INT32_C_SUFFIX__
+# define __int32_c_suffix __INT32_C_SUFFIX__
+# define __int16_c_suffix __INT32_C_SUFFIX__
+# define __int8_c_suffix __INT32_C_SUFFIX__
+# else
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT32_C_SUFFIX__ */
+#endif /* __INT32_TYPE__ */
+
+#ifdef __int_least32_t
+# ifdef __int32_c_suffix
+# define INT32_C(v) __int_c(v, __int32_c_suffix)
+# define UINT32_C(v) __uint_c(v, __int32_c_suffix)
+# else
+# define INT32_C(v) v
+# define UINT32_C(v) v ## U
+# endif /* __int32_c_suffix */
+#endif /* __int_least32_t */
+
+
+#ifdef __INT24_TYPE__
+# ifdef __INT24_C_SUFFIX__
+# define INT24_C(v) __int_c(v, __INT24_C_SUFFIX__)
+# define UINT24_C(v) __uint_c(v, __INT24_C_SUFFIX__)
+# define __int16_c_suffix __INT24_C_SUFFIX__
+# define __int8_c_suffix __INT24_C_SUFFIX__
+# else
+# define INT24_C(v) v
+# define UINT24_C(v) v ## U
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT24_C_SUFFIX__ */
+#endif /* __INT24_TYPE__ */
+
+
+#ifdef __INT16_TYPE__
+# ifdef __INT16_C_SUFFIX__
+# define __int16_c_suffix __INT16_C_SUFFIX__
+# define __int8_c_suffix __INT16_C_SUFFIX__
+# else
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT16_C_SUFFIX__ */
+#endif /* __INT16_TYPE__ */
+
+#ifdef __int_least16_t
+# ifdef __int16_c_suffix
+# define INT16_C(v) __int_c(v, __int16_c_suffix)
+# define UINT16_C(v) __uint_c(v, __int16_c_suffix)
+# else
+# define INT16_C(v) v
+# define UINT16_C(v) v ## U
+# endif /* __int16_c_suffix */
+#endif /* __int_least16_t */
+
+
+#ifdef __INT8_TYPE__
+# ifdef __INT8_C_SUFFIX__
+# define __int8_c_suffix __INT8_C_SUFFIX__
+# else
+# undef __int8_c_suffix
+# endif /* __INT8_C_SUFFIX__ */
+#endif /* __INT8_TYPE__ */
+
+#ifdef __int_least8_t
+# ifdef __int8_c_suffix
+# define INT8_C(v) __int_c(v, __int8_c_suffix)
+# define UINT8_C(v) __uint_c(v, __int8_c_suffix)
+# else
+# define INT8_C(v) v
+# define UINT8_C(v) v ## U
+# endif /* __int8_c_suffix */
+#endif /* __int_least8_t */
+
+
+/* C99 7.18.2.1 Limits of exact-width integer types.
+ * C99 7.18.2.2 Limits of minimum-width integer types.
+ * C99 7.18.2.3 Limits of fastest minimum-width integer types.
+ *
+ * The presence of limit macros is completely optional in C99. This
+ * implementation defines limits for all of the types (exact- and
+ * minimum-width) that it defines above, using the limits of the minimum-width
+ * type for any types that do not have exact-width representations.
+ *
+ * As in the type definitions, this section takes an approach of
+ * successive-shrinking to determine which limits to use for the standard (8,
+ * 16, 32, 64) bit widths when they don't have exact representations. It is
+ * therefore important that the definitions be kept in order of descending
+ * widths.
+ *
+ * Note that C++ should not check __STDC_LIMIT_MACROS here, contrary to the
+ * claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).
+ */
+
+#ifdef __INT64_TYPE__
+# define INT64_MAX INT64_C( 9223372036854775807)
+# define INT64_MIN (-INT64_C( 9223372036854775807)-1)
+# define UINT64_MAX UINT64_C(18446744073709551615)
+# define __INT_LEAST64_MIN INT64_MIN
+# define __INT_LEAST64_MAX INT64_MAX
+# define __UINT_LEAST64_MAX UINT64_MAX
+# define __INT_LEAST32_MIN INT64_MIN
+# define __INT_LEAST32_MAX INT64_MAX
+# define __UINT_LEAST32_MAX UINT64_MAX
+# define __INT_LEAST16_MIN INT64_MIN
+# define __INT_LEAST16_MAX INT64_MAX
+# define __UINT_LEAST16_MAX UINT64_MAX
+# define __INT_LEAST8_MIN INT64_MIN
+# define __INT_LEAST8_MAX INT64_MAX
+# define __UINT_LEAST8_MAX UINT64_MAX
+#endif /* __INT64_TYPE__ */
+
+#ifdef __INT_LEAST64_MIN
+# define INT_LEAST64_MIN __INT_LEAST64_MIN
+# define INT_LEAST64_MAX __INT_LEAST64_MAX
+# define UINT_LEAST64_MAX __UINT_LEAST64_MAX
+# define INT_FAST64_MIN __INT_LEAST64_MIN
+# define INT_FAST64_MAX __INT_LEAST64_MAX
+# define UINT_FAST64_MAX __UINT_LEAST64_MAX
+#endif /* __INT_LEAST64_MIN */
+
+
+#ifdef __INT56_TYPE__
+# define INT56_MAX INT56_C(36028797018963967)
+# define INT56_MIN (-INT56_C(36028797018963967)-1)
+# define UINT56_MAX UINT56_C(72057594037927935)
+# define INT_LEAST56_MIN INT56_MIN
+# define INT_LEAST56_MAX INT56_MAX
+# define UINT_LEAST56_MAX UINT56_MAX
+# define INT_FAST56_MIN INT56_MIN
+# define INT_FAST56_MAX INT56_MAX
+# define UINT_FAST56_MAX UINT56_MAX
+# define __INT_LEAST32_MIN INT56_MIN
+# define __INT_LEAST32_MAX INT56_MAX
+# define __UINT_LEAST32_MAX UINT56_MAX
+# define __INT_LEAST16_MIN INT56_MIN
+# define __INT_LEAST16_MAX INT56_MAX
+# define __UINT_LEAST16_MAX UINT56_MAX
+# define __INT_LEAST8_MIN INT56_MIN
+# define __INT_LEAST8_MAX INT56_MAX
+# define __UINT_LEAST8_MAX UINT56_MAX
+#endif /* __INT56_TYPE__ */
+
+
+#ifdef __INT48_TYPE__
+# define INT48_MAX INT48_C(140737488355327)
+# define INT48_MIN (-INT48_C(140737488355327)-1)
+# define UINT48_MAX UINT48_C(281474976710655)
+# define INT_LEAST48_MIN INT48_MIN
+# define INT_LEAST48_MAX INT48_MAX
+# define UINT_LEAST48_MAX UINT48_MAX
+# define INT_FAST48_MIN INT48_MIN
+# define INT_FAST48_MAX INT48_MAX
+# define UINT_FAST48_MAX UINT48_MAX
+# define __INT_LEAST32_MIN INT48_MIN
+# define __INT_LEAST32_MAX INT48_MAX
+# define __UINT_LEAST32_MAX UINT48_MAX
+# define __INT_LEAST16_MIN INT48_MIN
+# define __INT_LEAST16_MAX INT48_MAX
+# define __UINT_LEAST16_MAX UINT48_MAX
+# define __INT_LEAST8_MIN INT48_MIN
+# define __INT_LEAST8_MAX INT48_MAX
+# define __UINT_LEAST8_MAX UINT48_MAX
+#endif /* __INT48_TYPE__ */
+
+
+#ifdef __INT40_TYPE__
+# define INT40_MAX INT40_C(549755813887)
+# define INT40_MIN (-INT40_C(549755813887)-1)
+# define UINT40_MAX UINT40_C(1099511627775)
+# define INT_LEAST40_MIN INT40_MIN
+# define INT_LEAST40_MAX INT40_MAX
+# define UINT_LEAST40_MAX UINT40_MAX
+# define INT_FAST40_MIN INT40_MIN
+# define INT_FAST40_MAX INT40_MAX
+# define UINT_FAST40_MAX UINT40_MAX
+# define __INT_LEAST32_MIN INT40_MIN
+# define __INT_LEAST32_MAX INT40_MAX
+# define __UINT_LEAST32_MAX UINT40_MAX
+# define __INT_LEAST16_MIN INT40_MIN
+# define __INT_LEAST16_MAX INT40_MAX
+# define __UINT_LEAST16_MAX UINT40_MAX
+# define __INT_LEAST8_MIN INT40_MIN
+# define __INT_LEAST8_MAX INT40_MAX
+# define __UINT_LEAST8_MAX UINT40_MAX
+#endif /* __INT40_TYPE__ */
+
+
+#ifdef __INT32_TYPE__
+# define INT32_MAX INT32_C(2147483647)
+# define INT32_MIN (-INT32_C(2147483647)-1)
+# define UINT32_MAX UINT32_C(4294967295)
+# define __INT_LEAST32_MIN INT32_MIN
+# define __INT_LEAST32_MAX INT32_MAX
+# define __UINT_LEAST32_MAX UINT32_MAX
+# define __INT_LEAST16_MIN INT32_MIN
+# define __INT_LEAST16_MAX INT32_MAX
+# define __UINT_LEAST16_MAX UINT32_MAX
+# define __INT_LEAST8_MIN INT32_MIN
+# define __INT_LEAST8_MAX INT32_MAX
+# define __UINT_LEAST8_MAX UINT32_MAX
+#endif /* __INT32_TYPE__ */
+
+#ifdef __INT_LEAST32_MIN
+# define INT_LEAST32_MIN __INT_LEAST32_MIN
+# define INT_LEAST32_MAX __INT_LEAST32_MAX
+# define UINT_LEAST32_MAX __UINT_LEAST32_MAX
+# define INT_FAST32_MIN __INT_LEAST32_MIN
+# define INT_FAST32_MAX __INT_LEAST32_MAX
+# define UINT_FAST32_MAX __UINT_LEAST32_MAX
+#endif /* __INT_LEAST32_MIN */
+
+
+#ifdef __INT24_TYPE__
+# define INT24_MAX INT24_C(8388607)
+# define INT24_MIN (-INT24_C(8388607)-1)
+# define UINT24_MAX UINT24_C(16777215)
+# define INT_LEAST24_MIN INT24_MIN
+# define INT_LEAST24_MAX INT24_MAX
+# define UINT_LEAST24_MAX UINT24_MAX
+# define INT_FAST24_MIN INT24_MIN
+# define INT_FAST24_MAX INT24_MAX
+# define UINT_FAST24_MAX UINT24_MAX
+# define __INT_LEAST16_MIN INT24_MIN
+# define __INT_LEAST16_MAX INT24_MAX
+# define __UINT_LEAST16_MAX UINT24_MAX
+# define __INT_LEAST8_MIN INT24_MIN
+# define __INT_LEAST8_MAX INT24_MAX
+# define __UINT_LEAST8_MAX UINT24_MAX
+#endif /* __INT24_TYPE__ */
+
+
+#ifdef __INT16_TYPE__
+# define INT16_MAX INT16_C(32767)
+# define INT16_MIN (-INT16_C(32767)-1)
+# define UINT16_MAX UINT16_C(65535)
+# define __INT_LEAST16_MIN INT16_MIN
+# define __INT_LEAST16_MAX INT16_MAX
+# define __UINT_LEAST16_MAX UINT16_MAX
+# define __INT_LEAST8_MIN INT16_MIN
+# define __INT_LEAST8_MAX INT16_MAX
+# define __UINT_LEAST8_MAX UINT16_MAX
+#endif /* __INT16_TYPE__ */
+
+#ifdef __INT_LEAST16_MIN
+# define INT_LEAST16_MIN __INT_LEAST16_MIN
+# define INT_LEAST16_MAX __INT_LEAST16_MAX
+# define UINT_LEAST16_MAX __UINT_LEAST16_MAX
+# define INT_FAST16_MIN __INT_LEAST16_MIN
+# define INT_FAST16_MAX __INT_LEAST16_MAX
+# define UINT_FAST16_MAX __UINT_LEAST16_MAX
+#endif /* __INT_LEAST16_MIN */
+
+
+#ifdef __INT8_TYPE__
+# define INT8_MAX INT8_C(127)
+# define INT8_MIN (-INT8_C(127)-1)
+# define UINT8_MAX UINT8_C(255)
+# define __INT_LEAST8_MIN INT8_MIN
+# define __INT_LEAST8_MAX INT8_MAX
+# define __UINT_LEAST8_MAX UINT8_MAX
+#endif /* __INT8_TYPE__ */
+
+#ifdef __INT_LEAST8_MIN
+# define INT_LEAST8_MIN __INT_LEAST8_MIN
+# define INT_LEAST8_MAX __INT_LEAST8_MAX
+# define UINT_LEAST8_MAX __UINT_LEAST8_MAX
+# define INT_FAST8_MIN __INT_LEAST8_MIN
+# define INT_FAST8_MAX __INT_LEAST8_MAX
+# define UINT_FAST8_MAX __UINT_LEAST8_MAX
+#endif /* __INT_LEAST8_MIN */
+
+/* Some utility macros */
+#define __INTN_MIN(n) __stdint_join3( INT, n, _MIN)
+#define __INTN_MAX(n) __stdint_join3( INT, n, _MAX)
+#define __UINTN_MAX(n) __stdint_join3(UINT, n, _MAX)
+#define __INTN_C(n, v) __stdint_join3( INT, n, _C(v))
+#define __UINTN_C(n, v) __stdint_join3(UINT, n, _C(v))
+
+/* C99 7.18.2.4 Limits of integer types capable of holding object pointers. */
+/* C99 7.18.3 Limits of other integer types. */
+
+#define INTPTR_MIN __INTN_MIN(__INTPTR_WIDTH__)
+#define INTPTR_MAX __INTN_MAX(__INTPTR_WIDTH__)
+#define UINTPTR_MAX __UINTN_MAX(__INTPTR_WIDTH__)
+#define PTRDIFF_MIN __INTN_MIN(__PTRDIFF_WIDTH__)
+#define PTRDIFF_MAX __INTN_MAX(__PTRDIFF_WIDTH__)
+#define SIZE_MAX __UINTN_MAX(__SIZE_WIDTH__)
+
+/* ISO9899:2011 7.20 (C11 Annex K): Define RSIZE_MAX if __STDC_WANT_LIB_EXT1__
+ * is enabled. */
+#if defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1
+#define RSIZE_MAX (SIZE_MAX >> 1)
+#endif
+
+/* C99 7.18.2.5 Limits of greatest-width integer types. */
+#define INTMAX_MIN __INTN_MIN(__INTMAX_WIDTH__)
+#define INTMAX_MAX __INTN_MAX(__INTMAX_WIDTH__)
+#define UINTMAX_MAX __UINTN_MAX(__INTMAX_WIDTH__)
+
+/* C99 7.18.3 Limits of other integer types. */
+#define SIG_ATOMIC_MIN __INTN_MIN(__SIG_ATOMIC_WIDTH__)
+#define SIG_ATOMIC_MAX __INTN_MAX(__SIG_ATOMIC_WIDTH__)
+#ifdef __WINT_UNSIGNED__
+# define WINT_MIN __UINTN_C(__WINT_WIDTH__, 0)
+# define WINT_MAX __UINTN_MAX(__WINT_WIDTH__)
+#else
+# define WINT_MIN __INTN_MIN(__WINT_WIDTH__)
+# define WINT_MAX __INTN_MAX(__WINT_WIDTH__)
+#endif
+
+#ifndef WCHAR_MAX
+# define WCHAR_MAX __WCHAR_MAX__
+#endif
+#ifndef WCHAR_MIN
+# if __WCHAR_MAX__ == __INTN_MAX(__WCHAR_WIDTH__)
+# define WCHAR_MIN __INTN_MIN(__WCHAR_WIDTH__)
+# else
+# define WCHAR_MIN __UINTN_C(__WCHAR_WIDTH__, 0)
+# endif
+#endif
+
+/* 7.18.4.2 Macros for greatest-width integer constants. */
+#define INTMAX_C(v) __INTN_C(__INTMAX_WIDTH__, v)
+#define UINTMAX_C(v) __UINTN_C(__INTMAX_WIDTH__, v)
+
+#endif /* __STDC_HOSTED__ */
+#endif /* __CLANG_STDINT_H */
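
A usage sketch (not part of the patch): the constant macros attach the
correct suffix for the target, so wide literals stay well-formed even where
plain int or long is narrower than 64 bits.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t big = INT64_C(9000000000);     /* suffixed as needed per target */
    printf("big       = %lld\n", (long long)big);
    printf("INT64_MAX = %lld\n", (long long)INT64_MAX);
    printf("SIZE_MAX  = %zu\n", (size_t)SIZE_MAX);
    return 0;
}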
diff --git a/contrib/llvm/tools/clang/lib/Headers/stdnoreturn.h b/contrib/llvm/tools/clang/lib/Headers/stdnoreturn.h
new file mode 100644
index 0000000..a7a301d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/stdnoreturn.h
@@ -0,0 +1,30 @@
+/*===---- stdnoreturn.h - Standard header for noreturn macro ---------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDNORETURN_H
+#define __STDNORETURN_H
+
+#define noreturn _Noreturn
+#define __noreturn_is_defined 1
+
+#endif /* __STDNORETURN_H */
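
A usage sketch (not part of the patch): 'noreturn' expands to the C11
_Noreturn specifier, which lets the compiler check that the function never
falls off the end and optimize its callers.

#include <stdnoreturn.h>
#include <stdio.h>
#include <stdlib.h>

noreturn static void die(const char *msg)
{
    fprintf(stderr, "fatal: %s\n", msg);
    exit(1);
}

int main(void)
{
    die("example");        /* no code path returns past this call */
}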
diff --git a/contrib/llvm/tools/clang/lib/Headers/tbmintrin.h b/contrib/llvm/tools/clang/lib/Headers/tbmintrin.h
new file mode 100644
index 0000000..785961c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/tbmintrin.h
@@ -0,0 +1,154 @@
+/*===---- tbmintrin.h - TBM intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <tbmintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __TBMINTRIN_H
+#define __TBMINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("tbm")))
+
+#define __bextri_u32(a, b) \
+ ((unsigned int)__builtin_ia32_bextri_u32((unsigned int)(a), \
+ (unsigned int)(b)))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blcfill_u32(unsigned int a)
+{
+ return a & (a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blci_u32(unsigned int a)
+{
+ return a | ~(a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blcic_u32(unsigned int a)
+{
+ return ~a & (a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blcmsk_u32(unsigned int a)
+{
+ return a ^ (a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blcs_u32(unsigned int a)
+{
+ return a | (a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blsfill_u32(unsigned int a)
+{
+ return a | (a - 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blsic_u32(unsigned int a)
+{
+ return ~a | (a - 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__t1mskc_u32(unsigned int a)
+{
+ return ~a | (a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__tzmsk_u32(unsigned int a)
+{
+ return ~a & (a - 1);
+}
+
+#ifdef __x86_64__
+#define __bextri_u64(a, b) \
+ ((unsigned long long)__builtin_ia32_bextri_u64((unsigned long long)(a), \
+ (unsigned long long)(b)))
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blcfill_u64(unsigned long long a)
+{
+ return a & (a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blci_u64(unsigned long long a)
+{
+ return a | ~(a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blcic_u64(unsigned long long a)
+{
+ return ~a & (a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blcmsk_u64(unsigned long long a)
+{
+ return a ^ (a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blcs_u64(unsigned long long a)
+{
+ return a | (a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blsfill_u64(unsigned long long a)
+{
+ return a | (a - 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blsic_u64(unsigned long long a)
+{
+ return ~a | (a - 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__t1mskc_u64(unsigned long long a)
+{
+ return ~a | (a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__tzmsk_u64(unsigned long long a)
+{
+ return ~a & (a - 1);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __TBMINTRIN_H */
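
An illustrative sketch (not part of the patch): the TBM helpers above are
thin wrappers over the bit tricks shown here in plain C, so their semantics
can be checked without TBM hardware or -mtbm. The sample value is arbitrary.

#include <stdio.h>

int main(void)
{
    unsigned int a = 0x58;                     /* 0101 1000 */
    printf("blsfill: %08x\n", a | (a - 1));    /* fill trailing zeros -> 0x5f */
    printf("blcmsk : %08x\n", a ^ (a + 1));    /* through lowest clear bit -> 0x01 */
    printf("tzmsk  : %08x\n", ~a & (a - 1));   /* mask of trailing zeros -> 0x07 */
    return 0;
}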
diff --git a/contrib/llvm/tools/clang/lib/Headers/tgmath.h b/contrib/llvm/tools/clang/lib/Headers/tgmath.h
new file mode 100644
index 0000000..318e118
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/tgmath.h
@@ -0,0 +1,1374 @@
+/*===---- tgmath.h - Standard header for type generic math ----------------===*\
+ *
+ * Copyright (c) 2009 Howard Hinnant
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __TGMATH_H
+#define __TGMATH_H
+
+/* C99 7.22 Type-generic math <tgmath.h>. */
+#include <math.h>
+
+/* C++ handles type genericity with overloading in math.h. */
+#ifndef __cplusplus
+#include <complex.h>
+
+#define _TG_ATTRSp __attribute__((__overloadable__))
+#define _TG_ATTRS __attribute__((__overloadable__, __always_inline__))
+
+// promotion
+
+typedef void _Argument_type_is_not_arithmetic;
+static _Argument_type_is_not_arithmetic __tg_promote(...)
+ __attribute__((__unavailable__,__overloadable__));
+static double _TG_ATTRSp __tg_promote(int);
+static double _TG_ATTRSp __tg_promote(unsigned int);
+static double _TG_ATTRSp __tg_promote(long);
+static double _TG_ATTRSp __tg_promote(unsigned long);
+static double _TG_ATTRSp __tg_promote(long long);
+static double _TG_ATTRSp __tg_promote(unsigned long long);
+static float _TG_ATTRSp __tg_promote(float);
+static double _TG_ATTRSp __tg_promote(double);
+static long double _TG_ATTRSp __tg_promote(long double);
+static float _Complex _TG_ATTRSp __tg_promote(float _Complex);
+static double _Complex _TG_ATTRSp __tg_promote(double _Complex);
+static long double _Complex _TG_ATTRSp __tg_promote(long double _Complex);
+
+#define __tg_promote1(__x) (__typeof__(__tg_promote(__x)))
+#define __tg_promote2(__x, __y) (__typeof__(__tg_promote(__x) + \
+ __tg_promote(__y)))
+#define __tg_promote3(__x, __y, __z) (__typeof__(__tg_promote(__x) + \
+ __tg_promote(__y) + \
+ __tg_promote(__z)))
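+
Annotation (not part of the header): __tg_promote is never actually called;
only the return type of the overload chosen for the argument matters, and
__typeof__ turns that type into a cast that steers which __tg_* overload the
macro invokes. The effect is observable portably:

#include <tgmath.h>
#include <stdio.h>

int main(void)
{
    /* One macro, three argument types, three different result types. */
    printf("%zu\n", sizeof(sin(1)));      /* int promotes to double */
    printf("%zu\n", sizeof(sin(1.0f)));   /* float overload -> sinf */
    printf("%zu\n", sizeof(sin(1.0L)));   /* long double -> sinl */
    return 0;
}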
+
+// acos
+
+static float
+ _TG_ATTRS
+ __tg_acos(float __x) {return acosf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_acos(double __x) {return acos(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_acos(long double __x) {return acosl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_acos(float _Complex __x) {return cacosf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_acos(double _Complex __x) {return cacos(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_acos(long double _Complex __x) {return cacosl(__x);}
+
+#undef acos
+#define acos(__x) __tg_acos(__tg_promote1((__x))(__x))
+
+// asin
+
+static float
+ _TG_ATTRS
+ __tg_asin(float __x) {return asinf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_asin(double __x) {return asin(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_asin(long double __x) {return asinl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_asin(float _Complex __x) {return casinf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_asin(double _Complex __x) {return casin(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_asin(long double _Complex __x) {return casinl(__x);}
+
+#undef asin
+#define asin(__x) __tg_asin(__tg_promote1((__x))(__x))
+
+// atan
+
+static float
+ _TG_ATTRS
+ __tg_atan(float __x) {return atanf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_atan(double __x) {return atan(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_atan(long double __x) {return atanl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_atan(float _Complex __x) {return catanf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_atan(double _Complex __x) {return catan(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_atan(long double _Complex __x) {return catanl(__x);}
+
+#undef atan
+#define atan(__x) __tg_atan(__tg_promote1((__x))(__x))
+
+// acosh
+
+static float
+ _TG_ATTRS
+ __tg_acosh(float __x) {return acoshf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_acosh(double __x) {return acosh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_acosh(long double __x) {return acoshl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_acosh(float _Complex __x) {return cacoshf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_acosh(double _Complex __x) {return cacosh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_acosh(long double _Complex __x) {return cacoshl(__x);}
+
+#undef acosh
+#define acosh(__x) __tg_acosh(__tg_promote1((__x))(__x))
+
+// asinh
+
+static float
+ _TG_ATTRS
+ __tg_asinh(float __x) {return asinhf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_asinh(double __x) {return asinh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_asinh(long double __x) {return asinhl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_asinh(float _Complex __x) {return casinhf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_asinh(double _Complex __x) {return casinh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_asinh(long double _Complex __x) {return casinhl(__x);}
+
+#undef asinh
+#define asinh(__x) __tg_asinh(__tg_promote1((__x))(__x))
+
+// atanh
+
+static float
+ _TG_ATTRS
+ __tg_atanh(float __x) {return atanhf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_atanh(double __x) {return atanh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_atanh(long double __x) {return atanhl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_atanh(float _Complex __x) {return catanhf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_atanh(double _Complex __x) {return catanh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_atanh(long double _Complex __x) {return catanhl(__x);}
+
+#undef atanh
+#define atanh(__x) __tg_atanh(__tg_promote1((__x))(__x))
+
+// cos
+
+static float
+ _TG_ATTRS
+ __tg_cos(float __x) {return cosf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_cos(double __x) {return cos(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_cos(long double __x) {return cosl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_cos(float _Complex __x) {return ccosf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_cos(double _Complex __x) {return ccos(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_cos(long double _Complex __x) {return ccosl(__x);}
+
+#undef cos
+#define cos(__x) __tg_cos(__tg_promote1((__x))(__x))
+
+// sin
+
+static float
+ _TG_ATTRS
+ __tg_sin(float __x) {return sinf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_sin(double __x) {return sin(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_sin(long double __x) {return sinl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_sin(float _Complex __x) {return csinf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_sin(double _Complex __x) {return csin(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_sin(long double _Complex __x) {return csinl(__x);}
+
+#undef sin
+#define sin(__x) __tg_sin(__tg_promote1((__x))(__x))
+
+// tan
+
+static float
+ _TG_ATTRS
+ __tg_tan(float __x) {return tanf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_tan(double __x) {return tan(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_tan(long double __x) {return tanl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_tan(float _Complex __x) {return ctanf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_tan(double _Complex __x) {return ctan(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_tan(long double _Complex __x) {return ctanl(__x);}
+
+#undef tan
+#define tan(__x) __tg_tan(__tg_promote1((__x))(__x))
+
+// cosh
+
+static float
+ _TG_ATTRS
+ __tg_cosh(float __x) {return coshf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_cosh(double __x) {return cosh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_cosh(long double __x) {return coshl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_cosh(float _Complex __x) {return ccoshf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_cosh(double _Complex __x) {return ccosh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_cosh(long double _Complex __x) {return ccoshl(__x);}
+
+#undef cosh
+#define cosh(__x) __tg_cosh(__tg_promote1((__x))(__x))
+
+// sinh
+
+static float
+ _TG_ATTRS
+ __tg_sinh(float __x) {return sinhf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_sinh(double __x) {return sinh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_sinh(long double __x) {return sinhl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_sinh(float _Complex __x) {return csinhf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_sinh(double _Complex __x) {return csinh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_sinh(long double _Complex __x) {return csinhl(__x);}
+
+#undef sinh
+#define sinh(__x) __tg_sinh(__tg_promote1((__x))(__x))
+
+// tanh
+
+static float
+ _TG_ATTRS
+ __tg_tanh(float __x) {return tanhf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_tanh(double __x) {return tanh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_tanh(long double __x) {return tanhl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_tanh(float _Complex __x) {return ctanhf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_tanh(double _Complex __x) {return ctanh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_tanh(long double _Complex __x) {return ctanhl(__x);}
+
+#undef tanh
+#define tanh(__x) __tg_tanh(__tg_promote1((__x))(__x))
+
+// exp
+
+static float
+ _TG_ATTRS
+ __tg_exp(float __x) {return expf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_exp(double __x) {return exp(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_exp(long double __x) {return expl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_exp(float _Complex __x) {return cexpf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_exp(double _Complex __x) {return cexp(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_exp(long double _Complex __x) {return cexpl(__x);}
+
+#undef exp
+#define exp(__x) __tg_exp(__tg_promote1((__x))(__x))
+
+// log
+
+static float
+ _TG_ATTRS
+ __tg_log(float __x) {return logf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_log(double __x) {return log(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_log(long double __x) {return logl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_log(float _Complex __x) {return clogf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_log(double _Complex __x) {return clog(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_log(long double _Complex __x) {return clogl(__x);}
+
+#undef log
+#define log(__x) __tg_log(__tg_promote1((__x))(__x))
+
+// pow
+
+static float
+ _TG_ATTRS
+ __tg_pow(float __x, float __y) {return powf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_pow(double __x, double __y) {return pow(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_pow(long double __x, long double __y) {return powl(__x, __y);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_pow(float _Complex __x, float _Complex __y) {return cpowf(__x, __y);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_pow(double _Complex __x, double _Complex __y) {return cpow(__x, __y);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_pow(long double _Complex __x, long double _Complex __y)
+ {return cpowl(__x, __y);}
+
+#undef pow
+#define pow(__x, __y) __tg_pow(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
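The two-argument form deserves a note: __tg_promote2 adds the two promoted values so the usual arithmetic conversions produce their common type, and the pow macro casts both arguments to it before dispatch. A hedged sketch of the effect:

#include <tgmath.h>

long double demo_pow(void) {
    float f = 2.0f;
    long double e = 10.0L;
    /* float and long double promote to their common type, long double,
     * so both arguments are cast and powl() is selected: 1024.0L. */
    return pow(f, e);
}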
+// sqrt
+
+static float
+ _TG_ATTRS
+ __tg_sqrt(float __x) {return sqrtf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_sqrt(double __x) {return sqrt(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_sqrt(long double __x) {return sqrtl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_sqrt(float _Complex __x) {return csqrtf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_sqrt(double _Complex __x) {return csqrt(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_sqrt(long double _Complex __x) {return csqrtl(__x);}
+
+#undef sqrt
+#define sqrt(__x) __tg_sqrt(__tg_promote1((__x))(__x))
+
+// fabs
+
+static float
+ _TG_ATTRS
+ __tg_fabs(float __x) {return fabsf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_fabs(double __x) {return fabs(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_fabs(long double __x) {return fabsl(__x);}
+
+static float
+ _TG_ATTRS
+ __tg_fabs(float _Complex __x) {return cabsf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_fabs(double _Complex __x) {return cabs(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_fabs(long double _Complex __x) {return cabsl(__x);}
+
+#undef fabs
+#define fabs(__x) __tg_fabs(__tg_promote1((__x))(__x))
+
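One subtlety worth flagging: unlike most families in this header, the complex fabs overloads return real types, matching C99's rule that type-generic fabs of a complex argument is its magnitude. A small hedged illustration:

#include <tgmath.h>

/* fabs on a complex value dispatches to cabs() and yields a real
 * magnitude, not a complex result -- note the real return types above. */
double magnitude(double _Complex z) {
    return fabs(z);
}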
+// atan2
+
+static float
+ _TG_ATTRS
+ __tg_atan2(float __x, float __y) {return atan2f(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_atan2(double __x, double __y) {return atan2(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_atan2(long double __x, long double __y) {return atan2l(__x, __y);}
+
+#undef atan2
+#define atan2(__x, __y) __tg_atan2(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// cbrt
+
+static float
+ _TG_ATTRS
+ __tg_cbrt(float __x) {return cbrtf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_cbrt(double __x) {return cbrt(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_cbrt(long double __x) {return cbrtl(__x);}
+
+#undef cbrt
+#define cbrt(__x) __tg_cbrt(__tg_promote1((__x))(__x))
+
+// ceil
+
+static float
+ _TG_ATTRS
+ __tg_ceil(float __x) {return ceilf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_ceil(double __x) {return ceil(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_ceil(long double __x) {return ceill(__x);}
+
+#undef ceil
+#define ceil(__x) __tg_ceil(__tg_promote1((__x))(__x))
+
+// copysign
+
+static float
+ _TG_ATTRS
+ __tg_copysign(float __x, float __y) {return copysignf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_copysign(double __x, double __y) {return copysign(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_copysign(long double __x, long double __y) {return copysignl(__x, __y);}
+
+#undef copysign
+#define copysign(__x, __y) __tg_copysign(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// erf
+
+static float
+ _TG_ATTRS
+ __tg_erf(float __x) {return erff(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_erf(double __x) {return erf(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_erf(long double __x) {return erfl(__x);}
+
+#undef erf
+#define erf(__x) __tg_erf(__tg_promote1((__x))(__x))
+
+// erfc
+
+static float
+ _TG_ATTRS
+ __tg_erfc(float __x) {return erfcf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_erfc(double __x) {return erfc(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_erfc(long double __x) {return erfcl(__x);}
+
+#undef erfc
+#define erfc(__x) __tg_erfc(__tg_promote1((__x))(__x))
+
+// exp2
+
+static float
+ _TG_ATTRS
+ __tg_exp2(float __x) {return exp2f(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_exp2(double __x) {return exp2(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_exp2(long double __x) {return exp2l(__x);}
+
+#undef exp2
+#define exp2(__x) __tg_exp2(__tg_promote1((__x))(__x))
+
+// expm1
+
+static float
+ _TG_ATTRS
+ __tg_expm1(float __x) {return expm1f(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_expm1(double __x) {return expm1(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_expm1(long double __x) {return expm1l(__x);}
+
+#undef expm1
+#define expm1(__x) __tg_expm1(__tg_promote1((__x))(__x))
+
+// fdim
+
+static float
+ _TG_ATTRS
+ __tg_fdim(float __x, float __y) {return fdimf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_fdim(double __x, double __y) {return fdim(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_fdim(long double __x, long double __y) {return fdiml(__x, __y);}
+
+#undef fdim
+#define fdim(__x, __y) __tg_fdim(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// floor
+
+static float
+ _TG_ATTRS
+ __tg_floor(float __x) {return floorf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_floor(double __x) {return floor(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_floor(long double __x) {return floorl(__x);}
+
+#undef floor
+#define floor(__x) __tg_floor(__tg_promote1((__x))(__x))
+
+// fma
+
+static float
+ _TG_ATTRS
+ __tg_fma(float __x, float __y, float __z)
+ {return fmaf(__x, __y, __z);}
+
+static double
+ _TG_ATTRS
+ __tg_fma(double __x, double __y, double __z)
+ {return fma(__x, __y, __z);}
+
+static long double
+ _TG_ATTRS
+    __tg_fma(long double __x, long double __y, long double __z)
+ {return fmal(__x, __y, __z);}
+
+#undef fma
+#define fma(__x, __y, __z) \
+ __tg_fma(__tg_promote3((__x), (__y), (__z))(__x), \
+ __tg_promote3((__x), (__y), (__z))(__y), \
+ __tg_promote3((__x), (__y), (__z))(__z))
+
+// fmax
+
+static float
+ _TG_ATTRS
+ __tg_fmax(float __x, float __y) {return fmaxf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_fmax(double __x, double __y) {return fmax(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_fmax(long double __x, long double __y) {return fmaxl(__x, __y);}
+
+#undef fmax
+#define fmax(__x, __y) __tg_fmax(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// fmin
+
+static float
+ _TG_ATTRS
+ __tg_fmin(float __x, float __y) {return fminf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_fmin(double __x, double __y) {return fmin(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_fmin(long double __x, long double __y) {return fminl(__x, __y);}
+
+#undef fmin
+#define fmin(__x, __y) __tg_fmin(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// fmod
+
+static float
+ _TG_ATTRS
+ __tg_fmod(float __x, float __y) {return fmodf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_fmod(double __x, double __y) {return fmod(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_fmod(long double __x, long double __y) {return fmodl(__x, __y);}
+
+#undef fmod
+#define fmod(__x, __y) __tg_fmod(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// frexp
+
+static float
+ _TG_ATTRS
+ __tg_frexp(float __x, int* __y) {return frexpf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_frexp(double __x, int* __y) {return frexp(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_frexp(long double __x, int* __y) {return frexpl(__x, __y);}
+
+#undef frexp
+#define frexp(__x, __y) __tg_frexp(__tg_promote1((__x))(__x), __y)
+
+// hypot
+
+static float
+ _TG_ATTRS
+ __tg_hypot(float __x, float __y) {return hypotf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_hypot(double __x, double __y) {return hypot(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_hypot(long double __x, long double __y) {return hypotl(__x, __y);}
+
+#undef hypot
+#define hypot(__x, __y) __tg_hypot(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// ilogb
+
+static int
+ _TG_ATTRS
+ __tg_ilogb(float __x) {return ilogbf(__x);}
+
+static int
+ _TG_ATTRS
+ __tg_ilogb(double __x) {return ilogb(__x);}
+
+static int
+ _TG_ATTRS
+ __tg_ilogb(long double __x) {return ilogbl(__x);}
+
+#undef ilogb
+#define ilogb(__x) __tg_ilogb(__tg_promote1((__x))(__x))
+
+// ldexp
+
+static float
+ _TG_ATTRS
+ __tg_ldexp(float __x, int __y) {return ldexpf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_ldexp(double __x, int __y) {return ldexp(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_ldexp(long double __x, int __y) {return ldexpl(__x, __y);}
+
+#undef ldexp
+#define ldexp(__x, __y) __tg_ldexp(__tg_promote1((__x))(__x), __y)
+
+// lgamma
+
+static float
+ _TG_ATTRS
+ __tg_lgamma(float __x) {return lgammaf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_lgamma(double __x) {return lgamma(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_lgamma(long double __x) {return lgammal(__x);}
+
+#undef lgamma
+#define lgamma(__x) __tg_lgamma(__tg_promote1((__x))(__x))
+
+// llrint
+
+static long long
+ _TG_ATTRS
+ __tg_llrint(float __x) {return llrintf(__x);}
+
+static long long
+ _TG_ATTRS
+ __tg_llrint(double __x) {return llrint(__x);}
+
+static long long
+ _TG_ATTRS
+ __tg_llrint(long double __x) {return llrintl(__x);}
+
+#undef llrint
+#define llrint(__x) __tg_llrint(__tg_promote1((__x))(__x))
+
+// llround
+
+static long long
+ _TG_ATTRS
+ __tg_llround(float __x) {return llroundf(__x);}
+
+static long long
+ _TG_ATTRS
+ __tg_llround(double __x) {return llround(__x);}
+
+static long long
+ _TG_ATTRS
+ __tg_llround(long double __x) {return llroundl(__x);}
+
+#undef llround
+#define llround(__x) __tg_llround(__tg_promote1((__x))(__x))
+
+// log10
+
+static float
+ _TG_ATTRS
+ __tg_log10(float __x) {return log10f(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_log10(double __x) {return log10(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_log10(long double __x) {return log10l(__x);}
+
+#undef log10
+#define log10(__x) __tg_log10(__tg_promote1((__x))(__x))
+
+// log1p
+
+static float
+ _TG_ATTRS
+ __tg_log1p(float __x) {return log1pf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_log1p(double __x) {return log1p(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_log1p(long double __x) {return log1pl(__x);}
+
+#undef log1p
+#define log1p(__x) __tg_log1p(__tg_promote1((__x))(__x))
+
+// log2
+
+static float
+ _TG_ATTRS
+ __tg_log2(float __x) {return log2f(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_log2(double __x) {return log2(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_log2(long double __x) {return log2l(__x);}
+
+#undef log2
+#define log2(__x) __tg_log2(__tg_promote1((__x))(__x))
+
+// logb
+
+static float
+ _TG_ATTRS
+ __tg_logb(float __x) {return logbf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_logb(double __x) {return logb(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_logb(long double __x) {return logbl(__x);}
+
+#undef logb
+#define logb(__x) __tg_logb(__tg_promote1((__x))(__x))
+
+// lrint
+
+static long
+ _TG_ATTRS
+ __tg_lrint(float __x) {return lrintf(__x);}
+
+static long
+ _TG_ATTRS
+ __tg_lrint(double __x) {return lrint(__x);}
+
+static long
+ _TG_ATTRS
+ __tg_lrint(long double __x) {return lrintl(__x);}
+
+#undef lrint
+#define lrint(__x) __tg_lrint(__tg_promote1((__x))(__x))
+
+// lround
+
+static long
+ _TG_ATTRS
+ __tg_lround(float __x) {return lroundf(__x);}
+
+static long
+ _TG_ATTRS
+ __tg_lround(double __x) {return lround(__x);}
+
+static long
+ _TG_ATTRS
+ __tg_lround(long double __x) {return lroundl(__x);}
+
+#undef lround
+#define lround(__x) __tg_lround(__tg_promote1((__x))(__x))
+
+// nearbyint
+
+static float
+ _TG_ATTRS
+ __tg_nearbyint(float __x) {return nearbyintf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_nearbyint(double __x) {return nearbyint(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_nearbyint(long double __x) {return nearbyintl(__x);}
+
+#undef nearbyint
+#define nearbyint(__x) __tg_nearbyint(__tg_promote1((__x))(__x))
+
+// nextafter
+
+static float
+ _TG_ATTRS
+ __tg_nextafter(float __x, float __y) {return nextafterf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_nextafter(double __x, double __y) {return nextafter(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_nextafter(long double __x, long double __y) {return nextafterl(__x, __y);}
+
+#undef nextafter
+#define nextafter(__x, __y) __tg_nextafter(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// nexttoward
+
+static float
+ _TG_ATTRS
+ __tg_nexttoward(float __x, long double __y) {return nexttowardf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_nexttoward(double __x, long double __y) {return nexttoward(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_nexttoward(long double __x, long double __y) {return nexttowardl(__x, __y);}
+
+#undef nexttoward
+#define nexttoward(__x, __y) __tg_nexttoward(__tg_promote1((__x))(__x), (__y))
+
+// remainder
+
+static float
+ _TG_ATTRS
+ __tg_remainder(float __x, float __y) {return remainderf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_remainder(double __x, double __y) {return remainder(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_remainder(long double __x, long double __y) {return remainderl(__x, __y);}
+
+#undef remainder
+#define remainder(__x, __y) __tg_remainder(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// remquo
+
+static float
+ _TG_ATTRS
+ __tg_remquo(float __x, float __y, int* __z)
+ {return remquof(__x, __y, __z);}
+
+static double
+ _TG_ATTRS
+ __tg_remquo(double __x, double __y, int* __z)
+ {return remquo(__x, __y, __z);}
+
+static long double
+ _TG_ATTRS
+    __tg_remquo(long double __x, long double __y, int* __z)
+ {return remquol(__x, __y, __z);}
+
+#undef remquo
+#define remquo(__x, __y, __z) \
+ __tg_remquo(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y), \
+ (__z))
+
+// rint
+
+static float
+ _TG_ATTRS
+ __tg_rint(float __x) {return rintf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_rint(double __x) {return rint(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_rint(long double __x) {return rintl(__x);}
+
+#undef rint
+#define rint(__x) __tg_rint(__tg_promote1((__x))(__x))
+
+// round
+
+static float
+ _TG_ATTRS
+ __tg_round(float __x) {return roundf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_round(double __x) {return round(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_round(long double __x) {return roundl(__x);}
+
+#undef round
+#define round(__x) __tg_round(__tg_promote1((__x))(__x))
+
+// scalbn
+
+static float
+ _TG_ATTRS
+ __tg_scalbn(float __x, int __y) {return scalbnf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_scalbn(double __x, int __y) {return scalbn(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_scalbn(long double __x, int __y) {return scalbnl(__x, __y);}
+
+#undef scalbn
+#define scalbn(__x, __y) __tg_scalbn(__tg_promote1((__x))(__x), __y)
+
+// scalbln
+
+static float
+ _TG_ATTRS
+ __tg_scalbln(float __x, long __y) {return scalblnf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_scalbln(double __x, long __y) {return scalbln(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_scalbln(long double __x, long __y) {return scalblnl(__x, __y);}
+
+#undef scalbln
+#define scalbln(__x, __y) __tg_scalbln(__tg_promote1((__x))(__x), __y)
+
+// tgamma
+
+static float
+ _TG_ATTRS
+ __tg_tgamma(float __x) {return tgammaf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_tgamma(double __x) {return tgamma(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_tgamma(long double __x) {return tgammal(__x);}
+
+#undef tgamma
+#define tgamma(__x) __tg_tgamma(__tg_promote1((__x))(__x))
+
+// trunc
+
+static float
+ _TG_ATTRS
+ __tg_trunc(float __x) {return truncf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_trunc(double __x) {return trunc(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_trunc(long double __x) {return truncl(__x);}
+
+#undef trunc
+#define trunc(__x) __tg_trunc(__tg_promote1((__x))(__x))
+
+// carg
+
+static float
+ _TG_ATTRS
+ __tg_carg(float __x) {return atan2f(0.F, __x);}
+
+static double
+ _TG_ATTRS
+ __tg_carg(double __x) {return atan2(0., __x);}
+
+static long double
+ _TG_ATTRS
+ __tg_carg(long double __x) {return atan2l(0.L, __x);}
+
+static float
+ _TG_ATTRS
+ __tg_carg(float _Complex __x) {return cargf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_carg(double _Complex __x) {return carg(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_carg(long double _Complex __x) {return cargl(__x);}
+
+#undef carg
+#define carg(__x) __tg_carg(__tg_promote1((__x))(__x))
+
+// cimag
+
+static float
+ _TG_ATTRS
+ __tg_cimag(float __x) {return 0;}
+
+static double
+ _TG_ATTRS
+ __tg_cimag(double __x) {return 0;}
+
+static long double
+ _TG_ATTRS
+ __tg_cimag(long double __x) {return 0;}
+
+static float
+ _TG_ATTRS
+ __tg_cimag(float _Complex __x) {return cimagf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_cimag(double _Complex __x) {return cimag(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_cimag(long double _Complex __x) {return cimagl(__x);}
+
+#undef cimag
+#define cimag(__x) __tg_cimag(__tg_promote1((__x))(__x))
+
+// conj
+
+static float _Complex
+ _TG_ATTRS
+ __tg_conj(float __x) {return __x;}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_conj(double __x) {return __x;}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_conj(long double __x) {return __x;}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_conj(float _Complex __x) {return conjf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_conj(double _Complex __x) {return conj(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_conj(long double _Complex __x) {return conjl(__x);}
+
+#undef conj
+#define conj(__x) __tg_conj(__tg_promote1((__x))(__x))
+
+// cproj
+
+static float _Complex
+ _TG_ATTRS
+ __tg_cproj(float __x) {return cprojf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_cproj(double __x) {return cproj(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_cproj(long double __x) {return cprojl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_cproj(float _Complex __x) {return cprojf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_cproj(double _Complex __x) {return cproj(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_cproj(long double _Complex __x) {return cprojl(__x);}
+
+#undef cproj
+#define cproj(__x) __tg_cproj(__tg_promote1((__x))(__x))
+
+// creal
+
+static float
+ _TG_ATTRS
+ __tg_creal(float __x) {return __x;}
+
+static double
+ _TG_ATTRS
+ __tg_creal(double __x) {return __x;}
+
+static long double
+ _TG_ATTRS
+ __tg_creal(long double __x) {return __x;}
+
+static float
+ _TG_ATTRS
+ __tg_creal(float _Complex __x) {return crealf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_creal(double _Complex __x) {return creal(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_creal(long double _Complex __x) {return creall(__x);}
+
+#undef creal
+#define creal(__x) __tg_creal(__tg_promote1((__x))(__x))
+
+#undef _TG_ATTRSp
+#undef _TG_ATTRS
+
+#endif /* __cplusplus */
+#endif /* __TGMATH_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/tmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/tmmintrin.h
new file mode 100644
index 0000000..0002890
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/tmmintrin.h
@@ -0,0 +1,221 @@
+/*===---- tmmintrin.h - SSSE3 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __TMMINTRIN_H
+#define __TMMINTRIN_H
+
+#include <pmmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("ssse3")))
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_abs_pi8(__m64 __a)
+{
+ return (__m64)__builtin_ia32_pabsb((__v8qi)__a);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_abs_epi8(__m128i __a)
+{
+ return (__m128i)__builtin_ia32_pabsb128((__v16qi)__a);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_abs_pi16(__m64 __a)
+{
+ return (__m64)__builtin_ia32_pabsw((__v4hi)__a);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_abs_epi16(__m128i __a)
+{
+ return (__m128i)__builtin_ia32_pabsw128((__v8hi)__a);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_abs_pi32(__m64 __a)
+{
+ return (__m64)__builtin_ia32_pabsd((__v2si)__a);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_abs_epi32(__m128i __a)
+{
+ return (__m128i)__builtin_ia32_pabsd128((__v4si)__a);
+}
+
+#define _mm_alignr_epi8(a, b, n) __extension__ ({ \
+ (__m128i)__builtin_ia32_palignr128((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), (n)); })
+
+#define _mm_alignr_pi8(a, b, n) __extension__ ({ \
+ (__m64)__builtin_ia32_palignr((__v8qi)(__m64)(a), (__v8qi)(__m64)(b), (n)); })
+
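_mm_alignr_epi8 and _mm_alignr_pi8 are macros rather than inline functions because the palignr byte-shift count must be a compile-time immediate. A hedged usage sketch (assumes an SSSE3-capable x86 target):

#include <tmmintrin.h>

/* Concatenates a (high half) with b (low half), shifts the 32-byte
 * value right by 4 bytes, and keeps the low 16 bytes, i.e.
 * { b[4..15], a[0..3] }. The count must be a constant expression. */
static __m128i shift_in_four_bytes(__m128i a, __m128i b) {
    return _mm_alignr_epi8(a, b, 4);
}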
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hadd_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_phaddw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hadd_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_phaddd128((__v4si)__a, (__v4si)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_hadd_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_phaddw((__v4hi)__a, (__v4hi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_hadd_pi32(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_phaddd((__v2si)__a, (__v2si)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hadds_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_phaddsw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_hadds_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_phaddsw((__v4hi)__a, (__v4hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hsub_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_phsubw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hsub_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_phsubd128((__v4si)__a, (__v4si)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_hsub_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_phsubw((__v4hi)__a, (__v4hi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_hsub_pi32(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_phsubd((__v2si)__a, (__v2si)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hsubs_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_phsubsw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_hsubs_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_phsubsw((__v4hi)__a, (__v4hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maddubs_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmaddubsw128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_maddubs_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pmaddubsw((__v8qi)__a, (__v8qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mulhrs_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pmulhrsw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_mulhrs_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pmulhrsw((__v4hi)__a, (__v4hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_shuffle_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_pshufb128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_shuffle_pi8(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pshufb((__v8qi)__a, (__v8qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sign_epi8(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psignb128((__v16qi)__a, (__v16qi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sign_epi16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psignw128((__v8hi)__a, (__v8hi)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sign_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psignd128((__v4si)__a, (__v4si)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sign_pi8(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_psignb((__v8qi)__a, (__v8qi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sign_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_psignw((__v4hi)__a, (__v4hi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sign_pi32(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_psignd((__v2si)__a, (__v2si)__b);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __TMMINTRIN_H */
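As a usage note (a hedged sketch, not part of the header): the horizontal-add intrinsics make short reductions compact, e.g. summing eight signed 16-bit lanes with three phaddw steps:

#include <tmmintrin.h>

/* Sums the eight signed 16-bit lanes of v (wraps on overflow);
 * each _mm_hadd_epi16 halves the number of live lanes. */
static short hsum_epi16(__m128i v) {
    v = _mm_hadd_epi16(v, v);   /* 8 lanes -> 4 partial sums */
    v = _mm_hadd_epi16(v, v);   /* 4 -> 2 */
    v = _mm_hadd_epi16(v, v);   /* 2 -> 1 */
    return (short)_mm_extract_epi16(v, 0);   /* SSE2, via <pmmintrin.h> */
}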
diff --git a/contrib/llvm/tools/clang/lib/Headers/unwind.h b/contrib/llvm/tools/clang/lib/Headers/unwind.h
new file mode 100644
index 0000000..303d792
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/unwind.h
@@ -0,0 +1,282 @@
+/*===---- unwind.h - Stack unwinding ----------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* See "Data Definitions for libgcc_s" in the Linux Standard Base.*/
+
+#ifndef __CLANG_UNWIND_H
+#define __CLANG_UNWIND_H
+
+#if defined(__APPLE__) && __has_include_next(<unwind.h>)
+/* Darwin (from 11.x on) provides an unwind.h. If that's available,
+ * use it. libunwind wraps some of its definitions in #ifdef _GNU_SOURCE,
+ * so define that around the include. */
+# ifndef _GNU_SOURCE
+# define _SHOULD_UNDEFINE_GNU_SOURCE
+# define _GNU_SOURCE
+# endif
+// libunwind's unwind.h reflects the current visibility. However, Mozilla
+// builds with -fvisibility=hidden and relies on gcc's unwind.h to reset the
+// visibility to default and export its contents. gcc also allows users to
+// override its override by #defining HIDE_EXPORTS (but note, this only obeys
+// the user's -fvisibility setting; it doesn't hide any exports on its own). We
+// imitate gcc's header here:
+# ifdef HIDE_EXPORTS
+# include_next <unwind.h>
+# else
+# pragma GCC visibility push(default)
+# include_next <unwind.h>
+# pragma GCC visibility pop
+# endif
+# ifdef _SHOULD_UNDEFINE_GNU_SOURCE
+# undef _GNU_SOURCE
+# undef _SHOULD_UNDEFINE_GNU_SOURCE
+# endif
+#else
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* It is a bit strange for a header to play with the visibility of the
+ symbols it declares, but this matches gcc's behavior and some programs
+   depend on it. */
+#ifndef HIDE_EXPORTS
+#pragma GCC visibility push(default)
+#endif
+
+typedef uintptr_t _Unwind_Word;
+typedef intptr_t _Unwind_Sword;
+typedef uintptr_t _Unwind_Ptr;
+typedef uintptr_t _Unwind_Internal_Ptr;
+typedef uint64_t _Unwind_Exception_Class;
+
+typedef intptr_t _sleb128_t;
+typedef uintptr_t _uleb128_t;
+
+struct _Unwind_Context;
+struct _Unwind_Exception;
+typedef enum {
+ _URC_NO_REASON = 0,
+ _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
+
+ _URC_FATAL_PHASE2_ERROR = 2,
+ _URC_FATAL_PHASE1_ERROR = 3,
+ _URC_NORMAL_STOP = 4,
+
+ _URC_END_OF_STACK = 5,
+ _URC_HANDLER_FOUND = 6,
+ _URC_INSTALL_CONTEXT = 7,
+ _URC_CONTINUE_UNWIND = 8
+} _Unwind_Reason_Code;
+
+typedef enum {
+ _UA_SEARCH_PHASE = 1,
+ _UA_CLEANUP_PHASE = 2,
+
+ _UA_HANDLER_FRAME = 4,
+ _UA_FORCE_UNWIND = 8,
+ _UA_END_OF_STACK = 16 /* gcc extension to C++ ABI */
+} _Unwind_Action;
+
+typedef void (*_Unwind_Exception_Cleanup_Fn)(_Unwind_Reason_Code,
+ struct _Unwind_Exception *);
+
+struct _Unwind_Exception {
+ _Unwind_Exception_Class exception_class;
+ _Unwind_Exception_Cleanup_Fn exception_cleanup;
+ _Unwind_Word private_1;
+ _Unwind_Word private_2;
+ /* The Itanium ABI requires that _Unwind_Exception objects are "double-word
+ * aligned". GCC has interpreted this to mean "use the maximum useful
+ * alignment for the target"; so do we. */
+} __attribute__((__aligned__));
+
+typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn)(int, _Unwind_Action,
+ _Unwind_Exception_Class,
+ struct _Unwind_Exception *,
+ struct _Unwind_Context *,
+ void *);
+
+typedef _Unwind_Reason_Code (*_Unwind_Personality_Fn)(
+ int, _Unwind_Action, _Unwind_Exception_Class, struct _Unwind_Exception *,
+ struct _Unwind_Context *);
+typedef _Unwind_Personality_Fn __personality_routine;
+
+typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn)(struct _Unwind_Context *,
+ void *);
+
+#if defined(__arm__) && !defined(__APPLE__)
+
+typedef enum {
+ _UVRSC_CORE = 0, /* integer register */
+ _UVRSC_VFP = 1, /* vfp */
+ _UVRSC_WMMXD = 3, /* Intel WMMX data register */
+ _UVRSC_WMMXC = 4 /* Intel WMMX control register */
+} _Unwind_VRS_RegClass;
+
+typedef enum {
+ _UVRSD_UINT32 = 0,
+ _UVRSD_VFPX = 1,
+ _UVRSD_UINT64 = 3,
+ _UVRSD_FLOAT = 4,
+ _UVRSD_DOUBLE = 5
+} _Unwind_VRS_DataRepresentation;
+
+typedef enum {
+ _UVRSR_OK = 0,
+ _UVRSR_NOT_IMPLEMENTED = 1,
+ _UVRSR_FAILED = 2
+} _Unwind_VRS_Result;
+
+_Unwind_VRS_Result _Unwind_VRS_Get(struct _Unwind_Context *__context,
+ _Unwind_VRS_RegClass __regclass,
+ uint32_t __regno,
+ _Unwind_VRS_DataRepresentation __representation,
+ void *__valuep);
+
+_Unwind_VRS_Result _Unwind_VRS_Set(struct _Unwind_Context *__context,
+ _Unwind_VRS_RegClass __regclass,
+ uint32_t __regno,
+ _Unwind_VRS_DataRepresentation __representation,
+ void *__valuep);
+
+static __inline__
+_Unwind_Word _Unwind_GetGR(struct _Unwind_Context *__context, int __index) {
+ _Unwind_Word __value;
+ _Unwind_VRS_Get(__context, _UVRSC_CORE, __index, _UVRSD_UINT32, &__value);
+ return __value;
+}
+
+static __inline__
+void _Unwind_SetGR(struct _Unwind_Context *__context, int __index,
+ _Unwind_Word __value) {
+ _Unwind_VRS_Set(__context, _UVRSC_CORE, __index, _UVRSD_UINT32, &__value);
+}
+
+static __inline__
+_Unwind_Word _Unwind_GetIP(struct _Unwind_Context *__context) {
+ _Unwind_Word __ip = _Unwind_GetGR(__context, 15);
+ return __ip & ~(_Unwind_Word)(0x1); /* Remove thumb mode bit. */
+}
+
+static __inline__
+void _Unwind_SetIP(struct _Unwind_Context *__context, _Unwind_Word __value) {
+ _Unwind_Word __thumb_mode_bit = _Unwind_GetGR(__context, 15) & 0x1;
+ _Unwind_SetGR(__context, 15, __value | __thumb_mode_bit);
+}
+#else
+_Unwind_Word _Unwind_GetGR(struct _Unwind_Context *, int);
+void _Unwind_SetGR(struct _Unwind_Context *, int, _Unwind_Word);
+
+_Unwind_Word _Unwind_GetIP(struct _Unwind_Context *);
+void _Unwind_SetIP(struct _Unwind_Context *, _Unwind_Word);
+#endif
+
+
+_Unwind_Word _Unwind_GetIPInfo(struct _Unwind_Context *, int *);
+
+_Unwind_Word _Unwind_GetCFA(struct _Unwind_Context *);
+
+_Unwind_Word _Unwind_GetBSP(struct _Unwind_Context *);
+
+void *_Unwind_GetLanguageSpecificData(struct _Unwind_Context *);
+
+_Unwind_Ptr _Unwind_GetRegionStart(struct _Unwind_Context *);
+
+/* DWARF EH functions; currently not available on Darwin/ARM */
+#if !defined(__APPLE__) || !defined(__arm__)
+
+_Unwind_Reason_Code _Unwind_RaiseException(struct _Unwind_Exception *);
+_Unwind_Reason_Code _Unwind_ForcedUnwind(struct _Unwind_Exception *,
+ _Unwind_Stop_Fn, void *);
+void _Unwind_DeleteException(struct _Unwind_Exception *);
+void _Unwind_Resume(struct _Unwind_Exception *);
+_Unwind_Reason_Code _Unwind_Resume_or_Rethrow(struct _Unwind_Exception *);
+
+#endif
+
+_Unwind_Reason_Code _Unwind_Backtrace(_Unwind_Trace_Fn, void *);
+
+/* setjmp(3)/longjmp(3) stuff */
+typedef struct SjLj_Function_Context *_Unwind_FunctionContext_t;
+
+void _Unwind_SjLj_Register(_Unwind_FunctionContext_t);
+void _Unwind_SjLj_Unregister(_Unwind_FunctionContext_t);
+_Unwind_Reason_Code _Unwind_SjLj_RaiseException(struct _Unwind_Exception *);
+_Unwind_Reason_Code _Unwind_SjLj_ForcedUnwind(struct _Unwind_Exception *,
+ _Unwind_Stop_Fn, void *);
+void _Unwind_SjLj_Resume(struct _Unwind_Exception *);
+_Unwind_Reason_Code _Unwind_SjLj_Resume_or_Rethrow(struct _Unwind_Exception *);
+
+void *_Unwind_FindEnclosingFunction(void *);
+
+#ifdef __APPLE__
+
+_Unwind_Ptr _Unwind_GetDataRelBase(struct _Unwind_Context *)
+ __attribute__((__unavailable__));
+_Unwind_Ptr _Unwind_GetTextRelBase(struct _Unwind_Context *)
+ __attribute__((__unavailable__));
+
+/* Darwin-specific functions */
+void __register_frame(const void *);
+void __deregister_frame(const void *);
+
+struct dwarf_eh_bases {
+ uintptr_t tbase;
+ uintptr_t dbase;
+ uintptr_t func;
+};
+void *_Unwind_Find_FDE(const void *, struct dwarf_eh_bases *);
+
+void __register_frame_info_bases(const void *, void *, void *, void *)
+ __attribute__((__unavailable__));
+void __register_frame_info(const void *, void *) __attribute__((__unavailable__));
+void __register_frame_info_table_bases(const void *, void*, void *, void *)
+ __attribute__((__unavailable__));
+void __register_frame_info_table(const void *, void *)
+ __attribute__((__unavailable__));
+void __register_frame_table(const void *) __attribute__((__unavailable__));
+void __deregister_frame_info(const void *) __attribute__((__unavailable__));
+void __deregister_frame_info_bases(const void *) __attribute__((__unavailable__));
+
+#else
+
+_Unwind_Ptr _Unwind_GetDataRelBase(struct _Unwind_Context *);
+_Unwind_Ptr _Unwind_GetTextRelBase(struct _Unwind_Context *);
+
+#endif
+
+
+#ifndef HIDE_EXPORTS
+#pragma GCC visibility pop
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+#endif /* __CLANG_UNWIND_H */
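A small consumer of this interface, as a hedged sketch: _Unwind_Backtrace walks the current stack and invokes the callback once per frame, and _Unwind_GetIP reads that frame's instruction pointer.

#include <unwind.h>
#include <stdio.h>

/* Called once per stack frame; returning _URC_NO_REASON continues
 * the walk. The void* argument is user state threaded through. */
static _Unwind_Reason_Code print_frame(struct _Unwind_Context *ctx,
                                       void *arg) {
    int *depth = (int *)arg;
    printf("#%d ip=%#lx\n", (*depth)++,
           (unsigned long)_Unwind_GetIP(ctx));
    return _URC_NO_REASON;
}

void dump_backtrace(void) {
    int depth = 0;
    _Unwind_Backtrace(print_frame, &depth);
}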
diff --git a/contrib/llvm/tools/clang/lib/Headers/vadefs.h b/contrib/llvm/tools/clang/lib/Headers/vadefs.h
new file mode 100644
index 0000000..7fe9a74
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/vadefs.h
@@ -0,0 +1,65 @@
+/* ===-------- vadefs.h ---------------------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Only include this if we are aiming for MSVC compatibility. */
+#ifndef _MSC_VER
+#include_next <vadefs.h>
+#else
+
+#ifndef __clang_vadefs_h
+#define __clang_vadefs_h
+
+#include_next <vadefs.h>
+
+/* Override macros from vadefs.h with definitions that work with Clang. */
+#ifdef _crt_va_start
+#undef _crt_va_start
+#define _crt_va_start(ap, param) __builtin_va_start(ap, param)
+#endif
+#ifdef _crt_va_end
+#undef _crt_va_end
+#define _crt_va_end(ap) __builtin_va_end(ap)
+#endif
+#ifdef _crt_va_arg
+#undef _crt_va_arg
+#define _crt_va_arg(ap, type) __builtin_va_arg(ap, type)
+#endif
+
+/* VS 2015 switched to double underscore names, which is an improvement, but now
+ * we have to intercept those names too.
+ */
+#ifdef __crt_va_start
+#undef __crt_va_start
+#define __crt_va_start(ap, param) __builtin_va_start(ap, param)
+#endif
+#ifdef __crt_va_end
+#undef __crt_va_end
+#define __crt_va_end(ap) __builtin_va_end(ap)
+#endif
+#ifdef __crt_va_arg
+#undef __crt_va_arg
+#define __crt_va_arg(ap, type) __builtin_va_arg(ap, type)
+#endif
+
+#endif
+#endif
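For orientation, a hedged sketch of the kind of code these overrides exist for: inside MSVC-compatible CRT headers, variadic access goes through the _crt_va_* names, which the block above redirects to clang builtins (ordinary user code should keep using <stdarg.h>). Assumes an MSVC environment where <vadefs.h> defines va_list and the _crt_va_* macros:

#include <vadefs.h>

int sum_ints(int n, ...) {
    va_list ap;
    int total = 0;
    _crt_va_start(ap, n);                  /* -> __builtin_va_start */
    while (n-- > 0)
        total += _crt_va_arg(ap, int);     /* -> __builtin_va_arg   */
    _crt_va_end(ap);                       /* -> __builtin_va_end   */
    return total;
}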
diff --git a/contrib/llvm/tools/clang/lib/Headers/varargs.h b/contrib/llvm/tools/clang/lib/Headers/varargs.h
new file mode 100644
index 0000000..b5477d0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/varargs.h
@@ -0,0 +1,26 @@
+/*===---- varargs.h - Variable argument handling ---------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __VARARGS_H
+#define __VARARGS_H
+ #error "Please use <stdarg.h> instead of <varargs.h>"
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Headers/vecintrin.h b/contrib/llvm/tools/clang/lib/Headers/vecintrin.h
new file mode 100644
index 0000000..ca7acb4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/vecintrin.h
@@ -0,0 +1,8946 @@
+/*===---- vecintrin.h - Vector intrinsics ----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if defined(__s390x__) && defined(__VEC__)
+
+#define __ATTRS_ai __attribute__((__always_inline__))
+#define __ATTRS_o __attribute__((__overloadable__))
+#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
+
+#define __constant(PARM) \
+ __attribute__((__enable_if__ ((PARM) == (PARM), \
+ "argument must be a constant integer")))
+#define __constant_range(PARM, LOW, HIGH) \
+ __attribute__((__enable_if__ ((PARM) >= (LOW) && (PARM) <= (HIGH), \
+ "argument must be a constant integer from " #LOW " to " #HIGH)))
+#define __constant_pow2_range(PARM, LOW, HIGH) \
+ __attribute__((__enable_if__ ((PARM) >= (LOW) && (PARM) <= (HIGH) && \
+ ((PARM) & ((PARM) - 1)) == 0, \
+ "argument must be a constant power of 2 from " #LOW " to " #HIGH)))
+
+/*-- __lcbb -----------------------------------------------------------------*/
+
+extern __ATTRS_o unsigned int
+__lcbb(const void *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+#define __lcbb(X, Y) ((__typeof__((__lcbb)((X), (Y)))) \
+ __builtin_s390_lcbb((X), __builtin_constant_p((Y))? \
+ ((Y) == 64 ? 0 : \
+ (Y) == 128 ? 1 : \
+ (Y) == 256 ? 2 : \
+ (Y) == 512 ? 3 : \
+ (Y) == 1024 ? 4 : \
+ (Y) == 2048 ? 5 : \
+ (Y) == 4096 ? 6 : 0) : 0))
+
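The declaration-plus-macro pairing above lets __lcbb keep a typed prototype while still folding the boundary size into the 0-6 immediate the lcbb instruction expects. A hedged usage sketch (assumes an s390x target compiled with vector support):

#include <vecintrin.h>

/* Number of bytes loadable from p without crossing a 4 KiB block
 * boundary -- handy for bounding a vector load near a page end. The
 * boundary must be a constant power of two between 64 and 4096. */
static unsigned int safe_load_len(const char *p) {
    return __lcbb(p, 4096);
}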
+/*-- vec_extract ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai signed char
+vec_extract(vector signed char __vec, int __index) {
+ return __vec[__index & 15];
+}
+
+static inline __ATTRS_o_ai unsigned char
+vec_extract(vector bool char __vec, int __index) {
+ return __vec[__index & 15];
+}
+
+static inline __ATTRS_o_ai unsigned char
+vec_extract(vector unsigned char __vec, int __index) {
+ return __vec[__index & 15];
+}
+
+static inline __ATTRS_o_ai signed short
+vec_extract(vector signed short __vec, int __index) {
+ return __vec[__index & 7];
+}
+
+static inline __ATTRS_o_ai unsigned short
+vec_extract(vector bool short __vec, int __index) {
+ return __vec[__index & 7];
+}
+
+static inline __ATTRS_o_ai unsigned short
+vec_extract(vector unsigned short __vec, int __index) {
+ return __vec[__index & 7];
+}
+
+static inline __ATTRS_o_ai signed int
+vec_extract(vector signed int __vec, int __index) {
+ return __vec[__index & 3];
+}
+
+static inline __ATTRS_o_ai unsigned int
+vec_extract(vector bool int __vec, int __index) {
+ return __vec[__index & 3];
+}
+
+static inline __ATTRS_o_ai unsigned int
+vec_extract(vector unsigned int __vec, int __index) {
+ return __vec[__index & 3];
+}
+
+static inline __ATTRS_o_ai signed long long
+vec_extract(vector signed long long __vec, int __index) {
+ return __vec[__index & 1];
+}
+
+static inline __ATTRS_o_ai unsigned long long
+vec_extract(vector bool long long __vec, int __index) {
+ return __vec[__index & 1];
+}
+
+static inline __ATTRS_o_ai unsigned long long
+vec_extract(vector unsigned long long __vec, int __index) {
+ return __vec[__index & 1];
+}
+
+static inline __ATTRS_o_ai double
+vec_extract(vector double __vec, int __index) {
+ return __vec[__index & 1];
+}
+
+/*-- vec_insert -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_insert(signed char __scalar, vector signed char __vec, int __index) {
+ __vec[__index & 15] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_insert(unsigned char __scalar, vector bool char __vec, int __index) {
+ vector unsigned char __newvec = (vector unsigned char)__vec;
+ __newvec[__index & 15] = (unsigned char)__scalar;
+ return __newvec;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_insert(unsigned char __scalar, vector unsigned char __vec, int __index) {
+ __vec[__index & 15] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_insert(signed short __scalar, vector signed short __vec, int __index) {
+ __vec[__index & 7] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_insert(unsigned short __scalar, vector bool short __vec, int __index) {
+ vector unsigned short __newvec = (vector unsigned short)__vec;
+ __newvec[__index & 7] = (unsigned short)__scalar;
+ return __newvec;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_insert(unsigned short __scalar, vector unsigned short __vec, int __index) {
+ __vec[__index & 7] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_insert(signed int __scalar, vector signed int __vec, int __index) {
+ __vec[__index & 3] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_insert(unsigned int __scalar, vector bool int __vec, int __index) {
+ vector unsigned int __newvec = (vector unsigned int)__vec;
+ __newvec[__index & 3] = __scalar;
+ return __newvec;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_insert(unsigned int __scalar, vector unsigned int __vec, int __index) {
+ __vec[__index & 3] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_insert(signed long long __scalar, vector signed long long __vec,
+ int __index) {
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_insert(unsigned long long __scalar, vector bool long long __vec,
+ int __index) {
+ vector unsigned long long __newvec = (vector unsigned long long)__vec;
+ __newvec[__index & 1] = __scalar;
+ return __newvec;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_insert(unsigned long long __scalar, vector unsigned long long __vec,
+ int __index) {
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_insert(double __scalar, vector double __vec, int __index) {
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
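A brief hedged sketch of the element accessors above; note each index is masked to the lane count, so out-of-range indices wrap rather than trap, and vec_insert returns a new vector without modifying its input:

#include <vecintrin.h>

static int roundtrip(vector signed int v) {
    v = vec_insert(42, v, 2);   /* index taken modulo 4 lanes */
    return vec_extract(v, 2);   /* -> 42 */
}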
+/*-- vec_promote ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_promote(signed char __scalar, int __index) {
+ const vector signed char __zero = (vector signed char)0;
+ vector signed char __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+ __vec[__index & 15] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_promote(unsigned char __scalar, int __index) {
+ const vector unsigned char __zero = (vector unsigned char)0;
+ vector unsigned char __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+ __vec[__index & 15] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_promote(signed short __scalar, int __index) {
+ const vector signed short __zero = (vector signed short)0;
+ vector signed short __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1, -1, -1, -1, -1);
+ __vec[__index & 7] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_promote(unsigned short __scalar, int __index) {
+ const vector unsigned short __zero = (vector unsigned short)0;
+ vector unsigned short __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1, -1, -1, -1, -1);
+ __vec[__index & 7] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_promote(signed int __scalar, int __index) {
+ const vector signed int __zero = (vector signed int)0;
+ vector signed int __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1);
+ __vec[__index & 3] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_promote(unsigned int __scalar, int __index) {
+ const vector unsigned int __zero = (vector unsigned int)0;
+ vector unsigned int __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1);
+ __vec[__index & 3] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_promote(signed long long __scalar, int __index) {
+ const vector signed long long __zero = (vector signed long long)0;
+ vector signed long long __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1);
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_promote(unsigned long long __scalar, int __index) {
+ const vector unsigned long long __zero = (vector unsigned long long)0;
+ vector unsigned long long __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1);
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_promote(double __scalar, int __index) {
+ const vector double __zero = (vector double)0;
+ vector double __vec = __builtin_shufflevector(__zero, __zero, -1, -1);
+ __vec[__index & 1] = __scalar;
+ return __vec;
+}
+
+/*-- vec_insert_and_zero ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_insert_and_zero(const signed char *__ptr) {
+ vector signed char __vec = (vector signed char)0;
+ __vec[7] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_insert_and_zero(const unsigned char *__ptr) {
+ vector unsigned char __vec = (vector unsigned char)0;
+ __vec[7] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_insert_and_zero(const signed short *__ptr) {
+ vector signed short __vec = (vector signed short)0;
+ __vec[3] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_insert_and_zero(const unsigned short *__ptr) {
+ vector unsigned short __vec = (vector unsigned short)0;
+ __vec[3] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_insert_and_zero(const signed int *__ptr) {
+ vector signed int __vec = (vector signed int)0;
+ __vec[1] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_insert_and_zero(const unsigned int *__ptr) {
+ vector unsigned int __vec = (vector unsigned int)0;
+ __vec[1] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_insert_and_zero(const signed long long *__ptr) {
+ vector signed long long __vec = (vector signed long long)0;
+ __vec[0] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_insert_and_zero(const unsigned long long *__ptr) {
+ vector unsigned long long __vec = (vector unsigned long long)0;
+ __vec[0] = *__ptr;
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_insert_and_zero(const double *__ptr) {
+ vector double __vec = (vector double)0;
+ __vec[0] = *__ptr;
+ return __vec;
+}
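+
+/* Note (derived from the definitions above): the insertion index is fixed
+   per element size (byte 7, halfword 3, word 1, doubleword 0), i.e. the
+   rightmost element of the leftmost doubleword, which matches the layout
+   of the VLLEZ instruction these are presumably lowered to.  Example:
+
+     int __x = 5;
+     vector signed int __v = vec_insert_and_zero(&__x);    { 0, 5, 0, 0 }
+*/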
+
+/*-- vec_perm ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_perm(vector signed char __a, vector signed char __b,
+ vector unsigned char __c) {
+ return (vector signed char)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_perm(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (vector unsigned char)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_perm(vector bool char __a, vector bool char __b,
+ vector unsigned char __c) {
+ return (vector bool char)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_perm(vector signed short __a, vector signed short __b,
+ vector unsigned char __c) {
+ return (vector signed short)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_perm(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned char __c) {
+ return (vector unsigned short)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_perm(vector bool short __a, vector bool short __b,
+ vector unsigned char __c) {
+ return (vector bool short)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_perm(vector signed int __a, vector signed int __b,
+ vector unsigned char __c) {
+ return (vector signed int)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_perm(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned char __c) {
+ return (vector unsigned int)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_perm(vector bool int __a, vector bool int __b,
+ vector unsigned char __c) {
+ return (vector bool int)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_perm(vector signed long long __a, vector signed long long __b,
+ vector unsigned char __c) {
+ return (vector signed long long)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_perm(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned char __c) {
+ return (vector unsigned long long)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_perm(vector bool long long __a, vector bool long long __b,
+ vector unsigned char __c) {
+ return (vector bool long long)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_perm(vector double __a, vector double __b,
+ vector unsigned char __c) {
+ return (vector double)__builtin_s390_vperm(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
+}
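+
+/* Usage sketch (illustrative): per VPERM, each byte of the third operand
+   indexes the 32-byte concatenation of the first two (0..15 pick from
+   __a, 16..31 from __b), so reversing a vector looks like:
+
+     const vector unsigned char __rev =
+       { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
+     vector unsigned char __r = vec_perm(__v, __v, __rev);
+*/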
+
+/*-- vec_permi --------------------------------------------------------------*/
+
+extern __ATTRS_o vector signed long long
+vec_permi(vector signed long long __a, vector signed long long __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector unsigned long long
+vec_permi(vector unsigned long long __a, vector unsigned long long __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector bool long long
+vec_permi(vector bool long long __a, vector bool long long __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector double
+vec_permi(vector double __a, vector double __b, int __c)
+ __constant_range(__c, 0, 3);
+
+#define vec_permi(X, Y, Z) ((__typeof__((vec_permi)((X), (Y), (Z)))) \
+ __builtin_s390_vpdi((vector unsigned long long)(X), \
+ (vector unsigned long long)(Y), \
+ (((Z) & 2) << 1) | ((Z) & 1)))
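+
+/* Reading the macro above: the constant selects one doubleword from each
+   operand, giving { X[(Z >> 1) & 1], Y[Z & 1] }; the expression
+   (((Z) & 2) << 1) | ((Z) & 1) remaps 0..3 onto the 0/1/4/5 immediate
+   values that __builtin_s390_vpdi expects. */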
+
+/*-- vec_sel ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_sel(vector signed char __a, vector signed char __b,
+ vector unsigned char __c) {
+ return ((vector signed char)__c & __b) | (~(vector signed char)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_sel(vector signed char __a, vector signed char __b, vector bool char __c) {
+ return ((vector signed char)__c & __b) | (~(vector signed char)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sel(vector bool char __a, vector bool char __b, vector unsigned char __c) {
+ return ((vector bool char)__c & __b) | (~(vector bool char)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sel(vector bool char __a, vector bool char __b, vector bool char __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sel(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sel(vector unsigned char __a, vector unsigned char __b,
+ vector bool char __c) {
+ return ((vector unsigned char)__c & __b) | (~(vector unsigned char)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sel(vector signed short __a, vector signed short __b,
+ vector unsigned short __c) {
+ return ((vector signed short)__c & __b) | (~(vector signed short)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sel(vector signed short __a, vector signed short __b,
+ vector bool short __c) {
+ return ((vector signed short)__c & __b) | (~(vector signed short)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sel(vector bool short __a, vector bool short __b,
+ vector unsigned short __c) {
+ return ((vector bool short)__c & __b) | (~(vector bool short)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sel(vector bool short __a, vector bool short __b, vector bool short __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sel(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sel(vector unsigned short __a, vector unsigned short __b,
+ vector bool short __c) {
+ return (((vector unsigned short)__c & __b) |
+ (~(vector unsigned short)__c & __a));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sel(vector signed int __a, vector signed int __b,
+ vector unsigned int __c) {
+ return ((vector signed int)__c & __b) | (~(vector signed int)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sel(vector signed int __a, vector signed int __b, vector bool int __c) {
+ return ((vector signed int)__c & __b) | (~(vector signed int)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sel(vector bool int __a, vector bool int __b, vector unsigned int __c) {
+ return ((vector bool int)__c & __b) | (~(vector bool int)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sel(vector bool int __a, vector bool int __b, vector bool int __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sel(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sel(vector unsigned int __a, vector unsigned int __b, vector bool int __c) {
+ return ((vector unsigned int)__c & __b) | (~(vector unsigned int)__c & __a);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sel(vector signed long long __a, vector signed long long __b,
+ vector unsigned long long __c) {
+ return (((vector signed long long)__c & __b) |
+ (~(vector signed long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sel(vector signed long long __a, vector signed long long __b,
+ vector bool long long __c) {
+ return (((vector signed long long)__c & __b) |
+ (~(vector signed long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sel(vector bool long long __a, vector bool long long __b,
+ vector unsigned long long __c) {
+ return (((vector bool long long)__c & __b) |
+ (~(vector bool long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sel(vector bool long long __a, vector bool long long __b,
+ vector bool long long __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sel(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned long long __c) {
+ return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sel(vector unsigned long long __a, vector unsigned long long __b,
+ vector bool long long __c) {
+ return (((vector unsigned long long)__c & __b) |
+ (~(vector unsigned long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai vector double
+vec_sel(vector double __a, vector double __b, vector unsigned long long __c) {
+ return (vector double)((__c & (vector unsigned long long)__b) |
+ (~__c & (vector unsigned long long)__a));
+}
+
+static inline __ATTRS_o_ai vector double
+vec_sel(vector double __a, vector double __b, vector bool long long __c) {
+ vector unsigned long long __ac = (vector unsigned long long)__a;
+ vector unsigned long long __bc = (vector unsigned long long)__b;
+ vector unsigned long long __cc = (vector unsigned long long)__c;
+ return (vector double)((__cc & __bc) | (~__cc & __ac));
+}
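+
+/* Usage sketch (illustrative): bits set in the third operand select from
+   the second operand, clear bits from the first, so a per-element maximum
+   is a compare followed by a select:
+
+     vector signed int __max = vec_sel(__b, __a, vec_cmpgt(__a, __b));
+*/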
+
+/*-- vec_gather_element -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed int
+vec_gather_element(vector signed int __vec, vector unsigned int __offset,
+ const signed int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ __vec[__index] = *(const signed int *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_gather_element(vector bool int __vec, vector unsigned int __offset,
+ const unsigned int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ __vec[__index] = *(const unsigned int *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_gather_element(vector unsigned int __vec, vector unsigned int __offset,
+ const unsigned int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ __vec[__index] = *(const unsigned int *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_gather_element(vector signed long long __vec,
+ vector unsigned long long __offset,
+ const signed long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ __vec[__index] = *(const signed long long *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_gather_element(vector bool long long __vec,
+ vector unsigned long long __offset,
+ const unsigned long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ __vec[__index] = *(const unsigned long long *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_gather_element(vector unsigned long long __vec,
+ vector unsigned long long __offset,
+ const unsigned long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ __vec[__index] = *(const unsigned long long *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_gather_element(vector double __vec, vector unsigned long long __offset,
+ const double *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ __vec[__index] = *(const double *)(
+ (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ return __vec;
+}
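+
+/* Usage sketch (illustrative): the offsets are byte offsets from __ptr,
+   and only the lane named by the constant __index is replaced:
+
+     vector unsigned int __off = { 0, 4, 8, 12 };
+     __v = vec_gather_element(__v, __off, __base, 2);
+       reloads lane 2 from 8 bytes past __base; other lanes are unchanged
+*/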
+
+/*-- vec_scatter_element ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector signed int __vec, vector unsigned int __offset,
+ signed int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ *(signed int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector bool int __vec, vector unsigned int __offset,
+ unsigned int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ *(unsigned int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector unsigned int __vec, vector unsigned int __offset,
+ unsigned int *__ptr, int __index)
+ __constant_range(__index, 0, 3) {
+ *(unsigned int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector signed long long __vec,
+ vector unsigned long long __offset,
+ signed long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ *(signed long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector bool long long __vec,
+ vector unsigned long long __offset,
+ unsigned long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ *(unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector unsigned long long __vec,
+ vector unsigned long long __offset,
+ unsigned long long *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ *(unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(vector double __vec, vector unsigned long long __offset,
+ double *__ptr, int __index)
+ __constant_range(__index, 0, 1) {
+ *(double *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ __vec[__index];
+}
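+
+/* Usage sketch (illustrative): the store-side counterpart of
+   vec_gather_element; exactly one lane is written, to the byte address
+   __ptr + __offset[__index]:
+
+     vec_scatter_element(__v, __off, __base, 0);
+       stores lane 0 of __v at __off[0] bytes past __base
+*/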
+
+/*-- vec_xld2 ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_xld2(long __offset, const signed char *__ptr) {
+ return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_xld2(long __offset, const unsigned char *__ptr) {
+ return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_xld2(long __offset, const signed short *__ptr) {
+ return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_xld2(long __offset, const unsigned short *__ptr) {
+ return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_xld2(long __offset, const signed int *__ptr) {
+ return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_xld2(long __offset, const unsigned int *__ptr) {
+ return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_xld2(long __offset, const signed long long *__ptr) {
+ return *(const vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_xld2(long __offset, const unsigned long long *__ptr) {
+  return *(const vector unsigned long long *)
+    ((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_xld2(long __offset, const double *__ptr) {
+ return *(const vector double *)((__INTPTR_TYPE__)__ptr + __offset);
+}
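+
+/* Usage sketch (illustrative): a full 16-byte load from __ptr plus a byte
+   offset; note the offset is in bytes, not elements:
+
+     double __buf[4] = { 1.0, 2.0, 3.0, 4.0 };
+     vector double __v = vec_xld2(16, __buf);    yields { 3.0, 4.0 }
+*/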
+
+/*-- vec_xlw4 ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_xlw4(long __offset, const signed char *__ptr) {
+ return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_xlw4(long __offset, const unsigned char *__ptr) {
+ return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_xlw4(long __offset, const signed short *__ptr) {
+ return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_xlw4(long __offset, const unsigned short *__ptr) {
+ return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_xlw4(long __offset, const signed int *__ptr) {
+ return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_xlw4(long __offset, const unsigned int *__ptr) {
+ return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset);
+}
+
+/*-- vec_xstd2 --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector signed char __vec, long __offset, signed char *__ptr) {
+ *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector unsigned char __vec, long __offset, unsigned char *__ptr) {
+ *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector signed short __vec, long __offset, signed short *__ptr) {
+ *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector unsigned short __vec, long __offset, unsigned short *__ptr) {
+ *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector signed int __vec, long __offset, signed int *__ptr) {
+ *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector unsigned int __vec, long __offset, unsigned int *__ptr) {
+ *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector signed long long __vec, long __offset,
+ signed long long *__ptr) {
+ *(vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector unsigned long long __vec, long __offset,
+ unsigned long long *__ptr) {
+ *(vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset) =
+ __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstd2(vector double __vec, long __offset, double *__ptr) {
+ *(vector double *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+/*-- vec_xstw4 --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector signed char __vec, long __offset, signed char *__ptr) {
+ *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector unsigned char __vec, long __offset, unsigned char *__ptr) {
+ *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector signed short __vec, long __offset, signed short *__ptr) {
+ *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector unsigned short __vec, long __offset, unsigned short *__ptr) {
+ *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector signed int __vec, long __offset, signed int *__ptr) {
+ *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xstw4(vector unsigned int __vec, long __offset, unsigned int *__ptr) {
+ *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+}
+
+/*-- vec_load_bndry ---------------------------------------------------------*/
+
+extern __ATTRS_o vector signed char
+vec_load_bndry(const signed char *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector unsigned char
+vec_load_bndry(const unsigned char *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector signed short
+vec_load_bndry(const signed short *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector unsigned short
+vec_load_bndry(const unsigned short *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector signed int
+vec_load_bndry(const signed int *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector unsigned int
+vec_load_bndry(const unsigned int *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector signed long long
+vec_load_bndry(const signed long long *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector unsigned long long
+vec_load_bndry(const unsigned long long *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o vector double
+vec_load_bndry(const double *__ptr, unsigned short __len)
+ __constant_pow2_range(__len, 64, 4096);
+
+#define vec_load_bndry(X, Y) ((__typeof__((vec_load_bndry)((X), (Y)))) \
+ __builtin_s390_vlbb((X), ((Y) == 64 ? 0 : \
+ (Y) == 128 ? 1 : \
+ (Y) == 256 ? 2 : \
+ (Y) == 512 ? 3 : \
+ (Y) == 1024 ? 4 : \
+ (Y) == 2048 ? 5 : \
+ (Y) == 4096 ? 6 : -1)))
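+
+/* Reading the macro above: __len must be a literal power of two from 64 to
+   4096; the conditional chain turns it into the 0..6 boundary code that
+   __builtin_s390_vlbb takes (anything else becomes -1, which the builtin's
+   immediate check can then reject). */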
+
+/*-- vec_load_len -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_load_len(const signed char *__ptr, unsigned int __len) {
+ return (vector signed char)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_load_len(const unsigned char *__ptr, unsigned int __len) {
+ return (vector unsigned char)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_load_len(const signed short *__ptr, unsigned int __len) {
+ return (vector signed short)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_load_len(const unsigned short *__ptr, unsigned int __len) {
+ return (vector unsigned short)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_load_len(const signed int *__ptr, unsigned int __len) {
+ return (vector signed int)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_load_len(const unsigned int *__ptr, unsigned int __len) {
+ return (vector unsigned int)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_load_len(const signed long long *__ptr, unsigned int __len) {
+ return (vector signed long long)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_load_len(const unsigned long long *__ptr, unsigned int __len) {
+ return (vector unsigned long long)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_load_len(const double *__ptr, unsigned int __len) {
+ return (vector double)__builtin_s390_vll(__len, __ptr);
+}
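+
+/* Usage sketch (illustrative): per the underlying VLL instruction, __len
+   is the index of the highest byte loaded, so loading just the first four
+   bytes of a buffer is:
+
+     vector unsigned char __v = vec_load_len(__buf, 3);
+       bytes 0..3 come from __buf; the remaining bytes are zeroed
+*/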
+
+/*-- vec_store_len ----------------------------------------------------------*/
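+
+/* All overloads below funnel through __builtin_s390_vstl, which is typed
+   on vector signed char; the casts merely reinterpret the operand, and
+   the same VSTL store is generated for every element type (an observation
+   from the code, not an extra guarantee). */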
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector signed char __vec, signed char *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector unsigned char __vec, unsigned char *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector signed short __vec, signed short *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector unsigned short __vec, unsigned short *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector signed int __vec, signed int *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector unsigned int __vec, unsigned int *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector signed long long __vec, signed long long *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector unsigned long long __vec, unsigned long long *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(vector double __vec, double *__ptr,
+ unsigned int __len) {
+ __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+}
+
+/*-- vec_load_pair ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed long long
+vec_load_pair(signed long long __a, signed long long __b) {
+ return (vector signed long long)(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_load_pair(unsigned long long __a, unsigned long long __b) {
+ return (vector unsigned long long)(__a, __b);
+}
+
+/*-- vec_genmask ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_genmask(unsigned short __mask)
+ __constant(__mask) {
+ return (vector unsigned char)(
+ __mask & 0x8000 ? 0xff : 0,
+ __mask & 0x4000 ? 0xff : 0,
+ __mask & 0x2000 ? 0xff : 0,
+ __mask & 0x1000 ? 0xff : 0,
+ __mask & 0x0800 ? 0xff : 0,
+ __mask & 0x0400 ? 0xff : 0,
+ __mask & 0x0200 ? 0xff : 0,
+ __mask & 0x0100 ? 0xff : 0,
+ __mask & 0x0080 ? 0xff : 0,
+ __mask & 0x0040 ? 0xff : 0,
+ __mask & 0x0020 ? 0xff : 0,
+ __mask & 0x0010 ? 0xff : 0,
+ __mask & 0x0008 ? 0xff : 0,
+ __mask & 0x0004 ? 0xff : 0,
+ __mask & 0x0002 ? 0xff : 0,
+ __mask & 0x0001 ? 0xff : 0);
+}
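+
+/* Usage sketch (illustrative): mask bit 0x8000 controls byte 0 and bit
+   0x0001 controls byte 15, so:
+
+     vector unsigned char __m = vec_genmask(0xff00);
+       yields 0xff in bytes 0..7 and 0x00 in bytes 8..15
+*/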
+
+/*-- vec_genmasks_* ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_genmasks_8(unsigned char __first, unsigned char __last)
+ __constant(__first) __constant(__last) {
+ unsigned char __bit1 = __first & 7;
+ unsigned char __bit2 = __last & 7;
+ unsigned char __mask1 = (unsigned char)(1U << (7 - __bit1) << 1) - 1;
+ unsigned char __mask2 = (unsigned char)(1U << (7 - __bit2)) - 1;
+ unsigned char __value = (__bit1 <= __bit2 ?
+ __mask1 & ~__mask2 :
+ __mask1 | ~__mask2);
+ return (vector unsigned char)__value;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_genmasks_16(unsigned char __first, unsigned char __last)
+ __constant(__first) __constant(__last) {
+ unsigned char __bit1 = __first & 15;
+ unsigned char __bit2 = __last & 15;
+ unsigned short __mask1 = (unsigned short)(1U << (15 - __bit1) << 1) - 1;
+ unsigned short __mask2 = (unsigned short)(1U << (15 - __bit2)) - 1;
+ unsigned short __value = (__bit1 <= __bit2 ?
+ __mask1 & ~__mask2 :
+ __mask1 | ~__mask2);
+ return (vector unsigned short)__value;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_genmasks_32(unsigned char __first, unsigned char __last)
+ __constant(__first) __constant(__last) {
+ unsigned char __bit1 = __first & 31;
+ unsigned char __bit2 = __last & 31;
+ unsigned int __mask1 = (1U << (31 - __bit1) << 1) - 1;
+ unsigned int __mask2 = (1U << (31 - __bit2)) - 1;
+ unsigned int __value = (__bit1 <= __bit2 ?
+ __mask1 & ~__mask2 :
+ __mask1 | ~__mask2);
+ return (vector unsigned int)__value;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_genmasks_64(unsigned char __first, unsigned char __last)
+ __constant(__first) __constant(__last) {
+ unsigned char __bit1 = __first & 63;
+ unsigned char __bit2 = __last & 63;
+ unsigned long long __mask1 = (1ULL << (63 - __bit1) << 1) - 1;
+ unsigned long long __mask2 = (1ULL << (63 - __bit2)) - 1;
+ unsigned long long __value = (__bit1 <= __bit2 ?
+ __mask1 & ~__mask2 :
+ __mask1 | ~__mask2);
+ return (vector unsigned long long)__value;
+}
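+
+/* Usage sketch (illustrative): bits are numbered from the most significant
+   end and the [__first, __last] range is inclusive; when __first exceeds
+   __last the run of ones wraps around instead:
+
+     vector unsigned int __m = vec_genmasks_32(8, 15);
+       yields 0x00ff0000 in every element
+*/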
+
+/*-- vec_splat --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_splat(vector signed char __vec, int __index)
+ __constant_range(__index, 0, 15) {
+ return (vector signed char)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_splat(vector bool char __vec, int __index)
+ __constant_range(__index, 0, 15) {
+ return (vector bool char)(vector unsigned char)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_splat(vector unsigned char __vec, int __index)
+ __constant_range(__index, 0, 15) {
+ return (vector unsigned char)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_splat(vector signed short __vec, int __index)
+ __constant_range(__index, 0, 7) {
+ return (vector signed short)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_splat(vector bool short __vec, int __index)
+ __constant_range(__index, 0, 7) {
+ return (vector bool short)(vector unsigned short)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_splat(vector unsigned short __vec, int __index)
+ __constant_range(__index, 0, 7) {
+ return (vector unsigned short)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_splat(vector signed int __vec, int __index)
+ __constant_range(__index, 0, 3) {
+ return (vector signed int)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_splat(vector bool int __vec, int __index)
+ __constant_range(__index, 0, 3) {
+ return (vector bool int)(vector unsigned int)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_splat(vector unsigned int __vec, int __index)
+ __constant_range(__index, 0, 3) {
+ return (vector unsigned int)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_splat(vector signed long long __vec, int __index)
+ __constant_range(__index, 0, 1) {
+ return (vector signed long long)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_splat(vector bool long long __vec, int __index)
+ __constant_range(__index, 0, 1) {
+ return (vector bool long long)(vector unsigned long long)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_splat(vector unsigned long long __vec, int __index)
+ __constant_range(__index, 0, 1) {
+ return (vector unsigned long long)__vec[__index];
+}
+
+static inline __ATTRS_o_ai vector double
+vec_splat(vector double __vec, int __index)
+ __constant_range(__index, 0, 1) {
+ return (vector double)__vec[__index];
+}
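+
+/* Usage sketch (illustrative): replicates one existing lane across the
+   whole vector; the index must be a constant in range for the element
+   type:
+
+     vector signed int __v = { 1, 2, 3, 4 };
+     vector signed int __s = vec_splat(__v, 2);    yields { 3, 3, 3, 3 }
+*/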
+
+/*-- vec_splat_s* -----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector signed char
+vec_splat_s8(signed char __scalar)
+ __constant(__scalar) {
+ return (vector signed char)__scalar;
+}
+
+static inline __ATTRS_ai vector signed short
+vec_splat_s16(signed short __scalar)
+ __constant(__scalar) {
+ return (vector signed short)__scalar;
+}
+
+static inline __ATTRS_ai vector signed int
+vec_splat_s32(signed short __scalar)
+ __constant(__scalar) {
+ return (vector signed int)(signed int)__scalar;
+}
+
+static inline __ATTRS_ai vector signed long long
+vec_splat_s64(signed short __scalar)
+ __constant(__scalar) {
+  return (vector signed long long)(signed long long)__scalar;
+}
+
+/*-- vec_splat_u* -----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_splat_u8(unsigned char __scalar)
+ __constant(__scalar) {
+ return (vector unsigned char)__scalar;
+}
+
+static inline __ATTRS_ai vector unsigned short
+vec_splat_u16(unsigned short __scalar)
+ __constant(__scalar) {
+ return (vector unsigned short)__scalar;
+}
+
+static inline __ATTRS_ai vector unsigned int
+vec_splat_u32(signed short __scalar)
+ __constant(__scalar) {
+ return (vector unsigned int)(signed int)__scalar;
+}
+
+static inline __ATTRS_ai vector unsigned long long
+vec_splat_u64(signed short __scalar)
+ __constant(__scalar) {
+ return (vector unsigned long long)(signed long long)__scalar;
+}
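+
+/* Note: the 32- and 64-bit variants above take a signed short argument,
+   matching the 16-bit signed immediate of the VREPI instruction they are
+   presumably lowered to; the value is widened to the element type before
+   being splatted. */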
+
+/*-- vec_splats -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_splats(signed char __scalar) {
+ return (vector signed char)__scalar;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_splats(unsigned char __scalar) {
+ return (vector unsigned char)__scalar;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_splats(signed short __scalar) {
+ return (vector signed short)__scalar;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_splats(unsigned short __scalar) {
+ return (vector unsigned short)__scalar;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_splats(signed int __scalar) {
+ return (vector signed int)__scalar;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_splats(unsigned int __scalar) {
+ return (vector unsigned int)__scalar;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_splats(signed long long __scalar) {
+ return (vector signed long long)__scalar;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_splats(unsigned long long __scalar) {
+ return (vector unsigned long long)__scalar;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_splats(double __scalar) {
+ return (vector double)__scalar;
+}
+
+/*-- vec_extend_s64 ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed long long
+vec_extend_s64(vector signed char __a) {
+ return (vector signed long long)(__a[7], __a[15]);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_extend_s64(vector signed short __a) {
+ return (vector signed long long)(__a[3], __a[7]);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_extend_s64(vector signed int __a) {
+ return (vector signed long long)(__a[1], __a[3]);
+}
+
+/*-- vec_mergeh -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_mergeh(vector signed char __a, vector signed char __b) {
+ return (vector signed char)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_mergeh(vector bool char __a, vector bool char __b) {
+ return (vector bool char)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_mergeh(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mergeh(vector signed short __a, vector signed short __b) {
+ return (vector signed short)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_mergeh(vector bool short __a, vector bool short __b) {
+ return (vector bool short)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mergeh(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)(
+ __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mergeh(vector signed int __a, vector signed int __b) {
+ return (vector signed int)(__a[0], __b[0], __a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_mergeh(vector bool int __a, vector bool int __b) {
+ return (vector bool int)(__a[0], __b[0], __a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mergeh(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)(__a[0], __b[0], __a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_mergeh(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)(__a[0], __b[0]);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_mergeh(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)(__a[0], __b[0]);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_mergeh(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)(__a[0], __b[0]);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_mergeh(vector double __a, vector double __b) {
+ return (vector double)(__a[0], __b[0]);
+}
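+
+/* Usage sketch (illustrative): interleaves the high (leftmost) halves of
+   the two operands; vec_mergel below does the same with the low halves:
+
+     vector signed int __a = { 1, 2, 3, 4 }, __b = { 5, 6, 7, 8 };
+     vector signed int __h = vec_mergeh(__a, __b);    yields { 1, 5, 2, 6 }
+*/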
+
+/*-- vec_mergel -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_mergel(vector signed char __a, vector signed char __b) {
+ return (vector signed char)(
+ __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
+ __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_mergel(vector bool char __a, vector bool char __b) {
+ return (vector bool char)(
+ __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
+ __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_mergel(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)(
+ __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
+ __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mergel(vector signed short __a, vector signed short __b) {
+ return (vector signed short)(
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_mergel(vector bool short __a, vector bool short __b) {
+ return (vector bool short)(
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mergel(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)(
+ __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mergel(vector signed int __a, vector signed int __b) {
+ return (vector signed int)(__a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_mergel(vector bool int __a, vector bool int __b) {
+ return (vector bool int)(__a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mergel(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)(__a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_mergel(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)(__a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_mergel(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)(__a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_mergel(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)(__a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_mergel(vector double __a, vector double __b) {
+ return (vector double)(__a[1], __b[1]);
+}
+
+/*-- vec_pack ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_pack(vector signed short __a, vector signed short __b) {
+ vector signed char __ac = (vector signed char)__a;
+ vector signed char __bc = (vector signed char)__b;
+ return (vector signed char)(
+ __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
+ __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_pack(vector bool short __a, vector bool short __b) {
+ vector bool char __ac = (vector bool char)__a;
+ vector bool char __bc = (vector bool char)__b;
+ return (vector bool char)(
+ __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
+ __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_pack(vector unsigned short __a, vector unsigned short __b) {
+ vector unsigned char __ac = (vector unsigned char)__a;
+ vector unsigned char __bc = (vector unsigned char)__b;
+ return (vector unsigned char)(
+ __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
+ __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_pack(vector signed int __a, vector signed int __b) {
+ vector signed short __ac = (vector signed short)__a;
+ vector signed short __bc = (vector signed short)__b;
+ return (vector signed short)(
+ __ac[1], __ac[3], __ac[5], __ac[7],
+ __bc[1], __bc[3], __bc[5], __bc[7]);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_pack(vector bool int __a, vector bool int __b) {
+ vector bool short __ac = (vector bool short)__a;
+ vector bool short __bc = (vector bool short)__b;
+ return (vector bool short)(
+ __ac[1], __ac[3], __ac[5], __ac[7],
+ __bc[1], __bc[3], __bc[5], __bc[7]);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_pack(vector unsigned int __a, vector unsigned int __b) {
+ vector unsigned short __ac = (vector unsigned short)__a;
+ vector unsigned short __bc = (vector unsigned short)__b;
+ return (vector unsigned short)(
+ __ac[1], __ac[3], __ac[5], __ac[7],
+ __bc[1], __bc[3], __bc[5], __bc[7]);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_pack(vector signed long long __a, vector signed long long __b) {
+ vector signed int __ac = (vector signed int)__a;
+ vector signed int __bc = (vector signed int)__b;
+ return (vector signed int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_pack(vector bool long long __a, vector bool long long __b) {
+ vector bool int __ac = (vector bool int)__a;
+ vector bool int __bc = (vector bool int)__b;
+ return (vector bool int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_pack(vector unsigned long long __a, vector unsigned long long __b) {
+ vector unsigned int __ac = (vector unsigned int)__a;
+ vector unsigned int __bc = (vector unsigned int)__b;
+ return (vector unsigned int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+}
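+
+/* Note (derived from the definitions above): on this big-endian target the
+   odd-indexed narrow sub-elements are the low-order halves, so vec_pack is
+   a truncating pack keeping the low half of each wide element:
+
+     vector signed int __a = { 0x10001, 0x10002, 0x10003, 0x10004 };
+     vec_pack(__a, __a) yields shorts { 1, 2, 3, 4, 1, 2, 3, 4 }
+*/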
+
+/*-- vec_packs --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_packs(vector signed short __a, vector signed short __b) {
+ return __builtin_s390_vpksh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_packs(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vpklsh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_packs(vector signed int __a, vector signed int __b) {
+ return __builtin_s390_vpksf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_packs(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vpklsf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_packs(vector signed long long __a, vector signed long long __b) {
+ return __builtin_s390_vpksg(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_packs(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vpklsg(__a, __b);
+}
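+
+/* Usage sketch (illustrative): unlike vec_pack, these saturate rather than
+   truncate (VECTOR PACK SATURATE):
+
+     vector signed short __a = vec_splats((signed short)300);
+     vec_packs(__a, __a) yields 127 in every byte
+*/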
+
+/*-- vec_packs_cc -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_packs_cc(vector signed short __a, vector signed short __b, int *__cc) {
+ return __builtin_s390_vpkshs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_packs_cc(vector unsigned short __a, vector unsigned short __b, int *__cc) {
+ return __builtin_s390_vpklshs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_packs_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return __builtin_s390_vpksfs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_packs_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+ return __builtin_s390_vpklsfs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_packs_cc(vector signed long long __a, vector signed long long __b,
+ int *__cc) {
+ return __builtin_s390_vpksgs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_packs_cc(vector unsigned long long __a, vector unsigned long long __b,
+ int *__cc) {
+ return __builtin_s390_vpklsgs(__a, __b, __cc);
+}
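+
+/* Note: the int *__cc out-parameter receives the condition code set by the
+   saturating pack, which encodes whether any element saturated; the exact
+   encoding is not spelled out in this header. */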
+
+/*-- vec_packsu -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_packsu(vector signed short __a, vector signed short __b) {
+ const vector signed short __zero = (vector signed short)0;
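+  /* Clear negative lanes first, then apply the unsigned saturating pack:
+     (__a >= __zero) is all ones in non-negative lanes and all zeros in
+     negative ones. */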
+ return __builtin_s390_vpklsh(
+ (vector unsigned short)(__a >= __zero) & (vector unsigned short)__a,
+ (vector unsigned short)(__b >= __zero) & (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_packsu(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vpklsh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_packsu(vector signed int __a, vector signed int __b) {
+ const vector signed int __zero = (vector signed int)0;
+ return __builtin_s390_vpklsf(
+ (vector unsigned int)(__a >= __zero) & (vector unsigned int)__a,
+ (vector unsigned int)(__b >= __zero) & (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_packsu(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vpklsf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_packsu(vector signed long long __a, vector signed long long __b) {
+ const vector signed long long __zero = (vector signed long long)0;
+ return __builtin_s390_vpklsg(
+ (vector unsigned long long)(__a >= __zero) &
+ (vector unsigned long long)__a,
+ (vector unsigned long long)(__b >= __zero) &
+ (vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_packsu(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vpklsg(__a, __b);
+}
+
+/*-- vec_packsu_cc ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_packsu_cc(vector unsigned short __a, vector unsigned short __b, int *__cc) {
+ return __builtin_s390_vpklshs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_packsu_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+ return __builtin_s390_vpklsfs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_packsu_cc(vector unsigned long long __a, vector unsigned long long __b,
+ int *__cc) {
+ return __builtin_s390_vpklsgs(__a, __b, __cc);
+}
+
+/*-- vec_unpackh ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed short
+vec_unpackh(vector signed char __a) {
+ return __builtin_s390_vuphb(__a);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_unpackh(vector bool char __a) {
+ return (vector bool short)__builtin_s390_vuphb((vector signed char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_unpackh(vector unsigned char __a) {
+ return __builtin_s390_vuplhb(__a);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_unpackh(vector signed short __a) {
+ return __builtin_s390_vuphh(__a);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_unpackh(vector bool short __a) {
+ return (vector bool int)__builtin_s390_vuphh((vector signed short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_unpackh(vector unsigned short __a) {
+ return __builtin_s390_vuplhh(__a);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_unpackh(vector signed int __a) {
+ return __builtin_s390_vuphf(__a);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_unpackh(vector bool int __a) {
+ return (vector bool long long)__builtin_s390_vuphf((vector signed int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_unpackh(vector unsigned int __a) {
+ return __builtin_s390_vuplhf(__a);
+}
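+
+/* Usage sketch (illustrative): widens the high (leftmost) half of the
+   vector to double-width elements, sign- or zero-extending per the
+   argument type; vec_unpackl below handles the low half:
+
+     vector signed short __a = { -1, 2, -3, 4, 5, 6, 7, 8 };
+     vec_unpackh(__a) yields ints { -1, 2, -3, 4 }
+*/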
+
+/*-- vec_unpackl ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed short
+vec_unpackl(vector signed char __a) {
+ return __builtin_s390_vuplb(__a);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_unpackl(vector bool char __a) {
+ return (vector bool short)__builtin_s390_vuplb((vector signed char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_unpackl(vector unsigned char __a) {
+ return __builtin_s390_vupllb(__a);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_unpackl(vector signed short __a) {
+ return __builtin_s390_vuplhw(__a);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_unpackl(vector bool short __a) {
+ return (vector bool int)__builtin_s390_vuplhw((vector signed short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_unpackl(vector unsigned short __a) {
+ return __builtin_s390_vupllh(__a);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_unpackl(vector signed int __a) {
+ return __builtin_s390_vuplf(__a);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_unpackl(vector bool int __a) {
+ return (vector bool long long)__builtin_s390_vuplf((vector signed int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_unpackl(vector unsigned int __a) {
+ return __builtin_s390_vupllf(__a);
+}
+
+/*-- vec_cmpeq --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpeq(vector bool char __a, vector bool char __b) {
+ return (vector bool char)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpeq(vector signed char __a, vector signed char __b) {
+ return (vector bool char)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpeq(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpeq(vector bool short __a, vector bool short __b) {
+ return (vector bool short)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpeq(vector signed short __a, vector signed short __b) {
+ return (vector bool short)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpeq(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpeq(vector bool int __a, vector bool int __b) {
+ return (vector bool int)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpeq(vector signed int __a, vector signed int __b) {
+ return (vector bool int)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpeq(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpeq(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpeq(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)(__a == __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpeq(vector double __a, vector double __b) {
+ return (vector bool long long)(__a == __b);
+}
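+
+/* Usage sketch (illustrative): every vec_cmp* routine here and below
+   returns a lane mask (all ones where the relation holds, all zeros where
+   it does not) that feeds directly into vec_sel:
+
+     vector bool int __m = vec_cmpeq(__a, __b);
+     vector signed int __r = vec_sel(__x, __y, __m);    __y where equal
+*/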
+
+/*-- vec_cmpge --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpge(vector signed char __a, vector signed char __b) {
+ return (vector bool char)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpge(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpge(vector signed short __a, vector signed short __b) {
+ return (vector bool short)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpge(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpge(vector signed int __a, vector signed int __b) {
+ return (vector bool int)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpge(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpge(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpge(vector double __a, vector double __b) {
+ return (vector bool long long)(__a >= __b);
+}
+
+/*-- vec_cmpgt --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpgt(vector signed char __a, vector signed char __b) {
+ return (vector bool char)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpgt(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpgt(vector signed short __a, vector signed short __b) {
+ return (vector bool short)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpgt(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpgt(vector signed int __a, vector signed int __b) {
+ return (vector bool int)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpgt(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpgt(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)(__a > __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmpgt(vector double __a, vector double __b) {
+ return (vector bool long long)(__a > __b);
+}
+
+/*-- vec_cmple --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmple(vector signed char __a, vector signed char __b) {
+ return (vector bool char)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmple(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmple(vector signed short __a, vector signed short __b) {
+ return (vector bool short)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmple(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmple(vector signed int __a, vector signed int __b) {
+ return (vector bool int)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmple(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmple(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmple(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmple(vector double __a, vector double __b) {
+ return (vector bool long long)(__a <= __b);
+}
+
+/*-- vec_cmplt --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmplt(vector signed char __a, vector signed char __b) {
+ return (vector bool char)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmplt(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmplt(vector signed short __a, vector signed short __b) {
+ return (vector bool short)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmplt(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmplt(vector signed int __a, vector signed int __b) {
+ return (vector bool int)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmplt(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmplt(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)(__a < __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_cmplt(vector double __a, vector double __b) {
+ return (vector bool long long)(__a < __b);
+}
+
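+/* Note (editorial): the vec_cmp* routines above are element-wise compares
+ * that return a mask vector whose elements are all-ones or all-zeros, so
+ * they can feed straight into vec_sel.  A minimal sketch, assuming vec_sel
+ * follows the usual select-from-second-operand-where-mask-set convention:
+ *
+ *   vector double __max = vec_sel(__b, __a, vec_cmpgt(__a, __b));
+ *
+ * The double overloads return vector bool long long, matching the 64-bit
+ * element width of the operands.
+ */
+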
+/*-- vec_all_eq -------------------------------------------------------------*/
+
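+/* Note: the compare builtins used below (vceqbs and friends) also set a
+ * condition code following the z/Architecture vector-compare convention:
+ * 0 if the comparison holds for all elements, 1 if it holds for some but
+ * not all, and 3 if it holds for none.  The vec_all_* predicates in this
+ * section therefore test __cc == 0.
+ */
+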
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfcedbs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+/*-- vec_all_ne -------------------------------------------------------------*/
+
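+/* Note: "all not-equal" is derived from the same equal-compare builtins;
+ * condition code 3 means no element compared equal.
+ */
+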
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfcedbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+/*-- vec_all_ge -------------------------------------------------------------*/
+
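+/* Note: there is no greater-or-equal compare builtin for the integer
+ * types, so a >= b is tested as "no element of b is greater than a":
+ * the operands of the vch / vchl builtins are swapped and the result
+ * checks __cc == 3.
+ */
+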
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b,
+ (vector unsigned char)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b,
+ (vector unsigned short)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b,
+ (vector unsigned int)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b,
+ (vector unsigned long long)__a, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+/*-- vec_all_gt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a,
+ (vector unsigned char)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a,
+ (vector unsigned short)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a,
+ (vector unsigned int)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a,
+ (vector unsigned long long)__b, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__a, __b, &__cc);
+ return __cc == 0;
+}
+
+/*-- vec_all_le -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a,
+ (vector unsigned char)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a,
+ (vector unsigned short)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a,
+ (vector unsigned int)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a,
+ (vector unsigned long long)__b, &__cc);
+ return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+/*-- vec_all_lt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b,
+ (vector unsigned char)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b,
+ (vector unsigned short)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b,
+ (vector unsigned int)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b,
+ (vector unsigned long long)__a, &__cc);
+ return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__b, __a, &__cc);
+ return __cc == 0;
+}
+
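+/* Note: the negated floating-point predicates below (nge, ngt, nle, nlt)
+ * are not simply lt/le/gt/ge with the operands swapped: an unordered
+ * (NaN) comparison fails both a >= b and b > a, so for example
+ * vec_all_nge is true on all-NaN input while vec_all_lt is false.
+ * Each one tests that the positive compare holds for no element
+ * (__cc == 3).
+ */
+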
+/*-- vec_all_nge ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_nge(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+/*-- vec_all_ngt ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_ngt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__a, __b, &__cc);
+ return __cc == 3;
+}
+
+/*-- vec_all_nle ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_nle(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+/*-- vec_all_nlt ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_nlt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__b, __a, &__cc);
+ return __cc == 3;
+}
+
+/*-- vec_all_nan ------------------------------------------------------------*/
+
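+/* Note: vftcidb is the test-data-class builtin; the mask value 15 is
+ * understood here to select the NaN classes (quiet and signaling, both
+ * signs).  Condition code 0 means every element falls in the selected
+ * classes and 3 means none does -- hence the paired vec_all_numeric
+ * below.
+ */
+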
+static inline __ATTRS_ai int
+vec_all_nan(vector double __a) {
+ int __cc;
+ __builtin_s390_vftcidb(__a, 15, &__cc);
+ return __cc == 0;
+}
+
+/*-- vec_all_numeric --------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_all_numeric(vector double __a) {
+ int __cc;
+ __builtin_s390_vftcidb(__a, 15, &__cc);
+ return __cc == 3;
+}
+
+/*-- vec_any_eq -------------------------------------------------------------*/
+
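+/* Note: the vec_any_* predicates reuse the same compare builtins; since
+ * condition code 0 means "all elements" and 1 means "some but not all",
+ * testing __cc <= 1 asks whether at least one element compared true.
+ */
+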
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfcedbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+/*-- vec_any_ne -------------------------------------------------------------*/
+
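+/* Note: at least one element differs exactly when not all elements
+ * compare equal, i.e. whenever the equal-compare leaves a nonzero
+ * condition code.
+ */
+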
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vceqbs((vector signed char)__a,
+ (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vceqhs((vector signed short)__a,
+ (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vceqfs((vector signed int)__a,
+ (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vceqgs((vector signed long long)__a,
+ (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfcedbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+/*-- vec_any_ge -------------------------------------------------------------*/
+
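+/* Note: as with vec_all_ge, the integer cases swap the operands of the
+ * greater-than builtins; __cc != 0 on the swapped compare means not
+ * every element of b exceeds a, i.e. some element satisfies a >= b.
+ */
+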
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b,
+ (vector unsigned char)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b,
+ (vector unsigned short)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b,
+ (vector unsigned int)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b,
+ (vector unsigned long long)__a, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+/*-- vec_any_gt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a,
+ (vector unsigned char)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a,
+ (vector unsigned short)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a,
+ (vector unsigned int)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a,
+ (vector unsigned long long)__b, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__a, __b, &__cc);
+ return __cc <= 1;
+}
+
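A minimal usage sketch for this family; the target triple and the -mzvector flag are assumptions about how the header is normally consumed, not something this diff states:

    /* clang -target s390x-linux-gnu -march=z13 -mzvector any_gt.c */
    #include <vecintrin.h>
    #include <stdio.h>

    int main(void) {
      vector signed int a = {1, 2, 3, 4};
      vector signed int b = {9, 9, 9, 0};
      /* Nonzero: the last lane of a exceeds the last lane of b. */
      printf("vec_any_gt = %d\n", vec_any_gt(a, b));
      return 0;
    }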
+/*-- vec_any_le -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__a,
+ (vector unsigned char)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__a,
+ (vector unsigned short)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__a,
+ (vector unsigned int)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__a,
+ (vector unsigned long long)__b, &__cc);
+ return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+/*-- vec_any_lt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool char __a, vector signed char __b) {
+ int __cc;
+ __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool char __a, vector unsigned char __b) {
+ int __cc;
+ __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool char __a, vector bool char __b) {
+ int __cc;
+ __builtin_s390_vchlbs((vector unsigned char)__b,
+ (vector unsigned char)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool short __a, vector signed short __b) {
+ int __cc;
+ __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool short __a, vector unsigned short __b) {
+ int __cc;
+ __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool short __a, vector bool short __b) {
+ int __cc;
+ __builtin_s390_vchlhs((vector unsigned short)__b,
+ (vector unsigned short)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool int __a, vector signed int __b) {
+ int __cc;
+ __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool int __a, vector unsigned int __b) {
+ int __cc;
+ __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool int __a, vector bool int __b) {
+ int __cc;
+ __builtin_s390_vchlfs((vector unsigned int)__b,
+ (vector unsigned int)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector signed long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool long long __a, vector signed long long __b) {
+ int __cc;
+ __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector unsigned long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool long long __a, vector unsigned long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector bool long long __a, vector bool long long __b) {
+ int __cc;
+ __builtin_s390_vchlgs((vector unsigned long long)__b,
+ (vector unsigned long long)__a, &__cc);
+ return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__b, __a, &__cc);
+ return __cc <= 1;
+}
+
+/*-- vec_any_nge ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_nge(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+/*-- vec_any_ngt ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_ngt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__a, __b, &__cc);
+ return __cc != 0;
+}
+
+/*-- vec_any_nle ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_nle(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchedbs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
+/*-- vec_any_nlt ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_nlt(vector double __a, vector double __b) {
+ int __cc;
+ __builtin_s390_vfchdbs(__b, __a, &__cc);
+ return __cc != 0;
+}
+
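These negated predicates are not aliases for the swapped comparisons above: vec_any_nge(a, b) tests cc != 0 on the a >= b compare, which also fires when a lane is unordered (NaN), whereas vec_any_lt(a, b) requires b > a to actually hold in some lane. The scalar analogue (plain C, no target assumptions):

    #include <assert.h>
    #include <math.h>

    int main(void) {
      double a = NAN, b = 1.0;
      assert(!(a >= b));  /* "not greater-or-equal" holds for NaN ... */
      assert(!(a < b));   /* ... while "less than" does not.         */
      return 0;
    }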
+/*-- vec_any_nan ------------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_nan(vector double __a) {
+ int __cc;
+ __builtin_s390_vftcidb(__a, 15, &__cc);
+ return __cc != 3;
+}
+
+/*-- vec_any_numeric --------------------------------------------------------*/
+
+static inline __ATTRS_ai int
+vec_any_numeric(vector double __a) {
+ int __cc;
+ __builtin_s390_vftcidb(__a, 15, &__cc);
+ return __cc != 0;
+}
+
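vec_any_nan and vec_any_numeric run the same vftcidb classification with mask 15 (the NaN classes) and differ only in the condition tested, cc != 3 versus cc != 0, so they are not complements: a vector mixing NaN and numeric lanes satisfies both. A sketch, under the same target assumptions as above:

    #include <vecintrin.h>
    #include <math.h>
    #include <stdio.h>

    int main(void) {
      vector double v = {NAN, 1.0};  /* one NaN lane, one numeric lane */
      /* Both print 1: each kind of lane is present. */
      printf("any_nan=%d any_numeric=%d\n", vec_any_nan(v), vec_any_numeric(v));
      return 0;
    }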
+/*-- vec_andc ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_andc(vector bool char __a, vector bool char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_andc(vector signed char __a, vector signed char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_andc(vector bool char __a, vector signed char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_andc(vector signed char __a, vector bool char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_andc(vector unsigned char __a, vector unsigned char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_andc(vector bool char __a, vector unsigned char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_andc(vector unsigned char __a, vector bool char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_andc(vector bool short __a, vector bool short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_andc(vector signed short __a, vector signed short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_andc(vector bool short __a, vector signed short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_andc(vector signed short __a, vector bool short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_andc(vector unsigned short __a, vector unsigned short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_andc(vector bool short __a, vector unsigned short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_andc(vector unsigned short __a, vector bool short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_andc(vector bool int __a, vector bool int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_andc(vector signed int __a, vector signed int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_andc(vector bool int __a, vector signed int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_andc(vector signed int __a, vector bool int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_andc(vector unsigned int __a, vector unsigned int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_andc(vector bool int __a, vector unsigned int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_andc(vector unsigned int __a, vector bool int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_andc(vector bool long long __a, vector bool long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_andc(vector signed long long __a, vector signed long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_andc(vector bool long long __a, vector signed long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_andc(vector signed long long __a, vector bool long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_andc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_andc(vector bool long long __a, vector unsigned long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_andc(vector unsigned long long __a, vector bool long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_andc(vector double __a, vector double __b) {
+ return (vector double)((vector unsigned long long)__a &
+ ~(vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_andc(vector bool long long __a, vector double __b) {
+ return (vector double)((vector unsigned long long)__a &
+ ~(vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_andc(vector double __a, vector bool long long __b) {
+ return (vector double)((vector unsigned long long)__a &
+ ~(vector unsigned long long)__b);
+}
+
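Operand order matters here: every vec_andc overload computes __a & ~__b, clearing in the first operand whatever bits are set in the second, and the double overloads pun through vector unsigned long long because & is not defined on floating-point vectors. A sketch (same assumed flags as above):

    #include <vecintrin.h>
    #include <stdio.h>

    int main(void) {
      vector unsigned int a    = {0xFFu, 0xFFu, 0xFFu, 0xFFu};
      vector unsigned int mask = {0x0Fu, 0xF0u, 0x00u, 0xFFu};
      vector unsigned int r = vec_andc(a, mask);  /* a & ~mask */
      printf("%#x %#x %#x %#x\n", r[0], r[1], r[2], r[3]);  /* 0xf0 0xf 0xff 0 */
      return 0;
    }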
+/*-- vec_nor ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_nor(vector bool char __a, vector bool char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_nor(vector signed char __a, vector signed char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_nor(vector bool char __a, vector signed char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_nor(vector signed char __a, vector bool char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_nor(vector unsigned char __a, vector unsigned char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_nor(vector bool char __a, vector unsigned char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_nor(vector unsigned char __a, vector bool char __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_nor(vector bool short __a, vector bool short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_nor(vector signed short __a, vector signed short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_nor(vector bool short __a, vector signed short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_nor(vector signed short __a, vector bool short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_nor(vector unsigned short __a, vector unsigned short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_nor(vector bool short __a, vector unsigned short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_nor(vector unsigned short __a, vector bool short __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_nor(vector bool int __a, vector bool int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_nor(vector signed int __a, vector signed int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_nor(vector bool int __a, vector signed int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_nor(vector signed int __a, vector bool int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_nor(vector unsigned int __a, vector unsigned int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_nor(vector bool int __a, vector unsigned int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_nor(vector unsigned int __a, vector bool int __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_nor(vector bool long long __a, vector bool long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_nor(vector signed long long __a, vector signed long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_nor(vector bool long long __a, vector signed long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_nor(vector signed long long __a, vector bool long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_nor(vector unsigned long long __a, vector unsigned long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_nor(vector bool long long __a, vector unsigned long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_nor(vector unsigned long long __a, vector bool long long __b) {
+ return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_nor(vector double __a, vector double __b) {
+ return (vector double)~((vector unsigned long long)__a |
+ (vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_nor(vector bool long long __a, vector double __b) {
+ return (vector double)~((vector unsigned long long)__a |
+ (vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_nor(vector double __a, vector bool long long __b) {
+ return (vector double)~((vector unsigned long long)__a |
+ (vector unsigned long long)__b);
+}
+
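Since every vec_nor overload computes ~(__a | __b), passing the same vector twice yields a bitwise NOT, a common idiom on vector APIs that lack a dedicated NOT operation (an editorial sketch, not a documented API):

    #include <vecintrin.h>

    static inline vector unsigned int vnot_u32(vector unsigned int x) {
      return vec_nor(x, x);  /* ~(x | x) == ~x */
    }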
+/*-- vec_cntlz --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cntlz(vector signed char __a) {
+ return __builtin_s390_vclzb((vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cntlz(vector unsigned char __a) {
+ return __builtin_s390_vclzb(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cntlz(vector signed short __a) {
+ return __builtin_s390_vclzh((vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cntlz(vector unsigned short __a) {
+ return __builtin_s390_vclzh(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cntlz(vector signed int __a) {
+ return __builtin_s390_vclzf((vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cntlz(vector unsigned int __a) {
+ return __builtin_s390_vclzf(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_cntlz(vector signed long long __a) {
+ return __builtin_s390_vclzg((vector unsigned long long)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_cntlz(vector unsigned long long __a) {
+ return __builtin_s390_vclzg(__a);
+}
+
+/*-- vec_cnttz --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cnttz(vector signed char __a) {
+ return __builtin_s390_vctzb((vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cnttz(vector unsigned char __a) {
+ return __builtin_s390_vctzb(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cnttz(vector signed short __a) {
+ return __builtin_s390_vctzh((vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cnttz(vector unsigned short __a) {
+ return __builtin_s390_vctzh(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cnttz(vector signed int __a) {
+ return __builtin_s390_vctzf((vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cnttz(vector unsigned int __a) {
+ return __builtin_s390_vctzf(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_cnttz(vector signed long long __a) {
+ return __builtin_s390_vctzg((vector unsigned long long)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_cnttz(vector unsigned long long __a) {
+ return __builtin_s390_vctzg(__a);
+}
+
+/*-- vec_popcnt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_popcnt(vector signed char __a) {
+ return __builtin_s390_vpopctb((vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_popcnt(vector unsigned char __a) {
+ return __builtin_s390_vpopctb(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_popcnt(vector signed short __a) {
+ return __builtin_s390_vpopcth((vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_popcnt(vector unsigned short __a) {
+ return __builtin_s390_vpopcth(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_popcnt(vector signed int __a) {
+ return __builtin_s390_vpopctf((vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_popcnt(vector unsigned int __a) {
+ return __builtin_s390_vpopctf(__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_popcnt(vector signed long long __a) {
+ return __builtin_s390_vpopctg((vector unsigned long long)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_popcnt(vector unsigned long long __a) {
+ return __builtin_s390_vpopctg(__a);
+}
+
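All three bit-counting families return unsigned element counts regardless of input signedness; signed inputs are merely reinterpreted before the builtin. Usage sketch (same assumed flags):

    #include <vecintrin.h>
    #include <stdio.h>

    int main(void) {
      vector unsigned int v  = {1u, 8u, 0x80000000u, 0u};
      vector unsigned int lz = vec_cntlz(v);   /* {31, 28,  0, 32} */
      vector unsigned int tz = vec_cnttz(v);   /* { 0,  3, 31, 32} */
      vector unsigned int pc = vec_popcnt(v);  /* { 1,  1,  1,  0} */
      printf("%u %u %u\n", lz[0], tz[1], pc[2]);
      return 0;
    }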
+/*-- vec_rl -----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_rl(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_verllvb(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_rl(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_verllvb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_rl(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_verllvh(
+ (vector unsigned short)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_rl(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_verllvh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_rl(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_verllvf(
+ (vector unsigned int)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_rl(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_verllvf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_rl(vector signed long long __a, vector unsigned long long __b) {
+ return (vector signed long long)__builtin_s390_verllvg(
+ (vector unsigned long long)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_verllvg(__a, __b);
+}
+
+/*-- vec_rli ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_rli(vector signed char __a, unsigned long __b) {
+ return (vector signed char)__builtin_s390_verllb(
+ (vector unsigned char)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_rli(vector unsigned char __a, unsigned long __b) {
+ return __builtin_s390_verllb(__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_rli(vector signed short __a, unsigned long __b) {
+ return (vector signed short)__builtin_s390_verllh(
+ (vector unsigned short)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_rli(vector unsigned short __a, unsigned long __b) {
+ return __builtin_s390_verllh(__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_rli(vector signed int __a, unsigned long __b) {
+ return (vector signed int)__builtin_s390_verllf(
+ (vector unsigned int)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_rli(vector unsigned int __a, unsigned long __b) {
+ return __builtin_s390_verllf(__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_rli(vector signed long long __a, unsigned long __b) {
+ return (vector signed long long)__builtin_s390_verllg(
+ (vector unsigned long long)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_rli(vector unsigned long long __a, unsigned long __b) {
+ return __builtin_s390_verllg(__a, (int)__b);
+}
+
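The two rotate flavors differ only in where the count comes from: vec_rl takes a per-lane count vector (verllv*), while vec_rli applies one scalar count to every lane (verll*); nothing here requires the vec_rli count to be a compile-time constant, it is merely uniform. Sketch:

    #include <vecintrin.h>

    vector unsigned int rot_demo(vector unsigned int v,
                                 vector unsigned int counts, unsigned long n) {
      vector unsigned int per_lane = vec_rl(v, counts);  /* lane i by counts[i] */
      vector unsigned int uniform  = vec_rli(v, n);      /* every lane by n */
      return per_lane ^ uniform;
    }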
+/*-- vec_rl_mask ------------------------------------------------------------*/
+
+extern __ATTRS_o vector signed char
+vec_rl_mask(vector signed char __a, vector unsigned char __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector unsigned char
+vec_rl_mask(vector unsigned char __a, vector unsigned char __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector signed short
+vec_rl_mask(vector signed short __a, vector unsigned short __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector unsigned short
+vec_rl_mask(vector unsigned short __a, vector unsigned short __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector signed int
+vec_rl_mask(vector signed int __a, vector unsigned int __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector unsigned int
+vec_rl_mask(vector unsigned int __a, vector unsigned int __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector signed long long
+vec_rl_mask(vector signed long long __a, vector unsigned long long __b,
+ unsigned char __c) __constant(__c);
+
+extern __ATTRS_o vector unsigned long long
+vec_rl_mask(vector unsigned long long __a, vector unsigned long long __b,
+ unsigned char __c) __constant(__c);
+
+#define vec_rl_mask(X, Y, Z) ((__typeof__((vec_rl_mask)((X), (Y), (Z)))) \
+  __extension__ ({ \
+    vector unsigned char __res; \
+    vector unsigned char __x = (vector unsigned char)(X); \
+    vector unsigned char __y = (vector unsigned char)(Y); \
+    switch (sizeof ((X)[0])) { \
+    case 1: __res = (vector unsigned char) __builtin_s390_verimb( \
+             (vector unsigned char)__x, (vector unsigned char)__x, \
+             (vector unsigned char)__y, (Z)); break; \
+    case 2: __res = (vector unsigned char) __builtin_s390_verimh( \
+             (vector unsigned short)__x, (vector unsigned short)__x, \
+             (vector unsigned short)__y, (Z)); break; \
+    case 4: __res = (vector unsigned char) __builtin_s390_verimf( \
+             (vector unsigned int)__x, (vector unsigned int)__x, \
+             (vector unsigned int)__y, (Z)); break; \
+    default: __res = (vector unsigned char) __builtin_s390_verimg( \
+             (vector unsigned long long)__x, (vector unsigned long long)__x, \
+             (vector unsigned long long)__y, (Z)); break; \
+    } __res; }))
+
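vec_rl_mask shows the header's pattern for intrinsics that require a compile-time-constant argument: extern overloaded declarations carry the types and the __constant(__c) enforcement, and a function-like macro then shadows the name. Inside the macro, writing the call as (vec_rl_mask)((X), (Y), (Z)) parenthesizes the name so the macro is not re-expanded, letting __typeof__ resolve against the declarations to recover the right result type, while sizeof((X)[0]) dispatches to the per-width verim builtin. The same trick in miniature, with hypothetical names (my_op is illustrative only, and assumes clang's overloadable attribute, which __ATTRS_o presumably wraps):

    extern int  my_op(int x)  __attribute__((__overloadable__));
    extern long my_op(long x) __attribute__((__overloadable__));

    #define my_op(X) ((__typeof__((my_op)(X)))  /* (my_op) defeats re-expansion */ \
      __extension__ ({ __typeof__(X) __x = (X); __x + 1; }))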
+/*-- vec_sll ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_sll(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_sll(vector signed char __a, vector unsigned short __b) {
+ return (vector signed char)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_sll(vector signed char __a, vector unsigned int __b) {
+ return (vector signed char)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sll(vector bool char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sll(vector bool char __a, vector unsigned short __b) {
+ return (vector bool char)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sll(vector bool char __a, vector unsigned int __b) {
+ return (vector bool char)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sll(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsl(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sll(vector unsigned char __a, vector unsigned short __b) {
+ return __builtin_s390_vsl(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sll(vector unsigned char __a, vector unsigned int __b) {
+ return __builtin_s390_vsl(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sll(vector signed short __a, vector unsigned char __b) {
+ return (vector signed short)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sll(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sll(vector signed short __a, vector unsigned int __b) {
+ return (vector signed short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sll(vector bool short __a, vector unsigned char __b) {
+ return (vector bool short)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sll(vector bool short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sll(vector bool short __a, vector unsigned int __b) {
+ return (vector bool short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sll(vector unsigned short __a, vector unsigned char __b) {
+ return (vector unsigned short)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sll(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sll(vector unsigned short __a, vector unsigned int __b) {
+ return (vector unsigned short)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sll(vector signed int __a, vector unsigned char __b) {
+ return (vector signed int)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sll(vector signed int __a, vector unsigned short __b) {
+ return (vector signed int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sll(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sll(vector bool int __a, vector unsigned char __b) {
+ return (vector bool int)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sll(vector bool int __a, vector unsigned short __b) {
+ return (vector bool int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sll(vector bool int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sll(vector unsigned int __a, vector unsigned char __b) {
+ return (vector unsigned int)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sll(vector unsigned int __a, vector unsigned short __b) {
+ return (vector unsigned int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sll(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sll(vector signed long long __a, vector unsigned char __b) {
+ return (vector signed long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sll(vector signed long long __a, vector unsigned short __b) {
+ return (vector signed long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sll(vector signed long long __a, vector unsigned int __b) {
+ return (vector signed long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sll(vector bool long long __a, vector unsigned char __b) {
+ return (vector bool long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sll(vector bool long long __a, vector unsigned short __b) {
+ return (vector bool long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sll(vector bool long long __a, vector unsigned int __b) {
+ return (vector bool long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sll(vector unsigned long long __a, vector unsigned char __b) {
+ return (vector unsigned long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sll(vector unsigned long long __a, vector unsigned short __b) {
+ return (vector unsigned long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sll(vector unsigned long long __a, vector unsigned int __b) {
+ return (vector unsigned long long)__builtin_s390_vsl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
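All 36 vec_sll overloads collapse onto a single __builtin_s390_vsl over vector unsigned char, with the result cast back to the first argument's type, so the element type of the count vector (char, short or int) only drives overload selection, never the operation; the vec_slb block that follows funnels the same way into vslb, which takes its count at byte rather than bit granularity. A hedged usage sketch:

    #include <vecintrin.h>

    vector unsigned int shl_demo(vector unsigned int v) {
      /* The count is conventionally replicated into every byte of the
         second operand; the vector is shifted as one 128-bit value. */
      vector unsigned char amt = {3, 3, 3, 3, 3, 3, 3, 3,
                                  3, 3, 3, 3, 3, 3, 3, 3};
      return vec_sll(v, amt);
    }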
+/*-- vec_slb ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_slb(vector signed char __a, vector signed char __b) {
+ return (vector signed char)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_slb(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vslb(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_slb(vector unsigned char __a, vector signed char __b) {
+ return __builtin_s390_vslb(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_slb(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vslb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_slb(vector signed short __a, vector signed short __b) {
+ return (vector signed short)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_slb(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_slb(vector unsigned short __a, vector signed short __b) {
+ return (vector unsigned short)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_slb(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_slb(vector signed int __a, vector signed int __b) {
+ return (vector signed int)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_slb(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_slb(vector unsigned int __a, vector signed int __b) {
+ return (vector unsigned int)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_slb(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_slb(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_slb(vector signed long long __a, vector unsigned long long __b) {
+ return (vector signed long long)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_slb(vector unsigned long long __a, vector signed long long __b) {
+ return (vector unsigned long long)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_slb(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_slb(vector double __a, vector signed long long __b) {
+ return (vector double)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_slb(vector double __a, vector unsigned long long __b) {
+ return (vector double)__builtin_s390_vslb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_sld ----------------------------------------------------------------*/
+
+extern __ATTRS_o vector signed char
+vec_sld(vector signed char __a, vector signed char __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector unsigned char
+vec_sld(vector unsigned char __a, vector unsigned char __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector signed short
+vec_sld(vector signed short __a, vector signed short __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector unsigned short
+vec_sld(vector unsigned short __a, vector unsigned short __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector signed int
+vec_sld(vector signed int __a, vector signed int __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector unsigned int
+vec_sld(vector unsigned int __a, vector unsigned int __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector signed long long
+vec_sld(vector signed long long __a, vector signed long long __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector unsigned long long
+vec_sld(vector unsigned long long __a, vector unsigned long long __b, int __c)
+ __constant_range(__c, 0, 15);
+
+extern __ATTRS_o vector double
+vec_sld(vector double __a, vector double __b, int __c)
+ __constant_range(__c, 0, 15);
+
+#define vec_sld(X, Y, Z) ((__typeof__((vec_sld)((X), (Y), (Z)))) \
+  __builtin_s390_vsldb((vector unsigned char)(X), \
+                       (vector unsigned char)(Y), (Z)))
+
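Every vec_sld overload lowers to __builtin_s390_vsldb, which selects 16 consecutive bytes starting at byte offset __c from the concatenation of __a and __b; the extern-declaration-plus-macro arrangement repeats the vec_rl_mask pattern above so that __c can be checked as a constant in 0..15. Sketch:

    #include <vecintrin.h>

    vector unsigned char splice(vector unsigned char a, vector unsigned char b) {
      return vec_sld(a, b, 4);  /* bytes a[4..15] followed by b[0..3] */
    }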
+/*-- vec_sldw ---------------------------------------------------------------*/
+
+extern __ATTRS_o vector signed char
+vec_sldw(vector signed char __a, vector signed char __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector unsigned char
+vec_sldw(vector unsigned char __a, vector unsigned char __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector signed short
+vec_sldw(vector signed short __a, vector signed short __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector unsigned short
+vec_sldw(vector unsigned short __a, vector unsigned short __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector signed int
+vec_sldw(vector signed int __a, vector signed int __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector unsigned int
+vec_sldw(vector unsigned int __a, vector unsigned int __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector signed long long
+vec_sldw(vector signed long long __a, vector signed long long __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector unsigned long long
+vec_sldw(vector unsigned long long __a, vector unsigned long long __b, int __c)
+ __constant_range(__c, 0, 3);
+
+extern __ATTRS_o vector double
+vec_sldw(vector double __a, vector double __b, int __c)
+ __constant_range(__c, 0, 3);
+
+#define vec_sldw(X, Y, Z) ((__typeof__((vec_sldw)((X), (Y), (Z)))) \
+  __builtin_s390_vsldb((vector unsigned char)(X), \
+                       (vector unsigned char)(Y), (Z) * 4))
+
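vec_sldw is vec_sld with the offset scaled by four, as the macro's (Z) * 4 shows, so it splices at 32-bit-word granularity; vec_sldw(a, b, 1) and vec_sld(a, b, 4) expand to the same vsldb. A quick check sketch (same assumed flags):

    #include <vecintrin.h>
    #include <assert.h>

    int main(void) {
      vector unsigned int a = {0, 1, 2, 3}, b = {4, 5, 6, 7};
      vector unsigned int w = vec_sldw(a, b, 1);
      vector unsigned int s = vec_sld(a, b, 4);
      for (int i = 0; i < 4; ++i)
        assert(w[i] == s[i]);  /* both are {1, 2, 3, 4} */
      return 0;
    }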
+/*-- vec_sral ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_sral(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_sral(vector signed char __a, vector unsigned short __b) {
+ return (vector signed char)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_sral(vector signed char __a, vector unsigned int __b) {
+ return (vector signed char)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sral(vector bool char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sral(vector bool char __a, vector unsigned short __b) {
+ return (vector bool char)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_sral(vector bool char __a, vector unsigned int __b) {
+ return (vector bool char)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sral(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsra(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sral(vector unsigned char __a, vector unsigned short __b) {
+ return __builtin_s390_vsra(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sral(vector unsigned char __a, vector unsigned int __b) {
+ return __builtin_s390_vsra(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sral(vector signed short __a, vector unsigned char __b) {
+ return (vector signed short)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sral(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_sral(vector signed short __a, vector unsigned int __b) {
+ return (vector signed short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sral(vector bool short __a, vector unsigned char __b) {
+ return (vector bool short)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sral(vector bool short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_sral(vector bool short __a, vector unsigned int __b) {
+ return (vector bool short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sral(vector unsigned short __a, vector unsigned char __b) {
+ return (vector unsigned short)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sral(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_sral(vector unsigned short __a, vector unsigned int __b) {
+ return (vector unsigned short)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sral(vector signed int __a, vector unsigned char __b) {
+ return (vector signed int)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sral(vector signed int __a, vector unsigned short __b) {
+ return (vector signed int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_sral(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sral(vector bool int __a, vector unsigned char __b) {
+ return (vector bool int)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sral(vector bool int __a, vector unsigned short __b) {
+ return (vector bool int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_sral(vector bool int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sral(vector unsigned int __a, vector unsigned char __b) {
+ return (vector unsigned int)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sral(vector unsigned int __a, vector unsigned short __b) {
+ return (vector unsigned int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sral(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sral(vector signed long long __a, vector unsigned char __b) {
+ return (vector signed long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sral(vector signed long long __a, vector unsigned short __b) {
+ return (vector signed long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_sral(vector signed long long __a, vector unsigned int __b) {
+ return (vector signed long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sral(vector bool long long __a, vector unsigned char __b) {
+ return (vector bool long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sral(vector bool long long __a, vector unsigned short __b) {
+ return (vector bool long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_sral(vector bool long long __a, vector unsigned int __b) {
+ return (vector bool long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sral(vector unsigned long long __a, vector unsigned char __b) {
+ return (vector unsigned long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sral(vector unsigned long long __a, vector unsigned short __b) {
+ return (vector unsigned long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sral(vector unsigned long long __a, vector unsigned int __b) {
+ return (vector unsigned long long)__builtin_s390_vsra(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_srab ---------------------------------------------------------------*/
+
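+/* vec_srab shifts the entire 128-bit contents of __a right arithmetically by
+ a byte count taken from __b, whereas vec_sral (above) shifts by a bit count;
+ pairing a byte shift with a bit shift covers any shift of 0-127 bits. */
+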
+static inline __ATTRS_o_ai vector signed char
+vec_srab(vector signed char __a, vector signed char __b) {
+ return (vector signed char)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_srab(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vsrab(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srab(vector unsigned char __a, vector signed char __b) {
+ return __builtin_s390_vsrab(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srab(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsrab(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srab(vector signed short __a, vector signed short __b) {
+ return (vector signed short)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srab(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srab(vector unsigned short __a, vector signed short __b) {
+ return (vector unsigned short)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srab(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srab(vector signed int __a, vector signed int __b) {
+ return (vector signed int)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srab(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srab(vector unsigned int __a, vector signed int __b) {
+ return (vector unsigned int)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srab(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srab(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srab(vector signed long long __a, vector unsigned long long __b) {
+ return (vector signed long long)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srab(vector unsigned long long __a, vector signed long long __b) {
+ return (vector unsigned long long)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srab(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_srab(vector double __a, vector signed long long __b) {
+ return (vector double)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_srab(vector double __a, vector unsigned long long __b) {
+ return (vector double)__builtin_s390_vsrab(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_srl ----------------------------------------------------------------*/
+
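+/* vec_srl is the logical (zero-filling) counterpart of vec_sral: the whole
+ 128-bit register is shifted right by a bit count taken from __b. */
+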
+static inline __ATTRS_o_ai vector signed char
+vec_srl(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_srl(vector signed char __a, vector unsigned short __b) {
+ return (vector signed char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_srl(vector signed char __a, vector unsigned int __b) {
+ return (vector signed char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_srl(vector bool char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_srl(vector bool char __a, vector unsigned short __b) {
+ return (vector bool char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_srl(vector bool char __a, vector unsigned int __b) {
+ return (vector bool char)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srl(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsrl(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srl(vector unsigned char __a, vector unsigned short __b) {
+ return __builtin_s390_vsrl(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srl(vector unsigned char __a, vector unsigned int __b) {
+ return __builtin_s390_vsrl(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srl(vector signed short __a, vector unsigned char __b) {
+ return (vector signed short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srl(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srl(vector signed short __a, vector unsigned int __b) {
+ return (vector signed short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_srl(vector bool short __a, vector unsigned char __b) {
+ return (vector bool short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_srl(vector bool short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_srl(vector bool short __a, vector unsigned int __b) {
+ return (vector bool short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srl(vector unsigned short __a, vector unsigned char __b) {
+ return (vector unsigned short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srl(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srl(vector unsigned short __a, vector unsigned int __b) {
+ return (vector unsigned short)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srl(vector signed int __a, vector unsigned char __b) {
+ return (vector signed int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srl(vector signed int __a, vector unsigned short __b) {
+ return (vector signed int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srl(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_srl(vector bool int __a, vector unsigned char __b) {
+ return (vector bool int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_srl(vector bool int __a, vector unsigned short __b) {
+ return (vector bool int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_srl(vector bool int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srl(vector unsigned int __a, vector unsigned char __b) {
+ return (vector unsigned int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srl(vector unsigned int __a, vector unsigned short __b) {
+ return (vector unsigned int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srl(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srl(vector signed long long __a, vector unsigned char __b) {
+ return (vector signed long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srl(vector signed long long __a, vector unsigned short __b) {
+ return (vector signed long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srl(vector signed long long __a, vector unsigned int __b) {
+ return (vector signed long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_srl(vector bool long long __a, vector unsigned char __b) {
+ return (vector bool long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_srl(vector bool long long __a, vector unsigned short __b) {
+ return (vector bool long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_srl(vector bool long long __a, vector unsigned int __b) {
+ return (vector bool long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srl(vector unsigned long long __a, vector unsigned char __b) {
+ return (vector unsigned long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srl(vector unsigned long long __a, vector unsigned short __b) {
+ return (vector unsigned long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srl(vector unsigned long long __a, vector unsigned int __b) {
+ return (vector unsigned long long)__builtin_s390_vsrl(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_srb ----------------------------------------------------------------*/
+
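+/* vec_srb shifts the whole 128-bit register right logically by a byte count
+ taken from __b, mirroring the vec_srab/vec_sral pairing above. */
+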
+static inline __ATTRS_o_ai vector signed char
+vec_srb(vector signed char __a, vector signed char __b) {
+ return (vector signed char)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_srb(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srb(vector unsigned char __a, vector signed char __b) {
+ return __builtin_s390_vsrlb(__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_srb(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsrlb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srb(vector signed short __a, vector signed short __b) {
+ return (vector signed short)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_srb(vector signed short __a, vector unsigned short __b) {
+ return (vector signed short)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srb(vector unsigned short __a, vector signed short __b) {
+ return (vector unsigned short)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_srb(vector unsigned short __a, vector unsigned short __b) {
+ return (vector unsigned short)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srb(vector signed int __a, vector signed int __b) {
+ return (vector signed int)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_srb(vector signed int __a, vector unsigned int __b) {
+ return (vector signed int)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srb(vector unsigned int __a, vector signed int __b) {
+ return (vector unsigned int)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_srb(vector unsigned int __a, vector unsigned int __b) {
+ return (vector unsigned int)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srb(vector signed long long __a, vector signed long long __b) {
+ return (vector signed long long)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_srb(vector signed long long __a, vector unsigned long long __b) {
+ return (vector signed long long)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srb(vector unsigned long long __a, vector signed long long __b) {
+ return (vector unsigned long long)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_srb(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_srb(vector double __a, vector signed long long __b) {
+ return (vector double)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector double
+vec_srb(vector double __a, vector unsigned long long __b) {
+ return (vector double)__builtin_s390_vsrlb(
+ (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+/*-- vec_abs ----------------------------------------------------------------*/
+
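+/* For the integer element types, vec_abs is a compare-and-select; as with
+ scalar two's-complement negation, the most negative element value is its
+ own absolute value. */
+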
+static inline __ATTRS_o_ai vector signed char
+vec_abs(vector signed char __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed char)0));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_abs(vector signed short __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed short)0));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_abs(vector signed int __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed int)0));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_abs(vector signed long long __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed long long)0));
+}
+
+static inline __ATTRS_o_ai vector double
+vec_abs(vector double __a) {
+ return __builtin_s390_vflpdb(__a);
+}
+
+/*-- vec_nabs ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_nabs(vector double __a) {
+ return __builtin_s390_vflndb(__a);
+}
+
+/*-- vec_max ----------------------------------------------------------------*/
+
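+/* vec_max (and vec_min below) are expressed as compare-and-select, which the
+ compiler can match to the hardware min/max operations; the overloads taking
+ a vector bool operand first cast it to the other operand's element type. */
+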
+static inline __ATTRS_o_ai vector signed char
+vec_max(vector signed char __a, vector signed char __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_max(vector signed char __a, vector bool char __b) {
+ vector signed char __bc = (vector signed char)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_max(vector bool char __a, vector signed char __b) {
+ vector signed char __ac = (vector signed char)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_max(vector unsigned char __a, vector unsigned char __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_max(vector unsigned char __a, vector bool char __b) {
+ vector unsigned char __bc = (vector unsigned char)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_max(vector bool char __a, vector unsigned char __b) {
+ vector unsigned char __ac = (vector unsigned char)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_max(vector signed short __a, vector signed short __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_max(vector signed short __a, vector bool short __b) {
+ vector signed short __bc = (vector signed short)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_max(vector bool short __a, vector signed short __b) {
+ vector signed short __ac = (vector signed short)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_max(vector unsigned short __a, vector unsigned short __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_max(vector unsigned short __a, vector bool short __b) {
+ vector unsigned short __bc = (vector unsigned short)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_max(vector bool short __a, vector unsigned short __b) {
+ vector unsigned short __ac = (vector unsigned short)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_max(vector signed int __a, vector signed int __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_max(vector signed int __a, vector bool int __b) {
+ vector signed int __bc = (vector signed int)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_max(vector bool int __a, vector signed int __b) {
+ vector signed int __ac = (vector signed int)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_max(vector unsigned int __a, vector unsigned int __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_max(vector unsigned int __a, vector bool int __b) {
+ vector unsigned int __bc = (vector unsigned int)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_max(vector bool int __a, vector unsigned int __b) {
+ vector unsigned int __ac = (vector unsigned int)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_max(vector signed long long __a, vector signed long long __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_max(vector signed long long __a, vector bool long long __b) {
+ vector signed long long __bc = (vector signed long long)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_max(vector bool long long __a, vector signed long long __b) {
+ vector signed long long __ac = (vector signed long long)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_max(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_max(vector unsigned long long __a, vector bool long long __b) {
+ vector unsigned long long __bc = (vector unsigned long long)__b;
+ return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_max(vector bool long long __a, vector unsigned long long __b) {
+ vector unsigned long long __ac = (vector unsigned long long)__a;
+ return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector double
+vec_max(vector double __a, vector double __b) {
+ return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+/*-- vec_min ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_min(vector signed char __a, vector signed char __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_min(vector signed char __a, vector bool char __b) {
+ vector signed char __bc = (vector signed char)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_min(vector bool char __a, vector signed char __b) {
+ vector signed char __ac = (vector signed char)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_min(vector unsigned char __a, vector unsigned char __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_min(vector unsigned char __a, vector bool char __b) {
+ vector unsigned char __bc = (vector unsigned char)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_min(vector bool char __a, vector unsigned char __b) {
+ vector unsigned char __ac = (vector unsigned char)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_min(vector signed short __a, vector signed short __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_min(vector signed short __a, vector bool short __b) {
+ vector signed short __bc = (vector signed short)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_min(vector bool short __a, vector signed short __b) {
+ vector signed short __ac = (vector signed short)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_min(vector unsigned short __a, vector unsigned short __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_min(vector unsigned short __a, vector bool short __b) {
+ vector unsigned short __bc = (vector unsigned short)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_min(vector bool short __a, vector unsigned short __b) {
+ vector unsigned short __ac = (vector unsigned short)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_min(vector signed int __a, vector signed int __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_min(vector signed int __a, vector bool int __b) {
+ vector signed int __bc = (vector signed int)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_min(vector bool int __a, vector signed int __b) {
+ vector signed int __ac = (vector signed int)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_min(vector unsigned int __a, vector unsigned int __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_min(vector unsigned int __a, vector bool int __b) {
+ vector unsigned int __bc = (vector unsigned int)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_min(vector bool int __a, vector unsigned int __b) {
+ vector unsigned int __ac = (vector unsigned int)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_min(vector signed long long __a, vector signed long long __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_min(vector signed long long __a, vector bool long long __b) {
+ vector signed long long __bc = (vector signed long long)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_min(vector bool long long __a, vector signed long long __b) {
+ vector signed long long __ac = (vector signed long long)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_min(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_min(vector unsigned long long __a, vector bool long long __b) {
+ vector unsigned long long __bc = (vector unsigned long long)__b;
+ return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_min(vector bool long long __a, vector unsigned long long __b) {
+ vector unsigned long long __ac = (vector unsigned long long)__a;
+ return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai vector double
+vec_min(vector double __a, vector double __b) {
+ return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+/*-- vec_add_u128 -----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_add_u128(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vaq(__a, __b);
+}
+
+/*-- vec_addc ---------------------------------------------------------------*/
+
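+/* vec_addc returns the carry out of each element-wise unsigned addition:
+ each result element is 1 where __a + __b wraps, and 0 otherwise. */
+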
+static inline __ATTRS_o_ai vector unsigned char
+vec_addc(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vaccb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_addc(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vacch(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_addc(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vaccf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_addc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vaccg(__a, __b);
+}
+
+/*-- vec_addc_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_addc_u128(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vaccq(__a, __b);
+}
+
+/*-- vec_adde_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_adde_u128(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vacq(__a, __b, __c);
+}
+
+/*-- vec_addec_u128 ---------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_addec_u128(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vacccq(__a, __b, __c);
+}
+
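+/* Illustrative sketch (not part of the header): the four u128 primitives
+ above compose into a 256-bit addition; a_lo/a_hi/b_lo/b_hi are hypothetical
+ operand halves held as vector unsigned char:
+
+ vector unsigned char lo = vec_add_u128(a_lo, b_lo);
+ vector unsigned char carry = vec_addc_u128(a_lo, b_lo);
+ vector unsigned char hi = vec_adde_u128(a_hi, b_hi, carry);
+ vector unsigned char cout = vec_addec_u128(a_hi, b_hi, carry);
+*/
+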
+/*-- vec_avg ----------------------------------------------------------------*/
+
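+/* vec_avg computes the element-wise rounded average (__a + __b + 1) >> 1,
+ evaluated without intermediate overflow. */
+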
+static inline __ATTRS_o_ai vector signed char
+vec_avg(vector signed char __a, vector signed char __b) {
+ return __builtin_s390_vavgb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_avg(vector signed short __a, vector signed short __b) {
+ return __builtin_s390_vavgh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_avg(vector signed int __a, vector signed int __b) {
+ return __builtin_s390_vavgf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_avg(vector signed long long __a, vector signed long long __b) {
+ return __builtin_s390_vavgg(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_avg(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vavglb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_avg(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vavglh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_avg(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vavglf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_avg(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vavglg(__a, __b);
+}
+
+/*-- vec_checksum -----------------------------------------------------------*/
+
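+/* vec_checksum wraps the VECTOR CHECKSUM instruction, which accumulates a
+ 32-bit checksum over the word elements of __a, seeded from __b. */
+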
+static inline __ATTRS_ai vector unsigned int
+vec_checksum(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vcksm(__a, __b);
+}
+
+/*-- vec_gfmsum -------------------------------------------------------------*/
+
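+/* The vec_gfmsum family performs carry-less (Galois-field) multiplication of
+ corresponding elements, combining adjacent double-width products by XOR. */
+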
+static inline __ATTRS_o_ai vector unsigned short
+vec_gfmsum(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vgfmb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_gfmsum(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vgfmh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_gfmsum(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vgfmf(__a, __b);
+}
+
+/*-- vec_gfmsum_128 ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_gfmsum_128(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vgfmg(__a, __b);
+}
+
+/*-- vec_gfmsum_accum -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_gfmsum_accum(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vgfmab(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_gfmsum_accum(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vgfmah(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_gfmsum_accum(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned long long __c) {
+ return __builtin_s390_vgfmaf(__a, __b, __c);
+}
+
+/*-- vec_gfmsum_accum_128 ---------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_gfmsum_accum_128(vector unsigned long long __a,
+ vector unsigned long long __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vgfmag(__a, __b, __c);
+}
+
+/*-- vec_mladd --------------------------------------------------------------*/
+
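+/* vec_mladd is a low-order multiply-add: the products keep the operand
+ element width, so only the low half of each product contributes. */
+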
+static inline __ATTRS_o_ai vector signed char
+vec_mladd(vector signed char __a, vector signed char __b,
+ vector signed char __c) {
+ return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_mladd(vector unsigned char __a, vector signed char __b,
+ vector signed char __c) {
+ return (vector signed char)__a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed char
+vec_mladd(vector signed char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __a * (vector signed char)__b + (vector signed char)__c;
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_mladd(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mladd(vector signed short __a, vector signed short __b,
+ vector signed short __c) {
+ return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mladd(vector unsigned short __a, vector signed short __b,
+ vector signed short __c) {
+ return (vector signed short)__a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mladd(vector signed short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __a * (vector signed short)__b + (vector signed short)__c;
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mladd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mladd(vector signed int __a, vector signed int __b,
+ vector signed int __c) {
+ return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mladd(vector unsigned int __a, vector signed int __b,
+ vector signed int __c) {
+ return (vector signed int)__a * __b + __c;
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mladd(vector signed int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __a * (vector signed int)__b + (vector signed int)__c;
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mladd(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __a * __b + __c;
+}
+
+/*-- vec_mhadd --------------------------------------------------------------*/
+
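+/* vec_mhadd is the high-order counterpart of vec_mladd, returning the high
+ half of each multiply-add result. */
+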
+static inline __ATTRS_o_ai vector signed char
+vec_mhadd(vector signed char __a, vector signed char __b,
+ vector signed char __c) {
+ return __builtin_s390_vmahb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_mhadd(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vmalhb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mhadd(vector signed short __a, vector signed short __b,
+ vector signed short __c) {
+ return __builtin_s390_vmahh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mhadd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vmalhh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mhadd(vector signed int __a, vector signed int __b,
+ vector signed int __c) {
+ return __builtin_s390_vmahf(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mhadd(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vmalhf(__a, __b, __c);
+}
+
+/*-- vec_meadd --------------------------------------------------------------*/
+
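+/* vec_meadd multiplies the even-indexed elements of __a and __b to double
+ width and adds the double-width elements of __c. */
+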
+static inline __ATTRS_o_ai vector signed short
+vec_meadd(vector signed char __a, vector signed char __b,
+ vector signed short __c) {
+ return __builtin_s390_vmaeb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_meadd(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vmaleb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_meadd(vector signed short __a, vector signed short __b,
+ vector signed int __c) {
+ return __builtin_s390_vmaeh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_meadd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vmaleh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_meadd(vector signed int __a, vector signed int __b,
+ vector signed long long __c) {
+ return __builtin_s390_vmaef(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_meadd(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned long long __c) {
+ return __builtin_s390_vmalef(__a, __b, __c);
+}
+
+/*-- vec_moadd --------------------------------------------------------------*/
+
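+/* vec_moadd is the odd-indexed counterpart of vec_meadd. */
+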
+static inline __ATTRS_o_ai vector signed short
+vec_moadd(vector signed char __a, vector signed char __b,
+ vector signed short __c) {
+ return __builtin_s390_vmaob(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_moadd(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vmalob(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_moadd(vector signed short __a, vector signed short __b,
+ vector signed int __c) {
+ return __builtin_s390_vmaoh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_moadd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vmaloh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_moadd(vector signed int __a, vector signed int __b,
+ vector signed long long __c) {
+ return __builtin_s390_vmaof(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_moadd(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned long long __c) {
+ return __builtin_s390_vmalof(__a, __b, __c);
+}
+
+/*-- vec_mulh ---------------------------------------------------------------*/
+
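+/* vec_mulh returns the high half of each element-wise product. */
+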
+static inline __ATTRS_o_ai vector signed char
+vec_mulh(vector signed char __a, vector signed char __b) {
+ return __builtin_s390_vmhb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_mulh(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vmlhb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_mulh(vector signed short __a, vector signed short __b) {
+ return __builtin_s390_vmhh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mulh(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vmlhh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mulh(vector signed int __a, vector signed int __b) {
+ return __builtin_s390_vmhf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mulh(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vmlhf(__a, __b);
+}
+
+/*-- vec_mule ---------------------------------------------------------------*/
+
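+/* vec_mule is a widening multiply of the even-indexed elements; each result
+ element is twice the width of its operands. */
+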
+static inline __ATTRS_o_ai vector signed short
+vec_mule(vector signed char __a, vector signed char __b) {
+ return __builtin_s390_vmeb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mule(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vmleb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mule(vector signed short __a, vector signed short __b) {
+ return __builtin_s390_vmeh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mule(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vmleh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_mule(vector signed int __a, vector signed int __b) {
+ return __builtin_s390_vmef(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_mule(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vmlef(__a, __b);
+}
+
+/*-- vec_mulo ---------------------------------------------------------------*/
+
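+/* vec_mulo is the odd-indexed counterpart of vec_mule. */
+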
+static inline __ATTRS_o_ai vector signed short
+vec_mulo(vector signed char __a, vector signed char __b) {
+ return __builtin_s390_vmob(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_mulo(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vmlob(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_mulo(vector signed short __a, vector signed short __b) {
+ return __builtin_s390_vmoh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_mulo(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vmloh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_mulo(vector signed int __a, vector signed int __b) {
+ return __builtin_s390_vmof(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_mulo(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vmlof(__a, __b);
+}
+
+/*-- vec_sub_u128 -----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_sub_u128(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsq(__a, __b);
+}
+
+/*-- vec_subc ---------------------------------------------------------------*/
+
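+/* vec_subc returns the borrow indication of each element-wise subtraction;
+ per the z/Architecture convention, 1 means no borrow occurred, i.e.
+ __a >= __b. */
+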
+static inline __ATTRS_o_ai vector unsigned char
+vec_subc(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vscbib(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_subc(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vscbih(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_subc(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vscbif(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_subc(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vscbig(__a, __b);
+}
+
+/*-- vec_subc_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_subc_u128(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vscbiq(__a, __b);
+}
+
+/*-- vec_sube_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_sube_u128(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vsbiq(__a, __b, __c);
+}
+
+/*-- vec_subec_u128 ---------------------------------------------------------*/
+
+static inline __ATTRS_ai vector unsigned char
+vec_subec_u128(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vsbcbiq(__a, __b, __c);
+}
+
+/*-- vec_sum2 ---------------------------------------------------------------*/
+
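+/* The vec_sum* family computes horizontal sums: groups of elements of __a,
+ together with designated elements of __b, are accumulated into wider
+ results (64-bit for vec_sum2, 128-bit for vec_sum_u128, 32-bit for
+ vec_sum4). */
+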
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sum2(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vsumgh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_sum2(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vsumgf(__a, __b);
+}
+
+/*-- vec_sum_u128 -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sum_u128(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vsumqf(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_sum_u128(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vsumqg(__a, __b);
+}
+
+/*-- vec_sum4 ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sum4(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vsumb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_sum4(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vsumh(__a, __b);
+}
+
+/*-- vec_test_mask ----------------------------------------------------------*/
+
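+/* vec_test_mask returns the condition code of testing __a under the bit
+ mask __b: 0 if all selected bits are zero, 1 if they are mixed, and 3 if
+ they are all one. */
+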
+static inline __ATTRS_o_ai int
+vec_test_mask(vector signed char __a, vector unsigned char __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vtm(__a, __b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector signed short __a, vector unsigned short __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector signed int __a, vector unsigned int __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector signed long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(vector double __a, vector unsigned long long __b) {
+ return __builtin_s390_vtm((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+/*-- vec_madd ---------------------------------------------------------------*/
+
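+/* vec_madd computes the fused multiply-add __a * __b + __c with a single
+ rounding; vec_msub (below) likewise computes __a * __b - __c. */
+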
+static inline __ATTRS_ai vector double
+vec_madd(vector double __a, vector double __b, vector double __c) {
+ return __builtin_s390_vfmadb(__a, __b, __c);
+}
+
+/*-- vec_msub ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_msub(vector double __a, vector double __b, vector double __c) {
+ return __builtin_s390_vfmsdb(__a, __b, __c);
+}
+
+/*-- vec_sqrt ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_sqrt(vector double __a) {
+ return __builtin_s390_vfsqdb(__a);
+}
+
+/*-- vec_ld2f ---------------------------------------------------------------*/
+
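+/* vec_ld2f loads two adjacent floats and widens them to a vector of two
+ doubles; vec_st2f (below) performs the inverse narrowing store. */
+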
+static inline __ATTRS_ai vector double
+vec_ld2f(const float *__ptr) {
+ typedef float __v2f32 __attribute__((__vector_size__(8)));
+ return __builtin_convertvector(*(const __v2f32 *)__ptr, vector double);
+}
+
+/*-- vec_st2f ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai void
+vec_st2f(vector double __a, float *__ptr) {
+ typedef float __v2f32 __attribute__((__vector_size__(8)));
+ *(__v2f32 *)__ptr = __builtin_convertvector(__a, __v2f32);
+}
+
+/*-- vec_ctd ----------------------------------------------------------------*/
+
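+/* vec_ctd converts each element to double and scales it by 2**-__b, i.e. it
+ treats __a as a fixed-point value with __b fraction bits. The scale factor
+ is built directly as an IEEE-754 bit pattern: placing (0x3ff - __b) in the
+ biased-exponent field (bits 62:52) yields exactly 2**-__b. */
+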
+static inline __ATTRS_o_ai vector double
+vec_ctd(vector signed long long __a, int __b)
+ __constant_range(__b, 0, 31) {
+ vector double __conv = __builtin_convertvector(__a, vector double);
+ __conv *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52);
+ return __conv;
+}
+
+static inline __ATTRS_o_ai vector double
+vec_ctd(vector unsigned long long __a, int __b)
+ __constant_range(__b, 0, 31) {
+ vector double __conv = __builtin_convertvector(__a, vector double);
+ __conv *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52);
+ return __conv;
+}
+
+/*-- vec_ctsl ---------------------------------------------------------------*/
+
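+/* vec_ctsl and vec_ctul (below) invert vec_ctd: __a is scaled by 2**__b,
+ using the same exponent-field construction, before being truncated to a
+ fixed-point integer. */
+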
+static inline __ATTRS_o_ai vector signed long long
+vec_ctsl(vector double __a, int __b)
+ __constant_range(__b, 0, 31) {
+ __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52);
+ return __builtin_convertvector(__a, vector signed long long);
+}
+
+/*-- vec_ctul ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_ctul(vector double __a, int __b)
+ __constant_range(__b, 0, 31) {
+ __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52);
+ return __builtin_convertvector(__a, vector unsigned long long);
+}
+
+/*-- vec_roundp -------------------------------------------------------------*/
+
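+/* In the vec_round* family, the second builtin operand (4) sets the
+ IEEE-inexact suppression control and the third selects the rounding mode:
+ 0 = current FPC mode, 4 = to nearest (ties to even), 5 = toward zero,
+ 6 = toward +infinity, 7 = toward -infinity. */
+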
+static inline __ATTRS_ai vector double
+vec_roundp(vector double __a) {
+ return __builtin_s390_vfidb(__a, 4, 6);
+}
+
+/*-- vec_ceil ---------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_ceil(vector double __a) {
+ // On this platform, vec_ceil never triggers the IEEE-inexact exception.
+ return __builtin_s390_vfidb(__a, 4, 6);
+}
+
+/*-- vec_roundm -------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_roundm(vector double __a) {
+ return __builtin_s390_vfidb(__a, 4, 7);
+}
+
+/*-- vec_floor --------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_floor(vector double __a) {
+ // On this platform, vec_floor never triggers the IEEE-inexact exception.
+ return __builtin_s390_vfidb(__a, 4, 7);
+}
+
+/*-- vec_roundz -------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_roundz(vector double __a) {
+ return __builtin_s390_vfidb(__a, 4, 5);
+}
+
+/*-- vec_trunc --------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_trunc(vector double __a) {
+ // On this platform, vec_trunc never triggers the IEEE-inexact exception.
+ return __builtin_s390_vfidb(__a, 4, 5);
+}
+
+/*-- vec_roundc -------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_roundc(vector double __a) {
+ return __builtin_s390_vfidb(__a, 4, 0);
+}
+
+/*-- vec_round --------------------------------------------------------------*/
+
+static inline __ATTRS_ai vector double
+vec_round(vector double __a) {
+ return __builtin_s390_vfidb(__a, 4, 4);
+}
+
+/*-- vec_fp_test_data_class -------------------------------------------------*/
+
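+/* The second argument selects the floating-point data classes to test and
+ must be a compile-time constant; the third is an int * that receives the
+ resulting condition code. */
+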
+#define vec_fp_test_data_class(X, Y, Z) \
+ ((vector bool long long)__builtin_s390_vftcidb((X), (Y), (Z)))
+
+/*-- vec_cp_until_zero ------------------------------------------------------*/
+
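+/* vec_cp_until_zero copies __a, replacing every element after the first
+ zero element with zero (VECTOR ISOLATE STRING). */
+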
+static inline __ATTRS_o_ai vector signed char
+vec_cp_until_zero(vector signed char __a) {
+ return (vector signed char)__builtin_s390_vistrb((vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cp_until_zero(vector bool char __a) {
+ return (vector bool char)__builtin_s390_vistrb((vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cp_until_zero(vector unsigned char __a) {
+ return __builtin_s390_vistrb(__a);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cp_until_zero(vector signed short __a) {
+ return (vector signed short)__builtin_s390_vistrh((vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cp_until_zero(vector bool short __a) {
+ return (vector bool short)__builtin_s390_vistrh((vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cp_until_zero(vector unsigned short __a) {
+ return __builtin_s390_vistrh(__a);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cp_until_zero(vector signed int __a) {
+ return (vector signed int)__builtin_s390_vistrf((vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cp_until_zero(vector bool int __a) {
+ return (vector bool int)__builtin_s390_vistrf((vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cp_until_zero(vector unsigned int __a) {
+ return __builtin_s390_vistrf(__a);
+}
+
+/*-- vec_cp_until_zero_cc ---------------------------------------------------*/
+
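+/* Here and in the other *_cc variants below, *__cc receives the condition
+ code set by the underlying instruction (here, whether a zero element was
+ found). */
+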
+static inline __ATTRS_o_ai vector signed char
+vec_cp_until_zero_cc(vector signed char __a, int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vistrbs((vector unsigned char)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_cp_until_zero_cc(vector bool char __a, int *__cc) {
+ return (vector bool char)
+ __builtin_s390_vistrbs((vector unsigned char)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cp_until_zero_cc(vector unsigned char __a, int *__cc) {
+ return __builtin_s390_vistrbs(__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cp_until_zero_cc(vector signed short __a, int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vistrhs((vector unsigned short)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cp_until_zero_cc(vector bool short __a, int *__cc) {
+ return (vector bool short)
+ __builtin_s390_vistrhs((vector unsigned short)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cp_until_zero_cc(vector unsigned short __a, int *__cc) {
+ return __builtin_s390_vistrhs(__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cp_until_zero_cc(vector signed int __a, int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vistrfs((vector unsigned int)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cp_until_zero_cc(vector bool int __a, int *__cc) {
+ return (vector bool int)
+ __builtin_s390_vistrfs((vector unsigned int)__a, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cp_until_zero_cc(vector unsigned int __a, int *__cc) {
+ return __builtin_s390_vistrfs(__a, __cc);
+}
+
+/*-- vec_cmpeq_idx ----------------------------------------------------------*/
+
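+/* vec_cmpeq_idx returns, in byte 7 of the result, the byte index of the
+ first position where __a and __b compare equal, or 16 if there is none. */
+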
+static inline __ATTRS_o_ai vector signed char
+vec_cmpeq_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfeeb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfeeb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfeeb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpeq_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfeeh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfeeh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfeeh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpeq_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfeef((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfeef((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfeef(__a, __b);
+}
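+
+/* Illustrative note (not part of the original header): the vfee* builtins
+ * above implement VECTOR FIND ELEMENT EQUAL, which reports the byte index of
+ * the first equal element in byte 7 of the result (16 when there is no
+ * match). A hypothetical helper, on that assumption:
+ *
+ *   static inline int first_match(vector unsigned char a,
+ *                                 vector unsigned char b) {
+ *     return vec_cmpeq_idx(a, b)[7];  // byte index; 16 means "no match"
+ *   }
+ */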
+
+/*-- vec_cmpeq_idx_cc -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpeq_idx_cc(vector signed char __a, vector signed char __b, int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfeebs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfeebs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfeebs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpeq_idx_cc(vector signed short __a, vector signed short __b, int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfeehs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return __builtin_s390_vfeehs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfeehs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpeq_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfeefs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfeefs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_idx_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+ return __builtin_s390_vfeefs(__a, __b, __cc);
+}
+
+/*-- vec_cmpeq_or_0_idx -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpeq_or_0_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfeezb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_or_0_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfeezb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfeezb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpeq_or_0_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfeezh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_or_0_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfeezh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfeezh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpeq_or_0_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfeezf((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_or_0_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfeezf((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfeezf(__a, __b);
+}
+
+/*-- vec_cmpeq_or_0_idx_cc --------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpeq_or_0_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfeezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_or_0_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfeezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpeq_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfeezbs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpeq_or_0_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfeezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_or_0_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return __builtin_s390_vfeezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpeq_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfeezhs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpeq_or_0_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfeezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_or_0_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfeezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpeq_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfeezfs(__a, __b, __cc);
+}
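+
+/* Illustrative note (not part of the original header): the "_or_0" variants
+ * map to the zero-searching forms of the instruction (vfeez*), which stop at
+ * the first zero element of __a as well as at the first match, making them
+ * suited to scans over NUL-terminated strings; the _cc variants additionally
+ * report the condition code through *__cc.
+ */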
+
+/*-- vec_cmpne_idx ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpne_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfeneb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfeneb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfeneb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpne_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfeneh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfeneh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfeneh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpne_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfenef((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfenef((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfenef(__a, __b);
+}
+
+/*-- vec_cmpne_idx_cc -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpne_idx_cc(vector signed char __a, vector signed char __b, int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfenebs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfenebs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfenebs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpne_idx_cc(vector signed short __a, vector signed short __b, int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfenehs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return __builtin_s390_vfenehs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfenehs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpne_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfenefs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfenefs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_idx_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+ return __builtin_s390_vfenefs(__a, __b, __cc);
+}
+
+/*-- vec_cmpne_or_0_idx -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpne_or_0_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfenezb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_or_0_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfenezb((vector unsigned char)__a,
+ (vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfenezb(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpne_or_0_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfenezh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_or_0_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfenezh((vector unsigned short)__a,
+ (vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfenezh(__a, __b);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpne_or_0_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfenezf((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_or_0_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfenezf((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfenezf(__a, __b);
+}
+
+/*-- vec_cmpne_or_0_idx_cc --------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_cmpne_or_0_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfenezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_or_0_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfenezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpne_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfenezbs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_cmpne_or_0_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfenezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_or_0_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return __builtin_s390_vfenezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpne_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfenezhs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_cmpne_or_0_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfenezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_or_0_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfenezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfenezfs(__a, __b, __cc);
+}
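+
+/* Hedged sketch (not part of the original header): comparing a vector with
+ * itself via the zero-searching "not equal" form never finds a mismatch, so
+ * the returned index is that of the first zero byte -- the building block of
+ * a vectorized strlen on z13. Assumes byte 7 of the result carries the
+ * index, with 16 meaning "no zero in this chunk":
+ *
+ *   vector unsigned char chunk = ...;  // 16 string bytes
+ *   int idx = vec_cmpne_or_0_idx(chunk, chunk)[7];
+ *   if (idx < 16) { ... terminator found at offset idx ... }
+ */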
+
+/*-- vec_cmprg --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmprg(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (vector bool char)__builtin_s390_vstrcb(__a, __b, __c, 4);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmprg(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return (vector bool short)__builtin_s390_vstrch(__a, __b, __c, 4);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmprg(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return (vector bool int)__builtin_s390_vstrcf(__a, __b, __c, 4);
+}
+
+/*-- vec_cmprg_cc -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmprg_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return (vector bool char)__builtin_s390_vstrcbs(__a, __b, __c, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmprg_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return (vector bool short)__builtin_s390_vstrchs(__a, __b, __c, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmprg_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return (vector bool int)__builtin_s390_vstrcfs(__a, __b, __c, 4, __cc);
+}
+
+/*-- vec_cmprg_idx ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmprg_idx(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vstrcb(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmprg_idx(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vstrch(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmprg_idx(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vstrcf(__a, __b, __c, 0);
+}
+
+/*-- vec_cmprg_idx_cc -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmprg_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrcbs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmprg_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return __builtin_s390_vstrchs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmprg_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return __builtin_s390_vstrcfs(__a, __b, __c, 0, __cc);
+}
+
+/*-- vec_cmprg_or_0_idx -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmprg_or_0_idx(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vstrczb(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmprg_or_0_idx(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vstrczh(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmprg_or_0_idx(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vstrczf(__a, __b, __c, 0);
+}
+
+/*-- vec_cmprg_or_0_idx_cc --------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmprg_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrczbs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmprg_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return __builtin_s390_vstrczhs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmprg_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return __builtin_s390_vstrczfs(__a, __b, __c, 0, __cc);
+}
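+
+/* Illustrative note (not part of the original header): the vec_cmprg family
+ * maps to VECTOR STRING RANGE COMPARE, where __b supplies the range bounds
+ * and __c the per-element comparison controls. The literal last operand
+ * appears to select instruction flags: 4 requests a boolean-mask result, 0
+ * an index result, and (below) 8 inverts the range test; the "_or_0" forms
+ * use the zero-searching vstrcz* builtins instead.
+ */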
+
+/*-- vec_cmpnrg -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpnrg(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (vector bool char)__builtin_s390_vstrcb(__a, __b, __c, 12);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpnrg(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return (vector bool short)__builtin_s390_vstrch(__a, __b, __c, 12);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpnrg(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return (vector bool int)__builtin_s390_vstrcf(__a, __b, __c, 12);
+}
+
+/*-- vec_cmpnrg_cc ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_cmpnrg_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return (vector bool char)__builtin_s390_vstrcbs(__a, __b, __c, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_cmpnrg_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return (vector bool short)__builtin_s390_vstrchs(__a, __b, __c, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_cmpnrg_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return (vector bool int)__builtin_s390_vstrcfs(__a, __b, __c, 12, __cc);
+}
+
+/*-- vec_cmpnrg_idx ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpnrg_idx(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vstrcb(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpnrg_idx(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vstrch(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpnrg_idx(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vstrcf(__a, __b, __c, 8);
+}
+
+/*-- vec_cmpnrg_idx_cc ------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpnrg_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrcbs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpnrg_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return __builtin_s390_vstrchs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpnrg_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return __builtin_s390_vstrcfs(__a, __b, __c, 8, __cc);
+}
+
+/*-- vec_cmpnrg_or_0_idx ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpnrg_or_0_idx(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_s390_vstrczb(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpnrg_or_0_idx(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_s390_vstrczh(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpnrg_or_0_idx(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_s390_vstrczf(__a, __b, __c, 8);
+}
+
+/*-- vec_cmpnrg_or_0_idx_cc -------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_cmpnrg_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrczbs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_cmpnrg_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c, int *__cc) {
+ return __builtin_s390_vstrczhs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_cmpnrg_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c, int *__cc) {
+ return __builtin_s390_vstrczfs(__a, __b, __c, 8, __cc);
+}
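+
+/* Hedged usage sketch (not part of the original header): the inverted forms
+ * locate the first element that falls *outside* every specified range, e.g.
+ * the first non-digit in a chunk of text. The exact encoding of the bound
+ * pairs in __b and the per-bound comparison controls in __c follows the
+ * VSTRC instruction definition and is not repeated here:
+ *
+ *   vector unsigned char idx = vec_cmpnrg_idx(text, bounds, controls);
+ *   // index result; 16 in the index byte means "everything was in range"
+ */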
+
+/*-- vec_find_any_eq --------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq(vector signed char __a, vector signed char __b) {
+ return (vector bool char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq(vector bool char __a, vector bool char __b) {
+ return (vector bool char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_s390_vfaeb(__a, __b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq(vector signed short __a, vector signed short __b) {
+ return (vector bool short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq(vector bool short __a, vector bool short __b) {
+ return (vector bool short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_s390_vfaeh(__a, __b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq(vector signed int __a, vector signed int __b) {
+ return (vector bool int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq(vector bool int __a, vector bool int __b) {
+ return (vector bool int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 4);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_s390_vfaef(__a, __b, 4);
+}
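+
+/* Illustrative note (not part of the original header): unlike vec_cmpeq,
+ * which compares corresponding elements, the vfae*-based "find any" forms
+ * test each element of __a against *every* element of __b (VECTOR FIND ANY
+ * ELEMENT EQUAL), so __b can act as a small character set, e.g.:
+ *
+ *   vector unsigned char seps = (vector unsigned char){' ', '\t', '\n'};
+ *   // remaining elements zero-fill, so NUL bytes in text would also match
+ *   vector bool char hit = vec_find_any_eq(text, seps);
+ */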
+
+/*-- vec_find_any_eq_cc -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq_cc(vector signed char __a, vector signed char __b, int *__cc) {
+ return (vector bool char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return (vector bool char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_eq_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return (vector bool char)__builtin_s390_vfaebs(__a, __b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector bool short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return (vector bool short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_eq_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return (vector bool short)__builtin_s390_vfaehs(__a, __b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector bool int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return (vector bool int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_eq_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return (vector bool int)__builtin_s390_vfaefs(__a, __b, 4, __cc);
+}
+
+/*-- vec_find_any_eq_idx ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_eq_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfaeb(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_eq_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfaeh(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_eq_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfaef(__a, __b, 0);
+}
+
+/*-- vec_find_any_eq_idx_cc -------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_eq_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfaebs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_eq_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_idx_cc(vector bool short __a, vector bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfaehs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_eq_idx_cc(vector signed int __a, vector signed int __b,
+ int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfaefs(__a, __b, 0, __cc);
+}
+
+/*-- vec_find_any_eq_or_0_idx -----------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_eq_or_0_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfaezb((vector unsigned char)__a,
+ (vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_or_0_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfaezb((vector unsigned char)__a,
+ (vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfaezb(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_eq_or_0_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfaezh((vector unsigned short)__a,
+ (vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_or_0_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfaezh((vector unsigned short)__a,
+ (vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfaezh(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_eq_or_0_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfaezf((vector unsigned int)__a,
+ (vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_or_0_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfaezf((vector unsigned int)__a,
+ (vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfaezf(__a, __b, 0);
+}
+
+/*-- vec_find_any_eq_or_0_idx_cc --------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_eq_or_0_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfaezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_or_0_idx_cc(vector bool char __a, vector bool char __b,
+ int *__cc) {
+ return __builtin_s390_vfaezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_eq_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfaezbs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_eq_or_0_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfaezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_or_0_idx_cc(vector bool short __a, vector bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfaezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_eq_or_0_idx_cc(vector unsigned short __a,
+ vector unsigned short __b, int *__cc) {
+ return __builtin_s390_vfaezhs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_eq_or_0_idx_cc(vector signed int __a, vector signed int __b,
+ int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfaezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_or_0_idx_cc(vector bool int __a, vector bool int __b,
+ int *__cc) {
+ return __builtin_s390_vfaezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_eq_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfaezfs(__a, __b, 0, __cc);
+}
+
+/*-- vec_find_any_ne --------------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne(vector signed char __a, vector signed char __b) {
+ return (vector bool char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne(vector bool char __a, vector bool char __b) {
+ return (vector bool char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_s390_vfaeb(__a, __b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne(vector signed short __a, vector signed short __b) {
+ return (vector bool short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne(vector bool short __a, vector bool short __b) {
+ return (vector bool short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_s390_vfaeh(__a, __b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne(vector signed int __a, vector signed int __b) {
+ return (vector bool int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne(vector bool int __a, vector bool int __b) {
+ return (vector bool int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 12);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_s390_vfaef(__a, __b, 12);
+}
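+
+/* Illustrative note (not part of the original header): the "ne" forms are
+ * the complement of the above -- an element of __a matches when it differs
+ * from every element of __b (flag 12 being the inverted boolean result, 8
+ * below the inverted index), so e.g. vec_find_any_ne_idx(text, pad) locates
+ * the first element that is not one of the pad values.
+ */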
+
+/*-- vec_find_any_ne_cc -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne_cc(vector signed char __a, vector signed char __b, int *__cc) {
+ return (vector bool char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return (vector bool char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_find_any_ne_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return (vector bool char)__builtin_s390_vfaebs(__a, __b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector bool short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne_cc(vector bool short __a, vector bool short __b, int *__cc) {
+ return (vector bool short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_find_any_ne_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return (vector bool short)__builtin_s390_vfaehs(__a, __b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne_cc(vector signed int __a, vector signed int __b, int *__cc) {
+ return (vector bool int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return (vector bool int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_find_any_ne_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return (vector bool int)__builtin_s390_vfaefs(__a, __b, 12, __cc);
+}
+
+/*-- vec_find_any_ne_idx ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_ne_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfaeb((vector unsigned char)__a,
+ (vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfaeb(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_ne_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfaeh((vector unsigned short)__a,
+ (vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfaeh(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_ne_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfaef((vector unsigned int)__a,
+ (vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfaef(__a, __b, 8);
+}
+
+/*-- vec_find_any_ne_idx_cc -------------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_ne_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
+ return __builtin_s390_vfaebs((vector unsigned char)__a,
+ (vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfaebs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_ne_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_idx_cc(vector bool short __a, vector bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfaehs((vector unsigned short)__a,
+ (vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_idx_cc(vector unsigned short __a, vector unsigned short __b,
+ int *__cc) {
+ return __builtin_s390_vfaehs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_ne_idx_cc(vector signed int __a, vector signed int __b,
+ int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
+ return __builtin_s390_vfaefs((vector unsigned int)__a,
+ (vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfaefs(__a, __b, 8, __cc);
+}
+
+/*-- vec_find_any_ne_or_0_idx -----------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_ne_or_0_idx(vector signed char __a, vector signed char __b) {
+ return (vector signed char)
+ __builtin_s390_vfaezb((vector unsigned char)__a,
+ (vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_or_0_idx(vector bool char __a, vector bool char __b) {
+ return __builtin_s390_vfaezb((vector unsigned char)__a,
+ (vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_s390_vfaezb(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_ne_or_0_idx(vector signed short __a, vector signed short __b) {
+ return (vector signed short)
+ __builtin_s390_vfaezh((vector unsigned short)__a,
+ (vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_or_0_idx(vector bool short __a, vector bool short __b) {
+ return __builtin_s390_vfaezh((vector unsigned short)__a,
+ (vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_s390_vfaezh(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_ne_or_0_idx(vector signed int __a, vector signed int __b) {
+ return (vector signed int)
+ __builtin_s390_vfaezf((vector unsigned int)__a,
+ (vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_or_0_idx(vector bool int __a, vector bool int __b) {
+ return __builtin_s390_vfaezf((vector unsigned int)__a,
+ (vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_s390_vfaezf(__a, __b, 8);
+}
+
+/*-- vec_find_any_ne_or_0_idx_cc --------------------------------------------*/
+
+static inline __ATTRS_o_ai vector signed char
+vec_find_any_ne_or_0_idx_cc(vector signed char __a, vector signed char __b,
+ int *__cc) {
+ return (vector signed char)
+ __builtin_s390_vfaezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_or_0_idx_cc(vector bool char __a, vector bool char __b,
+ int *__cc) {
+ return __builtin_s390_vfaezbs((vector unsigned char)__a,
+ (vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_find_any_ne_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+ int *__cc) {
+ return __builtin_s390_vfaezbs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_find_any_ne_or_0_idx_cc(vector signed short __a, vector signed short __b,
+ int *__cc) {
+ return (vector signed short)
+ __builtin_s390_vfaezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_or_0_idx_cc(vector bool short __a, vector bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfaezhs((vector unsigned short)__a,
+ (vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_find_any_ne_or_0_idx_cc(vector unsigned short __a,
+ vector unsigned short __b, int *__cc) {
+ return __builtin_s390_vfaezhs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_find_any_ne_or_0_idx_cc(vector signed int __a, vector signed int __b,
+ int *__cc) {
+ return (vector signed int)
+ __builtin_s390_vfaezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_or_0_idx_cc(vector bool int __a, vector bool int __b,
+ int *__cc) {
+ return __builtin_s390_vfaezfs((vector unsigned int)__a,
+ (vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_find_any_ne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+ int *__cc) {
+ return __builtin_s390_vfaezfs(__a, __b, 8, __cc);
+}
+
+#undef __constant_pow2_range
+#undef __constant_range
+#undef __constant
+#undef __ATTRS_o
+#undef __ATTRS_o_ai
+#undef __ATTRS_ai
+
+#else
+
+#error "Use -fzvector to enable vector extensions"
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h
new file mode 100644
index 0000000..a2d9310
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h
@@ -0,0 +1,33 @@
+/*===---- wmmintrin.h - AES/PCLMUL intrinsics -----------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _WMMINTRIN_H
+#define _WMMINTRIN_H
+
+#include <emmintrin.h>
+
+#include <__wmmintrin_aes.h>
+
+#include <__wmmintrin_pclmul.h>
+
+#endif /* _WMMINTRIN_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/x86intrin.h b/contrib/llvm/tools/clang/lib/Headers/x86intrin.h
new file mode 100644
index 0000000..4d8077e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/x86intrin.h
@@ -0,0 +1,57 @@
+/*===---- x86intrin.h - X86 intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#define __X86INTRIN_H
+
+#include <ia32intrin.h>
+
+#include <immintrin.h>
+
+#include <mm3dnow.h>
+
+#include <bmiintrin.h>
+
+#include <bmi2intrin.h>
+
+#include <lzcntintrin.h>
+
+#include <popcntintrin.h>
+
+#include <rdseedintrin.h>
+
+#include <prfchwintrin.h>
+
+#include <ammintrin.h>
+
+#include <fma4intrin.h>
+
+#include <xopintrin.h>
+
+#include <tbmintrin.h>
+
+#include <f16cintrin.h>
+
+/* FIXME: LWP */
+
+#endif /* __X86INTRIN_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
new file mode 100644
index 0000000..ae0b2cd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
@@ -0,0 +1,1010 @@
+/*===---- xmmintrin.h - SSE intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __XMMINTRIN_H
+#define __XMMINTRIN_H
+
+#include <mmintrin.h>
+
+typedef int __v4si __attribute__((__vector_size__(16)));
+typedef float __v4sf __attribute__((__vector_size__(16)));
+typedef float __m128 __attribute__((__vector_size__(16)));
+
+/* This header should only be included in a hosted environment as it depends on
+ * a standard library to provide allocation routines. */
+#if __STDC_HOSTED__
+#include <mm_malloc.h>
+#endif
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse")))
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_add_ss(__m128 __a, __m128 __b)
+{
+ __a[0] += __b[0];
+ return __a;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_add_ps(__m128 __a, __m128 __b)
+{
+ return __a + __b;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_sub_ss(__m128 __a, __m128 __b)
+{
+ __a[0] -= __b[0];
+ return __a;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_sub_ps(__m128 __a, __m128 __b)
+{
+ return __a - __b;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mul_ss(__m128 __a, __m128 __b)
+{
+ __a[0] *= __b[0];
+ return __a;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mul_ps(__m128 __a, __m128 __b)
+{
+ return __a * __b;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_div_ss(__m128 __a, __m128 __b)
+{
+ __a[0] /= __b[0];
+ return __a;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_div_ps(__m128 __a, __m128 __b)
+{
+ return __a / __b;
+}
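+
+/* Illustrative note (not part of the original header): the "_ss" forms above
+ * touch only the low single-precision lane and pass the upper three lanes of
+ * __a through unchanged, while the "_ps" forms operate on all four lanes:
+ *
+ *   __m128 x = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);  // lanes {1,2,3,4}
+ *   __m128 y = _mm_set_ps(40.f, 30.f, 20.f, 10.f);
+ *   __m128 s = _mm_add_ss(x, y);                     // {11, 2, 3, 4}
+ *   __m128 p = _mm_add_ps(x, y);                     // {11, 22, 33, 44}
+ */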
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_sqrt_ss(__m128 __a)
+{
+ __m128 __c = __builtin_ia32_sqrtss(__a);
+ return (__m128) { __c[0], __a[1], __a[2], __a[3] };
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_sqrt_ps(__m128 __a)
+{
+ return __builtin_ia32_sqrtps(__a);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rcp_ss(__m128 __a)
+{
+ __m128 __c = __builtin_ia32_rcpss(__a);
+ return (__m128) { __c[0], __a[1], __a[2], __a[3] };
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rcp_ps(__m128 __a)
+{
+ return __builtin_ia32_rcpps(__a);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rsqrt_ss(__m128 __a)
+{
+ __m128 __c = __builtin_ia32_rsqrtss(__a);
+ return (__m128) { __c[0], __a[1], __a[2], __a[3] };
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_rsqrt_ps(__m128 __a)
+{
+ return __builtin_ia32_rsqrtps(__a);
+}
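+
+/* Illustrative note (not part of the original header): unlike _mm_sqrt_*,
+ * the rcp/rsqrt forms are hardware *approximations* (roughly 12 bits of
+ * relative accuracy on typical implementations); callers needing more
+ * precision usually refine the estimate with a Newton-Raphson step, e.g.
+ * for 1/x, using e' = e * (2 - x * e):
+ *
+ *   __m128 e = _mm_rcp_ps(x);
+ *   e = _mm_sub_ps(_mm_add_ps(e, e), _mm_mul_ps(x, _mm_mul_ps(e, e)));
+ */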
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_min_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_minss(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_min_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_minps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_max_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_maxss(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_max_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_maxps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_and_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)((__v4si)__a & (__v4si)__b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_andnot_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)(~(__v4si)__a & (__v4si)__b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_or_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)((__v4si)__a | (__v4si)__b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_xor_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)((__v4si)__a ^ (__v4si)__b);
+}
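+
+/* Illustrative note (not part of the original header): _mm_andnot_ps
+ * complements its *first* operand, computing (~__a) & __b. Together with
+ * and/or it gives the classic branch-free select on a compare mask (each
+ * mask lane all-ones or all-zeros, as produced by the compares below):
+ *
+ *   static inline __m128 select(__m128 mask, __m128 t, __m128 f) {
+ *     return _mm_or_ps(_mm_and_ps(mask, t), _mm_andnot_ps(mask, f));
+ *   }
+ */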
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpeq_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpeqss(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpeq_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpeqps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmplt_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpltss(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmplt_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpltps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmple_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpless(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmple_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpleps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpgt_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_shufflevector(__a,
+ __builtin_ia32_cmpltss(__b, __a),
+ 4, 1, 2, 3);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpgt_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpltps(__b, __a);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpge_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_shufflevector(__a,
+ __builtin_ia32_cmpless(__b, __a),
+ 4, 1, 2, 3);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpge_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpleps(__b, __a);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpneq_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpneqss(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpneq_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpneqps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpnlt_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpnltss(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpnlt_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpnltps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpnle_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpnless(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpnle_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpnleps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpngt_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_shufflevector(__a,
+ __builtin_ia32_cmpnltss(__b, __a),
+ 4, 1, 2, 3);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpngt_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpnltps(__b, __a);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpnge_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_shufflevector(__a,
+ __builtin_ia32_cmpnless(__b, __a),
+ 4, 1, 2, 3);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpnge_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpnleps(__b, __a);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpord_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpordss(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpord_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpordps(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpunord_ss(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpunordss(__a, __b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cmpunord_ps(__m128 __a, __m128 __b)
+{
+ return (__m128)__builtin_ia32_cmpunordps(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comieq_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_comieq(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comilt_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_comilt(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comile_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_comile(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comigt_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_comigt(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comige_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_comige(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_comineq_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_comineq(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomieq_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_ucomieq(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomilt_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_ucomilt(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomile_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_ucomile(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomigt_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_ucomigt(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomige_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_ucomige(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_ucomineq_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_ia32_ucomineq(__a, __b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvtss_si32(__m128 __a)
+{
+ return __builtin_ia32_cvtss2si(__a);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvt_ss2si(__m128 __a)
+{
+ return _mm_cvtss_si32(__a);
+}
+
+#ifdef __x86_64__
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvtss_si64(__m128 __a)
+{
+ return __builtin_ia32_cvtss2si64(__a);
+}
+
+#endif
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtps_pi32(__m128 __a)
+{
+ return (__m64)__builtin_ia32_cvtps2pi(__a);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvt_ps2pi(__m128 __a)
+{
+ return _mm_cvtps_pi32(__a);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvttss_si32(__m128 __a)
+{
+ return __a[0];
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_cvtt_ss2si(__m128 __a)
+{
+ return _mm_cvttss_si32(__a);
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm_cvttss_si64(__m128 __a)
+{
+ return __a[0];
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvttps_pi32(__m128 __a)
+{
+ return (__m64)__builtin_ia32_cvttps2pi(__a);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtt_ps2pi(__m128 __a)
+{
+ return _mm_cvttps_pi32(__a);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtsi32_ss(__m128 __a, int __b)
+{
+ __a[0] = __b;
+ return __a;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvt_si2ss(__m128 __a, int __b)
+{
+ return _mm_cvtsi32_ss(__a, __b);
+}
+
+#ifdef __x86_64__
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtsi64_ss(__m128 __a, long long __b)
+{
+ __a[0] = __b;
+ return __a;
+}
+
+#endif
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpi32_ps(__m128 __a, __m64 __b)
+{
+ return __builtin_ia32_cvtpi2ps(__a, (__v2si)__b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvt_pi2ps(__m128 __a, __m64 __b)
+{
+ return _mm_cvtpi32_ps(__a, __b);
+}
+
+static __inline__ float __DEFAULT_FN_ATTRS
+_mm_cvtss_f32(__m128 __a)
+{
+ return __a[0];
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_loadh_pi(__m128 __a, const __m64 *__p)
+{
+ typedef float __mm_loadh_pi_v2f32 __attribute__((__vector_size__(8)));
+ struct __mm_loadh_pi_struct {
+ __mm_loadh_pi_v2f32 __u;
+ } __attribute__((__packed__, __may_alias__));
+ __mm_loadh_pi_v2f32 __b = ((struct __mm_loadh_pi_struct*)__p)->__u;
+ __m128 __bb = __builtin_shufflevector(__b, __b, 0, 1, 0, 1);
+ return __builtin_shufflevector(__a, __bb, 0, 1, 4, 5);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_loadl_pi(__m128 __a, const __m64 *__p)
+{
+ typedef float __mm_loadl_pi_v2f32 __attribute__((__vector_size__(8)));
+ struct __mm_loadl_pi_struct {
+ __mm_loadl_pi_v2f32 __u;
+ } __attribute__((__packed__, __may_alias__));
+ __mm_loadl_pi_v2f32 __b = ((struct __mm_loadl_pi_struct*)__p)->__u;
+ __m128 __bb = __builtin_shufflevector(__b, __b, 0, 1, 0, 1);
+ return __builtin_shufflevector(__a, __bb, 4, 5, 2, 3);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_load_ss(const float *__p)
+{
+ struct __mm_load_ss_struct {
+ float __u;
+ } __attribute__((__packed__, __may_alias__));
+ float __u = ((struct __mm_load_ss_struct*)__p)->__u;
+ return (__m128){ __u, 0, 0, 0 };
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_load1_ps(const float *__p)
+{
+ struct __mm_load1_ps_struct {
+ float __u;
+ } __attribute__((__packed__, __may_alias__));
+ float __u = ((struct __mm_load1_ps_struct*)__p)->__u;
+ return (__m128){ __u, __u, __u, __u };
+}
+
+#define _mm_load_ps1(p) _mm_load1_ps(p)
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_load_ps(const float *__p)
+{
+ return *(__m128*)__p;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_loadu_ps(const float *__p)
+{
+ struct __loadu_ps {
+ __m128 __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_ps*)__p)->__v;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_loadr_ps(const float *__p)
+{
+ __m128 __a = _mm_load_ps(__p);
+ return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
+}
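
A usage sketch, not part of the patch: _mm_load_ps requires the pointer to
be 16-byte aligned, while _mm_loadu_ps tolerates any alignment; the aligned
form is the cheaper choice when alignment can be guaranteed.

    #include <xmmintrin.h>

    static __m128 load_any(const float *p) {
      return _mm_loadu_ps(p);      /* safe for any alignment of p */
    }

    static __m128 load_aligned_demo(void) {
      float buf[4] __attribute__((aligned(16))) = {1, 2, 3, 4};
      return _mm_load_ps(buf);     /* buf is 16-byte aligned by construction */
    }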
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_undefined_ps(void)
+{
+ return (__m128)__builtin_ia32_undef128();
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_set_ss(float __w)
+{
+ return (__m128){ __w, 0, 0, 0 };
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_set1_ps(float __w)
+{
+ return (__m128){ __w, __w, __w, __w };
+}
+
+/* Microsoft specific. */
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_set_ps1(float __w)
+{
+ return _mm_set1_ps(__w);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_set_ps(float __z, float __y, float __x, float __w)
+{
+ return (__m128){ __w, __x, __y, __z };
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_setr_ps(float __z, float __y, float __x, float __w)
+{
+ return (__m128){ __z, __y, __x, __w };
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_setzero_ps(void)
+{
+ return (__m128){ 0, 0, 0, 0 };
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeh_pi(__m64 *__p, __m128 __a)
+{
+ __builtin_ia32_storehps((__v2si *)__p, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storel_pi(__m64 *__p, __m128 __a)
+{
+ __builtin_ia32_storelps((__v2si *)__p, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_ss(float *__p, __m128 __a)
+{
+ struct __mm_store_ss_struct {
+ float __u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_store_ss_struct*)__p)->__u = __a[0];
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeu_ps(float *__p, __m128 __a)
+{
+ __builtin_ia32_storeups(__p, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store1_ps(float *__p, __m128 __a)
+{
+ __a = __builtin_shufflevector(__a, __a, 0, 0, 0, 0);
+ _mm_storeu_ps(__p, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_ps1(float *__p, __m128 __a)
+{
+ _mm_store1_ps(__p, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_store_ps(float *__p, __m128 __a)
+{
+ *(__m128 *)__p = __a;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storer_ps(float *__p, __m128 __a)
+{
+ __a = __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
+ _mm_store_ps(__p, __a);
+}
+
+#define _MM_HINT_T0 3
+#define _MM_HINT_T1 2
+#define _MM_HINT_T2 1
+#define _MM_HINT_NTA 0
+
+#ifndef _MSC_VER
+/* FIXME: We have to #define this because "sel" must be a constant integer, and
+ Sema doesn't do any form of constant propagation yet. */
+
+#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, (sel)))
+#endif
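
A usage sketch, not part of the patch: prefetching a little ahead of a
streaming read can hide memory latency; the hint selects the target cache
level (_MM_HINT_T0 means all levels, _MM_HINT_NTA means non-temporal). The
prefetch distance of 16 elements below is illustrative only.

    #include <xmmintrin.h>

    static float sum(const float *data, int n) {
      float acc = 0.0f;
      for (int i = 0; i < n; ++i) {
        if (i + 16 < n)
          _mm_prefetch((const char *)&data[i + 16], _MM_HINT_T0);
        acc += data[i];
      }
      return acc;
    }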
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_pi(__m64 *__p, __m64 __a)
+{
+ __builtin_ia32_movntq(__p, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_ps(float *__p, __m128 __a)
+{
+ __builtin_ia32_movntps(__p, __a);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_sfence(void)
+{
+ __builtin_ia32_sfence();
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_extract_pi16(__m64 __a, int __n)
+{
+ __v4hi __b = (__v4hi)__a;
+ return (unsigned short)__b[__n & 3];
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_insert_pi16(__m64 __a, int __d, int __n)
+{
+ __v4hi __b = (__v4hi)__a;
+ __b[__n & 3] = __d;
+ return (__m64)__b;
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_max_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pmaxsw((__v4hi)__a, (__v4hi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_max_pu8(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pmaxub((__v8qi)__a, (__v8qi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_min_pi16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pminsw((__v4hi)__a, (__v4hi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_min_pu8(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pminub((__v8qi)__a, (__v8qi)__b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_movemask_pi8(__m64 __a)
+{
+ return __builtin_ia32_pmovmskb((__v8qi)__a);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_mulhi_pu16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pmulhuw((__v4hi)__a, (__v4hi)__b);
+}
+
+#define _mm_shuffle_pi16(a, n) __extension__ ({ \
+ (__m64)__builtin_ia32_pshufw((__v4hi)(__m64)(a), (n)); })
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_maskmove_si64(__m64 __d, __m64 __n, char *__p)
+{
+ __builtin_ia32_maskmovq((__v8qi)__d, (__v8qi)__n, __p);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_avg_pu8(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pavgb((__v8qi)__a, (__v8qi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_avg_pu16(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_pavgw((__v4hi)__a, (__v4hi)__b);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_sad_pu8(__m64 __a, __m64 __b)
+{
+ return (__m64)__builtin_ia32_psadbw((__v8qi)__a, (__v8qi)__b);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_getcsr(void)
+{
+ return __builtin_ia32_stmxcsr();
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_setcsr(unsigned int __i)
+{
+ __builtin_ia32_ldmxcsr(__i);
+}
+
+#define _mm_shuffle_ps(a, b, mask) __extension__ ({ \
+ (__m128)__builtin_shufflevector((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), \
+ (mask) & 0x3, ((mask) & 0xc) >> 2, \
+ (((mask) & 0x30) >> 4) + 4, \
+ (((mask) & 0xc0) >> 6) + 4); })
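
A usage sketch, not part of the patch: the 8-bit mask is four 2-bit lane
selectors; the low two pick lanes from the first operand and the high two
pick lanes from the second. The _MM_SHUFFLE macro defined further down
packs the selectors in (z, y, x, w) order.

    #include <xmmintrin.h>

    static __m128 swap_halves(__m128 a) {
      /* Lanes (0,1,2,3) -> (2,3,0,1). */
      return _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 0, 3, 2));
    }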
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_unpackhi_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_shufflevector(__a, __b, 2, 6, 3, 7);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_unpacklo_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_shufflevector(__a, __b, 0, 4, 1, 5);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_move_ss(__m128 __a, __m128 __b)
+{
+ return __builtin_shufflevector(__a, __b, 4, 1, 2, 3);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_movehl_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_shufflevector(__a, __b, 6, 7, 2, 3);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_movelh_ps(__m128 __a, __m128 __b)
+{
+ return __builtin_shufflevector(__a, __b, 0, 1, 4, 5);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpi16_ps(__m64 __a)
+{
+ __m64 __b, __c;
+ __m128 __r;
+
+ __b = _mm_setzero_si64();
+ __b = _mm_cmpgt_pi16(__b, __a);
+ __c = _mm_unpackhi_pi16(__a, __b);
+ __r = _mm_setzero_ps();
+ __r = _mm_cvtpi32_ps(__r, __c);
+ __r = _mm_movelh_ps(__r, __r);
+ __c = _mm_unpacklo_pi16(__a, __b);
+ __r = _mm_cvtpi32_ps(__r, __c);
+
+ return __r;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpu16_ps(__m64 __a)
+{
+ __m64 __b, __c;
+ __m128 __r;
+
+ __b = _mm_setzero_si64();
+ __c = _mm_unpackhi_pi16(__a, __b);
+ __r = _mm_setzero_ps();
+ __r = _mm_cvtpi32_ps(__r, __c);
+ __r = _mm_movelh_ps(__r, __r);
+ __c = _mm_unpacklo_pi16(__a, __b);
+ __r = _mm_cvtpi32_ps(__r, __c);
+
+ return __r;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpi8_ps(__m64 __a)
+{
+ __m64 __b;
+
+ __b = _mm_setzero_si64();
+ __b = _mm_cmpgt_pi8(__b, __a);
+ __b = _mm_unpacklo_pi8(__a, __b);
+
+ return _mm_cvtpi16_ps(__b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpu8_ps(__m64 __a)
+{
+ __m64 __b;
+
+ __b = _mm_setzero_si64();
+ __b = _mm_unpacklo_pi8(__a, __b);
+
+ return _mm_cvtpi16_ps(__b);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtpi32x2_ps(__m64 __a, __m64 __b)
+{
+ __m128 __c;
+
+ __c = _mm_setzero_ps();
+ __c = _mm_cvtpi32_ps(__c, __b);
+ __c = _mm_movelh_ps(__c, __c);
+
+ return _mm_cvtpi32_ps(__c, __a);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtps_pi16(__m128 __a)
+{
+ __m64 __b, __c;
+
+ __b = _mm_cvtps_pi32(__a);
+ __a = _mm_movehl_ps(__a, __a);
+ __c = _mm_cvtps_pi32(__a);
+
+ return _mm_packs_pi32(__b, __c);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_mm_cvtps_pi8(__m128 __a)
+{
+ __m64 __b, __c;
+
+ __b = _mm_cvtps_pi16(__a);
+ __c = _mm_setzero_si64();
+
+ return _mm_packs_pi16(__b, __c);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_movemask_ps(__m128 __a)
+{
+ return __builtin_ia32_movmskps(__a);
+}
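
A usage sketch, not part of the patch: a packed comparison yields all-ones
or all-zeros per lane, and _mm_movemask_ps collapses the four sign bits
into a scalar that ordinary control flow can test.

    #include <xmmintrin.h>

    static int any_negative(__m128 v) {
      __m128 m = _mm_cmplt_ps(v, _mm_setzero_ps()); /* all-ones where v < 0 */
      return _mm_movemask_ps(m) != 0;               /* bit i = sign of lane i */
    }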
+
+#ifdef _MSC_VER
+#define _MM_ALIGN16 __declspec(align(16))
+#endif
+
+#define _MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
+
+#define _MM_EXCEPT_INVALID (0x0001)
+#define _MM_EXCEPT_DENORM (0x0002)
+#define _MM_EXCEPT_DIV_ZERO (0x0004)
+#define _MM_EXCEPT_OVERFLOW (0x0008)
+#define _MM_EXCEPT_UNDERFLOW (0x0010)
+#define _MM_EXCEPT_INEXACT (0x0020)
+#define _MM_EXCEPT_MASK (0x003f)
+
+#define _MM_MASK_INVALID (0x0080)
+#define _MM_MASK_DENORM (0x0100)
+#define _MM_MASK_DIV_ZERO (0x0200)
+#define _MM_MASK_OVERFLOW (0x0400)
+#define _MM_MASK_UNDERFLOW (0x0800)
+#define _MM_MASK_INEXACT (0x1000)
+#define _MM_MASK_MASK (0x1f80)
+
+#define _MM_ROUND_NEAREST (0x0000)
+#define _MM_ROUND_DOWN (0x2000)
+#define _MM_ROUND_UP (0x4000)
+#define _MM_ROUND_TOWARD_ZERO (0x6000)
+#define _MM_ROUND_MASK (0x6000)
+
+#define _MM_FLUSH_ZERO_MASK (0x8000)
+#define _MM_FLUSH_ZERO_ON (0x8000)
+#define _MM_FLUSH_ZERO_OFF (0x0000)
+
+#define _MM_GET_EXCEPTION_MASK() (_mm_getcsr() & _MM_MASK_MASK)
+#define _MM_GET_EXCEPTION_STATE() (_mm_getcsr() & _MM_EXCEPT_MASK)
+#define _MM_GET_FLUSH_ZERO_MODE() (_mm_getcsr() & _MM_FLUSH_ZERO_MASK)
+#define _MM_GET_ROUNDING_MODE() (_mm_getcsr() & _MM_ROUND_MASK)
+
+#define _MM_SET_EXCEPTION_MASK(x) (_mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | (x)))
+#define _MM_SET_EXCEPTION_STATE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | (x)))
+#define _MM_SET_FLUSH_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | (x)))
+#define _MM_SET_ROUNDING_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | (x)))
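
A usage sketch, not part of the patch: these macros read-modify-write the
MXCSR control/status register. Enabling flush-to-zero trades strict IEEE
handling of denormals for speed, which can matter in DSP-style inner loops.

    #include <xmmintrin.h>

    static void enable_flush_to_zero(void) {
      _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
    }

    static int rounds_to_nearest(void) {
      return _MM_GET_ROUNDING_MODE() == _MM_ROUND_NEAREST;
    }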
+
+#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
+do { \
+ __m128 tmp3, tmp2, tmp1, tmp0; \
+ tmp0 = _mm_unpacklo_ps((row0), (row1)); \
+ tmp2 = _mm_unpacklo_ps((row2), (row3)); \
+ tmp1 = _mm_unpackhi_ps((row0), (row1)); \
+ tmp3 = _mm_unpackhi_ps((row2), (row3)); \
+ (row0) = _mm_movelh_ps(tmp0, tmp2); \
+ (row1) = _mm_movehl_ps(tmp2, tmp0); \
+ (row2) = _mm_movelh_ps(tmp1, tmp3); \
+ (row3) = _mm_movehl_ps(tmp3, tmp1); \
+} while (0)
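
A usage sketch, not part of the patch: the macro transposes a 4x4 matrix
held in four registers in place, using only unpack and move operations.

    #include <xmmintrin.h>

    static void transpose4x4(float m[16]) {       /* row-major 4x4 */
      __m128 r0 = _mm_loadu_ps(&m[0]);
      __m128 r1 = _mm_loadu_ps(&m[4]);
      __m128 r2 = _mm_loadu_ps(&m[8]);
      __m128 r3 = _mm_loadu_ps(&m[12]);
      _MM_TRANSPOSE4_PS(r0, r1, r2, r3);          /* rows become columns */
      _mm_storeu_ps(&m[0], r0);
      _mm_storeu_ps(&m[4], r1);
      _mm_storeu_ps(&m[8], r2);
      _mm_storeu_ps(&m[12], r3);
    }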
+
+/* Aliases for compatibility. */
+#define _m_pextrw _mm_extract_pi16
+#define _m_pinsrw _mm_insert_pi16
+#define _m_pmaxsw _mm_max_pi16
+#define _m_pmaxub _mm_max_pu8
+#define _m_pminsw _mm_min_pi16
+#define _m_pminub _mm_min_pu8
+#define _m_pmovmskb _mm_movemask_pi8
+#define _m_pmulhuw _mm_mulhi_pu16
+#define _m_pshufw _mm_shuffle_pi16
+#define _m_maskmovq _mm_maskmove_si64
+#define _m_pavgb _mm_avg_pu8
+#define _m_pavgw _mm_avg_pu16
+#define _m_psadbw _mm_sad_pu8
+#define _m_ _mm_
+
+#undef __DEFAULT_FN_ATTRS
+
+/* Ugly hack for backwards-compatibility (compatible with gcc) */
+#if defined(__SSE2__) && !__has_feature(modules)
+#include <emmintrin.h>
+#endif
+
+#endif /* __XMMINTRIN_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/xopintrin.h b/contrib/llvm/tools/clang/lib/Headers/xopintrin.h
new file mode 100644
index 0000000..f07f51c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/xopintrin.h
@@ -0,0 +1,782 @@
+/*===---- xopintrin.h - XOP intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <xopintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __XOPINTRIN_H
+#define __XOPINTRIN_H
+
+#include <fma4intrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xop")))
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacssww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_macc_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacsww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccsd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccs_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacssdd((__v4si)__A, (__v4si)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_macc_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacsdd((__v4si)__A, (__v4si)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccslo_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacssdql((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_macclo_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacsdql((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccshi_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacssdqh((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_macchi_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacsdqh((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maddsd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmadcsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maddd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmadcswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddw_epi8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddbw((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddd_epi8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddbd((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddq_epi8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddbq((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddd_epi16(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddwd((__v8hi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddq_epi16(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddwq((__v8hi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddq_epi32(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphadddq((__v4si)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddw_epu8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddubw((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddd_epu8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddubd((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddq_epu8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddubq((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddd_epu16(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphadduwd((__v8hi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddq_epu16(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphadduwq((__v8hi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddq_epu32(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddudq((__v4si)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hsubw_epi8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphsubbw((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hsubd_epi16(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphsubwd((__v8hi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_hsubq_epi32(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphsubdq((__v4si)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmov_si128(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpcmov(__A, __B, __C);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cmov_si256(__m256i __A, __m256i __B, __m256i __C)
+{
+ return (__m256i)__builtin_ia32_vpcmov_256(__A, __B, __C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_perm_epi8(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpperm((__v16qi)__A, (__v16qi)__B, (__v16qi)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_rot_epi8(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vprotb((__v16qi)__A, (__v16qi)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_rot_epi16(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vprotw((__v8hi)__A, (__v8hi)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_rot_epi32(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vprotd((__v4si)__A, (__v4si)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_rot_epi64(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vprotq((__v2di)__A, (__v2di)__B);
+}
+
+#define _mm_roti_epi8(A, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vprotbi((__v16qi)(__m128i)(A), (N)); })
+
+#define _mm_roti_epi16(A, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vprotwi((__v8hi)(__m128i)(A), (N)); })
+
+#define _mm_roti_epi32(A, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vprotdi((__v4si)(__m128i)(A), (N)); })
+
+#define _mm_roti_epi64(A, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vprotqi((__v2di)(__m128i)(A), (N)); })
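
A usage sketch, not part of the patch (requires compiling with -mxop): the
immediate-count rotates must take a compile-time constant, which is why
they are macros rather than inline functions; a negative count rotates
right.

    #include <x86intrin.h>

    static __m128i rotl8_lanes(__m128i v) {
      return _mm_roti_epi32(v, 8);   /* rotate each 32-bit lane left by 8 */
    }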
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_shl_epi8(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshlb((__v16qi)__A, (__v16qi)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_shl_epi16(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshlw((__v8hi)__A, (__v8hi)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_shl_epi32(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshld((__v4si)__A, (__v4si)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_shl_epi64(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshlq((__v2di)__A, (__v2di)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha_epi8(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshab((__v16qi)__A, (__v16qi)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha_epi16(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshaw((__v8hi)__A, (__v8hi)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha_epi32(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshad((__v4si)__A, (__v4si)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha_epi64(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshaq((__v2di)__A, (__v2di)__B);
+}
+
+#define _mm_com_epu8(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomub((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (N)); })
+
+#define _mm_com_epu16(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomuw((__v8hi)(__m128i)(A), \
+ (__v8hi)(__m128i)(B), (N)); })
+
+#define _mm_com_epu32(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomud((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), (N)); })
+
+#define _mm_com_epu64(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomuq((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), (N)); })
+
+#define _mm_com_epi8(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomb((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (N)); })
+
+#define _mm_com_epi16(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomw((__v8hi)(__m128i)(A), \
+ (__v8hi)(__m128i)(B), (N)); })
+
+#define _mm_com_epi32(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomd((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), (N)); })
+
+#define _mm_com_epi64(A, B, N) __extension__ ({ \
+ (__m128i)__builtin_ia32_vpcomq((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), (N)); })
+
+#define _MM_PCOMCTRL_LT 0
+#define _MM_PCOMCTRL_LE 1
+#define _MM_PCOMCTRL_GT 2
+#define _MM_PCOMCTRL_GE 3
+#define _MM_PCOMCTRL_EQ 4
+#define _MM_PCOMCTRL_NEQ 5
+#define _MM_PCOMCTRL_FALSE 6
+#define _MM_PCOMCTRL_TRUE 7
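
A usage sketch, not part of the patch: the generic _mm_com_* macros take
one of these selectors, and the named wrappers below simply pin the
selector, so the two spellings are interchangeable.

    #include <x86intrin.h>

    static __m128i lanes_le(__m128i a, __m128i b) {    /* needs -mxop */
      return _mm_com_epu8(a, b, _MM_PCOMCTRL_LE);      /* == _mm_comle_epu8 */
    }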
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epu8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epu16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epu32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epu64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epi8(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epi16(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epi32(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comlt_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_LT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comle_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epi64(__m128i __A, __m128i __B)
+{
+ return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+#define _mm_permute2_pd(X, Y, C, I) __extension__ ({ \
+ (__m128d)__builtin_ia32_vpermil2pd((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), \
+ (__v2di)(__m128i)(C), (I)); })
+
+#define _mm256_permute2_pd(X, Y, C, I) __extension__ ({ \
+ (__m256d)__builtin_ia32_vpermil2pd256((__v4df)(__m256d)(X), \
+ (__v4df)(__m256d)(Y), \
+ (__v4di)(__m256i)(C), (I)); })
+
+#define _mm_permute2_ps(X, Y, C, I) __extension__ ({ \
+ (__m128)__builtin_ia32_vpermil2ps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \
+ (__v4si)(__m128i)(C), (I)); })
+
+#define _mm256_permute2_ps(X, Y, C, I) __extension__ ({ \
+ (__m256)__builtin_ia32_vpermil2ps256((__v8sf)(__m256)(X), \
+ (__v8sf)(__m256)(Y), \
+ (__v8si)(__m256i)(C), (I)); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_frcz_ss(__m128 __A)
+{
+ return (__m128)__builtin_ia32_vfrczss((__v4sf)__A);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_frcz_sd(__m128d __A)
+{
+ return (__m128d)__builtin_ia32_vfrczsd((__v2df)__A);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_frcz_ps(__m128 __A)
+{
+ return (__m128)__builtin_ia32_vfrczps((__v4sf)__A);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_frcz_pd(__m128d __A)
+{
+ return (__m128d)__builtin_ia32_vfrczpd((__v2df)__A);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_frcz_ps(__m256 __A)
+{
+ return (__m256)__builtin_ia32_vfrczps256((__v8sf)__A);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_frcz_pd(__m256d __A)
+{
+ return (__m256d)__builtin_ia32_vfrczpd256((__v4df)__A);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __XOPINTRIN_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/xsavecintrin.h b/contrib/llvm/tools/clang/lib/Headers/xsavecintrin.h
new file mode 100644
index 0000000..598470a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/xsavecintrin.h
@@ -0,0 +1,48 @@
+/*===---- xsavecintrin.h - XSAVEC intrinsic -------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsavecintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVECINTRIN_H
+#define __XSAVECINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsavec")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsavec(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsavec(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsavec64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsavec64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Headers/xsaveintrin.h b/contrib/llvm/tools/clang/lib/Headers/xsaveintrin.h
new file mode 100644
index 0000000..a2e6b2e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/xsaveintrin.h
@@ -0,0 +1,58 @@
+/*===---- xsaveintrin.h - XSAVE intrinsic ---------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsaveintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVEINTRIN_H
+#define __XSAVEINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsave")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsave(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsave(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstor(void *__p, unsigned long long __m) {
+ __builtin_ia32_xrstor(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsave64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsave64(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstor64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xrstor64(__p, __m);
+}
+#endif
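
A usage sketch, not part of the patch (requires -mxsave plus OS support):
the save area must be 64-byte aligned, and its required size comes from
CPUID leaf 0xD; the fixed 4096-byte buffer below is an assumption that is
only safe because the mask selects just the x87 and SSE components.

    #include <immintrin.h>

    static char area[4096] __attribute__((aligned(64)));

    static void checkpoint_fp_sse(void) {
      unsigned long long mask = 0x3;  /* bit 0: x87, bit 1: SSE */
      _xsave(area, mask);
      /* ... state may be clobbered here ... */
      _xrstor(area, mask);
    }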
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Headers/xsaveoptintrin.h b/contrib/llvm/tools/clang/lib/Headers/xsaveoptintrin.h
new file mode 100644
index 0000000..d3faae7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/xsaveoptintrin.h
@@ -0,0 +1,48 @@
+/*===---- xsaveoptintrin.h - XSAVEOPT intrinsic ---------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsaveoptintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVEOPTINTRIN_H
+#define __XSAVEOPTINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsaveopt")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaveopt(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsaveopt(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaveopt64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsaveopt64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Headers/xsavesintrin.h b/contrib/llvm/tools/clang/lib/Headers/xsavesintrin.h
new file mode 100644
index 0000000..c5e540a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/xsavesintrin.h
@@ -0,0 +1,58 @@
+/*===---- xsavesintrin.h - XSAVES intrinsic -------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsavesintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVESINTRIN_H
+#define __XSAVESINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsaves")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaves(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsaves(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstors(void *__p, unsigned long long __m) {
+ __builtin_ia32_xrstors(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstors64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xrstors64(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaves64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsaves64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Headers/xtestintrin.h b/contrib/llvm/tools/clang/lib/Headers/xtestintrin.h
new file mode 100644
index 0000000..9d3378f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/xtestintrin.h
@@ -0,0 +1,41 @@
+/*===---- xtestintrin.h - XTEST intrinsic ---------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xtestintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XTESTINTRIN_H
+#define __XTESTINTRIN_H
+
+/* xtest returns non-zero if the instruction is executed within an RTM
+ * transaction or an active HLE region. */
+/* FIXME: This can apply to either RTM or HLE. Deal with this when HLE is
+ * supported. */
+static __inline__ int
+ __attribute__((__always_inline__, __nodebug__, __target__("rtm")))
+ _xtest(void) {
+ return __builtin_ia32_xtest();
+}
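
A usage sketch, not part of the patch (requires -mrtm): _xbegin, _xend, and
_XBEGIN_STARTED come from rtmintrin.h, also pulled in via <immintrin.h>;
_xtest lets shared code detect at run time whether it is executing
transactionally.

    #include <immintrin.h>

    static int ran_transactionally(void) {
      if (_xbegin() == _XBEGIN_STARTED) {
        int in_tx = _xtest();     /* non-zero inside the transaction */
        _xend();
        return in_tx;
      }
      return 0;                   /* transaction aborted or never started */
    }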
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Index/CommentToXML.cpp b/contrib/llvm/tools/clang/lib/Index/CommentToXML.cpp
new file mode 100644
index 0000000..15f1696
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Index/CommentToXML.cpp
@@ -0,0 +1,1163 @@
+//===--- CommentToXML.cpp - Convert comments to XML representation --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Index/CommentToXML.h"
+#include "SimpleFormatContext.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Comment.h"
+#include "clang/AST/CommentVisitor.h"
+#include "clang/Format/Format.h"
+#include "clang/Index/USRGeneration.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/TinyPtrVector.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace clang::comments;
+using namespace clang::index;
+
+namespace {
+
+/// This comparison will sort parameters with a valid index by index, then
+/// vararg parameters, and invalid (unresolved) parameters last.
+class ParamCommandCommentCompareIndex {
+public:
+ bool operator()(const ParamCommandComment *LHS,
+ const ParamCommandComment *RHS) const {
+ unsigned LHSIndex = UINT_MAX;
+ unsigned RHSIndex = UINT_MAX;
+
+ if (LHS->isParamIndexValid()) {
+ if (LHS->isVarArgParam())
+ LHSIndex = UINT_MAX - 1;
+ else
+ LHSIndex = LHS->getParamIndex();
+ }
+ if (RHS->isParamIndexValid()) {
+ if (RHS->isVarArgParam())
+ RHSIndex = UINT_MAX - 1;
+ else
+ RHSIndex = RHS->getParamIndex();
+ }
+ return LHSIndex < RHSIndex;
+ }
+};
+
+/// This comparison will sort template parameters in the following order:
+/// \li real template parameters (depth = 1) in index order;
+/// \li all other names (depth > 1);
+/// \li unresolved names.
+class TParamCommandCommentComparePosition {
+public:
+ bool operator()(const TParamCommandComment *LHS,
+ const TParamCommandComment *RHS) const {
+ // Sort unresolved names last.
+ if (!LHS->isPositionValid())
+ return false;
+ if (!RHS->isPositionValid())
+ return true;
+
+ if (LHS->getDepth() > 1)
+ return false;
+ if (RHS->getDepth() > 1)
+ return true;
+
+ // Sort template parameters in index order.
+ if (LHS->getDepth() == 1 && RHS->getDepth() == 1)
+ return LHS->getIndex(0) < RHS->getIndex(0);
+
+ // Leave all other names in source order.
+ return true;
+ }
+};
+
+/// Separate parts of a FullComment.
+struct FullCommentParts {
+ /// Take a full comment apart and initialize members accordingly.
+ FullCommentParts(const FullComment *C,
+ const CommandTraits &Traits);
+
+ const BlockContentComment *Brief;
+ const BlockContentComment *Headerfile;
+ const ParagraphComment *FirstParagraph;
+ SmallVector<const BlockCommandComment *, 4> Returns;
+ SmallVector<const ParamCommandComment *, 8> Params;
+ SmallVector<const TParamCommandComment *, 4> TParams;
+ llvm::TinyPtrVector<const BlockCommandComment *> Exceptions;
+ SmallVector<const BlockContentComment *, 8> MiscBlocks;
+};
+
+FullCommentParts::FullCommentParts(const FullComment *C,
+ const CommandTraits &Traits) :
+ Brief(nullptr), Headerfile(nullptr), FirstParagraph(nullptr) {
+ for (Comment::child_iterator I = C->child_begin(), E = C->child_end();
+ I != E; ++I) {
+ const Comment *Child = *I;
+ if (!Child)
+ continue;
+ switch (Child->getCommentKind()) {
+ case Comment::NoCommentKind:
+ continue;
+
+ case Comment::ParagraphCommentKind: {
+ const ParagraphComment *PC = cast<ParagraphComment>(Child);
+ if (PC->isWhitespace())
+ break;
+ if (!FirstParagraph)
+ FirstParagraph = PC;
+
+ MiscBlocks.push_back(PC);
+ break;
+ }
+
+ case Comment::BlockCommandCommentKind: {
+ const BlockCommandComment *BCC = cast<BlockCommandComment>(Child);
+ const CommandInfo *Info = Traits.getCommandInfo(BCC->getCommandID());
+ if (!Brief && Info->IsBriefCommand) {
+ Brief = BCC;
+ break;
+ }
+ if (!Headerfile && Info->IsHeaderfileCommand) {
+ Headerfile = BCC;
+ break;
+ }
+ if (Info->IsReturnsCommand) {
+ Returns.push_back(BCC);
+ break;
+ }
+ if (Info->IsThrowsCommand) {
+ Exceptions.push_back(BCC);
+ break;
+ }
+ MiscBlocks.push_back(BCC);
+ break;
+ }
+
+ case Comment::ParamCommandCommentKind: {
+ const ParamCommandComment *PCC = cast<ParamCommandComment>(Child);
+ if (!PCC->hasParamName())
+ break;
+
+ if (!PCC->isDirectionExplicit() && !PCC->hasNonWhitespaceParagraph())
+ break;
+
+ Params.push_back(PCC);
+ break;
+ }
+
+ case Comment::TParamCommandCommentKind: {
+ const TParamCommandComment *TPCC = cast<TParamCommandComment>(Child);
+ if (!TPCC->hasParamName())
+ break;
+
+ if (!TPCC->hasNonWhitespaceParagraph())
+ break;
+
+ TParams.push_back(TPCC);
+ break;
+ }
+
+ case Comment::VerbatimBlockCommentKind:
+ MiscBlocks.push_back(cast<BlockCommandComment>(Child));
+ break;
+
+ case Comment::VerbatimLineCommentKind: {
+ const VerbatimLineComment *VLC = cast<VerbatimLineComment>(Child);
+ const CommandInfo *Info = Traits.getCommandInfo(VLC->getCommandID());
+ if (!Info->IsDeclarationCommand)
+ MiscBlocks.push_back(VLC);
+ break;
+ }
+
+ case Comment::TextCommentKind:
+ case Comment::InlineCommandCommentKind:
+ case Comment::HTMLStartTagCommentKind:
+ case Comment::HTMLEndTagCommentKind:
+ case Comment::VerbatimBlockLineCommentKind:
+ case Comment::FullCommentKind:
+ llvm_unreachable("AST node of this kind can't be a child of "
+ "a FullComment");
+ }
+ }
+
+ // Sort params in the order they are declared in the function prototype.
+ // Unresolved parameters are put at the end of the list in the same order
+ // they were seen in the comment.
+ std::stable_sort(Params.begin(), Params.end(),
+ ParamCommandCommentCompareIndex());
+
+ std::stable_sort(TParams.begin(), TParams.end(),
+ TParamCommandCommentComparePosition());
+}
+
+void printHTMLStartTagComment(const HTMLStartTagComment *C,
+ llvm::raw_svector_ostream &Result) {
+ Result << "<" << C->getTagName();
+
+ if (C->getNumAttrs() != 0) {
+ for (unsigned i = 0, e = C->getNumAttrs(); i != e; i++) {
+ Result << " ";
+ const HTMLStartTagComment::Attribute &Attr = C->getAttr(i);
+ Result << Attr.Name;
+ if (!Attr.Value.empty())
+ Result << "=\"" << Attr.Value << "\"";
+ }
+ }
+
+ if (!C->isSelfClosing())
+ Result << ">";
+ else
+ Result << "/>";
+}
+
+class CommentASTToHTMLConverter :
+ public ConstCommentVisitor<CommentASTToHTMLConverter> {
+public:
+ /// \param Str accumulator for HTML.
+ CommentASTToHTMLConverter(const FullComment *FC,
+ SmallVectorImpl<char> &Str,
+ const CommandTraits &Traits) :
+ FC(FC), Result(Str), Traits(Traits)
+ { }
+
+ // Inline content.
+ void visitTextComment(const TextComment *C);
+ void visitInlineCommandComment(const InlineCommandComment *C);
+ void visitHTMLStartTagComment(const HTMLStartTagComment *C);
+ void visitHTMLEndTagComment(const HTMLEndTagComment *C);
+
+ // Block content.
+ void visitParagraphComment(const ParagraphComment *C);
+ void visitBlockCommandComment(const BlockCommandComment *C);
+ void visitParamCommandComment(const ParamCommandComment *C);
+ void visitTParamCommandComment(const TParamCommandComment *C);
+ void visitVerbatimBlockComment(const VerbatimBlockComment *C);
+ void visitVerbatimBlockLineComment(const VerbatimBlockLineComment *C);
+ void visitVerbatimLineComment(const VerbatimLineComment *C);
+
+ void visitFullComment(const FullComment *C);
+
+ // Helpers.
+
+ /// Convert a paragraph that is not a block by itself (an argument to some
+ /// command).
+ void visitNonStandaloneParagraphComment(const ParagraphComment *C);
+
+ void appendToResultWithHTMLEscaping(StringRef S);
+
+private:
+ const FullComment *FC;
+ /// Output stream for HTML.
+ llvm::raw_svector_ostream Result;
+
+ const CommandTraits &Traits;
+};
+} // end unnamed namespace
+
+void CommentASTToHTMLConverter::visitTextComment(const TextComment *C) {
+ appendToResultWithHTMLEscaping(C->getText());
+}
+
+void CommentASTToHTMLConverter::visitInlineCommandComment(
+ const InlineCommandComment *C) {
+ // Nothing to render if no arguments supplied.
+ if (C->getNumArgs() == 0)
+ return;
+
+ // Nothing to render if argument is empty.
+ StringRef Arg0 = C->getArgText(0);
+ if (Arg0.empty())
+ return;
+
+ switch (C->getRenderKind()) {
+ case InlineCommandComment::RenderNormal:
+ for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i) {
+ appendToResultWithHTMLEscaping(C->getArgText(i));
+ Result << " ";
+ }
+ return;
+
+ case InlineCommandComment::RenderBold:
+ assert(C->getNumArgs() == 1);
+ Result << "<b>";
+ appendToResultWithHTMLEscaping(Arg0);
+ Result << "</b>";
+ return;
+ case InlineCommandComment::RenderMonospaced:
+ assert(C->getNumArgs() == 1);
+ Result << "<tt>";
+ appendToResultWithHTMLEscaping(Arg0);
+ Result << "</tt>";
+ return;
+ case InlineCommandComment::RenderEmphasized:
+ assert(C->getNumArgs() == 1);
+ Result << "<em>";
+ appendToResultWithHTMLEscaping(Arg0);
+ Result << "</em>";
+ return;
+ }
+}
+
+void CommentASTToHTMLConverter::visitHTMLStartTagComment(
+ const HTMLStartTagComment *C) {
+ printHTMLStartTagComment(C, Result);
+}
+
+void CommentASTToHTMLConverter::visitHTMLEndTagComment(
+ const HTMLEndTagComment *C) {
+ Result << "</" << C->getTagName() << ">";
+}
+
+void CommentASTToHTMLConverter::visitParagraphComment(
+ const ParagraphComment *C) {
+ if (C->isWhitespace())
+ return;
+
+ Result << "<p>";
+ for (Comment::child_iterator I = C->child_begin(), E = C->child_end();
+ I != E; ++I) {
+ visit(*I);
+ }
+ Result << "</p>";
+}
+
+void CommentASTToHTMLConverter::visitBlockCommandComment(
+ const BlockCommandComment *C) {
+ const CommandInfo *Info = Traits.getCommandInfo(C->getCommandID());
+ if (Info->IsBriefCommand) {
+ Result << "<p class=\"para-brief\">";
+ visitNonStandaloneParagraphComment(C->getParagraph());
+ Result << "</p>";
+ return;
+ }
+ if (Info->IsReturnsCommand) {
+ Result << "<p class=\"para-returns\">"
+ "<span class=\"word-returns\">Returns</span> ";
+ visitNonStandaloneParagraphComment(C->getParagraph());
+ Result << "</p>";
+ return;
+ }
+ // We don't know anything about this command. Just render the paragraph.
+ visit(C->getParagraph());
+}
+
+void CommentASTToHTMLConverter::visitParamCommandComment(
+ const ParamCommandComment *C) {
+ if (C->isParamIndexValid()) {
+ if (C->isVarArgParam()) {
+ Result << "<dt class=\"param-name-index-vararg\">";
+ appendToResultWithHTMLEscaping(C->getParamNameAsWritten());
+ } else {
+ Result << "<dt class=\"param-name-index-"
+ << C->getParamIndex()
+ << "\">";
+ appendToResultWithHTMLEscaping(C->getParamName(FC));
+ }
+ } else {
+ Result << "<dt class=\"param-name-index-invalid\">";
+ appendToResultWithHTMLEscaping(C->getParamNameAsWritten());
+ }
+ Result << "</dt>";
+
+ if (C->isParamIndexValid()) {
+ if (C->isVarArgParam())
+ Result << "<dd class=\"param-descr-index-vararg\">";
+ else
+ Result << "<dd class=\"param-descr-index-"
+ << C->getParamIndex()
+ << "\">";
+ } else
+ Result << "<dd class=\"param-descr-index-invalid\">";
+
+ visitNonStandaloneParagraphComment(C->getParagraph());
+ Result << "</dd>";
+}
+
+void CommentASTToHTMLConverter::visitTParamCommandComment(
+ const TParamCommandComment *C) {
+ if (C->isPositionValid()) {
+ if (C->getDepth() == 1)
+ Result << "<dt class=\"tparam-name-index-"
+ << C->getIndex(0)
+ << "\">";
+ else
+ Result << "<dt class=\"tparam-name-index-other\">";
+ appendToResultWithHTMLEscaping(C->getParamName(FC));
+ } else {
+ Result << "<dt class=\"tparam-name-index-invalid\">";
+ appendToResultWithHTMLEscaping(C->getParamNameAsWritten());
+ }
+
+ Result << "</dt>";
+
+ if (C->isPositionValid()) {
+ if (C->getDepth() == 1)
+ Result << "<dd class=\"tparam-descr-index-"
+ << C->getIndex(0)
+ << "\">";
+ else
+ Result << "<dd class=\"tparam-descr-index-other\">";
+ } else
+ Result << "<dd class=\"tparam-descr-index-invalid\">";
+
+ visitNonStandaloneParagraphComment(C->getParagraph());
+ Result << "</dd>";
+}
+
+void CommentASTToHTMLConverter::visitVerbatimBlockComment(
+ const VerbatimBlockComment *C) {
+ unsigned NumLines = C->getNumLines();
+ if (NumLines == 0)
+ return;
+
+ Result << "<pre>";
+ for (unsigned i = 0; i != NumLines; ++i) {
+ appendToResultWithHTMLEscaping(C->getText(i));
+ if (i + 1 != NumLines)
+ Result << '\n';
+ }
+ Result << "</pre>";
+}
+
+void CommentASTToHTMLConverter::visitVerbatimBlockLineComment(
+ const VerbatimBlockLineComment *C) {
+ llvm_unreachable("should not see this AST node");
+}
+
+void CommentASTToHTMLConverter::visitVerbatimLineComment(
+ const VerbatimLineComment *C) {
+ Result << "<pre>";
+ appendToResultWithHTMLEscaping(C->getText());
+ Result << "</pre>";
+}
+
+void CommentASTToHTMLConverter::visitFullComment(const FullComment *C) {
+ FullCommentParts Parts(C, Traits);
+
+ bool FirstParagraphIsBrief = false;
+ if (Parts.Headerfile)
+ visit(Parts.Headerfile);
+ if (Parts.Brief)
+ visit(Parts.Brief);
+ else if (Parts.FirstParagraph) {
+ Result << "<p class=\"para-brief\">";
+ visitNonStandaloneParagraphComment(Parts.FirstParagraph);
+ Result << "</p>";
+ FirstParagraphIsBrief = true;
+ }
+
+ for (unsigned i = 0, e = Parts.MiscBlocks.size(); i != e; ++i) {
+ const Comment *C = Parts.MiscBlocks[i];
+ if (FirstParagraphIsBrief && C == Parts.FirstParagraph)
+ continue;
+ visit(C);
+ }
+
+ if (Parts.TParams.size() != 0) {
+ Result << "<dl>";
+ for (unsigned i = 0, e = Parts.TParams.size(); i != e; ++i)
+ visit(Parts.TParams[i]);
+ Result << "</dl>";
+ }
+
+ if (Parts.Params.size() != 0) {
+ Result << "<dl>";
+ for (unsigned i = 0, e = Parts.Params.size(); i != e; ++i)
+ visit(Parts.Params[i]);
+ Result << "</dl>";
+ }
+
+ if (Parts.Returns.size() != 0) {
+ Result << "<div class=\"result-discussion\">";
+ for (unsigned i = 0, e = Parts.Returns.size(); i != e; ++i)
+ visit(Parts.Returns[i]);
+ Result << "</div>";
+ }
+}
+
+void CommentASTToHTMLConverter::visitNonStandaloneParagraphComment(
+ const ParagraphComment *C) {
+ if (!C)
+ return;
+
+ for (Comment::child_iterator I = C->child_begin(), E = C->child_end();
+ I != E; ++I) {
+ visit(*I);
+ }
+}
+
+void CommentASTToHTMLConverter::appendToResultWithHTMLEscaping(StringRef S) {
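+ // Escape the HTML-special characters; '/' is escaped as well so that a
+ // literal "</" in comment text cannot be misread as a closing tag.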
+ for (StringRef::iterator I = S.begin(), E = S.end(); I != E; ++I) {
+ const char C = *I;
+ switch (C) {
+ case '&':
+ Result << "&amp;";
+ break;
+ case '<':
+ Result << "&lt;";
+ break;
+ case '>':
+ Result << "&gt;";
+ break;
+ case '"':
+ Result << "&quot;";
+ break;
+ case '\'':
+ Result << "&#39;";
+ break;
+ case '/':
+ Result << "&#47;";
+ break;
+ default:
+ Result << C;
+ break;
+ }
+ }
+}
+
+namespace {
+class CommentASTToXMLConverter :
+ public ConstCommentVisitor<CommentASTToXMLConverter> {
+public:
+ /// \param Str accumulator for XML.
+ CommentASTToXMLConverter(const FullComment *FC,
+ SmallVectorImpl<char> &Str,
+ const CommandTraits &Traits,
+ const SourceManager &SM,
+ SimpleFormatContext &SFC,
+ unsigned FUID) :
+ FC(FC), Result(Str), Traits(Traits), SM(SM),
+ FormatRewriterContext(SFC),
+ FormatInMemoryUniqueId(FUID) { }
+
+ // Inline content.
+ void visitTextComment(const TextComment *C);
+ void visitInlineCommandComment(const InlineCommandComment *C);
+ void visitHTMLStartTagComment(const HTMLStartTagComment *C);
+ void visitHTMLEndTagComment(const HTMLEndTagComment *C);
+
+ // Block content.
+ void visitParagraphComment(const ParagraphComment *C);
+
+ void appendParagraphCommentWithKind(const ParagraphComment *C,
+ StringRef Kind);
+
+ void visitBlockCommandComment(const BlockCommandComment *C);
+ void visitParamCommandComment(const ParamCommandComment *C);
+ void visitTParamCommandComment(const TParamCommandComment *C);
+ void visitVerbatimBlockComment(const VerbatimBlockComment *C);
+ void visitVerbatimBlockLineComment(const VerbatimBlockLineComment *C);
+ void visitVerbatimLineComment(const VerbatimLineComment *C);
+
+ void visitFullComment(const FullComment *C);
+
+ // Helpers.
+ void appendToResultWithXMLEscaping(StringRef S);
+ void appendToResultWithCDATAEscaping(StringRef S);
+
+ void formatTextOfDeclaration(const DeclInfo *DI,
+ SmallString<128> &Declaration);
+
+private:
+ const FullComment *FC;
+
+ /// Output stream for XML.
+ llvm::raw_svector_ostream Result;
+
+ const CommandTraits &Traits;
+ const SourceManager &SM;
+ SimpleFormatContext &FormatRewriterContext;
+ unsigned FormatInMemoryUniqueId;
+};
+
+void getSourceTextOfDeclaration(const DeclInfo *ThisDecl,
+ SmallVectorImpl<char> &Str) {
+ ASTContext &Context = ThisDecl->CurrentDecl->getASTContext();
+ const LangOptions &LangOpts = Context.getLangOpts();
+ llvm::raw_svector_ostream OS(Str);
+ PrintingPolicy PPolicy(LangOpts);
+ PPolicy.PolishForDeclaration = true;
+ PPolicy.TerseOutput = true;
+ ThisDecl->CurrentDecl->print(OS, PPolicy,
+ /*Indentation*/0, /*PrintInstantiation*/false);
+}
+
+void CommentASTToXMLConverter::formatTextOfDeclaration(
+ const DeclInfo *DI, SmallString<128> &Declaration) {
+ // FIXME: The formatting API expects a null-terminated input string; there
+ // might be a more efficient way of doing this.
+ std::string StringDecl = Declaration.str();
+
+ // Formatter-specific code.
+ // Form a unique in-memory buffer name.
+ SmallString<128> filename;
+ filename += "xmldecl";
+ filename += llvm::utostr(FormatInMemoryUniqueId);
+ filename += ".xd";
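+ // Each invocation needs a fresh buffer name: the in-memory file system
+ // caches buffers by name, so reusing one would collide with earlier
+ // declarations formatted in the same context.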
+ FileID ID = FormatRewriterContext.createInMemoryFile(filename, StringDecl);
+ SourceLocation Start = FormatRewriterContext.Sources.getLocForStartOfFile(ID)
+ .getLocWithOffset(0);
+ unsigned Length = Declaration.size();
+
+ tooling::Replacements Replace = reformat(
+ format::getLLVMStyle(), FormatRewriterContext.Sources, ID,
+ CharSourceRange::getCharRange(Start, Start.getLocWithOffset(Length)));
+ applyAllReplacements(Replace, FormatRewriterContext.Rewrite);
+ Declaration = FormatRewriterContext.getRewrittenText(ID);
+}
+
+} // end unnamed namespace
+
+void CommentASTToXMLConverter::visitTextComment(const TextComment *C) {
+ appendToResultWithXMLEscaping(C->getText());
+}
+
+void CommentASTToXMLConverter::visitInlineCommandComment(
+ const InlineCommandComment *C) {
+ // Nothing to render if no arguments supplied.
+ if (C->getNumArgs() == 0)
+ return;
+
+ // Nothing to render if argument is empty.
+ StringRef Arg0 = C->getArgText(0);
+ if (Arg0.empty())
+ return;
+
+ switch (C->getRenderKind()) {
+ case InlineCommandComment::RenderNormal:
+ for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i) {
+ appendToResultWithXMLEscaping(C->getArgText(i));
+ Result << " ";
+ }
+ return;
+ case InlineCommandComment::RenderBold:
+ assert(C->getNumArgs() == 1);
+ Result << "<bold>";
+ appendToResultWithXMLEscaping(Arg0);
+ Result << "</bold>";
+ return;
+ case InlineCommandComment::RenderMonospaced:
+ assert(C->getNumArgs() == 1);
+ Result << "<monospaced>";
+ appendToResultWithXMLEscaping(Arg0);
+ Result << "</monospaced>";
+ return;
+ case InlineCommandComment::RenderEmphasized:
+ assert(C->getNumArgs() == 1);
+ Result << "<emphasized>";
+ appendToResultWithXMLEscaping(Arg0);
+ Result << "</emphasized>";
+ return;
+ }
+}
+
+void CommentASTToXMLConverter::visitHTMLStartTagComment(
+ const HTMLStartTagComment *C) {
+ Result << "<rawHTML";
+ if (C->isMalformed())
+ Result << " isMalformed=\"1\"";
+ Result << ">";
+ {
+ SmallString<32> Tag;
+ {
+ llvm::raw_svector_ostream TagOS(Tag);
+ printHTMLStartTagComment(C, TagOS);
+ }
+ appendToResultWithCDATAEscaping(Tag);
+ }
+ Result << "</rawHTML>";
+}
+
+void
+CommentASTToXMLConverter::visitHTMLEndTagComment(const HTMLEndTagComment *C) {
+ Result << "<rawHTML";
+ if (C->isMalformed())
+ Result << " isMalformed=\"1\"";
+ Result << ">&lt;/" << C->getTagName() << "&gt;</rawHTML>";
+}
+
+void
+CommentASTToXMLConverter::visitParagraphComment(const ParagraphComment *C) {
+ appendParagraphCommentWithKind(C, StringRef());
+}
+
+void CommentASTToXMLConverter::appendParagraphCommentWithKind(
+ const ParagraphComment *C,
+ StringRef ParagraphKind) {
+ if (C->isWhitespace())
+ return;
+
+ if (ParagraphKind.empty())
+ Result << "<Para>";
+ else
+ Result << "<Para kind=\"" << ParagraphKind << "\">";
+
+ for (Comment::child_iterator I = C->child_begin(), E = C->child_end();
+ I != E; ++I) {
+ visit(*I);
+ }
+ Result << "</Para>";
+}
+
+void CommentASTToXMLConverter::visitBlockCommandComment(
+ const BlockCommandComment *C) {
+ StringRef ParagraphKind;
+
+ switch (C->getCommandID()) {
+ case CommandTraits::KCI_attention:
+ case CommandTraits::KCI_author:
+ case CommandTraits::KCI_authors:
+ case CommandTraits::KCI_bug:
+ case CommandTraits::KCI_copyright:
+ case CommandTraits::KCI_date:
+ case CommandTraits::KCI_invariant:
+ case CommandTraits::KCI_note:
+ case CommandTraits::KCI_post:
+ case CommandTraits::KCI_pre:
+ case CommandTraits::KCI_remark:
+ case CommandTraits::KCI_remarks:
+ case CommandTraits::KCI_sa:
+ case CommandTraits::KCI_see:
+ case CommandTraits::KCI_since:
+ case CommandTraits::KCI_todo:
+ case CommandTraits::KCI_version:
+ case CommandTraits::KCI_warning:
+ ParagraphKind = C->getCommandName(Traits);
+ default:
+ break;
+ }
+
+ appendParagraphCommentWithKind(C->getParagraph(), ParagraphKind);
+}
+
+void CommentASTToXMLConverter::visitParamCommandComment(
+ const ParamCommandComment *C) {
+ Result << "<Parameter><Name>";
+ appendToResultWithXMLEscaping(C->isParamIndexValid()
+ ? C->getParamName(FC)
+ : C->getParamNameAsWritten());
+ Result << "</Name>";
+
+ if (C->isParamIndexValid()) {
+ if (C->isVarArgParam())
+ Result << "<IsVarArg />";
+ else
+ Result << "<Index>" << C->getParamIndex() << "</Index>";
+ }
+
+ Result << "<Direction isExplicit=\"" << C->isDirectionExplicit() << "\">";
+ switch (C->getDirection()) {
+ case ParamCommandComment::In:
+ Result << "in";
+ break;
+ case ParamCommandComment::Out:
+ Result << "out";
+ break;
+ case ParamCommandComment::InOut:
+ Result << "in,out";
+ break;
+ }
+ Result << "</Direction><Discussion>";
+ visit(C->getParagraph());
+ Result << "</Discussion></Parameter>";
+}
+
+void CommentASTToXMLConverter::visitTParamCommandComment(
+ const TParamCommandComment *C) {
+ Result << "<Parameter><Name>";
+ appendToResultWithXMLEscaping(C->isPositionValid() ? C->getParamName(FC)
+ : C->getParamNameAsWritten());
+ Result << "</Name>";
+
+ if (C->isPositionValid() && C->getDepth() == 1) {
+ Result << "<Index>" << C->getIndex(0) << "</Index>";
+ }
+
+ Result << "<Discussion>";
+ visit(C->getParagraph());
+ Result << "</Discussion></Parameter>";
+}
+
+void CommentASTToXMLConverter::visitVerbatimBlockComment(
+ const VerbatimBlockComment *C) {
+ unsigned NumLines = C->getNumLines();
+ if (NumLines == 0)
+ return;
+
+ switch (C->getCommandID()) {
+ case CommandTraits::KCI_code:
+ Result << "<Verbatim xml:space=\"preserve\" kind=\"code\">";
+ break;
+ default:
+ Result << "<Verbatim xml:space=\"preserve\" kind=\"verbatim\">";
+ break;
+ }
+ for (unsigned i = 0; i != NumLines; ++i) {
+ appendToResultWithXMLEscaping(C->getText(i));
+ if (i + 1 != NumLines)
+ Result << '\n';
+ }
+ Result << "</Verbatim>";
+}
+
+void CommentASTToXMLConverter::visitVerbatimBlockLineComment(
+ const VerbatimBlockLineComment *C) {
+ llvm_unreachable("should not see this AST node");
+}
+
+void CommentASTToXMLConverter::visitVerbatimLineComment(
+ const VerbatimLineComment *C) {
+ Result << "<Verbatim xml:space=\"preserve\" kind=\"verbatim\">";
+ appendToResultWithXMLEscaping(C->getText());
+ Result << "</Verbatim>";
+}
+
+void CommentASTToXMLConverter::visitFullComment(const FullComment *C) {
+ FullCommentParts Parts(C, Traits);
+
+ const DeclInfo *DI = C->getDeclInfo();
+ StringRef RootEndTag;
+ if (DI) {
+ switch (DI->getKind()) {
+ case DeclInfo::OtherKind:
+ RootEndTag = "</Other>";
+ Result << "<Other";
+ break;
+ case DeclInfo::FunctionKind:
+ RootEndTag = "</Function>";
+ Result << "<Function";
+ switch (DI->TemplateKind) {
+ case DeclInfo::NotTemplate:
+ break;
+ case DeclInfo::Template:
+ Result << " templateKind=\"template\"";
+ break;
+ case DeclInfo::TemplateSpecialization:
+ Result << " templateKind=\"specialization\"";
+ break;
+ case DeclInfo::TemplatePartialSpecialization:
+ llvm_unreachable("partial specializations of functions "
+ "are not allowed in C++");
+ }
+ if (DI->IsInstanceMethod)
+ Result << " isInstanceMethod=\"1\"";
+ if (DI->IsClassMethod)
+ Result << " isClassMethod=\"1\"";
+ break;
+ case DeclInfo::ClassKind:
+ RootEndTag = "</Class>";
+ Result << "<Class";
+ switch (DI->TemplateKind) {
+ case DeclInfo::NotTemplate:
+ break;
+ case DeclInfo::Template:
+ Result << " templateKind=\"template\"";
+ break;
+ case DeclInfo::TemplateSpecialization:
+ Result << " templateKind=\"specialization\"";
+ break;
+ case DeclInfo::TemplatePartialSpecialization:
+ Result << " templateKind=\"partialSpecialization\"";
+ break;
+ }
+ break;
+ case DeclInfo::VariableKind:
+ RootEndTag = "</Variable>";
+ Result << "<Variable";
+ break;
+ case DeclInfo::NamespaceKind:
+ RootEndTag = "</Namespace>";
+ Result << "<Namespace";
+ break;
+ case DeclInfo::TypedefKind:
+ RootEndTag = "</Typedef>";
+ Result << "<Typedef";
+ break;
+ case DeclInfo::EnumKind:
+ RootEndTag = "</Enum>";
+ Result << "<Enum";
+ break;
+ }
+
+ {
+ // Print line and column number.
+ SourceLocation Loc = DI->CurrentDecl->getLocation();
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
+ FileID FID = LocInfo.first;
+ unsigned FileOffset = LocInfo.second;
+
+ if (FID.isValid()) {
+ if (const FileEntry *FE = SM.getFileEntryForID(FID)) {
+ Result << " file=\"";
+ appendToResultWithXMLEscaping(FE->getName());
+ Result << "\"";
+ }
+ Result << " line=\"" << SM.getLineNumber(FID, FileOffset)
+ << "\" column=\"" << SM.getColumnNumber(FID, FileOffset)
+ << "\"";
+ }
+ }
+
+ // Finish the root tag.
+ Result << ">";
+
+ bool FoundName = false;
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(DI->CommentDecl)) {
+ if (DeclarationName DeclName = ND->getDeclName()) {
+ Result << "<Name>";
+ std::string Name = DeclName.getAsString();
+ appendToResultWithXMLEscaping(Name);
+ FoundName = true;
+ Result << "</Name>";
+ }
+ }
+ if (!FoundName)
+ Result << "<Name>&lt;anonymous&gt;</Name>";
+
+ {
+ // Print USR.
+ SmallString<128> USR;
+ generateUSRForDecl(DI->CommentDecl, USR);
+ if (!USR.empty()) {
+ Result << "<USR>";
+ appendToResultWithXMLEscaping(USR);
+ Result << "</USR>";
+ }
+ }
+ } else {
+ // No DeclInfo -- just emit some root tag and name tag.
+ RootEndTag = "</Other>";
+ Result << "<Other><Name>unknown</Name>";
+ }
+
+ if (Parts.Headerfile) {
+ Result << "<Headerfile>";
+ visit(Parts.Headerfile);
+ Result << "</Headerfile>";
+ }
+
+ if (DI) {
+ // Pretty-print the declaration (skipped when there is no DeclInfo).
+ Result << "<Declaration>";
+ SmallString<128> Declaration;
+ getSourceTextOfDeclaration(DI, Declaration);
+ formatTextOfDeclaration(DI, Declaration);
+ appendToResultWithXMLEscaping(Declaration);
+ Result << "</Declaration>";
+ }
+
+ bool FirstParagraphIsBrief = false;
+ if (Parts.Brief) {
+ Result << "<Abstract>";
+ visit(Parts.Brief);
+ Result << "</Abstract>";
+ } else if (Parts.FirstParagraph) {
+ Result << "<Abstract>";
+ visit(Parts.FirstParagraph);
+ Result << "</Abstract>";
+ FirstParagraphIsBrief = true;
+ }
+
+ if (Parts.TParams.size() != 0) {
+ Result << "<TemplateParameters>";
+ for (unsigned i = 0, e = Parts.TParams.size(); i != e; ++i)
+ visit(Parts.TParams[i]);
+ Result << "</TemplateParameters>";
+ }
+
+ if (Parts.Params.size() != 0) {
+ Result << "<Parameters>";
+ for (unsigned i = 0, e = Parts.Params.size(); i != e; ++i)
+ visit(Parts.Params[i]);
+ Result << "</Parameters>";
+ }
+
+ if (Parts.Exceptions.size() != 0) {
+ Result << "<Exceptions>";
+ for (unsigned i = 0, e = Parts.Exceptions.size(); i != e; ++i)
+ visit(Parts.Exceptions[i]);
+ Result << "</Exceptions>";
+ }
+
+ if (Parts.Returns.size() != 0) {
+ Result << "<ResultDiscussion>";
+ for (unsigned i = 0, e = Parts.Returns.size(); i != e; ++i)
+ visit(Parts.Returns[i]);
+ Result << "</ResultDiscussion>";
+ }
+
+ if (DI && DI->CommentDecl->hasAttrs()) {
+ const AttrVec &Attrs = DI->CommentDecl->getAttrs();
+ for (unsigned i = 0, e = Attrs.size(); i != e; i++) {
+ const AvailabilityAttr *AA = dyn_cast<AvailabilityAttr>(Attrs[i]);
+ if (!AA) {
+ if (const DeprecatedAttr *DA = dyn_cast<DeprecatedAttr>(Attrs[i])) {
+ if (DA->getMessage().empty())
+ Result << "<Deprecated/>";
+ else {
+ Result << "<Deprecated>";
+ appendToResultWithXMLEscaping(DA->getMessage());
+ Result << "</Deprecated>";
+ }
+ }
+ else if (const UnavailableAttr *UA = dyn_cast<UnavailableAttr>(Attrs[i])) {
+ if (UA->getMessage().empty())
+ Result << "<Unavailable/>";
+ else {
+ Result << "<Unavailable>";
+ appendToResultWithXMLEscaping(UA->getMessage());
+ Result << "</Unavailable>";
+ }
+ }
+ continue;
+ }
+
+ // 'availability' attribute.
+ Result << "<Availability";
+ StringRef Distribution;
+ if (AA->getPlatform()) {
+ Distribution = AvailabilityAttr::getPrettyPlatformName(
+ AA->getPlatform()->getName());
+ if (Distribution.empty())
+ Distribution = AA->getPlatform()->getName();
+ }
+ Result << " distribution=\"" << Distribution << "\">";
+ VersionTuple IntroducedInVersion = AA->getIntroduced();
+ if (!IntroducedInVersion.empty()) {
+ Result << "<IntroducedInVersion>"
+ << IntroducedInVersion.getAsString()
+ << "</IntroducedInVersion>";
+ }
+ VersionTuple DeprecatedInVersion = AA->getDeprecated();
+ if (!DeprecatedInVersion.empty()) {
+ Result << "<DeprecatedInVersion>"
+ << DeprecatedInVersion.getAsString()
+ << "</DeprecatedInVersion>";
+ }
+ VersionTuple RemovedAfterVersion = AA->getObsoleted();
+ if (!RemovedAfterVersion.empty()) {
+ Result << "<RemovedAfterVersion>"
+ << RemovedAfterVersion.getAsString()
+ << "</RemovedAfterVersion>";
+ }
+ StringRef DeprecationSummary = AA->getMessage();
+ if (!DeprecationSummary.empty()) {
+ Result << "<DeprecationSummary>";
+ appendToResultWithXMLEscaping(DeprecationSummary);
+ Result << "</DeprecationSummary>";
+ }
+ if (AA->getUnavailable())
+ Result << "<Unavailable/>";
+ Result << "</Availability>";
+ }
+ }
+
+ {
+ bool StartTagEmitted = false;
+ for (unsigned i = 0, e = Parts.MiscBlocks.size(); i != e; ++i) {
+ const Comment *C = Parts.MiscBlocks[i];
+ if (FirstParagraphIsBrief && C == Parts.FirstParagraph)
+ continue;
+ if (!StartTagEmitted) {
+ Result << "<Discussion>";
+ StartTagEmitted = true;
+ }
+ visit(C);
+ }
+ if (StartTagEmitted)
+ Result << "</Discussion>";
+ }
+
+ Result << RootEndTag;
+}
+
+void CommentASTToXMLConverter::appendToResultWithXMLEscaping(StringRef S) {
+ for (StringRef::iterator I = S.begin(), E = S.end(); I != E; ++I) {
+ const char C = *I;
+ switch (C) {
+ case '&':
+ Result << "&amp;";
+ break;
+ case '<':
+ Result << "&lt;";
+ break;
+ case '>':
+ Result << "&gt;";
+ break;
+ case '"':
+ Result << "&quot;";
+ break;
+ case '\'':
+ Result << "&apos;";
+ break;
+ default:
+ Result << C;
+ break;
+ }
+ }
+}
+
+void CommentASTToXMLConverter::appendToResultWithCDATAEscaping(StringRef S) {
+ if (S.empty())
+ return;
+
+ Result << "<![CDATA[";
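+ // A CDATA section cannot contain the literal "]]>". Whenever it appears,
+ // close the current section after "]]" and reopen a new one before ">",
+ // which is what the "]]]]><![CDATA[>" replacement below does.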
+ while (!S.empty()) {
+ size_t Pos = S.find("]]>");
+ if (Pos == 0) {
+ Result << "]]]]><![CDATA[>";
+ S = S.drop_front(3);
+ continue;
+ }
+ if (Pos == StringRef::npos)
+ Pos = S.size();
+
+ Result << S.substr(0, Pos);
+
+ S = S.drop_front(Pos);
+ }
+ Result << "]]>";
+}
+
+CommentToXMLConverter::CommentToXMLConverter() : FormatInMemoryUniqueId(0) {}
+CommentToXMLConverter::~CommentToXMLConverter() {}
+
+void CommentToXMLConverter::convertCommentToHTML(const FullComment *FC,
+ SmallVectorImpl<char> &HTML,
+ const ASTContext &Context) {
+ CommentASTToHTMLConverter Converter(FC, HTML,
+ Context.getCommentCommandTraits());
+ Converter.visit(FC);
+}
+
+void CommentToXMLConverter::convertHTMLTagNodeToText(
+ const comments::HTMLTagComment *HTC, SmallVectorImpl<char> &Text,
+ const ASTContext &Context) {
+ CommentASTToHTMLConverter Converter(nullptr, Text,
+ Context.getCommentCommandTraits());
+ Converter.visit(HTC);
+}
+
+void CommentToXMLConverter::convertCommentToXML(const FullComment *FC,
+ SmallVectorImpl<char> &XML,
+ const ASTContext &Context) {
+ if (!FormatContext || (FormatInMemoryUniqueId % 1000) == 0) {
+ // Create a new format context, or re-create it after some number of
+ // iterations, so the buffers don't grow too large.
+ FormatContext.reset(new SimpleFormatContext(Context.getLangOpts()));
+ }
+
+ CommentASTToXMLConverter Converter(FC, XML, Context.getCommentCommandTraits(),
+ Context.getSourceManager(), *FormatContext,
+ FormatInMemoryUniqueId++);
+ Converter.visit(FC);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Index/SimpleFormatContext.h b/contrib/llvm/tools/clang/lib/Index/SimpleFormatContext.h
new file mode 100644
index 0000000..2c26e4d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Index/SimpleFormatContext.h
@@ -0,0 +1,75 @@
+//===--- SimpleFormatContext.h ----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+///
+/// \brief Defines a utility class for using clang-format from libclang.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_INDEX_SIMPLEFORMATCONTEXT_H
+#define LLVM_CLANG_LIB_INDEX_SIMPLEFORMATCONTEXT_H
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Rewrite/Core/Rewriter.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+namespace index {
+
+/// \brief A small class used by libclang clients to format a declaration
+/// string in memory. This object is instantiated once and reused each time
+/// formatting is needed.
+class SimpleFormatContext {
+public:
+ SimpleFormatContext(LangOptions Options)
+ : DiagOpts(new DiagnosticOptions()),
+ Diagnostics(new DiagnosticsEngine(new DiagnosticIDs,
+ DiagOpts.get())),
+ InMemoryFileSystem(new vfs::InMemoryFileSystem),
+ Files(FileSystemOptions(), InMemoryFileSystem),
+ Sources(*Diagnostics, Files),
+ Rewrite(Sources, Options) {
+ Diagnostics->setClient(new IgnoringDiagConsumer, true);
+ }
+
+ FileID createInMemoryFile(StringRef Name, StringRef Content) {
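+ // Register the buffer with the in-memory VFS (modification time 0) and
+ // map it to a FileID so the rewriter can address it.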
+ InMemoryFileSystem->addFile(Name, 0,
+ llvm::MemoryBuffer::getMemBuffer(Content));
+ const FileEntry *Entry = Files.getFile(Name);
+ assert(Entry != nullptr);
+ return Sources.createFileID(Entry, SourceLocation(), SrcMgr::C_User);
+ }
+
+ std::string getRewrittenText(FileID ID) {
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+ Rewrite.getEditBuffer(ID).write(OS);
+ OS.flush();
+ return Result;
+ }
+
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts;
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diagnostics;
+ IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem;
+ FileManager Files;
+ SourceManager Sources;
+ Rewriter Rewrite;
+};
+
+} // end namespace index
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Index/USRGeneration.cpp b/contrib/llvm/tools/clang/lib/Index/USRGeneration.cpp
new file mode 100644
index 0000000..c57694f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Index/USRGeneration.cpp
@@ -0,0 +1,878 @@
+//===- USRGeneration.cpp - Routines for USR generation --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Index/USRGeneration.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/Lex/PreprocessingRecord.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace clang::index;
+
+//===----------------------------------------------------------------------===//
+// USR generation.
+//===----------------------------------------------------------------------===//
+
+/// \returns true on error.
+static bool printLoc(llvm::raw_ostream &OS, SourceLocation Loc,
+ const SourceManager &SM, bool IncludeOffset) {
+ if (Loc.isInvalid()) {
+ return true;
+ }
+ Loc = SM.getExpansionLoc(Loc);
+ const std::pair<FileID, unsigned> &Decomposed = SM.getDecomposedLoc(Loc);
+ const FileEntry *FE = SM.getFileEntryForID(Decomposed.first);
+ if (FE) {
+ OS << llvm::sys::path::filename(FE->getName());
+ } else {
+ // This case really isn't interesting.
+ return true;
+ }
+ if (IncludeOffset) {
+ // Use the offset into the FileID to represent the location. Using
+ // a line/column can cause us to look back at the original source file,
+ // which is expensive.
+ OS << '@' << Decomposed.second;
+ }
+ return false;
+}
+
+namespace {
+class USRGenerator : public ConstDeclVisitor<USRGenerator> {
+ SmallVectorImpl<char> &Buf;
+ llvm::raw_svector_ostream Out;
+ bool IgnoreResults;
+ ASTContext *Context;
+ bool generatedLoc;
+
+ llvm::DenseMap<const Type *, unsigned> TypeSubstitutions;
+
+public:
+ explicit USRGenerator(ASTContext *Ctx, SmallVectorImpl<char> &Buf)
+ : Buf(Buf),
+ Out(Buf),
+ IgnoreResults(false),
+ Context(Ctx),
+ generatedLoc(false)
+ {
+ // Add the USR space prefix.
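+ // Every USR produced here therefore looks like "c:<fragments>", e.g.
+ // "c:@N@clang@S@Decl" for the class clang::Decl (illustrative).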
+ Out << getUSRSpacePrefix();
+ }
+
+ bool ignoreResults() const { return IgnoreResults; }
+
+ // Visitation methods for generating USRs from AST elements.
+ void VisitDeclContext(const DeclContext *D);
+ void VisitFieldDecl(const FieldDecl *D);
+ void VisitFunctionDecl(const FunctionDecl *D);
+ void VisitNamedDecl(const NamedDecl *D);
+ void VisitNamespaceDecl(const NamespaceDecl *D);
+ void VisitNamespaceAliasDecl(const NamespaceAliasDecl *D);
+ void VisitFunctionTemplateDecl(const FunctionTemplateDecl *D);
+ void VisitClassTemplateDecl(const ClassTemplateDecl *D);
+ void VisitObjCContainerDecl(const ObjCContainerDecl *CD);
+ void VisitObjCMethodDecl(const ObjCMethodDecl *MD);
+ void VisitObjCPropertyDecl(const ObjCPropertyDecl *D);
+ void VisitObjCPropertyImplDecl(const ObjCPropertyImplDecl *D);
+ void VisitTagDecl(const TagDecl *D);
+ void VisitTypedefDecl(const TypedefDecl *D);
+ void VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D);
+ void VisitVarDecl(const VarDecl *D);
+ void VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D);
+ void VisitTemplateTemplateParmDecl(const TemplateTemplateParmDecl *D);
+ void VisitLinkageSpecDecl(const LinkageSpecDecl *D) {
+ IgnoreResults = true;
+ }
+ void VisitUsingDirectiveDecl(const UsingDirectiveDecl *D) {
+ IgnoreResults = true;
+ }
+ void VisitUsingDecl(const UsingDecl *D) {
+ IgnoreResults = true;
+ }
+ void VisitUnresolvedUsingValueDecl(const UnresolvedUsingValueDecl *D) {
+ IgnoreResults = true;
+ }
+ void VisitUnresolvedUsingTypenameDecl(const UnresolvedUsingTypenameDecl *D) {
+ IgnoreResults = true;
+ }
+
+ bool ShouldGenerateLocation(const NamedDecl *D);
+
+ bool isLocal(const NamedDecl *D) {
+ return D->getParentFunctionOrMethod() != nullptr;
+ }
+
+ /// Generate the string component containing the location of the
+ /// declaration.
+ bool GenLoc(const Decl *D, bool IncludeOffset);
+
+ /// String generation methods used both by the visitation methods
+ /// and by other clients that want to directly generate USRs. These
+ /// methods do not construct complete USRs (which incorporate the parents
+ /// of an AST element), but only the fragments concerning the AST element
+ /// itself.
+
+ /// Generate a USR for an Objective-C class.
+ void GenObjCClass(StringRef cls) {
+ generateUSRForObjCClass(cls, Out);
+ }
+ /// Generate a USR for an Objective-C class category.
+ void GenObjCCategory(StringRef cls, StringRef cat) {
+ generateUSRForObjCCategory(cls, cat, Out);
+ }
+ /// Generate a USR fragment for an Objective-C property.
+ void GenObjCProperty(StringRef prop) {
+ generateUSRForObjCProperty(prop, Out);
+ }
+ /// Generate a USR for an Objective-C protocol.
+ void GenObjCProtocol(StringRef prot) {
+ generateUSRForObjCProtocol(prot, Out);
+ }
+
+ void VisitType(QualType T);
+ void VisitTemplateParameterList(const TemplateParameterList *Params);
+ void VisitTemplateName(TemplateName Name);
+ void VisitTemplateArgument(const TemplateArgument &Arg);
+
+ /// Emit a Decl's name using NamedDecl::printName() and return true if
+ /// the decl had no name.
+ bool EmitDeclName(const NamedDecl *D);
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Generating USRs from ASTs.
+//===----------------------------------------------------------------------===//
+
+bool USRGenerator::EmitDeclName(const NamedDecl *D) {
+ const unsigned startSize = Buf.size();
+ D->printName(Out);
+ const unsigned endSize = Buf.size();
+ return startSize == endSize;
+}
+
+bool USRGenerator::ShouldGenerateLocation(const NamedDecl *D) {
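+ // Externally visible declarations already have unique names; a location
+ // fragment is only needed to disambiguate local or hidden declarations.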
+ if (D->isExternallyVisible())
+ return false;
+ if (D->getParentFunctionOrMethod())
+ return true;
+ const SourceManager &SM = Context->getSourceManager();
+ return !SM.isInSystemHeader(D->getLocation());
+}
+
+void USRGenerator::VisitDeclContext(const DeclContext *DC) {
+ if (const NamedDecl *D = dyn_cast<NamedDecl>(DC))
+ Visit(D);
+}
+
+void USRGenerator::VisitFieldDecl(const FieldDecl *D) {
+ // The USR for an ivar declared in a class extension is based on the
+ // ObjCInterfaceDecl, not the ObjCCategoryDecl.
+ if (const ObjCInterfaceDecl *ID = Context->getObjContainingInterface(D))
+ Visit(ID);
+ else
+ VisitDeclContext(D->getDeclContext());
+ Out << (isa<ObjCIvarDecl>(D) ? "@" : "@FI@");
+ if (EmitDeclName(D)) {
+ // Bit fields can be anonymous.
+ IgnoreResults = true;
+ return;
+ }
+}
+
+void USRGenerator::VisitFunctionDecl(const FunctionDecl *D) {
+ if (ShouldGenerateLocation(D) && GenLoc(D, /*IncludeOffset=*/isLocal(D)))
+ return;
+
+ VisitDeclContext(D->getDeclContext());
+ bool IsTemplate = false;
+ if (FunctionTemplateDecl *FunTmpl = D->getDescribedFunctionTemplate()) {
+ IsTemplate = true;
+ Out << "@FT@";
+ VisitTemplateParameterList(FunTmpl->getTemplateParameters());
+ } else
+ Out << "@F@";
+ D->printName(Out);
+
+ ASTContext &Ctx = *Context;
+ if (!Ctx.getLangOpts().CPlusPlus || D->isExternC())
+ return;
+
+ if (const TemplateArgumentList *
+ SpecArgs = D->getTemplateSpecializationArgs()) {
+ Out << '<';
+ for (unsigned I = 0, N = SpecArgs->size(); I != N; ++I) {
+ Out << '#';
+ VisitTemplateArgument(SpecArgs->get(I));
+ }
+ Out << '>';
+ }
+
+ // Mangle in type information for the arguments.
+ for (auto PD : D->params()) {
+ Out << '#';
+ VisitType(PD->getType());
+ }
+ if (D->isVariadic())
+ Out << '.';
+ if (IsTemplate) {
+ // Function templates can be overloaded by return type, for example:
+ // \code
+ // template <class T> typename T::A foo() {}
+ // template <class T> typename T::B foo() {}
+ // \endcode
+ Out << '#';
+ VisitType(D->getReturnType());
+ }
+ Out << '#';
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (MD->isStatic())
+ Out << 'S';
+ if (unsigned quals = MD->getTypeQualifiers())
+ Out << (char)('0' + quals);
+ switch (MD->getRefQualifier()) {
+ case RQ_None: break;
+ case RQ_LValue: Out << '&'; break;
+ case RQ_RValue: Out << "&&"; break;
+ }
+ }
+}
+
+void USRGenerator::VisitNamedDecl(const NamedDecl *D) {
+ VisitDeclContext(D->getDeclContext());
+ Out << "@";
+
+ if (EmitDeclName(D)) {
+ // The string can be empty if the declaration has no name; e.g., it is
+ // the unnamed ParmDecl in the declaration of a function pointer type
+ // such as void (*f)(void *).
+ // In this case, don't generate a USR.
+ IgnoreResults = true;
+ }
+}
+
+void USRGenerator::VisitVarDecl(const VarDecl *D) {
+ // VarDecls can be declared 'extern' within a function or method body,
+ // but their enclosing DeclContext is the function, not the TU. We need
+ // to check the storage class to correctly generate the USR.
+ if (ShouldGenerateLocation(D) && GenLoc(D, /*IncludeOffset=*/isLocal(D)))
+ return;
+
+ VisitDeclContext(D->getDeclContext());
+
+ // Variables always have simple names.
+ StringRef s = D->getName();
+
+ // The string can be empty if the declaration has no name; e.g., it is
+ // the unnamed ParmDecl in the declaration of a function pointer type
+ // such as void (*f)(void *).
+ // In this case, don't generate a USR.
+ if (s.empty())
+ IgnoreResults = true;
+ else
+ Out << '@' << s;
+}
+
+void USRGenerator::VisitNonTypeTemplateParmDecl(
+ const NonTypeTemplateParmDecl *D) {
+ GenLoc(D, /*IncludeOffset=*/true);
+ return;
+}
+
+void USRGenerator::VisitTemplateTemplateParmDecl(
+ const TemplateTemplateParmDecl *D) {
+ GenLoc(D, /*IncludeOffset=*/true);
+ return;
+}
+
+void USRGenerator::VisitNamespaceDecl(const NamespaceDecl *D) {
+ if (D->isAnonymousNamespace()) {
+ Out << "@aN";
+ return;
+ }
+
+ VisitDeclContext(D->getDeclContext());
+ if (!IgnoreResults)
+ Out << "@N@" << D->getName();
+}
+
+void USRGenerator::VisitFunctionTemplateDecl(const FunctionTemplateDecl *D) {
+ VisitFunctionDecl(D->getTemplatedDecl());
+}
+
+void USRGenerator::VisitClassTemplateDecl(const ClassTemplateDecl *D) {
+ VisitTagDecl(D->getTemplatedDecl());
+}
+
+void USRGenerator::VisitNamespaceAliasDecl(const NamespaceAliasDecl *D) {
+ VisitDeclContext(D->getDeclContext());
+ if (!IgnoreResults)
+ Out << "@NA@" << D->getName();
+}
+
+void USRGenerator::VisitObjCMethodDecl(const ObjCMethodDecl *D) {
+ const DeclContext *container = D->getDeclContext();
+ if (const ObjCProtocolDecl *pd = dyn_cast<ObjCProtocolDecl>(container)) {
+ Visit(pd);
+ }
+ else {
+ // The USR for a method declared in a class extension or category is based on
+ // the ObjCInterfaceDecl, not the ObjCCategoryDecl.
+ const ObjCInterfaceDecl *ID = D->getClassInterface();
+ if (!ID) {
+ IgnoreResults = true;
+ return;
+ }
+ Visit(ID);
+ }
+ // Ideally we would use 'GenObjCMethod', but this is such a hot path
+ // for Objective-C code that we don't want to use
+ // DeclarationName::getAsString().
+ Out << (D->isInstanceMethod() ? "(im)" : "(cm)")
+ << DeclarationName(D->getSelector());
+}
+
+void USRGenerator::VisitObjCContainerDecl(const ObjCContainerDecl *D) {
+ switch (D->getKind()) {
+ default:
+ llvm_unreachable("Invalid ObjC container.");
+ case Decl::ObjCInterface:
+ case Decl::ObjCImplementation:
+ GenObjCClass(D->getName());
+ break;
+ case Decl::ObjCCategory: {
+ const ObjCCategoryDecl *CD = cast<ObjCCategoryDecl>(D);
+ const ObjCInterfaceDecl *ID = CD->getClassInterface();
+ if (!ID) {
+ // Handle invalid code where the @interface might not
+ // have been specified.
+ // FIXME: We should be able to generate this USR even if the
+ // @interface isn't available.
+ IgnoreResults = true;
+ return;
+ }
+ // Specially handle class extensions, which are anonymous categories.
+ // We want to mangle in the location to uniquely distinguish them.
+ if (CD->IsClassExtension()) {
+ Out << "objc(ext)" << ID->getName() << '@';
+ GenLoc(CD, /*IncludeOffset=*/true);
+ }
+ else
+ GenObjCCategory(ID->getName(), CD->getName());
+
+ break;
+ }
+ case Decl::ObjCCategoryImpl: {
+ const ObjCCategoryImplDecl *CD = cast<ObjCCategoryImplDecl>(D);
+ const ObjCInterfaceDecl *ID = CD->getClassInterface();
+ if (!ID) {
+ // Handle invalid code where the @interface might not
+ // have been specified.
+ // FIXME: We should be able to generate this USR even if the
+ // @interface isn't available.
+ IgnoreResults = true;
+ return;
+ }
+ GenObjCCategory(ID->getName(), CD->getName());
+ break;
+ }
+ case Decl::ObjCProtocol:
+ GenObjCProtocol(cast<ObjCProtocolDecl>(D)->getName());
+ break;
+ }
+}
+
+void USRGenerator::VisitObjCPropertyDecl(const ObjCPropertyDecl *D) {
+ // The USR for a property declared in a class extension or category is based
+ // on the ObjCInterfaceDecl, not the ObjCCategoryDecl.
+ if (const ObjCInterfaceDecl *ID = Context->getObjContainingInterface(D))
+ Visit(ID);
+ else
+ Visit(cast<Decl>(D->getDeclContext()));
+ GenObjCProperty(D->getName());
+}
+
+void USRGenerator::VisitObjCPropertyImplDecl(const ObjCPropertyImplDecl *D) {
+ if (ObjCPropertyDecl *PD = D->getPropertyDecl()) {
+ VisitObjCPropertyDecl(PD);
+ return;
+ }
+
+ IgnoreResults = true;
+}
+
+void USRGenerator::VisitTagDecl(const TagDecl *D) {
+ // Add the location of the tag decl to handle resolution across
+ // translation units.
+ if (ShouldGenerateLocation(D) && GenLoc(D, /*IncludeOffset=*/isLocal(D)))
+ return;
+
+ D = D->getCanonicalDecl();
+ VisitDeclContext(D->getDeclContext());
+
+ bool AlreadyStarted = false;
+ if (const CXXRecordDecl *CXXRecord = dyn_cast<CXXRecordDecl>(D)) {
+ if (ClassTemplateDecl *ClassTmpl = CXXRecord->getDescribedClassTemplate()) {
+ AlreadyStarted = true;
+
+ switch (D->getTagKind()) {
+ case TTK_Interface:
+ case TTK_Class:
+ case TTK_Struct: Out << "@ST"; break;
+ case TTK_Union: Out << "@UT"; break;
+ case TTK_Enum: llvm_unreachable("enum template");
+ }
+ VisitTemplateParameterList(ClassTmpl->getTemplateParameters());
+ } else if (const ClassTemplatePartialSpecializationDecl *PartialSpec
+ = dyn_cast<ClassTemplatePartialSpecializationDecl>(CXXRecord)) {
+ AlreadyStarted = true;
+
+ switch (D->getTagKind()) {
+ case TTK_Interface:
+ case TTK_Class:
+ case TTK_Struct: Out << "@SP"; break;
+ case TTK_Union: Out << "@UP"; break;
+ case TTK_Enum: llvm_unreachable("enum partial specialization");
+ }
+ VisitTemplateParameterList(PartialSpec->getTemplateParameters());
+ }
+ }
+
+ if (!AlreadyStarted) {
+ switch (D->getTagKind()) {
+ case TTK_Interface:
+ case TTK_Class:
+ case TTK_Struct: Out << "@S"; break;
+ case TTK_Union: Out << "@U"; break;
+ case TTK_Enum: Out << "@E"; break;
+ }
+ }
+
+ Out << '@';
+ assert(Buf.size() > 0);
+ const unsigned off = Buf.size() - 1;
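+ // Remember where the '@' landed: if the tag turns out to be anonymous it
+ // is patched below to 'A' (anonymous with a typedef name) or 'a'.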
+
+ if (EmitDeclName(D)) {
+ if (const TypedefNameDecl *TD = D->getTypedefNameForAnonDecl()) {
+ Buf[off] = 'A';
+ Out << '@' << *TD;
+ }
+ else {
+ if (D->isEmbeddedInDeclarator() && !D->isFreeStanding()) {
+ printLoc(Out, D->getLocation(), Context->getSourceManager(), true);
+ } else
+ Buf[off] = 'a';
+ }
+ }
+
+ // For a class template specialization, mangle the template arguments.
+ if (const ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
+ const TemplateArgumentList &Args = Spec->getTemplateInstantiationArgs();
+ Out << '>';
+ for (unsigned I = 0, N = Args.size(); I != N; ++I) {
+ Out << '#';
+ VisitTemplateArgument(Args.get(I));
+ }
+ }
+}
+
+void USRGenerator::VisitTypedefDecl(const TypedefDecl *D) {
+ if (ShouldGenerateLocation(D) && GenLoc(D, /*IncludeOffset=*/isLocal(D)))
+ return;
+ const DeclContext *DC = D->getDeclContext();
+ if (const NamedDecl *DCN = dyn_cast<NamedDecl>(DC))
+ Visit(DCN);
+ Out << "@T@";
+ Out << D->getName();
+}
+
+void USRGenerator::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) {
+ GenLoc(D, /*IncludeOffset=*/true);
+ return;
+}
+
+bool USRGenerator::GenLoc(const Decl *D, bool IncludeOffset) {
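+ // Emit the location fragment at most once per USR; repeated calls just
+ // report whether the first attempt failed.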
+ if (generatedLoc)
+ return IgnoreResults;
+ generatedLoc = true;
+
+ // Guard against null declarations in invalid code.
+ if (!D) {
+ IgnoreResults = true;
+ return true;
+ }
+
+ // Use the location of canonical decl.
+ D = D->getCanonicalDecl();
+
+ IgnoreResults =
+ IgnoreResults || printLoc(Out, D->getLocStart(),
+ Context->getSourceManager(), IncludeOffset);
+
+ return IgnoreResults;
+}
+
+void USRGenerator::VisitType(QualType T) {
+ // This method mangles in USR information for types. It can possibly
+ // just reuse the name-mangling logic used by codegen, although the
+ // requirements for USRs might not be the same.
+ ASTContext &Ctx = *Context;
+
+ do {
+ T = Ctx.getCanonicalType(T);
+ Qualifiers Q = T.getQualifiers();
+ unsigned qVal = 0;
+ if (Q.hasConst())
+ qVal |= 0x1;
+ if (Q.hasVolatile())
+ qVal |= 0x2;
+ if (Q.hasRestrict())
+ qVal |= 0x4;
+ if (qVal)
+ Out << ((char) ('0' + qVal));
+
+ // Mangle in ObjC GC qualifiers?
+
+ if (const PackExpansionType *Expansion = T->getAs<PackExpansionType>()) {
+ Out << 'P';
+ T = Expansion->getPattern();
+ }
+
+ if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
+ unsigned char c = '\0';
+ switch (BT->getKind()) {
+ case BuiltinType::Void:
+ c = 'v'; break;
+ case BuiltinType::Bool:
+ c = 'b'; break;
+ case BuiltinType::UChar:
+ c = 'c'; break;
+ case BuiltinType::Char16:
+ c = 'q'; break;
+ case BuiltinType::Char32:
+ c = 'w'; break;
+ case BuiltinType::UShort:
+ c = 's'; break;
+ case BuiltinType::UInt:
+ c = 'i'; break;
+ case BuiltinType::ULong:
+ c = 'l'; break;
+ case BuiltinType::ULongLong:
+ c = 'k'; break;
+ case BuiltinType::UInt128:
+ c = 'j'; break;
+ case BuiltinType::Char_U:
+ case BuiltinType::Char_S:
+ c = 'C'; break;
+ case BuiltinType::SChar:
+ c = 'r'; break;
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ c = 'W'; break;
+ case BuiltinType::Short:
+ c = 'S'; break;
+ case BuiltinType::Int:
+ c = 'I'; break;
+ case BuiltinType::Long:
+ c = 'L'; break;
+ case BuiltinType::LongLong:
+ c = 'K'; break;
+ case BuiltinType::Int128:
+ c = 'J'; break;
+ case BuiltinType::Half:
+ c = 'h'; break;
+ case BuiltinType::Float:
+ c = 'f'; break;
+ case BuiltinType::Double:
+ c = 'd'; break;
+ case BuiltinType::LongDouble:
+ c = 'D'; break;
+ case BuiltinType::NullPtr:
+ c = 'n'; break;
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+ case BuiltinType::Dependent:
+ case BuiltinType::OCLImage1d:
+ case BuiltinType::OCLImage1dArray:
+ case BuiltinType::OCLImage1dBuffer:
+ case BuiltinType::OCLImage2d:
+ case BuiltinType::OCLImage2dArray:
+ case BuiltinType::OCLImage2dDepth:
+ case BuiltinType::OCLImage2dArrayDepth:
+ case BuiltinType::OCLImage2dMSAA:
+ case BuiltinType::OCLImage2dArrayMSAA:
+ case BuiltinType::OCLImage2dMSAADepth:
+ case BuiltinType::OCLImage2dArrayMSAADepth:
+ case BuiltinType::OCLImage3d:
+ case BuiltinType::OCLEvent:
+ case BuiltinType::OCLClkEvent:
+ case BuiltinType::OCLQueue:
+ case BuiltinType::OCLNDRange:
+ case BuiltinType::OCLReserveID:
+ case BuiltinType::OCLSampler:
+ IgnoreResults = true;
+ return;
+ case BuiltinType::ObjCId:
+ c = 'o'; break;
+ case BuiltinType::ObjCClass:
+ c = 'O'; break;
+ case BuiltinType::ObjCSel:
+ c = 'e'; break;
+ }
+ Out << c;
+ return;
+ }
+
+ // If we have already seen this (non-built-in) type, use a substitution
+ // encoding.
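+ // Repeated types are emitted as "S<n>_" (e.g. "S0_" for the first one),
+ // in the spirit of Itanium mangling substitutions.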
+ llvm::DenseMap<const Type *, unsigned>::iterator Substitution
+ = TypeSubstitutions.find(T.getTypePtr());
+ if (Substitution != TypeSubstitutions.end()) {
+ Out << 'S' << Substitution->second << '_';
+ return;
+ } else {
+ // Record this as a substitution.
+ unsigned Number = TypeSubstitutions.size();
+ TypeSubstitutions[T.getTypePtr()] = Number;
+ }
+
+ if (const PointerType *PT = T->getAs<PointerType>()) {
+ Out << '*';
+ T = PT->getPointeeType();
+ continue;
+ }
+ if (const RValueReferenceType *RT = T->getAs<RValueReferenceType>()) {
+ Out << "&&";
+ T = RT->getPointeeType();
+ continue;
+ }
+ if (const ReferenceType *RT = T->getAs<ReferenceType>()) {
+ Out << '&';
+ T = RT->getPointeeType();
+ continue;
+ }
+ if (const FunctionProtoType *FT = T->getAs<FunctionProtoType>()) {
+ Out << 'F';
+ VisitType(FT->getReturnType());
+ for (const auto &I : FT->param_types())
+ VisitType(I);
+ if (FT->isVariadic())
+ Out << '.';
+ return;
+ }
+ if (const BlockPointerType *BT = T->getAs<BlockPointerType>()) {
+ Out << 'B';
+ T = BT->getPointeeType();
+ continue;
+ }
+ if (const ComplexType *CT = T->getAs<ComplexType>()) {
+ Out << '<';
+ T = CT->getElementType();
+ continue;
+ }
+ if (const TagType *TT = T->getAs<TagType>()) {
+ Out << '$';
+ VisitTagDecl(TT->getDecl());
+ return;
+ }
+ if (const TemplateTypeParmType *TTP = T->getAs<TemplateTypeParmType>()) {
+ Out << 't' << TTP->getDepth() << '.' << TTP->getIndex();
+ return;
+ }
+ if (const TemplateSpecializationType *Spec
+ = T->getAs<TemplateSpecializationType>()) {
+ Out << '>';
+ VisitTemplateName(Spec->getTemplateName());
+ Out << Spec->getNumArgs();
+ for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I)
+ VisitTemplateArgument(Spec->getArg(I));
+ return;
+ }
+ if (const DependentNameType *DNT = T->getAs<DependentNameType>()) {
+ Out << '^';
+ // FIXME: Encode the qualifier, don't just print it.
+ PrintingPolicy PO(Ctx.getLangOpts());
+ PO.SuppressTagKeyword = true;
+ PO.SuppressUnwrittenScope = true;
+ PO.ConstantArraySizeAsWritten = false;
+ PO.AnonymousTagLocations = false;
+ DNT->getQualifier()->print(Out, PO);
+ Out << ':' << DNT->getIdentifier()->getName();
+ return;
+ }
+ if (const InjectedClassNameType *InjT = T->getAs<InjectedClassNameType>()) {
+ T = InjT->getInjectedSpecializationType();
+ continue;
+ }
+
+ // Unhandled type.
+ Out << ' ';
+ break;
+ } while (true);
+}
+
+void USRGenerator::VisitTemplateParameterList(
+ const TemplateParameterList *Params) {
+ if (!Params)
+ return;
+ Out << '>' << Params->size();
+ for (TemplateParameterList::const_iterator P = Params->begin(),
+ PEnd = Params->end();
+ P != PEnd; ++P) {
+ Out << '#';
+ if (isa<TemplateTypeParmDecl>(*P)) {
+ if (cast<TemplateTypeParmDecl>(*P)->isParameterPack())
+ Out << 'p';
+ Out << 'T';
+ continue;
+ }
+
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ if (NTTP->isParameterPack())
+ Out << 'p';
+ Out << 'N';
+ VisitType(NTTP->getType());
+ continue;
+ }
+
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*P);
+ if (TTP->isParameterPack())
+ Out << 'p';
+ Out << 't';
+ VisitTemplateParameterList(TTP->getTemplateParameters());
+ }
+}
+
+void USRGenerator::VisitTemplateName(TemplateName Name) {
+ if (TemplateDecl *Template = Name.getAsTemplateDecl()) {
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(Template)) {
+ Out << 't' << TTP->getDepth() << '.' << TTP->getIndex();
+ return;
+ }
+
+ Visit(Template);
+ return;
+ }
+
+ // FIXME: Visit dependent template names.
+}
+
+void USRGenerator::VisitTemplateArgument(const TemplateArgument &Arg) {
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ break;
+
+ case TemplateArgument::Declaration:
+ Visit(Arg.getAsDecl());
+ break;
+
+ case TemplateArgument::NullPtr:
+ break;
+
+ case TemplateArgument::TemplateExpansion:
+ Out << 'P'; // pack expansion of...
+ // Fall through
+ case TemplateArgument::Template:
+ VisitTemplateName(Arg.getAsTemplateOrTemplatePattern());
+ break;
+
+ case TemplateArgument::Expression:
+ // FIXME: Visit expressions.
+ break;
+
+ case TemplateArgument::Pack:
+ Out << 'p' << Arg.pack_size();
+ for (const auto &P : Arg.pack_elements())
+ VisitTemplateArgument(P);
+ break;
+
+ case TemplateArgument::Type:
+ VisitType(Arg.getAsType());
+ break;
+
+ case TemplateArgument::Integral:
+ Out << 'V';
+ VisitType(Arg.getIntegralType());
+ Out << Arg.getAsIntegral();
+ break;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// USR generation functions.
+//===----------------------------------------------------------------------===//
+
+void clang::index::generateUSRForObjCClass(StringRef Cls, raw_ostream &OS) {
+ OS << "objc(cs)" << Cls;
+}
+
+void clang::index::generateUSRForObjCCategory(StringRef Cls, StringRef Cat,
+ raw_ostream &OS) {
+ OS << "objc(cy)" << Cls << '@' << Cat;
+}
+
+void clang::index::generateUSRForObjCIvar(StringRef Ivar, raw_ostream &OS) {
+ OS << '@' << Ivar;
+}
+
+void clang::index::generateUSRForObjCMethod(StringRef Sel,
+ bool IsInstanceMethod,
+ raw_ostream &OS) {
+ OS << (IsInstanceMethod ? "(im)" : "(cm)") << Sel;
+}
+
+void clang::index::generateUSRForObjCProperty(StringRef Prop, raw_ostream &OS) {
+ OS << "(py)" << Prop;
+}
+
+void clang::index::generateUSRForObjCProtocol(StringRef Prot, raw_ostream &OS) {
+ OS << "objc(pl)" << Prot;
+}
+
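+// Typical usage (illustrative sketch): collect a USR for a Decl into a
+// SmallString and check for failure:
+//   SmallString<128> USR;
+//   if (!index::generateUSRForDecl(D, USR))
+//     /* USR now holds e.g. "c:@N@clang@S@Decl" */;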
+bool clang::index::generateUSRForDecl(const Decl *D,
+ SmallVectorImpl<char> &Buf) {
+ // Don't generate USRs for things with invalid locations.
+ if (!D || D->getLocStart().isInvalid())
+ return true;
+
+ USRGenerator UG(&D->getASTContext(), Buf);
+ UG.Visit(D);
+ return UG.ignoreResults();
+}
+
+bool clang::index::generateUSRForMacro(const MacroDefinitionRecord *MD,
+ const SourceManager &SM,
+ SmallVectorImpl<char> &Buf) {
+ // Don't generate USRs for things with invalid locations.
+ if (!MD || MD->getLocation().isInvalid())
+ return true;
+
+ llvm::raw_svector_ostream Out(Buf);
+
+ // Assume that system headers are sane. Don't put source location
+ // information into the USR if the macro comes from a system header.
+ SourceLocation Loc = MD->getLocation();
+ bool ShouldGenerateLocation = !SM.isInSystemHeader(Loc);
+
+ Out << getUSRSpacePrefix();
+ if (ShouldGenerateLocation)
+ printLoc(Out, Loc, SM, /*IncludeOffset=*/true);
+ Out << "@macro@";
+ Out << MD->getName()->getName();
+ return false;
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp b/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp
new file mode 100644
index 0000000..09d5384
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp
@@ -0,0 +1,237 @@
+//===--- HeaderMap.cpp - A file that acts like dir of symlinks ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the HeaderMap interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/HeaderMap.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/FileManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cstdio>
+#include <memory>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Data Structures and Manifest Constants
+//===----------------------------------------------------------------------===//
+
+enum {
+ HMAP_HeaderMagicNumber = ('h' << 24) | ('m' << 16) | ('a' << 8) | 'p',
+ HMAP_HeaderVersion = 1,
+
+ HMAP_EmptyBucketKey = 0
+};
+
+namespace clang {
+struct HMapBucket {
+ uint32_t Key; // Offset (into strings) of key.
+
+ uint32_t Prefix; // Offset (into strings) of value prefix.
+ uint32_t Suffix; // Offset (into strings) of value suffix.
+};
+
+struct HMapHeader {
+ uint32_t Magic; // Magic word, also indicates byte order.
+ uint16_t Version; // Version number -- currently 1.
+ uint16_t Reserved; // Reserved for future use - zero for now.
+ uint32_t StringsOffset; // Offset to start of string pool.
+ uint32_t NumEntries; // Number of entries in the string table.
+ uint32_t NumBuckets; // Number of buckets (always a power of 2).
+ uint32_t MaxValueLength; // Length of longest result path (excluding nul).
+ // An array of 'NumBuckets' HMapBucket objects follows this header.
+ // Strings follow the buckets, at StringsOffset.
+};
+} // end namespace clang.
+
+/// HashHMapKey - This is the 'well known' hash function required by the file
+/// format, used to look up keys in the hash table. The hash table uses simple
+/// linear probing based on this function.
+static inline unsigned HashHMapKey(StringRef Str) {
+ unsigned Result = 0;
+ const char *S = Str.begin(), *End = Str.end();
+
+ for (; S != End; S++)
+ Result += toLowercase(*S) * 13;
+ return Result;
+}
+
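+// A worked example of the hash (illustrative; the code above is normative):
+// each byte is lowercased and multiplied by 13, so
+//   HashHMapKey("Foo.h") == ('f' + 'o' + 'o' + '.' + 'h') * 13
+// and "Foo.h", "foo.h", and "FOO.H" all fall into the same probe chain. The
+// table index is formed by masking with (NumBuckets - 1), which is why
+// lookupFilename() below insists that NumBuckets be a power of two.
+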
+//===----------------------------------------------------------------------===//
+// Verification and Construction
+//===----------------------------------------------------------------------===//
+
+/// HeaderMap::Create - This attempts to load the specified file as a header
+/// map. If it doesn't look like a HeaderMap, it gives up and returns null.
+/// It also returns null if the file looks like a HeaderMap but is obviously
+/// corrupted.
+const HeaderMap *HeaderMap::Create(const FileEntry *FE, FileManager &FM) {
+ // If the file is too small to be a header map, ignore it.
+ unsigned FileSize = FE->getSize();
+ if (FileSize <= sizeof(HMapHeader)) return nullptr;
+
+ auto FileBuffer = FM.getBufferForFile(FE);
+ if (!FileBuffer) return nullptr; // Unreadable file?
+ const char *FileStart = (*FileBuffer)->getBufferStart();
+
+ // We know the file is at least as big as the header, check it now.
+ const HMapHeader *Header = reinterpret_cast<const HMapHeader*>(FileStart);
+
+ // Sniff it to see if it's a headermap by checking the magic number and
+ // version.
+ bool NeedsByteSwap;
+ if (Header->Magic == HMAP_HeaderMagicNumber &&
+ Header->Version == HMAP_HeaderVersion)
+ NeedsByteSwap = false;
+ else if (Header->Magic == llvm::ByteSwap_32(HMAP_HeaderMagicNumber) &&
+ Header->Version == llvm::ByteSwap_16(HMAP_HeaderVersion))
+ NeedsByteSwap = true; // Mixed endianness headermap.
+ else
+ return nullptr; // Not a header map.
+
+ if (Header->Reserved != 0) return nullptr;
+
+ // Okay, everything looks good, create the header map.
+ return new HeaderMap(std::move(*FileBuffer), NeedsByteSwap);
+}
+
+//===----------------------------------------------------------------------===//
+// Utility Methods
+//===----------------------------------------------------------------------===//
+
+/// getFileName - Return the filename of the headermap.
+const char *HeaderMap::getFileName() const {
+ return FileBuffer->getBufferIdentifier();
+}
+
+unsigned HeaderMap::getEndianAdjustedWord(unsigned X) const {
+ if (!NeedsBSwap) return X;
+ return llvm::ByteSwap_32(X);
+}
+
+/// getHeader - Return a reference to the file header, in unbyte-swapped form.
+/// This method cannot fail.
+const HMapHeader &HeaderMap::getHeader() const {
+ // We know the file is at least as big as the header. Return it.
+ return *reinterpret_cast<const HMapHeader*>(FileBuffer->getBufferStart());
+}
+
+/// getBucket - Return the specified hash table bucket from the header map,
+/// bswap'ing its fields as appropriate. If the bucket number is not valid,
+/// this returns a bucket with an empty key (0).
+HMapBucket HeaderMap::getBucket(unsigned BucketNo) const {
+ HMapBucket Result;
+ Result.Key = HMAP_EmptyBucketKey;
+
+ const HMapBucket *BucketArray =
+ reinterpret_cast<const HMapBucket*>(FileBuffer->getBufferStart() +
+ sizeof(HMapHeader));
+
+ const HMapBucket *BucketPtr = BucketArray+BucketNo;
+ if ((const char*)(BucketPtr+1) > FileBuffer->getBufferEnd()) {
+ Result.Prefix = 0;
+ Result.Suffix = 0;
+ return Result; // Invalid buffer, corrupt hmap.
+ }
+
+ // Otherwise, the bucket is valid. Load the values, bswapping as needed.
+ Result.Key = getEndianAdjustedWord(BucketPtr->Key);
+ Result.Prefix = getEndianAdjustedWord(BucketPtr->Prefix);
+ Result.Suffix = getEndianAdjustedWord(BucketPtr->Suffix);
+ return Result;
+}
+
+/// getString - Look up the specified string in the string table. If the
+/// string index is not valid, it returns null.
+const char *HeaderMap::getString(unsigned StrTabIdx) const {
+ // Add the start of the string table to the idx.
+ StrTabIdx += getEndianAdjustedWord(getHeader().StringsOffset);
+
+ // Check for invalid index.
+ if (StrTabIdx >= FileBuffer->getBufferSize())
+ return nullptr;
+
+  // Otherwise, we have a valid pointer into the file. Just return it. We know
+  // that the "string" cannot overrun the end of the file, because the buffer
+  // is nul terminated by virtue of being a MemoryBuffer.
+ return FileBuffer->getBufferStart()+StrTabIdx;
+}
+
+//===----------------------------------------------------------------------===//
+// The Main Drivers
+//===----------------------------------------------------------------------===//
+
+/// dump - Print the contents of this headermap to stderr.
+void HeaderMap::dump() const {
+ const HMapHeader &Hdr = getHeader();
+ unsigned NumBuckets = getEndianAdjustedWord(Hdr.NumBuckets);
+
+ fprintf(stderr, "Header Map %s:\n %d buckets, %d entries\n",
+ getFileName(), NumBuckets,
+ getEndianAdjustedWord(Hdr.NumEntries));
+
+ for (unsigned i = 0; i != NumBuckets; ++i) {
+ HMapBucket B = getBucket(i);
+ if (B.Key == HMAP_EmptyBucketKey) continue;
+
+ const char *Key = getString(B.Key);
+ const char *Prefix = getString(B.Prefix);
+ const char *Suffix = getString(B.Suffix);
+ fprintf(stderr, " %d. %s -> '%s' '%s'\n", i, Key, Prefix, Suffix);
+ }
+}
+
+/// LookupFile - Check to see if the specified relative filename is located in
+/// this HeaderMap. If so, open it and return its FileEntry.
+const FileEntry *HeaderMap::LookupFile(
+ StringRef Filename, FileManager &FM) const {
+
+ SmallString<1024> Path;
+ StringRef Dest = lookupFilename(Filename, Path);
+ if (Dest.empty())
+ return nullptr;
+
+ return FM.getFile(Dest);
+}
+
+StringRef HeaderMap::lookupFilename(StringRef Filename,
+ SmallVectorImpl<char> &DestPath) const {
+ const HMapHeader &Hdr = getHeader();
+ unsigned NumBuckets = getEndianAdjustedWord(Hdr.NumBuckets);
+
+ // If the number of buckets is not a power of two, the headermap is corrupt.
+ // Don't probe infinitely.
+ if (NumBuckets & (NumBuckets-1))
+ return StringRef();
+
+ // Linearly probe the hash table.
+ for (unsigned Bucket = HashHMapKey(Filename);; ++Bucket) {
+ HMapBucket B = getBucket(Bucket & (NumBuckets-1));
+ if (B.Key == HMAP_EmptyBucketKey) return StringRef(); // Hash miss.
+
+ // See if the key matches. If not, probe on.
+ if (!Filename.equals_lower(getString(B.Key)))
+ continue;
+
+ // If so, we have a match in the hash table. Construct the destination
+ // path.
+ StringRef Prefix = getString(B.Prefix);
+ StringRef Suffix = getString(B.Suffix);
+ DestPath.clear();
+ DestPath.append(Prefix.begin(), Prefix.end());
+ DestPath.append(Suffix.begin(), Suffix.end());
+ return StringRef(DestPath.begin(), DestPath.size());
+ }
+}
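+
+// Usage sketch (entry contents hypothetical): given an hmap entry
+//   "Foo.h" -> prefix "/System/Foo.framework/Headers/", suffix "Foo.h"
+// lookupFilename("Foo.h", Path) rebuilds the destination as prefix + suffix
+// and returns "/System/Foo.framework/Headers/Foo.h"; LookupFile() then asks
+// the FileManager for that path.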
diff --git a/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp b/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
new file mode 100644
index 0000000..8a686a7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
@@ -0,0 +1,1423 @@
+//===--- HeaderSearch.cpp - Resolve Header File Locations ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the DirectoryLookup and HeaderSearch interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Frontend/PCHContainerOperations.h"
+#include "clang/Lex/ExternalPreprocessorSource.h"
+#include "clang/Lex/HeaderMap.h"
+#include "clang/Lex/HeaderSearchOptions.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Capacity.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdio>
+#if defined(LLVM_ON_UNIX)
+#include <limits.h>
+#endif
+using namespace clang;
+
+const IdentifierInfo *
+HeaderFileInfo::getControllingMacro(ExternalPreprocessorSource *External) {
+ if (ControllingMacro) {
+ if (ControllingMacro->isOutOfDate())
+ External->updateOutOfDateIdentifier(
+ *const_cast<IdentifierInfo *>(ControllingMacro));
+ return ControllingMacro;
+ }
+
+ if (!ControllingMacroID || !External)
+ return nullptr;
+
+ ControllingMacro = External->GetIdentifier(ControllingMacroID);
+ return ControllingMacro;
+}
+
+ExternalHeaderFileInfoSource::~ExternalHeaderFileInfoSource() {}
+
+HeaderSearch::HeaderSearch(IntrusiveRefCntPtr<HeaderSearchOptions> HSOpts,
+ SourceManager &SourceMgr, DiagnosticsEngine &Diags,
+ const LangOptions &LangOpts,
+ const TargetInfo *Target)
+ : HSOpts(HSOpts), Diags(Diags), FileMgr(SourceMgr.getFileManager()),
+ FrameworkMap(64), ModMap(SourceMgr, Diags, LangOpts, Target, *this),
+ LangOpts(LangOpts) {
+ AngledDirIdx = 0;
+ SystemDirIdx = 0;
+ NoCurDirSearch = false;
+
+ ExternalLookup = nullptr;
+ ExternalSource = nullptr;
+ NumIncluded = 0;
+ NumMultiIncludeFileOptzn = 0;
+ NumFrameworkLookups = NumSubFrameworkLookups = 0;
+}
+
+HeaderSearch::~HeaderSearch() {
+ // Delete headermaps.
+ for (unsigned i = 0, e = HeaderMaps.size(); i != e; ++i)
+ delete HeaderMaps[i].second;
+}
+
+void HeaderSearch::PrintStats() {
+ fprintf(stderr, "\n*** HeaderSearch Stats:\n");
+ fprintf(stderr, "%d files tracked.\n", (int)FileInfo.size());
+ unsigned NumOnceOnlyFiles = 0, MaxNumIncludes = 0, NumSingleIncludedFiles = 0;
+ for (unsigned i = 0, e = FileInfo.size(); i != e; ++i) {
+ NumOnceOnlyFiles += FileInfo[i].isImport;
+ if (MaxNumIncludes < FileInfo[i].NumIncludes)
+ MaxNumIncludes = FileInfo[i].NumIncludes;
+ NumSingleIncludedFiles += FileInfo[i].NumIncludes == 1;
+ }
+ fprintf(stderr, " %d #import/#pragma once files.\n", NumOnceOnlyFiles);
+ fprintf(stderr, " %d included exactly once.\n", NumSingleIncludedFiles);
+ fprintf(stderr, " %d max times a file is included.\n", MaxNumIncludes);
+
+ fprintf(stderr, " %d #include/#include_next/#import.\n", NumIncluded);
+ fprintf(stderr, " %d #includes skipped due to"
+ " the multi-include optimization.\n", NumMultiIncludeFileOptzn);
+
+ fprintf(stderr, "%d framework lookups.\n", NumFrameworkLookups);
+ fprintf(stderr, "%d subframework lookups.\n", NumSubFrameworkLookups);
+}
+
+/// CreateHeaderMap - This method returns a HeaderMap for the specified
+/// FileEntry, uniquing them through the 'HeaderMaps' data structure.
+const HeaderMap *HeaderSearch::CreateHeaderMap(const FileEntry *FE) {
+ // We expect the number of headermaps to be small, and almost always empty.
+ // If it ever grows, use of a linear search should be re-evaluated.
+ if (!HeaderMaps.empty()) {
+ for (unsigned i = 0, e = HeaderMaps.size(); i != e; ++i)
+ // Pointer equality comparison of FileEntries works because they are
+ // already uniqued by inode.
+ if (HeaderMaps[i].first == FE)
+ return HeaderMaps[i].second;
+ }
+
+ if (const HeaderMap *HM = HeaderMap::Create(FE, FileMgr)) {
+ HeaderMaps.push_back(std::make_pair(FE, HM));
+ return HM;
+ }
+
+ return nullptr;
+}
+
+std::string HeaderSearch::getModuleFileName(Module *Module) {
+ const FileEntry *ModuleMap =
+ getModuleMap().getModuleMapFileForUniquing(Module);
+ return getModuleFileName(Module->Name, ModuleMap->getName());
+}
+
+std::string HeaderSearch::getModuleFileName(StringRef ModuleName,
+ StringRef ModuleMapPath) {
+ // If we don't have a module cache path or aren't supposed to use one, we
+ // can't do anything.
+ if (getModuleCachePath().empty())
+ return std::string();
+
+ SmallString<256> Result(getModuleCachePath());
+ llvm::sys::fs::make_absolute(Result);
+
+ if (HSOpts->DisableModuleHash) {
+ llvm::sys::path::append(Result, ModuleName + ".pcm");
+ } else {
+ // Construct the name <ModuleName>-<hash of ModuleMapPath>.pcm which should
+ // ideally be globally unique to this particular module. Name collisions
+ // in the hash are safe (because any translation unit can only import one
+ // module with each name), but result in a loss of caching.
+ //
+ // To avoid false-negatives, we form as canonical a path as we can, and map
+ // to lower-case in case we're on a case-insensitive file system.
+ auto *Dir =
+ FileMgr.getDirectory(llvm::sys::path::parent_path(ModuleMapPath));
+ if (!Dir)
+ return std::string();
+ auto DirName = FileMgr.getCanonicalName(Dir);
+ auto FileName = llvm::sys::path::filename(ModuleMapPath);
+
+ llvm::hash_code Hash =
+ llvm::hash_combine(DirName.lower(), FileName.lower(),
+ HSOpts->ModuleFormat, HSOpts->UseDebugInfo);
+
+ SmallString<128> HashStr;
+ llvm::APInt(64, size_t(Hash)).toStringUnsigned(HashStr, /*Radix*/36);
+ llvm::sys::path::append(Result, ModuleName + "-" + HashStr + ".pcm");
+ }
+ return Result.str().str();
+}
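+
+// For example (paths and hash value hypothetical): with a module cache of
+// "/tmp/mcache", a module "Foo" described by "/src/Foo/module.modulemap"
+// yields "/tmp/mcache/Foo.pcm" when DisableModuleHash is set, and otherwise
+// something like "/tmp/mcache/Foo-2AB9GXN0QJ1FD.pcm", where the suffix is
+// the base-36 hash computed above.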
+
+Module *HeaderSearch::lookupModule(StringRef ModuleName, bool AllowSearch) {
+ // Look in the module map to determine if there is a module by this name.
+ Module *Module = ModMap.findModule(ModuleName);
+ if (Module || !AllowSearch || !HSOpts->ImplicitModuleMaps)
+ return Module;
+
+ // Look through the various header search paths to load any available module
+ // maps, searching for a module map that describes this module.
+ for (unsigned Idx = 0, N = SearchDirs.size(); Idx != N; ++Idx) {
+ if (SearchDirs[Idx].isFramework()) {
+ // Search for or infer a module map for a framework.
+ SmallString<128> FrameworkDirName;
+ FrameworkDirName += SearchDirs[Idx].getFrameworkDir()->getName();
+ llvm::sys::path::append(FrameworkDirName, ModuleName + ".framework");
+ if (const DirectoryEntry *FrameworkDir
+ = FileMgr.getDirectory(FrameworkDirName)) {
+ bool IsSystem
+ = SearchDirs[Idx].getDirCharacteristic() != SrcMgr::C_User;
+ Module = loadFrameworkModule(ModuleName, FrameworkDir, IsSystem);
+ if (Module)
+ break;
+ }
+ }
+
+ // FIXME: Figure out how header maps and module maps will work together.
+
+ // Only deal with normal search directories.
+ if (!SearchDirs[Idx].isNormalDir())
+ continue;
+
+ bool IsSystem = SearchDirs[Idx].isSystemHeaderDirectory();
+ // Search for a module map file in this directory.
+ if (loadModuleMapFile(SearchDirs[Idx].getDir(), IsSystem,
+ /*IsFramework*/false) == LMM_NewlyLoaded) {
+ // We just loaded a module map file; check whether the module is
+ // available now.
+ Module = ModMap.findModule(ModuleName);
+ if (Module)
+ break;
+ }
+
+ // Search for a module map in a subdirectory with the same name as the
+ // module.
+ SmallString<128> NestedModuleMapDirName;
+ NestedModuleMapDirName = SearchDirs[Idx].getDir()->getName();
+ llvm::sys::path::append(NestedModuleMapDirName, ModuleName);
+ if (loadModuleMapFile(NestedModuleMapDirName, IsSystem,
+ /*IsFramework*/false) == LMM_NewlyLoaded){
+ // If we just loaded a module map file, look for the module again.
+ Module = ModMap.findModule(ModuleName);
+ if (Module)
+ break;
+ }
+
+ // If we've already performed the exhaustive search for module maps in this
+ // search directory, don't do it again.
+ if (SearchDirs[Idx].haveSearchedAllModuleMaps())
+ continue;
+
+ // Load all module maps in the immediate subdirectories of this search
+ // directory.
+ loadSubdirectoryModuleMaps(SearchDirs[Idx]);
+
+ // Look again for the module.
+ Module = ModMap.findModule(ModuleName);
+ if (Module)
+ break;
+ }
+
+ return Module;
+}
+
+//===----------------------------------------------------------------------===//
+// File lookup within a DirectoryLookup scope
+//===----------------------------------------------------------------------===//
+
+/// getName - Return the directory or filename corresponding to this lookup
+/// object.
+const char *DirectoryLookup::getName() const {
+ if (isNormalDir())
+ return getDir()->getName();
+ if (isFramework())
+ return getFrameworkDir()->getName();
+ assert(isHeaderMap() && "Unknown DirectoryLookup");
+ return getHeaderMap()->getFileName();
+}
+
+const FileEntry *HeaderSearch::getFileAndSuggestModule(
+ StringRef FileName, const DirectoryEntry *Dir, bool IsSystemHeaderDir,
+ Module *RequestingModule, ModuleMap::KnownHeader *SuggestedModule) {
+ // If we have a module map that might map this header, load it and
+ // check whether we'll have a suggestion for a module.
+ const FileEntry *File = getFileMgr().getFile(FileName, /*OpenFile=*/true);
+ if (!File)
+ return nullptr;
+
+ // If there is a module that corresponds to this header, suggest it.
+ if (!findUsableModuleForHeader(File, Dir ? Dir : File->getDir(),
+ RequestingModule, SuggestedModule,
+ IsSystemHeaderDir))
+ return nullptr;
+
+ return File;
+}
+
+/// LookupFile - Lookup the specified file in this search path, returning it
+/// if it exists or returning null if not.
+const FileEntry *DirectoryLookup::LookupFile(
+ StringRef &Filename,
+ HeaderSearch &HS,
+ SmallVectorImpl<char> *SearchPath,
+ SmallVectorImpl<char> *RelativePath,
+ Module *RequestingModule,
+ ModuleMap::KnownHeader *SuggestedModule,
+ bool &InUserSpecifiedSystemFramework,
+ bool &HasBeenMapped,
+ SmallVectorImpl<char> &MappedName) const {
+ InUserSpecifiedSystemFramework = false;
+ HasBeenMapped = false;
+
+ SmallString<1024> TmpDir;
+ if (isNormalDir()) {
+ // Concatenate the requested file onto the directory.
+ TmpDir = getDir()->getName();
+ llvm::sys::path::append(TmpDir, Filename);
+ if (SearchPath) {
+ StringRef SearchPathRef(getDir()->getName());
+ SearchPath->clear();
+ SearchPath->append(SearchPathRef.begin(), SearchPathRef.end());
+ }
+ if (RelativePath) {
+ RelativePath->clear();
+ RelativePath->append(Filename.begin(), Filename.end());
+ }
+
+ return HS.getFileAndSuggestModule(TmpDir, getDir(),
+ isSystemHeaderDirectory(),
+ RequestingModule, SuggestedModule);
+ }
+
+ if (isFramework())
+ return DoFrameworkLookup(Filename, HS, SearchPath, RelativePath,
+ RequestingModule, SuggestedModule,
+ InUserSpecifiedSystemFramework);
+
+ assert(isHeaderMap() && "Unknown directory lookup");
+ const HeaderMap *HM = getHeaderMap();
+ SmallString<1024> Path;
+ StringRef Dest = HM->lookupFilename(Filename, Path);
+ if (Dest.empty())
+ return nullptr;
+
+ const FileEntry *Result;
+
+ // Check if the headermap maps the filename to a framework include
+ // ("Foo.h" -> "Foo/Foo.h"), in which case continue header lookup using the
+ // framework include.
+ if (llvm::sys::path::is_relative(Dest)) {
+ MappedName.clear();
+ MappedName.append(Dest.begin(), Dest.end());
+ Filename = StringRef(MappedName.begin(), MappedName.size());
+ HasBeenMapped = true;
+ Result = HM->LookupFile(Filename, HS.getFileMgr());
+
+ } else {
+ Result = HS.getFileMgr().getFile(Dest);
+ }
+
+ if (Result) {
+ if (SearchPath) {
+ StringRef SearchPathRef(getName());
+ SearchPath->clear();
+ SearchPath->append(SearchPathRef.begin(), SearchPathRef.end());
+ }
+ if (RelativePath) {
+ RelativePath->clear();
+ RelativePath->append(Filename.begin(), Filename.end());
+ }
+ }
+ return Result;
+}
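+
+// A note on the remapping case above (mapping hypothetical): an hmap entry
+// of the form "Wibble.h" -> "Wibble/Wibble.h" rewrites Filename in place and
+// sets HasBeenMapped; HeaderSearch::LookupFile then caches the mapped name,
+// so later lookups of "Wibble.h" restart directly from the framework-style
+// spelling.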
+
+/// \brief Given a framework directory, find the top-most framework directory.
+///
+/// \param FileMgr The file manager to use for directory lookups.
+/// \param DirName The name of the framework directory.
+/// \param SubmodulePath Will be populated with the submodule path from the
+/// returned top-level module to the originally named framework.
+static const DirectoryEntry *
+getTopFrameworkDir(FileManager &FileMgr, StringRef DirName,
+ SmallVectorImpl<std::string> &SubmodulePath) {
+ assert(llvm::sys::path::extension(DirName) == ".framework" &&
+ "Not a framework directory");
+
+ // Note: as an egregious but useful hack we use the real path here, because
+ // frameworks moving between top-level frameworks to embedded frameworks tend
+ // to be symlinked, and we base the logical structure of modules on the
+ // physical layout. In particular, we need to deal with crazy includes like
+ //
+ // #include <Foo/Frameworks/Bar.framework/Headers/Wibble.h>
+ //
+ // where 'Bar' used to be embedded in 'Foo', is now a top-level framework
+ // which one should access with, e.g.,
+ //
+ // #include <Bar/Wibble.h>
+ //
+ // Similar issues occur when a top-level framework has moved into an
+ // embedded framework.
+ const DirectoryEntry *TopFrameworkDir = FileMgr.getDirectory(DirName);
+ DirName = FileMgr.getCanonicalName(TopFrameworkDir);
+ do {
+ // Get the parent directory name.
+ DirName = llvm::sys::path::parent_path(DirName);
+ if (DirName.empty())
+ break;
+
+ // Determine whether this directory exists.
+ const DirectoryEntry *Dir = FileMgr.getDirectory(DirName);
+ if (!Dir)
+ break;
+
+ // If this is a framework directory, then we're a subframework of this
+ // framework.
+ if (llvm::sys::path::extension(DirName) == ".framework") {
+ SubmodulePath.push_back(llvm::sys::path::stem(DirName));
+ TopFrameworkDir = Dir;
+ }
+ } while (true);
+
+ return TopFrameworkDir;
+}
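+
+// Sketch of the walk above (path hypothetical): for
+//   DirName = "/A/Foo.framework/Frameworks/Bar.framework"
+// the loop climbs to "/A/Foo.framework", appends "Foo" to SubmodulePath, and
+// returns the directory entry for "/A/Foo.framework" as the top-level
+// framework.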
+
+/// DoFrameworkLookup - Do a lookup of the specified file in the current
+/// DirectoryLookup, which is a framework directory.
+const FileEntry *DirectoryLookup::DoFrameworkLookup(
+ StringRef Filename, HeaderSearch &HS, SmallVectorImpl<char> *SearchPath,
+ SmallVectorImpl<char> *RelativePath, Module *RequestingModule,
+ ModuleMap::KnownHeader *SuggestedModule,
+ bool &InUserSpecifiedSystemFramework) const {
+ FileManager &FileMgr = HS.getFileMgr();
+
+ // Framework names must have a '/' in the filename.
+ size_t SlashPos = Filename.find('/');
+ if (SlashPos == StringRef::npos) return nullptr;
+
+ // Find out if this is the home for the specified framework, by checking
+ // HeaderSearch. Possible answers are yes/no and unknown.
+ HeaderSearch::FrameworkCacheEntry &CacheEntry =
+ HS.LookupFrameworkCache(Filename.substr(0, SlashPos));
+
+ // If it is known and in some other directory, fail.
+ if (CacheEntry.Directory && CacheEntry.Directory != getFrameworkDir())
+ return nullptr;
+
+ // Otherwise, construct the path to this framework dir.
+
+ // FrameworkName = "/System/Library/Frameworks/"
+ SmallString<1024> FrameworkName;
+ FrameworkName += getFrameworkDir()->getName();
+ if (FrameworkName.empty() || FrameworkName.back() != '/')
+ FrameworkName.push_back('/');
+
+ // FrameworkName = "/System/Library/Frameworks/Cocoa"
+ StringRef ModuleName(Filename.begin(), SlashPos);
+ FrameworkName += ModuleName;
+
+ // FrameworkName = "/System/Library/Frameworks/Cocoa.framework/"
+ FrameworkName += ".framework/";
+
+ // If the cache entry was unresolved, populate it now.
+ if (!CacheEntry.Directory) {
+ HS.IncrementFrameworkLookupCount();
+
+ // If the framework dir doesn't exist, we fail.
+ const DirectoryEntry *Dir = FileMgr.getDirectory(FrameworkName);
+ if (!Dir) return nullptr;
+
+ // Otherwise, if it does, remember that this is the right direntry for this
+ // framework.
+ CacheEntry.Directory = getFrameworkDir();
+
+ // If this is a user search directory, check if the framework has been
+ // user-specified as a system framework.
+ if (getDirCharacteristic() == SrcMgr::C_User) {
+ SmallString<1024> SystemFrameworkMarker(FrameworkName);
+ SystemFrameworkMarker += ".system_framework";
+ if (llvm::sys::fs::exists(SystemFrameworkMarker)) {
+ CacheEntry.IsUserSpecifiedSystemFramework = true;
+ }
+ }
+ }
+
+ // Set the 'user-specified system framework' flag.
+ InUserSpecifiedSystemFramework = CacheEntry.IsUserSpecifiedSystemFramework;
+
+ if (RelativePath) {
+ RelativePath->clear();
+ RelativePath->append(Filename.begin()+SlashPos+1, Filename.end());
+ }
+
+ // Check "/System/Library/Frameworks/Cocoa.framework/Headers/file.h"
+ unsigned OrigSize = FrameworkName.size();
+
+ FrameworkName += "Headers/";
+
+ if (SearchPath) {
+ SearchPath->clear();
+ // Without trailing '/'.
+ SearchPath->append(FrameworkName.begin(), FrameworkName.end()-1);
+ }
+
+ FrameworkName.append(Filename.begin()+SlashPos+1, Filename.end());
+ const FileEntry *FE = FileMgr.getFile(FrameworkName,
+ /*openFile=*/!SuggestedModule);
+ if (!FE) {
+ // Check "/System/Library/Frameworks/Cocoa.framework/PrivateHeaders/file.h"
+ const char *Private = "Private";
+ FrameworkName.insert(FrameworkName.begin()+OrigSize, Private,
+ Private+strlen(Private));
+ if (SearchPath)
+ SearchPath->insert(SearchPath->begin()+OrigSize, Private,
+ Private+strlen(Private));
+
+ FE = FileMgr.getFile(FrameworkName, /*openFile=*/!SuggestedModule);
+ }
+
+ // If we found the header and are allowed to suggest a module, do so now.
+ if (FE && SuggestedModule) {
+ // Find the framework in which this header occurs.
+ StringRef FrameworkPath = FE->getDir()->getName();
+ bool FoundFramework = false;
+ do {
+ // Determine whether this directory exists.
+ const DirectoryEntry *Dir = FileMgr.getDirectory(FrameworkPath);
+ if (!Dir)
+ break;
+
+ // If this is a framework directory, then we're a subframework of this
+ // framework.
+ if (llvm::sys::path::extension(FrameworkPath) == ".framework") {
+ FoundFramework = true;
+ break;
+ }
+
+ // Get the parent directory name.
+ FrameworkPath = llvm::sys::path::parent_path(FrameworkPath);
+ if (FrameworkPath.empty())
+ break;
+ } while (true);
+
+ bool IsSystem = getDirCharacteristic() != SrcMgr::C_User;
+ if (FoundFramework) {
+ if (!HS.findUsableModuleForFrameworkHeader(
+ FE, FrameworkPath, RequestingModule, SuggestedModule, IsSystem))
+ return nullptr;
+ } else {
+ if (!HS.findUsableModuleForHeader(FE, getDir(), RequestingModule,
+ SuggestedModule, IsSystem))
+ return nullptr;
+ }
+ }
+ return FE;
+}
+
+void HeaderSearch::setTarget(const TargetInfo &Target) {
+ ModMap.setTarget(Target);
+}
+
+//===----------------------------------------------------------------------===//
+// Header File Location.
+//===----------------------------------------------------------------------===//
+
+/// \brief Return true with a diagnostic if the file that MSVC would have found
+/// fails to match the one that Clang would have found with MSVC header search
+/// disabled.
+static bool checkMSVCHeaderSearch(DiagnosticsEngine &Diags,
+ const FileEntry *MSFE, const FileEntry *FE,
+ SourceLocation IncludeLoc) {
+ if (MSFE && FE != MSFE) {
+ Diags.Report(IncludeLoc, diag::ext_pp_include_search_ms) << MSFE->getName();
+ return true;
+ }
+ return false;
+}
+
+static const char *copyString(StringRef Str, llvm::BumpPtrAllocator &Alloc) {
+ assert(!Str.empty());
+ char *CopyStr = Alloc.Allocate<char>(Str.size()+1);
+ std::copy(Str.begin(), Str.end(), CopyStr);
+ CopyStr[Str.size()] = '\0';
+ return CopyStr;
+}
+
+/// LookupFile - Given a "foo" or \<foo> reference, look up the indicated file,
+/// returning null on failure. isAngled indicates whether the file reference is
+/// for system \#include's or not (i.e. using <> instead of ""). Includers, if
+/// non-empty, indicates where the \#including file(s) are, in case a relative
+/// search is needed. Microsoft mode will pass all \#including files.
+const FileEntry *HeaderSearch::LookupFile(
+ StringRef Filename, SourceLocation IncludeLoc, bool isAngled,
+ const DirectoryLookup *FromDir, const DirectoryLookup *&CurDir,
+ ArrayRef<std::pair<const FileEntry *, const DirectoryEntry *>> Includers,
+ SmallVectorImpl<char> *SearchPath, SmallVectorImpl<char> *RelativePath,
+ Module *RequestingModule, ModuleMap::KnownHeader *SuggestedModule,
+ bool SkipCache) {
+ if (SuggestedModule)
+ *SuggestedModule = ModuleMap::KnownHeader();
+
+  // If 'Filename' is absolute, just check whether it exists; no searching.
+ if (llvm::sys::path::is_absolute(Filename)) {
+ CurDir = nullptr;
+
+ // If this was an #include_next "/absolute/file", fail.
+ if (FromDir) return nullptr;
+
+ if (SearchPath)
+ SearchPath->clear();
+ if (RelativePath) {
+ RelativePath->clear();
+ RelativePath->append(Filename.begin(), Filename.end());
+ }
+ // Otherwise, just return the file.
+ return getFileAndSuggestModule(Filename, nullptr,
+ /*IsSystemHeaderDir*/false,
+ RequestingModule, SuggestedModule);
+ }
+
+ // This is the header that MSVC's header search would have found.
+ const FileEntry *MSFE = nullptr;
+ ModuleMap::KnownHeader MSSuggestedModule;
+
+ // Unless disabled, check to see if the file is in the #includer's
+ // directory. This cannot be based on CurDir, because each includer could be
+ // a #include of a subdirectory (#include "foo/bar.h") and a subsequent
+ // include of "baz.h" should resolve to "whatever/foo/baz.h".
+ // This search is not done for <> headers.
+ if (!Includers.empty() && !isAngled && !NoCurDirSearch) {
+ SmallString<1024> TmpDir;
+ bool First = true;
+ for (const auto &IncluderAndDir : Includers) {
+ const FileEntry *Includer = IncluderAndDir.first;
+
+ // Concatenate the requested file onto the directory.
+ // FIXME: Portability. Filename concatenation should be in sys::Path.
+ TmpDir = IncluderAndDir.second->getName();
+ TmpDir.push_back('/');
+ TmpDir.append(Filename.begin(), Filename.end());
+
+ // FIXME: We don't cache the result of getFileInfo across the call to
+ // getFileAndSuggestModule, because it's a reference to an element of
+ // a container that could be reallocated across this call.
+ //
+ // FIXME: If we have no includer, that means we're processing a #include
+ // from a module build. We should treat this as a system header if we're
+ // building a [system] module.
+ bool IncluderIsSystemHeader =
+ Includer && getFileInfo(Includer).DirInfo != SrcMgr::C_User;
+ if (const FileEntry *FE = getFileAndSuggestModule(
+ TmpDir, IncluderAndDir.second, IncluderIsSystemHeader,
+ RequestingModule, SuggestedModule)) {
+ if (!Includer) {
+ assert(First && "only first includer can have no file");
+ return FE;
+ }
+
+ // Leave CurDir unset.
+        // This file is a system header (or C++-unfriendly) if the old file
+        // is.
+        //
+        // Note that we only use one of FromHFI/ToHFI at a time, because the
+        // underlying vector may be reallocated, which would leave the first
+        // reference dangling.
+ HeaderFileInfo &FromHFI = getFileInfo(Includer);
+ unsigned DirInfo = FromHFI.DirInfo;
+ bool IndexHeaderMapHeader = FromHFI.IndexHeaderMapHeader;
+ StringRef Framework = FromHFI.Framework;
+
+ HeaderFileInfo &ToHFI = getFileInfo(FE);
+ ToHFI.DirInfo = DirInfo;
+ ToHFI.IndexHeaderMapHeader = IndexHeaderMapHeader;
+ ToHFI.Framework = Framework;
+
+ if (SearchPath) {
+ StringRef SearchPathRef(IncluderAndDir.second->getName());
+ SearchPath->clear();
+ SearchPath->append(SearchPathRef.begin(), SearchPathRef.end());
+ }
+ if (RelativePath) {
+ RelativePath->clear();
+ RelativePath->append(Filename.begin(), Filename.end());
+ }
+ if (First)
+ return FE;
+
+ // Otherwise, we found the path via MSVC header search rules. If
+ // -Wmsvc-include is enabled, we have to keep searching to see if we
+ // would've found this header in -I or -isystem directories.
+ if (Diags.isIgnored(diag::ext_pp_include_search_ms, IncludeLoc)) {
+ return FE;
+ } else {
+ MSFE = FE;
+ if (SuggestedModule) {
+ MSSuggestedModule = *SuggestedModule;
+ *SuggestedModule = ModuleMap::KnownHeader();
+ }
+ break;
+ }
+ }
+ First = false;
+ }
+ }
+
+ CurDir = nullptr;
+
+ // If this is a system #include, ignore the user #include locs.
+ unsigned i = isAngled ? AngledDirIdx : 0;
+
+ // If this is a #include_next request, start searching after the directory the
+ // file was found in.
+ if (FromDir)
+ i = FromDir-&SearchDirs[0];
+
+  // Cache all of the lookups performed by this method. Many headers are
+  // multiply included, and the "pragma once" optimization prevents them from
+  // being re-lexed and preprocessed, but each lookup would still have to walk
+  // a (potentially huge) series of SearchDirs to find the file.
+ LookupFileCacheInfo &CacheLookup = LookupFileCache[Filename];
+
+ // If the entry has been previously looked up, the first value will be
+ // non-zero. If the value is equal to i (the start point of our search), then
+ // this is a matching hit.
+ if (!SkipCache && CacheLookup.StartIdx == i+1) {
+ // Skip querying potentially lots of directories for this lookup.
+ i = CacheLookup.HitIdx;
+ if (CacheLookup.MappedName)
+ Filename = CacheLookup.MappedName;
+ } else {
+ // Otherwise, this is the first query, or the previous query didn't match
+ // our search start. We will fill in our found location below, so prime the
+ // start point value.
+ CacheLookup.reset(/*StartIdx=*/i+1);
+ }
+
+ SmallString<64> MappedName;
+
+ // Check each directory in sequence to see if it contains this file.
+ for (; i != SearchDirs.size(); ++i) {
+ bool InUserSpecifiedSystemFramework = false;
+ bool HasBeenMapped = false;
+ const FileEntry *FE = SearchDirs[i].LookupFile(
+ Filename, *this, SearchPath, RelativePath, RequestingModule,
+ SuggestedModule, InUserSpecifiedSystemFramework, HasBeenMapped,
+ MappedName);
+ if (HasBeenMapped) {
+ CacheLookup.MappedName =
+ copyString(Filename, LookupFileCache.getAllocator());
+ }
+ if (!FE) continue;
+
+ CurDir = &SearchDirs[i];
+
+    // This file is a system header (or C++-unfriendly) if the dir is.
+ HeaderFileInfo &HFI = getFileInfo(FE);
+ HFI.DirInfo = CurDir->getDirCharacteristic();
+
+ // If the directory characteristic is User but this framework was
+ // user-specified to be treated as a system framework, promote the
+ // characteristic.
+ if (HFI.DirInfo == SrcMgr::C_User && InUserSpecifiedSystemFramework)
+ HFI.DirInfo = SrcMgr::C_System;
+
+ // If the filename matches a known system header prefix, override
+ // whether the file is a system header.
+ for (unsigned j = SystemHeaderPrefixes.size(); j; --j) {
+ if (Filename.startswith(SystemHeaderPrefixes[j-1].first)) {
+ HFI.DirInfo = SystemHeaderPrefixes[j-1].second ? SrcMgr::C_System
+ : SrcMgr::C_User;
+ break;
+ }
+ }
+
+ // If this file is found in a header map and uses the framework style of
+ // includes, then this header is part of a framework we're building.
+ if (CurDir->isIndexHeaderMap()) {
+ size_t SlashPos = Filename.find('/');
+ if (SlashPos != StringRef::npos) {
+ HFI.IndexHeaderMapHeader = 1;
+ HFI.Framework = getUniqueFrameworkName(StringRef(Filename.begin(),
+ SlashPos));
+ }
+ }
+
+ if (checkMSVCHeaderSearch(Diags, MSFE, FE, IncludeLoc)) {
+ if (SuggestedModule)
+ *SuggestedModule = MSSuggestedModule;
+ return MSFE;
+ }
+
+ // Remember this location for the next lookup we do.
+ CacheLookup.HitIdx = i;
+ return FE;
+ }
+
+ // If we are including a file with a quoted include "foo.h" from inside
+ // a header in a framework that is currently being built, and we couldn't
+ // resolve "foo.h" any other way, change the include to <Foo/foo.h>, where
+ // "Foo" is the name of the framework in which the including header was found.
+ if (!Includers.empty() && Includers.front().first && !isAngled &&
+ Filename.find('/') == StringRef::npos) {
+ HeaderFileInfo &IncludingHFI = getFileInfo(Includers.front().first);
+ if (IncludingHFI.IndexHeaderMapHeader) {
+ SmallString<128> ScratchFilename;
+ ScratchFilename += IncludingHFI.Framework;
+ ScratchFilename += '/';
+ ScratchFilename += Filename;
+
+ const FileEntry *FE =
+ LookupFile(ScratchFilename, IncludeLoc, /*isAngled=*/true, FromDir,
+ CurDir, Includers.front(), SearchPath, RelativePath,
+ RequestingModule, SuggestedModule);
+
+ if (checkMSVCHeaderSearch(Diags, MSFE, FE, IncludeLoc)) {
+ if (SuggestedModule)
+ *SuggestedModule = MSSuggestedModule;
+ return MSFE;
+ }
+
+ LookupFileCacheInfo &CacheLookup = LookupFileCache[Filename];
+ CacheLookup.HitIdx = LookupFileCache[ScratchFilename].HitIdx;
+ // FIXME: SuggestedModule.
+ return FE;
+ }
+ }
+
+ if (checkMSVCHeaderSearch(Diags, MSFE, nullptr, IncludeLoc)) {
+ if (SuggestedModule)
+ *SuggestedModule = MSSuggestedModule;
+ return MSFE;
+ }
+
+ // Otherwise, didn't find it. Remember we didn't find this.
+ CacheLookup.HitIdx = SearchDirs.size();
+ return nullptr;
+}
+
+/// LookupSubframeworkHeader - Look up a subframework for the specified
+/// \#include file. For example, if \#include'ing <HIToolbox/HIToolbox.h> from
+/// within ".../Carbon.framework/Headers/Carbon.h", check to see if HIToolbox
+/// is a subframework within Carbon.framework. If so, return the FileEntry
+/// for the designated file, otherwise return null.
+const FileEntry *HeaderSearch::
+LookupSubframeworkHeader(StringRef Filename,
+ const FileEntry *ContextFileEnt,
+ SmallVectorImpl<char> *SearchPath,
+ SmallVectorImpl<char> *RelativePath,
+ Module *RequestingModule,
+ ModuleMap::KnownHeader *SuggestedModule) {
+ assert(ContextFileEnt && "No context file?");
+
+ // Framework names must have a '/' in the filename. Find it.
+ // FIXME: Should we permit '\' on Windows?
+ size_t SlashPos = Filename.find('/');
+ if (SlashPos == StringRef::npos) return nullptr;
+
+ // Look up the base framework name of the ContextFileEnt.
+ const char *ContextName = ContextFileEnt->getName();
+
+  // If the context file isn't in a framework, this can't be a subframework.
+ const unsigned DotFrameworkLen = 10;
+ const char *FrameworkPos = strstr(ContextName, ".framework");
+ if (FrameworkPos == nullptr ||
+ (FrameworkPos[DotFrameworkLen] != '/' &&
+ FrameworkPos[DotFrameworkLen] != '\\'))
+ return nullptr;
+
+ SmallString<1024> FrameworkName(ContextName, FrameworkPos+DotFrameworkLen+1);
+
+ // Append Frameworks/HIToolbox.framework/
+ FrameworkName += "Frameworks/";
+ FrameworkName.append(Filename.begin(), Filename.begin()+SlashPos);
+ FrameworkName += ".framework/";
+
+ auto &CacheLookup =
+ *FrameworkMap.insert(std::make_pair(Filename.substr(0, SlashPos),
+ FrameworkCacheEntry())).first;
+
+ // Some other location?
+ if (CacheLookup.second.Directory &&
+ CacheLookup.first().size() == FrameworkName.size() &&
+ memcmp(CacheLookup.first().data(), &FrameworkName[0],
+ CacheLookup.first().size()) != 0)
+ return nullptr;
+
+ // Cache subframework.
+ if (!CacheLookup.second.Directory) {
+ ++NumSubFrameworkLookups;
+
+ // If the framework dir doesn't exist, we fail.
+ const DirectoryEntry *Dir = FileMgr.getDirectory(FrameworkName);
+ if (!Dir) return nullptr;
+
+ // Otherwise, if it does, remember that this is the right direntry for this
+ // framework.
+ CacheLookup.second.Directory = Dir;
+ }
+
+ const FileEntry *FE = nullptr;
+
+ if (RelativePath) {
+ RelativePath->clear();
+ RelativePath->append(Filename.begin()+SlashPos+1, Filename.end());
+ }
+
+ // Check ".../Frameworks/HIToolbox.framework/Headers/HIToolbox.h"
+ SmallString<1024> HeadersFilename(FrameworkName);
+ HeadersFilename += "Headers/";
+ if (SearchPath) {
+ SearchPath->clear();
+ // Without trailing '/'.
+ SearchPath->append(HeadersFilename.begin(), HeadersFilename.end()-1);
+ }
+
+ HeadersFilename.append(Filename.begin()+SlashPos+1, Filename.end());
+ if (!(FE = FileMgr.getFile(HeadersFilename, /*openFile=*/true))) {
+
+ // Check ".../Frameworks/HIToolbox.framework/PrivateHeaders/HIToolbox.h"
+ HeadersFilename = FrameworkName;
+ HeadersFilename += "PrivateHeaders/";
+ if (SearchPath) {
+ SearchPath->clear();
+ // Without trailing '/'.
+ SearchPath->append(HeadersFilename.begin(), HeadersFilename.end()-1);
+ }
+
+ HeadersFilename.append(Filename.begin()+SlashPos+1, Filename.end());
+ if (!(FE = FileMgr.getFile(HeadersFilename, /*openFile=*/true)))
+ return nullptr;
+ }
+
+  // This file is a system header (or C++-unfriendly) if the old file is.
+ //
+ // Note that the temporary 'DirInfo' is required here, as either call to
+ // getFileInfo could resize the vector and we don't want to rely on order
+ // of evaluation.
+ unsigned DirInfo = getFileInfo(ContextFileEnt).DirInfo;
+ getFileInfo(FE).DirInfo = DirInfo;
+
+ FrameworkName.pop_back(); // remove the trailing '/'
+ if (!findUsableModuleForFrameworkHeader(FE, FrameworkName, RequestingModule,
+ SuggestedModule, /*IsSystem*/ false))
+ return nullptr;
+
+ return FE;
+}
+
+//===----------------------------------------------------------------------===//
+// File Info Management.
+//===----------------------------------------------------------------------===//
+
+/// \brief Merge the header file info provided by \p OtherHFI into the current
+/// header file info (\p HFI)
+static void mergeHeaderFileInfo(HeaderFileInfo &HFI,
+ const HeaderFileInfo &OtherHFI) {
+ assert(OtherHFI.External && "expected to merge external HFI");
+
+ HFI.isImport |= OtherHFI.isImport;
+ HFI.isPragmaOnce |= OtherHFI.isPragmaOnce;
+ HFI.isModuleHeader |= OtherHFI.isModuleHeader;
+ HFI.NumIncludes += OtherHFI.NumIncludes;
+
+ if (!HFI.ControllingMacro && !HFI.ControllingMacroID) {
+ HFI.ControllingMacro = OtherHFI.ControllingMacro;
+ HFI.ControllingMacroID = OtherHFI.ControllingMacroID;
+ }
+
+ HFI.DirInfo = OtherHFI.DirInfo;
+ HFI.External = (!HFI.IsValid || HFI.External);
+ HFI.IsValid = true;
+ HFI.IndexHeaderMapHeader = OtherHFI.IndexHeaderMapHeader;
+
+ if (HFI.Framework.empty())
+ HFI.Framework = OtherHFI.Framework;
+}
+
+/// getFileInfo - Return the HeaderFileInfo structure for the specified
+/// FileEntry.
+HeaderFileInfo &HeaderSearch::getFileInfo(const FileEntry *FE) {
+ if (FE->getUID() >= FileInfo.size())
+ FileInfo.resize(FE->getUID() + 1);
+
+ HeaderFileInfo *HFI = &FileInfo[FE->getUID()];
+ // FIXME: Use a generation count to check whether this is really up to date.
+ if (ExternalSource && !HFI->Resolved) {
+ HFI->Resolved = true;
+ auto ExternalHFI = ExternalSource->GetHeaderFileInfo(FE);
+
+ HFI = &FileInfo[FE->getUID()];
+ if (ExternalHFI.External)
+ mergeHeaderFileInfo(*HFI, ExternalHFI);
+ }
+
+ HFI->IsValid = true;
+ // We have local information about this header file, so it's no longer
+ // strictly external.
+ HFI->External = false;
+ return *HFI;
+}
+
+const HeaderFileInfo *
+HeaderSearch::getExistingFileInfo(const FileEntry *FE,
+ bool WantExternal) const {
+ // If we have an external source, ensure we have the latest information.
+ // FIXME: Use a generation count to check whether this is really up to date.
+ HeaderFileInfo *HFI;
+ if (ExternalSource) {
+ if (FE->getUID() >= FileInfo.size()) {
+ if (!WantExternal)
+ return nullptr;
+ FileInfo.resize(FE->getUID() + 1);
+ }
+
+ HFI = &FileInfo[FE->getUID()];
+ if (!WantExternal && (!HFI->IsValid || HFI->External))
+ return nullptr;
+ if (!HFI->Resolved) {
+ HFI->Resolved = true;
+ auto ExternalHFI = ExternalSource->GetHeaderFileInfo(FE);
+
+ HFI = &FileInfo[FE->getUID()];
+ if (ExternalHFI.External)
+ mergeHeaderFileInfo(*HFI, ExternalHFI);
+ }
+ } else if (FE->getUID() >= FileInfo.size()) {
+ return nullptr;
+ } else {
+ HFI = &FileInfo[FE->getUID()];
+ }
+
+ if (!HFI->IsValid || (HFI->External && !WantExternal))
+ return nullptr;
+
+ return HFI;
+}
+
+bool HeaderSearch::isFileMultipleIncludeGuarded(const FileEntry *File) {
+ // Check if we've ever seen this file as a header.
+ if (auto *HFI = getExistingFileInfo(File))
+ return HFI->isPragmaOnce || HFI->isImport || HFI->ControllingMacro ||
+ HFI->ControllingMacroID;
+ return false;
+}
+
+void HeaderSearch::MarkFileModuleHeader(const FileEntry *FE,
+ ModuleMap::ModuleHeaderRole Role,
+ bool isCompilingModuleHeader) {
+ bool isModularHeader = !(Role & ModuleMap::TextualHeader);
+
+ // Don't mark the file info as non-external if there's nothing to change.
+ if (!isCompilingModuleHeader) {
+ if (!isModularHeader)
+ return;
+ auto *HFI = getExistingFileInfo(FE);
+ if (HFI && HFI->isModuleHeader)
+ return;
+ }
+
+ auto &HFI = getFileInfo(FE);
+ HFI.isModuleHeader |= isModularHeader;
+ HFI.isCompilingModuleHeader |= isCompilingModuleHeader;
+}
+
+bool HeaderSearch::ShouldEnterIncludeFile(Preprocessor &PP,
+ const FileEntry *File,
+ bool isImport, Module *M) {
+ ++NumIncluded; // Count # of attempted #includes.
+
+ // Get information about this file.
+ HeaderFileInfo &FileInfo = getFileInfo(File);
+
+ // If this is a #import directive, check that we have not already imported
+ // this header.
+ if (isImport) {
+ // If this has already been imported, don't import it again.
+ FileInfo.isImport = true;
+
+ // Has this already been #import'ed or #include'd?
+ if (FileInfo.NumIncludes) return false;
+ } else {
+ // Otherwise, if this is a #include of a file that was previously #import'd
+ // or if this is the second #include of a #pragma once file, ignore it.
+ if (FileInfo.isImport)
+ return false;
+ }
+
+ // Next, check to see if the file is wrapped with #ifndef guards. If so, and
+ // if the macro that guards it is defined, we know the #include has no effect.
+ if (const IdentifierInfo *ControllingMacro
+ = FileInfo.getControllingMacro(ExternalLookup)) {
+ // If the header corresponds to a module, check whether the macro is already
+ // defined in that module rather than checking in the current set of visible
+ // modules.
+ if (M ? PP.isMacroDefinedInLocalModule(ControllingMacro, M)
+ : PP.isMacroDefined(ControllingMacro)) {
+ ++NumMultiIncludeFileOptzn;
+ return false;
+ }
+ }
+
+ // Increment the number of times this file has been included.
+ ++FileInfo.NumIncludes;
+
+ return true;
+}
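+
+// The controlling-macro check above is what makes the classic guard pattern
+//
+//   #ifndef FOO_H   // (FOO_H is an illustrative guard name)
+//   #define FOO_H
+//   ...
+//   #endif
+//
+// cheap: once FOO_H is known to be defined, a repeated #include of the file
+// is skipped without re-lexing it at all.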
+
+size_t HeaderSearch::getTotalMemory() const {
+ return SearchDirs.capacity()
+ + llvm::capacity_in_bytes(FileInfo)
+ + llvm::capacity_in_bytes(HeaderMaps)
+ + LookupFileCache.getAllocator().getTotalMemory()
+ + FrameworkMap.getAllocator().getTotalMemory();
+}
+
+StringRef HeaderSearch::getUniqueFrameworkName(StringRef Framework) {
+ return FrameworkNames.insert(Framework).first->first();
+}
+
+bool HeaderSearch::hasModuleMap(StringRef FileName,
+ const DirectoryEntry *Root,
+ bool IsSystem) {
+ if (!HSOpts->ImplicitModuleMaps)
+ return false;
+
+ SmallVector<const DirectoryEntry *, 2> FixUpDirectories;
+
+ StringRef DirName = FileName;
+ do {
+ // Get the parent directory name.
+ DirName = llvm::sys::path::parent_path(DirName);
+ if (DirName.empty())
+ return false;
+
+ // Determine whether this directory exists.
+ const DirectoryEntry *Dir = FileMgr.getDirectory(DirName);
+ if (!Dir)
+ return false;
+
+ // Try to load the module map file in this directory.
+ switch (loadModuleMapFile(Dir, IsSystem,
+ llvm::sys::path::extension(Dir->getName()) ==
+ ".framework")) {
+ case LMM_NewlyLoaded:
+ case LMM_AlreadyLoaded:
+ // Success. All of the directories we stepped through inherit this module
+ // map file.
+ for (unsigned I = 0, N = FixUpDirectories.size(); I != N; ++I)
+ DirectoryHasModuleMap[FixUpDirectories[I]] = true;
+ return true;
+
+ case LMM_NoDirectory:
+ case LMM_InvalidModuleMap:
+ break;
+ }
+
+ // If we hit the top of our search, we're done.
+ if (Dir == Root)
+ return false;
+
+ // Keep track of all of the directories we checked, so we can mark them as
+ // having module maps if we eventually do find a module map.
+ FixUpDirectories.push_back(Dir);
+ } while (true);
+}
+
+ModuleMap::KnownHeader
+HeaderSearch::findModuleForHeader(const FileEntry *File) const {
+ if (ExternalSource) {
+ // Make sure the external source has handled header info about this file,
+ // which includes whether the file is part of a module.
+ (void)getExistingFileInfo(File);
+ }
+ return ModMap.findModuleForHeader(File);
+}
+
+bool HeaderSearch::findUsableModuleForHeader(
+ const FileEntry *File, const DirectoryEntry *Root, Module *RequestingModule,
+ ModuleMap::KnownHeader *SuggestedModule, bool IsSystemHeaderDir) {
+ if (File && SuggestedModule) {
+ // If there is a module that corresponds to this header, suggest it.
+ hasModuleMap(File->getName(), Root, IsSystemHeaderDir);
+ *SuggestedModule = findModuleForHeader(File);
+ }
+ return true;
+}
+
+bool HeaderSearch::findUsableModuleForFrameworkHeader(
+ const FileEntry *File, StringRef FrameworkName, Module *RequestingModule,
+ ModuleMap::KnownHeader *SuggestedModule, bool IsSystemFramework) {
+ // If we're supposed to suggest a module, look for one now.
+ if (SuggestedModule) {
+ // Find the top-level framework based on this framework.
+ SmallVector<std::string, 4> SubmodulePath;
+ const DirectoryEntry *TopFrameworkDir
+ = ::getTopFrameworkDir(FileMgr, FrameworkName, SubmodulePath);
+
+ // Determine the name of the top-level framework.
+ StringRef ModuleName = llvm::sys::path::stem(TopFrameworkDir->getName());
+
+ // Load this framework module. If that succeeds, find the suggested module
+ // for this header, if any.
+ loadFrameworkModule(ModuleName, TopFrameworkDir, IsSystemFramework);
+
+ // FIXME: This can find a module not part of ModuleName, which is
+ // important so that we're consistent about whether this header
+ // corresponds to a module. Possibly we should lock down framework modules
+ // so that this is not possible.
+ *SuggestedModule = findModuleForHeader(File);
+ }
+ return true;
+}
+
+static const FileEntry *getPrivateModuleMap(const FileEntry *File,
+ FileManager &FileMgr) {
+ StringRef Filename = llvm::sys::path::filename(File->getName());
+ SmallString<128> PrivateFilename(File->getDir()->getName());
+ if (Filename == "module.map")
+ llvm::sys::path::append(PrivateFilename, "module_private.map");
+ else if (Filename == "module.modulemap")
+ llvm::sys::path::append(PrivateFilename, "module.private.modulemap");
+ else
+ return nullptr;
+ return FileMgr.getFile(PrivateFilename);
+}
+
+bool HeaderSearch::loadModuleMapFile(const FileEntry *File, bool IsSystem) {
+ // Find the directory for the module. For frameworks, that may require going
+ // up from the 'Modules' directory.
+ const DirectoryEntry *Dir = nullptr;
+ if (getHeaderSearchOpts().ModuleMapFileHomeIsCwd)
+ Dir = FileMgr.getDirectory(".");
+ else {
+ Dir = File->getDir();
+ StringRef DirName(Dir->getName());
+ if (llvm::sys::path::filename(DirName) == "Modules") {
+ DirName = llvm::sys::path::parent_path(DirName);
+ if (DirName.endswith(".framework"))
+ Dir = FileMgr.getDirectory(DirName);
+ // FIXME: This assert can fail if there's a race between the above check
+ // and the removal of the directory.
+ assert(Dir && "parent must exist");
+ }
+ }
+
+ switch (loadModuleMapFileImpl(File, IsSystem, Dir)) {
+ case LMM_AlreadyLoaded:
+ case LMM_NewlyLoaded:
+ return false;
+ case LMM_NoDirectory:
+ case LMM_InvalidModuleMap:
+ return true;
+ }
+ llvm_unreachable("Unknown load module map result");
+}
+
+HeaderSearch::LoadModuleMapResult
+HeaderSearch::loadModuleMapFileImpl(const FileEntry *File, bool IsSystem,
+ const DirectoryEntry *Dir) {
+ assert(File && "expected FileEntry");
+
+ // Check whether we've already loaded this module map, and mark it as being
+ // loaded in case we recursively try to load it from itself.
+ auto AddResult = LoadedModuleMaps.insert(std::make_pair(File, true));
+ if (!AddResult.second)
+ return AddResult.first->second ? LMM_AlreadyLoaded : LMM_InvalidModuleMap;
+
+ if (ModMap.parseModuleMapFile(File, IsSystem, Dir)) {
+ LoadedModuleMaps[File] = false;
+ return LMM_InvalidModuleMap;
+ }
+
+ // Try to load a corresponding private module map.
+ if (const FileEntry *PMMFile = getPrivateModuleMap(File, FileMgr)) {
+ if (ModMap.parseModuleMapFile(PMMFile, IsSystem, Dir)) {
+ LoadedModuleMaps[File] = false;
+ return LMM_InvalidModuleMap;
+ }
+ }
+
+ // This directory has a module map.
+ return LMM_NewlyLoaded;
+}
+
+const FileEntry *
+HeaderSearch::lookupModuleMapFile(const DirectoryEntry *Dir, bool IsFramework) {
+ if (!HSOpts->ImplicitModuleMaps)
+ return nullptr;
+ // For frameworks, the preferred spelling is Modules/module.modulemap, but
+ // module.map at the framework root is also accepted.
+ SmallString<128> ModuleMapFileName(Dir->getName());
+ if (IsFramework)
+ llvm::sys::path::append(ModuleMapFileName, "Modules");
+ llvm::sys::path::append(ModuleMapFileName, "module.modulemap");
+ if (const FileEntry *F = FileMgr.getFile(ModuleMapFileName))
+ return F;
+
+ // Continue to allow module.map
+ ModuleMapFileName = Dir->getName();
+ llvm::sys::path::append(ModuleMapFileName, "module.map");
+ return FileMgr.getFile(ModuleMapFileName);
+}
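+
+// Concretely (framework name illustrative): for "Foo.framework" this prefers
+// "Foo.framework/Modules/module.modulemap" and falls back to
+// "Foo.framework/module.map"; for a plain directory it checks
+// "<dir>/module.modulemap" and then "<dir>/module.map".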
+
+Module *HeaderSearch::loadFrameworkModule(StringRef Name,
+ const DirectoryEntry *Dir,
+ bool IsSystem) {
+ if (Module *Module = ModMap.findModule(Name))
+ return Module;
+
+ // Try to load a module map file.
+ switch (loadModuleMapFile(Dir, IsSystem, /*IsFramework*/true)) {
+ case LMM_InvalidModuleMap:
+ // Try to infer a module map from the framework directory.
+ if (HSOpts->ImplicitModuleMaps)
+ ModMap.inferFrameworkModule(Dir, IsSystem, /*Parent=*/nullptr);
+ break;
+
+ case LMM_AlreadyLoaded:
+ case LMM_NoDirectory:
+ return nullptr;
+
+ case LMM_NewlyLoaded:
+ break;
+ }
+
+ return ModMap.findModule(Name);
+}
+
+HeaderSearch::LoadModuleMapResult
+HeaderSearch::loadModuleMapFile(StringRef DirName, bool IsSystem,
+ bool IsFramework) {
+ if (const DirectoryEntry *Dir = FileMgr.getDirectory(DirName))
+ return loadModuleMapFile(Dir, IsSystem, IsFramework);
+
+ return LMM_NoDirectory;
+}
+
+HeaderSearch::LoadModuleMapResult
+HeaderSearch::loadModuleMapFile(const DirectoryEntry *Dir, bool IsSystem,
+ bool IsFramework) {
+ auto KnownDir = DirectoryHasModuleMap.find(Dir);
+ if (KnownDir != DirectoryHasModuleMap.end())
+ return KnownDir->second ? LMM_AlreadyLoaded : LMM_InvalidModuleMap;
+
+ if (const FileEntry *ModuleMapFile = lookupModuleMapFile(Dir, IsFramework)) {
+ LoadModuleMapResult Result =
+ loadModuleMapFileImpl(ModuleMapFile, IsSystem, Dir);
+ // Add Dir explicitly in case ModuleMapFile is in a subdirectory.
+ // E.g. Foo.framework/Modules/module.modulemap
+ // ^Dir ^ModuleMapFile
+ if (Result == LMM_NewlyLoaded)
+ DirectoryHasModuleMap[Dir] = true;
+ else if (Result == LMM_InvalidModuleMap)
+ DirectoryHasModuleMap[Dir] = false;
+ return Result;
+ }
+ return LMM_InvalidModuleMap;
+}
+
+void HeaderSearch::collectAllModules(SmallVectorImpl<Module *> &Modules) {
+ Modules.clear();
+
+ if (HSOpts->ImplicitModuleMaps) {
+ // Load module maps for each of the header search directories.
+ for (unsigned Idx = 0, N = SearchDirs.size(); Idx != N; ++Idx) {
+ bool IsSystem = SearchDirs[Idx].isSystemHeaderDirectory();
+ if (SearchDirs[Idx].isFramework()) {
+ std::error_code EC;
+ SmallString<128> DirNative;
+ llvm::sys::path::native(SearchDirs[Idx].getFrameworkDir()->getName(),
+ DirNative);
+
+ // Search each of the ".framework" directories to load them as modules.
+ for (llvm::sys::fs::directory_iterator Dir(DirNative, EC), DirEnd;
+ Dir != DirEnd && !EC; Dir.increment(EC)) {
+ if (llvm::sys::path::extension(Dir->path()) != ".framework")
+ continue;
+
+ const DirectoryEntry *FrameworkDir =
+ FileMgr.getDirectory(Dir->path());
+ if (!FrameworkDir)
+ continue;
+
+ // Load this framework module.
+ loadFrameworkModule(llvm::sys::path::stem(Dir->path()), FrameworkDir,
+ IsSystem);
+ }
+ continue;
+ }
+
+ // FIXME: Deal with header maps.
+ if (SearchDirs[Idx].isHeaderMap())
+ continue;
+
+ // Try to load a module map file for the search directory.
+ loadModuleMapFile(SearchDirs[Idx].getDir(), IsSystem,
+ /*IsFramework*/ false);
+
+ // Try to load module map files for immediate subdirectories of this
+ // search directory.
+ loadSubdirectoryModuleMaps(SearchDirs[Idx]);
+ }
+ }
+
+ // Populate the list of modules.
+ for (ModuleMap::module_iterator M = ModMap.module_begin(),
+ MEnd = ModMap.module_end();
+ M != MEnd; ++M) {
+ Modules.push_back(M->getValue());
+ }
+}
+
+void HeaderSearch::loadTopLevelSystemModules() {
+ if (!HSOpts->ImplicitModuleMaps)
+ return;
+
+ // Load module maps for each of the header search directories.
+ for (unsigned Idx = 0, N = SearchDirs.size(); Idx != N; ++Idx) {
+ // We only care about normal header directories.
+ if (!SearchDirs[Idx].isNormalDir()) {
+ continue;
+ }
+
+ // Try to load a module map file for the search directory.
+ loadModuleMapFile(SearchDirs[Idx].getDir(),
+ SearchDirs[Idx].isSystemHeaderDirectory(),
+ SearchDirs[Idx].isFramework());
+ }
+}
+
+void HeaderSearch::loadSubdirectoryModuleMaps(DirectoryLookup &SearchDir) {
+ assert(HSOpts->ImplicitModuleMaps &&
+ "Should not be loading subdirectory module maps");
+
+ if (SearchDir.haveSearchedAllModuleMaps())
+ return;
+
+ std::error_code EC;
+ SmallString<128> DirNative;
+ llvm::sys::path::native(SearchDir.getDir()->getName(), DirNative);
+ for (llvm::sys::fs::directory_iterator Dir(DirNative, EC), DirEnd;
+ Dir != DirEnd && !EC; Dir.increment(EC)) {
+ bool IsFramework = llvm::sys::path::extension(Dir->path()) == ".framework";
+ if (IsFramework == SearchDir.isFramework())
+ loadModuleMapFile(Dir->path(), SearchDir.isSystemHeaderDirectory(),
+ SearchDir.isFramework());
+ }
+
+ SearchDir.setSearchedAllModuleMaps(true);
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
new file mode 100644
index 0000000..27b0feb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
@@ -0,0 +1,3650 @@
+//===--- Lexer.cpp - C Language Family Lexer ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Lexer and Token interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Lexer.h"
+#include "UnicodeCharSets.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/CodeCompletionHandler.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cstring>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Token Class Implementation
+//===----------------------------------------------------------------------===//
+
+/// isObjCAtKeyword - Return true if we have an ObjC keyword identifier.
+bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const {
+ if (IdentifierInfo *II = getIdentifierInfo())
+ return II->getObjCKeywordID() == objcKey;
+ return false;
+}
+
+/// getObjCKeywordID - Return the ObjC keyword kind.
+tok::ObjCKeywordKind Token::getObjCKeywordID() const {
+ IdentifierInfo *specId = getIdentifierInfo();
+ return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Lexer Class Implementation
+//===----------------------------------------------------------------------===//
+
+void Lexer::anchor() { }
+
+void Lexer::InitLexer(const char *BufStart, const char *BufPtr,
+ const char *BufEnd) {
+ BufferStart = BufStart;
+ BufferPtr = BufPtr;
+ BufferEnd = BufEnd;
+
+ assert(BufEnd[0] == 0 &&
+ "We assume that the input buffer has a null character at the end"
+ " to simplify lexing!");
+
+  // Check whether we have a BOM at the beginning of the buffer and, if so,
+  // act accordingly. Right now we support only UTF-8 with and without a BOM,
+  // so just skip the UTF-8 BOM if it's present.
+ if (BufferStart == BufferPtr) {
+ // Determine the size of the BOM.
+ StringRef Buf(BufferStart, BufferEnd - BufferStart);
+ size_t BOMLength = llvm::StringSwitch<size_t>(Buf)
+ .StartsWith("\xEF\xBB\xBF", 3) // UTF-8 BOM
+ .Default(0);
+
+ // Skip the BOM.
+ BufferPtr += BOMLength;
+ }
+
+ Is_PragmaLexer = false;
+ CurrentConflictMarkerState = CMK_None;
+
+ // Start of the file is a start of line.
+ IsAtStartOfLine = true;
+ IsAtPhysicalStartOfLine = true;
+
+ HasLeadingSpace = false;
+ HasLeadingEmptyMacro = false;
+
+ // We are not after parsing a #.
+ ParsingPreprocessorDirective = false;
+
+ // We are not after parsing #include.
+ ParsingFilename = false;
+
+ // We are not in raw mode. Raw mode disables diagnostics and interpretation
+ // of tokens (e.g. identifiers, thus disabling macro expansion). It is used
+ // to quickly lex the tokens of the buffer, e.g. when handling a "#if 0" block
+ // or otherwise skipping over tokens.
+ LexingRawMode = false;
+
+ // Default to not keeping comments.
+ ExtendedTokenMode = 0;
+}
+
+/// Lexer constructor - Create a new lexer object for the specified buffer
+/// with the specified preprocessor managing the lexing process. This lexer
+/// assumes that the associated file buffer and Preprocessor objects will
+/// outlive it, so it doesn't take ownership of either of them.
+Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *InputFile, Preprocessor &PP)
+ : PreprocessorLexer(&PP, FID),
+ FileLoc(PP.getSourceManager().getLocForStartOfFile(FID)),
+ LangOpts(PP.getLangOpts()) {
+
+ InitLexer(InputFile->getBufferStart(), InputFile->getBufferStart(),
+ InputFile->getBufferEnd());
+
+ resetExtendedTokenMode();
+}
+
+void Lexer::resetExtendedTokenMode() {
+ assert(PP && "Cannot reset token mode without a preprocessor");
+ if (LangOpts.TraditionalCPP)
+ SetKeepWhitespaceMode(true);
+ else
+ SetCommentRetentionState(PP->getCommentRetentionState());
+}
+
+/// Lexer constructor - Create a new raw lexer object. This object is only
+/// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the text
+/// range will outlive it, so it doesn't take ownership of it.
+Lexer::Lexer(SourceLocation fileloc, const LangOptions &langOpts,
+ const char *BufStart, const char *BufPtr, const char *BufEnd)
+ : FileLoc(fileloc), LangOpts(langOpts) {
+
+ InitLexer(BufStart, BufPtr, BufEnd);
+
+ // We *are* in raw mode.
+ LexingRawMode = true;
+}
+
+/// Lexer constructor - Create a new raw lexer object. This object is only
+/// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the text
+/// range will outlive it, so it doesn't take ownership of it.
+Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *FromFile,
+ const SourceManager &SM, const LangOptions &langOpts)
+ : Lexer(SM.getLocForStartOfFile(FID), langOpts, FromFile->getBufferStart(),
+ FromFile->getBufferStart(), FromFile->getBufferEnd()) {}
+
+/// Create_PragmaLexer: Lexer constructor - Create a new lexer object for
+/// _Pragma expansion. This has a variety of magic semantics that this method
+/// sets up. It returns a new'd Lexer that must be delete'd when done.
+///
+/// On entrance to this routine, TokStartLoc is a macro location which has a
+/// spelling loc that indicates the bytes to be lexed for the token and an
+/// expansion location that indicates where all lexed tokens should be
+/// "expanded from".
+///
+/// TODO: It would really be nice to make _Pragma just be a wrapper around a
+/// normal lexer that remaps tokens as they fly by. This would require making
+/// Preprocessor::Lex virtual. Given that, we could just dump in a magic lexer
+/// interface that could handle this stuff. This would pull GetMappedTokenLoc
+/// out of the critical path of the lexer!
+///
+Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc,
+ SourceLocation ExpansionLocStart,
+ SourceLocation ExpansionLocEnd,
+ unsigned TokLen, Preprocessor &PP) {
+ SourceManager &SM = PP.getSourceManager();
+
+ // Create the lexer as if we were going to lex the file normally.
+ FileID SpellingFID = SM.getFileID(SpellingLoc);
+ const llvm::MemoryBuffer *InputFile = SM.getBuffer(SpellingFID);
+ Lexer *L = new Lexer(SpellingFID, InputFile, PP);
+
+ // Now that the lexer is created, change the start/end locations so that we
+ // just lex the subsection of the file that we want. This is lexing from a
+ // scratch buffer.
+ const char *StrData = SM.getCharacterData(SpellingLoc);
+
+ L->BufferPtr = StrData;
+ L->BufferEnd = StrData+TokLen;
+ assert(L->BufferEnd[0] == 0 && "Buffer is not nul terminated!");
+
+ // Set the SourceLocation with the remapping information. This ensures that
+ // GetMappedTokenLoc will remap the tokens as they are lexed.
+ L->FileLoc = SM.createExpansionLoc(SM.getLocForStartOfFile(SpellingFID),
+ ExpansionLocStart,
+ ExpansionLocEnd, TokLen);
+
+ // Ensure that the lexer thinks it is inside a directive, so that end \n will
+ // return an EOD token.
+ L->ParsingPreprocessorDirective = true;
+
+ // This lexer really is for _Pragma.
+ L->Is_PragmaLexer = true;
+ return L;
+}
+
+
+/// Stringify - Convert the specified string into a C string by escaping '\'
+/// and " (or ', if Charify is set) characters. Note that this does not add
+/// surrounding ""'s to the string; callers must supply those themselves.
+std::string Lexer::Stringify(StringRef Str, bool Charify) {
+ std::string Result = Str;
+ char Quote = Charify ? '\'' : '"';
+ for (unsigned i = 0, e = Result.size(); i != e; ++i) {
+ if (Result[i] == '\\' || Result[i] == Quote) {
+ Result.insert(Result.begin()+i, '\\');
+ ++i; ++e;
+ }
+ }
+ return Result;
+}
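+
+// Illustrative sketch (hypothetical caller; Filename stands for any
+// StringRef, and the default Charify=false from the declaration is assumed):
+// the caller supplies the surrounding quotes itself, e.g.
+//
+//   std::string Quoted = '"' + Lexer::Stringify(Filename) + '"';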
+
+/// Stringify - Convert the specified string into a C string by escaping '\'
+/// and " characters. This does not add surrounding ""'s to the string.
+void Lexer::Stringify(SmallVectorImpl<char> &Str) {
+ for (unsigned i = 0, e = Str.size(); i != e; ++i) {
+ if (Str[i] == '\\' || Str[i] == '"') {
+ Str.insert(Str.begin()+i, '\\');
+ ++i; ++e;
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Token Spelling
+//===----------------------------------------------------------------------===//
+
+/// \brief Slow case of getSpelling. Extract the characters comprising the
+/// spelling of this token from the provided input buffer.
+static size_t getSpellingSlow(const Token &Tok, const char *BufPtr,
+ const LangOptions &LangOpts, char *Spelling) {
+ assert(Tok.needsCleaning() && "getSpellingSlow called on simple token");
+
+ size_t Length = 0;
+ const char *BufEnd = BufPtr + Tok.getLength();
+
+ if (tok::isStringLiteral(Tok.getKind())) {
+ // Munch the encoding-prefix and opening double-quote.
+ while (BufPtr < BufEnd) {
+ unsigned Size;
+ Spelling[Length++] = Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
+ BufPtr += Size;
+
+ if (Spelling[Length - 1] == '"')
+ break;
+ }
+
+ // Raw string literals need special handling; trigraph expansion and line
+ // splicing do not occur within their d-char-sequence nor within their
+ // r-char-sequence.
+ if (Length >= 2 &&
+ Spelling[Length - 2] == 'R' && Spelling[Length - 1] == '"') {
+ // Search backwards from the end of the token to find the matching closing
+ // quote.
+ const char *RawEnd = BufEnd;
+ do --RawEnd; while (*RawEnd != '"');
+ size_t RawLength = RawEnd - BufPtr + 1;
+
+ // Everything between the quotes is included verbatim in the spelling.
+ memcpy(Spelling + Length, BufPtr, RawLength);
+ Length += RawLength;
+ BufPtr += RawLength;
+
+ // The rest of the token is lexed normally.
+ }
+ }
+
+ while (BufPtr < BufEnd) {
+ unsigned Size;
+ Spelling[Length++] = Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
+ BufPtr += Size;
+ }
+
+ assert(Length < Tok.getLength() &&
+ "NeedsCleaning flag set on token that didn't need cleaning!");
+ return Length;
+}
+
+/// getSpelling() - Return the 'spelling' of this token. The spelling of a
+/// token is the sequence of characters used to represent the token in the
+/// source file after trigraph expansion and escaped-newline folding. In
+/// particular, this wants to return the true, uncanonicalized spelling of
+/// things like digraphs, UCNs, etc.
+StringRef Lexer::getSpelling(SourceLocation loc,
+ SmallVectorImpl<char> &buffer,
+ const SourceManager &SM,
+ const LangOptions &options,
+ bool *invalid) {
+ // Break down the source location.
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);
+
+  // Try to load the file buffer.
+ bool invalidTemp = false;
+ StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
+ if (invalidTemp) {
+ if (invalid) *invalid = true;
+ return StringRef();
+ }
+
+ const char *tokenBegin = file.data() + locInfo.second;
+
+ // Lex from the start of the given location.
+ Lexer lexer(SM.getLocForStartOfFile(locInfo.first), options,
+ file.begin(), tokenBegin, file.end());
+ Token token;
+ lexer.LexFromRawLexer(token);
+
+ unsigned length = token.getLength();
+
+ // Common case: no need for cleaning.
+ if (!token.needsCleaning())
+ return StringRef(tokenBegin, length);
+
+ // Hard case, we need to relex the characters into the string.
+ buffer.resize(length);
+ buffer.resize(getSpellingSlow(token, tokenBegin, options, buffer.data()));
+ return StringRef(buffer.data(), buffer.size());
+}
+
+/// getSpelling() - Return the 'spelling' of this token. The spelling of a
+/// token is the sequence of characters used to represent the token in the
+/// source file after trigraph expansion and escaped-newline folding. In
+/// particular, this wants to return the true, uncanonicalized spelling of
+/// things like digraphs, UCNs, etc.
+std::string Lexer::getSpelling(const Token &Tok, const SourceManager &SourceMgr,
+ const LangOptions &LangOpts, bool *Invalid) {
+ assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
+
+ bool CharDataInvalid = false;
+ const char *TokStart = SourceMgr.getCharacterData(Tok.getLocation(),
+ &CharDataInvalid);
+ if (Invalid)
+ *Invalid = CharDataInvalid;
+ if (CharDataInvalid)
+ return std::string();
+
+ // If this token contains nothing interesting, return it directly.
+ if (!Tok.needsCleaning())
+ return std::string(TokStart, TokStart + Tok.getLength());
+
+ std::string Result;
+ Result.resize(Tok.getLength());
+ Result.resize(getSpellingSlow(Tok, TokStart, LangOpts, &*Result.begin()));
+ return Result;
+}
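+
+// Illustrative sketch (hypothetical caller): recover the exact characters of
+// a token, with trigraphs and escaped newlines already folded away.
+//
+//   std::string Text = Lexer::getSpelling(Tok, SM, LangOpts);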
+
+/// getSpelling - This method is used to get the spelling of a token into a
+/// preallocated buffer, instead of as an std::string. The caller is required
+/// to allocate enough space for the token, which is guaranteed to be at least
+/// Tok.getLength() bytes long. The actual length of the token is returned.
+///
+/// Note that this method may do two possible things: it may either fill in
+/// the buffer specified with characters, or it may *change the input pointer*
+/// to point to a constant buffer with the data already in it (avoiding a
+/// copy). The caller is not allowed to modify the returned buffer pointer
+/// if an internal buffer is returned.
+unsigned Lexer::getSpelling(const Token &Tok, const char *&Buffer,
+ const SourceManager &SourceMgr,
+ const LangOptions &LangOpts, bool *Invalid) {
+ assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
+
+ const char *TokStart = nullptr;
+ // NOTE: this has to be checked *before* testing for an IdentifierInfo.
+ if (Tok.is(tok::raw_identifier))
+ TokStart = Tok.getRawIdentifier().data();
+ else if (!Tok.hasUCN()) {
+ if (const IdentifierInfo *II = Tok.getIdentifierInfo()) {
+ // Just return the string from the identifier table, which is very quick.
+ Buffer = II->getNameStart();
+ return II->getLength();
+ }
+ }
+
+ // NOTE: this can be checked even after testing for an IdentifierInfo.
+ if (Tok.isLiteral())
+ TokStart = Tok.getLiteralData();
+
+ if (!TokStart) {
+ // Compute the start of the token in the input lexer buffer.
+ bool CharDataInvalid = false;
+ TokStart = SourceMgr.getCharacterData(Tok.getLocation(), &CharDataInvalid);
+ if (Invalid)
+ *Invalid = CharDataInvalid;
+ if (CharDataInvalid) {
+ Buffer = "";
+ return 0;
+ }
+ }
+
+ // If this token contains nothing interesting, return it directly.
+ if (!Tok.needsCleaning()) {
+ Buffer = TokStart;
+ return Tok.getLength();
+ }
+
+ // Otherwise, hard case, relex the characters into the string.
+ return getSpellingSlow(Tok, TokStart, LangOpts, const_cast<char*>(Buffer));
+}
+
+
+/// MeasureTokenLength - Relex the token at the specified location and return
+/// its length in bytes in the input file. If the token needs cleaning (e.g.
+/// includes a trigraph or an escaped newline) then this count includes bytes
+/// that are part of that.
+unsigned Lexer::MeasureTokenLength(SourceLocation Loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ Token TheTok;
+ if (getRawToken(Loc, TheTok, SM, LangOpts))
+ return 0;
+ return TheTok.getLength();
+}
+
+/// \brief Relex the token at the specified location.
+/// \returns true if there was a failure, false on success.
+bool Lexer::getRawToken(SourceLocation Loc, Token &Result,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ bool IgnoreWhiteSpace) {
+ // TODO: this could be special cased for common tokens like identifiers, ')',
+ // etc to make this faster, if it mattered. Just look at StrData[0] to handle
+ // all obviously single-char tokens. This could use
+ // Lexer::isObviouslySimpleCharacter for example to handle identifiers or
+ // something.
+
+ // If this comes from a macro expansion, we really do want the macro name, not
+ // the token this macro expanded to.
+ Loc = SM.getExpansionLoc(Loc);
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
+ bool Invalid = false;
+ StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
+ if (Invalid)
+ return true;
+
+ const char *StrData = Buffer.data()+LocInfo.second;
+
+ if (!IgnoreWhiteSpace && isWhitespace(StrData[0]))
+ return true;
+
+ // Create a lexer starting at the beginning of this token.
+ Lexer TheLexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts,
+ Buffer.begin(), StrData, Buffer.end());
+ TheLexer.SetCommentRetentionState(true);
+ TheLexer.LexFromRawLexer(Result);
+ return false;
+}
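+
+// Illustrative sketch (hypothetical caller): relex the token at Loc; note
+// that getRawToken returns true on *failure*.
+//
+//   Token Tok;
+//   if (!Lexer::getRawToken(Loc, Tok, SM, LangOpts))
+//     llvm::errs() << "token length: " << Tok.getLength() << "\n";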
+
+static SourceLocation getBeginningOfFileToken(SourceLocation Loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ assert(Loc.isFileID());
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
+ if (LocInfo.first.isInvalid())
+ return Loc;
+
+ bool Invalid = false;
+ StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
+ if (Invalid)
+ return Loc;
+
+ // Back up from the current location until we hit the beginning of a line
+ // (or the buffer). We'll relex from that point.
+ const char *BufStart = Buffer.data();
+ if (LocInfo.second >= Buffer.size())
+ return Loc;
+
+ const char *StrData = BufStart+LocInfo.second;
+ if (StrData[0] == '\n' || StrData[0] == '\r')
+ return Loc;
+
+ const char *LexStart = StrData;
+ while (LexStart != BufStart) {
+ if (LexStart[0] == '\n' || LexStart[0] == '\r') {
+ ++LexStart;
+ break;
+ }
+
+ --LexStart;
+ }
+
+ // Create a lexer starting at the beginning of this token.
+ SourceLocation LexerStartLoc = Loc.getLocWithOffset(-LocInfo.second);
+ Lexer TheLexer(LexerStartLoc, LangOpts, BufStart, LexStart, Buffer.end());
+ TheLexer.SetCommentRetentionState(true);
+
+ // Lex tokens until we find the token that contains the source location.
+ Token TheTok;
+ do {
+ TheLexer.LexFromRawLexer(TheTok);
+
+ if (TheLexer.getBufferLocation() > StrData) {
+ // Lexing this token has taken the lexer past the source location we're
+ // looking for. If the current token encompasses our source location,
+ // return the beginning of that token.
+ if (TheLexer.getBufferLocation() - TheTok.getLength() <= StrData)
+ return TheTok.getLocation();
+
+ // We ended up skipping over the source location entirely, which means
+ // that it points into whitespace. We're done here.
+ break;
+ }
+ } while (TheTok.getKind() != tok::eof);
+
+ // We've passed our source location; just return the original source location.
+ return Loc;
+}
+
+SourceLocation Lexer::GetBeginningOfToken(SourceLocation Loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ if (Loc.isFileID())
+ return getBeginningOfFileToken(Loc, SM, LangOpts);
+
+ if (!SM.isMacroArgExpansion(Loc))
+ return Loc;
+
+ SourceLocation FileLoc = SM.getSpellingLoc(Loc);
+ SourceLocation BeginFileLoc = getBeginningOfFileToken(FileLoc, SM, LangOpts);
+ std::pair<FileID, unsigned> FileLocInfo = SM.getDecomposedLoc(FileLoc);
+ std::pair<FileID, unsigned> BeginFileLocInfo
+ = SM.getDecomposedLoc(BeginFileLoc);
+ assert(FileLocInfo.first == BeginFileLocInfo.first &&
+ FileLocInfo.second >= BeginFileLocInfo.second);
+ return Loc.getLocWithOffset(BeginFileLocInfo.second - FileLocInfo.second);
+}
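+
+// Illustrative sketch: if Loc points at the 'o' inside the identifier "foo",
+// this returns the location of the 'f'.
+//
+//   SourceLocation TokBegin = Lexer::GetBeginningOfToken(Loc, SM, LangOpts);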
+
+namespace {
+ enum PreambleDirectiveKind {
+ PDK_Skipped,
+ PDK_StartIf,
+ PDK_EndIf,
+ PDK_Unknown
+ };
+}
+
+std::pair<unsigned, bool> Lexer::ComputePreamble(StringRef Buffer,
+ const LangOptions &LangOpts,
+ unsigned MaxLines) {
+ // Create a lexer starting at the beginning of the file. Note that we use a
+ // "fake" file source location at offset 1 so that the lexer will track our
+ // position within the file.
+ const unsigned StartOffset = 1;
+ SourceLocation FileLoc = SourceLocation::getFromRawEncoding(StartOffset);
+ Lexer TheLexer(FileLoc, LangOpts, Buffer.begin(), Buffer.begin(),
+ Buffer.end());
+ TheLexer.SetCommentRetentionState(true);
+
+ // StartLoc will differ from FileLoc if there is a BOM that was skipped.
+ SourceLocation StartLoc = TheLexer.getSourceLocation();
+
+ bool InPreprocessorDirective = false;
+ Token TheTok;
+ Token IfStartTok;
+ unsigned IfCount = 0;
+ SourceLocation ActiveCommentLoc;
+
+ unsigned MaxLineOffset = 0;
+ if (MaxLines) {
+ const char *CurPtr = Buffer.begin();
+ unsigned CurLine = 0;
+ while (CurPtr != Buffer.end()) {
+ char ch = *CurPtr++;
+ if (ch == '\n') {
+ ++CurLine;
+ if (CurLine == MaxLines)
+ break;
+ }
+ }
+ if (CurPtr != Buffer.end())
+ MaxLineOffset = CurPtr - Buffer.begin();
+ }
+
+ do {
+ TheLexer.LexFromRawLexer(TheTok);
+
+ if (InPreprocessorDirective) {
+ // If we've hit the end of the file, we're done.
+ if (TheTok.getKind() == tok::eof) {
+ break;
+ }
+
+ // If we haven't hit the end of the preprocessor directive, skip this
+ // token.
+ if (!TheTok.isAtStartOfLine())
+ continue;
+
+ // We've passed the end of the preprocessor directive, and will look
+ // at this token again below.
+ InPreprocessorDirective = false;
+ }
+
+ // Keep track of the # of lines in the preamble.
+ if (TheTok.isAtStartOfLine()) {
+ unsigned TokOffset = TheTok.getLocation().getRawEncoding() - StartOffset;
+
+ // If we were asked to limit the number of lines in the preamble,
+ // and we're about to exceed that limit, we're done.
+ if (MaxLineOffset && TokOffset >= MaxLineOffset)
+ break;
+ }
+
+ // Comments are okay; skip over them.
+ if (TheTok.getKind() == tok::comment) {
+ if (ActiveCommentLoc.isInvalid())
+ ActiveCommentLoc = TheTok.getLocation();
+ continue;
+ }
+
+ if (TheTok.isAtStartOfLine() && TheTok.getKind() == tok::hash) {
+ // This is the start of a preprocessor directive.
+ Token HashTok = TheTok;
+ InPreprocessorDirective = true;
+ ActiveCommentLoc = SourceLocation();
+
+ // Figure out which directive this is. Since we're lexing raw tokens,
+ // we don't have an identifier table available. Instead, just look at
+ // the raw identifier to recognize and categorize preprocessor directives.
+ TheLexer.LexFromRawLexer(TheTok);
+ if (TheTok.getKind() == tok::raw_identifier && !TheTok.needsCleaning()) {
+ StringRef Keyword = TheTok.getRawIdentifier();
+ PreambleDirectiveKind PDK
+ = llvm::StringSwitch<PreambleDirectiveKind>(Keyword)
+ .Case("include", PDK_Skipped)
+ .Case("__include_macros", PDK_Skipped)
+ .Case("define", PDK_Skipped)
+ .Case("undef", PDK_Skipped)
+ .Case("line", PDK_Skipped)
+ .Case("error", PDK_Skipped)
+ .Case("pragma", PDK_Skipped)
+ .Case("import", PDK_Skipped)
+ .Case("include_next", PDK_Skipped)
+ .Case("warning", PDK_Skipped)
+ .Case("ident", PDK_Skipped)
+ .Case("sccs", PDK_Skipped)
+ .Case("assert", PDK_Skipped)
+ .Case("unassert", PDK_Skipped)
+ .Case("if", PDK_StartIf)
+ .Case("ifdef", PDK_StartIf)
+ .Case("ifndef", PDK_StartIf)
+ .Case("elif", PDK_Skipped)
+ .Case("else", PDK_Skipped)
+ .Case("endif", PDK_EndIf)
+ .Default(PDK_Unknown);
+
+ switch (PDK) {
+ case PDK_Skipped:
+ continue;
+
+ case PDK_StartIf:
+ if (IfCount == 0)
+ IfStartTok = HashTok;
+
+ ++IfCount;
+ continue;
+
+ case PDK_EndIf:
+ // Mismatched #endif. The preamble ends here.
+ if (IfCount == 0)
+ break;
+
+ --IfCount;
+ continue;
+
+ case PDK_Unknown:
+ // We don't know what this directive is; stop at the '#'.
+ break;
+ }
+ }
+
+ // We only end up here if we didn't recognize the preprocessor
+ // directive or it was one that can't occur in the preamble at this
+ // point. Roll back the current token to the location of the '#'.
+ InPreprocessorDirective = false;
+ TheTok = HashTok;
+ }
+
+ // We hit a token that we don't recognize as being in the
+ // "preprocessing only" part of the file, so we're no longer in
+ // the preamble.
+ break;
+ } while (true);
+
+ SourceLocation End;
+ if (IfCount)
+ End = IfStartTok.getLocation();
+ else if (ActiveCommentLoc.isValid())
+ End = ActiveCommentLoc; // don't truncate a decl comment.
+ else
+ End = TheTok.getLocation();
+
+ return std::make_pair(End.getRawEncoding() - StartLoc.getRawEncoding(),
+ IfCount? IfStartTok.isAtStartOfLine()
+ : TheTok.isAtStartOfLine());
+}
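+
+// Illustrative sketch (hypothetical caller): size the preamble of a main
+// file whose contents are held in a StringRef Buffer.
+//
+//   std::pair<unsigned, bool> P =
+//       Lexer::ComputePreamble(Buffer, LangOpts, /*MaxLines=*/0);
+//   // P.first is the preamble size in bytes; P.second is true if the
+//   // preamble ends at the start of a line.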
+
+
+/// AdvanceToTokenCharacter - Given a location that specifies the start of a
+/// token, return a new location that specifies a character within the token.
+SourceLocation Lexer::AdvanceToTokenCharacter(SourceLocation TokStart,
+ unsigned CharNo,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ // Figure out how many physical characters away the specified expansion
+ // character is. This needs to take into consideration newlines and
+ // trigraphs.
+ bool Invalid = false;
+ const char *TokPtr = SM.getCharacterData(TokStart, &Invalid);
+
+ // If they request the first char of the token, we're trivially done.
+ if (Invalid || (CharNo == 0 && Lexer::isObviouslySimpleCharacter(*TokPtr)))
+ return TokStart;
+
+ unsigned PhysOffset = 0;
+
+ // The usual case is that tokens don't contain anything interesting. Skip
+ // over the uninteresting characters. If a token only consists of simple
+ // chars, this method is extremely fast.
+ while (Lexer::isObviouslySimpleCharacter(*TokPtr)) {
+ if (CharNo == 0)
+ return TokStart.getLocWithOffset(PhysOffset);
+ ++TokPtr, --CharNo, ++PhysOffset;
+ }
+
+ // If we have a character that may be a trigraph or escaped newline, use a
+ // lexer to parse it correctly.
+ for (; CharNo; --CharNo) {
+ unsigned Size;
+ Lexer::getCharAndSizeNoWarn(TokPtr, Size, LangOpts);
+ TokPtr += Size;
+ PhysOffset += Size;
+ }
+
+ // Final detail: if we end up on an escaped newline, we want to return the
+ // location of the actual byte of the token. For example foo\<newline>bar
+ // advanced by 3 should return the location of b, not of \\. One compounding
+ // detail of this is that the escape may be made by a trigraph.
+ if (!Lexer::isObviouslySimpleCharacter(*TokPtr))
+ PhysOffset += Lexer::SkipEscapedNewLines(TokPtr)-TokPtr;
+
+ return TokStart.getLocWithOffset(PhysOffset);
+}
+
+/// \brief Computes the source location just past the end of the
+/// token at this source location.
+///
+/// This routine can be used to produce a source location that
+/// points just past the end of the token referenced by \p Loc, and
+/// is generally used when a diagnostic needs to point just after a
+/// token where it expected something different from what it received. If
+/// the returned source location would not be meaningful (e.g., if
+/// it points into a macro), this routine returns an invalid
+/// source location.
+///
+/// \param Offset an offset from the end of the token, where the source
+/// location should refer to. The default offset (0) produces a source
+/// location pointing just past the end of the token; an offset of 1 produces
+/// a source location pointing to the last character in the token, etc.
+SourceLocation Lexer::getLocForEndOfToken(SourceLocation Loc, unsigned Offset,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ if (Loc.isInvalid())
+ return SourceLocation();
+
+ if (Loc.isMacroID()) {
+ if (Offset > 0 || !isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
+ return SourceLocation(); // Points inside the macro expansion.
+ }
+
+ unsigned Len = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
+ if (Len > Offset)
+ Len = Len - Offset;
+ else
+ return Loc;
+
+ return Loc.getLocWithOffset(Len);
+}
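+
+// Illustrative sketch (hypothetical diagnostic; Hint is an assumed local):
+// anchor a fix-it just past a token, e.g. to suggest a missing ';'.
+//
+//   SourceLocation After =
+//       Lexer::getLocForEndOfToken(Tok.getLocation(), 0, SM, LangOpts);
+//   if (After.isValid())
+//     Hint = FixItHint::CreateInsertion(After, ";");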
+
+/// \brief Returns true if the given MacroID location points at the first
+/// token of the macro expansion.
+bool Lexer::isAtStartOfMacroExpansion(SourceLocation loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ SourceLocation *MacroBegin) {
+ assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");
+
+ SourceLocation expansionLoc;
+ if (!SM.isAtStartOfImmediateMacroExpansion(loc, &expansionLoc))
+ return false;
+
+ if (expansionLoc.isFileID()) {
+ // No other macro expansions, this is the first.
+ if (MacroBegin)
+ *MacroBegin = expansionLoc;
+ return true;
+ }
+
+ return isAtStartOfMacroExpansion(expansionLoc, SM, LangOpts, MacroBegin);
+}
+
+/// \brief Returns true if the given MacroID location points at the last
+/// token of the macro expansion.
+bool Lexer::isAtEndOfMacroExpansion(SourceLocation loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ SourceLocation *MacroEnd) {
+ assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");
+
+ SourceLocation spellLoc = SM.getSpellingLoc(loc);
+ unsigned tokLen = MeasureTokenLength(spellLoc, SM, LangOpts);
+ if (tokLen == 0)
+ return false;
+
+ SourceLocation afterLoc = loc.getLocWithOffset(tokLen);
+ SourceLocation expansionLoc;
+ if (!SM.isAtEndOfImmediateMacroExpansion(afterLoc, &expansionLoc))
+ return false;
+
+ if (expansionLoc.isFileID()) {
+ // No other macro expansions.
+ if (MacroEnd)
+ *MacroEnd = expansionLoc;
+ return true;
+ }
+
+ return isAtEndOfMacroExpansion(expansionLoc, SM, LangOpts, MacroEnd);
+}
+
+static CharSourceRange makeRangeFromFileLocs(CharSourceRange Range,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ SourceLocation Begin = Range.getBegin();
+ SourceLocation End = Range.getEnd();
+ assert(Begin.isFileID() && End.isFileID());
+ if (Range.isTokenRange()) {
+    End = Lexer::getLocForEndOfToken(End, 0, SM, LangOpts);
+ if (End.isInvalid())
+ return CharSourceRange();
+ }
+
+ // Break down the source locations.
+ FileID FID;
+ unsigned BeginOffs;
+ std::tie(FID, BeginOffs) = SM.getDecomposedLoc(Begin);
+ if (FID.isInvalid())
+ return CharSourceRange();
+
+ unsigned EndOffs;
+ if (!SM.isInFileID(End, FID, &EndOffs) ||
+ BeginOffs > EndOffs)
+ return CharSourceRange();
+
+ return CharSourceRange::getCharRange(Begin, End);
+}
+
+CharSourceRange Lexer::makeFileCharRange(CharSourceRange Range,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ SourceLocation Begin = Range.getBegin();
+ SourceLocation End = Range.getEnd();
+ if (Begin.isInvalid() || End.isInvalid())
+ return CharSourceRange();
+
+ if (Begin.isFileID() && End.isFileID())
+ return makeRangeFromFileLocs(Range, SM, LangOpts);
+
+ if (Begin.isMacroID() && End.isFileID()) {
+ if (!isAtStartOfMacroExpansion(Begin, SM, LangOpts, &Begin))
+ return CharSourceRange();
+ Range.setBegin(Begin);
+ return makeRangeFromFileLocs(Range, SM, LangOpts);
+ }
+
+ if (Begin.isFileID() && End.isMacroID()) {
+ if ((Range.isTokenRange() && !isAtEndOfMacroExpansion(End, SM, LangOpts,
+ &End)) ||
+ (Range.isCharRange() && !isAtStartOfMacroExpansion(End, SM, LangOpts,
+ &End)))
+ return CharSourceRange();
+ Range.setEnd(End);
+ return makeRangeFromFileLocs(Range, SM, LangOpts);
+ }
+
+ assert(Begin.isMacroID() && End.isMacroID());
+ SourceLocation MacroBegin, MacroEnd;
+ if (isAtStartOfMacroExpansion(Begin, SM, LangOpts, &MacroBegin) &&
+ ((Range.isTokenRange() && isAtEndOfMacroExpansion(End, SM, LangOpts,
+ &MacroEnd)) ||
+ (Range.isCharRange() && isAtStartOfMacroExpansion(End, SM, LangOpts,
+ &MacroEnd)))) {
+ Range.setBegin(MacroBegin);
+ Range.setEnd(MacroEnd);
+ return makeRangeFromFileLocs(Range, SM, LangOpts);
+ }
+
+ bool Invalid = false;
+ const SrcMgr::SLocEntry &BeginEntry = SM.getSLocEntry(SM.getFileID(Begin),
+ &Invalid);
+ if (Invalid)
+ return CharSourceRange();
+
+ if (BeginEntry.getExpansion().isMacroArgExpansion()) {
+ const SrcMgr::SLocEntry &EndEntry = SM.getSLocEntry(SM.getFileID(End),
+ &Invalid);
+ if (Invalid)
+ return CharSourceRange();
+
+ if (EndEntry.getExpansion().isMacroArgExpansion() &&
+ BeginEntry.getExpansion().getExpansionLocStart() ==
+ EndEntry.getExpansion().getExpansionLocStart()) {
+ Range.setBegin(SM.getImmediateSpellingLoc(Begin));
+ Range.setEnd(SM.getImmediateSpellingLoc(End));
+ return makeFileCharRange(Range, SM, LangOpts);
+ }
+ }
+
+ return CharSourceRange();
+}
+
+StringRef Lexer::getSourceText(CharSourceRange Range,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ bool *Invalid) {
+ Range = makeFileCharRange(Range, SM, LangOpts);
+ if (Range.isInvalid()) {
+ if (Invalid) *Invalid = true;
+ return StringRef();
+ }
+
+ // Break down the source location.
+ std::pair<FileID, unsigned> beginInfo = SM.getDecomposedLoc(Range.getBegin());
+ if (beginInfo.first.isInvalid()) {
+ if (Invalid) *Invalid = true;
+ return StringRef();
+ }
+
+ unsigned EndOffs;
+ if (!SM.isInFileID(Range.getEnd(), beginInfo.first, &EndOffs) ||
+ beginInfo.second > EndOffs) {
+ if (Invalid) *Invalid = true;
+ return StringRef();
+ }
+
+  // Try to load the file buffer.
+ bool invalidTemp = false;
+ StringRef file = SM.getBufferData(beginInfo.first, &invalidTemp);
+ if (invalidTemp) {
+ if (Invalid) *Invalid = true;
+ return StringRef();
+ }
+
+ if (Invalid) *Invalid = false;
+ return file.substr(beginInfo.second, EndOffs - beginInfo.second);
+}
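+
+// Illustrative sketch (hypothetical caller): fetch the source text covered
+// by a statement or expression node E.
+//
+//   StringRef Text = Lexer::getSourceText(
+//       CharSourceRange::getTokenRange(E->getSourceRange()), SM, LangOpts);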
+
+StringRef Lexer::getImmediateMacroName(SourceLocation Loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+  assert(Loc.isMacroID() && "Only reasonable to call this on macros");
+
+ // Find the location of the immediate macro expansion.
+ while (1) {
+ FileID FID = SM.getFileID(Loc);
+ const SrcMgr::SLocEntry *E = &SM.getSLocEntry(FID);
+ const SrcMgr::ExpansionInfo &Expansion = E->getExpansion();
+ Loc = Expansion.getExpansionLocStart();
+ if (!Expansion.isMacroArgExpansion())
+ break;
+
+ // For macro arguments we need to check that the argument did not come
+  // from an inner macro, e.g.: "MAC1( MAC2(foo) )"
+
+ // Loc points to the argument id of the macro definition, move to the
+ // macro expansion.
+ Loc = SM.getImmediateExpansionRange(Loc).first;
+ SourceLocation SpellLoc = Expansion.getSpellingLoc();
+ if (SpellLoc.isFileID())
+ break; // No inner macro.
+
+ // If spelling location resides in the same FileID as macro expansion
+ // location, it means there is no inner macro.
+ FileID MacroFID = SM.getFileID(Loc);
+ if (SM.isInFileID(SpellLoc, MacroFID))
+ break;
+
+ // Argument came from inner macro.
+ Loc = SpellLoc;
+ }
+
+ // Find the spelling location of the start of the non-argument expansion
+ // range. This is where the macro name was spelled in order to begin
+ // expanding this macro.
+ Loc = SM.getSpellingLoc(Loc);
+
+ // Dig out the buffer where the macro name was spelled and the extents of the
+ // name so that we can render it into the expansion note.
+ std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
+ unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
+ StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
+ return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
+}
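+
+// Illustrative sketch: given "#define FOO 42" and a location inside an
+// expansion of FOO, this returns the spelling "FOO".
+//
+//   StringRef MacroName = Lexer::getImmediateMacroName(Loc, SM, LangOpts);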
+
+bool Lexer::isIdentifierBodyChar(char c, const LangOptions &LangOpts) {
+ return isIdentifierBody(c, LangOpts.DollarIdents);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Diagnostics forwarding code.
+//===----------------------------------------------------------------------===//
+
+/// GetMappedTokenLoc - If lexing out of a 'mapped buffer', where we pretend the
+/// lexer buffer was all expanded at a single point, perform the mapping.
+/// This is currently only used for _Pragma implementation, so it is the slow
+/// path of the hot getSourceLocation method. Do not allow it to be inlined.
+static LLVM_ATTRIBUTE_NOINLINE SourceLocation GetMappedTokenLoc(
+ Preprocessor &PP, SourceLocation FileLoc, unsigned CharNo, unsigned TokLen);
+static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
+ SourceLocation FileLoc,
+ unsigned CharNo, unsigned TokLen) {
+ assert(FileLoc.isMacroID() && "Must be a macro expansion");
+
+ // Otherwise, we're lexing "mapped tokens". This is used for things like
+ // _Pragma handling. Combine the expansion location of FileLoc with the
+ // spelling location.
+ SourceManager &SM = PP.getSourceManager();
+
+ // Create a new SLoc which is expanded from Expansion(FileLoc) but whose
+ // characters come from spelling(FileLoc)+Offset.
+ SourceLocation SpellingLoc = SM.getSpellingLoc(FileLoc);
+ SpellingLoc = SpellingLoc.getLocWithOffset(CharNo);
+
+ // Figure out the expansion loc range, which is the range covered by the
+ // original _Pragma(...) sequence.
+ std::pair<SourceLocation,SourceLocation> II =
+ SM.getImmediateExpansionRange(FileLoc);
+
+ return SM.createExpansionLoc(SpellingLoc, II.first, II.second, TokLen);
+}
+
+/// getSourceLocation - Return a source location identifier for the specified
+/// offset in the current file.
+SourceLocation Lexer::getSourceLocation(const char *Loc,
+ unsigned TokLen) const {
+ assert(Loc >= BufferStart && Loc <= BufferEnd &&
+ "Location out of range for this buffer!");
+
+ // In the normal case, we're just lexing from a simple file buffer, return
+ // the file id from FileLoc with the offset specified.
+ unsigned CharNo = Loc-BufferStart;
+ if (FileLoc.isFileID())
+ return FileLoc.getLocWithOffset(CharNo);
+
+ // Otherwise, this is the _Pragma lexer case, which pretends that all of the
+ // tokens are lexed from where the _Pragma was defined.
+ assert(PP && "This doesn't work on raw lexers");
+ return GetMappedTokenLoc(*PP, FileLoc, CharNo, TokLen);
+}
+
+/// Diag - Forwarding function for diagnostics. This translates a source
+/// position in the current buffer into a SourceLocation object for rendering.
+DiagnosticBuilder Lexer::Diag(const char *Loc, unsigned DiagID) const {
+ return PP->Diag(getSourceLocation(Loc), DiagID);
+}
+
+//===----------------------------------------------------------------------===//
+// Trigraph and Escaped Newline Handling Code.
+//===----------------------------------------------------------------------===//
+
+/// GetTrigraphCharForLetter - Given a character that occurs after a ?? pair,
+/// return the decoded trigraph letter it corresponds to, or '\0' if nothing.
+static char GetTrigraphCharForLetter(char Letter) {
+ switch (Letter) {
+ default: return 0;
+ case '=': return '#';
+ case ')': return ']';
+ case '(': return '[';
+ case '!': return '|';
+ case '\'': return '^';
+ case '>': return '}';
+ case '/': return '\\';
+ case '<': return '{';
+ case '-': return '~';
+ }
+}
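+
+// For example, GetTrigraphCharForLetter('=') returns '#' (the trigraph
+// "??="), while GetTrigraphCharForLetter('x') returns 0 because "??x" is not
+// a trigraph.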
+
+/// DecodeTrigraphChar - If the specified character forms a legal trigraph
+/// when prefixed with ??, emit a diagnostic about the trigraph (whether or
+/// not trigraphs are enabled), then return the decoded character if trigraphs
+/// are enabled, or 0 if they are disabled.
+static char DecodeTrigraphChar(const char *CP, Lexer *L) {
+ char Res = GetTrigraphCharForLetter(*CP);
+ if (!Res || !L) return Res;
+
+ if (!L->getLangOpts().Trigraphs) {
+ if (!L->isLexingRawMode())
+ L->Diag(CP-2, diag::trigraph_ignored);
+ return 0;
+ }
+
+ if (!L->isLexingRawMode())
+ L->Diag(CP-2, diag::trigraph_converted) << StringRef(&Res, 1);
+ return Res;
+}
+
+/// getEscapedNewLineSize - Return the size of the specified escaped newline,
+/// or 0 if it is not an escaped newline. P[-1] is known to be a "\" or a
+/// trigraph equivalent on entry to this function.
+unsigned Lexer::getEscapedNewLineSize(const char *Ptr) {
+ unsigned Size = 0;
+ while (isWhitespace(Ptr[Size])) {
+ ++Size;
+
+ if (Ptr[Size-1] != '\n' && Ptr[Size-1] != '\r')
+ continue;
+
+ // If this is a \r\n or \n\r, skip the other half.
+ if ((Ptr[Size] == '\r' || Ptr[Size] == '\n') &&
+ Ptr[Size-1] != Ptr[Size])
+ ++Size;
+
+ return Size;
+ }
+
+ // Not an escaped newline, must be a \t or something else.
+ return 0;
+}
+
+/// SkipEscapedNewLines - If P points to an escaped newline (or a series of
+/// them), skip over them and return the first non-escaped-newline found,
+/// otherwise return P.
+const char *Lexer::SkipEscapedNewLines(const char *P) {
+ while (1) {
+ const char *AfterEscape;
+ if (*P == '\\') {
+ AfterEscape = P+1;
+ } else if (*P == '?') {
+ // If not a trigraph for escape, bail out.
+ if (P[1] != '?' || P[2] != '/')
+ return P;
+ AfterEscape = P+3;
+ } else {
+ return P;
+ }
+
+ unsigned NewLineSize = Lexer::getEscapedNewLineSize(AfterEscape);
+ if (NewLineSize == 0) return P;
+ P = AfterEscape+NewLineSize;
+ }
+}
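+
+// For example, given buffer contents of "\<newline>\<newline>foo", this
+// returns a pointer to the 'f'; given "foo", it returns P unchanged.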
+
+/// \brief Checks that the given token is the first token that occurs after the
+/// given location (this excludes comments and whitespace). Returns the location
+/// immediately after the specified token. If the token is not found or the
+/// location is inside a macro, the returned source location will be invalid.
+SourceLocation Lexer::findLocationAfterToken(SourceLocation Loc,
+ tok::TokenKind TKind,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ bool SkipTrailingWhitespaceAndNewLine) {
+ if (Loc.isMacroID()) {
+ if (!Lexer::isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
+ return SourceLocation();
+ }
+ Loc = Lexer::getLocForEndOfToken(Loc, 0, SM, LangOpts);
+
+ // Break down the source location.
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
+
+ // Try to load the file buffer.
+ bool InvalidTemp = false;
+ StringRef File = SM.getBufferData(LocInfo.first, &InvalidTemp);
+ if (InvalidTemp)
+ return SourceLocation();
+
+ const char *TokenBegin = File.data() + LocInfo.second;
+
+ // Lex from the start of the given location.
+ Lexer lexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts, File.begin(),
+ TokenBegin, File.end());
+ // Find the token.
+ Token Tok;
+ lexer.LexFromRawLexer(Tok);
+ if (Tok.isNot(TKind))
+ return SourceLocation();
+ SourceLocation TokenLoc = Tok.getLocation();
+
+ // Calculate how much whitespace needs to be skipped if any.
+ unsigned NumWhitespaceChars = 0;
+ if (SkipTrailingWhitespaceAndNewLine) {
+ const char *TokenEnd = SM.getCharacterData(TokenLoc) +
+ Tok.getLength();
+ unsigned char C = *TokenEnd;
+ while (isHorizontalWhitespace(C)) {
+ C = *(++TokenEnd);
+ NumWhitespaceChars++;
+ }
+
+ // Skip \r, \n, \r\n, or \n\r
+ if (C == '\n' || C == '\r') {
+ char PrevC = C;
+ C = *(++TokenEnd);
+ NumWhitespaceChars++;
+ if ((C == '\n' || C == '\r') && C != PrevC)
+ NumWhitespaceChars++;
+ }
+ }
+
+ return TokenLoc.getLocWithOffset(Tok.getLength() + NumWhitespaceChars);
+}
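+
+// Illustrative sketch (hypothetical caller): find the location just past the
+// ';' that follows the token at Loc.
+//
+//   SourceLocation AfterSemi = Lexer::findLocationAfterToken(
+//       Loc, tok::semi, SM, LangOpts,
+//       /*SkipTrailingWhitespaceAndNewLine=*/true);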
+
+/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer,
+/// get its size, and return it. This is tricky in several cases:
+/// 1. If currently at the start of a trigraph, we warn about the trigraph,
+/// then either return the trigraph (skipping 3 chars) or the '?',
+/// depending on whether trigraphs are enabled or not.
+/// 2. If this is an escaped newline (potentially with whitespace between
+/// the backslash and newline), implicitly skip the newline and return
+/// the char after it.
+///
+/// This handles the slow/uncommon case of the getCharAndSize method. Here we
+/// know that we can accumulate into Size, and that we have already incremented
+/// Ptr by Size bytes.
+///
+/// NOTE: When this method is updated, getCharAndSizeSlowNoWarn (below) should
+/// be updated to match.
+///
+char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
+ Token *Tok) {
+ // If we have a slash, look for an escaped newline.
+ if (Ptr[0] == '\\') {
+ ++Size;
+ ++Ptr;
+Slash:
+ // Common case, backslash-char where the char is not whitespace.
+ if (!isWhitespace(Ptr[0])) return '\\';
+
+ // See if we have optional whitespace characters between the slash and
+ // newline.
+ if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
+ // Remember that this token needs to be cleaned.
+ if (Tok) Tok->setFlag(Token::NeedsCleaning);
+
+ // Warn if there was whitespace between the backslash and newline.
+ if (Ptr[0] != '\n' && Ptr[0] != '\r' && Tok && !isLexingRawMode())
+ Diag(Ptr, diag::backslash_newline_space);
+
+ // Found backslash<whitespace><newline>. Parse the char after it.
+ Size += EscapedNewLineSize;
+ Ptr += EscapedNewLineSize;
+
+ // If the char that we finally got was a \n, then we must have had
+ // something like \<newline><newline>. We don't want to consume the
+ // second newline.
+ if (*Ptr == '\n' || *Ptr == '\r' || *Ptr == '\0')
+ return ' ';
+
+ // Use slow version to accumulate a correct size field.
+ return getCharAndSizeSlow(Ptr, Size, Tok);
+ }
+
+ // Otherwise, this is not an escaped newline, just return the slash.
+ return '\\';
+ }
+
+ // If this is a trigraph, process it.
+ if (Ptr[0] == '?' && Ptr[1] == '?') {
+ // If this is actually a legal trigraph (not something like "??x"), emit
+ // a trigraph warning. If so, and if trigraphs are enabled, return it.
+ if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : nullptr)) {
+ // Remember that this token needs to be cleaned.
+ if (Tok) Tok->setFlag(Token::NeedsCleaning);
+
+ Ptr += 3;
+ Size += 3;
+ if (C == '\\') goto Slash;
+ return C;
+ }
+ }
+
+ // If this is neither, return a single character.
+ ++Size;
+ return *Ptr;
+}
+
+
+/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
+/// getCharAndSizeNoWarn method. Here we know that we can accumulate into Size,
+/// and that we have already incremented Ptr by Size bytes.
+///
+/// NOTE: When this method is updated, getCharAndSizeSlow (above) should
+/// be updated to match.
+char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
+ const LangOptions &LangOpts) {
+ // If we have a slash, look for an escaped newline.
+ if (Ptr[0] == '\\') {
+ ++Size;
+ ++Ptr;
+Slash:
+ // Common case, backslash-char where the char is not whitespace.
+ if (!isWhitespace(Ptr[0])) return '\\';
+
+ // See if we have optional whitespace characters followed by a newline.
+ if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
+ // Found backslash<whitespace><newline>. Parse the char after it.
+ Size += EscapedNewLineSize;
+ Ptr += EscapedNewLineSize;
+
+ // If the char that we finally got was a \n, then we must have had
+ // something like \<newline><newline>. We don't want to consume the
+ // second newline.
+ if (*Ptr == '\n' || *Ptr == '\r' || *Ptr == '\0')
+ return ' ';
+
+ // Use slow version to accumulate a correct size field.
+ return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
+ }
+
+ // Otherwise, this is not an escaped newline, just return the slash.
+ return '\\';
+ }
+
+ // If this is a trigraph, process it.
+ if (LangOpts.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
+ // If this is actually a legal trigraph (not something like "??x"), return
+ // it.
+ if (char C = GetTrigraphCharForLetter(Ptr[2])) {
+ Ptr += 3;
+ Size += 3;
+ if (C == '\\') goto Slash;
+ return C;
+ }
+ }
+
+ // If this is neither, return a single character.
+ ++Size;
+ return *Ptr;
+}
+
+//===----------------------------------------------------------------------===//
+// Helper methods for lexing.
+//===----------------------------------------------------------------------===//
+
+/// \brief Routine that indiscriminately skips bytes in the source file.
+void Lexer::SkipBytes(unsigned Bytes, bool StartOfLine) {
+ BufferPtr += Bytes;
+ if (BufferPtr > BufferEnd)
+ BufferPtr = BufferEnd;
+ // FIXME: What exactly does the StartOfLine bit mean? There are two
+ // possible meanings for the "start" of the line: the first token on the
+ // unexpanded line, or the first token on the expanded line.
+ IsAtStartOfLine = StartOfLine;
+ IsAtPhysicalStartOfLine = StartOfLine;
+}
+
+static bool isAllowedIDChar(uint32_t C, const LangOptions &LangOpts) {
+ if (LangOpts.AsmPreprocessor) {
+ return false;
+ } else if (LangOpts.CPlusPlus11 || LangOpts.C11) {
+ static const llvm::sys::UnicodeCharSet C11AllowedIDChars(
+ C11AllowedIDCharRanges);
+ return C11AllowedIDChars.contains(C);
+ } else if (LangOpts.CPlusPlus) {
+ static const llvm::sys::UnicodeCharSet CXX03AllowedIDChars(
+ CXX03AllowedIDCharRanges);
+ return CXX03AllowedIDChars.contains(C);
+ } else {
+ static const llvm::sys::UnicodeCharSet C99AllowedIDChars(
+ C99AllowedIDCharRanges);
+ return C99AllowedIDChars.contains(C);
+ }
+}
+
+static bool isAllowedInitiallyIDChar(uint32_t C, const LangOptions &LangOpts) {
+ assert(isAllowedIDChar(C, LangOpts));
+ if (LangOpts.AsmPreprocessor) {
+ return false;
+ } else if (LangOpts.CPlusPlus11 || LangOpts.C11) {
+ static const llvm::sys::UnicodeCharSet C11DisallowedInitialIDChars(
+ C11DisallowedInitialIDCharRanges);
+ return !C11DisallowedInitialIDChars.contains(C);
+ } else if (LangOpts.CPlusPlus) {
+ return true;
+ } else {
+ static const llvm::sys::UnicodeCharSet C99DisallowedInitialIDChars(
+ C99DisallowedInitialIDCharRanges);
+ return !C99DisallowedInitialIDChars.contains(C);
+ }
+}
+
+static inline CharSourceRange makeCharRange(Lexer &L, const char *Begin,
+ const char *End) {
+ return CharSourceRange::getCharRange(L.getSourceLocation(Begin),
+ L.getSourceLocation(End));
+}
+
+static void maybeDiagnoseIDCharCompat(DiagnosticsEngine &Diags, uint32_t C,
+ CharSourceRange Range, bool IsFirst) {
+ // Check C99 compatibility.
+ if (!Diags.isIgnored(diag::warn_c99_compat_unicode_id, Range.getBegin())) {
+ enum {
+ CannotAppearInIdentifier = 0,
+ CannotStartIdentifier
+ };
+
+ static const llvm::sys::UnicodeCharSet C99AllowedIDChars(
+ C99AllowedIDCharRanges);
+ static const llvm::sys::UnicodeCharSet C99DisallowedInitialIDChars(
+ C99DisallowedInitialIDCharRanges);
+ if (!C99AllowedIDChars.contains(C)) {
+ Diags.Report(Range.getBegin(), diag::warn_c99_compat_unicode_id)
+ << Range
+ << CannotAppearInIdentifier;
+ } else if (IsFirst && C99DisallowedInitialIDChars.contains(C)) {
+ Diags.Report(Range.getBegin(), diag::warn_c99_compat_unicode_id)
+ << Range
+ << CannotStartIdentifier;
+ }
+ }
+
+ // Check C++98 compatibility.
+ if (!Diags.isIgnored(diag::warn_cxx98_compat_unicode_id, Range.getBegin())) {
+ static const llvm::sys::UnicodeCharSet CXX03AllowedIDChars(
+ CXX03AllowedIDCharRanges);
+ if (!CXX03AllowedIDChars.contains(C)) {
+ Diags.Report(Range.getBegin(), diag::warn_cxx98_compat_unicode_id)
+ << Range;
+ }
+ }
+}
+
+bool Lexer::tryConsumeIdentifierUCN(const char *&CurPtr, unsigned Size,
+ Token &Result) {
+ const char *UCNPtr = CurPtr + Size;
+ uint32_t CodePoint = tryReadUCN(UCNPtr, CurPtr, /*Token=*/nullptr);
+ if (CodePoint == 0 || !isAllowedIDChar(CodePoint, LangOpts))
+ return false;
+
+ if (!isLexingRawMode())
+ maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
+ makeCharRange(*this, CurPtr, UCNPtr),
+ /*IsFirst=*/false);
+
+ Result.setFlag(Token::HasUCN);
+ if ((UCNPtr - CurPtr == 6 && CurPtr[1] == 'u') ||
+ (UCNPtr - CurPtr == 10 && CurPtr[1] == 'U'))
+ CurPtr = UCNPtr;
+ else
+ while (CurPtr != UCNPtr)
+ (void)getAndAdvanceChar(CurPtr, Result);
+ return true;
+}
+
+bool Lexer::tryConsumeIdentifierUTF8Char(const char *&CurPtr) {
+ const char *UnicodePtr = CurPtr;
+ UTF32 CodePoint;
+ ConversionResult Result =
+ llvm::convertUTF8Sequence((const UTF8 **)&UnicodePtr,
+ (const UTF8 *)BufferEnd,
+ &CodePoint,
+ strictConversion);
+ if (Result != conversionOK ||
+ !isAllowedIDChar(static_cast<uint32_t>(CodePoint), LangOpts))
+ return false;
+
+ if (!isLexingRawMode())
+ maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
+ makeCharRange(*this, CurPtr, UnicodePtr),
+ /*IsFirst=*/false);
+
+ CurPtr = UnicodePtr;
+ return true;
+}
+
+bool Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
+ // Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$]
+ unsigned Size;
+ unsigned char C = *CurPtr++;
+ while (isIdentifierBody(C))
+ C = *CurPtr++;
+
+ --CurPtr; // Back up over the skipped character.
+
+ // Fast path, no $,\,? in identifier found. '\' might be an escaped newline
+ // or UCN, and ? might be a trigraph for '\', an escaped newline or UCN.
+ //
+ // TODO: Could merge these checks into an InfoTable flag to make the
+ // comparison cheaper
+ if (isASCII(C) && C != '\\' && C != '?' &&
+ (C != '$' || !LangOpts.DollarIdents)) {
+FinishIdentifier:
+ const char *IdStart = BufferPtr;
+ FormTokenWithChars(Result, CurPtr, tok::raw_identifier);
+ Result.setRawIdentifierData(IdStart);
+
+ // If we are in raw mode, return this identifier raw. There is no need to
+ // look up identifier information or attempt to macro expand it.
+ if (LexingRawMode)
+ return true;
+
+ // Fill in Result.IdentifierInfo and update the token kind,
+ // looking up the identifier in the identifier table.
+ IdentifierInfo *II = PP->LookUpIdentifierInfo(Result);
+
+ // Finally, now that we know we have an identifier, pass this off to the
+ // preprocessor, which may macro expand it or something.
+ if (II->isHandleIdentifierCase())
+ return PP->HandleIdentifier(Result);
+
+ return true;
+ }
+
+ // Otherwise, $,\,? in identifier found. Enter slower path.
+
+ C = getCharAndSize(CurPtr, Size);
+ while (1) {
+ if (C == '$') {
+ // If we hit a $ and they are not supported in identifiers, we are done.
+ if (!LangOpts.DollarIdents) goto FinishIdentifier;
+
+ // Otherwise, emit a diagnostic and continue.
+ if (!isLexingRawMode())
+ Diag(CurPtr, diag::ext_dollar_in_identifier);
+ CurPtr = ConsumeChar(CurPtr, Size, Result);
+ C = getCharAndSize(CurPtr, Size);
+ continue;
+
+ } else if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result)) {
+ C = getCharAndSize(CurPtr, Size);
+ continue;
+ } else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr)) {
+ C = getCharAndSize(CurPtr, Size);
+ continue;
+ } else if (!isIdentifierBody(C)) {
+ goto FinishIdentifier;
+ }
+
+ // Otherwise, this character is good, consume it.
+ CurPtr = ConsumeChar(CurPtr, Size, Result);
+
+ C = getCharAndSize(CurPtr, Size);
+ while (isIdentifierBody(C)) {
+ CurPtr = ConsumeChar(CurPtr, Size, Result);
+ C = getCharAndSize(CurPtr, Size);
+ }
+ }
+}
+
+/// isHexaLiteral - Return true if Start points to a hex constant, in
+/// Microsoft mode (where this is supposed to be several different tokens).
+bool Lexer::isHexaLiteral(const char *Start, const LangOptions &LangOpts) {
+ unsigned Size;
+ char C1 = Lexer::getCharAndSizeNoWarn(Start, Size, LangOpts);
+ if (C1 != '0')
+ return false;
+ char C2 = Lexer::getCharAndSizeNoWarn(Start + Size, Size, LangOpts);
+ return (C2 == 'x' || C2 == 'X');
+}
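+
+// For example, this returns true when Start points at "0x12e+1" and false
+// when it points at "012" or "1e5".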
+
+/// LexNumericConstant - Lex the remainder of an integer or floating point
+/// constant. From[-1] is the first character lexed. Return the end of the
+/// constant.
+bool Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
+ unsigned Size;
+ char C = getCharAndSize(CurPtr, Size);
+ char PrevCh = 0;
+ while (isPreprocessingNumberBody(C)) {
+ CurPtr = ConsumeChar(CurPtr, Size, Result);
+ PrevCh = C;
+ C = getCharAndSize(CurPtr, Size);
+ }
+
+ // If we fell out, check for a sign, due to 1e+12. If we have one, continue.
+ if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e')) {
+ // If we are in Microsoft mode, don't continue if the constant is hex.
+ // For example, MSVC will accept the following as 3 tokens: 0x1234567e+1
+ if (!LangOpts.MicrosoftExt || !isHexaLiteral(BufferPtr, LangOpts))
+ return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
+ }
+
+ // If we have a hex FP constant, continue.
+ if ((C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p')) {
+ // Outside C99, we accept hexadecimal floating point numbers as a
+ // not-quite-conforming extension. Only do so if this looks like it's
+ // actually meant to be a hexfloat, and not if it has a ud-suffix.
+ bool IsHexFloat = true;
+ if (!LangOpts.C99) {
+ if (!isHexaLiteral(BufferPtr, LangOpts))
+ IsHexFloat = false;
+ else if (std::find(BufferPtr, CurPtr, '_') != CurPtr)
+ IsHexFloat = false;
+ }
+ if (IsHexFloat)
+ return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
+ }
+
+ // If we have a digit separator, continue.
+ if (C == '\'' && getLangOpts().CPlusPlus14) {
+ unsigned NextSize;
+ char Next = getCharAndSizeNoWarn(CurPtr + Size, NextSize, getLangOpts());
+ if (isIdentifierBody(Next)) {
+ if (!isLexingRawMode())
+ Diag(CurPtr, diag::warn_cxx11_compat_digit_separator);
+ CurPtr = ConsumeChar(CurPtr, Size, Result);
+ CurPtr = ConsumeChar(CurPtr, NextSize, Result);
+ return LexNumericConstant(Result, CurPtr);
+ }
+ }
+
+ // If we have a UCN or UTF-8 character (perhaps in a ud-suffix), continue.
+ if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
+ return LexNumericConstant(Result, CurPtr);
+ if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
+ return LexNumericConstant(Result, CurPtr);
+
+ // Update the location of token as well as BufferPtr.
+ const char *TokStart = BufferPtr;
+ FormTokenWithChars(Result, CurPtr, tok::numeric_constant);
+ Result.setLiteralData(TokStart);
+ return true;
+}
+
+/// LexUDSuffix - Lex the ud-suffix production for user-defined literal suffixes
+/// in C++11, or warn on a ud-suffix in C++98.
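+/// e.g. the standard "s" in "abc"s, or the "_km" in 42_km (a hypothetical
+/// user-defined suffix).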
+const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
+ bool IsStringLiteral) {
+ assert(getLangOpts().CPlusPlus);
+
+ // Maximally munch an identifier.
+ unsigned Size;
+ char C = getCharAndSize(CurPtr, Size);
+ bool Consumed = false;
+
+ if (!isIdentifierHead(C)) {
+ if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
+ Consumed = true;
+ else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
+ Consumed = true;
+ else
+ return CurPtr;
+ }
+
+ if (!getLangOpts().CPlusPlus11) {
+ if (!isLexingRawMode())
+ Diag(CurPtr,
+ C == '_' ? diag::warn_cxx11_compat_user_defined_literal
+ : diag::warn_cxx11_compat_reserved_user_defined_literal)
+ << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
+ return CurPtr;
+ }
+
+ // C++11 [lex.ext]p10, [usrlit.suffix]p1: A program containing a ud-suffix
+ // that does not start with an underscore is ill-formed. As a conforming
+ // extension, we treat all such suffixes as if they had whitespace before
+ // them. However, we assume a suffix beginning with a UCN or UTF-8 character
+ // is more likely to be a ud-suffix than a macro, and accept it.
+ if (!Consumed) {
+ bool IsUDSuffix = false;
+ if (C == '_')
+ IsUDSuffix = true;
+ else if (IsStringLiteral && getLangOpts().CPlusPlus14) {
+ // In C++1y, we need to look ahead a few characters to see if this is a
+ // valid suffix for a string literal or a numeric literal (this could be
+ // the 'operator""if' defining a numeric literal operator).
+ const unsigned MaxStandardSuffixLength = 3;
+ char Buffer[MaxStandardSuffixLength] = { C };
+ unsigned Consumed = Size;
+ unsigned Chars = 1;
+ while (true) {
+ unsigned NextSize;
+ char Next = getCharAndSizeNoWarn(CurPtr + Consumed, NextSize,
+ getLangOpts());
+ if (!isIdentifierBody(Next)) {
+ // End of suffix. Check whether this is on the whitelist.
+ IsUDSuffix = (Chars == 1 && Buffer[0] == 's') ||
+ NumericLiteralParser::isValidUDSuffix(
+ getLangOpts(), StringRef(Buffer, Chars));
+ break;
+ }
+
+ if (Chars == MaxStandardSuffixLength)
+ // Too long: can't be a standard suffix.
+ break;
+
+ Buffer[Chars++] = Next;
+ Consumed += NextSize;
+ }
+ }
+
+ if (!IsUDSuffix) {
+ if (!isLexingRawMode())
+ Diag(CurPtr, getLangOpts().MSVCCompat
+ ? diag::ext_ms_reserved_user_defined_literal
+ : diag::ext_reserved_user_defined_literal)
+ << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
+ return CurPtr;
+ }
+
+ CurPtr = ConsumeChar(CurPtr, Size, Result);
+ }
+
+ Result.setFlag(Token::HasUDSuffix);
+ while (true) {
+ C = getCharAndSize(CurPtr, Size);
+ if (isIdentifierBody(C)) { CurPtr = ConsumeChar(CurPtr, Size, Result); }
+ else if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result)) {}
+ else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr)) {}
+ else break;
+ }
+
+ return CurPtr;
+}
+
+/// LexStringLiteral - Lex the remainder of a string literal, after having lexed
+/// either " or L" or u8" or u" or U".
+bool Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
+ tok::TokenKind Kind) {
+ // Does this string contain the \0 character?
+ const char *NulCharacter = nullptr;
+
+ if (!isLexingRawMode() &&
+ (Kind == tok::utf8_string_literal ||
+ Kind == tok::utf16_string_literal ||
+ Kind == tok::utf32_string_literal))
+ Diag(BufferPtr, getLangOpts().CPlusPlus
+ ? diag::warn_cxx98_compat_unicode_literal
+ : diag::warn_c99_compat_unicode_literal);
+
+ char C = getAndAdvanceChar(CurPtr, Result);
+ while (C != '"') {
+ // Skip escaped characters. Escaped newlines will already be processed by
+ // getAndAdvanceChar.
+ if (C == '\\')
+ C = getAndAdvanceChar(CurPtr, Result);
+
+ if (C == '\n' || C == '\r' || // Newline.
+ (C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
+ if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
+ Diag(BufferPtr, diag::ext_unterminated_char_or_string) << 1;
+ FormTokenWithChars(Result, CurPtr-1, tok::unknown);
+ return true;
+ }
+
+ if (C == 0) {
+ if (isCodeCompletionPoint(CurPtr-1)) {
+ PP->CodeCompleteNaturalLanguage();
+ FormTokenWithChars(Result, CurPtr-1, tok::unknown);
+ cutOffLexing();
+ return true;
+ }
+
+ NulCharacter = CurPtr-1;
+ }
+ C = getAndAdvanceChar(CurPtr, Result);
+ }
+
+ // If we are in C++, lex the optional ud-suffix.
+ if (getLangOpts().CPlusPlus)
+ CurPtr = LexUDSuffix(Result, CurPtr, true);
+
+ // If a nul character existed in the string, warn about it.
+ if (NulCharacter && !isLexingRawMode())
+ Diag(NulCharacter, diag::null_in_char_or_string) << 1;
+
+ // Update the location of the token as well as the BufferPtr instance var.
+ const char *TokStart = BufferPtr;
+ FormTokenWithChars(Result, CurPtr, Kind);
+ Result.setLiteralData(TokStart);
+ return true;
+}
+
+/// LexRawStringLiteral - Lex the remainder of a raw string literal, after
+/// having lexed R", LR", u8R", uR", or UR".
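+/// e.g. R"xyz(can contain "quotes" and \backslashes)xyz" uses the delimiter
+/// "xyz"; the characters between the parentheses are taken verbatim.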
+bool Lexer::LexRawStringLiteral(Token &Result, const char *CurPtr,
+ tok::TokenKind Kind) {
+ // This function doesn't use getAndAdvanceChar because C++0x [lex.pptoken]p3:
+ // Between the initial and final double quote characters of the raw string,
+ // any transformations performed in phases 1 and 2 (trigraphs,
+ // universal-character-names, and line splicing) are reverted.
+
+ if (!isLexingRawMode())
+ Diag(BufferPtr, diag::warn_cxx98_compat_raw_string_literal);
+
+ unsigned PrefixLen = 0;
+
+ while (PrefixLen != 16 && isRawStringDelimBody(CurPtr[PrefixLen]))
+ ++PrefixLen;
+
+ // If the last character was not a '(', then we didn't lex a valid delimiter.
+ if (CurPtr[PrefixLen] != '(') {
+ if (!isLexingRawMode()) {
+ const char *PrefixEnd = &CurPtr[PrefixLen];
+ if (PrefixLen == 16) {
+ Diag(PrefixEnd, diag::err_raw_delim_too_long);
+ } else {
+ Diag(PrefixEnd, diag::err_invalid_char_raw_delim)
+ << StringRef(PrefixEnd, 1);
+ }
+ }
+
+ // Search for the next '"' in hopes of salvaging the lexer. Unfortunately,
+ // it's possible the '"' was intended to be part of the raw string, but
+ // there's not much we can do about that.
+ while (1) {
+ char C = *CurPtr++;
+
+ if (C == '"')
+ break;
+ if (C == 0 && CurPtr-1 == BufferEnd) {
+ --CurPtr;
+ break;
+ }
+ }
+
+ FormTokenWithChars(Result, CurPtr, tok::unknown);
+ return true;
+ }
+
+ // Save the prefix and move CurPtr past it.
+ const char *Prefix = CurPtr;
+ CurPtr += PrefixLen + 1; // skip over prefix and '('
+
+ while (1) {
+ char C = *CurPtr++;
+
+ if (C == ')') {
+ // Check for prefix match and closing quote.
+ if (strncmp(CurPtr, Prefix, PrefixLen) == 0 && CurPtr[PrefixLen] == '"') {
+ CurPtr += PrefixLen + 1; // skip over prefix and '"'
+ break;
+ }
+ } else if (C == 0 && CurPtr-1 == BufferEnd) { // End of file.
+ if (!isLexingRawMode())
+ Diag(BufferPtr, diag::err_unterminated_raw_string)
+ << StringRef(Prefix, PrefixLen);
+ FormTokenWithChars(Result, CurPtr-1, tok::unknown);
+ return true;
+ }
+ }
+
+ // If we are in C++, lex the optional ud-suffix.
+ if (getLangOpts().CPlusPlus)
+ CurPtr = LexUDSuffix(Result, CurPtr, true);
+
+ // Update the location of token as well as BufferPtr.
+ const char *TokStart = BufferPtr;
+ FormTokenWithChars(Result, CurPtr, Kind);
+ Result.setLiteralData(TokStart);
+ return true;
+}
+
+/// LexAngledStringLiteral - Lex the remainder of an angled string literal,
+/// after having lexed the '<' character. This is used for #include filenames.
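+/// e.g. the <stdio.h> in "#include <stdio.h>".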
+bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
+ // Does this string contain the \0 character?
+ const char *NulCharacter = nullptr;
+ const char *AfterLessPos = CurPtr;
+ char C = getAndAdvanceChar(CurPtr, Result);
+ while (C != '>') {
+ // Skip escaped characters.
+ if (C == '\\' && CurPtr < BufferEnd) {
+ // Skip the escaped character.
+ getAndAdvanceChar(CurPtr, Result);
+ } else if (C == '\n' || C == '\r' || // Newline.
+ (C == 0 && (CurPtr-1 == BufferEnd || // End of file.
+ isCodeCompletionPoint(CurPtr-1)))) {
+ // If the filename is unterminated, then it must just be a lone <
+ // character. Return this as such.
+ FormTokenWithChars(Result, AfterLessPos, tok::less);
+ return true;
+ } else if (C == 0) {
+ NulCharacter = CurPtr-1;
+ }
+ C = getAndAdvanceChar(CurPtr, Result);
+ }
+
+ // If a nul character existed in the string, warn about it.
+ if (NulCharacter && !isLexingRawMode())
+ Diag(NulCharacter, diag::null_in_char_or_string) << 1;
+
+ // Update the location of token as well as BufferPtr.
+ const char *TokStart = BufferPtr;
+ FormTokenWithChars(Result, CurPtr, tok::angle_string_literal);
+ Result.setLiteralData(TokStart);
+ return true;
+}
+
+
+/// LexCharConstant - Lex the remainder of a character constant, after having
+/// lexed either ' or L' or u8' or u' or U'.
+bool Lexer::LexCharConstant(Token &Result, const char *CurPtr,
+ tok::TokenKind Kind) {
+ // Does this character contain the \0 character?
+ const char *NulCharacter = nullptr;
+
+ if (!isLexingRawMode()) {
+ if (Kind == tok::utf16_char_constant || Kind == tok::utf32_char_constant)
+ Diag(BufferPtr, getLangOpts().CPlusPlus
+ ? diag::warn_cxx98_compat_unicode_literal
+ : diag::warn_c99_compat_unicode_literal);
+ else if (Kind == tok::utf8_char_constant)
+ Diag(BufferPtr, diag::warn_cxx14_compat_u8_character_literal);
+ }
+
+ char C = getAndAdvanceChar(CurPtr, Result);
+ if (C == '\'') {
+ if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
+ Diag(BufferPtr, diag::ext_empty_character);
+ FormTokenWithChars(Result, CurPtr, tok::unknown);
+ return true;
+ }
+
+ while (C != '\'') {
+ // Skip escaped characters.
+ if (C == '\\')
+ C = getAndAdvanceChar(CurPtr, Result);
+
+ if (C == '\n' || C == '\r' || // Newline.
+ (C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
+ if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
+ Diag(BufferPtr, diag::ext_unterminated_char_or_string) << 0;
+ FormTokenWithChars(Result, CurPtr-1, tok::unknown);
+ return true;
+ }
+
+ if (C == 0) {
+ if (isCodeCompletionPoint(CurPtr-1)) {
+ PP->CodeCompleteNaturalLanguage();
+ FormTokenWithChars(Result, CurPtr-1, tok::unknown);
+ cutOffLexing();
+ return true;
+ }
+
+ NulCharacter = CurPtr-1;
+ }
+ C = getAndAdvanceChar(CurPtr, Result);
+ }
+
+ // If we are in C++, lex the optional ud-suffix.
+ if (getLangOpts().CPlusPlus)
+ CurPtr = LexUDSuffix(Result, CurPtr, false);
+
+ // If a nul character existed in the character, warn about it.
+ if (NulCharacter && !isLexingRawMode())
+ Diag(NulCharacter, diag::null_in_char_or_string) << 0;
+
+ // Update the location of token as well as BufferPtr.
+ const char *TokStart = BufferPtr;
+ FormTokenWithChars(Result, CurPtr, Kind);
+ Result.setLiteralData(TokStart);
+ return true;
+}
+
+/// SkipWhitespace - Efficiently skip over a series of whitespace characters.
+/// Update BufferPtr to point to the next non-whitespace character and return.
+///
+/// This method forms a token and returns true if KeepWhitespaceMode is enabled.
+///
+bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr,
+ bool &TokAtPhysicalStartOfLine) {
+ // Whitespace - Skip it, then return the token after the whitespace.
+ bool SawNewline = isVerticalWhitespace(CurPtr[-1]);
+
+ unsigned char Char = *CurPtr;
+
+ // Skip consecutive spaces efficiently.
+ while (1) {
+ // Skip horizontal whitespace very aggressively.
+ while (isHorizontalWhitespace(Char))
+ Char = *++CurPtr;
+
+ // Otherwise if we have something other than whitespace, we're done.
+ if (!isVerticalWhitespace(Char))
+ break;
+
+ if (ParsingPreprocessorDirective) {
+ // End of preprocessor directive line, let LexTokenInternal handle this.
+ BufferPtr = CurPtr;
+ return false;
+ }
+
+ // OK, but handle newline.
+ SawNewline = true;
+ Char = *++CurPtr;
+ }
+
+ // If the client wants us to return whitespace, return it now.
+ if (isKeepWhitespaceMode()) {
+ FormTokenWithChars(Result, CurPtr, tok::unknown);
+ if (SawNewline) {
+ IsAtStartOfLine = true;
+ IsAtPhysicalStartOfLine = true;
+ }
+ // FIXME: The next token will not have LeadingSpace set.
+ return true;
+ }
+
+ // If this isn't immediately after a newline, there is leading space.
+ char PrevChar = CurPtr[-1];
+ bool HasLeadingSpace = !isVerticalWhitespace(PrevChar);
+
+ Result.setFlagValue(Token::LeadingSpace, HasLeadingSpace);
+ if (SawNewline) {
+ Result.setFlag(Token::StartOfLine);
+ TokAtPhysicalStartOfLine = true;
+ }
+
+ BufferPtr = CurPtr;
+ return false;
+}
+
+/// We have just read the // characters from input. Skip until we find the
+/// newline character that terminates the comment. Then update BufferPtr and
+/// return.
+///
+/// If we're in KeepCommentMode or any CommentHandler has inserted
+/// some tokens, this will store the first token and return true.
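+///
+/// e.g. a line comment can be continued by an escaped newline:
+///   // this comment \
+///      continues here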
+bool Lexer::SkipLineComment(Token &Result, const char *CurPtr,
+ bool &TokAtPhysicalStartOfLine) {
+ // If Line comments aren't explicitly enabled for this language, emit an
+ // extension warning.
+ if (!LangOpts.LineComment && !isLexingRawMode()) {
+ Diag(BufferPtr, diag::ext_line_comment);
+
+ // Mark them enabled so we only emit one warning for this translation
+ // unit.
+ LangOpts.LineComment = true;
+ }
+
+ // Scan over the body of the comment. The common case, when scanning, is that
+ // the comment contains normal ascii characters with nothing interesting in
+ // them. As such, optimize for this case with the inner loop.
+ char C;
+ do {
+ C = *CurPtr;
+ // Skip over characters in the fast loop.
+ while (C != 0 && // Potentially EOF.
+ C != '\n' && C != '\r') // Newline or DOS-style newline.
+ C = *++CurPtr;
+
+ const char *NextLine = CurPtr;
+ if (C != 0) {
+ // We found a newline, see if it's escaped.
+ const char *EscapePtr = CurPtr-1;
+ bool HasSpace = false;
+ while (isHorizontalWhitespace(*EscapePtr)) { // Skip whitespace.
+ --EscapePtr;
+ HasSpace = true;
+ }
+
+ if (*EscapePtr == '\\') // Escaped newline.
+ CurPtr = EscapePtr;
+ else if (EscapePtr[0] == '/' && EscapePtr[-1] == '?' &&
+ EscapePtr[-2] == '?') // Trigraph-escaped newline.
+ CurPtr = EscapePtr-2;
+ else
+ break; // This is a newline, we're done.
+
+ // If there was space between the backslash and newline, warn about it.
+ if (HasSpace && !isLexingRawMode())
+ Diag(EscapePtr, diag::backslash_newline_space);
+ }
+
+ // Otherwise, this is a hard case. Fall back on getAndAdvanceChar to
+ // properly decode the character. Read it in raw mode to avoid emitting
+ // diagnostics about things like trigraphs. If we see an escaped newline,
+ // we'll handle it below.
+ const char *OldPtr = CurPtr;
+ bool OldRawMode = isLexingRawMode();
+ LexingRawMode = true;
+ C = getAndAdvanceChar(CurPtr, Result);
+ LexingRawMode = OldRawMode;
+
+ // If we read only one character, then no special handling is needed.
+ // We're done and can skip forward to the newline.
+ if (C != 0 && CurPtr == OldPtr+1) {
+ CurPtr = NextLine;
+ break;
+ }
+
+ // If we read multiple characters, and one of those characters was a \r or
+ // \n, then we had an escaped newline within the comment. Emit a diagnostic
+ // unless the next line is also a // comment.
+ if (CurPtr != OldPtr+1 && C != '/' && CurPtr[0] != '/') {
+ for (; OldPtr != CurPtr; ++OldPtr)
+ if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
+ // Okay, we found a // comment that ends in a newline. If the next
+ // line is also a // comment preceded only by whitespace, don't emit a
+ // diagnostic.
+ if (isWhitespace(C)) {
+ const char *ForwardPtr = CurPtr;
+ while (isWhitespace(*ForwardPtr)) // Skip whitespace.
+ ++ForwardPtr;
+ if (ForwardPtr[0] == '/' && ForwardPtr[1] == '/')
+ break;
+ }
+
+ if (!isLexingRawMode())
+ Diag(OldPtr-1, diag::ext_multi_line_line_comment);
+ break;
+ }
+ }
+
+ if (CurPtr == BufferEnd+1) {
+ --CurPtr;
+ break;
+ }
+
+ if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) {
+ PP->CodeCompleteNaturalLanguage();
+ cutOffLexing();
+ return false;
+ }
+
+ } while (C != '\n' && C != '\r');
+
+ // Found but did not consume the newline. Notify comment handlers about the
+ // comment unless we're in a #if 0 block.
+ if (PP && !isLexingRawMode() &&
+ PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
+ getSourceLocation(CurPtr)))) {
+ BufferPtr = CurPtr;
+ return true; // A token has to be returned.
+ }
+
+ // If we are returning comments as tokens, return this comment as a token.
+ if (inKeepCommentMode())
+ return SaveLineComment(Result, CurPtr);
+
+ // If we are inside a preprocessor directive and we see the end of line,
+ // return immediately, so that the lexer can return this as an EOD token.
+ if (ParsingPreprocessorDirective || CurPtr == BufferEnd) {
+ BufferPtr = CurPtr;
+ return false;
+ }
+
+ // Otherwise, eat the \n character. We don't care if this is a \n\r or
+ // \r\n sequence. This is an efficiency hack (because we know the \n can't
+ // contribute to another token), it isn't needed for correctness. Note that
+ // this is ok even in KeepWhitespaceMode, because we would have returned the
+ // comment above in that mode.
+ ++CurPtr;
+
+ // The next returned token is at the start of the line.
+ Result.setFlag(Token::StartOfLine);
+ TokAtPhysicalStartOfLine = true;
+ // No leading whitespace seen so far.
+ Result.clearFlag(Token::LeadingSpace);
+ BufferPtr = CurPtr;
+ return false;
+}
+
+/// If in save-comment mode, package up this Line comment in an appropriate
+/// way and return it.
+bool Lexer::SaveLineComment(Token &Result, const char *CurPtr) {
+ // If we're not in a preprocessor directive, just return the // comment
+ // directly.
+ FormTokenWithChars(Result, CurPtr, tok::comment);
+
+ if (!ParsingPreprocessorDirective || LexingRawMode)
+ return true;
+
+ // If this Line-style comment is in a macro definition, transmogrify it into
+ // a C-style block comment.
+ bool Invalid = false;
+ std::string Spelling = PP->getSpelling(Result, &Invalid);
+ if (Invalid)
+ return true;
+
+ assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not line comment?");
+ Spelling[1] = '*'; // Change prefix to "/*".
+ Spelling += "*/"; // add suffix.
+
+ Result.setKind(tok::comment);
+ PP->CreateString(Spelling, Result,
+ Result.getLocation(), Result.getLocation());
+ return true;
+}
+
+/// isEndOfBlockCommentWithEscapedNewLine - Return true if the specified
+/// newline character (either \\n or \\r) is part of an escaped newline
+/// sequence; issue a diagnostic if so. We know that the newline is inside
+/// of a block comment.
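+///
+/// e.g. a block comment whose last two lines are "*\" and "/" still
+/// terminates, since line splicing joins them into "*/".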
+static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
+ Lexer *L) {
+ assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');
+
+ // Back up off the newline.
+ --CurPtr;
+
+ // If this is a two-character newline sequence, skip the other character.
+ if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
+ // \n\n or \r\r -> not escaped newline.
+ if (CurPtr[0] == CurPtr[1])
+ return false;
+ // \n\r or \r\n -> skip the newline.
+ --CurPtr;
+ }
+
+ // If we have horizontal whitespace, skip over it. We allow whitespace
+ // between the slash and newline.
+ bool HasSpace = false;
+ while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
+ --CurPtr;
+ HasSpace = true;
+ }
+
+ // If we have a backslash, we know this is an escaped newline.
+ if (*CurPtr == '\\') {
+ if (CurPtr[-1] != '*') return false;
+ } else {
+ // It isn't a backslash; is it the ??/ trigraph for backslash?
+ if (CurPtr[0] != '/' || CurPtr[-1] != '?' || CurPtr[-2] != '?' ||
+ CurPtr[-3] != '*')
+ return false;
+
+ // This is the trigraph ending the comment. Emit a stern warning!
+ CurPtr -= 2;
+
+ // If trigraphs are not enabled, warn that we ignored this trigraph and
+ // ignore this * character.
+ if (!L->getLangOpts().Trigraphs) {
+ if (!L->isLexingRawMode())
+ L->Diag(CurPtr, diag::trigraph_ignored_block_comment);
+ return false;
+ }
+ if (!L->isLexingRawMode())
+ L->Diag(CurPtr, diag::trigraph_ends_block_comment);
+ }
+
+ // Warn about having an escaped newline between the */ characters.
+ if (!L->isLexingRawMode())
+ L->Diag(CurPtr, diag::escaped_newline_block_comment_end);
+
+ // If there was space between the backslash and newline, warn about it.
+ if (HasSpace && !L->isLexingRawMode())
+ L->Diag(CurPtr, diag::backslash_newline_space);
+
+ return true;
+}
+
+#ifdef __SSE2__
+#include <emmintrin.h>
+#elif __ALTIVEC__
+#include <altivec.h>
+#undef bool
+#endif
+
+/// We have just read from input the / and * characters that started a comment.
+/// Read until we find the * and / characters that terminate the comment.
+/// Note that we don't bother decoding trigraphs or escaped newlines in block
+/// comments, because they cannot cause the comment to end. The only thing
+/// that can happen is the comment could end with an escaped newline between
+/// the terminating * and /.
+///
+/// If we're in KeepCommentMode or any CommentHandler has inserted
+/// some tokens, this will store the first token and return true.
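+///
+/// e.g. in "/*/", the trailing '/' does not close the comment; the lexer
+/// keeps scanning for a later "*/".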
+bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr,
+ bool &TokAtPhysicalStartOfLine) {
+ // Scan one character past where we should, looking for a '/' character. Once
+ // we find it, check to see if it was preceded by a *. This common
+ // optimization helps people who like to put a lot of * characters in their
+ // comments.
+
+ // Get the first character with newlines and trigraphs skipped, to handle
+ // the degenerate /*/ case below correctly if the * has an escaped newline
+ // after it.
+ unsigned CharSize;
+ unsigned char C = getCharAndSize(CurPtr, CharSize);
+ CurPtr += CharSize;
+ if (C == 0 && CurPtr == BufferEnd+1) {
+ if (!isLexingRawMode())
+ Diag(BufferPtr, diag::err_unterminated_block_comment);
+ --CurPtr;
+
+ // KeepWhitespaceMode should return this broken comment as a token. Since
+ // it isn't a well formed comment, just return it as an 'unknown' token.
+ if (isKeepWhitespaceMode()) {
+ FormTokenWithChars(Result, CurPtr, tok::unknown);
+ return true;
+ }
+
+ BufferPtr = CurPtr;
+ return false;
+ }
+
+ // Check to see if the first character after the '/*' is another /. If so,
+ // then this slash does not end the block comment, it is part of it.
+ if (C == '/')
+ C = *CurPtr++;
+
+ while (1) {
+ // Skip over all non-interesting characters until we find the end of the
+ // buffer or a (probably ending) '/' character.
+ if (CurPtr + 24 < BufferEnd &&
+ // If there is a code-completion point avoid the fast scan because it
+ // doesn't check for '\0'.
+ !(PP && PP->getCodeCompletionFileLoc() == FileLoc)) {
+ // While not aligned to a 16-byte boundary.
+ while (C != '/' && ((intptr_t)CurPtr & 0x0F) != 0)
+ C = *CurPtr++;
+
+ if (C == '/') goto FoundSlash;
+
+#ifdef __SSE2__
+ __m128i Slashes = _mm_set1_epi8('/');
+ while (CurPtr+16 <= BufferEnd) {
+ int cmp = _mm_movemask_epi8(_mm_cmpeq_epi8(*(const __m128i*)CurPtr,
+ Slashes));
+ if (cmp != 0) {
+ // Adjust the pointer to point directly after the first slash. It's
+ // not necessary to set C here, it will be overwritten at the end of
+ // the outer loop.
+ CurPtr += llvm::countTrailingZeros<unsigned>(cmp) + 1;
+ goto FoundSlash;
+ }
+ CurPtr += 16;
+ }
+#elif __ALTIVEC__
+ __vector unsigned char Slashes = {
+ '/', '/', '/', '/', '/', '/', '/', '/',
+ '/', '/', '/', '/', '/', '/', '/', '/'
+ };
+ while (CurPtr+16 <= BufferEnd &&
+ !vec_any_eq(*(const vector unsigned char*)CurPtr, Slashes))
+ CurPtr += 16;
+#else
+ // Scan for '/' quickly. Many block comments are very large.
+ while (CurPtr[0] != '/' &&
+ CurPtr[1] != '/' &&
+ CurPtr[2] != '/' &&
+ CurPtr[3] != '/' &&
+ CurPtr+4 < BufferEnd) {
+ CurPtr += 4;
+ }
+#endif
+
+ // It has to be one of the bytes scanned, increment to it and read one.
+ C = *CurPtr++;
+ }
+
+ // Loop to scan the remainder.
+ while (C != '/' && C != '\0')
+ C = *CurPtr++;
+
+ if (C == '/') {
+ FoundSlash:
+ if (CurPtr[-2] == '*') // We found the final */. We're done!
+ break;
+
+ if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) {
+ if (isEndOfBlockCommentWithEscapedNewLine(CurPtr-2, this)) {
+ // We found the final */, though it had an escaped newline between the
+ // * and /. We're done!
+ break;
+ }
+ }
+ if (CurPtr[0] == '*' && CurPtr[1] != '/') {
+ // If this is a /* inside of the comment, emit a warning. Don't do this
+ // if this is a /*/, which will end the comment. This misses cases with
+ // embedded escaped newlines, but oh well.
+ if (!isLexingRawMode())
+ Diag(CurPtr-1, diag::warn_nested_block_comment);
+ }
+ } else if (C == 0 && CurPtr == BufferEnd+1) {
+ if (!isLexingRawMode())
+ Diag(BufferPtr, diag::err_unterminated_block_comment);
+ // Note: the user probably forgot a */. We could continue immediately
+ // after the /*, but this would involve lexing a lot of what really is the
+ // comment, which surely would confuse the parser.
+ --CurPtr;
+
+ // KeepWhitespaceMode should return this broken comment as a token. Since
+ // it isn't a well formed comment, just return it as an 'unknown' token.
+ if (isKeepWhitespaceMode()) {
+ FormTokenWithChars(Result, CurPtr, tok::unknown);
+ return true;
+ }
+
+ BufferPtr = CurPtr;
+ return false;
+ } else if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) {
+ PP->CodeCompleteNaturalLanguage();
+ cutOffLexing();
+ return false;
+ }
+
+ C = *CurPtr++;
+ }
+
+ // Notify comment handlers about the comment unless we're in a #if 0 block.
+ if (PP && !isLexingRawMode() &&
+ PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
+ getSourceLocation(CurPtr)))) {
+ BufferPtr = CurPtr;
+ return true; // A token has to be returned.
+ }
+
+ // If we are returning comments as tokens, return this comment as a token.
+ if (inKeepCommentMode()) {
+ FormTokenWithChars(Result, CurPtr, tok::comment);
+ return true;
+ }
+
+ // It is common for the tokens immediately after a /**/ comment to be
+ // whitespace. Instead of going through the big switch, handle it
+ // efficiently now. This is safe even in KeepWhitespaceMode because we would
+ // have already returned above with the comment as a token.
+ if (isHorizontalWhitespace(*CurPtr)) {
+ SkipWhitespace(Result, CurPtr+1, TokAtPhysicalStartOfLine);
+ return false;
+ }
+
+ // Otherwise, just return so that the next character will be lexed as a token.
+ BufferPtr = CurPtr;
+ Result.setFlag(Token::LeadingSpace);
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Primary Lexing Entry Points
+//===----------------------------------------------------------------------===//
+
+/// ReadToEndOfLine - Read the rest of the current preprocessor line as an
+/// uninterpreted string. This switches the lexer out of directive mode.
+void Lexer::ReadToEndOfLine(SmallVectorImpl<char> *Result) {
+ assert(ParsingPreprocessorDirective && ParsingFilename == false &&
+ "Must be in a preprocessing directive!");
+ Token Tmp;
+
+ // CurPtr - Cache BufferPtr in an automatic variable.
+ const char *CurPtr = BufferPtr;
+ while (1) {
+ char Char = getAndAdvanceChar(CurPtr, Tmp);
+ switch (Char) {
+ default:
+ if (Result)
+ Result->push_back(Char);
+ break;
+ case 0: // Null.
+ // Found end of file?
+ if (CurPtr-1 != BufferEnd) {
+ if (isCodeCompletionPoint(CurPtr-1)) {
+ PP->CodeCompleteNaturalLanguage();
+ cutOffLexing();
+ return;
+ }
+
+ // Nope, normal character, continue.
+ if (Result)
+ Result->push_back(Char);
+ break;
+ }
+ // FALL THROUGH.
+ case '\r':
+ case '\n':
+ // Okay, we found the end of the line. First, back up past the \0, \r, \n.
+ assert(CurPtr[-1] == Char && "Trigraphs for newline?");
+ BufferPtr = CurPtr-1;
+
+ // Next, lex the character, which should handle the EOD transition.
+ Lex(Tmp);
+ if (Tmp.is(tok::code_completion)) {
+ if (PP)
+ PP->CodeCompleteNaturalLanguage();
+ Lex(Tmp);
+ }
+ assert(Tmp.is(tok::eod) && "Unexpected token!");
+
+ // Finally, we're done.
+ return;
+ }
+ }
+}
+
+/// LexEndOfFile - CurPtr points to the end of this file. Handle this
+/// condition, reporting diagnostics and handling other edge cases as required.
+/// This returns true if Result contains a token, false if PP.Lex should be
+/// called again.
+bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
+ // If we hit the end of the file while parsing a preprocessor directive,
+ // end the preprocessor directive first. The next token returned will
+ // then be the end of file.
+ if (ParsingPreprocessorDirective) {
+ // Done parsing the "line".
+ ParsingPreprocessorDirective = false;
+ // Update the location of token as well as BufferPtr.
+ FormTokenWithChars(Result, CurPtr, tok::eod);
+
+ // Restore comment saving mode, in case it was disabled for directive.
+ if (PP)
+ resetExtendedTokenMode();
+ return true; // Have a token.
+ }
+
+ // If we are in raw mode, return this event as an EOF token. Let the caller
+ // that put us in raw mode handle the event.
+ if (isLexingRawMode()) {
+ Result.startToken();
+ BufferPtr = BufferEnd;
+ FormTokenWithChars(Result, BufferEnd, tok::eof);
+ return true;
+ }
+
+ // Issue diagnostics for unterminated #if and missing newline.
+
+ // If we are in a #if directive, emit an error.
+ while (!ConditionalStack.empty()) {
+ if (PP->getCodeCompletionFileLoc() != FileLoc)
+ PP->Diag(ConditionalStack.back().IfLoc,
+ diag::err_pp_unterminated_conditional);
+ ConditionalStack.pop_back();
+ }
+
+ // C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue
+ // a pedwarn.
+ if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r')) {
+ DiagnosticsEngine &Diags = PP->getDiagnostics();
+ SourceLocation EndLoc = getSourceLocation(BufferEnd);
+ unsigned DiagID;
+
+ if (LangOpts.CPlusPlus11) {
+ // C++11 [lex.phases] 2.2 p2
+ // Prefer the C++98 pedantic compatibility warning over the generic,
+ // non-extension, user-requested "missing newline at EOF" warning.
+ if (!Diags.isIgnored(diag::warn_cxx98_compat_no_newline_eof, EndLoc)) {
+ DiagID = diag::warn_cxx98_compat_no_newline_eof;
+ } else {
+ DiagID = diag::warn_no_newline_eof;
+ }
+ } else {
+ DiagID = diag::ext_no_newline_eof;
+ }
+
+ Diag(BufferEnd, DiagID)
+ << FixItHint::CreateInsertion(EndLoc, "\n");
+ }
+
+ BufferPtr = CurPtr;
+
+ // Finally, let the preprocessor handle this.
+ return PP->HandleEndOfFile(Result, isPragmaLexer());
+}
+
+/// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from
+/// the specified lexer will return a tok::l_paren token, 0 if it is something
+/// else and 2 if there are no more tokens in the buffer controlled by the
+/// lexer.
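+///
+/// Typically used when deciding whether a function-like macro should be
+/// expanded, which requires a '(' as the next token.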
+unsigned Lexer::isNextPPTokenLParen() {
+ assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?");
+
+ // Switch to 'skipping' mode. This will ensure that we can lex a token
+ // without emitting diagnostics, disables macro expansion, and will cause EOF
+ // to return an EOF token instead of popping the include stack.
+ LexingRawMode = true;
+
+ // Save state that can be changed while lexing so that we can restore it.
+ const char *TmpBufferPtr = BufferPtr;
+ bool inPPDirectiveMode = ParsingPreprocessorDirective;
+ bool atStartOfLine = IsAtStartOfLine;
+ bool atPhysicalStartOfLine = IsAtPhysicalStartOfLine;
+ bool leadingSpace = HasLeadingSpace;
+
+ Token Tok;
+ Lex(Tok);
+
+ // Restore state that may have changed.
+ BufferPtr = TmpBufferPtr;
+ ParsingPreprocessorDirective = inPPDirectiveMode;
+ HasLeadingSpace = leadingSpace;
+ IsAtStartOfLine = atStartOfLine;
+ IsAtPhysicalStartOfLine = atPhysicalStartOfLine;
+
+ // Restore the lexer back to non-skipping mode.
+ LexingRawMode = false;
+
+ if (Tok.is(tok::eof))
+ return 2;
+ return Tok.is(tok::l_paren);
+}
+
+/// \brief Find the end of a version control conflict marker.
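+/// For a normal marker, the terminator is a line beginning with ">>>>>>>";
+/// for a Perforce marker, a line beginning with "<<<<".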
+static const char *FindConflictEnd(const char *CurPtr, const char *BufferEnd,
+ ConflictMarkerKind CMK) {
+ const char *Terminator = CMK == CMK_Perforce ? "<<<<\n" : ">>>>>>>";
+ size_t TermLen = CMK == CMK_Perforce ? 5 : 7;
+ StringRef RestOfBuffer(CurPtr+TermLen, BufferEnd-CurPtr-TermLen);
+ size_t Pos = RestOfBuffer.find(Terminator);
+ while (Pos != StringRef::npos) {
+ // Must occur at start of line.
+ if (Pos == 0 ||
+ (RestOfBuffer[Pos - 1] != '\r' && RestOfBuffer[Pos - 1] != '\n')) {
+ RestOfBuffer = RestOfBuffer.substr(Pos+TermLen);
+ Pos = RestOfBuffer.find(Terminator);
+ continue;
+ }
+ return RestOfBuffer.data()+Pos;
+ }
+ return nullptr;
+}
+
+/// IsStartOfConflictMarker - If the specified pointer is the start of a version
+/// control conflict marker like '<<<<<<<', recognize it as such, emit an error
+/// and recover nicely. This returns true if it is a conflict marker and false
+/// if not.
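+///
+/// e.g. a merge tool leaves markers like:
+///   <<<<<<< HEAD
+///   =======
+///   >>>>>>> branch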
+bool Lexer::IsStartOfConflictMarker(const char *CurPtr) {
+ // Only a conflict marker if it starts at the beginning of a line.
+ if (CurPtr != BufferStart &&
+ CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
+ return false;
+
+ // Check to see if we have <<<<<<< or >>>>.
+ if ((BufferEnd-CurPtr < 8 || StringRef(CurPtr, 7) != "<<<<<<<") &&
+ (BufferEnd-CurPtr < 6 || StringRef(CurPtr, 5) != ">>>> "))
+ return false;
+
+ // If we have a situation where we don't care about conflict markers, ignore
+ // it.
+ if (CurrentConflictMarkerState || isLexingRawMode())
+ return false;
+
+ ConflictMarkerKind Kind = *CurPtr == '<' ? CMK_Normal : CMK_Perforce;
+
+ // Check to see if there is an ending marker somewhere in the buffer at the
+ // start of a line to terminate this conflict marker.
+ if (FindConflictEnd(CurPtr, BufferEnd, Kind)) {
+ // We found a match. We are really in a conflict marker.
+ // Diagnose this, and ignore to the end of line.
+ Diag(CurPtr, diag::err_conflict_marker);
+ CurrentConflictMarkerState = Kind;
+
+ // Skip ahead to the end of line. We know this exists because the
+ // end-of-conflict marker starts with \r or \n.
+ while (*CurPtr != '\r' && *CurPtr != '\n') {
+ assert(CurPtr != BufferEnd && "Didn't find end of line");
+ ++CurPtr;
+ }
+ BufferPtr = CurPtr;
+ return true;
+ }
+
+ // No end of conflict marker found.
+ return false;
+}
+
+
+/// HandleEndOfConflictMarker - If this is a '====' or '||||' or '>>>>', or if
+/// it is '<<<<' and the conflict marker started with a '>>>>' marker, then it
+/// is the end of a conflict marker. Handle it by ignoring up until the end of
+/// the line. This returns true if it is a conflict marker and false if not.
+bool Lexer::HandleEndOfConflictMarker(const char *CurPtr) {
+ // Only a conflict marker if it starts at the beginning of a line.
+ if (CurPtr != BufferStart &&
+ CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
+ return false;
+
+ // If we have a situation where we don't care about conflict markers, ignore
+ // it.
+ if (!CurrentConflictMarkerState || isLexingRawMode())
+ return false;
+
+ // Check to see if we have the marker (4 characters in a row).
+ for (unsigned i = 1; i != 4; ++i)
+ if (CurPtr[i] != CurPtr[0])
+ return false;
+
+ // If we do have it, search for the end of the conflict marker. This could
+ // fail if it got skipped with a '#if 0' or something. Note that CurPtr might
+ // be the end of conflict marker.
+ if (const char *End = FindConflictEnd(CurPtr, BufferEnd,
+ CurrentConflictMarkerState)) {
+ CurPtr = End;
+
+ // Skip ahead to the end of line.
+ while (CurPtr != BufferEnd && *CurPtr != '\r' && *CurPtr != '\n')
+ ++CurPtr;
+
+ BufferPtr = CurPtr;
+
+ // No longer in the conflict marker.
+ CurrentConflictMarkerState = CMK_None;
+ return true;
+ }
+
+ return false;
+}
+
+bool Lexer::isCodeCompletionPoint(const char *CurPtr) const {
+ if (PP && PP->isCodeCompletionEnabled()) {
+ SourceLocation Loc = FileLoc.getLocWithOffset(CurPtr-BufferStart);
+ return Loc == PP->getCodeCompletionLoc();
+ }
+
+ return false;
+}
+
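+/// tryReadUCN - Try to read a universal character name after a backslash,
+/// e.g. \u00E9 or \U0001F600. Returns the code point, or 0 on failure
+/// (possibly after emitting a diagnostic).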
+uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
+ Token *Result) {
+ unsigned CharSize;
+ char Kind = getCharAndSize(StartPtr, CharSize);
+
+ unsigned NumHexDigits;
+ if (Kind == 'u')
+ NumHexDigits = 4;
+ else if (Kind == 'U')
+ NumHexDigits = 8;
+ else
+ return 0;
+
+ if (!LangOpts.CPlusPlus && !LangOpts.C99) {
+ if (Result && !isLexingRawMode())
+ Diag(SlashLoc, diag::warn_ucn_not_valid_in_c89);
+ return 0;
+ }
+
+ const char *CurPtr = StartPtr + CharSize;
+ const char *KindLoc = &CurPtr[-1];
+
+ uint32_t CodePoint = 0;
+ for (unsigned i = 0; i < NumHexDigits; ++i) {
+ char C = getCharAndSize(CurPtr, CharSize);
+
+ unsigned Value = llvm::hexDigitValue(C);
+ if (Value == -1U) {
+ if (Result && !isLexingRawMode()) {
+ if (i == 0) {
+ Diag(BufferPtr, diag::warn_ucn_escape_no_digits)
+ << StringRef(KindLoc, 1);
+ } else {
+ Diag(BufferPtr, diag::warn_ucn_escape_incomplete);
+
+ // If the user wrote \U1234, suggest a fixit to \u.
+ if (i == 4 && NumHexDigits == 8) {
+ CharSourceRange URange = makeCharRange(*this, KindLoc, KindLoc + 1);
+ Diag(KindLoc, diag::note_ucn_four_not_eight)
+ << FixItHint::CreateReplacement(URange, "u");
+ }
+ }
+ }
+
+ return 0;
+ }
+
+ CodePoint <<= 4;
+ CodePoint += Value;
+
+ CurPtr += CharSize;
+ }
+
+ if (Result) {
+ Result->setFlag(Token::HasUCN);
+ if (CurPtr - StartPtr == (ptrdiff_t)NumHexDigits + 2)
+ StartPtr = CurPtr;
+ else
+ while (StartPtr != CurPtr)
+ (void)getAndAdvanceChar(StartPtr, *Result);
+ } else {
+ StartPtr = CurPtr;
+ }
+
+ // Don't apply C family restrictions to UCNs in assembly mode
+ if (LangOpts.AsmPreprocessor)
+ return CodePoint;
+
+ // C99 6.4.3p2: A universal character name shall not specify a character whose
+ // short identifier is less than 00A0 other than 0024 ($), 0040 (@), or
+ // 0060 (`), nor one in the range D800 through DFFF inclusive.)
+ // C++11 [lex.charset]p2: If the hexadecimal value for a
+ // universal-character-name corresponds to a surrogate code point (in the
+ // range 0xD800-0xDFFF, inclusive), the program is ill-formed. Additionally,
+ // if the hexadecimal value for a universal-character-name outside the
+ // c-char-sequence, s-char-sequence, or r-char-sequence of a character or
+ // string literal corresponds to a control character (in either of the
+ // ranges 0x00-0x1F or 0x7F-0x9F, both inclusive) or to a character in the
+ // basic source character set, the program is ill-formed.
+ if (CodePoint < 0xA0) {
+ if (CodePoint == 0x24 || CodePoint == 0x40 || CodePoint == 0x60)
+ return CodePoint;
+
+ // We don't use isLexingRawMode() here because we need to warn about bad
+ // UCNs even when skipping preprocessing tokens in a #if block.
+ if (Result && PP) {
+ if (CodePoint < 0x20 || CodePoint >= 0x7F)
+ Diag(BufferPtr, diag::err_ucn_control_character);
+ else {
+ char C = static_cast<char>(CodePoint);
+ Diag(BufferPtr, diag::err_ucn_escape_basic_scs) << StringRef(&C, 1);
+ }
+ }
+
+ return 0;
+
+ } else if (CodePoint >= 0xD800 && CodePoint <= 0xDFFF) {
+ // C++03 allows UCNs representing surrogate characters. C99 and C++11 don't.
+ // We don't use isLexingRawMode() here because we need to diagnose bad
+ // UCNs even when skipping preprocessing tokens in a #if block.
+ if (Result && PP) {
+ if (LangOpts.CPlusPlus && !LangOpts.CPlusPlus11)
+ Diag(BufferPtr, diag::warn_ucn_escape_surrogate);
+ else
+ Diag(BufferPtr, diag::err_ucn_escape_invalid);
+ }
+ return 0;
+ }
+
+ return CodePoint;
+}
+
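+/// CheckUnicodeWhitespace - If C is a Unicode whitespace character (e.g.
+/// U+00A0 NO-BREAK SPACE), diagnose it and mark the token as having leading
+/// space; returns true if the character was handled as whitespace.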
+bool Lexer::CheckUnicodeWhitespace(Token &Result, uint32_t C,
+ const char *CurPtr) {
+ static const llvm::sys::UnicodeCharSet UnicodeWhitespaceChars(
+ UnicodeWhitespaceCharRanges);
+ if (!isLexingRawMode() && !PP->isPreprocessedOutput() &&
+ UnicodeWhitespaceChars.contains(C)) {
+ Diag(BufferPtr, diag::ext_unicode_whitespace)
+ << makeCharRange(*this, BufferPtr, CurPtr);
+
+ Result.setFlag(Token::LeadingSpace);
+ return true;
+ }
+ return false;
+}
+
+bool Lexer::LexUnicode(Token &Result, uint32_t C, const char *CurPtr) {
+ if (isAllowedIDChar(C, LangOpts) && isAllowedInitiallyIDChar(C, LangOpts)) {
+ if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
+ !PP->isPreprocessedOutput()) {
+ maybeDiagnoseIDCharCompat(PP->getDiagnostics(), C,
+ makeCharRange(*this, BufferPtr, CurPtr),
+ /*IsFirst=*/true);
+ }
+
+ MIOpt.ReadToken();
+ return LexIdentifier(Result, CurPtr);
+ }
+
+ if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
+ !PP->isPreprocessedOutput() &&
+ !isASCII(*BufferPtr) && !isAllowedIDChar(C, LangOpts)) {
+ // Non-ASCII characters tend to creep into source code unintentionally.
+ // Instead of letting the parser complain about the unknown token,
+ // just drop the character.
+ // Note that we can /only/ do this when the non-ASCII character is actually
+ // spelled as Unicode, not written as a UCN. The standard requires that
+ // we not throw away any possible preprocessor tokens, but there's a
+ // loophole in the mapping of Unicode characters to basic character set
+ // characters that allows us to map these particular characters to, say,
+ // whitespace.
+ Diag(BufferPtr, diag::err_non_ascii)
+ << FixItHint::CreateRemoval(makeCharRange(*this, BufferPtr, CurPtr));
+
+ BufferPtr = CurPtr;
+ return false;
+ }
+
+ // Otherwise, we have an explicit UCN or a character that's unlikely to show
+ // up by accident.
+ MIOpt.ReadToken();
+ FormTokenWithChars(Result, CurPtr, tok::unknown);
+ return true;
+}
+
+void Lexer::PropagateLineStartLeadingSpaceInfo(Token &Result) {
+ IsAtStartOfLine = Result.isAtStartOfLine();
+ HasLeadingSpace = Result.hasLeadingSpace();
+ HasLeadingEmptyMacro = Result.hasLeadingEmptyMacro();
+ // Note that this doesn't affect IsAtPhysicalStartOfLine.
+}
+
+bool Lexer::Lex(Token &Result) {
+ // Start a new token.
+ Result.startToken();
+
+ // Set up misc whitespace flags for LexTokenInternal.
+ if (IsAtStartOfLine) {
+ Result.setFlag(Token::StartOfLine);
+ IsAtStartOfLine = false;
+ }
+
+ if (HasLeadingSpace) {
+ Result.setFlag(Token::LeadingSpace);
+ HasLeadingSpace = false;
+ }
+
+ if (HasLeadingEmptyMacro) {
+ Result.setFlag(Token::LeadingEmptyMacro);
+ HasLeadingEmptyMacro = false;
+ }
+
+ bool atPhysicalStartOfLine = IsAtPhysicalStartOfLine;
+ IsAtPhysicalStartOfLine = false;
+ bool isRawLex = isLexingRawMode();
+ (void) isRawLex;
+ bool returnedToken = LexTokenInternal(Result, atPhysicalStartOfLine);
+ // (After the LexTokenInternal call, the lexer might be destroyed.)
+ assert((returnedToken || !isRawLex) && "Raw lex must succeed");
+ return returnedToken;
+}
+
+/// LexTokenInternal - This implements a simple C family lexer. It is an
+/// extremely performance critical piece of code. This assumes that the buffer
+/// has a null character at the end of the file. This returns a preprocessing
+/// token, not a normal token; as such, it is an internal interface. It assumes
+/// that the Flags of result have been cleared before calling this.
+bool Lexer::LexTokenInternal(Token &Result, bool TokAtPhysicalStartOfLine) {
+LexNextToken:
+ // New token, can't need cleaning yet.
+ Result.clearFlag(Token::NeedsCleaning);
+ Result.setIdentifierInfo(nullptr);
+
+ // CurPtr - Cache BufferPtr in an automatic variable.
+ const char *CurPtr = BufferPtr;
+
+ // Small amounts of horizontal whitespace are very common between tokens.
+ if ((*CurPtr == ' ') || (*CurPtr == '\t')) {
+ ++CurPtr;
+ while ((*CurPtr == ' ') || (*CurPtr == '\t'))
+ ++CurPtr;
+
+ // If we are keeping whitespace and other tokens, just return what we just
+ // skipped. The next lexer invocation will return the token after the
+ // whitespace.
+ if (isKeepWhitespaceMode()) {
+ FormTokenWithChars(Result, CurPtr, tok::unknown);
+ // FIXME: The next token will not have LeadingSpace set.
+ return true;
+ }
+
+ BufferPtr = CurPtr;
+ Result.setFlag(Token::LeadingSpace);
+ }
+
+ unsigned SizeTmp, SizeTmp2; // Temporaries for use in cases below.
+
+ // Read a character, advancing over it.
+ char Char = getAndAdvanceChar(CurPtr, Result);
+ tok::TokenKind Kind;
+
+ switch (Char) {
+ case 0: // Null.
+ // Found end of file?
+ if (CurPtr-1 == BufferEnd)
+ return LexEndOfFile(Result, CurPtr-1);
+
+ // Check if we are performing code completion.
+ if (isCodeCompletionPoint(CurPtr-1)) {
+ // Return the code-completion token.
+ Result.startToken();
+ FormTokenWithChars(Result, CurPtr, tok::code_completion);
+ return true;
+ }
+
+ if (!isLexingRawMode())
+ Diag(CurPtr-1, diag::null_in_file);
+ Result.setFlag(Token::LeadingSpace);
+ if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
+ return true; // KeepWhitespaceMode
+
+ // We know the lexer hasn't changed, so just try again with this lexer.
+ // (We manually eliminate the tail call to avoid recursion.)
+ goto LexNextToken;
+
+ case 26: // DOS & CP/M EOF: "^Z".
+ // If we're in Microsoft extensions mode, treat this as end of file.
+ if (LangOpts.MicrosoftExt) {
+ if (!isLexingRawMode())
+ Diag(CurPtr-1, diag::ext_ctrl_z_eof_microsoft);
+ return LexEndOfFile(Result, CurPtr-1);
+ }
+
+ // If Microsoft extensions are disabled, this is just random garbage.
+ Kind = tok::unknown;
+ break;
+
+ case '\n':
+ case '\r':
+ // If we are inside a preprocessor directive and we see the end of line,
+ // we know we are done with the directive, so return an EOD token.
+ if (ParsingPreprocessorDirective) {
+ // Done parsing the "line".
+ ParsingPreprocessorDirective = false;
+
+ // Restore comment saving mode, in case it was disabled for directive.
+ if (PP)
+ resetExtendedTokenMode();
+
+ // Since we consumed a newline, we are back at the start of a line.
+ IsAtStartOfLine = true;
+ IsAtPhysicalStartOfLine = true;
+
+ Kind = tok::eod;
+ break;
+ }
+
+ // No leading whitespace seen so far.
+ Result.clearFlag(Token::LeadingSpace);
+
+ if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
+ return true; // KeepWhitespaceMode
+
+ // We only saw whitespace, so just try again with this lexer.
+ // (We manually eliminate the tail call to avoid recursion.)
+ goto LexNextToken;
+ case ' ':
+ case '\t':
+ case '\f':
+ case '\v':
+ SkipHorizontalWhitespace:
+ Result.setFlag(Token::LeadingSpace);
+ if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
+ return true; // KeepWhitespaceMode
+
+ SkipIgnoredUnits:
+ CurPtr = BufferPtr;
+
+ // If the next token is obviously a // or /* */ comment, skip it efficiently
+ // too (without going through the big switch stmt).
+ if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode() &&
+ LangOpts.LineComment &&
+ (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP)) {
+ if (SkipLineComment(Result, CurPtr+2, TokAtPhysicalStartOfLine))
+ return true; // There is a token to return.
+ goto SkipIgnoredUnits;
+ } else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !inKeepCommentMode()) {
+ if (SkipBlockComment(Result, CurPtr+2, TokAtPhysicalStartOfLine))
+ return true; // There is a token to return.
+ goto SkipIgnoredUnits;
+ } else if (isHorizontalWhitespace(*CurPtr)) {
+ goto SkipHorizontalWhitespace;
+ }
+ // We only saw whitespace, so just try again with this lexer.
+ // (We manually eliminate the tail call to avoid recursion.)
+ goto LexNextToken;
+
+ // C99 6.4.4.1: Integer Constants.
+ // C99 6.4.4.2: Floating Constants.
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+ return LexNumericConstant(Result, CurPtr);
+
+ case 'u': // Identifier (uber) or C11/C++11 UTF-8 or UTF-16 string literal
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+
+ if (LangOpts.CPlusPlus11 || LangOpts.C11) {
+ Char = getCharAndSize(CurPtr, SizeTmp);
+
+ // UTF-16 string literal
+ if (Char == '"')
+ return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
+ tok::utf16_string_literal);
+
+ // UTF-16 character constant
+ if (Char == '\'')
+ return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
+ tok::utf16_char_constant);
+
+ // UTF-16 raw string literal
+ if (Char == 'R' && LangOpts.CPlusPlus11 &&
+ getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
+ return LexRawStringLiteral(Result,
+ ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result),
+ tok::utf16_string_literal);
+
+ if (Char == '8') {
+ char Char2 = getCharAndSize(CurPtr + SizeTmp, SizeTmp2);
+
+ // UTF-8 string literal
+ if (Char2 == '"')
+ return LexStringLiteral(Result,
+ ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result),
+ tok::utf8_string_literal);
+ if (Char2 == '\'' && LangOpts.CPlusPlus1z)
+ return LexCharConstant(
+ Result, ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result),
+ tok::utf8_char_constant);
+
+ if (Char2 == 'R' && LangOpts.CPlusPlus11) {
+ unsigned SizeTmp3;
+ char Char3 = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3);
+ // UTF-8 raw string literal
+ if (Char3 == '"') {
+ return LexRawStringLiteral(Result,
+ ConsumeChar(ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result),
+ SizeTmp3, Result),
+ tok::utf8_string_literal);
+ }
+ }
+ }
+ }
+
+ // treat u like the start of an identifier.
+ return LexIdentifier(Result, CurPtr);
+
+ case 'U': // Identifier (Uber) or C11/C++11 UTF-32 string literal
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+
+ if (LangOpts.CPlusPlus11 || LangOpts.C11) {
+ Char = getCharAndSize(CurPtr, SizeTmp);
+
+ // UTF-32 string literal
+ if (Char == '"')
+ return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
+ tok::utf32_string_literal);
+
+ // UTF-32 character constant
+ if (Char == '\'')
+ return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
+ tok::utf32_char_constant);
+
+ // UTF-32 raw string literal
+ if (Char == 'R' && LangOpts.CPlusPlus11 &&
+ getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
+ return LexRawStringLiteral(Result,
+ ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result),
+ tok::utf32_string_literal);
+ }
+
+ // treat U like the start of an identifier.
+ return LexIdentifier(Result, CurPtr);
+
+ case 'R': // Identifier or C++0x raw string literal
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+
+ if (LangOpts.CPlusPlus11) {
+ Char = getCharAndSize(CurPtr, SizeTmp);
+
+ if (Char == '"')
+ return LexRawStringLiteral(Result,
+ ConsumeChar(CurPtr, SizeTmp, Result),
+ tok::string_literal);
+ }
+
+ // treat R like the start of an identifier.
+ return LexIdentifier(Result, CurPtr);
+
+ case 'L': // Identifier (Loony) or wide literal (L'x' or L"xyz").
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+ Char = getCharAndSize(CurPtr, SizeTmp);
+
+ // Wide string literal.
+ if (Char == '"')
+ return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
+ tok::wide_string_literal);
+
+ // Wide raw string literal.
+ if (LangOpts.CPlusPlus11 && Char == 'R' &&
+ getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
+ return LexRawStringLiteral(Result,
+ ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result),
+ tok::wide_string_literal);
+
+ // Wide character constant.
+ if (Char == '\'')
+ return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
+ tok::wide_char_constant);
+ // FALL THROUGH, treating L like the start of an identifier.
+
+ // C99 6.4.2: Identifiers.
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G':
+ case 'H': case 'I': case 'J': case 'K': /*'L'*/case 'M': case 'N':
+ case 'O': case 'P': case 'Q': /*'R'*/case 'S': case 'T': /*'U'*/
+ case 'V': case 'W': case 'X': case 'Y': case 'Z':
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g':
+ case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n':
+ case 'o': case 'p': case 'q': case 'r': case 's': case 't': /*'u'*/
+ case 'v': case 'w': case 'x': case 'y': case 'z':
+ case '_':
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+ return LexIdentifier(Result, CurPtr);
+
+ case '$': // $ in identifiers.
+ if (LangOpts.DollarIdents) {
+ if (!isLexingRawMode())
+ Diag(CurPtr-1, diag::ext_dollar_in_identifier);
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+ return LexIdentifier(Result, CurPtr);
+ }
+
+ Kind = tok::unknown;
+ break;
+
+ // C99 6.4.4: Character Constants.
+ case '\'':
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+ return LexCharConstant(Result, CurPtr, tok::char_constant);
+
+ // C99 6.4.5: String Literals.
+ case '"':
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+ return LexStringLiteral(Result, CurPtr, tok::string_literal);
+
+ // C99 6.4.6: Punctuators.
+ case '?':
+ Kind = tok::question;
+ break;
+ case '[':
+ Kind = tok::l_square;
+ break;
+ case ']':
+ Kind = tok::r_square;
+ break;
+ case '(':
+ Kind = tok::l_paren;
+ break;
+ case ')':
+ Kind = tok::r_paren;
+ break;
+ case '{':
+ Kind = tok::l_brace;
+ break;
+ case '}':
+ Kind = tok::r_brace;
+ break;
+ case '.':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char >= '0' && Char <= '9') {
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+
+ return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
+ } else if (LangOpts.CPlusPlus && Char == '*') {
+ Kind = tok::periodstar;
+ CurPtr += SizeTmp;
+ } else if (Char == '.' &&
+ getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') {
+ Kind = tok::ellipsis;
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
+ } else {
+ Kind = tok::period;
+ }
+ break;
+ case '&':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '&') {
+ Kind = tok::ampamp;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else if (Char == '=') {
+ Kind = tok::ampequal;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ Kind = tok::amp;
+ }
+ break;
+ case '*':
+ if (getCharAndSize(CurPtr, SizeTmp) == '=') {
+ Kind = tok::starequal;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ Kind = tok::star;
+ }
+ break;
+ case '+':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '+') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::plusplus;
+ } else if (Char == '=') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::plusequal;
+ } else {
+ Kind = tok::plus;
+ }
+ break;
+ case '-':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '-') { // --
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::minusminus;
+ } else if (Char == '>' && LangOpts.CPlusPlus &&
+ getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') { // C++ ->*
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
+ Kind = tok::arrowstar;
+ } else if (Char == '>') { // ->
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::arrow;
+ } else if (Char == '=') { // -=
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::minusequal;
+ } else {
+ Kind = tok::minus;
+ }
+ break;
+ case '~':
+ Kind = tok::tilde;
+ break;
+ case '!':
+ if (getCharAndSize(CurPtr, SizeTmp) == '=') {
+ Kind = tok::exclaimequal;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ Kind = tok::exclaim;
+ }
+ break;
+ case '/':
+ // 6.4.9: Comments
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '/') { // Line comment.
+ // Even if Line comments are disabled (e.g. in C89 mode), we generally
+ // want to lex this as a comment. There is one problem with this, though:
+ // in one particular corner case, it can change the behavior of the
+ // resulting program. For example, in "foo //**/ bar", C89 would lex
+ // this as "foo / bar" and languages with line comments would lex it as
+ // "foo". Check to see if the character after the second slash is a '*'.
+ // If so, we will lex that as a "/" instead of the start of a comment.
+ // However, we never do this if we are just preprocessing.
+ bool TreatAsComment = LangOpts.LineComment &&
+ (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP);
+ if (!TreatAsComment)
+ if (!(PP && PP->isPreprocessedOutput()))
+ TreatAsComment = getCharAndSize(CurPtr+SizeTmp, SizeTmp2) != '*';
+
+ if (TreatAsComment) {
+ if (SkipLineComment(Result, ConsumeChar(CurPtr, SizeTmp, Result),
+ TokAtPhysicalStartOfLine))
+ return true; // There is a token to return.
+
+ // It is common for the tokens immediately after a // comment to be
+ // whitespace (indentation for the next line). Instead of going through
+ // the big switch, handle it efficiently now.
+ goto SkipIgnoredUnits;
+ }
+ }
+
+ if (Char == '*') { // /**/ comment.
+ if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result),
+ TokAtPhysicalStartOfLine))
+ return true; // There is a token to return.
+
+ // We only saw whitespace, so just try again with this lexer.
+ // (We manually eliminate the tail call to avoid recursion.)
+ goto LexNextToken;
+ }
+
+ if (Char == '=') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::slashequal;
+ } else {
+ Kind = tok::slash;
+ }
+ break;
+ case '%':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '=') {
+ Kind = tok::percentequal;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else if (LangOpts.Digraphs && Char == '>') {
+ Kind = tok::r_brace; // '%>' -> '}'
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else if (LangOpts.Digraphs && Char == ':') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') {
+ Kind = tok::hashhash; // '%:%:' -> '##'
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
+ } else if (Char == '@' && LangOpts.MicrosoftExt) { // %:@ -> #@ -> Charize
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ if (!isLexingRawMode())
+ Diag(BufferPtr, diag::ext_charize_microsoft);
+ Kind = tok::hashat;
+ } else { // '%:' -> '#'
+ // We parsed a # character. If this occurs at the start of the line,
+ // it's actually the start of a preprocessing directive. Callback to
+ // the preprocessor to handle it.
+ // TODO: -fpreprocessed mode??
+ if (TokAtPhysicalStartOfLine && !LexingRawMode && !Is_PragmaLexer)
+ goto HandleDirective;
+
+ Kind = tok::hash;
+ }
+ } else {
+ Kind = tok::percent;
+ }
+ break;
+ case '<':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (ParsingFilename) {
+ return LexAngledStringLiteral(Result, CurPtr);
+ } else if (Char == '<') {
+ char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
+ if (After == '=') {
+ Kind = tok::lesslessequal;
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
+ } else if (After == '<' && IsStartOfConflictMarker(CurPtr-1)) {
+ // If this is actually a '<<<<<<<' version control conflict marker,
+ // recognize it as such and recover nicely.
+ goto LexNextToken;
+ } else if (After == '<' && HandleEndOfConflictMarker(CurPtr-1)) {
+ // If this is '<<<<' and we're in a Perforce-style conflict marker,
+ // ignore it.
+ goto LexNextToken;
+ } else if (LangOpts.CUDA && After == '<') {
+ Kind = tok::lesslessless;
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
+ } else {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::lessless;
+ }
+ } else if (Char == '=') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::lessequal;
+ } else if (LangOpts.Digraphs && Char == ':') { // '<:' -> '['
+ if (LangOpts.CPlusPlus11 &&
+ getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == ':') {
+ // C++0x [lex.pptoken]p3:
+ // Otherwise, if the next three characters are <:: and the subsequent
+ // character is neither : nor >, the < is treated as a preprocessor
+ // token by itself and not as the first character of the alternative
+ // token <:.
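+ // For example, "A<::B" lexes as "A < ::B", while "A<:::B" lexes as
+ // "A [ :: B" and "A<::>" as "A [ ]".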
+ unsigned SizeTmp3;
+ char After = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3);
+ if (After != ':' && After != '>') {
+ Kind = tok::less;
+ if (!isLexingRawMode())
+ Diag(BufferPtr, diag::warn_cxx98_compat_less_colon_colon);
+ break;
+ }
+ }
+
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::l_square;
+ } else if (LangOpts.Digraphs && Char == '%') { // '<%' -> '{'
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::l_brace;
+ } else {
+ Kind = tok::less;
+ }
+ break;
+ case '>':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '=') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::greaterequal;
+ } else if (Char == '>') {
+ char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
+ if (After == '=') {
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
+ Kind = tok::greatergreaterequal;
+ } else if (After == '>' && IsStartOfConflictMarker(CurPtr-1)) {
+ // If this is actually a '>>>>' conflict marker, recognize it as such
+ // and recover nicely.
+ goto LexNextToken;
+ } else if (After == '>' && HandleEndOfConflictMarker(CurPtr-1)) {
+ // If this is '>>>>>>>' and we're in a conflict marker, ignore it.
+ goto LexNextToken;
+ } else if (LangOpts.CUDA && After == '>') {
+ Kind = tok::greatergreatergreater;
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
+ } else {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::greatergreater;
+ }
+
+ } else {
+ Kind = tok::greater;
+ }
+ break;
+ case '^':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '=') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::caretequal;
+ } else {
+ Kind = tok::caret;
+ }
+ break;
+ case '|':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '=') {
+ Kind = tok::pipeequal;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else if (Char == '|') {
+ // If this is '|||||||' and we're in a conflict marker, ignore it.
+ if (CurPtr[1] == '|' && HandleEndOfConflictMarker(CurPtr-1))
+ goto LexNextToken;
+ Kind = tok::pipepipe;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ Kind = tok::pipe;
+ }
+ break;
+ case ':':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (LangOpts.Digraphs && Char == '>') {
+ Kind = tok::r_square; // ':>' -> ']'
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else if (LangOpts.CPlusPlus && Char == ':') {
+ Kind = tok::coloncolon;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ Kind = tok::colon;
+ }
+ break;
+ case ';':
+ Kind = tok::semi;
+ break;
+ case '=':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '=') {
+ // If this is '====' and we're in a conflict marker, ignore it.
+ if (CurPtr[1] == '=' && HandleEndOfConflictMarker(CurPtr-1))
+ goto LexNextToken;
+
+ Kind = tok::equalequal;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ Kind = tok::equal;
+ }
+ break;
+ case ',':
+ Kind = tok::comma;
+ break;
+ case '#':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '#') {
+ Kind = tok::hashhash;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else if (Char == '@' && LangOpts.MicrosoftExt) { // #@ -> Charize
+ Kind = tok::hashat;
+ if (!isLexingRawMode())
+ Diag(BufferPtr, diag::ext_charize_microsoft);
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ // We parsed a # character. If this occurs at the start of the line,
+ // it's actually the start of a preprocessing directive. Callback to
+ // the preprocessor to handle it.
+ // TODO: -fpreprocessed mode??
+ if (TokAtPhysicalStartOfLine && !LexingRawMode && !Is_PragmaLexer)
+ goto HandleDirective;
+
+ Kind = tok::hash;
+ }
+ break;
+
+ case '@':
+ // Objective C support.
+ if (CurPtr[-1] == '@' && LangOpts.ObjC1)
+ Kind = tok::at;
+ else
+ Kind = tok::unknown;
+ break;
+
+ // UCNs (C99 6.4.3, C++11 [lex.charset]p2)
+ case '\\':
+ if (uint32_t CodePoint = tryReadUCN(CurPtr, BufferPtr, &Result)) {
+ if (CheckUnicodeWhitespace(Result, CodePoint, CurPtr)) {
+ if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
+ return true; // KeepWhitespaceMode
+
+ // We only saw whitespace, so just try again with this lexer.
+ // (We manually eliminate the tail call to avoid recursion.)
+ goto LexNextToken;
+ }
+
+ return LexUnicode(Result, CodePoint, CurPtr);
+ }
+
+ Kind = tok::unknown;
+ break;
+
+ default: {
+ if (isASCII(Char)) {
+ Kind = tok::unknown;
+ break;
+ }
+
+ UTF32 CodePoint;
+
+ // We can't just reset CurPtr to BufferPtr because BufferPtr may point to
+ // an escaped newline.
+ --CurPtr;
+ ConversionResult Status =
+ llvm::convertUTF8Sequence((const UTF8 **)&CurPtr,
+ (const UTF8 *)BufferEnd,
+ &CodePoint,
+ strictConversion);
+ if (Status == conversionOK) {
+ if (CheckUnicodeWhitespace(Result, CodePoint, CurPtr)) {
+ if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
+ return true; // KeepWhitespaceMode
+
+ // We only saw whitespace, so just try again with this lexer.
+ // (We manually eliminate the tail call to avoid recursion.)
+ goto LexNextToken;
+ }
+ return LexUnicode(Result, CodePoint, CurPtr);
+ }
+
+ if (isLexingRawMode() || ParsingPreprocessorDirective ||
+ PP->isPreprocessedOutput()) {
+ ++CurPtr;
+ Kind = tok::unknown;
+ break;
+ }
+
+ // Non-ASCII characters tend to creep into source code unintentionally.
+ // Instead of letting the parser complain about the unknown token,
+ // just diagnose the invalid UTF-8, then drop the character.
+ Diag(CurPtr, diag::err_invalid_utf8);
+
+ BufferPtr = CurPtr+1;
+ // We're pretending the character didn't exist, so just try again with
+ // this lexer.
+ // (We manually eliminate the tail call to avoid recursion.)
+ goto LexNextToken;
+ }
+ }
+
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+
+ // Update the location of token as well as BufferPtr.
+ FormTokenWithChars(Result, CurPtr, Kind);
+ return true;
+
+HandleDirective:
+ // We parsed a # character and it's the start of a preprocessing directive.
+
+ FormTokenWithChars(Result, CurPtr, tok::hash);
+ PP->HandleDirective(Result);
+
+ if (PP->hadModuleLoaderFatalFailure()) {
+ // With a fatal failure in the module loader, we abort parsing.
+ assert(Result.is(tok::eof) && "Preprocessor did not set tok::eof");
+ return true;
+ }
+
+ // We parsed the directive; lex a token with the new state.
+ return false;
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp b/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
new file mode 100644
index 0000000..1e7858a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
@@ -0,0 +1,1685 @@
+//===--- LiteralSupport.cpp - Code to parse and process literals ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the NumericLiteralParser, CharLiteralParser, and
+// StringLiteralParser interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace clang;
+
+static unsigned getCharWidth(tok::TokenKind kind, const TargetInfo &Target) {
+ switch (kind) {
+ default: llvm_unreachable("Unknown token type!");
+ case tok::char_constant:
+ case tok::string_literal:
+ case tok::utf8_char_constant:
+ case tok::utf8_string_literal:
+ return Target.getCharWidth();
+ case tok::wide_char_constant:
+ case tok::wide_string_literal:
+ return Target.getWCharWidth();
+ case tok::utf16_char_constant:
+ case tok::utf16_string_literal:
+ return Target.getChar16Width();
+ case tok::utf32_char_constant:
+ case tok::utf32_string_literal:
+ return Target.getChar32Width();
+ }
+}
+
+static CharSourceRange MakeCharSourceRange(const LangOptions &Features,
+ FullSourceLoc TokLoc,
+ const char *TokBegin,
+ const char *TokRangeBegin,
+ const char *TokRangeEnd) {
+ SourceLocation Begin =
+ Lexer::AdvanceToTokenCharacter(TokLoc, TokRangeBegin - TokBegin,
+ TokLoc.getManager(), Features);
+ SourceLocation End =
+ Lexer::AdvanceToTokenCharacter(Begin, TokRangeEnd - TokRangeBegin,
+ TokLoc.getManager(), Features);
+ return CharSourceRange::getCharRange(Begin, End);
+}
+
+/// \brief Produce a diagnostic highlighting some portion of a literal.
+///
+/// Emits the diagnostic \p DiagID, highlighting the range of characters from
+/// \p TokRangeBegin (inclusive) to \p TokRangeEnd (exclusive), which must be
+/// a substring of a spelling buffer for the token beginning at \p TokBegin.
+static DiagnosticBuilder Diag(DiagnosticsEngine *Diags,
+ const LangOptions &Features, FullSourceLoc TokLoc,
+ const char *TokBegin, const char *TokRangeBegin,
+ const char *TokRangeEnd, unsigned DiagID) {
+ SourceLocation Begin =
+ Lexer::AdvanceToTokenCharacter(TokLoc, TokRangeBegin - TokBegin,
+ TokLoc.getManager(), Features);
+ return Diags->Report(Begin, DiagID) <<
+ MakeCharSourceRange(Features, TokLoc, TokBegin, TokRangeBegin, TokRangeEnd);
+}
+
+/// ProcessCharEscape - Parse a standard C escape sequence, which can occur in
+/// either a character or a string literal.
+static unsigned ProcessCharEscape(const char *ThisTokBegin,
+ const char *&ThisTokBuf,
+ const char *ThisTokEnd, bool &HadError,
+ FullSourceLoc Loc, unsigned CharWidth,
+ DiagnosticsEngine *Diags,
+ const LangOptions &Features) {
+ const char *EscapeBegin = ThisTokBuf;
+
+ // Skip the '\' char.
+ ++ThisTokBuf;
+
+ // We know that this character can't be off the end of the buffer, because
+ // that would have been \", which would not have been the end of string.
+ unsigned ResultChar = *ThisTokBuf++;
+ switch (ResultChar) {
+ // These map to themselves.
+ case '\\': case '\'': case '"': case '?': break;
+
+ // These have fixed mappings.
+ case 'a':
+ // TODO: K&R: the meaning of '\\a' is different in traditional C
+ ResultChar = 7;
+ break;
+ case 'b':
+ ResultChar = 8;
+ break;
+ case 'e':
+ if (Diags)
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::ext_nonstandard_escape) << "e";
+ ResultChar = 27;
+ break;
+ case 'E':
+ if (Diags)
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::ext_nonstandard_escape) << "E";
+ ResultChar = 27;
+ break;
+ case 'f':
+ ResultChar = 12;
+ break;
+ case 'n':
+ ResultChar = 10;
+ break;
+ case 'r':
+ ResultChar = 13;
+ break;
+ case 't':
+ ResultChar = 9;
+ break;
+ case 'v':
+ ResultChar = 11;
+ break;
+ case 'x': { // Hex escape.
+ ResultChar = 0;
+ if (ThisTokBuf == ThisTokEnd || !isHexDigit(*ThisTokBuf)) {
+ if (Diags)
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::err_hex_escape_no_digits) << "x";
+ HadError = true;
+ break;
+ }
+
+ // Hex escapes are a maximal series of hex digits.
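+ // For example, "\x41" yields 'A' on an ASCII target; a long escape such
+ // as "\x12345" keeps consuming digits and is diagnosed below if the value
+ // cannot fit in CharWidth bits.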
+ bool Overflow = false;
+ for (; ThisTokBuf != ThisTokEnd; ++ThisTokBuf) {
+ int CharVal = llvm::hexDigitValue(ThisTokBuf[0]);
+ if (CharVal == -1) break;
+ // About to shift out a digit?
+ if (ResultChar & 0xF0000000)
+ Overflow = true;
+ ResultChar <<= 4;
+ ResultChar |= CharVal;
+ }
+
+ // See if any bits will be truncated when evaluated as a character.
+ if (CharWidth != 32 && (ResultChar >> CharWidth) != 0) {
+ Overflow = true;
+ ResultChar &= ~0U >> (32-CharWidth);
+ }
+
+ // Check for overflow.
+ if (Overflow && Diags) // Too many digits to fit in
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::err_escape_too_large) << 0;
+ break;
+ }
+ case '0': case '1': case '2': case '3':
+ case '4': case '5': case '6': case '7': {
+ // Octal escapes.
+ --ThisTokBuf;
+ ResultChar = 0;
+
+ // Octal escapes are a series of octal digits with maximum length 3.
+ // "\0123" is a two digit sequence equal to "\012" "3".
+ unsigned NumDigits = 0;
+ do {
+ ResultChar <<= 3;
+ ResultChar |= *ThisTokBuf++ - '0';
+ ++NumDigits;
+ } while (ThisTokBuf != ThisTokEnd && NumDigits < 3 &&
+ ThisTokBuf[0] >= '0' && ThisTokBuf[0] <= '7');
+
+ // Check for overflow. Reject '\777', but not L'\777'.
+ if (CharWidth != 32 && (ResultChar >> CharWidth) != 0) {
+ if (Diags)
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::err_escape_too_large) << 1;
+ ResultChar &= ~0U >> (32-CharWidth);
+ }
+ break;
+ }
+
+ // Otherwise, these are not valid escapes.
+ case '(': case '{': case '[': case '%':
+ // GCC accepts these as extensions. We warn about them as such though.
+ if (Diags)
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::ext_nonstandard_escape)
+ << std::string(1, ResultChar);
+ break;
+ default:
+ if (!Diags)
+ break;
+
+ if (isPrintable(ResultChar))
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::ext_unknown_escape)
+ << std::string(1, ResultChar);
+ else
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::ext_unknown_escape)
+ << "x" + llvm::utohexstr(ResultChar);
+ break;
+ }
+
+ return ResultChar;
+}
+
+static void appendCodePoint(unsigned Codepoint,
+ llvm::SmallVectorImpl<char> &Str) {
+ char ResultBuf[4];
+ char *ResultPtr = ResultBuf;
+ bool Res = llvm::ConvertCodePointToUTF8(Codepoint, ResultPtr);
+ (void)Res;
+ assert(Res && "Unexpected conversion failure");
+ Str.append(ResultBuf, ResultPtr);
+}
+
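+/// Copy Input into Buf, replacing each \u or \U escape with the UTF-8
+/// encoding of its code point; e.g. "a\u00E9" becomes "a" followed by the
+/// bytes 0xC3 0xA9 (illustrative).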
+void clang::expandUCNs(SmallVectorImpl<char> &Buf, StringRef Input) {
+ for (StringRef::iterator I = Input.begin(), E = Input.end(); I != E; ++I) {
+ if (*I != '\\') {
+ Buf.push_back(*I);
+ continue;
+ }
+
+ ++I;
+ assert(*I == 'u' || *I == 'U');
+
+ unsigned NumHexDigits;
+ if (*I == 'u')
+ NumHexDigits = 4;
+ else
+ NumHexDigits = 8;
+
+ assert(I + NumHexDigits <= E);
+
+ uint32_t CodePoint = 0;
+ for (++I; NumHexDigits != 0; ++I, --NumHexDigits) {
+ unsigned Value = llvm::hexDigitValue(*I);
+ assert(Value != -1U);
+
+ CodePoint <<= 4;
+ CodePoint += Value;
+ }
+
+ appendCodePoint(CodePoint, Buf);
+ --I;
+ }
+}
+
+/// ProcessUCNEscape - Read the Universal Character Name, check constraints and
+/// return the UTF32.
+static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
+ const char *ThisTokEnd,
+ uint32_t &UcnVal, unsigned short &UcnLen,
+ FullSourceLoc Loc, DiagnosticsEngine *Diags,
+ const LangOptions &Features,
+ bool in_char_string_literal = false) {
+ const char *UcnBegin = ThisTokBuf;
+
+ // Skip the '\u' char's.
+ ThisTokBuf += 2;
+
+ if (ThisTokBuf == ThisTokEnd || !isHexDigit(*ThisTokBuf)) {
+ if (Diags)
+ Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
+ diag::err_hex_escape_no_digits) << StringRef(&ThisTokBuf[-1], 1);
+ return false;
+ }
+ UcnLen = (ThisTokBuf[-1] == 'u' ? 4 : 8);
+ unsigned short UcnLenSave = UcnLen;
+ for (; ThisTokBuf != ThisTokEnd && UcnLenSave; ++ThisTokBuf, UcnLenSave--) {
+ int CharVal = llvm::hexDigitValue(ThisTokBuf[0]);
+ if (CharVal == -1) break;
+ UcnVal <<= 4;
+ UcnVal |= CharVal;
+ }
+ // If we didn't consume the proper number of digits, there is a problem.
+ if (UcnLenSave) {
+ if (Diags)
+ Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
+ diag::err_ucn_escape_incomplete);
+ return false;
+ }
+
+ // Check UCN constraints (C99 6.4.3p2) [C++11 lex.charset p2]
+ if ((0xD800 <= UcnVal && UcnVal <= 0xDFFF) || // surrogate codepoints
+ UcnVal > 0x10FFFF) { // maximum legal UTF32 value
+ if (Diags)
+ Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
+ diag::err_ucn_escape_invalid);
+ return false;
+ }
+
+ // C++11 allows UCNs that refer to control characters and basic source
+ // characters inside character and string literals
+ if (UcnVal < 0xa0 &&
+ (UcnVal != 0x24 && UcnVal != 0x40 && UcnVal != 0x60)) { // $, @, `
+ bool IsError = (!Features.CPlusPlus11 || !in_char_string_literal);
+ if (Diags) {
+ char BasicSCSChar = UcnVal;
+ if (UcnVal >= 0x20 && UcnVal < 0x7f)
+ Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
+ IsError ? diag::err_ucn_escape_basic_scs :
+ diag::warn_cxx98_compat_literal_ucn_escape_basic_scs)
+ << StringRef(&BasicSCSChar, 1);
+ else
+ Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
+ IsError ? diag::err_ucn_control_character :
+ diag::warn_cxx98_compat_literal_ucn_control_character);
+ }
+ if (IsError)
+ return false;
+ }
+
+ if (!Features.CPlusPlus && !Features.C99 && Diags)
+ Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
+ diag::warn_ucn_not_valid_in_c89_literal);
+
+ return true;
+}
+
+/// MeasureUCNEscape - Determine the number of bytes within the resulting string
+/// which this UCN will occupy.
+static int MeasureUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
+ const char *ThisTokEnd, unsigned CharByteWidth,
+ const LangOptions &Features, bool &HadError) {
+ // UTF-32: 4 bytes per escape.
+ if (CharByteWidth == 4)
+ return 4;
+
+ uint32_t UcnVal = 0;
+ unsigned short UcnLen = 0;
+ FullSourceLoc Loc;
+
+ if (!ProcessUCNEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd, UcnVal,
+ UcnLen, Loc, nullptr, Features, true)) {
+ HadError = true;
+ return 0;
+ }
+
+ // UTF-16: 2 bytes for BMP, 4 bytes otherwise.
+ if (CharByteWidth == 2)
+ return UcnVal <= 0xFFFF ? 2 : 4;
+
+ // UTF-8.
+ if (UcnVal < 0x80)
+ return 1;
+ if (UcnVal < 0x800)
+ return 2;
+ if (UcnVal < 0x10000)
+ return 3;
+ return 4;
+}
+
+/// EncodeUCNEscape - Read the Universal Character Name, check constraints and
+/// convert the UTF32 to UTF8 or UTF16. This is a subroutine of
+/// StringLiteralParser. When we decide to implement UCN's for identifiers,
+/// we will likely rework our support for UCN's.
+static void EncodeUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
+ const char *ThisTokEnd,
+ char *&ResultBuf, bool &HadError,
+ FullSourceLoc Loc, unsigned CharByteWidth,
+ DiagnosticsEngine *Diags,
+ const LangOptions &Features) {
+ typedef uint32_t UTF32;
+ UTF32 UcnVal = 0;
+ unsigned short UcnLen = 0;
+ if (!ProcessUCNEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd, UcnVal, UcnLen,
+ Loc, Diags, Features, true)) {
+ HadError = true;
+ return;
+ }
+
+ assert((CharByteWidth == 1 || CharByteWidth == 2 || CharByteWidth == 4) &&
+ "only character widths of 1, 2, or 4 bytes supported");
+
+ (void)UcnLen;
+ assert((UcnLen == 4 || UcnLen == 8) && "only ucn length of 4 or 8 supported");
+
+ if (CharByteWidth == 4) {
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF32 *ResultPtr = reinterpret_cast<UTF32*>(ResultBuf);
+ *ResultPtr = UcnVal;
+ ResultBuf += 4;
+ return;
+ }
+
+ if (CharByteWidth == 2) {
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF16 *ResultPtr = reinterpret_cast<UTF16*>(ResultBuf);
+
+ if (UcnVal <= (UTF32)0xFFFF) {
+ *ResultPtr = UcnVal;
+ ResultBuf += 2;
+ return;
+ }
+
+ // Convert to UTF16.
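+ // Surrogate pair example (illustrative): UcnVal == 0x1F600 encodes as
+ // 0xD83D 0xDE00, since 0x1F600 - 0x10000 == 0xF600, 0xF600 >> 10 == 0x3D,
+ // and 0xF600 & 0x3FF == 0x200.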
+ UcnVal -= 0x10000;
+ *ResultPtr = 0xD800 + (UcnVal >> 10);
+ *(ResultPtr+1) = 0xDC00 + (UcnVal & 0x3FF);
+ ResultBuf += 4;
+ return;
+ }
+
+ assert(CharByteWidth == 1 && "UTF-8 encoding is only for 1 byte characters");
+
+ // Now that we've parsed/checked the UCN, we convert from UTF32->UTF8.
+ // The conversion below was inspired by:
+ // http://www.unicode.org/Public/PROGRAMS/CVTUTF/ConvertUTF.c
+ // First, we determine how many bytes the result will require.
+ typedef uint8_t UTF8;
+
+ unsigned short bytesToWrite = 0;
+ if (UcnVal < (UTF32)0x80)
+ bytesToWrite = 1;
+ else if (UcnVal < (UTF32)0x800)
+ bytesToWrite = 2;
+ else if (UcnVal < (UTF32)0x10000)
+ bytesToWrite = 3;
+ else
+ bytesToWrite = 4;
+
+ const unsigned byteMask = 0xBF;
+ const unsigned byteMark = 0x80;
+
+ // Once the bits are split out into bytes of UTF8, this is a mask OR-ed
+ // into the first byte, depending on how many bytes follow.
+ static const UTF8 firstByteMark[5] = {
+ 0x00, 0x00, 0xC0, 0xE0, 0xF0
+ };
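+ // Worked example (illustrative): for UcnVal == 0xE9, bytesToWrite == 2;
+ // case 2 stores the continuation byte (0xE9 | 0x80) & 0xBF == 0xA9, then
+ // case 1 stores the lead byte (0xE9 >> 6) | firstByteMark[2] == 0xC3.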
+ // Finally, we write the bytes into ResultBuf.
+ ResultBuf += bytesToWrite;
+ switch (bytesToWrite) { // note: everything falls through.
+ case 4: *--ResultBuf = (UTF8)((UcnVal | byteMark) & byteMask); UcnVal >>= 6;
+ case 3: *--ResultBuf = (UTF8)((UcnVal | byteMark) & byteMask); UcnVal >>= 6;
+ case 2: *--ResultBuf = (UTF8)((UcnVal | byteMark) & byteMask); UcnVal >>= 6;
+ case 1: *--ResultBuf = (UTF8) (UcnVal | firstByteMark[bytesToWrite]);
+ }
+ // Update the buffer.
+ ResultBuf += bytesToWrite;
+}
+
+
+/// integer-constant: [C99 6.4.4.1]
+/// decimal-constant integer-suffix
+/// octal-constant integer-suffix
+/// hexadecimal-constant integer-suffix
+/// binary-literal integer-suffix [GNU, C++1y]
+/// user-defined-integer-literal: [C++11 lex.ext]
+/// decimal-literal ud-suffix
+/// octal-literal ud-suffix
+/// hexadecimal-literal ud-suffix
+/// binary-literal ud-suffix [GNU, C++1y]
+/// decimal-constant:
+/// nonzero-digit
+/// decimal-constant digit
+/// octal-constant:
+/// 0
+/// octal-constant octal-digit
+/// hexadecimal-constant:
+/// hexadecimal-prefix hexadecimal-digit
+/// hexadecimal-constant hexadecimal-digit
+/// hexadecimal-prefix: one of
+/// 0x 0X
+/// binary-literal:
+/// 0b binary-digit
+/// 0B binary-digit
+/// binary-literal binary-digit
+/// integer-suffix:
+/// unsigned-suffix [long-suffix]
+/// unsigned-suffix [long-long-suffix]
+/// long-suffix [unsigned-suffix]
+/// long-long-suffix [unsigned-suffix]
+/// nonzero-digit:
+/// 1 2 3 4 5 6 7 8 9
+/// octal-digit:
+/// 0 1 2 3 4 5 6 7
+/// hexadecimal-digit:
+/// 0 1 2 3 4 5 6 7 8 9
+/// a b c d e f
+/// A B C D E F
+/// binary-digit:
+/// 0
+/// 1
+/// unsigned-suffix: one of
+/// u U
+/// long-suffix: one of
+/// l L
+/// long-long-suffix: one of
+/// ll LL
+///
+/// floating-constant: [C99 6.4.4.2]
+/// TODO: add rules...
+///
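+/// Examples (illustrative): 42ull, 0x1Fu, 012 (octal), 0b1010 (GNU/C++1y
+/// binary), 1.5e-3f, 0x1.8p3 (hex float), and 123_km (a C++11 ud-suffix).
+///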
+NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
+ SourceLocation TokLoc,
+ Preprocessor &PP)
+ : PP(PP), ThisTokBegin(TokSpelling.begin()), ThisTokEnd(TokSpelling.end()) {
+
+ // This routine assumes that the range begin/end matches the regex for integer
+ // and FP constants (specifically, the 'pp-number' regex), and assumes that
+ // the byte at "*end" is both valid and not part of the regex. Because of
+ // this, it doesn't have to check for 'overscan' in various places.
+ assert(!isPreprocessingNumberBody(*ThisTokEnd) && "didn't maximally munch?");
+
+ s = DigitsBegin = ThisTokBegin;
+ saw_exponent = false;
+ saw_period = false;
+ saw_ud_suffix = false;
+ isLong = false;
+ isUnsigned = false;
+ isLongLong = false;
+ isFloat = false;
+ isImaginary = false;
+ MicrosoftInteger = 0;
+ hadError = false;
+
+ if (*s == '0') { // parse radix
+ ParseNumberStartingWithZero(TokLoc);
+ if (hadError)
+ return;
+ } else { // the first digit is non-zero
+ radix = 10;
+ s = SkipDigits(s);
+ if (s == ThisTokEnd) {
+ // Done.
+ } else if (isHexDigit(*s) && !(*s == 'e' || *s == 'E')) {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s - ThisTokBegin),
+ diag::err_invalid_digit) << StringRef(s, 1) << 0;
+ hadError = true;
+ return;
+ } else if (*s == '.') {
+ checkSeparator(TokLoc, s, CSK_AfterDigits);
+ s++;
+ saw_period = true;
+ checkSeparator(TokLoc, s, CSK_BeforeDigits);
+ s = SkipDigits(s);
+ }
+ if ((*s == 'e' || *s == 'E')) { // exponent
+ checkSeparator(TokLoc, s, CSK_AfterDigits);
+ const char *Exponent = s;
+ s++;
+ saw_exponent = true;
+ if (*s == '+' || *s == '-') s++; // sign
+ checkSeparator(TokLoc, s, CSK_BeforeDigits);
+ const char *first_non_digit = SkipDigits(s);
+ if (first_non_digit != s) {
+ s = first_non_digit;
+ } else {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent - ThisTokBegin),
+ diag::err_exponent_has_no_digits);
+ hadError = true;
+ return;
+ }
+ }
+ }
+
+ SuffixBegin = s;
+ checkSeparator(TokLoc, s, CSK_AfterDigits);
+
+ // Parse the suffix. At this point we can classify whether we have an FP or
+ // integer constant.
+ bool isFPConstant = isFloatingLiteral();
+ const char *ImaginarySuffixLoc = nullptr;
+
+ // Loop over all of the characters of the suffix. If we see something bad,
+ // we break out of the loop.
+ for (; s != ThisTokEnd; ++s) {
+ switch (*s) {
+ case 'f': // FP Suffix for "float"
+ case 'F':
+ if (!isFPConstant) break; // Error for integer constant.
+ if (isFloat || isLong) break; // FF, LF invalid.
+ isFloat = true;
+ continue; // Success.
+ case 'u':
+ case 'U':
+ if (isFPConstant) break; // Error for floating constant.
+ if (isUnsigned) break; // Cannot be repeated.
+ isUnsigned = true;
+ continue; // Success.
+ case 'l':
+ case 'L':
+ if (isLong || isLongLong) break; // Cannot be repeated.
+ if (isFloat) break; // LF invalid.
+
+ // Check for long long. The L's need to be adjacent and the same case.
+ if (s[1] == s[0]) {
+ assert(s + 1 < ThisTokEnd && "didn't maximally munch?");
+ if (isFPConstant) break; // long long invalid for floats.
+ isLongLong = true;
+ ++s; // Eat both of them.
+ } else {
+ isLong = true;
+ }
+ continue; // Success.
+ case 'i':
+ case 'I':
+ if (PP.getLangOpts().MicrosoftExt) {
+ if (isLong || isLongLong || MicrosoftInteger)
+ break;
+
+ if (!isFPConstant) {
+ // Allow i8, i16, i32, and i64.
+ switch (s[1]) {
+ case '8':
+ s += 2; // i8 suffix
+ MicrosoftInteger = 8;
+ break;
+ case '1':
+ if (s[2] == '6') {
+ s += 3; // i16 suffix
+ MicrosoftInteger = 16;
+ }
+ break;
+ case '3':
+ if (s[2] == '2') {
+ s += 3; // i32 suffix
+ MicrosoftInteger = 32;
+ }
+ break;
+ case '6':
+ if (s[2] == '4') {
+ s += 3; // i64 suffix
+ MicrosoftInteger = 64;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ if (MicrosoftInteger) {
+ assert(s <= ThisTokEnd && "didn't maximally munch?");
+ break;
+ }
+ }
+ // "i", "if", and "il" are user-defined suffixes in C++1y.
+ if (*s == 'i' && PP.getLangOpts().CPlusPlus14)
+ break;
+ // fall through.
+ case 'j':
+ case 'J':
+ if (isImaginary) break; // Cannot be repeated.
+ isImaginary = true;
+ ImaginarySuffixLoc = s;
+ continue; // Success.
+ }
+ // If we reached here, there was an error or a ud-suffix.
+ break;
+ }
+
+ if (s != ThisTokEnd) {
+ // FIXME: Don't bother expanding UCNs if !tok.hasUCN().
+ expandUCNs(UDSuffixBuf, StringRef(SuffixBegin, ThisTokEnd - SuffixBegin));
+ if (isValidUDSuffix(PP.getLangOpts(), UDSuffixBuf)) {
+ // Any suffix pieces we might have parsed are actually part of the
+ // ud-suffix.
+ isLong = false;
+ isUnsigned = false;
+ isLongLong = false;
+ isFloat = false;
+ isImaginary = false;
+ MicrosoftInteger = 0;
+
+ saw_ud_suffix = true;
+ return;
+ }
+
+ // Report an error if there are any.
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, SuffixBegin - ThisTokBegin),
+ diag::err_invalid_suffix_constant)
+ << StringRef(SuffixBegin, ThisTokEnd-SuffixBegin) << isFPConstant;
+ hadError = true;
+ return;
+ }
+
+ if (isImaginary) {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc,
+ ImaginarySuffixLoc - ThisTokBegin),
+ diag::ext_imaginary_constant);
+ }
+}
+
+/// Determine whether a suffix is a valid ud-suffix. We avoid treating reserved
+/// suffixes as ud-suffixes, because the diagnostic experience is better if we
+/// treat it as an invalid suffix.
+bool NumericLiteralParser::isValidUDSuffix(const LangOptions &LangOpts,
+ StringRef Suffix) {
+ if (!LangOpts.CPlusPlus11 || Suffix.empty())
+ return false;
+
+ // By C++11 [lex.ext]p10, ud-suffixes starting with an '_' are always valid.
+ if (Suffix[0] == '_')
+ return true;
+
+ // In C++11, there are no library suffixes.
+ if (!LangOpts.CPlusPlus14)
+ return false;
+
+ // In C++1y, "s", "h", "min", "ms", "us", and "ns" are used in the library.
+ // Per tweaked N3660, "il", "i", and "if" are also used in the library.
+ return llvm::StringSwitch<bool>(Suffix)
+ .Cases("h", "min", "s", true)
+ .Cases("ms", "us", "ns", true)
+ .Cases("il", "i", "if", true)
+ .Default(false);
+}
+
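+/// Diagnose a digit separator that is not between two digits, e.g. the
+/// separator preceding the exponent in "1'e2" (illustrative).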
+void NumericLiteralParser::checkSeparator(SourceLocation TokLoc,
+ const char *Pos,
+ CheckSeparatorKind IsAfterDigits) {
+ if (IsAfterDigits == CSK_AfterDigits) {
+ if (Pos == ThisTokBegin)
+ return;
+ --Pos;
+ } else if (Pos == ThisTokEnd)
+ return;
+
+ if (isDigitSeparator(*Pos))
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Pos - ThisTokBegin),
+ diag::err_digit_separator_not_between_digits)
+ << IsAfterDigits;
+}
+
+/// ParseNumberStartingWithZero - This method is called when the first character
+/// of the number is found to be a zero. This means it is either an octal
+/// number (like '04'), a hex number ('0x123a'), a binary number ('0b1010'), or
+/// a floating point number (01239.123e4). Eat the prefix, determining the
+/// radix etc.
+void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
+ assert(s[0] == '0' && "Invalid method call");
+ s++;
+
+ int c1 = s[0];
+
+ // Handle a hex number like 0x1234.
+ if ((c1 == 'x' || c1 == 'X') && (isHexDigit(s[1]) || s[1] == '.')) {
+ s++;
+ assert(s < ThisTokEnd && "didn't maximally munch?");
+ radix = 16;
+ DigitsBegin = s;
+ s = SkipHexDigits(s);
+ bool noSignificand = (s == DigitsBegin);
+ if (s == ThisTokEnd) {
+ // Done.
+ } else if (*s == '.') {
+ s++;
+ saw_period = true;
+ const char *floatDigitsBegin = s;
+ checkSeparator(TokLoc, s, CSK_BeforeDigits);
+ s = SkipHexDigits(s);
+ noSignificand &= (floatDigitsBegin == s);
+ }
+
+ if (noSignificand) {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s - ThisTokBegin),
+ diag::err_hexconstant_requires) << 1;
+ hadError = true;
+ return;
+ }
+
+ // A binary exponent can appear with or without a '.'. If dotted, the
+ // binary exponent is required.
+ if (*s == 'p' || *s == 'P') {
+ checkSeparator(TokLoc, s, CSK_AfterDigits);
+ const char *Exponent = s;
+ s++;
+ saw_exponent = true;
+ if (*s == '+' || *s == '-') s++; // sign
+ const char *first_non_digit = SkipDigits(s);
+ if (first_non_digit == s) {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-ThisTokBegin),
+ diag::err_exponent_has_no_digits);
+ hadError = true;
+ return;
+ }
+ checkSeparator(TokLoc, s, CSK_BeforeDigits);
+ s = first_non_digit;
+
+ if (!PP.getLangOpts().HexFloats)
+ PP.Diag(TokLoc, diag::ext_hexconstant_invalid);
+ } else if (saw_period) {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
+ diag::err_hexconstant_requires) << 0;
+ hadError = true;
+ }
+ return;
+ }
+
+ // Handle simple binary numbers 0b01010
+ if ((c1 == 'b' || c1 == 'B') && (s[1] == '0' || s[1] == '1')) {
+ // 0b101010 is a C++1y / GCC extension.
+ PP.Diag(TokLoc,
+ PP.getLangOpts().CPlusPlus14
+ ? diag::warn_cxx11_compat_binary_literal
+ : PP.getLangOpts().CPlusPlus
+ ? diag::ext_binary_literal_cxx14
+ : diag::ext_binary_literal);
+ ++s;
+ assert(s < ThisTokEnd && "didn't maximally munch?");
+ radix = 2;
+ DigitsBegin = s;
+ s = SkipBinaryDigits(s);
+ if (s == ThisTokEnd) {
+ // Done.
+ } else if (isHexDigit(*s)) {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
+ diag::err_invalid_digit) << StringRef(s, 1) << 2;
+ hadError = true;
+ }
+ // Other suffixes will be diagnosed by the caller.
+ return;
+ }
+
+ // For now, the radix is set to 8. If we discover that we have a
+ // floating point constant, the radix will change to 10. Octal floating
+ // point constants are not permitted (only decimal and hexadecimal).
+ radix = 8;
+ DigitsBegin = s;
+ s = SkipOctalDigits(s);
+ if (s == ThisTokEnd)
+ return; // Done, simple octal number like 01234
+
+ // If we have some other non-octal digit that *is* a decimal digit, see if
+ // this is part of a floating point number like 094.123 or 09e1.
+ if (isDigit(*s)) {
+ const char *EndDecimal = SkipDigits(s);
+ if (EndDecimal[0] == '.' || EndDecimal[0] == 'e' || EndDecimal[0] == 'E') {
+ s = EndDecimal;
+ radix = 10;
+ }
+ }
+
+ // If we have a hex digit other than 'e' (which denotes a FP exponent) then
+ // the code is using an incorrect base.
+ if (isHexDigit(*s) && *s != 'e' && *s != 'E') {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
+ diag::err_invalid_digit) << StringRef(s, 1) << 1;
+ hadError = true;
+ return;
+ }
+
+ if (*s == '.') {
+ s++;
+ radix = 10;
+ saw_period = true;
+ checkSeparator(TokLoc, s, CSK_BeforeDigits);
+ s = SkipDigits(s); // Skip suffix.
+ }
+ if (*s == 'e' || *s == 'E') { // exponent
+ checkSeparator(TokLoc, s, CSK_AfterDigits);
+ const char *Exponent = s;
+ s++;
+ radix = 10;
+ saw_exponent = true;
+ if (*s == '+' || *s == '-') s++; // sign
+ const char *first_non_digit = SkipDigits(s);
+ if (first_non_digit != s) {
+ checkSeparator(TokLoc, s, CSK_BeforeDigits);
+ s = first_non_digit;
+ } else {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-ThisTokBegin),
+ diag::err_exponent_has_no_digits);
+ hadError = true;
+ return;
+ }
+ }
+}
+
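+/// Return true when any literal with this radix and digit count is
+/// guaranteed to fit in 64 bits; e.g. 19 decimal digits always fit,
+/// since 10^19 - 1 < 2^64.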
+static bool alwaysFitsInto64Bits(unsigned Radix, unsigned NumDigits) {
+ switch (Radix) {
+ case 2:
+ return NumDigits <= 64;
+ case 8:
+ return NumDigits <= 64 / 3; // Digits are groups of 3 bits.
+ case 10:
+ return NumDigits <= 19; // floor(log10(2^64))
+ case 16:
+ return NumDigits <= 64 / 4; // Digits are groups of 4 bits.
+ default:
+ llvm_unreachable("impossible Radix");
+ }
+}
+
+/// GetIntegerValue - Convert this numeric literal value to an APInt that
+/// matches Val's input width. If there is an overflow, set Val to the low bits
+/// of the result and return true. Otherwise, return false.
+bool NumericLiteralParser::GetIntegerValue(llvm::APInt &Val) {
+ // Fast path: Compute a conservative bound on the maximum number of
+ // bits per digit in this radix. If we can't possibly overflow a
+ // uint64 based on that bound then do the simple conversion to
+ // integer. This avoids the expensive overflow checking below, and
+ // handles the common cases that matter (small decimal integers and
+ // hex/octal values which don't overflow).
+ const unsigned NumDigits = SuffixBegin - DigitsBegin;
+ if (alwaysFitsInto64Bits(radix, NumDigits)) {
+ uint64_t N = 0;
+ for (const char *Ptr = DigitsBegin; Ptr != SuffixBegin; ++Ptr)
+ if (!isDigitSeparator(*Ptr))
+ N = N * radix + llvm::hexDigitValue(*Ptr);
+
+ // This will truncate the value to Val's input width. Simply check
+ // for overflow by comparing.
+ Val = N;
+ return Val.getZExtValue() != N;
+ }
+
+ Val = 0;
+ const char *Ptr = DigitsBegin;
+
+ llvm::APInt RadixVal(Val.getBitWidth(), radix);
+ llvm::APInt CharVal(Val.getBitWidth(), 0);
+ llvm::APInt OldVal = Val;
+
+ bool OverflowOccurred = false;
+ while (Ptr < SuffixBegin) {
+ if (isDigitSeparator(*Ptr)) {
+ ++Ptr;
+ continue;
+ }
+
+ unsigned C = llvm::hexDigitValue(*Ptr++);
+
+ // If this letter is out of bound for this radix, reject it.
+ assert(C < radix && "NumericLiteralParser ctor should have rejected this");
+
+ CharVal = C;
+
+ // Add the digit to the value in the appropriate radix. If adding in digits
+ // made the value smaller, then this overflowed.
+ OldVal = Val;
+
+ // Multiply by radix, did overflow occur on the multiply?
+ Val *= RadixVal;
+ OverflowOccurred |= Val.udiv(RadixVal) != OldVal;
+
+ // Add value, did overflow occur on the value?
+ // (a + b) ult b <=> overflow
+ Val += CharVal;
+ OverflowOccurred |= Val.ult(CharVal);
+ }
+ return OverflowOccurred;
+}
+
+llvm::APFloat::opStatus
+NumericLiteralParser::GetFloatValue(llvm::APFloat &Result) {
+ using llvm::APFloat;
+
+ unsigned n = std::min(SuffixBegin - ThisTokBegin, ThisTokEnd - ThisTokBegin);
+
+ llvm::SmallString<16> Buffer;
+ StringRef Str(ThisTokBegin, n);
+ if (Str.find('\'') != StringRef::npos) {
+ Buffer.reserve(n);
+ std::remove_copy_if(Str.begin(), Str.end(), std::back_inserter(Buffer),
+ &isDigitSeparator);
+ Str = Buffer;
+ }
+
+ return Result.convertFromString(Str, APFloat::rmNearestTiesToEven);
+}
+
+
+/// \verbatim
+/// user-defined-character-literal: [C++11 lex.ext]
+/// character-literal ud-suffix
+/// ud-suffix:
+/// identifier
+/// character-literal: [C++11 lex.ccon]
+/// ' c-char-sequence '
+/// u' c-char-sequence '
+/// U' c-char-sequence '
+/// L' c-char-sequence '
+/// c-char-sequence:
+/// c-char
+/// c-char-sequence c-char
+/// c-char:
+/// any member of the source character set except the single-quote ',
+/// backslash \, or new-line character
+/// escape-sequence
+/// universal-character-name
+/// escape-sequence:
+/// simple-escape-sequence
+/// octal-escape-sequence
+/// hexadecimal-escape-sequence
+/// simple-escape-sequence:
+/// one of \' \" \? \\ \a \b \f \n \r \t \v
+/// octal-escape-sequence:
+/// \ octal-digit
+/// \ octal-digit octal-digit
+/// \ octal-digit octal-digit octal-digit
+/// hexadecimal-escape-sequence:
+/// \x hexadecimal-digit
+/// hexadecimal-escape-sequence hexadecimal-digit
+/// universal-character-name: [C++11 lex.charset]
+/// \u hex-quad
+/// \U hex-quad hex-quad
+/// hex-quad:
+/// hex-digit hex-digit hex-digit hex-digit
+/// \endverbatim
+///
+CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
+ SourceLocation Loc, Preprocessor &PP,
+ tok::TokenKind kind) {
+ // At this point we know that the character matches the regex "(L|u|U)?'.*'".
+ HadError = false;
+
+ Kind = kind;
+
+ const char *TokBegin = begin;
+
+ // Skip over wide character determinant.
+ if (Kind != tok::char_constant)
+ ++begin;
+ if (Kind == tok::utf8_char_constant)
+ ++begin;
+
+ // Skip over the entry quote.
+ assert(begin[0] == '\'' && "Invalid token lexed");
+ ++begin;
+
+ // Remove an optional ud-suffix.
+ if (end[-1] != '\'') {
+ const char *UDSuffixEnd = end;
+ do {
+ --end;
+ } while (end[-1] != '\'');
+ // FIXME: Don't bother with this if !tok.hasUCN().
+ expandUCNs(UDSuffixBuf, StringRef(end, UDSuffixEnd - end));
+ UDSuffixOffset = end - TokBegin;
+ }
+
+ // Trim the ending quote.
+ assert(end != begin && "Invalid token lexed");
+ --end;
+
+ // FIXME: The "Value" is an uint64_t so we can handle char literals of
+ // up to 64-bits.
+ // FIXME: This extensively assumes that 'char' is 8-bits.
+ assert(PP.getTargetInfo().getCharWidth() == 8 &&
+ "Assumes char is 8 bits");
+ assert(PP.getTargetInfo().getIntWidth() <= 64 &&
+ (PP.getTargetInfo().getIntWidth() & 7) == 0 &&
+ "Assumes sizeof(int) on target is <= 64 and a multiple of char");
+ assert(PP.getTargetInfo().getWCharWidth() <= 64 &&
+ "Assumes sizeof(wchar) on target is <= 64");
+
+ SmallVector<uint32_t, 4> codepoint_buffer;
+ codepoint_buffer.resize(end - begin);
+ uint32_t *buffer_begin = &codepoint_buffer.front();
+ uint32_t *buffer_end = buffer_begin + codepoint_buffer.size();
+
+ // Unicode escapes representing characters that cannot be correctly
+ // represented in a single code unit are disallowed in character literals
+ // by this implementation.
+ uint32_t largest_character_for_kind;
+ if (tok::wide_char_constant == Kind) {
+ largest_character_for_kind =
+ 0xFFFFFFFFu >> (32-PP.getTargetInfo().getWCharWidth());
+ } else if (tok::utf8_char_constant == Kind) {
+ largest_character_for_kind = 0x7F;
+ } else if (tok::utf16_char_constant == Kind) {
+ largest_character_for_kind = 0xFFFF;
+ } else if (tok::utf32_char_constant == Kind) {
+ largest_character_for_kind = 0x10FFFF;
+ } else {
+ largest_character_for_kind = 0x7Fu;
+ }
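+ // For example (illustrative), u'\U0001F600' is rejected below because
+ // U+1F600 exceeds the utf16 limit of 0xFFFF chosen above.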
+
+ while (begin != end) {
+ // Is this a span of non-escape characters?
+ if (begin[0] != '\\') {
+ char const *start = begin;
+ do {
+ ++begin;
+ } while (begin != end && *begin != '\\');
+
+ char const *tmp_in_start = start;
+ uint32_t *tmp_out_start = buffer_begin;
+ ConversionResult res =
+ ConvertUTF8toUTF32(reinterpret_cast<UTF8 const **>(&start),
+ reinterpret_cast<UTF8 const *>(begin),
+ &buffer_begin, buffer_end, strictConversion);
+ if (res != conversionOK) {
+ // If we see bad encoding for unprefixed character literals, warn and
+ // simply copy the byte values, for compatibility with gcc and
+ // older versions of clang.
+ bool NoErrorOnBadEncoding = isAscii();
+ unsigned Msg = diag::err_bad_character_encoding;
+ if (NoErrorOnBadEncoding)
+ Msg = diag::warn_bad_character_encoding;
+ PP.Diag(Loc, Msg);
+ if (NoErrorOnBadEncoding) {
+ start = tmp_in_start;
+ buffer_begin = tmp_out_start;
+ for (; start != begin; ++start, ++buffer_begin)
+ *buffer_begin = static_cast<uint8_t>(*start);
+ } else {
+ HadError = true;
+ }
+ } else {
+ for (; tmp_out_start < buffer_begin; ++tmp_out_start) {
+ if (*tmp_out_start > largest_character_for_kind) {
+ HadError = true;
+ PP.Diag(Loc, diag::err_character_too_large);
+ }
+ }
+ }
+
+ continue;
+ }
+ // Is this a Universal Character Name escape?
+ if (begin[1] == 'u' || begin[1] == 'U') {
+ unsigned short UcnLen = 0;
+ if (!ProcessUCNEscape(TokBegin, begin, end, *buffer_begin, UcnLen,
+ FullSourceLoc(Loc, PP.getSourceManager()),
+ &PP.getDiagnostics(), PP.getLangOpts(), true)) {
+ HadError = true;
+ } else if (*buffer_begin > largest_character_for_kind) {
+ HadError = true;
+ PP.Diag(Loc, diag::err_character_too_large);
+ }
+
+ ++buffer_begin;
+ continue;
+ }
+ unsigned CharWidth = getCharWidth(Kind, PP.getTargetInfo());
+ uint64_t result =
+ ProcessCharEscape(TokBegin, begin, end, HadError,
+ FullSourceLoc(Loc,PP.getSourceManager()),
+ CharWidth, &PP.getDiagnostics(), PP.getLangOpts());
+ *buffer_begin++ = result;
+ }
+
+ unsigned NumCharsSoFar = buffer_begin - &codepoint_buffer.front();
+
+ if (NumCharsSoFar > 1) {
+ if (isWide())
+ PP.Diag(Loc, diag::warn_extraneous_char_constant);
+ else if (isAscii() && NumCharsSoFar == 4)
+ PP.Diag(Loc, diag::ext_four_char_character_literal);
+ else if (isAscii())
+ PP.Diag(Loc, diag::ext_multichar_character_literal);
+ else
+ PP.Diag(Loc, diag::err_multichar_utf_character_literal);
+ IsMultiChar = true;
+ } else {
+ IsMultiChar = false;
+ }
+
+ llvm::APInt LitVal(PP.getTargetInfo().getIntWidth(), 0);
+
+ // Narrow character literals act as though their value is concatenated
+ // in this implementation, but warn on overflow.
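+ // For example (illustrative), 'ab' evaluates to 0x6162 with 8-bit chars:
+ // each code unit is shifted in below, high byte first.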
+ bool multi_char_too_long = false;
+ if (isAscii() && isMultiChar()) {
+ LitVal = 0;
+ for (size_t i = 0; i < NumCharsSoFar; ++i) {
+ // check for enough leading zeros to shift into
+ multi_char_too_long |= (LitVal.countLeadingZeros() < 8);
+ LitVal <<= 8;
+ LitVal = LitVal + (codepoint_buffer[i] & 0xFF);
+ }
+ } else if (NumCharsSoFar > 0) {
+ // otherwise just take the last character
+ LitVal = buffer_begin[-1];
+ }
+
+ if (!HadError && multi_char_too_long) {
+ PP.Diag(Loc, diag::warn_char_constant_too_large);
+ }
+
+ // Transfer the value from APInt to uint64_t
+ Value = LitVal.getZExtValue();
+
+ // If this is a single narrow character, sign extend it (e.g. '\xFF' is "-1")
+ // if 'char' is signed for this target (C99 6.4.4.4p10). Note that multiple
+ // character constants are not sign extended in this implementation:
+ // '\xFF\xFF' = 65535 and '\x0\xFF' = 255, which matches GCC.
+ if (isAscii() && NumCharsSoFar == 1 && (Value & 128) &&
+ PP.getLangOpts().CharIsSigned)
+ Value = (signed char)Value;
+}
+
+/// \verbatim
+/// string-literal: [C++0x lex.string]
+/// encoding-prefix " [s-char-sequence] "
+/// encoding-prefix R raw-string
+/// encoding-prefix:
+/// u8
+/// u
+/// U
+/// L
+/// s-char-sequence:
+/// s-char
+/// s-char-sequence s-char
+/// s-char:
+/// any member of the source character set except the double-quote ",
+/// backslash \, or new-line character
+/// escape-sequence
+/// universal-character-name
+/// raw-string:
+/// " d-char-sequence ( r-char-sequence ) d-char-sequence "
+/// r-char-sequence:
+/// r-char
+/// r-char-sequence r-char
+/// r-char:
+/// any member of the source character set, except a right parenthesis )
+/// followed by the initial d-char-sequence (which may be empty)
+/// followed by a double quote ".
+/// d-char-sequence:
+/// d-char
+/// d-char-sequence d-char
+/// d-char:
+/// any member of the basic source character set except:
+/// space, the left parenthesis (, the right parenthesis ),
+/// the backslash \, and the control characters representing horizontal
+/// tab, vertical tab, form feed, and newline.
+/// escape-sequence: [C++0x lex.ccon]
+/// simple-escape-sequence
+/// octal-escape-sequence
+/// hexadecimal-escape-sequence
+/// simple-escape-sequence:
+/// one of \' \" \? \\ \a \b \f \n \r \t \v
+/// octal-escape-sequence:
+/// \ octal-digit
+/// \ octal-digit octal-digit
+/// \ octal-digit octal-digit octal-digit
+/// hexadecimal-escape-sequence:
+/// \x hexadecimal-digit
+/// hexadecimal-escape-sequence hexadecimal-digit
+/// universal-character-name:
+/// \u hex-quad
+/// \U hex-quad hex-quad
+/// hex-quad:
+/// hex-digit hex-digit hex-digit hex-digit
+/// \endverbatim
+///
+StringLiteralParser::
+StringLiteralParser(ArrayRef<Token> StringToks,
+ Preprocessor &PP, bool Complain)
+ : SM(PP.getSourceManager()), Features(PP.getLangOpts()),
+ Target(PP.getTargetInfo()), Diags(Complain ? &PP.getDiagnostics() :nullptr),
+ MaxTokenLength(0), SizeBound(0), CharByteWidth(0), Kind(tok::unknown),
+ ResultPtr(ResultBuf.data()), hadError(false), Pascal(false) {
+ init(StringToks);
+}
+
+void StringLiteralParser::init(ArrayRef<Token> StringToks){
+ // The literal token may have come from an invalid source location (e.g. due
+ // to a PCH error), in which case the token length will be 0.
+ if (StringToks.empty() || StringToks[0].getLength() < 2)
+ return DiagnoseLexingError(SourceLocation());
+
+ // Scan all of the string portions, remember the max individual token length,
+ // computing a bound on the concatenated string length, and see whether any
+ // piece is a wide-string. If any of the string portions is a wide-string
+ // literal, the result is a wide-string literal [C99 6.4.5p4].
+ assert(!StringToks.empty() && "expected at least one token");
+ MaxTokenLength = StringToks[0].getLength();
+ assert(StringToks[0].getLength() >= 2 && "literal token is invalid!");
+ SizeBound = StringToks[0].getLength()-2; // -2 for "".
+ Kind = StringToks[0].getKind();
+
+ hadError = false;
+
+ // Implement Translation Phase #6: concatenation of string literals
+ // (C99 5.1.1.2p1). The common case is only one string fragment.
+ for (unsigned i = 1; i != StringToks.size(); ++i) {
+ if (StringToks[i].getLength() < 2)
+ return DiagnoseLexingError(StringToks[i].getLocation());
+
+ // The string could be shorter than this if it needs cleaning, but this is a
+ // reasonable bound, which is all we need.
+ assert(StringToks[i].getLength() >= 2 && "literal token is invalid!");
+ SizeBound += StringToks[i].getLength()-2; // -2 for "".
+
+ // Remember maximum string piece length.
+ if (StringToks[i].getLength() > MaxTokenLength)
+ MaxTokenLength = StringToks[i].getLength();
+
+ // Remember if we see any wide or utf-8/16/32 strings.
+ // Also check for illegal concatenations.
+ if (StringToks[i].isNot(Kind) && StringToks[i].isNot(tok::string_literal)) {
+ if (isAscii()) {
+ Kind = StringToks[i].getKind();
+ } else {
+ if (Diags)
+ Diags->Report(StringToks[i].getLocation(),
+ diag::err_unsupported_string_concat);
+ hadError = true;
+ }
+ }
+ }
+
+ // Include space for the null terminator.
+ ++SizeBound;
+
+ // TODO: K&R warning: "traditional C rejects string constant concatenation"
+
+ // Get the width in bytes of char/wchar_t/char16_t/char32_t
+ CharByteWidth = getCharWidth(Kind, Target);
+ assert((CharByteWidth & 7) == 0 && "Assumes character size is byte multiple");
+ CharByteWidth /= 8;
+
+ // The output buffer size needs to be large enough to hold wide characters.
+ // This is a worst-case assumption which basically corresponds to L"" "long".
+ SizeBound *= CharByteWidth;
+
+ // Size the temporary buffer to hold the result string data.
+ ResultBuf.resize(SizeBound);
+
+ // Likewise, but for each string piece.
+ SmallString<512> TokenBuf;
+ TokenBuf.resize(MaxTokenLength);
+
+ // Loop over all the strings, getting their spelling, and expanding them to
+ // wide strings as appropriate.
+ ResultPtr = &ResultBuf[0]; // Next byte to fill in.
+
+ Pascal = false;
+
+ SourceLocation UDSuffixTokLoc;
+
+ for (unsigned i = 0, e = StringToks.size(); i != e; ++i) {
+ const char *ThisTokBuf = &TokenBuf[0];
+ // Get the spelling of the token, which eliminates trigraphs, etc. We know
+ // that ThisTokBuf points to a buffer that is big enough for the whole token
+ // and 'spelled' tokens can only shrink.
+ bool StringInvalid = false;
+ unsigned ThisTokLen =
+ Lexer::getSpelling(StringToks[i], ThisTokBuf, SM, Features,
+ &StringInvalid);
+ if (StringInvalid)
+ return DiagnoseLexingError(StringToks[i].getLocation());
+
+ const char *ThisTokBegin = ThisTokBuf;
+ const char *ThisTokEnd = ThisTokBuf+ThisTokLen;
+
+ // Remove an optional ud-suffix.
+ if (ThisTokEnd[-1] != '"') {
+ const char *UDSuffixEnd = ThisTokEnd;
+ do {
+ --ThisTokEnd;
+ } while (ThisTokEnd[-1] != '"');
+
+ StringRef UDSuffix(ThisTokEnd, UDSuffixEnd - ThisTokEnd);
+
+ if (UDSuffixBuf.empty()) {
+ if (StringToks[i].hasUCN())
+ expandUCNs(UDSuffixBuf, UDSuffix);
+ else
+ UDSuffixBuf.assign(UDSuffix);
+ UDSuffixToken = i;
+ UDSuffixOffset = ThisTokEnd - ThisTokBuf;
+ UDSuffixTokLoc = StringToks[i].getLocation();
+ } else {
+ SmallString<32> ExpandedUDSuffix;
+ if (StringToks[i].hasUCN()) {
+ expandUCNs(ExpandedUDSuffix, UDSuffix);
+ UDSuffix = ExpandedUDSuffix;
+ }
+
+ // C++11 [lex.ext]p8: At the end of phase 6, if a string literal is the
+ // result of a concatenation involving at least one user-defined-string-
+ // literal, all the participating user-defined-string-literals shall
+ // have the same ud-suffix.
+ if (UDSuffixBuf != UDSuffix) {
+ if (Diags) {
+ SourceLocation TokLoc = StringToks[i].getLocation();
+ Diags->Report(TokLoc, diag::err_string_concat_mixed_suffix)
+ << UDSuffixBuf << UDSuffix
+ << SourceRange(UDSuffixTokLoc, UDSuffixTokLoc)
+ << SourceRange(TokLoc, TokLoc);
+ }
+ hadError = true;
+ }
+ }
+ }
+
+ // Strip the end quote.
+ --ThisTokEnd;
+
+ // TODO: Input character set mapping support.
+
+ // Skip marker for wide or unicode strings.
+ if (ThisTokBuf[0] == 'L' || ThisTokBuf[0] == 'u' || ThisTokBuf[0] == 'U') {
+ ++ThisTokBuf;
+ // Skip 8 of u8 marker for utf8 strings.
+ if (ThisTokBuf[0] == '8')
+ ++ThisTokBuf;
+ }
+
+ // Check for raw string
+ if (ThisTokBuf[0] == 'R') {
+ ThisTokBuf += 2; // skip R"
+
+ const char *Prefix = ThisTokBuf;
+ while (ThisTokBuf[0] != '(')
+ ++ThisTokBuf;
+ ++ThisTokBuf; // skip '('
+
+ // Remove same number of characters from the end
+ ThisTokEnd -= ThisTokBuf - Prefix;
+ assert(ThisTokEnd >= ThisTokBuf && "malformed raw string literal");
+
+ // C++14 [lex.string]p4: A source-file new-line in a raw string literal
+ // results in a new-line in the resulting execution string-literal.
+ StringRef RemainingTokenSpan(ThisTokBuf, ThisTokEnd - ThisTokBuf);
+ while (!RemainingTokenSpan.empty()) {
+ // Split the string literal on \r\n boundaries.
+ size_t CRLFPos = RemainingTokenSpan.find("\r\n");
+ StringRef BeforeCRLF = RemainingTokenSpan.substr(0, CRLFPos);
+ StringRef AfterCRLF = RemainingTokenSpan.substr(CRLFPos);
+
+ // Copy everything before the \r\n sequence into the string literal.
+ if (CopyStringFragment(StringToks[i], ThisTokBegin, BeforeCRLF))
+ hadError = true;
+
+ // Point into the \n inside the \r\n sequence and operate on the
+ // remaining portion of the literal.
+ RemainingTokenSpan = AfterCRLF.substr(1);
+ }
+ } else {
+ if (ThisTokBuf[0] != '"') {
+ // The file may have come from PCH and then changed after loading the
+ // PCH; Fail gracefully.
+ return DiagnoseLexingError(StringToks[i].getLocation());
+ }
+ ++ThisTokBuf; // skip "
+
+ // Check if this is a pascal string
+ if (Features.PascalStrings && ThisTokBuf + 1 != ThisTokEnd &&
+ ThisTokBuf[0] == '\\' && ThisTokBuf[1] == 'p') {
+
+ // If the \p sequence is found in the first token, we have a pascal string
+ // Otherwise, if we already have a pascal string, ignore the first \p
+ if (i == 0) {
+ ++ThisTokBuf;
+ Pascal = true;
+ } else if (Pascal)
+ ThisTokBuf += 2;
+ }
+
+ while (ThisTokBuf != ThisTokEnd) {
+ // Is this a span of non-escape characters?
+ if (ThisTokBuf[0] != '\\') {
+ const char *InStart = ThisTokBuf;
+ do {
+ ++ThisTokBuf;
+ } while (ThisTokBuf != ThisTokEnd && ThisTokBuf[0] != '\\');
+
+ // Copy the character span over.
+ if (CopyStringFragment(StringToks[i], ThisTokBegin,
+ StringRef(InStart, ThisTokBuf - InStart)))
+ hadError = true;
+ continue;
+ }
+ // Is this a Universal Character Name escape?
+ if (ThisTokBuf[1] == 'u' || ThisTokBuf[1] == 'U') {
+ EncodeUCNEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd,
+ ResultPtr, hadError,
+ FullSourceLoc(StringToks[i].getLocation(), SM),
+ CharByteWidth, Diags, Features);
+ continue;
+ }
+ // Otherwise, this is a non-UCN escape character. Process it.
+ unsigned ResultChar =
+ ProcessCharEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd, hadError,
+ FullSourceLoc(StringToks[i].getLocation(), SM),
+ CharByteWidth*8, Diags, Features);
+
+ if (CharByteWidth == 4) {
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF32 *ResultWidePtr = reinterpret_cast<UTF32*>(ResultPtr);
+ *ResultWidePtr = ResultChar;
+ ResultPtr += 4;
+ } else if (CharByteWidth == 2) {
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF16 *ResultWidePtr = reinterpret_cast<UTF16*>(ResultPtr);
+ *ResultWidePtr = ResultChar & 0xFFFF;
+ ResultPtr += 2;
+ } else {
+ assert(CharByteWidth == 1 && "Unexpected char width");
+ *ResultPtr++ = ResultChar & 0xFF;
+ }
+ }
+ }
+ }
+
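+ // For a Pascal string the first code unit holds the character count; e.g.
+ // "\pHello" stores 5 followed by the characters (illustrative), with the
+ // 'p' placeholder overwritten below.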
+ if (Pascal) {
+ if (CharByteWidth == 4) {
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF32 *ResultWidePtr = reinterpret_cast<UTF32*>(ResultBuf.data());
+ ResultWidePtr[0] = GetNumStringChars() - 1;
+ } else if (CharByteWidth == 2) {
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF16 *ResultWidePtr = reinterpret_cast<UTF16*>(ResultBuf.data());
+ ResultWidePtr[0] = GetNumStringChars() - 1;
+ } else {
+ assert(CharByteWidth == 1 && "Unexpected char width");
+ ResultBuf[0] = GetNumStringChars() - 1;
+ }
+
+ // Verify that pascal strings aren't too large.
+ if (GetStringLength() > 256) {
+ if (Diags)
+ Diags->Report(StringToks.front().getLocation(),
+ diag::err_pascal_string_too_long)
+ << SourceRange(StringToks.front().getLocation(),
+ StringToks.back().getLocation());
+ hadError = true;
+ return;
+ }
+ } else if (Diags) {
+ // Complain if this string literal has too many characters.
+ unsigned MaxChars = Features.CPlusPlus? 65536 : Features.C99 ? 4095 : 509;
+
+ if (GetNumStringChars() > MaxChars)
+ Diags->Report(StringToks.front().getLocation(),
+ diag::ext_string_too_long)
+ << GetNumStringChars() << MaxChars
+ << (Features.CPlusPlus ? 2 : Features.C99 ? 1 : 0)
+ << SourceRange(StringToks.front().getLocation(),
+ StringToks.back().getLocation());
+ }
+}
+
+static const char *resyncUTF8(const char *Err, const char *End) {
+ if (Err == End)
+ return End;
+ End = Err + std::min<unsigned>(getNumBytesForUTF8(*Err), End-Err);
+ while (++Err != End && (*Err & 0xC0) == 0x80)
+ ;
+ return Err;
+}
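+
+// Illustrative sketch (not part of the original source): resyncUTF8 skips
+// exactly one ill-formed code point, bounded by the lead byte's claimed
+// length and the end of the buffer. For example:
+//
+//   const char Buf[] = "\xC3(";               // truncated 2-byte sequence
+//   const char *Next = resyncUTF8(Buf, Buf + 2);
+//   // Next == Buf + 1: '(' is not a continuation byte (0b10xxxxxx), so
+//   // scanning stops there and conversion can resume at the '('.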
+
+/// \brief Copies the bytes in Fragment, a sequence within Tok's contents
+/// (which begin at TokBegin), into ResultPtr, widening multi-byte
+/// characters to the target character width as it goes.
+bool StringLiteralParser::CopyStringFragment(const Token &Tok,
+ const char *TokBegin,
+ StringRef Fragment) {
+ const UTF8 *ErrorPtrTmp;
+ if (ConvertUTF8toWide(CharByteWidth, Fragment, ResultPtr, ErrorPtrTmp))
+ return false;
+
+ // If we see bad encoding for unprefixed string literals, warn and
+ // simply copy the byte values, for compatibility with gcc and older
+ // versions of clang.
+ bool NoErrorOnBadEncoding = isAscii();
+ if (NoErrorOnBadEncoding) {
+ memcpy(ResultPtr, Fragment.data(), Fragment.size());
+ ResultPtr += Fragment.size();
+ }
+
+ if (Diags) {
+ const char *ErrorPtr = reinterpret_cast<const char *>(ErrorPtrTmp);
+
+ FullSourceLoc SourceLoc(Tok.getLocation(), SM);
+ const DiagnosticBuilder &Builder =
+ Diag(Diags, Features, SourceLoc, TokBegin,
+ ErrorPtr, resyncUTF8(ErrorPtr, Fragment.end()),
+ NoErrorOnBadEncoding ? diag::warn_bad_string_encoding
+ : diag::err_bad_string_encoding);
+
+ const char *NextStart = resyncUTF8(ErrorPtr, Fragment.end());
+ StringRef NextFragment(NextStart, Fragment.end()-NextStart);
+
+ // Decode into a dummy buffer.
+ SmallString<512> Dummy;
+ Dummy.reserve(Fragment.size() * CharByteWidth);
+ char *Ptr = Dummy.data();
+
+ while (!ConvertUTF8toWide(CharByteWidth, NextFragment, Ptr, ErrorPtrTmp)) {
+ const char *ErrorPtr = reinterpret_cast<const char *>(ErrorPtrTmp);
+ NextStart = resyncUTF8(ErrorPtr, Fragment.end());
+ Builder << MakeCharSourceRange(Features, SourceLoc, TokBegin,
+ ErrorPtr, NextStart);
+ NextFragment = StringRef(NextStart, Fragment.end()-NextStart);
+ }
+ }
+ return !NoErrorOnBadEncoding;
+}
+
+void StringLiteralParser::DiagnoseLexingError(SourceLocation Loc) {
+ hadError = true;
+ if (Diags)
+ Diags->Report(Loc, diag::err_lexing_string);
+}
+
+/// getOffsetOfStringByte - This function returns the offset of the
+/// specified byte of the string data represented by Tok, advancing over
+/// any escape sequences in the string along the way.
+unsigned StringLiteralParser::getOffsetOfStringByte(const Token &Tok,
+ unsigned ByteNo) const {
+ // Get the spelling of the token.
+ SmallString<32> SpellingBuffer;
+ SpellingBuffer.resize(Tok.getLength());
+
+ bool StringInvalid = false;
+ const char *SpellingPtr = &SpellingBuffer[0];
+ unsigned TokLen = Lexer::getSpelling(Tok, SpellingPtr, SM, Features,
+ &StringInvalid);
+ if (StringInvalid)
+ return 0;
+
+ const char *SpellingStart = SpellingPtr;
+ const char *SpellingEnd = SpellingPtr+TokLen;
+
+ // Handle UTF-8 strings just like narrow strings.
+ if (SpellingPtr[0] == 'u' && SpellingPtr[1] == '8')
+ SpellingPtr += 2;
+
+ assert(SpellingPtr[0] != 'L' && SpellingPtr[0] != 'u' &&
+ SpellingPtr[0] != 'U' && "Doesn't handle wide or utf strings yet");
+
+ // For raw string literals, this is easy.
+ if (SpellingPtr[0] == 'R') {
+ assert(SpellingPtr[1] == '"' && "Should be a raw string literal!");
+ // Skip 'R"'.
+ SpellingPtr += 2;
+ while (*SpellingPtr != '(') {
+ ++SpellingPtr;
+ assert(SpellingPtr < SpellingEnd && "Missing ( for raw string literal");
+ }
+ // Skip '('.
+ ++SpellingPtr;
+ return SpellingPtr - SpellingStart + ByteNo;
+ }
+
+ // Skip over the leading quote
+ assert(SpellingPtr[0] == '"' && "Should be a string literal!");
+ ++SpellingPtr;
+
+ // Skip over bytes until we find the offset we're looking for.
+ while (ByteNo) {
+ assert(SpellingPtr < SpellingEnd && "Didn't find byte offset!");
+
+ // Step over non-escapes simply.
+ if (*SpellingPtr != '\\') {
+ ++SpellingPtr;
+ --ByteNo;
+ continue;
+ }
+
+ // Otherwise, this is an escape character. Advance over it.
+ bool HadError = false;
+ if (SpellingPtr[1] == 'u' || SpellingPtr[1] == 'U') {
+ const char *EscapePtr = SpellingPtr;
+ unsigned Len = MeasureUCNEscape(SpellingStart, SpellingPtr, SpellingEnd,
+ 1, Features, HadError);
+ if (Len > ByteNo) {
+ // ByteNo is somewhere within the escape sequence.
+ SpellingPtr = EscapePtr;
+ break;
+ }
+ ByteNo -= Len;
+ } else {
+ ProcessCharEscape(SpellingStart, SpellingPtr, SpellingEnd, HadError,
+ FullSourceLoc(Tok.getLocation(), SM),
+ CharByteWidth*8, Diags, Features);
+ --ByteNo;
+ }
+ assert(!HadError && "This method isn't valid on erroneous strings");
+ }
+
+ return SpellingPtr-SpellingStart;
+}
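+
+// Illustrative sketch (not part of the original source): for a token spelled
+// "a\nb", the string data is the three bytes {'a', '\n', 'b'}. Mapping each
+// byte back to its offset within the spelling gives:
+//
+//   getOffsetOfStringByte(Tok, 0) == 1   // 'a', just after the open quote
+//   getOffsetOfStringByte(Tok, 1) == 2   // the backslash starting "\n"
+//   getOffsetOfStringByte(Tok, 2) == 4   // 'b', after the two-char escape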
diff --git a/contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp b/contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp
new file mode 100644
index 0000000..1c1979d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp
@@ -0,0 +1,313 @@
+//===--- MacroArgs.cpp - Formal argument info for Macros ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the MacroArgs interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/MacroArgs.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/SaveAndRestore.h"
+#include <algorithm>
+
+using namespace clang;
+
+/// MacroArgs create function - Build a MacroArgs object for the given
+/// macro, reusing an allocation from the preprocessor's free list when one
+/// is large enough.
+MacroArgs *MacroArgs::create(const MacroInfo *MI,
+ ArrayRef<Token> UnexpArgTokens,
+ bool VarargsElided, Preprocessor &PP) {
+ assert(MI->isFunctionLike() &&
+ "Can't have args for an object-like macro!");
+ MacroArgs **ResultEnt = nullptr;
+ unsigned ClosestMatch = ~0U;
+
+ // See if the free list has an entry with a big enough argument list to
+ // reuse. If so, reuse it.
+ for (MacroArgs **Entry = &PP.MacroArgCache; *Entry;
+ Entry = &(*Entry)->ArgCache)
+ if ((*Entry)->NumUnexpArgTokens >= UnexpArgTokens.size() &&
+ (*Entry)->NumUnexpArgTokens < ClosestMatch) {
+ ResultEnt = Entry;
+
+ // If we have an exact match, use it.
+ if ((*Entry)->NumUnexpArgTokens == UnexpArgTokens.size())
+ break;
+ // Otherwise, use the best fit.
+ ClosestMatch = (*Entry)->NumUnexpArgTokens;
+ }
+
+ MacroArgs *Result;
+ if (!ResultEnt) {
+ // Allocate memory for a MacroArgs object with the lexer tokens at the end.
+ Result = (MacroArgs*)malloc(sizeof(MacroArgs) +
+ UnexpArgTokens.size() * sizeof(Token));
+ // Construct the MacroArgs object.
+ new (Result) MacroArgs(UnexpArgTokens.size(), VarargsElided);
+ } else {
+ Result = *ResultEnt;
+ // Unlink this node from the preprocessor's singly linked list.
+ *ResultEnt = Result->ArgCache;
+ Result->NumUnexpArgTokens = UnexpArgTokens.size();
+ Result->VarargsElided = VarargsElided;
+ }
+
+ // Copy the actual unexpanded tokens to immediately after the result ptr.
+ if (!UnexpArgTokens.empty())
+ std::copy(UnexpArgTokens.begin(), UnexpArgTokens.end(),
+ const_cast<Token*>(Result->getUnexpArgument(0)));
+
+ return Result;
+}
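+
+// Illustrative sketch (not part of the original source): with a free list
+// holding entries of capacity 8 and 3 (in that order), the reuse policy
+// above behaves as follows:
+//
+//   create(..., /*3 tokens*/)  -> reuses the capacity-3 node (exact match)
+//   create(..., /*5 tokens*/)  -> reuses the capacity-8 node (best fit)
+//   create(..., /*10 tokens*/) -> mallocs a fresh block, with the tokens
+//                                 stored right after the MacroArgs header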
+
+/// destroy - Destroy and deallocate the memory for this object.
+///
+void MacroArgs::destroy(Preprocessor &PP) {
+ StringifiedArgs.clear();
+
+ // Don't clear PreExpArgTokens itself, just clear each entry. Clearing the
+ // outer vector would deallocate the element vectors' buffers, which we
+ // want to reuse.
+ for (unsigned i = 0, e = PreExpArgTokens.size(); i != e; ++i)
+ PreExpArgTokens[i].clear();
+
+ // Add this to the preprocessor's free list.
+ ArgCache = PP.MacroArgCache;
+ PP.MacroArgCache = this;
+}
+
+/// deallocate - This should only be called by the Preprocessor when managing
+/// its freelist.
+MacroArgs *MacroArgs::deallocate() {
+ MacroArgs *Next = ArgCache;
+
+ // Run the dtor to deallocate the vectors.
+ this->~MacroArgs();
+ // Release the memory for the object.
+ free(this);
+
+ return Next;
+}
+
+
+/// getArgLength - Given a pointer to an expanded or unexpanded argument,
+/// return the number of tokens, not counting the EOF, that make up the
+/// argument.
+unsigned MacroArgs::getArgLength(const Token *ArgPtr) {
+ unsigned NumArgTokens = 0;
+ for (; ArgPtr->isNot(tok::eof); ++ArgPtr)
+ ++NumArgTokens;
+ return NumArgTokens;
+}
+
+
+/// getUnexpArgument - Return the unexpanded tokens for the specified formal.
+///
+const Token *MacroArgs::getUnexpArgument(unsigned Arg) const {
+ // The unexpanded argument tokens start immediately after the MacroArgs object
+ // in memory.
+ const Token *Start = (const Token *)(this+1);
+ const Token *Result = Start;
+ // Scan to find Arg.
+ for (; Arg; ++Result) {
+ assert(Result < Start+NumUnexpArgTokens && "Invalid arg #");
+ if (Result->is(tok::eof))
+ --Arg;
+ }
+ assert(Result < Start+NumUnexpArgTokens && "Invalid arg #");
+ return Result;
+}
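+
+// Illustrative sketch (not part of the original source): for an invocation
+// F(a, b + 1), the trailing token storage is laid out as
+//
+//   [MacroArgs][ 'a' eof 'b' '+' '1' eof ]
+//
+// so getUnexpArgument(0) points at 'a' and getUnexpArgument(1) points at
+// 'b'; each formal's tokens run up to (but not including) its eof marker.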
+
+
+/// ArgNeedsPreexpansion - If we can prove that the argument won't be affected
+/// by pre-expansion, return false. Otherwise, conservatively return true.
+bool MacroArgs::ArgNeedsPreexpansion(const Token *ArgTok,
+ Preprocessor &PP) const {
+ // If there are no identifiers in the argument list, or if the identifiers are
+ // known to not be macros, pre-expansion won't modify it.
+ for (; ArgTok->isNot(tok::eof); ++ArgTok)
+ if (IdentifierInfo *II = ArgTok->getIdentifierInfo())
+ if (II->hasMacroDefinition())
+ // Return true even though the macro could be a function-like macro
+ // without a following '(' token, or could be disabled, or not visible.
+ return true;
+ return false;
+}
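+
+// Illustrative sketch (not part of the original source): an argument such
+// as (1 + 2) contains no identifiers, so it never needs pre-expansion; an
+// argument spelled N, where N has a macro definition, conservatively does,
+// even if N would not actually expand at this use site.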
+
+/// getPreExpArgument - Return the pre-expanded form of the specified
+/// argument.
+const std::vector<Token> &
+MacroArgs::getPreExpArgument(unsigned Arg, const MacroInfo *MI,
+ Preprocessor &PP) {
+ assert(Arg < MI->getNumArgs() && "Invalid argument number!");
+
+ // Lazily grow the table of pre-expanded arguments.
+ if (PreExpArgTokens.size() < MI->getNumArgs())
+ PreExpArgTokens.resize(MI->getNumArgs());
+
+ // If we have already pre-expanded this argument, return the cached tokens.
+ std::vector<Token> &Result = PreExpArgTokens[Arg];
+ if (!Result.empty()) return Result;
+
+ SaveAndRestore<bool> PreExpandingMacroArgs(PP.InMacroArgPreExpansion, true);
+
+ const Token *AT = getUnexpArgument(Arg);
+ unsigned NumToks = getArgLength(AT)+1; // Include the EOF.
+
+ // Otherwise, we have to pre-expand this argument, populating Result. To do
+ // this, we set up a fake TokenLexer to lex from the unexpanded argument
+ // list. With this installed, we lex expanded tokens until we hit the EOF
+ // token at the end of the unexp list.
+ PP.EnterTokenStream(AT, NumToks, false /*disable expand*/,
+ false /*owns tokens*/);
+
+ // Lex all of the macro-expanded tokens into Result.
+ do {
+ Result.push_back(Token());
+ Token &Tok = Result.back();
+ PP.Lex(Tok);
+ } while (Result.back().isNot(tok::eof));
+
+ // Pop the token stream off the top of the stack. We know that the internal
+ // pointer inside of it is to the "end" of the token stream, but the stack
+ // will not otherwise be popped until the next token is lexed. The problem is
+ // that the token may be lexed sometime after the vector of tokens itself is
+ // destroyed, which would be badness.
+ if (PP.InCachingLexMode())
+ PP.ExitCachingLexMode();
+ PP.RemoveTopOfLexerStack();
+ return Result;
+}
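+
+// Illustrative sketch (not part of the original source): given
+//
+//   #define ONE 1
+//   #define ID(x) x
+//
+// pre-expanding the argument of ID(ONE) feeds the unexpanded tokens
+// { ONE, eof } through the fake token stream above, producing { 1, eof },
+// which is then cached in PreExpArgTokens[0] for later uses of the formal.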
+
+
+/// StringifyArgument - Implement C99 6.10.3.2p2, converting a sequence of
+/// tokens into the literal string token that should be produced by the C #
+/// preprocessor operator. If Charify is true, then it should be turned into
+/// a character literal for the Microsoft charize (#@) extension.
+///
+Token MacroArgs::StringifyArgument(const Token *ArgToks,
+ Preprocessor &PP, bool Charify,
+ SourceLocation ExpansionLocStart,
+ SourceLocation ExpansionLocEnd) {
+ Token Tok;
+ Tok.startToken();
+ Tok.setKind(Charify ? tok::char_constant : tok::string_literal);
+
+ const Token *ArgTokStart = ArgToks;
+
+ // Stringify all the tokens.
+ SmallString<128> Result;
+ Result += "\"";
+
+ bool isFirst = true;
+ for (; ArgToks->isNot(tok::eof); ++ArgToks) {
+ const Token &Tok = *ArgToks;
+ if (!isFirst && (Tok.hasLeadingSpace() || Tok.isAtStartOfLine()))
+ Result += ' ';
+ isFirst = false;
+
+ // If this is a string or character constant, escape the token as specified
+ // by 6.10.3.2p2.
+ if (tok::isStringLiteral(Tok.getKind()) || // "foo", u8R"x(foo)x"_bar, etc.
+ Tok.is(tok::char_constant) || // 'x'
+ Tok.is(tok::wide_char_constant) || // L'x'.
+ Tok.is(tok::utf8_char_constant) || // u8'x'.
+ Tok.is(tok::utf16_char_constant) || // u'x'.
+ Tok.is(tok::utf32_char_constant)) { // U'x'.
+ bool Invalid = false;
+ std::string TokStr = PP.getSpelling(Tok, &Invalid);
+ if (!Invalid) {
+ std::string Str = Lexer::Stringify(TokStr);
+ Result.append(Str.begin(), Str.end());
+ }
+ } else if (Tok.is(tok::code_completion)) {
+ PP.CodeCompleteNaturalLanguage();
+ } else {
+ // Otherwise, just append the token. Do some gymnastics to get the token
+ // in place and avoid copies where possible.
+ unsigned CurStrLen = Result.size();
+ Result.resize(CurStrLen+Tok.getLength());
+ const char *BufPtr = Result.data() + CurStrLen;
+ bool Invalid = false;
+ unsigned ActualTokLen = PP.getSpelling(Tok, BufPtr, &Invalid);
+
+ if (!Invalid) {
+ // If getSpelling returned a pointer to an already uniqued version of
+ // the string instead of filling in BufPtr, memcpy it onto our string.
+ if (ActualTokLen && BufPtr != &Result[CurStrLen])
+ memcpy(&Result[CurStrLen], BufPtr, ActualTokLen);
+
+ // If the token was dirty, the spelling may be shorter than the token.
+ if (ActualTokLen != Tok.getLength())
+ Result.resize(CurStrLen+ActualTokLen);
+ }
+ }
+ }
+
+ // If the last character of the string is a \, and if it isn't escaped, this
+ // is an invalid string literal; diagnose it as specified in C99.
+ if (Result.back() == '\\') {
+ // Count the number of consecutive \ characters. If even, then they are
+ // just escaped backslashes; otherwise, it's an error.
+ unsigned FirstNonSlash = Result.size()-2;
+ // Guaranteed to find the starting " if nothing else.
+ while (Result[FirstNonSlash] == '\\')
+ --FirstNonSlash;
+ if ((Result.size()-1-FirstNonSlash) & 1) {
+ // Diagnose errors for things like: #define F(X) #X / F(\)
+ PP.Diag(ArgToks[-1], diag::pp_invalid_string_literal);
+ Result.pop_back(); // remove one of the \'s.
+ }
+ }
+ Result += '"';
+
+ // If this is the charify operation and the result is not a legal character
+ // constant, diagnose it.
+ if (Charify) {
+ // First step, turn double quotes into single quotes:
+ Result[0] = '\'';
+ Result[Result.size()-1] = '\'';
+
+ // Check for bogus character.
+ bool isBad = false;
+ if (Result.size() == 3)
+ isBad = Result[1] == '\''; // ''' is not legal. '\' already fixed above.
+ else
+ isBad = (Result.size() != 4 || Result[1] != '\\'); // Not '\x'
+
+ if (isBad) {
+ PP.Diag(ArgTokStart[0], diag::err_invalid_character_to_charify);
+ Result = "' '"; // Use something arbitrary, but legal.
+ }
+ }
+
+ PP.CreateString(Result, Tok,
+ ExpansionLocStart, ExpansionLocEnd);
+ return Tok;
+}
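+
+// Illustrative sketch (not part of the original source): with
+//
+//   #define S(x) #x
+//
+// the invocation S(a "b") stringifies to the token "a \"b\"", since the
+// inner string literal is re-escaped per C99 6.10.3.2p2, while S(\) ends
+// in an unescaped backslash and triggers the diagnostic handled above.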
+
+/// getStringifiedArgument - Compute, cache, and return the specified argument
+/// that has been 'stringified' as required by the # operator.
+const Token &MacroArgs::getStringifiedArgument(unsigned ArgNo,
+ Preprocessor &PP,
+ SourceLocation ExpansionLocStart,
+ SourceLocation ExpansionLocEnd) {
+ assert(ArgNo < NumUnexpArgTokens && "Invalid argument number!");
+ if (StringifiedArgs.empty()) {
+ StringifiedArgs.resize(getNumArguments());
+ memset((void*)&StringifiedArgs[0], 0,
+ sizeof(StringifiedArgs[0])*getNumArguments());
+ }
+ if (StringifiedArgs[ArgNo].isNot(tok::string_literal))
+ StringifiedArgs[ArgNo] = StringifyArgument(getUnexpArgument(ArgNo), PP,
+ /*Charify=*/false,
+ ExpansionLocStart,
+ ExpansionLocEnd);
+ return StringifiedArgs[ArgNo];
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp b/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp
new file mode 100644
index 0000000..0b4292f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp
@@ -0,0 +1,245 @@
+//===--- MacroInfo.cpp - Information about #defined identifiers -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the MacroInfo interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Preprocessor.h"
+using namespace clang;
+
+MacroInfo::MacroInfo(SourceLocation DefLoc)
+ : Location(DefLoc),
+ ArgumentList(nullptr),
+ NumArguments(0),
+ IsDefinitionLengthCached(false),
+ IsFunctionLike(false),
+ IsC99Varargs(false),
+ IsGNUVarargs(false),
+ IsBuiltinMacro(false),
+ HasCommaPasting(false),
+ IsDisabled(false),
+ IsUsed(false),
+ IsAllowRedefinitionsWithoutWarning(false),
+ IsWarnIfUnused(false),
+ FromASTFile(false),
+ UsedForHeaderGuard(false) {
+}
+
+unsigned MacroInfo::getDefinitionLengthSlow(SourceManager &SM) const {
+ assert(!IsDefinitionLengthCached);
+ IsDefinitionLengthCached = true;
+
+ if (ReplacementTokens.empty())
+ return (DefinitionLength = 0);
+
+ const Token &firstToken = ReplacementTokens.front();
+ const Token &lastToken = ReplacementTokens.back();
+ SourceLocation macroStart = firstToken.getLocation();
+ SourceLocation macroEnd = lastToken.getLocation();
+ assert(macroStart.isValid() && macroEnd.isValid());
+ assert((macroStart.isFileID() || firstToken.is(tok::comment)) &&
+ "Macro defined in macro?");
+ assert((macroEnd.isFileID() || lastToken.is(tok::comment)) &&
+ "Macro defined in macro?");
+ std::pair<FileID, unsigned>
+ startInfo = SM.getDecomposedExpansionLoc(macroStart);
+ std::pair<FileID, unsigned>
+ endInfo = SM.getDecomposedExpansionLoc(macroEnd);
+ assert(startInfo.first == endInfo.first &&
+ "Macro definition spanning multiple FileIDs ?");
+ assert(startInfo.second <= endInfo.second);
+ DefinitionLength = endInfo.second - startInfo.second;
+ DefinitionLength += lastToken.getLength();
+
+ return DefinitionLength;
+}
+
+/// \brief Return true if the specified macro definition is equal to
+/// this macro in spelling, arguments, and whitespace.
+///
+/// \param Syntactically if true, the macro definitions can be identical even
+/// if they use different identifiers for the function macro parameters.
+/// Otherwise the comparison is lexical and this implements the rules in
+/// C99 6.10.3.
+bool MacroInfo::isIdenticalTo(const MacroInfo &Other, Preprocessor &PP,
+ bool Syntactically) const {
+ bool Lexically = !Syntactically;
+
+ // Check # tokens in replacement, number of args, and various flags all match.
+ if (ReplacementTokens.size() != Other.ReplacementTokens.size() ||
+ getNumArgs() != Other.getNumArgs() ||
+ isFunctionLike() != Other.isFunctionLike() ||
+ isC99Varargs() != Other.isC99Varargs() ||
+ isGNUVarargs() != Other.isGNUVarargs())
+ return false;
+
+ if (Lexically) {
+ // Check arguments.
+ for (arg_iterator I = arg_begin(), OI = Other.arg_begin(), E = arg_end();
+ I != E; ++I, ++OI)
+ if (*I != *OI) return false;
+ }
+
+ // Check all the tokens.
+ for (unsigned i = 0, e = ReplacementTokens.size(); i != e; ++i) {
+ const Token &A = ReplacementTokens[i];
+ const Token &B = Other.ReplacementTokens[i];
+ if (A.getKind() != B.getKind())
+ return false;
+
+ // If this isn't the first token, check that the whitespace and
+ // start-of-line characteristics match.
+ if (i != 0 &&
+ (A.isAtStartOfLine() != B.isAtStartOfLine() ||
+ A.hasLeadingSpace() != B.hasLeadingSpace()))
+ return false;
+
+ // If this is an identifier, it is easy.
+ if (A.getIdentifierInfo() || B.getIdentifierInfo()) {
+ if (A.getIdentifierInfo() == B.getIdentifierInfo())
+ continue;
+ if (Lexically)
+ return false;
+ // With syntactic equivalence the parameter names can be different as long
+ // as they are used in the same place.
+ int AArgNum = getArgumentNum(A.getIdentifierInfo());
+ if (AArgNum == -1)
+ return false;
+ if (AArgNum != Other.getArgumentNum(B.getIdentifierInfo()))
+ return false;
+ continue;
+ }
+
+ // Otherwise, check the spelling.
+ if (PP.getSpelling(A) != PP.getSpelling(B))
+ return false;
+ }
+
+ return true;
+}
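+
+// Illustrative sketch (not part of the original source): the definitions
+//
+//   #define F(a) (a)
+//   #define F(b) (b)
+//
+// are not identical lexically (the parameter names differ, per C99 6.10.3),
+// but they are identical syntactically: each parameter reference resolves
+// to argument number 0 in both definitions.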
+
+void MacroInfo::dump() const {
+ llvm::raw_ostream &Out = llvm::errs();
+
+ // FIXME: Dump locations.
+ Out << "MacroInfo " << this;
+ if (IsBuiltinMacro) Out << " builtin";
+ if (IsDisabled) Out << " disabled";
+ if (IsUsed) Out << " used";
+ if (IsAllowRedefinitionsWithoutWarning)
+ Out << " allow_redefinitions_without_warning";
+ if (IsWarnIfUnused) Out << " warn_if_unused";
+ if (FromASTFile) Out << " imported";
+ if (UsedForHeaderGuard) Out << " header_guard";
+
+ Out << "\n #define <macro>";
+ if (IsFunctionLike) {
+ Out << "(";
+ for (unsigned I = 0; I != NumArguments; ++I) {
+ if (I) Out << ", ";
+ Out << ArgumentList[I]->getName();
+ }
+ if (IsC99Varargs || IsGNUVarargs) {
+ if (NumArguments && IsC99Varargs) Out << ", ";
+ Out << "...";
+ }
+ Out << ")";
+ }
+
+ bool First = true;
+ for (const Token &Tok : ReplacementTokens) {
+ // Leading space is semantically meaningful in a macro definition,
+ // so preserve it in the dump output.
+ if (First || Tok.hasLeadingSpace())
+ Out << " ";
+ First = false;
+
+ if (const char *Punc = tok::getPunctuatorSpelling(Tok.getKind()))
+ Out << Punc;
+ else if (Tok.isLiteral() && Tok.getLiteralData())
+ Out << StringRef(Tok.getLiteralData(), Tok.getLength());
+ else if (auto *II = Tok.getIdentifierInfo())
+ Out << II->getName();
+ else
+ Out << Tok.getName();
+ }
+}
+
+MacroDirective::DefInfo MacroDirective::getDefinition() {
+ MacroDirective *MD = this;
+ SourceLocation UndefLoc;
+ Optional<bool> isPublic;
+ for (; MD; MD = MD->getPrevious()) {
+ if (DefMacroDirective *DefMD = dyn_cast<DefMacroDirective>(MD))
+ return DefInfo(DefMD, UndefLoc,
+ !isPublic.hasValue() || isPublic.getValue());
+
+ if (UndefMacroDirective *UndefMD = dyn_cast<UndefMacroDirective>(MD)) {
+ UndefLoc = UndefMD->getLocation();
+ continue;
+ }
+
+ VisibilityMacroDirective *VisMD = cast<VisibilityMacroDirective>(MD);
+ if (!isPublic.hasValue())
+ isPublic = VisMD->isPublic();
+ }
+
+ return DefInfo(nullptr, UndefLoc,
+ !isPublic.hasValue() || isPublic.getValue());
+}
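+
+// Illustrative sketch (not part of the original source): for the directive
+// history
+//
+//   #define X 1   // older
+//   #undef  X     // most recent
+//
+// getDefinition() starts at the #undef, records its location, and keeps
+// walking back to the #define, so the returned DefInfo pairs the definition
+// with the #undef location that ends its validity range.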
+
+const MacroDirective::DefInfo
+MacroDirective::findDirectiveAtLoc(SourceLocation L, SourceManager &SM) const {
+ assert(L.isValid() && "SourceLocation is invalid.");
+ for (DefInfo Def = getDefinition(); Def; Def = Def.getPreviousDefinition()) {
+ if (Def.getLocation().isInvalid() || // For macros defined on the command line.
+ SM.isBeforeInTranslationUnit(Def.getLocation(), L))
+ return (!Def.isUndefined() ||
+ SM.isBeforeInTranslationUnit(L, Def.getUndefLocation()))
+ ? Def : DefInfo();
+ }
+ return DefInfo();
+}
+
+void MacroDirective::dump() const {
+ llvm::raw_ostream &Out = llvm::errs();
+
+ switch (getKind()) {
+ case MD_Define: Out << "DefMacroDirective"; break;
+ case MD_Undefine: Out << "UndefMacroDirective"; break;
+ case MD_Visibility: Out << "VisibilityMacroDirective"; break;
+ }
+ Out << " " << this;
+ // FIXME: Dump SourceLocation.
+ if (auto *Prev = getPrevious())
+ Out << " prev " << Prev;
+ if (IsFromPCH) Out << " from_pch";
+
+ if (isa<VisibilityMacroDirective>(this))
+ Out << (IsPublic ? " public" : " private");
+
+ if (auto *DMD = dyn_cast<DefMacroDirective>(this)) {
+ if (auto *Info = DMD->getInfo()) {
+ Out << "\n ";
+ Info->dump();
+ }
+ }
+ Out << "\n";
+}
+
+ModuleMacro *ModuleMacro::create(Preprocessor &PP, Module *OwningModule,
+ IdentifierInfo *II, MacroInfo *Macro,
+ ArrayRef<ModuleMacro *> Overrides) {
+ void *Mem = PP.getPreprocessorAllocator().Allocate(
+ sizeof(ModuleMacro) + sizeof(ModuleMacro *) * Overrides.size(),
+ llvm::alignOf<ModuleMacro>());
+ return new (Mem) ModuleMacro(OwningModule, II, Macro, Overrides);
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp b/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp
new file mode 100644
index 0000000..a752402
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp
@@ -0,0 +1,2460 @@
+//===--- ModuleMap.cpp - Describe the layout of modules ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ModuleMap implementation, which describes the layout
+// of a module as it relates to headers.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Lex/ModuleMap.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/HeaderSearchOptions.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include <stdlib.h>
+#if defined(LLVM_ON_UNIX)
+#include <limits.h>
+#endif
+using namespace clang;
+
+Module::ExportDecl
+ModuleMap::resolveExport(Module *Mod,
+ const Module::UnresolvedExportDecl &Unresolved,
+ bool Complain) const {
+ // We may have just a wildcard.
+ if (Unresolved.Id.empty()) {
+ assert(Unresolved.Wildcard && "Invalid unresolved export");
+ return Module::ExportDecl(nullptr, true);
+ }
+
+ // Resolve the module-id.
+ Module *Context = resolveModuleId(Unresolved.Id, Mod, Complain);
+ if (!Context)
+ return Module::ExportDecl();
+
+ return Module::ExportDecl(Context, Unresolved.Wildcard);
+}
+
+Module *ModuleMap::resolveModuleId(const ModuleId &Id, Module *Mod,
+ bool Complain) const {
+ // Find the starting module.
+ Module *Context = lookupModuleUnqualified(Id[0].first, Mod);
+ if (!Context) {
+ if (Complain)
+ Diags.Report(Id[0].second, diag::err_mmap_missing_module_unqualified)
+ << Id[0].first << Mod->getFullModuleName();
+
+ return nullptr;
+ }
+
+ // Dig into the module path.
+ for (unsigned I = 1, N = Id.size(); I != N; ++I) {
+ Module *Sub = lookupModuleQualified(Id[I].first, Context);
+ if (!Sub) {
+ if (Complain)
+ Diags.Report(Id[I].second, diag::err_mmap_missing_module_qualified)
+ << Id[I].first << Context->getFullModuleName()
+ << SourceRange(Id[0].second, Id[I-1].second);
+
+ return nullptr;
+ }
+
+ Context = Sub;
+ }
+
+ return Context;
+}
+
+ModuleMap::ModuleMap(SourceManager &SourceMgr, DiagnosticsEngine &Diags,
+ const LangOptions &LangOpts, const TargetInfo *Target,
+ HeaderSearch &HeaderInfo)
+ : SourceMgr(SourceMgr), Diags(Diags), LangOpts(LangOpts), Target(Target),
+ HeaderInfo(HeaderInfo), BuiltinIncludeDir(nullptr),
+ CompilingModule(nullptr), SourceModule(nullptr), NumCreatedModules(0) {
+ MMapLangOpts.LineComment = true;
+}
+
+ModuleMap::~ModuleMap() {
+ for (llvm::StringMap<Module *>::iterator I = Modules.begin(),
+ IEnd = Modules.end();
+ I != IEnd; ++I) {
+ delete I->getValue();
+ }
+}
+
+void ModuleMap::setTarget(const TargetInfo &Target) {
+ assert((!this->Target || this->Target == &Target) &&
+ "Improper target override");
+ this->Target = &Target;
+}
+
+/// \brief "Sanitize" a filename so that it can be used as an identifier.
+static StringRef sanitizeFilenameAsIdentifier(StringRef Name,
+ SmallVectorImpl<char> &Buffer) {
+ if (Name.empty())
+ return Name;
+
+ if (!isValidIdentifier(Name)) {
+ // If we don't already have something with the form of an identifier,
+ // create a buffer with the sanitized name.
+ Buffer.clear();
+ if (isDigit(Name[0]))
+ Buffer.push_back('_');
+ Buffer.reserve(Buffer.size() + Name.size());
+ for (unsigned I = 0, N = Name.size(); I != N; ++I) {
+ if (isIdentifierBody(Name[I]))
+ Buffer.push_back(Name[I]);
+ else
+ Buffer.push_back('_');
+ }
+
+ Name = StringRef(Buffer.data(), Buffer.size());
+ }
+
+ while (llvm::StringSwitch<bool>(Name)
+#define KEYWORD(Keyword,Conditions) .Case(#Keyword, true)
+#define ALIAS(Keyword, AliasOf, Conditions) .Case(Keyword, true)
+#include "clang/Basic/TokenKinds.def"
+ .Default(false)) {
+ if (Name.data() != Buffer.data())
+ Buffer.append(Name.begin(), Name.end());
+ Buffer.push_back('_');
+ Name = StringRef(Buffer.data(), Buffer.size());
+ }
+
+ return Name;
+}
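+
+// Illustrative sketch (not part of the original source):
+//
+//   sanitizeFilenameAsIdentifier("9foo-bar", Buf) == "_9foo_bar"
+//   sanitizeFilenameAsIdentifier("class", Buf)    == "class_"
+//
+// The first result gains a leading underscore (the name began with a digit)
+// and '_' replaces the '-'; the second gains a trailing underscore because
+// the name collides with a C++ keyword.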
+
+/// \brief Determine whether the given file name is the name of a builtin
+/// header, supplied by Clang to replace, override, or augment existing system
+/// headers.
+static bool isBuiltinHeader(StringRef FileName) {
+ return llvm::StringSwitch<bool>(FileName)
+ .Case("float.h", true)
+ .Case("iso646.h", true)
+ .Case("limits.h", true)
+ .Case("stdalign.h", true)
+ .Case("stdarg.h", true)
+ .Case("stdbool.h", true)
+ .Case("stddef.h", true)
+ .Case("stdint.h", true)
+ .Case("tgmath.h", true)
+ .Case("unwind.h", true)
+ .Default(false);
+}
+
+ModuleMap::HeadersMap::iterator
+ModuleMap::findKnownHeader(const FileEntry *File) {
+ HeadersMap::iterator Known = Headers.find(File);
+ if (HeaderInfo.getHeaderSearchOpts().ImplicitModuleMaps &&
+ Known == Headers.end() && File->getDir() == BuiltinIncludeDir &&
+ isBuiltinHeader(llvm::sys::path::filename(File->getName()))) {
+ HeaderInfo.loadTopLevelSystemModules();
+ return Headers.find(File);
+ }
+ return Known;
+}
+
+ModuleMap::KnownHeader
+ModuleMap::findHeaderInUmbrellaDirs(const FileEntry *File,
+ SmallVectorImpl<const DirectoryEntry *> &IntermediateDirs) {
+ if (UmbrellaDirs.empty())
+ return KnownHeader();
+
+ const DirectoryEntry *Dir = File->getDir();
+ assert(Dir && "file in no directory");
+
+ // Note: as an egregious but useful hack we use the real path here, because
+ // frameworks moving from top-level frameworks to embedded frameworks tend
+ // to be symlinked from the top-level location to the embedded location,
+ // and we need to resolve lookups as if we had found the embedded location.
+ StringRef DirName = SourceMgr.getFileManager().getCanonicalName(Dir);
+
+ // Keep walking up the directory hierarchy, looking for a directory with
+ // an umbrella header.
+ do {
+ auto KnownDir = UmbrellaDirs.find(Dir);
+ if (KnownDir != UmbrellaDirs.end())
+ return KnownHeader(KnownDir->second, NormalHeader);
+
+ IntermediateDirs.push_back(Dir);
+
+ // Retrieve our parent path.
+ DirName = llvm::sys::path::parent_path(DirName);
+ if (DirName.empty())
+ break;
+
+ // Resolve the parent path to a directory entry.
+ Dir = SourceMgr.getFileManager().getDirectory(DirName);
+ } while (Dir);
+ return KnownHeader();
+}
+
+static bool violatesPrivateInclude(Module *RequestingModule,
+ const FileEntry *IncFileEnt,
+ ModuleMap::ModuleHeaderRole Role,
+ Module *RequestedModule) {
+ bool IsPrivateRole = Role & ModuleMap::PrivateHeader;
+#ifndef NDEBUG
+ if (IsPrivateRole) {
+ // Check for consistency between the module header role
+ // as obtained from the lookup and as obtained from the module.
+ // This check is not cheap, so enable it only for debugging.
+ bool IsPrivate = false;
+ SmallVectorImpl<Module::Header> *HeaderList[] = {
+ &RequestedModule->Headers[Module::HK_Private],
+ &RequestedModule->Headers[Module::HK_PrivateTextual]};
+ for (auto *Hs : HeaderList)
+ IsPrivate |=
+ std::find_if(Hs->begin(), Hs->end(), [&](const Module::Header &H) {
+ return H.Entry == IncFileEnt;
+ }) != Hs->end();
+ assert((!IsPrivateRole || IsPrivate) && "inconsistent headers and roles");
+ }
+#endif
+ return IsPrivateRole && (!RequestingModule ||
+ RequestedModule->getTopLevelModule() !=
+ RequestingModule->getTopLevelModule());
+}
+
+static Module *getTopLevelOrNull(Module *M) {
+ return M ? M->getTopLevelModule() : nullptr;
+}
+
+void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
+ SourceLocation FilenameLoc,
+ StringRef Filename,
+ const FileEntry *File) {
+ // No errors for indirect modules. This may be a bit of a problem for modules
+ // with no source files.
+ if (getTopLevelOrNull(RequestingModule) != getTopLevelOrNull(SourceModule))
+ return;
+
+ if (RequestingModule)
+ resolveUses(RequestingModule, /*Complain=*/false);
+
+ bool Excluded = false;
+ Module *Private = nullptr;
+ Module *NotUsed = nullptr;
+
+ HeadersMap::iterator Known = findKnownHeader(File);
+ if (Known != Headers.end()) {
+ for (const KnownHeader &Header : Known->second) {
+ // Remember private headers for later printing of a diagnostic.
+ if (violatesPrivateInclude(RequestingModule, File, Header.getRole(),
+ Header.getModule())) {
+ Private = Header.getModule();
+ continue;
+ }
+
+ // If uses need to be specified explicitly, we are only allowed to return
+ // modules that are explicitly used by the requesting module.
+ if (RequestingModule && LangOpts.ModulesDeclUse &&
+ !RequestingModule->directlyUses(Header.getModule())) {
+ NotUsed = Header.getModule();
+ continue;
+ }
+
+ // We have found a module that we can happily use.
+ return;
+ }
+
+ Excluded = true;
+ }
+
+ // We have found a header, but it is private.
+ if (Private) {
+ Diags.Report(FilenameLoc, diag::warn_use_of_private_header_outside_module)
+ << Filename;
+ return;
+ }
+
+ // We have found a module, but we don't use it.
+ if (NotUsed) {
+ Diags.Report(FilenameLoc, diag::err_undeclared_use_of_module)
+ << RequestingModule->getFullModuleName() << Filename;
+ return;
+ }
+
+ if (Excluded || isHeaderInUmbrellaDirs(File))
+ return;
+
+ // At this point, only non-modular includes remain.
+
+ if (LangOpts.ModulesStrictDeclUse) {
+ Diags.Report(FilenameLoc, diag::err_undeclared_use_of_module)
+ << RequestingModule->getFullModuleName() << Filename;
+ } else if (RequestingModule) {
+ diag::kind DiagID = RequestingModule->getTopLevelModule()->IsFramework ?
+ diag::warn_non_modular_include_in_framework_module :
+ diag::warn_non_modular_include_in_module;
+ Diags.Report(FilenameLoc, DiagID) << RequestingModule->getFullModuleName();
+ }
+}
+
+static bool isBetterKnownHeader(const ModuleMap::KnownHeader &New,
+ const ModuleMap::KnownHeader &Old) {
+ // Prefer available modules.
+ if (New.getModule()->isAvailable() && !Old.getModule()->isAvailable())
+ return true;
+
+ // Prefer a public header over a private header.
+ if ((New.getRole() & ModuleMap::PrivateHeader) !=
+ (Old.getRole() & ModuleMap::PrivateHeader))
+ return !(New.getRole() & ModuleMap::PrivateHeader);
+
+ // Prefer a non-textual header over a textual header.
+ if ((New.getRole() & ModuleMap::TextualHeader) !=
+ (Old.getRole() & ModuleMap::TextualHeader))
+ return !(New.getRole() & ModuleMap::TextualHeader);
+
+ // Don't have a reason to choose between these. Just keep the first one.
+ return false;
+}
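+
+// Illustrative sketch (not part of the original source): the preference
+// order above is, from strongest to weakest,
+//
+//   available module  >  unavailable module
+//   public header     >  private header
+//   non-textual       >  textual
+//
+// and a tie keeps the incumbent, so the earliest module map entry wins.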
+
+ModuleMap::KnownHeader ModuleMap::findModuleForHeader(const FileEntry *File) {
+ auto MakeResult = [&](ModuleMap::KnownHeader R) -> ModuleMap::KnownHeader {
+ if (R.getRole() & ModuleMap::TextualHeader)
+ return ModuleMap::KnownHeader();
+ return R;
+ };
+
+ HeadersMap::iterator Known = findKnownHeader(File);
+ if (Known != Headers.end()) {
+ ModuleMap::KnownHeader Result;
+ // Iterate over all modules that 'File' is part of to find the best fit.
+ for (KnownHeader &H : Known->second) {
+ // Prefer a header from the current module over all others.
+ if (H.getModule()->getTopLevelModule() == CompilingModule)
+ return MakeResult(H);
+ if (!Result || isBetterKnownHeader(H, Result))
+ Result = H;
+ }
+ return MakeResult(Result);
+ }
+
+ return MakeResult(findOrCreateModuleForHeaderInUmbrellaDir(File));
+}
+
+ModuleMap::KnownHeader
+ModuleMap::findOrCreateModuleForHeaderInUmbrellaDir(const FileEntry *File) {
+ assert(!Headers.count(File) && "already have a module for this header");
+
+ SmallVector<const DirectoryEntry *, 2> SkippedDirs;
+ KnownHeader H = findHeaderInUmbrellaDirs(File, SkippedDirs);
+ if (H) {
+ Module *Result = H.getModule();
+
+ // Search up the module stack until we find a module with an umbrella
+ // directory.
+ Module *UmbrellaModule = Result;
+ while (!UmbrellaModule->getUmbrellaDir() && UmbrellaModule->Parent)
+ UmbrellaModule = UmbrellaModule->Parent;
+
+ if (UmbrellaModule->InferSubmodules) {
+ const FileEntry *UmbrellaModuleMap =
+ getModuleMapFileForUniquing(UmbrellaModule);
+
+ // Infer submodules for each of the directories we found between
+ // the directory of the umbrella header and the directory where
+ // the actual header is located.
+ bool Explicit = UmbrellaModule->InferExplicitSubmodules;
+
+ for (unsigned I = SkippedDirs.size(); I != 0; --I) {
+ // Find or create the module that corresponds to this directory name.
+ SmallString<32> NameBuf;
+ StringRef Name = sanitizeFilenameAsIdentifier(
+ llvm::sys::path::stem(SkippedDirs[I-1]->getName()), NameBuf);
+ Result = findOrCreateModule(Name, Result, /*IsFramework=*/false,
+ Explicit).first;
+ InferredModuleAllowedBy[Result] = UmbrellaModuleMap;
+ Result->IsInferred = true;
+
+ // Associate the module and the directory.
+ UmbrellaDirs[SkippedDirs[I-1]] = Result;
+
+ // If inferred submodules export everything they import, add a
+ // wildcard to the set of exports.
+ if (UmbrellaModule->InferExportWildcard && Result->Exports.empty())
+ Result->Exports.push_back(Module::ExportDecl(nullptr, true));
+ }
+
+ // Infer a submodule with the same name as this header file.
+ SmallString<32> NameBuf;
+ StringRef Name = sanitizeFilenameAsIdentifier(
+ llvm::sys::path::stem(File->getName()), NameBuf);
+ Result = findOrCreateModule(Name, Result, /*IsFramework=*/false,
+ Explicit).first;
+ InferredModuleAllowedBy[Result] = UmbrellaModuleMap;
+ Result->IsInferred = true;
+ Result->addTopHeader(File);
+
+ // If inferred submodules export everything they import, add a
+ // wildcard to the set of exports.
+ if (UmbrellaModule->InferExportWildcard && Result->Exports.empty())
+ Result->Exports.push_back(Module::ExportDecl(nullptr, true));
+ } else {
+ // Record each of the directories we stepped through as being part of
+ // the module we found, since the umbrella header covers them all.
+ for (unsigned I = 0, N = SkippedDirs.size(); I != N; ++I)
+ UmbrellaDirs[SkippedDirs[I]] = Result;
+ }
+
+ KnownHeader Header(Result, NormalHeader);
+ Headers[File].push_back(Header);
+ return Header;
+ }
+
+ return KnownHeader();
+}
+
+ArrayRef<ModuleMap::KnownHeader>
+ModuleMap::findAllModulesForHeader(const FileEntry *File) const {
+ auto It = Headers.find(File);
+ if (It == Headers.end())
+ return None;
+ return It->second;
+}
+
+bool ModuleMap::isHeaderInUnavailableModule(const FileEntry *Header) const {
+ return isHeaderUnavailableInModule(Header, nullptr);
+}
+
+bool
+ModuleMap::isHeaderUnavailableInModule(const FileEntry *Header,
+ const Module *RequestingModule) const {
+ HeadersMap::const_iterator Known = Headers.find(Header);
+ if (Known != Headers.end()) {
+ for (SmallVectorImpl<KnownHeader>::const_iterator
+ I = Known->second.begin(),
+ E = Known->second.end();
+ I != E; ++I) {
+ if (I->isAvailable() && (!RequestingModule ||
+ I->getModule()->isSubModuleOf(RequestingModule)))
+ return false;
+ }
+ return true;
+ }
+
+ const DirectoryEntry *Dir = Header->getDir();
+ SmallVector<const DirectoryEntry *, 2> SkippedDirs;
+ StringRef DirName = Dir->getName();
+
+ auto IsUnavailable = [&](const Module *M) {
+ return !M->isAvailable() && (!RequestingModule ||
+ M->isSubModuleOf(RequestingModule));
+ };
+
+ // Keep walking up the directory hierarchy, looking for a directory with
+ // an umbrella header.
+ do {
+ llvm::DenseMap<const DirectoryEntry *, Module *>::const_iterator KnownDir
+ = UmbrellaDirs.find(Dir);
+ if (KnownDir != UmbrellaDirs.end()) {
+ Module *Found = KnownDir->second;
+ if (IsUnavailable(Found))
+ return true;
+
+ // Search up the module stack until we find a module with an umbrella
+ // directory.
+ Module *UmbrellaModule = Found;
+ while (!UmbrellaModule->getUmbrellaDir() && UmbrellaModule->Parent)
+ UmbrellaModule = UmbrellaModule->Parent;
+
+ if (UmbrellaModule->InferSubmodules) {
+ for (unsigned I = SkippedDirs.size(); I != 0; --I) {
+ // Find or create the module that corresponds to this directory name.
+ SmallString<32> NameBuf;
+ StringRef Name = sanitizeFilenameAsIdentifier(
+ llvm::sys::path::stem(SkippedDirs[I-1]->getName()),
+ NameBuf);
+ Found = lookupModuleQualified(Name, Found);
+ if (!Found)
+ return false;
+ if (IsUnavailable(Found))
+ return true;
+ }
+
+ // Infer a submodule with the same name as this header file.
+ SmallString<32> NameBuf;
+ StringRef Name = sanitizeFilenameAsIdentifier(
+ llvm::sys::path::stem(Header->getName()),
+ NameBuf);
+ Found = lookupModuleQualified(Name, Found);
+ if (!Found)
+ return false;
+ }
+
+ return IsUnavailable(Found);
+ }
+
+ SkippedDirs.push_back(Dir);
+
+ // Retrieve our parent path.
+ DirName = llvm::sys::path::parent_path(DirName);
+ if (DirName.empty())
+ break;
+
+ // Resolve the parent path to a directory entry.
+ Dir = SourceMgr.getFileManager().getDirectory(DirName);
+ } while (Dir);
+
+ return false;
+}
+
+Module *ModuleMap::findModule(StringRef Name) const {
+ llvm::StringMap<Module *>::const_iterator Known = Modules.find(Name);
+ if (Known != Modules.end())
+ return Known->getValue();
+
+ return nullptr;
+}
+
+Module *ModuleMap::lookupModuleUnqualified(StringRef Name,
+ Module *Context) const {
+ for (; Context; Context = Context->Parent) {
+ if (Module *Sub = lookupModuleQualified(Name, Context))
+ return Sub;
+ }
+
+ return findModule(Name);
+}
+
+Module *ModuleMap::lookupModuleQualified(StringRef Name, Module *Context) const {
+ if (!Context)
+ return findModule(Name);
+
+ return Context->findSubmodule(Name);
+}
+
+std::pair<Module *, bool>
+ModuleMap::findOrCreateModule(StringRef Name, Module *Parent, bool IsFramework,
+ bool IsExplicit) {
+ // Try to find an existing module with this name.
+ if (Module *Sub = lookupModuleQualified(Name, Parent))
+ return std::make_pair(Sub, false);
+
+ // Create a new module with this name.
+ Module *Result = new Module(Name, SourceLocation(), Parent,
+ IsFramework, IsExplicit, NumCreatedModules++);
+ if (LangOpts.CurrentModule == Name) {
+ SourceModule = Result;
+ SourceModuleName = Name;
+ }
+ if (!Parent) {
+ Modules[Name] = Result;
+ if (!LangOpts.CurrentModule.empty() && !CompilingModule &&
+ Name == LangOpts.CurrentModule) {
+ CompilingModule = Result;
+ }
+ }
+ return std::make_pair(Result, true);
+}
+
+/// \brief For a framework module, infer the framework against which we
+/// should link.
+static void inferFrameworkLink(Module *Mod, const DirectoryEntry *FrameworkDir,
+ FileManager &FileMgr) {
+ assert(Mod->IsFramework && "Can only infer linking for framework modules");
+ assert(!Mod->isSubFramework() &&
+ "Can only infer linking for top-level frameworks");
+
+ SmallString<128> LibName;
+ LibName += FrameworkDir->getName();
+ llvm::sys::path::append(LibName, Mod->Name);
+
+ // The library name of a framework has more than one possible extension since
+ // the introduction of the text-based dynamic library format. We need to check
+ // for both before we give up.
+ static const char *frameworkExtensions[] = {"", ".tbd"};
+ for (const auto *extension : frameworkExtensions) {
+ llvm::sys::path::replace_extension(LibName, extension);
+ if (FileMgr.getFile(LibName)) {
+ Mod->LinkLibraries.push_back(Module::LinkLibrary(Mod->Name,
+ /*IsFramework=*/true));
+ return;
+ }
+ }
+}
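+
+// Illustrative sketch (not part of the original source): for a framework at
+// /Library/Frameworks/Foo.framework, the loop above probes
+//
+//   /Library/Frameworks/Foo.framework/Foo       (the dynamic library)
+//   /Library/Frameworks/Foo.framework/Foo.tbd   (text-based stub)
+//
+// and records a LinkLibrary("Foo", /*IsFramework=*/true) if either exists.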
+
+Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
+ bool IsSystem, Module *Parent) {
+ Attributes Attrs;
+ Attrs.IsSystem = IsSystem;
+ return inferFrameworkModule(FrameworkDir, Attrs, Parent);
+}
+
+Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
+ Attributes Attrs, Module *Parent) {
+ // Note: as an egregious but useful hack we use the real path here, because
+ // we might be looking at an embedded framework that symlinks out to a
+ // top-level framework, and we need to infer as if we were naming the
+ // top-level framework.
+ StringRef FrameworkDirName =
+ SourceMgr.getFileManager().getCanonicalName(FrameworkDir);
+
+ // In case this is a case-insensitive filesystem, use the canonical
+ // directory name as the ModuleName, since modules are case-sensitive.
+ // FIXME: we should be able to give a fix-it hint for the correct spelling.
+ SmallString<32> ModuleNameStorage;
+ StringRef ModuleName = sanitizeFilenameAsIdentifier(
+ llvm::sys::path::stem(FrameworkDirName), ModuleNameStorage);
+
+ // Check whether we've already found this module.
+ if (Module *Mod = lookupModuleQualified(ModuleName, Parent))
+ return Mod;
+
+ FileManager &FileMgr = SourceMgr.getFileManager();
+
+ // If the framework has a parent path from which we're allowed to infer
+ // a framework module, do so.
+ const FileEntry *ModuleMapFile = nullptr;
+ if (!Parent) {
+ // Determine whether we're allowed to infer a module map.
+ bool canInfer = false;
+ if (llvm::sys::path::has_parent_path(FrameworkDirName)) {
+ // Figure out the parent path.
+ StringRef Parent = llvm::sys::path::parent_path(FrameworkDirName);
+ if (const DirectoryEntry *ParentDir = FileMgr.getDirectory(Parent)) {
+ // Check whether we have already looked into the parent directory
+ // for a module map.
+ llvm::DenseMap<const DirectoryEntry *, InferredDirectory>::const_iterator
+ inferred = InferredDirectories.find(ParentDir);
+ if (inferred == InferredDirectories.end()) {
+ // We haven't looked here before. Load a module map, if there is
+ // one.
+ bool IsFrameworkDir = Parent.endswith(".framework");
+ if (const FileEntry *ModMapFile =
+ HeaderInfo.lookupModuleMapFile(ParentDir, IsFrameworkDir)) {
+ parseModuleMapFile(ModMapFile, Attrs.IsSystem, ParentDir);
+ inferred = InferredDirectories.find(ParentDir);
+ }
+
+ if (inferred == InferredDirectories.end())
+ inferred = InferredDirectories.insert(
+ std::make_pair(ParentDir, InferredDirectory())).first;
+ }
+
+ if (inferred->second.InferModules) {
+ // We're allowed to infer for this directory, but make sure it's okay
+ // to infer this particular module.
+ StringRef Name = llvm::sys::path::stem(FrameworkDirName);
+ canInfer = std::find(inferred->second.ExcludedModules.begin(),
+ inferred->second.ExcludedModules.end(),
+ Name) == inferred->second.ExcludedModules.end();
+
+ Attrs.IsSystem |= inferred->second.Attrs.IsSystem;
+ Attrs.IsExternC |= inferred->second.Attrs.IsExternC;
+ Attrs.IsExhaustive |= inferred->second.Attrs.IsExhaustive;
+ ModuleMapFile = inferred->second.ModuleMapFile;
+ }
+ }
+ }
+
+ // If we're not allowed to infer a framework module, don't.
+ if (!canInfer)
+ return nullptr;
+ } else
+ ModuleMapFile = getModuleMapFileForUniquing(Parent);
+
+
+ // Look for an umbrella header.
+ SmallString<128> UmbrellaName = StringRef(FrameworkDir->getName());
+ llvm::sys::path::append(UmbrellaName, "Headers", ModuleName + ".h");
+ const FileEntry *UmbrellaHeader = FileMgr.getFile(UmbrellaName);
+
+ // FIXME: If there's no umbrella header, we could probably scan the
+ // framework to load *everything*. But it's not clear that this is a good
+ // idea.
+ if (!UmbrellaHeader)
+ return nullptr;
+
+ Module *Result = new Module(ModuleName, SourceLocation(), Parent,
+ /*IsFramework=*/true, /*IsExplicit=*/false,
+ NumCreatedModules++);
+ InferredModuleAllowedBy[Result] = ModuleMapFile;
+ Result->IsInferred = true;
+ if (LangOpts.CurrentModule == ModuleName) {
+ SourceModule = Result;
+ SourceModuleName = ModuleName;
+ }
+
+ Result->IsSystem |= Attrs.IsSystem;
+ Result->IsExternC |= Attrs.IsExternC;
+ Result->ConfigMacrosExhaustive |= Attrs.IsExhaustive;
+ Result->Directory = FrameworkDir;
+
+ if (!Parent)
+ Modules[ModuleName] = Result;
+
+ // umbrella header "umbrella-header-name"
+ //
+ // The "Headers/" component of the name is implied because this is
+ // a framework module.
+ setUmbrellaHeader(Result, UmbrellaHeader, ModuleName + ".h");
+
+ // export *
+ Result->Exports.push_back(Module::ExportDecl(nullptr, true));
+
+ // module * { export * }
+ Result->InferSubmodules = true;
+ Result->InferExportWildcard = true;
+
+ // Look for subframeworks.
+ std::error_code EC;
+ SmallString<128> SubframeworksDirName
+ = StringRef(FrameworkDir->getName());
+ llvm::sys::path::append(SubframeworksDirName, "Frameworks");
+ llvm::sys::path::native(SubframeworksDirName);
+ for (llvm::sys::fs::directory_iterator Dir(SubframeworksDirName, EC), DirEnd;
+ Dir != DirEnd && !EC; Dir.increment(EC)) {
+ if (!StringRef(Dir->path()).endswith(".framework"))
+ continue;
+
+ if (const DirectoryEntry *SubframeworkDir
+ = FileMgr.getDirectory(Dir->path())) {
+ // Note: as an egregious but useful hack, we use the real path here and
+ // check whether it is actually a subdirectory of the parent directory.
+ // This will not be the case if the 'subframework' is actually a symlink
+ // out to a top-level framework.
+ StringRef SubframeworkDirName = FileMgr.getCanonicalName(SubframeworkDir);
+ bool FoundParent = false;
+ do {
+ // Get the parent directory name.
+ SubframeworkDirName
+ = llvm::sys::path::parent_path(SubframeworkDirName);
+ if (SubframeworkDirName.empty())
+ break;
+
+ if (FileMgr.getDirectory(SubframeworkDirName) == FrameworkDir) {
+ FoundParent = true;
+ break;
+ }
+ } while (true);
+
+ if (!FoundParent)
+ continue;
+
+ // FIXME: Do we want to warn about subframeworks without umbrella headers?
+ inferFrameworkModule(SubframeworkDir, Attrs, Result);
+ }
+ }
+
+ // If the module is a top-level framework, automatically link against the
+ // framework.
+ if (!Result->isSubFramework()) {
+ inferFrameworkLink(Result, FrameworkDir, FileMgr);
+ }
+
+ return Result;
+}
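+
+// Illustrative sketch (not part of the original source): inferring a module
+// for /S/Foo.framework with umbrella header Headers/Foo.h behaves like the
+// module map
+//
+//   framework module Foo {
+//     umbrella header "Foo.h"
+//     export *
+//     module * { export * }
+//   }
+//
+// with any Frameworks/Bar.framework subdirectories recursively inferred as
+// submodules.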
+
+void ModuleMap::setUmbrellaHeader(Module *Mod, const FileEntry *UmbrellaHeader,
+ Twine NameAsWritten) {
+ Headers[UmbrellaHeader].push_back(KnownHeader(Mod, NormalHeader));
+ Mod->Umbrella = UmbrellaHeader;
+ Mod->UmbrellaAsWritten = NameAsWritten.str();
+ UmbrellaDirs[UmbrellaHeader->getDir()] = Mod;
+}
+
+void ModuleMap::setUmbrellaDir(Module *Mod, const DirectoryEntry *UmbrellaDir,
+ Twine NameAsWritten) {
+ Mod->Umbrella = UmbrellaDir;
+ Mod->UmbrellaAsWritten = NameAsWritten.str();
+ UmbrellaDirs[UmbrellaDir] = Mod;
+}
+
+static Module::HeaderKind headerRoleToKind(ModuleMap::ModuleHeaderRole Role) {
+ switch ((int)Role) {
+ default: llvm_unreachable("unknown header role");
+ case ModuleMap::NormalHeader:
+ return Module::HK_Normal;
+ case ModuleMap::PrivateHeader:
+ return Module::HK_Private;
+ case ModuleMap::TextualHeader:
+ return Module::HK_Textual;
+ case ModuleMap::PrivateHeader | ModuleMap::TextualHeader:
+ return Module::HK_PrivateTextual;
+ }
+}
+
+void ModuleMap::addHeader(Module *Mod, Module::Header Header,
+ ModuleHeaderRole Role, bool Imported) {
+ KnownHeader KH(Mod, Role);
+
+ // Only add each header to the headers list once.
+ // FIXME: Should we diagnose if a header is listed twice in the
+ // same module definition?
+ auto &HeaderList = Headers[Header.Entry];
+ for (auto H : HeaderList)
+ if (H == KH)
+ return;
+
+ HeaderList.push_back(KH);
+ Mod->Headers[headerRoleToKind(Role)].push_back(std::move(Header));
+
+ bool isCompilingModuleHeader = Mod->getTopLevelModule() == CompilingModule;
+ if (!Imported || isCompilingModuleHeader) {
+ // When we import HeaderFileInfo, the external source is expected to
+ // set the isModuleHeader flag itself.
+ HeaderInfo.MarkFileModuleHeader(Header.Entry, Role,
+ isCompilingModuleHeader);
+ }
+}
+
+void ModuleMap::excludeHeader(Module *Mod, Module::Header Header) {
+ // Add this as a known header so we won't implicitly add it to any
+ // umbrella directory module.
+ // FIXME: Should we only exclude it from umbrella modules within the
+ // specified module?
+ (void) Headers[Header.Entry];
+
+ Mod->Headers[Module::HK_Excluded].push_back(std::move(Header));
+}
+
+const FileEntry *
+ModuleMap::getContainingModuleMapFile(const Module *Module) const {
+ if (Module->DefinitionLoc.isInvalid())
+ return nullptr;
+
+ return SourceMgr.getFileEntryForID(
+ SourceMgr.getFileID(Module->DefinitionLoc));
+}
+
+const FileEntry *ModuleMap::getModuleMapFileForUniquing(const Module *M) const {
+ if (M->IsInferred) {
+ assert(InferredModuleAllowedBy.count(M) && "missing inferred module map");
+ return InferredModuleAllowedBy.find(M)->second;
+ }
+ return getContainingModuleMapFile(M);
+}
+
+void ModuleMap::setInferredModuleAllowedBy(Module *M, const FileEntry *ModMap) {
+ assert(M->IsInferred && "module not inferred");
+ InferredModuleAllowedBy[M] = ModMap;
+}
+
+void ModuleMap::dump() {
+ llvm::errs() << "Modules:";
+ for (llvm::StringMap<Module *>::iterator M = Modules.begin(),
+ MEnd = Modules.end();
+ M != MEnd; ++M)
+ M->getValue()->print(llvm::errs(), 2);
+
+ llvm::errs() << "Headers:";
+ for (HeadersMap::iterator H = Headers.begin(), HEnd = Headers.end();
+ H != HEnd; ++H) {
+ llvm::errs() << " \"" << H->first->getName() << "\" -> ";
+ for (SmallVectorImpl<KnownHeader>::const_iterator I = H->second.begin(),
+ E = H->second.end();
+ I != E; ++I) {
+ if (I != H->second.begin())
+ llvm::errs() << ",";
+ llvm::errs() << I->getModule()->getFullModuleName();
+ }
+ llvm::errs() << "\n";
+ }
+}
+
+bool ModuleMap::resolveExports(Module *Mod, bool Complain) {
+ auto Unresolved = std::move(Mod->UnresolvedExports);
+ Mod->UnresolvedExports.clear();
+ for (auto &UE : Unresolved) {
+ Module::ExportDecl Export = resolveExport(Mod, UE, Complain);
+ if (Export.getPointer() || Export.getInt())
+ Mod->Exports.push_back(Export);
+ else
+ Mod->UnresolvedExports.push_back(UE);
+ }
+ return !Mod->UnresolvedExports.empty();
+}
+
+bool ModuleMap::resolveUses(Module *Mod, bool Complain) {
+ auto Unresolved = std::move(Mod->UnresolvedDirectUses);
+ Mod->UnresolvedDirectUses.clear();
+ for (auto &UDU : Unresolved) {
+ Module *DirectUse = resolveModuleId(UDU, Mod, Complain);
+ if (DirectUse)
+ Mod->DirectUses.push_back(DirectUse);
+ else
+ Mod->UnresolvedDirectUses.push_back(UDU);
+ }
+ return !Mod->UnresolvedDirectUses.empty();
+}
+
+bool ModuleMap::resolveConflicts(Module *Mod, bool Complain) {
+ auto Unresolved = std::move(Mod->UnresolvedConflicts);
+ Mod->UnresolvedConflicts.clear();
+ for (auto &UC : Unresolved) {
+ if (Module *OtherMod = resolveModuleId(UC.Id, Mod, Complain)) {
+ Module::Conflict Conflict;
+ Conflict.Other = OtherMod;
+ Conflict.Message = UC.Message;
+ Mod->Conflicts.push_back(Conflict);
+ } else
+ Mod->UnresolvedConflicts.push_back(UC);
+ }
+ return !Mod->UnresolvedConflicts.empty();
+}
+
+Module *ModuleMap::inferModuleFromLocation(FullSourceLoc Loc) {
+ if (Loc.isInvalid())
+ return nullptr;
+
+ // Use the expansion location to determine which module we're in.
+ FullSourceLoc ExpansionLoc = Loc.getExpansionLoc();
+ if (!ExpansionLoc.isFileID())
+ return nullptr;
+
+ const SourceManager &SrcMgr = Loc.getManager();
+ FileID ExpansionFileID = ExpansionLoc.getFileID();
+
+ while (const FileEntry *ExpansionFile
+ = SrcMgr.getFileEntryForID(ExpansionFileID)) {
+ // Find the module that owns this header (if any).
+ if (Module *Mod = findModuleForHeader(ExpansionFile).getModule())
+ return Mod;
+
+ // No module owns this header, so look up the inclusion chain to see if
+ // any included header has an associated module.
+ SourceLocation IncludeLoc = SrcMgr.getIncludeLoc(ExpansionFileID);
+ if (IncludeLoc.isInvalid())
+ return nullptr;
+
+ ExpansionFileID = SrcMgr.getFileID(IncludeLoc);
+ }
+
+ return nullptr;
+}
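+
+// Illustrative sketch (not part of the original source): if Loc points into
+// a header that no module claims, but that header was included from an
+// umbrella header owned by module Foo, the include-location walk above
+// resolves Loc to Foo rather than giving up at the unowned file.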
+
+//----------------------------------------------------------------------------//
+// Module map file parser
+//----------------------------------------------------------------------------//
+
+namespace clang {
+ /// \brief A token in a module map file.
+ struct MMToken {
+ enum TokenKind {
+ Comma,
+ ConfigMacros,
+ Conflict,
+ EndOfFile,
+ HeaderKeyword,
+ Identifier,
+ Exclaim,
+ ExcludeKeyword,
+ ExplicitKeyword,
+ ExportKeyword,
+ ExternKeyword,
+ FrameworkKeyword,
+ LinkKeyword,
+ ModuleKeyword,
+ Period,
+ PrivateKeyword,
+ UmbrellaKeyword,
+ UseKeyword,
+ RequiresKeyword,
+ Star,
+ StringLiteral,
+ TextualKeyword,
+ LBrace,
+ RBrace,
+ LSquare,
+ RSquare
+ } Kind;
+
+ unsigned Location;
+ unsigned StringLength;
+ const char *StringData;
+
+ void clear() {
+ Kind = EndOfFile;
+ Location = 0;
+ StringLength = 0;
+ StringData = nullptr;
+ }
+
+ bool is(TokenKind K) const { return Kind == K; }
+
+ SourceLocation getLocation() const {
+ return SourceLocation::getFromRawEncoding(Location);
+ }
+
+ StringRef getString() const {
+ return StringRef(StringData, StringLength);
+ }
+ };
+
+ class ModuleMapParser {
+ Lexer &L;
+ SourceManager &SourceMgr;
+
+ /// \brief Default target information, used only for string literal
+ /// parsing.
+ const TargetInfo *Target;
+
+ DiagnosticsEngine &Diags;
+ ModuleMap &Map;
+
+ /// \brief The current module map file.
+ const FileEntry *ModuleMapFile;
+
+ /// \brief The directory that file names in this module map file should
+ /// be resolved relative to.
+ const DirectoryEntry *Directory;
+
+ /// \brief The directory containing Clang-supplied headers.
+ const DirectoryEntry *BuiltinIncludeDir;
+
+ /// \brief Whether this module map is in a system header directory.
+ bool IsSystem;
+
+ /// \brief Whether an error occurred.
+ bool HadError;
+
+ /// \brief Stores string data for the various string literals referenced
+ /// during parsing.
+ llvm::BumpPtrAllocator StringData;
+
+ /// \brief The current token.
+ MMToken Tok;
+
+ /// \brief The active module.
+ Module *ActiveModule;
+
+ /// \brief Whether a module uses the 'requires excluded' hack to mark its
+ /// contents as 'textual'.
+ ///
+ /// On older Darwin SDK versions, 'requires excluded' is used to mark the
+ /// contents of the Darwin.C.excluded (assert.h) and Tcl.Private modules as
+ /// non-modular headers. For backwards compatibility, we continue to
+ /// support this idiom for just these modules, and map the headers to
+ /// 'textual' to match the original intent.
+ llvm::SmallPtrSet<Module *, 2> UsesRequiresExcludedHack;
+
+ /// \brief Consume the current token and return its location.
+ SourceLocation consumeToken();
+
+ /// \brief Skip tokens until we reach a token with the given kind
+ /// (or the end of the file).
+ void skipUntil(MMToken::TokenKind K);
+
+ typedef SmallVector<std::pair<std::string, SourceLocation>, 2> ModuleId;
+ bool parseModuleId(ModuleId &Id);
+ void parseModuleDecl();
+ void parseExternModuleDecl();
+ void parseRequiresDecl();
+ void parseHeaderDecl(clang::MMToken::TokenKind,
+ SourceLocation LeadingLoc);
+ void parseUmbrellaDirDecl(SourceLocation UmbrellaLoc);
+ void parseExportDecl();
+ void parseUseDecl();
+ void parseLinkDecl();
+ void parseConfigMacros();
+ void parseConflict();
+ void parseInferredModuleDecl(bool Framework, bool Explicit);
+
+ typedef ModuleMap::Attributes Attributes;
+ bool parseOptionalAttributes(Attributes &Attrs);
+
+ public:
+ explicit ModuleMapParser(Lexer &L, SourceManager &SourceMgr,
+ const TargetInfo *Target,
+ DiagnosticsEngine &Diags,
+ ModuleMap &Map,
+ const FileEntry *ModuleMapFile,
+ const DirectoryEntry *Directory,
+ const DirectoryEntry *BuiltinIncludeDir,
+ bool IsSystem)
+ : L(L), SourceMgr(SourceMgr), Target(Target), Diags(Diags), Map(Map),
+ ModuleMapFile(ModuleMapFile), Directory(Directory),
+ BuiltinIncludeDir(BuiltinIncludeDir), IsSystem(IsSystem),
+ HadError(false), ActiveModule(nullptr)
+ {
+ Tok.clear();
+ consumeToken();
+ }
+
+ bool parseModuleMapFile();
+ };
+}
+
+SourceLocation ModuleMapParser::consumeToken() {
+retry:
+ SourceLocation Result = Tok.getLocation();
+ Tok.clear();
+
+ Token LToken;
+ L.LexFromRawLexer(LToken);
+ Tok.Location = LToken.getLocation().getRawEncoding();
+ switch (LToken.getKind()) {
+ case tok::raw_identifier: {
+ StringRef RI = LToken.getRawIdentifier();
+ Tok.StringData = RI.data();
+ Tok.StringLength = RI.size();
+ Tok.Kind = llvm::StringSwitch<MMToken::TokenKind>(RI)
+ .Case("config_macros", MMToken::ConfigMacros)
+ .Case("conflict", MMToken::Conflict)
+ .Case("exclude", MMToken::ExcludeKeyword)
+ .Case("explicit", MMToken::ExplicitKeyword)
+ .Case("export", MMToken::ExportKeyword)
+ .Case("extern", MMToken::ExternKeyword)
+ .Case("framework", MMToken::FrameworkKeyword)
+ .Case("header", MMToken::HeaderKeyword)
+ .Case("link", MMToken::LinkKeyword)
+ .Case("module", MMToken::ModuleKeyword)
+ .Case("private", MMToken::PrivateKeyword)
+ .Case("requires", MMToken::RequiresKeyword)
+ .Case("textual", MMToken::TextualKeyword)
+ .Case("umbrella", MMToken::UmbrellaKeyword)
+ .Case("use", MMToken::UseKeyword)
+ .Default(MMToken::Identifier);
+ break;
+ }
+
+ case tok::comma:
+ Tok.Kind = MMToken::Comma;
+ break;
+
+ case tok::eof:
+ Tok.Kind = MMToken::EndOfFile;
+ break;
+
+ case tok::l_brace:
+ Tok.Kind = MMToken::LBrace;
+ break;
+
+ case tok::l_square:
+ Tok.Kind = MMToken::LSquare;
+ break;
+
+ case tok::period:
+ Tok.Kind = MMToken::Period;
+ break;
+
+ case tok::r_brace:
+ Tok.Kind = MMToken::RBrace;
+ break;
+
+ case tok::r_square:
+ Tok.Kind = MMToken::RSquare;
+ break;
+
+ case tok::star:
+ Tok.Kind = MMToken::Star;
+ break;
+
+ case tok::exclaim:
+ Tok.Kind = MMToken::Exclaim;
+ break;
+
+ case tok::string_literal: {
+ if (LToken.hasUDSuffix()) {
+ Diags.Report(LToken.getLocation(), diag::err_invalid_string_udl);
+ HadError = true;
+ goto retry;
+ }
+
+ // Parse the string literal.
+ LangOptions LangOpts;
+ StringLiteralParser StringLiteral(LToken, SourceMgr, LangOpts, *Target);
+ if (StringLiteral.hadError)
+ goto retry;
+
+ // Copy the string literal into our string data allocator.
+ unsigned Length = StringLiteral.GetStringLength();
+ char *Saved = StringData.Allocate<char>(Length + 1);
+ memcpy(Saved, StringLiteral.GetString().data(), Length);
+ Saved[Length] = 0;
+
+ // Form the token.
+ Tok.Kind = MMToken::StringLiteral;
+ Tok.StringData = Saved;
+ Tok.StringLength = Length;
+ break;
+ }
+
+ case tok::comment:
+ goto retry;
+
+ default:
+ Diags.Report(LToken.getLocation(), diag::err_mmap_unknown_token);
+ HadError = true;
+ goto retry;
+ }
+
+ return Result;
+}
+
+void ModuleMapParser::skipUntil(MMToken::TokenKind K) {
+ unsigned braceDepth = 0;
+ unsigned squareDepth = 0;
+ do {
+ switch (Tok.Kind) {
+ case MMToken::EndOfFile:
+ return;
+
+ case MMToken::LBrace:
+ if (Tok.is(K) && braceDepth == 0 && squareDepth == 0)
+ return;
+
+ ++braceDepth;
+ break;
+
+ case MMToken::LSquare:
+ if (Tok.is(K) && braceDepth == 0 && squareDepth == 0)
+ return;
+
+ ++squareDepth;
+ break;
+
+ case MMToken::RBrace:
+ if (braceDepth > 0)
+ --braceDepth;
+ else if (Tok.is(K))
+ return;
+ break;
+
+ case MMToken::RSquare:
+ if (squareDepth > 0)
+ --squareDepth;
+ else if (Tok.is(K))
+ return;
+ break;
+
+ default:
+ if (braceDepth == 0 && squareDepth == 0 && Tok.is(K))
+ return;
+ break;
+ }
+
+ consumeToken();
+ } while (true);
+}
+
+/// \brief Parse a module-id.
+///
+/// module-id:
+/// identifier
+/// identifier '.' module-id
+///
+/// \returns true if an error occurred, false otherwise.
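+///
+/// For example, 'std.io' (an illustrative name) yields a two-element
+/// ModuleId of {"std", "io"} along with the location of each component.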
+bool ModuleMapParser::parseModuleId(ModuleId &Id) {
+ Id.clear();
+ do {
+ if (Tok.is(MMToken::Identifier) || Tok.is(MMToken::StringLiteral)) {
+ Id.push_back(std::make_pair(Tok.getString(), Tok.getLocation()));
+ consumeToken();
+ } else {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_module_name);
+ return true;
+ }
+
+ if (!Tok.is(MMToken::Period))
+ break;
+
+ consumeToken();
+ } while (true);
+
+ return false;
+}
+
+namespace {
+ /// \brief Enumerates the known attributes.
+ enum AttributeKind {
+ /// \brief An unknown attribute.
+ AT_unknown,
+ /// \brief The 'system' attribute.
+ AT_system,
+ /// \brief The 'extern_c' attribute.
+ AT_extern_c,
+ /// \brief The 'exhaustive' attribute.
+ AT_exhaustive
+ };
+}
+
+/// \brief Parse a module declaration.
+///
+/// module-declaration:
+/// 'extern' 'module' module-id string-literal
+/// 'explicit'[opt] 'framework'[opt] 'module' module-id attributes[opt]
+/// { module-member* }
+///
+/// module-member:
+/// requires-declaration
+/// header-declaration
+/// submodule-declaration
+/// export-declaration
+/// use-declaration
+/// link-declaration
+/// config-macros-declaration
+/// conflict-declaration
+///
+/// submodule-declaration:
+/// module-declaration
+/// inferred-submodule-declaration
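+///
+/// For illustration only, a hypothetical declaration this grammar accepts:
+/// \code
+/// framework module MyKit {
+/// umbrella header "MyKit.h"
+/// export *
+/// module * { export * }
+/// }
+/// \endcode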
+void ModuleMapParser::parseModuleDecl() {
+ assert(Tok.is(MMToken::ExplicitKeyword) || Tok.is(MMToken::ModuleKeyword) ||
+ Tok.is(MMToken::FrameworkKeyword) || Tok.is(MMToken::ExternKeyword));
+ if (Tok.is(MMToken::ExternKeyword)) {
+ parseExternModuleDecl();
+ return;
+ }
+
+ // Parse 'explicit' or 'framework' keyword, if present.
+ SourceLocation ExplicitLoc;
+ bool Explicit = false;
+ bool Framework = false;
+
+ // Parse 'explicit' keyword, if present.
+ if (Tok.is(MMToken::ExplicitKeyword)) {
+ ExplicitLoc = consumeToken();
+ Explicit = true;
+ }
+
+ // Parse 'framework' keyword, if present.
+ if (Tok.is(MMToken::FrameworkKeyword)) {
+ consumeToken();
+ Framework = true;
+ }
+
+ // Parse 'module' keyword.
+ if (!Tok.is(MMToken::ModuleKeyword)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_module);
+ consumeToken();
+ HadError = true;
+ return;
+ }
+ consumeToken(); // 'module' keyword
+
+ // If we have a wildcard for the module name, this is an inferred submodule.
+ // Parse it.
+ if (Tok.is(MMToken::Star))
+ return parseInferredModuleDecl(Framework, Explicit);
+
+ // Parse the module name.
+ ModuleId Id;
+ if (parseModuleId(Id)) {
+ HadError = true;
+ return;
+ }
+
+ if (ActiveModule) {
+ if (Id.size() > 1) {
+ Diags.Report(Id.front().second, diag::err_mmap_nested_submodule_id)
+ << SourceRange(Id.front().second, Id.back().second);
+
+ HadError = true;
+ return;
+ }
+ } else if (Id.size() == 1 && Explicit) {
+ // Top-level modules can't be explicit.
+ Diags.Report(ExplicitLoc, diag::err_mmap_explicit_top_level);
+ Explicit = false;
+ ExplicitLoc = SourceLocation();
+ HadError = true;
+ }
+
+ Module *PreviousActiveModule = ActiveModule;
+ if (Id.size() > 1) {
+ // This module map defines a submodule. Go find the module of which it
+ // is a submodule.
+ ActiveModule = nullptr;
+ const Module *TopLevelModule = nullptr;
+ for (unsigned I = 0, N = Id.size() - 1; I != N; ++I) {
+ if (Module *Next = Map.lookupModuleQualified(Id[I].first, ActiveModule)) {
+ if (I == 0)
+ TopLevelModule = Next;
+ ActiveModule = Next;
+ continue;
+ }
+
+ if (ActiveModule) {
+ Diags.Report(Id[I].second, diag::err_mmap_missing_module_qualified)
+ << Id[I].first
+ << ActiveModule->getTopLevelModule()->getFullModuleName();
+ } else {
+ Diags.Report(Id[I].second, diag::err_mmap_expected_module_name);
+ }
+ HadError = true;
+ return;
+ }
+
+ if (ModuleMapFile != Map.getContainingModuleMapFile(TopLevelModule)) {
+ assert(ModuleMapFile != Map.getModuleMapFileForUniquing(TopLevelModule) &&
+ "submodule defined in same file as 'module *' that allowed its "
+ "top-level module");
+ Map.addAdditionalModuleMapFile(TopLevelModule, ModuleMapFile);
+ }
+ }
+
+ StringRef ModuleName = Id.back().first;
+ SourceLocation ModuleNameLoc = Id.back().second;
+
+ // Parse the optional attribute list.
+ Attributes Attrs;
+ parseOptionalAttributes(Attrs);
+
+ // Parse the opening brace.
+ if (!Tok.is(MMToken::LBrace)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_lbrace)
+ << ModuleName;
+ HadError = true;
+ return;
+ }
+ SourceLocation LBraceLoc = consumeToken();
+
+ // Determine whether this (sub)module has already been defined.
+ if (Module *Existing = Map.lookupModuleQualified(ModuleName, ActiveModule)) {
+ if (Existing->DefinitionLoc.isInvalid() && !ActiveModule) {
+ // Skip the module definition.
+ skipUntil(MMToken::RBrace);
+ if (Tok.is(MMToken::RBrace))
+ consumeToken();
+ else {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_rbrace);
+ Diags.Report(LBraceLoc, diag::note_mmap_lbrace_match);
+ HadError = true;
+ }
+ return;
+ }
+
+ Diags.Report(ModuleNameLoc, diag::err_mmap_module_redefinition)
+ << ModuleName;
+ Diags.Report(Existing->DefinitionLoc, diag::note_mmap_prev_definition);
+
+ // Skip the module definition.
+ skipUntil(MMToken::RBrace);
+ if (Tok.is(MMToken::RBrace))
+ consumeToken();
+
+ HadError = true;
+ return;
+ }
+
+ // Start defining this module.
+ ActiveModule = Map.findOrCreateModule(ModuleName, ActiveModule, Framework,
+ Explicit).first;
+ ActiveModule->DefinitionLoc = ModuleNameLoc;
+ if (Attrs.IsSystem || IsSystem)
+ ActiveModule->IsSystem = true;
+ if (Attrs.IsExternC)
+ ActiveModule->IsExternC = true;
+ ActiveModule->Directory = Directory;
+
+ bool Done = false;
+ do {
+ switch (Tok.Kind) {
+ case MMToken::EndOfFile:
+ case MMToken::RBrace:
+ Done = true;
+ break;
+
+ case MMToken::ConfigMacros:
+ parseConfigMacros();
+ break;
+
+ case MMToken::Conflict:
+ parseConflict();
+ break;
+
+ case MMToken::ExplicitKeyword:
+ case MMToken::ExternKeyword:
+ case MMToken::FrameworkKeyword:
+ case MMToken::ModuleKeyword:
+ parseModuleDecl();
+ break;
+
+ case MMToken::ExportKeyword:
+ parseExportDecl();
+ break;
+
+ case MMToken::UseKeyword:
+ parseUseDecl();
+ break;
+
+ case MMToken::RequiresKeyword:
+ parseRequiresDecl();
+ break;
+
+ case MMToken::TextualKeyword:
+ parseHeaderDecl(MMToken::TextualKeyword, consumeToken());
+ break;
+
+ case MMToken::UmbrellaKeyword: {
+ SourceLocation UmbrellaLoc = consumeToken();
+ if (Tok.is(MMToken::HeaderKeyword))
+ parseHeaderDecl(MMToken::UmbrellaKeyword, UmbrellaLoc);
+ else
+ parseUmbrellaDirDecl(UmbrellaLoc);
+ break;
+ }
+
+ case MMToken::ExcludeKeyword:
+ parseHeaderDecl(MMToken::ExcludeKeyword, consumeToken());
+ break;
+
+ case MMToken::PrivateKeyword:
+ parseHeaderDecl(MMToken::PrivateKeyword, consumeToken());
+ break;
+
+ case MMToken::HeaderKeyword:
+ parseHeaderDecl(MMToken::HeaderKeyword, consumeToken());
+ break;
+
+ case MMToken::LinkKeyword:
+ parseLinkDecl();
+ break;
+
+ default:
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_member);
+ consumeToken();
+ break;
+ }
+ } while (!Done);
+
+ if (Tok.is(MMToken::RBrace))
+ consumeToken();
+ else {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_rbrace);
+ Diags.Report(LBraceLoc, diag::note_mmap_lbrace_match);
+ HadError = true;
+ }
+
+ // If the active module is a top-level framework, and there are no link
+ // libraries, automatically link against the framework.
+ if (ActiveModule->IsFramework && !ActiveModule->isSubFramework() &&
+ ActiveModule->LinkLibraries.empty()) {
+ inferFrameworkLink(ActiveModule, Directory, SourceMgr.getFileManager());
+ }
+
+ // If the module meets all requirements but is still unavailable, mark the
+ // whole tree as unavailable to prevent it from building.
+ if (!ActiveModule->IsAvailable && !ActiveModule->IsMissingRequirement &&
+ ActiveModule->Parent) {
+ ActiveModule->getTopLevelModule()->markUnavailable();
+ ActiveModule->getTopLevelModule()->MissingHeaders.append(
+ ActiveModule->MissingHeaders.begin(), ActiveModule->MissingHeaders.end());
+ }
+
+ // We're done parsing this module. Pop back to the previous module.
+ ActiveModule = PreviousActiveModule;
+}
+
+/// \brief Parse an extern module declaration.
+///
+/// extern module-declaration:
+/// 'extern' 'module' module-id string-literal
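+///
+/// For example (file name is illustrative),
+/// \code
+/// extern module MyLib "MyLib.modulemap"
+/// \endcode
+/// defers the definition of module MyLib to the named module map file.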
+void ModuleMapParser::parseExternModuleDecl() {
+ assert(Tok.is(MMToken::ExternKeyword));
+ SourceLocation ExternLoc = consumeToken(); // 'extern' keyword
+
+ // Parse 'module' keyword.
+ if (!Tok.is(MMToken::ModuleKeyword)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_module);
+ consumeToken();
+ HadError = true;
+ return;
+ }
+ consumeToken(); // 'module' keyword
+
+ // Parse the module name.
+ ModuleId Id;
+ if (parseModuleId(Id)) {
+ HadError = true;
+ return;
+ }
+
+ // Parse the referenced module map file name.
+ if (!Tok.is(MMToken::StringLiteral)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_mmap_file);
+ HadError = true;
+ return;
+ }
+ std::string FileName = Tok.getString();
+ consumeToken(); // filename
+
+ StringRef FileNameRef = FileName;
+ SmallString<128> ModuleMapFileName;
+ if (llvm::sys::path::is_relative(FileNameRef)) {
+ ModuleMapFileName += Directory->getName();
+ llvm::sys::path::append(ModuleMapFileName, FileName);
+ FileNameRef = ModuleMapFileName;
+ }
+ if (const FileEntry *File = SourceMgr.getFileManager().getFile(FileNameRef))
+ Map.parseModuleMapFile(
+ File, /*IsSystem=*/false,
+ Map.HeaderInfo.getHeaderSearchOpts().ModuleMapFileHomeIsCwd
+ ? Directory
+ : File->getDir(), ExternLoc);
+}
+
+/// Whether to add the requirement \p Feature to the module \p M.
+///
+/// This preserves backwards compatibility for two hacks in the Darwin system
+/// module map files:
+///
+/// 1. The use of 'requires excluded' to make headers non-modular, which
+/// should really be mapped to 'textual' now that we have this feature. We
+/// drop the 'excluded' requirement, and set \p IsRequiresExcludedHack to
+/// true. Later, this bit will be used to map all the headers inside this
+/// module to 'textual'.
+///
+/// This affects Darwin.C.excluded (for assert.h) and Tcl.Private.
+///
+/// 2. The use of a bogus 'cplusplus' requirement on IOKit.avc, which was
+/// never correct and causes issues now that we check it; we simply drop it.
+static bool shouldAddRequirement(Module *M, StringRef Feature,
+ bool &IsRequiresExcludedHack) {
+ static const StringRef DarwinCExcluded[] = {"Darwin", "C", "excluded"};
+ static const StringRef TclPrivate[] = {"Tcl", "Private"};
+ static const StringRef IOKitAVC[] = {"IOKit", "avc"};
+
+ if (Feature == "excluded" && (M->fullModuleNameIs(DarwinCExcluded) ||
+ M->fullModuleNameIs(TclPrivate))) {
+ IsRequiresExcludedHack = true;
+ return false;
+ } else if (Feature == "cplusplus" && M->fullModuleNameIs(IOKitAVC)) {
+ return false;
+ }
+
+ return true;
+}
+
+/// \brief Parse a requires declaration.
+///
+/// requires-declaration:
+/// 'requires' feature-list
+///
+/// feature-list:
+/// feature ',' feature-list
+/// feature
+///
+/// feature:
+/// '!'[opt] identifier
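+///
+/// For example, 'requires cplusplus, !objc_arc' restricts a module to C++
+/// translation units compiled without ARC (feature names here illustrate
+/// the general form).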
+void ModuleMapParser::parseRequiresDecl() {
+ assert(Tok.is(MMToken::RequiresKeyword));
+
+ // Parse 'requires' keyword.
+ consumeToken();
+
+ // Parse the feature-list.
+ do {
+ bool RequiredState = true;
+ if (Tok.is(MMToken::Exclaim)) {
+ RequiredState = false;
+ consumeToken();
+ }
+
+ if (!Tok.is(MMToken::Identifier)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_feature);
+ HadError = true;
+ return;
+ }
+
+ // Consume the feature name.
+ std::string Feature = Tok.getString();
+ consumeToken();
+
+ bool IsRequiresExcludedHack = false;
+ bool ShouldAddRequirement =
+ shouldAddRequirement(ActiveModule, Feature, IsRequiresExcludedHack);
+
+ if (IsRequiresExcludedHack)
+ UsesRequiresExcludedHack.insert(ActiveModule);
+
+ if (ShouldAddRequirement) {
+ // Add this feature.
+ ActiveModule->addRequirement(Feature, RequiredState, Map.LangOpts,
+ *Map.Target);
+ }
+
+ if (!Tok.is(MMToken::Comma))
+ break;
+
+ // Consume the comma.
+ consumeToken();
+ } while (true);
+}
+
+/// \brief Append to \p Path the set of paths needed to get to the
+/// subframework in which the given module lives.
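+///
+/// For example, for a module Top.Sub.Leaf in which all three levels are
+/// frameworks (hypothetical names), this appends
+/// "Frameworks/Sub.framework/Frameworks/Leaf.framework"; the top-level
+/// framework directory is already the base against which \p Path is resolved.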
+static void appendSubframeworkPaths(Module *Mod,
+ SmallVectorImpl<char> &Path) {
+ // Collect the framework names from the given module to the top-level module.
+ SmallVector<StringRef, 2> Paths;
+ for (; Mod; Mod = Mod->Parent) {
+ if (Mod->IsFramework)
+ Paths.push_back(Mod->Name);
+ }
+
+ if (Paths.empty())
+ return;
+
+ // Add Frameworks/Name.framework for each subframework.
+ for (unsigned I = Paths.size() - 1; I != 0; --I)
+ llvm::sys::path::append(Path, "Frameworks", Paths[I-1] + ".framework");
+}
+
+/// \brief Parse a header declaration.
+///
+/// header-declaration:
+/// 'textual'[opt] 'header' string-literal
+/// 'private' 'textual'[opt] 'header' string-literal
+/// 'exclude' 'header' string-literal
+/// 'umbrella' 'header' string-literal
+///
+/// FIXME: Support 'private textual header'.
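+///
+/// For illustration (file names are hypothetical):
+/// \code
+/// header "MyLib.h"
+/// textual header "Legacy.def"
+/// private header "Secret.h"
+/// exclude header "Unsupported.h"
+/// \endcode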
+void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken,
+ SourceLocation LeadingLoc) {
+ // We've already consumed the first token.
+ ModuleMap::ModuleHeaderRole Role = ModuleMap::NormalHeader;
+ if (LeadingToken == MMToken::PrivateKeyword) {
+ Role = ModuleMap::PrivateHeader;
+ // 'private' may optionally be followed by 'textual'.
+ if (Tok.is(MMToken::TextualKeyword)) {
+ LeadingToken = Tok.Kind;
+ consumeToken();
+ }
+ }
+
+ if (LeadingToken == MMToken::TextualKeyword)
+ Role = ModuleMap::ModuleHeaderRole(Role | ModuleMap::TextualHeader);
+
+ if (UsesRequiresExcludedHack.count(ActiveModule)) {
+ // Mark this header 'textual' (see doc comment for
+ // ModuleMapParser::UsesRequiresExcludedHack).
+ Role = ModuleMap::ModuleHeaderRole(Role | ModuleMap::TextualHeader);
+ }
+
+ if (LeadingToken != MMToken::HeaderKeyword) {
+ if (!Tok.is(MMToken::HeaderKeyword)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_header)
+ << (LeadingToken == MMToken::PrivateKeyword ? "private" :
+ LeadingToken == MMToken::ExcludeKeyword ? "exclude" :
+ LeadingToken == MMToken::TextualKeyword ? "textual" : "umbrella");
+ return;
+ }
+ consumeToken();
+ }
+
+ // Parse the header name.
+ if (!Tok.is(MMToken::StringLiteral)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_header)
+ << "header";
+ HadError = true;
+ return;
+ }
+ Module::UnresolvedHeaderDirective Header;
+ Header.FileName = Tok.getString();
+ Header.FileNameLoc = consumeToken();
+
+ // Check whether we already have an umbrella.
+ if (LeadingToken == MMToken::UmbrellaKeyword && ActiveModule->Umbrella) {
+ Diags.Report(Header.FileNameLoc, diag::err_mmap_umbrella_clash)
+ << ActiveModule->getFullModuleName();
+ HadError = true;
+ return;
+ }
+
+ // Look for this file.
+ const FileEntry *File = nullptr;
+ const FileEntry *BuiltinFile = nullptr;
+ SmallString<128> RelativePathName;
+ if (llvm::sys::path::is_absolute(Header.FileName)) {
+ RelativePathName = Header.FileName;
+ File = SourceMgr.getFileManager().getFile(RelativePathName);
+ } else {
+ // Search for the header file within the search directory.
+ SmallString<128> FullPathName(Directory->getName());
+ unsigned FullPathLength = FullPathName.size();
+
+ if (ActiveModule->isPartOfFramework()) {
+ appendSubframeworkPaths(ActiveModule, RelativePathName);
+
+ // Check whether this file is in the public headers.
+ llvm::sys::path::append(RelativePathName, "Headers", Header.FileName);
+ llvm::sys::path::append(FullPathName, RelativePathName);
+ File = SourceMgr.getFileManager().getFile(FullPathName);
+
+ if (!File) {
+ // Check whether this file is in the private headers.
+ // FIXME: Should we retain the subframework paths here?
+ RelativePathName.clear();
+ FullPathName.resize(FullPathLength);
+ llvm::sys::path::append(RelativePathName, "PrivateHeaders",
+ Header.FileName);
+ llvm::sys::path::append(FullPathName, RelativePathName);
+ File = SourceMgr.getFileManager().getFile(FullPathName);
+ }
+ } else {
+ // Look up normal headers directly within the search directory.
+ llvm::sys::path::append(RelativePathName, Header.FileName);
+ llvm::sys::path::append(FullPathName, RelativePathName);
+ File = SourceMgr.getFileManager().getFile(FullPathName);
+
+ // If this is a system module with a top-level header, this header
+ // may have a counterpart (or replacement) in the set of headers
+ // supplied by Clang. Find that builtin header.
+ if (ActiveModule->IsSystem && LeadingToken != MMToken::UmbrellaKeyword &&
+ BuiltinIncludeDir && BuiltinIncludeDir != Directory &&
+ isBuiltinHeader(Header.FileName)) {
+ SmallString<128> BuiltinPathName(BuiltinIncludeDir->getName());
+ llvm::sys::path::append(BuiltinPathName, Header.FileName);
+ BuiltinFile = SourceMgr.getFileManager().getFile(BuiltinPathName);
+
+ // If Clang supplies this header but the underlying system does not,
+ // just silently swap in our builtin version. Otherwise, we'll end
+ // up adding both (later).
+ //
+ // For local visibility, entirely replace the system file with our
+ // one and textually include the system one. We need to pass macros
+ // from our header to the system one if we #include_next it.
+ //
+ // FIXME: Can we do this in all cases?
+ if (BuiltinFile && (!File || Map.LangOpts.ModulesLocalVisibility)) {
+ File = BuiltinFile;
+ RelativePathName = BuiltinPathName;
+ BuiltinFile = nullptr;
+ }
+ }
+ }
+ }
+
+ // FIXME: We shouldn't be eagerly stat'ing every file named in a module map.
+ // Come up with a lazy way to do this.
+ if (File) {
+ if (LeadingToken == MMToken::UmbrellaKeyword) {
+ const DirectoryEntry *UmbrellaDir = File->getDir();
+ if (Module *UmbrellaModule = Map.UmbrellaDirs[UmbrellaDir]) {
+ Diags.Report(LeadingLoc, diag::err_mmap_umbrella_clash)
+ << UmbrellaModule->getFullModuleName();
+ HadError = true;
+ } else {
+ // Record this umbrella header.
+ Map.setUmbrellaHeader(ActiveModule, File, RelativePathName.str());
+ }
+ } else if (LeadingToken == MMToken::ExcludeKeyword) {
+ Module::Header H = {RelativePathName.str(), File};
+ Map.excludeHeader(ActiveModule, H);
+ } else {
+ // If there is a builtin counterpart to this file, add it now, before
+ // the "real" header, so we build the built-in one first when building
+ // the module.
+ if (BuiltinFile) {
+ // FIXME: Taking the name from the FileEntry is unstable and can give
+ // different results depending on how we've previously named that file
+ // in this build.
+ Module::Header H = { BuiltinFile->getName(), BuiltinFile };
+ Map.addHeader(ActiveModule, H, Role);
+ }
+
+ // Record this header.
+ Module::Header H = { RelativePathName.str(), File };
+ Map.addHeader(ActiveModule, H, Role);
+ }
+ } else if (LeadingToken != MMToken::ExcludeKeyword) {
+ // Ignore excluded header files. They're optional anyway.
+
+ // If we find a module that has a missing header, we mark this module as
+ // unavailable and store the header directive for displaying diagnostics.
+ Header.IsUmbrella = LeadingToken == MMToken::UmbrellaKeyword;
+ ActiveModule->markUnavailable();
+ ActiveModule->MissingHeaders.push_back(Header);
+ }
+}
+
+static int compareModuleHeaders(const Module::Header *A,
+ const Module::Header *B) {
+ return A->NameAsWritten.compare(B->NameAsWritten);
+}
+
+/// \brief Parse an umbrella directory declaration.
+///
+/// umbrella-dir-declaration:
+/// 'umbrella' string-literal
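+///
+/// e.g. 'umbrella "Include"' (directory name illustrative) declares a
+/// directory, relative to the module map, whose headers all belong to the
+/// enclosing module.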
+void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
+ // Parse the directory name.
+ if (!Tok.is(MMToken::StringLiteral)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_header)
+ << "umbrella";
+ HadError = true;
+ return;
+ }
+
+ std::string DirName = Tok.getString();
+ SourceLocation DirNameLoc = consumeToken();
+
+ // Check whether we already have an umbrella.
+ if (ActiveModule->Umbrella) {
+ Diags.Report(DirNameLoc, diag::err_mmap_umbrella_clash)
+ << ActiveModule->getFullModuleName();
+ HadError = true;
+ return;
+ }
+
+ // Look for this file.
+ const DirectoryEntry *Dir = nullptr;
+ if (llvm::sys::path::is_absolute(DirName))
+ Dir = SourceMgr.getFileManager().getDirectory(DirName);
+ else {
+ SmallString<128> PathName;
+ PathName = Directory->getName();
+ llvm::sys::path::append(PathName, DirName);
+ Dir = SourceMgr.getFileManager().getDirectory(PathName);
+ }
+
+ if (!Dir) {
+ Diags.Report(DirNameLoc, diag::err_mmap_umbrella_dir_not_found)
+ << DirName;
+ HadError = true;
+ return;
+ }
+
+ if (UsesRequiresExcludedHack.count(ActiveModule)) {
+ // Mark this header 'textual' (see doc comment for
+ // ModuleMapParser::UsesRequiresExcludedHack). Although iterating over the
+ // directory is relatively expensive, in practice this only applies to the
+ // uncommonly used Tcl module on Darwin platforms.
+ std::error_code EC;
+ SmallVector<Module::Header, 6> Headers;
+ for (llvm::sys::fs::recursive_directory_iterator I(Dir->getName(), EC), E;
+ I != E && !EC; I.increment(EC)) {
+ if (const FileEntry *FE = SourceMgr.getFileManager().getFile(I->path())) {
+
+ Module::Header Header = {I->path(), FE};
+ Headers.push_back(std::move(Header));
+ }
+ }
+
+ // Sort header paths so that the pcm doesn't depend on iteration order.
+ llvm::array_pod_sort(Headers.begin(), Headers.end(), compareModuleHeaders);
+
+ for (auto &Header : Headers)
+ Map.addHeader(ActiveModule, std::move(Header), ModuleMap::TextualHeader);
+ return;
+ }
+
+ if (Module *OwningModule = Map.UmbrellaDirs[Dir]) {
+ Diags.Report(UmbrellaLoc, diag::err_mmap_umbrella_clash)
+ << OwningModule->getFullModuleName();
+ HadError = true;
+ return;
+ }
+
+ // Record this umbrella directory.
+ Map.setUmbrellaDir(ActiveModule, Dir, DirName);
+}
+
+/// \brief Parse a module export declaration.
+///
+/// export-declaration:
+/// 'export' wildcard-module-id
+///
+/// wildcard-module-id:
+/// identifier
+/// '*'
+/// identifier '.' wildcard-module-id
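+///
+/// For example, 'export Foo.Bar' re-exports a single submodule, while
+/// 'export *' re-exports everything this module imports (names are
+/// illustrative).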
+void ModuleMapParser::parseExportDecl() {
+ assert(Tok.is(MMToken::ExportKeyword));
+ SourceLocation ExportLoc = consumeToken();
+
+ // Parse the module-id with an optional wildcard at the end.
+ ModuleId ParsedModuleId;
+ bool Wildcard = false;
+ do {
+ // FIXME: Support string-literal module names here.
+ if (Tok.is(MMToken::Identifier)) {
+ ParsedModuleId.push_back(std::make_pair(Tok.getString(),
+ Tok.getLocation()));
+ consumeToken();
+
+ if (Tok.is(MMToken::Period)) {
+ consumeToken();
+ continue;
+ }
+
+ break;
+ }
+
+ if (Tok.is(MMToken::Star)) {
+ Wildcard = true;
+ consumeToken();
+ break;
+ }
+
+ Diags.Report(Tok.getLocation(), diag::err_mmap_module_id);
+ HadError = true;
+ return;
+ } while (true);
+
+ Module::UnresolvedExportDecl Unresolved = {
+ ExportLoc, ParsedModuleId, Wildcard
+ };
+ ActiveModule->UnresolvedExports.push_back(Unresolved);
+}
+
+/// \brief Parse a module use declaration.
+///
+/// use-declaration:
+/// 'use' module-id
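+///
+/// e.g. 'use MyBase' (an illustrative name) declares that this top-level
+/// module is allowed to use MyBase when declared uses are enforced
+/// (e.g. under -fmodules-decluse).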
+void ModuleMapParser::parseUseDecl() {
+ assert(Tok.is(MMToken::UseKeyword));
+ auto KWLoc = consumeToken();
+ // Parse the module-id.
+ ModuleId ParsedModuleId;
+ parseModuleId(ParsedModuleId);
+
+ if (ActiveModule->Parent)
+ Diags.Report(KWLoc, diag::err_mmap_use_decl_submodule);
+ else
+ ActiveModule->UnresolvedDirectUses.push_back(ParsedModuleId);
+}
+
+/// \brief Parse a link declaration.
+///
+/// link-declaration:
+/// 'link' 'framework'[opt] string-literal
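+///
+/// e.g. 'link "z"' or 'link framework "CoreFoundation"' (names are
+/// illustrative) record a library or framework to link against whenever
+/// this module is imported.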
+void ModuleMapParser::parseLinkDecl() {
+ assert(Tok.is(MMToken::LinkKeyword));
+ SourceLocation LinkLoc = consumeToken();
+
+ // Parse the optional 'framework' keyword.
+ bool IsFramework = false;
+ if (Tok.is(MMToken::FrameworkKeyword)) {
+ consumeToken();
+ IsFramework = true;
+ }
+
+ // Parse the library name
+ if (!Tok.is(MMToken::StringLiteral)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_library_name)
+ << IsFramework << SourceRange(LinkLoc);
+ HadError = true;
+ return;
+ }
+
+ std::string LibraryName = Tok.getString();
+ consumeToken();
+ ActiveModule->LinkLibraries.push_back(Module::LinkLibrary(LibraryName,
+ IsFramework));
+}
+
+/// \brief Parse a configuration macro declaration.
+///
+/// config-macros-declaration:
+/// 'config_macros' attributes[opt] config-macro-list[opt]
+///
+/// config-macro-list:
+/// identifier (',' identifier)*
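+///
+/// e.g. 'config_macros [exhaustive] NDEBUG, _DEBUG' (macro names are
+/// illustrative) declares the macros that may affect how this module is
+/// built.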
+void ModuleMapParser::parseConfigMacros() {
+ assert(Tok.is(MMToken::ConfigMacros));
+ SourceLocation ConfigMacrosLoc = consumeToken();
+
+ // Only top-level modules can have configuration macros.
+ if (ActiveModule->Parent) {
+ Diags.Report(ConfigMacrosLoc, diag::err_mmap_config_macro_submodule);
+ }
+
+ // Parse the optional attributes.
+ Attributes Attrs;
+ parseOptionalAttributes(Attrs);
+ if (Attrs.IsExhaustive && !ActiveModule->Parent) {
+ ActiveModule->ConfigMacrosExhaustive = true;
+ }
+
+ // If we don't have an identifier, we're done.
+ // FIXME: Support macros with the same name as a keyword here.
+ if (!Tok.is(MMToken::Identifier))
+ return;
+
+ // Consume the first identifier.
+ if (!ActiveModule->Parent) {
+ ActiveModule->ConfigMacros.push_back(Tok.getString().str());
+ }
+ consumeToken();
+
+ do {
+ // If there's a comma, consume it.
+ if (!Tok.is(MMToken::Comma))
+ break;
+ consumeToken();
+
+ // We expect to see a macro name here.
+ // FIXME: Support macros with the same name as a keyword here.
+ if (!Tok.is(MMToken::Identifier)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_config_macro);
+ break;
+ }
+
+ // Consume the macro name.
+ if (!ActiveModule->Parent) {
+ ActiveModule->ConfigMacros.push_back(Tok.getString().str());
+ }
+ consumeToken();
+ } while (true);
+}
+
+/// \brief Format a module-id into a string.
+static std::string formatModuleId(const ModuleId &Id) {
+ std::string result;
+ {
+ llvm::raw_string_ostream OS(result);
+
+ for (unsigned I = 0, N = Id.size(); I != N; ++I) {
+ if (I)
+ OS << ".";
+ OS << Id[I].first;
+ }
+ }
+
+ return result;
+}
+
+/// \brief Parse a conflict declaration.
+///
+/// conflict-declaration:
+/// 'conflict' module-id ',' string-literal
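+///
+/// e.g. 'conflict OtherLib, "MyLib and OtherLib cannot be used together"'
+/// (module name and message are illustrative).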
+void ModuleMapParser::parseConflict() {
+ assert(Tok.is(MMToken::Conflict));
+ SourceLocation ConflictLoc = consumeToken();
+ Module::UnresolvedConflict Conflict;
+
+ // Parse the module-id.
+ if (parseModuleId(Conflict.Id))
+ return;
+
+ // Parse the ','.
+ if (!Tok.is(MMToken::Comma)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_conflicts_comma)
+ << SourceRange(ConflictLoc);
+ return;
+ }
+ consumeToken();
+
+ // Parse the message.
+ if (!Tok.is(MMToken::StringLiteral)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_conflicts_message)
+ << formatModuleId(Conflict.Id);
+ return;
+ }
+ Conflict.Message = Tok.getString().str();
+ consumeToken();
+
+ // Add this unresolved conflict.
+ ActiveModule->UnresolvedConflicts.push_back(Conflict);
+}
+
+/// \brief Parse an inferred module declaration (wildcard modules).
+///
+/// inferred-submodule-declaration:
+/// 'explicit'[opt] 'framework'[opt] 'module' * attributes[opt]
+/// { inferred-module-member* }
+///
+/// inferred-module-member:
+/// 'export' '*'
+/// 'exclude' identifier
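+///
+/// For illustration, a hypothetical directory-level wildcard that infers a
+/// framework module for each .framework found, except one:
+/// \code
+/// framework module * {
+/// exclude Deprecated
+/// }
+/// \endcode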
+void ModuleMapParser::parseInferredModuleDecl(bool Framework, bool Explicit) {
+ assert(Tok.is(MMToken::Star));
+ SourceLocation StarLoc = consumeToken();
+ bool Failed = false;
+
+ // Inferred modules must be submodules.
+ if (!ActiveModule && !Framework) {
+ Diags.Report(StarLoc, diag::err_mmap_top_level_inferred_submodule);
+ Failed = true;
+ }
+
+ if (ActiveModule) {
+ // Inferred modules must have umbrella directories.
+ if (!Failed && ActiveModule->IsAvailable &&
+ !ActiveModule->getUmbrellaDir()) {
+ Diags.Report(StarLoc, diag::err_mmap_inferred_no_umbrella);
+ Failed = true;
+ }
+
+ // Check for redefinition of an inferred module.
+ if (!Failed && ActiveModule->InferSubmodules) {
+ Diags.Report(StarLoc, diag::err_mmap_inferred_redef);
+ if (ActiveModule->InferredSubmoduleLoc.isValid())
+ Diags.Report(ActiveModule->InferredSubmoduleLoc,
+ diag::note_mmap_prev_definition);
+ Failed = true;
+ }
+
+ // Check for the 'framework' keyword, which is not permitted here.
+ if (Framework) {
+ Diags.Report(StarLoc, diag::err_mmap_inferred_framework_submodule);
+ Framework = false;
+ }
+ } else if (Explicit) {
+ Diags.Report(StarLoc, diag::err_mmap_explicit_inferred_framework);
+ Explicit = false;
+ }
+
+ // If there were any problems with this inferred submodule, skip its body.
+ if (Failed) {
+ if (Tok.is(MMToken::LBrace)) {
+ consumeToken();
+ skipUntil(MMToken::RBrace);
+ if (Tok.is(MMToken::RBrace))
+ consumeToken();
+ }
+ HadError = true;
+ return;
+ }
+
+ // Parse optional attributes.
+ Attributes Attrs;
+ parseOptionalAttributes(Attrs);
+
+ if (ActiveModule) {
+ // Note that we have an inferred submodule.
+ ActiveModule->InferSubmodules = true;
+ ActiveModule->InferredSubmoduleLoc = StarLoc;
+ ActiveModule->InferExplicitSubmodules = Explicit;
+ } else {
+ // We'll be inferring framework modules for this directory.
+ Map.InferredDirectories[Directory].InferModules = true;
+ Map.InferredDirectories[Directory].Attrs = Attrs;
+ Map.InferredDirectories[Directory].ModuleMapFile = ModuleMapFile;
+ // FIXME: Handle the 'framework' keyword.
+ }
+
+ // Parse the opening brace.
+ if (!Tok.is(MMToken::LBrace)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_lbrace_wildcard);
+ HadError = true;
+ return;
+ }
+ SourceLocation LBraceLoc = consumeToken();
+
+ // Parse the body of the inferred submodule.
+ bool Done = false;
+ do {
+ switch (Tok.Kind) {
+ case MMToken::EndOfFile:
+ case MMToken::RBrace:
+ Done = true;
+ break;
+
+ case MMToken::ExcludeKeyword: {
+ if (ActiveModule) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_inferred_member)
+ << (ActiveModule != nullptr);
+ consumeToken();
+ break;
+ }
+
+ consumeToken();
+ // FIXME: Support string-literal module names here.
+ if (!Tok.is(MMToken::Identifier)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_missing_exclude_name);
+ break;
+ }
+
+ Map.InferredDirectories[Directory].ExcludedModules
+ .push_back(Tok.getString());
+ consumeToken();
+ break;
+ }
+
+ case MMToken::ExportKeyword:
+ if (!ActiveModule) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_inferred_member)
+ << (ActiveModule != nullptr);
+ consumeToken();
+ break;
+ }
+
+ consumeToken();
+ if (Tok.is(MMToken::Star))
+ ActiveModule->InferExportWildcard = true;
+ else
+ Diags.Report(Tok.getLocation(),
+ diag::err_mmap_expected_export_wildcard);
+ consumeToken();
+ break;
+
+ case MMToken::ExplicitKeyword:
+ case MMToken::ModuleKeyword:
+ case MMToken::HeaderKeyword:
+ case MMToken::PrivateKeyword:
+ case MMToken::UmbrellaKeyword:
+ default:
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_inferred_member)
+ << (ActiveModule != nullptr);
+ consumeToken();
+ break;
+ }
+ } while (!Done);
+
+ if (Tok.is(MMToken::RBrace))
+ consumeToken();
+ else {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_rbrace);
+ Diags.Report(LBraceLoc, diag::note_mmap_lbrace_match);
+ HadError = true;
+ }
+}
+
+/// \brief Parse optional attributes.
+///
+/// attributes:
+/// attribute attributes
+/// attribute
+///
+/// attribute:
+/// [ identifier ]
+///
+/// \param Attrs Will be filled in with the parsed attributes.
+///
+/// \returns true if an error occurred, false otherwise.
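+///
+/// e.g. 'module MyLib [system] [extern_c] { ... }' (module name is
+/// illustrative) applies both recognized attributes; unknown attribute
+/// names are diagnosed with a warning and otherwise ignored.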
+bool ModuleMapParser::parseOptionalAttributes(Attributes &Attrs) {
+ bool HadError = false;
+
+ while (Tok.is(MMToken::LSquare)) {
+ // Consume the '['.
+ SourceLocation LSquareLoc = consumeToken();
+
+ // Check whether we have an attribute name here.
+ if (!Tok.is(MMToken::Identifier)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_attribute);
+ skipUntil(MMToken::RSquare);
+ if (Tok.is(MMToken::RSquare))
+ consumeToken();
+ HadError = true;
+ }
+
+ // Decode the attribute name.
+ AttributeKind Attribute
+ = llvm::StringSwitch<AttributeKind>(Tok.getString())
+ .Case("exhaustive", AT_exhaustive)
+ .Case("extern_c", AT_extern_c)
+ .Case("system", AT_system)
+ .Default(AT_unknown);
+ switch (Attribute) {
+ case AT_unknown:
+ Diags.Report(Tok.getLocation(), diag::warn_mmap_unknown_attribute)
+ << Tok.getString();
+ break;
+
+ case AT_system:
+ Attrs.IsSystem = true;
+ break;
+
+ case AT_extern_c:
+ Attrs.IsExternC = true;
+ break;
+
+ case AT_exhaustive:
+ Attrs.IsExhaustive = true;
+ break;
+ }
+ consumeToken();
+
+ // Consume the ']'.
+ if (!Tok.is(MMToken::RSquare)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_rsquare);
+ Diags.Report(LSquareLoc, diag::note_mmap_lsquare_match);
+ skipUntil(MMToken::RSquare);
+ HadError = true;
+ }
+
+ if (Tok.is(MMToken::RSquare))
+ consumeToken();
+ }
+
+ return HadError;
+}
+
+/// \brief Parse a module map file.
+///
+/// module-map-file:
+/// module-declaration*
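+///
+/// A complete, hypothetical input accepted by this entry point:
+/// \code
+/// module Alpha { header "Alpha.h" export * }
+/// extern module Beta "Beta/module.modulemap"
+/// \endcode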
+bool ModuleMapParser::parseModuleMapFile() {
+ do {
+ switch (Tok.Kind) {
+ case MMToken::EndOfFile:
+ return HadError;
+
+ case MMToken::ExplicitKeyword:
+ case MMToken::ExternKeyword:
+ case MMToken::ModuleKeyword:
+ case MMToken::FrameworkKeyword:
+ parseModuleDecl();
+ break;
+
+ case MMToken::Comma:
+ case MMToken::ConfigMacros:
+ case MMToken::Conflict:
+ case MMToken::Exclaim:
+ case MMToken::ExcludeKeyword:
+ case MMToken::ExportKeyword:
+ case MMToken::HeaderKeyword:
+ case MMToken::Identifier:
+ case MMToken::LBrace:
+ case MMToken::LinkKeyword:
+ case MMToken::LSquare:
+ case MMToken::Period:
+ case MMToken::PrivateKeyword:
+ case MMToken::RBrace:
+ case MMToken::RSquare:
+ case MMToken::RequiresKeyword:
+ case MMToken::Star:
+ case MMToken::StringLiteral:
+ case MMToken::TextualKeyword:
+ case MMToken::UmbrellaKeyword:
+ case MMToken::UseKeyword:
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_module);
+ HadError = true;
+ consumeToken();
+ break;
+ }
+ } while (true);
+}
+
+bool ModuleMap::parseModuleMapFile(const FileEntry *File, bool IsSystem,
+ const DirectoryEntry *Dir,
+ SourceLocation ExternModuleLoc) {
+ llvm::DenseMap<const FileEntry *, bool>::iterator Known
+ = ParsedModuleMap.find(File);
+ if (Known != ParsedModuleMap.end())
+ return Known->second;
+
+ assert(Target && "Missing target information");
+ auto FileCharacter = IsSystem ? SrcMgr::C_System : SrcMgr::C_User;
+ FileID ID = SourceMgr.createFileID(File, ExternModuleLoc, FileCharacter);
+ const llvm::MemoryBuffer *Buffer = SourceMgr.getBuffer(ID);
+ if (!Buffer)
+ return ParsedModuleMap[File] = true;
+
+ // Parse this module map file.
+ Lexer L(ID, SourceMgr.getBuffer(ID), SourceMgr, MMapLangOpts);
+ SourceLocation Start = L.getSourceLocation();
+ ModuleMapParser Parser(L, SourceMgr, Target, Diags, *this, File, Dir,
+ BuiltinIncludeDir, IsSystem);
+ bool Result = Parser.parseModuleMapFile();
+ ParsedModuleMap[File] = Result;
+
+ // Notify callbacks that we parsed it.
+ for (const auto &Cb : Callbacks)
+ Cb->moduleMapFileRead(Start, *File, IsSystem);
+ return Result;
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp b/contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp
new file mode 100644
index 0000000..bd48ae64
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp
@@ -0,0 +1,118 @@
+//===--- PPCaching.cpp - Handle caching lexed tokens ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements pieces of the Preprocessor interface that manage the
+// caching of lexed tokens.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Preprocessor.h"
+using namespace clang;
+
+// EnableBacktrackAtThisPos - From the point that this method is called, and
+// until CommitBacktrackedTokens() or Backtrack() is called, the Preprocessor
+// keeps track of the lexed tokens so that a subsequent Backtrack() call will
+// make the Preprocessor re-lex the same tokens.
+//
+// Nested backtracks are allowed, meaning that EnableBacktrackAtThisPos can
+// be called multiple times and CommitBacktrackedTokens/Backtrack calls will
+// be combined with the EnableBacktrackAtThisPos calls in reverse order.
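+//
+// A minimal client-side sketch (hypothetical caller code):
+//
+// PP.EnableBacktrackAtThisPos();
+// Token Tok;
+// PP.Lex(Tok);
+// if (Tok.is(tok::l_paren))
+// PP.CommitBacktrackedTokens(); // Keep the tokens we just lexed.
+// else
+// PP.Backtrack(); // Re-lex them later from the saved position.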
+void Preprocessor::EnableBacktrackAtThisPos() {
+ BacktrackPositions.push_back(CachedLexPos);
+ EnterCachingLexMode();
+}
+
+// Disable the last EnableBacktrackAtThisPos call.
+void Preprocessor::CommitBacktrackedTokens() {
+ assert(!BacktrackPositions.empty()
+ && "EnableBacktrackAtThisPos was not called!");
+ BacktrackPositions.pop_back();
+}
+
+// Make Preprocessor re-lex the tokens that were lexed since
+// EnableBacktrackAtThisPos() was previously called.
+void Preprocessor::Backtrack() {
+ assert(!BacktrackPositions.empty()
+ && "EnableBacktrackAtThisPos was not called!");
+ CachedLexPos = BacktrackPositions.back();
+ BacktrackPositions.pop_back();
+ recomputeCurLexerKind();
+}
+
+void Preprocessor::CachingLex(Token &Result) {
+ if (!InCachingLexMode())
+ return;
+
+ if (CachedLexPos < CachedTokens.size()) {
+ Result = CachedTokens[CachedLexPos++];
+ return;
+ }
+
+ ExitCachingLexMode();
+ Lex(Result);
+
+ if (isBacktrackEnabled()) {
+ // Cache the lexed token.
+ EnterCachingLexMode();
+ CachedTokens.push_back(Result);
+ ++CachedLexPos;
+ return;
+ }
+
+ if (CachedLexPos < CachedTokens.size()) {
+ EnterCachingLexMode();
+ } else {
+ // All cached tokens were consumed.
+ CachedTokens.clear();
+ CachedLexPos = 0;
+ }
+}
+
+void Preprocessor::EnterCachingLexMode() {
+ if (InCachingLexMode())
+ return;
+
+ PushIncludeMacroStack();
+ CurLexerKind = CLK_CachingLexer;
+}
+
+const Token &Preprocessor::PeekAhead(unsigned N) {
+ assert(CachedLexPos + N > CachedTokens.size() && "Confused caching.");
+ ExitCachingLexMode();
+ for (unsigned C = CachedLexPos + N - CachedTokens.size(); C > 0; --C) {
+ CachedTokens.push_back(Token());
+ Lex(CachedTokens.back());
+ }
+ EnterCachingLexMode();
+ return CachedTokens.back();
+}
+
+void Preprocessor::AnnotatePreviousCachedTokens(const Token &Tok) {
+ assert(Tok.isAnnotation() && "Expected annotation token");
+ assert(CachedLexPos != 0 && "Expected to have some cached tokens");
+ assert(CachedTokens[CachedLexPos-1].getLastLoc() == Tok.getAnnotationEndLoc()
+ && "The annotation should end at the most recent cached token");
+
+ // Start from the end of the cached tokens list and look for the token
+ // that is the beginning of the annotation token.
+ for (CachedTokensTy::size_type i = CachedLexPos; i != 0; --i) {
+ CachedTokensTy::iterator AnnotBegin = CachedTokens.begin() + i-1;
+ if (AnnotBegin->getLocation() == Tok.getLocation()) {
+ assert((BacktrackPositions.empty() || BacktrackPositions.back() < i) &&
+ "The backtrack pos points inside the annotated tokens!");
+ // Replace the cached tokens with the single annotation token.
+ if (i < CachedLexPos)
+ CachedTokens.erase(AnnotBegin + 1, CachedTokens.begin() + CachedLexPos);
+ *AnnotBegin = Tok;
+ CachedLexPos = i;
+ return;
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPCallbacks.cpp b/contrib/llvm/tools/clang/lib/Lex/PPCallbacks.cpp
new file mode 100644
index 0000000..952b926
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/PPCallbacks.cpp
@@ -0,0 +1,14 @@
+//===--- PPCallbacks.cpp - Callbacks for Preprocessor actions ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/PPCallbacks.h"
+
+using namespace clang;
+
+void PPChainedCallbacks::anchor() { }
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPConditionalDirectiveRecord.cpp b/contrib/llvm/tools/clang/lib/Lex/PPConditionalDirectiveRecord.cpp
new file mode 100644
index 0000000..12a7784
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/PPConditionalDirectiveRecord.cpp
@@ -0,0 +1,122 @@
+//===--- PPConditionalDirectiveRecord.cpp - Preprocessing Directives -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PPConditionalDirectiveRecord class, which maintains
+// a record of conditional directive regions.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Lex/PPConditionalDirectiveRecord.h"
+#include "llvm/Support/Capacity.h"
+
+using namespace clang;
+
+PPConditionalDirectiveRecord::PPConditionalDirectiveRecord(SourceManager &SM)
+ : SourceMgr(SM) {
+ CondDirectiveStack.push_back(SourceLocation());
+}
+
+bool PPConditionalDirectiveRecord::rangeIntersectsConditionalDirective(
+ SourceRange Range) const {
+ if (Range.isInvalid())
+ return false;
+
+ CondDirectiveLocsTy::const_iterator
+ low = std::lower_bound(CondDirectiveLocs.begin(), CondDirectiveLocs.end(),
+ Range.getBegin(), CondDirectiveLoc::Comp(SourceMgr));
+ if (low == CondDirectiveLocs.end())
+ return false;
+
+ if (SourceMgr.isBeforeInTranslationUnit(Range.getEnd(), low->getLoc()))
+ return false;
+
+ CondDirectiveLocsTy::const_iterator
+ upp = std::upper_bound(low, CondDirectiveLocs.end(),
+ Range.getEnd(), CondDirectiveLoc::Comp(SourceMgr));
+ SourceLocation uppRegion;
+ if (upp != CondDirectiveLocs.end())
+ uppRegion = upp->getRegionLoc();
+
+ return low->getRegionLoc() != uppRegion;
+}
+
+SourceLocation PPConditionalDirectiveRecord::findConditionalDirectiveRegionLoc(
+ SourceLocation Loc) const {
+ if (Loc.isInvalid())
+ return SourceLocation();
+ if (CondDirectiveLocs.empty())
+ return SourceLocation();
+
+ if (SourceMgr.isBeforeInTranslationUnit(CondDirectiveLocs.back().getLoc(),
+ Loc))
+ return CondDirectiveStack.back();
+
+ CondDirectiveLocsTy::const_iterator
+ low = std::lower_bound(CondDirectiveLocs.begin(), CondDirectiveLocs.end(),
+ Loc, CondDirectiveLoc::Comp(SourceMgr));
+ assert(low != CondDirectiveLocs.end());
+ return low->getRegionLoc();
+}
+
+void PPConditionalDirectiveRecord::addCondDirectiveLoc(
+ CondDirectiveLoc DirLoc) {
+ // Ignore directives in system headers.
+ if (SourceMgr.isInSystemHeader(DirLoc.getLoc()))
+ return;
+
+ assert(CondDirectiveLocs.empty() ||
+ SourceMgr.isBeforeInTranslationUnit(CondDirectiveLocs.back().getLoc(),
+ DirLoc.getLoc()));
+ CondDirectiveLocs.push_back(DirLoc);
+}
+
+void PPConditionalDirectiveRecord::If(SourceLocation Loc,
+ SourceRange ConditionRange,
+ ConditionValueKind ConditionValue) {
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+ CondDirectiveStack.push_back(Loc);
+}
+
+void PPConditionalDirectiveRecord::Ifdef(SourceLocation Loc,
+ const Token &MacroNameTok,
+ const MacroDefinition &MD) {
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+ CondDirectiveStack.push_back(Loc);
+}
+
+void PPConditionalDirectiveRecord::Ifndef(SourceLocation Loc,
+ const Token &MacroNameTok,
+ const MacroDefinition &MD) {
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+ CondDirectiveStack.push_back(Loc);
+}
+
+void PPConditionalDirectiveRecord::Elif(SourceLocation Loc,
+ SourceRange ConditionRange,
+ ConditionValueKind ConditionValue,
+ SourceLocation IfLoc) {
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+ CondDirectiveStack.back() = Loc;
+}
+
+void PPConditionalDirectiveRecord::Else(SourceLocation Loc,
+ SourceLocation IfLoc) {
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+ CondDirectiveStack.back() = Loc;
+}
+
+void PPConditionalDirectiveRecord::Endif(SourceLocation Loc,
+ SourceLocation IfLoc) {
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+ assert(!CondDirectiveStack.empty());
+ CondDirectiveStack.pop_back();
+}
+
+size_t PPConditionalDirectiveRecord::getTotalMemory() const {
+ return llvm::capacity_in_bytes(CondDirectiveLocs);
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp b/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
new file mode 100644
index 0000000..c02a0cb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
@@ -0,0 +1,2567 @@
+//===--- PPDirectives.cpp - Directive Handling for Preprocessor -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Implements # directive processing for the Preprocessor.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/CodeCompletionHandler.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/HeaderSearchOptions.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/ModuleLoader.h"
+#include "clang/Lex/Pragma.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/SaveAndRestore.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Utility Methods for Preprocessor Directive Handling.
+//===----------------------------------------------------------------------===//
+
+MacroInfo *Preprocessor::AllocateMacroInfo() {
+ MacroInfoChain *MIChain = BP.Allocate<MacroInfoChain>();
+ MIChain->Next = MIChainHead;
+ MIChainHead = MIChain;
+ return &MIChain->MI;
+}
+
+MacroInfo *Preprocessor::AllocateMacroInfo(SourceLocation L) {
+ MacroInfo *MI = AllocateMacroInfo();
+ new (MI) MacroInfo(L);
+ return MI;
+}
+
+MacroInfo *Preprocessor::AllocateDeserializedMacroInfo(SourceLocation L,
+ unsigned SubModuleID) {
+ static_assert(llvm::AlignOf<MacroInfo>::Alignment >= sizeof(SubModuleID),
+ "alignment for MacroInfo is less than the ID");
+ DeserializedMacroInfoChain *MIChain =
+ BP.Allocate<DeserializedMacroInfoChain>();
+ MIChain->Next = DeserialMIChainHead;
+ DeserialMIChainHead = MIChain;
+
+ MacroInfo *MI = &MIChain->MI;
+ new (MI) MacroInfo(L);
+ MI->FromASTFile = true;
+ MI->setOwningModuleID(SubModuleID);
+ return MI;
+}
+
+DefMacroDirective *Preprocessor::AllocateDefMacroDirective(MacroInfo *MI,
+ SourceLocation Loc) {
+ return new (BP) DefMacroDirective(MI, Loc);
+}
+
+UndefMacroDirective *
+Preprocessor::AllocateUndefMacroDirective(SourceLocation UndefLoc) {
+ return new (BP) UndefMacroDirective(UndefLoc);
+}
+
+VisibilityMacroDirective *
+Preprocessor::AllocateVisibilityMacroDirective(SourceLocation Loc,
+ bool isPublic) {
+ return new (BP) VisibilityMacroDirective(Loc, isPublic);
+}
+
+/// \brief Read and discard all tokens remaining on the current line until
+/// the tok::eod token is found.
+void Preprocessor::DiscardUntilEndOfDirective() {
+ Token Tmp;
+ do {
+ LexUnexpandedToken(Tmp);
+ assert(Tmp.isNot(tok::eof) && "EOF seen while discarding directive tokens");
+ } while (Tmp.isNot(tok::eod));
+}
+
+/// \brief Enumerates possible cases of #define/#undef a reserved identifier.
+enum MacroDiag {
+ MD_NoWarn, //> Not a reserved identifier
+ MD_KeywordDef, //> Macro hides keyword, enabled by default
+ MD_ReservedMacro //> #define of #undef reserved id, disabled by default
+};
+
+/// \brief Checks if the specified identifier is reserved in the specified
+/// language.
+/// This function does not check if the identifier is a keyword.
+static bool isReservedId(StringRef Text, const LangOptions &Lang) {
+ // C++ [macro.names], C11 7.1.3:
+ // All identifiers that begin with an underscore and either an uppercase
+ // letter or another underscore are always reserved for any use.
+ if (Text.size() >= 2 && Text[0] == '_' &&
+ (isUppercase(Text[1]) || Text[1] == '_'))
+ return true;
+ // C++ [global.names]
+ // Each name that contains a double underscore ... is reserved to the
+ // implementation for any use.
+ if (Lang.CPlusPlus) {
+ if (Text.find("__") != StringRef::npos)
+ return true;
+ }
+ return false;
+}
+
+static MacroDiag shouldWarnOnMacroDef(Preprocessor &PP, IdentifierInfo *II) {
+ const LangOptions &Lang = PP.getLangOpts();
+ StringRef Text = II->getName();
+ if (isReservedId(Text, Lang))
+ return MD_ReservedMacro;
+ if (II->isKeyword(Lang))
+ return MD_KeywordDef;
+ if (Lang.CPlusPlus11 && (Text.equals("override") || Text.equals("final")))
+ return MD_KeywordDef;
+ return MD_NoWarn;
+}
+
+static MacroDiag shouldWarnOnMacroUndef(Preprocessor &PP, IdentifierInfo *II) {
+ const LangOptions &Lang = PP.getLangOpts();
+ StringRef Text = II->getName();
+ // Do not warn on keyword undef. It is generally harmless and widely used.
+ if (isReservedId(Text, Lang))
+ return MD_ReservedMacro;
+ return MD_NoWarn;
+}
+
+bool Preprocessor::CheckMacroName(Token &MacroNameTok, MacroUse isDefineUndef,
+ bool *ShadowFlag) {
+ // Missing macro name?
+ if (MacroNameTok.is(tok::eod))
+ return Diag(MacroNameTok, diag::err_pp_missing_macro_name);
+
+ IdentifierInfo *II = MacroNameTok.getIdentifierInfo();
+ if (!II) {
+ bool Invalid = false;
+ std::string Spelling = getSpelling(MacroNameTok, &Invalid);
+ if (Invalid)
+ return Diag(MacroNameTok, diag::err_pp_macro_not_identifier);
+ II = getIdentifierInfo(Spelling);
+
+ if (!II->isCPlusPlusOperatorKeyword())
+ return Diag(MacroNameTok, diag::err_pp_macro_not_identifier);
+
+ // C++ 2.5p2: Alternative tokens behave the same as their primary tokens
+ // except for their spellings.
+ Diag(MacroNameTok, getLangOpts().MicrosoftExt
+ ? diag::ext_pp_operator_used_as_macro_name
+ : diag::err_pp_operator_used_as_macro_name)
+ << II << MacroNameTok.getKind();
+
+ // Allow #defining |and| and friends for Microsoft compatibility or
+ // recovery when legacy C headers are included in C++.
+ MacroNameTok.setIdentifierInfo(II);
+ }
+
+ if ((isDefineUndef != MU_Other) && II->getPPKeywordID() == tok::pp_defined) {
+ // Error if defining "defined": C99 6.10.8/4, C++ [cpp.predefined]p4.
+ return Diag(MacroNameTok, diag::err_defined_macro_name);
+ }
+
+ if (isDefineUndef == MU_Undef) {
+ auto *MI = getMacroInfo(II);
+ if (MI && MI->isBuiltinMacro()) {
+ // Warn if undefining "__LINE__" and other builtins, per C99 6.10.8/4
+ // and C++ [cpp.predefined]p4], but allow it as an extension.
+ Diag(MacroNameTok, diag::ext_pp_undef_builtin_macro);
+ }
+ }
+
+ // If defining/undefining reserved identifier or a keyword, we need to issue
+ // a warning.
+ SourceLocation MacroNameLoc = MacroNameTok.getLocation();
+ if (ShadowFlag)
+ *ShadowFlag = false;
+ if (!SourceMgr.isInSystemHeader(MacroNameLoc) &&
+ (strcmp(SourceMgr.getBufferName(MacroNameLoc), "<built-in>") != 0)) {
+ MacroDiag D = MD_NoWarn;
+ if (isDefineUndef == MU_Define) {
+ D = shouldWarnOnMacroDef(*this, II);
+ }
+ else if (isDefineUndef == MU_Undef)
+ D = shouldWarnOnMacroUndef(*this, II);
+ if (D == MD_KeywordDef) {
+ // We do not want to warn on some patterns widely used in configuration
+ // scripts. This requires analyzing the next tokens, so do not issue
+ // warnings now; only inform the caller.
+ if (ShadowFlag)
+ *ShadowFlag = true;
+ }
+ if (D == MD_ReservedMacro)
+ Diag(MacroNameTok, diag::warn_pp_macro_is_reserved_id);
+ }
+
+ // Okay, we got a good identifier.
+ return false;
+}
+
+/// \brief Lex and validate a macro name, which occurs after a
+/// \#define or \#undef.
+///
+/// This sets the token kind to eod and discards the rest of the macro line if
+/// the macro name is invalid.
+///
+/// \param MacroNameTok Token that is expected to be a macro name.
+/// \param isDefineUndef Context in which macro is used.
+/// \param ShadowFlag Points to a flag that is set if macro shadows a keyword.
+void Preprocessor::ReadMacroName(Token &MacroNameTok, MacroUse isDefineUndef,
+ bool *ShadowFlag) {
+ // Read the token, don't allow macro expansion on it.
+ LexUnexpandedToken(MacroNameTok);
+
+ if (MacroNameTok.is(tok::code_completion)) {
+ if (CodeComplete)
+ CodeComplete->CodeCompleteMacroName(isDefineUndef == MU_Define);
+ setCodeCompletionReached();
+ LexUnexpandedToken(MacroNameTok);
+ }
+
+ if (!CheckMacroName(MacroNameTok, isDefineUndef, ShadowFlag))
+ return;
+
+ // Invalid macro name, read and discard the rest of the line and set the
+ // token kind to tok::eod if necessary.
+ if (MacroNameTok.isNot(tok::eod)) {
+ MacroNameTok.setKind(tok::eod);
+ DiscardUntilEndOfDirective();
+ }
+}
+
+/// \brief Ensure that the next token is a tok::eod token.
+///
+/// If not, emit a diagnostic and consume up until the eod. If EnableMacros is
+/// true, then we consider macros that expand to zero tokens as being ok.
+void Preprocessor::CheckEndOfDirective(const char *DirType, bool EnableMacros) {
+ Token Tmp;
+ // Lex unexpanded tokens for most directives: macros might expand to zero
+ // tokens, causing us to miss diagnosing invalid lines. Some directives (like
+ // #line) allow empty macros.
+ if (EnableMacros)
+ Lex(Tmp);
+ else
+ LexUnexpandedToken(Tmp);
+
+ // There should be no tokens after the directive, but we allow them as an
+ // extension.
+ while (Tmp.is(tok::comment)) // Skip comments in -C mode.
+ LexUnexpandedToken(Tmp);
+
+ if (Tmp.isNot(tok::eod)) {
+ // Add a fixit in GNU/C99/C++ mode. Don't offer a fixit for strict-C89,
+ // or if this is a macro-style preprocessing directive, because it is more
+ // trouble than it is worth to insert /**/ and also check that there is no
+ // /**/ in the range.
+ FixItHint Hint;
+ if ((LangOpts.GNUMode || LangOpts.C99 || LangOpts.CPlusPlus) &&
+ !CurTokenLexer)
+ Hint = FixItHint::CreateInsertion(Tmp.getLocation(), "//");
+ Diag(Tmp, diag::ext_pp_extra_tokens_at_eol) << DirType << Hint;
+ DiscardUntilEndOfDirective();
+ }
+}
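+
+// A hedged example of the fixit above (the input is hypothetical): given
+//   #endif MY_GUARD
+// the ext_pp_extra_tokens_at_eol diagnostic fires and, in GNU/C99/C++ modes
+// when not inside a macro expansion, suggests inserting "//" before the
+// stray token:
+//   #endif //MY_GUARD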
+
+
+
+/// SkipExcludedConditionalBlock - We just read a \#if or related directive and
+/// decided that the subsequent tokens are in the \#if'd out portion of the
+/// file. Lex the rest of the file, until we see an \#endif. If
+/// FoundNonSkipPortion is true, then we have already emitted code for part of
+/// this \#if directive, so \#else/\#elif blocks should never be entered.
+/// If FoundElse is true, then a \#else has already been seen, so any further
+/// \#else directive is a duplicate. When this returns, the caller can lex the
+/// first valid token.
+void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
+ bool FoundNonSkipPortion,
+ bool FoundElse,
+ SourceLocation ElseLoc) {
+ ++NumSkipped;
+ assert(!CurTokenLexer && CurPPLexer && "Lexing a macro, not a file?");
+
+ CurPPLexer->pushConditionalLevel(IfTokenLoc, /*isSkipping*/false,
+ FoundNonSkipPortion, FoundElse);
+
+ if (CurPTHLexer) {
+ PTHSkipExcludedConditionalBlock();
+ return;
+ }
+
+ // Enter raw mode to disable identifier lookup (and thus macro expansion),
+ // disabling warnings, etc.
+ CurPPLexer->LexingRawMode = true;
+ Token Tok;
+ while (1) {
+ CurLexer->Lex(Tok);
+
+ if (Tok.is(tok::code_completion)) {
+ if (CodeComplete)
+ CodeComplete->CodeCompleteInConditionalExclusion();
+ setCodeCompletionReached();
+ continue;
+ }
+
+ // If this is the end of the buffer, we have an error.
+ if (Tok.is(tok::eof)) {
+ // Emit errors for each unterminated conditional on the stack, including
+ // the current one.
+ while (!CurPPLexer->ConditionalStack.empty()) {
+ if (CurLexer->getFileLoc() != CodeCompletionFileLoc)
+ Diag(CurPPLexer->ConditionalStack.back().IfLoc,
+ diag::err_pp_unterminated_conditional);
+ CurPPLexer->ConditionalStack.pop_back();
+ }
+
+ // Just return and let the caller lex after this #include.
+ break;
+ }
+
+ // If this token is not a preprocessor directive, just skip it.
+ if (Tok.isNot(tok::hash) || !Tok.isAtStartOfLine())
+ continue;
+
+ // We just parsed a # character at the start of a line, so we're in
+ // directive mode. Tell the lexer this so any newlines we see will be
+ // converted into an EOD token (which terminates the directive).
+ CurPPLexer->ParsingPreprocessorDirective = true;
+ if (CurLexer) CurLexer->SetKeepWhitespaceMode(false);
+
+
+ // Read the next token, the directive flavor.
+ LexUnexpandedToken(Tok);
+
+ // If this isn't an identifier directive (e.g. is "# 1\n" or "#\n", or
+ // something bogus), skip it.
+ if (Tok.isNot(tok::raw_identifier)) {
+ CurPPLexer->ParsingPreprocessorDirective = false;
+ // Restore comment saving mode.
+ if (CurLexer) CurLexer->resetExtendedTokenMode();
+ continue;
+ }
+
+ // If the first letter isn't i or e, it isn't interesting to us. We know that
+ // this is safe in the face of spelling differences, because there is no way
+ // to spell an i/e in a strange way that is another letter. Skipping this
+ // allows us to avoid looking up the identifier info for #define/#undef and
+ // other common directives.
+ StringRef RI = Tok.getRawIdentifier();
+
+ char FirstChar = RI[0];
+ if (FirstChar >= 'a' && FirstChar <= 'z' &&
+ FirstChar != 'i' && FirstChar != 'e') {
+ CurPPLexer->ParsingPreprocessorDirective = false;
+ // Restore comment saving mode.
+ if (CurLexer) CurLexer->resetExtendedTokenMode();
+ continue;
+ }
+
+ // Get the identifier name without trigraphs or embedded newlines. Note
+ // that we can't use Tok.getIdentifierInfo() because its lookup is disabled
+ // when skipping.
+ char DirectiveBuf[20];
+ StringRef Directive;
+ if (!Tok.needsCleaning() && RI.size() < 20) {
+ Directive = RI;
+ } else {
+ std::string DirectiveStr = getSpelling(Tok);
+ unsigned IdLen = DirectiveStr.size();
+ if (IdLen >= 20) {
+ CurPPLexer->ParsingPreprocessorDirective = false;
+ // Restore comment saving mode.
+ if (CurLexer) CurLexer->resetExtendedTokenMode();
+ continue;
+ }
+ memcpy(DirectiveBuf, &DirectiveStr[0], IdLen);
+ Directive = StringRef(DirectiveBuf, IdLen);
+ }
+
+ if (Directive.startswith("if")) {
+ StringRef Sub = Directive.substr(2);
+ if (Sub.empty() || // "if"
+ Sub == "def" || // "ifdef"
+ Sub == "ndef") { // "ifndef"
+ // We know the entire #if/#ifdef/#ifndef block will be skipped, don't
+ // bother parsing the condition.
+ DiscardUntilEndOfDirective();
+ CurPPLexer->pushConditionalLevel(Tok.getLocation(), /*wasskipping*/true,
+ /*foundnonskip*/false,
+ /*foundelse*/false);
+ }
+ } else if (Directive[0] == 'e') {
+ StringRef Sub = Directive.substr(1);
+ if (Sub == "ndif") { // "endif"
+ PPConditionalInfo CondInfo;
+ CondInfo.WasSkipping = true; // Silence bogus warning.
+ bool InCond = CurPPLexer->popConditionalLevel(CondInfo);
+ (void)InCond; // Silence warning in no-asserts mode.
+ assert(!InCond && "Can't be skipping if not in a conditional!");
+
+ // If we popped the outermost skipping block, we're done skipping!
+ if (!CondInfo.WasSkipping) {
+ // Restore the value of LexingRawMode so that trailing comments
+ // are handled correctly, if we've reached the outermost block.
+ CurPPLexer->LexingRawMode = false;
+ CheckEndOfDirective("endif");
+ CurPPLexer->LexingRawMode = true;
+ if (Callbacks)
+ Callbacks->Endif(Tok.getLocation(), CondInfo.IfLoc);
+ break;
+ } else {
+ DiscardUntilEndOfDirective();
+ }
+ } else if (Sub == "lse") { // "else".
+ // #else directive in a skipping conditional. If not in some other
+ // skipping conditional, and if #else hasn't already been seen, enter it
+ // as a non-skipping conditional.
+ PPConditionalInfo &CondInfo = CurPPLexer->peekConditionalLevel();
+
+ // If this is a #else with a #else before it, report the error.
+ if (CondInfo.FoundElse) Diag(Tok, diag::pp_err_else_after_else);
+
+ // Note that we've seen a #else in this conditional.
+ CondInfo.FoundElse = true;
+
+ // If the conditional is at the top level, and the #if block wasn't
+ // entered, enter the #else block now.
+ if (!CondInfo.WasSkipping && !CondInfo.FoundNonSkip) {
+ CondInfo.FoundNonSkip = true;
+ // Restore the value of LexingRawMode so that trailing comments
+ // are handled correctly.
+ CurPPLexer->LexingRawMode = false;
+ CheckEndOfDirective("else");
+ CurPPLexer->LexingRawMode = true;
+ if (Callbacks)
+ Callbacks->Else(Tok.getLocation(), CondInfo.IfLoc);
+ break;
+ } else {
+ DiscardUntilEndOfDirective(); // C99 6.10p4.
+ }
+ } else if (Sub == "lif") { // "elif".
+ PPConditionalInfo &CondInfo = CurPPLexer->peekConditionalLevel();
+
+ // If this is a #elif with a #else before it, report the error.
+ if (CondInfo.FoundElse) Diag(Tok, diag::pp_err_elif_after_else);
+
+ // If this is in a skipping block or if we've already handled this #if
+ // block, don't bother parsing the condition.
+ if (CondInfo.WasSkipping || CondInfo.FoundNonSkip) {
+ DiscardUntilEndOfDirective();
+ } else {
+ const SourceLocation CondBegin = CurPPLexer->getSourceLocation();
+ // Restore the value of LexingRawMode so that identifiers are
+ // looked up, etc, inside the #elif expression.
+ assert(CurPPLexer->LexingRawMode && "We have to be skipping here!");
+ CurPPLexer->LexingRawMode = false;
+ IdentifierInfo *IfNDefMacro = nullptr;
+ const bool CondValue = EvaluateDirectiveExpression(IfNDefMacro);
+ CurPPLexer->LexingRawMode = true;
+ if (Callbacks) {
+ const SourceLocation CondEnd = CurPPLexer->getSourceLocation();
+ Callbacks->Elif(Tok.getLocation(),
+ SourceRange(CondBegin, CondEnd),
+ (CondValue ? PPCallbacks::CVK_True : PPCallbacks::CVK_False), CondInfo.IfLoc);
+ }
+ // If this condition is true, enter it!
+ if (CondValue) {
+ CondInfo.FoundNonSkip = true;
+ break;
+ }
+ }
+ }
+ }
+
+ CurPPLexer->ParsingPreprocessorDirective = false;
+ // Restore comment saving mode.
+ if (CurLexer) CurLexer->resetExtendedTokenMode();
+ }
+
+ // Finally, if we are out of the conditional (saw an #endif or ran off the
+ // end of the file), just stop skipping and return to lexing whatever came
+ // after the #if block.
+ CurPPLexer->LexingRawMode = false;
+
+ if (Callbacks) {
+ SourceLocation BeginLoc = ElseLoc.isValid() ? ElseLoc : IfTokenLoc;
+ Callbacks->SourceRangeSkipped(SourceRange(BeginLoc, Tok.getLocation()));
+ }
+}
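+
+// A hedged walkthrough of the loop above on a hypothetical input:
+//   #if 0        // condition already evaluated false by the caller
+//   int a;       // discarded in raw mode
+//   #elif 1      // evaluated in non-raw mode; FoundNonSkip becomes true
+//   int b;       // the loop breaks; the caller lexes this normally
+//   #endif
+// The SourceRangeSkipped callback then covers everything from the #if (or
+// #else, when ElseLoc is valid) up to the token where skipping stopped.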
+
+void Preprocessor::PTHSkipExcludedConditionalBlock() {
+
+ while (1) {
+ assert(CurPTHLexer);
+ assert(CurPTHLexer->LexingRawMode == false);
+
+ // Skip to the next '#else', '#elif', or '#endif'.
+ if (CurPTHLexer->SkipBlock()) {
+ // We have reached an #endif. Both the '#' and 'endif' tokens
+ // have been consumed by the PTHLexer. Just pop off the condition level.
+ PPConditionalInfo CondInfo;
+ bool InCond = CurPTHLexer->popConditionalLevel(CondInfo);
+ (void)InCond; // Silence warning in no-asserts mode.
+ assert(!InCond && "Can't be skipping if not in a conditional!");
+ break;
+ }
+
+ // We have reached a '#else' or '#elif'. Lex the next token to get
+ // the directive flavor.
+ Token Tok;
+ LexUnexpandedToken(Tok);
+
+ // We can actually look up the IdentifierInfo here since we aren't in
+ // raw mode.
+ tok::PPKeywordKind K = Tok.getIdentifierInfo()->getPPKeywordID();
+
+ if (K == tok::pp_else) {
+ // #else: Enter the else condition. We aren't in a nested condition
+ // since we skip those. We're always in the one matching the last
+ // block we skipped.
+ PPConditionalInfo &CondInfo = CurPTHLexer->peekConditionalLevel();
+ // Note that we've seen a #else in this conditional.
+ CondInfo.FoundElse = true;
+
+ // If the #if block wasn't entered then enter the #else block now.
+ if (!CondInfo.FoundNonSkip) {
+ CondInfo.FoundNonSkip = true;
+
+ // Scan until the eod token.
+ CurPTHLexer->ParsingPreprocessorDirective = true;
+ DiscardUntilEndOfDirective();
+ CurPTHLexer->ParsingPreprocessorDirective = false;
+
+ break;
+ }
+
+ // Otherwise skip this block.
+ continue;
+ }
+
+ assert(K == tok::pp_elif);
+ PPConditionalInfo &CondInfo = CurPTHLexer->peekConditionalLevel();
+
+ // If this is a #elif with a #else before it, report the error.
+ if (CondInfo.FoundElse)
+ Diag(Tok, diag::pp_err_elif_after_else);
+
+ // If this is in a skipping block or if we've already handled this #if
+ // block, don't bother parsing the condition. We just skip this block.
+ if (CondInfo.FoundNonSkip)
+ continue;
+
+ // Evaluate the condition of the #elif.
+ IdentifierInfo *IfNDefMacro = nullptr;
+ CurPTHLexer->ParsingPreprocessorDirective = true;
+ bool ShouldEnter = EvaluateDirectiveExpression(IfNDefMacro);
+ CurPTHLexer->ParsingPreprocessorDirective = false;
+
+ // If this condition is true, enter it!
+ if (ShouldEnter) {
+ CondInfo.FoundNonSkip = true;
+ break;
+ }
+
+ // Otherwise, skip this block and go to the next one.
+ continue;
+ }
+}
+
+Module *Preprocessor::getModuleForLocation(SourceLocation Loc) {
+ ModuleMap &ModMap = HeaderInfo.getModuleMap();
+ if (SourceMgr.isInMainFile(Loc)) {
+ if (Module *CurMod = getCurrentModule())
+ return CurMod; // Compiling a module.
+ return ModMap.SourceModule; // Compiling a source.
+ }
+ // Try to determine the module of the include directive.
+ // FIXME: Look into directly passing the FileEntry from LookupFile instead.
+ FileID IDOfIncl = SourceMgr.getFileID(SourceMgr.getExpansionLoc(Loc));
+ if (const FileEntry *EntryOfIncl = SourceMgr.getFileEntryForID(IDOfIncl)) {
+ // The include comes from a file.
+ return ModMap.findModuleForHeader(EntryOfIncl).getModule();
+ } else {
+ // The include does not come from a file,
+ // so it is probably a module compilation.
+ return getCurrentModule();
+ }
+}
+
+Module *Preprocessor::getModuleContainingLocation(SourceLocation Loc) {
+ return HeaderInfo.getModuleMap().inferModuleFromLocation(
+ FullSourceLoc(Loc, SourceMgr));
+}
+
+const FileEntry *Preprocessor::LookupFile(
+ SourceLocation FilenameLoc,
+ StringRef Filename,
+ bool isAngled,
+ const DirectoryLookup *FromDir,
+ const FileEntry *FromFile,
+ const DirectoryLookup *&CurDir,
+ SmallVectorImpl<char> *SearchPath,
+ SmallVectorImpl<char> *RelativePath,
+ ModuleMap::KnownHeader *SuggestedModule,
+ bool SkipCache) {
+ Module *RequestingModule = getModuleForLocation(FilenameLoc);
+
+ // If the header lookup mechanism may be relative to the current inclusion
+ // stack, record the parent #includes.
+ SmallVector<std::pair<const FileEntry *, const DirectoryEntry *>, 16>
+ Includers;
+ if (!FromDir && !FromFile) {
+ FileID FID = getCurrentFileLexer()->getFileID();
+ const FileEntry *FileEnt = SourceMgr.getFileEntryForID(FID);
+
+ // If there is no file entry associated with this file, it must be the
+ // predefines buffer or the module includes buffer. Any other file is not
+ // lexed with a normal lexer, so it won't be scanned for preprocessor
+ // directives.
+ //
+ // If we have the predefines buffer, resolve #include references (which come
+ // from the -include command line argument) from the current working
+ // directory instead of relative to the main file.
+ //
+ // If we have the module includes buffer, resolve #include references (which
+ // come from header declarations in the module map) relative to the module
+ // map file.
+ if (!FileEnt) {
+ if (FID == SourceMgr.getMainFileID() && MainFileDir)
+ Includers.push_back(std::make_pair(nullptr, MainFileDir));
+ else if ((FileEnt =
+ SourceMgr.getFileEntryForID(SourceMgr.getMainFileID())))
+ Includers.push_back(std::make_pair(FileEnt, FileMgr.getDirectory(".")));
+ } else {
+ Includers.push_back(std::make_pair(FileEnt, FileEnt->getDir()));
+ }
+
+ // MSVC searches the current include stack from top to bottom for
+ // headers included by quoted include directives.
+ // See: http://msdn.microsoft.com/en-us/library/36k2cdd4.aspx
+ if (LangOpts.MSVCCompat && !isAngled) {
+ for (unsigned i = 0, e = IncludeMacroStack.size(); i != e; ++i) {
+ IncludeStackInfo &ISEntry = IncludeMacroStack[e - i - 1];
+ if (IsFileLexer(ISEntry))
+ if ((FileEnt = ISEntry.ThePPLexer->getFileEntry()))
+ Includers.push_back(std::make_pair(FileEnt, FileEnt->getDir()));
+ }
+ }
+ }
+
+ CurDir = CurDirLookup;
+
+ if (FromFile) {
+ // We're supposed to start looking from after a particular file. Search
+ // the include path until we find that file or run out of files.
+ const DirectoryLookup *TmpCurDir = CurDir;
+ const DirectoryLookup *TmpFromDir = nullptr;
+ while (const FileEntry *FE = HeaderInfo.LookupFile(
+ Filename, FilenameLoc, isAngled, TmpFromDir, TmpCurDir,
+ Includers, SearchPath, RelativePath, RequestingModule,
+ SuggestedModule, SkipCache)) {
+ // Keep looking as if this file did a #include_next.
+ TmpFromDir = TmpCurDir;
+ ++TmpFromDir;
+ if (FE == FromFile) {
+ // Found it.
+ FromDir = TmpFromDir;
+ CurDir = TmpCurDir;
+ break;
+ }
+ }
+ }
+
+ // Do a standard file entry lookup.
+ const FileEntry *FE = HeaderInfo.LookupFile(
+ Filename, FilenameLoc, isAngled, FromDir, CurDir, Includers, SearchPath,
+ RelativePath, RequestingModule, SuggestedModule, SkipCache);
+ if (FE) {
+ if (SuggestedModule && !LangOpts.AsmPreprocessor)
+ HeaderInfo.getModuleMap().diagnoseHeaderInclusion(
+ RequestingModule, FilenameLoc, Filename, FE);
+ return FE;
+ }
+
+ const FileEntry *CurFileEnt;
+ // Otherwise, see if this is a subframework header. If so, this is relative
+ // to one of the headers on the #include stack. Walk the list of the current
+ // headers on the #include stack and pass them to HeaderInfo.
+ if (IsFileLexer()) {
+ if ((CurFileEnt = CurPPLexer->getFileEntry())) {
+ if ((FE = HeaderInfo.LookupSubframeworkHeader(Filename, CurFileEnt,
+ SearchPath, RelativePath,
+ RequestingModule,
+ SuggestedModule))) {
+ if (SuggestedModule && !LangOpts.AsmPreprocessor)
+ HeaderInfo.getModuleMap().diagnoseHeaderInclusion(
+ RequestingModule, FilenameLoc, Filename, FE);
+ return FE;
+ }
+ }
+ }
+
+ for (unsigned i = 0, e = IncludeMacroStack.size(); i != e; ++i) {
+ IncludeStackInfo &ISEntry = IncludeMacroStack[e-i-1];
+ if (IsFileLexer(ISEntry)) {
+ if ((CurFileEnt = ISEntry.ThePPLexer->getFileEntry())) {
+ if ((FE = HeaderInfo.LookupSubframeworkHeader(
+ Filename, CurFileEnt, SearchPath, RelativePath,
+ RequestingModule, SuggestedModule))) {
+ if (SuggestedModule && !LangOpts.AsmPreprocessor)
+ HeaderInfo.getModuleMap().diagnoseHeaderInclusion(
+ RequestingModule, FilenameLoc, Filename, FE);
+ return FE;
+ }
+ }
+ }
+ }
+
+ // Otherwise, we really couldn't find the file.
+ return nullptr;
+}
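+
+// A hedged note on the search order this sets up for quoted includes in
+// MSVC-compatible mode: Includers is populated with the current file first,
+// then each file on the include stack from most- to least-recently entered,
+// so "x.h" (hypothetical) is tried next to its direct includer, then next to
+// the transitive includers, before the normal header search path.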
+
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Directive Handling.
+//===----------------------------------------------------------------------===//
+
+class Preprocessor::ResetMacroExpansionHelper {
+public:
+ ResetMacroExpansionHelper(Preprocessor *pp)
+ : PP(pp), save(pp->DisableMacroExpansion) {
+ if (pp->MacroExpansionInDirectivesOverride)
+ pp->DisableMacroExpansion = false;
+ }
+ ~ResetMacroExpansionHelper() {
+ PP->DisableMacroExpansion = save;
+ }
+private:
+ Preprocessor *PP;
+ bool save;
+};
+
+/// HandleDirective - This callback is invoked when the lexer sees a # token
+/// at the start of a line. This consumes the directive, modifies the
+/// lexer/preprocessor state, and advances the lexer(s) so that the next token
+/// read is the correct one.
+void Preprocessor::HandleDirective(Token &Result) {
+ // FIXME: Traditional: # with whitespace before it not recognized by K&R?
+
+ // We just parsed a # character at the start of a line, so we're in directive
+ // mode. Tell the lexer this so any newlines we see will be converted into an
+ // EOD token (which terminates the directive).
+ CurPPLexer->ParsingPreprocessorDirective = true;
+ if (CurLexer) CurLexer->SetKeepWhitespaceMode(false);
+
+ bool ImmediatelyAfterTopLevelIfndef =
+ CurPPLexer->MIOpt.getImmediatelyAfterTopLevelIfndef();
+ CurPPLexer->MIOpt.resetImmediatelyAfterTopLevelIfndef();
+
+ ++NumDirectives;
+
+ // We are about to read a token. For the multiple-include optimization's
+ // finite automaton (FA) to work, we have to remember if we had read any
+ // tokens *before* this pp-directive.
+ bool ReadAnyTokensBeforeDirective = CurPPLexer->MIOpt.getHasReadAnyTokensVal();
+
+ // Save the '#' token in case we need to return it later.
+ Token SavedHash = Result;
+
+ // Read the next token, the directive flavor. This isn't expanded due to
+ // C99 6.10.3p8.
+ LexUnexpandedToken(Result);
+
+ // C99 6.10.3p11: Is this preprocessor directive in a macro invocation? e.g.:
+ // #define A(x) #x
+ // A(abc
+ // #warning blah
+ // def)
+ // If so, the user is relying on undefined behavior, emit a diagnostic. Do
+ // not support this for #include-like directives, since that can result in
+ // terrible diagnostics, and does not work in GCC.
+ if (InMacroArgs) {
+ if (IdentifierInfo *II = Result.getIdentifierInfo()) {
+ switch (II->getPPKeywordID()) {
+ case tok::pp_include:
+ case tok::pp_import:
+ case tok::pp_include_next:
+ case tok::pp___include_macros:
+ case tok::pp_pragma:
+ Diag(Result, diag::err_embedded_directive) << II->getName();
+ DiscardUntilEndOfDirective();
+ return;
+ default:
+ break;
+ }
+ }
+ Diag(Result, diag::ext_embedded_directive);
+ }
+
+ // Temporarily enable macro expansion if set so
+ // and reset to previous state when returning from this function.
+ ResetMacroExpansionHelper helper(this);
+
+ switch (Result.getKind()) {
+ case tok::eod:
+ return; // null directive.
+ case tok::code_completion:
+ if (CodeComplete)
+ CodeComplete->CodeCompleteDirective(
+ CurPPLexer->getConditionalStackDepth() > 0);
+ setCodeCompletionReached();
+ return;
+ case tok::numeric_constant: // # 7 GNU line marker directive.
+ if (getLangOpts().AsmPreprocessor)
+ break; // # 4 is not a preprocessor directive in .S files.
+ return HandleDigitDirective(Result);
+ default:
+ IdentifierInfo *II = Result.getIdentifierInfo();
+ if (!II) break; // Not an identifier.
+
+ // Ask what the preprocessor keyword ID is.
+ switch (II->getPPKeywordID()) {
+ default: break;
+ // C99 6.10.1 - Conditional Inclusion.
+ case tok::pp_if:
+ return HandleIfDirective(Result, ReadAnyTokensBeforeDirective);
+ case tok::pp_ifdef:
+ return HandleIfdefDirective(Result, false, true/*not valid for miopt*/);
+ case tok::pp_ifndef:
+ return HandleIfdefDirective(Result, true, ReadAnyTokensBeforeDirective);
+ case tok::pp_elif:
+ return HandleElifDirective(Result);
+ case tok::pp_else:
+ return HandleElseDirective(Result);
+ case tok::pp_endif:
+ return HandleEndifDirective(Result);
+
+ // C99 6.10.2 - Source File Inclusion.
+ case tok::pp_include:
+ // Handle #include.
+ return HandleIncludeDirective(SavedHash.getLocation(), Result);
+ case tok::pp___include_macros:
+ // Handle -imacros.
+ return HandleIncludeMacrosDirective(SavedHash.getLocation(), Result);
+
+ // C99 6.10.3 - Macro Replacement.
+ case tok::pp_define:
+ return HandleDefineDirective(Result, ImmediatelyAfterTopLevelIfndef);
+ case tok::pp_undef:
+ return HandleUndefDirective(Result);
+
+ // C99 6.10.4 - Line Control.
+ case tok::pp_line:
+ return HandleLineDirective(Result);
+
+ // C99 6.10.5 - Error Directive.
+ case tok::pp_error:
+ return HandleUserDiagnosticDirective(Result, false);
+
+ // C99 6.10.6 - Pragma Directive.
+ case tok::pp_pragma:
+ return HandlePragmaDirective(SavedHash.getLocation(), PIK_HashPragma);
+
+ // GNU Extensions.
+ case tok::pp_import:
+ return HandleImportDirective(SavedHash.getLocation(), Result);
+ case tok::pp_include_next:
+ return HandleIncludeNextDirective(SavedHash.getLocation(), Result);
+
+ case tok::pp_warning:
+ Diag(Result, diag::ext_pp_warning_directive);
+ return HandleUserDiagnosticDirective(Result, true);
+ case tok::pp_ident:
+ return HandleIdentSCCSDirective(Result);
+ case tok::pp_sccs:
+ return HandleIdentSCCSDirective(Result);
+ case tok::pp_assert:
+ //isExtension = true; // FIXME: implement #assert
+ break;
+ case tok::pp_unassert:
+ //isExtension = true; // FIXME: implement #unassert
+ break;
+
+ case tok::pp___public_macro:
+ if (getLangOpts().Modules)
+ return HandleMacroPublicDirective(Result);
+ break;
+
+ case tok::pp___private_macro:
+ if (getLangOpts().Modules)
+ return HandleMacroPrivateDirective(Result);
+ break;
+ }
+ break;
+ }
+
+ // If this is a .S file, treat unknown # directives as non-preprocessor
+ // directives. This is important because # may be a comment or introduce
+ // various pseudo-ops. Just return the # token and push back the following
+ // token to be lexed next time.
+ if (getLangOpts().AsmPreprocessor) {
+ Token *Toks = new Token[2];
+ // Return the # and the token after it.
+ Toks[0] = SavedHash;
+ Toks[1] = Result;
+
+ // If the second token is a hashhash token, then we need to translate it to
+ // unknown so the token lexer doesn't try to perform token pasting.
+ if (Result.is(tok::hashhash))
+ Toks[1].setKind(tok::unknown);
+
+ // Enter this token stream so that we re-lex the tokens. Make sure to
+ // enable macro expansion, in case the token after the # is an identifier
+ // that is expanded.
+ EnterTokenStream(Toks, 2, false, true);
+ return;
+ }
+
+ // If we reached here, the preprocessing token is not valid!
+ Diag(Result, diag::err_pp_invalid_directive);
+
+ // Read the rest of the PP line.
+ DiscardUntilEndOfDirective();
+
+ // Okay, we're done parsing the directive.
+}
+
+/// GetLineValue - Convert a numeric token into an unsigned value, emitting
+/// Diagnostic DiagID if it is invalid, and returning the value in Val.
+static bool GetLineValue(Token &DigitTok, unsigned &Val,
+ unsigned DiagID, Preprocessor &PP,
+ bool IsGNULineDirective=false) {
+ if (DigitTok.isNot(tok::numeric_constant)) {
+ PP.Diag(DigitTok, DiagID);
+
+ if (DigitTok.isNot(tok::eod))
+ PP.DiscardUntilEndOfDirective();
+ return true;
+ }
+
+ SmallString<64> IntegerBuffer;
+ IntegerBuffer.resize(DigitTok.getLength());
+ const char *DigitTokBegin = &IntegerBuffer[0];
+ bool Invalid = false;
+ unsigned ActualLength = PP.getSpelling(DigitTok, DigitTokBegin, &Invalid);
+ if (Invalid)
+ return true;
+
+ // Verify that we have a simple digit-sequence, and compute the value. This
+ // is always a simple digit string computed in decimal, so we do this manually
+ // here.
+ Val = 0;
+ for (unsigned i = 0; i != ActualLength; ++i) {
+ // C++1y [lex.fcon]p1:
+ // Optional separating single quotes in a digit-sequence are ignored
+ if (DigitTokBegin[i] == '\'')
+ continue;
+
+ if (!isDigit(DigitTokBegin[i])) {
+ PP.Diag(PP.AdvanceToTokenCharacter(DigitTok.getLocation(), i),
+ diag::err_pp_line_digit_sequence) << IsGNULineDirective;
+ PP.DiscardUntilEndOfDirective();
+ return true;
+ }
+
+ unsigned NextVal = Val*10+(DigitTokBegin[i]-'0');
+ if (NextVal < Val) { // overflow.
+ PP.Diag(DigitTok, DiagID);
+ PP.DiscardUntilEndOfDirective();
+ return true;
+ }
+ Val = NextVal;
+ }
+
+ if (DigitTokBegin[0] == '0' && Val)
+ PP.Diag(DigitTok.getLocation(), diag::warn_pp_line_decimal)
+ << IsGNULineDirective;
+
+ return false;
+}
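+
+// A hedged illustration of GetLineValue (the directives are hypothetical):
+//   #line 42       -> Val == 42
+//   #line 1'000    -> Val == 1000; the C++1y digit separator is skipped
+//   #line 0x10     -> err_pp_line_digit_sequence at the 'x', line discarded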
+
+/// \brief Handle a \#line directive: C99 6.10.4.
+///
+/// The two acceptable forms are:
+/// \verbatim
+/// # line digit-sequence
+/// # line digit-sequence "s-char-sequence"
+/// \endverbatim
+void Preprocessor::HandleLineDirective(Token &Tok) {
+ // Read the line # and string argument. Per C99 6.10.4p5, these tokens are
+ // expanded.
+ Token DigitTok;
+ Lex(DigitTok);
+
+ // Validate the number and convert it to an unsigned.
+ unsigned LineNo;
+ if (GetLineValue(DigitTok, LineNo, diag::err_pp_line_requires_integer,*this))
+ return;
+
+ if (LineNo == 0)
+ Diag(DigitTok, diag::ext_pp_line_zero);
+
+ // Enforce C99 6.10.4p3: "The digit sequence shall not specify ... a
+ // number greater than 2147483647". C90 requires that the line # be <= 32767.
+ unsigned LineLimit = 32768U;
+ if (LangOpts.C99 || LangOpts.CPlusPlus11)
+ LineLimit = 2147483648U;
+ if (LineNo >= LineLimit)
+ Diag(DigitTok, diag::ext_pp_line_too_big) << LineLimit;
+ else if (LangOpts.CPlusPlus11 && LineNo >= 32768U)
+ Diag(DigitTok, diag::warn_cxx98_compat_pp_line_too_big);
+
+ int FilenameID = -1;
+ Token StrTok;
+ Lex(StrTok);
+
+ // If the StrTok is "eod", then it wasn't present. Otherwise, it must be a
+ // string followed by eod.
+ if (StrTok.is(tok::eod))
+ ; // ok
+ else if (StrTok.isNot(tok::string_literal)) {
+ Diag(StrTok, diag::err_pp_line_invalid_filename);
+ return DiscardUntilEndOfDirective();
+ } else if (StrTok.hasUDSuffix()) {
+ Diag(StrTok, diag::err_invalid_string_udl);
+ return DiscardUntilEndOfDirective();
+ } else {
+ // Parse and validate the string, converting it into a unique ID.
+ StringLiteralParser Literal(StrTok, *this);
+ assert(Literal.isAscii() && "Didn't allow wide strings in");
+ if (Literal.hadError)
+ return DiscardUntilEndOfDirective();
+ if (Literal.Pascal) {
+ Diag(StrTok, diag::err_pp_linemarker_invalid_filename);
+ return DiscardUntilEndOfDirective();
+ }
+ FilenameID = SourceMgr.getLineTableFilenameID(Literal.GetString());
+
+ // Verify that there is nothing after the string, other than EOD. Because
+ // of C99 6.10.4p5, macros that expand to empty tokens are ok.
+ CheckEndOfDirective("line", true);
+ }
+
+ SourceMgr.AddLineNote(DigitTok.getLocation(), LineNo, FilenameID);
+
+ if (Callbacks)
+ Callbacks->FileChanged(CurPPLexer->getSourceLocation(),
+ PPCallbacks::RenameFile,
+ SrcMgr::C_User);
+}
+
+/// ReadLineMarkerFlags - Parse and validate any flags at the end of a GNU line
+/// marker directive.
+static bool ReadLineMarkerFlags(bool &IsFileEntry, bool &IsFileExit,
+ bool &IsSystemHeader, bool &IsExternCHeader,
+ Preprocessor &PP) {
+ unsigned FlagVal;
+ Token FlagTok;
+ PP.Lex(FlagTok);
+ if (FlagTok.is(tok::eod)) return false;
+ if (GetLineValue(FlagTok, FlagVal, diag::err_pp_linemarker_invalid_flag, PP))
+ return true;
+
+ if (FlagVal == 1) {
+ IsFileEntry = true;
+
+ PP.Lex(FlagTok);
+ if (FlagTok.is(tok::eod)) return false;
+ if (GetLineValue(FlagTok, FlagVal, diag::err_pp_linemarker_invalid_flag,PP))
+ return true;
+ } else if (FlagVal == 2) {
+ IsFileExit = true;
+
+ SourceManager &SM = PP.getSourceManager();
+ // If we are leaving the current presumed file, check to make sure the
+ // presumed include stack isn't empty!
+ FileID CurFileID =
+ SM.getDecomposedExpansionLoc(FlagTok.getLocation()).first;
+ PresumedLoc PLoc = SM.getPresumedLoc(FlagTok.getLocation());
+ if (PLoc.isInvalid())
+ return true;
+
+ // If there is no include loc (main file) or if the include loc is in a
+ // different physical file, then we aren't in a "1" line marker flag region.
+ SourceLocation IncLoc = PLoc.getIncludeLoc();
+ if (IncLoc.isInvalid() ||
+ SM.getDecomposedExpansionLoc(IncLoc).first != CurFileID) {
+ PP.Diag(FlagTok, diag::err_pp_linemarker_invalid_pop);
+ PP.DiscardUntilEndOfDirective();
+ return true;
+ }
+
+ PP.Lex(FlagTok);
+ if (FlagTok.is(tok::eod)) return false;
+ if (GetLineValue(FlagTok, FlagVal, diag::err_pp_linemarker_invalid_flag,PP))
+ return true;
+ }
+
+ // We must have 3 if there are still flags.
+ if (FlagVal != 3) {
+ PP.Diag(FlagTok, diag::err_pp_linemarker_invalid_flag);
+ PP.DiscardUntilEndOfDirective();
+ return true;
+ }
+
+ IsSystemHeader = true;
+
+ PP.Lex(FlagTok);
+ if (FlagTok.is(tok::eod)) return false;
+ if (GetLineValue(FlagTok, FlagVal, diag::err_pp_linemarker_invalid_flag, PP))
+ return true;
+
+ // We must have 4 if there is yet another flag.
+ if (FlagVal != 4) {
+ PP.Diag(FlagTok, diag::err_pp_linemarker_invalid_flag);
+ PP.DiscardUntilEndOfDirective();
+ return true;
+ }
+
+ IsExternCHeader = true;
+
+ PP.Lex(FlagTok);
+ if (FlagTok.is(tok::eod)) return false;
+
+ // There are no more valid flags here.
+ PP.Diag(FlagTok, diag::err_pp_linemarker_invalid_flag);
+ PP.DiscardUntilEndOfDirective();
+ return true;
+}
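+
+// A hedged summary of the flag sequences accepted above (GNU-style line
+// markers; the file names are hypothetical):
+//   # 10 "a.h" 1       -> entering a.h (IsFileEntry)
+//   # 20 "t.c" 2       -> returning to t.c (IsFileExit)
+//   # 30 "s.h" 1 3 4   -> entering a system header treated as extern "C"
+// Flags must appear in this relative order; anything else is rejected with
+// err_pp_linemarker_invalid_flag.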
+
+/// HandleDigitDirective - Handle a GNU line marker directive, whose syntax is
+/// one of the following forms:
+///
+/// # 42
+/// # 42 "file" ('1' | '2')?
+/// # 42 "file" ('1' | '2')? '3' '4'?
+///
+void Preprocessor::HandleDigitDirective(Token &DigitTok) {
+ // Validate the number and convert it to an unsigned. GNU does not have a
+ // line # limit other than that it fit in 32 bits.
+ unsigned LineNo;
+ if (GetLineValue(DigitTok, LineNo, diag::err_pp_linemarker_requires_integer,
+ *this, true))
+ return;
+
+ Token StrTok;
+ Lex(StrTok);
+
+ bool IsFileEntry = false, IsFileExit = false;
+ bool IsSystemHeader = false, IsExternCHeader = false;
+ int FilenameID = -1;
+
+ // If the StrTok is "eod", then it wasn't present. Otherwise, it must be a
+ // string followed by eod.
+ if (StrTok.is(tok::eod))
+ ; // ok
+ else if (StrTok.isNot(tok::string_literal)) {
+ Diag(StrTok, diag::err_pp_linemarker_invalid_filename);
+ return DiscardUntilEndOfDirective();
+ } else if (StrTok.hasUDSuffix()) {
+ Diag(StrTok, diag::err_invalid_string_udl);
+ return DiscardUntilEndOfDirective();
+ } else {
+ // Parse and validate the string, converting it into a unique ID.
+ StringLiteralParser Literal(StrTok, *this);
+ assert(Literal.isAscii() && "Didn't allow wide strings in");
+ if (Literal.hadError)
+ return DiscardUntilEndOfDirective();
+ if (Literal.Pascal) {
+ Diag(StrTok, diag::err_pp_linemarker_invalid_filename);
+ return DiscardUntilEndOfDirective();
+ }
+ FilenameID = SourceMgr.getLineTableFilenameID(Literal.GetString());
+
+ // If a filename was present, read any flags that are present.
+ if (ReadLineMarkerFlags(IsFileEntry, IsFileExit,
+ IsSystemHeader, IsExternCHeader, *this))
+ return;
+ }
+
+ // Create a line note with this information.
+ SourceMgr.AddLineNote(DigitTok.getLocation(), LineNo, FilenameID,
+ IsFileEntry, IsFileExit,
+ IsSystemHeader, IsExternCHeader);
+
+ // If the preprocessor has callbacks installed, notify them of the #line
+ // change. This is used so that the line marker comes out in -E mode for
+ // example.
+ if (Callbacks) {
+ PPCallbacks::FileChangeReason Reason = PPCallbacks::RenameFile;
+ if (IsFileEntry)
+ Reason = PPCallbacks::EnterFile;
+ else if (IsFileExit)
+ Reason = PPCallbacks::ExitFile;
+ SrcMgr::CharacteristicKind FileKind = SrcMgr::C_User;
+ if (IsExternCHeader)
+ FileKind = SrcMgr::C_ExternCSystem;
+ else if (IsSystemHeader)
+ FileKind = SrcMgr::C_System;
+
+ Callbacks->FileChanged(CurPPLexer->getSourceLocation(), Reason, FileKind);
+ }
+}
+
+
+/// HandleUserDiagnosticDirective - Handle a #warning or #error directive.
+///
+void Preprocessor::HandleUserDiagnosticDirective(Token &Tok,
+ bool isWarning) {
+ // PTH doesn't emit #warning or #error directives.
+ if (CurPTHLexer)
+ return CurPTHLexer->DiscardToEndOfLine();
+
+ // Read the rest of the line raw. We do this because we don't want macros
+ // to be expanded and we don't require that the tokens be valid preprocessing
+ // tokens. For example, this is allowed: "#warning ` 'foo". GCC does
+ // collapse multiple consecutive white space between tokens, but this isn't
+ // specified by the standard.
+ SmallString<128> Message;
+ CurLexer->ReadToEndOfLine(&Message);
+
+ // Find the first non-whitespace character, so that we can make the
+ // diagnostic more succinct.
+ StringRef Msg = StringRef(Message).ltrim(" ");
+
+ if (isWarning)
+ Diag(Tok, diag::pp_hash_warning) << Msg;
+ else
+ Diag(Tok, diag::err_pp_hash_error) << Msg;
+}
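+
+// A hedged example of the raw read above: the directive
+//   #warning ` 'foo
+// is accepted even though its tokens would not lex cleanly; everything after
+// the directive name is reported verbatim (minus leading whitespace).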
+
+/// HandleIdentSCCSDirective - Handle a #ident/#sccs directive.
+///
+void Preprocessor::HandleIdentSCCSDirective(Token &Tok) {
+ // Yes, this directive is an extension.
+ Diag(Tok, diag::ext_pp_ident_directive);
+
+ // Read the string argument.
+ Token StrTok;
+ Lex(StrTok);
+
+ // If the token kind isn't a string, it's a malformed directive.
+ if (StrTok.isNot(tok::string_literal) &&
+ StrTok.isNot(tok::wide_string_literal)) {
+ Diag(StrTok, diag::err_pp_malformed_ident);
+ if (StrTok.isNot(tok::eod))
+ DiscardUntilEndOfDirective();
+ return;
+ }
+
+ if (StrTok.hasUDSuffix()) {
+ Diag(StrTok, diag::err_invalid_string_udl);
+ return DiscardUntilEndOfDirective();
+ }
+
+ // Verify that there is nothing after the string, other than EOD.
+ CheckEndOfDirective("ident");
+
+ if (Callbacks) {
+ bool Invalid = false;
+ std::string Str = getSpelling(StrTok, &Invalid);
+ if (!Invalid)
+ Callbacks->Ident(Tok.getLocation(), Str);
+ }
+}
+
+/// \brief Handle a #public directive.
+void Preprocessor::HandleMacroPublicDirective(Token &Tok) {
+ Token MacroNameTok;
+ ReadMacroName(MacroNameTok, MU_Undef);
+
+ // Error reading macro name? If so, diagnostic already issued.
+ if (MacroNameTok.is(tok::eod))
+ return;
+
+ // Check to see if this is the last token on the #__public_macro line.
+ CheckEndOfDirective("__public_macro");
+
+ IdentifierInfo *II = MacroNameTok.getIdentifierInfo();
+ // Okay, we have a valid identifier for the #__public_macro directive.
+ MacroDirective *MD = getLocalMacroDirective(II);
+
+ // If the macro is not defined, this is an error.
+ if (!MD) {
+ Diag(MacroNameTok, diag::err_pp_visibility_non_macro) << II;
+ return;
+ }
+
+ // Note that this macro has now been exported.
+ appendMacroDirective(II, AllocateVisibilityMacroDirective(
+ MacroNameTok.getLocation(), /*IsPublic=*/true));
+}
+
+/// \brief Handle a #private directive.
+void Preprocessor::HandleMacroPrivateDirective(Token &Tok) {
+ Token MacroNameTok;
+ ReadMacroName(MacroNameTok, MU_Undef);
+
+ // Error reading macro name? If so, diagnostic already issued.
+ if (MacroNameTok.is(tok::eod))
+ return;
+
+ // Check to see if this is the last token on the #__private_macro line.
+ CheckEndOfDirective("__private_macro");
+
+ IdentifierInfo *II = MacroNameTok.getIdentifierInfo();
+ // Okay, we have a valid identifier for the #__private_macro directive.
+ MacroDirective *MD = getLocalMacroDirective(II);
+
+ // If the macro is not defined, this is an error.
+ if (!MD) {
+ Diag(MacroNameTok, diag::err_pp_visibility_non_macro) << II;
+ return;
+ }
+
+ // Note that this macro has now been marked private.
+ appendMacroDirective(II, AllocateVisibilityMacroDirective(
+ MacroNameTok.getLocation(), /*IsPublic=*/false));
+}
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Include Directive Handling.
+//===----------------------------------------------------------------------===//
+
+/// GetIncludeFilenameSpelling - Turn the specified lexer token into a fully
+/// checked and spelled filename, e.g. as an operand of \#include. This returns
+/// true if the input filename was in <>'s or false if it was in ""'s. The
+/// caller is expected to provide a buffer that is large enough to hold the
+/// spelling of the filename, but is also expected to handle the case when
+/// this method decides to use a different buffer.
+bool Preprocessor::GetIncludeFilenameSpelling(SourceLocation Loc,
+ StringRef &Buffer) {
+ // Get the text form of the filename.
+ assert(!Buffer.empty() && "Can't have tokens with empty spellings!");
+
+ // Make sure the filename is <x> or "x".
+ bool isAngled;
+ if (Buffer[0] == '<') {
+ if (Buffer.back() != '>') {
+ Diag(Loc, diag::err_pp_expects_filename);
+ Buffer = StringRef();
+ return true;
+ }
+ isAngled = true;
+ } else if (Buffer[0] == '"') {
+ if (Buffer.back() != '"') {
+ Diag(Loc, diag::err_pp_expects_filename);
+ Buffer = StringRef();
+ return true;
+ }
+ isAngled = false;
+ } else {
+ Diag(Loc, diag::err_pp_expects_filename);
+ Buffer = StringRef();
+ return true;
+ }
+
+ // Diagnose #include "" as invalid.
+ if (Buffer.size() <= 2) {
+ Diag(Loc, diag::err_pp_empty_filename);
+ Buffer = StringRef();
+ return true;
+ }
+
+ // Skip the brackets.
+ Buffer = Buffer.substr(1, Buffer.size()-2);
+ return isAngled;
+}
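+
+// A hedged sketch of the cases above (the inputs are hypothetical):
+//   Buffer = "<stdio.h>"  -> returns true (angled), Buffer becomes "stdio.h"
+//   Buffer = "\"x.h\""    -> returns false (quoted), Buffer becomes "x.h"
+//   Buffer = "\"\""       -> err_pp_empty_filename; Buffer is cleared and,
+//                            as in the other error paths, true is returned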
+
+// \brief Handle cases where the \#include name is expanded from a macro
+// as multiple tokens, which need to be glued together.
+//
+// This occurs for code like:
+// \code
+// \#define FOO <a/b.h>
+// \#include FOO
+// \endcode
+// because in this case, "<a/b.h>" is returned as 7 tokens, not one.
+//
+// This code concatenates and consumes tokens up to the '>' token. It returns
+// false if the '>' was found; otherwise it returns true after finding and
+// consuming the EOD marker.
+bool Preprocessor::ConcatenateIncludeName(SmallString<128> &FilenameBuffer,
+ SourceLocation &End) {
+ Token CurTok;
+
+ Lex(CurTok);
+ while (CurTok.isNot(tok::eod)) {
+ End = CurTok.getLocation();
+
+ // FIXME: Provide code completion for #includes.
+ if (CurTok.is(tok::code_completion)) {
+ setCodeCompletionReached();
+ Lex(CurTok);
+ continue;
+ }
+
+ // Append the spelling of this token to the buffer. If there was a space
+ // before it, add it now.
+ if (CurTok.hasLeadingSpace())
+ FilenameBuffer.push_back(' ');
+
+ // Get the spelling of the token, directly into FilenameBuffer if possible.
+ unsigned PreAppendSize = FilenameBuffer.size();
+ FilenameBuffer.resize(PreAppendSize+CurTok.getLength());
+
+ const char *BufPtr = &FilenameBuffer[PreAppendSize];
+ unsigned ActualLen = getSpelling(CurTok, BufPtr);
+
+ // If the token was spelled somewhere else, copy it into FilenameBuffer.
+ if (BufPtr != &FilenameBuffer[PreAppendSize])
+ memcpy(&FilenameBuffer[PreAppendSize], BufPtr, ActualLen);
+
+ // Resize FilenameBuffer to the correct size.
+ if (CurTok.getLength() != ActualLen)
+ FilenameBuffer.resize(PreAppendSize+ActualLen);
+
+ // If we found the '>' marker, return success.
+ if (CurTok.is(tok::greater))
+ return false;
+
+ Lex(CurTok);
+ }
+
+ // If we hit the eod marker, emit an error and return true so that the caller
+ // knows the EOD has been read.
+ Diag(CurTok.getLocation(), diag::err_pp_expects_filename);
+ return true;
+}
+
+/// \brief Push a token onto the token stream containing an annotation.
+static void EnterAnnotationToken(Preprocessor &PP,
+ SourceLocation Begin, SourceLocation End,
+ tok::TokenKind Kind, void *AnnotationVal) {
+ // FIXME: Produce this as the current token directly, rather than
+ // allocating a new token for it.
+ Token *Tok = new Token[1];
+ Tok[0].startToken();
+ Tok[0].setKind(Kind);
+ Tok[0].setLocation(Begin);
+ Tok[0].setAnnotationEndLoc(End);
+ Tok[0].setAnnotationValue(AnnotationVal);
+ PP.EnterTokenStream(Tok, 1, true, true);
+}
+
+/// \brief Produce a diagnostic informing the user that a #include or similar
+/// was implicitly treated as a module import.
+static void diagnoseAutoModuleImport(
+ Preprocessor &PP, SourceLocation HashLoc, Token &IncludeTok,
+ ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> Path,
+ SourceLocation PathEnd) {
+ assert(PP.getLangOpts().ObjC2 && "no import syntax available");
+
+ SmallString<128> PathString;
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ if (I)
+ PathString += '.';
+ PathString += Path[I].first->getName();
+ }
+ int IncludeKind = 0;
+
+ switch (IncludeTok.getIdentifierInfo()->getPPKeywordID()) {
+ case tok::pp_include:
+ IncludeKind = 0;
+ break;
+
+ case tok::pp_import:
+ IncludeKind = 1;
+ break;
+
+ case tok::pp_include_next:
+ IncludeKind = 2;
+ break;
+
+ case tok::pp___include_macros:
+ IncludeKind = 3;
+ break;
+
+ default:
+ llvm_unreachable("unknown include directive kind");
+ }
+
+ CharSourceRange ReplaceRange(SourceRange(HashLoc, PathEnd),
+ /*IsTokenRange=*/false);
+ PP.Diag(HashLoc, diag::warn_auto_module_import)
+ << IncludeKind << PathString
+ << FixItHint::CreateReplacement(ReplaceRange,
+ ("@import " + PathString + ";").str());
+}
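+
+// A hedged example of the fixit above: if "#include <Foo/Bar.h>"
+// (hypothetical) maps onto module Foo.Bar, the warning suggests replacing
+// the whole directive with
+//   @import Foo.Bar;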
+
+/// HandleIncludeDirective - The "\#include" tokens have just been read, read
+/// the file to be included from the lexer, then include it! This is a common
+/// routine with functionality shared between \#include, \#include_next and
+/// \#import. LookupFrom is set when this is a \#include_next directive; it
+/// specifies the directory to start searching from.
+void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
+ Token &IncludeTok,
+ const DirectoryLookup *LookupFrom,
+ const FileEntry *LookupFromFile,
+ bool isImport) {
+
+ Token FilenameTok;
+ CurPPLexer->LexIncludeFilename(FilenameTok);
+
+ // Reserve a buffer to get the spelling.
+ SmallString<128> FilenameBuffer;
+ StringRef Filename;
+ SourceLocation End;
+ SourceLocation CharEnd; // the end of this directive, in characters
+
+ switch (FilenameTok.getKind()) {
+ case tok::eod:
+ // If the token kind is EOD, the error has already been diagnosed.
+ return;
+
+ case tok::angle_string_literal:
+ case tok::string_literal:
+ Filename = getSpelling(FilenameTok, FilenameBuffer);
+ End = FilenameTok.getLocation();
+ CharEnd = End.getLocWithOffset(FilenameTok.getLength());
+ break;
+
+ case tok::less:
+ // This could be a <foo/bar.h> file coming from a macro expansion. In this
+ // case, glue the tokens together into FilenameBuffer and interpret those.
+ FilenameBuffer.push_back('<');
+ if (ConcatenateIncludeName(FilenameBuffer, End))
+ return; // Found <eod> but no ">"? Diagnostic already emitted.
+ Filename = FilenameBuffer;
+ CharEnd = End.getLocWithOffset(1);
+ break;
+ default:
+ Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
+ DiscardUntilEndOfDirective();
+ return;
+ }
+
+ CharSourceRange FilenameRange
+ = CharSourceRange::getCharRange(FilenameTok.getLocation(), CharEnd);
+ StringRef OriginalFilename = Filename;
+ bool isAngled =
+ GetIncludeFilenameSpelling(FilenameTok.getLocation(), Filename);
+ // If GetIncludeFilenameSpelling cleared the filename, there was an
+ // error.
+ if (Filename.empty()) {
+ DiscardUntilEndOfDirective();
+ return;
+ }
+
+ // Verify that there is nothing after the filename, other than EOD. Note that
+ // we allow macros that expand to nothing after the filename, because this
+ // falls into the category of "#include pp-tokens new-line" specified in
+ // C99 6.10.2p4.
+ CheckEndOfDirective(IncludeTok.getIdentifierInfo()->getNameStart(), true);
+
+ // Check that we don't have infinite #include recursion.
+ if (IncludeMacroStack.size() == MaxAllowedIncludeStackDepth-1) {
+ Diag(FilenameTok, diag::err_pp_include_too_deep);
+ return;
+ }
+
+ // Complain about attempts to #include files in an audit pragma.
+ if (PragmaARCCFCodeAuditedLoc.isValid()) {
+ Diag(HashLoc, diag::err_pp_include_in_arc_cf_code_audited);
+ Diag(PragmaARCCFCodeAuditedLoc, diag::note_pragma_entered_here);
+
+ // Immediately leave the pragma.
+ PragmaARCCFCodeAuditedLoc = SourceLocation();
+ }
+
+ // Complain about attempts to #include files in an assume-nonnull pragma.
+ if (PragmaAssumeNonNullLoc.isValid()) {
+ Diag(HashLoc, diag::err_pp_include_in_assume_nonnull);
+ Diag(PragmaAssumeNonNullLoc, diag::note_pragma_entered_here);
+
+ // Immediately leave the pragma.
+ PragmaAssumeNonNullLoc = SourceLocation();
+ }
+
+ if (HeaderInfo.HasIncludeAliasMap()) {
+ // Map the filename with the brackets still attached. If the name doesn't
+ // map to anything, fall back on the filename we've already gotten the
+ // spelling for.
+ StringRef NewName = HeaderInfo.MapHeaderToIncludeAlias(OriginalFilename);
+ if (!NewName.empty())
+ Filename = NewName;
+ }
+
+ // Search include directories.
+ const DirectoryLookup *CurDir;
+ SmallString<1024> SearchPath;
+ SmallString<1024> RelativePath;
+ // We get the raw path only if we have 'Callbacks' to which we later pass
+ // the path.
+ ModuleMap::KnownHeader SuggestedModule;
+ SourceLocation FilenameLoc = FilenameTok.getLocation();
+ SmallString<128> NormalizedPath;
+ if (LangOpts.MSVCCompat) {
+ NormalizedPath = Filename.str();
+#ifndef LLVM_ON_WIN32
+ llvm::sys::path::native(NormalizedPath);
+#endif
+ }
+ const FileEntry *File = LookupFile(
+ FilenameLoc, LangOpts.MSVCCompat ? NormalizedPath.c_str() : Filename,
+ isAngled, LookupFrom, LookupFromFile, CurDir,
+ Callbacks ? &SearchPath : nullptr, Callbacks ? &RelativePath : nullptr,
+ &SuggestedModule);
+
+ if (!File) {
+ if (Callbacks) {
+ // Give the clients a chance to recover.
+ SmallString<128> RecoveryPath;
+ if (Callbacks->FileNotFound(Filename, RecoveryPath)) {
+ if (const DirectoryEntry *DE = FileMgr.getDirectory(RecoveryPath)) {
+ // Add the recovery path to the list of search paths.
+ DirectoryLookup DL(DE, SrcMgr::C_User, false);
+ HeaderInfo.AddSearchPath(DL, isAngled);
+
+ // Try the lookup again, skipping the cache.
+ File = LookupFile(
+ FilenameLoc,
+ LangOpts.MSVCCompat ? NormalizedPath.c_str() : Filename, isAngled,
+ LookupFrom, LookupFromFile, CurDir, nullptr, nullptr,
+ &SuggestedModule, /*SkipCache*/ true);
+ }
+ }
+ }
+
+ if (!SuppressIncludeNotFoundError) {
+ // If the file could not be located and it was included via angle
+ // brackets, we can attempt a lookup as though it were a quoted path to
+ // provide the user with a possible fixit.
+ if (isAngled) {
+ File = LookupFile(
+ FilenameLoc,
+ LangOpts.MSVCCompat ? NormalizedPath.c_str() : Filename, false,
+ LookupFrom, LookupFromFile, CurDir,
+ Callbacks ? &SearchPath : nullptr,
+ Callbacks ? &RelativePath : nullptr,
+ &SuggestedModule);
+ if (File) {
+ SourceRange Range(FilenameTok.getLocation(), CharEnd);
+ Diag(FilenameTok, diag::err_pp_file_not_found_not_fatal) <<
+ Filename <<
+ FixItHint::CreateReplacement(Range, "\"" + Filename.str() + "\"");
+ }
+ }
+
+ // If the file is still not found, just go with the vanilla diagnostic.
+ if (!File)
+ Diag(FilenameTok, diag::err_pp_file_not_found) << Filename;
+ }
+ }
+
+ // Should we enter the source file? Set to false if the source file is
+ // known to have no effect beyond its effect on module visibility -- that is,
+ // if it has an include guard that is already defined, or is a modular header
+ // we've already imported or built.
+ bool ShouldEnter = true;
+
+ // Determine whether we should try to import the module for this #include, if
+ // there is one. Don't do so if precompiled module support is disabled or we
+ // are processing this module textually (because we're building the module).
+ if (File && SuggestedModule && getLangOpts().Modules &&
+ SuggestedModule.getModule()->getTopLevelModuleName() !=
+ getLangOpts().CurrentModule &&
+ SuggestedModule.getModule()->getTopLevelModuleName() !=
+ getLangOpts().ImplementationOfModule) {
+
+ // If this include corresponds to a module but that module is
+ // unavailable, diagnose the situation and bail out.
+ if (!SuggestedModule.getModule()->isAvailable()) {
+ clang::Module::Requirement Requirement;
+ clang::Module::UnresolvedHeaderDirective MissingHeader;
+ Module *M = SuggestedModule.getModule();
+ // Identify the cause.
+ (void)M->isAvailable(getLangOpts(), getTargetInfo(), Requirement,
+ MissingHeader);
+ if (MissingHeader.FileNameLoc.isValid()) {
+ Diag(MissingHeader.FileNameLoc, diag::err_module_header_missing)
+ << MissingHeader.IsUmbrella << MissingHeader.FileName;
+ } else {
+ Diag(M->DefinitionLoc, diag::err_module_unavailable)
+ << M->getFullModuleName() << Requirement.second << Requirement.first;
+ }
+ Diag(FilenameTok.getLocation(),
+ diag::note_implicit_top_level_module_import_here)
+ << M->getTopLevelModuleName();
+ return;
+ }
+
+ // Compute the module access path corresponding to this module.
+ // FIXME: Should we have a second loadModule() overload to avoid this
+ // extra lookup step?
+ SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Path;
+ for (Module *Mod = SuggestedModule.getModule(); Mod; Mod = Mod->Parent)
+ Path.push_back(std::make_pair(getIdentifierInfo(Mod->Name),
+ FilenameTok.getLocation()));
+ std::reverse(Path.begin(), Path.end());
+
+ // Warn that we're replacing the include/import with a module import.
+ // We only do this in Objective-C, where we have a module-import syntax.
+ if (getLangOpts().ObjC2)
+ diagnoseAutoModuleImport(*this, HashLoc, IncludeTok, Path, CharEnd);
+
+ // Load the module to import its macros. We'll make the declarations
+ // visible when the parser gets here.
+ // FIXME: Pass SuggestedModule in here rather than converting it to a path
+ // and making the module loader convert it back again.
+ ModuleLoadResult Imported = TheModuleLoader.loadModule(
+ IncludeTok.getLocation(), Path, Module::Hidden,
+ /*IsIncludeDirective=*/true);
+ assert((Imported == nullptr || Imported == SuggestedModule.getModule()) &&
+ "the imported module is different than the suggested one");
+
+ if (Imported)
+ ShouldEnter = false;
+ else if (Imported.isMissingExpected()) {
+ // We failed to find a submodule that we assumed would exist (because it
+ // was in the directory of an umbrella header, for instance), but no
+ // actual module exists for it (because the umbrella header is
+ // incomplete). Treat this as a textual inclusion.
+ SuggestedModule = ModuleMap::KnownHeader();
+ } else {
+ // We hit an error processing the import. Bail out.
+ if (hadModuleLoaderFatalFailure()) {
+ // With a fatal failure in the module loader, we abort parsing.
+ Token &Result = IncludeTok;
+ if (CurLexer) {
+ Result.startToken();
+ CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd, tok::eof);
+ CurLexer->cutOffLexing();
+ } else {
+ assert(CurPTHLexer && "#include but no current lexer set!");
+ CurPTHLexer->getEOF(Result);
+ }
+ }
+ return;
+ }
+ }
+
+ if (Callbacks) {
+ // Notify the callback object that we've seen an inclusion directive.
+ Callbacks->InclusionDirective(
+ HashLoc, IncludeTok,
+ LangOpts.MSVCCompat ? NormalizedPath.c_str() : Filename, isAngled,
+ FilenameRange, File, SearchPath, RelativePath,
+ ShouldEnter ? nullptr : SuggestedModule.getModule());
+ }
+
+ if (!File)
+ return;
+
+ // The #included file will be considered to be a system header if either it
+ // is in a system include directory, or if the #includer is itself a system
+ // header.
+ SrcMgr::CharacteristicKind FileCharacter =
+ std::max(HeaderInfo.getFileDirFlavor(File),
+ SourceMgr.getFileCharacteristic(FilenameTok.getLocation()));
+
+ // FIXME: If we have a suggested module, and we've already visited this file,
+ // don't bother entering it again. We know it has no further effect.
+
+ // Ask HeaderInfo if we should enter this #include file. If not, #including
+ // this file will have no effect.
+ if (ShouldEnter &&
+ !HeaderInfo.ShouldEnterIncludeFile(*this, File, isImport,
+ SuggestedModule.getModule())) {
+ ShouldEnter = false;
+ if (Callbacks)
+ Callbacks->FileSkipped(*File, FilenameTok, FileCharacter);
+ }
+
+ // If we don't need to enter the file, stop now.
+ if (!ShouldEnter) {
+ // If this is a module import, make it visible if needed.
+ if (auto *M = SuggestedModule.getModule()) {
+ makeModuleVisible(M, HashLoc);
+
+ if (IncludeTok.getIdentifierInfo()->getPPKeywordID() !=
+ tok::pp___include_macros)
+ EnterAnnotationToken(*this, HashLoc, End, tok::annot_module_include, M);
+ }
+ return;
+ }
+
+ // Look up the file, create a File ID for it.
+ SourceLocation IncludePos = End;
+ // If the filename string was the result of macro expansions, set the include
+ // position on the file where it will be included and after the expansions.
+ if (IncludePos.isMacroID())
+ IncludePos = SourceMgr.getExpansionRange(IncludePos).second;
+ FileID FID = SourceMgr.createFileID(File, IncludePos, FileCharacter);
+ assert(FID.isValid() && "Expected valid file ID");
+
+ // If all is good, enter the new file!
+ if (EnterSourceFile(FID, CurDir, FilenameTok.getLocation()))
+ return;
+
+ // Determine if we're switching to building a new submodule, and which one.
+ if (auto *M = SuggestedModule.getModule()) {
+ assert(!CurSubmodule && "should not have marked this as a module yet");
+ CurSubmodule = M;
+
+ // Let the macro handling code know that any future macros are within
+ // the new submodule.
+ EnterSubmodule(M, HashLoc);
+
+ // Let the parser know that any future declarations are within the new
+ // submodule.
+ // FIXME: There's no point doing this if we're handling a #__include_macros
+ // directive.
+ EnterAnnotationToken(*this, HashLoc, End, tok::annot_module_begin, M);
+ }
+}
+
+/// HandleIncludeNextDirective - Implements \#include_next.
+///
+void Preprocessor::HandleIncludeNextDirective(SourceLocation HashLoc,
+ Token &IncludeNextTok) {
+ Diag(IncludeNextTok, diag::ext_pp_include_next_directive);
+
+ // #include_next is like #include, except that we start searching after
+ // the current found directory. If we can't do this, issue a
+ // diagnostic.
+ const DirectoryLookup *Lookup = CurDirLookup;
+ const FileEntry *LookupFromFile = nullptr;
+ if (isInPrimaryFile()) {
+ Lookup = nullptr;
+ Diag(IncludeNextTok, diag::pp_include_next_in_primary);
+ } else if (CurSubmodule) {
+ // Start looking up in the directory *after* the one in which the current
+ // file would be found, if any.
+ assert(CurPPLexer && "#include_next directive in macro?");
+ LookupFromFile = CurPPLexer->getFileEntry();
+ Lookup = nullptr;
+ } else if (!Lookup) {
+ Diag(IncludeNextTok, diag::pp_include_next_absolute_path);
+ } else {
+ // Start looking up in the next directory.
+ ++Lookup;
+ }
+
+ return HandleIncludeDirective(HashLoc, IncludeNextTok, Lookup,
+ LookupFromFile);
+}
+
+/// HandleMicrosoftImportDirective - Implements \#import for Microsoft Mode
+void Preprocessor::HandleMicrosoftImportDirective(Token &Tok) {
+ // The Microsoft #import directive takes a type library and generates header
+ // files from it, and includes those. This is beyond the scope of what clang
+  // does, so we reject it with an error. However, #import can optionally have
+  // trailing attributes that span multiple lines, and we eat those so that we
+  // can continue processing from there.
+  Diag(Tok, diag::err_pp_import_directive_ms);
+
+ // Read tokens until we get to the end of the directive. Note that the
+ // directive can be split over multiple lines using the backslash character.
+ DiscardUntilEndOfDirective();
+}
+
+/// HandleImportDirective - Implements \#import.
+///
+void Preprocessor::HandleImportDirective(SourceLocation HashLoc,
+ Token &ImportTok) {
+ if (!LangOpts.ObjC1) { // #import is standard for ObjC.
+ if (LangOpts.MSVCCompat)
+ return HandleMicrosoftImportDirective(ImportTok);
+ Diag(ImportTok, diag::ext_pp_import_directive);
+ }
+ return HandleIncludeDirective(HashLoc, ImportTok, nullptr, nullptr, true);
+}
+
+/// HandleIncludeMacrosDirective - The -imacros command line option turns into a
+/// pseudo directive in the predefines buffer. This handles it by sucking all
+/// tokens through the preprocessor and discarding them (only keeping the side
+/// effects on the preprocessor).
+void Preprocessor::HandleIncludeMacrosDirective(SourceLocation HashLoc,
+ Token &IncludeMacrosTok) {
+ // This directive should only occur in the predefines buffer. If not, emit an
+ // error and reject it.
+ SourceLocation Loc = IncludeMacrosTok.getLocation();
+ if (strcmp(SourceMgr.getBufferName(Loc), "<built-in>") != 0) {
+ Diag(IncludeMacrosTok.getLocation(),
+ diag::pp_include_macros_out_of_predefines);
+ DiscardUntilEndOfDirective();
+ return;
+ }
+
+ // Treat this as a normal #include for checking purposes. If this is
+ // successful, it will push a new lexer onto the include stack.
+ HandleIncludeDirective(HashLoc, IncludeMacrosTok);
+
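+  // Lex and discard everything up to the '##' marker token that the
+  // predefines buffer emits after this pseudo directive.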
+ Token TmpTok;
+ do {
+ Lex(TmpTok);
+ assert(TmpTok.isNot(tok::eof) && "Didn't find end of -imacros!");
+ } while (TmpTok.isNot(tok::hashhash));
+}
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Macro Directive Handling.
+//===----------------------------------------------------------------------===//
+
+/// ReadMacroDefinitionArgList - The ( starting an argument list of a macro
+/// definition has just been read. Lex the rest of the arguments and the
+/// closing ), updating MI with what we learn. Return true if an error occurs
+/// parsing the arg list.
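+///
+/// For example, for '#define F(a, ...)' this records 'a' plus __VA_ARGS__ and
+/// marks the macro C99-varargs; for '#define F(a, b...)' it records 'a' and
+/// 'b' and marks the macro GNU-varargs.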
+bool Preprocessor::ReadMacroDefinitionArgList(MacroInfo *MI, Token &Tok) {
+ SmallVector<IdentifierInfo*, 32> Arguments;
+
+ while (1) {
+ LexUnexpandedToken(Tok);
+ switch (Tok.getKind()) {
+ case tok::r_paren:
+ // Found the end of the argument list.
+ if (Arguments.empty()) // #define FOO()
+ return false;
+ // Otherwise we have #define FOO(A,)
+ Diag(Tok, diag::err_pp_expected_ident_in_arg_list);
+ return true;
+ case tok::ellipsis: // #define X(... -> C99 varargs
+ if (!LangOpts.C99)
+ Diag(Tok, LangOpts.CPlusPlus11 ?
+ diag::warn_cxx98_compat_variadic_macro :
+ diag::ext_variadic_macro);
+
+ // OpenCL v1.2 s6.9.e: variadic macros are not supported.
+ if (LangOpts.OpenCL) {
+ Diag(Tok, diag::err_pp_opencl_variadic_macros);
+ return true;
+ }
+
+ // Lex the token after the identifier.
+ LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_pp_missing_rparen_in_macro_def);
+ return true;
+ }
+ // Add the __VA_ARGS__ identifier as an argument.
+ Arguments.push_back(Ident__VA_ARGS__);
+ MI->setIsC99Varargs();
+ MI->setArgumentList(Arguments, BP);
+ return false;
+ case tok::eod: // #define X(
+ Diag(Tok, diag::err_pp_missing_rparen_in_macro_def);
+ return true;
+ default:
+ // Handle keywords and identifiers here to accept things like
+ // #define Foo(for) for.
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (!II) {
+ // #define X(1
+ Diag(Tok, diag::err_pp_invalid_tok_in_arg_list);
+ return true;
+ }
+
+ // If this is already used as an argument, it is used multiple times (e.g.
+ // #define X(A,A.
+ if (std::find(Arguments.begin(), Arguments.end(), II) !=
+ Arguments.end()) { // C99 6.10.3p6
+ Diag(Tok, diag::err_pp_duplicate_name_in_arg_list) << II;
+ return true;
+ }
+
+ // Add the argument to the macro info.
+ Arguments.push_back(II);
+
+ // Lex the token after the identifier.
+ LexUnexpandedToken(Tok);
+
+ switch (Tok.getKind()) {
+ default: // #define X(A B
+ Diag(Tok, diag::err_pp_expected_comma_in_arg_list);
+ return true;
+ case tok::r_paren: // #define X(A)
+ MI->setArgumentList(Arguments, BP);
+ return false;
+ case tok::comma: // #define X(A,
+ break;
+ case tok::ellipsis: // #define X(A... -> GCC extension
+ // Diagnose extension.
+ Diag(Tok, diag::ext_named_variadic_macro);
+
+ // Lex the token after the identifier.
+ LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_pp_missing_rparen_in_macro_def);
+ return true;
+ }
+
+ MI->setIsGNUVarargs();
+ MI->setArgumentList(Arguments, BP);
+ return false;
+ }
+ }
+ }
+}
+
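+/// isConfigurationPattern - Return true if a macro definition that shadows a
+/// keyword looks like a deliberate configuration pattern such as
+/// '#define inline inline' or '#define inline __inline', rather than an
+/// accidental redefinition.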
+static bool isConfigurationPattern(Token &MacroName, MacroInfo *MI,
+ const LangOptions &LOptions) {
+ if (MI->getNumTokens() == 1) {
+ const Token &Value = MI->getReplacementToken(0);
+
+ // Macro that is identity, like '#define inline inline' is a valid pattern.
+ if (MacroName.getKind() == Value.getKind())
+ return true;
+
+ // Macro that maps a keyword to the same keyword decorated with leading/
+ // trailing underscores is a valid pattern:
+ // #define inline __inline
+ // #define inline __inline__
+ // #define inline _inline (in MS compatibility mode)
+ StringRef MacroText = MacroName.getIdentifierInfo()->getName();
+ if (IdentifierInfo *II = Value.getIdentifierInfo()) {
+ if (!II->isKeyword(LOptions))
+ return false;
+ StringRef ValueText = II->getName();
+ StringRef TrimmedValue = ValueText;
+ if (!ValueText.startswith("__")) {
+ if (ValueText.startswith("_"))
+ TrimmedValue = TrimmedValue.drop_front(1);
+ else
+ return false;
+ } else {
+ TrimmedValue = TrimmedValue.drop_front(2);
+ if (TrimmedValue.endswith("__"))
+ TrimmedValue = TrimmedValue.drop_back(2);
+ }
+ return TrimmedValue.equals(MacroText);
+ } else {
+ return false;
+ }
+ }
+
+ // #define inline
+ return MacroName.isOneOf(tok::kw_extern, tok::kw_inline, tok::kw_static,
+ tok::kw_const) &&
+ MI->getNumTokens() == 0;
+}
+
+/// HandleDefineDirective - Implements \#define. This consumes the entire macro
+/// line then lets the caller lex the next real token.
+void Preprocessor::HandleDefineDirective(Token &DefineTok,
+ bool ImmediatelyAfterHeaderGuard) {
+ ++NumDefined;
+
+ Token MacroNameTok;
+ bool MacroShadowsKeyword;
+ ReadMacroName(MacroNameTok, MU_Define, &MacroShadowsKeyword);
+
+ // Error reading macro name? If so, diagnostic already issued.
+ if (MacroNameTok.is(tok::eod))
+ return;
+
+ Token LastTok = MacroNameTok;
+
+ // If we are supposed to keep comments in #defines, reenable comment saving
+ // mode.
+ if (CurLexer) CurLexer->SetCommentRetentionState(KeepMacroComments);
+
+ // Create the new macro.
+ MacroInfo *MI = AllocateMacroInfo(MacroNameTok.getLocation());
+
+ Token Tok;
+ LexUnexpandedToken(Tok);
+
+ // If this is a function-like macro definition, parse the argument list,
+ // marking each of the identifiers as being used as macro arguments. Also,
+ // check other constraints on the first token of the macro body.
+ if (Tok.is(tok::eod)) {
+ if (ImmediatelyAfterHeaderGuard) {
+      // Save this macro information since it may be part of a header guard.
+ CurPPLexer->MIOpt.SetDefinedMacro(MacroNameTok.getIdentifierInfo(),
+ MacroNameTok.getLocation());
+ }
+ // If there is no body to this macro, we have no special handling here.
+ } else if (Tok.hasLeadingSpace()) {
+ // This is a normal token with leading space. Clear the leading space
+ // marker on the first token to get proper expansion.
+ Tok.clearFlag(Token::LeadingSpace);
+ } else if (Tok.is(tok::l_paren)) {
+ // This is a function-like macro definition. Read the argument list.
+ MI->setIsFunctionLike();
+ if (ReadMacroDefinitionArgList(MI, LastTok)) {
+ // Throw away the rest of the line.
+ if (CurPPLexer->ParsingPreprocessorDirective)
+ DiscardUntilEndOfDirective();
+ return;
+ }
+
+ // If this is a definition of a variadic C99 function-like macro, not using
+    // the GNU named varargs extension, enable __VA_ARGS__.
+
+ // "Poison" __VA_ARGS__, which can only appear in the expansion of a macro.
+ // This gets unpoisoned where it is allowed.
+ assert(Ident__VA_ARGS__->isPoisoned() && "__VA_ARGS__ should be poisoned!");
+ if (MI->isC99Varargs())
+ Ident__VA_ARGS__->setIsPoisoned(false);
+
+ // Read the first token after the arg list for down below.
+ LexUnexpandedToken(Tok);
+ } else if (LangOpts.C99 || LangOpts.CPlusPlus11) {
+ // C99 requires whitespace between the macro definition and the body. Emit
+ // a diagnostic for something like "#define X+".
+ Diag(Tok, diag::ext_c99_whitespace_required_after_macro_name);
+ } else {
+ // C90 6.8 TC1 says: "In the definition of an object-like macro, if the
+ // first character of a replacement list is not a character required by
+ // subclause 5.2.1, then there shall be white-space separation between the
+ // identifier and the replacement list.". 5.2.1 lists this set:
+ // "A-Za-z0-9!"#%&'()*+,_./:;<=>?[\]^_{|}~" as well as whitespace, which
+ // is irrelevant here.
+ bool isInvalid = false;
+ if (Tok.is(tok::at)) // @ is not in the list above.
+ isInvalid = true;
+ else if (Tok.is(tok::unknown)) {
+ // If we have an unknown token, it is something strange like "`". Since
+      // all of the valid characters would have lexed into a single-character
+ // token of some sort, we know this is not a valid case.
+ isInvalid = true;
+ }
+ if (isInvalid)
+ Diag(Tok, diag::ext_missing_whitespace_after_macro_name);
+ else
+ Diag(Tok, diag::warn_missing_whitespace_after_macro_name);
+ }
+
+ if (!Tok.is(tok::eod))
+ LastTok = Tok;
+
+ // Read the rest of the macro body.
+ if (MI->isObjectLike()) {
+ // Object-like macros are very simple, just read their body.
+ while (Tok.isNot(tok::eod)) {
+ LastTok = Tok;
+ MI->AddTokenToBody(Tok);
+ // Get the next token of the macro.
+ LexUnexpandedToken(Tok);
+ }
+
+ } else {
+ // Otherwise, read the body of a function-like macro. While we are at it,
+ // check C99 6.10.3.2p1: ensure that # operators are followed by macro
+ // parameters in function-like macro expansions.
+ while (Tok.isNot(tok::eod)) {
+ LastTok = Tok;
+
+ if (Tok.isNot(tok::hash) && Tok.isNot(tok::hashhash)) {
+ MI->AddTokenToBody(Tok);
+
+ // Get the next token of the macro.
+ LexUnexpandedToken(Tok);
+ continue;
+ }
+
+ // If we're in -traditional mode, then we should ignore stringification
+ // and token pasting. Mark the tokens as unknown so as not to confuse
+ // things.
+ if (getLangOpts().TraditionalCPP) {
+ Tok.setKind(tok::unknown);
+ MI->AddTokenToBody(Tok);
+
+ // Get the next token of the macro.
+ LexUnexpandedToken(Tok);
+ continue;
+ }
+
+      if (Tok.is(tok::hashhash)) {
+ // If we see token pasting, check if it looks like the gcc comma
+ // pasting extension. We'll use this information to suppress
+ // diagnostics later on.
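+      // (e.g. '#define F(...) f(0, ##__VA_ARGS__)', where the extension drops
+      // the comma when no variadic arguments are given).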
+
+ // Get the next token of the macro.
+ LexUnexpandedToken(Tok);
+
+ if (Tok.is(tok::eod)) {
+ MI->AddTokenToBody(LastTok);
+ break;
+ }
+
+ unsigned NumTokens = MI->getNumTokens();
+ if (NumTokens && Tok.getIdentifierInfo() == Ident__VA_ARGS__ &&
+ MI->getReplacementToken(NumTokens-1).is(tok::comma))
+ MI->setHasCommaPasting();
+
+ // Things look ok, add the '##' token to the macro.
+ MI->AddTokenToBody(LastTok);
+ continue;
+ }
+
+ // Get the next token of the macro.
+ LexUnexpandedToken(Tok);
+
+ // Check for a valid macro arg identifier.
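+      // (e.g. in '#define STR(x) #x' the '#' must be followed by the
+      // parameter 'x').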
+ if (Tok.getIdentifierInfo() == nullptr ||
+ MI->getArgumentNum(Tok.getIdentifierInfo()) == -1) {
+
+ // If this is assembler-with-cpp mode, we accept random gibberish after
+ // the '#' because '#' is often a comment character. However, change
+ // the kind of the token to tok::unknown so that the preprocessor isn't
+ // confused.
+ if (getLangOpts().AsmPreprocessor && Tok.isNot(tok::eod)) {
+ LastTok.setKind(tok::unknown);
+ MI->AddTokenToBody(LastTok);
+ continue;
+ } else {
+ Diag(Tok, diag::err_pp_stringize_not_parameter);
+
+ // Disable __VA_ARGS__ again.
+ Ident__VA_ARGS__->setIsPoisoned(true);
+ return;
+ }
+ }
+
+ // Things look ok, add the '#' and param name tokens to the macro.
+ MI->AddTokenToBody(LastTok);
+ MI->AddTokenToBody(Tok);
+ LastTok = Tok;
+
+ // Get the next token of the macro.
+ LexUnexpandedToken(Tok);
+ }
+ }
+
+ if (MacroShadowsKeyword &&
+ !isConfigurationPattern(MacroNameTok, MI, getLangOpts())) {
+ Diag(MacroNameTok, diag::warn_pp_macro_hides_keyword);
+ }
+
+ // Disable __VA_ARGS__ again.
+ Ident__VA_ARGS__->setIsPoisoned(true);
+
+ // Check that there is no paste (##) operator at the beginning or end of the
+ // replacement list.
+ unsigned NumTokens = MI->getNumTokens();
+ if (NumTokens != 0) {
+ if (MI->getReplacementToken(0).is(tok::hashhash)) {
+ Diag(MI->getReplacementToken(0), diag::err_paste_at_start);
+ return;
+ }
+ if (MI->getReplacementToken(NumTokens-1).is(tok::hashhash)) {
+ Diag(MI->getReplacementToken(NumTokens-1), diag::err_paste_at_end);
+ return;
+ }
+ }
+
+ MI->setDefinitionEndLoc(LastTok.getLocation());
+
+ // Finally, if this identifier already had a macro defined for it, verify that
+ // the macro bodies are identical, and issue diagnostics if they are not.
+ if (const MacroInfo *OtherMI=getMacroInfo(MacroNameTok.getIdentifierInfo())) {
+ // In Objective-C, ignore attempts to directly redefine the builtin
+ // definitions of the ownership qualifiers. It's still possible to
+ // #undef them.
+ auto isObjCProtectedMacro = [](const IdentifierInfo *II) -> bool {
+ return II->isStr("__strong") ||
+ II->isStr("__weak") ||
+ II->isStr("__unsafe_unretained") ||
+ II->isStr("__autoreleasing");
+ };
+ if (getLangOpts().ObjC1 &&
+ SourceMgr.getFileID(OtherMI->getDefinitionLoc())
+ == getPredefinesFileID() &&
+ isObjCProtectedMacro(MacroNameTok.getIdentifierInfo())) {
+ // Warn if it changes the tokens.
+ if ((!getDiagnostics().getSuppressSystemWarnings() ||
+ !SourceMgr.isInSystemHeader(DefineTok.getLocation())) &&
+ !MI->isIdenticalTo(*OtherMI, *this,
+ /*Syntactic=*/LangOpts.MicrosoftExt)) {
+ Diag(MI->getDefinitionLoc(), diag::warn_pp_objc_macro_redef_ignored);
+ }
+ assert(!OtherMI->isWarnIfUnused());
+ return;
+ }
+
+ // It is very common for system headers to have tons of macro redefinitions
+ // and for warnings to be disabled in system headers. If this is the case,
+ // then don't bother calling MacroInfo::isIdenticalTo.
+ if (!getDiagnostics().getSuppressSystemWarnings() ||
+ !SourceMgr.isInSystemHeader(DefineTok.getLocation())) {
+ if (!OtherMI->isUsed() && OtherMI->isWarnIfUnused())
+ Diag(OtherMI->getDefinitionLoc(), diag::pp_macro_not_used);
+
+ // Warn if defining "__LINE__" and other builtins, per C99 6.10.8/4 and
+ // C++ [cpp.predefined]p4, but allow it as an extension.
+ if (OtherMI->isBuiltinMacro())
+ Diag(MacroNameTok, diag::ext_pp_redef_builtin_macro);
+ // Macros must be identical. This means all tokens and whitespace
+ // separation must be the same. C99 6.10.3p2.
+ else if (!OtherMI->isAllowRedefinitionsWithoutWarning() &&
+ !MI->isIdenticalTo(*OtherMI, *this, /*Syntactic=*/LangOpts.MicrosoftExt)) {
+ Diag(MI->getDefinitionLoc(), diag::ext_pp_macro_redef)
+ << MacroNameTok.getIdentifierInfo();
+ Diag(OtherMI->getDefinitionLoc(), diag::note_previous_definition);
+ }
+ }
+ if (OtherMI->isWarnIfUnused())
+ WarnUnusedMacroLocs.erase(OtherMI->getDefinitionLoc());
+ }
+
+ DefMacroDirective *MD =
+ appendDefMacroDirective(MacroNameTok.getIdentifierInfo(), MI);
+
+ assert(!MI->isUsed());
+ // If we need warning for not using the macro, add its location in the
+ // warn-because-unused-macro set. If it gets used it will be removed from set.
+ if (getSourceManager().isInMainFile(MI->getDefinitionLoc()) &&
+ !Diags->isIgnored(diag::pp_macro_not_used, MI->getDefinitionLoc())) {
+ MI->setIsWarnIfUnused(true);
+ WarnUnusedMacroLocs.insert(MI->getDefinitionLoc());
+ }
+
+ // If the callbacks want to know, tell them about the macro definition.
+ if (Callbacks)
+ Callbacks->MacroDefined(MacroNameTok, MD);
+}
+
+/// HandleUndefDirective - Implements \#undef.
+///
+void Preprocessor::HandleUndefDirective(Token &UndefTok) {
+ ++NumUndefined;
+
+ Token MacroNameTok;
+ ReadMacroName(MacroNameTok, MU_Undef);
+
+ // Error reading macro name? If so, diagnostic already issued.
+ if (MacroNameTok.is(tok::eod))
+ return;
+
+ // Check to see if this is the last token on the #undef line.
+ CheckEndOfDirective("undef");
+
+ // Okay, we have a valid identifier to undef.
+ auto *II = MacroNameTok.getIdentifierInfo();
+ auto MD = getMacroDefinition(II);
+
+ // If the callbacks want to know, tell them about the macro #undef.
+  // Note: this happens whether or not the macro was actually defined.
+ if (Callbacks)
+ Callbacks->MacroUndefined(MacroNameTok, MD);
+
+ // If the macro is not defined, this is a noop undef, just return.
+ const MacroInfo *MI = MD.getMacroInfo();
+ if (!MI)
+ return;
+
+ if (!MI->isUsed() && MI->isWarnIfUnused())
+ Diag(MI->getDefinitionLoc(), diag::pp_macro_not_used);
+
+ if (MI->isWarnIfUnused())
+ WarnUnusedMacroLocs.erase(MI->getDefinitionLoc());
+
+ appendMacroDirective(MacroNameTok.getIdentifierInfo(),
+ AllocateUndefMacroDirective(MacroNameTok.getLocation()));
+}
+
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Conditional Directive Handling.
+//===----------------------------------------------------------------------===//
+
+/// HandleIfdefDirective - Implements the \#ifdef/\#ifndef directive. isIfndef
+/// is true when this is a \#ifndef directive. ReadAnyTokensBeforeDirective is
+/// true if any tokens have been returned or pp-directives activated before this
+/// \#ifndef has been lexed.
+///
+void Preprocessor::HandleIfdefDirective(Token &Result, bool isIfndef,
+ bool ReadAnyTokensBeforeDirective) {
+ ++NumIf;
+ Token DirectiveTok = Result;
+
+ Token MacroNameTok;
+ ReadMacroName(MacroNameTok);
+
+ // Error reading macro name? If so, diagnostic already issued.
+ if (MacroNameTok.is(tok::eod)) {
+ // Skip code until we get to #endif. This helps with recovery by not
+ // emitting an error when the #endif is reached.
+ SkipExcludedConditionalBlock(DirectiveTok.getLocation(),
+ /*Foundnonskip*/false, /*FoundElse*/false);
+ return;
+ }
+
+ // Check to see if this is the last token on the #if[n]def line.
+ CheckEndOfDirective(isIfndef ? "ifndef" : "ifdef");
+
+ IdentifierInfo *MII = MacroNameTok.getIdentifierInfo();
+ auto MD = getMacroDefinition(MII);
+ MacroInfo *MI = MD.getMacroInfo();
+
+ if (CurPPLexer->getConditionalStackDepth() == 0) {
+ // If the start of a top-level #ifdef and if the macro is not defined,
+ // inform MIOpt that this might be the start of a proper include guard.
+ // Otherwise it is some other form of unknown conditional which we can't
+ // handle.
+ if (!ReadAnyTokensBeforeDirective && !MI) {
+ assert(isIfndef && "#ifdef shouldn't reach here");
+ CurPPLexer->MIOpt.EnterTopLevelIfndef(MII, MacroNameTok.getLocation());
+ } else
+ CurPPLexer->MIOpt.EnterTopLevelConditional();
+ }
+
+ // If there is a macro, process it.
+ if (MI) // Mark it used.
+ markMacroAsUsed(MI);
+
+ if (Callbacks) {
+ if (isIfndef)
+ Callbacks->Ifndef(DirectiveTok.getLocation(), MacroNameTok, MD);
+ else
+ Callbacks->Ifdef(DirectiveTok.getLocation(), MacroNameTok, MD);
+ }
+
+ // Should we include the stuff contained by this directive?
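+  // That is, enter the block for '#ifdef M' when M is defined, and for
+  // '#ifndef M' when M is not defined.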
+ if (!MI == isIfndef) {
+ // Yes, remember that we are inside a conditional, then lex the next token.
+ CurPPLexer->pushConditionalLevel(DirectiveTok.getLocation(),
+ /*wasskip*/false, /*foundnonskip*/true,
+ /*foundelse*/false);
+ } else {
+ // No, skip the contents of this block.
+ SkipExcludedConditionalBlock(DirectiveTok.getLocation(),
+ /*Foundnonskip*/false,
+ /*FoundElse*/false);
+ }
+}
+
+/// HandleIfDirective - Implements the \#if directive.
+///
+void Preprocessor::HandleIfDirective(Token &IfToken,
+ bool ReadAnyTokensBeforeDirective) {
+ ++NumIf;
+
+ // Parse and evaluate the conditional expression.
+ IdentifierInfo *IfNDefMacro = nullptr;
+ const SourceLocation ConditionalBegin = CurPPLexer->getSourceLocation();
+ const bool ConditionalTrue = EvaluateDirectiveExpression(IfNDefMacro);
+ const SourceLocation ConditionalEnd = CurPPLexer->getSourceLocation();
+
+ // If this condition is equivalent to #ifndef X, and if this is the first
+ // directive seen, handle it for the multiple-include optimization.
+ if (CurPPLexer->getConditionalStackDepth() == 0) {
+ if (!ReadAnyTokensBeforeDirective && IfNDefMacro && ConditionalTrue)
+ // FIXME: Pass in the location of the macro name, not the 'if' token.
+ CurPPLexer->MIOpt.EnterTopLevelIfndef(IfNDefMacro, IfToken.getLocation());
+ else
+ CurPPLexer->MIOpt.EnterTopLevelConditional();
+ }
+
+ if (Callbacks)
+ Callbacks->If(IfToken.getLocation(),
+ SourceRange(ConditionalBegin, ConditionalEnd),
+ (ConditionalTrue ? PPCallbacks::CVK_True : PPCallbacks::CVK_False));
+
+ // Should we include the stuff contained by this directive?
+ if (ConditionalTrue) {
+ // Yes, remember that we are inside a conditional, then lex the next token.
+ CurPPLexer->pushConditionalLevel(IfToken.getLocation(), /*wasskip*/false,
+ /*foundnonskip*/true, /*foundelse*/false);
+ } else {
+ // No, skip the contents of this block.
+ SkipExcludedConditionalBlock(IfToken.getLocation(), /*Foundnonskip*/false,
+ /*FoundElse*/false);
+ }
+}
+
+/// HandleEndifDirective - Implements the \#endif directive.
+///
+void Preprocessor::HandleEndifDirective(Token &EndifToken) {
+ ++NumEndif;
+
+ // Check that this is the whole directive.
+ CheckEndOfDirective("endif");
+
+ PPConditionalInfo CondInfo;
+ if (CurPPLexer->popConditionalLevel(CondInfo)) {
+ // No conditionals on the stack: this is an #endif without an #if.
+ Diag(EndifToken, diag::err_pp_endif_without_if);
+ return;
+ }
+
+  // If this is the end of a top-level conditional, inform MIOpt.
+ if (CurPPLexer->getConditionalStackDepth() == 0)
+ CurPPLexer->MIOpt.ExitTopLevelConditional();
+
+ assert(!CondInfo.WasSkipping && !CurPPLexer->LexingRawMode &&
+ "This code should only be reachable in the non-skipping case!");
+
+ if (Callbacks)
+ Callbacks->Endif(EndifToken.getLocation(), CondInfo.IfLoc);
+}
+
+/// HandleElseDirective - Implements the \#else directive.
+///
+void Preprocessor::HandleElseDirective(Token &Result) {
+ ++NumElse;
+
+ // #else directive in a non-skipping conditional... start skipping.
+ CheckEndOfDirective("else");
+
+ PPConditionalInfo CI;
+ if (CurPPLexer->popConditionalLevel(CI)) {
+ Diag(Result, diag::pp_err_else_without_if);
+ return;
+ }
+
+ // If this is a top-level #else, inform the MIOpt.
+ if (CurPPLexer->getConditionalStackDepth() == 0)
+ CurPPLexer->MIOpt.EnterTopLevelConditional();
+
+ // If this is a #else with a #else before it, report the error.
+ if (CI.FoundElse) Diag(Result, diag::pp_err_else_after_else);
+
+ if (Callbacks)
+ Callbacks->Else(Result.getLocation(), CI.IfLoc);
+
+ // Finally, skip the rest of the contents of this block.
+ SkipExcludedConditionalBlock(CI.IfLoc, /*Foundnonskip*/true,
+ /*FoundElse*/true, Result.getLocation());
+}
+
+/// HandleElifDirective - Implements the \#elif directive.
+///
+void Preprocessor::HandleElifDirective(Token &ElifToken) {
+ ++NumElse;
+
+ // #elif directive in a non-skipping conditional... start skipping.
+ // We don't care what the condition is, because we will always skip it (since
+ // the block immediately before it was included).
+ const SourceLocation ConditionalBegin = CurPPLexer->getSourceLocation();
+ DiscardUntilEndOfDirective();
+ const SourceLocation ConditionalEnd = CurPPLexer->getSourceLocation();
+
+ PPConditionalInfo CI;
+ if (CurPPLexer->popConditionalLevel(CI)) {
+ Diag(ElifToken, diag::pp_err_elif_without_if);
+ return;
+ }
+
+ // If this is a top-level #elif, inform the MIOpt.
+ if (CurPPLexer->getConditionalStackDepth() == 0)
+ CurPPLexer->MIOpt.EnterTopLevelConditional();
+
+ // If this is a #elif with a #else before it, report the error.
+ if (CI.FoundElse) Diag(ElifToken, diag::pp_err_elif_after_else);
+
+ if (Callbacks)
+ Callbacks->Elif(ElifToken.getLocation(),
+ SourceRange(ConditionalBegin, ConditionalEnd),
+ PPCallbacks::CVK_NotEvaluated, CI.IfLoc);
+
+ // Finally, skip the rest of the contents of this block.
+ SkipExcludedConditionalBlock(CI.IfLoc, /*Foundnonskip*/true,
+ /*FoundElse*/CI.FoundElse,
+ ElifToken.getLocation());
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp b/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp
new file mode 100644
index 0000000..c40598c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp
@@ -0,0 +1,798 @@
+//===--- PPExpressions.cpp - Preprocessor Expression Evaluation -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Preprocessor::EvaluateDirectiveExpression method,
+// which parses and evaluates integer constant expressions for #if directives.
+//
+//===----------------------------------------------------------------------===//
+//
+// FIXME: implement testing for #assert's.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/CodeCompletionHandler.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/MacroInfo.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SaveAndRestore.h"
+using namespace clang;
+
+namespace {
+
+/// PPValue - Represents the value of a subexpression of a preprocessor
+/// conditional and the source range covered by it.
+class PPValue {
+ SourceRange Range;
+public:
+ llvm::APSInt Val;
+
+ // Default ctor - Construct an 'invalid' PPValue.
+ PPValue(unsigned BitWidth) : Val(BitWidth) {}
+
+ unsigned getBitWidth() const { return Val.getBitWidth(); }
+ bool isUnsigned() const { return Val.isUnsigned(); }
+
+ SourceRange getRange() const { return Range; }
+
+ void setRange(SourceLocation L) { Range.setBegin(L); Range.setEnd(L); }
+ void setRange(SourceLocation B, SourceLocation E) {
+ Range.setBegin(B); Range.setEnd(E);
+ }
+ void setBegin(SourceLocation L) { Range.setBegin(L); }
+ void setEnd(SourceLocation L) { Range.setEnd(L); }
+};
+
+}
+
+static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
+ Token &PeekTok, bool ValueLive,
+ Preprocessor &PP);
+
+/// DefinedTracker - This struct is used while parsing expressions to keep track
+/// of whether !defined(X) has been seen.
+///
+/// With this simple scheme, we handle the basic forms:
+/// !defined(X) and !defined X
+/// but we also trivially handle (silly) stuff like:
+/// !!!defined(X) and +!defined(X) and !+!+!defined(X) and !(defined(X)).
+struct DefinedTracker {
+ /// Each time a Value is evaluated, it returns information about whether the
+ /// parsed value is of the form defined(X), !defined(X) or is something else.
+ enum TrackerState {
+ DefinedMacro, // defined(X)
+ NotDefinedMacro, // !defined(X)
+ Unknown // Something else.
+ } State;
+ /// TheMacro - When the state is DefinedMacro or NotDefinedMacro, this
+ /// indicates the macro that was checked.
+ IdentifierInfo *TheMacro;
+};
+
+/// EvaluateDefined - Process a 'defined(sym)' expression.
+static bool EvaluateDefined(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
+ bool ValueLive, Preprocessor &PP) {
+ SourceLocation beginLoc(PeekTok.getLocation());
+ Result.setBegin(beginLoc);
+
+ // Get the next token, don't expand it.
+ PP.LexUnexpandedNonComment(PeekTok);
+
+ // Two options, it can either be a pp-identifier or a (.
+ SourceLocation LParenLoc;
+ if (PeekTok.is(tok::l_paren)) {
+ // Found a paren, remember we saw it and skip it.
+ LParenLoc = PeekTok.getLocation();
+ PP.LexUnexpandedNonComment(PeekTok);
+ }
+
+ if (PeekTok.is(tok::code_completion)) {
+ if (PP.getCodeCompletionHandler())
+ PP.getCodeCompletionHandler()->CodeCompleteMacroName(false);
+ PP.setCodeCompletionReached();
+ PP.LexUnexpandedNonComment(PeekTok);
+ }
+
+ // If we don't have a pp-identifier now, this is an error.
+ if (PP.CheckMacroName(PeekTok, MU_Other))
+ return true;
+
+ // Otherwise, we got an identifier, is it defined to something?
+ IdentifierInfo *II = PeekTok.getIdentifierInfo();
+ MacroDefinition Macro = PP.getMacroDefinition(II);
+ Result.Val = !!Macro;
+ Result.Val.setIsUnsigned(false); // Result is signed intmax_t.
+
+ // If there is a macro, mark it used.
+ if (Result.Val != 0 && ValueLive)
+ PP.markMacroAsUsed(Macro.getMacroInfo());
+
+ // Save macro token for callback.
+ Token macroToken(PeekTok);
+
+ // If we are in parens, ensure we have a trailing ).
+ if (LParenLoc.isValid()) {
+ // Consume identifier.
+ Result.setEnd(PeekTok.getLocation());
+ PP.LexUnexpandedNonComment(PeekTok);
+
+ if (PeekTok.isNot(tok::r_paren)) {
+ PP.Diag(PeekTok.getLocation(), diag::err_pp_expected_after)
+ << "'defined'" << tok::r_paren;
+ PP.Diag(LParenLoc, diag::note_matching) << tok::l_paren;
+ return true;
+ }
+ // Consume the ).
+ Result.setEnd(PeekTok.getLocation());
+ PP.LexNonComment(PeekTok);
+ } else {
+ // Consume identifier.
+ Result.setEnd(PeekTok.getLocation());
+ PP.LexNonComment(PeekTok);
+ }
+
+ // Invoke the 'defined' callback.
+ if (PPCallbacks *Callbacks = PP.getPPCallbacks()) {
+ Callbacks->Defined(macroToken, Macro,
+ SourceRange(beginLoc, PeekTok.getLocation()));
+ }
+
+ // Success, remember that we saw defined(X).
+ DT.State = DefinedTracker::DefinedMacro;
+ DT.TheMacro = II;
+ return false;
+}
+
+/// EvaluateValue - Evaluate the token PeekTok (and any others needed) and
+/// return the computed value in Result. Return true if there was an error
+/// parsing. This function also returns information about the form of the
+/// expression in DT. See above for information on what DT means.
+///
+/// If ValueLive is false, then this value is being evaluated in a context where
+/// the result is not used. As such, avoid diagnostics that relate to
+/// evaluation.
+static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
+ bool ValueLive, Preprocessor &PP) {
+ DT.State = DefinedTracker::Unknown;
+
+ if (PeekTok.is(tok::code_completion)) {
+ if (PP.getCodeCompletionHandler())
+ PP.getCodeCompletionHandler()->CodeCompletePreprocessorExpression();
+ PP.setCodeCompletionReached();
+ PP.LexNonComment(PeekTok);
+ }
+
+ // If this token's spelling is a pp-identifier, check to see if it is
+ // 'defined' or if it is a macro. Note that we check here because many
+ // keywords are pp-identifiers, so we can't check the kind.
+ if (IdentifierInfo *II = PeekTok.getIdentifierInfo()) {
+ // Handle "defined X" and "defined(X)".
+ if (II->isStr("defined"))
+      return EvaluateDefined(Result, PeekTok, DT, ValueLive, PP);
+
+ // If this identifier isn't 'defined' or one of the special
+ // preprocessor keywords and it wasn't macro expanded, it turns
+ // into a simple 0, unless it is the C++ keyword "true", in which case it
+ // turns into "1".
+ if (ValueLive &&
+ II->getTokenID() != tok::kw_true &&
+ II->getTokenID() != tok::kw_false)
+ PP.Diag(PeekTok, diag::warn_pp_undef_identifier) << II;
+ Result.Val = II->getTokenID() == tok::kw_true;
+ Result.Val.setIsUnsigned(false); // "0" is signed intmax_t 0.
+ Result.setRange(PeekTok.getLocation());
+ PP.LexNonComment(PeekTok);
+ return false;
+ }
+
+ switch (PeekTok.getKind()) {
+ default: // Non-value token.
+ PP.Diag(PeekTok, diag::err_pp_expr_bad_token_start_expr);
+ return true;
+ case tok::eod:
+ case tok::r_paren:
+ // If there is no expression, report and exit.
+ PP.Diag(PeekTok, diag::err_pp_expected_value_in_expr);
+ return true;
+ case tok::numeric_constant: {
+ SmallString<64> IntegerBuffer;
+ bool NumberInvalid = false;
+ StringRef Spelling = PP.getSpelling(PeekTok, IntegerBuffer,
+ &NumberInvalid);
+ if (NumberInvalid)
+ return true; // a diagnostic was already reported
+
+ NumericLiteralParser Literal(Spelling, PeekTok.getLocation(), PP);
+ if (Literal.hadError)
+ return true; // a diagnostic was already reported.
+
+ if (Literal.isFloatingLiteral() || Literal.isImaginary) {
+ PP.Diag(PeekTok, diag::err_pp_illegal_floating_literal);
+ return true;
+ }
+ assert(Literal.isIntegerLiteral() && "Unknown ppnumber");
+
+ // Complain about, and drop, any ud-suffix.
+ if (Literal.hasUDSuffix())
+ PP.Diag(PeekTok, diag::err_pp_invalid_udl) << /*integer*/1;
+
+ // 'long long' is a C99 or C++11 feature.
+ if (!PP.getLangOpts().C99 && Literal.isLongLong) {
+ if (PP.getLangOpts().CPlusPlus)
+ PP.Diag(PeekTok,
+ PP.getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong);
+ else
+ PP.Diag(PeekTok, diag::ext_c99_longlong);
+ }
+
+ // Parse the integer literal into Result.
+ if (Literal.GetIntegerValue(Result.Val)) {
+ // Overflow parsing integer literal.
+ if (ValueLive)
+ PP.Diag(PeekTok, diag::err_integer_literal_too_large)
+ << /* Unsigned */ 1;
+ Result.Val.setIsUnsigned(true);
+ } else {
+ // Set the signedness of the result to match whether there was a U suffix
+ // or not.
+ Result.Val.setIsUnsigned(Literal.isUnsigned);
+
+ // Detect overflow based on whether the value is signed. If signed
+ // and if the value is too large, emit a warning "integer constant is so
+ // large that it is unsigned" e.g. on 12345678901234567890 where intmax_t
+ // is 64-bits.
+ if (!Literal.isUnsigned && Result.Val.isNegative()) {
+ // Octal, hexadecimal, and binary literals are implicitly unsigned if
+ // the value does not fit into a signed integer type.
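+        // (e.g. 0xFFFFFFFFFFFFFFFF with a 64-bit intmax_t).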
+ if (ValueLive && Literal.getRadix() == 10)
+ PP.Diag(PeekTok, diag::ext_integer_literal_too_large_for_signed);
+ Result.Val.setIsUnsigned(true);
+ }
+ }
+
+ // Consume the token.
+ Result.setRange(PeekTok.getLocation());
+ PP.LexNonComment(PeekTok);
+ return false;
+ }
+ case tok::char_constant: // 'x'
+ case tok::wide_char_constant: // L'x'
+ case tok::utf8_char_constant: // u8'x'
+ case tok::utf16_char_constant: // u'x'
+ case tok::utf32_char_constant: { // U'x'
+ // Complain about, and drop, any ud-suffix.
+ if (PeekTok.hasUDSuffix())
+ PP.Diag(PeekTok, diag::err_pp_invalid_udl) << /*character*/0;
+
+ SmallString<32> CharBuffer;
+ bool CharInvalid = false;
+ StringRef ThisTok = PP.getSpelling(PeekTok, CharBuffer, &CharInvalid);
+ if (CharInvalid)
+ return true;
+
+ CharLiteralParser Literal(ThisTok.begin(), ThisTok.end(),
+ PeekTok.getLocation(), PP, PeekTok.getKind());
+ if (Literal.hadError())
+ return true; // A diagnostic was already emitted.
+
+ // Character literals are always int or wchar_t, expand to intmax_t.
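+    // (e.g. 'ab' is a multi-character literal of type int, while L'x' has
+    // type wchar_t).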
+ const TargetInfo &TI = PP.getTargetInfo();
+ unsigned NumBits;
+ if (Literal.isMultiChar())
+ NumBits = TI.getIntWidth();
+ else if (Literal.isWide())
+ NumBits = TI.getWCharWidth();
+ else if (Literal.isUTF16())
+ NumBits = TI.getChar16Width();
+ else if (Literal.isUTF32())
+ NumBits = TI.getChar32Width();
+ else
+ NumBits = TI.getCharWidth();
+
+ // Set the width.
+ llvm::APSInt Val(NumBits);
+ // Set the value.
+ Val = Literal.getValue();
+ // Set the signedness. UTF-16 and UTF-32 are always unsigned
+ if (Literal.isWide())
+ Val.setIsUnsigned(!TargetInfo::isTypeSigned(TI.getWCharType()));
+ else if (!Literal.isUTF16() && !Literal.isUTF32())
+ Val.setIsUnsigned(!PP.getLangOpts().CharIsSigned);
+
+ if (Result.Val.getBitWidth() > Val.getBitWidth()) {
+ Result.Val = Val.extend(Result.Val.getBitWidth());
+ } else {
+ assert(Result.Val.getBitWidth() == Val.getBitWidth() &&
+ "intmax_t smaller than char/wchar_t?");
+ Result.Val = Val;
+ }
+
+ // Consume the token.
+ Result.setRange(PeekTok.getLocation());
+ PP.LexNonComment(PeekTok);
+ return false;
+ }
+ case tok::l_paren: {
+ SourceLocation Start = PeekTok.getLocation();
+ PP.LexNonComment(PeekTok); // Eat the (.
+ // Parse the value and if there are any binary operators involved, parse
+ // them.
+ if (EvaluateValue(Result, PeekTok, DT, ValueLive, PP)) return true;
+
+ // If this is a silly value like (X), which doesn't need parens, check for
+ // !(defined X).
+ if (PeekTok.is(tok::r_paren)) {
+ // Just use DT unmodified as our result.
+ } else {
+ // Otherwise, we have something like (x+y), and we consumed '(x'.
+ if (EvaluateDirectiveSubExpr(Result, 1, PeekTok, ValueLive, PP))
+ return true;
+
+ if (PeekTok.isNot(tok::r_paren)) {
+ PP.Diag(PeekTok.getLocation(), diag::err_pp_expected_rparen)
+ << Result.getRange();
+ PP.Diag(Start, diag::note_matching) << tok::l_paren;
+ return true;
+ }
+ DT.State = DefinedTracker::Unknown;
+ }
+ Result.setRange(Start, PeekTok.getLocation());
+ PP.LexNonComment(PeekTok); // Eat the ).
+ return false;
+ }
+ case tok::plus: {
+ SourceLocation Start = PeekTok.getLocation();
+ // Unary plus doesn't modify the value.
+ PP.LexNonComment(PeekTok);
+ if (EvaluateValue(Result, PeekTok, DT, ValueLive, PP)) return true;
+ Result.setBegin(Start);
+ return false;
+ }
+ case tok::minus: {
+ SourceLocation Loc = PeekTok.getLocation();
+ PP.LexNonComment(PeekTok);
+ if (EvaluateValue(Result, PeekTok, DT, ValueLive, PP)) return true;
+ Result.setBegin(Loc);
+
+ // C99 6.5.3.3p3: The sign of the result matches the sign of the operand.
+ Result.Val = -Result.Val;
+
+ // -MININT is the only thing that overflows. Unsigned never overflows.
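+    // (e.g. negating the 64-bit value -9223372036854775808 yields the same
+    // value again).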
+ bool Overflow = !Result.isUnsigned() && Result.Val.isMinSignedValue();
+
+ // If this operator is live and overflowed, report the issue.
+ if (Overflow && ValueLive)
+ PP.Diag(Loc, diag::warn_pp_expr_overflow) << Result.getRange();
+
+ DT.State = DefinedTracker::Unknown;
+ return false;
+ }
+
+ case tok::tilde: {
+ SourceLocation Start = PeekTok.getLocation();
+ PP.LexNonComment(PeekTok);
+ if (EvaluateValue(Result, PeekTok, DT, ValueLive, PP)) return true;
+ Result.setBegin(Start);
+
+ // C99 6.5.3.3p4: The sign of the result matches the sign of the operand.
+ Result.Val = ~Result.Val;
+ DT.State = DefinedTracker::Unknown;
+ return false;
+ }
+
+ case tok::exclaim: {
+ SourceLocation Start = PeekTok.getLocation();
+ PP.LexNonComment(PeekTok);
+ if (EvaluateValue(Result, PeekTok, DT, ValueLive, PP)) return true;
+ Result.setBegin(Start);
+ Result.Val = !Result.Val;
+ // C99 6.5.3.3p5: The sign of the result is 'int', aka it is signed.
+ Result.Val.setIsUnsigned(false);
+
+ if (DT.State == DefinedTracker::DefinedMacro)
+ DT.State = DefinedTracker::NotDefinedMacro;
+ else if (DT.State == DefinedTracker::NotDefinedMacro)
+ DT.State = DefinedTracker::DefinedMacro;
+ return false;
+ }
+
+ // FIXME: Handle #assert
+ }
+}
+
+
+
+/// getPrecedence - Return the precedence of the specified binary operator
+/// token. This returns:
+/// ~0 - Invalid token.
+/// 14 -> 3 - various operators.
+/// 0 - 'eod' or ')'
+static unsigned getPrecedence(tok::TokenKind Kind) {
+ switch (Kind) {
+ default: return ~0U;
+ case tok::percent:
+ case tok::slash:
+ case tok::star: return 14;
+ case tok::plus:
+ case tok::minus: return 13;
+ case tok::lessless:
+ case tok::greatergreater: return 12;
+ case tok::lessequal:
+ case tok::less:
+ case tok::greaterequal:
+ case tok::greater: return 11;
+ case tok::exclaimequal:
+ case tok::equalequal: return 10;
+ case tok::amp: return 9;
+ case tok::caret: return 8;
+ case tok::pipe: return 7;
+ case tok::ampamp: return 6;
+ case tok::pipepipe: return 5;
+ case tok::question: return 4;
+ case tok::comma: return 3;
+ case tok::colon: return 2;
+ case tok::r_paren: return 0;// Lowest priority, end of expr.
+ case tok::eod: return 0;// Lowest priority, end of directive.
+ }
+}
+
+
+/// EvaluateDirectiveSubExpr - Evaluate the subexpression whose first token is
+/// PeekTok, and whose precedence is PeekPrec. This returns the result in LHS.
+///
+/// If ValueLive is false, then this value is being evaluated in a context where
+/// the result is not used. As such, avoid diagnostics that relate to
+/// evaluation, such as division by zero warnings.
+static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
+ Token &PeekTok, bool ValueLive,
+ Preprocessor &PP) {
+ unsigned PeekPrec = getPrecedence(PeekTok.getKind());
+ // If this token isn't valid, report the error.
+ if (PeekPrec == ~0U) {
+ PP.Diag(PeekTok.getLocation(), diag::err_pp_expr_bad_token_binop)
+ << LHS.getRange();
+ return true;
+ }
+
+ while (1) {
+ // If this token has a lower precedence than we are allowed to parse, return
+ // it so that higher levels of the recursion can parse it.
+ if (PeekPrec < MinPrec)
+ return false;
+
+ tok::TokenKind Operator = PeekTok.getKind();
+
+ // If this is a short-circuiting operator, see if the RHS of the operator is
+ // dead. Note that this cannot just clobber ValueLive. Consider
+ // "0 && 1 ? 4 : 1 / 0", which is parsed as "(0 && 1) ? 4 : (1 / 0)". In
+ // this example, the RHS of the && being dead does not make the rest of the
+ // expr dead.
+ bool RHSIsLive;
+ if (Operator == tok::ampamp && LHS.Val == 0)
+ RHSIsLive = false; // RHS of "0 && x" is dead.
+ else if (Operator == tok::pipepipe && LHS.Val != 0)
+ RHSIsLive = false; // RHS of "1 || x" is dead.
+ else if (Operator == tok::question && LHS.Val == 0)
+ RHSIsLive = false; // RHS (x) of "0 ? x : y" is dead.
+ else
+ RHSIsLive = ValueLive;
+
+ // Consume the operator, remembering the operator's location for reporting.
+ SourceLocation OpLoc = PeekTok.getLocation();
+ PP.LexNonComment(PeekTok);
+
+ PPValue RHS(LHS.getBitWidth());
+ // Parse the RHS of the operator.
+ DefinedTracker DT;
+ if (EvaluateValue(RHS, PeekTok, DT, RHSIsLive, PP)) return true;
+
+ // Remember the precedence of this operator and get the precedence of the
+ // operator immediately to the right of the RHS.
+ unsigned ThisPrec = PeekPrec;
+ PeekPrec = getPrecedence(PeekTok.getKind());
+
+ // If this token isn't valid, report the error.
+ if (PeekPrec == ~0U) {
+ PP.Diag(PeekTok.getLocation(), diag::err_pp_expr_bad_token_binop)
+ << RHS.getRange();
+ return true;
+ }
+
+ // Decide whether to include the next binop in this subexpression. For
+ // example, when parsing x+y*z and looking at '*', we want to recursively
+ // handle y*z as a single subexpression. We do this because the precedence
+ // of * is higher than that of +. The only strange case we have to handle
+ // here is for the ?: operator, where the precedence is actually lower than
+ // the LHS of the '?'. The grammar rule is:
+ //
+ // conditional-expression ::=
+ // logical-OR-expression ? expression : conditional-expression
+ // where 'expression' is actually comma-expression.
+ unsigned RHSPrec;
+ if (Operator == tok::question)
+ // The RHS of "?" should be maximally consumed as an expression.
+ RHSPrec = getPrecedence(tok::comma);
+ else // All others should munch while higher precedence.
+ RHSPrec = ThisPrec+1;
+
+ if (PeekPrec >= RHSPrec) {
+ if (EvaluateDirectiveSubExpr(RHS, RHSPrec, PeekTok, RHSIsLive, PP))
+ return true;
+ PeekPrec = getPrecedence(PeekTok.getKind());
+ }
+ assert(PeekPrec <= ThisPrec && "Recursion didn't work!");
+
+ // Usual arithmetic conversions (C99 6.3.1.8p1): result is unsigned if
+ // either operand is unsigned.
+ llvm::APSInt Res(LHS.getBitWidth());
+ switch (Operator) {
+ case tok::question: // No UAC for x and y in "x ? y : z".
+ case tok::lessless: // Shift amount doesn't UAC with shift value.
+ case tok::greatergreater: // Shift amount doesn't UAC with shift value.
+ case tok::comma: // Comma operands are not subject to UACs.
+ case tok::pipepipe: // Logical || does not do UACs.
+ case tok::ampamp: // Logical && does not do UACs.
+ break; // No UAC
+ default:
+ Res.setIsUnsigned(LHS.isUnsigned()|RHS.isUnsigned());
+ // If this just promoted something from signed to unsigned, and if the
+ // value was negative, warn about it.
+ if (ValueLive && Res.isUnsigned()) {
+ if (!LHS.isUnsigned() && LHS.Val.isNegative())
+ PP.Diag(OpLoc, diag::warn_pp_convert_to_positive) << 0
+ << LHS.Val.toString(10, true) + " to " +
+ LHS.Val.toString(10, false)
+ << LHS.getRange() << RHS.getRange();
+ if (!RHS.isUnsigned() && RHS.Val.isNegative())
+ PP.Diag(OpLoc, diag::warn_pp_convert_to_positive) << 1
+ << RHS.Val.toString(10, true) + " to " +
+ RHS.Val.toString(10, false)
+ << LHS.getRange() << RHS.getRange();
+ }
+ LHS.Val.setIsUnsigned(Res.isUnsigned());
+ RHS.Val.setIsUnsigned(Res.isUnsigned());
+ }
+
+ bool Overflow = false;
+ switch (Operator) {
+ default: llvm_unreachable("Unknown operator token!");
+ case tok::percent:
+ if (RHS.Val != 0)
+ Res = LHS.Val % RHS.Val;
+ else if (ValueLive) {
+ PP.Diag(OpLoc, diag::err_pp_remainder_by_zero)
+ << LHS.getRange() << RHS.getRange();
+ return true;
+ }
+ break;
+ case tok::slash:
+ if (RHS.Val != 0) {
+ if (LHS.Val.isSigned())
+ Res = llvm::APSInt(LHS.Val.sdiv_ov(RHS.Val, Overflow), false);
+ else
+ Res = LHS.Val / RHS.Val;
+ } else if (ValueLive) {
+ PP.Diag(OpLoc, diag::err_pp_division_by_zero)
+ << LHS.getRange() << RHS.getRange();
+ return true;
+ }
+ break;
+
+ case tok::star:
+ if (Res.isSigned())
+ Res = llvm::APSInt(LHS.Val.smul_ov(RHS.Val, Overflow), false);
+ else
+ Res = LHS.Val * RHS.Val;
+ break;
+ case tok::lessless: {
+ // Determine whether overflow is about to happen.
+ if (LHS.isUnsigned())
+ Res = LHS.Val.ushl_ov(RHS.Val, Overflow);
+ else
+ Res = llvm::APSInt(LHS.Val.sshl_ov(RHS.Val, Overflow), false);
+ break;
+ }
+ case tok::greatergreater: {
+ // Determine whether overflow is about to happen.
+ unsigned ShAmt = static_cast<unsigned>(RHS.Val.getLimitedValue());
+    if (ShAmt >= LHS.getBitWidth()) {
+      Overflow = true;
+      ShAmt = LHS.getBitWidth() - 1;
+    }
+ Res = LHS.Val >> ShAmt;
+ break;
+ }
+ case tok::plus:
+ if (LHS.isUnsigned())
+ Res = LHS.Val + RHS.Val;
+ else
+ Res = llvm::APSInt(LHS.Val.sadd_ov(RHS.Val, Overflow), false);
+ break;
+ case tok::minus:
+ if (LHS.isUnsigned())
+ Res = LHS.Val - RHS.Val;
+ else
+ Res = llvm::APSInt(LHS.Val.ssub_ov(RHS.Val, Overflow), false);
+ break;
+ case tok::lessequal:
+ Res = LHS.Val <= RHS.Val;
+ Res.setIsUnsigned(false); // C99 6.5.8p6, result is always int (signed)
+ break;
+ case tok::less:
+ Res = LHS.Val < RHS.Val;
+ Res.setIsUnsigned(false); // C99 6.5.8p6, result is always int (signed)
+ break;
+ case tok::greaterequal:
+ Res = LHS.Val >= RHS.Val;
+ Res.setIsUnsigned(false); // C99 6.5.8p6, result is always int (signed)
+ break;
+ case tok::greater:
+ Res = LHS.Val > RHS.Val;
+ Res.setIsUnsigned(false); // C99 6.5.8p6, result is always int (signed)
+ break;
+ case tok::exclaimequal:
+ Res = LHS.Val != RHS.Val;
+ Res.setIsUnsigned(false); // C99 6.5.9p3, result is always int (signed)
+ break;
+ case tok::equalequal:
+ Res = LHS.Val == RHS.Val;
+ Res.setIsUnsigned(false); // C99 6.5.9p3, result is always int (signed)
+ break;
+ case tok::amp:
+ Res = LHS.Val & RHS.Val;
+ break;
+ case tok::caret:
+ Res = LHS.Val ^ RHS.Val;
+ break;
+ case tok::pipe:
+ Res = LHS.Val | RHS.Val;
+ break;
+ case tok::ampamp:
+ Res = (LHS.Val != 0 && RHS.Val != 0);
+ Res.setIsUnsigned(false); // C99 6.5.13p3, result is always int (signed)
+ break;
+ case tok::pipepipe:
+ Res = (LHS.Val != 0 || RHS.Val != 0);
+ Res.setIsUnsigned(false); // C99 6.5.14p3, result is always int (signed)
+ break;
+ case tok::comma:
+      // The comma operator is invalid in pp expressions in C89 and C++ mode,
+      // but is valid in C99 if the expression is not being evaluated.
+ if (!PP.getLangOpts().C99 || ValueLive)
+ PP.Diag(OpLoc, diag::ext_pp_comma_expr)
+ << LHS.getRange() << RHS.getRange();
+ Res = RHS.Val; // LHS = LHS,RHS -> RHS.
+ break;
+ case tok::question: {
+ // Parse the : part of the expression.
+ if (PeekTok.isNot(tok::colon)) {
+ PP.Diag(PeekTok.getLocation(), diag::err_expected)
+ << tok::colon << LHS.getRange() << RHS.getRange();
+ PP.Diag(OpLoc, diag::note_matching) << tok::question;
+ return true;
+ }
+ // Consume the :.
+ PP.LexNonComment(PeekTok);
+
+ // Evaluate the value after the :.
+ bool AfterColonLive = ValueLive && LHS.Val == 0;
+ PPValue AfterColonVal(LHS.getBitWidth());
+ DefinedTracker DT;
+ if (EvaluateValue(AfterColonVal, PeekTok, DT, AfterColonLive, PP))
+ return true;
+
+ // Parse anything after the : with the same precedence as ?. We allow
+ // things of equal precedence because ?: is right associative.
+ if (EvaluateDirectiveSubExpr(AfterColonVal, ThisPrec,
+ PeekTok, AfterColonLive, PP))
+ return true;
+
+ // Now that we have the condition, the LHS and the RHS of the :, evaluate.
+ Res = LHS.Val != 0 ? RHS.Val : AfterColonVal.Val;
+ RHS.setEnd(AfterColonVal.getRange().getEnd());
+
+ // Usual arithmetic conversions (C99 6.3.1.8p1): result is unsigned if
+ // either operand is unsigned.
+ Res.setIsUnsigned(RHS.isUnsigned() | AfterColonVal.isUnsigned());
+
+ // Figure out the precedence of the token after the : part.
+ PeekPrec = getPrecedence(PeekTok.getKind());
+ break;
+ }
+ case tok::colon:
+ // Don't allow :'s to float around without being part of ?: exprs.
+ PP.Diag(OpLoc, diag::err_pp_colon_without_question)
+ << LHS.getRange() << RHS.getRange();
+ return true;
+ }
+
+ // If this operator is live and overflowed, report the issue.
+ if (Overflow && ValueLive)
+ PP.Diag(OpLoc, diag::warn_pp_expr_overflow)
+ << LHS.getRange() << RHS.getRange();
+
+ // Put the result back into 'LHS' for our next iteration.
+ LHS.Val = Res;
+ LHS.setEnd(RHS.getRange().getEnd());
+ }
+}
+
+/// EvaluateDirectiveExpression - Evaluate an integer constant expression that
+/// may occur after a #if or #elif directive. If the expression is equivalent
+/// to "!defined(X)" return X in IfNDefMacro.
+bool Preprocessor::EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro) {
+ SaveAndRestore<bool> PPDir(ParsingIfOrElifDirective, true);
+ // Save the current state of 'DisableMacroExpansion' and reset it to false. If
+ // 'DisableMacroExpansion' is true, then we must be in a macro argument list
+ // in which case a directive is undefined behavior. We want macros to be able
+  // to recursively expand in order to get more gcc-like behavior, so we force
+ // DisableMacroExpansion to false and restore it when we're done parsing the
+ // expression.
+ bool DisableMacroExpansionAtStartOfDirective = DisableMacroExpansion;
+ DisableMacroExpansion = false;
+
+ // Peek ahead one token.
+ Token Tok;
+ LexNonComment(Tok);
+
+ // C99 6.10.1p3 - All expressions are evaluated as intmax_t or uintmax_t.
+ unsigned BitWidth = getTargetInfo().getIntMaxTWidth();
+
+ PPValue ResVal(BitWidth);
+ DefinedTracker DT;
+ if (EvaluateValue(ResVal, Tok, DT, true, *this)) {
+ // Parse error, skip the rest of the macro line.
+ if (Tok.isNot(tok::eod))
+ DiscardUntilEndOfDirective();
+
+ // Restore 'DisableMacroExpansion'.
+ DisableMacroExpansion = DisableMacroExpansionAtStartOfDirective;
+ return false;
+ }
+
+ // If we are at the end of the expression after just parsing a value, there
+ // must be no (unparenthesized) binary operators involved, so we can exit
+ // directly.
+ if (Tok.is(tok::eod)) {
+ // If the expression we parsed was of the form !defined(macro), return the
+ // macro in IfNDefMacro.
+ if (DT.State == DefinedTracker::NotDefinedMacro)
+ IfNDefMacro = DT.TheMacro;
+
+ // Restore 'DisableMacroExpansion'.
+ DisableMacroExpansion = DisableMacroExpansionAtStartOfDirective;
+ return ResVal.Val != 0;
+ }
+
+ // Otherwise, we must have a binary operator (e.g. "#if 1 < 2"), so parse the
+ // operator and the stuff after it.
+ if (EvaluateDirectiveSubExpr(ResVal, getPrecedence(tok::question),
+ Tok, true, *this)) {
+ // Parse error, skip the rest of the macro line.
+ if (Tok.isNot(tok::eod))
+ DiscardUntilEndOfDirective();
+
+ // Restore 'DisableMacroExpansion'.
+ DisableMacroExpansion = DisableMacroExpansionAtStartOfDirective;
+ return false;
+ }
+
+ // If we aren't at the tok::eod token, something bad happened, like an extra
+ // ')' token.
+ if (Tok.isNot(tok::eod)) {
+ Diag(Tok, diag::err_pp_expected_eol);
+ DiscardUntilEndOfDirective();
+ }
+
+ // Restore 'DisableMacroExpansion'.
+ DisableMacroExpansion = DisableMacroExpansionAtStartOfDirective;
+ return ResVal.Val != 0;
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp b/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
new file mode 100644
index 0000000..2f09841
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
@@ -0,0 +1,756 @@
+//===--- PPLexerChange.cpp - Handle changing lexers in the preprocessor ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements pieces of the Preprocessor interface that manage the
+// current lexer stack.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/PTHManager.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+using namespace clang;
+
+PPCallbacks::~PPCallbacks() {}
+
+//===----------------------------------------------------------------------===//
+// Miscellaneous Methods.
+//===----------------------------------------------------------------------===//
+
+/// isInPrimaryFile - Return true if we're in the top-level file, not in a
+/// \#include. This looks through macro expansions and active _Pragma lexers.
+bool Preprocessor::isInPrimaryFile() const {
+ if (IsFileLexer())
+ return IncludeMacroStack.empty();
+
+ // If there are any stacked lexers, we're in a #include.
+ assert(IsFileLexer(IncludeMacroStack[0]) &&
+ "Top level include stack isn't our primary lexer?");
+ for (unsigned i = 1, e = IncludeMacroStack.size(); i != e; ++i)
+ if (IsFileLexer(IncludeMacroStack[i]))
+ return false;
+ return true;
+}
+
+/// getCurrentFileLexer - Return the current file lexer being lexed from. Note
+/// that this ignores any potentially active macro expansions and _Pragma
+/// expansions going on at the time.
+PreprocessorLexer *Preprocessor::getCurrentFileLexer() const {
+ if (IsFileLexer())
+ return CurPPLexer;
+
+ // Look for a stacked lexer.
+ for (unsigned i = IncludeMacroStack.size(); i != 0; --i) {
+ const IncludeStackInfo& ISI = IncludeMacroStack[i-1];
+ if (IsFileLexer(ISI))
+ return ISI.ThePPLexer;
+ }
+ return nullptr;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Methods for Entering and Callbacks for leaving various contexts
+//===----------------------------------------------------------------------===//
+
+/// EnterSourceFile - Add a source file to the top of the include stack and
+/// start lexing tokens from it instead of the current buffer.
+bool Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir,
+ SourceLocation Loc) {
+ assert(!CurTokenLexer && "Cannot #include a file inside a macro!");
+ ++NumEnteredSourceFiles;
+
+ if (MaxIncludeStackDepth < IncludeMacroStack.size())
+ MaxIncludeStackDepth = IncludeMacroStack.size();
+
+ if (PTH) {
+ if (PTHLexer *PL = PTH->CreateLexer(FID)) {
+ EnterSourceFileWithPTH(PL, CurDir);
+ return false;
+ }
+ }
+
+ // Get the MemoryBuffer for this FID, if it fails, we fail.
+ bool Invalid = false;
+ const llvm::MemoryBuffer *InputFile =
+ getSourceManager().getBuffer(FID, Loc, &Invalid);
+ if (Invalid) {
+ SourceLocation FileStart = SourceMgr.getLocForStartOfFile(FID);
+ Diag(Loc, diag::err_pp_error_opening_file)
+ << std::string(SourceMgr.getBufferName(FileStart)) << "";
+ return true;
+ }
+
+ if (isCodeCompletionEnabled() &&
+ SourceMgr.getFileEntryForID(FID) == CodeCompletionFile) {
+ CodeCompletionFileLoc = SourceMgr.getLocForStartOfFile(FID);
+ CodeCompletionLoc =
+ CodeCompletionFileLoc.getLocWithOffset(CodeCompletionOffset);
+ }
+
+ EnterSourceFileWithLexer(new Lexer(FID, InputFile, *this), CurDir);
+ return false;
+}
+
+/// EnterSourceFileWithLexer - Add a source file to the top of the include stack
+/// and start lexing tokens from it instead of the current buffer.
+void Preprocessor::EnterSourceFileWithLexer(Lexer *TheLexer,
+ const DirectoryLookup *CurDir) {
+
+ // Add the current lexer to the include stack.
+ if (CurPPLexer || CurTokenLexer)
+ PushIncludeMacroStack();
+
+ CurLexer.reset(TheLexer);
+ CurPPLexer = TheLexer;
+ CurDirLookup = CurDir;
+ CurSubmodule = nullptr;
+ if (CurLexerKind != CLK_LexAfterModuleImport)
+ CurLexerKind = CLK_Lexer;
+
+ // Notify the client, if desired, that we are in a new source file.
+ if (Callbacks && !CurLexer->Is_PragmaLexer) {
+ SrcMgr::CharacteristicKind FileType =
+ SourceMgr.getFileCharacteristic(CurLexer->getFileLoc());
+
+ Callbacks->FileChanged(CurLexer->getFileLoc(),
+ PPCallbacks::EnterFile, FileType);
+ }
+}
+
+/// EnterSourceFileWithPTH - Add a source file to the top of the include stack
+/// and start getting tokens from it using the PTH cache.
+void Preprocessor::EnterSourceFileWithPTH(PTHLexer *PL,
+ const DirectoryLookup *CurDir) {
+
+ if (CurPPLexer || CurTokenLexer)
+ PushIncludeMacroStack();
+
+ CurDirLookup = CurDir;
+ CurPTHLexer.reset(PL);
+ CurPPLexer = CurPTHLexer.get();
+ CurSubmodule = nullptr;
+ if (CurLexerKind != CLK_LexAfterModuleImport)
+ CurLexerKind = CLK_PTHLexer;
+
+ // Notify the client, if desired, that we are in a new source file.
+ if (Callbacks) {
+ FileID FID = CurPPLexer->getFileID();
+ SourceLocation EnterLoc = SourceMgr.getLocForStartOfFile(FID);
+ SrcMgr::CharacteristicKind FileType =
+ SourceMgr.getFileCharacteristic(EnterLoc);
+ Callbacks->FileChanged(EnterLoc, PPCallbacks::EnterFile, FileType);
+ }
+}
+
+/// EnterMacro - Add a Macro to the top of the include stack and start lexing
+/// tokens from it instead of the current buffer.
+void Preprocessor::EnterMacro(Token &Tok, SourceLocation ILEnd,
+ MacroInfo *Macro, MacroArgs *Args) {
+ std::unique_ptr<TokenLexer> TokLexer;
+ if (NumCachedTokenLexers == 0) {
+ TokLexer = llvm::make_unique<TokenLexer>(Tok, ILEnd, Macro, Args, *this);
+ } else {
+ TokLexer = std::move(TokenLexerCache[--NumCachedTokenLexers]);
+ TokLexer->Init(Tok, ILEnd, Macro, Args);
+ }
+
+ PushIncludeMacroStack();
+ CurDirLookup = nullptr;
+ CurTokenLexer = std::move(TokLexer);
+ if (CurLexerKind != CLK_LexAfterModuleImport)
+ CurLexerKind = CLK_TokenLexer;
+}
+
+/// EnterTokenStream - Add a "macro" context to the top of the include stack,
+/// which will cause the lexer to start returning the specified tokens.
+///
+/// If DisableMacroExpansion is true, tokens lexed from the token stream will
+/// not be subject to further macro expansion. Otherwise, these tokens will
+/// be re-macro-expanded when/if expansion is enabled.
+///
+/// If OwnsTokens is false, this method assumes that the specified stream of
+/// tokens has a permanent owner somewhere, so they do not need to be copied.
+/// If it is true, it assumes the array of tokens is allocated with new[] and
+/// must be freed.
+///
+void Preprocessor::EnterTokenStream(const Token *Toks, unsigned NumToks,
+ bool DisableMacroExpansion,
+ bool OwnsTokens) {
+ if (CurLexerKind == CLK_CachingLexer) {
+ if (CachedLexPos < CachedTokens.size()) {
+ // We're entering tokens into the middle of our cached token stream. We
+ // can't represent that, so just insert the tokens into the buffer.
+ CachedTokens.insert(CachedTokens.begin() + CachedLexPos,
+ Toks, Toks + NumToks);
+ if (OwnsTokens)
+ delete [] Toks;
+ return;
+ }
+
+ // New tokens are at the end of the cached token sequence; insert the
+ // token stream underneath the caching lexer.
+ ExitCachingLexMode();
+ EnterTokenStream(Toks, NumToks, DisableMacroExpansion, OwnsTokens);
+ EnterCachingLexMode();
+ return;
+ }
+
+ // Create a macro expander to expand from the specified token stream.
+ std::unique_ptr<TokenLexer> TokLexer;
+ if (NumCachedTokenLexers == 0) {
+ TokLexer = llvm::make_unique<TokenLexer>(
+ Toks, NumToks, DisableMacroExpansion, OwnsTokens, *this);
+ } else {
+ TokLexer = std::move(TokenLexerCache[--NumCachedTokenLexers]);
+ TokLexer->Init(Toks, NumToks, DisableMacroExpansion, OwnsTokens);
+ }
+
+ // Save our current state.
+ PushIncludeMacroStack();
+ CurDirLookup = nullptr;
+ CurTokenLexer = std::move(TokLexer);
+ if (CurLexerKind != CLK_LexAfterModuleImport)
+ CurLexerKind = CLK_TokenLexer;
+}
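+
+// Editorial usage sketch (hypothetical caller; mirrors the call used later
+// in ReadFunctionLikeMacroArgs in this same change to preserve an EOF
+// token):
+//   Token *Toks = new Token[1];
+//   Toks[0] = SavedTok;
+//   PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true,
+//                       /*OwnsTokens=*/true); // PP will delete[] Toks.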
+
+/// \brief Compute the relative path that names the given file relative to
+/// the given directory.
+static void computeRelativePath(FileManager &FM, const DirectoryEntry *Dir,
+ const FileEntry *File,
+ SmallString<128> &Result) {
+ Result.clear();
+
+ StringRef FilePath = File->getDir()->getName();
+ StringRef Path = FilePath;
+ while (!Path.empty()) {
+ if (const DirectoryEntry *CurDir = FM.getDirectory(Path)) {
+ if (CurDir == Dir) {
+ Result = FilePath.substr(Path.size());
+ llvm::sys::path::append(Result,
+ llvm::sys::path::filename(File->getName()));
+ return;
+ }
+ }
+
+ Path = llvm::sys::path::parent_path(Path);
+ }
+
+ Result = File->getName();
+}
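+
+// Editorial worked example (not in the original source): for
+// Dir = "/usr/include" and File = "/usr/include/sys/types.h", the loop
+// walks the file's directory "/usr/include/sys" up to "/usr/include",
+// which matches Dir, so Result is the leftover suffix joined with the
+// filename (roughly "sys/types.h"). If no ancestor of the file's directory
+// matches Dir, Result falls back to the file's full name.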
+
+void Preprocessor::PropagateLineStartLeadingSpaceInfo(Token &Result) {
+ if (CurTokenLexer) {
+ CurTokenLexer->PropagateLineStartLeadingSpaceInfo(Result);
+ return;
+ }
+ if (CurLexer) {
+ CurLexer->PropagateLineStartLeadingSpaceInfo(Result);
+ return;
+ }
+ // FIXME: Handle other kinds of lexers? It generally shouldn't matter,
+ // but it might if they're empty?
+}
+
+/// \brief Determine the location to use as the end of the buffer for a lexer.
+///
+/// If the file ends with a newline, form the EOF token on the newline itself,
+/// rather than "on the line following it", which doesn't exist. This makes
+/// diagnostics relating to the end of file include the last file that the user
+/// actually typed, which is goodness.
+const char *Preprocessor::getCurLexerEndPos() {
+ const char *EndPos = CurLexer->BufferEnd;
+ if (EndPos != CurLexer->BufferStart &&
+ (EndPos[-1] == '\n' || EndPos[-1] == '\r')) {
+ --EndPos;
+
+ // Handle \n\r and \r\n:
+ if (EndPos != CurLexer->BufferStart &&
+ (EndPos[-1] == '\n' || EndPos[-1] == '\r') &&
+ EndPos[-1] != EndPos[0])
+ --EndPos;
+ }
+
+ return EndPos;
+}
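+
+// Editorial worked example (not in the original source): for a buffer
+// ending in "...x;\r\n", EndPos first steps back over the '\n'; then,
+// since the preceding '\r' differs from it, steps back once more, so the
+// EOF token is formed on the "\r\n" rather than on a nonexistent following
+// line. A buffer ending in "\n\n" steps back only once, because the second
+// check requires the two trailing characters to differ.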
+
+
+/// HandleEndOfFile - This callback is invoked when the lexer hits the end of
+/// the current file. This either returns the EOF token or pops a level off
+/// the include stack and keeps going.
+bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
+ assert(!CurTokenLexer &&
+ "Ending a file when currently in a macro!");
+
+ // See if this file had a controlling macro.
+ if (CurPPLexer) { // Not ending a macro, ignore it.
+ if (const IdentifierInfo *ControllingMacro =
+ CurPPLexer->MIOpt.GetControllingMacroAtEndOfFile()) {
+ // Okay, this has a controlling macro, remember in HeaderFileInfo.
+ if (const FileEntry *FE = CurPPLexer->getFileEntry()) {
+ HeaderInfo.SetFileControllingMacro(FE, ControllingMacro);
+ if (MacroInfo *MI =
+ getMacroInfo(const_cast<IdentifierInfo*>(ControllingMacro))) {
+ MI->UsedForHeaderGuard = true;
+ }
+ if (const IdentifierInfo *DefinedMacro =
+ CurPPLexer->MIOpt.GetDefinedMacro()) {
+ if (!isMacroDefined(ControllingMacro) &&
+ DefinedMacro != ControllingMacro &&
+ HeaderInfo.FirstTimeLexingFile(FE)) {
+
+ // If the edit distance between the two macros is more than 50%,
+ // DefinedMacro may not be a header guard, or it may be the header
+ // guard of another header file. Therefore, it may be defining
+ // something completely different. This can be observed in the wild
+ // when handling feature macros or header guards in different files.
+
+ const StringRef ControllingMacroName = ControllingMacro->getName();
+ const StringRef DefinedMacroName = DefinedMacro->getName();
+ const size_t MaxHalfLength = std::max(ControllingMacroName.size(),
+ DefinedMacroName.size()) / 2;
+ const unsigned ED = ControllingMacroName.edit_distance(
+ DefinedMacroName, true, MaxHalfLength);
+ if (ED <= MaxHalfLength) {
+ // Emit a warning for a bad header guard.
+ Diag(CurPPLexer->MIOpt.GetMacroLocation(),
+ diag::warn_header_guard)
+ << CurPPLexer->MIOpt.GetMacroLocation() << ControllingMacro;
+ Diag(CurPPLexer->MIOpt.GetDefinedLocation(),
+ diag::note_header_guard)
+ << CurPPLexer->MIOpt.GetDefinedLocation() << DefinedMacro
+ << ControllingMacro
+ << FixItHint::CreateReplacement(
+ CurPPLexer->MIOpt.GetDefinedLocation(),
+ ControllingMacro->getName());
+ }
+ }
+ }
+ }
+ }
+ }
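+
+ // Editorial worked example of the edit-distance heuristic above: for
+ // ControllingMacro "FOO_H" vs. DefinedMacro "FOD_H", MaxHalfLength is
+ // max(5, 5) / 2 == 2 and the edit distance is 1 <= 2, so the bad header
+ // guard warning (with a fix-it renaming the definition) is emitted. For
+ // "FOO_H" vs. "ENABLE_BAR", the bounded edit distance exceeds
+ // max(5, 10) / 2 == 5, so the names are treated as unrelated macros and
+ // no warning is emitted.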
+
+ // Complain about reaching a true EOF within arc_cf_code_audited.
+ // We don't want to complain about reaching the end of a macro
+ // instantiation or a _Pragma.
+ if (PragmaARCCFCodeAuditedLoc.isValid() &&
+ !isEndOfMacro && !(CurLexer && CurLexer->Is_PragmaLexer)) {
+ Diag(PragmaARCCFCodeAuditedLoc, diag::err_pp_eof_in_arc_cf_code_audited);
+
+ // Recover by leaving immediately.
+ PragmaARCCFCodeAuditedLoc = SourceLocation();
+ }
+
+ // Complain about reaching a true EOF within assume_nonnull.
+ // We don't want to complain about reaching the end of a macro
+ // instantiation or a _Pragma.
+ if (PragmaAssumeNonNullLoc.isValid() &&
+ !isEndOfMacro && !(CurLexer && CurLexer->Is_PragmaLexer)) {
+ Diag(PragmaAssumeNonNullLoc, diag::err_pp_eof_in_assume_nonnull);
+
+ // Recover by leaving immediately.
+ PragmaAssumeNonNullLoc = SourceLocation();
+ }
+
+ // If this is a #include'd file, pop it off the include stack and continue
+ // lexing the #includer file.
+ if (!IncludeMacroStack.empty()) {
+
+ // If we lexed the code-completion file, act as if we reached EOF.
+ if (isCodeCompletionEnabled() && CurPPLexer &&
+ SourceMgr.getLocForStartOfFile(CurPPLexer->getFileID()) ==
+ CodeCompletionFileLoc) {
+ if (CurLexer) {
+ Result.startToken();
+ CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd, tok::eof);
+ CurLexer.reset();
+ } else {
+ assert(CurPTHLexer && "Got EOF but no current lexer set!");
+ CurPTHLexer->getEOF(Result);
+ CurPTHLexer.reset();
+ }
+
+ CurPPLexer = nullptr;
+ return true;
+ }
+
+ if (!isEndOfMacro && CurPPLexer &&
+ SourceMgr.getIncludeLoc(CurPPLexer->getFileID()).isValid()) {
+ // Notify SourceManager to record the number of FileIDs that were created
+ // during lexing of the #include'd file.
+ unsigned NumFIDs =
+ SourceMgr.local_sloc_entry_size() -
+ CurPPLexer->getInitialNumSLocEntries() + 1/*#include'd file*/;
+ SourceMgr.setNumCreatedFIDsForFileID(CurPPLexer->getFileID(), NumFIDs);
+ }
+
+ FileID ExitedFID;
+ if (Callbacks && !isEndOfMacro && CurPPLexer)
+ ExitedFID = CurPPLexer->getFileID();
+
+ bool LeavingSubmodule = CurSubmodule && CurLexer;
+ if (LeavingSubmodule) {
+ // Notify the parser that we've left the module.
+ const char *EndPos = getCurLexerEndPos();
+ Result.startToken();
+ CurLexer->BufferPtr = EndPos;
+ CurLexer->FormTokenWithChars(Result, EndPos, tok::annot_module_end);
+ Result.setAnnotationEndLoc(Result.getLocation());
+ Result.setAnnotationValue(CurSubmodule);
+
+ // We're done with this submodule.
+ LeaveSubmodule();
+ }
+
+ // We're done with the #included file.
+ RemoveTopOfLexerStack();
+
+ // Propagate info about start-of-line/leading white-space/etc.
+ PropagateLineStartLeadingSpaceInfo(Result);
+
+ // Notify the client, if desired, that we are in a new source file.
+ if (Callbacks && !isEndOfMacro && CurPPLexer) {
+ SrcMgr::CharacteristicKind FileType =
+ SourceMgr.getFileCharacteristic(CurPPLexer->getSourceLocation());
+ Callbacks->FileChanged(CurPPLexer->getSourceLocation(),
+ PPCallbacks::ExitFile, FileType, ExitedFID);
+ }
+
+ // The client should lex another token unless we generated an
+ // end-of-module annotation token.
+ return LeavingSubmodule;
+ }
+
+ // If this is the end of the main file, form an EOF token.
+ if (CurLexer) {
+ const char *EndPos = getCurLexerEndPos();
+ Result.startToken();
+ CurLexer->BufferPtr = EndPos;
+ CurLexer->FormTokenWithChars(Result, EndPos, tok::eof);
+
+ if (isCodeCompletionEnabled()) {
+ // Inserting the code-completion point increases the source buffer by 1,
+ // but the main FileID was created before inserting the point.
+ // Compensate by reducing the EOF location by 1, otherwise the location
+ // will point to the next FileID.
+ // FIXME: This is hacky, the code-completion point should probably be
+ // inserted before the main FileID is created.
+ if (CurLexer->getFileLoc() == CodeCompletionFileLoc)
+ Result.setLocation(Result.getLocation().getLocWithOffset(-1));
+ }
+
+ if (!isIncrementalProcessingEnabled())
+ // We're done with lexing.
+ CurLexer.reset();
+ } else {
+ assert(CurPTHLexer && "Got EOF but no current lexer set!");
+ CurPTHLexer->getEOF(Result);
+ CurPTHLexer.reset();
+ }
+
+ if (!isIncrementalProcessingEnabled())
+ CurPPLexer = nullptr;
+
+ if (TUKind == TU_Complete) {
+ // This is the end of the top-level file. 'WarnUnusedMacroLocs' has
+ // collected all the macro locations that we need to warn about because
+ // the macros were defined but never used.
+ for (WarnUnusedMacroLocsTy::iterator
+ I=WarnUnusedMacroLocs.begin(), E=WarnUnusedMacroLocs.end();
+ I!=E; ++I)
+ Diag(*I, diag::pp_macro_not_used);
+ }
+
+ // If we are building a module that has an umbrella header, make sure that
+ // each of the headers within the directory covered by the umbrella header
+ // was actually included by the umbrella header.
+ if (Module *Mod = getCurrentModule()) {
+ if (Mod->getUmbrellaHeader()) {
+ SourceLocation StartLoc
+ = SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
+
+ if (!getDiagnostics().isIgnored(diag::warn_uncovered_module_header,
+ StartLoc)) {
+ ModuleMap &ModMap = getHeaderSearchInfo().getModuleMap();
+ const DirectoryEntry *Dir = Mod->getUmbrellaDir().Entry;
+ vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ std::error_code EC;
+ for (vfs::recursive_directory_iterator Entry(FS, Dir->getName(), EC), End;
+ Entry != End && !EC; Entry.increment(EC)) {
+ using llvm::StringSwitch;
+
+ // Check whether this entry has an extension typically associated with
+ // headers.
+ if (!StringSwitch<bool>(llvm::sys::path::extension(Entry->getName()))
+ .Cases(".h", ".H", ".hh", ".hpp", true)
+ .Default(false))
+ continue;
+
+ if (const FileEntry *Header =
+ getFileManager().getFile(Entry->getName()))
+ if (!getSourceManager().hasFileInfo(Header)) {
+ if (!ModMap.isHeaderInUnavailableModule(Header)) {
+ // Find the relative path that would access this header.
+ SmallString<128> RelativePath;
+ computeRelativePath(FileMgr, Dir, Header, RelativePath);
+ Diag(StartLoc, diag::warn_uncovered_module_header)
+ << Mod->getFullModuleName() << RelativePath;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+/// HandleEndOfTokenLexer - This callback is invoked when the current TokenLexer
+/// hits the end of its token stream.
+bool Preprocessor::HandleEndOfTokenLexer(Token &Result) {
+ assert(CurTokenLexer && !CurPPLexer &&
+ "Ending a macro when currently in a #include file!");
+
+ if (!MacroExpandingLexersStack.empty() &&
+ MacroExpandingLexersStack.back().first == CurTokenLexer.get())
+ removeCachedMacroExpandedTokensOfLastLexer();
+
+ // Delete or cache the now-dead macro expander.
+ if (NumCachedTokenLexers == TokenLexerCacheSize)
+ CurTokenLexer.reset();
+ else
+ TokenLexerCache[NumCachedTokenLexers++] = std::move(CurTokenLexer);
+
+ // Handle this like a #include file being popped off the stack.
+ return HandleEndOfFile(Result, true);
+}
+
+/// RemoveTopOfLexerStack - Pop the current lexer/macro exp off the top of the
+/// lexer stack. This should only be used in situations where the current
+/// state of the top-of-stack lexer is unknown.
+void Preprocessor::RemoveTopOfLexerStack() {
+ assert(!IncludeMacroStack.empty() && "Ran out of stack entries to load");
+
+ if (CurTokenLexer) {
+ // Delete or cache the now-dead macro expander.
+ if (NumCachedTokenLexers == TokenLexerCacheSize)
+ CurTokenLexer.reset();
+ else
+ TokenLexerCache[NumCachedTokenLexers++] = std::move(CurTokenLexer);
+ }
+
+ PopIncludeMacroStack();
+}
+
+/// HandleMicrosoftCommentPaste - When the macro expander pastes together a
+/// comment (/##/) in microsoft mode, this method handles updating the current
+/// state, returning the token on the next source line.
+void Preprocessor::HandleMicrosoftCommentPaste(Token &Tok) {
+ assert(CurTokenLexer && !CurPPLexer &&
+ "Pasted comment can only be formed from macro");
+ // We handle this by scanning for the closest real lexer, switching it to
+ // raw mode and preprocessor mode. This will cause it to return \n as an
+ // explicit EOD token.
+ PreprocessorLexer *FoundLexer = nullptr;
+ bool LexerWasInPPMode = false;
+ for (unsigned i = 0, e = IncludeMacroStack.size(); i != e; ++i) {
+ IncludeStackInfo &ISI = *(IncludeMacroStack.end()-i-1);
+ if (ISI.ThePPLexer == nullptr) continue; // Scan for a real lexer.
+
+ // Once we find a real lexer, mark it as raw mode (disabling macro
+ // expansions) and preprocessor mode (return EOD). We know that the lexer
+ // was *not* in raw mode before, because the macro that the comment came
+ // from was expanded. However, it could have already been in preprocessor
+ // mode (#if COMMENT) in which case we have to return it to that mode and
+ // return EOD.
+ FoundLexer = ISI.ThePPLexer;
+ FoundLexer->LexingRawMode = true;
+ LexerWasInPPMode = FoundLexer->ParsingPreprocessorDirective;
+ FoundLexer->ParsingPreprocessorDirective = true;
+ break;
+ }
+
+ // Okay, we either found and switched over the lexer, or we didn't find a
+ // lexer. In either case, finish off the macro the comment came from, getting
+ // the next token.
+ if (!HandleEndOfTokenLexer(Tok)) Lex(Tok);
+
+ // Discard comments as long as we don't have EOF or EOD. This 'comments
+ // out' the rest of the line, including any tokens that came from other macros
+ // that were active, as in:
+ // #define submacro a COMMENT b
+ // submacro c
+ // which should lex to 'a' only: 'b' and 'c' should be removed.
+ while (Tok.isNot(tok::eod) && Tok.isNot(tok::eof))
+ Lex(Tok);
+
+ // If we got an eod token, then we successfully found the end of the line.
+ if (Tok.is(tok::eod)) {
+ assert(FoundLexer && "Can't get end of line without an active lexer");
+ // Restore the lexer back to normal mode instead of raw mode.
+ FoundLexer->LexingRawMode = false;
+
+ // If the lexer was already in preprocessor mode, just return the EOD token
+ // to finish the preprocessor line.
+ if (LexerWasInPPMode) return;
+
+ // Otherwise, switch out of PP mode and return the next lexed token.
+ FoundLexer->ParsingPreprocessorDirective = false;
+ return Lex(Tok);
+ }
+
+ // If we got an EOF token, then we reached the end of the token stream but
+ // didn't find an explicit \n. This can only happen if there was no lexer
+ // active (an active lexer would return EOD at EOF if there was no \n in
+ // preprocessor directive mode), so just return EOF as our token.
+ assert(!FoundLexer && "Lexer should return EOD before EOF in PP mode");
+}
+
+void Preprocessor::EnterSubmodule(Module *M, SourceLocation ImportLoc) {
+ if (!getLangOpts().ModulesLocalVisibility) {
+ // Just track that we entered this submodule.
+ BuildingSubmoduleStack.push_back(
+ BuildingSubmoduleInfo(M, ImportLoc, CurSubmoduleState));
+ return;
+ }
+
+ // Resolve as much of the module definition as we can now, before we enter
+ // one of its headers.
+ // FIXME: Can we enable Complain here?
+ // FIXME: Can we do this when local visibility is disabled?
+ ModuleMap &ModMap = getHeaderSearchInfo().getModuleMap();
+ ModMap.resolveExports(M, /*Complain=*/false);
+ ModMap.resolveUses(M, /*Complain=*/false);
+ ModMap.resolveConflicts(M, /*Complain=*/false);
+
+ // If this is the first time we've entered this module, set up its state.
+ auto R = Submodules.insert(std::make_pair(M, SubmoduleState()));
+ auto &State = R.first->second;
+ bool FirstTime = R.second;
+ if (FirstTime) {
+ // Determine the set of starting macros for this submodule; take these
+ // from the "null" module (the predefines buffer).
+ //
+ // FIXME: If we have local visibility but not modules enabled, the
+ // NullSubmoduleState is polluted by #defines in the top-level source
+ // file.
+ auto &StartingMacros = NullSubmoduleState.Macros;
+
+ // Restore to the starting state.
+ // FIXME: Do this lazily, when each macro name is first referenced.
+ for (auto &Macro : StartingMacros) {
+ // Skip uninteresting macros.
+ if (!Macro.second.getLatest() &&
+ Macro.second.getOverriddenMacros().empty())
+ continue;
+
+ MacroState MS(Macro.second.getLatest());
+ MS.setOverriddenMacros(*this, Macro.second.getOverriddenMacros());
+ State.Macros.insert(std::make_pair(Macro.first, std::move(MS)));
+ }
+ }
+
+ // Track that we entered this module.
+ BuildingSubmoduleStack.push_back(
+ BuildingSubmoduleInfo(M, ImportLoc, CurSubmoduleState));
+
+ // Switch to this submodule as the current submodule.
+ CurSubmoduleState = &State;
+
+ // This module is visible to itself.
+ if (FirstTime)
+ makeModuleVisible(M, ImportLoc);
+}
+
+void Preprocessor::LeaveSubmodule() {
+ auto &Info = BuildingSubmoduleStack.back();
+
+ Module *LeavingMod = Info.M;
+ SourceLocation ImportLoc = Info.ImportLoc;
+
+ // Create ModuleMacros for any macros defined in this submodule.
+ for (auto &Macro : CurSubmoduleState->Macros) {
+ auto *II = const_cast<IdentifierInfo*>(Macro.first);
+
+ // Find the starting point for the MacroDirective chain in this submodule.
+ MacroDirective *OldMD = nullptr;
+ if (getLangOpts().ModulesLocalVisibility) {
+ // FIXME: It'd be better to start at the state from when we most recently
+ // entered this submodule, but it doesn't really matter.
+ auto &PredefMacros = NullSubmoduleState.Macros;
+ auto PredefMacroIt = PredefMacros.find(Macro.first);
+ if (PredefMacroIt == PredefMacros.end())
+ OldMD = nullptr;
+ else
+ OldMD = PredefMacroIt->second.getLatest();
+ }
+
+ // This module may have exported a new macro. If so, create a ModuleMacro
+ // representing that fact.
+ bool ExplicitlyPublic = false;
+ for (auto *MD = Macro.second.getLatest(); MD != OldMD;
+ MD = MD->getPrevious()) {
+ assert(MD && "broken macro directive chain");
+
+ // Stop on macros defined in other submodules we #included along the way.
+ // There's no point doing this if we're tracking local submodule
+ // visibility, since there can be no such directives in our list.
+ if (!getLangOpts().ModulesLocalVisibility) {
+ Module *Mod = getModuleContainingLocation(MD->getLocation());
+ if (Mod != LeavingMod)
+ break;
+ }
+
+ if (auto *VisMD = dyn_cast<VisibilityMacroDirective>(MD)) {
+ // The latest visibility directive for a name in a submodule affects
+ // all the directives that come before it.
+ if (VisMD->isPublic())
+ ExplicitlyPublic = true;
+ else if (!ExplicitlyPublic)
+ // Private with no following public directive: not exported.
+ break;
+ } else {
+ MacroInfo *Def = nullptr;
+ if (DefMacroDirective *DefMD = dyn_cast<DefMacroDirective>(MD))
+ Def = DefMD->getInfo();
+
+ // FIXME: Issue a warning if multiple headers for the same submodule
+ // define a macro, rather than silently ignoring all but the first.
+ bool IsNew;
+ // Don't bother creating a module macro if it would represent a #undef
+ // that doesn't override anything.
+ if (Def || !Macro.second.getOverriddenMacros().empty())
+ addModuleMacro(LeavingMod, II, Def,
+ Macro.second.getOverriddenMacros(), IsNew);
+ break;
+ }
+ }
+ }
+
+ // FIXME: Before we leave this submodule, we should parse all the other
+ // headers within it. Otherwise, we're left with an inconsistent state
+ // where we've made the module visible but don't yet have its complete
+ // contents.
+
+ // Put back the outer module's state, if we're tracking it.
+ if (getLangOpts().ModulesLocalVisibility)
+ CurSubmoduleState = Info.OuterSubmoduleState;
+
+ BuildingSubmoduleStack.pop_back();
+
+ // A nested #include makes the included submodule visible.
+ makeModuleVisible(LeavingMod, ImportLoc);
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
new file mode 100644
index 0000000..18348df
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
@@ -0,0 +1,1796 @@
+//===--- PPMacroExpansion.cpp - Top level Macro Expansion -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the top level handling of macro expansion for the
+// preprocessor.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/Attributes.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/CodeCompletionHandler.h"
+#include "clang/Lex/ExternalPreprocessorSource.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/MacroArgs.h"
+#include "clang/Lex/MacroInfo.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdio>
+#include <ctime>
+using namespace clang;
+
+MacroDirective *
+Preprocessor::getLocalMacroDirectiveHistory(const IdentifierInfo *II) const {
+ if (!II->hadMacroDefinition())
+ return nullptr;
+ auto Pos = CurSubmoduleState->Macros.find(II);
+ return Pos == CurSubmoduleState->Macros.end() ? nullptr
+ : Pos->second.getLatest();
+}
+
+void Preprocessor::appendMacroDirective(IdentifierInfo *II, MacroDirective *MD){
+ assert(MD && "MacroDirective should be non-zero!");
+ assert(!MD->getPrevious() && "Already attached to a MacroDirective history.");
+
+ MacroState &StoredMD = CurSubmoduleState->Macros[II];
+ auto *OldMD = StoredMD.getLatest();
+ MD->setPrevious(OldMD);
+ StoredMD.setLatest(MD);
+ StoredMD.overrideActiveModuleMacros(*this, II);
+
+ // Set up the identifier as having associated macro history.
+ II->setHasMacroDefinition(true);
+ if (!MD->isDefined() && LeafModuleMacros.find(II) == LeafModuleMacros.end())
+ II->setHasMacroDefinition(false);
+ if (II->isFromAST())
+ II->setChangedSinceDeserialization();
+}
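+
+// Editorial sketch (not in the original source) of the resulting history:
+// after
+//   #define X 1
+//   #undef X
+// the MacroState for X chains the latest UndefMacroDirective to the earlier
+// DefMacroDirective via getPrevious(), getLocalMacroDirectiveHistory(X)
+// returns the #undef node, and hasMacroDefinition() ends up false again
+// because the latest directive is not a definition and no leaf module macro
+// is recorded for X.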
+
+void Preprocessor::setLoadedMacroDirective(IdentifierInfo *II,
+ MacroDirective *MD) {
+ assert(II && MD);
+ MacroState &StoredMD = CurSubmoduleState->Macros[II];
+ assert(!StoredMD.getLatest() &&
+ "the macro history was modified before initializing it from a pch");
+ StoredMD = MD;
+ // Set up the identifier as having associated macro history.
+ II->setHasMacroDefinition(true);
+ if (!MD->isDefined() && LeafModuleMacros.find(II) == LeafModuleMacros.end())
+ II->setHasMacroDefinition(false);
+}
+
+ModuleMacro *Preprocessor::addModuleMacro(Module *Mod, IdentifierInfo *II,
+ MacroInfo *Macro,
+ ArrayRef<ModuleMacro *> Overrides,
+ bool &New) {
+ llvm::FoldingSetNodeID ID;
+ ModuleMacro::Profile(ID, Mod, II);
+
+ void *InsertPos;
+ if (auto *MM = ModuleMacros.FindNodeOrInsertPos(ID, InsertPos)) {
+ New = false;
+ return MM;
+ }
+
+ auto *MM = ModuleMacro::create(*this, Mod, II, Macro, Overrides);
+ ModuleMacros.InsertNode(MM, InsertPos);
+
+ // Each overridden macro is now overridden by one more macro.
+ bool HidAny = false;
+ for (auto *O : Overrides) {
+ HidAny |= (O->NumOverriddenBy == 0);
+ ++O->NumOverriddenBy;
+ }
+
+ // If we were the first overrider for any macro, it's no longer a leaf.
+ auto &LeafMacros = LeafModuleMacros[II];
+ if (HidAny) {
+ LeafMacros.erase(std::remove_if(LeafMacros.begin(), LeafMacros.end(),
+ [](ModuleMacro *MM) {
+ return MM->NumOverriddenBy != 0;
+ }),
+ LeafMacros.end());
+ }
+
+ // The new macro is always a leaf macro.
+ LeafMacros.push_back(MM);
+ // The identifier now has defined macros (that may or may not be visible).
+ II->setHasMacroDefinition(true);
+
+ New = true;
+ return MM;
+}
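+
+// Editorial sketch (not in the original source): if module B defines a
+// macro for II that overrides module A's (Overrides = {A}), then after
+// addModuleMacro A's NumOverriddenBy becomes 1, A is pruned from
+// LeafModuleMacros[II], and B is appended as the sole leaf; a module C
+// overriding B later repeats the same pruning.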
+
+ModuleMacro *Preprocessor::getModuleMacro(Module *Mod, IdentifierInfo *II) {
+ llvm::FoldingSetNodeID ID;
+ ModuleMacro::Profile(ID, Mod, II);
+
+ void *InsertPos;
+ return ModuleMacros.FindNodeOrInsertPos(ID, InsertPos);
+}
+
+void Preprocessor::updateModuleMacroInfo(const IdentifierInfo *II,
+ ModuleMacroInfo &Info) {
+ assert(Info.ActiveModuleMacrosGeneration !=
+ CurSubmoduleState->VisibleModules.getGeneration() &&
+ "don't need to update this macro name info");
+ Info.ActiveModuleMacrosGeneration =
+ CurSubmoduleState->VisibleModules.getGeneration();
+
+ auto Leaf = LeafModuleMacros.find(II);
+ if (Leaf == LeafModuleMacros.end()) {
+ // No imported macros at all: nothing to do.
+ return;
+ }
+
+ Info.ActiveModuleMacros.clear();
+
+ // Every macro that's locally overridden is overridden by a visible macro.
+ llvm::DenseMap<ModuleMacro *, int> NumHiddenOverrides;
+ for (auto *O : Info.OverriddenMacros)
+ NumHiddenOverrides[O] = -1;
+
+ // Collect all macros that are not overridden by a visible macro.
+ llvm::SmallVector<ModuleMacro *, 16> Worklist;
+ for (auto *LeafMM : Leaf->second) {
+ assert(LeafMM->getNumOverridingMacros() == 0 && "leaf macro overridden");
+ if (NumHiddenOverrides.lookup(LeafMM) == 0)
+ Worklist.push_back(LeafMM);
+ }
+ while (!Worklist.empty()) {
+ auto *MM = Worklist.pop_back_val();
+ if (CurSubmoduleState->VisibleModules.isVisible(MM->getOwningModule())) {
+ // We only care about collecting definitions; undefinitions only act
+ // to override other definitions.
+ if (MM->getMacroInfo())
+ Info.ActiveModuleMacros.push_back(MM);
+ } else {
+ for (auto *O : MM->overrides())
+ if ((unsigned)++NumHiddenOverrides[O] == O->getNumOverridingMacros())
+ Worklist.push_back(O);
+ }
+ }
+ // Our reverse postorder walk found the macros in reverse order.
+ std::reverse(Info.ActiveModuleMacros.begin(), Info.ActiveModuleMacros.end());
+
+ // Determine whether the macro name is ambiguous.
+ MacroInfo *MI = nullptr;
+ bool IsSystemMacro = true;
+ bool IsAmbiguous = false;
+ if (auto *MD = Info.MD) {
+ while (MD && isa<VisibilityMacroDirective>(MD))
+ MD = MD->getPrevious();
+ if (auto *DMD = dyn_cast_or_null<DefMacroDirective>(MD)) {
+ MI = DMD->getInfo();
+ IsSystemMacro &= SourceMgr.isInSystemHeader(DMD->getLocation());
+ }
+ }
+ for (auto *Active : Info.ActiveModuleMacros) {
+ auto *NewMI = Active->getMacroInfo();
+
+ // Before marking the macro as ambiguous, check if this is a case where
+ // both macros are in system headers. If so, we trust that the system
+ // did not get it wrong. This also handles cases where Clang's own
+ // headers have a different spelling of certain system macros:
+ // #define LONG_MAX __LONG_MAX__ (clang's limits.h)
+ // #define LONG_MAX 0x7fffffffffffffffL (system's limits.h)
+ //
+ // FIXME: Remove the defined-in-system-headers check. clang's limits.h
+ // overrides the system limits.h's macros, so there's no conflict here.
+ if (MI && NewMI != MI &&
+ !MI->isIdenticalTo(*NewMI, *this, /*Syntactically=*/true))
+ IsAmbiguous = true;
+ IsSystemMacro &= Active->getOwningModule()->IsSystem ||
+ SourceMgr.isInSystemHeader(NewMI->getDefinitionLoc());
+ MI = NewMI;
+ }
+ Info.IsAmbiguous = IsAmbiguous && !IsSystemMacro;
+}
+
+void Preprocessor::dumpMacroInfo(const IdentifierInfo *II) {
+ ArrayRef<ModuleMacro*> Leaf;
+ auto LeafIt = LeafModuleMacros.find(II);
+ if (LeafIt != LeafModuleMacros.end())
+ Leaf = LeafIt->second;
+ const MacroState *State = nullptr;
+ auto Pos = CurSubmoduleState->Macros.find(II);
+ if (Pos != CurSubmoduleState->Macros.end())
+ State = &Pos->second;
+
+ llvm::errs() << "MacroState " << State << " " << II->getNameStart();
+ if (State && State->isAmbiguous(*this, II))
+ llvm::errs() << " ambiguous";
+ if (State && !State->getOverriddenMacros().empty()) {
+ llvm::errs() << " overrides";
+ for (auto *O : State->getOverriddenMacros())
+ llvm::errs() << " " << O->getOwningModule()->getFullModuleName();
+ }
+ llvm::errs() << "\n";
+
+ // Dump local macro directives.
+ for (auto *MD = State ? State->getLatest() : nullptr; MD;
+ MD = MD->getPrevious()) {
+ llvm::errs() << " ";
+ MD->dump();
+ }
+
+ // Dump module macros.
+ llvm::DenseSet<ModuleMacro*> Active;
+ for (auto *MM : State ? State->getActiveModuleMacros(*this, II) : None)
+ Active.insert(MM);
+ llvm::DenseSet<ModuleMacro*> Visited;
+ llvm::SmallVector<ModuleMacro *, 16> Worklist(Leaf.begin(), Leaf.end());
+ while (!Worklist.empty()) {
+ auto *MM = Worklist.pop_back_val();
+ llvm::errs() << " ModuleMacro " << MM << " "
+ << MM->getOwningModule()->getFullModuleName();
+ if (!MM->getMacroInfo())
+ llvm::errs() << " undef";
+
+ if (Active.count(MM))
+ llvm::errs() << " active";
+ else if (!CurSubmoduleState->VisibleModules.isVisible(
+ MM->getOwningModule()))
+ llvm::errs() << " hidden";
+ else if (MM->getMacroInfo())
+ llvm::errs() << " overridden";
+
+ if (!MM->overrides().empty()) {
+ llvm::errs() << " overrides";
+ for (auto *O : MM->overrides()) {
+ llvm::errs() << " " << O->getOwningModule()->getFullModuleName();
+ if (Visited.insert(O).second)
+ Worklist.push_back(O);
+ }
+ }
+ llvm::errs() << "\n";
+ if (auto *MI = MM->getMacroInfo()) {
+ llvm::errs() << " ";
+ MI->dump();
+ llvm::errs() << "\n";
+ }
+ }
+}
+
+/// RegisterBuiltinMacro - Register the specified identifier in the identifier
+/// table and mark it as a builtin macro to be expanded.
+static IdentifierInfo *RegisterBuiltinMacro(Preprocessor &PP, const char *Name){
+ // Get the identifier.
+ IdentifierInfo *Id = PP.getIdentifierInfo(Name);
+
+ // Mark it as being a macro that is builtin.
+ MacroInfo *MI = PP.AllocateMacroInfo(SourceLocation());
+ MI->setIsBuiltinMacro();
+ PP.appendDefMacroDirective(Id, MI);
+ return Id;
+}
+
+
+/// RegisterBuiltinMacros - Register builtin macros, such as __LINE__ with the
+/// identifier table.
+void Preprocessor::RegisterBuiltinMacros() {
+ Ident__LINE__ = RegisterBuiltinMacro(*this, "__LINE__");
+ Ident__FILE__ = RegisterBuiltinMacro(*this, "__FILE__");
+ Ident__DATE__ = RegisterBuiltinMacro(*this, "__DATE__");
+ Ident__TIME__ = RegisterBuiltinMacro(*this, "__TIME__");
+ Ident__COUNTER__ = RegisterBuiltinMacro(*this, "__COUNTER__");
+ Ident_Pragma = RegisterBuiltinMacro(*this, "_Pragma");
+
+ // C++ Standing Document Extensions.
+ if (LangOpts.CPlusPlus)
+ Ident__has_cpp_attribute =
+ RegisterBuiltinMacro(*this, "__has_cpp_attribute");
+ else
+ Ident__has_cpp_attribute = nullptr;
+
+ // GCC Extensions.
+ Ident__BASE_FILE__ = RegisterBuiltinMacro(*this, "__BASE_FILE__");
+ Ident__INCLUDE_LEVEL__ = RegisterBuiltinMacro(*this, "__INCLUDE_LEVEL__");
+ Ident__TIMESTAMP__ = RegisterBuiltinMacro(*this, "__TIMESTAMP__");
+
+ // Microsoft Extensions.
+ if (LangOpts.MicrosoftExt) {
+ Ident__identifier = RegisterBuiltinMacro(*this, "__identifier");
+ Ident__pragma = RegisterBuiltinMacro(*this, "__pragma");
+ } else {
+ Ident__identifier = nullptr;
+ Ident__pragma = nullptr;
+ }
+
+ // Clang Extensions.
+ Ident__has_feature = RegisterBuiltinMacro(*this, "__has_feature");
+ Ident__has_extension = RegisterBuiltinMacro(*this, "__has_extension");
+ Ident__has_builtin = RegisterBuiltinMacro(*this, "__has_builtin");
+ Ident__has_attribute = RegisterBuiltinMacro(*this, "__has_attribute");
+ Ident__has_declspec = RegisterBuiltinMacro(*this, "__has_declspec_attribute");
+ Ident__has_include = RegisterBuiltinMacro(*this, "__has_include");
+ Ident__has_include_next = RegisterBuiltinMacro(*this, "__has_include_next");
+ Ident__has_warning = RegisterBuiltinMacro(*this, "__has_warning");
+ Ident__is_identifier = RegisterBuiltinMacro(*this, "__is_identifier");
+
+ // Modules.
+ if (LangOpts.Modules) {
+ Ident__building_module = RegisterBuiltinMacro(*this, "__building_module");
+
+ // __MODULE__
+ if (!LangOpts.CurrentModule.empty())
+ Ident__MODULE__ = RegisterBuiltinMacro(*this, "__MODULE__");
+ else
+ Ident__MODULE__ = nullptr;
+ } else {
+ Ident__building_module = nullptr;
+ Ident__MODULE__ = nullptr;
+ }
+}
+
+/// isTrivialSingleTokenExpansion - Return true if MI, which has a single token
+/// in its expansion, currently expands to that token literally.
+static bool isTrivialSingleTokenExpansion(const MacroInfo *MI,
+ const IdentifierInfo *MacroIdent,
+ Preprocessor &PP) {
+ IdentifierInfo *II = MI->getReplacementToken(0).getIdentifierInfo();
+
+ // If the token isn't an identifier, it's always literally expanded.
+ if (!II) return true;
+
+ // If the information about this identifier is out of date, update it from
+ // the external source.
+ if (II->isOutOfDate())
+ PP.getExternalSource()->updateOutOfDateIdentifier(*II);
+
+ // If the identifier is a macro, and if that macro is enabled, it may be
+ // expanded so it's not a trivial expansion.
+ if (auto *ExpansionMI = PP.getMacroInfo(II))
+ if (ExpansionMI->isEnabled() &&
+ // Fast expanding "#define X X" is ok, because X would be disabled.
+ II != MacroIdent)
+ return false;
+
+ // If this is an object-like macro invocation, it is safe to trivially expand
+ // it.
+ if (MI->isObjectLike()) return true;
+
+ // If this is a function-like macro invocation, it's safe to trivially expand
+ // as long as the identifier is not a macro argument.
+ return std::find(MI->arg_begin(), MI->arg_end(), II) == MI->arg_end();
+
+}
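+
+// Editorial examples (not in the original source): "#define VAL 42" is
+// trivially expandable (the replacement token is not an identifier);
+// "#define X X" is trivial because X is disabled while expanding X; but
+// for "#define F(a) a" the single replacement token is the parameter 'a',
+// so F is not a trivial expansion.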
+
+
+/// isNextPPTokenLParen - Determine whether the next preprocessor token to be
+/// lexed is a '('. If so, consume the token and return true, if not, this
+/// method should have no observable side-effect on the lexed tokens.
+bool Preprocessor::isNextPPTokenLParen() {
+ // Do some quick tests for rejection cases.
+ unsigned Val;
+ if (CurLexer)
+ Val = CurLexer->isNextPPTokenLParen();
+ else if (CurPTHLexer)
+ Val = CurPTHLexer->isNextPPTokenLParen();
+ else
+ Val = CurTokenLexer->isNextTokenLParen();
+
+ if (Val == 2) {
+ // We have run off the end. If it's a source file we don't
+ // examine enclosing ones (C99 5.1.1.2p4). Otherwise walk up the
+ // macro stack.
+ if (CurPPLexer)
+ return false;
+ for (unsigned i = IncludeMacroStack.size(); i != 0; --i) {
+ IncludeStackInfo &Entry = IncludeMacroStack[i-1];
+ if (Entry.TheLexer)
+ Val = Entry.TheLexer->isNextPPTokenLParen();
+ else if (Entry.ThePTHLexer)
+ Val = Entry.ThePTHLexer->isNextPPTokenLParen();
+ else
+ Val = Entry.TheTokenLexer->isNextTokenLParen();
+
+ if (Val != 2)
+ break;
+
+ // Ran off the end of a source file?
+ if (Entry.ThePPLexer)
+ return false;
+ }
+ }
+
+ // Okay, if we know that the token is a '(', lex it and return. Otherwise we
+ // have found something that isn't a '(' or we found the end of the
+ // translation unit. In either case, return false.
+ return Val == 1;
+}
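+
+// Editorial example (not in the original source): given "#define F(x) x",
+// the invocation "F (1)" makes this return true (whitespace before the '('
+// is irrelevant), while in "F + 1" no '(' follows, so F is not treated as
+// a function-like macro invocation and the identifier is left alone.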
+
+/// HandleMacroExpandedIdentifier - If an identifier token is read that is to be
+/// expanded as a macro, handle it and return the next token as 'Identifier'.
+bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
+ const MacroDefinition &M) {
+ MacroInfo *MI = M.getMacroInfo();
+
+ // If this is a macro expansion in the "#if !defined(x)" line for the file,
+ // then the macro could expand to different things in other contexts, so we
+ // need to disable the optimization in this case.
+ if (CurPPLexer) CurPPLexer->MIOpt.ExpandedMacro();
+
+ // If this is a builtin macro, like __LINE__ or _Pragma, handle it specially.
+ if (MI->isBuiltinMacro()) {
+ if (Callbacks)
+ Callbacks->MacroExpands(Identifier, M, Identifier.getLocation(),
+ /*Args=*/nullptr);
+ ExpandBuiltinMacro(Identifier);
+ return true;
+ }
+
+ /// Args - If this is a function-like macro expansion, this contains,
+ /// for each macro argument, the list of tokens that were provided to the
+ /// invocation.
+ MacroArgs *Args = nullptr;
+
+ // Remember where the end of the expansion occurred. For an object-like
+ // macro, this is the identifier. For a function-like macro, this is the ')'.
+ SourceLocation ExpansionEnd = Identifier.getLocation();
+
+ // If this is a function-like macro, read the arguments.
+ if (MI->isFunctionLike()) {
+ // Remember that we are now parsing the arguments to a macro invocation.
+ // Preprocessor directives used inside macro arguments are not portable, and
+ // this enables the warning.
+ InMacroArgs = true;
+ Args = ReadFunctionLikeMacroArgs(Identifier, MI, ExpansionEnd);
+
+ // Finished parsing args.
+ InMacroArgs = false;
+
+ // If there was an error parsing the arguments, bail out.
+ if (!Args) return true;
+
+ ++NumFnMacroExpanded;
+ } else {
+ ++NumMacroExpanded;
+ }
+
+ // Notice that this macro has been used.
+ markMacroAsUsed(MI);
+
+ // Remember where the token is expanded.
+ SourceLocation ExpandLoc = Identifier.getLocation();
+ SourceRange ExpansionRange(ExpandLoc, ExpansionEnd);
+
+ if (Callbacks) {
+ if (InMacroArgs) {
+ // We can have macro expansion inside a conditional directive while
+ // reading the function macro arguments. To ensure, in that case, that
+ // MacroExpands callbacks still happen in source order, queue this
+ // callback to have it happen after the function macro callback.
+ DelayedMacroExpandsCallbacks.push_back(
+ MacroExpandsInfo(Identifier, M, ExpansionRange));
+ } else {
+ Callbacks->MacroExpands(Identifier, M, ExpansionRange, Args);
+ if (!DelayedMacroExpandsCallbacks.empty()) {
+ for (unsigned i=0, e = DelayedMacroExpandsCallbacks.size(); i!=e; ++i) {
+ MacroExpandsInfo &Info = DelayedMacroExpandsCallbacks[i];
+ // FIXME: We lose macro args info with delayed callback.
+ Callbacks->MacroExpands(Info.Tok, Info.MD, Info.Range,
+ /*Args=*/nullptr);
+ }
+ DelayedMacroExpandsCallbacks.clear();
+ }
+ }
+ }
+
+ // If the macro definition is ambiguous, complain.
+ if (M.isAmbiguous()) {
+ Diag(Identifier, diag::warn_pp_ambiguous_macro)
+ << Identifier.getIdentifierInfo();
+ Diag(MI->getDefinitionLoc(), diag::note_pp_ambiguous_macro_chosen)
+ << Identifier.getIdentifierInfo();
+ M.forAllDefinitions([&](const MacroInfo *OtherMI) {
+ if (OtherMI != MI)
+ Diag(OtherMI->getDefinitionLoc(), diag::note_pp_ambiguous_macro_other)
+ << Identifier.getIdentifierInfo();
+ });
+ }
+
+ // If we started lexing a macro, enter the macro expansion body.
+
+ // If this macro expands to no tokens, don't bother to push it onto the
+ // expansion stack, only to take it right back off.
+ if (MI->getNumTokens() == 0) {
+ // No need for arg info.
+ if (Args) Args->destroy(*this);
+
+ // Propagate whitespace info as if we had pushed, then popped,
+ // a macro context.
+ Identifier.setFlag(Token::LeadingEmptyMacro);
+ PropagateLineStartLeadingSpaceInfo(Identifier);
+ ++NumFastMacroExpanded;
+ return false;
+ } else if (MI->getNumTokens() == 1 &&
+ isTrivialSingleTokenExpansion(MI, Identifier.getIdentifierInfo(),
+ *this)) {
+ // Otherwise, if this macro expands into a single trivially-expanded
+ // token: expand it now. This handles common cases like
+ // "#define VAL 42".
+
+ // No need for arg info.
+ if (Args) Args->destroy(*this);
+
+ // Propagate the isAtStartOfLine/hasLeadingSpace markers of the macro
+ // identifier to the expanded token.
+ bool isAtStartOfLine = Identifier.isAtStartOfLine();
+ bool hasLeadingSpace = Identifier.hasLeadingSpace();
+
+ // Replace the result token.
+ Identifier = MI->getReplacementToken(0);
+
+ // Restore the StartOfLine/LeadingSpace markers.
+ Identifier.setFlagValue(Token::StartOfLine , isAtStartOfLine);
+ Identifier.setFlagValue(Token::LeadingSpace, hasLeadingSpace);
+
+ // Update the token's location to include both its expansion and physical
+ // locations.
+ SourceLocation Loc =
+ SourceMgr.createExpansionLoc(Identifier.getLocation(), ExpandLoc,
+ ExpansionEnd,Identifier.getLength());
+ Identifier.setLocation(Loc);
+
+ // If this is a disabled macro or #define X X, we must mark the result as
+ // unexpandable.
+ if (IdentifierInfo *NewII = Identifier.getIdentifierInfo()) {
+ if (MacroInfo *NewMI = getMacroInfo(NewII))
+ if (!NewMI->isEnabled() || NewMI == MI) {
+ Identifier.setFlag(Token::DisableExpand);
+ // Don't warn for "#define X X" like "#define bool bool" from
+ // stdbool.h.
+ if (NewMI != MI || MI->isFunctionLike())
+ Diag(Identifier, diag::pp_disabled_macro_expansion);
+ }
+ }
+
+ // Since this is not an identifier token, it can't be macro expanded, so
+ // we're done.
+ ++NumFastMacroExpanded;
+ return true;
+ }
+
+ // Start expanding the macro.
+ EnterMacro(Identifier, ExpansionEnd, MI, Args);
+ return false;
+}
+
+enum Bracket {
+ Brace,
+ Paren
+};
+
+/// CheckMatchedBrackets - Returns true if the braces and parentheses in the
+/// token vector are properly nested.
+static bool CheckMatchedBrackets(const SmallVectorImpl<Token> &Tokens) {
+ SmallVector<Bracket, 8> Brackets;
+ for (SmallVectorImpl<Token>::const_iterator I = Tokens.begin(),
+ E = Tokens.end();
+ I != E; ++I) {
+ if (I->is(tok::l_paren)) {
+ Brackets.push_back(Paren);
+ } else if (I->is(tok::r_paren)) {
+ if (Brackets.empty() || Brackets.back() == Brace)
+ return false;
+ Brackets.pop_back();
+ } else if (I->is(tok::l_brace)) {
+ Brackets.push_back(Brace);
+ } else if (I->is(tok::r_brace)) {
+ if (Brackets.empty() || Brackets.back() == Paren)
+ return false;
+ Brackets.pop_back();
+ }
+ }
+ return Brackets.empty();
+}
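+
+// Editorial examples (not in the original source): the token streams
+// "( { } )" and "{ ( ) } ( )" are properly nested and return true;
+// "( { ) }" returns false because the r_paren is seen while a Brace is on
+// top of the bracket stack.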
+
+/// GenerateNewArgTokens - Returns true if OldTokens can be converted to a new
+/// vector of tokens in NewTokens. The new number of arguments will be placed
+/// in NumArgs and the ranges which need to be surrounded in parentheses will
+/// be in ParenHints.
+/// Returns false if the token stream cannot be changed. If this is because
+/// of an initializer list starting a macro argument, the ranges of those
+/// initializer lists will be placed in InitLists.
+static bool GenerateNewArgTokens(Preprocessor &PP,
+ SmallVectorImpl<Token> &OldTokens,
+ SmallVectorImpl<Token> &NewTokens,
+ unsigned &NumArgs,
+ SmallVectorImpl<SourceRange> &ParenHints,
+ SmallVectorImpl<SourceRange> &InitLists) {
+ if (!CheckMatchedBrackets(OldTokens))
+ return false;
+
+ // Once it is known that the brackets are matched, only a simple count of the
+ // braces is needed.
+ unsigned Braces = 0;
+
+ // First token of a new macro argument.
+ SmallVectorImpl<Token>::iterator ArgStartIterator = OldTokens.begin();
+
+ // First closing brace in a new macro argument. Used to generate
+ // SourceRanges for InitLists.
+ SmallVectorImpl<Token>::iterator ClosingBrace = OldTokens.end();
+ NumArgs = 0;
+ Token TempToken;
+ // Set to true when a macro separator token is found inside a braced list.
+ // If true, the fixed argument spans multiple old arguments and ParenHints
+ // will be updated.
+ bool FoundSeparatorToken = false;
+ for (SmallVectorImpl<Token>::iterator I = OldTokens.begin(),
+ E = OldTokens.end();
+ I != E; ++I) {
+ if (I->is(tok::l_brace)) {
+ ++Braces;
+ } else if (I->is(tok::r_brace)) {
+ --Braces;
+ if (Braces == 0 && ClosingBrace == E && FoundSeparatorToken)
+ ClosingBrace = I;
+ } else if (I->is(tok::eof)) {
+ // EOF token is used to separate macro arguments
+ if (Braces != 0) {
+ // Assume comma separator is actually braced list separator and change
+ // it back to a comma.
+ FoundSeparatorToken = true;
+ I->setKind(tok::comma);
+ I->setLength(1);
+ } else { // Braces == 0
+ // Separator token still separates arguments.
+ ++NumArgs;
+
+ // If the argument starts with a brace, it can't be fixed with
+ // parentheses. A different diagnostic will be given.
+ if (FoundSeparatorToken && ArgStartIterator->is(tok::l_brace)) {
+ InitLists.push_back(
+ SourceRange(ArgStartIterator->getLocation(),
+ PP.getLocForEndOfToken(ClosingBrace->getLocation())));
+ ClosingBrace = E;
+ }
+
+ // Add left paren
+ if (FoundSeparatorToken) {
+ TempToken.startToken();
+ TempToken.setKind(tok::l_paren);
+ TempToken.setLocation(ArgStartIterator->getLocation());
+ TempToken.setLength(0);
+ NewTokens.push_back(TempToken);
+ }
+
+ // Copy over argument tokens
+ NewTokens.insert(NewTokens.end(), ArgStartIterator, I);
+
+ // Add right paren and store the paren locations in ParenHints
+ if (FoundSeparatorToken) {
+ SourceLocation Loc = PP.getLocForEndOfToken((I - 1)->getLocation());
+ TempToken.startToken();
+ TempToken.setKind(tok::r_paren);
+ TempToken.setLocation(Loc);
+ TempToken.setLength(0);
+ NewTokens.push_back(TempToken);
+ ParenHints.push_back(SourceRange(ArgStartIterator->getLocation(),
+ Loc));
+ }
+
+ // Copy separator token
+ NewTokens.push_back(*I);
+
+ // Reset values
+ ArgStartIterator = I + 1;
+ FoundSeparatorToken = false;
+ }
+ }
+ }
+
+ return !ParenHints.empty() && InitLists.empty();
+}
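+
+// Editorial worked example (not in the original source): for
+// "#define M(x)" invoked as "M(a = {1, 2})", the comma inside the braces
+// was treated as an argument separator, so the old argument list is
+// {"a = {1", "2}"}; this routine rejoins them into the single argument
+// "a = {1, 2}" and records a ParenHint suggesting "M((a = {1, 2}))". If
+// the argument itself starts with an l_brace, as in "M({1, 2})", no paren
+// fix-it is possible and its range goes to InitLists instead.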
+
+/// ReadFunctionLikeMacroArgs - After reading "MACRO" and knowing that the next
+/// token is the '(' of the macro, this method is invoked to read all of the
+/// actual arguments specified for the macro invocation. This returns null on
+/// error.
+MacroArgs *Preprocessor::ReadFunctionLikeMacroArgs(Token &MacroName,
+ MacroInfo *MI,
+ SourceLocation &MacroEnd) {
+ // The number of fixed arguments to parse.
+ unsigned NumFixedArgsLeft = MI->getNumArgs();
+ bool isVariadic = MI->isVariadic();
+
+ // Outer loop: while there are more arguments, keep reading them.
+ Token Tok;
+
+ // Read arguments as unexpanded tokens. This avoids issues, e.g., where
+ // an argument value in a macro could expand to ',' or '(' or ')'.
+ LexUnexpandedToken(Tok);
+ assert(Tok.is(tok::l_paren) && "Error computing l-paren-ness?");
+
+ // ArgTokens - Build up a list of tokens that make up each argument. Each
+ // argument is separated by an EOF token. Use a SmallVector so we can avoid
+ // heap allocations in the common case.
+ SmallVector<Token, 64> ArgTokens;
+ bool ContainsCodeCompletionTok = false;
+
+ SourceLocation TooManyArgsLoc;
+
+ unsigned NumActuals = 0;
+ while (Tok.isNot(tok::r_paren)) {
+ if (ContainsCodeCompletionTok && Tok.isOneOf(tok::eof, tok::eod))
+ break;
+
+ assert(Tok.isOneOf(tok::l_paren, tok::comma) &&
+ "only expect argument separators here");
+
+ unsigned ArgTokenStart = ArgTokens.size();
+ SourceLocation ArgStartLoc = Tok.getLocation();
+
+ // C99 6.10.3p11: Keep track of the number of l_parens we have seen. Note
+ // that we already consumed the first one.
+ unsigned NumParens = 0;
+
+ while (1) {
+ // Read arguments as unexpanded tokens. This avoids issues, e.g., where
+ // an argument value in a macro could expand to ',' or '(' or ')'.
+ LexUnexpandedToken(Tok);
+
+ if (Tok.isOneOf(tok::eof, tok::eod)) { // "#if f(<eof>" & "#if f(\n"
+ if (!ContainsCodeCompletionTok) {
+ Diag(MacroName, diag::err_unterm_macro_invoc);
+ Diag(MI->getDefinitionLoc(), diag::note_macro_here)
+ << MacroName.getIdentifierInfo();
+ // Do not lose the EOF/EOD. Return it to the client.
+ MacroName = Tok;
+ return nullptr;
+ } else {
+ // Do not lose the EOF/EOD.
+ Token *Toks = new Token[1];
+ Toks[0] = Tok;
+ EnterTokenStream(Toks, 1, true, true);
+ break;
+ }
+ } else if (Tok.is(tok::r_paren)) {
+ // If we found the ) token, the macro arg list is done.
+ if (NumParens-- == 0) {
+ MacroEnd = Tok.getLocation();
+ break;
+ }
+ } else if (Tok.is(tok::l_paren)) {
+ ++NumParens;
+ } else if (Tok.is(tok::comma) && NumParens == 0 &&
+ !(Tok.getFlags() & Token::IgnoredComma)) {
+ // In Microsoft-compatibility mode, single commas from nested macro
+ // expansions should not be considered as argument separators. We test
+ // for this with the IgnoredComma token flag above.
+
+ // Comma ends this argument if there are more fixed arguments expected.
+ // However, if this is a variadic macro, and this is part of the
+ // variadic part, then the comma is just an argument token.
+ if (!isVariadic) break;
+ if (NumFixedArgsLeft > 1)
+ break;
+ } else if (Tok.is(tok::comment) && !KeepMacroComments) {
+ // If this is a comment token in the argument list and we're just in
+ // -C mode (not -CC mode), discard the comment.
+ continue;
+ } else if (!Tok.isAnnotation() && Tok.getIdentifierInfo() != nullptr) {
+ // Reading macro arguments can cause macros that we are currently
+ // expanding from to be popped off the expansion stack. Doing so causes
+ // them to be reenabled for expansion. Here we record whether any
+ // identifiers we lex as macro arguments correspond to disabled macros.
+ // If so, we mark the token as noexpand. This is a subtle aspect of
+ // C99 6.10.3.4p2.
+ if (MacroInfo *MI = getMacroInfo(Tok.getIdentifierInfo()))
+ if (!MI->isEnabled())
+ Tok.setFlag(Token::DisableExpand);
+ } else if (Tok.is(tok::code_completion)) {
+ ContainsCodeCompletionTok = true;
+ if (CodeComplete)
+ CodeComplete->CodeCompleteMacroArgument(MacroName.getIdentifierInfo(),
+ MI, NumActuals);
+ // Don't mark that we reached the code-completion point because the
+ // parser is going to handle the token and there will be another
+ // code-completion callback.
+ }
+
+ ArgTokens.push_back(Tok);
+ }
+
+ // If this was an empty argument list foo(), don't add this as an empty
+ // argument.
+ if (ArgTokens.empty() && Tok.getKind() == tok::r_paren)
+ break;
+
+ // If this is not a variadic macro, and too many args were specified, emit
+ // an error.
+ if (!isVariadic && NumFixedArgsLeft == 0 && TooManyArgsLoc.isInvalid()) {
+ if (ArgTokens.size() != ArgTokenStart)
+ TooManyArgsLoc = ArgTokens[ArgTokenStart].getLocation();
+ else
+ TooManyArgsLoc = ArgStartLoc;
+ }
+
+ // Empty arguments are standard in C99 and C++11, and are supported as an
+ // extension in other modes.
+ if (ArgTokens.size() == ArgTokenStart && !LangOpts.C99)
+ Diag(Tok, LangOpts.CPlusPlus11 ?
+ diag::warn_cxx98_compat_empty_fnmacro_arg :
+ diag::ext_empty_fnmacro_arg);
+
+ // Add a marker EOF token to the end of the token list for this argument.
+ Token EOFTok;
+ EOFTok.startToken();
+ EOFTok.setKind(tok::eof);
+ EOFTok.setLocation(Tok.getLocation());
+ EOFTok.setLength(0);
+ ArgTokens.push_back(EOFTok);
+ ++NumActuals;
+ if (!ContainsCodeCompletionTok && NumFixedArgsLeft != 0)
+ --NumFixedArgsLeft;
+ }
+
+ // Okay, we either found the r_paren or, during code completion, ran out
+ // of tokens. Check whether the number of parsed arguments is what the
+ // macro expects.
+ unsigned MinArgsExpected = MI->getNumArgs();
+
+ // If this is not a variadic macro, and too many args were specified, emit
+ // an error.
+ if (!isVariadic && NumActuals > MinArgsExpected &&
+ !ContainsCodeCompletionTok) {
+ // Emit the diagnostic at the macro name in case there is a missing ).
+ // Emitting it at the , could be far away from the macro name.
+ Diag(TooManyArgsLoc, diag::err_too_many_args_in_macro_invoc);
+ Diag(MI->getDefinitionLoc(), diag::note_macro_here)
+ << MacroName.getIdentifierInfo();
+
+ // Commas from braced initializer lists will be treated as argument
+ // separators inside macros. Attempt to correct for this with parentheses.
+ // TODO: See if this can be generalized to angle brackets for templates
+ // inside macro arguments.
+
+ SmallVector<Token, 4> FixedArgTokens;
+ unsigned FixedNumArgs = 0;
+ SmallVector<SourceRange, 4> ParenHints, InitLists;
+ if (!GenerateNewArgTokens(*this, ArgTokens, FixedArgTokens, FixedNumArgs,
+ ParenHints, InitLists)) {
+ if (!InitLists.empty()) {
+ DiagnosticBuilder DB =
+ Diag(MacroName,
+ diag::note_init_list_at_beginning_of_macro_argument);
+ for (SourceRange Range : InitLists)
+ DB << Range;
+ }
+ return nullptr;
+ }
+ if (FixedNumArgs != MinArgsExpected)
+ return nullptr;
+
+ DiagnosticBuilder DB = Diag(MacroName, diag::note_suggest_parens_for_macro);
+ for (SourceRange ParenLocation : ParenHints) {
+ DB << FixItHint::CreateInsertion(ParenLocation.getBegin(), "(");
+ DB << FixItHint::CreateInsertion(ParenLocation.getEnd(), ")");
+ }
+ ArgTokens.swap(FixedArgTokens);
+ NumActuals = FixedNumArgs;
+ }
+
+ // See MacroArgs instance var for description of this.
+ bool isVarargsElided = false;
+
+ if (ContainsCodeCompletionTok) {
+ // Recover from not-fully-formed macro invocation during code-completion.
+ Token EOFTok;
+ EOFTok.startToken();
+ EOFTok.setKind(tok::eof);
+ EOFTok.setLocation(Tok.getLocation());
+ EOFTok.setLength(0);
+ for (; NumActuals < MinArgsExpected; ++NumActuals)
+ ArgTokens.push_back(EOFTok);
+ }
+
+ if (NumActuals < MinArgsExpected) {
+ // There are several cases where too few arguments is ok, handle them now.
+ if (NumActuals == 0 && MinArgsExpected == 1) {
+ // #define A(X) or #define A(...) ---> A()
+
+ // If there is exactly one argument, and that argument is missing,
+ // then we have an empty "()" argument list. This is fine, even if
+ // the macro expects one argument (the argument is just empty).
+ isVarargsElided = MI->isVariadic();
+ } else if (MI->isVariadic() &&
+ (NumActuals+1 == MinArgsExpected || // A(x, ...) -> A(X)
+ (NumActuals == 0 && MinArgsExpected == 2))) {// A(x,...) -> A()
+ // Varargs where the named vararg parameter is missing: OK as extension.
+ // #define A(x, ...)
+ // A("blah")
+ //
+ // If the macro contains the comma pasting extension, the diagnostic
+ // is suppressed; we know we'll get another diagnostic later.
+ if (!MI->hasCommaPasting()) {
+ Diag(Tok, diag::ext_missing_varargs_arg);
+ Diag(MI->getDefinitionLoc(), diag::note_macro_here)
+ << MacroName.getIdentifierInfo();
+ }
+
+ // Remember this occurred, allowing us to elide the comma when used for
+ // cases like:
+ // #define A(x, foo...) blah(a, ## foo)
+ // #define B(x, ...) blah(a, ## __VA_ARGS__)
+ // #define C(...) blah(a, ## __VA_ARGS__)
+ // A(x) B(x) C()
+ isVarargsElided = true;
+ } else if (!ContainsCodeCompletionTok) {
+ // Otherwise, emit the error.
+ Diag(Tok, diag::err_too_few_args_in_macro_invoc);
+ Diag(MI->getDefinitionLoc(), diag::note_macro_here)
+ << MacroName.getIdentifierInfo();
+ return nullptr;
+ }
+
+ // Add a marker EOF token to the end of the token list for this argument.
+ SourceLocation EndLoc = Tok.getLocation();
+ Tok.startToken();
+ Tok.setKind(tok::eof);
+ Tok.setLocation(EndLoc);
+ Tok.setLength(0);
+ ArgTokens.push_back(Tok);
+
+ // If we expect two arguments, add both as empty.
+ if (NumActuals == 0 && MinArgsExpected == 2)
+ ArgTokens.push_back(Tok);
+
+ } else if (NumActuals > MinArgsExpected && !MI->isVariadic() &&
+ !ContainsCodeCompletionTok) {
+ // Emit the diagnostic at the macro name in case there is a missing ).
+ // Emitting it at the , could be far away from the macro name.
+ Diag(MacroName, diag::err_too_many_args_in_macro_invoc);
+ Diag(MI->getDefinitionLoc(), diag::note_macro_here)
+ << MacroName.getIdentifierInfo();
+ return nullptr;
+ }
+
+ return MacroArgs::create(MI, ArgTokens, isVarargsElided, *this);
+}
+
+/// \brief Keeps macro expanded tokens for TokenLexers.
+///
+/// Works like a stack; a TokenLexer adds the macro-expanded tokens that it is
+/// going to lex to the cache, and when it finishes, the tokens are removed
+/// from the end of the cache.
+Token *Preprocessor::cacheMacroExpandedTokens(TokenLexer *tokLexer,
+ ArrayRef<Token> tokens) {
+ assert(tokLexer);
+ if (tokens.empty())
+ return nullptr;
+
+ size_t newIndex = MacroExpandedTokens.size();
+ bool cacheNeedsToGrow = tokens.size() >
+ MacroExpandedTokens.capacity()-MacroExpandedTokens.size();
+ MacroExpandedTokens.append(tokens.begin(), tokens.end());
+
+ if (cacheNeedsToGrow) {
+ // Go through all the TokenLexers whose 'Tokens' pointer points in the
+ // buffer and update the pointers to the (potential) new buffer array.
+ for (unsigned i = 0, e = MacroExpandingLexersStack.size(); i != e; ++i) {
+ TokenLexer *prevLexer;
+ size_t tokIndex;
+ std::tie(prevLexer, tokIndex) = MacroExpandingLexersStack[i];
+ prevLexer->Tokens = MacroExpandedTokens.data() + tokIndex;
+ }
+ }
+
+ MacroExpandingLexersStack.push_back(std::make_pair(tokLexer, newIndex));
+ return MacroExpandedTokens.data() + newIndex;
+}
+
+void Preprocessor::removeCachedMacroExpandedTokensOfLastLexer() {
+ assert(!MacroExpandingLexersStack.empty());
+ size_t tokIndex = MacroExpandingLexersStack.back().second;
+ assert(tokIndex < MacroExpandedTokens.size());
+ // Pop the cached macro expanded tokens from the end.
+ MacroExpandedTokens.resize(tokIndex);
+ MacroExpandingLexersStack.pop_back();
+}
+
+/// ComputeDATE_TIME - Compute the current time, enter it into the specified
+/// scratch buffer, then return DATELoc/TIMELoc locations with the position of
+/// the identifier tokens inserted.
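+///
+/// For example, the resulting string tokens are spelled like "\"Mar  7 2016\""
+/// and "\"03:04:05\"", matching the __DATE__ and __TIME__ formats below.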
+static void ComputeDATE_TIME(SourceLocation &DATELoc, SourceLocation &TIMELoc,
+ Preprocessor &PP) {
+ time_t TT = time(nullptr);
+ struct tm *TM = localtime(&TT);
+
+ static const char * const Months[] = {
+ "Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"
+ };
+
+ {
+ SmallString<32> TmpBuffer;
+ llvm::raw_svector_ostream TmpStream(TmpBuffer);
+ TmpStream << llvm::format("\"%s %2d %4d\"", Months[TM->tm_mon],
+ TM->tm_mday, TM->tm_year + 1900);
+ Token TmpTok;
+ TmpTok.startToken();
+ PP.CreateString(TmpStream.str(), TmpTok);
+ DATELoc = TmpTok.getLocation();
+ }
+
+ {
+ SmallString<32> TmpBuffer;
+ llvm::raw_svector_ostream TmpStream(TmpBuffer);
+ TmpStream << llvm::format("\"%02d:%02d:%02d\"",
+ TM->tm_hour, TM->tm_min, TM->tm_sec);
+ Token TmpTok;
+ TmpTok.startToken();
+ PP.CreateString(TmpStream.str(), TmpTok);
+ TIMELoc = TmpTok.getLocation();
+ }
+}
+
+
+/// HasFeature - Return true if we recognize and implement the feature
+/// specified by the identifier as a standard language feature.
+static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
+ const LangOptions &LangOpts = PP.getLangOpts();
+ StringRef Feature = II->getName();
+
+ // Normalize the feature name, __foo__ becomes foo.
+ if (Feature.startswith("__") && Feature.endswith("__") && Feature.size() >= 4)
+ Feature = Feature.substr(2, Feature.size() - 4);
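+  // For example, __has_feature(__cxx_lambdas__) is treated the same as
+  // __has_feature(cxx_lambdas).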
+
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("address_sanitizer",
+ LangOpts.Sanitize.hasOneOf(SanitizerKind::Address |
+ SanitizerKind::KernelAddress))
+ .Case("assume_nonnull", true)
+ .Case("attribute_analyzer_noreturn", true)
+ .Case("attribute_availability", true)
+ .Case("attribute_availability_with_message", true)
+ .Case("attribute_availability_app_extension", true)
+ .Case("attribute_availability_with_version_underscores", true)
+ .Case("attribute_availability_tvos", true)
+ .Case("attribute_availability_watchos", true)
+ .Case("attribute_cf_returns_not_retained", true)
+ .Case("attribute_cf_returns_retained", true)
+ .Case("attribute_cf_returns_on_parameters", true)
+ .Case("attribute_deprecated_with_message", true)
+ .Case("attribute_ext_vector_type", true)
+ .Case("attribute_ns_returns_not_retained", true)
+ .Case("attribute_ns_returns_retained", true)
+ .Case("attribute_ns_consumes_self", true)
+ .Case("attribute_ns_consumed", true)
+ .Case("attribute_cf_consumed", true)
+ .Case("attribute_objc_ivar_unused", true)
+ .Case("attribute_objc_method_family", true)
+ .Case("attribute_overloadable", true)
+ .Case("attribute_unavailable_with_message", true)
+ .Case("attribute_unused_on_fields", true)
+ .Case("blocks", LangOpts.Blocks)
+ .Case("c_thread_safety_attributes", true)
+ .Case("cxx_exceptions", LangOpts.CXXExceptions)
+ .Case("cxx_rtti", LangOpts.RTTI && LangOpts.RTTIData)
+ .Case("enumerator_attributes", true)
+ .Case("nullability", true)
+ .Case("memory_sanitizer", LangOpts.Sanitize.has(SanitizerKind::Memory))
+ .Case("thread_sanitizer", LangOpts.Sanitize.has(SanitizerKind::Thread))
+ .Case("dataflow_sanitizer", LangOpts.Sanitize.has(SanitizerKind::DataFlow))
+ // Objective-C features
+ .Case("objc_arr", LangOpts.ObjCAutoRefCount) // FIXME: REMOVE?
+ .Case("objc_arc", LangOpts.ObjCAutoRefCount)
+ .Case("objc_arc_weak", LangOpts.ObjCWeak)
+ .Case("objc_default_synthesize_properties", LangOpts.ObjC2)
+ .Case("objc_fixed_enum", LangOpts.ObjC2)
+ .Case("objc_instancetype", LangOpts.ObjC2)
+ .Case("objc_kindof", LangOpts.ObjC2)
+ .Case("objc_modules", LangOpts.ObjC2 && LangOpts.Modules)
+ .Case("objc_nonfragile_abi", LangOpts.ObjCRuntime.isNonFragile())
+ .Case("objc_property_explicit_atomic",
+ true) // Does clang support explicit "atomic" keyword?
+ .Case("objc_protocol_qualifier_mangling", true)
+ .Case("objc_weak_class", LangOpts.ObjCRuntime.hasWeakClassImport())
+ .Case("ownership_holds", true)
+ .Case("ownership_returns", true)
+ .Case("ownership_takes", true)
+ .Case("objc_bool", true)
+ .Case("objc_subscripting", LangOpts.ObjCRuntime.isNonFragile())
+ .Case("objc_array_literals", LangOpts.ObjC2)
+ .Case("objc_dictionary_literals", LangOpts.ObjC2)
+ .Case("objc_boxed_expressions", LangOpts.ObjC2)
+ .Case("objc_boxed_nsvalue_expressions", LangOpts.ObjC2)
+ .Case("arc_cf_code_audited", true)
+ .Case("objc_bridge_id", true)
+ .Case("objc_bridge_id_on_typedefs", true)
+ .Case("objc_generics", LangOpts.ObjC2)
+ .Case("objc_generics_variance", LangOpts.ObjC2)
+ // C11 features
+ .Case("c_alignas", LangOpts.C11)
+ .Case("c_alignof", LangOpts.C11)
+ .Case("c_atomic", LangOpts.C11)
+ .Case("c_generic_selections", LangOpts.C11)
+ .Case("c_static_assert", LangOpts.C11)
+ .Case("c_thread_local",
+ LangOpts.C11 && PP.getTargetInfo().isTLSSupported())
+ // C++11 features
+ .Case("cxx_access_control_sfinae", LangOpts.CPlusPlus11)
+ .Case("cxx_alias_templates", LangOpts.CPlusPlus11)
+ .Case("cxx_alignas", LangOpts.CPlusPlus11)
+ .Case("cxx_alignof", LangOpts.CPlusPlus11)
+ .Case("cxx_atomic", LangOpts.CPlusPlus11)
+ .Case("cxx_attributes", LangOpts.CPlusPlus11)
+ .Case("cxx_auto_type", LangOpts.CPlusPlus11)
+ .Case("cxx_constexpr", LangOpts.CPlusPlus11)
+ .Case("cxx_decltype", LangOpts.CPlusPlus11)
+ .Case("cxx_decltype_incomplete_return_types", LangOpts.CPlusPlus11)
+ .Case("cxx_default_function_template_args", LangOpts.CPlusPlus11)
+ .Case("cxx_defaulted_functions", LangOpts.CPlusPlus11)
+ .Case("cxx_delegating_constructors", LangOpts.CPlusPlus11)
+ .Case("cxx_deleted_functions", LangOpts.CPlusPlus11)
+ .Case("cxx_explicit_conversions", LangOpts.CPlusPlus11)
+ .Case("cxx_generalized_initializers", LangOpts.CPlusPlus11)
+ .Case("cxx_implicit_moves", LangOpts.CPlusPlus11)
+ .Case("cxx_inheriting_constructors", LangOpts.CPlusPlus11)
+ .Case("cxx_inline_namespaces", LangOpts.CPlusPlus11)
+ .Case("cxx_lambdas", LangOpts.CPlusPlus11)
+ .Case("cxx_local_type_template_args", LangOpts.CPlusPlus11)
+ .Case("cxx_nonstatic_member_init", LangOpts.CPlusPlus11)
+ .Case("cxx_noexcept", LangOpts.CPlusPlus11)
+ .Case("cxx_nullptr", LangOpts.CPlusPlus11)
+ .Case("cxx_override_control", LangOpts.CPlusPlus11)
+ .Case("cxx_range_for", LangOpts.CPlusPlus11)
+ .Case("cxx_raw_string_literals", LangOpts.CPlusPlus11)
+ .Case("cxx_reference_qualified_functions", LangOpts.CPlusPlus11)
+ .Case("cxx_rvalue_references", LangOpts.CPlusPlus11)
+ .Case("cxx_strong_enums", LangOpts.CPlusPlus11)
+ .Case("cxx_static_assert", LangOpts.CPlusPlus11)
+ .Case("cxx_thread_local",
+ LangOpts.CPlusPlus11 && PP.getTargetInfo().isTLSSupported())
+ .Case("cxx_trailing_return", LangOpts.CPlusPlus11)
+ .Case("cxx_unicode_literals", LangOpts.CPlusPlus11)
+ .Case("cxx_unrestricted_unions", LangOpts.CPlusPlus11)
+ .Case("cxx_user_literals", LangOpts.CPlusPlus11)
+ .Case("cxx_variadic_templates", LangOpts.CPlusPlus11)
+ // C++1y features
+ .Case("cxx_aggregate_nsdmi", LangOpts.CPlusPlus14)
+ .Case("cxx_binary_literals", LangOpts.CPlusPlus14)
+ .Case("cxx_contextual_conversions", LangOpts.CPlusPlus14)
+ .Case("cxx_decltype_auto", LangOpts.CPlusPlus14)
+ .Case("cxx_generic_lambdas", LangOpts.CPlusPlus14)
+ .Case("cxx_init_captures", LangOpts.CPlusPlus14)
+ .Case("cxx_relaxed_constexpr", LangOpts.CPlusPlus14)
+ .Case("cxx_return_type_deduction", LangOpts.CPlusPlus14)
+ .Case("cxx_variable_templates", LangOpts.CPlusPlus14)
+ // C++ TSes
+ //.Case("cxx_runtime_arrays", LangOpts.CPlusPlusTSArrays)
+ //.Case("cxx_concepts", LangOpts.CPlusPlusTSConcepts)
+ // FIXME: Should this be __has_feature or __has_extension?
+ //.Case("raw_invocation_type", LangOpts.CPlusPlus)
+ // Type traits
+ .Case("has_nothrow_assign", LangOpts.CPlusPlus)
+ .Case("has_nothrow_copy", LangOpts.CPlusPlus)
+ .Case("has_nothrow_constructor", LangOpts.CPlusPlus)
+ .Case("has_trivial_assign", LangOpts.CPlusPlus)
+ .Case("has_trivial_copy", LangOpts.CPlusPlus)
+ .Case("has_trivial_constructor", LangOpts.CPlusPlus)
+ .Case("has_trivial_destructor", LangOpts.CPlusPlus)
+ .Case("has_virtual_destructor", LangOpts.CPlusPlus)
+ .Case("is_abstract", LangOpts.CPlusPlus)
+ .Case("is_base_of", LangOpts.CPlusPlus)
+ .Case("is_class", LangOpts.CPlusPlus)
+ .Case("is_constructible", LangOpts.CPlusPlus)
+ .Case("is_convertible_to", LangOpts.CPlusPlus)
+ .Case("is_empty", LangOpts.CPlusPlus)
+ .Case("is_enum", LangOpts.CPlusPlus)
+ .Case("is_final", LangOpts.CPlusPlus)
+ .Case("is_literal", LangOpts.CPlusPlus)
+ .Case("is_standard_layout", LangOpts.CPlusPlus)
+ .Case("is_pod", LangOpts.CPlusPlus)
+ .Case("is_polymorphic", LangOpts.CPlusPlus)
+ .Case("is_sealed", LangOpts.MicrosoftExt)
+ .Case("is_trivial", LangOpts.CPlusPlus)
+ .Case("is_trivially_assignable", LangOpts.CPlusPlus)
+ .Case("is_trivially_constructible", LangOpts.CPlusPlus)
+ .Case("is_trivially_copyable", LangOpts.CPlusPlus)
+ .Case("is_union", LangOpts.CPlusPlus)
+ .Case("modules", LangOpts.Modules)
+ .Case("safe_stack", LangOpts.Sanitize.has(SanitizerKind::SafeStack))
+ .Case("tls", PP.getTargetInfo().isTLSSupported())
+ .Case("underlying_type", LangOpts.CPlusPlus)
+ .Default(false);
+}
+
+/// HasExtension - Return true if we recognize and implement the feature
+/// specified by the identifier, either as an extension or a standard language
+/// feature.
+static bool HasExtension(const Preprocessor &PP, const IdentifierInfo *II) {
+ if (HasFeature(PP, II))
+ return true;
+
+ // If the use of an extension results in an error diagnostic, extensions are
+ // effectively unavailable, so just return false here.
+ if (PP.getDiagnostics().getExtensionHandlingBehavior() >=
+ diag::Severity::Error)
+ return false;
+
+ const LangOptions &LangOpts = PP.getLangOpts();
+ StringRef Extension = II->getName();
+
+ // Normalize the extension name, __foo__ becomes foo.
+ if (Extension.startswith("__") && Extension.endswith("__") &&
+ Extension.size() >= 4)
+ Extension = Extension.substr(2, Extension.size() - 4);
+
+ // Because we inherit the feature list from HasFeature, this string switch
+ // must be less restrictive than HasFeature's.
+ return llvm::StringSwitch<bool>(Extension)
+ // C11 features supported by other languages as extensions.
+ .Case("c_alignas", true)
+ .Case("c_alignof", true)
+ .Case("c_atomic", true)
+ .Case("c_generic_selections", true)
+ .Case("c_static_assert", true)
+ .Case("c_thread_local", PP.getTargetInfo().isTLSSupported())
+ // C++11 features supported by other languages as extensions.
+ .Case("cxx_atomic", LangOpts.CPlusPlus)
+ .Case("cxx_deleted_functions", LangOpts.CPlusPlus)
+ .Case("cxx_explicit_conversions", LangOpts.CPlusPlus)
+ .Case("cxx_inline_namespaces", LangOpts.CPlusPlus)
+ .Case("cxx_local_type_template_args", LangOpts.CPlusPlus)
+ .Case("cxx_nonstatic_member_init", LangOpts.CPlusPlus)
+ .Case("cxx_override_control", LangOpts.CPlusPlus)
+ .Case("cxx_range_for", LangOpts.CPlusPlus)
+ .Case("cxx_reference_qualified_functions", LangOpts.CPlusPlus)
+ .Case("cxx_rvalue_references", LangOpts.CPlusPlus)
+ .Case("cxx_variadic_templates", LangOpts.CPlusPlus)
+ // C++1y features supported by other languages as extensions.
+ .Case("cxx_binary_literals", true)
+ .Case("cxx_init_captures", LangOpts.CPlusPlus11)
+ .Case("cxx_variable_templates", LangOpts.CPlusPlus)
+ .Default(false);
+}
+
+/// EvaluateHasIncludeCommon - Process a '__has_include("path")'
+/// or '__has_include_next("path")' expression.
+/// Returns true if the file is found.
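+///
+/// For example, '__has_include(<stdint.h>)' and '__has_include("config.h")'
+/// are both handled here; __has_include_next differs only in where the
+/// search starts.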
+static bool EvaluateHasIncludeCommon(Token &Tok,
+ IdentifierInfo *II, Preprocessor &PP,
+ const DirectoryLookup *LookupFrom,
+ const FileEntry *LookupFromFile) {
+ // Save the location of the current token. If a '(' is later found, use
+  // that location. If not, use the end of this token's location instead.
+ SourceLocation LParenLoc = Tok.getLocation();
+
+ // These expressions are only allowed within a preprocessor directive.
+ if (!PP.isParsingIfOrElifDirective()) {
+ PP.Diag(LParenLoc, diag::err_pp_directive_required) << II->getName();
+ // Return a valid identifier token.
+ assert(Tok.is(tok::identifier));
+ Tok.setIdentifierInfo(II);
+ return false;
+ }
+
+ // Get '('.
+ PP.LexNonComment(Tok);
+
+ // Ensure we have a '('.
+ if (Tok.isNot(tok::l_paren)) {
+ // No '(', use end of last token.
+ LParenLoc = PP.getLocForEndOfToken(LParenLoc);
+ PP.Diag(LParenLoc, diag::err_pp_expected_after) << II << tok::l_paren;
+ // If the next token looks like a filename or the start of one,
+ // assume it is and process it as such.
+ if (!Tok.is(tok::angle_string_literal) && !Tok.is(tok::string_literal) &&
+ !Tok.is(tok::less))
+ return false;
+ } else {
+ // Save '(' location for possible missing ')' message.
+ LParenLoc = Tok.getLocation();
+
+ if (PP.getCurrentLexer()) {
+ // Get the file name.
+ PP.getCurrentLexer()->LexIncludeFilename(Tok);
+ } else {
+ // We're in a macro, so we can't use LexIncludeFilename; just
+ // grab the next token.
+ PP.Lex(Tok);
+ }
+ }
+
+ // Reserve a buffer to get the spelling.
+ SmallString<128> FilenameBuffer;
+ StringRef Filename;
+ SourceLocation EndLoc;
+
+ switch (Tok.getKind()) {
+ case tok::eod:
+ // If the token kind is EOD, the error has already been diagnosed.
+ return false;
+
+ case tok::angle_string_literal:
+ case tok::string_literal: {
+ bool Invalid = false;
+ Filename = PP.getSpelling(Tok, FilenameBuffer, &Invalid);
+ if (Invalid)
+ return false;
+ break;
+ }
+
+ case tok::less:
+ // This could be a <foo/bar.h> file coming from a macro expansion. In this
+ // case, glue the tokens together into FilenameBuffer and interpret those.
+ FilenameBuffer.push_back('<');
+ if (PP.ConcatenateIncludeName(FilenameBuffer, EndLoc)) {
+ // Let the caller know a <eod> was found by changing the Token kind.
+ Tok.setKind(tok::eod);
+ return false; // Found <eod> but no ">"? Diagnostic already emitted.
+ }
+ Filename = FilenameBuffer;
+ break;
+ default:
+ PP.Diag(Tok.getLocation(), diag::err_pp_expects_filename);
+ return false;
+ }
+
+ SourceLocation FilenameLoc = Tok.getLocation();
+
+ // Get ')'.
+ PP.LexNonComment(Tok);
+
+ // Ensure we have a trailing ).
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(PP.getLocForEndOfToken(FilenameLoc), diag::err_pp_expected_after)
+ << II << tok::r_paren;
+ PP.Diag(LParenLoc, diag::note_matching) << tok::l_paren;
+ return false;
+ }
+
+ bool isAngled = PP.GetIncludeFilenameSpelling(Tok.getLocation(), Filename);
+ // If GetIncludeFilenameSpelling set the start ptr to null, there was an
+ // error.
+ if (Filename.empty())
+ return false;
+
+ // Search include directories.
+ const DirectoryLookup *CurDir;
+ const FileEntry *File =
+ PP.LookupFile(FilenameLoc, Filename, isAngled, LookupFrom, LookupFromFile,
+ CurDir, nullptr, nullptr, nullptr);
+
+ // Get the result value. A result of true means the file exists.
+ return File != nullptr;
+}
+
+/// EvaluateHasInclude - Process a '__has_include("path")' expression.
+/// Returns true if the file is found.
+static bool EvaluateHasInclude(Token &Tok, IdentifierInfo *II,
+ Preprocessor &PP) {
+ return EvaluateHasIncludeCommon(Tok, II, PP, nullptr, nullptr);
+}
+
+/// EvaluateHasIncludeNext - Process '__has_include_next("path")' expression.
+/// Returns true if the file is found.
+static bool EvaluateHasIncludeNext(Token &Tok,
+ IdentifierInfo *II, Preprocessor &PP) {
+ // __has_include_next is like __has_include, except that we start
+ // searching after the current found directory. If we can't do this,
+ // issue a diagnostic.
+ // FIXME: Factor out duplication with
+ // Preprocessor::HandleIncludeNextDirective.
+ const DirectoryLookup *Lookup = PP.GetCurDirLookup();
+ const FileEntry *LookupFromFile = nullptr;
+ if (PP.isInPrimaryFile()) {
+ Lookup = nullptr;
+ PP.Diag(Tok, diag::pp_include_next_in_primary);
+ } else if (PP.getCurrentSubmodule()) {
+ // Start looking up in the directory *after* the one in which the current
+ // file would be found, if any.
+ assert(PP.getCurrentLexer() && "#include_next directive in macro?");
+ LookupFromFile = PP.getCurrentLexer()->getFileEntry();
+ Lookup = nullptr;
+ } else if (!Lookup) {
+ PP.Diag(Tok, diag::pp_include_next_absolute_path);
+ } else {
+ // Start looking up in the next directory.
+ ++Lookup;
+ }
+
+ return EvaluateHasIncludeCommon(Tok, II, PP, Lookup, LookupFromFile);
+}
+
+/// \brief Process __building_module(identifier) expression.
+/// \returns true if we are building the named module, false otherwise.
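+///
+/// For example, '__building_module(mymod)' evaluates to 1 exactly when
+/// LangOpts.CurrentModule is "mymod" (e.g. when compiling with
+/// -fmodule-name=mymod).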
+static bool EvaluateBuildingModule(Token &Tok,
+ IdentifierInfo *II, Preprocessor &PP) {
+ // Get '('.
+ PP.LexNonComment(Tok);
+
+ // Ensure we have a '('.
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok.getLocation(), diag::err_pp_expected_after) << II
+ << tok::l_paren;
+ return false;
+ }
+
+ // Save '(' location for possible missing ')' message.
+ SourceLocation LParenLoc = Tok.getLocation();
+
+ // Get the module name.
+ PP.LexNonComment(Tok);
+
+ // Ensure that we have an identifier.
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::err_expected_id_building_module);
+ return false;
+ }
+
+ bool Result
+ = Tok.getIdentifierInfo()->getName() == PP.getLangOpts().CurrentModule;
+
+ // Get ')'.
+ PP.LexNonComment(Tok);
+
+ // Ensure we have a trailing ).
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::err_pp_expected_after) << II
+ << tok::r_paren;
+ PP.Diag(LParenLoc, diag::note_matching) << tok::l_paren;
+ return false;
+ }
+
+ return Result;
+}
+
+/// ExpandBuiltinMacro - If an identifier token is read that is to be expanded
+/// as a builtin macro, handle it and return the next token as 'Tok'.
+void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
+ // Figure out which token this is.
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ assert(II && "Can't be a macro without id info!");
+
+ // If this is an _Pragma or Microsoft __pragma directive, expand it,
+ // invoke the pragma handler, then lex the token after it.
+ if (II == Ident_Pragma)
+ return Handle_Pragma(Tok);
+ else if (II == Ident__pragma) // in non-MS mode this is null
+ return HandleMicrosoft__pragma(Tok);
+
+ ++NumBuiltinMacroExpanded;
+
+ SmallString<128> TmpBuffer;
+ llvm::raw_svector_ostream OS(TmpBuffer);
+
+ // Set up the return result.
+ Tok.setIdentifierInfo(nullptr);
+ Tok.clearFlag(Token::NeedsCleaning);
+
+ if (II == Ident__LINE__) {
+ // C99 6.10.8: "__LINE__: The presumed line number (within the current
+ // source file) of the current source line (an integer constant)". This can
+ // be affected by #line.
+ SourceLocation Loc = Tok.getLocation();
+
+ // Advance to the location of the first _, this might not be the first byte
+ // of the token if it starts with an escaped newline.
+ Loc = AdvanceToTokenCharacter(Loc, 0);
+
+    // One wrinkle here is that GCC expands __LINE__ to the location of the *end* of
+ // a macro expansion. This doesn't matter for object-like macros, but
+ // can matter for a function-like macro that expands to contain __LINE__.
+ // Skip down through expansion points until we find a file loc for the
+ // end of the expansion history.
+ Loc = SourceMgr.getExpansionRange(Loc).second;
+ PresumedLoc PLoc = SourceMgr.getPresumedLoc(Loc);
+
+ // __LINE__ expands to a simple numeric value.
+ OS << (PLoc.isValid()? PLoc.getLine() : 1);
+ Tok.setKind(tok::numeric_constant);
+ } else if (II == Ident__FILE__ || II == Ident__BASE_FILE__) {
+ // C99 6.10.8: "__FILE__: The presumed name of the current source file (a
+ // character string literal)". This can be affected by #line.
+ PresumedLoc PLoc = SourceMgr.getPresumedLoc(Tok.getLocation());
+
+ // __BASE_FILE__ is a GNU extension that returns the top of the presumed
+ // #include stack instead of the current file.
+ if (II == Ident__BASE_FILE__ && PLoc.isValid()) {
+ SourceLocation NextLoc = PLoc.getIncludeLoc();
+ while (NextLoc.isValid()) {
+ PLoc = SourceMgr.getPresumedLoc(NextLoc);
+ if (PLoc.isInvalid())
+ break;
+
+ NextLoc = PLoc.getIncludeLoc();
+ }
+ }
+
+ // Escape this filename. Turn '\' -> '\\' '"' -> '\"'
+ SmallString<128> FN;
+ if (PLoc.isValid()) {
+ FN += PLoc.getFilename();
+ Lexer::Stringify(FN);
+ OS << '"' << FN << '"';
+ }
+ Tok.setKind(tok::string_literal);
+ } else if (II == Ident__DATE__) {
+ Diag(Tok.getLocation(), diag::warn_pp_date_time);
+ if (!DATELoc.isValid())
+ ComputeDATE_TIME(DATELoc, TIMELoc, *this);
+ Tok.setKind(tok::string_literal);
+ Tok.setLength(strlen("\"Mmm dd yyyy\""));
+ Tok.setLocation(SourceMgr.createExpansionLoc(DATELoc, Tok.getLocation(),
+ Tok.getLocation(),
+ Tok.getLength()));
+ return;
+ } else if (II == Ident__TIME__) {
+ Diag(Tok.getLocation(), diag::warn_pp_date_time);
+ if (!TIMELoc.isValid())
+ ComputeDATE_TIME(DATELoc, TIMELoc, *this);
+ Tok.setKind(tok::string_literal);
+ Tok.setLength(strlen("\"hh:mm:ss\""));
+ Tok.setLocation(SourceMgr.createExpansionLoc(TIMELoc, Tok.getLocation(),
+ Tok.getLocation(),
+ Tok.getLength()));
+ return;
+ } else if (II == Ident__INCLUDE_LEVEL__) {
+ // Compute the presumed include depth of this token. This can be affected
+ // by GNU line markers.
+ unsigned Depth = 0;
+
+ PresumedLoc PLoc = SourceMgr.getPresumedLoc(Tok.getLocation());
+ if (PLoc.isValid()) {
+ PLoc = SourceMgr.getPresumedLoc(PLoc.getIncludeLoc());
+ for (; PLoc.isValid(); ++Depth)
+ PLoc = SourceMgr.getPresumedLoc(PLoc.getIncludeLoc());
+ }
+
+ // __INCLUDE_LEVEL__ expands to a simple numeric value.
+ OS << Depth;
+ Tok.setKind(tok::numeric_constant);
+ } else if (II == Ident__TIMESTAMP__) {
+ Diag(Tok.getLocation(), diag::warn_pp_date_time);
+ // MSVC, ICC, GCC, VisualAge C++ extension. The generated string should be
+    // of the form "Ddd Mmm dd hh:mm:ss yyyy", which is returned by asctime.
+
+ // Get the file that we are lexing out of. If we're currently lexing from
+ // a macro, dig into the include stack.
+ const FileEntry *CurFile = nullptr;
+ PreprocessorLexer *TheLexer = getCurrentFileLexer();
+
+ if (TheLexer)
+ CurFile = SourceMgr.getFileEntryForID(TheLexer->getFileID());
+
+ const char *Result;
+ if (CurFile) {
+ time_t TT = CurFile->getModificationTime();
+ struct tm *TM = localtime(&TT);
+ Result = asctime(TM);
+ } else {
+ Result = "??? ??? ?? ??:??:?? ????\n";
+ }
+ // Surround the string with " and strip the trailing newline.
+ OS << '"' << StringRef(Result).drop_back() << '"';
+ Tok.setKind(tok::string_literal);
+ } else if (II == Ident__COUNTER__) {
+ // __COUNTER__ expands to a simple numeric value.
+ OS << CounterValue++;
+ Tok.setKind(tok::numeric_constant);
+ } else if (II == Ident__has_feature ||
+ II == Ident__has_extension ||
+ II == Ident__has_builtin ||
+ II == Ident__is_identifier ||
+ II == Ident__has_attribute ||
+ II == Ident__has_declspec ||
+ II == Ident__has_cpp_attribute) {
+ // The argument to these builtins should be a parenthesized identifier.
+ SourceLocation StartLoc = Tok.getLocation();
+
+ bool IsValid = false;
+ IdentifierInfo *FeatureII = nullptr;
+ IdentifierInfo *ScopeII = nullptr;
+
+ // Read the '('.
+ LexUnexpandedToken(Tok);
+ if (Tok.is(tok::l_paren)) {
+ // Read the identifier
+ LexUnexpandedToken(Tok);
+ if ((FeatureII = Tok.getIdentifierInfo())) {
+ // If we're checking __has_cpp_attribute, it is possible to receive a
+ // scope token. Read the "::", if it's available.
+ LexUnexpandedToken(Tok);
+ bool IsScopeValid = true;
+ if (II == Ident__has_cpp_attribute && Tok.is(tok::coloncolon)) {
+ LexUnexpandedToken(Tok);
+ // The first thing we read was not the feature, it was the scope.
+ ScopeII = FeatureII;
+ if ((FeatureII = Tok.getIdentifierInfo()))
+ LexUnexpandedToken(Tok);
+ else
+ IsScopeValid = false;
+ }
+ // Read the closing paren.
+ if (IsScopeValid && Tok.is(tok::r_paren))
+ IsValid = true;
+ }
+ // Eat tokens until ')'.
+ while (Tok.isNot(tok::r_paren) && Tok.isNot(tok::eod) &&
+ Tok.isNot(tok::eof))
+ LexUnexpandedToken(Tok);
+ }
+
+ int Value = 0;
+ if (!IsValid)
+ Diag(StartLoc, diag::err_feature_check_malformed);
+ else if (II == Ident__is_identifier)
+ Value = FeatureII->getTokenID() == tok::identifier;
+ else if (II == Ident__has_builtin) {
+      // Checking for a builtin is trivial.
+ if (FeatureII->getBuiltinID() != 0) {
+ Value = true;
+ } else {
+ StringRef Feature = FeatureII->getName();
+ Value = llvm::StringSwitch<bool>(Feature)
+ .Case("__make_integer_seq", getLangOpts().CPlusPlus)
+ .Default(false);
+ }
+ } else if (II == Ident__has_attribute)
+ Value = hasAttribute(AttrSyntax::GNU, nullptr, FeatureII,
+ getTargetInfo(), getLangOpts());
+ else if (II == Ident__has_cpp_attribute)
+ Value = hasAttribute(AttrSyntax::CXX, ScopeII, FeatureII,
+ getTargetInfo(), getLangOpts());
+ else if (II == Ident__has_declspec)
+ Value = hasAttribute(AttrSyntax::Declspec, nullptr, FeatureII,
+ getTargetInfo(), getLangOpts());
+ else if (II == Ident__has_extension)
+ Value = HasExtension(*this, FeatureII);
+ else {
+ assert(II == Ident__has_feature && "Must be feature check");
+ Value = HasFeature(*this, FeatureII);
+ }
+
+ if (!IsValid)
+ return;
+ OS << Value;
+ Tok.setKind(tok::numeric_constant);
+ } else if (II == Ident__has_include ||
+ II == Ident__has_include_next) {
+ // The argument to these two builtins should be a parenthesized
+ // file name string literal using angle brackets (<>) or
+ // double-quotes ("").
+ bool Value;
+ if (II == Ident__has_include)
+ Value = EvaluateHasInclude(Tok, II, *this);
+ else
+ Value = EvaluateHasIncludeNext(Tok, II, *this);
+
+ if (Tok.isNot(tok::r_paren))
+ return;
+ OS << (int)Value;
+ Tok.setKind(tok::numeric_constant);
+ } else if (II == Ident__has_warning) {
+    // The argument should be a parenthesized string literal.
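+    // For example, __has_warning("-Wparentheses") evaluates to 1 because
+    // -Wparentheses names a known diagnostic group.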
+ SourceLocation StartLoc = Tok.getLocation();
+ bool IsValid = false;
+ bool Value = false;
+ // Read the '('.
+ LexUnexpandedToken(Tok);
+ do {
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(StartLoc, diag::err_warning_check_malformed);
+ break;
+ }
+
+ LexUnexpandedToken(Tok);
+ std::string WarningName;
+ SourceLocation StrStartLoc = Tok.getLocation();
+ if (!FinishLexStringLiteral(Tok, WarningName, "'__has_warning'",
+ /*MacroExpansion=*/false)) {
+ // Eat tokens until ')'.
+ while (Tok.isNot(tok::r_paren) && Tok.isNot(tok::eod) &&
+ Tok.isNot(tok::eof))
+ LexUnexpandedToken(Tok);
+ break;
+ }
+
+ // Is the end a ')'?
+ if (!(IsValid = Tok.is(tok::r_paren))) {
+ Diag(StartLoc, diag::err_warning_check_malformed);
+ break;
+ }
+
+ // FIXME: Should we accept "-R..." flags here, or should that be handled
+ // by a separate __has_remark?
+ if (WarningName.size() < 3 || WarningName[0] != '-' ||
+ WarningName[1] != 'W') {
+ Diag(StrStartLoc, diag::warn_has_warning_invalid_option);
+ break;
+ }
+
+      // Finally, check if the warning flag maps to a diagnostic group.
+ // We construct a SmallVector here to talk to getDiagnosticIDs().
+ // Although we don't use the result, this isn't a hot path, and not
+ // worth special casing.
+ SmallVector<diag::kind, 10> Diags;
+ Value = !getDiagnostics().getDiagnosticIDs()->
+ getDiagnosticsInGroup(diag::Flavor::WarningOrError,
+ WarningName.substr(2), Diags);
+ } while (false);
+
+ if (!IsValid)
+ return;
+ OS << (int)Value;
+ Tok.setKind(tok::numeric_constant);
+ } else if (II == Ident__building_module) {
+ // The argument to this builtin should be an identifier. The
+ // builtin evaluates to 1 when that identifier names the module we are
+ // currently building.
+ OS << (int)EvaluateBuildingModule(Tok, II, *this);
+ Tok.setKind(tok::numeric_constant);
+ } else if (II == Ident__MODULE__) {
+ // The current module as an identifier.
+ OS << getLangOpts().CurrentModule;
+ IdentifierInfo *ModuleII = getIdentifierInfo(getLangOpts().CurrentModule);
+ Tok.setIdentifierInfo(ModuleII);
+ Tok.setKind(ModuleII->getTokenID());
+ } else if (II == Ident__identifier) {
+ SourceLocation Loc = Tok.getLocation();
+
+ // We're expecting '__identifier' '(' identifier ')'. Try to recover
+ // if the parens are missing.
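+    // For example, __identifier(class) yields an ordinary identifier token
+    // spelled "class", letting a keyword be used as a name (a Microsoft
+    // extension).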
+ LexNonComment(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ // No '(', use end of last token.
+ Diag(getLocForEndOfToken(Loc), diag::err_pp_expected_after)
+ << II << tok::l_paren;
+ // If the next token isn't valid as our argument, we can't recover.
+ if (!Tok.isAnnotation() && Tok.getIdentifierInfo())
+ Tok.setKind(tok::identifier);
+ return;
+ }
+
+ SourceLocation LParenLoc = Tok.getLocation();
+ LexNonComment(Tok);
+
+ if (!Tok.isAnnotation() && Tok.getIdentifierInfo())
+ Tok.setKind(tok::identifier);
+ else {
+ Diag(Tok.getLocation(), diag::err_pp_identifier_arg_not_identifier)
+ << Tok.getKind();
+ // Don't walk past anything that's not a real token.
+ if (Tok.isOneOf(tok::eof, tok::eod) || Tok.isAnnotation())
+ return;
+ }
+
+ // Discard the ')', preserving 'Tok' as our result.
+ Token RParen;
+ LexNonComment(RParen);
+ if (RParen.isNot(tok::r_paren)) {
+ Diag(getLocForEndOfToken(Tok.getLocation()), diag::err_pp_expected_after)
+ << Tok.getKind() << tok::r_paren;
+ Diag(LParenLoc, diag::note_matching) << tok::l_paren;
+ }
+ return;
+ } else {
+ llvm_unreachable("Unknown identifier!");
+ }
+ CreateString(OS.str(), Tok, Tok.getLocation(), Tok.getLocation());
+}
+
+void Preprocessor::markMacroAsUsed(MacroInfo *MI) {
+ // If the 'used' status changed, and the macro requires 'unused' warning,
+ // remove its SourceLocation from the warn-for-unused-macro locations.
+ if (MI->isWarnIfUnused() && !MI->isUsed())
+ WarnUnusedMacroLocs.erase(MI->getDefinitionLoc());
+ MI->setIsUsed(true);
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp b/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp
new file mode 100644
index 0000000..5f63d35
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp
@@ -0,0 +1,728 @@
+//===--- PTHLexer.cpp - Lex from a token stream ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PTHLexer interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/PTHLexer.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemStatCache.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/TokenKinds.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/PTHManager.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/Token.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <memory>
+#include <system_error>
+using namespace clang;
+
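+// Each token in the PTH token stream is a fixed-width record: 1 byte of token
+// kind, 1 byte of token flags, 2 bytes of token length, 4 bytes of identifier
+// ID, and 4 bytes of file offset. PTHLexer::Lex decodes this layout.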
+static const unsigned StoredTokenSize = 1 + 1 + 2 + 4 + 4;
+
+//===----------------------------------------------------------------------===//
+// PTHLexer methods.
+//===----------------------------------------------------------------------===//
+
+PTHLexer::PTHLexer(Preprocessor &PP, FileID FID, const unsigned char *D,
+ const unsigned char *ppcond, PTHManager &PM)
+ : PreprocessorLexer(&PP, FID), TokBuf(D), CurPtr(D), LastHashTokPtr(nullptr),
+ PPCond(ppcond), CurPPCondPtr(ppcond), PTHMgr(PM) {
+
+ FileStartLoc = PP.getSourceManager().getLocForStartOfFile(FID);
+}
+
+bool PTHLexer::Lex(Token& Tok) {
+ //===--------------------------------------==//
+ // Read the raw token data.
+ //===--------------------------------------==//
+ using namespace llvm::support;
+
+ // Shadow CurPtr into an automatic variable.
+ const unsigned char *CurPtrShadow = CurPtr;
+
+ // Read in the data for the token.
+ unsigned Word0 = endian::readNext<uint32_t, little, aligned>(CurPtrShadow);
+ uint32_t IdentifierID =
+ endian::readNext<uint32_t, little, aligned>(CurPtrShadow);
+ uint32_t FileOffset =
+ endian::readNext<uint32_t, little, aligned>(CurPtrShadow);
+
+ tok::TokenKind TKind = (tok::TokenKind) (Word0 & 0xFF);
+ Token::TokenFlags TFlags = (Token::TokenFlags) ((Word0 >> 8) & 0xFF);
+ uint32_t Len = Word0 >> 16;
+
+ CurPtr = CurPtrShadow;
+
+ //===--------------------------------------==//
+ // Construct the token itself.
+ //===--------------------------------------==//
+
+ Tok.startToken();
+ Tok.setKind(TKind);
+ Tok.setFlag(TFlags);
+ assert(!LexingRawMode);
+ Tok.setLocation(FileStartLoc.getLocWithOffset(FileOffset));
+ Tok.setLength(Len);
+
+  // Handle literals and identifiers.
+ if (Tok.isLiteral()) {
+ Tok.setLiteralData((const char*) (PTHMgr.SpellingBase + IdentifierID));
+ }
+ else if (IdentifierID) {
+ MIOpt.ReadToken();
+ IdentifierInfo *II = PTHMgr.GetIdentifierInfo(IdentifierID-1);
+
+ Tok.setIdentifierInfo(II);
+
+ // Change the kind of this identifier to the appropriate token kind, e.g.
+ // turning "for" into a keyword.
+ Tok.setKind(II->getTokenID());
+
+ if (II->isHandleIdentifierCase())
+ return PP->HandleIdentifier(Tok);
+
+ return true;
+ }
+
+ //===--------------------------------------==//
+ // Process the token.
+ //===--------------------------------------==//
+ if (TKind == tok::eof) {
+ // Save the end-of-file token.
+ EofToken = Tok;
+
+ assert(!ParsingPreprocessorDirective);
+ assert(!LexingRawMode);
+
+ return LexEndOfFile(Tok);
+ }
+
+ if (TKind == tok::hash && Tok.isAtStartOfLine()) {
+ LastHashTokPtr = CurPtr - StoredTokenSize;
+ assert(!LexingRawMode);
+ PP->HandleDirective(Tok);
+
+ return false;
+ }
+
+ if (TKind == tok::eod) {
+ assert(ParsingPreprocessorDirective);
+ ParsingPreprocessorDirective = false;
+ return true;
+ }
+
+ MIOpt.ReadToken();
+ return true;
+}
+
+bool PTHLexer::LexEndOfFile(Token &Result) {
+ // If we hit the end of the file while parsing a preprocessor directive,
+ // end the preprocessor directive first. The next token returned will
+ // then be the end of file.
+ if (ParsingPreprocessorDirective) {
+ ParsingPreprocessorDirective = false; // Done parsing the "line".
+ return true; // Have a token.
+ }
+
+ assert(!LexingRawMode);
+
+ // If we are in a #if directive, emit an error.
+ while (!ConditionalStack.empty()) {
+ if (PP->getCodeCompletionFileLoc() != FileStartLoc)
+ PP->Diag(ConditionalStack.back().IfLoc,
+ diag::err_pp_unterminated_conditional);
+ ConditionalStack.pop_back();
+ }
+
+ // Finally, let the preprocessor handle this.
+ return PP->HandleEndOfFile(Result);
+}
+
+// FIXME: We can just grab the last token instead of storing a copy
+// into EofToken.
+void PTHLexer::getEOF(Token& Tok) {
+ assert(EofToken.is(tok::eof));
+ Tok = EofToken;
+}
+
+void PTHLexer::DiscardToEndOfLine() {
+  assert(ParsingPreprocessorDirective && !ParsingFilename &&
+ "Must be in a preprocessing directive!");
+
+ // We assume that if the preprocessor wishes to discard to the end of
+ // the line that it also means to end the current preprocessor directive.
+ ParsingPreprocessorDirective = false;
+
+ // Skip tokens by only peeking at their token kind and the flags.
+ // We don't need to actually reconstruct full tokens from the token buffer.
+  // This saves some copies and also avoids IdentifierInfo* lookups.
+ const unsigned char* p = CurPtr;
+  while (true) {
+ // Read the token kind. Are we at the end of the file?
+ tok::TokenKind x = (tok::TokenKind) (uint8_t) *p;
+ if (x == tok::eof) break;
+
+ // Read the token flags. Are we at the start of the next line?
+ Token::TokenFlags y = (Token::TokenFlags) (uint8_t) p[1];
+ if (y & Token::StartOfLine) break;
+
+ // Skip to the next token.
+ p += StoredTokenSize;
+ }
+
+ CurPtr = p;
+}
+
+/// SkipBlock - Used by Preprocessor to skip the current conditional block.
+bool PTHLexer::SkipBlock() {
+ using namespace llvm::support;
+ assert(CurPPCondPtr && "No cached PP conditional information.");
+ assert(LastHashTokPtr && "No known '#' token.");
+
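+  // Each side-table entry holds two uint32_t values: the offset of a '#'
+  // token in the token stream, and the table index of its "sibling" entry
+  // (zero when the entry corresponds to a #endif).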
+ const unsigned char *HashEntryI = nullptr;
+ uint32_t TableIdx;
+
+ do {
+ // Read the token offset from the side-table.
+ uint32_t Offset = endian::readNext<uint32_t, little, aligned>(CurPPCondPtr);
+
+ // Read the target table index from the side-table.
+ TableIdx = endian::readNext<uint32_t, little, aligned>(CurPPCondPtr);
+
+ // Compute the actual memory address of the '#' token data for this entry.
+ HashEntryI = TokBuf + Offset;
+
+    // Optimization: "Sibling jumping". #if...#else...#endif blocks can
+ // contain nested blocks. In the side-table we can jump over these
+ // nested blocks instead of doing a linear search if the next "sibling"
+ // entry is not at a location greater than LastHashTokPtr.
+ if (HashEntryI < LastHashTokPtr && TableIdx) {
+ // In the side-table we are still at an entry for a '#' token that
+ // is earlier than the last one we saw. Check if the location we would
+ // stride gets us closer.
+ const unsigned char* NextPPCondPtr =
+ PPCond + TableIdx*(sizeof(uint32_t)*2);
+ assert(NextPPCondPtr >= CurPPCondPtr);
+ // Read where we should jump to.
+ const unsigned char *HashEntryJ =
+ TokBuf + endian::readNext<uint32_t, little, aligned>(NextPPCondPtr);
+
+ if (HashEntryJ <= LastHashTokPtr) {
+ // Jump directly to the next entry in the side table.
+ HashEntryI = HashEntryJ;
+ TableIdx = endian::readNext<uint32_t, little, aligned>(NextPPCondPtr);
+ CurPPCondPtr = NextPPCondPtr;
+ }
+ }
+ }
+ while (HashEntryI < LastHashTokPtr);
+ assert(HashEntryI == LastHashTokPtr && "No PP-cond entry found for '#'");
+ assert(TableIdx && "No jumping from #endifs.");
+
+ // Update our side-table iterator.
+ const unsigned char* NextPPCondPtr = PPCond + TableIdx*(sizeof(uint32_t)*2);
+ assert(NextPPCondPtr >= CurPPCondPtr);
+ CurPPCondPtr = NextPPCondPtr;
+
+ // Read where we should jump to.
+ HashEntryI =
+ TokBuf + endian::readNext<uint32_t, little, aligned>(NextPPCondPtr);
+ uint32_t NextIdx = endian::readNext<uint32_t, little, aligned>(NextPPCondPtr);
+
+  // By construction NextIdx will be zero if this is a #endif. This is useful
+  // to know because it lets us avoid lexing another token.
+ bool isEndif = NextIdx == 0;
+
+ // This case can occur when we see something like this:
+ //
+ // #if ...
+ // /* a comment or nothing */
+ // #elif
+ //
+ // If we are skipping the first #if block it will be the case that CurPtr
+  // already points to the 'elif'. Just return.
+
+ if (CurPtr > HashEntryI) {
+ assert(CurPtr == HashEntryI + StoredTokenSize);
+ // Did we reach a #endif? If so, go ahead and consume that token as well.
+ if (isEndif)
+ CurPtr += StoredTokenSize * 2;
+ else
+ LastHashTokPtr = HashEntryI;
+
+ return isEndif;
+ }
+
+ // Otherwise, we need to advance. Update CurPtr to point to the '#' token.
+ CurPtr = HashEntryI;
+
+ // Update the location of the last observed '#'. This is useful if we
+ // are skipping multiple blocks.
+ LastHashTokPtr = CurPtr;
+
+ // Skip the '#' token.
+ assert(((tok::TokenKind)*CurPtr) == tok::hash);
+ CurPtr += StoredTokenSize;
+
+ // Did we reach a #endif? If so, go ahead and consume that token as well.
+ if (isEndif) {
+ CurPtr += StoredTokenSize * 2;
+ }
+
+ return isEndif;
+}
+
+SourceLocation PTHLexer::getSourceLocation() {
+ // getSourceLocation is not on the hot path. It is used to get the location
+ // of the next token when transitioning back to this lexer when done
+ // handling a #included file. Just read the necessary data from the token
+ // data buffer to construct the SourceLocation object.
+ // NOTE: This is a virtual function; hence it is defined out-of-line.
+ using namespace llvm::support;
+
+ const unsigned char *OffsetPtr = CurPtr + (StoredTokenSize - 4);
+ uint32_t Offset = endian::readNext<uint32_t, little, aligned>(OffsetPtr);
+ return FileStartLoc.getLocWithOffset(Offset);
+}
+
+//===----------------------------------------------------------------------===//
+// PTH file lookup: map from strings to file data.
+//===----------------------------------------------------------------------===//
+
+/// PTHFileLookup - This internal data structure is used by the PTHManager
+/// to map from FileEntry objects managed by FileManager to offsets within
+/// the PTH file.
+namespace {
+class PTHFileData {
+ const uint32_t TokenOff;
+ const uint32_t PPCondOff;
+public:
+ PTHFileData(uint32_t tokenOff, uint32_t ppCondOff)
+ : TokenOff(tokenOff), PPCondOff(ppCondOff) {}
+
+ uint32_t getTokenOffset() const { return TokenOff; }
+ uint32_t getPPCondOffset() const { return PPCondOff; }
+};
+
+
+class PTHFileLookupCommonTrait {
+public:
+ typedef std::pair<unsigned char, const char*> internal_key_type;
+ typedef unsigned hash_value_type;
+ typedef unsigned offset_type;
+
+ static hash_value_type ComputeHash(internal_key_type x) {
+ return llvm::HashString(x.second);
+ }
+
+ static std::pair<unsigned, unsigned>
+ ReadKeyDataLength(const unsigned char*& d) {
+ using namespace llvm::support;
+ unsigned keyLen =
+ (unsigned)endian::readNext<uint16_t, little, unaligned>(d);
+ unsigned dataLen = (unsigned) *(d++);
+ return std::make_pair(keyLen, dataLen);
+ }
+
+ static internal_key_type ReadKey(const unsigned char* d, unsigned) {
+ unsigned char k = *(d++); // Read the entry kind.
+ return std::make_pair(k, (const char*) d);
+ }
+};
+
+} // end anonymous namespace
+
+class PTHManager::PTHFileLookupTrait : public PTHFileLookupCommonTrait {
+public:
+ typedef const FileEntry* external_key_type;
+ typedef PTHFileData data_type;
+
+ static internal_key_type GetInternalKey(const FileEntry* FE) {
+ return std::make_pair((unsigned char) 0x1, FE->getName());
+ }
+
+ static bool EqualKey(internal_key_type a, internal_key_type b) {
+ return a.first == b.first && strcmp(a.second, b.second) == 0;
+ }
+
+ static PTHFileData ReadData(const internal_key_type& k,
+ const unsigned char* d, unsigned) {
+ assert(k.first == 0x1 && "Only file lookups can match!");
+ using namespace llvm::support;
+ uint32_t x = endian::readNext<uint32_t, little, unaligned>(d);
+ uint32_t y = endian::readNext<uint32_t, little, unaligned>(d);
+ return PTHFileData(x, y);
+ }
+};
+
+class PTHManager::PTHStringLookupTrait {
+public:
+ typedef uint32_t data_type;
+ typedef const std::pair<const char*, unsigned> external_key_type;
+ typedef external_key_type internal_key_type;
+ typedef uint32_t hash_value_type;
+ typedef unsigned offset_type;
+
+ static bool EqualKey(const internal_key_type& a,
+ const internal_key_type& b) {
+ return (a.second == b.second) ? memcmp(a.first, b.first, a.second) == 0
+ : false;
+ }
+
+ static hash_value_type ComputeHash(const internal_key_type& a) {
+ return llvm::HashString(StringRef(a.first, a.second));
+ }
+
+ // This hopefully will just get inlined and removed by the optimizer.
+ static const internal_key_type&
+ GetInternalKey(const external_key_type& x) { return x; }
+
+ static std::pair<unsigned, unsigned>
+ ReadKeyDataLength(const unsigned char*& d) {
+ using namespace llvm::support;
+ return std::make_pair(
+ (unsigned)endian::readNext<uint16_t, little, unaligned>(d),
+ sizeof(uint32_t));
+ }
+
+ static std::pair<const char*, unsigned>
+ ReadKey(const unsigned char* d, unsigned n) {
+ assert(n >= 2 && d[n-1] == '\0');
+ return std::make_pair((const char*) d, n-1);
+ }
+
+ static uint32_t ReadData(const internal_key_type& k, const unsigned char* d,
+ unsigned) {
+ using namespace llvm::support;
+ return endian::readNext<uint32_t, little, unaligned>(d);
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// PTHManager methods.
+//===----------------------------------------------------------------------===//
+
+PTHManager::PTHManager(
+ std::unique_ptr<const llvm::MemoryBuffer> buf,
+ std::unique_ptr<PTHFileLookup> fileLookup, const unsigned char *idDataTable,
+ std::unique_ptr<IdentifierInfo *[], llvm::FreeDeleter> perIDCache,
+ std::unique_ptr<PTHStringIdLookup> stringIdLookup, unsigned numIds,
+ const unsigned char *spellingBase, const char *originalSourceFile)
+ : Buf(std::move(buf)), PerIDCache(std::move(perIDCache)),
+ FileLookup(std::move(fileLookup)), IdDataTable(idDataTable),
+ StringIdLookup(std::move(stringIdLookup)), NumIds(numIds), PP(nullptr),
+ SpellingBase(spellingBase), OriginalSourceFile(originalSourceFile) {}
+
+PTHManager::~PTHManager() {
+}
+
+static void InvalidPTH(DiagnosticsEngine &Diags, const char *Msg) {
+ Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Error, "%0")) << Msg;
+}
+
+PTHManager *PTHManager::Create(StringRef file, DiagnosticsEngine &Diags) {
+ // Memory map the PTH file.
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> FileOrErr =
+ llvm::MemoryBuffer::getFile(file);
+
+ if (!FileOrErr) {
+ // FIXME: Add ec.message() to this diag.
+ Diags.Report(diag::err_invalid_pth_file) << file;
+ return nullptr;
+ }
+ std::unique_ptr<llvm::MemoryBuffer> File = std::move(FileOrErr.get());
+
+ using namespace llvm::support;
+
+  // Get the buffer ranges and check that the file is large enough to hold
+  // the "cfe-pth" signature and the prologue that follows it.
+ const unsigned char *BufBeg = (const unsigned char*)File->getBufferStart();
+ const unsigned char *BufEnd = (const unsigned char*)File->getBufferEnd();
+
+ // Check the prologue of the file.
+ if ((BufEnd - BufBeg) < (signed)(sizeof("cfe-pth") + 4 + 4) ||
+ memcmp(BufBeg, "cfe-pth", sizeof("cfe-pth")) != 0) {
+ Diags.Report(diag::err_invalid_pth_file) << file;
+ return nullptr;
+ }
+
+ // Read the PTH version.
+ const unsigned char *p = BufBeg + (sizeof("cfe-pth"));
+ unsigned Version = endian::readNext<uint32_t, little, aligned>(p);
+
+  if (Version != PTHManager::Version) {
+ InvalidPTH(Diags,
+ Version < PTHManager::Version
+ ? "PTH file uses an older PTH format that is no longer supported"
+ : "PTH file uses a newer PTH format that cannot be read");
+ return nullptr;
+ }
+
+ // Compute the address of the index table at the end of the PTH file.
+ const unsigned char *PrologueOffset = p;
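+  // The prologue holds five 32-bit offsets, in order: the identifier-data
+  // table (word 0), the string -> persistent-ID hashtable (word 1), the file
+  // lookup table (word 2), the spelling cache (word 3), and the original
+  // source file record (word 4).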
+
+ if (PrologueOffset >= BufEnd) {
+ Diags.Report(diag::err_invalid_pth_file) << file;
+ return nullptr;
+ }
+
+ // Construct the file lookup table. This will be used for mapping from
+ // FileEntry*'s to cached tokens.
+ const unsigned char* FileTableOffset = PrologueOffset + sizeof(uint32_t)*2;
+ const unsigned char *FileTable =
+ BufBeg + endian::readNext<uint32_t, little, aligned>(FileTableOffset);
+
+ if (!(FileTable > BufBeg && FileTable < BufEnd)) {
+ Diags.Report(diag::err_invalid_pth_file) << file;
+ return nullptr; // FIXME: Proper error diagnostic?
+ }
+
+ std::unique_ptr<PTHFileLookup> FL(PTHFileLookup::Create(FileTable, BufBeg));
+
+ // Warn if the PTH file is empty. We still want to create a PTHManager
+ // as the PTH could be used with -include-pth.
+ if (FL->isEmpty())
+ InvalidPTH(Diags, "PTH file contains no cached source data");
+
+ // Get the location of the table mapping from persistent ids to the
+ // data needed to reconstruct identifiers.
+ const unsigned char* IDTableOffset = PrologueOffset + sizeof(uint32_t)*0;
+ const unsigned char *IData =
+ BufBeg + endian::readNext<uint32_t, little, aligned>(IDTableOffset);
+
+ if (!(IData >= BufBeg && IData < BufEnd)) {
+ Diags.Report(diag::err_invalid_pth_file) << file;
+ return nullptr;
+ }
+
+ // Get the location of the hashtable mapping between strings and
+ // persistent IDs.
+ const unsigned char* StringIdTableOffset = PrologueOffset + sizeof(uint32_t)*1;
+ const unsigned char *StringIdTable =
+ BufBeg + endian::readNext<uint32_t, little, aligned>(StringIdTableOffset);
+ if (!(StringIdTable >= BufBeg && StringIdTable < BufEnd)) {
+ Diags.Report(diag::err_invalid_pth_file) << file;
+ return nullptr;
+ }
+
+ std::unique_ptr<PTHStringIdLookup> SL(
+ PTHStringIdLookup::Create(StringIdTable, BufBeg));
+
+ // Get the location of the spelling cache.
+ const unsigned char* spellingBaseOffset = PrologueOffset + sizeof(uint32_t)*3;
+ const unsigned char *spellingBase =
+ BufBeg + endian::readNext<uint32_t, little, aligned>(spellingBaseOffset);
+ if (!(spellingBase >= BufBeg && spellingBase < BufEnd)) {
+ Diags.Report(diag::err_invalid_pth_file) << file;
+ return nullptr;
+ }
+
+ // Get the number of IdentifierInfos and pre-allocate the identifier cache.
+ uint32_t NumIds = endian::readNext<uint32_t, little, aligned>(IData);
+
+ // Pre-allocate the persistent ID -> IdentifierInfo* cache. We use calloc()
+  // so that, in the best case, the memory is zeroed only once, when the OS
+  // returns us new pages.
+ std::unique_ptr<IdentifierInfo *[], llvm::FreeDeleter> PerIDCache;
+
+ if (NumIds) {
+ PerIDCache.reset((IdentifierInfo **)calloc(NumIds, sizeof(PerIDCache[0])));
+ if (!PerIDCache) {
+ InvalidPTH(Diags, "Could not allocate memory for processing PTH file");
+ return nullptr;
+ }
+ }
+
+ // Compute the address of the original source file.
+ const unsigned char* originalSourceBase = PrologueOffset + sizeof(uint32_t)*4;
+ unsigned len =
+ endian::readNext<uint16_t, little, unaligned>(originalSourceBase);
+ if (!len) originalSourceBase = nullptr;
+
+ // Create the new PTHManager.
+ return new PTHManager(std::move(File), std::move(FL), IData,
+ std::move(PerIDCache), std::move(SL), NumIds,
+ spellingBase, (const char *)originalSourceBase);
+}
+
+IdentifierInfo* PTHManager::LazilyCreateIdentifierInfo(unsigned PersistentID) {
+ using namespace llvm::support;
+ // Look in the PTH file for the string data for the IdentifierInfo object.
+ const unsigned char* TableEntry = IdDataTable + sizeof(uint32_t)*PersistentID;
+ const unsigned char *IDData =
+ (const unsigned char *)Buf->getBufferStart() +
+ endian::readNext<uint32_t, little, aligned>(TableEntry);
+ assert(IDData < (const unsigned char*)Buf->getBufferEnd());
+
+ // Allocate the object.
+ std::pair<IdentifierInfo,const unsigned char*> *Mem =
+ Alloc.Allocate<std::pair<IdentifierInfo,const unsigned char*> >();
+
+ Mem->second = IDData;
+ assert(IDData[0] != '\0');
+ IdentifierInfo *II = new ((void*) Mem) IdentifierInfo();
+
+ // Store the new IdentifierInfo in the cache.
+ PerIDCache[PersistentID] = II;
+ assert(II->getNameStart() && II->getNameStart()[0] != '\0');
+ return II;
+}
+
+IdentifierInfo* PTHManager::get(StringRef Name) {
+ // Double check our assumption that the last character isn't '\0'.
+ assert(Name.empty() || Name.back() != '\0');
+ PTHStringIdLookup::iterator I =
+ StringIdLookup->find(std::make_pair(Name.data(), Name.size()));
+ if (I == StringIdLookup->end()) // No identifier found?
+ return nullptr;
+
+ // Match found. Return the identifier!
+ assert(*I > 0);
+ return GetIdentifierInfo(*I-1);
+}
+
+PTHLexer *PTHManager::CreateLexer(FileID FID) {
+ const FileEntry *FE = PP->getSourceManager().getFileEntryForID(FID);
+ if (!FE)
+ return nullptr;
+
+ using namespace llvm::support;
+
+ // Lookup the FileEntry object in our file lookup data structure. It will
+ // return a variant that indicates whether or not there is an offset within
+ // the PTH file that contains cached tokens.
+ PTHFileLookup::iterator I = FileLookup->find(FE);
+
+ if (I == FileLookup->end()) // No tokens available?
+ return nullptr;
+
+ const PTHFileData& FileData = *I;
+
+ const unsigned char *BufStart = (const unsigned char *)Buf->getBufferStart();
+ // Compute the offset of the token data within the buffer.
+ const unsigned char* data = BufStart + FileData.getTokenOffset();
+
+ // Get the location of pp-conditional table.
+ const unsigned char* ppcond = BufStart + FileData.getPPCondOffset();
+ uint32_t Len = endian::readNext<uint32_t, little, aligned>(ppcond);
+ if (Len == 0) ppcond = nullptr;
+
+ assert(PP && "No preprocessor set yet!");
+ return new PTHLexer(*PP, FID, data, ppcond, *this);
+}
+
+//===----------------------------------------------------------------------===//
+// 'stat' caching.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class PTHStatData {
+public:
+ const bool HasData;
+ uint64_t Size;
+ time_t ModTime;
+ llvm::sys::fs::UniqueID UniqueID;
+ bool IsDirectory;
+
+ PTHStatData(uint64_t Size, time_t ModTime, llvm::sys::fs::UniqueID UniqueID,
+ bool IsDirectory)
+ : HasData(true), Size(Size), ModTime(ModTime), UniqueID(UniqueID),
+ IsDirectory(IsDirectory) {}
+
+ PTHStatData() : HasData(false) {}
+};
+
+class PTHStatLookupTrait : public PTHFileLookupCommonTrait {
+public:
+  typedef const char* external_key_type;
+ typedef PTHStatData data_type;
+
+ static internal_key_type GetInternalKey(const char *path) {
+ // The key 'kind' doesn't matter here because it is ignored in EqualKey.
+ return std::make_pair((unsigned char) 0x0, path);
+ }
+
+ static bool EqualKey(internal_key_type a, internal_key_type b) {
+ // When doing 'stat' lookups we don't care about the kind of 'a' and 'b',
+ // just the paths.
+ return strcmp(a.second, b.second) == 0;
+ }
+
+ static data_type ReadData(const internal_key_type& k, const unsigned char* d,
+ unsigned) {
+
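+    // File entries share their records with the token-cache file lookup, so
+    // a file record starts with the two token/pp-cond offset words; skip
+    // them before reading the stat fields.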
+ if (k.first /* File or Directory */) {
+ bool IsDirectory = true;
+ if (k.first == 0x1 /* File */) {
+ IsDirectory = false;
+ d += 4 * 2; // Skip the first 2 words.
+ }
+
+ using namespace llvm::support;
+
+ uint64_t File = endian::readNext<uint64_t, little, unaligned>(d);
+ uint64_t Device = endian::readNext<uint64_t, little, unaligned>(d);
+ llvm::sys::fs::UniqueID UniqueID(Device, File);
+ time_t ModTime = endian::readNext<uint64_t, little, unaligned>(d);
+ uint64_t Size = endian::readNext<uint64_t, little, unaligned>(d);
+ return data_type(Size, ModTime, UniqueID, IsDirectory);
+ }
+
+ // Negative stat. Don't read anything.
+ return data_type();
+ }
+};
+} // end anonymous namespace
+
+namespace clang {
+class PTHStatCache : public FileSystemStatCache {
+ typedef llvm::OnDiskChainedHashTable<PTHStatLookupTrait> CacheTy;
+ CacheTy Cache;
+
+public:
+ PTHStatCache(PTHManager::PTHFileLookup &FL)
+ : Cache(FL.getNumBuckets(), FL.getNumEntries(), FL.getBuckets(),
+ FL.getBase()) {}
+
+ LookupResult getStat(const char *Path, FileData &Data, bool isFile,
+ std::unique_ptr<vfs::File> *F,
+ vfs::FileSystem &FS) override {
+ // Do the lookup for the file's data in the PTH file.
+ CacheTy::iterator I = Cache.find(Path);
+
+ // If we don't get a hit in the PTH file just forward to 'stat'.
+ if (I == Cache.end())
+ return statChained(Path, Data, isFile, F, FS);
+
+ const PTHStatData &D = *I;
+
+ if (!D.HasData)
+ return CacheMissing;
+
+ Data.Name = Path;
+ Data.Size = D.Size;
+ Data.ModTime = D.ModTime;
+ Data.UniqueID = D.UniqueID;
+ Data.IsDirectory = D.IsDirectory;
+ Data.IsNamedPipe = false;
+ Data.InPCH = true;
+
+ return CacheExists;
+ }
+};
+}
+
+std::unique_ptr<FileSystemStatCache> PTHManager::createStatCache() {
+ return llvm::make_unique<PTHStatCache>(*FileLookup);
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
new file mode 100644
index 0000000..3134790
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
@@ -0,0 +1,1490 @@
+//===--- Pragma.cpp - Pragma registration and handling --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PragmaHandler/PragmaTable interfaces and implements
+// pragma related methods of the Preprocessor class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Pragma.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/CrashRecoveryContext.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+using namespace clang;
+
+// Out-of-line destructor to provide a home for the class.
+PragmaHandler::~PragmaHandler() {
+}
+
+//===----------------------------------------------------------------------===//
+// EmptyPragmaHandler Implementation.
+//===----------------------------------------------------------------------===//
+
+EmptyPragmaHandler::EmptyPragmaHandler(StringRef Name) : PragmaHandler(Name) {}
+
+void EmptyPragmaHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &FirstToken) {}
+
+//===----------------------------------------------------------------------===//
+// PragmaNamespace Implementation.
+//===----------------------------------------------------------------------===//
+
+PragmaNamespace::~PragmaNamespace() {
+ llvm::DeleteContainerSeconds(Handlers);
+}
+
+/// FindHandler - Check to see if there is already a handler for the
+/// specified name. If not, return the handler for the null identifier if it
+/// exists; otherwise return null. If IgnoreNull is true (the default), the
+/// null handler isn't returned on failure to match.
+PragmaHandler *PragmaNamespace::FindHandler(StringRef Name,
+ bool IgnoreNull) const {
+ if (PragmaHandler *Handler = Handlers.lookup(Name))
+ return Handler;
+ return IgnoreNull ? nullptr : Handlers.lookup(StringRef());
+}
+
+void PragmaNamespace::AddPragma(PragmaHandler *Handler) {
+ assert(!Handlers.lookup(Handler->getName()) &&
+ "A handler with this name is already registered in this namespace");
+ Handlers[Handler->getName()] = Handler;
+}
+
+void PragmaNamespace::RemovePragmaHandler(PragmaHandler *Handler) {
+ assert(Handlers.lookup(Handler->getName()) &&
+ "Handler not registered in this namespace");
+ Handlers.erase(Handler->getName());
+}
+
+void PragmaNamespace::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &Tok) {
+  // Read the 'namespace' that the directive is in, e.g. STDC. Do not
+  // macro-expand it; the user may have a STDC #define, and that should not
+  // affect this directive.
+ PP.LexUnexpandedToken(Tok);
+
+ // Get the handler for this token. If there is no handler, ignore the pragma.
+ PragmaHandler *Handler
+ = FindHandler(Tok.getIdentifierInfo() ? Tok.getIdentifierInfo()->getName()
+ : StringRef(),
+ /*IgnoreNull=*/false);
+ if (!Handler) {
+ PP.Diag(Tok, diag::warn_pragma_ignored);
+ return;
+ }
+
+ // Otherwise, pass it down.
+ Handler->HandlePragma(PP, Introducer, Tok);
+}
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Pragma Directive Handling.
+//===----------------------------------------------------------------------===//
+
+/// HandlePragmaDirective - The "\#pragma" directive has been parsed. Lex the
+/// rest of the pragma, passing it to the registered pragma handlers.
+void Preprocessor::HandlePragmaDirective(SourceLocation IntroducerLoc,
+ PragmaIntroducerKind Introducer) {
+ if (Callbacks)
+ Callbacks->PragmaDirective(IntroducerLoc, Introducer);
+
+ if (!PragmasEnabled)
+ return;
+
+ ++NumPragma;
+
+ // Invoke the first level of pragma handlers which reads the namespace id.
+ Token Tok;
+ PragmaHandlers->HandlePragma(*this, Introducer, Tok);
+
+ // If the pragma handler didn't read the rest of the line, consume it now.
+ if ((CurTokenLexer && CurTokenLexer->isParsingPreprocessorDirective())
+ || (CurPPLexer && CurPPLexer->ParsingPreprocessorDirective))
+ DiscardUntilEndOfDirective();
+}
+
+namespace {
+/// \brief Helper class for Preprocessor::Handle_Pragma.
+class LexingFor_PragmaRAII {
+ Preprocessor &PP;
+ bool InMacroArgPreExpansion;
+ bool Failed;
+ Token &OutTok;
+ Token PragmaTok;
+
+public:
+ LexingFor_PragmaRAII(Preprocessor &PP, bool InMacroArgPreExpansion,
+ Token &Tok)
+ : PP(PP), InMacroArgPreExpansion(InMacroArgPreExpansion),
+ Failed(false), OutTok(Tok) {
+ if (InMacroArgPreExpansion) {
+ PragmaTok = OutTok;
+ PP.EnableBacktrackAtThisPos();
+ }
+ }
+
+ ~LexingFor_PragmaRAII() {
+ if (InMacroArgPreExpansion) {
+ if (Failed) {
+ PP.CommitBacktrackedTokens();
+ } else {
+ PP.Backtrack();
+ OutTok = PragmaTok;
+ }
+ }
+ }
+
+ void failed() {
+ Failed = true;
+ }
+};
+} // end anonymous namespace
+
+/// Handle_Pragma - Read a _Pragma directive, slice it up, process it, then
+/// return the first token after the directive. The _Pragma token has just
+/// been read into 'Tok'.
+void Preprocessor::Handle_Pragma(Token &Tok) {
+
+ // This works differently if we are pre-expanding a macro argument.
+ // In that case we don't actually "activate" the pragma now, we only lex it
+ // until we are sure it is lexically correct and then we backtrack so that
+ // we activate the pragma whenever we encounter the tokens again in the token
+ // stream. This ensures that we will activate it in the correct location
+  // or that we will ignore it if it never enters the token stream, e.g.:
+ //
+ // #define EMPTY(x)
+ // #define INACTIVE(x) EMPTY(x)
+ // INACTIVE(_Pragma("clang diagnostic ignored \"-Wconversion\""))
+
+ LexingFor_PragmaRAII _PragmaLexing(*this, InMacroArgPreExpansion, Tok);
+
+ // Remember the pragma token location.
+ SourceLocation PragmaLoc = Tok.getLocation();
+
+ // Read the '('.
+ Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(PragmaLoc, diag::err__Pragma_malformed);
+ return _PragmaLexing.failed();
+ }
+
+ // Read the '"..."'.
+ Lex(Tok);
+ if (!tok::isStringLiteral(Tok.getKind())) {
+ Diag(PragmaLoc, diag::err__Pragma_malformed);
+ // Skip bad tokens, and the ')', if present.
+ if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::eof))
+ Lex(Tok);
+ while (Tok.isNot(tok::r_paren) &&
+ !Tok.isAtStartOfLine() &&
+ Tok.isNot(tok::eof))
+ Lex(Tok);
+ if (Tok.is(tok::r_paren))
+ Lex(Tok);
+ return _PragmaLexing.failed();
+ }
+
+ if (Tok.hasUDSuffix()) {
+ Diag(Tok, diag::err_invalid_string_udl);
+ // Skip this token, and the ')', if present.
+ Lex(Tok);
+ if (Tok.is(tok::r_paren))
+ Lex(Tok);
+ return _PragmaLexing.failed();
+ }
+
+ // Remember the string.
+ Token StrTok = Tok;
+
+ // Read the ')'.
+ Lex(Tok);
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(PragmaLoc, diag::err__Pragma_malformed);
+ return _PragmaLexing.failed();
+ }
+
+ if (InMacroArgPreExpansion)
+ return;
+
+ SourceLocation RParenLoc = Tok.getLocation();
+ std::string StrVal = getSpelling(StrTok);
+
+ // The _Pragma is lexically sound. Destringize according to C11 6.10.9.1:
+ // "The string literal is destringized by deleting any encoding prefix,
+ // deleting the leading and trailing double-quotes, replacing each escape
+ // sequence \" by a double-quote, and replacing each escape sequence \\ by a
+ // single backslash."
+ if (StrVal[0] == 'L' || StrVal[0] == 'U' ||
+ (StrVal[0] == 'u' && StrVal[1] != '8'))
+ StrVal.erase(StrVal.begin());
+ else if (StrVal[0] == 'u')
+ StrVal.erase(StrVal.begin(), StrVal.begin() + 2);
+
+ if (StrVal[0] == 'R') {
+ // FIXME: C++11 does not specify how to handle raw-string-literals here.
+ // We strip off the 'R', the quotes, the d-char-sequences, and the parens.
+ assert(StrVal[1] == '"' && StrVal[StrVal.size() - 1] == '"' &&
+ "Invalid raw string token!");
+
+ // Measure the length of the d-char-sequence.
+ unsigned NumDChars = 0;
+ while (StrVal[2 + NumDChars] != '(') {
+ assert(NumDChars < (StrVal.size() - 5) / 2 &&
+ "Invalid raw string token!");
+ ++NumDChars;
+ }
+ assert(StrVal[StrVal.size() - 2 - NumDChars] == ')');
+
+ // Remove 'R " d-char-sequence' and 'd-char-sequence "'. We'll replace the
+ // parens below.
+ StrVal.erase(0, 2 + NumDChars);
+ StrVal.erase(StrVal.size() - 1 - NumDChars);
+ } else {
+ assert(StrVal[0] == '"' && StrVal[StrVal.size()-1] == '"' &&
+ "Invalid string token!");
+
+ // Remove escaped quotes and escapes.
+ unsigned ResultPos = 1;
+ for (unsigned i = 1, e = StrVal.size() - 1; i != e; ++i) {
+ // Skip escapes. \\ -> '\' and \" -> '"'.
+ if (StrVal[i] == '\\' && i + 1 < e &&
+ (StrVal[i + 1] == '\\' || StrVal[i + 1] == '"'))
+ ++i;
+ StrVal[ResultPos++] = StrVal[i];
+ }
+ StrVal.erase(StrVal.begin() + ResultPos, StrVal.end() - 1);
+ }
+
+ // Remove the front quote, replacing it with a space, so that the pragma
+ // contents appear to have a space before them.
+ StrVal[0] = ' ';
+
+ // Replace the terminating quote with a \n.
+ StrVal[StrVal.size()-1] = '\n';
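+  // For example (illustrative), for _Pragma("clang diagnostic push") StrVal
+  // is now " clang diagnostic push\n", ready to be lexed as a #pragma body.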
+
+ // Plop the string (including the newline and trailing null) into a buffer
+ // where we can lex it.
+ Token TmpTok;
+ TmpTok.startToken();
+ CreateString(StrVal, TmpTok);
+ SourceLocation TokLoc = TmpTok.getLocation();
+
+ // Make and enter a lexer object so that we lex and expand the tokens just
+ // like any others.
+ Lexer *TL = Lexer::Create_PragmaLexer(TokLoc, PragmaLoc, RParenLoc,
+ StrVal.size(), *this);
+
+ EnterSourceFileWithLexer(TL, nullptr);
+
+ // With everything set up, lex this as a #pragma directive.
+ HandlePragmaDirective(PragmaLoc, PIK__Pragma);
+
+ // Finally, return whatever came after the pragma directive.
+ return Lex(Tok);
+}
+
+/// HandleMicrosoft__pragma - Like Handle_Pragma except the pragma text
+/// is not enclosed within a string literal.
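+///
+/// For example (illustrative):
+/// \code
+///   __pragma(warning(disable:4700))
+/// \endcode
+/// collects the tokens 'warning ( disable : 4700 )' and replaces the final
+/// ')' with an end-of-directive token before handling them as a #pragma.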
+void Preprocessor::HandleMicrosoft__pragma(Token &Tok) {
+ // Remember the pragma token location.
+ SourceLocation PragmaLoc = Tok.getLocation();
+
+ // Read the '('.
+ Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(PragmaLoc, diag::err__Pragma_malformed);
+ return;
+ }
+
+ // Get the tokens enclosed within the __pragma(), as well as the final ')'.
+ SmallVector<Token, 32> PragmaToks;
+ int NumParens = 0;
+ Lex(Tok);
+ while (Tok.isNot(tok::eof)) {
+ PragmaToks.push_back(Tok);
+ if (Tok.is(tok::l_paren))
+ NumParens++;
+ else if (Tok.is(tok::r_paren) && NumParens-- == 0)
+ break;
+ Lex(Tok);
+ }
+
+ if (Tok.is(tok::eof)) {
+ Diag(PragmaLoc, diag::err_unterminated___pragma);
+ return;
+ }
+
+ PragmaToks.front().setFlag(Token::LeadingSpace);
+
+ // Replace the ')' with an EOD to mark the end of the pragma.
+ PragmaToks.back().setKind(tok::eod);
+
+ Token *TokArray = new Token[PragmaToks.size()];
+ std::copy(PragmaToks.begin(), PragmaToks.end(), TokArray);
+
+ // Push the tokens onto the stack.
+  EnterTokenStream(TokArray, PragmaToks.size(), /*DisableMacroExpansion=*/true,
+                   /*OwnsTokens=*/true);
+
+ // With everything set up, lex this as a #pragma directive.
+ HandlePragmaDirective(PragmaLoc, PIK___pragma);
+
+ // Finally, return whatever came after the pragma directive.
+ return Lex(Tok);
+}
+
+/// HandlePragmaOnce - Handle \#pragma once. OnceTok is the 'once'.
+///
+void Preprocessor::HandlePragmaOnce(Token &OnceTok) {
+ if (isInPrimaryFile()) {
+ Diag(OnceTok, diag::pp_pragma_once_in_main_file);
+ return;
+ }
+
+ // Get the current file lexer we're looking at. Ignore _Pragma 'files' etc.
+ // Mark the file as a once-only file now.
+ HeaderInfo.MarkFileIncludeOnce(getCurrentFileLexer()->getFileEntry());
+}
+
+void Preprocessor::HandlePragmaMark() {
+ assert(CurPPLexer && "No current lexer?");
+ if (CurLexer)
+ CurLexer->ReadToEndOfLine();
+ else
+ CurPTHLexer->DiscardToEndOfLine();
+}
+
+/// HandlePragmaPoison - Handle \#pragma GCC poison. PoisonTok is the 'poison'.
+///
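+/// For example (illustrative):
+/// \code
+///   #pragma GCC poison printf
+///   printf("hi"); // error: attempt to use a poisoned identifier
+/// \endcode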
+void Preprocessor::HandlePragmaPoison(Token &PoisonTok) {
+ Token Tok;
+
+  while (true) {
+ // Read the next token to poison. While doing this, pretend that we are
+ // skipping while reading the identifier to poison.
+ // This avoids errors on code like:
+ // #pragma GCC poison X
+ // #pragma GCC poison X
+ if (CurPPLexer) CurPPLexer->LexingRawMode = true;
+ LexUnexpandedToken(Tok);
+ if (CurPPLexer) CurPPLexer->LexingRawMode = false;
+
+ // If we reached the end of line, we're done.
+ if (Tok.is(tok::eod)) return;
+
+ // Can only poison identifiers.
+ if (Tok.isNot(tok::raw_identifier)) {
+ Diag(Tok, diag::err_pp_invalid_poison);
+ return;
+ }
+
+ // Look up the identifier info for the token. We disabled identifier lookup
+ // by saying we're skipping contents, so we need to do this manually.
+ IdentifierInfo *II = LookUpIdentifierInfo(Tok);
+
+ // Already poisoned.
+ if (II->isPoisoned()) continue;
+
+ // If this is a macro identifier, emit a warning.
+ if (isMacroDefined(II))
+ Diag(Tok, diag::pp_poisoning_existing_macro);
+
+ // Finally, poison it!
+ II->setIsPoisoned();
+ if (II->isFromAST())
+ II->setChangedSinceDeserialization();
+ }
+}
+
+/// HandlePragmaSystemHeader - Implement \#pragma GCC system_header. We know
+/// that the whole directive has been parsed.
+void Preprocessor::HandlePragmaSystemHeader(Token &SysHeaderTok) {
+ if (isInPrimaryFile()) {
+ Diag(SysHeaderTok, diag::pp_pragma_sysheader_in_main_file);
+ return;
+ }
+
+ // Get the current file lexer we're looking at. Ignore _Pragma 'files' etc.
+ PreprocessorLexer *TheLexer = getCurrentFileLexer();
+
+ // Mark the file as a system header.
+ HeaderInfo.MarkFileSystemHeader(TheLexer->getFileEntry());
+
+ PresumedLoc PLoc = SourceMgr.getPresumedLoc(SysHeaderTok.getLocation());
+ if (PLoc.isInvalid())
+ return;
+
+ unsigned FilenameID = SourceMgr.getLineTableFilenameID(PLoc.getFilename());
+
+ // Notify the client, if desired, that we are in a new source file.
+ if (Callbacks)
+ Callbacks->FileChanged(SysHeaderTok.getLocation(),
+ PPCallbacks::SystemHeaderPragma, SrcMgr::C_System);
+
+ // Emit a line marker. This will change any source locations from this point
+ // forward to realize they are in a system header.
+ // Create a line note with this information.
+ SourceMgr.AddLineNote(SysHeaderTok.getLocation(), PLoc.getLine()+1,
+ FilenameID, /*IsEntry=*/false, /*IsExit=*/false,
+ /*IsSystem=*/true, /*IsExternC=*/false);
+}
+
+/// HandlePragmaDependency - Handle \#pragma GCC dependency "foo" blah.
+///
+void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
+ Token FilenameTok;
+ CurPPLexer->LexIncludeFilename(FilenameTok);
+
+ // If the token kind is EOD, the error has already been diagnosed.
+ if (FilenameTok.is(tok::eod))
+ return;
+
+ // Reserve a buffer to get the spelling.
+ SmallString<128> FilenameBuffer;
+ bool Invalid = false;
+ StringRef Filename = getSpelling(FilenameTok, FilenameBuffer, &Invalid);
+ if (Invalid)
+ return;
+
+ bool isAngled =
+ GetIncludeFilenameSpelling(FilenameTok.getLocation(), Filename);
+ // If GetIncludeFilenameSpelling set the start ptr to null, there was an
+ // error.
+ if (Filename.empty())
+ return;
+
+ // Search include directories for this file.
+ const DirectoryLookup *CurDir;
+ const FileEntry *File =
+ LookupFile(FilenameTok.getLocation(), Filename, isAngled, nullptr,
+ nullptr, CurDir, nullptr, nullptr, nullptr);
+ if (!File) {
+ if (!SuppressIncludeNotFoundError)
+ Diag(FilenameTok, diag::err_pp_file_not_found) << Filename;
+ return;
+ }
+
+ const FileEntry *CurFile = getCurrentFileLexer()->getFileEntry();
+
+ // If this file is older than the file it depends on, emit a diagnostic.
+ if (CurFile && CurFile->getModificationTime() < File->getModificationTime()) {
+    // Lex the remaining tokens on the line and include them in the message.
+ std::string Message;
+ Lex(DependencyTok);
+ while (DependencyTok.isNot(tok::eod)) {
+ Message += getSpelling(DependencyTok) + " ";
+ Lex(DependencyTok);
+ }
+
+ // Remove the trailing ' ' if present.
+ if (!Message.empty())
+ Message.erase(Message.end()-1);
+ Diag(FilenameTok, diag::pp_out_of_date_dependency) << Message;
+ }
+}
+
+/// ParsePragmaPushOrPopMacro - Handle parsing of pragma push_macro/pop_macro.
+/// Return the IdentifierInfo* associated with the macro to push or pop.
+IdentifierInfo *Preprocessor::ParsePragmaPushOrPopMacro(Token &Tok) {
+ // Remember the pragma token location.
+ Token PragmaTok = Tok;
+
+ // Read the '('.
+ Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(PragmaTok.getLocation(), diag::err_pragma_push_pop_macro_malformed)
+ << getSpelling(PragmaTok);
+ return nullptr;
+ }
+
+ // Read the macro name string.
+ Lex(Tok);
+ if (Tok.isNot(tok::string_literal)) {
+ Diag(PragmaTok.getLocation(), diag::err_pragma_push_pop_macro_malformed)
+ << getSpelling(PragmaTok);
+ return nullptr;
+ }
+
+ if (Tok.hasUDSuffix()) {
+ Diag(Tok, diag::err_invalid_string_udl);
+ return nullptr;
+ }
+
+ // Remember the macro string.
+ std::string StrVal = getSpelling(Tok);
+
+ // Read the ')'.
+ Lex(Tok);
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(PragmaTok.getLocation(), diag::err_pragma_push_pop_macro_malformed)
+ << getSpelling(PragmaTok);
+ return nullptr;
+ }
+
+ assert(StrVal[0] == '"' && StrVal[StrVal.size()-1] == '"' &&
+ "Invalid string token!");
+
+ // Create a Token from the string.
+ Token MacroTok;
+ MacroTok.startToken();
+ MacroTok.setKind(tok::raw_identifier);
+ CreateString(StringRef(&StrVal[1], StrVal.size() - 2), MacroTok);
+
+  // Get the IdentifierInfo for the macro name token.
+ return LookUpIdentifierInfo(MacroTok);
+}
+
+/// \brief Handle \#pragma push_macro.
+///
+/// The syntax is:
+/// \code
+/// #pragma push_macro("macro")
+/// \endcode
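+///
+/// For example (illustrative), a push/pop pair saves and restores a
+/// definition:
+/// \code
+///   #define X 1
+///   #pragma push_macro("X")
+///   #undef X
+///   #pragma pop_macro("X") // X is defined as 1 again
+/// \endcode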
+void Preprocessor::HandlePragmaPushMacro(Token &PushMacroTok) {
+ // Parse the pragma directive and get the macro IdentifierInfo*.
+ IdentifierInfo *IdentInfo = ParsePragmaPushOrPopMacro(PushMacroTok);
+ if (!IdentInfo) return;
+
+ // Get the MacroInfo associated with IdentInfo.
+ MacroInfo *MI = getMacroInfo(IdentInfo);
+
+ if (MI) {
+ // Allow the original MacroInfo to be redefined later.
+ MI->setIsAllowRedefinitionsWithoutWarning(true);
+ }
+
+  // Push the MacroInfo so we can retrieve and reinstall it later.
+ PragmaPushMacroInfo[IdentInfo].push_back(MI);
+}
+
+/// \brief Handle \#pragma pop_macro.
+///
+/// The syntax is:
+/// \code
+/// #pragma pop_macro("macro")
+/// \endcode
+void Preprocessor::HandlePragmaPopMacro(Token &PopMacroTok) {
+ SourceLocation MessageLoc = PopMacroTok.getLocation();
+
+ // Parse the pragma directive and get the macro IdentifierInfo*.
+ IdentifierInfo *IdentInfo = ParsePragmaPushOrPopMacro(PopMacroTok);
+ if (!IdentInfo) return;
+
+ // Find the vector<MacroInfo*> associated with the macro.
+ llvm::DenseMap<IdentifierInfo*, std::vector<MacroInfo*> >::iterator iter =
+ PragmaPushMacroInfo.find(IdentInfo);
+ if (iter != PragmaPushMacroInfo.end()) {
+ // Forget the MacroInfo currently associated with IdentInfo.
+ if (MacroInfo *MI = getMacroInfo(IdentInfo)) {
+ if (MI->isWarnIfUnused())
+ WarnUnusedMacroLocs.erase(MI->getDefinitionLoc());
+ appendMacroDirective(IdentInfo, AllocateUndefMacroDirective(MessageLoc));
+ }
+
+ // Get the MacroInfo we want to reinstall.
+ MacroInfo *MacroToReInstall = iter->second.back();
+
+ if (MacroToReInstall)
+ // Reinstall the previously pushed macro.
+ appendDefMacroDirective(IdentInfo, MacroToReInstall, MessageLoc);
+
+ // Pop PragmaPushMacroInfo stack.
+ iter->second.pop_back();
+    if (iter->second.empty())
+ PragmaPushMacroInfo.erase(iter);
+ } else {
+ Diag(MessageLoc, diag::warn_pragma_pop_macro_no_push)
+ << IdentInfo->getName();
+ }
+}
+
+void Preprocessor::HandlePragmaIncludeAlias(Token &Tok) {
+ // We will either get a quoted filename or a bracketed filename, and we
+ // have to track which we got. The first filename is the source name,
+ // and the second name is the mapped filename. If the first is quoted,
+ // the second must be as well (cannot mix and match quotes and brackets).
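+  //
+  // For example (illustrative):
+  //   #pragma include_alias("original.h", "replacement.h")
+  //   #pragma include_alias(<original.h>, <replacement.h>)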
+
+ // Get the open paren
+ Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::warn_pragma_include_alias_expected) << "(";
+ return;
+ }
+
+ // We expect either a quoted string literal, or a bracketed name
+ Token SourceFilenameTok;
+ CurPPLexer->LexIncludeFilename(SourceFilenameTok);
+ if (SourceFilenameTok.is(tok::eod)) {
+ // The diagnostic has already been handled
+ return;
+ }
+
+ StringRef SourceFileName;
+ SmallString<128> FileNameBuffer;
+ if (SourceFilenameTok.is(tok::string_literal) ||
+ SourceFilenameTok.is(tok::angle_string_literal)) {
+ SourceFileName = getSpelling(SourceFilenameTok, FileNameBuffer);
+ } else if (SourceFilenameTok.is(tok::less)) {
+ // This could be a path instead of just a name
+ FileNameBuffer.push_back('<');
+ SourceLocation End;
+ if (ConcatenateIncludeName(FileNameBuffer, End))
+ return; // Diagnostic already emitted
+ SourceFileName = FileNameBuffer;
+ } else {
+ Diag(Tok, diag::warn_pragma_include_alias_expected_filename);
+ return;
+ }
+ FileNameBuffer.clear();
+
+ // Now we expect a comma, followed by another include name
+ Lex(Tok);
+ if (Tok.isNot(tok::comma)) {
+ Diag(Tok, diag::warn_pragma_include_alias_expected) << ",";
+ return;
+ }
+
+ Token ReplaceFilenameTok;
+ CurPPLexer->LexIncludeFilename(ReplaceFilenameTok);
+ if (ReplaceFilenameTok.is(tok::eod)) {
+ // The diagnostic has already been handled
+ return;
+ }
+
+ StringRef ReplaceFileName;
+ if (ReplaceFilenameTok.is(tok::string_literal) ||
+ ReplaceFilenameTok.is(tok::angle_string_literal)) {
+ ReplaceFileName = getSpelling(ReplaceFilenameTok, FileNameBuffer);
+ } else if (ReplaceFilenameTok.is(tok::less)) {
+ // This could be a path instead of just a name
+ FileNameBuffer.push_back('<');
+ SourceLocation End;
+ if (ConcatenateIncludeName(FileNameBuffer, End))
+ return; // Diagnostic already emitted
+ ReplaceFileName = FileNameBuffer;
+ } else {
+ Diag(Tok, diag::warn_pragma_include_alias_expected_filename);
+ return;
+ }
+
+ // Finally, we expect the closing paren
+ Lex(Tok);
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::warn_pragma_include_alias_expected) << ")";
+ return;
+ }
+
+ // Now that we have the source and target filenames, we need to make sure
+ // they're both of the same type (angled vs non-angled)
+ StringRef OriginalSource = SourceFileName;
+
+ bool SourceIsAngled =
+ GetIncludeFilenameSpelling(SourceFilenameTok.getLocation(),
+ SourceFileName);
+ bool ReplaceIsAngled =
+ GetIncludeFilenameSpelling(ReplaceFilenameTok.getLocation(),
+ ReplaceFileName);
+ if (!SourceFileName.empty() && !ReplaceFileName.empty() &&
+ (SourceIsAngled != ReplaceIsAngled)) {
+ unsigned int DiagID;
+ if (SourceIsAngled)
+ DiagID = diag::warn_pragma_include_alias_mismatch_angle;
+ else
+ DiagID = diag::warn_pragma_include_alias_mismatch_quote;
+
+ Diag(SourceFilenameTok.getLocation(), DiagID)
+ << SourceFileName
+ << ReplaceFileName;
+
+ return;
+ }
+
+ // Now we can let the include handler know about this mapping
+ getHeaderSearchInfo().AddIncludeAlias(OriginalSource, ReplaceFileName);
+}
+
+/// AddPragmaHandler - Add the specified pragma handler to the preprocessor.
+/// If 'Namespace' is non-empty, then it is a token required to exist on the
+/// pragma line before the pragma string starts, e.g. "STDC" or "GCC".
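+///
+/// Usage sketch (illustrative; 'MyPragmaHandler' is a hypothetical
+/// PragmaHandler subclass):
+/// \code
+///   PP.AddPragmaHandler("clang", new MyPragmaHandler("my_pragma"));
+///   // now handles '#pragma clang my_pragma ...'
+/// \endcode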
+void Preprocessor::AddPragmaHandler(StringRef Namespace,
+ PragmaHandler *Handler) {
+ PragmaNamespace *InsertNS = PragmaHandlers.get();
+
+ // If this is specified to be in a namespace, step down into it.
+ if (!Namespace.empty()) {
+ // If there is already a pragma handler with the name of this namespace,
+ // we either have an error (directive with the same name as a namespace) or
+ // we already have the namespace to insert into.
+ if (PragmaHandler *Existing = PragmaHandlers->FindHandler(Namespace)) {
+ InsertNS = Existing->getIfNamespace();
+ assert(InsertNS != nullptr && "Cannot have a pragma namespace and pragma"
+ " handler with the same name!");
+ } else {
+ // Otherwise, this namespace doesn't exist yet, create and insert the
+ // handler for it.
+ InsertNS = new PragmaNamespace(Namespace);
+ PragmaHandlers->AddPragma(InsertNS);
+ }
+ }
+
+ // Check to make sure we don't already have a pragma for this identifier.
+ assert(!InsertNS->FindHandler(Handler->getName()) &&
+ "Pragma handler already exists for this identifier!");
+ InsertNS->AddPragma(Handler);
+}
+
+/// RemovePragmaHandler - Remove the specific pragma handler from the
+/// preprocessor. If \p Namespace is non-empty, then it should be the
+/// namespace that \p Handler was added to. It is an error to remove
+/// a handler that has not been registered.
+void Preprocessor::RemovePragmaHandler(StringRef Namespace,
+ PragmaHandler *Handler) {
+ PragmaNamespace *NS = PragmaHandlers.get();
+
+ // If this is specified to be in a namespace, step down into it.
+ if (!Namespace.empty()) {
+ PragmaHandler *Existing = PragmaHandlers->FindHandler(Namespace);
+ assert(Existing && "Namespace containing handler does not exist!");
+
+ NS = Existing->getIfNamespace();
+ assert(NS && "Invalid namespace, registered as a regular pragma handler!");
+ }
+
+ NS->RemovePragmaHandler(Handler);
+
+ // If this is a non-default namespace and it is now empty, remove it.
+ if (NS != PragmaHandlers.get() && NS->IsEmpty()) {
+ PragmaHandlers->RemovePragmaHandler(NS);
+ delete NS;
+ }
+}
+
+bool Preprocessor::LexOnOffSwitch(tok::OnOffSwitch &Result) {
+ Token Tok;
+ LexUnexpandedToken(Tok);
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::ext_on_off_switch_syntax);
+ return true;
+ }
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (II->isStr("ON"))
+ Result = tok::OOS_ON;
+ else if (II->isStr("OFF"))
+ Result = tok::OOS_OFF;
+ else if (II->isStr("DEFAULT"))
+ Result = tok::OOS_DEFAULT;
+ else {
+ Diag(Tok, diag::ext_on_off_switch_syntax);
+ return true;
+ }
+
+ // Verify that this is followed by EOD.
+ LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::eod))
+ Diag(Tok, diag::ext_pragma_syntax_eod);
+ return false;
+}
+
+namespace {
+/// PragmaOnceHandler - "\#pragma once" marks the file as atomically included.
+struct PragmaOnceHandler : public PragmaHandler {
+ PragmaOnceHandler() : PragmaHandler("once") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &OnceTok) override {
+ PP.CheckEndOfDirective("pragma once");
+ PP.HandlePragmaOnce(OnceTok);
+ }
+};
+
+/// PragmaMarkHandler - "\#pragma mark ..." is ignored by the compiler, and the
+/// rest of the line is not lexed.
+struct PragmaMarkHandler : public PragmaHandler {
+ PragmaMarkHandler() : PragmaHandler("mark") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &MarkTok) override {
+ PP.HandlePragmaMark();
+ }
+};
+
+/// PragmaPoisonHandler - "\#pragma poison x" marks x as not usable.
+struct PragmaPoisonHandler : public PragmaHandler {
+ PragmaPoisonHandler() : PragmaHandler("poison") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &PoisonTok) override {
+ PP.HandlePragmaPoison(PoisonTok);
+ }
+};
+
+/// PragmaSystemHeaderHandler - "\#pragma system_header" marks the current file
+/// as a system header, which silences warnings in it.
+struct PragmaSystemHeaderHandler : public PragmaHandler {
+ PragmaSystemHeaderHandler() : PragmaHandler("system_header") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &SHToken) override {
+ PP.HandlePragmaSystemHeader(SHToken);
+ PP.CheckEndOfDirective("pragma");
+ }
+};
+struct PragmaDependencyHandler : public PragmaHandler {
+ PragmaDependencyHandler() : PragmaHandler("dependency") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &DepToken) override {
+ PP.HandlePragmaDependency(DepToken);
+ }
+};
+
+struct PragmaDebugHandler : public PragmaHandler {
+ PragmaDebugHandler() : PragmaHandler("__debug") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &DepToken) override {
+ Token Tok;
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok, diag::warn_pragma_diagnostic_invalid);
+ return;
+ }
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+
+ if (II->isStr("assert")) {
+ llvm_unreachable("This is an assertion!");
+ } else if (II->isStr("crash")) {
+ LLVM_BUILTIN_TRAP;
+ } else if (II->isStr("parser_crash")) {
+ Token Crasher;
+ Crasher.startToken();
+ Crasher.setKind(tok::annot_pragma_parser_crash);
+ Crasher.setAnnotationRange(SourceRange(Tok.getLocation()));
+ PP.EnterToken(Crasher);
+ } else if (II->isStr("llvm_fatal_error")) {
+ llvm::report_fatal_error("#pragma clang __debug llvm_fatal_error");
+ } else if (II->isStr("llvm_unreachable")) {
+ llvm_unreachable("#pragma clang __debug llvm_unreachable");
+ } else if (II->isStr("macro")) {
+ Token MacroName;
+ PP.LexUnexpandedToken(MacroName);
+ auto *MacroII = MacroName.getIdentifierInfo();
+ if (MacroII)
+ PP.dumpMacroInfo(MacroII);
+ else
+ PP.Diag(MacroName, diag::warn_pragma_diagnostic_invalid);
+ } else if (II->isStr("overflow_stack")) {
+ DebugOverflowStack();
+ } else if (II->isStr("handle_crash")) {
+      llvm::CrashRecoveryContext *CRC = llvm::CrashRecoveryContext::GetCurrent();
+ if (CRC)
+ CRC->HandleCrash();
+ } else if (II->isStr("captured")) {
+ HandleCaptured(PP);
+ } else {
+ PP.Diag(Tok, diag::warn_pragma_debug_unexpected_command)
+ << II->getName();
+ }
+
+ PPCallbacks *Callbacks = PP.getPPCallbacks();
+ if (Callbacks)
+ Callbacks->PragmaDebug(Tok.getLocation(), II->getName());
+ }
+
+ void HandleCaptured(Preprocessor &PP) {
+ // Skip if emitting preprocessed output.
+ if (PP.isPreprocessedOutput())
+ return;
+
+ Token Tok;
+ PP.LexUnexpandedToken(Tok);
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok, diag::ext_pp_extra_tokens_at_eol)
+ << "pragma clang __debug captured";
+ return;
+ }
+
+ SourceLocation NameLoc = Tok.getLocation();
+ Token *Toks = PP.getPreprocessorAllocator().Allocate<Token>(1);
+ Toks->startToken();
+ Toks->setKind(tok::annot_pragma_captured);
+ Toks->setLocation(NameLoc);
+
+ PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true,
+ /*OwnsTokens=*/false);
+ }
+
+// Disable MSVC warning about runtime stack overflow.
+#ifdef _MSC_VER
+ #pragma warning(disable : 4717)
+#endif
+ static void DebugOverflowStack() {
+ void (*volatile Self)() = DebugOverflowStack;
+ Self();
+ }
+#ifdef _MSC_VER
+ #pragma warning(default : 4717)
+#endif
+
+};
+
+/// PragmaDiagnosticHandler - e.g. '\#pragma GCC diagnostic ignored "-Wformat"'
+struct PragmaDiagnosticHandler : public PragmaHandler {
+private:
+ const char *Namespace;
+public:
+ explicit PragmaDiagnosticHandler(const char *NS) :
+ PragmaHandler("diagnostic"), Namespace(NS) {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &DiagToken) override {
+ SourceLocation DiagLoc = DiagToken.getLocation();
+ Token Tok;
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok, diag::warn_pragma_diagnostic_invalid);
+ return;
+ }
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ PPCallbacks *Callbacks = PP.getPPCallbacks();
+
+ if (II->isStr("pop")) {
+ if (!PP.getDiagnostics().popMappings(DiagLoc))
+ PP.Diag(Tok, diag::warn_pragma_diagnostic_cannot_pop);
+ else if (Callbacks)
+ Callbacks->PragmaDiagnosticPop(DiagLoc, Namespace);
+ return;
+ } else if (II->isStr("push")) {
+ PP.getDiagnostics().pushMappings(DiagLoc);
+ if (Callbacks)
+ Callbacks->PragmaDiagnosticPush(DiagLoc, Namespace);
+ return;
+ }
+
+ diag::Severity SV = llvm::StringSwitch<diag::Severity>(II->getName())
+ .Case("ignored", diag::Severity::Ignored)
+ .Case("warning", diag::Severity::Warning)
+ .Case("error", diag::Severity::Error)
+ .Case("fatal", diag::Severity::Fatal)
+ .Default(diag::Severity());
+
+ if (SV == diag::Severity()) {
+ PP.Diag(Tok, diag::warn_pragma_diagnostic_invalid);
+ return;
+ }
+
+ PP.LexUnexpandedToken(Tok);
+ SourceLocation StringLoc = Tok.getLocation();
+
+ std::string WarningName;
+ if (!PP.FinishLexStringLiteral(Tok, WarningName, "pragma diagnostic",
+ /*MacroExpansion=*/false))
+ return;
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_diagnostic_invalid_token);
+ return;
+ }
+
+ if (WarningName.size() < 3 || WarningName[0] != '-' ||
+ (WarningName[1] != 'W' && WarningName[1] != 'R')) {
+ PP.Diag(StringLoc, diag::warn_pragma_diagnostic_invalid_option);
+ return;
+ }
+
+ if (PP.getDiagnostics().setSeverityForGroup(
+ WarningName[1] == 'W' ? diag::Flavor::WarningOrError
+ : diag::Flavor::Remark,
+ WarningName.substr(2), SV, DiagLoc))
+ PP.Diag(StringLoc, diag::warn_pragma_diagnostic_unknown_warning)
+ << WarningName;
+ else if (Callbacks)
+ Callbacks->PragmaDiagnostic(DiagLoc, Namespace, SV, WarningName);
+ }
+};
+
+/// "\#pragma warning(...)". MSVC's diagnostics do not map cleanly to clang's
+/// diagnostics, so we don't really implement this pragma. We parse it and
+/// ignore it to avoid -Wunknown-pragma warnings.
+struct PragmaWarningHandler : public PragmaHandler {
+ PragmaWarningHandler() : PragmaHandler("warning") {}
+
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &Tok) override {
+ // Parse things like:
+ // warning(push, 1)
+ // warning(pop)
+ // warning(disable : 1 2 3 ; error : 4 5 6 ; suppress : 7 8 9)
+ SourceLocation DiagLoc = Tok.getLocation();
+ PPCallbacks *Callbacks = PP.getPPCallbacks();
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok, diag::warn_pragma_warning_expected) << "(";
+ return;
+ }
+
+ PP.Lex(Tok);
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+
+ if (II && II->isStr("push")) {
+ // #pragma warning( push[ ,n ] )
+ int Level = -1;
+ PP.Lex(Tok);
+ if (Tok.is(tok::comma)) {
+ PP.Lex(Tok);
+ uint64_t Value;
+ if (Tok.is(tok::numeric_constant) &&
+ PP.parseSimpleIntegerLiteral(Tok, Value))
+ Level = int(Value);
+ if (Level < 0 || Level > 4) {
+ PP.Diag(Tok, diag::warn_pragma_warning_push_level);
+ return;
+ }
+ }
+ if (Callbacks)
+ Callbacks->PragmaWarningPush(DiagLoc, Level);
+ } else if (II && II->isStr("pop")) {
+ // #pragma warning( pop )
+ PP.Lex(Tok);
+ if (Callbacks)
+ Callbacks->PragmaWarningPop(DiagLoc);
+ } else {
+ // #pragma warning( warning-specifier : warning-number-list
+ // [; warning-specifier : warning-number-list...] )
+ while (true) {
+ II = Tok.getIdentifierInfo();
+ if (!II && !Tok.is(tok::numeric_constant)) {
+ PP.Diag(Tok, diag::warn_pragma_warning_spec_invalid);
+ return;
+ }
+
+ // Figure out which warning specifier this is.
+ bool SpecifierValid;
+ StringRef Specifier;
+ llvm::SmallString<1> SpecifierBuf;
+ if (II) {
+ Specifier = II->getName();
+ SpecifierValid = llvm::StringSwitch<bool>(Specifier)
+ .Cases("default", "disable", "error", "once",
+ "suppress", true)
+ .Default(false);
+ // If we read a correct specifier, snatch next token (that should be
+ // ":", checked later).
+ if (SpecifierValid)
+ PP.Lex(Tok);
+ } else {
+ // Token is a numeric constant. It should be either 1, 2, 3 or 4.
+ uint64_t Value;
+ Specifier = PP.getSpelling(Tok, SpecifierBuf);
+ if (PP.parseSimpleIntegerLiteral(Tok, Value)) {
+ SpecifierValid = (Value >= 1) && (Value <= 4);
+ } else
+ SpecifierValid = false;
+ // Next token already snatched by parseSimpleIntegerLiteral.
+ }
+
+ if (!SpecifierValid) {
+ PP.Diag(Tok, diag::warn_pragma_warning_spec_invalid);
+ return;
+ }
+ if (Tok.isNot(tok::colon)) {
+ PP.Diag(Tok, diag::warn_pragma_warning_expected) << ":";
+ return;
+ }
+
+ // Collect the warning ids.
+ SmallVector<int, 4> Ids;
+ PP.Lex(Tok);
+ while (Tok.is(tok::numeric_constant)) {
+ uint64_t Value;
+ if (!PP.parseSimpleIntegerLiteral(Tok, Value) || Value == 0 ||
+ Value > INT_MAX) {
+ PP.Diag(Tok, diag::warn_pragma_warning_expected_number);
+ return;
+ }
+ Ids.push_back(int(Value));
+ }
+ if (Callbacks)
+ Callbacks->PragmaWarning(DiagLoc, Specifier, Ids);
+
+ // Parse the next specifier if there is a semicolon.
+ if (Tok.isNot(tok::semi))
+ break;
+ PP.Lex(Tok);
+ }
+ }
+
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok, diag::warn_pragma_warning_expected) << ")";
+ return;
+ }
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::eod))
+ PP.Diag(Tok, diag::ext_pp_extra_tokens_at_eol) << "pragma warning";
+ }
+};
+
+/// PragmaIncludeAliasHandler - "\#pragma include_alias("...")".
+struct PragmaIncludeAliasHandler : public PragmaHandler {
+ PragmaIncludeAliasHandler() : PragmaHandler("include_alias") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &IncludeAliasTok) override {
+ PP.HandlePragmaIncludeAlias(IncludeAliasTok);
+ }
+};
+
+/// PragmaMessageHandler - Handle the microsoft and gcc \#pragma message
+/// extension. The syntax is:
+/// \code
+/// #pragma message(string)
+/// \endcode
+/// OR, in GCC mode:
+/// \code
+/// #pragma message string
+/// \endcode
+/// The string is fully macro expanded, and permits string concatenation,
+/// embedded escape characters, etc. See MSDN for more details.
+/// Also handles \#pragma GCC warning and \#pragma GCC error which take the same
+/// form as \#pragma message.
+struct PragmaMessageHandler : public PragmaHandler {
+private:
+ const PPCallbacks::PragmaMessageKind Kind;
+ const StringRef Namespace;
+
+ static const char* PragmaKind(PPCallbacks::PragmaMessageKind Kind,
+ bool PragmaNameOnly = false) {
+ switch (Kind) {
+ case PPCallbacks::PMK_Message:
+ return PragmaNameOnly ? "message" : "pragma message";
+ case PPCallbacks::PMK_Warning:
+ return PragmaNameOnly ? "warning" : "pragma warning";
+ case PPCallbacks::PMK_Error:
+ return PragmaNameOnly ? "error" : "pragma error";
+ }
+ llvm_unreachable("Unknown PragmaMessageKind!");
+ }
+
+public:
+ PragmaMessageHandler(PPCallbacks::PragmaMessageKind Kind,
+ StringRef Namespace = StringRef())
+ : PragmaHandler(PragmaKind(Kind, true)), Kind(Kind), Namespace(Namespace) {}
+
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &Tok) override {
+ SourceLocation MessageLoc = Tok.getLocation();
+ PP.Lex(Tok);
+ bool ExpectClosingParen = false;
+ switch (Tok.getKind()) {
+ case tok::l_paren:
+ // We have a MSVC style pragma message.
+ ExpectClosingParen = true;
+ // Read the string.
+ PP.Lex(Tok);
+ break;
+ case tok::string_literal:
+ // We have a GCC style pragma message, and we just read the string.
+ break;
+ default:
+ PP.Diag(MessageLoc, diag::err_pragma_message_malformed) << Kind;
+ return;
+ }
+
+ std::string MessageString;
+ if (!PP.FinishLexStringLiteral(Tok, MessageString, PragmaKind(Kind),
+ /*MacroExpansion=*/true))
+ return;
+
+ if (ExpectClosingParen) {
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_message_malformed) << Kind;
+ return;
+ }
+ PP.Lex(Tok); // eat the r_paren.
+ }
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_message_malformed) << Kind;
+ return;
+ }
+
+ // Output the message.
+ PP.Diag(MessageLoc, (Kind == PPCallbacks::PMK_Error)
+ ? diag::err_pragma_message
+ : diag::warn_pragma_message) << MessageString;
+
+ // If the pragma is lexically sound, notify any interested PPCallbacks.
+ if (PPCallbacks *Callbacks = PP.getPPCallbacks())
+ Callbacks->PragmaMessage(MessageLoc, Namespace, Kind, MessageString);
+ }
+};
+
+/// PragmaPushMacroHandler - "\#pragma push_macro" saves the value of the
+/// macro on the top of the stack.
+struct PragmaPushMacroHandler : public PragmaHandler {
+ PragmaPushMacroHandler() : PragmaHandler("push_macro") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &PushMacroTok) override {
+ PP.HandlePragmaPushMacro(PushMacroTok);
+ }
+};
+
+/// PragmaPopMacroHandler - "\#pragma pop_macro" sets the value of the
+/// macro to the value on the top of the stack.
+struct PragmaPopMacroHandler : public PragmaHandler {
+ PragmaPopMacroHandler() : PragmaHandler("pop_macro") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &PopMacroTok) override {
+ PP.HandlePragmaPopMacro(PopMacroTok);
+ }
+};
+
+// Pragma STDC implementations.
+
+/// PragmaSTDC_FENV_ACCESSHandler - "\#pragma STDC FENV_ACCESS ...".
+struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
+ PragmaSTDC_FENV_ACCESSHandler() : PragmaHandler("FENV_ACCESS") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &Tok) override {
+ tok::OnOffSwitch OOS;
+ if (PP.LexOnOffSwitch(OOS))
+ return;
+ if (OOS == tok::OOS_ON)
+ PP.Diag(Tok, diag::warn_stdc_fenv_access_not_supported);
+ }
+};
+
+/// PragmaSTDC_CX_LIMITED_RANGEHandler - "\#pragma STDC CX_LIMITED_RANGE ...".
+struct PragmaSTDC_CX_LIMITED_RANGEHandler : public PragmaHandler {
+ PragmaSTDC_CX_LIMITED_RANGEHandler()
+ : PragmaHandler("CX_LIMITED_RANGE") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &Tok) override {
+ tok::OnOffSwitch OOS;
+ PP.LexOnOffSwitch(OOS);
+ }
+};
+
+/// PragmaSTDC_UnknownHandler - "\#pragma STDC ...".
+struct PragmaSTDC_UnknownHandler : public PragmaHandler {
+ PragmaSTDC_UnknownHandler() {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &UnknownTok) override {
+ // C99 6.10.6p2, unknown forms are not allowed.
+ PP.Diag(UnknownTok, diag::ext_stdc_pragma_ignored);
+ }
+};
+
+/// PragmaARCCFCodeAuditedHandler -
+/// \#pragma clang arc_cf_code_audited begin/end
+struct PragmaARCCFCodeAuditedHandler : public PragmaHandler {
+ PragmaARCCFCodeAuditedHandler() : PragmaHandler("arc_cf_code_audited") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &NameTok) override {
+ SourceLocation Loc = NameTok.getLocation();
+ bool IsBegin;
+
+ Token Tok;
+
+ // Lex the 'begin' or 'end'.
+ PP.LexUnexpandedToken(Tok);
+ const IdentifierInfo *BeginEnd = Tok.getIdentifierInfo();
+ if (BeginEnd && BeginEnd->isStr("begin")) {
+ IsBegin = true;
+ } else if (BeginEnd && BeginEnd->isStr("end")) {
+ IsBegin = false;
+ } else {
+ PP.Diag(Tok.getLocation(), diag::err_pp_arc_cf_code_audited_syntax);
+ return;
+ }
+
+ // Verify that this is followed by EOD.
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::eod))
+ PP.Diag(Tok, diag::ext_pp_extra_tokens_at_eol) << "pragma";
+
+ // The start location of the active audit.
+ SourceLocation BeginLoc = PP.getPragmaARCCFCodeAuditedLoc();
+
+ // The start location we want after processing this.
+ SourceLocation NewLoc;
+
+ if (IsBegin) {
+ // Complain about attempts to re-enter an audit.
+ if (BeginLoc.isValid()) {
+ PP.Diag(Loc, diag::err_pp_double_begin_of_arc_cf_code_audited);
+ PP.Diag(BeginLoc, diag::note_pragma_entered_here);
+ }
+ NewLoc = Loc;
+ } else {
+ // Complain about attempts to leave an audit that doesn't exist.
+ if (!BeginLoc.isValid()) {
+ PP.Diag(Loc, diag::err_pp_unmatched_end_of_arc_cf_code_audited);
+ return;
+ }
+ NewLoc = SourceLocation();
+ }
+
+ PP.setPragmaARCCFCodeAuditedLoc(NewLoc);
+ }
+};
+
+/// PragmaAssumeNonNullHandler -
+/// \#pragma clang assume_nonnull begin/end
+struct PragmaAssumeNonNullHandler : public PragmaHandler {
+ PragmaAssumeNonNullHandler() : PragmaHandler("assume_nonnull") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &NameTok) override {
+ SourceLocation Loc = NameTok.getLocation();
+ bool IsBegin;
+
+ Token Tok;
+
+ // Lex the 'begin' or 'end'.
+ PP.LexUnexpandedToken(Tok);
+ const IdentifierInfo *BeginEnd = Tok.getIdentifierInfo();
+ if (BeginEnd && BeginEnd->isStr("begin")) {
+ IsBegin = true;
+ } else if (BeginEnd && BeginEnd->isStr("end")) {
+ IsBegin = false;
+ } else {
+ PP.Diag(Tok.getLocation(), diag::err_pp_assume_nonnull_syntax);
+ return;
+ }
+
+ // Verify that this is followed by EOD.
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::eod))
+ PP.Diag(Tok, diag::ext_pp_extra_tokens_at_eol) << "pragma";
+
+ // The start location of the active audit.
+ SourceLocation BeginLoc = PP.getPragmaAssumeNonNullLoc();
+
+ // The start location we want after processing this.
+ SourceLocation NewLoc;
+
+ if (IsBegin) {
+ // Complain about attempts to re-enter an audit.
+ if (BeginLoc.isValid()) {
+ PP.Diag(Loc, diag::err_pp_double_begin_of_assume_nonnull);
+ PP.Diag(BeginLoc, diag::note_pragma_entered_here);
+ }
+ NewLoc = Loc;
+ } else {
+ // Complain about attempts to leave an audit that doesn't exist.
+ if (!BeginLoc.isValid()) {
+ PP.Diag(Loc, diag::err_pp_unmatched_end_of_assume_nonnull);
+ return;
+ }
+ NewLoc = SourceLocation();
+ }
+
+ PP.setPragmaAssumeNonNullLoc(NewLoc);
+ }
+};
+
+/// \brief Handle "\#pragma region [...]"
+///
+/// The syntax is
+/// \code
+/// #pragma region [optional name]
+/// #pragma endregion [optional comment]
+/// \endcode
+///
+/// \note This is an
+/// <a href="http://msdn.microsoft.com/en-us/library/b6xkz944(v=vs.80).aspx">editor-only</a>
+/// pragma; the compiler simply skips it.
+struct PragmaRegionHandler : public PragmaHandler {
+ PragmaRegionHandler(const char *pragma) : PragmaHandler(pragma) { }
+
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &NameTok) override {
+    // #pragma region: endregion matches can be verified.
+    // __pragma(region): makes no sense, but is ignored by MSVC.
+    // _Pragma("region") is not valid in MSVC, and there is no point in
+    // handling a _Pragma region differently anyway.
+ }
+};
+
+} // end anonymous namespace
+
+/// RegisterBuiltinPragmas - Install the standard preprocessor pragmas:
+/// \#pragma GCC poison/system_header/dependency and \#pragma once.
+void Preprocessor::RegisterBuiltinPragmas() {
+ AddPragmaHandler(new PragmaOnceHandler());
+ AddPragmaHandler(new PragmaMarkHandler());
+ AddPragmaHandler(new PragmaPushMacroHandler());
+ AddPragmaHandler(new PragmaPopMacroHandler());
+ AddPragmaHandler(new PragmaMessageHandler(PPCallbacks::PMK_Message));
+
+ // #pragma GCC ...
+ AddPragmaHandler("GCC", new PragmaPoisonHandler());
+ AddPragmaHandler("GCC", new PragmaSystemHeaderHandler());
+ AddPragmaHandler("GCC", new PragmaDependencyHandler());
+ AddPragmaHandler("GCC", new PragmaDiagnosticHandler("GCC"));
+ AddPragmaHandler("GCC", new PragmaMessageHandler(PPCallbacks::PMK_Warning,
+ "GCC"));
+ AddPragmaHandler("GCC", new PragmaMessageHandler(PPCallbacks::PMK_Error,
+ "GCC"));
+ // #pragma clang ...
+ AddPragmaHandler("clang", new PragmaPoisonHandler());
+ AddPragmaHandler("clang", new PragmaSystemHeaderHandler());
+ AddPragmaHandler("clang", new PragmaDebugHandler());
+ AddPragmaHandler("clang", new PragmaDependencyHandler());
+ AddPragmaHandler("clang", new PragmaDiagnosticHandler("clang"));
+ AddPragmaHandler("clang", new PragmaARCCFCodeAuditedHandler());
+ AddPragmaHandler("clang", new PragmaAssumeNonNullHandler());
+
+ AddPragmaHandler("STDC", new PragmaSTDC_FENV_ACCESSHandler());
+ AddPragmaHandler("STDC", new PragmaSTDC_CX_LIMITED_RANGEHandler());
+ AddPragmaHandler("STDC", new PragmaSTDC_UnknownHandler());
+
+ // MS extensions.
+ if (LangOpts.MicrosoftExt) {
+ AddPragmaHandler(new PragmaWarningHandler());
+ AddPragmaHandler(new PragmaIncludeAliasHandler());
+ AddPragmaHandler(new PragmaRegionHandler("region"));
+ AddPragmaHandler(new PragmaRegionHandler("endregion"));
+ }
+}
+
+/// Ignore all pragmas, useful for modes such as -Eonly which would otherwise
+/// warn about those pragmas being unknown.
+void Preprocessor::IgnorePragmas() {
+ AddPragmaHandler(new EmptyPragmaHandler());
+ // Also ignore all pragmas in all namespaces created
+ // in Preprocessor::RegisterBuiltinPragmas().
+ AddPragmaHandler("GCC", new EmptyPragmaHandler());
+ AddPragmaHandler("clang", new EmptyPragmaHandler());
+ if (PragmaHandler *NS = PragmaHandlers->FindHandler("STDC")) {
+ // Preprocessor::RegisterBuiltinPragmas() already registers
+ // PragmaSTDC_UnknownHandler as the empty handler, so remove it first,
+ // otherwise there will be an assert about a duplicate handler.
+ PragmaNamespace *STDCNamespace = NS->getIfNamespace();
+ assert(STDCNamespace &&
+ "Invalid namespace, registered as a regular pragma handler!");
+ if (PragmaHandler *Existing = STDCNamespace->FindHandler("", false)) {
+ RemovePragmaHandler("STDC", Existing);
+ delete Existing;
+ }
+ }
+ AddPragmaHandler("STDC", new EmptyPragmaHandler());
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp b/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp
new file mode 100644
index 0000000..32e6de6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp
@@ -0,0 +1,488 @@
+//===--- PreprocessingRecord.cpp - Record of Preprocessing ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PreprocessingRecord class, which maintains a record
+// of what occurred during preprocessing, and its helpers.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Lex/PreprocessingRecord.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Token.h"
+#include "llvm/Support/Capacity.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace clang;
+
+ExternalPreprocessingRecordSource::~ExternalPreprocessingRecordSource() {}
+
+InclusionDirective::InclusionDirective(PreprocessingRecord &PPRec,
+ InclusionKind Kind,
+ StringRef FileName,
+ bool InQuotes, bool ImportedModule,
+ const FileEntry *File,
+ SourceRange Range)
+ : PreprocessingDirective(InclusionDirectiveKind, Range),
+ InQuotes(InQuotes), Kind(Kind), ImportedModule(ImportedModule), File(File)
+{
+ char *Memory
+ = (char*)PPRec.Allocate(FileName.size() + 1, llvm::alignOf<char>());
+ memcpy(Memory, FileName.data(), FileName.size());
+ Memory[FileName.size()] = 0;
+ this->FileName = StringRef(Memory, FileName.size());
+}
+
+PreprocessingRecord::PreprocessingRecord(SourceManager &SM)
+ : SourceMgr(SM),
+ ExternalSource(nullptr) {
+}
+
+/// \brief Returns a pair of [Begin, End) iterators of preprocessed entities
+/// that source range \p Range encompasses.
+llvm::iterator_range<PreprocessingRecord::iterator>
+PreprocessingRecord::getPreprocessedEntitiesInRange(SourceRange Range) {
+ if (Range.isInvalid())
+ return llvm::make_range(iterator(), iterator());
+
+ if (CachedRangeQuery.Range == Range) {
+ return llvm::make_range(iterator(this, CachedRangeQuery.Result.first),
+ iterator(this, CachedRangeQuery.Result.second));
+ }
+
+ std::pair<int, int> Res = getPreprocessedEntitiesInRangeSlow(Range);
+
+ CachedRangeQuery.Range = Range;
+ CachedRangeQuery.Result = Res;
+
+ return llvm::make_range(iterator(this, Res.first),
+ iterator(this, Res.second));
+}
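+
+// Usage sketch (illustrative): the returned range can be walked with a
+// range-based for loop; loaded entities are deserialized lazily as the
+// iterator is dereferenced:
+//   for (PreprocessedEntity *PPE : PPRec.getPreprocessedEntitiesInRange(R))
+//     if (auto *MD = dyn_cast<MacroDefinitionRecord>(PPE))
+//       /* use MD */;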
+
+static bool isPreprocessedEntityIfInFileID(PreprocessedEntity *PPE, FileID FID,
+ SourceManager &SM) {
+ assert(FID.isValid());
+ if (!PPE)
+ return false;
+
+ SourceLocation Loc = PPE->getSourceRange().getBegin();
+ if (Loc.isInvalid())
+ return false;
+
+ return SM.isInFileID(SM.getFileLoc(Loc), FID);
+}
+
+/// \brief Returns true if the preprocessed entity that iterator \p PPEI
+/// points to comes from the file \p FID.
+///
+/// Can be used to avoid implicit deserialization of preallocated
+/// preprocessed entities if we only care about entities of a specific file
+/// and not from files \#included in the range given to
+/// \see getPreprocessedEntitiesInRange.
+bool PreprocessingRecord::isEntityInFileID(iterator PPEI, FileID FID) {
+ if (FID.isInvalid())
+ return false;
+
+ int Pos = std::distance(iterator(this, 0), PPEI);
+ if (Pos < 0) {
+ if (unsigned(-Pos-1) >= LoadedPreprocessedEntities.size()) {
+ assert(0 && "Out-of bounds loaded preprocessed entity");
+ return false;
+ }
+ assert(ExternalSource && "No external source to load from");
+ unsigned LoadedIndex = LoadedPreprocessedEntities.size()+Pos;
+ if (PreprocessedEntity *PPE = LoadedPreprocessedEntities[LoadedIndex])
+ return isPreprocessedEntityIfInFileID(PPE, FID, SourceMgr);
+
+ // See if the external source can see if the entity is in the file without
+ // deserializing it.
+ Optional<bool> IsInFile =
+ ExternalSource->isPreprocessedEntityInFileID(LoadedIndex, FID);
+ if (IsInFile.hasValue())
+ return IsInFile.getValue();
+
+    // The external source did not provide a definite answer; go and
+    // deserialize the entity to check.
+ return isPreprocessedEntityIfInFileID(
+ getLoadedPreprocessedEntity(LoadedIndex),
+ FID, SourceMgr);
+ }
+
+ if (unsigned(Pos) >= PreprocessedEntities.size()) {
+ assert(0 && "Out-of bounds local preprocessed entity");
+ return false;
+ }
+ return isPreprocessedEntityIfInFileID(PreprocessedEntities[Pos],
+ FID, SourceMgr);
+}
+
+/// \brief Returns a pair of [Begin, End) iterators of preprocessed entities
+/// that source range \p Range encompasses.
+std::pair<int, int>
+PreprocessingRecord::getPreprocessedEntitiesInRangeSlow(SourceRange Range) {
+ assert(Range.isValid());
+ assert(!SourceMgr.isBeforeInTranslationUnit(Range.getEnd(),Range.getBegin()));
+
+ std::pair<unsigned, unsigned>
+ Local = findLocalPreprocessedEntitiesInRange(Range);
+
+  // If there is no external source, or the range begins in a local file,
+  // only local entities can be in the range.
+ if (!ExternalSource || SourceMgr.isLocalSourceLocation(Range.getBegin()))
+ return std::make_pair(Local.first, Local.second);
+
+ std::pair<unsigned, unsigned>
+ Loaded = ExternalSource->findPreprocessedEntitiesInRange(Range);
+
+  // If no loaded entities intersect the range, it only spans local entities.
+ if (Loaded.first == Loaded.second)
+ return std::make_pair(Local.first, Local.second);
+
+ unsigned TotalLoaded = LoadedPreprocessedEntities.size();
+
+  // If no local entities intersect the range, it only spans loaded entities.
+ if (Local.first == Local.second)
+ return std::make_pair(int(Loaded.first)-TotalLoaded,
+ int(Loaded.second)-TotalLoaded);
+
+  // Range spans both loaded and local entities.
+ return std::make_pair(int(Loaded.first)-TotalLoaded, Local.second);
+}
+
+std::pair<unsigned, unsigned>
+PreprocessingRecord::findLocalPreprocessedEntitiesInRange(
+ SourceRange Range) const {
+ if (Range.isInvalid())
+ return std::make_pair(0,0);
+ assert(!SourceMgr.isBeforeInTranslationUnit(Range.getEnd(),Range.getBegin()));
+
+ unsigned Begin = findBeginLocalPreprocessedEntity(Range.getBegin());
+ unsigned End = findEndLocalPreprocessedEntity(Range.getEnd());
+ return std::make_pair(Begin, End);
+}
+
+namespace {
+
+template <SourceLocation (SourceRange::*getRangeLoc)() const>
+struct PPEntityComp {
+ const SourceManager &SM;
+
+ explicit PPEntityComp(const SourceManager &SM) : SM(SM) { }
+
+ bool operator()(PreprocessedEntity *L, PreprocessedEntity *R) const {
+ SourceLocation LHS = getLoc(L);
+ SourceLocation RHS = getLoc(R);
+ return SM.isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ bool operator()(PreprocessedEntity *L, SourceLocation RHS) const {
+ SourceLocation LHS = getLoc(L);
+ return SM.isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ bool operator()(SourceLocation LHS, PreprocessedEntity *R) const {
+ SourceLocation RHS = getLoc(R);
+ return SM.isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ SourceLocation getLoc(PreprocessedEntity *PPE) const {
+ SourceRange Range = PPE->getSourceRange();
+ return (Range.*getRangeLoc)();
+ }
+};
+
+}
+
+unsigned PreprocessingRecord::findBeginLocalPreprocessedEntity(
+ SourceLocation Loc) const {
+ if (SourceMgr.isLoadedSourceLocation(Loc))
+ return 0;
+
+ size_t Count = PreprocessedEntities.size();
+ size_t Half;
+ std::vector<PreprocessedEntity *>::const_iterator
+ First = PreprocessedEntities.begin();
+ std::vector<PreprocessedEntity *>::const_iterator I;
+
+  // Do a binary search manually instead of using std::lower_bound because
+  // the end locations of entities may be unordered (when a macro expansion
+  // is inside another macro argument), but for this case it is not important
+  // whether we get the first macro expansion or its containing macro.
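+  // For example (illustrative):
+  //   #define ID(x) x
+  //   ID(ID(1))
+  // The inner ID expansion is inside the outer ID's macro argument, so its
+  // end location is not ordered relative to the outer expansion's.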
+ while (Count > 0) {
+ Half = Count/2;
+ I = First;
+ std::advance(I, Half);
+ if (SourceMgr.isBeforeInTranslationUnit((*I)->getSourceRange().getEnd(),
+ Loc)){
+ First = I;
+ ++First;
+ Count = Count - Half - 1;
+ } else
+ Count = Half;
+ }
+
+ return First - PreprocessedEntities.begin();
+}
+
+unsigned PreprocessingRecord::findEndLocalPreprocessedEntity(
+ SourceLocation Loc) const {
+ if (SourceMgr.isLoadedSourceLocation(Loc))
+ return 0;
+
+ std::vector<PreprocessedEntity *>::const_iterator
+ I = std::upper_bound(PreprocessedEntities.begin(),
+ PreprocessedEntities.end(),
+ Loc,
+ PPEntityComp<&SourceRange::getBegin>(SourceMgr));
+ return I - PreprocessedEntities.begin();
+}
+
+PreprocessingRecord::PPEntityID
+PreprocessingRecord::addPreprocessedEntity(PreprocessedEntity *Entity) {
+ assert(Entity);
+ SourceLocation BeginLoc = Entity->getSourceRange().getBegin();
+
+ if (isa<MacroDefinitionRecord>(Entity)) {
+ assert((PreprocessedEntities.empty() ||
+ !SourceMgr.isBeforeInTranslationUnit(
+ BeginLoc,
+ PreprocessedEntities.back()->getSourceRange().getBegin())) &&
+ "a macro definition was encountered out-of-order");
+ PreprocessedEntities.push_back(Entity);
+ return getPPEntityID(PreprocessedEntities.size()-1, /*isLoaded=*/false);
+ }
+
+ // Check the normal case: this entity's begin location is after the previous one.
+ if (PreprocessedEntities.empty() ||
+ !SourceMgr.isBeforeInTranslationUnit(BeginLoc,
+ PreprocessedEntities.back()->getSourceRange().getBegin())) {
+ PreprocessedEntities.push_back(Entity);
+ return getPPEntityID(PreprocessedEntities.size()-1, /*isLoaded=*/false);
+ }
+
+ // The entity's location is not after the previous one; this can happen with
+ // include directives that form the filename using macros, e.g.:
+ // "#include MACRO(STUFF)"
+ // or with macro expansions inside macro arguments where the arguments are
+ // not expanded in the same order as they are listed, e.g.:
+ // \code
+ // #define M1 1
+ // #define M2 2
+ // #define FM(x,y) y x
+ // FM(M1, M2)
+ // \endcode
+
+ typedef std::vector<PreprocessedEntity *>::iterator pp_iter;
+
+ // Usually there are only a few macro expansions when forming the filename,
+ // so first do a linear search over the last few entities.
+ unsigned count = 0;
+ for (pp_iter RI = PreprocessedEntities.end(),
+ Begin = PreprocessedEntities.begin();
+ RI != Begin && count < 4; --RI, ++count) {
+ pp_iter I = RI;
+ --I;
+ if (!SourceMgr.isBeforeInTranslationUnit(BeginLoc,
+ (*I)->getSourceRange().getBegin())) {
+ pp_iter insertI = PreprocessedEntities.insert(RI, Entity);
+ return getPPEntityID(insertI - PreprocessedEntities.begin(),
+ /*isLoaded=*/false);
+ }
+ }
+
+ // Linear search unsuccessful. Do a binary search.
+ pp_iter I = std::upper_bound(PreprocessedEntities.begin(),
+ PreprocessedEntities.end(),
+ BeginLoc,
+ PPEntityComp<&SourceRange::getBegin>(SourceMgr));
+ pp_iter insertI = PreprocessedEntities.insert(I, Entity);
+ return getPPEntityID(insertI - PreprocessedEntities.begin(),
+ /*isLoaded=*/false);
+}
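+
+// Cost sketch (illustrative): an in-order append is O(1). For out-of-order
+// entities the four-entry linear probe above keeps the common
+// "#include MACRO(...)" case O(1); only deeper disorder pays the O(log N)
+// search plus an O(N) vector insertion.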
+
+void PreprocessingRecord::SetExternalSource(
+ ExternalPreprocessingRecordSource &Source) {
+ assert(!ExternalSource &&
+ "Preprocessing record already has an external source");
+ ExternalSource = &Source;
+}
+
+unsigned PreprocessingRecord::allocateLoadedEntities(unsigned NumEntities) {
+ unsigned Result = LoadedPreprocessedEntities.size();
+ LoadedPreprocessedEntities.resize(LoadedPreprocessedEntities.size()
+ + NumEntities);
+ return Result;
+}
+
+void PreprocessingRecord::RegisterMacroDefinition(MacroInfo *Macro,
+ MacroDefinitionRecord *Def) {
+ MacroDefinitions[Macro] = Def;
+}
+
+/// \brief Retrieve the preprocessed entity at the given ID.
+PreprocessedEntity *PreprocessingRecord::getPreprocessedEntity(PPEntityID PPID){
+ if (PPID.ID < 0) {
+ unsigned Index = -PPID.ID - 1;
+ assert(Index < LoadedPreprocessedEntities.size() &&
+ "Out-of bounds loaded preprocessed entity");
+ return getLoadedPreprocessedEntity(Index);
+ }
+
+ if (PPID.ID == 0)
+ return nullptr;
+ unsigned Index = PPID.ID - 1;
+ assert(Index < PreprocessedEntities.size() &&
+ "Out-of bounds local preprocessed entity");
+ return PreprocessedEntities[Index];
+}
+
+/// \brief Retrieve the loaded preprocessed entity at the given index.
+PreprocessedEntity *
+PreprocessingRecord::getLoadedPreprocessedEntity(unsigned Index) {
+ assert(Index < LoadedPreprocessedEntities.size() &&
+ "Out-of bounds loaded preprocessed entity");
+ assert(ExternalSource && "No external source to load from");
+ PreprocessedEntity *&Entity = LoadedPreprocessedEntities[Index];
+ if (!Entity) {
+ Entity = ExternalSource->ReadPreprocessedEntity(Index);
+ if (!Entity) // Failed to load.
+ Entity = new (*this)
+ PreprocessedEntity(PreprocessedEntity::InvalidKind, SourceRange());
+ }
+ return Entity;
+}
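+
+// Usage sketch (hypothetical caller): entities that fail to deserialize are
+// cached as InvalidKind, so the external source is asked at most once per
+// index:
+//   PreprocessedEntity *E = PPRec.getLoadedPreprocessedEntity(I);
+//   if (E->getKind() != PreprocessedEntity::InvalidKind)
+//     visit(E); // 'visit' is a placeholder for the caller's logic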
+
+MacroDefinitionRecord *
+PreprocessingRecord::findMacroDefinition(const MacroInfo *MI) {
+ llvm::DenseMap<const MacroInfo *, MacroDefinitionRecord *>::iterator Pos =
+ MacroDefinitions.find(MI);
+ if (Pos == MacroDefinitions.end())
+ return nullptr;
+
+ return Pos->second;
+}
+
+void PreprocessingRecord::addMacroExpansion(const Token &Id,
+ const MacroInfo *MI,
+ SourceRange Range) {
+ // We don't record nested macro expansions.
+ if (Id.getLocation().isMacroID())
+ return;
+
+ if (MI->isBuiltinMacro())
+ addPreprocessedEntity(new (*this)
+ MacroExpansion(Id.getIdentifierInfo(), Range));
+ else if (MacroDefinitionRecord *Def = findMacroDefinition(MI))
+ addPreprocessedEntity(new (*this) MacroExpansion(Def, Range));
+}
+
+void PreprocessingRecord::Ifdef(SourceLocation Loc, const Token &MacroNameTok,
+ const MacroDefinition &MD) {
+ // Not an actual macro expansion, but record it as a macro reference.
+ if (MD)
+ addMacroExpansion(MacroNameTok, MD.getMacroInfo(),
+ MacroNameTok.getLocation());
+}
+
+void PreprocessingRecord::Ifndef(SourceLocation Loc, const Token &MacroNameTok,
+ const MacroDefinition &MD) {
+ // Not an actual macro expansion, but record it as a macro reference.
+ if (MD)
+ addMacroExpansion(MacroNameTok, MD.getMacroInfo(),
+ MacroNameTok.getLocation());
+}
+
+void PreprocessingRecord::Defined(const Token &MacroNameTok,
+ const MacroDefinition &MD,
+ SourceRange Range) {
+ // Not an actual macro expansion, but record it as a macro reference.
+ if (MD)
+ addMacroExpansion(MacroNameTok, MD.getMacroInfo(),
+ MacroNameTok.getLocation());
+}
+
+void PreprocessingRecord::SourceRangeSkipped(SourceRange Range) {
+ SkippedRanges.push_back(Range);
+}
+
+void PreprocessingRecord::MacroExpands(const Token &Id,
+ const MacroDefinition &MD,
+ SourceRange Range,
+ const MacroArgs *Args) {
+ addMacroExpansion(Id, MD.getMacroInfo(), Range);
+}
+
+void PreprocessingRecord::MacroDefined(const Token &Id,
+ const MacroDirective *MD) {
+ const MacroInfo *MI = MD->getMacroInfo();
+ SourceRange R(MI->getDefinitionLoc(), MI->getDefinitionEndLoc());
+ MacroDefinitionRecord *Def =
+ new (*this) MacroDefinitionRecord(Id.getIdentifierInfo(), R);
+ addPreprocessedEntity(Def);
+ MacroDefinitions[MI] = Def;
+}
+
+void PreprocessingRecord::MacroUndefined(const Token &Id,
+ const MacroDefinition &MD) {
+ MD.forAllDefinitions([&](MacroInfo *MI) { MacroDefinitions.erase(MI); });
+}
+
+void PreprocessingRecord::InclusionDirective(
+ SourceLocation HashLoc,
+ const clang::Token &IncludeTok,
+ StringRef FileName,
+ bool IsAngled,
+ CharSourceRange FilenameRange,
+ const FileEntry *File,
+ StringRef SearchPath,
+ StringRef RelativePath,
+ const Module *Imported) {
+ InclusionDirective::InclusionKind Kind = InclusionDirective::Include;
+
+ switch (IncludeTok.getIdentifierInfo()->getPPKeywordID()) {
+ case tok::pp_include:
+ Kind = InclusionDirective::Include;
+ break;
+
+ case tok::pp_import:
+ Kind = InclusionDirective::Import;
+ break;
+
+ case tok::pp_include_next:
+ Kind = InclusionDirective::IncludeNext;
+ break;
+
+ case tok::pp___include_macros:
+ Kind = InclusionDirective::IncludeMacros;
+ break;
+
+ default:
+ llvm_unreachable("Unknown include directive kind");
+ }
+
+ SourceLocation EndLoc;
+ if (!IsAngled) {
+ EndLoc = FilenameRange.getBegin();
+ } else {
+ EndLoc = FilenameRange.getEnd();
+ if (FilenameRange.isCharRange())
+ EndLoc = EndLoc.getLocWithOffset(-1); // the InclusionDirective expects
+ // a token range.
+ }
+ clang::InclusionDirective *ID
+ = new (*this) clang::InclusionDirective(*this, Kind, FileName, !IsAngled,
+ (bool)Imported,
+ File, SourceRange(HashLoc, EndLoc));
+ addPreprocessedEntity(ID);
+}
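+
+// For example (illustrative): in `#include <foo.h>` the filename char range
+// covers "<foo.h>", and stepping its end back by one character yields the
+// location of the closing '>' token; a quoted "foo.h" is a single
+// string-literal token, so the range's begin location already names it.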
+
+size_t PreprocessingRecord::getTotalMemory() const {
+ return BumpAlloc.getTotalMemory()
+ + llvm::capacity_in_bytes(MacroDefinitions)
+ + llvm::capacity_in_bytes(PreprocessedEntities)
+ + llvm::capacity_in_bytes(LoadedPreprocessedEntities);
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
new file mode 100644
index 0000000..142d9ce
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
@@ -0,0 +1,913 @@
+//===--- Preprocessor.cpp - C Language Family Preprocessor Implementation ===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Preprocessor interface.
+//
+//===----------------------------------------------------------------------===//
+//
+// Options to support:
+// -H - Print the name of each header file used.
+// -d[DNI] - Dump various things.
+// -fworking-directory - #line's with preprocessor's working dir.
+// -fpreprocessed
+// -dependency-file,-M,-MM,-MF,-MG,-MP,-MT,-MQ,-MD,-MMD
+// -W*
+// -w
+//
+// Messages to emit:
+// "Multiple include guards may be useful for:\n"
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemStatCache.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/CodeCompletionHandler.h"
+#include "clang/Lex/ExternalPreprocessorSource.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/MacroArgs.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/ModuleLoader.h"
+#include "clang/Lex/PTHManager.h"
+#include "clang/Lex/Pragma.h"
+#include "clang/Lex/PreprocessingRecord.h"
+#include "clang/Lex/PreprocessorOptions.h"
+#include "clang/Lex/ScratchBuffer.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Capacity.h"
+#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+ExternalPreprocessorSource::~ExternalPreprocessorSource() { }
+
+Preprocessor::Preprocessor(IntrusiveRefCntPtr<PreprocessorOptions> PPOpts,
+ DiagnosticsEngine &diags, LangOptions &opts,
+ SourceManager &SM, HeaderSearch &Headers,
+ ModuleLoader &TheModuleLoader,
+ IdentifierInfoLookup *IILookup, bool OwnsHeaders,
+ TranslationUnitKind TUKind)
+ : PPOpts(PPOpts), Diags(&diags), LangOpts(opts), Target(nullptr),
+ AuxTarget(nullptr), FileMgr(Headers.getFileMgr()), SourceMgr(SM),
+ ScratchBuf(new ScratchBuffer(SourceMgr)), HeaderInfo(Headers),
+ TheModuleLoader(TheModuleLoader), ExternalSource(nullptr),
+ Identifiers(opts, IILookup),
+ PragmaHandlers(new PragmaNamespace(StringRef())),
+ IncrementalProcessing(false), TUKind(TUKind), CodeComplete(nullptr),
+ CodeCompletionFile(nullptr), CodeCompletionOffset(0),
+ LastTokenWasAt(false), ModuleImportExpectsIdentifier(false),
+ CodeCompletionReached(0), MainFileDir(nullptr),
+ SkipMainFilePreamble(0, true), CurPPLexer(nullptr), CurDirLookup(nullptr),
+ CurLexerKind(CLK_Lexer), CurSubmodule(nullptr), Callbacks(nullptr),
+ CurSubmoduleState(&NullSubmoduleState), MacroArgCache(nullptr),
+ Record(nullptr), MIChainHead(nullptr), DeserialMIChainHead(nullptr) {
+ OwnsHeaderSearch = OwnsHeaders;
+
+ CounterValue = 0; // __COUNTER__ starts at 0.
+
+ // Clear stats.
+ NumDirectives = NumDefined = NumUndefined = NumPragma = 0;
+ NumIf = NumElse = NumEndif = 0;
+ NumEnteredSourceFiles = 0;
+ NumMacroExpanded = NumFnMacroExpanded = NumBuiltinMacroExpanded = 0;
+ NumFastMacroExpanded = NumTokenPaste = NumFastTokenPaste = 0;
+ MaxIncludeStackDepth = 0;
+ NumSkipped = 0;
+
+ // Default to discarding comments.
+ KeepComments = false;
+ KeepMacroComments = false;
+ SuppressIncludeNotFoundError = false;
+
+ // Macro expansion is enabled.
+ DisableMacroExpansion = false;
+ MacroExpansionInDirectivesOverride = false;
+ InMacroArgs = false;
+ InMacroArgPreExpansion = false;
+ NumCachedTokenLexers = 0;
+ PragmasEnabled = true;
+ ParsingIfOrElifDirective = false;
+ PreprocessedOutput = false;
+
+ CachedLexPos = 0;
+
+ // We haven't read anything from the external source.
+ ReadMacrosFromExternalSource = false;
+
+ // "Poison" __VA_ARGS__, which can only appear in the expansion of a macro.
+ // This gets unpoisoned where it is allowed.
+ (Ident__VA_ARGS__ = getIdentifierInfo("__VA_ARGS__"))->setIsPoisoned();
+ SetPoisonReason(Ident__VA_ARGS__, diag::ext_pp_bad_vaargs_use);
+
+ // Initialize the pragma handlers.
+ RegisterBuiltinPragmas();
+
+ // Initialize builtin macros like __LINE__ and friends.
+ RegisterBuiltinMacros();
+
+ if (LangOpts.Borland) {
+ Ident__exception_info = getIdentifierInfo("_exception_info");
+ Ident___exception_info = getIdentifierInfo("__exception_info");
+ Ident_GetExceptionInfo = getIdentifierInfo("GetExceptionInformation");
+ Ident__exception_code = getIdentifierInfo("_exception_code");
+ Ident___exception_code = getIdentifierInfo("__exception_code");
+ Ident_GetExceptionCode = getIdentifierInfo("GetExceptionCode");
+ Ident__abnormal_termination = getIdentifierInfo("_abnormal_termination");
+ Ident___abnormal_termination = getIdentifierInfo("__abnormal_termination");
+ Ident_AbnormalTermination = getIdentifierInfo("AbnormalTermination");
+ } else {
+ Ident__exception_info = Ident__exception_code = nullptr;
+ Ident__abnormal_termination = Ident___exception_info = nullptr;
+ Ident___exception_code = Ident___abnormal_termination = nullptr;
+ Ident_GetExceptionInfo = Ident_GetExceptionCode = nullptr;
+ Ident_AbnormalTermination = nullptr;
+ }
+}
+
+Preprocessor::~Preprocessor() {
+ assert(BacktrackPositions.empty() && "EnableBacktrack/Backtrack imbalance!");
+
+ IncludeMacroStack.clear();
+
+ // Destroy any macro definitions.
+ while (MacroInfoChain *I = MIChainHead) {
+ MIChainHead = I->Next;
+ I->~MacroInfoChain();
+ }
+
+ // Free any cached macro expanders.
+ // This populates MacroArgCache, so all TokenLexers need to be destroyed
+ // before the code below that frees up the MacroArgCache list.
+ std::fill(TokenLexerCache, TokenLexerCache + NumCachedTokenLexers, nullptr);
+ CurTokenLexer.reset();
+
+ while (DeserializedMacroInfoChain *I = DeserialMIChainHead) {
+ DeserialMIChainHead = I->Next;
+ I->~DeserializedMacroInfoChain();
+ }
+
+ // Free any cached MacroArgs.
+ for (MacroArgs *ArgList = MacroArgCache; ArgList;)
+ ArgList = ArgList->deallocate();
+
+ // Delete the header search info, if we own it.
+ if (OwnsHeaderSearch)
+ delete &HeaderInfo;
+}
+
+void Preprocessor::Initialize(const TargetInfo &Target,
+ const TargetInfo *AuxTarget) {
+ assert((!this->Target || this->Target == &Target) &&
+ "Invalid override of target information");
+ this->Target = &Target;
+
+ assert((!this->AuxTarget || this->AuxTarget == AuxTarget) &&
+ "Invalid override of aux target information.");
+ this->AuxTarget = AuxTarget;
+
+ // Initialize information about built-ins.
+ BuiltinInfo.InitializeTarget(Target, AuxTarget);
+ HeaderInfo.setTarget(Target);
+}
+
+void Preprocessor::InitializeForModelFile() {
+ NumEnteredSourceFiles = 0;
+
+ // Reset pragmas
+ PragmaHandlersBackup = std::move(PragmaHandlers);
+ PragmaHandlers = llvm::make_unique<PragmaNamespace>(StringRef());
+ RegisterBuiltinPragmas();
+
+ // Reset PredefinesFileID
+ PredefinesFileID = FileID();
+}
+
+void Preprocessor::FinalizeForModelFile() {
+ NumEnteredSourceFiles = 1;
+
+ PragmaHandlers = std::move(PragmaHandlersBackup);
+}
+
+void Preprocessor::setPTHManager(PTHManager* pm) {
+ PTH.reset(pm);
+ FileMgr.addStatCache(PTH->createStatCache());
+}
+
+void Preprocessor::DumpToken(const Token &Tok, bool DumpFlags) const {
+ llvm::errs() << tok::getTokenName(Tok.getKind()) << " '"
+ << getSpelling(Tok) << "'";
+
+ if (!DumpFlags) return;
+
+ llvm::errs() << "\t";
+ if (Tok.isAtStartOfLine())
+ llvm::errs() << " [StartOfLine]";
+ if (Tok.hasLeadingSpace())
+ llvm::errs() << " [LeadingSpace]";
+ if (Tok.isExpandDisabled())
+ llvm::errs() << " [ExpandDisabled]";
+ if (Tok.needsCleaning()) {
+ const char *Start = SourceMgr.getCharacterData(Tok.getLocation());
+ llvm::errs() << " [UnClean='" << StringRef(Start, Tok.getLength())
+ << "']";
+ }
+
+ llvm::errs() << "\tLoc=<";
+ DumpLocation(Tok.getLocation());
+ llvm::errs() << ">";
+}
+
+void Preprocessor::DumpLocation(SourceLocation Loc) const {
+ Loc.dump(SourceMgr);
+}
+
+void Preprocessor::DumpMacro(const MacroInfo &MI) const {
+ llvm::errs() << "MACRO: ";
+ for (unsigned i = 0, e = MI.getNumTokens(); i != e; ++i) {
+ DumpToken(MI.getReplacementToken(i));
+ llvm::errs() << " ";
+ }
+ llvm::errs() << "\n";
+}
+
+void Preprocessor::PrintStats() {
+ llvm::errs() << "\n*** Preprocessor Stats:\n";
+ llvm::errs() << NumDirectives << " directives found:\n";
+ llvm::errs() << " " << NumDefined << " #define.\n";
+ llvm::errs() << " " << NumUndefined << " #undef.\n";
+ llvm::errs() << " #include/#include_next/#import:\n";
+ llvm::errs() << " " << NumEnteredSourceFiles << " source files entered.\n";
+ llvm::errs() << " " << MaxIncludeStackDepth << " max include stack depth\n";
+ llvm::errs() << " " << NumIf << " #if/#ifndef/#ifdef.\n";
+ llvm::errs() << " " << NumElse << " #else/#elif.\n";
+ llvm::errs() << " " << NumEndif << " #endif.\n";
+ llvm::errs() << " " << NumPragma << " #pragma.\n";
+ llvm::errs() << NumSkipped << " #if/#ifndef/#ifdef regions skipped\n";
+
+ llvm::errs() << NumMacroExpanded << "/" << NumFnMacroExpanded << "/"
+ << NumBuiltinMacroExpanded << " obj/fn/builtin macros expanded, "
+ << NumFastMacroExpanded << " on the fast path.\n";
+ llvm::errs() << (NumFastTokenPaste+NumTokenPaste)
+ << " token paste (##) operations performed, "
+ << NumFastTokenPaste << " on the fast path.\n";
+
+ llvm::errs() << "\nPreprocessor Memory: " << getTotalMemory() << "B total";
+
+ llvm::errs() << "\n BumpPtr: " << BP.getTotalMemory();
+ llvm::errs() << "\n Macro Expanded Tokens: "
+ << llvm::capacity_in_bytes(MacroExpandedTokens);
+ llvm::errs() << "\n Predefines Buffer: " << Predefines.capacity();
+ // FIXME: List information for all submodules.
+ llvm::errs() << "\n Macros: "
+ << llvm::capacity_in_bytes(CurSubmoduleState->Macros);
+ llvm::errs() << "\n #pragma push_macro Info: "
+ << llvm::capacity_in_bytes(PragmaPushMacroInfo);
+ llvm::errs() << "\n Poison Reasons: "
+ << llvm::capacity_in_bytes(PoisonReasons);
+ llvm::errs() << "\n Comment Handlers: "
+ << llvm::capacity_in_bytes(CommentHandlers) << "\n";
+}
+
+Preprocessor::macro_iterator
+Preprocessor::macro_begin(bool IncludeExternalMacros) const {
+ if (IncludeExternalMacros && ExternalSource &&
+ !ReadMacrosFromExternalSource) {
+ ReadMacrosFromExternalSource = true;
+ ExternalSource->ReadDefinedMacros();
+ }
+
+ // Make sure we cover all macros in visible modules.
+ for (const ModuleMacro &Macro : ModuleMacros)
+ CurSubmoduleState->Macros.insert(std::make_pair(Macro.II, MacroState()));
+
+ return CurSubmoduleState->Macros.begin();
+}
+
+size_t Preprocessor::getTotalMemory() const {
+ return BP.getTotalMemory()
+ + llvm::capacity_in_bytes(MacroExpandedTokens)
+ + Predefines.capacity() /* Predefines buffer. */
+ // FIXME: Include sizes from all submodules, and include MacroInfo sizes,
+ // and ModuleMacros.
+ + llvm::capacity_in_bytes(CurSubmoduleState->Macros)
+ + llvm::capacity_in_bytes(PragmaPushMacroInfo)
+ + llvm::capacity_in_bytes(PoisonReasons)
+ + llvm::capacity_in_bytes(CommentHandlers);
+}
+
+Preprocessor::macro_iterator
+Preprocessor::macro_end(bool IncludeExternalMacros) const {
+ if (IncludeExternalMacros && ExternalSource &&
+ !ReadMacrosFromExternalSource) {
+ ReadMacrosFromExternalSource = true;
+ ExternalSource->ReadDefinedMacros();
+ }
+
+ return CurSubmoduleState->Macros.end();
+}
+
+/// \brief Compares macro tokens with a specified token value sequence.
+static bool MacroDefinitionEquals(const MacroInfo *MI,
+ ArrayRef<TokenValue> Tokens) {
+ return Tokens.size() == MI->getNumTokens() &&
+ std::equal(Tokens.begin(), Tokens.end(), MI->tokens_begin());
+}
+
+StringRef Preprocessor::getLastMacroWithSpelling(
+ SourceLocation Loc,
+ ArrayRef<TokenValue> Tokens) const {
+ SourceLocation BestLocation;
+ StringRef BestSpelling;
+ for (Preprocessor::macro_iterator I = macro_begin(), E = macro_end();
+ I != E; ++I) {
+ const MacroDirective::DefInfo
+ Def = I->second.findDirectiveAtLoc(Loc, SourceMgr);
+ if (!Def || !Def.getMacroInfo())
+ continue;
+ if (!Def.getMacroInfo()->isObjectLike())
+ continue;
+ if (!MacroDefinitionEquals(Def.getMacroInfo(), Tokens))
+ continue;
+ SourceLocation Location = Def.getLocation();
+ // Choose the macro defined latest.
+ if (BestLocation.isInvalid() ||
+ (Location.isValid() &&
+ SourceMgr.isBeforeInTranslationUnit(BestLocation, Location))) {
+ BestLocation = Location;
+ BestSpelling = I->first->getName();
+ }
+ }
+ return BestSpelling;
+}
+
+void Preprocessor::recomputeCurLexerKind() {
+ if (CurLexer)
+ CurLexerKind = CLK_Lexer;
+ else if (CurPTHLexer)
+ CurLexerKind = CLK_PTHLexer;
+ else if (CurTokenLexer)
+ CurLexerKind = CLK_TokenLexer;
+ else
+ CurLexerKind = CLK_CachingLexer;
+}
+
+bool Preprocessor::SetCodeCompletionPoint(const FileEntry *File,
+ unsigned CompleteLine,
+ unsigned CompleteColumn) {
+ assert(File);
+ assert(CompleteLine && CompleteColumn && "Starts from 1:1");
+ assert(!CodeCompletionFile && "Already set");
+
+ using llvm::MemoryBuffer;
+
+ // Load the actual file's contents.
+ bool Invalid = false;
+ const MemoryBuffer *Buffer = SourceMgr.getMemoryBufferForFile(File, &Invalid);
+ if (Invalid)
+ return true;
+
+ // Find the byte position of the truncation point.
+ const char *Position = Buffer->getBufferStart();
+ for (unsigned Line = 1; Line < CompleteLine; ++Line) {
+ for (; *Position; ++Position) {
+ if (*Position != '\r' && *Position != '\n')
+ continue;
+
+ // Eat \r\n or \n\r as a single line.
+ if ((Position[1] == '\r' || Position[1] == '\n') &&
+ Position[0] != Position[1])
+ ++Position;
+ ++Position;
+ break;
+ }
+ }
+
+ Position += CompleteColumn - 1;
+
+ // If the position points inside the preamble, adjust it to the beginning of
+ // the file right after the preamble.
+ if (SkipMainFilePreamble.first &&
+ SourceMgr.getFileEntryForID(SourceMgr.getMainFileID()) == File) {
+ if (Position - Buffer->getBufferStart() < SkipMainFilePreamble.first)
+ Position = Buffer->getBufferStart() + SkipMainFilePreamble.first;
+ }
+
+ if (Position > Buffer->getBufferEnd())
+ Position = Buffer->getBufferEnd();
+
+ CodeCompletionFile = File;
+ CodeCompletionOffset = Position - Buffer->getBufferStart();
+
+ std::unique_ptr<MemoryBuffer> NewBuffer =
+ MemoryBuffer::getNewUninitMemBuffer(Buffer->getBufferSize() + 1,
+ Buffer->getBufferIdentifier());
+ char *NewBuf = const_cast<char*>(NewBuffer->getBufferStart());
+ char *NewPos = std::copy(Buffer->getBufferStart(), Position, NewBuf);
+ *NewPos = '\0';
+ std::copy(Position, Buffer->getBufferEnd(), NewPos+1);
+ SourceMgr.overrideFileContents(File, std::move(NewBuffer));
+
+ return false;
+}
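+
+// Effect sketch (illustrative): for a buffer "int x;\nint y;\n" with the
+// completion point at line 2, column 5, the overriding buffer becomes
+//   "int x;\nint \0y;\n"
+// i.e. the original contents with a NUL spliced in at CodeCompletionOffset;
+// the lexer can then treat that NUL as the code-completion token.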
+
+void Preprocessor::CodeCompleteNaturalLanguage() {
+ if (CodeComplete)
+ CodeComplete->CodeCompleteNaturalLanguage();
+ setCodeCompletionReached();
+}
+
+/// getSpelling - This method is used to get the spelling of a token into a
+/// SmallVector. Note that the returned StringRef may not point to the
+/// supplied buffer if a copy can be avoided.
+StringRef Preprocessor::getSpelling(const Token &Tok,
+ SmallVectorImpl<char> &Buffer,
+ bool *Invalid) const {
+ // NOTE: this has to be checked *before* testing for an IdentifierInfo.
+ if (Tok.isNot(tok::raw_identifier) && !Tok.hasUCN()) {
+ // Try the fast path.
+ if (const IdentifierInfo *II = Tok.getIdentifierInfo())
+ return II->getName();
+ }
+
+ // Resize the buffer if we need to copy into it.
+ if (Tok.needsCleaning())
+ Buffer.resize(Tok.getLength());
+
+ const char *Ptr = Buffer.data();
+ unsigned Len = getSpelling(Tok, Ptr, Invalid);
+ return StringRef(Ptr, Len);
+}
+
+/// CreateString - Plop the specified string into a scratch buffer and return a
+/// location for it. If specified, the source location provides a source
+/// location for the token.
+void Preprocessor::CreateString(StringRef Str, Token &Tok,
+ SourceLocation ExpansionLocStart,
+ SourceLocation ExpansionLocEnd) {
+ Tok.setLength(Str.size());
+
+ const char *DestPtr;
+ SourceLocation Loc = ScratchBuf->getToken(Str.data(), Str.size(), DestPtr);
+
+ if (ExpansionLocStart.isValid())
+ Loc = SourceMgr.createExpansionLoc(Loc, ExpansionLocStart,
+ ExpansionLocEnd, Str.size());
+ Tok.setLocation(Loc);
+
+ // If this is a raw identifier or a literal token, set the pointer data.
+ if (Tok.is(tok::raw_identifier))
+ Tok.setRawIdentifierData(DestPtr);
+ else if (Tok.isLiteral())
+ Tok.setLiteralData(DestPtr);
+}
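+
+// Hypothetical use (illustrative): materializing the expansion of __LINE__.
+//   Token Tok;
+//   Tok.startToken();
+//   Tok.setKind(tok::numeric_constant);
+//   PP.CreateString("42", Tok, ExpLoc, ExpLoc);
+// Afterwards Tok's location points into the scratch buffer and its literal
+// data pointer references the NUL-terminated copy made there.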
+
+Module *Preprocessor::getCurrentModule() {
+ if (getLangOpts().CurrentModule.empty())
+ return nullptr;
+
+ return getHeaderSearchInfo().lookupModule(getLangOpts().CurrentModule);
+}
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Initialization Methods
+//===----------------------------------------------------------------------===//
+
+
+/// EnterMainSourceFile - Enter the specified FileID as the main source file,
+/// which implicitly adds the builtin defines etc.
+void Preprocessor::EnterMainSourceFile() {
+ // We do not allow the preprocessor to reenter the main file. Doing so will
+ // cause FileID's to accumulate information from both runs (e.g. #line
+ // information) and predefined macros aren't guaranteed to be set properly.
+ assert(NumEnteredSourceFiles == 0 && "Cannot reenter the main file!");
+ FileID MainFileID = SourceMgr.getMainFileID();
+
+ // If MainFileID is loaded, it means we loaded an AST file and there is no
+ // need to enter a main file.
+ if (!SourceMgr.isLoadedFileID(MainFileID)) {
+ // Enter the main file source buffer.
+ EnterSourceFile(MainFileID, nullptr, SourceLocation());
+
+ // If we've been asked to skip bytes in the main file (e.g., as part of a
+ // precompiled preamble), do so now.
+ if (SkipMainFilePreamble.first > 0)
+ CurLexer->SkipBytes(SkipMainFilePreamble.first,
+ SkipMainFilePreamble.second);
+
+ // Tell the header info that the main file was entered. If the file is later
+ // #imported, it won't be re-entered.
+ if (const FileEntry *FE = SourceMgr.getFileEntryForID(MainFileID))
+ HeaderInfo.IncrementIncludeCount(FE);
+ }
+
+ // Preprocess Predefines to populate the initial preprocessor state.
+ std::unique_ptr<llvm::MemoryBuffer> SB =
+ llvm::MemoryBuffer::getMemBufferCopy(Predefines, "<built-in>");
+ assert(SB && "Cannot create predefined source buffer");
+ FileID FID = SourceMgr.createFileID(std::move(SB));
+ assert(FID.isValid() && "Could not create FileID for predefines?");
+ setPredefinesFileID(FID);
+
+ // Start parsing the predefines.
+ EnterSourceFile(FID, nullptr, SourceLocation());
+}
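+
+// Note (illustrative): the predefines buffer reads like a synthesized header,
+// e.g. "#define __STDC__ 1\n..." lexed as the virtual file "<built-in>".
+// Since it is entered last, it sits on top of the include stack, so its
+// directives are processed before any token of the main file.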
+
+void Preprocessor::EndSourceFile() {
+ // Notify the client that we reached the end of the source file.
+ if (Callbacks)
+ Callbacks->EndOfMainFile();
+}
+
+//===----------------------------------------------------------------------===//
+// Lexer Event Handling.
+//===----------------------------------------------------------------------===//
+
+/// LookUpIdentifierInfo - Given a tok::raw_identifier token, look up the
+/// identifier information for the token and install it into the token,
+/// updating the token kind accordingly.
+IdentifierInfo *Preprocessor::LookUpIdentifierInfo(Token &Identifier) const {
+ assert(!Identifier.getRawIdentifier().empty() && "No raw identifier data!");
+
+ // Look up this token, see if it is a macro, or if it is a language keyword.
+ IdentifierInfo *II;
+ if (!Identifier.needsCleaning() && !Identifier.hasUCN()) {
+ // No cleaning needed, just use the characters from the lexed buffer.
+ II = getIdentifierInfo(Identifier.getRawIdentifier());
+ } else {
+ // Cleaning needed: allocate a buffer, clean into it, then use the buffer.
+ SmallString<64> IdentifierBuffer;
+ StringRef CleanedStr = getSpelling(Identifier, IdentifierBuffer);
+
+ if (Identifier.hasUCN()) {
+ SmallString<64> UCNIdentifierBuffer;
+ expandUCNs(UCNIdentifierBuffer, CleanedStr);
+ II = getIdentifierInfo(UCNIdentifierBuffer);
+ } else {
+ II = getIdentifierInfo(CleanedStr);
+ }
+ }
+
+ // Update the token info (identifier info and appropriate token kind).
+ Identifier.setIdentifierInfo(II);
+ Identifier.setKind(II->getTokenID());
+
+ return II;
+}
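+
+// For example (illustrative): the raw identifier "\u00E9tat" needs cleaning;
+// expandUCNs() rewrites it to its UTF-8 spelling before the lookup, so the
+// escaped and the directly-written spellings map to one IdentifierInfo.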
+
+void Preprocessor::SetPoisonReason(IdentifierInfo *II, unsigned DiagID) {
+ PoisonReasons[II] = DiagID;
+}
+
+void Preprocessor::PoisonSEHIdentifiers(bool Poison) {
+ assert(Ident__exception_code && Ident__exception_info);
+ assert(Ident___exception_code && Ident___exception_info);
+ Ident__exception_code->setIsPoisoned(Poison);
+ Ident___exception_code->setIsPoisoned(Poison);
+ Ident_GetExceptionCode->setIsPoisoned(Poison);
+ Ident__exception_info->setIsPoisoned(Poison);
+ Ident___exception_info->setIsPoisoned(Poison);
+ Ident_GetExceptionInfo->setIsPoisoned(Poison);
+ Ident__abnormal_termination->setIsPoisoned(Poison);
+ Ident___abnormal_termination->setIsPoisoned(Poison);
+ Ident_AbnormalTermination->setIsPoisoned(Poison);
+}
+
+void Preprocessor::HandlePoisonedIdentifier(Token & Identifier) {
+ assert(Identifier.getIdentifierInfo() &&
+ "Can't handle identifiers without identifier info!");
+ llvm::DenseMap<IdentifierInfo*,unsigned>::const_iterator it =
+ PoisonReasons.find(Identifier.getIdentifierInfo());
+ if (it == PoisonReasons.end())
+ Diag(Identifier, diag::err_pp_used_poisoned_id);
+ else
+ Diag(Identifier, it->second) << Identifier.getIdentifierInfo();
+}
+
+/// \brief Returns a diagnostic message kind for reporting a future keyword as
+/// appropriate for the identifier and specified language.
+static diag::kind getFutureCompatDiagKind(const IdentifierInfo &II,
+ const LangOptions &LangOpts) {
+ assert(II.isFutureCompatKeyword() && "diagnostic should not be needed");
+
+ if (LangOpts.CPlusPlus)
+ return llvm::StringSwitch<diag::kind>(II.getName())
+#define CXX11_KEYWORD(NAME, FLAGS) \
+ .Case(#NAME, diag::warn_cxx11_keyword)
+#include "clang/Basic/TokenKinds.def"
+ ;
+
+ llvm_unreachable(
+ "Keyword not known to come from a newer Standard or proposed Standard");
+}
+
+/// HandleIdentifier - This callback is invoked when the lexer reads an
+/// identifier. This callback looks up the identifier in the map and/or
+/// potentially macro expands it or turns it into a named token (like 'for').
+///
+/// Note that callers of this method are guarded by checking the
+/// IdentifierInfo's 'isHandleIdentifierCase' bit. If this method changes, the
+/// IdentifierInfo methods that compute these properties will need to change to
+/// match.
+bool Preprocessor::HandleIdentifier(Token &Identifier) {
+ assert(Identifier.getIdentifierInfo() &&
+ "Can't handle identifiers without identifier info!");
+
+ IdentifierInfo &II = *Identifier.getIdentifierInfo();
+
+ // If the information about this identifier is out of date, update it from
+ // the external source.
+ // We have to treat __VA_ARGS__ in a special way, since it gets
+ // serialized with isPoisoned = true, but our preprocessor may have
+ // unpoisoned it if we're defining a C99 macro.
+ if (II.isOutOfDate()) {
+ bool CurrentIsPoisoned = false;
+ if (&II == Ident__VA_ARGS__)
+ CurrentIsPoisoned = Ident__VA_ARGS__->isPoisoned();
+
+ ExternalSource->updateOutOfDateIdentifier(II);
+ Identifier.setKind(II.getTokenID());
+
+ if (&II == Ident__VA_ARGS__)
+ II.setIsPoisoned(CurrentIsPoisoned);
+ }
+
+ // If this identifier was poisoned, and if it was not produced from a macro
+ // expansion, emit an error.
+ if (II.isPoisoned() && CurPPLexer) {
+ HandlePoisonedIdentifier(Identifier);
+ }
+
+ // If this is a macro to be expanded, do it.
+ if (MacroDefinition MD = getMacroDefinition(&II)) {
+ auto *MI = MD.getMacroInfo();
+ assert(MI && "macro definition with no macro info?");
+ if (!DisableMacroExpansion) {
+ if (!Identifier.isExpandDisabled() && MI->isEnabled()) {
+ // C99 6.10.3p10: If the preprocessing token immediately after the
+ // macro name isn't a '(', this macro should not be expanded.
+ if (!MI->isFunctionLike() || isNextPPTokenLParen())
+ return HandleMacroExpandedIdentifier(Identifier, MD);
+ } else {
+ // C99 6.10.3.4p2 says that a disabled macro may never again be
+ // expanded, even if it's in a context where it could be expanded in the
+ // future.
+ Identifier.setFlag(Token::DisableExpand);
+ if (MI->isObjectLike() || isNextPPTokenLParen())
+ Diag(Identifier, diag::pp_disabled_macro_expansion);
+ }
+ }
+ }
+
+ // If this identifier is a keyword in a newer Standard or proposed Standard,
+ // produce a warning. Don't warn if we're not considering macro expansion,
+ // since this identifier might be the name of a macro.
+ // FIXME: This warning is disabled in cases where it shouldn't be, like
+ // "#define constexpr constexpr", "int constexpr;"
+ if (II.isFutureCompatKeyword() && !DisableMacroExpansion) {
+ Diag(Identifier, getFutureCompatDiagKind(II, getLangOpts()))
+ << II.getName();
+ // Don't diagnose this keyword again in this translation unit.
+ II.setIsFutureCompatKeyword(false);
+ }
+
+ // C++ 2.11p2: If this is an alternative representation of a C++ operator,
+ // then we act as if it is the actual operator and not the textual
+ // representation of it.
+ if (II.isCPlusPlusOperatorKeyword())
+ Identifier.setIdentifierInfo(nullptr);
+
+ // If this is an extension token, diagnose its use.
+ // We avoid diagnosing tokens that originate from macro definitions.
+ // FIXME: This warning is disabled in cases where it shouldn't be,
+ // like "#define TY typeof", "TY(1) x".
+ if (II.isExtensionToken() && !DisableMacroExpansion)
+ Diag(Identifier, diag::ext_token_used);
+
+ // If this is the 'import' contextual keyword following an '@', note
+ // that the next token indicates a module name.
+ //
+ // Note that we do not treat 'import' as a contextual
+ // keyword when we're in a caching lexer, because caching lexers only get
+ // used in contexts where import declarations are disallowed.
+ if (LastTokenWasAt && II.isModulesImport() && !InMacroArgs &&
+ !DisableMacroExpansion &&
+ (getLangOpts().Modules || getLangOpts().DebuggerSupport) &&
+ CurLexerKind != CLK_CachingLexer) {
+ ModuleImportLoc = Identifier.getLocation();
+ ModuleImportPath.clear();
+ ModuleImportExpectsIdentifier = true;
+ CurLexerKind = CLK_LexAfterModuleImport;
+ }
+ return true;
+}
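+
+// C99 6.10.3p10 in action (illustrative):
+//   #define F(x) x
+//   int F = 0;    // 'F' not followed by '(' -> no expansion
+//   int y = F(1); // expanded to: int y = 1;
+// The isNextPPTokenLParen() check above implements this distinction for
+// function-like macros.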
+
+void Preprocessor::Lex(Token &Result) {
+ // We loop here until a lex function returns a token; this avoids recursion.
+ bool ReturnedToken;
+ do {
+ switch (CurLexerKind) {
+ case CLK_Lexer:
+ ReturnedToken = CurLexer->Lex(Result);
+ break;
+ case CLK_PTHLexer:
+ ReturnedToken = CurPTHLexer->Lex(Result);
+ break;
+ case CLK_TokenLexer:
+ ReturnedToken = CurTokenLexer->Lex(Result);
+ break;
+ case CLK_CachingLexer:
+ CachingLex(Result);
+ ReturnedToken = true;
+ break;
+ case CLK_LexAfterModuleImport:
+ LexAfterModuleImport(Result);
+ ReturnedToken = true;
+ break;
+ }
+ } while (!ReturnedToken);
+
+ LastTokenWasAt = Result.is(tok::at);
+}
+
+
+/// \brief Lex a token following the 'import' contextual keyword.
+///
+void Preprocessor::LexAfterModuleImport(Token &Result) {
+ // Figure out what kind of lexer we actually have.
+ recomputeCurLexerKind();
+
+ // Lex the next token.
+ Lex(Result);
+
+ // The token sequence
+ //
+ // import identifier (. identifier)*
+ //
+ // indicates a module import directive. We already saw the 'import'
+ // contextual keyword, so now we're looking for the identifiers.
+ if (ModuleImportExpectsIdentifier && Result.getKind() == tok::identifier) {
+ // We expected to see an identifier here, and we did; continue handling
+ // identifiers.
+ ModuleImportPath.push_back(std::make_pair(Result.getIdentifierInfo(),
+ Result.getLocation()));
+ ModuleImportExpectsIdentifier = false;
+ CurLexerKind = CLK_LexAfterModuleImport;
+ return;
+ }
+
+ // If we're expecting a '.' or a ';', and we got a '.', then wait until we
+ // see the next identifier.
+ if (!ModuleImportExpectsIdentifier && Result.getKind() == tok::period) {
+ ModuleImportExpectsIdentifier = true;
+ CurLexerKind = CLK_LexAfterModuleImport;
+ return;
+ }
+
+ // If we have a non-empty module path, load the named module.
+ if (!ModuleImportPath.empty()) {
+ Module *Imported = nullptr;
+ if (getLangOpts().Modules) {
+ Imported = TheModuleLoader.loadModule(ModuleImportLoc,
+ ModuleImportPath,
+ Module::Hidden,
+ /*IsIncludeDirective=*/false);
+ if (Imported)
+ makeModuleVisible(Imported, ModuleImportLoc);
+ }
+ if (Callbacks && (getLangOpts().Modules || getLangOpts().DebuggerSupport))
+ Callbacks->moduleImport(ModuleImportLoc, ModuleImportPath, Imported);
+ }
+}
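+
+// Token walk for `@import std.io;` (illustrative): '@' sets LastTokenWasAt,
+// 'import' switches to CLK_LexAfterModuleImport, and this function then
+// alternates between identifiers and periods, accumulating {std, io} in
+// ModuleImportPath until the ';' falls through to the loadModule() call.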
+
+void Preprocessor::makeModuleVisible(Module *M, SourceLocation Loc) {
+ CurSubmoduleState->VisibleModules.setVisible(
+ M, Loc, [](Module *) {},
+ [&](ArrayRef<Module *> Path, Module *Conflict, StringRef Message) {
+ // FIXME: Include the path in the diagnostic.
+ // FIXME: Include the import location for the conflicting module.
+ Diag(ModuleImportLoc, diag::warn_module_conflict)
+ << Path[0]->getFullModuleName()
+ << Conflict->getFullModuleName()
+ << Message;
+ });
+
+ // Add this module to the imports list of the currently-built submodule.
+ if (!BuildingSubmoduleStack.empty() && M != BuildingSubmoduleStack.back().M)
+ BuildingSubmoduleStack.back().M->Imports.insert(M);
+}
+
+bool Preprocessor::FinishLexStringLiteral(Token &Result, std::string &String,
+ const char *DiagnosticTag,
+ bool AllowMacroExpansion) {
+ // We need at least one string literal.
+ if (Result.isNot(tok::string_literal)) {
+ Diag(Result, diag::err_expected_string_literal)
+ << /*Source='in...'*/0 << DiagnosticTag;
+ return false;
+ }
+
+ // Lex string literal tokens, optionally with macro expansion.
+ SmallVector<Token, 4> StrToks;
+ do {
+ StrToks.push_back(Result);
+
+ if (Result.hasUDSuffix())
+ Diag(Result, diag::err_invalid_string_udl);
+
+ if (AllowMacroExpansion)
+ Lex(Result);
+ else
+ LexUnexpandedToken(Result);
+ } while (Result.is(tok::string_literal));
+
+ // Concatenate and parse the strings.
+ StringLiteralParser Literal(StrToks, *this);
+ assert(Literal.isAscii() && "Didn't allow wide strings in");
+
+ if (Literal.hadError)
+ return false;
+
+ if (Literal.Pascal) {
+ Diag(StrToks[0].getLocation(), diag::err_expected_string_literal)
+ << /*Source='in...'*/0 << DiagnosticTag;
+ return false;
+ }
+
+ String = Literal.GetString();
+ return true;
+}
+
+bool Preprocessor::parseSimpleIntegerLiteral(Token &Tok, uint64_t &Value) {
+ assert(Tok.is(tok::numeric_constant));
+ SmallString<8> IntegerBuffer;
+ bool NumberInvalid = false;
+ StringRef Spelling = getSpelling(Tok, IntegerBuffer, &NumberInvalid);
+ if (NumberInvalid)
+ return false;
+ NumericLiteralParser Literal(Spelling, Tok.getLocation(), *this);
+ if (Literal.hadError || !Literal.isIntegerLiteral() || Literal.hasUDSuffix())
+ return false;
+ llvm::APInt APVal(64, 0);
+ if (Literal.GetIntegerValue(APVal))
+ return false;
+ Lex(Tok);
+ Value = APVal.getLimitedValue();
+ return true;
+}
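+
+// Usage sketch (hypothetical pragma handler) for `#pragma unroll 4`:
+//   uint64_t N;
+//   if (Tok.is(tok::numeric_constant) && PP.parseSimpleIntegerLiteral(Tok, N))
+//     applyUnroll(N); // hypothetical helper; Tok now holds the *next* token
+// On success the literal token has been consumed; UD-suffixed and
+// non-integer literals are rejected.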
+
+void Preprocessor::addCommentHandler(CommentHandler *Handler) {
+ assert(Handler && "NULL comment handler");
+ assert(std::find(CommentHandlers.begin(), CommentHandlers.end(), Handler) ==
+ CommentHandlers.end() && "Comment handler already registered");
+ CommentHandlers.push_back(Handler);
+}
+
+void Preprocessor::removeCommentHandler(CommentHandler *Handler) {
+ std::vector<CommentHandler *>::iterator Pos
+ = std::find(CommentHandlers.begin(), CommentHandlers.end(), Handler);
+ assert(Pos != CommentHandlers.end() && "Comment handler not registered");
+ CommentHandlers.erase(Pos);
+}
+
+bool Preprocessor::HandleComment(Token &result, SourceRange Comment) {
+ bool AnyPendingTokens = false;
+ for (std::vector<CommentHandler *>::iterator H = CommentHandlers.begin(),
+ HEnd = CommentHandlers.end();
+ H != HEnd; ++H) {
+ if ((*H)->HandleComment(*this, Comment))
+ AnyPendingTokens = true;
+ }
+ if (!AnyPendingTokens || getCommentRetentionState())
+ return false;
+ Lex(result);
+ return true;
+}
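+
+// Contract sketch (illustrative): a handler returning true requests that the
+// comment be replaced by a real token; when any handler does so and comments
+// are not being retained, the Lex(result) call above fetches that token and
+// this function reports true to the caller.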
+
+ModuleLoader::~ModuleLoader() { }
+
+CommentHandler::~CommentHandler() { }
+
+CodeCompletionHandler::~CodeCompletionHandler() { }
+
+void Preprocessor::createPreprocessingRecord() {
+ if (Record)
+ return;
+
+ Record = new PreprocessingRecord(getSourceManager());
+ addPPCallbacks(std::unique_ptr<PPCallbacks>(Record));
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PreprocessorLexer.cpp b/contrib/llvm/tools/clang/lib/Lex/PreprocessorLexer.cpp
new file mode 100644
index 0000000..33ccbc0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/PreprocessorLexer.cpp
@@ -0,0 +1,58 @@
+//===--- PreprocessorLexer.cpp - C Language Family Lexer ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PreprocessorLexer and Token interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/PreprocessorLexer.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/Preprocessor.h"
+using namespace clang;
+
+void PreprocessorLexer::anchor() { }
+
+PreprocessorLexer::PreprocessorLexer(Preprocessor *pp, FileID fid)
+ : PP(pp), FID(fid), InitialNumSLocEntries(0),
+ ParsingPreprocessorDirective(false),
+ ParsingFilename(false), LexingRawMode(false) {
+ if (pp)
+ InitialNumSLocEntries = pp->getSourceManager().local_sloc_entry_size();
+}
+
+/// \brief After the preprocessor has parsed a \#include, lex and
+/// (potentially) macro expand the filename.
+void PreprocessorLexer::LexIncludeFilename(Token &FilenameTok) {
+ assert(ParsingPreprocessorDirective &&
+ ParsingFilename == false &&
+ "Must be in a preprocessing directive!");
+
+ // We are now parsing a filename!
+ ParsingFilename = true;
+
+ // Lex the filename.
+ if (LexingRawMode)
+ IndirectLex(FilenameTok);
+ else
+ PP->Lex(FilenameTok);
+
+ // We should have obtained the filename now.
+ ParsingFilename = false;
+
+ // No filename?
+ if (FilenameTok.is(tok::eod))
+ PP->Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
+}
+
+/// getFileEntry - Return the FileEntry corresponding to this FileID. Like
+/// getFileID(), this only works for lexers with attached preprocessors.
+const FileEntry *PreprocessorLexer::getFileEntry() const {
+ return PP->getSourceManager().getFileEntryForID(getFileID());
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/ScratchBuffer.cpp b/contrib/llvm/tools/clang/lib/Lex/ScratchBuffer.cpp
new file mode 100644
index 0000000..cd8a27e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/ScratchBuffer.cpp
@@ -0,0 +1,76 @@
+//===--- ScratchBuffer.cpp - Scratch space for forming tokens -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ScratchBuffer interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/ScratchBuffer.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cstring>
+using namespace clang;
+
+// ScratchBufSize - The size of each chunk of scratch memory. Slightly less
+// than a page, almost certainly enough for anything. :)
+static const unsigned ScratchBufSize = 4060;
+
+ScratchBuffer::ScratchBuffer(SourceManager &SM)
+ : SourceMgr(SM), CurBuffer(nullptr) {
+ // Set BytesUsed so that the first call to getToken will require an alloc.
+ BytesUsed = ScratchBufSize;
+}
+
+/// getToken - Splat the specified text into a temporary MemoryBuffer and
+/// return a SourceLocation that refers to the token. This is just like the
+/// method below, but returns a location that indicates the physloc of the
+/// token.
+SourceLocation ScratchBuffer::getToken(const char *Buf, unsigned Len,
+ const char *&DestPtr) {
+ if (BytesUsed+Len+2 > ScratchBufSize)
+ AllocScratchBuffer(Len+2);
+
+ // Prefix the token with a \n, so that it looks like it is the first thing on
+ // its own virtual line in caret diagnostics.
+ CurBuffer[BytesUsed++] = '\n';
+
+ // Return a pointer to the character data.
+ DestPtr = CurBuffer+BytesUsed;
+
+ // Copy the token data into the buffer.
+ memcpy(CurBuffer+BytesUsed, Buf, Len);
+
+ // Remember that we used these bytes.
+ BytesUsed += Len+1;
+
+ // Add a NUL terminator to the token. This keeps the tokens separated, in
+ // case they get relexed, and puts them on their own virtual lines in case a
+ // diagnostic points to one.
+ CurBuffer[BytesUsed-1] = '\0';
+
+ return BufferStartLoc.getLocWithOffset(BytesUsed-Len-1);
+}
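+
+// Resulting layout (illustrative) after splatting "foo" and then "ab":
+//   ... '\n' 'f' 'o' 'o' '\0' '\n' 'a' 'b' '\0' ...
+// Each token sits on its own virtual line behind a '\n' and ends with a NUL
+// separator; the returned location is BytesUsed-Len-1 bytes from the start
+// of the current chunk.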
+
+void ScratchBuffer::AllocScratchBuffer(unsigned RequestLen) {
+ // Only pay attention to the requested length if it is larger than our default
+ // page size. If it is, we allocate an entire chunk for it. This is to
+ // support gigantic tokens, which almost certainly won't happen. :)
+ if (RequestLen < ScratchBufSize)
+ RequestLen = ScratchBufSize;
+
+ // Get scratch buffer. Zero-initialize it so it can be dumped into a PCH file
+ // deterministically.
+ std::unique_ptr<llvm::MemoryBuffer> OwnBuf =
+ llvm::MemoryBuffer::getNewMemBuffer(RequestLen, "<scratch space>");
+ llvm::MemoryBuffer &Buf = *OwnBuf;
+ FileID FID = SourceMgr.createFileID(std::move(OwnBuf));
+ BufferStartLoc = SourceMgr.getLocForStartOfFile(FID);
+ CurBuffer = const_cast<char*>(Buf.getBufferStart());
+ BytesUsed = 0;
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp b/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp
new file mode 100644
index 0000000..27b4eab
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp
@@ -0,0 +1,287 @@
+//===--- TokenConcatenation.cpp - Token Concatenation Avoidance -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TokenConcatenation class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/TokenConcatenation.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+
+
+/// IsStringPrefix - Return true if Str is a string-literal prefix:
+/// 'L', 'u', 'U', or 'u8', including raw versions.
+static bool IsStringPrefix(StringRef Str, bool CPlusPlus11) {
+
+ if (Str[0] == 'L' ||
+ (CPlusPlus11 && (Str[0] == 'u' || Str[0] == 'U' || Str[0] == 'R'))) {
+
+ if (Str.size() == 1)
+ return true; // "L", "u", "U", and "R"
+
+ // Check for raw flavors. Need to make sure the first character wasn't
+ // already R. Need CPlusPlus11 check for "LR".
+ if (Str[1] == 'R' && Str[0] != 'R' && Str.size() == 2 && CPlusPlus11)
+ return true; // "LR", "uR", "UR"
+
+ // Check for "u8" and "u8R"
+ if (Str[0] == 'u' && Str[1] == '8') {
+ if (Str.size() == 2) return true; // "u8"
+ if (Str.size() == 3 && Str[2] == 'R') return true; // "u8R"
+ }
+ }
+
+ return false;
+}
+
+/// IsIdentifierStringPrefix - Return true if the spelling of the token
+/// is literally 'L', 'u', 'U', or 'u8', including raw versions.
+bool TokenConcatenation::IsIdentifierStringPrefix(const Token &Tok) const {
+ const LangOptions &LangOpts = PP.getLangOpts();
+
+ if (!Tok.needsCleaning()) {
+ if (Tok.getLength() < 1 || Tok.getLength() > 3)
+ return false;
+ SourceManager &SM = PP.getSourceManager();
+ const char *Ptr = SM.getCharacterData(SM.getSpellingLoc(Tok.getLocation()));
+ return IsStringPrefix(StringRef(Ptr, Tok.getLength()),
+ LangOpts.CPlusPlus11);
+ }
+
+ if (Tok.getLength() < 256) {
+ char Buffer[256];
+ const char *TokPtr = Buffer;
+ unsigned length = PP.getSpelling(Tok, TokPtr);
+ return IsStringPrefix(StringRef(TokPtr, length), LangOpts.CPlusPlus11);
+ }
+
+ return IsStringPrefix(StringRef(PP.getSpelling(Tok)), LangOpts.CPlusPlus11);
+}
+
+TokenConcatenation::TokenConcatenation(Preprocessor &pp) : PP(pp) {
+ memset(TokenInfo, 0, sizeof(TokenInfo));
+
+ // These tokens have custom code in AvoidConcat.
+ TokenInfo[tok::identifier ] |= aci_custom;
+ TokenInfo[tok::numeric_constant] |= aci_custom_firstchar;
+ TokenInfo[tok::period ] |= aci_custom_firstchar;
+ TokenInfo[tok::amp ] |= aci_custom_firstchar;
+ TokenInfo[tok::plus ] |= aci_custom_firstchar;
+ TokenInfo[tok::minus ] |= aci_custom_firstchar;
+ TokenInfo[tok::slash ] |= aci_custom_firstchar;
+ TokenInfo[tok::less ] |= aci_custom_firstchar;
+ TokenInfo[tok::greater ] |= aci_custom_firstchar;
+ TokenInfo[tok::pipe ] |= aci_custom_firstchar;
+ TokenInfo[tok::percent ] |= aci_custom_firstchar;
+ TokenInfo[tok::colon ] |= aci_custom_firstchar;
+ TokenInfo[tok::hash ] |= aci_custom_firstchar;
+ TokenInfo[tok::arrow ] |= aci_custom_firstchar;
+
+ // These tokens have custom code in C++11 mode.
+ if (PP.getLangOpts().CPlusPlus11) {
+ TokenInfo[tok::string_literal ] |= aci_custom;
+ TokenInfo[tok::wide_string_literal ] |= aci_custom;
+ TokenInfo[tok::utf8_string_literal ] |= aci_custom;
+ TokenInfo[tok::utf16_string_literal] |= aci_custom;
+ TokenInfo[tok::utf32_string_literal] |= aci_custom;
+ TokenInfo[tok::char_constant ] |= aci_custom;
+ TokenInfo[tok::wide_char_constant ] |= aci_custom;
+ TokenInfo[tok::utf16_char_constant ] |= aci_custom;
+ TokenInfo[tok::utf32_char_constant ] |= aci_custom;
+ }
+
+ // These tokens have custom code in C++1z mode.
+ if (PP.getLangOpts().CPlusPlus1z)
+ TokenInfo[tok::utf8_char_constant] |= aci_custom;
+
+ // These tokens change behavior if followed by an '='.
+ TokenInfo[tok::amp ] |= aci_avoid_equal; // &=
+ TokenInfo[tok::plus ] |= aci_avoid_equal; // +=
+ TokenInfo[tok::minus ] |= aci_avoid_equal; // -=
+ TokenInfo[tok::slash ] |= aci_avoid_equal; // /=
+ TokenInfo[tok::less ] |= aci_avoid_equal; // <=
+ TokenInfo[tok::greater ] |= aci_avoid_equal; // >=
+ TokenInfo[tok::pipe ] |= aci_avoid_equal; // |=
+ TokenInfo[tok::percent ] |= aci_avoid_equal; // %=
+ TokenInfo[tok::star ] |= aci_avoid_equal; // *=
+ TokenInfo[tok::exclaim ] |= aci_avoid_equal; // !=
+ TokenInfo[tok::lessless ] |= aci_avoid_equal; // <<=
+ TokenInfo[tok::greatergreater] |= aci_avoid_equal; // >>=
+ TokenInfo[tok::caret ] |= aci_avoid_equal; // ^=
+ TokenInfo[tok::equal ] |= aci_avoid_equal; // ==
+}
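+
+// For example (illustrative): printing the adjacent tokens `+` and `=` with
+// -E must emit "+ =", since "+=" would re-lex as one compound-assignment
+// token; the aci_avoid_equal bit on tok::plus encodes exactly that hazard.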
+
+/// GetFirstChar - Get the first character of the token \arg Tok,
+/// avoiding calls to getSpelling where possible.
+static char GetFirstChar(Preprocessor &PP, const Token &Tok) {
+ if (IdentifierInfo *II = Tok.getIdentifierInfo()) {
+ // Avoid spelling identifiers, the most common form of token.
+ return II->getNameStart()[0];
+ } else if (!Tok.needsCleaning()) {
+ if (Tok.isLiteral() && Tok.getLiteralData()) {
+ return *Tok.getLiteralData();
+ } else {
+ SourceManager &SM = PP.getSourceManager();
+ return *SM.getCharacterData(SM.getSpellingLoc(Tok.getLocation()));
+ }
+ } else if (Tok.getLength() < 256) {
+ char Buffer[256];
+ const char *TokPtr = Buffer;
+ PP.getSpelling(Tok, TokPtr);
+ return TokPtr[0];
+ } else {
+ return PP.getSpelling(Tok)[0];
+ }
+}
+
+/// AvoidConcat - If printing PrevTok immediately followed by Tok would cause
+/// the two individual tokens to be lexed as a single token, return true
+/// (which causes a space to be printed between them). This allows the output
+/// of -E mode to be lexed to the same token stream as lexing the input
+/// directly would.
+///
+/// This code must conservatively return true if it doesn't want to be 100%
+/// accurate. This will cause the output to include extra space characters,
+/// but the resulting output won't have incorrect concatenations going on.
+/// Examples include "..", which we print with a space between, because we
+/// don't want to track enough to tell "x.." from "...".
+bool TokenConcatenation::AvoidConcat(const Token &PrevPrevTok,
+ const Token &PrevTok,
+ const Token &Tok) const {
+ // First, check to see if the tokens were directly adjacent in the original
+ // source. If they were, it must be okay to stick them together: if there
+ // were an issue, the tokens would have been lexed differently.
+ SourceManager &SM = PP.getSourceManager();
+ SourceLocation PrevSpellLoc = SM.getSpellingLoc(PrevTok.getLocation());
+ SourceLocation SpellLoc = SM.getSpellingLoc(Tok.getLocation());
+ if (PrevSpellLoc.getLocWithOffset(PrevTok.getLength()) == SpellLoc)
+ return false;
+
+ tok::TokenKind PrevKind = PrevTok.getKind();
+ if (!PrevTok.isAnnotation() && PrevTok.getIdentifierInfo())
+ PrevKind = tok::identifier; // Language keyword or named operator.
+
+ // Look up information on when we should avoid concatenation with prevtok.
+ unsigned ConcatInfo = TokenInfo[PrevKind];
+
+ // If prevtok never causes a problem for anything after it, return quickly.
+ if (ConcatInfo == 0) return false;
+
+ if (ConcatInfo & aci_avoid_equal) {
+ // If the next token is '=' or '==', avoid concatenation.
+ if (Tok.isOneOf(tok::equal, tok::equalequal))
+ return true;
+ ConcatInfo &= ~aci_avoid_equal;
+ }
+ if (Tok.isAnnotation()) {
+ // Module annotation tokens can show up when generated automatically for includes.
+ assert(Tok.isOneOf(tok::annot_module_include, tok::annot_module_begin,
+ tok::annot_module_end) &&
+ "unexpected annotation in AvoidConcat");
+ ConcatInfo = 0;
+ }
+
+ if (ConcatInfo == 0)
+ return false;
+
+ // Basic algorithm: we look at the first character of the second token, and
+ // determine whether it, if appended to the first token, would form (or
+ // would contribute) to a larger token if concatenated.
+ char FirstChar = 0;
+ if (ConcatInfo & aci_custom) {
+ // Custom-handled tokens do not need the first character here; don't fetch it.
+ } else {
+ FirstChar = GetFirstChar(PP, Tok);
+ }
+
+ switch (PrevKind) {
+ default:
+ llvm_unreachable("InitAvoidConcatTokenInfo built wrong");
+
+ case tok::raw_identifier:
+ llvm_unreachable("tok::raw_identifier in non-raw lexing mode!");
+
+ case tok::string_literal:
+ case tok::wide_string_literal:
+ case tok::utf8_string_literal:
+ case tok::utf16_string_literal:
+ case tok::utf32_string_literal:
+ case tok::char_constant:
+ case tok::wide_char_constant:
+ case tok::utf8_char_constant:
+ case tok::utf16_char_constant:
+ case tok::utf32_char_constant:
+ if (!PP.getLangOpts().CPlusPlus11)
+ return false;
+
+ // In C++11, a string or character literal followed by an identifier is a
+ // single token.
+ if (Tok.getIdentifierInfo())
+ return true;
+
+ // A ud-suffix is an identifier. If the previous token ends with one, treat
+ // it as an identifier.
+ if (!PrevTok.hasUDSuffix())
+ return false;
+ // FALL THROUGH.
+ case tok::identifier: // id+id or id+number or id+L"foo".
+ // id+'.'... will not append.
+ if (Tok.is(tok::numeric_constant))
+ return GetFirstChar(PP, Tok) != '.';
+
+ if (Tok.getIdentifierInfo() ||
+ Tok.isOneOf(tok::wide_string_literal, tok::utf8_string_literal,
+ tok::utf16_string_literal, tok::utf32_string_literal,
+ tok::wide_char_constant, tok::utf8_char_constant,
+ tok::utf16_char_constant, tok::utf32_char_constant))
+ return true;
+
+ // If this isn't identifier + string, we're done.
+ if (Tok.isNot(tok::char_constant) && Tok.isNot(tok::string_literal))
+ return false;
+
+ // Otherwise, this is a narrow character or string. If the *identifier*
+ // is a literal 'L', 'u8', 'u' or 'U', avoid pasting L "foo" -> L"foo".
+ return IsIdentifierStringPrefix(PrevTok);
+
+ case tok::numeric_constant:
+ return isPreprocessingNumberBody(FirstChar) ||
+ FirstChar == '+' || FirstChar == '-';
+ case tok::period: // ..., .*, .1234
+ return (FirstChar == '.' && PrevPrevTok.is(tok::period)) ||
+ isDigit(FirstChar) ||
+ (PP.getLangOpts().CPlusPlus && FirstChar == '*');
+ case tok::amp: // &&
+ return FirstChar == '&';
+ case tok::plus: // ++
+ return FirstChar == '+';
+ case tok::minus: // --, ->, ->*
+ return FirstChar == '-' || FirstChar == '>';
+ case tok::slash: // /*, //
+ return FirstChar == '*' || FirstChar == '/';
+ case tok::less: // <<, <<=, <:, <%
+ return FirstChar == '<' || FirstChar == ':' || FirstChar == '%';
+ case tok::greater: // >>, >>=
+ return FirstChar == '>';
+ case tok::pipe: // ||
+ return FirstChar == '|';
+ case tok::percent: // %>, %:
+ return FirstChar == '>' || FirstChar == ':';
+ case tok::colon: // ::, :>
+ return FirstChar == '>' ||
+ (PP.getLangOpts().CPlusPlus && FirstChar == ':');
+ case tok::hash: // ##, #@, %:%:
+ return FirstChar == '#' || FirstChar == '@' || FirstChar == '%';
+ case tok::arrow: // ->*
+ return PP.getLangOpts().CPlusPlus && FirstChar == '*';
+ }
+}
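+
+// A few concrete decisions (illustrative):
+//   '-'  then '>'    -> space required ("->" is an arrow)
+//   '.'  then '1'    -> space required (".1" is a pp-number)
+//   'L'  then "foo"  -> space required (L"foo" is a wide string literal)
+//   ')'  then '('    -> no space needed; ')' has no TokenInfo entry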
diff --git a/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp b/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp
new file mode 100644
index 0000000..c429669
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp
@@ -0,0 +1,853 @@
+//===--- TokenLexer.cpp - Lex from a token stream -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TokenLexer interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/TokenLexer.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/MacroArgs.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+
+/// Create a TokenLexer for the specified macro with the specified actual
+/// arguments. Note that this ctor takes ownership of the ActualArgs pointer.
+void TokenLexer::Init(Token &Tok, SourceLocation ELEnd, MacroInfo *MI,
+ MacroArgs *Actuals) {
+ // If the client is reusing a TokenLexer, make sure to free any memory
+ // associated with it.
+ destroy();
+
+ Macro = MI;
+ ActualArgs = Actuals;
+ CurToken = 0;
+
+ ExpandLocStart = Tok.getLocation();
+ ExpandLocEnd = ELEnd;
+ AtStartOfLine = Tok.isAtStartOfLine();
+ HasLeadingSpace = Tok.hasLeadingSpace();
+ NextTokGetsSpace = false;
+ Tokens = &*Macro->tokens_begin();
+ OwnsTokens = false;
+ DisableMacroExpansion = false;
+ NumTokens = Macro->tokens_end()-Macro->tokens_begin();
+ MacroExpansionStart = SourceLocation();
+
+ SourceManager &SM = PP.getSourceManager();
+ MacroStartSLocOffset = SM.getNextLocalOffset();
+
+ if (NumTokens > 0) {
+ assert(Tokens[0].getLocation().isValid());
+ assert((Tokens[0].getLocation().isFileID() || Tokens[0].is(tok::comment)) &&
+ "Macro defined in macro?");
+ assert(ExpandLocStart.isValid());
+
+ // Reserve a source location entry chunk for the length of the macro
+ // definition. Tokens that get lexed directly from the definition will
+ // have their locations pointing inside this chunk. This is to avoid
+ // creating separate source location entries for each token.
+ MacroDefStart = SM.getExpansionLoc(Tokens[0].getLocation());
+ MacroDefLength = Macro->getDefinitionLength(SM);
+ MacroExpansionStart = SM.createExpansionLoc(MacroDefStart,
+ ExpandLocStart,
+ ExpandLocEnd,
+ MacroDefLength);
+ }
+
+ // If this is a function-like macro, expand the arguments and change
+ // Tokens to point to the expanded tokens.
+ if (Macro->isFunctionLike() && Macro->getNumArgs())
+ ExpandFunctionArguments();
+
+ // Mark the macro as currently disabled, so that it is not recursively
+ // expanded. The macro must be disabled only after any pre-expansion of its
+ // function-like arguments has occurred.
+ Macro->DisableMacro();
+}
+
+/// Create a TokenLexer for the specified token stream. This takes ownership
+/// of the specified token vector only if 'ownsTokens' is set.
+void TokenLexer::Init(const Token *TokArray, unsigned NumToks,
+ bool disableMacroExpansion, bool ownsTokens) {
+ // If the client is reusing a TokenLexer, make sure to free any memory
+ // associated with it.
+ destroy();
+
+ Macro = nullptr;
+ ActualArgs = nullptr;
+ Tokens = TokArray;
+ OwnsTokens = ownsTokens;
+ DisableMacroExpansion = disableMacroExpansion;
+ NumTokens = NumToks;
+ CurToken = 0;
+ ExpandLocStart = ExpandLocEnd = SourceLocation();
+ AtStartOfLine = false;
+ HasLeadingSpace = false;
+ NextTokGetsSpace = false;
+ MacroExpansionStart = SourceLocation();
+
+ // Set HasLeadingSpace/AtStartOfLine so that the first token will be
+ // returned unmodified.
+ if (NumToks != 0) {
+ AtStartOfLine = TokArray[0].isAtStartOfLine();
+ HasLeadingSpace = TokArray[0].hasLeadingSpace();
+ }
+}
+
+void TokenLexer::destroy() {
+ // If this was a function-like macro that actually uses its arguments, delete
+ // the expanded tokens.
+ if (OwnsTokens) {
+ delete [] Tokens;
+ Tokens = nullptr;
+ OwnsTokens = false;
+ }
+
+ // The TokenLexer owns its actual arguments (the MacroArgs object).
+ if (ActualArgs) ActualArgs->destroy(PP);
+}
+
+bool TokenLexer::MaybeRemoveCommaBeforeVaArgs(
+ SmallVectorImpl<Token> &ResultToks, bool HasPasteOperator, MacroInfo *Macro,
+ unsigned MacroArgNo, Preprocessor &PP) {
+ // Is the macro argument __VA_ARGS__?
+ if (!Macro->isVariadic() || MacroArgNo != Macro->getNumArgs()-1)
+ return false;
+
+ // In Microsoft-compatibility mode, a comma is removed in the expansion
+ // of " ... , __VA_ARGS__ " if __VA_ARGS__ is empty. This extension is
+ // not supported by gcc.
+ if (!HasPasteOperator && !PP.getLangOpts().MSVCCompat)
+ return false;
+
+ // GCC removes the comma in the expansion of " ... , ## __VA_ARGS__ " if
+ // __VA_ARGS__ is empty, but not in strict C99 mode when there are no named
+ // arguments; in that case the comma remains. In all other modes, including
+ // C99 with GNU extensions, it is removed regardless of named arguments.
+ // Microsoft also appears to support this extension, unofficially.
+ if (PP.getLangOpts().C99 && !PP.getLangOpts().GNUMode
+ && Macro->getNumArgs() < 2)
+ return false;
+
+ // Is a comma available to be removed?
+ if (ResultToks.empty() || !ResultToks.back().is(tok::comma))
+ return false;
+
+ // Issue an extension diagnostic for the paste operator.
+ if (HasPasteOperator)
+ PP.Diag(ResultToks.back().getLocation(), diag::ext_paste_comma);
+
+ // Remove the comma.
+ ResultToks.pop_back();
+
+ // If the comma was right after another paste (e.g. "X##,##__VA_ARGS__"),
+ // then removal of the comma should produce a placemarker token (in C99
+ // terms) which we model by popping off the previous ##, giving us a plain
+ // "X" when __VA_ARGS__ is empty.
+ if (!ResultToks.empty() && ResultToks.back().is(tok::hashhash))
+ ResultToks.pop_back();
+
+ // Never add a space, even if the comma, ##, or arg had a space.
+ NextTokGetsSpace = false;
+ return true;
+}
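+
+// For example, given
+// #define LOG(fmt, ...) printf(fmt, ##__VA_ARGS__)
+// the invocation LOG("hi") would naively expand to printf("hi", ); the
+// removal above yields printf("hi") instead, while LOG("%d", 1) keeps the
+// comma and expands to printf("%d", 1).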
+
+/// Expand the arguments of a function-like macro so that we can quickly
+/// return preexpanded tokens from Tokens.
+void TokenLexer::ExpandFunctionArguments() {
+
+ SmallVector<Token, 128> ResultToks;
+
+ // Loop through 'Tokens', expanding them into ResultToks. Keep
+ // track of whether we change anything. If not, no need to keep them. If so,
+ // we install the newly expanded sequence as the new 'Tokens' list.
+ bool MadeChange = false;
+
+ for (unsigned i = 0, e = NumTokens; i != e; ++i) {
+ // If we found the stringify operator, get the argument stringified. The
+ // preprocessor already verified that the following token is a macro
+ // parameter name when the #define was parsed.
+ const Token &CurTok = Tokens[i];
+ if (i != 0 && !Tokens[i-1].is(tok::hashhash) && CurTok.hasLeadingSpace())
+ NextTokGetsSpace = true;
+
+ if (CurTok.isOneOf(tok::hash, tok::hashat)) {
+ int ArgNo = Macro->getArgumentNum(Tokens[i+1].getIdentifierInfo());
+ assert(ArgNo != -1 && "Token following # is not an argument?");
+
+ SourceLocation ExpansionLocStart =
+ getExpansionLocForMacroDefLoc(CurTok.getLocation());
+ SourceLocation ExpansionLocEnd =
+ getExpansionLocForMacroDefLoc(Tokens[i+1].getLocation());
+
+ Token Res;
+ if (CurTok.is(tok::hash)) // Stringify
+ Res = ActualArgs->getStringifiedArgument(ArgNo, PP,
+ ExpansionLocStart,
+ ExpansionLocEnd);
+ else {
+ // 'charify': don't bother caching these.
+ Res = MacroArgs::StringifyArgument(ActualArgs->getUnexpArgument(ArgNo),
+ PP, true,
+ ExpansionLocStart,
+ ExpansionLocEnd);
+ }
+ Res.setFlag(Token::StringifiedInMacro);
+
+ // The stringified/charified string leading space flag gets set to match
+ // the #/#@ operator.
+ if (NextTokGetsSpace)
+ Res.setFlag(Token::LeadingSpace);
+
+ ResultToks.push_back(Res);
+ MadeChange = true;
+ ++i; // Skip arg name.
+ NextTokGetsSpace = false;
+ continue;
+ }
+
+ // Find out if there is a paste (##) operator before or after the token.
+ bool NonEmptyPasteBefore =
+ !ResultToks.empty() && ResultToks.back().is(tok::hashhash);
+ bool PasteBefore = i != 0 && Tokens[i-1].is(tok::hashhash);
+ bool PasteAfter = i+1 != e && Tokens[i+1].is(tok::hashhash);
+ assert(!NonEmptyPasteBefore || PasteBefore);
+
+ // Otherwise, if this is not an argument token, just add the token to the
+ // output buffer.
+ IdentifierInfo *II = CurTok.getIdentifierInfo();
+ int ArgNo = II ? Macro->getArgumentNum(II) : -1;
+ if (ArgNo == -1) {
+ // This isn't an argument, just add it.
+ ResultToks.push_back(CurTok);
+
+ if (NextTokGetsSpace) {
+ ResultToks.back().setFlag(Token::LeadingSpace);
+ NextTokGetsSpace = false;
+ } else if (PasteBefore && !NonEmptyPasteBefore)
+ ResultToks.back().clearFlag(Token::LeadingSpace);
+
+ continue;
+ }
+
+ // An argument is being expanded, so the result will differ from the
+ // input.
+ MadeChange = true;
+
+ // Otherwise, this is a use of the argument.
+
+ // In Microsoft mode, remove the comma before __VA_ARGS__ to ensure there
+ // are no trailing commas if __VA_ARGS__ is empty.
+ if (!PasteBefore && ActualArgs->isVarargsElidedUse() &&
+ MaybeRemoveCommaBeforeVaArgs(ResultToks,
+ /*HasPasteOperator=*/false,
+ Macro, ArgNo, PP))
+ continue;
+
+ // If it is not the LHS/RHS of a ## operator, we must pre-expand the
+ // argument and substitute the expanded tokens into the result. This is
+ // C99 6.10.3.1p1.
+ if (!PasteBefore && !PasteAfter) {
+ const Token *ResultArgToks;
+
+ // Only preexpand the argument if it could possibly need it. This
+ // avoids some work in common cases.
+ const Token *ArgTok = ActualArgs->getUnexpArgument(ArgNo);
+ if (ActualArgs->ArgNeedsPreexpansion(ArgTok, PP))
+ ResultArgToks = &ActualArgs->getPreExpArgument(ArgNo, Macro, PP)[0];
+ else
+ ResultArgToks = ArgTok; // Use non-preexpanded tokens.
+
+ // If the arg token expanded into anything, append it.
+ if (ResultArgToks->isNot(tok::eof)) {
+ unsigned FirstResult = ResultToks.size();
+ unsigned NumToks = MacroArgs::getArgLength(ResultArgToks);
+ ResultToks.append(ResultArgToks, ResultArgToks+NumToks);
+
+ // In Microsoft-compatibility mode, we follow MSVC's preprocessing
+ // behavior by not considering single commas from nested macro
+ // expansions as argument separators. Set a flag on the token so we can
+ // test for this later when the macro expansion is processed.
+ if (PP.getLangOpts().MSVCCompat && NumToks == 1 &&
+ ResultToks.back().is(tok::comma))
+ ResultToks.back().setFlag(Token::IgnoredComma);
+
+ // If the '##' came from expanding an argument, turn it into 'unknown'
+ // to avoid pasting.
+ for (unsigned i = FirstResult, e = ResultToks.size(); i != e; ++i) {
+ Token &Tok = ResultToks[i];
+ if (Tok.is(tok::hashhash))
+ Tok.setKind(tok::unknown);
+ }
+
+ if (ExpandLocStart.isValid()) {
+ updateLocForMacroArgTokens(CurTok.getLocation(),
+ ResultToks.begin()+FirstResult,
+ ResultToks.end());
+ }
+
+ // If any tokens were substituted from the argument, the whitespace
+ // before the first token should match the whitespace of the arg
+ // identifier.
+ ResultToks[FirstResult].setFlagValue(Token::LeadingSpace,
+ NextTokGetsSpace);
+ NextTokGetsSpace = false;
+ }
+ continue;
+ }
+
+ // Okay, we have a token that is either the LHS or RHS of a paste (##)
+ // argument. It gets substituted as its non-pre-expanded tokens.
+ const Token *ArgToks = ActualArgs->getUnexpArgument(ArgNo);
+ unsigned NumToks = MacroArgs::getArgLength(ArgToks);
+ if (NumToks) { // Not an empty argument?
+ // If this is the GNU ", ## __VA_ARGS__" extension, and we just learned
+ // that __VA_ARGS__ expands to multiple tokens, avoid a pasting error when
+ // the expander tries to paste ',' with the first token of the __VA_ARGS__
+ // expansion.
+ if (NonEmptyPasteBefore && ResultToks.size() >= 2 &&
+ ResultToks[ResultToks.size()-2].is(tok::comma) &&
+ (unsigned)ArgNo == Macro->getNumArgs()-1 &&
+ Macro->isVariadic()) {
+ // Remove the paste operator, report use of the extension.
+ PP.Diag(ResultToks.pop_back_val().getLocation(), diag::ext_paste_comma);
+ }
+
+ ResultToks.append(ArgToks, ArgToks+NumToks);
+
+ // If the '##' came from expanding an argument, turn it into 'unknown'
+ // to avoid pasting.
+ for (unsigned i = ResultToks.size() - NumToks, e = ResultToks.size();
+ i != e; ++i) {
+ Token &Tok = ResultToks[i];
+ if (Tok.is(tok::hashhash))
+ Tok.setKind(tok::unknown);
+ }
+
+ if (ExpandLocStart.isValid()) {
+ updateLocForMacroArgTokens(CurTok.getLocation(),
+ ResultToks.end()-NumToks, ResultToks.end());
+ }
+
+ // If this token (the macro argument) was supposed to get leading
+ // whitespace, transfer this information onto the first token of the
+ // expansion.
+ //
+ // Do not do this if the paste operator occurs before the macro argument,
+ // as in "A ## MACROARG". In valid code, the first token will get
+ // smooshed onto the preceding one anyway (forming AMACROARG). In
+ // assembler-with-cpp mode, invalid pastes are allowed through: in this
+ // case, we do not want the extra whitespace to be added. For example,
+ // we want ". ## foo" -> ".foo" not ". foo".
+ if (NextTokGetsSpace)
+ ResultToks[ResultToks.size()-NumToks].setFlag(Token::LeadingSpace);
+
+ NextTokGetsSpace = false;
+ continue;
+ }
+
+ // If an empty argument is on the LHS or RHS of a paste, the standard (C99
+ // 6.10.3.3p2,3) calls for a bunch of placemarker stuff to occur. We
+ // implement this by eating ## operators when a LHS or RHS expands to
+ // empty.
+ if (PasteAfter) {
+ // Discard the argument token and skip (don't copy to the expansion
+ // buffer) the paste operator after it.
+ ++i;
+ continue;
+ }
+
+ // If this is on the RHS of a paste operator, we've already copied the
+ // paste operator to the ResultToks list, unless the LHS was empty too.
+ // Remove it.
+ assert(PasteBefore);
+ if (NonEmptyPasteBefore) {
+ assert(ResultToks.back().is(tok::hashhash));
+ ResultToks.pop_back();
+ }
+
+ // If this is the __VA_ARGS__ token, and if the argument wasn't provided,
+ // and if the macro had at least one real argument, and if the token before
+ // the ## was a comma, remove the comma. This is a GCC extension which is
+ // disabled when using -std=c99.
+ if (ActualArgs->isVarargsElidedUse())
+ MaybeRemoveCommaBeforeVaArgs(ResultToks,
+ /*HasPasteOperator=*/true,
+ Macro, ArgNo, PP);
+
+ continue;
+ }
+
+ // If anything changed, install this as the new Tokens list.
+ if (MadeChange) {
+ assert(!OwnsTokens && "This would leak if we already own the token list");
+ NumTokens = ResultToks.size();
+ // The tokens will be added to Preprocessor's cache and will be removed
+ // when this TokenLexer finishes lexing them.
+ Tokens = PP.cacheMacroExpandedTokens(this, ResultToks);
+
+ // The preprocessor cache of macro expanded tokens owns these tokens, not
+ // us.
+ OwnsTokens = false;
+ }
+}
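+
+// For example, given
+// #define E 1+2
+// #define F(x) x ## 3
+// the use of 'x' in F is an operand of ##, so F(E) pastes the unexpanded
+// 'E' with '3' to form the identifier 'E3'; used away from # and ##, the
+// argument would first be pre-expanded to 1+2 (C99 6.10.3.1p1).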
+
+/// \brief Checks if two tokens form a wide string literal.
+static bool isWideStringLiteralFromMacro(const Token &FirstTok,
+ const Token &SecondTok) {
+ return FirstTok.is(tok::identifier) &&
+ FirstTok.getIdentifierInfo()->isStr("L") && SecondTok.isLiteral() &&
+ SecondTok.stringifiedInMacro();
+}
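+
+// For example, in -fms-compatibility mode,
+// #define WIDE(x) L#x
+// WIDE(foo) forms the single wide string literal L"foo", even though 'L'
+// and "foo" arrive here as two separate tokens.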
+
+/// Lex - Lex and return a token from this macro stream.
+///
+bool TokenLexer::Lex(Token &Tok) {
+ // Lexing off the end of the macro, pop this macro off the expansion stack.
+ if (isAtEnd()) {
+ // If this is a macro (not a token stream), mark the macro enabled now
+ // that it is no longer being expanded.
+ if (Macro) Macro->EnableMacro();
+
+ Tok.startToken();
+ Tok.setFlagValue(Token::StartOfLine, AtStartOfLine);
+ Tok.setFlagValue(Token::LeadingSpace, HasLeadingSpace || NextTokGetsSpace);
+ if (CurToken == 0)
+ Tok.setFlag(Token::LeadingEmptyMacro);
+ return PP.HandleEndOfTokenLexer(Tok);
+ }
+
+ SourceManager &SM = PP.getSourceManager();
+
+ // If this is the first token of the expanded result, we inherit spacing
+ // properties later.
+ bool isFirstToken = CurToken == 0;
+
+ // Get the next token to return.
+ Tok = Tokens[CurToken++];
+
+ bool TokenIsFromPaste = false;
+
+ // If this token is followed by a token paste (##) operator, paste the tokens!
+ // Note that ## is a normal token when not expanding a macro.
+ if (!isAtEnd() && Macro &&
+ (Tokens[CurToken].is(tok::hashhash) ||
+ // Special processing of L#x macros in -fms-compatibility mode.
+ // Microsoft compiler is able to form a wide string literal from
+ // 'L#macro_arg' construct in a function-like macro.
+ (PP.getLangOpts().MSVCCompat &&
+ isWideStringLiteralFromMacro(Tok, Tokens[CurToken])))) {
+ // When handling the microsoft /##/ extension, the final token is
+ // returned by PasteTokens, not the pasted token.
+ if (PasteTokens(Tok))
+ return true;
+
+ TokenIsFromPaste = true;
+ }
+
+ // The token's current location indicates where the token was lexed from. We
+ // need this information to compute the spelling of the token, but any
+ // diagnostics for the expanded token should appear as if they came from
+ // ExpansionLoc. Pull this information together into a new SourceLocation
+ // that captures all of this.
+ if (ExpandLocStart.isValid() && // Don't do this for token streams.
+ // Check that the token's location was not already set properly.
+ SM.isBeforeInSLocAddrSpace(Tok.getLocation(), MacroStartSLocOffset)) {
+ SourceLocation instLoc;
+ if (Tok.is(tok::comment)) {
+ instLoc = SM.createExpansionLoc(Tok.getLocation(),
+ ExpandLocStart,
+ ExpandLocEnd,
+ Tok.getLength());
+ } else {
+ instLoc = getExpansionLocForMacroDefLoc(Tok.getLocation());
+ }
+
+ Tok.setLocation(instLoc);
+ }
+
+ // If this is the first token, set the lexical properties of the token to
+ // match the lexical properties of the macro identifier.
+ if (isFirstToken) {
+ Tok.setFlagValue(Token::StartOfLine, AtStartOfLine);
+ Tok.setFlagValue(Token::LeadingSpace, HasLeadingSpace);
+ } else {
+ // If this is not the first token, we may still need to pass through
+ // leading whitespace if we've expanded a macro.
+ if (AtStartOfLine) Tok.setFlag(Token::StartOfLine);
+ if (HasLeadingSpace) Tok.setFlag(Token::LeadingSpace);
+ }
+ AtStartOfLine = false;
+ HasLeadingSpace = false;
+
+ // Handle recursive expansion!
+ if (!Tok.isAnnotation() && Tok.getIdentifierInfo() != nullptr) {
+ // Change the kind of this identifier to the appropriate token kind, e.g.
+ // turning "for" into a keyword.
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ Tok.setKind(II->getTokenID());
+
+ // If this identifier was poisoned and from a paste, emit an error. This
+ // won't be handled by Preprocessor::HandleIdentifier because this is coming
+ // from a macro expansion.
+ if (II->isPoisoned() && TokenIsFromPaste) {
+ PP.HandlePoisonedIdentifier(Tok);
+ }
+
+ if (!DisableMacroExpansion && II->isHandleIdentifierCase())
+ return PP.HandleIdentifier(Tok);
+ }
+
+ // Otherwise, return a normal token.
+ return true;
+}
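+
+// For example, given
+// #define EMPTY
+// EMPTY ;
+// the ';' that follows the empty expansion carries the LeadingEmptyMacro
+// flag set above, which later consumers can consult (e.g. when deciding
+// whether an empty statement looks intentional).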
+
+/// PasteTokens - Tok is the LHS of a ## operator, and CurToken is the ##
+/// operator. Read the ## and RHS, and paste the LHS/RHS together. If there
+/// are more ## after it, chomp them iteratively. Return the result as Tok.
+/// If this returns true, the caller should immediately return the token.
+bool TokenLexer::PasteTokens(Token &Tok) {
+ // MSVC: If previous token was pasted, this must be a recovery from an invalid
+ // paste operation. Ignore spaces before this token to mimic MSVC output.
+ // Required for generating valid UUID strings in some MS headers.
+ if (PP.getLangOpts().MicrosoftExt && (CurToken >= 2) &&
+ Tokens[CurToken - 2].is(tok::hashhash))
+ Tok.clearFlag(Token::LeadingSpace);
+
+ SmallString<128> Buffer;
+ const char *ResultTokStrPtr = nullptr;
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation PasteOpLoc;
+ do {
+ // Consume the ## operator if any.
+ PasteOpLoc = Tokens[CurToken].getLocation();
+ if (Tokens[CurToken].is(tok::hashhash))
+ ++CurToken;
+ assert(!isAtEnd() && "No token on the RHS of a paste operator!");
+
+ // Get the RHS token.
+ const Token &RHS = Tokens[CurToken];
+
+ // Allocate space for the result token. This is guaranteed to be enough for
+ // the two tokens.
+ Buffer.resize(Tok.getLength() + RHS.getLength());
+
+ // Get the spelling of the LHS token in Buffer.
+ const char *BufPtr = &Buffer[0];
+ bool Invalid = false;
+ unsigned LHSLen = PP.getSpelling(Tok, BufPtr, &Invalid);
+ if (BufPtr != &Buffer[0]) // Really, we want the chars in Buffer!
+ memcpy(&Buffer[0], BufPtr, LHSLen);
+ if (Invalid)
+ return true;
+
+ BufPtr = Buffer.data() + LHSLen;
+ unsigned RHSLen = PP.getSpelling(RHS, BufPtr, &Invalid);
+ if (Invalid)
+ return true;
+ if (RHSLen && BufPtr != &Buffer[LHSLen])
+ // Really, we want the chars in Buffer!
+ memcpy(&Buffer[LHSLen], BufPtr, RHSLen);
+
+ // Trim excess space.
+ Buffer.resize(LHSLen+RHSLen);
+
+ // Plop the pasted result (including the trailing newline and null) into a
+ // scratch buffer where we can lex it.
+ Token ResultTokTmp;
+ ResultTokTmp.startToken();
+
+ // Claim that the tmp token is a string_literal so that we can get the
+ // character pointer back from CreateString in getLiteralData().
+ ResultTokTmp.setKind(tok::string_literal);
+ PP.CreateString(Buffer, ResultTokTmp);
+ SourceLocation ResultTokLoc = ResultTokTmp.getLocation();
+ ResultTokStrPtr = ResultTokTmp.getLiteralData();
+
+ // Lex the resultant pasted token into Result.
+ Token Result;
+
+ if (Tok.isAnyIdentifier() && RHS.isAnyIdentifier()) {
+ // Common paste case: identifier+identifier = identifier. Avoid creating
+ // a lexer and other overhead.
+ PP.IncrementPasteCounter(true);
+ Result.startToken();
+ Result.setKind(tok::raw_identifier);
+ Result.setRawIdentifierData(ResultTokStrPtr);
+ Result.setLocation(ResultTokLoc);
+ Result.setLength(LHSLen+RHSLen);
+ } else {
+ PP.IncrementPasteCounter(false);
+
+ assert(ResultTokLoc.isFileID() &&
+ "Should be a raw location into scratch buffer");
+ SourceManager &SourceMgr = PP.getSourceManager();
+ FileID LocFileID = SourceMgr.getFileID(ResultTokLoc);
+
+ bool Invalid = false;
+ const char *ScratchBufStart
+ = SourceMgr.getBufferData(LocFileID, &Invalid).data();
+ if (Invalid)
+ return false;
+
+ // Make a lexer object over the scratch buffer so that we can lex and
+ // expand the paste result. Lex just this one token.
+ Lexer TL(SourceMgr.getLocForStartOfFile(LocFileID),
+ PP.getLangOpts(), ScratchBufStart,
+ ResultTokStrPtr, ResultTokStrPtr+LHSLen+RHSLen);
+
+ // Lex a token in raw mode. This way it won't look up identifiers
+ // automatically, lexing off the end will return an eof token, and
+ // warnings are disabled. This returns true if the result token is the
+ // entire buffer.
+ bool isInvalid = !TL.LexFromRawLexer(Result);
+
+ // If we got an EOF token, we didn't form even ONE token. For example, we
+ // did "/ ## /" to get "//".
+ isInvalid |= Result.is(tok::eof);
+
+ // If pasting the two tokens didn't form a full new token, this is an
+ // error. This occurs with "x ## +" and other stuff. Return with Tok
+ // unmodified and with RHS as the next token to lex.
+ if (isInvalid) {
+ // Explicitly convert the token location to have proper expansion
+ // information so that the user knows where it came from.
+ SourceManager &SM = PP.getSourceManager();
+ SourceLocation Loc =
+ SM.createExpansionLoc(PasteOpLoc, ExpandLocStart, ExpandLocEnd, 2);
+
+ // Test for the Microsoft extension of /##/ turning into // here on the
+ // error path.
+ if (PP.getLangOpts().MicrosoftExt && Tok.is(tok::slash) &&
+ RHS.is(tok::slash)) {
+ HandleMicrosoftCommentPaste(Tok, Loc);
+ return true;
+ }
+
+ // Do not emit the error when preprocessing assembler code.
+ if (!PP.getLangOpts().AsmPreprocessor) {
+ // If we're in microsoft extensions mode, downgrade this from a hard
+ // error to an extension that defaults to an error. This allows
+ // disabling it.
+ PP.Diag(Loc, PP.getLangOpts().MicrosoftExt ? diag::ext_pp_bad_paste_ms
+ : diag::err_pp_bad_paste)
+ << Buffer;
+ }
+
+ // An error has occurred, so exit the loop.
+ break;
+ }
+
+ // Turn ## into 'unknown' to prevent # ## # from looking like a paste
+ // operator.
+ if (Result.is(tok::hashhash))
+ Result.setKind(tok::unknown);
+ }
+
+ // Transfer properties of the LHS over the Result.
+ Result.setFlagValue(Token::StartOfLine, Tok.isAtStartOfLine());
+ Result.setFlagValue(Token::LeadingSpace, Tok.hasLeadingSpace());
+
+ // Finally, replace LHS with the result, consume the RHS, and iterate.
+ ++CurToken;
+ Tok = Result;
+ } while (!isAtEnd() && Tokens[CurToken].is(tok::hashhash));
+
+ SourceLocation EndLoc = Tokens[CurToken - 1].getLocation();
+
+ // The token's current location indicates where the token was lexed from. We
+ // need this information to compute the spelling of the token, but any
+ // diagnostics for the expanded token should appear as if the token was
+ // expanded from the full ## expression. Pull this information together into
+ // a new SourceLocation that captures all of this.
+ SourceManager &SM = PP.getSourceManager();
+ if (StartLoc.isFileID())
+ StartLoc = getExpansionLocForMacroDefLoc(StartLoc);
+ if (EndLoc.isFileID())
+ EndLoc = getExpansionLocForMacroDefLoc(EndLoc);
+ FileID MacroFID = SM.getFileID(MacroExpansionStart);
+ while (SM.getFileID(StartLoc) != MacroFID)
+ StartLoc = SM.getImmediateExpansionRange(StartLoc).first;
+ while (SM.getFileID(EndLoc) != MacroFID)
+ EndLoc = SM.getImmediateExpansionRange(EndLoc).second;
+
+ Tok.setLocation(SM.createExpansionLoc(Tok.getLocation(), StartLoc, EndLoc,
+ Tok.getLength()));
+
+ // Now that we got the result token, it will be subject to expansion. Since
+ // token pasting re-lexes the result token in raw mode, identifier information
+ // isn't looked up. As such, if the result is an identifier, look up id info.
+ if (Tok.is(tok::raw_identifier)) {
+ // Look up the identifier info for the token. We disabled identifier lookup
+ // by saying we're skipping contents, so we need to do this manually.
+ PP.LookUpIdentifierInfo(Tok);
+ }
+ return false;
+}
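+
+// For example, 'a ## b' pastes into the single identifier 'ab' via the fast
+// identifier path above, while 'x ## +' does not form a valid token and is
+// diagnosed with err_pp_bad_paste (or silently left as two tokens when
+// preprocessing assembler).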
+
+/// isNextTokenLParen - If the next token lexed will pop this macro off the
+/// expansion stack, return 2. If the next unexpanded token is a '(', return
+/// 1, otherwise return 0.
+unsigned TokenLexer::isNextTokenLParen() const {
+ // Out of tokens?
+ if (isAtEnd())
+ return 2;
+ return Tokens[CurToken].is(tok::l_paren);
+}
+
+/// isParsingPreprocessorDirective - Return true if we are in the middle of a
+/// preprocessor directive.
+bool TokenLexer::isParsingPreprocessorDirective() const {
+ return Tokens[NumTokens-1].is(tok::eod) && !isAtEnd();
+}
+
+/// HandleMicrosoftCommentPaste - In microsoft compatibility mode, /##/ pastes
+/// together to form a comment that comments out everything in the current
+/// macro, other active macros, and anything left on the current physical
+/// source line of the expanded buffer. Handle this by returning the
+/// first token on the next line.
+void TokenLexer::HandleMicrosoftCommentPaste(Token &Tok, SourceLocation OpLoc) {
+ PP.Diag(OpLoc, diag::ext_comment_paste_microsoft);
+
+ // We 'comment out' the rest of this macro by just ignoring the rest of the
+ // tokens that have not been lexed yet, if any.
+
+ // Since this must be a macro, mark the macro enabled now that it is no longer
+ // being expanded.
+ assert(Macro && "Token streams can't paste comments");
+ Macro->EnableMacro();
+
+ PP.HandleMicrosoftCommentPaste(Tok);
+}
+
+/// \brief If \arg loc is a file ID and points inside the current macro
+/// definition, returns the appropriate source location pointing at the
+/// macro expansion source location entry, otherwise it returns an invalid
+/// SourceLocation.
+SourceLocation
+TokenLexer::getExpansionLocForMacroDefLoc(SourceLocation loc) const {
+ assert(ExpandLocStart.isValid() && MacroExpansionStart.isValid() &&
+ "Not appropriate for token streams");
+ assert(loc.isValid() && loc.isFileID());
+
+ SourceManager &SM = PP.getSourceManager();
+ assert(SM.isInSLocAddrSpace(loc, MacroDefStart, MacroDefLength) &&
+ "Expected loc to come from the macro definition");
+
+ unsigned relativeOffset = 0;
+ SM.isInSLocAddrSpace(loc, MacroDefStart, MacroDefLength, &relativeOffset);
+ return MacroExpansionStart.getLocWithOffset(relativeOffset);
+}
+
+/// \brief Finds the tokens that are consecutive (from the same FileID),
+/// creates a single SLocEntry for them, and assigns SourceLocations to each
+/// token that point inside that SLocEntry. E.g. for
+/// assert(foo == bar);
+/// there will be a single SLocEntry for the "foo == bar" chunk, and the
+/// locations of the 'foo', '==', 'bar' tokens will point inside that chunk.
+///
+/// \arg begin_tokens will be updated to a position past all the found
+/// consecutive tokens.
+static void updateConsecutiveMacroArgTokens(SourceManager &SM,
+ SourceLocation InstLoc,
+ Token *&begin_tokens,
+ Token * end_tokens) {
+ assert(begin_tokens < end_tokens);
+
+ SourceLocation FirstLoc = begin_tokens->getLocation();
+ SourceLocation CurLoc = FirstLoc;
+
+ // Compare the source location offsets of tokens and group together tokens
+ // that are close, even if their locations point to different FileIDs. e.g.
+ //
+ // |bar | foo | cake | (3 tokens from 3 consecutive FileIDs)
+ // ^ ^
+ // |bar foo cake| (one SLocEntry chunk for all tokens)
+ //
+ // we can perform this "merge" since the token's spelling location depends
+ // on the relative offset.
+
+ Token *NextTok = begin_tokens + 1;
+ for (; NextTok < end_tokens; ++NextTok) {
+ SourceLocation NextLoc = NextTok->getLocation();
+ if (CurLoc.isFileID() != NextLoc.isFileID())
+ break; // Token from different kind of FileID.
+
+ int RelOffs;
+ if (!SM.isInSameSLocAddrSpace(CurLoc, NextLoc, &RelOffs))
+ break; // Token from different local/loaded location.
+ // Check that token is not before the previous token or more than 50
+ // "characters" away.
+ if (RelOffs < 0 || RelOffs > 50)
+ break;
+ CurLoc = NextLoc;
+ }
+
+ // For the consecutive tokens, find the length of the SLocEntry to contain
+ // all of them.
+ Token &LastConsecutiveTok = *(NextTok-1);
+ int LastRelOffs = 0;
+ SM.isInSameSLocAddrSpace(FirstLoc, LastConsecutiveTok.getLocation(),
+ &LastRelOffs);
+ unsigned FullLength = LastRelOffs + LastConsecutiveTok.getLength();
+
+ // Create a macro expansion SLocEntry that will "contain" all of the tokens.
+ SourceLocation Expansion =
+ SM.createMacroArgExpansionLoc(FirstLoc, InstLoc, FullLength);
+
+ // Change the location of the tokens from the spelling location to the new
+ // expanded location.
+ for (; begin_tokens < NextTok; ++begin_tokens) {
+ Token &Tok = *begin_tokens;
+ int RelOffs = 0;
+ SM.isInSameSLocAddrSpace(FirstLoc, Tok.getLocation(), &RelOffs);
+ Tok.setLocation(Expansion.getLocWithOffset(RelOffs));
+ }
+}
+
+/// \brief Creates SLocEntries and updates the locations of macro argument
+/// tokens to their new expanded locations.
+///
+/// \param ArgIdSpellLoc the location of the macro argument id inside the
+/// macro definition.
+/// \param begin_tokens the first macro argument token to update.
+/// \param end_tokens one past the last macro argument token to update.
+void TokenLexer::updateLocForMacroArgTokens(SourceLocation ArgIdSpellLoc,
+ Token *begin_tokens,
+ Token *end_tokens) {
+ SourceManager &SM = PP.getSourceManager();
+
+ SourceLocation InstLoc =
+ getExpansionLocForMacroDefLoc(ArgIdSpellLoc);
+
+ while (begin_tokens < end_tokens) {
+ // If there's only one token just create a SLocEntry for it.
+ if (end_tokens - begin_tokens == 1) {
+ Token &Tok = *begin_tokens;
+ Tok.setLocation(SM.createMacroArgExpansionLoc(Tok.getLocation(),
+ InstLoc,
+ Tok.getLength()));
+ return;
+ }
+
+ updateConsecutiveMacroArgTokens(SM, InstLoc, begin_tokens, end_tokens);
+ }
+}
+
+void TokenLexer::PropagateLineStartLeadingSpaceInfo(Token &Result) {
+ AtStartOfLine = Result.isAtStartOfLine();
+ HasLeadingSpace = Result.hasLeadingSpace();
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/UnicodeCharSets.h b/contrib/llvm/tools/clang/lib/Lex/UnicodeCharSets.h
new file mode 100644
index 0000000..116d553
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/UnicodeCharSets.h
@@ -0,0 +1,408 @@
+//===--- UnicodeCharSets.h - Contains important sets of characters --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_LIB_LEX_UNICODECHARSETS_H
+#define LLVM_CLANG_LIB_LEX_UNICODECHARSETS_H
+
+#include "llvm/Support/UnicodeCharRanges.h"
+
+// C11 D.1, C++11 [charname.allowed]
+static const llvm::sys::UnicodeCharRange C11AllowedIDCharRanges[] = {
+ // 1
+ { 0x00A8, 0x00A8 }, { 0x00AA, 0x00AA }, { 0x00AD, 0x00AD },
+ { 0x00AF, 0x00AF }, { 0x00B2, 0x00B5 }, { 0x00B7, 0x00BA },
+ { 0x00BC, 0x00BE }, { 0x00C0, 0x00D6 }, { 0x00D8, 0x00F6 },
+ { 0x00F8, 0x00FF },
+ // 2
+ { 0x0100, 0x167F }, { 0x1681, 0x180D }, { 0x180F, 0x1FFF },
+ // 3
+ { 0x200B, 0x200D }, { 0x202A, 0x202E }, { 0x203F, 0x2040 },
+ { 0x2054, 0x2054 }, { 0x2060, 0x206F },
+ // 4
+ { 0x2070, 0x218F }, { 0x2460, 0x24FF }, { 0x2776, 0x2793 },
+ { 0x2C00, 0x2DFF }, { 0x2E80, 0x2FFF },
+ // 5
+ { 0x3004, 0x3007 }, { 0x3021, 0x302F }, { 0x3031, 0x303F },
+ // 6
+ { 0x3040, 0xD7FF },
+ // 7
+ { 0xF900, 0xFD3D }, { 0xFD40, 0xFDCF }, { 0xFDF0, 0xFE44 },
+ { 0xFE47, 0xFFFD },
+ // 8
+ { 0x10000, 0x1FFFD }, { 0x20000, 0x2FFFD }, { 0x30000, 0x3FFFD },
+ { 0x40000, 0x4FFFD }, { 0x50000, 0x5FFFD }, { 0x60000, 0x6FFFD },
+ { 0x70000, 0x7FFFD }, { 0x80000, 0x8FFFD }, { 0x90000, 0x9FFFD },
+ { 0xA0000, 0xAFFFD }, { 0xB0000, 0xBFFFD }, { 0xC0000, 0xCFFFD },
+ { 0xD0000, 0xDFFFD }, { 0xE0000, 0xEFFFD }
+};
+
+// C++03 [extendid]
+// Note that this is not the same as C++98, but we don't distinguish C++98
+// and C++03 in Clang.
+static const llvm::sys::UnicodeCharRange CXX03AllowedIDCharRanges[] = {
+ // Latin
+ { 0x00C0, 0x00D6 }, { 0x00D8, 0x00F6 }, { 0x00F8, 0x01F5 },
+ { 0x01FA, 0x0217 }, { 0x0250, 0x02A8 },
+
+ // Greek
+ { 0x0384, 0x0384 }, { 0x0388, 0x038A }, { 0x038C, 0x038C },
+ { 0x038E, 0x03A1 }, { 0x03A3, 0x03CE }, { 0x03D0, 0x03D6 },
+ { 0x03DA, 0x03DA }, { 0x03DC, 0x03DC }, { 0x03DE, 0x03DE },
+ { 0x03E0, 0x03E0 }, { 0x03E2, 0x03F3 },
+
+ // Cyrillic
+ { 0x0401, 0x040D }, { 0x040F, 0x044F }, { 0x0451, 0x045C },
+ { 0x045E, 0x0481 }, { 0x0490, 0x04C4 }, { 0x04C7, 0x04C8 },
+ { 0x04CB, 0x04CC }, { 0x04D0, 0x04EB }, { 0x04EE, 0x04F5 },
+ { 0x04F8, 0x04F9 },
+
+ // Armenian
+ { 0x0531, 0x0556 }, { 0x0561, 0x0587 },
+
+ // Hebrew
+ { 0x05D0, 0x05EA }, { 0x05F0, 0x05F4 },
+
+ // Arabic
+ { 0x0621, 0x063A }, { 0x0640, 0x0652 }, { 0x0670, 0x06B7 },
+ { 0x06BA, 0x06BE }, { 0x06C0, 0x06CE }, { 0x06E5, 0x06E7 },
+
+ // Devanagari
+ { 0x0905, 0x0939 }, { 0x0958, 0x0962 },
+
+ // Bengali
+ { 0x0985, 0x098C }, { 0x098F, 0x0990 }, { 0x0993, 0x09A8 },
+ { 0x09AA, 0x09B0 }, { 0x09B2, 0x09B2 }, { 0x09B6, 0x09B9 },
+ { 0x09DC, 0x09DD }, { 0x09DF, 0x09E1 }, { 0x09F0, 0x09F1 },
+
+ // Gurmukhi
+ { 0x0A05, 0x0A0A }, { 0x0A0F, 0x0A10 }, { 0x0A13, 0x0A28 },
+ { 0x0A2A, 0x0A30 }, { 0x0A32, 0x0A33 }, { 0x0A35, 0x0A36 },
+ { 0x0A38, 0x0A39 }, { 0x0A59, 0x0A5C }, { 0x0A5E, 0x0A5E },
+
+ // Gujarati
+ { 0x0A85, 0x0A8B }, { 0x0A8D, 0x0A8D }, { 0x0A8F, 0x0A91 },
+ { 0x0A93, 0x0AA8 }, { 0x0AAA, 0x0AB0 }, { 0x0AB2, 0x0AB3 },
+ { 0x0AB5, 0x0AB9 }, { 0x0AE0, 0x0AE0 },
+
+ // Oriya
+ { 0x0B05, 0x0B0C }, { 0x0B0F, 0x0B10 }, { 0x0B13, 0x0B28 },
+ { 0x0B2A, 0x0B30 }, { 0x0B32, 0x0B33 }, { 0x0B36, 0x0B39 },
+ { 0x0B5C, 0x0B5D }, { 0x0B5F, 0x0B61 },
+
+ // Tamil
+ { 0x0B85, 0x0B8A }, { 0x0B8E, 0x0B90 }, { 0x0B92, 0x0B95 },
+ { 0x0B99, 0x0B9A }, { 0x0B9C, 0x0B9C }, { 0x0B9E, 0x0B9F },
+ { 0x0BA3, 0x0BA4 }, { 0x0BA8, 0x0BAA }, { 0x0BAE, 0x0BB5 },
+ { 0x0BB7, 0x0BB9 },
+
+ // Telugu
+ { 0x0C05, 0x0C0C }, { 0x0C0E, 0x0C10 }, { 0x0C12, 0x0C28 },
+ { 0x0C2A, 0x0C33 }, { 0x0C35, 0x0C39 }, { 0x0C60, 0x0C61 },
+
+ // Kannada
+ { 0x0C85, 0x0C8C }, { 0x0C8E, 0x0C90 }, { 0x0C92, 0x0CA8 },
+ { 0x0CAA, 0x0CB3 }, { 0x0CB5, 0x0CB9 }, { 0x0CE0, 0x0CE1 },
+
+ // Malayalam
+ { 0x0D05, 0x0D0C }, { 0x0D0E, 0x0D10 }, { 0x0D12, 0x0D28 },
+ { 0x0D2A, 0x0D39 }, { 0x0D60, 0x0D61 },
+
+ // Thai
+ { 0x0E01, 0x0E30 }, { 0x0E32, 0x0E33 }, { 0x0E40, 0x0E46 },
+ { 0x0E4F, 0x0E5B },
+
+ // Lao
+ { 0x0E81, 0x0E82 }, { 0x0E84, 0x0E84 }, { 0x0E87, 0x0E87 },
+ { 0x0E88, 0x0E88 }, { 0x0E8A, 0x0E8A }, { 0x0E8D, 0x0E8D },
+ { 0x0E94, 0x0E97 }, { 0x0E99, 0x0E9F }, { 0x0EA1, 0x0EA3 },
+ { 0x0EA5, 0x0EA5 }, { 0x0EA7, 0x0EA7 }, { 0x0EAA, 0x0EAA },
+ { 0x0EAB, 0x0EAB }, { 0x0EAD, 0x0EB0 }, { 0x0EB2, 0x0EB2 },
+ { 0x0EB3, 0x0EB3 }, { 0x0EBD, 0x0EBD }, { 0x0EC0, 0x0EC4 },
+ { 0x0EC6, 0x0EC6 },
+
+ // Georgian
+ { 0x10A0, 0x10C5 }, { 0x10D0, 0x10F6 },
+
+ // Hangul
+ { 0x1100, 0x1159 }, { 0x1161, 0x11A2 }, { 0x11A8, 0x11F9 },
+
+ // Latin (2)
+ { 0x1E00, 0x1E9A }, { 0x1EA0, 0x1EF9 },
+
+ // Greek (2)
+ { 0x1F00, 0x1F15 }, { 0x1F18, 0x1F1D }, { 0x1F20, 0x1F45 },
+ { 0x1F48, 0x1F4D }, { 0x1F50, 0x1F57 }, { 0x1F59, 0x1F59 },
+ { 0x1F5B, 0x1F5B }, { 0x1F5D, 0x1F5D }, { 0x1F5F, 0x1F7D },
+ { 0x1F80, 0x1FB4 }, { 0x1FB6, 0x1FBC }, { 0x1FC2, 0x1FC4 },
+ { 0x1FC6, 0x1FCC }, { 0x1FD0, 0x1FD3 }, { 0x1FD6, 0x1FDB },
+ { 0x1FE0, 0x1FEC }, { 0x1FF2, 0x1FF4 }, { 0x1FF6, 0x1FFC },
+
+ // Hiragana
+ { 0x3041, 0x3094 }, { 0x309B, 0x309E },
+
+ // Katakana
+ { 0x30A1, 0x30FE },
+
+ // Bopomofo
+ { 0x3105, 0x312C },
+
+ // CJK Unified Ideographs
+ { 0x4E00, 0x9FA5 }, { 0xF900, 0xFA2D }, { 0xFB1F, 0xFB36 },
+ { 0xFB38, 0xFB3C }, { 0xFB3E, 0xFB3E }, { 0xFB40, 0xFB41 },
+ { 0xFB42, 0xFB44 }, { 0xFB46, 0xFBB1 }, { 0xFBD3, 0xFD3F },
+ { 0xFD50, 0xFD8F }, { 0xFD92, 0xFDC7 }, { 0xFDF0, 0xFDFB },
+ { 0xFE70, 0xFE72 }, { 0xFE74, 0xFE74 }, { 0xFE76, 0xFEFC },
+ { 0xFF21, 0xFF3A }, { 0xFF41, 0xFF5A }, { 0xFF66, 0xFFBE },
+ { 0xFFC2, 0xFFC7 }, { 0xFFCA, 0xFFCF }, { 0xFFD2, 0xFFD7 },
+ { 0xFFDA, 0xFFDC }
+};
+
+// C99 Annex D
+static const llvm::sys::UnicodeCharRange C99AllowedIDCharRanges[] = {
+ // Latin (1)
+ { 0x00AA, 0x00AA },
+
+ // Special characters (1)
+ { 0x00B5, 0x00B5 }, { 0x00B7, 0x00B7 },
+
+ // Latin (2)
+ { 0x00BA, 0x00BA }, { 0x00C0, 0x00D6 }, { 0x00D8, 0x00F6 },
+ { 0x00F8, 0x01F5 }, { 0x01FA, 0x0217 }, { 0x0250, 0x02A8 },
+
+ // Special characters (2)
+ { 0x02B0, 0x02B8 }, { 0x02BB, 0x02BB }, { 0x02BD, 0x02C1 },
+ { 0x02D0, 0x02D1 }, { 0x02E0, 0x02E4 }, { 0x037A, 0x037A },
+
+ // Greek (1)
+ { 0x0386, 0x0386 }, { 0x0388, 0x038A }, { 0x038C, 0x038C },
+ { 0x038E, 0x03A1 }, { 0x03A3, 0x03CE }, { 0x03D0, 0x03D6 },
+ { 0x03DA, 0x03DA }, { 0x03DC, 0x03DC }, { 0x03DE, 0x03DE },
+ { 0x03E0, 0x03E0 }, { 0x03E2, 0x03F3 },
+
+ // Cyrillic
+ { 0x0401, 0x040C }, { 0x040E, 0x044F }, { 0x0451, 0x045C },
+ { 0x045E, 0x0481 }, { 0x0490, 0x04C4 }, { 0x04C7, 0x04C8 },
+ { 0x04CB, 0x04CC }, { 0x04D0, 0x04EB }, { 0x04EE, 0x04F5 },
+ { 0x04F8, 0x04F9 },
+
+ // Armenian (1)
+ { 0x0531, 0x0556 },
+
+ // Special characters (3)
+ { 0x0559, 0x0559 },
+
+ // Armenian (2)
+ { 0x0561, 0x0587 },
+
+ // Hebrew
+ { 0x05B0, 0x05B9 }, { 0x05BB, 0x05BD }, { 0x05BF, 0x05BF },
+ { 0x05C1, 0x05C2 }, { 0x05D0, 0x05EA }, { 0x05F0, 0x05F2 },
+
+ // Arabic (1)
+ { 0x0621, 0x063A }, { 0x0640, 0x0652 },
+
+ // Digits (1)
+ { 0x0660, 0x0669 },
+
+ // Arabic (2)
+ { 0x0670, 0x06B7 }, { 0x06BA, 0x06BE }, { 0x06C0, 0x06CE },
+ { 0x06D0, 0x06DC }, { 0x06E5, 0x06E8 }, { 0x06EA, 0x06ED },
+
+ // Digits (2)
+ { 0x06F0, 0x06F9 },
+
+ // Devanagari and Special character 0x093D.
+ { 0x0901, 0x0903 }, { 0x0905, 0x0939 }, { 0x093D, 0x094D },
+ { 0x0950, 0x0952 }, { 0x0958, 0x0963 },
+
+ // Digits (3)
+ { 0x0966, 0x096F },
+
+ // Bengali (1)
+ { 0x0981, 0x0983 }, { 0x0985, 0x098C }, { 0x098F, 0x0990 },
+ { 0x0993, 0x09A8 }, { 0x09AA, 0x09B0 }, { 0x09B2, 0x09B2 },
+ { 0x09B6, 0x09B9 }, { 0x09BE, 0x09C4 }, { 0x09C7, 0x09C8 },
+ { 0x09CB, 0x09CD }, { 0x09DC, 0x09DD }, { 0x09DF, 0x09E3 },
+
+ // Digits (4)
+ { 0x09E6, 0x09EF },
+
+ // Bengali (2)
+ { 0x09F0, 0x09F1 },
+
+ // Gurmukhi (1)
+ { 0x0A02, 0x0A02 }, { 0x0A05, 0x0A0A }, { 0x0A0F, 0x0A10 },
+ { 0x0A13, 0x0A28 }, { 0x0A2A, 0x0A30 }, { 0x0A32, 0x0A33 },
+ { 0x0A35, 0x0A36 }, { 0x0A38, 0x0A39 }, { 0x0A3E, 0x0A42 },
+ { 0x0A47, 0x0A48 }, { 0x0A4B, 0x0A4D }, { 0x0A59, 0x0A5C },
+ { 0x0A5E, 0x0A5E },
+
+ // Digits (5)
+ { 0x0A66, 0x0A6F },
+
+ // Gurmukhi (2)
+ { 0x0A74, 0x0A74 },
+
+ // Gujarati
+ { 0x0A81, 0x0A83 }, { 0x0A85, 0x0A8B }, { 0x0A8D, 0x0A8D },
+ { 0x0A8F, 0x0A91 }, { 0x0A93, 0x0AA8 }, { 0x0AAA, 0x0AB0 },
+ { 0x0AB2, 0x0AB3 }, { 0x0AB5, 0x0AB9 }, { 0x0ABD, 0x0AC5 },
+ { 0x0AC7, 0x0AC9 }, { 0x0ACB, 0x0ACD }, { 0x0AD0, 0x0AD0 },
+ { 0x0AE0, 0x0AE0 },
+
+ // Digits (6)
+ { 0x0AE6, 0x0AEF },
+
+ // Oriya and Special character 0x0B3D
+ { 0x0B01, 0x0B03 }, { 0x0B05, 0x0B0C }, { 0x0B0F, 0x0B10 },
+ { 0x0B13, 0x0B28 }, { 0x0B2A, 0x0B30 }, { 0x0B32, 0x0B33 },
+ { 0x0B36, 0x0B39 }, { 0x0B3D, 0x0B43 }, { 0x0B47, 0x0B48 },
+ { 0x0B4B, 0x0B4D }, { 0x0B5C, 0x0B5D }, { 0x0B5F, 0x0B61 },
+
+ // Digits (7)
+ { 0x0B66, 0x0B6F },
+
+ // Tamil
+ { 0x0B82, 0x0B83 }, { 0x0B85, 0x0B8A }, { 0x0B8E, 0x0B90 },
+ { 0x0B92, 0x0B95 }, { 0x0B99, 0x0B9A }, { 0x0B9C, 0x0B9C },
+ { 0x0B9E, 0x0B9F }, { 0x0BA3, 0x0BA4 }, { 0x0BA8, 0x0BAA },
+ { 0x0BAE, 0x0BB5 }, { 0x0BB7, 0x0BB9 }, { 0x0BBE, 0x0BC2 },
+ { 0x0BC6, 0x0BC8 }, { 0x0BCA, 0x0BCD },
+
+ // Digits (8)
+ { 0x0BE7, 0x0BEF },
+
+ // Telugu
+ { 0x0C01, 0x0C03 }, { 0x0C05, 0x0C0C }, { 0x0C0E, 0x0C10 },
+ { 0x0C12, 0x0C28 }, { 0x0C2A, 0x0C33 }, { 0x0C35, 0x0C39 },
+ { 0x0C3E, 0x0C44 }, { 0x0C46, 0x0C48 }, { 0x0C4A, 0x0C4D },
+ { 0x0C60, 0x0C61 },
+
+ // Digits (9)
+ { 0x0C66, 0x0C6F },
+
+ // Kannada
+ { 0x0C82, 0x0C83 }, { 0x0C85, 0x0C8C }, { 0x0C8E, 0x0C90 },
+ { 0x0C92, 0x0CA8 }, { 0x0CAA, 0x0CB3 }, { 0x0CB5, 0x0CB9 },
+ { 0x0CBE, 0x0CC4 }, { 0x0CC6, 0x0CC8 }, { 0x0CCA, 0x0CCD },
+ { 0x0CDE, 0x0CDE }, { 0x0CE0, 0x0CE1 },
+
+ // Digits (10)
+ { 0x0CE6, 0x0CEF },
+
+ // Malayalam
+ { 0x0D02, 0x0D03 }, { 0x0D05, 0x0D0C }, { 0x0D0E, 0x0D10 },
+ { 0x0D12, 0x0D28 }, { 0x0D2A, 0x0D39 }, { 0x0D3E, 0x0D43 },
+ { 0x0D46, 0x0D48 }, { 0x0D4A, 0x0D4D }, { 0x0D60, 0x0D61 },
+
+ // Digits (11)
+ { 0x0D66, 0x0D6F },
+
+ // Thai...including Digits { 0x0E50, 0x0E59 }
+ { 0x0E01, 0x0E3A }, { 0x0E40, 0x0E5B },
+
+ // Lao (1)
+ { 0x0E81, 0x0E82 }, { 0x0E84, 0x0E84 }, { 0x0E87, 0x0E88 },
+ { 0x0E8A, 0x0E8A }, { 0x0E8D, 0x0E8D }, { 0x0E94, 0x0E97 },
+ { 0x0E99, 0x0E9F }, { 0x0EA1, 0x0EA3 }, { 0x0EA5, 0x0EA5 },
+ { 0x0EA7, 0x0EA7 }, { 0x0EAA, 0x0EAB }, { 0x0EAD, 0x0EAE },
+ { 0x0EB0, 0x0EB9 }, { 0x0EBB, 0x0EBD }, { 0x0EC0, 0x0EC4 },
+ { 0x0EC6, 0x0EC6 }, { 0x0EC8, 0x0ECD },
+
+ // Digits (12)
+ { 0x0ED0, 0x0ED9 },
+
+ // Lao (2)
+ { 0x0EDC, 0x0EDD },
+
+ // Tibetan (1)
+ { 0x0F00, 0x0F00 }, { 0x0F18, 0x0F19 },
+
+ // Digits (13)
+ { 0x0F20, 0x0F33 },
+
+ // Tibetan (2)
+ { 0x0F35, 0x0F35 }, { 0x0F37, 0x0F37 }, { 0x0F39, 0x0F39 },
+ { 0x0F3E, 0x0F47 }, { 0x0F49, 0x0F69 }, { 0x0F71, 0x0F84 },
+ { 0x0F86, 0x0F8B }, { 0x0F90, 0x0F95 }, { 0x0F97, 0x0F97 },
+ { 0x0F99, 0x0FAD }, { 0x0FB1, 0x0FB7 }, { 0x0FB9, 0x0FB9 },
+
+ // Georgian
+ { 0x10A0, 0x10C5 }, { 0x10D0, 0x10F6 },
+
+ // Latin (3)
+ { 0x1E00, 0x1E9B }, { 0x1EA0, 0x1EF9 },
+
+ // Greek (2)
+ { 0x1F00, 0x1F15 }, { 0x1F18, 0x1F1D }, { 0x1F20, 0x1F45 },
+ { 0x1F48, 0x1F4D }, { 0x1F50, 0x1F57 }, { 0x1F59, 0x1F59 },
+ { 0x1F5B, 0x1F5B }, { 0x1F5D, 0x1F5D }, { 0x1F5F, 0x1F7D },
+ { 0x1F80, 0x1FB4 }, { 0x1FB6, 0x1FBC },
+
+ // Special characters (4)
+ { 0x1FBE, 0x1FBE },
+
+ // Greek (3)
+ { 0x1FC2, 0x1FC4 }, { 0x1FC6, 0x1FCC }, { 0x1FD0, 0x1FD3 },
+ { 0x1FD6, 0x1FDB }, { 0x1FE0, 0x1FEC }, { 0x1FF2, 0x1FF4 },
+ { 0x1FF6, 0x1FFC },
+
+ // Special characters (5)
+ { 0x203F, 0x2040 },
+
+ // Latin (4)
+ { 0x207F, 0x207F },
+
+ // Special characters (6)
+ { 0x2102, 0x2102 }, { 0x2107, 0x2107 }, { 0x210A, 0x2113 },
+ { 0x2115, 0x2115 }, { 0x2118, 0x211D }, { 0x2124, 0x2124 },
+ { 0x2126, 0x2126 }, { 0x2128, 0x2128 }, { 0x212A, 0x2131 },
+ { 0x2133, 0x2138 }, { 0x2160, 0x2182 }, { 0x3005, 0x3007 },
+ { 0x3021, 0x3029 },
+
+ // Hiragana
+ { 0x3041, 0x3093 }, { 0x309B, 0x309C },
+
+ // Katakana
+ { 0x30A1, 0x30F6 }, { 0x30FB, 0x30FC },
+
+ // Bopomofo
+ { 0x3105, 0x312C },
+
+ // CJK Unified Ideographs
+ { 0x4E00, 0x9FA5 },
+
+ // Hangul
+ { 0xAC00, 0xD7A3 }
+};
+
+// C11 D.2, C++11 [charname.disallowed]
+static const llvm::sys::UnicodeCharRange C11DisallowedInitialIDCharRanges[] = {
+ { 0x0300, 0x036F }, { 0x1DC0, 0x1DFF }, { 0x20D0, 0x20FF },
+ { 0xFE20, 0xFE2F }
+};
+
+// C99 6.4.2.1p3: The initial character [of an identifier] shall not be a
+// universal character name designating a digit.
+// C99 Annex D defines these characters as "Digits".
+static const llvm::sys::UnicodeCharRange C99DisallowedInitialIDCharRanges[] = {
+ { 0x0660, 0x0669 }, { 0x06F0, 0x06F9 }, { 0x0966, 0x096F },
+ { 0x09E6, 0x09EF }, { 0x0A66, 0x0A6F }, { 0x0AE6, 0x0AEF },
+ { 0x0B66, 0x0B6F }, { 0x0BE7, 0x0BEF }, { 0x0C66, 0x0C6F },
+ { 0x0CE6, 0x0CEF }, { 0x0D66, 0x0D6F }, { 0x0E50, 0x0E59 },
+ { 0x0ED0, 0x0ED9 }, { 0x0F20, 0x0F33 }
+};
+
+// Unicode v6.2, chapter 6.2, table 6-2.
+static const llvm::sys::UnicodeCharRange UnicodeWhitespaceCharRanges[] = {
+ { 0x0085, 0x0085 }, { 0x00A0, 0x00A0 }, { 0x1680, 0x1680 },
+ { 0x180E, 0x180E }, { 0x2000, 0x200A }, { 0x2028, 0x2029 },
+ { 0x202F, 0x202F }, { 0x205F, 0x205F }, { 0x3000, 0x3000 }
+};
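+
+// A minimal sketch of how these tables are meant to be consumed (the
+// UnicodeCharSet wrapper binary-searches a sorted range array; the names
+// below are illustrative):
+// static const llvm::sys::UnicodeCharSet
+// C11AllowedIDChars(C11AllowedIDCharRanges);
+// bool Allowed = C11AllowedIDChars.contains(CodePoint);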
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp
new file mode 100644
index 0000000..ccf9479
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp
@@ -0,0 +1,179 @@
+//===--- ParseAST.cpp - Provide the clang::ParseAST method ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the clang::ParseAST method.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/ParseAST.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/Stmt.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Sema/CodeCompleteConsumer.h"
+#include "clang/Sema/ExternalSemaSource.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaConsumer.h"
+#include "llvm/Support/CrashRecoveryContext.h"
+#include <cstdio>
+#include <memory>
+
+using namespace clang;
+
+namespace {
+
+/// Resets LLVM's pretty stack state so that stack traces are printed correctly
+/// when there are nested CrashRecoveryContexts and the inner one recovers from
+/// a crash.
+class ResetStackCleanup
+ : public llvm::CrashRecoveryContextCleanupBase<ResetStackCleanup,
+ const void> {
+public:
+ ResetStackCleanup(llvm::CrashRecoveryContext *Context, const void *Top)
+ : llvm::CrashRecoveryContextCleanupBase<ResetStackCleanup, const void>(
+ Context, Top) {}
+ void recoverResources() override {
+ llvm::RestorePrettyStackState(resource);
+ }
+};
+
+/// If a crash happens while the parser is active, an entry is printed for it.
+class PrettyStackTraceParserEntry : public llvm::PrettyStackTraceEntry {
+ const Parser &P;
+public:
+ PrettyStackTraceParserEntry(const Parser &p) : P(p) {}
+ void print(raw_ostream &OS) const override;
+};
+
+/// If a crash happens while the parser is active, print out a line indicating
+/// what the current token is.
+void PrettyStackTraceParserEntry::print(raw_ostream &OS) const {
+ const Token &Tok = P.getCurToken();
+ if (Tok.is(tok::eof)) {
+ OS << "<eof> parser at end of file\n";
+ return;
+ }
+
+ if (Tok.getLocation().isInvalid()) {
+ OS << "<unknown> parser at unknown location\n";
+ return;
+ }
+
+ const Preprocessor &PP = P.getPreprocessor();
+ Tok.getLocation().print(OS, PP.getSourceManager());
+ if (Tok.isAnnotation()) {
+ OS << ": at annotation token\n";
+ } else {
+ // Do the equivalent of PP.getSpelling(Tok) except for the parts that would
+ // allocate memory.
+ bool Invalid = false;
+ const SourceManager &SM = P.getPreprocessor().getSourceManager();
+ unsigned Length = Tok.getLength();
+ const char *Spelling = SM.getCharacterData(Tok.getLocation(), &Invalid);
+ if (Invalid) {
+ OS << ": unknown current parser token\n";
+ return;
+ }
+ OS << ": current parser token '" << StringRef(Spelling, Length) << "'\n";
+ }
+}
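+
+// On a crash this therefore prints an entry roughly of the form
+// (illustrative):
+// foo.c:12:3: current parser token 'bar'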
+
+} // namespace
+
+//===----------------------------------------------------------------------===//
+// Public interface to the file
+//===----------------------------------------------------------------------===//
+
+/// ParseAST - Parse the entire file specified, notifying the ASTConsumer as
+/// the file is parsed. This inserts the parsed decls into the translation unit
+/// held by Ctx.
+///
+void clang::ParseAST(Preprocessor &PP, ASTConsumer *Consumer,
+ ASTContext &Ctx, bool PrintStats,
+ TranslationUnitKind TUKind,
+ CodeCompleteConsumer *CompletionConsumer,
+ bool SkipFunctionBodies) {
+
+ std::unique_ptr<Sema> S(
+ new Sema(PP, Ctx, *Consumer, TUKind, CompletionConsumer));
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<Sema> CleanupSema(S.get());
+
+ ParseAST(*S.get(), PrintStats, SkipFunctionBodies);
+}
+
+void clang::ParseAST(Sema &S, bool PrintStats, bool SkipFunctionBodies) {
+ // Collect global stats on Decls/Stmts (until we have a module streamer).
+ if (PrintStats) {
+ Decl::EnableStatistics();
+ Stmt::EnableStatistics();
+ }
+
+ // Also turn on collection of stats inside of the Sema object.
+ bool OldCollectStats = PrintStats;
+ std::swap(OldCollectStats, S.CollectStats);
+
+ ASTConsumer *Consumer = &S.getASTConsumer();
+
+ std::unique_ptr<Parser> ParseOP(
+ new Parser(S.getPreprocessor(), S, SkipFunctionBodies));
+ Parser &P = *ParseOP.get();
+
+ llvm::CrashRecoveryContextCleanupRegistrar<const void, ResetStackCleanup>
+ CleanupPrettyStack(llvm::SavePrettyStackState());
+ PrettyStackTraceParserEntry CrashInfo(P);
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<Parser>
+ CleanupParser(ParseOP.get());
+
+ S.getPreprocessor().EnterMainSourceFile();
+ P.Initialize();
+
+ // C11 6.9p1 says translation units must have at least one top-level
+ // declaration. C++ doesn't have this restriction. We also don't want to
+ // complain if we have a precompiled header, although technically if the PCH
+ // is empty we should still emit the (pedantic) diagnostic.
+ Parser::DeclGroupPtrTy ADecl;
+ ExternalASTSource *External = S.getASTContext().getExternalSource();
+ if (External)
+ External->StartTranslationUnit(Consumer);
+
+ if (P.ParseTopLevelDecl(ADecl)) {
+ if (!External && !S.getLangOpts().CPlusPlus)
+ P.Diag(diag::ext_empty_translation_unit);
+ } else {
+ do {
+ // If we got a null return and something *was* parsed, ignore it. This
+ // is due to a top-level semicolon, an action override, or a parse error
+ // skipping something.
+ if (ADecl && !Consumer->HandleTopLevelDecl(ADecl.get()))
+ return;
+ } while (!P.ParseTopLevelDecl(ADecl));
+ }
+
+ // Process any TopLevelDecls generated by #pragma weak.
+ for (Decl *D : S.WeakTopLevelDecls())
+ Consumer->HandleTopLevelDecl(DeclGroupRef(D));
+
+ Consumer->HandleTranslationUnit(S.getASTContext());
+
+ std::swap(OldCollectStats, S.CollectStats);
+ if (PrintStats) {
+ llvm::errs() << "\nSTATISTICS:\n";
+ P.getActions().PrintStats();
+ S.getASTContext().PrintStats();
+ Decl::PrintStats();
+ Stmt::PrintStats();
+ Consumer->PrintStats();
+ }
+}
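+
+// A sketch of a typical invocation, assuming a CompilerInstance 'CI' and an
+// ASTConsumer 'Consumer' have already been set up:
+// clang::ParseAST(CI.getPreprocessor(), &Consumer, CI.getASTContext(),
+// /*PrintStats=*/false, TU_Complete,
+// /*CompletionConsumer=*/nullptr, /*SkipFunctionBodies=*/false);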
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp
new file mode 100644
index 0000000..e536644
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp
@@ -0,0 +1,1215 @@
+//===--- ParseCXXInlineMethods.cpp - C++ class inline methods parsing------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements parsing for C++ class inline methods.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "RAIIObjectsForParser.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Scope.h"
+using namespace clang;
+
+/// ParseCXXInlineMethodDef - We parsed and verified that the specified
+/// Declarator is a well formed C++ inline method definition. Now lex its body
+/// and store its tokens for parsing after the C++ class is complete.
+NamedDecl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS,
+ AttributeList *AccessAttrs,
+ ParsingDeclarator &D,
+ const ParsedTemplateInfo &TemplateInfo,
+ const VirtSpecifiers& VS,
+ SourceLocation PureSpecLoc) {
+ assert(D.isFunctionDeclarator() && "This isn't a function declarator!");
+ assert(Tok.isOneOf(tok::l_brace, tok::colon, tok::kw_try, tok::equal) &&
+ "Current token not a '{', ':', '=', or 'try'!");
+
+ MultiTemplateParamsArg TemplateParams(
+ TemplateInfo.TemplateParams ? TemplateInfo.TemplateParams->data()
+ : nullptr,
+ TemplateInfo.TemplateParams ? TemplateInfo.TemplateParams->size() : 0);
+
+ NamedDecl *FnD;
+ if (D.getDeclSpec().isFriendSpecified())
+ FnD = Actions.ActOnFriendFunctionDecl(getCurScope(), D,
+ TemplateParams);
+ else {
+ FnD = Actions.ActOnCXXMemberDeclarator(getCurScope(), AS, D,
+ TemplateParams, nullptr,
+ VS, ICIS_NoInit);
+ if (FnD) {
+ Actions.ProcessDeclAttributeList(getCurScope(), FnD, AccessAttrs);
+ if (PureSpecLoc.isValid())
+ Actions.ActOnPureSpecifier(FnD, PureSpecLoc);
+ }
+ }
+
+ HandleMemberFunctionDeclDelays(D, FnD);
+
+ D.complete(FnD);
+
+ if (TryConsumeToken(tok::equal)) {
+ if (!FnD) {
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
+
+ bool Delete = false;
+ SourceLocation KWLoc;
+ SourceLocation KWEndLoc = Tok.getEndLoc().getLocWithOffset(-1);
+ if (TryConsumeToken(tok::kw_delete, KWLoc)) {
+ Diag(KWLoc, getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_defaulted_deleted_function
+ : diag::ext_defaulted_deleted_function)
+ << 1 /* deleted */;
+ Actions.SetDeclDeleted(FnD, KWLoc);
+ Delete = true;
+ if (auto *DeclAsFunction = dyn_cast<FunctionDecl>(FnD)) {
+ DeclAsFunction->setRangeEnd(KWEndLoc);
+ }
+ } else if (TryConsumeToken(tok::kw_default, KWLoc)) {
+ Diag(KWLoc, getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_defaulted_deleted_function
+ : diag::ext_defaulted_deleted_function)
+ << 0 /* defaulted */;
+ Actions.SetDeclDefaulted(FnD, KWLoc);
+ if (auto *DeclAsFunction = dyn_cast<FunctionDecl>(FnD)) {
+ DeclAsFunction->setRangeEnd(KWEndLoc);
+ }
+ } else {
+ llvm_unreachable("function definition after = not 'delete' or 'default'");
+ }
+
+ if (Tok.is(tok::comma)) {
+ Diag(KWLoc, diag::err_default_delete_in_multiple_declaration)
+ << Delete;
+ SkipUntil(tok::semi);
+ } else if (ExpectAndConsume(tok::semi, diag::err_expected_after,
+ Delete ? "delete" : "default")) {
+ SkipUntil(tok::semi);
+ }
+
+ return FnD;
+ }
+
+ // In delayed template parsing mode, if we are within a class template or
+ // about to parse a member function template, consume the tokens and store
+ // them for parsing at the end of the translation unit.
+ if (getLangOpts().DelayedTemplateParsing &&
+ D.getFunctionDefinitionKind() == FDK_Definition &&
+ !D.getDeclSpec().isConstexprSpecified() &&
+ !(FnD && FnD->getAsFunction() &&
+ FnD->getAsFunction()->getReturnType()->getContainedAutoType()) &&
+ ((Actions.CurContext->isDependentContext() ||
+ (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate &&
+ TemplateInfo.Kind != ParsedTemplateInfo::ExplicitSpecialization)) &&
+ !Actions.IsInsideALocalClassWithinATemplateFunction())) {
+
+ CachedTokens Toks;
+ LexTemplateFunctionForLateParsing(Toks);
+
+ if (FnD) {
+ FunctionDecl *FD = FnD->getAsFunction();
+ Actions.CheckForFunctionRedefinition(FD);
+ Actions.MarkAsLateParsedTemplate(FD, FnD, Toks);
+ }
+
+ return FnD;
+ }
+
+ // Consume the tokens and store them for later parsing.
+
+ LexedMethod* LM = new LexedMethod(this, FnD);
+ getCurrentClass().LateParsedDeclarations.push_back(LM);
+ LM->TemplateScope = getCurScope()->isTemplateParamScope();
+ CachedTokens &Toks = LM->Toks;
+
+ tok::TokenKind kind = Tok.getKind();
+ // Consume everything up to (and including) the left brace of the
+ // function body.
+ if (ConsumeAndStoreFunctionPrologue(Toks)) {
+ // We didn't find the left-brace we expected after the
+ // constructor initializer; we already printed an error, and it's likely
+ // impossible to recover, so don't try to parse this method later.
+ // Skip over the rest of the decl and back to somewhere that looks
+ // reasonable.
+ SkipMalformedDecl();
+ delete getCurrentClass().LateParsedDeclarations.back();
+ getCurrentClass().LateParsedDeclarations.pop_back();
+ return FnD;
+ } else {
+ // Consume everything up to (and including) the matching right brace.
+ ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/false);
+ }
+
+ // If we're in a function-try-block, we need to store all the catch blocks.
+ if (kind == tok::kw_try) {
+ while (Tok.is(tok::kw_catch)) {
+ ConsumeAndStoreUntil(tok::l_brace, Toks, /*StopAtSemi=*/false);
+ ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/false);
+ }
+ }
+
+ if (FnD) {
+ // If this is a friend function, mark that it's late-parsed so that
+ // it's still known to be a definition even before we attach the
+ // parsed body. Sema needs to treat friend function definitions
+ // differently during template instantiation, and it's possible for
+ // the containing class to be instantiated before all its member
+ // function definitions are parsed.
+ //
+ // If you remove this, you can remove the code that clears the flag
+ // after parsing the member.
+ if (D.getDeclSpec().isFriendSpecified()) {
+ FunctionDecl *FD = FnD->getAsFunction();
+ Actions.CheckForFunctionRedefinition(FD);
+ FD->setLateTemplateParsed(true);
+ }
+ } else {
+ // If semantic analysis could not build a function declaration,
+ // just throw away the late-parsed declaration.
+ delete getCurrentClass().LateParsedDeclarations.back();
+ getCurrentClass().LateParsedDeclarations.pop_back();
+ }
+
+ return FnD;
+}
+
+/// ParseCXXNonStaticMemberInitializer - We parsed and verified that the
+/// specified Declarator is a well-formed C++ non-static data member
+/// declaration. Now lex its initializer and store its tokens for parsing
+/// after the class is complete.
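+///
+/// For example, in:
+///
+///   struct S {
+///     int x = f(); // tokens cached here, parsed once S is complete
+///     static int f() { return 42; }
+///   };
+///
+/// the initializer may refer to members declared later in the class, so it
+/// cannot be parsed until the closing brace has been seen.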
+void Parser::ParseCXXNonStaticMemberInitializer(Decl *VarD) {
+ assert(Tok.isOneOf(tok::l_brace, tok::equal) &&
+ "Current token not a '{' or '='!");
+
+ LateParsedMemberInitializer *MI =
+ new LateParsedMemberInitializer(this, VarD);
+ getCurrentClass().LateParsedDeclarations.push_back(MI);
+ CachedTokens &Toks = MI->Toks;
+
+ tok::TokenKind kind = Tok.getKind();
+ if (kind == tok::equal) {
+ Toks.push_back(Tok);
+ ConsumeToken();
+ }
+
+ if (kind == tok::l_brace) {
+ // Begin by storing the '{' token.
+ Toks.push_back(Tok);
+ ConsumeBrace();
+
+ // Consume everything up to (and including) the matching right brace.
+ ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/true);
+ } else {
+ // Consume everything up to (but excluding) the comma or semicolon.
+ ConsumeAndStoreInitializer(Toks, CIK_DefaultInitializer);
+ }
+
+ // Store an artificial EOF token to ensure that we don't run off the end of
+ // the initializer when we come to parse it.
+ Token Eof;
+ Eof.startToken();
+ Eof.setKind(tok::eof);
+ Eof.setLocation(Tok.getLocation());
+ Eof.setEofData(VarD);
+ Toks.push_back(Eof);
+}
+
+Parser::LateParsedDeclaration::~LateParsedDeclaration() {}
+void Parser::LateParsedDeclaration::ParseLexedMethodDeclarations() {}
+void Parser::LateParsedDeclaration::ParseLexedMemberInitializers() {}
+void Parser::LateParsedDeclaration::ParseLexedMethodDefs() {}
+
+Parser::LateParsedClass::LateParsedClass(Parser *P, ParsingClass *C)
+ : Self(P), Class(C) {}
+
+Parser::LateParsedClass::~LateParsedClass() {
+ Self->DeallocateParsedClasses(Class);
+}
+
+void Parser::LateParsedClass::ParseLexedMethodDeclarations() {
+ Self->ParseLexedMethodDeclarations(*Class);
+}
+
+void Parser::LateParsedClass::ParseLexedMemberInitializers() {
+ Self->ParseLexedMemberInitializers(*Class);
+}
+
+void Parser::LateParsedClass::ParseLexedMethodDefs() {
+ Self->ParseLexedMethodDefs(*Class);
+}
+
+void Parser::LateParsedMethodDeclaration::ParseLexedMethodDeclarations() {
+ Self->ParseLexedMethodDeclaration(*this);
+}
+
+void Parser::LexedMethod::ParseLexedMethodDefs() {
+ Self->ParseLexedMethodDef(*this);
+}
+
+void Parser::LateParsedMemberInitializer::ParseLexedMemberInitializers() {
+ Self->ParseLexedMemberInitializer(*this);
+}
+
+/// ParseLexedMethodDeclarations - We finished parsing the member
+/// specification of a top (non-nested) C++ class. Now go over the
+/// stack of method declarations with some parts for which parsing was
+/// delayed (such as default arguments) and parse them.
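+///
+/// For example, the default argument below can only be parsed once the class
+/// is complete, because it names a member declared later:
+///
+///   struct S {
+///     void f(int n = k);
+///     static const int k = 7;
+///   };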
+void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) {
+ bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
+ ParseScope ClassTemplateScope(this, Scope::TemplateParamScope,
+ HasTemplateScope);
+ TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
+ if (HasTemplateScope) {
+ Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
+ ++CurTemplateDepthTracker;
+ }
+
+ // The current scope is still active if we're the top-level class.
+ // Otherwise we'll need to push and enter a new scope.
+ bool HasClassScope = !Class.TopLevelClass;
+ ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope,
+ HasClassScope);
+ if (HasClassScope)
+ Actions.ActOnStartDelayedMemberDeclarations(getCurScope(),
+ Class.TagOrTemplate);
+
+ for (size_t i = 0; i < Class.LateParsedDeclarations.size(); ++i) {
+ Class.LateParsedDeclarations[i]->ParseLexedMethodDeclarations();
+ }
+
+ if (HasClassScope)
+ Actions.ActOnFinishDelayedMemberDeclarations(getCurScope(),
+ Class.TagOrTemplate);
+}
+
+void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
+ // If this is a member template, introduce the template parameter scope.
+ ParseScope TemplateScope(this, Scope::TemplateParamScope, LM.TemplateScope);
+ TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
+ if (LM.TemplateScope) {
+ Actions.ActOnReenterTemplateScope(getCurScope(), LM.Method);
+ ++CurTemplateDepthTracker;
+ }
+ // Start the delayed C++ method declaration
+ Actions.ActOnStartDelayedCXXMethodDeclaration(getCurScope(), LM.Method);
+
+ // Introduce the parameters into scope and parse their default
+ // arguments.
+ ParseScope PrototypeScope(this, Scope::FunctionPrototypeScope |
+ Scope::FunctionDeclarationScope | Scope::DeclScope);
+ for (unsigned I = 0, N = LM.DefaultArgs.size(); I != N; ++I) {
+ auto Param = cast<ParmVarDecl>(LM.DefaultArgs[I].Param);
+ // Introduce the parameter into scope.
+ bool HasUnparsed = Param->hasUnparsedDefaultArg();
+ Actions.ActOnDelayedCXXMethodParameter(getCurScope(), Param);
+ if (CachedTokens *Toks = LM.DefaultArgs[I].Toks) {
+ // Mark the end of the default argument so that we know when to stop when
+ // we parse it later on.
+ Token LastDefaultArgToken = Toks->back();
+ Token DefArgEnd;
+ DefArgEnd.startToken();
+ DefArgEnd.setKind(tok::eof);
+ DefArgEnd.setLocation(LastDefaultArgToken.getEndLoc());
+ DefArgEnd.setEofData(Param);
+ Toks->push_back(DefArgEnd);
+
+ // Parse the default argument from its saved token stream.
+ Toks->push_back(Tok); // So that the current token doesn't get lost
+ PP.EnterTokenStream(&Toks->front(), Toks->size(), true, false);
+
+ // Consume the previously-pushed token.
+ ConsumeAnyToken();
+
+ // Consume the '='.
+ assert(Tok.is(tok::equal) && "Default argument not starting with '='");
+ SourceLocation EqualLoc = ConsumeToken();
+
+ // The argument isn't actually potentially evaluated unless it is
+ // used.
+ EnterExpressionEvaluationContext Eval(Actions,
+ Sema::PotentiallyEvaluatedIfUsed,
+ Param);
+
+ ExprResult DefArgResult;
+ if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+ DefArgResult = ParseBraceInitializer();
+ } else
+ DefArgResult = ParseAssignmentExpression();
+ DefArgResult = Actions.CorrectDelayedTyposInExpr(DefArgResult);
+ if (DefArgResult.isInvalid()) {
+ Actions.ActOnParamDefaultArgumentError(Param, EqualLoc);
+ } else {
+ if (Tok.isNot(tok::eof) || Tok.getEofData() != Param) {
+ // The last two tokens are the terminator and the saved value of
+ // Tok; the last token in the default argument is the one before
+ // those.
+ assert(Toks->size() >= 3 && "expected a token in default arg");
+ Diag(Tok.getLocation(), diag::err_default_arg_unparsed)
+ << SourceRange(Tok.getLocation(),
+ (*Toks)[Toks->size() - 3].getLocation());
+ }
+ Actions.ActOnParamDefaultArgument(Param, EqualLoc,
+ DefArgResult.get());
+ }
+
+ // There could be leftover tokens (e.g. because of an error).
+ // Skip through until we reach the 'end of default argument' token.
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+
+ if (Tok.is(tok::eof) && Tok.getEofData() == Param)
+ ConsumeAnyToken();
+
+ delete Toks;
+ LM.DefaultArgs[I].Toks = nullptr;
+ } else if (HasUnparsed) {
+ assert(Param->hasInheritedDefaultArg());
+ FunctionDecl *Old = cast<FunctionDecl>(LM.Method)->getPreviousDecl();
+ ParmVarDecl *OldParam = Old->getParamDecl(I);
+      assert(!OldParam->hasUnparsedDefaultArg());
+      if (OldParam->hasUninstantiatedDefaultArg())
+        Param->setUninstantiatedDefaultArg(
+            OldParam->getUninstantiatedDefaultArg());
+ else
+ Param->setDefaultArg(OldParam->getInit());
+ }
+ }
+
+ // Parse a delayed exception-specification, if there is one.
+ if (CachedTokens *Toks = LM.ExceptionSpecTokens) {
+ // Add the 'stop' token.
+ Token LastExceptionSpecToken = Toks->back();
+ Token ExceptionSpecEnd;
+ ExceptionSpecEnd.startToken();
+ ExceptionSpecEnd.setKind(tok::eof);
+ ExceptionSpecEnd.setLocation(LastExceptionSpecToken.getEndLoc());
+ ExceptionSpecEnd.setEofData(LM.Method);
+ Toks->push_back(ExceptionSpecEnd);
+
+    // Parse the exception-specification from its saved token stream.
+ Toks->push_back(Tok); // So that the current token doesn't get lost
+ PP.EnterTokenStream(&Toks->front(), Toks->size(), true, false);
+
+ // Consume the previously-pushed token.
+ ConsumeAnyToken();
+
+ // C++11 [expr.prim.general]p3:
+ // If a declaration declares a member function or member function
+ // template of a class X, the expression this is a prvalue of type
+ // "pointer to cv-qualifier-seq X" between the optional cv-qualifer-seq
+ // and the end of the function-definition, member-declarator, or
+ // declarator.
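+    //
+    // For example, 'this' may appear in a delayed exception-specification:
+    //
+    //   struct S {
+    //     void f() noexcept(noexcept(this->g()));
+    //     void g() noexcept;
+    //   };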
+ CXXMethodDecl *Method;
+ if (FunctionTemplateDecl *FunTmpl
+ = dyn_cast<FunctionTemplateDecl>(LM.Method))
+ Method = cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl());
+ else
+ Method = cast<CXXMethodDecl>(LM.Method);
+
+ Sema::CXXThisScopeRAII ThisScope(Actions, Method->getParent(),
+ Method->getTypeQualifiers(),
+ getLangOpts().CPlusPlus11);
+
+ // Parse the exception-specification.
+ SourceRange SpecificationRange;
+ SmallVector<ParsedType, 4> DynamicExceptions;
+ SmallVector<SourceRange, 4> DynamicExceptionRanges;
+ ExprResult NoexceptExpr;
+ CachedTokens *ExceptionSpecTokens;
+
+ ExceptionSpecificationType EST
+ = tryParseExceptionSpecification(/*Delayed=*/false, SpecificationRange,
+ DynamicExceptions,
+ DynamicExceptionRanges, NoexceptExpr,
+ ExceptionSpecTokens);
+
+ if (Tok.isNot(tok::eof) || Tok.getEofData() != LM.Method)
+ Diag(Tok.getLocation(), diag::err_except_spec_unparsed);
+
+ // Attach the exception-specification to the method.
+ Actions.actOnDelayedExceptionSpecification(LM.Method, EST,
+ SpecificationRange,
+ DynamicExceptions,
+ DynamicExceptionRanges,
+ NoexceptExpr.isUsable()?
+ NoexceptExpr.get() : nullptr);
+
+ // There could be leftover tokens (e.g. because of an error).
+ // Skip through until we reach the original token position.
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+
+ // Clean up the remaining EOF token.
+ if (Tok.is(tok::eof) && Tok.getEofData() == LM.Method)
+ ConsumeAnyToken();
+
+ delete Toks;
+ LM.ExceptionSpecTokens = nullptr;
+ }
+
+ PrototypeScope.Exit();
+
+ // Finish the delayed C++ method declaration.
+ Actions.ActOnFinishDelayedCXXMethodDeclaration(getCurScope(), LM.Method);
+}
+
+/// ParseLexedMethodDefs - We finished parsing the member specification of a top
+/// (non-nested) C++ class. Now go over the stack of lexed methods that were
+/// collected during its parsing and parse them all.
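+///
+/// For example, the body of f() below was lexed and stored when the class was
+/// first parsed; it is parsed here, after 'S' is complete, so the reference
+/// to the later member 'm' resolves correctly:
+///
+///   struct S {
+///     int f() { return m; }
+///     int m;
+///   };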
+void Parser::ParseLexedMethodDefs(ParsingClass &Class) {
+ bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
+  ParseScope ClassTemplateScope(this, Scope::TemplateParamScope,
+                                HasTemplateScope);
+ TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
+ if (HasTemplateScope) {
+ Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
+ ++CurTemplateDepthTracker;
+ }
+ bool HasClassScope = !Class.TopLevelClass;
+ ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope,
+ HasClassScope);
+
+ for (size_t i = 0; i < Class.LateParsedDeclarations.size(); ++i) {
+ Class.LateParsedDeclarations[i]->ParseLexedMethodDefs();
+ }
+}
+
+void Parser::ParseLexedMethodDef(LexedMethod &LM) {
+ // If this is a member template, introduce the template parameter scope.
+ ParseScope TemplateScope(this, Scope::TemplateParamScope, LM.TemplateScope);
+ TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
+ if (LM.TemplateScope) {
+ Actions.ActOnReenterTemplateScope(getCurScope(), LM.D);
+ ++CurTemplateDepthTracker;
+ }
+
+ assert(!LM.Toks.empty() && "Empty body!");
+ Token LastBodyToken = LM.Toks.back();
+ Token BodyEnd;
+ BodyEnd.startToken();
+ BodyEnd.setKind(tok::eof);
+ BodyEnd.setLocation(LastBodyToken.getEndLoc());
+ BodyEnd.setEofData(LM.D);
+ LM.Toks.push_back(BodyEnd);
+ // Append the current token at the end of the new token stream so that it
+ // doesn't get lost.
+ LM.Toks.push_back(Tok);
+ PP.EnterTokenStream(LM.Toks.data(), LM.Toks.size(), true, false);
+
+ // Consume the previously pushed token.
+ ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
+ assert(Tok.isOneOf(tok::l_brace, tok::colon, tok::kw_try)
+ && "Inline method not starting with '{', ':' or 'try'");
+
+ // Parse the method body. Function body parsing code is similar enough
+ // to be re-used for method bodies as well.
+ ParseScope FnScope(this, Scope::FnScope|Scope::DeclScope);
+ Actions.ActOnStartOfFunctionDef(getCurScope(), LM.D);
+
+ if (Tok.is(tok::kw_try)) {
+ ParseFunctionTryBlock(LM.D, FnScope);
+
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+
+ if (Tok.is(tok::eof) && Tok.getEofData() == LM.D)
+ ConsumeAnyToken();
+ return;
+ }
+ if (Tok.is(tok::colon)) {
+ ParseConstructorInitializer(LM.D);
+
+ // Error recovery.
+ if (!Tok.is(tok::l_brace)) {
+ FnScope.Exit();
+ Actions.ActOnFinishFunctionBody(LM.D, nullptr);
+
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+
+ if (Tok.is(tok::eof) && Tok.getEofData() == LM.D)
+ ConsumeAnyToken();
+ return;
+ }
+ } else
+ Actions.ActOnDefaultCtorInitializers(LM.D);
+
+ assert((Actions.getDiagnostics().hasErrorOccurred() ||
+ !isa<FunctionTemplateDecl>(LM.D) ||
+ cast<FunctionTemplateDecl>(LM.D)->getTemplateParameters()->getDepth()
+ < TemplateParameterDepth) &&
+ "TemplateParameterDepth should be greater than the depth of "
+ "current template being instantiated!");
+
+ ParseFunctionStatementBody(LM.D, FnScope);
+
+ // Clear the late-template-parsed bit if we set it before.
+ if (LM.D)
+ LM.D->getAsFunction()->setLateTemplateParsed(false);
+
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+
+ if (Tok.is(tok::eof) && Tok.getEofData() == LM.D)
+ ConsumeAnyToken();
+
+ if (CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(LM.D))
+ Actions.ActOnFinishInlineMethodDef(MD);
+}
+
+/// ParseLexedMemberInitializers - We finished parsing the member specification
+/// of a top (non-nested) C++ class. Now go over the stack of lexed data member
+/// initializers that were collected during its parsing and parse them all.
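+///
+/// For example, 'this' is now usable inside each stored initializer:
+///
+///   struct S {
+///     S *self = this;
+///   };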
+void Parser::ParseLexedMemberInitializers(ParsingClass &Class) {
+ bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
+ ParseScope ClassTemplateScope(this, Scope::TemplateParamScope,
+ HasTemplateScope);
+ TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
+ if (HasTemplateScope) {
+ Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
+ ++CurTemplateDepthTracker;
+ }
+ // Set or update the scope flags.
+ bool AlreadyHasClassScope = Class.TopLevelClass;
+ unsigned ScopeFlags = Scope::ClassScope|Scope::DeclScope;
+ ParseScope ClassScope(this, ScopeFlags, !AlreadyHasClassScope);
+ ParseScopeFlags ClassScopeFlags(this, ScopeFlags, AlreadyHasClassScope);
+
+ if (!AlreadyHasClassScope)
+ Actions.ActOnStartDelayedMemberDeclarations(getCurScope(),
+ Class.TagOrTemplate);
+
+ if (!Class.LateParsedDeclarations.empty()) {
+ // C++11 [expr.prim.general]p4:
+ // Otherwise, if a member-declarator declares a non-static data member
+ // (9.2) of a class X, the expression this is a prvalue of type "pointer
+ // to X" within the optional brace-or-equal-initializer. It shall not
+ // appear elsewhere in the member-declarator.
+ Sema::CXXThisScopeRAII ThisScope(Actions, Class.TagOrTemplate,
+ /*TypeQuals=*/(unsigned)0);
+
+ for (size_t i = 0; i < Class.LateParsedDeclarations.size(); ++i) {
+ Class.LateParsedDeclarations[i]->ParseLexedMemberInitializers();
+ }
+ }
+
+ if (!AlreadyHasClassScope)
+ Actions.ActOnFinishDelayedMemberDeclarations(getCurScope(),
+ Class.TagOrTemplate);
+
+ Actions.ActOnFinishDelayedMemberInitializers(Class.TagOrTemplate);
+}
+
+void Parser::ParseLexedMemberInitializer(LateParsedMemberInitializer &MI) {
+ if (!MI.Field || MI.Field->isInvalidDecl())
+ return;
+
+ // Append the current token at the end of the new token stream so that it
+ // doesn't get lost.
+ MI.Toks.push_back(Tok);
+ PP.EnterTokenStream(MI.Toks.data(), MI.Toks.size(), true, false);
+
+ // Consume the previously pushed token.
+ ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
+
+ SourceLocation EqualLoc;
+
+ Actions.ActOnStartCXXInClassMemberInitializer();
+
+ ExprResult Init = ParseCXXMemberInitializer(MI.Field, /*IsFunction=*/false,
+ EqualLoc);
+
+ Actions.ActOnFinishCXXInClassMemberInitializer(MI.Field, EqualLoc,
+ Init.get());
+
+ // The next token should be our artificial terminating EOF token.
+ if (Tok.isNot(tok::eof)) {
+ if (!Init.isInvalid()) {
+ SourceLocation EndLoc = PP.getLocForEndOfToken(PrevTokLocation);
+ if (!EndLoc.isValid())
+ EndLoc = Tok.getLocation();
+ // No fixit; we can't recover as if there were a semicolon here.
+ Diag(EndLoc, diag::err_expected_semi_decl_list);
+ }
+
+ // Consume tokens until we hit the artificial EOF.
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+ }
+ // Make sure this is *our* artificial EOF token.
+ if (Tok.getEofData() == MI.Field)
+ ConsumeAnyToken();
+}
+
+/// ConsumeAndStoreUntil - Consume tokens and store them in the passed token
+/// container until either token 'T1' or 'T2' is reached (the matched token
+/// gets consumed/stored too, if ConsumeFinalToken).
+/// If StopAtSemi is true, then we will stop early at a ';' character.
+/// Returns true if token 'T1' or 'T2' was found.
+/// NOTE: This is a specialized version of Parser::SkipUntil.
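+///
+/// For example, when caching a default argument such as 'g(1, 2)' in
+/// 'void f(int x = g(1, 2));', the recursive handling of '(' below keeps the
+/// ',' inside the call from being mistaken for a parameter separator.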
+bool Parser::ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
+ CachedTokens &Toks,
+ bool StopAtSemi, bool ConsumeFinalToken) {
+  // We always want this function to consume at least one token if the first
+  // token isn't T1 or T2 and if not at EOF.
+ bool isFirstTokenConsumed = true;
+ while (1) {
+ // If we found one of the tokens, stop and return true.
+ if (Tok.is(T1) || Tok.is(T2)) {
+ if (ConsumeFinalToken) {
+ Toks.push_back(Tok);
+ ConsumeAnyToken();
+ }
+ return true;
+ }
+
+ switch (Tok.getKind()) {
+ case tok::eof:
+ case tok::annot_module_begin:
+ case tok::annot_module_end:
+ case tok::annot_module_include:
+ // Ran out of tokens.
+ return false;
+
+ case tok::l_paren:
+ // Recursively consume properly-nested parens.
+ Toks.push_back(Tok);
+ ConsumeParen();
+ ConsumeAndStoreUntil(tok::r_paren, Toks, /*StopAtSemi=*/false);
+ break;
+ case tok::l_square:
+ // Recursively consume properly-nested square brackets.
+ Toks.push_back(Tok);
+ ConsumeBracket();
+ ConsumeAndStoreUntil(tok::r_square, Toks, /*StopAtSemi=*/false);
+ break;
+ case tok::l_brace:
+ // Recursively consume properly-nested braces.
+ Toks.push_back(Tok);
+ ConsumeBrace();
+ ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/false);
+ break;
+
+ // Okay, we found a ']' or '}' or ')', which we think should be balanced.
+ // Since the user wasn't looking for this token (if they were, it would
+    // already be handled), this isn't balanced. If there is an LHS token at a
+ // higher level, we will assume that this matches the unbalanced token
+ // and return it. Otherwise, this is a spurious RHS token, which we skip.
+ case tok::r_paren:
+ if (ParenCount && !isFirstTokenConsumed)
+ return false; // Matches something.
+ Toks.push_back(Tok);
+ ConsumeParen();
+ break;
+ case tok::r_square:
+ if (BracketCount && !isFirstTokenConsumed)
+ return false; // Matches something.
+ Toks.push_back(Tok);
+ ConsumeBracket();
+ break;
+ case tok::r_brace:
+ if (BraceCount && !isFirstTokenConsumed)
+ return false; // Matches something.
+ Toks.push_back(Tok);
+ ConsumeBrace();
+ break;
+
+ case tok::code_completion:
+ Toks.push_back(Tok);
+ ConsumeCodeCompletionToken();
+ break;
+
+ case tok::string_literal:
+ case tok::wide_string_literal:
+ case tok::utf8_string_literal:
+ case tok::utf16_string_literal:
+ case tok::utf32_string_literal:
+ Toks.push_back(Tok);
+ ConsumeStringToken();
+ break;
+ case tok::semi:
+ if (StopAtSemi)
+ return false;
+ // FALL THROUGH.
+ default:
+ // consume this token.
+ Toks.push_back(Tok);
+ ConsumeToken();
+ break;
+ }
+ isFirstTokenConsumed = false;
+ }
+}
+
+/// \brief Consume tokens and store them in the passed token container until
+/// we've passed the try keyword and constructor initializers and have consumed
+/// the opening brace of the function body. The opening brace will be consumed
+/// if and only if there was no error.
+///
+/// \return True on error.
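+///
+/// For example, given:
+///
+///   S::S() try : a(1), b(2) {
+///
+/// everything up to and including the '{' is consumed and stored.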
+bool Parser::ConsumeAndStoreFunctionPrologue(CachedTokens &Toks) {
+ if (Tok.is(tok::kw_try)) {
+ Toks.push_back(Tok);
+ ConsumeToken();
+ }
+
+ if (Tok.isNot(tok::colon)) {
+ // Easy case, just a function body.
+
+ // Grab any remaining garbage to be diagnosed later. We stop when we reach a
+ // brace: an opening one is the function body, while a closing one probably
+ // means we've reached the end of the class.
+ ConsumeAndStoreUntil(tok::l_brace, tok::r_brace, Toks,
+ /*StopAtSemi=*/true,
+ /*ConsumeFinalToken=*/false);
+ if (Tok.isNot(tok::l_brace))
+ return Diag(Tok.getLocation(), diag::err_expected) << tok::l_brace;
+
+ Toks.push_back(Tok);
+ ConsumeBrace();
+ return false;
+ }
+
+ Toks.push_back(Tok);
+ ConsumeToken();
+
+ // We can't reliably skip over a mem-initializer-id, because it could be
+ // a template-id involving not-yet-declared names. Given:
+ //
+ // S ( ) : a < b < c > ( e )
+ //
+ // 'e' might be an initializer or part of a template argument, depending
+ // on whether 'b' is a template.
+
+ // Track whether we might be inside a template argument. We can give
+ // significantly better diagnostics if we know that we're not.
+ bool MightBeTemplateArgument = false;
+
+ while (true) {
+ // Skip over the mem-initializer-id, if possible.
+ if (Tok.is(tok::kw_decltype)) {
+ Toks.push_back(Tok);
+ SourceLocation OpenLoc = ConsumeToken();
+ if (Tok.isNot(tok::l_paren))
+ return Diag(Tok.getLocation(), diag::err_expected_lparen_after)
+ << "decltype";
+ Toks.push_back(Tok);
+ ConsumeParen();
+ if (!ConsumeAndStoreUntil(tok::r_paren, Toks, /*StopAtSemi=*/true)) {
+ Diag(Tok.getLocation(), diag::err_expected) << tok::r_paren;
+ Diag(OpenLoc, diag::note_matching) << tok::l_paren;
+ return true;
+ }
+ }
+ do {
+ // Walk over a component of a nested-name-specifier.
+ if (Tok.is(tok::coloncolon)) {
+ Toks.push_back(Tok);
+ ConsumeToken();
+
+ if (Tok.is(tok::kw_template)) {
+ Toks.push_back(Tok);
+ ConsumeToken();
+ }
+ }
+
+ if (Tok.isOneOf(tok::identifier, tok::kw_template)) {
+ Toks.push_back(Tok);
+ ConsumeToken();
+ } else if (Tok.is(tok::code_completion)) {
+ Toks.push_back(Tok);
+ ConsumeCodeCompletionToken();
+ // Consume the rest of the initializers permissively.
+ // FIXME: We should be able to perform code-completion here even if
+ // there isn't a subsequent '{' token.
+ MightBeTemplateArgument = true;
+ break;
+ } else {
+ break;
+ }
+ } while (Tok.is(tok::coloncolon));
+
+ if (Tok.is(tok::less))
+ MightBeTemplateArgument = true;
+
+ if (MightBeTemplateArgument) {
+ // We may be inside a template argument list. Grab up to the start of the
+ // next parenthesized initializer or braced-init-list. This *might* be the
+ // initializer, or it might be a subexpression in the template argument
+ // list.
+ // FIXME: Count angle brackets, and clear MightBeTemplateArgument
+ // if all angles are closed.
+ if (!ConsumeAndStoreUntil(tok::l_paren, tok::l_brace, Toks,
+ /*StopAtSemi=*/true,
+ /*ConsumeFinalToken=*/false)) {
+ // We're not just missing the initializer, we're also missing the
+ // function body!
+ return Diag(Tok.getLocation(), diag::err_expected) << tok::l_brace;
+ }
+ } else if (Tok.isNot(tok::l_paren) && Tok.isNot(tok::l_brace)) {
+ // We found something weird in a mem-initializer-id.
+ if (getLangOpts().CPlusPlus11)
+ return Diag(Tok.getLocation(), diag::err_expected_either)
+ << tok::l_paren << tok::l_brace;
+ else
+ return Diag(Tok.getLocation(), diag::err_expected) << tok::l_paren;
+ }
+
+ tok::TokenKind kind = Tok.getKind();
+ Toks.push_back(Tok);
+ bool IsLParen = (kind == tok::l_paren);
+ SourceLocation OpenLoc = Tok.getLocation();
+
+ if (IsLParen) {
+ ConsumeParen();
+ } else {
+ assert(kind == tok::l_brace && "Must be left paren or brace here.");
+ ConsumeBrace();
+ // In C++03, this has to be the start of the function body, which
+ // means the initializer is malformed; we'll diagnose it later.
+ if (!getLangOpts().CPlusPlus11)
+ return false;
+ }
+
+ // Grab the initializer (or the subexpression of the template argument).
+ // FIXME: If we support lambdas here, we'll need to set StopAtSemi to false
+ // if we might be inside the braces of a lambda-expression.
+ tok::TokenKind CloseKind = IsLParen ? tok::r_paren : tok::r_brace;
+ if (!ConsumeAndStoreUntil(CloseKind, Toks, /*StopAtSemi=*/true)) {
+ Diag(Tok, diag::err_expected) << CloseKind;
+ Diag(OpenLoc, diag::note_matching) << kind;
+ return true;
+ }
+
+ // Grab pack ellipsis, if present.
+ if (Tok.is(tok::ellipsis)) {
+ Toks.push_back(Tok);
+ ConsumeToken();
+ }
+
+ // If we know we just consumed a mem-initializer, we must have ',' or '{'
+ // next.
+ if (Tok.is(tok::comma)) {
+ Toks.push_back(Tok);
+ ConsumeToken();
+ } else if (Tok.is(tok::l_brace)) {
+ // This is the function body if the ')' or '}' is immediately followed by
+ // a '{'. That cannot happen within a template argument, apart from the
+ // case where a template argument contains a compound literal:
+ //
+ // S ( ) : a < b < c > ( d ) { }
+ // // End of declaration, or still inside the template argument?
+ //
+ // ... and the case where the template argument contains a lambda:
+ //
+ // S ( ) : a < 0 && b < c > ( d ) + [ ] ( ) { return 0; }
+ // ( ) > ( ) { }
+ //
+ // FIXME: Disambiguate these cases. Note that the latter case is probably
+ // going to be made ill-formed by core issue 1607.
+ Toks.push_back(Tok);
+ ConsumeBrace();
+ return false;
+ } else if (!MightBeTemplateArgument) {
+ return Diag(Tok.getLocation(), diag::err_expected_either) << tok::l_brace
+ << tok::comma;
+ }
+ }
+}
+
+/// \brief Consume and store tokens from the '?' to the ':' in a conditional
+/// expression.
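+///
+/// For example, in 'x ? f(), 1 : 2' the ',' is part of the middle operand of
+/// the conditional, so it must not be treated as the end of an enclosing
+/// default argument.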
+bool Parser::ConsumeAndStoreConditional(CachedTokens &Toks) {
+ // Consume '?'.
+ assert(Tok.is(tok::question));
+ Toks.push_back(Tok);
+ ConsumeToken();
+
+ while (Tok.isNot(tok::colon)) {
+ if (!ConsumeAndStoreUntil(tok::question, tok::colon, Toks,
+ /*StopAtSemi=*/true,
+ /*ConsumeFinalToken=*/false))
+ return false;
+
+ // If we found a nested conditional, consume it.
+ if (Tok.is(tok::question) && !ConsumeAndStoreConditional(Toks))
+ return false;
+ }
+
+ // Consume ':'.
+ Toks.push_back(Tok);
+ ConsumeToken();
+ return true;
+}
+
+/// \brief A tentative parsing action that can also revert token annotations.
+class Parser::UnannotatedTentativeParsingAction : public TentativeParsingAction {
+public:
+ explicit UnannotatedTentativeParsingAction(Parser &Self,
+ tok::TokenKind EndKind)
+ : TentativeParsingAction(Self), Self(Self), EndKind(EndKind) {
+ // Stash away the old token stream, so we can restore it once the
+ // tentative parse is complete.
+ TentativeParsingAction Inner(Self);
+ Self.ConsumeAndStoreUntil(EndKind, Toks, true, /*ConsumeFinalToken*/false);
+ Inner.Revert();
+ }
+
+ void RevertAnnotations() {
+ Revert();
+
+ // Put back the original tokens.
+ Self.SkipUntil(EndKind, StopAtSemi | StopBeforeMatch);
+ if (Toks.size()) {
+ Token *Buffer = new Token[Toks.size()];
+ std::copy(Toks.begin() + 1, Toks.end(), Buffer);
+ Buffer[Toks.size() - 1] = Self.Tok;
+ Self.PP.EnterTokenStream(Buffer, Toks.size(), true, /*Owned*/true);
+
+ Self.Tok = Toks.front();
+ }
+ }
+
+private:
+ Parser &Self;
+ CachedTokens Toks;
+ tok::TokenKind EndKind;
+};
+
+/// ConsumeAndStoreInitializer - Consume tokens and store them in the passed
+/// token container until the end of the current initializer expression (either
+/// a default argument or an in-class initializer for a non-static data member).
+///
+/// Returns \c true if we reached the end of something initializer-shaped,
+/// \c false if we bailed out.
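+///
+/// For example, in:
+///
+///   void f(int a = X<int, int>::value, int b = 0);
+///
+/// the first ',' sits inside a template argument list while the second one
+/// separates parameters; only the second ends the default argument for 'a'.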
+bool Parser::ConsumeAndStoreInitializer(CachedTokens &Toks,
+ CachedInitKind CIK) {
+ // We always want this function to consume at least one token if not at EOF.
+ bool IsFirstToken = true;
+
+ // Number of possible unclosed <s we've seen so far. These might be templates,
+ // and might not, but if there were none of them (or we know for sure that
+ // we're within a template), we can avoid a tentative parse.
+ unsigned AngleCount = 0;
+ unsigned KnownTemplateCount = 0;
+
+ while (1) {
+ switch (Tok.getKind()) {
+ case tok::comma:
+ // If we might be in a template, perform a tentative parse to check.
+ if (!AngleCount)
+ // Not a template argument: this is the end of the initializer.
+ return true;
+ if (KnownTemplateCount)
+ goto consume_token;
+
+ // We hit a comma inside angle brackets. This is the hard case. The
+ // rule we follow is:
+ // * For a default argument, if the tokens after the comma form a
+ // syntactically-valid parameter-declaration-clause, in which each
+ // parameter has an initializer, then this comma ends the default
+ // argument.
+ // * For a default initializer, if the tokens after the comma form a
+ // syntactically-valid init-declarator-list, then this comma ends
+ // the default initializer.
+ {
+ UnannotatedTentativeParsingAction PA(*this,
+ CIK == CIK_DefaultInitializer
+ ? tok::semi : tok::r_paren);
+ Sema::TentativeAnalysisScope Scope(Actions);
+
+ TPResult Result = TPResult::Error;
+ ConsumeToken();
+ switch (CIK) {
+ case CIK_DefaultInitializer:
+ Result = TryParseInitDeclaratorList();
+ // If we parsed a complete, ambiguous init-declarator-list, this
+ // is only syntactically-valid if it's followed by a semicolon.
+ if (Result == TPResult::Ambiguous && Tok.isNot(tok::semi))
+ Result = TPResult::False;
+ break;
+
+ case CIK_DefaultArgument:
+ bool InvalidAsDeclaration = false;
+ Result = TryParseParameterDeclarationClause(
+ &InvalidAsDeclaration, /*VersusTemplateArgument=*/true);
+ // If this is an expression or a declaration with a missing
+ // 'typename', assume it's not a declaration.
+ if (Result == TPResult::Ambiguous && InvalidAsDeclaration)
+ Result = TPResult::False;
+ break;
+ }
+
+ // If what follows could be a declaration, it is a declaration.
+ if (Result != TPResult::False && Result != TPResult::Error) {
+ PA.Revert();
+ return true;
+ }
+
+ // In the uncommon case that we decide the following tokens are part
+ // of a template argument, revert any annotations we've performed in
+ // those tokens. We're not going to look them up until we've parsed
+ // the rest of the class, and that might add more declarations.
+ PA.RevertAnnotations();
+ }
+
+ // Keep going. We know we're inside a template argument list now.
+ ++KnownTemplateCount;
+ goto consume_token;
+
+ case tok::eof:
+ case tok::annot_module_begin:
+ case tok::annot_module_end:
+ case tok::annot_module_include:
+ // Ran out of tokens.
+ return false;
+
+ case tok::less:
+ // FIXME: A '<' can only start a template-id if it's preceded by an
+ // identifier, an operator-function-id, or a literal-operator-id.
+ ++AngleCount;
+ goto consume_token;
+
+ case tok::question:
+ // In 'a ? b : c', 'b' can contain an unparenthesized comma. If it does,
+ // that is *never* the end of the initializer. Skip to the ':'.
+ if (!ConsumeAndStoreConditional(Toks))
+ return false;
+ break;
+
+ case tok::greatergreatergreater:
+ if (!getLangOpts().CPlusPlus11)
+ goto consume_token;
+ if (AngleCount) --AngleCount;
+ if (KnownTemplateCount) --KnownTemplateCount;
+ // Fall through.
+ case tok::greatergreater:
+ if (!getLangOpts().CPlusPlus11)
+ goto consume_token;
+ if (AngleCount) --AngleCount;
+ if (KnownTemplateCount) --KnownTemplateCount;
+ // Fall through.
+ case tok::greater:
+ if (AngleCount) --AngleCount;
+ if (KnownTemplateCount) --KnownTemplateCount;
+ goto consume_token;
+
+ case tok::kw_template:
+ // 'template' identifier '<' is known to start a template argument list,
+ // and can be used to disambiguate the parse.
+ // FIXME: Support all forms of 'template' unqualified-id '<'.
+ Toks.push_back(Tok);
+ ConsumeToken();
+ if (Tok.is(tok::identifier)) {
+ Toks.push_back(Tok);
+ ConsumeToken();
+ if (Tok.is(tok::less)) {
+ ++AngleCount;
+ ++KnownTemplateCount;
+ Toks.push_back(Tok);
+ ConsumeToken();
+ }
+ }
+ break;
+
+ case tok::kw_operator:
+ // If 'operator' precedes other punctuation, that punctuation loses
+ // its special behavior.
+ Toks.push_back(Tok);
+ ConsumeToken();
+ switch (Tok.getKind()) {
+ case tok::comma:
+ case tok::greatergreatergreater:
+ case tok::greatergreater:
+ case tok::greater:
+ case tok::less:
+ Toks.push_back(Tok);
+ ConsumeToken();
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case tok::l_paren:
+ // Recursively consume properly-nested parens.
+ Toks.push_back(Tok);
+ ConsumeParen();
+ ConsumeAndStoreUntil(tok::r_paren, Toks, /*StopAtSemi=*/false);
+ break;
+ case tok::l_square:
+ // Recursively consume properly-nested square brackets.
+ Toks.push_back(Tok);
+ ConsumeBracket();
+ ConsumeAndStoreUntil(tok::r_square, Toks, /*StopAtSemi=*/false);
+ break;
+ case tok::l_brace:
+ // Recursively consume properly-nested braces.
+ Toks.push_back(Tok);
+ ConsumeBrace();
+ ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/false);
+ break;
+
+ // Okay, we found a ']' or '}' or ')', which we think should be balanced.
+ // Since the user wasn't looking for this token (if they were, it would
+    // already be handled), this isn't balanced. If there is an LHS token at a
+ // higher level, we will assume that this matches the unbalanced token
+ // and return it. Otherwise, this is a spurious RHS token, which we
+ // consume and pass on to downstream code to diagnose.
+ case tok::r_paren:
+ if (CIK == CIK_DefaultArgument)
+ return true; // End of the default argument.
+ if (ParenCount && !IsFirstToken)
+ return false;
+ Toks.push_back(Tok);
+ ConsumeParen();
+ continue;
+ case tok::r_square:
+ if (BracketCount && !IsFirstToken)
+ return false;
+ Toks.push_back(Tok);
+ ConsumeBracket();
+ continue;
+ case tok::r_brace:
+ if (BraceCount && !IsFirstToken)
+ return false;
+ Toks.push_back(Tok);
+ ConsumeBrace();
+ continue;
+
+ case tok::code_completion:
+ Toks.push_back(Tok);
+ ConsumeCodeCompletionToken();
+ break;
+
+ case tok::string_literal:
+ case tok::wide_string_literal:
+ case tok::utf8_string_literal:
+ case tok::utf16_string_literal:
+ case tok::utf32_string_literal:
+ Toks.push_back(Tok);
+ ConsumeStringToken();
+ break;
+ case tok::semi:
+ if (CIK == CIK_DefaultInitializer)
+ return true; // End of the default initializer.
+ // FALL THROUGH.
+ default:
+ consume_token:
+ Toks.push_back(Tok);
+ ConsumeToken();
+ break;
+ }
+ IsFirstToken = false;
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp
new file mode 100644
index 0000000..e69bb27
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp
@@ -0,0 +1,6327 @@
+//===--- ParseDecl.cpp - Declaration Parsing --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Declaration portions of the Parser interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "RAIIObjectsForParser.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Basic/AddressSpaces.h"
+#include "clang/Basic/Attributes.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/PrettyDeclStackTrace.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// C99 6.7: Declarations.
+//===----------------------------------------------------------------------===//
+
+/// ParseTypeName
+/// type-name: [C99 6.7.6]
+/// specifier-qualifier-list abstract-declarator[opt]
+///
+/// Called type-id in C++.
+TypeResult Parser::ParseTypeName(SourceRange *Range,
+ Declarator::TheContext Context,
+ AccessSpecifier AS,
+ Decl **OwnedType,
+ ParsedAttributes *Attrs) {
+ DeclSpecContext DSC = getDeclSpecContextFromDeclaratorContext(Context);
+ if (DSC == DSC_normal)
+ DSC = DSC_type_specifier;
+
+ // Parse the common declaration-specifiers piece.
+ DeclSpec DS(AttrFactory);
+ if (Attrs)
+ DS.addAttributes(Attrs->getList());
+ ParseSpecifierQualifierList(DS, AS, DSC);
+ if (OwnedType)
+ *OwnedType = DS.isTypeSpecOwned() ? DS.getRepAsDecl() : nullptr;
+
+ // Parse the abstract-declarator, if present.
+ Declarator DeclaratorInfo(DS, Context);
+ ParseDeclarator(DeclaratorInfo);
+ if (Range)
+ *Range = DeclaratorInfo.getSourceRange();
+
+ if (DeclaratorInfo.isInvalidType())
+ return true;
+
+ return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+}
+
+/// isAttributeLateParsed - Return true if the attribute has arguments that
+/// require late parsing.
+static bool isAttributeLateParsed(const IdentifierInfo &II) {
+#define CLANG_ATTR_LATE_PARSED_LIST
+ return llvm::StringSwitch<bool>(II.getName())
+#include "clang/Parse/AttrParserStringSwitches.inc"
+ .Default(false);
+#undef CLANG_ATTR_LATE_PARSED_LIST
+}
+
+/// ParseGNUAttributes - Parse a non-empty attributes list.
+///
+/// [GNU] attributes:
+/// attribute
+/// attributes attribute
+///
+/// [GNU] attribute:
+/// '__attribute__' '(' '(' attribute-list ')' ')'
+///
+/// [GNU] attribute-list:
+/// attrib
+/// attribute_list ',' attrib
+///
+/// [GNU] attrib:
+/// empty
+/// attrib-name
+/// attrib-name '(' identifier ')'
+/// attrib-name '(' identifier ',' nonempty-expr-list ')'
+/// attrib-name '(' argument-expression-list [C99 6.5.2] ')'
+///
+/// [GNU] attrib-name:
+/// identifier
+/// typespec
+/// typequal
+/// storageclass
+///
+/// Whether an attribute takes an 'identifier' is determined by the
+/// attrib-name. GCC's behavior here is not worth imitating:
+///
+/// * In C mode, if the attribute argument list starts with an identifier
+/// followed by a ',' or an ')', and the identifier doesn't resolve to
+/// a type, it is parsed as an identifier. If the attribute actually
+/// wanted an expression, it's out of luck (but it turns out that no
+/// attributes work that way, because C constant expressions are very
+/// limited).
+/// * In C++ mode, if the attribute argument list starts with an identifier,
+/// and the attribute *wants* an identifier, it is parsed as an identifier.
+/// At block scope, any additional tokens between the identifier and the
+/// ',' or ')' are ignored, otherwise they produce a parse error.
+///
+/// We follow the C++ model, but don't allow junk after the identifier.
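+///
+/// For example:
+///
+///   int x __attribute__((aligned(16), unused));
+///   void g(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
+///
+/// In the second case 'printf' is parsed as an identifier argument rather
+/// than an expression.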
+void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
+ SourceLocation *endLoc,
+ LateParsedAttrList *LateAttrs,
+ Declarator *D) {
+ assert(Tok.is(tok::kw___attribute) && "Not a GNU attribute list!");
+
+ while (Tok.is(tok::kw___attribute)) {
+ ConsumeToken();
+ if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after,
+ "attribute")) {
+ SkipUntil(tok::r_paren, StopAtSemi); // skip until ) or ;
+ return;
+ }
+ if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after, "(")) {
+ SkipUntil(tok::r_paren, StopAtSemi); // skip until ) or ;
+ return;
+ }
+ // Parse the attribute-list. e.g. __attribute__(( weak, alias("__f") ))
+ while (true) {
+ // Allow empty/non-empty attributes. ((__vector_size__(16),,,,))
+ if (TryConsumeToken(tok::comma))
+ continue;
+
+ // Expect an identifier or declaration specifier (const, int, etc.)
+ if (Tok.isAnnotation())
+ break;
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ if (!AttrName)
+ break;
+
+ SourceLocation AttrNameLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::l_paren)) {
+ attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
+ AttributeList::AS_GNU);
+ continue;
+ }
+
+ // Handle "parameterized" attributes
+ if (!LateAttrs || !isAttributeLateParsed(*AttrName)) {
+ ParseGNUAttributeArgs(AttrName, AttrNameLoc, attrs, endLoc, nullptr,
+ SourceLocation(), AttributeList::AS_GNU, D);
+ continue;
+ }
+
+ // Handle attributes with arguments that require late parsing.
+ LateParsedAttribute *LA =
+ new LateParsedAttribute(this, *AttrName, AttrNameLoc);
+ LateAttrs->push_back(LA);
+
+ // Attributes in a class are parsed at the end of the class, along
+ // with other late-parsed declarations.
+ if (!ClassStack.empty() && !LateAttrs->parseSoon())
+ getCurrentClass().LateParsedDeclarations.push_back(LA);
+
+ // consume everything up to and including the matching right parens
+ ConsumeAndStoreUntil(tok::r_paren, LA->Toks, true, false);
+
+ Token Eof;
+ Eof.startToken();
+ Eof.setLocation(Tok.getLocation());
+ LA->Toks.push_back(Eof);
+ }
+
+ if (ExpectAndConsume(tok::r_paren))
+ SkipUntil(tok::r_paren, StopAtSemi);
+ SourceLocation Loc = Tok.getLocation();
+ if (ExpectAndConsume(tok::r_paren))
+ SkipUntil(tok::r_paren, StopAtSemi);
+ if (endLoc)
+ *endLoc = Loc;
+ }
+}
+
+/// \brief Normalizes an attribute name by dropping prefixed and suffixed __.
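+///
+/// For example, both "aligned" and "__aligned__" normalize to "aligned".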
+static StringRef normalizeAttrName(StringRef Name) {
+ if (Name.size() >= 4 && Name.startswith("__") && Name.endswith("__"))
+ Name = Name.drop_front(2).drop_back(2);
+ return Name;
+}
+
+/// \brief Determine whether the given attribute has an identifier argument.
+static bool attributeHasIdentifierArg(const IdentifierInfo &II) {
+#define CLANG_ATTR_IDENTIFIER_ARG_LIST
+ return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
+#include "clang/Parse/AttrParserStringSwitches.inc"
+ .Default(false);
+#undef CLANG_ATTR_IDENTIFIER_ARG_LIST
+}
+
+/// \brief Determine whether the given attribute parses a type argument.
+static bool attributeIsTypeArgAttr(const IdentifierInfo &II) {
+#define CLANG_ATTR_TYPE_ARG_LIST
+ return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
+#include "clang/Parse/AttrParserStringSwitches.inc"
+ .Default(false);
+#undef CLANG_ATTR_TYPE_ARG_LIST
+}
+
+/// \brief Determine whether the given attribute requires parsing its arguments
+/// in an unevaluated context or not.
+static bool attributeParsedArgsUnevaluated(const IdentifierInfo &II) {
+#define CLANG_ATTR_ARG_CONTEXT_LIST
+ return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
+#include "clang/Parse/AttrParserStringSwitches.inc"
+ .Default(false);
+#undef CLANG_ATTR_ARG_CONTEXT_LIST
+}
+
+IdentifierLoc *Parser::ParseIdentifierLoc() {
+ assert(Tok.is(tok::identifier) && "expected an identifier");
+ IdentifierLoc *IL = IdentifierLoc::create(Actions.Context,
+ Tok.getLocation(),
+ Tok.getIdentifierInfo());
+ ConsumeToken();
+ return IL;
+}
+
+void Parser::ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
+ SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs,
+ SourceLocation *EndLoc,
+ IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc,
+ AttributeList::Syntax Syntax) {
+ BalancedDelimiterTracker Parens(*this, tok::l_paren);
+ Parens.consumeOpen();
+
+ TypeResult T;
+ if (Tok.isNot(tok::r_paren))
+ T = ParseTypeName();
+
+ if (Parens.consumeClose())
+ return;
+
+ if (T.isInvalid())
+ return;
+
+ if (T.isUsable())
+ Attrs.addNewTypeAttr(&AttrName,
+ SourceRange(AttrNameLoc, Parens.getCloseLocation()),
+ ScopeName, ScopeLoc, T.get(), Syntax);
+ else
+ Attrs.addNew(&AttrName, SourceRange(AttrNameLoc, Parens.getCloseLocation()),
+ ScopeName, ScopeLoc, nullptr, 0, Syntax);
+}
+
+unsigned Parser::ParseAttributeArgsCommon(
+ IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc, AttributeList::Syntax Syntax) {
+ // Ignore the left paren location for now.
+ ConsumeParen();
+
+ ArgsVector ArgExprs;
+ if (Tok.is(tok::identifier)) {
+ // If this attribute wants an 'identifier' argument, make it so.
+ bool IsIdentifierArg = attributeHasIdentifierArg(*AttrName);
+ AttributeList::Kind AttrKind =
+ AttributeList::getKind(AttrName, ScopeName, Syntax);
+
+ // If we don't know how to parse this attribute, but this is the only
+ // token in this argument, assume it's meant to be an identifier.
+ if (AttrKind == AttributeList::UnknownAttribute ||
+ AttrKind == AttributeList::IgnoredAttribute) {
+ const Token &Next = NextToken();
+ IsIdentifierArg = Next.isOneOf(tok::r_paren, tok::comma);
+ }
+
+ if (IsIdentifierArg)
+ ArgExprs.push_back(ParseIdentifierLoc());
+ }
+
+ if (!ArgExprs.empty() ? Tok.is(tok::comma) : Tok.isNot(tok::r_paren)) {
+ // Eat the comma.
+ if (!ArgExprs.empty())
+ ConsumeToken();
+
+ // Parse the non-empty comma-separated list of expressions.
+ do {
+ std::unique_ptr<EnterExpressionEvaluationContext> Unevaluated;
+ if (attributeParsedArgsUnevaluated(*AttrName))
+ Unevaluated.reset(
+ new EnterExpressionEvaluationContext(Actions, Sema::Unevaluated));
+
+ ExprResult ArgExpr(
+ Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression()));
+ if (ArgExpr.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return 0;
+ }
+ ArgExprs.push_back(ArgExpr.get());
+ // Eat the comma, move to the next argument
+ } while (TryConsumeToken(tok::comma));
+ }
+
+ SourceLocation RParen = Tok.getLocation();
+ if (!ExpectAndConsume(tok::r_paren)) {
+ SourceLocation AttrLoc = ScopeLoc.isValid() ? ScopeLoc : AttrNameLoc;
+ Attrs.addNew(AttrName, SourceRange(AttrLoc, RParen), ScopeName, ScopeLoc,
+ ArgExprs.data(), ArgExprs.size(), Syntax);
+ }
+
+ if (EndLoc)
+ *EndLoc = RParen;
+
+ return static_cast<unsigned>(ArgExprs.size());
+}
+
+/// Parse the arguments to a parameterized GNU attribute or
+/// a C++11 attribute in "gnu" namespace.
+void Parser::ParseGNUAttributeArgs(IdentifierInfo *AttrName,
+ SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs,
+ SourceLocation *EndLoc,
+ IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc,
+ AttributeList::Syntax Syntax,
+ Declarator *D) {
+
+ assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('");
+
+ AttributeList::Kind AttrKind =
+ AttributeList::getKind(AttrName, ScopeName, Syntax);
+
+ if (AttrKind == AttributeList::AT_Availability) {
+ ParseAvailabilityAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
+ ScopeLoc, Syntax);
+ return;
+ } else if (AttrKind == AttributeList::AT_ObjCBridgeRelated) {
+ ParseObjCBridgeRelatedAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
+ ScopeName, ScopeLoc, Syntax);
+ return;
+ } else if (AttrKind == AttributeList::AT_TypeTagForDatatype) {
+ ParseTypeTagForDatatypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
+ ScopeName, ScopeLoc, Syntax);
+ return;
+ } else if (attributeIsTypeArgAttr(*AttrName)) {
+ ParseAttributeWithTypeArg(*AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
+ ScopeLoc, Syntax);
+ return;
+ }
+
+ // These may refer to the function arguments, but need to be parsed early to
+ // participate in determining whether it's a redeclaration.
+ std::unique_ptr<ParseScope> PrototypeScope;
+ if (normalizeAttrName(AttrName->getName()) == "enable_if" &&
+ D && D->isFunctionDeclarator()) {
+ DeclaratorChunk::FunctionTypeInfo FTI = D->getFunctionTypeInfo();
+ PrototypeScope.reset(new ParseScope(this, Scope::FunctionPrototypeScope |
+ Scope::FunctionDeclarationScope |
+ Scope::DeclScope));
+ for (unsigned i = 0; i != FTI.NumParams; ++i) {
+ ParmVarDecl *Param = cast<ParmVarDecl>(FTI.Params[i].Param);
+ Actions.ActOnReenterCXXMethodParameter(getCurScope(), Param);
+ }
+ }
+
+ ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
+ ScopeLoc, Syntax);
+}
+
+bool Parser::ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
+ SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs) {
+ // If the attribute isn't known, we will not attempt to parse any
+ // arguments.
+ if (!hasAttribute(AttrSyntax::Declspec, nullptr, AttrName,
+ getTargetInfo(), getLangOpts())) {
+ // Eat the left paren, then skip to the ending right paren.
+ ConsumeParen();
+ SkipUntil(tok::r_paren);
+ return false;
+ }
+
+ SourceLocation OpenParenLoc = Tok.getLocation();
+
+ if (AttrName->getName() == "property") {
+    // The property declspec is more complex in that it can take one or two
+    // assignment expressions as parameters, and the lhs of each assignment
+    // must be named 'get' or 'put'.
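+    //
+    // For example (accessor names GetX/PutX are arbitrary):
+    //
+    //   __declspec(property(get=GetX, put=PutX)) int x;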
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.expectAndConsume(diag::err_expected_lparen_after,
+ AttrName->getNameStart(), tok::r_paren);
+
+ enum AccessorKind {
+ AK_Invalid = -1,
+ AK_Put = 0,
+ AK_Get = 1 // indices into AccessorNames
+ };
+ IdentifierInfo *AccessorNames[] = {nullptr, nullptr};
+ bool HasInvalidAccessor = false;
+
+ // Parse the accessor specifications.
+ while (true) {
+ // Stop if this doesn't look like an accessor spec.
+ if (!Tok.is(tok::identifier)) {
+ // If the user wrote a completely empty list, use a special diagnostic.
+ if (Tok.is(tok::r_paren) && !HasInvalidAccessor &&
+ AccessorNames[AK_Put] == nullptr &&
+ AccessorNames[AK_Get] == nullptr) {
+ Diag(AttrNameLoc, diag::err_ms_property_no_getter_or_putter);
+ break;
+ }
+
+ Diag(Tok.getLocation(), diag::err_ms_property_unknown_accessor);
+ break;
+ }
+
+ AccessorKind Kind;
+ SourceLocation KindLoc = Tok.getLocation();
+ StringRef KindStr = Tok.getIdentifierInfo()->getName();
+ if (KindStr == "get") {
+ Kind = AK_Get;
+ } else if (KindStr == "put") {
+ Kind = AK_Put;
+
+ // Recover from the common mistake of using 'set' instead of 'put'.
+ } else if (KindStr == "set") {
+ Diag(KindLoc, diag::err_ms_property_has_set_accessor)
+ << FixItHint::CreateReplacement(KindLoc, "put");
+ Kind = AK_Put;
+
+ // Handle the mistake of forgetting the accessor kind by skipping
+ // this accessor.
+ } else if (NextToken().is(tok::comma) || NextToken().is(tok::r_paren)) {
+ Diag(KindLoc, diag::err_ms_property_missing_accessor_kind);
+ ConsumeToken();
+ HasInvalidAccessor = true;
+ goto next_property_accessor;
+
+ // Otherwise, complain about the unknown accessor kind.
+ } else {
+ Diag(KindLoc, diag::err_ms_property_unknown_accessor);
+ HasInvalidAccessor = true;
+ Kind = AK_Invalid;
+
+ // Try to keep parsing unless it doesn't look like an accessor spec.
+ if (!NextToken().is(tok::equal))
+ break;
+ }
+
+ // Consume the identifier.
+ ConsumeToken();
+
+ // Consume the '='.
+ if (!TryConsumeToken(tok::equal)) {
+ Diag(Tok.getLocation(), diag::err_ms_property_expected_equal)
+ << KindStr;
+ break;
+ }
+
+ // Expect the method name.
+ if (!Tok.is(tok::identifier)) {
+ Diag(Tok.getLocation(), diag::err_ms_property_expected_accessor_name);
+ break;
+ }
+
+ if (Kind == AK_Invalid) {
+ // Just drop invalid accessors.
+ } else if (AccessorNames[Kind] != nullptr) {
+ // Complain about the repeated accessor, ignore it, and keep parsing.
+ Diag(KindLoc, diag::err_ms_property_duplicate_accessor) << KindStr;
+ } else {
+ AccessorNames[Kind] = Tok.getIdentifierInfo();
+ }
+ ConsumeToken();
+
+ next_property_accessor:
+ // Keep processing accessors until we run out.
+ if (TryConsumeToken(tok::comma))
+ continue;
+
+ // If we run into the ')', stop without consuming it.
+ if (Tok.is(tok::r_paren))
+ break;
+
+ Diag(Tok.getLocation(), diag::err_ms_property_expected_comma_or_rparen);
+ break;
+ }
+
+ // Only add the property attribute if it was well-formed.
+ if (!HasInvalidAccessor)
+ Attrs.addNewPropertyAttr(AttrName, AttrNameLoc, nullptr, SourceLocation(),
+ AccessorNames[AK_Get], AccessorNames[AK_Put],
+ AttributeList::AS_Declspec);
+ T.skipToEnd();
+ return !HasInvalidAccessor;
+ }
+
+ unsigned NumArgs =
+ ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, nullptr, nullptr,
+ SourceLocation(), AttributeList::AS_Declspec);
+
+ // If this attribute's args were parsed, and it was expected to have
+ // arguments but none were provided, emit a diagnostic.
+ const AttributeList *Attr = Attrs.getList();
+ if (Attr && Attr->getMaxArgs() && !NumArgs) {
+ Diag(OpenParenLoc, diag::err_attribute_requires_arguments) << AttrName;
+ return false;
+ }
+ return true;
+}
+
+/// [MS] decl-specifier:
+/// __declspec ( extended-decl-modifier-seq )
+///
+/// [MS] extended-decl-modifier-seq:
+/// extended-decl-modifier[opt]
+/// extended-decl-modifier extended-decl-modifier-seq
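+///
+/// For example:
+///
+///   __declspec(dllexport deprecated("use g() instead")) void f();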
+void Parser::ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
+ SourceLocation *End) {
+ assert(getLangOpts().DeclSpecKeyword && "__declspec keyword is not enabled");
+ assert(Tok.is(tok::kw___declspec) && "Not a declspec!");
+
+ while (Tok.is(tok::kw___declspec)) {
+ ConsumeToken();
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume(diag::err_expected_lparen_after, "__declspec",
+ tok::r_paren))
+ return;
+
+ // An empty declspec is perfectly legal and should not warn. Additionally,
+ // you can specify multiple attributes per declspec.
+ while (Tok.isNot(tok::r_paren)) {
+ // Attribute not present.
+ if (TryConsumeToken(tok::comma))
+ continue;
+
+ // We expect either a well-known identifier or a generic string. Anything
+ // else is a malformed declspec.
+ bool IsString = Tok.getKind() == tok::string_literal;
+ if (!IsString && Tok.getKind() != tok::identifier &&
+ Tok.getKind() != tok::kw_restrict) {
+ Diag(Tok, diag::err_ms_declspec_type);
+ T.skipToEnd();
+ return;
+ }
+
+ IdentifierInfo *AttrName;
+ SourceLocation AttrNameLoc;
+ if (IsString) {
+ SmallString<8> StrBuffer;
+ bool Invalid = false;
+ StringRef Str = PP.getSpelling(Tok, StrBuffer, &Invalid);
+ if (Invalid) {
+ T.skipToEnd();
+ return;
+ }
+ AttrName = PP.getIdentifierInfo(Str);
+ AttrNameLoc = ConsumeStringToken();
+ } else {
+ AttrName = Tok.getIdentifierInfo();
+ AttrNameLoc = ConsumeToken();
+ }
+
+ bool AttrHandled = false;
+
+ // Parse attribute arguments.
+ if (Tok.is(tok::l_paren))
+ AttrHandled = ParseMicrosoftDeclSpecArgs(AttrName, AttrNameLoc, Attrs);
+ else if (AttrName->getName() == "property")
+ // The property attribute must have an argument list.
+ Diag(Tok.getLocation(), diag::err_expected_lparen_after)
+ << AttrName->getName();
+
+ if (!AttrHandled)
+ Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
+ AttributeList::AS_Declspec);
+ }
+ T.consumeClose();
+ if (End)
+ *End = T.getCloseLocation();
+ }
+}
+
+void Parser::ParseMicrosoftTypeAttributes(ParsedAttributes &attrs) {
+ // Treat these like attributes
+ while (true) {
+ switch (Tok.getKind()) {
+ case tok::kw___fastcall:
+ case tok::kw___stdcall:
+ case tok::kw___thiscall:
+ case tok::kw___cdecl:
+ case tok::kw___vectorcall:
+ case tok::kw___ptr64:
+ case tok::kw___w64:
+ case tok::kw___ptr32:
+ case tok::kw___unaligned:
+ case tok::kw___sptr:
+ case tok::kw___uptr: {
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
+ attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
+ AttributeList::AS_Keyword);
+ break;
+ }
+ default:
+ return;
+ }
+ }
+}
+
+void Parser::DiagnoseAndSkipExtendedMicrosoftTypeAttributes() {
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc = SkipExtendedMicrosoftTypeAttributes();
+
+ if (EndLoc.isValid()) {
+ SourceRange Range(StartLoc, EndLoc);
+ Diag(StartLoc, diag::warn_microsoft_qualifiers_ignored) << Range;
+ }
+}
+
+SourceLocation Parser::SkipExtendedMicrosoftTypeAttributes() {
+ SourceLocation EndLoc;
+
+ while (true) {
+ switch (Tok.getKind()) {
+ case tok::kw_const:
+ case tok::kw_volatile:
+ case tok::kw___fastcall:
+ case tok::kw___stdcall:
+ case tok::kw___thiscall:
+ case tok::kw___cdecl:
+ case tok::kw___vectorcall:
+ case tok::kw___ptr32:
+ case tok::kw___ptr64:
+ case tok::kw___w64:
+ case tok::kw___unaligned:
+ case tok::kw___sptr:
+ case tok::kw___uptr:
+ EndLoc = ConsumeToken();
+ break;
+ default:
+ return EndLoc;
+ }
+ }
+}
+
+void Parser::ParseBorlandTypeAttributes(ParsedAttributes &attrs) {
+ // Treat these like attributes
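+ // e.g. the '__pascal' in 'int __pascal f(int);' (an illustrative example).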
+ while (Tok.is(tok::kw___pascal)) {
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
+ attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
+ AttributeList::AS_Keyword);
+ }
+}
+
+void Parser::ParseOpenCLAttributes(ParsedAttributes &attrs) {
+ // Treat these like attributes
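+ // e.g. the '__kernel' in '__kernel void f(__global int *p);' (an
+ // illustrative example, not from this change).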
+ while (Tok.is(tok::kw___kernel)) {
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
+ attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
+ AttributeList::AS_Keyword);
+ }
+}
+
+void Parser::ParseOpenCLQualifiers(ParsedAttributes &Attrs) {
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = Tok.getLocation();
+ Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
+ AttributeList::AS_Keyword);
+}
+
+void Parser::ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs) {
+ // Treat these like attributes, even though they're type specifiers.
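+ // e.g. the '_Nonnull' in 'int * _Nonnull p;' (an illustrative example).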
+ while (true) {
+ switch (Tok.getKind()) {
+ case tok::kw__Nonnull:
+ case tok::kw__Nullable:
+ case tok::kw__Null_unspecified: {
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
+ if (!getLangOpts().ObjC1)
+ Diag(AttrNameLoc, diag::ext_nullability)
+ << AttrName;
+ attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
+ AttributeList::AS_Keyword);
+ break;
+ }
+ default:
+ return;
+ }
+ }
+}
+
+static bool VersionNumberSeparator(const char Separator) {
+ return (Separator == '.' || Separator == '_');
+}
+
+/// \brief Parse a version number.
+///
+/// version:
+/// simple-integer
+/// simple-integer '.' simple-integer
+/// simple-integer '.' simple-integer '.' simple-integer
+///
+/// Each separator may also be '_'; mixing the two separators is diagnosed
+/// below.
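+///
+/// e.g. '10', '10.4', and '10.4.3' are all accepted here (illustrative
+/// examples, not from this change).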
+VersionTuple Parser::ParseVersionTuple(SourceRange &Range) {
+ Range = Tok.getLocation();
+
+ if (!Tok.is(tok::numeric_constant)) {
+ Diag(Tok, diag::err_expected_version);
+ SkipUntil(tok::comma, tok::r_paren,
+ StopAtSemi | StopBeforeMatch | StopAtCodeCompletion);
+ return VersionTuple();
+ }
+
+ // Parse the major (and possibly minor and subminor) versions, which
+ // are stored in the numeric constant. We utilize a quirk of the
+ // lexer, which is that it handles something like 1.2.3 as a single
+ // numeric constant, rather than two separate tokens.
+ SmallString<512> Buffer;
+ Buffer.resize(Tok.getLength()+1);
+ const char *ThisTokBegin = &Buffer[0];
+
+ // Get the spelling of the token, which eliminates trigraphs, etc.
+ bool Invalid = false;
+ unsigned ActualLength = PP.getSpelling(Tok, ThisTokBegin, &Invalid);
+ if (Invalid)
+ return VersionTuple();
+
+ // Parse the major version.
+ unsigned AfterMajor = 0;
+ unsigned Major = 0;
+ while (AfterMajor < ActualLength && isDigit(ThisTokBegin[AfterMajor])) {
+ Major = Major * 10 + ThisTokBegin[AfterMajor] - '0';
+ ++AfterMajor;
+ }
+
+ if (AfterMajor == 0) {
+ Diag(Tok, diag::err_expected_version);
+ SkipUntil(tok::comma, tok::r_paren,
+ StopAtSemi | StopBeforeMatch | StopAtCodeCompletion);
+ return VersionTuple();
+ }
+
+ if (AfterMajor == ActualLength) {
+ ConsumeToken();
+
+ // We only had a single version component.
+ if (Major == 0) {
+ Diag(Tok, diag::err_zero_version);
+ return VersionTuple();
+ }
+
+ return VersionTuple(Major);
+ }
+
+ const char AfterMajorSeparator = ThisTokBegin[AfterMajor];
+ if (!VersionNumberSeparator(AfterMajorSeparator)
+ || (AfterMajor + 1 == ActualLength)) {
+ Diag(Tok, diag::err_expected_version);
+ SkipUntil(tok::comma, tok::r_paren,
+ StopAtSemi | StopBeforeMatch | StopAtCodeCompletion);
+ return VersionTuple();
+ }
+
+ // Parse the minor version.
+ unsigned AfterMinor = AfterMajor + 1;
+ unsigned Minor = 0;
+ while (AfterMinor < ActualLength && isDigit(ThisTokBegin[AfterMinor])) {
+ Minor = Minor * 10 + ThisTokBegin[AfterMinor] - '0';
+ ++AfterMinor;
+ }
+
+ if (AfterMinor == ActualLength) {
+ ConsumeToken();
+
+ // We had major.minor.
+ if (Major == 0 && Minor == 0) {
+ Diag(Tok, diag::err_zero_version);
+ return VersionTuple();
+ }
+
+ return VersionTuple(Major, Minor, (AfterMajorSeparator == '_'));
+ }
+
+ const char AfterMinorSeparator = ThisTokBegin[AfterMinor];
+ // If what follows is not a '.' or '_', we have a problem.
+ if (!VersionNumberSeparator(AfterMinorSeparator)) {
+ Diag(Tok, diag::err_expected_version);
+ SkipUntil(tok::comma, tok::r_paren,
+ StopAtSemi | StopBeforeMatch | StopAtCodeCompletion);
+ return VersionTuple();
+ }
+
+ // Warn if the separators, whether '.' or '_', do not match.
+ if (AfterMajorSeparator != AfterMinorSeparator)
+ Diag(Tok, diag::warn_expected_consistent_version_separator);
+
+ // Parse the subminor version.
+ unsigned AfterSubminor = AfterMinor + 1;
+ unsigned Subminor = 0;
+ while (AfterSubminor < ActualLength && isDigit(ThisTokBegin[AfterSubminor])) {
+ Subminor = Subminor * 10 + ThisTokBegin[AfterSubminor] - '0';
+ ++AfterSubminor;
+ }
+
+ if (AfterSubminor != ActualLength) {
+ Diag(Tok, diag::err_expected_version);
+ SkipUntil(tok::comma, tok::r_paren,
+ StopAtSemi | StopBeforeMatch | StopAtCodeCompletion);
+ return VersionTuple();
+ }
+ ConsumeToken();
+ return VersionTuple(Major, Minor, Subminor, (AfterMajorSeparator == '_'));
+}
+
+/// \brief Parse the contents of the "availability" attribute.
+///
+/// availability-attribute:
+/// 'availability' '(' platform ',' version-arg-list [',' opt-message] ')'
+///
+/// platform:
+/// identifier
+///
+/// version-arg-list:
+/// version-arg
+/// version-arg ',' version-arg-list
+///
+/// version-arg:
+/// 'introduced' '=' version
+/// 'deprecated' '=' version
+/// 'obsoleted' '=' version
+/// 'unavailable'
+/// opt-message:
+/// 'message' '=' <string>
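+///
+/// A representative use of the attribute whose contents are parsed here
+/// (an illustrative example, not taken from this change):
+/// \code
+///   void f(void) __attribute__((availability(macosx,introduced=10.4,
+///                                             deprecated=10.6,obsoleted=10.7,
+///                                             message="use g instead")));
+/// \endcode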
+void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
+ SourceLocation AvailabilityLoc,
+ ParsedAttributes &attrs,
+ SourceLocation *endLoc,
+ IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc,
+ AttributeList::Syntax Syntax) {
+ enum { Introduced, Deprecated, Obsoleted, Unknown };
+ AvailabilityChange Changes[Unknown];
+ ExprResult MessageExpr;
+
+ // Opening '('.
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.consumeOpen()) {
+ Diag(Tok, diag::err_expected) << tok::l_paren;
+ return;
+ }
+
+ // Parse the platform name.
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_availability_expected_platform);
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+ IdentifierLoc *Platform = ParseIdentifierLoc();
+
+ // Parse the ',' following the platform name.
+ if (ExpectAndConsume(tok::comma)) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+
+ // If we haven't grabbed the pointers for the identifiers
+ // "introduced", "deprecated", and "obsoleted", do so now.
+ if (!Ident_introduced) {
+ Ident_introduced = PP.getIdentifierInfo("introduced");
+ Ident_deprecated = PP.getIdentifierInfo("deprecated");
+ Ident_obsoleted = PP.getIdentifierInfo("obsoleted");
+ Ident_unavailable = PP.getIdentifierInfo("unavailable");
+ Ident_message = PP.getIdentifierInfo("message");
+ }
+
+ // Parse the set of introductions/deprecations/removals.
+ SourceLocation UnavailableLoc;
+ do {
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_availability_expected_change);
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+ IdentifierInfo *Keyword = Tok.getIdentifierInfo();
+ SourceLocation KeywordLoc = ConsumeToken();
+
+ if (Keyword == Ident_unavailable) {
+ if (UnavailableLoc.isValid()) {
+ Diag(KeywordLoc, diag::err_availability_redundant)
+ << Keyword << SourceRange(UnavailableLoc);
+ }
+ UnavailableLoc = KeywordLoc;
+ continue;
+ }
+
+ if (Tok.isNot(tok::equal)) {
+ Diag(Tok, diag::err_expected_after) << Keyword << tok::equal;
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+ ConsumeToken();
+ if (Keyword == Ident_message) {
+ if (Tok.isNot(tok::string_literal)) {
+ Diag(Tok, diag::err_expected_string_literal)
+ << /*Source='availability attribute'*/2;
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+ MessageExpr = ParseStringLiteralExpression();
+ // Also reject wide string literals.
+ if (StringLiteral *MessageStringLiteral =
+ cast_or_null<StringLiteral>(MessageExpr.get())) {
+ if (MessageStringLiteral->getCharByteWidth() != 1) {
+ Diag(MessageStringLiteral->getSourceRange().getBegin(),
+ diag::err_expected_string_literal)
+ << /*Source='availability attribute'*/ 2;
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+ }
+ break;
+ }
+
+ // Special handling of 'NA' only when applied to introduced or
+ // deprecated.
+ if ((Keyword == Ident_introduced || Keyword == Ident_deprecated) &&
+ Tok.is(tok::identifier)) {
+ IdentifierInfo *NA = Tok.getIdentifierInfo();
+ if (NA->getName() == "NA") {
+ ConsumeToken();
+ if (Keyword == Ident_introduced)
+ UnavailableLoc = KeywordLoc;
+ continue;
+ }
+ }
+
+ SourceRange VersionRange;
+ VersionTuple Version = ParseVersionTuple(VersionRange);
+
+ if (Version.empty()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+
+ unsigned Index;
+ if (Keyword == Ident_introduced)
+ Index = Introduced;
+ else if (Keyword == Ident_deprecated)
+ Index = Deprecated;
+ else if (Keyword == Ident_obsoleted)
+ Index = Obsoleted;
+ else
+ Index = Unknown;
+
+ if (Index < Unknown) {
+ if (!Changes[Index].KeywordLoc.isInvalid()) {
+ Diag(KeywordLoc, diag::err_availability_redundant)
+ << Keyword
+ << SourceRange(Changes[Index].KeywordLoc,
+ Changes[Index].VersionRange.getEnd());
+ }
+
+ Changes[Index].KeywordLoc = KeywordLoc;
+ Changes[Index].Version = Version;
+ Changes[Index].VersionRange = VersionRange;
+ } else {
+ Diag(KeywordLoc, diag::err_availability_unknown_change)
+ << Keyword << VersionRange;
+ }
+
+ } while (TryConsumeToken(tok::comma));
+
+ // Closing ')'.
+ if (T.consumeClose())
+ return;
+
+ if (endLoc)
+ *endLoc = T.getCloseLocation();
+
+ // The 'unavailable' availability cannot be combined with any other
+ // availability changes. Make sure that hasn't happened.
+ if (UnavailableLoc.isValid()) {
+ bool Complained = false;
+ for (unsigned Index = Introduced; Index != Unknown; ++Index) {
+ if (Changes[Index].KeywordLoc.isValid()) {
+ if (!Complained) {
+ Diag(UnavailableLoc, diag::warn_availability_and_unavailable)
+ << SourceRange(Changes[Index].KeywordLoc,
+ Changes[Index].VersionRange.getEnd());
+ Complained = true;
+ }
+
+ // Clear out the availability.
+ Changes[Index] = AvailabilityChange();
+ }
+ }
+ }
+
+ // Record this attribute
+ attrs.addNew(&Availability,
+ SourceRange(AvailabilityLoc, T.getCloseLocation()),
+ ScopeName, ScopeLoc,
+ Platform,
+ Changes[Introduced],
+ Changes[Deprecated],
+ Changes[Obsoleted],
+ UnavailableLoc, MessageExpr.get(),
+ Syntax);
+}
+
+/// \brief Parse the contents of the "objc_bridge_related" attribute.
+/// objc_bridge_related '(' related_class ',' opt-class_method ',' opt-instance_method ')'
+/// related_class:
+/// Identifier
+///
+/// opt-class_method:
+/// Identifier: | <empty>
+///
+/// opt-instance_method:
+/// Identifier | <empty>
+///
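+/// A representative use (an illustrative example, not from this change):
+/// \code
+///   __attribute__((objc_bridge_related(NSColor,colorWithCGColor:,CGColor)))
+/// \endcode
+///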
+void Parser::ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
+ SourceLocation ObjCBridgeRelatedLoc,
+ ParsedAttributes &attrs,
+ SourceLocation *endLoc,
+ IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc,
+ AttributeList::Syntax Syntax) {
+ // Opening '('.
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.consumeOpen()) {
+ Diag(Tok, diag::err_expected) << tok::l_paren;
+ return;
+ }
+
+ // Parse the related class name.
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_objcbridge_related_expected_related_class);
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+ IdentifierLoc *RelatedClass = ParseIdentifierLoc();
+ if (ExpectAndConsume(tok::comma)) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+
+ // Parse optional class method name.
+ IdentifierLoc *ClassMethod = nullptr;
+ if (Tok.is(tok::identifier)) {
+ ClassMethod = ParseIdentifierLoc();
+ if (!TryConsumeToken(tok::colon)) {
+ Diag(Tok, diag::err_objcbridge_related_selector_name);
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+ }
+ if (!TryConsumeToken(tok::comma)) {
+ if (Tok.is(tok::colon))
+ Diag(Tok, diag::err_objcbridge_related_selector_name);
+ else
+ Diag(Tok, diag::err_expected) << tok::comma;
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+
+ // Parse optional instance method name.
+ IdentifierLoc *InstanceMethod = nullptr;
+ if (Tok.is(tok::identifier))
+ InstanceMethod = ParseIdentifierLoc();
+ else if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_expected) << tok::r_paren;
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+
+ // Closing ')'.
+ if (T.consumeClose())
+ return;
+
+ if (endLoc)
+ *endLoc = T.getCloseLocation();
+
+ // Record this attribute
+ attrs.addNew(&ObjCBridgeRelated,
+ SourceRange(ObjCBridgeRelatedLoc, T.getCloseLocation()),
+ ScopeName, ScopeLoc,
+ RelatedClass,
+ ClassMethod,
+ InstanceMethod,
+ Syntax);
+}
+
+// Late Parsed Attributes:
+// See other examples of late parsing in lib/Parse/ParseCXXInlineMethods
+
+void Parser::LateParsedDeclaration::ParseLexedAttributes() {}
+
+void Parser::LateParsedClass::ParseLexedAttributes() {
+ Self->ParseLexedAttributes(*Class);
+}
+
+void Parser::LateParsedAttribute::ParseLexedAttributes() {
+ Self->ParseLexedAttribute(*this, true, false);
+}
+
+/// Wrapper function which calls ParseLexedAttribute, after setting up the
+/// scope appropriately.
+void Parser::ParseLexedAttributes(ParsingClass &Class) {
+ // Deal with templates
+ // FIXME: Test cases to make sure this does the right thing for templates.
+ bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
+ ParseScope ClassTemplateScope(this, Scope::TemplateParamScope,
+ HasTemplateScope);
+ if (HasTemplateScope)
+ Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
+
+ // Set or update the scope flags.
+ bool AlreadyHasClassScope = Class.TopLevelClass;
+ unsigned ScopeFlags = Scope::ClassScope|Scope::DeclScope;
+ ParseScope ClassScope(this, ScopeFlags, !AlreadyHasClassScope);
+ ParseScopeFlags ClassScopeFlags(this, ScopeFlags, AlreadyHasClassScope);
+
+ // Enter the scope of nested classes
+ if (!AlreadyHasClassScope)
+ Actions.ActOnStartDelayedMemberDeclarations(getCurScope(),
+ Class.TagOrTemplate);
+ if (!Class.LateParsedDeclarations.empty()) {
+ for (unsigned i = 0, ni = Class.LateParsedDeclarations.size(); i < ni; ++i){
+ Class.LateParsedDeclarations[i]->ParseLexedAttributes();
+ }
+ }
+
+ if (!AlreadyHasClassScope)
+ Actions.ActOnFinishDelayedMemberDeclarations(getCurScope(),
+ Class.TagOrTemplate);
+}
+
+/// \brief Parse all attributes in LAs, and attach them to Decl D.
+void Parser::ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
+ bool EnterScope, bool OnDefinition) {
+ assert(LAs.parseSoon() &&
+ "Attribute list should be marked for immediate parsing.");
+ for (unsigned i = 0, ni = LAs.size(); i < ni; ++i) {
+ if (D)
+ LAs[i]->addDecl(D);
+ ParseLexedAttribute(*LAs[i], EnterScope, OnDefinition);
+ delete LAs[i];
+ }
+ LAs.clear();
+}
+
+/// \brief Finish parsing an attribute for which parsing was delayed.
+/// This will be called at the end of parsing a class declaration
+/// for each LateParsedAttribute. We consume the saved tokens and
+/// create an attribute with the arguments filled in. We add this
+/// to the Attribute list for the decl.
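+///
+/// A typical client (an illustrative sketch, assuming a 'Mutex' type set up
+/// for thread-safety analysis) is a late-parsed attribute that refers to a
+/// member declared later in the class:
+/// \code
+///   struct S {
+///     int Data __attribute__((guarded_by(Mu)));
+///     Mutex Mu;
+///   };
+/// \endcode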
+void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
+ bool EnterScope, bool OnDefinition) {
+ // Create a fake EOF so that attribute parsing won't go off the end of the
+ // attribute.
+ Token AttrEnd;
+ AttrEnd.startToken();
+ AttrEnd.setKind(tok::eof);
+ AttrEnd.setLocation(Tok.getLocation());
+ AttrEnd.setEofData(LA.Toks.data());
+ LA.Toks.push_back(AttrEnd);
+
+ // Append the current token at the end of the new token stream so that it
+ // doesn't get lost.
+ LA.Toks.push_back(Tok);
+ PP.EnterTokenStream(LA.Toks.data(), LA.Toks.size(), true, false);
+ // Consume the previously pushed token.
+ ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
+
+ ParsedAttributes Attrs(AttrFactory);
+ SourceLocation endLoc;
+
+ if (LA.Decls.size() > 0) {
+ Decl *D = LA.Decls[0];
+ NamedDecl *ND = dyn_cast<NamedDecl>(D);
+ RecordDecl *RD = dyn_cast_or_null<RecordDecl>(D->getDeclContext());
+
+ // Allow 'this' within late-parsed attributes.
+ Sema::CXXThisScopeRAII ThisScope(Actions, RD, /*TypeQuals=*/0,
+ ND && ND->isCXXInstanceMember());
+
+ if (LA.Decls.size() == 1) {
+ // If the Decl is templatized, add template parameters to scope.
+ bool HasTemplateScope = EnterScope && D->isTemplateDecl();
+ ParseScope TempScope(this, Scope::TemplateParamScope, HasTemplateScope);
+ if (HasTemplateScope)
+ Actions.ActOnReenterTemplateScope(Actions.CurScope, D);
+
+ // If the Decl is on a function, add function parameters to the scope.
+ bool HasFunScope = EnterScope && D->isFunctionOrFunctionTemplate();
+ ParseScope FnScope(this, Scope::FnScope|Scope::DeclScope, HasFunScope);
+ if (HasFunScope)
+ Actions.ActOnReenterFunctionContext(Actions.CurScope, D);
+
+ ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc,
+ nullptr, SourceLocation(), AttributeList::AS_GNU,
+ nullptr);
+
+ if (HasFunScope) {
+ Actions.ActOnExitFunctionContext();
+ FnScope.Exit(); // Pop scope, and remove Decls from IdResolver
+ }
+ if (HasTemplateScope) {
+ TempScope.Exit();
+ }
+ } else {
+ // If there are multiple decls, then the decl cannot be within the
+ // function scope.
+ ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc,
+ nullptr, SourceLocation(), AttributeList::AS_GNU,
+ nullptr);
+ }
+ } else {
+ Diag(Tok, diag::warn_attribute_no_decl) << LA.AttrName.getName();
+ }
+
+ const AttributeList *AL = Attrs.getList();
+ if (OnDefinition && AL && !AL->isCXX11Attribute() &&
+ AL->isKnownToGCC())
+ Diag(Tok, diag::warn_attribute_on_function_definition)
+ << &LA.AttrName;
+
+ for (unsigned i = 0, ni = LA.Decls.size(); i < ni; ++i)
+ Actions.ActOnFinishDelayedAttribute(getCurScope(), LA.Decls[i], Attrs);
+
+ // Due to a parsing error, we either went over the cached tokens or
+ // there are still cached tokens left, so we skip the leftover tokens.
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+
+ if (Tok.is(tok::eof) && Tok.getEofData() == AttrEnd.getEofData())
+ ConsumeAnyToken();
+}
+
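+/// Parse the contents of a "type_tag_for_datatype" attribute: an
+/// argument-kind identifier, a matching C type, and optionally the flags
+/// 'layout_compatible' and 'must_be_null'. A sketch of a typical use
+/// (an illustrative example, not taken from this change):
+/// \code
+///   static const int mpi_datatype_int
+///       __attribute__((type_tag_for_datatype(mpi, int)));
+/// \endcode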
+void Parser::ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
+ SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs,
+ SourceLocation *EndLoc,
+ IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc,
+ AttributeList::Syntax Syntax) {
+ assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('");
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ T.skipToEnd();
+ return;
+ }
+ IdentifierLoc *ArgumentKind = ParseIdentifierLoc();
+
+ if (ExpectAndConsume(tok::comma)) {
+ T.skipToEnd();
+ return;
+ }
+
+ SourceRange MatchingCTypeRange;
+ TypeResult MatchingCType = ParseTypeName(&MatchingCTypeRange);
+ if (MatchingCType.isInvalid()) {
+ T.skipToEnd();
+ return;
+ }
+
+ bool LayoutCompatible = false;
+ bool MustBeNull = false;
+ while (TryConsumeToken(tok::comma)) {
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ T.skipToEnd();
+ return;
+ }
+ IdentifierInfo *Flag = Tok.getIdentifierInfo();
+ if (Flag->isStr("layout_compatible"))
+ LayoutCompatible = true;
+ else if (Flag->isStr("must_be_null"))
+ MustBeNull = true;
+ else {
+ Diag(Tok, diag::err_type_safety_unknown_flag) << Flag;
+ T.skipToEnd();
+ return;
+ }
+ ConsumeToken(); // consume flag
+ }
+
+ if (!T.consumeClose()) {
+ Attrs.addNewTypeTagForDatatype(&AttrName, AttrNameLoc, ScopeName, ScopeLoc,
+ ArgumentKind, MatchingCType.get(),
+ LayoutCompatible, MustBeNull, Syntax);
+ }
+
+ if (EndLoc)
+ *EndLoc = T.getCloseLocation();
+}
+
+/// DiagnoseProhibitedCXX11Attribute - We have found the opening square brackets
+/// of a C++11 attribute-specifier in a location where an attribute is not
+/// permitted. By C++11 [dcl.attr.grammar]p6, this is ill-formed. Diagnose this
+/// situation.
+///
+/// \return \c true if we skipped an attribute-like chunk of tokens, \c false if
+/// this doesn't appear to actually be an attribute-specifier, and the caller
+/// should try to parse it.
+bool Parser::DiagnoseProhibitedCXX11Attribute() {
+ assert(Tok.is(tok::l_square) && NextToken().is(tok::l_square));
+
+ switch (isCXX11AttributeSpecifier(/*Disambiguate*/true)) {
+ case CAK_NotAttributeSpecifier:
+ // No diagnostic: we're in Obj-C++11 and this is not actually an attribute.
+ return false;
+
+ case CAK_InvalidAttributeSpecifier:
+ Diag(Tok.getLocation(), diag::err_l_square_l_square_not_attribute);
+ return false;
+
+ case CAK_AttributeSpecifier:
+ // Parse and discard the attributes.
+ SourceLocation BeginLoc = ConsumeBracket();
+ ConsumeBracket();
+ SkipUntil(tok::r_square);
+ assert(Tok.is(tok::r_square) && "isCXX11AttributeSpecifier lied");
+ SourceLocation EndLoc = ConsumeBracket();
+ Diag(BeginLoc, diag::err_attributes_not_allowed)
+ << SourceRange(BeginLoc, EndLoc);
+ return true;
+ }
+ llvm_unreachable("All cases handled above.");
+}
+
+/// \brief We have found the opening square brackets of a C++11
+/// attribute-specifier in a location where an attribute is not permitted, but
+/// we know where the attributes ought to be written. Parse them anyway, and
+/// provide a fixit moving them to the right place.
+void Parser::DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
+ SourceLocation CorrectLocation) {
+ assert((Tok.is(tok::l_square) && NextToken().is(tok::l_square)) ||
+ Tok.is(tok::kw_alignas));
+
+ // Consume the attributes.
+ SourceLocation Loc = Tok.getLocation();
+ ParseCXX11Attributes(Attrs);
+ CharSourceRange AttrRange(SourceRange(Loc, Attrs.Range.getEnd()), true);
+
+ Diag(Loc, diag::err_attributes_not_allowed)
+ << FixItHint::CreateInsertionFromRange(CorrectLocation, AttrRange)
+ << FixItHint::CreateRemoval(AttrRange);
+}
+
+void Parser::DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs) {
+ Diag(attrs.Range.getBegin(), diag::err_attributes_not_allowed)
+ << attrs.Range;
+}
+
+void Parser::ProhibitCXX11Attributes(ParsedAttributesWithRange &attrs) {
+ AttributeList *AttrList = attrs.getList();
+ while (AttrList) {
+ if (AttrList->isCXX11Attribute()) {
+ Diag(AttrList->getLoc(), diag::err_attribute_not_type_attr)
+ << AttrList->getName();
+ AttrList->setInvalid();
+ }
+ AttrList = AttrList->getNext();
+ }
+}
+
+// As an exception to the rule, __declspec(align(...)) before the
+// class-key affects the type instead of the variable.
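+// e.g. in '__declspec(align(16)) struct S {} s;' the alignment attaches to
+// 'struct S' rather than to 's' (an illustrative example).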
+void Parser::handleDeclspecAlignBeforeClassKey(ParsedAttributesWithRange &Attrs,
+ DeclSpec &DS,
+ Sema::TagUseKind TUK) {
+ if (TUK == Sema::TUK_Reference)
+ return;
+
+ ParsedAttributes &PA = DS.getAttributes();
+ AttributeList *AL = PA.getList();
+ AttributeList *Prev = nullptr;
+ while (AL) {
+ AttributeList *Next = AL->getNext();
+
+ // We only consider attributes using the appropriate '__declspec' spelling;
+ // this behavior doesn't extend to any other spellings.
+ if (AL->getKind() == AttributeList::AT_Aligned &&
+ AL->isDeclspecAttribute()) {
+ // Stitch the attribute into the tag's attribute list.
+ AL->setNext(nullptr);
+ Attrs.add(AL);
+
+ // Remove the attribute from the variable's attribute list.
+ if (Prev) {
+ // Set the last variable attribute's next attribute to be the attribute
+ // after the current one.
+ Prev->setNext(Next);
+ } else {
+ // Removing the head of the list requires us to reset the head to the
+ // next attribute.
+ PA.set(Next);
+ }
+ } else {
+ Prev = AL;
+ }
+
+ AL = Next;
+ }
+}
+
+/// ParseDeclaration - Parse a full 'declaration', which consists of
+/// declaration-specifiers, some number of declarators, and a semicolon.
+/// 'Context' should be a Declarator::TheContext value. This returns the
+/// location of the semicolon in DeclEnd.
+///
+/// declaration: [C99 6.7]
+/// block-declaration ->
+/// simple-declaration
+/// others [FIXME]
+/// [C++] template-declaration
+/// [C++] namespace-definition
+/// [C++] using-directive
+/// [C++] using-declaration
+/// [C++11/C11] static_assert-declaration
+/// others... [FIXME]
+///
+Parser::DeclGroupPtrTy Parser::ParseDeclaration(unsigned Context,
+ SourceLocation &DeclEnd,
+ ParsedAttributesWithRange &attrs) {
+ ParenBraceBracketBalancer BalancerRAIIObj(*this);
+ // Must temporarily exit the Objective-C container scope for
+ // parsing C non-Objective-C decls.
+ ObjCDeclContextSwitch ObjCDC(*this);
+
+ Decl *SingleDecl = nullptr;
+ Decl *OwnedType = nullptr;
+ switch (Tok.getKind()) {
+ case tok::kw_template:
+ case tok::kw_export:
+ ProhibitAttributes(attrs);
+ SingleDecl = ParseDeclarationStartingWithTemplate(Context, DeclEnd);
+ break;
+ case tok::kw_inline:
+ // Could be the start of an inline namespace. Allowed as an ext in C++03.
+ if (getLangOpts().CPlusPlus && NextToken().is(tok::kw_namespace)) {
+ ProhibitAttributes(attrs);
+ SourceLocation InlineLoc = ConsumeToken();
+ return ParseNamespace(Context, DeclEnd, InlineLoc);
+ }
+ return ParseSimpleDeclaration(Context, DeclEnd, attrs,
+ true);
+ case tok::kw_namespace:
+ ProhibitAttributes(attrs);
+ return ParseNamespace(Context, DeclEnd);
+ case tok::kw_using:
+ SingleDecl = ParseUsingDirectiveOrDeclaration(Context, ParsedTemplateInfo(),
+ DeclEnd, attrs, &OwnedType);
+ break;
+ case tok::kw_static_assert:
+ case tok::kw__Static_assert:
+ ProhibitAttributes(attrs);
+ SingleDecl = ParseStaticAssertDeclaration(DeclEnd);
+ break;
+ default:
+ return ParseSimpleDeclaration(Context, DeclEnd, attrs, true);
+ }
+
+ // This routine returns a DeclGroup; if the thing we parsed only contains a
+ // single decl, convert it now. Alias declarations can also declare a type;
+ // include that too if it is present.
+ return Actions.ConvertDeclToDeclGroup(SingleDecl, OwnedType);
+}
+
+/// simple-declaration: [C99 6.7: declaration] [C++ 7p1: dcl.dcl]
+/// declaration-specifiers init-declarator-list[opt] ';'
+/// [C++11] attribute-specifier-seq decl-specifier-seq[opt]
+/// init-declarator-list ';'
+/// [C90/C++] init-declarator-list ';' [TODO]
+/// [OMP] threadprivate-directive [TODO]
+///
+/// for-range-declaration: [C++11 6.5p1: stmt.ranged]
+/// attribute-specifier-seq[opt] type-specifier-seq declarator
+///
+/// If RequireSemi is false, this does not check for a ';' at the end of the
+/// declaration. If it is true, it checks for and eats it.
+///
+/// If FRI is non-null, we might be parsing a for-range-declaration instead
+/// of a simple-declaration. If we find that we are, we also parse the
+/// for-range-initializer, and place it here.
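+///
+/// e.g. 'int a = 1, *b;' is a simple-declaration whose init-declarator-list
+/// declares both 'a' and 'b' (an illustrative example).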
+Parser::DeclGroupPtrTy
+Parser::ParseSimpleDeclaration(unsigned Context,
+ SourceLocation &DeclEnd,
+ ParsedAttributesWithRange &Attrs,
+ bool RequireSemi, ForRangeInit *FRI) {
+ // Parse the common declaration-specifiers piece.
+ ParsingDeclSpec DS(*this);
+
+ DeclSpecContext DSContext = getDeclSpecContextFromDeclaratorContext(Context);
+ ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS_none, DSContext);
+
+ // If we had a free-standing type definition with a missing semicolon, we
+ // may get this far before the problem becomes obvious.
+ if (DS.hasTagDefinition() &&
+ DiagnoseMissingSemiAfterTagDefinition(DS, AS_none, DSContext))
+ return DeclGroupPtrTy();
+
+ // C99 6.7.2.3p6: Handle "struct-or-union identifier;", "enum { X };"
+ // declaration-specifiers init-declarator-list[opt] ';'
+ if (Tok.is(tok::semi)) {
+ ProhibitAttributes(Attrs);
+ DeclEnd = Tok.getLocation();
+ if (RequireSemi) ConsumeToken();
+ Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS_none,
+ DS);
+ DS.complete(TheDecl);
+ return Actions.ConvertDeclToDeclGroup(TheDecl);
+ }
+
+ DS.takeAttributesFrom(Attrs);
+ return ParseDeclGroup(DS, Context, &DeclEnd, FRI);
+}
+
+/// Returns true if this might be the start of a declarator, or a common typo
+/// for a declarator.
+bool Parser::MightBeDeclarator(unsigned Context) {
+ switch (Tok.getKind()) {
+ case tok::annot_cxxscope:
+ case tok::annot_template_id:
+ case tok::caret:
+ case tok::code_completion:
+ case tok::coloncolon:
+ case tok::ellipsis:
+ case tok::kw___attribute:
+ case tok::kw_operator:
+ case tok::l_paren:
+ case tok::star:
+ return true;
+
+ case tok::amp:
+ case tok::ampamp:
+ return getLangOpts().CPlusPlus;
+
+ case tok::l_square: // Might be an attribute on an unnamed bit-field.
+ return Context == Declarator::MemberContext && getLangOpts().CPlusPlus11 &&
+ NextToken().is(tok::l_square);
+
+ case tok::colon: // Might be a typo for '::' or an unnamed bit-field.
+ return Context == Declarator::MemberContext || getLangOpts().CPlusPlus;
+
+ case tok::identifier:
+ switch (NextToken().getKind()) {
+ case tok::code_completion:
+ case tok::coloncolon:
+ case tok::comma:
+ case tok::equal:
+ case tok::equalequal: // Might be a typo for '='.
+ case tok::kw_alignas:
+ case tok::kw_asm:
+ case tok::kw___attribute:
+ case tok::l_brace:
+ case tok::l_paren:
+ case tok::l_square:
+ case tok::less:
+ case tok::r_brace:
+ case tok::r_paren:
+ case tok::r_square:
+ case tok::semi:
+ return true;
+
+ case tok::colon:
+ // At namespace scope, 'identifier:' is probably a typo for 'identifier::'
+ // and in block scope it's probably a label. Inside a class definition,
+ // this is a bit-field.
+ return Context == Declarator::MemberContext ||
+ (getLangOpts().CPlusPlus && Context == Declarator::FileContext);
+
+ case tok::identifier: // Possible virt-specifier.
+ return getLangOpts().CPlusPlus11 && isCXX11VirtSpecifier(NextToken());
+
+ default:
+ return false;
+ }
+
+ default:
+ return false;
+ }
+}
+
+/// Skip until we reach something which seems like a sensible place to pick
+/// up parsing after a malformed declaration. This will sometimes stop sooner
+/// than SkipUntil(tok::r_brace) would, but will never stop later.
+void Parser::SkipMalformedDecl() {
+ while (true) {
+ switch (Tok.getKind()) {
+ case tok::l_brace:
+ // Skip until matching }, then stop. We've probably skipped over
+ // a malformed class or function definition or similar.
+ ConsumeBrace();
+ SkipUntil(tok::r_brace);
+ if (Tok.isOneOf(tok::comma, tok::l_brace, tok::kw_try)) {
+ // This declaration isn't over yet. Keep skipping.
+ continue;
+ }
+ TryConsumeToken(tok::semi);
+ return;
+
+ case tok::l_square:
+ ConsumeBracket();
+ SkipUntil(tok::r_square);
+ continue;
+
+ case tok::l_paren:
+ ConsumeParen();
+ SkipUntil(tok::r_paren);
+ continue;
+
+ case tok::r_brace:
+ return;
+
+ case tok::semi:
+ ConsumeToken();
+ return;
+
+ case tok::kw_inline:
+ // 'inline namespace' at the start of a line is almost certainly
+ // a good place to pick back up parsing, except in an Objective-C
+ // @interface context.
+ if (Tok.isAtStartOfLine() && NextToken().is(tok::kw_namespace) &&
+ (!ParsingInObjCContainer || CurParsedObjCImpl))
+ return;
+ break;
+
+ case tok::kw_namespace:
+ // 'namespace' at the start of a line is almost certainly a good
+ // place to pick back up parsing, except in an Objective-C
+ // @interface context.
+ if (Tok.isAtStartOfLine() &&
+ (!ParsingInObjCContainer || CurParsedObjCImpl))
+ return;
+ break;
+
+ case tok::at:
+ // @end is very much like } in Objective-C contexts.
+ if (NextToken().isObjCAtKeyword(tok::objc_end) &&
+ ParsingInObjCContainer)
+ return;
+ break;
+
+ case tok::minus:
+ case tok::plus:
+ // - and + probably start new method declarations in Objective-C contexts.
+ if (Tok.isAtStartOfLine() && ParsingInObjCContainer)
+ return;
+ break;
+
+ case tok::eof:
+ case tok::annot_module_begin:
+ case tok::annot_module_end:
+ case tok::annot_module_include:
+ return;
+
+ default:
+ break;
+ }
+
+ ConsumeAnyToken();
+ }
+}
+
+/// ParseDeclGroup - Having concluded that this is either a function
+/// definition or a group of object declarations, actually parse the
+/// result.
+Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
+ unsigned Context,
+ SourceLocation *DeclEnd,
+ ForRangeInit *FRI) {
+ // Parse the first declarator.
+ ParsingDeclarator D(*this, DS, static_cast<Declarator::TheContext>(Context));
+ ParseDeclarator(D);
+
+ // Bail out if the first declarator didn't seem well-formed.
+ if (!D.hasName() && !D.mayOmitIdentifier()) {
+ SkipMalformedDecl();
+ return DeclGroupPtrTy();
+ }
+
+ // Save late-parsed attributes for now; they need to be parsed in the
+ // appropriate function scope after the function Decl has been constructed.
+ // These will be parsed in ParseFunctionDefinition or ParseLexedAttrList.
+ LateParsedAttrList LateParsedAttrs(true);
+ if (D.isFunctionDeclarator()) {
+ MaybeParseGNUAttributes(D, &LateParsedAttrs);
+
+ // The _Noreturn keyword can't appear here, unlike the GNU noreturn
+ // attribute. If we find the keyword here, tell the user to put it
+ // at the start instead.
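+ // e.g. 'int f(void) _Noreturn;' is diagnosed here, with a fix-it towards
+ // '_Noreturn int f(void);' (an illustrative example).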
+ if (Tok.is(tok::kw__Noreturn)) {
+ SourceLocation Loc = ConsumeToken();
+ const char *PrevSpec;
+ unsigned DiagID;
+
+ // We can offer a fixit if it's valid to mark this function as _Noreturn
+ // and we don't have any other declarators in this declaration.
+ bool Fixit = !DS.setFunctionSpecNoreturn(Loc, PrevSpec, DiagID);
+ MaybeParseGNUAttributes(D, &LateParsedAttrs);
+ Fixit &= Tok.isOneOf(tok::semi, tok::l_brace, tok::kw_try);
+
+ Diag(Loc, diag::err_c11_noreturn_misplaced)
+ << (Fixit ? FixItHint::CreateRemoval(Loc) : FixItHint())
+ << (Fixit ? FixItHint::CreateInsertion(D.getLocStart(), "_Noreturn ")
+ : FixItHint());
+ }
+ }
+
+ // Check to see if we have a function *definition* which must have a body.
+ if (D.isFunctionDeclarator() &&
+ // Look at the next token to make sure that this isn't a function
+ // declaration. We have to check this because __attribute__ might be the
+ // start of a function definition in GCC-extended K&R C.
+ !isDeclarationAfterDeclarator()) {
+
+ // Function definitions are only allowed at file scope and in C++ classes.
+ // The C++ inline method definition case is handled elsewhere, so we only
+ // need to handle the file scope definition case.
+ if (Context == Declarator::FileContext) {
+ if (isStartOfFunctionDefinition(D)) {
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
+ Diag(Tok, diag::err_function_declared_typedef);
+
+ // Recover by treating the 'typedef' as spurious.
+ DS.ClearStorageClassSpecs();
+ }
+
+ Decl *TheDecl =
+ ParseFunctionDefinition(D, ParsedTemplateInfo(), &LateParsedAttrs);
+ return Actions.ConvertDeclToDeclGroup(TheDecl);
+ }
+
+ if (isDeclarationSpecifier()) {
+ // If there is an invalid declaration specifier right after the
+ // function prototype, then we must be in a missing semicolon case
+ // where this isn't actually a body. Just fall through into the code
+ // that handles it as a prototype, and let the top-level code handle
+ // the erroneous declspec where it would otherwise expect a comma or
+ // semicolon.
+ } else {
+ Diag(Tok, diag::err_expected_fn_body);
+ SkipUntil(tok::semi);
+ return DeclGroupPtrTy();
+ }
+ } else {
+ if (Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::err_function_definition_not_allowed);
+ SkipMalformedDecl();
+ return DeclGroupPtrTy();
+ }
+ }
+ }
+
+ if (ParseAsmAttributesAfterDeclarator(D))
+ return DeclGroupPtrTy();
+
+ // C++0x [stmt.iter]p1: Check if we have a for-range-declarator. If so, we
+ // must parse and analyze the for-range-initializer before the declaration is
+ // analyzed.
+ //
+ // Handle the Objective-C for-in loop variable similarly, although we
+ // don't need to parse the container in advance.
+ if (FRI && (Tok.is(tok::colon) || isTokIdentifier_in())) {
+ bool IsForRangeLoop = false;
+ if (TryConsumeToken(tok::colon, FRI->ColonLoc)) {
+ IsForRangeLoop = true;
+ if (Tok.is(tok::l_brace))
+ FRI->RangeExpr = ParseBraceInitializer();
+ else
+ FRI->RangeExpr = ParseExpression();
+ }
+
+ Decl *ThisDecl = Actions.ActOnDeclarator(getCurScope(), D);
+ if (IsForRangeLoop)
+ Actions.ActOnCXXForRangeDecl(ThisDecl);
+ Actions.FinalizeDeclaration(ThisDecl);
+ D.complete(ThisDecl);
+ return Actions.FinalizeDeclaratorGroup(getCurScope(), DS, ThisDecl);
+ }
+
+ SmallVector<Decl *, 8> DeclsInGroup;
+ Decl *FirstDecl = ParseDeclarationAfterDeclaratorAndAttributes(
+ D, ParsedTemplateInfo(), FRI);
+ if (LateParsedAttrs.size() > 0)
+ ParseLexedAttributeList(LateParsedAttrs, FirstDecl, true, false);
+ D.complete(FirstDecl);
+ if (FirstDecl)
+ DeclsInGroup.push_back(FirstDecl);
+
+ bool ExpectSemi = Context != Declarator::ForContext;
+
+ // If we don't have a comma, it is either the end of the list (a ';') or an
+ // error; bail out.
+ SourceLocation CommaLoc;
+ while (TryConsumeToken(tok::comma, CommaLoc)) {
+ if (Tok.isAtStartOfLine() && ExpectSemi && !MightBeDeclarator(Context)) {
+ // This comma was followed by a line-break and something which can't be
+ // the start of a declarator. The comma was probably a typo for a
+ // semicolon.
+ Diag(CommaLoc, diag::err_expected_semi_declaration)
+ << FixItHint::CreateReplacement(CommaLoc, ";");
+ ExpectSemi = false;
+ break;
+ }
+
+ // Parse the next declarator.
+ D.clear();
+ D.setCommaLoc(CommaLoc);
+
+ // Accept attributes in an init-declarator. In the first declarator in a
+ // declaration, these would be part of the declspec. In subsequent
+ // declarators, they become part of the declarator itself, so that they
+ // don't apply to declarators after *this* one. Examples:
+ // short __attribute__((common)) var; -> declspec
+ // short var __attribute__((common)); -> declarator
+ // short x, __attribute__((common)) var; -> declarator
+ MaybeParseGNUAttributes(D);
+
+ // MSVC parses but ignores qualifiers after the comma as an extension.
+ if (getLangOpts().MicrosoftExt)
+ DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
+
+ ParseDeclarator(D);
+ if (!D.isInvalidType()) {
+ Decl *ThisDecl = ParseDeclarationAfterDeclarator(D);
+ D.complete(ThisDecl);
+ if (ThisDecl)
+ DeclsInGroup.push_back(ThisDecl);
+ }
+ }
+
+ if (DeclEnd)
+ *DeclEnd = Tok.getLocation();
+
+ if (ExpectSemi &&
+ ExpectAndConsumeSemi(Context == Declarator::FileContext
+ ? diag::err_invalid_token_after_toplevel_declarator
+ : diag::err_expected_semi_declaration)) {
+ // Okay, there was no semicolon and one was expected. If we see a
+ // declaration specifier, just assume it was missing and continue parsing.
+ // Otherwise things are very confused and we skip to recover.
+ if (!isDeclarationSpecifier()) {
+ SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
+ TryConsumeToken(tok::semi);
+ }
+ }
+
+ return Actions.FinalizeDeclaratorGroup(getCurScope(), DS, DeclsInGroup);
+}
+
+/// Parse an optional simple-asm-expr and attributes, and attach them to a
+/// declarator. Returns true on an error.
+bool Parser::ParseAsmAttributesAfterDeclarator(Declarator &D) {
+ // If a simple-asm-expr is present, parse it.
+ if (Tok.is(tok::kw_asm)) {
+ SourceLocation Loc;
+ ExprResult AsmLabel(ParseSimpleAsm(&Loc));
+ if (AsmLabel.isInvalid()) {
+ SkipUntil(tok::semi, StopBeforeMatch);
+ return true;
+ }
+
+ D.setAsmLabel(AsmLabel.get());
+ D.SetRangeEnd(Loc);
+ }
+
+ MaybeParseGNUAttributes(D);
+ return false;
+}
+
+/// \brief Parse 'declaration' after parsing 'declaration-specifiers
+/// declarator'. This method parses the remainder of the declaration
+/// (including any attributes or initializer, among other things) and
+/// finalizes the declaration.
+///
+/// init-declarator: [C99 6.7]
+/// declarator
+/// declarator '=' initializer
+/// [GNU] declarator simple-asm-expr[opt] attributes[opt]
+/// [GNU] declarator simple-asm-expr[opt] attributes[opt] '=' initializer
+/// [C++] declarator initializer[opt]
+///
+/// [C++] initializer:
+/// [C++] '=' initializer-clause
+/// [C++] '(' expression-list ')'
+/// [C++0x] '=' 'default' [TODO]
+/// [C++0x] '=' 'delete'
+/// [C++0x] braced-init-list
+///
+/// According to the standard grammar, =default and =delete are function
+/// definitions, but that definitely doesn't fit with the parser here.
+///
+Decl *Parser::ParseDeclarationAfterDeclarator(
+ Declarator &D, const ParsedTemplateInfo &TemplateInfo) {
+ if (ParseAsmAttributesAfterDeclarator(D))
+ return nullptr;
+
+ return ParseDeclarationAfterDeclaratorAndAttributes(D, TemplateInfo);
+}
+
+Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
+ Declarator &D, const ParsedTemplateInfo &TemplateInfo, ForRangeInit *FRI) {
+ // Inform the current actions module that we just parsed this declarator.
+ Decl *ThisDecl = nullptr;
+ switch (TemplateInfo.Kind) {
+ case ParsedTemplateInfo::NonTemplate:
+ ThisDecl = Actions.ActOnDeclarator(getCurScope(), D);
+ break;
+
+ case ParsedTemplateInfo::Template:
+ case ParsedTemplateInfo::ExplicitSpecialization: {
+ ThisDecl = Actions.ActOnTemplateDeclarator(getCurScope(),
+ *TemplateInfo.TemplateParams,
+ D);
+ if (VarTemplateDecl *VT = dyn_cast_or_null<VarTemplateDecl>(ThisDecl))
+ // Re-direct this decl to refer to the templated decl so that we can
+ // initialize it.
+ ThisDecl = VT->getTemplatedDecl();
+ break;
+ }
+ case ParsedTemplateInfo::ExplicitInstantiation: {
+ if (Tok.is(tok::semi)) {
+ DeclResult ThisRes = Actions.ActOnExplicitInstantiation(
+ getCurScope(), TemplateInfo.ExternLoc, TemplateInfo.TemplateLoc, D);
+ if (ThisRes.isInvalid()) {
+ SkipUntil(tok::semi, StopBeforeMatch);
+ return nullptr;
+ }
+ ThisDecl = ThisRes.get();
+ } else {
+ // FIXME: This check should be for a variable template instantiation only.
+
+ // Check that this is a valid instantiation
+ if (D.getName().getKind() != UnqualifiedId::IK_TemplateId) {
+ // If the declarator-id is not a template-id, issue a diagnostic and
+ // recover by ignoring the 'template' keyword.
+ Diag(Tok, diag::err_template_defn_explicit_instantiation)
+ << 2 << FixItHint::CreateRemoval(TemplateInfo.TemplateLoc);
+ ThisDecl = Actions.ActOnDeclarator(getCurScope(), D);
+ } else {
+ SourceLocation LAngleLoc =
+ PP.getLocForEndOfToken(TemplateInfo.TemplateLoc);
+ Diag(D.getIdentifierLoc(),
+ diag::err_explicit_instantiation_with_definition)
+ << SourceRange(TemplateInfo.TemplateLoc)
+ << FixItHint::CreateInsertion(LAngleLoc, "<>");
+
+ // Recover as if it were an explicit specialization.
+ TemplateParameterLists FakedParamLists;
+ FakedParamLists.push_back(Actions.ActOnTemplateParameterList(
+ 0, SourceLocation(), TemplateInfo.TemplateLoc, LAngleLoc, None,
+ LAngleLoc));
+
+ ThisDecl =
+ Actions.ActOnTemplateDeclarator(getCurScope(), FakedParamLists, D);
+ }
+ }
+ break;
+ }
+ }
+
+ bool TypeContainsAuto = D.getDeclSpec().containsPlaceholderType();
+
+ // Parse declarator '=' initializer.
+ // If a '==' or '+=' is found, suggest a fixit to '='.
+ if (isTokenEqualOrEqualTypo()) {
+ SourceLocation EqualLoc = ConsumeToken();
+
+ if (Tok.is(tok::kw_delete)) {
+ if (D.isFunctionDeclarator())
+ Diag(ConsumeToken(), diag::err_default_delete_in_multiple_declaration)
+ << 1 /* delete */;
+ else
+ Diag(ConsumeToken(), diag::err_deleted_non_function);
+ } else if (Tok.is(tok::kw_default)) {
+ if (D.isFunctionDeclarator())
+ Diag(ConsumeToken(), diag::err_default_delete_in_multiple_declaration)
+ << 0 /* default */;
+ else
+ Diag(ConsumeToken(), diag::err_default_special_members);
+ } else {
+ if (getLangOpts().CPlusPlus && D.getCXXScopeSpec().isSet()) {
+ EnterScope(0);
+ Actions.ActOnCXXEnterDeclInitializer(getCurScope(), ThisDecl);
+ }
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteInitializer(getCurScope(), ThisDecl);
+ Actions.FinalizeDeclaration(ThisDecl);
+ cutOffParsing();
+ return nullptr;
+ }
+
+ ExprResult Init(ParseInitializer());
+
+ // If this is the only decl in a (possibly) range-based for statement,
+ // our best guess is that the user meant ':' instead of '='.
+ if (Tok.is(tok::r_paren) && FRI && D.isFirstDeclarator()) {
+ Diag(EqualLoc, diag::err_single_decl_assign_in_for_range)
+ << FixItHint::CreateReplacement(EqualLoc, ":");
+ // We are trying to stop the parser from looking for ';' in this for
+ // statement, thereby preventing spurious errors from being issued.
+ FRI->ColonLoc = EqualLoc;
+ Init = ExprError();
+ FRI->RangeExpr = Init;
+ }
+
+ if (getLangOpts().CPlusPlus && D.getCXXScopeSpec().isSet()) {
+ Actions.ActOnCXXExitDeclInitializer(getCurScope(), ThisDecl);
+ ExitScope();
+ }
+
+ if (Init.isInvalid()) {
+ SmallVector<tok::TokenKind, 2> StopTokens;
+ StopTokens.push_back(tok::comma);
+ if (D.getContext() == Declarator::ForContext)
+ StopTokens.push_back(tok::r_paren);
+ SkipUntil(StopTokens, StopAtSemi | StopBeforeMatch);
+ Actions.ActOnInitializerError(ThisDecl);
+ } else
+ Actions.AddInitializerToDecl(ThisDecl, Init.get(),
+ /*DirectInit=*/false, TypeContainsAuto);
+ }
+ } else if (Tok.is(tok::l_paren)) {
+ // Parse C++ direct initializer: '(' expression-list ')'
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ ExprVector Exprs;
+ CommaLocsTy CommaLocs;
+
+ if (getLangOpts().CPlusPlus && D.getCXXScopeSpec().isSet()) {
+ EnterScope(0);
+ Actions.ActOnCXXEnterDeclInitializer(getCurScope(), ThisDecl);
+ }
+
+ if (ParseExpressionList(Exprs, CommaLocs, [&] {
+ Actions.CodeCompleteConstructor(getCurScope(),
+ cast<VarDecl>(ThisDecl)->getType()->getCanonicalTypeInternal(),
+ ThisDecl->getLocation(), Exprs);
+ })) {
+ Actions.ActOnInitializerError(ThisDecl);
+ SkipUntil(tok::r_paren, StopAtSemi);
+
+ if (getLangOpts().CPlusPlus && D.getCXXScopeSpec().isSet()) {
+ Actions.ActOnCXXExitDeclInitializer(getCurScope(), ThisDecl);
+ ExitScope();
+ }
+ } else {
+ // Match the ')'.
+ T.consumeClose();
+
+ assert(!Exprs.empty() && Exprs.size()-1 == CommaLocs.size() &&
+ "Unexpected number of commas!");
+
+ if (getLangOpts().CPlusPlus && D.getCXXScopeSpec().isSet()) {
+ Actions.ActOnCXXExitDeclInitializer(getCurScope(), ThisDecl);
+ ExitScope();
+ }
+
+ ExprResult Initializer = Actions.ActOnParenListExpr(T.getOpenLocation(),
+ T.getCloseLocation(),
+ Exprs);
+ Actions.AddInitializerToDecl(ThisDecl, Initializer.get(),
+ /*DirectInit=*/true, TypeContainsAuto);
+ }
+ } else if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace) &&
+ (!CurParsedObjCImpl || !D.isFunctionDeclarator())) {
+ // Parse C++0x braced-init-list.
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+
+ if (D.getCXXScopeSpec().isSet()) {
+ EnterScope(0);
+ Actions.ActOnCXXEnterDeclInitializer(getCurScope(), ThisDecl);
+ }
+
+ ExprResult Init(ParseBraceInitializer());
+
+ if (D.getCXXScopeSpec().isSet()) {
+ Actions.ActOnCXXExitDeclInitializer(getCurScope(), ThisDecl);
+ ExitScope();
+ }
+
+ if (Init.isInvalid()) {
+ Actions.ActOnInitializerError(ThisDecl);
+ } else
+ Actions.AddInitializerToDecl(ThisDecl, Init.get(),
+ /*DirectInit=*/true, TypeContainsAuto);
+
+ } else {
+ Actions.ActOnUninitializedDecl(ThisDecl, TypeContainsAuto);
+ }
+
+ Actions.FinalizeDeclaration(ThisDecl);
+
+ return ThisDecl;
+}
+
+/// ParseSpecifierQualifierList
+/// specifier-qualifier-list:
+/// type-specifier specifier-qualifier-list[opt]
+/// type-qualifier specifier-qualifier-list[opt]
+/// [GNU] attributes specifier-qualifier-list[opt]
+///
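+/// e.g. the 'const unsigned' in 'sizeof(const unsigned)' is parsed here as
+/// part of a type-name (an illustrative example).
+///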
+void Parser::ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS,
+ DeclSpecContext DSC) {
+ /// specifier-qualifier-list is a subset of declaration-specifiers. Just
+ /// parse declaration-specifiers and complain about extra stuff.
+ /// TODO: diagnose attribute-specifiers and alignment-specifiers.
+ ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS, DSC);
+
+ // Validate declspec for type-name.
+ unsigned Specs = DS.getParsedSpecifiers();
+ if (isTypeSpecifier(DSC) && !DS.hasTypeSpecifier()) {
+ Diag(Tok, diag::err_expected_type);
+ DS.SetTypeSpecError();
+ } else if (Specs == DeclSpec::PQ_None && !DS.hasAttributes()) {
+ Diag(Tok, diag::err_typename_requires_specqual);
+ if (!DS.hasTypeSpecifier())
+ DS.SetTypeSpecError();
+ }
+
+ // Issue diagnostic and remove storage class if present.
+ if (Specs & DeclSpec::PQ_StorageClassSpecifier) {
+ if (DS.getStorageClassSpecLoc().isValid())
+ Diag(DS.getStorageClassSpecLoc(),diag::err_typename_invalid_storageclass);
+ else
+ Diag(DS.getThreadStorageClassSpecLoc(),
+ diag::err_typename_invalid_storageclass);
+ DS.ClearStorageClassSpecs();
+ }
+
+ // Issue diagnostic and remove function specifier if present.
+ if (Specs & DeclSpec::PQ_FunctionSpecifier) {
+ if (DS.isInlineSpecified())
+ Diag(DS.getInlineSpecLoc(), diag::err_typename_invalid_functionspec);
+ if (DS.isVirtualSpecified())
+ Diag(DS.getVirtualSpecLoc(), diag::err_typename_invalid_functionspec);
+ if (DS.isExplicitSpecified())
+ Diag(DS.getExplicitSpecLoc(), diag::err_typename_invalid_functionspec);
+ DS.ClearFunctionSpecs();
+ }
+
+ // Issue diagnostic and remove constexpr specifier if present.
+ if (DS.isConstexprSpecified() && DSC != DSC_condition) {
+ Diag(DS.getConstexprSpecLoc(), diag::err_typename_invalid_constexpr);
+ DS.ClearConstexprSpec();
+ }
+}
+
+/// isValidAfterIdentifierInDeclaratorAfterDeclSpec - Return true if the
+/// specified token is valid after the identifier in a declarator which
+/// immediately follows the declspec. For example, these things are valid:
+///
+/// int x [ 4]; // direct-declarator
+/// int x ( int y); // direct-declarator
+/// int(int x ) // direct-declarator
+/// int x ; // simple-declaration
+/// int x = 17; // init-declarator-list
+/// int x , y; // init-declarator-list
+/// int x __asm__ ("foo"); // init-declarator-list
+/// int x : 4; // struct-declarator
+/// int x { 5}; // C++'0x unified initializers
+///
+/// This is not, because 'x' does not immediately follow the declspec (though
+/// ')' happens to be valid anyway).
+/// int (x)
+///
+static bool isValidAfterIdentifierInDeclarator(const Token &T) {
+ return T.isOneOf(tok::l_square, tok::l_paren, tok::r_paren, tok::semi,
+ tok::comma, tok::equal, tok::kw_asm, tok::l_brace,
+ tok::colon);
+}
+
+/// ParseImplicitInt - This method is called when we have an non-typename
+/// identifier in a declspec (which normally terminates the decl spec) when
+/// the declspec has no type specifier. In this case, the declspec is either
+/// malformed or is "implicit int" (in K&R and C89).
+///
+/// This method handles diagnosing this prettily and returns false if the
+/// declspec is done being processed. If it recovers and thinks there may be
+/// other pieces of declspec after it, it returns true.
+///
+bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
+ const ParsedTemplateInfo &TemplateInfo,
+ AccessSpecifier AS, DeclSpecContext DSC,
+ ParsedAttributesWithRange &Attrs) {
+ assert(Tok.is(tok::identifier) && "should have identifier");
+
+ SourceLocation Loc = Tok.getLocation();
+ // If we see an identifier that is not a type name, we normally would
+ // parse it as the identifer being declared. However, when a typename
+ // is typo'd or the definition is not included, this will incorrectly
+ // parse the typename as the identifier name and fall over misparsing
+ // later parts of the diagnostic.
+ //
+ // As such, we try to do some look-ahead in cases where this would
+ // otherwise be an "implicit-int" case to see if this is invalid. For
+ // example: "static foo_t x = 4;" In this case, if we parsed foo_t as
+ // an identifier with implicit int, we'd get a parse error because the
+ // next token is obviously invalid for a type. Parse these as a case
+ // with an invalid type specifier.
+ assert(!DS.hasTypeSpecifier() && "Type specifier checked above");
+
+ // Since we know that this is either implicit int (which is rare) or an
+ // error, do lookahead to try to do better recovery. This never applies
+ // within a type specifier. Outside of C++, we allow this even if the
+ // language doesn't "officially" support implicit int -- we support
+ // implicit int as an extension in C99 and C11.
+ if (!isTypeSpecifier(DSC) && !getLangOpts().CPlusPlus &&
+ isValidAfterIdentifierInDeclarator(NextToken())) {
+ // If this token is valid for implicit int, e.g. "static x = 4", then
+ // we just avoid eating the identifier, so it will be parsed as the
+ // identifier in the declarator.
+ return false;
+ }
+
+ if (getLangOpts().CPlusPlus &&
+ DS.getStorageClassSpec() == DeclSpec::SCS_auto) {
+ // Don't require a type specifier if we have the 'auto' storage class
+ // specifier in C++98 -- we'll promote it to a type specifier.
+ if (SS)
+ AnnotateScopeToken(*SS, /*IsNewAnnotation*/false);
+ return false;
+ }
+
+ // Otherwise, if we don't consume this token, we are going to emit an
+ // error anyway. Try to recover from various common problems. Check
+ // to see if this was a reference to a tag name without a tag specified.
+ // This is a common problem in C (saying 'foo' instead of 'struct foo').
+ //
+ // C++ doesn't need this, and isTagName doesn't take SS.
+ if (SS == nullptr) {
+ const char *TagName = nullptr, *FixitTagName = nullptr;
+ tok::TokenKind TagKind = tok::unknown;
+
+ switch (Actions.isTagName(*Tok.getIdentifierInfo(), getCurScope())) {
+ default: break;
+ case DeclSpec::TST_enum:
+ TagName="enum" ; FixitTagName = "enum " ; TagKind=tok::kw_enum ;break;
+ case DeclSpec::TST_union:
+ TagName="union" ; FixitTagName = "union " ;TagKind=tok::kw_union ;break;
+ case DeclSpec::TST_struct:
+ TagName="struct"; FixitTagName = "struct ";TagKind=tok::kw_struct;break;
+ case DeclSpec::TST_interface:
+ TagName="__interface"; FixitTagName = "__interface ";
+ TagKind=tok::kw___interface;break;
+ case DeclSpec::TST_class:
+ TagName="class" ; FixitTagName = "class " ;TagKind=tok::kw_class ;break;
+ }
+
+ if (TagName) {
+ IdentifierInfo *TokenName = Tok.getIdentifierInfo();
+ LookupResult R(Actions, TokenName, SourceLocation(),
+ Sema::LookupOrdinaryName);
+
+ Diag(Loc, diag::err_use_of_tag_name_without_tag)
+ << TokenName << TagName << getLangOpts().CPlusPlus
+ << FixItHint::CreateInsertion(Tok.getLocation(), FixitTagName);
+
+ if (Actions.LookupParsedName(R, getCurScope(), SS)) {
+ for (LookupResult::iterator I = R.begin(), IEnd = R.end();
+ I != IEnd; ++I)
+ Diag((*I)->getLocation(), diag::note_decl_hiding_tag_type)
+ << TokenName << TagName;
+ }
+
+ // Parse this as a tag as if the missing tag were present.
+ if (TagKind == tok::kw_enum)
+ ParseEnumSpecifier(Loc, DS, TemplateInfo, AS, DSC_normal);
+ else
+ ParseClassSpecifier(TagKind, Loc, DS, TemplateInfo, AS,
+ /*EnteringContext*/ false, DSC_normal, Attrs);
+ return true;
+ }
+ }
+
+ // Determine whether this identifier could plausibly be the name of something
+ // being declared (with a missing type).
+ if (!isTypeSpecifier(DSC) &&
+ (!SS || DSC == DSC_top_level || DSC == DSC_class)) {
+ // Look ahead to the next token to try to figure out what this declaration
+ // was supposed to be.
+ switch (NextToken().getKind()) {
+ case tok::l_paren: {
+ // static x(4); // 'x' is not a type
+ // x(int n); // 'x' is not a type
+ // x (*p)[]; // 'x' is a type
+ //
+ // Since we're in an error case, we can afford to perform a tentative
+ // parse to determine which case we're in.
+ TentativeParsingAction PA(*this);
+ ConsumeToken();
+ TPResult TPR = TryParseDeclarator(/*mayBeAbstract*/false);
+ PA.Revert();
+
+ if (TPR != TPResult::False) {
+ // The identifier is followed by a parenthesized declarator.
+ // It's supposed to be a type.
+ break;
+ }
+
+ // If we're in a context where we could be declaring a constructor,
+ // check whether this is a constructor declaration with a bogus name.
+ if (DSC == DSC_class || (DSC == DSC_top_level && SS)) {
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (Actions.isCurrentClassNameTypo(II, SS)) {
+ Diag(Loc, diag::err_constructor_bad_name)
+ << Tok.getIdentifierInfo() << II
+ << FixItHint::CreateReplacement(Tok.getLocation(), II->getName());
+ Tok.setIdentifierInfo(II);
+ }
+ }
+ // Fall through.
+ }
+ case tok::comma:
+ case tok::equal:
+ case tok::kw_asm:
+ case tok::l_brace:
+ case tok::l_square:
+ case tok::semi:
+ // This looks like a variable or function declaration. The type is
+ // probably missing. We're done parsing decl-specifiers.
+ if (SS)
+ AnnotateScopeToken(*SS, /*IsNewAnnotation*/false);
+ return false;
+
+ default:
+ // This is probably supposed to be a type. This includes cases like:
+ // int f(itn);
+ // struct S { unsinged : 4; };
+ break;
+ }
+ }
+
+ // This is almost certainly an invalid type name. Let Sema emit a diagnostic
+ // and attempt to recover.
+ ParsedType T;
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ Actions.DiagnoseUnknownTypeName(II, Loc, getCurScope(), SS, T,
+ getLangOpts().CPlusPlus &&
+ NextToken().is(tok::less));
+ if (T) {
+ // The action has suggested that the type T could be used. Set that as
+ // the type in the declaration specifiers, consume the would-be type
+ // name token, and we're done.
+ const char *PrevSpec;
+ unsigned DiagID;
+ DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec, DiagID, T,
+ Actions.getASTContext().getPrintingPolicy());
+ DS.SetRangeEnd(Tok.getLocation());
+ ConsumeToken();
+ // There may be other declaration specifiers after this.
+ return true;
+ } else if (II != Tok.getIdentifierInfo()) {
+ // If no type was suggested, the identifier was typo-corrected to a keyword.
+ Tok.setKind(II->getTokenID());
+ // There may be other declaration specifiers after this.
+ return true;
+ }
+
+ // Otherwise, the action had no suggestion for us. Mark this as an error.
+ DS.SetTypeSpecError();
+ DS.SetRangeEnd(Tok.getLocation());
+ ConsumeToken();
+
+ // TODO: Could inject an invalid typedef decl in an enclosing scope to
+ // avoid rippling error messages on subsequent uses of the same type,
+ // could be useful if #include was forgotten.
+ return false;
+}
+
+/// \brief Determine the declaration specifier context from the declarator
+/// context.
+///
+/// \param Context the declarator context, which is one of the
+/// Declarator::TheContext enumerator values.
+Parser::DeclSpecContext
+Parser::getDeclSpecContextFromDeclaratorContext(unsigned Context) {
+ if (Context == Declarator::MemberContext)
+ return DSC_class;
+ if (Context == Declarator::FileContext)
+ return DSC_top_level;
+ if (Context == Declarator::TemplateTypeArgContext)
+ return DSC_template_type_arg;
+ if (Context == Declarator::TrailingReturnContext)
+ return DSC_trailing;
+ if (Context == Declarator::AliasDeclContext ||
+ Context == Declarator::AliasTemplateContext)
+ return DSC_alias_declaration;
+ return DSC_normal;
+}
+
+/// ParseAlignArgument - Parse the argument to an alignment-specifier.
+///
+/// FIXME: Simply returns an alignof() expression if the argument is a
+/// type. Ideally, the type should be propagated directly into Sema.
+///
+/// [C11] type-id
+/// [C11] constant-expression
+/// [C++0x] type-id ...[opt]
+/// [C++0x] assignment-expression ...[opt]
+ExprResult Parser::ParseAlignArgument(SourceLocation Start,
+ SourceLocation &EllipsisLoc) {
+ ExprResult ER;
+ if (isTypeIdInParens()) {
+ SourceLocation TypeLoc = Tok.getLocation();
+ ParsedType Ty = ParseTypeName().get();
+ SourceRange TypeRange(Start, Tok.getLocation());
+ ER = Actions.ActOnUnaryExprOrTypeTraitExpr(TypeLoc, UETT_AlignOf, true,
+ Ty.getAsOpaquePtr(), TypeRange);
+ } else
+ ER = ParseConstantExpression();
+
+ if (getLangOpts().CPlusPlus11)
+ TryConsumeToken(tok::ellipsis, EllipsisLoc);
+
+ return ER;
+}
+
+/// ParseAlignmentSpecifier - Parse an alignment-specifier, and add the
+/// attribute to Attrs.
+///
+/// alignment-specifier:
+/// [C11] '_Alignas' '(' type-id ')'
+/// [C11] '_Alignas' '(' constant-expression ')'
+/// [C++11] 'alignas' '(' type-id ...[opt] ')'
+/// [C++11] 'alignas' '(' assignment-expression ...[opt] ')'
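+ ///
+ /// For example (illustrative):
+ ///   _Alignas(16) char buf[64];     // [C11] constant-expression form
+ ///   alignas(double) char buf2[8];  // [C++11] type-id form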
+void Parser::ParseAlignmentSpecifier(ParsedAttributes &Attrs,
+ SourceLocation *EndLoc) {
+ assert(Tok.isOneOf(tok::kw_alignas, tok::kw__Alignas) &&
+ "Not an alignment-specifier!");
+
+ IdentifierInfo *KWName = Tok.getIdentifierInfo();
+ SourceLocation KWLoc = ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume())
+ return;
+
+ SourceLocation EllipsisLoc;
+ ExprResult ArgExpr = ParseAlignArgument(T.getOpenLocation(), EllipsisLoc);
+ if (ArgExpr.isInvalid()) {
+ T.skipToEnd();
+ return;
+ }
+
+ T.consumeClose();
+ if (EndLoc)
+ *EndLoc = T.getCloseLocation();
+
+ ArgsVector ArgExprs;
+ ArgExprs.push_back(ArgExpr.get());
+ Attrs.addNew(KWName, KWLoc, nullptr, KWLoc, ArgExprs.data(), 1,
+ AttributeList::AS_Keyword, EllipsisLoc);
+}
+
+/// Determine whether we're looking at something that might be a declarator
+/// in a simple-declaration. If it can't possibly be a declarator, maybe
+/// diagnose a missing semicolon after a prior tag definition in the decl
+/// specifier.
+///
+/// \return \c true if an error occurred and this can't be any kind of
+/// declaration.
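+ ///
+ /// For example (illustrative; 'MyType' is a hypothetical name), this aims
+ /// to recover from input such as:
+ ///   struct S { int n; }   // note the missing ';'
+ ///   MyType *ptr;          // 'MyType' '*' cannot follow a declarator-id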
+bool
+Parser::DiagnoseMissingSemiAfterTagDefinition(DeclSpec &DS, AccessSpecifier AS,
+ DeclSpecContext DSContext,
+ LateParsedAttrList *LateAttrs) {
+ assert(DS.hasTagDefinition() && "shouldn't call this");
+
+ bool EnteringContext = (DSContext == DSC_class || DSContext == DSC_top_level);
+
+ if (getLangOpts().CPlusPlus &&
+ Tok.isOneOf(tok::identifier, tok::coloncolon, tok::kw_decltype,
+ tok::annot_template_id) &&
+ TryAnnotateCXXScopeToken(EnteringContext)) {
+ SkipMalformedDecl();
+ return true;
+ }
+
+ bool HasScope = Tok.is(tok::annot_cxxscope);
+ // Make a copy in case GetLookAheadToken invalidates the result of NextToken.
+ Token AfterScope = HasScope ? NextToken() : Tok;
+
+ // Determine whether the following tokens could possibly be a
+ // declarator.
+ bool MightBeDeclarator = true;
+ if (Tok.isOneOf(tok::kw_typename, tok::annot_typename)) {
+ // A declarator-id can't start with 'typename'.
+ MightBeDeclarator = false;
+ } else if (AfterScope.is(tok::annot_template_id)) {
+ // If we have a type expressed as a template-id, this cannot be a
+ // declarator-id (such a type cannot be redeclared in a simple-declaration).
+ TemplateIdAnnotation *Annot =
+ static_cast<TemplateIdAnnotation *>(AfterScope.getAnnotationValue());
+ if (Annot->Kind == TNK_Type_template)
+ MightBeDeclarator = false;
+ } else if (AfterScope.is(tok::identifier)) {
+ const Token &Next = HasScope ? GetLookAheadToken(2) : NextToken();
+
+ // These tokens cannot come after the declarator-id in a
+ // simple-declaration, and are likely to come after a type-specifier.
+ if (Next.isOneOf(tok::star, tok::amp, tok::ampamp, tok::identifier,
+ tok::annot_cxxscope, tok::coloncolon)) {
+ // Missing a semicolon.
+ MightBeDeclarator = false;
+ } else if (HasScope) {
+ // If the declarator-id has a scope specifier, it must redeclare a
+ // previously-declared entity. If that's a type (and this is not a
+ // typedef), that's an error.
+ CXXScopeSpec SS;
+ Actions.RestoreNestedNameSpecifierAnnotation(
+ Tok.getAnnotationValue(), Tok.getAnnotationRange(), SS);
+ IdentifierInfo *Name = AfterScope.getIdentifierInfo();
+ Sema::NameClassification Classification = Actions.ClassifyName(
+ getCurScope(), SS, Name, AfterScope.getLocation(), Next,
+ /*IsAddressOfOperand*/false);
+ switch (Classification.getKind()) {
+ case Sema::NC_Error:
+ SkipMalformedDecl();
+ return true;
+
+ case Sema::NC_Keyword:
+ case Sema::NC_NestedNameSpecifier:
+ llvm_unreachable("typo correction and nested name specifiers not "
+ "possible here");
+
+ case Sema::NC_Type:
+ case Sema::NC_TypeTemplate:
+ // Not a previously-declared non-type entity.
+ MightBeDeclarator = false;
+ break;
+
+ case Sema::NC_Unknown:
+ case Sema::NC_Expression:
+ case Sema::NC_VarTemplate:
+ case Sema::NC_FunctionTemplate:
+ // Might be a redeclaration of a prior entity.
+ break;
+ }
+ }
+ }
+
+ if (MightBeDeclarator)
+ return false;
+
+ const PrintingPolicy &PPol = Actions.getASTContext().getPrintingPolicy();
+ Diag(PP.getLocForEndOfToken(DS.getRepAsDecl()->getLocEnd()),
+ diag::err_expected_after)
+ << DeclSpec::getSpecifierName(DS.getTypeSpecType(), PPol) << tok::semi;
+
+ // Try to recover from the typo, by dropping the tag definition and parsing
+ // the problematic tokens as a type.
+ //
+ // FIXME: Split the DeclSpec into pieces for the standalone
+ // declaration and pieces for the following declaration, instead
+ // of assuming that all the other pieces attach to the new declaration,
+ // and call ParsedFreeStandingDeclSpec as appropriate.
+ DS.ClearTypeSpecType();
+ ParsedTemplateInfo NotATemplate;
+ ParseDeclarationSpecifiers(DS, NotATemplate, AS, DSContext, LateAttrs);
+ return false;
+}
+
+/// ParseDeclarationSpecifiers
+/// declaration-specifiers: [C99 6.7]
+/// storage-class-specifier declaration-specifiers[opt]
+/// type-specifier declaration-specifiers[opt]
+/// [C99] function-specifier declaration-specifiers[opt]
+/// [C11] alignment-specifier declaration-specifiers[opt]
+/// [GNU] attributes declaration-specifiers[opt]
+/// [Clang] '__module_private__' declaration-specifiers[opt]
+/// [ObjC1] '__kindof' declaration-specifiers[opt]
+///
+/// storage-class-specifier: [C99 6.7.1]
+/// 'typedef'
+/// 'extern'
+/// 'static'
+/// 'auto'
+/// 'register'
+/// [C++] 'mutable'
+/// [C++11] 'thread_local'
+/// [C11] '_Thread_local'
+/// [GNU] '__thread'
+/// function-specifier: [C99 6.7.4]
+/// [C99] 'inline'
+/// [C++] 'virtual'
+/// [C++] 'explicit'
+/// [OpenCL] '__kernel'
+/// 'friend': [C++ dcl.friend]
+/// 'constexpr': [C++0x dcl.constexpr]
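+ ///
+ /// For example (illustrative), the loop below consumes one specifier per
+ /// iteration from a declaration such as:
+ ///   static const unsigned long n = 0;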
+void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
+ const ParsedTemplateInfo &TemplateInfo,
+ AccessSpecifier AS,
+ DeclSpecContext DSContext,
+ LateParsedAttrList *LateAttrs) {
+ if (DS.getSourceRange().isInvalid()) {
+ // Start the range at the current token but make the end of the range
+ // invalid. This will make the entire range invalid unless we successfully
+ // consume a token.
+ DS.SetRangeStart(Tok.getLocation());
+ DS.SetRangeEnd(SourceLocation());
+ }
+
+ bool EnteringContext = (DSContext == DSC_class || DSContext == DSC_top_level);
+ bool AttrsLastTime = false;
+ ParsedAttributesWithRange attrs(AttrFactory);
+ // We use Sema's policy to get bool macros right.
+ const PrintingPolicy &Policy = Actions.getPrintingPolicy();
+ while (1) {
+ bool isInvalid = false;
+ bool isStorageClass = false;
+ const char *PrevSpec = nullptr;
+ unsigned DiagID = 0;
+
+ // HACK: MSVC doesn't consider _Atomic to be a keyword and its STL
+ // implementation for VS2013 uses _Atomic as an identifier for one of the
+ // classes in <atomic>.
+ //
+ // A typedef declaration containing _Atomic<...> is among the places where
+ // the class is used. If we are currently parsing such a declaration, treat
+ // the token as an identifier.
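+ // e.g. (illustrative; the typedef name is hypothetical):
+ //   typedef _Atomic<int> my_atomic_int;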
+ if (getLangOpts().MSVCCompat && Tok.is(tok::kw__Atomic) &&
+ DS.getStorageClassSpec() == clang::DeclSpec::SCS_typedef &&
+ !DS.hasTypeSpecifier() && GetLookAheadToken(1).is(tok::less))
+ Tok.setKind(tok::identifier);
+
+ SourceLocation Loc = Tok.getLocation();
+
+ switch (Tok.getKind()) {
+ default:
+ DoneWithDeclSpec:
+ if (!AttrsLastTime)
+ ProhibitAttributes(attrs);
+ else {
+ // Reject C++11 attributes that appertain to decl specifiers as
+ // we don't support any C++11 attributes that appertain to decl
+ // specifiers. This also conforms to what g++ 4.8 is doing.
+ ProhibitCXX11Attributes(attrs);
+
+ DS.takeAttributesFrom(attrs);
+ }
+
+ // If this is not a declaration specifier token, we're done reading decl
+ // specifiers. First verify that DeclSpec's are consistent.
+ DS.Finish(Actions, Policy);
+ return;
+
+ case tok::l_square:
+ case tok::kw_alignas:
+ if (!getLangOpts().CPlusPlus11 || !isCXX11AttributeSpecifier())
+ goto DoneWithDeclSpec;
+
+ ProhibitAttributes(attrs);
+ // FIXME: It would be good to recover by accepting the attributes,
+ // but attempting to do that now would cause serious
+ // madness in terms of diagnostics.
+ attrs.clear();
+ attrs.Range = SourceRange();
+
+ ParseCXX11Attributes(attrs);
+ AttrsLastTime = true;
+ continue;
+
+ case tok::code_completion: {
+ Sema::ParserCompletionContext CCC = Sema::PCC_Namespace;
+ if (DS.hasTypeSpecifier()) {
+ bool AllowNonIdentifiers
+ = (getCurScope()->getFlags() & (Scope::ControlScope |
+ Scope::BlockScope |
+ Scope::TemplateParamScope |
+ Scope::FunctionPrototypeScope |
+ Scope::AtCatchScope)) == 0;
+ bool AllowNestedNameSpecifiers
+ = DSContext == DSC_top_level ||
+ (DSContext == DSC_class && DS.isFriendSpecified());
+
+ Actions.CodeCompleteDeclSpec(getCurScope(), DS,
+ AllowNonIdentifiers,
+ AllowNestedNameSpecifiers);
+ return cutOffParsing();
+ }
+
+ if (getCurScope()->getFnParent() || getCurScope()->getBlockParent())
+ CCC = Sema::PCC_LocalDeclarationSpecifiers;
+ else if (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate)
+ CCC = DSContext == DSC_class? Sema::PCC_MemberTemplate
+ : Sema::PCC_Template;
+ else if (DSContext == DSC_class)
+ CCC = Sema::PCC_Class;
+ else if (CurParsedObjCImpl)
+ CCC = Sema::PCC_ObjCImplementation;
+
+ Actions.CodeCompleteOrdinaryName(getCurScope(), CCC);
+ return cutOffParsing();
+ }
+
+ case tok::coloncolon: // ::foo::bar
+ // C++ scope specifier. Annotate and loop, or bail out on error.
+ if (TryAnnotateCXXScopeToken(EnteringContext)) {
+ if (!DS.hasTypeSpecifier())
+ DS.SetTypeSpecError();
+ goto DoneWithDeclSpec;
+ }
+ if (Tok.is(tok::coloncolon)) // ::new or ::delete
+ goto DoneWithDeclSpec;
+ continue;
+
+ case tok::annot_cxxscope: {
+ if (DS.hasTypeSpecifier() || DS.isTypeAltiVecVector())
+ goto DoneWithDeclSpec;
+
+ CXXScopeSpec SS;
+ Actions.RestoreNestedNameSpecifierAnnotation(Tok.getAnnotationValue(),
+ Tok.getAnnotationRange(),
+ SS);
+
+ // We are looking for a qualified typename.
+ Token Next = NextToken();
+ if (Next.is(tok::annot_template_id) &&
+ static_cast<TemplateIdAnnotation *>(Next.getAnnotationValue())
+ ->Kind == TNK_Type_template) {
+ // We have a qualified template-id, e.g., N::A<int>
+
+ // C++ [class.qual]p2:
+ // In a lookup in which the constructor is an acceptable lookup
+ // result and the nested-name-specifier nominates a class C:
+ //
+ // - if the name specified after the
+ // nested-name-specifier, when looked up in C, is the
+ // injected-class-name of C (Clause 9), or
+ //
+ // - if the name specified after the nested-name-specifier
+ // is the same as the identifier or the
+ // simple-template-id's template-name in the last
+ // component of the nested-name-specifier,
+ //
+ // the name is instead considered to name the constructor of
+ // class C.
+ //
+ // Thus, if the template-name is actually the constructor
+ // name, then the code is ill-formed; this interpretation is
+ // reinforced by the NAD status of core issue 635.
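+ // For example (illustrative, ill-formed):
+ //   template <typename T> struct A { A(); };
+ //   template <typename T> A<T>::A<T>() {} // 'A<T>' names the constructor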
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Next);
+ if ((DSContext == DSC_top_level || DSContext == DSC_class) &&
+ TemplateId->Name &&
+ Actions.isCurrentClassName(*TemplateId->Name, getCurScope(), &SS)) {
+ if (isConstructorDeclarator(/*Unqualified*/false)) {
+ // The user meant this to be an out-of-line constructor
+ // definition, but template arguments are not allowed
+ // there. Just allow this as a constructor; we'll
+ // complain about it later.
+ goto DoneWithDeclSpec;
+ }
+
+ // The user meant this to name a type, but it actually names
+ // a constructor with some extraneous template
+ // arguments. Complain, then parse it as a type as the user
+ // intended.
+ Diag(TemplateId->TemplateNameLoc,
+ diag::err_out_of_line_template_id_type_names_constructor)
+ << TemplateId->Name << 0 /* template name */;
+ }
+
+ DS.getTypeSpecScope() = SS;
+ ConsumeToken(); // The C++ scope.
+ assert(Tok.is(tok::annot_template_id) &&
+ "ParseOptionalCXXScopeSpecifier not working");
+ AnnotateTemplateIdTokenAsType();
+ continue;
+ }
+
+ if (Next.is(tok::annot_typename)) {
+ DS.getTypeSpecScope() = SS;
+ ConsumeToken(); // The C++ scope.
+ if (Tok.getAnnotationValue()) {
+ ParsedType T = getTypeAnnotation(Tok);
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename,
+ Tok.getAnnotationEndLoc(),
+ PrevSpec, DiagID, T, Policy);
+ if (isInvalid)
+ break;
+ } else
+ DS.SetTypeSpecError();
+ DS.SetRangeEnd(Tok.getAnnotationEndLoc());
+ ConsumeToken(); // The typename
+ }
+
+ if (Next.isNot(tok::identifier))
+ goto DoneWithDeclSpec;
+
+ // If we're in a context where the identifier could be a class name,
+ // check whether this is a constructor declaration.
+ if ((DSContext == DSC_top_level || DSContext == DSC_class) &&
+ Actions.isCurrentClassName(*Next.getIdentifierInfo(), getCurScope(),
+ &SS)) {
+ if (isConstructorDeclarator(/*Unqualified*/false))
+ goto DoneWithDeclSpec;
+
+ // As noted in C++ [class.qual]p2 (cited above), when the name
+ // of the class is qualified in a context where it could name
+ // a constructor, it's a constructor name. However, we've
+ // looked at the declarator, and the user probably meant this
+ // to be a type. Complain that it isn't supposed to be treated
+ // as a type, then proceed to parse it as a type.
+ Diag(Next.getLocation(),
+ diag::err_out_of_line_template_id_type_names_constructor)
+ << Next.getIdentifierInfo() << 1 /* type */;
+ }
+
+ ParsedType TypeRep = Actions.getTypeName(*Next.getIdentifierInfo(),
+ Next.getLocation(),
+ getCurScope(), &SS,
+ false, false, ParsedType(),
+ /*IsCtorOrDtorName=*/false,
+ /*NonTrivialSourceInfo=*/true);
+
+ // If the referenced identifier is not a type, then this declspec is
+ // erroneous: we already checked that it has no type specifier, and
+ // C++ doesn't have implicit int. Diagnose it as a typo w.r.t. the
+ // typename.
+ if (!TypeRep) {
+ ConsumeToken(); // Eat the scope spec so the identifier is current.
+ ParsedAttributesWithRange Attrs(AttrFactory);
+ if (ParseImplicitInt(DS, &SS, TemplateInfo, AS, DSContext, Attrs)) {
+ if (!Attrs.empty()) {
+ AttrsLastTime = true;
+ attrs.takeAllFrom(Attrs);
+ }
+ continue;
+ }
+ goto DoneWithDeclSpec;
+ }
+
+ DS.getTypeSpecScope() = SS;
+ ConsumeToken(); // The C++ scope.
+
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
+ DiagID, TypeRep, Policy);
+ if (isInvalid)
+ break;
+
+ DS.SetRangeEnd(Tok.getLocation());
+ ConsumeToken(); // The typename.
+
+ continue;
+ }
+
+ case tok::annot_typename: {
+ // If we've previously seen a tag definition, we were almost surely
+ // missing a semicolon after it.
+ if (DS.hasTypeSpecifier() && DS.hasTagDefinition())
+ goto DoneWithDeclSpec;
+
+ if (Tok.getAnnotationValue()) {
+ ParsedType T = getTypeAnnotation(Tok);
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
+ DiagID, T, Policy);
+ } else
+ DS.SetTypeSpecError();
+
+ if (isInvalid)
+ break;
+
+ DS.SetRangeEnd(Tok.getAnnotationEndLoc());
+ ConsumeToken(); // The typename
+
+ continue;
+ }
+
+ case tok::kw___is_signed:
+ // GNU libstdc++ 4.4 uses __is_signed as an identifier, but Clang
+ // typically treats it as a trait. If we see __is_signed as it appears
+ // in libstdc++, e.g.,
+ //
+ // static const bool __is_signed;
+ //
+ // then treat __is_signed as an identifier rather than as a keyword.
+ if (DS.getTypeSpecType() == TST_bool &&
+ DS.getTypeQualifiers() == DeclSpec::TQ_const &&
+ DS.getStorageClassSpec() == DeclSpec::SCS_static)
+ TryKeywordIdentFallback(true);
+
+ // We're done with the declaration-specifiers.
+ goto DoneWithDeclSpec;
+
+ // typedef-name
+ case tok::kw___super:
+ case tok::kw_decltype:
+ case tok::identifier: {
+ // This identifier can only be a typedef name if we haven't already seen
+ // a type-specifier. Without this check we misparse:
+ // typedef int X; struct Y { short X; }; as 'short int'.
+ if (DS.hasTypeSpecifier())
+ goto DoneWithDeclSpec;
+
+ // In C++, check to see if this is a scope specifier like foo::bar::, if
+ // so handle it as such. This is important for ctor parsing.
+ if (getLangOpts().CPlusPlus) {
+ if (TryAnnotateCXXScopeToken(EnteringContext)) {
+ DS.SetTypeSpecError();
+ goto DoneWithDeclSpec;
+ }
+ if (!Tok.is(tok::identifier))
+ continue;
+ }
+
+ // Check for need to substitute AltiVec keyword tokens.
+ if (TryAltiVecToken(DS, Loc, PrevSpec, DiagID, isInvalid))
+ break;
+
+ // [AltiVec] 2.2: [If the 'vector' specifier is used] The syntax does not
+ // allow the use of a typedef name as a type specifier.
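+ // e.g. (illustrative): given 'typedef int i32;', the form 'vector i32 v;'
+ // is rejected while 'vector int v;' is fine.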
+ if (DS.isTypeAltiVecVector())
+ goto DoneWithDeclSpec;
+
+ if (DSContext == DSC_objc_method_result && isObjCInstancetype()) {
+ ParsedType TypeRep = Actions.ActOnObjCInstanceType(Loc);
+ assert(TypeRep);
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
+ DiagID, TypeRep, Policy);
+ if (isInvalid)
+ break;
+
+ DS.SetRangeEnd(Loc);
+ ConsumeToken();
+ continue;
+ }
+
+ ParsedType TypeRep =
+ Actions.getTypeName(*Tok.getIdentifierInfo(),
+ Tok.getLocation(), getCurScope());
+
+ // MSVC: If we weren't able to parse a default template argument, and it's
+ // just a simple identifier, create a DependentNameType. This will allow
+ // us to defer the name lookup to template instantiation time, as long as we
+ // forge a NestedNameSpecifier for the current context.
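+ // e.g. (illustrative; 'Undeclared' is not yet declared at this point):
+ //   template <typename T, typename U = Undeclared> struct X {};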
+ if (!TypeRep && DSContext == DSC_template_type_arg &&
+ getLangOpts().MSVCCompat && getCurScope()->isTemplateParamScope()) {
+ TypeRep = Actions.ActOnDelayedDefaultTemplateArg(
+ *Tok.getIdentifierInfo(), Tok.getLocation());
+ }
+
+ // If this is not a typedef name, don't parse it as part of the declspec,
+ // it must be an implicit int or an error.
+ if (!TypeRep) {
+ ParsedAttributesWithRange Attrs(AttrFactory);
+ if (ParseImplicitInt(DS, nullptr, TemplateInfo, AS, DSContext, Attrs)) {
+ if (!Attrs.empty()) {
+ AttrsLastTime = true;
+ attrs.takeAllFrom(Attrs);
+ }
+ continue;
+ }
+ goto DoneWithDeclSpec;
+ }
+
+ // If we're in a context where the identifier could be a class name,
+ // check whether this is a constructor declaration.
+ if (getLangOpts().CPlusPlus && DSContext == DSC_class &&
+ Actions.isCurrentClassName(*Tok.getIdentifierInfo(), getCurScope()) &&
+ isConstructorDeclarator(/*Unqualified*/true))
+ goto DoneWithDeclSpec;
+
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
+ DiagID, TypeRep, Policy);
+ if (isInvalid)
+ break;
+
+ DS.SetRangeEnd(Tok.getLocation());
+ ConsumeToken(); // The identifier
+
+ // Objective-C supports type arguments and protocol references
+ // following an Objective-C object or object pointer
+ // type. Handle either one of them.
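+ // e.g. (illustrative): 'NSArray<NSString *> *a;' or 'id<NSCopying> c;'.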
+ if (Tok.is(tok::less) && getLangOpts().ObjC1) {
+ SourceLocation NewEndLoc;
+ TypeResult NewTypeRep = parseObjCTypeArgsAndProtocolQualifiers(
+ Loc, TypeRep, /*consumeLastToken=*/true,
+ NewEndLoc);
+ if (NewTypeRep.isUsable()) {
+ DS.UpdateTypeRep(NewTypeRep.get());
+ DS.SetRangeEnd(NewEndLoc);
+ }
+ }
+
+ // Need to support trailing type qualifiers (e.g. "id<p> const").
+ // If a type specifier follows, it will be diagnosed elsewhere.
+ continue;
+ }
+
+ // type-name
+ case tok::annot_template_id: {
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ if (TemplateId->Kind != TNK_Type_template) {
+ // This template-id does not refer to a type name, so we're
+ // done with the type-specifiers.
+ goto DoneWithDeclSpec;
+ }
+
+ // If we're in a context where the template-id could be a
+ // constructor name or specialization, check whether this is a
+ // constructor declaration.
+ if (getLangOpts().CPlusPlus && DSContext == DSC_class &&
+ Actions.isCurrentClassName(*TemplateId->Name, getCurScope()) &&
+ isConstructorDeclarator(TemplateId->SS.isEmpty()))
+ goto DoneWithDeclSpec;
+
+ // Turn the template-id annotation token into a type annotation
+ // token, then try again to parse it as a type-specifier.
+ AnnotateTemplateIdTokenAsType();
+ continue;
+ }
+
+ // GNU attributes support.
+ case tok::kw___attribute:
+ ParseGNUAttributes(DS.getAttributes(), nullptr, LateAttrs);
+ continue;
+
+ // Microsoft declspec support.
+ case tok::kw___declspec:
+ ParseMicrosoftDeclSpecs(DS.getAttributes());
+ continue;
+
+ // Microsoft single token adornments.
+ case tok::kw___forceinline: {
+ isInvalid = DS.setFunctionSpecForceInline(Loc, PrevSpec, DiagID);
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = Tok.getLocation();
+ DS.getAttributes().addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc,
+ nullptr, 0, AttributeList::AS_Keyword);
+ break;
+ }
+
+ case tok::kw___sptr:
+ case tok::kw___uptr:
+ case tok::kw___ptr64:
+ case tok::kw___ptr32:
+ case tok::kw___w64:
+ case tok::kw___cdecl:
+ case tok::kw___stdcall:
+ case tok::kw___fastcall:
+ case tok::kw___thiscall:
+ case tok::kw___vectorcall:
+ case tok::kw___unaligned:
+ ParseMicrosoftTypeAttributes(DS.getAttributes());
+ continue;
+
+ // Borland single token adornments.
+ case tok::kw___pascal:
+ ParseBorlandTypeAttributes(DS.getAttributes());
+ continue;
+
+ // OpenCL single token adornments.
+ case tok::kw___kernel:
+ ParseOpenCLAttributes(DS.getAttributes());
+ continue;
+
+ // Nullability type specifiers.
+ case tok::kw__Nonnull:
+ case tok::kw__Nullable:
+ case tok::kw__Null_unspecified:
+ ParseNullabilityTypeSpecifiers(DS.getAttributes());
+ continue;
+
+ // Objective-C 'kindof' types.
+ case tok::kw___kindof:
+ DS.getAttributes().addNew(Tok.getIdentifierInfo(), Loc, nullptr, Loc,
+ nullptr, 0, AttributeList::AS_Keyword);
+ (void)ConsumeToken();
+ continue;
+
+ // storage-class-specifier
+ case tok::kw_typedef:
+ isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_typedef, Loc,
+ PrevSpec, DiagID, Policy);
+ isStorageClass = true;
+ break;
+ case tok::kw_extern:
+ if (DS.getThreadStorageClassSpec() == DeclSpec::TSCS___thread)
+ Diag(Tok, diag::ext_thread_before) << "extern";
+ isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_extern, Loc,
+ PrevSpec, DiagID, Policy);
+ isStorageClass = true;
+ break;
+ case tok::kw___private_extern__:
+ isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_private_extern,
+ Loc, PrevSpec, DiagID, Policy);
+ isStorageClass = true;
+ break;
+ case tok::kw_static:
+ if (DS.getThreadStorageClassSpec() == DeclSpec::TSCS___thread)
+ Diag(Tok, diag::ext_thread_before) << "static";
+ isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_static, Loc,
+ PrevSpec, DiagID, Policy);
+ isStorageClass = true;
+ break;
+ case tok::kw_auto:
+ if (getLangOpts().CPlusPlus11) {
+ if (isKnownToBeTypeSpecifier(GetLookAheadToken(1))) {
+ isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_auto, Loc,
+ PrevSpec, DiagID, Policy);
+ if (!isInvalid)
+ Diag(Tok, diag::ext_auto_storage_class)
+ << FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
+ } else
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_auto, Loc, PrevSpec,
+ DiagID, Policy);
+ } else
+ isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_auto, Loc,
+ PrevSpec, DiagID, Policy);
+ isStorageClass = true;
+ break;
+ case tok::kw___auto_type:
+ Diag(Tok, diag::ext_auto_type);
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_auto_type, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
+ case tok::kw_register:
+ isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_register, Loc,
+ PrevSpec, DiagID, Policy);
+ isStorageClass = true;
+ break;
+ case tok::kw_mutable:
+ isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_mutable, Loc,
+ PrevSpec, DiagID, Policy);
+ isStorageClass = true;
+ break;
+ case tok::kw___thread:
+ isInvalid = DS.SetStorageClassSpecThread(DeclSpec::TSCS___thread, Loc,
+ PrevSpec, DiagID);
+ isStorageClass = true;
+ break;
+ case tok::kw_thread_local:
+ isInvalid = DS.SetStorageClassSpecThread(DeclSpec::TSCS_thread_local, Loc,
+ PrevSpec, DiagID);
+ break;
+ case tok::kw__Thread_local:
+ isInvalid = DS.SetStorageClassSpecThread(DeclSpec::TSCS__Thread_local,
+ Loc, PrevSpec, DiagID);
+ isStorageClass = true;
+ break;
+
+ // function-specifier
+ case tok::kw_inline:
+ isInvalid = DS.setFunctionSpecInline(Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw_virtual:
+ isInvalid = DS.setFunctionSpecVirtual(Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw_explicit:
+ isInvalid = DS.setFunctionSpecExplicit(Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw__Noreturn:
+ if (!getLangOpts().C11)
+ Diag(Loc, diag::ext_c11_noreturn);
+ isInvalid = DS.setFunctionSpecNoreturn(Loc, PrevSpec, DiagID);
+ break;
+
+ // alignment-specifier
+ case tok::kw__Alignas:
+ if (!getLangOpts().C11)
+ Diag(Tok, diag::ext_c11_alignment) << Tok.getName();
+ ParseAlignmentSpecifier(DS.getAttributes());
+ continue;
+
+ // friend
+ case tok::kw_friend:
+ if (DSContext == DSC_class)
+ isInvalid = DS.SetFriendSpec(Loc, PrevSpec, DiagID);
+ else {
+ PrevSpec = ""; // not actually used by the diagnostic
+ DiagID = diag::err_friend_invalid_in_context;
+ isInvalid = true;
+ }
+ break;
+
+ // Modules
+ case tok::kw___module_private__:
+ isInvalid = DS.setModulePrivateSpec(Loc, PrevSpec, DiagID);
+ break;
+
+ // constexpr
+ case tok::kw_constexpr:
+ isInvalid = DS.SetConstexprSpec(Loc, PrevSpec, DiagID);
+ break;
+
+ // concept
+ case tok::kw_concept:
+ isInvalid = DS.SetConceptSpec(Loc, PrevSpec, DiagID);
+ break;
+
+ // type-specifier
+ case tok::kw_short:
+ isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_short, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
+ case tok::kw_long:
+ if (DS.getTypeSpecWidth() != DeclSpec::TSW_long)
+ isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_long, Loc, PrevSpec,
+ DiagID, Policy);
+ else
+ isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_longlong, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
+ case tok::kw___int64:
+ isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_longlong, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
+ case tok::kw_signed:
+ isInvalid = DS.SetTypeSpecSign(DeclSpec::TSS_signed, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw_unsigned:
+ isInvalid = DS.SetTypeSpecSign(DeclSpec::TSS_unsigned, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw__Complex:
+ isInvalid = DS.SetTypeSpecComplex(DeclSpec::TSC_complex, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw__Imaginary:
+ isInvalid = DS.SetTypeSpecComplex(DeclSpec::TSC_imaginary, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw_void:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_void, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
+ case tok::kw_char:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_char, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
+ case tok::kw_int:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
+ case tok::kw___int128:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_int128, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
+ case tok::kw_half:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_half, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
+ case tok::kw_float:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_float, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
+ case tok::kw_double:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_double, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
+ case tok::kw_wchar_t:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_wchar, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
+ case tok::kw_char16_t:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_char16, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
+ case tok::kw_char32_t:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_char32, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
+ case tok::kw_bool:
+ case tok::kw__Bool:
+ if (Tok.is(tok::kw_bool) &&
+ DS.getTypeSpecType() != DeclSpec::TST_unspecified &&
+ DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
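+ // e.g. (illustrative): 'typedef int bool;' reaches this point with
+ // 'int' already recorded as the type specifier.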
+ PrevSpec = ""; // Not used by the diagnostic.
+ DiagID = diag::err_bool_redeclaration;
+ // For better error recovery.
+ Tok.setKind(tok::identifier);
+ isInvalid = true;
+ } else {
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_bool, Loc, PrevSpec,
+ DiagID, Policy);
+ }
+ break;
+ case tok::kw__Decimal32:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_decimal32, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
+ case tok::kw__Decimal64:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_decimal64, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
+ case tok::kw__Decimal128:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_decimal128, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
+ case tok::kw___vector:
+ isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID, Policy);
+ break;
+ case tok::kw___pixel:
+ isInvalid = DS.SetTypeAltiVecPixel(true, Loc, PrevSpec, DiagID, Policy);
+ break;
+ case tok::kw___bool:
+ isInvalid = DS.SetTypeAltiVecBool(true, Loc, PrevSpec, DiagID, Policy);
+ break;
+ case tok::kw___unknown_anytype:
+ isInvalid = DS.SetTypeSpecType(TST_unknown_anytype, Loc,
+ PrevSpec, DiagID, Policy);
+ break;
+
+ // class-specifier:
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw___interface:
+ case tok::kw_union: {
+ tok::TokenKind Kind = Tok.getKind();
+ ConsumeToken();
+
+ // These are attributes following class specifiers.
+ // To produce better diagnostic, we parse them when
+ // parsing class specifier.
+ ParsedAttributesWithRange Attributes(AttrFactory);
+ ParseClassSpecifier(Kind, Loc, DS, TemplateInfo, AS,
+ EnteringContext, DSContext, Attributes);
+
+ // If there are attributes following class specifier,
+ // take them over and handle them here.
+ if (!Attributes.empty()) {
+ AttrsLastTime = true;
+ attrs.takeAllFrom(Attributes);
+ }
+ continue;
+ }
+
+ // enum-specifier:
+ case tok::kw_enum:
+ ConsumeToken();
+ ParseEnumSpecifier(Loc, DS, TemplateInfo, AS, DSContext);
+ continue;
+
+ // cv-qualifier:
+ case tok::kw_const:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_const, Loc, PrevSpec, DiagID,
+ getLangOpts());
+ break;
+ case tok::kw_volatile:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_volatile, Loc, PrevSpec, DiagID,
+ getLangOpts());
+ break;
+ case tok::kw_restrict:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_restrict, Loc, PrevSpec, DiagID,
+ getLangOpts());
+ break;
+
+ // C++ typename-specifier:
+ case tok::kw_typename:
+ if (TryAnnotateTypeOrScopeToken()) {
+ DS.SetTypeSpecError();
+ goto DoneWithDeclSpec;
+ }
+ if (!Tok.is(tok::kw_typename))
+ continue;
+ break;
+
+ // GNU typeof support.
+ case tok::kw_typeof:
+ ParseTypeofSpecifier(DS);
+ continue;
+
+ case tok::annot_decltype:
+ ParseDecltypeSpecifier(DS);
+ continue;
+
+ case tok::kw___underlying_type:
+ ParseUnderlyingTypeSpecifier(DS);
+ continue;
+
+ case tok::kw__Atomic:
+ // C11 6.7.2.4/4:
+ // If the _Atomic keyword is immediately followed by a left parenthesis,
+ // it is interpreted as a type specifier (with a type name), not as a
+ // type qualifier.
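+ // e.g. (illustrative):
+ //   _Atomic(int) a; // type specifier
+ //   _Atomic int b;  // type qualifier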
+ if (NextToken().is(tok::l_paren)) {
+ ParseAtomicSpecifier(DS);
+ continue;
+ }
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_atomic, Loc, PrevSpec, DiagID,
+ getLangOpts());
+ break;
+
+ // OpenCL qualifiers:
+ case tok::kw___generic:
+ // generic address space is introduced only in OpenCL v2.0
+ // see OpenCL C Spec v2.0 s6.5.5
+ if (Actions.getLangOpts().OpenCLVersion < 200) {
+ DiagID = diag::err_opencl_unknown_type_specifier;
+ PrevSpec = Tok.getIdentifierInfo()->getNameStart();
+ isInvalid = true;
+ break;
+ }
+ // Fall through to handle __generic like the other OpenCL qualifiers.
+ case tok::kw___private:
+ case tok::kw___global:
+ case tok::kw___local:
+ case tok::kw___constant:
+ case tok::kw___read_only:
+ case tok::kw___write_only:
+ case tok::kw___read_write:
+ ParseOpenCLQualifiers(DS.getAttributes());
+ break;
+
+ case tok::less:
+ // GCC ObjC supports types like "<SomeProtocol>" as a synonym for
+ // "id<SomeProtocol>". This is hopelessly old fashioned and dangerous,
+ // but we support it.
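+ // e.g. (illustrative): '<NSCopying> obj;' is treated like
+ // 'id<NSCopying> obj;'.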
+ if (DS.hasTypeSpecifier() || !getLangOpts().ObjC1)
+ goto DoneWithDeclSpec;
+
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc;
+ TypeResult Type = parseObjCProtocolQualifierType(EndLoc);
+ if (Type.isUsable()) {
+ if (DS.SetTypeSpecType(DeclSpec::TST_typename, StartLoc, StartLoc,
+ PrevSpec, DiagID, Type.get(),
+ Actions.getASTContext().getPrintingPolicy()))
+ Diag(StartLoc, DiagID) << PrevSpec;
+
+ DS.SetRangeEnd(EndLoc);
+ } else {
+ DS.SetTypeSpecError();
+ }
+
+ // Need to support trailing type qualifiers (e.g. "id<p> const").
+ // If a type specifier follows, it will be diagnosed elsewhere.
+ continue;
+ }
+ // If the specifier wasn't legal, issue a diagnostic.
+ if (isInvalid) {
+ assert(PrevSpec && "Method did not return previous specifier!");
+ assert(DiagID);
+
+ if (DiagID == diag::ext_duplicate_declspec)
+ Diag(Tok, DiagID)
+ << PrevSpec << FixItHint::CreateRemoval(Tok.getLocation());
+ else if (DiagID == diag::err_opencl_unknown_type_specifier)
+ Diag(Tok, DiagID) << PrevSpec << isStorageClass;
+ else
+ Diag(Tok, DiagID) << PrevSpec;
+ }
+
+ DS.SetRangeEnd(Tok.getLocation());
+ if (DiagID != diag::err_bool_redeclaration)
+ ConsumeToken();
+
+ AttrsLastTime = false;
+ }
+}
+
+/// ParseStructDeclaration - Parse a struct declaration without the terminating
+/// semicolon.
+///
+/// struct-declaration:
+/// specifier-qualifier-list struct-declarator-list
+/// [GNU] __extension__ struct-declaration
+/// [GNU] specifier-qualifier-list
+/// struct-declarator-list:
+/// struct-declarator
+/// struct-declarator-list ',' struct-declarator
+/// [GNU] struct-declarator-list ',' attributes[opt] struct-declarator
+/// struct-declarator:
+/// declarator
+/// [GNU] declarator attributes[opt]
+/// declarator[opt] ':' constant-expression
+/// [GNU] declarator[opt] ':' constant-expression attributes[opt]
+///
+void Parser::ParseStructDeclaration(
+ ParsingDeclSpec &DS,
+ llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback) {
+
+ if (Tok.is(tok::kw___extension__)) {
+ // __extension__ silences extension warnings in the subexpression.
+ ExtensionRAIIObject O(Diags); // Use RAII to do this.
+ ConsumeToken();
+ return ParseStructDeclaration(DS, FieldsCallback);
+ }
+
+ // Parse the common specifier-qualifiers-list piece.
+ ParseSpecifierQualifierList(DS);
+
+ // If there are no declarators, this is a free-standing declaration
+ // specifier. Let the actions module cope with it.
+ if (Tok.is(tok::semi)) {
+ Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS_none,
+ DS);
+ DS.complete(TheDecl);
+ return;
+ }
+
+ // Read struct-declarators until we find the semicolon.
+ bool FirstDeclarator = true;
+ SourceLocation CommaLoc;
+ while (1) {
+ ParsingFieldDeclarator DeclaratorInfo(*this, DS);
+ DeclaratorInfo.D.setCommaLoc(CommaLoc);
+
+ // Attributes are only allowed here on successive declarators.
+ if (!FirstDeclarator)
+ MaybeParseGNUAttributes(DeclaratorInfo.D);
+
+ /// struct-declarator: declarator
+ /// struct-declarator: declarator[opt] ':' constant-expression
+ if (Tok.isNot(tok::colon)) {
+ // Don't parse FOO:BAR as if it were a typo for FOO::BAR.
+ ColonProtectionRAIIObject X(*this);
+ ParseDeclarator(DeclaratorInfo.D);
+ } else
+ DeclaratorInfo.D.SetIdentifier(nullptr, Tok.getLocation());
+
+ if (TryConsumeToken(tok::colon)) {
+ ExprResult Res(ParseConstantExpression());
+ if (Res.isInvalid())
+ SkipUntil(tok::semi, StopBeforeMatch);
+ else
+ DeclaratorInfo.BitfieldSize = Res.get();
+ }
+
+ // If attributes exist after the declarator, parse them.
+ MaybeParseGNUAttributes(DeclaratorInfo.D);
+
+ // We're done with this declarator; invoke the callback.
+ FieldsCallback(DeclaratorInfo);
+
+ // If we don't have a comma, it is either the end of the list (a ';')
+ // or an error, bail out.
+ if (!TryConsumeToken(tok::comma, CommaLoc))
+ return;
+
+ FirstDeclarator = false;
+ }
+}
+
+/// ParseStructUnionBody
+/// struct-contents:
+/// struct-declaration-list
+/// [EXT] empty
+/// [GNU] "struct-declaration-list" without terminatoring ';'
+/// struct-declaration-list:
+/// struct-declaration
+/// struct-declaration-list struct-declaration
+/// [OBC] '@' 'defs' '(' class-name ')'
+///
+void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
+ unsigned TagType, Decl *TagDecl) {
+ PrettyDeclStackTraceEntry CrashInfo(Actions, TagDecl, RecordLoc,
+ "parsing struct/union body");
+ assert(!getLangOpts().CPlusPlus && "C++ declarations not supported");
+
+ BalancedDelimiterTracker T(*this, tok::l_brace);
+ if (T.consumeOpen())
+ return;
+
+ ParseScope StructScope(this, Scope::ClassScope|Scope::DeclScope);
+ Actions.ActOnTagStartDefinition(getCurScope(), TagDecl);
+
+ SmallVector<Decl *, 32> FieldDecls;
+
+ // While we still have something to read, read the declarations in the struct.
+ while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
+ Tok.isNot(tok::eof)) {
+ // Each iteration of this loop reads one struct-declaration.
+
+ // Check for extraneous top-level semicolon.
+ if (Tok.is(tok::semi)) {
+ ConsumeExtraSemi(InsideStruct, TagType);
+ continue;
+ }
+
+ // Parse _Static_assert declaration.
+ if (Tok.is(tok::kw__Static_assert)) {
+ SourceLocation DeclEnd;
+ ParseStaticAssertDeclaration(DeclEnd);
+ continue;
+ }
+
+ if (Tok.is(tok::annot_pragma_pack)) {
+ HandlePragmaPack();
+ continue;
+ }
+
+ if (Tok.is(tok::annot_pragma_align)) {
+ HandlePragmaAlign();
+ continue;
+ }
+
+ if (Tok.is(tok::annot_pragma_openmp)) {
+ // The result can be ignored, because it must always be empty.
+ auto Res = ParseOpenMPDeclarativeDirective();
+ assert(!Res);
+ // Silence possible warnings.
+ (void)Res;
+ continue;
+ }
+ if (!Tok.is(tok::at)) {
+ auto CFieldCallback = [&](ParsingFieldDeclarator &FD) {
+ // Install the declarator into the current TagDecl.
+ Decl *Field =
+ Actions.ActOnField(getCurScope(), TagDecl,
+ FD.D.getDeclSpec().getSourceRange().getBegin(),
+ FD.D, FD.BitfieldSize);
+ FieldDecls.push_back(Field);
+ FD.complete(Field);
+ };
+
+ // Parse all the comma separated declarators.
+ ParsingDeclSpec DS(*this);
+ ParseStructDeclaration(DS, CFieldCallback);
+ } else { // Handle @defs
+ ConsumeToken();
+ if (!Tok.isObjCAtKeyword(tok::objc_defs)) {
+ Diag(Tok, diag::err_unexpected_at);
+ SkipUntil(tok::semi);
+ continue;
+ }
+ ConsumeToken();
+ ExpectAndConsume(tok::l_paren);
+ if (!Tok.is(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ SkipUntil(tok::semi);
+ continue;
+ }
+ SmallVector<Decl *, 16> Fields;
+ Actions.ActOnDefs(getCurScope(), TagDecl, Tok.getLocation(),
+ Tok.getIdentifierInfo(), Fields);
+ FieldDecls.insert(FieldDecls.end(), Fields.begin(), Fields.end());
+ ConsumeToken();
+ ExpectAndConsume(tok::r_paren);
+ }
+
+ if (TryConsumeToken(tok::semi))
+ continue;
+
+ if (Tok.is(tok::r_brace)) {
+ ExpectAndConsume(tok::semi, diag::ext_expected_semi_decl_list);
+ break;
+ }
+
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_decl_list);
+ // Skip to end of block or statement to avoid ext-warning on extra ';'.
+ SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
+ // If we stopped at a ';', eat it.
+ TryConsumeToken(tok::semi);
+ }
+
+ T.consumeClose();
+
+ ParsedAttributes attrs(AttrFactory);
+ // If attributes exist after struct contents, parse them.
+ MaybeParseGNUAttributes(attrs);
+
+ Actions.ActOnFields(getCurScope(),
+ RecordLoc, TagDecl, FieldDecls,
+ T.getOpenLocation(), T.getCloseLocation(),
+ attrs.getList());
+ StructScope.Exit();
+ Actions.ActOnTagFinishDefinition(getCurScope(), TagDecl,
+ T.getCloseLocation());
+}
+
+/// ParseEnumSpecifier
+/// enum-specifier: [C99 6.7.2.2]
+/// 'enum' identifier[opt] '{' enumerator-list '}'
+ /// [C99/C++] 'enum' identifier[opt] '{' enumerator-list ',' '}'
+/// [GNU] 'enum' attributes[opt] identifier[opt] '{' enumerator-list ',' [opt]
+/// '}' attributes[opt]
+/// [MS] 'enum' __declspec[opt] identifier[opt] '{' enumerator-list ',' [opt]
+/// '}'
+/// 'enum' identifier
+/// [GNU] 'enum' attributes[opt] identifier
+///
+/// [C++11] enum-head '{' enumerator-list[opt] '}'
+/// [C++11] enum-head '{' enumerator-list ',' '}'
+///
+/// enum-head: [C++11]
+/// enum-key attribute-specifier-seq[opt] identifier[opt] enum-base[opt]
+/// enum-key attribute-specifier-seq[opt] nested-name-specifier
+/// identifier enum-base[opt]
+///
+/// enum-key: [C++11]
+/// 'enum'
+/// 'enum' 'class'
+/// 'enum' 'struct'
+///
+/// enum-base: [C++11]
+/// ':' type-specifier-seq
+///
+/// [C++] elaborated-type-specifier:
+/// [C++] 'enum' '::'[opt] nested-name-specifier[opt] identifier
+///
+void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
+ const ParsedTemplateInfo &TemplateInfo,
+ AccessSpecifier AS, DeclSpecContext DSC) {
+ // Parse the tag portion of this.
+ if (Tok.is(tok::code_completion)) {
+ // Code completion for an enum name.
+ Actions.CodeCompleteTag(getCurScope(), DeclSpec::TST_enum);
+ return cutOffParsing();
+ }
+
+ // If attributes exist after tag, parse them.
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseGNUAttributes(attrs);
+ MaybeParseCXX11Attributes(attrs);
+ MaybeParseMicrosoftDeclSpecs(attrs);
+
+ SourceLocation ScopedEnumKWLoc;
+ bool IsScopedUsingClassTag = false;
+
+ // In C++11, recognize 'enum class' and 'enum struct'.
+ if (Tok.isOneOf(tok::kw_class, tok::kw_struct)) {
+ Diag(Tok, getLangOpts().CPlusPlus11 ? diag::warn_cxx98_compat_scoped_enum
+ : diag::ext_scoped_enum);
+ IsScopedUsingClassTag = Tok.is(tok::kw_class);
+ ScopedEnumKWLoc = ConsumeToken();
+
+ // Attributes are not allowed between these keywords. Diagnose,
+ // but then just treat them like they appeared in the right place.
+ ProhibitAttributes(attrs);
+
+ // They are allowed afterwards, though.
+ MaybeParseGNUAttributes(attrs);
+ MaybeParseCXX11Attributes(attrs);
+ MaybeParseMicrosoftDeclSpecs(attrs);
+ }
+
+ // C++11 [temp.explicit]p12:
+ // The usual access controls do not apply to names used to specify
+ // explicit instantiations.
+ // We extend this to also cover explicit specializations. Note that
+ // we don't suppress if this turns out to be an elaborated type
+ // specifier.
+ bool shouldDelayDiagsInTag =
+ (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation ||
+ TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization);
+ SuppressAccessChecks diagsFromTag(*this, shouldDelayDiagsInTag);
+
+ // Enum definitions should not be parsed in a trailing-return-type.
+ bool AllowDeclaration = DSC != DSC_trailing;
+
+ bool AllowFixedUnderlyingType = AllowDeclaration &&
+ (getLangOpts().CPlusPlus11 || getLangOpts().MicrosoftExt ||
+ getLangOpts().ObjC2);
+
+ CXXScopeSpec &SS = DS.getTypeSpecScope();
+ if (getLangOpts().CPlusPlus) {
+ // "enum foo : bar;" is not a potential typo for "enum foo::bar;"
+ // if a fixed underlying type is allowed.
+ ColonProtectionRAIIObject X(*this, AllowFixedUnderlyingType);
+
+ CXXScopeSpec Spec;
+ if (ParseOptionalCXXScopeSpecifier(Spec, ParsedType(),
+ /*EnteringContext=*/true))
+ return;
+
+ if (Spec.isSet() && Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ if (Tok.isNot(tok::l_brace)) {
+ // Has no name and is not a definition.
+ // Skip the rest of this declarator, up until the comma or semicolon.
+ SkipUntil(tok::comma, StopAtSemi);
+ return;
+ }
+ }
+
+ SS = Spec;
+ }
+
+ // Must have either 'enum name' or 'enum {...}'.
+ if (Tok.isNot(tok::identifier) && Tok.isNot(tok::l_brace) &&
+ !(AllowFixedUnderlyingType && Tok.is(tok::colon))) {
+ Diag(Tok, diag::err_expected_either) << tok::identifier << tok::l_brace;
+
+ // Skip the rest of this declarator, up until the comma or semicolon.
+ SkipUntil(tok::comma, StopAtSemi);
+ return;
+ }
+
+ // If an identifier is present, consume and remember it.
+ IdentifierInfo *Name = nullptr;
+ SourceLocation NameLoc;
+ if (Tok.is(tok::identifier)) {
+ Name = Tok.getIdentifierInfo();
+ NameLoc = ConsumeToken();
+ }
+
+ if (!Name && ScopedEnumKWLoc.isValid()) {
+ // C++0x 7.2p2: The optional identifier shall not be omitted in the
+ // declaration of a scoped enumeration.
+ Diag(Tok, diag::err_scoped_enum_missing_identifier);
+ ScopedEnumKWLoc = SourceLocation();
+ IsScopedUsingClassTag = false;
+ }
+
+ // Okay, end the suppression area. We'll decide whether to emit the
+ // diagnostics in a second.
+ if (shouldDelayDiagsInTag)
+ diagsFromTag.done();
+
+ TypeResult BaseType;
+
+ // Parse the fixed underlying type.
+ bool CanBeBitfield = getCurScope()->getFlags() & Scope::ClassScope;
+ if (AllowFixedUnderlyingType && Tok.is(tok::colon)) {
+ bool PossibleBitfield = false;
+ if (CanBeBitfield) {
+ // If we're in class scope, this can either be an enum declaration with
+ // an underlying type, or a declaration of a bitfield member. We try to
+ // use a simple disambiguation scheme first to catch the common cases
+ // (integer literal, sizeof); if it's still ambiguous, we then consider
+ // anything that's a simple-type-specifier followed by '(' as an
+ // expression. This suffices because function types are not valid
+ // underlying types anyway.
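+ // For example (illustrative), with 'enum E' previously declared:
+ //   enum E : 4;          // unnamed bit-field of type E, width 4
+ //   enum E2 : int { X }; // enumeration with a fixed underlying type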
+ EnterExpressionEvaluationContext Unevaluated(Actions,
+ Sema::ConstantEvaluated);
+ TPResult TPR = isExpressionOrTypeSpecifierSimple(NextToken().getKind());
+ // If the next token starts an expression, we know we're parsing a
+ // bit-field. This is the common case.
+ if (TPR == TPResult::True)
+ PossibleBitfield = true;
+ // If the next token starts a type-specifier-seq, it may be either
+ // a fixed underlying type or the start of a function-style cast in C++;
+ // look ahead one more token to see if it's obvious that we have a
+ // fixed underlying type.
+ else if (TPR == TPResult::False &&
+ GetLookAheadToken(2).getKind() == tok::semi) {
+ // Consume the ':'.
+ ConsumeToken();
+ } else {
+ // We have the start of a type-specifier-seq, so we have to perform
+ // tentative parsing to determine whether we have an expression or a
+ // type.
+ TentativeParsingAction TPA(*this);
+
+ // Consume the ':'.
+ ConsumeToken();
+
+ // If we see a type specifier followed by an open-brace, we have an
+ // ambiguity between an underlying type and a C++11 braced
+ // function-style cast. Resolve this by always treating it as an
+ // underlying type.
+ // FIXME: The standard is not entirely clear on how to disambiguate in
+ // this case.
+ if ((getLangOpts().CPlusPlus &&
+ isCXXDeclarationSpecifier(TPResult::True) != TPResult::True) ||
+ (!getLangOpts().CPlusPlus && !isDeclarationSpecifier(true))) {
+ // We'll parse this as a bitfield later.
+ PossibleBitfield = true;
+ TPA.Revert();
+ } else {
+ // We have a type-specifier-seq.
+ TPA.Commit();
+ }
+ }
+ } else {
+ // Consume the ':'.
+ ConsumeToken();
+ }
+
+ if (!PossibleBitfield) {
+ SourceRange Range;
+ BaseType = ParseTypeName(&Range);
+
+ if (getLangOpts().CPlusPlus11) {
+ Diag(StartLoc, diag::warn_cxx98_compat_enum_fixed_underlying_type);
+ } else if (!getLangOpts().ObjC2) {
+ if (getLangOpts().CPlusPlus)
+ Diag(StartLoc, diag::ext_cxx11_enum_fixed_underlying_type) << Range;
+ else
+ Diag(StartLoc, diag::ext_c_enum_fixed_underlying_type) << Range;
+ }
+ }
+ }
+
+ // There are four options here. If we have 'friend enum foo;' then this is a
+ // friend declaration, and cannot have an accompanying definition. If we have
+ // 'enum foo;', then this is a forward declaration. If we have
+ // 'enum foo {...' then this is a definition. Otherwise we have something
+ // like 'enum foo xyz', a reference.
+ //
+ // This is needed to handle stuff like this right (C99 6.7.2.3p11):
+ // enum foo {..}; void bar() { enum foo; } <- new foo in bar.
+ // enum foo {..}; void bar() { enum foo x; } <- use of old foo.
+ //
+ Sema::TagUseKind TUK;
+ if (!AllowDeclaration) {
+ TUK = Sema::TUK_Reference;
+ } else if (Tok.is(tok::l_brace)) {
+ if (DS.isFriendSpecified()) {
+ Diag(Tok.getLocation(), diag::err_friend_decl_defines_type)
+ << SourceRange(DS.getFriendSpecLoc());
+ ConsumeBrace();
+ SkipUntil(tok::r_brace, StopAtSemi);
+ TUK = Sema::TUK_Friend;
+ } else {
+ TUK = Sema::TUK_Definition;
+ }
+ } else if (!isTypeSpecifier(DSC) &&
+ (Tok.is(tok::semi) ||
+ (Tok.isAtStartOfLine() &&
+ !isValidAfterTypeSpecifier(CanBeBitfield)))) {
+ TUK = DS.isFriendSpecified() ? Sema::TUK_Friend : Sema::TUK_Declaration;
+ if (Tok.isNot(tok::semi)) {
+ // A semicolon was missing after this declaration. Diagnose and recover.
+ ExpectAndConsume(tok::semi, diag::err_expected_after, "enum");
+ PP.EnterToken(Tok);
+ Tok.setKind(tok::semi);
+ }
+ } else {
+ TUK = Sema::TUK_Reference;
+ }
+
+ // If this is an elaborated type specifier, and we delayed
+ // diagnostics before, just merge them into the current pool.
+ if (TUK == Sema::TUK_Reference && shouldDelayDiagsInTag) {
+ diagsFromTag.redelay();
+ }
+
+ MultiTemplateParamsArg TParams;
+ if (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate &&
+ TUK != Sema::TUK_Reference) {
+ if (!getLangOpts().CPlusPlus11 || !SS.isSet()) {
+ // Skip the rest of this declarator, up until the comma or semicolon.
+ Diag(Tok, diag::err_enum_template);
+ SkipUntil(tok::comma, StopAtSemi);
+ return;
+ }
+
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation) {
+ // Enumerations can't be explicitly instantiated.
+ DS.SetTypeSpecError();
+ Diag(StartLoc, diag::err_explicit_instantiation_enum);
+ return;
+ }
+
+ assert(TemplateInfo.TemplateParams && "no template parameters");
+ TParams = MultiTemplateParamsArg(TemplateInfo.TemplateParams->data(),
+ TemplateInfo.TemplateParams->size());
+ }
+
+ if (TUK == Sema::TUK_Reference)
+ ProhibitAttributes(attrs);
+
+ if (!Name && TUK != Sema::TUK_Definition) {
+ Diag(Tok, diag::err_enumerator_unnamed_no_def);
+
+ // Skip the rest of this declarator, up until the comma or semicolon.
+ SkipUntil(tok::comma, StopAtSemi);
+ return;
+ }
+
+ handleDeclspecAlignBeforeClassKey(attrs, DS, TUK);
+
+ Sema::SkipBodyInfo SkipBody;
+ if (!Name && TUK == Sema::TUK_Definition && Tok.is(tok::l_brace) &&
+ NextToken().is(tok::identifier))
+ SkipBody = Actions.shouldSkipAnonEnumBody(getCurScope(),
+ NextToken().getIdentifierInfo(),
+ NextToken().getLocation());
+
+ bool Owned = false;
+ bool IsDependent = false;
+ const char *PrevSpec = nullptr;
+ unsigned DiagID;
+ Decl *TagDecl = Actions.ActOnTag(getCurScope(), DeclSpec::TST_enum, TUK,
+ StartLoc, SS, Name, NameLoc, attrs.getList(),
+ AS, DS.getModulePrivateSpecLoc(), TParams,
+ Owned, IsDependent, ScopedEnumKWLoc,
+ IsScopedUsingClassTag, BaseType,
+ DSC == DSC_type_specifier, &SkipBody);
+
+ if (SkipBody.ShouldSkip) {
+ assert(TUK == Sema::TUK_Definition && "can only skip a definition");
+
+ BalancedDelimiterTracker T(*this, tok::l_brace);
+ T.consumeOpen();
+ T.skipToEnd();
+
+ if (DS.SetTypeSpecType(DeclSpec::TST_enum, StartLoc,
+ NameLoc.isValid() ? NameLoc : StartLoc,
+ PrevSpec, DiagID, TagDecl, Owned,
+ Actions.getASTContext().getPrintingPolicy()))
+ Diag(StartLoc, DiagID) << PrevSpec;
+ return;
+ }
+
+ if (IsDependent) {
+ // This enum has a dependent nested-name-specifier. Handle it as a
+ // dependent tag.
+ if (!Name) {
+ DS.SetTypeSpecError();
+ Diag(Tok, diag::err_expected_type_name_after_typename);
+ return;
+ }
+
+ TypeResult Type = Actions.ActOnDependentTag(
+ getCurScope(), DeclSpec::TST_enum, TUK, SS, Name, StartLoc, NameLoc);
+ if (Type.isInvalid()) {
+ DS.SetTypeSpecError();
+ return;
+ }
+
+ if (DS.SetTypeSpecType(DeclSpec::TST_typename, StartLoc,
+ NameLoc.isValid() ? NameLoc : StartLoc,
+ PrevSpec, DiagID, Type.get(),
+ Actions.getASTContext().getPrintingPolicy()))
+ Diag(StartLoc, DiagID) << PrevSpec;
+
+ return;
+ }
+
+ if (!TagDecl) {
+ // The action failed to produce an enumeration tag. If this is a
+ // definition, consume the entire definition.
+ if (Tok.is(tok::l_brace) && TUK != Sema::TUK_Reference) {
+ ConsumeBrace();
+ SkipUntil(tok::r_brace, StopAtSemi);
+ }
+
+ DS.SetTypeSpecError();
+ return;
+ }
+
+ if (Tok.is(tok::l_brace) && TUK != Sema::TUK_Reference)
+ ParseEnumBody(StartLoc, TagDecl);
+
+ if (DS.SetTypeSpecType(DeclSpec::TST_enum, StartLoc,
+ NameLoc.isValid() ? NameLoc : StartLoc,
+ PrevSpec, DiagID, TagDecl, Owned,
+ Actions.getASTContext().getPrintingPolicy()))
+ Diag(StartLoc, DiagID) << PrevSpec;
+}
+
+/// ParseEnumBody - Parse a {} enclosed enumerator-list.
+/// enumerator-list:
+/// enumerator
+/// enumerator-list ',' enumerator
+/// enumerator:
+/// enumeration-constant attributes[opt]
+/// enumeration-constant attributes[opt] '=' constant-expression
+/// enumeration-constant:
+/// identifier
+///
+void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
+ // Enter the scope of the enum body and start the definition.
+ ParseScope EnumScope(this, Scope::DeclScope | Scope::EnumScope);
+ Actions.ActOnTagStartDefinition(getCurScope(), EnumDecl);
+
+ BalancedDelimiterTracker T(*this, tok::l_brace);
+ T.consumeOpen();
+
+ // C does not allow an empty enumerator-list, C++ does [dcl.enum].
+ if (Tok.is(tok::r_brace) && !getLangOpts().CPlusPlus)
+ Diag(Tok, diag::error_empty_enum);
+
+ SmallVector<Decl *, 32> EnumConstantDecls;
+ SmallVector<SuppressAccessChecks, 32> EnumAvailabilityDiags;
+
+ Decl *LastEnumConstDecl = nullptr;
+
+ // Parse the enumerator-list.
+ while (Tok.isNot(tok::r_brace)) {
+ // Parse enumerator. If failed, try skipping till the start of the next
+ // enumerator definition.
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok.getLocation(), diag::err_expected) << tok::identifier;
+ if (SkipUntil(tok::comma, tok::r_brace, StopBeforeMatch) &&
+ TryConsumeToken(tok::comma))
+ continue;
+ break;
+ }
+ IdentifierInfo *Ident = Tok.getIdentifierInfo();
+ SourceLocation IdentLoc = ConsumeToken();
+
+ // If attributes exist after the enumerator, parse them.
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseGNUAttributes(attrs);
+ ProhibitAttributes(attrs); // GNU-style attributes are prohibited.
+ if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier()) {
+ if (!getLangOpts().CPlusPlus1z)
+ Diag(Tok.getLocation(), diag::warn_cxx14_compat_attribute)
+ << 1 /*enumerator*/;
+ ParseCXX11Attributes(attrs);
+ }
+
+ SourceLocation EqualLoc;
+ ExprResult AssignedVal;
+ EnumAvailabilityDiags.emplace_back(*this);
+
+ if (TryConsumeToken(tok::equal, EqualLoc)) {
+ AssignedVal = ParseConstantExpression();
+ if (AssignedVal.isInvalid())
+ SkipUntil(tok::comma, tok::r_brace, StopBeforeMatch);
+ }
+
+ // Install the enumerator constant into EnumDecl.
+ Decl *EnumConstDecl = Actions.ActOnEnumConstant(getCurScope(), EnumDecl,
+ LastEnumConstDecl,
+ IdentLoc, Ident,
+ attrs.getList(), EqualLoc,
+ AssignedVal.get());
+ EnumAvailabilityDiags.back().done();
+
+ EnumConstantDecls.push_back(EnumConstDecl);
+ LastEnumConstDecl = EnumConstDecl;
+
+ if (Tok.is(tok::identifier)) {
+ // We're missing a comma between enumerators.
+ SourceLocation Loc = PP.getLocForEndOfToken(PrevTokLocation);
+ Diag(Loc, diag::err_enumerator_list_missing_comma)
+ << FixItHint::CreateInsertion(Loc, ", ");
+ continue;
+ }
+
+    // The enumerator definition must be finished; only a comma or r_brace is
+    // allowed here.
+ SourceLocation CommaLoc;
+ if (Tok.isNot(tok::r_brace) && !TryConsumeToken(tok::comma, CommaLoc)) {
+ if (EqualLoc.isValid())
+ Diag(Tok.getLocation(), diag::err_expected_either) << tok::r_brace
+ << tok::comma;
+ else
+ Diag(Tok.getLocation(), diag::err_expected_end_of_enumerator);
+ if (SkipUntil(tok::comma, tok::r_brace, StopBeforeMatch)) {
+ if (TryConsumeToken(tok::comma, CommaLoc))
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ // If comma is followed by r_brace, emit appropriate warning.
+ if (Tok.is(tok::r_brace) && CommaLoc.isValid()) {
+ if (!getLangOpts().C99 && !getLangOpts().CPlusPlus11)
+ Diag(CommaLoc, getLangOpts().CPlusPlus ?
+ diag::ext_enumerator_list_comma_cxx :
+ diag::ext_enumerator_list_comma_c)
+ << FixItHint::CreateRemoval(CommaLoc);
+ else if (getLangOpts().CPlusPlus11)
+ Diag(CommaLoc, diag::warn_cxx98_compat_enumerator_list_comma)
+ << FixItHint::CreateRemoval(CommaLoc);
+ break;
+ }
+ }
+
+ // Eat the }.
+ T.consumeClose();
+
+ // If attributes exist after the identifier list, parse them.
+ ParsedAttributes attrs(AttrFactory);
+ MaybeParseGNUAttributes(attrs);
+
+ Actions.ActOnEnumBody(StartLoc, T.getOpenLocation(), T.getCloseLocation(),
+ EnumDecl, EnumConstantDecls,
+ getCurScope(),
+ attrs.getList());
+
+ // Now handle enum constant availability diagnostics.
+ assert(EnumConstantDecls.size() == EnumAvailabilityDiags.size());
+ for (size_t i = 0, e = EnumConstantDecls.size(); i != e; ++i) {
+ ParsingDeclRAIIObject PD(*this, ParsingDeclRAIIObject::NoParent);
+ EnumAvailabilityDiags[i].redelay();
+ PD.complete(EnumConstantDecls[i]);
+ }
+
+ EnumScope.Exit();
+ Actions.ActOnTagFinishDefinition(getCurScope(), EnumDecl,
+ T.getCloseLocation());
+
+ // The next token must be valid after an enum definition. If not, a ';'
+ // was probably forgotten.
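+  // For example, given "enum E { A } void f();", we diagnose the missing ';'
+  // and then recover as if "enum E { A }; void f();" had been written.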
+ bool CanBeBitfield = getCurScope()->getFlags() & Scope::ClassScope;
+ if (!isValidAfterTypeSpecifier(CanBeBitfield)) {
+ ExpectAndConsume(tok::semi, diag::err_expected_after, "enum");
+ // Push this token back into the preprocessor and change our current token
+ // to ';' so that the rest of the code recovers as though there were an
+ // ';' after the definition.
+ PP.EnterToken(Tok);
+ Tok.setKind(tok::semi);
+ }
+}
+
+/// isTypeQualifier - Return true if the current token could be the start of a
+/// type-qualifier-list.
+bool Parser::isTypeQualifier() const {
+ switch (Tok.getKind()) {
+ default: return false;
+ // type-qualifier
+ case tok::kw_const:
+ case tok::kw_volatile:
+ case tok::kw_restrict:
+ case tok::kw___private:
+ case tok::kw___local:
+ case tok::kw___global:
+ case tok::kw___constant:
+ case tok::kw___generic:
+ case tok::kw___read_only:
+ case tok::kw___read_write:
+ case tok::kw___write_only:
+ return true;
+ }
+}
+
+/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
+/// is definitely a type-specifier. Return false if it isn't part of a type
+/// specifier or if we're not sure.
+bool Parser::isKnownToBeTypeSpecifier(const Token &Tok) const {
+ switch (Tok.getKind()) {
+ default: return false;
+ // type-specifiers
+ case tok::kw_short:
+ case tok::kw_long:
+ case tok::kw___int64:
+ case tok::kw___int128:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw__Complex:
+ case tok::kw__Imaginary:
+ case tok::kw_void:
+ case tok::kw_char:
+ case tok::kw_wchar_t:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+ case tok::kw_int:
+ case tok::kw_half:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_bool:
+ case tok::kw__Bool:
+ case tok::kw__Decimal32:
+ case tok::kw__Decimal64:
+ case tok::kw__Decimal128:
+ case tok::kw___vector:
+
+ // struct-or-union-specifier (C99) or class-specifier (C++)
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw___interface:
+ case tok::kw_union:
+ // enum-specifier
+ case tok::kw_enum:
+
+ // typedef-name
+ case tok::annot_typename:
+ return true;
+ }
+}
+
+/// isTypeSpecifierQualifier - Return true if the current token could be the
+/// start of a specifier-qualifier-list.
+bool Parser::isTypeSpecifierQualifier() {
+ switch (Tok.getKind()) {
+ default: return false;
+
+ case tok::identifier: // foo::bar
+ if (TryAltiVecVectorToken())
+ return true;
+ // Fall through.
+ case tok::kw_typename: // typename T::type
+ // Annotate typenames and C++ scope specifiers. If we get one, just
+ // recurse to handle whatever we get.
+ if (TryAnnotateTypeOrScopeToken())
+ return true;
+ if (Tok.is(tok::identifier))
+ return false;
+ return isTypeSpecifierQualifier();
+
+ case tok::coloncolon: // ::foo::bar
+ if (NextToken().is(tok::kw_new) || // ::new
+ NextToken().is(tok::kw_delete)) // ::delete
+ return false;
+
+ if (TryAnnotateTypeOrScopeToken())
+ return true;
+ return isTypeSpecifierQualifier();
+
+ // GNU attributes support.
+ case tok::kw___attribute:
+ // GNU typeof support.
+ case tok::kw_typeof:
+
+ // type-specifiers
+ case tok::kw_short:
+ case tok::kw_long:
+ case tok::kw___int64:
+ case tok::kw___int128:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw__Complex:
+ case tok::kw__Imaginary:
+ case tok::kw_void:
+ case tok::kw_char:
+ case tok::kw_wchar_t:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+ case tok::kw_int:
+ case tok::kw_half:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_bool:
+ case tok::kw__Bool:
+ case tok::kw__Decimal32:
+ case tok::kw__Decimal64:
+ case tok::kw__Decimal128:
+ case tok::kw___vector:
+
+ // struct-or-union-specifier (C99) or class-specifier (C++)
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw___interface:
+ case tok::kw_union:
+ // enum-specifier
+ case tok::kw_enum:
+
+ // type-qualifier
+ case tok::kw_const:
+ case tok::kw_volatile:
+ case tok::kw_restrict:
+
+ // Debugger support.
+ case tok::kw___unknown_anytype:
+
+ // typedef-name
+ case tok::annot_typename:
+ return true;
+
+ // GNU ObjC bizarre protocol extension: <proto1,proto2> with implicit 'id'.
+ case tok::less:
+ return getLangOpts().ObjC1;
+
+ case tok::kw___cdecl:
+ case tok::kw___stdcall:
+ case tok::kw___fastcall:
+ case tok::kw___thiscall:
+ case tok::kw___vectorcall:
+ case tok::kw___w64:
+ case tok::kw___ptr64:
+ case tok::kw___ptr32:
+ case tok::kw___pascal:
+ case tok::kw___unaligned:
+
+ case tok::kw__Nonnull:
+ case tok::kw__Nullable:
+ case tok::kw__Null_unspecified:
+
+ case tok::kw___kindof:
+
+ case tok::kw___private:
+ case tok::kw___local:
+ case tok::kw___global:
+ case tok::kw___constant:
+ case tok::kw___generic:
+ case tok::kw___read_only:
+ case tok::kw___read_write:
+ case tok::kw___write_only:
+
+ return true;
+
+ // C11 _Atomic
+ case tok::kw__Atomic:
+ return true;
+ }
+}
+
+/// isDeclarationSpecifier() - Return true if the current token is part of a
+/// declaration specifier.
+///
+/// \param DisambiguatingWithExpression True to indicate that the purpose of
+/// this check is to disambiguate between an expression and a declaration.
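+///
+/// For example, in Objective-C, "NSArray alloc]" in an expression context is
+/// most likely a class message send missing its initial '[', so when
+/// DisambiguatingWithExpression is set we do not treat it as a declaration.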
+bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
+ switch (Tok.getKind()) {
+ default: return false;
+
+ case tok::identifier: // foo::bar
+ // Unfortunate hack to support "Class.factoryMethod" notation.
+ if (getLangOpts().ObjC1 && NextToken().is(tok::period))
+ return false;
+ if (TryAltiVecVectorToken())
+ return true;
+ // Fall through.
+ case tok::kw_decltype: // decltype(T())::type
+ case tok::kw_typename: // typename T::type
+ // Annotate typenames and C++ scope specifiers. If we get one, just
+ // recurse to handle whatever we get.
+ if (TryAnnotateTypeOrScopeToken())
+ return true;
+ if (Tok.is(tok::identifier))
+ return false;
+
+ // If we're in Objective-C and we have an Objective-C class type followed
+ // by an identifier and then either ':' or ']', in a place where an
+ // expression is permitted, then this is probably a class message send
+ // missing the initial '['. In this case, we won't consider this to be
+ // the start of a declaration.
+ if (DisambiguatingWithExpression &&
+ isStartOfObjCClassMessageMissingOpenBracket())
+ return false;
+
+ return isDeclarationSpecifier();
+
+ case tok::coloncolon: // ::foo::bar
+ if (NextToken().is(tok::kw_new) || // ::new
+ NextToken().is(tok::kw_delete)) // ::delete
+ return false;
+
+ // Annotate typenames and C++ scope specifiers. If we get one, just
+ // recurse to handle whatever we get.
+ if (TryAnnotateTypeOrScopeToken())
+ return true;
+ return isDeclarationSpecifier();
+
+ // storage-class-specifier
+ case tok::kw_typedef:
+ case tok::kw_extern:
+ case tok::kw___private_extern__:
+ case tok::kw_static:
+ case tok::kw_auto:
+ case tok::kw___auto_type:
+ case tok::kw_register:
+ case tok::kw___thread:
+ case tok::kw_thread_local:
+ case tok::kw__Thread_local:
+
+ // Modules
+ case tok::kw___module_private__:
+
+ // Debugger support
+ case tok::kw___unknown_anytype:
+
+ // type-specifiers
+ case tok::kw_short:
+ case tok::kw_long:
+ case tok::kw___int64:
+ case tok::kw___int128:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw__Complex:
+ case tok::kw__Imaginary:
+ case tok::kw_void:
+ case tok::kw_char:
+ case tok::kw_wchar_t:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+
+ case tok::kw_int:
+ case tok::kw_half:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_bool:
+ case tok::kw__Bool:
+ case tok::kw__Decimal32:
+ case tok::kw__Decimal64:
+ case tok::kw__Decimal128:
+ case tok::kw___vector:
+
+ // struct-or-union-specifier (C99) or class-specifier (C++)
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw_union:
+ case tok::kw___interface:
+ // enum-specifier
+ case tok::kw_enum:
+
+ // type-qualifier
+ case tok::kw_const:
+ case tok::kw_volatile:
+ case tok::kw_restrict:
+
+ // function-specifier
+ case tok::kw_inline:
+ case tok::kw_virtual:
+ case tok::kw_explicit:
+ case tok::kw__Noreturn:
+
+ // alignment-specifier
+ case tok::kw__Alignas:
+
+ // friend keyword.
+ case tok::kw_friend:
+
+ // static_assert-declaration
+ case tok::kw__Static_assert:
+
+ // GNU typeof support.
+ case tok::kw_typeof:
+
+ // GNU attributes.
+ case tok::kw___attribute:
+
+ // C++11 decltype and constexpr.
+ case tok::annot_decltype:
+ case tok::kw_constexpr:
+
+ // C++ Concepts TS - concept
+ case tok::kw_concept:
+
+ // C11 _Atomic
+ case tok::kw__Atomic:
+ return true;
+
+ // GNU ObjC bizarre protocol extension: <proto1,proto2> with implicit 'id'.
+ case tok::less:
+ return getLangOpts().ObjC1;
+
+ // typedef-name
+ case tok::annot_typename:
+ return !DisambiguatingWithExpression ||
+ !isStartOfObjCClassMessageMissingOpenBracket();
+
+ case tok::kw___declspec:
+ case tok::kw___cdecl:
+ case tok::kw___stdcall:
+ case tok::kw___fastcall:
+ case tok::kw___thiscall:
+ case tok::kw___vectorcall:
+ case tok::kw___w64:
+ case tok::kw___sptr:
+ case tok::kw___uptr:
+ case tok::kw___ptr64:
+ case tok::kw___ptr32:
+ case tok::kw___forceinline:
+ case tok::kw___pascal:
+ case tok::kw___unaligned:
+
+ case tok::kw__Nonnull:
+ case tok::kw__Nullable:
+ case tok::kw__Null_unspecified:
+
+ case tok::kw___kindof:
+
+ case tok::kw___private:
+ case tok::kw___local:
+ case tok::kw___global:
+ case tok::kw___constant:
+ case tok::kw___generic:
+ case tok::kw___read_only:
+ case tok::kw___read_write:
+ case tok::kw___write_only:
+
+ return true;
+ }
+}
+
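+/// isConstructorDeclarator - Tentatively parse ahead to decide whether a
+/// "C(...)" sequence declares a constructor or is instead, e.g., a member or
+/// variable with a parenthesized declarator. The tentative parse is always
+/// reverted; only the verdict is returned.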
+bool Parser::isConstructorDeclarator(bool IsUnqualified) {
+ TentativeParsingAction TPA(*this);
+
+ // Parse the C++ scope specifier.
+ CXXScopeSpec SS;
+ if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
+ /*EnteringContext=*/true)) {
+ TPA.Revert();
+ return false;
+ }
+
+ // Parse the constructor name.
+ if (Tok.isOneOf(tok::identifier, tok::annot_template_id)) {
+ // We already know that we have a constructor name; just consume
+ // the token.
+ ConsumeToken();
+ } else {
+ TPA.Revert();
+ return false;
+ }
+
+ // Current class name must be followed by a left parenthesis.
+ if (Tok.isNot(tok::l_paren)) {
+ TPA.Revert();
+ return false;
+ }
+ ConsumeParen();
+
+ // A right parenthesis, or ellipsis followed by a right parenthesis signals
+ // that we have a constructor.
+ if (Tok.is(tok::r_paren) ||
+ (Tok.is(tok::ellipsis) && NextToken().is(tok::r_paren))) {
+ TPA.Revert();
+ return true;
+ }
+
+ // A C++11 attribute here signals that we have a constructor, and is an
+ // attribute on the first constructor parameter.
+ if (getLangOpts().CPlusPlus11 &&
+ isCXX11AttributeSpecifier(/*Disambiguate*/ false,
+ /*OuterMightBeMessageSend*/ true)) {
+ TPA.Revert();
+ return true;
+ }
+
+ // If we need to, enter the specified scope.
+ DeclaratorScopeObj DeclScopeObj(*this, SS);
+ if (SS.isSet() && Actions.ShouldEnterDeclaratorScope(getCurScope(), SS))
+ DeclScopeObj.EnterDeclaratorScope();
+
+ // Optionally skip Microsoft attributes.
+ ParsedAttributes Attrs(AttrFactory);
+ MaybeParseMicrosoftAttributes(Attrs);
+
+ // Check whether the next token(s) are part of a declaration
+ // specifier, in which case we have the start of a parameter and,
+ // therefore, we know that this is a constructor.
+ bool IsConstructor = false;
+ if (isDeclarationSpecifier())
+ IsConstructor = true;
+ else if (Tok.is(tok::identifier) ||
+ (Tok.is(tok::annot_cxxscope) && NextToken().is(tok::identifier))) {
+ // We've seen "C ( X" or "C ( X::Y", but "X" / "X::Y" is not a type.
+ // This might be a parenthesized member name, but is more likely to
+ // be a constructor declaration with an invalid argument type. Keep
+ // looking.
+ if (Tok.is(tok::annot_cxxscope))
+ ConsumeToken();
+ ConsumeToken();
+
+ // If this is not a constructor, we must be parsing a declarator,
+ // which must have one of the following syntactic forms (see the
+ // grammar extract at the start of ParseDirectDeclarator):
+ switch (Tok.getKind()) {
+ case tok::l_paren:
+ // C(X ( int));
+ case tok::l_square:
+ // C(X [ 5]);
+ // C(X [ [attribute]]);
+ case tok::coloncolon:
+ // C(X :: Y);
+ // C(X :: *p);
+ // Assume this isn't a constructor, rather than assuming it's a
+ // constructor with an unnamed parameter of an ill-formed type.
+ break;
+
+ case tok::r_paren:
+ // C(X )
+ if (NextToken().is(tok::colon) || NextToken().is(tok::kw_try)) {
+ // Assume these were meant to be constructors:
+ // C(X) : (the name of a bit-field cannot be parenthesized).
+ // C(X) try (this is otherwise ill-formed).
+ IsConstructor = true;
+ }
+ if (NextToken().is(tok::semi) || NextToken().is(tok::l_brace)) {
+ // If we have a constructor name within the class definition,
+ // assume these were meant to be constructors:
+ // C(X) {
+ // C(X) ;
+ // ... because otherwise we would be declaring a non-static data
+ // member that is ill-formed because it's of the same type as its
+ // surrounding class.
+ //
+ // FIXME: We can actually do this whether or not the name is qualified,
+ // because if it is qualified in this context it must be being used as
+ // a constructor name. However, we do not implement that rule correctly
+ // currently, so we're somewhat conservative here.
+ IsConstructor = IsUnqualified;
+ }
+ break;
+
+ default:
+ IsConstructor = true;
+ break;
+ }
+ }
+
+ TPA.Revert();
+ return IsConstructor;
+}
+
+/// ParseTypeQualifierListOpt
+/// type-qualifier-list: [C99 6.7.5]
+/// type-qualifier
+/// [vendor] attributes
+/// [ only if AttrReqs & AR_VendorAttributesParsed ]
+/// type-qualifier-list type-qualifier
+/// [vendor] type-qualifier-list attributes
+/// [ only if AttrReqs & AR_VendorAttributesParsed ]
+/// [C++0x] attribute-specifier[opt] is allowed before cv-qualifier-seq
+/// [ only if AttReqs & AR_CXX11AttributesParsed ]
+/// Note: vendor can be GNU, MS, etc and can be explicitly controlled via
+/// AttrRequirements bitmask values.
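+///
+/// For example, this parses the qualifiers following the '*' in:
+///   int * const volatile p;   // C and C++
+///   int *restrict q;          // C99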
+void Parser::ParseTypeQualifierListOpt(DeclSpec &DS, unsigned AttrReqs,
+ bool AtomicAllowed,
+ bool IdentifierRequired) {
+ if (getLangOpts().CPlusPlus11 && (AttrReqs & AR_CXX11AttributesParsed) &&
+ isCXX11AttributeSpecifier()) {
+ ParsedAttributesWithRange attrs(AttrFactory);
+ ParseCXX11Attributes(attrs);
+ DS.takeAttributesFrom(attrs);
+ }
+
+ SourceLocation EndLoc;
+
+ while (1) {
+ bool isInvalid = false;
+ const char *PrevSpec = nullptr;
+ unsigned DiagID = 0;
+ SourceLocation Loc = Tok.getLocation();
+
+ switch (Tok.getKind()) {
+ case tok::code_completion:
+ Actions.CodeCompleteTypeQualifiers(DS);
+ return cutOffParsing();
+
+ case tok::kw_const:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_const , Loc, PrevSpec, DiagID,
+ getLangOpts());
+ break;
+ case tok::kw_volatile:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_volatile, Loc, PrevSpec, DiagID,
+ getLangOpts());
+ break;
+ case tok::kw_restrict:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_restrict, Loc, PrevSpec, DiagID,
+ getLangOpts());
+ break;
+ case tok::kw__Atomic:
+ if (!AtomicAllowed)
+ goto DoneWithTypeQuals;
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_atomic, Loc, PrevSpec, DiagID,
+ getLangOpts());
+ break;
+
+ // OpenCL qualifiers:
+ case tok::kw___private:
+ case tok::kw___global:
+ case tok::kw___local:
+ case tok::kw___constant:
+ case tok::kw___generic:
+ case tok::kw___read_only:
+ case tok::kw___write_only:
+ case tok::kw___read_write:
+ ParseOpenCLQualifiers(DS.getAttributes());
+ break;
+
+ case tok::kw___uptr:
+      // GNU libc headers in C mode use '__uptr' as an identifier which
+      // conflicts with the MS modifier keyword.
+ if ((AttrReqs & AR_DeclspecAttributesParsed) && !getLangOpts().CPlusPlus &&
+ IdentifierRequired && DS.isEmpty() && NextToken().is(tok::semi)) {
+ if (TryKeywordIdentFallback(false))
+ continue;
+ }
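+      // Otherwise, fall through and treat '__uptr' like the other Microsoft
+      // type qualifiers below.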
+ case tok::kw___sptr:
+ case tok::kw___w64:
+ case tok::kw___ptr64:
+ case tok::kw___ptr32:
+ case tok::kw___cdecl:
+ case tok::kw___stdcall:
+ case tok::kw___fastcall:
+ case tok::kw___thiscall:
+ case tok::kw___vectorcall:
+ case tok::kw___unaligned:
+ if (AttrReqs & AR_DeclspecAttributesParsed) {
+ ParseMicrosoftTypeAttributes(DS.getAttributes());
+ continue;
+ }
+ goto DoneWithTypeQuals;
+ case tok::kw___pascal:
+ if (AttrReqs & AR_VendorAttributesParsed) {
+ ParseBorlandTypeAttributes(DS.getAttributes());
+ continue;
+ }
+ goto DoneWithTypeQuals;
+
+ // Nullability type specifiers.
+ case tok::kw__Nonnull:
+ case tok::kw__Nullable:
+ case tok::kw__Null_unspecified:
+ ParseNullabilityTypeSpecifiers(DS.getAttributes());
+ continue;
+
+ // Objective-C 'kindof' types.
+ case tok::kw___kindof:
+ DS.getAttributes().addNew(Tok.getIdentifierInfo(), Loc, nullptr, Loc,
+ nullptr, 0, AttributeList::AS_Keyword);
+ (void)ConsumeToken();
+ continue;
+
+ case tok::kw___attribute:
+ if (AttrReqs & AR_GNUAttributesParsedAndRejected)
+ // When GNU attributes are expressly forbidden, diagnose their usage.
+ Diag(Tok, diag::err_attributes_not_allowed);
+
+ // Parse the attributes even if they are rejected to ensure that error
+ // recovery is graceful.
+ if (AttrReqs & AR_GNUAttributesParsed ||
+ AttrReqs & AR_GNUAttributesParsedAndRejected) {
+ ParseGNUAttributes(DS.getAttributes());
+ continue; // do *not* consume the next token!
+ }
+ // otherwise, FALL THROUGH!
+ default:
+ DoneWithTypeQuals:
+ // If this is not a type-qualifier token, we're done reading type
+ // qualifiers. First verify that DeclSpec's are consistent.
+ DS.Finish(Actions, Actions.getASTContext().getPrintingPolicy());
+ if (EndLoc.isValid())
+ DS.SetRangeEnd(EndLoc);
+ return;
+ }
+
+ // If the specifier combination wasn't legal, issue a diagnostic.
+ if (isInvalid) {
+ assert(PrevSpec && "Method did not return previous specifier!");
+ Diag(Tok, DiagID) << PrevSpec;
+ }
+ EndLoc = ConsumeToken();
+ }
+}
+
+/// ParseDeclarator - Parse and verify a newly-initialized declarator.
+///
+void Parser::ParseDeclarator(Declarator &D) {
+  // This implements the 'declarator' production in the C grammar, then checks
+  // for well-formedness and issues diagnostics.
+ ParseDeclaratorInternal(D, &Parser::ParseDirectDeclarator);
+}
+
+static bool isPtrOperatorToken(tok::TokenKind Kind, const LangOptions &Lang,
+ unsigned TheContext) {
+ if (Kind == tok::star || Kind == tok::caret)
+ return true;
+
+ if (!Lang.CPlusPlus)
+ return false;
+
+ if (Kind == tok::amp)
+ return true;
+
+ // We parse rvalue refs in C++03, because otherwise the errors are scary.
+ // But we must not parse them in conversion-type-ids and new-type-ids, since
+ // those can be legitimately followed by a && operator.
+ // (The same thing can in theory happen after a trailing-return-type, but
+ // since those are a C++11 feature, there is no rejects-valid issue there.)
+ if (Kind == tok::ampamp)
+ return Lang.CPlusPlus11 || (TheContext != Declarator::ConversionIdContext &&
+ TheContext != Declarator::CXXNewContext);
+
+ return false;
+}
+
+/// ParseDeclaratorInternal - Parse a C or C++ declarator. The direct-declarator
+/// is parsed by the function passed to it. Pass null, and the direct-declarator
+/// isn't parsed at all, making this function effectively parse the C++
+/// ptr-operator production.
+///
+/// If the grammar of this construct is extended, matching changes must also be
+/// made to TryParseDeclarator and MightBeDeclarator, and possibly to
+/// isConstructorDeclarator.
+///
+/// declarator: [C99 6.7.5] [C++ 8p4, dcl.decl]
+/// [C] pointer[opt] direct-declarator
+/// [C++] direct-declarator
+/// [C++] ptr-operator declarator
+///
+/// pointer: [C99 6.7.5]
+/// '*' type-qualifier-list[opt]
+/// '*' type-qualifier-list[opt] pointer
+///
+/// ptr-operator:
+/// '*' cv-qualifier-seq[opt]
+/// '&'
+/// [C++0x] '&&'
+/// [GNU] '&' restrict[opt] attributes[opt]
+/// [GNU?] '&&' restrict[opt] attributes[opt]
+/// '::'[opt] nested-name-specifier '*' cv-qualifier-seq[opt]
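+///
+/// For example (illustrative; assumes an 'int x' and a 'struct S' in scope):
+///   int *p = &x;    // pointer
+///   int &lr = x;    // C++ lvalue reference
+///   int &&rr = 1;   // C++11 rvalue reference
+///   int S::*mp;     // C++ member pointer, via the nested-name form above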
+void Parser::ParseDeclaratorInternal(Declarator &D,
+ DirectDeclParseFunction DirectDeclParser) {
+ if (Diags.hasAllExtensionsSilenced())
+ D.setExtension();
+
+ // C++ member pointers start with a '::' or a nested-name.
+ // Member pointers get special handling, since there's no place for the
+ // scope spec in the generic path below.
+ if (getLangOpts().CPlusPlus &&
+ (Tok.is(tok::coloncolon) ||
+ (Tok.is(tok::identifier) &&
+ (NextToken().is(tok::coloncolon) || NextToken().is(tok::less))) ||
+ Tok.is(tok::annot_cxxscope))) {
+ bool EnteringContext = D.getContext() == Declarator::FileContext ||
+ D.getContext() == Declarator::MemberContext;
+ CXXScopeSpec SS;
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), EnteringContext);
+
+ if (SS.isNotEmpty()) {
+ if (Tok.isNot(tok::star)) {
+ // The scope spec really belongs to the direct-declarator.
+ if (D.mayHaveIdentifier())
+ D.getCXXScopeSpec() = SS;
+ else
+ AnnotateScopeToken(SS, true);
+
+ if (DirectDeclParser)
+ (this->*DirectDeclParser)(D);
+ return;
+ }
+
+ SourceLocation Loc = ConsumeToken();
+ D.SetRangeEnd(Loc);
+ DeclSpec DS(AttrFactory);
+ ParseTypeQualifierListOpt(DS);
+ D.ExtendWithDeclSpec(DS);
+
+ // Recurse to parse whatever is left.
+ ParseDeclaratorInternal(D, DirectDeclParser);
+
+ // Sema will have to catch (syntactically invalid) pointers into global
+ // scope. It has to catch pointers into namespace scope anyway.
+ D.AddTypeInfo(DeclaratorChunk::getMemberPointer(SS,DS.getTypeQualifiers(),
+ DS.getLocEnd()),
+ DS.getAttributes(),
+ /* Don't replace range end. */SourceLocation());
+ return;
+ }
+ }
+
+ tok::TokenKind Kind = Tok.getKind();
+ // Not a pointer, C++ reference, or block.
+ if (!isPtrOperatorToken(Kind, getLangOpts(), D.getContext())) {
+ if (DirectDeclParser)
+ (this->*DirectDeclParser)(D);
+ return;
+ }
+
+ // Otherwise, '*' -> pointer, '^' -> block, '&' -> lvalue reference,
+ // '&&' -> rvalue reference
+ SourceLocation Loc = ConsumeToken(); // Eat the *, ^, & or &&.
+ D.SetRangeEnd(Loc);
+
+ if (Kind == tok::star || Kind == tok::caret) {
+ // Is a pointer.
+ DeclSpec DS(AttrFactory);
+
+ // GNU attributes are not allowed here in a new-type-id, but Declspec and
+ // C++11 attributes are allowed.
+ unsigned Reqs = AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed |
+ ((D.getContext() != Declarator::CXXNewContext)
+ ? AR_GNUAttributesParsed
+ : AR_GNUAttributesParsedAndRejected);
+ ParseTypeQualifierListOpt(DS, Reqs, true, !D.mayOmitIdentifier());
+ D.ExtendWithDeclSpec(DS);
+
+ // Recursively parse the declarator.
+ ParseDeclaratorInternal(D, DirectDeclParser);
+ if (Kind == tok::star)
+ // Remember that we parsed a pointer type, and remember the type-quals.
+ D.AddTypeInfo(DeclaratorChunk::getPointer(DS.getTypeQualifiers(), Loc,
+ DS.getConstSpecLoc(),
+ DS.getVolatileSpecLoc(),
+ DS.getRestrictSpecLoc(),
+ DS.getAtomicSpecLoc()),
+ DS.getAttributes(),
+ SourceLocation());
+ else
+ // Remember that we parsed a Block type, and remember the type-quals.
+ D.AddTypeInfo(DeclaratorChunk::getBlockPointer(DS.getTypeQualifiers(),
+ Loc),
+ DS.getAttributes(),
+ SourceLocation());
+ } else {
+ // Is a reference
+ DeclSpec DS(AttrFactory);
+
+ // Complain about rvalue references in C++03, but then go on and build
+ // the declarator.
+ if (Kind == tok::ampamp)
+ Diag(Loc, getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_rvalue_reference :
+ diag::ext_rvalue_reference);
+
+ // GNU-style and C++11 attributes are allowed here, as is restrict.
+ ParseTypeQualifierListOpt(DS);
+ D.ExtendWithDeclSpec(DS);
+
+ // C++ 8.3.2p1: cv-qualified references are ill-formed except when the
+ // cv-qualifiers are introduced through the use of a typedef or of a
+ // template type argument, in which case the cv-qualifiers are ignored.
+ if (DS.getTypeQualifiers() != DeclSpec::TQ_unspecified) {
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
+ Diag(DS.getConstSpecLoc(),
+ diag::err_invalid_reference_qualifier_application) << "const";
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_volatile)
+ Diag(DS.getVolatileSpecLoc(),
+ diag::err_invalid_reference_qualifier_application) << "volatile";
+ // 'restrict' is permitted as an extension.
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_atomic)
+ Diag(DS.getAtomicSpecLoc(),
+ diag::err_invalid_reference_qualifier_application) << "_Atomic";
+ }
+
+ // Recursively parse the declarator.
+ ParseDeclaratorInternal(D, DirectDeclParser);
+
+ if (D.getNumTypeObjects() > 0) {
+ // C++ [dcl.ref]p4: There shall be no references to references.
+ DeclaratorChunk& InnerChunk = D.getTypeObject(D.getNumTypeObjects() - 1);
+ if (InnerChunk.Kind == DeclaratorChunk::Reference) {
+ if (const IdentifierInfo *II = D.getIdentifier())
+ Diag(InnerChunk.Loc, diag::err_illegal_decl_reference_to_reference)
+ << II;
+ else
+ Diag(InnerChunk.Loc, diag::err_illegal_decl_reference_to_reference)
+ << "type name";
+
+ // Once we've complained about the reference-to-reference, we
+ // can go ahead and build the (technically ill-formed)
+ // declarator: reference collapsing will take care of it.
+ }
+ }
+
+ // Remember that we parsed a reference type.
+ D.AddTypeInfo(DeclaratorChunk::getReference(DS.getTypeQualifiers(), Loc,
+ Kind == tok::amp),
+ DS.getAttributes(),
+ SourceLocation());
+ }
+}
+
+// When correcting from misplaced brackets before the identifier, the location
+// is saved inside the declarator so that other diagnostic messages can use
+// it. This function extracts and returns that location, or returns the
+// provided location if a stored location does not exist.
+static SourceLocation getMissingDeclaratorIdLoc(Declarator &D,
+ SourceLocation Loc) {
+ if (D.getName().StartLocation.isInvalid() &&
+ D.getName().EndLocation.isValid())
+ return D.getName().EndLocation;
+
+ return Loc;
+}
+
+/// ParseDirectDeclarator
+/// direct-declarator: [C99 6.7.5]
+/// [C99] identifier
+/// '(' declarator ')'
+/// [GNU] '(' attributes declarator ')'
+/// [C90] direct-declarator '[' constant-expression[opt] ']'
+/// [C99] direct-declarator '[' type-qual-list[opt] assignment-expr[opt] ']'
+/// [C99] direct-declarator '[' 'static' type-qual-list[opt] assign-expr ']'
+/// [C99] direct-declarator '[' type-qual-list 'static' assignment-expr ']'
+/// [C99] direct-declarator '[' type-qual-list[opt] '*' ']'
+/// [C++11] direct-declarator '[' constant-expression[opt] ']'
+/// attribute-specifier-seq[opt]
+/// direct-declarator '(' parameter-type-list ')'
+/// direct-declarator '(' identifier-list[opt] ')'
+/// [GNU] direct-declarator '(' parameter-forward-declarations
+/// parameter-type-list[opt] ')'
+/// [C++] direct-declarator '(' parameter-declaration-clause ')'
+/// cv-qualifier-seq[opt] exception-specification[opt]
+/// [C++11] direct-declarator '(' parameter-declaration-clause ')'
+/// attribute-specifier-seq[opt] cv-qualifier-seq[opt]
+/// ref-qualifier[opt] exception-specification[opt]
+/// [C++] declarator-id
+/// [C++11] declarator-id attribute-specifier-seq[opt]
+///
+/// declarator-id: [C++ 8]
+/// '...'[opt] id-expression
+/// '::'[opt] nested-name-specifier[opt] type-name
+///
+/// id-expression: [C++ 5.1]
+/// unqualified-id
+/// qualified-id
+///
+/// unqualified-id: [C++ 5.1]
+/// identifier
+/// operator-function-id
+/// conversion-function-id
+/// '~' class-name
+/// template-id
+///
+/// Note, any additional constructs added here may need corresponding changes
+/// in isConstructorDeclarator.
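+///
+/// For example, each declaration below contains a declarator-id of a
+/// different kind (assuming a class 'S' and a function template 'f'):
+///   int x;                     // identifier
+///   bool operator<(S, S);      // operator-function-id
+///   ~S();                      // '~' class-name, inside the definition of S
+///   template<> void f<int>(S); // template-id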
+void Parser::ParseDirectDeclarator(Declarator &D) {
+ DeclaratorScopeObj DeclScopeObj(*this, D.getCXXScopeSpec());
+
+ if (getLangOpts().CPlusPlus && D.mayHaveIdentifier()) {
+ // Don't parse FOO:BAR as if it were a typo for FOO::BAR inside a class, in
+ // this context it is a bitfield. Also in range-based for statement colon
+ // may delimit for-range-declaration.
+ ColonProtectionRAIIObject X(*this,
+ D.getContext() == Declarator::MemberContext ||
+ (D.getContext() == Declarator::ForContext &&
+ getLangOpts().CPlusPlus11));
+
+ // ParseDeclaratorInternal might already have parsed the scope.
+ if (D.getCXXScopeSpec().isEmpty()) {
+ bool EnteringContext = D.getContext() == Declarator::FileContext ||
+ D.getContext() == Declarator::MemberContext;
+ ParseOptionalCXXScopeSpecifier(D.getCXXScopeSpec(), ParsedType(),
+ EnteringContext);
+ }
+
+ if (D.getCXXScopeSpec().isValid()) {
+ if (Actions.ShouldEnterDeclaratorScope(getCurScope(),
+ D.getCXXScopeSpec()))
+ // Change the declaration context for name lookup, until this function
+ // is exited (and the declarator has been parsed).
+ DeclScopeObj.EnterDeclaratorScope();
+ }
+
+ // C++0x [dcl.fct]p14:
+ // There is a syntactic ambiguity when an ellipsis occurs at the end of a
+ // parameter-declaration-clause without a preceding comma. In this case,
+ // the ellipsis is parsed as part of the abstract-declarator if the type
+ // of the parameter either names a template parameter pack that has not
+ // been expanded or contains auto; otherwise, it is parsed as part of the
+ // parameter-declaration-clause.
+ if (Tok.is(tok::ellipsis) && D.getCXXScopeSpec().isEmpty() &&
+ !((D.getContext() == Declarator::PrototypeContext ||
+ D.getContext() == Declarator::LambdaExprParameterContext ||
+ D.getContext() == Declarator::BlockLiteralContext) &&
+ NextToken().is(tok::r_paren) &&
+ !D.hasGroupingParens() &&
+ !Actions.containsUnexpandedParameterPacks(D) &&
+ D.getDeclSpec().getTypeSpecType() != TST_auto)) {
+ SourceLocation EllipsisLoc = ConsumeToken();
+ if (isPtrOperatorToken(Tok.getKind(), getLangOpts(), D.getContext())) {
+ // The ellipsis was put in the wrong place. Recover, and explain to
+ // the user what they should have done.
+ ParseDeclarator(D);
+ if (EllipsisLoc.isValid())
+ DiagnoseMisplacedEllipsisInDeclarator(EllipsisLoc, D);
+ return;
+ } else
+ D.setEllipsisLoc(EllipsisLoc);
+
+ // The ellipsis can't be followed by a parenthesized declarator. We
+ // check for that in ParseParenDeclarator, after we have disambiguated
+ // the l_paren token.
+ }
+
+ if (Tok.isOneOf(tok::identifier, tok::kw_operator, tok::annot_template_id,
+ tok::tilde)) {
+ // We found something that indicates the start of an unqualified-id.
+ // Parse that unqualified-id.
+ bool AllowConstructorName;
+ if (D.getDeclSpec().hasTypeSpecifier())
+ AllowConstructorName = false;
+ else if (D.getCXXScopeSpec().isSet())
+ AllowConstructorName =
+ (D.getContext() == Declarator::FileContext ||
+ D.getContext() == Declarator::MemberContext);
+ else
+ AllowConstructorName = (D.getContext() == Declarator::MemberContext);
+
+ SourceLocation TemplateKWLoc;
+ bool HadScope = D.getCXXScopeSpec().isValid();
+ if (ParseUnqualifiedId(D.getCXXScopeSpec(),
+ /*EnteringContext=*/true,
+ /*AllowDestructorName=*/true,
+ AllowConstructorName,
+ ParsedType(),
+ TemplateKWLoc,
+ D.getName()) ||
+ // Once we're past the identifier, if the scope was bad, mark the
+ // whole declarator bad.
+ D.getCXXScopeSpec().isInvalid()) {
+ D.SetIdentifier(nullptr, Tok.getLocation());
+ D.setInvalidType(true);
+ } else {
+ // ParseUnqualifiedId might have parsed a scope specifier during error
+ // recovery. If it did so, enter that scope.
+ if (!HadScope && D.getCXXScopeSpec().isValid() &&
+ Actions.ShouldEnterDeclaratorScope(getCurScope(),
+ D.getCXXScopeSpec()))
+ DeclScopeObj.EnterDeclaratorScope();
+
+ // Parsed the unqualified-id; update range information and move along.
+ if (D.getSourceRange().getBegin().isInvalid())
+ D.SetRangeBegin(D.getName().getSourceRange().getBegin());
+ D.SetRangeEnd(D.getName().getSourceRange().getEnd());
+ }
+ goto PastIdentifier;
+ }
+
+ if (D.getCXXScopeSpec().isNotEmpty()) {
+ // We have a scope specifier but no following unqualified-id.
+ Diag(PP.getLocForEndOfToken(D.getCXXScopeSpec().getEndLoc()),
+ diag::err_expected_unqualified_id)
+ << /*C++*/1;
+ D.SetIdentifier(nullptr, Tok.getLocation());
+ goto PastIdentifier;
+ }
+ } else if (Tok.is(tok::identifier) && D.mayHaveIdentifier()) {
+ assert(!getLangOpts().CPlusPlus &&
+ "There's a C++-specific check for tok::identifier above");
+ assert(Tok.getIdentifierInfo() && "Not an identifier?");
+ D.SetIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
+ D.SetRangeEnd(Tok.getLocation());
+ ConsumeToken();
+ goto PastIdentifier;
+ } else if (Tok.is(tok::identifier) && D.diagnoseIdentifier()) {
+ // A virt-specifier isn't treated as an identifier if it appears after a
+ // trailing-return-type.
+ if (D.getContext() != Declarator::TrailingReturnContext ||
+ !isCXX11VirtSpecifier(Tok)) {
+ Diag(Tok.getLocation(), diag::err_unexpected_unqualified_id)
+ << FixItHint::CreateRemoval(Tok.getLocation());
+ D.SetIdentifier(nullptr, Tok.getLocation());
+ ConsumeToken();
+ goto PastIdentifier;
+ }
+ }
+
+ if (Tok.is(tok::l_paren)) {
+ // direct-declarator: '(' declarator ')'
+ // direct-declarator: '(' attributes declarator ')'
+ // Example: 'char (*X)' or 'int (*XX)(void)'
+ ParseParenDeclarator(D);
+
+ // If the declarator was parenthesized, we entered the declarator
+ // scope when parsing the parenthesized declarator, then exited
+ // the scope already. Re-enter the scope, if we need to.
+ if (D.getCXXScopeSpec().isSet()) {
+ // If there was an error parsing parenthesized declarator, declarator
+ // scope may have been entered before. Don't do it again.
+ if (!D.isInvalidType() &&
+ Actions.ShouldEnterDeclaratorScope(getCurScope(),
+ D.getCXXScopeSpec()))
+ // Change the declaration context for name lookup, until this function
+ // is exited (and the declarator has been parsed).
+ DeclScopeObj.EnterDeclaratorScope();
+ }
+ } else if (D.mayOmitIdentifier()) {
+ // This could be something simple like "int" (in which case the declarator
+ // portion is empty), if an abstract-declarator is allowed.
+ D.SetIdentifier(nullptr, Tok.getLocation());
+
+ // The grammar for abstract-pack-declarator does not allow grouping parens.
+ // FIXME: Revisit this once core issue 1488 is resolved.
+ if (D.hasEllipsis() && D.hasGroupingParens())
+ Diag(PP.getLocForEndOfToken(D.getEllipsisLoc()),
+ diag::ext_abstract_pack_declarator_parens);
+ } else {
+ if (Tok.getKind() == tok::annot_pragma_parser_crash)
+ LLVM_BUILTIN_TRAP;
+ if (Tok.is(tok::l_square))
+ return ParseMisplacedBracketDeclarator(D);
+ if (D.getContext() == Declarator::MemberContext) {
+ Diag(getMissingDeclaratorIdLoc(D, Tok.getLocation()),
+ diag::err_expected_member_name_or_semi)
+ << (D.getDeclSpec().isEmpty() ? SourceRange()
+ : D.getDeclSpec().getSourceRange());
+ } else if (getLangOpts().CPlusPlus) {
+ if (Tok.isOneOf(tok::period, tok::arrow))
+ Diag(Tok, diag::err_invalid_operator_on_type) << Tok.is(tok::arrow);
+ else {
+ SourceLocation Loc = D.getCXXScopeSpec().getEndLoc();
+ if (Tok.isAtStartOfLine() && Loc.isValid())
+ Diag(PP.getLocForEndOfToken(Loc), diag::err_expected_unqualified_id)
+ << getLangOpts().CPlusPlus;
+ else
+ Diag(getMissingDeclaratorIdLoc(D, Tok.getLocation()),
+ diag::err_expected_unqualified_id)
+ << getLangOpts().CPlusPlus;
+ }
+ } else {
+ Diag(getMissingDeclaratorIdLoc(D, Tok.getLocation()),
+ diag::err_expected_either)
+ << tok::identifier << tok::l_paren;
+ }
+ D.SetIdentifier(nullptr, Tok.getLocation());
+ D.setInvalidType(true);
+ }
+
+ PastIdentifier:
+ assert(D.isPastIdentifier() &&
+         "Haven't passed the location of the identifier yet?");
+
+ // Don't parse attributes unless we have parsed an unparenthesized name.
+ if (D.hasName() && !D.getNumTypeObjects())
+ MaybeParseCXX11Attributes(D);
+
+ while (1) {
+ if (Tok.is(tok::l_paren)) {
+ // Enter function-declaration scope, limiting any declarators to the
+ // function prototype scope, including parameter declarators.
+ ParseScope PrototypeScope(this,
+ Scope::FunctionPrototypeScope|Scope::DeclScope|
+ (D.isFunctionDeclaratorAFunctionDeclaration()
+ ? Scope::FunctionDeclarationScope : 0));
+
+ // The paren may be part of a C++ direct initializer, eg. "int x(1);".
+ // In such a case, check if we actually have a function declarator; if it
+ // is not, the declarator has been fully parsed.
+ bool IsAmbiguous = false;
+ if (getLangOpts().CPlusPlus && D.mayBeFollowedByCXXDirectInit()) {
+ // The name of the declarator, if any, is tentatively declared within
+ // a possible direct initializer.
+ TentativelyDeclaredIdentifiers.push_back(D.getIdentifier());
+ bool IsFunctionDecl = isCXXFunctionDeclarator(&IsAmbiguous);
+ TentativelyDeclaredIdentifiers.pop_back();
+ if (!IsFunctionDecl)
+ break;
+ }
+ ParsedAttributes attrs(AttrFactory);
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ ParseFunctionDeclarator(D, attrs, T, IsAmbiguous);
+ PrototypeScope.Exit();
+ } else if (Tok.is(tok::l_square)) {
+ ParseBracketDeclarator(D);
+ } else {
+ break;
+ }
+ }
+}
+
+/// ParseParenDeclarator - We parsed the declarator D up to a paren. This is
+/// only called before the identifier, so these are most likely just grouping
+/// parens for precedence. If we find that these are actually function
+/// parameter parens in an abstract-declarator, we call ParseFunctionDeclarator.
+///
+/// direct-declarator:
+/// '(' declarator ')'
+/// [GNU] '(' attributes declarator ')'
+/// direct-declarator '(' parameter-type-list ')'
+/// direct-declarator '(' identifier-list[opt] ')'
+/// [GNU] direct-declarator '(' parameter-forward-declarations
+/// parameter-type-list[opt] ')'
+///
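+/// For example, in "int (*fp)(long);" the parens around "*fp" are grouping
+/// parens, while in "int (long);" they enclose the parameters of a function
+/// (abstract) declarator.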
+void Parser::ParseParenDeclarator(Declarator &D) {
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+  assert(!D.isPastIdentifier() && "Should be called before passing the identifier");
+
+ // Eat any attributes before we look at whether this is a grouping or function
+ // declarator paren. If this is a grouping paren, the attribute applies to
+ // the type being built up, for example:
+ // int (__attribute__(()) *x)(long y)
+ // If this ends up not being a grouping paren, the attribute applies to the
+ // first argument, for example:
+ // int (__attribute__(()) int x)
+ // In either case, we need to eat any attributes to be able to determine what
+ // sort of paren this is.
+ //
+ ParsedAttributes attrs(AttrFactory);
+ bool RequiresArg = false;
+ if (Tok.is(tok::kw___attribute)) {
+ ParseGNUAttributes(attrs);
+
+ // We require that the argument list (if this is a non-grouping paren) be
+ // present even if the attribute list was empty.
+ RequiresArg = true;
+ }
+
+ // Eat any Microsoft extensions.
+ ParseMicrosoftTypeAttributes(attrs);
+
+ // Eat any Borland extensions.
+ if (Tok.is(tok::kw___pascal))
+ ParseBorlandTypeAttributes(attrs);
+
+  // If we haven't passed the identifier yet (or where the identifier would be
+ // stored, if this is an abstract declarator), then this is probably just
+ // grouping parens. However, if this could be an abstract-declarator, then
+ // this could also be the start of function arguments (consider 'void()').
+ bool isGrouping;
+
+ if (!D.mayOmitIdentifier()) {
+ // If this can't be an abstract-declarator, this *must* be a grouping
+ // paren, because we haven't seen the identifier yet.
+ isGrouping = true;
+ } else if (Tok.is(tok::r_paren) || // 'int()' is a function.
+ (getLangOpts().CPlusPlus && Tok.is(tok::ellipsis) &&
+ NextToken().is(tok::r_paren)) || // C++ int(...)
+ isDeclarationSpecifier() || // 'int(int)' is a function.
+ isCXX11AttributeSpecifier()) { // 'int([[]]int)' is a function.
+ // This handles C99 6.7.5.3p11: in "typedef int X; void foo(X)", X is
+ // considered to be a type, not a K&R identifier-list.
+ isGrouping = false;
+ } else {
+ // Otherwise, this is a grouping paren, e.g. 'int (*X)' or 'int(X)'.
+ isGrouping = true;
+ }
+
+ // If this is a grouping paren, handle:
+ // direct-declarator: '(' declarator ')'
+ // direct-declarator: '(' attributes declarator ')'
+ if (isGrouping) {
+ SourceLocation EllipsisLoc = D.getEllipsisLoc();
+ D.setEllipsisLoc(SourceLocation());
+
+ bool hadGroupingParens = D.hasGroupingParens();
+ D.setGroupingParens(true);
+ ParseDeclaratorInternal(D, &Parser::ParseDirectDeclarator);
+ // Match the ')'.
+ T.consumeClose();
+ D.AddTypeInfo(DeclaratorChunk::getParen(T.getOpenLocation(),
+ T.getCloseLocation()),
+ attrs, T.getCloseLocation());
+
+ D.setGroupingParens(hadGroupingParens);
+
+ // An ellipsis cannot be placed outside parentheses.
+ if (EllipsisLoc.isValid())
+ DiagnoseMisplacedEllipsisInDeclarator(EllipsisLoc, D);
+
+ return;
+ }
+
+ // Okay, if this wasn't a grouping paren, it must be the start of a function
+ // argument list. Recognize that this declarator will never have an
+ // identifier (and remember where it would have been), then call into
+  // ParseFunctionDeclarator to handle the argument list.
+ D.SetIdentifier(nullptr, Tok.getLocation());
+
+ // Enter function-declaration scope, limiting any declarators to the
+ // function prototype scope, including parameter declarators.
+ ParseScope PrototypeScope(this,
+ Scope::FunctionPrototypeScope | Scope::DeclScope |
+ (D.isFunctionDeclaratorAFunctionDeclaration()
+ ? Scope::FunctionDeclarationScope : 0));
+ ParseFunctionDeclarator(D, attrs, T, false, RequiresArg);
+ PrototypeScope.Exit();
+}
+
+/// ParseFunctionDeclarator - We are after the identifier and have parsed the
+/// declarator D up to a paren, which indicates that we are parsing function
+/// arguments.
+///
+/// If FirstArgAttrs is non-null, then the caller parsed those attributes
+/// immediately after the open paren - they should be considered to be
+/// attributes of the first parameter.
+///
+/// If RequiresArg is true, then the first argument of the function is required
+/// to be present and required to not be an identifier list.
+///
+/// For C++, after the parameter-list, it also parses the cv-qualifier-seq[opt],
+/// (C++11) ref-qualifier[opt], exception-specification[opt],
+/// (C++11) attribute-specifier-seq[opt], and (C++11) trailing-return-type[opt].
+///
+/// [C++11] exception-specification:
+/// dynamic-exception-specification
+/// noexcept-specification
+///
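+/// For example, inside a class definition, everything after the parameter
+/// list's ')' in the following is parsed here:
+///   auto f(int x) const & noexcept -> int;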
+void Parser::ParseFunctionDeclarator(Declarator &D,
+ ParsedAttributes &FirstArgAttrs,
+ BalancedDelimiterTracker &Tracker,
+ bool IsAmbiguous,
+ bool RequiresArg) {
+ assert(getCurScope()->isFunctionPrototypeScope() &&
+ "Should call from a Function scope");
+ // lparen is already consumed!
+ assert(D.isPastIdentifier() && "Should not call before identifier!");
+
+ // This should be true when the function has typed arguments.
+ // Otherwise, it is treated as a K&R-style function.
+ bool HasProto = false;
+ // Build up an array of information about the parsed arguments.
+ SmallVector<DeclaratorChunk::ParamInfo, 16> ParamInfo;
+ // Remember where we see an ellipsis, if any.
+ SourceLocation EllipsisLoc;
+
+ DeclSpec DS(AttrFactory);
+ bool RefQualifierIsLValueRef = true;
+ SourceLocation RefQualifierLoc;
+ SourceLocation ConstQualifierLoc;
+ SourceLocation VolatileQualifierLoc;
+ SourceLocation RestrictQualifierLoc;
+ ExceptionSpecificationType ESpecType = EST_None;
+ SourceRange ESpecRange;
+ SmallVector<ParsedType, 2> DynamicExceptions;
+ SmallVector<SourceRange, 2> DynamicExceptionRanges;
+ ExprResult NoexceptExpr;
+ CachedTokens *ExceptionSpecTokens = nullptr;
+ ParsedAttributes FnAttrs(AttrFactory);
+ TypeResult TrailingReturnType;
+
+ /* LocalEndLoc is the end location for the local FunctionTypeLoc.
+ EndLoc is the end location for the function declarator.
+ They differ for trailing return types. */
+ SourceLocation StartLoc, LocalEndLoc, EndLoc;
+ SourceLocation LParenLoc, RParenLoc;
+ LParenLoc = Tracker.getOpenLocation();
+ StartLoc = LParenLoc;
+
+ if (isFunctionDeclaratorIdentifierList()) {
+ if (RequiresArg)
+ Diag(Tok, diag::err_argument_required_after_attribute);
+
+ ParseFunctionDeclaratorIdentifierList(D, ParamInfo);
+
+ Tracker.consumeClose();
+ RParenLoc = Tracker.getCloseLocation();
+ LocalEndLoc = RParenLoc;
+ EndLoc = RParenLoc;
+ } else {
+ if (Tok.isNot(tok::r_paren))
+ ParseParameterDeclarationClause(D, FirstArgAttrs, ParamInfo,
+ EllipsisLoc);
+ else if (RequiresArg)
+ Diag(Tok, diag::err_argument_required_after_attribute);
+
+ HasProto = ParamInfo.size() || getLangOpts().CPlusPlus;
+
+ // If we have the closing ')', eat it.
+ Tracker.consumeClose();
+ RParenLoc = Tracker.getCloseLocation();
+ LocalEndLoc = RParenLoc;
+ EndLoc = RParenLoc;
+
+ if (getLangOpts().CPlusPlus) {
+ // FIXME: Accept these components in any order, and produce fixits to
+ // correct the order if the user gets it wrong. Ideally we should deal
+ // with the pure-specifier in the same way.
+
+ // Parse cv-qualifier-seq[opt].
+ ParseTypeQualifierListOpt(DS, AR_NoAttributesParsed,
+ /*AtomicAllowed*/ false);
+ if (!DS.getSourceRange().getEnd().isInvalid()) {
+ EndLoc = DS.getSourceRange().getEnd();
+ ConstQualifierLoc = DS.getConstSpecLoc();
+ VolatileQualifierLoc = DS.getVolatileSpecLoc();
+ RestrictQualifierLoc = DS.getRestrictSpecLoc();
+ }
+
+ // Parse ref-qualifier[opt].
+ if (ParseRefQualifier(RefQualifierIsLValueRef, RefQualifierLoc))
+ EndLoc = RefQualifierLoc;
+
+ // C++11 [expr.prim.general]p3:
+ // If a declaration declares a member function or member function
+ // template of a class X, the expression this is a prvalue of type
+      //   "pointer to cv-qualifier-seq X" between the optional cv-qualifier-seq
+ // and the end of the function-definition, member-declarator, or
+ // declarator.
+ // FIXME: currently, "static" case isn't handled correctly.
+ bool IsCXX11MemberFunction =
+ getLangOpts().CPlusPlus11 &&
+ D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef &&
+ (D.getContext() == Declarator::MemberContext
+ ? !D.getDeclSpec().isFriendSpecified()
+ : D.getContext() == Declarator::FileContext &&
+ D.getCXXScopeSpec().isValid() &&
+ Actions.CurContext->isRecord());
+ Sema::CXXThisScopeRAII ThisScope(Actions,
+ dyn_cast<CXXRecordDecl>(Actions.CurContext),
+ DS.getTypeQualifiers() |
+ (D.getDeclSpec().isConstexprSpecified() &&
+ !getLangOpts().CPlusPlus14
+ ? Qualifiers::Const : 0),
+ IsCXX11MemberFunction);
+
+ // Parse exception-specification[opt].
+ bool Delayed = D.isFirstDeclarationOfMember() &&
+ D.isFunctionDeclaratorAFunctionDeclaration();
+ if (Delayed && Actions.isLibstdcxxEagerExceptionSpecHack(D) &&
+ GetLookAheadToken(0).is(tok::kw_noexcept) &&
+ GetLookAheadToken(1).is(tok::l_paren) &&
+ GetLookAheadToken(2).is(tok::kw_noexcept) &&
+ GetLookAheadToken(3).is(tok::l_paren) &&
+ GetLookAheadToken(4).is(tok::identifier) &&
+ GetLookAheadToken(4).getIdentifierInfo()->isStr("swap")) {
+ // HACK: We've got an exception-specification
+ // noexcept(noexcept(swap(...)))
+ // or
+ // noexcept(noexcept(swap(...)) && noexcept(swap(...)))
+ // on a 'swap' member function. This is a libstdc++ bug; the lookup
+ // for 'swap' will only find the function we're currently declaring,
+ // whereas it expects to find a non-member swap through ADL. Turn off
+ // delayed parsing to give it a chance to find what it expects.
+ Delayed = false;
+ }
+ ESpecType = tryParseExceptionSpecification(Delayed,
+ ESpecRange,
+ DynamicExceptions,
+ DynamicExceptionRanges,
+ NoexceptExpr,
+ ExceptionSpecTokens);
+ if (ESpecType != EST_None)
+ EndLoc = ESpecRange.getEnd();
+
+ // Parse attribute-specifier-seq[opt]. Per DR 979 and DR 1297, this goes
+ // after the exception-specification.
+ MaybeParseCXX11Attributes(FnAttrs);
+
+ // Parse trailing-return-type[opt].
+ LocalEndLoc = EndLoc;
+ if (getLangOpts().CPlusPlus11 && Tok.is(tok::arrow)) {
+ Diag(Tok, diag::warn_cxx98_compat_trailing_return_type);
+ if (D.getDeclSpec().getTypeSpecType() == TST_auto)
+ StartLoc = D.getDeclSpec().getTypeSpecTypeLoc();
+ LocalEndLoc = Tok.getLocation();
+ SourceRange Range;
+ TrailingReturnType = ParseTrailingReturnType(Range);
+ EndLoc = Range.getEnd();
+ }
+ }
+ }
+
+ // Remember that we parsed a function type, and remember the attributes.
+ D.AddTypeInfo(DeclaratorChunk::getFunction(HasProto,
+ IsAmbiguous,
+ LParenLoc,
+ ParamInfo.data(), ParamInfo.size(),
+ EllipsisLoc, RParenLoc,
+ DS.getTypeQualifiers(),
+ RefQualifierIsLValueRef,
+ RefQualifierLoc, ConstQualifierLoc,
+ VolatileQualifierLoc,
+ RestrictQualifierLoc,
+ /*MutableLoc=*/SourceLocation(),
+ ESpecType, ESpecRange,
+ DynamicExceptions.data(),
+ DynamicExceptionRanges.data(),
+ DynamicExceptions.size(),
+ NoexceptExpr.isUsable() ?
+ NoexceptExpr.get() : nullptr,
+ ExceptionSpecTokens,
+ StartLoc, LocalEndLoc, D,
+ TrailingReturnType),
+ FnAttrs, EndLoc);
+}
+
+/// ParseRefQualifier - Parses a member function ref-qualifier. Returns
+/// true if a ref-qualifier is found.
+bool Parser::ParseRefQualifier(bool &RefQualifierIsLValueRef,
+ SourceLocation &RefQualifierLoc) {
+ if (Tok.isOneOf(tok::amp, tok::ampamp)) {
+ Diag(Tok, getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_ref_qualifier :
+ diag::ext_ref_qualifier);
+
+ RefQualifierIsLValueRef = Tok.is(tok::amp);
+ RefQualifierLoc = ConsumeToken();
+ return true;
+ }
+ return false;
+}
+
+/// isFunctionDeclaratorIdentifierList - This parameter list may have an
+/// identifier list form for a K&R-style function: void foo(a,b,c)
+///
+/// Note that identifier-lists are only allowed for normal declarators, not for
+/// abstract-declarators.
+bool Parser::isFunctionDeclaratorIdentifierList() {
+ return !getLangOpts().CPlusPlus
+ && Tok.is(tok::identifier)
+ && !TryAltiVecVectorToken()
+ // K&R identifier lists can't have typedefs as identifiers, per C99
+ // 6.7.5.3p11.
+ && (TryAnnotateTypeOrScopeToken() || !Tok.is(tok::annot_typename))
+ // Identifier lists follow a really simple grammar: the identifiers can
+ // be followed *only* by a ", identifier" or ")". However, K&R
+ // identifier lists are really rare in the brave new modern world, and
+ // it is very common for someone to typo a type in a non-K&R style
+ // list. If we are presented with something like: "void foo(intptr x,
+ // float y)", we don't want to start parsing the function declarator as
+ // though it is a K&R style declarator just because intptr is an
+ // invalid type.
+ //
+ // To handle this, we check to see if the token after the first
+ // identifier is a "," or ")". Only then do we parse it as an
+ // identifier list.
+ && (NextToken().is(tok::comma) || NextToken().is(tok::r_paren));
+}
+
+/// ParseFunctionDeclaratorIdentifierList - While parsing a function declarator
+/// we found a K&R-style identifier list instead of a typed parameter list.
+///
+/// After returning, ParamInfo will hold the parsed parameters.
+///
+/// identifier-list: [C99 6.7.5]
+/// identifier
+/// identifier-list ',' identifier
+///
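+/// For example, the "(a, b)" in this K&R-style definition is an
+/// identifier-list:
+///   int add(a, b) int a, b; { return a + b; }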
+void Parser::ParseFunctionDeclaratorIdentifierList(
+ Declarator &D,
+ SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo) {
+ // If there was no identifier specified for the declarator, either we are in
+ // an abstract-declarator, or we are in a parameter declarator which was found
+ // to be abstract. In abstract-declarators, identifier lists are not valid:
+ // diagnose this.
+ if (!D.getIdentifier())
+ Diag(Tok, diag::ext_ident_list_in_param);
+
+ // Maintain an efficient lookup of params we have seen so far.
+ llvm::SmallSet<const IdentifierInfo*, 16> ParamsSoFar;
+
+ do {
+ // If this isn't an identifier, report the error and skip until ')'.
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);
+ // Forget we parsed anything.
+ ParamInfo.clear();
+ return;
+ }
+
+ IdentifierInfo *ParmII = Tok.getIdentifierInfo();
+
+ // Reject 'typedef int y; int test(x, y)', but continue parsing.
+ if (Actions.getTypeName(*ParmII, Tok.getLocation(), getCurScope()))
+ Diag(Tok, diag::err_unexpected_typedef_ident) << ParmII;
+
+ // Verify that the argument identifier has not already been mentioned.
+ if (!ParamsSoFar.insert(ParmII).second) {
+ Diag(Tok, diag::err_param_redefinition) << ParmII;
+ } else {
+ // Remember this identifier in ParamInfo.
+ ParamInfo.push_back(DeclaratorChunk::ParamInfo(ParmII,
+ Tok.getLocation(),
+ nullptr));
+ }
+
+ // Eat the identifier.
+ ConsumeToken();
+ // The list continues if we see a comma.
+ } while (TryConsumeToken(tok::comma));
+}
+
+/// ParseParameterDeclarationClause - Parse a (possibly empty) parameter-list
+/// after the opening parenthesis. This function will not parse a K&R-style
+/// identifier list.
+///
+/// D is the declarator being parsed. If FirstArgAttrs is non-null, then the
+/// caller parsed those attributes immediately after the open paren - they
+/// should be considered to be part of the first parameter.
+///
+/// After returning, ParamInfo will hold the parsed parameters. EllipsisLoc will
+/// be the location of the ellipsis, if any was parsed.
+///
+/// parameter-type-list: [C99 6.7.5]
+/// parameter-list
+/// parameter-list ',' '...'
+/// [C++] parameter-list '...'
+///
+/// parameter-list: [C99 6.7.5]
+/// parameter-declaration
+/// parameter-list ',' parameter-declaration
+///
+/// parameter-declaration: [C99 6.7.5]
+/// declaration-specifiers declarator
+/// [C++] declaration-specifiers declarator '=' assignment-expression
+/// [C++11] initializer-clause
+/// [GNU] declaration-specifiers declarator attributes
+/// declaration-specifiers abstract-declarator[opt]
+/// [C++] declaration-specifiers abstract-declarator[opt]
+/// '=' assignment-expression
+/// [GNU] declaration-specifiers abstract-declarator[opt] attributes
+/// [C++11] attribute-specifier-seq parameter-declaration
+///
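+/// For example, this parses everything between the parens in:
+///   void f(int a, const char *s = "s", ...);  // C++: default argument + ellipsis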
+void Parser::ParseParameterDeclarationClause(
+ Declarator &D,
+ ParsedAttributes &FirstArgAttrs,
+ SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
+ SourceLocation &EllipsisLoc) {
+ do {
+ // FIXME: Issue a diagnostic if we parsed an attribute-specifier-seq
+ // before deciding this was a parameter-declaration-clause.
+ if (TryConsumeToken(tok::ellipsis, EllipsisLoc))
+ break;
+
+ // Parse the declaration-specifiers.
+ // Just use the ParsingDeclaration "scope" of the declarator.
+ DeclSpec DS(AttrFactory);
+
+ // Parse any C++11 attributes.
+ MaybeParseCXX11Attributes(DS.getAttributes());
+
+ // Skip any Microsoft attributes before a param.
+ MaybeParseMicrosoftAttributes(DS.getAttributes());
+
+ SourceLocation DSStart = Tok.getLocation();
+
+ // If the caller parsed attributes for the first argument, add them now.
+ // Take them so that we only apply the attributes to the first parameter.
+ // FIXME: If we can leave the attributes in the token stream somehow, we can
+ // get rid of a parameter (FirstArgAttrs) and this statement. It might be
+ // too much hassle.
+ DS.takeAttributesFrom(FirstArgAttrs);
+
+ ParseDeclarationSpecifiers(DS);
+
+ // Parse the declarator. This is "PrototypeContext" or
+ // "LambdaExprParameterContext", because we must accept either
+ // 'declarator' or 'abstract-declarator' here.
+ Declarator ParmDeclarator(DS,
+ D.getContext() == Declarator::LambdaExprContext ?
+ Declarator::LambdaExprParameterContext :
+ Declarator::PrototypeContext);
+ ParseDeclarator(ParmDeclarator);
+
+ // Parse GNU attributes, if present.
+ MaybeParseGNUAttributes(ParmDeclarator);
+
+ // Remember this parsed parameter in ParamInfo.
+ IdentifierInfo *ParmII = ParmDeclarator.getIdentifier();
+
+ // DefArgToks is used when the parsing of default arguments needs
+ // to be delayed.
+ CachedTokens *DefArgToks = nullptr;
+
+ // If no parameter was specified, verify that *something* was specified,
+ // otherwise we have a missing type and identifier.
+ if (DS.isEmpty() && ParmDeclarator.getIdentifier() == nullptr &&
+ ParmDeclarator.getNumTypeObjects() == 0) {
+ // Completely missing, emit error.
+ Diag(DSStart, diag::err_missing_param);
+ } else {
+ // Otherwise, we have something. Add it and let semantic analysis try
+ // to grok it and add the result to the ParamInfo we are building.
+
+ // Last chance to recover from a misplaced ellipsis in an attempted
+ // parameter pack declaration.
+ if (Tok.is(tok::ellipsis) &&
+ (NextToken().isNot(tok::r_paren) ||
+ (!ParmDeclarator.getEllipsisLoc().isValid() &&
+ !Actions.isUnexpandedParameterPackPermitted())) &&
+ Actions.containsUnexpandedParameterPacks(ParmDeclarator))
+ DiagnoseMisplacedEllipsisInDeclarator(ConsumeToken(), ParmDeclarator);
+
+ // Inform the actions module about the parameter declarator, so it gets
+ // added to the current scope.
+ Decl *Param = Actions.ActOnParamDeclarator(getCurScope(), ParmDeclarator);
+ // Parse the default argument, if any. We parse the default
+ // arguments in all dialects; the semantic analysis in
+ // ActOnParamDefaultArgument will reject the default argument in
+ // C.
+ if (Tok.is(tok::equal)) {
+ SourceLocation EqualLoc = Tok.getLocation();
+
+ // Parse the default argument
+ if (D.getContext() == Declarator::MemberContext) {
+ // If we're inside a class definition, cache the tokens
+ // corresponding to the default argument. We'll actually parse
+ // them when we see the end of the class definition.
+ // FIXME: Can we use a smart pointer for Toks?
+ DefArgToks = new CachedTokens;
+
+ SourceLocation ArgStartLoc = NextToken().getLocation();
+ if (!ConsumeAndStoreInitializer(*DefArgToks, CIK_DefaultArgument)) {
+ delete DefArgToks;
+ DefArgToks = nullptr;
+ Actions.ActOnParamDefaultArgumentError(Param, EqualLoc);
+ } else {
+ Actions.ActOnParamUnparsedDefaultArgument(Param, EqualLoc,
+ ArgStartLoc);
+ }
+ } else {
+ // Consume the '='.
+ ConsumeToken();
+
+ // The argument isn't actually potentially evaluated unless it is
+ // used.
+ EnterExpressionEvaluationContext Eval(Actions,
+ Sema::PotentiallyEvaluatedIfUsed,
+ Param);
+
+ ExprResult DefArgResult;
+ if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+ DefArgResult = ParseBraceInitializer();
+ } else
+ DefArgResult = ParseAssignmentExpression();
+ DefArgResult = Actions.CorrectDelayedTyposInExpr(DefArgResult);
+ if (DefArgResult.isInvalid()) {
+ Actions.ActOnParamDefaultArgumentError(Param, EqualLoc);
+ SkipUntil(tok::comma, tok::r_paren, StopAtSemi | StopBeforeMatch);
+ } else {
+ // Inform the actions module about the default argument
+ Actions.ActOnParamDefaultArgument(Param, EqualLoc,
+ DefArgResult.get());
+ }
+ }
+ }
+
+ ParamInfo.push_back(DeclaratorChunk::ParamInfo(ParmII,
+ ParmDeclarator.getIdentifierLoc(),
+ Param, DefArgToks));
+ }
+
+ if (TryConsumeToken(tok::ellipsis, EllipsisLoc)) {
+ if (!getLangOpts().CPlusPlus) {
+ // We have ellipsis without a preceding ',', which is ill-formed
+ // in C. Complain and provide the fix.
+ Diag(EllipsisLoc, diag::err_missing_comma_before_ellipsis)
+ << FixItHint::CreateInsertion(EllipsisLoc, ", ");
+ } else if (ParmDeclarator.getEllipsisLoc().isValid() ||
+ Actions.containsUnexpandedParameterPacks(ParmDeclarator)) {
+ // It looks like this was supposed to be a parameter pack. Warn and
+ // point out where the ellipsis should have gone.
+ SourceLocation ParmEllipsis = ParmDeclarator.getEllipsisLoc();
+ Diag(EllipsisLoc, diag::warn_misplaced_ellipsis_vararg)
+ << ParmEllipsis.isValid() << ParmEllipsis;
+ if (ParmEllipsis.isValid()) {
+ Diag(ParmEllipsis,
+ diag::note_misplaced_ellipsis_vararg_existing_ellipsis);
+ } else {
+ Diag(ParmDeclarator.getIdentifierLoc(),
+ diag::note_misplaced_ellipsis_vararg_add_ellipsis)
+ << FixItHint::CreateInsertion(ParmDeclarator.getIdentifierLoc(),
+ "...")
+ << !ParmDeclarator.hasName();
+ }
+ Diag(EllipsisLoc, diag::note_misplaced_ellipsis_vararg_add_comma)
+ << FixItHint::CreateInsertion(EllipsisLoc, ", ");
+ }
+
+ // We can't have any more parameters after an ellipsis.
+ break;
+ }
+
+ // If the next token is a comma, consume it and keep reading arguments.
+ } while (TryConsumeToken(tok::comma));
+}
+
+/// [C90] direct-declarator '[' constant-expression[opt] ']'
+/// [C99] direct-declarator '[' type-qual-list[opt] assignment-expr[opt] ']'
+/// [C99] direct-declarator '[' 'static' type-qual-list[opt] assign-expr ']'
+/// [C99] direct-declarator '[' type-qual-list 'static' assignment-expr ']'
+/// [C99] direct-declarator '[' type-qual-list[opt] '*' ']'
+/// [C++11] direct-declarator '[' constant-expression[opt] ']'
+/// attribute-specifier-seq[opt]
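+///
+/// For illustration (informal examples with placeholder names):
+/// \code
+///   int a[];                   // incomplete array
+///   int b[4];                  // constant size (the fast path below)
+///   void f(int c[static 10]);  // C99 'static' in an array parameter
+///   void g(int d[const *]);    // C99 VLA of unspecified size, qualified
+/// \endcode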
+void Parser::ParseBracketDeclarator(Declarator &D) {
+ if (CheckProhibitedCXX11Attribute())
+ return;
+
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+
+ // C array syntax has many features, but by-far the most common is [] and [4].
+ // This code does a fast path to handle some of the most obvious cases.
+ if (Tok.getKind() == tok::r_square) {
+ T.consumeClose();
+ ParsedAttributes attrs(AttrFactory);
+ MaybeParseCXX11Attributes(attrs);
+
+ // Remember that we parsed the empty array type.
+ D.AddTypeInfo(DeclaratorChunk::getArray(0, false, false, nullptr,
+ T.getOpenLocation(),
+ T.getCloseLocation()),
+ attrs, T.getCloseLocation());
+ return;
+ } else if (Tok.getKind() == tok::numeric_constant &&
+ GetLookAheadToken(1).is(tok::r_square)) {
+ // [4] is very common. Parse the numeric constant expression.
+ ExprResult ExprRes(Actions.ActOnNumericConstant(Tok, getCurScope()));
+ ConsumeToken();
+
+ T.consumeClose();
+ ParsedAttributes attrs(AttrFactory);
+ MaybeParseCXX11Attributes(attrs);
+
+  // Remember that we parsed an array type, and remember its features.
+ D.AddTypeInfo(DeclaratorChunk::getArray(0, false, false,
+ ExprRes.get(),
+ T.getOpenLocation(),
+ T.getCloseLocation()),
+ attrs, T.getCloseLocation());
+ return;
+ }
+
+ // If valid, this location is the position where we read the 'static' keyword.
+ SourceLocation StaticLoc;
+ TryConsumeToken(tok::kw_static, StaticLoc);
+
+ // If there is a type-qualifier-list, read it now.
+ // Type qualifiers in an array subscript are a C99 feature.
+ DeclSpec DS(AttrFactory);
+ ParseTypeQualifierListOpt(DS, AR_CXX11AttributesParsed);
+
+ // If we haven't already read 'static', check to see if there is one after the
+ // type-qualifier-list.
+ if (!StaticLoc.isValid())
+ TryConsumeToken(tok::kw_static, StaticLoc);
+
+ // Handle "direct-declarator [ type-qual-list[opt] * ]".
+ bool isStar = false;
+ ExprResult NumElements;
+
+ // Handle the case where we have '[*]' as the array size. However, a leading
+ // star could be the start of an expression, for example 'X[*p + 4]'. Verify
+ // the token after the star is a ']'. Since stars in arrays are
+ // infrequent, use of lookahead is not costly here.
+ if (Tok.is(tok::star) && GetLookAheadToken(1).is(tok::r_square)) {
+ ConsumeToken(); // Eat the '*'.
+
+ if (StaticLoc.isValid()) {
+ Diag(StaticLoc, diag::err_unspecified_vla_size_with_static);
+ StaticLoc = SourceLocation(); // Drop the static.
+ }
+ isStar = true;
+ } else if (Tok.isNot(tok::r_square)) {
+ // Note, in C89, this production uses the constant-expr production instead
+ // of assignment-expr. The only difference is that assignment-expr allows
+ // things like '=' and '*='. Sema rejects these in C89 mode because they
+ // are not i-c-e's, so we don't need to distinguish between the two here.
+
+ // Parse the constant-expression or assignment-expression now (depending
+ // on dialect).
+ if (getLangOpts().CPlusPlus) {
+ NumElements = ParseConstantExpression();
+ } else {
+ EnterExpressionEvaluationContext Unevaluated(Actions,
+ Sema::ConstantEvaluated);
+ NumElements =
+ Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+ }
+ } else {
+ if (StaticLoc.isValid()) {
+ Diag(StaticLoc, diag::err_unspecified_size_with_static);
+ StaticLoc = SourceLocation(); // Drop the static.
+ }
+ }
+
+ // If there was an error parsing the assignment-expression, recover.
+ if (NumElements.isInvalid()) {
+ D.setInvalidType(true);
+ // If the expression was invalid, skip it.
+ SkipUntil(tok::r_square, StopAtSemi);
+ return;
+ }
+
+ T.consumeClose();
+
+ ParsedAttributes attrs(AttrFactory);
+ MaybeParseCXX11Attributes(attrs);
+
+  // Remember that we parsed an array type, and remember its features.
+ D.AddTypeInfo(DeclaratorChunk::getArray(DS.getTypeQualifiers(),
+ StaticLoc.isValid(), isStar,
+ NumElements.get(),
+ T.getOpenLocation(),
+ T.getCloseLocation()),
+ attrs, T.getCloseLocation());
+}
+
+/// Diagnose brackets before an identifier.
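+/// For example (an informal sketch of the recovery, placeholder names): given
+/// 'int [5] arr;', this parses the misplaced '[5]', recovers as if the user
+/// had written 'int arr[5];', and emits a fix-it to move the brackets.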
+void Parser::ParseMisplacedBracketDeclarator(Declarator &D) {
+ assert(Tok.is(tok::l_square) && "Missing opening bracket");
+ assert(!D.mayOmitIdentifier() && "Declarator cannot omit identifier");
+
+ SourceLocation StartBracketLoc = Tok.getLocation();
+ Declarator TempDeclarator(D.getDeclSpec(), D.getContext());
+
+ while (Tok.is(tok::l_square)) {
+ ParseBracketDeclarator(TempDeclarator);
+ }
+
+ // Stuff the location of the start of the brackets into the Declarator.
+ // The diagnostics from ParseDirectDeclarator will make more sense if
+ // they use this location instead.
+ if (Tok.is(tok::semi))
+ D.getName().EndLocation = StartBracketLoc;
+
+ SourceLocation SuggestParenLoc = Tok.getLocation();
+
+ // Now that the brackets are removed, try parsing the declarator again.
+ ParseDeclaratorInternal(D, &Parser::ParseDirectDeclarator);
+
+  // If something went wrong parsing the brackets, ParseBracketDeclarator has
+  // already emitted an error, and we don't need to emit another one here.
+ if (TempDeclarator.getNumTypeObjects() == 0)
+ return;
+
+ // Determine if parens will need to be suggested in the diagnostic.
+ bool NeedParens = false;
+ if (D.getNumTypeObjects() != 0) {
+ switch (D.getTypeObject(D.getNumTypeObjects() - 1).Kind) {
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::BlockPointer:
+ case DeclaratorChunk::MemberPointer:
+ NeedParens = true;
+ break;
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::Function:
+ case DeclaratorChunk::Paren:
+ break;
+ }
+ }
+
+ if (NeedParens) {
+ // Create a DeclaratorChunk for the inserted parens.
+ ParsedAttributes attrs(AttrFactory);
+ SourceLocation EndLoc = PP.getLocForEndOfToken(D.getLocEnd());
+ D.AddTypeInfo(DeclaratorChunk::getParen(SuggestParenLoc, EndLoc), attrs,
+ SourceLocation());
+ }
+
+  // Add the bracket info back to the end of the Declarator.
+ for (unsigned i = 0, e = TempDeclarator.getNumTypeObjects(); i < e; ++i) {
+ const DeclaratorChunk &Chunk = TempDeclarator.getTypeObject(i);
+ ParsedAttributes attrs(AttrFactory);
+ attrs.set(Chunk.Common.AttrList);
+ D.AddTypeInfo(Chunk, attrs, SourceLocation());
+ }
+
+ // The missing identifier would have been diagnosed in ParseDirectDeclarator.
+ // If parentheses are required, always suggest them.
+ if (!D.getIdentifier() && !NeedParens)
+ return;
+
+ SourceLocation EndBracketLoc = TempDeclarator.getLocEnd();
+
+ // Generate the move bracket error message.
+ SourceRange BracketRange(StartBracketLoc, EndBracketLoc);
+ SourceLocation EndLoc = PP.getLocForEndOfToken(D.getLocEnd());
+
+ if (NeedParens) {
+ Diag(EndLoc, diag::err_brackets_go_after_unqualified_id)
+ << getLangOpts().CPlusPlus
+ << FixItHint::CreateInsertion(SuggestParenLoc, "(")
+ << FixItHint::CreateInsertion(EndLoc, ")")
+ << FixItHint::CreateInsertionFromRange(
+ EndLoc, CharSourceRange(BracketRange, true))
+ << FixItHint::CreateRemoval(BracketRange);
+ } else {
+ Diag(EndLoc, diag::err_brackets_go_after_unqualified_id)
+ << getLangOpts().CPlusPlus
+ << FixItHint::CreateInsertionFromRange(
+ EndLoc, CharSourceRange(BracketRange, true))
+ << FixItHint::CreateRemoval(BracketRange);
+ }
+}
+
+/// [GNU] typeof-specifier:
+///           typeof ( expression )
+/// typeof ( type-name )
+/// [GNU/C++] typeof unary-expression
+///
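+/// For illustration (informal GNU-extension examples, placeholder names):
+/// \code
+///   typeof(p) q;      // 'typeof ( expression )': q has the type of p
+///   typeof(int *) r;  // 'typeof ( type-name )': r has type 'int *'
+/// \endcode
+///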
+void Parser::ParseTypeofSpecifier(DeclSpec &DS) {
+ assert(Tok.is(tok::kw_typeof) && "Not a typeof specifier");
+ Token OpTok = Tok;
+ SourceLocation StartLoc = ConsumeToken();
+
+ const bool hasParens = Tok.is(tok::l_paren);
+
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated,
+ Sema::ReuseLambdaContextDecl);
+
+ bool isCastExpr;
+ ParsedType CastTy;
+ SourceRange CastRange;
+ ExprResult Operand = Actions.CorrectDelayedTyposInExpr(
+ ParseExprAfterUnaryExprOrTypeTrait(OpTok, isCastExpr, CastTy, CastRange));
+ if (hasParens)
+ DS.setTypeofParensRange(CastRange);
+
+ if (CastRange.getEnd().isInvalid())
+ // FIXME: Not accurate, the range gets one token more than it should.
+ DS.SetRangeEnd(Tok.getLocation());
+ else
+ DS.SetRangeEnd(CastRange.getEnd());
+
+ if (isCastExpr) {
+ if (!CastTy) {
+ DS.SetTypeSpecError();
+ return;
+ }
+
+ const char *PrevSpec = nullptr;
+ unsigned DiagID;
+ // Check for duplicate type specifiers (e.g. "int typeof(int)").
+ if (DS.SetTypeSpecType(DeclSpec::TST_typeofType, StartLoc, PrevSpec,
+ DiagID, CastTy,
+ Actions.getASTContext().getPrintingPolicy()))
+ Diag(StartLoc, DiagID) << PrevSpec;
+ return;
+ }
+
+  // If we get here, the operand to the typeof was an expression.
+ if (Operand.isInvalid()) {
+ DS.SetTypeSpecError();
+ return;
+ }
+
+ // We might need to transform the operand if it is potentially evaluated.
+ Operand = Actions.HandleExprEvaluationContextForTypeof(Operand.get());
+ if (Operand.isInvalid()) {
+ DS.SetTypeSpecError();
+ return;
+ }
+
+ const char *PrevSpec = nullptr;
+ unsigned DiagID;
+ // Check for duplicate type specifiers (e.g. "int typeof(int)").
+ if (DS.SetTypeSpecType(DeclSpec::TST_typeofExpr, StartLoc, PrevSpec,
+ DiagID, Operand.get(),
+ Actions.getASTContext().getPrintingPolicy()))
+ Diag(StartLoc, DiagID) << PrevSpec;
+}
+
+/// [C11] atomic-specifier:
+/// _Atomic ( type-name )
+///
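+/// For illustration (informal example): '_Atomic(int) counter;' uses the C11
+/// atomic-specifier form; the bare '_Atomic' qualifier is handled elsewhere.
+///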
+void Parser::ParseAtomicSpecifier(DeclSpec &DS) {
+ assert(Tok.is(tok::kw__Atomic) && NextToken().is(tok::l_paren) &&
+ "Not an atomic specifier");
+
+ SourceLocation StartLoc = ConsumeToken();
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.consumeOpen())
+ return;
+
+ TypeResult Result = ParseTypeName();
+ if (Result.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+
+ // Match the ')'
+ T.consumeClose();
+
+ if (T.getCloseLocation().isInvalid())
+ return;
+
+ DS.setTypeofParensRange(T.getRange());
+ DS.SetRangeEnd(T.getCloseLocation());
+
+ const char *PrevSpec = nullptr;
+ unsigned DiagID;
+ if (DS.SetTypeSpecType(DeclSpec::TST_atomic, StartLoc, PrevSpec,
+ DiagID, Result.get(),
+ Actions.getASTContext().getPrintingPolicy()))
+ Diag(StartLoc, DiagID) << PrevSpec;
+}
+
+/// TryAltiVecVectorTokenOutOfLine - Out of line body that should only be called
+/// from TryAltiVecVectorToken.
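+/// For illustration (informal): in AltiVec mode, 'vector' followed by a type
+/// keyword, or by 'pixel' or 'bool', as in 'vector unsigned int v;', is
+/// treated here as the '__vector' keyword.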
+bool Parser::TryAltiVecVectorTokenOutOfLine() {
+ Token Next = NextToken();
+ switch (Next.getKind()) {
+ default: return false;
+ case tok::kw_short:
+ case tok::kw_long:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw_void:
+ case tok::kw_char:
+ case tok::kw_int:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_bool:
+ case tok::kw___bool:
+ case tok::kw___pixel:
+ Tok.setKind(tok::kw___vector);
+ return true;
+ case tok::identifier:
+ if (Next.getIdentifierInfo() == Ident_pixel) {
+ Tok.setKind(tok::kw___vector);
+ return true;
+ }
+ if (Next.getIdentifierInfo() == Ident_bool) {
+ Tok.setKind(tok::kw___vector);
+ return true;
+ }
+ return false;
+ }
+}
+
+bool Parser::TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
+ const char *&PrevSpec, unsigned &DiagID,
+ bool &isInvalid) {
+ const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
+ if (Tok.getIdentifierInfo() == Ident_vector) {
+ Token Next = NextToken();
+ switch (Next.getKind()) {
+ case tok::kw_short:
+ case tok::kw_long:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw_void:
+ case tok::kw_char:
+ case tok::kw_int:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_bool:
+ case tok::kw___bool:
+ case tok::kw___pixel:
+ isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID, Policy);
+ return true;
+ case tok::identifier:
+ if (Next.getIdentifierInfo() == Ident_pixel) {
+ isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID,Policy);
+ return true;
+ }
+ if (Next.getIdentifierInfo() == Ident_bool) {
+ isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID,Policy);
+ return true;
+ }
+ break;
+ default:
+ break;
+ }
+ } else if ((Tok.getIdentifierInfo() == Ident_pixel) &&
+ DS.isTypeAltiVecVector()) {
+ isInvalid = DS.SetTypeAltiVecPixel(true, Loc, PrevSpec, DiagID, Policy);
+ return true;
+ } else if ((Tok.getIdentifierInfo() == Ident_bool) &&
+ DS.isTypeAltiVecVector()) {
+ isInvalid = DS.SetTypeAltiVecBool(true, Loc, PrevSpec, DiagID, Policy);
+ return true;
+ }
+ return false;
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
new file mode 100644
index 0000000..a4de975
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
@@ -0,0 +1,3942 @@
+//===--- ParseDeclCXX.cpp - C++ Declaration Parsing -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the C++ Declaration portions of the Parser interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "RAIIObjectsForParser.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Basic/Attributes.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/OperatorKinds.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/PrettyDeclStackTrace.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+
+/// ParseNamespace - We know that the current token is a namespace keyword. This
+/// may either be a top level namespace or a block-level namespace alias. If
+/// there was an inline keyword, it has already been parsed.
+///
+/// namespace-definition: [C++ 7.3: basic.namespace]
+/// named-namespace-definition
+/// unnamed-namespace-definition
+///
+/// unnamed-namespace-definition:
+/// 'inline'[opt] 'namespace' attributes[opt] '{' namespace-body '}'
+///
+/// named-namespace-definition:
+/// original-namespace-definition
+/// extension-namespace-definition
+///
+/// original-namespace-definition:
+/// 'inline'[opt] 'namespace' identifier attributes[opt]
+/// '{' namespace-body '}'
+///
+/// extension-namespace-definition:
+/// 'inline'[opt] 'namespace' original-namespace-name
+/// '{' namespace-body '}'
+///
+/// namespace-alias-definition: [C++ 7.3.2: namespace.alias]
+/// 'namespace' identifier '=' qualified-namespace-specifier ';'
+///
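+/// For illustration (informal examples with placeholder names):
+/// \code
+///   namespace N { int x; }    // named-namespace-definition
+///   inline namespace V1 { }   // inline original-namespace-definition
+///   namespace { int hidden; } // unnamed-namespace-definition
+///   namespace M = N;          // namespace-alias-definition
+/// \endcode
+///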
+Parser::DeclGroupPtrTy Parser::ParseNamespace(unsigned Context,
+ SourceLocation &DeclEnd,
+ SourceLocation InlineLoc) {
+ assert(Tok.is(tok::kw_namespace) && "Not a namespace!");
+ SourceLocation NamespaceLoc = ConsumeToken(); // eat the 'namespace'.
+ ObjCDeclContextSwitch ObjCDC(*this);
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteNamespaceDecl(getCurScope());
+ cutOffParsing();
+ return DeclGroupPtrTy();
+ }
+
+ SourceLocation IdentLoc;
+ IdentifierInfo *Ident = nullptr;
+ std::vector<SourceLocation> ExtraIdentLoc;
+ std::vector<IdentifierInfo*> ExtraIdent;
+ std::vector<SourceLocation> ExtraNamespaceLoc;
+
+ ParsedAttributesWithRange attrs(AttrFactory);
+ SourceLocation attrLoc;
+ if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier()) {
+ if (!getLangOpts().CPlusPlus1z)
+ Diag(Tok.getLocation(), diag::warn_cxx14_compat_attribute)
+ << 0 /*namespace*/;
+ attrLoc = Tok.getLocation();
+ ParseCXX11Attributes(attrs);
+ }
+
+ if (Tok.is(tok::identifier)) {
+ Ident = Tok.getIdentifierInfo();
+ IdentLoc = ConsumeToken(); // eat the identifier.
+ while (Tok.is(tok::coloncolon) && NextToken().is(tok::identifier)) {
+ ExtraNamespaceLoc.push_back(ConsumeToken());
+ ExtraIdent.push_back(Tok.getIdentifierInfo());
+ ExtraIdentLoc.push_back(ConsumeToken());
+ }
+ }
+
+ // A nested namespace definition cannot have attributes.
+ if (!ExtraNamespaceLoc.empty() && attrLoc.isValid())
+ Diag(attrLoc, diag::err_unexpected_nested_namespace_attribute);
+
+  // Read GNU attributes, if present.
+ if (Tok.is(tok::kw___attribute)) {
+ attrLoc = Tok.getLocation();
+ ParseGNUAttributes(attrs);
+ }
+
+ if (Tok.is(tok::equal)) {
+ if (!Ident) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ // Skip to end of the definition and eat the ';'.
+ SkipUntil(tok::semi);
+ return DeclGroupPtrTy();
+ }
+ if (attrLoc.isValid())
+ Diag(attrLoc, diag::err_unexpected_namespace_attributes_alias);
+ if (InlineLoc.isValid())
+ Diag(InlineLoc, diag::err_inline_namespace_alias)
+ << FixItHint::CreateRemoval(InlineLoc);
+ Decl *NSAlias = ParseNamespaceAlias(NamespaceLoc, IdentLoc, Ident, DeclEnd);
+ return Actions.ConvertDeclToDeclGroup(NSAlias);
+  }
+
+ BalancedDelimiterTracker T(*this, tok::l_brace);
+ if (T.consumeOpen()) {
+ if (Ident)
+ Diag(Tok, diag::err_expected) << tok::l_brace;
+ else
+ Diag(Tok, diag::err_expected_either) << tok::identifier << tok::l_brace;
+ return DeclGroupPtrTy();
+ }
+
+ if (getCurScope()->isClassScope() || getCurScope()->isTemplateParamScope() ||
+ getCurScope()->isInObjcMethodScope() || getCurScope()->getBlockParent() ||
+ getCurScope()->getFnParent()) {
+ Diag(T.getOpenLocation(), diag::err_namespace_nonnamespace_scope);
+ SkipUntil(tok::r_brace);
+ return DeclGroupPtrTy();
+ }
+
+ if (ExtraIdent.empty()) {
+ // Normal namespace definition, not a nested-namespace-definition.
+ } else if (InlineLoc.isValid()) {
+ Diag(InlineLoc, diag::err_inline_nested_namespace_definition);
+ } else if (getLangOpts().CPlusPlus1z) {
+ Diag(ExtraNamespaceLoc[0],
+ diag::warn_cxx14_compat_nested_namespace_definition);
+ } else {
+ TentativeParsingAction TPA(*this);
+ SkipUntil(tok::r_brace, StopBeforeMatch);
+ Token rBraceToken = Tok;
+ TPA.Revert();
+
+ if (!rBraceToken.is(tok::r_brace)) {
+ Diag(ExtraNamespaceLoc[0], diag::ext_nested_namespace_definition)
+ << SourceRange(ExtraNamespaceLoc.front(), ExtraIdentLoc.back());
+ } else {
+ std::string NamespaceFix;
+ for (std::vector<IdentifierInfo*>::iterator I = ExtraIdent.begin(),
+ E = ExtraIdent.end(); I != E; ++I) {
+ NamespaceFix += " { namespace ";
+ NamespaceFix += (*I)->getName();
+ }
+
+ std::string RBraces;
+ for (unsigned i = 0, e = ExtraIdent.size(); i != e; ++i)
+ RBraces += "} ";
+
+ Diag(ExtraNamespaceLoc[0], diag::ext_nested_namespace_definition)
+ << FixItHint::CreateReplacement(SourceRange(ExtraNamespaceLoc.front(),
+ ExtraIdentLoc.back()),
+ NamespaceFix)
+ << FixItHint::CreateInsertion(rBraceToken.getLocation(), RBraces);
+ }
+ }
+
+ // If we're still good, complain about inline namespaces in non-C++0x now.
+ if (InlineLoc.isValid())
+ Diag(InlineLoc, getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_inline_namespace : diag::ext_inline_namespace);
+
+ // Enter a scope for the namespace.
+ ParseScope NamespaceScope(this, Scope::DeclScope);
+
+ UsingDirectiveDecl *ImplicitUsingDirectiveDecl = nullptr;
+ Decl *NamespcDecl =
+ Actions.ActOnStartNamespaceDef(getCurScope(), InlineLoc, NamespaceLoc,
+ IdentLoc, Ident, T.getOpenLocation(),
+ attrs.getList(), ImplicitUsingDirectiveDecl);
+
+ PrettyDeclStackTraceEntry CrashInfo(Actions, NamespcDecl, NamespaceLoc,
+ "parsing namespace");
+
+ // Parse the contents of the namespace. This includes parsing recovery on
+ // any improperly nested namespaces.
+ ParseInnerNamespace(ExtraIdentLoc, ExtraIdent, ExtraNamespaceLoc, 0,
+ InlineLoc, attrs, T);
+
+ // Leave the namespace scope.
+ NamespaceScope.Exit();
+
+ DeclEnd = T.getCloseLocation();
+ Actions.ActOnFinishNamespaceDef(NamespcDecl, DeclEnd);
+
+ return Actions.ConvertDeclToDeclGroup(NamespcDecl,
+ ImplicitUsingDirectiveDecl);
+}
+
+/// ParseInnerNamespace - Parse the contents of a namespace.
+void Parser::ParseInnerNamespace(std::vector<SourceLocation> &IdentLoc,
+ std::vector<IdentifierInfo *> &Ident,
+ std::vector<SourceLocation> &NamespaceLoc,
+ unsigned int index, SourceLocation &InlineLoc,
+ ParsedAttributes &attrs,
+ BalancedDelimiterTracker &Tracker) {
+ if (index == Ident.size()) {
+ while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
+ Tok.isNot(tok::eof)) {
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX11Attributes(attrs);
+ MaybeParseMicrosoftAttributes(attrs);
+ ParseExternalDeclaration(attrs);
+ }
+
+    // The caller consumed the matching open delimiter; we simply consume
+    // the close for it.
+ Tracker.consumeClose();
+
+ return;
+ }
+
+ // Handle a nested namespace definition.
+ // FIXME: Preserve the source information through to the AST rather than
+ // desugaring it here.
+ ParseScope NamespaceScope(this, Scope::DeclScope);
+ UsingDirectiveDecl *ImplicitUsingDirectiveDecl = nullptr;
+ Decl *NamespcDecl =
+ Actions.ActOnStartNamespaceDef(getCurScope(), SourceLocation(),
+ NamespaceLoc[index], IdentLoc[index],
+ Ident[index], Tracker.getOpenLocation(),
+ attrs.getList(), ImplicitUsingDirectiveDecl);
+ assert(!ImplicitUsingDirectiveDecl &&
+ "nested namespace definition cannot define anonymous namespace");
+
+ ParseInnerNamespace(IdentLoc, Ident, NamespaceLoc, ++index, InlineLoc,
+ attrs, Tracker);
+
+ NamespaceScope.Exit();
+ Actions.ActOnFinishNamespaceDef(NamespcDecl, Tracker.getCloseLocation());
+}
+
+/// ParseNamespaceAlias - Parse the part after the '=' in a namespace
+/// alias definition.
+///
+Decl *Parser::ParseNamespaceAlias(SourceLocation NamespaceLoc,
+ SourceLocation AliasLoc,
+ IdentifierInfo *Alias,
+ SourceLocation &DeclEnd) {
+ assert(Tok.is(tok::equal) && "Not equal token");
+
+ ConsumeToken(); // eat the '='.
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteNamespaceAliasDecl(getCurScope());
+ cutOffParsing();
+ return nullptr;
+ }
+
+ CXXScopeSpec SS;
+ // Parse (optional) nested-name-specifier.
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
+
+ if (SS.isInvalid() || Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_namespace_name);
+ // Skip to end of the definition and eat the ';'.
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
+
+ // Parse identifier.
+ IdentifierInfo *Ident = Tok.getIdentifierInfo();
+ SourceLocation IdentLoc = ConsumeToken();
+
+ // Eat the ';'.
+ DeclEnd = Tok.getLocation();
+ if (ExpectAndConsume(tok::semi, diag::err_expected_semi_after_namespace_name))
+ SkipUntil(tok::semi);
+
+ return Actions.ActOnNamespaceAliasDef(getCurScope(), NamespaceLoc, AliasLoc,
+ Alias, SS, IdentLoc, Ident);
+}
+
+/// ParseLinkage - We know that the current token is a string_literal
+/// and just before that, that extern was seen.
+///
+/// linkage-specification: [C++ 7.5p2: dcl.link]
+/// 'extern' string-literal '{' declaration-seq[opt] '}'
+/// 'extern' string-literal declaration
+///
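+/// For illustration (informal examples with placeholder names):
+/// \code
+///   extern "C" { void f(void); }  // braced declaration-seq form
+///   extern "C" int g(int);        // single-declaration form
+/// \endcode
+///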
+Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, unsigned Context) {
+ assert(isTokenStringLiteral() && "Not a string literal!");
+ ExprResult Lang = ParseStringLiteralExpression(false);
+
+ ParseScope LinkageScope(this, Scope::DeclScope);
+ Decl *LinkageSpec =
+ Lang.isInvalid()
+ ? nullptr
+ : Actions.ActOnStartLinkageSpecification(
+ getCurScope(), DS.getSourceRange().getBegin(), Lang.get(),
+ Tok.is(tok::l_brace) ? Tok.getLocation() : SourceLocation());
+
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX11Attributes(attrs);
+ MaybeParseMicrosoftAttributes(attrs);
+
+ if (Tok.isNot(tok::l_brace)) {
+ // Reset the source range in DS, as the leading "extern"
+ // does not really belong to the inner declaration ...
+ DS.SetRangeStart(SourceLocation());
+ DS.SetRangeEnd(SourceLocation());
+ // ... but anyway remember that such an "extern" was seen.
+ DS.setExternInLinkageSpec(true);
+ ParseExternalDeclaration(attrs, &DS);
+ return LinkageSpec ? Actions.ActOnFinishLinkageSpecification(
+ getCurScope(), LinkageSpec, SourceLocation())
+ : nullptr;
+ }
+
+ DS.abort();
+
+ ProhibitAttributes(attrs);
+
+ BalancedDelimiterTracker T(*this, tok::l_brace);
+ T.consumeOpen();
+
+ unsigned NestedModules = 0;
+ while (true) {
+ switch (Tok.getKind()) {
+ case tok::annot_module_begin:
+ ++NestedModules;
+ ParseTopLevelDecl();
+ continue;
+
+ case tok::annot_module_end:
+ if (!NestedModules)
+ break;
+ --NestedModules;
+ ParseTopLevelDecl();
+ continue;
+
+ case tok::annot_module_include:
+ ParseTopLevelDecl();
+ continue;
+
+ case tok::eof:
+ break;
+
+ case tok::r_brace:
+ if (!NestedModules)
+ break;
+ // Fall through.
+ default:
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX11Attributes(attrs);
+ MaybeParseMicrosoftAttributes(attrs);
+ ParseExternalDeclaration(attrs);
+ continue;
+ }
+
+ break;
+ }
+
+ T.consumeClose();
+ return LinkageSpec ? Actions.ActOnFinishLinkageSpecification(
+ getCurScope(), LinkageSpec, T.getCloseLocation())
+ : nullptr;
+}
+
+/// ParseUsingDirectiveOrDeclaration - Parse C++ using using-declaration or
+/// using-directive. Assumes that current token is 'using'.
+Decl *Parser::ParseUsingDirectiveOrDeclaration(unsigned Context,
+ const ParsedTemplateInfo &TemplateInfo,
+ SourceLocation &DeclEnd,
+ ParsedAttributesWithRange &attrs,
+ Decl **OwnedType) {
+ assert(Tok.is(tok::kw_using) && "Not using token");
+ ObjCDeclContextSwitch ObjCDC(*this);
+
+ // Eat 'using'.
+ SourceLocation UsingLoc = ConsumeToken();
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteUsing(getCurScope());
+ cutOffParsing();
+ return nullptr;
+ }
+
+ // 'using namespace' means this is a using-directive.
+ if (Tok.is(tok::kw_namespace)) {
+ // Template parameters are always an error here.
+ if (TemplateInfo.Kind) {
+ SourceRange R = TemplateInfo.getSourceRange();
+ Diag(UsingLoc, diag::err_templated_using_directive_declaration)
+ << 0 /* directive */ << R << FixItHint::CreateRemoval(R);
+ }
+
+ return ParseUsingDirective(Context, UsingLoc, DeclEnd, attrs);
+ }
+
+ // Otherwise, it must be a using-declaration or an alias-declaration.
+
+ // Using declarations can't have attributes.
+ ProhibitAttributes(attrs);
+
+ return ParseUsingDeclaration(Context, TemplateInfo, UsingLoc, DeclEnd,
+ AS_none, OwnedType);
+}
+
+/// ParseUsingDirective - Parse C++ using-directive, assumes
+/// that current token is 'namespace' and 'using' was already parsed.
+///
+/// using-directive: [C++ 7.3.p4: namespace.udir]
+/// 'using' 'namespace' ::[opt] nested-name-specifier[opt]
+/// namespace-name ;
+/// [GNU] using-directive:
+/// 'using' 'namespace' ::[opt] nested-name-specifier[opt]
+/// namespace-name attributes[opt] ;
+///
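+/// For illustration, an informal example (placeholder names):
+/// 'using namespace N;' or, with a nested-name-specifier,
+/// 'using namespace ::A::B;'.
+///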
+Decl *Parser::ParseUsingDirective(unsigned Context,
+ SourceLocation UsingLoc,
+ SourceLocation &DeclEnd,
+ ParsedAttributes &attrs) {
+ assert(Tok.is(tok::kw_namespace) && "Not 'namespace' token");
+
+ // Eat 'namespace'.
+ SourceLocation NamespcLoc = ConsumeToken();
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteUsingDirective(getCurScope());
+ cutOffParsing();
+ return nullptr;
+ }
+
+ CXXScopeSpec SS;
+ // Parse (optional) nested-name-specifier.
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
+
+ IdentifierInfo *NamespcName = nullptr;
+ SourceLocation IdentLoc = SourceLocation();
+
+ // Parse namespace-name.
+ if (SS.isInvalid() || Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_namespace_name);
+    // If the namespace name was invalid, skip to the end of the decl and
+    // eat the ';'.
+ SkipUntil(tok::semi);
+    // FIXME: Are there cases when we would like to call ActOnUsingDirective?
+ return nullptr;
+ }
+
+ // Parse identifier.
+ NamespcName = Tok.getIdentifierInfo();
+ IdentLoc = ConsumeToken();
+
+ // Parse (optional) attributes (most likely GNU strong-using extension).
+ bool GNUAttr = false;
+ if (Tok.is(tok::kw___attribute)) {
+ GNUAttr = true;
+ ParseGNUAttributes(attrs);
+ }
+
+ // Eat ';'.
+ DeclEnd = Tok.getLocation();
+ if (ExpectAndConsume(tok::semi,
+ GNUAttr ? diag::err_expected_semi_after_attribute_list
+ : diag::err_expected_semi_after_namespace_name))
+ SkipUntil(tok::semi);
+
+ return Actions.ActOnUsingDirective(getCurScope(), UsingLoc, NamespcLoc, SS,
+ IdentLoc, NamespcName, attrs.getList());
+}
+
+/// ParseUsingDeclaration - Parse C++ using-declaration or alias-declaration.
+/// Assumes that 'using' was already seen.
+///
+/// using-declaration: [C++ 7.3.p3: namespace.udecl]
+/// 'using' 'typename'[opt] ::[opt] nested-name-specifier
+/// unqualified-id
+/// 'using' :: unqualified-id
+///
+/// alias-declaration: C++11 [dcl.dcl]p1
+/// 'using' identifier attribute-specifier-seq[opt] = type-id ;
+///
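+/// For illustration (informal examples with placeholder names):
+/// \code
+///   using N::f;                           // using-declaration
+///   using IntPair = std::pair<int, int>;  // C++11 alias-declaration
+/// \endcode
+///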
+Decl *Parser::ParseUsingDeclaration(unsigned Context,
+ const ParsedTemplateInfo &TemplateInfo,
+ SourceLocation UsingLoc,
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS,
+ Decl **OwnedType) {
+ CXXScopeSpec SS;
+ SourceLocation TypenameLoc;
+ bool HasTypenameKeyword = false;
+
+ // Check for misplaced attributes before the identifier in an
+ // alias-declaration.
+ ParsedAttributesWithRange MisplacedAttrs(AttrFactory);
+ MaybeParseCXX11Attributes(MisplacedAttrs);
+
+ // Ignore optional 'typename'.
+ // FIXME: This is wrong; we should parse this as a typename-specifier.
+ if (TryConsumeToken(tok::kw_typename, TypenameLoc))
+ HasTypenameKeyword = true;
+
+ if (Tok.is(tok::kw___super)) {
+ Diag(Tok.getLocation(), diag::err_super_in_using_declaration);
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
+
+ // Parse nested-name-specifier.
+ IdentifierInfo *LastII = nullptr;
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false,
+ /*MayBePseudoDtor=*/nullptr,
+ /*IsTypename=*/false,
+ /*LastII=*/&LastII);
+
+ // Check nested-name specifier.
+ if (SS.isInvalid()) {
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
+
+ SourceLocation TemplateKWLoc;
+ UnqualifiedId Name;
+
+ // Parse the unqualified-id. We allow parsing of both constructor and
+ // destructor names and allow the action module to diagnose any semantic
+ // errors.
+ //
+ // C++11 [class.qual]p2:
+ // [...] in a using-declaration that is a member-declaration, if the name
+ // specified after the nested-name-specifier is the same as the identifier
+ // or the simple-template-id's template-name in the last component of the
+ // nested-name-specifier, the name is [...] considered to name the
+ // constructor.
+ if (getLangOpts().CPlusPlus11 && Context == Declarator::MemberContext &&
+ Tok.is(tok::identifier) && NextToken().is(tok::semi) &&
+ SS.isNotEmpty() && LastII == Tok.getIdentifierInfo() &&
+ !SS.getScopeRep()->getAsNamespace() &&
+ !SS.getScopeRep()->getAsNamespaceAlias()) {
+ SourceLocation IdLoc = ConsumeToken();
+ ParsedType Type = Actions.getInheritingConstructorName(SS, IdLoc, *LastII);
+ Name.setConstructorName(Type, IdLoc, IdLoc);
+ } else if (ParseUnqualifiedId(
+ SS, /*EnteringContext=*/false,
+ /*AllowDestructorName=*/true,
+ /*AllowConstructorName=*/!(Tok.is(tok::identifier) &&
+ NextToken().is(tok::equal)),
+ ParsedType(), TemplateKWLoc, Name)) {
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
+
+ ParsedAttributesWithRange Attrs(AttrFactory);
+ MaybeParseGNUAttributes(Attrs);
+ MaybeParseCXX11Attributes(Attrs);
+
+ // Maybe this is an alias-declaration.
+ TypeResult TypeAlias;
+ bool IsAliasDecl = Tok.is(tok::equal);
+ Decl *DeclFromDeclSpec = nullptr;
+ if (IsAliasDecl) {
+ // If we had any misplaced attributes from earlier, this is where they
+ // should have been written.
+ if (MisplacedAttrs.Range.isValid()) {
+ Diag(MisplacedAttrs.Range.getBegin(), diag::err_attributes_not_allowed)
+ << FixItHint::CreateInsertionFromRange(
+ Tok.getLocation(),
+ CharSourceRange::getTokenRange(MisplacedAttrs.Range))
+ << FixItHint::CreateRemoval(MisplacedAttrs.Range);
+ Attrs.takeAllFrom(MisplacedAttrs);
+ }
+
+ ConsumeToken();
+
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_alias_declaration :
+ diag::ext_alias_declaration);
+
+ // Type alias templates cannot be specialized.
+ int SpecKind = -1;
+ if (TemplateInfo.Kind == ParsedTemplateInfo::Template &&
+ Name.getKind() == UnqualifiedId::IK_TemplateId)
+ SpecKind = 0;
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization)
+ SpecKind = 1;
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation)
+ SpecKind = 2;
+ if (SpecKind != -1) {
+ SourceRange Range;
+ if (SpecKind == 0)
+ Range = SourceRange(Name.TemplateId->LAngleLoc,
+ Name.TemplateId->RAngleLoc);
+ else
+ Range = TemplateInfo.getSourceRange();
+ Diag(Range.getBegin(), diag::err_alias_declaration_specialization)
+ << SpecKind << Range;
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
+
+ // Name must be an identifier.
+ if (Name.getKind() != UnqualifiedId::IK_Identifier) {
+ Diag(Name.StartLocation, diag::err_alias_declaration_not_identifier);
+ // No removal fixit: can't recover from this.
+ SkipUntil(tok::semi);
+ return nullptr;
+ } else if (HasTypenameKeyword)
+ Diag(TypenameLoc, diag::err_alias_declaration_not_identifier)
+ << FixItHint::CreateRemoval(SourceRange(TypenameLoc,
+ SS.isNotEmpty() ? SS.getEndLoc() : TypenameLoc));
+ else if (SS.isNotEmpty())
+ Diag(SS.getBeginLoc(), diag::err_alias_declaration_not_identifier)
+ << FixItHint::CreateRemoval(SS.getRange());
+
+ TypeAlias = ParseTypeName(nullptr, TemplateInfo.Kind
+ ? Declarator::AliasTemplateContext
+ : Declarator::AliasDeclContext,
+ AS, &DeclFromDeclSpec, &Attrs);
+ if (OwnedType)
+ *OwnedType = DeclFromDeclSpec;
+ } else {
+ // C++11 attributes are not allowed on a using-declaration, but GNU ones
+ // are.
+ ProhibitAttributes(MisplacedAttrs);
+ ProhibitAttributes(Attrs);
+
+ // Parse (optional) attributes (most likely GNU strong-using extension).
+ MaybeParseGNUAttributes(Attrs);
+ }
+
+ // Eat ';'.
+ DeclEnd = Tok.getLocation();
+ if (ExpectAndConsume(tok::semi, diag::err_expected_after,
+ !Attrs.empty() ? "attributes list"
+ : IsAliasDecl ? "alias declaration"
+ : "using declaration"))
+ SkipUntil(tok::semi);
+
+ // Diagnose an attempt to declare a templated using-declaration.
+ // In C++11, alias-declarations can be templates:
+ // template <...> using id = type;
+ if (TemplateInfo.Kind && !IsAliasDecl) {
+ SourceRange R = TemplateInfo.getSourceRange();
+ Diag(UsingLoc, diag::err_templated_using_directive_declaration)
+ << 1 /* declaration */ << R << FixItHint::CreateRemoval(R);
+
+ // Unfortunately, we have to bail out instead of recovering by
+ // ignoring the parameters, just in case the nested name specifier
+ // depends on the parameters.
+ return nullptr;
+ }
+
+ // "typename" keyword is allowed for identifiers only,
+ // because it may be a type definition.
+ if (HasTypenameKeyword && Name.getKind() != UnqualifiedId::IK_Identifier) {
+ Diag(Name.getSourceRange().getBegin(), diag::err_typename_identifiers_only)
+ << FixItHint::CreateRemoval(SourceRange(TypenameLoc));
+ // Proceed parsing, but reset the HasTypenameKeyword flag.
+ HasTypenameKeyword = false;
+ }
+
+ if (IsAliasDecl) {
+ TemplateParameterLists *TemplateParams = TemplateInfo.TemplateParams;
+ MultiTemplateParamsArg TemplateParamsArg(
+ TemplateParams ? TemplateParams->data() : nullptr,
+ TemplateParams ? TemplateParams->size() : 0);
+ return Actions.ActOnAliasDeclaration(getCurScope(), AS, TemplateParamsArg,
+ UsingLoc, Name, Attrs.getList(),
+ TypeAlias, DeclFromDeclSpec);
+ }
+
+ return Actions.ActOnUsingDeclaration(getCurScope(), AS,
+ /* HasUsingKeyword */ true, UsingLoc,
+ SS, Name, Attrs.getList(),
+ HasTypenameKeyword, TypenameLoc);
+}
+
+/// ParseStaticAssertDeclaration - Parse C++0x or C11 static_assert-declaration.
+///
+/// [C++0x] static_assert-declaration:
+/// static_assert ( constant-expression , string-literal ) ;
+///
+/// [C11] static_assert-declaration:
+/// _Static_assert ( constant-expression , string-literal ) ;
+///
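+/// For illustration, an informal example:
+/// 'static_assert(sizeof(void *) >= 4, "pointer size");' -- as parsed below,
+/// C++1z additionally permits omitting the string-literal message.
+///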
+Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
+ assert(Tok.isOneOf(tok::kw_static_assert, tok::kw__Static_assert) &&
+ "Not a static_assert declaration");
+
+ if (Tok.is(tok::kw__Static_assert) && !getLangOpts().C11)
+ Diag(Tok, diag::ext_c11_static_assert);
+ if (Tok.is(tok::kw_static_assert))
+ Diag(Tok, diag::warn_cxx98_compat_static_assert);
+
+ SourceLocation StaticAssertLoc = ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.consumeOpen()) {
+ Diag(Tok, diag::err_expected) << tok::l_paren;
+ SkipMalformedDecl();
+ return nullptr;
+ }
+
+ ExprResult AssertExpr(ParseConstantExpression());
+ if (AssertExpr.isInvalid()) {
+ SkipMalformedDecl();
+ return nullptr;
+ }
+
+ ExprResult AssertMessage;
+ if (Tok.is(tok::r_paren)) {
+ Diag(Tok, getLangOpts().CPlusPlus1z
+ ? diag::warn_cxx14_compat_static_assert_no_message
+ : diag::ext_static_assert_no_message)
+ << (getLangOpts().CPlusPlus1z
+ ? FixItHint()
+ : FixItHint::CreateInsertion(Tok.getLocation(), ", \"\""));
+ } else {
+ if (ExpectAndConsume(tok::comma)) {
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
+
+ if (!isTokenStringLiteral()) {
+ Diag(Tok, diag::err_expected_string_literal)
+ << /*Source='static_assert'*/1;
+ SkipMalformedDecl();
+ return nullptr;
+ }
+
+ AssertMessage = ParseStringLiteralExpression();
+ if (AssertMessage.isInvalid()) {
+ SkipMalformedDecl();
+ return nullptr;
+ }
+ }
+
+ T.consumeClose();
+
+ DeclEnd = Tok.getLocation();
+ ExpectAndConsumeSemi(diag::err_expected_semi_after_static_assert);
+
+ return Actions.ActOnStaticAssertDeclaration(StaticAssertLoc,
+ AssertExpr.get(),
+ AssertMessage.get(),
+ T.getCloseLocation());
+}
+
+/// ParseDecltypeSpecifier - Parse a C++11 decltype specifier.
+///
+/// 'decltype' ( expression )
+/// 'decltype' ( 'auto' ) [C++1y]
+///
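+/// For illustration (informal examples with placeholder names):
+/// \code
+///   decltype(a + b) c;       // type of the (unevaluated) expression
+///   decltype(auto) d = f();  // C++1y: deduce like auto, preserving
+///                            // references and cv-qualifiers
+/// \endcode
+///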
+SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
+ assert(Tok.isOneOf(tok::kw_decltype, tok::annot_decltype)
+ && "Not a decltype specifier");
+
+ ExprResult Result;
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc;
+
+ if (Tok.is(tok::annot_decltype)) {
+ Result = getExprAnnotation(Tok);
+ EndLoc = Tok.getAnnotationEndLoc();
+ ConsumeToken();
+ if (Result.isInvalid()) {
+ DS.SetTypeSpecError();
+ return EndLoc;
+ }
+ } else {
+ if (Tok.getIdentifierInfo()->isStr("decltype"))
+ Diag(Tok, diag::warn_cxx98_compat_decltype);
+
+ ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume(diag::err_expected_lparen_after,
+ "decltype", tok::r_paren)) {
+ DS.SetTypeSpecError();
+ return T.getOpenLocation() == Tok.getLocation() ?
+ StartLoc : T.getOpenLocation();
+ }
+
+ // Check for C++1y 'decltype(auto)'.
+ if (Tok.is(tok::kw_auto)) {
+ // No need to disambiguate here: an expression can't start with 'auto',
+ // because the typename-specifier in a function-style cast operation can't
+ // be 'auto'.
+ Diag(Tok.getLocation(),
+ getLangOpts().CPlusPlus14
+ ? diag::warn_cxx11_compat_decltype_auto_type_specifier
+ : diag::ext_decltype_auto_type_specifier);
+ ConsumeToken();
+ } else {
+ // Parse the expression
+
+ // C++11 [dcl.type.simple]p4:
+ // The operand of the decltype specifier is an unevaluated operand.
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated,
+ nullptr,/*IsDecltype=*/true);
+ Result =
+ Actions.CorrectDelayedTyposInExpr(ParseExpression(), [](Expr *E) {
+ return E->hasPlaceholderType() ? ExprError() : E;
+ });
+ if (Result.isInvalid()) {
+ DS.SetTypeSpecError();
+ if (SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch)) {
+ EndLoc = ConsumeParen();
+ } else {
+ if (PP.isBacktrackEnabled() && Tok.is(tok::semi)) {
+ // Backtrack to get the location of the last token before the semi.
+ PP.RevertCachedTokens(2);
+ ConsumeToken(); // the semi.
+ EndLoc = ConsumeAnyToken();
+ assert(Tok.is(tok::semi));
+ } else {
+ EndLoc = Tok.getLocation();
+ }
+ }
+ return EndLoc;
+ }
+
+ Result = Actions.ActOnDecltypeExpression(Result.get());
+ }
+
+ // Match the ')'
+ T.consumeClose();
+ if (T.getCloseLocation().isInvalid()) {
+ DS.SetTypeSpecError();
+ // FIXME: this should return the location of the last token
+ // that was consumed (by "consumeClose()")
+ return T.getCloseLocation();
+ }
+
+ if (Result.isInvalid()) {
+ DS.SetTypeSpecError();
+ return T.getCloseLocation();
+ }
+
+ EndLoc = T.getCloseLocation();
+ }
+ assert(!Result.isInvalid());
+
+ const char *PrevSpec = nullptr;
+ unsigned DiagID;
+ const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
+ // Check for duplicate type specifiers (e.g. "int decltype(a)").
+ if (Result.get()
+ ? DS.SetTypeSpecType(DeclSpec::TST_decltype, StartLoc, PrevSpec,
+ DiagID, Result.get(), Policy)
+ : DS.SetTypeSpecType(DeclSpec::TST_decltype_auto, StartLoc, PrevSpec,
+ DiagID, Policy)) {
+ Diag(StartLoc, DiagID) << PrevSpec;
+ DS.SetTypeSpecError();
+ }
+ return EndLoc;
+}
+
+void Parser::AnnotateExistingDecltypeSpecifier(const DeclSpec& DS,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+  // Make sure we have a token we can turn into an annotation token.
+ if (PP.isBacktrackEnabled())
+ PP.RevertCachedTokens(1);
+ else
+ PP.EnterToken(Tok);
+
+ Tok.setKind(tok::annot_decltype);
+ setExprAnnotation(Tok,
+ DS.getTypeSpecType() == TST_decltype ? DS.getRepAsExpr() :
+ DS.getTypeSpecType() == TST_decltype_auto ? ExprResult() :
+ ExprError());
+ Tok.setAnnotationEndLoc(EndLoc);
+ Tok.setLocation(StartLoc);
+ PP.AnnotateCachedTokens(Tok);
+}
+
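+/// Parse the '__underlying_type' specifier, a compiler trait (as used by
+/// std::underlying_type) that names the underlying type of an enumeration;
+/// informally (placeholder name): '__underlying_type(Color) c;'.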
+void Parser::ParseUnderlyingTypeSpecifier(DeclSpec &DS) {
+ assert(Tok.is(tok::kw___underlying_type) &&
+ "Not an underlying type specifier");
+
+ SourceLocation StartLoc = ConsumeToken();
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume(diag::err_expected_lparen_after,
+ "__underlying_type", tok::r_paren)) {
+ return;
+ }
+
+ TypeResult Result = ParseTypeName();
+ if (Result.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+
+ // Match the ')'
+ T.consumeClose();
+ if (T.getCloseLocation().isInvalid())
+ return;
+
+ const char *PrevSpec = nullptr;
+ unsigned DiagID;
+ if (DS.SetTypeSpecType(DeclSpec::TST_underlyingType, StartLoc, PrevSpec,
+ DiagID, Result.get(),
+ Actions.getASTContext().getPrintingPolicy()))
+ Diag(StartLoc, DiagID) << PrevSpec;
+ DS.setTypeofParensRange(T.getRange());
+}
+
+/// ParseBaseTypeSpecifier - Parse a C++ base-type-specifier which is either a
+/// class name or decltype-specifier. Note that we only check that the result
+/// names a type; semantic analysis will need to verify that the type names a
+/// class. The result is either a type or null, depending on whether a type
+/// name was found.
+///
+/// base-type-specifier: [C++11 class.derived]
+/// class-or-decltype
+/// class-or-decltype: [C++11 class.derived]
+/// nested-name-specifier[opt] class-name
+/// decltype-specifier
+/// class-name: [C++ class.name]
+/// identifier
+/// simple-template-id
+///
+/// In C++98, instead of base-type-specifier, we have:
+///
+/// ::[opt] nested-name-specifier[opt] class-name
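+///
+/// For illustration, an informal example (placeholder names) of
+/// base-type-specifiers inside a base-clause:
+/// \code
+///   struct D : Base, NS::Tmpl<int>, decltype(makeBase()) { };
+/// \endcode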
+TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
+ SourceLocation &EndLocation) {
+ // Ignore attempts to use typename
+ if (Tok.is(tok::kw_typename)) {
+ Diag(Tok, diag::err_expected_class_name_not_template)
+ << FixItHint::CreateRemoval(Tok.getLocation());
+ ConsumeToken();
+ }
+
+ // Parse optional nested-name-specifier
+ CXXScopeSpec SS;
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
+
+ BaseLoc = Tok.getLocation();
+
+ // Parse decltype-specifier
+  // tok == kw_decltype is just error recovery; it can only happen when SS
+  // isn't empty.
+ if (Tok.isOneOf(tok::kw_decltype, tok::annot_decltype)) {
+ if (SS.isNotEmpty())
+ Diag(SS.getBeginLoc(), diag::err_unexpected_scope_on_base_decltype)
+ << FixItHint::CreateRemoval(SS.getRange());
+ // Fake up a Declarator to use with ActOnTypeName.
+ DeclSpec DS(AttrFactory);
+
+ EndLocation = ParseDecltypeSpecifier(DS);
+
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ }
+
+ // Check whether we have a template-id that names a type.
+ if (Tok.is(tok::annot_template_id)) {
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ if (TemplateId->Kind == TNK_Type_template ||
+ TemplateId->Kind == TNK_Dependent_template_name) {
+ AnnotateTemplateIdTokenAsType();
+
+ assert(Tok.is(tok::annot_typename) && "template-id -> type failed");
+ ParsedType Type = getTypeAnnotation(Tok);
+ EndLocation = Tok.getAnnotationEndLoc();
+ ConsumeToken();
+
+ if (Type)
+ return Type;
+ return true;
+ }
+
+ // Fall through to produce an error below.
+ }
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_class_name);
+ return true;
+ }
+
+ IdentifierInfo *Id = Tok.getIdentifierInfo();
+ SourceLocation IdLoc = ConsumeToken();
+
+ if (Tok.is(tok::less)) {
+    // It looks like the user intended to write a template-id here, but the
+ // template-name was wrong. Try to fix that.
+ TemplateNameKind TNK = TNK_Type_template;
+ TemplateTy Template;
+ if (!Actions.DiagnoseUnknownTemplateName(*Id, IdLoc, getCurScope(),
+ &SS, Template, TNK)) {
+ Diag(IdLoc, diag::err_unknown_template_name)
+ << Id;
+ }
+
+ if (!Template) {
+ TemplateArgList TemplateArgs;
+ SourceLocation LAngleLoc, RAngleLoc;
+ ParseTemplateIdAfterTemplateName(TemplateTy(), IdLoc, SS,
+ true, LAngleLoc, TemplateArgs, RAngleLoc);
+ return true;
+ }
+
+ // Form the template name
+ UnqualifiedId TemplateName;
+ TemplateName.setIdentifier(Id, IdLoc);
+
+ // Parse the full template-id, then turn it into a type.
+ if (AnnotateTemplateIdToken(Template, TNK, SS, SourceLocation(),
+ TemplateName, true))
+ return true;
+ if (TNK == TNK_Dependent_template_name)
+ AnnotateTemplateIdTokenAsType();
+
+ // If we didn't end up with a typename token, there's nothing more we
+ // can do.
+ if (Tok.isNot(tok::annot_typename))
+ return true;
+
+ // Retrieve the type from the annotation token, consume that token, and
+ // return.
+ EndLocation = Tok.getAnnotationEndLoc();
+ ParsedType Type = getTypeAnnotation(Tok);
+ ConsumeToken();
+ return Type;
+ }
+
+ // We have an identifier; check whether it is actually a type.
+ IdentifierInfo *CorrectedII = nullptr;
+ ParsedType Type = Actions.getTypeName(*Id, IdLoc, getCurScope(), &SS, true,
+ false, ParsedType(),
+ /*IsCtorOrDtorName=*/false,
+ /*NonTrivialTypeSourceInfo=*/true,
+ &CorrectedII);
+ if (!Type) {
+ Diag(IdLoc, diag::err_expected_class_name);
+ return true;
+ }
+
+ // Consume the identifier.
+ EndLocation = IdLoc;
+
+ // Fake up a Declarator to use with ActOnTypeName.
+ DeclSpec DS(AttrFactory);
+ DS.SetRangeStart(IdLoc);
+ DS.SetRangeEnd(EndLocation);
+ DS.getTypeSpecScope() = SS;
+
+ const char *PrevSpec = nullptr;
+ unsigned DiagID;
+ DS.SetTypeSpecType(TST_typename, IdLoc, PrevSpec, DiagID, Type,
+ Actions.getASTContext().getPrintingPolicy());
+
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+}
+
+void Parser::ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs) {
+ while (Tok.isOneOf(tok::kw___single_inheritance,
+ tok::kw___multiple_inheritance,
+ tok::kw___virtual_inheritance)) {
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
+ attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
+ AttributeList::AS_Keyword);
+ }
+}
+
+/// Determine whether the following tokens are valid after a type-specifier
+/// which could be a standalone declaration. This will conservatively return
+/// true if there's any doubt, and is appropriate for insert-';' fixits.
+bool Parser::isValidAfterTypeSpecifier(bool CouldBeBitfield) {
+ // This switch enumerates the valid "follow" set for type-specifiers.
+ switch (Tok.getKind()) {
+ default: break;
+ case tok::semi: // struct foo {...} ;
+ case tok::star: // struct foo {...} * P;
+ case tok::amp: // struct foo {...} & R = ...
+ case tok::ampamp: // struct foo {...} && R = ...
+ case tok::identifier: // struct foo {...} V ;
+  case tok::r_paren: // (struct foo {...} ) {4}
+ case tok::annot_cxxscope: // struct foo {...} a:: b;
+ case tok::annot_typename: // struct foo {...} a ::b;
+ case tok::annot_template_id: // struct foo {...} a<int> ::b;
+ case tok::l_paren: // struct foo {...} ( x);
+ case tok::comma: // __builtin_offsetof(struct foo{...} ,
+ case tok::kw_operator: // struct foo operator ++() {...}
+ case tok::kw___declspec: // struct foo {...} __declspec(...)
+ case tok::l_square: // void f(struct f [ 3])
+ case tok::ellipsis: // void f(struct f ... [Ns])
+ // FIXME: we should emit semantic diagnostic when declaration
+ // attribute is in type attribute position.
+ case tok::kw___attribute: // struct foo __attribute__((used)) x;
+ return true;
+ case tok::colon:
+ return CouldBeBitfield; // enum E { ... } : 2;
+ // Type qualifiers
+ case tok::kw_const: // struct foo {...} const x;
+ case tok::kw_volatile: // struct foo {...} volatile x;
+ case tok::kw_restrict: // struct foo {...} restrict x;
+ case tok::kw__Atomic: // struct foo {...} _Atomic x;
+ case tok::kw___unaligned: // struct foo {...} __unaligned *x;
+ // Function specifiers
+ // Note, no 'explicit'. An explicit function must be either a conversion
+ // operator or a constructor. Either way, it can't have a return type.
+ case tok::kw_inline: // struct foo inline f();
+ case tok::kw_virtual: // struct foo virtual f();
+ case tok::kw_friend: // struct foo friend f();
+ // Storage-class specifiers
+ case tok::kw_static: // struct foo {...} static x;
+ case tok::kw_extern: // struct foo {...} extern x;
+ case tok::kw_typedef: // struct foo {...} typedef x;
+ case tok::kw_register: // struct foo {...} register x;
+ case tok::kw_auto: // struct foo {...} auto x;
+ case tok::kw_mutable: // struct foo {...} mutable x;
+ case tok::kw_thread_local: // struct foo {...} thread_local x;
+ case tok::kw_constexpr: // struct foo {...} constexpr x;
+ // As shown above, type qualifiers and storage class specifiers absolutely
+ // can occur after class specifiers according to the grammar. However,
+ // almost no one actually writes code like this. If we see one of these,
+  // it is much more likely that someone missed a semicolon and the
+ // type/storage class specifier we're seeing is part of the *next*
+ // intended declaration, as in:
+ //
+ // struct foo { ... }
+ // typedef int X;
+ //
+ // We'd really like to emit a missing semicolon error instead of emitting
+ // an error on the 'int' saying that you can't have two type specifiers in
+ // the same declaration of X. Because of this, we look ahead past this
+ // token to see if it's a type specifier. If so, we know the code is
+ // otherwise invalid, so we can produce the expected semi error.
+ if (!isKnownToBeTypeSpecifier(NextToken()))
+ return true;
+ break;
+ case tok::r_brace: // struct bar { struct foo {...} }
+ // Missing ';' at end of struct is accepted as an extension in C mode.
+ if (!getLangOpts().CPlusPlus)
+ return true;
+ break;
+ case tok::greater:
+ // template<class T = class X>
+ return getLangOpts().CPlusPlus;
+ }
+ return false;
+}
+
+/// ParseClassSpecifier - Parse a C++ class-specifier [C++ class] or
+/// elaborated-type-specifier [C++ dcl.type.elab]; we can't tell which
+/// until we reach the start of a definition or see a token that
+/// cannot start a definition.
+///
+/// class-specifier: [C++ class]
+/// class-head '{' member-specification[opt] '}'
+/// class-head '{' member-specification[opt] '}' attributes[opt]
+/// class-head:
+/// class-key identifier[opt] base-clause[opt]
+/// class-key nested-name-specifier identifier base-clause[opt]
+/// class-key nested-name-specifier[opt] simple-template-id
+/// base-clause[opt]
+/// [GNU] class-key attributes[opt] identifier[opt] base-clause[opt]
+/// [GNU] class-key attributes[opt] nested-name-specifier
+/// identifier base-clause[opt]
+/// [GNU] class-key attributes[opt] nested-name-specifier[opt]
+/// simple-template-id base-clause[opt]
+/// class-key:
+/// 'class'
+/// 'struct'
+/// 'union'
+///
+/// elaborated-type-specifier: [C++ dcl.type.elab]
+/// class-key ::[opt] nested-name-specifier[opt] identifier
+/// class-key ::[opt] nested-name-specifier[opt] 'template'[opt]
+/// simple-template-id
+///
+/// Note that the C++ class-specifier and elaborated-type-specifier,
+/// together, subsume the C99 struct-or-union-specifier:
+///
+/// struct-or-union-specifier: [C99 6.7.2.1]
+/// struct-or-union identifier[opt] '{' struct-contents '}'
+/// struct-or-union identifier
+/// [GNU] struct-or-union attributes[opt] identifier[opt] '{' struct-contents
+/// '}' attributes[opt]
+/// [GNU] struct-or-union attributes[opt] identifier
+/// struct-or-union:
+/// 'struct'
+/// 'union'
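+///
+/// For illustration (informal examples with placeholder names):
+/// \code
+///   struct S { int x; };  // class-specifier (definition)
+///   struct S *p;          // elaborated-type-specifier (reference only)
+/// \endcode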
+void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
+ SourceLocation StartLoc, DeclSpec &DS,
+ const ParsedTemplateInfo &TemplateInfo,
+ AccessSpecifier AS,
+ bool EnteringContext, DeclSpecContext DSC,
+ ParsedAttributesWithRange &Attributes) {
+ DeclSpec::TST TagType;
+ if (TagTokKind == tok::kw_struct)
+ TagType = DeclSpec::TST_struct;
+ else if (TagTokKind == tok::kw___interface)
+ TagType = DeclSpec::TST_interface;
+ else if (TagTokKind == tok::kw_class)
+ TagType = DeclSpec::TST_class;
+ else {
+ assert(TagTokKind == tok::kw_union && "Not a class specifier");
+ TagType = DeclSpec::TST_union;
+ }
+
+ if (Tok.is(tok::code_completion)) {
+ // Code completion for a struct, class, or union name.
+ Actions.CodeCompleteTag(getCurScope(), TagType);
+ return cutOffParsing();
+ }
+
+ // C++03 [temp.explicit] 14.7.2/8:
+ // The usual access checking rules do not apply to names used to specify
+ // explicit instantiations.
+ //
+ // As an extension we do not perform access checking on the names used to
+ // specify explicit specializations either. This is important to allow
+ // specializing traits classes for private types.
+ //
+ // Note that we don't suppress if this turns out to be an elaborated
+ // type specifier.
+ bool shouldDelayDiagsInTag =
+ (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation ||
+ TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization);
+ SuppressAccessChecks diagsFromTag(*this, shouldDelayDiagsInTag);
+
+ ParsedAttributesWithRange attrs(AttrFactory);
+ // If attributes exist after tag, parse them.
+ MaybeParseGNUAttributes(attrs);
+ MaybeParseMicrosoftDeclSpecs(attrs);
+
+ // Parse inheritance specifiers.
+ if (Tok.isOneOf(tok::kw___single_inheritance,
+ tok::kw___multiple_inheritance,
+ tok::kw___virtual_inheritance))
+ ParseMicrosoftInheritanceClassAttributes(attrs);
+
+ // If C++0x attributes exist here, parse them.
+ // FIXME: Are we consistent with the ordering of parsing of different
+ // styles of attributes?
+ MaybeParseCXX11Attributes(attrs);
+
+  // Source location used by the fix-it to insert misplaced
+  // C++11 attributes.
+ SourceLocation AttrFixitLoc = Tok.getLocation();
+
+ if (TagType == DeclSpec::TST_struct &&
+ Tok.isNot(tok::identifier) &&
+ !Tok.isAnnotation() &&
+ Tok.getIdentifierInfo() &&
+ Tok.isOneOf(tok::kw___is_abstract,
+ tok::kw___is_arithmetic,
+ tok::kw___is_array,
+ tok::kw___is_base_of,
+ tok::kw___is_class,
+ tok::kw___is_complete_type,
+ tok::kw___is_compound,
+ tok::kw___is_const,
+ tok::kw___is_constructible,
+ tok::kw___is_convertible,
+ tok::kw___is_convertible_to,
+ tok::kw___is_destructible,
+ tok::kw___is_empty,
+ tok::kw___is_enum,
+ tok::kw___is_floating_point,
+ tok::kw___is_final,
+ tok::kw___is_function,
+ tok::kw___is_fundamental,
+ tok::kw___is_integral,
+ tok::kw___is_interface_class,
+ tok::kw___is_literal,
+ tok::kw___is_lvalue_expr,
+ tok::kw___is_lvalue_reference,
+ tok::kw___is_member_function_pointer,
+ tok::kw___is_member_object_pointer,
+ tok::kw___is_member_pointer,
+ tok::kw___is_nothrow_assignable,
+ tok::kw___is_nothrow_constructible,
+ tok::kw___is_nothrow_destructible,
+ tok::kw___is_object,
+ tok::kw___is_pod,
+ tok::kw___is_pointer,
+ tok::kw___is_polymorphic,
+ tok::kw___is_reference,
+ tok::kw___is_rvalue_expr,
+ tok::kw___is_rvalue_reference,
+ tok::kw___is_same,
+ tok::kw___is_scalar,
+ tok::kw___is_sealed,
+ tok::kw___is_signed,
+ tok::kw___is_standard_layout,
+ tok::kw___is_trivial,
+ tok::kw___is_trivially_assignable,
+ tok::kw___is_trivially_constructible,
+ tok::kw___is_trivially_copyable,
+ tok::kw___is_union,
+ tok::kw___is_unsigned,
+ tok::kw___is_void,
+ tok::kw___is_volatile))
+    // GNU libstdc++ 4.2 and libc++ use certain intrinsic names as the
+    // names of struct templates, but some are keywords in GCC >= 4.3
+ // and Clang. Therefore, when we see the token sequence "struct
+ // X", make X into a normal identifier rather than a keyword, to
+ // allow libstdc++ 4.2 and libc++ to work properly.
+ TryKeywordIdentFallback(true);
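+    // Hypothetical illustration (not from the original comments): libstdc++
+    // 4.2 declares, e.g.,
+    //   template<typename _Tp> struct __is_pod { ... };
+    // so after 'struct' the trait keyword is demoted back to a plain
+    // identifier, letting such headers parse.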
+
+ struct PreserveAtomicIdentifierInfoRAII {
+ PreserveAtomicIdentifierInfoRAII(Token &Tok, bool Enabled)
+ : AtomicII(nullptr) {
+ if (!Enabled)
+ return;
+ assert(Tok.is(tok::kw__Atomic));
+ AtomicII = Tok.getIdentifierInfo();
+ AtomicII->revertTokenIDToIdentifier();
+ Tok.setKind(tok::identifier);
+ }
+ ~PreserveAtomicIdentifierInfoRAII() {
+ if (!AtomicII)
+ return;
+ AtomicII->revertIdentifierToTokenID(tok::kw__Atomic);
+ }
+ IdentifierInfo *AtomicII;
+ };
+
+ // HACK: MSVC doesn't consider _Atomic to be a keyword and its STL
+ // implementation for VS2013 uses _Atomic as an identifier for one of the
+ // classes in <atomic>. When we are parsing 'struct _Atomic', don't consider
+ // '_Atomic' to be a keyword. We are careful to undo this so that clang can
+ // use '_Atomic' in its own header files.
+ bool ShouldChangeAtomicToIdentifier = getLangOpts().MSVCCompat &&
+ Tok.is(tok::kw__Atomic) &&
+ TagType == DeclSpec::TST_struct;
+ PreserveAtomicIdentifierInfoRAII AtomicTokenGuard(
+ Tok, ShouldChangeAtomicToIdentifier);
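+  // Illustration (an assumption, not from the original source): with
+  // -fms-compatibility, something along the lines of
+  //   template <class T> struct _Atomic { ... };  // from the VS2013 STL
+  // now parses, because the guard above temporarily turns '_Atomic' back
+  // into an ordinary identifier.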
+
+ // Parse the (optional) nested-name-specifier.
+ CXXScopeSpec &SS = DS.getTypeSpecScope();
+ if (getLangOpts().CPlusPlus) {
+ // "FOO : BAR" is not a potential typo for "FOO::BAR". In this context it
+ // is a base-specifier-list.
+ ColonProtectionRAIIObject X(*this);
+
+ CXXScopeSpec Spec;
+ bool HasValidSpec = true;
+ if (ParseOptionalCXXScopeSpecifier(Spec, ParsedType(), EnteringContext)) {
+ DS.SetTypeSpecError();
+ HasValidSpec = false;
+ }
+ if (Spec.isSet())
+ if (Tok.isNot(tok::identifier) && Tok.isNot(tok::annot_template_id)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ HasValidSpec = false;
+ }
+ if (HasValidSpec)
+ SS = Spec;
+ }
+
+ TemplateParameterLists *TemplateParams = TemplateInfo.TemplateParams;
+
+ // Parse the (optional) class name or simple-template-id.
+ IdentifierInfo *Name = nullptr;
+ SourceLocation NameLoc;
+ TemplateIdAnnotation *TemplateId = nullptr;
+ if (Tok.is(tok::identifier)) {
+ Name = Tok.getIdentifierInfo();
+ NameLoc = ConsumeToken();
+
+ if (Tok.is(tok::less) && getLangOpts().CPlusPlus) {
+ // The name was supposed to refer to a template, but didn't.
+ // Eat the template argument list and try to continue parsing this as
+ // a class (or template thereof).
+ TemplateArgList TemplateArgs;
+ SourceLocation LAngleLoc, RAngleLoc;
+ if (ParseTemplateIdAfterTemplateName(TemplateTy(), NameLoc, SS,
+ true, LAngleLoc,
+ TemplateArgs, RAngleLoc)) {
+ // We couldn't parse the template argument list at all, so don't
+ // try to give any location information for the list.
+ LAngleLoc = RAngleLoc = SourceLocation();
+ }
+
+ Diag(NameLoc, diag::err_explicit_spec_non_template)
+ << (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation)
+ << TagTokKind << Name << SourceRange(LAngleLoc, RAngleLoc);
+
+ // Strip off the last template parameter list if it was empty, since
+ // we've removed its template argument list.
+ if (TemplateParams && TemplateInfo.LastParameterListWasEmpty) {
+      if (TemplateParams->size() > 1) {
+ TemplateParams->pop_back();
+ } else {
+ TemplateParams = nullptr;
+ const_cast<ParsedTemplateInfo&>(TemplateInfo).Kind
+ = ParsedTemplateInfo::NonTemplate;
+ }
+ } else if (TemplateInfo.Kind
+ == ParsedTemplateInfo::ExplicitInstantiation) {
+ // Pretend this is just a forward declaration.
+ TemplateParams = nullptr;
+ const_cast<ParsedTemplateInfo&>(TemplateInfo).Kind
+ = ParsedTemplateInfo::NonTemplate;
+ const_cast<ParsedTemplateInfo&>(TemplateInfo).TemplateLoc
+ = SourceLocation();
+ const_cast<ParsedTemplateInfo&>(TemplateInfo).ExternLoc
+ = SourceLocation();
+ }
+ }
+ } else if (Tok.is(tok::annot_template_id)) {
+ TemplateId = takeTemplateIdAnnotation(Tok);
+ NameLoc = ConsumeToken();
+
+ if (TemplateId->Kind != TNK_Type_template &&
+ TemplateId->Kind != TNK_Dependent_template_name) {
+ // The template-name in the simple-template-id refers to
+ // something other than a class template. Give an appropriate
+ // error message and skip to the ';'.
+ SourceRange Range(NameLoc);
+ if (SS.isNotEmpty())
+ Range.setBegin(SS.getBeginLoc());
+
+ // FIXME: Name may be null here.
+ Diag(TemplateId->LAngleLoc, diag::err_template_spec_syntax_non_template)
+ << TemplateId->Name << static_cast<int>(TemplateId->Kind) << Range;
+
+ DS.SetTypeSpecError();
+ SkipUntil(tok::semi, StopBeforeMatch);
+ return;
+ }
+ }
+
+ // There are four options here.
+ // - If we are in a trailing return type, this is always just a reference,
+ // and we must not try to parse a definition. For instance,
+ // [] () -> struct S { };
+ // does not define a type.
+ // - If we have 'struct foo {...', 'struct foo :...',
+ // 'struct foo final :' or 'struct foo final {', then this is a definition.
+ // - If we have 'struct foo;', then this is either a forward declaration
+ // or a friend declaration, which have to be treated differently.
+ // - Otherwise we have something like 'struct foo xyz', a reference.
+ //
+  // We also detect these erroneous cases to provide better diagnostics for
+  // C++11 attribute parsing.
+ // - attributes follow class name:
+ // struct foo [[]] {};
+ // - attributes appear before or after 'final':
+ // struct foo [[]] final [[]] {};
+ //
+ // However, in type-specifier-seq's, things look like declarations but are
+ // just references, e.g.
+ // new struct s;
+ // or
+ // &T::operator struct s;
+ // For these, DSC is DSC_type_specifier or DSC_alias_declaration.
+
+ // If there are attributes after class name, parse them.
+ MaybeParseCXX11Attributes(Attributes);
+
+ const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
+ Sema::TagUseKind TUK;
+ if (DSC == DSC_trailing)
+ TUK = Sema::TUK_Reference;
+ else if (Tok.is(tok::l_brace) ||
+ (getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
+ (isCXX11FinalKeyword() &&
+ (NextToken().is(tok::l_brace) || NextToken().is(tok::colon)))) {
+ if (DS.isFriendSpecified()) {
+ // C++ [class.friend]p2:
+ // A class shall not be defined in a friend declaration.
+ Diag(Tok.getLocation(), diag::err_friend_decl_defines_type)
+ << SourceRange(DS.getFriendSpecLoc());
+
+ // Skip everything up to the semicolon, so that this looks like a proper
+ // friend class (or template thereof) declaration.
+ SkipUntil(tok::semi, StopBeforeMatch);
+ TUK = Sema::TUK_Friend;
+ } else {
+ // Okay, this is a class definition.
+ TUK = Sema::TUK_Definition;
+ }
+ } else if (isCXX11FinalKeyword() && (NextToken().is(tok::l_square) ||
+ NextToken().is(tok::kw_alignas))) {
+    // We can't tell if this is a definition or a reference
+    // until we have skipped the 'final' and C++11 attribute specifiers.
+ TentativeParsingAction PA(*this);
+
+ // Skip the 'final' keyword.
+ ConsumeToken();
+
+ // Skip C++11 attribute specifiers.
+ while (true) {
+ if (Tok.is(tok::l_square) && NextToken().is(tok::l_square)) {
+ ConsumeBracket();
+ if (!SkipUntil(tok::r_square, StopAtSemi))
+ break;
+ } else if (Tok.is(tok::kw_alignas) && NextToken().is(tok::l_paren)) {
+ ConsumeToken();
+ ConsumeParen();
+ if (!SkipUntil(tok::r_paren, StopAtSemi))
+ break;
+ } else {
+ break;
+ }
+ }
+
+ if (Tok.isOneOf(tok::l_brace, tok::colon))
+ TUK = Sema::TUK_Definition;
+ else
+ TUK = Sema::TUK_Reference;
+
+ PA.Revert();
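+
+    // Hypothetical example (not in the original comments): for
+    //   struct S final [[deprecated]] { ... };
+    // the tentative parse skips 'final' and the attribute specifier, sees
+    // the '{', and classifies this as a definition before reverting.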
+ } else if (!isTypeSpecifier(DSC) &&
+ (Tok.is(tok::semi) ||
+ (Tok.isAtStartOfLine() && !isValidAfterTypeSpecifier(false)))) {
+ TUK = DS.isFriendSpecified() ? Sema::TUK_Friend : Sema::TUK_Declaration;
+ if (Tok.isNot(tok::semi)) {
+ const PrintingPolicy &PPol = Actions.getASTContext().getPrintingPolicy();
+ // A semicolon was missing after this declaration. Diagnose and recover.
+ ExpectAndConsume(tok::semi, diag::err_expected_after,
+ DeclSpec::getSpecifierName(TagType, PPol));
+ PP.EnterToken(Tok);
+ Tok.setKind(tok::semi);
+ }
+ } else
+ TUK = Sema::TUK_Reference;
+
+  // Forbid misplaced attributes. In the case of a reference, we pass the
+  // attributes to the caller to handle.
+ if (TUK != Sema::TUK_Reference) {
+ // If this is not a reference, then the only possible
+ // valid place for C++11 attributes to appear here
+ // is between class-key and class-name. If there are
+ // any attributes after class-name, we try a fixit to move
+ // them to the right place.
+ SourceRange AttrRange = Attributes.Range;
+ if (AttrRange.isValid()) {
+ Diag(AttrRange.getBegin(), diag::err_attributes_not_allowed)
+ << AttrRange
+ << FixItHint::CreateInsertionFromRange(AttrFixitLoc,
+ CharSourceRange(AttrRange, true))
+ << FixItHint::CreateRemoval(AttrRange);
+
+ // Recover by adding misplaced attributes to the attribute list
+ // of the class so they can be applied on the class later.
+ attrs.takeAllFrom(Attributes);
+ }
+ }
+
+ // If this is an elaborated type specifier, and we delayed
+ // diagnostics before, just merge them into the current pool.
+ if (shouldDelayDiagsInTag) {
+ diagsFromTag.done();
+ if (TUK == Sema::TUK_Reference)
+ diagsFromTag.redelay();
+ }
+
+ if (!Name && !TemplateId && (DS.getTypeSpecType() == DeclSpec::TST_error ||
+ TUK != Sema::TUK_Definition)) {
+ if (DS.getTypeSpecType() != DeclSpec::TST_error) {
+ // We have a declaration or reference to an anonymous class.
+ Diag(StartLoc, diag::err_anon_type_definition)
+ << DeclSpec::getSpecifierName(TagType, Policy);
+ }
+
+ // If we are parsing a definition and stop at a base-clause, continue on
+ // until the semicolon. Continuing from the comma will just trick us into
+ // thinking we are seeing a variable declaration.
+ if (TUK == Sema::TUK_Definition && Tok.is(tok::colon))
+ SkipUntil(tok::semi, StopBeforeMatch);
+ else
+ SkipUntil(tok::comma, StopAtSemi);
+ return;
+ }
+
+ // Create the tag portion of the class or class template.
+ DeclResult TagOrTempResult = true; // invalid
+ TypeResult TypeResult = true; // invalid
+
+ bool Owned = false;
+ Sema::SkipBodyInfo SkipBody;
+ if (TemplateId) {
+ // Explicit specialization, class template partial specialization,
+ // or explicit instantiation.
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
+ TUK == Sema::TUK_Declaration) {
+ // This is an explicit instantiation of a class template.
+ ProhibitAttributes(attrs);
+
+ TagOrTempResult
+ = Actions.ActOnExplicitInstantiation(getCurScope(),
+ TemplateInfo.ExternLoc,
+ TemplateInfo.TemplateLoc,
+ TagType,
+ StartLoc,
+ SS,
+ TemplateId->Template,
+ TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc,
+ TemplateArgsPtr,
+ TemplateId->RAngleLoc,
+ attrs.getList());
+
+ // Friend template-ids are treated as references unless
+ // they have template headers, in which case they're ill-formed
+ // (FIXME: "template <class T> friend class A<T>::B<int>;").
+ // We diagnose this error in ActOnClassTemplateSpecialization.
+ } else if (TUK == Sema::TUK_Reference ||
+ (TUK == Sema::TUK_Friend &&
+ TemplateInfo.Kind == ParsedTemplateInfo::NonTemplate)) {
+ ProhibitAttributes(attrs);
+ TypeResult = Actions.ActOnTagTemplateIdType(TUK, TagType, StartLoc,
+ TemplateId->SS,
+ TemplateId->TemplateKWLoc,
+ TemplateId->Template,
+ TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc,
+ TemplateArgsPtr,
+ TemplateId->RAngleLoc);
+ } else {
+ // This is an explicit specialization or a class template
+ // partial specialization.
+ TemplateParameterLists FakedParamLists;
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation) {
+ // This looks like an explicit instantiation, because we have
+ // something like
+ //
+ // template class Foo<X>
+ //
+ // but it actually has a definition. Most likely, this was
+ // meant to be an explicit specialization, but the user forgot
+ // the '<>' after 'template'.
+        // If this is a friend declaration, however, it cannot have a
+        // template header, so it is most likely that the user meant to
+        // remove the 'template' keyword.
+ assert((TUK == Sema::TUK_Definition || TUK == Sema::TUK_Friend) &&
+ "Expected a definition here");
+
+ if (TUK == Sema::TUK_Friend) {
+ Diag(DS.getFriendSpecLoc(), diag::err_friend_explicit_instantiation);
+ TemplateParams = nullptr;
+ } else {
+ SourceLocation LAngleLoc =
+ PP.getLocForEndOfToken(TemplateInfo.TemplateLoc);
+ Diag(TemplateId->TemplateNameLoc,
+ diag::err_explicit_instantiation_with_definition)
+ << SourceRange(TemplateInfo.TemplateLoc)
+ << FixItHint::CreateInsertion(LAngleLoc, "<>");
+
+ // Create a fake template parameter list that contains only
+ // "template<>", so that we treat this construct as a class
+ // template specialization.
+ FakedParamLists.push_back(Actions.ActOnTemplateParameterList(
+ 0, SourceLocation(), TemplateInfo.TemplateLoc, LAngleLoc, None,
+ LAngleLoc));
+ TemplateParams = &FakedParamLists;
+ }
+ }
+
+ // Build the class template specialization.
+ TagOrTempResult = Actions.ActOnClassTemplateSpecialization(
+ getCurScope(), TagType, TUK, StartLoc, DS.getModulePrivateSpecLoc(),
+ *TemplateId, attrs.getList(),
+ MultiTemplateParamsArg(TemplateParams ? &(*TemplateParams)[0]
+ : nullptr,
+ TemplateParams ? TemplateParams->size() : 0),
+ &SkipBody);
+ }
+ } else if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
+ TUK == Sema::TUK_Declaration) {
+ // Explicit instantiation of a member of a class template
+ // specialization, e.g.,
+ //
+ // template struct Outer<int>::Inner;
+ //
+ ProhibitAttributes(attrs);
+
+ TagOrTempResult
+ = Actions.ActOnExplicitInstantiation(getCurScope(),
+ TemplateInfo.ExternLoc,
+ TemplateInfo.TemplateLoc,
+ TagType, StartLoc, SS, Name,
+ NameLoc, attrs.getList());
+ } else if (TUK == Sema::TUK_Friend &&
+ TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate) {
+ ProhibitAttributes(attrs);
+
+ TagOrTempResult =
+ Actions.ActOnTemplatedFriendTag(getCurScope(), DS.getFriendSpecLoc(),
+ TagType, StartLoc, SS,
+ Name, NameLoc, attrs.getList(),
+ MultiTemplateParamsArg(
+ TemplateParams? &(*TemplateParams)[0]
+ : nullptr,
+ TemplateParams? TemplateParams->size() : 0));
+ } else {
+ if (TUK != Sema::TUK_Declaration && TUK != Sema::TUK_Definition)
+ ProhibitAttributes(attrs);
+
+ if (TUK == Sema::TUK_Definition &&
+ TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation) {
+ // If the declarator-id is not a template-id, issue a diagnostic and
+ // recover by ignoring the 'template' keyword.
+ Diag(Tok, diag::err_template_defn_explicit_instantiation)
+ << 1 << FixItHint::CreateRemoval(TemplateInfo.TemplateLoc);
+ TemplateParams = nullptr;
+ }
+
+ bool IsDependent = false;
+
+ // Don't pass down template parameter lists if this is just a tag
+ // reference. For example, we don't need the template parameters here:
+ // template <class T> class A *makeA(T t);
+ MultiTemplateParamsArg TParams;
+ if (TUK != Sema::TUK_Reference && TemplateParams)
+ TParams =
+ MultiTemplateParamsArg(&(*TemplateParams)[0], TemplateParams->size());
+
+ handleDeclspecAlignBeforeClassKey(attrs, DS, TUK);
+
+ // Declaration or definition of a class type
+ TagOrTempResult = Actions.ActOnTag(getCurScope(), TagType, TUK, StartLoc,
+ SS, Name, NameLoc, attrs.getList(), AS,
+ DS.getModulePrivateSpecLoc(),
+ TParams, Owned, IsDependent,
+ SourceLocation(), false,
+ clang::TypeResult(),
+ DSC == DSC_type_specifier,
+ &SkipBody);
+
+ // If ActOnTag said the type was dependent, try again with the
+ // less common call.
+ if (IsDependent) {
+ assert(TUK == Sema::TUK_Reference || TUK == Sema::TUK_Friend);
+ TypeResult = Actions.ActOnDependentTag(getCurScope(), TagType, TUK,
+ SS, Name, StartLoc, NameLoc);
+ }
+ }
+
+ // If there is a body, parse it and inform the actions module.
+ if (TUK == Sema::TUK_Definition) {
+ assert(Tok.is(tok::l_brace) ||
+ (getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
+ isCXX11FinalKeyword());
+ if (SkipBody.ShouldSkip)
+ SkipCXXMemberSpecification(StartLoc, AttrFixitLoc, TagType,
+ TagOrTempResult.get());
+ else if (getLangOpts().CPlusPlus)
+ ParseCXXMemberSpecification(StartLoc, AttrFixitLoc, attrs, TagType,
+ TagOrTempResult.get());
+ else
+ ParseStructUnionBody(StartLoc, TagType, TagOrTempResult.get());
+ }
+
+ const char *PrevSpec = nullptr;
+ unsigned DiagID;
+ bool Result;
+ if (!TypeResult.isInvalid()) {
+ Result = DS.SetTypeSpecType(DeclSpec::TST_typename, StartLoc,
+ NameLoc.isValid() ? NameLoc : StartLoc,
+ PrevSpec, DiagID, TypeResult.get(), Policy);
+ } else if (!TagOrTempResult.isInvalid()) {
+ Result = DS.SetTypeSpecType(TagType, StartLoc,
+ NameLoc.isValid() ? NameLoc : StartLoc,
+ PrevSpec, DiagID, TagOrTempResult.get(), Owned,
+ Policy);
+ } else {
+ DS.SetTypeSpecError();
+ return;
+ }
+
+ if (Result)
+ Diag(StartLoc, DiagID) << PrevSpec;
+
+ // At this point, we've successfully parsed a class-specifier in 'definition'
+  // form (e.g. "struct foo { int x; }"). While we could just return here, we're
+ // going to look at what comes after it to improve error recovery. If an
+ // impossible token occurs next, we assume that the programmer forgot a ; at
+ // the end of the declaration and recover that way.
+ //
+ // Also enforce C++ [temp]p3:
+ // In a template-declaration which defines a class, no declarator
+ // is permitted.
+ //
+ // After a type-specifier, we don't expect a semicolon. This only happens in
+ // C, since definitions are not permitted in this context in C++.
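+  //
+  // For instance (an illustration, not part of the original comment):
+  //
+  //   struct S { int x; }   // note: missing ';'
+  //   void f();             // 'void' cannot follow the type-specifier, so
+  //                         // we diagnose a missing ';' and fake one.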
+ if (TUK == Sema::TUK_Definition &&
+ (getLangOpts().CPlusPlus || !isTypeSpecifier(DSC)) &&
+ (TemplateInfo.Kind || !isValidAfterTypeSpecifier(false))) {
+ if (Tok.isNot(tok::semi)) {
+ const PrintingPolicy &PPol = Actions.getASTContext().getPrintingPolicy();
+ ExpectAndConsume(tok::semi, diag::err_expected_after,
+ DeclSpec::getSpecifierName(TagType, PPol));
+ // Push this token back into the preprocessor and change our current token
+ // to ';' so that the rest of the code recovers as though there were an
+ // ';' after the definition.
+ PP.EnterToken(Tok);
+ Tok.setKind(tok::semi);
+ }
+ }
+}
+
+/// ParseBaseClause - Parse the base-clause of a C++ class [C++ class.derived].
+///
+/// base-clause : [C++ class.derived]
+/// ':' base-specifier-list
+/// base-specifier-list:
+/// base-specifier '...'[opt]
+/// base-specifier-list ',' base-specifier '...'[opt]
+void Parser::ParseBaseClause(Decl *ClassDecl) {
+ assert(Tok.is(tok::colon) && "Not a base clause");
+ ConsumeToken();
+
+ // Build up an array of parsed base specifiers.
+ SmallVector<CXXBaseSpecifier *, 8> BaseInfo;
+
+ while (true) {
+ // Parse a base-specifier.
+ BaseResult Result = ParseBaseSpecifier(ClassDecl);
+ if (Result.isInvalid()) {
+ // Skip the rest of this base specifier, up until the comma or
+ // opening brace.
+ SkipUntil(tok::comma, tok::l_brace, StopAtSemi | StopBeforeMatch);
+ } else {
+ // Add this to our array of base specifiers.
+ BaseInfo.push_back(Result.get());
+ }
+
+ // If the next token is a comma, consume it and keep reading
+ // base-specifiers.
+ if (!TryConsumeToken(tok::comma))
+ break;
+ }
+
+ // Attach the base specifiers
+ Actions.ActOnBaseSpecifiers(ClassDecl, BaseInfo);
+}
+
+/// ParseBaseSpecifier - Parse a C++ base-specifier. A base-specifier is
+/// one entry in the base class list of a class specifier, for example:
+/// class foo : public bar, virtual private baz {
+/// 'public bar' and 'virtual private baz' are each base-specifiers.
+///
+/// base-specifier: [C++ class.derived]
+/// attribute-specifier-seq[opt] base-type-specifier
+/// attribute-specifier-seq[opt] 'virtual' access-specifier[opt]
+/// base-type-specifier
+/// attribute-specifier-seq[opt] access-specifier 'virtual'[opt]
+/// base-type-specifier
+BaseResult Parser::ParseBaseSpecifier(Decl *ClassDecl) {
+ bool IsVirtual = false;
+ SourceLocation StartLoc = Tok.getLocation();
+
+ ParsedAttributesWithRange Attributes(AttrFactory);
+ MaybeParseCXX11Attributes(Attributes);
+
+ // Parse the 'virtual' keyword.
+ if (TryConsumeToken(tok::kw_virtual))
+ IsVirtual = true;
+
+ CheckMisplacedCXX11Attribute(Attributes, StartLoc);
+
+ // Parse an (optional) access specifier.
+ AccessSpecifier Access = getAccessSpecifierIfPresent();
+ if (Access != AS_none)
+ ConsumeToken();
+
+ CheckMisplacedCXX11Attribute(Attributes, StartLoc);
+
+ // Parse the 'virtual' keyword (again!), in case it came after the
+ // access specifier.
+ if (Tok.is(tok::kw_virtual)) {
+ SourceLocation VirtualLoc = ConsumeToken();
+ if (IsVirtual) {
+ // Complain about duplicate 'virtual'
+ Diag(VirtualLoc, diag::err_dup_virtual)
+ << FixItHint::CreateRemoval(VirtualLoc);
+ }
+
+ IsVirtual = true;
+ }
+
+ CheckMisplacedCXX11Attribute(Attributes, StartLoc);
+
+ // Parse the class-name.
+
+ // HACK: MSVC doesn't consider _Atomic to be a keyword and its STL
+ // implementation for VS2013 uses _Atomic as an identifier for one of the
+ // classes in <atomic>. Treat '_Atomic' to be an identifier when we are
+ // parsing the class-name for a base specifier.
+ if (getLangOpts().MSVCCompat && Tok.is(tok::kw__Atomic) &&
+ NextToken().is(tok::less))
+ Tok.setKind(tok::identifier);
+
+ SourceLocation EndLocation;
+ SourceLocation BaseLoc;
+ TypeResult BaseType = ParseBaseTypeSpecifier(BaseLoc, EndLocation);
+ if (BaseType.isInvalid())
+ return true;
+
+ // Parse the optional ellipsis (for a pack expansion). The ellipsis is
+ // actually part of the base-specifier-list grammar productions, but we
+ // parse it here for convenience.
+ SourceLocation EllipsisLoc;
+ TryConsumeToken(tok::ellipsis, EllipsisLoc);
+
+ // Find the complete source range for the base-specifier.
+ SourceRange Range(StartLoc, EndLocation);
+
+ // Notify semantic analysis that we have parsed a complete
+ // base-specifier.
+ return Actions.ActOnBaseSpecifier(ClassDecl, Range, Attributes, IsVirtual,
+ Access, BaseType.get(), BaseLoc,
+ EllipsisLoc);
+}
+
+/// getAccessSpecifierIfPresent - Determine whether the next token is
+/// a C++ access-specifier.
+///
+/// access-specifier: [C++ class.derived]
+/// 'private'
+/// 'protected'
+/// 'public'
+AccessSpecifier Parser::getAccessSpecifierIfPresent() const {
+ switch (Tok.getKind()) {
+ default: return AS_none;
+ case tok::kw_private: return AS_private;
+ case tok::kw_protected: return AS_protected;
+ case tok::kw_public: return AS_public;
+ }
+}
+
+/// \brief If the given declarator has any parts for which parsing has to be
+/// delayed, e.g., default arguments or an exception-specification, create a
+/// late-parsed method declaration record to handle the parsing at the end of
+/// the class definition.
+void Parser::HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
+ Decl *ThisDecl) {
+ DeclaratorChunk::FunctionTypeInfo &FTI
+ = DeclaratorInfo.getFunctionTypeInfo();
+  // If there was a late-parsed exception-specification, we'll need a
+  // late parse.
+ bool NeedLateParse = FTI.getExceptionSpecType() == EST_Unparsed;
+
+ if (!NeedLateParse) {
+ // Look ahead to see if there are any default args
+ for (unsigned ParamIdx = 0; ParamIdx < FTI.NumParams; ++ParamIdx) {
+ auto Param = cast<ParmVarDecl>(FTI.Params[ParamIdx].Param);
+ if (Param->hasUnparsedDefaultArg()) {
+ NeedLateParse = true;
+ break;
+ }
+ }
+ }
+
+ if (NeedLateParse) {
+ // Push this method onto the stack of late-parsed method
+ // declarations.
+ auto LateMethod = new LateParsedMethodDeclaration(this, ThisDecl);
+ getCurrentClass().LateParsedDeclarations.push_back(LateMethod);
+ LateMethod->TemplateScope = getCurScope()->isTemplateParamScope();
+
+    // Stash the exception-specification tokens in the late-parsed method.
+ LateMethod->ExceptionSpecTokens = FTI.ExceptionSpecTokens;
+ FTI.ExceptionSpecTokens = nullptr;
+
+ // Push tokens for each parameter. Those that do not have
+ // defaults will be NULL.
+ LateMethod->DefaultArgs.reserve(FTI.NumParams);
+ for (unsigned ParamIdx = 0; ParamIdx < FTI.NumParams; ++ParamIdx)
+ LateMethod->DefaultArgs.push_back(LateParsedDefaultArgument(
+ FTI.Params[ParamIdx].Param, FTI.Params[ParamIdx].DefaultArgTokens));
+ }
+}
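+
+// Hypothetical example (not part of the original source): in
+//   struct S { void f(int x = g()) noexcept(noexcept(g())); static int g(); };
+// both the default argument and the exception-specification name g() before
+// it is declared, so their tokens are stashed above and parsed only once the
+// class definition is complete.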
+
+/// isCXX11VirtSpecifier - Determine whether the given token is a C++11
+/// virt-specifier.
+///
+/// virt-specifier:
+/// override
+/// final
+VirtSpecifiers::Specifier Parser::isCXX11VirtSpecifier(const Token &Tok) const {
+ if (!getLangOpts().CPlusPlus || Tok.isNot(tok::identifier))
+ return VirtSpecifiers::VS_None;
+
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+
+ // Initialize the contextual keywords.
+ if (!Ident_final) {
+ Ident_final = &PP.getIdentifierTable().get("final");
+ if (getLangOpts().MicrosoftExt)
+ Ident_sealed = &PP.getIdentifierTable().get("sealed");
+ Ident_override = &PP.getIdentifierTable().get("override");
+ }
+
+ if (II == Ident_override)
+ return VirtSpecifiers::VS_Override;
+
+ if (II == Ident_sealed)
+ return VirtSpecifiers::VS_Sealed;
+
+ if (II == Ident_final)
+ return VirtSpecifiers::VS_Final;
+
+ return VirtSpecifiers::VS_None;
+}
+
+/// ParseOptionalCXX11VirtSpecifierSeq - Parse a virt-specifier-seq.
+///
+/// virt-specifier-seq:
+/// virt-specifier
+/// virt-specifier-seq virt-specifier
+void Parser::ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS,
+ bool IsInterface,
+ SourceLocation FriendLoc) {
+ while (true) {
+ VirtSpecifiers::Specifier Specifier = isCXX11VirtSpecifier();
+ if (Specifier == VirtSpecifiers::VS_None)
+ return;
+
+ if (FriendLoc.isValid()) {
+ Diag(Tok.getLocation(), diag::err_friend_decl_spec)
+ << VirtSpecifiers::getSpecifierName(Specifier)
+ << FixItHint::CreateRemoval(Tok.getLocation())
+ << SourceRange(FriendLoc, FriendLoc);
+ ConsumeToken();
+ continue;
+ }
+
+ // C++ [class.mem]p8:
+ // A virt-specifier-seq shall contain at most one of each virt-specifier.
+ const char *PrevSpec = nullptr;
+ if (VS.SetSpecifier(Specifier, Tok.getLocation(), PrevSpec))
+ Diag(Tok.getLocation(), diag::err_duplicate_virt_specifier)
+ << PrevSpec
+ << FixItHint::CreateRemoval(Tok.getLocation());
+
+ if (IsInterface && (Specifier == VirtSpecifiers::VS_Final ||
+ Specifier == VirtSpecifiers::VS_Sealed)) {
+ Diag(Tok.getLocation(), diag::err_override_control_interface)
+ << VirtSpecifiers::getSpecifierName(Specifier);
+ } else if (Specifier == VirtSpecifiers::VS_Sealed) {
+ Diag(Tok.getLocation(), diag::ext_ms_sealed_keyword);
+ } else {
+ Diag(Tok.getLocation(),
+ getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_override_control_keyword
+ : diag::ext_override_control_keyword)
+ << VirtSpecifiers::getSpecifierName(Specifier);
+ }
+ ConsumeToken();
+ }
+}
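+
+// Illustrative examples (assumptions, not from the original comments):
+// 'void f() override override;' is diagnosed as a duplicate virt-specifier,
+// and inside an '__interface', 'void f() final;' is rejected because
+// interfaces do not permit override control keywords.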
+
+/// isCXX11FinalKeyword - Determine whether the next token is a C++11
+/// 'final' or Microsoft 'sealed' contextual keyword.
+bool Parser::isCXX11FinalKeyword() const {
+ VirtSpecifiers::Specifier Specifier = isCXX11VirtSpecifier();
+ return Specifier == VirtSpecifiers::VS_Final ||
+ Specifier == VirtSpecifiers::VS_Sealed;
+}
+
+/// \brief Parse a C++ member-declarator up to, but not including, the optional
+/// brace-or-equal-initializer or pure-specifier.
+bool Parser::ParseCXXMemberDeclaratorBeforeInitializer(
+ Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize,
+ LateParsedAttrList &LateParsedAttrs) {
+ // member-declarator:
+ // declarator pure-specifier[opt]
+ // declarator brace-or-equal-initializer[opt]
+ // identifier[opt] ':' constant-expression
+ if (Tok.isNot(tok::colon))
+ ParseDeclarator(DeclaratorInfo);
+ else
+ DeclaratorInfo.SetIdentifier(nullptr, Tok.getLocation());
+
+ if (!DeclaratorInfo.isFunctionDeclarator() && TryConsumeToken(tok::colon)) {
+ assert(DeclaratorInfo.isPastIdentifier() &&
+ "don't know where identifier would go yet?");
+ BitfieldSize = ParseConstantExpression();
+ if (BitfieldSize.isInvalid())
+ SkipUntil(tok::comma, StopAtSemi | StopBeforeMatch);
+ } else {
+ ParseOptionalCXX11VirtSpecifierSeq(
+ VS, getCurrentClass().IsInterface,
+ DeclaratorInfo.getDeclSpec().getFriendSpecLoc());
+ if (!VS.isUnset())
+ MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(DeclaratorInfo, VS);
+ }
+
+ // If a simple-asm-expr is present, parse it.
+ if (Tok.is(tok::kw_asm)) {
+ SourceLocation Loc;
+ ExprResult AsmLabel(ParseSimpleAsm(&Loc));
+ if (AsmLabel.isInvalid())
+ SkipUntil(tok::comma, StopAtSemi | StopBeforeMatch);
+
+ DeclaratorInfo.setAsmLabel(AsmLabel.get());
+ DeclaratorInfo.SetRangeEnd(Loc);
+ }
+
+ // If attributes exist after the declarator, but before an '{', parse them.
+ MaybeParseGNUAttributes(DeclaratorInfo, &LateParsedAttrs);
+
+  // For compatibility with code written for older versions of Clang, also
+  // accept a virt-specifier *after* the GNU attributes.
+ if (BitfieldSize.isUnset() && VS.isUnset()) {
+ ParseOptionalCXX11VirtSpecifierSeq(
+ VS, getCurrentClass().IsInterface,
+ DeclaratorInfo.getDeclSpec().getFriendSpecLoc());
+ if (!VS.isUnset()) {
+ // If we saw any GNU-style attributes that are known to GCC followed by a
+ // virt-specifier, issue a GCC-compat warning.
+ const AttributeList *Attr = DeclaratorInfo.getAttributes();
+ while (Attr) {
+ if (Attr->isKnownToGCC() && !Attr->isCXX11Attribute())
+ Diag(Attr->getLoc(), diag::warn_gcc_attribute_location);
+ Attr = Attr->getNext();
+ }
+ MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(DeclaratorInfo, VS);
+ }
+ }
+
+  // If this has neither a name nor a bit width, something has gone seriously
+  // wrong. Skip until the semicolon or '}'.
+  if (!DeclaratorInfo.hasName() && BitfieldSize.isUnset()) {
+    SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
+ return true;
+ }
+ return false;
+}
+
+/// \brief Look for declaration specifiers possibly occurring after C++11
+/// virt-specifier-seq and diagnose them.
+void Parser::MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(
+ Declarator &D,
+ VirtSpecifiers &VS) {
+ DeclSpec DS(AttrFactory);
+
+ // GNU-style and C++11 attributes are not allowed here, but they will be
+ // handled by the caller. Diagnose everything else.
+ ParseTypeQualifierListOpt(DS, AR_NoAttributesParsed, false);
+ D.ExtendWithDeclSpec(DS);
+
+ if (D.isFunctionDeclarator()) {
+ auto &Function = D.getFunctionTypeInfo();
+ if (DS.getTypeQualifiers() != DeclSpec::TQ_unspecified) {
+ auto DeclSpecCheck = [&] (DeclSpec::TQ TypeQual,
+ const char *FixItName,
+ SourceLocation SpecLoc,
+ unsigned* QualifierLoc) {
+ FixItHint Insertion;
+ if (DS.getTypeQualifiers() & TypeQual) {
+ if (!(Function.TypeQuals & TypeQual)) {
+ std::string Name(FixItName);
+ Name += " ";
+ Insertion = FixItHint::CreateInsertion(VS.getFirstLocation(), Name.c_str());
+ Function.TypeQuals |= TypeQual;
+ *QualifierLoc = SpecLoc.getRawEncoding();
+ }
+ Diag(SpecLoc, diag::err_declspec_after_virtspec)
+ << FixItName
+ << VirtSpecifiers::getSpecifierName(VS.getLastSpecifier())
+ << FixItHint::CreateRemoval(SpecLoc)
+ << Insertion;
+ }
+ };
+ DeclSpecCheck(DeclSpec::TQ_const, "const", DS.getConstSpecLoc(),
+ &Function.ConstQualifierLoc);
+ DeclSpecCheck(DeclSpec::TQ_volatile, "volatile", DS.getVolatileSpecLoc(),
+ &Function.VolatileQualifierLoc);
+ DeclSpecCheck(DeclSpec::TQ_restrict, "restrict", DS.getRestrictSpecLoc(),
+ &Function.RestrictQualifierLoc);
+ }
+
+ // Parse ref-qualifiers.
+ bool RefQualifierIsLValueRef = true;
+ SourceLocation RefQualifierLoc;
+ if (ParseRefQualifier(RefQualifierIsLValueRef, RefQualifierLoc)) {
+ const char *Name = (RefQualifierIsLValueRef ? "& " : "&& ");
+ FixItHint Insertion = FixItHint::CreateInsertion(VS.getFirstLocation(), Name);
+ Function.RefQualifierIsLValueRef = RefQualifierIsLValueRef;
+ Function.RefQualifierLoc = RefQualifierLoc.getRawEncoding();
+
+ Diag(RefQualifierLoc, diag::err_declspec_after_virtspec)
+ << (RefQualifierIsLValueRef ? "&" : "&&")
+ << VirtSpecifiers::getSpecifierName(VS.getLastSpecifier())
+ << FixItHint::CreateRemoval(RefQualifierLoc)
+ << Insertion;
+ D.SetRangeEnd(RefQualifierLoc);
+ }
+ }
+}
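+
+// Illustrative example (not in the original comments):
+//   void f() override const;   // diagnosed; the fix-it removes 'const'
+//                              // here and re-inserts it before 'override'.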
+
+/// ParseCXXClassMemberDeclaration - Parse a C++ class member declaration.
+///
+/// member-declaration:
+/// decl-specifier-seq[opt] member-declarator-list[opt] ';'
+/// function-definition ';'[opt]
+/// ::[opt] nested-name-specifier template[opt] unqualified-id ';'[TODO]
+/// using-declaration [TODO]
+/// [C++0x] static_assert-declaration
+/// template-declaration
+/// [GNU] '__extension__' member-declaration
+///
+/// member-declarator-list:
+/// member-declarator
+/// member-declarator-list ',' member-declarator
+///
+/// member-declarator:
+/// declarator virt-specifier-seq[opt] pure-specifier[opt]
+/// declarator constant-initializer[opt]
+/// [C++11] declarator brace-or-equal-initializer[opt]
+/// identifier[opt] ':' constant-expression
+///
+/// virt-specifier-seq:
+/// virt-specifier
+/// virt-specifier-seq virt-specifier
+///
+/// virt-specifier:
+/// override
+/// final
+/// [MS] sealed
+///
+/// pure-specifier:
+/// '= 0'
+///
+/// constant-initializer:
+/// '=' constant-expression
+///
+Parser::DeclGroupPtrTy
+Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
+ AttributeList *AccessAttrs,
+ const ParsedTemplateInfo &TemplateInfo,
+ ParsingDeclRAIIObject *TemplateDiags) {
+ if (Tok.is(tok::at)) {
+ if (getLangOpts().ObjC1 && NextToken().isObjCAtKeyword(tok::objc_defs))
+ Diag(Tok, diag::err_at_defs_cxx);
+ else
+ Diag(Tok, diag::err_at_in_class);
+
+ ConsumeToken();
+ SkipUntil(tok::r_brace, StopAtSemi);
+ return DeclGroupPtrTy();
+ }
+
+  // Turn on colon protection early, while parsing the declspec, although there
+  // is nothing to protect there yet. It prevents false errors if error
+  // recovery incorrectly determines where the declspec ends, as in the example:
+ // struct A { enum class B { C }; };
+ // const int C = 4;
+ // struct D { A::B : C; };
+ ColonProtectionRAIIObject X(*this);
+
+ // Access declarations.
+ bool MalformedTypeSpec = false;
+ if (!TemplateInfo.Kind &&
+ Tok.isOneOf(tok::identifier, tok::coloncolon, tok::kw___super)) {
+ if (TryAnnotateCXXScopeToken())
+ MalformedTypeSpec = true;
+
+ bool isAccessDecl;
+ if (Tok.isNot(tok::annot_cxxscope))
+ isAccessDecl = false;
+ else if (NextToken().is(tok::identifier))
+ isAccessDecl = GetLookAheadToken(2).is(tok::semi);
+ else
+ isAccessDecl = NextToken().is(tok::kw_operator);
+
+ if (isAccessDecl) {
+ // Collect the scope specifier token we annotated earlier.
+ CXXScopeSpec SS;
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
+ /*EnteringContext=*/false);
+
+ if (SS.isInvalid()) {
+ SkipUntil(tok::semi);
+ return DeclGroupPtrTy();
+ }
+
+ // Try to parse an unqualified-id.
+ SourceLocation TemplateKWLoc;
+ UnqualifiedId Name;
+ if (ParseUnqualifiedId(SS, false, true, true, ParsedType(),
+ TemplateKWLoc, Name)) {
+ SkipUntil(tok::semi);
+ return DeclGroupPtrTy();
+ }
+
+ // TODO: recover from mistakenly-qualified operator declarations.
+ if (ExpectAndConsume(tok::semi, diag::err_expected_after,
+ "access declaration")) {
+ SkipUntil(tok::semi);
+ return DeclGroupPtrTy();
+ }
+
+ return DeclGroupPtrTy::make(DeclGroupRef(Actions.ActOnUsingDeclaration(
+ getCurScope(), AS,
+ /* HasUsingKeyword */ false, SourceLocation(), SS, Name,
+ /* AttrList */ nullptr,
+ /* HasTypenameKeyword */ false, SourceLocation())));
+ }
+ }
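+
+  // Hypothetical example (not from the original source): inside
+  //   struct D : B { B::f; };
+  // the member 'B::f;' is a C++03 access declaration, handled above as if
+  // it had been written 'using B::f;'.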
+
+ // static_assert-declaration. A templated static_assert declaration is
+ // diagnosed in Parser::ParseSingleDeclarationAfterTemplate.
+ if (!TemplateInfo.Kind &&
+ Tok.isOneOf(tok::kw_static_assert, tok::kw__Static_assert)) {
+ SourceLocation DeclEnd;
+ return DeclGroupPtrTy::make(
+ DeclGroupRef(ParseStaticAssertDeclaration(DeclEnd)));
+ }
+
+ if (Tok.is(tok::kw_template)) {
+ assert(!TemplateInfo.TemplateParams &&
+ "Nested template improperly parsed?");
+ SourceLocation DeclEnd;
+ return DeclGroupPtrTy::make(
+ DeclGroupRef(ParseDeclarationStartingWithTemplate(
+ Declarator::MemberContext, DeclEnd, AS, AccessAttrs)));
+ }
+
+ // Handle: member-declaration ::= '__extension__' member-declaration
+ if (Tok.is(tok::kw___extension__)) {
+ // __extension__ silences extension warnings in the subexpression.
+ ExtensionRAIIObject O(Diags); // Use RAII to do this.
+ ConsumeToken();
+ return ParseCXXClassMemberDeclaration(AS, AccessAttrs,
+ TemplateInfo, TemplateDiags);
+ }
+
+ ParsedAttributesWithRange attrs(AttrFactory);
+ ParsedAttributesWithRange FnAttrs(AttrFactory);
+ // Optional C++11 attribute-specifier
+ MaybeParseCXX11Attributes(attrs);
+  // We need to keep these attributes for future diagnostics,
+  // before they are taken over by the declaration specifiers.
+ FnAttrs.addAll(attrs.getList());
+ FnAttrs.Range = attrs.Range;
+
+ MaybeParseMicrosoftAttributes(attrs);
+
+ if (Tok.is(tok::kw_using)) {
+ ProhibitAttributes(attrs);
+
+ // Eat 'using'.
+ SourceLocation UsingLoc = ConsumeToken();
+
+ if (Tok.is(tok::kw_namespace)) {
+ Diag(UsingLoc, diag::err_using_namespace_in_class);
+ SkipUntil(tok::semi, StopBeforeMatch);
+ return DeclGroupPtrTy();
+ }
+ SourceLocation DeclEnd;
+ // Otherwise, it must be a using-declaration or an alias-declaration.
+ return DeclGroupPtrTy::make(DeclGroupRef(ParseUsingDeclaration(
+ Declarator::MemberContext, TemplateInfo, UsingLoc, DeclEnd, AS)));
+ }
+
+ // Hold late-parsed attributes so we can attach a Decl to them later.
+ LateParsedAttrList CommonLateParsedAttrs;
+
+ // decl-specifier-seq:
+ // Parse the common declaration-specifiers piece.
+ ParsingDeclSpec DS(*this, TemplateDiags);
+ DS.takeAttributesFrom(attrs);
+ if (MalformedTypeSpec)
+ DS.SetTypeSpecError();
+
+ ParseDeclarationSpecifiers(DS, TemplateInfo, AS, DSC_class,
+ &CommonLateParsedAttrs);
+
+ // Turn off colon protection that was set for declspec.
+ X.restore();
+
+ // If we had a free-standing type definition with a missing semicolon, we
+ // may get this far before the problem becomes obvious.
+ if (DS.hasTagDefinition() &&
+ TemplateInfo.Kind == ParsedTemplateInfo::NonTemplate &&
+ DiagnoseMissingSemiAfterTagDefinition(DS, AS, DSC_class,
+ &CommonLateParsedAttrs))
+ return DeclGroupPtrTy();
+
+ MultiTemplateParamsArg TemplateParams(
+ TemplateInfo.TemplateParams? TemplateInfo.TemplateParams->data()
+ : nullptr,
+ TemplateInfo.TemplateParams? TemplateInfo.TemplateParams->size() : 0);
+
+ if (TryConsumeToken(tok::semi)) {
+ if (DS.isFriendSpecified())
+ ProhibitAttributes(FnAttrs);
+
+ Decl *TheDecl =
+ Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS, DS, TemplateParams);
+ DS.complete(TheDecl);
+ return DeclGroupPtrTy::make(DeclGroupRef(TheDecl));
+ }
+
+ ParsingDeclarator DeclaratorInfo(*this, DS, Declarator::MemberContext);
+ VirtSpecifiers VS;
+
+ // Hold late-parsed attributes so we can attach a Decl to them later.
+ LateParsedAttrList LateParsedAttrs;
+
+ SourceLocation EqualLoc;
+ SourceLocation PureSpecLoc;
+
+ auto TryConsumePureSpecifier = [&] (bool AllowDefinition) {
+ if (Tok.isNot(tok::equal))
+ return false;
+
+ auto &Zero = NextToken();
+ SmallString<8> Buffer;
+ if (Zero.isNot(tok::numeric_constant) || Zero.getLength() != 1 ||
+ PP.getSpelling(Zero, Buffer) != "0")
+ return false;
+
+ auto &After = GetLookAheadToken(2);
+ if (!After.isOneOf(tok::semi, tok::comma) &&
+ !(AllowDefinition &&
+ After.isOneOf(tok::l_brace, tok::colon, tok::kw_try)))
+ return false;
+
+ EqualLoc = ConsumeToken();
+ PureSpecLoc = ConsumeToken();
+ return true;
+ };
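+
+  // A sketch of what the lambda accepts (illustrative, not original text):
+  // 'virtual void f() = 0;' matches; with AllowDefinition (the MSVC
+  // extension used below) 'virtual void f() = 0 { ... }' matches too.
+  // '= 00;', '= 0x0;' and '= 0 + 1;' do not, since the token after '='
+  // must be the single character '0' followed by ';' or ',' (or by a
+  // function-body introducer when definitions are allowed).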
+
+ SmallVector<Decl *, 8> DeclsInGroup;
+ ExprResult BitfieldSize;
+ bool ExpectSemi = true;
+
+ // Parse the first declarator.
+ if (ParseCXXMemberDeclaratorBeforeInitializer(
+ DeclaratorInfo, VS, BitfieldSize, LateParsedAttrs)) {
+ TryConsumeToken(tok::semi);
+ return DeclGroupPtrTy();
+ }
+
+ // Check for a member function definition.
+ if (BitfieldSize.isUnset()) {
+    // MSVC permits a pure-specifier on inline functions defined at class
+    // scope, so check for '= 0' before checking for a function definition.
+ if (getLangOpts().MicrosoftExt && DeclaratorInfo.isDeclarationOfFunction())
+ TryConsumePureSpecifier(/*AllowDefinition*/ true);
+
+ FunctionDefinitionKind DefinitionKind = FDK_Declaration;
+ // function-definition:
+ //
+ // In C++11, a non-function declarator followed by an open brace is a
+ // braced-init-list for an in-class member initialization, not an
+ // erroneous function definition.
+ if (Tok.is(tok::l_brace) && !getLangOpts().CPlusPlus11) {
+ DefinitionKind = FDK_Definition;
+ } else if (DeclaratorInfo.isFunctionDeclarator()) {
+ if (Tok.isOneOf(tok::l_brace, tok::colon, tok::kw_try)) {
+ DefinitionKind = FDK_Definition;
+ } else if (Tok.is(tok::equal)) {
+ const Token &KW = NextToken();
+ if (KW.is(tok::kw_default))
+ DefinitionKind = FDK_Defaulted;
+ else if (KW.is(tok::kw_delete))
+ DefinitionKind = FDK_Deleted;
+ }
+ }
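+    // For illustration (not in the original comments), inside a C++11 class:
+    //   int x { 0 };          // braced-init-list, stays FDK_Declaration
+    //   void f() { }          // FDK_Definition
+    //   void g() = default;   // FDK_Defaulted
+    //   void h() = delete;    // FDK_Deleted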
+ DeclaratorInfo.setFunctionDefinitionKind(DefinitionKind);
+
+ // C++11 [dcl.attr.grammar] p4: If an attribute-specifier-seq appertains
+ // to a friend declaration, that declaration shall be a definition.
+ if (DeclaratorInfo.isFunctionDeclarator() &&
+ DefinitionKind != FDK_Definition && DS.isFriendSpecified()) {
+ // Diagnose attributes that appear before decl specifier:
+ // [[]] friend int foo();
+ ProhibitAttributes(FnAttrs);
+ }
+
+ if (DefinitionKind != FDK_Declaration) {
+ if (!DeclaratorInfo.isFunctionDeclarator()) {
+ Diag(DeclaratorInfo.getIdentifierLoc(), diag::err_func_def_no_params);
+ ConsumeBrace();
+ SkipUntil(tok::r_brace);
+
+ // Consume the optional ';'
+ TryConsumeToken(tok::semi);
+
+ return DeclGroupPtrTy();
+ }
+
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
+ Diag(DeclaratorInfo.getIdentifierLoc(),
+ diag::err_function_declared_typedef);
+
+ // Recover by treating the 'typedef' as spurious.
+ DS.ClearStorageClassSpecs();
+ }
+
+ Decl *FunDecl =
+ ParseCXXInlineMethodDef(AS, AccessAttrs, DeclaratorInfo, TemplateInfo,
+ VS, PureSpecLoc);
+
+ if (FunDecl) {
+ for (unsigned i = 0, ni = CommonLateParsedAttrs.size(); i < ni; ++i) {
+ CommonLateParsedAttrs[i]->addDecl(FunDecl);
+ }
+ for (unsigned i = 0, ni = LateParsedAttrs.size(); i < ni; ++i) {
+ LateParsedAttrs[i]->addDecl(FunDecl);
+ }
+ }
+ LateParsedAttrs.clear();
+
+      // Consume the ';' - it's optional unless we have '= delete' or '= default'.
+ if (Tok.is(tok::semi))
+ ConsumeExtraSemi(AfterMemberFunctionDefinition);
+
+ return DeclGroupPtrTy::make(DeclGroupRef(FunDecl));
+ }
+ }
+
+ // member-declarator-list:
+ // member-declarator
+ // member-declarator-list ',' member-declarator
+
+  while (true) {
+ InClassInitStyle HasInClassInit = ICIS_NoInit;
+ bool HasStaticInitializer = false;
+ if (Tok.isOneOf(tok::equal, tok::l_brace) && PureSpecLoc.isInvalid()) {
+ if (BitfieldSize.get()) {
+ Diag(Tok, diag::err_bitfield_member_init);
+ SkipUntil(tok::comma, StopAtSemi | StopBeforeMatch);
+ } else if (DeclaratorInfo.isDeclarationOfFunction()) {
+ // It's a pure-specifier.
+ if (!TryConsumePureSpecifier(/*AllowFunctionDefinition*/ false))
+ // Parse it as an expression so that Sema can diagnose it.
+ HasStaticInitializer = true;
+ } else if (DeclaratorInfo.getDeclSpec().getStorageClassSpec() !=
+ DeclSpec::SCS_static &&
+ DeclaratorInfo.getDeclSpec().getStorageClassSpec() !=
+ DeclSpec::SCS_typedef &&
+ !DS.isFriendSpecified()) {
+ // It's a default member initializer.
+ HasInClassInit = Tok.is(tok::equal) ? ICIS_CopyInit : ICIS_ListInit;
+ } else {
+ HasStaticInitializer = true;
+ }
+ }
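+
+    // Hypothetical members hitting the cases above (not from the original
+    // source):
+    //   int b : 3 = 1;           // bit-field with an initializer: diagnosed
+    //   virtual void f() = 0;    // pure-specifier
+    //   int x = 42;              // default member initializer (ICIS_CopyInit)
+    //   static const int k = 1;  // static member: HasStaticInitializer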
+
+    // NOTE: If Sema is the Action module and the declarator is an instance
+    // field, this call will *not* return the created decl; it will return
+    // null. See Sema::ActOnCXXMemberDeclarator for details.
+
+ NamedDecl *ThisDecl = nullptr;
+ if (DS.isFriendSpecified()) {
+ // C++11 [dcl.attr.grammar] p4: If an attribute-specifier-seq appertains
+ // to a friend declaration, that declaration shall be a definition.
+ //
+ // Diagnose attributes that appear in a friend member function declarator:
+ // friend int foo [[]] ();
+ SmallVector<SourceRange, 4> Ranges;
+ DeclaratorInfo.getCXX11AttributeRanges(Ranges);
+ for (SmallVectorImpl<SourceRange>::iterator I = Ranges.begin(),
+ E = Ranges.end(); I != E; ++I)
+ Diag((*I).getBegin(), diag::err_attributes_not_allowed) << *I;
+
+ ThisDecl = Actions.ActOnFriendFunctionDecl(getCurScope(), DeclaratorInfo,
+ TemplateParams);
+ } else {
+ ThisDecl = Actions.ActOnCXXMemberDeclarator(getCurScope(), AS,
+ DeclaratorInfo,
+ TemplateParams,
+ BitfieldSize.get(),
+ VS, HasInClassInit);
+
+ if (VarTemplateDecl *VT =
+ ThisDecl ? dyn_cast<VarTemplateDecl>(ThisDecl) : nullptr)
+ // Re-direct this decl to refer to the templated decl so that we can
+ // initialize it.
+ ThisDecl = VT->getTemplatedDecl();
+
+ if (ThisDecl && AccessAttrs)
+ Actions.ProcessDeclAttributeList(getCurScope(), ThisDecl, AccessAttrs);
+ }
+
+ // Error recovery might have converted a non-static member into a static
+ // member.
+ if (HasInClassInit != ICIS_NoInit &&
+ DeclaratorInfo.getDeclSpec().getStorageClassSpec() ==
+ DeclSpec::SCS_static) {
+ HasInClassInit = ICIS_NoInit;
+ HasStaticInitializer = true;
+ }
+
+ if (ThisDecl && PureSpecLoc.isValid())
+ Actions.ActOnPureSpecifier(ThisDecl, PureSpecLoc);
+
+ // Handle the initializer.
+ if (HasInClassInit != ICIS_NoInit) {
+ // The initializer was deferred; parse it and cache the tokens.
+ Diag(Tok, getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_nonstatic_member_init
+ : diag::ext_nonstatic_member_init);
+
+ if (DeclaratorInfo.isArrayOfUnknownBound()) {
+ // C++11 [dcl.array]p3: An array bound may also be omitted when the
+ // declarator is followed by an initializer.
+ //
+ // A brace-or-equal-initializer for a member-declarator is not an
+ // initializer in the grammar, so this is ill-formed.
+ Diag(Tok, diag::err_incomplete_array_member_init);
+ SkipUntil(tok::comma, StopAtSemi | StopBeforeMatch);
+
+ // Avoid later warnings about a class member of incomplete type.
+ if (ThisDecl)
+ ThisDecl->setInvalidDecl();
+ } else
+ ParseCXXNonStaticMemberInitializer(ThisDecl);
+ } else if (HasStaticInitializer) {
+ // Normal initializer.
+ ExprResult Init = ParseCXXMemberInitializer(
+ ThisDecl, DeclaratorInfo.isDeclarationOfFunction(), EqualLoc);
+
+ if (Init.isInvalid())
+ SkipUntil(tok::comma, StopAtSemi | StopBeforeMatch);
+ else if (ThisDecl)
+ Actions.AddInitializerToDecl(ThisDecl, Init.get(), EqualLoc.isInvalid(),
+ DS.containsPlaceholderType());
+ } else if (ThisDecl && DS.getStorageClassSpec() == DeclSpec::SCS_static)
+ // No initializer.
+ Actions.ActOnUninitializedDecl(ThisDecl, DS.containsPlaceholderType());
+
+ if (ThisDecl) {
+ if (!ThisDecl->isInvalidDecl()) {
+ // Set the Decl for any late parsed attributes
+ for (unsigned i = 0, ni = CommonLateParsedAttrs.size(); i < ni; ++i)
+ CommonLateParsedAttrs[i]->addDecl(ThisDecl);
+
+ for (unsigned i = 0, ni = LateParsedAttrs.size(); i < ni; ++i)
+ LateParsedAttrs[i]->addDecl(ThisDecl);
+ }
+ Actions.FinalizeDeclaration(ThisDecl);
+ DeclsInGroup.push_back(ThisDecl);
+
+ if (DeclaratorInfo.isFunctionDeclarator() &&
+ DeclaratorInfo.getDeclSpec().getStorageClassSpec() !=
+ DeclSpec::SCS_typedef)
+ HandleMemberFunctionDeclDelays(DeclaratorInfo, ThisDecl);
+ }
+ LateParsedAttrs.clear();
+
+ DeclaratorInfo.complete(ThisDecl);
+
+ // If we don't have a comma, it is either the end of the list (a ';')
+ // or an error, bail out.
+ SourceLocation CommaLoc;
+ if (!TryConsumeToken(tok::comma, CommaLoc))
+ break;
+
+ if (Tok.isAtStartOfLine() &&
+ !MightBeDeclarator(Declarator::MemberContext)) {
+ // This comma was followed by a line-break and something which can't be
+ // the start of a declarator. The comma was probably a typo for a
+ // semicolon.
+ Diag(CommaLoc, diag::err_expected_semi_declaration)
+ << FixItHint::CreateReplacement(CommaLoc, ";");
+ ExpectSemi = false;
+ break;
+ }
+
+ // Parse the next declarator.
+ DeclaratorInfo.clear();
+ VS.clear();
+ BitfieldSize = ExprResult(/*Invalid=*/false);
+ EqualLoc = PureSpecLoc = SourceLocation();
+ DeclaratorInfo.setCommaLoc(CommaLoc);
+
+ // GNU attributes are allowed before the second and subsequent declarator.
+ MaybeParseGNUAttributes(DeclaratorInfo);
+
+ if (ParseCXXMemberDeclaratorBeforeInitializer(
+ DeclaratorInfo, VS, BitfieldSize, LateParsedAttrs))
+ break;
+ }
+
+ if (ExpectSemi &&
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_decl_list)) {
+ // Skip to end of block or statement.
+ SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
+ // If we stopped at a ';', eat it.
+ TryConsumeToken(tok::semi);
+ return DeclGroupPtrTy();
+ }
+
+ return Actions.FinalizeDeclaratorGroup(getCurScope(), DS, DeclsInGroup);
+}
+
+/// ParseCXXMemberInitializer - Parse the brace-or-equal-initializer.
+/// Also detect and reject any attempted defaulted/deleted function definition.
+/// The location of the '=', if any, will be placed in EqualLoc.
+///
+/// This does not check for a pure-specifier; that's handled elsewhere.
+///
+/// brace-or-equal-initializer:
+/// '=' initializer-expression
+/// braced-init-list
+///
+/// initializer-clause:
+/// assignment-expression
+/// braced-init-list
+///
+/// defaulted/deleted function-definition:
+/// '=' 'default'
+/// '=' 'delete'
+///
+/// Prior to C++0x, the assignment-expression in an initializer-clause must
+/// be a constant-expression.
+ExprResult Parser::ParseCXXMemberInitializer(Decl *D, bool IsFunction,
+ SourceLocation &EqualLoc) {
+ assert(Tok.isOneOf(tok::equal, tok::l_brace)
+ && "Data member initializer not starting with '=' or '{'");
+
+ EnterExpressionEvaluationContext Context(Actions,
+ Sema::PotentiallyEvaluated,
+ D);
+ if (TryConsumeToken(tok::equal, EqualLoc)) {
+ if (Tok.is(tok::kw_delete)) {
+ // In principle, an initializer of '= delete p;' is legal, but it will
+ // never type-check. It's better to diagnose it as an ill-formed expression
+ // than as an ill-formed deleted non-function member.
+ // An initializer of '= delete p, foo' will never be parsed, because
+ // a top-level comma always ends the initializer expression.
+ const Token &Next = NextToken();
+ if (IsFunction || Next.isOneOf(tok::semi, tok::comma, tok::eof)) {
+ if (IsFunction)
+ Diag(ConsumeToken(), diag::err_default_delete_in_multiple_declaration)
+ << 1 /* delete */;
+ else
+ Diag(ConsumeToken(), diag::err_deleted_non_function);
+ return ExprError();
+ }
+ } else if (Tok.is(tok::kw_default)) {
+ if (IsFunction)
+ Diag(Tok, diag::err_default_delete_in_multiple_declaration)
+ << 0 /* default */;
+ else
+ Diag(ConsumeToken(), diag::err_default_special_members);
+ return ExprError();
+ }
+ }
+ if (const auto *PD = dyn_cast_or_null<MSPropertyDecl>(D)) {
+ Diag(Tok, diag::err_ms_property_initializer) << PD;
+ return ExprError();
+ }
+ return ParseInitializer();
+}
+
+void Parser::SkipCXXMemberSpecification(SourceLocation RecordLoc,
+ SourceLocation AttrFixitLoc,
+ unsigned TagType, Decl *TagDecl) {
+ // Skip the optional 'final' keyword.
+ if (getLangOpts().CPlusPlus && Tok.is(tok::identifier)) {
+ assert(isCXX11FinalKeyword() && "not a class definition");
+ ConsumeToken();
+
+ // Diagnose any C++11 attributes after 'final' keyword.
+ // We deliberately discard these attributes.
+ ParsedAttributesWithRange Attrs(AttrFactory);
+ CheckMisplacedCXX11Attribute(Attrs, AttrFixitLoc);
+
+ // This can only happen if we had malformed misplaced attributes;
+ // we only get called if there is a colon or left-brace after the
+ // attributes.
+ if (Tok.isNot(tok::colon) && Tok.isNot(tok::l_brace))
+ return;
+ }
+
+ // Skip the base clauses. This requires actually parsing them, because
+ // otherwise we can't be sure where they end (a left brace may appear
+ // within a template argument).
+ if (Tok.is(tok::colon)) {
+ // Enter the scope of the class so that we can correctly parse its bases.
+ ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope);
+ ParsingClassDefinition ParsingDef(*this, TagDecl, /*NonNestedClass*/ true,
+ TagType == DeclSpec::TST_interface);
+ auto OldContext =
+ Actions.ActOnTagStartSkippedDefinition(getCurScope(), TagDecl);
+
+ // Parse the bases but don't attach them to the class.
+ ParseBaseClause(nullptr);
+
+ Actions.ActOnTagFinishSkippedDefinition(OldContext);
+
+ if (!Tok.is(tok::l_brace)) {
+ Diag(PP.getLocForEndOfToken(PrevTokLocation),
+ diag::err_expected_lbrace_after_base_specifiers);
+ return;
+ }
+ }
+
+ // Skip the body.
+ assert(Tok.is(tok::l_brace));
+ BalancedDelimiterTracker T(*this, tok::l_brace);
+ T.consumeOpen();
+ T.skipToEnd();
+
+ // Parse and discard any trailing attributes.
+ ParsedAttributes Attrs(AttrFactory);
+ if (Tok.is(tok::kw___attribute))
+ MaybeParseGNUAttributes(Attrs);
+}
+
+Parser::DeclGroupPtrTy Parser::ParseCXXClassMemberDeclarationWithPragmas(
+ AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
+ DeclSpec::TST TagType, Decl *TagDecl) {
+ if (getLangOpts().MicrosoftExt &&
+ Tok.isOneOf(tok::kw___if_exists, tok::kw___if_not_exists)) {
+ ParseMicrosoftIfExistsClassDeclaration(TagType, AS);
+ return DeclGroupPtrTy();
+ }
+
+ // Check for extraneous top-level semicolon.
+ if (Tok.is(tok::semi)) {
+ ConsumeExtraSemi(InsideStruct, TagType);
+ return DeclGroupPtrTy();
+ }
+
+ if (Tok.is(tok::annot_pragma_vis)) {
+ HandlePragmaVisibility();
+ return DeclGroupPtrTy();
+ }
+
+ if (Tok.is(tok::annot_pragma_pack)) {
+ HandlePragmaPack();
+ return DeclGroupPtrTy();
+ }
+
+ if (Tok.is(tok::annot_pragma_align)) {
+ HandlePragmaAlign();
+ return DeclGroupPtrTy();
+ }
+
+ if (Tok.is(tok::annot_pragma_ms_pointers_to_members)) {
+ HandlePragmaMSPointersToMembers();
+ return DeclGroupPtrTy();
+ }
+
+ if (Tok.is(tok::annot_pragma_ms_pragma)) {
+ HandlePragmaMSPragma();
+ return DeclGroupPtrTy();
+ }
+
+ if (Tok.is(tok::annot_pragma_ms_vtordisp)) {
+ HandlePragmaMSVtorDisp();
+ return DeclGroupPtrTy();
+ }
+
+ // If we see a namespace here, a close brace was missing somewhere.
+ if (Tok.is(tok::kw_namespace)) {
+ DiagnoseUnexpectedNamespace(cast<NamedDecl>(TagDecl));
+ return DeclGroupPtrTy();
+ }
+
+ AccessSpecifier NewAS = getAccessSpecifierIfPresent();
+ if (NewAS != AS_none) {
+ // Current token is a C++ access specifier.
+ AS = NewAS;
+ SourceLocation ASLoc = Tok.getLocation();
+ unsigned TokLength = Tok.getLength();
+ ConsumeToken();
+ AccessAttrs.clear();
+ MaybeParseGNUAttributes(AccessAttrs);
+
+ SourceLocation EndLoc;
+ if (TryConsumeToken(tok::colon, EndLoc)) {
+ } else if (TryConsumeToken(tok::semi, EndLoc)) {
+ Diag(EndLoc, diag::err_expected)
+ << tok::colon << FixItHint::CreateReplacement(EndLoc, ":");
+ } else {
+ EndLoc = ASLoc.getLocWithOffset(TokLength);
+ Diag(EndLoc, diag::err_expected)
+ << tok::colon << FixItHint::CreateInsertion(EndLoc, ":");
+ }
+
+ // The Microsoft extension __interface does not permit non-public
+ // access specifiers.
+ if (TagType == DeclSpec::TST_interface && AS != AS_public) {
+ Diag(ASLoc, diag::err_access_specifier_interface) << (AS == AS_protected);
+ }
+
+ if (Actions.ActOnAccessSpecifier(NewAS, ASLoc, EndLoc,
+ AccessAttrs.getList())) {
+      // We found attributes other than annotations; discard them.
+ AccessAttrs.clear();
+ }
+
+ return DeclGroupPtrTy();
+ }
+
+ if (Tok.is(tok::annot_pragma_openmp))
+ return ParseOpenMPDeclarativeDirective();
+
+ // Parse all the comma separated declarators.
+ return ParseCXXClassMemberDeclaration(AS, AccessAttrs.getList());
+}
+
+/// ParseCXXMemberSpecification - Parse the class definition.
+///
+/// member-specification:
+/// member-declaration member-specification[opt]
+/// access-specifier ':' member-specification[opt]
+///
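+/// For example, a hypothetical definition whose body is parsed here,
+/// including the optional 'final' and the base-clause:
+/// @code
+/// class C final : public B {
+/// public:
+///   C() : x(0) { }
+///   int x;
+/// };
+/// @endcode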
+void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
+ SourceLocation AttrFixitLoc,
+ ParsedAttributesWithRange &Attrs,
+ unsigned TagType, Decl *TagDecl) {
+ assert((TagType == DeclSpec::TST_struct ||
+ TagType == DeclSpec::TST_interface ||
+ TagType == DeclSpec::TST_union ||
+ TagType == DeclSpec::TST_class) && "Invalid TagType!");
+
+ PrettyDeclStackTraceEntry CrashInfo(Actions, TagDecl, RecordLoc,
+ "parsing struct/union/class body");
+
+ // Determine whether this is a non-nested class. Note that local
+ // classes are *not* considered to be nested classes.
+ bool NonNestedClass = true;
+ if (!ClassStack.empty()) {
+ for (const Scope *S = getCurScope(); S; S = S->getParent()) {
+ if (S->isClassScope()) {
+ // We're inside a class scope, so this is a nested class.
+ NonNestedClass = false;
+
+ // The Microsoft extension __interface does not permit nested classes.
+ if (getCurrentClass().IsInterface) {
+ Diag(RecordLoc, diag::err_invalid_member_in_interface)
+ << /*ErrorType=*/6
+ << (isa<NamedDecl>(TagDecl)
+ ? cast<NamedDecl>(TagDecl)->getQualifiedNameAsString()
+ : "(anonymous)");
+ }
+ break;
+ }
+
+ if ((S->getFlags() & Scope::FnScope))
+ // If we're in a function or function template then this is a local
+ // class rather than a nested class.
+ break;
+ }
+ }
+
+ // Enter a scope for the class.
+ ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope);
+
+ // Note that we are parsing a new (potentially-nested) class definition.
+ ParsingClassDefinition ParsingDef(*this, TagDecl, NonNestedClass,
+ TagType == DeclSpec::TST_interface);
+
+ if (TagDecl)
+ Actions.ActOnTagStartDefinition(getCurScope(), TagDecl);
+
+ SourceLocation FinalLoc;
+ bool IsFinalSpelledSealed = false;
+
+ // Parse the optional 'final' keyword.
+ if (getLangOpts().CPlusPlus && Tok.is(tok::identifier)) {
+ VirtSpecifiers::Specifier Specifier = isCXX11VirtSpecifier(Tok);
+ assert((Specifier == VirtSpecifiers::VS_Final ||
+ Specifier == VirtSpecifiers::VS_Sealed) &&
+ "not a class definition");
+ FinalLoc = ConsumeToken();
+ IsFinalSpelledSealed = Specifier == VirtSpecifiers::VS_Sealed;
+
+ if (TagType == DeclSpec::TST_interface)
+ Diag(FinalLoc, diag::err_override_control_interface)
+ << VirtSpecifiers::getSpecifierName(Specifier);
+ else if (Specifier == VirtSpecifiers::VS_Final)
+ Diag(FinalLoc, getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_override_control_keyword
+ : diag::ext_override_control_keyword)
+ << VirtSpecifiers::getSpecifierName(Specifier);
+ else if (Specifier == VirtSpecifiers::VS_Sealed)
+ Diag(FinalLoc, diag::ext_ms_sealed_keyword);
+
+ // Parse any C++11 attributes after 'final' keyword.
+ // These attributes are not allowed to appear here,
+ // and the only possible place for them to appertain
+ // to the class would be between class-key and class-name.
+ CheckMisplacedCXX11Attribute(Attrs, AttrFixitLoc);
+
+ // ParseClassSpecifier() does only a superficial check for attributes before
+ // deciding to call this method. For example, for
+ // `class C final alignas ([l) {` it will decide that this looks like a
+ // misplaced attribute since it sees `alignas '(' ')'`. But the actual
+ // attribute parsing code will try to parse the '[' as a constexpr lambda
+ // and consume enough tokens that the alignas parsing code will eat the
+ // opening '{'. So bail out if the next token isn't one we expect.
+ if (!Tok.is(tok::colon) && !Tok.is(tok::l_brace)) {
+ if (TagDecl)
+ Actions.ActOnTagDefinitionError(getCurScope(), TagDecl);
+ return;
+ }
+ }
+
+ if (Tok.is(tok::colon)) {
+ ParseBaseClause(TagDecl);
+ if (!Tok.is(tok::l_brace)) {
+ bool SuggestFixIt = false;
+ SourceLocation BraceLoc = PP.getLocForEndOfToken(PrevTokLocation);
+ if (Tok.isAtStartOfLine()) {
+ switch (Tok.getKind()) {
+ case tok::kw_private:
+ case tok::kw_protected:
+ case tok::kw_public:
+ SuggestFixIt = NextToken().getKind() == tok::colon;
+ break;
+ case tok::kw_static_assert:
+ case tok::r_brace:
+ case tok::kw_using:
+ // base-clause can have simple-template-id; 'template' can't be there
+ case tok::kw_template:
+ SuggestFixIt = true;
+ break;
+ case tok::identifier:
+ SuggestFixIt = isConstructorDeclarator(true);
+ break;
+ default:
+ SuggestFixIt = isCXXSimpleDeclaration(/*AllowForRangeDecl=*/false);
+ break;
+ }
+ }
+ DiagnosticBuilder LBraceDiag =
+ Diag(BraceLoc, diag::err_expected_lbrace_after_base_specifiers);
+ if (SuggestFixIt) {
+ LBraceDiag << FixItHint::CreateInsertion(BraceLoc, " {");
+ // Try recovering from missing { after base-clause.
+ PP.EnterToken(Tok);
+ Tok.setKind(tok::l_brace);
+ } else {
+ if (TagDecl)
+ Actions.ActOnTagDefinitionError(getCurScope(), TagDecl);
+ return;
+ }
+ }
+ }
+
+ assert(Tok.is(tok::l_brace));
+ BalancedDelimiterTracker T(*this, tok::l_brace);
+ T.consumeOpen();
+
+ if (TagDecl)
+ Actions.ActOnStartCXXMemberDeclarations(getCurScope(), TagDecl, FinalLoc,
+ IsFinalSpelledSealed,
+ T.getOpenLocation());
+
+ // C++ 11p3: Members of a class defined with the keyword class are private
+ // by default. Members of a class defined with the keywords struct or union
+ // are public by default.
+ AccessSpecifier CurAS;
+ if (TagType == DeclSpec::TST_class)
+ CurAS = AS_private;
+ else
+ CurAS = AS_public;
+ ParsedAttributesWithRange AccessAttrs(AttrFactory);
+
+ if (TagDecl) {
+ // While we still have something to read, read the member-declarations.
+ while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
+ Tok.isNot(tok::eof)) {
+ // Each iteration of this loop reads one member-declaration.
+ ParseCXXClassMemberDeclarationWithPragmas(
+ CurAS, AccessAttrs, static_cast<DeclSpec::TST>(TagType), TagDecl);
+ }
+ T.consumeClose();
+ } else {
+ SkipUntil(tok::r_brace);
+ }
+
+ // If attributes exist after class contents, parse them.
+ ParsedAttributes attrs(AttrFactory);
+ MaybeParseGNUAttributes(attrs);
+
+ if (TagDecl)
+ Actions.ActOnFinishCXXMemberSpecification(getCurScope(), RecordLoc, TagDecl,
+ T.getOpenLocation(),
+ T.getCloseLocation(),
+ attrs.getList());
+
+ // C++11 [class.mem]p2:
+ // Within the class member-specification, the class is regarded as complete
+ // within function bodies, default arguments, exception-specifications, and
+ // brace-or-equal-initializers for non-static data members (including such
+ // things in nested classes).
+ if (TagDecl && NonNestedClass) {
+ // We are not inside a nested class. This class and its nested classes
+ // are complete and we can parse the delayed portions of method
+ // declarations and the lexed inline method definitions, along with any
+ // delayed attributes.
+ SourceLocation SavedPrevTokLocation = PrevTokLocation;
+ ParseLexedAttributes(getCurrentClass());
+ ParseLexedMethodDeclarations(getCurrentClass());
+
+ // We've finished with all pending member declarations.
+ Actions.ActOnFinishCXXMemberDecls();
+
+ ParseLexedMemberInitializers(getCurrentClass());
+ ParseLexedMethodDefs(getCurrentClass());
+ PrevTokLocation = SavedPrevTokLocation;
+
+ // We've finished parsing everything, including default argument
+ // initializers.
+ Actions.ActOnFinishCXXNonNestedClass(TagDecl);
+ }
+
+ if (TagDecl)
+ Actions.ActOnTagFinishDefinition(getCurScope(), TagDecl,
+ T.getCloseLocation());
+
+ // Leave the class scope.
+ ParsingDef.Pop();
+ ClassScope.Exit();
+}
+
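+/// Diagnose a 'namespace' keyword seen inside a class body, which means a
+/// close brace went missing earlier. A hypothetical example:
+/// @code
+/// class C {
+///   void f();
+/// // missing '};' here
+/// namespace N { }
+/// @endcode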
+void Parser::DiagnoseUnexpectedNamespace(NamedDecl *D) {
+ assert(Tok.is(tok::kw_namespace));
+
+ // FIXME: Suggest where the close brace should have gone by looking
+ // at indentation changes within the definition body.
+ Diag(D->getLocation(),
+ diag::err_missing_end_of_definition) << D;
+ Diag(Tok.getLocation(),
+ diag::note_missing_end_of_definition_before) << D;
+
+ // Push '};' onto the token stream to recover.
+ PP.EnterToken(Tok);
+
+ Tok.startToken();
+ Tok.setLocation(PP.getLocForEndOfToken(PrevTokLocation));
+ Tok.setKind(tok::semi);
+ PP.EnterToken(Tok);
+
+ Tok.setKind(tok::r_brace);
+}
+
+/// ParseConstructorInitializer - Parse a C++ constructor initializer,
+/// which explicitly initializes the members or base classes of a
+/// class (C++ [class.base.init]). For example, the three initializers
+/// after the ':' in the Derived constructor below:
+///
+/// @code
+/// class Base { };
+/// class Derived : Base {
+/// int x;
+/// float f;
+/// public:
+/// Derived(float f) : Base(), x(17), f(f) { }
+/// };
+/// @endcode
+///
+/// [C++] ctor-initializer:
+/// ':' mem-initializer-list
+///
+/// [C++] mem-initializer-list:
+/// mem-initializer ...[opt]
+/// mem-initializer ...[opt] , mem-initializer-list
+void Parser::ParseConstructorInitializer(Decl *ConstructorDecl) {
+ assert(Tok.is(tok::colon) &&
+ "Constructor initializer always starts with ':'");
+
+ // Poison the SEH identifiers so they are flagged as illegal in constructor
+ // initializers.
+ PoisonSEHIdentifiersRAIIObject PoisonSEHIdentifiers(*this, true);
+ SourceLocation ColonLoc = ConsumeToken();
+
+ SmallVector<CXXCtorInitializer*, 4> MemInitializers;
+ bool AnyErrors = false;
+
+ do {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteConstructorInitializer(ConstructorDecl,
+ MemInitializers);
+ return cutOffParsing();
+ } else {
+ MemInitResult MemInit = ParseMemInitializer(ConstructorDecl);
+ if (!MemInit.isInvalid())
+ MemInitializers.push_back(MemInit.get());
+ else
+ AnyErrors = true;
+ }
+
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ else if (Tok.is(tok::l_brace))
+ break;
+ // If the next token looks like a base or member initializer, assume that
+ // we're just missing a comma.
+ else if (Tok.isOneOf(tok::identifier, tok::coloncolon)) {
+ SourceLocation Loc = PP.getLocForEndOfToken(PrevTokLocation);
+ Diag(Loc, diag::err_ctor_init_missing_comma)
+ << FixItHint::CreateInsertion(Loc, ", ");
+ } else {
+ // Skip over garbage, until we get to '{'. Don't eat the '{'.
+ Diag(Tok.getLocation(), diag::err_expected_either) << tok::l_brace
+ << tok::comma;
+ SkipUntil(tok::l_brace, StopAtSemi | StopBeforeMatch);
+ break;
+ }
+ } while (true);
+
+ Actions.ActOnMemInitializers(ConstructorDecl, ColonLoc, MemInitializers,
+ AnyErrors);
+}
+
+/// ParseMemInitializer - Parse a C++ member initializer, which is
+/// part of a constructor initializer that explicitly initializes one
+/// member or base class (C++ [class.base.init]). See
+/// ParseConstructorInitializer for an example.
+///
+/// [C++] mem-initializer:
+/// mem-initializer-id '(' expression-list[opt] ')'
+/// [C++0x] mem-initializer-id braced-init-list
+///
+/// [C++] mem-initializer-id:
+/// '::'[opt] nested-name-specifier[opt] class-name
+/// identifier
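+///
+/// For example, hypothetical constructors using both accepted forms:
+/// @code
+/// struct S {
+///   int x;
+///   S() : x(0) { }     // mem-initializer-id '(' expression-list ')'
+///   S(int) : x{1} { }  // mem-initializer-id braced-init-list (C++11)
+/// };
+/// @endcode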
+MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
+ // parse '::'[opt] nested-name-specifier[opt]
+ CXXScopeSpec SS;
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
+ ParsedType TemplateTypeTy;
+ if (Tok.is(tok::annot_template_id)) {
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ if (TemplateId->Kind == TNK_Type_template ||
+ TemplateId->Kind == TNK_Dependent_template_name) {
+ AnnotateTemplateIdTokenAsType();
+ assert(Tok.is(tok::annot_typename) && "template-id -> type failed");
+ TemplateTypeTy = getTypeAnnotation(Tok);
+ }
+ }
+ // Uses of decltype will already have been converted to annot_decltype by
+ // ParseOptionalCXXScopeSpecifier at this point.
+ if (!TemplateTypeTy && Tok.isNot(tok::identifier)
+ && Tok.isNot(tok::annot_decltype)) {
+ Diag(Tok, diag::err_expected_member_or_base_name);
+ return true;
+ }
+
+ IdentifierInfo *II = nullptr;
+ DeclSpec DS(AttrFactory);
+ SourceLocation IdLoc = Tok.getLocation();
+ if (Tok.is(tok::annot_decltype)) {
+ // Get the decltype expression, if there is one.
+ ParseDecltypeSpecifier(DS);
+ } else {
+ if (Tok.is(tok::identifier))
+ // Get the identifier. This may be a member name or a class name,
+ // but we'll let the semantic analysis determine which it is.
+ II = Tok.getIdentifierInfo();
+ ConsumeToken();
+ }
+
+ // Parse the '('.
+ if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+
+ ExprResult InitList = ParseBraceInitializer();
+ if (InitList.isInvalid())
+ return true;
+
+ SourceLocation EllipsisLoc;
+ TryConsumeToken(tok::ellipsis, EllipsisLoc);
+
+ return Actions.ActOnMemInitializer(ConstructorDecl, getCurScope(), SS, II,
+ TemplateTypeTy, DS, IdLoc,
+ InitList.get(), EllipsisLoc);
+ } else if(Tok.is(tok::l_paren)) {
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ // Parse the optional expression-list.
+ ExprVector ArgExprs;
+ CommaLocsTy CommaLocs;
+ if (Tok.isNot(tok::r_paren) && ParseExpressionList(ArgExprs, CommaLocs)) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return true;
+ }
+
+ T.consumeClose();
+
+ SourceLocation EllipsisLoc;
+ TryConsumeToken(tok::ellipsis, EllipsisLoc);
+
+ return Actions.ActOnMemInitializer(ConstructorDecl, getCurScope(), SS, II,
+ TemplateTypeTy, DS, IdLoc,
+ T.getOpenLocation(), ArgExprs,
+ T.getCloseLocation(), EllipsisLoc);
+ }
+
+ if (getLangOpts().CPlusPlus11)
+ return Diag(Tok, diag::err_expected_either) << tok::l_paren << tok::l_brace;
+ else
+ return Diag(Tok, diag::err_expected) << tok::l_paren;
+}
+
+/// \brief Parse a C++ exception-specification if present (C++0x [except.spec]).
+///
+/// exception-specification:
+/// dynamic-exception-specification
+/// noexcept-specification
+///
+/// noexcept-specification:
+/// 'noexcept'
+/// 'noexcept' '(' constant-expression ')'
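+///
+/// For example, hypothetical declarations covering each form:
+/// @code
+/// void f() noexcept;                    // noexcept-specification
+/// void g() noexcept(sizeof(int) == 4);  // with constant-expression
+/// void h() throw(int);                  // dynamic-exception-specification
+/// @endcode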
+ExceptionSpecificationType
+Parser::tryParseExceptionSpecification(bool Delayed,
+ SourceRange &SpecificationRange,
+ SmallVectorImpl<ParsedType> &DynamicExceptions,
+ SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
+ ExprResult &NoexceptExpr,
+ CachedTokens *&ExceptionSpecTokens) {
+ ExceptionSpecificationType Result = EST_None;
+ ExceptionSpecTokens = nullptr;
+
+ // Handle delayed parsing of exception-specifications.
+ if (Delayed) {
+ if (Tok.isNot(tok::kw_throw) && Tok.isNot(tok::kw_noexcept))
+ return EST_None;
+
+ // Consume and cache the starting token.
+ bool IsNoexcept = Tok.is(tok::kw_noexcept);
+ Token StartTok = Tok;
+ SpecificationRange = SourceRange(ConsumeToken());
+
+ // Check for a '('.
+ if (!Tok.is(tok::l_paren)) {
+ // If this is a bare 'noexcept', we're done.
+ if (IsNoexcept) {
+ Diag(Tok, diag::warn_cxx98_compat_noexcept_decl);
+ NoexceptExpr = nullptr;
+ return EST_BasicNoexcept;
+ }
+
+ Diag(Tok, diag::err_expected_lparen_after) << "throw";
+ return EST_DynamicNone;
+ }
+
+ // Cache the tokens for the exception-specification.
+ ExceptionSpecTokens = new CachedTokens;
+ ExceptionSpecTokens->push_back(StartTok); // 'throw' or 'noexcept'
+ ExceptionSpecTokens->push_back(Tok); // '('
+ SpecificationRange.setEnd(ConsumeParen()); // '('
+
+ ConsumeAndStoreUntil(tok::r_paren, *ExceptionSpecTokens,
+ /*StopAtSemi=*/true,
+ /*ConsumeFinalToken=*/true);
+ SpecificationRange.setEnd(Tok.getLocation());
+ return EST_Unparsed;
+ }
+
+ // See if there's a dynamic specification.
+ if (Tok.is(tok::kw_throw)) {
+ Result = ParseDynamicExceptionSpecification(SpecificationRange,
+ DynamicExceptions,
+ DynamicExceptionRanges);
+ assert(DynamicExceptions.size() == DynamicExceptionRanges.size() &&
+ "Produced different number of exception types and ranges.");
+ }
+
+ // If there's no noexcept specification, we're done.
+ if (Tok.isNot(tok::kw_noexcept))
+ return Result;
+
+ Diag(Tok, diag::warn_cxx98_compat_noexcept_decl);
+
+  // If we already had a dynamic specification, parse the noexcept for
+  // recovery, but emit a diagnostic and don't store the results.
+ SourceRange NoexceptRange;
+ ExceptionSpecificationType NoexceptType = EST_None;
+
+ SourceLocation KeywordLoc = ConsumeToken();
+ if (Tok.is(tok::l_paren)) {
+ // There is an argument.
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ NoexceptType = EST_ComputedNoexcept;
+ NoexceptExpr = ParseConstantExpression();
+ T.consumeClose();
+ // The argument must be contextually convertible to bool. We use
+ // ActOnBooleanCondition for this purpose.
+ if (!NoexceptExpr.isInvalid()) {
+ NoexceptExpr = Actions.ActOnBooleanCondition(getCurScope(), KeywordLoc,
+ NoexceptExpr.get());
+ NoexceptRange = SourceRange(KeywordLoc, T.getCloseLocation());
+ } else {
+ NoexceptType = EST_None;
+ }
+ } else {
+ // There is no argument.
+ NoexceptType = EST_BasicNoexcept;
+ NoexceptRange = SourceRange(KeywordLoc, KeywordLoc);
+ }
+
+ if (Result == EST_None) {
+ SpecificationRange = NoexceptRange;
+ Result = NoexceptType;
+
+ // If there's a dynamic specification after a noexcept specification,
+ // parse that and ignore the results.
+ if (Tok.is(tok::kw_throw)) {
+ Diag(Tok.getLocation(), diag::err_dynamic_and_noexcept_specification);
+ ParseDynamicExceptionSpecification(NoexceptRange, DynamicExceptions,
+ DynamicExceptionRanges);
+ }
+ } else {
+ Diag(Tok.getLocation(), diag::err_dynamic_and_noexcept_specification);
+ }
+
+ return Result;
+}
+
+static void diagnoseDynamicExceptionSpecification(
+ Parser &P, SourceRange Range, bool IsNoexcept) {
+ if (P.getLangOpts().CPlusPlus11) {
+ const char *Replacement = IsNoexcept ? "noexcept" : "noexcept(false)";
+ P.Diag(Range.getBegin(), diag::warn_exception_spec_deprecated) << Range;
+ P.Diag(Range.getBegin(), diag::note_exception_spec_deprecated)
+ << Replacement << FixItHint::CreateReplacement(Range, Replacement);
+ }
+}
+
+/// ParseDynamicExceptionSpecification - Parse a C++
+/// dynamic-exception-specification (C++ [except.spec]).
+///
+/// dynamic-exception-specification:
+/// 'throw' '(' type-id-list [opt] ')'
+/// [MS] 'throw' '(' '...' ')'
+///
+/// type-id-list:
+/// type-id ... [opt]
+/// type-id-list ',' type-id ... [opt]
+///
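+/// For example, hypothetical declarations and the result returned for each:
+/// @code
+/// void f() throw();          // EST_DynamicNone
+/// void g() throw(int, char); // EST_Dynamic
+/// void h() throw(...);       // EST_MSAny (Microsoft extension)
+/// @endcode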
+ExceptionSpecificationType Parser::ParseDynamicExceptionSpecification(
+ SourceRange &SpecificationRange,
+ SmallVectorImpl<ParsedType> &Exceptions,
+ SmallVectorImpl<SourceRange> &Ranges) {
+ assert(Tok.is(tok::kw_throw) && "expected throw");
+
+ SpecificationRange.setBegin(ConsumeToken());
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.consumeOpen()) {
+ Diag(Tok, diag::err_expected_lparen_after) << "throw";
+ SpecificationRange.setEnd(SpecificationRange.getBegin());
+ return EST_DynamicNone;
+ }
+
+ // Parse throw(...), a Microsoft extension that means "this function
+ // can throw anything".
+ if (Tok.is(tok::ellipsis)) {
+ SourceLocation EllipsisLoc = ConsumeToken();
+ if (!getLangOpts().MicrosoftExt)
+ Diag(EllipsisLoc, diag::ext_ellipsis_exception_spec);
+ T.consumeClose();
+ SpecificationRange.setEnd(T.getCloseLocation());
+ diagnoseDynamicExceptionSpecification(*this, SpecificationRange, false);
+ return EST_MSAny;
+ }
+
+ // Parse the sequence of type-ids.
+ SourceRange Range;
+ while (Tok.isNot(tok::r_paren)) {
+ TypeResult Res(ParseTypeName(&Range));
+
+ if (Tok.is(tok::ellipsis)) {
+ // C++0x [temp.variadic]p5:
+ // - In a dynamic-exception-specification (15.4); the pattern is a
+ // type-id.
+ SourceLocation Ellipsis = ConsumeToken();
+ Range.setEnd(Ellipsis);
+ if (!Res.isInvalid())
+ Res = Actions.ActOnPackExpansion(Res.get(), Ellipsis);
+ }
+
+ if (!Res.isInvalid()) {
+ Exceptions.push_back(Res.get());
+ Ranges.push_back(Range);
+ }
+
+ if (!TryConsumeToken(tok::comma))
+ break;
+ }
+
+ T.consumeClose();
+ SpecificationRange.setEnd(T.getCloseLocation());
+ diagnoseDynamicExceptionSpecification(*this, SpecificationRange,
+ Exceptions.empty());
+ return Exceptions.empty() ? EST_DynamicNone : EST_Dynamic;
+}
+
+/// ParseTrailingReturnType - Parse a trailing return type on a new-style
+/// function declaration.
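+///
+/// For example, hypothetical declarations:
+/// @code
+/// auto f(int x) -> int;
+/// auto g() -> decltype(f(0));
+/// @endcode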
+TypeResult Parser::ParseTrailingReturnType(SourceRange &Range) {
+ assert(Tok.is(tok::arrow) && "expected arrow");
+
+ ConsumeToken();
+
+ return ParseTypeName(&Range, Declarator::TrailingReturnContext);
+}
+
+/// \brief We have just started parsing the definition of a new class,
+/// so push that class onto our stack of classes that is currently
+/// being parsed.
+Sema::ParsingClassState
+Parser::PushParsingClass(Decl *ClassDecl, bool NonNestedClass,
+ bool IsInterface) {
+ assert((NonNestedClass || !ClassStack.empty()) &&
+ "Nested class without outer class");
+ ClassStack.push(new ParsingClass(ClassDecl, NonNestedClass, IsInterface));
+ return Actions.PushParsingClass();
+}
+
+/// \brief Deallocate the given parsed class and all of its nested
+/// classes.
+void Parser::DeallocateParsedClasses(Parser::ParsingClass *Class) {
+ for (unsigned I = 0, N = Class->LateParsedDeclarations.size(); I != N; ++I)
+ delete Class->LateParsedDeclarations[I];
+ delete Class;
+}
+
+/// \brief Pop the top class of the stack of classes that are
+/// currently being parsed.
+///
+/// This routine should be called when we have finished parsing the
+/// definition of a class, but have not yet popped the Scope
+/// associated with the class's definition.
+void Parser::PopParsingClass(Sema::ParsingClassState state) {
+ assert(!ClassStack.empty() && "Mismatched push/pop for class parsing");
+
+ Actions.PopParsingClass(state);
+
+ ParsingClass *Victim = ClassStack.top();
+ ClassStack.pop();
+ if (Victim->TopLevelClass) {
+ // Deallocate all of the nested classes of this class,
+ // recursively: we don't need to keep any of this information.
+ DeallocateParsedClasses(Victim);
+ return;
+ }
+ assert(!ClassStack.empty() && "Missing top-level class?");
+
+ if (Victim->LateParsedDeclarations.empty()) {
+ // The victim is a nested class, but we will not need to perform
+ // any processing after the definition of this class since it has
+ // no members whose handling was delayed. Therefore, we can just
+ // remove this nested class.
+ DeallocateParsedClasses(Victim);
+ return;
+ }
+
+ // This nested class has some members that will need to be processed
+ // after the top-level class is completely defined. Therefore, add
+ // it to the list of nested classes within its parent.
+ assert(getCurScope()->isClassScope() && "Nested class outside of class scope?");
+ ClassStack.top()->LateParsedDeclarations.push_back(new LateParsedClass(this, Victim));
+ Victim->TemplateScope = getCurScope()->getParent()->isTemplateParamScope();
+}
+
+/// \brief Try to parse an 'identifier' which appears within an attribute-token.
+///
+/// \return the parsed identifier on success, and nullptr if the next token
+/// is not an attribute-token.
+///
+/// C++11 [dcl.attr.grammar]p3:
+/// If a keyword or an alternative token that satisfies the syntactic
+/// requirements of an identifier is contained in an attribute-token,
+/// it is considered an identifier.
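+///
+/// For example, a hypothetical vendor attribute spelled with the
+/// alternative token 'and', which lexes as '&&' but is accepted as an
+/// identifier here:
+/// @code
+/// [[vendor::and]] void f();
+/// @endcode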
+IdentifierInfo *Parser::TryParseCXX11AttributeIdentifier(SourceLocation &Loc) {
+ switch (Tok.getKind()) {
+ default:
+ // Identifiers and keywords have identifier info attached.
+ if (!Tok.isAnnotation()) {
+ if (IdentifierInfo *II = Tok.getIdentifierInfo()) {
+ Loc = ConsumeToken();
+ return II;
+ }
+ }
+ return nullptr;
+
+ case tok::ampamp: // 'and'
+ case tok::pipe: // 'bitor'
+ case tok::pipepipe: // 'or'
+ case tok::caret: // 'xor'
+ case tok::tilde: // 'compl'
+ case tok::amp: // 'bitand'
+ case tok::ampequal: // 'and_eq'
+ case tok::pipeequal: // 'or_eq'
+ case tok::caretequal: // 'xor_eq'
+ case tok::exclaim: // 'not'
+ case tok::exclaimequal: // 'not_eq'
+ // Alternative tokens do not have identifier info, but their spelling
+ // starts with an alphabetical character.
+ SmallString<8> SpellingBuf;
+ SourceLocation SpellingLoc =
+ PP.getSourceManager().getSpellingLoc(Tok.getLocation());
+ StringRef Spelling = PP.getSpelling(SpellingLoc, SpellingBuf);
+ if (isLetter(Spelling[0])) {
+ Loc = ConsumeToken();
+ return &PP.getIdentifierTable().get(Spelling);
+ }
+ return nullptr;
+ }
+}
+
+static bool IsBuiltInOrStandardCXX11Attribute(IdentifierInfo *AttrName,
+ IdentifierInfo *ScopeName) {
+ switch (AttributeList::getKind(AttrName, ScopeName,
+ AttributeList::AS_CXX11)) {
+ case AttributeList::AT_CarriesDependency:
+ case AttributeList::AT_Deprecated:
+ case AttributeList::AT_FallThrough:
+ case AttributeList::AT_CXX11NoReturn:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/// ParseCXX11AttributeArgs -- Parse a C++11 attribute-argument-clause.
+///
+/// [C++11] attribute-argument-clause:
+/// '(' balanced-token-seq ')'
+///
+/// [C++11] balanced-token-seq:
+/// balanced-token
+/// balanced-token-seq balanced-token
+///
+/// [C++11] balanced-token:
+/// '(' balanced-token-seq ')'
+/// '[' balanced-token-seq ']'
+/// '{' balanced-token-seq '}'
+/// any token but '(', ')', '[', ']', '{', or '}'
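+///
+/// For example, [[deprecated]] may take an optional message argument,
+/// while an argument clause on [[noreturn]] is diagnosed (hypothetical
+/// declarations):
+/// @code
+/// [[deprecated("use g() instead")]] void f();
+/// [[noreturn]] void g();  // '[[noreturn()]]' would be an error
+/// @endcode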
+bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
+ SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs,
+ SourceLocation *EndLoc,
+ IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc) {
+ assert(Tok.is(tok::l_paren) && "Not a C++11 attribute argument list");
+ SourceLocation LParenLoc = Tok.getLocation();
+
+ // If the attribute isn't known, we will not attempt to parse any
+ // arguments.
+ if (!hasAttribute(AttrSyntax::CXX, ScopeName, AttrName,
+ getTargetInfo(), getLangOpts())) {
+ // Eat the left paren, then skip to the ending right paren.
+ ConsumeParen();
+ SkipUntil(tok::r_paren);
+ return false;
+ }
+
+ if (ScopeName && ScopeName->getName() == "gnu")
+ // GNU-scoped attributes have some special cases to handle GNU-specific
+ // behaviors.
+ ParseGNUAttributeArgs(AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
+ ScopeLoc, AttributeList::AS_CXX11, nullptr);
+ else {
+ unsigned NumArgs =
+ ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, EndLoc,
+ ScopeName, ScopeLoc, AttributeList::AS_CXX11);
+
+ const AttributeList *Attr = Attrs.getList();
+ if (Attr && IsBuiltInOrStandardCXX11Attribute(AttrName, ScopeName)) {
+ // If the attribute is a standard or built-in attribute and we are
+ // parsing an argument list, we need to determine whether this attribute
+ // was allowed to have an argument list (such as [[deprecated]]), and how
+ // many arguments were parsed (so we can diagnose on [[deprecated()]]).
+ if (Attr->getMaxArgs() && !NumArgs) {
+ // The attribute was allowed to have arguments, but none were provided
+ // even though the attribute parsed successfully. This is an error.
+ Diag(LParenLoc, diag::err_attribute_requires_arguments) << AttrName;
+ } else if (!Attr->getMaxArgs()) {
+ // The attribute parsed successfully, but was not allowed to have any
+ // arguments. It doesn't matter whether any were provided -- the
+ // presence of the argument list (even if empty) is diagnosed.
+ Diag(LParenLoc, diag::err_cxx11_attribute_forbids_arguments)
+ << AttrName
+ << FixItHint::CreateRemoval(SourceRange(LParenLoc, *EndLoc));
+ }
+ }
+ }
+ return true;
+}
+
+/// ParseCXX11AttributeSpecifier - Parse a C++11 attribute-specifier.
+///
+/// [C++11] attribute-specifier:
+/// '[' '[' attribute-list ']' ']'
+/// alignment-specifier
+///
+/// [C++11] attribute-list:
+/// attribute[opt]
+/// attribute-list ',' attribute[opt]
+/// attribute '...'
+/// attribute-list ',' attribute '...'
+///
+/// [C++11] attribute:
+/// attribute-token attribute-argument-clause[opt]
+///
+/// [C++11] attribute-token:
+/// identifier
+/// attribute-scoped-token
+///
+/// [C++11] attribute-scoped-token:
+/// attribute-namespace '::' identifier
+///
+/// [C++11] attribute-namespace:
+/// identifier
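+///
+/// For example, a hypothetical specifier sequence mixing scoped and
+/// unscoped attribute-tokens:
+/// @code
+/// [[gnu::always_inline]] [[noreturn]] void die();
+/// @endcode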
+void Parser::ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
+ SourceLocation *endLoc) {
+ if (Tok.is(tok::kw_alignas)) {
+ Diag(Tok.getLocation(), diag::warn_cxx98_compat_alignas);
+ ParseAlignmentSpecifier(attrs, endLoc);
+ return;
+ }
+
+ assert(Tok.is(tok::l_square) && NextToken().is(tok::l_square)
+ && "Not a C++11 attribute list");
+
+ Diag(Tok.getLocation(), diag::warn_cxx98_compat_attribute);
+
+ ConsumeBracket();
+ ConsumeBracket();
+
+ llvm::SmallDenseMap<IdentifierInfo*, SourceLocation, 4> SeenAttrs;
+
+ while (Tok.isNot(tok::r_square)) {
+ // attribute not present
+ if (TryConsumeToken(tok::comma))
+ continue;
+
+ SourceLocation ScopeLoc, AttrLoc;
+ IdentifierInfo *ScopeName = nullptr, *AttrName = nullptr;
+
+ AttrName = TryParseCXX11AttributeIdentifier(AttrLoc);
+ if (!AttrName)
+ // Break out to the "expected ']'" diagnostic.
+ break;
+
+ // scoped attribute
+ if (TryConsumeToken(tok::coloncolon)) {
+ ScopeName = AttrName;
+ ScopeLoc = AttrLoc;
+
+ AttrName = TryParseCXX11AttributeIdentifier(AttrLoc);
+ if (!AttrName) {
+ Diag(Tok.getLocation(), diag::err_expected) << tok::identifier;
+ SkipUntil(tok::r_square, tok::comma, StopAtSemi | StopBeforeMatch);
+ continue;
+ }
+ }
+
+ bool StandardAttr = IsBuiltInOrStandardCXX11Attribute(AttrName, ScopeName);
+ bool AttrParsed = false;
+
+ if (StandardAttr &&
+ !SeenAttrs.insert(std::make_pair(AttrName, AttrLoc)).second)
+ Diag(AttrLoc, diag::err_cxx11_attribute_repeated)
+ << AttrName << SourceRange(SeenAttrs[AttrName]);
+
+ // Parse attribute arguments
+ if (Tok.is(tok::l_paren))
+ AttrParsed = ParseCXX11AttributeArgs(AttrName, AttrLoc, attrs, endLoc,
+ ScopeName, ScopeLoc);
+
+ if (!AttrParsed)
+ attrs.addNew(AttrName,
+ SourceRange(ScopeLoc.isValid() ? ScopeLoc : AttrLoc,
+ AttrLoc),
+ ScopeName, ScopeLoc, nullptr, 0, AttributeList::AS_CXX11);
+
+ if (TryConsumeToken(tok::ellipsis))
+ Diag(Tok, diag::err_cxx11_attribute_forbids_ellipsis)
+ << AttrName->getName();
+ }
+
+ if (ExpectAndConsume(tok::r_square))
+ SkipUntil(tok::r_square);
+ if (endLoc)
+ *endLoc = Tok.getLocation();
+ if (ExpectAndConsume(tok::r_square))
+ SkipUntil(tok::r_square);
+}
+
+/// ParseCXX11Attributes - Parse a C++11 attribute-specifier-seq.
+///
+/// attribute-specifier-seq:
+/// attribute-specifier-seq[opt] attribute-specifier
+void Parser::ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
+ SourceLocation *endLoc) {
+ assert(getLangOpts().CPlusPlus11);
+
+ SourceLocation StartLoc = Tok.getLocation(), Loc;
+ if (!endLoc)
+ endLoc = &Loc;
+
+ do {
+ ParseCXX11AttributeSpecifier(attrs, endLoc);
+ } while (isCXX11AttributeSpecifier());
+
+ attrs.Range = SourceRange(StartLoc, *endLoc);
+}
+
+void Parser::DiagnoseAndSkipCXX11Attributes() {
+ // Start and end location of an attribute or an attribute list.
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc = SkipCXX11Attributes();
+
+ if (EndLoc.isValid()) {
+ SourceRange Range(StartLoc, EndLoc);
+ Diag(StartLoc, diag::err_attributes_not_allowed)
+ << Range;
+ }
+}
+
+SourceLocation Parser::SkipCXX11Attributes() {
+ SourceLocation EndLoc;
+
+ if (!isCXX11AttributeSpecifier())
+ return EndLoc;
+
+ do {
+ if (Tok.is(tok::l_square)) {
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+ T.skipToEnd();
+ EndLoc = T.getCloseLocation();
+ } else {
+ assert(Tok.is(tok::kw_alignas) && "not an attribute specifier");
+ ConsumeToken();
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (!T.consumeOpen())
+ T.skipToEnd();
+ EndLoc = T.getCloseLocation();
+ }
+ } while (isCXX11AttributeSpecifier());
+
+ return EndLoc;
+}
+
+/// ParseMicrosoftAttributes - Parse Microsoft attributes [Attr]
+///
+/// [MS] ms-attribute:
+/// '[' token-seq ']'
+///
+/// [MS] ms-attribute-seq:
+/// ms-attribute[opt]
+/// ms-attribute ms-attribute-seq
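+///
+/// For example, a hypothetical declaration; the tokens between '[' and ']'
+/// are skipped rather than parsed:
+/// @code
+/// [uuid("00000000-0000-0000-0000-000000000000")] struct IX;
+/// @endcode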
+void Parser::ParseMicrosoftAttributes(ParsedAttributes &attrs,
+ SourceLocation *endLoc) {
+ assert(Tok.is(tok::l_square) && "Not a Microsoft attribute list");
+
+ do {
+ // FIXME: If this is actually a C++11 attribute, parse it as one.
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+ SkipUntil(tok::r_square, StopAtSemi | StopBeforeMatch);
+ T.consumeClose();
+ if (endLoc)
+ *endLoc = T.getCloseLocation();
+ } while (Tok.is(tok::l_square));
+}
+
+void Parser::ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
+ AccessSpecifier& CurAS) {
+ IfExistsCondition Result;
+ if (ParseMicrosoftIfExistsCondition(Result))
+ return;
+
+ BalancedDelimiterTracker Braces(*this, tok::l_brace);
+ if (Braces.consumeOpen()) {
+ Diag(Tok, diag::err_expected) << tok::l_brace;
+ return;
+ }
+
+ switch (Result.Behavior) {
+ case IEB_Parse:
+ // Parse the declarations below.
+ break;
+
+ case IEB_Dependent:
+ Diag(Result.KeywordLoc, diag::warn_microsoft_dependent_exists)
+ << Result.IsIfExists;
+ // Fall through to skip.
+
+ case IEB_Skip:
+ Braces.skipToEnd();
+ return;
+ }
+
+ while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
+ // __if_exists, __if_not_exists can nest.
+ if (Tok.isOneOf(tok::kw___if_exists, tok::kw___if_not_exists)) {
+ ParseMicrosoftIfExistsClassDeclaration((DeclSpec::TST)TagType, CurAS);
+ continue;
+ }
+
+ // Check for extraneous top-level semicolon.
+ if (Tok.is(tok::semi)) {
+ ConsumeExtraSemi(InsideStruct, TagType);
+ continue;
+ }
+
+ AccessSpecifier AS = getAccessSpecifierIfPresent();
+ if (AS != AS_none) {
+ // Current token is a C++ access specifier.
+ CurAS = AS;
+ SourceLocation ASLoc = Tok.getLocation();
+ ConsumeToken();
+ if (Tok.is(tok::colon))
+ Actions.ActOnAccessSpecifier(AS, ASLoc, Tok.getLocation());
+ else
+ Diag(Tok, diag::err_expected) << tok::colon;
+ ConsumeToken();
+ continue;
+ }
+
+ // Parse all the comma separated declarators.
+ ParseCXXClassMemberDeclaration(CurAS, nullptr);
+ }
+
+ Braces.consumeClose();
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp
new file mode 100644
index 0000000..1fd98c1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp
@@ -0,0 +1,2838 @@
+//===--- ParseExpr.cpp - Expression Parsing -------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Provides the Expression parsing implementation.
+///
+/// Expressions in C99 basically consist of a bunch of binary operators with
+/// unary operators and other random stuff at the leaves.
+///
+/// In the C99 grammar, these unary operators bind tightest and are represented
+/// as the 'cast-expression' production. Everything else is either a binary
+/// operator (e.g. '/') or a ternary operator ("?:"). The unary leaves are
+/// handled by ParseCastExpression, the higher level pieces are handled by
+/// ParseBinaryExpression.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "RAIIObjectsForParser.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/TypoCorrection.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+using namespace clang;
+
+/// \brief Simple precedence-based parser for binary/ternary operators.
+///
+/// Note: we diverge from the C99 grammar when parsing the assignment-expression
+/// production. C99 specifies that the LHS of an assignment operator should be
+/// parsed as a unary-expression, but consistency dictates that it be a
+/// conditional-expression. In practice, the important thing here is that the
+/// LHS of an assignment has to be an l-value, which productions between
+/// unary-expression and conditional-expression don't produce. Because we want
+/// consistency, we parse the LHS as a conditional-expression, then check for
+/// l-value-ness in semantic analysis stages.
+///
+/// \verbatim
+/// pm-expression: [C++ 5.5]
+/// cast-expression
+/// pm-expression '.*' cast-expression
+/// pm-expression '->*' cast-expression
+///
+/// multiplicative-expression: [C99 6.5.5]
+/// Note: in C++, apply pm-expression instead of cast-expression
+/// cast-expression
+/// multiplicative-expression '*' cast-expression
+/// multiplicative-expression '/' cast-expression
+/// multiplicative-expression '%' cast-expression
+///
+/// additive-expression: [C99 6.5.6]
+/// multiplicative-expression
+/// additive-expression '+' multiplicative-expression
+/// additive-expression '-' multiplicative-expression
+///
+/// shift-expression: [C99 6.5.7]
+/// additive-expression
+/// shift-expression '<<' additive-expression
+/// shift-expression '>>' additive-expression
+///
+/// relational-expression: [C99 6.5.8]
+/// shift-expression
+/// relational-expression '<' shift-expression
+/// relational-expression '>' shift-expression
+/// relational-expression '<=' shift-expression
+/// relational-expression '>=' shift-expression
+///
+/// equality-expression: [C99 6.5.9]
+/// relational-expression
+/// equality-expression '==' relational-expression
+/// equality-expression '!=' relational-expression
+///
+/// AND-expression: [C99 6.5.10]
+/// equality-expression
+/// AND-expression '&' equality-expression
+///
+/// exclusive-OR-expression: [C99 6.5.11]
+/// AND-expression
+/// exclusive-OR-expression '^' AND-expression
+///
+/// inclusive-OR-expression: [C99 6.5.12]
+/// exclusive-OR-expression
+/// inclusive-OR-expression '|' exclusive-OR-expression
+///
+/// logical-AND-expression: [C99 6.5.13]
+/// inclusive-OR-expression
+/// logical-AND-expression '&&' inclusive-OR-expression
+///
+/// logical-OR-expression: [C99 6.5.14]
+/// logical-AND-expression
+/// logical-OR-expression '||' logical-AND-expression
+///
+/// conditional-expression: [C99 6.5.15]
+/// logical-OR-expression
+/// logical-OR-expression '?' expression ':' conditional-expression
+/// [GNU] logical-OR-expression '?' ':' conditional-expression
+/// [C++] the third operand is an assignment-expression
+///
+/// assignment-expression: [C99 6.5.16]
+/// conditional-expression
+/// unary-expression assignment-operator assignment-expression
+/// [C++] throw-expression [C++ 15]
+///
+/// assignment-operator: one of
+/// = *= /= %= += -= <<= >>= &= ^= |=
+///
+/// expression: [C99 6.5.17]
+/// assignment-expression ...[opt]
+/// expression ',' assignment-expression ...[opt]
+/// \endverbatim
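+///
+/// For example, hypothetical expressions (names assumed declared) and how
+/// precedence groups them:
+/// @code
+/// r = a + b * c;  // parsed as r = (a + (b * c))
+/// x = y = z;      // right-associative: x = (y = z)
+/// f(), g();       // top-level comma: both calls, value is that of g()
+/// @endcode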
+ExprResult Parser::ParseExpression(TypeCastState isTypeCast) {
+ ExprResult LHS(ParseAssignmentExpression(isTypeCast));
+ return ParseRHSOfBinaryExpression(LHS, prec::Comma);
+}
+
+/// This routine is called when the '@' is seen and consumed.
+/// Current token is an Identifier and is not a 'try'. This
+/// routine is necessary to disambiguate \@try-statement from,
+/// for example, \@encode-expression.
+///
+ExprResult
+Parser::ParseExpressionWithLeadingAt(SourceLocation AtLoc) {
+ ExprResult LHS(ParseObjCAtExpression(AtLoc));
+ return ParseRHSOfBinaryExpression(LHS, prec::Comma);
+}
+
+/// This routine is called when a leading '__extension__' is seen and
+/// consumed. This is necessary because the token gets consumed in the
+/// process of disambiguating between an expression and a declaration.
+ExprResult
+Parser::ParseExpressionWithLeadingExtension(SourceLocation ExtLoc) {
+ ExprResult LHS(true);
+ {
+ // Silence extension warnings in the sub-expression
+ ExtensionRAIIObject O(Diags);
+
+ LHS = ParseCastExpression(false);
+ }
+
+ if (!LHS.isInvalid())
+ LHS = Actions.ActOnUnaryOp(getCurScope(), ExtLoc, tok::kw___extension__,
+ LHS.get());
+
+ return ParseRHSOfBinaryExpression(LHS, prec::Comma);
+}
+
+/// \brief Parse an expr that doesn't include (top-level) commas.
+ExprResult Parser::ParseAssignmentExpression(TypeCastState isTypeCast) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Expression);
+ cutOffParsing();
+ return ExprError();
+ }
+
+ if (Tok.is(tok::kw_throw))
+ return ParseThrowExpression();
+ if (Tok.is(tok::kw_co_yield))
+ return ParseCoyieldExpression();
+
+ ExprResult LHS = ParseCastExpression(/*isUnaryExpression=*/false,
+ /*isAddressOfOperand=*/false,
+ isTypeCast);
+ return ParseRHSOfBinaryExpression(LHS, prec::Assignment);
+}
+
+/// \brief Parse an assignment expression where part of an Objective-C message
+/// send has already been parsed.
+///
+/// In this case \p LBracLoc indicates the location of the '[' of the message
+/// send, and either \p ReceiverName or \p ReceiverExpr is non-null indicating
+/// the receiver of the message.
+///
+/// Since this handles full assignment-expression's, it handles postfix
+/// expressions and other binary operators for these expressions as well.
+ExprResult
+Parser::ParseAssignmentExprWithObjCMessageExprStart(SourceLocation LBracLoc,
+ SourceLocation SuperLoc,
+ ParsedType ReceiverType,
+ Expr *ReceiverExpr) {
+ ExprResult R
+ = ParseObjCMessageExpressionBody(LBracLoc, SuperLoc,
+ ReceiverType, ReceiverExpr);
+ R = ParsePostfixExpressionSuffix(R);
+ return ParseRHSOfBinaryExpression(R, prec::Assignment);
+}
+
+ExprResult Parser::ParseConstantExpression(TypeCastState isTypeCast) {
+ // C++03 [basic.def.odr]p2:
+ // An expression is potentially evaluated unless it appears where an
+ // integral constant expression is required (see 5.19) [...].
+ // C++98 and C++11 have no such rule, but this is only a defect in C++98.
+ EnterExpressionEvaluationContext Unevaluated(Actions,
+ Sema::ConstantEvaluated);
+
+ ExprResult LHS(ParseCastExpression(false, false, isTypeCast));
+ ExprResult Res(ParseRHSOfBinaryExpression(LHS, prec::Conditional));
+ return Actions.ActOnConstantExpression(Res);
+}
+
+/// \brief Parse a constraint-expression.
+///
+/// \verbatim
+/// constraint-expression: [Concepts TS temp.constr.decl p1]
+/// logical-or-expression
+/// \endverbatim
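+///
+/// For example, a hypothetical constrained template (assuming a concept
+/// named Integral is visible):
+/// @code
+/// template<typename T> requires Integral<T>
+/// void f(T);
+/// @endcode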
+ExprResult Parser::ParseConstraintExpression() {
+ // FIXME: this may erroneously consume a function-body as the braced
+ // initializer list of a compound literal
+ //
+ // FIXME: this may erroneously consume a parenthesized rvalue reference
+ // declarator as a parenthesized address-of-label expression
+ ExprResult LHS(ParseCastExpression(/*isUnaryExpression=*/false));
+ ExprResult Res(ParseRHSOfBinaryExpression(LHS, prec::LogicalOr));
+
+ return Res;
+}
+
+bool Parser::isNotExpressionStart() {
+ tok::TokenKind K = Tok.getKind();
+ if (K == tok::l_brace || K == tok::r_brace ||
+ K == tok::kw_for || K == tok::kw_while ||
+ K == tok::kw_if || K == tok::kw_else ||
+ K == tok::kw_goto || K == tok::kw_try)
+ return true;
+ // If this is a decl-specifier, we can't be at the start of an expression.
+ return isKnownToBeDeclarationSpecifier();
+}
+
+static bool isFoldOperator(prec::Level Level) {
+ return Level > prec::Unknown && Level != prec::Conditional;
+}
+static bool isFoldOperator(tok::TokenKind Kind) {
+ return isFoldOperator(getBinOpPrecedence(Kind, false, true));
+}
+
+/// \brief Parse a binary expression that starts with \p LHS and has a
+/// precedence of at least \p MinPrec.
+ExprResult
+Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
+ prec::Level NextTokPrec = getBinOpPrecedence(Tok.getKind(),
+ GreaterThanIsOperator,
+ getLangOpts().CPlusPlus11);
+ SourceLocation ColonLoc;
+
+ while (1) {
+ // If this token has a lower precedence than we are allowed to parse (e.g.
+ // because we are called recursively, or because the token is not a binop),
+ // then we are done!
+ if (NextTokPrec < MinPrec)
+ return LHS;
+
+ // Consume the operator, saving the operator token for error reporting.
+ Token OpToken = Tok;
+ ConsumeToken();
+
+ // Bail out when encountering a comma followed by a token which can't
+ // possibly be the start of an expression. For instance:
+ // int f() { return 1, }
+ // We can't do this before consuming the comma, because
+ // isNotExpressionStart() looks at the token stream.
+ if (OpToken.is(tok::comma) && isNotExpressionStart()) {
+ PP.EnterToken(Tok);
+ Tok = OpToken;
+ return LHS;
+ }
+
+ // If the next token is an ellipsis, then this is a fold-expression. Leave
+ // it alone so we can handle it in the paren expression.
+ if (isFoldOperator(NextTokPrec) && Tok.is(tok::ellipsis)) {
+ // FIXME: We can't check this via lookahead before we consume the token
+ // because that tickles a lexer bug.
+ PP.EnterToken(Tok);
+ Tok = OpToken;
+ return LHS;
+ }
+
+ // Special case handling for the ternary operator.
+ ExprResult TernaryMiddle(true);
+ if (NextTokPrec == prec::Conditional) {
+ if (Tok.isNot(tok::colon)) {
+ // Don't parse FOO:BAR as if it were a typo for FOO::BAR.
+ ColonProtectionRAIIObject X(*this);
+
+ // Handle this production specially:
+ // logical-OR-expression '?' expression ':' conditional-expression
+ // In particular, the RHS of the '?' is 'expression', not
+ // 'logical-OR-expression' as we might expect.
+ TernaryMiddle = ParseExpression();
+ if (TernaryMiddle.isInvalid()) {
+ Actions.CorrectDelayedTyposInExpr(LHS);
+ LHS = ExprError();
+ TernaryMiddle = nullptr;
+ }
+ } else {
+ // Special case handling of "X ? Y : Z" where Y is empty:
+ // logical-OR-expression '?' ':' conditional-expression [GNU]
+ TernaryMiddle = nullptr;
+ Diag(Tok, diag::ext_gnu_conditional_expr);
+ }
+
+ if (!TryConsumeToken(tok::colon, ColonLoc)) {
+ // Otherwise, we're missing a ':'. Assume that this was a typo that
+ // the user forgot. If we're not in a macro expansion, we can suggest
+ // a fixit hint. If there were two spaces before the current token,
+ // suggest inserting the colon in between them, otherwise insert ": ".
+ SourceLocation FILoc = Tok.getLocation();
+ const char *FIText = ": ";
+ const SourceManager &SM = PP.getSourceManager();
+ if (FILoc.isFileID() || PP.isAtStartOfMacroExpansion(FILoc, &FILoc)) {
+ assert(FILoc.isFileID());
+ bool IsInvalid = false;
+ const char *SourcePtr =
+ SM.getCharacterData(FILoc.getLocWithOffset(-1), &IsInvalid);
+ if (!IsInvalid && *SourcePtr == ' ') {
+ SourcePtr =
+ SM.getCharacterData(FILoc.getLocWithOffset(-2), &IsInvalid);
+ if (!IsInvalid && *SourcePtr == ' ') {
+ FILoc = FILoc.getLocWithOffset(-1);
+ FIText = ":";
+ }
+ }
+ }
+
+ Diag(Tok, diag::err_expected)
+ << tok::colon << FixItHint::CreateInsertion(FILoc, FIText);
+ Diag(OpToken, diag::note_matching) << tok::question;
+ ColonLoc = Tok.getLocation();
+ }
+ }
+
+ // Code completion for the right-hand side of an assignment expression
+ // goes through a special hook that takes the left-hand side into account.
+ if (Tok.is(tok::code_completion) && NextTokPrec == prec::Assignment) {
+ Actions.CodeCompleteAssignmentRHS(getCurScope(), LHS.get());
+ cutOffParsing();
+ return ExprError();
+ }
+
+ // Parse another leaf here for the RHS of the operator.
+ // ParseCastExpression works here because all RHS expressions in C have it
+ // as a prefix, at least. However, in C++, an assignment-expression could
+ // be a throw-expression, which is not a valid cast-expression.
+ // Therefore we need some special-casing here.
+ // Also note that the third operand of the conditional operator is
+ // an assignment-expression in C++, and in C++11, we can have a
+ // braced-init-list on the RHS of an assignment. For better diagnostics,
+ // parse as if we were allowed braced-init-lists everywhere, and check that
+ // they only appear on the RHS of assignments later.
+ ExprResult RHS;
+ bool RHSIsInitList = false;
+ if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
+ RHS = ParseBraceInitializer();
+ RHSIsInitList = true;
+ } else if (getLangOpts().CPlusPlus && NextTokPrec <= prec::Conditional)
+ RHS = ParseAssignmentExpression();
+ else
+ RHS = ParseCastExpression(false);
+
+ if (RHS.isInvalid()) {
+ // FIXME: Errors generated by the delayed typo correction should be
+ // printed before errors from parsing the RHS, not after.
+ Actions.CorrectDelayedTyposInExpr(LHS);
+ if (TernaryMiddle.isUsable())
+ TernaryMiddle = Actions.CorrectDelayedTyposInExpr(TernaryMiddle);
+ LHS = ExprError();
+ }
+
+ // Remember the precedence of this operator and get the precedence of the
+ // operator immediately to the right of the RHS.
+ prec::Level ThisPrec = NextTokPrec;
+ NextTokPrec = getBinOpPrecedence(Tok.getKind(), GreaterThanIsOperator,
+ getLangOpts().CPlusPlus11);
+
+ // Assignment and conditional expressions are right-associative.
+ bool isRightAssoc = ThisPrec == prec::Conditional ||
+ ThisPrec == prec::Assignment;
+
+ // Get the precedence of the operator to the right of the RHS. If it binds
+ // more tightly with RHS than we do, evaluate it completely first.
+ if (ThisPrec < NextTokPrec ||
+ (ThisPrec == NextTokPrec && isRightAssoc)) {
+ if (!RHS.isInvalid() && RHSIsInitList) {
+ Diag(Tok, diag::err_init_list_bin_op)
+ << /*LHS*/0 << PP.getSpelling(Tok) << Actions.getExprRange(RHS.get());
+ RHS = ExprError();
+ }
+      // If this is left-associative, only parse things on the RHS that bind
+      // more tightly than the current operator. If it is right-associative, it
+      // is okay to bind exactly as tightly. For example, compile A=B=C=D as
+ // A=(B=(C=D)), where each paren is a level of recursion here.
+ // The function takes ownership of the RHS.
+ RHS = ParseRHSOfBinaryExpression(RHS,
+ static_cast<prec::Level>(ThisPrec + !isRightAssoc));
+ RHSIsInitList = false;
+
+ if (RHS.isInvalid()) {
+ // FIXME: Errors generated by the delayed typo correction should be
+ // printed before errors from ParseRHSOfBinaryExpression, not after.
+ Actions.CorrectDelayedTyposInExpr(LHS);
+ if (TernaryMiddle.isUsable())
+ TernaryMiddle = Actions.CorrectDelayedTyposInExpr(TernaryMiddle);
+ LHS = ExprError();
+ }
+
+ NextTokPrec = getBinOpPrecedence(Tok.getKind(), GreaterThanIsOperator,
+ getLangOpts().CPlusPlus11);
+ }
+
+ if (!RHS.isInvalid() && RHSIsInitList) {
+ if (ThisPrec == prec::Assignment) {
+ Diag(OpToken, diag::warn_cxx98_compat_generalized_initializer_lists)
+ << Actions.getExprRange(RHS.get());
+ } else {
+ Diag(OpToken, diag::err_init_list_bin_op)
+ << /*RHS*/1 << PP.getSpelling(OpToken)
+ << Actions.getExprRange(RHS.get());
+ LHS = ExprError();
+ }
+ }
+
+ if (!LHS.isInvalid()) {
+ // Combine the LHS and RHS into the LHS (e.g. build AST).
+ if (TernaryMiddle.isInvalid()) {
+ // If we're using '>>' as an operator within a template
+ // argument list (in C++98), suggest the addition of
+ // parentheses so that the code remains well-formed in C++0x.
+ if (!GreaterThanIsOperator && OpToken.is(tok::greatergreater))
+ SuggestParentheses(OpToken.getLocation(),
+ diag::warn_cxx11_right_shift_in_template_arg,
+ SourceRange(Actions.getExprRange(LHS.get()).getBegin(),
+ Actions.getExprRange(RHS.get()).getEnd()));
+
+ LHS = Actions.ActOnBinOp(getCurScope(), OpToken.getLocation(),
+ OpToken.getKind(), LHS.get(), RHS.get());
+ } else
+ LHS = Actions.ActOnConditionalOp(OpToken.getLocation(), ColonLoc,
+ LHS.get(), TernaryMiddle.get(),
+ RHS.get());
+ } else
+ // Ensure potential typos in the RHS aren't left undiagnosed.
+ Actions.CorrectDelayedTyposInExpr(RHS);
+ }
+}
+
+/// \brief Parse a cast-expression, or, if \p isUnaryExpression is true,
+/// parse a unary-expression.
+///
+/// \p isAddressOfOperand exists because an id-expression that is the
+/// operand of address-of gets special treatment due to member pointers.
+///
+ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
+ bool isAddressOfOperand,
+ TypeCastState isTypeCast) {
+ bool NotCastExpr;
+ ExprResult Res = ParseCastExpression(isUnaryExpression,
+ isAddressOfOperand,
+ NotCastExpr,
+ isTypeCast);
+ if (NotCastExpr)
+ Diag(Tok, diag::err_expected_expression);
+ return Res;
+}
+
+namespace {
+class CastExpressionIdValidator : public CorrectionCandidateCallback {
+ public:
+ CastExpressionIdValidator(Token Next, bool AllowTypes, bool AllowNonTypes)
+ : NextToken(Next), AllowNonTypes(AllowNonTypes) {
+ WantTypeSpecifiers = WantFunctionLikeCasts = AllowTypes;
+ }
+
+ bool ValidateCandidate(const TypoCorrection &candidate) override {
+ NamedDecl *ND = candidate.getCorrectionDecl();
+ if (!ND)
+ return candidate.isKeyword();
+
+ if (isa<TypeDecl>(ND))
+ return WantTypeSpecifiers;
+
+ if (!AllowNonTypes || !CorrectionCandidateCallback::ValidateCandidate(candidate))
+ return false;
+
+ if (!NextToken.isOneOf(tok::equal, tok::arrow, tok::period))
+ return true;
+
+ for (auto *C : candidate) {
+ NamedDecl *ND = C->getUnderlyingDecl();
+ if (isa<ValueDecl>(ND) && !isa<FunctionDecl>(ND))
+ return true;
+ }
+ return false;
+ }
+
+ private:
+ Token NextToken;
+ bool AllowNonTypes;
+};
+}
+
+/// \brief Parse a cast-expression, or, if \p isUnaryExpression is true, parse
+/// a unary-expression.
+///
+/// \p isAddressOfOperand exists because an id-expression that is the operand
+/// of address-of gets special treatment due to member pointers. NotCastExpr
+/// is set to true if the token is not the start of a cast-expression, and no
+/// diagnostic is emitted in this case.
+///
+/// \verbatim
+/// cast-expression: [C99 6.5.4]
+/// unary-expression
+/// '(' type-name ')' cast-expression
+///
+/// unary-expression: [C99 6.5.3]
+/// postfix-expression
+/// '++' unary-expression
+/// '--' unary-expression
+/// [Coro] 'co_await' cast-expression
+/// unary-operator cast-expression
+/// 'sizeof' unary-expression
+/// 'sizeof' '(' type-name ')'
+/// [C++11] 'sizeof' '...' '(' identifier ')'
+/// [GNU] '__alignof' unary-expression
+/// [GNU] '__alignof' '(' type-name ')'
+/// [C11] '_Alignof' '(' type-name ')'
+/// [C++11] 'alignof' '(' type-id ')'
+/// [GNU] '&&' identifier
+/// [C++11] 'noexcept' '(' expression ')' [C++11 5.3.7]
+/// [C++] new-expression
+/// [C++] delete-expression
+///
+/// unary-operator: one of
+/// '&' '*' '+' '-' '~' '!'
+/// [GNU] '__extension__' '__real' '__imag'
+///
+/// primary-expression: [C99 6.5.1]
+/// [C99] identifier
+/// [C++] id-expression
+/// constant
+/// string-literal
+/// [C++] boolean-literal [C++ 2.13.5]
+/// [C++11] 'nullptr' [C++11 2.14.7]
+/// [C++11] user-defined-literal
+/// '(' expression ')'
+/// [C11] generic-selection
+/// '__func__' [C99 6.4.2.2]
+/// [GNU] '__FUNCTION__'
+/// [MS] '__FUNCDNAME__'
+/// [MS] 'L__FUNCTION__'
+/// [GNU] '__PRETTY_FUNCTION__'
+/// [GNU] '(' compound-statement ')'
+/// [GNU] '__builtin_va_arg' '(' assignment-expression ',' type-name ')'
+/// [GNU] '__builtin_offsetof' '(' type-name ',' offsetof-member-designator')'
+/// [GNU] '__builtin_choose_expr' '(' assign-expr ',' assign-expr ','
+/// assign-expr ')'
+/// [GNU] '__builtin_types_compatible_p' '(' type-name ',' type-name ')'
+/// [GNU] '__null'
+/// [OBJC] '[' objc-message-expr ']'
+/// [OBJC] '\@selector' '(' objc-selector-arg ')'
+/// [OBJC] '\@protocol' '(' identifier ')'
+/// [OBJC] '\@encode' '(' type-name ')'
+/// [OBJC] objc-string-literal
+/// [C++] simple-type-specifier '(' expression-list[opt] ')' [C++ 5.2.3]
+/// [C++11] simple-type-specifier braced-init-list [C++11 5.2.3]
+/// [C++] typename-specifier '(' expression-list[opt] ')' [C++ 5.2.3]
+/// [C++11] typename-specifier braced-init-list [C++11 5.2.3]
+/// [C++] 'const_cast' '<' type-name '>' '(' expression ')' [C++ 5.2p1]
+/// [C++] 'dynamic_cast' '<' type-name '>' '(' expression ')' [C++ 5.2p1]
+/// [C++] 'reinterpret_cast' '<' type-name '>' '(' expression ')' [C++ 5.2p1]
+/// [C++] 'static_cast' '<' type-name '>' '(' expression ')' [C++ 5.2p1]
+/// [C++] 'typeid' '(' expression ')' [C++ 5.2p1]
+/// [C++] 'typeid' '(' type-id ')' [C++ 5.2p1]
+/// [C++] 'this' [C++ 9.3.2]
+/// [G++] unary-type-trait '(' type-id ')'
+/// [G++] binary-type-trait '(' type-id ',' type-id ')' [TODO]
+/// [EMBT] array-type-trait '(' type-id ',' integer ')'
+/// [clang] '^' block-literal
+///
+/// constant: [C99 6.4.4]
+/// integer-constant
+/// floating-constant
+/// enumeration-constant -> identifier
+/// character-constant
+///
+/// id-expression: [C++ 5.1]
+/// unqualified-id
+/// qualified-id
+///
+/// unqualified-id: [C++ 5.1]
+/// identifier
+/// operator-function-id
+/// conversion-function-id
+/// '~' class-name
+/// template-id
+///
+/// new-expression: [C++ 5.3.4]
+/// '::'[opt] 'new' new-placement[opt] new-type-id
+/// new-initializer[opt]
+/// '::'[opt] 'new' new-placement[opt] '(' type-id ')'
+/// new-initializer[opt]
+///
+/// delete-expression: [C++ 5.3.5]
+/// '::'[opt] 'delete' cast-expression
+/// '::'[opt] 'delete' '[' ']' cast-expression
+///
+/// [GNU/Embarcadero] unary-type-trait:
+/// '__is_arithmetic'
+/// '__is_floating_point'
+/// '__is_integral'
+/// '__is_lvalue_expr'
+/// '__is_rvalue_expr'
+/// '__is_complete_type'
+/// '__is_void'
+/// '__is_array'
+/// '__is_function'
+/// '__is_reference'
+/// '__is_lvalue_reference'
+/// '__is_rvalue_reference'
+/// '__is_fundamental'
+/// '__is_object'
+/// '__is_scalar'
+/// '__is_compound'
+/// '__is_pointer'
+/// '__is_member_object_pointer'
+/// '__is_member_function_pointer'
+/// '__is_member_pointer'
+/// '__is_const'
+/// '__is_volatile'
+/// '__is_trivial'
+/// '__is_standard_layout'
+/// '__is_signed'
+/// '__is_unsigned'
+///
+/// [GNU] unary-type-trait:
+/// '__has_nothrow_assign'
+/// '__has_nothrow_copy'
+/// '__has_nothrow_constructor'
+/// '__has_trivial_assign' [TODO]
+/// '__has_trivial_copy' [TODO]
+/// '__has_trivial_constructor'
+/// '__has_trivial_destructor'
+/// '__has_virtual_destructor'
+/// '__is_abstract' [TODO]
+/// '__is_class'
+/// '__is_empty' [TODO]
+/// '__is_enum'
+/// '__is_final'
+/// '__is_pod'
+/// '__is_polymorphic'
+/// '__is_sealed' [MS]
+/// '__is_trivial'
+/// '__is_union'
+///
+/// [Clang] unary-type-trait:
+/// '__trivially_copyable'
+///
+/// binary-type-trait:
+/// [GNU] '__is_base_of'
+/// [MS] '__is_convertible_to'
+/// '__is_convertible'
+/// '__is_same'
+///
+/// [Embarcadero] array-type-trait:
+/// '__array_rank'
+/// '__array_extent'
+///
+/// [Embarcadero] expression-trait:
+/// '__is_lvalue_expr'
+/// '__is_rvalue_expr'
+/// \endverbatim
+///
+ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
+ bool isAddressOfOperand,
+ bool &NotCastExpr,
+ TypeCastState isTypeCast) {
+ ExprResult Res;
+ tok::TokenKind SavedKind = Tok.getKind();
+ NotCastExpr = false;
+
+ // This handles all of cast-expression, unary-expression, postfix-expression,
+ // and primary-expression. We handle them together like this for efficiency
+ // and to simplify handling of an expression starting with a '(' token: which
+ // may be one of a parenthesized expression, cast-expression, compound literal
+ // expression, or statement expression.
+ //
+ // If the parsed tokens consist of a primary-expression, the cases below
+ // break out of the switch; at the end we call ParsePostfixExpressionSuffix
+ // to handle the postfix expression suffixes. Cases that cannot be followed
+ // by postfix exprs should return without invoking
+ // ParsePostfixExpressionSuffix.
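+  //
+  // For example, given 'f(x)[i]++', the identifier case below parses 'f' as a
+  // primary-expression and breaks out of the switch; ParsePostfixExpressionSuffix
+  // then consumes the '(x)', '[i]', and '++' suffixes.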
+ switch (SavedKind) {
+ case tok::l_paren: {
+    // If this expression is limited to being a unary-expression, the paren
+    // cannot start a cast expression.
+ ParenParseOption ParenExprType =
+ (isUnaryExpression && !getLangOpts().CPlusPlus) ? CompoundLiteral
+ : CastExpr;
+ ParsedType CastTy;
+ SourceLocation RParenLoc;
+    Res = ParseParenExpression(ParenExprType, false/*stopIfCastExpr*/,
+ isTypeCast == IsTypeCast, CastTy, RParenLoc);
+
+ switch (ParenExprType) {
+ case SimpleExpr: break; // Nothing else to do.
+ case CompoundStmt: break; // Nothing else to do.
+ case CompoundLiteral:
+ // We parsed '(' type-name ')' '{' ... '}'. If any suffixes of
+ // postfix-expression exist, parse them now.
+ break;
+ case CastExpr:
+ // We have parsed the cast-expression and no postfix-expr pieces are
+ // following.
+ return Res;
+ }
+
+ break;
+ }
+
+ // primary-expression
+ case tok::numeric_constant:
+ // constant: integer-constant
+ // constant: floating-constant
+
+ Res = Actions.ActOnNumericConstant(Tok, /*UDLScope*/getCurScope());
+ ConsumeToken();
+ break;
+
+ case tok::kw_true:
+ case tok::kw_false:
+ return ParseCXXBoolLiteral();
+
+ case tok::kw___objc_yes:
+ case tok::kw___objc_no:
+ return ParseObjCBoolLiteral();
+
+ case tok::kw_nullptr:
+ Diag(Tok, diag::warn_cxx98_compat_nullptr);
+ return Actions.ActOnCXXNullPtrLiteral(ConsumeToken());
+
+ case tok::annot_primary_expr:
+ assert(Res.get() == nullptr && "Stray primary-expression annotation?");
+ Res = getExprAnnotation(Tok);
+ ConsumeToken();
+ break;
+
+ case tok::kw___super:
+ case tok::kw_decltype:
+ // Annotate the token and tail recurse.
+ if (TryAnnotateTypeOrScopeToken())
+ return ExprError();
+ assert(Tok.isNot(tok::kw_decltype) && Tok.isNot(tok::kw___super));
+ return ParseCastExpression(isUnaryExpression, isAddressOfOperand);
+
+ case tok::identifier: { // primary-expression: identifier
+ // unqualified-id: identifier
+ // constant: enumeration-constant
+    // Turn a potentially qualified name into an annot_typename or
+ // annot_cxxscope if it would be valid. This handles things like x::y, etc.
+ if (getLangOpts().CPlusPlus) {
+ // Avoid the unnecessary parse-time lookup in the common case
+ // where the syntax forbids a type.
+ const Token &Next = NextToken();
+
+ // If this identifier was reverted from a token ID, and the next token
+ // is a parenthesis, this is likely to be a use of a type trait. Check
+ // those tokens.
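+      // (Some system headers, e.g. older libstdc++ releases, declare entities
+      // named after these traits, which reverts the keyword to a plain
+      // identifier; a later use of the trait must still parse.)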
+ if (Next.is(tok::l_paren) &&
+ Tok.is(tok::identifier) &&
+ Tok.getIdentifierInfo()->hasRevertedTokenIDToIdentifier()) {
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ // Build up the mapping of revertible type traits, for future use.
+ if (RevertibleTypeTraits.empty()) {
+#define RTT_JOIN(X,Y) X##Y
+#define REVERTIBLE_TYPE_TRAIT(Name) \
+ RevertibleTypeTraits[PP.getIdentifierInfo(#Name)] \
+ = RTT_JOIN(tok::kw_,Name)
+
+ REVERTIBLE_TYPE_TRAIT(__is_abstract);
+ REVERTIBLE_TYPE_TRAIT(__is_arithmetic);
+ REVERTIBLE_TYPE_TRAIT(__is_array);
+ REVERTIBLE_TYPE_TRAIT(__is_base_of);
+ REVERTIBLE_TYPE_TRAIT(__is_class);
+ REVERTIBLE_TYPE_TRAIT(__is_complete_type);
+ REVERTIBLE_TYPE_TRAIT(__is_compound);
+ REVERTIBLE_TYPE_TRAIT(__is_const);
+ REVERTIBLE_TYPE_TRAIT(__is_constructible);
+ REVERTIBLE_TYPE_TRAIT(__is_convertible);
+ REVERTIBLE_TYPE_TRAIT(__is_convertible_to);
+ REVERTIBLE_TYPE_TRAIT(__is_destructible);
+ REVERTIBLE_TYPE_TRAIT(__is_empty);
+ REVERTIBLE_TYPE_TRAIT(__is_enum);
+ REVERTIBLE_TYPE_TRAIT(__is_floating_point);
+ REVERTIBLE_TYPE_TRAIT(__is_final);
+ REVERTIBLE_TYPE_TRAIT(__is_function);
+ REVERTIBLE_TYPE_TRAIT(__is_fundamental);
+ REVERTIBLE_TYPE_TRAIT(__is_integral);
+ REVERTIBLE_TYPE_TRAIT(__is_interface_class);
+ REVERTIBLE_TYPE_TRAIT(__is_literal);
+ REVERTIBLE_TYPE_TRAIT(__is_lvalue_expr);
+ REVERTIBLE_TYPE_TRAIT(__is_lvalue_reference);
+ REVERTIBLE_TYPE_TRAIT(__is_member_function_pointer);
+ REVERTIBLE_TYPE_TRAIT(__is_member_object_pointer);
+ REVERTIBLE_TYPE_TRAIT(__is_member_pointer);
+ REVERTIBLE_TYPE_TRAIT(__is_nothrow_assignable);
+ REVERTIBLE_TYPE_TRAIT(__is_nothrow_constructible);
+ REVERTIBLE_TYPE_TRAIT(__is_nothrow_destructible);
+ REVERTIBLE_TYPE_TRAIT(__is_object);
+ REVERTIBLE_TYPE_TRAIT(__is_pod);
+ REVERTIBLE_TYPE_TRAIT(__is_pointer);
+ REVERTIBLE_TYPE_TRAIT(__is_polymorphic);
+ REVERTIBLE_TYPE_TRAIT(__is_reference);
+ REVERTIBLE_TYPE_TRAIT(__is_rvalue_expr);
+ REVERTIBLE_TYPE_TRAIT(__is_rvalue_reference);
+ REVERTIBLE_TYPE_TRAIT(__is_same);
+ REVERTIBLE_TYPE_TRAIT(__is_scalar);
+ REVERTIBLE_TYPE_TRAIT(__is_sealed);
+ REVERTIBLE_TYPE_TRAIT(__is_signed);
+ REVERTIBLE_TYPE_TRAIT(__is_standard_layout);
+ REVERTIBLE_TYPE_TRAIT(__is_trivial);
+ REVERTIBLE_TYPE_TRAIT(__is_trivially_assignable);
+ REVERTIBLE_TYPE_TRAIT(__is_trivially_constructible);
+ REVERTIBLE_TYPE_TRAIT(__is_trivially_copyable);
+ REVERTIBLE_TYPE_TRAIT(__is_union);
+ REVERTIBLE_TYPE_TRAIT(__is_unsigned);
+ REVERTIBLE_TYPE_TRAIT(__is_void);
+ REVERTIBLE_TYPE_TRAIT(__is_volatile);
+#undef REVERTIBLE_TYPE_TRAIT
+#undef RTT_JOIN
+ }
+
+ // If we find that this is in fact the name of a type trait,
+ // update the token kind in place and parse again to treat it as
+ // the appropriate kind of type trait.
+ llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind>::iterator Known
+ = RevertibleTypeTraits.find(II);
+ if (Known != RevertibleTypeTraits.end()) {
+ Tok.setKind(Known->second);
+ return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
+ NotCastExpr, isTypeCast);
+ }
+ }
+
+ if ((!ColonIsSacred && Next.is(tok::colon)) ||
+ Next.isOneOf(tok::coloncolon, tok::less, tok::l_paren,
+ tok::l_brace)) {
+ // If TryAnnotateTypeOrScopeToken annotates the token, tail recurse.
+ if (TryAnnotateTypeOrScopeToken())
+ return ExprError();
+ if (!Tok.is(tok::identifier))
+ return ParseCastExpression(isUnaryExpression, isAddressOfOperand);
+ }
+ }
+
+ // Consume the identifier so that we can see if it is followed by a '(' or
+ // '.'.
+ IdentifierInfo &II = *Tok.getIdentifierInfo();
+ SourceLocation ILoc = ConsumeToken();
+
+ // Support 'Class.property' and 'super.property' notation.
+ if (getLangOpts().ObjC1 && Tok.is(tok::period) &&
+ (Actions.getTypeName(II, ILoc, getCurScope()) ||
+ // Allow the base to be 'super' if in an objc-method.
+ (&II == Ident_super && getCurScope()->isInObjcMethodScope()))) {
+ ConsumeToken();
+
+ // Allow either an identifier or the keyword 'class' (in C++).
+ if (Tok.isNot(tok::identifier) &&
+ !(getLangOpts().CPlusPlus && Tok.is(tok::kw_class))) {
+ Diag(Tok, diag::err_expected_property_name);
+ return ExprError();
+ }
+ IdentifierInfo &PropertyName = *Tok.getIdentifierInfo();
+ SourceLocation PropertyLoc = ConsumeToken();
+
+ Res = Actions.ActOnClassPropertyRefExpr(II, PropertyName,
+ ILoc, PropertyLoc);
+ break;
+ }
+
+ // In an Objective-C method, if we have "super" followed by an identifier,
+ // the token sequence is ill-formed. However, if there's a ':' or ']' after
+ // that identifier, this is probably a message send with a missing open
+ // bracket. Treat it as such.
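+    // For instance, 'super init]' written without the '[' is parsed here as
+    // the message send '[super init]'.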
+ if (getLangOpts().ObjC1 && &II == Ident_super && !InMessageExpression &&
+ getCurScope()->isInObjcMethodScope() &&
+ ((Tok.is(tok::identifier) &&
+ (NextToken().is(tok::colon) || NextToken().is(tok::r_square))) ||
+ Tok.is(tok::code_completion))) {
+ Res = ParseObjCMessageExpressionBody(SourceLocation(), ILoc, ParsedType(),
+ nullptr);
+ break;
+ }
+
+ // If we have an Objective-C class name followed by an identifier
+ // and either ':' or ']', this is an Objective-C class message
+    // send that's missing the opening '['. Recover
+ // appropriately. Also take this path if we're performing code
+ // completion after an Objective-C class name.
+ if (getLangOpts().ObjC1 &&
+ ((Tok.is(tok::identifier) && !InMessageExpression) ||
+ Tok.is(tok::code_completion))) {
+ const Token& Next = NextToken();
+ if (Tok.is(tok::code_completion) ||
+ Next.is(tok::colon) || Next.is(tok::r_square))
+ if (ParsedType Typ = Actions.getTypeName(II, ILoc, getCurScope()))
+ if (Typ.get()->isObjCObjectOrInterfaceType()) {
+ // Fake up a Declarator to use with ActOnTypeName.
+ DeclSpec DS(AttrFactory);
+ DS.SetRangeStart(ILoc);
+ DS.SetRangeEnd(ILoc);
+ const char *PrevSpec = nullptr;
+ unsigned DiagID;
+ DS.SetTypeSpecType(TST_typename, ILoc, PrevSpec, DiagID, Typ,
+ Actions.getASTContext().getPrintingPolicy());
+
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ TypeResult Ty = Actions.ActOnTypeName(getCurScope(),
+ DeclaratorInfo);
+ if (Ty.isInvalid())
+ break;
+
+ Res = ParseObjCMessageExpressionBody(SourceLocation(),
+ SourceLocation(),
+ Ty.get(), nullptr);
+ break;
+ }
+ }
+
+ // Make sure to pass down the right value for isAddressOfOperand.
+ if (isAddressOfOperand && isPostfixExpressionSuffixStart())
+ isAddressOfOperand = false;
+
+    // Function designators are allowed to be undeclared (C99 6.5.1p2), so we
+    // need to know whether this identifier is a function designator or not.
+ UnqualifiedId Name;
+ CXXScopeSpec ScopeSpec;
+ SourceLocation TemplateKWLoc;
+ Token Replacement;
+ auto Validator = llvm::make_unique<CastExpressionIdValidator>(
+ Tok, isTypeCast != NotTypeCast, isTypeCast != IsTypeCast);
+ Validator->IsAddressOfOperand = isAddressOfOperand;
+ if (Tok.isOneOf(tok::periodstar, tok::arrowstar)) {
+ Validator->WantExpressionKeywords = false;
+ Validator->WantRemainingKeywords = false;
+ } else {
+ Validator->WantRemainingKeywords = Tok.isNot(tok::r_paren);
+ }
+ Name.setIdentifier(&II, ILoc);
+ Res = Actions.ActOnIdExpression(
+ getCurScope(), ScopeSpec, TemplateKWLoc, Name, Tok.is(tok::l_paren),
+ isAddressOfOperand, std::move(Validator),
+ /*IsInlineAsmIdentifier=*/false,
+ Tok.is(tok::r_paren) ? nullptr : &Replacement);
+ if (!Res.isInvalid() && !Res.get()) {
+ UnconsumeToken(Replacement);
+ return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
+ NotCastExpr, isTypeCast);
+ }
+ break;
+ }
+ case tok::char_constant: // constant: character-constant
+ case tok::wide_char_constant:
+ case tok::utf8_char_constant:
+ case tok::utf16_char_constant:
+ case tok::utf32_char_constant:
+ Res = Actions.ActOnCharacterConstant(Tok, /*UDLScope*/getCurScope());
+ ConsumeToken();
+ break;
+ case tok::kw___func__: // primary-expression: __func__ [C99 6.4.2.2]
+ case tok::kw___FUNCTION__: // primary-expression: __FUNCTION__ [GNU]
+ case tok::kw___FUNCDNAME__: // primary-expression: __FUNCDNAME__ [MS]
+ case tok::kw___FUNCSIG__: // primary-expression: __FUNCSIG__ [MS]
+ case tok::kw_L__FUNCTION__: // primary-expression: L__FUNCTION__ [MS]
+ case tok::kw___PRETTY_FUNCTION__: // primary-expression: __P..Y_F..N__ [GNU]
+ Res = Actions.ActOnPredefinedExpr(Tok.getLocation(), SavedKind);
+ ConsumeToken();
+ break;
+ case tok::string_literal: // primary-expression: string-literal
+ case tok::wide_string_literal:
+ case tok::utf8_string_literal:
+ case tok::utf16_string_literal:
+ case tok::utf32_string_literal:
+ Res = ParseStringLiteralExpression(true);
+ break;
+ case tok::kw__Generic: // primary-expression: generic-selection [C11 6.5.1]
+ Res = ParseGenericSelectionExpression();
+ break;
+ case tok::kw___builtin_va_arg:
+ case tok::kw___builtin_offsetof:
+ case tok::kw___builtin_choose_expr:
+ case tok::kw___builtin_astype: // primary-expression: [OCL] as_type()
+ case tok::kw___builtin_convertvector:
+ return ParseBuiltinPrimaryExpression();
+ case tok::kw___null:
+ return Actions.ActOnGNUNullExpr(ConsumeToken());
+
+ case tok::plusplus: // unary-expression: '++' unary-expression [C99]
+ case tok::minusminus: { // unary-expression: '--' unary-expression [C99]
+ // C++ [expr.unary] has:
+ // unary-expression:
+ // ++ cast-expression
+ // -- cast-expression
+ SourceLocation SavedLoc = ConsumeToken();
+ // One special case is implicitly handled here: if the preceding tokens are
+ // an ambiguous cast expression, such as "(T())++", then we recurse to
+ // determine whether the '++' is prefix or postfix.
+ Res = ParseCastExpression(!getLangOpts().CPlusPlus,
+ /*isAddressOfOperand*/false, NotCastExpr,
+ NotTypeCast);
+ if (!Res.isInvalid())
+ Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Res.get());
+ return Res;
+ }
+ case tok::amp: { // unary-expression: '&' cast-expression
+ // Special treatment because of member pointers
+ SourceLocation SavedLoc = ConsumeToken();
+ Res = ParseCastExpression(false, true);
+ if (!Res.isInvalid())
+ Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Res.get());
+ return Res;
+ }
+
+ case tok::star: // unary-expression: '*' cast-expression
+ case tok::plus: // unary-expression: '+' cast-expression
+ case tok::minus: // unary-expression: '-' cast-expression
+ case tok::tilde: // unary-expression: '~' cast-expression
+ case tok::exclaim: // unary-expression: '!' cast-expression
+ case tok::kw___real: // unary-expression: '__real' cast-expression [GNU]
+ case tok::kw___imag: { // unary-expression: '__imag' cast-expression [GNU]
+ SourceLocation SavedLoc = ConsumeToken();
+ Res = ParseCastExpression(false);
+ if (!Res.isInvalid())
+ Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Res.get());
+ return Res;
+ }
+
+ case tok::kw_co_await: { // unary-expression: 'co_await' cast-expression
+ SourceLocation CoawaitLoc = ConsumeToken();
+ Res = ParseCastExpression(false);
+ if (!Res.isInvalid())
+ Res = Actions.ActOnCoawaitExpr(getCurScope(), CoawaitLoc, Res.get());
+ return Res;
+ }
+
+ case tok::kw___extension__:{//unary-expression:'__extension__' cast-expr [GNU]
+ // __extension__ silences extension warnings in the subexpression.
+ ExtensionRAIIObject O(Diags); // Use RAII to do this.
+ SourceLocation SavedLoc = ConsumeToken();
+ Res = ParseCastExpression(false);
+ if (!Res.isInvalid())
+ Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Res.get());
+ return Res;
+ }
+ case tok::kw__Alignof: // unary-expression: '_Alignof' '(' type-name ')'
+ if (!getLangOpts().C11)
+ Diag(Tok, diag::ext_c11_alignment) << Tok.getName();
+ // fallthrough
+ case tok::kw_alignof: // unary-expression: 'alignof' '(' type-id ')'
+ case tok::kw___alignof: // unary-expression: '__alignof' unary-expression
+ // unary-expression: '__alignof' '(' type-name ')'
+ case tok::kw_sizeof: // unary-expression: 'sizeof' unary-expression
+ // unary-expression: 'sizeof' '(' type-name ')'
+ case tok::kw_vec_step: // unary-expression: OpenCL 'vec_step' expression
+ // unary-expression: '__builtin_omp_required_simd_align' '(' type-name ')'
+ case tok::kw___builtin_omp_required_simd_align:
+ return ParseUnaryExprOrTypeTraitExpression();
+ case tok::ampamp: { // unary-expression: '&&' identifier
+ SourceLocation AmpAmpLoc = ConsumeToken();
+ if (Tok.isNot(tok::identifier))
+ return ExprError(Diag(Tok, diag::err_expected) << tok::identifier);
+
+ if (getCurScope()->getFnParent() == nullptr)
+ return ExprError(Diag(Tok, diag::err_address_of_label_outside_fn));
+
+ Diag(AmpAmpLoc, diag::ext_gnu_address_of_label);
+ LabelDecl *LD = Actions.LookupOrCreateLabel(Tok.getIdentifierInfo(),
+ Tok.getLocation());
+ Res = Actions.ActOnAddrLabel(AmpAmpLoc, Tok.getLocation(), LD);
+ ConsumeToken();
+ return Res;
+ }
+ case tok::kw_const_cast:
+ case tok::kw_dynamic_cast:
+ case tok::kw_reinterpret_cast:
+ case tok::kw_static_cast:
+ Res = ParseCXXCasts();
+ break;
+ case tok::kw_typeid:
+ Res = ParseCXXTypeid();
+ break;
+ case tok::kw___uuidof:
+ Res = ParseCXXUuidof();
+ break;
+ case tok::kw_this:
+ Res = ParseCXXThis();
+ break;
+
+ case tok::annot_typename:
+ if (isStartOfObjCClassMessageMissingOpenBracket()) {
+ ParsedType Type = getTypeAnnotation(Tok);
+
+ // Fake up a Declarator to use with ActOnTypeName.
+ DeclSpec DS(AttrFactory);
+ DS.SetRangeStart(Tok.getLocation());
+ DS.SetRangeEnd(Tok.getLastLoc());
+
+ const char *PrevSpec = nullptr;
+ unsigned DiagID;
+ DS.SetTypeSpecType(TST_typename, Tok.getAnnotationEndLoc(),
+ PrevSpec, DiagID, Type,
+ Actions.getASTContext().getPrintingPolicy());
+
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ TypeResult Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ if (Ty.isInvalid())
+ break;
+
+ ConsumeToken();
+ Res = ParseObjCMessageExpressionBody(SourceLocation(), SourceLocation(),
+ Ty.get(), nullptr);
+ break;
+ }
+ // Fall through
+
+ case tok::annot_decltype:
+ case tok::kw_char:
+ case tok::kw_wchar_t:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+ case tok::kw_bool:
+ case tok::kw_short:
+ case tok::kw_int:
+ case tok::kw_long:
+ case tok::kw___int64:
+ case tok::kw___int128:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw_half:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_void:
+ case tok::kw_typename:
+ case tok::kw_typeof:
+ case tok::kw___vector: {
+ if (!getLangOpts().CPlusPlus) {
+ Diag(Tok, diag::err_expected_expression);
+ return ExprError();
+ }
+
+ if (SavedKind == tok::kw_typename) {
+ // postfix-expression: typename-specifier '(' expression-list[opt] ')'
+ // typename-specifier braced-init-list
+ if (TryAnnotateTypeOrScopeToken())
+ return ExprError();
+
+ if (!Actions.isSimpleTypeSpecifier(Tok.getKind()))
+ // We are trying to parse a simple-type-specifier but might not get such
+ // a token after error recovery.
+ return ExprError();
+ }
+
+ // postfix-expression: simple-type-specifier '(' expression-list[opt] ')'
+ // simple-type-specifier braced-init-list
+ //
+ DeclSpec DS(AttrFactory);
+
+ ParseCXXSimpleTypeSpecifier(DS);
+ if (Tok.isNot(tok::l_paren) &&
+ (!getLangOpts().CPlusPlus11 || Tok.isNot(tok::l_brace)))
+ return ExprError(Diag(Tok, diag::err_expected_lparen_after_type)
+ << DS.getSourceRange());
+
+ if (Tok.is(tok::l_brace))
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+
+ Res = ParseCXXTypeConstructExpression(DS);
+ break;
+ }
+
+ case tok::annot_cxxscope: { // [C++] id-expression: qualified-id
+ // If TryAnnotateTypeOrScopeToken annotates the token, tail recurse.
+ // (We can end up in this situation after tentative parsing.)
+ if (TryAnnotateTypeOrScopeToken())
+ return ExprError();
+ if (!Tok.is(tok::annot_cxxscope))
+ return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
+ NotCastExpr, isTypeCast);
+
+ Token Next = NextToken();
+ if (Next.is(tok::annot_template_id)) {
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Next);
+ if (TemplateId->Kind == TNK_Type_template) {
+ // We have a qualified template-id that we know refers to a
+ // type, translate it into a type and continue parsing as a
+ // cast expression.
+ CXXScopeSpec SS;
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
+ /*EnteringContext=*/false);
+ AnnotateTemplateIdTokenAsType();
+ return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
+ NotCastExpr, isTypeCast);
+ }
+ }
+
+ // Parse as an id-expression.
+ Res = ParseCXXIdExpression(isAddressOfOperand);
+ break;
+ }
+
+ case tok::annot_template_id: { // [C++] template-id
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ if (TemplateId->Kind == TNK_Type_template) {
+ // We have a template-id that we know refers to a type,
+ // translate it into a type and continue parsing as a cast
+ // expression.
+ AnnotateTemplateIdTokenAsType();
+ return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
+ NotCastExpr, isTypeCast);
+ }
+
+ // Fall through to treat the template-id as an id-expression.
+ }
+
+ case tok::kw_operator: // [C++] id-expression: operator/conversion-function-id
+ Res = ParseCXXIdExpression(isAddressOfOperand);
+ break;
+
+ case tok::coloncolon: {
+ // ::foo::bar -> global qualified name etc. If TryAnnotateTypeOrScopeToken
+ // annotates the token, tail recurse.
+ if (TryAnnotateTypeOrScopeToken())
+ return ExprError();
+ if (!Tok.is(tok::coloncolon))
+ return ParseCastExpression(isUnaryExpression, isAddressOfOperand);
+
+ // ::new -> [C++] new-expression
+ // ::delete -> [C++] delete-expression
+ SourceLocation CCLoc = ConsumeToken();
+ if (Tok.is(tok::kw_new))
+ return ParseCXXNewExpression(true, CCLoc);
+ if (Tok.is(tok::kw_delete))
+ return ParseCXXDeleteExpression(true, CCLoc);
+
+ // This is not a type name or scope specifier, it is an invalid expression.
+ Diag(CCLoc, diag::err_expected_expression);
+ return ExprError();
+ }
+
+ case tok::kw_new: // [C++] new-expression
+ return ParseCXXNewExpression(false, Tok.getLocation());
+
+ case tok::kw_delete: // [C++] delete-expression
+ return ParseCXXDeleteExpression(false, Tok.getLocation());
+
+ case tok::kw_noexcept: { // [C++0x] 'noexcept' '(' expression ')'
+ Diag(Tok, diag::warn_cxx98_compat_noexcept_expr);
+ SourceLocation KeyLoc = ConsumeToken();
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+
+ if (T.expectAndConsume(diag::err_expected_lparen_after, "noexcept"))
+ return ExprError();
+ // C++11 [expr.unary.noexcept]p1:
+ // The noexcept operator determines whether the evaluation of its operand,
+ // which is an unevaluated operand, can throw an exception.
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
+ ExprResult Result = ParseExpression();
+
+ T.consumeClose();
+
+ if (!Result.isInvalid())
+ Result = Actions.ActOnNoexceptExpr(KeyLoc, T.getOpenLocation(),
+ Result.get(), T.getCloseLocation());
+ return Result;
+ }
+
+#define TYPE_TRAIT(N,Spelling,K) \
+ case tok::kw_##Spelling:
+#include "clang/Basic/TokenKinds.def"
+ return ParseTypeTrait();
+
+ case tok::kw___array_rank:
+ case tok::kw___array_extent:
+ return ParseArrayTypeTrait();
+
+ case tok::kw___is_lvalue_expr:
+ case tok::kw___is_rvalue_expr:
+ return ParseExpressionTrait();
+
+ case tok::at: {
+ SourceLocation AtLoc = ConsumeToken();
+ return ParseObjCAtExpression(AtLoc);
+ }
+ case tok::caret:
+ Res = ParseBlockLiteralExpression();
+ break;
+ case tok::code_completion: {
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Expression);
+ cutOffParsing();
+ return ExprError();
+ }
+ case tok::l_square:
+ if (getLangOpts().CPlusPlus11) {
+ if (getLangOpts().ObjC1) {
+ // C++11 lambda expressions and Objective-C message sends both start with a
+ // square bracket. There are three possibilities here:
+ // we have a valid lambda expression, we have an invalid lambda
+ // expression, or we have something that doesn't appear to be a lambda.
+ // If we're in the last case, we fall back to ParseObjCMessageExpression.
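+        // For example, '[=] { return 0; }()' is a lambda call, while
+        // '[obj description]' is a message send.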
+ Res = TryParseLambdaExpression();
+ if (!Res.isInvalid() && !Res.get())
+ Res = ParseObjCMessageExpression();
+ break;
+ }
+ Res = ParseLambdaExpression();
+ break;
+ }
+ if (getLangOpts().ObjC1) {
+ Res = ParseObjCMessageExpression();
+ break;
+ }
+ // FALL THROUGH.
+ default:
+ NotCastExpr = true;
+ return ExprError();
+ }
+
+ // Check to see whether Res is a function designator only. If it is and we
+ // are compiling for OpenCL, we need to return an error as this implies
+ // that the address of the function is being taken, which is illegal in CL.
+
+ // These can be followed by postfix-expr pieces.
+ Res = ParsePostfixExpressionSuffix(Res);
+ if (getLangOpts().OpenCL)
+ if (Expr *PostfixExpr = Res.get()) {
+ QualType Ty = PostfixExpr->getType();
+ if (!Ty.isNull() && Ty->isFunctionType()) {
+ Diag(PostfixExpr->getExprLoc(),
+ diag::err_opencl_taking_function_address_parser);
+ return ExprError();
+ }
+ }
+
+ return Res;
+}
+
+/// \brief Once the leading part of a postfix-expression is parsed, this
+/// method parses any suffixes that apply.
+///
+/// \verbatim
+/// postfix-expression: [C99 6.5.2]
+/// primary-expression
+/// postfix-expression '[' expression ']'
+/// postfix-expression '[' braced-init-list ']'
+/// postfix-expression '(' argument-expression-list[opt] ')'
+/// postfix-expression '.' identifier
+/// postfix-expression '->' identifier
+/// postfix-expression '++'
+/// postfix-expression '--'
+/// '(' type-name ')' '{' initializer-list '}'
+/// '(' type-name ')' '{' initializer-list ',' '}'
+///
+/// argument-expression-list: [C99 6.5.2]
+/// argument-expression ...[opt]
+/// argument-expression-list ',' assignment-expression ...[opt]
+/// \endverbatim
+ExprResult
+Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
+ // Now that the primary-expression piece of the postfix-expression has been
+ // parsed, see if there are any postfix-expression pieces here.
+ SourceLocation Loc;
+ while (1) {
+ switch (Tok.getKind()) {
+ case tok::code_completion:
+ if (InMessageExpression)
+ return LHS;
+
+ Actions.CodeCompletePostfixExpression(getCurScope(), LHS);
+ cutOffParsing();
+ return ExprError();
+
+ case tok::identifier:
+ // If we see identifier: after an expression, and we're not already in a
+ // message send, then this is probably a message send with a missing
+ // opening bracket '['.
+ if (getLangOpts().ObjC1 && !InMessageExpression &&
+ (NextToken().is(tok::colon) || NextToken().is(tok::r_square))) {
+ LHS = ParseObjCMessageExpressionBody(SourceLocation(), SourceLocation(),
+ ParsedType(), LHS.get());
+ break;
+ }
+
+ // Fall through; this isn't a message send.
+
+ default: // Not a postfix-expression suffix.
+ return LHS;
+ case tok::l_square: { // postfix-expression: p-e '[' expression ']'
+      // If we have an array postfix expression that starts on a new line and
+ // Objective-C is enabled, it is highly likely that the user forgot a
+ // semicolon after the base expression and that the array postfix-expr is
+ // actually another message send. In this case, do some look-ahead to see
+ // if the contents of the square brackets are obviously not a valid
+ // expression and recover by pretending there is no suffix.
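+      // For example:
+      //   cleanup()
+      //   [view removeFromSuperview];
+      // Here the '[' most likely begins a new message send, not a subscript.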
+ if (getLangOpts().ObjC1 && Tok.isAtStartOfLine() &&
+ isSimpleObjCMessageExpression())
+ return LHS;
+
+ // Reject array indices starting with a lambda-expression. '[[' is
+ // reserved for attributes.
+ if (CheckProhibitedCXX11Attribute())
+ return ExprError();
+
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+ Loc = T.getOpenLocation();
+ ExprResult Idx, Length;
+ SourceLocation ColonLoc;
+ if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+ Idx = ParseBraceInitializer();
+ } else if (getLangOpts().OpenMP) {
+ ColonProtectionRAIIObject RAII(*this);
+ // Parse [: or [ expr or [ expr :
+ if (!Tok.is(tok::colon)) {
+ // [ expr
+ Idx = ParseExpression();
+ }
+ if (Tok.is(tok::colon)) {
+ // Consume ':'
+ ColonLoc = ConsumeToken();
+ if (Tok.isNot(tok::r_square))
+ Length = ParseExpression();
+ }
+ } else
+ Idx = ParseExpression();
+
+ SourceLocation RLoc = Tok.getLocation();
+
+ if (!LHS.isInvalid() && !Idx.isInvalid() && !Length.isInvalid() &&
+ Tok.is(tok::r_square)) {
+ if (ColonLoc.isValid()) {
+ LHS = Actions.ActOnOMPArraySectionExpr(LHS.get(), Loc, Idx.get(),
+ ColonLoc, Length.get(), RLoc);
+ } else {
+ LHS = Actions.ActOnArraySubscriptExpr(getCurScope(), LHS.get(), Loc,
+ Idx.get(), RLoc);
+ }
+ } else {
+ (void)Actions.CorrectDelayedTyposInExpr(LHS);
+ (void)Actions.CorrectDelayedTyposInExpr(Idx);
+ (void)Actions.CorrectDelayedTyposInExpr(Length);
+ LHS = ExprError();
+ Idx = ExprError();
+ }
+
+ // Match the ']'.
+ T.consumeClose();
+ break;
+ }
+
+ case tok::l_paren: // p-e: p-e '(' argument-expression-list[opt] ')'
+ case tok::lesslessless: { // p-e: p-e '<<<' argument-expression-list '>>>'
+ // '(' argument-expression-list[opt] ')'
+ tok::TokenKind OpKind = Tok.getKind();
+ InMessageExpressionRAIIObject InMessage(*this, false);
+
+ Expr *ExecConfig = nullptr;
+
+ BalancedDelimiterTracker PT(*this, tok::l_paren);
+
+ if (OpKind == tok::lesslessless) {
+ ExprVector ExecConfigExprs;
+ CommaLocsTy ExecConfigCommaLocs;
+ SourceLocation OpenLoc = ConsumeToken();
+
+ if (ParseSimpleExpressionList(ExecConfigExprs, ExecConfigCommaLocs)) {
+ (void)Actions.CorrectDelayedTyposInExpr(LHS);
+ LHS = ExprError();
+ }
+
+ SourceLocation CloseLoc;
+ if (TryConsumeToken(tok::greatergreatergreater, CloseLoc)) {
+ } else if (LHS.isInvalid()) {
+ SkipUntil(tok::greatergreatergreater, StopAtSemi);
+ } else {
+ // There was an error closing the brackets
+ Diag(Tok, diag::err_expected) << tok::greatergreatergreater;
+ Diag(OpenLoc, diag::note_matching) << tok::lesslessless;
+ SkipUntil(tok::greatergreatergreater, StopAtSemi);
+ LHS = ExprError();
+ }
+
+ if (!LHS.isInvalid()) {
+ if (ExpectAndConsume(tok::l_paren))
+ LHS = ExprError();
+ else
+ Loc = PrevTokLocation;
+ }
+
+ if (!LHS.isInvalid()) {
+ ExprResult ECResult = Actions.ActOnCUDAExecConfigExpr(getCurScope(),
+ OpenLoc,
+ ExecConfigExprs,
+ CloseLoc);
+ if (ECResult.isInvalid())
+ LHS = ExprError();
+ else
+ ExecConfig = ECResult.get();
+ }
+ } else {
+ PT.consumeOpen();
+ Loc = PT.getOpenLocation();
+ }
+
+ ExprVector ArgExprs;
+ CommaLocsTy CommaLocs;
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteCall(getCurScope(), LHS.get(), None);
+ cutOffParsing();
+ return ExprError();
+ }
+
+ if (OpKind == tok::l_paren || !LHS.isInvalid()) {
+ if (Tok.isNot(tok::r_paren)) {
+ if (ParseExpressionList(ArgExprs, CommaLocs, [&] {
+ Actions.CodeCompleteCall(getCurScope(), LHS.get(), ArgExprs);
+ })) {
+ (void)Actions.CorrectDelayedTyposInExpr(LHS);
+ LHS = ExprError();
+ } else if (LHS.isInvalid()) {
+ for (auto &E : ArgExprs)
+ Actions.CorrectDelayedTyposInExpr(E);
+ }
+ }
+ }
+
+ // Match the ')'.
+ if (LHS.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ } else if (Tok.isNot(tok::r_paren)) {
+ bool HadDelayedTypo = false;
+ if (Actions.CorrectDelayedTyposInExpr(LHS).get() != LHS.get())
+ HadDelayedTypo = true;
+ for (auto &E : ArgExprs)
+ if (Actions.CorrectDelayedTyposInExpr(E).get() != E)
+ HadDelayedTypo = true;
+ // If there were delayed typos in the LHS or ArgExprs, call SkipUntil
+ // instead of PT.consumeClose() to avoid emitting extra diagnostics for
+ // the unmatched l_paren.
+ if (HadDelayedTypo)
+ SkipUntil(tok::r_paren, StopAtSemi);
+ else
+ PT.consumeClose();
+ LHS = ExprError();
+ } else {
+ assert((ArgExprs.size() == 0 ||
+ ArgExprs.size()-1 == CommaLocs.size())&&
+ "Unexpected number of commas!");
+ LHS = Actions.ActOnCallExpr(getCurScope(), LHS.get(), Loc,
+ ArgExprs, Tok.getLocation(),
+ ExecConfig);
+ PT.consumeClose();
+ }
+
+ break;
+ }
+ case tok::arrow:
+ case tok::period: {
+ // postfix-expression: p-e '->' template[opt] id-expression
+ // postfix-expression: p-e '.' template[opt] id-expression
+ tok::TokenKind OpKind = Tok.getKind();
+ SourceLocation OpLoc = ConsumeToken(); // Eat the "." or "->" token.
+
+ CXXScopeSpec SS;
+ ParsedType ObjectType;
+ bool MayBePseudoDestructor = false;
+ if (getLangOpts().CPlusPlus && !LHS.isInvalid()) {
+ Expr *Base = LHS.get();
+ const Type* BaseType = Base->getType().getTypePtrOrNull();
+ if (BaseType && Tok.is(tok::l_paren) &&
+ (BaseType->isFunctionType() ||
+ BaseType->isSpecificPlaceholderType(BuiltinType::BoundMember))) {
+ Diag(OpLoc, diag::err_function_is_not_record)
+ << OpKind << Base->getSourceRange()
+ << FixItHint::CreateRemoval(OpLoc);
+ return ParsePostfixExpressionSuffix(Base);
+ }
+
+ LHS = Actions.ActOnStartCXXMemberReference(getCurScope(), Base,
+ OpLoc, OpKind, ObjectType,
+ MayBePseudoDestructor);
+ if (LHS.isInvalid())
+ break;
+
+ ParseOptionalCXXScopeSpecifier(SS, ObjectType,
+ /*EnteringContext=*/false,
+ &MayBePseudoDestructor);
+ if (SS.isNotEmpty())
+ ObjectType = ParsedType();
+ }
+
+ if (Tok.is(tok::code_completion)) {
+ // Code completion for a member access expression.
+ Actions.CodeCompleteMemberReferenceExpr(getCurScope(), LHS.get(),
+ OpLoc, OpKind == tok::arrow);
+
+ cutOffParsing();
+ return ExprError();
+ }
+
+ if (MayBePseudoDestructor && !LHS.isInvalid()) {
+ LHS = ParseCXXPseudoDestructor(LHS.get(), OpLoc, OpKind, SS,
+ ObjectType);
+ break;
+ }
+
+ // Either the action has told us that this cannot be a
+ // pseudo-destructor expression (based on the type of base
+ // expression), or we didn't see a '~' in the right place. We
+ // can still parse a destructor name here, but in that case it
+ // names a real destructor.
+ // Allow explicit constructor calls in Microsoft mode.
+ // FIXME: Add support for explicit call of template constructor.
+ SourceLocation TemplateKWLoc;
+ UnqualifiedId Name;
+ if (getLangOpts().ObjC2 && OpKind == tok::period &&
+ Tok.is(tok::kw_class)) {
+ // Objective-C++:
+ // After a '.' in a member access expression, treat the keyword
+ // 'class' as if it were an identifier.
+ //
+ // This hack allows property access to the 'class' method because it is
+ // such a common method name. For other C++ keywords that are
+ // Objective-C method names, one must use the message send syntax.
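+      // For example, 'obj.class' is accepted here, whereas a hypothetical
+      // 'obj.new' must be written as '[obj new]'.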
+ IdentifierInfo *Id = Tok.getIdentifierInfo();
+ SourceLocation Loc = ConsumeToken();
+ Name.setIdentifier(Id, Loc);
+ } else if (ParseUnqualifiedId(SS,
+ /*EnteringContext=*/false,
+ /*AllowDestructorName=*/true,
+ /*AllowConstructorName=*/
+ getLangOpts().MicrosoftExt,
+ ObjectType, TemplateKWLoc, Name)) {
+ (void)Actions.CorrectDelayedTyposInExpr(LHS);
+ LHS = ExprError();
+ }
+
+ if (!LHS.isInvalid())
+ LHS = Actions.ActOnMemberAccessExpr(getCurScope(), LHS.get(), OpLoc,
+ OpKind, SS, TemplateKWLoc, Name,
+ CurParsedObjCImpl ? CurParsedObjCImpl->Dcl
+ : nullptr);
+ break;
+ }
+ case tok::plusplus: // postfix-expression: postfix-expression '++'
+ case tok::minusminus: // postfix-expression: postfix-expression '--'
+ if (!LHS.isInvalid()) {
+ LHS = Actions.ActOnPostfixUnaryOp(getCurScope(), Tok.getLocation(),
+ Tok.getKind(), LHS.get());
+ }
+ ConsumeToken();
+ break;
+ }
+ }
+}
+
+/// ParseExprAfterUnaryExprOrTypeTrait - We parsed a typeof/sizeof/alignof/
+/// vec_step and we are at the start of an expression or a parenthesized
+/// type-id. OpTok is the operator token (typeof/sizeof/alignof). Returns the
+/// expression (isCastExpr == false) or the type (isCastExpr == true).
+///
+/// \verbatim
+/// unary-expression: [C99 6.5.3]
+/// 'sizeof' unary-expression
+/// 'sizeof' '(' type-name ')'
+/// [GNU] '__alignof' unary-expression
+/// [GNU] '__alignof' '(' type-name ')'
+/// [C11] '_Alignof' '(' type-name ')'
+/// [C++0x] 'alignof' '(' type-id ')'
+///
+/// [GNU] typeof-specifier:
+/// typeof ( expressions )
+/// typeof ( type-name )
+/// [GNU/C++] typeof unary-expression
+///
+/// [OpenCL 1.1 6.11.12] vec_step built-in function:
+/// vec_step ( expressions )
+/// vec_step ( type-name )
+/// \endverbatim
+ExprResult
+Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
+ bool &isCastExpr,
+ ParsedType &CastTy,
+ SourceRange &CastRange) {
+
+ assert(OpTok.isOneOf(tok::kw_typeof, tok::kw_sizeof, tok::kw___alignof,
+ tok::kw_alignof, tok::kw__Alignof, tok::kw_vec_step,
+ tok::kw___builtin_omp_required_simd_align) &&
+ "Not a typeof/sizeof/alignof/vec_step expression!");
+
+ ExprResult Operand;
+
+ // If the operand doesn't start with an '(', it must be an expression.
+ if (Tok.isNot(tok::l_paren)) {
+    // If the construct allows a form without parentheses, the user may have
+    // forgotten to put parentheses around the type name.
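+    // For instance, 'sizeof int' can be recovered as 'sizeof(int)' via the
+    // fix-it hints emitted below.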
+ if (OpTok.isOneOf(tok::kw_sizeof, tok::kw___alignof, tok::kw_alignof,
+ tok::kw__Alignof)) {
+ if (isTypeIdUnambiguously()) {
+ DeclSpec DS(AttrFactory);
+ ParseSpecifierQualifierList(DS);
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ ParseDeclarator(DeclaratorInfo);
+
+ SourceLocation LParenLoc = PP.getLocForEndOfToken(OpTok.getLocation());
+ SourceLocation RParenLoc = PP.getLocForEndOfToken(PrevTokLocation);
+ Diag(LParenLoc, diag::err_expected_parentheses_around_typename)
+ << OpTok.getName()
+ << FixItHint::CreateInsertion(LParenLoc, "(")
+ << FixItHint::CreateInsertion(RParenLoc, ")");
+ isCastExpr = true;
+ return ExprEmpty();
+ }
+ }
+
+ isCastExpr = false;
+ if (OpTok.is(tok::kw_typeof) && !getLangOpts().CPlusPlus) {
+ Diag(Tok, diag::err_expected_after) << OpTok.getIdentifierInfo()
+ << tok::l_paren;
+ return ExprError();
+ }
+
+ Operand = ParseCastExpression(true/*isUnaryExpression*/);
+ } else {
+ // If it starts with a '(', we know that it is either a parenthesized
+ // type-name, or it is a unary-expression that starts with a compound
+ // literal, or starts with a primary-expression that is a parenthesized
+ // expression.
+ ParenParseOption ExprType = CastExpr;
+ SourceLocation LParenLoc = Tok.getLocation(), RParenLoc;
+
+ Operand = ParseParenExpression(ExprType, true/*stopIfCastExpr*/,
+ false, CastTy, RParenLoc);
+ CastRange = SourceRange(LParenLoc, RParenLoc);
+
+ // If ParseParenExpression parsed a '(typename)' sequence only, then this is
+ // a type.
+ if (ExprType == CastExpr) {
+ isCastExpr = true;
+ return ExprEmpty();
+ }
+
+ if (getLangOpts().CPlusPlus || OpTok.isNot(tok::kw_typeof)) {
+ // GNU typeof in C requires the expression to be parenthesized. Not so for
+ // sizeof/alignof or in C++. Therefore, the parenthesized expression is
+ // the start of a unary-expression, but doesn't include any postfix
+ // pieces. Parse these now if present.
+ if (!Operand.isInvalid())
+ Operand = ParsePostfixExpressionSuffix(Operand.get());
+ }
+ }
+
+  // If we get here, the operand to the typeof/sizeof/alignof was an expression.
+ isCastExpr = false;
+ return Operand;
+}
+
+
+/// \brief Parse a sizeof or alignof expression.
+///
+/// \verbatim
+/// unary-expression: [C99 6.5.3]
+/// 'sizeof' unary-expression
+/// 'sizeof' '(' type-name ')'
+/// [C++11] 'sizeof' '...' '(' identifier ')'
+/// [GNU] '__alignof' unary-expression
+/// [GNU] '__alignof' '(' type-name ')'
+/// [C11] '_Alignof' '(' type-name ')'
+/// [C++11] 'alignof' '(' type-id ')'
+/// \endverbatim
+ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
+ assert(Tok.isOneOf(tok::kw_sizeof, tok::kw___alignof, tok::kw_alignof,
+ tok::kw__Alignof, tok::kw_vec_step,
+ tok::kw___builtin_omp_required_simd_align) &&
+ "Not a sizeof/alignof/vec_step expression!");
+ Token OpTok = Tok;
+ ConsumeToken();
+
+ // [C++11] 'sizeof' '...' '(' identifier ')'
+ if (Tok.is(tok::ellipsis) && OpTok.is(tok::kw_sizeof)) {
+ SourceLocation EllipsisLoc = ConsumeToken();
+ SourceLocation LParenLoc, RParenLoc;
+ IdentifierInfo *Name = nullptr;
+ SourceLocation NameLoc;
+ if (Tok.is(tok::l_paren)) {
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ LParenLoc = T.getOpenLocation();
+ if (Tok.is(tok::identifier)) {
+ Name = Tok.getIdentifierInfo();
+ NameLoc = ConsumeToken();
+ T.consumeClose();
+ RParenLoc = T.getCloseLocation();
+ if (RParenLoc.isInvalid())
+ RParenLoc = PP.getLocForEndOfToken(NameLoc);
+ } else {
+ Diag(Tok, diag::err_expected_parameter_pack);
+ SkipUntil(tok::r_paren, StopAtSemi);
+ }
+ } else if (Tok.is(tok::identifier)) {
+ Name = Tok.getIdentifierInfo();
+ NameLoc = ConsumeToken();
+ LParenLoc = PP.getLocForEndOfToken(EllipsisLoc);
+ RParenLoc = PP.getLocForEndOfToken(NameLoc);
+ Diag(LParenLoc, diag::err_paren_sizeof_parameter_pack)
+ << Name
+ << FixItHint::CreateInsertion(LParenLoc, "(")
+ << FixItHint::CreateInsertion(RParenLoc, ")");
+ } else {
+ Diag(Tok, diag::err_sizeof_parameter_pack);
+ }
+
+ if (!Name)
+ return ExprError();
+
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated,
+ Sema::ReuseLambdaContextDecl);
+
+ return Actions.ActOnSizeofParameterPackExpr(getCurScope(),
+ OpTok.getLocation(),
+ *Name, NameLoc,
+ RParenLoc);
+ }
+
+ if (OpTok.isOneOf(tok::kw_alignof, tok::kw__Alignof))
+ Diag(OpTok, diag::warn_cxx98_compat_alignof);
+
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated,
+ Sema::ReuseLambdaContextDecl);
+
+ bool isCastExpr;
+ ParsedType CastTy;
+ SourceRange CastRange;
+ ExprResult Operand = ParseExprAfterUnaryExprOrTypeTrait(OpTok,
+ isCastExpr,
+ CastTy,
+ CastRange);
+
+ UnaryExprOrTypeTrait ExprKind = UETT_SizeOf;
+ if (OpTok.isOneOf(tok::kw_alignof, tok::kw___alignof, tok::kw__Alignof))
+ ExprKind = UETT_AlignOf;
+ else if (OpTok.is(tok::kw_vec_step))
+ ExprKind = UETT_VecStep;
+ else if (OpTok.is(tok::kw___builtin_omp_required_simd_align))
+ ExprKind = UETT_OpenMPRequiredSimdAlign;
+
+ if (isCastExpr)
+ return Actions.ActOnUnaryExprOrTypeTraitExpr(OpTok.getLocation(),
+ ExprKind,
+ /*isType=*/true,
+ CastTy.getAsOpaquePtr(),
+ CastRange);
+
+ if (OpTok.isOneOf(tok::kw_alignof, tok::kw__Alignof))
+ Diag(OpTok, diag::ext_alignof_expr) << OpTok.getIdentifierInfo();
+
+  // If we get here, the operand to the sizeof/alignof was an expression.
+ if (!Operand.isInvalid())
+ Operand = Actions.ActOnUnaryExprOrTypeTraitExpr(OpTok.getLocation(),
+ ExprKind,
+ /*isType=*/false,
+ Operand.get(),
+ CastRange);
+ return Operand;
+}
+
+/// ParseBuiltinPrimaryExpression
+///
+/// \verbatim
+/// primary-expression: [C99 6.5.1]
+/// [GNU] '__builtin_va_arg' '(' assignment-expression ',' type-name ')'
+/// [GNU] '__builtin_offsetof' '(' type-name ',' offsetof-member-designator')'
+/// [GNU] '__builtin_choose_expr' '(' assign-expr ',' assign-expr ','
+/// assign-expr ')'
+/// [GNU] '__builtin_types_compatible_p' '(' type-name ',' type-name ')'
+/// [OCL] '__builtin_astype' '(' assignment-expression ',' type-name ')'
+///
+/// [GNU] offsetof-member-designator:
+/// [GNU] identifier
+/// [GNU] offsetof-member-designator '.' identifier
+/// [GNU] offsetof-member-designator '[' expression ']'
+/// \endverbatim
+ExprResult Parser::ParseBuiltinPrimaryExpression() {
+ ExprResult Res;
+ const IdentifierInfo *BuiltinII = Tok.getIdentifierInfo();
+
+ tok::TokenKind T = Tok.getKind();
+ SourceLocation StartLoc = ConsumeToken(); // Eat the builtin identifier.
+
+ // All of these start with an open paren.
+ if (Tok.isNot(tok::l_paren))
+ return ExprError(Diag(Tok, diag::err_expected_after) << BuiltinII
+ << tok::l_paren);
+
+ BalancedDelimiterTracker PT(*this, tok::l_paren);
+ PT.consumeOpen();
+
+ // TODO: Build AST.
+
+ switch (T) {
+ default: llvm_unreachable("Not a builtin primary expression!");
+ case tok::kw___builtin_va_arg: {
+ ExprResult Expr(ParseAssignmentExpression());
+
+ if (ExpectAndConsume(tok::comma)) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ Expr = ExprError();
+ }
+
+ TypeResult Ty = ParseTypeName();
+
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_expected) << tok::r_paren;
+ Expr = ExprError();
+ }
+
+ if (Expr.isInvalid() || Ty.isInvalid())
+ Res = ExprError();
+ else
+ Res = Actions.ActOnVAArg(StartLoc, Expr.get(), Ty.get(), ConsumeParen());
+ break;
+ }
+ case tok::kw___builtin_offsetof: {
+ SourceLocation TypeLoc = Tok.getLocation();
+ TypeResult Ty = ParseTypeName();
+ if (Ty.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+
+ if (ExpectAndConsume(tok::comma)) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+
+ // We must have at least one identifier here.
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+
+ // Keep track of the various subcomponents we see.
+ SmallVector<Sema::OffsetOfComponent, 4> Comps;
+
+ Comps.push_back(Sema::OffsetOfComponent());
+ Comps.back().isBrackets = false;
+ Comps.back().U.IdentInfo = Tok.getIdentifierInfo();
+ Comps.back().LocStart = Comps.back().LocEnd = ConsumeToken();
+
+ // FIXME: This loop leaks the index expressions on error.
+ while (1) {
+ if (Tok.is(tok::period)) {
+ // offsetof-member-designator: offsetof-member-designator '.' identifier
+ Comps.push_back(Sema::OffsetOfComponent());
+ Comps.back().isBrackets = false;
+ Comps.back().LocStart = ConsumeToken();
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+ Comps.back().U.IdentInfo = Tok.getIdentifierInfo();
+ Comps.back().LocEnd = ConsumeToken();
+
+ } else if (Tok.is(tok::l_square)) {
+ if (CheckProhibitedCXX11Attribute())
+ return ExprError();
+
+        // offsetof-member-designator: offsetof-member-designator '[' expression ']'
+ Comps.push_back(Sema::OffsetOfComponent());
+ Comps.back().isBrackets = true;
+ BalancedDelimiterTracker ST(*this, tok::l_square);
+ ST.consumeOpen();
+ Comps.back().LocStart = ST.getOpenLocation();
+ Res = ParseExpression();
+ if (Res.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return Res;
+ }
+ Comps.back().U.E = Res.get();
+
+ ST.consumeClose();
+ Comps.back().LocEnd = ST.getCloseLocation();
+ } else {
+ if (Tok.isNot(tok::r_paren)) {
+ PT.consumeClose();
+ Res = ExprError();
+ } else if (Ty.isInvalid()) {
+ Res = ExprError();
+ } else {
+ PT.consumeClose();
+ Res = Actions.ActOnBuiltinOffsetOf(getCurScope(), StartLoc, TypeLoc,
+ Ty.get(), Comps,
+ PT.getCloseLocation());
+ }
+ break;
+ }
+ }
+ break;
+ }
+ case tok::kw___builtin_choose_expr: {
+ ExprResult Cond(ParseAssignmentExpression());
+ if (Cond.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return Cond;
+ }
+ if (ExpectAndConsume(tok::comma)) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+
+ ExprResult Expr1(ParseAssignmentExpression());
+ if (Expr1.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return Expr1;
+ }
+ if (ExpectAndConsume(tok::comma)) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+
+ ExprResult Expr2(ParseAssignmentExpression());
+ if (Expr2.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return Expr2;
+ }
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_expected) << tok::r_paren;
+ return ExprError();
+ }
+ Res = Actions.ActOnChooseExpr(StartLoc, Cond.get(), Expr1.get(),
+ Expr2.get(), ConsumeParen());
+ break;
+ }
+ case tok::kw___builtin_astype: {
+ // The first argument is an expression to be converted, followed by a comma.
+ ExprResult Expr(ParseAssignmentExpression());
+ if (Expr.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+
+ if (ExpectAndConsume(tok::comma)) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+
+ // Second argument is the type to bitcast to.
+ TypeResult DestTy = ParseTypeName();
+ if (DestTy.isInvalid())
+ return ExprError();
+
+ // Attempt to consume the r-paren.
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_expected) << tok::r_paren;
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+
+ Res = Actions.ActOnAsTypeExpr(Expr.get(), DestTy.get(), StartLoc,
+ ConsumeParen());
+ break;
+ }
+ case tok::kw___builtin_convertvector: {
+ // The first argument is an expression to be converted, followed by a comma.
+ ExprResult Expr(ParseAssignmentExpression());
+ if (Expr.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+
+ if (ExpectAndConsume(tok::comma)) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+
+ // Second argument is the type to bitcast to.
+ TypeResult DestTy = ParseTypeName();
+ if (DestTy.isInvalid())
+ return ExprError();
+
+ // Attempt to consume the r-paren.
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_expected) << tok::r_paren;
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+
+ Res = Actions.ActOnConvertVectorExpr(Expr.get(), DestTy.get(), StartLoc,
+ ConsumeParen());
+ break;
+ }
+ }
+
+ if (Res.isInvalid())
+ return ExprError();
+
+ // These can be followed by postfix-expr pieces because they are
+ // primary-expressions.
+ return ParsePostfixExpressionSuffix(Res.get());
+}
+
+/// ParseParenExpression - This parses the unit that starts with a '(' token,
+/// based on what is allowed by ExprType. The actual thing parsed is returned
+/// in ExprType. If stopIfCastExpr is true, it will only return the parsed type,
+/// not the parsed cast-expression.
+///
+/// \verbatim
+/// primary-expression: [C99 6.5.1]
+/// '(' expression ')'
+/// [GNU] '(' compound-statement ')' (if !ParenExprOnly)
+/// postfix-expression: [C99 6.5.2]
+/// '(' type-name ')' '{' initializer-list '}'
+/// '(' type-name ')' '{' initializer-list ',' '}'
+/// cast-expression: [C99 6.5.4]
+/// '(' type-name ')' cast-expression
+/// [ARC] bridged-cast-expression
+/// [ARC] bridged-cast-expression:
+/// (__bridge type-name) cast-expression
+/// (__bridge_transfer type-name) cast-expression
+/// (__bridge_retained type-name) cast-expression
+/// fold-expression: [C++1z]
+/// '(' cast-expression fold-operator '...' ')'
+/// '(' '...' fold-operator cast-expression ')'
+/// '(' cast-expression fold-operator '...'
+/// fold-operator cast-expression ')'
+/// \endverbatim
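+///
+/// For example, '(args + ...)' and '(... + args)' are unary fold-expressions,
+/// and '(args + ... + init)' is a binary fold-expression.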
+ExprResult
+Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
+ bool isTypeCast, ParsedType &CastTy,
+ SourceLocation &RParenLoc) {
+ assert(Tok.is(tok::l_paren) && "Not a paren expr!");
+ ColonProtectionRAIIObject ColonProtection(*this, false);
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.consumeOpen())
+ return ExprError();
+ SourceLocation OpenLoc = T.getOpenLocation();
+
+ ExprResult Result(true);
+ bool isAmbiguousTypeId;
+ CastTy = ParsedType();
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(),
+ ExprType >= CompoundLiteral? Sema::PCC_ParenthesizedExpression
+ : Sema::PCC_Expression);
+ cutOffParsing();
+ return ExprError();
+ }
+
+ // Diagnose use of bridge casts in non-arc mode.
+ bool BridgeCast = (getLangOpts().ObjC2 &&
+ Tok.isOneOf(tok::kw___bridge,
+ tok::kw___bridge_transfer,
+ tok::kw___bridge_retained,
+ tok::kw___bridge_retain));
+ if (BridgeCast && !getLangOpts().ObjCAutoRefCount) {
+ if (!TryConsumeToken(tok::kw___bridge)) {
+ StringRef BridgeCastName = Tok.getName();
+ SourceLocation BridgeKeywordLoc = ConsumeToken();
+ if (!PP.getSourceManager().isInSystemHeader(BridgeKeywordLoc))
+ Diag(BridgeKeywordLoc, diag::warn_arc_bridge_cast_nonarc)
+ << BridgeCastName
+ << FixItHint::CreateReplacement(BridgeKeywordLoc, "");
+ }
+ BridgeCast = false;
+ }
+
+ // None of these cases should fall through with an invalid Result
+ // unless they've already reported an error.
+ if (ExprType >= CompoundStmt && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::ext_gnu_statement_expr);
+
+ if (!getCurScope()->getFnParent() && !getCurScope()->getBlockParent()) {
+ Result = ExprError(Diag(OpenLoc, diag::err_stmtexpr_file_scope));
+ } else {
+ // Find the nearest non-record decl context. Variables declared in a
+ // statement expression behave as if they were declared in the enclosing
+ // function, block, or other code construct.
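+      // For example, a statement expression in an in-class member initializer
+      // must not use the class itself as the declaration context for its
+      // locals.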
+ DeclContext *CodeDC = Actions.CurContext;
+ while (CodeDC->isRecord() || isa<EnumDecl>(CodeDC)) {
+ CodeDC = CodeDC->getParent();
+ assert(CodeDC && !CodeDC->isFileContext() &&
+ "statement expr not in code context");
+ }
+ Sema::ContextRAII SavedContext(Actions, CodeDC, /*NewThisContext=*/false);
+
+ Actions.ActOnStartStmtExpr();
+
+ StmtResult Stmt(ParseCompoundStatement(true));
+ ExprType = CompoundStmt;
+
+ // If the substmt parsed correctly, build the AST node.
+ if (!Stmt.isInvalid()) {
+ Result = Actions.ActOnStmtExpr(OpenLoc, Stmt.get(), Tok.getLocation());
+ } else {
+ Actions.ActOnStmtExprError();
+ }
+ }
+ } else if (ExprType >= CompoundLiteral && BridgeCast) {
+ tok::TokenKind tokenKind = Tok.getKind();
+ SourceLocation BridgeKeywordLoc = ConsumeToken();
+
+ // Parse an Objective-C ARC ownership cast expression.
+ ObjCBridgeCastKind Kind;
+ if (tokenKind == tok::kw___bridge)
+ Kind = OBC_Bridge;
+ else if (tokenKind == tok::kw___bridge_transfer)
+ Kind = OBC_BridgeTransfer;
+ else if (tokenKind == tok::kw___bridge_retained)
+ Kind = OBC_BridgeRetained;
+ else {
+ // As a hopefully temporary workaround, allow __bridge_retain as
+ // a synonym for __bridge_retained, but only in system headers.
+ assert(tokenKind == tok::kw___bridge_retain);
+ Kind = OBC_BridgeRetained;
+ if (!PP.getSourceManager().isInSystemHeader(BridgeKeywordLoc))
+ Diag(BridgeKeywordLoc, diag::err_arc_bridge_retain)
+ << FixItHint::CreateReplacement(BridgeKeywordLoc,
+ "__bridge_retained");
+ }
+
+ TypeResult Ty = ParseTypeName();
+ T.consumeClose();
+ ColonProtection.restore();
+ RParenLoc = T.getCloseLocation();
+ ExprResult SubExpr = ParseCastExpression(/*isUnaryExpression=*/false);
+
+ if (Ty.isInvalid() || SubExpr.isInvalid())
+ return ExprError();
+
+ return Actions.ActOnObjCBridgedCast(getCurScope(), OpenLoc, Kind,
+ BridgeKeywordLoc, Ty.get(),
+ RParenLoc, SubExpr.get());
+ } else if (ExprType >= CompoundLiteral &&
+ isTypeIdInParens(isAmbiguousTypeId)) {
+
+ // Otherwise, this is a compound literal expression or cast expression.
+
+ // In C++, if the type-id is ambiguous we disambiguate based on context.
+ // If stopIfCastExpr is true the context is a typeof/sizeof/alignof
+ // in which case we should treat it as type-id.
+    // If stopIfCastExpr is false, we need to determine the context past the
+ // parens, so we defer to ParseCXXAmbiguousParenExpression for that.
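+    // For example, '(T())' is ambiguous: it can be a type-id (function
+    // returning T) or a value-initialization of T.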
+ if (isAmbiguousTypeId && !stopIfCastExpr) {
+ ExprResult res = ParseCXXAmbiguousParenExpression(ExprType, CastTy, T,
+ ColonProtection);
+ RParenLoc = T.getCloseLocation();
+ return res;
+ }
+
+ // Parse the type declarator.
+ DeclSpec DS(AttrFactory);
+ ParseSpecifierQualifierList(DS);
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ ParseDeclarator(DeclaratorInfo);
+
+ // If our type is followed by an identifier and either ':' or ']', then
+ // this is probably an Objective-C message send where the leading '[' is
+ // missing. Recover as if that were the case.
+ if (!DeclaratorInfo.isInvalidType() && Tok.is(tok::identifier) &&
+ !InMessageExpression && getLangOpts().ObjC1 &&
+ (NextToken().is(tok::colon) || NextToken().is(tok::r_square))) {
+ TypeResult Ty;
+ {
+ InMessageExpressionRAIIObject InMessage(*this, false);
+ Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ }
+ Result = ParseObjCMessageExpressionBody(SourceLocation(),
+ SourceLocation(),
+ Ty.get(), nullptr);
+ } else {
+ // Match the ')'.
+ T.consumeClose();
+ ColonProtection.restore();
+ RParenLoc = T.getCloseLocation();
+ if (Tok.is(tok::l_brace)) {
+ ExprType = CompoundLiteral;
+ TypeResult Ty;
+ {
+ InMessageExpressionRAIIObject InMessage(*this, false);
+ Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ }
+ return ParseCompoundLiteralExpression(Ty.get(), OpenLoc, RParenLoc);
+ }
+
+ if (ExprType == CastExpr) {
+ // We parsed '(' type-name ')' and the thing after it wasn't a '{'.
+
+ if (DeclaratorInfo.isInvalidType())
+ return ExprError();
+
+        // Note that this doesn't parse the subsequent cast-expression; it
+        // just returns the parsed type to the caller.
+ if (stopIfCastExpr) {
+ TypeResult Ty;
+ {
+ InMessageExpressionRAIIObject InMessage(*this, false);
+ Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ }
+ CastTy = Ty.get();
+ return ExprResult();
+ }
+
+ // Reject the cast of super idiom in ObjC.
+ if (Tok.is(tok::identifier) && getLangOpts().ObjC1 &&
+ Tok.getIdentifierInfo() == Ident_super &&
+ getCurScope()->isInObjcMethodScope() &&
+ GetLookAheadToken(1).isNot(tok::period)) {
+ Diag(Tok.getLocation(), diag::err_illegal_super_cast)
+ << SourceRange(OpenLoc, RParenLoc);
+ return ExprError();
+ }
+
+        // Parse the cast-expression that follows next.
+        // TODO: Handle the cast expression with CastTy.
+ Result = ParseCastExpression(/*isUnaryExpression=*/false,
+ /*isAddressOfOperand=*/false,
+ /*isTypeCast=*/IsTypeCast);
+ if (!Result.isInvalid()) {
+ Result = Actions.ActOnCastExpr(getCurScope(), OpenLoc,
+ DeclaratorInfo, CastTy,
+ RParenLoc, Result.get());
+ }
+ return Result;
+ }
+
+ Diag(Tok, diag::err_expected_lbrace_in_compound_literal);
+ return ExprError();
+ }
+ } else if (Tok.is(tok::ellipsis) &&
+ isFoldOperator(NextToken().getKind())) {
+ return ParseFoldExpression(ExprResult(), T);
+ } else if (isTypeCast) {
+ // Parse the expression-list.
+ InMessageExpressionRAIIObject InMessage(*this, false);
+
+ ExprVector ArgExprs;
+ CommaLocsTy CommaLocs;
+
+ if (!ParseSimpleExpressionList(ArgExprs, CommaLocs)) {
+ // FIXME: If we ever support comma expressions as operands to
+ // fold-expressions, we'll need to allow multiple ArgExprs here.
+ if (ArgExprs.size() == 1 && isFoldOperator(Tok.getKind()) &&
+ NextToken().is(tok::ellipsis))
+ return ParseFoldExpression(Result, T);
+
+ ExprType = SimpleExpr;
+ Result = Actions.ActOnParenListExpr(OpenLoc, Tok.getLocation(),
+ ArgExprs);
+ }
+ } else {
+ InMessageExpressionRAIIObject InMessage(*this, false);
+
+ Result = ParseExpression(MaybeTypeCast);
+ if (!getLangOpts().CPlusPlus && MaybeTypeCast && Result.isUsable()) {
+ // Correct typos in non-C++ code earlier so that implicit-cast-like
+ // expressions are parsed correctly.
+ Result = Actions.CorrectDelayedTyposInExpr(Result);
+ }
+ ExprType = SimpleExpr;
+
+ if (isFoldOperator(Tok.getKind()) && NextToken().is(tok::ellipsis))
+ return ParseFoldExpression(Result, T);
+
+ // Don't build a paren expression unless we actually match a ')'.
+ if (!Result.isInvalid() && Tok.is(tok::r_paren))
+ Result =
+ Actions.ActOnParenExpr(OpenLoc, Tok.getLocation(), Result.get());
+ }
+
+ // Match the ')'.
+ if (Result.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+
+ T.consumeClose();
+ RParenLoc = T.getCloseLocation();
+ return Result;
+}
+
+/// ParseCompoundLiteralExpression - We have parsed the parenthesized type-name
+/// and we are at the left brace.
+///
+/// \verbatim
+/// postfix-expression: [C99 6.5.2]
+/// '(' type-name ')' '{' initializer-list '}'
+/// '(' type-name ')' '{' initializer-list ',' '}'
+/// \endverbatim
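+///
+/// For example:
+/// \code
+///   int *p = (int[]){1, 2, 3}; // C99 compound literal
+/// \endcode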
+ExprResult
+Parser::ParseCompoundLiteralExpression(ParsedType Ty,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc) {
+ assert(Tok.is(tok::l_brace) && "Not a compound literal!");
+ if (!getLangOpts().C99) // Compound literals don't exist in C90.
+ Diag(LParenLoc, diag::ext_c99_compound_literal);
+ ExprResult Result = ParseInitializer();
+ if (!Result.isInvalid() && Ty)
+ return Actions.ActOnCompoundLiteral(LParenLoc, Ty, RParenLoc, Result.get());
+ return Result;
+}
+
+/// ParseStringLiteralExpression - This handles the various token types that
+/// form string literals, and also handles string concatenation [C99 5.1.1.2,
+/// translation phase #6].
+///
+/// \verbatim
+/// primary-expression: [C99 6.5.1]
+/// string-literal
+/// \endverbatim
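+///
+/// For example, the adjacent literals in '"Hello, " "world"' are
+/// concatenated into the single literal "Hello, world".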
+ExprResult Parser::ParseStringLiteralExpression(bool AllowUserDefinedLiteral) {
+ assert(isTokenStringLiteral() && "Not a string literal!");
+
+ // String concat. Note that keywords like __func__ and __FUNCTION__ are not
+ // considered to be strings for concatenation purposes.
+ SmallVector<Token, 4> StringToks;
+
+ do {
+ StringToks.push_back(Tok);
+ ConsumeStringToken();
+ } while (isTokenStringLiteral());
+
+ // Pass the set of string tokens, ready for concatenation, to the actions.
+ return Actions.ActOnStringLiteral(StringToks,
+ AllowUserDefinedLiteral ? getCurScope()
+ : nullptr);
+}
+
+/// ParseGenericSelectionExpression - Parse a C11 generic-selection
+/// [C11 6.5.1.1].
+///
+/// \verbatim
+/// generic-selection:
+/// _Generic ( assignment-expression , generic-assoc-list )
+/// generic-assoc-list:
+/// generic-association
+/// generic-assoc-list , generic-association
+/// generic-association:
+/// type-name : assignment-expression
+/// default : assignment-expression
+/// \endverbatim
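+///
+/// A typical use is C11 type-generic dispatch, e.g.:
+/// \code
+///   // assumes <math.h>; 'my_fabs' is an illustrative macro
+///   #define my_fabs(x) _Generic((x), float: fabsf, default: fabs)(x)
+/// \endcode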
+ExprResult Parser::ParseGenericSelectionExpression() {
+ assert(Tok.is(tok::kw__Generic) && "_Generic keyword expected");
+ SourceLocation KeyLoc = ConsumeToken();
+
+ if (!getLangOpts().C11)
+ Diag(KeyLoc, diag::ext_c11_generic_selection);
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume())
+ return ExprError();
+
+ ExprResult ControllingExpr;
+ {
+ // C11 6.5.1.1p3 "The controlling expression of a generic selection is
+ // not evaluated."
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
+ ControllingExpr =
+ Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+ if (ControllingExpr.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+ }
+
+ if (ExpectAndConsume(tok::comma)) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+
+ SourceLocation DefaultLoc;
+ TypeVector Types;
+ ExprVector Exprs;
+ do {
+ ParsedType Ty;
+ if (Tok.is(tok::kw_default)) {
+ // C11 6.5.1.1p2 "A generic selection shall have no more than one default
+ // generic association."
+ if (!DefaultLoc.isInvalid()) {
+ Diag(Tok, diag::err_duplicate_default_assoc);
+ Diag(DefaultLoc, diag::note_previous_default_assoc);
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+ DefaultLoc = ConsumeToken();
+ Ty = ParsedType();
+ } else {
+ ColonProtectionRAIIObject X(*this);
+ TypeResult TR = ParseTypeName();
+ if (TR.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+ Ty = TR.get();
+ }
+ Types.push_back(Ty);
+
+ if (ExpectAndConsume(tok::colon)) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+
+    // FIXME: These expressions should be parsed in a potentially evaluated
+    // context.
+ ExprResult ER(
+ Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression()));
+ if (ER.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+ Exprs.push_back(ER.get());
+ } while (TryConsumeToken(tok::comma));
+
+ T.consumeClose();
+ if (T.getCloseLocation().isInvalid())
+ return ExprError();
+
+ return Actions.ActOnGenericSelectionExpr(KeyLoc, DefaultLoc,
+ T.getCloseLocation(),
+ ControllingExpr.get(),
+ Types, Exprs);
+}
+
+/// \brief Parse a C++1z fold-expression after the opening paren and optional
+/// left-hand-side expression.
+///
+/// \verbatim
+/// fold-expression:
+/// ( cast-expression fold-operator ... )
+/// ( ... fold-operator cast-expression )
+/// ( cast-expression fold-operator ... fold-operator cast-expression )
+/// \endverbatim
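+///
+/// For example:
+/// \code
+///   template <typename ...Ts> auto sum(Ts ...ts) { return (ts + ... + 0); }
+/// \endcode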
+ExprResult Parser::ParseFoldExpression(ExprResult LHS,
+ BalancedDelimiterTracker &T) {
+ if (LHS.isInvalid()) {
+ T.skipToEnd();
+    return ExprError();
+ }
+
+ tok::TokenKind Kind = tok::unknown;
+ SourceLocation FirstOpLoc;
+ if (LHS.isUsable()) {
+ Kind = Tok.getKind();
+ assert(isFoldOperator(Kind) && "missing fold-operator");
+ FirstOpLoc = ConsumeToken();
+ }
+
+ assert(Tok.is(tok::ellipsis) && "not a fold-expression");
+ SourceLocation EllipsisLoc = ConsumeToken();
+
+ ExprResult RHS;
+ if (Tok.isNot(tok::r_paren)) {
+ if (!isFoldOperator(Tok.getKind()))
+ return Diag(Tok.getLocation(), diag::err_expected_fold_operator);
+
+ if (Kind != tok::unknown && Tok.getKind() != Kind)
+ Diag(Tok.getLocation(), diag::err_fold_operator_mismatch)
+ << SourceRange(FirstOpLoc);
+ Kind = Tok.getKind();
+ ConsumeToken();
+
+ RHS = ParseExpression();
+ if (RHS.isInvalid()) {
+ T.skipToEnd();
+      return ExprError();
+ }
+ }
+
+ Diag(EllipsisLoc, getLangOpts().CPlusPlus1z
+ ? diag::warn_cxx14_compat_fold_expression
+ : diag::ext_fold_expression);
+
+ T.consumeClose();
+ return Actions.ActOnCXXFoldExpr(T.getOpenLocation(), LHS.get(), Kind,
+ EllipsisLoc, RHS.get(), T.getCloseLocation());
+}
+
+/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
+///
+/// \verbatim
+/// argument-expression-list:
+/// assignment-expression
+/// argument-expression-list , assignment-expression
+///
+/// [C++] expression-list:
+/// [C++] assignment-expression
+/// [C++] expression-list , assignment-expression
+///
+/// [C++0x] expression-list:
+/// [C++0x] initializer-list
+///
+/// [C++0x] initializer-list
+/// [C++0x] initializer-clause ...[opt]
+/// [C++0x] initializer-list , initializer-clause ...[opt]
+///
+/// [C++0x] initializer-clause:
+/// [C++0x] assignment-expression
+/// [C++0x] braced-init-list
+/// \endverbatim
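+///
+/// For example, the arguments in 'f(a, b + 1, {2, 3})' form an
+/// expression-list; the braced-init-list form requires C++11.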
+bool Parser::ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
+ SmallVectorImpl<SourceLocation> &CommaLocs,
+ std::function<void()> Completer) {
+ bool SawError = false;
+ while (1) {
+ if (Tok.is(tok::code_completion)) {
+ if (Completer)
+ Completer();
+ else
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Expression);
+ cutOffParsing();
+ return true;
+ }
+
+ ExprResult Expr;
+ if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+ Expr = ParseBraceInitializer();
+ } else
+ Expr = ParseAssignmentExpression();
+
+ if (Tok.is(tok::ellipsis))
+ Expr = Actions.ActOnPackExpansion(Expr.get(), ConsumeToken());
+ if (Expr.isInvalid()) {
+ SkipUntil(tok::comma, tok::r_paren, StopBeforeMatch);
+ SawError = true;
+ } else {
+ Exprs.push_back(Expr.get());
+ }
+
+ if (Tok.isNot(tok::comma))
+ break;
+ // Move to the next argument, remember where the comma was.
+ CommaLocs.push_back(ConsumeToken());
+ }
+ if (SawError) {
+ // Ensure typos get diagnosed when errors were encountered while parsing the
+ // expression list.
+ for (auto &E : Exprs) {
+ ExprResult Expr = Actions.CorrectDelayedTyposInExpr(E);
+ if (Expr.isUsable()) E = Expr.get();
+ }
+ }
+ return SawError;
+}
+
+/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
+/// used for misc language extensions.
+///
+/// \verbatim
+/// simple-expression-list:
+/// assignment-expression
+/// simple-expression-list , assignment-expression
+/// \endverbatim
+bool
+Parser::ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
+ SmallVectorImpl<SourceLocation> &CommaLocs) {
+ while (1) {
+ ExprResult Expr = ParseAssignmentExpression();
+ if (Expr.isInvalid())
+ return true;
+
+ Exprs.push_back(Expr.get());
+
+ if (Tok.isNot(tok::comma))
+ return false;
+
+ // Move to the next argument, remember where the comma was.
+ CommaLocs.push_back(ConsumeToken());
+ }
+}
+
+/// ParseBlockId - Parse a block-id, which roughly looks like int (int x).
+///
+/// \verbatim
+/// [clang] block-id:
+/// [clang] specifier-qualifier-list block-declarator
+/// \endverbatim
+void Parser::ParseBlockId(SourceLocation CaretLoc) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Type);
+ return cutOffParsing();
+ }
+
+ // Parse the specifier-qualifier-list piece.
+ DeclSpec DS(AttrFactory);
+ ParseSpecifierQualifierList(DS);
+
+ // Parse the block-declarator.
+ Declarator DeclaratorInfo(DS, Declarator::BlockLiteralContext);
+ ParseDeclarator(DeclaratorInfo);
+
+ MaybeParseGNUAttributes(DeclaratorInfo);
+
+ // Inform sema that we are starting a block.
+ Actions.ActOnBlockArguments(CaretLoc, DeclaratorInfo, getCurScope());
+}
+
+/// ParseBlockLiteralExpression - Parse a block literal, which roughly looks
+/// like ^(int x){ return x+1; }
+///
+/// \verbatim
+/// block-literal:
+/// [clang] '^' block-args[opt] compound-statement
+/// [clang] '^' block-id compound-statement
+/// [clang] block-args:
+/// [clang] '(' parameter-list ')'
+/// \endverbatim
+ExprResult Parser::ParseBlockLiteralExpression() {
+ assert(Tok.is(tok::caret) && "block literal starts with ^");
+ SourceLocation CaretLoc = ConsumeToken();
+
+ PrettyStackTraceLoc CrashInfo(PP.getSourceManager(), CaretLoc,
+ "block literal parsing");
+
+ // Enter a scope to hold everything within the block. This includes the
+ // argument decls, decls within the compound expression, etc. This also
+ // allows determining whether a variable reference inside the block is
+ // within or outside of the block.
+ ParseScope BlockScope(this, Scope::BlockScope | Scope::FnScope |
+ Scope::DeclScope);
+
+ // Inform sema that we are starting a block.
+ Actions.ActOnBlockStart(CaretLoc, getCurScope());
+
+ // Parse the return type if present.
+ DeclSpec DS(AttrFactory);
+ Declarator ParamInfo(DS, Declarator::BlockLiteralContext);
+ // FIXME: Since the return type isn't actually parsed, it can't be used to
+ // fill ParamInfo with an initial valid range, so do it manually.
+ ParamInfo.SetSourceRange(SourceRange(Tok.getLocation(), Tok.getLocation()));
+
+ // If this block has arguments, parse them. There is no ambiguity here with
+ // the expression case, because the expression case requires a parameter list.
+ if (Tok.is(tok::l_paren)) {
+ ParseParenDeclarator(ParamInfo);
+ // Parse the pieces after the identifier as if we had "int(...)".
+ // SetIdentifier sets the source range end, but in this case we're past
+ // that location.
+ SourceLocation Tmp = ParamInfo.getSourceRange().getEnd();
+ ParamInfo.SetIdentifier(nullptr, CaretLoc);
+ ParamInfo.SetRangeEnd(Tmp);
+ if (ParamInfo.isInvalidType()) {
+      // If there was an error parsing the arguments, the user may have
+      // tried to use ^(x+y), which requires an argument list. Just
+ // skip the whole block literal.
+ Actions.ActOnBlockError(CaretLoc, getCurScope());
+ return ExprError();
+ }
+
+ MaybeParseGNUAttributes(ParamInfo);
+
+ // Inform sema that we are starting a block.
+ Actions.ActOnBlockArguments(CaretLoc, ParamInfo, getCurScope());
+ } else if (!Tok.is(tok::l_brace)) {
+ ParseBlockId(CaretLoc);
+ } else {
+ // Otherwise, pretend we saw (void).
+ ParsedAttributes attrs(AttrFactory);
+ SourceLocation NoLoc;
+ ParamInfo.AddTypeInfo(DeclaratorChunk::getFunction(/*HasProto=*/true,
+ /*IsAmbiguous=*/false,
+ /*RParenLoc=*/NoLoc,
+ /*ArgInfo=*/nullptr,
+ /*NumArgs=*/0,
+ /*EllipsisLoc=*/NoLoc,
+ /*RParenLoc=*/NoLoc,
+ /*TypeQuals=*/0,
+ /*RefQualifierIsLvalueRef=*/true,
+ /*RefQualifierLoc=*/NoLoc,
+ /*ConstQualifierLoc=*/NoLoc,
+ /*VolatileQualifierLoc=*/NoLoc,
+ /*RestrictQualifierLoc=*/NoLoc,
+ /*MutableLoc=*/NoLoc,
+ EST_None,
+ /*ESpecRange=*/SourceRange(),
+ /*Exceptions=*/nullptr,
+ /*ExceptionRanges=*/nullptr,
+ /*NumExceptions=*/0,
+ /*NoexceptExpr=*/nullptr,
+ /*ExceptionSpecTokens=*/nullptr,
+ CaretLoc, CaretLoc,
+ ParamInfo),
+ attrs, CaretLoc);
+
+ MaybeParseGNUAttributes(ParamInfo);
+
+ // Inform sema that we are starting a block.
+ Actions.ActOnBlockArguments(CaretLoc, ParamInfo, getCurScope());
+ }
+
+ ExprResult Result(true);
+ if (!Tok.is(tok::l_brace)) {
+ // Saw something like: ^expr
+ Diag(Tok, diag::err_expected_expression);
+ Actions.ActOnBlockError(CaretLoc, getCurScope());
+ return ExprError();
+ }
+
+ StmtResult Stmt(ParseCompoundStatementBody());
+ BlockScope.Exit();
+ if (!Stmt.isInvalid())
+ Result = Actions.ActOnBlockStmtExpr(CaretLoc, Stmt.get(), getCurScope());
+ else
+ Actions.ActOnBlockError(CaretLoc, getCurScope());
+ return Result;
+}
+
+/// ParseObjCBoolLiteral - This handles the Objective-C Boolean literals.
+///
+/// '__objc_yes'
+/// '__objc_no'
+ExprResult Parser::ParseObjCBoolLiteral() {
+ tok::TokenKind Kind = Tok.getKind();
+ return Actions.ActOnObjCBoolLiteral(ConsumeToken(), Kind);
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp
new file mode 100644
index 0000000..f8938ba
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp
@@ -0,0 +1,3153 @@
+//===--- ParseExprCXX.cpp - C++ Expression Parsing ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Expression parsing implementation for C++.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTContext.h"
+#include "RAIIObjectsForParser.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/Scope.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace clang;
+
+static int SelectDigraphErrorMessage(tok::TokenKind Kind) {
+ switch (Kind) {
+ // template name
+ case tok::unknown: return 0;
+ // casts
+ case tok::kw_const_cast: return 1;
+ case tok::kw_dynamic_cast: return 2;
+ case tok::kw_reinterpret_cast: return 3;
+ case tok::kw_static_cast: return 4;
+ default:
+ llvm_unreachable("Unknown type for digraph error message.");
+ }
+}
+
+// Are the two tokens adjacent in the same source file?
+bool Parser::areTokensAdjacent(const Token &First, const Token &Second) {
+ SourceManager &SM = PP.getSourceManager();
+ SourceLocation FirstLoc = SM.getSpellingLoc(First.getLocation());
+ SourceLocation FirstEnd = FirstLoc.getLocWithOffset(First.getLength());
+ return FirstEnd == SM.getSpellingLoc(Second.getLocation());
+}
+
+// Suggest fixit for "<::" after a cast.
+static void FixDigraph(Parser &P, Preprocessor &PP, Token &DigraphToken,
+ Token &ColonToken, tok::TokenKind Kind, bool AtDigraph) {
+ // Pull '<:' and ':' off token stream.
+ if (!AtDigraph)
+ PP.Lex(DigraphToken);
+ PP.Lex(ColonToken);
+
+ SourceRange Range;
+ Range.setBegin(DigraphToken.getLocation());
+ Range.setEnd(ColonToken.getLocation());
+ P.Diag(DigraphToken.getLocation(), diag::err_missing_whitespace_digraph)
+ << SelectDigraphErrorMessage(Kind)
+ << FixItHint::CreateReplacement(Range, "< ::");
+
+ // Update token information to reflect their change in token type.
+ ColonToken.setKind(tok::coloncolon);
+ ColonToken.setLocation(ColonToken.getLocation().getLocWithOffset(-1));
+ ColonToken.setLength(2);
+ DigraphToken.setKind(tok::less);
+ DigraphToken.setLength(1);
+
+ // Push new tokens back to token stream.
+ PP.EnterToken(ColonToken);
+ if (!AtDigraph)
+ PP.EnterToken(DigraphToken);
+}
+
+// Check for '<::' which should be '< ::' instead of '[:' when following
+// a template name.
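+//
+// For example, under pre-C++11 lexing rules 'some_template<::GlobalType> x;'
+// lexes '<:' as the digraph '[', so we suggest writing '< ::' instead
+// ('some_template' and 'GlobalType' are hypothetical names).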
+void Parser::CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectType,
+ bool EnteringContext,
+ IdentifierInfo &II, CXXScopeSpec &SS) {
+ if (!Next.is(tok::l_square) || Next.getLength() != 2)
+ return;
+
+ Token SecondToken = GetLookAheadToken(2);
+ if (!SecondToken.is(tok::colon) || !areTokensAdjacent(Next, SecondToken))
+ return;
+
+ TemplateTy Template;
+ UnqualifiedId TemplateName;
+ TemplateName.setIdentifier(&II, Tok.getLocation());
+ bool MemberOfUnknownSpecialization;
+ if (!Actions.isTemplateName(getCurScope(), SS, /*hasTemplateKeyword=*/false,
+ TemplateName, ObjectType, EnteringContext,
+ Template, MemberOfUnknownSpecialization))
+ return;
+
+ FixDigraph(*this, PP, Next, SecondToken, tok::unknown,
+ /*AtDigraph*/false);
+}
+
+/// \brief Emits an error for a left parenthesis after a double colon.
+///
+/// When a '(' is found after a '::', emit an error. Attempt to fix the token
+/// stream by removing the '(', and the matching ')' if found.
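+///
+/// For example, given '::(foo)()' or '::(*pf)()', where 'foo' and 'pf' are
+/// hypothetical names, the stray parentheses are removed so the qualified
+/// name can be parsed.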
+void Parser::CheckForLParenAfterColonColon() {
+ if (!Tok.is(tok::l_paren))
+ return;
+
+ Token LParen = Tok;
+ Token NextTok = GetLookAheadToken(1);
+ Token StarTok = NextTok;
+ // Check for (identifier or (*identifier
+ Token IdentifierTok = StarTok.is(tok::star) ? GetLookAheadToken(2) : StarTok;
+ if (IdentifierTok.isNot(tok::identifier))
+ return;
+ // Eat the '('.
+ ConsumeParen();
+ Token RParen;
+ RParen.setLocation(SourceLocation());
+ // Do we have a ')' ?
+ NextTok = StarTok.is(tok::star) ? GetLookAheadToken(2) : GetLookAheadToken(1);
+ if (NextTok.is(tok::r_paren)) {
+ RParen = NextTok;
+ // Eat the '*' if it is present.
+ if (StarTok.is(tok::star))
+ ConsumeToken();
+ // Eat the identifier.
+ ConsumeToken();
+ // Add the identifier token back.
+ PP.EnterToken(IdentifierTok);
+ // Add the '*' back if it was present.
+ if (StarTok.is(tok::star))
+ PP.EnterToken(StarTok);
+ // Eat the ')'.
+ ConsumeParen();
+ }
+
+ Diag(LParen.getLocation(), diag::err_paren_after_colon_colon)
+ << FixItHint::CreateRemoval(LParen.getLocation())
+ << FixItHint::CreateRemoval(RParen.getLocation());
+}
+
+/// \brief Parse global scope or nested-name-specifier if present.
+///
+/// Parses a C++ global scope specifier ('::') or nested-name-specifier (which
+/// may be preceded by '::'). Note that this routine will not parse ::new or
+/// ::delete; it will just leave them in the token stream.
+///
+/// '::'[opt] nested-name-specifier
+/// '::'
+///
+/// nested-name-specifier:
+/// type-name '::'
+/// namespace-name '::'
+/// nested-name-specifier identifier '::'
+/// nested-name-specifier 'template'[opt] simple-template-id '::'
+///
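+/// For example, 'N::' in 'N::f()' and 'std::vector<int>::' in
+/// 'std::vector<int>::size_type' are nested-name-specifiers.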
+///
+/// \param SS the scope specifier that will be set to the parsed
+/// nested-name-specifier (or empty)
+///
+/// \param ObjectType if this nested-name-specifier is being parsed following
+/// the "." or "->" of a member access expression, this parameter provides the
+/// type of the object whose members are being accessed.
+///
+/// \param EnteringContext whether we will be entering into the context of
+/// the nested-name-specifier after parsing it.
+///
+/// \param MayBePseudoDestructor When non-NULL, points to a flag that
+/// indicates whether this nested-name-specifier may be part of a
+/// pseudo-destructor name. In this case, the flag will be set false
+/// if we don't actually end up parsing a destructor name. Moreorover,
+/// if we do end up determining that we are parsing a destructor name,
+/// the last component of the nested-name-specifier is not parsed as
+/// part of the scope specifier.
+///
+/// \param IsTypename If \c true, this nested-name-specifier is known to be
+/// part of a type name. This is used to improve error recovery.
+///
+/// \param LastII When non-NULL, points to an IdentifierInfo* that will be
+/// filled in with the leading identifier in the last component of the
+/// nested-name-specifier, if any.
+///
+/// \returns true if there was an error parsing a scope specifier
+bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
+ ParsedType ObjectType,
+ bool EnteringContext,
+ bool *MayBePseudoDestructor,
+ bool IsTypename,
+ IdentifierInfo **LastII) {
+ assert(getLangOpts().CPlusPlus &&
+ "Call sites of this function should be guarded by checking for C++");
+
+ if (Tok.is(tok::annot_cxxscope)) {
+ assert(!LastII && "want last identifier but have already annotated scope");
+ assert(!MayBePseudoDestructor && "unexpected annot_cxxscope");
+ Actions.RestoreNestedNameSpecifierAnnotation(Tok.getAnnotationValue(),
+ Tok.getAnnotationRange(),
+ SS);
+ ConsumeToken();
+ return false;
+ }
+
+ if (Tok.is(tok::annot_template_id)) {
+ // If the current token is an annotated template id, it may already have
+ // a scope specifier. Restore it.
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ SS = TemplateId->SS;
+ }
+
+ // Has to happen before any "return false"s in this function.
+ bool CheckForDestructor = false;
+ if (MayBePseudoDestructor && *MayBePseudoDestructor) {
+ CheckForDestructor = true;
+ *MayBePseudoDestructor = false;
+ }
+
+ if (LastII)
+ *LastII = nullptr;
+
+ bool HasScopeSpecifier = false;
+
+ if (Tok.is(tok::coloncolon)) {
+ // ::new and ::delete aren't nested-name-specifiers.
+ tok::TokenKind NextKind = NextToken().getKind();
+ if (NextKind == tok::kw_new || NextKind == tok::kw_delete)
+ return false;
+
+ if (NextKind == tok::l_brace) {
+      // It is invalid to have '::' followed by '{'; consume the scope
+      // qualifier and pretend we never saw it.
+ Diag(ConsumeToken(), diag::err_expected) << tok::identifier;
+ } else {
+ // '::' - Global scope qualifier.
+ if (Actions.ActOnCXXGlobalScopeSpecifier(ConsumeToken(), SS))
+ return true;
+
+ CheckForLParenAfterColonColon();
+
+ HasScopeSpecifier = true;
+ }
+ }
+
+ if (Tok.is(tok::kw___super)) {
+ SourceLocation SuperLoc = ConsumeToken();
+ if (!Tok.is(tok::coloncolon)) {
+ Diag(Tok.getLocation(), diag::err_expected_coloncolon_after_super);
+ return true;
+ }
+
+ return Actions.ActOnSuperScopeSpecifier(SuperLoc, ConsumeToken(), SS);
+ }
+
+ if (!HasScopeSpecifier &&
+ Tok.isOneOf(tok::kw_decltype, tok::annot_decltype)) {
+ DeclSpec DS(AttrFactory);
+ SourceLocation DeclLoc = Tok.getLocation();
+ SourceLocation EndLoc = ParseDecltypeSpecifier(DS);
+
+ SourceLocation CCLoc;
+ if (!TryConsumeToken(tok::coloncolon, CCLoc)) {
+ AnnotateExistingDecltypeSpecifier(DS, DeclLoc, EndLoc);
+ return false;
+ }
+
+ if (Actions.ActOnCXXNestedNameSpecifierDecltype(SS, DS, CCLoc))
+ SS.SetInvalid(SourceRange(DeclLoc, CCLoc));
+
+ HasScopeSpecifier = true;
+ }
+
+ while (true) {
+ if (HasScopeSpecifier) {
+ // C++ [basic.lookup.classref]p5:
+ // If the qualified-id has the form
+ //
+ // ::class-name-or-namespace-name::...
+ //
+ // the class-name-or-namespace-name is looked up in global scope as a
+ // class-name or namespace-name.
+ //
+ // To implement this, we clear out the object type as soon as we've
+ // seen a leading '::' or part of a nested-name-specifier.
+ ObjectType = ParsedType();
+
+ if (Tok.is(tok::code_completion)) {
+        // Code completion for a nested-name-specifier, where the code
+        // completion token follows the '::'.
+ Actions.CodeCompleteQualifiedId(getCurScope(), SS, EnteringContext);
+        // Include the code completion token in the range of the scope;
+        // otherwise, when we try to annotate the scope tokens, the dangling
+        // code completion token will trigger an assertion in
+        // Preprocessor::AnnotatePreviousCachedTokens.
+ SS.setEndLoc(Tok.getLocation());
+ cutOffParsing();
+ return true;
+ }
+ }
+
+ // nested-name-specifier:
+ // nested-name-specifier 'template'[opt] simple-template-id '::'
+
+ // Parse the optional 'template' keyword, then make sure we have
+ // 'identifier <' after it.
+ if (Tok.is(tok::kw_template)) {
+ // If we don't have a scope specifier or an object type, this isn't a
+ // nested-name-specifier, since they aren't allowed to start with
+ // 'template'.
+ if (!HasScopeSpecifier && !ObjectType)
+ break;
+
+ TentativeParsingAction TPA(*this);
+ SourceLocation TemplateKWLoc = ConsumeToken();
+
+ UnqualifiedId TemplateName;
+ if (Tok.is(tok::identifier)) {
+ // Consume the identifier.
+ TemplateName.setIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
+ ConsumeToken();
+ } else if (Tok.is(tok::kw_operator)) {
+ // We don't need to actually parse the unqualified-id in this case,
+ // because a simple-template-id cannot start with 'operator', but
+ // go ahead and parse it anyway for consistency with the case where
+ // we already annotated the template-id.
+ if (ParseUnqualifiedIdOperator(SS, EnteringContext, ObjectType,
+ TemplateName)) {
+ TPA.Commit();
+ break;
+ }
+
+ if (TemplateName.getKind() != UnqualifiedId::IK_OperatorFunctionId &&
+ TemplateName.getKind() != UnqualifiedId::IK_LiteralOperatorId) {
+ Diag(TemplateName.getSourceRange().getBegin(),
+ diag::err_id_after_template_in_nested_name_spec)
+ << TemplateName.getSourceRange();
+ TPA.Commit();
+ break;
+ }
+ } else {
+ TPA.Revert();
+ break;
+ }
+
+ // If the next token is not '<', we have a qualified-id that refers
+ // to a template name, such as T::template apply, but is not a
+ // template-id.
+ if (Tok.isNot(tok::less)) {
+ TPA.Revert();
+ break;
+ }
+
+ // Commit to parsing the template-id.
+ TPA.Commit();
+ TemplateTy Template;
+ if (TemplateNameKind TNK
+ = Actions.ActOnDependentTemplateName(getCurScope(),
+ SS, TemplateKWLoc, TemplateName,
+ ObjectType, EnteringContext,
+ Template)) {
+ if (AnnotateTemplateIdToken(Template, TNK, SS, TemplateKWLoc,
+ TemplateName, false))
+ return true;
+ } else
+ return true;
+
+ continue;
+ }
+
+ if (Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon)) {
+ // We have
+ //
+ // template-id '::'
+ //
+ // So we need to check whether the template-id is a simple-template-id of
+ // the right kind (it should name a type or be dependent), and then
+ // convert it into a type within the nested-name-specifier.
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ if (CheckForDestructor && GetLookAheadToken(2).is(tok::tilde)) {
+ *MayBePseudoDestructor = true;
+ return false;
+ }
+
+ if (LastII)
+ *LastII = TemplateId->Name;
+
+ // Consume the template-id token.
+ ConsumeToken();
+
+ assert(Tok.is(tok::coloncolon) && "NextToken() not working properly!");
+ SourceLocation CCLoc = ConsumeToken();
+
+ HasScopeSpecifier = true;
+
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+
+ if (Actions.ActOnCXXNestedNameSpecifier(getCurScope(),
+ SS,
+ TemplateId->TemplateKWLoc,
+ TemplateId->Template,
+ TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc,
+ TemplateArgsPtr,
+ TemplateId->RAngleLoc,
+ CCLoc,
+ EnteringContext)) {
+ SourceLocation StartLoc
+ = SS.getBeginLoc().isValid()? SS.getBeginLoc()
+ : TemplateId->TemplateNameLoc;
+ SS.SetInvalid(SourceRange(StartLoc, CCLoc));
+ }
+
+ continue;
+ }
+
+ // The rest of the nested-name-specifier possibilities start with
+ // tok::identifier.
+ if (Tok.isNot(tok::identifier))
+ break;
+
+ IdentifierInfo &II = *Tok.getIdentifierInfo();
+
+ // nested-name-specifier:
+ // type-name '::'
+ // namespace-name '::'
+ // nested-name-specifier identifier '::'
+ Token Next = NextToken();
+
+ // If we get foo:bar, this is almost certainly a typo for foo::bar. Recover
+ // and emit a fixit hint for it.
+ if (Next.is(tok::colon) && !ColonIsSacred) {
+ if (Actions.IsInvalidUnlessNestedName(getCurScope(), SS, II,
+ Tok.getLocation(),
+ Next.getLocation(), ObjectType,
+ EnteringContext) &&
+ // If the token after the colon isn't an identifier, it's still an
+ // error, but they probably meant something else strange so don't
+ // recover like this.
+ PP.LookAhead(1).is(tok::identifier)) {
+ Diag(Next, diag::err_unexpected_colon_in_nested_name_spec)
+ << FixItHint::CreateReplacement(Next.getLocation(), "::");
+ // Recover as if the user wrote '::'.
+ Next.setKind(tok::coloncolon);
+ }
+ }
+
+ if (Next.is(tok::coloncolon) && GetLookAheadToken(2).is(tok::l_brace)) {
+      // It is invalid to have '::' followed by '{'; consume the scope
+      // qualifier and pretend we never saw it.
+ Token Identifier = Tok; // Stash away the identifier.
+ ConsumeToken(); // Eat the identifier, current token is now '::'.
+ Diag(PP.getLocForEndOfToken(ConsumeToken()), diag::err_expected)
+ << tok::identifier;
+ UnconsumeToken(Identifier); // Stick the identifier back.
+ Next = NextToken(); // Point Next at the '{' token.
+ }
+
+ if (Next.is(tok::coloncolon)) {
+ if (CheckForDestructor && GetLookAheadToken(2).is(tok::tilde) &&
+ !Actions.isNonTypeNestedNameSpecifier(
+ getCurScope(), SS, Tok.getLocation(), II, ObjectType)) {
+ *MayBePseudoDestructor = true;
+ return false;
+ }
+
+ if (ColonIsSacred) {
+ const Token &Next2 = GetLookAheadToken(2);
+ if (Next2.is(tok::kw_private) || Next2.is(tok::kw_protected) ||
+ Next2.is(tok::kw_public) || Next2.is(tok::kw_virtual)) {
+ Diag(Next2, diag::err_unexpected_token_in_nested_name_spec)
+ << Next2.getName()
+ << FixItHint::CreateReplacement(Next.getLocation(), ":");
+ Token ColonColon;
+ PP.Lex(ColonColon);
+ ColonColon.setKind(tok::colon);
+ PP.EnterToken(ColonColon);
+ break;
+ }
+ }
+
+ if (LastII)
+ *LastII = &II;
+
+      // We have an identifier followed by a '::'. Look up this name as the
+      // name in a nested-name-specifier.
+ Token Identifier = Tok;
+ SourceLocation IdLoc = ConsumeToken();
+ assert(Tok.isOneOf(tok::coloncolon, tok::colon) &&
+ "NextToken() not working properly!");
+ Token ColonColon = Tok;
+ SourceLocation CCLoc = ConsumeToken();
+
+ CheckForLParenAfterColonColon();
+
+ bool IsCorrectedToColon = false;
+ bool *CorrectionFlagPtr = ColonIsSacred ? &IsCorrectedToColon : nullptr;
+ if (Actions.ActOnCXXNestedNameSpecifier(getCurScope(), II, IdLoc, CCLoc,
+ ObjectType, EnteringContext, SS,
+ false, CorrectionFlagPtr)) {
+        // The identifier is not recognized as a nested name, but the '::'
+        // may be a typo for ':'.
+ if (CorrectionFlagPtr && IsCorrectedToColon) {
+ ColonColon.setKind(tok::colon);
+ PP.EnterToken(Tok);
+ PP.EnterToken(ColonColon);
+ Tok = Identifier;
+ break;
+ }
+ SS.SetInvalid(SourceRange(IdLoc, CCLoc));
+ }
+ HasScopeSpecifier = true;
+ continue;
+ }
+
+ CheckForTemplateAndDigraph(Next, ObjectType, EnteringContext, II, SS);
+
+ // nested-name-specifier:
+ // type-name '<'
+ if (Next.is(tok::less)) {
+ TemplateTy Template;
+ UnqualifiedId TemplateName;
+ TemplateName.setIdentifier(&II, Tok.getLocation());
+ bool MemberOfUnknownSpecialization;
+ if (TemplateNameKind TNK = Actions.isTemplateName(getCurScope(), SS,
+ /*hasTemplateKeyword=*/false,
+ TemplateName,
+ ObjectType,
+ EnteringContext,
+ Template,
+ MemberOfUnknownSpecialization)) {
+ // We have found a template name, so annotate this token
+ // with a template-id annotation. We do not permit the
+ // template-id to be translated into a type annotation,
+ // because some clients (e.g., the parsing of class template
+ // specializations) still want to see the original template-id
+ // token.
+ ConsumeToken();
+ if (AnnotateTemplateIdToken(Template, TNK, SS, SourceLocation(),
+ TemplateName, false))
+ return true;
+ continue;
+ }
+
+ if (MemberOfUnknownSpecialization && (ObjectType || SS.isSet()) &&
+ (IsTypename || IsTemplateArgumentList(1))) {
+ // We have something like t::getAs<T>, where getAs is a
+ // member of an unknown specialization. However, this will only
+ // parse correctly as a template, so suggest the keyword 'template'
+ // before 'getAs' and treat this as a dependent template name.
+ unsigned DiagID = diag::err_missing_dependent_template_keyword;
+ if (getLangOpts().MicrosoftExt)
+ DiagID = diag::warn_missing_dependent_template_keyword;
+
+ Diag(Tok.getLocation(), DiagID)
+ << II.getName()
+ << FixItHint::CreateInsertion(Tok.getLocation(), "template ");
+
+ if (TemplateNameKind TNK
+ = Actions.ActOnDependentTemplateName(getCurScope(),
+ SS, SourceLocation(),
+ TemplateName, ObjectType,
+ EnteringContext, Template)) {
+ // Consume the identifier.
+ ConsumeToken();
+ if (AnnotateTemplateIdToken(Template, TNK, SS, SourceLocation(),
+ TemplateName, false))
+ return true;
+ }
+ else
+ return true;
+
+ continue;
+ }
+ }
+
+ // We don't have any tokens that form the beginning of a
+ // nested-name-specifier, so we're done.
+ break;
+ }
+
+ // Even if we didn't see any pieces of a nested-name-specifier, we
+ // still check whether there is a tilde in this position, which
+ // indicates a potential pseudo-destructor.
+ if (CheckForDestructor && Tok.is(tok::tilde))
+ *MayBePseudoDestructor = true;
+
+ return false;
+}
+
+ExprResult Parser::tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
+ Token &Replacement) {
+ SourceLocation TemplateKWLoc;
+ UnqualifiedId Name;
+ if (ParseUnqualifiedId(SS,
+ /*EnteringContext=*/false,
+ /*AllowDestructorName=*/false,
+ /*AllowConstructorName=*/false,
+ /*ObjectType=*/ParsedType(), TemplateKWLoc, Name))
+ return ExprError();
+
+ // This is only the direct operand of an & operator if it is not
+ // followed by a postfix-expression suffix.
+ if (isAddressOfOperand && isPostfixExpressionSuffixStart())
+ isAddressOfOperand = false;
+
+ return Actions.ActOnIdExpression(getCurScope(), SS, TemplateKWLoc, Name,
+ Tok.is(tok::l_paren), isAddressOfOperand,
+ nullptr, /*IsInlineAsmIdentifier=*/false,
+ &Replacement);
+}
+
+/// ParseCXXIdExpression - Handle id-expression.
+///
+/// id-expression:
+/// unqualified-id
+/// qualified-id
+///
+/// qualified-id:
+/// '::'[opt] nested-name-specifier 'template'[opt] unqualified-id
+/// '::' identifier
+/// '::' operator-function-id
+/// '::' template-id
+///
+/// NOTE: The standard specifies that, for qualified-id, the parser does not
+/// expect:
+///
+/// '::' conversion-function-id
+/// '::' '~' class-name
+///
+/// This may cause a slight inconsistency in diagnostics:
+///
+/// class C {};
+/// namespace A {}
+/// void f() {
+/// :: A :: ~ C(); // Some Sema error about using destructor with a
+/// // namespace.
+/// :: ~ C(); // Some Parser error like 'unexpected ~'.
+/// }
+///
+/// We simplify the parser a bit and make it work like:
+///
+/// qualified-id:
+/// '::'[opt] nested-name-specifier 'template'[opt] unqualified-id
+/// '::' unqualified-id
+///
+/// That way Sema can handle and report similar errors for namespaces and the
+/// global scope.
+///
+/// The isAddressOfOperand parameter indicates that this id-expression is a
+/// direct operand of the address-of operator. This is, besides member contexts,
+/// the only place where a qualified-id naming a non-static class member may
+/// appear.
+///
+ExprResult Parser::ParseCXXIdExpression(bool isAddressOfOperand) {
+ // qualified-id:
+ // '::'[opt] nested-name-specifier 'template'[opt] unqualified-id
+ // '::' unqualified-id
+ //
+ CXXScopeSpec SS;
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
+
+ Token Replacement;
+ ExprResult Result =
+ tryParseCXXIdExpression(SS, isAddressOfOperand, Replacement);
+ if (Result.isUnset()) {
+ // If the ExprResult is valid but null, then typo correction suggested a
+ // keyword replacement that needs to be reparsed.
+ UnconsumeToken(Replacement);
+ Result = tryParseCXXIdExpression(SS, isAddressOfOperand, Replacement);
+ }
+ assert(!Result.isUnset() && "Typo correction suggested a keyword replacement "
+ "for a previous keyword suggestion");
+ return Result;
+}
+
+/// ParseLambdaExpression - Parse a C++11 lambda expression.
+///
+/// lambda-expression:
+/// lambda-introducer lambda-declarator[opt] compound-statement
+///
+/// lambda-introducer:
+/// '[' lambda-capture[opt] ']'
+///
+/// lambda-capture:
+/// capture-default
+/// capture-list
+/// capture-default ',' capture-list
+///
+/// capture-default:
+/// '&'
+/// '='
+///
+/// capture-list:
+/// capture
+/// capture-list ',' capture
+///
+/// capture:
+/// simple-capture
+/// init-capture [C++1y]
+///
+/// simple-capture:
+/// identifier
+/// '&' identifier
+/// 'this'
+///
+/// init-capture: [C++1y]
+/// identifier initializer
+/// '&' identifier initializer
+///
+/// lambda-declarator:
+/// '(' parameter-declaration-clause ')' attribute-specifier[opt]
+/// 'mutable'[opt] exception-specification[opt]
+/// trailing-return-type[opt]
+///
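+/// For example:
+/// \code
+///   auto f = [n = 0](int i) mutable { return n += i; }; // C++14 init-capture
+/// \endcode
+///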
+ExprResult Parser::ParseLambdaExpression() {
+ // Parse lambda-introducer.
+ LambdaIntroducer Intro;
+ Optional<unsigned> DiagID = ParseLambdaIntroducer(Intro);
+ if (DiagID) {
+ Diag(Tok, DiagID.getValue());
+ SkipUntil(tok::r_square, StopAtSemi);
+ SkipUntil(tok::l_brace, StopAtSemi);
+ SkipUntil(tok::r_brace, StopAtSemi);
+ return ExprError();
+ }
+
+ return ParseLambdaExpressionAfterIntroducer(Intro);
+}
+
+/// TryParseLambdaExpression - Use lookahead and potentially tentative
+/// parsing to determine if we are looking at a C++11 lambda expression, and
+/// parse it if we are.
+///
+/// If we are not looking at a lambda expression, returns ExprError().
+ExprResult Parser::TryParseLambdaExpression() {
+ assert(getLangOpts().CPlusPlus11
+ && Tok.is(tok::l_square)
+ && "Not at the start of a possible lambda expression.");
+
+ const Token Next = NextToken(), After = GetLookAheadToken(2);
+
+ // If lookahead indicates this is a lambda...
+ if (Next.is(tok::r_square) || // []
+ Next.is(tok::equal) || // [=
+ (Next.is(tok::amp) && // [&] or [&,
+ (After.is(tok::r_square) ||
+ After.is(tok::comma))) ||
+ (Next.is(tok::identifier) && // [identifier]
+ After.is(tok::r_square))) {
+ return ParseLambdaExpression();
+ }
+
+ // If lookahead indicates an ObjC message send...
+ // [identifier identifier
+ if (Next.is(tok::identifier) && After.is(tok::identifier)) {
+ return ExprEmpty();
+ }
+
+  // Here, we're stuck: lambda introducers and Objective-C message sends are
+  // unambiguous, but distinguishing them requires arbitrary lookahead.
+  // [a,b,c,d,e,f,g] is a lambda, and [a,b,c,d,e,f,g h] is an Objective-C
+  // message send. Instead of writing two routines to parse a lambda
+  // introducer, just try to parse a lambda introducer first, and fall back
+  // if that fails.
+ // (TryParseLambdaIntroducer never produces any diagnostic output.)
+ LambdaIntroducer Intro;
+ if (TryParseLambdaIntroducer(Intro))
+ return ExprEmpty();
+
+ return ParseLambdaExpressionAfterIntroducer(Intro);
+}
+
+/// \brief Parse a lambda introducer.
+/// \param Intro A LambdaIntroducer filled in with information about the
+/// contents of the lambda-introducer.
+/// \param SkippedInits If non-null, we are disambiguating between an Obj-C
+/// message send and a lambda expression. In this mode, we will
+/// sometimes skip the initializers for init-captures and not fully
+/// populate \p Intro. This flag will be set to \c true if we do so.
+/// \return A DiagnosticID if it hit something unexpected. The location
+/// for the diagnostic is that of the current token.
+Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
+ bool *SkippedInits) {
+ typedef Optional<unsigned> DiagResult;
+
+ assert(Tok.is(tok::l_square) && "Lambda expressions begin with '['.");
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+
+ Intro.Range.setBegin(T.getOpenLocation());
+
+ bool first = true;
+
+ // Parse capture-default.
+ if (Tok.is(tok::amp) &&
+ (NextToken().is(tok::comma) || NextToken().is(tok::r_square))) {
+ Intro.Default = LCD_ByRef;
+ Intro.DefaultLoc = ConsumeToken();
+ first = false;
+ } else if (Tok.is(tok::equal)) {
+ Intro.Default = LCD_ByCopy;
+ Intro.DefaultLoc = ConsumeToken();
+ first = false;
+ }
+
+ while (Tok.isNot(tok::r_square)) {
+ if (!first) {
+ if (Tok.isNot(tok::comma)) {
+ // Provide a completion for a lambda introducer here. Except
+        // in Objective-C, where this is almost surely meant to be a message
+ // send. In that case, fail here and let the ObjC message
+ // expression parser perform the completion.
+ if (Tok.is(tok::code_completion) &&
+ !(getLangOpts().ObjC1 && Intro.Default == LCD_None &&
+ !Intro.Captures.empty())) {
+ Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
+ /*AfterAmpersand=*/false);
+ cutOffParsing();
+ break;
+ }
+
+ return DiagResult(diag::err_expected_comma_or_rsquare);
+ }
+ ConsumeToken();
+ }
+
+ if (Tok.is(tok::code_completion)) {
+ // If we're in Objective-C++ and we have a bare '[', then this is more
+ // likely to be a message receiver.
+ if (getLangOpts().ObjC1 && first)
+ Actions.CodeCompleteObjCMessageReceiver(getCurScope());
+ else
+ Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
+ /*AfterAmpersand=*/false);
+ cutOffParsing();
+ break;
+ }
+
+ first = false;
+
+ // Parse capture.
+ LambdaCaptureKind Kind = LCK_ByCopy;
+ LambdaCaptureInitKind InitKind = LambdaCaptureInitKind::NoInit;
+ SourceLocation Loc;
+ IdentifierInfo *Id = nullptr;
+ SourceLocation EllipsisLoc;
+ ExprResult Init;
+
+ if (Tok.is(tok::kw_this)) {
+ Kind = LCK_This;
+ Loc = ConsumeToken();
+ } else {
+ if (Tok.is(tok::amp)) {
+ Kind = LCK_ByRef;
+ ConsumeToken();
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
+ /*AfterAmpersand=*/true);
+ cutOffParsing();
+ break;
+ }
+ }
+
+ if (Tok.is(tok::identifier)) {
+ Id = Tok.getIdentifierInfo();
+ Loc = ConsumeToken();
+ } else if (Tok.is(tok::kw_this)) {
+ // FIXME: If we want to suggest a fixit here, will need to return more
+        // FIXME: If we want to suggest a fixit here, we will need to return
+        // more than just a DiagnosticID. Perhaps a full DiagnosticBuilder
+        // that can be Clear()ed to prevent emission during tentative parsing?
+ } else {
+ return DiagResult(diag::err_expected_capture);
+ }
+
+ if (Tok.is(tok::l_paren)) {
+ BalancedDelimiterTracker Parens(*this, tok::l_paren);
+ Parens.consumeOpen();
+
+ InitKind = LambdaCaptureInitKind::DirectInit;
+
+ ExprVector Exprs;
+ CommaLocsTy Commas;
+ if (SkippedInits) {
+ Parens.skipToEnd();
+ *SkippedInits = true;
+ } else if (ParseExpressionList(Exprs, Commas)) {
+ Parens.skipToEnd();
+ Init = ExprError();
+ } else {
+ Parens.consumeClose();
+ Init = Actions.ActOnParenListExpr(Parens.getOpenLocation(),
+ Parens.getCloseLocation(),
+ Exprs);
+ }
+ } else if (Tok.isOneOf(tok::l_brace, tok::equal)) {
+ // Each lambda init-capture forms its own full expression, which clears
+ // Actions.MaybeODRUseExprs. So create an expression evaluation context
+ // to save the necessary state, and restore it later.
+ EnterExpressionEvaluationContext EC(Actions,
+ Sema::PotentiallyEvaluated);
+
+ if (TryConsumeToken(tok::equal))
+ InitKind = LambdaCaptureInitKind::CopyInit;
+ else
+ InitKind = LambdaCaptureInitKind::ListInit;
+
+ if (!SkippedInits) {
+ Init = ParseInitializer();
+ } else if (Tok.is(tok::l_brace)) {
+ BalancedDelimiterTracker Braces(*this, tok::l_brace);
+ Braces.consumeOpen();
+ Braces.skipToEnd();
+ *SkippedInits = true;
+ } else {
+ // We're disambiguating this:
+ //
+ // [..., x = expr
+ //
+ // We need to find the end of the following expression in order to
+ // determine whether this is an Obj-C message send's receiver, a
+ // C99 designator, or a lambda init-capture.
+ //
+ // Parse the expression to find where it ends, and annotate it back
+ // onto the tokens. We would have parsed this expression the same way
+ // in either case: both the RHS of an init-capture and the RHS of an
+ // assignment expression are parsed as an initializer-clause, and in
+ // neither case can anything be added to the scope between the '[' and
+ // here.
+ //
+ // FIXME: This is horrible. Adding a mechanism to skip an expression
+ // would be much cleaner.
+ // FIXME: If there is a ',' before the next ']' or ':', we can skip to
+ // that instead. (And if we see a ':' with no matching '?', we can
+ // classify this as an Obj-C message send.)
+ SourceLocation StartLoc = Tok.getLocation();
+ InMessageExpressionRAIIObject MaybeInMessageExpression(*this, true);
+ Init = ParseInitializer();
+
+ if (Tok.getLocation() != StartLoc) {
+ // Back out the lexing of the token after the initializer.
+ PP.RevertCachedTokens(1);
+
+ // Replace the consumed tokens with an appropriate annotation.
+ Tok.setLocation(StartLoc);
+ Tok.setKind(tok::annot_primary_expr);
+ setExprAnnotation(Tok, Init);
+ Tok.setAnnotationEndLoc(PP.getLastCachedTokenLocation());
+ PP.AnnotateCachedTokens(Tok);
+
+ // Consume the annotated initializer.
+ ConsumeToken();
+ }
+ }
+ } else
+ TryConsumeToken(tok::ellipsis, EllipsisLoc);
+ }
+ // If this is an init capture, process the initialization expression
+ // right away. For lambda init-captures such as the following:
+ // const int x = 10;
+ // auto L = [i = x+1](int a) {
+ // return [j = x+2,
+ // &k = x](char b) { };
+ // };
+ // keep in mind that each lambda init-capture has to have:
+ // - its initialization expression executed in the context
+ // of the enclosing/parent decl-context.
+ // - but the variable itself has to be 'injected' into the
+ // decl-context of its lambda's call-operator (which has
+ // not yet been created).
+ // Each init-expression is a full-expression that has to get
+ // Sema-analyzed (for capturing etc.) before its lambda's
+ // call-operator's decl-context, scope & scopeinfo are pushed on their
+ // respective stacks. Thus if any variable is odr-used in the init-capture
+ // it will correctly get captured in the enclosing lambda, if one exists.
+      // The init-variables above are created later, once the lambda scope
+      // and the call-operator's decl-context have been pushed onto their
+      // respective stacks.
+
+      // Because the lambda init-capture's initializer expression occurs in
+      // the context of the enclosing function or lambda, we cannot wait
+      // until a lambda scope has been pushed before deciding whether the
+ // variable needs to be captured. We also need to process all
+ // lvalue-to-rvalue conversions and discarded-value conversions,
+ // so that we can avoid capturing certain constant variables.
+      // For example:
+ // void test() {
+ // const int x = 10;
+ // auto L = [&z = x](char a) { <-- don't capture by the current lambda
+ // return [y = x](int i) { <-- don't capture by enclosing lambda
+ // return y;
+ // }
+ // };
+ // If x was not const, the second use would require 'L' to capture, and
+ // that would be an error.
+
+ ParsedType InitCaptureType;
+ if (Init.isUsable()) {
+ // Get the pointer and store it in an lvalue, so we can use it as an
+ // out argument.
+ Expr *InitExpr = Init.get();
+ // This performs any lvalue-to-rvalue conversions if necessary, which
+ // can affect what gets captured in the containing decl-context.
+ InitCaptureType = Actions.actOnLambdaInitCaptureInitialization(
+ Loc, Kind == LCK_ByRef, Id, InitKind, InitExpr);
+ Init = InitExpr;
+ }
+ Intro.addCapture(Kind, Loc, Id, EllipsisLoc, InitKind, Init,
+ InitCaptureType);
+ }
+
+ T.consumeClose();
+ Intro.Range.setEnd(T.getCloseLocation());
+ return DiagResult();
+}
+
+/// TryParseLambdaIntroducer - Tentatively parse a lambda introducer.
+///
+/// Returns true if it hit something unexpected.
+bool Parser::TryParseLambdaIntroducer(LambdaIntroducer &Intro) {
+ TentativeParsingAction PA(*this);
+
+ bool SkippedInits = false;
+ Optional<unsigned> DiagID(ParseLambdaIntroducer(Intro, &SkippedInits));
+
+ if (DiagID) {
+ PA.Revert();
+ return true;
+ }
+
+ if (SkippedInits) {
+ // Parse it again, but this time parse the init-captures too.
+ PA.Revert();
+ Intro = LambdaIntroducer();
+ DiagID = ParseLambdaIntroducer(Intro);
+ assert(!DiagID && "parsing lambda-introducer failed on reparse");
+ return false;
+ }
+
+ PA.Commit();
+ return false;
+}
+
+/// ParseLambdaExpressionAfterIntroducer - Parse the rest of a lambda
+/// expression.
+ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
+ LambdaIntroducer &Intro) {
+ SourceLocation LambdaBeginLoc = Intro.Range.getBegin();
+ Diag(LambdaBeginLoc, diag::warn_cxx98_compat_lambda);
+
+ PrettyStackTraceLoc CrashInfo(PP.getSourceManager(), LambdaBeginLoc,
+ "lambda expression parsing");
+
+ // FIXME: Call into Actions to add any init-capture declarations to the
+ // scope while parsing the lambda-declarator and compound-statement.
+
+ // Parse lambda-declarator[opt].
+ DeclSpec DS(AttrFactory);
+ Declarator D(DS, Declarator::LambdaExprContext);
+ TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
+ Actions.PushLambdaScope();
+
+ TypeResult TrailingReturnType;
+ if (Tok.is(tok::l_paren)) {
+ ParseScope PrototypeScope(this,
+ Scope::FunctionPrototypeScope |
+ Scope::FunctionDeclarationScope |
+ Scope::DeclScope);
+
+ SourceLocation DeclEndLoc;
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ SourceLocation LParenLoc = T.getOpenLocation();
+
+ // Parse parameter-declaration-clause.
+ ParsedAttributes Attr(AttrFactory);
+ SmallVector<DeclaratorChunk::ParamInfo, 16> ParamInfo;
+ SourceLocation EllipsisLoc;
+
+ if (Tok.isNot(tok::r_paren)) {
+ Actions.RecordParsingTemplateParameterDepth(TemplateParameterDepth);
+ ParseParameterDeclarationClause(D, Attr, ParamInfo, EllipsisLoc);
+ // For a generic lambda, each 'auto' within the parameter declaration
+ // clause creates a template type parameter, so increment the depth.
+ if (Actions.getCurGenericLambda())
+ ++CurTemplateDepthTracker;
+ }
+ T.consumeClose();
+ SourceLocation RParenLoc = T.getCloseLocation();
+ DeclEndLoc = RParenLoc;
+
+ // GNU-style attributes must be parsed before the mutable specifier to be
+ // compatible with GCC.
+ MaybeParseGNUAttributes(Attr, &DeclEndLoc);
+
+ // MSVC-style attributes must be parsed before the mutable specifier to be
+ // compatible with MSVC.
+ MaybeParseMicrosoftDeclSpecs(Attr, &DeclEndLoc);
+
+ // Parse 'mutable'[opt].
+ SourceLocation MutableLoc;
+ if (TryConsumeToken(tok::kw_mutable, MutableLoc))
+ DeclEndLoc = MutableLoc;
+
+ // Parse exception-specification[opt].
+ ExceptionSpecificationType ESpecType = EST_None;
+ SourceRange ESpecRange;
+ SmallVector<ParsedType, 2> DynamicExceptions;
+ SmallVector<SourceRange, 2> DynamicExceptionRanges;
+ ExprResult NoexceptExpr;
+ CachedTokens *ExceptionSpecTokens;
+ ESpecType = tryParseExceptionSpecification(/*Delayed=*/false,
+ ESpecRange,
+ DynamicExceptions,
+ DynamicExceptionRanges,
+ NoexceptExpr,
+ ExceptionSpecTokens);
+
+ if (ESpecType != EST_None)
+ DeclEndLoc = ESpecRange.getEnd();
+
+ // Parse attribute-specifier[opt].
+ MaybeParseCXX11Attributes(Attr, &DeclEndLoc);
+
+ SourceLocation FunLocalRangeEnd = DeclEndLoc;
+
+ // Parse trailing-return-type[opt].
+ if (Tok.is(tok::arrow)) {
+ FunLocalRangeEnd = Tok.getLocation();
+ SourceRange Range;
+ TrailingReturnType = ParseTrailingReturnType(Range);
+ if (Range.getEnd().isValid())
+ DeclEndLoc = Range.getEnd();
+ }
+
+ PrototypeScope.Exit();
+
+ SourceLocation NoLoc;
+ D.AddTypeInfo(DeclaratorChunk::getFunction(/*hasProto=*/true,
+ /*isAmbiguous=*/false,
+ LParenLoc,
+ ParamInfo.data(), ParamInfo.size(),
+ EllipsisLoc, RParenLoc,
+ DS.getTypeQualifiers(),
+ /*RefQualifierIsLValueRef=*/true,
+ /*RefQualifierLoc=*/NoLoc,
+ /*ConstQualifierLoc=*/NoLoc,
+ /*VolatileQualifierLoc=*/NoLoc,
+ /*RestrictQualifierLoc=*/NoLoc,
+ MutableLoc,
+ ESpecType, ESpecRange,
+ DynamicExceptions.data(),
+ DynamicExceptionRanges.data(),
+ DynamicExceptions.size(),
+ NoexceptExpr.isUsable() ?
+ NoexceptExpr.get() : nullptr,
+ /*ExceptionSpecTokens*/nullptr,
+ LParenLoc, FunLocalRangeEnd, D,
+ TrailingReturnType),
+ Attr, DeclEndLoc);
+ } else if (Tok.isOneOf(tok::kw_mutable, tok::arrow, tok::kw___attribute) ||
+ (Tok.is(tok::l_square) && NextToken().is(tok::l_square))) {
+ // It's common to forget that one needs '()' before 'mutable', an attribute
+ // specifier, or the result type. Deal with this.
+ unsigned TokKind = 0;
+ switch (Tok.getKind()) {
+ case tok::kw_mutable: TokKind = 0; break;
+ case tok::arrow: TokKind = 1; break;
+ case tok::kw___attribute:
+ case tok::l_square: TokKind = 2; break;
+ default: llvm_unreachable("Unknown token kind");
+ }
+
+ Diag(Tok, diag::err_lambda_missing_parens)
+ << TokKind
+ << FixItHint::CreateInsertion(Tok.getLocation(), "() ");
+ SourceLocation DeclLoc = Tok.getLocation();
+ SourceLocation DeclEndLoc = DeclLoc;
+
+ // GNU-style attributes must be parsed before the mutable specifier to be
+ // compatible with GCC.
+ ParsedAttributes Attr(AttrFactory);
+ MaybeParseGNUAttributes(Attr, &DeclEndLoc);
+
+ // Parse 'mutable', if it's there.
+ SourceLocation MutableLoc;
+ if (Tok.is(tok::kw_mutable)) {
+ MutableLoc = ConsumeToken();
+ DeclEndLoc = MutableLoc;
+ }
+
+ // Parse attribute-specifier[opt].
+ MaybeParseCXX11Attributes(Attr, &DeclEndLoc);
+
+ // Parse the return type, if there is one.
+ if (Tok.is(tok::arrow)) {
+ SourceRange Range;
+ TrailingReturnType = ParseTrailingReturnType(Range);
+ if (Range.getEnd().isValid())
+ DeclEndLoc = Range.getEnd();
+ }
+
+ SourceLocation NoLoc;
+ D.AddTypeInfo(DeclaratorChunk::getFunction(/*hasProto=*/true,
+ /*isAmbiguous=*/false,
+ /*LParenLoc=*/NoLoc,
+ /*Params=*/nullptr,
+ /*NumParams=*/0,
+ /*EllipsisLoc=*/NoLoc,
+ /*RParenLoc=*/NoLoc,
+ /*TypeQuals=*/0,
+ /*RefQualifierIsLValueRef=*/true,
+ /*RefQualifierLoc=*/NoLoc,
+ /*ConstQualifierLoc=*/NoLoc,
+ /*VolatileQualifierLoc=*/NoLoc,
+ /*RestrictQualifierLoc=*/NoLoc,
+ MutableLoc,
+ EST_None,
+ /*ESpecRange=*/SourceRange(),
+ /*Exceptions=*/nullptr,
+ /*ExceptionRanges=*/nullptr,
+ /*NumExceptions=*/0,
+ /*NoexceptExpr=*/nullptr,
+ /*ExceptionSpecTokens=*/nullptr,
+ DeclLoc, DeclEndLoc, D,
+ TrailingReturnType),
+ Attr, DeclEndLoc);
+ }
+
+ // FIXME: Rename BlockScope -> ClosureScope if we decide to continue using
+ // it.
+ unsigned ScopeFlags = Scope::BlockScope | Scope::FnScope | Scope::DeclScope;
+ ParseScope BodyScope(this, ScopeFlags);
+
+ Actions.ActOnStartOfLambdaDefinition(Intro, D, getCurScope());
+
+ // Parse compound-statement.
+ if (!Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::err_expected_lambda_body);
+ Actions.ActOnLambdaError(LambdaBeginLoc, getCurScope());
+ return ExprError();
+ }
+
+ StmtResult Stmt(ParseCompoundStatementBody());
+ BodyScope.Exit();
+
+ if (!Stmt.isInvalid() && !TrailingReturnType.isInvalid())
+ return Actions.ActOnLambdaExpr(LambdaBeginLoc, Stmt.get(), getCurScope());
+
+ Actions.ActOnLambdaError(LambdaBeginLoc, getCurScope());
+ return ExprError();
+}
+
+/// ParseCXXCasts - This handles the various ways to cast expressions to another
+/// type.
+///
+/// postfix-expression: [C++ 5.2p1]
+/// 'dynamic_cast' '<' type-name '>' '(' expression ')'
+/// 'static_cast' '<' type-name '>' '(' expression ')'
+/// 'reinterpret_cast' '<' type-name '>' '(' expression ')'
+/// 'const_cast' '<' type-name '>' '(' expression ')'
+///
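+/// For example (an illustrative sketch; 'Derived' and 'basePtr' are
+/// assumed names):
+/// \code
+///   Derived *d = dynamic_cast<Derived *>(basePtr);
+///   int n = static_cast<int>(3.5);
+/// \endcode
+///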
+ExprResult Parser::ParseCXXCasts() {
+ tok::TokenKind Kind = Tok.getKind();
+ const char *CastName = nullptr; // For error messages
+
+ switch (Kind) {
+ default: llvm_unreachable("Unknown C++ cast!");
+ case tok::kw_const_cast: CastName = "const_cast"; break;
+ case tok::kw_dynamic_cast: CastName = "dynamic_cast"; break;
+ case tok::kw_reinterpret_cast: CastName = "reinterpret_cast"; break;
+ case tok::kw_static_cast: CastName = "static_cast"; break;
+ }
+
+ SourceLocation OpLoc = ConsumeToken();
+ SourceLocation LAngleBracketLoc = Tok.getLocation();
+
+ // Check for "<::" which is parsed as "[:". If found, fix token stream,
+ // diagnose error, suggest fix, and recover parsing.
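+ // For example (illustrative): in 'static_cast<::T>(x)' the lexer forms
+ // the '<:' digraph, so we see '[' ':' here and must split it back into
+ // '<' '::'.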
+ if (Tok.is(tok::l_square) && Tok.getLength() == 2) {
+ Token Next = NextToken();
+ if (Next.is(tok::colon) && areTokensAdjacent(Tok, Next))
+ FixDigraph(*this, PP, Tok, Next, Kind, /*AtDigraph*/true);
+ }
+
+ if (ExpectAndConsume(tok::less, diag::err_expected_less_after, CastName))
+ return ExprError();
+
+ // Parse the common declaration-specifiers piece.
+ DeclSpec DS(AttrFactory);
+ ParseSpecifierQualifierList(DS);
+
+ // Parse the abstract-declarator, if present.
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ ParseDeclarator(DeclaratorInfo);
+
+ SourceLocation RAngleBracketLoc = Tok.getLocation();
+
+ if (ExpectAndConsume(tok::greater))
+ return ExprError(Diag(LAngleBracketLoc, diag::note_matching) << tok::less);
+
+ SourceLocation LParenLoc, RParenLoc;
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+
+ if (T.expectAndConsume(diag::err_expected_lparen_after, CastName))
+ return ExprError();
+
+ ExprResult Result = ParseExpression();
+
+ // Match the ')'.
+ T.consumeClose();
+
+ if (!Result.isInvalid() && !DeclaratorInfo.isInvalidType())
+ Result = Actions.ActOnCXXNamedCast(OpLoc, Kind,
+ LAngleBracketLoc, DeclaratorInfo,
+ RAngleBracketLoc,
+ T.getOpenLocation(), Result.get(),
+ T.getCloseLocation());
+
+ return Result;
+}
+
+/// ParseCXXTypeid - This handles the C++ typeid expression.
+///
+/// postfix-expression: [C++ 5.2p1]
+/// 'typeid' '(' expression ')'
+/// 'typeid' '(' type-id ')'
+///
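+/// For example (illustrative): 'typeid(int)' supplies a type-id, while
+/// 'typeid(*p)' supplies an expression ('p' is an assumed name).
+///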
+ExprResult Parser::ParseCXXTypeid() {
+ assert(Tok.is(tok::kw_typeid) && "Not 'typeid'!");
+
+ SourceLocation OpLoc = ConsumeToken();
+ SourceLocation LParenLoc, RParenLoc;
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+
+ // typeid expressions are always parenthesized.
+ if (T.expectAndConsume(diag::err_expected_lparen_after, "typeid"))
+ return ExprError();
+ LParenLoc = T.getOpenLocation();
+
+ ExprResult Result;
+
+ // C++0x [expr.typeid]p3:
+ // When typeid is applied to an expression other than an lvalue of a
+ // polymorphic class type [...] The expression is an unevaluated
+ // operand (Clause 5).
+ //
+ // Note that we can't tell whether the expression is an lvalue of a
+ // polymorphic class type until after we've parsed the expression; we
+ // speculatively assume the subexpression is unevaluated, and fix it up
+ // later.
+ //
+ // We enter the unevaluated context before trying to determine whether we
+ // have a type-id, because the tentative parse logic will try to resolve
+ // names, and must treat them as unevaluated.
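+ //
+ // For example (illustrative): in 'typeid(*p)' where '*p' is an lvalue of
+ // polymorphic class type, the operand is in fact evaluated; Sema corrects
+ // the speculative unevaluated context after parsing.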
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated,
+ Sema::ReuseLambdaContextDecl);
+
+ if (isTypeIdInParens()) {
+ TypeResult Ty = ParseTypeName();
+
+ // Match the ')'.
+ T.consumeClose();
+ RParenLoc = T.getCloseLocation();
+ if (Ty.isInvalid() || RParenLoc.isInvalid())
+ return ExprError();
+
+ Result = Actions.ActOnCXXTypeid(OpLoc, LParenLoc, /*isType=*/true,
+ Ty.get().getAsOpaquePtr(), RParenLoc);
+ } else {
+ Result = ParseExpression();
+
+ // Match the ')'.
+ if (Result.isInvalid())
+ SkipUntil(tok::r_paren, StopAtSemi);
+ else {
+ T.consumeClose();
+ RParenLoc = T.getCloseLocation();
+ if (RParenLoc.isInvalid())
+ return ExprError();
+
+ Result = Actions.ActOnCXXTypeid(OpLoc, LParenLoc, /*isType=*/false,
+ Result.get(), RParenLoc);
+ }
+ }
+
+ return Result;
+}
+
+/// ParseCXXUuidof - This handles the Microsoft C++ __uuidof expression.
+///
+/// '__uuidof' '(' expression ')'
+/// '__uuidof' '(' type-id ')'
+///
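+/// For example (illustrative): '__uuidof(IUnknown)' supplies a type-id,
+/// while '__uuidof(*pUnk)' supplies an expression ('pUnk' is an assumed
+/// name).
+///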
+ExprResult Parser::ParseCXXUuidof() {
+ assert(Tok.is(tok::kw___uuidof) && "Not '__uuidof'!");
+
+ SourceLocation OpLoc = ConsumeToken();
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+
+ // __uuidof expressions are always parenthesized.
+ if (T.expectAndConsume(diag::err_expected_lparen_after, "__uuidof"))
+ return ExprError();
+
+ ExprResult Result;
+
+ if (isTypeIdInParens()) {
+ TypeResult Ty = ParseTypeName();
+
+ // Match the ')'.
+ T.consumeClose();
+
+ if (Ty.isInvalid())
+ return ExprError();
+
+ Result = Actions.ActOnCXXUuidof(OpLoc, T.getOpenLocation(), /*isType=*/true,
+ Ty.get().getAsOpaquePtr(),
+ T.getCloseLocation());
+ } else {
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
+ Result = ParseExpression();
+
+ // Match the ')'.
+ if (Result.isInvalid())
+ SkipUntil(tok::r_paren, StopAtSemi);
+ else {
+ T.consumeClose();
+
+ Result = Actions.ActOnCXXUuidof(OpLoc, T.getOpenLocation(),
+ /*isType=*/false,
+ Result.get(), T.getCloseLocation());
+ }
+ }
+
+ return Result;
+}
+
+/// \brief Parse a C++ pseudo-destructor expression after the base,
+/// . or -> operator, and nested-name-specifier have already been
+/// parsed.
+///
+/// postfix-expression: [C++ 5.2]
+/// postfix-expression . pseudo-destructor-name
+/// postfix-expression -> pseudo-destructor-name
+///
+/// pseudo-destructor-name:
+/// ::[opt] nested-name-specifier[opt] type-name :: ~type-name
+/// ::[opt] nested-name-specifier template simple-template-id ::
+/// ~type-name
+/// ::[opt] nested-name-specifier[opt] ~type-name
+///
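+/// For example (an illustrative sketch): given 'typedef int T;' and
+/// 'T *p;', both 'p->~T()' and 'p->T::~T()' end in pseudo-destructor-names
+/// parsed by this routine.
+///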
+ExprResult
+Parser::ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ CXXScopeSpec &SS,
+ ParsedType ObjectType) {
+ // We're parsing either a pseudo-destructor-name or a dependent
+ // member access that has the same form as a
+ // pseudo-destructor-name. We parse both in the same way and let
+ // the action model sort them out.
+ //
+ // Note that the ::[opt] nested-name-specifier[opt] has already
+ // been parsed, and if there was a simple-template-id, it has
+ // been coalesced into a template-id annotation token.
+ UnqualifiedId FirstTypeName;
+ SourceLocation CCLoc;
+ if (Tok.is(tok::identifier)) {
+ FirstTypeName.setIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
+ ConsumeToken();
+ assert(Tok.is(tok::coloncolon) && "ParseOptionalCXXScopeSpecifier fail");
+ CCLoc = ConsumeToken();
+ } else if (Tok.is(tok::annot_template_id)) {
+ // FIXME: retrieve TemplateKWLoc from template-id annotation and
+ // store it in the pseudo-dtor node (to be used when instantiating it).
+ FirstTypeName.setTemplateId(
+ (TemplateIdAnnotation *)Tok.getAnnotationValue());
+ ConsumeToken();
+ assert(Tok.is(tok::coloncolon) && "ParseOptionalCXXScopeSpecifier fail");
+ CCLoc = ConsumeToken();
+ } else {
+ FirstTypeName.setIdentifier(nullptr, SourceLocation());
+ }
+
+ // Parse the tilde.
+ assert(Tok.is(tok::tilde) && "ParseOptionalCXXScopeSpecifier fail");
+ SourceLocation TildeLoc = ConsumeToken();
+
+ if (Tok.is(tok::kw_decltype) && !FirstTypeName.isValid() && SS.isEmpty()) {
+ DeclSpec DS(AttrFactory);
+ ParseDecltypeSpecifier(DS);
+ if (DS.getTypeSpecType() == TST_error)
+ return ExprError();
+ return Actions.ActOnPseudoDestructorExpr(getCurScope(), Base, OpLoc, OpKind,
+ TildeLoc, DS);
+ }
+
+ if (!Tok.is(tok::identifier)) {
+ Diag(Tok, diag::err_destructor_tilde_identifier);
+ return ExprError();
+ }
+
+ // Parse the second type.
+ UnqualifiedId SecondTypeName;
+ IdentifierInfo *Name = Tok.getIdentifierInfo();
+ SourceLocation NameLoc = ConsumeToken();
+ SecondTypeName.setIdentifier(Name, NameLoc);
+
+ // If there is a '<', the second type name is a template-id. Parse
+ // it as such.
+ if (Tok.is(tok::less) &&
+ ParseUnqualifiedIdTemplateId(SS, SourceLocation(),
+ Name, NameLoc,
+ false, ObjectType, SecondTypeName,
+ /*AssumeTemplateName=*/true))
+ return ExprError();
+
+ return Actions.ActOnPseudoDestructorExpr(getCurScope(), Base, OpLoc, OpKind,
+ SS, FirstTypeName, CCLoc, TildeLoc,
+ SecondTypeName);
+}
+
+/// ParseCXXBoolLiteral - This handles the C++ Boolean literals.
+///
+/// boolean-literal: [C++ 2.13.5]
+/// 'true'
+/// 'false'
+ExprResult Parser::ParseCXXBoolLiteral() {
+ tok::TokenKind Kind = Tok.getKind();
+ return Actions.ActOnCXXBoolLiteral(ConsumeToken(), Kind);
+}
+
+/// ParseThrowExpression - This handles the C++ throw expression.
+///
+/// throw-expression: [C++ 15]
+/// 'throw' assignment-expression[opt]
+ExprResult Parser::ParseThrowExpression() {
+ assert(Tok.is(tok::kw_throw) && "Not throw!");
+ SourceLocation ThrowLoc = ConsumeToken(); // Eat the throw token.
+
+ // If the current token isn't the start of an assignment-expression,
+ // then the expression is not present. This handles things like:
+ // "C ? throw : (void)42", which is crazy but legal.
+ switch (Tok.getKind()) { // FIXME: move this predicate somewhere common.
+ case tok::semi:
+ case tok::r_paren:
+ case tok::r_square:
+ case tok::r_brace:
+ case tok::colon:
+ case tok::comma:
+ return Actions.ActOnCXXThrow(getCurScope(), ThrowLoc, nullptr);
+
+ default:
+ ExprResult Expr(ParseAssignmentExpression());
+ if (Expr.isInvalid()) return Expr;
+ return Actions.ActOnCXXThrow(getCurScope(), ThrowLoc, Expr.get());
+ }
+}
+
+/// \brief Parse the C++ Coroutines co_yield expression.
+///
+/// co_yield-expression:
+/// 'co_yield' assignment-expression[opt]
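+///
+/// For example (illustrative): 'co_yield 42;' and 'co_yield { 1, 2 };'
+/// (with a braced-init-list operand) both reach this parser.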
+ExprResult Parser::ParseCoyieldExpression() {
+ assert(Tok.is(tok::kw_co_yield) && "Not co_yield!");
+
+ SourceLocation Loc = ConsumeToken();
+ ExprResult Expr = Tok.is(tok::l_brace) ? ParseBraceInitializer()
+ : ParseAssignmentExpression();
+ if (!Expr.isInvalid())
+ Expr = Actions.ActOnCoyieldExpr(getCurScope(), Loc, Expr.get());
+ return Expr;
+}
+
+/// ParseCXXThis - This handles the C++ 'this' pointer.
+///
+/// C++ 9.3.2: In the body of a non-static member function, the keyword this is
+/// a non-lvalue expression whose value is the address of the object for which
+/// the function is called.
+ExprResult Parser::ParseCXXThis() {
+ assert(Tok.is(tok::kw_this) && "Not 'this'!");
+ SourceLocation ThisLoc = ConsumeToken();
+ return Actions.ActOnCXXThis(ThisLoc);
+}
+
+/// ParseCXXTypeConstructExpression - Parse construction of a specified type.
+/// Can be interpreted either as function-style casting ("int(x)")
+/// or class type construction ("ClassType(x,y,z)")
+/// or creation of a value-initialized type ("int()").
+/// See [C++ 5.2.3].
+///
+/// postfix-expression: [C++ 5.2p1]
+/// simple-type-specifier '(' expression-list[opt] ')'
+/// [C++0x] simple-type-specifier braced-init-list
+/// typename-specifier '(' expression-list[opt] ')'
+/// [C++0x] typename-specifier braced-init-list
+///
+ExprResult
+Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ ParsedType TypeRep = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo).get();
+
+ assert((Tok.is(tok::l_paren) ||
+ (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)))
+ && "Expected '(' or '{'!");
+
+ if (Tok.is(tok::l_brace)) {
+ ExprResult Init = ParseBraceInitializer();
+ if (Init.isInvalid())
+ return Init;
+ Expr *InitList = Init.get();
+ return Actions.ActOnCXXTypeConstructExpr(TypeRep, SourceLocation(),
+ MultiExprArg(&InitList, 1),
+ SourceLocation());
+ } else {
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ ExprVector Exprs;
+ CommaLocsTy CommaLocs;
+
+ if (Tok.isNot(tok::r_paren)) {
+ if (ParseExpressionList(Exprs, CommaLocs, [&] {
+ Actions.CodeCompleteConstructor(getCurScope(),
+ TypeRep.get()->getCanonicalTypeInternal(),
+ DS.getLocEnd(), Exprs);
+ })) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+ }
+
+ // Match the ')'.
+ T.consumeClose();
+
+ // TypeRep could be null, if it references an invalid typedef.
+ if (!TypeRep)
+ return ExprError();
+
+ assert((Exprs.size() == 0 || Exprs.size()-1 == CommaLocs.size()) &&
+ "Unexpected number of commas!");
+ return Actions.ActOnCXXTypeConstructExpr(TypeRep, T.getOpenLocation(),
+ Exprs,
+ T.getCloseLocation());
+ }
+}
+
+/// ParseCXXCondition - if/switch/while condition expression.
+///
+/// condition:
+/// expression
+/// type-specifier-seq declarator '=' assignment-expression
+/// [C++11] type-specifier-seq declarator '=' initializer-clause
+/// [C++11] type-specifier-seq declarator braced-init-list
+/// [GNU] type-specifier-seq declarator simple-asm-expr[opt] attributes[opt]
+/// '=' assignment-expression
+///
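+/// For example (an illustrative sketch; 'lookup' and 'use' are assumed
+/// names):
+/// \code
+///   if (int *p = lookup(42)) use(*p);   // condition is a declaration
+///   while (n > 0) --n;                  // condition is an expression
+/// \endcode
+///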
+/// \param ExprOut if the condition was parsed as an expression, the parsed
+/// expression.
+///
+/// \param DeclOut if the condition was parsed as a declaration, the parsed
+/// declaration.
+///
+/// \param Loc The location of the start of the statement that requires this
+/// condition, e.g., the "for" in a for loop.
+///
+/// \param ConvertToBoolean Whether the condition expression should be
+/// converted to a boolean value.
+///
+/// \returns true if there was a parsing error, false otherwise.
+bool Parser::ParseCXXCondition(ExprResult &ExprOut,
+ Decl *&DeclOut,
+ SourceLocation Loc,
+ bool ConvertToBoolean) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Condition);
+ cutOffParsing();
+ return true;
+ }
+
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX11Attributes(attrs);
+
+ if (!isCXXConditionDeclaration()) {
+ ProhibitAttributes(attrs);
+
+ // Parse the expression.
+ ExprOut = ParseExpression(); // expression
+ DeclOut = nullptr;
+ if (ExprOut.isInvalid())
+ return true;
+
+ // If required, convert to a boolean value.
+ if (ConvertToBoolean)
+ ExprOut
+ = Actions.ActOnBooleanCondition(getCurScope(), Loc, ExprOut.get());
+ return ExprOut.isInvalid();
+ }
+
+ // type-specifier-seq
+ DeclSpec DS(AttrFactory);
+ DS.takeAttributesFrom(attrs);
+ ParseSpecifierQualifierList(DS, AS_none, DSC_condition);
+
+ // declarator
+ Declarator DeclaratorInfo(DS, Declarator::ConditionContext);
+ ParseDeclarator(DeclaratorInfo);
+
+ // simple-asm-expr[opt]
+ if (Tok.is(tok::kw_asm)) {
+ SourceLocation Loc;
+ ExprResult AsmLabel(ParseSimpleAsm(&Loc));
+ if (AsmLabel.isInvalid()) {
+ SkipUntil(tok::semi, StopAtSemi);
+ return true;
+ }
+ DeclaratorInfo.setAsmLabel(AsmLabel.get());
+ DeclaratorInfo.SetRangeEnd(Loc);
+ }
+
+ // If attributes are present, parse them.
+ MaybeParseGNUAttributes(DeclaratorInfo);
+
+ // Type-check the declaration itself.
+ DeclResult Dcl = Actions.ActOnCXXConditionDeclaration(getCurScope(),
+ DeclaratorInfo);
+ DeclOut = Dcl.get();
+ ExprOut = ExprError();
+
+ // '=' assignment-expression
+ // If a '==' or '+=' is found, suggest a fixit to '='.
+ bool CopyInitialization = isTokenEqualOrEqualTypo();
+ if (CopyInitialization)
+ ConsumeToken();
+
+ ExprResult InitExpr = ExprError();
+ if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
+ Diag(Tok.getLocation(),
+ diag::warn_cxx98_compat_generalized_initializer_lists);
+ InitExpr = ParseBraceInitializer();
+ } else if (CopyInitialization) {
+ InitExpr = ParseAssignmentExpression();
+ } else if (Tok.is(tok::l_paren)) {
+ // This was probably an attempt to initialize the variable.
+ SourceLocation LParen = ConsumeParen(), RParen = LParen;
+ if (SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch))
+ RParen = ConsumeParen();
+ Diag(DeclOut ? DeclOut->getLocation() : LParen,
+ diag::err_expected_init_in_condition_lparen)
+ << SourceRange(LParen, RParen);
+ } else {
+ Diag(DeclOut ? DeclOut->getLocation() : Tok.getLocation(),
+ diag::err_expected_init_in_condition);
+ }
+
+ if (!InitExpr.isInvalid())
+ Actions.AddInitializerToDecl(DeclOut, InitExpr.get(), !CopyInitialization,
+ DS.containsPlaceholderType());
+ else
+ Actions.ActOnInitializerError(DeclOut);
+
+ // FIXME: Build a reference to this declaration? Convert it to bool?
+ // (This is currently handled by Sema).
+
+ Actions.FinalizeDeclaration(DeclOut);
+
+ return false;
+}
+
+/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
+/// This should only be called when the current token is known to be part of
+/// simple-type-specifier.
+///
+/// simple-type-specifier:
+/// '::'[opt] nested-name-specifier[opt] type-name
+/// '::'[opt] nested-name-specifier 'template' simple-template-id [TODO]
+/// char
+/// wchar_t
+/// bool
+/// short
+/// int
+/// long
+/// signed
+/// unsigned
+/// float
+/// double
+/// void
+/// [GNU] typeof-specifier
+/// [C++0x] auto [TODO]
+///
+/// type-name:
+/// class-name
+/// enum-name
+/// typedef-name
+///
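+/// For example (illustrative): in the function-style cast 'unsigned(x)',
+/// 'unsigned' is the simple-type-specifier consumed by this routine.
+///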
+void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
+ DS.SetRangeStart(Tok.getLocation());
+ const char *PrevSpec;
+ unsigned DiagID;
+ SourceLocation Loc = Tok.getLocation();
+ const clang::PrintingPolicy &Policy =
+ Actions.getASTContext().getPrintingPolicy();
+
+ switch (Tok.getKind()) {
+ case tok::identifier: // foo::bar
+ case tok::coloncolon: // ::foo::bar
+ llvm_unreachable("Annotation token should already be formed!");
+ default:
+ llvm_unreachable("Not a simple-type-specifier token!");
+
+ // type-name
+ case tok::annot_typename: {
+ if (getTypeAnnotation(Tok))
+ DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec, DiagID,
+ getTypeAnnotation(Tok), Policy);
+ else
+ DS.SetTypeSpecError();
+
+ DS.SetRangeEnd(Tok.getAnnotationEndLoc());
+ ConsumeToken();
+
+ DS.Finish(Actions, Policy);
+ return;
+ }
+
+ // builtin types
+ case tok::kw_short:
+ DS.SetTypeSpecWidth(DeclSpec::TSW_short, Loc, PrevSpec, DiagID, Policy);
+ break;
+ case tok::kw_long:
+ DS.SetTypeSpecWidth(DeclSpec::TSW_long, Loc, PrevSpec, DiagID, Policy);
+ break;
+ case tok::kw___int64:
+ DS.SetTypeSpecWidth(DeclSpec::TSW_longlong, Loc, PrevSpec, DiagID, Policy);
+ break;
+ case tok::kw_signed:
+ DS.SetTypeSpecSign(DeclSpec::TSS_signed, Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw_unsigned:
+ DS.SetTypeSpecSign(DeclSpec::TSS_unsigned, Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw_void:
+ DS.SetTypeSpecType(DeclSpec::TST_void, Loc, PrevSpec, DiagID, Policy);
+ break;
+ case tok::kw_char:
+ DS.SetTypeSpecType(DeclSpec::TST_char, Loc, PrevSpec, DiagID, Policy);
+ break;
+ case tok::kw_int:
+ DS.SetTypeSpecType(DeclSpec::TST_int, Loc, PrevSpec, DiagID, Policy);
+ break;
+ case tok::kw___int128:
+ DS.SetTypeSpecType(DeclSpec::TST_int128, Loc, PrevSpec, DiagID, Policy);
+ break;
+ case tok::kw_half:
+ DS.SetTypeSpecType(DeclSpec::TST_half, Loc, PrevSpec, DiagID, Policy);
+ break;
+ case tok::kw_float:
+ DS.SetTypeSpecType(DeclSpec::TST_float, Loc, PrevSpec, DiagID, Policy);
+ break;
+ case tok::kw_double:
+ DS.SetTypeSpecType(DeclSpec::TST_double, Loc, PrevSpec, DiagID, Policy);
+ break;
+ case tok::kw_wchar_t:
+ DS.SetTypeSpecType(DeclSpec::TST_wchar, Loc, PrevSpec, DiagID, Policy);
+ break;
+ case tok::kw_char16_t:
+ DS.SetTypeSpecType(DeclSpec::TST_char16, Loc, PrevSpec, DiagID, Policy);
+ break;
+ case tok::kw_char32_t:
+ DS.SetTypeSpecType(DeclSpec::TST_char32, Loc, PrevSpec, DiagID, Policy);
+ break;
+ case tok::kw_bool:
+ DS.SetTypeSpecType(DeclSpec::TST_bool, Loc, PrevSpec, DiagID, Policy);
+ break;
+ case tok::annot_decltype:
+ case tok::kw_decltype:
+ DS.SetRangeEnd(ParseDecltypeSpecifier(DS));
+ return DS.Finish(Actions, Policy);
+
+ // GNU typeof support.
+ case tok::kw_typeof:
+ ParseTypeofSpecifier(DS);
+ DS.Finish(Actions, Policy);
+ return;
+ }
+ if (Tok.is(tok::annot_typename))
+ DS.SetRangeEnd(Tok.getAnnotationEndLoc());
+ else
+ DS.SetRangeEnd(Tok.getLocation());
+ ConsumeToken();
+ DS.Finish(Actions, Policy);
+}
+
+/// ParseCXXTypeSpecifierSeq - Parse a C++ type-specifier-seq (C++
+/// [dcl.name]), which is a non-empty sequence of type-specifiers,
+/// e.g., "const short int". Note that the DeclSpec is *not* finished
+/// by parsing the type-specifier-seq, because these sequences are
+/// typically followed by some form of declarator. Returns true and
+/// emits diagnostics if this is not a type-specifier-seq, false
+/// otherwise.
+///
+/// type-specifier-seq: [C++ 8.1]
+/// type-specifier type-specifier-seq[opt]
+///
+bool Parser::ParseCXXTypeSpecifierSeq(DeclSpec &DS) {
+ ParseSpecifierQualifierList(DS, AS_none, DSC_type_specifier);
+ DS.Finish(Actions, Actions.getASTContext().getPrintingPolicy());
+ return false;
+}
+
+/// \brief Finish parsing a C++ unqualified-id that is a template-id of
+/// some form.
+///
+/// This routine is invoked when a '<' is encountered after an identifier or
+/// operator-function-id is parsed by \c ParseUnqualifiedId() to determine
+/// whether the unqualified-id is actually a template-id. This routine will
+/// then parse the template arguments and form the appropriate template-id to
+/// return to the caller.
+///
+/// \param SS the nested-name-specifier that precedes this template-id, if
+/// we're actually parsing a qualified-id.
+///
+/// \param Name for constructor and destructor names, this is the actual
+/// identifier that may be a template-name.
+///
+/// \param NameLoc the location of the class-name in a constructor or
+/// destructor.
+///
+/// \param EnteringContext whether we're entering the scope of the
+/// nested-name-specifier.
+///
+/// \param ObjectType if this unqualified-id occurs within a member access
+/// expression, the type of the base object whose member is being accessed.
+///
+/// \param Id as input, describes the template-name or operator-function-id
+/// that precedes the '<'. If template arguments were parsed successfully,
+/// will be updated with the template-id.
+///
+/// \param AssumeTemplateId When true, this routine will assume that the name
+/// refers to a template without performing name lookup to verify.
+///
+/// \returns true if a parse error occurred, false otherwise.
+bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ IdentifierInfo *Name,
+ SourceLocation NameLoc,
+ bool EnteringContext,
+ ParsedType ObjectType,
+ UnqualifiedId &Id,
+ bool AssumeTemplateId) {
+ assert((AssumeTemplateId || Tok.is(tok::less)) &&
+ "Expected '<' to finish parsing a template-id");
+
+ TemplateTy Template;
+ TemplateNameKind TNK = TNK_Non_template;
+ switch (Id.getKind()) {
+ case UnqualifiedId::IK_Identifier:
+ case UnqualifiedId::IK_OperatorFunctionId:
+ case UnqualifiedId::IK_LiteralOperatorId:
+ if (AssumeTemplateId) {
+ TNK = Actions.ActOnDependentTemplateName(getCurScope(), SS, TemplateKWLoc,
+ Id, ObjectType, EnteringContext,
+ Template);
+ if (TNK == TNK_Non_template)
+ return true;
+ } else {
+ bool MemberOfUnknownSpecialization;
+ TNK = Actions.isTemplateName(getCurScope(), SS,
+ TemplateKWLoc.isValid(), Id,
+ ObjectType, EnteringContext, Template,
+ MemberOfUnknownSpecialization);
+
+ if (TNK == TNK_Non_template && MemberOfUnknownSpecialization &&
+ ObjectType && IsTemplateArgumentList()) {
+ // We have something like t->getAs<T>(), where getAs is a
+ // member of an unknown specialization. However, this will only
+ // parse correctly as a template, so suggest the keyword 'template'
+ // before 'getAs' and treat this as a dependent template name.
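+ // For example (illustrative): 't->getAs<int>()' is recovered as if the
+ // user had written 't->template getAs<int>()'.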
+ std::string Name;
+ if (Id.getKind() == UnqualifiedId::IK_Identifier)
+ Name = Id.Identifier->getName();
+ else {
+ Name = "operator ";
+ if (Id.getKind() == UnqualifiedId::IK_OperatorFunctionId)
+ Name += getOperatorSpelling(Id.OperatorFunctionId.Operator);
+ else
+ Name += Id.Identifier->getName();
+ }
+ Diag(Id.StartLocation, diag::err_missing_dependent_template_keyword)
+ << Name
+ << FixItHint::CreateInsertion(Id.StartLocation, "template ");
+ TNK = Actions.ActOnDependentTemplateName(getCurScope(),
+ SS, TemplateKWLoc, Id,
+ ObjectType, EnteringContext,
+ Template);
+ if (TNK == TNK_Non_template)
+ return true;
+ }
+ }
+ break;
+
+ case UnqualifiedId::IK_ConstructorName: {
+ UnqualifiedId TemplateName;
+ bool MemberOfUnknownSpecialization;
+ TemplateName.setIdentifier(Name, NameLoc);
+ TNK = Actions.isTemplateName(getCurScope(), SS, TemplateKWLoc.isValid(),
+ TemplateName, ObjectType,
+ EnteringContext, Template,
+ MemberOfUnknownSpecialization);
+ break;
+ }
+
+ case UnqualifiedId::IK_DestructorName: {
+ UnqualifiedId TemplateName;
+ bool MemberOfUnknownSpecialization;
+ TemplateName.setIdentifier(Name, NameLoc);
+ if (ObjectType) {
+ TNK = Actions.ActOnDependentTemplateName(getCurScope(),
+ SS, TemplateKWLoc, TemplateName,
+ ObjectType, EnteringContext,
+ Template);
+ if (TNK == TNK_Non_template)
+ return true;
+ } else {
+ TNK = Actions.isTemplateName(getCurScope(), SS, TemplateKWLoc.isValid(),
+ TemplateName, ObjectType,
+ EnteringContext, Template,
+ MemberOfUnknownSpecialization);
+
+ if (TNK == TNK_Non_template && !Id.DestructorName.get()) {
+ Diag(NameLoc, diag::err_destructor_template_id)
+ << Name << SS.getRange();
+ return true;
+ }
+ }
+ break;
+ }
+
+ default:
+ return false;
+ }
+
+ if (TNK == TNK_Non_template)
+ return false;
+
+ // Parse the enclosed template argument list.
+ SourceLocation LAngleLoc, RAngleLoc;
+ TemplateArgList TemplateArgs;
+ if (Tok.is(tok::less) &&
+ ParseTemplateIdAfterTemplateName(Template, Id.StartLocation,
+ SS, true, LAngleLoc,
+ TemplateArgs,
+ RAngleLoc))
+ return true;
+
+ if (Id.getKind() == UnqualifiedId::IK_Identifier ||
+ Id.getKind() == UnqualifiedId::IK_OperatorFunctionId ||
+ Id.getKind() == UnqualifiedId::IK_LiteralOperatorId) {
+ // Form a parsed representation of the template-id to be stored in the
+ // UnqualifiedId.
+ TemplateIdAnnotation *TemplateId
+ = TemplateIdAnnotation::Allocate(TemplateArgs.size(), TemplateIds);
+
+ // FIXME: Store name for literal operator too.
+ if (Id.getKind() == UnqualifiedId::IK_Identifier) {
+ TemplateId->Name = Id.Identifier;
+ TemplateId->Operator = OO_None;
+ TemplateId->TemplateNameLoc = Id.StartLocation;
+ } else {
+ TemplateId->Name = nullptr;
+ TemplateId->Operator = Id.OperatorFunctionId.Operator;
+ TemplateId->TemplateNameLoc = Id.StartLocation;
+ }
+
+ TemplateId->SS = SS;
+ TemplateId->TemplateKWLoc = TemplateKWLoc;
+ TemplateId->Template = Template;
+ TemplateId->Kind = TNK;
+ TemplateId->LAngleLoc = LAngleLoc;
+ TemplateId->RAngleLoc = RAngleLoc;
+ ParsedTemplateArgument *Args = TemplateId->getTemplateArgs();
+ for (unsigned Arg = 0, ArgEnd = TemplateArgs.size();
+ Arg != ArgEnd; ++Arg)
+ Args[Arg] = TemplateArgs[Arg];
+
+ Id.setTemplateId(TemplateId);
+ return false;
+ }
+
+ // Bundle the template arguments together.
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateArgs);
+
+ // Constructor and destructor names.
+ TypeResult Type
+ = Actions.ActOnTemplateIdType(SS, TemplateKWLoc,
+ Template, NameLoc,
+ LAngleLoc, TemplateArgsPtr, RAngleLoc,
+ /*IsCtorOrDtorName=*/true);
+ if (Type.isInvalid())
+ return true;
+
+ if (Id.getKind() == UnqualifiedId::IK_ConstructorName)
+ Id.setConstructorName(Type.get(), NameLoc, RAngleLoc);
+ else
+ Id.setDestructorName(Id.StartLocation, Type.get(), RAngleLoc);
+
+ return false;
+}
+
+/// \brief Parse an operator-function-id or conversion-function-id as part
+/// of a C++ unqualified-id.
+///
+/// This routine is responsible only for parsing the operator-function-id or
+/// conversion-function-id; it does not handle template arguments in any way.
+///
+/// \code
+/// operator-function-id: [C++ 13.5]
+/// 'operator' operator
+///
+/// operator: one of
+/// new delete new[] delete[]
+/// + - * / % ^ & | ~
+/// ! = < > += -= *= /= %=
+/// ^= &= |= << >> >>= <<= == !=
+/// <= >= && || ++ -- , ->* ->
+/// () []
+///
+/// conversion-function-id: [C++ 12.3.2]
+/// operator conversion-type-id
+///
+/// conversion-type-id:
+/// type-specifier-seq conversion-declarator[opt]
+///
+/// conversion-declarator:
+/// ptr-operator conversion-declarator[opt]
+/// \endcode
+///
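+/// For example (illustrative): 'operator+' and 'operator new[]' are
+/// operator-function-ids, 'operator int*' is a conversion-function-id,
+/// and 'operator ""_w' (C++11) is a literal-operator-id, which this
+/// routine also handles.
+///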
+/// \param SS The nested-name-specifier that preceded this unqualified-id. If
+/// non-empty, then we are parsing the unqualified-id of a qualified-id.
+///
+/// \param EnteringContext whether we are entering the scope of the
+/// nested-name-specifier.
+///
+/// \param ObjectType if this unqualified-id occurs within a member access
+/// expression, the type of the base object whose member is being accessed.
+///
+/// \param Result on a successful parse, contains the parsed unqualified-id.
+///
+/// \returns true if parsing fails, false otherwise.
+bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
+ ParsedType ObjectType,
+ UnqualifiedId &Result) {
+ assert(Tok.is(tok::kw_operator) && "Expected 'operator' keyword");
+
+ // Consume the 'operator' keyword.
+ SourceLocation KeywordLoc = ConsumeToken();
+
+ // Determine what kind of operator name we have.
+ unsigned SymbolIdx = 0;
+ SourceLocation SymbolLocations[3];
+ OverloadedOperatorKind Op = OO_None;
+ switch (Tok.getKind()) {
+ case tok::kw_new:
+ case tok::kw_delete: {
+ bool isNew = Tok.getKind() == tok::kw_new;
+ // Consume the 'new' or 'delete'.
+ SymbolLocations[SymbolIdx++] = ConsumeToken();
+ // Check for array new/delete.
+ if (Tok.is(tok::l_square) &&
+ (!getLangOpts().CPlusPlus11 || NextToken().isNot(tok::l_square))) {
+ // Consume the '[' and ']'.
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+ T.consumeClose();
+ if (T.getCloseLocation().isInvalid())
+ return true;
+
+ SymbolLocations[SymbolIdx++] = T.getOpenLocation();
+ SymbolLocations[SymbolIdx++] = T.getCloseLocation();
+ Op = isNew? OO_Array_New : OO_Array_Delete;
+ } else {
+ Op = isNew? OO_New : OO_Delete;
+ }
+ break;
+ }
+
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ case tok::Token: \
+ SymbolLocations[SymbolIdx++] = ConsumeToken(); \
+ Op = OO_##Name; \
+ break;
+#define OVERLOADED_OPERATOR_MULTI(Name,Spelling,Unary,Binary,MemberOnly)
+#include "clang/Basic/OperatorKinds.def"
+
+ case tok::l_paren: {
+ // Consume the '(' and ')'.
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ T.consumeClose();
+ if (T.getCloseLocation().isInvalid())
+ return true;
+
+ SymbolLocations[SymbolIdx++] = T.getOpenLocation();
+ SymbolLocations[SymbolIdx++] = T.getCloseLocation();
+ Op = OO_Call;
+ break;
+ }
+
+ case tok::l_square: {
+ // Consume the '[' and ']'.
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+ T.consumeClose();
+ if (T.getCloseLocation().isInvalid())
+ return true;
+
+ SymbolLocations[SymbolIdx++] = T.getOpenLocation();
+ SymbolLocations[SymbolIdx++] = T.getCloseLocation();
+ Op = OO_Subscript;
+ break;
+ }
+
+ case tok::code_completion: {
+ // Code completion for the operator name.
+ Actions.CodeCompleteOperatorName(getCurScope());
+ cutOffParsing();
+ // Don't try to parse any further.
+ return true;
+ }
+
+ default:
+ break;
+ }
+
+ if (Op != OO_None) {
+ // We have parsed an operator-function-id.
+ Result.setOperatorFunctionId(KeywordLoc, Op, SymbolLocations);
+ return false;
+ }
+
+ // Parse a literal-operator-id.
+ //
+ // literal-operator-id: C++11 [over.literal]
+ // operator string-literal identifier
+ // operator user-defined-string-literal
+
+ if (getLangOpts().CPlusPlus11 && isTokenStringLiteral()) {
+ Diag(Tok.getLocation(), diag::warn_cxx98_compat_literal_operator);
+
+ SourceLocation DiagLoc;
+ unsigned DiagId = 0;
+
+ // We're past translation phase 6, so perform string literal concatenation
+ // before checking for "".
+ SmallVector<Token, 4> Toks;
+ SmallVector<SourceLocation, 4> TokLocs;
+ while (isTokenStringLiteral()) {
+ if (!Tok.is(tok::string_literal) && !DiagId) {
+ // C++11 [over.literal]p1:
+ // The string-literal or user-defined-string-literal in a
+ // literal-operator-id shall have no encoding-prefix [...].
+ DiagLoc = Tok.getLocation();
+ DiagId = diag::err_literal_operator_string_prefix;
+ }
+ Toks.push_back(Tok);
+ TokLocs.push_back(ConsumeStringToken());
+ }
+
+ StringLiteralParser Literal(Toks, PP);
+ if (Literal.hadError)
+ return true;
+
+ // Grab the literal operator's suffix, which will be either the next token
+ // or a ud-suffix from the string literal.
+ IdentifierInfo *II = nullptr;
+ SourceLocation SuffixLoc;
+ if (!Literal.getUDSuffix().empty()) {
+ II = &PP.getIdentifierTable().get(Literal.getUDSuffix());
+ SuffixLoc =
+ Lexer::AdvanceToTokenCharacter(TokLocs[Literal.getUDSuffixToken()],
+ Literal.getUDSuffixOffset(),
+ PP.getSourceManager(), getLangOpts());
+ } else if (Tok.is(tok::identifier)) {
+ II = Tok.getIdentifierInfo();
+ SuffixLoc = ConsumeToken();
+ TokLocs.push_back(SuffixLoc);
+ } else {
+ Diag(Tok.getLocation(), diag::err_expected) << tok::identifier;
+ return true;
+ }
+
+ // The string literal must be empty.
+ if (!Literal.GetString().empty() || Literal.Pascal) {
+ // C++11 [over.literal]p1:
+ // The string-literal or user-defined-string-literal in a
+ // literal-operator-id shall [...] contain no characters
+ // other than the implicit terminating '\0'.
+ DiagLoc = TokLocs.front();
+ DiagId = diag::err_literal_operator_string_not_empty;
+ }
+
+ if (DiagId) {
+ // This isn't a valid literal-operator-id, but we think we know
+ // what the user meant. Tell them what they should have written.
+ SmallString<32> Str;
+ Str += "\"\"";
+ Str += II->getName();
+ Diag(DiagLoc, DiagId) << FixItHint::CreateReplacement(
+ SourceRange(TokLocs.front(), TokLocs.back()), Str);
+ }
+
+ Result.setLiteralOperatorId(II, KeywordLoc, SuffixLoc);
+
+ return Actions.checkLiteralOperatorId(SS, Result);
+ }
+
+ // Parse a conversion-function-id.
+ //
+ // conversion-function-id: [C++ 12.3.2]
+ // operator conversion-type-id
+ //
+ // conversion-type-id:
+ // type-specifier-seq conversion-declarator[opt]
+ //
+ // conversion-declarator:
+ // ptr-operator conversion-declarator[opt]
+
+ // Parse the type-specifier-seq.
+ DeclSpec DS(AttrFactory);
+ if (ParseCXXTypeSpecifierSeq(DS)) // FIXME: ObjectType?
+ return true;
+
+ // Parse the conversion-declarator, which is merely a sequence of
+ // ptr-operators.
+ Declarator D(DS, Declarator::ConversionIdContext);
+ ParseDeclaratorInternal(D, /*DirectDeclParser=*/nullptr);
+
+ // Finish up the type.
+ TypeResult Ty = Actions.ActOnTypeName(getCurScope(), D);
+ if (Ty.isInvalid())
+ return true;
+
+ // Note that this is a conversion-function-id.
+ Result.setConversionFunctionId(KeywordLoc, Ty.get(),
+ D.getSourceRange().getEnd());
+ return false;
+}
+
+/// \brief Parse a C++ unqualified-id (or a C identifier), which describes the
+/// name of an entity.
+///
+/// \code
+/// unqualified-id: [C++ expr.prim.general]
+/// identifier
+/// operator-function-id
+/// conversion-function-id
+/// [C++0x] literal-operator-id [TODO]
+/// ~ class-name
+/// template-id
+///
+/// \endcode
+///
+/// \param SS The nested-name-specifier that preceded this unqualified-id. If
+/// non-empty, then we are parsing the unqualified-id of a qualified-id.
+///
+/// \param EnteringContext whether we are entering the scope of the
+/// nested-name-specifier.
+///
+/// \param AllowDestructorName whether we allow parsing of a destructor name.
+///
+/// \param AllowConstructorName whether we allow parsing a constructor name.
+///
+/// \param ObjectType if this unqualified-id occurs within a member access
+/// expression, the type of the base object whose member is being accessed.
+///
+/// \param Result on a successful parse, contains the parsed unqualified-id.
+///
+/// \returns true if parsing fails, false otherwise.
+bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
+ bool AllowDestructorName,
+ bool AllowConstructorName,
+ ParsedType ObjectType,
+ SourceLocation& TemplateKWLoc,
+ UnqualifiedId &Result) {
+
+ // Handle 'A::template B'. This is for template-ids which have not
+ // already been annotated by ParseOptionalCXXScopeSpecifier().
+ bool TemplateSpecified = false;
+ if (getLangOpts().CPlusPlus && Tok.is(tok::kw_template) &&
+ (ObjectType || SS.isSet())) {
+ TemplateSpecified = true;
+ TemplateKWLoc = ConsumeToken();
+ }
+
+ // unqualified-id:
+ // identifier
+ // template-id (when it hasn't already been annotated)
+ if (Tok.is(tok::identifier)) {
+ // Consume the identifier.
+ IdentifierInfo *Id = Tok.getIdentifierInfo();
+ SourceLocation IdLoc = ConsumeToken();
+
+ if (!getLangOpts().CPlusPlus) {
+ // If we're not in C++, only identifiers matter. Record the
+ // identifier and return.
+ Result.setIdentifier(Id, IdLoc);
+ return false;
+ }
+
+ if (AllowConstructorName &&
+ Actions.isCurrentClassName(*Id, getCurScope(), &SS)) {
+ // We have parsed a constructor name.
+ ParsedType Ty = Actions.getTypeName(*Id, IdLoc, getCurScope(),
+ &SS, false, false,
+ ParsedType(),
+ /*IsCtorOrDtorName=*/true,
+ /*NontrivialTypeSourceInfo=*/true);
+ Result.setConstructorName(Ty, IdLoc, IdLoc);
+ } else {
+ // We have parsed an identifier.
+ Result.setIdentifier(Id, IdLoc);
+ }
+
+ // If the next token is a '<', we may have a template.
+ if (TemplateSpecified || Tok.is(tok::less))
+ return ParseUnqualifiedIdTemplateId(SS, TemplateKWLoc, Id, IdLoc,
+ EnteringContext, ObjectType,
+ Result, TemplateSpecified);
+
+ return false;
+ }
+
+ // unqualified-id:
+ // template-id (already parsed and annotated)
+ if (Tok.is(tok::annot_template_id)) {
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+
+ // If the template-name names the current class, then this is a constructor
+ // name.
+ if (AllowConstructorName && TemplateId->Name &&
+ Actions.isCurrentClassName(*TemplateId->Name, getCurScope(), &SS)) {
+ if (SS.isSet()) {
+ // C++ [class.qual]p2 specifies that a qualified template-name
+ // is taken as the constructor name where a constructor can be
+ // declared. Thus, the template arguments are extraneous, so
+ // complain about them and remove them entirely.
+ Diag(TemplateId->TemplateNameLoc,
+ diag::err_out_of_line_constructor_template_id)
+ << TemplateId->Name
+ << FixItHint::CreateRemoval(
+ SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc));
+ ParsedType Ty = Actions.getTypeName(*TemplateId->Name,
+ TemplateId->TemplateNameLoc,
+ getCurScope(),
+ &SS, false, false,
+ ParsedType(),
+ /*IsCtorOrDtorName=*/true,
+ /*NontrivialTypeSourceInfo=*/true);
+ Result.setConstructorName(Ty, TemplateId->TemplateNameLoc,
+ TemplateId->RAngleLoc);
+ ConsumeToken();
+ return false;
+ }
+
+ Result.setConstructorTemplateId(TemplateId);
+ ConsumeToken();
+ return false;
+ }
+
+ // We have already parsed a template-id; consume the annotation token as
+ // our unqualified-id.
+ Result.setTemplateId(TemplateId);
+ TemplateKWLoc = TemplateId->TemplateKWLoc;
+ ConsumeToken();
+ return false;
+ }
+
+ // unqualified-id:
+ // operator-function-id
+ // conversion-function-id
+ if (Tok.is(tok::kw_operator)) {
+ if (ParseUnqualifiedIdOperator(SS, EnteringContext, ObjectType, Result))
+ return true;
+
+ // If we have an operator-function-id or a literal-operator-id and the next
+ // token is a '<', we may have a
+ //
+ // template-id:
+ // operator-function-id < template-argument-list[opt] >
+ if ((Result.getKind() == UnqualifiedId::IK_OperatorFunctionId ||
+ Result.getKind() == UnqualifiedId::IK_LiteralOperatorId) &&
+ (TemplateSpecified || Tok.is(tok::less)))
+ return ParseUnqualifiedIdTemplateId(SS, TemplateKWLoc,
+ nullptr, SourceLocation(),
+ EnteringContext, ObjectType,
+ Result, TemplateSpecified);
+
+ return false;
+ }
+
+ if (getLangOpts().CPlusPlus &&
+ (AllowDestructorName || SS.isSet()) && Tok.is(tok::tilde)) {
+ // C++ [expr.unary.op]p10:
+ // There is an ambiguity in the unary-expression ~X(), where X is a
+ // class-name. The ambiguity is resolved in favor of treating ~ as a
+ // unary complement rather than treating ~X as referring to a destructor.
+
+ // Parse the '~'.
+ SourceLocation TildeLoc = ConsumeToken();
+
+ if (SS.isEmpty() && Tok.is(tok::kw_decltype)) {
+ DeclSpec DS(AttrFactory);
+ SourceLocation EndLoc = ParseDecltypeSpecifier(DS);
+ if (ParsedType Type = Actions.getDestructorType(DS, ObjectType)) {
+ Result.setDestructorName(TildeLoc, Type, EndLoc);
+ return false;
+ }
+ return true;
+ }
+
+ // Parse the class-name.
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_destructor_tilde_identifier);
+ return true;
+ }
+
+ // If the user wrote ~T::T, correct it to T::~T.
+ DeclaratorScopeObj DeclScopeObj(*this, SS);
+ if (!TemplateSpecified && NextToken().is(tok::coloncolon)) {
+ // Don't let ParseOptionalCXXScopeSpecifier() "correct"
+ // `int A; struct { ~A::A(); };` to `int A; struct { ~A:A(); };`,
+ // it will confuse this recovery logic.
+ ColonProtectionRAIIObject ColonRAII(*this, false);
+
+ if (SS.isSet()) {
+ AnnotateScopeToken(SS, /*NewAnnotation*/true);
+ SS.clear();
+ }
+ if (ParseOptionalCXXScopeSpecifier(SS, ObjectType, EnteringContext))
+ return true;
+ if (SS.isNotEmpty())
+ ObjectType = ParsedType();
+ if (Tok.isNot(tok::identifier) || NextToken().is(tok::coloncolon) ||
+ !SS.isSet()) {
+ Diag(TildeLoc, diag::err_destructor_tilde_scope);
+ return true;
+ }
+
+ // Recover as if the tilde had been written before the identifier.
+ Diag(TildeLoc, diag::err_destructor_tilde_scope)
+ << FixItHint::CreateRemoval(TildeLoc)
+ << FixItHint::CreateInsertion(Tok.getLocation(), "~");
+
+ // Temporarily enter the scope for the rest of this function.
+ if (Actions.ShouldEnterDeclaratorScope(getCurScope(), SS))
+ DeclScopeObj.EnterDeclaratorScope();
+ }
+
+ // Parse the class-name (or template-name in a simple-template-id).
+ IdentifierInfo *ClassName = Tok.getIdentifierInfo();
+ SourceLocation ClassNameLoc = ConsumeToken();
+
+ if (TemplateSpecified || Tok.is(tok::less)) {
+ Result.setDestructorName(TildeLoc, ParsedType(), ClassNameLoc);
+ return ParseUnqualifiedIdTemplateId(SS, TemplateKWLoc,
+ ClassName, ClassNameLoc,
+ EnteringContext, ObjectType,
+ Result, TemplateSpecified);
+ }
+
+ // Note that this is a destructor name.
+ ParsedType Ty = Actions.getDestructorName(TildeLoc, *ClassName,
+ ClassNameLoc, getCurScope(),
+ SS, ObjectType,
+ EnteringContext);
+ if (!Ty)
+ return true;
+
+ Result.setDestructorName(TildeLoc, Ty, ClassNameLoc);
+ return false;
+ }
+
+ Diag(Tok, diag::err_expected_unqualified_id)
+ << getLangOpts().CPlusPlus;
+ return true;
+}
+
+/// ParseCXXNewExpression - Parse a C++ new-expression. New is used to allocate
+/// memory in a typesafe manner and call constructors.
+///
+/// This method is called to parse the new expression after the optional :: has
+/// been already parsed. If the :: was present, "UseGlobal" is true and "Start"
+/// is its location. Otherwise, "Start" is the location of the 'new' token.
+///
+/// new-expression:
+/// '::'[opt] 'new' new-placement[opt] new-type-id
+/// new-initializer[opt]
+/// '::'[opt] 'new' new-placement[opt] '(' type-id ')'
+/// new-initializer[opt]
+///
+/// new-placement:
+/// '(' expression-list ')'
+///
+/// new-type-id:
+/// type-specifier-seq new-declarator[opt]
+/// [GNU] attributes type-specifier-seq new-declarator[opt]
+///
+/// new-declarator:
+/// ptr-operator new-declarator[opt]
+/// direct-new-declarator
+///
+/// new-initializer:
+/// '(' expression-list[opt] ')'
+/// [C++0x] braced-init-list
+///
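+/// For example (illustrative; 'buf' and 'Widget' are assumed names):
+/// 'new int', 'new (buf) Widget(1, 2)' with a placement argument, and
+/// 'new int[n]{ }' with a braced new-initializer all reach this parser.
+///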
+ExprResult
+Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
+ assert(Tok.is(tok::kw_new) && "expected 'new' token");
+ ConsumeToken(); // Consume 'new'
+
+ // A '(' now can be a new-placement or the '(' wrapping the type-id in the
+ // second form of new-expression. It can't be a new-type-id.
+
+ ExprVector PlacementArgs;
+ SourceLocation PlacementLParen, PlacementRParen;
+
+ SourceRange TypeIdParens;
+ DeclSpec DS(AttrFactory);
+ Declarator DeclaratorInfo(DS, Declarator::CXXNewContext);
+ if (Tok.is(tok::l_paren)) {
+ // If it turns out to be a placement, we change the type location.
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ PlacementLParen = T.getOpenLocation();
+ if (ParseExpressionListOrTypeId(PlacementArgs, DeclaratorInfo)) {
+ SkipUntil(tok::semi, StopAtSemi | StopBeforeMatch);
+ return ExprError();
+ }
+
+ T.consumeClose();
+ PlacementRParen = T.getCloseLocation();
+ if (PlacementRParen.isInvalid()) {
+ SkipUntil(tok::semi, StopAtSemi | StopBeforeMatch);
+ return ExprError();
+ }
+
+ if (PlacementArgs.empty()) {
+ // Reset the placement locations. There was no placement.
+ TypeIdParens = T.getRange();
+ PlacementLParen = PlacementRParen = SourceLocation();
+ } else {
+ // We still need the type.
+ if (Tok.is(tok::l_paren)) {
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ MaybeParseGNUAttributes(DeclaratorInfo);
+ ParseSpecifierQualifierList(DS);
+ DeclaratorInfo.SetSourceRange(DS.getSourceRange());
+ ParseDeclarator(DeclaratorInfo);
+ T.consumeClose();
+ TypeIdParens = T.getRange();
+ } else {
+ MaybeParseGNUAttributes(DeclaratorInfo);
+ if (ParseCXXTypeSpecifierSeq(DS))
+ DeclaratorInfo.setInvalidType(true);
+ else {
+ DeclaratorInfo.SetSourceRange(DS.getSourceRange());
+ ParseDeclaratorInternal(DeclaratorInfo,
+ &Parser::ParseDirectNewDeclarator);
+ }
+ }
+ }
+ } else {
+ // A new-type-id is a simplified type-id, where essentially the
+ // direct-declarator is replaced by a direct-new-declarator.
+ MaybeParseGNUAttributes(DeclaratorInfo);
+ if (ParseCXXTypeSpecifierSeq(DS))
+ DeclaratorInfo.setInvalidType(true);
+ else {
+ DeclaratorInfo.SetSourceRange(DS.getSourceRange());
+ ParseDeclaratorInternal(DeclaratorInfo,
+ &Parser::ParseDirectNewDeclarator);
+ }
+ }
+ if (DeclaratorInfo.isInvalidType()) {
+ SkipUntil(tok::semi, StopAtSemi | StopBeforeMatch);
+ return ExprError();
+ }
+
+ ExprResult Initializer;
+
+ if (Tok.is(tok::l_paren)) {
+ SourceLocation ConstructorLParen, ConstructorRParen;
+ ExprVector ConstructorArgs;
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ ConstructorLParen = T.getOpenLocation();
+ if (Tok.isNot(tok::r_paren)) {
+ CommaLocsTy CommaLocs;
+ if (ParseExpressionList(ConstructorArgs, CommaLocs, [&] {
+ ParsedType TypeRep = Actions.ActOnTypeName(getCurScope(),
+ DeclaratorInfo).get();
+ Actions.CodeCompleteConstructor(getCurScope(),
+ TypeRep.get()->getCanonicalTypeInternal(),
+ DeclaratorInfo.getLocEnd(),
+ ConstructorArgs);
+ })) {
+ SkipUntil(tok::semi, StopAtSemi | StopBeforeMatch);
+ return ExprError();
+ }
+ }
+ T.consumeClose();
+ ConstructorRParen = T.getCloseLocation();
+ if (ConstructorRParen.isInvalid()) {
+ SkipUntil(tok::semi, StopAtSemi | StopBeforeMatch);
+ return ExprError();
+ }
+ Initializer = Actions.ActOnParenListExpr(ConstructorLParen,
+ ConstructorRParen,
+ ConstructorArgs);
+ } else if (Tok.is(tok::l_brace) && getLangOpts().CPlusPlus11) {
+ Diag(Tok.getLocation(),
+ diag::warn_cxx98_compat_generalized_initializer_lists);
+ Initializer = ParseBraceInitializer();
+ }
+ if (Initializer.isInvalid())
+ return Initializer;
+
+ return Actions.ActOnCXXNew(Start, UseGlobal, PlacementLParen,
+ PlacementArgs, PlacementRParen,
+ TypeIdParens, DeclaratorInfo, Initializer.get());
+}
+
+/// ParseDirectNewDeclarator - Parses a direct-new-declarator. Intended to be
+/// passed to ParseDeclaratorInternal.
+///
+/// direct-new-declarator:
+/// '[' expression ']'
+/// direct-new-declarator '[' constant-expression ']'
+///
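+/// For example (illustrative): in 'new int[n][4]', only the first bound
+/// 'n' may be a run-time expression; '4' must be a constant-expression.
+///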
+void Parser::ParseDirectNewDeclarator(Declarator &D) {
+ // Parse the array dimensions.
+ bool first = true;
+ while (Tok.is(tok::l_square)) {
+ // An array-size expression can't start with a lambda.
+ if (CheckProhibitedCXX11Attribute())
+ continue;
+
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+
+ ExprResult Size(first ? ParseExpression()
+ : ParseConstantExpression());
+ if (Size.isInvalid()) {
+ // Recover
+ SkipUntil(tok::r_square, StopAtSemi);
+ return;
+ }
+ first = false;
+
+ T.consumeClose();
+
+ // Attributes here appertain to the array type. C++11 [expr.new]p5.
+ ParsedAttributes Attrs(AttrFactory);
+ MaybeParseCXX11Attributes(Attrs);
+
+ D.AddTypeInfo(DeclaratorChunk::getArray(0,
+ /*static=*/false, /*star=*/false,
+ Size.get(),
+ T.getOpenLocation(),
+ T.getCloseLocation()),
+ Attrs, T.getCloseLocation());
+
+ if (T.getCloseLocation().isInvalid())
+ return;
+ }
+}
+
+/// ParseExpressionListOrTypeId - Parse either an expression-list or a type-id.
+/// This ambiguity appears in the syntax of the C++ new operator.
+///
+/// new-expression:
+/// '::'[opt] 'new' new-placement[opt] '(' type-id ')'
+/// new-initializer[opt]
+///
+/// new-placement:
+/// '(' expression-list ')'
+///
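+/// For example (illustrative): after 'new (', the tokens 'int*)' commit us
+/// to a type-id, whereas 'buf)' ('buf' an assumed name) is a placement
+/// expression-list.
+///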
+bool Parser::ParseExpressionListOrTypeId(
+ SmallVectorImpl<Expr*> &PlacementArgs,
+ Declarator &D) {
+ // The '(' was already consumed.
+ if (isTypeIdInParens()) {
+ ParseSpecifierQualifierList(D.getMutableDeclSpec());
+ D.SetSourceRange(D.getDeclSpec().getSourceRange());
+ ParseDeclarator(D);
+ return D.isInvalidType();
+ }
+
+ // It's not a type, it has to be an expression list.
+ // Discard the comma locations - ActOnCXXNew has enough parameters.
+ CommaLocsTy CommaLocs;
+ return ParseExpressionList(PlacementArgs, CommaLocs);
+}
+
+/// ParseCXXDeleteExpression - Parse a C++ delete-expression. Delete is used
+/// to free memory allocated by new.
+///
+/// This method is called to parse the 'delete' expression after the optional
+/// '::' has been already parsed. If the '::' was present, "UseGlobal" is true
+/// and "Start" is its location. Otherwise, "Start" is the location of the
+/// 'delete' token.
+///
+/// delete-expression:
+/// '::'[opt] 'delete' cast-expression
+/// '::'[opt] 'delete' '[' ']' cast-expression
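+///
+/// For example (illustrative): 'delete p;' and 'delete [] arr;' both reach
+/// this parser; the empty '[]' selects the array form.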
+ExprResult
+Parser::ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start) {
+ assert(Tok.is(tok::kw_delete) && "Expected 'delete' keyword");
+ ConsumeToken(); // Consume 'delete'
+
+ // Array delete?
+ bool ArrayDelete = false;
+ if (Tok.is(tok::l_square) && NextToken().is(tok::r_square)) {
+ // C++11 [expr.delete]p1:
+ // Whenever the delete keyword is followed by empty square brackets, it
+ // shall be interpreted as [array delete].
+ // [Footnote: A lambda expression with a lambda-introducer that consists
+ // of empty square brackets can follow the delete keyword if
+ // the lambda expression is enclosed in parentheses.]
+ // FIXME: Produce a better diagnostic if the '[]' is unambiguously a
+ // lambda-introducer.
+ ArrayDelete = true;
+ BalancedDelimiterTracker T(*this, tok::l_square);
+
+ T.consumeOpen();
+ T.consumeClose();
+ if (T.getCloseLocation().isInvalid())
+ return ExprError();
+ }
+
+ ExprResult Operand(ParseCastExpression(false));
+ if (Operand.isInvalid())
+ return Operand;
+
+ return Actions.ActOnCXXDelete(Start, UseGlobal, ArrayDelete, Operand.get());
+}
+
+static TypeTrait TypeTraitFromTokKind(tok::TokenKind kind) {
+ switch (kind) {
+ default: llvm_unreachable("Not a known type trait");
+#define TYPE_TRAIT_1(Spelling, Name, Key) \
+ case tok::kw_ ## Spelling: return UTT_ ## Name;
+#define TYPE_TRAIT_2(Spelling, Name, Key) \
+ case tok::kw_ ## Spelling: return BTT_ ## Name;
+#include "clang/Basic/TokenKinds.def"
+#define TYPE_TRAIT_N(Spelling, Name, Key) \
+ case tok::kw_ ## Spelling: return TT_ ## Name;
+#include "clang/Basic/TokenKinds.def"
+ }
+}
+
+static ArrayTypeTrait ArrayTypeTraitFromTokKind(tok::TokenKind kind) {
+ switch(kind) {
+ default: llvm_unreachable("Not a known binary type trait");
+ case tok::kw___array_rank: return ATT_ArrayRank;
+ case tok::kw___array_extent: return ATT_ArrayExtent;
+ }
+}
+
+static ExpressionTrait ExpressionTraitFromTokKind(tok::TokenKind kind) {
+ switch(kind) {
+ default: llvm_unreachable("Not a known unary expression trait.");
+ case tok::kw___is_lvalue_expr: return ET_IsLValueExpr;
+ case tok::kw___is_rvalue_expr: return ET_IsRValueExpr;
+ }
+}
+
+static unsigned TypeTraitArity(tok::TokenKind kind) {
+ switch (kind) {
+ default: llvm_unreachable("Not a known type trait");
+#define TYPE_TRAIT(N,Spelling,K) case tok::kw_##Spelling: return N;
+#include "clang/Basic/TokenKinds.def"
+ }
+}
+
+/// \brief Parse the built-in type-trait pseudo-functions that allow
+/// implementation of the TR1/C++11 type traits templates.
+///
+/// primary-expression:
+/// unary-type-trait '(' type-id ')'
+/// binary-type-trait '(' type-id ',' type-id ')'
+/// type-trait '(' type-id-seq ')'
+///
+/// type-id-seq:
+/// type-id ...[opt] type-id-seq[opt]
+///
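+/// For example (illustrative): '__is_enum(E)' is a unary-type-trait,
+/// '__is_base_of(A, B)' is a binary-type-trait, and
+/// '__is_trivially_constructible(T, int)' takes a variable-length
+/// type-id-seq.
+///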
+ExprResult Parser::ParseTypeTrait() {
+ tok::TokenKind Kind = Tok.getKind();
+ unsigned Arity = TypeTraitArity(Kind);
+
+ SourceLocation Loc = ConsumeToken();
+
+ BalancedDelimiterTracker Parens(*this, tok::l_paren);
+ if (Parens.expectAndConsume())
+ return ExprError();
+
+ SmallVector<ParsedType, 2> Args;
+ do {
+ // Parse the next type.
+ TypeResult Ty = ParseTypeName();
+ if (Ty.isInvalid()) {
+ Parens.skipToEnd();
+ return ExprError();
+ }
+
+ // Parse the ellipsis, if present.
+ if (Tok.is(tok::ellipsis)) {
+ Ty = Actions.ActOnPackExpansion(Ty.get(), ConsumeToken());
+ if (Ty.isInvalid()) {
+ Parens.skipToEnd();
+ return ExprError();
+ }
+ }
+
+ // Add this type to the list of arguments.
+ Args.push_back(Ty.get());
+ } while (TryConsumeToken(tok::comma));
+
+ if (Parens.consumeClose())
+ return ExprError();
+
+ SourceLocation EndLoc = Parens.getCloseLocation();
+
+ if (Arity && Args.size() != Arity) {
+ Diag(EndLoc, diag::err_type_trait_arity)
+ << Arity << 0 << (Arity > 1) << (int)Args.size() << SourceRange(Loc);
+ return ExprError();
+ }
+
+ if (!Arity && Args.empty()) {
+ Diag(EndLoc, diag::err_type_trait_arity)
+ << 1 << 1 << 1 << (int)Args.size() << SourceRange(Loc);
+ return ExprError();
+ }
+
+ return Actions.ActOnTypeTrait(TypeTraitFromTokKind(Kind), Loc, Args, EndLoc);
+}
+
+/// ParseArrayTypeTrait - Parse the built-in array type-trait
+/// pseudo-functions.
+///
+/// primary-expression:
+/// [Embarcadero] '__array_rank' '(' type-id ')'
+/// [Embarcadero] '__array_extent' '(' type-id ',' expression ')'
+///
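+/// For example (illustrative): '__array_rank(int[2][3])' evaluates to 2,
+/// and '__array_extent(int[2][3], 1)' evaluates to 3.
+///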
+ExprResult Parser::ParseArrayTypeTrait() {
+ ArrayTypeTrait ATT = ArrayTypeTraitFromTokKind(Tok.getKind());
+ SourceLocation Loc = ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume())
+ return ExprError();
+
+ TypeResult Ty = ParseTypeName();
+ if (Ty.isInvalid()) {
+ SkipUntil(tok::comma, StopAtSemi);
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+
+ switch (ATT) {
+ case ATT_ArrayRank: {
+ T.consumeClose();
+ return Actions.ActOnArrayTypeTrait(ATT, Loc, Ty.get(), nullptr,
+ T.getCloseLocation());
+ }
+ case ATT_ArrayExtent: {
+ if (ExpectAndConsume(tok::comma)) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+
+ ExprResult DimExpr = ParseExpression();
+ T.consumeClose();
+
+ return Actions.ActOnArrayTypeTrait(ATT, Loc, Ty.get(), DimExpr.get(),
+ T.getCloseLocation());
+ }
+ }
+ llvm_unreachable("Invalid ArrayTypeTrait!");
+}
+
+/// ParseExpressionTrait - Parse built-in expression-trait
+/// pseudo-functions like __is_lvalue_expr( xxx ).
+///
+/// primary-expression:
+/// [Embarcadero] expression-trait '(' expression ')'
+///
+ExprResult Parser::ParseExpressionTrait() {
+ ExpressionTrait ET = ExpressionTraitFromTokKind(Tok.getKind());
+ SourceLocation Loc = ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume())
+ return ExprError();
+
+ ExprResult Expr = ParseExpression();
+
+ T.consumeClose();
+
+ return Actions.ActOnExpressionTrait(ET, Loc, Expr.get(),
+ T.getCloseLocation());
+}
+
+/// ParseCXXAmbiguousParenExpression - We have parsed the left paren of a
+/// parenthesized ambiguous type-id. This uses tentative parsing to disambiguate
+/// based on the context past the parens.
+ExprResult
+Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
+ ParsedType &CastTy,
+ BalancedDelimiterTracker &Tracker,
+ ColonProtectionRAIIObject &ColonProt) {
+ assert(getLangOpts().CPlusPlus && "Should only be called for C++!");
+ assert(ExprType == CastExpr && "Compound literals are not ambiguous!");
+ assert(isTypeIdInParens() && "Not a type-id!");
+
+ ExprResult Result(true);
+ CastTy = ParsedType();
+
+ // We need to disambiguate a very ugly part of the C++ syntax:
+ //
+ // (T())x; - type-id
+ // (T())*x; - type-id
+ // (T())/x; - expression
+ // (T()); - expression
+ //
+ // The bad news is that we cannot use the specialized tentative parser, since
+ // it can only verify that the thing inside the parens can be parsed as
+ // type-id, it is not useful for determining the context past the parens.
+ //
+ // The good news is that the parser can disambiguate this part without
+ // making any unnecessary Action calls.
+ //
+ // It uses a scheme similar to parsing inline methods. The parenthesized
+ // tokens are cached, the context that follows is determined (possibly by
+ // parsing a cast-expression), and then we re-introduce the cached tokens
+ // into the token stream and parse them appropriately.
+
+ ParenParseOption ParseAs;
+ CachedTokens Toks;
+
+ // Store the tokens of the parentheses. We will parse them after we determine
+ // the context that follows them.
+ if (!ConsumeAndStoreUntil(tok::r_paren, Toks)) {
+ // We didn't find the ')' we expected.
+ Tracker.consumeClose();
+ return ExprError();
+ }
+
+ if (Tok.is(tok::l_brace)) {
+ ParseAs = CompoundLiteral;
+ } else {
+ bool NotCastExpr;
+ if (Tok.is(tok::l_paren) && NextToken().is(tok::r_paren)) {
+ NotCastExpr = true;
+ } else {
+ // Try parsing the cast-expression that may follow.
+ // If it is not a cast-expression, NotCastExpr will be true and no token
+ // will be consumed.
+ ColonProt.restore();
+ Result = ParseCastExpression(false/*isUnaryExpression*/,
+ false/*isAddressofOperand*/,
+ NotCastExpr,
+ // type-id has priority.
+ IsTypeCast);
+ }
+
+ // If we parsed a cast-expression, it's really a type-id, otherwise it's
+ // an expression.
+ ParseAs = NotCastExpr ? SimpleExpr : CastExpr;
+ }
+
+ // The current token should go after the cached tokens.
+ Toks.push_back(Tok);
+ // Re-enter the stored parenthesized tokens into the token stream, so we may
+ // parse them now.
+ PP.EnterTokenStream(Toks.data(), Toks.size(),
+ true/*DisableMacroExpansion*/, false/*OwnsTokens*/);
+ // Drop the current token and bring the first cached one. It's the same token
+ // as when we entered this function.
+ ConsumeAnyToken();
+
+ if (ParseAs >= CompoundLiteral) {
+ // Parse the type declarator.
+ DeclSpec DS(AttrFactory);
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ {
+ ColonProtectionRAIIObject InnerColonProtection(*this);
+ ParseSpecifierQualifierList(DS);
+ ParseDeclarator(DeclaratorInfo);
+ }
+
+ // Match the ')'.
+ Tracker.consumeClose();
+ ColonProt.restore();
+
+ if (ParseAs == CompoundLiteral) {
+ ExprType = CompoundLiteral;
+ if (DeclaratorInfo.isInvalidType())
+ return ExprError();
+
+ TypeResult Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ return ParseCompoundLiteralExpression(Ty.get(),
+ Tracker.getOpenLocation(),
+ Tracker.getCloseLocation());
+ }
+
+ // We parsed '(' type-id ')' and the thing after it wasn't a '{'.
+ assert(ParseAs == CastExpr);
+
+ if (DeclaratorInfo.isInvalidType())
+ return ExprError();
+
+ // Result is what ParseCastExpression returned earlier.
+ if (!Result.isInvalid())
+ Result = Actions.ActOnCastExpr(getCurScope(), Tracker.getOpenLocation(),
+ DeclaratorInfo, CastTy,
+ Tracker.getCloseLocation(), Result.get());
+ return Result;
+ }
+
+ // Not a compound literal, and not followed by a cast-expression.
+ assert(ParseAs == SimpleExpr);
+
+ ExprType = SimpleExpr;
+ Result = ParseExpression();
+ if (!Result.isInvalid() && Tok.is(tok::r_paren))
+ Result = Actions.ActOnParenExpr(Tracker.getOpenLocation(),
+ Tok.getLocation(), Result.get());
+
+ // Match the ')'.
+ if (Result.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+
+ Tracker.consumeClose();
+ return Result;
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp
new file mode 100644
index 0000000..4896ff0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp
@@ -0,0 +1,544 @@
+//===--- ParseInit.cpp - Initializer Parsing ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements initializer parsing as specified by C99 6.7.8.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "RAIIObjectsForParser.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Sema/Designator.h"
+#include "clang/Sema/Scope.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+
+/// MayBeDesignationStart - Return true if the current token might be the start
+/// of a designator. If we can tell it is impossible that it is a designator,
+/// return false.
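+///
+/// For example (illustrative only), inside a braced initializer in C++11 a
+/// '[' can begin either construct:
+/// \code
+///   int a[4] = { [2] = 1 };               // array designator
+///   Fn  f[1] = { [&n] { return n(); } };  // lambda capture list ('Fn' and
+///                                         // 'n' are hypothetical names)
+/// \endcode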
+bool Parser::MayBeDesignationStart() {
+ switch (Tok.getKind()) {
+ default:
+ return false;
+
+ case tok::period: // designator: '.' identifier
+ return true;
+
+ case tok::l_square: { // designator: array-designator
+ if (!PP.getLangOpts().CPlusPlus11)
+ return true;
+
+ // C++11 lambda expressions and C99 designators can be ambiguous all the
+ // way through the closing ']' and to the next character. Handle the easy
+ // cases here, and fall back to tentative parsing if those fail.
+ switch (PP.LookAhead(0).getKind()) {
+ case tok::equal:
+ case tok::r_square:
+ // Definitely starts a lambda expression.
+ return false;
+
+ case tok::amp:
+ case tok::kw_this:
+ case tok::identifier:
+ // We have to do additional analysis, because these could be the
+ // start of a constant expression or a lambda capture list.
+ break;
+
+ default:
+ // Anything not mentioned above cannot occur following a '[' in a
+ // lambda expression.
+ return true;
+ }
+
+ // Handle the complicated case below.
+ break;
+ }
+ case tok::identifier: // designation: identifier ':'
+ return PP.LookAhead(0).is(tok::colon);
+ }
+
+ // Parse up to (at most) the token after the closing ']' to determine
+ // whether this is a C99 designator or a lambda.
+ TentativeParsingAction Tentative(*this);
+
+ LambdaIntroducer Intro;
+ bool SkippedInits = false;
+ Optional<unsigned> DiagID(ParseLambdaIntroducer(Intro, &SkippedInits));
+
+ if (DiagID) {
+ // If this can't be a lambda capture list, it's a designator.
+ Tentative.Revert();
+ return true;
+ }
+
+ // Once we hit the closing square bracket, we look at the next
+ // token. If it's an '=', this is a designator. Otherwise, it's a
+  // lambda expression. This decision favors lambdas over the older GNU
+  // designator syntax (which allows one to omit the '='), and is
+  // consistent with GCC.
+ tok::TokenKind Kind = Tok.getKind();
+ // FIXME: If we didn't skip any inits, parse the lambda from here
+ // rather than throwing away then reparsing the LambdaIntroducer.
+ Tentative.Revert();
+ return Kind == tok::equal;
+}
+
+static void CheckArrayDesignatorSyntax(Parser &P, SourceLocation Loc,
+ Designation &Desig) {
+ // If we have exactly one array designator, this used the GNU
+ // 'designation: array-designator' extension, otherwise there should be no
+ // designators at all!
+ if (Desig.getNumDesignators() == 1 &&
+ (Desig.getDesignator(0).isArrayDesignator() ||
+ Desig.getDesignator(0).isArrayRangeDesignator()))
+ P.Diag(Loc, diag::ext_gnu_missing_equal_designator);
+ else if (Desig.getNumDesignators() > 0)
+ P.Diag(Loc, diag::err_expected_equal_designator);
+}
+
+/// ParseInitializerWithPotentialDesignator - Parse the 'initializer' production
+/// checking to see if the token stream starts with a designator.
+///
+/// designation:
+/// designator-list '='
+/// [GNU] array-designator
+/// [GNU] identifier ':'
+///
+/// designator-list:
+/// designator
+/// designator-list designator
+///
+/// designator:
+/// array-designator
+/// '.' identifier
+///
+/// array-designator:
+/// '[' constant-expression ']'
+/// [GNU] '[' constant-expression '...' constant-expression ']'
+///
+/// NOTE: [OBJC] allows '[ objc-receiver objc-message-args ]' as an
+/// initializer (because it is an expression). We need to consider this case
+/// when parsing array designators.
+///
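+/// A few illustrative designations (examples only; 'field' is hypothetical):
+/// \code
+///   .field = 1       // field designator
+///   [4] = 2          // array designator
+///   [4 ... 6] = 3    // GNU array-range designator
+///   field: 1         // obsolete GNU field designator
+/// \endcode
+///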
+ExprResult Parser::ParseInitializerWithPotentialDesignator() {
+
+ // If this is the old-style GNU extension:
+ // designation ::= identifier ':'
+ // Handle it as a field designator. Otherwise, this must be the start of a
+ // normal expression.
+ if (Tok.is(tok::identifier)) {
+ const IdentifierInfo *FieldName = Tok.getIdentifierInfo();
+
+ SmallString<256> NewSyntax;
+ llvm::raw_svector_ostream(NewSyntax) << '.' << FieldName->getName()
+ << " = ";
+
+ SourceLocation NameLoc = ConsumeToken(); // Eat the identifier.
+
+ assert(Tok.is(tok::colon) && "MayBeDesignationStart not working properly!");
+ SourceLocation ColonLoc = ConsumeToken();
+
+ Diag(NameLoc, diag::ext_gnu_old_style_field_designator)
+ << FixItHint::CreateReplacement(SourceRange(NameLoc, ColonLoc),
+ NewSyntax);
+
+ Designation D;
+ D.AddDesignator(Designator::getField(FieldName, SourceLocation(), NameLoc));
+ return Actions.ActOnDesignatedInitializer(D, ColonLoc, true,
+ ParseInitializer());
+ }
+
+ // Desig - This is initialized when we see our first designator. We may have
+ // an objc message send with no designator, so we don't want to create this
+ // eagerly.
+ Designation Desig;
+
+ // Parse each designator in the designator list until we find an initializer.
+ while (Tok.is(tok::period) || Tok.is(tok::l_square)) {
+ if (Tok.is(tok::period)) {
+ // designator: '.' identifier
+ SourceLocation DotLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok.getLocation(), diag::err_expected_field_designator);
+ return ExprError();
+ }
+
+ Desig.AddDesignator(Designator::getField(Tok.getIdentifierInfo(), DotLoc,
+ Tok.getLocation()));
+ ConsumeToken(); // Eat the identifier.
+ continue;
+ }
+
+ // We must have either an array designator now or an objc message send.
+ assert(Tok.is(tok::l_square) && "Unexpected token!");
+
+ // Handle the two forms of array designator:
+ // array-designator: '[' constant-expression ']'
+ // array-designator: '[' constant-expression '...' constant-expression ']'
+ //
+ // Also, we have to handle the case where the expression after the
+    // designator is an objc message send: '[' objc-message-expr ']'.
+ // Interesting cases are:
+ // [foo bar] -> objc message send
+ // [foo] -> array designator
+ // [foo ... bar] -> array designator
+ // [4][foo bar] -> obsolete GNU designation with objc message send.
+ //
+ // We do not need to check for an expression starting with [[ here. If it
+ // contains an Objective-C message send, then it is not an ill-formed
+ // attribute. If it is a lambda-expression within an array-designator, then
+ // it will be rejected because a constant-expression cannot begin with a
+ // lambda-expression.
+ InMessageExpressionRAIIObject InMessage(*this, true);
+
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+ SourceLocation StartLoc = T.getOpenLocation();
+
+ ExprResult Idx;
+
+ // If Objective-C is enabled and this is a typename (class message
+ // send) or send to 'super', parse this as a message send
+ // expression. We handle C++ and C separately, since C++ requires
+ // much more complicated parsing.
+ if (getLangOpts().ObjC1 && getLangOpts().CPlusPlus) {
+ // Send to 'super'.
+ if (Tok.is(tok::identifier) && Tok.getIdentifierInfo() == Ident_super &&
+ NextToken().isNot(tok::period) &&
+ getCurScope()->isInObjcMethodScope()) {
+ CheckArrayDesignatorSyntax(*this, StartLoc, Desig);
+ return ParseAssignmentExprWithObjCMessageExprStart(StartLoc,
+ ConsumeToken(),
+ ParsedType(),
+ nullptr);
+ }
+
+ // Parse the receiver, which is either a type or an expression.
+ bool IsExpr;
+ void *TypeOrExpr;
+ if (ParseObjCXXMessageReceiver(IsExpr, TypeOrExpr)) {
+ SkipUntil(tok::r_square, StopAtSemi);
+ return ExprError();
+ }
+
+ // If the receiver was a type, we have a class message; parse
+ // the rest of it.
+ if (!IsExpr) {
+ CheckArrayDesignatorSyntax(*this, StartLoc, Desig);
+ return ParseAssignmentExprWithObjCMessageExprStart(StartLoc,
+ SourceLocation(),
+ ParsedType::getFromOpaquePtr(TypeOrExpr),
+ nullptr);
+ }
+
+ // If the receiver was an expression, we still don't know
+ // whether we have a message send or an array designator; just
+ // adopt the expression for further analysis below.
+ // FIXME: potentially-potentially evaluated expression above?
+ Idx = ExprResult(static_cast<Expr*>(TypeOrExpr));
+ } else if (getLangOpts().ObjC1 && Tok.is(tok::identifier)) {
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ SourceLocation IILoc = Tok.getLocation();
+ ParsedType ReceiverType;
+      // Three cases:
+      //   [type foo]      - a message send to a type
+      //   [super foo]     - a message send to super
+      //   [super.bar foo] - a message send to an expression
+ switch (Actions.getObjCMessageKind(
+ getCurScope(), II, IILoc, II == Ident_super,
+ NextToken().is(tok::period), ReceiverType)) {
+ case Sema::ObjCSuperMessage:
+ CheckArrayDesignatorSyntax(*this, StartLoc, Desig);
+ return ParseAssignmentExprWithObjCMessageExprStart(StartLoc,
+ ConsumeToken(),
+ ParsedType(),
+ nullptr);
+
+ case Sema::ObjCClassMessage:
+ CheckArrayDesignatorSyntax(*this, StartLoc, Desig);
+ ConsumeToken(); // the identifier
+ if (!ReceiverType) {
+ SkipUntil(tok::r_square, StopAtSemi);
+ return ExprError();
+ }
+
+ // Parse type arguments and protocol qualifiers.
+ if (Tok.is(tok::less)) {
+ SourceLocation NewEndLoc;
+ TypeResult NewReceiverType
+ = parseObjCTypeArgsAndProtocolQualifiers(IILoc, ReceiverType,
+ /*consumeLastToken=*/true,
+ NewEndLoc);
+ if (!NewReceiverType.isUsable()) {
+ SkipUntil(tok::r_square, StopAtSemi);
+ return ExprError();
+ }
+
+ ReceiverType = NewReceiverType.get();
+ }
+
+ return ParseAssignmentExprWithObjCMessageExprStart(StartLoc,
+ SourceLocation(),
+ ReceiverType,
+ nullptr);
+
+ case Sema::ObjCInstanceMessage:
+ // Fall through; we'll just parse the expression and
+ // (possibly) treat this like an Objective-C message send
+ // later.
+ break;
+ }
+ }
+
+ // Parse the index expression, if we haven't already gotten one
+ // above (which can only happen in Objective-C++).
+ // Note that we parse this as an assignment expression, not a constant
+ // expression (allowing *=, =, etc) to handle the objc case. Sema needs
+ // to validate that the expression is a constant.
+ // FIXME: We also need to tell Sema that we're in a
+ // potentially-potentially evaluated context.
+ if (!Idx.get()) {
+ Idx = ParseAssignmentExpression();
+ if (Idx.isInvalid()) {
+ SkipUntil(tok::r_square, StopAtSemi);
+ return Idx;
+ }
+ }
+
+ // Given an expression, we could either have a designator (if the next
+    // tokens are '...' or ']') or an objc message send. If this is an objc
+    // message send, handle it now. An objc message send is the start of
+ // an assignment-expression production.
+ if (getLangOpts().ObjC1 && Tok.isNot(tok::ellipsis) &&
+ Tok.isNot(tok::r_square)) {
+ CheckArrayDesignatorSyntax(*this, Tok.getLocation(), Desig);
+ return ParseAssignmentExprWithObjCMessageExprStart(StartLoc,
+ SourceLocation(),
+ ParsedType(),
+ Idx.get());
+ }
+
+ // If this is a normal array designator, remember it.
+ if (Tok.isNot(tok::ellipsis)) {
+ Desig.AddDesignator(Designator::getArray(Idx.get(), StartLoc));
+ } else {
+ // Handle the gnu array range extension.
+ Diag(Tok, diag::ext_gnu_array_range);
+ SourceLocation EllipsisLoc = ConsumeToken();
+
+ ExprResult RHS(ParseConstantExpression());
+ if (RHS.isInvalid()) {
+ SkipUntil(tok::r_square, StopAtSemi);
+ return RHS;
+ }
+ Desig.AddDesignator(Designator::getArrayRange(Idx.get(),
+ RHS.get(),
+ StartLoc, EllipsisLoc));
+ }
+
+ T.consumeClose();
+ Desig.getDesignator(Desig.getNumDesignators() - 1).setRBracketLoc(
+ T.getCloseLocation());
+ }
+
+ // Okay, we're done with the designator sequence. We know that there must be
+ // at least one designator, because the only case we can get into this method
+ // without a designator is when we have an objc message send. That case is
+ // handled and returned from above.
+ assert(!Desig.empty() && "Designator is empty?");
+
+ // Handle a normal designator sequence end, which is an equal.
+ if (Tok.is(tok::equal)) {
+ SourceLocation EqualLoc = ConsumeToken();
+ return Actions.ActOnDesignatedInitializer(Desig, EqualLoc, false,
+ ParseInitializer());
+ }
+
+ // We read some number of designators and found something that isn't an = or
+ // an initializer. If we have exactly one array designator, this
+ // is the GNU 'designation: array-designator' extension. Otherwise, it is a
+ // parse error.
+ if (Desig.getNumDesignators() == 1 &&
+ (Desig.getDesignator(0).isArrayDesignator() ||
+ Desig.getDesignator(0).isArrayRangeDesignator())) {
+ Diag(Tok, diag::ext_gnu_missing_equal_designator)
+ << FixItHint::CreateInsertion(Tok.getLocation(), "= ");
+ return Actions.ActOnDesignatedInitializer(Desig, Tok.getLocation(),
+ true, ParseInitializer());
+ }
+
+ Diag(Tok, diag::err_expected_equal_designator);
+ return ExprError();
+}
+
+
+/// ParseBraceInitializer - Called when parsing an initializer that has a
+/// leading open brace.
+///
+/// initializer: [C99 6.7.8]
+/// '{' initializer-list '}'
+/// '{' initializer-list ',' '}'
+/// [GNU] '{' '}'
+///
+/// initializer-list:
+/// designation[opt] initializer ...[opt]
+/// initializer-list ',' designation[opt] initializer ...[opt]
+///
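+/// For example (illustrative only):
+/// \code
+///   int a[8] = { 1, 2, [5] = 7, };  // designation plus a trailing comma
+///   struct S s = {};                // GNU empty initializer in C
+/// \endcode
+///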
+ExprResult Parser::ParseBraceInitializer() {
+ InMessageExpressionRAIIObject InMessage(*this, false);
+
+ BalancedDelimiterTracker T(*this, tok::l_brace);
+ T.consumeOpen();
+ SourceLocation LBraceLoc = T.getOpenLocation();
+
+ /// InitExprs - This is the actual list of expressions contained in the
+ /// initializer.
+ ExprVector InitExprs;
+
+ if (Tok.is(tok::r_brace)) {
+ // Empty initializers are a C++ feature and a GNU extension to C.
+ if (!getLangOpts().CPlusPlus)
+ Diag(LBraceLoc, diag::ext_gnu_empty_initializer);
+ // Match the '}'.
+ return Actions.ActOnInitList(LBraceLoc, None, ConsumeBrace());
+ }
+
+ bool InitExprsOk = true;
+
+ while (1) {
+ // Handle Microsoft __if_exists/if_not_exists if necessary.
+ if (getLangOpts().MicrosoftExt && (Tok.is(tok::kw___if_exists) ||
+ Tok.is(tok::kw___if_not_exists))) {
+ if (ParseMicrosoftIfExistsBraceInitializer(InitExprs, InitExprsOk)) {
+ if (Tok.isNot(tok::comma)) break;
+ ConsumeToken();
+ }
+ if (Tok.is(tok::r_brace)) break;
+ continue;
+ }
+
+ // Parse: designation[opt] initializer
+
+ // If we know that this cannot be a designation, just parse the nested
+ // initializer directly.
+ ExprResult SubElt;
+ if (MayBeDesignationStart())
+ SubElt = ParseInitializerWithPotentialDesignator();
+ else
+ SubElt = ParseInitializer();
+
+ if (Tok.is(tok::ellipsis))
+ SubElt = Actions.ActOnPackExpansion(SubElt.get(), ConsumeToken());
+
+ SubElt = Actions.CorrectDelayedTyposInExpr(SubElt.get());
+
+ // If we couldn't parse the subelement, bail out.
+ if (SubElt.isUsable()) {
+ InitExprs.push_back(SubElt.get());
+ } else {
+ InitExprsOk = false;
+
+ // We have two ways to try to recover from this error: if the code looks
+ // grammatically ok (i.e. we have a comma coming up) try to continue
+ // parsing the rest of the initializer. This allows us to emit
+ // diagnostics for later elements that we find. If we don't see a comma,
+ // assume there is a parse error, and just skip to recover.
+      // FIXME: This comment doesn't sound right. If there is an r_brace
+ // immediately, it can't be an error, since there is no other way of
+ // leaving this loop except through this if.
+ if (Tok.isNot(tok::comma)) {
+ SkipUntil(tok::r_brace, StopBeforeMatch);
+ break;
+ }
+ }
+
+ // If we don't have a comma continued list, we're done.
+ if (Tok.isNot(tok::comma)) break;
+
+ // TODO: save comma locations if some client cares.
+ ConsumeToken();
+
+ // Handle trailing comma.
+ if (Tok.is(tok::r_brace)) break;
+ }
+
+ bool closed = !T.consumeClose();
+
+ if (InitExprsOk && closed)
+ return Actions.ActOnInitList(LBraceLoc, InitExprs,
+ T.getCloseLocation());
+
+ return ExprError(); // an error occurred.
+}
+
+
+// Return true if a comma (or closing brace) is necessary after the
+// __if_exists/if_not_exists statement.
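+//
+// A sketch of the accepted form (illustrative; 'S::member' is a hypothetical
+// name, and the extension is only active under -fms-extensions):
+//
+//   int a[] = {
+//     0,
+//     __if_exists(S::member) { 1, }
+//     2
+//   };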
+bool Parser::ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
+ bool &InitExprsOk) {
+ bool trailingComma = false;
+ IfExistsCondition Result;
+ if (ParseMicrosoftIfExistsCondition(Result))
+ return false;
+
+ BalancedDelimiterTracker Braces(*this, tok::l_brace);
+ if (Braces.consumeOpen()) {
+ Diag(Tok, diag::err_expected) << tok::l_brace;
+ return false;
+ }
+
+ switch (Result.Behavior) {
+ case IEB_Parse:
+ // Parse the declarations below.
+ break;
+
+ case IEB_Dependent:
+ Diag(Result.KeywordLoc, diag::warn_microsoft_dependent_exists)
+ << Result.IsIfExists;
+ // Fall through to skip.
+
+ case IEB_Skip:
+ Braces.skipToEnd();
+ return false;
+ }
+
+ while (!isEofOrEom()) {
+ trailingComma = false;
+ // If we know that this cannot be a designation, just parse the nested
+ // initializer directly.
+ ExprResult SubElt;
+ if (MayBeDesignationStart())
+ SubElt = ParseInitializerWithPotentialDesignator();
+ else
+ SubElt = ParseInitializer();
+
+ if (Tok.is(tok::ellipsis))
+ SubElt = Actions.ActOnPackExpansion(SubElt.get(), ConsumeToken());
+
+ // If we couldn't parse the subelement, bail out.
+ if (!SubElt.isInvalid())
+ InitExprs.push_back(SubElt.get());
+ else
+ InitExprsOk = false;
+
+ if (Tok.is(tok::comma)) {
+ ConsumeToken();
+ trailingComma = true;
+ }
+
+ if (Tok.is(tok::r_brace))
+ break;
+ }
+
+ Braces.consumeClose();
+
+ return !trailingComma;
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp
new file mode 100644
index 0000000..e72a1f6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp
@@ -0,0 +1,3614 @@
+//===--- ParseObjC.cpp - Objective C Parsing ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Objective-C portions of the Parser interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "RAIIObjectsForParser.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/PrettyDeclStackTrace.h"
+#include "clang/Sema/Scope.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+using namespace clang;
+
+/// Skips attributes after an Objective-C @ directive. Emits a diagnostic.
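+///
+/// For example (illustrative only), the attribute below is skipped and
+/// diagnosed, with a hint to place it elsewhere:
+/// \code
+///   @interface __attribute__((objc_root_class)) Foo
+/// \endcode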
+void Parser::MaybeSkipAttributes(tok::ObjCKeywordKind Kind) {
+ ParsedAttributes attrs(AttrFactory);
+ if (Tok.is(tok::kw___attribute)) {
+ if (Kind == tok::objc_interface || Kind == tok::objc_protocol)
+ Diag(Tok, diag::err_objc_postfix_attribute_hint)
+ << (Kind == tok::objc_protocol);
+ else
+ Diag(Tok, diag::err_objc_postfix_attribute);
+ ParseGNUAttributes(attrs);
+ }
+}
+
+/// ParseObjCAtDirectives - Handle parts of the external-declaration production:
+/// external-declaration: [C99 6.9]
+/// [OBJC] objc-class-definition
+/// [OBJC] objc-class-declaration
+/// [OBJC] objc-alias-declaration
+/// [OBJC] objc-protocol-definition
+/// [OBJC] objc-method-definition
+/// [OBJC] '@' 'end'
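+///
+/// For example (illustrative only):
+/// \code
+///   @class Foo;                      // objc-class-declaration
+///   @compatibility_alias Alias Foo;  // objc-alias-declaration
+///   @end                             // '@' 'end'
+/// \endcode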
+Parser::DeclGroupPtrTy Parser::ParseObjCAtDirectives() {
+ SourceLocation AtLoc = ConsumeToken(); // the "@"
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCAtDirective(getCurScope());
+ cutOffParsing();
+ return DeclGroupPtrTy();
+ }
+
+ Decl *SingleDecl = nullptr;
+ switch (Tok.getObjCKeywordID()) {
+ case tok::objc_class:
+ return ParseObjCAtClassDeclaration(AtLoc);
+ case tok::objc_interface: {
+ ParsedAttributes attrs(AttrFactory);
+ SingleDecl = ParseObjCAtInterfaceDeclaration(AtLoc, attrs);
+ break;
+ }
+ case tok::objc_protocol: {
+ ParsedAttributes attrs(AttrFactory);
+ return ParseObjCAtProtocolDeclaration(AtLoc, attrs);
+ }
+ case tok::objc_implementation:
+ return ParseObjCAtImplementationDeclaration(AtLoc);
+ case tok::objc_end:
+ return ParseObjCAtEndDeclaration(AtLoc);
+ case tok::objc_compatibility_alias:
+ SingleDecl = ParseObjCAtAliasDeclaration(AtLoc);
+ break;
+ case tok::objc_synthesize:
+ SingleDecl = ParseObjCPropertySynthesize(AtLoc);
+ break;
+ case tok::objc_dynamic:
+ SingleDecl = ParseObjCPropertyDynamic(AtLoc);
+ break;
+ case tok::objc_import:
+ if (getLangOpts().Modules || getLangOpts().DebuggerSupport)
+ return ParseModuleImport(AtLoc);
+ Diag(AtLoc, diag::err_atimport);
+ SkipUntil(tok::semi);
+ return Actions.ConvertDeclToDeclGroup(nullptr);
+ default:
+ Diag(AtLoc, diag::err_unexpected_at);
+ SkipUntil(tok::semi);
+ SingleDecl = nullptr;
+ break;
+ }
+ return Actions.ConvertDeclToDeclGroup(SingleDecl);
+}
+
+/// Class to handle popping type parameters when leaving the scope.
+class Parser::ObjCTypeParamListScope {
+ Sema &Actions;
+ Scope *S;
+ ObjCTypeParamList *Params;
+public:
+ ObjCTypeParamListScope(Sema &Actions, Scope *S)
+ : Actions(Actions), S(S), Params(nullptr) {}
+ ~ObjCTypeParamListScope() {
+ leave();
+ }
+ void enter(ObjCTypeParamList *P) {
+ assert(!Params);
+ Params = P;
+ }
+ void leave() {
+ if (Params)
+ Actions.popObjCTypeParamList(S, Params);
+ Params = nullptr;
+ }
+};
+
+///
+/// objc-class-declaration:
+/// '@' 'class' objc-class-forward-decl (',' objc-class-forward-decl)* ';'
+///
+/// objc-class-forward-decl:
+/// identifier objc-type-parameter-list[opt]
+///
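+/// For example (illustrative only):
+/// \code
+///   @class NSString, NSArray<ObjectType>;
+/// \endcode
+///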
+Parser::DeclGroupPtrTy
+Parser::ParseObjCAtClassDeclaration(SourceLocation atLoc) {
+ ConsumeToken(); // the identifier "class"
+ SmallVector<IdentifierInfo *, 8> ClassNames;
+ SmallVector<SourceLocation, 8> ClassLocs;
+ SmallVector<ObjCTypeParamList *, 8> ClassTypeParams;
+
+ while (1) {
+ MaybeSkipAttributes(tok::objc_class);
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ SkipUntil(tok::semi);
+ return Actions.ConvertDeclToDeclGroup(nullptr);
+ }
+ ClassNames.push_back(Tok.getIdentifierInfo());
+ ClassLocs.push_back(Tok.getLocation());
+ ConsumeToken();
+
+ // Parse the optional objc-type-parameter-list.
+ ObjCTypeParamList *TypeParams = nullptr;
+ if (Tok.is(tok::less))
+ TypeParams = parseObjCTypeParamList();
+ ClassTypeParams.push_back(TypeParams);
+ if (!TryConsumeToken(tok::comma))
+ break;
+ }
+
+ // Consume the ';'.
+ if (ExpectAndConsume(tok::semi, diag::err_expected_after, "@class"))
+ return Actions.ConvertDeclToDeclGroup(nullptr);
+
+ return Actions.ActOnForwardClassDeclaration(atLoc, ClassNames.data(),
+ ClassLocs.data(),
+ ClassTypeParams,
+ ClassNames.size());
+}
+
+void Parser::CheckNestedObjCContexts(SourceLocation AtLoc) {
+ Sema::ObjCContainerKind ock = Actions.getObjCContainerKind();
+ if (ock == Sema::OCK_None)
+ return;
+
+ Decl *Decl = Actions.getObjCDeclContext();
+ if (CurParsedObjCImpl) {
+ CurParsedObjCImpl->finish(AtLoc);
+ } else {
+ Actions.ActOnAtEnd(getCurScope(), AtLoc);
+ }
+ Diag(AtLoc, diag::err_objc_missing_end)
+ << FixItHint::CreateInsertion(AtLoc, "@end\n");
+ if (Decl)
+ Diag(Decl->getLocStart(), diag::note_objc_container_start)
+ << (int) ock;
+}
+
+///
+/// objc-interface:
+/// objc-class-interface-attributes[opt] objc-class-interface
+/// objc-category-interface
+///
+/// objc-class-interface:
+/// '@' 'interface' identifier objc-type-parameter-list[opt]
+/// objc-superclass[opt] objc-protocol-refs[opt]
+/// objc-class-instance-variables[opt]
+/// objc-interface-decl-list
+/// @end
+///
+/// objc-category-interface:
+/// '@' 'interface' identifier objc-type-parameter-list[opt]
+/// '(' identifier[opt] ')' objc-protocol-refs[opt]
+/// objc-interface-decl-list
+/// @end
+///
+/// objc-superclass:
+/// ':' identifier objc-type-arguments[opt]
+///
+/// objc-class-interface-attributes:
+/// __attribute__((visibility("default")))
+/// __attribute__((visibility("hidden")))
+/// __attribute__((deprecated))
+/// __attribute__((unavailable))
+/// __attribute__((objc_exception)) - used by NSException on 64-bit
+/// __attribute__((objc_root_class))
+///
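+/// For example (illustrative only; the names are hypothetical):
+/// \code
+///   @interface Container<T> : Base <NSCopying>  // class interface
+///   @end
+///   @interface Container (Extras)               // category interface
+///   @end
+/// \endcode
+///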
+Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
+ ParsedAttributes &attrs) {
+ assert(Tok.isObjCAtKeyword(tok::objc_interface) &&
+ "ParseObjCAtInterfaceDeclaration(): Expected @interface");
+ CheckNestedObjCContexts(AtLoc);
+ ConsumeToken(); // the "interface" identifier
+
+ // Code completion after '@interface'.
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCInterfaceDecl(getCurScope());
+ cutOffParsing();
+ return nullptr;
+ }
+
+ MaybeSkipAttributes(tok::objc_interface);
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected)
+ << tok::identifier; // missing class or category name.
+ return nullptr;
+ }
+
+ // We have a class or category name - consume it.
+ IdentifierInfo *nameId = Tok.getIdentifierInfo();
+ SourceLocation nameLoc = ConsumeToken();
+
+ // Parse the objc-type-parameter-list or objc-protocol-refs. For the latter
+ // case, LAngleLoc will be valid and ProtocolIdents will capture the
+ // protocol references (that have not yet been resolved).
+ SourceLocation LAngleLoc, EndProtoLoc;
+ SmallVector<IdentifierLocPair, 8> ProtocolIdents;
+ ObjCTypeParamList *typeParameterList = nullptr;
+ ObjCTypeParamListScope typeParamScope(Actions, getCurScope());
+ if (Tok.is(tok::less))
+ typeParameterList = parseObjCTypeParamListOrProtocolRefs(
+ typeParamScope, LAngleLoc, ProtocolIdents, EndProtoLoc);
+
+ if (Tok.is(tok::l_paren) &&
+ !isKnownToBeTypeSpecifier(GetLookAheadToken(1))) { // we have a category.
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ SourceLocation categoryLoc;
+ IdentifierInfo *categoryId = nullptr;
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCInterfaceCategory(getCurScope(), nameId, nameLoc);
+ cutOffParsing();
+ return nullptr;
+ }
+
+ // For ObjC2, the category name is optional (not an error).
+ if (Tok.is(tok::identifier)) {
+ categoryId = Tok.getIdentifierInfo();
+ categoryLoc = ConsumeToken();
+ }
+ else if (!getLangOpts().ObjC2) {
+ Diag(Tok, diag::err_expected)
+ << tok::identifier; // missing category name.
+ return nullptr;
+ }
+
+ T.consumeClose();
+ if (T.getCloseLocation().isInvalid())
+ return nullptr;
+
+ if (!attrs.empty()) { // categories don't support attributes.
+ Diag(nameLoc, diag::err_objc_no_attributes_on_category);
+ attrs.clear();
+ }
+
+ // Next, we need to check for any protocol references.
+ assert(LAngleLoc.isInvalid() && "Cannot have already parsed protocols");
+ SmallVector<Decl *, 8> ProtocolRefs;
+ SmallVector<SourceLocation, 8> ProtocolLocs;
+ if (Tok.is(tok::less) &&
+ ParseObjCProtocolReferences(ProtocolRefs, ProtocolLocs, true, true,
+ LAngleLoc, EndProtoLoc,
+ /*consumeLastToken=*/true))
+ return nullptr;
+
+ Decl *CategoryType =
+ Actions.ActOnStartCategoryInterface(AtLoc,
+ nameId, nameLoc,
+ typeParameterList,
+ categoryId, categoryLoc,
+ ProtocolRefs.data(),
+ ProtocolRefs.size(),
+ ProtocolLocs.data(),
+ EndProtoLoc);
+
+ if (Tok.is(tok::l_brace))
+ ParseObjCClassInstanceVariables(CategoryType, tok::objc_private, AtLoc);
+
+ ParseObjCInterfaceDeclList(tok::objc_not_keyword, CategoryType);
+
+ return CategoryType;
+ }
+ // Parse a class interface.
+ IdentifierInfo *superClassId = nullptr;
+ SourceLocation superClassLoc;
+ SourceLocation typeArgsLAngleLoc;
+ SmallVector<ParsedType, 4> typeArgs;
+ SourceLocation typeArgsRAngleLoc;
+ SmallVector<Decl *, 4> protocols;
+ SmallVector<SourceLocation, 4> protocolLocs;
+ if (Tok.is(tok::colon)) { // a super class is specified.
+ ConsumeToken();
+
+ // Code completion of superclass names.
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCSuperclass(getCurScope(), nameId, nameLoc);
+ cutOffParsing();
+ return nullptr;
+ }
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected)
+ << tok::identifier; // missing super class name.
+ return nullptr;
+ }
+ superClassId = Tok.getIdentifierInfo();
+ superClassLoc = ConsumeToken();
+
+ // Type arguments for the superclass or protocol conformances.
+ if (Tok.is(tok::less)) {
+ parseObjCTypeArgsOrProtocolQualifiers(ParsedType(),
+ typeArgsLAngleLoc,
+ typeArgs,
+ typeArgsRAngleLoc,
+ LAngleLoc,
+ protocols,
+ protocolLocs,
+ EndProtoLoc,
+ /*consumeLastToken=*/true,
+ /*warnOnIncompleteProtocols=*/true);
+ }
+ }
+
+ // Next, we need to check for any protocol references.
+ if (LAngleLoc.isValid()) {
+ if (!ProtocolIdents.empty()) {
+ // We already parsed the protocols named when we thought we had a
+ // type parameter list. Translate them into actual protocol references.
+ for (const auto &pair : ProtocolIdents) {
+ protocolLocs.push_back(pair.second);
+ }
+ Actions.FindProtocolDeclaration(/*WarnOnDeclarations=*/true,
+ /*ForObjCContainer=*/true,
+ ProtocolIdents, protocols);
+ }
+ } else if (protocols.empty() && Tok.is(tok::less) &&
+ ParseObjCProtocolReferences(protocols, protocolLocs, true, true,
+ LAngleLoc, EndProtoLoc,
+ /*consumeLastToken=*/true)) {
+ return nullptr;
+ }
+
+ if (Tok.isNot(tok::less))
+ Actions.ActOnTypedefedProtocols(protocols, superClassId, superClassLoc);
+
+ Decl *ClsType =
+ Actions.ActOnStartClassInterface(getCurScope(), AtLoc, nameId, nameLoc,
+ typeParameterList, superClassId,
+ superClassLoc,
+ typeArgs,
+ SourceRange(typeArgsLAngleLoc,
+ typeArgsRAngleLoc),
+ protocols.data(), protocols.size(),
+ protocolLocs.data(),
+ EndProtoLoc, attrs.getList());
+
+ if (Tok.is(tok::l_brace))
+ ParseObjCClassInstanceVariables(ClsType, tok::objc_protected, AtLoc);
+
+ ParseObjCInterfaceDeclList(tok::objc_interface, ClsType);
+
+ return ClsType;
+}
+
+/// Add an attribute for a context-sensitive type nullability to the given
+/// declarator.
+static void addContextSensitiveTypeNullability(Parser &P,
+ Declarator &D,
+ NullabilityKind nullability,
+ SourceLocation nullabilityLoc,
+ bool &addedToDeclSpec) {
+ // Create the attribute.
+ auto getNullabilityAttr = [&]() -> AttributeList * {
+ return D.getAttributePool().create(
+ P.getNullabilityKeyword(nullability),
+ SourceRange(nullabilityLoc),
+ nullptr, SourceLocation(),
+ nullptr, 0,
+ AttributeList::AS_ContextSensitiveKeyword);
+ };
+
+ if (D.getNumTypeObjects() > 0) {
+ // Add the attribute to the declarator chunk nearest the declarator.
+ auto nullabilityAttr = getNullabilityAttr();
+ DeclaratorChunk &chunk = D.getTypeObject(0);
+ nullabilityAttr->setNext(chunk.getAttrListRef());
+ chunk.getAttrListRef() = nullabilityAttr;
+ } else if (!addedToDeclSpec) {
+ // Otherwise, just put it on the declaration specifiers (if one
+ // isn't there already).
+ D.getMutableDeclSpec().addAttributes(getNullabilityAttr());
+ addedToDeclSpec = true;
+ }
+}
+
+/// Parse an Objective-C type parameter list, if present, or capture
+/// the locations of the protocol identifiers for a list of protocol
+/// references.
+///
+/// objc-type-parameter-list:
+/// '<' objc-type-parameter (',' objc-type-parameter)* '>'
+///
+/// objc-type-parameter:
+/// objc-type-parameter-variance[opt] identifier objc-type-parameter-bound[opt]
+///
+/// objc-type-parameter-bound:
+/// ':' type-name
+///
+/// objc-type-parameter-variance:
+/// '__covariant'
+/// '__contravariant'
+///
+/// \param lAngleLoc The location of the starting '<'.
+///
+/// \param protocolIdents Will capture the list of identifiers, if the
+/// angle brackets contain a list of protocol references rather than a
+/// type parameter list.
+///
+/// \param rAngleLoc The location of the ending '>'.
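+///
+/// The ambiguity being resolved (illustrative; 'Foo' and 'Base' are
+/// hypothetical names): after '@interface Foo<' the angle brackets may hold
+/// either form:
+/// \code
+///   @interface Foo<T : NSObject *> : Base  // type parameter list
+///   @interface Foo<NSCopying>              // protocol references
+/// \endcode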
+ObjCTypeParamList *Parser::parseObjCTypeParamListOrProtocolRefs(
+ ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
+ SmallVectorImpl<IdentifierLocPair> &protocolIdents,
+ SourceLocation &rAngleLoc, bool mayBeProtocolList) {
+ assert(Tok.is(tok::less) && "Not at the beginning of a type parameter list");
+
+ // Within the type parameter list, don't treat '>' as an operator.
+ GreaterThanIsOperatorScope G(GreaterThanIsOperator, false);
+
+ // Local function to "flush" the protocol identifiers, turning them into
+ // type parameters.
+ SmallVector<Decl *, 4> typeParams;
+ auto makeProtocolIdentsIntoTypeParameters = [&]() {
+ unsigned index = 0;
+ for (const auto &pair : protocolIdents) {
+ DeclResult typeParam = Actions.actOnObjCTypeParam(
+ getCurScope(),
+ ObjCTypeParamVariance::Invariant,
+ SourceLocation(),
+ index++,
+ pair.first,
+ pair.second,
+ SourceLocation(),
+ ParsedType());
+ if (typeParam.isUsable())
+ typeParams.push_back(typeParam.get());
+ }
+
+ protocolIdents.clear();
+ mayBeProtocolList = false;
+ };
+
+ bool invalid = false;
+ lAngleLoc = ConsumeToken();
+
+ do {
+ // Parse the variance, if any.
+ SourceLocation varianceLoc;
+ ObjCTypeParamVariance variance = ObjCTypeParamVariance::Invariant;
+ if (Tok.is(tok::kw___covariant) || Tok.is(tok::kw___contravariant)) {
+ variance = Tok.is(tok::kw___covariant)
+ ? ObjCTypeParamVariance::Covariant
+ : ObjCTypeParamVariance::Contravariant;
+ varianceLoc = ConsumeToken();
+
+      // Once we've seen a variance specifier, we know this is not a
+ // list of protocol references.
+ if (mayBeProtocolList) {
+ // Up until now, we have been queuing up parameters because they
+ // might be protocol references. Turn them into parameters now.
+ makeProtocolIdentsIntoTypeParameters();
+ }
+ }
+
+ // Parse the identifier.
+ if (!Tok.is(tok::identifier)) {
+ // Code completion.
+ if (Tok.is(tok::code_completion)) {
+ // FIXME: If these aren't protocol references, we'll need different
+ // completions.
+ Actions.CodeCompleteObjCProtocolReferences(protocolIdents);
+ cutOffParsing();
+
+        // FIXME: Better recovery here?
+ return nullptr;
+ }
+
+ Diag(Tok, diag::err_objc_expected_type_parameter);
+ invalid = true;
+ break;
+ }
+
+ IdentifierInfo *paramName = Tok.getIdentifierInfo();
+ SourceLocation paramLoc = ConsumeToken();
+
+ // If there is a bound, parse it.
+ SourceLocation colonLoc;
+ TypeResult boundType;
+ if (TryConsumeToken(tok::colon, colonLoc)) {
+ // Once we've seen a bound, we know this is not a list of protocol
+ // references.
+ if (mayBeProtocolList) {
+ // Up until now, we have been queuing up parameters because they
+ // might be protocol references. Turn them into parameters now.
+ makeProtocolIdentsIntoTypeParameters();
+ }
+
+ // type-name
+ boundType = ParseTypeName();
+ if (boundType.isInvalid())
+ invalid = true;
+ } else if (mayBeProtocolList) {
+ // If this could still be a protocol list, just capture the identifier.
+ // We don't want to turn it into a parameter.
+ protocolIdents.push_back(std::make_pair(paramName, paramLoc));
+ continue;
+ }
+
+ // Create the type parameter.
+ DeclResult typeParam = Actions.actOnObjCTypeParam(getCurScope(),
+ variance,
+ varianceLoc,
+ typeParams.size(),
+ paramName,
+ paramLoc,
+ colonLoc,
+ boundType.isUsable()
+ ? boundType.get()
+ : ParsedType());
+ if (typeParam.isUsable())
+ typeParams.push_back(typeParam.get());
+ } while (TryConsumeToken(tok::comma));
+
+ // Parse the '>'.
+ if (invalid) {
+ SkipUntil(tok::greater, tok::at, StopBeforeMatch);
+ if (Tok.is(tok::greater))
+ ConsumeToken();
+ } else if (ParseGreaterThanInTemplateList(rAngleLoc,
+ /*ConsumeLastToken=*/true,
+ /*ObjCGenericList=*/true)) {
+ Diag(lAngleLoc, diag::note_matching) << "'<'";
+    SkipUntil({tok::greater, tok::greaterequal, tok::at, tok::minus,
+               tok::plus, tok::colon, tok::l_paren, tok::l_brace,
+               tok::comma, tok::semi},
+ StopBeforeMatch);
+ if (Tok.is(tok::greater))
+ ConsumeToken();
+ }
+
+ if (mayBeProtocolList) {
+ // A type parameter list must be followed by either a ':' (indicating the
+ // presence of a superclass) or a '(' (indicating that this is a category
+ // or extension). This disambiguates between an objc-type-parameter-list
+    // and an objc-protocol-refs.
+ if (Tok.isNot(tok::colon) && Tok.isNot(tok::l_paren)) {
+ // Returning null indicates that we don't have a type parameter list.
+      // The results the caller needs in order to handle the protocol
+      // references have already been captured in the reference parameters.
+ return nullptr;
+ }
+
+ // We have a type parameter list that looks like a list of protocol
+ // references. Turn that parameter list into type parameters.
+ makeProtocolIdentsIntoTypeParameters();
+ }
+
+ // Form the type parameter list and enter its scope.
+ ObjCTypeParamList *list = Actions.actOnObjCTypeParamList(
+ getCurScope(),
+ lAngleLoc,
+ typeParams,
+ rAngleLoc);
+ Scope.enter(list);
+
+ // Clear out the angle locations; they're used by the caller to indicate
+ // whether there are any protocol references.
+ lAngleLoc = SourceLocation();
+ rAngleLoc = SourceLocation();
+ return invalid ? nullptr : list;
+}
+
+/// Parse an objc-type-parameter-list.
+ObjCTypeParamList *Parser::parseObjCTypeParamList() {
+ SourceLocation lAngleLoc;
+ SmallVector<IdentifierLocPair, 1> protocolIdents;
+ SourceLocation rAngleLoc;
+
+ ObjCTypeParamListScope Scope(Actions, getCurScope());
+ return parseObjCTypeParamListOrProtocolRefs(Scope, lAngleLoc, protocolIdents,
+ rAngleLoc,
+ /*mayBeProtocolList=*/false);
+}
+
+/// objc-interface-decl-list:
+/// empty
+/// objc-interface-decl-list objc-property-decl [OBJC2]
+/// objc-interface-decl-list objc-method-requirement [OBJC2]
+/// objc-interface-decl-list objc-method-proto ';'
+/// objc-interface-decl-list declaration
+/// objc-interface-decl-list ';'
+///
+/// objc-method-requirement: [OBJC2]
+/// @required
+/// @optional
+///
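+///
+/// For example (illustrative only), a protocol body mixing the alternatives:
+/// \code
+///   @required                        // objc-method-requirement
+///   - (void)run;                     // objc-method-proto ';'
+///   @property (readonly) int count;  // objc-property-decl
+/// \endcode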
+void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
+ Decl *CDecl) {
+ SmallVector<Decl *, 32> allMethods;
+ SmallVector<DeclGroupPtrTy, 8> allTUVariables;
+ tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword;
+
+ SourceRange AtEnd;
+
+ while (1) {
+ // If this is a method prototype, parse it.
+ if (Tok.isOneOf(tok::minus, tok::plus)) {
+ if (Decl *methodPrototype =
+ ParseObjCMethodPrototype(MethodImplKind, false))
+ allMethods.push_back(methodPrototype);
+ // Consume the ';' here, since ParseObjCMethodPrototype() is re-used for
+ // method definitions.
+ if (ExpectAndConsumeSemi(diag::err_expected_semi_after_method_proto)) {
+      // We didn't find a semi and we errored out. Skip until a ';' or '@'.
+ SkipUntil(tok::at, StopAtSemi | StopBeforeMatch);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ }
+ continue;
+ }
+ if (Tok.is(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_minus_or_plus);
+ ParseObjCMethodDecl(Tok.getLocation(),
+ tok::minus,
+ MethodImplKind, false);
+ continue;
+ }
+ // Ignore excess semicolons.
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ continue;
+ }
+
+ // If we got to the end of the file, exit the loop.
+ if (isEofOrEom())
+ break;
+
+ // Code completion within an Objective-C interface.
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(),
+ CurParsedObjCImpl? Sema::PCC_ObjCImplementation
+ : Sema::PCC_ObjCInterface);
+ return cutOffParsing();
+ }
+
+ // If we don't have an @ directive, parse it as a function definition.
+ if (Tok.isNot(tok::at)) {
+ // The code below does not consume '}'s because it is afraid of eating the
+ // end of a namespace. Because of the way this code is structured, an
+ // erroneous r_brace would cause an infinite loop if not handled here.
+ if (Tok.is(tok::r_brace))
+ break;
+ ParsedAttributesWithRange attrs(AttrFactory);
+ allTUVariables.push_back(ParseDeclarationOrFunctionDefinition(attrs));
+ continue;
+ }
+
+ // Otherwise, we have an @ directive, eat the @.
+ SourceLocation AtLoc = ConsumeToken(); // the "@"
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCAtDirective(getCurScope());
+ return cutOffParsing();
+ }
+
+ tok::ObjCKeywordKind DirectiveKind = Tok.getObjCKeywordID();
+
+ if (DirectiveKind == tok::objc_end) { // @end -> terminate list
+ AtEnd.setBegin(AtLoc);
+ AtEnd.setEnd(Tok.getLocation());
+ break;
+ } else if (DirectiveKind == tok::objc_not_keyword) {
+ Diag(Tok, diag::err_objc_unknown_at);
+ SkipUntil(tok::semi);
+ continue;
+ }
+
+ // Eat the identifier.
+ ConsumeToken();
+
+ switch (DirectiveKind) {
+ default:
+ // FIXME: If someone forgets an @end on a protocol, this loop will
+ // continue to eat up tons of stuff and spew lots of nonsense errors. It
+ // would probably be better to bail out if we saw an @class or @interface
+ // or something like that.
+ Diag(AtLoc, diag::err_objc_illegal_interface_qual);
+ // Skip until we see an '@' or '}' or ';'.
+ SkipUntil(tok::r_brace, tok::at, StopAtSemi);
+ break;
+
+ case tok::objc_implementation:
+ case tok::objc_interface:
+ Diag(AtLoc, diag::err_objc_missing_end)
+ << FixItHint::CreateInsertion(AtLoc, "@end\n");
+ Diag(CDecl->getLocStart(), diag::note_objc_container_start)
+ << (int) Actions.getObjCContainerKind();
+ ConsumeToken();
+ break;
+
+ case tok::objc_required:
+ case tok::objc_optional:
+ // This is only valid on protocols.
+ // FIXME: Should this check for ObjC2 being enabled?
+ if (contextKey != tok::objc_protocol)
+ Diag(AtLoc, diag::err_objc_directive_only_in_protocol);
+ else
+ MethodImplKind = DirectiveKind;
+ break;
+
+ case tok::objc_property:
+ if (!getLangOpts().ObjC2)
+ Diag(AtLoc, diag::err_objc_properties_require_objc2);
+
+ ObjCDeclSpec OCDS;
+ SourceLocation LParenLoc;
+ // Parse property attribute list, if any.
+ if (Tok.is(tok::l_paren)) {
+ LParenLoc = Tok.getLocation();
+ ParseObjCPropertyAttribute(OCDS);
+ }
+
+ bool addedToDeclSpec = false;
+ auto ObjCPropertyCallback = [&](ParsingFieldDeclarator &FD) {
+ if (FD.D.getIdentifier() == nullptr) {
+ Diag(AtLoc, diag::err_objc_property_requires_field_name)
+ << FD.D.getSourceRange();
+ return;
+ }
+ if (FD.BitfieldSize) {
+ Diag(AtLoc, diag::err_objc_property_bitfield)
+ << FD.D.getSourceRange();
+ return;
+ }
+
+ // Map a nullability property attribute to a context-sensitive keyword
+ // attribute.
+ if (OCDS.getPropertyAttributes() & ObjCDeclSpec::DQ_PR_nullability)
+ addContextSensitiveTypeNullability(*this, FD.D, OCDS.getNullability(),
+ OCDS.getNullabilityLoc(),
+ addedToDeclSpec);
+
+ // Install the property declarator into interfaceDecl.
+ IdentifierInfo *SelName =
+ OCDS.getGetterName() ? OCDS.getGetterName() : FD.D.getIdentifier();
+
+ Selector GetterSel = PP.getSelectorTable().getNullarySelector(SelName);
+ IdentifierInfo *SetterName = OCDS.getSetterName();
+ Selector SetterSel;
+ if (SetterName)
+ SetterSel = PP.getSelectorTable().getSelector(1, &SetterName);
+ else
+ SetterSel = SelectorTable::constructSetterSelector(
+ PP.getIdentifierTable(), PP.getSelectorTable(),
+ FD.D.getIdentifier());
+ Decl *Property = Actions.ActOnProperty(
+ getCurScope(), AtLoc, LParenLoc, FD, OCDS, GetterSel, SetterSel,
+ MethodImplKind);
+
+ FD.complete(Property);
+ };
+
+ // Parse all the comma separated declarators.
+ ParsingDeclSpec DS(*this);
+ ParseStructDeclaration(DS, ObjCPropertyCallback);
+
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_decl_list);
+ break;
+ }
+ }
+
+ // We break out of the big loop in two cases: when we see @end or when we see
+  // EOF. In the former case, eat the @end. In the latter case, emit an error.
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCAtDirective(getCurScope());
+ return cutOffParsing();
+ } else if (Tok.isObjCAtKeyword(tok::objc_end)) {
+ ConsumeToken(); // the "end" identifier
+ } else {
+ Diag(Tok, diag::err_objc_missing_end)
+ << FixItHint::CreateInsertion(Tok.getLocation(), "\n@end\n");
+ Diag(CDecl->getLocStart(), diag::note_objc_container_start)
+ << (int) Actions.getObjCContainerKind();
+ AtEnd.setBegin(Tok.getLocation());
+ AtEnd.setEnd(Tok.getLocation());
+ }
+
+ // Insert collected methods declarations into the @interface object.
+ // This passes in an invalid SourceLocation for AtEndLoc when EOF is hit.
+ Actions.ActOnAtEnd(getCurScope(), AtEnd, allMethods, allTUVariables);
+}
+
+/// Diagnose redundant or conflicting nullability information.
+static void diagnoseRedundantPropertyNullability(Parser &P,
+ ObjCDeclSpec &DS,
+ NullabilityKind nullability,
+ SourceLocation nullabilityLoc){
+ if (DS.getNullability() == nullability) {
+ P.Diag(nullabilityLoc, diag::warn_nullability_duplicate)
+ << DiagNullabilityKind(nullability, true)
+ << SourceRange(DS.getNullabilityLoc());
+ return;
+ }
+
+ P.Diag(nullabilityLoc, diag::err_nullability_conflicting)
+ << DiagNullabilityKind(nullability, true)
+ << DiagNullabilityKind(DS.getNullability(), true)
+ << SourceRange(DS.getNullabilityLoc());
+}
+
+/// Parse property attribute declarations.
+///
+/// property-attr-decl: '(' property-attrlist ')'
+/// property-attrlist:
+/// property-attribute
+/// property-attrlist ',' property-attribute
+/// property-attribute:
+/// getter '=' identifier
+/// setter '=' identifier ':'
+/// readonly
+/// readwrite
+/// assign
+/// retain
+/// copy
+/// nonatomic
+/// atomic
+/// strong
+/// weak
+/// unsafe_unretained
+/// nonnull
+/// nullable
+/// null_unspecified
+/// null_resettable
+///
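+/// For example (illustrative only; 'name' is a hypothetical property):
+/// \code
+///   @property (nonatomic, copy, setter=setTheName:, nullable) NSString *name;
+/// \endcode
+///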
+void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS) {
+ assert(Tok.getKind() == tok::l_paren);
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ while (1) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCPropertyFlags(getCurScope(), DS);
+ return cutOffParsing();
+ }
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+
+ // If this is not an identifier at all, bail out early.
+ if (!II) {
+ T.consumeClose();
+ return;
+ }
+
+ SourceLocation AttrName = ConsumeToken(); // consume last attribute name
+
+ if (II->isStr("readonly"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_readonly);
+ else if (II->isStr("assign"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_assign);
+ else if (II->isStr("unsafe_unretained"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_unsafe_unretained);
+ else if (II->isStr("readwrite"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_readwrite);
+ else if (II->isStr("retain"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_retain);
+ else if (II->isStr("strong"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_strong);
+ else if (II->isStr("copy"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_copy);
+ else if (II->isStr("nonatomic"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_nonatomic);
+ else if (II->isStr("atomic"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_atomic);
+ else if (II->isStr("weak"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_weak);
+ else if (II->isStr("getter") || II->isStr("setter")) {
+ bool IsSetter = II->getNameStart()[0] == 's';
+
+ // getter/setter require extra treatment.
+ unsigned DiagID = IsSetter ? diag::err_objc_expected_equal_for_setter :
+ diag::err_objc_expected_equal_for_getter;
+
+ if (ExpectAndConsume(tok::equal, DiagID)) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+
+ if (Tok.is(tok::code_completion)) {
+ if (IsSetter)
+ Actions.CodeCompleteObjCPropertySetter(getCurScope());
+ else
+ Actions.CodeCompleteObjCPropertyGetter(getCurScope());
+ return cutOffParsing();
+ }
+
+ SourceLocation SelLoc;
+ IdentifierInfo *SelIdent = ParseObjCSelectorPiece(SelLoc);
+
+ if (!SelIdent) {
+ Diag(Tok, diag::err_objc_expected_selector_for_getter_setter)
+ << IsSetter;
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+
+ if (IsSetter) {
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_setter);
+ DS.setSetterName(SelIdent);
+
+ if (ExpectAndConsume(tok::colon,
+ diag::err_expected_colon_after_setter_name)) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+ } else {
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_getter);
+ DS.setGetterName(SelIdent);
+ }
+ } else if (II->isStr("nonnull")) {
+ if (DS.getPropertyAttributes() & ObjCDeclSpec::DQ_PR_nullability)
+ diagnoseRedundantPropertyNullability(*this, DS,
+ NullabilityKind::NonNull,
+ Tok.getLocation());
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_nullability);
+ DS.setNullability(Tok.getLocation(), NullabilityKind::NonNull);
+ } else if (II->isStr("nullable")) {
+ if (DS.getPropertyAttributes() & ObjCDeclSpec::DQ_PR_nullability)
+ diagnoseRedundantPropertyNullability(*this, DS,
+ NullabilityKind::Nullable,
+ Tok.getLocation());
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_nullability);
+ DS.setNullability(Tok.getLocation(), NullabilityKind::Nullable);
+ } else if (II->isStr("null_unspecified")) {
+ if (DS.getPropertyAttributes() & ObjCDeclSpec::DQ_PR_nullability)
+ diagnoseRedundantPropertyNullability(*this, DS,
+ NullabilityKind::Unspecified,
+ Tok.getLocation());
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_nullability);
+ DS.setNullability(Tok.getLocation(), NullabilityKind::Unspecified);
+ } else if (II->isStr("null_resettable")) {
+ if (DS.getPropertyAttributes() & ObjCDeclSpec::DQ_PR_nullability)
+ diagnoseRedundantPropertyNullability(*this, DS,
+ NullabilityKind::Unspecified,
+ Tok.getLocation());
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_nullability);
+ DS.setNullability(Tok.getLocation(), NullabilityKind::Unspecified);
+
+ // Also set the null_resettable bit.
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_null_resettable);
+ } else {
+ Diag(AttrName, diag::err_objc_expected_property_attr) << II;
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+
+ if (Tok.isNot(tok::comma))
+ break;
+
+ ConsumeToken();
+ }
+
+ T.consumeClose();
+}
+
+/// objc-method-proto:
+/// objc-instance-method objc-method-decl objc-method-attributes[opt]
+/// objc-class-method objc-method-decl objc-method-attributes[opt]
+///
+/// objc-instance-method: '-'
+/// objc-class-method: '+'
+///
+/// objc-method-attributes: [OBJC2]
+/// __attribute__((deprecated))
+///
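+/// For example (illustrative only):
+/// \code
+///   - (void)setValue:(int)value;                                 // instance
+///   + (instancetype)sharedInstance __attribute__((deprecated));  // class
+/// \endcode
+///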
+Decl *Parser::ParseObjCMethodPrototype(tok::ObjCKeywordKind MethodImplKind,
+ bool MethodDefinition) {
+ assert(Tok.isOneOf(tok::minus, tok::plus) && "expected +/-");
+
+ tok::TokenKind methodType = Tok.getKind();
+ SourceLocation mLoc = ConsumeToken();
+ Decl *MDecl = ParseObjCMethodDecl(mLoc, methodType, MethodImplKind,
+ MethodDefinition);
+ // Since this rule is used for both method declarations and definitions,
+ // the caller is (optionally) responsible for consuming the ';'.
+ return MDecl;
+}
+
+/// objc-selector:
+/// identifier
+/// one of
+/// enum struct union if else while do for switch case default
+/// break continue return goto asm sizeof typeof __alignof
+/// unsigned long const short volatile signed restrict _Complex
+/// in out inout bycopy byref oneway int char float double void _Bool
+///
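+/// For example (illustrative only), C keywords are accepted as selector
+/// pieces:
+/// \code
+///   - (void)for:(int)count;  // 'for' is a valid selector piece
+/// \endcode
+///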
+IdentifierInfo *Parser::ParseObjCSelectorPiece(SourceLocation &SelectorLoc) {
+
+ switch (Tok.getKind()) {
+ default:
+ return nullptr;
+ case tok::ampamp:
+ case tok::ampequal:
+ case tok::amp:
+ case tok::pipe:
+ case tok::tilde:
+ case tok::exclaim:
+ case tok::exclaimequal:
+ case tok::pipepipe:
+ case tok::pipeequal:
+ case tok::caret:
+ case tok::caretequal: {
+ std::string ThisTok(PP.getSpelling(Tok));
+ if (isLetter(ThisTok[0])) {
+ IdentifierInfo *II = &PP.getIdentifierTable().get(ThisTok.data());
+ Tok.setKind(tok::identifier);
+ SelectorLoc = ConsumeToken();
+ return II;
+ }
+ return nullptr;
+ }
+
+ case tok::identifier:
+ case tok::kw_asm:
+ case tok::kw_auto:
+ case tok::kw_bool:
+ case tok::kw_break:
+ case tok::kw_case:
+ case tok::kw_catch:
+ case tok::kw_char:
+ case tok::kw_class:
+ case tok::kw_const:
+ case tok::kw_const_cast:
+ case tok::kw_continue:
+ case tok::kw_default:
+ case tok::kw_delete:
+ case tok::kw_do:
+ case tok::kw_double:
+ case tok::kw_dynamic_cast:
+ case tok::kw_else:
+ case tok::kw_enum:
+ case tok::kw_explicit:
+ case tok::kw_export:
+ case tok::kw_extern:
+ case tok::kw_false:
+ case tok::kw_float:
+ case tok::kw_for:
+ case tok::kw_friend:
+ case tok::kw_goto:
+ case tok::kw_if:
+ case tok::kw_inline:
+ case tok::kw_int:
+ case tok::kw_long:
+ case tok::kw_mutable:
+ case tok::kw_namespace:
+ case tok::kw_new:
+ case tok::kw_operator:
+ case tok::kw_private:
+ case tok::kw_protected:
+ case tok::kw_public:
+ case tok::kw_register:
+ case tok::kw_reinterpret_cast:
+ case tok::kw_restrict:
+ case tok::kw_return:
+ case tok::kw_short:
+ case tok::kw_signed:
+ case tok::kw_sizeof:
+ case tok::kw_static:
+ case tok::kw_static_cast:
+ case tok::kw_struct:
+ case tok::kw_switch:
+ case tok::kw_template:
+ case tok::kw_this:
+ case tok::kw_throw:
+ case tok::kw_true:
+ case tok::kw_try:
+ case tok::kw_typedef:
+ case tok::kw_typeid:
+ case tok::kw_typename:
+ case tok::kw_typeof:
+ case tok::kw_union:
+ case tok::kw_unsigned:
+ case tok::kw_using:
+ case tok::kw_virtual:
+ case tok::kw_void:
+ case tok::kw_volatile:
+ case tok::kw_wchar_t:
+ case tok::kw_while:
+ case tok::kw__Bool:
+ case tok::kw__Complex:
+ case tok::kw___alignof:
+ case tok::kw___auto_type:
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ SelectorLoc = ConsumeToken();
+ return II;
+ }
+}
+
+/// objc-for-collection-in: 'in'
+///
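+/// For example (illustrative only):
+/// \code
+///   for (id item in collection) { /* ... */ }
+/// \endcode
+///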
+bool Parser::isTokIdentifier_in() const {
+ // FIXME: May have to do additional look-ahead to only allow for
+ // valid tokens following an 'in'; such as an identifier, unary operators,
+ // '[' etc.
+ return (getLangOpts().ObjC2 && Tok.is(tok::identifier) &&
+ Tok.getIdentifierInfo() == ObjCTypeQuals[objc_in]);
+}
+
+/// ParseObjCTypeQualifierList - This routine parses the objective-c's type
+/// qualifier list and builds their bitmask representation in the input
+/// argument.
+///
+/// objc-type-qualifiers:
+/// objc-type-qualifier
+/// objc-type-qualifiers objc-type-qualifier
+///
+/// objc-type-qualifier:
+/// 'in'
+/// 'out'
+/// 'inout'
+/// 'oneway'
+/// 'bycopy'
+/// 'byref'
+/// 'nonnull'
+/// 'nullable'
+/// 'null_unspecified'
+///
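+/// For example (illustrative; the qualifiers appear inside the parenthesized
+/// type of a parameter or result):
+/// \code
+///   - (void)getValue:(out id *)result;
+///   - (oneway void)run;
+///   - (void)setName:(nullable NSString *)name;
+/// \endcode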
+void Parser::ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
+ Declarator::TheContext Context) {
+ assert(Context == Declarator::ObjCParameterContext ||
+ Context == Declarator::ObjCResultContext);
+
+  while (true) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCPassingType(getCurScope(), DS,
+ Context == Declarator::ObjCParameterContext);
+ return cutOffParsing();
+ }
+
+ if (Tok.isNot(tok::identifier))
+ return;
+
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ for (unsigned i = 0; i != objc_NumQuals; ++i) {
+ if (II != ObjCTypeQuals[i] ||
+ NextToken().is(tok::less) ||
+ NextToken().is(tok::coloncolon))
+ continue;
+
+ ObjCDeclSpec::ObjCDeclQualifier Qual;
+ NullabilityKind Nullability;
+ switch (i) {
+ default: llvm_unreachable("Unknown decl qualifier");
+ case objc_in: Qual = ObjCDeclSpec::DQ_In; break;
+ case objc_out: Qual = ObjCDeclSpec::DQ_Out; break;
+ case objc_inout: Qual = ObjCDeclSpec::DQ_Inout; break;
+ case objc_oneway: Qual = ObjCDeclSpec::DQ_Oneway; break;
+ case objc_bycopy: Qual = ObjCDeclSpec::DQ_Bycopy; break;
+ case objc_byref: Qual = ObjCDeclSpec::DQ_Byref; break;
+
+ case objc_nonnull:
+ Qual = ObjCDeclSpec::DQ_CSNullability;
+ Nullability = NullabilityKind::NonNull;
+ break;
+
+ case objc_nullable:
+ Qual = ObjCDeclSpec::DQ_CSNullability;
+ Nullability = NullabilityKind::Nullable;
+ break;
+
+ case objc_null_unspecified:
+ Qual = ObjCDeclSpec::DQ_CSNullability;
+ Nullability = NullabilityKind::Unspecified;
+ break;
+ }
+
+ // FIXME: Diagnose redundant specifiers.
+ DS.setObjCDeclQualifier(Qual);
+ if (Qual == ObjCDeclSpec::DQ_CSNullability)
+ DS.setNullability(Tok.getLocation(), Nullability);
+
+ ConsumeToken();
+ II = nullptr;
+ break;
+ }
+
+ // If this wasn't a recognized qualifier, bail out.
+ if (II) return;
+ }
+}
+
+/// Take all the decl attributes out of the given list and add
+/// them to the given attribute set.
+static void takeDeclAttributes(ParsedAttributes &attrs,
+ AttributeList *list) {
+ while (list) {
+ AttributeList *cur = list;
+ list = cur->getNext();
+
+ if (!cur->isUsedAsTypeAttr()) {
+ // Clear out the next pointer. We're really completely
+ // destroying the internal invariants of the declarator here,
+ // but it doesn't matter because we're done with it.
+ cur->setNext(nullptr);
+ attrs.add(cur);
+ }
+ }
+}
+
+/// takeDeclAttributes - Take all the decl attributes from the given
+/// declarator and add them to the given list.
+static void takeDeclAttributes(ParsedAttributes &attrs,
+ Declarator &D) {
+ // First, take ownership of all attributes.
+ attrs.getPool().takeAllFrom(D.getAttributePool());
+ attrs.getPool().takeAllFrom(D.getDeclSpec().getAttributePool());
+
+ // Now actually move the attributes over.
+ takeDeclAttributes(attrs, D.getDeclSpec().getAttributes().getList());
+ takeDeclAttributes(attrs, D.getAttributes());
+ for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i)
+ takeDeclAttributes(attrs,
+ const_cast<AttributeList*>(D.getTypeObject(i).getAttrs()));
+}
+
+/// objc-type-name:
+/// '(' objc-type-qualifiers[opt] type-name ')'
+/// '(' objc-type-qualifiers[opt] ')'
+///
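+/// For example, the parenthesized portions of (illustrative):
+/// \code
+///   - (oneway void)run;              // '(oneway void)'
+///   - (void)fill:(inout int *)buf;   // '(inout int *)'
+/// \endcode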
+ParsedType Parser::ParseObjCTypeName(ObjCDeclSpec &DS,
+ Declarator::TheContext context,
+ ParsedAttributes *paramAttrs) {
+ assert(context == Declarator::ObjCParameterContext ||
+ context == Declarator::ObjCResultContext);
+ assert((paramAttrs != nullptr) ==
+ (context == Declarator::ObjCParameterContext));
+
+ assert(Tok.is(tok::l_paren) && "expected (");
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ SourceLocation TypeStartLoc = Tok.getLocation();
+ ObjCDeclContextSwitch ObjCDC(*this);
+
+ // Parse type qualifiers, in, inout, etc.
+ ParseObjCTypeQualifierList(DS, context);
+
+ ParsedType Ty;
+ if (isTypeSpecifierQualifier() || isObjCInstancetype()) {
+ // Parse an abstract declarator.
+ DeclSpec declSpec(AttrFactory);
+ declSpec.setObjCQualifiers(&DS);
+ DeclSpecContext dsContext = DSC_normal;
+ if (context == Declarator::ObjCResultContext)
+ dsContext = DSC_objc_method_result;
+ ParseSpecifierQualifierList(declSpec, AS_none, dsContext);
+ declSpec.SetRangeEnd(Tok.getLocation());
+ Declarator declarator(declSpec, context);
+ ParseDeclarator(declarator);
+
+ // If that's not invalid, extract a type.
+ if (!declarator.isInvalidType()) {
+ // Map a nullability specifier to a context-sensitive keyword attribute.
+ bool addedToDeclSpec = false;
+ if (DS.getObjCDeclQualifier() & ObjCDeclSpec::DQ_CSNullability)
+ addContextSensitiveTypeNullability(*this, declarator,
+ DS.getNullability(),
+ DS.getNullabilityLoc(),
+ addedToDeclSpec);
+
+ TypeResult type = Actions.ActOnTypeName(getCurScope(), declarator);
+ if (!type.isInvalid())
+ Ty = type.get();
+
+ // If we're parsing a parameter, steal all the decl attributes
+ // and add them to the decl spec.
+ if (context == Declarator::ObjCParameterContext)
+ takeDeclAttributes(*paramAttrs, declarator);
+ }
+ }
+
+ if (Tok.is(tok::r_paren))
+ T.consumeClose();
+ else if (Tok.getLocation() == TypeStartLoc) {
+ // If we didn't eat any tokens, then this isn't a type.
+ Diag(Tok, diag::err_expected_type);
+ SkipUntil(tok::r_paren, StopAtSemi);
+ } else {
+ // Otherwise, we found *something*, but didn't get a ')' in the right
+ // place. Emit an error then return what we have as the type.
+ T.consumeClose();
+ }
+ return Ty;
+}
+
+/// objc-method-decl:
+/// objc-selector
+/// objc-keyword-selector objc-parmlist[opt]
+/// objc-type-name objc-selector
+/// objc-type-name objc-keyword-selector objc-parmlist[opt]
+///
+/// objc-keyword-selector:
+/// objc-keyword-decl
+/// objc-keyword-selector objc-keyword-decl
+///
+/// objc-keyword-decl:
+/// objc-selector ':' objc-type-name objc-keyword-attributes[opt] identifier
+/// objc-selector ':' objc-keyword-attributes[opt] identifier
+/// ':' objc-type-name objc-keyword-attributes[opt] identifier
+/// ':' objc-keyword-attributes[opt] identifier
+///
+/// objc-parmlist:
+/// objc-parms objc-ellipsis[opt]
+///
+/// objc-parms:
+/// objc-parms , parameter-declaration
+///
+/// objc-ellipsis:
+/// , ...
+///
+/// objc-keyword-attributes: [OBJC2]
+/// __attribute__((unused))
+///
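+/// For example (illustrative):
+/// \code
+///   + (instancetype)alloc;                          // nullary selector
+///   - (BOOL)writeToFile:(NSString *)path
+///           atomically:(BOOL)flag;                  // keyword selector
+///   - (void)appendFormat:(NSString *)format, ...;   // objc-ellipsis
+/// \endcode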
+Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
+ tok::TokenKind mType,
+ tok::ObjCKeywordKind MethodImplKind,
+ bool MethodDefinition) {
+ ParsingDeclRAIIObject PD(*this, ParsingDeclRAIIObject::NoParent);
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCMethodDecl(getCurScope(), mType == tok::minus,
+ /*ReturnType=*/ ParsedType());
+ cutOffParsing();
+ return nullptr;
+ }
+
+ // Parse the return type if present.
+ ParsedType ReturnType;
+ ObjCDeclSpec DSRet;
+ if (Tok.is(tok::l_paren))
+ ReturnType = ParseObjCTypeName(DSRet, Declarator::ObjCResultContext,
+ nullptr);
+
+ // If attributes exist before the method, parse them.
+ ParsedAttributes methodAttrs(AttrFactory);
+ if (getLangOpts().ObjC2)
+ MaybeParseGNUAttributes(methodAttrs);
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCMethodDecl(getCurScope(), mType == tok::minus,
+ ReturnType);
+ cutOffParsing();
+ return nullptr;
+ }
+
+ // Now parse the selector.
+ SourceLocation selLoc;
+ IdentifierInfo *SelIdent = ParseObjCSelectorPiece(selLoc);
+
+ // An unnamed colon is valid.
+ if (!SelIdent && Tok.isNot(tok::colon)) { // missing selector name.
+ Diag(Tok, diag::err_expected_selector_for_method)
+ << SourceRange(mLoc, Tok.getLocation());
+ // Skip until we get a ; or @.
+ SkipUntil(tok::at, StopAtSemi | StopBeforeMatch);
+ return nullptr;
+ }
+
+ SmallVector<DeclaratorChunk::ParamInfo, 8> CParamInfo;
+ if (Tok.isNot(tok::colon)) {
+ // If attributes exist after the method, parse them.
+ if (getLangOpts().ObjC2)
+ MaybeParseGNUAttributes(methodAttrs);
+
+ Selector Sel = PP.getSelectorTable().getNullarySelector(SelIdent);
+ Decl *Result
+ = Actions.ActOnMethodDeclaration(getCurScope(), mLoc, Tok.getLocation(),
+ mType, DSRet, ReturnType,
+ selLoc, Sel, nullptr,
+ CParamInfo.data(), CParamInfo.size(),
+ methodAttrs.getList(), MethodImplKind,
+ false, MethodDefinition);
+ PD.complete(Result);
+ return Result;
+ }
+
+ SmallVector<IdentifierInfo *, 12> KeyIdents;
+ SmallVector<SourceLocation, 12> KeyLocs;
+ SmallVector<Sema::ObjCArgInfo, 12> ArgInfos;
+ ParseScope PrototypeScope(this, Scope::FunctionPrototypeScope |
+ Scope::FunctionDeclarationScope | Scope::DeclScope);
+
+ AttributePool allParamAttrs(AttrFactory);
+  while (true) {
+ ParsedAttributes paramAttrs(AttrFactory);
+ Sema::ObjCArgInfo ArgInfo;
+
+ // Each iteration parses a single keyword argument.
+ if (ExpectAndConsume(tok::colon))
+ break;
+
+ ArgInfo.Type = ParsedType();
+ if (Tok.is(tok::l_paren)) // Parse the argument type if present.
+ ArgInfo.Type = ParseObjCTypeName(ArgInfo.DeclSpec,
+ Declarator::ObjCParameterContext,
+ &paramAttrs);
+
+ // If attributes exist before the argument name, parse them.
+ // Regardless, collect all the attributes we've parsed so far.
+ ArgInfo.ArgAttrs = nullptr;
+ if (getLangOpts().ObjC2) {
+ MaybeParseGNUAttributes(paramAttrs);
+ ArgInfo.ArgAttrs = paramAttrs.getList();
+ }
+
+ // Code completion for the next piece of the selector.
+ if (Tok.is(tok::code_completion)) {
+ KeyIdents.push_back(SelIdent);
+ Actions.CodeCompleteObjCMethodDeclSelector(getCurScope(),
+ mType == tok::minus,
+ /*AtParameterName=*/true,
+ ReturnType, KeyIdents);
+ cutOffParsing();
+ return nullptr;
+ }
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected)
+ << tok::identifier; // missing argument name.
+ break;
+ }
+
+ ArgInfo.Name = Tok.getIdentifierInfo();
+ ArgInfo.NameLoc = Tok.getLocation();
+ ConsumeToken(); // Eat the identifier.
+
+ ArgInfos.push_back(ArgInfo);
+ KeyIdents.push_back(SelIdent);
+ KeyLocs.push_back(selLoc);
+
+ // Make sure the attributes persist.
+ allParamAttrs.takeAllFrom(paramAttrs.getPool());
+
+ // Code completion for the next piece of the selector.
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCMethodDeclSelector(getCurScope(),
+ mType == tok::minus,
+ /*AtParameterName=*/false,
+ ReturnType, KeyIdents);
+ cutOffParsing();
+ return nullptr;
+ }
+
+ // Check for another keyword selector.
+ SelIdent = ParseObjCSelectorPiece(selLoc);
+ if (!SelIdent && Tok.isNot(tok::colon))
+ break;
+ if (!SelIdent) {
+ SourceLocation ColonLoc = Tok.getLocation();
+ if (PP.getLocForEndOfToken(ArgInfo.NameLoc) == ColonLoc) {
+ Diag(ArgInfo.NameLoc, diag::warn_missing_selector_name) << ArgInfo.Name;
+ Diag(ArgInfo.NameLoc, diag::note_missing_selector_name) << ArgInfo.Name;
+ Diag(ColonLoc, diag::note_force_empty_selector_name) << ArgInfo.Name;
+ }
+ }
+ // We have a selector or a colon, continue parsing.
+ }
+
+ bool isVariadic = false;
+ bool cStyleParamWarned = false;
+ // Parse the (optional) parameter list.
+ while (Tok.is(tok::comma)) {
+ ConsumeToken();
+ if (Tok.is(tok::ellipsis)) {
+ isVariadic = true;
+ ConsumeToken();
+ break;
+ }
+ if (!cStyleParamWarned) {
+ Diag(Tok, diag::warn_cstyle_param);
+ cStyleParamWarned = true;
+ }
+ DeclSpec DS(AttrFactory);
+ ParseDeclarationSpecifiers(DS);
+ // Parse the declarator.
+ Declarator ParmDecl(DS, Declarator::PrototypeContext);
+ ParseDeclarator(ParmDecl);
+ IdentifierInfo *ParmII = ParmDecl.getIdentifier();
+ Decl *Param = Actions.ActOnParamDeclarator(getCurScope(), ParmDecl);
+ CParamInfo.push_back(DeclaratorChunk::ParamInfo(ParmII,
+ ParmDecl.getIdentifierLoc(),
+ Param,
+ nullptr));
+ }
+
+ // FIXME: Add support for optional parameter list...
+ // If attributes exist after the method, parse them.
+ if (getLangOpts().ObjC2)
+ MaybeParseGNUAttributes(methodAttrs);
+
+  if (KeyIdents.empty())
+ return nullptr;
+
+ Selector Sel = PP.getSelectorTable().getSelector(KeyIdents.size(),
+ &KeyIdents[0]);
+ Decl *Result
+ = Actions.ActOnMethodDeclaration(getCurScope(), mLoc, Tok.getLocation(),
+ mType, DSRet, ReturnType,
+ KeyLocs, Sel, &ArgInfos[0],
+ CParamInfo.data(), CParamInfo.size(),
+ methodAttrs.getList(),
+ MethodImplKind, isVariadic, MethodDefinition);
+
+ PD.complete(Result);
+ return Result;
+}
+
+/// objc-protocol-refs:
+/// '<' identifier-list '>'
+///
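+/// For example, the '<NSCopying, NSSecureCoding>' in (illustrative):
+/// \code
+///   @interface Widget : NSObject <NSCopying, NSSecureCoding>
+/// \endcode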
+bool Parser::
+ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &Protocols,
+ SmallVectorImpl<SourceLocation> &ProtocolLocs,
+ bool WarnOnDeclarations, bool ForObjCContainer,
+ SourceLocation &LAngleLoc, SourceLocation &EndLoc,
+ bool consumeLastToken) {
+ assert(Tok.is(tok::less) && "expected <");
+
+ LAngleLoc = ConsumeToken(); // the "<"
+
+ SmallVector<IdentifierLocPair, 8> ProtocolIdents;
+
+  while (true) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCProtocolReferences(ProtocolIdents);
+ cutOffParsing();
+ return true;
+ }
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ SkipUntil(tok::greater, StopAtSemi);
+ return true;
+ }
+ ProtocolIdents.push_back(std::make_pair(Tok.getIdentifierInfo(),
+ Tok.getLocation()));
+ ProtocolLocs.push_back(Tok.getLocation());
+ ConsumeToken();
+
+ if (!TryConsumeToken(tok::comma))
+ break;
+ }
+
+ // Consume the '>'.
+ if (ParseGreaterThanInTemplateList(EndLoc, consumeLastToken,
+ /*ObjCGenericList=*/false))
+ return true;
+
+ // Convert the list of protocols identifiers into a list of protocol decls.
+ Actions.FindProtocolDeclaration(WarnOnDeclarations, ForObjCContainer,
+ ProtocolIdents, Protocols);
+ return false;
+}
+
+TypeResult Parser::parseObjCProtocolQualifierType(SourceLocation &rAngleLoc) {
+ assert(Tok.is(tok::less) && "Protocol qualifiers start with '<'");
+ assert(getLangOpts().ObjC1 && "Protocol qualifiers only exist in Objective-C");
+
+ SourceLocation lAngleLoc;
+ SmallVector<Decl *, 8> protocols;
+ SmallVector<SourceLocation, 8> protocolLocs;
+ (void)ParseObjCProtocolReferences(protocols, protocolLocs, false, false,
+ lAngleLoc, rAngleLoc,
+ /*consumeLastToken=*/true);
+ TypeResult result = Actions.actOnObjCProtocolQualifierType(lAngleLoc,
+ protocols,
+ protocolLocs,
+ rAngleLoc);
+ if (result.isUsable()) {
+ Diag(lAngleLoc, diag::warn_objc_protocol_qualifier_missing_id)
+ << FixItHint::CreateInsertion(lAngleLoc, "id")
+ << SourceRange(lAngleLoc, rAngleLoc);
+ }
+
+ return result;
+}
+
+/// Parse Objective-C type arguments or protocol qualifiers.
+///
+/// objc-type-arguments:
+/// '<' type-name '...'[opt] (',' type-name '...'[opt])* '>'
+///
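+/// For example, the '<NSString *, NSNumber *>' in (illustrative):
+/// \code
+///   NSDictionary<NSString *, NSNumber *> *scores;
+/// \endcode
+/// Protocol qualifiers use the same '<...>' syntax, so a list of bare
+/// identifiers is parsed first and Sema disambiguates the two cases.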
+void Parser::parseObjCTypeArgsOrProtocolQualifiers(
+ ParsedType baseType,
+ SourceLocation &typeArgsLAngleLoc,
+ SmallVectorImpl<ParsedType> &typeArgs,
+ SourceLocation &typeArgsRAngleLoc,
+ SourceLocation &protocolLAngleLoc,
+ SmallVectorImpl<Decl *> &protocols,
+ SmallVectorImpl<SourceLocation> &protocolLocs,
+ SourceLocation &protocolRAngleLoc,
+ bool consumeLastToken,
+ bool warnOnIncompleteProtocols) {
+ assert(Tok.is(tok::less) && "Not at the start of type args or protocols");
+ SourceLocation lAngleLoc = ConsumeToken();
+
+ // Whether all of the elements we've parsed thus far are single
+ // identifiers, which might be types or might be protocols.
+ bool allSingleIdentifiers = true;
+ SmallVector<IdentifierInfo *, 4> identifiers;
+ SmallVectorImpl<SourceLocation> &identifierLocs = protocolLocs;
+
+ // Parse a list of comma-separated identifiers, bailing out if we
+ // see something different.
+ do {
+ // Parse a single identifier.
+ if (Tok.is(tok::identifier) &&
+ (NextToken().is(tok::comma) ||
+ NextToken().is(tok::greater) ||
+ NextToken().is(tok::greatergreater))) {
+ identifiers.push_back(Tok.getIdentifierInfo());
+ identifierLocs.push_back(ConsumeToken());
+ continue;
+ }
+
+ if (Tok.is(tok::code_completion)) {
+ // FIXME: Also include types here.
+ SmallVector<IdentifierLocPair, 4> identifierLocPairs;
+ for (unsigned i = 0, n = identifiers.size(); i != n; ++i) {
+ identifierLocPairs.push_back(IdentifierLocPair(identifiers[i],
+ identifierLocs[i]));
+ }
+
+ QualType BaseT = Actions.GetTypeFromParser(baseType);
+ if (!BaseT.isNull() && BaseT->acceptsObjCTypeParams()) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Type);
+ } else {
+ Actions.CodeCompleteObjCProtocolReferences(identifierLocPairs);
+ }
+ cutOffParsing();
+ return;
+ }
+
+ allSingleIdentifiers = false;
+ break;
+ } while (TryConsumeToken(tok::comma));
+
+ // If we parsed an identifier list, semantic analysis sorts out
+ // whether it refers to protocols or to type arguments.
+ if (allSingleIdentifiers) {
+ // Parse the closing '>'.
+ SourceLocation rAngleLoc;
+ (void)ParseGreaterThanInTemplateList(rAngleLoc, consumeLastToken,
+ /*ObjCGenericList=*/true);
+
+ // Let Sema figure out what we parsed.
+ Actions.actOnObjCTypeArgsOrProtocolQualifiers(getCurScope(),
+ baseType,
+ lAngleLoc,
+ identifiers,
+ identifierLocs,
+ rAngleLoc,
+ typeArgsLAngleLoc,
+ typeArgs,
+ typeArgsRAngleLoc,
+ protocolLAngleLoc,
+ protocols,
+ protocolRAngleLoc,
+ warnOnIncompleteProtocols);
+ return;
+ }
+
+ // We syntactically matched a type argument, so commit to parsing
+ // type arguments.
+
+ // Convert the identifiers into type arguments.
+ bool invalid = false;
+ for (unsigned i = 0, n = identifiers.size(); i != n; ++i) {
+ ParsedType typeArg
+ = Actions.getTypeName(*identifiers[i], identifierLocs[i], getCurScope());
+ if (typeArg) {
+ DeclSpec DS(AttrFactory);
+ const char *prevSpec = nullptr;
+ unsigned diagID;
+ DS.SetTypeSpecType(TST_typename, identifierLocs[i], prevSpec, diagID,
+ typeArg, Actions.getASTContext().getPrintingPolicy());
+
+ // Form a declarator to turn this into a type.
+ Declarator D(DS, Declarator::TypeNameContext);
+ TypeResult fullTypeArg = Actions.ActOnTypeName(getCurScope(), D);
+ if (fullTypeArg.isUsable())
+ typeArgs.push_back(fullTypeArg.get());
+ else
+ invalid = true;
+ } else {
+ invalid = true;
+ }
+ }
+
+ // Continue parsing type-names.
+ do {
+ TypeResult typeArg = ParseTypeName();
+
+ // Consume the '...' for a pack expansion.
+ SourceLocation ellipsisLoc;
+ TryConsumeToken(tok::ellipsis, ellipsisLoc);
+ if (typeArg.isUsable() && ellipsisLoc.isValid()) {
+ typeArg = Actions.ActOnPackExpansion(typeArg.get(), ellipsisLoc);
+ }
+
+ if (typeArg.isUsable()) {
+ typeArgs.push_back(typeArg.get());
+ } else {
+ invalid = true;
+ }
+ } while (TryConsumeToken(tok::comma));
+
+ // Parse the closing '>'.
+ SourceLocation rAngleLoc;
+ (void)ParseGreaterThanInTemplateList(rAngleLoc, consumeLastToken,
+ /*ObjCGenericList=*/true);
+
+ if (invalid) {
+ typeArgs.clear();
+ return;
+ }
+
+ // Record left/right angle locations.
+ typeArgsLAngleLoc = lAngleLoc;
+ typeArgsRAngleLoc = rAngleLoc;
+}
+
+void Parser::parseObjCTypeArgsAndProtocolQualifiers(
+ ParsedType baseType,
+ SourceLocation &typeArgsLAngleLoc,
+ SmallVectorImpl<ParsedType> &typeArgs,
+ SourceLocation &typeArgsRAngleLoc,
+ SourceLocation &protocolLAngleLoc,
+ SmallVectorImpl<Decl *> &protocols,
+ SmallVectorImpl<SourceLocation> &protocolLocs,
+ SourceLocation &protocolRAngleLoc,
+ bool consumeLastToken) {
+ assert(Tok.is(tok::less));
+
+ // Parse the first angle-bracket-delimited clause.
+ parseObjCTypeArgsOrProtocolQualifiers(baseType,
+ typeArgsLAngleLoc,
+ typeArgs,
+ typeArgsRAngleLoc,
+ protocolLAngleLoc,
+ protocols,
+ protocolLocs,
+ protocolRAngleLoc,
+ consumeLastToken,
+ /*warnOnIncompleteProtocols=*/false);
+
+ // An Objective-C object pointer followed by type arguments
+ // can then be followed again by a set of protocol references, e.g.,
+ // \c NSArray<NSView><NSTextDelegate>
+ if ((consumeLastToken && Tok.is(tok::less)) ||
+ (!consumeLastToken && NextToken().is(tok::less))) {
+ // If we aren't consuming the last token, the prior '>' is still hanging
+ // there. Consume it before we parse the protocol qualifiers.
+ if (!consumeLastToken)
+ ConsumeToken();
+
+ if (!protocols.empty()) {
+ SkipUntilFlags skipFlags = SkipUntilFlags();
+ if (!consumeLastToken)
+ skipFlags = skipFlags | StopBeforeMatch;
+ Diag(Tok, diag::err_objc_type_args_after_protocols)
+ << SourceRange(protocolLAngleLoc, protocolRAngleLoc);
+ SkipUntil(tok::greater, tok::greatergreater, skipFlags);
+ } else {
+ ParseObjCProtocolReferences(protocols, protocolLocs,
+ /*WarnOnDeclarations=*/false,
+ /*ForObjCContainer=*/false,
+ protocolLAngleLoc, protocolRAngleLoc,
+ consumeLastToken);
+ }
+ }
+}
+
+TypeResult Parser::parseObjCTypeArgsAndProtocolQualifiers(
+ SourceLocation loc,
+ ParsedType type,
+ bool consumeLastToken,
+ SourceLocation &endLoc) {
+ assert(Tok.is(tok::less));
+ SourceLocation typeArgsLAngleLoc;
+ SmallVector<ParsedType, 4> typeArgs;
+ SourceLocation typeArgsRAngleLoc;
+ SourceLocation protocolLAngleLoc;
+ SmallVector<Decl *, 4> protocols;
+ SmallVector<SourceLocation, 4> protocolLocs;
+ SourceLocation protocolRAngleLoc;
+
+ // Parse type arguments and protocol qualifiers.
+ parseObjCTypeArgsAndProtocolQualifiers(type, typeArgsLAngleLoc, typeArgs,
+ typeArgsRAngleLoc, protocolLAngleLoc,
+ protocols, protocolLocs,
+ protocolRAngleLoc, consumeLastToken);
+
+ // Compute the location of the last token.
+ if (consumeLastToken)
+ endLoc = PrevTokLocation;
+ else
+ endLoc = Tok.getLocation();
+
+ return Actions.actOnObjCTypeArgsAndProtocolQualifiers(
+ getCurScope(),
+ loc,
+ type,
+ typeArgsLAngleLoc,
+ typeArgs,
+ typeArgsRAngleLoc,
+ protocolLAngleLoc,
+ protocols,
+ protocolLocs,
+ protocolRAngleLoc);
+}
+
+void Parser::HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
+ BalancedDelimiterTracker &T,
+ SmallVectorImpl<Decl *> &AllIvarDecls,
+ bool RBraceMissing) {
+ if (!RBraceMissing)
+ T.consumeClose();
+
+ Actions.ActOnObjCContainerStartDefinition(interfaceDecl);
+ Actions.ActOnLastBitfield(T.getCloseLocation(), AllIvarDecls);
+ Actions.ActOnObjCContainerFinishDefinition();
+ // Call ActOnFields() even if we don't have any decls. This is useful
+ // for code rewriting tools that need to be aware of the empty list.
+ Actions.ActOnFields(getCurScope(), atLoc, interfaceDecl,
+ AllIvarDecls,
+ T.getOpenLocation(), T.getCloseLocation(), nullptr);
+}
+
+/// objc-class-instance-variables:
+/// '{' objc-instance-variable-decl-list[opt] '}'
+///
+/// objc-instance-variable-decl-list:
+/// objc-visibility-spec
+/// objc-instance-variable-decl ';'
+/// ';'
+/// objc-instance-variable-decl-list objc-visibility-spec
+/// objc-instance-variable-decl-list objc-instance-variable-decl ';'
+/// objc-instance-variable-decl-list ';'
+///
+/// objc-visibility-spec:
+/// @private
+/// @protected
+/// @public
+/// @package [OBJC2]
+///
+/// objc-instance-variable-decl:
+/// struct-declaration
+///
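+/// For example (illustrative):
+/// \code
+///   @interface Widget : NSObject {
+///   @private
+///     unsigned _flags : 4;   // bitfield instance variable
+///     id _delegate;
+///   }
+///   @end
+/// \endcode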
+void Parser::ParseObjCClassInstanceVariables(Decl *interfaceDecl,
+ tok::ObjCKeywordKind visibility,
+ SourceLocation atLoc) {
+ assert(Tok.is(tok::l_brace) && "expected {");
+ SmallVector<Decl *, 32> AllIvarDecls;
+
+ ParseScope ClassScope(this, Scope::DeclScope|Scope::ClassScope);
+ ObjCDeclContextSwitch ObjCDC(*this);
+
+ BalancedDelimiterTracker T(*this, tok::l_brace);
+ T.consumeOpen();
+ // While we still have something to read, read the instance variables.
+ while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
+ // Each iteration of this loop reads one objc-instance-variable-decl.
+
+ // Check for extraneous top-level semicolon.
+ if (Tok.is(tok::semi)) {
+ ConsumeExtraSemi(InstanceVariableList);
+ continue;
+ }
+
+ // Set the default visibility to private.
+ if (TryConsumeToken(tok::at)) { // parse objc-visibility-spec
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCAtVisibility(getCurScope());
+ return cutOffParsing();
+ }
+
+ switch (Tok.getObjCKeywordID()) {
+ case tok::objc_private:
+ case tok::objc_public:
+ case tok::objc_protected:
+ case tok::objc_package:
+ visibility = Tok.getObjCKeywordID();
+ ConsumeToken();
+ continue;
+
+ case tok::objc_end:
+ Diag(Tok, diag::err_objc_unexpected_atend);
+ Tok.setLocation(Tok.getLocation().getLocWithOffset(-1));
+ Tok.setKind(tok::at);
+ Tok.setLength(1);
+ PP.EnterToken(Tok);
+ HelperActionsForIvarDeclarations(interfaceDecl, atLoc,
+ T, AllIvarDecls, true);
+ return;
+
+ default:
+ Diag(Tok, diag::err_objc_illegal_visibility_spec);
+ continue;
+ }
+ }
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(),
+ Sema::PCC_ObjCInstanceVariableList);
+ return cutOffParsing();
+ }
+
+ auto ObjCIvarCallback = [&](ParsingFieldDeclarator &FD) {
+ Actions.ActOnObjCContainerStartDefinition(interfaceDecl);
+ // Install the declarator into the interface decl.
+ FD.D.setObjCIvar(true);
+ Decl *Field = Actions.ActOnIvar(
+ getCurScope(), FD.D.getDeclSpec().getSourceRange().getBegin(), FD.D,
+ FD.BitfieldSize, visibility);
+ Actions.ActOnObjCContainerFinishDefinition();
+ if (Field)
+ AllIvarDecls.push_back(Field);
+ FD.complete(Field);
+ };
+
+ // Parse all the comma separated declarators.
+ ParsingDeclSpec DS(*this);
+ ParseStructDeclaration(DS, ObjCIvarCallback);
+
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ } else {
+ Diag(Tok, diag::err_expected_semi_decl_list);
+ // Skip to end of block or statement
+ SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
+ }
+ }
+ HelperActionsForIvarDeclarations(interfaceDecl, atLoc,
+ T, AllIvarDecls, false);
+ return;
+}
+
+/// objc-protocol-declaration:
+/// objc-protocol-definition
+/// objc-protocol-forward-reference
+///
+/// objc-protocol-definition:
+/// \@protocol identifier
+/// objc-protocol-refs[opt]
+/// objc-interface-decl-list
+/// \@end
+///
+/// objc-protocol-forward-reference:
+/// \@protocol identifier-list ';'
+///
+/// "\@protocol identifier ;" should be resolved as "\@protocol
+/// identifier-list ;": objc-interface-decl-list may not start with a
+/// semicolon in the first alternative if objc-protocol-refs are omitted.
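+///
+/// For example, a definition and a forward reference (illustrative):
+/// \code
+///   @protocol Drawable <NSObject>
+///   - (void)draw;
+///   @end
+///
+///   @protocol Loadable, Savable;   // forward references only
+/// \endcode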
+Parser::DeclGroupPtrTy
+Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
+ ParsedAttributes &attrs) {
+ assert(Tok.isObjCAtKeyword(tok::objc_protocol) &&
+ "ParseObjCAtProtocolDeclaration(): Expected @protocol");
+ ConsumeToken(); // the "protocol" identifier
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCProtocolDecl(getCurScope());
+ cutOffParsing();
+ return DeclGroupPtrTy();
+ }
+
+ MaybeSkipAttributes(tok::objc_protocol);
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier; // missing protocol name.
+ return DeclGroupPtrTy();
+ }
+ // Save the protocol name, then consume it.
+ IdentifierInfo *protocolName = Tok.getIdentifierInfo();
+ SourceLocation nameLoc = ConsumeToken();
+
+ if (TryConsumeToken(tok::semi)) { // forward declaration of one protocol.
+ IdentifierLocPair ProtoInfo(protocolName, nameLoc);
+ return Actions.ActOnForwardProtocolDeclaration(AtLoc, ProtoInfo,
+ attrs.getList());
+ }
+
+ CheckNestedObjCContexts(AtLoc);
+
+ if (Tok.is(tok::comma)) { // list of forward declarations.
+ SmallVector<IdentifierLocPair, 8> ProtocolRefs;
+ ProtocolRefs.push_back(std::make_pair(protocolName, nameLoc));
+
+ // Parse the list of forward declarations.
+    while (true) {
+ ConsumeToken(); // the ','
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ SkipUntil(tok::semi);
+ return DeclGroupPtrTy();
+ }
+ ProtocolRefs.push_back(IdentifierLocPair(Tok.getIdentifierInfo(),
+ Tok.getLocation()));
+ ConsumeToken(); // the identifier
+
+ if (Tok.isNot(tok::comma))
+ break;
+ }
+ // Consume the ';'.
+ if (ExpectAndConsume(tok::semi, diag::err_expected_after, "@protocol"))
+ return DeclGroupPtrTy();
+
+ return Actions.ActOnForwardProtocolDeclaration(AtLoc, ProtocolRefs,
+ attrs.getList());
+ }
+
+ // Last, and definitely not least, parse a protocol declaration.
+ SourceLocation LAngleLoc, EndProtoLoc;
+
+ SmallVector<Decl *, 8> ProtocolRefs;
+ SmallVector<SourceLocation, 8> ProtocolLocs;
+ if (Tok.is(tok::less) &&
+ ParseObjCProtocolReferences(ProtocolRefs, ProtocolLocs, false, true,
+ LAngleLoc, EndProtoLoc,
+ /*consumeLastToken=*/true))
+ return DeclGroupPtrTy();
+
+ Decl *ProtoType =
+ Actions.ActOnStartProtocolInterface(AtLoc, protocolName, nameLoc,
+ ProtocolRefs.data(),
+ ProtocolRefs.size(),
+ ProtocolLocs.data(),
+ EndProtoLoc, attrs.getList());
+
+ ParseObjCInterfaceDeclList(tok::objc_protocol, ProtoType);
+ return Actions.ConvertDeclToDeclGroup(ProtoType);
+}
+
+/// objc-implementation:
+/// objc-class-implementation-prologue
+/// objc-category-implementation-prologue
+///
+/// objc-class-implementation-prologue:
+/// @implementation identifier objc-superclass[opt]
+/// objc-class-instance-variables[opt]
+///
+/// objc-category-implementation-prologue:
+/// @implementation identifier ( identifier )
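+///
+/// For example (illustrative):
+/// \code
+///   @implementation Widget        // class implementation
+///   @implementation Widget (IO)   // category implementation
+/// \endcode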
+Parser::DeclGroupPtrTy
+Parser::ParseObjCAtImplementationDeclaration(SourceLocation AtLoc) {
+ assert(Tok.isObjCAtKeyword(tok::objc_implementation) &&
+ "ParseObjCAtImplementationDeclaration(): Expected @implementation");
+ CheckNestedObjCContexts(AtLoc);
+ ConsumeToken(); // the "implementation" identifier
+
+ // Code completion after '@implementation'.
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCImplementationDecl(getCurScope());
+ cutOffParsing();
+ return DeclGroupPtrTy();
+ }
+
+ MaybeSkipAttributes(tok::objc_implementation);
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected)
+ << tok::identifier; // missing class or category name.
+ return DeclGroupPtrTy();
+ }
+ // We have a class or category name - consume it.
+ IdentifierInfo *nameId = Tok.getIdentifierInfo();
+ SourceLocation nameLoc = ConsumeToken(); // consume class or category name
+ Decl *ObjCImpDecl = nullptr;
+
+ // Neither a type parameter list nor a list of protocol references is
+ // permitted here. Parse and diagnose them.
+ if (Tok.is(tok::less)) {
+ SourceLocation lAngleLoc, rAngleLoc;
+ SmallVector<IdentifierLocPair, 8> protocolIdents;
+ SourceLocation diagLoc = Tok.getLocation();
+ ObjCTypeParamListScope typeParamScope(Actions, getCurScope());
+ if (parseObjCTypeParamListOrProtocolRefs(typeParamScope, lAngleLoc,
+ protocolIdents, rAngleLoc)) {
+ Diag(diagLoc, diag::err_objc_parameterized_implementation)
+ << SourceRange(diagLoc, PrevTokLocation);
+ } else if (lAngleLoc.isValid()) {
+ Diag(lAngleLoc, diag::err_unexpected_protocol_qualifier)
+ << FixItHint::CreateRemoval(SourceRange(lAngleLoc, rAngleLoc));
+ }
+ }
+
+ if (Tok.is(tok::l_paren)) {
+ // we have a category implementation.
+ ConsumeParen();
+ SourceLocation categoryLoc, rparenLoc;
+ IdentifierInfo *categoryId = nullptr;
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCImplementationCategory(getCurScope(), nameId, nameLoc);
+ cutOffParsing();
+ return DeclGroupPtrTy();
+ }
+
+ if (Tok.is(tok::identifier)) {
+ categoryId = Tok.getIdentifierInfo();
+ categoryLoc = ConsumeToken();
+ } else {
+ Diag(Tok, diag::err_expected)
+ << tok::identifier; // missing category name.
+ return DeclGroupPtrTy();
+ }
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_expected) << tok::r_paren;
+ SkipUntil(tok::r_paren); // don't stop at ';'
+ return DeclGroupPtrTy();
+ }
+ rparenLoc = ConsumeParen();
+    if (Tok.is(tok::less)) { // we have an illegal '<'; try to recover
+ Diag(Tok, diag::err_unexpected_protocol_qualifier);
+ SourceLocation protocolLAngleLoc, protocolRAngleLoc;
+ SmallVector<Decl *, 4> protocols;
+ SmallVector<SourceLocation, 4> protocolLocs;
+ (void)ParseObjCProtocolReferences(protocols, protocolLocs,
+                                        /*WarnOnDeclarations=*/false,
+ /*ForObjCContainer=*/false,
+ protocolLAngleLoc, protocolRAngleLoc,
+ /*consumeLastToken=*/true);
+ }
+ ObjCImpDecl = Actions.ActOnStartCategoryImplementation(
+ AtLoc, nameId, nameLoc, categoryId,
+ categoryLoc);
+
+ } else {
+ // We have a class implementation
+ SourceLocation superClassLoc;
+ IdentifierInfo *superClassId = nullptr;
+ if (TryConsumeToken(tok::colon)) {
+ // We have a super class
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected)
+ << tok::identifier; // missing super class name.
+ return DeclGroupPtrTy();
+ }
+ superClassId = Tok.getIdentifierInfo();
+ superClassLoc = ConsumeToken(); // Consume super class name
+ }
+ ObjCImpDecl = Actions.ActOnStartClassImplementation(
+ AtLoc, nameId, nameLoc,
+ superClassId, superClassLoc);
+
+ if (Tok.is(tok::l_brace)) // we have ivars
+ ParseObjCClassInstanceVariables(ObjCImpDecl, tok::objc_private, AtLoc);
+    else if (Tok.is(tok::less)) { // we have an illegal '<'; try to recover
+ Diag(Tok, diag::err_unexpected_protocol_qualifier);
+
+ SourceLocation protocolLAngleLoc, protocolRAngleLoc;
+ SmallVector<Decl *, 4> protocols;
+ SmallVector<SourceLocation, 4> protocolLocs;
+ (void)ParseObjCProtocolReferences(protocols, protocolLocs,
+                                        /*WarnOnDeclarations=*/false,
+ /*ForObjCContainer=*/false,
+ protocolLAngleLoc, protocolRAngleLoc,
+ /*consumeLastToken=*/true);
+ }
+ }
+ assert(ObjCImpDecl);
+
+ SmallVector<Decl *, 8> DeclsInGroup;
+
+ {
+ ObjCImplParsingDataRAII ObjCImplParsing(*this, ObjCImpDecl);
+ while (!ObjCImplParsing.isFinished() && !isEofOrEom()) {
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX11Attributes(attrs);
+ MaybeParseMicrosoftAttributes(attrs);
+ if (DeclGroupPtrTy DGP = ParseExternalDeclaration(attrs)) {
+ DeclGroupRef DG = DGP.get();
+ DeclsInGroup.append(DG.begin(), DG.end());
+ }
+ }
+ }
+
+ return Actions.ActOnFinishObjCImplementation(ObjCImpDecl, DeclsInGroup);
+}
+
+Parser::DeclGroupPtrTy
+Parser::ParseObjCAtEndDeclaration(SourceRange atEnd) {
+ assert(Tok.isObjCAtKeyword(tok::objc_end) &&
+ "ParseObjCAtEndDeclaration(): Expected @end");
+ ConsumeToken(); // the "end" identifier
+ if (CurParsedObjCImpl)
+ CurParsedObjCImpl->finish(atEnd);
+ else
+ // missing @implementation
+ Diag(atEnd.getBegin(), diag::err_expected_objc_container);
+ return DeclGroupPtrTy();
+}
+
+Parser::ObjCImplParsingDataRAII::~ObjCImplParsingDataRAII() {
+ if (!Finished) {
+ finish(P.Tok.getLocation());
+ if (P.isEofOrEom()) {
+ P.Diag(P.Tok, diag::err_objc_missing_end)
+ << FixItHint::CreateInsertion(P.Tok.getLocation(), "\n@end\n");
+ P.Diag(Dcl->getLocStart(), diag::note_objc_container_start)
+ << Sema::OCK_Implementation;
+ }
+ }
+ P.CurParsedObjCImpl = nullptr;
+ assert(LateParsedObjCMethods.empty());
+}
+
+void Parser::ObjCImplParsingDataRAII::finish(SourceRange AtEnd) {
+ assert(!Finished);
+ P.Actions.DefaultSynthesizeProperties(P.getCurScope(), Dcl);
+ for (size_t i = 0; i < LateParsedObjCMethods.size(); ++i)
+ P.ParseLexedObjCMethodDefs(*LateParsedObjCMethods[i],
+ true/*Methods*/);
+
+ P.Actions.ActOnAtEnd(P.getCurScope(), AtEnd);
+
+ if (HasCFunction)
+ for (size_t i = 0; i < LateParsedObjCMethods.size(); ++i)
+ P.ParseLexedObjCMethodDefs(*LateParsedObjCMethods[i],
+ false/*c-functions*/);
+
+  // Clear and free the cached objc methods.
+ for (LateParsedObjCMethodContainer::iterator
+ I = LateParsedObjCMethods.begin(),
+ E = LateParsedObjCMethods.end(); I != E; ++I)
+ delete *I;
+ LateParsedObjCMethods.clear();
+
+ Finished = true;
+}
+
+/// compatibility-alias-decl:
+/// @compatibility_alias alias-name class-name ';'
+///
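+/// For example (illustrative): after
+/// \code
+///   @compatibility_alias OldWidget Widget;
+/// \endcode
+/// 'OldWidget' can be used wherever the class 'Widget' is expected.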
+Decl *Parser::ParseObjCAtAliasDeclaration(SourceLocation atLoc) {
+ assert(Tok.isObjCAtKeyword(tok::objc_compatibility_alias) &&
+ "ParseObjCAtAliasDeclaration(): Expected @compatibility_alias");
+ ConsumeToken(); // consume compatibility_alias
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ return nullptr;
+ }
+ IdentifierInfo *aliasId = Tok.getIdentifierInfo();
+ SourceLocation aliasLoc = ConsumeToken(); // consume alias-name
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ return nullptr;
+ }
+ IdentifierInfo *classId = Tok.getIdentifierInfo();
+ SourceLocation classLoc = ConsumeToken(); // consume class-name;
+ ExpectAndConsume(tok::semi, diag::err_expected_after, "@compatibility_alias");
+ return Actions.ActOnCompatibilityAlias(atLoc, aliasId, aliasLoc,
+ classId, classLoc);
+}
+
+/// property-synthesis:
+/// @synthesize property-ivar-list ';'
+///
+/// property-ivar-list:
+/// property-ivar
+/// property-ivar-list ',' property-ivar
+///
+/// property-ivar:
+/// identifier
+/// identifier '=' identifier
+///
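+/// For example (illustrative):
+/// \code
+///   @synthesize name = _name, enabled;   // 'name' is backed by ivar '_name'
+/// \endcode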
+Decl *Parser::ParseObjCPropertySynthesize(SourceLocation atLoc) {
+ assert(Tok.isObjCAtKeyword(tok::objc_synthesize) &&
+ "ParseObjCPropertySynthesize(): Expected '@synthesize'");
+ ConsumeToken(); // consume synthesize
+
+ while (true) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCPropertyDefinition(getCurScope());
+ cutOffParsing();
+ return nullptr;
+ }
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_synthesized_property_name);
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
+
+ IdentifierInfo *propertyIvar = nullptr;
+ IdentifierInfo *propertyId = Tok.getIdentifierInfo();
+ SourceLocation propertyLoc = ConsumeToken(); // consume property name
+ SourceLocation propertyIvarLoc;
+ if (TryConsumeToken(tok::equal)) {
+ // property '=' ivar-name
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCPropertySynthesizeIvar(getCurScope(), propertyId);
+ cutOffParsing();
+ return nullptr;
+ }
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ break;
+ }
+ propertyIvar = Tok.getIdentifierInfo();
+ propertyIvarLoc = ConsumeToken(); // consume ivar-name
+ }
+ Actions.ActOnPropertyImplDecl(getCurScope(), atLoc, propertyLoc, true,
+ propertyId, propertyIvar, propertyIvarLoc);
+ if (Tok.isNot(tok::comma))
+ break;
+ ConsumeToken(); // consume ','
+ }
+ ExpectAndConsume(tok::semi, diag::err_expected_after, "@synthesize");
+ return nullptr;
+}
+
+/// property-dynamic:
+/// @dynamic property-list
+///
+/// property-list:
+/// identifier
+/// property-list ',' identifier
+///
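+/// For example (illustrative):
+/// \code
+///   @dynamic title;   // accessors are provided elsewhere, e.g. at runtime
+/// \endcode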
+Decl *Parser::ParseObjCPropertyDynamic(SourceLocation atLoc) {
+ assert(Tok.isObjCAtKeyword(tok::objc_dynamic) &&
+ "ParseObjCPropertyDynamic(): Expected '@dynamic'");
+ ConsumeToken(); // consume dynamic
+ while (true) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCPropertyDefinition(getCurScope());
+ cutOffParsing();
+ return nullptr;
+ }
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
+
+ IdentifierInfo *propertyId = Tok.getIdentifierInfo();
+ SourceLocation propertyLoc = ConsumeToken(); // consume property name
+ Actions.ActOnPropertyImplDecl(getCurScope(), atLoc, propertyLoc, false,
+ propertyId, nullptr, SourceLocation());
+
+ if (Tok.isNot(tok::comma))
+ break;
+ ConsumeToken(); // consume ','
+ }
+ ExpectAndConsume(tok::semi, diag::err_expected_after, "@dynamic");
+ return nullptr;
+}
+
+/// objc-throw-statement:
+///   @throw expression[opt] ';'
+///
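+/// For example (illustrative):
+/// \code
+///   @throw exception;   // a bare '@throw;' rethrows inside an @catch block
+/// \endcode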
+StmtResult Parser::ParseObjCThrowStmt(SourceLocation atLoc) {
+ ExprResult Res;
+ ConsumeToken(); // consume throw
+ if (Tok.isNot(tok::semi)) {
+ Res = ParseExpression();
+ if (Res.isInvalid()) {
+ SkipUntil(tok::semi);
+ return StmtError();
+ }
+ }
+ // consume ';'
+ ExpectAndConsume(tok::semi, diag::err_expected_after, "@throw");
+ return Actions.ActOnObjCAtThrowStmt(atLoc, Res.get(), getCurScope());
+}
+
+/// objc-synchronized-statement:
+/// @synchronized '(' expression ')' compound-statement
+///
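+/// For example (illustrative):
+/// \code
+///   @synchronized (self) {
+///     ++_count;
+///   }
+/// \endcode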
+StmtResult
+Parser::ParseObjCSynchronizedStmt(SourceLocation atLoc) {
+ ConsumeToken(); // consume synchronized
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "@synchronized";
+ return StmtError();
+ }
+
+ // The operand is surrounded with parentheses.
+ ConsumeParen(); // '('
+ ExprResult operand(ParseExpression());
+
+ if (Tok.is(tok::r_paren)) {
+ ConsumeParen(); // ')'
+ } else {
+ if (!operand.isInvalid())
+ Diag(Tok, diag::err_expected) << tok::r_paren;
+
+ // Skip forward until we see a left brace, but don't consume it.
+ SkipUntil(tok::l_brace, StopAtSemi | StopBeforeMatch);
+ }
+
+ // Require a compound statement.
+ if (Tok.isNot(tok::l_brace)) {
+ if (!operand.isInvalid())
+ Diag(Tok, diag::err_expected) << tok::l_brace;
+ return StmtError();
+ }
+
+ // Check the @synchronized operand now.
+ if (!operand.isInvalid())
+ operand = Actions.ActOnObjCAtSynchronizedOperand(atLoc, operand.get());
+
+ // Parse the compound statement within a new scope.
+ ParseScope bodyScope(this, Scope::DeclScope);
+ StmtResult body(ParseCompoundStatementBody());
+ bodyScope.Exit();
+
+ // If there was a semantic or parse error earlier with the
+ // operand, fail now.
+ if (operand.isInvalid())
+ return StmtError();
+
+ if (body.isInvalid())
+ body = Actions.ActOnNullStmt(Tok.getLocation());
+
+ return Actions.ActOnObjCAtSynchronizedStmt(atLoc, operand.get(), body.get());
+}
+
+/// objc-try-catch-statement:
+/// @try compound-statement objc-catch-list[opt]
+/// @try compound-statement objc-catch-list[opt] @finally compound-statement
+///
+/// objc-catch-list:
+/// @catch ( parameter-declaration ) compound-statement
+///   objc-catch-list @catch ( catch-parameter-declaration ) compound-statement
+///
+/// catch-parameter-declaration:
+/// parameter-declaration
+/// '...' [OBJC2]
+///
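+/// For example (illustrative):
+/// \code
+///   @try {
+///     [self run];
+///   } @catch (NSException *e) {
+///     NSLog(@"caught %@", e);
+///   } @finally {
+///     [self cleanup];
+///   }
+/// \endcode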
+StmtResult Parser::ParseObjCTryStmt(SourceLocation atLoc) {
+ bool catch_or_finally_seen = false;
+
+ ConsumeToken(); // consume try
+ if (Tok.isNot(tok::l_brace)) {
+ Diag(Tok, diag::err_expected) << tok::l_brace;
+ return StmtError();
+ }
+ StmtVector CatchStmts;
+ StmtResult FinallyStmt;
+ ParseScope TryScope(this, Scope::DeclScope);
+ StmtResult TryBody(ParseCompoundStatementBody());
+ TryScope.Exit();
+ if (TryBody.isInvalid())
+ TryBody = Actions.ActOnNullStmt(Tok.getLocation());
+
+ while (Tok.is(tok::at)) {
+    // At this point, we need to look ahead to determine if this @ is the start
+ // of an @catch or @finally. We don't want to consume the @ token if this
+ // is an @try or @encode or something else.
+ Token AfterAt = GetLookAheadToken(1);
+ if (!AfterAt.isObjCAtKeyword(tok::objc_catch) &&
+ !AfterAt.isObjCAtKeyword(tok::objc_finally))
+ break;
+
+ SourceLocation AtCatchFinallyLoc = ConsumeToken();
+ if (Tok.isObjCAtKeyword(tok::objc_catch)) {
+ Decl *FirstPart = nullptr;
+ ConsumeToken(); // consume catch
+ if (Tok.is(tok::l_paren)) {
+ ConsumeParen();
+ ParseScope CatchScope(this, Scope::DeclScope|Scope::AtCatchScope);
+ if (Tok.isNot(tok::ellipsis)) {
+ DeclSpec DS(AttrFactory);
+ ParseDeclarationSpecifiers(DS);
+ Declarator ParmDecl(DS, Declarator::ObjCCatchContext);
+ ParseDeclarator(ParmDecl);
+
+ // Inform the actions module about the declarator, so it
+ // gets added to the current scope.
+ FirstPart = Actions.ActOnObjCExceptionDecl(getCurScope(), ParmDecl);
+ } else
+ ConsumeToken(); // consume '...'
+
+ SourceLocation RParenLoc;
+
+ if (Tok.is(tok::r_paren))
+ RParenLoc = ConsumeParen();
+ else // Skip over garbage, until we get to ')'. Eat the ')'.
+ SkipUntil(tok::r_paren, StopAtSemi);
+
+ StmtResult CatchBody(true);
+ if (Tok.is(tok::l_brace))
+ CatchBody = ParseCompoundStatementBody();
+ else
+ Diag(Tok, diag::err_expected) << tok::l_brace;
+ if (CatchBody.isInvalid())
+ CatchBody = Actions.ActOnNullStmt(Tok.getLocation());
+
+ StmtResult Catch = Actions.ActOnObjCAtCatchStmt(AtCatchFinallyLoc,
+ RParenLoc,
+ FirstPart,
+ CatchBody.get());
+ if (!Catch.isInvalid())
+ CatchStmts.push_back(Catch.get());
+
+ } else {
+ Diag(AtCatchFinallyLoc, diag::err_expected_lparen_after)
+ << "@catch clause";
+ return StmtError();
+ }
+ catch_or_finally_seen = true;
+ } else {
+ assert(Tok.isObjCAtKeyword(tok::objc_finally) && "Lookahead confused?");
+ ConsumeToken(); // consume finally
+ ParseScope FinallyScope(this, Scope::DeclScope);
+
+ StmtResult FinallyBody(true);
+ if (Tok.is(tok::l_brace))
+ FinallyBody = ParseCompoundStatementBody();
+ else
+ Diag(Tok, diag::err_expected) << tok::l_brace;
+ if (FinallyBody.isInvalid())
+ FinallyBody = Actions.ActOnNullStmt(Tok.getLocation());
+ FinallyStmt = Actions.ActOnObjCAtFinallyStmt(AtCatchFinallyLoc,
+ FinallyBody.get());
+ catch_or_finally_seen = true;
+ break;
+ }
+ }
+ if (!catch_or_finally_seen) {
+ Diag(atLoc, diag::err_missing_catch_finally);
+ return StmtError();
+ }
+
+ return Actions.ActOnObjCAtTryStmt(atLoc, TryBody.get(),
+ CatchStmts,
+ FinallyStmt.get());
+}
+
+/// objc-autoreleasepool-statement:
+/// @autoreleasepool compound-statement
+///
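+/// For example (illustrative):
+/// \code
+///   @autoreleasepool {
+///     [self doWork];
+///   }
+/// \endcode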
+StmtResult
+Parser::ParseObjCAutoreleasePoolStmt(SourceLocation atLoc) {
+ ConsumeToken(); // consume autoreleasepool
+ if (Tok.isNot(tok::l_brace)) {
+ Diag(Tok, diag::err_expected) << tok::l_brace;
+ return StmtError();
+ }
+ // Enter a scope to hold everything within the compound stmt. Compound
+ // statements can always hold declarations.
+ ParseScope BodyScope(this, Scope::DeclScope);
+
+ StmtResult AutoreleasePoolBody(ParseCompoundStatementBody());
+
+ BodyScope.Exit();
+ if (AutoreleasePoolBody.isInvalid())
+ AutoreleasePoolBody = Actions.ActOnNullStmt(Tok.getLocation());
+ return Actions.ActOnObjCAutoreleasePoolStmt(atLoc,
+ AutoreleasePoolBody.get());
+}
+
+/// StashAwayMethodOrFunctionBodyTokens - Consume the tokens and store them
+/// for later parsing.
+void Parser::StashAwayMethodOrFunctionBodyTokens(Decl *MDecl) {
+ LexedMethod* LM = new LexedMethod(this, MDecl);
+ CurParsedObjCImpl->LateParsedObjCMethods.push_back(LM);
+ CachedTokens &Toks = LM->Toks;
+ // Begin by storing the '{' or 'try' or ':' token.
+ Toks.push_back(Tok);
+ if (Tok.is(tok::kw_try)) {
+ ConsumeToken();
+ if (Tok.is(tok::colon)) {
+ Toks.push_back(Tok);
+ ConsumeToken();
+ while (Tok.isNot(tok::l_brace)) {
+ ConsumeAndStoreUntil(tok::l_paren, Toks, /*StopAtSemi=*/false);
+ ConsumeAndStoreUntil(tok::r_paren, Toks, /*StopAtSemi=*/false);
+ }
+ }
+ Toks.push_back(Tok); // also store '{'
+ }
+ else if (Tok.is(tok::colon)) {
+ ConsumeToken();
+ // FIXME: This is wrong, due to C++11 braced initialization.
+ while (Tok.isNot(tok::l_brace)) {
+ ConsumeAndStoreUntil(tok::l_paren, Toks, /*StopAtSemi=*/false);
+ ConsumeAndStoreUntil(tok::r_paren, Toks, /*StopAtSemi=*/false);
+ }
+ Toks.push_back(Tok); // also store '{'
+ }
+ ConsumeBrace();
+ // Consume everything up to (and including) the matching right brace.
+ ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/false);
+ while (Tok.is(tok::kw_catch)) {
+ ConsumeAndStoreUntil(tok::l_brace, Toks, /*StopAtSemi=*/false);
+ ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/false);
+ }
+}
+
+/// objc-method-def: objc-method-proto ';'[opt] '{' body '}'
+///
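+/// For example (illustrative; the body tokens are cached and parsed after
+/// the '@end' of the enclosing @implementation):
+/// \code
+///   - (void)reset {
+///     _count = 0;
+///   }
+/// \endcode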
+Decl *Parser::ParseObjCMethodDefinition() {
+ Decl *MDecl = ParseObjCMethodPrototype();
+
+ PrettyDeclStackTraceEntry CrashInfo(Actions, MDecl, Tok.getLocation(),
+ "parsing Objective-C method");
+
+ // parse optional ';'
+ if (Tok.is(tok::semi)) {
+ if (CurParsedObjCImpl) {
+ Diag(Tok, diag::warn_semicolon_before_method_body)
+ << FixItHint::CreateRemoval(Tok.getLocation());
+ }
+ ConsumeToken();
+ }
+
+ // We should have an opening brace now.
+ if (Tok.isNot(tok::l_brace)) {
+ Diag(Tok, diag::err_expected_method_body);
+
+ // Skip over garbage, until we get to '{'. Don't eat the '{'.
+ SkipUntil(tok::l_brace, StopAtSemi | StopBeforeMatch);
+
+ // If we didn't find the '{', bail out.
+ if (Tok.isNot(tok::l_brace))
+ return nullptr;
+ }
+
+ if (!MDecl) {
+ ConsumeBrace();
+ SkipUntil(tok::r_brace);
+ return nullptr;
+ }
+
+ // Allow the rest of sema to find private method decl implementations.
+ Actions.AddAnyMethodToGlobalPool(MDecl);
+  assert(CurParsedObjCImpl &&
+         "ParseObjCMethodDefinition - Method out of @implementation");
+ // Consume the tokens and store them for later parsing.
+ StashAwayMethodOrFunctionBodyTokens(MDecl);
+ return MDecl;
+}
+
+StmtResult Parser::ParseObjCAtStatement(SourceLocation AtLoc) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCAtStatement(getCurScope());
+ cutOffParsing();
+ return StmtError();
+ }
+
+ if (Tok.isObjCAtKeyword(tok::objc_try))
+ return ParseObjCTryStmt(AtLoc);
+
+ if (Tok.isObjCAtKeyword(tok::objc_throw))
+ return ParseObjCThrowStmt(AtLoc);
+
+ if (Tok.isObjCAtKeyword(tok::objc_synchronized))
+ return ParseObjCSynchronizedStmt(AtLoc);
+
+ if (Tok.isObjCAtKeyword(tok::objc_autoreleasepool))
+ return ParseObjCAutoreleasePoolStmt(AtLoc);
+
+ if (Tok.isObjCAtKeyword(tok::objc_import) &&
+ getLangOpts().DebuggerSupport) {
+ SkipUntil(tok::semi);
+ return Actions.ActOnNullStmt(Tok.getLocation());
+ }
+
+ ExprResult Res(ParseExpressionWithLeadingAt(AtLoc));
+ if (Res.isInvalid()) {
+ // If the expression is invalid, skip ahead to the next semicolon. Not
+ // doing this opens us up to the possibility of infinite loops if
+ // ParseExpression does not consume any tokens.
+ SkipUntil(tok::semi);
+ return StmtError();
+ }
+
+ // Otherwise, eat the semicolon.
+ ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
+ return Actions.ActOnExprStmt(Res);
+}
+
+ExprResult Parser::ParseObjCAtExpression(SourceLocation AtLoc) {
+ switch (Tok.getKind()) {
+ case tok::code_completion:
+ Actions.CodeCompleteObjCAtExpression(getCurScope());
+ cutOffParsing();
+ return ExprError();
+
+ case tok::minus:
+ case tok::plus: {
+ tok::TokenKind Kind = Tok.getKind();
+ SourceLocation OpLoc = ConsumeToken();
+
+ if (!Tok.is(tok::numeric_constant)) {
+ const char *Symbol = nullptr;
+ switch (Kind) {
+ case tok::minus: Symbol = "-"; break;
+ case tok::plus: Symbol = "+"; break;
+ default: llvm_unreachable("missing unary operator case");
+ }
+ Diag(Tok, diag::err_nsnumber_nonliteral_unary)
+ << Symbol;
+ return ExprError();
+ }
+
+ ExprResult Lit(Actions.ActOnNumericConstant(Tok));
+ if (Lit.isInvalid()) {
+ return Lit;
+ }
+ ConsumeToken(); // Consume the literal token.
+
+ Lit = Actions.ActOnUnaryOp(getCurScope(), OpLoc, Kind, Lit.get());
+ if (Lit.isInvalid())
+ return Lit;
+
+ return ParsePostfixExpressionSuffix(
+ Actions.BuildObjCNumericLiteral(AtLoc, Lit.get()));
+ }
+
+ case tok::string_literal: // primary-expression: string-literal
+ case tok::wide_string_literal:
+ return ParsePostfixExpressionSuffix(ParseObjCStringLiteral(AtLoc));
+
+ case tok::char_constant:
+ return ParsePostfixExpressionSuffix(ParseObjCCharacterLiteral(AtLoc));
+
+ case tok::numeric_constant:
+ return ParsePostfixExpressionSuffix(ParseObjCNumericLiteral(AtLoc));
+
+ case tok::kw_true: // Objective-C++, etc.
+ case tok::kw___objc_yes: // c/c++/objc/objc++ __objc_yes
+ return ParsePostfixExpressionSuffix(ParseObjCBooleanLiteral(AtLoc, true));
+ case tok::kw_false: // Objective-C++, etc.
+ case tok::kw___objc_no: // c/c++/objc/objc++ __objc_no
+ return ParsePostfixExpressionSuffix(ParseObjCBooleanLiteral(AtLoc, false));
+
+ case tok::l_square:
+ // Objective-C array literal
+ return ParsePostfixExpressionSuffix(ParseObjCArrayLiteral(AtLoc));
+
+ case tok::l_brace:
+ // Objective-C dictionary literal
+ return ParsePostfixExpressionSuffix(ParseObjCDictionaryLiteral(AtLoc));
+
+ case tok::l_paren:
+ // Objective-C boxed expression
+ return ParsePostfixExpressionSuffix(ParseObjCBoxedExpr(AtLoc));
+
+ default:
+ if (Tok.getIdentifierInfo() == nullptr)
+ return ExprError(Diag(AtLoc, diag::err_unexpected_at));
+
+ switch (Tok.getIdentifierInfo()->getObjCKeywordID()) {
+ case tok::objc_encode:
+ return ParsePostfixExpressionSuffix(ParseObjCEncodeExpression(AtLoc));
+ case tok::objc_protocol:
+ return ParsePostfixExpressionSuffix(ParseObjCProtocolExpression(AtLoc));
+ case tok::objc_selector:
+ return ParsePostfixExpressionSuffix(ParseObjCSelectorExpression(AtLoc));
+ default: {
+ const char *str = nullptr;
+ if (GetLookAheadToken(1).is(tok::l_brace)) {
+ char ch = Tok.getIdentifierInfo()->getNameStart()[0];
+ str =
+ ch == 't' ? "try"
+ : (ch == 'f' ? "finally"
+ : (ch == 'a' ? "autoreleasepool" : nullptr));
+ }
+ if (str) {
+ SourceLocation kwLoc = Tok.getLocation();
+ return ExprError(Diag(AtLoc, diag::err_unexpected_at) <<
+ FixItHint::CreateReplacement(kwLoc, str));
+ }
+ else
+ return ExprError(Diag(AtLoc, diag::err_unexpected_at));
+ }
+ }
+ }
+}
+
+/// \brief Parse the receiver of an Objective-C++ message send.
+///
+/// This routine parses the receiver of a message send in
+/// Objective-C++ either as a type or as an expression. Note that this
+/// routine must not be called to parse a send to 'super', since it
+/// has no way to return such a result.
+///
+/// \param IsExpr Whether the receiver was parsed as an expression.
+///
+/// \param TypeOrExpr If the receiver was parsed as an expression (\c
+/// IsExpr is true), the parsed expression. If the receiver was parsed
+/// as a type (\c IsExpr is false), the parsed type.
+///
+/// \returns True if an error occurred during parsing or semantic
+/// analysis, in which case the arguments do not have valid
+/// values. Otherwise, returns false for a successful parse.
+///
+/// objc-receiver: [C++]
+/// 'super' [not parsed here]
+/// expression
+/// simple-type-specifier
+/// typename-specifier
+bool Parser::ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr) {
+ InMessageExpressionRAIIObject InMessage(*this, true);
+
+ if (Tok.isOneOf(tok::identifier, tok::coloncolon, tok::kw_typename,
+ tok::annot_cxxscope))
+ TryAnnotateTypeOrScopeToken();
+
+ if (!Actions.isSimpleTypeSpecifier(Tok.getKind())) {
+ // objc-receiver:
+ // expression
+ // Make sure any typos in the receiver are corrected or diagnosed, so that
+ // proper recovery can happen. FIXME: Perhaps filter the corrected expr to
+ // only the things that are valid ObjC receivers?
+ ExprResult Receiver = Actions.CorrectDelayedTyposInExpr(ParseExpression());
+ if (Receiver.isInvalid())
+ return true;
+
+ IsExpr = true;
+ TypeOrExpr = Receiver.get();
+ return false;
+ }
+
+ // objc-receiver:
+ // typename-specifier
+ // simple-type-specifier
+ // expression (that starts with one of the above)
+ DeclSpec DS(AttrFactory);
+ ParseCXXSimpleTypeSpecifier(DS);
+
+ if (Tok.is(tok::l_paren)) {
+ // If we see an opening parentheses at this point, we are
+ // actually parsing an expression that starts with a
+ // function-style cast, e.g.,
+ //
+ // postfix-expression:
+ // simple-type-specifier ( expression-list [opt] )
+ // typename-specifier ( expression-list [opt] )
+ //
+ // Parse the remainder of this case, then the (optional)
+ // postfix-expression suffix, followed by the (optional)
+ // right-hand side of the binary expression. We have an
+ // instance method.
+ ExprResult Receiver = ParseCXXTypeConstructExpression(DS);
+ if (!Receiver.isInvalid())
+ Receiver = ParsePostfixExpressionSuffix(Receiver.get());
+ if (!Receiver.isInvalid())
+ Receiver = ParseRHSOfBinaryExpression(Receiver.get(), prec::Comma);
+ if (Receiver.isInvalid())
+ return true;
+
+ IsExpr = true;
+ TypeOrExpr = Receiver.get();
+ return false;
+ }
+
+ // We have a class message. Turn the simple-type-specifier or
+ // typename-specifier we parsed into a type and parse the
+ // remainder of the class message.
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ TypeResult Type = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ if (Type.isInvalid())
+ return true;
+
+ IsExpr = false;
+ TypeOrExpr = Type.get().getAsOpaquePtr();
+ return false;
+}
+
+/// \brief Determine whether the parser is currently referring to an
+/// Objective-C message send, using a simplified heuristic to avoid overhead.
+///
+/// This routine will only return true for a subset of valid message-send
+/// expressions.
+bool Parser::isSimpleObjCMessageExpression() {
+ assert(Tok.is(tok::l_square) && getLangOpts().ObjC1 &&
+ "Incorrect start for isSimpleObjCMessageExpression");
+ return GetLookAheadToken(1).is(tok::identifier) &&
+ GetLookAheadToken(2).is(tok::identifier);
+}
+
+bool Parser::isStartOfObjCClassMessageMissingOpenBracket() {
+ if (!getLangOpts().ObjC1 || !NextToken().is(tok::identifier) ||
+ InMessageExpression)
+ return false;
+
+ ParsedType Type;
+
+ if (Tok.is(tok::annot_typename))
+ Type = getTypeAnnotation(Tok);
+ else if (Tok.is(tok::identifier))
+ Type = Actions.getTypeName(*Tok.getIdentifierInfo(), Tok.getLocation(),
+ getCurScope());
+ else
+ return false;
+
+ if (!Type.get().isNull() && Type.get()->isObjCObjectOrInterfaceType()) {
+ const Token &AfterNext = GetLookAheadToken(2);
+ if (AfterNext.isOneOf(tok::colon, tok::r_square)) {
+ if (Tok.is(tok::identifier))
+ TryAnnotateTypeOrScopeToken();
+
+ return Tok.is(tok::annot_typename);
+ }
+ }
+
+ return false;
+}
+
+/// objc-message-expr:
+/// '[' objc-receiver objc-message-args ']'
+///
+/// objc-receiver: [C]
+/// 'super'
+/// expression
+/// class-name
+/// type-name
+///
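+/// For example (illustrative):
+/// \code
+///   [self run];                          // expression receiver
+///   [NSString stringWithUTF8String:p];   // class-name receiver
+///   [super run];                         // send to super
+/// \endcode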
+ExprResult Parser::ParseObjCMessageExpression() {
+ assert(Tok.is(tok::l_square) && "'[' expected");
+ SourceLocation LBracLoc = ConsumeBracket(); // consume '['
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCMessageReceiver(getCurScope());
+ cutOffParsing();
+ return ExprError();
+ }
+
+ InMessageExpressionRAIIObject InMessage(*this, true);
+
+ if (getLangOpts().CPlusPlus) {
+ // We completely separate the C and C++ cases because C++ requires
+ // more complicated (read: slower) parsing.
+
+ // Handle send to super.
+ // FIXME: This doesn't benefit from the same typo-correction we
+ // get in Objective-C.
+ if (Tok.is(tok::identifier) && Tok.getIdentifierInfo() == Ident_super &&
+ NextToken().isNot(tok::period) && getCurScope()->isInObjcMethodScope())
+ return ParseObjCMessageExpressionBody(LBracLoc, ConsumeToken(),
+ ParsedType(), nullptr);
+
+ // Parse the receiver, which is either a type or an expression.
+ bool IsExpr;
+ void *TypeOrExpr = nullptr;
+ if (ParseObjCXXMessageReceiver(IsExpr, TypeOrExpr)) {
+ SkipUntil(tok::r_square, StopAtSemi);
+ return ExprError();
+ }
+
+ if (IsExpr)
+ return ParseObjCMessageExpressionBody(LBracLoc, SourceLocation(),
+ ParsedType(),
+ static_cast<Expr*>(TypeOrExpr));
+
+ return ParseObjCMessageExpressionBody(LBracLoc, SourceLocation(),
+ ParsedType::getFromOpaquePtr(TypeOrExpr),
+ nullptr);
+ }
+
+ if (Tok.is(tok::identifier)) {
+ IdentifierInfo *Name = Tok.getIdentifierInfo();
+ SourceLocation NameLoc = Tok.getLocation();
+ ParsedType ReceiverType;
+ switch (Actions.getObjCMessageKind(getCurScope(), Name, NameLoc,
+ Name == Ident_super,
+ NextToken().is(tok::period),
+ ReceiverType)) {
+ case Sema::ObjCSuperMessage:
+ return ParseObjCMessageExpressionBody(LBracLoc, ConsumeToken(),
+ ParsedType(), nullptr);
+
+ case Sema::ObjCClassMessage:
+ if (!ReceiverType) {
+ SkipUntil(tok::r_square, StopAtSemi);
+ return ExprError();
+ }
+
+ ConsumeToken(); // the type name
+
+ // Parse type arguments and protocol qualifiers.
+ if (Tok.is(tok::less)) {
+ SourceLocation NewEndLoc;
+ TypeResult NewReceiverType
+ = parseObjCTypeArgsAndProtocolQualifiers(NameLoc, ReceiverType,
+ /*consumeLastToken=*/true,
+ NewEndLoc);
+ if (!NewReceiverType.isUsable()) {
+ SkipUntil(tok::r_square, StopAtSemi);
+ return ExprError();
+ }
+
+ ReceiverType = NewReceiverType.get();
+ }
+
+ return ParseObjCMessageExpressionBody(LBracLoc, SourceLocation(),
+ ReceiverType, nullptr);
+
+ case Sema::ObjCInstanceMessage:
+ // Fall through to parse an expression.
+ break;
+ }
+ }
+
+ // Otherwise, an arbitrary expression can be the receiver of a send.
+ ExprResult Res = Actions.CorrectDelayedTyposInExpr(ParseExpression());
+ if (Res.isInvalid()) {
+ SkipUntil(tok::r_square, StopAtSemi);
+ return Res;
+ }
+
+ return ParseObjCMessageExpressionBody(LBracLoc, SourceLocation(),
+ ParsedType(), Res.get());
+}
+
+/// \brief Parse the remainder of an Objective-C message following the
+/// '[' objc-receiver.
+///
+/// This routine handles sends to super, class messages (sent to a
+/// class name), and instance messages (sent to an object), and the
+/// target is represented by \p SuperLoc, \p ReceiverType, or \p
+/// ReceiverExpr, respectively. Only one of these parameters may have
+/// a valid value.
+///
+/// \param LBracLoc The location of the opening '['.
+///
+/// \param SuperLoc If this is a send to 'super', the location of the
+/// 'super' keyword that indicates a send to the superclass.
+///
+/// \param ReceiverType If this is a class message, the type of the
+/// class we are sending a message to.
+///
+/// \param ReceiverExpr If this is an instance message, the expression
+/// used to compute the receiver object.
+///
+/// objc-message-args:
+/// objc-selector
+/// objc-keywordarg-list
+///
+/// objc-keywordarg-list:
+/// objc-keywordarg
+/// objc-keywordarg-list objc-keywordarg
+///
+/// objc-keywordarg:
+/// selector-name[opt] ':' objc-keywordexpr
+///
+/// objc-keywordexpr:
+/// nonempty-expr-list
+///
+/// nonempty-expr-list:
+/// assignment-expression
+/// nonempty-expr-list , assignment-expression
+///
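+/// For example, '[dict setObject:obj forKey:key]' has the two keyword
+/// arguments 'setObject:obj' and 'forKey:key' (illustrative names).
+///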
+ExprResult
+Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
+ SourceLocation SuperLoc,
+ ParsedType ReceiverType,
+ Expr *ReceiverExpr) {
+ InMessageExpressionRAIIObject InMessage(*this, true);
+
+ if (Tok.is(tok::code_completion)) {
+ if (SuperLoc.isValid())
+ Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc, None,
+ false);
+ else if (ReceiverType)
+ Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType, None,
+ false);
+ else
+ Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr,
+ None, false);
+ cutOffParsing();
+ return ExprError();
+ }
+
+ // Parse objc-selector
+ SourceLocation Loc;
+ IdentifierInfo *selIdent = ParseObjCSelectorPiece(Loc);
+
+ SmallVector<IdentifierInfo *, 12> KeyIdents;
+ SmallVector<SourceLocation, 12> KeyLocs;
+ ExprVector KeyExprs;
+
+ if (Tok.is(tok::colon)) {
+ while (1) {
+ // Each iteration parses a single keyword argument.
+ KeyIdents.push_back(selIdent);
+ KeyLocs.push_back(Loc);
+
+ if (ExpectAndConsume(tok::colon)) {
+ // We must manually skip to a ']', otherwise the expression skipper will
+ // stop at the ']' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_square, StopAtSemi);
+ return ExprError();
+ }
+
+ // Parse the expression after ':'.
+
+ if (Tok.is(tok::code_completion)) {
+ if (SuperLoc.isValid())
+ Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc,
+ KeyIdents,
+ /*AtArgumentExpression=*/true);
+ else if (ReceiverType)
+ Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType,
+ KeyIdents,
+ /*AtArgumentExpression=*/true);
+ else
+ Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr,
+ KeyIdents,
+ /*AtArgumentExpression=*/true);
+
+ cutOffParsing();
+ return ExprError();
+ }
+
+ ExprResult Expr;
+ if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+ Expr = ParseBraceInitializer();
+ } else
+ Expr = ParseAssignmentExpression();
+
+ ExprResult Res(Expr);
+ if (Res.isInvalid()) {
+ // We must manually skip to a ']', otherwise the expression skipper will
+ // stop at the ']' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_square, StopAtSemi);
+ return Res;
+ }
+
+ // We have a valid expression.
+ KeyExprs.push_back(Res.get());
+
+ // Code completion after each argument.
+ if (Tok.is(tok::code_completion)) {
+ if (SuperLoc.isValid())
+ Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc,
+ KeyIdents,
+ /*AtArgumentExpression=*/false);
+ else if (ReceiverType)
+ Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType,
+ KeyIdents,
+ /*AtArgumentExpression=*/false);
+ else
+ Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr,
+ KeyIdents,
+ /*AtArgumentExpression=*/false);
+ cutOffParsing();
+ return ExprError();
+ }
+
+ // Check for another keyword selector.
+ selIdent = ParseObjCSelectorPiece(Loc);
+ if (!selIdent && Tok.isNot(tok::colon))
+ break;
+ // We have a selector or a colon, continue parsing.
+ }
+ // Parse the optional, comma-separated argument list.
+ while (Tok.is(tok::comma)) {
+ SourceLocation commaLoc = ConsumeToken(); // Eat the ','.
+ // Parse the expression after ','.
+ ExprResult Res(ParseAssignmentExpression());
+ if (Tok.is(tok::colon))
+ Res = Actions.CorrectDelayedTyposInExpr(Res);
+ if (Res.isInvalid()) {
+ if (Tok.is(tok::colon)) {
+ Diag(commaLoc, diag::note_extra_comma_message_arg) <<
+ FixItHint::CreateRemoval(commaLoc);
+ }
+ // We must manually skip to a ']', otherwise the expression skipper will
+ // stop at the ']' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_square, StopAtSemi);
+ return Res;
+ }
+
+ // We have a valid expression.
+ KeyExprs.push_back(Res.get());
+ }
+ } else if (!selIdent) {
+ Diag(Tok, diag::err_expected) << tok::identifier; // missing selector name.
+
+ // We must manually skip to a ']', otherwise the expression skipper will
+ // stop at the ']' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_square, StopAtSemi);
+ return ExprError();
+ }
+
+ if (Tok.isNot(tok::r_square)) {
+ Diag(Tok, diag::err_expected)
+ << (Tok.is(tok::identifier) ? tok::colon : tok::r_square);
+ // We must manually skip to a ']', otherwise the expression skipper will
+ // stop at the ']' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_square, StopAtSemi);
+ return ExprError();
+ }
+
+ SourceLocation RBracLoc = ConsumeBracket(); // consume ']'
+
+ unsigned nKeys = KeyIdents.size();
+ if (nKeys == 0) {
+ KeyIdents.push_back(selIdent);
+ KeyLocs.push_back(Loc);
+ }
+ Selector Sel = PP.getSelectorTable().getSelector(nKeys, &KeyIdents[0]);
+
+ if (SuperLoc.isValid())
+ return Actions.ActOnSuperMessage(getCurScope(), SuperLoc, Sel,
+ LBracLoc, KeyLocs, RBracLoc, KeyExprs);
+ else if (ReceiverType)
+ return Actions.ActOnClassMessage(getCurScope(), ReceiverType, Sel,
+ LBracLoc, KeyLocs, RBracLoc, KeyExprs);
+ return Actions.ActOnInstanceMessage(getCurScope(), ReceiverExpr, Sel,
+ LBracLoc, KeyLocs, RBracLoc, KeyExprs);
+}
+
+ExprResult Parser::ParseObjCStringLiteral(SourceLocation AtLoc) {
+ ExprResult Res(ParseStringLiteralExpression());
+ if (Res.isInvalid()) return Res;
+
+ // @"foo" @"bar" is a valid concatenated string. Eat any subsequent string
+ // expressions. At this point, we know that the only valid thing that starts
+ // with '@' is an @"".
+ SmallVector<SourceLocation, 4> AtLocs;
+ ExprVector AtStrings;
+ AtLocs.push_back(AtLoc);
+ AtStrings.push_back(Res.get());
+
+ while (Tok.is(tok::at)) {
+ AtLocs.push_back(ConsumeToken()); // eat the @.
+
+ // Invalid unless there is a string literal.
+ if (!isTokenStringLiteral())
+ return ExprError(Diag(Tok, diag::err_objc_concat_string));
+
+ ExprResult Lit(ParseStringLiteralExpression());
+ if (Lit.isInvalid())
+ return Lit;
+
+ AtStrings.push_back(Lit.get());
+ }
+
+ return Actions.ParseObjCStringLiteral(AtLocs.data(), AtStrings);
+}
+
+/// ParseObjCBooleanLiteral -
+/// objc-scalar-literal : '@' boolean-keyword
+/// ;
+/// boolean-keyword: 'true' | 'false' | '__objc_yes' | '__objc_no'
+/// ;
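+/// For example, '@YES' typically reaches here as '@__objc_yes' after macro
+/// expansion.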
+ExprResult Parser::ParseObjCBooleanLiteral(SourceLocation AtLoc,
+ bool ArgValue) {
+ SourceLocation EndLoc = ConsumeToken(); // consume the keyword.
+ return Actions.ActOnObjCBoolLiteral(AtLoc, EndLoc, ArgValue);
+}
+
+/// ParseObjCCharacterLiteral -
+/// objc-scalar-literal : '@' character-literal
+/// ;
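+/// For example, @'a'.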
+ExprResult Parser::ParseObjCCharacterLiteral(SourceLocation AtLoc) {
+ ExprResult Lit(Actions.ActOnCharacterConstant(Tok));
+ if (Lit.isInvalid()) {
+ return Lit;
+ }
+ ConsumeToken(); // Consume the literal token.
+ return Actions.BuildObjCNumericLiteral(AtLoc, Lit.get());
+}
+
+/// ParseObjCNumericLiteral -
+/// objc-scalar-literal : '@' scalar-literal
+/// ;
+/// scalar-literal : numeric-constant /* any numeric constant. */
+/// ;
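+/// For example, @42 or @3.14.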
+ExprResult Parser::ParseObjCNumericLiteral(SourceLocation AtLoc) {
+ ExprResult Lit(Actions.ActOnNumericConstant(Tok));
+ if (Lit.isInvalid()) {
+ return Lit;
+ }
+ ConsumeToken(); // Consume the literal token.
+ return Actions.BuildObjCNumericLiteral(AtLoc, Lit.get());
+}
+
+/// ParseObjCBoxedExpr -
+/// objc-box-expression:
+/// @( assignment-expression )
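+/// For example, @(x + 1), where 'x' is an illustrative variable.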
+ExprResult
+Parser::ParseObjCBoxedExpr(SourceLocation AtLoc) {
+ if (Tok.isNot(tok::l_paren))
+ return ExprError(Diag(Tok, diag::err_expected_lparen_after) << "@");
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ ExprResult ValueExpr(ParseAssignmentExpression());
+ if (T.consumeClose())
+ return ExprError();
+
+ if (ValueExpr.isInvalid())
+ return ExprError();
+
+ // Wrap the sub-expression in a parenthesized expression, to distinguish
+ // a boxed expression from a literal.
+ SourceLocation LPLoc = T.getOpenLocation(), RPLoc = T.getCloseLocation();
+ ValueExpr = Actions.ActOnParenExpr(LPLoc, RPLoc, ValueExpr.get());
+ return Actions.BuildObjCBoxedExpr(SourceRange(AtLoc, RPLoc),
+ ValueExpr.get());
+}
+
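+/// objc-array-literal:
+/// '@' '[' expression-list[opt] ']'
+///
+/// For example, @[ @1, @2 ].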
+ExprResult Parser::ParseObjCArrayLiteral(SourceLocation AtLoc) {
+ ExprVector ElementExprs; // array elements.
+ ConsumeBracket(); // consume the l_square.
+
+ while (Tok.isNot(tok::r_square)) {
+ // Parse list of array element expressions (all must be id types).
+ ExprResult Res(ParseAssignmentExpression());
+ if (Res.isInvalid()) {
+ // We must manually skip to a ']', otherwise the expression skipper will
+ // stop at the ']' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_square, StopAtSemi);
+ return Res;
+ }
+
+ // Parse the ellipsis that indicates a pack expansion.
+ if (Tok.is(tok::ellipsis))
+ Res = Actions.ActOnPackExpansion(Res.get(), ConsumeToken());
+ if (Res.isInvalid())
+ return ExprError();
+
+ ElementExprs.push_back(Res.get());
+
+ if (Tok.is(tok::comma))
+ ConsumeToken(); // Eat the ','.
+ else if (Tok.isNot(tok::r_square))
+ return ExprError(Diag(Tok, diag::err_expected_either) << tok::r_square
+ << tok::comma);
+ }
+ SourceLocation EndLoc = ConsumeBracket(); // location of ']'
+ MultiExprArg Args(ElementExprs);
+ return Actions.BuildObjCArrayLiteral(SourceRange(AtLoc, EndLoc), Args);
+}
+
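+/// objc-dictionary-literal:
+/// '@' '{' key-value-list[opt] '}'
+///
+/// For example, @{ @"name" : value }.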
+ExprResult Parser::ParseObjCDictionaryLiteral(SourceLocation AtLoc) {
+ SmallVector<ObjCDictionaryElement, 4> Elements; // dictionary elements.
+ ConsumeBrace(); // consume the l_brace.
+ while (Tok.isNot(tok::r_brace)) {
+ // Parse the comma separated key : value expressions.
+ ExprResult KeyExpr;
+ {
+ ColonProtectionRAIIObject X(*this);
+ KeyExpr = ParseAssignmentExpression();
+ if (KeyExpr.isInvalid()) {
+ // We must manually skip to a '}', otherwise the expression skipper will
+ // stop at the '}' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_brace, StopAtSemi);
+ return KeyExpr;
+ }
+ }
+
+ if (ExpectAndConsume(tok::colon)) {
+ SkipUntil(tok::r_brace, StopAtSemi);
+ return ExprError();
+ }
+
+ ExprResult ValueExpr(ParseAssignmentExpression());
+ if (ValueExpr.isInvalid()) {
+ // We must manually skip to a '}', otherwise the expression skipper will
+ // stop at the '}' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_brace, StopAtSemi);
+ return ValueExpr;
+ }
+
+ // Parse the ellipsis that designates this as a pack expansion.
+ SourceLocation EllipsisLoc;
+ if (getLangOpts().CPlusPlus)
+ TryConsumeToken(tok::ellipsis, EllipsisLoc);
+
+ // We have a valid expression. Collect it in a vector so we can
+ // build the argument list.
+ ObjCDictionaryElement Element = {
+ KeyExpr.get(), ValueExpr.get(), EllipsisLoc, None
+ };
+ Elements.push_back(Element);
+
+ if (!TryConsumeToken(tok::comma) && Tok.isNot(tok::r_brace))
+ return ExprError(Diag(Tok, diag::err_expected_either) << tok::r_brace
+ << tok::comma);
+ }
+ SourceLocation EndLoc = ConsumeBrace();
+
+ // Create the ObjCDictionaryLiteral.
+ return Actions.BuildObjCDictionaryLiteral(SourceRange(AtLoc, EndLoc),
+ Elements);
+}
+
+/// objc-encode-expression:
+/// \@encode ( type-name )
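+///
+/// For example, \@encode(int) yields the type string "i".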
+ExprResult
+Parser::ParseObjCEncodeExpression(SourceLocation AtLoc) {
+ assert(Tok.isObjCAtKeyword(tok::objc_encode) && "Not an @encode expression!");
+
+ SourceLocation EncLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::l_paren))
+ return ExprError(Diag(Tok, diag::err_expected_lparen_after) << "@encode");
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ TypeResult Ty = ParseTypeName();
+
+ T.consumeClose();
+
+ if (Ty.isInvalid())
+ return ExprError();
+
+ return Actions.ParseObjCEncodeExpression(AtLoc, EncLoc, T.getOpenLocation(),
+ Ty.get(), T.getCloseLocation());
+}
+
+/// objc-protocol-expression
+/// \@protocol ( protocol-name )
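+///
+/// For example, \@protocol(NSObject).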
+ExprResult
+Parser::ParseObjCProtocolExpression(SourceLocation AtLoc) {
+ SourceLocation ProtoLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::l_paren))
+ return ExprError(Diag(Tok, diag::err_expected_lparen_after) << "@protocol");
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ if (Tok.isNot(tok::identifier))
+ return ExprError(Diag(Tok, diag::err_expected) << tok::identifier);
+
+ IdentifierInfo *protocolId = Tok.getIdentifierInfo();
+ SourceLocation ProtoIdLoc = ConsumeToken();
+
+ T.consumeClose();
+
+ return Actions.ParseObjCProtocolExpression(protocolId, AtLoc, ProtoLoc,
+ T.getOpenLocation(), ProtoIdLoc,
+ T.getCloseLocation());
+}
+
+/// objc-selector-expression
+/// \@selector '(' '('[opt] objc-keyword-selector ')'[opt] ')'
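+///
+/// For example, \@selector(insertObject:atIndex:); in C++, '::' is also
+/// accepted between selector pieces.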
+ExprResult Parser::ParseObjCSelectorExpression(SourceLocation AtLoc) {
+ SourceLocation SelectorLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::l_paren))
+ return ExprError(Diag(Tok, diag::err_expected_lparen_after) << "@selector");
+
+ SmallVector<IdentifierInfo *, 12> KeyIdents;
+ SourceLocation sLoc;
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ bool HasOptionalParen = Tok.is(tok::l_paren);
+ if (HasOptionalParen)
+ ConsumeParen();
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCSelector(getCurScope(), KeyIdents);
+ cutOffParsing();
+ return ExprError();
+ }
+
+ IdentifierInfo *SelIdent = ParseObjCSelectorPiece(sLoc);
+ if (!SelIdent && // missing selector name.
+ Tok.isNot(tok::colon) && Tok.isNot(tok::coloncolon))
+ return ExprError(Diag(Tok, diag::err_expected) << tok::identifier);
+
+ KeyIdents.push_back(SelIdent);
+
+ unsigned nColons = 0;
+ if (Tok.isNot(tok::r_paren)) {
+ while (1) {
+ if (TryConsumeToken(tok::coloncolon)) { // Handle :: in C++.
+ ++nColons;
+ KeyIdents.push_back(nullptr);
+ } else if (ExpectAndConsume(tok::colon)) // Otherwise expect ':'.
+ return ExprError();
+ ++nColons;
+
+ if (Tok.is(tok::r_paren))
+ break;
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCSelector(getCurScope(), KeyIdents);
+ cutOffParsing();
+ return ExprError();
+ }
+
+ // Check for another keyword selector.
+ SourceLocation Loc;
+ SelIdent = ParseObjCSelectorPiece(Loc);
+ KeyIdents.push_back(SelIdent);
+ if (!SelIdent && Tok.isNot(tok::colon) && Tok.isNot(tok::coloncolon))
+ break;
+ }
+ }
+ if (HasOptionalParen && Tok.is(tok::r_paren))
+ ConsumeParen(); // ')'
+ T.consumeClose();
+ Selector Sel = PP.getSelectorTable().getSelector(nColons, &KeyIdents[0]);
+ return Actions.ParseObjCSelectorExpression(Sel, AtLoc, SelectorLoc,
+ T.getOpenLocation(),
+ T.getCloseLocation(),
+ !HasOptionalParen);
+}
+
+void Parser::ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod) {
+ // MCDecl might be null due to an error in the method or c-function
+ // prototype, etc.
+ Decl *MCDecl = LM.D;
+ bool skip = MCDecl &&
+ ((parseMethod && !Actions.isObjCMethodDecl(MCDecl)) ||
+ (!parseMethod && Actions.isObjCMethodDecl(MCDecl)));
+ if (skip)
+ return;
+
+ // Save the current token position.
+ SourceLocation OrigLoc = Tok.getLocation();
+
+ assert(!LM.Toks.empty() && "ParseLexedObjCMethodDef - Empty body!");
+ // Append the current token at the end of the new token stream so that it
+ // doesn't get lost.
+ LM.Toks.push_back(Tok);
+ PP.EnterTokenStream(LM.Toks.data(), LM.Toks.size(),
+ /*DisableMacroExpansion=*/true, /*OwnsTokens=*/false);
+
+ // Consume the previously pushed token.
+ ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
+
+ assert(Tok.isOneOf(tok::l_brace, tok::kw_try, tok::colon) &&
+ "Inline objective-c method not starting with '{' or 'try' or ':'");
+ // Enter a scope for the method or c-function body.
+ ParseScope BodyScope(this,
+ parseMethod
+ ? Scope::ObjCMethodScope|Scope::FnScope|Scope::DeclScope
+ : Scope::FnScope|Scope::DeclScope);
+
+ // Tell the actions module that we have entered a method or c-function definition
+ // with the specified Declarator for the method/function.
+ if (parseMethod)
+ Actions.ActOnStartOfObjCMethodDef(getCurScope(), MCDecl);
+ else
+ Actions.ActOnStartOfFunctionDef(getCurScope(), MCDecl);
+ if (Tok.is(tok::kw_try))
+ ParseFunctionTryBlock(MCDecl, BodyScope);
+ else {
+ if (Tok.is(tok::colon))
+ ParseConstructorInitializer(MCDecl);
+ ParseFunctionStatementBody(MCDecl, BodyScope);
+ }
+
+ if (Tok.getLocation() != OrigLoc) {
+ // Due to a parsing error, we either went over the cached tokens or
+ // there are still cached tokens left. If it's the latter case, skip the
+ // leftover tokens.
+ // Since this is an uncommon situation that should be avoided, use the
+ // expensive isBeforeInTranslationUnit call.
+ if (PP.getSourceManager().isBeforeInTranslationUnit(Tok.getLocation(),
+ OrigLoc))
+ while (Tok.getLocation() != OrigLoc && Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseOpenMP.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseOpenMP.cpp
new file mode 100644
index 0000000..078f4c3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseOpenMP.cpp
@@ -0,0 +1,1068 @@
+//===--- ParseOpenMP.cpp - OpenMP directives parsing ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// \brief This file implements parsing of all OpenMP directives and clauses.
+///
+//===----------------------------------------------------------------------===//
+
+#include "RAIIObjectsForParser.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/StmtOpenMP.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Sema/Scope.h"
+#include "llvm/ADT/PointerIntPair.h"
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// OpenMP declarative directives.
+//===----------------------------------------------------------------------===//
+
+static OpenMPDirectiveKind ParseOpenMPDirectiveKind(Parser &P) {
+ // Array of foldings: F[i][0] F[i][1] ===> F[i][2].
+ // E.g.: OMPD_for OMPD_simd ===> OMPD_for_simd
+ // TODO: add other combined directives in topological order.
+ const OpenMPDirectiveKind F[][3] = {
+ {OMPD_unknown /*cancellation*/, OMPD_unknown /*point*/,
+ OMPD_cancellation_point},
+ {OMPD_target, OMPD_unknown /*data*/, OMPD_target_data},
+ {OMPD_for, OMPD_simd, OMPD_for_simd},
+ {OMPD_parallel, OMPD_for, OMPD_parallel_for},
+ {OMPD_parallel_for, OMPD_simd, OMPD_parallel_for_simd},
+ {OMPD_parallel, OMPD_sections, OMPD_parallel_sections},
+ {OMPD_taskloop, OMPD_simd, OMPD_taskloop_simd}};
+ auto Tok = P.getCurToken();
+ auto DKind =
+ Tok.isAnnotation()
+ ? OMPD_unknown
+ : getOpenMPDirectiveKind(P.getPreprocessor().getSpelling(Tok));
+
+ bool TokenMatched = false;
+ for (unsigned i = 0; i < llvm::array_lengthof(F); ++i) {
+ if (!Tok.isAnnotation() && DKind == OMPD_unknown) {
+ TokenMatched =
+ (i == 0) &&
+ !P.getPreprocessor().getSpelling(Tok).compare("cancellation");
+ } else {
+ TokenMatched = DKind == F[i][0] && DKind != OMPD_unknown;
+ }
+
+ if (TokenMatched) {
+ Tok = P.getPreprocessor().LookAhead(0);
+ auto TokenIsAnnotation = Tok.isAnnotation();
+ auto SDKind =
+ TokenIsAnnotation
+ ? OMPD_unknown
+ : getOpenMPDirectiveKind(P.getPreprocessor().getSpelling(Tok));
+
+ if (!TokenIsAnnotation && SDKind == OMPD_unknown) {
+ TokenMatched =
+ ((i == 0) &&
+ !P.getPreprocessor().getSpelling(Tok).compare("point")) ||
+ ((i == 1) && !P.getPreprocessor().getSpelling(Tok).compare("data"));
+ } else {
+ TokenMatched = SDKind == F[i][1] && SDKind != OMPD_unknown;
+ }
+
+ if (TokenMatched) {
+ P.ConsumeToken();
+ DKind = F[i][2];
+ }
+ }
+ }
+ return DKind;
+}
+
+/// \brief Parsing of declarative OpenMP directives.
+///
+/// threadprivate-directive:
+/// annot_pragma_openmp 'threadprivate' simple-variable-list
+///
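+/// For example: #pragma omp threadprivate(a, b) ('a' and 'b' are
+/// illustrative variable names).
+///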
+Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirective() {
+ assert(Tok.is(tok::annot_pragma_openmp) && "Not an OpenMP directive!");
+ ParenBraceBracketBalancer BalancerRAIIObj(*this);
+
+ SourceLocation Loc = ConsumeToken();
+ SmallVector<Expr *, 5> Identifiers;
+ auto DKind = ParseOpenMPDirectiveKind(*this);
+
+ switch (DKind) {
+ case OMPD_threadprivate:
+ ConsumeToken();
+ if (!ParseOpenMPSimpleVarList(OMPD_threadprivate, Identifiers, true)) {
+ // The last seen token is annot_pragma_openmp_end - need to check for
+ // extra tokens.
+ if (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
+ << getOpenMPDirectiveName(OMPD_threadprivate);
+ SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
+ }
+ // Skip the last annot_pragma_openmp_end.
+ ConsumeToken();
+ return Actions.ActOnOpenMPThreadprivateDirective(Loc, Identifiers);
+ }
+ break;
+ case OMPD_unknown:
+ Diag(Tok, diag::err_omp_unknown_directive);
+ break;
+ case OMPD_parallel:
+ case OMPD_simd:
+ case OMPD_task:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_taskgroup:
+ case OMPD_flush:
+ case OMPD_for:
+ case OMPD_for_simd:
+ case OMPD_sections:
+ case OMPD_section:
+ case OMPD_single:
+ case OMPD_master:
+ case OMPD_ordered:
+ case OMPD_critical:
+ case OMPD_parallel_for:
+ case OMPD_parallel_for_simd:
+ case OMPD_parallel_sections:
+ case OMPD_atomic:
+ case OMPD_target:
+ case OMPD_teams:
+ case OMPD_cancellation_point:
+ case OMPD_cancel:
+ case OMPD_target_data:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_distribute:
+ Diag(Tok, diag::err_omp_unexpected_directive)
+ << getOpenMPDirectiveName(DKind);
+ break;
+ }
+ SkipUntil(tok::annot_pragma_openmp_end);
+ return DeclGroupPtrTy();
+}
+
+/// \brief Parsing of declarative or executable OpenMP directives.
+///
+/// threadprivate-directive:
+/// annot_pragma_openmp 'threadprivate' simple-variable-list
+/// annot_pragma_openmp_end
+///
+/// executable-directive:
+/// annot_pragma_openmp 'parallel' | 'simd' | 'for' | 'sections' |
+/// 'section' | 'single' | 'master' | 'critical' [ '(' <name> ')' ] |
+/// 'parallel for' | 'parallel sections' | 'task' | 'taskyield' |
+/// 'barrier' | 'taskwait' | 'flush' | 'ordered' | 'atomic' |
+/// 'for simd' | 'parallel for simd' | 'target' | 'target data' |
+/// 'taskgroup' | 'teams' | 'taskloop' | 'taskloop simd' {clause} |
+/// 'distribute'
+/// annot_pragma_openmp_end
+///
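+/// For example: #pragma omp parallel for private(i) (with 'i' an
+/// illustrative loop variable).
+///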
+StmtResult
+Parser::ParseOpenMPDeclarativeOrExecutableDirective(bool StandAloneAllowed) {
+ assert(Tok.is(tok::annot_pragma_openmp) && "Not an OpenMP directive!");
+ ParenBraceBracketBalancer BalancerRAIIObj(*this);
+ SmallVector<Expr *, 5> Identifiers;
+ SmallVector<OMPClause *, 5> Clauses;
+ SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>, OMPC_unknown + 1>
+ FirstClauses(OMPC_unknown + 1);
+ unsigned ScopeFlags =
+ Scope::FnScope | Scope::DeclScope | Scope::OpenMPDirectiveScope;
+ SourceLocation Loc = ConsumeToken(), EndLoc;
+ auto DKind = ParseOpenMPDirectiveKind(*this);
+ OpenMPDirectiveKind CancelRegion = OMPD_unknown;
+ // Name of critical directive.
+ DeclarationNameInfo DirName;
+ StmtResult Directive = StmtError();
+ bool HasAssociatedStatement = true;
+ bool FlushHasClause = false;
+
+ switch (DKind) {
+ case OMPD_threadprivate:
+ ConsumeToken();
+ if (!ParseOpenMPSimpleVarList(OMPD_threadprivate, Identifiers, false)) {
+ // The last seen token is annot_pragma_openmp_end - need to check for
+ // extra tokens.
+ if (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
+ << getOpenMPDirectiveName(OMPD_threadprivate);
+ SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
+ }
+ DeclGroupPtrTy Res =
+ Actions.ActOnOpenMPThreadprivateDirective(Loc, Identifiers);
+ Directive = Actions.ActOnDeclStmt(Res, Loc, Tok.getLocation());
+ }
+ SkipUntil(tok::annot_pragma_openmp_end);
+ break;
+ case OMPD_flush:
+ if (PP.LookAhead(0).is(tok::l_paren)) {
+ FlushHasClause = true;
+ // Push copy of the current token back to stream to properly parse
+ // pseudo-clause OMPFlushClause.
+ PP.EnterToken(Tok);
+ }
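+ // Fall through: apart from the pseudo-clause handling above, 'flush' is
+ // treated like the other stand-alone directives below.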
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_cancellation_point:
+ case OMPD_cancel:
+ if (!StandAloneAllowed) {
+ Diag(Tok, diag::err_omp_immediate_directive)
+ << getOpenMPDirectiveName(DKind) << 0;
+ }
+ HasAssociatedStatement = false;
+ // Fall through for further analysis.
+ case OMPD_parallel:
+ case OMPD_simd:
+ case OMPD_for:
+ case OMPD_for_simd:
+ case OMPD_sections:
+ case OMPD_single:
+ case OMPD_section:
+ case OMPD_master:
+ case OMPD_critical:
+ case OMPD_parallel_for:
+ case OMPD_parallel_for_simd:
+ case OMPD_parallel_sections:
+ case OMPD_task:
+ case OMPD_ordered:
+ case OMPD_atomic:
+ case OMPD_target:
+ case OMPD_teams:
+ case OMPD_taskgroup:
+ case OMPD_target_data:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_distribute: {
+ ConsumeToken();
+ // Parse directive name of the 'critical' directive if any.
+ if (DKind == OMPD_critical) {
+ BalancedDelimiterTracker T(*this, tok::l_paren,
+ tok::annot_pragma_openmp_end);
+ if (!T.consumeOpen()) {
+ if (Tok.isAnyIdentifier()) {
+ DirName =
+ DeclarationNameInfo(Tok.getIdentifierInfo(), Tok.getLocation());
+ ConsumeAnyToken();
+ } else {
+ Diag(Tok, diag::err_omp_expected_identifier_for_critical);
+ }
+ T.consumeClose();
+ }
+ } else if (DKind == OMPD_cancellation_point || DKind == OMPD_cancel) {
+ CancelRegion = ParseOpenMPDirectiveKind(*this);
+ if (Tok.isNot(tok::annot_pragma_openmp_end))
+ ConsumeToken();
+ }
+
+ if (isOpenMPLoopDirective(DKind))
+ ScopeFlags |= Scope::OpenMPLoopDirectiveScope;
+ if (isOpenMPSimdDirective(DKind))
+ ScopeFlags |= Scope::OpenMPSimdDirectiveScope;
+ ParseScope OMPDirectiveScope(this, ScopeFlags);
+ Actions.StartOpenMPDSABlock(DKind, DirName, Actions.getCurScope(), Loc);
+
+ while (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ OpenMPClauseKind CKind =
+ Tok.isAnnotation()
+ ? OMPC_unknown
+ : FlushHasClause ? OMPC_flush
+ : getOpenMPClauseKind(PP.getSpelling(Tok));
+ Actions.StartOpenMPClause(CKind);
+ FlushHasClause = false;
+ OMPClause *Clause =
+ ParseOpenMPClause(DKind, CKind, !FirstClauses[CKind].getInt());
+ FirstClauses[CKind].setInt(true);
+ if (Clause) {
+ FirstClauses[CKind].setPointer(Clause);
+ Clauses.push_back(Clause);
+ }
+
+ // Skip ',' if any.
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ Actions.EndOpenMPClause();
+ }
+ // End location of the directive.
+ EndLoc = Tok.getLocation();
+ // Consume final annot_pragma_openmp_end.
+ ConsumeToken();
+
+ // OpenMP [2.13.8, ordered Construct, Syntax]
+ // If the depend clause is specified, the ordered construct is a stand-alone
+ // directive.
+ if (DKind == OMPD_ordered && FirstClauses[OMPC_depend].getInt()) {
+ if (!StandAloneAllowed) {
+ Diag(Loc, diag::err_omp_immediate_directive)
+ << getOpenMPDirectiveName(DKind) << 1
+ << getOpenMPClauseName(OMPC_depend);
+ }
+ HasAssociatedStatement = false;
+ }
+
+ StmtResult AssociatedStmt;
+ if (HasAssociatedStatement) {
+ // The body is a block scope like in Lambdas and Blocks.
+ Sema::CompoundScopeRAII CompoundScope(Actions);
+ Actions.ActOnOpenMPRegionStart(DKind, getCurScope());
+ Actions.ActOnStartOfCompoundStmt();
+ // Parse statement
+ AssociatedStmt = ParseStatement();
+ Actions.ActOnFinishOfCompoundStmt();
+ AssociatedStmt = Actions.ActOnOpenMPRegionEnd(AssociatedStmt, Clauses);
+ }
+ Directive = Actions.ActOnOpenMPExecutableDirective(
+ DKind, DirName, CancelRegion, Clauses, AssociatedStmt.get(), Loc,
+ EndLoc);
+
+ // Exit scope.
+ Actions.EndOpenMPDSABlock(Directive.get());
+ OMPDirectiveScope.Exit();
+ break;
+ }
+ case OMPD_unknown:
+ Diag(Tok, diag::err_omp_unknown_directive);
+ SkipUntil(tok::annot_pragma_openmp_end);
+ break;
+ }
+ return Directive;
+}
+
+/// \brief Parses list of simple variables for '#pragma omp threadprivate'
+/// directive.
+///
+/// simple-variable-list:
+/// '(' id-expression {, id-expression} ')'
+///
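+/// For example: (a, b) or, when scope specifiers are allowed, (a, N::b).
+///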
+bool Parser::ParseOpenMPSimpleVarList(OpenMPDirectiveKind Kind,
+ SmallVectorImpl<Expr *> &VarList,
+ bool AllowScopeSpecifier) {
+ VarList.clear();
+ // Parse '('.
+ BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
+ if (T.expectAndConsume(diag::err_expected_lparen_after,
+ getOpenMPDirectiveName(Kind)))
+ return true;
+ bool IsCorrect = true;
+ bool NoIdentIsFound = true;
+
+ // Read tokens while ')' or annot_pragma_openmp_end is not found.
+ while (Tok.isNot(tok::r_paren) && Tok.isNot(tok::annot_pragma_openmp_end)) {
+ CXXScopeSpec SS;
+ SourceLocation TemplateKWLoc;
+ UnqualifiedId Name;
+ // Read var name.
+ Token PrevTok = Tok;
+ NoIdentIsFound = false;
+
+ if (AllowScopeSpecifier && getLangOpts().CPlusPlus &&
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), false)) {
+ IsCorrect = false;
+ SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ } else if (ParseUnqualifiedId(SS, false, false, false, ParsedType(),
+ TemplateKWLoc, Name)) {
+ IsCorrect = false;
+ SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ } else if (Tok.isNot(tok::comma) && Tok.isNot(tok::r_paren) &&
+ Tok.isNot(tok::annot_pragma_openmp_end)) {
+ IsCorrect = false;
+ SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ Diag(PrevTok.getLocation(), diag::err_expected)
+ << tok::identifier
+ << SourceRange(PrevTok.getLocation(), PrevTokLocation);
+ } else {
+ DeclarationNameInfo NameInfo = Actions.GetNameFromUnqualifiedId(Name);
+ ExprResult Res =
+ Actions.ActOnOpenMPIdExpression(getCurScope(), SS, NameInfo);
+ if (Res.isUsable())
+ VarList.push_back(Res.get());
+ }
+ // Consume ','.
+ if (Tok.is(tok::comma)) {
+ ConsumeToken();
+ }
+ }
+
+ if (NoIdentIsFound) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ IsCorrect = false;
+ }
+
+ // Parse ')'.
+ IsCorrect = !T.consumeClose() && IsCorrect;
+
+ return !IsCorrect && VarList.empty();
+}
+
+/// \brief Parsing of OpenMP clauses.
+///
+/// clause:
+/// if-clause | final-clause | num_threads-clause | safelen-clause |
+/// default-clause | private-clause | firstprivate-clause | shared-clause
+/// | linear-clause | aligned-clause | collapse-clause |
+/// lastprivate-clause | reduction-clause | proc_bind-clause |
+/// schedule-clause | copyin-clause | copyprivate-clause | untied-clause |
+/// mergeable-clause | flush-clause | read-clause | write-clause |
+/// update-clause | capture-clause | seq_cst-clause | device-clause |
+/// simdlen-clause | threads-clause | simd-clause | num_teams-clause |
+/// thread_limit-clause | priority-clause | grainsize-clause |
+/// nogroup-clause | num_tasks-clause | hint-clause
+///
+OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
+ OpenMPClauseKind CKind, bool FirstClause) {
+ OMPClause *Clause = nullptr;
+ bool ErrorFound = false;
+ // Check if clause is allowed for the given directive.
+ if (CKind != OMPC_unknown && !isAllowedClauseForDirective(DKind, CKind)) {
+ Diag(Tok, diag::err_omp_unexpected_clause) << getOpenMPClauseName(CKind)
+ << getOpenMPDirectiveName(DKind);
+ ErrorFound = true;
+ }
+
+ switch (CKind) {
+ case OMPC_final:
+ case OMPC_num_threads:
+ case OMPC_safelen:
+ case OMPC_simdlen:
+ case OMPC_collapse:
+ case OMPC_ordered:
+ case OMPC_device:
+ case OMPC_num_teams:
+ case OMPC_thread_limit:
+ case OMPC_priority:
+ case OMPC_grainsize:
+ case OMPC_num_tasks:
+ case OMPC_hint:
+ // OpenMP [2.5, Restrictions]
+ // At most one num_threads clause can appear on the directive.
+ // OpenMP [2.8.1, simd construct, Restrictions]
+ // Only one safelen clause can appear on a simd directive.
+ // Only one simdlen clause can appear on a simd directive.
+ // Only one collapse clause can appear on a simd directive.
+ // OpenMP [2.9.1, target data construct, Restrictions]
+ // At most one device clause can appear on the directive.
+ // OpenMP [2.11.1, task Construct, Restrictions]
+ // At most one if clause can appear on the directive.
+ // At most one final clause can appear on the directive.
+ // OpenMP [teams Construct, Restrictions]
+ // At most one num_teams clause can appear on the directive.
+ // At most one thread_limit clause can appear on the directive.
+ // OpenMP [2.9.1, task Construct, Restrictions]
+ // At most one priority clause can appear on the directive.
+ // OpenMP [2.9.2, taskloop Construct, Restrictions]
+ // At most one grainsize clause can appear on the directive.
+ // OpenMP [2.9.2, taskloop Construct, Restrictions]
+ // At most one num_tasks clause can appear on the directive.
+ if (!FirstClause) {
+ Diag(Tok, diag::err_omp_more_one_clause)
+ << getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
+ ErrorFound = true;
+ }
+
+ if (CKind == OMPC_ordered && PP.LookAhead(/*N=*/0).isNot(tok::l_paren))
+ Clause = ParseOpenMPClause(CKind);
+ else
+ Clause = ParseOpenMPSingleExprClause(CKind);
+ break;
+ case OMPC_default:
+ case OMPC_proc_bind:
+ // OpenMP [2.14.3.1, Restrictions]
+ // Only a single default clause may be specified on a parallel, task or
+ // teams directive.
+ // OpenMP [2.5, parallel Construct, Restrictions]
+ // At most one proc_bind clause can appear on the directive.
+ if (!FirstClause) {
+ Diag(Tok, diag::err_omp_more_one_clause)
+ << getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
+ ErrorFound = true;
+ }
+
+ Clause = ParseOpenMPSimpleClause(CKind);
+ break;
+ case OMPC_schedule:
+ // OpenMP [2.7.1, Restrictions, p. 3]
+ // Only one schedule clause can appear on a loop directive.
+ if (!FirstClause) {
+ Diag(Tok, diag::err_omp_more_one_clause)
+ << getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
+ ErrorFound = true;
+ }
+
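+ // Fall through: 'schedule' uses the same single-expression-with-argument
+ // form as 'if'.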
+ case OMPC_if:
+ Clause = ParseOpenMPSingleExprWithArgClause(CKind);
+ break;
+ case OMPC_nowait:
+ case OMPC_untied:
+ case OMPC_mergeable:
+ case OMPC_read:
+ case OMPC_write:
+ case OMPC_update:
+ case OMPC_capture:
+ case OMPC_seq_cst:
+ case OMPC_threads:
+ case OMPC_simd:
+ case OMPC_nogroup:
+ // OpenMP [2.7.1, Restrictions, p. 9]
+ // Only one ordered clause can appear on a loop directive.
+ // OpenMP [2.7.1, Restrictions, C/C++, p. 4]
+ // Only one nowait clause can appear on a for directive.
+ if (!FirstClause) {
+ Diag(Tok, diag::err_omp_more_one_clause)
+ << getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
+ ErrorFound = true;
+ }
+
+ Clause = ParseOpenMPClause(CKind);
+ break;
+ case OMPC_private:
+ case OMPC_firstprivate:
+ case OMPC_lastprivate:
+ case OMPC_shared:
+ case OMPC_reduction:
+ case OMPC_linear:
+ case OMPC_aligned:
+ case OMPC_copyin:
+ case OMPC_copyprivate:
+ case OMPC_flush:
+ case OMPC_depend:
+ case OMPC_map:
+ Clause = ParseOpenMPVarListClause(DKind, CKind);
+ break;
+ case OMPC_unknown:
+ Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
+ << getOpenMPDirectiveName(DKind);
+ SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
+ break;
+ case OMPC_threadprivate:
+ Diag(Tok, diag::err_omp_unexpected_clause) << getOpenMPClauseName(CKind)
+ << getOpenMPDirectiveName(DKind);
+ SkipUntil(tok::comma, tok::annot_pragma_openmp_end, StopBeforeMatch);
+ break;
+ }
+ return ErrorFound ? nullptr : Clause;
+}
+
+/// \brief Parsing of OpenMP clauses with single expressions like 'final',
+/// 'collapse', 'safelen', 'num_threads', 'simdlen', 'num_teams',
+/// 'thread_limit', 'priority', 'grainsize', 'num_tasks' or 'hint'.
+///
+/// final-clause:
+/// 'final' '(' expression ')'
+///
+/// num_threads-clause:
+/// 'num_threads' '(' expression ')'
+///
+/// safelen-clause:
+/// 'safelen' '(' expression ')'
+///
+/// simdlen-clause:
+/// 'simdlen' '(' expression ')'
+///
+/// collapse-clause:
+/// 'collapse' '(' expression ')'
+///
+/// priority-clause:
+/// 'priority' '(' expression ')'
+///
+/// grainsize-clause:
+/// 'grainsize' '(' expression ')'
+///
+/// num_tasks-clause:
+/// 'num_tasks' '(' expression ')'
+///
+/// hint-clause:
+/// 'hint' '(' expression ')'
+///
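+/// For example: num_threads(4) or collapse(2).
+///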
+OMPClause *Parser::ParseOpenMPSingleExprClause(OpenMPClauseKind Kind) {
+ SourceLocation Loc = ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
+ if (T.expectAndConsume(diag::err_expected_lparen_after,
+ getOpenMPClauseName(Kind)))
+ return nullptr;
+
+ SourceLocation ELoc = Tok.getLocation();
+ ExprResult LHS(ParseCastExpression(false, false, NotTypeCast));
+ ExprResult Val(ParseRHSOfBinaryExpression(LHS, prec::Conditional));
+ Val = Actions.ActOnFinishFullExpr(Val.get(), ELoc);
+
+ // Parse ')'.
+ T.consumeClose();
+
+ if (Val.isInvalid())
+ return nullptr;
+
+ return Actions.ActOnOpenMPSingleExprClause(
+ Kind, Val.get(), Loc, T.getOpenLocation(), T.getCloseLocation());
+}
+
+/// \brief Parsing of simple OpenMP clauses like 'default' or 'proc_bind'.
+///
+/// default-clause:
+/// 'default' '(' 'none' | 'shared' ')
+///
+/// proc_bind-clause:
+/// 'proc_bind' '(' 'master' | 'close' | 'spread' ')
+///
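+/// For example: default(none) or proc_bind(close).
+///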
+OMPClause *Parser::ParseOpenMPSimpleClause(OpenMPClauseKind Kind) {
+ SourceLocation Loc = Tok.getLocation();
+ SourceLocation LOpen = ConsumeToken();
+ // Parse '('.
+ BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
+ if (T.expectAndConsume(diag::err_expected_lparen_after,
+ getOpenMPClauseName(Kind)))
+ return nullptr;
+
+ unsigned Type = getOpenMPSimpleClauseType(
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok));
+ SourceLocation TypeLoc = Tok.getLocation();
+ if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::comma) &&
+ Tok.isNot(tok::annot_pragma_openmp_end))
+ ConsumeAnyToken();
+
+ // Parse ')'.
+ T.consumeClose();
+
+ return Actions.ActOnOpenMPSimpleClause(Kind, Type, TypeLoc, LOpen, Loc,
+ Tok.getLocation());
+}
+
+/// \brief Parsing of OpenMP clauses like 'ordered'.
+///
+/// ordered-clause:
+/// 'ordered'
+///
+/// nowait-clause:
+/// 'nowait'
+///
+/// untied-clause:
+/// 'untied'
+///
+/// mergeable-clause:
+/// 'mergeable'
+///
+/// read-clause:
+/// 'read'
+///
+/// threads-clause:
+/// 'threads'
+///
+/// simd-clause:
+/// 'simd'
+///
+/// nogroup-clause:
+/// 'nogroup'
+///
+OMPClause *Parser::ParseOpenMPClause(OpenMPClauseKind Kind) {
+ SourceLocation Loc = Tok.getLocation();
+ ConsumeAnyToken();
+
+ return Actions.ActOnOpenMPClause(Kind, Loc, Tok.getLocation());
+}
+
+/// \brief Parsing of OpenMP clauses with a single expression and an
+/// additional argument, like 'schedule' or 'dist_schedule'.
+///
+/// schedule-clause:
+/// 'schedule' '(' [ modifier [ ',' modifier ] ':' ] kind [',' expression ]
+/// ')'
+///
+/// if-clause:
+/// 'if' '(' [ directive-name-modifier ':' ] expression ')'
+///
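+/// For example: schedule(static, 4) or if(parallel: cond), where 'cond' is
+/// an illustrative condition expression.
+///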
+OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind) {
+ SourceLocation Loc = ConsumeToken();
+ SourceLocation DelimLoc;
+ // Parse '('.
+ BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
+ if (T.expectAndConsume(diag::err_expected_lparen_after,
+ getOpenMPClauseName(Kind)))
+ return nullptr;
+
+ ExprResult Val;
+ SmallVector<unsigned, 4> Arg;
+ SmallVector<SourceLocation, 4> KLoc;
+ if (Kind == OMPC_schedule) {
+ enum { Modifier1, Modifier2, ScheduleKind, NumberOfElements };
+ Arg.resize(NumberOfElements);
+ KLoc.resize(NumberOfElements);
+ Arg[Modifier1] = OMPC_SCHEDULE_MODIFIER_unknown;
+ Arg[Modifier2] = OMPC_SCHEDULE_MODIFIER_unknown;
+ Arg[ScheduleKind] = OMPC_SCHEDULE_unknown;
+ auto KindModifier = getOpenMPSimpleClauseType(
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok));
+ if (KindModifier > OMPC_SCHEDULE_unknown) {
+ // Parse 'modifier'
+ Arg[Modifier1] = KindModifier;
+ KLoc[Modifier1] = Tok.getLocation();
+ if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::comma) &&
+ Tok.isNot(tok::annot_pragma_openmp_end))
+ ConsumeAnyToken();
+ if (Tok.is(tok::comma)) {
+ // Parse ',' 'modifier'
+ ConsumeAnyToken();
+ KindModifier = getOpenMPSimpleClauseType(
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok));
+ Arg[Modifier2] = KindModifier > OMPC_SCHEDULE_unknown
+ ? KindModifier
+ : (unsigned)OMPC_SCHEDULE_unknown;
+ KLoc[Modifier2] = Tok.getLocation();
+ if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::comma) &&
+ Tok.isNot(tok::annot_pragma_openmp_end))
+ ConsumeAnyToken();
+ }
+ // Parse ':'
+ if (Tok.is(tok::colon))
+ ConsumeAnyToken();
+ else
+ Diag(Tok, diag::warn_pragma_expected_colon) << "schedule modifier";
+ KindModifier = getOpenMPSimpleClauseType(
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok));
+ }
+ Arg[ScheduleKind] = KindModifier;
+ KLoc[ScheduleKind] = Tok.getLocation();
+ if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::comma) &&
+ Tok.isNot(tok::annot_pragma_openmp_end))
+ ConsumeAnyToken();
+ if ((Arg[ScheduleKind] == OMPC_SCHEDULE_static ||
+ Arg[ScheduleKind] == OMPC_SCHEDULE_dynamic ||
+ Arg[ScheduleKind] == OMPC_SCHEDULE_guided) &&
+ Tok.is(tok::comma))
+ DelimLoc = ConsumeAnyToken();
+ } else {
+ assert(Kind == OMPC_if);
+ KLoc.push_back(Tok.getLocation());
+ Arg.push_back(ParseOpenMPDirectiveKind(*this));
+ if (Arg.back() != OMPD_unknown) {
+ ConsumeToken();
+ if (Tok.is(tok::colon))
+ DelimLoc = ConsumeToken();
+ else
+ Diag(Tok, diag::warn_pragma_expected_colon)
+ << "directive name modifier";
+ }
+ }
+
+ bool NeedAnExpression =
+ (Kind == OMPC_schedule && DelimLoc.isValid()) || Kind == OMPC_if;
+ if (NeedAnExpression) {
+ SourceLocation ELoc = Tok.getLocation();
+ ExprResult LHS(ParseCastExpression(false, false, NotTypeCast));
+ Val = ParseRHSOfBinaryExpression(LHS, prec::Conditional);
+ Val = Actions.ActOnFinishFullExpr(Val.get(), ELoc);
+ }
+
+ // Parse ')'.
+ T.consumeClose();
+
+ if (NeedAnExpression && Val.isInvalid())
+ return nullptr;
+
+ return Actions.ActOnOpenMPSingleExprWithArgClause(
+ Kind, Arg, Val.get(), Loc, T.getOpenLocation(), KLoc, DelimLoc,
+ T.getCloseLocation());
+}
+
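+/// Parse a reduction identifier: either one of the operators '+', '-', '*',
+/// '&', '|', '^', '&&', '||', or an id-expression, e.g. in
+/// 'reduction(+ : a)' the reduction identifier is '+'.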
+static bool ParseReductionId(Parser &P, CXXScopeSpec &ReductionIdScopeSpec,
+ UnqualifiedId &ReductionId) {
+ SourceLocation TemplateKWLoc;
+ if (ReductionIdScopeSpec.isEmpty()) {
+ auto OOK = OO_None;
+ switch (P.getCurToken().getKind()) {
+ case tok::plus:
+ OOK = OO_Plus;
+ break;
+ case tok::minus:
+ OOK = OO_Minus;
+ break;
+ case tok::star:
+ OOK = OO_Star;
+ break;
+ case tok::amp:
+ OOK = OO_Amp;
+ break;
+ case tok::pipe:
+ OOK = OO_Pipe;
+ break;
+ case tok::caret:
+ OOK = OO_Caret;
+ break;
+ case tok::ampamp:
+ OOK = OO_AmpAmp;
+ break;
+ case tok::pipepipe:
+ OOK = OO_PipePipe;
+ break;
+ default:
+ break;
+ }
+ if (OOK != OO_None) {
+ SourceLocation OpLoc = P.ConsumeToken();
+ SourceLocation SymbolLocations[] = {OpLoc, OpLoc, SourceLocation()};
+ ReductionId.setOperatorFunctionId(OpLoc, OOK, SymbolLocations);
+ return false;
+ }
+ }
+ return P.ParseUnqualifiedId(ReductionIdScopeSpec, /*EnteringContext*/ false,
+ /*AllowDestructorName*/ false,
+ /*AllowConstructorName*/ false, ParsedType(),
+ TemplateKWLoc, ReductionId);
+}
+
+/// \brief Parsing of OpenMP clause 'private', 'firstprivate', 'lastprivate',
+/// 'shared', 'copyin', 'copyprivate', 'flush' or 'reduction'.
+///
+/// private-clause:
+/// 'private' '(' list ')'
+/// firstprivate-clause:
+/// 'firstprivate' '(' list ')'
+/// lastprivate-clause:
+/// 'lastprivate' '(' list ')'
+/// shared-clause:
+/// 'shared' '(' list ')'
+/// linear-clause:
+/// 'linear' '(' linear-list [ ':' linear-step ] ')'
+/// aligned-clause:
+/// 'aligned' '(' list [ ':' alignment ] ')'
+/// reduction-clause:
+/// 'reduction' '(' reduction-identifier ':' list ')'
+/// copyprivate-clause:
+/// 'copyprivate' '(' list ')'
+/// flush-clause:
+/// 'flush' '(' list ')'
+/// depend-clause:
+/// 'depend' '(' in | out | inout : list | source ')'
+/// map-clause:
+/// 'map' '(' [ [ always , ]
+/// to | from | tofrom | alloc | release | delete ':' ] list ')';
+///
+/// For the 'linear' clause, linear-list may have the following forms:
+/// list
+/// modifier(list)
+/// where modifier is 'val' (C) or 'ref', 'val', or 'uval' (C++).
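+///
+/// For example: linear(val(x): 2) or map(always, tofrom: a) (illustrative
+/// variable names).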
+OMPClause *Parser::ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
+ OpenMPClauseKind Kind) {
+ SourceLocation Loc = Tok.getLocation();
+ SourceLocation LOpen = ConsumeToken();
+ SourceLocation ColonLoc = SourceLocation();
+ // Optional scope specifier and unqualified id for reduction identifier.
+ CXXScopeSpec ReductionIdScopeSpec;
+ UnqualifiedId ReductionId;
+ bool InvalidReductionId = false;
+ OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
+ // OpenMP 4.1 [2.15.3.7, linear Clause]
+ // If no modifier is specified it is assumed to be val.
+ OpenMPLinearClauseKind LinearModifier = OMPC_LINEAR_val;
+ OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
+ OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;
+ bool MapTypeModifierSpecified = false;
+ bool UnexpectedId = false;
+ SourceLocation DepLinMapLoc;
+
+ // Parse '('.
+ BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
+ if (T.expectAndConsume(diag::err_expected_lparen_after,
+ getOpenMPClauseName(Kind)))
+ return nullptr;
+
+ bool NeedRParenForLinear = false;
+ BalancedDelimiterTracker LinearT(*this, tok::l_paren,
+ tok::annot_pragma_openmp_end);
+ // Handle reduction-identifier for reduction clause.
+ if (Kind == OMPC_reduction) {
+ ColonProtectionRAIIObject ColonRAII(*this);
+ if (getLangOpts().CPlusPlus) {
+ ParseOptionalCXXScopeSpecifier(ReductionIdScopeSpec, ParsedType(), false);
+ }
+ InvalidReductionId =
+ ParseReductionId(*this, ReductionIdScopeSpec, ReductionId);
+ if (InvalidReductionId) {
+ SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ }
+ if (Tok.is(tok::colon)) {
+ ColonLoc = ConsumeToken();
+ } else {
+ Diag(Tok, diag::warn_pragma_expected_colon) << "reduction identifier";
+ }
+ } else if (Kind == OMPC_depend) {
+ // Handle dependency type for depend clause.
+ ColonProtectionRAIIObject ColonRAII(*this);
+ DepKind = static_cast<OpenMPDependClauseKind>(getOpenMPSimpleClauseType(
+ Kind, Tok.is(tok::identifier) ? PP.getSpelling(Tok) : ""));
+ DepLinMapLoc = Tok.getLocation();
+
+ if (DepKind == OMPC_DEPEND_unknown) {
+ SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ } else {
+ ConsumeToken();
+ // Special processing for depend(source) clause.
+ if (DKind == OMPD_ordered && DepKind == OMPC_DEPEND_source) {
+ // Parse ')'.
+ T.consumeClose();
+ return Actions.ActOnOpenMPVarListClause(
+ Kind, llvm::None, /*TailExpr=*/nullptr, Loc, LOpen,
+ /*ColonLoc=*/SourceLocation(), Tok.getLocation(),
+ ReductionIdScopeSpec, DeclarationNameInfo(), DepKind,
+ LinearModifier, MapTypeModifier, MapType, DepLinMapLoc);
+ }
+ }
+ if (Tok.is(tok::colon)) {
+ ColonLoc = ConsumeToken();
+ } else {
+ Diag(Tok, DKind == OMPD_ordered ? diag::warn_pragma_expected_colon_r_paren
+ : diag::warn_pragma_expected_colon)
+ << "dependency type";
+ }
+ } else if (Kind == OMPC_linear) {
+ // Try to parse modifier if any.
+ if (Tok.is(tok::identifier) && PP.LookAhead(0).is(tok::l_paren)) {
+ LinearModifier = static_cast<OpenMPLinearClauseKind>(
+ getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok)));
+ DepLinMapLoc = ConsumeToken();
+ LinearT.consumeOpen();
+ NeedRParenForLinear = true;
+ }
+ } else if (Kind == OMPC_map) {
+ // Handle map type for map clause.
+ ColonProtectionRAIIObject ColonRAII(*this);
+
+ // The first identifier may be a list item, a map-type, or a
+ // map-type-modifier.
+ MapType = static_cast<OpenMPMapClauseKind>(getOpenMPSimpleClauseType(
+ Kind, Tok.is(tok::identifier) ? PP.getSpelling(Tok) : ""));
+ DepLinMapLoc = Tok.getLocation();
+ bool ColonExpected = false;
+
+ if (Tok.is(tok::identifier)) {
+ if (PP.LookAhead(0).is(tok::colon)) {
+ MapType = static_cast<OpenMPMapClauseKind>(getOpenMPSimpleClauseType(
+ Kind, Tok.is(tok::identifier) ? PP.getSpelling(Tok) : ""));
+ if (MapType == OMPC_MAP_unknown) {
+ Diag(Tok, diag::err_omp_unknown_map_type);
+ } else if (MapType == OMPC_MAP_always) {
+ Diag(Tok, diag::err_omp_map_type_missing);
+ }
+ ConsumeToken();
+ } else if (PP.LookAhead(0).is(tok::comma)) {
+ if (PP.LookAhead(1).is(tok::identifier) &&
+ PP.LookAhead(2).is(tok::colon)) {
+ MapTypeModifier =
+ static_cast<OpenMPMapClauseKind>(getOpenMPSimpleClauseType(
+ Kind, Tok.is(tok::identifier) ? PP.getSpelling(Tok) : ""));
+ if (MapTypeModifier != OMPC_MAP_always) {
+ Diag(Tok, diag::err_omp_unknown_map_type_modifier);
+ MapTypeModifier = OMPC_MAP_unknown;
+ } else {
+ MapTypeModifierSpecified = true;
+ }
+
+ ConsumeToken();
+ ConsumeToken();
+
+ MapType = static_cast<OpenMPMapClauseKind>(getOpenMPSimpleClauseType(
+ Kind, Tok.is(tok::identifier) ? PP.getSpelling(Tok) : ""));
+ if (MapType == OMPC_MAP_unknown || MapType == OMPC_MAP_always) {
+ Diag(Tok, diag::err_omp_unknown_map_type);
+ }
+ ConsumeToken();
+ } else {
+ MapType = OMPC_MAP_tofrom;
+ }
+ } else {
+ MapType = OMPC_MAP_tofrom;
+ }
+ } else {
+ UnexpectedId = true;
+ }
+
+ if (Tok.is(tok::colon)) {
+ ColonLoc = ConsumeToken();
+ } else if (ColonExpected) {
+ Diag(Tok, diag::warn_pragma_expected_colon) << "map type";
+ }
+ }
+
+ SmallVector<Expr *, 5> Vars;
+ bool IsComma =
+ ((Kind != OMPC_reduction) && (Kind != OMPC_depend) &&
+ (Kind != OMPC_map)) ||
+ ((Kind == OMPC_reduction) && !InvalidReductionId) ||
+ ((Kind == OMPC_map) && (UnexpectedId || MapType != OMPC_MAP_unknown) &&
+ (!MapTypeModifierSpecified ||
+ (MapTypeModifierSpecified && MapTypeModifier == OMPC_MAP_always))) ||
+ ((Kind == OMPC_depend) && DepKind != OMPC_DEPEND_unknown);
+ const bool MayHaveTail = (Kind == OMPC_linear || Kind == OMPC_aligned);
+ while (IsComma || (Tok.isNot(tok::r_paren) && Tok.isNot(tok::colon) &&
+ Tok.isNot(tok::annot_pragma_openmp_end))) {
+ ColonProtectionRAIIObject ColonRAII(*this, MayHaveTail);
+ // Parse variable
+ ExprResult VarExpr =
+ Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+ if (VarExpr.isUsable()) {
+ Vars.push_back(VarExpr.get());
+ } else {
+ SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ }
+ // Skip ',' if any
+ IsComma = Tok.is(tok::comma);
+ if (IsComma)
+ ConsumeToken();
+ else if (Tok.isNot(tok::r_paren) &&
+ Tok.isNot(tok::annot_pragma_openmp_end) &&
+ (!MayHaveTail || Tok.isNot(tok::colon)))
+ Diag(Tok, diag::err_omp_expected_punc)
+ << ((Kind == OMPC_flush) ? getOpenMPDirectiveName(OMPD_flush)
+ : getOpenMPClauseName(Kind))
+ << (Kind == OMPC_flush);
+ }
+
+ // Parse ')' for linear clause with modifier.
+ if (NeedRParenForLinear)
+ LinearT.consumeClose();
+
+ // Parse ':' linear-step (or ':' alignment).
+ Expr *TailExpr = nullptr;
+ const bool MustHaveTail = MayHaveTail && Tok.is(tok::colon);
+ if (MustHaveTail) {
+ ColonLoc = Tok.getLocation();
+ SourceLocation ELoc = ConsumeToken();
+ ExprResult Tail = ParseAssignmentExpression();
+ Tail = Actions.ActOnFinishFullExpr(Tail.get(), ELoc);
+ if (Tail.isUsable())
+ TailExpr = Tail.get();
+ else
+ SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ }
+
+ // Parse ')'.
+ T.consumeClose();
+ if ((Kind == OMPC_depend && DepKind != OMPC_DEPEND_unknown && Vars.empty()) ||
+ (Kind != OMPC_depend && Vars.empty()) || (MustHaveTail && !TailExpr) ||
+ (Kind == OMPC_map && MapType == OMPC_MAP_unknown) ||
+ InvalidReductionId) {
+ return nullptr;
+ }
+
+ return Actions.ActOnOpenMPVarListClause(
+ Kind, Vars, TailExpr, Loc, LOpen, ColonLoc, Tok.getLocation(),
+ ReductionIdScopeSpec,
+ ReductionId.isValid() ? Actions.GetNameFromUnqualifiedId(ReductionId)
+ : DeclarationNameInfo(),
+ DepKind, LinearModifier, MapTypeModifier, MapType, DepLinMapLoc);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp
new file mode 100644
index 0000000..4430eb8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp
@@ -0,0 +1,2130 @@
+//===--- ParsePragma.cpp - Language specific pragma parsing ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the language specific #pragma handlers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RAIIObjectsForParser.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Sema/LoopHint.h"
+#include "clang/Sema/Scope.h"
+#include "llvm/ADT/StringSwitch.h"
+using namespace clang;
+
+namespace {
+
+struct PragmaAlignHandler : public PragmaHandler {
+ explicit PragmaAlignHandler() : PragmaHandler("align") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+};
+
+struct PragmaGCCVisibilityHandler : public PragmaHandler {
+ explicit PragmaGCCVisibilityHandler() : PragmaHandler("visibility") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+};
+
+struct PragmaOptionsHandler : public PragmaHandler {
+ explicit PragmaOptionsHandler() : PragmaHandler("options") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+};
+
+struct PragmaPackHandler : public PragmaHandler {
+ explicit PragmaPackHandler() : PragmaHandler("pack") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+};
+
+struct PragmaMSStructHandler : public PragmaHandler {
+ explicit PragmaMSStructHandler() : PragmaHandler("ms_struct") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+};
+
+struct PragmaUnusedHandler : public PragmaHandler {
+ PragmaUnusedHandler() : PragmaHandler("unused") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+};
+
+struct PragmaWeakHandler : public PragmaHandler {
+ explicit PragmaWeakHandler() : PragmaHandler("weak") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+};
+
+struct PragmaRedefineExtnameHandler : public PragmaHandler {
+ explicit PragmaRedefineExtnameHandler() : PragmaHandler("redefine_extname") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+};
+
+struct PragmaOpenCLExtensionHandler : public PragmaHandler {
+ PragmaOpenCLExtensionHandler() : PragmaHandler("EXTENSION") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+};
+
+struct PragmaFPContractHandler : public PragmaHandler {
+ PragmaFPContractHandler() : PragmaHandler("FP_CONTRACT") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+};
+
+struct PragmaNoOpenMPHandler : public PragmaHandler {
+ PragmaNoOpenMPHandler() : PragmaHandler("omp") { }
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+};
+
+struct PragmaOpenMPHandler : public PragmaHandler {
+ PragmaOpenMPHandler() : PragmaHandler("omp") { }
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+};
+
+/// PragmaCommentHandler - "\#pragma comment ...".
+struct PragmaCommentHandler : public PragmaHandler {
+ PragmaCommentHandler(Sema &Actions)
+ : PragmaHandler("comment"), Actions(Actions) {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+private:
+ Sema &Actions;
+};
+
+struct PragmaDetectMismatchHandler : public PragmaHandler {
+ PragmaDetectMismatchHandler(Sema &Actions)
+ : PragmaHandler("detect_mismatch"), Actions(Actions) {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+private:
+ Sema &Actions;
+};
+
+struct PragmaMSPointersToMembers : public PragmaHandler {
+ explicit PragmaMSPointersToMembers() : PragmaHandler("pointers_to_members") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+};
+
+struct PragmaMSVtorDisp : public PragmaHandler {
+ explicit PragmaMSVtorDisp() : PragmaHandler("vtordisp") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+};
+
+struct PragmaMSPragma : public PragmaHandler {
+ explicit PragmaMSPragma(const char *name) : PragmaHandler(name) {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+};
+
+/// PragmaOptimizeHandler - "\#pragma clang optimize on/off".
+struct PragmaOptimizeHandler : public PragmaHandler {
+ PragmaOptimizeHandler(Sema &S)
+ : PragmaHandler("optimize"), Actions(S) {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+private:
+ Sema &Actions;
+};
+
+struct PragmaLoopHintHandler : public PragmaHandler {
+ PragmaLoopHintHandler() : PragmaHandler("loop") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+};
+
+struct PragmaUnrollHintHandler : public PragmaHandler {
+ PragmaUnrollHintHandler(const char *name) : PragmaHandler(name) {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+};
+
+struct PragmaMSRuntimeChecksHandler : public EmptyPragmaHandler {
+ PragmaMSRuntimeChecksHandler() : EmptyPragmaHandler("runtime_checks") {}
+};
+
+} // end namespace
+
+void Parser::initializePragmaHandlers() {
+ AlignHandler.reset(new PragmaAlignHandler());
+ PP.AddPragmaHandler(AlignHandler.get());
+
+ GCCVisibilityHandler.reset(new PragmaGCCVisibilityHandler());
+ PP.AddPragmaHandler("GCC", GCCVisibilityHandler.get());
+
+ OptionsHandler.reset(new PragmaOptionsHandler());
+ PP.AddPragmaHandler(OptionsHandler.get());
+
+ PackHandler.reset(new PragmaPackHandler());
+ PP.AddPragmaHandler(PackHandler.get());
+
+ MSStructHandler.reset(new PragmaMSStructHandler());
+ PP.AddPragmaHandler(MSStructHandler.get());
+
+ UnusedHandler.reset(new PragmaUnusedHandler());
+ PP.AddPragmaHandler(UnusedHandler.get());
+
+ WeakHandler.reset(new PragmaWeakHandler());
+ PP.AddPragmaHandler(WeakHandler.get());
+
+ RedefineExtnameHandler.reset(new PragmaRedefineExtnameHandler());
+ PP.AddPragmaHandler(RedefineExtnameHandler.get());
+
+ FPContractHandler.reset(new PragmaFPContractHandler());
+ PP.AddPragmaHandler("STDC", FPContractHandler.get());
+
+ if (getLangOpts().OpenCL) {
+ OpenCLExtensionHandler.reset(new PragmaOpenCLExtensionHandler());
+ PP.AddPragmaHandler("OPENCL", OpenCLExtensionHandler.get());
+
+ PP.AddPragmaHandler("OPENCL", FPContractHandler.get());
+ }
+ if (getLangOpts().OpenMP)
+ OpenMPHandler.reset(new PragmaOpenMPHandler());
+ else
+ OpenMPHandler.reset(new PragmaNoOpenMPHandler());
+ PP.AddPragmaHandler(OpenMPHandler.get());
+
+ if (getLangOpts().MicrosoftExt || getTargetInfo().getTriple().isPS4()) {
+ MSCommentHandler.reset(new PragmaCommentHandler(Actions));
+ PP.AddPragmaHandler(MSCommentHandler.get());
+ }
+
+ if (getLangOpts().MicrosoftExt) {
+ MSDetectMismatchHandler.reset(new PragmaDetectMismatchHandler(Actions));
+ PP.AddPragmaHandler(MSDetectMismatchHandler.get());
+ MSPointersToMembers.reset(new PragmaMSPointersToMembers());
+ PP.AddPragmaHandler(MSPointersToMembers.get());
+ MSVtorDisp.reset(new PragmaMSVtorDisp());
+ PP.AddPragmaHandler(MSVtorDisp.get());
+ MSInitSeg.reset(new PragmaMSPragma("init_seg"));
+ PP.AddPragmaHandler(MSInitSeg.get());
+ MSDataSeg.reset(new PragmaMSPragma("data_seg"));
+ PP.AddPragmaHandler(MSDataSeg.get());
+ MSBSSSeg.reset(new PragmaMSPragma("bss_seg"));
+ PP.AddPragmaHandler(MSBSSSeg.get());
+ MSConstSeg.reset(new PragmaMSPragma("const_seg"));
+ PP.AddPragmaHandler(MSConstSeg.get());
+ MSCodeSeg.reset(new PragmaMSPragma("code_seg"));
+ PP.AddPragmaHandler(MSCodeSeg.get());
+ MSSection.reset(new PragmaMSPragma("section"));
+ PP.AddPragmaHandler(MSSection.get());
+ MSRuntimeChecks.reset(new PragmaMSRuntimeChecksHandler());
+ PP.AddPragmaHandler(MSRuntimeChecks.get());
+ }
+
+ OptimizeHandler.reset(new PragmaOptimizeHandler(Actions));
+ PP.AddPragmaHandler("clang", OptimizeHandler.get());
+
+ LoopHintHandler.reset(new PragmaLoopHintHandler());
+ PP.AddPragmaHandler("clang", LoopHintHandler.get());
+
+ UnrollHintHandler.reset(new PragmaUnrollHintHandler("unroll"));
+ PP.AddPragmaHandler(UnrollHintHandler.get());
+
+ NoUnrollHintHandler.reset(new PragmaUnrollHintHandler("nounroll"));
+ PP.AddPragmaHandler(NoUnrollHintHandler.get());
+}
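
The registration pattern above extends to additional handlers; a minimal sketch (the pragma name "mytool" and the handler class are hypothetical, shown only to illustrate the PragmaHandler/AddPragmaHandler API used here):

    struct PragmaMyToolHandler : public PragmaHandler {
      PragmaMyToolHandler() : PragmaHandler("mytool") {} // matches #pragma mytool ...
      void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
                        Token &FirstToken) override {
        PP.DiscardUntilEndOfDirective(); // ignore the rest of the directive
      }
    };
    // Registered the same way as the handlers above:
    //   MyToolHandler.reset(new PragmaMyToolHandler());
    //   PP.AddPragmaHandler(MyToolHandler.get());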
+
+void Parser::resetPragmaHandlers() {
+ // Remove the pragma handlers we installed.
+ PP.RemovePragmaHandler(AlignHandler.get());
+ AlignHandler.reset();
+ PP.RemovePragmaHandler("GCC", GCCVisibilityHandler.get());
+ GCCVisibilityHandler.reset();
+ PP.RemovePragmaHandler(OptionsHandler.get());
+ OptionsHandler.reset();
+ PP.RemovePragmaHandler(PackHandler.get());
+ PackHandler.reset();
+ PP.RemovePragmaHandler(MSStructHandler.get());
+ MSStructHandler.reset();
+ PP.RemovePragmaHandler(UnusedHandler.get());
+ UnusedHandler.reset();
+ PP.RemovePragmaHandler(WeakHandler.get());
+ WeakHandler.reset();
+ PP.RemovePragmaHandler(RedefineExtnameHandler.get());
+ RedefineExtnameHandler.reset();
+
+ if (getLangOpts().OpenCL) {
+ PP.RemovePragmaHandler("OPENCL", OpenCLExtensionHandler.get());
+ OpenCLExtensionHandler.reset();
+ PP.RemovePragmaHandler("OPENCL", FPContractHandler.get());
+ }
+ PP.RemovePragmaHandler(OpenMPHandler.get());
+ OpenMPHandler.reset();
+
+ if (getLangOpts().MicrosoftExt || getTargetInfo().getTriple().isPS4()) {
+ PP.RemovePragmaHandler(MSCommentHandler.get());
+ MSCommentHandler.reset();
+ }
+
+ if (getLangOpts().MicrosoftExt) {
+ PP.RemovePragmaHandler(MSDetectMismatchHandler.get());
+ MSDetectMismatchHandler.reset();
+ PP.RemovePragmaHandler(MSPointersToMembers.get());
+ MSPointersToMembers.reset();
+ PP.RemovePragmaHandler(MSVtorDisp.get());
+ MSVtorDisp.reset();
+ PP.RemovePragmaHandler(MSInitSeg.get());
+ MSInitSeg.reset();
+ PP.RemovePragmaHandler(MSDataSeg.get());
+ MSDataSeg.reset();
+ PP.RemovePragmaHandler(MSBSSSeg.get());
+ MSBSSSeg.reset();
+ PP.RemovePragmaHandler(MSConstSeg.get());
+ MSConstSeg.reset();
+ PP.RemovePragmaHandler(MSCodeSeg.get());
+ MSCodeSeg.reset();
+ PP.RemovePragmaHandler(MSSection.get());
+ MSSection.reset();
+ PP.RemovePragmaHandler(MSRuntimeChecks.get());
+ MSRuntimeChecks.reset();
+ }
+
+ PP.RemovePragmaHandler("STDC", FPContractHandler.get());
+ FPContractHandler.reset();
+
+ PP.RemovePragmaHandler("clang", OptimizeHandler.get());
+ OptimizeHandler.reset();
+
+ PP.RemovePragmaHandler("clang", LoopHintHandler.get());
+ LoopHintHandler.reset();
+
+ PP.RemovePragmaHandler(UnrollHintHandler.get());
+ UnrollHintHandler.reset();
+
+ PP.RemovePragmaHandler(NoUnrollHintHandler.get());
+ NoUnrollHintHandler.reset();
+}
+
+/// \brief Handle the annotation token produced for #pragma unused(...)
+///
+/// Each annot_pragma_unused is followed by its argument token, so e.g.
+/// "#pragma unused(x,y)" becomes:
+/// annot_pragma_unused 'x' annot_pragma_unused 'y'
+void Parser::HandlePragmaUnused() {
+ assert(Tok.is(tok::annot_pragma_unused));
+ SourceLocation UnusedLoc = ConsumeToken();
+ Actions.ActOnPragmaUnused(Tok, getCurScope(), UnusedLoc);
+ ConsumeToken(); // The argument token.
+}
+
+void Parser::HandlePragmaVisibility() {
+ assert(Tok.is(tok::annot_pragma_vis));
+ const IdentifierInfo *VisType =
+ static_cast<IdentifierInfo *>(Tok.getAnnotationValue());
+ SourceLocation VisLoc = ConsumeToken();
+ Actions.ActOnPragmaVisibility(VisType, VisLoc);
+}
+
+namespace {
+struct PragmaPackInfo {
+ Sema::PragmaPackKind Kind;
+ IdentifierInfo *Name;
+ Token Alignment;
+ SourceLocation LParenLoc;
+ SourceLocation RParenLoc;
+};
+} // end anonymous namespace
+
+void Parser::HandlePragmaPack() {
+ assert(Tok.is(tok::annot_pragma_pack));
+ PragmaPackInfo *Info =
+ static_cast<PragmaPackInfo *>(Tok.getAnnotationValue());
+ SourceLocation PragmaLoc = ConsumeToken();
+ ExprResult Alignment;
+ if (Info->Alignment.is(tok::numeric_constant)) {
+ Alignment = Actions.ActOnNumericConstant(Info->Alignment);
+ if (Alignment.isInvalid())
+ return;
+ }
+ Actions.ActOnPragmaPack(Info->Kind, Info->Name, Alignment.get(), PragmaLoc,
+ Info->LParenLoc, Info->RParenLoc);
+}
+
+void Parser::HandlePragmaMSStruct() {
+ assert(Tok.is(tok::annot_pragma_msstruct));
+ Sema::PragmaMSStructKind Kind =
+ static_cast<Sema::PragmaMSStructKind>(
+ reinterpret_cast<uintptr_t>(Tok.getAnnotationValue()));
+ Actions.ActOnPragmaMSStruct(Kind);
+ ConsumeToken(); // The annotation token.
+}
+
+void Parser::HandlePragmaAlign() {
+ assert(Tok.is(tok::annot_pragma_align));
+ Sema::PragmaOptionsAlignKind Kind =
+ static_cast<Sema::PragmaOptionsAlignKind>(
+ reinterpret_cast<uintptr_t>(Tok.getAnnotationValue()));
+ SourceLocation PragmaLoc = ConsumeToken();
+ Actions.ActOnPragmaOptionsAlign(Kind, PragmaLoc);
+}
+
+void Parser::HandlePragmaWeak() {
+ assert(Tok.is(tok::annot_pragma_weak));
+ SourceLocation PragmaLoc = ConsumeToken();
+ Actions.ActOnPragmaWeakID(Tok.getIdentifierInfo(), PragmaLoc,
+ Tok.getLocation());
+ ConsumeToken(); // The weak name.
+}
+
+void Parser::HandlePragmaWeakAlias() {
+ assert(Tok.is(tok::annot_pragma_weakalias));
+ SourceLocation PragmaLoc = ConsumeToken();
+ IdentifierInfo *WeakName = Tok.getIdentifierInfo();
+ SourceLocation WeakNameLoc = Tok.getLocation();
+ ConsumeToken();
+ IdentifierInfo *AliasName = Tok.getIdentifierInfo();
+ SourceLocation AliasNameLoc = Tok.getLocation();
+ ConsumeToken();
+ Actions.ActOnPragmaWeakAlias(WeakName, AliasName, PragmaLoc,
+ WeakNameLoc, AliasNameLoc);
+}
+
+void Parser::HandlePragmaRedefineExtname() {
+ assert(Tok.is(tok::annot_pragma_redefine_extname));
+ SourceLocation RedefLoc = ConsumeToken();
+ IdentifierInfo *RedefName = Tok.getIdentifierInfo();
+ SourceLocation RedefNameLoc = Tok.getLocation();
+ ConsumeToken();
+ IdentifierInfo *AliasName = Tok.getIdentifierInfo();
+ SourceLocation AliasNameLoc = Tok.getLocation();
+ ConsumeToken();
+ Actions.ActOnPragmaRedefineExtname(RedefName, AliasName, RedefLoc,
+ RedefNameLoc, AliasNameLoc);
+}
+
+void Parser::HandlePragmaFPContract() {
+ assert(Tok.is(tok::annot_pragma_fp_contract));
+ tok::OnOffSwitch OOS =
+ static_cast<tok::OnOffSwitch>(
+ reinterpret_cast<uintptr_t>(Tok.getAnnotationValue()));
+ Actions.ActOnPragmaFPContract(OOS);
+ ConsumeToken(); // The annotation token.
+}
+
+StmtResult Parser::HandlePragmaCaptured() {
+ assert(Tok.is(tok::annot_pragma_captured));
+ ConsumeToken();
+
+ if (Tok.isNot(tok::l_brace)) {
+ PP.Diag(Tok, diag::err_expected) << tok::l_brace;
+ return StmtError();
+ }
+
+ SourceLocation Loc = Tok.getLocation();
+
+ ParseScope CapturedRegionScope(this, Scope::FnScope | Scope::DeclScope);
+ Actions.ActOnCapturedRegionStart(Loc, getCurScope(), CR_Default,
+ /*NumParams=*/1);
+
+ StmtResult R = ParseCompoundStatement();
+ CapturedRegionScope.Exit();
+
+ if (R.isInvalid()) {
+ Actions.ActOnCapturedRegionError();
+ return StmtError();
+ }
+
+ return Actions.ActOnCapturedRegionEnd(R.get());
+}
+
+namespace {
+ typedef llvm::PointerIntPair<IdentifierInfo *, 1, bool> OpenCLExtData;
+}
+
+void Parser::HandlePragmaOpenCLExtension() {
+ assert(Tok.is(tok::annot_pragma_opencl_extension));
+ OpenCLExtData data =
+ OpenCLExtData::getFromOpaqueValue(Tok.getAnnotationValue());
+ unsigned state = data.getInt();
+ IdentifierInfo *ename = data.getPointer();
+ SourceLocation NameLoc = Tok.getLocation();
+ ConsumeToken(); // The annotation token.
+
+ OpenCLOptions &f = Actions.getOpenCLOptions();
+ // OpenCL 1.1 9.1: "The all variant sets the behavior for all extensions,
+ // overriding all previously issued extension directives, but only if the
+ // behavior is set to disable."
+ if (state == 0 && ename->isStr("all")) {
+#define OPENCLEXT(nm) f.nm = 0;
+#include "clang/Basic/OpenCLExtensions.def"
+ }
+#define OPENCLEXT(nm) else if (ename->isStr(#nm)) { f.nm = state; }
+#include "clang/Basic/OpenCLExtensions.def"
+ else {
+ PP.Diag(NameLoc, diag::warn_pragma_unknown_extension) << ename;
+ return;
+ }
+}
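
Typical directives this routine handles, in illustrative OpenCL source:

    #pragma OPENCL EXTENSION cl_khr_fp64 : enable   // turn one extension on
    #pragma OPENCL EXTENSION all : disable          // reset all extensions off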
+
+void Parser::HandlePragmaMSPointersToMembers() {
+ assert(Tok.is(tok::annot_pragma_ms_pointers_to_members));
+ LangOptions::PragmaMSPointersToMembersKind RepresentationMethod =
+ static_cast<LangOptions::PragmaMSPointersToMembersKind>(
+ reinterpret_cast<uintptr_t>(Tok.getAnnotationValue()));
+ SourceLocation PragmaLoc = ConsumeToken(); // The annotation token.
+ Actions.ActOnPragmaMSPointersToMembers(RepresentationMethod, PragmaLoc);
+}
+
+void Parser::HandlePragmaMSVtorDisp() {
+ assert(Tok.is(tok::annot_pragma_ms_vtordisp));
+ uintptr_t Value = reinterpret_cast<uintptr_t>(Tok.getAnnotationValue());
+ Sema::PragmaVtorDispKind Kind =
+ static_cast<Sema::PragmaVtorDispKind>((Value >> 16) & 0xFFFF);
+ MSVtorDispAttr::Mode Mode = MSVtorDispAttr::Mode(Value & 0xFFFF);
+ SourceLocation PragmaLoc = ConsumeToken(); // The annotation token.
+ Actions.ActOnPragmaMSVtorDisp(Kind, PragmaLoc, Mode);
+}
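
The annotation value packs the stack action and the vtordisp mode into the two 16-bit halves of a uintptr_t (the encoding side is in PragmaMSVtorDisp::HandlePragma further down); a sketch of the round trip:

    uintptr_t Encoded = (static_cast<uintptr_t>(Sema::PVDK_Push) << 16) | 1u;
    auto K = static_cast<Sema::PragmaVtorDispKind>((Encoded >> 16) & 0xFFFF); // PVDK_Push
    unsigned Mode = Encoded & 0xFFFF;                                         // 1, i.e. "on"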
+
+void Parser::HandlePragmaMSPragma() {
+ assert(Tok.is(tok::annot_pragma_ms_pragma));
+ // Grab the tokens out of the annotation and enter them into the stream.
+ auto TheTokens = (std::pair<Token*, size_t> *)Tok.getAnnotationValue();
+ PP.EnterTokenStream(TheTokens->first, TheTokens->second, true, true);
+ SourceLocation PragmaLocation = ConsumeToken(); // The annotation token.
+ assert(Tok.isAnyIdentifier());
+ StringRef PragmaName = Tok.getIdentifierInfo()->getName();
+ PP.Lex(Tok); // pragma kind
+
+ // Figure out which #pragma we're dealing with. The switch has no default
+ // because lex shouldn't emit the annotation token for unrecognized pragmas.
+ typedef bool (Parser::*PragmaHandler)(StringRef, SourceLocation);
+ PragmaHandler Handler = llvm::StringSwitch<PragmaHandler>(PragmaName)
+ .Case("data_seg", &Parser::HandlePragmaMSSegment)
+ .Case("bss_seg", &Parser::HandlePragmaMSSegment)
+ .Case("const_seg", &Parser::HandlePragmaMSSegment)
+ .Case("code_seg", &Parser::HandlePragmaMSSegment)
+ .Case("section", &Parser::HandlePragmaMSSection)
+ .Case("init_seg", &Parser::HandlePragmaMSInitSeg);
+
+ if (!(this->*Handler)(PragmaName, PragmaLocation)) {
+ // Pragma handling failed, and has been diagnosed. Slurp up the tokens
+ // until eof (really end of line) to prevent follow-on errors.
+ while (Tok.isNot(tok::eof))
+ PP.Lex(Tok);
+ PP.Lex(Tok);
+ }
+}
+
+bool Parser::HandlePragmaMSSection(StringRef PragmaName,
+ SourceLocation PragmaLocation) {
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_expected_lparen) << PragmaName;
+ return false;
+ }
+ PP.Lex(Tok); // (
+ // Parsing code for pragma section
+ if (Tok.isNot(tok::string_literal)) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_expected_section_name)
+ << PragmaName;
+ return false;
+ }
+ ExprResult StringResult = ParseStringLiteralExpression();
+ if (StringResult.isInvalid())
+ return false; // Already diagnosed.
+ StringLiteral *SegmentName = cast<StringLiteral>(StringResult.get());
+ if (SegmentName->getCharByteWidth() != 1) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_expected_non_wide_string)
+ << PragmaName;
+ return false;
+ }
+ int SectionFlags = ASTContext::PSF_Read;
+ bool SectionFlagsAreDefault = true;
+ while (Tok.is(tok::comma)) {
+ PP.Lex(Tok); // ,
+ // Ignore "long" and "short".
+    // They are undocumented but widely used section attributes that appear
+    // to do nothing.
+ if (Tok.is(tok::kw_long) || Tok.is(tok::kw_short)) {
+ PP.Lex(Tok); // long/short
+ continue;
+ }
+
+ if (!Tok.isAnyIdentifier()) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_expected_action_or_r_paren)
+ << PragmaName;
+ return false;
+ }
+ ASTContext::PragmaSectionFlag Flag =
+ llvm::StringSwitch<ASTContext::PragmaSectionFlag>(
+ Tok.getIdentifierInfo()->getName())
+ .Case("read", ASTContext::PSF_Read)
+ .Case("write", ASTContext::PSF_Write)
+ .Case("execute", ASTContext::PSF_Execute)
+ .Case("shared", ASTContext::PSF_Invalid)
+ .Case("nopage", ASTContext::PSF_Invalid)
+ .Case("nocache", ASTContext::PSF_Invalid)
+ .Case("discard", ASTContext::PSF_Invalid)
+ .Case("remove", ASTContext::PSF_Invalid)
+ .Default(ASTContext::PSF_None);
+ if (Flag == ASTContext::PSF_None || Flag == ASTContext::PSF_Invalid) {
+ PP.Diag(PragmaLocation, Flag == ASTContext::PSF_None
+ ? diag::warn_pragma_invalid_specific_action
+ : diag::warn_pragma_unsupported_action)
+ << PragmaName << Tok.getIdentifierInfo()->getName();
+ return false;
+ }
+ SectionFlags |= Flag;
+ SectionFlagsAreDefault = false;
+ PP.Lex(Tok); // Identifier
+ }
+ // If no section attributes are specified, the section will be marked as
+ // read/write.
+ if (SectionFlagsAreDefault)
+ SectionFlags |= ASTContext::PSF_Write;
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_expected_rparen) << PragmaName;
+ return false;
+ }
+ PP.Lex(Tok); // )
+ if (Tok.isNot(tok::eof)) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_extra_tokens_at_eol)
+ << PragmaName;
+ return false;
+ }
+ PP.Lex(Tok); // eof
+ Actions.ActOnPragmaMSSection(PragmaLocation, SectionFlags, SegmentName);
+ return true;
+}
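
Illustrative forms accepted by this parser:

    #pragma section(".mydata", read, write)  // explicit attributes
    #pragma section(".myrw")                 // no attributes: defaults to read/write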
+
+bool Parser::HandlePragmaMSSegment(StringRef PragmaName,
+ SourceLocation PragmaLocation) {
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_expected_lparen) << PragmaName;
+ return false;
+ }
+ PP.Lex(Tok); // (
+ Sema::PragmaMsStackAction Action = Sema::PSK_Reset;
+ StringRef SlotLabel;
+ if (Tok.isAnyIdentifier()) {
+ StringRef PushPop = Tok.getIdentifierInfo()->getName();
+ if (PushPop == "push")
+ Action = Sema::PSK_Push;
+ else if (PushPop == "pop")
+ Action = Sema::PSK_Pop;
+ else {
+ PP.Diag(PragmaLocation,
+ diag::warn_pragma_expected_section_push_pop_or_name)
+ << PragmaName;
+ return false;
+ }
+ if (Action != Sema::PSK_Reset) {
+ PP.Lex(Tok); // push | pop
+ if (Tok.is(tok::comma)) {
+ PP.Lex(Tok); // ,
+ // If we've got a comma, we either need a label or a string.
+ if (Tok.isAnyIdentifier()) {
+ SlotLabel = Tok.getIdentifierInfo()->getName();
+ PP.Lex(Tok); // identifier
+ if (Tok.is(tok::comma))
+ PP.Lex(Tok);
+ else if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_expected_punc)
+ << PragmaName;
+ return false;
+ }
+ }
+ } else if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_expected_punc) << PragmaName;
+ return false;
+ }
+ }
+ }
+ // Grab the string literal for our section name.
+ StringLiteral *SegmentName = nullptr;
+ if (Tok.isNot(tok::r_paren)) {
+ if (Tok.isNot(tok::string_literal)) {
+ unsigned DiagID = Action != Sema::PSK_Reset ? !SlotLabel.empty() ?
+ diag::warn_pragma_expected_section_name :
+ diag::warn_pragma_expected_section_label_or_name :
+ diag::warn_pragma_expected_section_push_pop_or_name;
+ PP.Diag(PragmaLocation, DiagID) << PragmaName;
+ return false;
+ }
+ ExprResult StringResult = ParseStringLiteralExpression();
+ if (StringResult.isInvalid())
+ return false; // Already diagnosed.
+ SegmentName = cast<StringLiteral>(StringResult.get());
+ if (SegmentName->getCharByteWidth() != 1) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_expected_non_wide_string)
+ << PragmaName;
+ return false;
+ }
+    // Setting the section to "" has no effect.
+ if (SegmentName->getLength())
+ Action = (Sema::PragmaMsStackAction)(Action | Sema::PSK_Set);
+ }
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_expected_rparen) << PragmaName;
+ return false;
+ }
+ PP.Lex(Tok); // )
+ if (Tok.isNot(tok::eof)) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_extra_tokens_at_eol)
+ << PragmaName;
+ return false;
+ }
+ PP.Lex(Tok); // eof
+ Actions.ActOnPragmaMSSeg(PragmaLocation, Action, SlotLabel,
+ SegmentName, PragmaName);
+ return true;
+}
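
Illustrative forms of the segment pragmas routed here (the label and symbol names are hypothetical):

    #pragma data_seg(push, label1, ".mydata")  // push, tag the slot, set the section
    int placed_in_mydata = 1;                  // global placed in the named section
    #pragma data_seg(pop, label1)              // pop back to the tagged slot
    #pragma data_seg()                         // reset to the default section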
+
+// #pragma init_seg({ compiler | lib | user | "section-name" [, func-name]} )
+bool Parser::HandlePragmaMSInitSeg(StringRef PragmaName,
+ SourceLocation PragmaLocation) {
+ if (getTargetInfo().getTriple().getEnvironment() != llvm::Triple::MSVC) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_init_seg_unsupported_target);
+ return false;
+ }
+
+ if (ExpectAndConsume(tok::l_paren, diag::warn_pragma_expected_lparen,
+ PragmaName))
+ return false;
+
+ // Parse either the known section names or the string section name.
+ StringLiteral *SegmentName = nullptr;
+ if (Tok.isAnyIdentifier()) {
+ auto *II = Tok.getIdentifierInfo();
+ StringRef Section = llvm::StringSwitch<StringRef>(II->getName())
+ .Case("compiler", "\".CRT$XCC\"")
+ .Case("lib", "\".CRT$XCL\"")
+ .Case("user", "\".CRT$XCU\"")
+ .Default("");
+
+ if (!Section.empty()) {
+ // Pretend the user wrote the appropriate string literal here.
+ Token Toks[1];
+ Toks[0].startToken();
+ Toks[0].setKind(tok::string_literal);
+ Toks[0].setLocation(Tok.getLocation());
+ Toks[0].setLiteralData(Section.data());
+ Toks[0].setLength(Section.size());
+ SegmentName =
+ cast<StringLiteral>(Actions.ActOnStringLiteral(Toks, nullptr).get());
+ PP.Lex(Tok);
+ }
+ } else if (Tok.is(tok::string_literal)) {
+ ExprResult StringResult = ParseStringLiteralExpression();
+ if (StringResult.isInvalid())
+ return false;
+ SegmentName = cast<StringLiteral>(StringResult.get());
+ if (SegmentName->getCharByteWidth() != 1) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_expected_non_wide_string)
+ << PragmaName;
+ return false;
+ }
+ // FIXME: Add support for the '[, func-name]' part of the pragma.
+ }
+
+ if (!SegmentName) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_expected_init_seg) << PragmaName;
+ return false;
+ }
+
+ if (ExpectAndConsume(tok::r_paren, diag::warn_pragma_expected_rparen,
+ PragmaName) ||
+ ExpectAndConsume(tok::eof, diag::warn_pragma_extra_tokens_at_eol,
+ PragmaName))
+ return false;
+
+ Actions.ActOnPragmaMSInitSeg(PragmaLocation, SegmentName);
+ return true;
+}
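
Both spellings map onto CRT initializer sections (illustrative):

    #pragma init_seg(lib)          // shorthand for ".CRT$XCL"
    #pragma init_seg(".CRT$XCB")   // explicit section name as a string literal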
+
+namespace {
+struct PragmaLoopHintInfo {
+ Token PragmaName;
+ Token Option;
+ ArrayRef<Token> Toks;
+};
+} // end anonymous namespace
+
+static std::string PragmaLoopHintString(Token PragmaName, Token Option) {
+ std::string PragmaString;
+ if (PragmaName.getIdentifierInfo()->getName() == "loop") {
+ PragmaString = "clang loop ";
+ PragmaString += Option.getIdentifierInfo()->getName();
+ } else {
+ assert(PragmaName.getIdentifierInfo()->getName() == "unroll" &&
+ "Unexpected pragma name");
+ PragmaString = "unroll";
+ }
+ return PragmaString;
+}
+
+bool Parser::HandlePragmaLoopHint(LoopHint &Hint) {
+ assert(Tok.is(tok::annot_pragma_loop_hint));
+ PragmaLoopHintInfo *Info =
+ static_cast<PragmaLoopHintInfo *>(Tok.getAnnotationValue());
+
+ IdentifierInfo *PragmaNameInfo = Info->PragmaName.getIdentifierInfo();
+ Hint.PragmaNameLoc = IdentifierLoc::create(
+ Actions.Context, Info->PragmaName.getLocation(), PragmaNameInfo);
+
+ // It is possible that the loop hint has no option identifier, such as
+ // #pragma unroll(4).
+ IdentifierInfo *OptionInfo = Info->Option.is(tok::identifier)
+ ? Info->Option.getIdentifierInfo()
+ : nullptr;
+ Hint.OptionLoc = IdentifierLoc::create(
+ Actions.Context, Info->Option.getLocation(), OptionInfo);
+
+ const Token *Toks = Info->Toks.data();
+ size_t TokSize = Info->Toks.size();
+
+ // Return a valid hint if pragma unroll or nounroll were specified
+ // without an argument.
+ bool PragmaUnroll = PragmaNameInfo->getName() == "unroll";
+ bool PragmaNoUnroll = PragmaNameInfo->getName() == "nounroll";
+ if (TokSize == 0 && (PragmaUnroll || PragmaNoUnroll)) {
+ ConsumeToken(); // The annotation token.
+ Hint.Range = Info->PragmaName.getLocation();
+ return true;
+ }
+
+ // The constant expression is always followed by an eof token, which increases
+ // the TokSize by 1.
+ assert(TokSize > 0 &&
+ "PragmaLoopHintInfo::Toks must contain at least one token.");
+
+  // If no option is specified, the argument is assumed to be a constant
+  // expression.
+  bool OptionUnroll = false;
+  bool StateOption = false;
+  if (OptionInfo) { // #pragma unroll does not specify an option.
+ OptionUnroll = OptionInfo->isStr("unroll");
+ StateOption = llvm::StringSwitch<bool>(OptionInfo->getName())
+ .Case("vectorize", true)
+ .Case("interleave", true)
+ .Case("unroll", true)
+ .Default(false);
+ }
+
+ // Verify loop hint has an argument.
+ if (Toks[0].is(tok::eof)) {
+ ConsumeToken(); // The annotation token.
+ Diag(Toks[0].getLocation(), diag::err_pragma_loop_missing_argument)
+ << /*StateArgument=*/StateOption << /*FullKeyword=*/OptionUnroll;
+ return false;
+ }
+
+ // Validate the argument.
+ if (StateOption) {
+ ConsumeToken(); // The annotation token.
+ SourceLocation StateLoc = Toks[0].getLocation();
+ IdentifierInfo *StateInfo = Toks[0].getIdentifierInfo();
+ if (!StateInfo ||
+ (!StateInfo->isStr("enable") && !StateInfo->isStr("disable") &&
+ ((OptionUnroll && !StateInfo->isStr("full")) ||
+ (!OptionUnroll && !StateInfo->isStr("assume_safety"))))) {
+ Diag(Toks[0].getLocation(), diag::err_pragma_invalid_keyword)
+ << /*FullKeyword=*/OptionUnroll;
+ return false;
+ }
+ if (TokSize > 2)
+ Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << PragmaLoopHintString(Info->PragmaName, Info->Option);
+ Hint.StateLoc = IdentifierLoc::create(Actions.Context, StateLoc, StateInfo);
+ } else {
+ // Enter constant expression including eof terminator into token stream.
+ PP.EnterTokenStream(Toks, TokSize, /*DisableMacroExpansion=*/false,
+ /*OwnsTokens=*/false);
+ ConsumeToken(); // The annotation token.
+
+ ExprResult R = ParseConstantExpression();
+
+ // Tokens following an error in an ill-formed constant expression will
+ // remain in the token stream and must be removed.
+ if (Tok.isNot(tok::eof)) {
+ Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << PragmaLoopHintString(Info->PragmaName, Info->Option);
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+ }
+
+ ConsumeToken(); // Consume the constant expression eof terminator.
+
+ if (R.isInvalid() ||
+ Actions.CheckLoopHintExpr(R.get(), Toks[0].getLocation()))
+ return false;
+
+ // Argument is a constant expression with an integer type.
+ Hint.ValueExpr = R.get();
+ }
+
+ Hint.Range = SourceRange(Info->PragmaName.getLocation(),
+ Info->Toks[TokSize - 1].getLocation());
+ return true;
+}
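
Illustrative hints consumed by this routine (the function and array names are hypothetical):

    void axpy(float *a, const float *b, int n) {
      #pragma clang loop vectorize(enable) interleave_count(4)
      for (int i = 0; i < n; ++i)
        a[i] += b[i];
    }

    void clear(float *a, int n) {
      #pragma unroll(8) // no option identifier; '8' is the constant expression
      for (int i = 0; i < n; ++i)
        a[i] = 0;
    }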
+
+// #pragma GCC visibility comes in two variants:
+// 'push' '(' [visibility] ')'
+// 'pop'
+void PragmaGCCVisibilityHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &VisTok) {
+ SourceLocation VisLoc = VisTok.getLocation();
+
+ Token Tok;
+ PP.LexUnexpandedToken(Tok);
+
+ const IdentifierInfo *PushPop = Tok.getIdentifierInfo();
+
+ const IdentifierInfo *VisType;
+ if (PushPop && PushPop->isStr("pop")) {
+ VisType = nullptr;
+ } else if (PushPop && PushPop->isStr("push")) {
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_lparen)
+ << "visibility";
+ return;
+ }
+ PP.LexUnexpandedToken(Tok);
+ VisType = Tok.getIdentifierInfo();
+ if (!VisType) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier)
+ << "visibility";
+ return;
+ }
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_rparen)
+ << "visibility";
+ return;
+ }
+ } else {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier)
+ << "visibility";
+ return;
+ }
+ SourceLocation EndLoc = Tok.getLocation();
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "visibility";
+ return;
+ }
+
+ Token *Toks = new Token[1];
+ Toks[0].startToken();
+ Toks[0].setKind(tok::annot_pragma_vis);
+ Toks[0].setLocation(VisLoc);
+ Toks[0].setAnnotationEndLoc(EndLoc);
+ Toks[0].setAnnotationValue(
+ const_cast<void*>(static_cast<const void*>(VisType)));
+ PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true,
+ /*OwnsTokens=*/true);
+}
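
For example (illustrative):

    #pragma GCC visibility push(hidden)
    void internal_helper();   // receives hidden visibility
    #pragma GCC visibility pop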
+
+// #pragma pack(...) comes in the following delicious flavors:
+// pack '(' [integer] ')'
+// pack '(' 'show' ')'
+// pack '(' ('push' | 'pop') [',' identifier] [, integer] ')'
+void PragmaPackHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &PackTok) {
+ SourceLocation PackLoc = PackTok.getLocation();
+
+ Token Tok;
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_lparen) << "pack";
+ return;
+ }
+
+ Sema::PragmaPackKind Kind = Sema::PPK_Default;
+ IdentifierInfo *Name = nullptr;
+ Token Alignment;
+ Alignment.startToken();
+ SourceLocation LParenLoc = Tok.getLocation();
+ PP.Lex(Tok);
+ if (Tok.is(tok::numeric_constant)) {
+ Alignment = Tok;
+
+ PP.Lex(Tok);
+
+ // In MSVC/gcc, #pragma pack(4) sets the alignment without affecting
+ // the push/pop stack.
+ // In Apple gcc, #pragma pack(4) is equivalent to #pragma pack(push, 4)
+ if (PP.getLangOpts().ApplePragmaPack)
+ Kind = Sema::PPK_Push;
+ } else if (Tok.is(tok::identifier)) {
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (II->isStr("show")) {
+ Kind = Sema::PPK_Show;
+ PP.Lex(Tok);
+ } else {
+ if (II->isStr("push")) {
+ Kind = Sema::PPK_Push;
+ } else if (II->isStr("pop")) {
+ Kind = Sema::PPK_Pop;
+ } else {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_invalid_action) << "pack";
+ return;
+ }
+ PP.Lex(Tok);
+
+ if (Tok.is(tok::comma)) {
+ PP.Lex(Tok);
+
+ if (Tok.is(tok::numeric_constant)) {
+ Alignment = Tok;
+
+ PP.Lex(Tok);
+ } else if (Tok.is(tok::identifier)) {
+ Name = Tok.getIdentifierInfo();
+ PP.Lex(Tok);
+
+ if (Tok.is(tok::comma)) {
+ PP.Lex(Tok);
+
+ if (Tok.isNot(tok::numeric_constant)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_pack_malformed);
+ return;
+ }
+
+ Alignment = Tok;
+
+ PP.Lex(Tok);
+ }
+ } else {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_pack_malformed);
+ return;
+ }
+ }
+ }
+ } else if (PP.getLangOpts().ApplePragmaPack) {
+ // In MSVC/gcc, #pragma pack() resets the alignment without affecting
+ // the push/pop stack.
+ // In Apple gcc #pragma pack() is equivalent to #pragma pack(pop).
+ Kind = Sema::PPK_Pop;
+ }
+
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_rparen) << "pack";
+ return;
+ }
+
+ SourceLocation RParenLoc = Tok.getLocation();
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol) << "pack";
+ return;
+ }
+
+ PragmaPackInfo *Info =
+ (PragmaPackInfo*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(PragmaPackInfo), llvm::alignOf<PragmaPackInfo>());
+ new (Info) PragmaPackInfo();
+ Info->Kind = Kind;
+ Info->Name = Name;
+ Info->Alignment = Alignment;
+ Info->LParenLoc = LParenLoc;
+ Info->RParenLoc = RParenLoc;
+
+ Token *Toks =
+ (Token*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(Token) * 1, llvm::alignOf<Token>());
+ new (Toks) Token();
+ Toks[0].startToken();
+ Toks[0].setKind(tok::annot_pragma_pack);
+ Toks[0].setLocation(PackLoc);
+ Toks[0].setAnnotationEndLoc(RParenLoc);
+ Toks[0].setAnnotationValue(static_cast<void*>(Info));
+ PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true,
+ /*OwnsTokens=*/false);
+}
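
The flavors above in source form (illustrative; with 1-byte packing the struct loses its internal padding):

    #pragma pack(push, r1, 1)          // save alignment under label "r1", set to 1
    struct Packed { char c; int i; };  // sizeof(Packed) == 5
    #pragma pack(pop, r1)              // restore the alignment saved under "r1"
    #pragma pack(show)                 // report the current alignment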
+
+// #pragma ms_struct on
+// #pragma ms_struct off
+void PragmaMSStructHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &MSStructTok) {
+ Sema::PragmaMSStructKind Kind = Sema::PMSST_OFF;
+
+ Token Tok;
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_ms_struct);
+ return;
+ }
+ SourceLocation EndLoc = Tok.getLocation();
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (II->isStr("on")) {
+ Kind = Sema::PMSST_ON;
+ PP.Lex(Tok);
+ }
+ else if (II->isStr("off") || II->isStr("reset"))
+ PP.Lex(Tok);
+ else {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_ms_struct);
+ return;
+ }
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "ms_struct";
+ return;
+ }
+
+ Token *Toks =
+ (Token*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(Token) * 1, llvm::alignOf<Token>());
+ new (Toks) Token();
+ Toks[0].startToken();
+ Toks[0].setKind(tok::annot_pragma_msstruct);
+ Toks[0].setLocation(MSStructTok.getLocation());
+ Toks[0].setAnnotationEndLoc(EndLoc);
+ Toks[0].setAnnotationValue(reinterpret_cast<void*>(
+ static_cast<uintptr_t>(Kind)));
+ PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true,
+ /*OwnsTokens=*/false);
+}
+
+// #pragma 'align' '=' {'native','natural','mac68k','power','reset'}
+// #pragma 'options 'align' '=' {'native','natural','mac68k','power','reset'}
+static void ParseAlignPragma(Preprocessor &PP, Token &FirstTok,
+ bool IsOptions) {
+ Token Tok;
+
+ if (IsOptions) {
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier) ||
+ !Tok.getIdentifierInfo()->isStr("align")) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_options_expected_align);
+ return;
+ }
+ }
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::equal)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_align_expected_equal)
+ << IsOptions;
+ return;
+ }
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier)
+ << (IsOptions ? "options" : "align");
+ return;
+ }
+
+ Sema::PragmaOptionsAlignKind Kind = Sema::POAK_Natural;
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (II->isStr("native"))
+ Kind = Sema::POAK_Native;
+ else if (II->isStr("natural"))
+ Kind = Sema::POAK_Natural;
+ else if (II->isStr("packed"))
+ Kind = Sema::POAK_Packed;
+ else if (II->isStr("power"))
+ Kind = Sema::POAK_Power;
+ else if (II->isStr("mac68k"))
+ Kind = Sema::POAK_Mac68k;
+ else if (II->isStr("reset"))
+ Kind = Sema::POAK_Reset;
+ else {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_align_invalid_option)
+ << IsOptions;
+ return;
+ }
+
+ SourceLocation EndLoc = Tok.getLocation();
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << (IsOptions ? "options" : "align");
+ return;
+ }
+
+ Token *Toks =
+ (Token*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(Token) * 1, llvm::alignOf<Token>());
+ new (Toks) Token();
+ Toks[0].startToken();
+ Toks[0].setKind(tok::annot_pragma_align);
+ Toks[0].setLocation(FirstTok.getLocation());
+ Toks[0].setAnnotationEndLoc(EndLoc);
+ Toks[0].setAnnotationValue(reinterpret_cast<void*>(
+ static_cast<uintptr_t>(Kind)));
+ PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true,
+ /*OwnsTokens=*/false);
+}
+
+void PragmaAlignHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &AlignTok) {
+ ParseAlignPragma(PP, AlignTok, /*IsOptions=*/false);
+}
+
+void PragmaOptionsHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &OptionsTok) {
+ ParseAlignPragma(PP, OptionsTok, /*IsOptions=*/true);
+}
+
+// #pragma unused(identifier)
+void PragmaUnusedHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &UnusedTok) {
+ // FIXME: Should we be expanding macros here? My guess is no.
+ SourceLocation UnusedLoc = UnusedTok.getLocation();
+
+ // Lex the left '('.
+ Token Tok;
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_lparen) << "unused";
+ return;
+ }
+
+ // Lex the declaration reference(s).
+ SmallVector<Token, 5> Identifiers;
+ SourceLocation RParenLoc;
+ bool LexID = true;
+
+ while (true) {
+ PP.Lex(Tok);
+
+ if (LexID) {
+ if (Tok.is(tok::identifier)) {
+ Identifiers.push_back(Tok);
+ LexID = false;
+ continue;
+ }
+
+ // Illegal token!
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_unused_expected_var);
+ return;
+ }
+
+    // We are expecting a ')' or a ','.
+ if (Tok.is(tok::comma)) {
+ LexID = true;
+ continue;
+ }
+
+ if (Tok.is(tok::r_paren)) {
+ RParenLoc = Tok.getLocation();
+ break;
+ }
+
+ // Illegal token!
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_punc) << "unused";
+ return;
+ }
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol) <<
+ "unused";
+ return;
+ }
+
+ // Verify that we have a location for the right parenthesis.
+ assert(RParenLoc.isValid() && "Valid '#pragma unused' must have ')'");
+ assert(!Identifiers.empty() && "Valid '#pragma unused' must have arguments");
+
+ // For each identifier token, insert into the token stream a
+ // annot_pragma_unused token followed by the identifier token.
+ // This allows us to cache a "#pragma unused" that occurs inside an inline
+ // C++ member function.
+
+ Token *Toks =
+ (Token*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(Token) * 2 * Identifiers.size(), llvm::alignOf<Token>());
+ for (unsigned i=0; i != Identifiers.size(); i++) {
+ Token &pragmaUnusedTok = Toks[2*i], &idTok = Toks[2*i+1];
+ pragmaUnusedTok.startToken();
+ pragmaUnusedTok.setKind(tok::annot_pragma_unused);
+ pragmaUnusedTok.setLocation(UnusedLoc);
+ idTok = Identifiers[i];
+ }
+ PP.EnterTokenStream(Toks, 2*Identifiers.size(),
+ /*DisableMacroExpansion=*/true, /*OwnsTokens=*/false);
+}
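
So a directive such as the following (illustrative) becomes the interleaved annotation/identifier stream described above:

    void touch(int x, int y) {
      #pragma unused(x, y) // -> annot_pragma_unused 'x' annot_pragma_unused 'y'
    }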
+
+// #pragma weak identifier
+// #pragma weak identifier '=' identifier
+void PragmaWeakHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &WeakTok) {
+ SourceLocation WeakLoc = WeakTok.getLocation();
+
+ Token Tok;
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier) << "weak";
+ return;
+ }
+
+ Token WeakName = Tok;
+ bool HasAlias = false;
+ Token AliasName;
+
+ PP.Lex(Tok);
+ if (Tok.is(tok::equal)) {
+ HasAlias = true;
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier)
+ << "weak";
+ return;
+ }
+ AliasName = Tok;
+ PP.Lex(Tok);
+ }
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol) << "weak";
+ return;
+ }
+
+ if (HasAlias) {
+ Token *Toks =
+ (Token*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(Token) * 3, llvm::alignOf<Token>());
+ Token &pragmaUnusedTok = Toks[0];
+ pragmaUnusedTok.startToken();
+ pragmaUnusedTok.setKind(tok::annot_pragma_weakalias);
+ pragmaUnusedTok.setLocation(WeakLoc);
+ pragmaUnusedTok.setAnnotationEndLoc(AliasName.getLocation());
+ Toks[1] = WeakName;
+ Toks[2] = AliasName;
+ PP.EnterTokenStream(Toks, 3,
+ /*DisableMacroExpansion=*/true, /*OwnsTokens=*/false);
+ } else {
+ Token *Toks =
+ (Token*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(Token) * 2, llvm::alignOf<Token>());
+ Token &pragmaUnusedTok = Toks[0];
+ pragmaUnusedTok.startToken();
+ pragmaUnusedTok.setKind(tok::annot_pragma_weak);
+ pragmaUnusedTok.setLocation(WeakLoc);
+ pragmaUnusedTok.setAnnotationEndLoc(WeakLoc);
+ Toks[1] = WeakName;
+ PP.EnterTokenStream(Toks, 2,
+ /*DisableMacroExpansion=*/true, /*OwnsTokens=*/false);
+ }
+}
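
The two forms in source (illustrative symbol names):

    #pragma weak fallback              // declare 'fallback' as a weak symbol
    #pragma weak fallback = real_impl  // weak alias for 'real_impl'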
+
+// #pragma redefine_extname identifier identifier
+void PragmaRedefineExtnameHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &RedefToken) {
+ SourceLocation RedefLoc = RedefToken.getLocation();
+
+ Token Tok;
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier) <<
+ "redefine_extname";
+ return;
+ }
+
+ Token RedefName = Tok;
+ PP.Lex(Tok);
+
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier)
+ << "redefine_extname";
+ return;
+ }
+
+ Token AliasName = Tok;
+ PP.Lex(Tok);
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol) <<
+ "redefine_extname";
+ return;
+ }
+
+ Token *Toks =
+ (Token*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(Token) * 3, llvm::alignOf<Token>());
+ Token &pragmaRedefTok = Toks[0];
+ pragmaRedefTok.startToken();
+ pragmaRedefTok.setKind(tok::annot_pragma_redefine_extname);
+ pragmaRedefTok.setLocation(RedefLoc);
+ pragmaRedefTok.setAnnotationEndLoc(AliasName.getLocation());
+ Toks[1] = RedefName;
+ Toks[2] = AliasName;
+ PP.EnterTokenStream(Toks, 3,
+ /*DisableMacroExpansion=*/true, /*OwnsTokens=*/false);
+}
+
+void
+PragmaFPContractHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &Tok) {
+ tok::OnOffSwitch OOS;
+ if (PP.LexOnOffSwitch(OOS))
+ return;
+
+ Token *Toks =
+ (Token*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(Token) * 1, llvm::alignOf<Token>());
+ new (Toks) Token();
+ Toks[0].startToken();
+ Toks[0].setKind(tok::annot_pragma_fp_contract);
+ Toks[0].setLocation(Tok.getLocation());
+ Toks[0].setAnnotationEndLoc(Tok.getLocation());
+ Toks[0].setAnnotationValue(reinterpret_cast<void*>(
+ static_cast<uintptr_t>(OOS)));
+ PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true,
+ /*OwnsTokens=*/false);
+}
+
+void
+PragmaOpenCLExtensionHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &Tok) {
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier) <<
+ "OPENCL";
+ return;
+ }
+ IdentifierInfo *ename = Tok.getIdentifierInfo();
+ SourceLocation NameLoc = Tok.getLocation();
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::colon)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_colon) << ename;
+ return;
+ }
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_enable_disable);
+ return;
+ }
+ IdentifierInfo *op = Tok.getIdentifierInfo();
+
+ unsigned state;
+ if (op->isStr("enable")) {
+ state = 1;
+ } else if (op->isStr("disable")) {
+ state = 0;
+ } else {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_enable_disable);
+ return;
+ }
+ SourceLocation StateLoc = Tok.getLocation();
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol) <<
+ "OPENCL EXTENSION";
+ return;
+ }
+
+ OpenCLExtData data(ename, state);
+ Token *Toks =
+ (Token*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(Token) * 1, llvm::alignOf<Token>());
+ new (Toks) Token();
+ Toks[0].startToken();
+ Toks[0].setKind(tok::annot_pragma_opencl_extension);
+ Toks[0].setLocation(NameLoc);
+ Toks[0].setAnnotationValue(data.getOpaqueValue());
+ Toks[0].setAnnotationEndLoc(StateLoc);
+ PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true,
+ /*OwnsTokens=*/false);
+
+ if (PP.getPPCallbacks())
+ PP.getPPCallbacks()->PragmaOpenCLExtension(NameLoc, ename,
+ StateLoc, state);
+}
+
+/// \brief Handle '#pragma omp ...' when OpenMP is disabled.
+///
+void
+PragmaNoOpenMPHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &FirstTok) {
+ if (!PP.getDiagnostics().isIgnored(diag::warn_pragma_omp_ignored,
+ FirstTok.getLocation())) {
+ PP.Diag(FirstTok, diag::warn_pragma_omp_ignored);
+ PP.getDiagnostics().setSeverity(diag::warn_pragma_omp_ignored,
+ diag::Severity::Ignored, SourceLocation());
+ }
+ PP.DiscardUntilEndOfDirective();
+}
+
+/// \brief Handle '#pragma omp ...' when OpenMP is enabled.
+///
+void
+PragmaOpenMPHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &FirstTok) {
+ SmallVector<Token, 16> Pragma;
+ Token Tok;
+ Tok.startToken();
+ Tok.setKind(tok::annot_pragma_openmp);
+ Tok.setLocation(FirstTok.getLocation());
+
+ while (Tok.isNot(tok::eod)) {
+ Pragma.push_back(Tok);
+ PP.Lex(Tok);
+ }
+ SourceLocation EodLoc = Tok.getLocation();
+ Tok.startToken();
+ Tok.setKind(tok::annot_pragma_openmp_end);
+ Tok.setLocation(EodLoc);
+ Pragma.push_back(Tok);
+
+ Token *Toks = new Token[Pragma.size()];
+ std::copy(Pragma.begin(), Pragma.end(), Toks);
+ PP.EnterTokenStream(Toks, Pragma.size(),
+ /*DisableMacroExpansion=*/false, /*OwnsTokens=*/true);
+}
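
For example, a directive such as the following (illustrative) is re-entered as an annotation-delimited token run:

    #pragma omp parallel for
    // becomes: annot_pragma_openmp 'parallel' 'for' annot_pragma_openmp_end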
+
+/// \brief Handle '#pragma pointers_to_members'
+// The grammar for this pragma is as follows:
+//
+// <inheritance model> ::= ('single' | 'multiple' | 'virtual') '_inheritance'
+//
+// #pragma pointers_to_members '(' 'best_case' ')'
+// #pragma pointers_to_members '(' 'full_generality' [',' inheritance-model] ')'
+// #pragma pointers_to_members '(' inheritance-model ')'
+void PragmaMSPointersToMembers::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &Tok) {
+ SourceLocation PointersToMembersLoc = Tok.getLocation();
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(PointersToMembersLoc, diag::warn_pragma_expected_lparen)
+ << "pointers_to_members";
+ return;
+ }
+ PP.Lex(Tok);
+ const IdentifierInfo *Arg = Tok.getIdentifierInfo();
+ if (!Arg) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier)
+ << "pointers_to_members";
+ return;
+ }
+ PP.Lex(Tok);
+
+ LangOptions::PragmaMSPointersToMembersKind RepresentationMethod;
+ if (Arg->isStr("best_case")) {
+ RepresentationMethod = LangOptions::PPTMK_BestCase;
+ } else {
+ if (Arg->isStr("full_generality")) {
+ if (Tok.is(tok::comma)) {
+ PP.Lex(Tok);
+
+ Arg = Tok.getIdentifierInfo();
+ if (!Arg) {
+ PP.Diag(Tok.getLocation(),
+ diag::err_pragma_pointers_to_members_unknown_kind)
+ << Tok.getKind() << /*OnlyInheritanceModels*/ 0;
+ return;
+ }
+ PP.Lex(Tok);
+ } else if (Tok.is(tok::r_paren)) {
+ // #pragma pointers_to_members(full_generality) implicitly specifies
+ // virtual_inheritance.
+ Arg = nullptr;
+ RepresentationMethod = LangOptions::PPTMK_FullGeneralityVirtualInheritance;
+ } else {
+ PP.Diag(Tok.getLocation(), diag::err_expected_punc)
+ << "full_generality";
+ return;
+ }
+ }
+
+ if (Arg) {
+ if (Arg->isStr("single_inheritance")) {
+ RepresentationMethod =
+ LangOptions::PPTMK_FullGeneralitySingleInheritance;
+ } else if (Arg->isStr("multiple_inheritance")) {
+ RepresentationMethod =
+ LangOptions::PPTMK_FullGeneralityMultipleInheritance;
+ } else if (Arg->isStr("virtual_inheritance")) {
+ RepresentationMethod =
+ LangOptions::PPTMK_FullGeneralityVirtualInheritance;
+ } else {
+ PP.Diag(Tok.getLocation(),
+ diag::err_pragma_pointers_to_members_unknown_kind)
+ << Arg << /*HasPointerDeclaration*/ 1;
+ return;
+ }
+ }
+ }
+
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::err_expected_rparen_after)
+ << (Arg ? Arg->getName() : "full_generality");
+ return;
+ }
+
+ SourceLocation EndLoc = Tok.getLocation();
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "pointers_to_members";
+ return;
+ }
+
+ Token AnnotTok;
+ AnnotTok.startToken();
+ AnnotTok.setKind(tok::annot_pragma_ms_pointers_to_members);
+ AnnotTok.setLocation(PointersToMembersLoc);
+ AnnotTok.setAnnotationEndLoc(EndLoc);
+ AnnotTok.setAnnotationValue(
+ reinterpret_cast<void *>(static_cast<uintptr_t>(RepresentationMethod)));
+ PP.EnterToken(AnnotTok);
+}
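
For example (illustrative):

    #pragma pointers_to_members(full_generality, virtual_inheritance)
    struct S;              // S may use any inheritance model
    int S::*pm = nullptr;  // most general member-pointer representation is used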
+
+/// \brief Handle '#pragma vtordisp'
+// The grammar for this pragma is as follows:
+//
+// <vtordisp-mode> ::= ('off' | 'on' | '0' | '1' | '2' )
+//
+// #pragma vtordisp '(' ['push' ','] vtordisp-mode ')'
+// #pragma vtordisp '(' 'pop' ')'
+// #pragma vtordisp '(' ')'
+void PragmaMSVtorDisp::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &Tok) {
+ SourceLocation VtorDispLoc = Tok.getLocation();
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(VtorDispLoc, diag::warn_pragma_expected_lparen) << "vtordisp";
+ return;
+ }
+ PP.Lex(Tok);
+
+ Sema::PragmaVtorDispKind Kind = Sema::PVDK_Set;
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (II) {
+ if (II->isStr("push")) {
+ // #pragma vtordisp(push, mode)
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::comma)) {
+ PP.Diag(VtorDispLoc, diag::warn_pragma_expected_punc) << "vtordisp";
+ return;
+ }
+ PP.Lex(Tok);
+ Kind = Sema::PVDK_Push;
+      // The token after the comma is the vtordisp mode; it is parsed below.
+ } else if (II->isStr("pop")) {
+ // #pragma vtordisp(pop)
+ PP.Lex(Tok);
+ Kind = Sema::PVDK_Pop;
+ }
+    // Not push or pop: the identifier itself may be the mode (on/off).
+ } else {
+ if (Tok.is(tok::r_paren)) {
+ // #pragma vtordisp()
+ Kind = Sema::PVDK_Reset;
+ }
+ }
+
+ uint64_t Value = 0;
+ if (Kind == Sema::PVDK_Push || Kind == Sema::PVDK_Set) {
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (II && II->isStr("off")) {
+ PP.Lex(Tok);
+ Value = 0;
+ } else if (II && II->isStr("on")) {
+ PP.Lex(Tok);
+ Value = 1;
+ } else if (Tok.is(tok::numeric_constant) &&
+ PP.parseSimpleIntegerLiteral(Tok, Value)) {
+ if (Value > 2) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_integer)
+ << 0 << 2 << "vtordisp";
+ return;
+ }
+ } else {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_invalid_action)
+ << "vtordisp";
+ return;
+ }
+ }
+
+ // Finish the pragma: ')' $
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(VtorDispLoc, diag::warn_pragma_expected_rparen) << "vtordisp";
+ return;
+ }
+ SourceLocation EndLoc = Tok.getLocation();
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "vtordisp";
+ return;
+ }
+
+ // Enter the annotation.
+ Token AnnotTok;
+ AnnotTok.startToken();
+ AnnotTok.setKind(tok::annot_pragma_ms_vtordisp);
+ AnnotTok.setLocation(VtorDispLoc);
+ AnnotTok.setAnnotationEndLoc(EndLoc);
+ AnnotTok.setAnnotationValue(reinterpret_cast<void *>(
+ static_cast<uintptr_t>((Kind << 16) | (Value & 0xFFFF))));
+ PP.EnterToken(AnnotTok);
+}
+
+/// \brief Handle all MS pragmas. Simply forwards the tokens after inserting
+/// an annotation token.
+void PragmaMSPragma::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &Tok) {
+ Token EoF, AnnotTok;
+ EoF.startToken();
+ EoF.setKind(tok::eof);
+ AnnotTok.startToken();
+ AnnotTok.setKind(tok::annot_pragma_ms_pragma);
+ AnnotTok.setLocation(Tok.getLocation());
+ AnnotTok.setAnnotationEndLoc(Tok.getLocation());
+ SmallVector<Token, 8> TokenVector;
+ // Suck up all of the tokens before the eod.
+ for (; Tok.isNot(tok::eod); PP.Lex(Tok)) {
+ TokenVector.push_back(Tok);
+ AnnotTok.setAnnotationEndLoc(Tok.getLocation());
+ }
+  // Add a sentinel EoF token to the end of the list.
+ TokenVector.push_back(EoF);
+ // We must allocate this array with new because EnterTokenStream is going to
+ // delete it later.
+ Token *TokenArray = new Token[TokenVector.size()];
+ std::copy(TokenVector.begin(), TokenVector.end(), TokenArray);
+ auto Value = new (PP.getPreprocessorAllocator())
+ std::pair<Token*, size_t>(std::make_pair(TokenArray, TokenVector.size()));
+ AnnotTok.setAnnotationValue(Value);
+ PP.EnterToken(AnnotTok);
+}
+
+/// \brief Handle the Microsoft \#pragma detect_mismatch extension.
+///
+/// The syntax is:
+/// \code
+/// #pragma detect_mismatch("name", "value")
+/// \endcode
+/// Where 'name' and 'value' are quoted strings. The values are embedded in
+/// the object file and passed along to the linker. If the linker detects a
+/// mismatch in the object file's values for the given name, a LNK2038 error
+/// is emitted. See MSDN for more details.
+void PragmaDetectMismatchHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &Tok) {
+ SourceLocation CommentLoc = Tok.getLocation();
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(CommentLoc, diag::err_expected) << tok::l_paren;
+ return;
+ }
+
+ // Read the name to embed, which must be a string literal.
+ std::string NameString;
+ if (!PP.LexStringLiteral(Tok, NameString,
+ "pragma detect_mismatch",
+ /*MacroExpansion=*/true))
+ return;
+
+ // Read the comma followed by a second string literal.
+ std::string ValueString;
+ if (Tok.isNot(tok::comma)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_detect_mismatch_malformed);
+ return;
+ }
+
+ if (!PP.LexStringLiteral(Tok, ValueString, "pragma detect_mismatch",
+ /*MacroExpansion=*/true))
+ return;
+
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::err_expected) << tok::r_paren;
+ return;
+ }
+ PP.Lex(Tok); // Eat the r_paren.
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_detect_mismatch_malformed);
+ return;
+ }
+
+ // If the pragma is lexically sound, notify any interested PPCallbacks.
+ if (PP.getPPCallbacks())
+ PP.getPPCallbacks()->PragmaDetectMismatch(CommentLoc, NameString,
+ ValueString);
+
+ Actions.ActOnPragmaDetectMismatch(NameString, ValueString);
+}
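
For example (the name/value pair is illustrative):

    #pragma detect_mismatch("mylib_version", "9")
    // Two objects carrying different values for "mylib_version" produce
    // LNK2038 when linked together with the MSVC linker.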
+
+/// \brief Handle the Microsoft \#pragma comment extension.
+///
+/// The syntax is:
+/// \code
+/// #pragma comment(linker, "foo")
+/// \endcode
+/// 'linker' is one of five identifiers: compiler, exestr, lib, linker, user.
+/// "foo" is a string, which is fully macro expanded, and permits string
+/// concatenation, embedded escape characters etc. See MSDN for more details.
+void PragmaCommentHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &Tok) {
+ SourceLocation CommentLoc = Tok.getLocation();
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(CommentLoc, diag::err_pragma_comment_malformed);
+ return;
+ }
+
+ // Read the identifier.
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(CommentLoc, diag::err_pragma_comment_malformed);
+ return;
+ }
+
+ // Verify that this is one of the 5 whitelisted options.
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ Sema::PragmaMSCommentKind Kind =
+ llvm::StringSwitch<Sema::PragmaMSCommentKind>(II->getName())
+ .Case("linker", Sema::PCK_Linker)
+ .Case("lib", Sema::PCK_Lib)
+ .Case("compiler", Sema::PCK_Compiler)
+ .Case("exestr", Sema::PCK_ExeStr)
+ .Case("user", Sema::PCK_User)
+ .Default(Sema::PCK_Unknown);
+ if (Kind == Sema::PCK_Unknown) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_comment_unknown_kind);
+ return;
+ }
+
+ // On PS4, issue a warning about any pragma comments other than
+ // #pragma comment lib.
+ if (PP.getTargetInfo().getTriple().isPS4() && Kind != Sema::PCK_Lib) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_comment_ignored)
+ << II->getName();
+ return;
+ }
+
+ // Read the optional string if present.
+ PP.Lex(Tok);
+ std::string ArgumentString;
+ if (Tok.is(tok::comma) && !PP.LexStringLiteral(Tok, ArgumentString,
+ "pragma comment",
+ /*MacroExpansion=*/true))
+ return;
+
+ // FIXME: warn that 'exestr' is deprecated.
+ // FIXME: If the kind is "compiler" warn if the string is present (it is
+ // ignored).
+ // The MSDN docs say that "lib" and "linker" require a string and have a short
+ // whitelist of linker options they support, but in practice MSVC doesn't
+ // issue a diagnostic. Therefore neither does clang.
+
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_comment_malformed);
+ return;
+ }
+  PP.Lex(Tok); // Eat the r_paren.
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_comment_malformed);
+ return;
+ }
+
+ // If the pragma is lexically sound, notify any interested PPCallbacks.
+ if (PP.getPPCallbacks())
+ PP.getPPCallbacks()->PragmaComment(CommentLoc, II, ArgumentString);
+
+ Actions.ActOnPragmaMSComment(Kind, ArgumentString);
+}
+
+// #pragma clang optimize off
+// #pragma clang optimize on
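+//
+// For example (illustrative usage), to exclude one function definition from
+// optimization:
+//
+//   #pragma clang optimize off
+//   void debug_me() { /* defined while optimization is off */ }
+//   #pragma clang optimize on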
+void PragmaOptimizeHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &FirstToken) {
+ Token Tok;
+ PP.Lex(Tok);
+ if (Tok.is(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_missing_argument)
+ << "clang optimize" << /*Expected=*/true << "'on' or 'off'";
+ return;
+ }
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_optimize_invalid_argument)
+ << PP.getSpelling(Tok);
+ return;
+ }
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ // The only accepted values are 'on' or 'off'.
+ bool IsOn = false;
+ if (II->isStr("on")) {
+ IsOn = true;
+ } else if (!II->isStr("off")) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_optimize_invalid_argument)
+ << PP.getSpelling(Tok);
+ return;
+ }
+ PP.Lex(Tok);
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_optimize_extra_argument)
+ << PP.getSpelling(Tok);
+ return;
+ }
+
+ Actions.ActOnPragmaOptimize(IsOn, FirstToken.getLocation());
+}
+
+/// \brief Parses loop or unroll pragma hint value and fills in Info.
+static bool ParseLoopHintValue(Preprocessor &PP, Token &Tok, Token PragmaName,
+ Token Option, bool ValueInParens,
+ PragmaLoopHintInfo &Info) {
+ SmallVector<Token, 1> ValueList;
+ int OpenParens = ValueInParens ? 1 : 0;
+ // Read constant expression.
+ while (Tok.isNot(tok::eod)) {
+ if (Tok.is(tok::l_paren))
+ OpenParens++;
+ else if (Tok.is(tok::r_paren)) {
+ OpenParens--;
+ if (OpenParens == 0 && ValueInParens)
+ break;
+ }
+
+ ValueList.push_back(Tok);
+ PP.Lex(Tok);
+ }
+
+ if (ValueInParens) {
+ // Read ')'
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::err_expected) << tok::r_paren;
+ return true;
+ }
+ PP.Lex(Tok);
+ }
+
+ Token EOFTok;
+ EOFTok.startToken();
+ EOFTok.setKind(tok::eof);
+ EOFTok.setLocation(Tok.getLocation());
+ ValueList.push_back(EOFTok); // Terminates expression for parsing.
+
+ Info.Toks = llvm::makeArrayRef(ValueList).copy(PP.getPreprocessorAllocator());
+
+ Info.PragmaName = PragmaName;
+ Info.Option = Option;
+ return false;
+}
+
+/// \brief Handle the \#pragma clang loop directive.
+/// #pragma clang 'loop' loop-hints
+///
+/// loop-hints:
+/// loop-hint loop-hints[opt]
+///
+/// loop-hint:
+/// 'vectorize' '(' loop-hint-keyword ')'
+/// 'interleave' '(' loop-hint-keyword ')'
+/// 'unroll' '(' unroll-hint-keyword ')'
+/// 'vectorize_width' '(' loop-hint-value ')'
+/// 'interleave_count' '(' loop-hint-value ')'
+/// 'unroll_count' '(' loop-hint-value ')'
+///
+/// loop-hint-keyword:
+/// 'enable'
+/// 'disable'
+/// 'assume_safety'
+///
+/// unroll-hint-keyword:
+/// 'enable'
+/// 'disable'
+/// 'full'
+///
+/// loop-hint-value:
+/// constant-expression
+///
+/// Specifying vectorize(enable) or vectorize_width(_value_) instructs llvm to
+/// try vectorizing the instructions of the loop it precedes. Specifying
+/// interleave(enable) or interleave_count(_value_) instructs llvm to try
+/// interleaving multiple iterations of the loop it precedes. The width of the
+/// vector instructions is specified by vectorize_width() and the number of
+/// interleaved loop iterations is specified by interleave_count(). Specifying a
+/// value of 1 effectively disables vectorization/interleaving, even if it is
+/// possible and profitable, and 0 is invalid. The loop vectorizer currently
+/// only works on inner loops.
+///
+/// The unroll and unroll_count directives control the concatenation
+/// unroller. Specifying unroll(enable) instructs llvm to unroll the loop
+/// completely if the trip count is known at compile time and unroll partially
+/// if the trip count is not known. Specifying unroll(full) is similar to
+/// unroll(enable) but will unroll the loop only if the trip count is known at
+/// compile time. Specifying unroll(disable) disables unrolling for the
+/// loop. Specifying unroll_count(_value_) instructs llvm to try to unroll the
+/// loop the number of times indicated by the value.
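+///
+/// For example (illustrative only), the following asks for a vector width of
+/// 4 and two interleaved iterations of the loop that immediately follows:
+/// \code
+/// #pragma clang loop vectorize_width(4) interleave_count(2)
+/// for (int i = 0; i < n; ++i)
+///   a[i] = b[i] + c[i];
+/// \endcode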
+void PragmaLoopHintHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &Tok) {
+ // Incoming token is "loop" from "#pragma clang loop".
+ Token PragmaName = Tok;
+ SmallVector<Token, 1> TokenList;
+
+ // Lex the optimization option and verify it is an identifier.
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_loop_invalid_option)
+ << /*MissingOption=*/true << "";
+ return;
+ }
+
+ while (Tok.is(tok::identifier)) {
+ Token Option = Tok;
+ IdentifierInfo *OptionInfo = Tok.getIdentifierInfo();
+
+ bool OptionValid = llvm::StringSwitch<bool>(OptionInfo->getName())
+ .Case("vectorize", true)
+ .Case("interleave", true)
+ .Case("unroll", true)
+ .Case("vectorize_width", true)
+ .Case("interleave_count", true)
+ .Case("unroll_count", true)
+ .Default(false);
+ if (!OptionValid) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_loop_invalid_option)
+ << /*MissingOption=*/false << OptionInfo;
+ return;
+ }
+ PP.Lex(Tok);
+
+ // Read '('
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok.getLocation(), diag::err_expected) << tok::l_paren;
+ return;
+ }
+ PP.Lex(Tok);
+
+ auto *Info = new (PP.getPreprocessorAllocator()) PragmaLoopHintInfo;
+ if (ParseLoopHintValue(PP, Tok, PragmaName, Option, /*ValueInParens=*/true,
+ *Info))
+ return;
+
+ // Generate the loop hint token.
+ Token LoopHintTok;
+ LoopHintTok.startToken();
+ LoopHintTok.setKind(tok::annot_pragma_loop_hint);
+ LoopHintTok.setLocation(PragmaName.getLocation());
+ LoopHintTok.setAnnotationEndLoc(PragmaName.getLocation());
+ LoopHintTok.setAnnotationValue(static_cast<void *>(Info));
+ TokenList.push_back(LoopHintTok);
+ }
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "clang loop";
+ return;
+ }
+
+ Token *TokenArray = new Token[TokenList.size()];
+ std::copy(TokenList.begin(), TokenList.end(), TokenArray);
+
+ PP.EnterTokenStream(TokenArray, TokenList.size(),
+ /*DisableMacroExpansion=*/false,
+ /*OwnsTokens=*/true);
+}
+
+/// \brief Handle the loop unroll optimization pragmas.
+/// #pragma unroll
+/// #pragma unroll unroll-hint-value
+/// #pragma unroll '(' unroll-hint-value ')'
+/// #pragma nounroll
+///
+/// unroll-hint-value:
+/// constant-expression
+///
+/// Loop unrolling hints can be specified with '#pragma unroll' or
+/// '#pragma nounroll'. '#pragma unroll' can take a numeric argument optionally
+/// contained in parentheses. With no argument the directive instructs llvm to
+/// try to unroll the loop completely. A positive integer argument can be
+/// specified to indicate the number of times the loop should be unrolled. To
+/// maximize compatibility with other compilers the unroll count argument can be
+/// specified with or without parentheses. Specifying, '#pragma nounroll'
+/// disables unrolling of the loop.
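+///
+/// For example, '#pragma unroll 4' and '#pragma unroll(4)' (illustrative
+/// only) both request that the following loop be unrolled four times:
+/// \code
+/// #pragma unroll 4
+/// for (int i = 0; i < n; ++i)
+///   f(i);
+/// \endcode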
+void PragmaUnrollHintHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &Tok) {
+ // Incoming token is "unroll" for "#pragma unroll", or "nounroll" for
+ // "#pragma nounroll".
+ Token PragmaName = Tok;
+ PP.Lex(Tok);
+ auto *Info = new (PP.getPreprocessorAllocator()) PragmaLoopHintInfo;
+ if (Tok.is(tok::eod)) {
+ // nounroll or unroll pragma without an argument.
+ Info->PragmaName = PragmaName;
+ Info->Option.startToken();
+ } else if (PragmaName.getIdentifierInfo()->getName() == "nounroll") {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "nounroll";
+ return;
+ } else {
+ // Unroll pragma with an argument: "#pragma unroll N" or
+ // "#pragma unroll(N)".
+ // Read '(' if it exists.
+ bool ValueInParens = Tok.is(tok::l_paren);
+ if (ValueInParens)
+ PP.Lex(Tok);
+
+ Token Option;
+ Option.startToken();
+ if (ParseLoopHintValue(PP, Tok, PragmaName, Option, ValueInParens, *Info))
+ return;
+
+ // In CUDA, the argument to '#pragma unroll' should not be contained in
+ // parentheses.
+ if (PP.getLangOpts().CUDA && ValueInParens)
+ PP.Diag(Info->Toks[0].getLocation(),
+ diag::warn_pragma_unroll_cuda_value_in_parens);
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "unroll";
+ return;
+ }
+ }
+
+ // Generate the hint token.
+ Token *TokenArray = new Token[1];
+ TokenArray[0].startToken();
+ TokenArray[0].setKind(tok::annot_pragma_loop_hint);
+ TokenArray[0].setLocation(PragmaName.getLocation());
+ TokenArray[0].setAnnotationEndLoc(PragmaName.getLocation());
+ TokenArray[0].setAnnotationValue(static_cast<void *>(Info));
+ PP.EnterTokenStream(TokenArray, 1, /*DisableMacroExpansion=*/false,
+ /*OwnsTokens=*/true);
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
new file mode 100644
index 0000000..717bcff
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
@@ -0,0 +1,2190 @@
+//===--- ParseStmt.cpp - Statement and Block Parser -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Statement and Block portions of the Parser
+// interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "RAIIObjectsForParser.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/Basic/Attributes.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/LoopHint.h"
+#include "clang/Sema/PrettyDeclStackTrace.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/TypoCorrection.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// C99 6.8: Statements and Blocks.
+//===----------------------------------------------------------------------===//
+
+/// \brief Parse a standalone statement (for instance, as the body of an 'if',
+/// 'while', or 'for').
+StmtResult Parser::ParseStatement(SourceLocation *TrailingElseLoc) {
+ StmtResult Res;
+
+ // We may get back a null statement if we found a #pragma. Keep going until
+ // we get an actual statement.
+ do {
+ StmtVector Stmts;
+ Res = ParseStatementOrDeclaration(Stmts, true, TrailingElseLoc);
+ } while (!Res.isInvalid() && !Res.get());
+
+ return Res;
+}
+
+/// ParseStatementOrDeclaration - Read 'statement' or 'declaration'.
+/// StatementOrDeclaration:
+/// statement
+/// declaration
+///
+/// statement:
+/// labeled-statement
+/// compound-statement
+/// expression-statement
+/// selection-statement
+/// iteration-statement
+/// jump-statement
+/// [C++] declaration-statement
+/// [C++] try-block
+/// [MS] seh-try-block
+/// [OBC] objc-throw-statement
+/// [OBC] objc-try-catch-statement
+/// [OBC] objc-synchronized-statement
+/// [GNU] asm-statement
+/// [OMP] openmp-construct [TODO]
+///
+/// labeled-statement:
+/// identifier ':' statement
+/// 'case' constant-expression ':' statement
+/// 'default' ':' statement
+///
+/// selection-statement:
+/// if-statement
+/// switch-statement
+///
+/// iteration-statement:
+/// while-statement
+/// do-statement
+/// for-statement
+///
+/// expression-statement:
+/// expression[opt] ';'
+///
+/// jump-statement:
+/// 'goto' identifier ';'
+/// 'continue' ';'
+/// 'break' ';'
+/// 'return' expression[opt] ';'
+/// [GNU] 'goto' '*' expression ';'
+///
+/// [OBC] objc-throw-statement:
+/// [OBC] '@' 'throw' expression ';'
+/// [OBC] '@' 'throw' ';'
+///
+StmtResult
+Parser::ParseStatementOrDeclaration(StmtVector &Stmts, bool OnlyStatement,
+ SourceLocation *TrailingElseLoc) {
+
+ ParenBraceBracketBalancer BalancerRAIIObj(*this);
+
+ ParsedAttributesWithRange Attrs(AttrFactory);
+ MaybeParseCXX11Attributes(Attrs, nullptr, /*MightBeObjCMessageSend*/ true);
+
+ StmtResult Res = ParseStatementOrDeclarationAfterAttributes(Stmts,
+ OnlyStatement, TrailingElseLoc, Attrs);
+
+ assert((Attrs.empty() || Res.isInvalid() || Res.isUsable()) &&
+ "attributes on empty statement");
+
+ if (Attrs.empty() || Res.isInvalid())
+ return Res;
+
+ return Actions.ProcessStmtAttributes(Res.get(), Attrs.getList(), Attrs.Range);
+}
+
+namespace {
+class StatementFilterCCC : public CorrectionCandidateCallback {
+public:
+ StatementFilterCCC(Token nextTok) : NextToken(nextTok) {
+ WantTypeSpecifiers = nextTok.isOneOf(tok::l_paren, tok::less, tok::l_square,
+ tok::identifier, tok::star, tok::amp);
+ WantExpressionKeywords =
+ nextTok.isOneOf(tok::l_paren, tok::identifier, tok::arrow, tok::period);
+ WantRemainingKeywords =
+ nextTok.isOneOf(tok::l_paren, tok::semi, tok::identifier, tok::l_brace);
+ WantCXXNamedCasts = false;
+ }
+
+ bool ValidateCandidate(const TypoCorrection &candidate) override {
+ if (FieldDecl *FD = candidate.getCorrectionDeclAs<FieldDecl>())
+ return !candidate.getCorrectionSpecifier() || isa<ObjCIvarDecl>(FD);
+ if (NextToken.is(tok::equal))
+ return candidate.getCorrectionDeclAs<VarDecl>();
+ if (NextToken.is(tok::period) &&
+ candidate.getCorrectionDeclAs<NamespaceDecl>())
+ return false;
+ return CorrectionCandidateCallback::ValidateCandidate(candidate);
+ }
+
+private:
+ Token NextToken;
+};
+} // end anonymous namespace
+
+StmtResult
+Parser::ParseStatementOrDeclarationAfterAttributes(StmtVector &Stmts,
+ bool OnlyStatement, SourceLocation *TrailingElseLoc,
+ ParsedAttributesWithRange &Attrs) {
+ const char *SemiError = nullptr;
+ StmtResult Res;
+
+ // Cases in this switch statement should fall through if the parser expects
+ // the statement to end in a semicolon (in which case SemiError should be
+ // set), or they directly 'return;' if not.
+Retry:
+ tok::TokenKind Kind = Tok.getKind();
+ SourceLocation AtLoc;
+ switch (Kind) {
+ case tok::at: // May be a @try or @throw statement
+ {
+ ProhibitAttributes(Attrs); // TODO: is it correct?
+ AtLoc = ConsumeToken(); // consume @
+ return ParseObjCAtStatement(AtLoc);
+ }
+
+ case tok::code_completion:
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Statement);
+ cutOffParsing();
+ return StmtError();
+
+ case tok::identifier: {
+ Token Next = NextToken();
+ if (Next.is(tok::colon)) { // C99 6.8.1: labeled-statement
+ // identifier ':' statement
+ return ParseLabeledStatement(Attrs);
+ }
+
+ // Look up the identifier, and typo-correct it to a keyword if it's not
+ // found.
+ if (Next.isNot(tok::coloncolon)) {
+ // Try to limit which sets of keywords should be included in typo
+ // correction based on what the next token is.
+ if (TryAnnotateName(/*IsAddressOfOperand*/ false,
+ llvm::make_unique<StatementFilterCCC>(Next)) ==
+ ANK_Error) {
+ // Handle errors here by skipping up to the next semicolon or '}', and
+ // eat the semicolon if that's what stopped us.
+ SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ return StmtError();
+ }
+
+ // If the identifier was typo-corrected, try again.
+ if (Tok.isNot(tok::identifier))
+ goto Retry;
+ }
+
+ // Fall through
+ }
+
+ default: {
+ if ((getLangOpts().CPlusPlus || !OnlyStatement) && isDeclarationStatement()) {
+ SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
+ DeclGroupPtrTy Decl = ParseDeclaration(Declarator::BlockContext,
+ DeclEnd, Attrs);
+ return Actions.ActOnDeclStmt(Decl, DeclStart, DeclEnd);
+ }
+
+ if (Tok.is(tok::r_brace)) {
+ Diag(Tok, diag::err_expected_statement);
+ return StmtError();
+ }
+
+ return ParseExprStatement();
+ }
+
+ case tok::kw_case: // C99 6.8.1: labeled-statement
+ return ParseCaseStatement();
+ case tok::kw_default: // C99 6.8.1: labeled-statement
+ return ParseDefaultStatement();
+
+ case tok::l_brace: // C99 6.8.2: compound-statement
+ return ParseCompoundStatement();
+ case tok::semi: { // C99 6.8.3p3: expression[opt] ';'
+ bool HasLeadingEmptyMacro = Tok.hasLeadingEmptyMacro();
+ return Actions.ActOnNullStmt(ConsumeToken(), HasLeadingEmptyMacro);
+ }
+
+ case tok::kw_if: // C99 6.8.4.1: if-statement
+ return ParseIfStatement(TrailingElseLoc);
+ case tok::kw_switch: // C99 6.8.4.2: switch-statement
+ return ParseSwitchStatement(TrailingElseLoc);
+
+ case tok::kw_while: // C99 6.8.5.1: while-statement
+ return ParseWhileStatement(TrailingElseLoc);
+ case tok::kw_do: // C99 6.8.5.2: do-statement
+ Res = ParseDoStatement();
+ SemiError = "do/while";
+ break;
+ case tok::kw_for: // C99 6.8.5.3: for-statement
+ return ParseForStatement(TrailingElseLoc);
+
+ case tok::kw_goto: // C99 6.8.6.1: goto-statement
+ Res = ParseGotoStatement();
+ SemiError = "goto";
+ break;
+ case tok::kw_continue: // C99 6.8.6.2: continue-statement
+ Res = ParseContinueStatement();
+ SemiError = "continue";
+ break;
+ case tok::kw_break: // C99 6.8.6.3: break-statement
+ Res = ParseBreakStatement();
+ SemiError = "break";
+ break;
+ case tok::kw_return: // C99 6.8.6.4: return-statement
+ Res = ParseReturnStatement();
+ SemiError = "return";
+ break;
+ case tok::kw_co_return: // C++ Coroutines: co_return statement
+ Res = ParseReturnStatement();
+ SemiError = "co_return";
+ break;
+
+ case tok::kw_asm: {
+ ProhibitAttributes(Attrs);
+ bool msAsm = false;
+ Res = ParseAsmStatement(msAsm);
+ Res = Actions.ActOnFinishFullStmt(Res.get());
+ if (msAsm) return Res;
+ SemiError = "asm";
+ break;
+ }
+
+ case tok::kw___if_exists:
+ case tok::kw___if_not_exists:
+ ProhibitAttributes(Attrs);
+ ParseMicrosoftIfExistsStatement(Stmts);
+ // An __if_exists block is like a compound statement, but it doesn't create
+ // a new scope.
+ return StmtEmpty();
+
+ case tok::kw_try: // C++ 15: try-block
+ return ParseCXXTryBlock();
+
+ case tok::kw___try:
+ ProhibitAttributes(Attrs); // TODO: is it correct?
+ return ParseSEHTryBlock();
+
+ case tok::kw___leave:
+ Res = ParseSEHLeaveStatement();
+ SemiError = "__leave";
+ break;
+
+ case tok::annot_pragma_vis:
+ ProhibitAttributes(Attrs);
+ HandlePragmaVisibility();
+ return StmtEmpty();
+
+ case tok::annot_pragma_pack:
+ ProhibitAttributes(Attrs);
+ HandlePragmaPack();
+ return StmtEmpty();
+
+ case tok::annot_pragma_msstruct:
+ ProhibitAttributes(Attrs);
+ HandlePragmaMSStruct();
+ return StmtEmpty();
+
+ case tok::annot_pragma_align:
+ ProhibitAttributes(Attrs);
+ HandlePragmaAlign();
+ return StmtEmpty();
+
+ case tok::annot_pragma_weak:
+ ProhibitAttributes(Attrs);
+ HandlePragmaWeak();
+ return StmtEmpty();
+
+ case tok::annot_pragma_weakalias:
+ ProhibitAttributes(Attrs);
+ HandlePragmaWeakAlias();
+ return StmtEmpty();
+
+ case tok::annot_pragma_redefine_extname:
+ ProhibitAttributes(Attrs);
+ HandlePragmaRedefineExtname();
+ return StmtEmpty();
+
+ case tok::annot_pragma_fp_contract:
+ ProhibitAttributes(Attrs);
+ Diag(Tok, diag::err_pragma_fp_contract_scope);
+ ConsumeToken();
+ return StmtError();
+
+ case tok::annot_pragma_opencl_extension:
+ ProhibitAttributes(Attrs);
+ HandlePragmaOpenCLExtension();
+ return StmtEmpty();
+
+ case tok::annot_pragma_captured:
+ ProhibitAttributes(Attrs);
+ return HandlePragmaCaptured();
+
+ case tok::annot_pragma_openmp:
+ ProhibitAttributes(Attrs);
+ return ParseOpenMPDeclarativeOrExecutableDirective(!OnlyStatement);
+
+ case tok::annot_pragma_ms_pointers_to_members:
+ ProhibitAttributes(Attrs);
+ HandlePragmaMSPointersToMembers();
+ return StmtEmpty();
+
+ case tok::annot_pragma_ms_pragma:
+ ProhibitAttributes(Attrs);
+ HandlePragmaMSPragma();
+ return StmtEmpty();
+
+ case tok::annot_pragma_ms_vtordisp:
+ ProhibitAttributes(Attrs);
+ HandlePragmaMSVtorDisp();
+ return StmtEmpty();
+
+ case tok::annot_pragma_loop_hint:
+ ProhibitAttributes(Attrs);
+ return ParsePragmaLoopHint(Stmts, OnlyStatement, TrailingElseLoc, Attrs);
+ }
+
+ // If we reached this code, the statement must end in a semicolon.
+ if (!TryConsumeToken(tok::semi) && !Res.isInvalid()) {
+ // If the result was valid, then we do want to diagnose this. Use
+ // ExpectAndConsume to emit the diagnostic, even though we know it won't
+ // succeed.
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after_stmt, SemiError);
+ // Skip until we see a } or ;, but don't eat it.
+ SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
+ }
+
+ return Res;
+}
+
+/// \brief Parse an expression statement.
+StmtResult Parser::ParseExprStatement() {
+ // If a case keyword is missing, this is where it should be inserted.
+ Token OldToken = Tok;
+
+ // expression[opt] ';'
+ ExprResult Expr(ParseExpression());
+ if (Expr.isInvalid()) {
+ // If the expression is invalid, skip ahead to the next semicolon or '}'.
+ // Not doing this opens us up to the possibility of infinite loops if
+ // ParseExpression does not consume any tokens.
+ SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ return Actions.ActOnExprStmtError();
+ }
+
+ if (Tok.is(tok::colon) && getCurScope()->isSwitchScope() &&
+ Actions.CheckCaseExpression(Expr.get())) {
+ // If a constant expression is followed by a colon inside a switch block,
+ // suggest a missing case keyword.
+ Diag(OldToken, diag::err_expected_case_before_expression)
+ << FixItHint::CreateInsertion(OldToken.getLocation(), "case ");
+
+ // Recover parsing as a case statement.
+ return ParseCaseStatement(/*MissingCase=*/true, Expr);
+ }
+
+ // Otherwise, eat the semicolon.
+ ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
+ return Actions.ActOnExprStmt(Expr);
+}
+
+/// ParseSEHTryBlockCommon
+///
+/// seh-try-block:
+/// '__try' compound-statement seh-handler
+///
+/// seh-handler:
+/// seh-except-block
+/// seh-finally-block
+///
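+/// For example, a minimal sketch (MayFault and Recover are placeholders):
+/// \code
+/// __try {
+///   MayFault();
+/// } __except (EXCEPTION_EXECUTE_HANDLER) {
+///   Recover();
+/// }
+/// \endcode
+///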
+StmtResult Parser::ParseSEHTryBlock() {
+ assert(Tok.is(tok::kw___try) && "Expected '__try'");
+ SourceLocation TryLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::l_brace))
+ return StmtError(Diag(Tok, diag::err_expected) << tok::l_brace);
+
+ StmtResult TryBlock(ParseCompoundStatement(/*isStmtExpr=*/false,
+ Scope::DeclScope | Scope::SEHTryScope));
+ if (TryBlock.isInvalid())
+ return TryBlock;
+
+ StmtResult Handler;
+ if (Tok.is(tok::identifier) &&
+ Tok.getIdentifierInfo() == getSEHExceptKeyword()) {
+ SourceLocation Loc = ConsumeToken();
+ Handler = ParseSEHExceptBlock(Loc);
+ } else if (Tok.is(tok::kw___finally)) {
+ SourceLocation Loc = ConsumeToken();
+ Handler = ParseSEHFinallyBlock(Loc);
+ } else {
+ return StmtError(Diag(Tok, diag::err_seh_expected_handler));
+ }
+
+ if (Handler.isInvalid())
+ return Handler;
+
+ return Actions.ActOnSEHTryBlock(false /* IsCXXTry */,
+ TryLoc,
+ TryBlock.get(),
+ Handler.get());
+}
+
+/// ParseSEHExceptBlock - Handle __except
+///
+/// seh-except-block:
+/// '__except' '(' seh-filter-expression ')' compound-statement
+///
+StmtResult Parser::ParseSEHExceptBlock(SourceLocation ExceptLoc) {
+ PoisonIdentifierRAIIObject raii(Ident__exception_code, false),
+ raii2(Ident___exception_code, false),
+ raii3(Ident_GetExceptionCode, false);
+
+ if (ExpectAndConsume(tok::l_paren))
+ return StmtError();
+
+ ParseScope ExpectScope(this, Scope::DeclScope | Scope::ControlScope |
+ Scope::SEHExceptScope);
+
+ if (getLangOpts().Borland) {
+ Ident__exception_info->setIsPoisoned(false);
+ Ident___exception_info->setIsPoisoned(false);
+ Ident_GetExceptionInfo->setIsPoisoned(false);
+ }
+
+ ExprResult FilterExpr;
+ {
+ ParseScopeFlags FilterScope(this, getCurScope()->getFlags() |
+ Scope::SEHFilterScope);
+ FilterExpr = Actions.CorrectDelayedTyposInExpr(ParseExpression());
+ }
+
+ if (getLangOpts().Borland) {
+ Ident__exception_info->setIsPoisoned(true);
+ Ident___exception_info->setIsPoisoned(true);
+ Ident_GetExceptionInfo->setIsPoisoned(true);
+ }
+
+ if (FilterExpr.isInvalid())
+ return StmtError();
+
+ if (ExpectAndConsume(tok::r_paren))
+ return StmtError();
+
+ if (Tok.isNot(tok::l_brace))
+ return StmtError(Diag(Tok, diag::err_expected) << tok::l_brace);
+
+ StmtResult Block(ParseCompoundStatement());
+
+ if (Block.isInvalid())
+ return Block;
+
+ return Actions.ActOnSEHExceptBlock(ExceptLoc, FilterExpr.get(), Block.get());
+}
+
+/// ParseSEHFinallyBlock - Handle __finally
+///
+/// seh-finally-block:
+/// '__finally' compound-statement
+///
+StmtResult Parser::ParseSEHFinallyBlock(SourceLocation FinallyLoc) {
+ PoisonIdentifierRAIIObject raii(Ident__abnormal_termination, false),
+ raii2(Ident___abnormal_termination, false),
+ raii3(Ident_AbnormalTermination, false);
+
+ if (Tok.isNot(tok::l_brace))
+ return StmtError(Diag(Tok, diag::err_expected) << tok::l_brace);
+
+ ParseScope FinallyScope(this, 0);
+ Actions.ActOnStartSEHFinallyBlock();
+
+ StmtResult Block(ParseCompoundStatement());
+ if (Block.isInvalid()) {
+ Actions.ActOnAbortSEHFinallyBlock();
+ return Block;
+ }
+
+ return Actions.ActOnFinishSEHFinallyBlock(FinallyLoc, Block.get());
+}
+
+/// Handle __leave
+///
+/// seh-leave-statement:
+/// '__leave' ';'
+///
+StmtResult Parser::ParseSEHLeaveStatement() {
+ SourceLocation LeaveLoc = ConsumeToken(); // eat the '__leave'.
+ return Actions.ActOnSEHLeaveStmt(LeaveLoc, getCurScope());
+}
+
+/// ParseLabeledStatement - We have an identifier and a ':' after it.
+///
+/// labeled-statement:
+/// identifier ':' statement
+/// [GNU] identifier ':' attributes[opt] statement
+///
+StmtResult Parser::ParseLabeledStatement(ParsedAttributesWithRange &attrs) {
+ assert(Tok.is(tok::identifier) && Tok.getIdentifierInfo() &&
+ "Not an identifier!");
+
+ Token IdentTok = Tok; // Save the whole token.
+ ConsumeToken(); // eat the identifier.
+
+ assert(Tok.is(tok::colon) && "Not a label!");
+
+ // identifier ':' statement
+ SourceLocation ColonLoc = ConsumeToken();
+
+ // Read label attributes, if present.
+ StmtResult SubStmt;
+ if (Tok.is(tok::kw___attribute)) {
+ ParsedAttributesWithRange TempAttrs(AttrFactory);
+ ParseGNUAttributes(TempAttrs);
+
+ // In C++, GNU attributes only apply to the label if they are followed by a
+ // semicolon, to disambiguate label attributes from attributes on a labeled
+ // declaration.
+ //
+ // This doesn't quite match what GCC does; if the attribute list is empty
+ // and followed by a semicolon, GCC will reject (it appears to parse the
+ // attributes as part of a statement in that case). That looks like a bug.
+ if (!getLangOpts().CPlusPlus || Tok.is(tok::semi))
+ attrs.takeAllFrom(TempAttrs);
+ else if (isDeclarationStatement()) {
+ StmtVector Stmts;
+ // FIXME: We should do this whether or not we have a declaration
+ // statement, but that doesn't work correctly (because ProhibitAttributes
+ // can't handle GNU attributes), so only call it in the one case where
+ // GNU attributes are allowed.
+ SubStmt = ParseStatementOrDeclarationAfterAttributes(
+ Stmts, /*OnlyStmts*/ true, nullptr, TempAttrs);
+ if (!TempAttrs.empty() && !SubStmt.isInvalid())
+ SubStmt = Actions.ProcessStmtAttributes(
+ SubStmt.get(), TempAttrs.getList(), TempAttrs.Range);
+ } else {
+ Diag(Tok, diag::err_expected_after) << "__attribute__" << tok::semi;
+ }
+ }
+
+ // If we've not parsed a statement yet, parse one now.
+ if (!SubStmt.isInvalid() && !SubStmt.isUsable())
+ SubStmt = ParseStatement();
+
+ // Broken substmt shouldn't prevent the label from being added to the AST.
+ if (SubStmt.isInvalid())
+ SubStmt = Actions.ActOnNullStmt(ColonLoc);
+
+ LabelDecl *LD = Actions.LookupOrCreateLabel(IdentTok.getIdentifierInfo(),
+ IdentTok.getLocation());
+ if (AttributeList *Attrs = attrs.getList()) {
+ Actions.ProcessDeclAttributeList(Actions.CurScope, LD, Attrs);
+ attrs.clear();
+ }
+
+ return Actions.ActOnLabelStmt(IdentTok.getLocation(), LD, ColonLoc,
+ SubStmt.get());
+}
+
+/// ParseCaseStatement
+/// labeled-statement:
+/// 'case' constant-expression ':' statement
+/// [GNU] 'case' constant-expression '...' constant-expression ':' statement
+///
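+/// For example, the GNU range form accepts:
+/// \code
+/// case 'a' ... 'z': return true;
+/// \endcode
+///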
+StmtResult Parser::ParseCaseStatement(bool MissingCase, ExprResult Expr) {
+ assert((MissingCase || Tok.is(tok::kw_case)) && "Not a case stmt!");
+
+ // It is very common for code to contain many case statements recursively
+ // nested, as in (but usually without indentation):
+ // case 1:
+ // case 2:
+ // case 3:
+ // case 4:
+ // case 5: etc.
+ //
+ // Parsing this naively works, but is inefficient and can cause us to run
+ // out of stack space in our recursive descent parser. As a special case,
+ // flatten this recursion into an iterative loop. This is complex and gross,
+ // but all the grossness is constrained to ParseCaseStatement (and some
+ // weirdness in the actions), so this is just local grossness :).
+
+ // TopLevelCase - This is the highest level we have parsed. 'case 1' in the
+ // example above.
+ StmtResult TopLevelCase(true);
+
+ // DeepestParsedCaseStmt - This is the deepest statement we have parsed, which
+ // gets updated each time a new case is parsed, and whose body is unset so
+ // far. When parsing 'case 4', this is the 'case 3' node.
+ Stmt *DeepestParsedCaseStmt = nullptr;
+
+ // While we have case statements, eat and stack them.
+ SourceLocation ColonLoc;
+ do {
+ SourceLocation CaseLoc = MissingCase ? Expr.get()->getExprLoc() :
+ ConsumeToken(); // eat the 'case'.
+ ColonLoc = SourceLocation();
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteCase(getCurScope());
+ cutOffParsing();
+ return StmtError();
+ }
+
+ // We don't want to treat 'case x : y' as a potential typo for 'case x::y'.
+ // Disable this form of error recovery while we're parsing the case
+ // expression.
+ ColonProtectionRAIIObject ColonProtection(*this);
+
+ ExprResult LHS;
+ if (!MissingCase) {
+ LHS = ParseConstantExpression();
+ if (!getLangOpts().CPlusPlus11) {
+ LHS = Actions.CorrectDelayedTyposInExpr(LHS, [this](class Expr *E) {
+ return Actions.VerifyIntegerConstantExpression(E);
+ });
+ }
+ if (LHS.isInvalid()) {
+ // If constant-expression is parsed unsuccessfully, recover by skipping
+ // current case statement (moving to the colon that ends it).
+ if (SkipUntil(tok::colon, tok::r_brace, StopAtSemi | StopBeforeMatch)) {
+ TryConsumeToken(tok::colon, ColonLoc);
+ continue;
+ }
+ return StmtError();
+ }
+ } else {
+ LHS = Expr;
+ MissingCase = false;
+ }
+
+ // GNU case range extension.
+ SourceLocation DotDotDotLoc;
+ ExprResult RHS;
+ if (TryConsumeToken(tok::ellipsis, DotDotDotLoc)) {
+ Diag(DotDotDotLoc, diag::ext_gnu_case_range);
+ RHS = ParseConstantExpression();
+ if (RHS.isInvalid()) {
+ if (SkipUntil(tok::colon, tok::r_brace, StopAtSemi | StopBeforeMatch)) {
+ TryConsumeToken(tok::colon, ColonLoc);
+ continue;
+ }
+ return StmtError();
+ }
+ }
+
+ ColonProtection.restore();
+
+ if (TryConsumeToken(tok::colon, ColonLoc)) {
+ } else if (TryConsumeToken(tok::semi, ColonLoc) ||
+ TryConsumeToken(tok::coloncolon, ColonLoc)) {
+ // Treat "case blah;" or "case blah::" as a typo for "case blah:".
+ Diag(ColonLoc, diag::err_expected_after)
+ << "'case'" << tok::colon
+ << FixItHint::CreateReplacement(ColonLoc, ":");
+ } else {
+ SourceLocation ExpectedLoc = PP.getLocForEndOfToken(PrevTokLocation);
+ Diag(ExpectedLoc, diag::err_expected_after)
+ << "'case'" << tok::colon
+ << FixItHint::CreateInsertion(ExpectedLoc, ":");
+ ColonLoc = ExpectedLoc;
+ }
+
+ StmtResult Case =
+ Actions.ActOnCaseStmt(CaseLoc, LHS.get(), DotDotDotLoc,
+ RHS.get(), ColonLoc);
+
+ // If we had a sema error parsing this case, then just ignore it and
+ // continue parsing the sub-stmt.
+ if (Case.isInvalid()) {
+ if (TopLevelCase.isInvalid()) // No parsed case stmts.
+ return ParseStatement();
+ // Otherwise, just don't add it as a nested case.
+ } else {
+ // If this is the first case statement we parsed, it becomes TopLevelCase.
+ // Otherwise we link it into the current chain.
+ Stmt *NextDeepest = Case.get();
+ if (TopLevelCase.isInvalid())
+ TopLevelCase = Case;
+ else
+ Actions.ActOnCaseStmtBody(DeepestParsedCaseStmt, Case.get());
+ DeepestParsedCaseStmt = NextDeepest;
+ }
+
+ // Handle all case statements.
+ } while (Tok.is(tok::kw_case));
+
+ // If we found a non-case statement, start by parsing it.
+ StmtResult SubStmt;
+
+ if (Tok.isNot(tok::r_brace)) {
+ SubStmt = ParseStatement();
+ } else {
+ // Nicely diagnose the common error "switch (X) { case 4: }", which is
+ // not valid. If ColonLoc doesn't point to a valid text location, there was
+ // another parsing error, so avoid producing extra diagnostics.
+ if (ColonLoc.isValid()) {
+ SourceLocation AfterColonLoc = PP.getLocForEndOfToken(ColonLoc);
+ Diag(AfterColonLoc, diag::err_label_end_of_compound_statement)
+ << FixItHint::CreateInsertion(AfterColonLoc, " ;");
+ }
+ SubStmt = StmtError();
+ }
+
+ // Install the body into the most deeply-nested case.
+ if (DeepestParsedCaseStmt) {
+ // Broken sub-stmt shouldn't prevent forming the case statement properly.
+ if (SubStmt.isInvalid())
+ SubStmt = Actions.ActOnNullStmt(SourceLocation());
+ Actions.ActOnCaseStmtBody(DeepestParsedCaseStmt, SubStmt.get());
+ }
+
+ // Return the top level parsed statement tree.
+ return TopLevelCase;
+}
+
+/// ParseDefaultStatement
+/// labeled-statement:
+/// 'default' ':' statement
+/// Note that this does not parse the 'statement' at the end.
+///
+StmtResult Parser::ParseDefaultStatement() {
+ assert(Tok.is(tok::kw_default) && "Not a default stmt!");
+ SourceLocation DefaultLoc = ConsumeToken(); // eat the 'default'.
+
+ SourceLocation ColonLoc;
+ if (TryConsumeToken(tok::colon, ColonLoc)) {
+ } else if (TryConsumeToken(tok::semi, ColonLoc)) {
+ // Treat "default;" as a typo for "default:".
+ Diag(ColonLoc, diag::err_expected_after)
+ << "'default'" << tok::colon
+ << FixItHint::CreateReplacement(ColonLoc, ":");
+ } else {
+ SourceLocation ExpectedLoc = PP.getLocForEndOfToken(PrevTokLocation);
+ Diag(ExpectedLoc, diag::err_expected_after)
+ << "'default'" << tok::colon
+ << FixItHint::CreateInsertion(ExpectedLoc, ":");
+ ColonLoc = ExpectedLoc;
+ }
+
+ StmtResult SubStmt;
+
+ if (Tok.isNot(tok::r_brace)) {
+ SubStmt = ParseStatement();
+ } else {
+ // Diagnose the common error "switch (X) {... default: }", which is
+ // not valid.
+ SourceLocation AfterColonLoc = PP.getLocForEndOfToken(ColonLoc);
+ Diag(AfterColonLoc, diag::err_label_end_of_compound_statement)
+ << FixItHint::CreateInsertion(AfterColonLoc, " ;");
+ SubStmt = true; // Mark invalid; replaced with a null stmt below.
+ }
+
+ // Broken sub-stmt shouldn't prevent forming the case statement properly.
+ if (SubStmt.isInvalid())
+ SubStmt = Actions.ActOnNullStmt(ColonLoc);
+
+ return Actions.ActOnDefaultStmt(DefaultLoc, ColonLoc,
+ SubStmt.get(), getCurScope());
+}
+
+StmtResult Parser::ParseCompoundStatement(bool isStmtExpr) {
+ return ParseCompoundStatement(isStmtExpr, Scope::DeclScope);
+}
+
+/// ParseCompoundStatement - Parse a "{}" block.
+///
+/// compound-statement: [C99 6.8.2]
+/// { block-item-list[opt] }
+/// [GNU] { label-declarations block-item-list } [TODO]
+///
+/// block-item-list:
+/// block-item
+/// block-item-list block-item
+///
+/// block-item:
+/// declaration
+/// [GNU] '__extension__' declaration
+/// statement
+///
+/// [GNU] label-declarations:
+/// [GNU] label-declaration
+/// [GNU] label-declarations label-declaration
+///
+/// [GNU] label-declaration:
+/// [GNU] '__label__' identifier-list ';'
+///
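+/// For example, the GNU local-label extension reads (init and cleanup are
+/// placeholders):
+/// \code
+/// {
+///   __label__ failure;
+///   if (!init()) goto failure;
+///   return;
+///   failure: cleanup();
+/// }
+/// \endcode
+///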
+StmtResult Parser::ParseCompoundStatement(bool isStmtExpr,
+ unsigned ScopeFlags) {
+ assert(Tok.is(tok::l_brace) && "Not a compound stmt!");
+
+ // Enter a scope to hold everything within the compound stmt. Compound
+ // statements can always hold declarations.
+ ParseScope CompoundScope(this, ScopeFlags);
+
+ // Parse the statements in the body.
+ return ParseCompoundStatementBody(isStmtExpr);
+}
+
+/// Parse any pragmas at the start of the compound statement. We handle these
+/// separately since some pragmas (FP_CONTRACT) must appear before any C
+/// statement in the compound, but may be intermingled with other pragmas.
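+///
+/// For example (illustrative), this STDC pragma is only valid because it
+/// precedes the first statement of the block:
+/// \code
+/// double fma_like(double a, double b, double c) {
+///   #pragma STDC FP_CONTRACT ON
+///   return a * b + c;
+/// }
+/// \endcode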
+void Parser::ParseCompoundStatementLeadingPragmas() {
+ bool checkForPragmas = true;
+ while (checkForPragmas) {
+ switch (Tok.getKind()) {
+ case tok::annot_pragma_vis:
+ HandlePragmaVisibility();
+ break;
+ case tok::annot_pragma_pack:
+ HandlePragmaPack();
+ break;
+ case tok::annot_pragma_msstruct:
+ HandlePragmaMSStruct();
+ break;
+ case tok::annot_pragma_align:
+ HandlePragmaAlign();
+ break;
+ case tok::annot_pragma_weak:
+ HandlePragmaWeak();
+ break;
+ case tok::annot_pragma_weakalias:
+ HandlePragmaWeakAlias();
+ break;
+ case tok::annot_pragma_redefine_extname:
+ HandlePragmaRedefineExtname();
+ break;
+ case tok::annot_pragma_opencl_extension:
+ HandlePragmaOpenCLExtension();
+ break;
+ case tok::annot_pragma_fp_contract:
+ HandlePragmaFPContract();
+ break;
+ case tok::annot_pragma_ms_pointers_to_members:
+ HandlePragmaMSPointersToMembers();
+ break;
+ case tok::annot_pragma_ms_pragma:
+ HandlePragmaMSPragma();
+ break;
+ case tok::annot_pragma_ms_vtordisp:
+ HandlePragmaMSVtorDisp();
+ break;
+ default:
+ checkForPragmas = false;
+ break;
+ }
+ }
+}
+
+/// ParseCompoundStatementBody - Parse a sequence of statements and invoke the
+/// ActOnCompoundStmt action. This expects the '{' to be the current token, and
+/// consume the '}' at the end of the block. It does not manipulate the scope
+/// stack.
+StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
+ PrettyStackTraceLoc CrashInfo(PP.getSourceManager(),
+ Tok.getLocation(),
+ "in compound statement ('{}')");
+
+ // Record the state of the FP_CONTRACT pragma, restore on leaving the
+ // compound statement.
+ Sema::FPContractStateRAII SaveFPContractState(Actions);
+
+ InMessageExpressionRAIIObject InMessage(*this, false);
+ BalancedDelimiterTracker T(*this, tok::l_brace);
+ if (T.consumeOpen())
+ return StmtError();
+
+ Sema::CompoundScopeRAII CompoundScope(Actions);
+
+ // Parse any pragmas at the beginning of the compound statement.
+ ParseCompoundStatementLeadingPragmas();
+
+ StmtVector Stmts;
+
+ // "__label__ X, Y, Z;" is the GNU "Local Label" extension. These are
+ // only allowed at the start of a compound stmt regardless of the language.
+ while (Tok.is(tok::kw___label__)) {
+ SourceLocation LabelLoc = ConsumeToken();
+
+ SmallVector<Decl *, 8> DeclsInGroup;
+ while (1) {
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ break;
+ }
+
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ SourceLocation IdLoc = ConsumeToken();
+ DeclsInGroup.push_back(Actions.LookupOrCreateLabel(II, IdLoc, LabelLoc));
+
+ if (!TryConsumeToken(tok::comma))
+ break;
+ }
+
+ DeclSpec DS(AttrFactory);
+ DeclGroupPtrTy Res =
+ Actions.FinalizeDeclaratorGroup(getCurScope(), DS, DeclsInGroup);
+ StmtResult R = Actions.ActOnDeclStmt(Res, LabelLoc, Tok.getLocation());
+
+ ExpectAndConsumeSemi(diag::err_expected_semi_declaration);
+ if (R.isUsable())
+ Stmts.push_back(R.get());
+ }
+
+ while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
+ Tok.isNot(tok::eof)) {
+ if (Tok.is(tok::annot_pragma_unused)) {
+ HandlePragmaUnused();
+ continue;
+ }
+
+ StmtResult R;
+ if (Tok.isNot(tok::kw___extension__)) {
+ R = ParseStatementOrDeclaration(Stmts, false);
+ } else {
+ // __extension__ can start declarations and it can also be a unary
+ // operator for expressions. Consume multiple __extension__ markers here
+ // until we can determine which is which.
+ // FIXME: This loses extension expressions in the AST!
+ SourceLocation ExtLoc = ConsumeToken();
+ while (Tok.is(tok::kw___extension__))
+ ConsumeToken();
+
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX11Attributes(attrs, nullptr,
+ /*MightBeObjCMessageSend*/ true);
+
+ // If this is the start of a declaration, parse it as such.
+ if (isDeclarationStatement()) {
+ // __extension__ silences extension warnings in the subdeclaration.
+ // FIXME: Save the __extension__ on the decl as a node somehow?
+ ExtensionRAIIObject O(Diags);
+
+ SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
+ DeclGroupPtrTy Res = ParseDeclaration(Declarator::BlockContext, DeclEnd,
+ attrs);
+ R = Actions.ActOnDeclStmt(Res, DeclStart, DeclEnd);
+ } else {
+ // Otherwise this was a unary __extension__ marker.
+ ExprResult Res(ParseExpressionWithLeadingExtension(ExtLoc));
+
+ if (Res.isInvalid()) {
+ SkipUntil(tok::semi);
+ continue;
+ }
+
+ // FIXME: Use attributes?
+ // Eat the semicolon at the end of stmt and convert the expr into a
+ // statement.
+ ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
+ R = Actions.ActOnExprStmt(Res);
+ }
+ }
+
+ if (R.isUsable())
+ Stmts.push_back(R.get());
+ }
+
+ SourceLocation CloseLoc = Tok.getLocation();
+
+ // We broke out of the while loop because we found a '}' or EOF.
+ if (!T.consumeClose())
+ // Recover by creating a compound statement with what we parsed so far,
+ // instead of dropping everything and returning StmtError();
+ CloseLoc = T.getCloseLocation();
+
+ return Actions.ActOnCompoundStmt(T.getOpenLocation(), CloseLoc,
+ Stmts, isStmtExpr);
+}
+
+/// ParseParenExprOrCondition:
+/// [C ] '(' expression ')'
+/// [C++] '(' condition ')' [not allowed if OnlyAllowCondition=true]
+///
+/// This function parses and performs error recovery on the specified condition
+/// or expression (depending on whether we're in C++ or C mode). This function
+/// goes out of its way to recover well. It returns true if there was a parser
+/// error (the right paren couldn't be found), which indicates that the caller
+/// should try to recover harder. It returns false if the condition is
+/// successfully parsed. Note that a successful parse can still have semantic
+/// errors in the condition.
+bool Parser::ParseParenExprOrCondition(ExprResult &ExprResult,
+ Decl *&DeclResult,
+ SourceLocation Loc,
+ bool ConvertToBoolean) {
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ if (getLangOpts().CPlusPlus)
+ ParseCXXCondition(ExprResult, DeclResult, Loc, ConvertToBoolean);
+ else {
+ ExprResult = ParseExpression();
+ DeclResult = nullptr;
+
+ // If required, convert to a boolean value.
+ if (!ExprResult.isInvalid() && ConvertToBoolean)
+ ExprResult
+ = Actions.ActOnBooleanCondition(getCurScope(), Loc, ExprResult.get());
+ }
+
+ // If the parser was confused by the condition and we don't have a ')', try to
+ // recover by skipping ahead to a semi and bailing out. If condexp is
+ // semantically invalid but we have well formed code, keep going.
+ if (ExprResult.isInvalid() && !DeclResult && Tok.isNot(tok::r_paren)) {
+ SkipUntil(tok::semi);
+ // Skipping may have stopped if it found the containing ')'. If so, we can
+ // continue parsing the if statement.
+ if (Tok.isNot(tok::r_paren))
+ return true;
+ }
+
+ // Otherwise the condition is valid or the rparen is present.
+ T.consumeClose();
+
+ // Check for extraneous ')'s to catch things like "if (foo())) {". We know
+ // that all callers are looking for a statement after the condition, so ")"
+ // isn't valid.
+ while (Tok.is(tok::r_paren)) {
+ Diag(Tok, diag::err_extraneous_rparen_in_condition)
+ << FixItHint::CreateRemoval(Tok.getLocation());
+ ConsumeParen();
+ }
+
+ return false;
+}
+
+
+/// ParseIfStatement
+/// if-statement: [C99 6.8.4.1]
+/// 'if' '(' expression ')' statement
+/// 'if' '(' expression ')' statement 'else' statement
+/// [C++] 'if' '(' condition ')' statement
+/// [C++] 'if' '(' condition ')' statement 'else' statement
+///
+StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
+ assert(Tok.is(tok::kw_if) && "Not an if stmt!");
+ SourceLocation IfLoc = ConsumeToken(); // eat the 'if'.
+
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "if";
+ SkipUntil(tok::semi);
+ return StmtError();
+ }
+
+ bool C99orCXX = getLangOpts().C99 || getLangOpts().CPlusPlus;
+
+ // C99 6.8.4p3 - In C99, the if statement is a block. This is not
+ // the case for C90.
+ //
+ // C++ 6.4p3:
+ // A name introduced by a declaration in a condition is in scope from its
+ // point of declaration until the end of the substatements controlled by the
+ // condition.
+ // C++ 3.3.2p4:
+ // Names declared in the for-init-statement, and in the condition of if,
+ // while, for, and switch statements are local to the if, while, for, or
+ // switch statement (including the controlled statement).
+ //
+ ParseScope IfScope(this, Scope::DeclScope | Scope::ControlScope, C99orCXX);
+
+ // Parse the condition.
+ ExprResult CondExp;
+ Decl *CondVar = nullptr;
+ if (ParseParenExprOrCondition(CondExp, CondVar, IfLoc, true))
+ return StmtError();
+
+ FullExprArg FullCondExp(Actions.MakeFullExpr(CondExp.get(), IfLoc));
+
+ // C99 6.8.4p3 - In C99, the body of the if statement is a scope, even if
+ // there is no compound stmt. C90 does not have this clause. We only do this
+ // if the body isn't a compound statement to avoid push/pop in common cases.
+ //
+ // C++ 6.4p1:
+ // The substatement in a selection-statement (each substatement, in the else
+ // form of the if statement) implicitly defines a local scope.
+ //
+ // For C++ we create a scope for the condition and a new scope for
+ // substatements because:
+ // -When the 'then' scope exits, we want the condition declaration to still be
+ // active for the 'else' scope too.
+ // -Sema will detect name clashes by considering declarations of a
+ // 'ControlScope' as part of its direct subscope.
+ // -If we wanted the condition and substatement to be in the same scope, we
+ // would have to notify ParseStatement not to create a new scope. It's
+ // simpler to let it create a new scope.
+ //
+ ParseScope InnerScope(this, Scope::DeclScope, C99orCXX, Tok.is(tok::l_brace));
+
+ // Read the 'then' stmt.
+ SourceLocation ThenStmtLoc = Tok.getLocation();
+
+ SourceLocation InnerStatementTrailingElseLoc;
+ StmtResult ThenStmt(ParseStatement(&InnerStatementTrailingElseLoc));
+
+ // Pop the 'if' scope if needed.
+ InnerScope.Exit();
+
+ // If it has an else, parse it.
+ SourceLocation ElseLoc;
+ SourceLocation ElseStmtLoc;
+ StmtResult ElseStmt;
+
+ if (Tok.is(tok::kw_else)) {
+ if (TrailingElseLoc)
+ *TrailingElseLoc = Tok.getLocation();
+
+ ElseLoc = ConsumeToken();
+ ElseStmtLoc = Tok.getLocation();
+
+ // C99 6.8.4p3 - In C99, the body of the if statement is a scope, even if
+ // there is no compound stmt. C90 does not have this clause. We only do
+ // this if the body isn't a compound statement to avoid push/pop in common
+ // cases.
+ //
+ // C++ 6.4p1:
+ // The substatement in a selection-statement (each substatement, in the else
+ // form of the if statement) implicitly defines a local scope.
+ //
+ ParseScope InnerScope(this, Scope::DeclScope, C99orCXX, Tok.is(tok::l_brace));
+
+ ElseStmt = ParseStatement();
+
+ // Pop the 'else' scope if needed.
+ InnerScope.Exit();
+ } else if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteAfterIf(getCurScope());
+ cutOffParsing();
+ return StmtError();
+ } else if (InnerStatementTrailingElseLoc.isValid()) {
+ Diag(InnerStatementTrailingElseLoc, diag::warn_dangling_else);
+ }
+
+ IfScope.Exit();
+
+ // If the then or else stmt is invalid and the other is valid (and present),
+ // turn the invalid one into a null stmt to avoid dropping the other part.
+ // If both are invalid, return error.
+ if ((ThenStmt.isInvalid() && ElseStmt.isInvalid()) ||
+ (ThenStmt.isInvalid() && ElseStmt.get() == nullptr) ||
+ (ThenStmt.get() == nullptr && ElseStmt.isInvalid())) {
+ // Both invalid, or one is invalid and other is non-present: return error.
+ return StmtError();
+ }
+
+ // Now if either is invalid, replace it with a ';'.
+ if (ThenStmt.isInvalid())
+ ThenStmt = Actions.ActOnNullStmt(ThenStmtLoc);
+ if (ElseStmt.isInvalid())
+ ElseStmt = Actions.ActOnNullStmt(ElseStmtLoc);
+
+ return Actions.ActOnIfStmt(IfLoc, FullCondExp, CondVar, ThenStmt.get(),
+ ElseLoc, ElseStmt.get());
+}
+
+/// ParseSwitchStatement
+/// switch-statement:
+/// 'switch' '(' expression ')' statement
+/// [C++] 'switch' '(' condition ')' statement
+StmtResult Parser::ParseSwitchStatement(SourceLocation *TrailingElseLoc) {
+ assert(Tok.is(tok::kw_switch) && "Not a switch stmt!");
+ SourceLocation SwitchLoc = ConsumeToken(); // eat the 'switch'.
+
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "switch";
+ SkipUntil(tok::semi);
+ return StmtError();
+ }
+
+ bool C99orCXX = getLangOpts().C99 || getLangOpts().CPlusPlus;
+
+ // C99 6.8.4p3 - In C99, the switch statement is a block. This is
+ // not the case for C90. Start the switch scope.
+ //
+ // C++ 6.4p3:
+ // A name introduced by a declaration in a condition is in scope from its
+ // point of declaration until the end of the substatements controlled by the
+ // condition.
+ // C++ 3.3.2p4:
+ // Names declared in the for-init-statement, and in the condition of if,
+ // while, for, and switch statements are local to the if, while, for, or
+ // switch statement (including the controlled statement).
+ //
+ unsigned ScopeFlags = Scope::SwitchScope;
+ if (C99orCXX)
+ ScopeFlags |= Scope::DeclScope | Scope::ControlScope;
+ ParseScope SwitchScope(this, ScopeFlags);
+
+ // Parse the condition.
+ ExprResult Cond;
+ Decl *CondVar = nullptr;
+ if (ParseParenExprOrCondition(Cond, CondVar, SwitchLoc, false))
+ return StmtError();
+
+ StmtResult Switch
+ = Actions.ActOnStartOfSwitchStmt(SwitchLoc, Cond.get(), CondVar);
+
+ if (Switch.isInvalid()) {
+ // Skip the switch body.
+ // FIXME: This is not optimal recovery, but parsing the body is more
+ // dangerous due to the presence of case and default statements, which
+ // will have no place to connect back with the switch.
+ if (Tok.is(tok::l_brace)) {
+ ConsumeBrace();
+ SkipUntil(tok::r_brace);
+ } else
+ SkipUntil(tok::semi);
+ return Switch;
+ }
+
+ // C99 6.8.4p3 - In C99, the body of the switch statement is a scope, even if
+ // there is no compound stmt. C90 does not have this clause. We only do this
+ // if the body isn't a compound statement to avoid push/pop in common cases.
+ //
+ // C++ 6.4p1:
+ // The substatement in a selection-statement (each substatement, in the else
+ // form of the if statement) implicitly defines a local scope.
+ //
+ // See comments in ParseIfStatement for why we create a scope for the
+ // condition and a new scope for substatement in C++.
+ //
+ getCurScope()->AddFlags(Scope::BreakScope);
+ ParseScope InnerScope(this, Scope::DeclScope, C99orCXX, Tok.is(tok::l_brace));
+
+ // We have incremented the mangling number for the SwitchScope and the
+ // InnerScope, which is one too many.
+ if (C99orCXX)
+ getCurScope()->decrementMSManglingNumber();
+
+ // Read the body statement.
+ StmtResult Body(ParseStatement(TrailingElseLoc));
+
+ // Pop the scopes.
+ InnerScope.Exit();
+ SwitchScope.Exit();
+
+ return Actions.ActOnFinishSwitchStmt(SwitchLoc, Switch.get(), Body.get());
+}
+
+/// ParseWhileStatement
+/// while-statement: [C99 6.8.5.1]
+/// 'while' '(' expression ')' statement
+/// [C++] 'while' '(' condition ')' statement
+StmtResult Parser::ParseWhileStatement(SourceLocation *TrailingElseLoc) {
+ assert(Tok.is(tok::kw_while) && "Not a while stmt!");
+ SourceLocation WhileLoc = Tok.getLocation();
+ ConsumeToken(); // eat the 'while'.
+
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "while";
+ SkipUntil(tok::semi);
+ return StmtError();
+ }
+
+ bool C99orCXX = getLangOpts().C99 || getLangOpts().CPlusPlus;
+
+ // C99 6.8.5p5 - In C99, the while statement is a block. This is not
+ // the case for C90. Start the loop scope.
+ //
+ // C++ 6.4p3:
+ // A name introduced by a declaration in a condition is in scope from its
+ // point of declaration until the end of the substatements controlled by the
+ // condition.
+ // C++ 3.3.2p4:
+ // Names declared in the for-init-statement, and in the condition of if,
+ // while, for, and switch statements are local to the if, while, for, or
+ // switch statement (including the controlled statement).
+ //
+ unsigned ScopeFlags;
+ if (C99orCXX)
+ ScopeFlags = Scope::BreakScope | Scope::ContinueScope |
+ Scope::DeclScope | Scope::ControlScope;
+ else
+ ScopeFlags = Scope::BreakScope | Scope::ContinueScope;
+ ParseScope WhileScope(this, ScopeFlags);
+
+ // Parse the condition.
+ ExprResult Cond;
+ Decl *CondVar = nullptr;
+ if (ParseParenExprOrCondition(Cond, CondVar, WhileLoc, true))
+ return StmtError();
+
+ FullExprArg FullCond(Actions.MakeFullExpr(Cond.get(), WhileLoc));
+
+ // C99 6.8.5p5 - In C99, the body of the while statement is a scope, even if
+ // there is no compound stmt. C90 does not have this clause. We only do this
+ // if the body isn't a compound statement to avoid push/pop in common cases.
+ //
+ // C++ 6.5p2:
+ // The substatement in an iteration-statement implicitly defines a local scope
+ // which is entered and exited each time through the loop.
+ //
+ // See comments in ParseIfStatement for why we create a scope for the
+ // condition and a new scope for substatement in C++.
+ //
+ ParseScope InnerScope(this, Scope::DeclScope, C99orCXX, Tok.is(tok::l_brace));
+
+ // Read the body statement.
+ StmtResult Body(ParseStatement(TrailingElseLoc));
+
+ // Pop the body scope if needed.
+ InnerScope.Exit();
+ WhileScope.Exit();
+
+ if ((Cond.isInvalid() && !CondVar) || Body.isInvalid())
+ return StmtError();
+
+ return Actions.ActOnWhileStmt(WhileLoc, FullCond, CondVar, Body.get());
+}
+
+/// ParseDoStatement
+/// do-statement: [C99 6.8.5.2]
+/// 'do' statement 'while' '(' expression ')' ';'
+/// Note: this lets the caller parse the end ';'.
+StmtResult Parser::ParseDoStatement() {
+ assert(Tok.is(tok::kw_do) && "Not a do stmt!");
+ SourceLocation DoLoc = ConsumeToken(); // eat the 'do'.
+
+ // C99 6.8.5p5 - In C99, the do statement is a block. This is not
+ // the case for C90. Start the loop scope.
+ unsigned ScopeFlags;
+ if (getLangOpts().C99)
+ ScopeFlags = Scope::BreakScope | Scope::ContinueScope | Scope::DeclScope;
+ else
+ ScopeFlags = Scope::BreakScope | Scope::ContinueScope;
+
+ ParseScope DoScope(this, ScopeFlags);
+
+ // C99 6.8.5p5 - In C99, the body of the do statement is a scope, even if
+ // there is no compound stmt. C90 does not have this clause. We only do this
+ // if the body isn't a compound statement to avoid push/pop in common cases.
+ //
+ // C++ 6.5p2:
+ // The substatement in an iteration-statement implicitly defines a local scope
+ // which is entered and exited each time through the loop.
+ //
+ bool C99orCXX = getLangOpts().C99 || getLangOpts().CPlusPlus;
+ ParseScope InnerScope(this, Scope::DeclScope, C99orCXX, Tok.is(tok::l_brace));
+
+ // Read the body statement.
+ StmtResult Body(ParseStatement());
+
+ // Pop the body scope if needed.
+ InnerScope.Exit();
+
+ if (Tok.isNot(tok::kw_while)) {
+ if (!Body.isInvalid()) {
+ Diag(Tok, diag::err_expected_while);
+ Diag(DoLoc, diag::note_matching) << "'do'";
+ SkipUntil(tok::semi, StopBeforeMatch);
+ }
+ return StmtError();
+ }
+ SourceLocation WhileLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "do/while";
+ SkipUntil(tok::semi, StopBeforeMatch);
+ return StmtError();
+ }
+
+ // Parse the parenthesized expression.
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ // A do-while expression is not a condition, so can't have attributes.
+ DiagnoseAndSkipCXX11Attributes();
+
+ ExprResult Cond = ParseExpression();
+ T.consumeClose();
+ DoScope.Exit();
+
+ if (Cond.isInvalid() || Body.isInvalid())
+ return StmtError();
+
+ return Actions.ActOnDoStmt(DoLoc, Body.get(), WhileLoc, T.getOpenLocation(),
+ Cond.get(), T.getCloseLocation());
+}
+
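+/// Returns true if the current identifier looks like the declarator of a
+/// range-based for written without a type, i.e. "for (x : range)" or
+/// "for (x [[attr]] : range)"; used below to recover from that error.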
+bool Parser::isForRangeIdentifier() {
+ assert(Tok.is(tok::identifier));
+
+ const Token &Next = NextToken();
+ if (Next.is(tok::colon))
+ return true;
+
+ if (Next.isOneOf(tok::l_square, tok::kw_alignas)) {
+ TentativeParsingAction PA(*this);
+ ConsumeToken();
+ SkipCXX11Attributes();
+ bool Result = Tok.is(tok::colon);
+ PA.Revert();
+ return Result;
+ }
+
+ return false;
+}
+
+/// ParseForStatement
+/// for-statement: [C99 6.8.5.3]
+/// 'for' '(' expr[opt] ';' expr[opt] ';' expr[opt] ')' statement
+/// 'for' '(' declaration expr[opt] ';' expr[opt] ')' statement
+/// [C++] 'for' '(' for-init-statement condition[opt] ';' expression[opt] ')'
+/// [C++] statement
+/// [C++0x] 'for'
+/// 'co_await'[opt] [Coroutines]
+/// '(' for-range-declaration ':' for-range-initializer ')'
+/// statement
+/// [OBJC2] 'for' '(' declaration 'in' expr ')' statement
+/// [OBJC2] 'for' '(' expr 'in' expr ')' statement
+///
+/// [C++] for-init-statement:
+/// [C++] expression-statement
+/// [C++] simple-declaration
+///
+/// [C++0x] for-range-declaration:
+/// [C++0x] attribute-specifier-seq[opt] type-specifier-seq declarator
+/// [C++0x] for-range-initializer:
+/// [C++0x] expression
+/// [C++0x] braced-init-list [TODO]
+StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
+ assert(Tok.is(tok::kw_for) && "Not a for stmt!");
+ SourceLocation ForLoc = ConsumeToken(); // eat the 'for'.
+
+ SourceLocation CoawaitLoc;
+ if (Tok.is(tok::kw_co_await))
+ CoawaitLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "for";
+ SkipUntil(tok::semi);
+ return StmtError();
+ }
+
+ bool C99orCXXorObjC = getLangOpts().C99 || getLangOpts().CPlusPlus ||
+ getLangOpts().ObjC1;
+
+ // C99 6.8.5p5 - In C99, the for statement is a block. This is not
+ // the case for C90. Start the loop scope.
+ //
+ // C++ 6.4p3:
+ // A name introduced by a declaration in a condition is in scope from its
+ // point of declaration until the end of the substatements controlled by the
+ // condition.
+ // C++ 3.3.2p4:
+ // Names declared in the for-init-statement, and in the condition of if,
+ // while, for, and switch statements are local to the if, while, for, or
+ // switch statement (including the controlled statement).
+ // C++ 6.5.3p1:
+ // Names declared in the for-init-statement are in the same declarative-region
+ // as those declared in the condition.
+ //
+ unsigned ScopeFlags = 0;
+ if (C99orCXXorObjC)
+ ScopeFlags = Scope::DeclScope | Scope::ControlScope;
+
+ ParseScope ForScope(this, ScopeFlags);
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ ExprResult Value;
+
+ bool ForEach = false, ForRange = false;
+ StmtResult FirstPart;
+ bool SecondPartIsInvalid = false;
+ FullExprArg SecondPart(Actions);
+ ExprResult Collection;
+ ForRangeInit ForRangeInit;
+ FullExprArg ThirdPart(Actions);
+ Decl *SecondVar = nullptr;
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(),
+ C99orCXXorObjC? Sema::PCC_ForInit
+ : Sema::PCC_Expression);
+ cutOffParsing();
+ return StmtError();
+ }
+
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX11Attributes(attrs);
+
+ // Parse the first part of the for specifier.
+ if (Tok.is(tok::semi)) { // for (;
+ ProhibitAttributes(attrs);
+ // no first part, eat the ';'.
+ ConsumeToken();
+ } else if (getLangOpts().CPlusPlus && Tok.is(tok::identifier) &&
+ isForRangeIdentifier()) {
+ ProhibitAttributes(attrs);
+ IdentifierInfo *Name = Tok.getIdentifierInfo();
+ SourceLocation Loc = ConsumeToken();
+ MaybeParseCXX11Attributes(attrs);
+
+ ForRangeInit.ColonLoc = ConsumeToken();
+ if (Tok.is(tok::l_brace))
+ ForRangeInit.RangeExpr = ParseBraceInitializer();
+ else
+ ForRangeInit.RangeExpr = ParseExpression();
+
+ Diag(Loc, diag::err_for_range_identifier)
+ << ((getLangOpts().CPlusPlus11 && !getLangOpts().CPlusPlus1z)
+ ? FixItHint::CreateInsertion(Loc, "auto &&")
+ : FixItHint());
+
+ FirstPart = Actions.ActOnCXXForRangeIdentifier(getCurScope(), Loc, Name,
+ attrs, attrs.Range.getEnd());
+ ForRange = true;
+ } else if (isForInitDeclaration()) { // for (int X = 4;
+ // Parse declaration, which eats the ';'.
+ if (!C99orCXXorObjC) // Use of C99-style for loops in C90 mode?
+ Diag(Tok, diag::ext_c99_variable_decl_in_for_loop);
+
+ // In C++0x, "for (T NS:a" might not be a typo for ::
+ bool MightBeForRangeStmt = getLangOpts().CPlusPlus;
+ ColonProtectionRAIIObject ColonProtection(*this, MightBeForRangeStmt);
+
+ SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
+ DeclGroupPtrTy DG = ParseSimpleDeclaration(
+ Declarator::ForContext, DeclEnd, attrs, false,
+ MightBeForRangeStmt ? &ForRangeInit : nullptr);
+ FirstPart = Actions.ActOnDeclStmt(DG, DeclStart, Tok.getLocation());
+ if (ForRangeInit.ParsedForRangeDecl()) {
+ Diag(ForRangeInit.ColonLoc, getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_for_range : diag::ext_for_range);
+
+ ForRange = true;
+ } else if (Tok.is(tok::semi)) { // for (int x = 4;
+ ConsumeToken();
+ } else if ((ForEach = isTokIdentifier_in())) {
+ Actions.ActOnForEachDeclStmt(DG);
+ // ObjC: for (id x in expr)
+ ConsumeToken(); // consume 'in'
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCForCollection(getCurScope(), DG);
+ cutOffParsing();
+ return StmtError();
+ }
+ Collection = ParseExpression();
+ } else {
+ Diag(Tok, diag::err_expected_semi_for);
+ }
+ } else {
+ ProhibitAttributes(attrs);
+ Value = Actions.CorrectDelayedTyposInExpr(ParseExpression());
+
+ ForEach = isTokIdentifier_in();
+
+ // Turn the expression into a stmt.
+ if (!Value.isInvalid()) {
+ if (ForEach)
+ FirstPart = Actions.ActOnForEachLValueExpr(Value.get());
+ else
+ FirstPart = Actions.ActOnExprStmt(Value);
+ }
+
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ } else if (ForEach) {
+ ConsumeToken(); // consume 'in'
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCForCollection(getCurScope(), DeclGroupPtrTy());
+ cutOffParsing();
+ return StmtError();
+ }
+ Collection = ParseExpression();
+ } else if (getLangOpts().CPlusPlus11 && Tok.is(tok::colon) && FirstPart.get()) {
+ // User tried to write the reasonable, but ill-formed, for-range-statement
+ // for (expr : expr) { ... }
+ Diag(Tok, diag::err_for_range_expected_decl)
+ << FirstPart.get()->getSourceRange();
+ SkipUntil(tok::r_paren, StopBeforeMatch);
+ SecondPartIsInvalid = true;
+ } else {
+ if (!Value.isInvalid()) {
+ Diag(Tok, diag::err_expected_semi_for);
+ } else {
+ // Skip until semicolon or rparen, don't consume it.
+ SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ }
+ }
+ }
+
+ // Parse the second part of the for specifier.
+ getCurScope()->AddFlags(Scope::BreakScope | Scope::ContinueScope);
+ if (!ForEach && !ForRange) {
+ assert(!SecondPart.get() && "Shouldn't have a second expression yet.");
+ // Parse the second part of the for specifier.
+ if (Tok.is(tok::semi)) { // for (...;;
+ // no second part.
+ } else if (Tok.is(tok::r_paren)) {
+ // missing both semicolons.
+ } else {
+ ExprResult Second;
+ if (getLangOpts().CPlusPlus)
+ ParseCXXCondition(Second, SecondVar, ForLoc, true);
+ else {
+ Second = ParseExpression();
+ if (!Second.isInvalid())
+ Second = Actions.ActOnBooleanCondition(getCurScope(), ForLoc,
+ Second.get());
+ }
+ SecondPartIsInvalid = Second.isInvalid();
+ SecondPart = Actions.MakeFullExpr(Second.get(), ForLoc);
+ }
+
+ if (Tok.isNot(tok::semi)) {
+ if (!SecondPartIsInvalid || SecondVar)
+ Diag(Tok, diag::err_expected_semi_for);
+ else
+ // Skip until semicolon or rparen, don't consume it.
+ SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);
+ }
+
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ }
+
+ // Parse the third part of the for specifier.
+ if (Tok.isNot(tok::r_paren)) { // for (...;...;)
+ ExprResult Third = ParseExpression();
+ // FIXME: The C++11 standard doesn't actually say that this is a
+ // discarded-value expression, but it clearly should be.
+ ThirdPart = Actions.MakeFullDiscardedValueExpr(Third.get());
+ }
+ }
+ // Match the ')'.
+ T.consumeClose();
+
+ // C++ Coroutines [stmt.iter]:
+ // 'co_await' can only be used for a range-based for statement.
+ if (CoawaitLoc.isValid() && !ForRange) {
+ Diag(CoawaitLoc, diag::err_for_co_await_not_range_for);
+ CoawaitLoc = SourceLocation();
+ }
+
+ // We need to perform most of the semantic analysis for a C++0x for-range
+ // statement before parsing the body, in order to be able to deduce the type
+ // of an auto-typed loop variable.
+ StmtResult ForRangeStmt;
+ StmtResult ForEachStmt;
+
+ if (ForRange) {
+ ForRangeStmt = Actions.ActOnCXXForRangeStmt(
+ getCurScope(), ForLoc, CoawaitLoc, FirstPart.get(),
+ ForRangeInit.ColonLoc, ForRangeInit.RangeExpr.get(),
+ T.getCloseLocation(), Sema::BFRK_Build);
+
+ // Similarly, we need to do the semantic analysis for a for-range
+ // statement immediately in order to close over temporaries correctly.
+ } else if (ForEach) {
+ ForEachStmt = Actions.ActOnObjCForCollectionStmt(ForLoc,
+ FirstPart.get(),
+ Collection.get(),
+ T.getCloseLocation());
+ } else {
+ // In OpenMP loop region loop control variable must be captured and be
+ // private. Perform analysis of first part (if any).
+ if (getLangOpts().OpenMP && FirstPart.isUsable()) {
+ Actions.ActOnOpenMPLoopInitialization(ForLoc, FirstPart.get());
+ }
+ }
+
+ // C99 6.8.5p5 - In C99, the body of the for statement is a scope, even if
+ // there is no compound stmt. C90 does not have this clause. We only do this
+ // if the body isn't a compound statement to avoid push/pop in common cases.
+ //
+ // C++ 6.5p2:
+ // The substatement in an iteration-statement implicitly defines a local scope
+ // which is entered and exited each time through the loop.
+ //
+ // See comments in ParseIfStatement for why we create a scope for
+ // for-init-statement/condition and a new scope for substatement in C++.
+ //
+ ParseScope InnerScope(this, Scope::DeclScope, C99orCXXorObjC,
+ Tok.is(tok::l_brace));
+
+ // The body of the for loop has the same local mangling number as the
+ // for-init-statement.
+ // It will only be incremented if the body contains other things that would
+ // normally increment the mangling number (like a compound statement).
+ if (C99orCXXorObjC)
+ getCurScope()->decrementMSManglingNumber();
+
+ // Read the body statement.
+ StmtResult Body(ParseStatement(TrailingElseLoc));
+
+ // Pop the body scope if needed.
+ InnerScope.Exit();
+
+ // Leave the for-scope.
+ ForScope.Exit();
+
+ if (Body.isInvalid())
+ return StmtError();
+
+ if (ForEach)
+ return Actions.FinishObjCForCollectionStmt(ForEachStmt.get(),
+ Body.get());
+
+ if (ForRange)
+ return Actions.FinishCXXForRangeStmt(ForRangeStmt.get(), Body.get());
+
+ return Actions.ActOnForStmt(ForLoc, T.getOpenLocation(), FirstPart.get(),
+ SecondPart, SecondVar, ThirdPart,
+ T.getCloseLocation(), Body.get());
+}
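+
+// [Editorial example] The main shapes this routine dispatches between
+// ('collection' and 'gen' are hypothetical names):
+//
+//   for (int i = 0; i < n; ++i) body();    // C99/C++ declaration form
+//   for (i = 0; i < n; ++i) body();        // plain expression form
+//   for (auto &&x : collection) use(x);    // C++11 range-based for
+//   for (id x in collection) use(x);       // ObjC2 fast enumeration
+//   for co_await (auto x : gen) use(x);    // coroutines TS range form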
+
+/// ParseGotoStatement
+/// jump-statement:
+/// 'goto' identifier ';'
+/// [GNU] 'goto' '*' expression ';'
+///
+/// Note: this lets the caller parse the end ';'.
+///
+StmtResult Parser::ParseGotoStatement() {
+ assert(Tok.is(tok::kw_goto) && "Not a goto stmt!");
+ SourceLocation GotoLoc = ConsumeToken(); // eat the 'goto'.
+
+ StmtResult Res;
+ if (Tok.is(tok::identifier)) {
+ LabelDecl *LD = Actions.LookupOrCreateLabel(Tok.getIdentifierInfo(),
+ Tok.getLocation());
+ Res = Actions.ActOnGotoStmt(GotoLoc, Tok.getLocation(), LD);
+ ConsumeToken();
+ } else if (Tok.is(tok::star)) {
+ // GNU indirect goto extension.
+ Diag(Tok, diag::ext_gnu_indirect_goto);
+ SourceLocation StarLoc = ConsumeToken();
+ ExprResult R(ParseExpression());
+ if (R.isInvalid()) { // Skip to the semicolon, but don't consume it.
+ SkipUntil(tok::semi, StopBeforeMatch);
+ return StmtError();
+ }
+ Res = Actions.ActOnIndirectGotoStmt(GotoLoc, StarLoc, R.get());
+ } else {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ return StmtError();
+ }
+
+ return Res;
+}
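+
+// [Editorial example] Both jump-statement forms accepted above:
+//
+//   void f(int err) {
+//     void *target = &&fail;   // GNU address-of-label
+//     if (err) goto *target;   // GNU indirect goto (warned as an extension)
+//     goto done;               // plain 'goto' identifier ';'
+//   fail:
+//     err = -1;
+//   done:;
+//   }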
+
+/// ParseContinueStatement
+/// jump-statement:
+/// 'continue' ';'
+///
+/// Note: this lets the caller parse the end ';'.
+///
+StmtResult Parser::ParseContinueStatement() {
+ SourceLocation ContinueLoc = ConsumeToken(); // eat the 'continue'.
+ return Actions.ActOnContinueStmt(ContinueLoc, getCurScope());
+}
+
+/// ParseBreakStatement
+/// jump-statement:
+/// 'break' ';'
+///
+/// Note: this lets the caller parse the end ';'.
+///
+StmtResult Parser::ParseBreakStatement() {
+ SourceLocation BreakLoc = ConsumeToken(); // eat the 'break'.
+ return Actions.ActOnBreakStmt(BreakLoc, getCurScope());
+}
+
+/// ParseReturnStatement
+/// jump-statement:
+/// 'return' expression[opt] ';'
+/// 'return' braced-init-list ';'
+/// 'co_return' expression[opt] ';'
+/// 'co_return' braced-init-list ';'
+StmtResult Parser::ParseReturnStatement() {
+ assert((Tok.is(tok::kw_return) || Tok.is(tok::kw_co_return)) &&
+ "Not a return stmt!");
+ bool IsCoreturn = Tok.is(tok::kw_co_return);
+ SourceLocation ReturnLoc = ConsumeToken(); // eat the 'return'.
+
+ ExprResult R;
+ if (Tok.isNot(tok::semi)) {
+ // FIXME: Code completion for co_return.
+ if (Tok.is(tok::code_completion) && !IsCoreturn) {
+ Actions.CodeCompleteReturn(getCurScope());
+ cutOffParsing();
+ return StmtError();
+ }
+
+ if (Tok.is(tok::l_brace) && getLangOpts().CPlusPlus) {
+ R = ParseInitializer();
+ if (R.isUsable())
+ Diag(R.get()->getLocStart(), getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_generalized_initializer_lists :
+ diag::ext_generalized_initializer_lists)
+ << R.get()->getSourceRange();
+ } else
+ R = ParseExpression();
+ if (R.isInvalid()) {
+ SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
+ return StmtError();
+ }
+ }
+ if (IsCoreturn)
+ return Actions.ActOnCoreturnStmt(ReturnLoc, R.get());
+ return Actions.ActOnReturnStmt(ReturnLoc, R.get(), getCurScope());
+}
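+
+// [Editorial example] The productions above in source form ('task' is a
+// hypothetical coroutine return type):
+//
+//   std::pair<int, int> f() {
+//     return {1, 2};                  // braced-init-list; an extension
+//   }                                 // before C++11, compat-warned after
+//   task<int> g() { co_return 42; }   // coroutine form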
+
+StmtResult Parser::ParsePragmaLoopHint(StmtVector &Stmts, bool OnlyStatement,
+ SourceLocation *TrailingElseLoc,
+ ParsedAttributesWithRange &Attrs) {
+ // Create temporary attribute list.
+ ParsedAttributesWithRange TempAttrs(AttrFactory);
+
+ // Get loop hints and consume annotated token.
+ while (Tok.is(tok::annot_pragma_loop_hint)) {
+ LoopHint Hint;
+ if (!HandlePragmaLoopHint(Hint))
+ continue;
+
+ ArgsUnion ArgHints[] = {Hint.PragmaNameLoc, Hint.OptionLoc, Hint.StateLoc,
+ ArgsUnion(Hint.ValueExpr)};
+ TempAttrs.addNew(Hint.PragmaNameLoc->Ident, Hint.Range, nullptr,
+ Hint.PragmaNameLoc->Loc, ArgHints, 4,
+ AttributeList::AS_Pragma);
+ }
+
+ // Get the next statement.
+ MaybeParseCXX11Attributes(Attrs);
+
+ StmtResult S = ParseStatementOrDeclarationAfterAttributes(
+ Stmts, OnlyStatement, TrailingElseLoc, Attrs);
+
+ Attrs.takeAllFrom(TempAttrs);
+ return S;
+}
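+
+// [Editorial example] An annotated loop whose hint tokens reach this routine
+// as annot_pragma_loop_hint and are attached to the following statement:
+//
+//   #pragma clang loop vectorize(enable) interleave_count(2)
+//   for (int i = 0; i < n; ++i)
+//     a[i] = b[i] + c[i];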
+
+Decl *Parser::ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope) {
+ assert(Tok.is(tok::l_brace));
+ SourceLocation LBraceLoc = Tok.getLocation();
+
+ if (SkipFunctionBodies && (!Decl || Actions.canSkipFunctionBody(Decl)) &&
+ trySkippingFunctionBody()) {
+ BodyScope.Exit();
+ return Actions.ActOnSkippedFunctionBody(Decl);
+ }
+
+ PrettyDeclStackTraceEntry CrashInfo(Actions, Decl, LBraceLoc,
+ "parsing function body");
+
+ // Save and reset current vtordisp stack if we have entered a C++ method body.
+ bool IsCXXMethod =
+ getLangOpts().CPlusPlus && Decl && isa<CXXMethodDecl>(Decl);
+ Sema::VtorDispStackRAII SavedVtorDispStack(Actions, IsCXXMethod);
+
+ // Do not enter a scope for the brace, as the arguments are in the same scope
+ // (the function body) as the body itself. Instead, just read the statement
+ // list and put it into a CompoundStmt for safe keeping.
+ StmtResult FnBody(ParseCompoundStatementBody());
+
+ // If the function body could not be parsed, make a bogus compoundstmt.
+ if (FnBody.isInvalid()) {
+ Sema::CompoundScopeRAII CompoundScope(Actions);
+ FnBody = Actions.ActOnCompoundStmt(LBraceLoc, LBraceLoc, None, false);
+ }
+
+ BodyScope.Exit();
+ return Actions.ActOnFinishFunctionBody(Decl, FnBody.get());
+}
+
+/// ParseFunctionTryBlock - Parse a C++ function-try-block.
+///
+/// function-try-block:
+/// 'try' ctor-initializer[opt] compound-statement handler-seq
+///
+Decl *Parser::ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope) {
+ assert(Tok.is(tok::kw_try) && "Expected 'try'");
+ SourceLocation TryLoc = ConsumeToken();
+
+ PrettyDeclStackTraceEntry CrashInfo(Actions, Decl, TryLoc,
+ "parsing function try block");
+
+ // Constructor initializer list?
+ if (Tok.is(tok::colon))
+ ParseConstructorInitializer(Decl);
+ else
+ Actions.ActOnDefaultCtorInitializers(Decl);
+
+ if (SkipFunctionBodies && Actions.canSkipFunctionBody(Decl) &&
+ trySkippingFunctionBody()) {
+ BodyScope.Exit();
+ return Actions.ActOnSkippedFunctionBody(Decl);
+ }
+
+ // Save and reset current vtordisp stack if we have entered a C++ method body.
+ bool IsCXXMethod =
+ getLangOpts().CPlusPlus && Decl && isa<CXXMethodDecl>(Decl);
+ Sema::VtorDispStackRAII SavedVtorDispStack(Actions, IsCXXMethod);
+
+ SourceLocation LBraceLoc = Tok.getLocation();
+ StmtResult FnBody(ParseCXXTryBlockCommon(TryLoc, /*FnTry*/true));
+ // If we failed to parse the try-catch, we just give the function an empty
+ // compound statement as the body.
+ if (FnBody.isInvalid()) {
+ Sema::CompoundScopeRAII CompoundScope(Actions);
+ FnBody = Actions.ActOnCompoundStmt(LBraceLoc, LBraceLoc, None, false);
+ }
+
+ BodyScope.Exit();
+ return Actions.ActOnFinishFunctionBody(Decl, FnBody.get());
+}
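+
+// [Editorial example] A constructor function-try-block, the main client of
+// this routine ('Widget' and 'make' are hypothetical):
+//
+//   struct Widget {
+//     Widget() try : value(make()) { }   // ctor-initializer, then body
+//     catch (...) { /* the exception is implicitly rethrown */ }
+//     int value;
+//   };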
+
+bool Parser::trySkippingFunctionBody() {
+ assert(Tok.is(tok::l_brace));
+ assert(SkipFunctionBodies &&
+ "Should only be called when SkipFunctionBodies is enabled");
+
+ if (!PP.isCodeCompletionEnabled()) {
+ ConsumeBrace();
+ SkipUntil(tok::r_brace);
+ return true;
+ }
+
+ // We're in code-completion mode. Skip parsing for all function bodies unless
+ // the body contains the code-completion point.
+ TentativeParsingAction PA(*this);
+ ConsumeBrace();
+ if (SkipUntil(tok::r_brace, StopAtCodeCompletion)) {
+ PA.Commit();
+ return true;
+ }
+
+ PA.Revert();
+ return false;
+}
+
+/// ParseCXXTryBlock - Parse a C++ try-block.
+///
+/// try-block:
+/// 'try' compound-statement handler-seq
+///
+StmtResult Parser::ParseCXXTryBlock() {
+ assert(Tok.is(tok::kw_try) && "Expected 'try'");
+
+ SourceLocation TryLoc = ConsumeToken();
+ return ParseCXXTryBlockCommon(TryLoc);
+}
+
+/// ParseCXXTryBlockCommon - Parse the common part of try-block and
+/// function-try-block.
+///
+/// try-block:
+/// 'try' compound-statement handler-seq
+///
+/// function-try-block:
+/// 'try' ctor-initializer[opt] compound-statement handler-seq
+///
+/// handler-seq:
+/// handler handler-seq[opt]
+///
+/// [Borland] try-block:
+/// 'try' compound-statement seh-except-block
+/// 'try' compound-statement seh-finally-block
+///
+StmtResult Parser::ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry) {
+ if (Tok.isNot(tok::l_brace))
+ return StmtError(Diag(Tok, diag::err_expected) << tok::l_brace);
+
+ StmtResult TryBlock(ParseCompoundStatement(/*isStmtExpr=*/false,
+ Scope::DeclScope | Scope::TryScope |
+ (FnTry ? Scope::FnTryCatchScope : 0)));
+ if (TryBlock.isInvalid())
+ return TryBlock;
+
+ // Borland allows SEH-handlers with 'try'
+
+ if ((Tok.is(tok::identifier) &&
+ Tok.getIdentifierInfo() == getSEHExceptKeyword()) ||
+ Tok.is(tok::kw___finally)) {
+ // TODO: Factor into common return ParseSEHHandlerCommon(...)
+ StmtResult Handler;
+ if (Tok.getIdentifierInfo() == getSEHExceptKeyword()) {
+ SourceLocation Loc = ConsumeToken();
+ Handler = ParseSEHExceptBlock(Loc);
+ }
+ else {
+ SourceLocation Loc = ConsumeToken();
+ Handler = ParseSEHFinallyBlock(Loc);
+ }
+ if (Handler.isInvalid())
+ return Handler;
+
+ return Actions.ActOnSEHTryBlock(true /* IsCXXTry */,
+ TryLoc,
+ TryBlock.get(),
+ Handler.get());
+ }
+ else {
+ StmtVector Handlers;
+
+ // C++11 attributes can't appear here, despite this context seeming
+ // statement-like.
+ DiagnoseAndSkipCXX11Attributes();
+
+ if (Tok.isNot(tok::kw_catch))
+ return StmtError(Diag(Tok, diag::err_expected_catch));
+ while (Tok.is(tok::kw_catch)) {
+ StmtResult Handler(ParseCXXCatchBlock(FnTry));
+ if (!Handler.isInvalid())
+ Handlers.push_back(Handler.get());
+ }
+ // Don't bother creating the full statement if we don't have any usable
+ // handlers.
+ if (Handlers.empty())
+ return StmtError();
+
+ return Actions.ActOnCXXTryBlock(TryLoc, TryBlock.get(), Handlers);
+ }
+}
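+
+// [Editorial example] The standard handler-seq path and the Borland SEH path
+// both begin with the same compound-statement ('probe', 'handle', 'release'
+// are hypothetical):
+//
+//   try { probe(); }
+//   catch (const std::exception &e) { handle(e); }
+//   catch (...) { }
+//
+//   try { probe(); }          // Borland extension: SEH handler after 'try'
+//   __finally { release(); }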
+
+/// ParseCXXCatchBlock - Parse a C++ catch block, called handler in the standard
+///
+/// handler:
+/// 'catch' '(' exception-declaration ')' compound-statement
+///
+/// exception-declaration:
+/// attribute-specifier-seq[opt] type-specifier-seq declarator
+/// attribute-specifier-seq[opt] type-specifier-seq abstract-declarator[opt]
+/// '...'
+///
+StmtResult Parser::ParseCXXCatchBlock(bool FnCatch) {
+ assert(Tok.is(tok::kw_catch) && "Expected 'catch'");
+
+ SourceLocation CatchLoc = ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume())
+ return StmtError();
+
+ // C++ 3.3.2p3:
+ // The name in a catch exception-declaration is local to the handler and
+ // shall not be redeclared in the outermost block of the handler.
+ ParseScope CatchScope(this, Scope::DeclScope | Scope::ControlScope |
+ (FnCatch ? Scope::FnTryCatchScope : 0));
+
+ // exception-declaration is equivalent to '...' or a parameter-declaration
+ // without default arguments.
+ Decl *ExceptionDecl = nullptr;
+ if (Tok.isNot(tok::ellipsis)) {
+ ParsedAttributesWithRange Attributes(AttrFactory);
+ MaybeParseCXX11Attributes(Attributes);
+
+ DeclSpec DS(AttrFactory);
+ DS.takeAttributesFrom(Attributes);
+
+ if (ParseCXXTypeSpecifierSeq(DS))
+ return StmtError();
+
+ Declarator ExDecl(DS, Declarator::CXXCatchContext);
+ ParseDeclarator(ExDecl);
+ ExceptionDecl = Actions.ActOnExceptionDeclarator(getCurScope(), ExDecl);
+ } else
+ ConsumeToken();
+
+ T.consumeClose();
+ if (T.getCloseLocation().isInvalid())
+ return StmtError();
+
+ if (Tok.isNot(tok::l_brace))
+ return StmtError(Diag(Tok, diag::err_expected) << tok::l_brace);
+
+ // FIXME: Possible draft standard bug: attribute-specifier should be allowed?
+ StmtResult Block(ParseCompoundStatement());
+ if (Block.isInvalid())
+ return Block;
+
+ return Actions.ActOnCXXCatchBlock(CatchLoc, ExceptionDecl, Block.get());
+}
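+
+// [Editorial example] The three exception-declaration shapes parsed above:
+//
+//   catch (std::overflow_error err) { ... }  // type-specifier-seq declarator
+//   catch (const std::exception &) { ... }   // abstract-declarator
+//   catch (...) { ... }                      // ellipsis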
+
+void Parser::ParseMicrosoftIfExistsStatement(StmtVector &Stmts) {
+ IfExistsCondition Result;
+ if (ParseMicrosoftIfExistsCondition(Result))
+ return;
+
+ // Handle dependent statements by parsing the braces as a compound statement.
+ // This is not the same behavior as Visual C++, which doesn't treat this as a
+ // compound statement, but for Clang's type checking we can't have anything
+ // inside these braces escaping to the surrounding code.
+ if (Result.Behavior == IEB_Dependent) {
+ if (!Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::err_expected) << tok::l_brace;
+ return;
+ }
+
+ StmtResult Compound = ParseCompoundStatement();
+ if (Compound.isInvalid())
+ return;
+
+ StmtResult DepResult = Actions.ActOnMSDependentExistsStmt(Result.KeywordLoc,
+ Result.IsIfExists,
+ Result.SS,
+ Result.Name,
+ Compound.get());
+ if (DepResult.isUsable())
+ Stmts.push_back(DepResult.get());
+ return;
+ }
+
+ BalancedDelimiterTracker Braces(*this, tok::l_brace);
+ if (Braces.consumeOpen()) {
+ Diag(Tok, diag::err_expected) << tok::l_brace;
+ return;
+ }
+
+ switch (Result.Behavior) {
+ case IEB_Parse:
+ // Parse the statements below.
+ break;
+
+ case IEB_Dependent:
+ llvm_unreachable("Dependent case handled above");
+
+ case IEB_Skip:
+ Braces.skipToEnd();
+ return;
+ }
+
+ // Condition is true, parse the statements.
+ while (Tok.isNot(tok::r_brace)) {
+ StmtResult R = ParseStatementOrDeclaration(Stmts, false);
+ if (R.isUsable())
+ Stmts.push_back(R.get());
+ }
+ Braces.consumeClose();
+}
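+
+// [Editorial example] A Microsoft __if_exists statement; in a dependent
+// context like this the braces are parsed as a compound statement
+// (IEB_Dependent), otherwise they are parsed or skipped depending on whether
+// the name resolves (names hypothetical):
+//
+//   template <typename T> void reset(T &obj) {
+//     __if_exists (T::cleanup) {
+//       obj.cleanup();
+//     }
+//   }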
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseStmtAsm.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseStmtAsm.cpp
new file mode 100644
index 0000000..142b473
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseStmtAsm.cpp
@@ -0,0 +1,819 @@
+//===---- ParseStmtAsm.cpp - Assembly Statement Parser --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements parsing for GCC and Microsoft inline assembly.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "RAIIObjectsForParser.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCObjectFileInfo.h"
+#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCTargetAsmParser.h"
+#include "llvm/MC/MCTargetOptions.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/TargetSelect.h"
+using namespace clang;
+
+namespace {
+class ClangAsmParserCallback : public llvm::MCAsmParserSemaCallback {
+ Parser &TheParser;
+ SourceLocation AsmLoc;
+ StringRef AsmString;
+
+ /// The tokens we streamed into AsmString and handed off to MC.
+ ArrayRef<Token> AsmToks;
+
+ /// The offset of each token in AsmToks within AsmString.
+ ArrayRef<unsigned> AsmTokOffsets;
+
+public:
+ ClangAsmParserCallback(Parser &P, SourceLocation Loc, StringRef AsmString,
+ ArrayRef<Token> Toks, ArrayRef<unsigned> Offsets)
+ : TheParser(P), AsmLoc(Loc), AsmString(AsmString), AsmToks(Toks),
+ AsmTokOffsets(Offsets) {
+ assert(AsmToks.size() == AsmTokOffsets.size());
+ }
+
+ void *LookupInlineAsmIdentifier(StringRef &LineBuf,
+ llvm::InlineAsmIdentifierInfo &Info,
+ bool IsUnevaluatedContext) override {
+ // Collect the desired tokens.
+ SmallVector<Token, 16> LineToks;
+ const Token *FirstOrigToken = nullptr;
+ findTokensForString(LineBuf, LineToks, FirstOrigToken);
+
+ unsigned NumConsumedToks;
+ ExprResult Result = TheParser.ParseMSAsmIdentifier(
+ LineToks, NumConsumedToks, &Info, IsUnevaluatedContext);
+
+ // If we consumed the entire line, tell MC that.
+ // Also do this if we consumed nothing as a way of reporting failure.
+ if (NumConsumedToks == 0 || NumConsumedToks == LineToks.size()) {
+ // By not modifying LineBuf, we're implicitly consuming it all.
+
+ // Otherwise, consume up to the original tokens.
+ } else {
+ assert(FirstOrigToken && "not using original tokens?");
+
+ // Since we're using original tokens, apply that offset.
+ assert(FirstOrigToken[NumConsumedToks].getLocation() ==
+ LineToks[NumConsumedToks].getLocation());
+ unsigned FirstIndex = FirstOrigToken - AsmToks.begin();
+ unsigned LastIndex = FirstIndex + NumConsumedToks - 1;
+
+ // The total length we've consumed is the relative offset
+ // of the last token we consumed plus its length.
+ unsigned TotalOffset =
+ (AsmTokOffsets[LastIndex] + AsmToks[LastIndex].getLength() -
+ AsmTokOffsets[FirstIndex]);
+ LineBuf = LineBuf.substr(0, TotalOffset);
+ }
+
+ // Initialize the "decl" with the lookup result.
+ Info.OpDecl = static_cast<void *>(Result.get());
+ return Info.OpDecl;
+ }
+
+ StringRef LookupInlineAsmLabel(StringRef Identifier, llvm::SourceMgr &LSM,
+ llvm::SMLoc Location,
+ bool Create) override {
+ SourceLocation Loc = translateLocation(LSM, Location);
+ LabelDecl *Label =
+ TheParser.getActions().GetOrCreateMSAsmLabel(Identifier, Loc, Create);
+ return Label->getMSAsmLabel();
+ }
+
+ bool LookupInlineAsmField(StringRef Base, StringRef Member,
+ unsigned &Offset) override {
+ return TheParser.getActions().LookupInlineAsmField(Base, Member, Offset,
+ AsmLoc);
+ }
+
+ static void DiagHandlerCallback(const llvm::SMDiagnostic &D, void *Context) {
+ ((ClangAsmParserCallback *)Context)->handleDiagnostic(D);
+ }
+
+private:
+ /// Collect the appropriate tokens for the given string.
+ void findTokensForString(StringRef Str, SmallVectorImpl<Token> &TempToks,
+ const Token *&FirstOrigToken) const {
+ // For now, assert that the string we're working with is a substring
+ // of what we gave to MC. This lets us use the original tokens.
+ assert(!std::less<const char *>()(Str.begin(), AsmString.begin()) &&
+ !std::less<const char *>()(AsmString.end(), Str.end()));
+
+ // Try to find a token whose offset matches the first token.
+ unsigned FirstCharOffset = Str.begin() - AsmString.begin();
+ const unsigned *FirstTokOffset = std::lower_bound(
+ AsmTokOffsets.begin(), AsmTokOffsets.end(), FirstCharOffset);
+
+ // For now, assert that the start of the string exactly
+ // corresponds to the start of a token.
+ assert(*FirstTokOffset == FirstCharOffset);
+
+ // Use all the original tokens for this line. (We assume the
+ // end of the line corresponds cleanly to a token break.)
+ unsigned FirstTokIndex = FirstTokOffset - AsmTokOffsets.begin();
+ FirstOrigToken = &AsmToks[FirstTokIndex];
+ unsigned LastCharOffset = Str.end() - AsmString.begin();
+ for (unsigned i = FirstTokIndex, e = AsmTokOffsets.size(); i != e; ++i) {
+ if (AsmTokOffsets[i] >= LastCharOffset)
+ break;
+ TempToks.push_back(AsmToks[i]);
+ }
+ }
+
+ SourceLocation translateLocation(const llvm::SourceMgr &LSM, llvm::SMLoc SMLoc) {
+ // Compute an offset into the inline asm buffer.
+ // FIXME: This isn't right if .macro is involved (but hopefully, no
+ // real-world code does that).
+ const llvm::MemoryBuffer *LBuf =
+ LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(SMLoc));
+ unsigned Offset = SMLoc.getPointer() - LBuf->getBufferStart();
+
+ // Figure out which token that offset points into.
+ const unsigned *TokOffsetPtr =
+ std::lower_bound(AsmTokOffsets.begin(), AsmTokOffsets.end(), Offset);
+ unsigned TokIndex = TokOffsetPtr - AsmTokOffsets.begin();
+ unsigned TokOffset = *TokOffsetPtr;
+
+ // If we come up with an answer which seems sane, use it; otherwise,
+ // just point at the __asm keyword.
+ // FIXME: Assert the answer is sane once we handle .macro correctly.
+ SourceLocation Loc = AsmLoc;
+ if (TokIndex < AsmToks.size()) {
+ const Token &Tok = AsmToks[TokIndex];
+ Loc = Tok.getLocation();
+ Loc = Loc.getLocWithOffset(Offset - TokOffset);
+ }
+ return Loc;
+ }
+
+ void handleDiagnostic(const llvm::SMDiagnostic &D) {
+ const llvm::SourceMgr &LSM = *D.getSourceMgr();
+ SourceLocation Loc = translateLocation(LSM, D.getLoc());
+ TheParser.Diag(Loc, diag::err_inline_ms_asm_parsing) << D.getMessage();
+ }
+};
+}
+
+/// Parse an identifier in an MS-style inline assembly block.
+///
+/// \param CastInfo - a void* so that we don't have to teach Parser.h
+/// about the actual type.
+ExprResult Parser::ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
+ unsigned &NumLineToksConsumed,
+ void *CastInfo,
+ bool IsUnevaluatedContext) {
+ llvm::InlineAsmIdentifierInfo &Info =
+ *(llvm::InlineAsmIdentifierInfo *)CastInfo;
+
+ // Push a fake token on the end so that we don't overrun the token
+ // stream. We use ';' because expression parsing should never
+ // overrun it.
+ const tok::TokenKind EndOfStream = tok::semi;
+ Token EndOfStreamTok;
+ EndOfStreamTok.startToken();
+ EndOfStreamTok.setKind(EndOfStream);
+ LineToks.push_back(EndOfStreamTok);
+
+ // Also copy the current token over.
+ LineToks.push_back(Tok);
+
+ PP.EnterTokenStream(LineToks.begin(), LineToks.size(),
+ /*disable macros*/ true,
+ /*owns tokens*/ false);
+
+ // Clear the current token and advance to the first token in LineToks.
+ ConsumeAnyToken();
+
+ // Parse an optional scope-specifier if we're in C++.
+ CXXScopeSpec SS;
+ if (getLangOpts().CPlusPlus) {
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
+ }
+
+ // Require an identifier here.
+ SourceLocation TemplateKWLoc;
+ UnqualifiedId Id;
+ bool Invalid = true;
+ ExprResult Result;
+ if (Tok.is(tok::kw_this)) {
+ Result = ParseCXXThis();
+ Invalid = false;
+ } else {
+ Invalid =
+ ParseUnqualifiedId(SS,
+ /*EnteringContext=*/false,
+ /*AllowDestructorName=*/false,
+ /*AllowConstructorName=*/false,
+ /*ObjectType=*/ParsedType(), TemplateKWLoc, Id);
+ // Perform the lookup.
+ Result = Actions.LookupInlineAsmIdentifier(SS, TemplateKWLoc, Id, Info,
+ IsUnevaluatedContext);
+ }
+ // While the next two tokens are 'period' 'identifier', repeatedly parse them as
+ // a field access. We have to avoid consuming assembler directives that look
+ // like '.' 'else'.
+ while (Result.isUsable() && Tok.is(tok::period)) {
+ Token IdTok = PP.LookAhead(0);
+ if (IdTok.isNot(tok::identifier))
+ break;
+ ConsumeToken(); // Consume the period.
+ IdentifierInfo *Id = Tok.getIdentifierInfo();
+ ConsumeToken(); // Consume the identifier.
+ Result = Actions.LookupInlineAsmVarDeclField(Result.get(), Id->getName(),
+ Info, Tok.getLocation());
+ }
+
+ // Figure out how many tokens we are into LineToks.
+ unsigned LineIndex = 0;
+ if (Tok.is(EndOfStream)) {
+ LineIndex = LineToks.size() - 2;
+ } else {
+ while (LineToks[LineIndex].getLocation() != Tok.getLocation()) {
+ LineIndex++;
+ assert(LineIndex < LineToks.size() - 2); // we added two extra tokens
+ }
+ }
+
+ // If we've run into the poison token we inserted before, or there
+ // was a parsing error, then claim the entire line.
+ if (Invalid || Tok.is(EndOfStream)) {
+ NumLineToksConsumed = LineToks.size() - 2;
+ } else {
+ // Otherwise, claim up to the start of the next token.
+ NumLineToksConsumed = LineIndex;
+ }
+
+ // Finally, restore the old parsing state by consuming all the tokens we
+ // staged before, implicitly killing off the token-lexer we pushed.
+ for (unsigned i = 0, e = LineToks.size() - LineIndex - 2; i != e; ++i) {
+ ConsumeAnyToken();
+ }
+ assert(Tok.is(EndOfStream));
+ ConsumeToken();
+
+ // Leave LineToks in its original state.
+ LineToks.pop_back();
+ LineToks.pop_back();
+
+ return Result;
+}
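+
+// [Editorial example] The period-identifier loop above folds chained field
+// references inside an MS asm block into a single l-value, while leaving
+// directive-like sequences such as '.' 'else' alone ('pkt' is hypothetical):
+//
+//   __asm mov eax, pkt.header.length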
+
+/// Turn a sequence of our tokens back into a string that we can hand
+/// to the MC asm parser.
+static bool buildMSAsmString(Preprocessor &PP, SourceLocation AsmLoc,
+ ArrayRef<Token> AsmToks,
+ SmallVectorImpl<unsigned> &TokOffsets,
+ SmallString<512> &Asm) {
+ assert(!AsmToks.empty() && "Didn't expect an empty AsmToks!");
+
+ // Is this the start of a new assembly statement?
+ bool isNewStatement = true;
+
+ for (unsigned i = 0, e = AsmToks.size(); i < e; ++i) {
+ const Token &Tok = AsmToks[i];
+
+ // Start each new statement with a newline and a tab.
+ if (!isNewStatement && (Tok.is(tok::kw_asm) || Tok.isAtStartOfLine())) {
+ Asm += "\n\t";
+ isNewStatement = true;
+ }
+
+ // Preserve the existence of leading whitespace except at the
+ // start of a statement.
+ if (!isNewStatement && Tok.hasLeadingSpace())
+ Asm += ' ';
+
+ // Remember the offset of this token.
+ TokOffsets.push_back(Asm.size());
+
+ // Don't actually write '__asm' into the assembly stream.
+ if (Tok.is(tok::kw_asm)) {
+ // Complain about __asm at the end of the stream.
+ if (i + 1 == e) {
+ PP.Diag(AsmLoc, diag::err_asm_empty);
+ return true;
+ }
+
+ continue;
+ }
+
+ // Append the spelling of the token.
+ SmallString<32> SpellingBuffer;
+ bool SpellingInvalid = false;
+ Asm += PP.getSpelling(Tok, SpellingBuffer, &SpellingInvalid);
+ assert(!SpellingInvalid && "spelling was invalid after correct parse?");
+
+ // We are no longer at the start of a statement.
+ isNewStatement = false;
+ }
+
+ // Null-terminate the buffer without counting the terminator in its size.
+ Asm.push_back('\0');
+ Asm.pop_back();
+
+ assert(TokOffsets.size() == AsmToks.size());
+ return false;
+}
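+
+// [Editorial example] For a block like '__asm { mov eax, x <newline> mov y,
+// eax }', the builder drops the '__asm' keyword, preserves leading spaces
+// inside a statement, and starts each new statement with "\n\t", yielding:
+//
+//   "mov eax, x\n\tmov y, eax"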
+
+/// ParseMicrosoftAsmStatement. When -fms-extensions/-fasm-blocks is enabled,
+/// this routine is called to collect the tokens for an MS asm statement.
+///
+/// [MS] ms-asm-statement:
+/// ms-asm-block
+/// ms-asm-block ms-asm-statement
+///
+/// [MS] ms-asm-block:
+/// '__asm' ms-asm-line '\n'
+/// '__asm' '{' ms-asm-instruction-block[opt] '}' ';'[opt]
+///
+/// [MS] ms-asm-instruction-block
+/// ms-asm-line
+/// ms-asm-line '\n' ms-asm-instruction-block
+///
+StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
+ SourceManager &SrcMgr = PP.getSourceManager();
+ SourceLocation EndLoc = AsmLoc;
+ SmallVector<Token, 4> AsmToks;
+
+ bool SingleLineMode = true;
+ unsigned BraceNesting = 0;
+ unsigned short savedBraceCount = BraceCount;
+ bool InAsmComment = false;
+ FileID FID;
+ unsigned LineNo = 0;
+ unsigned NumTokensRead = 0;
+ SmallVector<SourceLocation, 4> LBraceLocs;
+ bool SkippedStartOfLine = false;
+
+ if (Tok.is(tok::l_brace)) {
+ // Braced inline asm: consume the opening brace.
+ SingleLineMode = false;
+ BraceNesting = 1;
+ EndLoc = ConsumeBrace();
+ LBraceLocs.push_back(EndLoc);
+ ++NumTokensRead;
+ } else {
+ // Single-line inline asm; compute which line it is on.
+ std::pair<FileID, unsigned> ExpAsmLoc =
+ SrcMgr.getDecomposedExpansionLoc(EndLoc);
+ FID = ExpAsmLoc.first;
+ LineNo = SrcMgr.getLineNumber(FID, ExpAsmLoc.second);
+ LBraceLocs.push_back(SourceLocation());
+ }
+
+ SourceLocation TokLoc = Tok.getLocation();
+ do {
+ // If we hit EOF, we're done, period.
+ if (isEofOrEom())
+ break;
+
+ if (!InAsmComment && Tok.is(tok::l_brace)) {
+ // Consume the opening brace.
+ SkippedStartOfLine = Tok.isAtStartOfLine();
+ EndLoc = ConsumeBrace();
+ BraceNesting++;
+ LBraceLocs.push_back(EndLoc);
+ TokLoc = Tok.getLocation();
+ ++NumTokensRead;
+ continue;
+ } else if (!InAsmComment && Tok.is(tok::semi)) {
+ // A semicolon in an asm is the start of a comment.
+ InAsmComment = true;
+ if (!SingleLineMode) {
+ // Compute which line the comment is on.
+ std::pair<FileID, unsigned> ExpSemiLoc =
+ SrcMgr.getDecomposedExpansionLoc(TokLoc);
+ FID = ExpSemiLoc.first;
+ LineNo = SrcMgr.getLineNumber(FID, ExpSemiLoc.second);
+ }
+ } else if (SingleLineMode || InAsmComment) {
+ // If end-of-line is significant, check whether this token is on a
+ // new line.
+ std::pair<FileID, unsigned> ExpLoc =
+ SrcMgr.getDecomposedExpansionLoc(TokLoc);
+ if (ExpLoc.first != FID ||
+ SrcMgr.getLineNumber(ExpLoc.first, ExpLoc.second) != LineNo) {
+ // If this is a single-line __asm, we're done, except if the next
+ // line begins with an __asm too, in which case we finish a comment
+ // if needed and then keep processing the next line as a single
+ // line __asm.
+ bool isAsm = Tok.is(tok::kw_asm);
+ if (SingleLineMode && !isAsm)
+ break;
+ // We're no longer in a comment.
+ InAsmComment = false;
+ if (isAsm) {
+ LineNo = SrcMgr.getLineNumber(ExpLoc.first, ExpLoc.second);
+ SkippedStartOfLine = Tok.isAtStartOfLine();
+ }
+ } else if (!InAsmComment && Tok.is(tok::r_brace)) {
+ // In MSVC mode, braces only participate in brace matching and
+ // separating the asm statements. This is an intentional
+ // departure from the Apple gcc behavior.
+ if (!BraceNesting)
+ break;
+ }
+ }
+ if (!InAsmComment && BraceNesting && Tok.is(tok::r_brace) &&
+ BraceCount == (savedBraceCount + BraceNesting)) {
+ // Consume the closing brace.
+ SkippedStartOfLine = Tok.isAtStartOfLine();
+ EndLoc = ConsumeBrace();
+ BraceNesting--;
+ // Finish if all of the opened braces in the inline asm section were
+ // consumed.
+ if (BraceNesting == 0 && !SingleLineMode)
+ break;
+ else {
+ LBraceLocs.pop_back();
+ TokLoc = Tok.getLocation();
+ ++NumTokensRead;
+ continue;
+ }
+ }
+
+ // Consume the next token; make sure we don't modify the brace count etc.
+ // if we are in a comment.
+ EndLoc = TokLoc;
+ if (InAsmComment)
+ PP.Lex(Tok);
+ else {
+ // Set the token as the start of line if we skipped the original start
+ // of line token in case it was a nested brace.
+ if (SkippedStartOfLine)
+ Tok.setFlag(Token::StartOfLine);
+ AsmToks.push_back(Tok);
+ ConsumeAnyToken();
+ }
+ TokLoc = Tok.getLocation();
+ ++NumTokensRead;
+ SkippedStartOfLine = false;
+ } while (1);
+
+ if (BraceNesting && BraceCount != savedBraceCount) {
+ // __asm without closing brace (this can happen at EOF).
+ for (unsigned i = 0; i < BraceNesting; ++i) {
+ Diag(Tok, diag::err_expected) << tok::r_brace;
+ Diag(LBraceLocs.back(), diag::note_matching) << tok::l_brace;
+ LBraceLocs.pop_back();
+ }
+ return StmtError();
+ } else if (NumTokensRead == 0) {
+ // Empty __asm.
+ Diag(Tok, diag::err_expected) << tok::l_brace;
+ return StmtError();
+ }
+
+ // Okay, prepare to use MC to parse the assembly.
+ SmallVector<StringRef, 4> ConstraintRefs;
+ SmallVector<Expr *, 4> Exprs;
+ SmallVector<StringRef, 4> ClobberRefs;
+
+ // We need an actual supported target.
+ const llvm::Triple &TheTriple = Actions.Context.getTargetInfo().getTriple();
+ llvm::Triple::ArchType ArchTy = TheTriple.getArch();
+ const std::string &TT = TheTriple.getTriple();
+ const llvm::Target *TheTarget = nullptr;
+ bool UnsupportedArch =
+ (ArchTy != llvm::Triple::x86 && ArchTy != llvm::Triple::x86_64);
+ if (UnsupportedArch) {
+ Diag(AsmLoc, diag::err_msasm_unsupported_arch) << TheTriple.getArchName();
+ } else {
+ std::string Error;
+ TheTarget = llvm::TargetRegistry::lookupTarget(TT, Error);
+ if (!TheTarget)
+ Diag(AsmLoc, diag::err_msasm_unable_to_create_target) << Error;
+ }
+
+ assert(!LBraceLocs.empty() && "Should have at least one location here");
+
+ // If we don't support assembly, or the assembly is empty, we don't
+ // need to instantiate the AsmParser, etc.
+ if (!TheTarget || AsmToks.empty()) {
+ return Actions.ActOnMSAsmStmt(AsmLoc, LBraceLocs[0], AsmToks, StringRef(),
+ /*NumOutputs*/ 0, /*NumInputs*/ 0,
+ ConstraintRefs, ClobberRefs, Exprs, EndLoc);
+ }
+
+ // Expand the tokens into a string buffer.
+ SmallString<512> AsmString;
+ SmallVector<unsigned, 8> TokOffsets;
+ if (buildMSAsmString(PP, AsmLoc, AsmToks, TokOffsets, AsmString))
+ return StmtError();
+
+ std::unique_ptr<llvm::MCRegisterInfo> MRI(TheTarget->createMCRegInfo(TT));
+ std::unique_ptr<llvm::MCAsmInfo> MAI(TheTarget->createMCAsmInfo(*MRI, TT));
+ // Get the instruction descriptor.
+ std::unique_ptr<llvm::MCInstrInfo> MII(TheTarget->createMCInstrInfo());
+ std::unique_ptr<llvm::MCObjectFileInfo> MOFI(new llvm::MCObjectFileInfo());
+ std::unique_ptr<llvm::MCSubtargetInfo> STI(
+ TheTarget->createMCSubtargetInfo(TT, "", ""));
+
+ llvm::SourceMgr TempSrcMgr;
+ llvm::MCContext Ctx(MAI.get(), MRI.get(), MOFI.get(), &TempSrcMgr);
+ MOFI->InitMCObjectFileInfo(TheTriple, llvm::Reloc::Default,
+ llvm::CodeModel::Default, Ctx);
+ std::unique_ptr<llvm::MemoryBuffer> Buffer =
+ llvm::MemoryBuffer::getMemBuffer(AsmString, "<MS inline asm>");
+
+ // Tell SrcMgr about this buffer, which is what the parser will pick up.
+ TempSrcMgr.AddNewSourceBuffer(std::move(Buffer), llvm::SMLoc());
+
+ std::unique_ptr<llvm::MCStreamer> Str(createNullStreamer(Ctx));
+ std::unique_ptr<llvm::MCAsmParser> Parser(
+ createMCAsmParser(TempSrcMgr, Ctx, *Str.get(), *MAI));
+
+ // FIXME: init MCOptions from sanitizer flags here.
+ llvm::MCTargetOptions MCOptions;
+ std::unique_ptr<llvm::MCTargetAsmParser> TargetParser(
+ TheTarget->createMCAsmParser(*STI, *Parser, *MII, MCOptions));
+
+ std::unique_ptr<llvm::MCInstPrinter> IP(
+ TheTarget->createMCInstPrinter(llvm::Triple(TT), 1, *MAI, *MII, *MRI));
+
+ // Change to the Intel dialect.
+ Parser->setAssemblerDialect(1);
+ Parser->setTargetParser(*TargetParser.get());
+ Parser->setParsingInlineAsm(true);
+ TargetParser->setParsingInlineAsm(true);
+
+ ClangAsmParserCallback Callback(*this, AsmLoc, AsmString, AsmToks,
+ TokOffsets);
+ TargetParser->setSemaCallback(&Callback);
+ TempSrcMgr.setDiagHandler(ClangAsmParserCallback::DiagHandlerCallback,
+ &Callback);
+
+ unsigned NumOutputs;
+ unsigned NumInputs;
+ std::string AsmStringIR;
+ SmallVector<std::pair<void *, bool>, 4> OpExprs;
+ SmallVector<std::string, 4> Constraints;
+ SmallVector<std::string, 4> Clobbers;
+ if (Parser->parseMSInlineAsm(AsmLoc.getPtrEncoding(), AsmStringIR, NumOutputs,
+ NumInputs, OpExprs, Constraints, Clobbers,
+ MII.get(), IP.get(), Callback))
+ return StmtError();
+
+ // Filter out "fpsw". Clang doesn't accept it, and it always lists flags and
+ // fpsr as clobbers.
+ auto End = std::remove(Clobbers.begin(), Clobbers.end(), "fpsw");
+ Clobbers.erase(End, Clobbers.end());
+
+ // Build the vector of clobber StringRefs.
+ ClobberRefs.insert(ClobberRefs.end(), Clobbers.begin(), Clobbers.end());
+
+ // Recast the void pointers and build the vector of constraint StringRefs.
+ unsigned NumExprs = NumOutputs + NumInputs;
+ ConstraintRefs.resize(NumExprs);
+ Exprs.resize(NumExprs);
+ for (unsigned i = 0, e = NumExprs; i != e; ++i) {
+ Expr *OpExpr = static_cast<Expr *>(OpExprs[i].first);
+ if (!OpExpr)
+ return StmtError();
+
+ // Need address of variable.
+ if (OpExprs[i].second)
+ OpExpr =
+ Actions.BuildUnaryOp(getCurScope(), AsmLoc, UO_AddrOf, OpExpr).get();
+
+ ConstraintRefs[i] = StringRef(Constraints[i]);
+ Exprs[i] = OpExpr;
+ }
+
+ // FIXME: We should be passing source locations for better diagnostics.
+ return Actions.ActOnMSAsmStmt(AsmLoc, LBraceLocs[0], AsmToks, AsmStringIR,
+ NumOutputs, NumInputs, ConstraintRefs,
+ ClobberRefs, Exprs, EndLoc);
+}
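+
+// [Editorial example] A braced MS inline asm block as consumed by the token
+// loop above; the ';' comment exercises the InAsmComment handling:
+//
+//   int square(int x) {
+//     __asm {
+//       mov eax, x      ; a semicolon starts an asm comment
+//       imul eax, eax
+//       mov x, eax
+//     }
+//     return x;
+//   }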
+
+/// ParseAsmStatement - Parse a GNU extended asm statement.
+/// asm-statement:
+/// gnu-asm-statement
+/// ms-asm-statement
+///
+/// [GNU] gnu-asm-statement:
+/// 'asm' type-qualifier[opt] '(' asm-argument ')' ';'
+///
+/// [GNU] asm-argument:
+/// asm-string-literal
+/// asm-string-literal ':' asm-operands[opt]
+/// asm-string-literal ':' asm-operands[opt] ':' asm-operands[opt]
+/// asm-string-literal ':' asm-operands[opt] ':' asm-operands[opt]
+/// ':' asm-clobbers
+///
+/// [GNU] asm-clobbers:
+/// asm-string-literal
+/// asm-clobbers ',' asm-string-literal
+///
+StmtResult Parser::ParseAsmStatement(bool &msAsm) {
+ assert(Tok.is(tok::kw_asm) && "Not an asm stmt");
+ SourceLocation AsmLoc = ConsumeToken();
+
+ if (getLangOpts().AsmBlocks && Tok.isNot(tok::l_paren) &&
+ !isTypeQualifier()) {
+ msAsm = true;
+ return ParseMicrosoftAsmStatement(AsmLoc);
+ }
+
+ DeclSpec DS(AttrFactory);
+ SourceLocation Loc = Tok.getLocation();
+ ParseTypeQualifierListOpt(DS, AR_VendorAttributesParsed);
+
+ // GNU asms accept, but warn about, type-qualifiers other than volatile.
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
+ Diag(Loc, diag::w_asm_qualifier_ignored) << "const";
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_restrict)
+ Diag(Loc, diag::w_asm_qualifier_ignored) << "restrict";
+ // FIXME: Once GCC supports _Atomic, check whether it permits it here.
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_atomic)
+ Diag(Loc, diag::w_asm_qualifier_ignored) << "_Atomic";
+
+ // Remember if this was a volatile asm.
+ bool isVolatile = DS.getTypeQualifiers() & DeclSpec::TQ_volatile;
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "asm";
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return StmtError();
+ }
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ ExprResult AsmString(ParseAsmStringLiteral());
+
+ // Check if GNU-style InlineAsm is disabled.
+ // Error on anything other than empty string.
+ if (!(getLangOpts().GNUAsm || AsmString.isInvalid())) {
+ const auto *SL = cast<StringLiteral>(AsmString.get());
+ if (!SL->getString().trim().empty())
+ Diag(Loc, diag::err_gnu_inline_asm_disabled);
+ }
+
+ if (AsmString.isInvalid()) {
+ // Consume up to and including the closing paren.
+ T.skipToEnd();
+ return StmtError();
+ }
+
+ SmallVector<IdentifierInfo *, 4> Names;
+ ExprVector Constraints;
+ ExprVector Exprs;
+ ExprVector Clobbers;
+
+ if (Tok.is(tok::r_paren)) {
+ // We have a simple asm expression like 'asm("foo")'.
+ T.consumeClose();
+ return Actions.ActOnGCCAsmStmt(AsmLoc, /*isSimple*/ true, isVolatile,
+ /*NumOutputs*/ 0, /*NumInputs*/ 0, nullptr,
+ Constraints, Exprs, AsmString.get(),
+ Clobbers, T.getCloseLocation());
+ }
+
+ // Parse Outputs, if present.
+ bool AteExtraColon = false;
+ if (Tok.is(tok::colon) || Tok.is(tok::coloncolon)) {
+ // In C++ mode, parse "::" like ": :".
+ AteExtraColon = Tok.is(tok::coloncolon);
+ ConsumeToken();
+
+ if (!AteExtraColon && ParseAsmOperandsOpt(Names, Constraints, Exprs))
+ return StmtError();
+ }
+
+ unsigned NumOutputs = Names.size();
+
+ // Parse Inputs, if present.
+ if (AteExtraColon || Tok.is(tok::colon) || Tok.is(tok::coloncolon)) {
+ // In C++ mode, parse "::" like ": :".
+ if (AteExtraColon)
+ AteExtraColon = false;
+ else {
+ AteExtraColon = Tok.is(tok::coloncolon);
+ ConsumeToken();
+ }
+
+ if (!AteExtraColon && ParseAsmOperandsOpt(Names, Constraints, Exprs))
+ return StmtError();
+ }
+
+ assert(Names.size() == Constraints.size() &&
+ Constraints.size() == Exprs.size() && "Input operand size mismatch!");
+
+ unsigned NumInputs = Names.size() - NumOutputs;
+
+ // Parse the clobbers, if present.
+ if (AteExtraColon || Tok.is(tok::colon)) {
+ if (!AteExtraColon)
+ ConsumeToken();
+
+ // Parse the asm-string list for clobbers if present.
+ if (Tok.isNot(tok::r_paren)) {
+ while (1) {
+ ExprResult Clobber(ParseAsmStringLiteral());
+
+ if (Clobber.isInvalid())
+ break;
+
+ Clobbers.push_back(Clobber.get());
+
+ if (!TryConsumeToken(tok::comma))
+ break;
+ }
+ }
+ }
+
+ T.consumeClose();
+ return Actions.ActOnGCCAsmStmt(
+ AsmLoc, false, isVolatile, NumOutputs, NumInputs, Names.data(),
+ Constraints, Exprs, AsmString.get(), Clobbers, T.getCloseLocation());
+}
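+
+// [Editorial example] A GNU extended asm statement covering all three operand
+// sections parsed above:
+//
+//   int dst, src = 1;
+//   asm volatile ("mov %1, %0\n\t"
+//                 "add $1, %0"
+//                 : "=r" (dst)    // outputs
+//                 : "r" (src)     // inputs
+//                 : "cc");        // clobbers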
+
+/// ParseAsmOperands - Parse the asm-operands production as used by
+/// asm-statement, assuming the leading ':' token was eaten.
+///
+/// [GNU] asm-operands:
+/// asm-operand
+/// asm-operands ',' asm-operand
+///
+/// [GNU] asm-operand:
+/// asm-string-literal '(' expression ')'
+/// '[' identifier ']' asm-string-literal '(' expression ')'
+///
+//
+// FIXME: Avoid unnecessary std::string trashing.
+bool Parser::ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
+ SmallVectorImpl<Expr *> &Constraints,
+ SmallVectorImpl<Expr *> &Exprs) {
+ // 'asm-operands' isn't present?
+ if (!isTokenStringLiteral() && Tok.isNot(tok::l_square))
+ return false;
+
+ while (1) {
+ // Read the [id] if present.
+ if (Tok.is(tok::l_square)) {
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return true;
+ }
+
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ ConsumeToken();
+
+ Names.push_back(II);
+ T.consumeClose();
+ } else
+ Names.push_back(nullptr);
+
+ ExprResult Constraint(ParseAsmStringLiteral());
+ if (Constraint.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return true;
+ }
+ Constraints.push_back(Constraint.get());
+
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "asm operand";
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return true;
+ }
+
+ // Read the parenthesized expression.
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ ExprResult Res = Actions.CorrectDelayedTyposInExpr(ParseExpression());
+ T.consumeClose();
+ if (Res.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return true;
+ }
+ Exprs.push_back(Res.get());
+ // Eat the comma and continue parsing if it exists.
+ if (!TryConsumeToken(tok::comma))
+ return false;
+ }
+}
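+
+// [Editorial example] The bracketed-name operand form handled above:
+//
+//   asm ("mov %[in], %[out]"
+//        : [out] "=r" (y)   // '[' identifier ']' asm-string-literal '(' expr ')'
+//        : [in] "r" (x));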
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp
new file mode 100644
index 0000000..a4dcdb1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp
@@ -0,0 +1,1416 @@
+//===--- ParseTemplate.cpp - Template Parsing -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements parsing of C++ templates.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "RAIIObjectsForParser.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/Scope.h"
+using namespace clang;
+
+/// \brief Parse a template declaration, explicit instantiation, or
+/// explicit specialization.
+Decl *
+Parser::ParseDeclarationStartingWithTemplate(unsigned Context,
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS,
+ AttributeList *AccessAttrs) {
+ ObjCDeclContextSwitch ObjCDC(*this);
+
+ if (Tok.is(tok::kw_template) && NextToken().isNot(tok::less)) {
+ return ParseExplicitInstantiation(Context,
+ SourceLocation(), ConsumeToken(),
+ DeclEnd, AS);
+ }
+ return ParseTemplateDeclarationOrSpecialization(Context, DeclEnd, AS,
+ AccessAttrs);
+}
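+
+// [Editorial example] The 'template'-not-followed-by-'<' lookahead above
+// separates these two cases:
+//
+//   template struct S<int>;            // explicit instantiation
+//   template <typename T> struct S;    // template declaration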
+
+/// \brief Parse a template declaration or an explicit specialization.
+///
+/// Template declarations include one or more template parameter lists
+/// and either the function or class template declaration. Explicit
+/// specializations contain one or more 'template < >' prefixes
+/// followed by a (possibly templated) declaration. Since the
+/// syntactic form of both features is nearly identical, we parse all
+/// of the template headers together and let semantic analysis sort
+/// the declarations from the explicit specializations.
+///
+/// template-declaration: [C++ temp]
+/// 'export'[opt] 'template' '<' template-parameter-list '>' declaration
+///
+/// explicit-specialization: [ C++ temp.expl.spec]
+/// 'template' '<' '>' declaration
+Decl *
+Parser::ParseTemplateDeclarationOrSpecialization(unsigned Context,
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS,
+ AttributeList *AccessAttrs) {
+ assert(Tok.isOneOf(tok::kw_export, tok::kw_template) &&
+ "Token does not start a template declaration.");
+
+ // Enter template-parameter scope.
+ ParseScope TemplateParmScope(this, Scope::TemplateParamScope);
+
+ // Tell the action that names should be checked in the context of
+ // the declaration to come.
+ ParsingDeclRAIIObject
+ ParsingTemplateParams(*this, ParsingDeclRAIIObject::NoParent);
+
+ // Parse multiple levels of template headers within this template
+ // parameter scope, e.g.,
+ //
+ // template<typename T>
+ // template<typename U>
+ // class A<T>::B { ... };
+ //
+ // We parse multiple levels non-recursively so that we can build a
+ // single data structure containing all of the template parameter
+ // lists to easily differentiate between the case above and:
+ //
+ // template<typename T>
+ // class A {
+ // template<typename U> class B;
+ // };
+ //
+ // In the first case, the action for declaring A<T>::B receives
+ // both template parameter lists. In the second case, the action for
+ // defining A<T>::B receives just the inner template parameter list
+ // (and retrieves the outer template parameter list from its
+ // context).
+ bool isSpecialization = true;
+ bool LastParamListWasEmpty = false;
+ TemplateParameterLists ParamLists;
+ TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
+
+ do {
+ // Consume the 'export', if any.
+ SourceLocation ExportLoc;
+ TryConsumeToken(tok::kw_export, ExportLoc);
+
+ // Consume the 'template', which should be here.
+ SourceLocation TemplateLoc;
+ if (!TryConsumeToken(tok::kw_template, TemplateLoc)) {
+ Diag(Tok.getLocation(), diag::err_expected_template);
+ return nullptr;
+ }
+
+ // Parse the '<' template-parameter-list '>'
+ SourceLocation LAngleLoc, RAngleLoc;
+ SmallVector<Decl*, 4> TemplateParams;
+ if (ParseTemplateParameters(CurTemplateDepthTracker.getDepth(),
+ TemplateParams, LAngleLoc, RAngleLoc)) {
+ // Skip until the semi-colon or a '}'.
+ SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
+ TryConsumeToken(tok::semi);
+ return nullptr;
+ }
+
+ ParamLists.push_back(
+ Actions.ActOnTemplateParameterList(CurTemplateDepthTracker.getDepth(),
+ ExportLoc,
+ TemplateLoc, LAngleLoc,
+ TemplateParams, RAngleLoc));
+
+ if (!TemplateParams.empty()) {
+ isSpecialization = false;
+ ++CurTemplateDepthTracker;
+
+ if (TryConsumeToken(tok::kw_requires)) {
+ ExprResult ER =
+ Actions.CorrectDelayedTyposInExpr(ParseConstraintExpression());
+ if (!ER.isUsable()) {
+ // Skip until the semi-colon or a '}'.
+ SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
+ TryConsumeToken(tok::semi);
+ return nullptr;
+ }
+ }
+ } else {
+ LastParamListWasEmpty = true;
+ }
+ } while (Tok.isOneOf(tok::kw_export, tok::kw_template));
+
+ // Parse the actual template declaration.
+ return ParseSingleDeclarationAfterTemplate(Context,
+ ParsedTemplateInfo(&ParamLists,
+ isSpecialization,
+ LastParamListWasEmpty),
+ ParsingTemplateParams,
+ DeclEnd, AS, AccessAttrs);
+}
+
+/// \brief Parse a single declaration that declares a template,
+/// template specialization, or explicit instantiation of a template.
+///
+/// \param DeclEnd will receive the source location of the last token
+/// within this declaration.
+///
+/// \param AS the access specifier associated with this
+/// declaration. Will be AS_none for namespace-scope declarations.
+///
+/// \returns the new declaration.
+Decl *
+Parser::ParseSingleDeclarationAfterTemplate(
+ unsigned Context,
+ const ParsedTemplateInfo &TemplateInfo,
+ ParsingDeclRAIIObject &DiagsFromTParams,
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS,
+ AttributeList *AccessAttrs) {
+ assert(TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate &&
+ "Template information required");
+
+ if (Tok.is(tok::kw_static_assert)) {
+ // A static_assert declaration may not be templated.
+ Diag(Tok.getLocation(), diag::err_templated_invalid_declaration)
+ << TemplateInfo.getSourceRange();
+ // Parse the static_assert declaration to improve error recovery.
+ return ParseStaticAssertDeclaration(DeclEnd);
+ }
+
+ if (Context == Declarator::MemberContext) {
+ // We are parsing a member template.
+ ParseCXXClassMemberDeclaration(AS, AccessAttrs, TemplateInfo,
+ &DiagsFromTParams);
+ return nullptr;
+ }
+
+ ParsedAttributesWithRange prefixAttrs(AttrFactory);
+ MaybeParseCXX11Attributes(prefixAttrs);
+
+ if (Tok.is(tok::kw_using))
+ return ParseUsingDirectiveOrDeclaration(Context, TemplateInfo, DeclEnd,
+ prefixAttrs);
+
+ // Parse the declaration specifiers, stealing any diagnostics from
+ // the template parameters.
+ ParsingDeclSpec DS(*this, &DiagsFromTParams);
+
+ ParseDeclarationSpecifiers(DS, TemplateInfo, AS,
+ getDeclSpecContextFromDeclaratorContext(Context));
+
+ if (Tok.is(tok::semi)) {
+ ProhibitAttributes(prefixAttrs);
+ DeclEnd = ConsumeToken();
+ Decl *Decl = Actions.ParsedFreeStandingDeclSpec(
+ getCurScope(), AS, DS,
+ TemplateInfo.TemplateParams ? *TemplateInfo.TemplateParams
+ : MultiTemplateParamsArg(),
+ TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation);
+ DS.complete(Decl);
+ return Decl;
+ }
+
+ // Move the attributes from the prefix into the DS.
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation)
+ ProhibitAttributes(prefixAttrs);
+ else
+ DS.takeAttributesFrom(prefixAttrs);
+
+ // Parse the declarator.
+ ParsingDeclarator DeclaratorInfo(*this, DS, (Declarator::TheContext)Context);
+ ParseDeclarator(DeclaratorInfo);
+ // Error parsing the declarator?
+ if (!DeclaratorInfo.hasName()) {
+ // If so, skip until the semi-colon or a }.
+ SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ return nullptr;
+ }
+
+ LateParsedAttrList LateParsedAttrs(true);
+ if (DeclaratorInfo.isFunctionDeclarator())
+ MaybeParseGNUAttributes(DeclaratorInfo, &LateParsedAttrs);
+
+ if (DeclaratorInfo.isFunctionDeclarator() &&
+ isStartOfFunctionDefinition(DeclaratorInfo)) {
+
+ // Function definitions are only allowed at file scope and in C++ classes.
+ // The C++ inline method definition case is handled elsewhere, so we only
+ // need to handle the file scope definition case.
+ if (Context != Declarator::FileContext) {
+ Diag(Tok, diag::err_function_definition_not_allowed);
+ SkipMalformedDecl();
+ return nullptr;
+ }
+
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
+ // Recover by ignoring the 'typedef'. This was probably supposed to be
+ // the 'typename' keyword, which we should have already suggested adding
+ // if it's appropriate.
+ Diag(DS.getStorageClassSpecLoc(), diag::err_function_declared_typedef)
+ << FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
+ DS.ClearStorageClassSpecs();
+ }
+
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation) {
+ if (DeclaratorInfo.getName().getKind() != UnqualifiedId::IK_TemplateId) {
+ // If the declarator-id is not a template-id, issue a diagnostic and
+ // recover by ignoring the 'template' keyword.
+ Diag(Tok, diag::err_template_defn_explicit_instantiation) << 0;
+ return ParseFunctionDefinition(DeclaratorInfo, ParsedTemplateInfo(),
+ &LateParsedAttrs);
+ } else {
+ SourceLocation LAngleLoc
+ = PP.getLocForEndOfToken(TemplateInfo.TemplateLoc);
+ Diag(DeclaratorInfo.getIdentifierLoc(),
+ diag::err_explicit_instantiation_with_definition)
+ << SourceRange(TemplateInfo.TemplateLoc)
+ << FixItHint::CreateInsertion(LAngleLoc, "<>");
+
+ // Recover as if it were an explicit specialization.
+ TemplateParameterLists FakedParamLists;
+ FakedParamLists.push_back(Actions.ActOnTemplateParameterList(
+ 0, SourceLocation(), TemplateInfo.TemplateLoc, LAngleLoc, None,
+ LAngleLoc));
+
+ return ParseFunctionDefinition(
+ DeclaratorInfo, ParsedTemplateInfo(&FakedParamLists,
+ /*isSpecialization=*/true,
+ /*LastParamListWasEmpty=*/true),
+ &LateParsedAttrs);
+ }
+ }
+ return ParseFunctionDefinition(DeclaratorInfo, TemplateInfo,
+ &LateParsedAttrs);
+ }
+
+ // Parse this declaration.
+ Decl *ThisDecl = ParseDeclarationAfterDeclarator(DeclaratorInfo,
+ TemplateInfo);
+
+ if (Tok.is(tok::comma)) {
+ Diag(Tok, diag::err_multiple_template_declarators)
+ << (int)TemplateInfo.Kind;
+ SkipUntil(tok::semi);
+ return ThisDecl;
+ }
+
+  // Eat the semicolon after the declaration.
+ ExpectAndConsumeSemi(diag::err_expected_semi_declaration);
+ if (LateParsedAttrs.size() > 0)
+ ParseLexedAttributeList(LateParsedAttrs, ThisDecl, true, false);
+ DeclaratorInfo.complete(ThisDecl);
+ return ThisDecl;
+}
+
+/// ParseTemplateParameters - Parses a template-parameter-list enclosed in
+/// angle brackets. Depth is the depth of this template-parameter-list, which
+/// is the number of template headers directly enclosing this template header.
+/// TemplateParams is the current list of template parameters we're building.
+/// The template parameters we parse will be added to this list. LAngleLoc and
+/// RAngleLoc will receive the positions of the '<' and '>', respectively,
+/// that enclose this template parameter list.
+///
+/// \returns true if an error occurred, false otherwise.
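+///
+/// For example (illustrative), given
+/// \code
+///   template<typename T, int N> struct Array;
+/// \endcode
+/// this routine parses '<typename T, int N>' into TemplateParams and records
+/// the positions of the angle brackets in LAngleLoc and RAngleLoc.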
+bool Parser::ParseTemplateParameters(unsigned Depth,
+ SmallVectorImpl<Decl*> &TemplateParams,
+ SourceLocation &LAngleLoc,
+ SourceLocation &RAngleLoc) {
+ // Get the template parameter list.
+ if (!TryConsumeToken(tok::less, LAngleLoc)) {
+ Diag(Tok.getLocation(), diag::err_expected_less_after) << "template";
+ return true;
+ }
+
+ // Try to parse the template parameter list.
+ bool Failed = false;
+ if (!Tok.is(tok::greater) && !Tok.is(tok::greatergreater))
+ Failed = ParseTemplateParameterList(Depth, TemplateParams);
+
+ if (Tok.is(tok::greatergreater)) {
+ // No diagnostic required here: a template-parameter-list can only be
+ // followed by a declaration or, for a template template parameter, the
+ // 'class' keyword. Therefore, the second '>' will be diagnosed later.
+ // This matters for elegant diagnosis of:
+ // template<template<typename>> struct S;
+ Tok.setKind(tok::greater);
+ RAngleLoc = Tok.getLocation();
+ Tok.setLocation(Tok.getLocation().getLocWithOffset(1));
+ } else if (!TryConsumeToken(tok::greater, RAngleLoc) && Failed) {
+ Diag(Tok.getLocation(), diag::err_expected) << tok::greater;
+ return true;
+ }
+ return false;
+}
+
+/// ParseTemplateParameterList - Parse a template parameter list. If
+/// the parsing fails badly (i.e., closing bracket was left out), this
+/// will try to put the token stream in a reasonable position (closing
+/// a statement, etc.) and return false.
+///
+/// template-parameter-list: [C++ temp]
+/// template-parameter
+/// template-parameter-list ',' template-parameter
+bool
+Parser::ParseTemplateParameterList(unsigned Depth,
+ SmallVectorImpl<Decl*> &TemplateParams) {
+ while (1) {
+ if (Decl *TmpParam
+ = ParseTemplateParameter(Depth, TemplateParams.size())) {
+ TemplateParams.push_back(TmpParam);
+ } else {
+ // If we failed to parse a template parameter, skip until we find
+ // a comma or closing brace.
+ SkipUntil(tok::comma, tok::greater, tok::greatergreater,
+ StopAtSemi | StopBeforeMatch);
+ }
+
+ // Did we find a comma or the end of the template parameter list?
+ if (Tok.is(tok::comma)) {
+ ConsumeToken();
+ } else if (Tok.isOneOf(tok::greater, tok::greatergreater)) {
+      // Don't consume this... that's done by the template parser.
+ break;
+ } else {
+ // Somebody probably forgot to close the template. Skip ahead and
+ // try to get out of the expression. This error is currently
+ // subsumed by whatever goes on in ParseTemplateParameter.
+ Diag(Tok.getLocation(), diag::err_expected_comma_greater);
+ SkipUntil(tok::comma, tok::greater, tok::greatergreater,
+ StopAtSemi | StopBeforeMatch);
+ return false;
+ }
+ }
+ return true;
+}
+
+/// \brief Determine whether the parser is at the start of a template
+/// type parameter.
+bool Parser::isStartOfTemplateTypeParameter() {
+ if (Tok.is(tok::kw_class)) {
+ // "class" may be the start of an elaborated-type-specifier or a
+ // type-parameter. Per C++ [temp.param]p3, we prefer the type-parameter.
+ switch (NextToken().getKind()) {
+ case tok::equal:
+ case tok::comma:
+ case tok::greater:
+ case tok::greatergreater:
+ case tok::ellipsis:
+ return true;
+
+ case tok::identifier:
+ // This may be either a type-parameter or an elaborated-type-specifier.
+ // We have to look further.
+ break;
+
+ default:
+ return false;
+ }
+
+ switch (GetLookAheadToken(2).getKind()) {
+ case tok::equal:
+ case tok::comma:
+ case tok::greater:
+ case tok::greatergreater:
+ return true;
+
+ default:
+ return false;
+ }
+ }
+
+ if (Tok.isNot(tok::kw_typename))
+ return false;
+
+ // C++ [temp.param]p2:
+ // There is no semantic difference between class and typename in a
+ // template-parameter. typename followed by an unqualified-id
+ // names a template type parameter. typename followed by a
+ // qualified-id denotes the type in a non-type
+ // parameter-declaration.
+ Token Next = NextToken();
+
+ // If we have an identifier, skip over it.
+ if (Next.getKind() == tok::identifier)
+ Next = GetLookAheadToken(2);
+
+ switch (Next.getKind()) {
+ case tok::equal:
+ case tok::comma:
+ case tok::greater:
+ case tok::greatergreater:
+ case tok::ellipsis:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/// ParseTemplateParameter - Parse a template-parameter (C++ [temp.param]).
+///
+/// template-parameter: [C++ temp.param]
+/// type-parameter
+/// parameter-declaration
+///
+/// type-parameter: (see below)
+/// 'class' ...[opt] identifier[opt]
+/// 'class' identifier[opt] '=' type-id
+/// 'typename' ...[opt] identifier[opt]
+/// 'typename' identifier[opt] '=' type-id
+/// 'template' '<' template-parameter-list '>'
+/// 'class' ...[opt] identifier[opt]
+/// 'template' '<' template-parameter-list '>' 'class' identifier[opt]
+/// = id-expression
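+///
+/// For example (illustrative), one parameter of each kind:
+/// \code
+///   template<class T,                    // type-parameter
+///            int N,                      // parameter-declaration (non-type)
+///            template<class> class C>    // template template parameter
+///   struct X;
+/// \endcode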
+Decl *Parser::ParseTemplateParameter(unsigned Depth, unsigned Position) {
+ if (isStartOfTemplateTypeParameter())
+ return ParseTypeParameter(Depth, Position);
+
+ if (Tok.is(tok::kw_template))
+ return ParseTemplateTemplateParameter(Depth, Position);
+
+ // If it's none of the above, then it must be a parameter declaration.
+ // NOTE: This will pick up errors in the closure of the template parameter
+  // list (e.g., 'template <;'). Check here to implement >> style closures.
+ return ParseNonTypeTemplateParameter(Depth, Position);
+}
+
+/// ParseTypeParameter - Parse a template type parameter (C++ [temp.param]).
+/// Other kinds of template parameters are parsed in
+/// ParseTemplateTemplateParameter and ParseNonTypeTemplateParameter.
+///
+/// type-parameter: [C++ temp.param]
+/// 'class' ...[opt][C++0x] identifier[opt]
+/// 'class' identifier[opt] '=' type-id
+/// 'typename' ...[opt][C++0x] identifier[opt]
+/// 'typename' identifier[opt] '=' type-id
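+///
+/// For example (illustrative):
+/// \code
+///   template<typename T = int> struct A;   // defaulted type-parameter
+///   template<typename... Ts> struct B;     // template parameter pack
+/// \endcode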
+Decl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
+ assert(Tok.isOneOf(tok::kw_class, tok::kw_typename) &&
+ "A type-parameter starts with 'class' or 'typename'");
+
+ // Consume the 'class' or 'typename' keyword.
+ bool TypenameKeyword = Tok.is(tok::kw_typename);
+ SourceLocation KeyLoc = ConsumeToken();
+
+ // Grab the ellipsis (if given).
+ SourceLocation EllipsisLoc;
+ if (TryConsumeToken(tok::ellipsis, EllipsisLoc)) {
+ Diag(EllipsisLoc,
+ getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_variadic_templates
+ : diag::ext_variadic_templates);
+ }
+
+ // Grab the template parameter name (if given)
+ SourceLocation NameLoc;
+ IdentifierInfo *ParamName = nullptr;
+ if (Tok.is(tok::identifier)) {
+ ParamName = Tok.getIdentifierInfo();
+ NameLoc = ConsumeToken();
+ } else if (Tok.isOneOf(tok::equal, tok::comma, tok::greater,
+ tok::greatergreater)) {
+ // Unnamed template parameter. Don't have to do anything here, just
+ // don't consume this token.
+ } else {
+ Diag(Tok.getLocation(), diag::err_expected) << tok::identifier;
+ return nullptr;
+ }
+
+ // Recover from misplaced ellipsis.
+ bool AlreadyHasEllipsis = EllipsisLoc.isValid();
+ if (TryConsumeToken(tok::ellipsis, EllipsisLoc))
+ DiagnoseMisplacedEllipsis(EllipsisLoc, NameLoc, AlreadyHasEllipsis, true);
+
+ // Grab a default argument (if available).
+ // Per C++0x [basic.scope.pdecl]p9, we parse the default argument before
+ // we introduce the type parameter into the local scope.
+ SourceLocation EqualLoc;
+ ParsedType DefaultArg;
+ if (TryConsumeToken(tok::equal, EqualLoc))
+ DefaultArg = ParseTypeName(/*Range=*/nullptr,
+ Declarator::TemplateTypeArgContext).get();
+
+ return Actions.ActOnTypeParameter(getCurScope(), TypenameKeyword, EllipsisLoc,
+ KeyLoc, ParamName, NameLoc, Depth, Position,
+ EqualLoc, DefaultArg);
+}
+
+/// ParseTemplateTemplateParameter - Handle the parsing of template
+/// template parameters.
+///
+/// type-parameter: [C++ temp.param]
+/// 'template' '<' template-parameter-list '>' type-parameter-key
+/// ...[opt] identifier[opt]
+/// 'template' '<' template-parameter-list '>' type-parameter-key
+/// identifier[opt] = id-expression
+/// type-parameter-key:
+/// 'class'
+/// 'typename' [C++1z]
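+///
+/// For example (illustrative):
+/// \code
+///   template<template<typename> class Seq> struct Adaptor;      // 'class'
+///   template<template<typename> typename Seq> struct Adaptor2;  // C++1z
+/// \endcode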
+Decl *
+Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
+ assert(Tok.is(tok::kw_template) && "Expected 'template' keyword");
+
+ // Handle the template <...> part.
+ SourceLocation TemplateLoc = ConsumeToken();
+ SmallVector<Decl*,8> TemplateParams;
+ SourceLocation LAngleLoc, RAngleLoc;
+ {
+ ParseScope TemplateParmScope(this, Scope::TemplateParamScope);
+ if (ParseTemplateParameters(Depth + 1, TemplateParams, LAngleLoc,
+ RAngleLoc)) {
+ return nullptr;
+ }
+ }
+
+  // Provide an ExtWarn for the C++1z feature of using 'typename' here.
+  // Generate a meaningful error if the user forgot to put 'class' before the
+  // identifier, comma, or greater. Provide a fixit if the identifier, comma,
+  // or greater appears immediately or after 'struct'; in the latter case,
+  // replace the keyword with 'class'.
+ if (!TryConsumeToken(tok::kw_class)) {
+ bool Replace = Tok.isOneOf(tok::kw_typename, tok::kw_struct);
+ const Token &Next = Tok.is(tok::kw_struct) ? NextToken() : Tok;
+ if (Tok.is(tok::kw_typename)) {
+ Diag(Tok.getLocation(),
+ getLangOpts().CPlusPlus1z
+ ? diag::warn_cxx14_compat_template_template_param_typename
+ : diag::ext_template_template_param_typename)
+ << (!getLangOpts().CPlusPlus1z
+ ? FixItHint::CreateReplacement(Tok.getLocation(), "class")
+ : FixItHint());
+ } else if (Next.isOneOf(tok::identifier, tok::comma, tok::greater,
+ tok::greatergreater, tok::ellipsis)) {
+ Diag(Tok.getLocation(), diag::err_class_on_template_template_param)
+ << (Replace ? FixItHint::CreateReplacement(Tok.getLocation(), "class")
+ : FixItHint::CreateInsertion(Tok.getLocation(), "class "));
+ } else
+ Diag(Tok.getLocation(), diag::err_class_on_template_template_param);
+
+ if (Replace)
+ ConsumeToken();
+ }
+
+ // Parse the ellipsis, if given.
+ SourceLocation EllipsisLoc;
+ if (TryConsumeToken(tok::ellipsis, EllipsisLoc))
+ Diag(EllipsisLoc,
+ getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_variadic_templates
+ : diag::ext_variadic_templates);
+
+ // Get the identifier, if given.
+ SourceLocation NameLoc;
+ IdentifierInfo *ParamName = nullptr;
+ if (Tok.is(tok::identifier)) {
+ ParamName = Tok.getIdentifierInfo();
+ NameLoc = ConsumeToken();
+ } else if (Tok.isOneOf(tok::equal, tok::comma, tok::greater,
+ tok::greatergreater)) {
+ // Unnamed template parameter. Don't have to do anything here, just
+ // don't consume this token.
+ } else {
+ Diag(Tok.getLocation(), diag::err_expected) << tok::identifier;
+ return nullptr;
+ }
+
+ // Recover from misplaced ellipsis.
+ bool AlreadyHasEllipsis = EllipsisLoc.isValid();
+ if (TryConsumeToken(tok::ellipsis, EllipsisLoc))
+ DiagnoseMisplacedEllipsis(EllipsisLoc, NameLoc, AlreadyHasEllipsis, true);
+
+ TemplateParameterList *ParamList =
+ Actions.ActOnTemplateParameterList(Depth, SourceLocation(),
+ TemplateLoc, LAngleLoc,
+ TemplateParams,
+ RAngleLoc);
+
+ // Grab a default argument (if available).
+ // Per C++0x [basic.scope.pdecl]p9, we parse the default argument before
+ // we introduce the template parameter into the local scope.
+ SourceLocation EqualLoc;
+ ParsedTemplateArgument DefaultArg;
+ if (TryConsumeToken(tok::equal, EqualLoc)) {
+ DefaultArg = ParseTemplateTemplateArgument();
+ if (DefaultArg.isInvalid()) {
+ Diag(Tok.getLocation(),
+ diag::err_default_template_template_parameter_not_template);
+ SkipUntil(tok::comma, tok::greater, tok::greatergreater,
+ StopAtSemi | StopBeforeMatch);
+ }
+ }
+
+ return Actions.ActOnTemplateTemplateParameter(getCurScope(), TemplateLoc,
+ ParamList, EllipsisLoc,
+ ParamName, NameLoc, Depth,
+ Position, EqualLoc, DefaultArg);
+}
+
+/// ParseNonTypeTemplateParameter - Handle the parsing of non-type
+/// template parameters (e.g., in "template<int Size> class array;").
+///
+/// template-parameter:
+/// ...
+/// parameter-declaration
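+///
+/// For example (illustrative), per C++ [temp.param]p15 the default argument
+/// below must be parenthesized; otherwise the first non-nested '>' would
+/// close the template-parameter-list:
+/// \code
+///   template<int N = (5 > 3)> struct C;
+/// \endcode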
+Decl *
+Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
+ // Parse the declaration-specifiers (i.e., the type).
+ // FIXME: The type should probably be restricted in some way... Not all
+ // declarators (parts of declarators?) are accepted for parameters.
+ DeclSpec DS(AttrFactory);
+ ParseDeclarationSpecifiers(DS);
+
+ // Parse this as a typename.
+ Declarator ParamDecl(DS, Declarator::TemplateParamContext);
+ ParseDeclarator(ParamDecl);
+ if (DS.getTypeSpecType() == DeclSpec::TST_unspecified) {
+ Diag(Tok.getLocation(), diag::err_expected_template_parameter);
+ return nullptr;
+ }
+
+ // Recover from misplaced ellipsis.
+ SourceLocation EllipsisLoc;
+ if (TryConsumeToken(tok::ellipsis, EllipsisLoc))
+ DiagnoseMisplacedEllipsisInDeclarator(EllipsisLoc, ParamDecl);
+
+ // If there is a default value, parse it.
+ // Per C++0x [basic.scope.pdecl]p9, we parse the default argument before
+ // we introduce the template parameter into the local scope.
+ SourceLocation EqualLoc;
+ ExprResult DefaultArg;
+ if (TryConsumeToken(tok::equal, EqualLoc)) {
+ // C++ [temp.param]p15:
+ // When parsing a default template-argument for a non-type
+ // template-parameter, the first non-nested > is taken as the
+ // end of the template-parameter-list rather than a greater-than
+ // operator.
+ GreaterThanIsOperatorScope G(GreaterThanIsOperator, false);
+ EnterExpressionEvaluationContext ConstantEvaluated(Actions,
+ Sema::ConstantEvaluated);
+
+ DefaultArg = Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+ if (DefaultArg.isInvalid())
+ SkipUntil(tok::comma, tok::greater, StopAtSemi | StopBeforeMatch);
+ }
+
+ // Create the parameter.
+ return Actions.ActOnNonTypeTemplateParameter(getCurScope(), ParamDecl,
+ Depth, Position, EqualLoc,
+ DefaultArg.get());
+}
+
+void Parser::DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
+ SourceLocation CorrectLoc,
+ bool AlreadyHasEllipsis,
+ bool IdentifierHasName) {
+ FixItHint Insertion;
+ if (!AlreadyHasEllipsis)
+ Insertion = FixItHint::CreateInsertion(CorrectLoc, "...");
+ Diag(EllipsisLoc, diag::err_misplaced_ellipsis_in_declaration)
+ << FixItHint::CreateRemoval(EllipsisLoc) << Insertion
+ << !IdentifierHasName;
+}
+
+void Parser::DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
+ Declarator &D) {
+ assert(EllipsisLoc.isValid());
+ bool AlreadyHasEllipsis = D.getEllipsisLoc().isValid();
+ if (!AlreadyHasEllipsis)
+ D.setEllipsisLoc(EllipsisLoc);
+ DiagnoseMisplacedEllipsis(EllipsisLoc, D.getIdentifierLoc(),
+ AlreadyHasEllipsis, D.hasName());
+}
+
+/// \brief Parses a '>' at the end of a template list.
+///
+/// If this function encounters '>>', '>>>', '>=', or '>>=', it tries
+/// to determine if these tokens were supposed to be a '>' followed by
+/// '>', '>>', '=', or '>='. It emits an appropriate diagnostic if necessary.
+///
+/// \param RAngleLoc the location of the consumed '>'.
+///
+/// \param ConsumeLastToken if true, the '>' is consumed.
+///
+/// \param ObjCGenericList if true, this is the '>' closing an Objective-C
+/// type parameter or type argument list, rather than a C++ template parameter
+/// or argument list.
+///
+/// \returns true if the current token does not start with '>', false
+/// otherwise.
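+///
+/// For example (illustrative):
+/// \code
+///   vector<vector<int>> v;   // '>>' is split into '>' '>'
+///   return f<int>==p;        // '>=' is split; the '='s rejoin as '=='
+/// \endcode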
+bool Parser::ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
+ bool ConsumeLastToken,
+ bool ObjCGenericList) {
+ // What will be left once we've consumed the '>'.
+ tok::TokenKind RemainingToken;
+ const char *ReplacementStr = "> >";
+
+ switch (Tok.getKind()) {
+ default:
+ Diag(Tok.getLocation(), diag::err_expected) << tok::greater;
+ return true;
+
+ case tok::greater:
+ // Determine the location of the '>' token. Only consume this token
+ // if the caller asked us to.
+ RAngleLoc = Tok.getLocation();
+ if (ConsumeLastToken)
+ ConsumeToken();
+ return false;
+
+ case tok::greatergreater:
+ RemainingToken = tok::greater;
+ break;
+
+ case tok::greatergreatergreater:
+ RemainingToken = tok::greatergreater;
+ break;
+
+ case tok::greaterequal:
+ RemainingToken = tok::equal;
+ ReplacementStr = "> =";
+ break;
+
+ case tok::greatergreaterequal:
+ RemainingToken = tok::greaterequal;
+ break;
+ }
+
+ // This template-id is terminated by a token which starts with a '>'. Outside
+ // C++11, this is now error recovery, and in C++11, this is error recovery if
+ // the token isn't '>>' or '>>>'.
+ // '>>>' is for CUDA, where this sequence of characters is parsed into
+ // tok::greatergreatergreater, rather than two separate tokens.
+ //
+ // We always allow this for Objective-C type parameter and type argument
+ // lists.
+ RAngleLoc = Tok.getLocation();
+ Token Next = NextToken();
+ if (!ObjCGenericList) {
+ // The source range of the '>>' or '>=' at the start of the token.
+ CharSourceRange ReplacementRange =
+ CharSourceRange::getCharRange(RAngleLoc,
+ Lexer::AdvanceToTokenCharacter(RAngleLoc, 2, PP.getSourceManager(),
+ getLangOpts()));
+
+ // A hint to put a space between the '>>'s. In order to make the hint as
+ // clear as possible, we include the characters either side of the space in
+ // the replacement, rather than just inserting a space at SecondCharLoc.
+ FixItHint Hint1 = FixItHint::CreateReplacement(ReplacementRange,
+ ReplacementStr);
+
+ // A hint to put another space after the token, if it would otherwise be
+ // lexed differently.
+ FixItHint Hint2;
+ if ((RemainingToken == tok::greater ||
+ RemainingToken == tok::greatergreater) &&
+ (Next.isOneOf(tok::greater, tok::greatergreater,
+ tok::greatergreatergreater, tok::equal,
+ tok::greaterequal, tok::greatergreaterequal,
+ tok::equalequal)) &&
+ areTokensAdjacent(Tok, Next))
+ Hint2 = FixItHint::CreateInsertion(Next.getLocation(), " ");
+
+ unsigned DiagId = diag::err_two_right_angle_brackets_need_space;
+ if (getLangOpts().CPlusPlus11 &&
+ (Tok.is(tok::greatergreater) || Tok.is(tok::greatergreatergreater)))
+ DiagId = diag::warn_cxx98_compat_two_right_angle_brackets;
+ else if (Tok.is(tok::greaterequal))
+ DiagId = diag::err_right_angle_bracket_equal_needs_space;
+ Diag(Tok.getLocation(), DiagId) << Hint1 << Hint2;
+ }
+
+ // Strip the initial '>' from the token.
+ if (RemainingToken == tok::equal && Next.is(tok::equal) &&
+ areTokensAdjacent(Tok, Next)) {
+ // Join two adjacent '=' tokens into one, for cases like:
+ // void (*p)() = f<int>;
+ // return f<int>==p;
+ ConsumeToken();
+ Tok.setKind(tok::equalequal);
+ Tok.setLength(Tok.getLength() + 1);
+ } else {
+ Tok.setKind(RemainingToken);
+ Tok.setLength(Tok.getLength() - 1);
+ }
+ Tok.setLocation(Lexer::AdvanceToTokenCharacter(RAngleLoc, 1,
+ PP.getSourceManager(),
+ getLangOpts()));
+
+ if (!ConsumeLastToken) {
+ // Since we're not supposed to consume the '>' token, we need to push
+ // this token and revert the current token back to the '>'.
+ PP.EnterToken(Tok);
+ Tok.setKind(tok::greater);
+ Tok.setLength(1);
+ Tok.setLocation(RAngleLoc);
+ }
+ return false;
+}
+
+
+/// \brief Parses a template-id, assuming the template name has
+/// already been parsed.
+///
+/// This routine takes care of parsing the enclosed template argument
+/// list ('<' template-argument-list[opt] '>') and placing the
+/// results into a form that can be transferred to semantic analysis.
+///
+/// \param Template the template declaration produced by isTemplateName
+///
+/// \param TemplateNameLoc the source location of the template name
+///
+/// \param SS if non-NULL, the nested-name-specifier preceding the
+/// template name.
+///
+/// \param ConsumeLastToken if true, then we will consume the last
+/// token that forms the template-id. Otherwise, we will leave the
+/// last token in the stream (e.g., so that it can be replaced with an
+/// annotation token).
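+///
+/// For example (illustrative), given the template-name 'pair' followed by
+/// '<', this routine parses the remainder of
+/// \code
+///   pair<int, double>
+/// \endcode
+/// into LAngleLoc, TemplateArgs, and RAngleLoc.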
+bool
+Parser::ParseTemplateIdAfterTemplateName(TemplateTy Template,
+ SourceLocation TemplateNameLoc,
+ const CXXScopeSpec &SS,
+ bool ConsumeLastToken,
+ SourceLocation &LAngleLoc,
+ TemplateArgList &TemplateArgs,
+ SourceLocation &RAngleLoc) {
+ assert(Tok.is(tok::less) && "Must have already parsed the template-name");
+
+ // Consume the '<'.
+ LAngleLoc = ConsumeToken();
+
+ // Parse the optional template-argument-list.
+ bool Invalid = false;
+ {
+ GreaterThanIsOperatorScope G(GreaterThanIsOperator, false);
+ if (Tok.isNot(tok::greater) && Tok.isNot(tok::greatergreater))
+ Invalid = ParseTemplateArgumentList(TemplateArgs);
+
+ if (Invalid) {
+ // Try to find the closing '>'.
+ if (ConsumeLastToken)
+ SkipUntil(tok::greater, StopAtSemi);
+ else
+ SkipUntil(tok::greater, StopAtSemi | StopBeforeMatch);
+ return true;
+ }
+ }
+
+ return ParseGreaterThanInTemplateList(RAngleLoc, ConsumeLastToken,
+ /*ObjCGenericList=*/false);
+}
+
+/// \brief Replace the tokens that form a simple-template-id with an
+/// annotation token containing the complete template-id.
+///
+/// The first token in the stream must be the name of a template that
+/// is followed by a '<'. This routine will parse the complete
+/// simple-template-id and replace the tokens with a single annotation
+/// token with one of two different kinds: if the template-id names a
+/// type (and \p AllowTypeAnnotation is true), the annotation token is
+/// a type annotation that includes the optional nested-name-specifier
+/// (\p SS). Otherwise, the annotation token is a template-id
+/// annotation that does not include the optional
+/// nested-name-specifier.
+///
+/// \param Template the declaration of the template named by the first
+/// token (an identifier), as returned from \c Action::isTemplateName().
+///
+/// \param TNK the kind of template that \p Template
+/// refers to, as returned from \c Action::isTemplateName().
+///
+/// \param SS if non-NULL, the nested-name-specifier that precedes
+/// this template name.
+///
+/// \param TemplateKWLoc if valid, specifies that this template-id
+/// annotation was preceded by the 'template' keyword and gives the
+/// location of that keyword. If invalid (the default), then this
+/// template-id was not preceded by a 'template' keyword.
+///
+/// \param AllowTypeAnnotation if true (the default), then a
+/// simple-template-id that refers to a class template, template
+/// template parameter, or other template that produces a type will be
+/// replaced with a type annotation token. Otherwise, the
+/// simple-template-id is always replaced with a template-id
+/// annotation token.
+///
+/// If an unrecoverable parse error occurs and no annotation token can be
+/// formed, this function returns true.
+///
+bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ UnqualifiedId &TemplateName,
+ bool AllowTypeAnnotation) {
+ assert(getLangOpts().CPlusPlus && "Can only annotate template-ids in C++");
+ assert(Template && Tok.is(tok::less) &&
+ "Parser isn't at the beginning of a template-id");
+
+ // Consume the template-name.
+ SourceLocation TemplateNameLoc = TemplateName.getSourceRange().getBegin();
+
+ // Parse the enclosed template argument list.
+ SourceLocation LAngleLoc, RAngleLoc;
+ TemplateArgList TemplateArgs;
+ bool Invalid = ParseTemplateIdAfterTemplateName(Template,
+ TemplateNameLoc,
+ SS, false, LAngleLoc,
+ TemplateArgs,
+ RAngleLoc);
+
+ if (Invalid) {
+ // If we failed to parse the template ID but skipped ahead to a >, we're not
+ // going to be able to form a token annotation. Eat the '>' if present.
+ TryConsumeToken(tok::greater);
+ return true;
+ }
+
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateArgs);
+
+ // Build the annotation token.
+ if (TNK == TNK_Type_template && AllowTypeAnnotation) {
+ TypeResult Type
+ = Actions.ActOnTemplateIdType(SS, TemplateKWLoc,
+ Template, TemplateNameLoc,
+ LAngleLoc, TemplateArgsPtr, RAngleLoc);
+ if (Type.isInvalid()) {
+ // If we failed to parse the template ID but skipped ahead to a >, we're not
+ // going to be able to form a token annotation. Eat the '>' if present.
+ TryConsumeToken(tok::greater);
+ return true;
+ }
+
+ Tok.setKind(tok::annot_typename);
+ setTypeAnnotation(Tok, Type.get());
+ if (SS.isNotEmpty())
+ Tok.setLocation(SS.getBeginLoc());
+ else if (TemplateKWLoc.isValid())
+ Tok.setLocation(TemplateKWLoc);
+ else
+ Tok.setLocation(TemplateNameLoc);
+ } else {
+ // Build a template-id annotation token that can be processed
+ // later.
+ Tok.setKind(tok::annot_template_id);
+ TemplateIdAnnotation *TemplateId
+ = TemplateIdAnnotation::Allocate(TemplateArgs.size(), TemplateIds);
+ TemplateId->TemplateNameLoc = TemplateNameLoc;
+ if (TemplateName.getKind() == UnqualifiedId::IK_Identifier) {
+ TemplateId->Name = TemplateName.Identifier;
+ TemplateId->Operator = OO_None;
+ } else {
+ TemplateId->Name = nullptr;
+ TemplateId->Operator = TemplateName.OperatorFunctionId.Operator;
+ }
+ TemplateId->SS = SS;
+ TemplateId->TemplateKWLoc = TemplateKWLoc;
+ TemplateId->Template = Template;
+ TemplateId->Kind = TNK;
+ TemplateId->LAngleLoc = LAngleLoc;
+ TemplateId->RAngleLoc = RAngleLoc;
+ ParsedTemplateArgument *Args = TemplateId->getTemplateArgs();
+ for (unsigned Arg = 0, ArgEnd = TemplateArgs.size(); Arg != ArgEnd; ++Arg)
+ Args[Arg] = ParsedTemplateArgument(TemplateArgs[Arg]);
+ Tok.setAnnotationValue(TemplateId);
+ if (TemplateKWLoc.isValid())
+ Tok.setLocation(TemplateKWLoc);
+ else
+ Tok.setLocation(TemplateNameLoc);
+ }
+
+ // Common fields for the annotation token
+ Tok.setAnnotationEndLoc(RAngleLoc);
+
+ // In case the tokens were cached, have Preprocessor replace them with the
+ // annotation token.
+ PP.AnnotateCachedTokens(Tok);
+ return false;
+}
+
+/// \brief Replaces a template-id annotation token with a type
+/// annotation token.
+///
+/// If there was a failure when forming the type from the template-id,
+/// a type annotation token will still be created, but will have a
+/// NULL type pointer to signify an error.
+void Parser::AnnotateTemplateIdTokenAsType() {
+ assert(Tok.is(tok::annot_template_id) && "Requires template-id tokens");
+
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ assert((TemplateId->Kind == TNK_Type_template ||
+ TemplateId->Kind == TNK_Dependent_template_name) &&
+ "Only works for type and dependent templates");
+
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+
+ TypeResult Type
+ = Actions.ActOnTemplateIdType(TemplateId->SS,
+ TemplateId->TemplateKWLoc,
+ TemplateId->Template,
+ TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc,
+ TemplateArgsPtr,
+ TemplateId->RAngleLoc);
+ // Create the new "type" annotation token.
+ Tok.setKind(tok::annot_typename);
+ setTypeAnnotation(Tok, Type.isInvalid() ? ParsedType() : Type.get());
+ if (TemplateId->SS.isNotEmpty()) // it was a C++ qualified type name.
+ Tok.setLocation(TemplateId->SS.getBeginLoc());
+ // End location stays the same
+
+  // Replace the template-id annotation token, and possibly the scope-specifier
+ // that precedes it, with the typename annotation token.
+ PP.AnnotateCachedTokens(Tok);
+}
+
+/// \brief Determine whether the given token can end a template argument.
+static bool isEndOfTemplateArgument(Token Tok) {
+ return Tok.isOneOf(tok::comma, tok::greater, tok::greatergreater);
+}
+
+/// \brief Parse a C++ template template argument.
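+///
+/// For example (illustrative, assuming 'Stack' declares a suitable template
+/// template parameter), 'std::vector' is such an argument in:
+/// \code
+///   Stack<std::vector> s;
+/// \endcode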
+ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
+ if (!Tok.is(tok::identifier) && !Tok.is(tok::coloncolon) &&
+ !Tok.is(tok::annot_cxxscope))
+ return ParsedTemplateArgument();
+
+ // C++0x [temp.arg.template]p1:
+ // A template-argument for a template template-parameter shall be the name
+ // of a class template or an alias template, expressed as id-expression.
+ //
+ // We parse an id-expression that refers to a class template or alias
+ // template. The grammar we parse is:
+ //
+ // nested-name-specifier[opt] template[opt] identifier ...[opt]
+ //
+ // followed by a token that terminates a template argument, such as ',',
+ // '>', or (in some cases) '>>'.
+ CXXScopeSpec SS; // nested-name-specifier, if present
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
+ /*EnteringContext=*/false);
+
+ ParsedTemplateArgument Result;
+ SourceLocation EllipsisLoc;
+ if (SS.isSet() && Tok.is(tok::kw_template)) {
+ // Parse the optional 'template' keyword following the
+ // nested-name-specifier.
+ SourceLocation TemplateKWLoc = ConsumeToken();
+
+ if (Tok.is(tok::identifier)) {
+ // We appear to have a dependent template name.
+ UnqualifiedId Name;
+ Name.setIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
+ ConsumeToken(); // the identifier
+
+ TryConsumeToken(tok::ellipsis, EllipsisLoc);
+
+ // If the next token signals the end of a template argument,
+ // then we have a dependent template name that could be a template
+ // template argument.
+ TemplateTy Template;
+ if (isEndOfTemplateArgument(Tok) &&
+ Actions.ActOnDependentTemplateName(getCurScope(),
+ SS, TemplateKWLoc, Name,
+ /*ObjectType=*/ ParsedType(),
+ /*EnteringContext=*/false,
+ Template))
+ Result = ParsedTemplateArgument(SS, Template, Name.StartLocation);
+ }
+ } else if (Tok.is(tok::identifier)) {
+ // We may have a (non-dependent) template name.
+ TemplateTy Template;
+ UnqualifiedId Name;
+ Name.setIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
+ ConsumeToken(); // the identifier
+
+ TryConsumeToken(tok::ellipsis, EllipsisLoc);
+
+ if (isEndOfTemplateArgument(Tok)) {
+ bool MemberOfUnknownSpecialization;
+ TemplateNameKind TNK = Actions.isTemplateName(getCurScope(), SS,
+ /*hasTemplateKeyword=*/false,
+ Name,
+ /*ObjectType=*/ ParsedType(),
+ /*EnteringContext=*/false,
+ Template,
+ MemberOfUnknownSpecialization);
+ if (TNK == TNK_Dependent_template_name || TNK == TNK_Type_template) {
+ // We have an id-expression that refers to a class template or
+ // (C++0x) alias template.
+ Result = ParsedTemplateArgument(SS, Template, Name.StartLocation);
+ }
+ }
+ }
+
+ // If this is a pack expansion, build it as such.
+ if (EllipsisLoc.isValid() && !Result.isInvalid())
+ Result = Actions.ActOnPackExpansion(Result, EllipsisLoc);
+
+ return Result;
+}
+
+/// ParseTemplateArgument - Parse a C++ template argument (C++ [temp.names]).
+///
+/// template-argument: [C++ 14.2]
+/// constant-expression
+/// type-id
+/// id-expression
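+///
+/// For example (illustrative), one argument of each form:
+/// \code
+///   X<int, 3 + 4, f> x;   // type-id, constant-expression, id-expression
+/// \endcode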
+ParsedTemplateArgument Parser::ParseTemplateArgument() {
+ // C++ [temp.arg]p2:
+ // In a template-argument, an ambiguity between a type-id and an
+ // expression is resolved to a type-id, regardless of the form of
+ // the corresponding template-parameter.
+ //
+ // Therefore, we initially try to parse a type-id.
+ if (isCXXTypeId(TypeIdAsTemplateArgument)) {
+ SourceLocation Loc = Tok.getLocation();
+ TypeResult TypeArg = ParseTypeName(/*Range=*/nullptr,
+ Declarator::TemplateTypeArgContext);
+ if (TypeArg.isInvalid())
+ return ParsedTemplateArgument();
+
+ return ParsedTemplateArgument(ParsedTemplateArgument::Type,
+ TypeArg.get().getAsOpaquePtr(),
+ Loc);
+ }
+
+ // Try to parse a template template argument.
+ {
+ TentativeParsingAction TPA(*this);
+
+ ParsedTemplateArgument TemplateTemplateArgument
+ = ParseTemplateTemplateArgument();
+ if (!TemplateTemplateArgument.isInvalid()) {
+ TPA.Commit();
+ return TemplateTemplateArgument;
+ }
+
+ // Revert this tentative parse to parse a non-type template argument.
+ TPA.Revert();
+ }
+
+ // Parse a non-type template argument.
+ SourceLocation Loc = Tok.getLocation();
+ ExprResult ExprArg = ParseConstantExpression(MaybeTypeCast);
+ if (ExprArg.isInvalid() || !ExprArg.get())
+ return ParsedTemplateArgument();
+
+ return ParsedTemplateArgument(ParsedTemplateArgument::NonType,
+ ExprArg.get(), Loc);
+}
+
+/// \brief Determine whether the current tokens can only be parsed as a
+/// template argument list (starting with the '<') and never as a '<'
+/// expression.
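+///
+/// For example (illustrative), 'A<B, C>' can only be a template argument
+/// list, while 'a < b' may simply be a comparison:
+/// \code
+///   A<B, C> x;        // '<' starts a template argument list
+///   bool y = a < b;   // '<' is the less-than operator
+/// \endcode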
+bool Parser::IsTemplateArgumentList(unsigned Skip) {
+ struct AlwaysRevertAction : TentativeParsingAction {
+ AlwaysRevertAction(Parser &P) : TentativeParsingAction(P) { }
+ ~AlwaysRevertAction() { Revert(); }
+ } Tentative(*this);
+
+ while (Skip) {
+ ConsumeToken();
+ --Skip;
+ }
+
+ // '<'
+ if (!TryConsumeToken(tok::less))
+ return false;
+
+ // An empty template argument list.
+ if (Tok.is(tok::greater))
+ return true;
+
+ // See whether we have declaration specifiers, which indicate a type.
+ while (isCXXDeclarationSpecifier() == TPResult::True)
+ ConsumeToken();
+
+ // If we have a '>' or a ',' then this is a template argument list.
+ return Tok.isOneOf(tok::greater, tok::comma);
+}
+
+/// ParseTemplateArgumentList - Parse a C++ template-argument-list
+/// (C++ [temp.names]). Returns true if there was an error.
+///
+/// template-argument-list: [C++ 14.2]
+/// template-argument
+/// template-argument-list ',' template-argument
+bool
+Parser::ParseTemplateArgumentList(TemplateArgList &TemplateArgs) {
+ // Template argument lists are constant-evaluation contexts.
+ EnterExpressionEvaluationContext EvalContext(Actions,Sema::ConstantEvaluated);
+ ColonProtectionRAIIObject ColonProtection(*this, false);
+
+ do {
+ ParsedTemplateArgument Arg = ParseTemplateArgument();
+ SourceLocation EllipsisLoc;
+ if (TryConsumeToken(tok::ellipsis, EllipsisLoc))
+ Arg = Actions.ActOnPackExpansion(Arg, EllipsisLoc);
+
+ if (Arg.isInvalid()) {
+ SkipUntil(tok::comma, tok::greater, StopAtSemi | StopBeforeMatch);
+ return true;
+ }
+
+ // Save this template argument.
+ TemplateArgs.push_back(Arg);
+
+ // If the next token is a comma, consume it and keep reading
+ // arguments.
+ } while (TryConsumeToken(tok::comma));
+
+ return false;
+}
+
+/// \brief Parse a C++ explicit template instantiation
+/// (C++ [temp.explicit]).
+///
+/// explicit-instantiation:
+/// 'extern' [opt] 'template' declaration
+///
+/// Note that the 'extern' is a GNU extension and C++11 feature.
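+///
+/// For example (illustrative):
+/// \code
+///   template class std::vector<int>;          // explicit instantiation
+///   extern template class std::vector<int>;   // suppressed instantiation
+/// \endcode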
+Decl *Parser::ParseExplicitInstantiation(unsigned Context,
+ SourceLocation ExternLoc,
+ SourceLocation TemplateLoc,
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS) {
+  // A ParsingDeclRAIIObject isn't really required here, but
+  // ParseSingleDeclarationAfterTemplate expects one to be passed in.
+ ParsingDeclRAIIObject
+ ParsingTemplateParams(*this, ParsingDeclRAIIObject::NoParent);
+
+ return ParseSingleDeclarationAfterTemplate(Context,
+ ParsedTemplateInfo(ExternLoc,
+ TemplateLoc),
+ ParsingTemplateParams,
+ DeclEnd, AS);
+}
+
+SourceRange Parser::ParsedTemplateInfo::getSourceRange() const {
+ if (TemplateParams)
+ return getTemplateParamsRange(TemplateParams->data(),
+ TemplateParams->size());
+
+ SourceRange R(TemplateLoc);
+ if (ExternLoc.isValid())
+ R.setBegin(ExternLoc);
+ return R;
+}
+
+void Parser::LateTemplateParserCallback(void *P, LateParsedTemplate &LPT) {
+ ((Parser *)P)->ParseLateTemplatedFuncDef(LPT);
+}
+
+/// \brief Late parse a C++ function template in Microsoft mode.
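+///
+/// For example (illustrative), under MSVC-compatible delayed template
+/// parsing the body of
+/// \code
+///   template<class T> void f() { g(); }
+/// \endcode
+/// is stored as a token stream and only parsed here, so 'g' need not be
+/// declared before 'f' is defined.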
+void Parser::ParseLateTemplatedFuncDef(LateParsedTemplate &LPT) {
+ if (!LPT.D)
+ return;
+
+ // Get the FunctionDecl.
+ FunctionDecl *FunD = LPT.D->getAsFunction();
+ // Track template parameter depth.
+ TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
+
+ // To restore the context after late parsing.
+ Sema::ContextRAII GlobalSavedContext(
+ Actions, Actions.Context.getTranslationUnitDecl());
+
+ SmallVector<ParseScope*, 4> TemplateParamScopeStack;
+
+ // Get the list of DeclContexts to reenter.
+ SmallVector<DeclContext*, 4> DeclContextsToReenter;
+ DeclContext *DD = FunD;
+ while (DD && !DD->isTranslationUnit()) {
+ DeclContextsToReenter.push_back(DD);
+ DD = DD->getLexicalParent();
+ }
+
+ // Reenter template scopes from outermost to innermost.
+ SmallVectorImpl<DeclContext *>::reverse_iterator II =
+ DeclContextsToReenter.rbegin();
+ for (; II != DeclContextsToReenter.rend(); ++II) {
+ TemplateParamScopeStack.push_back(new ParseScope(this,
+ Scope::TemplateParamScope));
+ unsigned NumParamLists =
+ Actions.ActOnReenterTemplateScope(getCurScope(), cast<Decl>(*II));
+ CurTemplateDepthTracker.addDepth(NumParamLists);
+ if (*II != FunD) {
+ TemplateParamScopeStack.push_back(new ParseScope(this, Scope::DeclScope));
+ Actions.PushDeclContext(Actions.getCurScope(), *II);
+ }
+ }
+
+ assert(!LPT.Toks.empty() && "Empty body!");
+
+ // Append the current token at the end of the new token stream so that it
+ // doesn't get lost.
+ LPT.Toks.push_back(Tok);
+ PP.EnterTokenStream(LPT.Toks.data(), LPT.Toks.size(), true, false);
+
+ // Consume the previously pushed token.
+ ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
+ assert(Tok.isOneOf(tok::l_brace, tok::colon, tok::kw_try) &&
+ "Inline method not starting with '{', ':' or 'try'");
+
+ // Parse the method body. Function body parsing code is similar enough
+ // to be re-used for method bodies as well.
+ ParseScope FnScope(this, Scope::FnScope|Scope::DeclScope);
+
+ // Recreate the containing function DeclContext.
+ Sema::ContextRAII FunctionSavedContext(Actions,
+ Actions.getContainingDC(FunD));
+
+ Actions.ActOnStartOfFunctionDef(getCurScope(), FunD);
+
+ if (Tok.is(tok::kw_try)) {
+ ParseFunctionTryBlock(LPT.D, FnScope);
+ } else {
+ if (Tok.is(tok::colon))
+ ParseConstructorInitializer(LPT.D);
+ else
+ Actions.ActOnDefaultCtorInitializers(LPT.D);
+
+ if (Tok.is(tok::l_brace)) {
+ assert((!isa<FunctionTemplateDecl>(LPT.D) ||
+ cast<FunctionTemplateDecl>(LPT.D)
+ ->getTemplateParameters()
+ ->getDepth() == TemplateParameterDepth - 1) &&
+ "TemplateParameterDepth should be greater than the depth of "
+ "current template being instantiated!");
+ ParseFunctionStatementBody(LPT.D, FnScope);
+ Actions.UnmarkAsLateParsedTemplate(FunD);
+ } else
+ Actions.ActOnFinishFunctionBody(LPT.D, nullptr);
+ }
+
+ // Exit scopes.
+ FnScope.Exit();
+ SmallVectorImpl<ParseScope *>::reverse_iterator I =
+ TemplateParamScopeStack.rbegin();
+ for (; I != TemplateParamScopeStack.rend(); ++I)
+ delete *I;
+}
+
+/// \brief Lex a delayed template function for late parsing.
+void Parser::LexTemplateFunctionForLateParsing(CachedTokens &Toks) {
+ tok::TokenKind kind = Tok.getKind();
+ if (!ConsumeAndStoreFunctionPrologue(Toks)) {
+ // Consume everything up to (and including) the matching right brace.
+ ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/false);
+ }
+
+ // If we're in a function-try-block, we need to store all the catch blocks.
+ if (kind == tok::kw_try) {
+ while (Tok.is(tok::kw_catch)) {
+ ConsumeAndStoreUntil(tok::l_brace, Toks, /*StopAtSemi=*/false);
+ ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/false);
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp
new file mode 100644
index 0000000..6fbcfd9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp
@@ -0,0 +1,1825 @@
+//===--- ParseTentative.cpp - Ambiguity Resolution Parsing ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the tentative parsing portions of the Parser
+// interfaces, for ambiguity resolution.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Sema/ParsedTemplate.h"
+using namespace clang;
+
+/// isCXXDeclarationStatement - C++-specialized function that disambiguates
+/// between a declaration or an expression statement, when parsing function
+/// bodies. Returns true for declaration, false for expression.
+///
+/// declaration-statement:
+/// block-declaration
+///
+/// block-declaration:
+/// simple-declaration
+/// asm-definition
+/// namespace-alias-definition
+/// using-declaration
+/// using-directive
+/// [C++0x] static_assert-declaration
+///
+/// asm-definition:
+/// 'asm' '(' string-literal ')' ';'
+///
+/// namespace-alias-definition:
+/// 'namespace' identifier = qualified-namespace-specifier ';'
+///
+/// using-declaration:
+/// 'using' typename[opt] '::'[opt] nested-name-specifier
+/// unqualified-id ';'
+/// 'using' '::' unqualified-id ;
+///
+/// using-directive:
+/// 'using' 'namespace' '::'[opt] nested-name-specifier[opt]
+/// namespace-name ';'
+///
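+/// For example (illustrative), 'int(x);' is a declaration-statement, not a
+/// function-style cast:
+/// \code
+///   int(x);   // declares a variable 'x' of type int
+/// \endcode
+///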
+bool Parser::isCXXDeclarationStatement() {
+ switch (Tok.getKind()) {
+ // asm-definition
+ case tok::kw_asm:
+ // namespace-alias-definition
+ case tok::kw_namespace:
+ // using-declaration
+ // using-directive
+ case tok::kw_using:
+ // static_assert-declaration
+ case tok::kw_static_assert:
+ case tok::kw__Static_assert:
+ return true;
+ // simple-declaration
+ default:
+ return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/false);
+ }
+}
+
+/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
+/// between a simple-declaration or an expression-statement.
+/// If during the disambiguation process a parsing error is encountered,
+/// the function returns true to let the declaration parsing code handle it.
+/// Returns false if the statement is disambiguated as expression.
+///
+/// simple-declaration:
+/// decl-specifier-seq init-declarator-list[opt] ';'
+///
+/// (if AllowForRangeDecl specified)
+/// for ( for-range-declaration : for-range-initializer ) statement
+/// for-range-declaration:
+/// attribute-specifier-seq[opt] type-specifier-seq declarator
+bool Parser::isCXXSimpleDeclaration(bool AllowForRangeDecl) {
+ // C++ 6.8p1:
+ // There is an ambiguity in the grammar involving expression-statements and
+ // declarations: An expression-statement with a function-style explicit type
+ // conversion (5.2.3) as its leftmost subexpression can be indistinguishable
+ // from a declaration where the first declarator starts with a '('. In those
+ // cases the statement is a declaration. [Note: To disambiguate, the whole
+ // statement might have to be examined to determine if it is an
+ // expression-statement or a declaration].
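+  //
+  //   For example (from C++ [stmt.ambig], illustrative; T names a type):
+  //     T(a)->m = 7;     // expression-statement
+  //     T(a)++;          // expression-statement
+  //     T(*d)(int);      // declaration
+  //     T(f) = { 1, 2 }; // declaration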
+
+ // C++ 6.8p3:
+ // The disambiguation is purely syntactic; that is, the meaning of the names
+ // occurring in such a statement, beyond whether they are type-names or not,
+ // is not generally used in or changed by the disambiguation. Class
+ // templates are instantiated as necessary to determine if a qualified name
+ // is a type-name. Disambiguation precedes parsing, and a statement
+ // disambiguated as a declaration may be an ill-formed declaration.
+
+ // We don't have to parse all of the decl-specifier-seq part. There's only
+ // an ambiguity if the first decl-specifier is
+ // simple-type-specifier/typename-specifier followed by a '(', which may
+ // indicate a function-style cast expression.
+ // isCXXDeclarationSpecifier will return TPResult::Ambiguous only in such
+ // a case.
+
+ bool InvalidAsDeclaration = false;
+ TPResult TPR = isCXXDeclarationSpecifier(TPResult::False,
+ &InvalidAsDeclaration);
+ if (TPR != TPResult::Ambiguous)
+ return TPR != TPResult::False; // Returns true for TPResult::True or
+ // TPResult::Error.
+
+ // FIXME: TryParseSimpleDeclaration doesn't look past the first initializer,
+ // and so gets some cases wrong. We can't carry on if we've already seen
+ // something which makes this statement invalid as a declaration in this case,
+ // since it can cause us to misparse valid code. Revisit this once
+ // TryParseInitDeclaratorList is fixed.
+ if (InvalidAsDeclaration)
+ return false;
+
+ // FIXME: Add statistics about the number of ambiguous statements encountered
+ // and how they were resolved (number of declarations+number of expressions).
+
+ // Ok, we have a simple-type-specifier/typename-specifier followed by a '(',
+ // or an identifier which doesn't resolve as anything. We need tentative
+ // parsing...
+
+ TentativeParsingAction PA(*this);
+ TPR = TryParseSimpleDeclaration(AllowForRangeDecl);
+ PA.Revert();
+
+ // In case of an error, let the declaration parsing code handle it.
+ if (TPR == TPResult::Error)
+ return true;
+
+ // Declarations take precedence over expressions.
+ if (TPR == TPResult::Ambiguous)
+ TPR = TPResult::True;
+
+ assert(TPR == TPResult::True || TPR == TPResult::False);
+ return TPR == TPResult::True;
+}
+
+/// Try to consume a token sequence that we've already identified as
+/// (potentially) starting a decl-specifier.
+Parser::TPResult Parser::TryConsumeDeclarationSpecifier() {
+ switch (Tok.getKind()) {
+ case tok::kw__Atomic:
+ if (NextToken().isNot(tok::l_paren)) {
+ ConsumeToken();
+ break;
+ }
+ // Fall through.
+ case tok::kw_typeof:
+ case tok::kw___attribute:
+ case tok::kw___underlying_type: {
+ ConsumeToken();
+ if (Tok.isNot(tok::l_paren))
+ return TPResult::Error;
+ ConsumeParen();
+ if (!SkipUntil(tok::r_paren))
+ return TPResult::Error;
+ break;
+ }
+
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw_union:
+ case tok::kw___interface:
+ case tok::kw_enum:
+ // elaborated-type-specifier:
+ // class-key attribute-specifier-seq[opt]
+ // nested-name-specifier[opt] identifier
+ // class-key nested-name-specifier[opt] template[opt] simple-template-id
+ // enum nested-name-specifier[opt] identifier
+ //
+ // FIXME: We don't support class-specifiers nor enum-specifiers here.
+ ConsumeToken();
+
+ // Skip attributes.
+ while (Tok.isOneOf(tok::l_square, tok::kw___attribute, tok::kw___declspec,
+ tok::kw_alignas)) {
+ if (Tok.is(tok::l_square)) {
+ ConsumeBracket();
+ if (!SkipUntil(tok::r_square))
+ return TPResult::Error;
+ } else {
+ ConsumeToken();
+ if (Tok.isNot(tok::l_paren))
+ return TPResult::Error;
+ ConsumeParen();
+ if (!SkipUntil(tok::r_paren))
+ return TPResult::Error;
+ }
+ }
+
+ if (Tok.isOneOf(tok::identifier, tok::coloncolon, tok::kw_decltype,
+ tok::annot_template_id) &&
+ TryAnnotateCXXScopeToken())
+ return TPResult::Error;
+ if (Tok.is(tok::annot_cxxscope))
+ ConsumeToken();
+ if (Tok.isNot(tok::identifier) && Tok.isNot(tok::annot_template_id))
+ return TPResult::Error;
+ ConsumeToken();
+ break;
+
+ case tok::annot_cxxscope:
+ ConsumeToken();
+ // Fall through.
+ default:
+ ConsumeToken();
+
+ if (getLangOpts().ObjC1 && Tok.is(tok::less))
+ return TryParseProtocolQualifiers();
+ break;
+ }
+
+ return TPResult::Ambiguous;
+}
+
+/// simple-declaration:
+/// decl-specifier-seq init-declarator-list[opt] ';'
+///
+/// (if AllowForRangeDecl specified)
+/// for ( for-range-declaration : for-range-initializer ) statement
+/// for-range-declaration:
+/// attribute-specifier-seq[opt] type-specifier-seq declarator
+///
+Parser::TPResult Parser::TryParseSimpleDeclaration(bool AllowForRangeDecl) {
+ if (TryConsumeDeclarationSpecifier() == TPResult::Error)
+ return TPResult::Error;
+
+ // Two decl-specifiers in a row conclusively disambiguate this as being a
+ // simple-declaration. Don't bother calling isCXXDeclarationSpecifier in the
+ // overwhelmingly common case that the next token is a '('.
+ if (Tok.isNot(tok::l_paren)) {
+ TPResult TPR = isCXXDeclarationSpecifier();
+ if (TPR == TPResult::Ambiguous)
+ return TPResult::True;
+ if (TPR == TPResult::True || TPR == TPResult::Error)
+ return TPR;
+ assert(TPR == TPResult::False);
+ }
+
+ TPResult TPR = TryParseInitDeclaratorList();
+ if (TPR != TPResult::Ambiguous)
+ return TPR;
+
+ if (Tok.isNot(tok::semi) && (!AllowForRangeDecl || Tok.isNot(tok::colon)))
+ return TPResult::False;
+
+ return TPResult::Ambiguous;
+}
+
+/// Tentatively parse an init-declarator-list in order to disambiguate it from
+/// an expression.
+///
+/// init-declarator-list:
+/// init-declarator
+/// init-declarator-list ',' init-declarator
+///
+/// init-declarator:
+/// declarator initializer[opt]
+/// [GNU] declarator simple-asm-expr[opt] attributes[opt] initializer[opt]
+///
+/// initializer:
+/// brace-or-equal-initializer
+/// '(' expression-list ')'
+///
+/// brace-or-equal-initializer:
+/// '=' initializer-clause
+/// [C++11] braced-init-list
+///
+/// initializer-clause:
+/// assignment-expression
+/// braced-init-list
+///
+/// braced-init-list:
+/// '{' initializer-list ','[opt] '}'
+/// '{' '}'
+///
+Parser::TPResult Parser::TryParseInitDeclaratorList() {
+ while (1) {
+ // declarator
+ TPResult TPR = TryParseDeclarator(false/*mayBeAbstract*/);
+ if (TPR != TPResult::Ambiguous)
+ return TPR;
+
+ // [GNU] simple-asm-expr[opt] attributes[opt]
+ if (Tok.isOneOf(tok::kw_asm, tok::kw___attribute))
+ return TPResult::True;
+
+ // initializer[opt]
+ if (Tok.is(tok::l_paren)) {
+ // Parse through the parens.
+ ConsumeParen();
+ if (!SkipUntil(tok::r_paren, StopAtSemi))
+ return TPResult::Error;
+ } else if (Tok.is(tok::l_brace)) {
+ // A left-brace here is sufficient to disambiguate the parse; an
+ // expression can never be followed directly by a braced-init-list.
+ return TPResult::True;
+ } else if (Tok.is(tok::equal) || isTokIdentifier_in()) {
+ // MSVC and g++ won't examine the rest of declarators if '=' is
+ // encountered; they just conclude that we have a declaration.
+ // EDG parses the initializer completely, which is the proper behavior
+ // for this case.
+ //
+ // At present, Clang follows MSVC and g++, since the parser does not have
+ // the ability to parse an expression fully without recording the
+ // results of that parse.
+ // FIXME: Handle this case correctly.
+ //
+ // Also allow 'in' after an Objective-C declaration as in:
+ // for (int (^b)(void) in array). Ideally this should be done in the
+ // context of parsing for-init-statement of a foreach statement only. But,
+ // in any other context 'in' is invalid after a declaration and parser
+ // issues the error regardless of outcome of this decision.
+ // FIXME: Change if above assumption does not hold.
+ return TPResult::True;
+ }
+
+ if (!TryConsumeToken(tok::comma))
+ break;
+ }
+
+ return TPResult::Ambiguous;
+}
+
+/// isCXXConditionDeclaration - Disambiguates between a declaration or an
+/// expression for a condition of a if/switch/while/for statement.
+/// If during the disambiguation process a parsing error is encountered,
+/// the function returns true to let the declaration parsing code handle it.
+///
+/// condition:
+/// expression
+/// type-specifier-seq declarator '=' assignment-expression
+/// [C++11] type-specifier-seq declarator '=' initializer-clause
+/// [C++11] type-specifier-seq declarator braced-init-list
+/// [GNU] type-specifier-seq declarator simple-asm-expr[opt] attributes[opt]
+/// '=' assignment-expression
+///
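+/// For example (illustrative):
+/// \code
+///   if (int x = f()) {}   // condition is a declaration
+///   if (x == f()) {}      // condition is an expression
+/// \endcode
+///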
+bool Parser::isCXXConditionDeclaration() {
+ TPResult TPR = isCXXDeclarationSpecifier();
+ if (TPR != TPResult::Ambiguous)
+ return TPR != TPResult::False; // Returns true for TPResult::True or
+ // TPResult::Error.
+
+ // FIXME: Add statistics about the number of ambiguous statements encountered
+ // and how they were resolved (number of declarations+number of expressions).
+
+ // Ok, we have a simple-type-specifier/typename-specifier followed by a '('.
+ // We need tentative parsing...
+
+ TentativeParsingAction PA(*this);
+
+ // type-specifier-seq
+ TryConsumeDeclarationSpecifier();
+ assert(Tok.is(tok::l_paren) && "Expected '('");
+
+ // declarator
+ TPR = TryParseDeclarator(false/*mayBeAbstract*/);
+
+ // In case of an error, let the declaration parsing code handle it.
+ if (TPR == TPResult::Error)
+ TPR = TPResult::True;
+
+ if (TPR == TPResult::Ambiguous) {
+ // '='
+ // [GNU] simple-asm-expr[opt] attributes[opt]
+ if (Tok.isOneOf(tok::equal, tok::kw_asm, tok::kw___attribute))
+ TPR = TPResult::True;
+ else if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace))
+ TPR = TPResult::True;
+ else
+ TPR = TPResult::False;
+ }
+
+ PA.Revert();
+
+ assert(TPR == TPResult::True || TPR == TPResult::False);
+ return TPR == TPResult::True;
+}
+
+/// \brief Determine whether the next set of tokens contains a type-id.
+///
+/// The context parameter states what context we're parsing right
+/// now, which affects how this routine copes with the token
+/// following the type-id. If the context is TypeIdInParens, we have
+/// already parsed the '(' and we will cease lookahead when we hit
+/// the corresponding ')'. If the context is
+/// TypeIdAsTemplateArgument, we've already parsed the '<' or ','
+/// before this template argument, and will cease lookahead when we
+/// hit a '>', '>>' (in C++0x), or ','. Returns true for a type-id
+/// and false for an expression. If during the disambiguation
+/// process a parsing error is encountered, the function returns
+/// true to let the declaration parsing code handle it.
+///
+/// type-id:
+///   type-specifier-seq abstract-declarator[opt]
+///
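+/// For example (illustrative):
+/// \code
+///   (void) sizeof(int(*)());   // type-id: pointer to function
+///   (void) sizeof(int(3));     // expression: functional cast
+/// \endcode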
+bool Parser::isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous) {
+
+ isAmbiguous = false;
+
+ // C++ 8.2p2:
+ // The ambiguity arising from the similarity between a function-style cast and
+ // a type-id can occur in different contexts. The ambiguity appears as a
+ // choice between a function-style cast expression and a declaration of a
+ // type. The resolution is that any construct that could possibly be a type-id
+ // in its syntactic context shall be considered a type-id.
+
+ TPResult TPR = isCXXDeclarationSpecifier();
+ if (TPR != TPResult::Ambiguous)
+ return TPR != TPResult::False; // Returns true for TPResult::True or
+ // TPResult::Error.
+
+ // FIXME: Add statistics about the number of ambiguous statements encountered
+ // and how they were resolved (number of declarations+number of expressions).
+
+ // Ok, we have a simple-type-specifier/typename-specifier followed by a '('.
+ // We need tentative parsing...
+
+ TentativeParsingAction PA(*this);
+
+ // type-specifier-seq
+ TryConsumeDeclarationSpecifier();
+ assert(Tok.is(tok::l_paren) && "Expected '('");
+
+ // declarator
+ TPR = TryParseDeclarator(true/*mayBeAbstract*/, false/*mayHaveIdentifier*/);
+
+ // In case of an error, let the declaration parsing code handle it.
+ if (TPR == TPResult::Error)
+ TPR = TPResult::True;
+
+ if (TPR == TPResult::Ambiguous) {
+ // We are supposed to be inside parens, so if after the abstract declarator
+ // we encounter a ')' this is a type-id, otherwise it's an expression.
+ if (Context == TypeIdInParens && Tok.is(tok::r_paren)) {
+ TPR = TPResult::True;
+ isAmbiguous = true;
+
+ // We are supposed to be inside a template argument, so if after
+      // the abstract declarator we encounter a '>', '>>' (in C++11), or
+ // ',', this is a type-id. Otherwise, it's an expression.
+ } else if (Context == TypeIdAsTemplateArgument &&
+ (Tok.isOneOf(tok::greater, tok::comma) ||
+ (getLangOpts().CPlusPlus11 && Tok.is(tok::greatergreater)))) {
+ TPR = TPResult::True;
+ isAmbiguous = true;
+
+ } else
+ TPR = TPResult::False;
+ }
+
+ PA.Revert();
+
+ assert(TPR == TPResult::True || TPR == TPResult::False);
+ return TPR == TPResult::True;
+}
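+
+// A sketch of the "could be a type-id, so it is a type-id" rule with
+// hypothetical operands:
+//
+//   sizeof(int())      // type-id: 'int()' names a function type here
+//   sizeof(int() + 1)  // expression: a value-initialized int plus one
+//
+// In a template-argument list, 'X<int()>' likewise takes the type-id reading.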
+
+/// \brief Returns true if this is a C++11 attribute-specifier. Per
+/// C++11 [dcl.attr.grammar]p6, two consecutive left square bracket tokens
+/// always introduce an attribute. In Objective-C++11, this rule does not
+/// apply if either '[' begins a message-send.
+///
+/// If Disambiguate is true, we try harder to determine whether a '[[' starts
+/// an attribute-specifier, and return CAK_InvalidAttributeSpecifier if not.
+///
+/// If OuterMightBeMessageSend is true, we assume the outer '[' is either an
+/// Obj-C message send or the start of an attribute. Otherwise, we assume it
+/// is not an Obj-C message send.
+///
+/// C++11 [dcl.attr.grammar]:
+///
+/// attribute-specifier:
+/// '[' '[' attribute-list ']' ']'
+/// alignment-specifier
+///
+/// attribute-list:
+/// attribute[opt]
+/// attribute-list ',' attribute[opt]
+/// attribute '...'
+/// attribute-list ',' attribute '...'
+///
+/// attribute:
+/// attribute-token attribute-argument-clause[opt]
+///
+/// attribute-token:
+/// identifier
+/// identifier '::' identifier
+///
+/// attribute-argument-clause:
+/// '(' balanced-token-seq ')'
+Parser::CXX11AttributeKind
+Parser::isCXX11AttributeSpecifier(bool Disambiguate,
+ bool OuterMightBeMessageSend) {
+ if (Tok.is(tok::kw_alignas))
+ return CAK_AttributeSpecifier;
+
+ if (Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square))
+ return CAK_NotAttributeSpecifier;
+
+ // No tentative parsing if we don't need to look for ']]' or a lambda.
+ if (!Disambiguate && !getLangOpts().ObjC1)
+ return CAK_AttributeSpecifier;
+
+ TentativeParsingAction PA(*this);
+
+ // Opening brackets were checked for above.
+ ConsumeBracket();
+
+ // Outside Obj-C++11, treat anything with a matching ']]' as an attribute.
+ if (!getLangOpts().ObjC1) {
+ ConsumeBracket();
+
+ bool IsAttribute = SkipUntil(tok::r_square);
+ IsAttribute &= Tok.is(tok::r_square);
+
+ PA.Revert();
+
+ return IsAttribute ? CAK_AttributeSpecifier : CAK_InvalidAttributeSpecifier;
+ }
+
+ // In Obj-C++11, we need to distinguish four situations:
+ // 1a) int x[[attr]]; C++11 attribute.
+ // 1b) [[attr]]; C++11 statement attribute.
+ // 2) int x[[obj](){ return 1; }()]; Lambda in array size/index.
+ // 3a) int x[[obj get]]; Message send in array size/index.
+ // 3b) [[Class alloc] init]; Message send in message send.
+ // 4) [[obj]{ return self; }() doStuff]; Lambda in message send.
+ // (1) is an attribute, (2) is ill-formed, and (3) and (4) are accepted.
+
+ // If we have a lambda-introducer, then this is definitely not a message send.
+ // FIXME: If this disambiguation is too slow, fold the tentative lambda parse
+ // into the tentative attribute parse below.
+ LambdaIntroducer Intro;
+ if (!TryParseLambdaIntroducer(Intro)) {
+ // A lambda cannot end with ']]', and an attribute must.
+ bool IsAttribute = Tok.is(tok::r_square);
+
+ PA.Revert();
+
+ if (IsAttribute)
+ // Case 1: C++11 attribute.
+ return CAK_AttributeSpecifier;
+
+ if (OuterMightBeMessageSend)
+ // Case 4: Lambda in message send.
+ return CAK_NotAttributeSpecifier;
+
+ // Case 2: Lambda in array size / index.
+ return CAK_InvalidAttributeSpecifier;
+ }
+
+ ConsumeBracket();
+
+ // If we don't have a lambda-introducer, then we have an attribute or a
+ // message-send.
+ bool IsAttribute = true;
+ while (Tok.isNot(tok::r_square)) {
+ if (Tok.is(tok::comma)) {
+ // Case 1: Stray commas can only occur in attributes.
+ PA.Revert();
+ return CAK_AttributeSpecifier;
+ }
+
+ // Parse the attribute-token, if present.
+ // C++11 [dcl.attr.grammar]:
+ // If a keyword or an alternative token that satisfies the syntactic
+ // requirements of an identifier is contained in an attribute-token,
+ // it is considered an identifier.
+ SourceLocation Loc;
+ if (!TryParseCXX11AttributeIdentifier(Loc)) {
+ IsAttribute = false;
+ break;
+ }
+ if (Tok.is(tok::coloncolon)) {
+ ConsumeToken();
+ if (!TryParseCXX11AttributeIdentifier(Loc)) {
+ IsAttribute = false;
+ break;
+ }
+ }
+
+ // Parse the attribute-argument-clause, if present.
+ if (Tok.is(tok::l_paren)) {
+ ConsumeParen();
+ if (!SkipUntil(tok::r_paren)) {
+ IsAttribute = false;
+ break;
+ }
+ }
+
+ TryConsumeToken(tok::ellipsis);
+
+ if (!TryConsumeToken(tok::comma))
+ break;
+ }
+
+  // An attribute must end with ']]'.
+ if (IsAttribute) {
+ if (Tok.is(tok::r_square)) {
+ ConsumeBracket();
+ IsAttribute = Tok.is(tok::r_square);
+ } else {
+ IsAttribute = false;
+ }
+ }
+
+ PA.Revert();
+
+ if (IsAttribute)
+ // Case 1: C++11 statement attribute.
+ return CAK_AttributeSpecifier;
+
+ // Case 3: Message send.
+ return CAK_NotAttributeSpecifier;
+}
+
+Parser::TPResult Parser::TryParsePtrOperatorSeq() {
+ while (true) {
+ if (Tok.isOneOf(tok::coloncolon, tok::identifier))
+ if (TryAnnotateCXXScopeToken(true))
+ return TPResult::Error;
+
+ if (Tok.isOneOf(tok::star, tok::amp, tok::caret, tok::ampamp) ||
+ (Tok.is(tok::annot_cxxscope) && NextToken().is(tok::star))) {
+ // ptr-operator
+ ConsumeToken();
+ while (Tok.isOneOf(tok::kw_const, tok::kw_volatile, tok::kw_restrict,
+ tok::kw__Nonnull, tok::kw__Nullable,
+ tok::kw__Null_unspecified))
+ ConsumeToken();
+ } else {
+ return TPResult::True;
+ }
+ }
+}
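+
+// Illustrative declarator prefixes consumed by the sequence above (names are
+// hypothetical): the '*const *' in 'int *const *p', and the 'S::* _Nonnull'
+// in 'int S::* _Nonnull q'.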
+
+/// operator-function-id:
+/// 'operator' operator
+///
+/// operator: one of
+/// new delete new[] delete[] + - * / % ^ [...]
+///
+/// conversion-function-id:
+/// 'operator' conversion-type-id
+///
+/// conversion-type-id:
+/// type-specifier-seq conversion-declarator[opt]
+///
+/// conversion-declarator:
+/// ptr-operator conversion-declarator[opt]
+///
+/// literal-operator-id:
+/// 'operator' string-literal identifier
+/// 'operator' user-defined-string-literal
+Parser::TPResult Parser::TryParseOperatorId() {
+ assert(Tok.is(tok::kw_operator));
+ ConsumeToken();
+
+ // Maybe this is an operator-function-id.
+ switch (Tok.getKind()) {
+ case tok::kw_new: case tok::kw_delete:
+ ConsumeToken();
+ if (Tok.is(tok::l_square) && NextToken().is(tok::r_square)) {
+ ConsumeBracket();
+ ConsumeBracket();
+ }
+ return TPResult::True;
+
+#define OVERLOADED_OPERATOR(Name, Spelling, Token, Unary, Binary, MemOnly) \
+ case tok::Token:
+#define OVERLOADED_OPERATOR_MULTI(Name, Spelling, Unary, Binary, MemOnly)
+#include "clang/Basic/OperatorKinds.def"
+ ConsumeToken();
+ return TPResult::True;
+
+ case tok::l_square:
+ if (NextToken().is(tok::r_square)) {
+ ConsumeBracket();
+ ConsumeBracket();
+ return TPResult::True;
+ }
+ break;
+
+ case tok::l_paren:
+ if (NextToken().is(tok::r_paren)) {
+ ConsumeParen();
+ ConsumeParen();
+ return TPResult::True;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ // Maybe this is a literal-operator-id.
+ if (getLangOpts().CPlusPlus11 && isTokenStringLiteral()) {
+ bool FoundUDSuffix = false;
+ do {
+ FoundUDSuffix |= Tok.hasUDSuffix();
+ ConsumeStringToken();
+ } while (isTokenStringLiteral());
+
+ if (!FoundUDSuffix) {
+ if (Tok.is(tok::identifier))
+ ConsumeToken();
+ else
+ return TPResult::Error;
+ }
+ return TPResult::True;
+ }
+
+ // Maybe this is a conversion-function-id.
+ bool AnyDeclSpecifiers = false;
+ while (true) {
+ TPResult TPR = isCXXDeclarationSpecifier();
+ if (TPR == TPResult::Error)
+ return TPR;
+ if (TPR == TPResult::False) {
+ if (!AnyDeclSpecifiers)
+ return TPResult::Error;
+ break;
+ }
+ if (TryConsumeDeclarationSpecifier() == TPResult::Error)
+ return TPResult::Error;
+ AnyDeclSpecifiers = true;
+ }
+ return TryParsePtrOperatorSeq();
+}
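+
+// Spellings accepted by the paths above (illustrative; '_w' is a made-up
+// UD-suffix):
+//
+//   operator delete[]        // operator-function-id using 'delete[]'
+//   operator""_w             // literal-operator-id (C++11)
+//   operator const char *    // conversion-function-id ending in a ptr-operator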
+
+/// declarator:
+/// direct-declarator
+/// ptr-operator declarator
+///
+/// direct-declarator:
+/// declarator-id
+/// direct-declarator '(' parameter-declaration-clause ')'
+/// cv-qualifier-seq[opt] exception-specification[opt]
+/// direct-declarator '[' constant-expression[opt] ']'
+/// '(' declarator ')'
+/// [GNU] '(' attributes declarator ')'
+///
+/// abstract-declarator:
+/// ptr-operator abstract-declarator[opt]
+/// direct-abstract-declarator
+/// ...
+///
+/// direct-abstract-declarator:
+/// direct-abstract-declarator[opt]
+/// '(' parameter-declaration-clause ')' cv-qualifier-seq[opt]
+/// exception-specification[opt]
+/// direct-abstract-declarator[opt] '[' constant-expression[opt] ']'
+/// '(' abstract-declarator ')'
+///
+/// ptr-operator:
+/// '*' cv-qualifier-seq[opt]
+/// '&'
+/// [C++0x] '&&' [TODO]
+/// '::'[opt] nested-name-specifier '*' cv-qualifier-seq[opt]
+///
+/// cv-qualifier-seq:
+/// cv-qualifier cv-qualifier-seq[opt]
+///
+/// cv-qualifier:
+/// 'const'
+/// 'volatile'
+///
+/// declarator-id:
+/// '...'[opt] id-expression
+///
+/// id-expression:
+/// unqualified-id
+/// qualified-id [TODO]
+///
+/// unqualified-id:
+/// identifier
+/// operator-function-id
+/// conversion-function-id
+/// literal-operator-id
+/// '~' class-name [TODO]
+/// '~' decltype-specifier [TODO]
+/// template-id [TODO]
+///
+Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
+ bool mayHaveIdentifier) {
+ // declarator:
+ // direct-declarator
+ // ptr-operator declarator
+ if (TryParsePtrOperatorSeq() == TPResult::Error)
+ return TPResult::Error;
+
+ // direct-declarator:
+ // direct-abstract-declarator:
+ if (Tok.is(tok::ellipsis))
+ ConsumeToken();
+
+ if ((Tok.isOneOf(tok::identifier, tok::kw_operator) ||
+ (Tok.is(tok::annot_cxxscope) && (NextToken().is(tok::identifier) ||
+ NextToken().is(tok::kw_operator)))) &&
+ mayHaveIdentifier) {
+ // declarator-id
+ if (Tok.is(tok::annot_cxxscope))
+ ConsumeToken();
+ else if (Tok.is(tok::identifier))
+ TentativelyDeclaredIdentifiers.push_back(Tok.getIdentifierInfo());
+ if (Tok.is(tok::kw_operator)) {
+ if (TryParseOperatorId() == TPResult::Error)
+ return TPResult::Error;
+ } else
+ ConsumeToken();
+ } else if (Tok.is(tok::l_paren)) {
+ ConsumeParen();
+ if (mayBeAbstract &&
+ (Tok.is(tok::r_paren) || // 'int()' is a function.
+ // 'int(...)' is a function.
+ (Tok.is(tok::ellipsis) && NextToken().is(tok::r_paren)) ||
+ isDeclarationSpecifier())) { // 'int(int)' is a function.
+ // '(' parameter-declaration-clause ')' cv-qualifier-seq[opt]
+ // exception-specification[opt]
+ TPResult TPR = TryParseFunctionDeclarator();
+ if (TPR != TPResult::Ambiguous)
+ return TPR;
+ } else {
+ // '(' declarator ')'
+ // '(' attributes declarator ')'
+ // '(' abstract-declarator ')'
+ if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec, tok::kw___cdecl,
+ tok::kw___stdcall, tok::kw___fastcall, tok::kw___thiscall,
+ tok::kw___vectorcall, tok::kw___unaligned))
+ return TPResult::True; // attributes indicate declaration
+ TPResult TPR = TryParseDeclarator(mayBeAbstract, mayHaveIdentifier);
+ if (TPR != TPResult::Ambiguous)
+ return TPR;
+ if (Tok.isNot(tok::r_paren))
+ return TPResult::False;
+ ConsumeParen();
+ }
+ } else if (!mayBeAbstract) {
+ return TPResult::False;
+ }
+
+  while (true) {
+ TPResult TPR(TPResult::Ambiguous);
+
+ // abstract-declarator: ...
+ if (Tok.is(tok::ellipsis))
+ ConsumeToken();
+
+ if (Tok.is(tok::l_paren)) {
+ // Check whether we have a function declarator or a possible ctor-style
+ // initializer that follows the declarator. Note that ctor-style
+ // initializers are not possible in contexts where abstract declarators
+ // are allowed.
+ if (!mayBeAbstract && !isCXXFunctionDeclarator())
+ break;
+
+ // direct-declarator '(' parameter-declaration-clause ')'
+ // cv-qualifier-seq[opt] exception-specification[opt]
+ ConsumeParen();
+ TPR = TryParseFunctionDeclarator();
+ } else if (Tok.is(tok::l_square)) {
+ // direct-declarator '[' constant-expression[opt] ']'
+ // direct-abstract-declarator[opt] '[' constant-expression[opt] ']'
+ TPR = TryParseBracketDeclarator();
+ } else {
+ break;
+ }
+
+ if (TPR != TPResult::Ambiguous)
+ return TPR;
+ }
+
+ return TPResult::Ambiguous;
+}
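+
+// A sketch of declarators this routine walks (hypothetical names):
+//
+//   (*fp)(int)    // parenthesized declarator, then a function declarator
+//   arr[10][20]   // declarator-id followed by two bracket declarators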
+
+Parser::TPResult
+Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
+ switch (Kind) {
+ // Obviously starts an expression.
+ case tok::numeric_constant:
+ case tok::char_constant:
+ case tok::wide_char_constant:
+ case tok::utf8_char_constant:
+ case tok::utf16_char_constant:
+ case tok::utf32_char_constant:
+ case tok::string_literal:
+ case tok::wide_string_literal:
+ case tok::utf8_string_literal:
+ case tok::utf16_string_literal:
+ case tok::utf32_string_literal:
+ case tok::l_square:
+ case tok::l_paren:
+ case tok::amp:
+ case tok::ampamp:
+ case tok::star:
+ case tok::plus:
+ case tok::plusplus:
+ case tok::minus:
+ case tok::minusminus:
+ case tok::tilde:
+ case tok::exclaim:
+ case tok::kw_sizeof:
+ case tok::kw___func__:
+ case tok::kw_const_cast:
+ case tok::kw_delete:
+ case tok::kw_dynamic_cast:
+ case tok::kw_false:
+ case tok::kw_new:
+ case tok::kw_operator:
+ case tok::kw_reinterpret_cast:
+ case tok::kw_static_cast:
+ case tok::kw_this:
+ case tok::kw_throw:
+ case tok::kw_true:
+ case tok::kw_typeid:
+ case tok::kw_alignof:
+ case tok::kw_noexcept:
+ case tok::kw_nullptr:
+ case tok::kw__Alignof:
+ case tok::kw___null:
+ case tok::kw___alignof:
+ case tok::kw___builtin_choose_expr:
+ case tok::kw___builtin_offsetof:
+ case tok::kw___builtin_va_arg:
+ case tok::kw___imag:
+ case tok::kw___real:
+ case tok::kw___FUNCTION__:
+ case tok::kw___FUNCDNAME__:
+ case tok::kw___FUNCSIG__:
+ case tok::kw_L__FUNCTION__:
+ case tok::kw___PRETTY_FUNCTION__:
+ case tok::kw___uuidof:
+#define TYPE_TRAIT(N,Spelling,K) \
+ case tok::kw_##Spelling:
+#include "clang/Basic/TokenKinds.def"
+ return TPResult::True;
+
+ // Obviously starts a type-specifier-seq:
+ case tok::kw_char:
+ case tok::kw_const:
+ case tok::kw_double:
+ case tok::kw_enum:
+ case tok::kw_half:
+ case tok::kw_float:
+ case tok::kw_int:
+ case tok::kw_long:
+ case tok::kw___int64:
+ case tok::kw___int128:
+ case tok::kw_restrict:
+ case tok::kw_short:
+ case tok::kw_signed:
+ case tok::kw_struct:
+ case tok::kw_union:
+ case tok::kw_unsigned:
+ case tok::kw_void:
+ case tok::kw_volatile:
+ case tok::kw__Bool:
+ case tok::kw__Complex:
+ case tok::kw_class:
+ case tok::kw_typename:
+ case tok::kw_wchar_t:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+ case tok::kw__Decimal32:
+ case tok::kw__Decimal64:
+ case tok::kw__Decimal128:
+ case tok::kw___interface:
+ case tok::kw___thread:
+ case tok::kw_thread_local:
+ case tok::kw__Thread_local:
+ case tok::kw_typeof:
+ case tok::kw___underlying_type:
+ case tok::kw___cdecl:
+ case tok::kw___stdcall:
+ case tok::kw___fastcall:
+ case tok::kw___thiscall:
+ case tok::kw___vectorcall:
+ case tok::kw___unaligned:
+ case tok::kw___vector:
+ case tok::kw___pixel:
+ case tok::kw___bool:
+ case tok::kw__Atomic:
+ case tok::kw___unknown_anytype:
+ return TPResult::False;
+
+ default:
+ break;
+ }
+
+ return TPResult::Ambiguous;
+}
+
+bool Parser::isTentativelyDeclared(IdentifierInfo *II) {
+ return std::find(TentativelyDeclaredIdentifiers.begin(),
+ TentativelyDeclaredIdentifiers.end(), II)
+ != TentativelyDeclaredIdentifiers.end();
+}
+
+namespace {
+class TentativeParseCCC : public CorrectionCandidateCallback {
+public:
+ TentativeParseCCC(const Token &Next) {
+ WantRemainingKeywords = false;
+ WantTypeSpecifiers = Next.isOneOf(tok::l_paren, tok::r_paren, tok::greater,
+ tok::l_brace, tok::identifier);
+ }
+
+ bool ValidateCandidate(const TypoCorrection &Candidate) override {
+ // Reject any candidate that only resolves to instance members since they
+ // aren't viable as standalone identifiers instead of member references.
+ if (Candidate.isResolved() && !Candidate.isKeyword() &&
+ std::all_of(Candidate.begin(), Candidate.end(),
+ [](NamedDecl *ND) { return ND->isCXXInstanceMember(); }))
+ return false;
+
+ return CorrectionCandidateCallback::ValidateCandidate(Candidate);
+ }
+};
+} // end anonymous namespace
+
+/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a declaration
+/// specifier, TPResult::False if it is not, TPResult::Ambiguous if it could
+/// be either a decl-specifier or a function-style cast, and TPResult::Error
+/// if a parsing error was found and reported.
+///
+/// If HasMissingTypename is provided, a name with a dependent scope specifier
+/// will be treated as ambiguous if the 'typename' keyword is missing. If this
+/// happens, *HasMissingTypename will be set to 'true'. This will also be used
+/// as an indicator that undeclared identifiers (which will trigger a later
+/// parse error) should be treated as types. Returns TPResult::Ambiguous in
+/// such cases.
+///
+/// decl-specifier:
+/// storage-class-specifier
+/// type-specifier
+/// function-specifier
+/// 'friend'
+/// 'typedef'
+/// [C++11] 'constexpr'
+/// [GNU] attributes declaration-specifiers[opt]
+///
+/// storage-class-specifier:
+/// 'register'
+/// 'static'
+/// 'extern'
+/// 'mutable'
+/// 'auto'
+/// [GNU] '__thread'
+/// [C++11] 'thread_local'
+/// [C11] '_Thread_local'
+///
+/// function-specifier:
+/// 'inline'
+/// 'virtual'
+/// 'explicit'
+///
+/// typedef-name:
+/// identifier
+///
+/// type-specifier:
+/// simple-type-specifier
+/// class-specifier
+/// enum-specifier
+/// elaborated-type-specifier
+/// typename-specifier
+/// cv-qualifier
+///
+/// simple-type-specifier:
+/// '::'[opt] nested-name-specifier[opt] type-name
+/// '::'[opt] nested-name-specifier 'template'
+/// simple-template-id [TODO]
+/// 'char'
+/// 'wchar_t'
+/// 'bool'
+/// 'short'
+/// 'int'
+/// 'long'
+/// 'signed'
+/// 'unsigned'
+/// 'float'
+/// 'double'
+/// 'void'
+/// [GNU] typeof-specifier
+/// [GNU] '_Complex'
+/// [C++11] 'auto'
+/// [GNU] '__auto_type'
+/// [C++11] 'decltype' ( expression )
+/// [C++1y] 'decltype' ( 'auto' )
+///
+/// type-name:
+/// class-name
+/// enum-name
+/// typedef-name
+///
+/// elaborated-type-specifier:
+/// class-key '::'[opt] nested-name-specifier[opt] identifier
+/// class-key '::'[opt] nested-name-specifier[opt] 'template'[opt]
+/// simple-template-id
+/// 'enum' '::'[opt] nested-name-specifier[opt] identifier
+///
+/// enum-name:
+/// identifier
+///
+/// enum-specifier:
+/// 'enum' identifier[opt] '{' enumerator-list[opt] '}'
+/// 'enum' identifier[opt] '{' enumerator-list ',' '}'
+///
+/// class-specifier:
+/// class-head '{' member-specification[opt] '}'
+///
+/// class-head:
+/// class-key identifier[opt] base-clause[opt]
+/// class-key nested-name-specifier identifier base-clause[opt]
+/// class-key nested-name-specifier[opt] simple-template-id
+/// base-clause[opt]
+///
+/// class-key:
+/// 'class'
+/// 'struct'
+/// 'union'
+///
+/// cv-qualifier:
+/// 'const'
+/// 'volatile'
+/// [GNU] restrict
+///
+Parser::TPResult
+Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
+ bool *HasMissingTypename) {
+ switch (Tok.getKind()) {
+ case tok::identifier: {
+ // Check for need to substitute AltiVec __vector keyword
+ // for "vector" identifier.
+ if (TryAltiVecVectorToken())
+ return TPResult::True;
+
+ const Token &Next = NextToken();
+ // In 'foo bar', 'foo' is always a type name outside of Objective-C.
+ if (!getLangOpts().ObjC1 && Next.is(tok::identifier))
+ return TPResult::True;
+
+ if (Next.isNot(tok::coloncolon) && Next.isNot(tok::less)) {
+ // Determine whether this is a valid expression. If not, we will hit
+ // a parse error one way or another. In that case, tell the caller that
+ // this is ambiguous. Typo-correct to type and expression keywords and
+ // to types and identifiers, in order to try to recover from errors.
+ switch (TryAnnotateName(false /* no nested name specifier */,
+ llvm::make_unique<TentativeParseCCC>(Next))) {
+ case ANK_Error:
+ return TPResult::Error;
+ case ANK_TentativeDecl:
+ return TPResult::False;
+ case ANK_TemplateName:
+ // A bare type template-name which can't be a template template
+ // argument is an error, and was probably intended to be a type.
+ return GreaterThanIsOperator ? TPResult::True : TPResult::False;
+ case ANK_Unresolved:
+ return HasMissingTypename ? TPResult::Ambiguous : TPResult::False;
+ case ANK_Success:
+ break;
+ }
+ assert(Tok.isNot(tok::identifier) &&
+ "TryAnnotateName succeeded without producing an annotation");
+ } else {
+ // This might possibly be a type with a dependent scope specifier and
+ // a missing 'typename' keyword. Don't use TryAnnotateName in this case,
+ // since it will annotate as a primary expression, and we want to use the
+ // "missing 'typename'" logic.
+ if (TryAnnotateTypeOrScopeToken())
+ return TPResult::Error;
+ // If annotation failed, assume it's a non-type.
+ // FIXME: If this happens due to an undeclared identifier, treat it as
+ // ambiguous.
+ if (Tok.is(tok::identifier))
+ return TPResult::False;
+ }
+
+ // We annotated this token as something. Recurse to handle whatever we got.
+ return isCXXDeclarationSpecifier(BracedCastResult, HasMissingTypename);
+ }
+
+ case tok::kw_typename: // typename T::type
+ // Annotate typenames and C++ scope specifiers. If we get one, just
+ // recurse to handle whatever we get.
+ if (TryAnnotateTypeOrScopeToken())
+ return TPResult::Error;
+ return isCXXDeclarationSpecifier(BracedCastResult, HasMissingTypename);
+
+ case tok::coloncolon: { // ::foo::bar
+ const Token &Next = NextToken();
+ if (Next.isOneOf(tok::kw_new, // ::new
+ tok::kw_delete)) // ::delete
+ return TPResult::False;
+ }
+ // Fall through.
+ case tok::kw___super:
+ case tok::kw_decltype:
+ // Annotate typenames and C++ scope specifiers. If we get one, just
+ // recurse to handle whatever we get.
+ if (TryAnnotateTypeOrScopeToken())
+ return TPResult::Error;
+ return isCXXDeclarationSpecifier(BracedCastResult, HasMissingTypename);
+
+ // decl-specifier:
+ // storage-class-specifier
+ // type-specifier
+ // function-specifier
+ // 'friend'
+ // 'typedef'
+ // 'constexpr'
+ // 'concept'
+ case tok::kw_friend:
+ case tok::kw_typedef:
+ case tok::kw_constexpr:
+ case tok::kw_concept:
+ // storage-class-specifier
+ case tok::kw_register:
+ case tok::kw_static:
+ case tok::kw_extern:
+ case tok::kw_mutable:
+ case tok::kw_auto:
+ case tok::kw___thread:
+ case tok::kw_thread_local:
+ case tok::kw__Thread_local:
+ // function-specifier
+ case tok::kw_inline:
+ case tok::kw_virtual:
+ case tok::kw_explicit:
+
+ // Modules
+ case tok::kw___module_private__:
+
+ // Debugger support
+ case tok::kw___unknown_anytype:
+
+ // type-specifier:
+ // simple-type-specifier
+ // class-specifier
+ // enum-specifier
+ // elaborated-type-specifier
+ // typename-specifier
+ // cv-qualifier
+
+ // class-specifier
+ // elaborated-type-specifier
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw_union:
+ case tok::kw___interface:
+ // enum-specifier
+ case tok::kw_enum:
+ // cv-qualifier
+ case tok::kw_const:
+ case tok::kw_volatile:
+
+ // GNU
+ case tok::kw_restrict:
+ case tok::kw__Complex:
+ case tok::kw___attribute:
+ case tok::kw___auto_type:
+ return TPResult::True;
+
+ // Microsoft
+ case tok::kw___declspec:
+ case tok::kw___cdecl:
+ case tok::kw___stdcall:
+ case tok::kw___fastcall:
+ case tok::kw___thiscall:
+ case tok::kw___vectorcall:
+ case tok::kw___w64:
+ case tok::kw___sptr:
+ case tok::kw___uptr:
+ case tok::kw___ptr64:
+ case tok::kw___ptr32:
+ case tok::kw___forceinline:
+ case tok::kw___unaligned:
+ case tok::kw__Nonnull:
+ case tok::kw__Nullable:
+ case tok::kw__Null_unspecified:
+ case tok::kw___kindof:
+ return TPResult::True;
+
+ // Borland
+ case tok::kw___pascal:
+ return TPResult::True;
+
+ // AltiVec
+ case tok::kw___vector:
+ return TPResult::True;
+
+ case tok::annot_template_id: {
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ if (TemplateId->Kind != TNK_Type_template)
+ return TPResult::False;
+ CXXScopeSpec SS;
+ AnnotateTemplateIdTokenAsType();
+ assert(Tok.is(tok::annot_typename));
+ goto case_typename;
+ }
+
+ case tok::annot_cxxscope: // foo::bar or ::foo::bar, but already parsed
+ // We've already annotated a scope; try to annotate a type.
+ if (TryAnnotateTypeOrScopeToken())
+ return TPResult::Error;
+ if (!Tok.is(tok::annot_typename)) {
+ // If the next token is an identifier or a type qualifier, then this
+ // can't possibly be a valid expression either.
+ if (Tok.is(tok::annot_cxxscope) && NextToken().is(tok::identifier)) {
+ CXXScopeSpec SS;
+ Actions.RestoreNestedNameSpecifierAnnotation(Tok.getAnnotationValue(),
+ Tok.getAnnotationRange(),
+ SS);
+ if (SS.getScopeRep() && SS.getScopeRep()->isDependent()) {
+ TentativeParsingAction PA(*this);
+ ConsumeToken();
+ ConsumeToken();
+ bool isIdentifier = Tok.is(tok::identifier);
+ TPResult TPR = TPResult::False;
+ if (!isIdentifier)
+ TPR = isCXXDeclarationSpecifier(BracedCastResult,
+ HasMissingTypename);
+ PA.Revert();
+
+ if (isIdentifier ||
+ TPR == TPResult::True || TPR == TPResult::Error)
+ return TPResult::Error;
+
+ if (HasMissingTypename) {
+ // We can't tell whether this is a missing 'typename' or a valid
+ // expression.
+ *HasMissingTypename = true;
+ return TPResult::Ambiguous;
+ }
+ } else {
+ // Try to resolve the name. If it doesn't exist, assume it was
+ // intended to name a type and keep disambiguating.
+ switch (TryAnnotateName(false /* SS is not dependent */)) {
+ case ANK_Error:
+ return TPResult::Error;
+ case ANK_TentativeDecl:
+ return TPResult::False;
+ case ANK_TemplateName:
+ // A bare type template-name which can't be a template template
+ // argument is an error, and was probably intended to be a type.
+ return GreaterThanIsOperator ? TPResult::True : TPResult::False;
+ case ANK_Unresolved:
+ return HasMissingTypename ? TPResult::Ambiguous
+ : TPResult::False;
+ case ANK_Success:
+ // Annotated it, check again.
+ assert(Tok.isNot(tok::annot_cxxscope) ||
+ NextToken().isNot(tok::identifier));
+ return isCXXDeclarationSpecifier(BracedCastResult,
+ HasMissingTypename);
+ }
+ }
+ }
+ return TPResult::False;
+ }
+ // If that succeeded, fallthrough into the generic simple-type-id case.
+
+ // The ambiguity resides in a simple-type-specifier/typename-specifier
+ // followed by a '('. The '(' could either be the start of:
+ //
+ // direct-declarator:
+ // '(' declarator ')'
+ //
+ // direct-abstract-declarator:
+ // '(' parameter-declaration-clause ')' cv-qualifier-seq[opt]
+ // exception-specification[opt]
+ // '(' abstract-declarator ')'
+ //
+ // or part of a function-style cast expression:
+ //
+ // simple-type-specifier '(' expression-list[opt] ')'
+ //
+
+ // simple-type-specifier:
+
+ case tok::annot_typename:
+ case_typename:
+ // In Objective-C, we might have a protocol-qualified type.
+ if (getLangOpts().ObjC1 && NextToken().is(tok::less)) {
+ // Tentatively parse the protocol qualifiers.
+ TentativeParsingAction PA(*this);
+ ConsumeToken(); // The type token
+
+ TPResult TPR = TryParseProtocolQualifiers();
+ bool isFollowedByParen = Tok.is(tok::l_paren);
+ bool isFollowedByBrace = Tok.is(tok::l_brace);
+
+ PA.Revert();
+
+ if (TPR == TPResult::Error)
+ return TPResult::Error;
+
+ if (isFollowedByParen)
+ return TPResult::Ambiguous;
+
+ if (getLangOpts().CPlusPlus11 && isFollowedByBrace)
+ return BracedCastResult;
+
+ return TPResult::True;
+ }
+
+ case tok::kw_char:
+ case tok::kw_wchar_t:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+ case tok::kw_bool:
+ case tok::kw_short:
+ case tok::kw_int:
+ case tok::kw_long:
+ case tok::kw___int64:
+ case tok::kw___int128:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw_half:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_void:
+ case tok::annot_decltype:
+ if (NextToken().is(tok::l_paren))
+ return TPResult::Ambiguous;
+
+ // This is a function-style cast in all cases we disambiguate other than
+ // one:
+ // struct S {
+ // enum E : int { a = 4 }; // enum
+ // enum E : int { 4 }; // bit-field
+ // };
+ if (getLangOpts().CPlusPlus11 && NextToken().is(tok::l_brace))
+ return BracedCastResult;
+
+ if (isStartOfObjCClassMessageMissingOpenBracket())
+ return TPResult::False;
+
+ return TPResult::True;
+
+ // GNU typeof support.
+ case tok::kw_typeof: {
+ if (NextToken().isNot(tok::l_paren))
+ return TPResult::True;
+
+ TentativeParsingAction PA(*this);
+
+ TPResult TPR = TryParseTypeofSpecifier();
+ bool isFollowedByParen = Tok.is(tok::l_paren);
+ bool isFollowedByBrace = Tok.is(tok::l_brace);
+
+ PA.Revert();
+
+ if (TPR == TPResult::Error)
+ return TPResult::Error;
+
+ if (isFollowedByParen)
+ return TPResult::Ambiguous;
+
+ if (getLangOpts().CPlusPlus11 && isFollowedByBrace)
+ return BracedCastResult;
+
+ return TPResult::True;
+ }
+
+ // C++0x type traits support
+ case tok::kw___underlying_type:
+ return TPResult::True;
+
+ // C11 _Atomic
+ case tok::kw__Atomic:
+ return TPResult::True;
+
+ default:
+ return TPResult::False;
+ }
+}
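+
+// e.g. after the type 'T', the token sequence 'T(x)' remains ambiguous here
+// (hypothetical names): it may open a declarator, as in 'T (x) = init;', or a
+// function-style cast, as in 'T(x) + 1'; callers resolve it with further
+// lookahead.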
+
+bool Parser::isCXXDeclarationSpecifierAType() {
+ switch (Tok.getKind()) {
+ // typename-specifier
+ case tok::annot_decltype:
+ case tok::annot_template_id:
+ case tok::annot_typename:
+ case tok::kw_typeof:
+ case tok::kw___underlying_type:
+ return true;
+
+ // elaborated-type-specifier
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw_union:
+ case tok::kw___interface:
+ case tok::kw_enum:
+ return true;
+
+ // simple-type-specifier
+ case tok::kw_char:
+ case tok::kw_wchar_t:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+ case tok::kw_bool:
+ case tok::kw_short:
+ case tok::kw_int:
+ case tok::kw_long:
+ case tok::kw___int64:
+ case tok::kw___int128:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw_half:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_void:
+ case tok::kw___unknown_anytype:
+ case tok::kw___auto_type:
+ return true;
+
+ case tok::kw_auto:
+ return getLangOpts().CPlusPlus11;
+
+ case tok::kw__Atomic:
+ // "_Atomic foo"
+ return NextToken().is(tok::l_paren);
+
+ default:
+ return false;
+ }
+}
+
+/// [GNU] typeof-specifier:
+///           'typeof' '(' expression ')'
+///           'typeof' '(' type-name ')'
+///
+Parser::TPResult Parser::TryParseTypeofSpecifier() {
+ assert(Tok.is(tok::kw_typeof) && "Expected 'typeof'!");
+ ConsumeToken();
+
+ assert(Tok.is(tok::l_paren) && "Expected '('");
+ // Parse through the parens after 'typeof'.
+ ConsumeParen();
+ if (!SkipUntil(tok::r_paren, StopAtSemi))
+ return TPResult::Error;
+
+ return TPResult::Ambiguous;
+}
+
+/// [ObjC] protocol-qualifiers:
+///    '<' identifier-list '>'
+Parser::TPResult Parser::TryParseProtocolQualifiers() {
+ assert(Tok.is(tok::less) && "Expected '<' for qualifier list");
+ ConsumeToken();
+ do {
+ if (Tok.isNot(tok::identifier))
+ return TPResult::Error;
+ ConsumeToken();
+
+ if (Tok.is(tok::comma)) {
+ ConsumeToken();
+ continue;
+ }
+
+ if (Tok.is(tok::greater)) {
+ ConsumeToken();
+ return TPResult::Ambiguous;
+ }
+  } while (true);
+
+ return TPResult::Error;
+}
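+
+// e.g. this consumes the '<NSCopying, NSCoding>' in 'id<NSCopying, NSCoding>'
+// (illustrative protocol names) and returns Ambiguous on the closing '>'.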
+
+/// isCXXFunctionDeclarator - Disambiguates between a function declarator and
+/// a constructor-style initializer, when parsing declaration statements.
+/// Returns true for a function declarator and false for a constructor-style
+/// initializer. If a parsing error is encountered during disambiguation,
+/// the function returns true to let the declaration parsing code handle it.
+///
+/// '(' parameter-declaration-clause ')' cv-qualifier-seq[opt]
+/// exception-specification[opt]
+///
+bool Parser::isCXXFunctionDeclarator(bool *IsAmbiguous) {
+
+ // C++ 8.2p1:
+ // The ambiguity arising from the similarity between a function-style cast and
+ // a declaration mentioned in 6.8 can also occur in the context of a
+ // declaration. In that context, the choice is between a function declaration
+ // with a redundant set of parentheses around a parameter name and an object
+ // declaration with a function-style cast as the initializer. Just as for the
+ // ambiguities mentioned in 6.8, the resolution is to consider any construct
+ // that could possibly be a declaration a declaration.
+
+ TentativeParsingAction PA(*this);
+
+ ConsumeParen();
+ bool InvalidAsDeclaration = false;
+ TPResult TPR = TryParseParameterDeclarationClause(&InvalidAsDeclaration);
+ if (TPR == TPResult::Ambiguous) {
+ if (Tok.isNot(tok::r_paren))
+ TPR = TPResult::False;
+ else {
+ const Token &Next = NextToken();
+ if (Next.isOneOf(tok::amp, tok::ampamp, tok::kw_const, tok::kw_volatile,
+ tok::kw_throw, tok::kw_noexcept, tok::l_square,
+ tok::l_brace, tok::kw_try, tok::equal, tok::arrow) ||
+ isCXX11VirtSpecifier(Next))
+ // The next token cannot appear after a constructor-style initializer,
+ // and can appear next in a function definition. This must be a function
+ // declarator.
+ TPR = TPResult::True;
+ else if (InvalidAsDeclaration)
+ // Use the absence of 'typename' as a tie-breaker.
+ TPR = TPResult::False;
+ }
+ }
+
+ PA.Revert();
+
+ if (IsAmbiguous && TPR == TPResult::Ambiguous)
+ *IsAmbiguous = true;
+
+ // In case of an error, let the declaration parsing code handle it.
+ return TPR != TPResult::False;
+}
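+
+// The classic instance of this ambiguity is the "most vexing parse"
+// (hypothetical types):
+//
+//   Widget w(Gadget());    // declares a function 'w', not an object
+//   Widget w((Gadget()));  // the extra parens force the object reading
+//
+// Per the rule quoted above, anything that can be a declaration is one.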
+
+/// parameter-declaration-clause:
+/// parameter-declaration-list[opt] '...'[opt]
+/// parameter-declaration-list ',' '...'
+///
+/// parameter-declaration-list:
+/// parameter-declaration
+/// parameter-declaration-list ',' parameter-declaration
+///
+/// parameter-declaration:
+/// attribute-specifier-seq[opt] decl-specifier-seq declarator attributes[opt]
+/// attribute-specifier-seq[opt] decl-specifier-seq declarator attributes[opt]
+/// '=' assignment-expression
+/// attribute-specifier-seq[opt] decl-specifier-seq abstract-declarator[opt]
+/// attributes[opt]
+/// attribute-specifier-seq[opt] decl-specifier-seq abstract-declarator[opt]
+/// attributes[opt] '=' assignment-expression
+///
+Parser::TPResult
+Parser::TryParseParameterDeclarationClause(bool *InvalidAsDeclaration,
+ bool VersusTemplateArgument) {
+
+ if (Tok.is(tok::r_paren))
+ return TPResult::Ambiguous;
+
+ // parameter-declaration-list[opt] '...'[opt]
+ // parameter-declaration-list ',' '...'
+ //
+ // parameter-declaration-list:
+ // parameter-declaration
+ // parameter-declaration-list ',' parameter-declaration
+ //
+  while (true) {
+ // '...'[opt]
+ if (Tok.is(tok::ellipsis)) {
+ ConsumeToken();
+ if (Tok.is(tok::r_paren))
+ return TPResult::True; // '...)' is a sign of a function declarator.
+ else
+ return TPResult::False;
+ }
+
+ // An attribute-specifier-seq here is a sign of a function declarator.
+ if (isCXX11AttributeSpecifier(/*Disambiguate*/false,
+ /*OuterMightBeMessageSend*/true))
+ return TPResult::True;
+
+ ParsedAttributes attrs(AttrFactory);
+ MaybeParseMicrosoftAttributes(attrs);
+
+ // decl-specifier-seq
+ // A parameter-declaration's initializer must be preceded by an '=', so
+ // decl-specifier-seq '{' is not a parameter in C++11.
+ TPResult TPR = isCXXDeclarationSpecifier(TPResult::False,
+ InvalidAsDeclaration);
+
+ if (VersusTemplateArgument && TPR == TPResult::True) {
+ // Consume the decl-specifier-seq. We have to look past it, since a
+ // type-id might appear here in a template argument.
+ bool SeenType = false;
+ do {
+ SeenType |= isCXXDeclarationSpecifierAType();
+ if (TryConsumeDeclarationSpecifier() == TPResult::Error)
+ return TPResult::Error;
+
+ // If we see a parameter name, this can't be a template argument.
+ if (SeenType && Tok.is(tok::identifier))
+ return TPResult::True;
+
+ TPR = isCXXDeclarationSpecifier(TPResult::False,
+ InvalidAsDeclaration);
+ if (TPR == TPResult::Error)
+ return TPR;
+ } while (TPR != TPResult::False);
+ } else if (TPR == TPResult::Ambiguous) {
+ // Disambiguate what follows the decl-specifier.
+ if (TryConsumeDeclarationSpecifier() == TPResult::Error)
+ return TPResult::Error;
+ } else
+ return TPR;
+
+ // declarator
+ // abstract-declarator[opt]
+ TPR = TryParseDeclarator(true/*mayBeAbstract*/);
+ if (TPR != TPResult::Ambiguous)
+ return TPR;
+
+ // [GNU] attributes[opt]
+ if (Tok.is(tok::kw___attribute))
+ return TPResult::True;
+
+ // If we're disambiguating a template argument in a default argument in
+ // a class definition versus a parameter declaration, an '=' here
+ // disambiguates the parse one way or the other.
+ // If this is a parameter, it must have a default argument because
+ // (a) the previous parameter did, and
+ // (b) this must be the first declaration of the function, so we can't
+ // inherit any default arguments from elsewhere.
+ // If we see an ')', then we've reached the end of a
+ // parameter-declaration-clause, and the last param is missing its default
+ // argument.
+ if (VersusTemplateArgument)
+ return Tok.isOneOf(tok::equal, tok::r_paren) ? TPResult::True
+ : TPResult::False;
+
+ if (Tok.is(tok::equal)) {
+ // '=' assignment-expression
+ // Parse through assignment-expression.
+ // FIXME: assignment-expression may contain an unparenthesized comma.
+ if (!SkipUntil(tok::comma, tok::r_paren, StopAtSemi | StopBeforeMatch))
+ return TPResult::Error;
+ }
+
+ if (Tok.is(tok::ellipsis)) {
+ ConsumeToken();
+ if (Tok.is(tok::r_paren))
+ return TPResult::True; // '...)' is a sign of a function declarator.
+ else
+ return TPResult::False;
+ }
+
+ if (!TryConsumeToken(tok::comma))
+ break;
+ }
+
+ return TPResult::Ambiguous;
+}
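+
+// e.g. a trailing '...)' as in 'int f(int, ...)' or 'int f(...)' is taken as
+// conclusive evidence of a function declarator by the ellipsis checks above
+// (illustrative signatures).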
+
+/// TryParseFunctionDeclarator - We parsed a '(' and we want to try to continue
+/// parsing as a function declarator.
+/// If TryParseFunctionDeclarator fully parsed the function declarator, it will
+/// return TPResult::Ambiguous; otherwise it will return either TPResult::False
+/// or TPResult::Error.
+///
+/// '(' parameter-declaration-clause ')' cv-qualifier-seq[opt]
+/// exception-specification[opt]
+///
+/// exception-specification:
+/// 'throw' '(' type-id-list[opt] ')'
+///
+Parser::TPResult Parser::TryParseFunctionDeclarator() {
+
+ // The '(' is already parsed.
+
+ TPResult TPR = TryParseParameterDeclarationClause();
+ if (TPR == TPResult::Ambiguous && Tok.isNot(tok::r_paren))
+ TPR = TPResult::False;
+
+ if (TPR == TPResult::False || TPR == TPResult::Error)
+ return TPR;
+
+ // Parse through the parens.
+ if (!SkipUntil(tok::r_paren, StopAtSemi))
+ return TPResult::Error;
+
+ // cv-qualifier-seq
+ while (Tok.isOneOf(tok::kw_const, tok::kw_volatile, tok::kw_restrict))
+ ConsumeToken();
+
+ // ref-qualifier[opt]
+ if (Tok.isOneOf(tok::amp, tok::ampamp))
+ ConsumeToken();
+
+ // exception-specification
+ if (Tok.is(tok::kw_throw)) {
+ ConsumeToken();
+ if (Tok.isNot(tok::l_paren))
+ return TPResult::Error;
+
+ // Parse through the parens after 'throw'.
+ ConsumeParen();
+ if (!SkipUntil(tok::r_paren, StopAtSemi))
+ return TPResult::Error;
+ }
+ if (Tok.is(tok::kw_noexcept)) {
+ ConsumeToken();
+ // Possibly an expression as well.
+ if (Tok.is(tok::l_paren)) {
+ // Find the matching rparen.
+ ConsumeParen();
+ if (!SkipUntil(tok::r_paren, StopAtSemi))
+ return TPResult::Error;
+ }
+ }
+
+ return TPResult::Ambiguous;
+}
+
+/// '[' constant-expression[opt] ']'
+///
+Parser::TPResult Parser::TryParseBracketDeclarator() {
+ ConsumeBracket();
+ if (!SkipUntil(tok::r_square, StopAtSemi))
+ return TPResult::Error;
+
+ return TPResult::Ambiguous;
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/Parser.cpp b/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
new file mode 100644
index 0000000..b3eeb9d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
@@ -0,0 +1,2102 @@
+//===--- Parser.cpp - C Language Family Parser ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Parser interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "RAIIObjectsForParser.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/Scope.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+namespace {
+/// \brief A comment handler that passes comments found by the preprocessor
+/// to the parser action.
+class ActionCommentHandler : public CommentHandler {
+ Sema &S;
+
+public:
+ explicit ActionCommentHandler(Sema &S) : S(S) { }
+
+ bool HandleComment(Preprocessor &PP, SourceRange Comment) override {
+ S.ActOnComment(Comment);
+ return false;
+ }
+};
+
+/// \brief RAII object to destroy the contents of a SmallVector of
+/// TemplateIdAnnotation pointers and clear the vector.
+class DestroyTemplateIdAnnotationsRAIIObj {
+ SmallVectorImpl<TemplateIdAnnotation *> &Container;
+
+public:
+ DestroyTemplateIdAnnotationsRAIIObj(
+ SmallVectorImpl<TemplateIdAnnotation *> &Container)
+ : Container(Container) {}
+
+ ~DestroyTemplateIdAnnotationsRAIIObj() {
+ for (SmallVectorImpl<TemplateIdAnnotation *>::iterator I =
+ Container.begin(),
+ E = Container.end();
+ I != E; ++I)
+ (*I)->Destroy();
+ Container.clear();
+ }
+};
+} // end anonymous namespace
+
+IdentifierInfo *Parser::getSEHExceptKeyword() {
+ // __except is accepted as a (contextual) keyword
+ if (!Ident__except && (getLangOpts().MicrosoftExt || getLangOpts().Borland))
+ Ident__except = PP.getIdentifierInfo("__except");
+
+ return Ident__except;
+}
+
+Parser::Parser(Preprocessor &pp, Sema &actions, bool skipFunctionBodies)
+ : PP(pp), Actions(actions), Diags(PP.getDiagnostics()),
+ GreaterThanIsOperator(true), ColonIsSacred(false),
+ InMessageExpression(false), TemplateParameterDepth(0),
+ ParsingInObjCContainer(false) {
+ SkipFunctionBodies = pp.isCodeCompletionEnabled() || skipFunctionBodies;
+ Tok.startToken();
+ Tok.setKind(tok::eof);
+ Actions.CurScope = nullptr;
+ NumCachedScopes = 0;
+ ParenCount = BracketCount = BraceCount = 0;
+ CurParsedObjCImpl = nullptr;
+
+ // Add #pragma handlers. These are removed and destroyed in the
+ // destructor.
+ initializePragmaHandlers();
+
+ CommentSemaHandler.reset(new ActionCommentHandler(actions));
+ PP.addCommentHandler(CommentSemaHandler.get());
+
+ PP.setCodeCompletionHandler(*this);
+}
+
+DiagnosticBuilder Parser::Diag(SourceLocation Loc, unsigned DiagID) {
+ return Diags.Report(Loc, DiagID);
+}
+
+DiagnosticBuilder Parser::Diag(const Token &Tok, unsigned DiagID) {
+ return Diag(Tok.getLocation(), DiagID);
+}
+
+/// \brief Emits a diagnostic suggesting parentheses surrounding a
+/// given range.
+///
+/// \param Loc The location where we'll emit the diagnostic.
+/// \param DK The kind of diagnostic to emit.
+/// \param ParenRange Source range enclosing code that should be parenthesized.
+void Parser::SuggestParentheses(SourceLocation Loc, unsigned DK,
+ SourceRange ParenRange) {
+ SourceLocation EndLoc = PP.getLocForEndOfToken(ParenRange.getEnd());
+ if (!ParenRange.getEnd().isFileID() || EndLoc.isInvalid()) {
+    // We can't display the parentheses, so just emit the
+    // warning/error and return.
+ Diag(Loc, DK);
+ return;
+ }
+
+ Diag(Loc, DK)
+ << FixItHint::CreateInsertion(ParenRange.getBegin(), "(")
+ << FixItHint::CreateInsertion(EndLoc, ")");
+}
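+
+// Sketch: for a ParenRange covering a hypothetical 'a = b', the fix-its
+// produce '(a = b)'; if the range ends inside a macro expansion, only the
+// bare diagnostic is emitted.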
+
+static bool IsCommonTypo(tok::TokenKind ExpectedTok, const Token &Tok) {
+ switch (ExpectedTok) {
+ case tok::semi:
+ return Tok.is(tok::colon) || Tok.is(tok::comma); // : or , for ;
+ default: return false;
+ }
+}
+
+bool Parser::ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned DiagID,
+ StringRef Msg) {
+ if (Tok.is(ExpectedTok) || Tok.is(tok::code_completion)) {
+ ConsumeAnyToken();
+ return false;
+ }
+
+ // Detect common single-character typos and resume.
+ if (IsCommonTypo(ExpectedTok, Tok)) {
+ SourceLocation Loc = Tok.getLocation();
+ {
+ DiagnosticBuilder DB = Diag(Loc, DiagID);
+ DB << FixItHint::CreateReplacement(
+ SourceRange(Loc), tok::getPunctuatorSpelling(ExpectedTok));
+ if (DiagID == diag::err_expected)
+ DB << ExpectedTok;
+ else if (DiagID == diag::err_expected_after)
+ DB << Msg << ExpectedTok;
+ else
+ DB << Msg;
+ }
+
+ // Pretend there wasn't a problem.
+ ConsumeAnyToken();
+ return false;
+ }
+
+ SourceLocation EndLoc = PP.getLocForEndOfToken(PrevTokLocation);
+ const char *Spelling = nullptr;
+ if (EndLoc.isValid())
+ Spelling = tok::getPunctuatorSpelling(ExpectedTok);
+
+ DiagnosticBuilder DB =
+ Spelling
+ ? Diag(EndLoc, DiagID) << FixItHint::CreateInsertion(EndLoc, Spelling)
+ : Diag(Tok, DiagID);
+ if (DiagID == diag::err_expected)
+ DB << ExpectedTok;
+ else if (DiagID == diag::err_expected_after)
+ DB << Msg << ExpectedTok;
+ else
+ DB << Msg;
+
+ return true;
+}
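+
+// Illustrative call: after a statement, ExpectAndConsume(tok::semi,
+// diag::err_expected) fixes up a ':' or ',' typed in place of ';' via
+// IsCommonTypo and resumes as if the ';' had been present (the DiagID
+// shown is one plausible choice; callers vary).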
+
+bool Parser::ExpectAndConsumeSemi(unsigned DiagID) {
+ if (TryConsumeToken(tok::semi))
+ return false;
+
+ if (Tok.is(tok::code_completion)) {
+ handleUnexpectedCodeCompletionToken();
+ return false;
+ }
+
+ if ((Tok.is(tok::r_paren) || Tok.is(tok::r_square)) &&
+ NextToken().is(tok::semi)) {
+ Diag(Tok, diag::err_extraneous_token_before_semi)
+ << PP.getSpelling(Tok)
+ << FixItHint::CreateRemoval(Tok.getLocation());
+ ConsumeAnyToken(); // The ')' or ']'.
+ ConsumeToken(); // The ';'.
+ return false;
+ }
+
+ return ExpectAndConsume(tok::semi, DiagID);
+}
+
+void Parser::ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST) {
+ if (!Tok.is(tok::semi)) return;
+
+ bool HadMultipleSemis = false;
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc = Tok.getLocation();
+ ConsumeToken();
+
+ while ((Tok.is(tok::semi) && !Tok.isAtStartOfLine())) {
+ HadMultipleSemis = true;
+ EndLoc = Tok.getLocation();
+ ConsumeToken();
+ }
+
+ // C++11 allows extra semicolons at namespace scope, but not in any of the
+ // other contexts.
+ if (Kind == OutsideFunction && getLangOpts().CPlusPlus) {
+ if (getLangOpts().CPlusPlus11)
+ Diag(StartLoc, diag::warn_cxx98_compat_top_level_semi)
+ << FixItHint::CreateRemoval(SourceRange(StartLoc, EndLoc));
+ else
+ Diag(StartLoc, diag::ext_extra_semi_cxx11)
+ << FixItHint::CreateRemoval(SourceRange(StartLoc, EndLoc));
+ return;
+ }
+
+ if (Kind != AfterMemberFunctionDefinition || HadMultipleSemis)
+ Diag(StartLoc, diag::ext_extra_semi)
+ << Kind << DeclSpec::getSpecifierName((DeclSpec::TST)TST,
+ Actions.getASTContext().getPrintingPolicy())
+ << FixItHint::CreateRemoval(SourceRange(StartLoc, EndLoc));
+ else
+ // A single semicolon is valid after a member function definition.
+ Diag(StartLoc, diag::warn_extra_semi_after_mem_fn_def)
+ << FixItHint::CreateRemoval(SourceRange(StartLoc, EndLoc));
+}
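+
+// Illustrative inputs: the stray ';' in 'struct S { int x;; };' draws
+// ext_extra_semi, a lone ';' after an in-class member function body, as in
+// 'struct S { void f() {}; };', gets the milder
+// warn_extra_semi_after_mem_fn_def, and at namespace scope C++11 permits the
+// extra ';' with only a compatibility warning.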
+
+//===----------------------------------------------------------------------===//
+// Error recovery.
+//===----------------------------------------------------------------------===//
+
+static bool HasFlagsSet(Parser::SkipUntilFlags L, Parser::SkipUntilFlags R) {
+ return (static_cast<unsigned>(L) & static_cast<unsigned>(R)) != 0;
+}
+
+/// SkipUntil - Read tokens until we get to the specified token, then consume
+/// it (unless the StopBeforeMatch flag is set). Because we cannot guarantee
+/// that the token will ever occur, this skips to the next token, or to some
+/// likely good stopping point. If the StopAtSemi flag is set, skipping will
+/// stop at a ';' character.
+///
+/// If SkipUntil finds the specified token, it returns true, otherwise it
+/// returns false.
+bool Parser::SkipUntil(ArrayRef<tok::TokenKind> Toks, SkipUntilFlags Flags) {
+ // We always want this function to skip at least one token if the first token
+ // isn't T and if not at EOF.
+ bool isFirstTokenSkipped = true;
+  while (true) {
+ // If we found one of the tokens, stop and return true.
+ for (unsigned i = 0, NumToks = Toks.size(); i != NumToks; ++i) {
+ if (Tok.is(Toks[i])) {
+ if (HasFlagsSet(Flags, StopBeforeMatch)) {
+ // Noop, don't consume the token.
+ } else {
+ ConsumeAnyToken();
+ }
+ return true;
+ }
+ }
+
+ // Important special case: The caller has given up and just wants us to
+ // skip the rest of the file. Do this without recursing, since we can
+ // get here precisely because the caller detected too much recursion.
+ if (Toks.size() == 1 && Toks[0] == tok::eof &&
+ !HasFlagsSet(Flags, StopAtSemi) &&
+ !HasFlagsSet(Flags, StopAtCodeCompletion)) {
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+ return true;
+ }
+
+ switch (Tok.getKind()) {
+ case tok::eof:
+ // Ran out of tokens.
+ return false;
+
+ case tok::annot_pragma_openmp:
+ case tok::annot_pragma_openmp_end:
+ // Stop before an OpenMP pragma boundary.
+ case tok::annot_module_begin:
+ case tok::annot_module_end:
+ case tok::annot_module_include:
+ // Stop before we change submodules. They generally indicate a "good"
+ // place to pick up parsing again (except in the special case where
+ // we're trying to skip to EOF).
+ return false;
+
+ case tok::code_completion:
+ if (!HasFlagsSet(Flags, StopAtCodeCompletion))
+ handleUnexpectedCodeCompletionToken();
+ return false;
+
+ case tok::l_paren:
+ // Recursively skip properly-nested parens.
+ ConsumeParen();
+ if (HasFlagsSet(Flags, StopAtCodeCompletion))
+ SkipUntil(tok::r_paren, StopAtCodeCompletion);
+ else
+ SkipUntil(tok::r_paren);
+ break;
+ case tok::l_square:
+ // Recursively skip properly-nested square brackets.
+ ConsumeBracket();
+ if (HasFlagsSet(Flags, StopAtCodeCompletion))
+ SkipUntil(tok::r_square, StopAtCodeCompletion);
+ else
+ SkipUntil(tok::r_square);
+ break;
+ case tok::l_brace:
+ // Recursively skip properly-nested braces.
+ ConsumeBrace();
+ if (HasFlagsSet(Flags, StopAtCodeCompletion))
+ SkipUntil(tok::r_brace, StopAtCodeCompletion);
+ else
+ SkipUntil(tok::r_brace);
+ break;
+
+ // Okay, we found a ']' or '}' or ')', which we think should be balanced.
+ // Since the user wasn't looking for this token (if they were, it would
+ // already be handled), this isn't balanced. If there is a LHS token at a
+ // higher level, we will assume that this matches the unbalanced token
+ // and return it. Otherwise, this is a spurious RHS token, which we skip.
+ case tok::r_paren:
+ if (ParenCount && !isFirstTokenSkipped)
+ return false; // Matches something.
+ ConsumeParen();
+ break;
+ case tok::r_square:
+ if (BracketCount && !isFirstTokenSkipped)
+ return false; // Matches something.
+ ConsumeBracket();
+ break;
+ case tok::r_brace:
+ if (BraceCount && !isFirstTokenSkipped)
+ return false; // Matches something.
+ ConsumeBrace();
+ break;
+
+ case tok::string_literal:
+ case tok::wide_string_literal:
+ case tok::utf8_string_literal:
+ case tok::utf16_string_literal:
+ case tok::utf32_string_literal:
+ ConsumeStringToken();
+ break;
+
+ case tok::semi:
+ if (HasFlagsSet(Flags, StopAtSemi))
+ return false;
+ // FALL THROUGH.
+ default:
+ // Skip this token.
+ ConsumeToken();
+ break;
+ }
+ isFirstTokenSkipped = false;
+ }
+}
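+
+// Typical recovery use (illustrative; this exact call form appears elsewhere
+// in the parser):
+//
+//   SkipUntil(tok::r_paren, StopAtSemi);  // consume through ')', stop at ';'
+//
+// Balanced (), [], and {} runs are skipped as whole units, so recovery never
+// stops inside a properly nested group.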
+
+//===----------------------------------------------------------------------===//
+// Scope manipulation
+//===----------------------------------------------------------------------===//
+
+/// EnterScope - Start a new scope.
+void Parser::EnterScope(unsigned ScopeFlags) {
+ if (NumCachedScopes) {
+ Scope *N = ScopeCache[--NumCachedScopes];
+ N->Init(getCurScope(), ScopeFlags);
+ Actions.CurScope = N;
+ } else {
+ Actions.CurScope = new Scope(getCurScope(), ScopeFlags, Diags);
+ }
+}
+
+/// ExitScope - Pop a scope off the scope stack.
+void Parser::ExitScope() {
+ assert(getCurScope() && "Scope imbalance!");
+
+ // Inform the actions module that this scope is going away if there are any
+ // decls in it.
+ Actions.ActOnPopScope(Tok.getLocation(), getCurScope());
+
+ Scope *OldScope = getCurScope();
+ Actions.CurScope = OldScope->getParent();
+
+ if (NumCachedScopes == ScopeCacheSize)
+ delete OldScope;
+ else
+ ScopeCache[NumCachedScopes++] = OldScope;
+}
+
+/// Set the flags for the current scope to ScopeFlags. If ManageFlags is false,
+/// this object does nothing.
+Parser::ParseScopeFlags::ParseScopeFlags(Parser *Self, unsigned ScopeFlags,
+ bool ManageFlags)
+ : CurScope(ManageFlags ? Self->getCurScope() : nullptr) {
+ if (CurScope) {
+ OldFlags = CurScope->getFlags();
+ CurScope->setFlags(ScopeFlags);
+ }
+}
+
+/// Restore the flags for the current scope to what they were before this
+/// object overrode them.
+Parser::ParseScopeFlags::~ParseScopeFlags() {
+ if (CurScope)
+ CurScope->setFlags(OldFlags);
+}
+
+
+//===----------------------------------------------------------------------===//
+// C99 6.9: External Definitions.
+//===----------------------------------------------------------------------===//
+
+Parser::~Parser() {
+ // If we still have scopes active, delete the scope tree.
+ delete getCurScope();
+ Actions.CurScope = nullptr;
+
+ // Free the scope cache.
+ for (unsigned i = 0, e = NumCachedScopes; i != e; ++i)
+ delete ScopeCache[i];
+
+ resetPragmaHandlers();
+
+ PP.removeCommentHandler(CommentSemaHandler.get());
+
+ PP.clearCodeCompletionHandler();
+
+ if (getLangOpts().DelayedTemplateParsing &&
+ !PP.isIncrementalProcessingEnabled() && !TemplateIds.empty()) {
+ // If an ASTConsumer parsed delay-parsed templates in their
+ // HandleTranslationUnit() method, TemplateIds created there were not
+ // guarded by a DestroyTemplateIdAnnotationsRAIIObj object in
+ // ParseTopLevelDecl(). Destroy them here.
+ DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(TemplateIds);
+ }
+
+ assert(TemplateIds.empty() && "Still alive TemplateIdAnnotations around?");
+}
+
+/// Initialize - Warm up the parser.
+///
+void Parser::Initialize() {
+ // Create the translation unit scope. Install it as the current scope.
+ assert(getCurScope() == nullptr && "A scope is already active?");
+ EnterScope(Scope::DeclScope);
+ Actions.ActOnTranslationUnitScope(getCurScope());
+
+ // Initialization for Objective-C context sensitive keywords recognition.
+ // Referenced in Parser::ParseObjCTypeQualifierList.
+ if (getLangOpts().ObjC1) {
+ ObjCTypeQuals[objc_in] = &PP.getIdentifierTable().get("in");
+ ObjCTypeQuals[objc_out] = &PP.getIdentifierTable().get("out");
+ ObjCTypeQuals[objc_inout] = &PP.getIdentifierTable().get("inout");
+ ObjCTypeQuals[objc_oneway] = &PP.getIdentifierTable().get("oneway");
+ ObjCTypeQuals[objc_bycopy] = &PP.getIdentifierTable().get("bycopy");
+ ObjCTypeQuals[objc_byref] = &PP.getIdentifierTable().get("byref");
+ ObjCTypeQuals[objc_nonnull] = &PP.getIdentifierTable().get("nonnull");
+ ObjCTypeQuals[objc_nullable] = &PP.getIdentifierTable().get("nullable");
+ ObjCTypeQuals[objc_null_unspecified]
+ = &PP.getIdentifierTable().get("null_unspecified");
+ }
+
+ Ident_instancetype = nullptr;
+ Ident_final = nullptr;
+ Ident_sealed = nullptr;
+ Ident_override = nullptr;
+
+ Ident_super = &PP.getIdentifierTable().get("super");
+
+ Ident_vector = nullptr;
+ Ident_bool = nullptr;
+ Ident_pixel = nullptr;
+ if (getLangOpts().AltiVec || getLangOpts().ZVector) {
+ Ident_vector = &PP.getIdentifierTable().get("vector");
+ Ident_bool = &PP.getIdentifierTable().get("bool");
+ }
+ if (getLangOpts().AltiVec)
+ Ident_pixel = &PP.getIdentifierTable().get("pixel");
+
+ Ident_introduced = nullptr;
+ Ident_deprecated = nullptr;
+ Ident_obsoleted = nullptr;
+ Ident_unavailable = nullptr;
+
+ Ident__except = nullptr;
+
+ Ident__exception_code = Ident__exception_info = nullptr;
+ Ident__abnormal_termination = Ident___exception_code = nullptr;
+ Ident___exception_info = Ident___abnormal_termination = nullptr;
+ Ident_GetExceptionCode = Ident_GetExceptionInfo = nullptr;
+ Ident_AbnormalTermination = nullptr;
+
+  if (getLangOpts().Borland) {
+ Ident__exception_info = PP.getIdentifierInfo("_exception_info");
+ Ident___exception_info = PP.getIdentifierInfo("__exception_info");
+ Ident_GetExceptionInfo = PP.getIdentifierInfo("GetExceptionInformation");
+ Ident__exception_code = PP.getIdentifierInfo("_exception_code");
+ Ident___exception_code = PP.getIdentifierInfo("__exception_code");
+ Ident_GetExceptionCode = PP.getIdentifierInfo("GetExceptionCode");
+ Ident__abnormal_termination = PP.getIdentifierInfo("_abnormal_termination");
+ Ident___abnormal_termination = PP.getIdentifierInfo("__abnormal_termination");
+ Ident_AbnormalTermination = PP.getIdentifierInfo("AbnormalTermination");
+
+ PP.SetPoisonReason(Ident__exception_code, diag::err_seh___except_block);
+ PP.SetPoisonReason(Ident___exception_code, diag::err_seh___except_block);
+ PP.SetPoisonReason(Ident_GetExceptionCode, diag::err_seh___except_block);
+ PP.SetPoisonReason(Ident__exception_info, diag::err_seh___except_filter);
+ PP.SetPoisonReason(Ident___exception_info, diag::err_seh___except_filter);
+ PP.SetPoisonReason(Ident_GetExceptionInfo, diag::err_seh___except_filter);
+ PP.SetPoisonReason(Ident__abnormal_termination, diag::err_seh___finally_block);
+ PP.SetPoisonReason(Ident___abnormal_termination, diag::err_seh___finally_block);
+ PP.SetPoisonReason(Ident_AbnormalTermination, diag::err_seh___finally_block);
+ }
+
+ Actions.Initialize();
+
+ // Prime the lexer look-ahead.
+ ConsumeToken();
+}
+
+void Parser::LateTemplateParserCleanupCallback(void *P) {
+ // While this RAII helper doesn't bracket any actual work, the destructor will
+ // clean up annotations that were created during ActOnEndOfTranslationUnit
+ // when incremental processing is enabled.
+ DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(((Parser *)P)->TemplateIds);
+}
+
+/// ParseTopLevelDecl - Parse one top-level declaration and return whatever the
+/// action tells us to. This returns true if EOF was encountered.
+bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) {
+ DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(TemplateIds);
+
+ // Skip over the EOF token, flagging the end of the previous input for
+ // incremental processing.
+ if (PP.isIncrementalProcessingEnabled() && Tok.is(tok::eof))
+ ConsumeToken();
+
+ Result = DeclGroupPtrTy();
+ switch (Tok.getKind()) {
+ case tok::annot_pragma_unused:
+ HandlePragmaUnused();
+ return false;
+
+ case tok::annot_module_include:
+ Actions.ActOnModuleInclude(Tok.getLocation(),
+ reinterpret_cast<Module *>(
+ Tok.getAnnotationValue()));
+ ConsumeToken();
+ return false;
+
+ case tok::annot_module_begin:
+ Actions.ActOnModuleBegin(Tok.getLocation(), reinterpret_cast<Module *>(
+ Tok.getAnnotationValue()));
+ ConsumeToken();
+ return false;
+
+ case tok::annot_module_end:
+ Actions.ActOnModuleEnd(Tok.getLocation(), reinterpret_cast<Module *>(
+ Tok.getAnnotationValue()));
+ ConsumeToken();
+ return false;
+
+ case tok::eof:
+ // Late template parsing can begin.
+ if (getLangOpts().DelayedTemplateParsing)
+ Actions.SetLateTemplateParser(LateTemplateParserCallback,
+ PP.isIncrementalProcessingEnabled() ?
+ LateTemplateParserCleanupCallback : nullptr,
+ this);
+ if (!PP.isIncrementalProcessingEnabled())
+ Actions.ActOnEndOfTranslationUnit();
+ // else don't tell Sema that we ended parsing: more input might come.
+ return true;
+
+ default:
+ break;
+ }
+
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX11Attributes(attrs);
+ MaybeParseMicrosoftAttributes(attrs);
+
+ Result = ParseExternalDeclaration(attrs);
+ return false;
+}
+
+/// ParseExternalDeclaration:
+///
+/// external-declaration: [C99 6.9], declaration: [C++ dcl.dcl]
+/// function-definition
+/// declaration
+/// [GNU] asm-definition
+/// [GNU] __extension__ external-declaration
+/// [OBJC] objc-class-definition
+/// [OBJC] objc-class-declaration
+/// [OBJC] objc-alias-declaration
+/// [OBJC] objc-protocol-definition
+/// [OBJC] objc-method-definition
+/// [OBJC] @end
+/// [C++] linkage-specification
+/// [GNU] asm-definition:
+/// simple-asm-expr ';'
+/// [C++11] empty-declaration
+/// [C++11] attribute-declaration
+///
+/// [C++11] empty-declaration:
+/// ';'
+///
+/// [C++0x/GNU] 'extern' 'template' declaration
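+///
+/// As a rough illustration (a hedged, non-exhaustive sketch; 'Foo' is a
+/// placeholder name), each of the following lines is an external-declaration
+/// this entry point accepts:
+///   int f(void) { return 0; }        // function-definition
+///   extern int g;                    // declaration
+///   asm("nop");                      // [GNU] asm-definition
+///   ;                                // [C++11] empty-declaration
+///   extern template class Foo<int>;  // [C++0x/GNU] 'extern' 'template'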
+Parser::DeclGroupPtrTy
+Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
+ ParsingDeclSpec *DS) {
+ DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(TemplateIds);
+ ParenBraceBracketBalancer BalancerRAIIObj(*this);
+
+ if (PP.isCodeCompletionReached()) {
+ cutOffParsing();
+ return DeclGroupPtrTy();
+ }
+
+ Decl *SingleDecl = nullptr;
+ switch (Tok.getKind()) {
+ case tok::annot_pragma_vis:
+ HandlePragmaVisibility();
+ return DeclGroupPtrTy();
+ case tok::annot_pragma_pack:
+ HandlePragmaPack();
+ return DeclGroupPtrTy();
+ case tok::annot_pragma_msstruct:
+ HandlePragmaMSStruct();
+ return DeclGroupPtrTy();
+ case tok::annot_pragma_align:
+ HandlePragmaAlign();
+ return DeclGroupPtrTy();
+ case tok::annot_pragma_weak:
+ HandlePragmaWeak();
+ return DeclGroupPtrTy();
+ case tok::annot_pragma_weakalias:
+ HandlePragmaWeakAlias();
+ return DeclGroupPtrTy();
+ case tok::annot_pragma_redefine_extname:
+ HandlePragmaRedefineExtname();
+ return DeclGroupPtrTy();
+ case tok::annot_pragma_fp_contract:
+ HandlePragmaFPContract();
+ return DeclGroupPtrTy();
+ case tok::annot_pragma_opencl_extension:
+ HandlePragmaOpenCLExtension();
+ return DeclGroupPtrTy();
+ case tok::annot_pragma_openmp:
+ return ParseOpenMPDeclarativeDirective();
+ case tok::annot_pragma_ms_pointers_to_members:
+ HandlePragmaMSPointersToMembers();
+ return DeclGroupPtrTy();
+ case tok::annot_pragma_ms_vtordisp:
+ HandlePragmaMSVtorDisp();
+ return DeclGroupPtrTy();
+ case tok::annot_pragma_ms_pragma:
+ HandlePragmaMSPragma();
+ return DeclGroupPtrTy();
+ case tok::semi:
+ // Either a C++11 empty-declaration or attribute-declaration.
+ SingleDecl = Actions.ActOnEmptyDeclaration(getCurScope(),
+ attrs.getList(),
+ Tok.getLocation());
+ ConsumeExtraSemi(OutsideFunction);
+ break;
+ case tok::r_brace:
+ Diag(Tok, diag::err_extraneous_closing_brace);
+ ConsumeBrace();
+ return DeclGroupPtrTy();
+ case tok::eof:
+ Diag(Tok, diag::err_expected_external_declaration);
+ return DeclGroupPtrTy();
+ case tok::kw___extension__: {
+ // __extension__ silences extension warnings in the subexpression.
+ ExtensionRAIIObject O(Diags); // Use RAII to do this.
+ ConsumeToken();
+ return ParseExternalDeclaration(attrs);
+ }
+ case tok::kw_asm: {
+ ProhibitAttributes(attrs);
+
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc;
+
+ ExprResult Result(ParseSimpleAsm(&EndLoc));
+
+ // Check if GNU-style InlineAsm is disabled.
+ // An empty asm string is allowed because it will not introduce
+ // any assembly code.
+ if (!(getLangOpts().GNUAsm || Result.isInvalid())) {
+ const auto *SL = cast<StringLiteral>(Result.get());
+ if (!SL->getString().trim().empty())
+ Diag(StartLoc, diag::err_gnu_inline_asm_disabled);
+ }
+
+ ExpectAndConsume(tok::semi, diag::err_expected_after,
+ "top-level asm block");
+
+ if (Result.isInvalid())
+ return DeclGroupPtrTy();
+ SingleDecl = Actions.ActOnFileScopeAsmDecl(Result.get(), StartLoc, EndLoc);
+ break;
+ }
+ case tok::at:
+ return ParseObjCAtDirectives();
+ case tok::minus:
+ case tok::plus:
+ if (!getLangOpts().ObjC1) {
+ Diag(Tok, diag::err_expected_external_declaration);
+ ConsumeToken();
+ return DeclGroupPtrTy();
+ }
+ SingleDecl = ParseObjCMethodDefinition();
+ break;
+ case tok::code_completion:
+ Actions.CodeCompleteOrdinaryName(getCurScope(),
+ CurParsedObjCImpl? Sema::PCC_ObjCImplementation
+ : Sema::PCC_Namespace);
+ cutOffParsing();
+ return DeclGroupPtrTy();
+ case tok::kw_using:
+ case tok::kw_namespace:
+ case tok::kw_typedef:
+ case tok::kw_template:
+ case tok::kw_export: // As in 'export template'
+ case tok::kw_static_assert:
+ case tok::kw__Static_assert:
+ // A function definition cannot start with any of these keywords.
+ {
+ SourceLocation DeclEnd;
+ return ParseDeclaration(Declarator::FileContext, DeclEnd, attrs);
+ }
+
+ case tok::kw_static:
+ // Parse (then ignore) 'static' prior to a template instantiation. This is
+ // a GCC extension that we intentionally do not support.
+ if (getLangOpts().CPlusPlus && NextToken().is(tok::kw_template)) {
+ Diag(ConsumeToken(), diag::warn_static_inline_explicit_inst_ignored)
+ << 0;
+ SourceLocation DeclEnd;
+ return ParseDeclaration(Declarator::FileContext, DeclEnd, attrs);
+ }
+ goto dont_know;
+
+ case tok::kw_inline:
+ if (getLangOpts().CPlusPlus) {
+ tok::TokenKind NextKind = NextToken().getKind();
+
+ // Inline namespaces. Allowed as an extension even in C++03.
+ if (NextKind == tok::kw_namespace) {
+ SourceLocation DeclEnd;
+ return ParseDeclaration(Declarator::FileContext, DeclEnd, attrs);
+ }
+
+ // Parse (then ignore) 'inline' prior to a template instantiation. This is
+ // a GCC extension that we intentionally do not support.
+ if (NextKind == tok::kw_template) {
+ Diag(ConsumeToken(), diag::warn_static_inline_explicit_inst_ignored)
+ << 1;
+ SourceLocation DeclEnd;
+ return ParseDeclaration(Declarator::FileContext, DeclEnd, attrs);
+ }
+ }
+ goto dont_know;
+
+ case tok::kw_extern:
+ if (getLangOpts().CPlusPlus && NextToken().is(tok::kw_template)) {
+ // Extern templates
+ SourceLocation ExternLoc = ConsumeToken();
+ SourceLocation TemplateLoc = ConsumeToken();
+ Diag(ExternLoc, getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_extern_template :
+ diag::ext_extern_template) << SourceRange(ExternLoc, TemplateLoc);
+ SourceLocation DeclEnd;
+ return Actions.ConvertDeclToDeclGroup(
+ ParseExplicitInstantiation(Declarator::FileContext,
+ ExternLoc, TemplateLoc, DeclEnd));
+ }
+ goto dont_know;
+
+ case tok::kw___if_exists:
+ case tok::kw___if_not_exists:
+ ParseMicrosoftIfExistsExternalDeclaration();
+ return DeclGroupPtrTy();
+
+ default:
+ dont_know:
+ // We can't tell whether this is a function-definition or declaration yet.
+ return ParseDeclarationOrFunctionDefinition(attrs, DS);
+ }
+
+ // This routine returns a DeclGroup; if the thing we parsed only contains a
+ // single decl, convert it now.
+ return Actions.ConvertDeclToDeclGroup(SingleDecl);
+}
+
+/// \brief Determine whether the current token, if it occurs after a
+/// declarator, continues a declaration or declaration list.
+bool Parser::isDeclarationAfterDeclarator() {
+ // Check for '= delete' or '= default'
+ if (getLangOpts().CPlusPlus && Tok.is(tok::equal)) {
+ const Token &KW = NextToken();
+ if (KW.is(tok::kw_default) || KW.is(tok::kw_delete))
+ return false;
+ }
+
+ return Tok.is(tok::equal) || // int X()= -> not a function def
+ Tok.is(tok::comma) || // int X(), -> not a function def
+ Tok.is(tok::semi) || // int X(); -> not a function def
+ Tok.is(tok::kw_asm) || // int X() __asm__ -> not a function def
+ Tok.is(tok::kw___attribute) || // int X() __attr__ -> not a function def
+ (getLangOpts().CPlusPlus &&
+ Tok.is(tok::l_paren)); // int X(0) -> not a function def [C++]
+}
+
+/// \brief Determine whether the current token, if it occurs after a
+/// declarator, indicates the start of a function definition.
+bool Parser::isStartOfFunctionDefinition(const ParsingDeclarator &Declarator) {
+ assert(Declarator.isFunctionDeclarator() && "Isn't a function declarator");
+ if (Tok.is(tok::l_brace)) // int X() {}
+ return true;
+
+ // Handle K&R C argument lists: int X(f) int f; {}
+ if (!getLangOpts().CPlusPlus &&
+ Declarator.getFunctionTypeInfo().isKNRPrototype())
+ return isDeclarationSpecifier();
+
+ if (getLangOpts().CPlusPlus && Tok.is(tok::equal)) {
+ const Token &KW = NextToken();
+ return KW.is(tok::kw_default) || KW.is(tok::kw_delete);
+ }
+
+ return Tok.is(tok::colon) || // X() : Base() {} (used for ctors)
+ Tok.is(tok::kw_try); // X() try { ... }
+}
+
+/// ParseDeclarationOrFunctionDefinition - Parse either a function-definition
+/// or a declaration. We can't tell which we have until we read up to the
+/// compound-statement in a function-definition.
+///
+/// function-definition: [C99 6.9.1]
+/// decl-specs declarator declaration-list[opt] compound-statement
+/// [C90] function-definition: [C90 6.7.1] - implicit int result
+/// [C90] decl-specs[opt] declarator declaration-list[opt] compound-statement
+///
+/// declaration: [C99 6.7]
+/// declaration-specifiers init-declarator-list[opt] ';'
+/// [!C99] init-declarator-list ';' [TODO: warn in c99 mode]
+/// [OMP] threadprivate-directive [TODO]
+///
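+/// For example (illustrative), 'int f(int);' and 'int f(int) { return 0; }'
+/// are token-for-token identical through the declarator; only the ';' or '{'
+/// that follows decides declaration versus function-definition.
+///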
+Parser::DeclGroupPtrTy
+Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
+ ParsingDeclSpec &DS,
+ AccessSpecifier AS) {
+ // Parse the common declaration-specifiers piece.
+ ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS, DSC_top_level);
+
+ // If we had a free-standing type definition with a missing semicolon, we
+ // may get this far before the problem becomes obvious.
+ if (DS.hasTagDefinition() &&
+ DiagnoseMissingSemiAfterTagDefinition(DS, AS, DSC_top_level))
+ return DeclGroupPtrTy();
+
+ // C99 6.7.2.3p6: Handle "struct-or-union identifier;", "enum { X };"
+ // declaration-specifiers init-declarator-list[opt] ';'
+ if (Tok.is(tok::semi)) {
+ ProhibitAttributes(attrs);
+ ConsumeToken();
+ Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS, DS);
+ DS.complete(TheDecl);
+ return Actions.ConvertDeclToDeclGroup(TheDecl);
+ }
+
+ DS.takeAttributesFrom(attrs);
+
+ // ObjC2 allows prefix attributes on class interfaces and protocols.
+ // FIXME: This still needs better diagnostics. We should only accept
+ // attributes here, no types, etc.
+ if (getLangOpts().ObjC2 && Tok.is(tok::at)) {
+ SourceLocation AtLoc = ConsumeToken(); // the "@"
+ if (!Tok.isObjCAtKeyword(tok::objc_interface) &&
+ !Tok.isObjCAtKeyword(tok::objc_protocol)) {
+ Diag(Tok, diag::err_objc_unexpected_attr);
+ SkipUntil(tok::semi); // FIXME: better skip?
+ return DeclGroupPtrTy();
+ }
+
+ DS.abort();
+
+ const char *PrevSpec = nullptr;
+ unsigned DiagID;
+ if (DS.SetTypeSpecType(DeclSpec::TST_unspecified, AtLoc, PrevSpec, DiagID,
+ Actions.getASTContext().getPrintingPolicy()))
+ Diag(AtLoc, DiagID) << PrevSpec;
+
+ if (Tok.isObjCAtKeyword(tok::objc_protocol))
+ return ParseObjCAtProtocolDeclaration(AtLoc, DS.getAttributes());
+
+ return Actions.ConvertDeclToDeclGroup(
+ ParseObjCAtInterfaceDeclaration(AtLoc, DS.getAttributes()));
+ }
+
+ // If the declspec consisted only of 'extern' and we have a string
+ // literal following it, this must be a C++ linkage specifier like
+ // 'extern "C"'.
+ if (getLangOpts().CPlusPlus && isTokenStringLiteral() &&
+ DS.getStorageClassSpec() == DeclSpec::SCS_extern &&
+ DS.getParsedSpecifiers() == DeclSpec::PQ_StorageClassSpecifier) {
+ Decl *TheDecl = ParseLinkage(DS, Declarator::FileContext);
+ return Actions.ConvertDeclToDeclGroup(TheDecl);
+ }
+
+ return ParseDeclGroup(DS, Declarator::FileContext);
+}
+
+Parser::DeclGroupPtrTy
+Parser::ParseDeclarationOrFunctionDefinition(ParsedAttributesWithRange &attrs,
+ ParsingDeclSpec *DS,
+ AccessSpecifier AS) {
+ if (DS) {
+ return ParseDeclOrFunctionDefInternal(attrs, *DS, AS);
+ } else {
+ ParsingDeclSpec PDS(*this);
+ // Must temporarily exit the objective-c container scope for
+ // parsing c constructs and re-enter objc container scope
+ // afterwards.
+ ObjCDeclContextSwitch ObjCDC(*this);
+
+ return ParseDeclOrFunctionDefInternal(attrs, PDS, AS);
+ }
+}
+
+/// ParseFunctionDefinition - We parsed and verified that the specified
+/// Declarator is well formed. If this is a K&R-style function, read the
+/// parameters declaration-list, then start the compound-statement.
+///
+/// function-definition: [C99 6.9.1]
+/// decl-specs declarator declaration-list[opt] compound-statement
+/// [C90] function-definition: [C90 6.7.1] - implicit int result
+/// [C90] decl-specs[opt] declarator declaration-list[opt] compound-statement
+/// [C++] function-definition: [C++ 8.4]
+/// decl-specifier-seq[opt] declarator ctor-initializer[opt]
+/// function-body
+/// [C++] function-definition: [C++ 8.4]
+/// decl-specifier-seq[opt] declarator function-try-block
+///
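+/// For example (an illustrative sketch with placeholder names), all of the
+/// following forms reach this function:
+///   int f() { return 0; }               // plain body
+///   X::X() : member(0) { }              // [C++] ctor-initializer
+///   X::X() try { } catch (...) { }      // [C++] function-try-block
+///   X::X() = default;                   // [C++] defaulted definition
+///   int g(a, b) int a; float b; { }     // K&R declaration-list
+///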
+Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
+ const ParsedTemplateInfo &TemplateInfo,
+ LateParsedAttrList *LateParsedAttrs) {
+ // Poison SEH identifiers so they are flagged as illegal in function bodies.
+ PoisonSEHIdentifiersRAIIObject PoisonSEHIdentifiers(*this, true);
+ const DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
+
+ // If this is C90 and the declspecs were completely missing, fudge in an
+ // implicit int. We do this here because this is the only place where
+ // declaration-specifiers are completely optional in the grammar.
+ if (getLangOpts().ImplicitInt && D.getDeclSpec().isEmpty()) {
+ const char *PrevSpec;
+ unsigned DiagID;
+ const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
+ D.getMutableDeclSpec().SetTypeSpecType(DeclSpec::TST_int,
+ D.getIdentifierLoc(),
+ PrevSpec, DiagID,
+ Policy);
+ D.SetRangeBegin(D.getDeclSpec().getSourceRange().getBegin());
+ }
+
+ // If this declaration was formed with a K&R-style identifier list for the
+ // arguments, parse declarations for all of the args next.
+ // int foo(a,b) int a; float b; {}
+ if (FTI.isKNRPrototype())
+ ParseKNRParamDeclarations(D);
+
+ // We should have an opening brace now; in C++ we may instead see a colon
+ // (ctor-initializer), 'try', or '='.
+ if (Tok.isNot(tok::l_brace) &&
+ (!getLangOpts().CPlusPlus ||
+ (Tok.isNot(tok::colon) && Tok.isNot(tok::kw_try) &&
+ Tok.isNot(tok::equal)))) {
+ Diag(Tok, diag::err_expected_fn_body);
+
+ // Skip over garbage, until we get to '{'. Don't eat the '{'.
+ SkipUntil(tok::l_brace, StopAtSemi | StopBeforeMatch);
+
+ // If we didn't find the '{', bail out.
+ if (Tok.isNot(tok::l_brace))
+ return nullptr;
+ }
+
+ // Check to make sure that any normal attributes are allowed to be on
+ // a definition. Late parsed attributes are checked at the end.
+ if (Tok.isNot(tok::equal)) {
+ AttributeList *DtorAttrs = D.getAttributes();
+ while (DtorAttrs) {
+ if (DtorAttrs->isKnownToGCC() &&
+ !DtorAttrs->isCXX11Attribute()) {
+ Diag(DtorAttrs->getLoc(), diag::warn_attribute_on_function_definition)
+ << DtorAttrs->getName();
+ }
+ DtorAttrs = DtorAttrs->getNext();
+ }
+ }
+
+ // In delayed template parsing mode, for function templates we consume the
+ // tokens and store them for late parsing at the end of the translation unit.
+ if (getLangOpts().DelayedTemplateParsing && Tok.isNot(tok::equal) &&
+ TemplateInfo.Kind == ParsedTemplateInfo::Template &&
+ Actions.canDelayFunctionBody(D)) {
+ MultiTemplateParamsArg TemplateParameterLists(*TemplateInfo.TemplateParams);
+
+ ParseScope BodyScope(this, Scope::FnScope|Scope::DeclScope);
+ Scope *ParentScope = getCurScope()->getParent();
+
+ D.setFunctionDefinitionKind(FDK_Definition);
+ Decl *DP = Actions.HandleDeclarator(ParentScope, D,
+ TemplateParameterLists);
+ D.complete(DP);
+ D.getMutableDeclSpec().abort();
+
+ CachedTokens Toks;
+ LexTemplateFunctionForLateParsing(Toks);
+
+ if (DP) {
+ FunctionDecl *FnD = DP->getAsFunction();
+ Actions.CheckForFunctionRedefinition(FnD);
+ Actions.MarkAsLateParsedTemplate(FnD, DP, Toks);
+ }
+ return DP;
+ } else if (CurParsedObjCImpl &&
+ !TemplateInfo.TemplateParams &&
+ (Tok.is(tok::l_brace) || Tok.is(tok::kw_try) ||
+ Tok.is(tok::colon)) &&
+ Actions.CurContext->isTranslationUnit()) {
+ ParseScope BodyScope(this, Scope::FnScope|Scope::DeclScope);
+ Scope *ParentScope = getCurScope()->getParent();
+
+ D.setFunctionDefinitionKind(FDK_Definition);
+ Decl *FuncDecl = Actions.HandleDeclarator(ParentScope, D,
+ MultiTemplateParamsArg());
+ D.complete(FuncDecl);
+ D.getMutableDeclSpec().abort();
+ if (FuncDecl) {
+ // Consume the tokens and store them for later parsing.
+ StashAwayMethodOrFunctionBodyTokens(FuncDecl);
+ CurParsedObjCImpl->HasCFunction = true;
+ return FuncDecl;
+ }
+ // FIXME: Should we really fall through here?
+ }
+
+ // Enter a scope for the function body.
+ ParseScope BodyScope(this, Scope::FnScope|Scope::DeclScope);
+
+ // Tell the actions module that we have entered a function definition with the
+ // specified Declarator for the function.
+ Sema::SkipBodyInfo SkipBody;
+ Decl *Res = Actions.ActOnStartOfFunctionDef(getCurScope(), D,
+ TemplateInfo.TemplateParams
+ ? *TemplateInfo.TemplateParams
+ : MultiTemplateParamsArg(),
+ &SkipBody);
+
+ if (SkipBody.ShouldSkip) {
+ SkipFunctionBody();
+ return Res;
+ }
+
+ // Break out of the ParsingDeclarator context before we parse the body.
+ D.complete(Res);
+
+ // Break out of the ParsingDeclSpec context, too. This const_cast is
+ // safe because we're always the sole owner.
+ D.getMutableDeclSpec().abort();
+
+ if (TryConsumeToken(tok::equal)) {
+ assert(getLangOpts().CPlusPlus && "Only C++ function definitions have '='");
+
+ bool Delete = false;
+ SourceLocation KWLoc;
+ if (TryConsumeToken(tok::kw_delete, KWLoc)) {
+ Diag(KWLoc, getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_defaulted_deleted_function
+ : diag::ext_defaulted_deleted_function)
+ << 1 /* deleted */;
+ Actions.SetDeclDeleted(Res, KWLoc);
+ Delete = true;
+ } else if (TryConsumeToken(tok::kw_default, KWLoc)) {
+ Diag(KWLoc, getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_defaulted_deleted_function
+ : diag::ext_defaulted_deleted_function)
+ << 0 /* defaulted */;
+ Actions.SetDeclDefaulted(Res, KWLoc);
+ } else {
+ llvm_unreachable("function definition after = not 'delete' or 'default'");
+ }
+
+ if (Tok.is(tok::comma)) {
+ Diag(KWLoc, diag::err_default_delete_in_multiple_declaration)
+ << Delete;
+ SkipUntil(tok::semi);
+ } else if (ExpectAndConsume(tok::semi, diag::err_expected_after,
+ Delete ? "delete" : "default")) {
+ SkipUntil(tok::semi);
+ }
+
+ Stmt *GeneratedBody = Res ? Res->getBody() : nullptr;
+ Actions.ActOnFinishFunctionBody(Res, GeneratedBody, false);
+ return Res;
+ }
+
+ if (Tok.is(tok::kw_try))
+ return ParseFunctionTryBlock(Res, BodyScope);
+
+ // If we have a colon, then we're probably parsing a C++
+ // ctor-initializer.
+ if (Tok.is(tok::colon)) {
+ ParseConstructorInitializer(Res);
+
+ // Recover from error.
+ if (!Tok.is(tok::l_brace)) {
+ BodyScope.Exit();
+ Actions.ActOnFinishFunctionBody(Res, nullptr);
+ return Res;
+ }
+ } else
+ Actions.ActOnDefaultCtorInitializers(Res);
+
+ // Late attributes are parsed in the same scope as the function body.
+ if (LateParsedAttrs)
+ ParseLexedAttributeList(*LateParsedAttrs, Res, false, true);
+
+ return ParseFunctionStatementBody(Res, BodyScope);
+}
+
+void Parser::SkipFunctionBody() {
+ if (Tok.is(tok::equal)) {
+ SkipUntil(tok::semi);
+ return;
+ }
+
+ bool IsFunctionTryBlock = Tok.is(tok::kw_try);
+ if (IsFunctionTryBlock)
+ ConsumeToken();
+
+ CachedTokens Skipped;
+ if (ConsumeAndStoreFunctionPrologue(Skipped))
+ SkipMalformedDecl();
+ else {
+ SkipUntil(tok::r_brace);
+ while (IsFunctionTryBlock && Tok.is(tok::kw_catch)) {
+ SkipUntil(tok::l_brace);
+ SkipUntil(tok::r_brace);
+ }
+ }
+}
+
+/// ParseKNRParamDeclarations - Parse 'declaration-list[opt]' which provides
+/// types for a function with a K&R-style identifier list for arguments.
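+/// For example (illustrative): given 'int foo(a, b) int a; float b; { }',
+/// this parses the 'int a; float b;' declaration-list and attaches each
+/// declared type to the matching identifier in the parameter list.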
+void Parser::ParseKNRParamDeclarations(Declarator &D) {
+ // We know that the top-level of this declarator is a function.
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
+
+ // Enter function-declaration scope, limiting any declarators to the
+ // function prototype scope, including parameter declarators.
+ ParseScope PrototypeScope(this, Scope::FunctionPrototypeScope |
+ Scope::FunctionDeclarationScope | Scope::DeclScope);
+
+ // Read all the argument declarations.
+ while (isDeclarationSpecifier()) {
+ SourceLocation DSStart = Tok.getLocation();
+
+ // Parse the common declaration-specifiers piece.
+ DeclSpec DS(AttrFactory);
+ ParseDeclarationSpecifiers(DS);
+
+ // C99 6.9.1p6: 'each declaration in the declaration list shall have at
+ // least one declarator'.
+ // NOTE: GCC just makes this an ext-warn. It's not clear what it does with
+ // the declarations though. It's trivial to ignore them, really hard to do
+ // anything else with them.
+ if (TryConsumeToken(tok::semi)) {
+ Diag(DSStart, diag::err_declaration_does_not_declare_param);
+ continue;
+ }
+
+ // C99 6.9.1p6: Declarations shall contain no storage-class specifiers other
+ // than register.
+ if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified &&
+ DS.getStorageClassSpec() != DeclSpec::SCS_register) {
+ Diag(DS.getStorageClassSpecLoc(),
+ diag::err_invalid_storage_class_in_func_decl);
+ DS.ClearStorageClassSpecs();
+ }
+ if (DS.getThreadStorageClassSpec() != DeclSpec::TSCS_unspecified) {
+ Diag(DS.getThreadStorageClassSpecLoc(),
+ diag::err_invalid_storage_class_in_func_decl);
+ DS.ClearStorageClassSpecs();
+ }
+
+ // Parse the first declarator attached to this declspec.
+ Declarator ParmDeclarator(DS, Declarator::KNRTypeListContext);
+ ParseDeclarator(ParmDeclarator);
+
+ // Handle the full declarator list.
+ while (true) {
+ // If attributes are present, parse them.
+ MaybeParseGNUAttributes(ParmDeclarator);
+
+ // Ask the actions module to compute the type for this declarator.
+ Decl *Param =
+ Actions.ActOnParamDeclarator(getCurScope(), ParmDeclarator);
+
+ if (Param &&
+ // A missing identifier has already been diagnosed.
+ ParmDeclarator.getIdentifier()) {
+
+ // Scan the argument list looking for the correct param to apply this
+ // type.
+ for (unsigned i = 0; ; ++i) {
+ // C99 6.9.1p6: those declarators shall declare only identifiers from
+ // the identifier list.
+ if (i == FTI.NumParams) {
+ Diag(ParmDeclarator.getIdentifierLoc(), diag::err_no_matching_param)
+ << ParmDeclarator.getIdentifier();
+ break;
+ }
+
+ if (FTI.Params[i].Ident == ParmDeclarator.getIdentifier()) {
+ // Reject redefinitions of parameters.
+ if (FTI.Params[i].Param) {
+ Diag(ParmDeclarator.getIdentifierLoc(),
+ diag::err_param_redefinition)
+ << ParmDeclarator.getIdentifier();
+ } else {
+ FTI.Params[i].Param = Param;
+ }
+ break;
+ }
+ }
+ }
+
+ // If we don't have a comma, it is either the end of the list (a ';') or
+ // an error; bail out.
+ if (Tok.isNot(tok::comma))
+ break;
+
+ ParmDeclarator.clear();
+
+ // Consume the comma.
+ ParmDeclarator.setCommaLoc(ConsumeToken());
+
+ // Parse the next declarator.
+ ParseDeclarator(ParmDeclarator);
+ }
+
+ // Consume ';' and continue parsing.
+ if (!ExpectAndConsumeSemi(diag::err_expected_semi_declaration))
+ continue;
+
+ // Otherwise recover by skipping to next semi or mandatory function body.
+ if (SkipUntil(tok::l_brace, StopAtSemi | StopBeforeMatch))
+ break;
+ TryConsumeToken(tok::semi);
+ }
+
+ // The actions module must verify that all arguments were declared.
+ Actions.ActOnFinishKNRParamDeclarations(getCurScope(), D, Tok.getLocation());
+}
+
+
+/// ParseAsmStringLiteral - This is just a normal string-literal, but is not
+/// allowed to be a wide string, and is not subject to character translation.
+///
+/// [GNU] asm-string-literal:
+/// string-literal
+///
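+/// For example (illustrative), this accepts the "nop" in 'asm("nop")' but
+/// rejects a wide string literal such as 'asm(L"nop")'.
+///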
+ExprResult Parser::ParseAsmStringLiteral() {
+ if (!isTokenStringLiteral()) {
+ Diag(Tok, diag::err_expected_string_literal)
+ << /*Source='in...'*/0 << "'asm'";
+ return ExprError();
+ }
+
+ ExprResult AsmString(ParseStringLiteralExpression());
+ if (!AsmString.isInvalid()) {
+ const auto *SL = cast<StringLiteral>(AsmString.get());
+ if (!SL->isAscii()) {
+ Diag(Tok, diag::err_asm_operand_wide_string_literal)
+ << SL->isWide()
+ << SL->getSourceRange();
+ return ExprError();
+ }
+ }
+ return AsmString;
+}
+
+/// ParseSimpleAsm
+///
+/// [GNU] simple-asm-expr:
+/// 'asm' '(' asm-string-literal ')'
+///
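+/// For example (illustrative), at file scope:
+///   asm("nop");
+///   asm volatile ("nop");  // accepted, but 'volatile' is diagnosed below
+///                          // with a fix-it that removes it
+///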
+ExprResult Parser::ParseSimpleAsm(SourceLocation *EndLoc) {
+ assert(Tok.is(tok::kw_asm) && "Not an asm!");
+ SourceLocation Loc = ConsumeToken();
+
+ if (Tok.is(tok::kw_volatile)) {
+ // Remove from the end of 'asm' to the end of 'volatile'.
+ SourceRange RemovalRange(PP.getLocForEndOfToken(Loc),
+ PP.getLocForEndOfToken(Tok.getLocation()));
+
+ Diag(Tok, diag::warn_file_asm_volatile)
+ << FixItHint::CreateRemoval(RemovalRange);
+ ConsumeToken();
+ }
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.consumeOpen()) {
+ Diag(Tok, diag::err_expected_lparen_after) << "asm";
+ return ExprError();
+ }
+
+ ExprResult Result(ParseAsmStringLiteral());
+
+ if (!Result.isInvalid()) {
+ // Close the paren and get the location of the end bracket
+ T.consumeClose();
+ if (EndLoc)
+ *EndLoc = T.getCloseLocation();
+ } else if (SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch)) {
+ if (EndLoc)
+ *EndLoc = Tok.getLocation();
+ ConsumeParen();
+ }
+
+ return Result;
+}
+
+/// \brief Get the TemplateIdAnnotation from the token and put it in the
+/// cleanup pool so that it gets destroyed when parsing the current top level
+/// declaration is finished.
+TemplateIdAnnotation *Parser::takeTemplateIdAnnotation(const Token &tok) {
+ assert(tok.is(tok::annot_template_id) && "Expected template-id token");
+ TemplateIdAnnotation *Id =
+ static_cast<TemplateIdAnnotation *>(tok.getAnnotationValue());
+ return Id;
+}
+
+void Parser::AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation) {
+ // Push the current token back into the token stream (or revert it if it is
+ // cached) and use an annotation scope token for the current token.
+ if (PP.isBacktrackEnabled())
+ PP.RevertCachedTokens(1);
+ else
+ PP.EnterToken(Tok);
+ Tok.setKind(tok::annot_cxxscope);
+ Tok.setAnnotationValue(Actions.SaveNestedNameSpecifierAnnotation(SS));
+ Tok.setAnnotationRange(SS.getRange());
+
+ // In case the tokens were cached, have Preprocessor replace them
+ // with the annotation token. We don't need to do this if we've
+ // just reverted back to a prior state.
+ if (IsNewAnnotation)
+ PP.AnnotateCachedTokens(Tok);
+}
+
+/// \brief Attempt to classify the name at the current token position. This may
+/// form a type, scope or primary expression annotation, or replace the token
+/// with a typo-corrected keyword. This is only appropriate when the current
+/// name must refer to an entity which has already been declared.
+///
+/// \param IsAddressOfOperand Must be \c true if the name is preceded by an '&'
+/// and might possibly have a dependent nested name specifier.
+/// \param CCC Indicates how to perform typo-correction for this name. If NULL,
+/// no typo correction will be performed.
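+///
+/// For example (illustrative): after 'typedef int T;', a later 'T' is
+/// replaced by a single annot_typename token, while a name that classifies
+/// as an expression becomes an annot_primary_expr token (see the NC_* cases
+/// below).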
+Parser::AnnotatedNameKind
+Parser::TryAnnotateName(bool IsAddressOfOperand,
+ std::unique_ptr<CorrectionCandidateCallback> CCC) {
+ assert(Tok.is(tok::identifier) || Tok.is(tok::annot_cxxscope));
+
+ const bool EnteringContext = false;
+ const bool WasScopeAnnotation = Tok.is(tok::annot_cxxscope);
+
+ CXXScopeSpec SS;
+ if (getLangOpts().CPlusPlus &&
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), EnteringContext))
+ return ANK_Error;
+
+ if (Tok.isNot(tok::identifier) || SS.isInvalid()) {
+ if (TryAnnotateTypeOrScopeTokenAfterScopeSpec(EnteringContext, false, SS,
+ !WasScopeAnnotation))
+ return ANK_Error;
+ return ANK_Unresolved;
+ }
+
+ IdentifierInfo *Name = Tok.getIdentifierInfo();
+ SourceLocation NameLoc = Tok.getLocation();
+
+ // FIXME: Move the tentative declaration logic into ClassifyName so we can
+ // typo-correct to tentatively-declared identifiers.
+ if (isTentativelyDeclared(Name)) {
+ // Identifier has been tentatively declared, and thus cannot be resolved as
+ // an expression. Fall back to annotating it as a type.
+ if (TryAnnotateTypeOrScopeTokenAfterScopeSpec(EnteringContext, false, SS,
+ !WasScopeAnnotation))
+ return ANK_Error;
+ return Tok.is(tok::annot_typename) ? ANK_Success : ANK_TentativeDecl;
+ }
+
+ Token Next = NextToken();
+
+ // Look up and classify the identifier. We don't perform any typo-correction
+ // after a scope specifier, because in general we can't recover from typos
+ // there (eg, after correcting 'A::tempalte B<X>::C' [sic], we would need to
+ // jump back into scope specifier parsing).
+ Sema::NameClassification Classification = Actions.ClassifyName(
+ getCurScope(), SS, Name, NameLoc, Next, IsAddressOfOperand,
+ SS.isEmpty() ? std::move(CCC) : nullptr);
+
+ switch (Classification.getKind()) {
+ case Sema::NC_Error:
+ return ANK_Error;
+
+ case Sema::NC_Keyword:
+ // The identifier was typo-corrected to a keyword.
+ Tok.setIdentifierInfo(Name);
+ Tok.setKind(Name->getTokenID());
+ PP.TypoCorrectToken(Tok);
+ if (SS.isNotEmpty())
+ AnnotateScopeToken(SS, !WasScopeAnnotation);
+ // We've "annotated" this as a keyword.
+ return ANK_Success;
+
+ case Sema::NC_Unknown:
+ // It's not something we know about. Leave it unannotated.
+ break;
+
+ case Sema::NC_Type: {
+ SourceLocation BeginLoc = NameLoc;
+ if (SS.isNotEmpty())
+ BeginLoc = SS.getBeginLoc();
+
+ // An Objective-C object type followed by '<' is a specialization of
+ // a parameterized class type or a protocol-qualified type.
+ ParsedType Ty = Classification.getType();
+ if (getLangOpts().ObjC1 && NextToken().is(tok::less) &&
+ (Ty.get()->isObjCObjectType() ||
+ Ty.get()->isObjCObjectPointerType())) {
+ // Consume the name.
+ SourceLocation IdentifierLoc = ConsumeToken();
+ SourceLocation NewEndLoc;
+ TypeResult NewType
+ = parseObjCTypeArgsAndProtocolQualifiers(IdentifierLoc, Ty,
+ /*consumeLastToken=*/false,
+ NewEndLoc);
+ if (NewType.isUsable())
+ Ty = NewType.get();
+ }
+
+ Tok.setKind(tok::annot_typename);
+ setTypeAnnotation(Tok, Ty);
+ Tok.setAnnotationEndLoc(Tok.getLocation());
+ Tok.setLocation(BeginLoc);
+ PP.AnnotateCachedTokens(Tok);
+ return ANK_Success;
+ }
+
+ case Sema::NC_Expression:
+ Tok.setKind(tok::annot_primary_expr);
+ setExprAnnotation(Tok, Classification.getExpression());
+ Tok.setAnnotationEndLoc(NameLoc);
+ if (SS.isNotEmpty())
+ Tok.setLocation(SS.getBeginLoc());
+ PP.AnnotateCachedTokens(Tok);
+ return ANK_Success;
+
+ case Sema::NC_TypeTemplate:
+ if (Next.isNot(tok::less)) {
+ // This may be a type template being used as a template template argument.
+ if (SS.isNotEmpty())
+ AnnotateScopeToken(SS, !WasScopeAnnotation);
+ return ANK_TemplateName;
+ }
+ // Fall through.
+ case Sema::NC_VarTemplate:
+ case Sema::NC_FunctionTemplate: {
+ // We have a type, variable or function template followed by '<'.
+ ConsumeToken();
+ UnqualifiedId Id;
+ Id.setIdentifier(Name, NameLoc);
+ if (AnnotateTemplateIdToken(
+ TemplateTy::make(Classification.getTemplateName()),
+ Classification.getTemplateNameKind(), SS, SourceLocation(), Id))
+ return ANK_Error;
+ return ANK_Success;
+ }
+
+ case Sema::NC_NestedNameSpecifier:
+ llvm_unreachable("already parsed nested name specifier");
+ }
+
+ // Unable to classify the name, but maybe we can annotate a scope specifier.
+ if (SS.isNotEmpty())
+ AnnotateScopeToken(SS, !WasScopeAnnotation);
+ return ANK_Unresolved;
+}
+
+bool Parser::TryKeywordIdentFallback(bool DisableKeyword) {
+ assert(Tok.isNot(tok::identifier));
+ Diag(Tok, diag::ext_keyword_as_ident)
+ << PP.getSpelling(Tok)
+ << DisableKeyword;
+ if (DisableKeyword)
+ Tok.getIdentifierInfo()->revertTokenIDToIdentifier();
+ Tok.setKind(tok::identifier);
+ return true;
+}
+
+/// TryAnnotateTypeOrScopeToken - If the current token position is on a
+/// typename (possibly qualified in C++) or a C++ scope specifier not followed
+/// by a typename, TryAnnotateTypeOrScopeToken will replace one or more tokens
+/// with a single annotation token representing the typename or C++ scope
+/// respectively.
+/// This simplifies handling of C++ scope specifiers and allows efficient
+/// backtracking without the need to re-parse and resolve nested-names and
+/// typenames.
+/// It will mainly be called when we expect to treat identifiers as typenames
+/// (if they are typenames). For example, in C we do not expect identifiers
+/// inside expressions to be treated as typenames so it will not be called
+/// for expressions in C.
+/// The benefit for C/ObjC is that a typename will be annotated and
+/// Actions.getTypeName will not need to be called again (e.g. getTypeName
+/// will not be called twice: once to check whether we have a declaration
+/// specifier, and again to get the actual type inside
+/// ParseDeclarationSpecifiers).
+///
+/// This returns true if an error occurred.
+///
+/// Note that this routine emits an error if you call it with ::new or ::delete
+/// as the current tokens, so only call it in contexts where these are invalid.
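+///
+/// For example (illustrative): given 'namespace N { struct A {}; }', the
+/// three tokens 'N', '::', 'A' are replaced by one annot_typename token;
+/// had 'A' not named a type, 'N::' alone would become an annot_cxxscope
+/// token.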
+bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext, bool NeedType) {
+ assert((Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
+ Tok.is(tok::kw_typename) || Tok.is(tok::annot_cxxscope) ||
+ Tok.is(tok::kw_decltype) || Tok.is(tok::annot_template_id) ||
+ Tok.is(tok::kw___super)) &&
+ "Cannot be a type or scope token!");
+
+ if (Tok.is(tok::kw_typename)) {
+ // MSVC lets you do stuff like:
+ // typename typedef T_::D D;
+ //
+ // We will consume the typedef token here and put it back after we have
+ // parsed the first identifier, transforming it into something more like:
+ // typename T_::D typedef D;
+ if (getLangOpts().MSVCCompat && NextToken().is(tok::kw_typedef)) {
+ Token TypedefToken;
+ PP.Lex(TypedefToken);
+ bool Result = TryAnnotateTypeOrScopeToken(EnteringContext, NeedType);
+ PP.EnterToken(Tok);
+ Tok = TypedefToken;
+ if (!Result)
+ Diag(Tok.getLocation(), diag::warn_expected_qualified_after_typename);
+ return Result;
+ }
+
+ // Parse a C++ typename-specifier, e.g., "typename T::type".
+ //
+ // typename-specifier:
+ // 'typename' '::' [opt] nested-name-specifier identifier
+ // 'typename' '::' [opt] nested-name-specifier template [opt]
+ // simple-template-id
+ SourceLocation TypenameLoc = ConsumeToken();
+ CXXScopeSpec SS;
+ if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/ParsedType(),
+ /*EnteringContext=*/false,
+ nullptr, /*IsTypename*/ true))
+ return true;
+ if (!SS.isSet()) {
+ if (Tok.is(tok::identifier) || Tok.is(tok::annot_template_id) ||
+ Tok.is(tok::annot_decltype)) {
+ // Attempt to recover by skipping the invalid 'typename'
+ if (Tok.is(tok::annot_decltype) ||
+ (!TryAnnotateTypeOrScopeToken(EnteringContext, NeedType) &&
+ Tok.isAnnotation())) {
+ unsigned DiagID = diag::err_expected_qualified_after_typename;
+ // MS compatibility: MSVC permits using known types with typename.
+ // e.g. "typedef typename T* pointer_type"
+ if (getLangOpts().MicrosoftExt)
+ DiagID = diag::warn_expected_qualified_after_typename;
+ Diag(Tok.getLocation(), DiagID);
+ return false;
+ }
+ }
+
+ Diag(Tok.getLocation(), diag::err_expected_qualified_after_typename);
+ return true;
+ }
+
+ TypeResult Ty;
+ if (Tok.is(tok::identifier)) {
+ // FIXME: check whether the next token is '<', first!
+ Ty = Actions.ActOnTypenameType(getCurScope(), TypenameLoc, SS,
+ *Tok.getIdentifierInfo(),
+ Tok.getLocation());
+ } else if (Tok.is(tok::annot_template_id)) {
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ if (TemplateId->Kind != TNK_Type_template &&
+ TemplateId->Kind != TNK_Dependent_template_name) {
+ Diag(Tok, diag::err_typename_refers_to_non_type_template)
+ << Tok.getAnnotationRange();
+ return true;
+ }
+
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+
+ Ty = Actions.ActOnTypenameType(getCurScope(), TypenameLoc, SS,
+ TemplateId->TemplateKWLoc,
+ TemplateId->Template,
+ TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc,
+ TemplateArgsPtr,
+ TemplateId->RAngleLoc);
+ } else {
+ Diag(Tok, diag::err_expected_type_name_after_typename)
+ << SS.getRange();
+ return true;
+ }
+
+ SourceLocation EndLoc = Tok.getLastLoc();
+ Tok.setKind(tok::annot_typename);
+ setTypeAnnotation(Tok, Ty.isInvalid() ? ParsedType() : Ty.get());
+ Tok.setAnnotationEndLoc(EndLoc);
+ Tok.setLocation(TypenameLoc);
+ PP.AnnotateCachedTokens(Tok);
+ return false;
+ }
+
+ // Remembers whether the token was originally a scope annotation.
+ bool WasScopeAnnotation = Tok.is(tok::annot_cxxscope);
+
+ CXXScopeSpec SS;
+ if (getLangOpts().CPlusPlus)
+ if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(), EnteringContext))
+ return true;
+
+ return TryAnnotateTypeOrScopeTokenAfterScopeSpec(EnteringContext, NeedType,
+ SS, !WasScopeAnnotation);
+}
+
+/// \brief Try to annotate a type or scope token, having already parsed an
+/// optional scope specifier. \p IsNewScope should be \c true unless the scope
+/// specifier was extracted from an existing tok::annot_cxxscope annotation.
+bool Parser::TryAnnotateTypeOrScopeTokenAfterScopeSpec(bool EnteringContext,
+ bool NeedType,
+ CXXScopeSpec &SS,
+ bool IsNewScope) {
+ if (Tok.is(tok::identifier)) {
+ IdentifierInfo *CorrectedII = nullptr;
+ // Determine whether the identifier is a type name.
+ if (ParsedType Ty = Actions.getTypeName(*Tok.getIdentifierInfo(),
+ Tok.getLocation(), getCurScope(),
+ &SS, false,
+ NextToken().is(tok::period),
+ ParsedType(),
+ /*IsCtorOrDtorName=*/false,
+ /*NonTrivialTypeSourceInfo*/ true,
+ NeedType ? &CorrectedII
+ : nullptr)) {
+ // A FixIt was applied as a result of typo correction
+ if (CorrectedII)
+ Tok.setIdentifierInfo(CorrectedII);
+
+ SourceLocation BeginLoc = Tok.getLocation();
+ if (SS.isNotEmpty()) // it was a C++ qualified type name.
+ BeginLoc = SS.getBeginLoc();
+
+ // An Objective-C object type followed by '<' is a specialization of
+ // a parameterized class type or a protocol-qualified type.
+ if (getLangOpts().ObjC1 && NextToken().is(tok::less) &&
+ (Ty.get()->isObjCObjectType() ||
+ Ty.get()->isObjCObjectPointerType())) {
+ // Consume the name.
+ SourceLocation IdentifierLoc = ConsumeToken();
+ SourceLocation NewEndLoc;
+ TypeResult NewType
+ = parseObjCTypeArgsAndProtocolQualifiers(IdentifierLoc, Ty,
+ /*consumeLastToken=*/false,
+ NewEndLoc);
+ if (NewType.isUsable())
+ Ty = NewType.get();
+ }
+
+ // This is a typename. Replace the current token in-place with an
+ // annotation type token.
+ Tok.setKind(tok::annot_typename);
+ setTypeAnnotation(Tok, Ty);
+ Tok.setAnnotationEndLoc(Tok.getLocation());
+ Tok.setLocation(BeginLoc);
+
+ // In case the tokens were cached, have Preprocessor replace
+ // them with the annotation token.
+ PP.AnnotateCachedTokens(Tok);
+ return false;
+ }
+
+ if (!getLangOpts().CPlusPlus) {
+ // If we're in C, we can't have :: tokens at all (the lexer won't return
+ // them). If the identifier is not a type, then it can't be a scope either;
+ // just exit early.
+ return false;
+ }
+
+ // If this is a template-id, annotate with a template-id or type token.
+ if (NextToken().is(tok::less)) {
+ TemplateTy Template;
+ UnqualifiedId TemplateName;
+ TemplateName.setIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
+ bool MemberOfUnknownSpecialization;
+ if (TemplateNameKind TNK
+ = Actions.isTemplateName(getCurScope(), SS,
+ /*hasTemplateKeyword=*/false, TemplateName,
+ /*ObjectType=*/ ParsedType(),
+ EnteringContext,
+ Template, MemberOfUnknownSpecialization)) {
+ // Consume the identifier.
+ ConsumeToken();
+ if (AnnotateTemplateIdToken(Template, TNK, SS, SourceLocation(),
+ TemplateName)) {
+ // If an unrecoverable error occurred, we need to return true here,
+ // because the token stream is in a damaged state. We may not return
+ // a valid identifier.
+ return true;
+ }
+ }
+ }
+
+ // The current token, which is either an identifier or a
+ // template-id, is not part of the annotation. Fall through to
+ // push that token back into the stream and complete the C++ scope
+ // specifier annotation.
+ }
+
+ if (Tok.is(tok::annot_template_id)) {
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ if (TemplateId->Kind == TNK_Type_template) {
+ // A template-id that refers to a type was parsed into a
+ // template-id annotation in a context where we weren't allowed
+ // to produce a type annotation token. Update the template-id
+ // annotation token to a type annotation token now.
+ AnnotateTemplateIdTokenAsType();
+ return false;
+ }
+ }
+
+ if (SS.isEmpty())
+ return false;
+
+ // A C++ scope specifier that isn't followed by a typename.
+ AnnotateScopeToken(SS, IsNewScope);
+ return false;
+}
+
+/// TryAnnotateCXXScopeToken - Like TryAnnotateTypeOrScopeToken but only
+/// annotates C++ scope specifiers and template-ids. This returns
+/// true if there was an error that could not be recovered from.
+///
+/// Note that this routine emits an error if you call it with ::new or ::delete
+/// as the current tokens, so only call it in contexts where these are invalid.
+bool Parser::TryAnnotateCXXScopeToken(bool EnteringContext) {
+ assert(getLangOpts().CPlusPlus &&
+ "Call sites of this function should be guarded by checking for C++");
+ assert((Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
+ (Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon)) ||
+ Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super)) &&
+ "Cannot be a type or scope token!");
+
+ CXXScopeSpec SS;
+ if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(), EnteringContext))
+ return true;
+ if (SS.isEmpty())
+ return false;
+
+ AnnotateScopeToken(SS, true);
+ return false;
+}
+
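+/// \brief Diagnose a compound-assignment or '==' token after a declarator as
+/// a likely typo for '='. For example (illustrative), 'int x += 5;' gets a
+/// diagnostic with a fix-it suggesting 'int x = 5;'. Returns true for '='
+/// and for the typo tokens handled below.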
+bool Parser::isTokenEqualOrEqualTypo() {
+ tok::TokenKind Kind = Tok.getKind();
+ switch (Kind) {
+ default:
+ return false;
+ case tok::ampequal: // &=
+ case tok::starequal: // *=
+ case tok::plusequal: // +=
+ case tok::minusequal: // -=
+ case tok::exclaimequal: // !=
+ case tok::slashequal: // /=
+ case tok::percentequal: // %=
+ case tok::lessequal: // <=
+ case tok::lesslessequal: // <<=
+ case tok::greaterequal: // >=
+ case tok::greatergreaterequal: // >>=
+ case tok::caretequal: // ^=
+ case tok::pipeequal: // |=
+ case tok::equalequal: // ==
+ Diag(Tok, diag::err_invalid_token_after_declarator_suggest_equal)
+ << Kind
+ << FixItHint::CreateReplacement(SourceRange(Tok.getLocation()), "=");
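+ // Fall through: after diagnosing, treat the token as '='.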
+ case tok::equal:
+ return true;
+ }
+}
+
+SourceLocation Parser::handleUnexpectedCodeCompletionToken() {
+ assert(Tok.is(tok::code_completion));
+ PrevTokLocation = Tok.getLocation();
+
+ for (Scope *S = getCurScope(); S; S = S->getParent()) {
+ if (S->getFlags() & Scope::FnScope) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(),
+ Sema::PCC_RecoveryInFunction);
+ cutOffParsing();
+ return PrevTokLocation;
+ }
+
+ if (S->getFlags() & Scope::ClassScope) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Class);
+ cutOffParsing();
+ return PrevTokLocation;
+ }
+ }
+
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Namespace);
+ cutOffParsing();
+ return PrevTokLocation;
+}
+
+// Code-completion pass-through functions
+
+void Parser::CodeCompleteDirective(bool InConditional) {
+ Actions.CodeCompletePreprocessorDirective(InConditional);
+}
+
+void Parser::CodeCompleteInConditionalExclusion() {
+ Actions.CodeCompleteInPreprocessorConditionalExclusion(getCurScope());
+}
+
+void Parser::CodeCompleteMacroName(bool IsDefinition) {
+ Actions.CodeCompletePreprocessorMacroName(IsDefinition);
+}
+
+void Parser::CodeCompletePreprocessorExpression() {
+ Actions.CodeCompletePreprocessorExpression();
+}
+
+void Parser::CodeCompleteMacroArgument(IdentifierInfo *Macro,
+ MacroInfo *MacroInfo,
+ unsigned ArgumentIndex) {
+ Actions.CodeCompletePreprocessorMacroArgument(getCurScope(), Macro, MacroInfo,
+ ArgumentIndex);
+}
+
+void Parser::CodeCompleteNaturalLanguage() {
+ Actions.CodeCompleteNaturalLanguage();
+}
+
+bool Parser::ParseMicrosoftIfExistsCondition(IfExistsCondition &Result) {
+ assert((Tok.is(tok::kw___if_exists) || Tok.is(tok::kw___if_not_exists)) &&
+ "Expected '__if_exists' or '__if_not_exists'");
+ Result.IsIfExists = Tok.is(tok::kw___if_exists);
+ Result.KeywordLoc = ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.consumeOpen()) {
+ Diag(Tok, diag::err_expected_lparen_after)
+ << (Result.IsIfExists? "__if_exists" : "__if_not_exists");
+ return true;
+ }
+
+ // Parse nested-name-specifier.
+ if (getLangOpts().CPlusPlus)
+ ParseOptionalCXXScopeSpecifier(Result.SS, ParsedType(),
+ /*EnteringContext=*/false);
+
+ // Check nested-name specifier.
+ if (Result.SS.isInvalid()) {
+ T.skipToEnd();
+ return true;
+ }
+
+ // Parse the unqualified-id.
+ SourceLocation TemplateKWLoc; // FIXME: parsed, but unused.
+ if (ParseUnqualifiedId(Result.SS, false, true, true, ParsedType(),
+ TemplateKWLoc, Result.Name)) {
+ T.skipToEnd();
+ return true;
+ }
+
+ if (T.consumeClose())
+ return true;
+
+ // Check if the symbol exists.
+ switch (Actions.CheckMicrosoftIfExistsSymbol(getCurScope(), Result.KeywordLoc,
+ Result.IsIfExists, Result.SS,
+ Result.Name)) {
+ case Sema::IER_Exists:
+ Result.Behavior = Result.IsIfExists ? IEB_Parse : IEB_Skip;
+ break;
+
+ case Sema::IER_DoesNotExist:
+ Result.Behavior = !Result.IsIfExists ? IEB_Parse : IEB_Skip;
+ break;
+
+ case Sema::IER_Dependent:
+ Result.Behavior = IEB_Dependent;
+ break;
+
+ case Sema::IER_Error:
+ return true;
+ }
+
+ return false;
+}
+
+void Parser::ParseMicrosoftIfExistsExternalDeclaration() {
+ IfExistsCondition Result;
+ if (ParseMicrosoftIfExistsCondition(Result))
+ return;
+
+ BalancedDelimiterTracker Braces(*this, tok::l_brace);
+ if (Braces.consumeOpen()) {
+ Diag(Tok, diag::err_expected) << tok::l_brace;
+ return;
+ }
+
+ switch (Result.Behavior) {
+ case IEB_Parse:
+ // Parse declarations below.
+ break;
+
+ case IEB_Dependent:
+ llvm_unreachable("Cannot have a dependent external declaration");
+
+ case IEB_Skip:
+ Braces.skipToEnd();
+ return;
+ }
+
+ // Parse the declarations.
+ // FIXME: Support module import within __if_exists?
+ while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX11Attributes(attrs);
+ MaybeParseMicrosoftAttributes(attrs);
+ DeclGroupPtrTy Result = ParseExternalDeclaration(attrs);
+ if (Result && !getCurScope()->getParent())
+ Actions.getASTConsumer().HandleTopLevelDecl(Result.get());
+ }
+ Braces.consumeClose();
+}
+
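+/// Parse a module import declaration, e.g. (illustrative):
+///   @import Foundation;
+///   @import std.vector;  // a dotted module path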
+Parser::DeclGroupPtrTy Parser::ParseModuleImport(SourceLocation AtLoc) {
+ assert(Tok.isObjCAtKeyword(tok::objc_import) &&
+ "Improper start to module import");
+ SourceLocation ImportLoc = ConsumeToken();
+
+ SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Path;
+
+ // Parse the module path.
+ do {
+ if (!Tok.is(tok::identifier)) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteModuleImport(ImportLoc, Path);
+ cutOffParsing();
+ return DeclGroupPtrTy();
+ }
+
+ Diag(Tok, diag::err_module_expected_ident);
+ SkipUntil(tok::semi);
+ return DeclGroupPtrTy();
+ }
+
+ // Record this part of the module path.
+ Path.push_back(std::make_pair(Tok.getIdentifierInfo(), Tok.getLocation()));
+ ConsumeToken();
+
+ if (Tok.is(tok::period)) {
+ ConsumeToken();
+ continue;
+ }
+
+ break;
+ } while (true);
+
+ if (PP.hadModuleLoaderFatalFailure()) {
+ // With a fatal failure in the module loader, we abort parsing.
+ cutOffParsing();
+ return DeclGroupPtrTy();
+ }
+
+ DeclResult Import = Actions.ActOnModuleImport(AtLoc, ImportLoc, Path);
+ ExpectAndConsumeSemi(diag::err_module_expected_semi);
+ if (Import.isInvalid())
+ return DeclGroupPtrTy();
+
+ return Actions.ConvertDeclToDeclGroup(Import.get());
+}
+
+/// \brief Try to recover the parser when a module annotation appears where it
+/// must not be found.
+/// \returns false if the recovery was successful and parsing may be continued,
+/// or true if the parser must bail out to the top level and handle the token
+/// there.
+bool Parser::parseMisplacedModuleImport() {
+ while (true) {
+ switch (Tok.getKind()) {
+ case tok::annot_module_end:
+ // Inform the caller that recovery failed; the error must be handled at an
+ // upper level.
+ return true;
+ case tok::annot_module_begin:
+ Actions.diagnoseMisplacedModuleImport(reinterpret_cast<Module *>(
+ Tok.getAnnotationValue()), Tok.getLocation());
+ return true;
+ case tok::annot_module_include:
+ // Module import found where it should not be, for instance, inside a
+ // namespace. Recover by importing the module.
+ Actions.ActOnModuleInclude(Tok.getLocation(),
+ reinterpret_cast<Module *>(
+ Tok.getAnnotationValue()));
+ ConsumeToken();
+ // If there is another module import, process it.
+ continue;
+ default:
+ return false;
+ }
+ }
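+ // Not reached: every path through the loop above returns.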
+ return false;
+}
+
+bool BalancedDelimiterTracker::diagnoseOverflow() {
+ P.Diag(P.Tok, diag::err_bracket_depth_exceeded)
+ << P.getLangOpts().BracketDepth;
+ P.Diag(P.Tok, diag::note_bracket_depth);
+ P.cutOffParsing();
+ return true;
+}
+
+bool BalancedDelimiterTracker::expectAndConsume(unsigned DiagID,
+ const char *Msg,
+ tok::TokenKind SkipToTok) {
+ LOpen = P.Tok.getLocation();
+ if (P.ExpectAndConsume(Kind, DiagID, Msg)) {
+ if (SkipToTok != tok::unknown)
+ P.SkipUntil(SkipToTok, Parser::StopAtSemi);
+ return true;
+ }
+
+ if (getDepth() < MaxDepth)
+ return false;
+
+ return diagnoseOverflow();
+}
+
+bool BalancedDelimiterTracker::diagnoseMissingClose() {
+ assert(!P.Tok.is(Close) && "Should have consumed closing delimiter");
+
+ if (P.Tok.is(tok::annot_module_end))
+ P.Diag(P.Tok, diag::err_missing_before_module_end) << Close;
+ else
+ P.Diag(P.Tok, diag::err_expected) << Close;
+ P.Diag(LOpen, diag::note_matching) << Kind;
+
+ // If we're not already at some kind of closing bracket, skip to our closing
+ // token.
+ if (P.Tok.isNot(tok::r_paren) && P.Tok.isNot(tok::r_brace) &&
+ P.Tok.isNot(tok::r_square) &&
+ P.SkipUntil(Close, FinalToken,
+ Parser::StopAtSemi | Parser::StopBeforeMatch) &&
+ P.Tok.is(Close))
+ LClose = P.ConsumeAnyToken();
+ return true;
+}
+
+void BalancedDelimiterTracker::skipToEnd() {
+ P.SkipUntil(Close, Parser::StopBeforeMatch);
+ consumeClose();
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h b/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h
new file mode 100644
index 0000000..36d87eb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h
@@ -0,0 +1,447 @@
+//===--- RAIIObjectsForParser.h - RAII helpers for the parser ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines and implements some simple RAII objects that are used
+// by the parser to manage state during recursive parsing.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_PARSE_RAIIOBJECTSFORPARSER_H
+#define LLVM_CLANG_LIB_PARSE_RAIIOBJECTSFORPARSER_H
+
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Sema/DelayedDiagnostic.h"
+#include "clang/Sema/Sema.h"
+
+namespace clang {
+ // TODO: move ParsingClassDefinition here.
+ // TODO: move TentativeParsingAction here.
+
+ /// \brief A RAII object used to temporarily suppress access-like
+ /// checking. Access-like checks are those associated with
+ /// controlling the use of a declaration, like C++ access control
+ /// errors and deprecation warnings. They are contextually
+ /// dependent, in that they can only be resolved with full
+ /// information about what's being declared. They are also
+ /// suppressed in certain contexts, like the template arguments of
+ /// an explicit instantiation. However, those suppression contexts
+ /// cannot necessarily be fully determined in advance; for
+ /// example, something starting like this:
+ /// template <> class std::vector<A::PrivateType>
+ /// might be the entirety of an explicit instantiation:
+ /// template <> class std::vector<A::PrivateType>;
+ /// or just an elaborated type specifier:
+ /// template <> class std::vector<A::PrivateType> make_vector<>();
+ /// Therefore this class collects all the diagnostics and permits
+ /// them to be re-delayed in a new context.
+ class SuppressAccessChecks {
+ Sema &S;
+ sema::DelayedDiagnosticPool DiagnosticPool;
+ Sema::ParsingDeclState State;
+ bool Active;
+
+ public:
+ /// Begin suppressing access-like checks
+ SuppressAccessChecks(Parser &P, bool activate = true)
+ : S(P.getActions()), DiagnosticPool(nullptr) {
+ if (activate) {
+ State = S.PushParsingDeclaration(DiagnosticPool);
+ Active = true;
+ } else {
+ Active = false;
+ }
+ }
+ SuppressAccessChecks(SuppressAccessChecks &&Other)
+ : S(Other.S), DiagnosticPool(std::move(Other.DiagnosticPool)),
+ State(Other.State), Active(Other.Active) {
+ Other.Active = false;
+ }
+ void operator=(SuppressAccessChecks &&Other) = delete;
+
+ void done() {
+ assert(Active && "trying to end an inactive suppression");
+ S.PopParsingDeclaration(State, nullptr);
+ Active = false;
+ }
+
+ void redelay() {
+ assert(!Active && "redelaying without having ended first");
+ if (!DiagnosticPool.pool_empty())
+ S.redelayDiagnostics(DiagnosticPool);
+ assert(DiagnosticPool.pool_empty());
+ }
+
+ ~SuppressAccessChecks() {
+ if (Active) done();
+ }
+ };
+
+ /// \brief RAII object used to inform the actions that we're
+ /// currently parsing a declaration. This is active when parsing a
+ /// variable's initializer, but not when parsing the body of a
+ /// class or function definition.
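+ ///
+ /// For instance (a hedged illustration), diagnostics such as deprecation
+ /// warnings triggered while parsing the initializer of 'int x = f();' are
+ /// collected in this object's delayed-diagnostic pool and resolved against
+ /// the resulting declaration when complete() is called.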
+ class ParsingDeclRAIIObject {
+ Sema &Actions;
+ sema::DelayedDiagnosticPool DiagnosticPool;
+ Sema::ParsingDeclState State;
+ bool Popped;
+
+ ParsingDeclRAIIObject(const ParsingDeclRAIIObject &) = delete;
+ void operator=(const ParsingDeclRAIIObject &) = delete;
+
+ public:
+ enum NoParent_t { NoParent };
+ ParsingDeclRAIIObject(Parser &P, NoParent_t _)
+ : Actions(P.getActions()), DiagnosticPool(nullptr) {
+ push();
+ }
+
+ /// Creates a RAII object whose pool is optionally parented by another.
+ ParsingDeclRAIIObject(Parser &P,
+ const sema::DelayedDiagnosticPool *parentPool)
+ : Actions(P.getActions()), DiagnosticPool(parentPool) {
+ push();
+ }
+
+    /// Creates a RAII object and, optionally, initializes its
+ /// diagnostics pool by stealing the diagnostics from another
+ /// RAII object (which is assumed to be the current top pool).
+ ParsingDeclRAIIObject(Parser &P, ParsingDeclRAIIObject *other)
+ : Actions(P.getActions()),
+ DiagnosticPool(other ? other->DiagnosticPool.getParent() : nullptr) {
+ if (other) {
+ DiagnosticPool.steal(other->DiagnosticPool);
+ other->abort();
+ }
+ push();
+ }
+
+ ~ParsingDeclRAIIObject() {
+ abort();
+ }
+
+ sema::DelayedDiagnosticPool &getDelayedDiagnosticPool() {
+ return DiagnosticPool;
+ }
+ const sema::DelayedDiagnosticPool &getDelayedDiagnosticPool() const {
+ return DiagnosticPool;
+ }
+
+ /// Resets the RAII object for a new declaration.
+ void reset() {
+ abort();
+ push();
+ }
+
+ /// Signals that the context was completed without an appropriate
+ /// declaration being parsed.
+ void abort() {
+ pop(nullptr);
+ }
+
+ void complete(Decl *D) {
+ assert(!Popped && "ParsingDeclaration has already been popped!");
+ pop(D);
+ }
+
+ /// Unregister this object from Sema, but remember all the
+ /// diagnostics that were emitted into it.
+ void abortAndRemember() {
+ pop(nullptr);
+ }
+
+ private:
+ void push() {
+ State = Actions.PushParsingDeclaration(DiagnosticPool);
+ Popped = false;
+ }
+
+ void pop(Decl *D) {
+ if (!Popped) {
+ Actions.PopParsingDeclaration(State, D);
+ Popped = true;
+ }
+ }
+ };
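+
+  // Lifecycle sketch (illustrative only; ParseSomeDeclHypothetical is an
+  // invented stand-in for whatever parse step produces the Decl):
+  //
+  //   ParsingDeclRAIIObject PD(P, ParsingDeclRAIIObject::NoParent);
+  //   if (Decl *D = ParseSomeDeclHypothetical())
+  //     PD.complete(D);  // pops the state, attaching delayed diagnostics to D
+  //   else
+  //     PD.abort();      // pops with no decl; the destructor would also do this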
+
+ /// A class for parsing a DeclSpec.
+ class ParsingDeclSpec : public DeclSpec {
+ ParsingDeclRAIIObject ParsingRAII;
+
+ public:
+ ParsingDeclSpec(Parser &P)
+ : DeclSpec(P.getAttrFactory()),
+ ParsingRAII(P, ParsingDeclRAIIObject::NoParent) {}
+ ParsingDeclSpec(Parser &P, ParsingDeclRAIIObject *RAII)
+ : DeclSpec(P.getAttrFactory()),
+ ParsingRAII(P, RAII) {}
+
+ const sema::DelayedDiagnosticPool &getDelayedDiagnosticPool() const {
+ return ParsingRAII.getDelayedDiagnosticPool();
+ }
+
+ void complete(Decl *D) {
+ ParsingRAII.complete(D);
+ }
+
+ void abort() {
+ ParsingRAII.abort();
+ }
+ };
+
+ /// A class for parsing a declarator.
+ class ParsingDeclarator : public Declarator {
+ ParsingDeclRAIIObject ParsingRAII;
+
+ public:
+ ParsingDeclarator(Parser &P, const ParsingDeclSpec &DS, TheContext C)
+ : Declarator(DS, C), ParsingRAII(P, &DS.getDelayedDiagnosticPool()) {
+ }
+
+ const ParsingDeclSpec &getDeclSpec() const {
+ return static_cast<const ParsingDeclSpec&>(Declarator::getDeclSpec());
+ }
+
+ ParsingDeclSpec &getMutableDeclSpec() const {
+ return const_cast<ParsingDeclSpec&>(getDeclSpec());
+ }
+
+ void clear() {
+ Declarator::clear();
+ ParsingRAII.reset();
+ }
+
+ void complete(Decl *D) {
+ ParsingRAII.complete(D);
+ }
+ };
+
+ /// A class for parsing a field declarator.
+ class ParsingFieldDeclarator : public FieldDeclarator {
+ ParsingDeclRAIIObject ParsingRAII;
+
+ public:
+ ParsingFieldDeclarator(Parser &P, const ParsingDeclSpec &DS)
+ : FieldDeclarator(DS), ParsingRAII(P, &DS.getDelayedDiagnosticPool()) {
+ }
+
+ const ParsingDeclSpec &getDeclSpec() const {
+ return static_cast<const ParsingDeclSpec&>(D.getDeclSpec());
+ }
+
+ ParsingDeclSpec &getMutableDeclSpec() const {
+ return const_cast<ParsingDeclSpec&>(getDeclSpec());
+ }
+
+ void complete(Decl *D) {
+ ParsingRAII.complete(D);
+ }
+ };
+
+ /// ExtensionRAIIObject - This saves the state of extension warnings when
+ /// constructed and disables them. When destructed, it restores them back to
+ /// the way they used to be. This is used to handle __extension__ in the
+ /// parser.
+ class ExtensionRAIIObject {
+ ExtensionRAIIObject(const ExtensionRAIIObject &) = delete;
+ void operator=(const ExtensionRAIIObject &) = delete;
+
+ DiagnosticsEngine &Diags;
+ public:
+ ExtensionRAIIObject(DiagnosticsEngine &diags) : Diags(diags) {
+ Diags.IncrementAllExtensionsSilenced();
+ }
+
+ ~ExtensionRAIIObject() {
+ Diags.DecrementAllExtensionsSilenced();
+ }
+ };
+
+ /// ColonProtectionRAIIObject - This sets the Parser::ColonIsSacred bool and
+ /// restores it when destroyed. This says that "foo:" should not be
+ /// considered a possible typo for "foo::" for error recovery purposes.
+ class ColonProtectionRAIIObject {
+ Parser &P;
+ bool OldVal;
+ public:
+ ColonProtectionRAIIObject(Parser &p, bool Value = true)
+ : P(p), OldVal(P.ColonIsSacred) {
+ P.ColonIsSacred = Value;
+ }
+
+ /// restore - This can be used to restore the state early, before the dtor
+ /// is run.
+ void restore() {
+ P.ColonIsSacred = OldVal;
+ }
+
+ ~ColonProtectionRAIIObject() {
+ restore();
+ }
+ };
+
+ /// \brief RAII object that makes '>' behave either as an operator
+ /// or as the closing angle bracket for a template argument list.
+ class GreaterThanIsOperatorScope {
+ bool &GreaterThanIsOperator;
+ bool OldGreaterThanIsOperator;
+ public:
+ GreaterThanIsOperatorScope(bool &GTIO, bool Val)
+ : GreaterThanIsOperator(GTIO), OldGreaterThanIsOperator(GTIO) {
+ GreaterThanIsOperator = Val;
+ }
+
+ ~GreaterThanIsOperatorScope() {
+ GreaterThanIsOperator = OldGreaterThanIsOperator;
+ }
+ };
+
+ class InMessageExpressionRAIIObject {
+ bool &InMessageExpression;
+ bool OldValue;
+
+ public:
+ InMessageExpressionRAIIObject(Parser &P, bool Value)
+ : InMessageExpression(P.InMessageExpression),
+ OldValue(P.InMessageExpression) {
+ InMessageExpression = Value;
+ }
+
+ ~InMessageExpressionRAIIObject() {
+ InMessageExpression = OldValue;
+ }
+ };
+
+ /// \brief RAII object that makes sure paren/bracket/brace count is correct
+ /// after declaration/statement parsing, even when there's a parsing error.
+ class ParenBraceBracketBalancer {
+ Parser &P;
+ unsigned short ParenCount, BracketCount, BraceCount;
+ public:
+ ParenBraceBracketBalancer(Parser &p)
+ : P(p), ParenCount(p.ParenCount), BracketCount(p.BracketCount),
+ BraceCount(p.BraceCount) { }
+
+ ~ParenBraceBracketBalancer() {
+ P.ParenCount = ParenCount;
+ P.BracketCount = BracketCount;
+ P.BraceCount = BraceCount;
+ }
+ };
+
+ class PoisonSEHIdentifiersRAIIObject {
+ PoisonIdentifierRAIIObject Ident_AbnormalTermination;
+ PoisonIdentifierRAIIObject Ident_GetExceptionCode;
+ PoisonIdentifierRAIIObject Ident_GetExceptionInfo;
+ PoisonIdentifierRAIIObject Ident__abnormal_termination;
+ PoisonIdentifierRAIIObject Ident__exception_code;
+ PoisonIdentifierRAIIObject Ident__exception_info;
+ PoisonIdentifierRAIIObject Ident___abnormal_termination;
+ PoisonIdentifierRAIIObject Ident___exception_code;
+ PoisonIdentifierRAIIObject Ident___exception_info;
+ public:
+ PoisonSEHIdentifiersRAIIObject(Parser &Self, bool NewValue)
+ : Ident_AbnormalTermination(Self.Ident_AbnormalTermination, NewValue),
+ Ident_GetExceptionCode(Self.Ident_GetExceptionCode, NewValue),
+ Ident_GetExceptionInfo(Self.Ident_GetExceptionInfo, NewValue),
+ Ident__abnormal_termination(Self.Ident__abnormal_termination, NewValue),
+ Ident__exception_code(Self.Ident__exception_code, NewValue),
+ Ident__exception_info(Self.Ident__exception_info, NewValue),
+ Ident___abnormal_termination(Self.Ident___abnormal_termination, NewValue),
+ Ident___exception_code(Self.Ident___exception_code, NewValue),
+ Ident___exception_info(Self.Ident___exception_info, NewValue) {
+ }
+ };
+
+ /// \brief RAII class that helps handle the parsing of an open/close delimiter
+ /// pair, such as braces { ... } or parentheses ( ... ).
+ class BalancedDelimiterTracker : public GreaterThanIsOperatorScope {
+ Parser& P;
+ tok::TokenKind Kind, Close, FinalToken;
+ SourceLocation (Parser::*Consumer)();
+ SourceLocation LOpen, LClose;
+
+ unsigned short &getDepth() {
+ switch (Kind) {
+ case tok::l_brace: return P.BraceCount;
+ case tok::l_square: return P.BracketCount;
+ case tok::l_paren: return P.ParenCount;
+ default: llvm_unreachable("Wrong token kind");
+ }
+ }
+
+ enum { MaxDepth = 256 };
+
+ bool diagnoseOverflow();
+ bool diagnoseMissingClose();
+
+ public:
+ BalancedDelimiterTracker(Parser& p, tok::TokenKind k,
+ tok::TokenKind FinalToken = tok::semi)
+ : GreaterThanIsOperatorScope(p.GreaterThanIsOperator, true),
+ P(p), Kind(k), FinalToken(FinalToken)
+ {
+ switch (Kind) {
+ default: llvm_unreachable("Unexpected balanced token");
+ case tok::l_brace:
+ Close = tok::r_brace;
+ Consumer = &Parser::ConsumeBrace;
+ break;
+ case tok::l_paren:
+ Close = tok::r_paren;
+ Consumer = &Parser::ConsumeParen;
+ break;
+
+ case tok::l_square:
+ Close = tok::r_square;
+ Consumer = &Parser::ConsumeBracket;
+ break;
+ }
+ }
+
+ SourceLocation getOpenLocation() const { return LOpen; }
+ SourceLocation getCloseLocation() const { return LClose; }
+ SourceRange getRange() const { return SourceRange(LOpen, LClose); }
+
+ bool consumeOpen() {
+ if (!P.Tok.is(Kind))
+ return true;
+
+ if (getDepth() < P.getLangOpts().BracketDepth) {
+ LOpen = (P.*Consumer)();
+ return false;
+ }
+
+ return diagnoseOverflow();
+ }
+
+ bool expectAndConsume(unsigned DiagID = diag::err_expected,
+ const char *Msg = "",
+ tok::TokenKind SkipToTok = tok::unknown);
+ bool consumeClose() {
+ if (P.Tok.is(Close)) {
+ LClose = (P.*Consumer)();
+ return false;
+ } else if (P.Tok.is(tok::semi) && P.NextToken().is(Close)) {
+ SourceLocation SemiLoc = P.ConsumeToken();
+ P.Diag(SemiLoc, diag::err_unexpected_semi)
+ << Close << FixItHint::CreateRemoval(SourceRange(SemiLoc, SemiLoc));
+ LClose = (P.*Consumer)();
+ return false;
+ }
+
+ return diagnoseMissingClose();
+ }
+ void skipToEnd();
+ };
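+
+  // Typical use, sketched from the interface above (the expression-parsing
+  // call in the middle is hypothetical):
+  //
+  //   BalancedDelimiterTracker T(*this, tok::l_paren);
+  //   if (T.consumeOpen())
+  //     return ExprError();            // current token was not '('
+  //   ExprResult E = ParseExpressionHypothetical();
+  //   T.consumeClose();                // eats ')' or diagnoses a missing close
+  //   SourceRange Parens = T.getRange();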
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/DeltaTree.cpp b/contrib/llvm/tools/clang/lib/Rewrite/DeltaTree.cpp
new file mode 100644
index 0000000..352fab0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/DeltaTree.cpp
@@ -0,0 +1,464 @@
+//===--- DeltaTree.cpp - B-Tree for Rewrite Delta tracking ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the DeltaTree and related classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/Core/DeltaTree.h"
+#include "clang/Basic/LLVM.h"
+#include <cstdio>
+#include <cstring>
+using namespace clang;
+
+/// The DeltaTree class is a multiway search tree (BTree) structure with some
+/// fancy features. B-Trees are generally more memory and cache efficient
+/// than binary trees, because they store multiple keys/values in each node.
+///
+/// DeltaTree implements a key/value mapping from FileIndex to Delta, allowing
+/// fast lookup by FileIndex. However, an added (important) bonus is that it
+/// can also efficiently tell us the full accumulated delta for a specific
+/// file offset as well, without traversing the whole tree.
+///
+/// The nodes of the tree are made up of instances of two classes:
+/// DeltaTreeNode and DeltaTreeInteriorNode. The latter subclasses the
+/// former and adds child pointers. Each node knows the full delta of all
+/// entries (recursively) contained inside of it, which allows us to get the
+/// full delta implied by a whole subtree in constant time.
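+///
+/// As a worked example of the mapping (offsets and deltas invented): after
+/// AddDelta(10, +5) and AddDelta(30, -2), the accumulated delta before
+/// offset 20 is +5, and before offset 40 it is +3.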
+
+namespace {
+ /// SourceDelta - As code in the original input buffer is added and deleted,
+ /// SourceDelta records are used to keep track of how the input SourceLocation
+ /// object is mapped into the output buffer.
+ struct SourceDelta {
+ unsigned FileLoc;
+ int Delta;
+
+ static SourceDelta get(unsigned Loc, int D) {
+ SourceDelta Delta;
+ Delta.FileLoc = Loc;
+ Delta.Delta = D;
+ return Delta;
+ }
+ };
+
+ /// DeltaTreeNode - The common part of all nodes.
+ ///
+ class DeltaTreeNode {
+ public:
+ struct InsertResult {
+ DeltaTreeNode *LHS, *RHS;
+ SourceDelta Split;
+ };
+
+ private:
+ friend class DeltaTreeInteriorNode;
+
+ /// WidthFactor - This controls the number of K/V slots held in the BTree:
+ /// how wide it is. Each level of the BTree is guaranteed to have at least
+ /// WidthFactor-1 K/V pairs (except the root) and may have at most
+ /// 2*WidthFactor-1 K/V pairs.
+ enum { WidthFactor = 8 };
+
+ /// Values - This tracks the SourceDelta's currently in this node.
+ ///
+ SourceDelta Values[2*WidthFactor-1];
+
+ /// NumValuesUsed - This tracks the number of values this node currently
+ /// holds.
+ unsigned char NumValuesUsed;
+
+ /// IsLeaf - This is true if this is a leaf of the btree. If false, this is
+ /// an interior node, and is actually an instance of DeltaTreeInteriorNode.
+ bool IsLeaf;
+
+ /// FullDelta - This is the full delta of all the values in this node and
+ /// all children nodes.
+ int FullDelta;
+ public:
+ DeltaTreeNode(bool isLeaf = true)
+ : NumValuesUsed(0), IsLeaf(isLeaf), FullDelta(0) {}
+
+ bool isLeaf() const { return IsLeaf; }
+ int getFullDelta() const { return FullDelta; }
+ bool isFull() const { return NumValuesUsed == 2*WidthFactor-1; }
+
+ unsigned getNumValuesUsed() const { return NumValuesUsed; }
+ const SourceDelta &getValue(unsigned i) const {
+ assert(i < NumValuesUsed && "Invalid value #");
+ return Values[i];
+ }
+ SourceDelta &getValue(unsigned i) {
+ assert(i < NumValuesUsed && "Invalid value #");
+ return Values[i];
+ }
+
+ /// DoInsertion - Do an insertion of the specified FileIndex/Delta pair into
+ /// this node. If insertion is easy, do it and return false. Otherwise,
+ /// split the node, populate InsertRes with info about the split, and return
+ /// true.
+ bool DoInsertion(unsigned FileIndex, int Delta, InsertResult *InsertRes);
+
+ void DoSplit(InsertResult &InsertRes);
+
+
+ /// RecomputeFullDeltaLocally - Recompute the FullDelta field by doing a
+ /// local walk over our contained deltas.
+ void RecomputeFullDeltaLocally();
+
+ void Destroy();
+ };
+} // end anonymous namespace
+
+namespace {
+ /// DeltaTreeInteriorNode - When isLeaf = false, a node has child pointers.
+ /// This class tracks them.
+ class DeltaTreeInteriorNode : public DeltaTreeNode {
+ DeltaTreeNode *Children[2*WidthFactor];
+ ~DeltaTreeInteriorNode() {
+ for (unsigned i = 0, e = NumValuesUsed+1; i != e; ++i)
+ Children[i]->Destroy();
+ }
+ friend class DeltaTreeNode;
+ public:
+ DeltaTreeInteriorNode() : DeltaTreeNode(false /*nonleaf*/) {}
+
+ DeltaTreeInteriorNode(const InsertResult &IR)
+ : DeltaTreeNode(false /*nonleaf*/) {
+ Children[0] = IR.LHS;
+ Children[1] = IR.RHS;
+ Values[0] = IR.Split;
+ FullDelta = IR.LHS->getFullDelta()+IR.RHS->getFullDelta()+IR.Split.Delta;
+ NumValuesUsed = 1;
+ }
+
+ const DeltaTreeNode *getChild(unsigned i) const {
+ assert(i < getNumValuesUsed()+1 && "Invalid child");
+ return Children[i];
+ }
+ DeltaTreeNode *getChild(unsigned i) {
+ assert(i < getNumValuesUsed()+1 && "Invalid child");
+ return Children[i];
+ }
+
+ static inline bool classof(const DeltaTreeNode *N) { return !N->isLeaf(); }
+ };
+}
+
+
+/// Destroy - A 'virtual' destructor.
+void DeltaTreeNode::Destroy() {
+ if (isLeaf())
+ delete this;
+ else
+ delete cast<DeltaTreeInteriorNode>(this);
+}
+
+/// RecomputeFullDeltaLocally - Recompute the FullDelta field by doing a
+/// local walk over our contained deltas.
+void DeltaTreeNode::RecomputeFullDeltaLocally() {
+ int NewFullDelta = 0;
+ for (unsigned i = 0, e = getNumValuesUsed(); i != e; ++i)
+ NewFullDelta += Values[i].Delta;
+ if (DeltaTreeInteriorNode *IN = dyn_cast<DeltaTreeInteriorNode>(this))
+ for (unsigned i = 0, e = getNumValuesUsed()+1; i != e; ++i)
+ NewFullDelta += IN->getChild(i)->getFullDelta();
+ FullDelta = NewFullDelta;
+}
+
+/// DoInsertion - Do an insertion of the specified FileIndex/Delta pair into
+/// this node. If insertion is easy, do it and return false. Otherwise,
+/// split the node, populate InsertRes with info about the split, and return
+/// true.
+bool DeltaTreeNode::DoInsertion(unsigned FileIndex, int Delta,
+ InsertResult *InsertRes) {
+ // Maintain full delta for this node.
+ FullDelta += Delta;
+
+ // Find the insertion point, the first delta whose index is >= FileIndex.
+ unsigned i = 0, e = getNumValuesUsed();
+ while (i != e && FileIndex > getValue(i).FileLoc)
+ ++i;
+
+  // If we found a record for exactly this file index, just merge this
+ // value into the pre-existing record and finish early.
+ if (i != e && getValue(i).FileLoc == FileIndex) {
+ // NOTE: Delta could drop to zero here. This means that the delta entry is
+ // useless and could be removed. Supporting erases is more complex than
+ // leaving an entry with Delta=0, so we just leave an entry with Delta=0 in
+ // the tree.
+ Values[i].Delta += Delta;
+ return false;
+ }
+
+ // Otherwise, we found an insertion point, and we know that the value at the
+ // specified index is > FileIndex. Handle the leaf case first.
+ if (isLeaf()) {
+ if (!isFull()) {
+ // For an insertion into a non-full leaf node, just insert the value in
+ // its sorted position. This requires moving later values over.
+ if (i != e)
+ memmove(&Values[i+1], &Values[i], sizeof(Values[0])*(e-i));
+ Values[i] = SourceDelta::get(FileIndex, Delta);
+ ++NumValuesUsed;
+ return false;
+ }
+
+    // Otherwise, this leaf is full: split the node at its median, insert
+ // the value into one of the children, and return the result.
+ assert(InsertRes && "No result location specified");
+ DoSplit(*InsertRes);
+
+ if (InsertRes->Split.FileLoc > FileIndex)
+ InsertRes->LHS->DoInsertion(FileIndex, Delta, nullptr /*can't fail*/);
+ else
+ InsertRes->RHS->DoInsertion(FileIndex, Delta, nullptr /*can't fail*/);
+ return true;
+ }
+
+ // Otherwise, this is an interior node. Send the request down the tree.
+ DeltaTreeInteriorNode *IN = cast<DeltaTreeInteriorNode>(this);
+ if (!IN->Children[i]->DoInsertion(FileIndex, Delta, InsertRes))
+ return false; // If there was space in the child, just return.
+
+ // Okay, this split the subtree, producing a new value and two children to
+ // insert here. If this node is non-full, we can just insert it directly.
+ if (!isFull()) {
+    // Now that we have two nodes and a new element, insert the percolated
+    // value into this node by moving all the later values/children down,
+    // then inserting the new one.
+ if (i != e)
+ memmove(&IN->Children[i+2], &IN->Children[i+1],
+ (e-i)*sizeof(IN->Children[0]));
+ IN->Children[i] = InsertRes->LHS;
+ IN->Children[i+1] = InsertRes->RHS;
+
+ if (e != i)
+ memmove(&Values[i+1], &Values[i], (e-i)*sizeof(Values[0]));
+ Values[i] = InsertRes->Split;
+ ++NumValuesUsed;
+ return false;
+ }
+
+ // Finally, if this interior node was full and a node is percolated up, split
+ // ourself and return that up the chain. Start by saving all our info to
+ // avoid having the split clobber it.
+ IN->Children[i] = InsertRes->LHS;
+ DeltaTreeNode *SubRHS = InsertRes->RHS;
+ SourceDelta SubSplit = InsertRes->Split;
+
+ // Do the split.
+ DoSplit(*InsertRes);
+
+ // Figure out where to insert SubRHS/NewSplit.
+ DeltaTreeInteriorNode *InsertSide;
+ if (SubSplit.FileLoc < InsertRes->Split.FileLoc)
+ InsertSide = cast<DeltaTreeInteriorNode>(InsertRes->LHS);
+ else
+ InsertSide = cast<DeltaTreeInteriorNode>(InsertRes->RHS);
+
+ // We now have a non-empty interior node 'InsertSide' to insert
+ // SubRHS/SubSplit into. Find out where to insert SubSplit.
+
+  // Find the insertion point: the first delta whose index is > SubSplit.FileLoc.
+ i = 0; e = InsertSide->getNumValuesUsed();
+ while (i != e && SubSplit.FileLoc > InsertSide->getValue(i).FileLoc)
+ ++i;
+
+ // Now we know that i is the place to insert the split value into. Insert it
+ // and the child right after it.
+ if (i != e)
+ memmove(&InsertSide->Children[i+2], &InsertSide->Children[i+1],
+ (e-i)*sizeof(IN->Children[0]));
+ InsertSide->Children[i+1] = SubRHS;
+
+ if (e != i)
+ memmove(&InsertSide->Values[i+1], &InsertSide->Values[i],
+ (e-i)*sizeof(Values[0]));
+ InsertSide->Values[i] = SubSplit;
+ ++InsertSide->NumValuesUsed;
+ InsertSide->FullDelta += SubSplit.Delta + SubRHS->getFullDelta();
+ return true;
+}
+
+/// DoSplit - Split the currently full node (which has 2*WidthFactor-1 values)
+/// into two subtrees each with "WidthFactor-1" values and a pivot value.
+/// Return the pieces in InsertRes.
+void DeltaTreeNode::DoSplit(InsertResult &InsertRes) {
+ assert(isFull() && "Why split a non-full node?");
+
+ // Since this node is full, it contains 2*WidthFactor-1 values. We move
+ // the first 'WidthFactor-1' values to the LHS child (which we leave in this
+ // node), propagate one value up, and move the last 'WidthFactor-1' values
+ // into the RHS child.
+
+ // Create the new child node.
+ DeltaTreeNode *NewNode;
+ if (DeltaTreeInteriorNode *IN = dyn_cast<DeltaTreeInteriorNode>(this)) {
+ // If this is an interior node, also move over 'WidthFactor' children
+ // into the new node.
+ DeltaTreeInteriorNode *New = new DeltaTreeInteriorNode();
+ memcpy(&New->Children[0], &IN->Children[WidthFactor],
+ WidthFactor*sizeof(IN->Children[0]));
+ NewNode = New;
+ } else {
+ // Just create the new leaf node.
+ NewNode = new DeltaTreeNode();
+ }
+
+ // Move over the last 'WidthFactor-1' values from here to NewNode.
+ memcpy(&NewNode->Values[0], &Values[WidthFactor],
+ (WidthFactor-1)*sizeof(Values[0]));
+
+ // Decrease the number of values in the two nodes.
+ NewNode->NumValuesUsed = NumValuesUsed = WidthFactor-1;
+
+ // Recompute the two nodes' full delta.
+ NewNode->RecomputeFullDeltaLocally();
+ RecomputeFullDeltaLocally();
+
+ InsertRes.LHS = this;
+ InsertRes.RHS = NewNode;
+ InsertRes.Split = Values[WidthFactor-1];
+}
+
+
+
+//===----------------------------------------------------------------------===//
+// DeltaTree Implementation
+//===----------------------------------------------------------------------===//
+
+//#define VERIFY_TREE
+
+#ifdef VERIFY_TREE
+/// VerifyTree - Walk the btree performing assertions on various properties to
+/// verify consistency. This is useful for debugging new changes to the tree.
+static void VerifyTree(const DeltaTreeNode *N) {
+ const DeltaTreeInteriorNode *IN = dyn_cast<DeltaTreeInteriorNode>(N);
+  if (!IN) {
+ // Verify leaves, just ensure that FullDelta matches up and the elements
+ // are in proper order.
+ int FullDelta = 0;
+ for (unsigned i = 0, e = N->getNumValuesUsed(); i != e; ++i) {
+ if (i)
+ assert(N->getValue(i-1).FileLoc < N->getValue(i).FileLoc);
+ FullDelta += N->getValue(i).Delta;
+ }
+ assert(FullDelta == N->getFullDelta());
+ return;
+ }
+
+ // Verify interior nodes: Ensure that FullDelta matches up and the
+ // elements are in proper order and the children are in proper order.
+ int FullDelta = 0;
+ for (unsigned i = 0, e = IN->getNumValuesUsed(); i != e; ++i) {
+ const SourceDelta &IVal = N->getValue(i);
+ const DeltaTreeNode *IChild = IN->getChild(i);
+ if (i)
+ assert(IN->getValue(i-1).FileLoc < IVal.FileLoc);
+ FullDelta += IVal.Delta;
+ FullDelta += IChild->getFullDelta();
+
+ // The largest value in child #i should be smaller than FileLoc.
+ assert(IChild->getValue(IChild->getNumValuesUsed()-1).FileLoc <
+ IVal.FileLoc);
+
+ // The smallest value in child #i+1 should be larger than FileLoc.
+ assert(IN->getChild(i+1)->getValue(0).FileLoc > IVal.FileLoc);
+ VerifyTree(IChild);
+ }
+
+ FullDelta += IN->getChild(IN->getNumValuesUsed())->getFullDelta();
+
+ assert(FullDelta == N->getFullDelta());
+}
+#endif // VERIFY_TREE
+
+static DeltaTreeNode *getRoot(void *Root) {
+ return (DeltaTreeNode*)Root;
+}
+
+DeltaTree::DeltaTree() {
+ Root = new DeltaTreeNode();
+}
+DeltaTree::DeltaTree(const DeltaTree &RHS) {
+ // Currently we only support copying when the RHS is empty.
+ assert(getRoot(RHS.Root)->getNumValuesUsed() == 0 &&
+ "Can only copy empty tree");
+ Root = new DeltaTreeNode();
+}
+
+DeltaTree::~DeltaTree() {
+ getRoot(Root)->Destroy();
+}
+
+/// getDeltaAt - Return the accumulated delta at the specified file offset.
+/// This includes all insertions or deletions that occurred *before* the
+/// specified file index.
+int DeltaTree::getDeltaAt(unsigned FileIndex) const {
+ const DeltaTreeNode *Node = getRoot(Root);
+
+ int Result = 0;
+
+ // Walk down the tree.
+ while (1) {
+ // For all nodes, include any local deltas before the specified file
+ // index by summing them up directly. Keep track of how many were
+ // included.
+ unsigned NumValsGreater = 0;
+ for (unsigned e = Node->getNumValuesUsed(); NumValsGreater != e;
+ ++NumValsGreater) {
+ const SourceDelta &Val = Node->getValue(NumValsGreater);
+
+ if (Val.FileLoc >= FileIndex)
+ break;
+ Result += Val.Delta;
+ }
+
+ // If we have an interior node, include information about children and
+ // recurse. Otherwise, if we have a leaf, we're done.
+ const DeltaTreeInteriorNode *IN = dyn_cast<DeltaTreeInteriorNode>(Node);
+ if (!IN) return Result;
+
+ // Include any children to the left of the values we skipped, all of
+ // their deltas should be included as well.
+ for (unsigned i = 0; i != NumValsGreater; ++i)
+ Result += IN->getChild(i)->getFullDelta();
+
+ // If we found exactly the value we were looking for, break off the
+ // search early. There is no need to search the RHS of the value for
+ // partial results.
+ if (NumValsGreater != Node->getNumValuesUsed() &&
+ Node->getValue(NumValsGreater).FileLoc == FileIndex)
+ return Result+IN->getChild(NumValsGreater)->getFullDelta();
+
+ // Otherwise, traverse down the tree. The selected subtree may be
+ // partially included in the range.
+ Node = IN->getChild(NumValsGreater);
+ }
+ // NOT REACHED.
+}
+
+/// AddDelta - When a change is made that shifts around the text buffer,
+/// this method is used to record that info. It inserts a delta of 'Delta'
+/// into the current DeltaTree at offset FileIndex.
+void DeltaTree::AddDelta(unsigned FileIndex, int Delta) {
+ assert(Delta && "Adding a noop?");
+ DeltaTreeNode *MyRoot = getRoot(Root);
+
+ DeltaTreeNode::InsertResult InsertRes;
+ if (MyRoot->DoInsertion(FileIndex, Delta, &InsertRes)) {
+ Root = MyRoot = new DeltaTreeInteriorNode(InsertRes);
+ }
+
+#ifdef VERIFY_TREE
+ VerifyTree(MyRoot);
+#endif
+}
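+
+// A minimal usage sketch of the public interface above (offsets and deltas
+// invented for illustration):
+//
+//   DeltaTree DT;
+//   DT.AddDelta(10, +5);        // 5 bytes inserted at offset 10
+//   DT.AddDelta(30, -2);        // 2 bytes removed at offset 30
+//   int D = DT.getDeltaAt(40);  // D == 3: both edits precede offset 40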
+
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp b/contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp
new file mode 100644
index 0000000..275fbd0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp
@@ -0,0 +1,582 @@
+//== HTMLRewrite.cpp - Translate source code into prettified HTML --*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the HTMLRewriter class, which is used to translate the
+// text of a source file into prettified HTML.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/Core/HTMLRewrite.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/TokenConcatenation.h"
+#include "clang/Rewrite/Core/Rewriter.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <memory>
+using namespace clang;
+
+
+/// HighlightRange - Highlight a range in the source code with the specified
+/// start/end tags. B/E must be in the same file. This ensures that
+/// start/end tags are placed at the start/end of each line if the range is
+/// multiline.
+void html::HighlightRange(Rewriter &R, SourceLocation B, SourceLocation E,
+ const char *StartTag, const char *EndTag) {
+ SourceManager &SM = R.getSourceMgr();
+ B = SM.getExpansionLoc(B);
+ E = SM.getExpansionLoc(E);
+ FileID FID = SM.getFileID(B);
+ assert(SM.getFileID(E) == FID && "B/E not in the same file!");
+
+ unsigned BOffset = SM.getFileOffset(B);
+ unsigned EOffset = SM.getFileOffset(E);
+
+ // Include the whole end token in the range.
+ EOffset += Lexer::MeasureTokenLength(E, R.getSourceMgr(), R.getLangOpts());
+
+ bool Invalid = false;
+ const char *BufferStart = SM.getBufferData(FID, &Invalid).data();
+ if (Invalid)
+ return;
+
+ HighlightRange(R.getEditBuffer(FID), BOffset, EOffset,
+ BufferStart, StartTag, EndTag);
+}
+
+/// HighlightRange - This is the same as the above method, but takes
+/// decomposed file locations.
+void html::HighlightRange(RewriteBuffer &RB, unsigned B, unsigned E,
+ const char *BufferStart,
+ const char *StartTag, const char *EndTag) {
+ // Insert the tag at the absolute start/end of the range.
+ RB.InsertTextAfter(B, StartTag);
+ RB.InsertTextBefore(E, EndTag);
+
+ // Scan the range to see if there is a \r or \n. If so, and if the line is
+ // not blank, insert tags on that line as well.
+ bool HadOpenTag = true;
+
+ unsigned LastNonWhiteSpace = B;
+ for (unsigned i = B; i != E; ++i) {
+ switch (BufferStart[i]) {
+ case '\r':
+ case '\n':
+ // Okay, we found a newline in the range. If we have an open tag, we need
+ // to insert a close tag at the first non-whitespace before the newline.
+ if (HadOpenTag)
+ RB.InsertTextBefore(LastNonWhiteSpace+1, EndTag);
+
+ // Instead of inserting an open tag immediately after the newline, we
+ // wait until we see a non-whitespace character. This prevents us from
+ // inserting tags around blank lines, and also allows the open tag to
+ // be put *after* whitespace on a non-blank line.
+ HadOpenTag = false;
+ break;
+ case '\0':
+ case ' ':
+ case '\t':
+ case '\f':
+ case '\v':
+ // Ignore whitespace.
+ break;
+
+ default:
+ // If there is no tag open, do it now.
+ if (!HadOpenTag) {
+ RB.InsertTextAfter(i, StartTag);
+ HadOpenTag = true;
+ }
+
+ // Remember this character.
+ LastNonWhiteSpace = i;
+ break;
+ }
+ }
+}
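+
+// Worked example of the multiline behavior above (input invented for
+// illustration): highlighting all of "ab\n  cd" with "<b>"/"</b>" yields
+// "<b>ab</b>\n  <b>cd</b>"; the tag pair is closed before the newline and
+// reopened only after the leading whitespace of the next line.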
+
+void html::EscapeText(Rewriter &R, FileID FID,
+ bool EscapeSpaces, bool ReplaceTabs) {
+
+ const llvm::MemoryBuffer *Buf = R.getSourceMgr().getBuffer(FID);
+ const char* C = Buf->getBufferStart();
+ const char* FileEnd = Buf->getBufferEnd();
+
+ assert (C <= FileEnd);
+
+ RewriteBuffer &RB = R.getEditBuffer(FID);
+
+ unsigned ColNo = 0;
+ for (unsigned FilePos = 0; C != FileEnd ; ++C, ++FilePos) {
+ switch (*C) {
+ default: ++ColNo; break;
+ case '\n':
+ case '\r':
+ ColNo = 0;
+ break;
+
+ case ' ':
+ if (EscapeSpaces)
+ RB.ReplaceText(FilePos, 1, "&nbsp;");
+ ++ColNo;
+ break;
+ case '\f':
+ RB.ReplaceText(FilePos, 1, "<hr>");
+ ColNo = 0;
+ break;
+
+ case '\t': {
+ if (!ReplaceTabs)
+ break;
+ unsigned NumSpaces = 8-(ColNo&7);
+ if (EscapeSpaces)
+ RB.ReplaceText(FilePos, 1,
+ StringRef("&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"
+ "&nbsp;&nbsp;&nbsp;", 6*NumSpaces));
+ else
+ RB.ReplaceText(FilePos, 1, StringRef(" ", NumSpaces));
+ ColNo += NumSpaces;
+ break;
+ }
+ case '<':
+ RB.ReplaceText(FilePos, 1, "&lt;");
+ ++ColNo;
+ break;
+
+ case '>':
+ RB.ReplaceText(FilePos, 1, "&gt;");
+ ++ColNo;
+ break;
+
+ case '&':
+ RB.ReplaceText(FilePos, 1, "&amp;");
+ ++ColNo;
+ break;
+ }
+ }
+}
+
+std::string html::EscapeText(StringRef s, bool EscapeSpaces, bool ReplaceTabs) {
+
+ unsigned len = s.size();
+ std::string Str;
+ llvm::raw_string_ostream os(Str);
+
+ for (unsigned i = 0 ; i < len; ++i) {
+
+ char c = s[i];
+ switch (c) {
+ default:
+ os << c; break;
+
+ case ' ':
+ if (EscapeSpaces) os << "&nbsp;";
+ else os << ' ';
+ break;
+
+ case '\t':
+ if (ReplaceTabs) {
+ if (EscapeSpaces)
+ for (unsigned i = 0; i < 4; ++i)
+ os << "&nbsp;";
+ else
+ for (unsigned i = 0; i < 4; ++i)
+ os << " ";
+ }
+ else
+ os << c;
+
+ break;
+
+ case '<': os << "&lt;"; break;
+ case '>': os << "&gt;"; break;
+ case '&': os << "&amp;"; break;
+ }
+ }
+
+ return os.str();
+}
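+
+// Usage sketch for the string-based overload above (input invented for
+// illustration):
+//
+//   std::string S = html::EscapeText("a<b & c>d", /*EscapeSpaces=*/false,
+//                                    /*ReplaceTabs=*/false);
+//   // S == "a&lt;b &amp; c&gt;d"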
+
+static void AddLineNumber(RewriteBuffer &RB, unsigned LineNo,
+ unsigned B, unsigned E) {
+ SmallString<256> Str;
+ llvm::raw_svector_ostream OS(Str);
+
+ OS << "<tr><td class=\"num\" id=\"LN"
+ << LineNo << "\">"
+ << LineNo << "</td><td class=\"line\">";
+
+ if (B == E) { // Handle empty lines.
+ OS << " </td></tr>";
+ RB.InsertTextBefore(B, OS.str());
+ } else {
+ RB.InsertTextBefore(B, OS.str());
+ RB.InsertTextBefore(E, "</td></tr>");
+ }
+}
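+
+// For illustration, a non-empty source line 1 is wrapped roughly as
+//   <tr><td class="num" id="LN1">1</td><td class="line">...text...</td></tr>
+// with the opening half inserted before the line and "</td></tr>" inserted
+// before its terminating newline.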
+
+void html::AddLineNumbers(Rewriter& R, FileID FID) {
+
+ const llvm::MemoryBuffer *Buf = R.getSourceMgr().getBuffer(FID);
+ const char* FileBeg = Buf->getBufferStart();
+ const char* FileEnd = Buf->getBufferEnd();
+ const char* C = FileBeg;
+ RewriteBuffer &RB = R.getEditBuffer(FID);
+
+ assert (C <= FileEnd);
+
+ unsigned LineNo = 0;
+ unsigned FilePos = 0;
+
+ while (C != FileEnd) {
+
+ ++LineNo;
+ unsigned LineStartPos = FilePos;
+ unsigned LineEndPos = FileEnd - FileBeg;
+
+ assert (FilePos <= LineEndPos);
+ assert (C < FileEnd);
+
+ // Scan until the newline (or end-of-file).
+
+ while (C != FileEnd) {
+ char c = *C;
+ ++C;
+
+ if (c == '\n') {
+ LineEndPos = FilePos++;
+ break;
+ }
+
+ ++FilePos;
+ }
+
+ AddLineNumber(RB, LineNo, LineStartPos, LineEndPos);
+ }
+
+ // Add one big table tag that surrounds all of the code.
+ RB.InsertTextBefore(0, "<table class=\"code\">\n");
+ RB.InsertTextAfter(FileEnd - FileBeg, "</table>");
+}
+
+void html::AddHeaderFooterInternalBuiltinCSS(Rewriter& R, FileID FID,
+ const char *title) {
+
+ const llvm::MemoryBuffer *Buf = R.getSourceMgr().getBuffer(FID);
+ const char* FileStart = Buf->getBufferStart();
+ const char* FileEnd = Buf->getBufferEnd();
+
+ SourceLocation StartLoc = R.getSourceMgr().getLocForStartOfFile(FID);
+ SourceLocation EndLoc = StartLoc.getLocWithOffset(FileEnd-FileStart);
+
+ std::string s;
+ llvm::raw_string_ostream os(s);
+ os << "<!doctype html>\n" // Use HTML 5 doctype
+ "<html>\n<head>\n";
+
+ if (title)
+ os << "<title>" << html::EscapeText(title) << "</title>\n";
+
+ os << "<style type=\"text/css\">\n"
+ " body { color:#000000; background-color:#ffffff }\n"
+ " body { font-family:Helvetica, sans-serif; font-size:10pt }\n"
+ " h1 { font-size:14pt }\n"
+ " .code { border-collapse:collapse; width:100%; }\n"
+ " .code { font-family: \"Monospace\", monospace; font-size:10pt }\n"
+ " .code { line-height: 1.2em }\n"
+ " .comment { color: green; font-style: oblique }\n"
+ " .keyword { color: blue }\n"
+ " .string_literal { color: red }\n"
+ " .directive { color: darkmagenta }\n"
+ // Macro expansions.
+ " .expansion { display: none; }\n"
+ " .macro:hover .expansion { display: block; border: 2px solid #FF0000; "
+ "padding: 2px; background-color:#FFF0F0; font-weight: normal; "
+ " -webkit-border-radius:5px; -webkit-box-shadow:1px 1px 7px #000; "
+ "position: absolute; top: -1em; left:10em; z-index: 1 } \n"
+ " .macro { color: darkmagenta; background-color:LemonChiffon;"
+ // Macros are position: relative to provide base for expansions.
+ " position: relative }\n"
+ " .num { width:2.5em; padding-right:2ex; background-color:#eeeeee }\n"
+ " .num { text-align:right; font-size:8pt }\n"
+ " .num { color:#444444 }\n"
+ " .line { padding-left: 1ex; border-left: 3px solid #ccc }\n"
+ " .line { white-space: pre }\n"
+ " .msg { -webkit-box-shadow:1px 1px 7px #000 }\n"
+ " .msg { -webkit-border-radius:5px }\n"
+ " .msg { font-family:Helvetica, sans-serif; font-size:8pt }\n"
+ " .msg { float:left }\n"
+ " .msg { padding:0.25em 1ex 0.25em 1ex }\n"
+ " .msg { margin-top:10px; margin-bottom:10px }\n"
+ " .msg { font-weight:bold }\n"
+ " .msg { max-width:60em; word-wrap: break-word; white-space: pre-wrap }\n"
+ " .msgT { padding:0x; spacing:0x }\n"
+ " .msgEvent { background-color:#fff8b4; color:#000000 }\n"
+ " .msgControl { background-color:#bbbbbb; color:#000000 }\n"
+ " .mrange { background-color:#dfddf3 }\n"
+ " .mrange { border-bottom:1px solid #6F9DBE }\n"
+ " .PathIndex { font-weight: bold; padding:0px 5px; "
+ "margin-right:5px; }\n"
+ " .PathIndex { -webkit-border-radius:8px }\n"
+ " .PathIndexEvent { background-color:#bfba87 }\n"
+ " .PathIndexControl { background-color:#8c8c8c }\n"
+ " .PathNav a { text-decoration:none; font-size: larger }\n"
+ " .CodeInsertionHint { font-weight: bold; background-color: #10dd10 }\n"
+ " .CodeRemovalHint { background-color:#de1010 }\n"
+ " .CodeRemovalHint { border-bottom:1px solid #6F9DBE }\n"
+ " table.simpletable {\n"
+ " padding: 5px;\n"
+ " font-size:12pt;\n"
+ " margin:20px;\n"
+ " border-collapse: collapse; border-spacing: 0px;\n"
+ " }\n"
+ " td.rowname {\n"
+ " text-align:right; font-weight:bold; color:#444444;\n"
+ " padding-right:2ex; }\n"
+ "</style>\n</head>\n<body>";
+
+ // Generate header
+ R.InsertTextBefore(StartLoc, os.str());
+ // Generate footer
+
+ R.InsertTextAfter(EndLoc, "</body></html>\n");
+}
+
+/// SyntaxHighlight - Relex the specified FileID and annotate the HTML with
+/// information about keywords, macro expansions etc. This uses the macro
+/// table state from the end of the file, so it won't be perfectly accurate,
+/// but it will be reasonably close.
+void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP) {
+ RewriteBuffer &RB = R.getEditBuffer(FID);
+
+ const SourceManager &SM = PP.getSourceManager();
+ const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
+ Lexer L(FID, FromFile, SM, PP.getLangOpts());
+ const char *BufferStart = L.getBuffer().data();
+
+ // Inform the preprocessor that we want to retain comments as tokens, so we
+ // can highlight them.
+ L.SetCommentRetentionState(true);
+
+ // Lex all the tokens in raw mode, to avoid entering #includes or expanding
+ // macros.
+ Token Tok;
+ L.LexFromRawLexer(Tok);
+
+ while (Tok.isNot(tok::eof)) {
+ // Since we are lexing unexpanded tokens, all tokens are from the main
+ // FileID.
+ unsigned TokOffs = SM.getFileOffset(Tok.getLocation());
+ unsigned TokLen = Tok.getLength();
+ switch (Tok.getKind()) {
+ default: break;
+ case tok::identifier:
+ llvm_unreachable("tok::identifier in raw lexing mode!");
+ case tok::raw_identifier: {
+ // Fill in Result.IdentifierInfo and update the token kind,
+ // looking up the identifier in the identifier table.
+ PP.LookUpIdentifierInfo(Tok);
+
+      // If the identifier turned into a keyword token, highlight it as such.
+ if (Tok.isNot(tok::identifier))
+ HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
+ "<span class='keyword'>", "</span>");
+ break;
+ }
+ case tok::comment:
+ HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
+ "<span class='comment'>", "</span>");
+ break;
+ case tok::utf8_string_literal:
+ // Chop off the u part of u8 prefix
+ ++TokOffs;
+ --TokLen;
+ // FALL THROUGH to chop the 8
+ case tok::wide_string_literal:
+ case tok::utf16_string_literal:
+ case tok::utf32_string_literal:
+ // Chop off the L, u, U or 8 prefix
+ ++TokOffs;
+ --TokLen;
+ // FALL THROUGH.
+ case tok::string_literal:
+ // FIXME: Exclude the optional ud-suffix from the highlighted range.
+ HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
+ "<span class='string_literal'>", "</span>");
+ break;
+ case tok::hash: {
+ // If this is a preprocessor directive, all tokens to end of line are too.
+ if (!Tok.isAtStartOfLine())
+ break;
+
+ // Eat all of the tokens until we get to the next one at the start of
+ // line.
+ unsigned TokEnd = TokOffs+TokLen;
+ L.LexFromRawLexer(Tok);
+ while (!Tok.isAtStartOfLine() && Tok.isNot(tok::eof)) {
+ TokEnd = SM.getFileOffset(Tok.getLocation())+Tok.getLength();
+ L.LexFromRawLexer(Tok);
+ }
+
+ // Find end of line. This is a hack.
+ HighlightRange(RB, TokOffs, TokEnd, BufferStart,
+ "<span class='directive'>", "</span>");
+
+ // Don't skip the next token.
+ continue;
+ }
+ }
+
+ L.LexFromRawLexer(Tok);
+ }
+}
+
+/// HighlightMacros - This uses the macro table state from the end of the
+/// file, to re-expand macros and insert (into the HTML) information about the
+/// macro expansions. This won't be perfectly accurate, but it will be
+/// reasonably close.
+void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
+ // Re-lex the raw token stream into a token buffer.
+ const SourceManager &SM = PP.getSourceManager();
+ std::vector<Token> TokenStream;
+
+ const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
+ Lexer L(FID, FromFile, SM, PP.getLangOpts());
+
+ // Lex all the tokens in raw mode, to avoid entering #includes or expanding
+ // macros.
+ while (1) {
+ Token Tok;
+ L.LexFromRawLexer(Tok);
+
+ // If this is a # at the start of a line, discard it from the token stream.
+ // We don't want the re-preprocess step to see #defines, #includes or other
+ // preprocessor directives.
+ if (Tok.is(tok::hash) && Tok.isAtStartOfLine())
+ continue;
+
+    // If this is a ## token, change its kind to unknown so that re-preprocessing
+ // it will not produce an error.
+ if (Tok.is(tok::hashhash))
+ Tok.setKind(tok::unknown);
+
+ // If this raw token is an identifier, the raw lexer won't have looked up
+ // the corresponding identifier info for it. Do this now so that it will be
+ // macro expanded when we re-preprocess it.
+ if (Tok.is(tok::raw_identifier))
+ PP.LookUpIdentifierInfo(Tok);
+
+ TokenStream.push_back(Tok);
+
+ if (Tok.is(tok::eof)) break;
+ }
+
+ // Temporarily change the diagnostics object so that we ignore any generated
+ // diagnostics from this pass.
+ DiagnosticsEngine TmpDiags(PP.getDiagnostics().getDiagnosticIDs(),
+ &PP.getDiagnostics().getDiagnosticOptions(),
+ new IgnoringDiagConsumer);
+
+ // FIXME: This is a huge hack; we reuse the input preprocessor because we want
+ // its state, but we aren't actually changing it (we hope). This should really
+ // construct a copy of the preprocessor.
+ Preprocessor &TmpPP = const_cast<Preprocessor&>(PP);
+ DiagnosticsEngine *OldDiags = &TmpPP.getDiagnostics();
+ TmpPP.setDiagnostics(TmpDiags);
+
+ // Inform the preprocessor that we don't want comments.
+ TmpPP.SetCommentRetentionState(false, false);
+
+ // We don't want pragmas either. Although we filtered out #pragma, removing
+ // _Pragma and __pragma is much harder.
+ bool PragmasPreviouslyEnabled = TmpPP.getPragmasEnabled();
+ TmpPP.setPragmasEnabled(false);
+
+ // Enter the tokens we just lexed. This will cause them to be macro expanded
+ // but won't enter sub-files (because we removed #'s).
+ TmpPP.EnterTokenStream(&TokenStream[0], TokenStream.size(), false, false);
+
+ TokenConcatenation ConcatInfo(TmpPP);
+
+ // Lex all the tokens.
+ Token Tok;
+ TmpPP.Lex(Tok);
+ while (Tok.isNot(tok::eof)) {
+ // Ignore non-macro tokens.
+ if (!Tok.getLocation().isMacroID()) {
+ TmpPP.Lex(Tok);
+ continue;
+ }
+
+ // Okay, we have the first token of a macro expansion: highlight the
+ // expansion by inserting a start tag before the macro expansion and
+ // end tag after it.
+ std::pair<SourceLocation, SourceLocation> LLoc =
+ SM.getExpansionRange(Tok.getLocation());
+
+ // Ignore tokens whose instantiation location was not the main file.
+ if (SM.getFileID(LLoc.first) != FID) {
+ TmpPP.Lex(Tok);
+ continue;
+ }
+
+ assert(SM.getFileID(LLoc.second) == FID &&
+ "Start and end of expansion must be in the same ultimate file!");
+
+ std::string Expansion = EscapeText(TmpPP.getSpelling(Tok));
+ unsigned LineLen = Expansion.size();
+
+ Token PrevPrevTok;
+ Token PrevTok = Tok;
+ // Okay, eat this token, getting the next one.
+ TmpPP.Lex(Tok);
+
+ // Skip all the rest of the tokens that are part of this macro
+ // instantiation. It would be really nice to pop up a window with all the
+ // spelling of the tokens or something.
+ while (!Tok.is(tok::eof) &&
+ SM.getExpansionLoc(Tok.getLocation()) == LLoc.first) {
+ // Insert a newline if the macro expansion is getting large.
+ if (LineLen > 60) {
+ Expansion += "<br>";
+ LineLen = 0;
+ }
+
+ LineLen -= Expansion.size();
+
+ // If the tokens were already space separated, or if they must be to avoid
+ // them being implicitly pasted, add a space between them.
+ if (Tok.hasLeadingSpace() ||
+ ConcatInfo.AvoidConcat(PrevPrevTok, PrevTok, Tok))
+ Expansion += ' ';
+
+ // Escape any special characters in the token text.
+ Expansion += EscapeText(TmpPP.getSpelling(Tok));
+ LineLen += Expansion.size();
+
+ PrevPrevTok = PrevTok;
+ PrevTok = Tok;
+ TmpPP.Lex(Tok);
+ }
+
+
+ // Insert the expansion as the end tag, so that multi-line macros all get
+ // highlighted.
+ Expansion = "<span class='expansion'>" + Expansion + "</span></span>";
+
+ HighlightRange(R, LLoc.first, LLoc.second,
+ "<span class='macro'>", Expansion.c_str());
+ }
+
+ // Restore the preprocessor's old state.
+ TmpPP.setDiagnostics(*OldDiags);
+ TmpPP.setPragmasEnabled(PragmasPreviouslyEnabled);
+}
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp
new file mode 100644
index 0000000..451ad07
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp
@@ -0,0 +1,802 @@
+//===--- RewriteRope.cpp - Rope specialized for rewriter --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the RewriteRope class, which is a powerful string class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/Core/RewriteRope.h"
+#include "clang/Basic/LLVM.h"
+#include <algorithm>
+using namespace clang;
+
+/// RewriteRope is a "strong" string class, designed to make insertions and
+/// deletions in the middle of the string nearly constant time (really, they are
+/// O(log N), but with a very low constant factor).
+///
+/// The implementation of this data structure is a conceptual linear sequence of
+/// RopePiece elements. Each RopePiece represents a view on a separately
+/// allocated and reference counted string. This means that splitting a very
+/// long string can be done in constant time by splitting a RopePiece that
+/// references the whole string into two rope pieces that reference each half.
+/// Once split, another string can be inserted in between the two halves by
+/// inserting a RopePiece in between the two others. All of this is very
+/// inexpensive: it takes time proportional to the number of RopePieces, not the
+/// length of the strings they represent.
+///
+/// While a linear sequence of RopePieces is the conceptual model, the actual
+/// implementation captures them in an adapted B+ Tree. Using a B+ tree (a
+/// tree that keeps the values in the leaves, in which each node
+/// contains a reasonable number of pointers to children/values) allows us to
+/// maintain efficient operation when the RewriteRope contains a *huge* number
+/// of RopePieces. The basic idea of the B+ Tree is that it allows us to find
+/// the RopePiece corresponding to some offset very efficiently, and it
+/// automatically balances itself on insertions of RopePieces (which can happen
+/// for both insertions and erases of string ranges).
+///
+/// The one wrinkle on the theory is that we don't attempt to keep the tree
+/// properly balanced when erases happen. Erases of string data can either
+/// insert new RopePieces (e.g. when the middle of some other rope piece is
+/// deleted, which results in two rope pieces, just like an insert) or
+/// reduce the number of RopePieces maintained by the B+Tree. In the case when
+/// the number of RopePieces is reduced, we don't attempt to maintain the
+/// standard 'invariant' that each node in the tree contains at least
+/// 'WidthFactor' children/values. For our use cases, this doesn't seem to
+/// matter.
+///
+/// The implementation below is primarily implemented in terms of three classes:
+/// RopePieceBTreeNode - Common base class for:
+///
+/// RopePieceBTreeLeaf - Directly manages up to '2*WidthFactor' RopePiece
+/// nodes. This directly represents a chunk of the string with those
+/// RopePieces concatenated.
+/// RopePieceBTreeInterior - An interior node in the B+ Tree, which manages
+/// up to '2*WidthFactor' other nodes in the tree.
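+///
+/// For intuition (numbers invented): with WidthFactor = 8, a leaf holds up
+/// to 16 RopePieces. Once the rope outgrows one leaf, interior nodes are
+/// added above it, and because every node caches the byte size of its whole
+/// subtree, the RopePiece containing a given offset can be found by skipping
+/// entire subtrees instead of scanning the piece list linearly.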
+
+
+//===----------------------------------------------------------------------===//
+// RopePieceBTreeNode Class
+//===----------------------------------------------------------------------===//
+
+namespace {
+ /// RopePieceBTreeNode - Common base class of RopePieceBTreeLeaf and
+ /// RopePieceBTreeInterior. This provides some 'virtual' dispatching methods
+ /// and a flag that determines which subclass the instance is. Also
+  /// important, this node knows the full extent of the node, including any
+ /// children that it has. This allows efficient skipping over entire subtrees
+ /// when looking for an offset in the BTree.
+ class RopePieceBTreeNode {
+ protected:
+ /// WidthFactor - This controls the number of K/V slots held in the BTree:
+ /// how wide it is. Each level of the BTree is guaranteed to have at least
+    /// 'WidthFactor' elements in it (either RopePieces or children), except
+    /// the root, which may have fewer, and may have at most 2*WidthFactor
+ /// elements.
+ enum { WidthFactor = 8 };
+
+ /// Size - This is the number of bytes of file this node (including any
+ /// potential children) covers.
+ unsigned Size;
+
+ /// IsLeaf - True if this is an instance of RopePieceBTreeLeaf, false if it
+ /// is an instance of RopePieceBTreeInterior.
+ bool IsLeaf;
+
+ RopePieceBTreeNode(bool isLeaf) : Size(0), IsLeaf(isLeaf) {}
+ ~RopePieceBTreeNode() = default;
+
+ public:
+ bool isLeaf() const { return IsLeaf; }
+ unsigned size() const { return Size; }
+
+ void Destroy();
+
+ /// split - Split the range containing the specified offset so that we are
+ /// guaranteed that there is a place to do an insertion at the specified
+ /// offset. The offset is relative, so "0" is the start of the node.
+ ///
+ /// If there is no space in this subtree for the extra piece, the extra tree
+ /// node is returned and must be inserted into a parent.
+ RopePieceBTreeNode *split(unsigned Offset);
+
+ /// insert - Insert the specified ropepiece into this tree node at the
+ /// specified offset. The offset is relative, so "0" is the start of the
+ /// node.
+ ///
+ /// If there is no space in this subtree for the extra piece, the extra tree
+ /// node is returned and must be inserted into a parent.
+ RopePieceBTreeNode *insert(unsigned Offset, const RopePiece &R);
+
+ /// erase - Remove NumBytes from this node at the specified offset. We are
+ /// guaranteed that there is a split at Offset.
+ void erase(unsigned Offset, unsigned NumBytes);
+
+ };
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// RopePieceBTreeLeaf Class
+//===----------------------------------------------------------------------===//
+
+namespace {
+ /// RopePieceBTreeLeaf - Directly manages up to '2*WidthFactor' RopePiece
+ /// nodes. This directly represents a chunk of the string with those
+  /// RopePieces concatenated. Since this is a B+Tree, all values (in this case
+ /// instances of RopePiece) are stored in leaves like this. To make iteration
+ /// over the leaves efficient, they maintain a singly linked list through the
+ /// NextLeaf field. This allows the B+Tree forward iterator to be constant
+ /// time for all increments.
+ class RopePieceBTreeLeaf : public RopePieceBTreeNode {
+ /// NumPieces - This holds the number of rope pieces currently active in the
+ /// Pieces array.
+ unsigned char NumPieces;
+
+ /// Pieces - This tracks the file chunks currently in this leaf.
+ ///
+ RopePiece Pieces[2*WidthFactor];
+
+    /// NextLeaf - This is a pointer to the next leaf in the tree, allowing
+    /// efficient in-order forward iteration of the tree without traversal.
+    /// PrevLeaf points at the previous leaf's NextLeaf field, so a leaf can
+    /// unlink itself from the chain in constant time.
+ RopePieceBTreeLeaf **PrevLeaf, *NextLeaf;
+ public:
+ RopePieceBTreeLeaf() : RopePieceBTreeNode(true), NumPieces(0),
+ PrevLeaf(nullptr), NextLeaf(nullptr) {}
+ ~RopePieceBTreeLeaf() {
+ if (PrevLeaf || NextLeaf)
+ removeFromLeafInOrder();
+ clear();
+ }
+
+ bool isFull() const { return NumPieces == 2*WidthFactor; }
+
+ /// clear - Remove all rope pieces from this leaf.
+ void clear() {
+ while (NumPieces)
+ Pieces[--NumPieces] = RopePiece();
+ Size = 0;
+ }
+
+ unsigned getNumPieces() const { return NumPieces; }
+
+ const RopePiece &getPiece(unsigned i) const {
+ assert(i < getNumPieces() && "Invalid piece ID");
+ return Pieces[i];
+ }
+
+ const RopePieceBTreeLeaf *getNextLeafInOrder() const { return NextLeaf; }
+ void insertAfterLeafInOrder(RopePieceBTreeLeaf *Node) {
+ assert(!PrevLeaf && !NextLeaf && "Already in ordering");
+
+ NextLeaf = Node->NextLeaf;
+ if (NextLeaf)
+ NextLeaf->PrevLeaf = &NextLeaf;
+ PrevLeaf = &Node->NextLeaf;
+ Node->NextLeaf = this;
+ }
+
+ void removeFromLeafInOrder() {
+ if (PrevLeaf) {
+ *PrevLeaf = NextLeaf;
+ if (NextLeaf)
+ NextLeaf->PrevLeaf = PrevLeaf;
+ } else if (NextLeaf) {
+ NextLeaf->PrevLeaf = nullptr;
+ }
+ }
+
+ /// FullRecomputeSizeLocally - This method recomputes the 'Size' field by
+ /// summing the size of all RopePieces.
+ void FullRecomputeSizeLocally() {
+ Size = 0;
+ for (unsigned i = 0, e = getNumPieces(); i != e; ++i)
+ Size += getPiece(i).size();
+ }
+
+ /// split - Split the range containing the specified offset so that we are
+ /// guaranteed that there is a place to do an insertion at the specified
+ /// offset. The offset is relative, so "0" is the start of the node.
+ ///
+ /// If there is no space in this subtree for the extra piece, the extra tree
+ /// node is returned and must be inserted into a parent.
+ RopePieceBTreeNode *split(unsigned Offset);
+
+ /// insert - Insert the specified ropepiece into this tree node at the
+ /// specified offset. The offset is relative, so "0" is the start of the
+ /// node.
+ ///
+ /// If there is no space in this subtree for the extra piece, the extra tree
+ /// node is returned and must be inserted into a parent.
+ RopePieceBTreeNode *insert(unsigned Offset, const RopePiece &R);
+
+
+ /// erase - Remove NumBytes from this node at the specified offset. We are
+ /// guaranteed that there is a split at Offset.
+ void erase(unsigned Offset, unsigned NumBytes);
+
+ static inline bool classof(const RopePieceBTreeNode *N) {
+ return N->isLeaf();
+ }
+ };
+} // end anonymous namespace
+
+/// split - Split the range containing the specified offset so that we are
+/// guaranteed that there is a place to do an insertion at the specified
+/// offset. The offset is relative, so "0" is the start of the node.
+///
+/// If there is no space in this subtree for the extra piece, the extra tree
+/// node is returned and must be inserted into a parent.
+RopePieceBTreeNode *RopePieceBTreeLeaf::split(unsigned Offset) {
+ // Find the insertion point. We are guaranteed that there is a split at the
+ // specified offset so find it.
+ if (Offset == 0 || Offset == size()) {
+ // Fastpath for a common case. There is already a splitpoint at the end.
+ return nullptr;
+ }
+
+ // Find the piece that this offset lands in.
+ unsigned PieceOffs = 0;
+ unsigned i = 0;
+ while (Offset >= PieceOffs+Pieces[i].size()) {
+ PieceOffs += Pieces[i].size();
+ ++i;
+ }
+
+ // If there is already a split point at the specified offset, just return
+ // success.
+ if (PieceOffs == Offset)
+ return nullptr;
+
+ // Otherwise, we need to split piece 'i' at Offset-PieceOffs. Convert Offset
+ // to being Piece relative.
+ unsigned IntraPieceOffset = Offset-PieceOffs;
+
+ // We do this by shrinking the RopePiece and then doing an insert of the tail.
+ RopePiece Tail(Pieces[i].StrData, Pieces[i].StartOffs+IntraPieceOffset,
+ Pieces[i].EndOffs);
+ Size -= Pieces[i].size();
+ Pieces[i].EndOffs = Pieces[i].StartOffs+IntraPieceOffset;
+ Size += Pieces[i].size();
+
+ return insert(Offset, Tail);
+}
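+
+// Worked example of the split above (offsets invented): splitting a leaf
+// whose piece views [StartOffs=0, EndOffs=100) of some buffer at Offset 40
+// shrinks that piece to [0,40) and re-inserts a tail piece viewing [40,100)
+// of the same StrData, so no string bytes are copied.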
+
+
+/// insert - Insert the specified RopePiece into this tree node at the
+/// specified offset. The offset is relative, so "0" is the start of the node.
+///
+/// If there is no space in this subtree for the extra piece, the extra tree
+/// node is returned and must be inserted into a parent.
+RopePieceBTreeNode *RopePieceBTreeLeaf::insert(unsigned Offset,
+ const RopePiece &R) {
+ // If this node is not full, insert the piece.
+ if (!isFull()) {
+ // Find the insertion point. We are guaranteed that there is a split at the
+ // specified offset so find it.
+ unsigned i = 0, e = getNumPieces();
+ if (Offset == size()) {
+ // Fastpath for a common case.
+ i = e;
+ } else {
+ unsigned SlotOffs = 0;
+ for (; Offset > SlotOffs; ++i)
+ SlotOffs += getPiece(i).size();
+ assert(SlotOffs == Offset && "Split didn't occur before insertion!");
+ }
+
+ // For an insertion into a non-full leaf node, just insert the value in
+ // its sorted position. This requires moving later values over.
+ for (; i != e; --e)
+ Pieces[e] = Pieces[e-1];
+ Pieces[i] = R;
+ ++NumPieces;
+ Size += R.size();
+ return nullptr;
+ }
+
+  // Otherwise, this leaf is full: split it in two halves. Since this
+ // node is full, it contains 2*WidthFactor values. We move the first
+ // 'WidthFactor' values to the LHS child (which we leave in this node) and
+ // move the last 'WidthFactor' values into the RHS child.
+
+ // Create the new node.
+ RopePieceBTreeLeaf *NewNode = new RopePieceBTreeLeaf();
+
+ // Move over the last 'WidthFactor' values from here to NewNode.
+ std::copy(&Pieces[WidthFactor], &Pieces[2*WidthFactor],
+ &NewNode->Pieces[0]);
+ // Replace old pieces with null RopePieces to drop refcounts.
+ std::fill(&Pieces[WidthFactor], &Pieces[2*WidthFactor], RopePiece());
+
+ // Decrease the number of values in the two nodes.
+ NewNode->NumPieces = NumPieces = WidthFactor;
+
+ // Recompute the two nodes' size.
+ NewNode->FullRecomputeSizeLocally();
+ FullRecomputeSizeLocally();
+
+ // Update the list of leaves.
+ NewNode->insertAfterLeafInOrder(this);
+
+ // These insertions can't fail.
+ if (this->size() >= Offset)
+ this->insert(Offset, R);
+ else
+ NewNode->insert(Offset - this->size(), R);
+ return NewNode;
+}
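+
+// An illustrative scenario for the overflow path above, assuming the
+// WidthFactor constant defined earlier in this file is 8: a full leaf holds
+// 2*8 == 16 pieces, so an insertion moves pieces [8, 16) into a fresh leaf,
+// stitches that leaf into the in-order leaf list, re-runs the (now
+// guaranteed to succeed) insertion on whichever half covers Offset, and
+// returns the new right-hand leaf for the parent to adopt.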
+
+/// erase - Remove NumBytes from this node at the specified offset. We are
+/// guaranteed that there is a split at Offset.
+void RopePieceBTreeLeaf::erase(unsigned Offset, unsigned NumBytes) {
+ // Since we are guaranteed that there is a split at Offset, we start by
+ // finding the Piece that starts there.
+ unsigned PieceOffs = 0;
+ unsigned i = 0;
+ for (; Offset > PieceOffs; ++i)
+ PieceOffs += getPiece(i).size();
+ assert(PieceOffs == Offset && "Split didn't occur before erase!");
+
+ unsigned StartPiece = i;
+
+ // Figure out how many pieces completely cover 'NumBytes'. We want to remove
+ // all of them.
+ for (; Offset+NumBytes > PieceOffs+getPiece(i).size(); ++i)
+ PieceOffs += getPiece(i).size();
+
+ // If we exactly include the last one, include it in the region to delete.
+ if (Offset+NumBytes == PieceOffs+getPiece(i).size())
+ PieceOffs += getPiece(i).size(), ++i;
+
+ // If we completely cover some RopePieces, erase them now.
+ if (i != StartPiece) {
+ unsigned NumDeleted = i-StartPiece;
+ for (; i != getNumPieces(); ++i)
+ Pieces[i-NumDeleted] = Pieces[i];
+
+ // Drop references to dead rope pieces.
+ std::fill(&Pieces[getNumPieces()-NumDeleted], &Pieces[getNumPieces()],
+ RopePiece());
+ NumPieces -= NumDeleted;
+
+ unsigned CoverBytes = PieceOffs-Offset;
+ NumBytes -= CoverBytes;
+ Size -= CoverBytes;
+ }
+
+ // If we completely removed some stuff, we could be done.
+ if (NumBytes == 0) return;
+
+ // Okay, we might now be erasing part of some Piece. If this is the case, then
+ // move the start point of the piece.
+ assert(getPiece(StartPiece).size() > NumBytes);
+ Pieces[StartPiece].StartOffs += NumBytes;
+
+ // The size of this node just shrunk by NumBytes.
+ Size -= NumBytes;
+}
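+
+// A worked example of the erase above (piece sizes hypothetical): with
+// pieces {5, 4, 3, 4}, erase(5, 6) finds StartPiece == 1, deletes the
+// fully-covered 4-byte piece (accounting for 4 of the 6 bytes), then trims
+// the following 3-byte piece by advancing its StartOffs by the remaining
+// 2 bytes, leaving pieces {5, 1, 4} and Size smaller by 6.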
+
+//===----------------------------------------------------------------------===//
+// RopePieceBTreeInterior Class
+//===----------------------------------------------------------------------===//
+
+namespace {
+ /// RopePieceBTreeInterior - This represents an interior node in the B+Tree,
+ /// which holds up to 2*WidthFactor pointers to child nodes.
+ class RopePieceBTreeInterior : public RopePieceBTreeNode {
+ /// NumChildren - This holds the number of children currently active in the
+ /// Children array.
+ unsigned char NumChildren;
+ RopePieceBTreeNode *Children[2*WidthFactor];
+ public:
+ RopePieceBTreeInterior() : RopePieceBTreeNode(false), NumChildren(0) {}
+
+ RopePieceBTreeInterior(RopePieceBTreeNode *LHS, RopePieceBTreeNode *RHS)
+ : RopePieceBTreeNode(false) {
+ Children[0] = LHS;
+ Children[1] = RHS;
+ NumChildren = 2;
+ Size = LHS->size() + RHS->size();
+ }
+
+ ~RopePieceBTreeInterior() {
+ for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
+ Children[i]->Destroy();
+ }
+
+ bool isFull() const { return NumChildren == 2*WidthFactor; }
+
+ unsigned getNumChildren() const { return NumChildren; }
+ const RopePieceBTreeNode *getChild(unsigned i) const {
+ assert(i < NumChildren && "invalid child #");
+ return Children[i];
+ }
+ RopePieceBTreeNode *getChild(unsigned i) {
+ assert(i < NumChildren && "invalid child #");
+ return Children[i];
+ }
+
+ /// FullRecomputeSizeLocally - Recompute the Size field of this node by
+ /// summing up the sizes of the child nodes.
+ void FullRecomputeSizeLocally() {
+ Size = 0;
+ for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
+ Size += getChild(i)->size();
+ }
+
+
+ /// split - Split the range containing the specified offset so that we are
+ /// guaranteed that there is a place to do an insertion at the specified
+ /// offset. The offset is relative, so "0" is the start of the node.
+ ///
+ /// If there is no space in this subtree for the extra piece, the extra tree
+ /// node is returned and must be inserted into a parent.
+ RopePieceBTreeNode *split(unsigned Offset);
+
+
+ /// insert - Insert the specified ropepiece into this tree node at the
+ /// specified offset. The offset is relative, so "0" is the start of the
+ /// node.
+ ///
+ /// If there is no space in this subtree for the extra piece, the extra tree
+ /// node is returned and must be inserted into a parent.
+ RopePieceBTreeNode *insert(unsigned Offset, const RopePiece &R);
+
+ /// HandleChildPiece - A child propagated an insertion result up to us.
+ /// Insert the new child, and/or propagate the result further up the tree.
+ RopePieceBTreeNode *HandleChildPiece(unsigned i, RopePieceBTreeNode *RHS);
+
+ /// erase - Remove NumBytes from this node at the specified offset. We are
+ /// guaranteed that there is a split at Offset.
+ void erase(unsigned Offset, unsigned NumBytes);
+
+ static inline bool classof(const RopePieceBTreeNode *N) {
+ return !N->isLeaf();
+ }
+ };
+} // end anonymous namespace
+
+/// split - Split the range containing the specified offset so that we are
+/// guaranteed that there is a place to do an insertion at the specified
+/// offset. The offset is relative, so "0" is the start of the node.
+///
+/// If there is no space in this subtree for the extra piece, the extra tree
+/// node is returned and must be inserted into a parent.
+RopePieceBTreeNode *RopePieceBTreeInterior::split(unsigned Offset) {
+ // Figure out which child to split.
+ if (Offset == 0 || Offset == size())
+ return nullptr; // If we have an exact offset, we're already split.
+
+ unsigned ChildOffset = 0;
+ unsigned i = 0;
+ for (; Offset >= ChildOffset+getChild(i)->size(); ++i)
+ ChildOffset += getChild(i)->size();
+
+ // If already split there, we're done.
+ if (ChildOffset == Offset)
+ return nullptr;
+
+ // Otherwise, recursively split the child.
+ if (RopePieceBTreeNode *RHS = getChild(i)->split(Offset-ChildOffset))
+ return HandleChildPiece(i, RHS);
+ return nullptr; // Done!
+}
+
+/// insert - Insert the specified ropepiece into this tree node at the
+/// specified offset. The offset is relative, so "0" is the start of the
+/// node.
+///
+/// If there is no space in this subtree for the extra piece, the extra tree
+/// node is returned and must be inserted into a parent.
+RopePieceBTreeNode *RopePieceBTreeInterior::insert(unsigned Offset,
+ const RopePiece &R) {
+ // Find the insertion point. We are guaranteed that there is a split at the
+ // specified offset so find it.
+ unsigned i = 0, e = getNumChildren();
+
+ unsigned ChildOffs = 0;
+ if (Offset == size()) {
+ // Fastpath for a common case. Insert at end of last child.
+ i = e-1;
+ ChildOffs = size()-getChild(i)->size();
+ } else {
+ for (; Offset > ChildOffs+getChild(i)->size(); ++i)
+ ChildOffs += getChild(i)->size();
+ }
+
+ Size += R.size();
+
+ // Insert at the end of this child.
+ if (RopePieceBTreeNode *RHS = getChild(i)->insert(Offset-ChildOffs, R))
+ return HandleChildPiece(i, RHS);
+
+ return nullptr;
+}
+
+/// HandleChildPiece - A child propagated an insertion result up to us.
+/// Insert the new child, and/or propagate the result further up the tree.
+RopePieceBTreeNode *
+RopePieceBTreeInterior::HandleChildPiece(unsigned i, RopePieceBTreeNode *RHS) {
+ // The child propagated a new subtree up to us as a new child. See if we
+ // have space for it here.
+ if (!isFull()) {
+ // Insert RHS after child 'i'.
+ if (i + 1 != getNumChildren())
+ memmove(&Children[i+2], &Children[i+1],
+ (getNumChildren()-i-1)*sizeof(Children[0]));
+ Children[i+1] = RHS;
+ ++NumChildren;
+ return nullptr;
+ }
+
+ // Okay, this node is full. Split it in half, moving WidthFactor children to
+ // a newly allocated interior node.
+
+ // Create the new node.
+ RopePieceBTreeInterior *NewNode = new RopePieceBTreeInterior();
+
+ // Move over the last 'WidthFactor' values from here to NewNode.
+ memcpy(&NewNode->Children[0], &Children[WidthFactor],
+ WidthFactor*sizeof(Children[0]));
+
+ // Decrease the number of values in the two nodes.
+ NewNode->NumChildren = NumChildren = WidthFactor;
+
+ // Finally, insert the new child into the side that can (now) hold it.
+ // This insertion can't fail.
+ if (i < WidthFactor)
+ this->HandleChildPiece(i, RHS);
+ else
+ NewNode->HandleChildPiece(i-WidthFactor, RHS);
+
+ // Recompute the two nodes' size.
+ NewNode->FullRecomputeSizeLocally();
+ FullRecomputeSizeLocally();
+ return NewNode;
+}
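+
+// A sketch of upward propagation (tree shape hypothetical, WidthFactor
+// assumed to be 8): when a child split hands us a new node and this
+// interior node already has 16 children, we split in half just as the leaf
+// did and hand the new right-hand interior node to our own caller. If the
+// overflow reaches the root, RopePieceBTree::insert below wraps the two
+// halves in a fresh RopePieceBTreeInterior, growing the tree by one level.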
+
+/// erase - Remove NumBytes from this node at the specified offset. We are
+/// guaranteed that there is a split at Offset.
+void RopePieceBTreeInterior::erase(unsigned Offset, unsigned NumBytes) {
+ // This will shrink this node by NumBytes.
+ Size -= NumBytes;
+
+ // Find the first child that overlaps with Offset.
+ unsigned i = 0;
+ for (; Offset >= getChild(i)->size(); ++i)
+ Offset -= getChild(i)->size();
+
+ // Propagate the delete request into overlapping children, or completely
+ // delete the children as appropriate.
+ while (NumBytes) {
+ RopePieceBTreeNode *CurChild = getChild(i);
+
+ // If we are deleting something contained entirely in the child, pass on the
+ // request.
+ if (Offset+NumBytes < CurChild->size()) {
+ CurChild->erase(Offset, NumBytes);
+ return;
+ }
+
+ // If this deletion request starts somewhere in the middle of the child, it
+ // must be deleting to the end of the child.
+ if (Offset) {
+ unsigned BytesFromChild = CurChild->size()-Offset;
+ CurChild->erase(Offset, BytesFromChild);
+ NumBytes -= BytesFromChild;
+ // Start at the beginning of the next child.
+ Offset = 0;
+ ++i;
+ continue;
+ }
+
+ // If the deletion request completely covers the child, delete it and move
+ // the rest down.
+ NumBytes -= CurChild->size();
+ CurChild->Destroy();
+ --NumChildren;
+ if (i != getNumChildren())
+ memmove(&Children[i], &Children[i+1],
+ (getNumChildren()-i)*sizeof(Children[0]));
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// RopePieceBTreeNode Implementation
+//===----------------------------------------------------------------------===//
+
+void RopePieceBTreeNode::Destroy() {
+ if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
+ delete Leaf;
+ else
+ delete cast<RopePieceBTreeInterior>(this);
+}
+
+/// split - Split the range containing the specified offset so that we are
+/// guaranteed that there is a place to do an insertion at the specified
+/// offset. The offset is relative, so "0" is the start of the node.
+///
+/// If there is no space in this subtree for the extra piece, the extra tree
+/// node is returned and must be inserted into a parent.
+RopePieceBTreeNode *RopePieceBTreeNode::split(unsigned Offset) {
+ assert(Offset <= size() && "Invalid offset to split!");
+ if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
+ return Leaf->split(Offset);
+ return cast<RopePieceBTreeInterior>(this)->split(Offset);
+}
+
+/// insert - Insert the specified ropepiece into this tree node at the
+/// specified offset. The offset is relative, so "0" is the start of the
+/// node.
+///
+/// If there is no space in this subtree for the extra piece, the extra tree
+/// node is returned and must be inserted into a parent.
+RopePieceBTreeNode *RopePieceBTreeNode::insert(unsigned Offset,
+ const RopePiece &R) {
+ assert(Offset <= size() && "Invalid offset to insert!");
+ if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
+ return Leaf->insert(Offset, R);
+ return cast<RopePieceBTreeInterior>(this)->insert(Offset, R);
+}
+
+/// erase - Remove NumBytes from this node at the specified offset. We are
+/// guaranteed that there is a split at Offset.
+void RopePieceBTreeNode::erase(unsigned Offset, unsigned NumBytes) {
+ assert(Offset+NumBytes <= size() && "Invalid offset to erase!");
+ if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
+ return Leaf->erase(Offset, NumBytes);
+ return cast<RopePieceBTreeInterior>(this)->erase(Offset, NumBytes);
+}
+
+
+//===----------------------------------------------------------------------===//
+// RopePieceBTreeIterator Implementation
+//===----------------------------------------------------------------------===//
+
+static const RopePieceBTreeLeaf *getCN(const void *P) {
+ return static_cast<const RopePieceBTreeLeaf*>(P);
+}
+
+// begin iterator.
+RopePieceBTreeIterator::RopePieceBTreeIterator(const void *n) {
+ const RopePieceBTreeNode *N = static_cast<const RopePieceBTreeNode*>(n);
+
+ // Walk down the left side of the tree until we get to a leaf.
+ while (const RopePieceBTreeInterior *IN = dyn_cast<RopePieceBTreeInterior>(N))
+ N = IN->getChild(0);
+
+ // We must have at least one leaf.
+ CurNode = cast<RopePieceBTreeLeaf>(N);
+
+ // If we found a leaf that happens to be empty, skip over it until we get
+ // to something full.
+ while (CurNode && getCN(CurNode)->getNumPieces() == 0)
+ CurNode = getCN(CurNode)->getNextLeafInOrder();
+
+ if (CurNode)
+ CurPiece = &getCN(CurNode)->getPiece(0);
+ else // Empty tree, this is an end() iterator.
+ CurPiece = nullptr;
+ CurChar = 0;
+}
+
+void RopePieceBTreeIterator::MoveToNextPiece() {
+ if (CurPiece != &getCN(CurNode)->getPiece(getCN(CurNode)->getNumPieces()-1)) {
+ CurChar = 0;
+ ++CurPiece;
+ return;
+ }
+
+ // Find the next non-empty leaf node.
+ do
+ CurNode = getCN(CurNode)->getNextLeafInOrder();
+ while (CurNode && getCN(CurNode)->getNumPieces() == 0);
+
+ if (CurNode)
+ CurPiece = &getCN(CurNode)->getPiece(0);
+ else // Hit end().
+ CurPiece = nullptr;
+ CurChar = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// RopePieceBTree Implementation
+//===----------------------------------------------------------------------===//
+
+static RopePieceBTreeNode *getRoot(void *P) {
+ return static_cast<RopePieceBTreeNode*>(P);
+}
+
+RopePieceBTree::RopePieceBTree() {
+ Root = new RopePieceBTreeLeaf();
+}
+RopePieceBTree::RopePieceBTree(const RopePieceBTree &RHS) {
+ assert(RHS.empty() && "Can't copy non-empty tree yet");
+ Root = new RopePieceBTreeLeaf();
+}
+RopePieceBTree::~RopePieceBTree() {
+ getRoot(Root)->Destroy();
+}
+
+unsigned RopePieceBTree::size() const {
+ return getRoot(Root)->size();
+}
+
+void RopePieceBTree::clear() {
+ if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(getRoot(Root)))
+ Leaf->clear();
+ else {
+ getRoot(Root)->Destroy();
+ Root = new RopePieceBTreeLeaf();
+ }
+}
+
+void RopePieceBTree::insert(unsigned Offset, const RopePiece &R) {
+ // #1. Split at Offset.
+ if (RopePieceBTreeNode *RHS = getRoot(Root)->split(Offset))
+ Root = new RopePieceBTreeInterior(getRoot(Root), RHS);
+
+ // #2. Do the insertion.
+ if (RopePieceBTreeNode *RHS = getRoot(Root)->insert(Offset, R))
+ Root = new RopePieceBTreeInterior(getRoot(Root), RHS);
+}
+
+void RopePieceBTree::erase(unsigned Offset, unsigned NumBytes) {
+ // #1. Split at Offset.
+ if (RopePieceBTreeNode *RHS = getRoot(Root)->split(Offset))
+ Root = new RopePieceBTreeInterior(getRoot(Root), RHS);
+
+ // #2. Do the erasing.
+ getRoot(Root)->erase(Offset, NumBytes);
+}
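+
+// A minimal, hypothetical usage sketch of the split-then-edit pattern above
+// (SomePiece stands for any RopePiece value):
+//   RopePieceBTree Tree;
+//   Tree.insert(0, SomePiece); // may grow a new root on node overflow
+//   Tree.erase(3, 2);          // forces split(3) first, then erases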
+
+//===----------------------------------------------------------------------===//
+// RewriteRope Implementation
+//===----------------------------------------------------------------------===//
+
+/// MakeRopeString - This copies the specified byte range into some instance of
+/// RopeRefCountString, and returns a RopePiece that represents it. This uses
+/// the AllocBuffer object to aggregate requests for small strings into one
+/// allocation instead of doing tons of tiny allocations.
+RopePiece RewriteRope::MakeRopeString(const char *Start, const char *End) {
+ unsigned Len = End-Start;
+ assert(Len && "Zero length RopePiece is invalid!");
+
+ // If we have space for this string in the current alloc buffer, use it.
+ if (AllocOffs+Len <= AllocChunkSize) {
+ memcpy(AllocBuffer->Data+AllocOffs, Start, Len);
+ AllocOffs += Len;
+ return RopePiece(AllocBuffer, AllocOffs-Len, AllocOffs);
+ }
+
+ // If we don't have enough room because this specific allocation is huge,
+ // just allocate a new rope piece for it alone.
+ if (Len > AllocChunkSize) {
+ unsigned Size = End-Start+sizeof(RopeRefCountString)-1;
+ RopeRefCountString *Res =
+ reinterpret_cast<RopeRefCountString *>(new char[Size]);
+ Res->RefCount = 0;
+ memcpy(Res->Data, Start, End-Start);
+ return RopePiece(Res, 0, End-Start);
+ }
+
+ // Otherwise, this was a small request but we just don't have space for it.
+ // Make a new chunk and share it with later allocations.
+
+ unsigned AllocSize = offsetof(RopeRefCountString, Data) + AllocChunkSize;
+ RopeRefCountString *Res =
+ reinterpret_cast<RopeRefCountString *>(new char[AllocSize]);
+ Res->RefCount = 0;
+ memcpy(Res->Data, Start, Len);
+ AllocBuffer = Res;
+ AllocOffs = Len;
+
+ return RopePiece(AllocBuffer, 0, Len);
+}
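+
+// Illustrative arithmetic for the three paths above, assuming for the sake
+// of example that AllocChunkSize is 4096 and AllocOffs is 4000:
+//   - a 50-byte request fits (4050 <= 4096) and shares the current buffer;
+//   - a 10000-byte request exceeds AllocChunkSize and gets its own
+//     exact-size RopeRefCountString allocation;
+//   - a 200-byte request does not fit (4200 > 4096) but is small, so a
+//     fresh shared chunk is allocated and becomes the new AllocBuffer.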
+
+
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp b/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp
new file mode 100644
index 0000000..ae41dec
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp
@@ -0,0 +1,458 @@
+//===--- Rewriter.cpp - Code rewriting interface --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Rewriter class, which is used for code
+// transformations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/Core/Rewriter.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticIDs.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+raw_ostream &RewriteBuffer::write(raw_ostream &os) const {
+ // Walk RewriteRope chunks efficiently using MoveToNextPiece() instead of the
+ // character iterator.
+ for (RopePieceBTreeIterator I = begin(), E = end(); I != E;
+ I.MoveToNextPiece())
+ os << I.piece();
+ return os;
+}
+
+/// \brief Return true if this character is non-new-line whitespace:
+/// ' ', '\\t', '\\f', '\\v', '\\r'.
+static inline bool isWhitespaceExceptNL(unsigned char c) {
+ switch (c) {
+ case ' ':
+ case '\t':
+ case '\f':
+ case '\v':
+ case '\r':
+ return true;
+ default:
+ return false;
+ }
+}
+
+void RewriteBuffer::RemoveText(unsigned OrigOffset, unsigned Size,
+ bool removeLineIfEmpty) {
+ // Nothing to remove, exit early.
+ if (Size == 0) return;
+
+ unsigned RealOffset = getMappedOffset(OrigOffset, true);
+ assert(RealOffset+Size <= Buffer.size() && "Invalid location");
+
+ // Remove the dead characters.
+ Buffer.erase(RealOffset, Size);
+
+ // Add a delta so that future changes are offset correctly.
+ AddReplaceDelta(OrigOffset, -Size);
+
+ if (removeLineIfEmpty) {
+ // Find the line on which the removal occurred and, if that line is now
+ // completely empty, remove the line as well.
+
+ iterator curLineStart = begin();
+ unsigned curLineStartOffs = 0;
+ iterator posI = begin();
+ for (unsigned i = 0; i != RealOffset; ++i) {
+ if (*posI == '\n') {
+ curLineStart = posI;
+ ++curLineStart;
+ curLineStartOffs = i + 1;
+ }
+ ++posI;
+ }
+
+ unsigned lineSize = 0;
+ posI = curLineStart;
+ while (posI != end() && isWhitespaceExceptNL(*posI)) {
+ ++posI;
+ ++lineSize;
+ }
+ if (posI != end() && *posI == '\n') {
+ Buffer.erase(curLineStartOffs, lineSize + 1/* + '\n'*/);
+ AddReplaceDelta(curLineStartOffs, -(lineSize + 1/* + '\n'*/));
+ }
+ }
+}
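+
+// A hypothetical illustration of removeLineIfEmpty above: deleting "x = 1;"
+// from the line "  x = 1;\n" leaves "  \n"; everything left before the
+// newline is non-newline whitespace, so the whole line is erased as well,
+// newline included (lineSize + 1 bytes).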
+
+void RewriteBuffer::InsertText(unsigned OrigOffset, StringRef Str,
+ bool InsertAfter) {
+
+ // Nothing to insert, exit early.
+ if (Str.empty()) return;
+
+ unsigned RealOffset = getMappedOffset(OrigOffset, InsertAfter);
+ Buffer.insert(RealOffset, Str.begin(), Str.end());
+
+ // Add a delta so that future changes are offset correctly.
+ AddInsertDelta(OrigOffset, Str.size());
+}
+
+/// ReplaceText - This method replaces a range of characters in the input
+/// buffer with a new string. This is effectively a combined "remove+insert"
+/// operation.
+void RewriteBuffer::ReplaceText(unsigned OrigOffset, unsigned OrigLength,
+ StringRef NewStr) {
+ unsigned RealOffset = getMappedOffset(OrigOffset, true);
+ Buffer.erase(RealOffset, OrigLength);
+ Buffer.insert(RealOffset, NewStr.begin(), NewStr.end());
+ if (OrigLength != NewStr.size())
+ AddReplaceDelta(OrigOffset, NewStr.size() - OrigLength);
+}
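+
+// A hedged walk-through of the offset mapping used above (buffer contents
+// hypothetical): after InsertText(10, "abc") records a +3 delta at original
+// offset 10, a later ReplaceText(20, 5, "...") maps its original offset 20
+// to real offset 23 via getMappedOffset() before touching Buffer. Callers
+// keep addressing offsets in the original text no matter how many edits
+// have already been applied.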
+
+
+//===----------------------------------------------------------------------===//
+// Rewriter class
+//===----------------------------------------------------------------------===//
+
+/// getRangeSize - Return the size in bytes of the specified range if its
+/// endpoints are in the same file. If not, this returns -1.
+int Rewriter::getRangeSize(const CharSourceRange &Range,
+ RewriteOptions opts) const {
+ if (!isRewritable(Range.getBegin()) ||
+ !isRewritable(Range.getEnd())) return -1;
+
+ FileID StartFileID, EndFileID;
+ unsigned StartOff, EndOff;
+
+ StartOff = getLocationOffsetAndFileID(Range.getBegin(), StartFileID);
+ EndOff = getLocationOffsetAndFileID(Range.getEnd(), EndFileID);
+
+ if (StartFileID != EndFileID)
+ return -1;
+
+ // If edits have been made to this buffer, the offsets of the range's
+ // endpoints may have changed.
+ std::map<FileID, RewriteBuffer>::const_iterator I =
+ RewriteBuffers.find(StartFileID);
+ if (I != RewriteBuffers.end()) {
+ const RewriteBuffer &RB = I->second;
+ EndOff = RB.getMappedOffset(EndOff, opts.IncludeInsertsAtEndOfRange);
+ StartOff = RB.getMappedOffset(StartOff, !opts.IncludeInsertsAtBeginOfRange);
+ }
+
+
+ // Adjust the end offset to the end of the last token, instead of being the
+ // start of the last token if this is a token range.
+ if (Range.isTokenRange())
+ EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts);
+
+ return EndOff-StartOff;
+}
+
+int Rewriter::getRangeSize(SourceRange Range, RewriteOptions opts) const {
+ return getRangeSize(CharSourceRange::getTokenRange(Range), opts);
+}
+
+
+/// getRewrittenText - Return the rewritten form of the text in the specified
+/// range. If the start or end of the range was unrewritable or if they are
+/// in different buffers, this returns an empty string.
+///
+/// Note that this method is not particularly efficient.
+///
+std::string Rewriter::getRewrittenText(SourceRange Range) const {
+ if (!isRewritable(Range.getBegin()) ||
+ !isRewritable(Range.getEnd()))
+ return "";
+
+ FileID StartFileID, EndFileID;
+ unsigned StartOff, EndOff;
+ StartOff = getLocationOffsetAndFileID(Range.getBegin(), StartFileID);
+ EndOff = getLocationOffsetAndFileID(Range.getEnd(), EndFileID);
+
+ if (StartFileID != EndFileID)
+ return ""; // Start and end in different buffers.
+
+ // If edits have been made to this buffer, the offsets of the range's
+ // endpoints may have changed.
+ std::map<FileID, RewriteBuffer>::const_iterator I =
+ RewriteBuffers.find(StartFileID);
+ if (I == RewriteBuffers.end()) {
+ // If the buffer hasn't been rewritten, just return the text from the input.
+ const char *Ptr = SourceMgr->getCharacterData(Range.getBegin());
+
+ // Adjust the end offset to the end of the last token, instead of being the
+ // start of the last token.
+ EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts);
+ return std::string(Ptr, Ptr+EndOff-StartOff);
+ }
+
+ const RewriteBuffer &RB = I->second;
+ EndOff = RB.getMappedOffset(EndOff, true);
+ StartOff = RB.getMappedOffset(StartOff);
+
+ // Adjust the end offset to the end of the last token, instead of being the
+ // start of the last token.
+ EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts);
+
+ // Advance the iterators to the right spot, yay for linear time algorithms.
+ RewriteBuffer::iterator Start = RB.begin();
+ std::advance(Start, StartOff);
+ RewriteBuffer::iterator End = Start;
+ std::advance(End, EndOff-StartOff);
+
+ return std::string(Start, End);
+}
+
+unsigned Rewriter::getLocationOffsetAndFileID(SourceLocation Loc,
+ FileID &FID) const {
+ assert(Loc.isValid() && "Invalid location");
+ std::pair<FileID,unsigned> V = SourceMgr->getDecomposedLoc(Loc);
+ FID = V.first;
+ return V.second;
+}
+
+
+/// getEditBuffer - Get or create a RewriteBuffer for the specified FileID.
+///
+RewriteBuffer &Rewriter::getEditBuffer(FileID FID) {
+ std::map<FileID, RewriteBuffer>::iterator I =
+ RewriteBuffers.lower_bound(FID);
+ if (I != RewriteBuffers.end() && I->first == FID)
+ return I->second;
+ I = RewriteBuffers.insert(I, std::make_pair(FID, RewriteBuffer()));
+
+ StringRef MB = SourceMgr->getBufferData(FID);
+ I->second.Initialize(MB.begin(), MB.end());
+
+ return I->second;
+}
+
+/// InsertText - Insert the specified string at the specified location in the
+/// original buffer.
+bool Rewriter::InsertText(SourceLocation Loc, StringRef Str,
+ bool InsertAfter, bool indentNewLines) {
+ if (!isRewritable(Loc)) return true;
+ FileID FID;
+ unsigned StartOffs = getLocationOffsetAndFileID(Loc, FID);
+
+ SmallString<128> indentedStr;
+ if (indentNewLines && Str.find('\n') != StringRef::npos) {
+ StringRef MB = SourceMgr->getBufferData(FID);
+
+ unsigned lineNo = SourceMgr->getLineNumber(FID, StartOffs) - 1;
+ const SrcMgr::ContentCache *
+ Content = SourceMgr->getSLocEntry(FID).getFile().getContentCache();
+ unsigned lineOffs = Content->SourceLineCache[lineNo];
+
+ // Find the whitespace at the start of the line.
+ StringRef indentSpace;
+ {
+ unsigned i = lineOffs;
+ while (isWhitespaceExceptNL(MB[i]))
+ ++i;
+ indentSpace = MB.substr(lineOffs, i-lineOffs);
+ }
+
+ SmallVector<StringRef, 4> lines;
+ Str.split(lines, "\n");
+
+ for (unsigned i = 0, e = lines.size(); i != e; ++i) {
+ indentedStr += lines[i];
+ if (i < e-1) {
+ indentedStr += '\n';
+ indentedStr += indentSpace;
+ }
+ }
+ Str = indentedStr.str();
+ }
+
+ getEditBuffer(FID).InsertText(StartOffs, Str, InsertAfter);
+ return false;
+}
+
+bool Rewriter::InsertTextAfterToken(SourceLocation Loc, StringRef Str) {
+ if (!isRewritable(Loc)) return true;
+ FileID FID;
+ unsigned StartOffs = getLocationOffsetAndFileID(Loc, FID);
+ RewriteOptions rangeOpts;
+ rangeOpts.IncludeInsertsAtBeginOfRange = false;
+ StartOffs += getRangeSize(SourceRange(Loc, Loc), rangeOpts);
+ getEditBuffer(FID).InsertText(StartOffs, Str, /*InsertAfter*/true);
+ return false;
+}
+
+/// RemoveText - Remove the specified text region.
+bool Rewriter::RemoveText(SourceLocation Start, unsigned Length,
+ RewriteOptions opts) {
+ if (!isRewritable(Start)) return true;
+ FileID FID;
+ unsigned StartOffs = getLocationOffsetAndFileID(Start, FID);
+ getEditBuffer(FID).RemoveText(StartOffs, Length, opts.RemoveLineIfEmpty);
+ return false;
+}
+
+/// ReplaceText - This method replaces a range of characters in the input
+/// buffer with a new string. This is effectively a combined "remove/insert"
+/// operation.
+bool Rewriter::ReplaceText(SourceLocation Start, unsigned OrigLength,
+ StringRef NewStr) {
+ if (!isRewritable(Start)) return true;
+ FileID StartFileID;
+ unsigned StartOffs = getLocationOffsetAndFileID(Start, StartFileID);
+
+ getEditBuffer(StartFileID).ReplaceText(StartOffs, OrigLength, NewStr);
+ return false;
+}
+
+bool Rewriter::ReplaceText(SourceRange range, SourceRange replacementRange) {
+ if (!isRewritable(range.getBegin())) return true;
+ if (!isRewritable(range.getEnd())) return true;
+ if (replacementRange.isInvalid()) return true;
+ SourceLocation start = range.getBegin();
+ unsigned origLength = getRangeSize(range);
+ unsigned newLength = getRangeSize(replacementRange);
+ FileID FID;
+ unsigned newOffs = getLocationOffsetAndFileID(replacementRange.getBegin(),
+ FID);
+ StringRef MB = SourceMgr->getBufferData(FID);
+ return ReplaceText(start, origLength, MB.substr(newOffs, newLength));
+}
+
+bool Rewriter::IncreaseIndentation(CharSourceRange range,
+ SourceLocation parentIndent) {
+ if (range.isInvalid()) return true;
+ if (!isRewritable(range.getBegin())) return true;
+ if (!isRewritable(range.getEnd())) return true;
+ if (!isRewritable(parentIndent)) return true;
+
+ FileID StartFileID, EndFileID, parentFileID;
+ unsigned StartOff, EndOff, parentOff;
+
+ StartOff = getLocationOffsetAndFileID(range.getBegin(), StartFileID);
+ EndOff = getLocationOffsetAndFileID(range.getEnd(), EndFileID);
+ parentOff = getLocationOffsetAndFileID(parentIndent, parentFileID);
+
+ if (StartFileID != EndFileID || StartFileID != parentFileID)
+ return true;
+ if (StartOff > EndOff)
+ return true;
+
+ FileID FID = StartFileID;
+ StringRef MB = SourceMgr->getBufferData(FID);
+
+ unsigned parentLineNo = SourceMgr->getLineNumber(FID, parentOff) - 1;
+ unsigned startLineNo = SourceMgr->getLineNumber(FID, StartOff) - 1;
+ unsigned endLineNo = SourceMgr->getLineNumber(FID, EndOff) - 1;
+
+ const SrcMgr::ContentCache *
+ Content = SourceMgr->getSLocEntry(FID).getFile().getContentCache();
+
+ // Find where the lines start.
+ unsigned parentLineOffs = Content->SourceLineCache[parentLineNo];
+ unsigned startLineOffs = Content->SourceLineCache[startLineNo];
+
+ // Find the whitespace at the start of each line.
+ StringRef parentSpace, startSpace;
+ {
+ unsigned i = parentLineOffs;
+ while (isWhitespaceExceptNL(MB[i]))
+ ++i;
+ parentSpace = MB.substr(parentLineOffs, i-parentLineOffs);
+
+ i = startLineOffs;
+ while (isWhitespaceExceptNL(MB[i]))
+ ++i;
+ startSpace = MB.substr(startLineOffs, i-startLineOffs);
+ }
+ if (parentSpace.size() >= startSpace.size())
+ return true;
+ if (!startSpace.startswith(parentSpace))
+ return true;
+
+ StringRef indent = startSpace.substr(parentSpace.size());
+
+ // Indent the lines between start/end offsets.
+ RewriteBuffer &RB = getEditBuffer(FID);
+ for (unsigned lineNo = startLineNo; lineNo <= endLineNo; ++lineNo) {
+ unsigned offs = Content->SourceLineCache[lineNo];
+ unsigned i = offs;
+ while (isWhitespaceExceptNL(MB[i]))
+ ++i;
+ StringRef origIndent = MB.substr(offs, i-offs);
+ if (origIndent.startswith(startSpace))
+ RB.InsertText(offs, indent, /*InsertAfter=*/false);
+ }
+
+ return false;
+}
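+
+// A worked example of the indentation math above (whitespace hypothetical):
+// if the parentIndent line starts with 2 spaces and the range's first line
+// starts with 6, indent is the trailing 4-space suffix; every line in the
+// range whose leading whitespace still begins with those 6 spaces gets the
+// 4 extra spaces inserted at its start, deepening the block by one level.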
+
+namespace {
+// A wrapper for a file stream that atomically overwrites the target.
+//
+// Creates a file output stream for a temporary file in the constructor,
+// which is later accessible via getStream() if ok() returns true.
+// Flushes the stream and moves the temporary file to the target location
+// in the destructor.
+class AtomicallyMovedFile {
+public:
+ AtomicallyMovedFile(DiagnosticsEngine &Diagnostics, StringRef Filename,
+ bool &AllWritten)
+ : Diagnostics(Diagnostics), Filename(Filename), AllWritten(AllWritten) {
+ TempFilename = Filename;
+ TempFilename += "-%%%%%%%%";
+ int FD;
+ if (llvm::sys::fs::createUniqueFile(TempFilename, FD, TempFilename)) {
+ AllWritten = false;
+ Diagnostics.Report(clang::diag::err_unable_to_make_temp)
+ << TempFilename;
+ } else {
+ FileStream.reset(new llvm::raw_fd_ostream(FD, /*shouldClose=*/true));
+ }
+ }
+
+ ~AtomicallyMovedFile() {
+ if (!ok()) return;
+
+ // Close (and thereby flush) the FileStream.
+ FileStream->close();
+ if (std::error_code ec = llvm::sys::fs::rename(TempFilename, Filename)) {
+ AllWritten = false;
+ Diagnostics.Report(clang::diag::err_unable_to_rename_temp)
+ << TempFilename << Filename << ec.message();
+ // If the remove fails, there's not a lot we can do - this is already an
+ // error.
+ llvm::sys::fs::remove(TempFilename);
+ }
+ }
+
+ bool ok() { return (bool)FileStream; }
+ raw_ostream &getStream() { return *FileStream; }
+
+private:
+ DiagnosticsEngine &Diagnostics;
+ StringRef Filename;
+ SmallString<128> TempFilename;
+ std::unique_ptr<llvm::raw_fd_ostream> FileStream;
+ bool &AllWritten;
+};
+} // end anonymous namespace
+
+bool Rewriter::overwriteChangedFiles() {
+ bool AllWritten = true;
+ for (buffer_iterator I = buffer_begin(), E = buffer_end(); I != E; ++I) {
+ const FileEntry *Entry =
+ getSourceMgr().getFileEntryForID(I->first);
+ AtomicallyMovedFile File(getSourceMgr().getDiagnostics(), Entry->getName(),
+ AllWritten);
+ if (File.ok()) {
+ I->second.write(File.getStream());
+ }
+ }
+ return !AllWritten;
+}
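+
+// A minimal, hypothetical driver for this class (SM, LangOpts, and the
+// locations are invented for illustration):
+//   Rewriter Rewrite(SM, LangOpts);
+//   Rewrite.InsertText(Loc, "/* patched */ ", /*InsertAfter=*/true);
+//   Rewrite.ReplaceText(Loc2, /*OrigLength=*/3, "new");
+//   if (Rewrite.overwriteChangedFiles())
+//     llvm::errs() << "some rewritten buffers failed to write\n";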
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/TokenRewriter.cpp b/contrib/llvm/tools/clang/lib/Rewrite/TokenRewriter.cpp
new file mode 100644
index 0000000..494defd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/TokenRewriter.cpp
@@ -0,0 +1,99 @@
+//===--- TokenRewriter.cpp - Token-based code rewriting interface ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TokenRewriter class, which is used for code
+// transformations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/Core/TokenRewriter.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/ScratchBuffer.h"
+using namespace clang;
+
+TokenRewriter::TokenRewriter(FileID FID, SourceManager &SM,
+ const LangOptions &LangOpts) {
+ ScratchBuf.reset(new ScratchBuffer(SM));
+
+ // Create a lexer to lex all the tokens of the main file in raw mode.
+ const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
+ Lexer RawLex(FID, FromFile, SM, LangOpts);
+
+ // Return all comments and whitespace as tokens.
+ RawLex.SetKeepWhitespaceMode(true);
+
+ // Lex the file, populating our datastructures.
+ Token RawTok;
+ RawLex.LexFromRawLexer(RawTok);
+ while (RawTok.isNot(tok::eof)) {
+#if 0
+ if (Tok.is(tok::raw_identifier)) {
+ // Look up the identifier info for the token. This should use
+ // IdentifierTable directly instead of PP.
+ PP.LookUpIdentifierInfo(Tok);
+ }
+#endif
+
+ AddToken(RawTok, TokenList.end());
+ RawLex.LexFromRawLexer(RawTok);
+ }
+}
+
+TokenRewriter::~TokenRewriter() {
+}
+
+
+/// RemapIterator - Convert from token_iterator (a const iterator) to
+/// TokenRefTy (a non-const iterator).
+TokenRewriter::TokenRefTy TokenRewriter::RemapIterator(token_iterator I) {
+ if (I == token_end()) return TokenList.end();
+
+ // FIXME: This is horrible, we should use our own list or something to avoid
+ // this.
+ std::map<SourceLocation, TokenRefTy>::iterator MapIt =
+ TokenAtLoc.find(I->getLocation());
+ assert(MapIt != TokenAtLoc.end() && "iterator not in rewriter?");
+ return MapIt->second;
+}
+
+
+/// AddToken - Add the specified token into the Rewriter before the given
+/// position.
+TokenRewriter::TokenRefTy
+TokenRewriter::AddToken(const Token &T, TokenRefTy Where) {
+ Where = TokenList.insert(Where, T);
+
+ bool InsertSuccess = TokenAtLoc.insert(std::make_pair(T.getLocation(),
+ Where)).second;
+ assert(InsertSuccess && "Token location already in rewriter!");
+ (void)InsertSuccess;
+ return Where;
+}
+
+
+TokenRewriter::token_iterator
+TokenRewriter::AddTokenBefore(token_iterator I, const char *Val) {
+ unsigned Len = strlen(Val);
+
+ // Plop the string into the scratch buffer, then create a token for this
+ // string.
+ Token Tok;
+ Tok.startToken();
+ const char *Spelling;
+ Tok.setLocation(ScratchBuf->getToken(Val, Len, Spelling));
+ Tok.setLength(Len);
+
+ // TODO: Form a whole lexer around this and relex the token! For now, just
+ // set kind to tok::unknown.
+ Tok.setKind(tok::unknown);
+
+ return AddToken(Tok, RemapIterator(I));
+}
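+
+// A hypothetical use of the API above: given an existing token_iterator I,
+// AddTokenBefore(I, "/*note*/") plops the spelling into the scratch buffer
+// and splices a tok::unknown token into the stream just ahead of *I.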
+
diff --git a/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp b/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp
new file mode 100644
index 0000000..5f74343
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -0,0 +1,2110 @@
+//=- AnalysisBasedWarnings.cpp - Sema warnings based on libAnalysis -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines analysis_warnings::[Policy,Executor].
+// Together they are used by Sema to issue warnings based on inexpensive
+// static analysis algorithms in libAnalysis.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/AnalysisBasedWarnings.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
+#include "clang/Analysis/Analyses/Consumed.h"
+#include "clang/Analysis/Analyses/ReachableCode.h"
+#include "clang/Analysis/Analyses/ThreadSafety.h"
+#include "clang/Analysis/Analyses/UninitializedValues.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/CFGStmtMap.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaInternal.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include <algorithm>
+#include <deque>
+#include <iterator>
+#include <vector>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Unreachable code analysis.
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class UnreachableCodeHandler : public reachable_code::Callback {
+ Sema &S;
+ public:
+ UnreachableCodeHandler(Sema &s) : S(s) {}
+
+ void HandleUnreachable(reachable_code::UnreachableKind UK,
+ SourceLocation L,
+ SourceRange SilenceableCondVal,
+ SourceRange R1,
+ SourceRange R2) override {
+ unsigned diag = diag::warn_unreachable;
+ switch (UK) {
+ case reachable_code::UK_Break:
+ diag = diag::warn_unreachable_break;
+ break;
+ case reachable_code::UK_Return:
+ diag = diag::warn_unreachable_return;
+ break;
+ case reachable_code::UK_Loop_Increment:
+ diag = diag::warn_unreachable_loop_increment;
+ break;
+ case reachable_code::UK_Other:
+ break;
+ }
+
+ S.Diag(L, diag) << R1 << R2;
+
+ SourceLocation Open = SilenceableCondVal.getBegin();
+ if (Open.isValid()) {
+ SourceLocation Close = SilenceableCondVal.getEnd();
+ Close = S.getLocForEndOfToken(Close);
+ if (Close.isValid()) {
+ S.Diag(Open, diag::note_unreachable_silence)
+ << FixItHint::CreateInsertion(Open, "/* DISABLES CODE */ (")
+ << FixItHint::CreateInsertion(Close, ")");
+ }
+ }
+ }
+ };
+} // anonymous namespace
+
+/// CheckUnreachable - Check for unreachable code.
+static void CheckUnreachable(Sema &S, AnalysisDeclContext &AC) {
+ // As a heuristic, prune all diagnostics not in the main file. Currently
+ // the majority of warnings in headers are false positives. These
+ // are largely caused by configuration state, e.g. preprocessor
+ // defined code, etc.
+ //
+ // Note that this is also a performance optimization. Analyzing
+ // headers many times can be expensive.
+ if (!S.getSourceManager().isInMainFile(AC.getDecl()->getLocStart()))
+ return;
+
+ UnreachableCodeHandler UC(S);
+ reachable_code::FindUnreachableCode(AC, S.getPreprocessor(), UC);
+}
+
+namespace {
+/// \brief Warn on logical operator errors in CFGBuilder
+class LogicalErrorHandler : public CFGCallback {
+ Sema &S;
+
+public:
+ LogicalErrorHandler(Sema &S) : CFGCallback(), S(S) {}
+
+ static bool HasMacroID(const Expr *E) {
+ if (E->getExprLoc().isMacroID())
+ return true;
+
+ // Recurse to children.
+ for (const Stmt *SubStmt : E->children())
+ if (const Expr *SubExpr = dyn_cast_or_null<Expr>(SubStmt))
+ if (HasMacroID(SubExpr))
+ return true;
+
+ return false;
+ }
+
+ void compareAlwaysTrue(const BinaryOperator *B, bool isAlwaysTrue) override {
+ if (HasMacroID(B))
+ return;
+
+ SourceRange DiagRange = B->getSourceRange();
+ S.Diag(B->getExprLoc(), diag::warn_tautological_overlap_comparison)
+ << DiagRange << isAlwaysTrue;
+ }
+
+ void compareBitwiseEquality(const BinaryOperator *B,
+ bool isAlwaysTrue) override {
+ if (HasMacroID(B))
+ return;
+
+ SourceRange DiagRange = B->getSourceRange();
+ S.Diag(B->getExprLoc(), diag::warn_comparison_bitwise_always)
+ << DiagRange << isAlwaysTrue;
+ }
+};
+} // anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Check for infinite self-recursion in functions
+//===----------------------------------------------------------------------===//
+
+// Returns true if the function is called anywhere within the CFGBlock.
+// For member functions, the additional condition of being called through
+// the 'this' pointer is required.
+static bool hasRecursiveCallInPath(const FunctionDecl *FD, CFGBlock &Block) {
+ // Process all the Stmt's in this block to find any calls to FD.
+ for (const auto &B : Block) {
+ if (B.getKind() != CFGElement::Statement)
+ continue;
+
+ const CallExpr *CE = dyn_cast<CallExpr>(B.getAs<CFGStmt>()->getStmt());
+ if (!CE || !CE->getCalleeDecl() ||
+ CE->getCalleeDecl()->getCanonicalDecl() != FD)
+ continue;
+
+ // Skip function calls which are qualified with a templated class.
+ if (const DeclRefExpr *DRE =
+ dyn_cast<DeclRefExpr>(CE->getCallee()->IgnoreParenImpCasts())) {
+ if (NestedNameSpecifier *NNS = DRE->getQualifier()) {
+ if (NNS->getKind() == NestedNameSpecifier::TypeSpec &&
+ isa<TemplateSpecializationType>(NNS->getAsType())) {
+ continue;
+ }
+ }
+ }
+
+ const CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(CE);
+ if (!MCE || isa<CXXThisExpr>(MCE->getImplicitObjectArgument()) ||
+ !MCE->getMethodDecl()->isVirtual())
+ return true;
+ }
+ return false;
+}
+
+// All blocks are in one of three states. States are ordered so that blocks
+// can only move to higher states.
+enum RecursiveState {
+ FoundNoPath,
+ FoundPath,
+ FoundPathWithNoRecursiveCall
+};
+
+// Returns true if there exists a path to the exit block and every path
+// to the exit block passes through a call to FD.
+static bool checkForRecursiveFunctionCall(const FunctionDecl *FD, CFG *cfg) {
+
+ const unsigned ExitID = cfg->getExit().getBlockID();
+
+ // Mark all nodes as FoundNoPath, then set the status of the entry block.
+ SmallVector<RecursiveState, 16> States(cfg->getNumBlockIDs(), FoundNoPath);
+ States[cfg->getEntry().getBlockID()] = FoundPathWithNoRecursiveCall;
+
+ // Make the processing stack and seed it with the entry block.
+ SmallVector<CFGBlock *, 16> Stack;
+ Stack.push_back(&cfg->getEntry());
+
+ while (!Stack.empty()) {
+ CFGBlock *CurBlock = Stack.back();
+ Stack.pop_back();
+
+ unsigned ID = CurBlock->getBlockID();
+ RecursiveState CurState = States[ID];
+
+ if (CurState == FoundPathWithNoRecursiveCall) {
+ // Found a path to the exit node without a recursive call.
+ if (ExitID == ID)
+ return false;
+
+ // Only change state if the block has a recursive call.
+ if (hasRecursiveCallInPath(FD, *CurBlock))
+ CurState = FoundPath;
+ }
+
+ // Loop over successor blocks and add them to the Stack if their state
+ // changes.
+ for (auto I = CurBlock->succ_begin(), E = CurBlock->succ_end(); I != E; ++I)
+ if (*I) {
+ unsigned next_ID = (*I)->getBlockID();
+ if (States[next_ID] < CurState) {
+ States[next_ID] = CurState;
+ Stack.push_back(*I);
+ }
+ }
+ }
+
+ // Return true if the exit node is reachable, and only reachable through
+ // a recursive call.
+ return States[ExitID] == FoundPath;
+}
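+
+// A sketch of the propagation above on a hypothetical CFG
+// entry -> A -> exit, where block A contains a recursive call: the entry
+// starts as FoundPathWithNoRecursiveCall, visiting A downgrades the
+// propagated state to FoundPath, and the exit therefore ends as FoundPath,
+// so the function returns true. A direct entry -> exit edge would instead
+// reach the exit in FoundPathWithNoRecursiveCall and return false early.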
+
+static void checkRecursiveFunction(Sema &S, const FunctionDecl *FD,
+ const Stmt *Body, AnalysisDeclContext &AC) {
+ FD = FD->getCanonicalDecl();
+
+ // Only run on non-templated functions and non-templated members of
+ // templated classes.
+ if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate &&
+ FD->getTemplatedKind() != FunctionDecl::TK_MemberSpecialization)
+ return;
+
+ CFG *cfg = AC.getCFG();
+ if (!cfg) return;
+
+ // If the exit block is unreachable, skip processing the function.
+ if (cfg->getExit().pred_empty())
+ return;
+
+ // Emit diagnostic if a recursive function call is detected for all paths.
+ if (checkForRecursiveFunctionCall(FD, cfg))
+ S.Diag(Body->getLocStart(), diag::warn_infinite_recursive_function);
+}
+
+//===----------------------------------------------------------------------===//
+// Check for missing return value.
+//===----------------------------------------------------------------------===//
+
+enum ControlFlowKind {
+ UnknownFallThrough,
+ NeverFallThrough,
+ MaybeFallThrough,
+ AlwaysFallThrough,
+ NeverFallThroughOrReturn
+};
+
+/// CheckFallThrough - Check that we don't fall off the end of a
+/// Statement that should return a value.
+///
+/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
+/// MaybeFallThrough iff we might or might not fall off the end,
+/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
+/// return. We assume NeverFallThrough iff we never fall off the end of the
+/// statement but we may return. We assume that functions not marked noreturn
+/// will return.
+static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
+ CFG *cfg = AC.getCFG();
+ if (!cfg) return UnknownFallThrough;
+
+ // The CFG leaves in dead things, and we don't want the dead code paths to
+ // confuse us, so we mark all live things first.
+ llvm::BitVector live(cfg->getNumBlockIDs());
+ unsigned count = reachable_code::ScanReachableFromBlock(&cfg->getEntry(),
+ live);
+
+ bool AddEHEdges = AC.getAddEHEdges();
+ if (!AddEHEdges && count != cfg->getNumBlockIDs())
+ // When there are things remaining dead, and we didn't add EH edges
+ // from CallExprs to the catch clauses, we have to go back and
+ // mark them as live.
+ for (const auto *B : *cfg) {
+ if (!live[B->getBlockID()]) {
+ if (B->pred_begin() == B->pred_end()) {
+ if (B->getTerminator() && isa<CXXTryStmt>(B->getTerminator()))
+ // When not adding EH edges from calls, catch clauses
+ // can otherwise seem dead. Avoid noting them as dead.
+ count += reachable_code::ScanReachableFromBlock(B, live);
+ continue;
+ }
+ }
+ }
+
+ // Now that we know what is live, we check the live predecessors of the exit
+ // block and look for fall through paths, being careful to ignore normal
+ // returns and exceptional paths.
+ bool HasLiveReturn = false;
+ bool HasFakeEdge = false;
+ bool HasPlainEdge = false;
+ bool HasAbnormalEdge = false;
+
+ // Ignore default cases that aren't likely to be reachable because all
+ // enums in a switch(X) have explicit case statements.
+ CFGBlock::FilterOptions FO;
+ FO.IgnoreDefaultsWithCoveredEnums = 1;
+
+ for (CFGBlock::filtered_pred_iterator
+ I = cfg->getExit().filtered_pred_start_end(FO); I.hasMore(); ++I) {
+ const CFGBlock& B = **I;
+ if (!live[B.getBlockID()])
+ continue;
+
+ // Skip blocks which contain an element marked as no-return. They don't
+ // represent actually viable edges into the exit block, so mark them as
+ // abnormal.
+ if (B.hasNoReturnElement()) {
+ HasAbnormalEdge = true;
+ continue;
+ }
+
+ // Destructors can appear after the 'return' in the CFG. This is
+ // normal. We need to look past the destructors for the return
+ // statement (if it exists).
+ CFGBlock::const_reverse_iterator ri = B.rbegin(), re = B.rend();
+
+ for ( ; ri != re ; ++ri)
+ if (ri->getAs<CFGStmt>())
+ break;
+
+ // No more CFGElements in the block?
+ if (ri == re) {
+ if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
+ HasAbnormalEdge = true;
+ continue;
+ }
+ // A labeled empty statement, or the entry block...
+ HasPlainEdge = true;
+ continue;
+ }
+
+ CFGStmt CS = ri->castAs<CFGStmt>();
+ const Stmt *S = CS.getStmt();
+ if (isa<ReturnStmt>(S)) {
+ HasLiveReturn = true;
+ continue;
+ }
+ if (isa<ObjCAtThrowStmt>(S)) {
+ HasFakeEdge = true;
+ continue;
+ }
+ if (isa<CXXThrowExpr>(S)) {
+ HasFakeEdge = true;
+ continue;
+ }
+ if (isa<MSAsmStmt>(S)) {
+ // TODO: Verify this is correct.
+ HasFakeEdge = true;
+ HasLiveReturn = true;
+ continue;
+ }
+ if (isa<CXXTryStmt>(S)) {
+ HasAbnormalEdge = true;
+ continue;
+ }
+ if (std::find(B.succ_begin(), B.succ_end(), &cfg->getExit())
+ == B.succ_end()) {
+ HasAbnormalEdge = true;
+ continue;
+ }
+
+ HasPlainEdge = true;
+ }
+ if (!HasPlainEdge) {
+ if (HasLiveReturn)
+ return NeverFallThrough;
+ return NeverFallThroughOrReturn;
+ }
+ if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
+ return MaybeFallThrough;
+ // This says AlwaysFallThrough for calls to functions that never return but
+ // are not marked noreturn. If people would like this warning to be more
+ // accurate, such functions should be marked as noreturn.
+ return AlwaysFallThrough;
+}
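+
+// Hypothetical inputs and how the classification above treats them:
+//   int f() { return 1; }              -> NeverFallThrough
+//   int g(bool b) { if (b) return 1; } -> MaybeFallThrough
+//   int h() { }                        -> AlwaysFallThrough
+//   void i() { for (;;); }             -> NeverFallThroughOrReturn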
+
+namespace {
+
+struct CheckFallThroughDiagnostics {
+ unsigned diag_MaybeFallThrough_HasNoReturn;
+ unsigned diag_MaybeFallThrough_ReturnsNonVoid;
+ unsigned diag_AlwaysFallThrough_HasNoReturn;
+ unsigned diag_AlwaysFallThrough_ReturnsNonVoid;
+ unsigned diag_NeverFallThroughOrReturn;
+ enum { Function, Block, Lambda } funMode;
+ SourceLocation FuncLoc;
+
+ static CheckFallThroughDiagnostics MakeForFunction(const Decl *Func) {
+ CheckFallThroughDiagnostics D;
+ D.FuncLoc = Func->getLocation();
+ D.diag_MaybeFallThrough_HasNoReturn =
+ diag::warn_falloff_noreturn_function;
+ D.diag_MaybeFallThrough_ReturnsNonVoid =
+ diag::warn_maybe_falloff_nonvoid_function;
+ D.diag_AlwaysFallThrough_HasNoReturn =
+ diag::warn_falloff_noreturn_function;
+ D.diag_AlwaysFallThrough_ReturnsNonVoid =
+ diag::warn_falloff_nonvoid_function;
+
+ // Don't suggest that virtual functions be marked "noreturn", since they
+ // might be overridden by non-noreturn functions.
+ bool isVirtualMethod = false;
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Func))
+ isVirtualMethod = Method->isVirtual();
+
+ // Don't suggest that template instantiations be marked "noreturn"
+ bool isTemplateInstantiation = false;
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(Func))
+ isTemplateInstantiation = Function->isTemplateInstantiation();
+
+ if (!isVirtualMethod && !isTemplateInstantiation)
+ D.diag_NeverFallThroughOrReturn =
+ diag::warn_suggest_noreturn_function;
+ else
+ D.diag_NeverFallThroughOrReturn = 0;
+
+ D.funMode = Function;
+ return D;
+ }
+
+ static CheckFallThroughDiagnostics MakeForBlock() {
+ CheckFallThroughDiagnostics D;
+ D.diag_MaybeFallThrough_HasNoReturn =
+ diag::err_noreturn_block_has_return_expr;
+ D.diag_MaybeFallThrough_ReturnsNonVoid =
+ diag::err_maybe_falloff_nonvoid_block;
+ D.diag_AlwaysFallThrough_HasNoReturn =
+ diag::err_noreturn_block_has_return_expr;
+ D.diag_AlwaysFallThrough_ReturnsNonVoid =
+ diag::err_falloff_nonvoid_block;
+ D.diag_NeverFallThroughOrReturn = 0;
+ D.funMode = Block;
+ return D;
+ }
+
+ static CheckFallThroughDiagnostics MakeForLambda() {
+ CheckFallThroughDiagnostics D;
+ D.diag_MaybeFallThrough_HasNoReturn =
+ diag::err_noreturn_lambda_has_return_expr;
+ D.diag_MaybeFallThrough_ReturnsNonVoid =
+ diag::warn_maybe_falloff_nonvoid_lambda;
+ D.diag_AlwaysFallThrough_HasNoReturn =
+ diag::err_noreturn_lambda_has_return_expr;
+ D.diag_AlwaysFallThrough_ReturnsNonVoid =
+ diag::warn_falloff_nonvoid_lambda;
+ D.diag_NeverFallThroughOrReturn = 0;
+ D.funMode = Lambda;
+ return D;
+ }
+
+ bool checkDiagnostics(DiagnosticsEngine &D, bool ReturnsVoid,
+ bool HasNoReturn) const {
+ if (funMode == Function) {
+ return (ReturnsVoid ||
+ D.isIgnored(diag::warn_maybe_falloff_nonvoid_function,
+ FuncLoc)) &&
+ (!HasNoReturn ||
+ D.isIgnored(diag::warn_noreturn_function_has_return_expr,
+ FuncLoc)) &&
+ (!ReturnsVoid ||
+ D.isIgnored(diag::warn_suggest_noreturn_block, FuncLoc));
+ }
+
+ // For blocks / lambdas.
+ return ReturnsVoid && !HasNoReturn;
+ }
+};
+
+} // anonymous namespace
+
+/// CheckFallThroughForBody - Check that we don't fall off the end of a
+/// function that should return a value. Check that we don't fall off the end
+/// of a noreturn function. We assume that functions and blocks not marked
+/// noreturn will return.
+static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
+ const BlockExpr *blkExpr,
+ const CheckFallThroughDiagnostics& CD,
+ AnalysisDeclContext &AC) {
+
+ bool ReturnsVoid = false;
+ bool HasNoReturn = false;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ ReturnsVoid = FD->getReturnType()->isVoidType();
+ HasNoReturn = FD->isNoReturn();
+ }
+ else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ ReturnsVoid = MD->getReturnType()->isVoidType();
+ HasNoReturn = MD->hasAttr<NoReturnAttr>();
+ }
+ else if (isa<BlockDecl>(D)) {
+ QualType BlockTy = blkExpr->getType();
+ if (const FunctionType *FT =
+ BlockTy->getPointeeType()->getAs<FunctionType>()) {
+ if (FT->getReturnType()->isVoidType())
+ ReturnsVoid = true;
+ if (FT->getNoReturnAttr())
+ HasNoReturn = true;
+ }
+ }
+
+ DiagnosticsEngine &Diags = S.getDiagnostics();
+
+ // Short circuit for compilation speed.
+ if (CD.checkDiagnostics(Diags, ReturnsVoid, HasNoReturn))
+ return;
+
+ SourceLocation LBrace = Body->getLocStart(), RBrace = Body->getLocEnd();
+ // Either in a function body compound statement, or a function-try-block.
+ switch (CheckFallThrough(AC)) {
+ case UnknownFallThrough:
+ break;
+
+ case MaybeFallThrough:
+ if (HasNoReturn)
+ S.Diag(RBrace, CD.diag_MaybeFallThrough_HasNoReturn);
+ else if (!ReturnsVoid)
+ S.Diag(RBrace, CD.diag_MaybeFallThrough_ReturnsNonVoid);
+ break;
+ case AlwaysFallThrough:
+ if (HasNoReturn)
+ S.Diag(RBrace, CD.diag_AlwaysFallThrough_HasNoReturn);
+ else if (!ReturnsVoid)
+ S.Diag(RBrace, CD.diag_AlwaysFallThrough_ReturnsNonVoid);
+ break;
+ case NeverFallThroughOrReturn:
+ if (ReturnsVoid && !HasNoReturn && CD.diag_NeverFallThroughOrReturn) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn) << 0 << FD;
+ } else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn) << 1 << MD;
+ } else {
+ S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn);
+ }
+ }
+ break;
+ case NeverFallThrough:
+ break;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// -Wuninitialized
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// ContainsReference - A visitor class to search for references to
+/// a particular declaration (the needle) within any evaluated component of an
+/// expression (recursively).
+class ContainsReference : public ConstEvaluatedExprVisitor<ContainsReference> {
+ bool FoundReference;
+ const DeclRefExpr *Needle;
+
+public:
+ typedef ConstEvaluatedExprVisitor<ContainsReference> Inherited;
+
+ ContainsReference(ASTContext &Context, const DeclRefExpr *Needle)
+ : Inherited(Context), FoundReference(false), Needle(Needle) {}
+
+ void VisitExpr(const Expr *E) {
+ // Stop evaluating if we already have a reference.
+ if (FoundReference)
+ return;
+
+ Inherited::VisitExpr(E);
+ }
+
+ void VisitDeclRefExpr(const DeclRefExpr *E) {
+ if (E == Needle)
+ FoundReference = true;
+ else
+ Inherited::VisitDeclRefExpr(E);
+ }
+
+ bool doesContainReference() const { return FoundReference; }
+};
+} // anonymous namespace
+
+static bool SuggestInitializationFixit(Sema &S, const VarDecl *VD) {
+ QualType VariableTy = VD->getType().getCanonicalType();
+ if (VariableTy->isBlockPointerType() &&
+ !VD->hasAttr<BlocksAttr>()) {
+ S.Diag(VD->getLocation(), diag::note_block_var_fixit_add_initialization)
+ << VD->getDeclName()
+ << FixItHint::CreateInsertion(VD->getLocation(), "__block ");
+ return true;
+ }
+
+ // Don't issue a fixit if there is already an initializer.
+ if (VD->getInit())
+ return false;
+
+ // Don't suggest a fixit inside macros.
+ if (VD->getLocEnd().isMacroID())
+ return false;
+
+ SourceLocation Loc = S.getLocForEndOfToken(VD->getLocEnd());
+
+ // Suggest possible initialization (if any).
+ std::string Init = S.getFixItZeroInitializerForType(VariableTy, Loc);
+ if (Init.empty())
+ return false;
+
+ S.Diag(Loc, diag::note_var_fixit_add_initialization) << VD->getDeclName()
+ << FixItHint::CreateInsertion(Loc, Init);
+ return true;
+}
+
+/// Create a fixit to remove an if-like statement, on the assumption that its
+/// condition is CondVal.
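+/// For example, for 'if (b) f(); else g();' known to be true, Fixit1 removes
+/// the leading 'if (b) ' and Fixit2 removes the else branch, leaving just the
+/// then-statement.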
+static void CreateIfFixit(Sema &S, const Stmt *If, const Stmt *Then,
+ const Stmt *Else, bool CondVal,
+ FixItHint &Fixit1, FixItHint &Fixit2) {
+ if (CondVal) {
+ // If condition is always true, remove all but the 'then'.
+ Fixit1 = FixItHint::CreateRemoval(
+ CharSourceRange::getCharRange(If->getLocStart(),
+ Then->getLocStart()));
+ if (Else) {
+ SourceLocation ElseKwLoc = S.getLocForEndOfToken(Then->getLocEnd());
+ Fixit2 = FixItHint::CreateRemoval(
+ SourceRange(ElseKwLoc, Else->getLocEnd()));
+ }
+ } else {
+ // If condition is always false, remove all but the 'else'.
+ if (Else)
+ Fixit1 = FixItHint::CreateRemoval(
+ CharSourceRange::getCharRange(If->getLocStart(),
+ Else->getLocStart()));
+ else
+ Fixit1 = FixItHint::CreateRemoval(If->getSourceRange());
+ }
+}
+
+/// DiagUninitUse -- Helper function to produce a diagnostic for an
+/// uninitialized use of a variable.
+static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use,
+ bool IsCapturedByBlock) {
+ bool Diagnosed = false;
+
+ switch (Use.getKind()) {
+ case UninitUse::Always:
+ S.Diag(Use.getUser()->getLocStart(), diag::warn_uninit_var)
+ << VD->getDeclName() << IsCapturedByBlock
+ << Use.getUser()->getSourceRange();
+ return;
+
+ case UninitUse::AfterDecl:
+ case UninitUse::AfterCall:
+ S.Diag(VD->getLocation(), diag::warn_sometimes_uninit_var)
+ << VD->getDeclName() << IsCapturedByBlock
+ << (Use.getKind() == UninitUse::AfterDecl ? 4 : 5)
+ << const_cast<DeclContext*>(VD->getLexicalDeclContext())
+ << VD->getSourceRange();
+ S.Diag(Use.getUser()->getLocStart(), diag::note_uninit_var_use)
+ << IsCapturedByBlock << Use.getUser()->getSourceRange();
+ return;
+
+ case UninitUse::Maybe:
+ case UninitUse::Sometimes:
+ // Carry on to report sometimes-uninitialized branches, if possible,
+ // or a 'may be used uninitialized' diagnostic otherwise.
+ break;
+ }
+
+ // Diagnose each branch which leads to a sometimes-uninitialized use.
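+  // For example, given
+  //   int x;
+  //   if (b) x = 1;
+  //   use(x);
+  // the use is sometimes-uninitialized: we report the branch where 'b' is
+  // false and offer a fixit to remove the dead condition.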
+ for (UninitUse::branch_iterator I = Use.branch_begin(), E = Use.branch_end();
+ I != E; ++I) {
+ assert(Use.getKind() == UninitUse::Sometimes);
+
+ const Expr *User = Use.getUser();
+ const Stmt *Term = I->Terminator;
+
+ // Information used when building the diagnostic.
+ unsigned DiagKind;
+ StringRef Str;
+ SourceRange Range;
+
+ // FixIts to suppress the diagnostic by removing the dead condition.
+ // For all binary terminators, branch 0 is taken if the condition is true,
+ // and branch 1 is taken if the condition is false.
+ int RemoveDiagKind = -1;
+ const char *FixitStr =
+ S.getLangOpts().CPlusPlus ? (I->Output ? "true" : "false")
+ : (I->Output ? "1" : "0");
+ FixItHint Fixit1, Fixit2;
+
+ switch (Term ? Term->getStmtClass() : Stmt::DeclStmtClass) {
+ default:
+ // Don't know how to report this. Just fall back to 'may be used
+ // uninitialized'. FIXME: Can this happen?
+ continue;
+
+ // "condition is true / condition is false".
+ case Stmt::IfStmtClass: {
+ const IfStmt *IS = cast<IfStmt>(Term);
+ DiagKind = 0;
+ Str = "if";
+ Range = IS->getCond()->getSourceRange();
+ RemoveDiagKind = 0;
+ CreateIfFixit(S, IS, IS->getThen(), IS->getElse(),
+ I->Output, Fixit1, Fixit2);
+ break;
+ }
+ case Stmt::ConditionalOperatorClass: {
+ const ConditionalOperator *CO = cast<ConditionalOperator>(Term);
+ DiagKind = 0;
+ Str = "?:";
+ Range = CO->getCond()->getSourceRange();
+ RemoveDiagKind = 0;
+ CreateIfFixit(S, CO, CO->getTrueExpr(), CO->getFalseExpr(),
+ I->Output, Fixit1, Fixit2);
+ break;
+ }
+ case Stmt::BinaryOperatorClass: {
+ const BinaryOperator *BO = cast<BinaryOperator>(Term);
+ if (!BO->isLogicalOp())
+ continue;
+ DiagKind = 0;
+ Str = BO->getOpcodeStr();
+ Range = BO->getLHS()->getSourceRange();
+ RemoveDiagKind = 0;
+ if ((BO->getOpcode() == BO_LAnd && I->Output) ||
+ (BO->getOpcode() == BO_LOr && !I->Output))
+ // true && y -> y, false || y -> y.
+ Fixit1 = FixItHint::CreateRemoval(SourceRange(BO->getLocStart(),
+ BO->getOperatorLoc()));
+ else
+ // false && y -> false, true || y -> true.
+ Fixit1 = FixItHint::CreateReplacement(BO->getSourceRange(), FixitStr);
+ break;
+ }
+
+ // "loop is entered / loop is exited".
+ case Stmt::WhileStmtClass:
+ DiagKind = 1;
+ Str = "while";
+ Range = cast<WhileStmt>(Term)->getCond()->getSourceRange();
+ RemoveDiagKind = 1;
+ Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
+ break;
+ case Stmt::ForStmtClass:
+ DiagKind = 1;
+ Str = "for";
+ Range = cast<ForStmt>(Term)->getCond()->getSourceRange();
+ RemoveDiagKind = 1;
+ if (I->Output)
+ Fixit1 = FixItHint::CreateRemoval(Range);
+ else
+ Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
+ break;
+ case Stmt::CXXForRangeStmtClass:
+ if (I->Output == 1) {
+ // The use occurs if a range-based for loop's body never executes.
+ // That may be impossible, and there's no syntactic fix for this,
+ // so treat it as a 'may be uninitialized' case.
+ continue;
+ }
+ DiagKind = 1;
+ Str = "for";
+ Range = cast<CXXForRangeStmt>(Term)->getRangeInit()->getSourceRange();
+ break;
+
+ // "condition is true / loop is exited".
+ case Stmt::DoStmtClass:
+ DiagKind = 2;
+ Str = "do";
+ Range = cast<DoStmt>(Term)->getCond()->getSourceRange();
+ RemoveDiagKind = 1;
+ Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
+ break;
+
+ // "switch case is taken".
+ case Stmt::CaseStmtClass:
+ DiagKind = 3;
+ Str = "case";
+ Range = cast<CaseStmt>(Term)->getLHS()->getSourceRange();
+ break;
+ case Stmt::DefaultStmtClass:
+ DiagKind = 3;
+ Str = "default";
+ Range = cast<DefaultStmt>(Term)->getDefaultLoc();
+ break;
+ }
+
+ S.Diag(Range.getBegin(), diag::warn_sometimes_uninit_var)
+ << VD->getDeclName() << IsCapturedByBlock << DiagKind
+ << Str << I->Output << Range;
+ S.Diag(User->getLocStart(), diag::note_uninit_var_use)
+ << IsCapturedByBlock << User->getSourceRange();
+ if (RemoveDiagKind != -1)
+ S.Diag(Fixit1.RemoveRange.getBegin(), diag::note_uninit_fixit_remove_cond)
+ << RemoveDiagKind << Str << I->Output << Fixit1 << Fixit2;
+
+ Diagnosed = true;
+ }
+
+ if (!Diagnosed)
+ S.Diag(Use.getUser()->getLocStart(), diag::warn_maybe_uninit_var)
+ << VD->getDeclName() << IsCapturedByBlock
+ << Use.getUser()->getSourceRange();
+}
+
+/// DiagnoseUninitializedUse -- Helper function for diagnosing uses of an
+/// uninitialized variable. This manages the different forms of diagnostic
+/// emitted for particular types of uses. Returns true if the use was diagnosed
+/// as a warning. If a particular use is one we omit warnings for, returns
+/// false.
+static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD,
+ const UninitUse &Use,
+ bool alwaysReportSelfInit = false) {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Use.getUser())) {
+ // Inspect the initializer of the variable declaration which is
+ // being referenced prior to its initialization. We emit
+ // specialized diagnostics for self-initialization, and we
+ // specifically avoid warning about self references which take the
+ // form of:
+ //
+ // int x = x;
+ //
+ // This is used to indicate to GCC that 'x' is intentionally left
+ // uninitialized. Proven code paths which access 'x' in
+ // an uninitialized state after this will still warn.
+ if (const Expr *Initializer = VD->getInit()) {
+ if (!alwaysReportSelfInit && DRE == Initializer->IgnoreParenImpCasts())
+ return false;
+
+ ContainsReference CR(S.Context, DRE);
+ CR.Visit(Initializer);
+ if (CR.doesContainReference()) {
+ S.Diag(DRE->getLocStart(),
+ diag::warn_uninit_self_reference_in_init)
+ << VD->getDeclName() << VD->getLocation() << DRE->getSourceRange();
+ return true;
+ }
+ }
+
+ DiagUninitUse(S, VD, Use, false);
+ } else {
+ const BlockExpr *BE = cast<BlockExpr>(Use.getUser());
+ if (VD->getType()->isBlockPointerType() && !VD->hasAttr<BlocksAttr>())
+ S.Diag(BE->getLocStart(),
+ diag::warn_uninit_byref_blockvar_captured_by_block)
+ << VD->getDeclName();
+ else
+ DiagUninitUse(S, VD, Use, true);
+ }
+
+  // Report where the variable was declared when the use wasn't within
+  // the initializer of that declaration and we didn't already suggest
+  // an initialization fixit.
+ if (!SuggestInitializationFixit(S, VD))
+ S.Diag(VD->getLocStart(), diag::note_uninit_var_def)
+ << VD->getDeclName();
+
+ return true;
+}
+
+namespace {
+ class FallthroughMapper : public RecursiveASTVisitor<FallthroughMapper> {
+ public:
+ FallthroughMapper(Sema &S)
+ : FoundSwitchStatements(false),
+ S(S) {
+ }
+
+ bool foundSwitchStatements() const { return FoundSwitchStatements; }
+
+ void markFallthroughVisited(const AttributedStmt *Stmt) {
+ bool Found = FallthroughStmts.erase(Stmt);
+ assert(Found);
+ (void)Found;
+ }
+
+ typedef llvm::SmallPtrSet<const AttributedStmt*, 8> AttrStmts;
+
+ const AttrStmts &getFallthroughStmts() const {
+ return FallthroughStmts;
+ }
+
+ void fillReachableBlocks(CFG *Cfg) {
+ assert(ReachableBlocks.empty() && "ReachableBlocks already filled");
+ std::deque<const CFGBlock *> BlockQueue;
+
+ ReachableBlocks.insert(&Cfg->getEntry());
+ BlockQueue.push_back(&Cfg->getEntry());
+ // Mark all case blocks reachable to avoid problems with switching on
+ // constants, covered enums, etc.
+ // These blocks can contain fall-through annotations, and we don't want to
+ // issue a warn_fallthrough_attr_unreachable for them.
+ for (const auto *B : *Cfg) {
+ const Stmt *L = B->getLabel();
+ if (L && isa<SwitchCase>(L) && ReachableBlocks.insert(B).second)
+ BlockQueue.push_back(B);
+ }
+
+ while (!BlockQueue.empty()) {
+ const CFGBlock *P = BlockQueue.front();
+ BlockQueue.pop_front();
+ for (CFGBlock::const_succ_iterator I = P->succ_begin(),
+ E = P->succ_end();
+ I != E; ++I) {
+ if (*I && ReachableBlocks.insert(*I).second)
+ BlockQueue.push_back(*I);
+ }
+ }
+ }
+
+ bool checkFallThroughIntoBlock(const CFGBlock &B, int &AnnotatedCnt) {
+ assert(!ReachableBlocks.empty() && "ReachableBlocks empty");
+
+ int UnannotatedCnt = 0;
+ AnnotatedCnt = 0;
+
+ std::deque<const CFGBlock*> BlockQueue(B.pred_begin(), B.pred_end());
+ while (!BlockQueue.empty()) {
+ const CFGBlock *P = BlockQueue.front();
+ BlockQueue.pop_front();
+ if (!P) continue;
+
+ const Stmt *Term = P->getTerminator();
+ if (Term && isa<SwitchStmt>(Term))
+ continue; // Switch statement, good.
+
+ const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(P->getLabel());
+ if (SW && SW->getSubStmt() == B.getLabel() && P->begin() == P->end())
+ continue; // Previous case label has no statements, good.
+
+ const LabelStmt *L = dyn_cast_or_null<LabelStmt>(P->getLabel());
+ if (L && L->getSubStmt() == B.getLabel() && P->begin() == P->end())
+ continue; // Case label is preceded with a normal label, good.
+
+ if (!ReachableBlocks.count(P)) {
+ for (CFGBlock::const_reverse_iterator ElemIt = P->rbegin(),
+ ElemEnd = P->rend();
+ ElemIt != ElemEnd; ++ElemIt) {
+ if (Optional<CFGStmt> CS = ElemIt->getAs<CFGStmt>()) {
+ if (const AttributedStmt *AS = asFallThroughAttr(CS->getStmt())) {
+ S.Diag(AS->getLocStart(),
+ diag::warn_fallthrough_attr_unreachable);
+ markFallthroughVisited(AS);
+ ++AnnotatedCnt;
+ break;
+ }
+ // Don't care about other unreachable statements.
+ }
+ }
+          // If there are no unreachable statements, this may be a special
+          // case in the CFG:
+ // case X: {
+ // A a; // A has a destructor.
+ // break;
+ // }
+ // // <<<< This place is represented by a 'hanging' CFG block.
+ // case Y:
+ continue;
+ }
+
+ const Stmt *LastStmt = getLastStmt(*P);
+ if (const AttributedStmt *AS = asFallThroughAttr(LastStmt)) {
+ markFallthroughVisited(AS);
+ ++AnnotatedCnt;
+ continue; // Fallthrough annotation, good.
+ }
+
+ if (!LastStmt) { // This block contains no executable statements.
+ // Traverse its predecessors.
+ std::copy(P->pred_begin(), P->pred_end(),
+ std::back_inserter(BlockQueue));
+ continue;
+ }
+
+ ++UnannotatedCnt;
+ }
+ return !!UnannotatedCnt;
+ }
+
+ // RecursiveASTVisitor setup.
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool VisitAttributedStmt(AttributedStmt *S) {
+ if (asFallThroughAttr(S))
+ FallthroughStmts.insert(S);
+ return true;
+ }
+
+ bool VisitSwitchStmt(SwitchStmt *S) {
+ FoundSwitchStatements = true;
+ return true;
+ }
+
+ // We don't want to traverse local type declarations. We analyze their
+ // methods separately.
+ bool TraverseDecl(Decl *D) { return true; }
+
+ // We analyze lambda bodies separately. Skip them here.
+ bool TraverseLambdaBody(LambdaExpr *LE) { return true; }
+
+ private:
+
+ static const AttributedStmt *asFallThroughAttr(const Stmt *S) {
+ if (const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(S)) {
+ if (hasSpecificAttr<FallThroughAttr>(AS->getAttrs()))
+ return AS;
+ }
+ return nullptr;
+ }
+
+ static const Stmt *getLastStmt(const CFGBlock &B) {
+ if (const Stmt *Term = B.getTerminator())
+ return Term;
+ for (CFGBlock::const_reverse_iterator ElemIt = B.rbegin(),
+ ElemEnd = B.rend();
+ ElemIt != ElemEnd; ++ElemIt) {
+ if (Optional<CFGStmt> CS = ElemIt->getAs<CFGStmt>())
+ return CS->getStmt();
+ }
+ // Workaround to detect a statement thrown out by CFGBuilder:
+ // case X: {} case Y:
+ // case X: ; case Y:
+ if (const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(B.getLabel()))
+ if (!isa<SwitchCase>(SW->getSubStmt()))
+ return SW->getSubStmt();
+
+ return nullptr;
+ }
+
+ bool FoundSwitchStatements;
+ AttrStmts FallthroughStmts;
+ Sema &S;
+ llvm::SmallPtrSet<const CFGBlock *, 16> ReachableBlocks;
+ };
+} // anonymous namespace
+
+static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
+ bool PerFunction) {
+  // Only perform this analysis when using C++11. There is no good workflow
+  // for this warning when not using C++11: without C++11's support for
+  // generalized attributes, there is no attribute available to silence it.
+  // One could use pragmas to silence the warning, but as a general solution
+  // that is gross and not in the spirit of this warning.
+  //
+  // NOTE: This is an intermediate solution. There are on-going discussions on
+  // how to properly support this warning outside of C++11 with an annotation.
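+  // For illustration, the annotation this analysis looks for is:
+  //   switch (n) {
+  //   case 0:
+  //     f();
+  //     [[clang::fallthrough]];  // suppresses -Wimplicit-fallthrough here
+  //   case 1:
+  //     break;
+  //   }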
+ if (!AC.getASTContext().getLangOpts().CPlusPlus11)
+ return;
+
+ FallthroughMapper FM(S);
+ FM.TraverseStmt(AC.getBody());
+
+ if (!FM.foundSwitchStatements())
+ return;
+
+ if (PerFunction && FM.getFallthroughStmts().empty())
+ return;
+
+ CFG *Cfg = AC.getCFG();
+
+ if (!Cfg)
+ return;
+
+ FM.fillReachableBlocks(Cfg);
+
+ for (const CFGBlock *B : llvm::reverse(*Cfg)) {
+ const Stmt *Label = B->getLabel();
+
+ if (!Label || !isa<SwitchCase>(Label))
+ continue;
+
+ int AnnotatedCnt;
+
+ if (!FM.checkFallThroughIntoBlock(*B, AnnotatedCnt))
+ continue;
+
+ S.Diag(Label->getLocStart(),
+ PerFunction ? diag::warn_unannotated_fallthrough_per_function
+ : diag::warn_unannotated_fallthrough);
+
+ if (!AnnotatedCnt) {
+ SourceLocation L = Label->getLocStart();
+ if (L.isMacroID())
+ continue;
+ if (S.getLangOpts().CPlusPlus11) {
+ const Stmt *Term = B->getTerminator();
+ // Skip empty cases.
+ while (B->empty() && !Term && B->succ_size() == 1) {
+ B = *B->succ_begin();
+ Term = B->getTerminator();
+ }
+ if (!(B->empty() && Term && isa<BreakStmt>(Term))) {
+ Preprocessor &PP = S.getPreprocessor();
+ TokenValue Tokens[] = {
+ tok::l_square, tok::l_square, PP.getIdentifierInfo("clang"),
+ tok::coloncolon, PP.getIdentifierInfo("fallthrough"),
+ tok::r_square, tok::r_square
+ };
+ StringRef AnnotationSpelling = "[[clang::fallthrough]]";
+ StringRef MacroName = PP.getLastMacroWithSpelling(L, Tokens);
+ if (!MacroName.empty())
+ AnnotationSpelling = MacroName;
+ SmallString<64> TextToInsert(AnnotationSpelling);
+ TextToInsert += "; ";
+ S.Diag(L, diag::note_insert_fallthrough_fixit) <<
+ AnnotationSpelling <<
+ FixItHint::CreateInsertion(L, TextToInsert);
+ }
+ }
+ S.Diag(L, diag::note_insert_break_fixit) <<
+ FixItHint::CreateInsertion(L, "break; ");
+ }
+ }
+
+ for (const auto *F : FM.getFallthroughStmts())
+ S.Diag(F->getLocStart(), diag::warn_fallthrough_attr_invalid_placement);
+}
+
+static bool isInLoop(const ASTContext &Ctx, const ParentMap &PM,
+ const Stmt *S) {
+ assert(S);
+
+ do {
+ switch (S->getStmtClass()) {
+ case Stmt::ForStmtClass:
+ case Stmt::WhileStmtClass:
+ case Stmt::CXXForRangeStmtClass:
+ case Stmt::ObjCForCollectionStmtClass:
+ return true;
+ case Stmt::DoStmtClass: {
+ const Expr *Cond = cast<DoStmt>(S)->getCond();
+ llvm::APSInt Val;
+ if (!Cond->EvaluateAsInt(Val, Ctx))
+ return true;
+ return Val.getBoolValue();
+ }
+ default:
+ break;
+ }
+ } while ((S = PM.getParent(S)));
+
+ return false;
+}
+
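+// Warn when a __weak object is read repeatedly, e.g.
+//   if (self.weakProp)
+//     [self.weakProp doSomething];
+// Every read reloads the weak reference, so the second read may observe nil
+// even though the first did not.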
+static void diagnoseRepeatedUseOfWeak(Sema &S,
+ const sema::FunctionScopeInfo *CurFn,
+ const Decl *D,
+ const ParentMap &PM) {
+ typedef sema::FunctionScopeInfo::WeakObjectProfileTy WeakObjectProfileTy;
+ typedef sema::FunctionScopeInfo::WeakObjectUseMap WeakObjectUseMap;
+ typedef sema::FunctionScopeInfo::WeakUseVector WeakUseVector;
+ typedef std::pair<const Stmt *, WeakObjectUseMap::const_iterator>
+ StmtUsesPair;
+
+ ASTContext &Ctx = S.getASTContext();
+
+ const WeakObjectUseMap &WeakMap = CurFn->getWeakObjectUses();
+
+ // Extract all weak objects that are referenced more than once.
+ SmallVector<StmtUsesPair, 8> UsesByStmt;
+ for (WeakObjectUseMap::const_iterator I = WeakMap.begin(), E = WeakMap.end();
+ I != E; ++I) {
+ const WeakUseVector &Uses = I->second;
+
+ // Find the first read of the weak object.
+ WeakUseVector::const_iterator UI = Uses.begin(), UE = Uses.end();
+ for ( ; UI != UE; ++UI) {
+ if (UI->isUnsafe())
+ break;
+ }
+
+ // If there were only writes to this object, don't warn.
+ if (UI == UE)
+ continue;
+
+ // If there was only one read, followed by any number of writes, and the
+ // read is not within a loop, don't warn. Additionally, don't warn in a
+ // loop if the base object is a local variable -- local variables are often
+ // changed in loops.
+ if (UI == Uses.begin()) {
+ WeakUseVector::const_iterator UI2 = UI;
+ for (++UI2; UI2 != UE; ++UI2)
+ if (UI2->isUnsafe())
+ break;
+
+ if (UI2 == UE) {
+ if (!isInLoop(Ctx, PM, UI->getUseExpr()))
+ continue;
+
+ const WeakObjectProfileTy &Profile = I->first;
+ if (!Profile.isExactProfile())
+ continue;
+
+ const NamedDecl *Base = Profile.getBase();
+ if (!Base)
+ Base = Profile.getProperty();
+ assert(Base && "A profile always has a base or property.");
+
+ if (const VarDecl *BaseVar = dyn_cast<VarDecl>(Base))
+ if (BaseVar->hasLocalStorage() && !isa<ParmVarDecl>(Base))
+ continue;
+ }
+ }
+
+ UsesByStmt.push_back(StmtUsesPair(UI->getUseExpr(), I));
+ }
+
+ if (UsesByStmt.empty())
+ return;
+
+ // Sort by first use so that we emit the warnings in a deterministic order.
+ SourceManager &SM = S.getSourceManager();
+ std::sort(UsesByStmt.begin(), UsesByStmt.end(),
+ [&SM](const StmtUsesPair &LHS, const StmtUsesPair &RHS) {
+ return SM.isBeforeInTranslationUnit(LHS.first->getLocStart(),
+ RHS.first->getLocStart());
+ });
+
+ // Classify the current code body for better warning text.
+ // This enum should stay in sync with the cases in
+ // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak.
+ // FIXME: Should we use a common classification enum and the same set of
+ // possibilities all throughout Sema?
+ enum {
+ Function,
+ Method,
+ Block,
+ Lambda
+ } FunctionKind;
+
+ if (isa<sema::BlockScopeInfo>(CurFn))
+ FunctionKind = Block;
+ else if (isa<sema::LambdaScopeInfo>(CurFn))
+ FunctionKind = Lambda;
+ else if (isa<ObjCMethodDecl>(D))
+ FunctionKind = Method;
+ else
+ FunctionKind = Function;
+
+ // Iterate through the sorted problems and emit warnings for each.
+ for (const auto &P : UsesByStmt) {
+ const Stmt *FirstRead = P.first;
+ const WeakObjectProfileTy &Key = P.second->first;
+ const WeakUseVector &Uses = P.second->second;
+
+ // For complicated expressions like 'a.b.c' and 'x.b.c', WeakObjectProfileTy
+ // may not contain enough information to determine that these are different
+ // properties. We can only be 100% sure of a repeated use in certain cases,
+ // and we adjust the diagnostic kind accordingly so that the less certain
+ // case can be turned off if it is too noisy.
+ unsigned DiagKind;
+ if (Key.isExactProfile())
+ DiagKind = diag::warn_arc_repeated_use_of_weak;
+ else
+ DiagKind = diag::warn_arc_possible_repeated_use_of_weak;
+
+ // Classify the weak object being accessed for better warning text.
+ // This enum should stay in sync with the cases in
+ // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak.
+ enum {
+ Variable,
+ Property,
+ ImplicitProperty,
+ Ivar
+ } ObjectKind;
+
+ const NamedDecl *D = Key.getProperty();
+ if (isa<VarDecl>(D))
+ ObjectKind = Variable;
+ else if (isa<ObjCPropertyDecl>(D))
+ ObjectKind = Property;
+ else if (isa<ObjCMethodDecl>(D))
+ ObjectKind = ImplicitProperty;
+ else if (isa<ObjCIvarDecl>(D))
+ ObjectKind = Ivar;
+ else
+ llvm_unreachable("Unexpected weak object kind!");
+
+ // Show the first time the object was read.
+ S.Diag(FirstRead->getLocStart(), DiagKind)
+ << int(ObjectKind) << D << int(FunctionKind)
+ << FirstRead->getSourceRange();
+
+ // Print all the other accesses as notes.
+ for (const auto &Use : Uses) {
+ if (Use.getUseExpr() == FirstRead)
+ continue;
+ S.Diag(Use.getUseExpr()->getLocStart(),
+ diag::note_arc_weak_also_accessed_here)
+ << Use.getUseExpr()->getSourceRange();
+ }
+ }
+}
+
+namespace {
+class UninitValsDiagReporter : public UninitVariablesHandler {
+ Sema &S;
+ typedef SmallVector<UninitUse, 2> UsesVec;
+ typedef llvm::PointerIntPair<UsesVec *, 1, bool> MappedType;
+ // Prefer using MapVector to DenseMap, so that iteration order will be
+ // the same as insertion order. This is needed to obtain a deterministic
+ // order of diagnostics when calling flushDiagnostics().
+ typedef llvm::MapVector<const VarDecl *, MappedType> UsesMap;
+ UsesMap uses;
+
+public:
+ UninitValsDiagReporter(Sema &S) : S(S) {}
+ ~UninitValsDiagReporter() override { flushDiagnostics(); }
+
+ MappedType &getUses(const VarDecl *vd) {
+ MappedType &V = uses[vd];
+ if (!V.getPointer())
+ V.setPointer(new UsesVec());
+ return V;
+ }
+
+ void handleUseOfUninitVariable(const VarDecl *vd,
+ const UninitUse &use) override {
+ getUses(vd).getPointer()->push_back(use);
+ }
+
+ void handleSelfInit(const VarDecl *vd) override {
+ getUses(vd).setInt(true);
+ }
+
+ void flushDiagnostics() {
+ for (const auto &P : uses) {
+ const VarDecl *vd = P.first;
+ const MappedType &V = P.second;
+
+ UsesVec *vec = V.getPointer();
+ bool hasSelfInit = V.getInt();
+
+      // Specially handle the case where we have uses of an uninitialized
+      // variable whose root cause is an idiomatic self-init; report the
+      // diagnostic at the self-init, since that is the root cause.
+ if (!vec->empty() && hasSelfInit && hasAlwaysUninitializedUse(vec))
+ DiagnoseUninitializedUse(S, vd,
+ UninitUse(vd->getInit()->IgnoreParenCasts(),
+ /* isAlwaysUninit */ true),
+ /* alwaysReportSelfInit */ true);
+ else {
+ // Sort the uses by their SourceLocations. While not strictly
+ // guaranteed to produce them in line/column order, this will provide
+ // a stable ordering.
+ std::sort(vec->begin(), vec->end(),
+ [](const UninitUse &a, const UninitUse &b) {
+ // Prefer a more confident report over a less confident one.
+ if (a.getKind() != b.getKind())
+ return a.getKind() > b.getKind();
+ return a.getUser()->getLocStart() < b.getUser()->getLocStart();
+ });
+
+ for (const auto &U : *vec) {
+ // If we have self-init, downgrade all uses to 'may be uninitialized'.
+ UninitUse Use = hasSelfInit ? UninitUse(U.getUser(), false) : U;
+
+ if (DiagnoseUninitializedUse(S, vd, Use))
+ // Skip further diagnostics for this variable. We try to warn only
+ // on the first point at which a variable is used uninitialized.
+ break;
+ }
+ }
+
+ // Release the uses vector.
+ delete vec;
+ }
+
+ uses.clear();
+ }
+
+private:
+ static bool hasAlwaysUninitializedUse(const UsesVec* vec) {
+ return std::any_of(vec->begin(), vec->end(), [](const UninitUse &U) {
+ return U.getKind() == UninitUse::Always ||
+ U.getKind() == UninitUse::AfterCall ||
+ U.getKind() == UninitUse::AfterDecl;
+ });
+ }
+};
+} // anonymous namespace
+
+namespace clang {
+namespace {
+typedef SmallVector<PartialDiagnosticAt, 1> OptionalNotes;
+typedef std::pair<PartialDiagnosticAt, OptionalNotes> DelayedDiag;
+typedef std::list<DelayedDiag> DiagList;
+
+struct SortDiagBySourceLocation {
+ SourceManager &SM;
+ SortDiagBySourceLocation(SourceManager &SM) : SM(SM) {}
+
+ bool operator()(const DelayedDiag &left, const DelayedDiag &right) {
+ // Although this call will be slow, this is only called when outputting
+ // multiple warnings.
+ return SM.isBeforeInTranslationUnit(left.first.first, right.first.first);
+ }
+};
+} // anonymous namespace
+} // namespace clang
+
+//===----------------------------------------------------------------------===//
+// -Wthread-safety
+//===----------------------------------------------------------------------===//
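+// For example, given 'int Data GUARDED_BY(Mu);', reading 'Data' without
+// holding 'Mu' is reported through the handleMutexNotHeld callback below
+// (warn_variable_requires_lock).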
+namespace clang {
+namespace threadSafety {
+namespace {
+class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
+ Sema &S;
+ DiagList Warnings;
+ SourceLocation FunLocation, FunEndLocation;
+
+ const FunctionDecl *CurrentFunction;
+ bool Verbose;
+
+ OptionalNotes getNotes() const {
+ if (Verbose && CurrentFunction) {
+ PartialDiagnosticAt FNote(CurrentFunction->getBody()->getLocStart(),
+ S.PDiag(diag::note_thread_warning_in_fun)
+ << CurrentFunction->getNameAsString());
+ return OptionalNotes(1, FNote);
+ }
+ return OptionalNotes();
+ }
+
+ OptionalNotes getNotes(const PartialDiagnosticAt &Note) const {
+ OptionalNotes ONS(1, Note);
+ if (Verbose && CurrentFunction) {
+ PartialDiagnosticAt FNote(CurrentFunction->getBody()->getLocStart(),
+ S.PDiag(diag::note_thread_warning_in_fun)
+ << CurrentFunction->getNameAsString());
+ ONS.push_back(std::move(FNote));
+ }
+ return ONS;
+ }
+
+ OptionalNotes getNotes(const PartialDiagnosticAt &Note1,
+ const PartialDiagnosticAt &Note2) const {
+ OptionalNotes ONS;
+ ONS.push_back(Note1);
+ ONS.push_back(Note2);
+ if (Verbose && CurrentFunction) {
+ PartialDiagnosticAt FNote(CurrentFunction->getBody()->getLocStart(),
+ S.PDiag(diag::note_thread_warning_in_fun)
+ << CurrentFunction->getNameAsString());
+ ONS.push_back(std::move(FNote));
+ }
+ return ONS;
+ }
+
+ // Helper functions
+ void warnLockMismatch(unsigned DiagID, StringRef Kind, Name LockName,
+ SourceLocation Loc) {
+ // Gracefully handle rare cases when the analysis can't get a more
+ // precise source location.
+ if (!Loc.isValid())
+ Loc = FunLocation;
+ PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind << LockName);
+ Warnings.emplace_back(std::move(Warning), getNotes());
+ }
+
+ public:
+ ThreadSafetyReporter(Sema &S, SourceLocation FL, SourceLocation FEL)
+ : S(S), FunLocation(FL), FunEndLocation(FEL),
+ CurrentFunction(nullptr), Verbose(false) {}
+
+ void setVerbose(bool b) { Verbose = b; }
+
+  /// \brief Emit all buffered diagnostics in order of source location.
+ /// We need to output diagnostics produced while iterating through
+ /// the lockset in deterministic order, so this function orders diagnostics
+ /// and outputs them.
+ void emitDiagnostics() {
+ Warnings.sort(SortDiagBySourceLocation(S.getSourceManager()));
+ for (const auto &Diag : Warnings) {
+ S.Diag(Diag.first.first, Diag.first.second);
+ for (const auto &Note : Diag.second)
+ S.Diag(Note.first, Note.second);
+ }
+ }
+
+ void handleInvalidLockExp(StringRef Kind, SourceLocation Loc) override {
+ PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_cannot_resolve_lock)
+ << Loc);
+ Warnings.emplace_back(std::move(Warning), getNotes());
+ }
+
+ void handleUnmatchedUnlock(StringRef Kind, Name LockName,
+ SourceLocation Loc) override {
+ warnLockMismatch(diag::warn_unlock_but_no_lock, Kind, LockName, Loc);
+ }
+
+ void handleIncorrectUnlockKind(StringRef Kind, Name LockName,
+ LockKind Expected, LockKind Received,
+ SourceLocation Loc) override {
+ if (Loc.isInvalid())
+ Loc = FunLocation;
+ PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_unlock_kind_mismatch)
+ << Kind << LockName << Received
+ << Expected);
+ Warnings.emplace_back(std::move(Warning), getNotes());
+ }
+
+  void handleDoubleLock(StringRef Kind, Name LockName,
+                        SourceLocation Loc) override {
+ warnLockMismatch(diag::warn_double_lock, Kind, LockName, Loc);
+ }
+
+ void handleMutexHeldEndOfScope(StringRef Kind, Name LockName,
+ SourceLocation LocLocked,
+ SourceLocation LocEndOfScope,
+ LockErrorKind LEK) override {
+ unsigned DiagID = 0;
+ switch (LEK) {
+ case LEK_LockedSomePredecessors:
+ DiagID = diag::warn_lock_some_predecessors;
+ break;
+ case LEK_LockedSomeLoopIterations:
+ DiagID = diag::warn_expecting_lock_held_on_loop;
+ break;
+ case LEK_LockedAtEndOfFunction:
+ DiagID = diag::warn_no_unlock;
+ break;
+ case LEK_NotLockedAtEndOfFunction:
+ DiagID = diag::warn_expecting_locked;
+ break;
+ }
+ if (LocEndOfScope.isInvalid())
+ LocEndOfScope = FunEndLocation;
+
+ PartialDiagnosticAt Warning(LocEndOfScope, S.PDiag(DiagID) << Kind
+ << LockName);
+ if (LocLocked.isValid()) {
+ PartialDiagnosticAt Note(LocLocked, S.PDiag(diag::note_locked_here)
+ << Kind);
+ Warnings.emplace_back(std::move(Warning), getNotes(Note));
+ return;
+ }
+ Warnings.emplace_back(std::move(Warning), getNotes());
+ }
+
+ void handleExclusiveAndShared(StringRef Kind, Name LockName,
+ SourceLocation Loc1,
+ SourceLocation Loc2) override {
+ PartialDiagnosticAt Warning(Loc1,
+ S.PDiag(diag::warn_lock_exclusive_and_shared)
+ << Kind << LockName);
+ PartialDiagnosticAt Note(Loc2, S.PDiag(diag::note_lock_exclusive_and_shared)
+ << Kind << LockName);
+ Warnings.emplace_back(std::move(Warning), getNotes(Note));
+ }
+
+ void handleNoMutexHeld(StringRef Kind, const NamedDecl *D,
+ ProtectedOperationKind POK, AccessKind AK,
+ SourceLocation Loc) override {
+ assert((POK == POK_VarAccess || POK == POK_VarDereference) &&
+ "Only works for variables");
+    unsigned DiagID = POK == POK_VarAccess
+                          ? diag::warn_variable_requires_any_lock
+                          : diag::warn_var_deref_requires_any_lock;
+ PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
+ << D->getNameAsString() << getLockKindFromAccessKind(AK));
+ Warnings.emplace_back(std::move(Warning), getNotes());
+ }
+
+ void handleMutexNotHeld(StringRef Kind, const NamedDecl *D,
+ ProtectedOperationKind POK, Name LockName,
+ LockKind LK, SourceLocation Loc,
+ Name *PossibleMatch) override {
+ unsigned DiagID = 0;
+ if (PossibleMatch) {
+ switch (POK) {
+ case POK_VarAccess:
+ DiagID = diag::warn_variable_requires_lock_precise;
+ break;
+ case POK_VarDereference:
+ DiagID = diag::warn_var_deref_requires_lock_precise;
+ break;
+ case POK_FunctionCall:
+ DiagID = diag::warn_fun_requires_lock_precise;
+ break;
+ case POK_PassByRef:
+ DiagID = diag::warn_guarded_pass_by_reference;
+ break;
+ case POK_PtPassByRef:
+ DiagID = diag::warn_pt_guarded_pass_by_reference;
+ break;
+ }
+ PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind
+ << D->getNameAsString()
+ << LockName << LK);
+ PartialDiagnosticAt Note(Loc, S.PDiag(diag::note_found_mutex_near_match)
+ << *PossibleMatch);
+ if (Verbose && POK == POK_VarAccess) {
+ PartialDiagnosticAt VNote(D->getLocation(),
+ S.PDiag(diag::note_guarded_by_declared_here)
+ << D->getNameAsString());
+ Warnings.emplace_back(std::move(Warning), getNotes(Note, VNote));
+ } else
+ Warnings.emplace_back(std::move(Warning), getNotes(Note));
+ } else {
+ switch (POK) {
+ case POK_VarAccess:
+ DiagID = diag::warn_variable_requires_lock;
+ break;
+ case POK_VarDereference:
+ DiagID = diag::warn_var_deref_requires_lock;
+ break;
+ case POK_FunctionCall:
+ DiagID = diag::warn_fun_requires_lock;
+ break;
+ case POK_PassByRef:
+ DiagID = diag::warn_guarded_pass_by_reference;
+ break;
+ case POK_PtPassByRef:
+ DiagID = diag::warn_pt_guarded_pass_by_reference;
+ break;
+ }
+ PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind
+ << D->getNameAsString()
+ << LockName << LK);
+ if (Verbose && POK == POK_VarAccess) {
+ PartialDiagnosticAt Note(D->getLocation(),
+ S.PDiag(diag::note_guarded_by_declared_here)
+ << D->getNameAsString());
+ Warnings.emplace_back(std::move(Warning), getNotes(Note));
+ } else
+ Warnings.emplace_back(std::move(Warning), getNotes());
+ }
+ }
+
+ void handleNegativeNotHeld(StringRef Kind, Name LockName, Name Neg,
+ SourceLocation Loc) override {
+ PartialDiagnosticAt Warning(Loc,
+ S.PDiag(diag::warn_acquire_requires_negative_cap)
+ << Kind << LockName << Neg);
+ Warnings.emplace_back(std::move(Warning), getNotes());
+ }
+
+ void handleFunExcludesLock(StringRef Kind, Name FunName, Name LockName,
+ SourceLocation Loc) override {
+ PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_fun_excludes_mutex)
+ << Kind << FunName << LockName);
+ Warnings.emplace_back(std::move(Warning), getNotes());
+ }
+
+ void handleLockAcquiredBefore(StringRef Kind, Name L1Name, Name L2Name,
+ SourceLocation Loc) override {
+ PartialDiagnosticAt Warning(Loc,
+ S.PDiag(diag::warn_acquired_before) << Kind << L1Name << L2Name);
+ Warnings.emplace_back(std::move(Warning), getNotes());
+ }
+
+ void handleBeforeAfterCycle(Name L1Name, SourceLocation Loc) override {
+ PartialDiagnosticAt Warning(Loc,
+ S.PDiag(diag::warn_acquired_before_after_cycle) << L1Name);
+ Warnings.emplace_back(std::move(Warning), getNotes());
+ }
+
+ void enterFunction(const FunctionDecl* FD) override {
+ CurrentFunction = FD;
+ }
+
+ void leaveFunction(const FunctionDecl* FD) override {
+ CurrentFunction = nullptr;
+ }
+};
+} // anonymous namespace
+} // namespace threadSafety
+} // namespace clang
+
+//===----------------------------------------------------------------------===//
+// -Wconsumed
+//===----------------------------------------------------------------------===//
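+// The consumed analysis tracks typestate for objects marked consumable; a
+// typical finding is invoking a method annotated
+// [[clang::callable_when("unconsumed")]] on an object already in the
+// 'consumed' (e.g. moved-from) state, reported via warnUseInInvalidState.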
+
+namespace clang {
+namespace consumed {
+namespace {
+class ConsumedWarningsHandler : public ConsumedWarningsHandlerBase {
+
+ Sema &S;
+ DiagList Warnings;
+
+public:
+
+ ConsumedWarningsHandler(Sema &S) : S(S) {}
+
+ void emitDiagnostics() override {
+ Warnings.sort(SortDiagBySourceLocation(S.getSourceManager()));
+ for (const auto &Diag : Warnings) {
+ S.Diag(Diag.first.first, Diag.first.second);
+ for (const auto &Note : Diag.second)
+ S.Diag(Note.first, Note.second);
+ }
+ }
+
+ void warnLoopStateMismatch(SourceLocation Loc,
+ StringRef VariableName) override {
+ PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_loop_state_mismatch) <<
+ VariableName);
+
+ Warnings.emplace_back(std::move(Warning), OptionalNotes());
+ }
+
+ void warnParamReturnTypestateMismatch(SourceLocation Loc,
+ StringRef VariableName,
+ StringRef ExpectedState,
+ StringRef ObservedState) override {
+
+ PartialDiagnosticAt Warning(Loc, S.PDiag(
+ diag::warn_param_return_typestate_mismatch) << VariableName <<
+ ExpectedState << ObservedState);
+
+ Warnings.emplace_back(std::move(Warning), OptionalNotes());
+ }
+
+ void warnParamTypestateMismatch(SourceLocation Loc, StringRef ExpectedState,
+ StringRef ObservedState) override {
+
+ PartialDiagnosticAt Warning(Loc, S.PDiag(
+ diag::warn_param_typestate_mismatch) << ExpectedState << ObservedState);
+
+ Warnings.emplace_back(std::move(Warning), OptionalNotes());
+ }
+
+ void warnReturnTypestateForUnconsumableType(SourceLocation Loc,
+ StringRef TypeName) override {
+ PartialDiagnosticAt Warning(Loc, S.PDiag(
+ diag::warn_return_typestate_for_unconsumable_type) << TypeName);
+
+ Warnings.emplace_back(std::move(Warning), OptionalNotes());
+ }
+
+ void warnReturnTypestateMismatch(SourceLocation Loc, StringRef ExpectedState,
+ StringRef ObservedState) override {
+
+ PartialDiagnosticAt Warning(Loc, S.PDiag(
+ diag::warn_return_typestate_mismatch) << ExpectedState << ObservedState);
+
+ Warnings.emplace_back(std::move(Warning), OptionalNotes());
+ }
+
+ void warnUseOfTempInInvalidState(StringRef MethodName, StringRef State,
+ SourceLocation Loc) override {
+
+ PartialDiagnosticAt Warning(Loc, S.PDiag(
+ diag::warn_use_of_temp_in_invalid_state) << MethodName << State);
+
+ Warnings.emplace_back(std::move(Warning), OptionalNotes());
+ }
+
+ void warnUseInInvalidState(StringRef MethodName, StringRef VariableName,
+ StringRef State, SourceLocation Loc) override {
+
+ PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_use_in_invalid_state) <<
+ MethodName << VariableName << State);
+
+ Warnings.emplace_back(std::move(Warning), OptionalNotes());
+ }
+};
+} // anonymous namespace
+} // namespace consumed
+} // namespace clang
+
+//===----------------------------------------------------------------------===//
+// AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based
+// warnings on a function, method, or block.
+//===----------------------------------------------------------------------===//
+
+clang::sema::AnalysisBasedWarnings::Policy::Policy() {
+ enableCheckFallThrough = 1;
+ enableCheckUnreachable = 0;
+ enableThreadSafetyAnalysis = 0;
+ enableConsumedAnalysis = 0;
+}
+
+static unsigned isEnabled(DiagnosticsEngine &D, unsigned diag) {
+ return (unsigned)!D.isIgnored(diag, SourceLocation());
+}
+
+clang::sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s)
+ : S(s),
+ NumFunctionsAnalyzed(0),
+ NumFunctionsWithBadCFGs(0),
+ NumCFGBlocks(0),
+ MaxCFGBlocksPerFunction(0),
+ NumUninitAnalysisFunctions(0),
+ NumUninitAnalysisVariables(0),
+ MaxUninitAnalysisVariablesPerFunction(0),
+ NumUninitAnalysisBlockVisits(0),
+ MaxUninitAnalysisBlockVisitsPerFunction(0) {
+
+ using namespace diag;
+ DiagnosticsEngine &D = S.getDiagnostics();
+
+ DefaultPolicy.enableCheckUnreachable =
+ isEnabled(D, warn_unreachable) ||
+ isEnabled(D, warn_unreachable_break) ||
+ isEnabled(D, warn_unreachable_return) ||
+ isEnabled(D, warn_unreachable_loop_increment);
+
+ DefaultPolicy.enableThreadSafetyAnalysis =
+ isEnabled(D, warn_double_lock);
+
+ DefaultPolicy.enableConsumedAnalysis =
+ isEnabled(D, warn_use_in_invalid_state);
+}
+
+static void flushDiagnostics(Sema &S, const sema::FunctionScopeInfo *fscope) {
+ for (const auto &D : fscope->PossiblyUnreachableDiags)
+ S.Diag(D.Loc, D.PD);
+}
+
+void clang::sema::
+AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
+ sema::FunctionScopeInfo *fscope,
+ const Decl *D, const BlockExpr *blkExpr) {
+
+ // We avoid doing analysis-based warnings when there are errors for
+ // two reasons:
+ // (1) The CFGs often can't be constructed (if the body is invalid), so
+ // don't bother trying.
+ // (2) The code already has problems; running the analysis just takes more
+ // time.
+ DiagnosticsEngine &Diags = S.getDiagnostics();
+
+ // Do not do any analysis for declarations in system headers if we are
+ // going to just ignore them.
+ if (Diags.getSuppressSystemWarnings() &&
+ S.SourceMgr.isInSystemHeader(D->getLocation()))
+ return;
+
+ // For code in dependent contexts, we'll do this at instantiation time.
+ if (cast<DeclContext>(D)->isDependentContext())
+ return;
+
+ if (Diags.hasUncompilableErrorOccurred() || Diags.hasFatalErrorOccurred()) {
+ // Flush out any possibly unreachable diagnostics.
+ flushDiagnostics(S, fscope);
+ return;
+ }
+
+ const Stmt *Body = D->getBody();
+ assert(Body);
+
+ // Construct the analysis context with the specified CFG build options.
+ AnalysisDeclContext AC(/* AnalysisDeclContextManager */ nullptr, D);
+
+  // Don't generate EH edges for CallExprs, since that can result in an n^2
+  // explosion of edges for destructors and a corresponding compile-time hit.
+ AC.getCFGBuildOptions().PruneTriviallyFalseEdges = true;
+ AC.getCFGBuildOptions().AddEHEdges = false;
+ AC.getCFGBuildOptions().AddInitializers = true;
+ AC.getCFGBuildOptions().AddImplicitDtors = true;
+ AC.getCFGBuildOptions().AddTemporaryDtors = true;
+ AC.getCFGBuildOptions().AddCXXNewAllocator = false;
+ AC.getCFGBuildOptions().AddCXXDefaultInitExprInCtors = true;
+
+ // Force that certain expressions appear as CFGElements in the CFG. This
+ // is used to speed up various analyses.
+ // FIXME: This isn't the right factoring. This is here for initial
+ // prototyping, but we need a way for analyses to say what expressions they
+ // expect to always be CFGElements and then fill in the BuildOptions
+ // appropriately. This is essentially a layering violation.
+ if (P.enableCheckUnreachable || P.enableThreadSafetyAnalysis ||
+ P.enableConsumedAnalysis) {
+ // Unreachable code analysis and thread safety require a linearized CFG.
+ AC.getCFGBuildOptions().setAllAlwaysAdd();
+  } else {
+ AC.getCFGBuildOptions()
+ .setAlwaysAdd(Stmt::BinaryOperatorClass)
+ .setAlwaysAdd(Stmt::CompoundAssignOperatorClass)
+ .setAlwaysAdd(Stmt::BlockExprClass)
+ .setAlwaysAdd(Stmt::CStyleCastExprClass)
+ .setAlwaysAdd(Stmt::DeclRefExprClass)
+ .setAlwaysAdd(Stmt::ImplicitCastExprClass)
+ .setAlwaysAdd(Stmt::UnaryOperatorClass)
+ .setAlwaysAdd(Stmt::AttributedStmtClass);
+ }
+
+ // Install the logical handler for -Wtautological-overlap-compare
+ std::unique_ptr<LogicalErrorHandler> LEH;
+ if (!Diags.isIgnored(diag::warn_tautological_overlap_comparison,
+ D->getLocStart())) {
+ LEH.reset(new LogicalErrorHandler(S));
+ AC.getCFGBuildOptions().Observer = LEH.get();
+ }
+
+ // Emit delayed diagnostics.
+ if (!fscope->PossiblyUnreachableDiags.empty()) {
+ bool analyzed = false;
+
+ // Register the expressions with the CFGBuilder.
+ for (const auto &D : fscope->PossiblyUnreachableDiags) {
+ if (D.stmt)
+ AC.registerForcedBlockExpression(D.stmt);
+ }
+
+ if (AC.getCFG()) {
+ analyzed = true;
+ for (const auto &D : fscope->PossiblyUnreachableDiags) {
+ bool processed = false;
+ if (D.stmt) {
+ const CFGBlock *block = AC.getBlockForRegisteredExpression(D.stmt);
+ CFGReverseBlockReachabilityAnalysis *cra =
+ AC.getCFGReachablityAnalysis();
+ // FIXME: We should be able to assert that block is non-null, but
+ // the CFG analysis can skip potentially-evaluated expressions in
+ // edge cases; see test/Sema/vla-2.c.
+ if (block && cra) {
+ // Can this block be reached from the entrance?
+ if (cra->isReachable(&AC.getCFG()->getEntry(), block))
+ S.Diag(D.Loc, D.PD);
+ processed = true;
+ }
+ }
+ if (!processed) {
+ // Emit the warning anyway if we cannot map to a basic block.
+ S.Diag(D.Loc, D.PD);
+ }
+ }
+ }
+
+ if (!analyzed)
+ flushDiagnostics(S, fscope);
+ }
+
+ // Warning: check missing 'return'
+ if (P.enableCheckFallThrough) {
+ const CheckFallThroughDiagnostics &CD =
+ (isa<BlockDecl>(D) ? CheckFallThroughDiagnostics::MakeForBlock()
+ : (isa<CXXMethodDecl>(D) &&
+ cast<CXXMethodDecl>(D)->getOverloadedOperator() == OO_Call &&
+ cast<CXXMethodDecl>(D)->getParent()->isLambda())
+ ? CheckFallThroughDiagnostics::MakeForLambda()
+ : CheckFallThroughDiagnostics::MakeForFunction(D));
+ CheckFallThroughForBody(S, D, Body, blkExpr, CD, AC);
+ }
+
+ // Warning: check for unreachable code
+ if (P.enableCheckUnreachable) {
+ // Only check for unreachable code on non-template instantiations.
+ // Different template instantiations can effectively change the control-flow
+ // and it is very difficult to prove that a snippet of code in a template
+ // is unreachable for all instantiations.
+ bool isTemplateInstantiation = false;
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D))
+ isTemplateInstantiation = Function->isTemplateInstantiation();
+ if (!isTemplateInstantiation)
+ CheckUnreachable(S, AC);
+ }
+
+ // Check for thread safety violations
+ if (P.enableThreadSafetyAnalysis) {
+ SourceLocation FL = AC.getDecl()->getLocation();
+ SourceLocation FEL = AC.getDecl()->getLocEnd();
+ threadSafety::ThreadSafetyReporter Reporter(S, FL, FEL);
+ if (!Diags.isIgnored(diag::warn_thread_safety_beta, D->getLocStart()))
+ Reporter.setIssueBetaWarnings(true);
+ if (!Diags.isIgnored(diag::warn_thread_safety_verbose, D->getLocStart()))
+ Reporter.setVerbose(true);
+
+ threadSafety::runThreadSafetyAnalysis(AC, Reporter,
+ &S.ThreadSafetyDeclCache);
+ Reporter.emitDiagnostics();
+ }
+
+ // Check for violations of consumed properties.
+ if (P.enableConsumedAnalysis) {
+ consumed::ConsumedWarningsHandler WarningHandler(S);
+ consumed::ConsumedAnalyzer Analyzer(WarningHandler);
+ Analyzer.run(AC);
+ }
+
+ if (!Diags.isIgnored(diag::warn_uninit_var, D->getLocStart()) ||
+ !Diags.isIgnored(diag::warn_sometimes_uninit_var, D->getLocStart()) ||
+ !Diags.isIgnored(diag::warn_maybe_uninit_var, D->getLocStart())) {
+ if (CFG *cfg = AC.getCFG()) {
+ UninitValsDiagReporter reporter(S);
+ UninitVariablesAnalysisStats stats;
+ std::memset(&stats, 0, sizeof(UninitVariablesAnalysisStats));
+ runUninitializedVariablesAnalysis(*cast<DeclContext>(D), *cfg, AC,
+ reporter, stats);
+
+ if (S.CollectStats && stats.NumVariablesAnalyzed > 0) {
+ ++NumUninitAnalysisFunctions;
+ NumUninitAnalysisVariables += stats.NumVariablesAnalyzed;
+ NumUninitAnalysisBlockVisits += stats.NumBlockVisits;
+ MaxUninitAnalysisVariablesPerFunction =
+ std::max(MaxUninitAnalysisVariablesPerFunction,
+ stats.NumVariablesAnalyzed);
+ MaxUninitAnalysisBlockVisitsPerFunction =
+ std::max(MaxUninitAnalysisBlockVisitsPerFunction,
+ stats.NumBlockVisits);
+ }
+ }
+ }
+
+ bool FallThroughDiagFull =
+ !Diags.isIgnored(diag::warn_unannotated_fallthrough, D->getLocStart());
+ bool FallThroughDiagPerFunction = !Diags.isIgnored(
+ diag::warn_unannotated_fallthrough_per_function, D->getLocStart());
+ if (FallThroughDiagFull || FallThroughDiagPerFunction) {
+ DiagnoseSwitchLabelsFallthrough(S, AC, !FallThroughDiagFull);
+ }
+
+ if (S.getLangOpts().ObjCWeak &&
+ !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, D->getLocStart()))
+ diagnoseRepeatedUseOfWeak(S, fscope, D, AC.getParentMap());
+
+ // Check for infinite self-recursion in functions
+ if (!Diags.isIgnored(diag::warn_infinite_recursive_function,
+ D->getLocStart())) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ checkRecursiveFunction(S, FD, Body, AC);
+ }
+ }
+
+ // If none of the previous checks caused a CFG build, trigger one here
+ // for -Wtautological-overlap-compare
+ if (!Diags.isIgnored(diag::warn_tautological_overlap_comparison,
+ D->getLocStart())) {
+ AC.getCFG();
+ }
+
+ // Collect statistics about the CFG if it was built.
+ if (S.CollectStats && AC.isCFGBuilt()) {
+ ++NumFunctionsAnalyzed;
+ if (CFG *cfg = AC.getCFG()) {
+ // If we successfully built a CFG for this context, record some more
+ // detail information about it.
+ NumCFGBlocks += cfg->getNumBlockIDs();
+ MaxCFGBlocksPerFunction = std::max(MaxCFGBlocksPerFunction,
+ cfg->getNumBlockIDs());
+ } else {
+ ++NumFunctionsWithBadCFGs;
+ }
+ }
+}
+
+void clang::sema::AnalysisBasedWarnings::PrintStats() const {
+ llvm::errs() << "\n*** Analysis Based Warnings Stats:\n";
+
+ unsigned NumCFGsBuilt = NumFunctionsAnalyzed - NumFunctionsWithBadCFGs;
+ unsigned AvgCFGBlocksPerFunction =
+ !NumCFGsBuilt ? 0 : NumCFGBlocks/NumCFGsBuilt;
+ llvm::errs() << NumFunctionsAnalyzed << " functions analyzed ("
+ << NumFunctionsWithBadCFGs << " w/o CFGs).\n"
+ << " " << NumCFGBlocks << " CFG blocks built.\n"
+ << " " << AvgCFGBlocksPerFunction
+ << " average CFG blocks per function.\n"
+ << " " << MaxCFGBlocksPerFunction
+ << " max CFG blocks per function.\n";
+
+ unsigned AvgUninitVariablesPerFunction = !NumUninitAnalysisFunctions ? 0
+ : NumUninitAnalysisVariables/NumUninitAnalysisFunctions;
+ unsigned AvgUninitBlockVisitsPerFunction = !NumUninitAnalysisFunctions ? 0
+ : NumUninitAnalysisBlockVisits/NumUninitAnalysisFunctions;
+  llvm::errs() << NumUninitAnalysisFunctions
+               << " functions analyzed for uninitialized variables\n"
+ << " " << NumUninitAnalysisVariables << " variables analyzed.\n"
+ << " " << AvgUninitVariablesPerFunction
+ << " average variables per function.\n"
+ << " " << MaxUninitAnalysisVariablesPerFunction
+ << " max variables per function.\n"
+ << " " << NumUninitAnalysisBlockVisits << " block visits.\n"
+ << " " << AvgUninitBlockVisitsPerFunction
+ << " average block visits per function.\n"
+ << " " << MaxUninitAnalysisBlockVisitsPerFunction
+ << " max block visits per function.\n";
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp b/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp
new file mode 100644
index 0000000..3c61c95
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp
@@ -0,0 +1,225 @@
+//===--- AttributeList.cpp --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the AttributeList class implementation
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/AttributeList.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/SemaInternal.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+using namespace clang;
+
+IdentifierLoc *IdentifierLoc::create(ASTContext &Ctx, SourceLocation Loc,
+ IdentifierInfo *Ident) {
+ IdentifierLoc *Result = new (Ctx) IdentifierLoc;
+ Result->Loc = Loc;
+ Result->Ident = Ident;
+ return Result;
+}
+
+size_t AttributeList::allocated_size() const {
+ if (IsAvailability) return AttributeFactory::AvailabilityAllocSize;
+ else if (IsTypeTagForDatatype)
+ return AttributeFactory::TypeTagForDatatypeAllocSize;
+ else if (IsProperty)
+ return AttributeFactory::PropertyAllocSize;
+ return (sizeof(AttributeList) + NumArgs * sizeof(ArgsUnion));
+}
+
+AttributeFactory::AttributeFactory() {
+ // Go ahead and configure all the inline capacity. This is just a memset.
+ FreeLists.resize(InlineFreeListsCapacity);
+}
+AttributeFactory::~AttributeFactory() {}
+
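+// Map an allocation size to a free-list index: an AttributeList with no
+// trailing arguments maps to index 0, and (since ArgsUnion is pointer-sized)
+// each additional trailing argument bumps the index by one.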
+static size_t getFreeListIndexForSize(size_t size) {
+ assert(size >= sizeof(AttributeList));
+ assert((size % sizeof(void*)) == 0);
+ return ((size - sizeof(AttributeList)) / sizeof(void*));
+}
+
+void *AttributeFactory::allocate(size_t size) {
+ // Check for a previously reclaimed attribute.
+ size_t index = getFreeListIndexForSize(size);
+ if (index < FreeLists.size()) {
+ if (AttributeList *attr = FreeLists[index]) {
+ FreeLists[index] = attr->NextInPool;
+ return attr;
+ }
+ }
+
+ // Otherwise, allocate something new.
+ return Alloc.Allocate(size, llvm::AlignOf<AttributeFactory>::Alignment);
+}
+
+void AttributeFactory::reclaimPool(AttributeList *cur) {
+ assert(cur && "reclaiming empty pool!");
+ do {
+ // Read this here, because we're going to overwrite NextInPool
+ // when we toss 'cur' into the appropriate queue.
+ AttributeList *next = cur->NextInPool;
+
+ size_t size = cur->allocated_size();
+ size_t freeListIndex = getFreeListIndexForSize(size);
+
+ // Expand FreeLists to the appropriate size, if required.
+ if (freeListIndex >= FreeLists.size())
+ FreeLists.resize(freeListIndex+1);
+
+ // Add 'cur' to the appropriate free-list.
+ cur->NextInPool = FreeLists[freeListIndex];
+ FreeLists[freeListIndex] = cur;
+
+ cur = next;
+ } while (cur);
+}
+
+void AttributePool::takePool(AttributeList *pool) {
+ assert(pool);
+
+ // Fast path: this pool is empty.
+ if (!Head) {
+ Head = pool;
+ return;
+ }
+
+ // Reverse the pool onto the current head. This optimizes for the
+ // pattern of pulling a lot of pools into a single pool.
+ do {
+ AttributeList *next = pool->NextInPool;
+ pool->NextInPool = Head;
+ Head = pool;
+ pool = next;
+ } while (pool);
+}
+
+#include "clang/Sema/AttrParsedAttrKinds.inc"
+
+static StringRef normalizeAttrName(StringRef AttrName, StringRef ScopeName,
+ AttributeList::Syntax SyntaxUsed) {
+ // Normalize the attribute name, __foo__ becomes foo. This is only allowable
+ // for GNU attributes.
+ bool IsGNU = SyntaxUsed == AttributeList::AS_GNU ||
+ (SyntaxUsed == AttributeList::AS_CXX11 && ScopeName == "gnu");
+ if (IsGNU && AttrName.size() >= 4 && AttrName.startswith("__") &&
+ AttrName.endswith("__"))
+ AttrName = AttrName.slice(2, AttrName.size() - 2);
+
+ return AttrName;
+}
+
+AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name,
+ const IdentifierInfo *ScopeName,
+ Syntax SyntaxUsed) {
+ StringRef AttrName = Name->getName();
+
+ SmallString<64> FullName;
+ if (ScopeName)
+ FullName += ScopeName->getName();
+
+ AttrName = normalizeAttrName(AttrName, FullName, SyntaxUsed);
+
+ // Ensure that in the case of C++11 attributes, we look for '::foo' if it is
+ // unscoped.
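+  // e.g. [[gnu::visibility]] is looked up as "gnu::visibility", while an
+  // unscoped [[noreturn]] is looked up as "::noreturn".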
+ if (ScopeName || SyntaxUsed == AS_CXX11)
+ FullName += "::";
+ FullName += AttrName;
+
+ return ::getAttrKind(FullName, SyntaxUsed);
+}
+
+unsigned AttributeList::getAttributeSpellingListIndex() const {
+  // Both variables are used by the tablegen-generated attribute spelling
+  // list index matching code included below.
+ StringRef Scope = ScopeName ? ScopeName->getName() : "";
+ StringRef Name = normalizeAttrName(AttrName->getName(), Scope,
+ (AttributeList::Syntax)SyntaxUsed);
+
+#include "clang/Sema/AttrSpellingListIndex.inc"
+
+}
+
+struct ParsedAttrInfo {
+ unsigned NumArgs : 4;
+ unsigned OptArgs : 4;
+ unsigned HasCustomParsing : 1;
+ unsigned IsTargetSpecific : 1;
+ unsigned IsType : 1;
+ unsigned IsKnownToGCC : 1;
+
+ bool (*DiagAppertainsToDecl)(Sema &S, const AttributeList &Attr,
+ const Decl *);
+ bool (*DiagLangOpts)(Sema &S, const AttributeList &Attr);
+ bool (*ExistsInTarget)(const TargetInfo &Target);
+ unsigned (*SpellingIndexToSemanticSpelling)(const AttributeList &Attr);
+};
+
+namespace {
+ #include "clang/Sema/AttrParsedAttrImpl.inc"
+}
+
+static const ParsedAttrInfo &getInfo(const AttributeList &A) {
+ return AttrInfoMap[A.getKind()];
+}
+
+unsigned AttributeList::getMinArgs() const {
+ return getInfo(*this).NumArgs;
+}
+
+unsigned AttributeList::getMaxArgs() const {
+ return getMinArgs() + getInfo(*this).OptArgs;
+}
+
+bool AttributeList::hasCustomParsing() const {
+ return getInfo(*this).HasCustomParsing;
+}
+
+bool AttributeList::diagnoseAppertainsTo(Sema &S, const Decl *D) const {
+ return getInfo(*this).DiagAppertainsToDecl(S, *this, D);
+}
+
+bool AttributeList::diagnoseLangOpts(Sema &S) const {
+ return getInfo(*this).DiagLangOpts(S, *this);
+}
+
+bool AttributeList::isTargetSpecificAttr() const {
+ return getInfo(*this).IsTargetSpecific;
+}
+
+bool AttributeList::isTypeAttr() const {
+ return getInfo(*this).IsType;
+}
+
+bool AttributeList::existsInTarget(const TargetInfo &Target) const {
+ return getInfo(*this).ExistsInTarget(Target);
+}
+
+bool AttributeList::isKnownToGCC() const {
+ return getInfo(*this).IsKnownToGCC;
+}
+
+unsigned AttributeList::getSemanticSpelling() const {
+ return getInfo(*this).SpellingIndexToSemanticSpelling(*this);
+}
+
+bool AttributeList::hasVariadicArg() const {
+  // If the attribute has the maximum number of optional arguments, we treat
+  // it as variadic. If we someday get an attribute that legitimately bumps up
+  // against that maximum, we can use another bit to track whether it's truly
+  // variadic or not.
+ return getInfo(*this).OptArgs == 15;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp b/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp
new file mode 100644
index 0000000..18e9a59
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp
@@ -0,0 +1,638 @@
+//===--- CodeCompleteConsumer.cpp - Code Completion Interface ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the CodeCompleteConsumer class.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Sema/CodeCompleteConsumer.h"
+#include "clang-c/Index.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cstring>
+#include <functional>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Code completion context implementation
+//===----------------------------------------------------------------------===//
+
+bool CodeCompletionContext::wantConstructorResults() const {
+ switch (Kind) {
+ case CCC_Recovery:
+ case CCC_Statement:
+ case CCC_Expression:
+ case CCC_ObjCMessageReceiver:
+ case CCC_ParenthesizedExpression:
+ return true;
+
+ case CCC_TopLevel:
+ case CCC_ObjCInterface:
+ case CCC_ObjCImplementation:
+ case CCC_ObjCIvarList:
+ case CCC_ClassStructUnion:
+ case CCC_DotMemberAccess:
+ case CCC_ArrowMemberAccess:
+ case CCC_ObjCPropertyAccess:
+ case CCC_EnumTag:
+ case CCC_UnionTag:
+ case CCC_ClassOrStructTag:
+ case CCC_ObjCProtocolName:
+ case CCC_Namespace:
+ case CCC_Type:
+ case CCC_Name:
+ case CCC_PotentiallyQualifiedName:
+ case CCC_MacroName:
+ case CCC_MacroNameUse:
+ case CCC_PreprocessorExpression:
+ case CCC_PreprocessorDirective:
+ case CCC_NaturalLanguage:
+ case CCC_SelectorName:
+ case CCC_TypeQualifiers:
+ case CCC_Other:
+ case CCC_OtherWithMacros:
+ case CCC_ObjCInstanceMessage:
+ case CCC_ObjCClassMessage:
+ case CCC_ObjCInterfaceName:
+ case CCC_ObjCCategoryName:
+ return false;
+ }
+
+ llvm_unreachable("Invalid CodeCompletionContext::Kind!");
+}
+
+//===----------------------------------------------------------------------===//
+// Code completion string implementation
+//===----------------------------------------------------------------------===//
+CodeCompletionString::Chunk::Chunk(ChunkKind Kind, const char *Text)
+ : Kind(Kind), Text("")
+{
+ switch (Kind) {
+ case CK_TypedText:
+ case CK_Text:
+ case CK_Placeholder:
+ case CK_Informative:
+ case CK_ResultType:
+ case CK_CurrentParameter:
+ this->Text = Text;
+ break;
+
+ case CK_Optional:
+ llvm_unreachable("Optional strings cannot be created from text");
+
+ case CK_LeftParen:
+ this->Text = "(";
+ break;
+
+ case CK_RightParen:
+ this->Text = ")";
+ break;
+
+ case CK_LeftBracket:
+ this->Text = "[";
+ break;
+
+ case CK_RightBracket:
+ this->Text = "]";
+ break;
+
+ case CK_LeftBrace:
+ this->Text = "{";
+ break;
+
+ case CK_RightBrace:
+ this->Text = "}";
+ break;
+
+ case CK_LeftAngle:
+ this->Text = "<";
+ break;
+
+ case CK_RightAngle:
+ this->Text = ">";
+ break;
+
+ case CK_Comma:
+ this->Text = ", ";
+ break;
+
+ case CK_Colon:
+ this->Text = ":";
+ break;
+
+ case CK_SemiColon:
+ this->Text = ";";
+ break;
+
+ case CK_Equal:
+ this->Text = " = ";
+ break;
+
+ case CK_HorizontalSpace:
+ this->Text = " ";
+ break;
+
+ case CK_VerticalSpace:
+ this->Text = "\n";
+ break;
+ }
+}
+
+CodeCompletionString::Chunk
+CodeCompletionString::Chunk::CreateText(const char *Text) {
+ return Chunk(CK_Text, Text);
+}
+
+CodeCompletionString::Chunk
+CodeCompletionString::Chunk::CreateOptional(CodeCompletionString *Optional) {
+ Chunk Result;
+ Result.Kind = CK_Optional;
+ Result.Optional = Optional;
+ return Result;
+}
+
+CodeCompletionString::Chunk
+CodeCompletionString::Chunk::CreatePlaceholder(const char *Placeholder) {
+ return Chunk(CK_Placeholder, Placeholder);
+}
+
+CodeCompletionString::Chunk
+CodeCompletionString::Chunk::CreateInformative(const char *Informative) {
+ return Chunk(CK_Informative, Informative);
+}
+
+CodeCompletionString::Chunk
+CodeCompletionString::Chunk::CreateResultType(const char *ResultType) {
+ return Chunk(CK_ResultType, ResultType);
+}
+
+CodeCompletionString::Chunk
+CodeCompletionString::Chunk::CreateCurrentParameter(
+ const char *CurrentParameter) {
+ return Chunk(CK_CurrentParameter, CurrentParameter);
+}
+
+CodeCompletionString::CodeCompletionString(const Chunk *Chunks,
+ unsigned NumChunks,
+ unsigned Priority,
+ CXAvailabilityKind Availability,
+ const char **Annotations,
+ unsigned NumAnnotations,
+ StringRef ParentName,
+ const char *BriefComment)
+ : NumChunks(NumChunks), NumAnnotations(NumAnnotations),
+ Priority(Priority), Availability(Availability),
+ ParentName(ParentName), BriefComment(BriefComment)
+{
+ assert(NumChunks <= 0xffff);
+ assert(NumAnnotations <= 0xffff);
+
+ Chunk *StoredChunks = reinterpret_cast<Chunk *>(this + 1);
+ for (unsigned I = 0; I != NumChunks; ++I)
+ StoredChunks[I] = Chunks[I];
+
+  const char **StoredAnnotations =
+      reinterpret_cast<const char **>(StoredChunks + NumChunks);
+ for (unsigned I = 0; I != NumAnnotations; ++I)
+ StoredAnnotations[I] = Annotations[I];
+}
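+
+// Layout note (illustrative): the constructor above relies on the string
+// being allocated with trailing storage, roughly
+//
+//   [CodeCompletionString][Chunk 0 .. Chunk N-1][annotation 0 .. annotation M-1]
+//
+// which is why it copies into `this + 1` and why getAnnotation() reads past
+// end(). CodeCompletionBuilder::TakeString() computes the matching
+// allocation size.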
+
+unsigned CodeCompletionString::getAnnotationCount() const {
+ return NumAnnotations;
+}
+
+const char *CodeCompletionString::getAnnotation(unsigned AnnotationNr) const {
+ if (AnnotationNr < NumAnnotations)
+ return reinterpret_cast<const char * const*>(end())[AnnotationNr];
+ else
+ return nullptr;
+}
+
+std::string CodeCompletionString::getAsString() const {
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+
+ for (iterator C = begin(), CEnd = end(); C != CEnd; ++C) {
+ switch (C->Kind) {
+ case CK_Optional: OS << "{#" << C->Optional->getAsString() << "#}"; break;
+ case CK_Placeholder: OS << "<#" << C->Text << "#>"; break;
+
+ case CK_Informative:
+ case CK_ResultType:
+ OS << "[#" << C->Text << "#]";
+ break;
+
+ case CK_CurrentParameter: OS << "<#" << C->Text << "#>"; break;
+ default: OS << C->Text; break;
+ }
+ }
+ return OS.str();
+}
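+
+// Rendering example (hypothetical completion, illustrative names only): for a
+// member function `void push_back(const T &value)` the chunks would render as
+//
+//   [#void#]push_back(<#const T &value#>)
+//
+// i.e. result types in [#...#], placeholders and current parameters in
+// <#...#>, and optional chunks in {#...#}.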
+
+const char *CodeCompletionString::getTypedText() const {
+ for (iterator C = begin(), CEnd = end(); C != CEnd; ++C)
+ if (C->Kind == CK_TypedText)
+ return C->Text;
+
+ return nullptr;
+}
+
+const char *CodeCompletionAllocator::CopyString(const Twine &String) {
+ SmallString<128> Data;
+ StringRef Ref = String.toStringRef(Data);
+ // FIXME: It would be more efficient to teach Twine to tell us its size and
+ // then add a routine there to fill in an allocated char* with the contents
+ // of the string.
+ char *Mem = (char *)Allocate(Ref.size() + 1, 1);
+ std::copy(Ref.begin(), Ref.end(), Mem);
+ Mem[Ref.size()] = 0;
+ return Mem;
+}
+
+StringRef CodeCompletionTUInfo::getParentName(const DeclContext *DC) {
+ const NamedDecl *ND = dyn_cast<NamedDecl>(DC);
+ if (!ND)
+ return StringRef();
+
+ // Check whether we've already cached the parent name.
+ StringRef &CachedParentName = ParentNames[DC];
+ if (!CachedParentName.empty())
+ return CachedParentName;
+
+ // If we already processed this DeclContext and assigned empty to it, the
+ // data pointer will be non-null.
+ if (CachedParentName.data() != nullptr)
+ return StringRef();
+
+ // Find the interesting names.
+ SmallVector<const DeclContext *, 2> Contexts;
+ while (DC && !DC->isFunctionOrMethod()) {
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(DC)) {
+ if (ND->getIdentifier())
+ Contexts.push_back(DC);
+ }
+
+ DC = DC->getParent();
+ }
+
+ {
+ SmallString<128> S;
+ llvm::raw_svector_ostream OS(S);
+ bool First = true;
+ for (unsigned I = Contexts.size(); I != 0; --I) {
+ if (First)
+ First = false;
+      else
+        OS << "::";
+
+ const DeclContext *CurDC = Contexts[I-1];
+      if (const ObjCCategoryImplDecl *CatImpl =
+              dyn_cast<ObjCCategoryImplDecl>(CurDC))
+ CurDC = CatImpl->getCategoryDecl();
+
+ if (const ObjCCategoryDecl *Cat = dyn_cast<ObjCCategoryDecl>(CurDC)) {
+ const ObjCInterfaceDecl *Interface = Cat->getClassInterface();
+ if (!Interface) {
+          // Cache an empty StringRef with non-null data so that "processed
+          // but empty" can be distinguished from "not yet processed".
+ CachedParentName = StringRef((const char *)~0U, 0);
+ return StringRef();
+ }
+
+ OS << Interface->getName() << '(' << Cat->getName() << ')';
+ } else {
+ OS << cast<NamedDecl>(CurDC)->getName();
+ }
+ }
+
+ CachedParentName = AllocatorRef->CopyString(OS.str());
+ }
+
+ return CachedParentName;
+}
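+
+// Illustrative results (hypothetical declarations): for a member of
+// `namespace Outer { namespace Inner { ... } }` the parent name is
+// "Outer::Inner"; for a method of an Objective-C category
+// `@interface NSString (MyAdditions)` it is "NSString(MyAdditions)".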
+
+CodeCompletionString *CodeCompletionBuilder::TakeString() {
+ void *Mem = getAllocator().Allocate(
+ sizeof(CodeCompletionString) + sizeof(Chunk) * Chunks.size()
+ + sizeof(const char *) * Annotations.size(),
+ llvm::alignOf<CodeCompletionString>());
+ CodeCompletionString *Result
+ = new (Mem) CodeCompletionString(Chunks.data(), Chunks.size(),
+ Priority, Availability,
+ Annotations.data(), Annotations.size(),
+ ParentName, BriefComment);
+ Chunks.clear();
+ return Result;
+}
+
+void CodeCompletionBuilder::AddTypedTextChunk(const char *Text) {
+ Chunks.push_back(Chunk(CodeCompletionString::CK_TypedText, Text));
+}
+
+void CodeCompletionBuilder::AddTextChunk(const char *Text) {
+ Chunks.push_back(Chunk::CreateText(Text));
+}
+
+void CodeCompletionBuilder::AddOptionalChunk(CodeCompletionString *Optional) {
+ Chunks.push_back(Chunk::CreateOptional(Optional));
+}
+
+void CodeCompletionBuilder::AddPlaceholderChunk(const char *Placeholder) {
+ Chunks.push_back(Chunk::CreatePlaceholder(Placeholder));
+}
+
+void CodeCompletionBuilder::AddInformativeChunk(const char *Text) {
+ Chunks.push_back(Chunk::CreateInformative(Text));
+}
+
+void CodeCompletionBuilder::AddResultTypeChunk(const char *ResultType) {
+ Chunks.push_back(Chunk::CreateResultType(ResultType));
+}
+
+void
+CodeCompletionBuilder::AddCurrentParameterChunk(const char *CurrentParameter) {
+ Chunks.push_back(Chunk::CreateCurrentParameter(CurrentParameter));
+}
+
+void CodeCompletionBuilder::AddChunk(CodeCompletionString::ChunkKind CK,
+ const char *Text) {
+ Chunks.push_back(Chunk(CK, Text));
+}
+
+void CodeCompletionBuilder::addParentContext(const DeclContext *DC) {
+ if (DC->isTranslationUnit()) {
+ return;
+ }
+
+ if (DC->isFunctionOrMethod())
+ return;
+
+ const NamedDecl *ND = dyn_cast<NamedDecl>(DC);
+ if (!ND)
+ return;
+
+ ParentName = getCodeCompletionTUInfo().getParentName(DC);
+}
+
+void CodeCompletionBuilder::addBriefComment(StringRef Comment) {
+ BriefComment = Allocator.CopyString(Comment);
+}
+
+//===----------------------------------------------------------------------===//
+// Code completion overload candidate implementation
+//===----------------------------------------------------------------------===//
+FunctionDecl *
+CodeCompleteConsumer::OverloadCandidate::getFunction() const {
+ if (getKind() == CK_Function)
+ return Function;
+ else if (getKind() == CK_FunctionTemplate)
+ return FunctionTemplate->getTemplatedDecl();
+ else
+ return nullptr;
+}
+
+const FunctionType *
+CodeCompleteConsumer::OverloadCandidate::getFunctionType() const {
+ switch (Kind) {
+ case CK_Function:
+ return Function->getType()->getAs<FunctionType>();
+
+ case CK_FunctionTemplate:
+ return FunctionTemplate->getTemplatedDecl()->getType()
+ ->getAs<FunctionType>();
+
+ case CK_FunctionType:
+ return Type;
+ }
+
+ llvm_unreachable("Invalid CandidateKind!");
+}
+
+//===----------------------------------------------------------------------===//
+// Code completion consumer implementation
+//===----------------------------------------------------------------------===//
+
+CodeCompleteConsumer::~CodeCompleteConsumer() { }
+
+void
+PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &SemaRef,
+ CodeCompletionContext Context,
+ CodeCompletionResult *Results,
+ unsigned NumResults) {
+ std::stable_sort(Results, Results + NumResults);
+
+ // Print the results.
+ for (unsigned I = 0; I != NumResults; ++I) {
+ OS << "COMPLETION: ";
+ switch (Results[I].Kind) {
+ case CodeCompletionResult::RK_Declaration:
+ OS << *Results[I].Declaration;
+ if (Results[I].Hidden)
+ OS << " (Hidden)";
+ if (CodeCompletionString *CCS
+ = Results[I].CreateCodeCompletionString(SemaRef, Context,
+ getAllocator(),
+ CCTUInfo,
+ includeBriefComments())) {
+ OS << " : " << CCS->getAsString();
+ if (const char *BriefComment = CCS->getBriefComment())
+ OS << " : " << BriefComment;
+ }
+
+ OS << '\n';
+ break;
+
+ case CodeCompletionResult::RK_Keyword:
+ OS << Results[I].Keyword << '\n';
+ break;
+
+ case CodeCompletionResult::RK_Macro: {
+ OS << Results[I].Macro->getName();
+ if (CodeCompletionString *CCS
+ = Results[I].CreateCodeCompletionString(SemaRef, Context,
+ getAllocator(),
+ CCTUInfo,
+ includeBriefComments())) {
+ OS << " : " << CCS->getAsString();
+ }
+ OS << '\n';
+ break;
+ }
+
+ case CodeCompletionResult::RK_Pattern: {
+ OS << "Pattern : "
+ << Results[I].Pattern->getAsString() << '\n';
+ break;
+ }
+ }
+ }
+}
+
+// This function is used solely to preserve the former presentation of overloads
+// by "clang -cc1 -code-completion-at", since CodeCompletionString::getAsString
+// needs to be improved for printing the newer and more detailed overload
+// chunks.
+static std::string getOverloadAsString(const CodeCompletionString &CCS) {
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+
+ for (auto &C : CCS) {
+ switch (C.Kind) {
+ case CodeCompletionString::CK_Informative:
+ case CodeCompletionString::CK_ResultType:
+ OS << "[#" << C.Text << "#]";
+ break;
+
+ case CodeCompletionString::CK_CurrentParameter:
+ OS << "<#" << C.Text << "#>";
+ break;
+
+ default: OS << C.Text; break;
+ }
+ }
+ return OS.str();
+}
+
+void
+PrintingCodeCompleteConsumer::ProcessOverloadCandidates(Sema &SemaRef,
+ unsigned CurrentArg,
+ OverloadCandidate *Candidates,
+ unsigned NumCandidates) {
+ for (unsigned I = 0; I != NumCandidates; ++I) {
+ if (CodeCompletionString *CCS
+ = Candidates[I].CreateSignatureString(CurrentArg, SemaRef,
+ getAllocator(), CCTUInfo,
+ includeBriefComments())) {
+ OS << "OVERLOAD: " << getOverloadAsString(*CCS) << "\n";
+ }
+ }
+}
+
+/// \brief Retrieve the effective availability of the given declaration.
+static AvailabilityResult getDeclAvailability(const Decl *D) {
+ AvailabilityResult AR = D->getAvailability();
+ if (isa<EnumConstantDecl>(D))
+ AR = std::max(AR, cast<Decl>(D->getDeclContext())->getAvailability());
+ return AR;
+}
+
+void CodeCompletionResult::computeCursorKindAndAvailability(bool Accessible) {
+ switch (Kind) {
+ case RK_Pattern:
+ if (!Declaration) {
+ // Do nothing: Patterns can come with cursor kinds!
+ break;
+ }
+ // Fall through
+
+ case RK_Declaration: {
+ // Set the availability based on attributes.
+ switch (getDeclAvailability(Declaration)) {
+ case AR_Available:
+ case AR_NotYetIntroduced:
+ Availability = CXAvailability_Available;
+ break;
+
+ case AR_Deprecated:
+ Availability = CXAvailability_Deprecated;
+ break;
+
+ case AR_Unavailable:
+ Availability = CXAvailability_NotAvailable;
+ break;
+ }
+
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(Declaration))
+ if (Function->isDeleted())
+ Availability = CXAvailability_NotAvailable;
+
+ CursorKind = getCursorKindForDecl(Declaration);
+ if (CursorKind == CXCursor_UnexposedDecl) {
+ // FIXME: Forward declarations of Objective-C classes and protocols
+ // are not directly exposed, but we want code completion to treat them
+ // like a definition.
+ if (isa<ObjCInterfaceDecl>(Declaration))
+ CursorKind = CXCursor_ObjCInterfaceDecl;
+ else if (isa<ObjCProtocolDecl>(Declaration))
+ CursorKind = CXCursor_ObjCProtocolDecl;
+ else
+ CursorKind = CXCursor_NotImplemented;
+ }
+ break;
+ }
+
+ case RK_Macro:
+ case RK_Keyword:
+ llvm_unreachable("Macro and keyword kinds are handled by the constructors");
+ }
+
+ if (!Accessible)
+ Availability = CXAvailability_NotAccessible;
+}
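+
+// Example mapping (illustrative): a declaration marked
+// __attribute__((deprecated)) yields CXAvailability_Deprecated, a deleted
+// function (`void f() = delete;`) yields CXAvailability_NotAvailable, and any
+// result that is not accessible from the completion point is downgraded to
+// CXAvailability_NotAccessible regardless of its attributes.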
+
+/// \brief Retrieve the name that should be used to order a result.
+///
+/// If the name needs to be constructed as a string, that string will be
+/// saved into Saved and the returned StringRef will refer to it.
+static StringRef getOrderedName(const CodeCompletionResult &R,
+ std::string &Saved) {
+ switch (R.Kind) {
+ case CodeCompletionResult::RK_Keyword:
+ return R.Keyword;
+
+ case CodeCompletionResult::RK_Pattern:
+ return R.Pattern->getTypedText();
+
+ case CodeCompletionResult::RK_Macro:
+ return R.Macro->getName();
+
+ case CodeCompletionResult::RK_Declaration:
+ // Handle declarations below.
+ break;
+ }
+
+ DeclarationName Name = R.Declaration->getDeclName();
+
+ // If the name is a simple identifier (by far the common case), or a
+ // zero-argument selector, just return a reference to that identifier.
+ if (IdentifierInfo *Id = Name.getAsIdentifierInfo())
+ return Id->getName();
+ if (Name.isObjCZeroArgSelector())
+ if (IdentifierInfo *Id
+ = Name.getObjCSelector().getIdentifierInfoForSlot(0))
+ return Id->getName();
+
+ Saved = Name.getAsString();
+ return Saved;
+}
+
+bool clang::operator<(const CodeCompletionResult &X,
+ const CodeCompletionResult &Y) {
+ std::string XSaved, YSaved;
+ StringRef XStr = getOrderedName(X, XSaved);
+ StringRef YStr = getOrderedName(Y, YSaved);
+ int cmp = XStr.compare_lower(YStr);
+ if (cmp)
+ return cmp < 0;
+
+  // If the case-insensitive comparison is a tie, break it with a
+  // case-sensitive comparison.
+ cmp = XStr.compare(YStr);
+ if (cmp)
+ return cmp < 0;
+
+ return false;
+}
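+
+// Ordering example (illustrative): "Bar" sorts before "foo" because the
+// first comparison ignores case; "FooBar" and "fooBar" tie case-insensitively,
+// so the case-sensitive comparison breaks the tie and "FooBar" sorts first
+// ('F' < 'f' in ASCII).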
diff --git a/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp
new file mode 100644
index 0000000..d664d87
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp
@@ -0,0 +1,1257 @@
+//===--- DeclSpec.cpp - Declaration Specifier Semantic Analysis -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for declaration specifiers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/DeclSpec.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/LocInfoType.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include <cstring>
+using namespace clang;
+
+void UnqualifiedId::setTemplateId(TemplateIdAnnotation *TemplateId) {
+ assert(TemplateId && "NULL template-id annotation?");
+ Kind = IK_TemplateId;
+ this->TemplateId = TemplateId;
+ StartLocation = TemplateId->TemplateNameLoc;
+ EndLocation = TemplateId->RAngleLoc;
+}
+
+void UnqualifiedId::setConstructorTemplateId(TemplateIdAnnotation *TemplateId) {
+ assert(TemplateId && "NULL template-id annotation?");
+ Kind = IK_ConstructorTemplateId;
+ this->TemplateId = TemplateId;
+ StartLocation = TemplateId->TemplateNameLoc;
+ EndLocation = TemplateId->RAngleLoc;
+}
+
+void CXXScopeSpec::Extend(ASTContext &Context, SourceLocation TemplateKWLoc,
+ TypeLoc TL, SourceLocation ColonColonLoc) {
+ Builder.Extend(Context, TemplateKWLoc, TL, ColonColonLoc);
+ if (Range.getBegin().isInvalid())
+ Range.setBegin(TL.getBeginLoc());
+ Range.setEnd(ColonColonLoc);
+
+ assert(Range == Builder.getSourceRange() &&
+ "NestedNameSpecifierLoc range computation incorrect");
+}
+
+void CXXScopeSpec::Extend(ASTContext &Context, IdentifierInfo *Identifier,
+ SourceLocation IdentifierLoc,
+ SourceLocation ColonColonLoc) {
+ Builder.Extend(Context, Identifier, IdentifierLoc, ColonColonLoc);
+
+ if (Range.getBegin().isInvalid())
+ Range.setBegin(IdentifierLoc);
+ Range.setEnd(ColonColonLoc);
+
+ assert(Range == Builder.getSourceRange() &&
+ "NestedNameSpecifierLoc range computation incorrect");
+}
+
+void CXXScopeSpec::Extend(ASTContext &Context, NamespaceDecl *Namespace,
+ SourceLocation NamespaceLoc,
+ SourceLocation ColonColonLoc) {
+ Builder.Extend(Context, Namespace, NamespaceLoc, ColonColonLoc);
+
+ if (Range.getBegin().isInvalid())
+ Range.setBegin(NamespaceLoc);
+ Range.setEnd(ColonColonLoc);
+
+ assert(Range == Builder.getSourceRange() &&
+ "NestedNameSpecifierLoc range computation incorrect");
+}
+
+void CXXScopeSpec::Extend(ASTContext &Context, NamespaceAliasDecl *Alias,
+ SourceLocation AliasLoc,
+ SourceLocation ColonColonLoc) {
+ Builder.Extend(Context, Alias, AliasLoc, ColonColonLoc);
+
+ if (Range.getBegin().isInvalid())
+ Range.setBegin(AliasLoc);
+ Range.setEnd(ColonColonLoc);
+
+ assert(Range == Builder.getSourceRange() &&
+ "NestedNameSpecifierLoc range computation incorrect");
+}
+
+void CXXScopeSpec::MakeGlobal(ASTContext &Context,
+ SourceLocation ColonColonLoc) {
+ Builder.MakeGlobal(Context, ColonColonLoc);
+
+ Range = SourceRange(ColonColonLoc);
+
+ assert(Range == Builder.getSourceRange() &&
+ "NestedNameSpecifierLoc range computation incorrect");
+}
+
+void CXXScopeSpec::MakeSuper(ASTContext &Context, CXXRecordDecl *RD,
+ SourceLocation SuperLoc,
+ SourceLocation ColonColonLoc) {
+ Builder.MakeSuper(Context, RD, SuperLoc, ColonColonLoc);
+
+ Range.setBegin(SuperLoc);
+ Range.setEnd(ColonColonLoc);
+
+ assert(Range == Builder.getSourceRange() &&
+ "NestedNameSpecifierLoc range computation incorrect");
+}
+
+void CXXScopeSpec::MakeTrivial(ASTContext &Context,
+ NestedNameSpecifier *Qualifier, SourceRange R) {
+ Builder.MakeTrivial(Context, Qualifier, R);
+ Range = R;
+}
+
+void CXXScopeSpec::Adopt(NestedNameSpecifierLoc Other) {
+ if (!Other) {
+ Range = SourceRange();
+ Builder.Clear();
+ return;
+ }
+
+ Range = Other.getSourceRange();
+ Builder.Adopt(Other);
+}
+
+SourceLocation CXXScopeSpec::getLastQualifierNameLoc() const {
+ if (!Builder.getRepresentation())
+ return SourceLocation();
+ return Builder.getTemporary().getLocalBeginLoc();
+}
+
+NestedNameSpecifierLoc
+CXXScopeSpec::getWithLocInContext(ASTContext &Context) const {
+ if (!Builder.getRepresentation())
+ return NestedNameSpecifierLoc();
+
+ return Builder.getWithLocInContext(Context);
+}
+
+/// DeclaratorChunk::getFunction - Return a DeclaratorChunk for a function.
+/// "TheDeclarator" is the declarator that this will be added to.
+DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
+ bool isAmbiguous,
+ SourceLocation LParenLoc,
+ ParamInfo *Params,
+ unsigned NumParams,
+ SourceLocation EllipsisLoc,
+ SourceLocation RParenLoc,
+ unsigned TypeQuals,
+ bool RefQualifierIsLvalueRef,
+ SourceLocation RefQualifierLoc,
+ SourceLocation ConstQualifierLoc,
+                                             SourceLocation VolatileQualifierLoc,
+                                             SourceLocation RestrictQualifierLoc,
+ SourceLocation MutableLoc,
+ ExceptionSpecificationType
+ ESpecType,
+ SourceRange ESpecRange,
+ ParsedType *Exceptions,
+ SourceRange *ExceptionRanges,
+ unsigned NumExceptions,
+ Expr *NoexceptExpr,
+ CachedTokens *ExceptionSpecTokens,
+ SourceLocation LocalRangeBegin,
+ SourceLocation LocalRangeEnd,
+ Declarator &TheDeclarator,
+ TypeResult TrailingReturnType) {
+ assert(!(TypeQuals & DeclSpec::TQ_atomic) &&
+ "function cannot have _Atomic qualifier");
+
+ DeclaratorChunk I;
+ I.Kind = Function;
+ I.Loc = LocalRangeBegin;
+ I.EndLoc = LocalRangeEnd;
+ I.Fun.AttrList = nullptr;
+ I.Fun.hasPrototype = hasProto;
+ I.Fun.isVariadic = EllipsisLoc.isValid();
+ I.Fun.isAmbiguous = isAmbiguous;
+ I.Fun.LParenLoc = LParenLoc.getRawEncoding();
+ I.Fun.EllipsisLoc = EllipsisLoc.getRawEncoding();
+ I.Fun.RParenLoc = RParenLoc.getRawEncoding();
+ I.Fun.DeleteParams = false;
+ I.Fun.TypeQuals = TypeQuals;
+ I.Fun.NumParams = NumParams;
+ I.Fun.Params = nullptr;
+ I.Fun.RefQualifierIsLValueRef = RefQualifierIsLvalueRef;
+ I.Fun.RefQualifierLoc = RefQualifierLoc.getRawEncoding();
+ I.Fun.ConstQualifierLoc = ConstQualifierLoc.getRawEncoding();
+ I.Fun.VolatileQualifierLoc = VolatileQualifierLoc.getRawEncoding();
+ I.Fun.RestrictQualifierLoc = RestrictQualifierLoc.getRawEncoding();
+ I.Fun.MutableLoc = MutableLoc.getRawEncoding();
+ I.Fun.ExceptionSpecType = ESpecType;
+ I.Fun.ExceptionSpecLocBeg = ESpecRange.getBegin().getRawEncoding();
+ I.Fun.ExceptionSpecLocEnd = ESpecRange.getEnd().getRawEncoding();
+ I.Fun.NumExceptions = 0;
+ I.Fun.Exceptions = nullptr;
+ I.Fun.NoexceptExpr = nullptr;
+ I.Fun.HasTrailingReturnType = TrailingReturnType.isUsable() ||
+ TrailingReturnType.isInvalid();
+ I.Fun.TrailingReturnType = TrailingReturnType.get();
+
+ assert(I.Fun.TypeQuals == TypeQuals && "bitfield overflow");
+ assert(I.Fun.ExceptionSpecType == ESpecType && "bitfield overflow");
+
+ // new[] a parameter array if needed.
+ if (NumParams) {
+ // If the 'InlineParams' in Declarator is unused and big enough, put our
+ // parameter list there (in an effort to avoid new/delete traffic). If it
+ // is already used (consider a function returning a function pointer) or too
+ // small (function with too many parameters), go to the heap.
+ if (!TheDeclarator.InlineParamsUsed &&
+ NumParams <= llvm::array_lengthof(TheDeclarator.InlineParams)) {
+ I.Fun.Params = TheDeclarator.InlineParams;
+ I.Fun.DeleteParams = false;
+ TheDeclarator.InlineParamsUsed = true;
+ } else {
+ I.Fun.Params = new DeclaratorChunk::ParamInfo[NumParams];
+ I.Fun.DeleteParams = true;
+ }
+ memcpy(I.Fun.Params, Params, sizeof(Params[0]) * NumParams);
+ }
+
+ // Check what exception specification information we should actually store.
+ switch (ESpecType) {
+ default: break; // By default, save nothing.
+ case EST_Dynamic:
+ // new[] an exception array if needed
+ if (NumExceptions) {
+ I.Fun.NumExceptions = NumExceptions;
+ I.Fun.Exceptions = new DeclaratorChunk::TypeAndRange[NumExceptions];
+ for (unsigned i = 0; i != NumExceptions; ++i) {
+ I.Fun.Exceptions[i].Ty = Exceptions[i];
+ I.Fun.Exceptions[i].Range = ExceptionRanges[i];
+ }
+ }
+ break;
+
+ case EST_ComputedNoexcept:
+ I.Fun.NoexceptExpr = NoexceptExpr;
+ break;
+
+ case EST_Unparsed:
+ I.Fun.ExceptionSpecTokens = ExceptionSpecTokens;
+ break;
+ }
+ return I;
+}
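+
+// Illustrative consequence of the InlineParams reuse above (hypothetical
+// declarator): in `int (*f(int a))(int b);` the chunk for `(int a)` is built
+// first and claims Declarator::InlineParams; the later chunk for `(int b)`
+// finds InlineParamsUsed set and heap-allocates its ParamInfo array with
+// DeleteParams = true.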
+
+bool Declarator::isDeclarationOfFunction() const {
+ for (unsigned i = 0, i_end = DeclTypeInfo.size(); i < i_end; ++i) {
+ switch (DeclTypeInfo[i].Kind) {
+ case DeclaratorChunk::Function:
+ return true;
+ case DeclaratorChunk::Paren:
+ continue;
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::BlockPointer:
+ case DeclaratorChunk::MemberPointer:
+ return false;
+ }
+ llvm_unreachable("Invalid type chunk");
+ }
+
+ switch (DS.getTypeSpecType()) {
+ case TST_atomic:
+ case TST_auto:
+ case TST_auto_type:
+ case TST_bool:
+ case TST_char:
+ case TST_char16:
+ case TST_char32:
+ case TST_class:
+ case TST_decimal128:
+ case TST_decimal32:
+ case TST_decimal64:
+ case TST_double:
+ case TST_enum:
+ case TST_error:
+ case TST_float:
+ case TST_half:
+ case TST_int:
+ case TST_int128:
+ case TST_struct:
+ case TST_interface:
+ case TST_union:
+ case TST_unknown_anytype:
+ case TST_unspecified:
+ case TST_void:
+ case TST_wchar:
+ return false;
+
+ case TST_decltype_auto:
+ // This must have an initializer, so can't be a function declaration,
+ // even if the initializer has function type.
+ return false;
+
+ case TST_decltype:
+ case TST_typeofExpr:
+ if (Expr *E = DS.getRepAsExpr())
+ return E->getType()->isFunctionType();
+ return false;
+
+ case TST_underlyingType:
+ case TST_typename:
+ case TST_typeofType: {
+ QualType QT = DS.getRepAsType().get();
+ if (QT.isNull())
+ return false;
+
+ if (const LocInfoType *LIT = dyn_cast<LocInfoType>(QT))
+ QT = LIT->getType();
+
+ if (QT.isNull())
+ return false;
+
+ return QT->isFunctionType();
+ }
+ }
+
+ llvm_unreachable("Invalid TypeSpecType!");
+}
+
+bool Declarator::isStaticMember() {
+ assert(getContext() == MemberContext);
+ return getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static ||
+ (getName().Kind == UnqualifiedId::IK_OperatorFunctionId &&
+ CXXMethodDecl::isStaticOverloadedOperator(
+ getName().OperatorFunctionId.Operator));
+}
+
+bool Declarator::isCtorOrDtor() {
+ return (getName().getKind() == UnqualifiedId::IK_ConstructorName) ||
+ (getName().getKind() == UnqualifiedId::IK_DestructorName);
+}
+
+bool DeclSpec::hasTagDefinition() const {
+ if (!TypeSpecOwned)
+ return false;
+ return cast<TagDecl>(getRepAsDecl())->isCompleteDefinition();
+}
+
+/// getParsedSpecifiers - Return a bitmask of which flavors of specifiers this
+/// declaration specifier includes.
+///
+unsigned DeclSpec::getParsedSpecifiers() const {
+ unsigned Res = 0;
+ if (StorageClassSpec != SCS_unspecified ||
+ ThreadStorageClassSpec != TSCS_unspecified)
+ Res |= PQ_StorageClassSpecifier;
+
+ if (TypeQualifiers != TQ_unspecified)
+ Res |= PQ_TypeQualifier;
+
+ if (hasTypeSpecifier())
+ Res |= PQ_TypeSpecifier;
+
+ if (FS_inline_specified || FS_virtual_specified || FS_explicit_specified ||
+ FS_noreturn_specified || FS_forceinline_specified)
+ Res |= PQ_FunctionSpecifier;
+ return Res;
+}
+
+template <class T> static bool BadSpecifier(T TNew, T TPrev,
+ const char *&PrevSpec,
+ unsigned &DiagID,
+ bool IsExtension = true) {
+ PrevSpec = DeclSpec::getSpecifierName(TPrev);
+ if (TNew != TPrev)
+ DiagID = diag::err_invalid_decl_spec_combination;
+ else
+ DiagID = IsExtension ? diag::ext_duplicate_declspec :
+ diag::warn_duplicate_declspec;
+ return true;
+}
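+
+// Examples (illustrative inputs): `typedef extern int T;` takes the
+// TNew != TPrev path and reports err_invalid_decl_spec_combination, while a
+// duplicate such as `extern extern int x;` reports ext_duplicate_declspec or
+// warn_duplicate_declspec depending on IsExtension.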
+
+const char *DeclSpec::getSpecifierName(DeclSpec::SCS S) {
+ switch (S) {
+ case DeclSpec::SCS_unspecified: return "unspecified";
+ case DeclSpec::SCS_typedef: return "typedef";
+ case DeclSpec::SCS_extern: return "extern";
+ case DeclSpec::SCS_static: return "static";
+ case DeclSpec::SCS_auto: return "auto";
+ case DeclSpec::SCS_register: return "register";
+ case DeclSpec::SCS_private_extern: return "__private_extern__";
+ case DeclSpec::SCS_mutable: return "mutable";
+ }
+ llvm_unreachable("Unknown typespec!");
+}
+
+const char *DeclSpec::getSpecifierName(DeclSpec::TSCS S) {
+ switch (S) {
+ case DeclSpec::TSCS_unspecified: return "unspecified";
+ case DeclSpec::TSCS___thread: return "__thread";
+ case DeclSpec::TSCS_thread_local: return "thread_local";
+ case DeclSpec::TSCS__Thread_local: return "_Thread_local";
+ }
+ llvm_unreachable("Unknown typespec!");
+}
+
+const char *DeclSpec::getSpecifierName(TSW W) {
+ switch (W) {
+ case TSW_unspecified: return "unspecified";
+ case TSW_short: return "short";
+ case TSW_long: return "long";
+ case TSW_longlong: return "long long";
+ }
+ llvm_unreachable("Unknown typespec!");
+}
+
+const char *DeclSpec::getSpecifierName(TSC C) {
+ switch (C) {
+ case TSC_unspecified: return "unspecified";
+ case TSC_imaginary: return "imaginary";
+ case TSC_complex: return "complex";
+ }
+ llvm_unreachable("Unknown typespec!");
+}
+
+const char *DeclSpec::getSpecifierName(TSS S) {
+ switch (S) {
+ case TSS_unspecified: return "unspecified";
+ case TSS_signed: return "signed";
+ case TSS_unsigned: return "unsigned";
+ }
+ llvm_unreachable("Unknown typespec!");
+}
+
+const char *DeclSpec::getSpecifierName(DeclSpec::TST T,
+ const PrintingPolicy &Policy) {
+ switch (T) {
+ case DeclSpec::TST_unspecified: return "unspecified";
+ case DeclSpec::TST_void: return "void";
+ case DeclSpec::TST_char: return "char";
+ case DeclSpec::TST_wchar: return Policy.MSWChar ? "__wchar_t" : "wchar_t";
+ case DeclSpec::TST_char16: return "char16_t";
+ case DeclSpec::TST_char32: return "char32_t";
+ case DeclSpec::TST_int: return "int";
+ case DeclSpec::TST_int128: return "__int128";
+ case DeclSpec::TST_half: return "half";
+ case DeclSpec::TST_float: return "float";
+ case DeclSpec::TST_double: return "double";
+ case DeclSpec::TST_bool: return Policy.Bool ? "bool" : "_Bool";
+ case DeclSpec::TST_decimal32: return "_Decimal32";
+ case DeclSpec::TST_decimal64: return "_Decimal64";
+ case DeclSpec::TST_decimal128: return "_Decimal128";
+ case DeclSpec::TST_enum: return "enum";
+ case DeclSpec::TST_class: return "class";
+ case DeclSpec::TST_union: return "union";
+ case DeclSpec::TST_struct: return "struct";
+ case DeclSpec::TST_interface: return "__interface";
+ case DeclSpec::TST_typename: return "type-name";
+ case DeclSpec::TST_typeofType:
+ case DeclSpec::TST_typeofExpr: return "typeof";
+ case DeclSpec::TST_auto: return "auto";
+ case DeclSpec::TST_auto_type: return "__auto_type";
+ case DeclSpec::TST_decltype: return "(decltype)";
+ case DeclSpec::TST_decltype_auto: return "decltype(auto)";
+ case DeclSpec::TST_underlyingType: return "__underlying_type";
+ case DeclSpec::TST_unknown_anytype: return "__unknown_anytype";
+ case DeclSpec::TST_atomic: return "_Atomic";
+ case DeclSpec::TST_error: return "(error)";
+ }
+ llvm_unreachable("Unknown typespec!");
+}
+
+const char *DeclSpec::getSpecifierName(TQ T) {
+ switch (T) {
+ case DeclSpec::TQ_unspecified: return "unspecified";
+ case DeclSpec::TQ_const: return "const";
+ case DeclSpec::TQ_restrict: return "restrict";
+ case DeclSpec::TQ_volatile: return "volatile";
+ case DeclSpec::TQ_atomic: return "_Atomic";
+ }
+ llvm_unreachable("Unknown typespec!");
+}
+
+bool DeclSpec::SetStorageClassSpec(Sema &S, SCS SC, SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID,
+ const PrintingPolicy &Policy) {
+  // OpenCL v1.1 s6.8g: "The extern, static, auto and register storage-class
+  // specifiers are not supported." It seems sensible to prohibit
+  // private_extern too. The cl_clang_storage_class_specifiers extension
+  // enables support for these storage-class specifiers.
+ // OpenCL v1.2 s6.8 changes this to "The auto and register storage-class
+ // specifiers are not supported."
+ if (S.getLangOpts().OpenCL &&
+ !S.getOpenCLOptions().cl_clang_storage_class_specifiers) {
+ switch (SC) {
+ case SCS_extern:
+ case SCS_private_extern:
+ case SCS_static:
+ if (S.getLangOpts().OpenCLVersion < 120) {
+ DiagID = diag::err_opencl_unknown_type_specifier;
+ PrevSpec = getSpecifierName(SC);
+ return true;
+ }
+ break;
+ case SCS_auto:
+ case SCS_register:
+ DiagID = diag::err_opencl_unknown_type_specifier;
+ PrevSpec = getSpecifierName(SC);
+ return true;
+ default:
+ break;
+ }
+ }
+
+ if (StorageClassSpec != SCS_unspecified) {
+ // Maybe this is an attempt to use C++11 'auto' outside of C++11 mode.
+ bool isInvalid = true;
+ if (TypeSpecType == TST_unspecified && S.getLangOpts().CPlusPlus) {
+ if (SC == SCS_auto)
+ return SetTypeSpecType(TST_auto, Loc, PrevSpec, DiagID, Policy);
+ if (StorageClassSpec == SCS_auto) {
+ isInvalid = SetTypeSpecType(TST_auto, StorageClassSpecLoc,
+ PrevSpec, DiagID, Policy);
+ assert(!isInvalid && "auto SCS -> TST recovery failed");
+ }
+ }
+
+ // Changing storage class is allowed only if the previous one
+ // was the 'extern' that is part of a linkage specification and
+ // the new storage class is 'typedef'.
+ if (isInvalid &&
+ !(SCS_extern_in_linkage_spec &&
+ StorageClassSpec == SCS_extern &&
+ SC == SCS_typedef))
+ return BadSpecifier(SC, (SCS)StorageClassSpec, PrevSpec, DiagID);
+ }
+ StorageClassSpec = SC;
+ StorageClassSpecLoc = Loc;
+ assert((unsigned)SC == StorageClassSpec && "SCS constants overflow bitfield");
+ return false;
+}
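+
+// Illustrative recovery (hypothetical C++11 input): in `static auto x = 0;`
+// the 'auto' arrives after 'static' has already set StorageClassSpec, so it
+// is rerouted to SetTypeSpecType(TST_auto, ...) above instead of being
+// diagnosed as a storage-class conflict.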
+
+bool DeclSpec::SetStorageClassSpecThread(TSCS TSC, SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID) {
+ if (ThreadStorageClassSpec != TSCS_unspecified)
+ return BadSpecifier(TSC, (TSCS)ThreadStorageClassSpec, PrevSpec, DiagID);
+
+ ThreadStorageClassSpec = TSC;
+ ThreadStorageClassSpecLoc = Loc;
+ return false;
+}
+
+/// These methods set the specified attribute of the DeclSpec, but return true
+/// and ignore the request if invalid (e.g. "extern" then "auto" is
+/// specified).
+bool DeclSpec::SetTypeSpecWidth(TSW W, SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID,
+ const PrintingPolicy &Policy) {
+ // Overwrite TSWLoc only if TypeSpecWidth was unspecified, so that
+ // for 'long long' we will keep the source location of the first 'long'.
+ if (TypeSpecWidth == TSW_unspecified)
+ TSWLoc = Loc;
+ // Allow turning long -> long long.
+ else if (W != TSW_longlong || TypeSpecWidth != TSW_long)
+ return BadSpecifier(W, (TSW)TypeSpecWidth, PrevSpec, DiagID);
+ TypeSpecWidth = W;
+ return false;
+}
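+
+// Illustrative inputs: for `long long x;` the second 'long' upgrades TSW_long
+// to TSW_longlong, keeping the first 'long's source location for diagnostics,
+// whereas `short long x;` takes the BadSpecifier path.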
+
+bool DeclSpec::SetTypeSpecComplex(TSC C, SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID) {
+ if (TypeSpecComplex != TSC_unspecified)
+ return BadSpecifier(C, (TSC)TypeSpecComplex, PrevSpec, DiagID);
+ TypeSpecComplex = C;
+ TSCLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetTypeSpecSign(TSS S, SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID) {
+ if (TypeSpecSign != TSS_unspecified)
+ return BadSpecifier(S, (TSS)TypeSpecSign, PrevSpec, DiagID);
+ TypeSpecSign = S;
+ TSSLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID,
+ ParsedType Rep,
+ const PrintingPolicy &Policy) {
+ return SetTypeSpecType(T, Loc, Loc, PrevSpec, DiagID, Rep, Policy);
+}
+
+bool DeclSpec::SetTypeSpecType(TST T, SourceLocation TagKwLoc,
+ SourceLocation TagNameLoc,
+ const char *&PrevSpec,
+ unsigned &DiagID,
+ ParsedType Rep,
+ const PrintingPolicy &Policy) {
+ assert(isTypeRep(T) && "T does not store a type");
+ assert(Rep && "no type provided!");
+ if (TypeSpecType != TST_unspecified) {
+ PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
+ DiagID = diag::err_invalid_decl_spec_combination;
+ return true;
+ }
+ TypeSpecType = T;
+ TypeRep = Rep;
+ TSTLoc = TagKwLoc;
+ TSTNameLoc = TagNameLoc;
+ TypeSpecOwned = false;
+ return false;
+}
+
+bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID,
+ Expr *Rep,
+ const PrintingPolicy &Policy) {
+ assert(isExprRep(T) && "T does not store an expr");
+ assert(Rep && "no expression provided!");
+ if (TypeSpecType != TST_unspecified) {
+ PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
+ DiagID = diag::err_invalid_decl_spec_combination;
+ return true;
+ }
+ TypeSpecType = T;
+ ExprRep = Rep;
+ TSTLoc = Loc;
+ TSTNameLoc = Loc;
+ TypeSpecOwned = false;
+ return false;
+}
+
+bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID,
+ Decl *Rep, bool Owned,
+ const PrintingPolicy &Policy) {
+ return SetTypeSpecType(T, Loc, Loc, PrevSpec, DiagID, Rep, Owned, Policy);
+}
+
+bool DeclSpec::SetTypeSpecType(TST T, SourceLocation TagKwLoc,
+ SourceLocation TagNameLoc,
+ const char *&PrevSpec,
+ unsigned &DiagID,
+ Decl *Rep, bool Owned,
+ const PrintingPolicy &Policy) {
+ assert(isDeclRep(T) && "T does not store a decl");
+ // Unlike the other cases, we don't assert that we actually get a decl.
+
+ if (TypeSpecType != TST_unspecified) {
+ PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
+ DiagID = diag::err_invalid_decl_spec_combination;
+ return true;
+ }
+ TypeSpecType = T;
+ DeclRep = Rep;
+ TSTLoc = TagKwLoc;
+ TSTNameLoc = TagNameLoc;
+ TypeSpecOwned = Owned && Rep != nullptr;
+ return false;
+}
+
+bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID,
+ const PrintingPolicy &Policy) {
+ assert(!isDeclRep(T) && !isTypeRep(T) && !isExprRep(T) &&
+ "rep required for these type-spec kinds!");
+ if (TypeSpecType != TST_unspecified) {
+ PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
+ DiagID = diag::err_invalid_decl_spec_combination;
+ return true;
+ }
+ TSTLoc = Loc;
+ TSTNameLoc = Loc;
+ if (TypeAltiVecVector && (T == TST_bool) && !TypeAltiVecBool) {
+ TypeAltiVecBool = true;
+ return false;
+ }
+ TypeSpecType = T;
+ TypeSpecOwned = false;
+ return false;
+}
+
+bool DeclSpec::SetTypeAltiVecVector(bool isAltiVecVector, SourceLocation Loc,
+ const char *&PrevSpec, unsigned &DiagID,
+ const PrintingPolicy &Policy) {
+ if (TypeSpecType != TST_unspecified) {
+ PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
+ DiagID = diag::err_invalid_vector_decl_spec_combination;
+ return true;
+ }
+ TypeAltiVecVector = isAltiVecVector;
+ AltiVecLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetTypeAltiVecPixel(bool isAltiVecPixel, SourceLocation Loc,
+ const char *&PrevSpec, unsigned &DiagID,
+ const PrintingPolicy &Policy) {
+ if (!TypeAltiVecVector || TypeAltiVecPixel ||
+ (TypeSpecType != TST_unspecified)) {
+ PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
+ DiagID = diag::err_invalid_pixel_decl_spec_combination;
+ return true;
+ }
+ TypeAltiVecPixel = isAltiVecPixel;
+ TSTLoc = Loc;
+ TSTNameLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetTypeAltiVecBool(bool isAltiVecBool, SourceLocation Loc,
+ const char *&PrevSpec, unsigned &DiagID,
+ const PrintingPolicy &Policy) {
+ if (!TypeAltiVecVector || TypeAltiVecBool ||
+ (TypeSpecType != TST_unspecified)) {
+ PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
+ DiagID = diag::err_invalid_vector_bool_decl_spec;
+ return true;
+ }
+ TypeAltiVecBool = isAltiVecBool;
+ TSTLoc = Loc;
+ TSTNameLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetTypeSpecError() {
+ TypeSpecType = TST_error;
+ TypeSpecOwned = false;
+ TSTLoc = SourceLocation();
+ TSTNameLoc = SourceLocation();
+ return false;
+}
+
+bool DeclSpec::SetTypeQual(TQ T, SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID, const LangOptions &Lang) {
+ // Duplicates are permitted in C99 onwards, but are not permitted in C89 or
+ // C++. However, since this is likely not what the user intended, we will
+ // always warn. We do not need to set the qualifier's location since we
+ // already have it.
+ if (TypeQualifiers & T) {
+ bool IsExtension = true;
+ if (Lang.C99)
+ IsExtension = false;
+ return BadSpecifier(T, T, PrevSpec, DiagID, IsExtension);
+ }
+ TypeQualifiers |= T;
+
+ switch (T) {
+ case TQ_unspecified: break;
+ case TQ_const: TQ_constLoc = Loc; return false;
+ case TQ_restrict: TQ_restrictLoc = Loc; return false;
+ case TQ_volatile: TQ_volatileLoc = Loc; return false;
+ case TQ_atomic: TQ_atomicLoc = Loc; return false;
+ }
+
+ llvm_unreachable("Unknown type qualifier!");
+}
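+
+// Illustrative input: `const const int x;` hits the TypeQualifiers & T case
+// above; in C99 and later the duplicate only warns
+// (warn_duplicate_declspec), while in C89 and C++ it is diagnosed as an
+// extension (ext_duplicate_declspec).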
+
+bool DeclSpec::setFunctionSpecInline(SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID) {
+ // 'inline inline' is ok. However, since this is likely not what the user
+ // intended, we will always warn, similar to duplicates of type qualifiers.
+ if (FS_inline_specified) {
+ DiagID = diag::warn_duplicate_declspec;
+ PrevSpec = "inline";
+ return true;
+ }
+ FS_inline_specified = true;
+ FS_inlineLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::setFunctionSpecForceInline(SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID) {
+ if (FS_forceinline_specified) {
+ DiagID = diag::warn_duplicate_declspec;
+ PrevSpec = "__forceinline";
+ return true;
+ }
+ FS_forceinline_specified = true;
+ FS_forceinlineLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::setFunctionSpecVirtual(SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID) {
+ // 'virtual virtual' is ok, but warn as this is likely not what the user
+ // intended.
+ if (FS_virtual_specified) {
+ DiagID = diag::warn_duplicate_declspec;
+ PrevSpec = "virtual";
+ return true;
+ }
+ FS_virtual_specified = true;
+ FS_virtualLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::setFunctionSpecExplicit(SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID) {
+ // 'explicit explicit' is ok, but warn as this is likely not what the user
+ // intended.
+ if (FS_explicit_specified) {
+ DiagID = diag::warn_duplicate_declspec;
+ PrevSpec = "explicit";
+ return true;
+ }
+ FS_explicit_specified = true;
+ FS_explicitLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::setFunctionSpecNoreturn(SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID) {
+ // '_Noreturn _Noreturn' is ok, but warn as this is likely not what the user
+ // intended.
+ if (FS_noreturn_specified) {
+ DiagID = diag::warn_duplicate_declspec;
+ PrevSpec = "_Noreturn";
+ return true;
+ }
+ FS_noreturn_specified = true;
+ FS_noreturnLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetFriendSpec(SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID) {
+ if (Friend_specified) {
+ PrevSpec = "friend";
+ // Keep the later location, so that we can later diagnose ill-formed
+ // declarations like 'friend class X friend;'. Per [class.friend]p3,
+ // 'friend' must be the first token in a friend declaration that is
+ // not a function declaration.
+ FriendLoc = Loc;
+ DiagID = diag::warn_duplicate_declspec;
+ return true;
+ }
+
+ Friend_specified = true;
+ FriendLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::setModulePrivateSpec(SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID) {
+ if (isModulePrivateSpecified()) {
+ PrevSpec = "__module_private__";
+ DiagID = diag::ext_duplicate_declspec;
+ return true;
+ }
+
+ ModulePrivateLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetConstexprSpec(SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID) {
+ // 'constexpr constexpr' is ok, but warn as this is likely not what the user
+ // intended.
+ if (Constexpr_specified) {
+ DiagID = diag::warn_duplicate_declspec;
+ PrevSpec = "constexpr";
+ return true;
+ }
+ Constexpr_specified = true;
+ ConstexprLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetConceptSpec(SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID) {
+ if (Concept_specified) {
+ DiagID = diag::ext_duplicate_declspec;
+ PrevSpec = "concept";
+ return true;
+ }
+ Concept_specified = true;
+ ConceptLoc = Loc;
+ return false;
+}
+
+void DeclSpec::SaveWrittenBuiltinSpecs() {
+ writtenBS.Sign = getTypeSpecSign();
+ writtenBS.Width = getTypeSpecWidth();
+ writtenBS.Type = getTypeSpecType();
+ // Search the list of attributes for the presence of a mode attribute.
+ writtenBS.ModeAttr = false;
+ AttributeList* attrs = getAttributes().getList();
+ while (attrs) {
+ if (attrs->getKind() == AttributeList::AT_Mode) {
+ writtenBS.ModeAttr = true;
+ break;
+ }
+ attrs = attrs->getNext();
+ }
+}
+
+/// Finish - This does final analysis of the declspec, rejecting things like
+/// "_Imaginary" (lacking an FP type); diagnostics are emitted directly via
+/// Sema rather than returned. After calling this method, DeclSpec is
+/// guaranteed self-consistent, even if an error occurred.
+void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
+ // Before possibly changing their values, save specs as written.
+ SaveWrittenBuiltinSpecs();
+
+ // Check the type specifier components first.
+
+ // If decltype(auto) is used, no other type specifiers are permitted.
+ if (TypeSpecType == TST_decltype_auto &&
+ (TypeSpecWidth != TSW_unspecified ||
+ TypeSpecComplex != TSC_unspecified ||
+ TypeSpecSign != TSS_unspecified ||
+ TypeAltiVecVector || TypeAltiVecPixel || TypeAltiVecBool ||
+ TypeQualifiers)) {
+ const unsigned NumLocs = 8;
+ SourceLocation ExtraLocs[NumLocs] = {
+ TSWLoc, TSCLoc, TSSLoc, AltiVecLoc,
+ TQ_constLoc, TQ_restrictLoc, TQ_volatileLoc, TQ_atomicLoc
+ };
+ FixItHint Hints[NumLocs];
+ SourceLocation FirstLoc;
+ for (unsigned I = 0; I != NumLocs; ++I) {
+ if (ExtraLocs[I].isValid()) {
+ if (FirstLoc.isInvalid() ||
+ S.getSourceManager().isBeforeInTranslationUnit(ExtraLocs[I],
+ FirstLoc))
+ FirstLoc = ExtraLocs[I];
+ Hints[I] = FixItHint::CreateRemoval(ExtraLocs[I]);
+ }
+ }
+ TypeSpecWidth = TSW_unspecified;
+ TypeSpecComplex = TSC_unspecified;
+ TypeSpecSign = TSS_unspecified;
+ TypeAltiVecVector = TypeAltiVecPixel = TypeAltiVecBool = false;
+ TypeQualifiers = 0;
+ S.Diag(TSTLoc, diag::err_decltype_auto_cannot_be_combined)
+ << Hints[0] << Hints[1] << Hints[2] << Hints[3]
+ << Hints[4] << Hints[5] << Hints[6] << Hints[7];
+ }
+
+ // Validate and finalize AltiVec vector declspec.
+ if (TypeAltiVecVector) {
+ if (TypeAltiVecBool) {
+ // Sign specifiers are not allowed with vector bool. (PIM 2.1)
+ if (TypeSpecSign != TSS_unspecified) {
+ S.Diag(TSSLoc, diag::err_invalid_vector_bool_decl_spec)
+ << getSpecifierName((TSS)TypeSpecSign);
+ }
+
+ // Only char/int are valid with vector bool. (PIM 2.1)
+ if (((TypeSpecType != TST_unspecified) && (TypeSpecType != TST_char) &&
+ (TypeSpecType != TST_int)) || TypeAltiVecPixel) {
+ S.Diag(TSTLoc, diag::err_invalid_vector_bool_decl_spec)
+ << (TypeAltiVecPixel ? "__pixel" :
+ getSpecifierName((TST)TypeSpecType, Policy));
+ }
+
+ // Only 'short' and 'long long' are valid with vector bool. (PIM 2.1)
+ if ((TypeSpecWidth != TSW_unspecified) && (TypeSpecWidth != TSW_short) &&
+ (TypeSpecWidth != TSW_longlong))
+ S.Diag(TSWLoc, diag::err_invalid_vector_bool_decl_spec)
+ << getSpecifierName((TSW)TypeSpecWidth);
+
+ // vector bool long long requires VSX support or ZVector.
+ if ((TypeSpecWidth == TSW_longlong) &&
+ (!S.Context.getTargetInfo().hasFeature("vsx")) &&
+ (!S.Context.getTargetInfo().hasFeature("power8-vector")) &&
+ !S.getLangOpts().ZVector)
+ S.Diag(TSTLoc, diag::err_invalid_vector_long_long_decl_spec);
+
+ // Elements of vector bool are interpreted as unsigned. (PIM 2.1)
+ if ((TypeSpecType == TST_char) || (TypeSpecType == TST_int) ||
+ (TypeSpecWidth != TSW_unspecified))
+ TypeSpecSign = TSS_unsigned;
+ } else if (TypeSpecType == TST_double) {
+ // vector long double and vector long long double are never allowed.
+ // vector double is OK for Power7 and later, and ZVector.
+ if (TypeSpecWidth == TSW_long || TypeSpecWidth == TSW_longlong)
+ S.Diag(TSWLoc, diag::err_invalid_vector_long_double_decl_spec);
+ else if (!S.Context.getTargetInfo().hasFeature("vsx") &&
+ !S.getLangOpts().ZVector)
+ S.Diag(TSTLoc, diag::err_invalid_vector_double_decl_spec);
+ } else if (TypeSpecType == TST_float) {
+ // vector float is unsupported for ZVector.
+ if (S.getLangOpts().ZVector)
+ S.Diag(TSTLoc, diag::err_invalid_vector_float_decl_spec);
+ } else if (TypeSpecWidth == TSW_long) {
+ // vector long is unsupported for ZVector and deprecated for AltiVec.
+ if (S.getLangOpts().ZVector)
+ S.Diag(TSWLoc, diag::err_invalid_vector_long_decl_spec);
+ else
+ S.Diag(TSWLoc, diag::warn_vector_long_decl_spec_combination)
+ << getSpecifierName((TST)TypeSpecType, Policy);
+ }
+
+ if (TypeAltiVecPixel) {
+      // TODO: perform validation.
+ TypeSpecType = TST_int;
+ TypeSpecSign = TSS_unsigned;
+ TypeSpecWidth = TSW_short;
+ TypeSpecOwned = false;
+ }
+ }
+
+ // signed/unsigned are only valid with int/char/wchar_t.
+ if (TypeSpecSign != TSS_unspecified) {
+ if (TypeSpecType == TST_unspecified)
+ TypeSpecType = TST_int; // unsigned -> unsigned int, signed -> signed int.
+ else if (TypeSpecType != TST_int && TypeSpecType != TST_int128 &&
+ TypeSpecType != TST_char && TypeSpecType != TST_wchar) {
+ S.Diag(TSSLoc, diag::err_invalid_sign_spec)
+ << getSpecifierName((TST)TypeSpecType, Policy);
+ // signed double -> double.
+ TypeSpecSign = TSS_unspecified;
+ }
+ }
+
+ // Validate the width of the type.
+ switch (TypeSpecWidth) {
+ case TSW_unspecified: break;
+ case TSW_short: // short int
+ case TSW_longlong: // long long int
+ if (TypeSpecType == TST_unspecified)
+ TypeSpecType = TST_int; // short -> short int, long long -> long long int.
+ else if (TypeSpecType != TST_int) {
+ S.Diag(TSWLoc, diag::err_invalid_width_spec) << (int)TypeSpecWidth
+ << getSpecifierName((TST)TypeSpecType, Policy);
+ TypeSpecType = TST_int;
+ TypeSpecOwned = false;
+ }
+ break;
+ case TSW_long: // long double, long int
+ if (TypeSpecType == TST_unspecified)
+ TypeSpecType = TST_int; // long -> long int.
+ else if (TypeSpecType != TST_int && TypeSpecType != TST_double) {
+ S.Diag(TSWLoc, diag::err_invalid_width_spec) << (int)TypeSpecWidth
+ << getSpecifierName((TST)TypeSpecType, Policy);
+ TypeSpecType = TST_int;
+ TypeSpecOwned = false;
+ }
+ break;
+ }
+
+ // TODO: if the implementation does not implement _Complex or _Imaginary,
+ // disallow their use. Need information about the backend.
+ if (TypeSpecComplex != TSC_unspecified) {
+ if (TypeSpecType == TST_unspecified) {
+ S.Diag(TSCLoc, diag::ext_plain_complex)
+ << FixItHint::CreateInsertion(
+ S.getLocForEndOfToken(getTypeSpecComplexLoc()),
+ " double");
+ TypeSpecType = TST_double; // _Complex -> _Complex double.
+ } else if (TypeSpecType == TST_int || TypeSpecType == TST_char) {
+ // Note that this intentionally doesn't include _Complex _Bool.
+ if (!S.getLangOpts().CPlusPlus)
+ S.Diag(TSTLoc, diag::ext_integer_complex);
+ } else if (TypeSpecType != TST_float && TypeSpecType != TST_double) {
+ S.Diag(TSCLoc, diag::err_invalid_complex_spec)
+ << getSpecifierName((TST)TypeSpecType, Policy);
+ TypeSpecComplex = TSC_unspecified;
+ }
+ }
+
+ // C11 6.7.1/3, C++11 [dcl.stc]p1, GNU TLS: __thread, thread_local and
+ // _Thread_local can only appear with the 'static' and 'extern' storage class
+ // specifiers. We also allow __private_extern__ as an extension.
+ if (ThreadStorageClassSpec != TSCS_unspecified) {
+ switch (StorageClassSpec) {
+ case SCS_unspecified:
+ case SCS_extern:
+ case SCS_private_extern:
+ case SCS_static:
+ break;
+ default:
+ if (S.getSourceManager().isBeforeInTranslationUnit(
+ getThreadStorageClassSpecLoc(), getStorageClassSpecLoc()))
+ S.Diag(getStorageClassSpecLoc(),
+ diag::err_invalid_decl_spec_combination)
+ << DeclSpec::getSpecifierName(getThreadStorageClassSpec())
+ << SourceRange(getThreadStorageClassSpecLoc());
+ else
+ S.Diag(getThreadStorageClassSpecLoc(),
+ diag::err_invalid_decl_spec_combination)
+ << DeclSpec::getSpecifierName(getStorageClassSpec())
+ << SourceRange(getStorageClassSpecLoc());
+ // Discard the thread storage class specifier to recover.
+ ThreadStorageClassSpec = TSCS_unspecified;
+ ThreadStorageClassSpecLoc = SourceLocation();
+ }
+ }
+
+ // If no type specifier was provided and we're parsing a language where
+ // the type specifier is not optional, but we got 'auto' as a storage
+ // class specifier, then assume this is an attempt to use C++0x's 'auto'
+ // type specifier.
+ if (S.getLangOpts().CPlusPlus &&
+ TypeSpecType == TST_unspecified && StorageClassSpec == SCS_auto) {
+ TypeSpecType = TST_auto;
+ StorageClassSpec = SCS_unspecified;
+ TSTLoc = TSTNameLoc = StorageClassSpecLoc;
+ StorageClassSpecLoc = SourceLocation();
+ }
+ // Diagnose if we've recovered from an ill-formed 'auto' storage class
+ // specifier in a pre-C++11 dialect of C++.
+ if (!S.getLangOpts().CPlusPlus11 && TypeSpecType == TST_auto)
+ S.Diag(TSTLoc, diag::ext_auto_type_specifier);
+ if (S.getLangOpts().CPlusPlus && !S.getLangOpts().CPlusPlus11 &&
+ StorageClassSpec == SCS_auto)
+ S.Diag(StorageClassSpecLoc, diag::warn_auto_storage_class)
+ << FixItHint::CreateRemoval(StorageClassSpecLoc);
+ if (TypeSpecType == TST_char16 || TypeSpecType == TST_char32)
+ S.Diag(TSTLoc, diag::warn_cxx98_compat_unicode_type)
+ << (TypeSpecType == TST_char16 ? "char16_t" : "char32_t");
+ if (Constexpr_specified)
+ S.Diag(ConstexprLoc, diag::warn_cxx98_compat_constexpr);
+
+ // C++ [class.friend]p6:
+ // No storage-class-specifier shall appear in the decl-specifier-seq
+ // of a friend declaration.
+ if (isFriendSpecified() &&
+ (getStorageClassSpec() || getThreadStorageClassSpec())) {
+ SmallString<32> SpecName;
+ SourceLocation SCLoc;
+ FixItHint StorageHint, ThreadHint;
+
+ if (DeclSpec::SCS SC = getStorageClassSpec()) {
+ SpecName = getSpecifierName(SC);
+ SCLoc = getStorageClassSpecLoc();
+ StorageHint = FixItHint::CreateRemoval(SCLoc);
+ }
+
+ if (DeclSpec::TSCS TSC = getThreadStorageClassSpec()) {
+ if (!SpecName.empty()) SpecName += " ";
+ SpecName += getSpecifierName(TSC);
+ SCLoc = getThreadStorageClassSpecLoc();
+ ThreadHint = FixItHint::CreateRemoval(SCLoc);
+ }
+
+ S.Diag(SCLoc, diag::err_friend_decl_spec)
+ << SpecName << StorageHint << ThreadHint;
+
+ ClearStorageClassSpecs();
+ }
+
+ // C++11 [dcl.fct.spec]p5:
+ // The virtual specifier shall be used only in the initial
+ // declaration of a non-static class member function;
+ // C++11 [dcl.fct.spec]p6:
+ // The explicit specifier shall be used only in the declaration of
+ // a constructor or conversion function within its class
+ // definition;
+ if (isFriendSpecified() && (isVirtualSpecified() || isExplicitSpecified())) {
+ StringRef Keyword;
+ SourceLocation SCLoc;
+
+ if (isVirtualSpecified()) {
+ Keyword = "virtual";
+ SCLoc = getVirtualSpecLoc();
+ } else {
+ Keyword = "explicit";
+ SCLoc = getExplicitSpecLoc();
+ }
+
+ FixItHint Hint = FixItHint::CreateRemoval(SCLoc);
+ S.Diag(SCLoc, diag::err_friend_decl_spec)
+ << Keyword << Hint;
+
+ FS_virtual_specified = FS_explicit_specified = false;
+ FS_virtualLoc = FS_explicitLoc = SourceLocation();
+ }
+
+ assert(!TypeSpecOwned || isDeclRep((TST) TypeSpecType));
+
+ // Okay, now we can infer the real type.
+
+ // TODO: return "auto function" and other bad things based on the real type.
+
+ // 'data definition has no type or storage class'?
+}
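+
+// Illustrative recoveries performed above (hypothetical inputs):
+//   signed float f;  -> err_invalid_sign_spec; the sign specifier is dropped.
+//   short double d;  -> err_invalid_width_spec; the type decays to int.
+//   _Complex c;      -> ext_plain_complex with a fix-it inserting " double".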
+
+bool DeclSpec::isMissingDeclaratorOk() {
+ TST tst = getTypeSpecType();
+ return isDeclRep(tst) && getRepAsDecl() != nullptr &&
+ StorageClassSpec != DeclSpec::SCS_typedef;
+}
+
+void UnqualifiedId::setOperatorFunctionId(SourceLocation OperatorLoc,
+ OverloadedOperatorKind Op,
+ SourceLocation SymbolLocations[3]) {
+ Kind = IK_OperatorFunctionId;
+ StartLocation = OperatorLoc;
+ EndLocation = OperatorLoc;
+ OperatorFunctionId.Operator = Op;
+ for (unsigned I = 0; I != 3; ++I) {
+ OperatorFunctionId.SymbolLocations[I] = SymbolLocations[I].getRawEncoding();
+
+ if (SymbolLocations[I].isValid())
+ EndLocation = SymbolLocations[I];
+ }
+}
+
+bool VirtSpecifiers::SetSpecifier(Specifier VS, SourceLocation Loc,
+ const char *&PrevSpec) {
+ if (!FirstLocation.isValid())
+ FirstLocation = Loc;
+ LastLocation = Loc;
+ LastSpecifier = VS;
+
+ if (Specifiers & VS) {
+ PrevSpec = getSpecifierName(VS);
+ return true;
+ }
+
+ Specifiers |= VS;
+
+ switch (VS) {
+ default: llvm_unreachable("Unknown specifier!");
+ case VS_Override: VS_overrideLoc = Loc; break;
+ case VS_Sealed:
+ case VS_Final: VS_finalLoc = Loc; break;
+ }
+
+ return false;
+}
+
+const char *VirtSpecifiers::getSpecifierName(Specifier VS) {
+ switch (VS) {
+ default: llvm_unreachable("Unknown specifier");
+ case VS_Override: return "override";
+ case VS_Final: return "final";
+ case VS_Sealed: return "sealed";
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp b/contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp
new file mode 100644
index 0000000..ceea04f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp
@@ -0,0 +1,72 @@
+//===--- DelayedDiagnostic.cpp - Delayed declarator diagnostics -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DelayedDiagnostic class implementation, which
+// is used to record diagnostics that are being conditionally produced
+// during declarator parsing.
+//
+// This file also defines AccessedEntity.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Sema/DelayedDiagnostic.h"
+#include <string.h>
+using namespace clang;
+using namespace sema;
+
+DelayedDiagnostic
+DelayedDiagnostic::makeAvailability(Sema::AvailabilityDiagnostic AD,
+ SourceLocation Loc,
+ const NamedDecl *D,
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ const ObjCPropertyDecl *ObjCProperty,
+ StringRef Msg,
+ bool ObjCPropertyAccess) {
+ DelayedDiagnostic DD;
+ switch (AD) {
+ case Sema::AD_Deprecation:
+ DD.Kind = Deprecation;
+ break;
+ case Sema::AD_Unavailable:
+ DD.Kind = Unavailable;
+ break;
+ case Sema::AD_Partial:
+ llvm_unreachable("AD_Partial diags should not be delayed");
+ }
+ DD.Triggered = false;
+ DD.Loc = Loc;
+ DD.DeprecationData.Decl = D;
+ DD.DeprecationData.UnknownObjCClass = UnknownObjCClass;
+ DD.DeprecationData.ObjCProperty = ObjCProperty;
+ char *MessageData = nullptr;
+ if (Msg.size()) {
+ MessageData = new char [Msg.size()];
+ memcpy(MessageData, Msg.data(), Msg.size());
+ }
+
+ DD.DeprecationData.Message = MessageData;
+ DD.DeprecationData.MessageLen = Msg.size();
+ DD.DeprecationData.ObjCPropertyAccess = ObjCPropertyAccess;
+ return DD;
+}
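+// Note that the Message buffer allocated above with 'new char[]' is owned by
+// the returned DelayedDiagnostic; it is released only by Destroy()
+// ('delete [] DeprecationData.Message' below), which therefore must be called
+// for Deprecation and Unavailable diagnostics to avoid leaking the copy.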
+
+void DelayedDiagnostic::Destroy() {
+ switch (static_cast<DDKind>(Kind)) {
+ case Access:
+ getAccessData().~AccessedEntity();
+ break;
+
+ case Deprecation:
+ case Unavailable:
+ delete [] DeprecationData.Message;
+ break;
+
+ case ForbiddenType:
+ break;
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp b/contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp
new file mode 100644
index 0000000..53263ba
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp
@@ -0,0 +1,422 @@
+//===- IdentifierResolver.cpp - Lexical Scope Name lookup -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the IdentifierResolver class, which is used for
+// lexically scoped name lookup based on declaration names.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/IdentifierResolver.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Lex/ExternalPreprocessorSource.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Scope.h"
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// IdDeclInfoMap class
+//===----------------------------------------------------------------------===//
+
+/// IdDeclInfoMap - Associates IdDeclInfos with declaration names.
+/// Allocates 'pools' (fixed-size arrays of IdDeclInfos) to avoid allocating
+/// each individual IdDeclInfo on the heap.
+class IdentifierResolver::IdDeclInfoMap {
+ static const unsigned int POOL_SIZE = 512;
+
+ /// We use our own linked-list implementation because it is sadly
+ /// impossible to add something to a pre-C++0x STL container without
+ /// a completely unnecessary copy.
+ struct IdDeclInfoPool {
+ IdDeclInfoPool(IdDeclInfoPool *Next) : Next(Next) {}
+
+ IdDeclInfoPool *Next;
+ IdDeclInfo Pool[POOL_SIZE];
+ };
+
+ IdDeclInfoPool *CurPool;
+ unsigned int CurIndex;
+
+public:
+ IdDeclInfoMap() : CurPool(nullptr), CurIndex(POOL_SIZE) {}
+
+ ~IdDeclInfoMap() {
+ IdDeclInfoPool *Cur = CurPool;
+ while (IdDeclInfoPool *P = Cur) {
+ Cur = Cur->Next;
+ delete P;
+ }
+ }
+
+  /// Returns the IdDeclInfo associated with the DeclarationName.
+ /// It creates a new IdDeclInfo if one was not created before for this id.
+ IdDeclInfo &operator[](DeclarationName Name);
+};
+
+
+//===----------------------------------------------------------------------===//
+// IdDeclInfo Implementation
+//===----------------------------------------------------------------------===//
+
+/// RemoveDecl - Remove the decl from the scope chain.
+/// The decl must already be part of the decl chain.
+void IdentifierResolver::IdDeclInfo::RemoveDecl(NamedDecl *D) {
+ for (DeclsTy::iterator I = Decls.end(); I != Decls.begin(); --I) {
+ if (D == *(I-1)) {
+ Decls.erase(I-1);
+ return;
+ }
+ }
+
+ llvm_unreachable("Didn't find this decl on its identifier's chain!");
+}
+
+//===----------------------------------------------------------------------===//
+// IdentifierResolver Implementation
+//===----------------------------------------------------------------------===//
+
+IdentifierResolver::IdentifierResolver(Preprocessor &PP)
+ : LangOpt(PP.getLangOpts()), PP(PP),
+ IdDeclInfos(new IdDeclInfoMap) {
+}
+
+IdentifierResolver::~IdentifierResolver() {
+ delete IdDeclInfos;
+}
+
+/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
+/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
+/// true if 'D' belongs to the given declaration context.
+bool IdentifierResolver::isDeclInScope(Decl *D, DeclContext *Ctx, Scope *S,
+ bool AllowInlineNamespace) const {
+ Ctx = Ctx->getRedeclContext();
+
+ if (Ctx->isFunctionOrMethod() || (S && S->isFunctionPrototypeScope())) {
+    // Ignore scopes associated with transparent declaration contexts.
+ while (S->getEntity() && S->getEntity()->isTransparentContext())
+ S = S->getParent();
+
+ if (S->isDeclScope(D))
+ return true;
+ if (LangOpt.CPlusPlus) {
+ // C++ 3.3.2p3:
+ // The name declared in a catch exception-declaration is local to the
+ // handler and shall not be redeclared in the outermost block of the
+ // handler.
+ // C++ 3.3.2p4:
+ // Names declared in the for-init-statement, and in the condition of if,
+ // while, for, and switch statements are local to the if, while, for, or
+ // switch statement (including the controlled statement), and shall not be
+ // redeclared in a subsequent condition of that statement nor in the
+ // outermost block (or, for the if statement, any of the outermost blocks)
+ // of the controlled statement.
+ //
+ assert(S->getParent() && "No TUScope?");
+ if (S->getParent()->getFlags() & Scope::ControlScope) {
+ S = S->getParent();
+ if (S->isDeclScope(D))
+ return true;
+ }
+ if (S->getFlags() & Scope::FnTryCatchScope)
+ return S->getParent()->isDeclScope(D);
+ }
+ return false;
+ }
+
+ // FIXME: If D is a local extern declaration, this check doesn't make sense;
+ // we should be checking its lexical context instead in that case, because
+ // that is its scope.
+ DeclContext *DCtx = D->getDeclContext()->getRedeclContext();
+ return AllowInlineNamespace ? Ctx->InEnclosingNamespaceSetOf(DCtx)
+ : Ctx->Equals(DCtx);
+}
+
+/// AddDecl - Link the decl to its shadowed decl chain.
+void IdentifierResolver::AddDecl(NamedDecl *D) {
+ DeclarationName Name = D->getDeclName();
+ if (IdentifierInfo *II = Name.getAsIdentifierInfo())
+ updatingIdentifier(*II);
+
+ void *Ptr = Name.getFETokenInfo<void>();
+
+ if (!Ptr) {
+ Name.setFETokenInfo(D);
+ return;
+ }
+
+ IdDeclInfo *IDI;
+
+ if (isDeclPtr(Ptr)) {
+ Name.setFETokenInfo(nullptr);
+ IDI = &(*IdDeclInfos)[Name];
+ NamedDecl *PrevD = static_cast<NamedDecl*>(Ptr);
+ IDI->AddDecl(PrevD);
+ } else
+ IDI = toIdDeclInfo(Ptr);
+
+ IDI->AddDecl(D);
+}
+
+void IdentifierResolver::InsertDeclAfter(iterator Pos, NamedDecl *D) {
+ DeclarationName Name = D->getDeclName();
+ if (IdentifierInfo *II = Name.getAsIdentifierInfo())
+ updatingIdentifier(*II);
+
+ void *Ptr = Name.getFETokenInfo<void>();
+
+ if (!Ptr) {
+ AddDecl(D);
+ return;
+ }
+
+ if (isDeclPtr(Ptr)) {
+ // We only have a single declaration: insert before or after it,
+ // as appropriate.
+ if (Pos == iterator()) {
+ // Add the new declaration before the existing declaration.
+ NamedDecl *PrevD = static_cast<NamedDecl*>(Ptr);
+ RemoveDecl(PrevD);
+ AddDecl(D);
+ AddDecl(PrevD);
+ } else {
+ // Add new declaration after the existing declaration.
+ AddDecl(D);
+ }
+
+ return;
+ }
+
+ // General case: insert the declaration at the appropriate point in the
+ // list, which already has at least two elements.
+ IdDeclInfo *IDI = toIdDeclInfo(Ptr);
+ if (Pos.isIterator()) {
+ IDI->InsertDecl(Pos.getIterator() + 1, D);
+ } else
+ IDI->InsertDecl(IDI->decls_begin(), D);
+}
+
+/// RemoveDecl - Unlink the decl from its shadowed decl chain.
+/// The decl must already be part of the decl chain.
+void IdentifierResolver::RemoveDecl(NamedDecl *D) {
+ assert(D && "null param passed");
+ DeclarationName Name = D->getDeclName();
+ if (IdentifierInfo *II = Name.getAsIdentifierInfo())
+ updatingIdentifier(*II);
+
+ void *Ptr = Name.getFETokenInfo<void>();
+
+ assert(Ptr && "Didn't find this decl on its identifier's chain!");
+
+ if (isDeclPtr(Ptr)) {
+ assert(D == Ptr && "Didn't find this decl on its identifier's chain!");
+ Name.setFETokenInfo(nullptr);
+ return;
+ }
+
+ return toIdDeclInfo(Ptr)->RemoveDecl(D);
+}
+
+/// begin - Returns an iterator for decls with name 'Name'.
+IdentifierResolver::iterator
+IdentifierResolver::begin(DeclarationName Name) {
+ if (IdentifierInfo *II = Name.getAsIdentifierInfo())
+ readingIdentifier(*II);
+
+ void *Ptr = Name.getFETokenInfo<void>();
+ if (!Ptr) return end();
+
+ if (isDeclPtr(Ptr))
+ return iterator(static_cast<NamedDecl*>(Ptr));
+
+ IdDeclInfo *IDI = toIdDeclInfo(Ptr);
+
+ IdDeclInfo::DeclsTy::iterator I = IDI->decls_end();
+ if (I != IDI->decls_begin())
+ return iterator(I-1);
+ // No decls found.
+ return end();
+}
+
+namespace {
+ enum DeclMatchKind {
+ DMK_Different,
+ DMK_Replace,
+ DMK_Ignore
+ };
+}
+
+/// \brief Compare two declarations to see whether they are different or,
+/// if they are the same, whether the new declaration should replace the
+/// existing declaration.
+static DeclMatchKind compareDeclarations(NamedDecl *Existing, NamedDecl *New) {
+ // If the declarations are identical, ignore the new one.
+ if (Existing == New)
+ return DMK_Ignore;
+
+ // If the declarations have different kinds, they're obviously different.
+ if (Existing->getKind() != New->getKind())
+ return DMK_Different;
+
+ // If the declarations are redeclarations of each other, keep the newest one.
+ if (Existing->getCanonicalDecl() == New->getCanonicalDecl()) {
+ // If we're adding an imported declaration, don't replace another imported
+ // declaration.
+ if (Existing->isFromASTFile() && New->isFromASTFile())
+ return DMK_Different;
+
+ // If either of these is the most recent declaration, use it.
+ Decl *MostRecent = Existing->getMostRecentDecl();
+ if (Existing == MostRecent)
+ return DMK_Ignore;
+
+ if (New == MostRecent)
+ return DMK_Replace;
+
+ // If the existing declaration is somewhere in the previous declaration
+ // chain of the new declaration, then prefer the new declaration.
+ for (auto RD : New->redecls()) {
+ if (RD == Existing)
+ return DMK_Replace;
+
+ if (RD->isCanonicalDecl())
+ break;
+ }
+
+ return DMK_Ignore;
+ }
+
+ return DMK_Different;
+}
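+// Illustration: if a precompiled header provides 'void f();' and the current
+// translation unit then parses 'void f() {}', the definition is the most
+// recent redeclaration, so the comparison yields DMK_Replace and the new
+// decl supersedes the old one; comparing a declaration against itself
+// yields DMK_Ignore.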
+
+bool IdentifierResolver::tryAddTopLevelDecl(NamedDecl *D, DeclarationName Name){
+ if (IdentifierInfo *II = Name.getAsIdentifierInfo())
+ readingIdentifier(*II);
+
+ void *Ptr = Name.getFETokenInfo<void>();
+
+ if (!Ptr) {
+ Name.setFETokenInfo(D);
+ return true;
+ }
+
+ IdDeclInfo *IDI;
+
+ if (isDeclPtr(Ptr)) {
+ NamedDecl *PrevD = static_cast<NamedDecl*>(Ptr);
+
+ switch (compareDeclarations(PrevD, D)) {
+ case DMK_Different:
+ break;
+
+ case DMK_Ignore:
+ return false;
+
+ case DMK_Replace:
+ Name.setFETokenInfo(D);
+ return true;
+ }
+
+ Name.setFETokenInfo(nullptr);
+ IDI = &(*IdDeclInfos)[Name];
+
+ // If the existing declaration is not visible in translation unit scope,
+ // then add the new top-level declaration first.
+ if (!PrevD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
+ IDI->AddDecl(D);
+ IDI->AddDecl(PrevD);
+ } else {
+ IDI->AddDecl(PrevD);
+ IDI->AddDecl(D);
+ }
+ return true;
+ }
+
+ IDI = toIdDeclInfo(Ptr);
+
+ // See whether this declaration is identical to any existing declarations.
+ // If not, find the right place to insert it.
+ for (IdDeclInfo::DeclsTy::iterator I = IDI->decls_begin(),
+ IEnd = IDI->decls_end();
+ I != IEnd; ++I) {
+
+ switch (compareDeclarations(*I, D)) {
+ case DMK_Different:
+ break;
+
+ case DMK_Ignore:
+ return false;
+
+ case DMK_Replace:
+ *I = D;
+ return true;
+ }
+
+ if (!(*I)->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
+ // We've found a declaration that is not visible from the translation
+ // unit (it's in an inner scope). Insert our declaration here.
+ IDI->InsertDecl(I, D);
+ return true;
+ }
+ }
+
+ // Add the declaration to the end.
+ IDI->AddDecl(D);
+ return true;
+}
+
+void IdentifierResolver::readingIdentifier(IdentifierInfo &II) {
+ if (II.isOutOfDate())
+ PP.getExternalSource()->updateOutOfDateIdentifier(II);
+}
+
+void IdentifierResolver::updatingIdentifier(IdentifierInfo &II) {
+ if (II.isOutOfDate())
+ PP.getExternalSource()->updateOutOfDateIdentifier(II);
+
+ if (II.isFromAST())
+ II.setChangedSinceDeserialization();
+}
+
+//===----------------------------------------------------------------------===//
+// IdDeclInfoMap Implementation
+//===----------------------------------------------------------------------===//
+
+/// Returns the IdDeclInfo associated with the DeclarationName.
+/// It creates a new IdDeclInfo if one was not created before for this id.
+IdentifierResolver::IdDeclInfo &
+IdentifierResolver::IdDeclInfoMap::operator[](DeclarationName Name) {
+ void *Ptr = Name.getFETokenInfo<void>();
+
+ if (Ptr) return *toIdDeclInfo(Ptr);
+
+ if (CurIndex == POOL_SIZE) {
+ CurPool = new IdDeclInfoPool(CurPool);
+ CurIndex = 0;
+ }
+ IdDeclInfo *IDI = &CurPool->Pool[CurIndex];
+ Name.setFETokenInfo(reinterpret_cast<void*>(
+ reinterpret_cast<uintptr_t>(IDI) | 0x1)
+ );
+ ++CurIndex;
+ return *IDI;
+}
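+// The '| 0x1' above tags the pointer so that isDeclPtr/toIdDeclInfo
+// (declared in the header) can distinguish the two states of FETokenInfo.
+// A minimal sketch of what those helpers are assumed to look like:
+//
+//   static bool isDeclPtr(void *P) {
+//     // Bit clear: the token info is a bare NamedDecl*.
+//     return (reinterpret_cast<uintptr_t>(P) & 0x1) == 0;
+//   }
+//   static IdDeclInfo *toIdDeclInfo(void *P) {
+//     // Bit set: mask it off to recover the IdDeclInfo*.
+//     return reinterpret_cast<IdDeclInfo *>(
+//         reinterpret_cast<uintptr_t>(P) & ~uintptr_t(0x1));
+//   }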
+
+void IdentifierResolver::iterator::incrementSlowCase() {
+ NamedDecl *D = **this;
+ void *InfoPtr = D->getDeclName().getFETokenInfo<void>();
+ assert(!isDeclPtr(InfoPtr) && "Decl with wrong id ?");
+ IdDeclInfo *Info = toIdDeclInfo(InfoPtr);
+
+ BaseIter I = getIterator();
+ if (I != Info->decls_begin())
+ *this = iterator(I-1);
+ else // No more decls.
+ *this = iterator();
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp b/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp
new file mode 100644
index 0000000..c394d24
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp
@@ -0,0 +1,855 @@
+//===--- JumpDiagnostics.cpp - Protected scope jump analysis ------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the JumpScopeChecker class, which is used to diagnose
+// jumps that enter a protected scope in an invalid way.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "llvm/ADT/BitVector.h"
+using namespace clang;
+
+namespace {
+
+/// JumpScopeChecker - This object is used by Sema to diagnose invalid jumps
+/// into VLA and other protected scopes. For example, this rejects:
+/// goto L;
+/// int a[n];
+/// L:
+///
+class JumpScopeChecker {
+ Sema &S;
+
+ /// Permissive - True when recovering from errors, in which case precautions
+ /// are taken to handle incomplete scope information.
+ const bool Permissive;
+
+ /// GotoScope - This is a record that we use to keep track of all of the
+ /// scopes that are introduced by VLAs and other things that scope jumps like
+ /// gotos. This scope tree has nothing to do with the source scope tree,
+ /// because you can have multiple VLA scopes per compound statement, and most
+ /// compound statements don't introduce any scopes.
+ struct GotoScope {
+    /// ParentScope - The index in ScopeMap of the parent scope. This is 0
+    /// when the parent scope is the function body.
+ unsigned ParentScope;
+
+ /// InDiag - The note to emit if there is a jump into this scope.
+ unsigned InDiag;
+
+ /// OutDiag - The note to emit if there is an indirect jump out
+ /// of this scope. Direct jumps always clean up their current scope
+ /// in an orderly way.
+ unsigned OutDiag;
+
+ /// Loc - Location to emit the diagnostic.
+ SourceLocation Loc;
+
+ GotoScope(unsigned parentScope, unsigned InDiag, unsigned OutDiag,
+ SourceLocation L)
+ : ParentScope(parentScope), InDiag(InDiag), OutDiag(OutDiag), Loc(L) {}
+ };
+
+ SmallVector<GotoScope, 48> Scopes;
+ llvm::DenseMap<Stmt*, unsigned> LabelAndGotoScopes;
+ SmallVector<Stmt*, 16> Jumps;
+
+ SmallVector<IndirectGotoStmt*, 4> IndirectJumps;
+ SmallVector<LabelDecl*, 4> IndirectJumpTargets;
+public:
+ JumpScopeChecker(Stmt *Body, Sema &S);
+private:
+ void BuildScopeInformation(Decl *D, unsigned &ParentScope);
+ void BuildScopeInformation(VarDecl *D, const BlockDecl *BDecl,
+ unsigned &ParentScope);
+ void BuildScopeInformation(Stmt *S, unsigned &origParentScope);
+
+ void VerifyJumps();
+ void VerifyIndirectJumps();
+ void NoteJumpIntoScopes(ArrayRef<unsigned> ToScopes);
+ void DiagnoseIndirectJump(IndirectGotoStmt *IG, unsigned IGScope,
+ LabelDecl *Target, unsigned TargetScope);
+ void CheckJump(Stmt *From, Stmt *To, SourceLocation DiagLoc,
+ unsigned JumpDiag, unsigned JumpDiagWarning,
+ unsigned JumpDiagCXX98Compat);
+ void CheckGotoStmt(GotoStmt *GS);
+
+ unsigned GetDeepestCommonScope(unsigned A, unsigned B);
+};
+} // end anonymous namespace
+
+#define CHECK_PERMISSIVE(x) (assert(Permissive || !(x)), (Permissive && (x)))
+
+JumpScopeChecker::JumpScopeChecker(Stmt *Body, Sema &s)
+ : S(s), Permissive(s.hasAnyUnrecoverableErrorsInThisFunction()) {
+ // Add a scope entry for function scope.
+ Scopes.push_back(GotoScope(~0U, ~0U, ~0U, SourceLocation()));
+
+ // Build information for the top level compound statement, so that we have a
+ // defined scope record for every "goto" and label.
+ unsigned BodyParentScope = 0;
+ BuildScopeInformation(Body, BodyParentScope);
+
+ // Check that all jumps we saw are kosher.
+ VerifyJumps();
+ VerifyIndirectJumps();
+}
+
+/// GetDeepestCommonScope - Finds the innermost scope enclosing the
+/// two scopes.
+unsigned JumpScopeChecker::GetDeepestCommonScope(unsigned A, unsigned B) {
+ while (A != B) {
+ // Inner scopes are created after outer scopes and therefore have
+ // higher indices.
+ if (A < B) {
+ assert(Scopes[B].ParentScope < B);
+ B = Scopes[B].ParentScope;
+ } else {
+ assert(Scopes[A].ParentScope < A);
+ A = Scopes[A].ParentScope;
+ }
+ }
+ return A;
+}
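+// Worked example: with parent links 3 -> 1 -> 0 and 2 -> 0, a call
+// GetDeepestCommonScope(3, 2) first replaces A=3 by its parent 1 (A > B),
+// then B=2 by its parent 0, then A=1 by 0, and returns scope 0.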
+
+typedef std::pair<unsigned,unsigned> ScopePair;
+
+/// GetDiagForGotoScopeDecl - If this decl induces a new goto scope, return a
+/// diagnostic that should be emitted if control goes over it. If not, return 0.
+static ScopePair GetDiagForGotoScopeDecl(Sema &S, const Decl *D) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ unsigned InDiag = 0;
+ unsigned OutDiag = 0;
+
+ if (VD->getType()->isVariablyModifiedType())
+ InDiag = diag::note_protected_by_vla;
+
+ if (VD->hasAttr<BlocksAttr>())
+ return ScopePair(diag::note_protected_by___block,
+ diag::note_exits___block);
+
+ if (VD->hasAttr<CleanupAttr>())
+ return ScopePair(diag::note_protected_by_cleanup,
+ diag::note_exits_cleanup);
+
+ if (VD->hasLocalStorage()) {
+ switch (VD->getType().isDestructedType()) {
+ case QualType::DK_objc_strong_lifetime:
+ return ScopePair(diag::note_protected_by_objc_strong_init,
+ diag::note_exits_objc_strong);
+
+ case QualType::DK_objc_weak_lifetime:
+ return ScopePair(diag::note_protected_by_objc_weak_init,
+ diag::note_exits_objc_weak);
+
+ case QualType::DK_cxx_destructor:
+ OutDiag = diag::note_exits_dtor;
+ break;
+
+ case QualType::DK_none:
+ break;
+ }
+ }
+
+ const Expr *Init = VD->getInit();
+ if (S.Context.getLangOpts().CPlusPlus && VD->hasLocalStorage() && Init) {
+ // C++11 [stmt.dcl]p3:
+ // A program that jumps from a point where a variable with automatic
+ // storage duration is not in scope to a point where it is in scope
+ // is ill-formed unless the variable has scalar type, class type with
+ // a trivial default constructor and a trivial destructor, a
+ // cv-qualified version of one of these types, or an array of one of
+ // the preceding types and is declared without an initializer.
+
+      // C++03 [stmt.dcl]p3:
+ // A program that jumps from a point where a local variable
+ // with automatic storage duration is not in scope to a point
+ // where it is in scope is ill-formed unless the variable has
+ // POD type and is declared without an initializer.
+
+ InDiag = diag::note_protected_by_variable_init;
+
+ // For a variable of (array of) class type declared without an
+ // initializer, we will have call-style initialization and the initializer
+ // will be the CXXConstructExpr with no intervening nodes.
+ if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
+ const CXXConstructorDecl *Ctor = CCE->getConstructor();
+ if (Ctor->isTrivial() && Ctor->isDefaultConstructor() &&
+ VD->getInitStyle() == VarDecl::CallInit) {
+ if (OutDiag)
+ InDiag = diag::note_protected_by_variable_nontriv_destructor;
+ else if (!Ctor->getParent()->isPOD())
+ InDiag = diag::note_protected_by_variable_non_pod;
+ else
+ InDiag = 0;
+ }
+ }
+ }
+
+ return ScopePair(InDiag, OutDiag);
+ }
+
+ if (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+ if (TD->getUnderlyingType()->isVariablyModifiedType())
+ return ScopePair(isa<TypedefDecl>(TD)
+ ? diag::note_protected_by_vla_typedef
+ : diag::note_protected_by_vla_type_alias,
+ 0);
+ }
+
+ return ScopePair(0U, 0U);
+}
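+// Illustration of the VLA case above (C):
+//
+//   goto L;    // error: cannot jump from this goto statement to its label
+//   int a[n];  // note: jump bypasses initialization of variable length
+//              //       array
+// L: ;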
+
+/// \brief Build scope information for a declaration that is part of a DeclStmt.
+void JumpScopeChecker::BuildScopeInformation(Decl *D, unsigned &ParentScope) {
+ // If this decl causes a new scope, push and switch to it.
+ std::pair<unsigned,unsigned> Diags = GetDiagForGotoScopeDecl(S, D);
+ if (Diags.first || Diags.second) {
+ Scopes.push_back(GotoScope(ParentScope, Diags.first, Diags.second,
+ D->getLocation()));
+ ParentScope = Scopes.size()-1;
+ }
+
+ // If the decl has an initializer, walk it with the potentially new
+ // scope we just installed.
+ if (VarDecl *VD = dyn_cast<VarDecl>(D))
+ if (Expr *Init = VD->getInit())
+ BuildScopeInformation(Init, ParentScope);
+}
+
+/// \brief Build scope information for captured block literal variables.
+void JumpScopeChecker::BuildScopeInformation(VarDecl *D,
+ const BlockDecl *BDecl,
+ unsigned &ParentScope) {
+  // Exclude captured __block variables; there's no destructor
+  // associated with the block literal for them.
+ if (D->hasAttr<BlocksAttr>())
+ return;
+ QualType T = D->getType();
+ QualType::DestructionKind destructKind = T.isDestructedType();
+ if (destructKind != QualType::DK_none) {
+ std::pair<unsigned,unsigned> Diags;
+ switch (destructKind) {
+ case QualType::DK_cxx_destructor:
+ Diags = ScopePair(diag::note_enters_block_captures_cxx_obj,
+ diag::note_exits_block_captures_cxx_obj);
+ break;
+ case QualType::DK_objc_strong_lifetime:
+ Diags = ScopePair(diag::note_enters_block_captures_strong,
+ diag::note_exits_block_captures_strong);
+ break;
+ case QualType::DK_objc_weak_lifetime:
+ Diags = ScopePair(diag::note_enters_block_captures_weak,
+ diag::note_exits_block_captures_weak);
+ break;
+ case QualType::DK_none:
+ llvm_unreachable("non-lifetime captured variable");
+ }
+ SourceLocation Loc = D->getLocation();
+ if (Loc.isInvalid())
+ Loc = BDecl->getLocation();
+ Scopes.push_back(GotoScope(ParentScope,
+ Diags.first, Diags.second, Loc));
+ ParentScope = Scopes.size()-1;
+ }
+}
+
+/// BuildScopeInformation - The statements from CI to CE are known to form a
+/// coherent VLA scope with a specified parent node. Walk through the
+/// statements, adding any labels or gotos to LabelAndGotoScopes and recursively
+/// walking the AST as needed.
+void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned &origParentScope) {
+ // If this is a statement, rather than an expression, scopes within it don't
+ // propagate out into the enclosing scope. Otherwise we have to worry
+ // about block literals, which have the lifetime of their enclosing statement.
+ unsigned independentParentScope = origParentScope;
+ unsigned &ParentScope = ((isa<Expr>(S) && !isa<StmtExpr>(S))
+ ? origParentScope : independentParentScope);
+
+ bool SkipFirstSubStmt = false;
+
+ // If we found a label, remember that it is in ParentScope scope.
+ switch (S->getStmtClass()) {
+ case Stmt::AddrLabelExprClass:
+ IndirectJumpTargets.push_back(cast<AddrLabelExpr>(S)->getLabel());
+ break;
+
+ case Stmt::IndirectGotoStmtClass:
+ // "goto *&&lbl;" is a special case which we treat as equivalent
+ // to a normal goto. In addition, we don't calculate scope in the
+ // operand (to avoid recording the address-of-label use), which
+ // works only because of the restricted set of expressions which
+ // we detect as constant targets.
+ if (cast<IndirectGotoStmt>(S)->getConstantTarget()) {
+ LabelAndGotoScopes[S] = ParentScope;
+ Jumps.push_back(S);
+ return;
+ }
+
+ LabelAndGotoScopes[S] = ParentScope;
+ IndirectJumps.push_back(cast<IndirectGotoStmt>(S));
+ break;
+
+ case Stmt::SwitchStmtClass:
+ // Evaluate the condition variable before entering the scope of the switch
+ // statement.
+ if (VarDecl *Var = cast<SwitchStmt>(S)->getConditionVariable()) {
+ BuildScopeInformation(Var, ParentScope);
+ SkipFirstSubStmt = true;
+ }
+ // Fall through
+
+ case Stmt::GotoStmtClass:
+    // Remember both what scope a goto is in and the fact that we have it.
+    // This makes the second scan not have to walk the AST again.
+ LabelAndGotoScopes[S] = ParentScope;
+ Jumps.push_back(S);
+ break;
+
+ case Stmt::CXXTryStmtClass: {
+ CXXTryStmt *TS = cast<CXXTryStmt>(S);
+ unsigned newParentScope;
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_cxx_try,
+ diag::note_exits_cxx_try,
+ TS->getSourceRange().getBegin()));
+ if (Stmt *TryBlock = TS->getTryBlock())
+ BuildScopeInformation(TryBlock, (newParentScope = Scopes.size()-1));
+
+ // Jump from the catch into the try is not allowed either.
+ for (unsigned I = 0, E = TS->getNumHandlers(); I != E; ++I) {
+ CXXCatchStmt *CS = TS->getHandler(I);
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_cxx_catch,
+ diag::note_exits_cxx_catch,
+ CS->getSourceRange().getBegin()));
+ BuildScopeInformation(CS->getHandlerBlock(),
+ (newParentScope = Scopes.size()-1));
+ }
+ return;
+ }
+
+ case Stmt::SEHTryStmtClass: {
+ SEHTryStmt *TS = cast<SEHTryStmt>(S);
+ unsigned newParentScope;
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_seh_try,
+ diag::note_exits_seh_try,
+ TS->getSourceRange().getBegin()));
+ if (Stmt *TryBlock = TS->getTryBlock())
+ BuildScopeInformation(TryBlock, (newParentScope = Scopes.size()-1));
+
+  // Jumps from __except or __finally into the __try are not allowed either.
+ if (SEHExceptStmt *Except = TS->getExceptHandler()) {
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_seh_except,
+ diag::note_exits_seh_except,
+ Except->getSourceRange().getBegin()));
+ BuildScopeInformation(Except->getBlock(),
+ (newParentScope = Scopes.size()-1));
+ } else if (SEHFinallyStmt *Finally = TS->getFinallyHandler()) {
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_seh_finally,
+ diag::note_exits_seh_finally,
+ Finally->getSourceRange().getBegin()));
+ BuildScopeInformation(Finally->getBlock(),
+ (newParentScope = Scopes.size()-1));
+ }
+
+ return;
+ }
+
+ default:
+ break;
+ }
+
+ for (Stmt *SubStmt : S->children()) {
+ if (SkipFirstSubStmt) {
+ SkipFirstSubStmt = false;
+ continue;
+ }
+
+ if (!SubStmt) continue;
+
+ // Cases, labels, and defaults aren't "scope parents". It's also
+ // important to handle these iteratively instead of recursively in
+ // order to avoid blowing out the stack.
+ while (true) {
+ Stmt *Next;
+ if (CaseStmt *CS = dyn_cast<CaseStmt>(SubStmt))
+ Next = CS->getSubStmt();
+ else if (DefaultStmt *DS = dyn_cast<DefaultStmt>(SubStmt))
+ Next = DS->getSubStmt();
+ else if (LabelStmt *LS = dyn_cast<LabelStmt>(SubStmt))
+ Next = LS->getSubStmt();
+ else
+ break;
+
+ LabelAndGotoScopes[SubStmt] = ParentScope;
+ SubStmt = Next;
+ }
+
+ // If this is a declstmt with a VLA definition, it defines a scope from here
+ // to the end of the containing context.
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(SubStmt)) {
+ // The decl statement creates a scope if any of the decls in it are VLAs
+ // or have the cleanup attribute.
+ for (auto *I : DS->decls())
+ BuildScopeInformation(I, ParentScope);
+ continue;
+ }
+ // Disallow jumps into any part of an @try statement by pushing a scope and
+ // walking all sub-stmts in that scope.
+ if (ObjCAtTryStmt *AT = dyn_cast<ObjCAtTryStmt>(SubStmt)) {
+ unsigned newParentScope;
+ // Recursively walk the AST for the @try part.
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_objc_try,
+ diag::note_exits_objc_try,
+ AT->getAtTryLoc()));
+ if (Stmt *TryPart = AT->getTryBody())
+ BuildScopeInformation(TryPart, (newParentScope = Scopes.size()-1));
+
+ // Jump from the catch to the finally or try is not valid.
+ for (unsigned I = 0, N = AT->getNumCatchStmts(); I != N; ++I) {
+ ObjCAtCatchStmt *AC = AT->getCatchStmt(I);
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_objc_catch,
+ diag::note_exits_objc_catch,
+ AC->getAtCatchLoc()));
+        // Walk each @catch body in its own protected scope.
+ BuildScopeInformation(AC->getCatchBody(),
+ (newParentScope = Scopes.size()-1));
+ }
+
+ // Jump from the finally to the try or catch is not valid.
+ if (ObjCAtFinallyStmt *AF = AT->getFinallyStmt()) {
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_objc_finally,
+ diag::note_exits_objc_finally,
+ AF->getAtFinallyLoc()));
+ BuildScopeInformation(AF, (newParentScope = Scopes.size()-1));
+ }
+
+ continue;
+ }
+
+ unsigned newParentScope;
+ // Disallow jumps into the protected statement of an @synchronized, but
+ // allow jumps into the object expression it protects.
+ if (ObjCAtSynchronizedStmt *AS =
+ dyn_cast<ObjCAtSynchronizedStmt>(SubStmt)) {
+      // Recursively walk the AST for the @synchronized object expr; it is
+      // evaluated in the normal scope.
+ BuildScopeInformation(AS->getSynchExpr(), ParentScope);
+
+ // Recursively walk the AST for the @synchronized part, protected by a new
+ // scope.
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_objc_synchronized,
+ diag::note_exits_objc_synchronized,
+ AS->getAtSynchronizedLoc()));
+ BuildScopeInformation(AS->getSynchBody(),
+ (newParentScope = Scopes.size()-1));
+ continue;
+ }
+
+ // Disallow jumps into the protected statement of an @autoreleasepool.
+ if (ObjCAutoreleasePoolStmt *AS =
+ dyn_cast<ObjCAutoreleasePoolStmt>(SubStmt)) {
+ // Recursively walk the AST for the @autoreleasepool part, protected by a
+ // new scope.
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_objc_autoreleasepool,
+ diag::note_exits_objc_autoreleasepool,
+ AS->getAtLoc()));
+ BuildScopeInformation(AS->getSubStmt(),
+ (newParentScope = Scopes.size() - 1));
+ continue;
+ }
+
+ // Disallow jumps past full-expressions that use blocks with
+ // non-trivial cleanups of their captures. This is theoretically
+ // implementable but a lot of work which we haven't felt up to doing.
+ if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(SubStmt)) {
+ for (unsigned i = 0, e = EWC->getNumObjects(); i != e; ++i) {
+ const BlockDecl *BDecl = EWC->getObject(i);
+ for (const auto &CI : BDecl->captures()) {
+ VarDecl *variable = CI.getVariable();
+ BuildScopeInformation(variable, BDecl, ParentScope);
+ }
+ }
+ }
+
+ // Disallow jumps out of scopes containing temporaries lifetime-extended to
+ // automatic storage duration.
+ if (MaterializeTemporaryExpr *MTE =
+ dyn_cast<MaterializeTemporaryExpr>(SubStmt)) {
+ if (MTE->getStorageDuration() == SD_Automatic) {
+ SmallVector<const Expr *, 4> CommaLHS;
+ SmallVector<SubobjectAdjustment, 4> Adjustments;
+ const Expr *ExtendedObject =
+ MTE->GetTemporaryExpr()->skipRValueSubobjectAdjustments(
+ CommaLHS, Adjustments);
+ if (ExtendedObject->getType().isDestructedType()) {
+ Scopes.push_back(GotoScope(ParentScope, 0,
+ diag::note_exits_temporary_dtor,
+ ExtendedObject->getExprLoc()));
+ ParentScope = Scopes.size()-1;
+ }
+ }
+ }
+
+ // Recursively walk the AST.
+ BuildScopeInformation(SubStmt, ParentScope);
+ }
+}
+
+/// VerifyJumps - Verify each element of the Jumps array to see if they are
+/// valid, emitting diagnostics if not.
+void JumpScopeChecker::VerifyJumps() {
+ while (!Jumps.empty()) {
+ Stmt *Jump = Jumps.pop_back_val();
+
+    // For a direct goto, validate the jump against its label's scope.
+ if (GotoStmt *GS = dyn_cast<GotoStmt>(Jump)) {
+ // The label may not have a statement if it's coming from inline MS ASM.
+ if (GS->getLabel()->getStmt()) {
+ CheckJump(GS, GS->getLabel()->getStmt(), GS->getGotoLoc(),
+ diag::err_goto_into_protected_scope,
+ diag::ext_goto_into_protected_scope,
+ diag::warn_cxx98_compat_goto_into_protected_scope);
+ }
+ CheckGotoStmt(GS);
+ continue;
+ }
+
+ // We only get indirect gotos here when they have a constant target.
+ if (IndirectGotoStmt *IGS = dyn_cast<IndirectGotoStmt>(Jump)) {
+ LabelDecl *Target = IGS->getConstantTarget();
+ CheckJump(IGS, Target->getStmt(), IGS->getGotoLoc(),
+ diag::err_goto_into_protected_scope,
+ diag::ext_goto_into_protected_scope,
+ diag::warn_cxx98_compat_goto_into_protected_scope);
+ continue;
+ }
+
+ SwitchStmt *SS = cast<SwitchStmt>(Jump);
+ for (SwitchCase *SC = SS->getSwitchCaseList(); SC;
+ SC = SC->getNextSwitchCase()) {
+ if (CHECK_PERMISSIVE(!LabelAndGotoScopes.count(SC)))
+ continue;
+ SourceLocation Loc;
+ if (CaseStmt *CS = dyn_cast<CaseStmt>(SC))
+ Loc = CS->getLocStart();
+ else if (DefaultStmt *DS = dyn_cast<DefaultStmt>(SC))
+ Loc = DS->getLocStart();
+ else
+ Loc = SC->getLocStart();
+ CheckJump(SS, SC, Loc, diag::err_switch_into_protected_scope, 0,
+ diag::warn_cxx98_compat_switch_into_protected_scope);
+ }
+ }
+}
+
+/// VerifyIndirectJumps - Verify whether any possible indirect jump
+/// might cross a protection boundary. Unlike direct jumps, indirect
+/// jumps count cleanups as protection boundaries: since there's no
+/// way to know where the jump is going, we can't implicitly run the
+/// right cleanups the way we can with direct jumps.
+///
+/// Thus, an indirect jump is "trivial" if it bypasses no
+/// initializations and no teardowns. More formally, an indirect jump
+/// from A to B is trivial if the path out from A to DCA(A,B) is
+/// trivial and the path in from DCA(A,B) to B is trivial, where
+/// DCA(A,B) is the deepest common ancestor of A and B.
+/// Jump-triviality is transitive but asymmetric.
+///
+/// A path in is trivial if none of the entered scopes have an InDiag.
+/// A path out is trivial if none of the exited scopes have an OutDiag.
+///
+/// Under these definitions, this function checks that the indirect
+/// jump between A and B is trivial for every indirect goto statement A
+/// and every label B whose address was taken in the function.
+void JumpScopeChecker::VerifyIndirectJumps() {
+ if (IndirectJumps.empty()) return;
+
+ // If there aren't any address-of-label expressions in this function,
+ // complain about the first indirect goto.
+ if (IndirectJumpTargets.empty()) {
+ S.Diag(IndirectJumps[0]->getGotoLoc(),
+ diag::err_indirect_goto_without_addrlabel);
+ return;
+ }
+
+ // Collect a single representative of every scope containing an
+ // indirect goto. For most code bases, this substantially cuts
+ // down on the number of jump sites we'll have to consider later.
+ typedef std::pair<unsigned, IndirectGotoStmt*> JumpScope;
+ SmallVector<JumpScope, 32> JumpScopes;
+ {
+ llvm::DenseMap<unsigned, IndirectGotoStmt*> JumpScopesMap;
+ for (SmallVectorImpl<IndirectGotoStmt*>::iterator
+ I = IndirectJumps.begin(), E = IndirectJumps.end(); I != E; ++I) {
+ IndirectGotoStmt *IG = *I;
+ if (CHECK_PERMISSIVE(!LabelAndGotoScopes.count(IG)))
+ continue;
+ unsigned IGScope = LabelAndGotoScopes[IG];
+ IndirectGotoStmt *&Entry = JumpScopesMap[IGScope];
+ if (!Entry) Entry = IG;
+ }
+ JumpScopes.reserve(JumpScopesMap.size());
+ for (llvm::DenseMap<unsigned, IndirectGotoStmt*>::iterator
+ I = JumpScopesMap.begin(), E = JumpScopesMap.end(); I != E; ++I)
+ JumpScopes.push_back(*I);
+ }
+
+ // Collect a single representative of every scope containing a
+ // label whose address was taken somewhere in the function.
+ // For most code bases, there will be only one such scope.
+ llvm::DenseMap<unsigned, LabelDecl*> TargetScopes;
+ for (SmallVectorImpl<LabelDecl*>::iterator
+ I = IndirectJumpTargets.begin(), E = IndirectJumpTargets.end();
+ I != E; ++I) {
+ LabelDecl *TheLabel = *I;
+ if (CHECK_PERMISSIVE(!LabelAndGotoScopes.count(TheLabel->getStmt())))
+ continue;
+ unsigned LabelScope = LabelAndGotoScopes[TheLabel->getStmt()];
+ LabelDecl *&Target = TargetScopes[LabelScope];
+ if (!Target) Target = TheLabel;
+ }
+
+ // For each target scope, make sure it's trivially reachable from
+ // every scope containing a jump site.
+ //
+  // A path between scopes always consists of exiting zero or more
+  // scopes, then entering zero or more scopes.  We build a set of
+  // scopes S from which the target scope can be trivially
+  // entered, then verify that every jump scope can be trivially
+  // exited to reach a scope in S.
+ llvm::BitVector Reachable(Scopes.size(), false);
+ for (llvm::DenseMap<unsigned,LabelDecl*>::iterator
+ TI = TargetScopes.begin(), TE = TargetScopes.end(); TI != TE; ++TI) {
+ unsigned TargetScope = TI->first;
+ LabelDecl *TargetLabel = TI->second;
+
+ Reachable.reset();
+
+ // Mark all the enclosing scopes from which you can safely jump
+ // into the target scope. 'Min' will end up being the index of
+ // the shallowest such scope.
+ unsigned Min = TargetScope;
+ while (true) {
+ Reachable.set(Min);
+
+ // Don't go beyond the outermost scope.
+ if (Min == 0) break;
+
+ // Stop if we can't trivially enter the current scope.
+ if (Scopes[Min].InDiag) break;
+
+ Min = Scopes[Min].ParentScope;
+ }
+
+ // Walk through all the jump sites, checking that they can trivially
+ // reach this label scope.
+ for (SmallVectorImpl<JumpScope>::iterator
+ I = JumpScopes.begin(), E = JumpScopes.end(); I != E; ++I) {
+ unsigned Scope = I->first;
+
+ // Walk out the "scope chain" for this scope, looking for a scope
+ // we've marked reachable. For well-formed code this amortizes
+ // to O(JumpScopes.size() / Scopes.size()): we only iterate
+ // when we see something unmarked, and in well-formed code we
+ // mark everything we iterate past.
+ bool IsReachable = false;
+ while (true) {
+ if (Reachable.test(Scope)) {
+ // If we find something reachable, mark all the scopes we just
+ // walked through as reachable.
+ for (unsigned S = I->first; S != Scope; S = Scopes[S].ParentScope)
+ Reachable.set(S);
+ IsReachable = true;
+ break;
+ }
+
+ // Don't walk out if we've reached the top-level scope or we've
+ // gotten shallower than the shallowest reachable scope.
+ if (Scope == 0 || Scope < Min) break;
+
+ // Don't walk out through an out-diagnostic.
+ if (Scopes[Scope].OutDiag) break;
+
+ Scope = Scopes[Scope].ParentScope;
+ }
+
+ // Only diagnose if we didn't find something.
+ if (IsReachable) continue;
+
+ DiagnoseIndirectJump(I->second, I->first, TargetLabel, TargetScope);
+ }
+ }
+}
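+// Illustration: an indirect jump that would exit a scope with a cleanup is
+// rejected under the rules above, since there is no way to know which
+// cleanups to run:
+//
+//   void *target = &&L;
+//   {
+//     __attribute__((cleanup(f))) int x = 0;  // scope with an OutDiag
+//     goto *target;  // diagnosed: the jump might cross a protected scope
+//   }
+// L: ;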
+
+/// Return true if a particular error+note combination must be downgraded to a
+/// warning in Microsoft mode.
+static bool IsMicrosoftJumpWarning(unsigned JumpDiag, unsigned InDiagNote) {
+ return (JumpDiag == diag::err_goto_into_protected_scope &&
+ (InDiagNote == diag::note_protected_by_variable_init ||
+ InDiagNote == diag::note_protected_by_variable_nontriv_destructor));
+}
+
+/// Return true if a particular note should be downgraded to a compatibility
+/// warning in C++11 mode.
+static bool IsCXX98CompatWarning(Sema &S, unsigned InDiagNote) {
+ return S.getLangOpts().CPlusPlus11 &&
+ InDiagNote == diag::note_protected_by_variable_non_pod;
+}
+
+/// Produce primary diagnostic for an indirect jump statement.
+static void DiagnoseIndirectJumpStmt(Sema &S, IndirectGotoStmt *Jump,
+ LabelDecl *Target, bool &Diagnosed) {
+ if (Diagnosed)
+ return;
+ S.Diag(Jump->getGotoLoc(), diag::err_indirect_goto_in_protected_scope);
+ S.Diag(Target->getStmt()->getIdentLoc(), diag::note_indirect_goto_target);
+ Diagnosed = true;
+}
+
+/// Produce note diagnostics for a jump into a protected scope.
+void JumpScopeChecker::NoteJumpIntoScopes(ArrayRef<unsigned> ToScopes) {
+ if (CHECK_PERMISSIVE(ToScopes.empty()))
+ return;
+ for (unsigned I = 0, E = ToScopes.size(); I != E; ++I)
+ if (Scopes[ToScopes[I]].InDiag)
+ S.Diag(Scopes[ToScopes[I]].Loc, Scopes[ToScopes[I]].InDiag);
+}
+
+/// Diagnose an indirect jump which is known to cross scopes.
+void JumpScopeChecker::DiagnoseIndirectJump(IndirectGotoStmt *Jump,
+ unsigned JumpScope,
+ LabelDecl *Target,
+ unsigned TargetScope) {
+ if (CHECK_PERMISSIVE(JumpScope == TargetScope))
+ return;
+
+ unsigned Common = GetDeepestCommonScope(JumpScope, TargetScope);
+ bool Diagnosed = false;
+
+ // Walk out the scope chain until we reach the common ancestor.
+ for (unsigned I = JumpScope; I != Common; I = Scopes[I].ParentScope)
+ if (Scopes[I].OutDiag) {
+ DiagnoseIndirectJumpStmt(S, Jump, Target, Diagnosed);
+ S.Diag(Scopes[I].Loc, Scopes[I].OutDiag);
+ }
+
+ SmallVector<unsigned, 10> ToScopesCXX98Compat;
+
+ // Now walk into the scopes containing the label whose address was taken.
+ for (unsigned I = TargetScope; I != Common; I = Scopes[I].ParentScope)
+ if (IsCXX98CompatWarning(S, Scopes[I].InDiag))
+ ToScopesCXX98Compat.push_back(I);
+ else if (Scopes[I].InDiag) {
+ DiagnoseIndirectJumpStmt(S, Jump, Target, Diagnosed);
+ S.Diag(Scopes[I].Loc, Scopes[I].InDiag);
+ }
+
+ // Diagnose this jump if it would be ill-formed in C++98.
+ if (!Diagnosed && !ToScopesCXX98Compat.empty()) {
+ S.Diag(Jump->getGotoLoc(),
+ diag::warn_cxx98_compat_indirect_goto_in_protected_scope);
+ S.Diag(Target->getStmt()->getIdentLoc(), diag::note_indirect_goto_target);
+ NoteJumpIntoScopes(ToScopesCXX98Compat);
+ }
+}
+
+/// CheckJump - Validate that the specified jump statement is valid: that it is
+/// jumping within or out of its current scope, not into a deeper one.
+void JumpScopeChecker::CheckJump(Stmt *From, Stmt *To, SourceLocation DiagLoc,
+ unsigned JumpDiagError, unsigned JumpDiagWarning,
+ unsigned JumpDiagCXX98Compat) {
+ if (CHECK_PERMISSIVE(!LabelAndGotoScopes.count(From)))
+ return;
+ if (CHECK_PERMISSIVE(!LabelAndGotoScopes.count(To)))
+ return;
+
+ unsigned FromScope = LabelAndGotoScopes[From];
+ unsigned ToScope = LabelAndGotoScopes[To];
+
+ // Common case: exactly the same scope, which is fine.
+ if (FromScope == ToScope) return;
+
+ // Warn on gotos out of __finally blocks.
+ if (isa<GotoStmt>(From) || isa<IndirectGotoStmt>(From)) {
+ // If FromScope > ToScope, FromScope is more nested and the jump goes to a
+ // less nested scope. Check if it crosses a __finally along the way.
+ for (unsigned I = FromScope; I > ToScope; I = Scopes[I].ParentScope) {
+ if (Scopes[I].InDiag == diag::note_protected_by_seh_finally) {
+ S.Diag(From->getLocStart(), diag::warn_jump_out_of_seh_finally);
+ break;
+ }
+ }
+ }
+
+ unsigned CommonScope = GetDeepestCommonScope(FromScope, ToScope);
+
+ // It's okay to jump out from a nested scope.
+ if (CommonScope == ToScope) return;
+
+ // Pull out (and reverse) any scopes we might need to diagnose skipping.
+ SmallVector<unsigned, 10> ToScopesCXX98Compat;
+ SmallVector<unsigned, 10> ToScopesError;
+ SmallVector<unsigned, 10> ToScopesWarning;
+ for (unsigned I = ToScope; I != CommonScope; I = Scopes[I].ParentScope) {
+ if (S.getLangOpts().MSVCCompat && JumpDiagWarning != 0 &&
+ IsMicrosoftJumpWarning(JumpDiagError, Scopes[I].InDiag))
+ ToScopesWarning.push_back(I);
+ else if (IsCXX98CompatWarning(S, Scopes[I].InDiag))
+ ToScopesCXX98Compat.push_back(I);
+ else if (Scopes[I].InDiag)
+ ToScopesError.push_back(I);
+ }
+
+ // Handle warnings.
+ if (!ToScopesWarning.empty()) {
+ S.Diag(DiagLoc, JumpDiagWarning);
+ NoteJumpIntoScopes(ToScopesWarning);
+ }
+
+ // Handle errors.
+ if (!ToScopesError.empty()) {
+ S.Diag(DiagLoc, JumpDiagError);
+ NoteJumpIntoScopes(ToScopesError);
+ }
+
+ // Handle -Wc++98-compat warnings if the jump is well-formed.
+ if (ToScopesError.empty() && !ToScopesCXX98Compat.empty()) {
+ S.Diag(DiagLoc, JumpDiagCXX98Compat);
+ NoteJumpIntoScopes(ToScopesCXX98Compat);
+ }
+}
+
+void JumpScopeChecker::CheckGotoStmt(GotoStmt *GS) {
+ if (GS->getLabel()->isMSAsmLabel()) {
+ S.Diag(GS->getGotoLoc(), diag::err_goto_ms_asm_label)
+ << GS->getLabel()->getIdentifier();
+ S.Diag(GS->getLabel()->getLocation(), diag::note_goto_ms_asm_label)
+ << GS->getLabel()->getIdentifier();
+ }
+}
+
+void Sema::DiagnoseInvalidJumps(Stmt *Body) {
+ (void)JumpScopeChecker(Body, *this);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/MultiplexExternalSemaSource.cpp b/contrib/llvm/tools/clang/lib/Sema/MultiplexExternalSemaSource.cpp
new file mode 100644
index 0000000..0f93421
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/MultiplexExternalSemaSource.cpp
@@ -0,0 +1,311 @@
+//===--- MultiplexExternalSemaSource.cpp ---------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements MultiplexExternalSemaSource, which dispatches the
+// external-source events it receives to each of its subscribed sources.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Sema/MultiplexExternalSemaSource.h"
+#include "clang/AST/DeclContextInternals.h"
+#include "clang/Sema/Lookup.h"
+
+using namespace clang;
+
+///\brief Constructs a new multiplexing external sema source from the two
+/// given sources.
+///
+///\param[in] s1,s2 - The ExternalSemaSources to multiplex.
+///
+MultiplexExternalSemaSource::MultiplexExternalSemaSource(ExternalSemaSource &s1,
+ ExternalSemaSource &s2){
+ Sources.push_back(&s1);
+ Sources.push_back(&s2);
+}
+
+// pin the vtable here.
+MultiplexExternalSemaSource::~MultiplexExternalSemaSource() {}
+
+///\brief Appends a new source to the source list.
+///
+///\param[in] source - An ExternalSemaSource.
+///
+void MultiplexExternalSemaSource::addSource(ExternalSemaSource &source) {
+ Sources.push_back(&source);
+}
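+// A plausible wiring (a sketch, not prescribed by this file; 'Reader' and
+// 'MySource' are illustrative names):
+//
+//   MultiplexExternalSemaSource *Multiplex =
+//       new MultiplexExternalSemaSource(*Reader, *MySource);
+//   S.getASTContext().setExternalSource(Multiplex);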
+
+//===----------------------------------------------------------------------===//
+// ExternalASTSource.
+//===----------------------------------------------------------------------===//
+
+Decl *MultiplexExternalSemaSource::GetExternalDecl(uint32_t ID) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ if (Decl *Result = Sources[i]->GetExternalDecl(ID))
+ return Result;
+ return nullptr;
+}
+
+void MultiplexExternalSemaSource::CompleteRedeclChain(const Decl *D) {
+ for (size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->CompleteRedeclChain(D);
+}
+
+Selector MultiplexExternalSemaSource::GetExternalSelector(uint32_t ID) {
+ Selector Sel;
+ for(size_t i = 0; i < Sources.size(); ++i) {
+ Sel = Sources[i]->GetExternalSelector(ID);
+ if (!Sel.isNull())
+ return Sel;
+ }
+ return Sel;
+}
+
+uint32_t MultiplexExternalSemaSource::GetNumExternalSelectors() {
+ uint32_t total = 0;
+ for(size_t i = 0; i < Sources.size(); ++i)
+ total += Sources[i]->GetNumExternalSelectors();
+ return total;
+}
+
+Stmt *MultiplexExternalSemaSource::GetExternalDeclStmt(uint64_t Offset) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ if (Stmt *Result = Sources[i]->GetExternalDeclStmt(Offset))
+ return Result;
+ return nullptr;
+}
+
+CXXBaseSpecifier *MultiplexExternalSemaSource::GetExternalCXXBaseSpecifiers(
+ uint64_t Offset){
+ for(size_t i = 0; i < Sources.size(); ++i)
+ if (CXXBaseSpecifier *R = Sources[i]->GetExternalCXXBaseSpecifiers(Offset))
+ return R;
+ return nullptr;
+}
+
+CXXCtorInitializer **
+MultiplexExternalSemaSource::GetExternalCXXCtorInitializers(uint64_t Offset) {
+ for (auto *S : Sources)
+ if (auto *R = S->GetExternalCXXCtorInitializers(Offset))
+ return R;
+ return nullptr;
+}
+
+bool MultiplexExternalSemaSource::
+FindExternalVisibleDeclsByName(const DeclContext *DC, DeclarationName Name) {
+ bool AnyDeclsFound = false;
+ for (size_t i = 0; i < Sources.size(); ++i)
+ AnyDeclsFound |= Sources[i]->FindExternalVisibleDeclsByName(DC, Name);
+ return AnyDeclsFound;
+}
+
+void MultiplexExternalSemaSource::completeVisibleDeclsMap(const DeclContext *DC){
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->completeVisibleDeclsMap(DC);
+}
+
+void MultiplexExternalSemaSource::FindExternalLexicalDecls(
+ const DeclContext *DC, llvm::function_ref<bool(Decl::Kind)> IsKindWeWant,
+ SmallVectorImpl<Decl *> &Result) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->FindExternalLexicalDecls(DC, IsKindWeWant, Result);
+}
+
+void MultiplexExternalSemaSource::FindFileRegionDecls(FileID File,
+ unsigned Offset,
+ unsigned Length,
+ SmallVectorImpl<Decl *> &Decls){
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->FindFileRegionDecls(File, Offset, Length, Decls);
+}
+
+void MultiplexExternalSemaSource::CompleteType(TagDecl *Tag) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->CompleteType(Tag);
+}
+
+void MultiplexExternalSemaSource::CompleteType(ObjCInterfaceDecl *Class) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->CompleteType(Class);
+}
+
+void MultiplexExternalSemaSource::ReadComments() {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadComments();
+}
+
+void MultiplexExternalSemaSource::StartedDeserializing() {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->StartedDeserializing();
+}
+
+void MultiplexExternalSemaSource::FinishedDeserializing() {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->FinishedDeserializing();
+}
+
+void MultiplexExternalSemaSource::StartTranslationUnit(ASTConsumer *Consumer) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->StartTranslationUnit(Consumer);
+}
+
+void MultiplexExternalSemaSource::PrintStats() {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->PrintStats();
+}
+
+bool MultiplexExternalSemaSource::layoutRecordType(const RecordDecl *Record,
+ uint64_t &Size,
+ uint64_t &Alignment,
+ llvm::DenseMap<const FieldDecl *, uint64_t> &FieldOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &BaseOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &VirtualBaseOffsets){
+ for(size_t i = 0; i < Sources.size(); ++i)
+ if (Sources[i]->layoutRecordType(Record, Size, Alignment, FieldOffsets,
+ BaseOffsets, VirtualBaseOffsets))
+ return true;
+ return false;
+}
+
+void MultiplexExternalSemaSource::
+getMemoryBufferSizes(MemoryBufferSizes &sizes) const {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->getMemoryBufferSizes(sizes);
+}
+
+//===----------------------------------------------------------------------===//
+// ExternalSemaSource.
+//===----------------------------------------------------------------------===//
+
+void MultiplexExternalSemaSource::InitializeSema(Sema &S) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->InitializeSema(S);
+}
+
+void MultiplexExternalSemaSource::ForgetSema() {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ForgetSema();
+}
+
+void MultiplexExternalSemaSource::ReadMethodPool(Selector Sel) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadMethodPool(Sel);
+}
+
+void MultiplexExternalSemaSource::ReadKnownNamespaces(
+ SmallVectorImpl<NamespaceDecl*> &Namespaces){
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadKnownNamespaces(Namespaces);
+}
+
+void MultiplexExternalSemaSource::ReadUndefinedButUsed(
+ llvm::DenseMap<NamedDecl*, SourceLocation> &Undefined){
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadUndefinedButUsed(Undefined);
+}
+
+void MultiplexExternalSemaSource::ReadMismatchingDeleteExpressions(
+ llvm::MapVector<FieldDecl *,
+ llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &
+ Exprs) {
+ for (auto &Source : Sources)
+ Source->ReadMismatchingDeleteExpressions(Exprs);
+}
+
+bool MultiplexExternalSemaSource::LookupUnqualified(LookupResult &R, Scope *S){
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->LookupUnqualified(R, S);
+
+ return !R.empty();
+}
+
+void MultiplexExternalSemaSource::ReadTentativeDefinitions(
+ SmallVectorImpl<VarDecl*> &TentativeDefs) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadTentativeDefinitions(TentativeDefs);
+}
+
+void MultiplexExternalSemaSource::ReadUnusedFileScopedDecls(
+ SmallVectorImpl<const DeclaratorDecl*> &Decls) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadUnusedFileScopedDecls(Decls);
+}
+
+void MultiplexExternalSemaSource::ReadDelegatingConstructors(
+ SmallVectorImpl<CXXConstructorDecl*> &Decls) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadDelegatingConstructors(Decls);
+}
+
+void MultiplexExternalSemaSource::ReadExtVectorDecls(
+ SmallVectorImpl<TypedefNameDecl*> &Decls) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadExtVectorDecls(Decls);
+}
+
+void MultiplexExternalSemaSource::ReadUnusedLocalTypedefNameCandidates(
+ llvm::SmallSetVector<const TypedefNameDecl *, 4> &Decls) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadUnusedLocalTypedefNameCandidates(Decls);
+}
+
+void MultiplexExternalSemaSource::ReadReferencedSelectors(
+ SmallVectorImpl<std::pair<Selector, SourceLocation> > &Sels) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadReferencedSelectors(Sels);
+}
+
+void MultiplexExternalSemaSource::ReadWeakUndeclaredIdentifiers(
+ SmallVectorImpl<std::pair<IdentifierInfo*, WeakInfo> > &WI) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadWeakUndeclaredIdentifiers(WI);
+}
+
+void MultiplexExternalSemaSource::ReadUsedVTables(
+ SmallVectorImpl<ExternalVTableUse> &VTables) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadUsedVTables(VTables);
+}
+
+void MultiplexExternalSemaSource::ReadPendingInstantiations(
+ SmallVectorImpl<std::pair<ValueDecl*,
+ SourceLocation> > &Pending) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadPendingInstantiations(Pending);
+}
+
+void MultiplexExternalSemaSource::ReadLateParsedTemplates(
+ llvm::MapVector<const FunctionDecl *, LateParsedTemplate *> &LPTMap) {
+ for (size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadLateParsedTemplates(LPTMap);
+}
+
+TypoCorrection MultiplexExternalSemaSource::CorrectTypo(
+ const DeclarationNameInfo &Typo,
+ int LookupKind, Scope *S, CXXScopeSpec *SS,
+ CorrectionCandidateCallback &CCC,
+ DeclContext *MemberContext,
+ bool EnteringContext,
+ const ObjCObjectPointerType *OPT) {
+ for (size_t I = 0, E = Sources.size(); I < E; ++I) {
+ if (TypoCorrection C = Sources[I]->CorrectTypo(Typo, LookupKind, S, SS, CCC,
+ MemberContext,
+ EnteringContext, OPT))
+ return C;
+ }
+ return TypoCorrection();
+}
+
+bool MultiplexExternalSemaSource::MaybeDiagnoseMissingCompleteType(
+ SourceLocation Loc, QualType T) {
+ for (size_t I = 0, E = Sources.size(); I < E; ++I) {
+ if (Sources[I]->MaybeDiagnoseMissingCompleteType(Loc, T))
+ return true;
+ }
+ return false;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/Scope.cpp b/contrib/llvm/tools/clang/lib/Sema/Scope.cpp
new file mode 100644
index 0000000..7c70048
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/Scope.cpp
@@ -0,0 +1,224 @@
+//===- Scope.cpp - Lexical scope information --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Scope class, which is used for recording
+// information about a lexical scope.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/Scope.h"
+#include "clang/AST/Decl.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+void Scope::Init(Scope *parent, unsigned flags) {
+ AnyParent = parent;
+ Flags = flags;
+
+ if (parent && !(flags & FnScope)) {
+ BreakParent = parent->BreakParent;
+ ContinueParent = parent->ContinueParent;
+ } else {
+ // Control scopes do not contain the contents of nested function scopes for
+ // control flow purposes.
+ BreakParent = ContinueParent = nullptr;
+ }
+
+ if (parent) {
+ Depth = parent->Depth + 1;
+ PrototypeDepth = parent->PrototypeDepth;
+ PrototypeIndex = 0;
+ FnParent = parent->FnParent;
+ BlockParent = parent->BlockParent;
+ TemplateParamParent = parent->TemplateParamParent;
+ MSLastManglingParent = parent->MSLastManglingParent;
+ MSCurManglingNumber = getMSLastManglingNumber();
+ if ((Flags & (FnScope | ClassScope | BlockScope | TemplateParamScope |
+ FunctionPrototypeScope | AtCatchScope | ObjCMethodScope)) ==
+ 0)
+ Flags |= parent->getFlags() & OpenMPSimdDirectiveScope;
+ } else {
+ Depth = 0;
+ PrototypeDepth = 0;
+ PrototypeIndex = 0;
+ MSLastManglingParent = FnParent = BlockParent = nullptr;
+ TemplateParamParent = nullptr;
+ MSLastManglingNumber = 1;
+ MSCurManglingNumber = 1;
+ }
+
+ // If this scope is a function or contains breaks/continues, remember it.
+ if (flags & FnScope) FnParent = this;
+ // The MS mangler uses the number of scopes that can hold declarations as
+ // part of an external name.
+ if (Flags & (ClassScope | FnScope)) {
+ MSLastManglingNumber = getMSLastManglingNumber();
+ MSLastManglingParent = this;
+ MSCurManglingNumber = 1;
+ }
+ if (flags & BreakScope) BreakParent = this;
+ if (flags & ContinueScope) ContinueParent = this;
+ if (flags & BlockScope) BlockParent = this;
+ if (flags & TemplateParamScope) TemplateParamParent = this;
+
+ // If this is a prototype scope, record that.
+ if (flags & FunctionPrototypeScope) PrototypeDepth++;
+
+ if (flags & DeclScope) {
+ if (flags & FunctionPrototypeScope)
+ ; // Prototype scopes are uninteresting.
+ else if ((flags & ClassScope) && getParent()->isClassScope())
+ ; // Nested class scopes aren't ambiguous.
+ else if ((flags & ClassScope) && getParent()->getFlags() == DeclScope)
+ ; // Classes inside of namespaces aren't ambiguous.
+ else if ((flags & EnumScope))
+ ; // Don't increment for enum scopes.
+ else
+ incrementMSManglingNumber();
+ }
+
+ DeclsInScope.clear();
+ UsingDirectives.clear();
+ Entity = nullptr;
+ ErrorTrap.reset();
+ NRVO.setPointerAndInt(nullptr, 0);
+}
+
+bool Scope::containedInPrototypeScope() const {
+ const Scope *S = this;
+ while (S) {
+ if (S->isFunctionPrototypeScope())
+ return true;
+ S = S->getParent();
+ }
+ return false;
+}
+
+void Scope::AddFlags(unsigned FlagsToSet) {
+ assert((FlagsToSet & ~(BreakScope | ContinueScope)) == 0 &&
+ "Unsupported scope flags");
+ if (FlagsToSet & BreakScope) {
+ assert((Flags & BreakScope) == 0 && "Already set");
+ BreakParent = this;
+ }
+ if (FlagsToSet & ContinueScope) {
+ assert((Flags & ContinueScope) == 0 && "Already set");
+ ContinueParent = this;
+ }
+ Flags |= FlagsToSet;
+}
+
+void Scope::mergeNRVOIntoParent() {
+ if (VarDecl *Candidate = NRVO.getPointer()) {
+ if (isDeclScope(Candidate))
+ Candidate->setNRVOVariable(true);
+ }
+
+ if (getEntity())
+ return;
+
+ if (NRVO.getInt())
+ getParent()->setNoNRVO();
+ else if (NRVO.getPointer())
+ getParent()->addNRVOCandidate(NRVO.getPointer());
+}
+
+void Scope::dump() const { dumpImpl(llvm::errs()); }
+
+void Scope::dumpImpl(raw_ostream &OS) const {
+ unsigned Flags = getFlags();
+ bool HasFlags = Flags != 0;
+
+ if (HasFlags)
+ OS << "Flags: ";
+
+ while (Flags) {
+ if (Flags & FnScope) {
+ OS << "FnScope";
+ Flags &= ~FnScope;
+ } else if (Flags & BreakScope) {
+ OS << "BreakScope";
+ Flags &= ~BreakScope;
+ } else if (Flags & ContinueScope) {
+ OS << "ContinueScope";
+ Flags &= ~ContinueScope;
+ } else if (Flags & DeclScope) {
+ OS << "DeclScope";
+ Flags &= ~DeclScope;
+ } else if (Flags & ControlScope) {
+ OS << "ControlScope";
+ Flags &= ~ControlScope;
+ } else if (Flags & ClassScope) {
+ OS << "ClassScope";
+ Flags &= ~ClassScope;
+ } else if (Flags & BlockScope) {
+ OS << "BlockScope";
+ Flags &= ~BlockScope;
+ } else if (Flags & TemplateParamScope) {
+ OS << "TemplateParamScope";
+ Flags &= ~TemplateParamScope;
+ } else if (Flags & FunctionPrototypeScope) {
+ OS << "FunctionPrototypeScope";
+ Flags &= ~FunctionPrototypeScope;
+ } else if (Flags & FunctionDeclarationScope) {
+ OS << "FunctionDeclarationScope";
+ Flags &= ~FunctionDeclarationScope;
+ } else if (Flags & AtCatchScope) {
+ OS << "AtCatchScope";
+ Flags &= ~AtCatchScope;
+ } else if (Flags & ObjCMethodScope) {
+ OS << "ObjCMethodScope";
+ Flags &= ~ObjCMethodScope;
+ } else if (Flags & SwitchScope) {
+ OS << "SwitchScope";
+ Flags &= ~SwitchScope;
+ } else if (Flags & TryScope) {
+ OS << "TryScope";
+ Flags &= ~TryScope;
+ } else if (Flags & FnTryCatchScope) {
+ OS << "FnTryCatchScope";
+ Flags &= ~FnTryCatchScope;
+ } else if (Flags & SEHTryScope) {
+ OS << "SEHTryScope";
+ Flags &= ~SEHTryScope;
+ } else if (Flags & SEHExceptScope) {
+ OS << "SEHExceptScope";
+ Flags &= ~SEHExceptScope;
+ } else if (Flags & OpenMPDirectiveScope) {
+ OS << "OpenMPDirectiveScope";
+ Flags &= ~OpenMPDirectiveScope;
+ } else if (Flags & OpenMPLoopDirectiveScope) {
+ OS << "OpenMPLoopDirectiveScope";
+ Flags &= ~OpenMPLoopDirectiveScope;
+ } else if (Flags & OpenMPSimdDirectiveScope) {
+ OS << "OpenMPSimdDirectiveScope";
+ Flags &= ~OpenMPSimdDirectiveScope;
+ }
+
+ if (Flags)
+ OS << " | ";
+ }
+ if (HasFlags)
+ OS << '\n';
+
+ if (const Scope *Parent = getParent())
+ OS << "Parent: (clang::Scope*)" << Parent << '\n';
+
+ OS << "Depth: " << Depth << '\n';
+ OS << "MSLastManglingNumber: " << getMSLastManglingNumber() << '\n';
+ OS << "MSCurManglingNumber: " << getMSCurManglingNumber() << '\n';
+ if (const DeclContext *DC = getEntity())
+ OS << "Entity : (clang::DeclContext*)" << DC << '\n';
+
+ if (NRVO.getInt())
+ OS << "NRVO not allowed\n";
+ else if (NRVO.getPointer())
+ OS << "NRVO candidate : (clang::VarDecl*)" << NRVO.getPointer() << '\n';
+}
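
[Editor's note] Scope::dumpImpl above decodes the Flags bitmask by peeling off
one known bit per iteration and emitting " | " while any bits remain; since an
unrecognized bit is never cleared, the loop depends on every flag having a
branch. A standalone sketch of the peel-and-print idiom (DemoFlags and its
values are illustrative, not clang's):

    #include <cstdio>

    enum DemoFlags { FnScope = 0x01, BreakScope = 0x02, ContinueScope = 0x04 };

    void dumpFlags(unsigned Flags) {
      while (Flags) {
        if (Flags & FnScope) {
          std::printf("FnScope");
          Flags &= ~FnScope;
        } else if (Flags & BreakScope) {
          std::printf("BreakScope");
          Flags &= ~BreakScope;
        } else if (Flags & ContinueScope) {
          std::printf("ContinueScope");
          Flags &= ~ContinueScope;
        } else {
          break; // unknown bit: bail out rather than loop forever
        }
        if (Flags)
          std::printf(" | ");
      }
      std::printf("\n");
    }

    // dumpFlags(FnScope | ContinueScope) prints "FnScope | ContinueScope".
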
diff --git a/contrib/llvm/tools/clang/lib/Sema/ScopeInfo.cpp b/contrib/llvm/tools/clang/lib/Sema/ScopeInfo.cpp
new file mode 100644
index 0000000..cbd7ef7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/ScopeInfo.cpp
@@ -0,0 +1,240 @@
+//===--- ScopeInfo.cpp - Information about a semantic context -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements FunctionScopeInfo and its subclasses, which contain
+// information about a single function, block, lambda, or method body.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+
+using namespace clang;
+using namespace sema;
+
+void FunctionScopeInfo::Clear() {
+ HasBranchProtectedScope = false;
+ HasBranchIntoScope = false;
+ HasIndirectGoto = false;
+ HasDroppedStmt = false;
+ ObjCShouldCallSuper = false;
+ ObjCIsDesignatedInit = false;
+ ObjCWarnForNoDesignatedInitChain = false;
+ ObjCIsSecondaryInit = false;
+ ObjCWarnForNoInitDelegation = false;
+ FirstReturnLoc = SourceLocation();
+ FirstCXXTryLoc = SourceLocation();
+ FirstSEHTryLoc = SourceLocation();
+
+ SwitchStack.clear();
+ Returns.clear();
+ CoroutinePromise = nullptr;
+ CoroutineStmts.clear();
+ ErrorTrap.reset();
+ PossiblyUnreachableDiags.clear();
+ WeakObjectUses.clear();
+ ModifiedNonNullParams.clear();
+}
+
+static const NamedDecl *getBestPropertyDecl(const ObjCPropertyRefExpr *PropE) {
+ if (PropE->isExplicitProperty())
+ return PropE->getExplicitProperty();
+
+ return PropE->getImplicitPropertyGetter();
+}
+
+FunctionScopeInfo::WeakObjectProfileTy::BaseInfoTy
+FunctionScopeInfo::WeakObjectProfileTy::getBaseInfo(const Expr *E) {
+ E = E->IgnoreParenCasts();
+
+ const NamedDecl *D = nullptr;
+ bool IsExact = false;
+
+ switch (E->getStmtClass()) {
+ case Stmt::DeclRefExprClass:
+ D = cast<DeclRefExpr>(E)->getDecl();
+ IsExact = isa<VarDecl>(D);
+ break;
+ case Stmt::MemberExprClass: {
+ const MemberExpr *ME = cast<MemberExpr>(E);
+ D = ME->getMemberDecl();
+ IsExact = isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts());
+ break;
+ }
+ case Stmt::ObjCIvarRefExprClass: {
+ const ObjCIvarRefExpr *IE = cast<ObjCIvarRefExpr>(E);
+ D = IE->getDecl();
+ IsExact = IE->getBase()->isObjCSelfExpr();
+ break;
+ }
+ case Stmt::PseudoObjectExprClass: {
+ const PseudoObjectExpr *POE = cast<PseudoObjectExpr>(E);
+ const ObjCPropertyRefExpr *BaseProp =
+ dyn_cast<ObjCPropertyRefExpr>(POE->getSyntacticForm());
+ if (BaseProp) {
+ D = getBestPropertyDecl(BaseProp);
+
+ const Expr *DoubleBase = BaseProp->getBase();
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(DoubleBase))
+ DoubleBase = OVE->getSourceExpr();
+
+ IsExact = DoubleBase->isObjCSelfExpr();
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ return BaseInfoTy(D, IsExact);
+}
+
+bool CapturingScopeInfo::isVLATypeCaptured(const VariableArrayType *VAT) const {
+ RecordDecl *RD = nullptr;
+ if (auto *LSI = dyn_cast<LambdaScopeInfo>(this))
+ RD = LSI->Lambda;
+ else if (auto CRSI = dyn_cast<CapturedRegionScopeInfo>(this))
+ RD = CRSI->TheRecordDecl;
+
+ if (RD)
+ for (auto *FD : RD->fields()) {
+ if (FD->hasCapturedVLAType() && FD->getCapturedVLAType() == VAT)
+ return true;
+ }
+ return false;
+}
+
+FunctionScopeInfo::WeakObjectProfileTy::WeakObjectProfileTy(
+ const ObjCPropertyRefExpr *PropE)
+ : Base(nullptr, true), Property(getBestPropertyDecl(PropE)) {
+
+ if (PropE->isObjectReceiver()) {
+ const OpaqueValueExpr *OVE = cast<OpaqueValueExpr>(PropE->getBase());
+ const Expr *E = OVE->getSourceExpr();
+ Base = getBaseInfo(E);
+ } else if (PropE->isClassReceiver()) {
+ Base.setPointer(PropE->getClassReceiver());
+ } else {
+ assert(PropE->isSuperReceiver());
+ }
+}
+
+FunctionScopeInfo::WeakObjectProfileTy::WeakObjectProfileTy(const Expr *BaseE,
+ const ObjCPropertyDecl *Prop)
+ : Base(nullptr, true), Property(Prop) {
+ if (BaseE)
+ Base = getBaseInfo(BaseE);
+ // else, this is a message accessing a property on super.
+}
+
+FunctionScopeInfo::WeakObjectProfileTy::WeakObjectProfileTy(
+ const DeclRefExpr *DRE)
+ : Base(nullptr, true), Property(DRE->getDecl()) {
+ assert(isa<VarDecl>(Property));
+}
+
+FunctionScopeInfo::WeakObjectProfileTy::WeakObjectProfileTy(
+ const ObjCIvarRefExpr *IvarE)
+ : Base(getBaseInfo(IvarE->getBase())), Property(IvarE->getDecl()) {
+}
+
+void FunctionScopeInfo::recordUseOfWeak(const ObjCMessageExpr *Msg,
+ const ObjCPropertyDecl *Prop) {
+ assert(Msg && Prop);
+ WeakUseVector &Uses =
+ WeakObjectUses[WeakObjectProfileTy(Msg->getInstanceReceiver(), Prop)];
+ Uses.push_back(WeakUseTy(Msg, Msg->getNumArgs() == 0));
+}
+
+void FunctionScopeInfo::markSafeWeakUse(const Expr *E) {
+ E = E->IgnoreParenCasts();
+
+ if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
+ markSafeWeakUse(POE->getSyntacticForm());
+ return;
+ }
+
+ if (const ConditionalOperator *Cond = dyn_cast<ConditionalOperator>(E)) {
+ markSafeWeakUse(Cond->getTrueExpr());
+ markSafeWeakUse(Cond->getFalseExpr());
+ return;
+ }
+
+ if (const BinaryConditionalOperator *Cond =
+ dyn_cast<BinaryConditionalOperator>(E)) {
+ markSafeWeakUse(Cond->getCommon());
+ markSafeWeakUse(Cond->getFalseExpr());
+ return;
+ }
+
+ // Has this weak object been seen before?
+ FunctionScopeInfo::WeakObjectUseMap::iterator Uses;
+ if (const ObjCPropertyRefExpr *RefExpr = dyn_cast<ObjCPropertyRefExpr>(E)) {
+ if (!RefExpr->isObjectReceiver())
+ return;
+ if (isa<OpaqueValueExpr>(RefExpr->getBase()))
+ Uses = WeakObjectUses.find(WeakObjectProfileTy(RefExpr));
+ else {
+ markSafeWeakUse(RefExpr->getBase());
+ return;
+ }
+ }
+ else if (const ObjCIvarRefExpr *IvarE = dyn_cast<ObjCIvarRefExpr>(E))
+ Uses = WeakObjectUses.find(WeakObjectProfileTy(IvarE));
+ else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ Uses = WeakObjectUses.find(WeakObjectProfileTy(DRE));
+ else if (const ObjCMessageExpr *MsgE = dyn_cast<ObjCMessageExpr>(E)) {
+ Uses = WeakObjectUses.end();
+ if (const ObjCMethodDecl *MD = MsgE->getMethodDecl()) {
+ if (const ObjCPropertyDecl *Prop = MD->findPropertyDecl()) {
+ Uses =
+ WeakObjectUses.find(WeakObjectProfileTy(MsgE->getInstanceReceiver(),
+ Prop));
+ }
+ }
+ }
+ else
+ return;
+
+ if (Uses == WeakObjectUses.end())
+ return;
+
+ // Has there been a read from the object using this Expr?
+ FunctionScopeInfo::WeakUseVector::reverse_iterator ThisUse =
+ std::find(Uses->second.rbegin(), Uses->second.rend(), WeakUseTy(E, true));
+ if (ThisUse == Uses->second.rend())
+ return;
+
+ ThisUse->markSafe();
+}
+
+void LambdaScopeInfo::getPotentialVariableCapture(unsigned Idx, VarDecl *&VD,
+ Expr *&E) const {
+ assert(Idx < getNumPotentialVariableCaptures() &&
+ "Index of potential capture must be within 0 to less than the "
+ "number of captures!");
+ E = PotentiallyCapturingExprs[Idx];
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ VD = dyn_cast<VarDecl>(DRE->getFoundDecl());
+ else if (MemberExpr *ME = dyn_cast<MemberExpr>(E))
+ VD = dyn_cast<VarDecl>(ME->getMemberDecl());
+ else
+ llvm_unreachable("Only DeclRefExprs or MemberExprs should be added for "
+ "potential captures");
+ assert(VD);
+}
+
+FunctionScopeInfo::~FunctionScopeInfo() { }
+BlockScopeInfo::~BlockScopeInfo() { }
+CapturedRegionScopeInfo::~CapturedRegionScopeInfo() { }
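
[Editor's note] The weak-object tracking above hinges on WeakObjectProfileTy
serving as a map key: recordUseOfWeak appends each access to the vector for
its (base, property) profile, and markSafeWeakUse searches that vector in
reverse so the most recent matching read gets flagged as safe. A compact
sketch of the bookkeeping with stand-in types (Profile and Use here are
hypothetical, not clang's):

    #include <map>
    #include <utility>
    #include <vector>

    typedef std::pair<const void *, const void *> Profile; // (base, property)
    struct Use { const void *E; bool Safe; };
    typedef std::map<Profile, std::vector<Use> > UseMap;

    void recordUse(UseMap &M, const Profile &P, const void *E) {
      Use U = { E, false }; // every recorded access starts out unsafe
      M[P].push_back(U);
    }

    void markSafe(UseMap &M, const Profile &P, const void *E) {
      UseMap::iterator It = M.find(P);
      if (It == M.end())
        return; // profile never recorded: nothing to mark
      std::vector<Use> &Uses = It->second;
      for (std::vector<Use>::reverse_iterator U = Uses.rbegin(),
                                              End = Uses.rend();
           U != End; ++U) {
        if (U->E == E) { // most recent use of this expression wins
          U->Safe = true;
          return;
        }
      }
    }
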
diff --git a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
new file mode 100644
index 0000000..39b8cc9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
@@ -0,0 +1,1527 @@
+//===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the actions class which performs semantic analysis and
+// builds an AST out of a parse stream.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/CXXFieldCollector.h"
+#include "clang/Sema/DelayedDiagnostic.h"
+#include "clang/Sema/ExternalSemaSource.h"
+#include "clang/Sema/MultiplexExternalSemaSource.h"
+#include "clang/Sema/ObjCMethodList.h"
+#include "clang/Sema/PrettyDeclStackTrace.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaConsumer.h"
+#include "clang/Sema/TemplateDeduction.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
+using namespace clang;
+using namespace sema;
+
+SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) {
+ return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts);
+}
+
+ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }
+
+PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
+ const Preprocessor &PP) {
+ PrintingPolicy Policy = Context.getPrintingPolicy();
+ Policy.Bool = Context.getLangOpts().Bool;
+ if (!Policy.Bool) {
+ if (const MacroInfo *
+ BoolMacro = PP.getMacroInfo(&Context.Idents.get("bool"))) {
+ Policy.Bool = BoolMacro->isObjectLike() &&
+ BoolMacro->getNumTokens() == 1 &&
+ BoolMacro->getReplacementToken(0).is(tok::kw__Bool);
+ }
+ }
+
+ return Policy;
+}
+
+void Sema::ActOnTranslationUnitScope(Scope *S) {
+ TUScope = S;
+ PushDeclContext(S, Context.getTranslationUnitDecl());
+}
+
+Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
+ TranslationUnitKind TUKind,
+ CodeCompleteConsumer *CodeCompleter)
+ : ExternalSource(nullptr),
+ isMultiplexExternalSource(false), FPFeatures(pp.getLangOpts()),
+ LangOpts(pp.getLangOpts()), PP(pp), Context(ctxt), Consumer(consumer),
+ Diags(PP.getDiagnostics()), SourceMgr(PP.getSourceManager()),
+ CollectStats(false), CodeCompleter(CodeCompleter),
+ CurContext(nullptr), OriginalLexicalContext(nullptr),
+ PackContext(nullptr), MSStructPragmaOn(false),
+ MSPointerToMemberRepresentationMethod(
+ LangOpts.getMSPointerToMemberRepresentationMethod()),
+ VtorDispModeStack(1, MSVtorDispAttr::Mode(LangOpts.VtorDispMode)),
+ DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
+ CodeSegStack(nullptr), CurInitSeg(nullptr), VisContext(nullptr),
+ IsBuildingRecoveryCallExpr(false),
+ ExprNeedsCleanups(false), LateTemplateParser(nullptr),
+ LateTemplateParserCleanup(nullptr),
+ OpaqueParser(nullptr), IdResolver(pp), StdInitializerList(nullptr),
+ CXXTypeInfoDecl(nullptr), MSVCGuidDecl(nullptr),
+ NSNumberDecl(nullptr), NSValueDecl(nullptr),
+ NSStringDecl(nullptr), StringWithUTF8StringMethod(nullptr),
+ ValueWithBytesObjCTypeMethod(nullptr),
+ NSArrayDecl(nullptr), ArrayWithObjectsMethod(nullptr),
+ NSDictionaryDecl(nullptr), DictionaryWithObjectsMethod(nullptr),
+ MSAsmLabelNameCounter(0),
+ GlobalNewDeleteDeclared(false),
+ TUKind(TUKind),
+ NumSFINAEErrors(0),
+ CachedFakeTopLevelModule(nullptr),
+ AccessCheckingSFINAE(false), InNonInstantiationSFINAEContext(false),
+ NonInstantiationEntries(0), ArgumentPackSubstitutionIndex(-1),
+ CurrentInstantiationScope(nullptr), DisableTypoCorrection(false),
+ TyposCorrected(0), AnalysisWarnings(*this), ThreadSafetyDeclCache(nullptr),
+ VarDataSharingAttributesStack(nullptr), CurScope(nullptr),
+ Ident_super(nullptr), Ident___float128(nullptr)
+{
+ TUScope = nullptr;
+
+ LoadedExternalKnownNamespaces = false;
+ for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
+ NSNumberLiteralMethods[I] = nullptr;
+
+ if (getLangOpts().ObjC1)
+ NSAPIObj.reset(new NSAPI(Context));
+
+ if (getLangOpts().CPlusPlus)
+ FieldCollector.reset(new CXXFieldCollector());
+
+ // Tell diagnostics how to render things from the AST library.
+ Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context);
+
+ ExprEvalContexts.emplace_back(PotentiallyEvaluated, 0, false, nullptr, false);
+
+ FunctionScopes.push_back(new FunctionScopeInfo(Diags));
+
+  // Initialization of the data sharing attributes stack for OpenMP
+ InitDataSharingAttributesStack();
+}
+
+void Sema::addImplicitTypedef(StringRef Name, QualType T) {
+ DeclarationName DN = &Context.Idents.get(Name);
+ if (IdResolver.begin(DN) == IdResolver.end())
+ PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope);
+}
+
+void Sema::Initialize() {
+ if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
+ SC->InitializeSema(*this);
+
+ // Tell the external Sema source about this Sema object.
+ if (ExternalSemaSource *ExternalSema
+ = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
+ ExternalSema->InitializeSema(*this);
+
+ // This needs to happen after ExternalSemaSource::InitializeSema(this) or we
+ // will not be able to merge any duplicate __va_list_tag decls correctly.
+ VAListTagName = PP.getIdentifierInfo("__va_list_tag");
+
+ if (!TUScope)
+ return;
+
+ // Initialize predefined 128-bit integer types, if needed.
+ if (Context.getTargetInfo().hasInt128Type()) {
+    // If either of the 128-bit integer types is unavailable to name lookup,
+    // define it now.
+ DeclarationName Int128 = &Context.Idents.get("__int128_t");
+ if (IdResolver.begin(Int128) == IdResolver.end())
+ PushOnScopeChains(Context.getInt128Decl(), TUScope);
+
+ DeclarationName UInt128 = &Context.Idents.get("__uint128_t");
+ if (IdResolver.begin(UInt128) == IdResolver.end())
+ PushOnScopeChains(Context.getUInt128Decl(), TUScope);
+ }
+
+
+ // Initialize predefined Objective-C types:
+ if (getLangOpts().ObjC1) {
+ // If 'SEL' does not yet refer to any declarations, make it refer to the
+ // predefined 'SEL'.
+ DeclarationName SEL = &Context.Idents.get("SEL");
+ if (IdResolver.begin(SEL) == IdResolver.end())
+ PushOnScopeChains(Context.getObjCSelDecl(), TUScope);
+
+ // If 'id' does not yet refer to any declarations, make it refer to the
+ // predefined 'id'.
+ DeclarationName Id = &Context.Idents.get("id");
+ if (IdResolver.begin(Id) == IdResolver.end())
+ PushOnScopeChains(Context.getObjCIdDecl(), TUScope);
+
+ // Create the built-in typedef for 'Class'.
+ DeclarationName Class = &Context.Idents.get("Class");
+ if (IdResolver.begin(Class) == IdResolver.end())
+ PushOnScopeChains(Context.getObjCClassDecl(), TUScope);
+
+    // Create the built-in forward declaration for 'Protocol'.
+ DeclarationName Protocol = &Context.Idents.get("Protocol");
+ if (IdResolver.begin(Protocol) == IdResolver.end())
+ PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
+ }
+
+ // Initialize Microsoft "predefined C++ types".
+ if (getLangOpts().MSVCCompat) {
+ if (getLangOpts().CPlusPlus &&
+ IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end())
+ PushOnScopeChains(Context.buildImplicitRecord("type_info", TTK_Class),
+ TUScope);
+
+ addImplicitTypedef("size_t", Context.getSizeType());
+ }
+
+ // Initialize predefined OpenCL types.
+ if (getLangOpts().OpenCL) {
+ addImplicitTypedef("image1d_t", Context.OCLImage1dTy);
+ addImplicitTypedef("image1d_array_t", Context.OCLImage1dArrayTy);
+ addImplicitTypedef("image1d_buffer_t", Context.OCLImage1dBufferTy);
+ addImplicitTypedef("image2d_t", Context.OCLImage2dTy);
+ addImplicitTypedef("image2d_array_t", Context.OCLImage2dArrayTy);
+ addImplicitTypedef("image3d_t", Context.OCLImage3dTy);
+ addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
+ addImplicitTypedef("event_t", Context.OCLEventTy);
+ if (getLangOpts().OpenCLVersion >= 200) {
+ addImplicitTypedef("image2d_depth_t", Context.OCLImage2dDepthTy);
+ addImplicitTypedef("image2d_array_depth_t",
+ Context.OCLImage2dArrayDepthTy);
+ addImplicitTypedef("image2d_msaa_t", Context.OCLImage2dMSAATy);
+ addImplicitTypedef("image2d_array_msaa_t", Context.OCLImage2dArrayMSAATy);
+ addImplicitTypedef("image2d_msaa_depth_t", Context.OCLImage2dMSAADepthTy);
+ addImplicitTypedef("image2d_array_msaa_depth_t",
+ Context.OCLImage2dArrayMSAADepthTy);
+ addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
+ addImplicitTypedef("queue_t", Context.OCLQueueTy);
+ addImplicitTypedef("ndrange_t", Context.OCLNDRangeTy);
+ addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy);
+ addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
+ addImplicitTypedef("atomic_uint",
+ Context.getAtomicType(Context.UnsignedIntTy));
+ addImplicitTypedef("atomic_long", Context.getAtomicType(Context.LongTy));
+ addImplicitTypedef("atomic_ulong",
+ Context.getAtomicType(Context.UnsignedLongTy));
+ addImplicitTypedef("atomic_float",
+ Context.getAtomicType(Context.FloatTy));
+ addImplicitTypedef("atomic_double",
+ Context.getAtomicType(Context.DoubleTy));
+      // OpenCL C v2.0, s6.13.11.6 requires that atomic_flag is implemented as
+      // a 32-bit integer, and per OpenCL C v2.0, s6.1.1, int is always 32 bits
+      // wide.
+ addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy));
+ addImplicitTypedef("atomic_intptr_t",
+ Context.getAtomicType(Context.getIntPtrType()));
+ addImplicitTypedef("atomic_uintptr_t",
+ Context.getAtomicType(Context.getUIntPtrType()));
+ addImplicitTypedef("atomic_size_t",
+ Context.getAtomicType(Context.getSizeType()));
+ addImplicitTypedef("atomic_ptrdiff_t",
+ Context.getAtomicType(Context.getPointerDiffType()));
+ }
+ }
+
+ if (Context.getTargetInfo().hasBuiltinMSVaList()) {
+ DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list");
+ if (IdResolver.begin(MSVaList) == IdResolver.end())
+ PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope);
+ }
+
+ DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list");
+ if (IdResolver.begin(BuiltinVaList) == IdResolver.end())
+ PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope);
+}
+
+Sema::~Sema() {
+ llvm::DeleteContainerSeconds(LateParsedTemplateMap);
+ if (PackContext) FreePackedContext();
+ if (VisContext) FreeVisContext();
+ // Kill all the active scopes.
+ for (unsigned I = 1, E = FunctionScopes.size(); I != E; ++I)
+ delete FunctionScopes[I];
+ if (FunctionScopes.size() == 1)
+ delete FunctionScopes[0];
+
+ // Tell the SemaConsumer to forget about us; we're going out of scope.
+ if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
+ SC->ForgetSema();
+
+ // Detach from the external Sema source.
+ if (ExternalSemaSource *ExternalSema
+ = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
+ ExternalSema->ForgetSema();
+
+ // If Sema's ExternalSource is the multiplexer - we own it.
+ if (isMultiplexExternalSource)
+ delete ExternalSource;
+
+ threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache);
+
+ // Destroys data sharing attributes stack for OpenMP
+ DestroyDataSharingAttributesStack();
+
+ assert(DelayedTypos.empty() && "Uncorrected typos!");
+}
+
+/// makeUnavailableInSystemHeader - There is an error in the current
+/// context. If we're still in a system header, and we can plausibly
+/// make the relevant declaration unavailable instead of erroring, do
+/// so and return true.
+bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
+ UnavailableAttr::ImplicitReason reason) {
+ // If we're not in a function, it's an error.
+ FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext);
+ if (!fn) return false;
+
+ // If we're in template instantiation, it's an error.
+ if (!ActiveTemplateInstantiations.empty())
+ return false;
+
+ // If that function's not in a system header, it's an error.
+ if (!Context.getSourceManager().isInSystemHeader(loc))
+ return false;
+
+ // If the function is already unavailable, it's not an error.
+ if (fn->hasAttr<UnavailableAttr>()) return true;
+
+ fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc));
+ return true;
+}
+
+ASTMutationListener *Sema::getASTMutationListener() const {
+ return getASTConsumer().GetASTMutationListener();
+}
+
+///\brief Registers an external source. If an external source already exists,
+/// creates a multiplex external source and appends to it.
+///
+///\param[in] E - A non-null external sema source.
+///
+void Sema::addExternalSource(ExternalSemaSource *E) {
+ assert(E && "Cannot use with NULL ptr");
+
+ if (!ExternalSource) {
+ ExternalSource = E;
+ return;
+ }
+
+ if (isMultiplexExternalSource)
+ static_cast<MultiplexExternalSemaSource*>(ExternalSource)->addSource(*E);
+ else {
+ ExternalSource = new MultiplexExternalSemaSource(*ExternalSource, *E);
+ isMultiplexExternalSource = true;
+ }
+}
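+// Editor's example (comment only, not upstream code): a client with its own
+// ExternalSemaSource subclass would register it roughly like
+//   MySource *Src = new MySource(...); // hypothetical subclass
+//   S.addExternalSource(Src);
+// and a second registration quietly upgrades ExternalSource to the
+// MultiplexExternalSemaSource that ~Sema above owns and deletes.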
+
+/// \brief Print out statistics about the semantic analysis.
+void Sema::PrintStats() const {
+ llvm::errs() << "\n*** Semantic Analysis Stats:\n";
+ llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n";
+
+ BumpAlloc.PrintStats();
+ AnalysisWarnings.PrintStats();
+}
+
+void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
+ QualType SrcType,
+ SourceLocation Loc) {
+ Optional<NullabilityKind> ExprNullability = SrcType->getNullability(Context);
+ if (!ExprNullability || *ExprNullability != NullabilityKind::Nullable)
+ return;
+
+ Optional<NullabilityKind> TypeNullability = DstType->getNullability(Context);
+ if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull)
+ return;
+
+ Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType;
+}
+
+/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
+/// If there is already an implicit cast, merge into the existing one.
+/// The result is of the given category.
+ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
+ CastKind Kind, ExprValueKind VK,
+ const CXXCastPath *BasePath,
+ CheckedConversionKind CCK) {
+#ifndef NDEBUG
+ if (VK == VK_RValue && !E->isRValue()) {
+ switch (Kind) {
+ default:
+ llvm_unreachable("can't implicitly cast lvalue to rvalue with this cast "
+ "kind");
+ case CK_LValueToRValue:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_ToVoid:
+ break;
+ }
+ }
+ assert((VK == VK_RValue || !E->isRValue()) && "can't cast rvalue to lvalue");
+#endif
+
+ diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getLocStart());
+
+ QualType ExprTy = Context.getCanonicalType(E->getType());
+ QualType TypeTy = Context.getCanonicalType(Ty);
+
+ if (ExprTy == TypeTy)
+ return E;
+
+ if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) {
+ ImpCast->setType(Ty);
+ ImpCast->setValueKind(VK);
+ return E;
+ }
+ }
+
+ return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK);
+}
+
+/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
+/// to the conversion from scalar type ScalarTy to the Boolean type.
+CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
+ switch (ScalarTy->getScalarTypeKind()) {
+ case Type::STK_Bool: return CK_NoOp;
+ case Type::STK_CPointer: return CK_PointerToBoolean;
+ case Type::STK_BlockPointer: return CK_PointerToBoolean;
+ case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean;
+ case Type::STK_MemberPointer: return CK_MemberPointerToBoolean;
+ case Type::STK_Integral: return CK_IntegralToBoolean;
+ case Type::STK_Floating: return CK_FloatingToBoolean;
+ case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean;
+ case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean;
+ }
+ return CK_Invalid;
+}
+
+/// \brief Used to prune the decls of Sema's UnusedFileScopedDecls vector.
+static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
+ if (D->getMostRecentDecl()->isUsed())
+ return true;
+
+ if (D->isExternallyVisible())
+ return true;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // UnusedFileScopedDecls stores the first declaration.
+    // The declaration may have become a definition, so check again.
+ const FunctionDecl *DeclToCheck;
+ if (FD->hasBody(DeclToCheck))
+ return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
+
+ // Later redecls may add new information resulting in not having to warn,
+ // so check again.
+ DeclToCheck = FD->getMostRecentDecl();
+ if (DeclToCheck != FD)
+ return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ // If a variable usable in constant expressions is referenced,
+ // don't warn if it isn't used: if the value of a variable is required
+ // for the computation of a constant expression, it doesn't make sense to
+ // warn even if the variable isn't odr-used. (isReferenced doesn't
+ // precisely reflect that, but it's a decent approximation.)
+ if (VD->isReferenced() &&
+ VD->isUsableInConstantExpressions(SemaRef->Context))
+ return true;
+
+ // UnusedFileScopedDecls stores the first declaration.
+    // The declaration may have become a definition, so check again.
+ const VarDecl *DeclToCheck = VD->getDefinition();
+ if (DeclToCheck)
+ return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
+
+ // Later redecls may add new information resulting in not having to warn,
+ // so check again.
+ DeclToCheck = VD->getMostRecentDecl();
+ if (DeclToCheck != VD)
+ return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
+ }
+
+ return false;
+}
+
+/// Obtains a sorted list of functions that are undefined but ODR-used.
+void Sema::getUndefinedButUsed(
+ SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) {
+ for (llvm::DenseMap<NamedDecl *, SourceLocation>::iterator
+ I = UndefinedButUsed.begin(), E = UndefinedButUsed.end();
+ I != E; ++I) {
+ NamedDecl *ND = I->first;
+
+    // Ignore declarations that have become invalid.
+ if (ND->isInvalidDecl()) continue;
+
+ // __attribute__((weakref)) is basically a definition.
+ if (ND->hasAttr<WeakRefAttr>()) continue;
+
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ if (FD->isDefined())
+ continue;
+ if (FD->isExternallyVisible() &&
+ !FD->getMostRecentDecl()->isInlined())
+ continue;
+ } else {
+ if (cast<VarDecl>(ND)->hasDefinition() != VarDecl::DeclarationOnly)
+ continue;
+ if (ND->isExternallyVisible())
+ continue;
+ }
+
+ Undefined.push_back(std::make_pair(ND, I->second));
+ }
+
+ // Sort (in order of use site) so that we're not dependent on the iteration
+ // order through an llvm::DenseMap.
+ SourceManager &SM = Context.getSourceManager();
+ std::sort(Undefined.begin(), Undefined.end(),
+ [&SM](const std::pair<NamedDecl *, SourceLocation> &l,
+ const std::pair<NamedDecl *, SourceLocation> &r) {
+ if (l.second.isValid() && !r.second.isValid())
+ return true;
+ if (!l.second.isValid() && r.second.isValid())
+ return false;
+ if (l.second != r.second)
+ return SM.isBeforeInTranslationUnit(l.second, r.second);
+ return SM.isBeforeInTranslationUnit(l.first->getLocation(),
+ r.first->getLocation());
+ });
+}
+
+/// checkUndefinedButUsed - Check for undefined objects with internal linkage
+/// or that are inline.
+static void checkUndefinedButUsed(Sema &S) {
+ if (S.UndefinedButUsed.empty()) return;
+
+ // Collect all the still-undefined entities with internal linkage.
+ SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
+ S.getUndefinedButUsed(Undefined);
+ if (Undefined.empty()) return;
+
+ for (SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> >::iterator
+ I = Undefined.begin(), E = Undefined.end(); I != E; ++I) {
+ NamedDecl *ND = I->first;
+
+ if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) {
+ // An exported function will always be emitted when defined, so even if
+ // the function is inline, it doesn't have to be emitted in this TU. An
+ // imported function implies that it has been exported somewhere else.
+ continue;
+ }
+
+ if (!ND->isExternallyVisible()) {
+ S.Diag(ND->getLocation(), diag::warn_undefined_internal)
+ << isa<VarDecl>(ND) << ND;
+ } else {
+ assert(cast<FunctionDecl>(ND)->getMostRecentDecl()->isInlined() &&
+ "used object requires definition but isn't inline or internal?");
+ S.Diag(ND->getLocation(), diag::warn_undefined_inline) << ND;
+ }
+ if (I->second.isValid())
+ S.Diag(I->second, diag::note_used_here);
+ }
+}
+
+void Sema::LoadExternalWeakUndeclaredIdentifiers() {
+ if (!ExternalSource)
+ return;
+
+ SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs;
+ ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs);
+ for (auto &WeakID : WeakIDs)
+ WeakUndeclaredIdentifiers.insert(WeakID);
+}
+
+
+typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap;
+
+/// \brief Returns true if all methods and nested classes of the given
+/// CXXRecordDecl are defined in this translation unit.
+///
+/// Should only be called from ActOnEndOfTranslationUnit so that all
+/// definitions are actually read.
+static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD,
+ RecordCompleteMap &MNCComplete) {
+ RecordCompleteMap::iterator Cache = MNCComplete.find(RD);
+ if (Cache != MNCComplete.end())
+ return Cache->second;
+ if (!RD->isCompleteDefinition())
+ return false;
+ bool Complete = true;
+ for (DeclContext::decl_iterator I = RD->decls_begin(),
+ E = RD->decls_end();
+ I != E && Complete; ++I) {
+ if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I))
+ Complete = M->isDefined() || (M->isPure() && !isa<CXXDestructorDecl>(M));
+ else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I))
+ // If the template function is marked as late template parsed at this
+ // point, it has not been instantiated and therefore we have not
+ // performed semantic analysis on it yet, so we cannot know if the type
+ // can be considered complete.
+ Complete = !F->getTemplatedDecl()->isLateTemplateParsed() &&
+ F->getTemplatedDecl()->isDefined();
+ else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) {
+ if (R->isInjectedClassName())
+ continue;
+ if (R->hasDefinition())
+ Complete = MethodsAndNestedClassesComplete(R->getDefinition(),
+ MNCComplete);
+ else
+ Complete = false;
+ }
+ }
+ MNCComplete[RD] = Complete;
+ return Complete;
+}
+
+/// \brief Returns true if the given CXXRecordDecl is fully defined in this
+/// translation unit, i.e. all methods are defined or pure virtual and all
+/// friends, friend functions and nested classes are fully defined in this
+/// translation unit.
+///
+/// Should only be called from ActOnEndOfTranslationUnit so that all
+/// definitions are actually read.
+static bool IsRecordFullyDefined(const CXXRecordDecl *RD,
+ RecordCompleteMap &RecordsComplete,
+ RecordCompleteMap &MNCComplete) {
+ RecordCompleteMap::iterator Cache = RecordsComplete.find(RD);
+ if (Cache != RecordsComplete.end())
+ return Cache->second;
+ bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete);
+ for (CXXRecordDecl::friend_iterator I = RD->friend_begin(),
+ E = RD->friend_end();
+ I != E && Complete; ++I) {
+ // Check if friend classes and methods are complete.
+ if (TypeSourceInfo *TSI = (*I)->getFriendType()) {
+ // Friend classes are available as the TypeSourceInfo of the FriendDecl.
+ if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl())
+ Complete = MethodsAndNestedClassesComplete(FriendD, MNCComplete);
+ else
+ Complete = false;
+ } else {
+ // Friend functions are available through the NamedDecl of FriendDecl.
+ if (const FunctionDecl *FD =
+ dyn_cast<FunctionDecl>((*I)->getFriendDecl()))
+ Complete = FD->isDefined();
+ else
+ // This is a template friend, give up.
+ Complete = false;
+ }
+ }
+ RecordsComplete[RD] = Complete;
+ return Complete;
+}
+
+void Sema::emitAndClearUnusedLocalTypedefWarnings() {
+ if (ExternalSource)
+ ExternalSource->ReadUnusedLocalTypedefNameCandidates(
+ UnusedLocalTypedefNameCandidates);
+ for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) {
+ if (TD->isReferenced())
+ continue;
+ Diag(TD->getLocation(), diag::warn_unused_local_typedef)
+ << isa<TypeAliasDecl>(TD) << TD->getDeclName();
+ }
+ UnusedLocalTypedefNameCandidates.clear();
+}
+
+/// ActOnEndOfTranslationUnit - This is called at the very end of the
+/// translation unit when EOF is reached and all but the top-level scope is
+/// popped.
+void Sema::ActOnEndOfTranslationUnit() {
+ assert(DelayedDiagnostics.getCurrentPool() == nullptr
+ && "reached end of translation unit with a pool attached?");
+
+ // If code completion is enabled, don't perform any end-of-translation-unit
+ // work.
+ if (PP.isCodeCompletionEnabled())
+ return;
+
+ // Complete translation units and modules define vtables and perform implicit
+ // instantiations. PCH files do not.
+ if (TUKind != TU_Prefix) {
+ DiagnoseUseOfUnimplementedSelectors();
+
+    // If DefineUsedVTables ends up marking any virtual member functions, it
+ // might lead to more pending template instantiations, which we then need
+ // to instantiate.
+ DefineUsedVTables();
+
+ // C++: Perform implicit template instantiations.
+ //
+ // FIXME: When we perform these implicit instantiations, we do not
+ // carefully keep track of the point of instantiation (C++ [temp.point]).
+ // This means that name lookup that occurs within the template
+ // instantiation will always happen at the end of the translation unit,
+ // so it will find some names that are not required to be found. This is
+ // valid, but we could do better by diagnosing if an instantiation uses a
+ // name that was not visible at its first point of instantiation.
+ if (ExternalSource) {
+ // Load pending instantiations from the external source.
+ SmallVector<PendingImplicitInstantiation, 4> Pending;
+ ExternalSource->ReadPendingInstantiations(Pending);
+ PendingInstantiations.insert(PendingInstantiations.begin(),
+ Pending.begin(), Pending.end());
+ }
+ PerformPendingInstantiations();
+
+ if (LateTemplateParserCleanup)
+ LateTemplateParserCleanup(OpaqueParser);
+
+ CheckDelayedMemberExceptionSpecs();
+ }
+
+ // All delayed member exception specs should be checked or we end up accepting
+ // incompatible declarations.
+ // FIXME: This is wrong for TUKind == TU_Prefix. In that case, we need to
+ // write out the lists to the AST file (if any).
+ assert(DelayedDefaultedMemberExceptionSpecs.empty());
+ assert(DelayedExceptionSpecChecks.empty());
+
+ // All dllexport classes should have been processed already.
+ assert(DelayedDllExportClasses.empty());
+
+ // Remove file scoped decls that turned out to be used.
+ UnusedFileScopedDecls.erase(
+ std::remove_if(UnusedFileScopedDecls.begin(nullptr, true),
+ UnusedFileScopedDecls.end(),
+ std::bind1st(std::ptr_fun(ShouldRemoveFromUnused), this)),
+ UnusedFileScopedDecls.end());
+
+ if (TUKind == TU_Prefix) {
+ // Translation unit prefixes don't need any of the checking below.
+ TUScope = nullptr;
+ return;
+ }
+
+ // Check for #pragma weak identifiers that were never declared
+ LoadExternalWeakUndeclaredIdentifiers();
+ for (auto WeakID : WeakUndeclaredIdentifiers) {
+ if (WeakID.second.getUsed())
+ continue;
+
+ Decl *PrevDecl = LookupSingleName(TUScope, WeakID.first, SourceLocation(),
+ LookupOrdinaryName);
+ if (PrevDecl != nullptr &&
+ !(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl)))
+ Diag(WeakID.second.getLocation(), diag::warn_attribute_wrong_decl_type)
+ << "'weak'" << ExpectedVariableOrFunction;
+ else
+ Diag(WeakID.second.getLocation(), diag::warn_weak_identifier_undeclared)
+ << WeakID.first;
+ }
+
+ if (LangOpts.CPlusPlus11 &&
+ !Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation()))
+ CheckDelegatingCtorCycles();
+
+ if (TUKind == TU_Module) {
+ // If we are building a module, resolve all of the exported declarations
+ // now.
+ if (Module *CurrentModule = PP.getCurrentModule()) {
+ ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();
+
+ SmallVector<Module *, 2> Stack;
+ Stack.push_back(CurrentModule);
+ while (!Stack.empty()) {
+ Module *Mod = Stack.pop_back_val();
+
+ // Resolve the exported declarations and conflicts.
+ // FIXME: Actually complain, once we figure out how to teach the
+ // diagnostic client to deal with complaints in the module map at this
+ // point.
+ ModMap.resolveExports(Mod, /*Complain=*/false);
+ ModMap.resolveUses(Mod, /*Complain=*/false);
+ ModMap.resolveConflicts(Mod, /*Complain=*/false);
+
+ // Queue the submodules, so their exports will also be resolved.
+ Stack.append(Mod->submodule_begin(), Mod->submodule_end());
+ }
+ }
+
+ // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for
+ // modules when they are built, not every time they are used.
+ emitAndClearUnusedLocalTypedefWarnings();
+
+ // Modules don't need any of the checking below.
+ TUScope = nullptr;
+ return;
+ }
+
+ // C99 6.9.2p2:
+ // A declaration of an identifier for an object that has file
+ // scope without an initializer, and without a storage-class
+ // specifier or with the storage-class specifier static,
+ // constitutes a tentative definition. If a translation unit
+ // contains one or more tentative definitions for an identifier,
+ // and the translation unit contains no external definition for
+ // that identifier, then the behavior is exactly as if the
+ // translation unit contains a file scope declaration of that
+ // identifier, with the composite type as of the end of the
+ // translation unit, with an initializer equal to 0.
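+  // For example (editor's note), a translation unit containing only
+  //   int i; // tentative definition
+  //   int i; // still tentative; no external definition follows
+  // behaves exactly as if it contained "int i = 0;".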
+ llvm::SmallSet<VarDecl *, 32> Seen;
+ for (TentativeDefinitionsType::iterator
+ T = TentativeDefinitions.begin(ExternalSource),
+ TEnd = TentativeDefinitions.end();
+ T != TEnd; ++T)
+ {
+ VarDecl *VD = (*T)->getActingDefinition();
+
+ // If the tentative definition was completed, getActingDefinition() returns
+ // null. If we've already seen this variable before, insert()'s second
+ // return value is false.
+ if (!VD || VD->isInvalidDecl() || !Seen.insert(VD).second)
+ continue;
+
+ if (const IncompleteArrayType *ArrayT
+ = Context.getAsIncompleteArrayType(VD->getType())) {
+ // Set the length of the array to 1 (C99 6.9.2p5).
+ Diag(VD->getLocation(), diag::warn_tentative_incomplete_array);
+ llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true);
+ QualType T = Context.getConstantArrayType(ArrayT->getElementType(),
+ One, ArrayType::Normal, 0);
+ VD->setType(T);
+ } else if (RequireCompleteType(VD->getLocation(), VD->getType(),
+ diag::err_tentative_def_incomplete_type))
+ VD->setInvalidDecl();
+
+ CheckCompleteVariableDeclaration(VD);
+
+ // Notify the consumer that we've completed a tentative definition.
+ if (!VD->isInvalidDecl())
+ Consumer.CompleteTentativeDefinition(VD);
+
+ }
+
+ // If there were errors, disable 'unused' warnings since they will mostly be
+ // noise.
+ if (!Diags.hasErrorOccurred()) {
+ // Output warning for unused file scoped decls.
+ for (UnusedFileScopedDeclsType::iterator
+ I = UnusedFileScopedDecls.begin(ExternalSource),
+ E = UnusedFileScopedDecls.end(); I != E; ++I) {
+ if (ShouldRemoveFromUnused(this, *I))
+ continue;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
+ const FunctionDecl *DiagD;
+ if (!FD->hasBody(DiagD))
+ DiagD = FD;
+ if (DiagD->isDeleted())
+ continue; // Deleted functions are supposed to be unused.
+ if (DiagD->isReferenced()) {
+ if (isa<CXXMethodDecl>(DiagD))
+ Diag(DiagD->getLocation(), diag::warn_unneeded_member_function)
+ << DiagD->getDeclName();
+ else {
+ if (FD->getStorageClass() == SC_Static &&
+ !FD->isInlineSpecified() &&
+ !SourceMgr.isInMainFile(
+ SourceMgr.getExpansionLoc(FD->getLocation())))
+ Diag(DiagD->getLocation(),
+ diag::warn_unneeded_static_internal_decl)
+ << DiagD->getDeclName();
+ else
+ Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
+ << /*function*/0 << DiagD->getDeclName();
+ }
+ } else {
+ Diag(DiagD->getLocation(),
+ isa<CXXMethodDecl>(DiagD) ? diag::warn_unused_member_function
+ : diag::warn_unused_function)
+ << DiagD->getDeclName();
+ }
+ } else {
+ const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition();
+ if (!DiagD)
+ DiagD = cast<VarDecl>(*I);
+ if (DiagD->isReferenced()) {
+ Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
+ << /*variable*/1 << DiagD->getDeclName();
+ } else if (DiagD->getType().isConstQualified()) {
+ Diag(DiagD->getLocation(), diag::warn_unused_const_variable)
+ << DiagD->getDeclName();
+ } else {
+ Diag(DiagD->getLocation(), diag::warn_unused_variable)
+ << DiagD->getDeclName();
+ }
+ }
+ }
+
+ if (ExternalSource)
+ ExternalSource->ReadUndefinedButUsed(UndefinedButUsed);
+ checkUndefinedButUsed(*this);
+
+ emitAndClearUnusedLocalTypedefWarnings();
+ }
+
+ if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) {
+ RecordCompleteMap RecordsComplete;
+ RecordCompleteMap MNCComplete;
+ for (NamedDeclSetType::iterator I = UnusedPrivateFields.begin(),
+ E = UnusedPrivateFields.end(); I != E; ++I) {
+ const NamedDecl *D = *I;
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext());
+ if (RD && !RD->isUnion() &&
+ IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) {
+ Diag(D->getLocation(), diag::warn_unused_private_field)
+ << D->getDeclName();
+ }
+ }
+ }
+
+ if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) {
+ if (ExternalSource)
+ ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs);
+ for (const auto &DeletedFieldInfo : DeleteExprs) {
+ for (const auto &DeleteExprLoc : DeletedFieldInfo.second) {
+ AnalyzeDeleteExprMismatch(DeletedFieldInfo.first, DeleteExprLoc.first,
+ DeleteExprLoc.second);
+ }
+ }
+ }
+
+ // Check we've noticed that we're no longer parsing the initializer for every
+ // variable. If we miss cases, then at best we have a performance issue and
+ // at worst a rejects-valid bug.
+ assert(ParsingInitForAutoVars.empty() &&
+ "Didn't unmark var as having its initializer parsed");
+
+ TUScope = nullptr;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Helper functions.
+//===----------------------------------------------------------------------===//
+
+DeclContext *Sema::getFunctionLevelDeclContext() {
+ DeclContext *DC = CurContext;
+
+ while (true) {
+ if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC)) {
+ DC = DC->getParent();
+ } else if (isa<CXXMethodDecl>(DC) &&
+ cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call &&
+ cast<CXXRecordDecl>(DC->getParent())->isLambda()) {
+ DC = DC->getParent()->getParent();
+ }
+ else break;
+ }
+
+ return DC;
+}
+
+/// getCurFunctionDecl - If inside of a function body, this returns a pointer
+/// to the function decl for the function being parsed. If we're currently
+/// in a 'block', this returns the containing context.
+FunctionDecl *Sema::getCurFunctionDecl() {
+ DeclContext *DC = getFunctionLevelDeclContext();
+ return dyn_cast<FunctionDecl>(DC);
+}
+
+ObjCMethodDecl *Sema::getCurMethodDecl() {
+ DeclContext *DC = getFunctionLevelDeclContext();
+ while (isa<RecordDecl>(DC))
+ DC = DC->getParent();
+ return dyn_cast<ObjCMethodDecl>(DC);
+}
+
+NamedDecl *Sema::getCurFunctionOrMethodDecl() {
+ DeclContext *DC = getFunctionLevelDeclContext();
+ if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC))
+ return cast<NamedDecl>(DC);
+ return nullptr;
+}
+
+void Sema::EmitCurrentDiagnostic(unsigned DiagID) {
+ // FIXME: It doesn't make sense to me that DiagID is an incoming argument here
+ // and yet we also use the current diag ID on the DiagnosticsEngine. This has
+ // been made more painfully obvious by the refactor that introduced this
+ // function, but it is possible that the incoming argument can be
+  // eliminated. If it truly cannot be (for example, there is some reentrancy
+ // issue I am not seeing yet), then there should at least be a clarifying
+ // comment somewhere.
+ if (Optional<TemplateDeductionInfo*> Info = isSFINAEContext()) {
+ switch (DiagnosticIDs::getDiagnosticSFINAEResponse(
+ Diags.getCurrentDiagID())) {
+ case DiagnosticIDs::SFINAE_Report:
+ // We'll report the diagnostic below.
+ break;
+
+ case DiagnosticIDs::SFINAE_SubstitutionFailure:
+ // Count this failure so that we know that template argument deduction
+ // has failed.
+ ++NumSFINAEErrors;
+
+ // Make a copy of this suppressed diagnostic and store it with the
+ // template-deduction information.
+ if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
+ Diagnostic DiagInfo(&Diags);
+ (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
+ PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
+ }
+
+ Diags.setLastDiagnosticIgnored();
+ Diags.Clear();
+ return;
+
+ case DiagnosticIDs::SFINAE_AccessControl: {
+ // Per C++ Core Issue 1170, access control is part of SFINAE.
+ // Additionally, the AccessCheckingSFINAE flag can be used to temporarily
+ // make access control a part of SFINAE for the purposes of checking
+ // type traits.
+ if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11)
+ break;
+
+ SourceLocation Loc = Diags.getCurrentDiagLoc();
+
+ // Suppress this diagnostic.
+ ++NumSFINAEErrors;
+
+ // Make a copy of this suppressed diagnostic and store it with the
+ // template-deduction information.
+ if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
+ Diagnostic DiagInfo(&Diags);
+ (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
+ PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
+ }
+
+ Diags.setLastDiagnosticIgnored();
+ Diags.Clear();
+
+ // Now the diagnostic state is clear, produce a C++98 compatibility
+ // warning.
+ Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control);
+
+ // The last diagnostic which Sema produced was ignored. Suppress any
+ // notes attached to it.
+ Diags.setLastDiagnosticIgnored();
+ return;
+ }
+
+ case DiagnosticIDs::SFINAE_Suppress:
+ // Make a copy of this suppressed diagnostic and store it with the
+      // template-deduction information.
+ if (*Info) {
+ Diagnostic DiagInfo(&Diags);
+ (*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(),
+ PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
+ }
+
+ // Suppress this diagnostic.
+ Diags.setLastDiagnosticIgnored();
+ Diags.Clear();
+ return;
+ }
+ }
+
+ // Set up the context's printing policy based on our current state.
+ Context.setPrintingPolicy(getPrintingPolicy());
+
+ // Emit the diagnostic.
+ if (!Diags.EmitCurrentDiagnostic())
+ return;
+
+ // If this is not a note, and we're in a template instantiation
+ // that is different from the last template instantiation where
+ // we emitted an error, print a template instantiation
+ // backtrace.
+ if (!DiagnosticIDs::isBuiltinNote(DiagID) &&
+ !ActiveTemplateInstantiations.empty() &&
+ ActiveTemplateInstantiations.back()
+ != LastTemplateInstantiationErrorContext) {
+ PrintInstantiationStack();
+ LastTemplateInstantiationErrorContext = ActiveTemplateInstantiations.back();
+ }
+}
+
+Sema::SemaDiagnosticBuilder
+Sema::Diag(SourceLocation Loc, const PartialDiagnostic& PD) {
+ SemaDiagnosticBuilder Builder(Diag(Loc, PD.getDiagID()));
+ PD.Emit(Builder);
+
+ return Builder;
+}
+
+/// \brief Looks through the macro-expansion chain for the given
+/// location, looking for a macro expansion with the given name.
+/// If one is found, returns true and sets the location to that
+/// expansion loc.
+bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
+ SourceLocation loc = locref;
+ if (!loc.isMacroID()) return false;
+
+ // There's no good way right now to look at the intermediate
+ // expansions, so just jump to the expansion location.
+ loc = getSourceManager().getExpansionLoc(loc);
+
+ // If that's written with the name, stop here.
+ SmallVector<char, 16> buffer;
+ if (getPreprocessor().getSpelling(loc, buffer) == name) {
+ locref = loc;
+ return true;
+ }
+ return false;
+}
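+// Editor's example: given a location inside a macro expansion, a caller can
+// redirect a diagnostic at the macro use itself, e.g. (sketch):
+//   SourceLocation Loc = E->getExprLoc();
+//   if (findMacroSpelling(Loc, "NULL"))
+//     ...; // Loc now points at the 'NULL' the user actually wrote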
+
+/// \brief Determines the active Scope associated with the given declaration
+/// context.
+///
+/// This routine maps a declaration context to the active Scope object that
+/// represents that declaration context in the parser. It is typically used
+/// from "scope-less" code (e.g., template instantiation, lazy creation of
+/// declarations) that injects a name for name-lookup purposes and, therefore,
+/// must update the Scope.
+///
+/// \returns The scope corresponding to the given declaration context, or NULL
+/// if no such scope is open.
+Scope *Sema::getScopeForContext(DeclContext *Ctx) {
+
+ if (!Ctx)
+ return nullptr;
+
+ Ctx = Ctx->getPrimaryContext();
+ for (Scope *S = getCurScope(); S; S = S->getParent()) {
+ // Ignore scopes that cannot have declarations. This is important for
+ // out-of-line definitions of static class members.
+ if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope))
+ if (DeclContext *Entity = S->getEntity())
+ if (Ctx == Entity->getPrimaryContext())
+ return S;
+ }
+
+ return nullptr;
+}
+
+/// \brief Enter a new function scope
+void Sema::PushFunctionScope() {
+ if (FunctionScopes.size() == 1) {
+ // Use the "top" function scope rather than having to allocate
+ // memory for a new scope.
+ FunctionScopes.back()->Clear();
+ FunctionScopes.push_back(FunctionScopes.back());
+ return;
+ }
+
+ FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics()));
+}
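+// Editor's note: the scope at index 0 is preallocated in Sema's constructor,
+// so the push above may duplicate that pointer; PopFunctionScopeInfo below
+// deletes a popped scope only when it is not that shared top entry.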
+
+void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
+ FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(),
+ BlockScope, Block));
+}
+
+LambdaScopeInfo *Sema::PushLambdaScope() {
+ LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics());
+ FunctionScopes.push_back(LSI);
+ return LSI;
+}
+
+void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
+ if (LambdaScopeInfo *const LSI = getCurLambda()) {
+ LSI->AutoTemplateParameterDepth = Depth;
+ return;
+ }
+ llvm_unreachable(
+ "Remove assertion if intentionally called in a non-lambda context.");
+}
+
+void Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
+ const Decl *D, const BlockExpr *blkExpr) {
+ FunctionScopeInfo *Scope = FunctionScopes.pop_back_val();
+ assert(!FunctionScopes.empty() && "mismatched push/pop!");
+
+ // Issue any analysis-based warnings.
+ if (WP && D)
+ AnalysisWarnings.IssueWarnings(*WP, Scope, D, blkExpr);
+ else
+ for (const auto &PUD : Scope->PossiblyUnreachableDiags)
+ Diag(PUD.Loc, PUD.PD);
+
+ if (FunctionScopes.back() != Scope)
+ delete Scope;
+}
+
+void Sema::PushCompoundScope() {
+ getCurFunction()->CompoundScopes.push_back(CompoundScopeInfo());
+}
+
+void Sema::PopCompoundScope() {
+ FunctionScopeInfo *CurFunction = getCurFunction();
+ assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop");
+
+ CurFunction->CompoundScopes.pop_back();
+}
+
+/// \brief Determine whether any errors occurred within this function/method/
+/// block.
+bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
+ return getCurFunction()->ErrorTrap.hasUnrecoverableErrorOccurred();
+}
+
+BlockScopeInfo *Sema::getCurBlock() {
+ if (FunctionScopes.empty())
+ return nullptr;
+
+ auto CurBSI = dyn_cast<BlockScopeInfo>(FunctionScopes.back());
+ if (CurBSI && CurBSI->TheDecl &&
+ !CurBSI->TheDecl->Encloses(CurContext)) {
+ // We have switched contexts due to template instantiation.
+ assert(!ActiveTemplateInstantiations.empty());
+ return nullptr;
+ }
+
+ return CurBSI;
+}
+
+LambdaScopeInfo *Sema::getCurLambda() {
+ if (FunctionScopes.empty())
+ return nullptr;
+
+ auto CurLSI = dyn_cast<LambdaScopeInfo>(FunctionScopes.back());
+ if (CurLSI && CurLSI->Lambda &&
+ !CurLSI->Lambda->Encloses(CurContext)) {
+ // We have switched contexts due to template instantiation.
+ assert(!ActiveTemplateInstantiations.empty());
+ return nullptr;
+ }
+
+ return CurLSI;
+}
+// We have a generic lambda if we parsed auto parameters, or we have
+// an associated template parameter list.
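+// For example, '[](auto x) { return x; }' is generic: its 'auto' parameter
+// becomes an invented template parameter.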
+LambdaScopeInfo *Sema::getCurGenericLambda() {
+ if (LambdaScopeInfo *LSI = getCurLambda()) {
+ return (LSI->AutoTemplateParams.size() ||
+ LSI->GLTemplateParameterList) ? LSI : nullptr;
+ }
+ return nullptr;
+}
+
+
+void Sema::ActOnComment(SourceRange Comment) {
+ if (!LangOpts.RetainCommentsFromSystemHeaders &&
+ SourceMgr.isInSystemHeader(Comment.getBegin()))
+ return;
+ RawComment RC(SourceMgr, Comment, false,
+ LangOpts.CommentOpts.ParseAllComments);
+ if (RC.isAlmostTrailingComment()) {
+ SourceRange MagicMarkerRange(Comment.getBegin(),
+ Comment.getBegin().getLocWithOffset(3));
+ StringRef MagicMarkerText;
+ switch (RC.getKind()) {
+ case RawComment::RCK_OrdinaryBCPL:
+ MagicMarkerText = "///<";
+ break;
+ case RawComment::RCK_OrdinaryC:
+ MagicMarkerText = "/**<";
+ break;
+ default:
+ llvm_unreachable("if this is an almost Doxygen comment, "
+ "it should be ordinary");
+ }
+ Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment) <<
+ FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText);
+ }
+ Context.addComment(RC);
+}
+
+// Pin this vtable to this file.
+ExternalSemaSource::~ExternalSemaSource() {}
+
+void ExternalSemaSource::ReadMethodPool(Selector Sel) { }
+
+void ExternalSemaSource::ReadKnownNamespaces(
+ SmallVectorImpl<NamespaceDecl *> &Namespaces) {
+}
+
+void ExternalSemaSource::ReadUndefinedButUsed(
+ llvm::DenseMap<NamedDecl *, SourceLocation> &Undefined) {
+}
+
+void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector<
+ FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {}
+
+void PrettyDeclStackTraceEntry::print(raw_ostream &OS) const {
+ SourceLocation Loc = this->Loc;
+ if (!Loc.isValid() && TheDecl) Loc = TheDecl->getLocation();
+ if (Loc.isValid()) {
+ Loc.print(OS, S.getSourceManager());
+ OS << ": ";
+ }
+ OS << Message;
+
+ if (TheDecl && isa<NamedDecl>(TheDecl)) {
+ std::string Name = cast<NamedDecl>(TheDecl)->getNameAsString();
+ if (!Name.empty())
+ OS << " '" << Name << '\'';
+ }
+
+ OS << '\n';
+}
+
+/// \brief Figure out if an expression could be turned into a call.
+///
+/// Use this when trying to recover from an error where the programmer may have
+/// written just the name of a function instead of actually calling it.
+///
+/// \param E - The expression to examine.
+/// \param ZeroArgCallReturnTy - If the expression can be turned into a call
+/// with no arguments, this parameter is set to the type returned by such a
+/// call; otherwise, it is set to an empty QualType.
+/// \param OverloadSet - If the expression is an overloaded function
+/// name, this parameter is populated with the decls of the various overloads.
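+///
+/// For example, given 'struct S { int size(); };', the erroneous
+/// 'int n = s.size;' can be recovered by treating 's.size' as the
+/// zero-argument call 's.size()'.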
+bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
+ UnresolvedSetImpl &OverloadSet) {
+ ZeroArgCallReturnTy = QualType();
+ OverloadSet.clear();
+
+ const OverloadExpr *Overloads = nullptr;
+ bool IsMemExpr = false;
+ if (E.getType() == Context.OverloadTy) {
+ OverloadExpr::FindResult FR = OverloadExpr::find(const_cast<Expr*>(&E));
+
+ // Ignore overloads that are pointer-to-member constants.
+ if (FR.HasFormOfMemberPointer)
+ return false;
+
+ Overloads = FR.Expression;
+ } else if (E.getType() == Context.BoundMemberTy) {
+ Overloads = dyn_cast<UnresolvedMemberExpr>(E.IgnoreParens());
+ IsMemExpr = true;
+ }
+
+ bool Ambiguous = false;
+
+ if (Overloads) {
+ for (OverloadExpr::decls_iterator it = Overloads->decls_begin(),
+ DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) {
+ OverloadSet.addDecl(*it);
+
+ // Check whether the function is a non-template, non-member which takes no
+ // arguments.
+ if (IsMemExpr)
+ continue;
+ if (const FunctionDecl *OverloadDecl
+ = dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) {
+ if (OverloadDecl->getMinRequiredArguments() == 0) {
+ if (!ZeroArgCallReturnTy.isNull() && !Ambiguous) {
+ ZeroArgCallReturnTy = QualType();
+ Ambiguous = true;
+ } else
+ ZeroArgCallReturnTy = OverloadDecl->getReturnType();
+ }
+ }
+ }
+
+ // If it's not a member, use better machinery to try to resolve the call
+ if (!IsMemExpr)
+ return !ZeroArgCallReturnTy.isNull();
+ }
+
+ // Attempt to call the member with no arguments - this will correctly handle
+ // member templates with defaults/deduction of template arguments, overloads
+ // with default arguments, etc.
+ if (IsMemExpr && !E.isTypeDependent()) {
+ bool Suppress = getDiagnostics().getSuppressAllDiagnostics();
+ getDiagnostics().setSuppressAllDiagnostics(true);
+ ExprResult R = BuildCallToMemberFunction(nullptr, &E, SourceLocation(),
+ None, SourceLocation());
+ getDiagnostics().setSuppressAllDiagnostics(Suppress);
+ if (R.isUsable()) {
+ ZeroArgCallReturnTy = R.get()->getType();
+ return true;
+ }
+ return false;
+ }
+
+ if (const DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) {
+ if (const FunctionDecl *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) {
+ if (Fun->getMinRequiredArguments() == 0)
+ ZeroArgCallReturnTy = Fun->getReturnType();
+ return true;
+ }
+ }
+
+ // We don't have an expression that's convenient to get a FunctionDecl from,
+ // but we can at least check if the type is "function of 0 arguments".
+ QualType ExprTy = E.getType();
+ const FunctionType *FunTy = nullptr;
+ QualType PointeeTy = ExprTy->getPointeeType();
+ if (!PointeeTy.isNull())
+ FunTy = PointeeTy->getAs<FunctionType>();
+ if (!FunTy)
+ FunTy = ExprTy->getAs<FunctionType>();
+
+ if (const FunctionProtoType *FPT =
+ dyn_cast_or_null<FunctionProtoType>(FunTy)) {
+ if (FPT->getNumParams() == 0)
+ ZeroArgCallReturnTy = FunTy->getReturnType();
+ return true;
+ }
+ return false;
+}
+
+/// \brief Give notes for a set of overloads.
+///
+/// A companion to tryExprAsCall. In cases when the name that the programmer
+/// wrote was an overloaded function, we may be able to make some guesses about
+/// plausible overloads based on their return types; such guesses can be handed
+/// off to this method to be emitted as notes.
+///
+/// \param Overloads - The overloads to note.
+/// \param FinalNoteLoc - If we've suppressed printing some overloads due to
+/// -fshow-overloads=best, this is the location to attach to the note about too
+/// many candidates. Typically this will be the location of the original
+/// ill-formed expression.
+static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
+ const SourceLocation FinalNoteLoc) {
+ int ShownOverloads = 0;
+ int SuppressedOverloads = 0;
+ for (UnresolvedSetImpl::iterator It = Overloads.begin(),
+ DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
+ // FIXME: Magic number for max shown overloads stolen from
+ // OverloadCandidateSet::NoteCandidates.
+ if (ShownOverloads >= 4 && S.Diags.getShowOverloads() == Ovl_Best) {
+ ++SuppressedOverloads;
+ continue;
+ }
+
+ NamedDecl *Fn = (*It)->getUnderlyingDecl();
+ S.Diag(Fn->getLocation(), diag::note_possible_target_of_call);
+ ++ShownOverloads;
+ }
+
+ if (SuppressedOverloads)
+ S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates)
+ << SuppressedOverloads;
+}
+
+static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
+ const UnresolvedSetImpl &Overloads,
+ bool (*IsPlausibleResult)(QualType)) {
+ if (!IsPlausibleResult)
+ return noteOverloads(S, Overloads, Loc);
+
+ UnresolvedSet<2> PlausibleOverloads;
+ for (OverloadExpr::decls_iterator It = Overloads.begin(),
+ DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
+ const FunctionDecl *OverloadDecl = cast<FunctionDecl>(*It);
+ QualType OverloadResultTy = OverloadDecl->getReturnType();
+ if (IsPlausibleResult(OverloadResultTy))
+ PlausibleOverloads.addDecl(It.getDecl());
+ }
+ noteOverloads(S, PlausibleOverloads, Loc);
+}
+
+/// Determine whether the given expression can be called by just
+/// putting parentheses after it. Notably, expressions with unary
+/// operators can't be because the unary operator will start parsing
+/// outside the call.
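+///
+/// For example, appending parentheses to '*fp' yields '*fp()', which
+/// parses as '*(fp())' rather than '(*fp)()'.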
+static bool IsCallableWithAppend(Expr *E) {
+ E = E->IgnoreImplicit();
+ return (!isa<CStyleCastExpr>(E) &&
+ !isa<UnaryOperator>(E) &&
+ !isa<BinaryOperator>(E) &&
+ !isa<CXXOperatorCallExpr>(E));
+}
+
+bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
+ bool ForceComplain,
+ bool (*IsPlausibleResult)(QualType)) {
+ SourceLocation Loc = E.get()->getExprLoc();
+ SourceRange Range = E.get()->getSourceRange();
+
+ QualType ZeroArgCallTy;
+ UnresolvedSet<4> Overloads;
+ if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) &&
+ !ZeroArgCallTy.isNull() &&
+ (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) {
+ // At this point, we know E is potentially callable with 0
+ // arguments and that it returns something of a reasonable type,
+ // so we can emit a fixit and carry on pretending that E was
+ // actually a CallExpr.
+ SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd());
+ Diag(Loc, PD)
+ << /*zero-arg*/ 1 << Range
+ << (IsCallableWithAppend(E.get())
+ ? FixItHint::CreateInsertion(ParenInsertionLoc, "()")
+ : FixItHint());
+ notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
+
+ // FIXME: Try this before emitting the fixit, and suppress diagnostics
+ // while doing so.
+ E = ActOnCallExpr(nullptr, E.get(), Range.getEnd(), None,
+ Range.getEnd().getLocWithOffset(1));
+ return true;
+ }
+
+ if (!ForceComplain) return false;
+
+ Diag(Loc, PD) << /*not zero-arg*/ 0 << Range;
+ notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
+ E = ExprError();
+ return true;
+}
+
+IdentifierInfo *Sema::getSuperIdentifier() const {
+ if (!Ident_super)
+ Ident_super = &Context.Idents.get("super");
+ return Ident_super;
+}
+
+IdentifierInfo *Sema::getFloat128Identifier() const {
+ if (!Ident___float128)
+ Ident___float128 = &Context.Idents.get("__float128");
+ return Ident___float128;
+}
+
+void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
+ CapturedRegionKind K) {
+ CapturingScopeInfo *CSI = new CapturedRegionScopeInfo(
+ getDiagnostics(), S, CD, RD, CD->getContextParam(), K);
+ CSI->ReturnType = Context.VoidTy;
+ FunctionScopes.push_back(CSI);
+}
+
+CapturedRegionScopeInfo *Sema::getCurCapturedRegion() {
+ if (FunctionScopes.empty())
+ return nullptr;
+
+ return dyn_cast<CapturedRegionScopeInfo>(FunctionScopes.back());
+}
+
+const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> &
+Sema::getMismatchingDeleteExpressions() const {
+ return DeleteExprs;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp
new file mode 100644
index 0000000..e9772bc
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp
@@ -0,0 +1,1913 @@
+//===---- SemaAccess.cpp - C++ Access Control -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Sema routines for C++ access control semantics.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DependentDiagnostic.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Sema/DelayedDiagnostic.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+
+using namespace clang;
+using namespace sema;
+
+/// A copy of Sema's enum without AR_delayed.
+enum AccessResult {
+ AR_accessible,
+ AR_inaccessible,
+ AR_dependent
+};
+
+/// SetMemberAccessSpecifier - Set the access specifier of a member.
+/// Returns true on error (when the previous member decl access specifier
+/// is different from the new member decl access specifier).
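+///
+/// For example:
+/// \code
+///   class C {
+///     class Inner;    // default (private) access
+///   public:
+///     class Inner {}; // error: redeclared with different access
+///   };
+/// \endcode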
+bool Sema::SetMemberAccessSpecifier(NamedDecl *MemberDecl,
+ NamedDecl *PrevMemberDecl,
+ AccessSpecifier LexicalAS) {
+ if (!PrevMemberDecl) {
+ // Use the lexical access specifier.
+ MemberDecl->setAccess(LexicalAS);
+ return false;
+ }
+
+  // C++ [class.access.spec]p3: When a member is redeclared, its access
+  // specifier must be the same as in its initial declaration.
+ if (LexicalAS != AS_none && LexicalAS != PrevMemberDecl->getAccess()) {
+ Diag(MemberDecl->getLocation(),
+ diag::err_class_redeclared_with_different_access)
+ << MemberDecl << LexicalAS;
+ Diag(PrevMemberDecl->getLocation(), diag::note_previous_access_declaration)
+ << PrevMemberDecl << PrevMemberDecl->getAccess();
+
+ MemberDecl->setAccess(LexicalAS);
+ return true;
+ }
+
+ MemberDecl->setAccess(PrevMemberDecl->getAccess());
+ return false;
+}
+
+static CXXRecordDecl *FindDeclaringClass(NamedDecl *D) {
+ DeclContext *DC = D->getDeclContext();
+
+ // This can only happen at top: enum decls only "publish" their
+ // immediate members.
+ if (isa<EnumDecl>(DC))
+ DC = cast<EnumDecl>(DC)->getDeclContext();
+
+ CXXRecordDecl *DeclaringClass = cast<CXXRecordDecl>(DC);
+ while (DeclaringClass->isAnonymousStructOrUnion())
+ DeclaringClass = cast<CXXRecordDecl>(DeclaringClass->getDeclContext());
+ return DeclaringClass;
+}
+
+namespace {
+struct EffectiveContext {
+ EffectiveContext() : Inner(nullptr), Dependent(false) {}
+
+ explicit EffectiveContext(DeclContext *DC)
+ : Inner(DC),
+ Dependent(DC->isDependentContext()) {
+
+ // C++11 [class.access.nest]p1:
+ // A nested class is a member and as such has the same access
+ // rights as any other member.
+ // C++11 [class.access]p2:
+ // A member of a class can also access all the names to which
+ // the class has access. A local class of a member function
+ // may access the same names that the member function itself
+ // may access.
+ // This almost implies that the privileges of nesting are transitive.
+ // Technically it says nothing about the local classes of non-member
+ // functions (which can gain privileges through friendship), but we
+ // take that as an oversight.
+ while (true) {
+ // We want to add canonical declarations to the EC lists for
+ // simplicity of checking, but we need to walk up through the
+ // actual current DC chain. Otherwise, something like a local
+ // extern or friend which happens to be the canonical
+ // declaration will really mess us up.
+
+ if (isa<CXXRecordDecl>(DC)) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(DC);
+ Records.push_back(Record->getCanonicalDecl());
+ DC = Record->getDeclContext();
+ } else if (isa<FunctionDecl>(DC)) {
+ FunctionDecl *Function = cast<FunctionDecl>(DC);
+ Functions.push_back(Function->getCanonicalDecl());
+ if (Function->getFriendObjectKind())
+ DC = Function->getLexicalDeclContext();
+ else
+ DC = Function->getDeclContext();
+ } else if (DC->isFileContext()) {
+ break;
+ } else {
+ DC = DC->getParent();
+ }
+ }
+ }
+
+ bool isDependent() const { return Dependent; }
+
+ bool includesClass(const CXXRecordDecl *R) const {
+ R = R->getCanonicalDecl();
+ return std::find(Records.begin(), Records.end(), R)
+ != Records.end();
+ }
+
+ /// Retrieves the innermost "useful" context. Can be null if we're
+ /// doing access-control without privileges.
+ DeclContext *getInnerContext() const {
+ return Inner;
+ }
+
+ typedef SmallVectorImpl<CXXRecordDecl*>::const_iterator record_iterator;
+
+ DeclContext *Inner;
+ SmallVector<FunctionDecl*, 4> Functions;
+ SmallVector<CXXRecordDecl*, 4> Records;
+ bool Dependent;
+};
+
+/// Like sema::AccessedEntity, but kindly lets us scribble all over
+/// it.
+struct AccessTarget : public AccessedEntity {
+ AccessTarget(const AccessedEntity &Entity)
+ : AccessedEntity(Entity) {
+ initialize();
+ }
+
+ AccessTarget(ASTContext &Context,
+ MemberNonce _,
+ CXXRecordDecl *NamingClass,
+ DeclAccessPair FoundDecl,
+ QualType BaseObjectType)
+ : AccessedEntity(Context.getDiagAllocator(), Member, NamingClass,
+ FoundDecl, BaseObjectType) {
+ initialize();
+ }
+
+ AccessTarget(ASTContext &Context,
+ BaseNonce _,
+ CXXRecordDecl *BaseClass,
+ CXXRecordDecl *DerivedClass,
+ AccessSpecifier Access)
+ : AccessedEntity(Context.getDiagAllocator(), Base, BaseClass, DerivedClass,
+ Access) {
+ initialize();
+ }
+
+ bool isInstanceMember() const {
+ return (isMemberAccess() && getTargetDecl()->isCXXInstanceMember());
+ }
+
+ bool hasInstanceContext() const {
+ return HasInstanceContext;
+ }
+
+ class SavedInstanceContext {
+ public:
+ SavedInstanceContext(SavedInstanceContext &&S)
+ : Target(S.Target), Has(S.Has) {
+ S.Target = nullptr;
+ }
+ ~SavedInstanceContext() {
+ if (Target)
+ Target->HasInstanceContext = Has;
+ }
+
+ private:
+ friend struct AccessTarget;
+ explicit SavedInstanceContext(AccessTarget &Target)
+ : Target(&Target), Has(Target.HasInstanceContext) {}
+ AccessTarget *Target;
+ bool Has;
+ };
+
+ SavedInstanceContext saveInstanceContext() {
+ return SavedInstanceContext(*this);
+ }
+
+ void suppressInstanceContext() {
+ HasInstanceContext = false;
+ }
+
+ const CXXRecordDecl *resolveInstanceContext(Sema &S) const {
+ assert(HasInstanceContext);
+ if (CalculatedInstanceContext)
+ return InstanceContext;
+
+ CalculatedInstanceContext = true;
+ DeclContext *IC = S.computeDeclContext(getBaseObjectType());
+ InstanceContext = (IC ? cast<CXXRecordDecl>(IC)->getCanonicalDecl()
+ : nullptr);
+ return InstanceContext;
+ }
+
+ const CXXRecordDecl *getDeclaringClass() const {
+ return DeclaringClass;
+ }
+
+ /// The "effective" naming class is the canonical non-anonymous
+ /// class containing the actual naming class.
+ const CXXRecordDecl *getEffectiveNamingClass() const {
+ const CXXRecordDecl *namingClass = getNamingClass();
+ while (namingClass->isAnonymousStructOrUnion())
+ namingClass = cast<CXXRecordDecl>(namingClass->getParent());
+ return namingClass->getCanonicalDecl();
+ }
+
+private:
+ void initialize() {
+ HasInstanceContext = (isMemberAccess() &&
+ !getBaseObjectType().isNull() &&
+ getTargetDecl()->isCXXInstanceMember());
+ CalculatedInstanceContext = false;
+ InstanceContext = nullptr;
+
+ if (isMemberAccess())
+ DeclaringClass = FindDeclaringClass(getTargetDecl());
+ else
+ DeclaringClass = getBaseClass();
+ DeclaringClass = DeclaringClass->getCanonicalDecl();
+ }
+
+ bool HasInstanceContext : 1;
+ mutable bool CalculatedInstanceContext : 1;
+ mutable const CXXRecordDecl *InstanceContext;
+ const CXXRecordDecl *DeclaringClass;
+};
+
+}
+
+/// Checks whether one class might instantiate to the other.
+static bool MightInstantiateTo(const CXXRecordDecl *From,
+ const CXXRecordDecl *To) {
+ // Declaration names are always preserved by instantiation.
+ if (From->getDeclName() != To->getDeclName())
+ return false;
+
+ const DeclContext *FromDC = From->getDeclContext()->getPrimaryContext();
+ const DeclContext *ToDC = To->getDeclContext()->getPrimaryContext();
+ if (FromDC == ToDC) return true;
+ if (FromDC->isFileContext() || ToDC->isFileContext()) return false;
+
+ // Be conservative.
+ return true;
+}
+
+/// Checks whether one class is derived from another, inclusively.
+/// Properly indicates when it couldn't be determined due to
+/// dependence.
+///
+/// This should probably be donated to AST or at least Sema.
+static AccessResult IsDerivedFromInclusive(const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Target) {
+ assert(Derived->getCanonicalDecl() == Derived);
+ assert(Target->getCanonicalDecl() == Target);
+
+ if (Derived == Target) return AR_accessible;
+
+ bool CheckDependent = Derived->isDependentContext();
+ if (CheckDependent && MightInstantiateTo(Derived, Target))
+ return AR_dependent;
+
+ AccessResult OnFailure = AR_inaccessible;
+ SmallVector<const CXXRecordDecl*, 8> Queue; // actually a stack
+
+ while (true) {
+ if (Derived->isDependentContext() && !Derived->hasDefinition())
+ return AR_dependent;
+
+ for (const auto &I : Derived->bases()) {
+ const CXXRecordDecl *RD;
+
+ QualType T = I.getType();
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ RD = cast<CXXRecordDecl>(RT->getDecl());
+ } else if (const InjectedClassNameType *IT
+ = T->getAs<InjectedClassNameType>()) {
+ RD = IT->getDecl();
+ } else {
+ assert(T->isDependentType() && "non-dependent base wasn't a record?");
+ OnFailure = AR_dependent;
+ continue;
+ }
+
+ RD = RD->getCanonicalDecl();
+ if (RD == Target) return AR_accessible;
+ if (CheckDependent && MightInstantiateTo(RD, Target))
+ OnFailure = AR_dependent;
+
+ Queue.push_back(RD);
+ }
+
+ if (Queue.empty()) break;
+
+ Derived = Queue.pop_back_val();
+ }
+
+ return OnFailure;
+}
+
+
+static bool MightInstantiateTo(Sema &S, DeclContext *Context,
+ DeclContext *Friend) {
+ if (Friend == Context)
+ return true;
+
+ assert(!Friend->isDependentContext() &&
+ "can't handle friends with dependent contexts here");
+
+ if (!Context->isDependentContext())
+ return false;
+
+ if (Friend->isFileContext())
+ return false;
+
+ // TODO: this is very conservative
+ return true;
+}
+
+// Asks whether the type in 'context' can ever instantiate to the type
+// in 'friend'.
+static bool MightInstantiateTo(Sema &S, CanQualType Context,
+                               CanQualType Friend) {
+ if (Friend == Context)
+ return true;
+
+ if (!Friend->isDependentType() && !Context->isDependentType())
+ return false;
+
+ // TODO: this is very conservative.
+ return true;
+}
+
+static bool MightInstantiateTo(Sema &S,
+ FunctionDecl *Context,
+ FunctionDecl *Friend) {
+ if (Context->getDeclName() != Friend->getDeclName())
+ return false;
+
+ if (!MightInstantiateTo(S,
+ Context->getDeclContext(),
+ Friend->getDeclContext()))
+ return false;
+
+ CanQual<FunctionProtoType> FriendTy
+ = S.Context.getCanonicalType(Friend->getType())
+ ->getAs<FunctionProtoType>();
+ CanQual<FunctionProtoType> ContextTy
+ = S.Context.getCanonicalType(Context->getType())
+ ->getAs<FunctionProtoType>();
+
+ // There isn't any way that I know of to add qualifiers
+ // during instantiation.
+ if (FriendTy.getQualifiers() != ContextTy.getQualifiers())
+ return false;
+
+ if (FriendTy->getNumParams() != ContextTy->getNumParams())
+ return false;
+
+ if (!MightInstantiateTo(S, ContextTy->getReturnType(),
+ FriendTy->getReturnType()))
+ return false;
+
+ for (unsigned I = 0, E = FriendTy->getNumParams(); I != E; ++I)
+ if (!MightInstantiateTo(S, ContextTy->getParamType(I),
+ FriendTy->getParamType(I)))
+ return false;
+
+ return true;
+}
+
+static bool MightInstantiateTo(Sema &S,
+ FunctionTemplateDecl *Context,
+ FunctionTemplateDecl *Friend) {
+ return MightInstantiateTo(S,
+ Context->getTemplatedDecl(),
+ Friend->getTemplatedDecl());
+}
+
+static AccessResult MatchesFriend(Sema &S,
+ const EffectiveContext &EC,
+ const CXXRecordDecl *Friend) {
+ if (EC.includesClass(Friend))
+ return AR_accessible;
+
+ if (EC.isDependent()) {
+ CanQualType FriendTy
+ = S.Context.getCanonicalType(S.Context.getTypeDeclType(Friend));
+
+ for (EffectiveContext::record_iterator
+ I = EC.Records.begin(), E = EC.Records.end(); I != E; ++I) {
+ CanQualType ContextTy
+ = S.Context.getCanonicalType(S.Context.getTypeDeclType(*I));
+ if (MightInstantiateTo(S, ContextTy, FriendTy))
+ return AR_dependent;
+ }
+ }
+
+ return AR_inaccessible;
+}
+
+static AccessResult MatchesFriend(Sema &S,
+ const EffectiveContext &EC,
+ CanQualType Friend) {
+ if (const RecordType *RT = Friend->getAs<RecordType>())
+ return MatchesFriend(S, EC, cast<CXXRecordDecl>(RT->getDecl()));
+
+ // TODO: we can do better than this
+ if (Friend->isDependentType())
+ return AR_dependent;
+
+ return AR_inaccessible;
+}
+
+/// Determines whether the given friend class template matches
+/// anything in the effective context.
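+///
+/// For example, if 'template <typename T> class List' is named as a friend,
+/// then code in any specialization 'List<int>', or in the 'List' template
+/// pattern itself, matches.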
+static AccessResult MatchesFriend(Sema &S,
+ const EffectiveContext &EC,
+ ClassTemplateDecl *Friend) {
+ AccessResult OnFailure = AR_inaccessible;
+
+ // Check whether the friend is the template of a class in the
+ // context chain.
+ for (SmallVectorImpl<CXXRecordDecl*>::const_iterator
+ I = EC.Records.begin(), E = EC.Records.end(); I != E; ++I) {
+ CXXRecordDecl *Record = *I;
+
+ // Figure out whether the current class has a template:
+ ClassTemplateDecl *CTD;
+
+ // A specialization of the template...
+ if (isa<ClassTemplateSpecializationDecl>(Record)) {
+ CTD = cast<ClassTemplateSpecializationDecl>(Record)
+ ->getSpecializedTemplate();
+
+ // ... or the template pattern itself.
+ } else {
+ CTD = Record->getDescribedClassTemplate();
+ if (!CTD) continue;
+ }
+
+ // It's a match.
+ if (Friend == CTD->getCanonicalDecl())
+ return AR_accessible;
+
+ // If the context isn't dependent, it can't be a dependent match.
+ if (!EC.isDependent())
+ continue;
+
+ // If the template names don't match, it can't be a dependent
+ // match.
+ if (CTD->getDeclName() != Friend->getDeclName())
+ continue;
+
+ // If the class's context can't instantiate to the friend's
+ // context, it can't be a dependent match.
+ if (!MightInstantiateTo(S, CTD->getDeclContext(),
+ Friend->getDeclContext()))
+ continue;
+
+ // Otherwise, it's a dependent match.
+ OnFailure = AR_dependent;
+ }
+
+ return OnFailure;
+}
+
+/// Determines whether the given friend function matches anything in
+/// the effective context.
+static AccessResult MatchesFriend(Sema &S,
+ const EffectiveContext &EC,
+ FunctionDecl *Friend) {
+ AccessResult OnFailure = AR_inaccessible;
+
+ for (SmallVectorImpl<FunctionDecl*>::const_iterator
+ I = EC.Functions.begin(), E = EC.Functions.end(); I != E; ++I) {
+ if (Friend == *I)
+ return AR_accessible;
+
+ if (EC.isDependent() && MightInstantiateTo(S, *I, Friend))
+ OnFailure = AR_dependent;
+ }
+
+ return OnFailure;
+}
+
+/// Determines whether the given friend function template matches
+/// anything in the effective context.
+static AccessResult MatchesFriend(Sema &S,
+ const EffectiveContext &EC,
+ FunctionTemplateDecl *Friend) {
+ if (EC.Functions.empty()) return AR_inaccessible;
+
+ AccessResult OnFailure = AR_inaccessible;
+
+ for (SmallVectorImpl<FunctionDecl*>::const_iterator
+ I = EC.Functions.begin(), E = EC.Functions.end(); I != E; ++I) {
+
+ FunctionTemplateDecl *FTD = (*I)->getPrimaryTemplate();
+ if (!FTD)
+ FTD = (*I)->getDescribedFunctionTemplate();
+ if (!FTD)
+ continue;
+
+ FTD = FTD->getCanonicalDecl();
+
+ if (Friend == FTD)
+ return AR_accessible;
+
+ if (EC.isDependent() && MightInstantiateTo(S, FTD, Friend))
+ OnFailure = AR_dependent;
+ }
+
+ return OnFailure;
+}
+
+/// Determines whether the given friend declaration matches anything
+/// in the effective context.
+static AccessResult MatchesFriend(Sema &S,
+ const EffectiveContext &EC,
+ FriendDecl *FriendD) {
+ // Whitelist accesses if there's an invalid or unsupported friend
+ // declaration.
+ if (FriendD->isInvalidDecl() || FriendD->isUnsupportedFriend())
+ return AR_accessible;
+
+ if (TypeSourceInfo *T = FriendD->getFriendType())
+ return MatchesFriend(S, EC, T->getType()->getCanonicalTypeUnqualified());
+
+ NamedDecl *Friend
+ = cast<NamedDecl>(FriendD->getFriendDecl()->getCanonicalDecl());
+
+ // FIXME: declarations with dependent or templated scope.
+
+ if (isa<ClassTemplateDecl>(Friend))
+ return MatchesFriend(S, EC, cast<ClassTemplateDecl>(Friend));
+
+ if (isa<FunctionTemplateDecl>(Friend))
+ return MatchesFriend(S, EC, cast<FunctionTemplateDecl>(Friend));
+
+ if (isa<CXXRecordDecl>(Friend))
+ return MatchesFriend(S, EC, cast<CXXRecordDecl>(Friend));
+
+ assert(isa<FunctionDecl>(Friend) && "unknown friend decl kind");
+ return MatchesFriend(S, EC, cast<FunctionDecl>(Friend));
+}
+
+static AccessResult GetFriendKind(Sema &S,
+ const EffectiveContext &EC,
+ const CXXRecordDecl *Class) {
+ AccessResult OnFailure = AR_inaccessible;
+
+ // Okay, check friends.
+ for (auto *Friend : Class->friends()) {
+ switch (MatchesFriend(S, EC, Friend)) {
+ case AR_accessible:
+ return AR_accessible;
+
+ case AR_inaccessible:
+ continue;
+
+ case AR_dependent:
+ OnFailure = AR_dependent;
+ break;
+ }
+ }
+
+ // That's it, give up.
+ return OnFailure;
+}
+
+namespace {
+
+/// A helper class for checking for a friend which will grant access
+/// to a protected instance member.
+struct ProtectedFriendContext {
+ Sema &S;
+ const EffectiveContext &EC;
+ const CXXRecordDecl *NamingClass;
+ bool CheckDependent;
+ bool EverDependent;
+
+ /// The path down to the current base class.
+ SmallVector<const CXXRecordDecl*, 20> CurPath;
+
+ ProtectedFriendContext(Sema &S, const EffectiveContext &EC,
+ const CXXRecordDecl *InstanceContext,
+ const CXXRecordDecl *NamingClass)
+ : S(S), EC(EC), NamingClass(NamingClass),
+ CheckDependent(InstanceContext->isDependentContext() ||
+ NamingClass->isDependentContext()),
+ EverDependent(false) {}
+
+ /// Check classes in the current path for friendship, starting at
+ /// the given index.
+ bool checkFriendshipAlongPath(unsigned I) {
+ assert(I < CurPath.size());
+ for (unsigned E = CurPath.size(); I != E; ++I) {
+ switch (GetFriendKind(S, EC, CurPath[I])) {
+ case AR_accessible: return true;
+ case AR_inaccessible: continue;
+ case AR_dependent: EverDependent = true; continue;
+ }
+ }
+ return false;
+ }
+
+ /// Perform a search starting at the given class.
+ ///
+ /// PrivateDepth is the index of the last (least derived) class
+ /// along the current path such that a notional public member of
+ /// the final class in the path would have access in that class.
+ bool findFriendship(const CXXRecordDecl *Cur, unsigned PrivateDepth) {
+ // If we ever reach the naming class, check the current path for
+ // friendship. We can also stop recursing because we obviously
+ // won't find the naming class there again.
+ if (Cur == NamingClass)
+ return checkFriendshipAlongPath(PrivateDepth);
+
+ if (CheckDependent && MightInstantiateTo(Cur, NamingClass))
+ EverDependent = true;
+
+ // Recurse into the base classes.
+ for (const auto &I : Cur->bases()) {
+ // If this is private inheritance, then a public member of the
+ // base will not have any access in classes derived from Cur.
+ unsigned BasePrivateDepth = PrivateDepth;
+ if (I.getAccessSpecifier() == AS_private)
+ BasePrivateDepth = CurPath.size() - 1;
+
+ const CXXRecordDecl *RD;
+
+ QualType T = I.getType();
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ RD = cast<CXXRecordDecl>(RT->getDecl());
+ } else if (const InjectedClassNameType *IT
+ = T->getAs<InjectedClassNameType>()) {
+ RD = IT->getDecl();
+ } else {
+ assert(T->isDependentType() && "non-dependent base wasn't a record?");
+ EverDependent = true;
+ continue;
+ }
+
+ // Recurse. We don't need to clean up if this returns true.
+ CurPath.push_back(RD);
+ if (findFriendship(RD->getCanonicalDecl(), BasePrivateDepth))
+ return true;
+ CurPath.pop_back();
+ }
+
+ return false;
+ }
+
+ bool findFriendship(const CXXRecordDecl *Cur) {
+ assert(CurPath.empty());
+ CurPath.push_back(Cur);
+ return findFriendship(Cur, 0);
+ }
+};
+}
+
+/// Search for a class P that EC is a friend of, under the constraint
+/// InstanceContext <= P
+/// if InstanceContext exists, or else
+/// NamingClass <= P
+/// and with the additional restriction that a protected member of
+/// NamingClass would have some natural access in P, which implicitly
+/// imposes the constraint that P <= NamingClass.
+///
+/// This isn't quite the condition laid out in the standard.
+/// Instead of saying that a notional protected member of NamingClass
+/// would have to have some natural access in P, it says the actual
+/// target has to have some natural access in P, which opens up the
+/// possibility that the target (which is not necessarily a member
+/// of NamingClass) might be more accessible along some path not
+/// passing through it. That's really a bad idea, though, because it
+/// introduces two problems:
+/// - Most importantly, it breaks encapsulation because you can
+/// access a forbidden base class's members by directly subclassing
+/// it elsewhere.
+/// - It also makes access substantially harder to compute because it
+/// breaks the hill-climbing algorithm: knowing that the target is
+/// accessible in some base class would no longer let you change
+/// the question solely to whether the base class is accessible,
+/// because the original target might have been more accessible
+/// because of crazy subclassing.
+/// So we don't implement that.
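+///
+/// For example:
+/// \code
+///   class N { protected: int x; };
+///   class P : public N { friend void f(P &); };
+///   void f(P &p) { p.x = 0; } // OK: f is a friend of P, P is derived
+///                             // from N, and a protected member of N has
+///                             // natural access in P
+/// \endcode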
+static AccessResult GetProtectedFriendKind(Sema &S, const EffectiveContext &EC,
+ const CXXRecordDecl *InstanceContext,
+ const CXXRecordDecl *NamingClass) {
+ assert(InstanceContext == nullptr ||
+ InstanceContext->getCanonicalDecl() == InstanceContext);
+ assert(NamingClass->getCanonicalDecl() == NamingClass);
+
+ // If we don't have an instance context, our constraints give us
+ // that NamingClass <= P <= NamingClass, i.e. P == NamingClass.
+ // This is just the usual friendship check.
+ if (!InstanceContext) return GetFriendKind(S, EC, NamingClass);
+
+ ProtectedFriendContext PRC(S, EC, InstanceContext, NamingClass);
+ if (PRC.findFriendship(InstanceContext)) return AR_accessible;
+ if (PRC.EverDependent) return AR_dependent;
+ return AR_inaccessible;
+}
+
+static AccessResult HasAccess(Sema &S,
+ const EffectiveContext &EC,
+ const CXXRecordDecl *NamingClass,
+ AccessSpecifier Access,
+ const AccessTarget &Target) {
+ assert(NamingClass->getCanonicalDecl() == NamingClass &&
+ "declaration should be canonicalized before being passed here");
+
+ if (Access == AS_public) return AR_accessible;
+ assert(Access == AS_private || Access == AS_protected);
+
+ AccessResult OnFailure = AR_inaccessible;
+
+ for (EffectiveContext::record_iterator
+ I = EC.Records.begin(), E = EC.Records.end(); I != E; ++I) {
+ // All the declarations in EC have been canonicalized, so pointer
+ // equality from this point on will work fine.
+ const CXXRecordDecl *ECRecord = *I;
+
+ // [B2] and [M2]
+ if (Access == AS_private) {
+ if (ECRecord == NamingClass)
+ return AR_accessible;
+
+ if (EC.isDependent() && MightInstantiateTo(ECRecord, NamingClass))
+ OnFailure = AR_dependent;
+
+ // [B3] and [M3]
+ } else {
+ assert(Access == AS_protected);
+ switch (IsDerivedFromInclusive(ECRecord, NamingClass)) {
+ case AR_accessible: break;
+ case AR_inaccessible: continue;
+ case AR_dependent: OnFailure = AR_dependent; continue;
+ }
+
+ // C++ [class.protected]p1:
+ // An additional access check beyond those described earlier in
+ // [class.access] is applied when a non-static data member or
+ // non-static member function is a protected member of its naming
+ // class. As described earlier, access to a protected member is
+ // granted because the reference occurs in a friend or member of
+ // some class C. If the access is to form a pointer to member,
+ // the nested-name-specifier shall name C or a class derived from
+ // C. All other accesses involve a (possibly implicit) object
+ // expression. In this case, the class of the object expression
+ // shall be C or a class derived from C.
+ //
+ // We interpret this as a restriction on [M3].
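+      //
+      // For example:
+      //   class N { protected: int m; };
+      //   class C : public N {
+      //     void f(C &c, N &n) {
+      //       c.m = 0; // OK: the object expression has type C
+      //       n.m = 0; // error: N is neither C nor derived from C
+      //     }
+      //   };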
+
+ // In this part of the code, 'C' is just our context class ECRecord.
+
+ // These rules are different if we don't have an instance context.
+ if (!Target.hasInstanceContext()) {
+ // If it's not an instance member, these restrictions don't apply.
+ if (!Target.isInstanceMember()) return AR_accessible;
+
+ // If it's an instance member, use the pointer-to-member rule
+ // that the naming class has to be derived from the effective
+ // context.
+
+ // Emulate a MSVC bug where the creation of pointer-to-member
+ // to protected member of base class is allowed but only from
+ // static member functions.
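+      //
+      // For example, MSVC accepts:
+      //   class B { protected: int m; };
+      //   class D : public B {
+      //     static void f() { (void) &B::m; } // standard C++ requires &D::m
+      //   };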
+ if (S.getLangOpts().MSVCCompat && !EC.Functions.empty())
+ if (CXXMethodDecl* MD = dyn_cast<CXXMethodDecl>(EC.Functions.front()))
+ if (MD->isStatic()) return AR_accessible;
+
+ // Despite the standard's confident wording, there is a case
+ // where you can have an instance member that's neither in a
+ // pointer-to-member expression nor in a member access: when
+ // it names a field in an unevaluated context that can't be an
+ // implicit member. Pending clarification, we just apply the
+ // same naming-class restriction here.
+ // FIXME: we're probably not correctly adding the
+ // protected-member restriction when we retroactively convert
+ // an expression to being evaluated.
+
+ // We know that ECRecord derives from NamingClass. The
+ // restriction says to check whether NamingClass derives from
+ // ECRecord, but that's not really necessary: two distinct
+ // classes can't be recursively derived from each other. So
+ // along this path, we just need to check whether the classes
+ // are equal.
+ if (NamingClass == ECRecord) return AR_accessible;
+
+ // Otherwise, this context class tells us nothing; on to the next.
+ continue;
+ }
+
+ assert(Target.isInstanceMember());
+
+ const CXXRecordDecl *InstanceContext = Target.resolveInstanceContext(S);
+ if (!InstanceContext) {
+ OnFailure = AR_dependent;
+ continue;
+ }
+
+ switch (IsDerivedFromInclusive(InstanceContext, ECRecord)) {
+ case AR_accessible: return AR_accessible;
+ case AR_inaccessible: continue;
+ case AR_dependent: OnFailure = AR_dependent; continue;
+ }
+ }
+ }
+
+ // [M3] and [B3] say that, if the target is protected in N, we grant
+ // access if the access occurs in a friend or member of some class P
+ // that's a subclass of N and where the target has some natural
+ // access in P. The 'member' aspect is easy to handle because P
+ // would necessarily be one of the effective-context records, and we
+ // address that above. The 'friend' aspect is completely ridiculous
+ // to implement because there are no restrictions at all on P
+ // *unless* the [class.protected] restriction applies. If it does,
+ // however, we should ignore whether the naming class is a friend,
+ // and instead rely on whether any potential P is a friend.
+ if (Access == AS_protected && Target.isInstanceMember()) {
+ // Compute the instance context if possible.
+ const CXXRecordDecl *InstanceContext = nullptr;
+ if (Target.hasInstanceContext()) {
+ InstanceContext = Target.resolveInstanceContext(S);
+ if (!InstanceContext) return AR_dependent;
+ }
+
+ switch (GetProtectedFriendKind(S, EC, InstanceContext, NamingClass)) {
+ case AR_accessible: return AR_accessible;
+ case AR_inaccessible: return OnFailure;
+ case AR_dependent: return AR_dependent;
+ }
+ llvm_unreachable("impossible friendship kind");
+ }
+
+ switch (GetFriendKind(S, EC, NamingClass)) {
+ case AR_accessible: return AR_accessible;
+ case AR_inaccessible: return OnFailure;
+ case AR_dependent: return AR_dependent;
+ }
+
+ // Silence bogus warnings
+ llvm_unreachable("impossible friendship kind");
+}
+
+/// Finds the best path from the naming class to the declaring class,
+/// taking friend declarations into account.
+///
+/// C++0x [class.access.base]p5:
+/// A member m is accessible at the point R when named in class N if
+/// [M1] m as a member of N is public, or
+/// [M2] m as a member of N is private, and R occurs in a member or
+/// friend of class N, or
+/// [M3] m as a member of N is protected, and R occurs in a member or
+/// friend of class N, or in a member or friend of a class P
+/// derived from N, where m as a member of P is public, private,
+/// or protected, or
+/// [M4] there exists a base class B of N that is accessible at R, and
+/// m is accessible at R when named in class B.
+///
+/// C++0x [class.access.base]p4:
+/// A base class B of N is accessible at R, if
+/// [B1] an invented public member of B would be a public member of N, or
+/// [B2] R occurs in a member or friend of class N, and an invented public
+/// member of B would be a private or protected member of N, or
+/// [B3] R occurs in a member or friend of a class P derived from N, and an
+/// invented public member of B would be a private or protected member
+/// of P, or
+/// [B4] there exists a class S such that B is a base class of S accessible
+/// at R and S is a base class of N accessible at R.
+///
+/// Along a single inheritance path we can restate both of these
+/// iteratively:
+///
+/// First, we note that M1-4 are equivalent to B1-4 if the member is
+/// treated as a notional base of its declaring class with inheritance
+/// access equivalent to the member's access. Therefore we need only
+/// ask whether a class B is accessible from a class N in context R.
+///
+/// Let B_1 .. B_n be the inheritance path in question (i.e. where
+/// B_1 = N, B_n = B, and for all i, B_{i+1} is a direct base class of
+/// B_i). For i in 1..n, we will calculate ACAB(i), the access to the
+/// closest accessible base in the path:
+/// Access(a, b) = (* access on the base specifier from a to b *)
+/// Merge(a, forbidden) = forbidden
+/// Merge(a, private) = forbidden
+/// Merge(a, b) = min(a,b)
+/// Accessible(c, forbidden) = false
+/// Accessible(c, private) = (R is c) || IsFriend(c, R)
+/// Accessible(c, protected) = (R derived from c) || IsFriend(c, R)
+/// Accessible(c, public) = true
+/// ACAB(n) = public
+/// ACAB(i) =
+/// let AccessToBase = Merge(Access(B_i, B_{i+1}), ACAB(i+1)) in
+/// if Accessible(B_i, AccessToBase) then public else AccessToBase
+///
+/// B is an accessible base of N at R iff ACAB(1) = public.
+///
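+/// For example, given 'struct B {}; struct N : private B {};' with R in a
+/// member of N: ACAB(2) = public, Merge(Access(N, B), ACAB(2)) =
+/// Merge(private, public) = private, and Accessible(N, private) holds
+/// because R occurs in a member of N, so ACAB(1) = public and B is an
+/// accessible base of N at R.
+///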
+/// \param FinalAccess the access of the "final step", or AS_public if
+/// there is no final step.
+/// \return null if friendship is dependent
+static CXXBasePath *FindBestPath(Sema &S,
+ const EffectiveContext &EC,
+ AccessTarget &Target,
+ AccessSpecifier FinalAccess,
+ CXXBasePaths &Paths) {
+ // Derive the paths to the desired base.
+ const CXXRecordDecl *Derived = Target.getNamingClass();
+ const CXXRecordDecl *Base = Target.getDeclaringClass();
+
+ // FIXME: fail correctly when there are dependent paths.
+ bool isDerived = Derived->isDerivedFrom(const_cast<CXXRecordDecl*>(Base),
+ Paths);
+ assert(isDerived && "derived class not actually derived from base");
+ (void) isDerived;
+
+ CXXBasePath *BestPath = nullptr;
+
+ assert(FinalAccess != AS_none && "forbidden access after declaring class");
+
+ bool AnyDependent = false;
+
+ // Derive the friend-modified access along each path.
+ for (CXXBasePaths::paths_iterator PI = Paths.begin(), PE = Paths.end();
+ PI != PE; ++PI) {
+ AccessTarget::SavedInstanceContext _ = Target.saveInstanceContext();
+
+ // Walk through the path backwards.
+ AccessSpecifier PathAccess = FinalAccess;
+ CXXBasePath::iterator I = PI->end(), E = PI->begin();
+ while (I != E) {
+ --I;
+
+ assert(PathAccess != AS_none);
+
+ // If the declaration is a private member of a base class, there
+ // is no level of friendship in derived classes that can make it
+ // accessible.
+ if (PathAccess == AS_private) {
+ PathAccess = AS_none;
+ break;
+ }
+
+ const CXXRecordDecl *NC = I->Class->getCanonicalDecl();
+
+ AccessSpecifier BaseAccess = I->Base->getAccessSpecifier();
+ PathAccess = std::max(PathAccess, BaseAccess);
+
+ switch (HasAccess(S, EC, NC, PathAccess, Target)) {
+ case AR_inaccessible: break;
+ case AR_accessible:
+ PathAccess = AS_public;
+
+ // Future tests are not against members and so do not have
+ // instance context.
+ Target.suppressInstanceContext();
+ break;
+ case AR_dependent:
+ AnyDependent = true;
+ goto Next;
+ }
+ }
+
+ // Note that we modify the path's Access field to the
+ // friend-modified access.
+ if (BestPath == nullptr || PathAccess < BestPath->Access) {
+ BestPath = &*PI;
+ BestPath->Access = PathAccess;
+
+ // Short-circuit if we found a public path.
+ if (BestPath->Access == AS_public)
+ return BestPath;
+ }
+
+ Next: ;
+ }
+
+ assert((!BestPath || BestPath->Access != AS_public) &&
+ "fell out of loop with public path");
+
+ // We didn't find a public path, but at least one path was subject
+ // to dependent friendship, so delay the check.
+ if (AnyDependent)
+ return nullptr;
+
+ return BestPath;
+}
+
+/// Given that an entity has protected natural access, check whether
+/// access might be denied because of the protected member access
+/// restriction.
+///
+/// \return true if a note was emitted
+static bool TryDiagnoseProtectedAccess(Sema &S, const EffectiveContext &EC,
+ AccessTarget &Target) {
+ // Only applies to instance accesses.
+ if (!Target.isInstanceMember())
+ return false;
+
+ assert(Target.isMemberAccess());
+
+ const CXXRecordDecl *NamingClass = Target.getEffectiveNamingClass();
+
+ for (EffectiveContext::record_iterator
+ I = EC.Records.begin(), E = EC.Records.end(); I != E; ++I) {
+ const CXXRecordDecl *ECRecord = *I;
+ switch (IsDerivedFromInclusive(ECRecord, NamingClass)) {
+ case AR_accessible: break;
+ case AR_inaccessible: continue;
+ case AR_dependent: continue;
+ }
+
+ // The effective context is a subclass of the declaring class.
+ // Check whether the [class.protected] restriction is limiting
+ // access.
+
+ // To get this exactly right, this might need to be checked more
+ // holistically; it's not necessarily the case that gaining
+ // access here would grant us access overall.
+
+ NamedDecl *D = Target.getTargetDecl();
+
+ // If we don't have an instance context, [class.protected] says the
+ // naming class has to equal the context class.
+ if (!Target.hasInstanceContext()) {
+ // If it does, the restriction doesn't apply.
+ if (NamingClass == ECRecord) continue;
+
+ // TODO: it would be great to have a fixit here, since this is
+ // such an obvious error.
+ S.Diag(D->getLocation(), diag::note_access_protected_restricted_noobject)
+ << S.Context.getTypeDeclType(ECRecord);
+ return true;
+ }
+
+ const CXXRecordDecl *InstanceContext = Target.resolveInstanceContext(S);
+ assert(InstanceContext && "diagnosing dependent access");
+
+ switch (IsDerivedFromInclusive(InstanceContext, ECRecord)) {
+ case AR_accessible: continue;
+ case AR_dependent: continue;
+ case AR_inaccessible:
+ break;
+ }
+
+ // Okay, the restriction seems to be what's limiting us.
+
+ // Use a special diagnostic for constructors and destructors.
+ if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D) ||
+ (isa<FunctionTemplateDecl>(D) &&
+ isa<CXXConstructorDecl>(
+ cast<FunctionTemplateDecl>(D)->getTemplatedDecl()))) {
+ return S.Diag(D->getLocation(),
+ diag::note_access_protected_restricted_ctordtor)
+ << isa<CXXDestructorDecl>(D->getAsFunction());
+ }
+
+ // Otherwise, use the generic diagnostic.
+ return S.Diag(D->getLocation(),
+ diag::note_access_protected_restricted_object)
+ << S.Context.getTypeDeclType(ECRecord);
+ }
+
+ return false;
+}
+
+/// We are unable to access a given declaration due to its direct
+/// access control; diagnose that.
+static void diagnoseBadDirectAccess(Sema &S,
+ const EffectiveContext &EC,
+ AccessTarget &entity) {
+ assert(entity.isMemberAccess());
+ NamedDecl *D = entity.getTargetDecl();
+
+ if (D->getAccess() == AS_protected &&
+ TryDiagnoseProtectedAccess(S, EC, entity))
+ return;
+
+ // Find an original declaration.
+ while (D->isOutOfLine()) {
+ NamedDecl *PrevDecl = nullptr;
+ if (VarDecl *VD = dyn_cast<VarDecl>(D))
+ PrevDecl = VD->getPreviousDecl();
+ else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ PrevDecl = FD->getPreviousDecl();
+ else if (TypedefNameDecl *TND = dyn_cast<TypedefNameDecl>(D))
+ PrevDecl = TND->getPreviousDecl();
+ else if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ if (isa<RecordDecl>(D) && cast<RecordDecl>(D)->isInjectedClassName())
+ break;
+ PrevDecl = TD->getPreviousDecl();
+ }
+ if (!PrevDecl) break;
+ D = PrevDecl;
+ }
+
+ CXXRecordDecl *DeclaringClass = FindDeclaringClass(D);
+ Decl *ImmediateChild;
+ if (D->getDeclContext() == DeclaringClass)
+ ImmediateChild = D;
+ else {
+ DeclContext *DC = D->getDeclContext();
+ while (DC->getParent() != DeclaringClass)
+ DC = DC->getParent();
+ ImmediateChild = cast<Decl>(DC);
+ }
+
+ // Check whether there's an AccessSpecDecl preceding this in the
+ // chain of the DeclContext.
+ bool isImplicit = true;
+ for (const auto *I : DeclaringClass->decls()) {
+ if (I == ImmediateChild) break;
+ if (isa<AccessSpecDecl>(I)) {
+ isImplicit = false;
+ break;
+ }
+ }
+
+ S.Diag(D->getLocation(), diag::note_access_natural)
+ << (unsigned) (D->getAccess() == AS_protected)
+ << isImplicit;
+}
+
+/// Diagnose the path which caused the given declaration or base class
+/// to become inaccessible.
+static void DiagnoseAccessPath(Sema &S,
+ const EffectiveContext &EC,
+ AccessTarget &entity) {
+ // Save the instance context to preserve invariants.
+ AccessTarget::SavedInstanceContext _ = entity.saveInstanceContext();
+
+ // This basically repeats the main algorithm but keeps some more
+ // information.
+
+ // The natural access so far.
+ AccessSpecifier accessSoFar = AS_public;
+
+ // Check whether we have special rights to the declaring class.
+ if (entity.isMemberAccess()) {
+ NamedDecl *D = entity.getTargetDecl();
+ accessSoFar = D->getAccess();
+ const CXXRecordDecl *declaringClass = entity.getDeclaringClass();
+
+ switch (HasAccess(S, EC, declaringClass, accessSoFar, entity)) {
+ // If the declaration is accessible when named in its declaring
+ // class, then we must be constrained by the path.
+ case AR_accessible:
+ accessSoFar = AS_public;
+ entity.suppressInstanceContext();
+ break;
+
+ case AR_inaccessible:
+ if (accessSoFar == AS_private ||
+ declaringClass == entity.getEffectiveNamingClass())
+ return diagnoseBadDirectAccess(S, EC, entity);
+ break;
+
+ case AR_dependent:
+ llvm_unreachable("cannot diagnose dependent access");
+ }
+ }
+
+ CXXBasePaths paths;
+ CXXBasePath &path = *FindBestPath(S, EC, entity, accessSoFar, paths);
+ assert(path.Access != AS_public);
+
+ CXXBasePath::iterator i = path.end(), e = path.begin();
+ CXXBasePath::iterator constrainingBase = i;
+ while (i != e) {
+ --i;
+
+ assert(accessSoFar != AS_none && accessSoFar != AS_private);
+
+ // Is the entity accessible when named in the deriving class, as
+ // modified by the base specifier?
+ const CXXRecordDecl *derivingClass = i->Class->getCanonicalDecl();
+ const CXXBaseSpecifier *base = i->Base;
+
+ // If the access to this base is worse than the access we have to
+ // the declaration, remember it.
+ AccessSpecifier baseAccess = base->getAccessSpecifier();
+ if (baseAccess > accessSoFar) {
+ constrainingBase = i;
+ accessSoFar = baseAccess;
+ }
+
+ switch (HasAccess(S, EC, derivingClass, accessSoFar, entity)) {
+ case AR_inaccessible: break;
+ case AR_accessible:
+ accessSoFar = AS_public;
+ entity.suppressInstanceContext();
+ constrainingBase = nullptr;
+ break;
+ case AR_dependent:
+ llvm_unreachable("cannot diagnose dependent access");
+ }
+
+ // If this was private inheritance, but we don't have access to
+ // the deriving class, we're done.
+ if (accessSoFar == AS_private) {
+ assert(baseAccess == AS_private);
+ assert(constrainingBase == i);
+ break;
+ }
+ }
+
+ // If we don't have a constraining base, the access failure must be
+ // due to the original declaration.
+ if (constrainingBase == path.end())
+ return diagnoseBadDirectAccess(S, EC, entity);
+
+ // We're constrained by inheritance, but we want to say
+ // "declared private here" if we're diagnosing a hierarchy
+ // conversion and this is the final step.
+ unsigned diagnostic;
+ if (entity.isMemberAccess() ||
+ constrainingBase + 1 != path.end()) {
+ diagnostic = diag::note_access_constrained_by_path;
+ } else {
+ diagnostic = diag::note_access_natural;
+ }
+
+ const CXXBaseSpecifier *base = constrainingBase->Base;
+
+ S.Diag(base->getSourceRange().getBegin(), diagnostic)
+ << base->getSourceRange()
+ << (base->getAccessSpecifier() == AS_protected)
+ << (base->getAccessSpecifierAsWritten() == AS_none);
+
+ if (entity.isMemberAccess())
+ S.Diag(entity.getTargetDecl()->getLocation(),
+ diag::note_member_declared_at);
+}
+
+static void DiagnoseBadAccess(Sema &S, SourceLocation Loc,
+ const EffectiveContext &EC,
+ AccessTarget &Entity) {
+ const CXXRecordDecl *NamingClass = Entity.getNamingClass();
+ const CXXRecordDecl *DeclaringClass = Entity.getDeclaringClass();
+ NamedDecl *D = (Entity.isMemberAccess() ? Entity.getTargetDecl() : nullptr);
+
+ S.Diag(Loc, Entity.getDiag())
+ << (Entity.getAccess() == AS_protected)
+ << (D ? D->getDeclName() : DeclarationName())
+ << S.Context.getTypeDeclType(NamingClass)
+ << S.Context.getTypeDeclType(DeclaringClass);
+ DiagnoseAccessPath(S, EC, Entity);
+}
+
+/// MSVC has a bug where, if during a using-declaration name lookup the
+/// declaration found is inaccessible (private) and that declaration was
+/// brought into scope via another using declaration whose target
+/// declaration is accessible (public), then no error is generated.
+/// Example:
+/// class A {
+/// public:
+/// int f();
+/// };
+/// class B : public A {
+/// private:
+/// using A::f;
+/// };
+/// class C : public B {
+/// private:
+/// using B::f;
+/// };
+///
+/// Here, B::f is private, so this should fail in Standard C++, but because
+/// B::f refers to A::f, which is public, MSVC accepts it.
+static bool IsMicrosoftUsingDeclarationAccessBug(Sema& S,
+ SourceLocation AccessLoc,
+ AccessTarget &Entity) {
+ if (UsingShadowDecl *Shadow =
+ dyn_cast<UsingShadowDecl>(Entity.getTargetDecl())) {
+ const NamedDecl *OrigDecl = Entity.getTargetDecl()->getUnderlyingDecl();
+ if (Entity.getTargetDecl()->getAccess() == AS_private &&
+ (OrigDecl->getAccess() == AS_public ||
+ OrigDecl->getAccess() == AS_protected)) {
+ S.Diag(AccessLoc, diag::ext_ms_using_declaration_inaccessible)
+ << Shadow->getUsingDecl()->getQualifiedNameAsString()
+ << OrigDecl->getQualifiedNameAsString();
+ return true;
+ }
+ }
+ return false;
+}
+
+/// Determines whether the accessed entity is accessible. Public members
+/// have been weeded out by this point.
+static AccessResult IsAccessible(Sema &S,
+ const EffectiveContext &EC,
+ AccessTarget &Entity) {
+ // Determine the actual naming class.
+ const CXXRecordDecl *NamingClass = Entity.getEffectiveNamingClass();
+
+ AccessSpecifier UnprivilegedAccess = Entity.getAccess();
+ assert(UnprivilegedAccess != AS_public && "public access not weeded out");
+
+ // Before we try to recalculate access paths, try to white-list
+ // accesses which just trade in on the final step, i.e. accesses
+ // which don't require [M4] or [B4]. These are by far the most
+ // common forms of privileged access.
+ if (UnprivilegedAccess != AS_none) {
+ switch (HasAccess(S, EC, NamingClass, UnprivilegedAccess, Entity)) {
+ case AR_dependent:
+ // This is actually an interesting policy decision. We don't
+ // *have* to delay immediately here: we can do the full access
+ // calculation in the hope that friendship on some intermediate
+ // class will make the declaration accessible non-dependently.
+ // But that's not cheap, and odds are very good (note: assertion
+ // made without data) that the friend declaration will determine
+ // access.
+ return AR_dependent;
+
+ case AR_accessible: return AR_accessible;
+ case AR_inaccessible: break;
+ }
+ }
+
+ AccessTarget::SavedInstanceContext _ = Entity.saveInstanceContext();
+
+ // We lower member accesses to base accesses by pretending that the
+ // member is a base class of its declaring class.
+ AccessSpecifier FinalAccess;
+
+ if (Entity.isMemberAccess()) {
+ // Determine if the declaration is accessible from EC when named
+ // in its declaring class.
+ NamedDecl *Target = Entity.getTargetDecl();
+ const CXXRecordDecl *DeclaringClass = Entity.getDeclaringClass();
+
+ FinalAccess = Target->getAccess();
+ switch (HasAccess(S, EC, DeclaringClass, FinalAccess, Entity)) {
+ case AR_accessible:
+ // Target is accessible at EC when named in its declaring class.
+ // We can now hill-climb and simply check whether the declaring
+ // class is accessible as a base of the naming class. This is
+ // equivalent to checking the access of a notional public
+ // member with no instance context.
+ FinalAccess = AS_public;
+ Entity.suppressInstanceContext();
+ break;
+ case AR_inaccessible: break;
+ case AR_dependent: return AR_dependent; // see above
+ }
+
+ if (DeclaringClass == NamingClass)
+ return (FinalAccess == AS_public ? AR_accessible : AR_inaccessible);
+ } else {
+ FinalAccess = AS_public;
+ }
+
+ assert(Entity.getDeclaringClass() != NamingClass);
+
+ // Append the declaration's access if applicable.
+ CXXBasePaths Paths;
+ CXXBasePath *Path = FindBestPath(S, EC, Entity, FinalAccess, Paths);
+ if (!Path)
+ return AR_dependent;
+
+ assert(Path->Access <= UnprivilegedAccess &&
+ "access along best path worse than direct?");
+ if (Path->Access == AS_public)
+ return AR_accessible;
+ return AR_inaccessible;
+}
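+
+// Editor's sketch (not part of Clang): the lowering above, seen from user
+// code. The names A, B, and f are hypothetical.
+//
+//   class A { protected: int x; };
+//   class B : public A { public: void f() { x = 0; } };
+//
+// Access to 'x' is first checked against its declaring class A (where it
+// is protected); once accessible at that level, the remaining question is
+// whether A is an accessible base of the naming class B along some path.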
+
+static void DelayDependentAccess(Sema &S,
+ const EffectiveContext &EC,
+ SourceLocation Loc,
+ const AccessTarget &Entity) {
+ assert(EC.isDependent() && "delaying non-dependent access");
+ DeclContext *DC = EC.getInnerContext();
+ assert(DC->isDependentContext() && "delaying non-dependent access");
+ DependentDiagnostic::Create(S.Context, DC, DependentDiagnostic::Access,
+ Loc,
+ Entity.isMemberAccess(),
+ Entity.getAccess(),
+ Entity.getTargetDecl(),
+ Entity.getNamingClass(),
+ Entity.getBaseObjectType(),
+ Entity.getDiag());
+}
+
+/// Checks access to an entity from the given effective context.
+static AccessResult CheckEffectiveAccess(Sema &S,
+ const EffectiveContext &EC,
+ SourceLocation Loc,
+ AccessTarget &Entity) {
+ assert(Entity.getAccess() != AS_public && "called for public access!");
+
+ switch (IsAccessible(S, EC, Entity)) {
+ case AR_dependent:
+ DelayDependentAccess(S, EC, Loc, Entity);
+ return AR_dependent;
+
+ case AR_inaccessible:
+ if (S.getLangOpts().MSVCCompat &&
+ IsMicrosoftUsingDeclarationAccessBug(S, Loc, Entity))
+ return AR_accessible;
+ if (!Entity.isQuiet())
+ DiagnoseBadAccess(S, Loc, EC, Entity);
+ return AR_inaccessible;
+
+ case AR_accessible:
+ return AR_accessible;
+ }
+
+ // silence unnecessary warning
+ llvm_unreachable("invalid access result");
+}
+
+static Sema::AccessResult CheckAccess(Sema &S, SourceLocation Loc,
+ AccessTarget &Entity) {
+ // If the access path is public, it's accessible everywhere.
+ if (Entity.getAccess() == AS_public)
+ return Sema::AR_accessible;
+
+ // If we're currently parsing a declaration, we may need to delay
+ // access control checking, because our effective context might be
+ // different based on what the declaration comes out as.
+ //
+ // For example, we might be parsing a declaration with a scope
+ // specifier, like this:
+ // A::private_type A::foo() { ... }
+ //
+ // Or we might be parsing something that will turn out to be a friend:
+ // void foo(A::private_type);
+ // void B::foo(A::private_type);
+ if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
+ S.DelayedDiagnostics.add(DelayedDiagnostic::makeAccess(Loc, Entity));
+ return Sema::AR_delayed;
+ }
+
+ EffectiveContext EC(S.CurContext);
+ switch (CheckEffectiveAccess(S, EC, Loc, Entity)) {
+ case AR_accessible: return Sema::AR_accessible;
+ case AR_inaccessible: return Sema::AR_inaccessible;
+ case AR_dependent: return Sema::AR_dependent;
+ }
+ llvm_unreachable("invalid access result");
+}
+
+void Sema::HandleDelayedAccessCheck(DelayedDiagnostic &DD, Decl *D) {
+ // Access control for names used in the declarations of functions
+ // and function templates should normally be evaluated in the context
+ // of the declaration, just in case it's a friend of something.
+ // However, this does not apply to local extern declarations.
+
+ DeclContext *DC = D->getDeclContext();
+ if (D->isLocalExternDecl()) {
+ DC = D->getLexicalDeclContext();
+ } else if (FunctionDecl *FN = dyn_cast<FunctionDecl>(D)) {
+ DC = FN;
+ } else if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D)) {
+ DC = cast<DeclContext>(TD->getTemplatedDecl());
+ }
+
+ EffectiveContext EC(DC);
+
+ AccessTarget Target(DD.getAccessData());
+
+ if (CheckEffectiveAccess(*this, EC, DD.Loc, Target) == ::AR_inaccessible)
+ DD.Triggered = true;
+}
+
+void Sema::HandleDependentAccessCheck(const DependentDiagnostic &DD,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ SourceLocation Loc = DD.getAccessLoc();
+ AccessSpecifier Access = DD.getAccess();
+
+ Decl *NamingD = FindInstantiatedDecl(Loc, DD.getAccessNamingClass(),
+ TemplateArgs);
+ if (!NamingD) return;
+ Decl *TargetD = FindInstantiatedDecl(Loc, DD.getAccessTarget(),
+ TemplateArgs);
+ if (!TargetD) return;
+
+ if (DD.isAccessToMember()) {
+ CXXRecordDecl *NamingClass = cast<CXXRecordDecl>(NamingD);
+ NamedDecl *TargetDecl = cast<NamedDecl>(TargetD);
+ QualType BaseObjectType = DD.getAccessBaseObjectType();
+ if (!BaseObjectType.isNull()) {
+ BaseObjectType = SubstType(BaseObjectType, TemplateArgs, Loc,
+ DeclarationName());
+ if (BaseObjectType.isNull()) return;
+ }
+
+ AccessTarget Entity(Context,
+ AccessTarget::Member,
+ NamingClass,
+ DeclAccessPair::make(TargetDecl, Access),
+ BaseObjectType);
+ Entity.setDiag(DD.getDiagnostic());
+ CheckAccess(*this, Loc, Entity);
+ } else {
+ AccessTarget Entity(Context,
+ AccessTarget::Base,
+ cast<CXXRecordDecl>(TargetD),
+ cast<CXXRecordDecl>(NamingD),
+ Access);
+ Entity.setDiag(DD.getDiagnostic());
+ CheckAccess(*this, Loc, Entity);
+ }
+}
+
+Sema::AccessResult Sema::CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
+ DeclAccessPair Found) {
+ if (!getLangOpts().AccessControl ||
+ !E->getNamingClass() ||
+ Found.getAccess() == AS_public)
+ return AR_accessible;
+
+ AccessTarget Entity(Context, AccessTarget::Member, E->getNamingClass(),
+ Found, QualType());
+ Entity.setDiag(diag::err_access) << E->getSourceRange();
+
+ return CheckAccess(*this, E->getNameLoc(), Entity);
+}
+
+/// Perform access-control checking on a previously-unresolved member
+/// access which has now been resolved to a member.
+Sema::AccessResult Sema::CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
+ DeclAccessPair Found) {
+ if (!getLangOpts().AccessControl ||
+ Found.getAccess() == AS_public)
+ return AR_accessible;
+
+ QualType BaseType = E->getBaseType();
+ if (E->isArrow())
+ BaseType = BaseType->getAs<PointerType>()->getPointeeType();
+
+ AccessTarget Entity(Context, AccessTarget::Member, E->getNamingClass(),
+ Found, BaseType);
+ Entity.setDiag(diag::err_access) << E->getSourceRange();
+
+ return CheckAccess(*this, E->getMemberLoc(), Entity);
+}
+
+/// Is the given special member function accessible for the purposes of
+/// deciding whether to define a special member function as deleted?
+bool Sema::isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
+ AccessSpecifier access,
+ QualType objectType) {
+ // Fast path.
+ if (access == AS_public || !getLangOpts().AccessControl) return true;
+
+ AccessTarget entity(Context, AccessTarget::Member, decl->getParent(),
+ DeclAccessPair::make(decl, access), objectType);
+
+ // Suppress diagnostics.
+ entity.setDiag(PDiag());
+
+ switch (CheckAccess(*this, SourceLocation(), entity)) {
+ case AR_accessible: return true;
+ case AR_inaccessible: return false;
+ case AR_dependent: llvm_unreachable("dependent for =delete computation");
+ case AR_delayed: llvm_unreachable("cannot delay =delete computation");
+ }
+ llvm_unreachable("bad access result");
+}
+
+Sema::AccessResult Sema::CheckDestructorAccess(SourceLocation Loc,
+ CXXDestructorDecl *Dtor,
+ const PartialDiagnostic &PDiag,
+ QualType ObjectTy) {
+ if (!getLangOpts().AccessControl)
+ return AR_accessible;
+
+ // There's never a path involved when checking implicit destructor access.
+ AccessSpecifier Access = Dtor->getAccess();
+ if (Access == AS_public)
+ return AR_accessible;
+
+ CXXRecordDecl *NamingClass = Dtor->getParent();
+ if (ObjectTy.isNull()) ObjectTy = Context.getTypeDeclType(NamingClass);
+
+ AccessTarget Entity(Context, AccessTarget::Member, NamingClass,
+ DeclAccessPair::make(Dtor, Access),
+ ObjectTy);
+ Entity.setDiag(PDiag); // TODO: avoid copy
+
+ return CheckAccess(*this, Loc, Entity);
+}
+
+/// Checks access to a constructor.
+Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc,
+ CXXConstructorDecl *Constructor,
+ const InitializedEntity &Entity,
+ AccessSpecifier Access,
+ bool IsCopyBindingRefToTemp) {
+ if (!getLangOpts().AccessControl || Access == AS_public)
+ return AR_accessible;
+
+ PartialDiagnostic PD(PDiag());
+ switch (Entity.getKind()) {
+ default:
+ PD = PDiag(IsCopyBindingRefToTemp
+ ? diag::ext_rvalue_to_reference_access_ctor
+ : diag::err_access_ctor);
+
+ break;
+
+ case InitializedEntity::EK_Base:
+ PD = PDiag(diag::err_access_base_ctor);
+ PD << Entity.isInheritedVirtualBase()
+ << Entity.getBaseSpecifier()->getType() << getSpecialMember(Constructor);
+ break;
+
+ case InitializedEntity::EK_Member: {
+ const FieldDecl *Field = cast<FieldDecl>(Entity.getDecl());
+ PD = PDiag(diag::err_access_field_ctor);
+ PD << Field->getType() << getSpecialMember(Constructor);
+ break;
+ }
+
+ case InitializedEntity::EK_LambdaCapture: {
+ StringRef VarName = Entity.getCapturedVarName();
+ PD = PDiag(diag::err_access_lambda_capture);
+ PD << VarName << Entity.getType() << getSpecialMember(Constructor);
+ break;
+ }
+
+ }
+
+ return CheckConstructorAccess(UseLoc, Constructor, Entity, Access, PD);
+}
+
+/// Checks access to a constructor.
+Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc,
+ CXXConstructorDecl *Constructor,
+ const InitializedEntity &Entity,
+ AccessSpecifier Access,
+ const PartialDiagnostic &PD) {
+ if (!getLangOpts().AccessControl ||
+ Access == AS_public)
+ return AR_accessible;
+
+ CXXRecordDecl *NamingClass = Constructor->getParent();
+
+ // Initializing a base sub-object is an instance method call on an
+ // object of the derived class. Otherwise, we have an instance method
+ // call on an object of the constructed type.
+ CXXRecordDecl *ObjectClass;
+ if (Entity.getKind() == InitializedEntity::EK_Base) {
+ ObjectClass = cast<CXXConstructorDecl>(CurContext)->getParent();
+ } else {
+ ObjectClass = NamingClass;
+ }
+
+ AccessTarget AccessEntity(Context, AccessTarget::Member, NamingClass,
+ DeclAccessPair::make(Constructor, Access),
+ Context.getTypeDeclType(ObjectClass));
+ AccessEntity.setDiag(PD);
+
+ return CheckAccess(*this, UseLoc, AccessEntity);
+}
+
+/// Checks access to an overloaded operator new or delete.
+Sema::AccessResult Sema::CheckAllocationAccess(SourceLocation OpLoc,
+ SourceRange PlacementRange,
+ CXXRecordDecl *NamingClass,
+ DeclAccessPair Found,
+ bool Diagnose) {
+ if (!getLangOpts().AccessControl ||
+ !NamingClass ||
+ Found.getAccess() == AS_public)
+ return AR_accessible;
+
+ AccessTarget Entity(Context, AccessTarget::Member, NamingClass, Found,
+ QualType());
+ if (Diagnose)
+ Entity.setDiag(diag::err_access)
+ << PlacementRange;
+
+ return CheckAccess(*this, OpLoc, Entity);
+}
+
+/// \brief Checks access to a member.
+Sema::AccessResult Sema::CheckMemberAccess(SourceLocation UseLoc,
+ CXXRecordDecl *NamingClass,
+ DeclAccessPair Found) {
+ if (!getLangOpts().AccessControl ||
+ !NamingClass ||
+ Found.getAccess() == AS_public)
+ return AR_accessible;
+
+ AccessTarget Entity(Context, AccessTarget::Member, NamingClass,
+ Found, QualType());
+
+ return CheckAccess(*this, UseLoc, Entity);
+}
+
+/// Checks access to an overloaded member operator, including
+/// conversion operators.
+Sema::AccessResult Sema::CheckMemberOperatorAccess(SourceLocation OpLoc,
+ Expr *ObjectExpr,
+ Expr *ArgExpr,
+ DeclAccessPair Found) {
+ if (!getLangOpts().AccessControl ||
+ Found.getAccess() == AS_public)
+ return AR_accessible;
+
+ const RecordType *RT = ObjectExpr->getType()->castAs<RecordType>();
+ CXXRecordDecl *NamingClass = cast<CXXRecordDecl>(RT->getDecl());
+
+ AccessTarget Entity(Context, AccessTarget::Member, NamingClass, Found,
+ ObjectExpr->getType());
+ Entity.setDiag(diag::err_access)
+ << ObjectExpr->getSourceRange()
+ << (ArgExpr ? ArgExpr->getSourceRange() : SourceRange());
+
+ return CheckAccess(*this, OpLoc, Entity);
+}
+
+/// Checks access to the target of a friend declaration.
+Sema::AccessResult Sema::CheckFriendAccess(NamedDecl *target) {
+ assert(isa<CXXMethodDecl>(target->getAsFunction()));
+
+ // Friendship lookup is a redeclaration lookup, so there's never an
+ // inheritance path modifying access.
+ AccessSpecifier access = target->getAccess();
+
+ if (!getLangOpts().AccessControl || access == AS_public)
+ return AR_accessible;
+
+ CXXMethodDecl *method = cast<CXXMethodDecl>(target->getAsFunction());
+
+ AccessTarget entity(Context, AccessTarget::Member,
+ cast<CXXRecordDecl>(target->getDeclContext()),
+ DeclAccessPair::make(target, access),
+ /*no instance context*/ QualType());
+ entity.setDiag(diag::err_access_friend_function)
+ << (method->getQualifier() ? method->getQualifierLoc().getSourceRange()
+ : method->getNameInfo().getSourceRange());
+
+ // We need to bypass delayed-diagnostics because we might be called
+ // while the ParsingDeclarator is active.
+ EffectiveContext EC(CurContext);
+ switch (CheckEffectiveAccess(*this, EC, target->getLocation(), entity)) {
+ case AR_accessible: return Sema::AR_accessible;
+ case AR_inaccessible: return Sema::AR_inaccessible;
+ case AR_dependent: return Sema::AR_dependent;
+ }
+ llvm_unreachable("invalid access result");
+}
+
+Sema::AccessResult Sema::CheckAddressOfMemberAccess(Expr *OvlExpr,
+ DeclAccessPair Found) {
+ if (!getLangOpts().AccessControl ||
+ Found.getAccess() == AS_none ||
+ Found.getAccess() == AS_public)
+ return AR_accessible;
+
+ OverloadExpr *Ovl = OverloadExpr::find(OvlExpr).Expression;
+ CXXRecordDecl *NamingClass = Ovl->getNamingClass();
+
+ AccessTarget Entity(Context, AccessTarget::Member, NamingClass, Found,
+ /*no instance context*/ QualType());
+ Entity.setDiag(diag::err_access)
+ << Ovl->getSourceRange();
+
+ return CheckAccess(*this, Ovl->getNameLoc(), Entity);
+}
+
+/// Checks access for a hierarchy conversion.
+///
+/// \param ForceCheck true if this check should be performed even if access
+/// control is disabled; some things rely on this for semantics
+/// \param ForceUnprivileged true if this check should proceed as if the
+/// context had no special privileges
+Sema::AccessResult Sema::CheckBaseClassAccess(SourceLocation AccessLoc,
+ QualType Base,
+ QualType Derived,
+ const CXXBasePath &Path,
+ unsigned DiagID,
+ bool ForceCheck,
+ bool ForceUnprivileged) {
+ if (!ForceCheck && !getLangOpts().AccessControl)
+ return AR_accessible;
+
+ if (Path.Access == AS_public)
+ return AR_accessible;
+
+ CXXRecordDecl *BaseD, *DerivedD;
+ BaseD = cast<CXXRecordDecl>(Base->getAs<RecordType>()->getDecl());
+ DerivedD = cast<CXXRecordDecl>(Derived->getAs<RecordType>()->getDecl());
+
+ AccessTarget Entity(Context, AccessTarget::Base, BaseD, DerivedD,
+ Path.Access);
+ if (DiagID)
+ Entity.setDiag(DiagID) << Derived << Base;
+
+ if (ForceUnprivileged) {
+ switch (CheckEffectiveAccess(*this, EffectiveContext(),
+ AccessLoc, Entity)) {
+ case ::AR_accessible: return Sema::AR_accessible;
+ case ::AR_inaccessible: return Sema::AR_inaccessible;
+ case ::AR_dependent: return Sema::AR_dependent;
+ }
+ llvm_unreachable("unexpected result from CheckEffectiveAccess");
+ }
+ return CheckAccess(*this, AccessLoc, Entity);
+}
+
+/// Checks access to all the declarations in the given result set.
+void Sema::CheckLookupAccess(const LookupResult &R) {
+ assert(getLangOpts().AccessControl
+ && "performing access check without access control");
+ assert(R.getNamingClass() && "performing access check without naming class");
+
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
+ if (I.getAccess() != AS_public) {
+ AccessTarget Entity(Context, AccessedEntity::Member,
+ R.getNamingClass(), I.getPair(),
+ R.getBaseObjectType());
+ Entity.setDiag(diag::err_access);
+ CheckAccess(*this, R.getNameLoc(), Entity);
+ }
+ }
+}
+
+/// Checks access to Decl from the given class. The check will take access
+/// specifiers into account, but no member access expressions and such.
+///
+/// \param Decl the declaration to check if it can be accessed
+/// \param Ctx the class/context from which to start the search
+/// \return true if the Decl is accessible from the Class, false otherwise.
+bool Sema::IsSimplyAccessible(NamedDecl *Decl, DeclContext *Ctx) {
+ if (CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(Ctx)) {
+ if (!Decl->isCXXClassMember())
+ return true;
+
+ QualType qType = Class->getTypeForDecl()->getCanonicalTypeInternal();
+ AccessTarget Entity(Context, AccessedEntity::Member, Class,
+ DeclAccessPair::make(Decl, Decl->getAccess()),
+ qType);
+ if (Entity.getAccess() == AS_public)
+ return true;
+
+ EffectiveContext EC(CurContext);
+ return ::IsAccessible(*this, EC, Entity) != ::AR_inaccessible;
+ }
+
+ if (ObjCIvarDecl *Ivar = dyn_cast<ObjCIvarDecl>(Decl)) {
+ // @public and @package ivars are always accessible.
+ if (Ivar->getCanonicalAccessControl() == ObjCIvarDecl::Public ||
+ Ivar->getCanonicalAccessControl() == ObjCIvarDecl::Package)
+ return true;
+
+ // If we are inside a class or category implementation, determine the
+ // interface we're in.
+ ObjCInterfaceDecl *ClassOfMethodDecl = nullptr;
+ if (ObjCMethodDecl *MD = getCurMethodDecl())
+ ClassOfMethodDecl = MD->getClassInterface();
+ else if (FunctionDecl *FD = getCurFunctionDecl()) {
+ if (ObjCImplDecl *Impl
+ = dyn_cast<ObjCImplDecl>(FD->getLexicalDeclContext())) {
+ if (ObjCImplementationDecl *IMPD
+ = dyn_cast<ObjCImplementationDecl>(Impl))
+ ClassOfMethodDecl = IMPD->getClassInterface();
+ else if (ObjCCategoryImplDecl* CatImplClass
+ = dyn_cast<ObjCCategoryImplDecl>(Impl))
+ ClassOfMethodDecl = CatImplClass->getClassInterface();
+ }
+ }
+
+ // If we're not in an interface, this ivar is inaccessible.
+ if (!ClassOfMethodDecl)
+ return false;
+
+ // If we're inside the same interface that owns the ivar, we're fine.
+ if (declaresSameEntity(ClassOfMethodDecl, Ivar->getContainingInterface()))
+ return true;
+
+ // If the ivar is private, it's inaccessible.
+ if (Ivar->getCanonicalAccessControl() == ObjCIvarDecl::Private)
+ return false;
+
+ return Ivar->getContainingInterface()->isSuperClassOf(ClassOfMethodDecl);
+ }
+
+ return true;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp
new file mode 100644
index 0000000..5a29bad
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp
@@ -0,0 +1,619 @@
+//===--- SemaAttr.cpp - Semantic Analysis for Attributes ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for non-trivial attributes and
+// pragmas.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Lookup.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Pragma 'pack' and 'options align'
+//===----------------------------------------------------------------------===//
+
+namespace {
+ struct PackStackEntry {
+ // We just use a sentinel to represent when the stack is set to mac68k
+ // alignment.
+ static const unsigned kMac68kAlignmentSentinel = ~0U;
+
+ unsigned Alignment;
+ IdentifierInfo *Name;
+ };
+
+ /// PragmaPackStack - Simple class to wrap the stack used by #pragma
+ /// pack.
+ class PragmaPackStack {
+ typedef std::vector<PackStackEntry> stack_ty;
+
+ /// Alignment - The current user specified alignment.
+ unsigned Alignment;
+
+ /// Stack - Entries in the #pragma pack stack, consisting of saved
+ /// alignments and optional names.
+ stack_ty Stack;
+
+ public:
+ PragmaPackStack() : Alignment(0) {}
+
+ void setAlignment(unsigned A) { Alignment = A; }
+ unsigned getAlignment() { return Alignment; }
+
+ /// push - Push the current alignment onto the stack, optionally
+ /// using the given \arg Name for the record, if non-zero.
+ void push(IdentifierInfo *Name) {
+ PackStackEntry PSE = { Alignment, Name };
+ Stack.push_back(PSE);
+ }
+
+ /// pop - Pop a record from the stack and restore the current
+ /// alignment to the previous value. If \arg Name is non-zero then
+ /// the first such named record is popped, otherwise the top record
+ /// is popped. Returns true if the pop succeeded.
+ bool pop(IdentifierInfo *Name, bool IsReset);
+ };
+} // end anonymous namespace.
+
+bool PragmaPackStack::pop(IdentifierInfo *Name, bool IsReset) {
+ // If no name was given, just pop the top entry.
+ if (!Name) {
+ // An empty stack is a special case...
+ if (Stack.empty()) {
+ // If this isn't a reset, it is always an error.
+ if (!IsReset)
+ return false;
+
+ // Otherwise, it is an error only if no alignment has been set,
+ // since then there is nothing to reset.
+ if (!Alignment)
+ return false;
+
+ // Otherwise, reset to the default alignment.
+ Alignment = 0;
+ } else {
+ Alignment = Stack.back().Alignment;
+ Stack.pop_back();
+ }
+
+ return true;
+ }
+
+ // Otherwise, find the named record.
+ for (unsigned i = Stack.size(); i != 0; ) {
+ --i;
+ if (Stack[i].Name == Name) {
+ // Found it, pop up to and including this record.
+ Alignment = Stack[i].Alignment;
+ Stack.erase(Stack.begin() + i, Stack.end());
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+/// FreePackedContext - Deallocate and null out PackContext.
+void Sema::FreePackedContext() {
+ delete static_cast<PragmaPackStack*>(PackContext);
+ PackContext = nullptr;
+}
+
+void Sema::AddAlignmentAttributesForRecord(RecordDecl *RD) {
+ // If there is no pack context, we don't need any attributes.
+ if (!PackContext)
+ return;
+
+ PragmaPackStack *Stack = static_cast<PragmaPackStack*>(PackContext);
+
+ // Otherwise, check to see if we need a max field alignment attribute.
+ if (unsigned Alignment = Stack->getAlignment()) {
+ if (Alignment == PackStackEntry::kMac68kAlignmentSentinel)
+ RD->addAttr(AlignMac68kAttr::CreateImplicit(Context));
+ else
+ RD->addAttr(MaxFieldAlignmentAttr::CreateImplicit(Context,
+ Alignment * 8));
+ }
+}
+
+void Sema::AddMsStructLayoutForRecord(RecordDecl *RD) {
+ if (MSStructPragmaOn)
+ RD->addAttr(MSStructAttr::CreateImplicit(Context));
+
+ // FIXME: We should merge AddAlignmentAttributesForRecord with
+ // AddMsStructLayoutForRecord into AddPragmaAttributesForRecord, which takes
+ // all active pragmas and applies them as attributes to class definitions.
+ if (VtorDispModeStack.back() != getLangOpts().VtorDispMode)
+ RD->addAttr(
+ MSVtorDispAttr::CreateImplicit(Context, VtorDispModeStack.back()));
+}
+
+void Sema::ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
+ SourceLocation PragmaLoc) {
+ if (!PackContext)
+ PackContext = new PragmaPackStack();
+
+ PragmaPackStack *Context = static_cast<PragmaPackStack*>(PackContext);
+
+ switch (Kind) {
+ // For all targets we support native and natural are the same.
+ //
+ // FIXME: This is not true on Darwin/PPC.
+ case POAK_Native:
+ case POAK_Power:
+ case POAK_Natural:
+ Context->push(nullptr);
+ Context->setAlignment(0);
+ break;
+
+ // Note that '#pragma options align=packed' is not equivalent to attribute
+ // packed, it has a different precedence relative to attribute aligned.
+ case POAK_Packed:
+ Context->push(nullptr);
+ Context->setAlignment(1);
+ break;
+
+ case POAK_Mac68k:
+ // Check if the target supports this.
+ if (!this->Context.getTargetInfo().hasAlignMac68kSupport()) {
+ Diag(PragmaLoc, diag::err_pragma_options_align_mac68k_target_unsupported);
+ return;
+ }
+ Context->push(nullptr);
+ Context->setAlignment(PackStackEntry::kMac68kAlignmentSentinel);
+ break;
+
+ case POAK_Reset:
+ // Reset just pops the top of the stack, or resets the current alignment to
+ // default.
+ if (!Context->pop(nullptr, /*IsReset=*/true)) {
+ Diag(PragmaLoc, diag::warn_pragma_options_align_reset_failed)
+ << "stack empty";
+ }
+ break;
+ }
+}
+
+void Sema::ActOnPragmaPack(PragmaPackKind Kind, IdentifierInfo *Name,
+ Expr *alignment, SourceLocation PragmaLoc,
+ SourceLocation LParenLoc, SourceLocation RParenLoc) {
+ Expr *Alignment = static_cast<Expr *>(alignment);
+
+ // If specified then alignment must be a "small" power of two.
+ unsigned AlignmentVal = 0;
+ if (Alignment) {
+ llvm::APSInt Val;
+
+ // pack(0) is like pack(), which just works out since that is what
+ // we use 0 for in PackAttr.
+ if (Alignment->isTypeDependent() ||
+ Alignment->isValueDependent() ||
+ !Alignment->isIntegerConstantExpr(Val, Context) ||
+ !(Val == 0 || Val.isPowerOf2()) ||
+ Val.getZExtValue() > 16) {
+ Diag(PragmaLoc, diag::warn_pragma_pack_invalid_alignment);
+ return; // Ignore
+ }
+
+ AlignmentVal = (unsigned) Val.getZExtValue();
+ }
+
+ if (!PackContext)
+ PackContext = new PragmaPackStack();
+
+ PragmaPackStack *Context = static_cast<PragmaPackStack*>(PackContext);
+
+ switch (Kind) {
+ case Sema::PPK_Default: // pack([n])
+ Context->setAlignment(AlignmentVal);
+ break;
+
+ case Sema::PPK_Show: // pack(show)
+ // Show the current alignment, making sure to show the right value
+ // for the default.
+ AlignmentVal = Context->getAlignment();
+ // FIXME: This should come from the target.
+ if (AlignmentVal == 0)
+ AlignmentVal = 8;
+ if (AlignmentVal == PackStackEntry::kMac68kAlignmentSentinel)
+ Diag(PragmaLoc, diag::warn_pragma_pack_show) << "mac68k";
+ else
+ Diag(PragmaLoc, diag::warn_pragma_pack_show) << AlignmentVal;
+ break;
+
+ case Sema::PPK_Push: // pack(push [, id] [, n])
+ Context->push(Name);
+ // Set the new alignment if specified.
+ if (Alignment)
+ Context->setAlignment(AlignmentVal);
+ break;
+
+ case Sema::PPK_Pop: // pack(pop [, id] [, n])
+ // MSDN, C/C++ Preprocessor Reference > Pragma Directives > pack:
+ // "#pragma pack(pop, identifier, n) is undefined"
+ if (Alignment && Name)
+ Diag(PragmaLoc, diag::warn_pragma_pack_pop_identifer_and_alignment);
+
+ // Do the pop.
+ if (!Context->pop(Name, /*IsReset=*/false)) {
+ // If a name was specified then failure indicates the name
+ // wasn't found. Otherwise failure indicates the stack was
+ // empty.
+ Diag(PragmaLoc, diag::warn_pragma_pop_failed)
+ << "pack" << (Name ? "no record matching name" : "stack empty");
+
+ // FIXME: Warn about popping named records as MSVC does.
+ } else {
+ // Pop succeeded, set the new alignment if specified.
+ if (Alignment)
+ Context->setAlignment(AlignmentVal);
+ }
+ break;
+ }
+}
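+
+// Editor's sketch of the user-level forms dispatched above (illustrative
+// only; the label 'r1' is hypothetical):
+//
+//   #pragma pack(8)            // PPK_Default: set current alignment to 8
+//   #pragma pack(show)         // PPK_Show: diagnose the current value
+//   #pragma pack(push, r1, 2)  // PPK_Push: save under 'r1', then set to 2
+//   #pragma pack(pop, r1)      // PPK_Pop: pop up to and including 'r1'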
+
+void Sema::ActOnPragmaMSStruct(PragmaMSStructKind Kind) {
+ MSStructPragmaOn = (Kind == PMSST_ON);
+}
+
+void Sema::ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg) {
+ // FIXME: Serialize this.
+ switch (Kind) {
+ case PCK_Unknown:
+ llvm_unreachable("unexpected pragma comment kind");
+ case PCK_Linker:
+ Consumer.HandleLinkerOptionPragma(Arg);
+ return;
+ case PCK_Lib:
+ Consumer.HandleDependentLibrary(Arg);
+ return;
+ case PCK_Compiler:
+ case PCK_ExeStr:
+ case PCK_User:
+ return; // We ignore all of these.
+ }
+ llvm_unreachable("invalid pragma comment kind");
+}
+
+void Sema::ActOnPragmaDetectMismatch(StringRef Name, StringRef Value) {
+ // FIXME: Serialize this.
+ Consumer.HandleDetectMismatch(Name, Value);
+}
+
+void Sema::ActOnPragmaMSPointersToMembers(
+ LangOptions::PragmaMSPointersToMembersKind RepresentationMethod,
+ SourceLocation PragmaLoc) {
+ MSPointerToMemberRepresentationMethod = RepresentationMethod;
+ ImplicitMSInheritanceAttrLoc = PragmaLoc;
+}
+
+void Sema::ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind,
+ SourceLocation PragmaLoc,
+ MSVtorDispAttr::Mode Mode) {
+ switch (Kind) {
+ case PVDK_Set:
+ VtorDispModeStack.back() = Mode;
+ break;
+ case PVDK_Push:
+ VtorDispModeStack.push_back(Mode);
+ break;
+ case PVDK_Reset:
+ VtorDispModeStack.clear();
+ VtorDispModeStack.push_back(MSVtorDispAttr::Mode(LangOpts.VtorDispMode));
+ break;
+ case PVDK_Pop:
+ VtorDispModeStack.pop_back();
+ if (VtorDispModeStack.empty()) {
+ Diag(PragmaLoc, diag::warn_pragma_pop_failed) << "vtordisp"
+ << "stack empty";
+ VtorDispModeStack.push_back(MSVtorDispAttr::Mode(LangOpts.VtorDispMode));
+ }
+ break;
+ }
+}
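+
+// Illustrative forms (editor's sketch, not normative):
+//   #pragma vtordisp(push, 2)  // PVDK_Push: save the mode, then use 2
+//   #pragma vtordisp(pop)      // PVDK_Pop: restore the saved mode
+//   #pragma vtordisp()         // PVDK_Reset: back to the command-line mode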
+
+template<typename ValueType>
+void Sema::PragmaStack<ValueType>::Act(SourceLocation PragmaLocation,
+ PragmaMsStackAction Action,
+ llvm::StringRef StackSlotLabel,
+ ValueType Value) {
+ if (Action == PSK_Reset) {
+ CurrentValue = nullptr;
+ return;
+ }
+ if (Action & PSK_Push)
+ Stack.push_back(Slot(StackSlotLabel, CurrentValue, CurrentPragmaLocation));
+ else if (Action & PSK_Pop) {
+ if (!StackSlotLabel.empty()) {
+ // If we've got a label, try to find it and jump there.
+ auto I = std::find_if(Stack.rbegin(), Stack.rend(),
+ [&](const Slot &x) { return x.StackSlotLabel == StackSlotLabel; });
+ // If we found the label, pop from there.
+ if (I != Stack.rend()) {
+ CurrentValue = I->Value;
+ CurrentPragmaLocation = I->PragmaLocation;
+ Stack.erase(std::prev(I.base()), Stack.end());
+ }
+ } else if (!Stack.empty()) {
+ // We don't have a label, just pop the last entry.
+ CurrentValue = Stack.back().Value;
+ CurrentPragmaLocation = Stack.back().PragmaLocation;
+ Stack.pop_back();
+ }
+ }
+ if (Action & PSK_Set) {
+ CurrentValue = Value;
+ CurrentPragmaLocation = PragmaLocation;
+ }
+}
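+
+// Editor's sketch of how a labeled push/pop pair drives Act() for the
+// segment pragmas below ('L1' and the section name are hypothetical):
+//
+//   #pragma data_seg(push, L1, ".mydata")  // PSK_Push | PSK_Set
+//   int x;                                 // emitted into .mydata
+//   #pragma data_seg(pop, L1)              // pops back to the slot 'L1'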
+
+bool Sema::UnifySection(StringRef SectionName,
+ int SectionFlags,
+ DeclaratorDecl *Decl) {
+ auto Section = Context.SectionInfos.find(SectionName);
+ if (Section == Context.SectionInfos.end()) {
+ Context.SectionInfos[SectionName] =
+ ASTContext::SectionInfo(Decl, SourceLocation(), SectionFlags);
+ return false;
+ }
+ // A pre-declared section takes precedence w/o diagnostic.
+ if (Section->second.SectionFlags == SectionFlags ||
+ !(Section->second.SectionFlags & ASTContext::PSF_Implicit))
+ return false;
+ auto OtherDecl = Section->second.Decl;
+ Diag(Decl->getLocation(), diag::err_section_conflict)
+ << Decl << OtherDecl;
+ Diag(OtherDecl->getLocation(), diag::note_declared_at)
+ << OtherDecl->getName();
+ if (auto A = Decl->getAttr<SectionAttr>())
+ if (A->isImplicit())
+ Diag(A->getLocation(), diag::note_pragma_entered_here);
+ if (auto A = OtherDecl->getAttr<SectionAttr>())
+ if (A->isImplicit())
+ Diag(A->getLocation(), diag::note_pragma_entered_here);
+ return true;
+}
+
+bool Sema::UnifySection(StringRef SectionName,
+ int SectionFlags,
+ SourceLocation PragmaSectionLocation) {
+ auto Section = Context.SectionInfos.find(SectionName);
+ if (Section != Context.SectionInfos.end()) {
+ if (Section->second.SectionFlags == SectionFlags)
+ return false;
+ if (!(Section->second.SectionFlags & ASTContext::PSF_Implicit)) {
+ Diag(PragmaSectionLocation, diag::err_section_conflict)
+ << "this" << "a prior #pragma section";
+ Diag(Section->second.PragmaSectionLocation,
+ diag::note_pragma_entered_here);
+ return true;
+ }
+ }
+ Context.SectionInfos[SectionName] =
+ ASTContext::SectionInfo(nullptr, PragmaSectionLocation, SectionFlags);
+ return false;
+}
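+
+// Editor's sketch of the conflict this detects (section name hypothetical):
+//   #pragma section(".mysec", read)         // records flags for ".mysec"
+//   #pragma section(".mysec", read, write)  // different flags: diagnosed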
+
+/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
+void Sema::ActOnPragmaMSSeg(SourceLocation PragmaLocation,
+ PragmaMsStackAction Action,
+ llvm::StringRef StackSlotLabel,
+ StringLiteral *SegmentName,
+ llvm::StringRef PragmaName) {
+ PragmaStack<StringLiteral *> *Stack =
+ llvm::StringSwitch<PragmaStack<StringLiteral *> *>(PragmaName)
+ .Case("data_seg", &DataSegStack)
+ .Case("bss_seg", &BSSSegStack)
+ .Case("const_seg", &ConstSegStack)
+ .Case("code_seg", &CodeSegStack);
+ if (Action & PSK_Pop && Stack->Stack.empty())
+ Diag(PragmaLocation, diag::warn_pragma_pop_failed) << PragmaName
+ << "stack empty";
+ if (SegmentName &&
+ !checkSectionName(SegmentName->getLocStart(), SegmentName->getString()))
+ return;
+ Stack->Act(PragmaLocation, Action, StackSlotLabel, SegmentName);
+}
+
+/// \brief Called on well formed \#pragma section().
+void Sema::ActOnPragmaMSSection(SourceLocation PragmaLocation,
+ int SectionFlags, StringLiteral *SegmentName) {
+ UnifySection(SegmentName->getString(), SectionFlags, PragmaLocation);
+}
+
+void Sema::ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
+ StringLiteral *SegmentName) {
+ // There's no stack to maintain, so we just have a current section. When we
+ // see the default section, reset our current section back to null so we stop
+ // tacking on unnecessary attributes.
+ CurInitSeg = SegmentName->getString() == ".CRT$XCU" ? nullptr : SegmentName;
+ CurInitSegLoc = PragmaLocation;
+}
+
+void Sema::ActOnPragmaUnused(const Token &IdTok, Scope *curScope,
+ SourceLocation PragmaLoc) {
+
+ IdentifierInfo *Name = IdTok.getIdentifierInfo();
+ LookupResult Lookup(*this, Name, IdTok.getLocation(), LookupOrdinaryName);
+ LookupParsedName(Lookup, curScope, nullptr, true);
+
+ if (Lookup.empty()) {
+ Diag(PragmaLoc, diag::warn_pragma_unused_undeclared_var)
+ << Name << SourceRange(IdTok.getLocation());
+ return;
+ }
+
+ VarDecl *VD = Lookup.getAsSingle<VarDecl>();
+ if (!VD) {
+ Diag(PragmaLoc, diag::warn_pragma_unused_expected_var_arg)
+ << Name << SourceRange(IdTok.getLocation());
+ return;
+ }
+
+ // Warn if this was used before being marked unused.
+ if (VD->isUsed())
+ Diag(PragmaLoc, diag::warn_used_but_marked_unused) << Name;
+
+ VD->addAttr(UnusedAttr::CreateImplicit(Context, IdTok.getLocation()));
+}
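+
+// Illustrative use (editor's sketch):
+//   void f() {
+//     int x;
+//   #pragma unused(x)  // attaches UnusedAttr to 'x'; warns instead if
+//   }                  // 'x' was already used at this point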
+
+void Sema::AddCFAuditedAttribute(Decl *D) {
+ SourceLocation Loc = PP.getPragmaARCCFCodeAuditedLoc();
+ if (!Loc.isValid()) return;
+
+ // Don't add a redundant or conflicting attribute.
+ if (D->hasAttr<CFAuditedTransferAttr>() ||
+ D->hasAttr<CFUnknownTransferAttr>())
+ return;
+
+ D->addAttr(CFAuditedTransferAttr::CreateImplicit(Context, Loc));
+}
+
+void Sema::ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc) {
+ if (On)
+ OptimizeOffPragmaLocation = SourceLocation();
+ else
+ OptimizeOffPragmaLocation = PragmaLoc;
+}
+
+void Sema::AddRangeBasedOptnone(FunctionDecl *FD) {
+ // In the future, check other pragmas if they're implemented (e.g. pragma
+ // optimize 0 will probably map to this functionality too).
+ if (OptimizeOffPragmaLocation.isValid())
+ AddOptnoneAttributeIfNoConflicts(FD, OptimizeOffPragmaLocation);
+}
+
+void Sema::AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD,
+ SourceLocation Loc) {
+ // Don't add a conflicting attribute. No diagnostic is needed.
+ if (FD->hasAttr<MinSizeAttr>() || FD->hasAttr<AlwaysInlineAttr>())
+ return;
+
+ // Add attributes only if required. Optnone requires noinline as well, but if
+ // either is already present then don't bother adding them.
+ if (!FD->hasAttr<OptimizeNoneAttr>())
+ FD->addAttr(OptimizeNoneAttr::CreateImplicit(Context, Loc));
+ if (!FD->hasAttr<NoInlineAttr>())
+ FD->addAttr(NoInlineAttr::CreateImplicit(Context, Loc));
+}
+
+typedef std::vector<std::pair<unsigned, SourceLocation> > VisStack;
+enum : unsigned { NoVisibility = ~0U };
+
+void Sema::AddPushedVisibilityAttribute(Decl *D) {
+ if (!VisContext)
+ return;
+
+ NamedDecl *ND = dyn_cast<NamedDecl>(D);
+ if (ND && ND->getExplicitVisibility(NamedDecl::VisibilityForValue))
+ return;
+
+ VisStack *Stack = static_cast<VisStack*>(VisContext);
+ unsigned rawType = Stack->back().first;
+ if (rawType == NoVisibility) return;
+
+ VisibilityAttr::VisibilityType type
+ = (VisibilityAttr::VisibilityType) rawType;
+ SourceLocation loc = Stack->back().second;
+
+ D->addAttr(VisibilityAttr::CreateImplicit(Context, type, loc));
+}
+
+/// FreeVisContext - Deallocate and null out VisContext.
+void Sema::FreeVisContext() {
+ delete static_cast<VisStack*>(VisContext);
+ VisContext = nullptr;
+}
+
+static void PushPragmaVisibility(Sema &S, unsigned type, SourceLocation loc) {
+ // Put visibility on stack.
+ if (!S.VisContext)
+ S.VisContext = new VisStack;
+
+ VisStack *Stack = static_cast<VisStack*>(S.VisContext);
+ Stack->push_back(std::make_pair(type, loc));
+}
+
+void Sema::ActOnPragmaVisibility(const IdentifierInfo* VisType,
+ SourceLocation PragmaLoc) {
+ if (VisType) {
+ // Compute visibility to use.
+ VisibilityAttr::VisibilityType T;
+ if (!VisibilityAttr::ConvertStrToVisibilityType(VisType->getName(), T)) {
+ Diag(PragmaLoc, diag::warn_attribute_unknown_visibility) << VisType;
+ return;
+ }
+ PushPragmaVisibility(*this, T, PragmaLoc);
+ } else {
+ PopPragmaVisibility(false, PragmaLoc);
+ }
+}
+
+void Sema::ActOnPragmaFPContract(tok::OnOffSwitch OOS) {
+ switch (OOS) {
+ case tok::OOS_ON:
+ FPFeatures.fp_contract = 1;
+ break;
+ case tok::OOS_OFF:
+ FPFeatures.fp_contract = 0;
+ break;
+ case tok::OOS_DEFAULT:
+ FPFeatures.fp_contract = getLangOpts().DefaultFPContract;
+ break;
+ }
+}
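+
+// Illustrative forms (editor's sketch):
+//   #pragma STDC FP_CONTRACT ON       // allow contracting a*b+c into fma
+//   #pragma STDC FP_CONTRACT OFF      // forbid contraction
+//   #pragma STDC FP_CONTRACT DEFAULT  // follow the command-line default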
+
+void Sema::PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
+ SourceLocation Loc) {
+ // Visibility calculations will consider the namespace's visibility.
+ // Here we just want to note that we're in a visibility context
+ // which overrides any enclosing #pragma context, but doesn't itself
+ // contribute visibility.
+ PushPragmaVisibility(*this, NoVisibility, Loc);
+}
+
+void Sema::PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc) {
+ if (!VisContext) {
+ Diag(EndLoc, diag::err_pragma_pop_visibility_mismatch);
+ return;
+ }
+
+ // Pop visibility from stack
+ VisStack *Stack = static_cast<VisStack*>(VisContext);
+
+ const std::pair<unsigned, SourceLocation> *Back = &Stack->back();
+ bool StartsWithPragma = Back->first != NoVisibility;
+ if (StartsWithPragma && IsNamespaceEnd) {
+ Diag(Back->second, diag::err_pragma_push_visibility_mismatch);
+ Diag(EndLoc, diag::note_surrounding_namespace_ends_here);
+
+ // For better error recovery, eat all pushes inside the namespace.
+ do {
+ Stack->pop_back();
+ Back = &Stack->back();
+ StartsWithPragma = Back->first != NoVisibility;
+ } while (StartsWithPragma);
+ } else if (!StartsWithPragma && !IsNamespaceEnd) {
+ Diag(EndLoc, diag::err_pragma_pop_visibility_mismatch);
+ Diag(Back->second, diag::note_surrounding_namespace_starts_here);
+ return;
+ }
+
+ Stack->pop_back();
+ // To simplify the implementation, never keep around an empty stack.
+ if (Stack->empty())
+ FreeVisContext();
+}
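+
+// Editor's sketch of the pairing enforced above:
+//   #pragma GCC visibility push(hidden)
+//   void g();                       // 'g' picks up hidden visibility
+//   #pragma GCC visibility pop      // must match a preceding push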
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCUDA.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCUDA.cpp
new file mode 100644
index 0000000..61dfdd3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCUDA.cpp
@@ -0,0 +1,424 @@
+//===--- SemaCUDA.cpp - Semantic Analysis for CUDA constructs -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// \brief This file implements semantic analysis for CUDA constructs.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/Sema.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+using namespace clang;
+
+ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
+ MultiExprArg ExecConfig,
+ SourceLocation GGGLoc) {
+ FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl();
+ if (!ConfigDecl)
+ return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
+ << "cudaConfigureCall");
+ QualType ConfigQTy = ConfigDecl->getType();
+
+ DeclRefExpr *ConfigDR = new (Context)
+ DeclRefExpr(ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc);
+ MarkFunctionReferenced(LLLLoc, ConfigDecl);
+
+ return ActOnCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr,
+ /*IsExecConfig=*/true);
+}
+
+/// IdentifyCUDATarget - Determine the CUDA compilation target for this
+/// function.
+Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D) {
+ if (D->hasAttr<CUDAInvalidTargetAttr>())
+ return CFT_InvalidTarget;
+
+ if (D->hasAttr<CUDAGlobalAttr>())
+ return CFT_Global;
+
+ if (D->hasAttr<CUDADeviceAttr>()) {
+ if (D->hasAttr<CUDAHostAttr>())
+ return CFT_HostDevice;
+ return CFT_Device;
+ } else if (D->hasAttr<CUDAHostAttr>()) {
+ return CFT_Host;
+ } else if (D->isImplicit()) {
+ // Some implicit declarations (like intrinsic functions) are not marked.
+ // Set the most lenient target on them for maximal flexibility.
+ return CFT_HostDevice;
+ }
+
+ return CFT_Host;
+}
+
+// * CUDA Call preference table
+//
+// F - from,
+// T - to
+// Ph - preference in host mode
+// Pd - preference in device mode
+// H - handled in (x)
+// Preferences: b-best, f-fallback, l-last resort, n-never.
+//
+// | F | T | Ph | Pd | H |
+// |----+----+----+----+-----+
+// | d | d | b | b | (b) |
+// | d | g | n | n | (a) |
+// | d | h | l | l | (e) |
+// | d | hd | f | f | (c) |
+// | g | d | b | b | (b) |
+// | g | g | n | n | (a) |
+// | g | h | l | l | (e) |
+// | g | hd | f | f | (c) |
+// | h | d | l | l | (e) |
+// | h | g | b | b | (b) |
+// | h | h | b | b | (b) |
+// | h | hd | f | f | (c) |
+// | hd | d | l | f | (d) |
+// | hd | g | f | n |(d/a)|
+// | hd | h | f | l | (d) |
+// | hd | hd | b | b | (b) |
+
+Sema::CUDAFunctionPreference
+Sema::IdentifyCUDAPreference(const FunctionDecl *Caller,
+ const FunctionDecl *Callee) {
+ assert(getLangOpts().CUDATargetOverloads &&
+ "Should not be called w/o enabled target overloads.");
+
+ assert(Callee && "Callee must be valid.");
+ CUDAFunctionTarget CalleeTarget = IdentifyCUDATarget(Callee);
+ CUDAFunctionTarget CallerTarget =
+ (Caller != nullptr) ? IdentifyCUDATarget(Caller) : Sema::CFT_Host;
+
+ // If one of the targets is invalid, the check always fails, no matter what
+ // the other target is.
+ if (CallerTarget == CFT_InvalidTarget || CalleeTarget == CFT_InvalidTarget)
+ return CFP_Never;
+
+ // (a) Can't call global from some contexts until we support CUDA's
+ // dynamic parallelism.
+ if (CalleeTarget == CFT_Global &&
+ (CallerTarget == CFT_Global || CallerTarget == CFT_Device ||
+ (CallerTarget == CFT_HostDevice && getLangOpts().CUDAIsDevice)))
+ return CFP_Never;
+
+ // (b) Best case scenarios
+ if (CalleeTarget == CallerTarget ||
+ (CallerTarget == CFT_Host && CalleeTarget == CFT_Global) ||
+ (CallerTarget == CFT_Global && CalleeTarget == CFT_Device))
+ return CFP_Best;
+
+ // (c) Calling HostDevice is OK as a fallback that works for everyone.
+ if (CalleeTarget == CFT_HostDevice)
+ return CFP_Fallback;
+
+ // Figure out what should be returned in 'last resort' cases. Normally
+ // those would not be allowed, but we'll consider them if
+ // CUDADisableTargetCallChecks is true.
+ CUDAFunctionPreference QuestionableResult =
+ getLangOpts().CUDADisableTargetCallChecks ? CFP_LastResort : CFP_Never;
+
+ // (d) HostDevice behavior depends on compilation mode.
+ if (CallerTarget == CFT_HostDevice) {
+ // Calling a function that matches compilation mode is OK.
+ // Calling a function from the other side is frowned upon.
+ if (getLangOpts().CUDAIsDevice)
+ return CalleeTarget == CFT_Device ? CFP_Fallback : QuestionableResult;
+ else
+ return (CalleeTarget == CFT_Host || CalleeTarget == CFT_Global)
+ ? CFP_Fallback
+ : QuestionableResult;
+ }
+
+ // (e) Calling across device/host boundary is not something you should do.
+ if ((CallerTarget == CFT_Host && CalleeTarget == CFT_Device) ||
+ (CallerTarget == CFT_Device && CalleeTarget == CFT_Host) ||
+ (CallerTarget == CFT_Global && CalleeTarget == CFT_Host))
+ return QuestionableResult;
+
+ llvm_unreachable("All cases should've been handled by now.");
+}
+
+bool Sema::CheckCUDATarget(const FunctionDecl *Caller,
+ const FunctionDecl *Callee) {
+ // With target overloads enabled, we only disallow calling
+ // combinations with CFP_Never.
+ if (getLangOpts().CUDATargetOverloads)
+ return IdentifyCUDAPreference(Caller, Callee) == CFP_Never;
+
+ // The CUDADisableTargetCallChecks short-circuits this check: we assume all
+ // cross-target calls are valid.
+ if (getLangOpts().CUDADisableTargetCallChecks)
+ return false;
+
+ CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller),
+ CalleeTarget = IdentifyCUDATarget(Callee);
+
+ // If one of the targets is invalid, the check always fails, no matter what
+ // the other target is.
+ if (CallerTarget == CFT_InvalidTarget || CalleeTarget == CFT_InvalidTarget)
+ return true;
+
+ // CUDA B.1.1 "The __device__ qualifier declares a function that is [...]
+ // Callable from the device only."
+ if (CallerTarget == CFT_Host && CalleeTarget == CFT_Device)
+ return true;
+
+ // CUDA B.1.2 "The __global__ qualifier declares a function that is [...]
+ // Callable from the host only."
+ // CUDA B.1.3 "The __host__ qualifier declares a function that is [...]
+ // Callable from the host only."
+ if ((CallerTarget == CFT_Device || CallerTarget == CFT_Global) &&
+ (CalleeTarget == CFT_Host || CalleeTarget == CFT_Global))
+ return true;
+
+ // CUDA B.1.3 "The __device__ and __host__ qualifiers can be used together
+ // however, in which case the function is compiled for both the host and the
+ // device. The __CUDA_ARCH__ macro [...] can be used to differentiate code
+ // paths between host and device."
+ if (CallerTarget == CFT_HostDevice && CalleeTarget != CFT_HostDevice) {
+ // If the caller is implicit then the check always passes.
+ if (Caller->isImplicit()) return false;
+
+ bool InDeviceMode = getLangOpts().CUDAIsDevice;
+ if (!InDeviceMode && CalleeTarget != CFT_Host)
+ return true;
+ if (InDeviceMode && CalleeTarget != CFT_Device) {
+ // Allow host device functions to call host functions if explicitly
+ // requested.
+ if (CalleeTarget == CFT_Host &&
+ getLangOpts().CUDAAllowHostCallsFromHostDevice) {
+ Diag(Caller->getLocation(),
+ diag::warn_host_calls_from_host_device)
+ << Callee->getNameAsString() << Caller->getNameAsString();
+ return false;
+ }
+
+ return true;
+ }
+ }
+
+ return false;
+}
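+
+// Editor's sketch of a call rejected by the checks above (B.1.1):
+//   __device__ int d();
+//   __host__ int h() { return d(); }  // host -> device: returns true
+//                                     // (i.e. the call is disallowed)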
+
+template <typename T, typename FetchDeclFn>
+static void EraseUnwantedCUDAMatchesImpl(Sema &S, const FunctionDecl *Caller,
+ llvm::SmallVectorImpl<T> &Matches,
+ FetchDeclFn FetchDecl) {
+ assert(S.getLangOpts().CUDATargetOverloads &&
+ "Should not be called w/o enabled target overloads.");
+ if (Matches.size() <= 1)
+ return;
+
+ // Find the best call preference among the functions in Matches.
+ Sema::CUDAFunctionPreference P, BestCFP = Sema::CFP_Never;
+ for (auto const &Match : Matches) {
+ P = S.IdentifyCUDAPreference(Caller, FetchDecl(Match));
+ if (P > BestCFP)
+ BestCFP = P;
+ }
+
+ // Erase all functions with lower priority.
+ for (unsigned I = 0, N = Matches.size(); I != N;)
+ if (S.IdentifyCUDAPreference(Caller, FetchDecl(Matches[I])) < BestCFP) {
+ Matches[I] = Matches[--N];
+ Matches.resize(N);
+ } else {
+ ++I;
+ }
+}
+
+void Sema::EraseUnwantedCUDAMatches(const FunctionDecl *Caller,
+ SmallVectorImpl<FunctionDecl *> &Matches){
+ EraseUnwantedCUDAMatchesImpl<FunctionDecl *>(
+ *this, Caller, Matches, [](const FunctionDecl *item) { return item; });
+}
+
+void Sema::EraseUnwantedCUDAMatches(const FunctionDecl *Caller,
+ SmallVectorImpl<DeclAccessPair> &Matches) {
+ EraseUnwantedCUDAMatchesImpl<DeclAccessPair>(
+ *this, Caller, Matches, [](const DeclAccessPair &item) {
+ return dyn_cast<FunctionDecl>(item.getDecl());
+ });
+}
+
+void Sema::EraseUnwantedCUDAMatches(
+ const FunctionDecl *Caller,
+ SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches){
+ EraseUnwantedCUDAMatchesImpl<std::pair<DeclAccessPair, FunctionDecl *>>(
+ *this, Caller, Matches,
+ [](const std::pair<DeclAccessPair, FunctionDecl *> &item) {
+ return dyn_cast<FunctionDecl>(item.second);
+ });
+}
+
+/// When an implicitly-declared special member has to invoke more than one
+/// base/field special member, conflicts may occur in the targets of these
+/// members. For example, if one base's member is __host__ and another's is
+/// __device__, it's a conflict.
+/// This function figures out if the given targets \param Target1 and
+/// \param Target2 conflict, and if they do not it fills in
+/// \param ResolvedTarget with a target that resolves for both calls.
+/// \return true if there's a conflict, false otherwise.
+static bool
+resolveCalleeCUDATargetConflict(Sema::CUDAFunctionTarget Target1,
+ Sema::CUDAFunctionTarget Target2,
+ Sema::CUDAFunctionTarget *ResolvedTarget) {
+ if (Target1 == Sema::CFT_Global && Target2 == Sema::CFT_Global) {
+ // TODO: this shouldn't happen, really. Methods cannot be marked __global__.
+ // Clang should detect this earlier and produce an error. Then this
+ // condition can be changed to an assertion.
+ return true;
+ }
+
+ if (Target1 == Sema::CFT_HostDevice) {
+ *ResolvedTarget = Target2;
+ } else if (Target2 == Sema::CFT_HostDevice) {
+ *ResolvedTarget = Target1;
+ } else if (Target1 != Target2) {
+ return true;
+ } else {
+ *ResolvedTarget = Target1;
+ }
+
+ return false;
+}
+
+bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
+ CXXSpecialMember CSM,
+ CXXMethodDecl *MemberDecl,
+ bool ConstRHS,
+ bool Diagnose) {
+ llvm::Optional<CUDAFunctionTarget> InferredTarget;
+
+ // We're going to invoke special member lookup; mark that these special
+ // members are called from this one, and not from its caller.
+ ContextRAII MethodContext(*this, MemberDecl);
+
+ // Look for special members in base classes that should be invoked from here.
+ // Infer the target of this member based on the ones it should call.
+ // Skip direct and indirect virtual bases for abstract classes.
+ llvm::SmallVector<const CXXBaseSpecifier *, 16> Bases;
+ for (const auto &B : ClassDecl->bases()) {
+ if (!B.isVirtual()) {
+ Bases.push_back(&B);
+ }
+ }
+
+ if (!ClassDecl->isAbstract()) {
+ for (const auto &VB : ClassDecl->vbases()) {
+ Bases.push_back(&VB);
+ }
+ }
+
+ for (const auto *B : Bases) {
+ const RecordType *BaseType = B->getType()->getAs<RecordType>();
+ if (!BaseType) {
+ continue;
+ }
+
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ Sema::SpecialMemberOverloadResult *SMOR =
+ LookupSpecialMember(BaseClassDecl, CSM,
+ /* ConstArg */ ConstRHS,
+ /* VolatileArg */ false,
+ /* RValueThis */ false,
+ /* ConstThis */ false,
+ /* VolatileThis */ false);
+
+ if (!SMOR || !SMOR->getMethod()) {
+ continue;
+ }
+
+ CUDAFunctionTarget BaseMethodTarget = IdentifyCUDATarget(SMOR->getMethod());
+ if (!InferredTarget.hasValue()) {
+ InferredTarget = BaseMethodTarget;
+ } else {
+ bool ResolutionError = resolveCalleeCUDATargetConflict(
+ InferredTarget.getValue(), BaseMethodTarget,
+ InferredTarget.getPointer());
+ if (ResolutionError) {
+ if (Diagnose) {
+ Diag(ClassDecl->getLocation(),
+ diag::note_implicit_member_target_infer_collision)
+ << (unsigned)CSM << InferredTarget.getValue() << BaseMethodTarget;
+ }
+ MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
+ return true;
+ }
+ }
+ }
+
+ // Same as for bases, but now for special members of fields.
+ for (const auto *F : ClassDecl->fields()) {
+ if (F->isInvalidDecl()) {
+ continue;
+ }
+
+ const RecordType *FieldType =
+ Context.getBaseElementType(F->getType())->getAs<RecordType>();
+ if (!FieldType) {
+ continue;
+ }
+
+ CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(FieldType->getDecl());
+ Sema::SpecialMemberOverloadResult *SMOR =
+ LookupSpecialMember(FieldRecDecl, CSM,
+ /* ConstArg */ ConstRHS && !F->isMutable(),
+ /* VolatileArg */ false,
+ /* RValueThis */ false,
+ /* ConstThis */ false,
+ /* VolatileThis */ false);
+
+ if (!SMOR || !SMOR->getMethod()) {
+ continue;
+ }
+
+ CUDAFunctionTarget FieldMethodTarget =
+ IdentifyCUDATarget(SMOR->getMethod());
+ if (!InferredTarget.hasValue()) {
+ InferredTarget = FieldMethodTarget;
+ } else {
+ bool ResolutionError = resolveCalleeCUDATargetConflict(
+ InferredTarget.getValue(), FieldMethodTarget,
+ InferredTarget.getPointer());
+ if (ResolutionError) {
+ if (Diagnose) {
+ Diag(ClassDecl->getLocation(),
+ diag::note_implicit_member_target_infer_collision)
+ << (unsigned)CSM << InferredTarget.getValue()
+ << FieldMethodTarget;
+ }
+ MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
+ return true;
+ }
+ }
+ }
+
+ if (InferredTarget.hasValue()) {
+ if (InferredTarget.getValue() == CFT_Device) {
+ MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
+ } else if (InferredTarget.getValue() == CFT_Host) {
+ MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
+ } else {
+ MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
+ MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
+ }
+ } else {
+ // If no target was inferred, mark this member as __host__ __device__;
+ // it's the least restrictive option that can be invoked from any target.
+ MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
+ MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
+ }
+
+ return false;
+}
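+
+// Editor's sketch of a target-inference conflict (hypothetical types):
+//   struct A { __host__   A(const A&); };
+//   struct B { __device__ B(const B&); };
+//   struct C : A, B {};  // the implicit copy constructor would have to
+//                        // be both __host__ and __device__: marked with
+//                        // CUDAInvalidTargetAttr, noted when Diagnose is
+//                        // set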
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
new file mode 100644
index 0000000..f7aace6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
@@ -0,0 +1,1062 @@
+//===--- SemaCXXScopeSpec.cpp - Semantic Analysis for C++ scope specifiers-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements C++ semantic analysis for scope specifiers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "TypeLocBuilder.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Template.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+/// \brief Find the current instantiation that is associated with the given
+/// type.
+static CXXRecordDecl *getCurrentInstantiationOf(QualType T,
+ DeclContext *CurContext) {
+ if (T.isNull())
+ return nullptr;
+
+ const Type *Ty = T->getCanonicalTypeInternal().getTypePtr();
+ if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (!Record->isDependentContext() ||
+ Record->isCurrentInstantiation(CurContext))
+ return Record;
+
+ return nullptr;
+ } else if (isa<InjectedClassNameType>(Ty))
+ return cast<InjectedClassNameType>(Ty)->getDecl();
+ else
+ return nullptr;
+}
+
+/// \brief Compute the DeclContext that is associated with the given type.
+///
+/// \param T the type for which we are attempting to find a DeclContext.
+///
+/// \returns the declaration context represented by the type T,
+/// or NULL if the declaration context cannot be computed (e.g., because it is
+/// dependent and not the current instantiation).
+DeclContext *Sema::computeDeclContext(QualType T) {
+ if (!T->isDependentType())
+ if (const TagType *Tag = T->getAs<TagType>())
+ return Tag->getDecl();
+
+ return ::getCurrentInstantiationOf(T, CurContext);
+}
+
+/// \brief Compute the DeclContext that is associated with the given
+/// scope specifier.
+///
+/// \param SS the C++ scope specifier as it appears in the source
+///
+/// \param EnteringContext when true, we will be entering the context of
+/// this scope specifier, so we can retrieve the declaration context of a
+/// class template or class template partial specialization even if it is
+/// not the current instantiation.
+///
+/// \returns the declaration context represented by the scope specifier @p SS,
+/// or NULL if the declaration context cannot be computed (e.g., because it is
+/// dependent and not the current instantiation).
+DeclContext *Sema::computeDeclContext(const CXXScopeSpec &SS,
+ bool EnteringContext) {
+ if (!SS.isSet() || SS.isInvalid())
+ return nullptr;
+
+ NestedNameSpecifier *NNS = SS.getScopeRep();
+ if (NNS->isDependent()) {
+ // If this nested-name-specifier refers to the current
+ // instantiation, return its DeclContext.
+ if (CXXRecordDecl *Record = getCurrentInstantiationOf(NNS))
+ return Record;
+
+ if (EnteringContext) {
+ const Type *NNSType = NNS->getAsType();
+ if (!NNSType) {
+ return nullptr;
+ }
+
+ // Look through type alias templates, per C++0x [temp.dep.type]p1.
+ NNSType = Context.getCanonicalType(NNSType);
+ if (const TemplateSpecializationType *SpecType
+ = NNSType->getAs<TemplateSpecializationType>()) {
+ // We are entering the context of the nested name specifier, so try to
+ // match the nested name specifier to either a primary class template
+ // or a class template partial specialization.
+ if (ClassTemplateDecl *ClassTemplate
+ = dyn_cast_or_null<ClassTemplateDecl>(
+ SpecType->getTemplateName().getAsTemplateDecl())) {
+ QualType ContextType
+ = Context.getCanonicalType(QualType(SpecType, 0));
+
+ // If the type of the nested name specifier is the same as the
+ // injected class name of the named class template, we're entering
+ // into that class template definition.
+ QualType Injected
+ = ClassTemplate->getInjectedClassNameSpecialization();
+ if (Context.hasSameType(Injected, ContextType))
+ return ClassTemplate->getTemplatedDecl();
+
+ // If the type of the nested name specifier is the same as the
+ // type of one of the class template's class template partial
+ // specializations, we're entering into the definition of that
+ // class template partial specialization.
+ if (ClassTemplatePartialSpecializationDecl *PartialSpec
+ = ClassTemplate->findPartialSpecialization(ContextType))
+ return PartialSpec;
+ }
+ } else if (const RecordType *RecordT = NNSType->getAs<RecordType>()) {
+ // The nested name specifier refers to a member of a class template.
+ return RecordT->getDecl();
+ }
+ }
+
+ return nullptr;
+ }
+
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ llvm_unreachable("Dependent nested-name-specifier has no DeclContext");
+
+ case NestedNameSpecifier::Namespace:
+ return NNS->getAsNamespace();
+
+ case NestedNameSpecifier::NamespaceAlias:
+ return NNS->getAsNamespaceAlias()->getNamespace();
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ const TagType *Tag = NNS->getAsType()->getAs<TagType>();
+ assert(Tag && "Non-tag type in nested-name-specifier");
+ return Tag->getDecl();
+ }
+
+ case NestedNameSpecifier::Global:
+ return Context.getTranslationUnitDecl();
+
+ case NestedNameSpecifier::Super:
+ return NNS->getAsRecordDecl();
+ }
+
+ llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
+}
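+
+// A sketch of the EnteringContext case above, with arbitrary example names:
+// when parsing an out-of-line member of a class template, the scope
+// specifier A<T>:: matches the primary template's injected class name, so we
+// return the templated CXXRecordDecl even though A<T> is dependent.
+//
+//   template<typename T> struct A { void f(); };
+//   template<typename T> void A<T>::f() {}  // 'A<T>::' enters the template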
+
+bool Sema::isDependentScopeSpecifier(const CXXScopeSpec &SS) {
+ if (!SS.isSet() || SS.isInvalid())
+ return false;
+
+ return SS.getScopeRep()->isDependent();
+}
+
+/// \brief If the given nested name specifier refers to the current
+/// instantiation, return the declaration that corresponds to that
+/// current instantiation (C++0x [temp.dep.type]p1).
+///
+/// \param NNS a dependent nested name specifier.
+CXXRecordDecl *Sema::getCurrentInstantiationOf(NestedNameSpecifier *NNS) {
+ assert(getLangOpts().CPlusPlus && "Only callable in C++");
+ assert(NNS->isDependent() && "Only dependent nested-name-specifier allowed");
+
+ if (!NNS->getAsType())
+ return nullptr;
+
+ QualType T = QualType(NNS->getAsType(), 0);
+ return ::getCurrentInstantiationOf(T, CurContext);
+}
+
+/// \brief Require that the context specified by SS be complete.
+///
+/// If SS refers to a type, this routine checks whether the type is
+/// complete enough (or can be made complete enough) for name lookup
+/// into the DeclContext. A type that is not yet completed can be
+/// considered "complete enough" if it is a class/struct/union/enum
+/// that is currently being defined. Or, if we have a type that names
+/// a class template specialization that is not a complete type, we
+/// will attempt to instantiate that class template.
+bool Sema::RequireCompleteDeclContext(CXXScopeSpec &SS,
+ DeclContext *DC) {
+ assert(DC && "given null context");
+
+ TagDecl *tag = dyn_cast<TagDecl>(DC);
+
+ // If this is a dependent type, then we consider it complete.
+ if (!tag || tag->isDependentContext())
+ return false;
+
+ // If we're currently defining this type, then lookup into the
+ // type is okay: don't complain that it isn't complete yet.
+ QualType type = Context.getTypeDeclType(tag);
+ const TagType *tagType = type->getAs<TagType>();
+ if (tagType && tagType->isBeingDefined())
+ return false;
+
+ SourceLocation loc = SS.getLastQualifierNameLoc();
+ if (loc.isInvalid()) loc = SS.getRange().getBegin();
+
+ // The type must be complete.
+ if (RequireCompleteType(loc, type, diag::err_incomplete_nested_name_spec,
+ SS.getRange())) {
+ SS.SetInvalid(SS.getRange());
+ return true;
+ }
+
+ // Fixed enum types are complete, but they aren't valid as scopes
+ // until we see a definition, so awkwardly pull out this special
+ // case.
+ // FIXME: The definition might not be visible; complain if it is not.
+ const EnumType *enumType = dyn_cast_or_null<EnumType>(tagType);
+ if (!enumType || enumType->getDecl()->isCompleteDefinition())
+ return false;
+
+ // Try to instantiate the definition, if this is a specialization of an
+ // enumeration temploid.
+ EnumDecl *ED = enumType->getDecl();
+ if (EnumDecl *Pattern = ED->getInstantiatedFromMemberEnum()) {
+ MemberSpecializationInfo *MSI = ED->getMemberSpecializationInfo();
+ if (MSI->getTemplateSpecializationKind() != TSK_ExplicitSpecialization) {
+ if (InstantiateEnum(loc, ED, Pattern, getTemplateInstantiationArgs(ED),
+ TSK_ImplicitInstantiation)) {
+ SS.SetInvalid(SS.getRange());
+ return true;
+ }
+ return false;
+ }
+ }
+
+ Diag(loc, diag::err_incomplete_nested_name_spec)
+ << type << SS.getRange();
+ SS.SetInvalid(SS.getRange());
+ return true;
+}
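+
+// For illustration (arbitrary example names): lookup into an incomplete
+// class is rejected here, while lookup into a class that is still being
+// defined is accepted as "complete enough".
+//
+//   struct Fwd;
+//   int a = Fwd::value;    // error: incomplete nested-name-specifier
+//   struct S {
+//     static const int n = 1;
+//     enum { k = S::n };   // OK: S is currently being defined
+//   };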
+
+bool Sema::ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc,
+ CXXScopeSpec &SS) {
+ SS.MakeGlobal(Context, CCLoc);
+ return false;
+}
+
+bool Sema::ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
+ SourceLocation ColonColonLoc,
+ CXXScopeSpec &SS) {
+ CXXRecordDecl *RD = nullptr;
+ for (Scope *S = getCurScope(); S; S = S->getParent()) {
+ if (S->isFunctionScope()) {
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(S->getEntity()))
+ RD = MD->getParent();
+ break;
+ }
+ if (S->isClassScope()) {
+ RD = cast<CXXRecordDecl>(S->getEntity());
+ break;
+ }
+ }
+
+ if (!RD) {
+ Diag(SuperLoc, diag::err_invalid_super_scope);
+ return true;
+ } else if (RD->isLambda()) {
+ Diag(SuperLoc, diag::err_super_in_lambda_unsupported);
+ return true;
+ } else if (RD->getNumBases() == 0) {
+ Diag(SuperLoc, diag::err_no_base_classes) << RD->getName();
+ return true;
+ }
+
+ SS.MakeSuper(Context, RD, SuperLoc, ColonColonLoc);
+ return false;
+}
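+
+// A minimal sketch of the Microsoft '__super' scope specifier handled above
+// (requires Microsoft extensions mode; example names are arbitrary):
+//
+//   struct Base { void f(); };
+//   struct Derived : Base {
+//     void f() { __super::f(); }  // OK: resolved against Derived's bases
+//   };
+//   struct NoBases {
+//     void g() { __super::g(); }  // error: no base classes
+//   };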
+
+/// \brief Determines whether the given declaration is an acceptable
+/// result for name lookup of a nested-name-specifier.
+/// \param SD The declaration being checked as a nested-name-specifier.
+/// \param IsExtension If not null and the declaration is accepted only as an
+/// extension, the pointed-to variable is set to true.
+bool Sema::isAcceptableNestedNameSpecifier(const NamedDecl *SD,
+ bool *IsExtension) {
+ if (!SD)
+ return false;
+
+ SD = SD->getUnderlyingDecl();
+
+ // Namespace and namespace aliases are fine.
+ if (isa<NamespaceDecl>(SD))
+ return true;
+
+ if (!isa<TypeDecl>(SD))
+ return false;
+
+ // Determine whether we have a class (or, in C++11, an enum) or
+ // a typedef thereof. If so, build the nested-name-specifier.
+ QualType T = Context.getTypeDeclType(cast<TypeDecl>(SD));
+ if (T->isDependentType())
+ return true;
+ if (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(SD)) {
+ if (TD->getUnderlyingType()->isRecordType())
+ return true;
+ if (TD->getUnderlyingType()->isEnumeralType()) {
+ if (Context.getLangOpts().CPlusPlus11)
+ return true;
+ if (IsExtension)
+ *IsExtension = true;
+ }
+ } else if (isa<RecordDecl>(SD)) {
+ return true;
+ } else if (isa<EnumDecl>(SD)) {
+ if (Context.getLangOpts().CPlusPlus11)
+ return true;
+ if (IsExtension)
+ *IsExtension = true;
+ }
+
+ return false;
+}
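+
+// A short illustration of the enumeration case, with arbitrary names: an
+// enum as a nested-name-specifier is valid C++11 and is accepted only as an
+// extension in earlier modes, which is what *IsExtension reports.
+//
+//   enum E { e1 };
+//   int v = E::e1;  // OK in C++11; diagnosed as an extension in C++03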
+
+/// \brief If the given nested-name-specifier begins with a bare identifier
+/// (e.g., Base::), perform name lookup for that identifier as a
+/// nested-name-specifier within the given scope, and return the result of that
+/// name lookup.
+NamedDecl *Sema::FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS) {
+ if (!S || !NNS)
+ return nullptr;
+
+ while (NNS->getPrefix())
+ NNS = NNS->getPrefix();
+
+ if (NNS->getKind() != NestedNameSpecifier::Identifier)
+ return nullptr;
+
+ LookupResult Found(*this, NNS->getAsIdentifier(), SourceLocation(),
+ LookupNestedNameSpecifierName);
+ LookupName(Found, S);
+ assert(!Found.isAmbiguous() && "Cannot handle ambiguities here yet");
+
+ if (!Found.isSingleResult())
+ return nullptr;
+
+ NamedDecl *Result = Found.getFoundDecl();
+ if (isAcceptableNestedNameSpecifier(Result))
+ return Result;
+
+ return nullptr;
+}
+
+bool Sema::isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
+ SourceLocation IdLoc,
+ IdentifierInfo &II,
+ ParsedType ObjectTypePtr) {
+ QualType ObjectType = GetTypeFromParser(ObjectTypePtr);
+ LookupResult Found(*this, &II, IdLoc, LookupNestedNameSpecifierName);
+
+ // Determine where to perform name lookup
+ DeclContext *LookupCtx = nullptr;
+ bool isDependent = false;
+ if (!ObjectType.isNull()) {
+ // This nested-name-specifier occurs in a member access expression, e.g.,
+ // x->B::f, and we are looking into the type of the object.
+ assert(!SS.isSet() && "ObjectType and scope specifier cannot coexist");
+ LookupCtx = computeDeclContext(ObjectType);
+ isDependent = ObjectType->isDependentType();
+ } else if (SS.isSet()) {
+ // This nested-name-specifier occurs after another nested-name-specifier,
+    // so look into the context associated with the prior nested-name-specifier.
+ LookupCtx = computeDeclContext(SS, false);
+ isDependent = isDependentScopeSpecifier(SS);
+ Found.setContextRange(SS.getRange());
+ }
+
+ if (LookupCtx) {
+ // Perform "qualified" name lookup into the declaration context we
+ // computed, which is either the type of the base of a member access
+ // expression or the declaration context associated with a prior
+ // nested-name-specifier.
+
+ // The declaration context must be complete.
+ if (!LookupCtx->isDependentContext() &&
+ RequireCompleteDeclContext(SS, LookupCtx))
+ return false;
+
+ LookupQualifiedName(Found, LookupCtx);
+ } else if (isDependent) {
+ return false;
+ } else {
+ LookupName(Found, S);
+ }
+ Found.suppressDiagnostics();
+
+ return Found.getAsSingle<NamespaceDecl>();
+}
+
+namespace {
+
+// Callback to only accept typo corrections that can form a valid C++
+// nested-name-specifier: e.g. a namespace, class, or (in C++11) enumeration.
+class NestedNameSpecifierValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ explicit NestedNameSpecifierValidatorCCC(Sema &SRef)
+ : SRef(SRef) {}
+
+ bool ValidateCandidate(const TypoCorrection &candidate) override {
+ return SRef.isAcceptableNestedNameSpecifier(candidate.getCorrectionDecl());
+ }
+
+ private:
+ Sema &SRef;
+};
+
+}
+
+/// \brief Build a new nested-name-specifier for "identifier::", as described
+/// by ActOnCXXNestedNameSpecifier.
+///
+/// \param S Scope in which the nested-name-specifier occurs.
+/// \param Identifier Identifier in the sequence "identifier" "::".
+/// \param IdentifierLoc Location of the \p Identifier.
+/// \param CCLoc Location of "::" following Identifier.
+/// \param ObjectType Type of postfix expression if the nested-name-specifier
+/// occurs in a construct like: <tt>ptr->nns::f</tt>.
+/// \param EnteringContext If true, enter the context specified by the
+/// nested-name-specifier.
+/// \param SS Optional nested name specifier preceding the identifier.
+/// \param ScopeLookupResult Provides the result of name lookup within the
+/// scope of the nested-name-specifier that was computed at template
+/// definition time.
+/// \param ErrorRecoveryLookup Specifies if the method is called to improve
+/// error recovery and what kind of recovery is performed.
+/// \param IsCorrectedToColon If not null, suggestions to replace '::' with
+/// ':' are allowed. The bool pointed to by this parameter is set to
+/// 'true' if the identifier is treated as if it were followed by ':',
+/// not '::'.
+///
+/// This routine differs only slightly from ActOnCXXNestedNameSpecifier, in
+/// that it contains an extra parameter \p ScopeLookupResult, which provides
+/// the result of name lookup within the scope of the nested-name-specifier
+/// that was computed at template definition time.
+///
+/// If ErrorRecoveryLookup is true, then this call is used to improve error
+/// recovery. This means that it should not emit diagnostics, and it should
+/// just return true on failure. It also means it should only return a valid
+/// scope if it *knows* that the result is correct. It should not, for
+/// example, return a scope while in a dependent context. Nor will it extend
+/// \p SS with the scope specifier.
+bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
+ IdentifierInfo &Identifier,
+ SourceLocation IdentifierLoc,
+ SourceLocation CCLoc,
+ QualType ObjectType,
+ bool EnteringContext,
+ CXXScopeSpec &SS,
+ NamedDecl *ScopeLookupResult,
+ bool ErrorRecoveryLookup,
+ bool *IsCorrectedToColon) {
+ LookupResult Found(*this, &Identifier, IdentifierLoc,
+ LookupNestedNameSpecifierName);
+
+ // Determine where to perform name lookup
+ DeclContext *LookupCtx = nullptr;
+ bool isDependent = false;
+ if (IsCorrectedToColon)
+ *IsCorrectedToColon = false;
+ if (!ObjectType.isNull()) {
+ // This nested-name-specifier occurs in a member access expression, e.g.,
+ // x->B::f, and we are looking into the type of the object.
+ assert(!SS.isSet() && "ObjectType and scope specifier cannot coexist");
+ LookupCtx = computeDeclContext(ObjectType);
+ isDependent = ObjectType->isDependentType();
+ } else if (SS.isSet()) {
+ // This nested-name-specifier occurs after another nested-name-specifier,
+ // so look into the context associated with the prior nested-name-specifier.
+ LookupCtx = computeDeclContext(SS, EnteringContext);
+ isDependent = isDependentScopeSpecifier(SS);
+ Found.setContextRange(SS.getRange());
+ }
+
+ bool ObjectTypeSearchedInScope = false;
+ if (LookupCtx) {
+ // Perform "qualified" name lookup into the declaration context we
+ // computed, which is either the type of the base of a member access
+ // expression or the declaration context associated with a prior
+ // nested-name-specifier.
+
+ // The declaration context must be complete.
+ if (!LookupCtx->isDependentContext() &&
+ RequireCompleteDeclContext(SS, LookupCtx))
+ return true;
+
+ LookupQualifiedName(Found, LookupCtx);
+
+ if (!ObjectType.isNull() && Found.empty()) {
+ // C++ [basic.lookup.classref]p4:
+ // If the id-expression in a class member access is a qualified-id of
+ // the form
+ //
+ // class-name-or-namespace-name::...
+ //
+ // the class-name-or-namespace-name following the . or -> operator is
+ // looked up both in the context of the entire postfix-expression and in
+ // the scope of the class of the object expression. If the name is found
+ // only in the scope of the class of the object expression, the name
+ // shall refer to a class-name. If the name is found only in the
+ // context of the entire postfix-expression, the name shall refer to a
+ // class-name or namespace-name. [...]
+ //
+ // Qualified name lookup into a class will not find a namespace-name,
+ // so we do not need to diagnose that case specifically. However,
+ // this qualified name lookup may find nothing. In that case, perform
+ // unqualified name lookup in the given scope (if available) or
+ // reconstruct the result from when name lookup was performed at template
+ // definition time.
+ if (S)
+ LookupName(Found, S);
+ else if (ScopeLookupResult)
+ Found.addDecl(ScopeLookupResult);
+
+ ObjectTypeSearchedInScope = true;
+ }
+ } else if (!isDependent) {
+ // Perform unqualified name lookup in the current scope.
+ LookupName(Found, S);
+ }
+
+ if (Found.isAmbiguous())
+ return true;
+
+ // If we performed lookup into a dependent context and did not find anything,
+ // that's fine: just build a dependent nested-name-specifier.
+ if (Found.empty() && isDependent &&
+ !(LookupCtx && LookupCtx->isRecord() &&
+ (!cast<CXXRecordDecl>(LookupCtx)->hasDefinition() ||
+ !cast<CXXRecordDecl>(LookupCtx)->hasAnyDependentBases()))) {
+ // Don't speculate if we're just trying to improve error recovery.
+ if (ErrorRecoveryLookup)
+ return true;
+
+ // We were not able to compute the declaration context for a dependent
+ // base object type or prior nested-name-specifier, so this
+ // nested-name-specifier refers to an unknown specialization. Just build
+ // a dependent nested-name-specifier.
+ SS.Extend(Context, &Identifier, IdentifierLoc, CCLoc);
+ return false;
+ }
+
+ if (Found.empty() && !ErrorRecoveryLookup) {
+    // If the identifier is not found as a class-name-or-namespace-name, but is
+    // found as some other entity, don't look for typos.
+ LookupResult R(*this, Found.getLookupNameInfo(), LookupOrdinaryName);
+ if (LookupCtx)
+ LookupQualifiedName(R, LookupCtx);
+ else if (S && !isDependent)
+ LookupName(R, S);
+ if (!R.empty()) {
+ // Don't diagnose problems with this speculative lookup.
+ R.suppressDiagnostics();
+ // The identifier is found in ordinary lookup. If correction to colon is
+ // allowed, suggest replacement to ':'.
+ if (IsCorrectedToColon) {
+ *IsCorrectedToColon = true;
+ Diag(CCLoc, diag::err_nested_name_spec_is_not_class)
+ << &Identifier << getLangOpts().CPlusPlus
+ << FixItHint::CreateReplacement(CCLoc, ":");
+ if (NamedDecl *ND = R.getAsSingle<NamedDecl>())
+ Diag(ND->getLocation(), diag::note_declared_at);
+ return true;
+ }
+      // Replacement '::' -> ':' is not allowed; just issue the respective error.
+ Diag(R.getNameLoc(), diag::err_expected_class_or_namespace)
+ << &Identifier << getLangOpts().CPlusPlus;
+ if (NamedDecl *ND = R.getAsSingle<NamedDecl>())
+ Diag(ND->getLocation(), diag::note_entity_declared_at) << &Identifier;
+ return true;
+ }
+ }
+
+ if (Found.empty() && !ErrorRecoveryLookup && !getLangOpts().MSVCCompat) {
+ // We haven't found anything, and we're not recovering from a
+ // different kind of error, so look for typos.
+ DeclarationName Name = Found.getLookupName();
+ Found.clear();
+ if (TypoCorrection Corrected = CorrectTypo(
+ Found.getLookupNameInfo(), Found.getLookupKind(), S, &SS,
+ llvm::make_unique<NestedNameSpecifierValidatorCCC>(*this),
+ CTK_ErrorRecovery, LookupCtx, EnteringContext)) {
+ if (LookupCtx) {
+ bool DroppedSpecifier =
+ Corrected.WillReplaceSpecifier() &&
+ Name.getAsString() == Corrected.getAsString(getLangOpts());
+ if (DroppedSpecifier)
+ SS.clear();
+ diagnoseTypo(Corrected, PDiag(diag::err_no_member_suggest)
+ << Name << LookupCtx << DroppedSpecifier
+ << SS.getRange());
+ } else
+ diagnoseTypo(Corrected, PDiag(diag::err_undeclared_var_use_suggest)
+ << Name);
+
+ if (NamedDecl *ND = Corrected.getFoundDecl())
+ Found.addDecl(ND);
+ Found.setLookupName(Corrected.getCorrection());
+ } else {
+ Found.setLookupName(&Identifier);
+ }
+ }
+
+ NamedDecl *SD =
+ Found.isSingleResult() ? Found.getRepresentativeDecl() : nullptr;
+ bool IsExtension = false;
+ bool AcceptSpec = isAcceptableNestedNameSpecifier(SD, &IsExtension);
+ if (!AcceptSpec && IsExtension) {
+ AcceptSpec = true;
+ Diag(IdentifierLoc, diag::ext_nested_name_spec_is_enum);
+ }
+ if (AcceptSpec) {
+ if (!ObjectType.isNull() && !ObjectTypeSearchedInScope &&
+ !getLangOpts().CPlusPlus11) {
+ // C++03 [basic.lookup.classref]p4:
+ // [...] If the name is found in both contexts, the
+ // class-name-or-namespace-name shall refer to the same entity.
+ //
+ // We already found the name in the scope of the object. Now, look
+ // into the current scope (the scope of the postfix-expression) to
+ // see if we can find the same name there. As above, if there is no
+ // scope, reconstruct the result from the template instantiation itself.
+ //
+ // Note that C++11 does *not* perform this redundant lookup.
+ NamedDecl *OuterDecl;
+ if (S) {
+ LookupResult FoundOuter(*this, &Identifier, IdentifierLoc,
+ LookupNestedNameSpecifierName);
+ LookupName(FoundOuter, S);
+ OuterDecl = FoundOuter.getAsSingle<NamedDecl>();
+ } else
+ OuterDecl = ScopeLookupResult;
+
+ if (isAcceptableNestedNameSpecifier(OuterDecl) &&
+ OuterDecl->getCanonicalDecl() != SD->getCanonicalDecl() &&
+ (!isa<TypeDecl>(OuterDecl) || !isa<TypeDecl>(SD) ||
+ !Context.hasSameType(
+ Context.getTypeDeclType(cast<TypeDecl>(OuterDecl)),
+ Context.getTypeDeclType(cast<TypeDecl>(SD))))) {
+ if (ErrorRecoveryLookup)
+ return true;
+
+ Diag(IdentifierLoc,
+ diag::err_nested_name_member_ref_lookup_ambiguous)
+ << &Identifier;
+ Diag(SD->getLocation(), diag::note_ambig_member_ref_object_type)
+ << ObjectType;
+ Diag(OuterDecl->getLocation(), diag::note_ambig_member_ref_scope);
+
+ // Fall through so that we'll pick the name we found in the object
+ // type, since that's probably what the user wanted anyway.
+ }
+ }
+
+ if (auto *TD = dyn_cast_or_null<TypedefNameDecl>(SD))
+ MarkAnyDeclReferenced(TD->getLocation(), TD, /*OdrUse=*/false);
+
+ // If we're just performing this lookup for error-recovery purposes,
+ // don't extend the nested-name-specifier. Just return now.
+ if (ErrorRecoveryLookup)
+ return false;
+
+ // The use of a nested name specifier may trigger deprecation warnings.
+ DiagnoseUseOfDecl(SD, CCLoc);
+
+ if (NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(SD)) {
+ SS.Extend(Context, Namespace, IdentifierLoc, CCLoc);
+ return false;
+ }
+
+ if (NamespaceAliasDecl *Alias = dyn_cast<NamespaceAliasDecl>(SD)) {
+ SS.Extend(Context, Alias, IdentifierLoc, CCLoc);
+ return false;
+ }
+
+ QualType T =
+ Context.getTypeDeclType(cast<TypeDecl>(SD->getUnderlyingDecl()));
+ TypeLocBuilder TLB;
+ if (isa<InjectedClassNameType>(T)) {
+ InjectedClassNameTypeLoc InjectedTL
+ = TLB.push<InjectedClassNameTypeLoc>(T);
+ InjectedTL.setNameLoc(IdentifierLoc);
+ } else if (isa<RecordType>(T)) {
+ RecordTypeLoc RecordTL = TLB.push<RecordTypeLoc>(T);
+ RecordTL.setNameLoc(IdentifierLoc);
+ } else if (isa<TypedefType>(T)) {
+ TypedefTypeLoc TypedefTL = TLB.push<TypedefTypeLoc>(T);
+ TypedefTL.setNameLoc(IdentifierLoc);
+ } else if (isa<EnumType>(T)) {
+ EnumTypeLoc EnumTL = TLB.push<EnumTypeLoc>(T);
+ EnumTL.setNameLoc(IdentifierLoc);
+ } else if (isa<TemplateTypeParmType>(T)) {
+ TemplateTypeParmTypeLoc TemplateTypeTL
+ = TLB.push<TemplateTypeParmTypeLoc>(T);
+ TemplateTypeTL.setNameLoc(IdentifierLoc);
+ } else if (isa<UnresolvedUsingType>(T)) {
+ UnresolvedUsingTypeLoc UnresolvedTL
+ = TLB.push<UnresolvedUsingTypeLoc>(T);
+ UnresolvedTL.setNameLoc(IdentifierLoc);
+ } else if (isa<SubstTemplateTypeParmType>(T)) {
+ SubstTemplateTypeParmTypeLoc TL
+ = TLB.push<SubstTemplateTypeParmTypeLoc>(T);
+ TL.setNameLoc(IdentifierLoc);
+ } else if (isa<SubstTemplateTypeParmPackType>(T)) {
+ SubstTemplateTypeParmPackTypeLoc TL
+ = TLB.push<SubstTemplateTypeParmPackTypeLoc>(T);
+ TL.setNameLoc(IdentifierLoc);
+ } else {
+ llvm_unreachable("Unhandled TypeDecl node in nested-name-specifier");
+ }
+
+ if (T->isEnumeralType())
+ Diag(IdentifierLoc, diag::warn_cxx98_compat_enum_nested_name_spec);
+
+ SS.Extend(Context, SourceLocation(), TLB.getTypeLocInContext(Context, T),
+ CCLoc);
+ return false;
+ }
+
+ // Otherwise, we have an error case. If we don't want diagnostics, just
+ // return an error now.
+ if (ErrorRecoveryLookup)
+ return true;
+
+ // If we didn't find anything during our lookup, try again with
+ // ordinary name lookup, which can help us produce better error
+ // messages.
+ if (Found.empty()) {
+ Found.clear(LookupOrdinaryName);
+ LookupName(Found, S);
+ }
+
+ // In Microsoft mode, if we are within a templated function and we can't
+ // resolve Identifier, then extend the SS with Identifier. This will have
+ // the effect of resolving Identifier during template instantiation.
+ // The goal is to be able to resolve a function call whose
+ // nested-name-specifier is located inside a dependent base class.
+ // Example:
+ //
+ // class C {
+ // public:
+ // static void foo2() { }
+ // };
+ // template <class T> class A { public: typedef C D; };
+ //
+ // template <class T> class B : public A<T> {
+ // public:
+ // void foo() { D::foo2(); }
+ // };
+ if (getLangOpts().MSVCCompat) {
+ DeclContext *DC = LookupCtx ? LookupCtx : CurContext;
+ if (DC->isDependentContext() && DC->isFunctionOrMethod()) {
+ CXXRecordDecl *ContainingClass = dyn_cast<CXXRecordDecl>(DC->getParent());
+ if (ContainingClass && ContainingClass->hasAnyDependentBases()) {
+ Diag(IdentifierLoc, diag::ext_undeclared_unqual_id_with_dependent_base)
+ << &Identifier << ContainingClass;
+ SS.Extend(Context, &Identifier, IdentifierLoc, CCLoc);
+ return false;
+ }
+ }
+ }
+
+ if (!Found.empty()) {
+ if (TypeDecl *TD = Found.getAsSingle<TypeDecl>())
+ Diag(IdentifierLoc, diag::err_expected_class_or_namespace)
+ << QualType(TD->getTypeForDecl(), 0) << getLangOpts().CPlusPlus;
+ else {
+ Diag(IdentifierLoc, diag::err_expected_class_or_namespace)
+ << &Identifier << getLangOpts().CPlusPlus;
+ if (NamedDecl *ND = Found.getAsSingle<NamedDecl>())
+ Diag(ND->getLocation(), diag::note_entity_declared_at) << &Identifier;
+ }
+ } else if (SS.isSet())
+ Diag(IdentifierLoc, diag::err_no_member) << &Identifier << LookupCtx
+ << SS.getRange();
+ else
+ Diag(IdentifierLoc, diag::err_undeclared_var_use) << &Identifier;
+
+ return true;
+}
+
+bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
+ IdentifierInfo &Identifier,
+ SourceLocation IdentifierLoc,
+ SourceLocation CCLoc,
+ ParsedType ObjectType,
+ bool EnteringContext,
+ CXXScopeSpec &SS,
+ bool ErrorRecoveryLookup,
+ bool *IsCorrectedToColon) {
+ if (SS.isInvalid())
+ return true;
+
+ return BuildCXXNestedNameSpecifier(S, Identifier, IdentifierLoc, CCLoc,
+ GetTypeFromParser(ObjectType),
+ EnteringContext, SS,
+ /*ScopeLookupResult=*/nullptr, false,
+ IsCorrectedToColon);
+}
+
+bool Sema::ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
+ const DeclSpec &DS,
+ SourceLocation ColonColonLoc) {
+ if (SS.isInvalid() || DS.getTypeSpecType() == DeclSpec::TST_error)
+ return true;
+
+ assert(DS.getTypeSpecType() == DeclSpec::TST_decltype);
+
+ QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc());
+ if (!T->isDependentType() && !T->getAs<TagType>()) {
+ Diag(DS.getTypeSpecTypeLoc(), diag::err_expected_class_or_namespace)
+ << T << getLangOpts().CPlusPlus;
+ return true;
+ }
+
+ TypeLocBuilder TLB;
+ DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T);
+ DecltypeTL.setNameLoc(DS.getTypeSpecTypeLoc());
+ SS.Extend(Context, SourceLocation(), TLB.getTypeLocInContext(Context, T),
+ ColonColonLoc);
+ return false;
+}
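+
+// Illustrative uses of a decltype nested-name-specifier (arbitrary names):
+// the decltype must name a class or enumeration type, or be dependent.
+//
+//   struct S { static int n; };
+//   S s;
+//   int m = decltype(s)::n;  // OK: decltype(s) is the class type S
+//   int x = 0;
+//   int y = decltype(x)::n;  // error: 'int' is not a class or namespace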
+
+/// IsInvalidUnlessNestedName - This method is used for error recovery
+/// purposes to determine whether the specified identifier is only valid as
+/// a nested name specifier, for example a namespace name. It is
+/// conservatively correct to always return false from this method.
+///
+/// The arguments are the same as those passed to ActOnCXXNestedNameSpecifier.
+bool Sema::IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
+ IdentifierInfo &Identifier,
+ SourceLocation IdentifierLoc,
+ SourceLocation ColonLoc,
+ ParsedType ObjectType,
+ bool EnteringContext) {
+ if (SS.isInvalid())
+ return false;
+
+ return !BuildCXXNestedNameSpecifier(S, Identifier, IdentifierLoc, ColonLoc,
+ GetTypeFromParser(ObjectType),
+ EnteringContext, SS,
+ /*ScopeLookupResult=*/nullptr, true);
+}
+
+bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ TemplateTy Template,
+ SourceLocation TemplateNameLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgsIn,
+ SourceLocation RAngleLoc,
+ SourceLocation CCLoc,
+ bool EnteringContext) {
+ if (SS.isInvalid())
+ return true;
+
+ // Translate the parser's template argument list in our AST format.
+ TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
+ translateTemplateArguments(TemplateArgsIn, TemplateArgs);
+
+ DependentTemplateName *DTN = Template.get().getAsDependentTemplateName();
+ if (DTN && DTN->isIdentifier()) {
+ // Handle a dependent template specialization for which we cannot resolve
+ // the template name.
+ assert(DTN->getQualifier() == SS.getScopeRep());
+ QualType T = Context.getDependentTemplateSpecializationType(ETK_None,
+ DTN->getQualifier(),
+ DTN->getIdentifier(),
+ TemplateArgs);
+
+ // Create source-location information for this type.
+ TypeLocBuilder Builder;
+ DependentTemplateSpecializationTypeLoc SpecTL
+ = Builder.push<DependentTemplateSpecializationTypeLoc>(T);
+ SpecTL.setElaboratedKeywordLoc(SourceLocation());
+ SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateNameLoc);
+ SpecTL.setLAngleLoc(LAngleLoc);
+ SpecTL.setRAngleLoc(RAngleLoc);
+ for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
+ SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
+
+ SS.Extend(Context, TemplateKWLoc, Builder.getTypeLocInContext(Context, T),
+ CCLoc);
+ return false;
+ }
+
+ TemplateDecl *TD = Template.get().getAsTemplateDecl();
+ if (Template.get().getAsOverloadedTemplate() || DTN ||
+ isa<FunctionTemplateDecl>(TD) || isa<VarTemplateDecl>(TD)) {
+ SourceRange R(TemplateNameLoc, RAngleLoc);
+ if (SS.getRange().isValid())
+ R.setBegin(SS.getRange().getBegin());
+
+ Diag(CCLoc, diag::err_non_type_template_in_nested_name_specifier)
+ << (TD && isa<VarTemplateDecl>(TD)) << Template.get() << R;
+ NoteAllFoundTemplates(Template.get());
+ return true;
+ }
+
+ // We were able to resolve the template name to an actual template.
+ // Build an appropriate nested-name-specifier.
+ QualType T = CheckTemplateIdType(Template.get(), TemplateNameLoc,
+ TemplateArgs);
+ if (T.isNull())
+ return true;
+
+ // Alias template specializations can produce types which are not valid
+ // nested name specifiers.
+ if (!T->isDependentType() && !T->getAs<TagType>()) {
+ Diag(TemplateNameLoc, diag::err_nested_name_spec_non_tag) << T;
+ NoteAllFoundTemplates(Template.get());
+ return true;
+ }
+
+ // Provide source-location information for the template specialization type.
+ TypeLocBuilder Builder;
+ TemplateSpecializationTypeLoc SpecTL
+ = Builder.push<TemplateSpecializationTypeLoc>(T);
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateNameLoc);
+ SpecTL.setLAngleLoc(LAngleLoc);
+ SpecTL.setRAngleLoc(RAngleLoc);
+ for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
+ SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
+
+ SS.Extend(Context, TemplateKWLoc, Builder.getTypeLocInContext(Context, T),
+ CCLoc);
+ return false;
+}
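+
+// A sketch of the two template-id cases above, with arbitrary example names:
+//
+//   template<typename T> struct A { typedef T type; };
+//   A<int>::type t1;  // resolved specialization: TemplateSpecializationType
+//
+//   template<typename T> struct B {
+//     typename T::template Inner<int>::type t2;  // unresolvable dependent
+//   };                                           // template name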
+
+namespace {
+  /// \brief A structure that stores a nested-name-specifier annotation,
+  /// including both the nested-name-specifier and its source-location
+  /// information (which is stored immediately after the structure).
+ struct NestedNameSpecifierAnnotation {
+ NestedNameSpecifier *NNS;
+ };
+}
+
+void *Sema::SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS) {
+ if (SS.isEmpty() || SS.isInvalid())
+ return nullptr;
+
+ void *Mem = Context.Allocate((sizeof(NestedNameSpecifierAnnotation) +
+ SS.location_size()),
+ llvm::alignOf<NestedNameSpecifierAnnotation>());
+ NestedNameSpecifierAnnotation *Annotation
+ = new (Mem) NestedNameSpecifierAnnotation;
+ Annotation->NNS = SS.getScopeRep();
+ memcpy(Annotation + 1, SS.location_data(), SS.location_size());
+ return Annotation;
+}
+
+void Sema::RestoreNestedNameSpecifierAnnotation(void *AnnotationPtr,
+ SourceRange AnnotationRange,
+ CXXScopeSpec &SS) {
+ if (!AnnotationPtr) {
+ SS.SetInvalid(AnnotationRange);
+ return;
+ }
+
+ NestedNameSpecifierAnnotation *Annotation
+ = static_cast<NestedNameSpecifierAnnotation *>(AnnotationPtr);
+ SS.Adopt(NestedNameSpecifierLoc(Annotation->NNS, Annotation + 1));
+}
+
+bool Sema::ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS) {
+ assert(SS.isSet() && "Parser passed invalid CXXScopeSpec.");
+
+ NestedNameSpecifier *Qualifier = SS.getScopeRep();
+
+ // There are only two places a well-formed program may qualify a
+ // declarator: first, when defining a namespace or class member
+ // out-of-line, and second, when naming an explicitly-qualified
+ // friend function. The latter case is governed by
+ // C++03 [basic.lookup.unqual]p10:
+ // In a friend declaration naming a member function, a name used
+ // in the function declarator and not part of a template-argument
+ // in a template-id is first looked up in the scope of the member
+ // function's class. If it is not found, or if the name is part of
+ // a template-argument in a template-id, the look up is as
+ // described for unqualified names in the definition of the class
+ // granting friendship.
+ // i.e. we don't push a scope unless it's a class member.
+
+ switch (Qualifier->getKind()) {
+ case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ // These are always namespace scopes. We never want to enter a
+ // namespace scope from anything but a file context.
+ return CurContext->getRedeclContext()->isFileContext();
+
+ case NestedNameSpecifier::Identifier:
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ case NestedNameSpecifier::Super:
+ // These are never namespace scopes.
+ return true;
+ }
+
+ llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
+}
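+
+// The two well-formed situations described above, as a minimal sketch with
+// arbitrary names:
+//
+//   namespace N { void f(); }
+//   void N::f() {}  // namespace member: entered only from file scope
+//
+//   struct S { void h(); };
+//   void S::h() {}  // class member: the declarator scope is always entered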
+
+/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
+/// scope or nested-name-specifier) is parsed, part of a declarator-id.
+/// After this method is called, according to [C++ 3.4.3p3], names should be
+/// looked up in the declarator-id's scope, until the declarator is parsed and
+/// ActOnCXXExitDeclaratorScope is called.
+/// The 'SS' should be a non-empty valid CXXScopeSpec.
+bool Sema::ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS) {
+ assert(SS.isSet() && "Parser passed invalid CXXScopeSpec.");
+
+ if (SS.isInvalid()) return true;
+
+ DeclContext *DC = computeDeclContext(SS, true);
+ if (!DC) return true;
+
+ // Before we enter a declarator's context, we need to make sure that
+ // it is a complete declaration context.
+ if (!DC->isDependentContext() && RequireCompleteDeclContext(SS, DC))
+ return true;
+
+ EnterDeclaratorContext(S, DC);
+
+ // Rebuild the nested name specifier for the new scope.
+ if (DC->isDependentContext())
+ RebuildNestedNameSpecifierInCurrentInstantiation(SS);
+
+ return false;
+}
+
+/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
+/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
+/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
+/// Used to indicate that names should revert to being looked up in the
+/// defining scope.
+void Sema::ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS) {
+ assert(SS.isSet() && "Parser passed invalid CXXScopeSpec.");
+ if (SS.isInvalid())
+ return;
+ assert(!SS.isInvalid() && computeDeclContext(SS, true) &&
+ "exiting declarator scope we never really entered");
+ ExitDeclaratorContext(S);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp
new file mode 100644
index 0000000..07b0589
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp
@@ -0,0 +1,2506 @@
+//===--- SemaCast.cpp - Semantic Analysis for Casts -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for cast expressions, including
+// 1) C-style casts like '(int) x'
+// 2) C++ functional casts like 'int(x)'
+// 3) C++ named casts like 'static_cast<int>(x)'
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/Initialization.h"
+#include "llvm/ADT/SmallVector.h"
+#include <set>
+using namespace clang;
+
+enum TryCastResult {
+ TC_NotApplicable, ///< The cast method is not applicable.
+ TC_Success, ///< The cast method is appropriate and successful.
+ TC_Failed ///< The cast method is appropriate, but failed. A
+ ///< diagnostic has been emitted.
+};
+
+enum CastType {
+ CT_Const, ///< const_cast
+ CT_Static, ///< static_cast
+ CT_Reinterpret, ///< reinterpret_cast
+ CT_Dynamic, ///< dynamic_cast
+ CT_CStyle, ///< (Type)expr
+ CT_Functional ///< Type(expr)
+};
+
+namespace {
+ struct CastOperation {
+ CastOperation(Sema &S, QualType destType, ExprResult src)
+ : Self(S), SrcExpr(src), DestType(destType),
+ ResultType(destType.getNonLValueExprType(S.Context)),
+ ValueKind(Expr::getValueKindForType(destType)),
+ Kind(CK_Dependent), IsARCUnbridgedCast(false) {
+
+ if (const BuiltinType *placeholder =
+ src.get()->getType()->getAsPlaceholderType()) {
+ PlaceholderKind = placeholder->getKind();
+ } else {
+ PlaceholderKind = (BuiltinType::Kind) 0;
+ }
+ }
+
+ Sema &Self;
+ ExprResult SrcExpr;
+ QualType DestType;
+ QualType ResultType;
+ ExprValueKind ValueKind;
+ CastKind Kind;
+ BuiltinType::Kind PlaceholderKind;
+ CXXCastPath BasePath;
+ bool IsARCUnbridgedCast;
+
+ SourceRange OpRange;
+ SourceRange DestRange;
+
+ // Top-level semantics-checking routines.
+ void CheckConstCast();
+ void CheckReinterpretCast();
+ void CheckStaticCast();
+ void CheckDynamicCast();
+ void CheckCXXCStyleCast(bool FunctionalCast, bool ListInitialization);
+ void CheckCStyleCast();
+
+ /// Complete an apparently-successful cast operation that yields
+ /// the given expression.
+ ExprResult complete(CastExpr *castExpr) {
+ // If this is an unbridged cast, wrap the result in an implicit
+ // cast that yields the unbridged-cast placeholder type.
+ if (IsARCUnbridgedCast) {
+ castExpr = ImplicitCastExpr::Create(Self.Context,
+ Self.Context.ARCUnbridgedCastTy,
+ CK_Dependent, castExpr, nullptr,
+ castExpr->getValueKind());
+ }
+ return castExpr;
+ }
+
+ // Internal convenience methods.
+
+ /// Try to handle the given placeholder expression kind. Return
+ /// true if the source expression has the appropriate placeholder
+ /// kind. A placeholder can only be claimed once.
+ bool claimPlaceholder(BuiltinType::Kind K) {
+ if (PlaceholderKind != K) return false;
+
+ PlaceholderKind = (BuiltinType::Kind) 0;
+ return true;
+ }
+
+ bool isPlaceholder() const {
+ return PlaceholderKind != 0;
+ }
+ bool isPlaceholder(BuiltinType::Kind K) const {
+ return PlaceholderKind == K;
+ }
+
+ void checkCastAlign() {
+ Self.CheckCastAlign(SrcExpr.get(), DestType, OpRange);
+ }
+
+ void checkObjCARCConversion(Sema::CheckedConversionKind CCK) {
+ assert(Self.getLangOpts().ObjCAutoRefCount);
+
+ Expr *src = SrcExpr.get();
+ if (Self.CheckObjCARCConversion(OpRange, DestType, src, CCK) ==
+ Sema::ACR_unbridged)
+ IsARCUnbridgedCast = true;
+ SrcExpr = src;
+ }
+
+ /// Check for and handle non-overload placeholder expressions.
+ void checkNonOverloadPlaceholders() {
+ if (!isPlaceholder() || isPlaceholder(BuiltinType::Overload))
+ return;
+
+ SrcExpr = Self.CheckPlaceholderExpr(SrcExpr.get());
+ if (SrcExpr.isInvalid())
+ return;
+ PlaceholderKind = (BuiltinType::Kind) 0;
+ }
+ };
+}
+
+// The Try functions attempt a specific way of casting. If they succeed, they
+// return TC_Success. If their way of casting is not appropriate for the given
+// arguments, they return TC_NotApplicable and *may* set diag to a diagnostic
+// to emit if no other way succeeds. If their way of casting is appropriate but
+// fails, they return TC_Failed and *must* set diag; they can set it to 0 if
+// they emit a specialized diagnostic.
+// All diagnostics returned by these functions must expect the same three
+// arguments:
+// %0: Cast Type (a value from the CastType enumeration)
+// %1: Source Type
+// %2: Destination Type
+static TryCastResult TryLValueToRValueCast(Sema &Self, Expr *SrcExpr,
+ QualType DestType, bool CStyle,
+ CastKind &Kind,
+ CXXCastPath &BasePath,
+ unsigned &msg);
+static TryCastResult TryStaticReferenceDowncast(Sema &Self, Expr *SrcExpr,
+ QualType DestType, bool CStyle,
+ SourceRange OpRange,
+ unsigned &msg,
+ CastKind &Kind,
+ CXXCastPath &BasePath);
+static TryCastResult TryStaticPointerDowncast(Sema &Self, QualType SrcType,
+ QualType DestType, bool CStyle,
+ SourceRange OpRange,
+ unsigned &msg,
+ CastKind &Kind,
+ CXXCastPath &BasePath);
+static TryCastResult TryStaticDowncast(Sema &Self, CanQualType SrcType,
+ CanQualType DestType, bool CStyle,
+ SourceRange OpRange,
+ QualType OrigSrcType,
+ QualType OrigDestType, unsigned &msg,
+ CastKind &Kind,
+ CXXCastPath &BasePath);
+static TryCastResult TryStaticMemberPointerUpcast(Sema &Self, ExprResult &SrcExpr,
+ QualType SrcType,
+ QualType DestType,bool CStyle,
+ SourceRange OpRange,
+ unsigned &msg,
+ CastKind &Kind,
+ CXXCastPath &BasePath);
+
+static TryCastResult TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr,
+ QualType DestType,
+ Sema::CheckedConversionKind CCK,
+ SourceRange OpRange,
+ unsigned &msg, CastKind &Kind,
+ bool ListInitialization);
+static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
+ QualType DestType,
+ Sema::CheckedConversionKind CCK,
+ SourceRange OpRange,
+ unsigned &msg, CastKind &Kind,
+ CXXCastPath &BasePath,
+ bool ListInitialization);
+static TryCastResult TryConstCast(Sema &Self, ExprResult &SrcExpr,
+ QualType DestType, bool CStyle,
+ unsigned &msg);
+static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
+ QualType DestType, bool CStyle,
+ SourceRange OpRange,
+ unsigned &msg,
+ CastKind &Kind);
+
+
+/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
+ExprResult
+Sema::ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
+ SourceLocation LAngleBracketLoc, Declarator &D,
+ SourceLocation RAngleBracketLoc,
+ SourceLocation LParenLoc, Expr *E,
+ SourceLocation RParenLoc) {
+
+ assert(!D.isInvalidType());
+
+ TypeSourceInfo *TInfo = GetTypeForDeclaratorCast(D, E->getType());
+ if (D.isInvalidType())
+ return ExprError();
+
+ if (getLangOpts().CPlusPlus) {
+ // Check that there are no default arguments (C++ only).
+ CheckExtraCXXDefaultArguments(D);
+ }
+
+ return BuildCXXNamedCast(OpLoc, Kind, TInfo, E,
+ SourceRange(LAngleBracketLoc, RAngleBracketLoc),
+ SourceRange(LParenLoc, RParenLoc));
+}
+
+ExprResult
+Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
+ TypeSourceInfo *DestTInfo, Expr *E,
+ SourceRange AngleBrackets, SourceRange Parens) {
+ ExprResult Ex = E;
+ QualType DestType = DestTInfo->getType();
+
+ // If the type is dependent, we won't do the semantic analysis now.
+ bool TypeDependent =
+ DestType->isDependentType() || Ex.get()->isTypeDependent();
+
+ CastOperation Op(*this, DestType, E);
+ Op.OpRange = SourceRange(OpLoc, Parens.getEnd());
+ Op.DestRange = AngleBrackets;
+
+ switch (Kind) {
+ default: llvm_unreachable("Unknown C++ cast!");
+
+ case tok::kw_const_cast:
+ if (!TypeDependent) {
+ Op.CheckConstCast();
+ if (Op.SrcExpr.isInvalid())
+ return ExprError();
+ }
+ return Op.complete(CXXConstCastExpr::Create(Context, Op.ResultType,
+ Op.ValueKind, Op.SrcExpr.get(), DestTInfo,
+ OpLoc, Parens.getEnd(),
+ AngleBrackets));
+
+ case tok::kw_dynamic_cast: {
+ if (!TypeDependent) {
+ Op.CheckDynamicCast();
+ if (Op.SrcExpr.isInvalid())
+ return ExprError();
+ }
+ return Op.complete(CXXDynamicCastExpr::Create(Context, Op.ResultType,
+ Op.ValueKind, Op.Kind, Op.SrcExpr.get(),
+ &Op.BasePath, DestTInfo,
+ OpLoc, Parens.getEnd(),
+ AngleBrackets));
+ }
+ case tok::kw_reinterpret_cast: {
+ if (!TypeDependent) {
+ Op.CheckReinterpretCast();
+ if (Op.SrcExpr.isInvalid())
+ return ExprError();
+ }
+ return Op.complete(CXXReinterpretCastExpr::Create(Context, Op.ResultType,
+ Op.ValueKind, Op.Kind, Op.SrcExpr.get(),
+ nullptr, DestTInfo, OpLoc,
+ Parens.getEnd(),
+ AngleBrackets));
+ }
+ case tok::kw_static_cast: {
+ if (!TypeDependent) {
+ Op.CheckStaticCast();
+ if (Op.SrcExpr.isInvalid())
+ return ExprError();
+ }
+
+ return Op.complete(CXXStaticCastExpr::Create(Context, Op.ResultType,
+ Op.ValueKind, Op.Kind, Op.SrcExpr.get(),
+ &Op.BasePath, DestTInfo,
+ OpLoc, Parens.getEnd(),
+ AngleBrackets));
+ }
+ }
+}
+
+/// Try to diagnose a failed overloaded cast. Returns true if
+/// diagnostics were emitted.
+static bool tryDiagnoseOverloadedCast(Sema &S, CastType CT,
+ SourceRange range, Expr *src,
+ QualType destType,
+ bool listInitialization) {
+ switch (CT) {
+ // These cast kinds don't consider user-defined conversions.
+ case CT_Const:
+ case CT_Reinterpret:
+ case CT_Dynamic:
+ return false;
+
+ // These do.
+ case CT_Static:
+ case CT_CStyle:
+ case CT_Functional:
+ break;
+ }
+
+ QualType srcType = src->getType();
+ if (!destType->isRecordType() && !srcType->isRecordType())
+ return false;
+
+ InitializedEntity entity = InitializedEntity::InitializeTemporary(destType);
+ InitializationKind initKind
+ = (CT == CT_CStyle)? InitializationKind::CreateCStyleCast(range.getBegin(),
+ range, listInitialization)
+ : (CT == CT_Functional)? InitializationKind::CreateFunctionalCast(range,
+ listInitialization)
+ : InitializationKind::CreateCast(/*type range?*/ range);
+ InitializationSequence sequence(S, entity, initKind, src);
+
+ assert(sequence.Failed() && "initialization succeeded on second try?");
+ switch (sequence.getFailureKind()) {
+ default: return false;
+
+ case InitializationSequence::FK_ConstructorOverloadFailed:
+ case InitializationSequence::FK_UserConversionOverloadFailed:
+ break;
+ }
+
+ OverloadCandidateSet &candidates = sequence.getFailedCandidateSet();
+
+ unsigned msg = 0;
+ OverloadCandidateDisplayKind howManyCandidates = OCD_AllCandidates;
+
+ switch (sequence.getFailedOverloadResult()) {
+ case OR_Success: llvm_unreachable("successful failed overload");
+ case OR_No_Viable_Function:
+ if (candidates.empty())
+ msg = diag::err_ovl_no_conversion_in_cast;
+ else
+ msg = diag::err_ovl_no_viable_conversion_in_cast;
+ howManyCandidates = OCD_AllCandidates;
+ break;
+
+ case OR_Ambiguous:
+ msg = diag::err_ovl_ambiguous_conversion_in_cast;
+ howManyCandidates = OCD_ViableCandidates;
+ break;
+
+ case OR_Deleted:
+ msg = diag::err_ovl_deleted_conversion_in_cast;
+ howManyCandidates = OCD_ViableCandidates;
+ break;
+ }
+
+ S.Diag(range.getBegin(), msg)
+ << CT << srcType << destType
+ << range << src->getSourceRange();
+
+ candidates.NoteCandidates(S, howManyCandidates, src);
+
+ return true;
+}
+
+/// Diagnose a failed cast.
+static void diagnoseBadCast(Sema &S, unsigned msg, CastType castType,
+ SourceRange opRange, Expr *src, QualType destType,
+ bool listInitialization) {
+ if (msg == diag::err_bad_cxx_cast_generic &&
+ tryDiagnoseOverloadedCast(S, castType, opRange, src, destType,
+ listInitialization))
+ return;
+
+ S.Diag(opRange.getBegin(), msg) << castType
+ << src->getType() << destType << opRange << src->getSourceRange();
+
+ // Detect if both types are (ptr to) class, and note any incompleteness.
+ int DifferentPtrness = 0;
+ QualType From = destType;
+ if (auto Ptr = From->getAs<PointerType>()) {
+ From = Ptr->getPointeeType();
+ DifferentPtrness++;
+ }
+ QualType To = src->getType();
+ if (auto Ptr = To->getAs<PointerType>()) {
+ To = Ptr->getPointeeType();
+ DifferentPtrness--;
+ }
+ if (!DifferentPtrness) {
+ auto RecFrom = From->getAs<RecordType>();
+ auto RecTo = To->getAs<RecordType>();
+ if (RecFrom && RecTo) {
+ auto DeclFrom = RecFrom->getAsCXXRecordDecl();
+ if (!DeclFrom->isCompleteDefinition())
+ S.Diag(DeclFrom->getLocation(), diag::note_type_incomplete)
+ << DeclFrom->getDeclName();
+ auto DeclTo = RecTo->getAsCXXRecordDecl();
+ if (!DeclTo->isCompleteDefinition())
+ S.Diag(DeclTo->getLocation(), diag::note_type_incomplete)
+ << DeclTo->getDeclName();
+ }
+ }
+}
+
+/// UnwrapDissimilarPointerTypes - Like Sema::UnwrapSimilarPointerTypes,
+/// this removes one level of indirection from both types, provided that they're
+/// the same kind of pointer (plain or to-member). Unlike the Sema function,
+/// this one doesn't care if the two pointers-to-member don't point into the
+/// same class. This is because CastsAwayConstness doesn't care.
+static bool UnwrapDissimilarPointerTypes(QualType& T1, QualType& T2) {
+ const PointerType *T1PtrType = T1->getAs<PointerType>(),
+ *T2PtrType = T2->getAs<PointerType>();
+ if (T1PtrType && T2PtrType) {
+ T1 = T1PtrType->getPointeeType();
+ T2 = T2PtrType->getPointeeType();
+ return true;
+ }
+ const ObjCObjectPointerType *T1ObjCPtrType =
+ T1->getAs<ObjCObjectPointerType>(),
+ *T2ObjCPtrType =
+ T2->getAs<ObjCObjectPointerType>();
+ if (T1ObjCPtrType) {
+ if (T2ObjCPtrType) {
+ T1 = T1ObjCPtrType->getPointeeType();
+ T2 = T2ObjCPtrType->getPointeeType();
+ return true;
+ }
+ else if (T2PtrType) {
+ T1 = T1ObjCPtrType->getPointeeType();
+ T2 = T2PtrType->getPointeeType();
+ return true;
+ }
+ }
+ else if (T2ObjCPtrType) {
+ if (T1PtrType) {
+ T2 = T2ObjCPtrType->getPointeeType();
+ T1 = T1PtrType->getPointeeType();
+ return true;
+ }
+ }
+
+ const MemberPointerType *T1MPType = T1->getAs<MemberPointerType>(),
+ *T2MPType = T2->getAs<MemberPointerType>();
+ if (T1MPType && T2MPType) {
+ T1 = T1MPType->getPointeeType();
+ T2 = T2MPType->getPointeeType();
+ return true;
+ }
+
+ const BlockPointerType *T1BPType = T1->getAs<BlockPointerType>(),
+ *T2BPType = T2->getAs<BlockPointerType>();
+ if (T1BPType && T2BPType) {
+ T1 = T1BPType->getPointeeType();
+ T2 = T2BPType->getPointeeType();
+ return true;
+ }
+
+ return false;
+}
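+
+// A short walkthrough with arbitrary example types: given T1 = 'int **' and
+// T2 = 'const int **', the first call strips one pointer level from each
+// (yielding 'int *' and 'const int *'), the second yields 'int' and
+// 'const int', and the third call returns false because neither type is a
+// pointer any longer.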
+
+/// CastsAwayConstness - Check if the pointer conversion from SrcType to
+/// DestType casts away constness as defined in C++ 5.2.11p8ff. This is used by
+/// the cast checkers. Both arguments must denote pointer (possibly to member)
+/// types.
+///
+/// \param CheckCVR Whether to check for const/volatile/restrict qualifiers.
+///
+/// \param CheckObjCLifetime Whether to check Objective-C lifetime qualifiers.
+static bool
+CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType,
+ bool CheckCVR, bool CheckObjCLifetime,
+ QualType *TheOffendingSrcType = nullptr,
+ QualType *TheOffendingDestType = nullptr,
+ Qualifiers *CastAwayQualifiers = nullptr) {
+ // If the only checking we care about is for Objective-C lifetime qualifiers,
+ // and we're not in ObjC mode, there's nothing to check.
+ if (!CheckCVR && CheckObjCLifetime &&
+ !Self.Context.getLangOpts().ObjC1)
+ return false;
+
+ // Casting away constness is defined in C++ 5.2.11p8 with reference to
+ // C++ 4.4. We piggyback on Sema::IsQualificationConversion for this, since
+ // the rules are non-trivial. So first we construct Tcv *...cv* as described
+ // in C++ 5.2.11p8.
+ assert((SrcType->isAnyPointerType() || SrcType->isMemberPointerType() ||
+ SrcType->isBlockPointerType()) &&
+ "Source type is not pointer or pointer to member.");
+ assert((DestType->isAnyPointerType() || DestType->isMemberPointerType() ||
+ DestType->isBlockPointerType()) &&
+ "Destination type is not pointer or pointer to member.");
+
+ QualType UnwrappedSrcType = Self.Context.getCanonicalType(SrcType),
+ UnwrappedDestType = Self.Context.getCanonicalType(DestType);
+ SmallVector<Qualifiers, 8> cv1, cv2;
+
+ // Find the qualifiers. We only care about cvr-qualifiers for the
+ // purpose of this check, because other qualifiers (address spaces,
+ // Objective-C GC, etc.) are part of the type's identity.
+ QualType PrevUnwrappedSrcType = UnwrappedSrcType;
+ QualType PrevUnwrappedDestType = UnwrappedDestType;
+ while (UnwrapDissimilarPointerTypes(UnwrappedSrcType, UnwrappedDestType)) {
+ // Determine the relevant qualifiers at this level.
+ Qualifiers SrcQuals, DestQuals;
+ Self.Context.getUnqualifiedArrayType(UnwrappedSrcType, SrcQuals);
+ Self.Context.getUnqualifiedArrayType(UnwrappedDestType, DestQuals);
+
+ Qualifiers RetainedSrcQuals, RetainedDestQuals;
+ if (CheckCVR) {
+ RetainedSrcQuals.setCVRQualifiers(SrcQuals.getCVRQualifiers());
+ RetainedDestQuals.setCVRQualifiers(DestQuals.getCVRQualifiers());
+
+ if (RetainedSrcQuals != RetainedDestQuals && TheOffendingSrcType &&
+ TheOffendingDestType && CastAwayQualifiers) {
+ *TheOffendingSrcType = PrevUnwrappedSrcType;
+ *TheOffendingDestType = PrevUnwrappedDestType;
+ *CastAwayQualifiers = RetainedSrcQuals - RetainedDestQuals;
+ }
+ }
+
+ if (CheckObjCLifetime &&
+ !DestQuals.compatiblyIncludesObjCLifetime(SrcQuals))
+ return true;
+
+ cv1.push_back(RetainedSrcQuals);
+ cv2.push_back(RetainedDestQuals);
+
+ PrevUnwrappedSrcType = UnwrappedSrcType;
+ PrevUnwrappedDestType = UnwrappedDestType;
+ }
+ if (cv1.empty())
+ return false;
+
+ // Construct void pointers with those qualifiers (in reverse order of
+ // unwrapping, of course).
+ QualType SrcConstruct = Self.Context.VoidTy;
+ QualType DestConstruct = Self.Context.VoidTy;
+ ASTContext &Context = Self.Context;
+ for (SmallVectorImpl<Qualifiers>::reverse_iterator i1 = cv1.rbegin(),
+ i2 = cv2.rbegin();
+ i1 != cv1.rend(); ++i1, ++i2) {
+ SrcConstruct
+ = Context.getPointerType(Context.getQualifiedType(SrcConstruct, *i1));
+ DestConstruct
+ = Context.getPointerType(Context.getQualifiedType(DestConstruct, *i2));
+ }
+
+ // Test if they're compatible.
+ bool ObjCLifetimeConversion;
+ return SrcConstruct != DestConstruct &&
+ !Self.IsQualificationConversion(SrcConstruct, DestConstruct, false,
+ ObjCLifetimeConversion);
+}
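+
+// A minimal illustration (arbitrary names) of a conversion this detects:
+// removing a const that sits below the first level of indirection casts
+// away constness, so only const_cast (or a C-style cast, which may perform
+// one) is permitted.
+//
+//   const int **src = nullptr;
+//   int **dst = (int **)src;  // casts away constness at the second level;
+//                             // static_cast and reinterpret_cast reject it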
+
+/// CheckDynamicCast - Check that a dynamic_cast\<DestType\>(SrcExpr) is valid.
+/// Refer to C++ 5.2.7 for details. Dynamic casts are used mostly for runtime-
+/// checked downcasts in class hierarchies.
+void CastOperation::CheckDynamicCast() {
+ if (ValueKind == VK_RValue)
+ SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
+ else if (isPlaceholder())
+ SrcExpr = Self.CheckPlaceholderExpr(SrcExpr.get());
+ if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
+ return;
+
+ QualType OrigSrcType = SrcExpr.get()->getType();
+ QualType DestType = Self.Context.getCanonicalType(this->DestType);
+
+ // C++ 5.2.7p1: T shall be a pointer or reference to a complete class type,
+ // or "pointer to cv void".
+
+ QualType DestPointee;
+ const PointerType *DestPointer = DestType->getAs<PointerType>();
+ const ReferenceType *DestReference = nullptr;
+ if (DestPointer) {
+ DestPointee = DestPointer->getPointeeType();
+ } else if ((DestReference = DestType->getAs<ReferenceType>())) {
+ DestPointee = DestReference->getPointeeType();
+ } else {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_ref_or_ptr)
+ << this->DestType << DestRange;
+ SrcExpr = ExprError();
+ return;
+ }
+
+ const RecordType *DestRecord = DestPointee->getAs<RecordType>();
+ if (DestPointee->isVoidType()) {
+ assert(DestPointer && "Reference to void is not possible");
+ } else if (DestRecord) {
+ if (Self.RequireCompleteType(OpRange.getBegin(), DestPointee,
+ diag::err_bad_dynamic_cast_incomplete,
+ DestRange)) {
+ SrcExpr = ExprError();
+ return;
+ }
+ } else {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_class)
+ << DestPointee.getUnqualifiedType() << DestRange;
+ SrcExpr = ExprError();
+ return;
+ }
+
+ // C++0x 5.2.7p2: If T is a pointer type, v shall be an rvalue of a pointer to
+ // complete class type, [...]. If T is an lvalue reference type, v shall be
+ // an lvalue of a complete class type, [...]. If T is an rvalue reference
+ // type, v shall be an expression having a complete class type, [...]
+ QualType SrcType = Self.Context.getCanonicalType(OrigSrcType);
+ QualType SrcPointee;
+ if (DestPointer) {
+ if (const PointerType *SrcPointer = SrcType->getAs<PointerType>()) {
+ SrcPointee = SrcPointer->getPointeeType();
+ } else {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_ptr)
+ << OrigSrcType << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+ } else if (DestReference->isLValueReferenceType()) {
+ if (!SrcExpr.get()->isLValue()) {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_rvalue)
+ << CT_Dynamic << OrigSrcType << this->DestType << OpRange;
+ }
+ SrcPointee = SrcType;
+ } else {
+ // If we're dynamic_casting from a prvalue to an rvalue reference, we need
+ // to materialize the prvalue before we bind the reference to it.
+ if (SrcExpr.get()->isRValue())
+ SrcExpr = new (Self.Context) MaterializeTemporaryExpr(
+ SrcType, SrcExpr.get(), /*IsLValueReference*/false);
+ SrcPointee = SrcType;
+ }
+
+ const RecordType *SrcRecord = SrcPointee->getAs<RecordType>();
+ if (SrcRecord) {
+ if (Self.RequireCompleteType(OpRange.getBegin(), SrcPointee,
+ diag::err_bad_dynamic_cast_incomplete,
+ SrcExpr.get())) {
+ SrcExpr = ExprError();
+ return;
+ }
+ } else {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_class)
+ << SrcPointee.getUnqualifiedType() << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+
+ assert((DestPointer || DestReference) &&
+ "Bad destination non-ptr/ref slipped through.");
+ assert((DestRecord || DestPointee->isVoidType()) &&
+ "Bad destination pointee slipped through.");
+ assert(SrcRecord && "Bad source pointee slipped through.");
+
+ // C++ 5.2.7p1: The dynamic_cast operator shall not cast away constness.
+ if (!DestPointee.isAtLeastAsQualifiedAs(SrcPointee)) {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_qualifiers_away)
+ << CT_Dynamic << OrigSrcType << this->DestType << OpRange;
+ SrcExpr = ExprError();
+ return;
+ }
+
+ // C++ 5.2.7p3: If the type of v is the same as the required result type,
+ // [except for cv].
+ if (DestRecord == SrcRecord) {
+ Kind = CK_NoOp;
+ return;
+ }
+
+ // C++ 5.2.7p5
+ // Upcasts are resolved statically.
+ if (DestRecord &&
+ Self.IsDerivedFrom(OpRange.getBegin(), SrcPointee, DestPointee)) {
+ if (Self.CheckDerivedToBaseConversion(SrcPointee, DestPointee,
+ OpRange.getBegin(), OpRange,
+ &BasePath)) {
+ SrcExpr = ExprError();
+ return;
+ }
+
+ Kind = CK_DerivedToBase;
+ return;
+ }
+
+ // C++ 5.2.7p6: Otherwise, v shall be [polymorphic].
+ const RecordDecl *SrcDecl = SrcRecord->getDecl()->getDefinition();
+ assert(SrcDecl && "Definition missing");
+ if (!cast<CXXRecordDecl>(SrcDecl)->isPolymorphic()) {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_polymorphic)
+ << SrcPointee.getUnqualifiedType() << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ }
+
+ // dynamic_cast is not available with -fno-rtti.
+ // As an exception, dynamic_cast to void* is available because it doesn't
+ // use RTTI.
+ if (!Self.getLangOpts().RTTI && !DestPointee->isVoidType()) {
+ Self.Diag(OpRange.getBegin(), diag::err_no_dynamic_cast_with_fno_rtti);
+ SrcExpr = ExprError();
+ return;
+ }
+
+ // Done. Everything else is run-time checks.
+ Kind = CK_Dynamic;
+}
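+// For illustration, the checks above accept and reject casts such as:
+//   struct Base { virtual ~Base(); };
+//   struct Derived : Base {};
+//   Base *bp = new Derived;
+//   Derived *dp = dynamic_cast<Derived*>(bp); // OK: run-time checked downcast
+//   void *vp = dynamic_cast<void*>(bp);       // OK even under -fno-rtti
+//   int *ip = dynamic_cast<int*>(bp);         // error: 'int' is not a class type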
+
+/// CheckConstCast - Check that a const_cast\<DestType\>(SrcExpr) is valid.
+/// Refer to C++ 5.2.11 for details. const_cast is typically used in code
+/// like this:
+/// const char *str = "literal";
+/// legacy_function(const_cast\<char*\>(str));
+void CastOperation::CheckConstCast() {
+ if (ValueKind == VK_RValue)
+ SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
+ else if (isPlaceholder())
+ SrcExpr = Self.CheckPlaceholderExpr(SrcExpr.get());
+ if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
+ return;
+
+ unsigned msg = diag::err_bad_cxx_cast_generic;
+ if (TryConstCast(Self, SrcExpr, DestType, /*CStyle*/false, msg) != TC_Success
+ && msg != 0) {
+ Self.Diag(OpRange.getBegin(), msg) << CT_Const
+ << SrcExpr.get()->getType() << DestType << OpRange;
+ SrcExpr = ExprError();
+ }
+}
+
+/// Check that a reinterpret_cast\<DestType\>(SrcExpr) is not used as an
+/// upcast or downcast between respective pointers or references.
+static void DiagnoseReinterpretUpDownCast(Sema &Self, const Expr *SrcExpr,
+ QualType DestType,
+ SourceRange OpRange) {
+ QualType SrcType = SrcExpr->getType();
+ // When casting from pointer or reference, get pointee type; use original
+ // type otherwise.
+ const CXXRecordDecl *SrcPointeeRD = SrcType->getPointeeCXXRecordDecl();
+ const CXXRecordDecl *SrcRD =
+ SrcPointeeRD ? SrcPointeeRD : SrcType->getAsCXXRecordDecl();
+
+ // Examining subobjects for records is only possible if the complete and
+ // valid definition is available. Also, template instantiation is not
+ // allowed here.
+ if (!SrcRD || !SrcRD->isCompleteDefinition() || SrcRD->isInvalidDecl())
+ return;
+
+ const CXXRecordDecl *DestRD = DestType->getPointeeCXXRecordDecl();
+
+ if (!DestRD || !DestRD->isCompleteDefinition() || DestRD->isInvalidDecl())
+ return;
+
+ enum {
+ ReinterpretUpcast,
+ ReinterpretDowncast
+ } ReinterpretKind;
+
+ CXXBasePaths BasePaths;
+
+ if (SrcRD->isDerivedFrom(DestRD, BasePaths))
+ ReinterpretKind = ReinterpretUpcast;
+ else if (DestRD->isDerivedFrom(SrcRD, BasePaths))
+ ReinterpretKind = ReinterpretDowncast;
+ else
+ return;
+
+ bool VirtualBase = true;
+ bool NonZeroOffset = false;
+ for (CXXBasePaths::const_paths_iterator I = BasePaths.begin(),
+ E = BasePaths.end();
+ I != E; ++I) {
+ const CXXBasePath &Path = *I;
+ CharUnits Offset = CharUnits::Zero();
+ bool IsVirtual = false;
+ for (CXXBasePath::const_iterator IElem = Path.begin(), EElem = Path.end();
+ IElem != EElem; ++IElem) {
+ IsVirtual = IElem->Base->isVirtual();
+ if (IsVirtual)
+ break;
+ const CXXRecordDecl *BaseRD = IElem->Base->getType()->getAsCXXRecordDecl();
+ assert(BaseRD && "Base type should be a valid unqualified class type");
+ // Don't check if any base has an invalid declaration or no definition,
+ // since such a base has no layout info.
+ const CXXRecordDecl *Class = IElem->Class,
+ *ClassDefinition = Class->getDefinition();
+ if (Class->isInvalidDecl() || !ClassDefinition ||
+ !ClassDefinition->isCompleteDefinition())
+ return;
+
+ const ASTRecordLayout &DerivedLayout =
+ Self.Context.getASTRecordLayout(Class);
+ Offset += DerivedLayout.getBaseClassOffset(BaseRD);
+ }
+ if (!IsVirtual) {
+ // Don't warn if any path is a non-virtually derived base at offset zero.
+ if (Offset.isZero())
+ return;
+ // Offset makes sense only for non-virtual bases.
+ else
+ NonZeroOffset = true;
+ }
+ VirtualBase = VirtualBase && IsVirtual;
+ }
+
+ (void) NonZeroOffset; // Silence set but not used warning.
+ assert((VirtualBase || NonZeroOffset) &&
+ "Should have returned if has non-virtual base with zero offset");
+
+ QualType BaseType =
+ ReinterpretKind == ReinterpretUpcast? DestType : SrcType;
+ QualType DerivedType =
+ ReinterpretKind == ReinterpretUpcast? SrcType : DestType;
+
+ SourceLocation BeginLoc = OpRange.getBegin();
+ Self.Diag(BeginLoc, diag::warn_reinterpret_different_from_static)
+ << DerivedType << BaseType << !VirtualBase << int(ReinterpretKind)
+ << OpRange;
+ Self.Diag(BeginLoc, diag::note_reinterpret_updowncast_use_static)
+ << int(ReinterpretKind)
+ << FixItHint::CreateReplacement(BeginLoc, "static_cast");
+}
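+// For example, the warning above fires when reinterpret_cast is used where
+// static_cast would adjust the pointer (illustrative):
+//   struct A { int a; }; struct B { int b; };
+//   struct C : A, B {};
+//   C c;
+//   B *bp = reinterpret_cast<B*>(&c); // warned: no adjustment for B's
+//                                     // non-zero offset; static_cast would
+//                                     // add it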
+
+/// CheckReinterpretCast - Check that a reinterpret_cast\<DestType\>(SrcExpr) is
+/// valid.
+/// Refer to C++ 5.2.10 for details. reinterpret_cast is typically used in code
+/// like this:
+/// char *bytes = reinterpret_cast\<char*\>(int_ptr);
+void CastOperation::CheckReinterpretCast() {
+ if (ValueKind == VK_RValue && !isPlaceholder(BuiltinType::Overload))
+ SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
+ else
+ checkNonOverloadPlaceholders();
+ if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
+ return;
+
+ unsigned msg = diag::err_bad_cxx_cast_generic;
+ TryCastResult tcr =
+ TryReinterpretCast(Self, SrcExpr, DestType,
+ /*CStyle*/false, OpRange, msg, Kind);
+ if (tcr != TC_Success && msg != 0) {
+ if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
+ return;
+ if (SrcExpr.get()->getType() == Self.Context.OverloadTy) {
+ //FIXME: &f<int>; is overloaded and resolvable
+ Self.Diag(OpRange.getBegin(), diag::err_bad_reinterpret_cast_overload)
+ << OverloadExpr::find(SrcExpr.get()).Expression->getName()
+ << DestType << OpRange;
+ Self.NoteAllOverloadCandidates(SrcExpr.get());
+
+ } else {
+ diagnoseBadCast(Self, msg, CT_Reinterpret, OpRange, SrcExpr.get(),
+ DestType, /*listInitialization=*/false);
+ }
+ SrcExpr = ExprError();
+ } else if (tcr == TC_Success) {
+ if (Self.getLangOpts().ObjCAutoRefCount)
+ checkObjCARCConversion(Sema::CCK_OtherCast);
+ DiagnoseReinterpretUpDownCast(Self, SrcExpr.get(), DestType, OpRange);
+ }
+}
+
+/// CheckStaticCast - Check that a static_cast\<DestType\>(SrcExpr) is valid.
+/// Refer to C++ 5.2.9 for details. Static casts are mostly used for making
+/// implicit conversions explicit and getting rid of data loss warnings.
+void CastOperation::CheckStaticCast() {
+ if (isPlaceholder()) {
+ checkNonOverloadPlaceholders();
+ if (SrcExpr.isInvalid())
+ return;
+ }
+
+ // This test is outside everything else because it's the only case where
+ // a non-lvalue-reference target type does not lead to decay.
+ // C++ 5.2.9p4: Any expression can be explicitly converted to type "cv void".
+ if (DestType->isVoidType()) {
+ Kind = CK_ToVoid;
+
+ if (claimPlaceholder(BuiltinType::Overload)) {
+ Self.ResolveAndFixSingleFunctionTemplateSpecialization(SrcExpr,
+ false, // Decay Function to ptr
+ true, // Complain
+ OpRange, DestType, diag::err_bad_static_cast_overload);
+ if (SrcExpr.isInvalid())
+ return;
+ }
+
+ SrcExpr = Self.IgnoredValueConversions(SrcExpr.get());
+ return;
+ }
+
+ if (ValueKind == VK_RValue && !DestType->isRecordType() &&
+ !isPlaceholder(BuiltinType::Overload)) {
+ SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
+ if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
+ return;
+ }
+
+ unsigned msg = diag::err_bad_cxx_cast_generic;
+ TryCastResult tcr
+ = TryStaticCast(Self, SrcExpr, DestType, Sema::CCK_OtherCast, OpRange, msg,
+ Kind, BasePath, /*ListInitialization=*/false);
+ if (tcr != TC_Success && msg != 0) {
+ if (SrcExpr.isInvalid())
+ return;
+ if (SrcExpr.get()->getType() == Self.Context.OverloadTy) {
+ OverloadExpr* oe = OverloadExpr::find(SrcExpr.get()).Expression;
+ Self.Diag(OpRange.getBegin(), diag::err_bad_static_cast_overload)
+ << oe->getName() << DestType << OpRange
+ << oe->getQualifierLoc().getSourceRange();
+ Self.NoteAllOverloadCandidates(SrcExpr.get());
+ } else {
+ diagnoseBadCast(Self, msg, CT_Static, OpRange, SrcExpr.get(), DestType,
+ /*listInitialization=*/false);
+ }
+ SrcExpr = ExprError();
+ } else if (tcr == TC_Success) {
+ if (Kind == CK_BitCast)
+ checkCastAlign();
+ if (Self.getLangOpts().ObjCAutoRefCount)
+ checkObjCARCConversion(Sema::CCK_OtherCast);
+ } else if (Kind == CK_BitCast) {
+ checkCastAlign();
+ }
+}
+
+/// TryStaticCast - Check if a static cast can be performed, and do so if
+/// possible. If @p CStyle, ignore access restrictions on hierarchy casting
+/// and casting away constness.
+static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
+ QualType DestType,
+ Sema::CheckedConversionKind CCK,
+ SourceRange OpRange, unsigned &msg,
+ CastKind &Kind, CXXCastPath &BasePath,
+ bool ListInitialization) {
+ // Determine whether we have the semantics of a C-style cast.
+ bool CStyle
+ = (CCK == Sema::CCK_CStyleCast || CCK == Sema::CCK_FunctionalCast);
+
+ // The order of the tests is not entirely arbitrary. There is one conversion
+ // that can be handled in two different ways. Given:
+ // struct A {};
+ // struct B : public A {
+ // B(); B(const A&);
+ // };
+ // const A &a = B();
+ // the cast static_cast<const B&>(a) could be seen as either a static
+ // reference downcast, or an explicit invocation of the user-defined
+ // conversion using B's conversion constructor.
+ // DR 427 specifies that the downcast is to be applied here.
+
+ // C++ 5.2.9p4: Any expression can be explicitly converted to type "cv void".
+ // Done outside this function.
+
+ TryCastResult tcr;
+
+ // C++ 5.2.9p5, reference downcast.
+ // See the function for details.
+ // DR 427 specifies that this is to be applied before paragraph 2.
+ tcr = TryStaticReferenceDowncast(Self, SrcExpr.get(), DestType, CStyle,
+ OpRange, msg, Kind, BasePath);
+ if (tcr != TC_NotApplicable)
+ return tcr;
+
+ // C++11 [expr.static.cast]p3:
+ // A glvalue of type "cv1 T1" can be cast to type "rvalue reference to cv2
+ // T2" if "cv2 T2" is reference-compatible with "cv1 T1".
+ tcr = TryLValueToRValueCast(Self, SrcExpr.get(), DestType, CStyle, Kind,
+ BasePath, msg);
+ if (tcr != TC_NotApplicable)
+ return tcr;
+
+ // C++ 5.2.9p2: An expression e can be explicitly converted to a type T
+ // [...] if the declaration "T t(e);" is well-formed, [...].
+ tcr = TryStaticImplicitCast(Self, SrcExpr, DestType, CCK, OpRange, msg,
+ Kind, ListInitialization);
+ if (SrcExpr.isInvalid())
+ return TC_Failed;
+ if (tcr != TC_NotApplicable)
+ return tcr;
+
+ // C++ 5.2.9p6: May apply the reverse of any standard conversion, except
+ // lvalue-to-rvalue, array-to-pointer, function-to-pointer, and boolean
+ // conversions, subject to further restrictions.
+ // Also, C++ 5.2.9p1 forbids casting away constness, which makes reversal
+ // of qualification conversions impossible.
+ // In the CStyle case, the earlier attempt to const_cast should have taken
+ // care of reverse qualification conversions.
+
+ QualType SrcType = Self.Context.getCanonicalType(SrcExpr.get()->getType());
+
+ // C++0x 5.2.9p9: A value of a scoped enumeration type can be explicitly
+ // converted to an integral type. [...] A value of a scoped enumeration type
+ // can also be explicitly converted to a floating-point type [...].
+ if (const EnumType *Enum = SrcType->getAs<EnumType>()) {
+ if (Enum->getDecl()->isScoped()) {
+ if (DestType->isBooleanType()) {
+ Kind = CK_IntegralToBoolean;
+ return TC_Success;
+ } else if (DestType->isIntegralType(Self.Context)) {
+ Kind = CK_IntegralCast;
+ return TC_Success;
+ } else if (DestType->isRealFloatingType()) {
+ Kind = CK_IntegralToFloating;
+ return TC_Success;
+ }
+ }
+ }
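+ // For example (illustrative):
+ //   enum class E : int { A = 1 };
+ //   int i = static_cast<int>(E::A);       // CK_IntegralCast
+ //   bool b = static_cast<bool>(E::A);     // CK_IntegralToBoolean
+ //   double d = static_cast<double>(E::A); // CK_IntegralToFloating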
+
+ // Reverse integral promotion/conversion. All such conversions are themselves
+ // again integral promotions or conversions and are thus already handled by
+ // p2 (TryStaticImplicitCast above).
+ // (Note: any data loss warnings should be suppressed.)
+ // The exception is the reverse of enum->integer, i.e. integer->enum (and
+ // enum->enum). See also C++ 5.2.9p7.
+ // The same goes for reverse floating point promotion/conversion and
+ // floating-integral conversions. Again, only floating->enum is relevant.
+ if (DestType->isEnumeralType()) {
+ if (SrcType->isIntegralOrEnumerationType()) {
+ Kind = CK_IntegralCast;
+ return TC_Success;
+ } else if (SrcType->isRealFloatingType()) {
+ Kind = CK_FloatingToIntegral;
+ return TC_Success;
+ }
+ }
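+ // For example (illustrative):
+ //   enum E { X };
+ //   E e1 = static_cast<E>(0);   // CK_IntegralCast
+ //   E e2 = static_cast<E>(0.0); // CK_FloatingToIntegral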
+
+ // Reverse pointer upcast. C++ 4.10p3 specifies pointer upcast.
+ // C++ 5.2.9p8 additionally disallows a cast path through virtual inheritance.
+ tcr = TryStaticPointerDowncast(Self, SrcType, DestType, CStyle, OpRange, msg,
+ Kind, BasePath);
+ if (tcr != TC_NotApplicable)
+ return tcr;
+
+ // Reverse member pointer conversion. C++ 4.11 specifies member pointer
+ // conversion. C++ 5.2.9p9 has additional information.
+ // DR54's access restrictions apply here also.
+ tcr = TryStaticMemberPointerUpcast(Self, SrcExpr, SrcType, DestType, CStyle,
+ OpRange, msg, Kind, BasePath);
+ if (tcr != TC_NotApplicable)
+ return tcr;
+
+ // Reverse pointer conversion to void*. C++ 4.10.p2 specifies conversion to
+ // void*. C++ 5.2.9p10 specifies additional restrictions, which really is
+ // just the usual constness stuff.
+ if (const PointerType *SrcPointer = SrcType->getAs<PointerType>()) {
+ QualType SrcPointee = SrcPointer->getPointeeType();
+ if (SrcPointee->isVoidType()) {
+ if (const PointerType *DestPointer = DestType->getAs<PointerType>()) {
+ QualType DestPointee = DestPointer->getPointeeType();
+ if (DestPointee->isIncompleteOrObjectType()) {
+ // This is definitely the intended conversion, but it might fail due
+ // to a qualifier violation. Note that we permit Objective-C lifetime
+ // and GC qualifier mismatches here.
+ if (!CStyle) {
+ Qualifiers DestPointeeQuals = DestPointee.getQualifiers();
+ Qualifiers SrcPointeeQuals = SrcPointee.getQualifiers();
+ DestPointeeQuals.removeObjCGCAttr();
+ DestPointeeQuals.removeObjCLifetime();
+ SrcPointeeQuals.removeObjCGCAttr();
+ SrcPointeeQuals.removeObjCLifetime();
+ if (DestPointeeQuals != SrcPointeeQuals &&
+ !DestPointeeQuals.compatiblyIncludes(SrcPointeeQuals)) {
+ msg = diag::err_bad_cxx_cast_qualifiers_away;
+ return TC_Failed;
+ }
+ }
+ Kind = CK_BitCast;
+ return TC_Success;
+ }
+
+ // Microsoft permits static_cast from 'pointer-to-void' to
+ // 'pointer-to-function'.
+ if (!CStyle && Self.getLangOpts().MSVCCompat &&
+ DestPointee->isFunctionType()) {
+ Self.Diag(OpRange.getBegin(), diag::ext_ms_cast_fn_obj) << OpRange;
+ Kind = CK_BitCast;
+ return TC_Success;
+ }
+ } else if (DestType->isObjCObjectPointerType()) {
+ // Allow both C-style casts and static_casts of Objective-C pointers, as
+ // they are pervasive.
+ Kind = CK_CPointerToObjCPointerCast;
+ return TC_Success;
+ } else if (CStyle && DestType->isBlockPointerType()) {
+ // Allow C-style casts of void* to block pointers.
+ Kind = CK_AnyPointerToBlockPointerCast;
+ return TC_Success;
+ }
+ }
+ }
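+ // For example (illustrative):
+ //   int x = 0;
+ //   void *vp = &x;
+ //   int *ip = static_cast<int*>(vp); // CK_BitCast, qualifiers permitting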
+ // Allow arbitrary Objective-C pointer conversion with static casts.
+ if (SrcType->isObjCObjectPointerType() &&
+ DestType->isObjCObjectPointerType()) {
+ Kind = CK_BitCast;
+ return TC_Success;
+ }
+ // Allow ns-pointer to cf-pointer conversion in either direction
+ // with static casts.
+ if (!CStyle &&
+ Self.CheckTollFreeBridgeStaticCast(DestType, SrcExpr.get(), Kind))
+ return TC_Success;
+
+ // See if it looks like the user is trying to convert between
+ // related record types, and select a better diagnostic if so.
+ if (auto SrcPointer = SrcType->getAs<PointerType>())
+ if (auto DestPointer = DestType->getAs<PointerType>())
+ if (SrcPointer->getPointeeType()->getAs<RecordType>() &&
+ DestPointer->getPointeeType()->getAs<RecordType>())
+ msg = diag::err_bad_cxx_cast_unrelated_class;
+
+ // We tried everything. Everything! Nothing works! :-(
+ return TC_NotApplicable;
+}
+
+/// Tests whether a conversion according to N2844 is valid.
+TryCastResult
+TryLValueToRValueCast(Sema &Self, Expr *SrcExpr, QualType DestType,
+ bool CStyle, CastKind &Kind, CXXCastPath &BasePath,
+ unsigned &msg) {
+ // C++11 [expr.static.cast]p3:
+ // A glvalue of type "cv1 T1" can be cast to type "rvalue reference to
+ // cv2 T2" if "cv2 T2" is reference-compatible with "cv1 T1".
+ const RValueReferenceType *R = DestType->getAs<RValueReferenceType>();
+ if (!R)
+ return TC_NotApplicable;
+
+ if (!SrcExpr->isGLValue())
+ return TC_NotApplicable;
+
+ // Because we try the reference downcast before this function, from now on
+ // this is the only cast possibility, so we issue an error if we fail now.
+ // FIXME: Should allow casting away constness if CStyle.
+ bool DerivedToBase;
+ bool ObjCConversion;
+ bool ObjCLifetimeConversion;
+ QualType FromType = SrcExpr->getType();
+ QualType ToType = R->getPointeeType();
+ if (CStyle) {
+ FromType = FromType.getUnqualifiedType();
+ ToType = ToType.getUnqualifiedType();
+ }
+
+ if (Self.CompareReferenceRelationship(SrcExpr->getLocStart(),
+ ToType, FromType,
+ DerivedToBase, ObjCConversion,
+ ObjCLifetimeConversion)
+ < Sema::Ref_Compatible_With_Added_Qualification) {
+ if (CStyle)
+ return TC_NotApplicable;
+ msg = diag::err_bad_lvalue_to_rvalue_cast;
+ return TC_Failed;
+ }
+
+ if (DerivedToBase) {
+ Kind = CK_DerivedToBase;
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/true);
+ if (!Self.IsDerivedFrom(SrcExpr->getLocStart(), SrcExpr->getType(),
+ R->getPointeeType(), Paths))
+ return TC_NotApplicable;
+
+ Self.BuildBasePathArray(Paths, BasePath);
+ } else
+ Kind = CK_NoOp;
+
+ return TC_Success;
+}
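+// For example, the N2844 rule above allows (illustrative):
+//   int x = 0;
+//   int &&r = static_cast<int&&>(x); // glvalue to rvalue reference, CK_NoOp
+//   struct B {}; struct D : B {};
+//   D d;
+//   B &&br = static_cast<B&&>(d);    // with base adjustment, CK_DerivedToBase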
+
+/// Tests whether a conversion according to C++ 5.2.9p5 is valid.
+TryCastResult
+TryStaticReferenceDowncast(Sema &Self, Expr *SrcExpr, QualType DestType,
+ bool CStyle, SourceRange OpRange,
+ unsigned &msg, CastKind &Kind,
+ CXXCastPath &BasePath) {
+ // C++ 5.2.9p5: An lvalue of type "cv1 B", where B is a class type, can be
+ // cast to type "reference to cv2 D", where D is a class derived from B,
+ // if a valid standard conversion from "pointer to D" to "pointer to B"
+ // exists, cv2 >= cv1, and B is not a virtual base class of D.
+ // In addition, DR54 clarifies that the base must be accessible in the
+ // current context. Although the wording of DR54 only applies to the pointer
+ // variant of this rule, the intent is clearly for it to apply to this
+ // conversion as well.
+
+ const ReferenceType *DestReference = DestType->getAs<ReferenceType>();
+ if (!DestReference) {
+ return TC_NotApplicable;
+ }
+ bool RValueRef = DestReference->isRValueReferenceType();
+ if (!RValueRef && !SrcExpr->isLValue()) {
+ // We know the left side is an lvalue reference, so we can suggest a reason.
+ msg = diag::err_bad_cxx_cast_rvalue;
+ return TC_NotApplicable;
+ }
+
+ QualType DestPointee = DestReference->getPointeeType();
+
+ // FIXME: If the source is a prvalue, we should issue a warning (because the
+ // cast always has undefined behavior), and for AST consistency, we should
+ // materialize a temporary.
+ return TryStaticDowncast(Self,
+ Self.Context.getCanonicalType(SrcExpr->getType()),
+ Self.Context.getCanonicalType(DestPointee), CStyle,
+ OpRange, SrcExpr->getType(), DestType, msg, Kind,
+ BasePath);
+}
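+// For example (illustrative):
+//   struct B {}; struct D : B {};
+//   D d;
+//   B &br = d;
+//   D &dr = static_cast<D&>(br); // reference downcast, CK_BaseToDerived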
+
+/// Tests whether a conversion according to C++ 5.2.9p8 is valid.
+TryCastResult
+TryStaticPointerDowncast(Sema &Self, QualType SrcType, QualType DestType,
+ bool CStyle, SourceRange OpRange,
+ unsigned &msg, CastKind &Kind,
+ CXXCastPath &BasePath) {
+ // C++ 5.2.9p8: An rvalue of type "pointer to cv1 B", where B is a class
+ // type, can be converted to an rvalue of type "pointer to cv2 D", where D
+ // is a class derived from B, if a valid standard conversion from "pointer
+ // to D" to "pointer to B" exists, cv2 >= cv1, and B is not a virtual base
+ // class of D.
+ // In addition, DR54 clarifies that the base must be accessible in the
+ // current context.
+
+ const PointerType *DestPointer = DestType->getAs<PointerType>();
+ if (!DestPointer) {
+ return TC_NotApplicable;
+ }
+
+ const PointerType *SrcPointer = SrcType->getAs<PointerType>();
+ if (!SrcPointer) {
+ msg = diag::err_bad_static_cast_pointer_nonpointer;
+ return TC_NotApplicable;
+ }
+
+ return TryStaticDowncast(Self,
+ Self.Context.getCanonicalType(SrcPointer->getPointeeType()),
+ Self.Context.getCanonicalType(DestPointer->getPointeeType()),
+ CStyle, OpRange, SrcType, DestType, msg, Kind,
+ BasePath);
+}
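+// For example (illustrative):
+//   struct B {}; struct D : B {}; struct V : virtual B {};
+//   B *bp = new D;
+//   D *dp = static_cast<D*>(bp); // OK: CK_BaseToDerived
+//   B *vb = new V;
+//   V *vp = static_cast<V*>(vb); // error: downcast via a virtual base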
+
+/// TryStaticDowncast - Common functionality of TryStaticReferenceDowncast and
+/// TryStaticPointerDowncast. Tests whether a static downcast from SrcType to
+/// DestType is possible and allowed.
+TryCastResult
+TryStaticDowncast(Sema &Self, CanQualType SrcType, CanQualType DestType,
+ bool CStyle, SourceRange OpRange, QualType OrigSrcType,
+ QualType OrigDestType, unsigned &msg,
+ CastKind &Kind, CXXCastPath &BasePath) {
+ // We can only work with complete types. But don't complain if it doesn't work.
+ if (!Self.isCompleteType(OpRange.getBegin(), SrcType) ||
+ !Self.isCompleteType(OpRange.getBegin(), DestType))
+ return TC_NotApplicable;
+
+ // Downcast can only happen in class hierarchies, so we need classes.
+ if (!DestType->getAs<RecordType>() || !SrcType->getAs<RecordType>()) {
+ return TC_NotApplicable;
+ }
+
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/true);
+ if (!Self.IsDerivedFrom(OpRange.getBegin(), DestType, SrcType, Paths)) {
+ return TC_NotApplicable;
+ }
+
+ // Target type does derive from source type. Now we're serious. If an error
+ // appears now, it's not ignored.
+ // This may not be entirely in line with the standard. Take for example:
+ // struct A {};
+ // struct B : virtual A {
+ // B(A&);
+ // };
+ //
+ // void f()
+ // {
+ // (void)static_cast<const B&>(*((A*)0));
+ // }
+ // As far as the standard is concerned, p5 does not apply (A is virtual), so
+ // p2 should be used instead - "const B& t(*((A*)0));" is perfectly valid.
+ // However, both GCC and Comeau reject this example, and accepting it would
+ // mean more complex code if we're to preserve the nice error message.
+ // FIXME: Being 100% compliant here would be nice to have.
+
+ // Must preserve cv, as always, unless we're in C-style mode.
+ if (!CStyle && !DestType.isAtLeastAsQualifiedAs(SrcType)) {
+ msg = diag::err_bad_cxx_cast_qualifiers_away;
+ return TC_Failed;
+ }
+
+ if (Paths.isAmbiguous(SrcType.getUnqualifiedType())) {
+ // This code is analogous to that in CheckDerivedToBaseConversion, except
+ // that it builds the paths in reverse order.
+ // To sum up: record all paths to the base and build a nice string from
+ // them. Use it to spice up the error message.
+ if (!Paths.isRecordingPaths()) {
+ Paths.clear();
+ Paths.setRecordingPaths(true);
+ Self.IsDerivedFrom(OpRange.getBegin(), DestType, SrcType, Paths);
+ }
+ std::string PathDisplayStr;
+ std::set<unsigned> DisplayedPaths;
+ for (CXXBasePaths::paths_iterator PI = Paths.begin(), PE = Paths.end();
+ PI != PE; ++PI) {
+ if (DisplayedPaths.insert(PI->back().SubobjectNumber).second) {
+ // We haven't displayed a path to this particular base
+ // class subobject yet.
+ PathDisplayStr += "\n ";
+ for (CXXBasePath::const_reverse_iterator EI = PI->rbegin(),
+ EE = PI->rend();
+ EI != EE; ++EI)
+ PathDisplayStr += EI->Base->getType().getAsString() + " -> ";
+ PathDisplayStr += QualType(DestType).getAsString();
+ }
+ }
+
+ Self.Diag(OpRange.getBegin(), diag::err_ambiguous_base_to_derived_cast)
+ << QualType(SrcType).getUnqualifiedType()
+ << QualType(DestType).getUnqualifiedType()
+ << PathDisplayStr << OpRange;
+ msg = 0;
+ return TC_Failed;
+ }
+
+ if (Paths.getDetectedVirtual() != nullptr) {
+ QualType VirtualBase(Paths.getDetectedVirtual(), 0);
+ Self.Diag(OpRange.getBegin(), diag::err_static_downcast_via_virtual)
+ << OrigSrcType << OrigDestType << VirtualBase << OpRange;
+ msg = 0;
+ return TC_Failed;
+ }
+
+ if (!CStyle) {
+ switch (Self.CheckBaseClassAccess(OpRange.getBegin(),
+ SrcType, DestType,
+ Paths.front(),
+ diag::err_downcast_from_inaccessible_base)) {
+ case Sema::AR_accessible:
+ case Sema::AR_delayed: // be optimistic
+ case Sema::AR_dependent: // be optimistic
+ break;
+
+ case Sema::AR_inaccessible:
+ msg = 0;
+ return TC_Failed;
+ }
+ }
+
+ Self.BuildBasePathArray(Paths, BasePath);
+ Kind = CK_BaseToDerived;
+ return TC_Success;
+}
+
+/// TryStaticMemberPointerUpcast - Tests whether a conversion according to
+/// C++ 5.2.9p9 is valid:
+///
+/// An rvalue of type "pointer to member of D of type cv1 T" can be
+/// converted to an rvalue of type "pointer to member of B of type cv2 T",
+/// where B is a base class of D [...].
+///
+TryCastResult
+TryStaticMemberPointerUpcast(Sema &Self, ExprResult &SrcExpr, QualType SrcType,
+ QualType DestType, bool CStyle,
+ SourceRange OpRange,
+ unsigned &msg, CastKind &Kind,
+ CXXCastPath &BasePath) {
+ const MemberPointerType *DestMemPtr = DestType->getAs<MemberPointerType>();
+ if (!DestMemPtr)
+ return TC_NotApplicable;
+
+ bool WasOverloadedFunction = false;
+ DeclAccessPair FoundOverload;
+ if (SrcExpr.get()->getType() == Self.Context.OverloadTy) {
+ if (FunctionDecl *Fn
+ = Self.ResolveAddressOfOverloadedFunction(SrcExpr.get(), DestType, false,
+ FoundOverload)) {
+ CXXMethodDecl *M = cast<CXXMethodDecl>(Fn);
+ SrcType = Self.Context.getMemberPointerType(Fn->getType(),
+ Self.Context.getTypeDeclType(M->getParent()).getTypePtr());
+ WasOverloadedFunction = true;
+ }
+ }
+
+ const MemberPointerType *SrcMemPtr = SrcType->getAs<MemberPointerType>();
+ if (!SrcMemPtr) {
+ msg = diag::err_bad_static_cast_member_pointer_nonmp;
+ return TC_NotApplicable;
+ }
+
+ // Lock down the inheritance model right now in MS ABI, whether or not the
+ // pointee types are the same.
+ if (Self.Context.getTargetInfo().getCXXABI().isMicrosoft())
+ (void)Self.isCompleteType(OpRange.getBegin(), SrcType);
+
+ // T == T, modulo cv
+ if (!Self.Context.hasSameUnqualifiedType(SrcMemPtr->getPointeeType(),
+ DestMemPtr->getPointeeType()))
+ return TC_NotApplicable;
+
+ // B base of D
+ QualType SrcClass(SrcMemPtr->getClass(), 0);
+ QualType DestClass(DestMemPtr->getClass(), 0);
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/true);
+ if (!Self.IsDerivedFrom(OpRange.getBegin(), SrcClass, DestClass, Paths))
+ return TC_NotApplicable;
+
+ // B is a base of D. But is it an allowed base? If not, it's a hard error.
+ if (Paths.isAmbiguous(Self.Context.getCanonicalType(DestClass))) {
+ Paths.clear();
+ Paths.setRecordingPaths(true);
+ bool StillOkay =
+ Self.IsDerivedFrom(OpRange.getBegin(), SrcClass, DestClass, Paths);
+ assert(StillOkay);
+ (void)StillOkay;
+ std::string PathDisplayStr = Self.getAmbiguousPathsDisplayString(Paths);
+ Self.Diag(OpRange.getBegin(), diag::err_ambiguous_memptr_conv)
+ << 1 << SrcClass << DestClass << PathDisplayStr << OpRange;
+ msg = 0;
+ return TC_Failed;
+ }
+
+ if (const RecordType *VBase = Paths.getDetectedVirtual()) {
+ Self.Diag(OpRange.getBegin(), diag::err_memptr_conv_via_virtual)
+ << SrcClass << DestClass << QualType(VBase, 0) << OpRange;
+ msg = 0;
+ return TC_Failed;
+ }
+
+ if (!CStyle) {
+ switch (Self.CheckBaseClassAccess(OpRange.getBegin(),
+ DestClass, SrcClass,
+ Paths.front(),
+ diag::err_upcast_to_inaccessible_base)) {
+ case Sema::AR_accessible:
+ case Sema::AR_delayed:
+ case Sema::AR_dependent:
+ // Optimistically assume that the delayed and dependent cases
+ // will work out.
+ break;
+
+ case Sema::AR_inaccessible:
+ msg = 0;
+ return TC_Failed;
+ }
+ }
+
+ if (WasOverloadedFunction) {
+ // Resolve the address of the overloaded function again, this time
+ // allowing complaints if something goes wrong.
+ FunctionDecl *Fn = Self.ResolveAddressOfOverloadedFunction(SrcExpr.get(),
+ DestType,
+ true,
+ FoundOverload);
+ if (!Fn) {
+ msg = 0;
+ return TC_Failed;
+ }
+
+ SrcExpr = Self.FixOverloadedFunctionReference(SrcExpr, FoundOverload, Fn);
+ if (!SrcExpr.isUsable()) {
+ msg = 0;
+ return TC_Failed;
+ }
+ }
+
+ Self.BuildBasePathArray(Paths, BasePath);
+ Kind = CK_DerivedToBaseMemberPointer;
+ return TC_Success;
+}
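+// For example (illustrative):
+//   struct B {}; struct D : B { int m; };
+//   int D::*dm = &D::m;
+//   int B::*bm = static_cast<int B::*>(dm); // CK_DerivedToBaseMemberPointer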
+
+/// TryStaticImplicitCast - Tests whether a conversion according to C++ 5.2.9p2
+/// is valid:
+///
+/// An expression e can be explicitly converted to a type T using a
+/// @c static_cast if the declaration "T t(e);" is well-formed [...].
+TryCastResult
+TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
+ Sema::CheckedConversionKind CCK,
+ SourceRange OpRange, unsigned &msg,
+ CastKind &Kind, bool ListInitialization) {
+ if (DestType->isRecordType()) {
+ if (Self.RequireCompleteType(OpRange.getBegin(), DestType,
+ diag::err_bad_dynamic_cast_incomplete) ||
+ Self.RequireNonAbstractType(OpRange.getBegin(), DestType,
+ diag::err_allocation_of_abstract_type)) {
+ msg = 0;
+ return TC_Failed;
+ }
+ }
+
+ InitializedEntity Entity = InitializedEntity::InitializeTemporary(DestType);
+ InitializationKind InitKind
+ = (CCK == Sema::CCK_CStyleCast)
+ ? InitializationKind::CreateCStyleCast(OpRange.getBegin(), OpRange,
+ ListInitialization)
+ : (CCK == Sema::CCK_FunctionalCast)
+ ? InitializationKind::CreateFunctionalCast(OpRange, ListInitialization)
+ : InitializationKind::CreateCast(OpRange);
+ Expr *SrcExprRaw = SrcExpr.get();
+ InitializationSequence InitSeq(Self, Entity, InitKind, SrcExprRaw);
+
+ // At this point of CheckStaticCast, if the destination is a reference,
+ // or the expression is an overload expression this has to work.
+ // There is no other way that works.
+ // On the other hand, if we're checking a C-style cast, we've still got
+ // the reinterpret_cast way.
+ bool CStyle
+ = (CCK == Sema::CCK_CStyleCast || CCK == Sema::CCK_FunctionalCast);
+ if (InitSeq.Failed() && (CStyle || !DestType->isReferenceType()))
+ return TC_NotApplicable;
+
+ ExprResult Result = InitSeq.Perform(Self, Entity, InitKind, SrcExprRaw);
+ if (Result.isInvalid()) {
+ msg = 0;
+ return TC_Failed;
+ }
+
+ if (InitSeq.isConstructorInitialization())
+ Kind = CK_ConstructorConversion;
+ else
+ Kind = CK_NoOp;
+
+ SrcExpr = Result;
+ return TC_Success;
+}
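+// For example (illustrative), static_cast<std::string>("hi") succeeds because
+// the direct-initialization "std::string t("hi");" is well-formed; it selects
+// a converting constructor, so Kind is CK_ConstructorConversion.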
+
+/// TryConstCast - See if a const_cast from source to destination is allowed,
+/// and perform it if it is.
+static TryCastResult TryConstCast(Sema &Self, ExprResult &SrcExpr,
+ QualType DestType, bool CStyle,
+ unsigned &msg) {
+ DestType = Self.Context.getCanonicalType(DestType);
+ QualType SrcType = SrcExpr.get()->getType();
+ bool NeedToMaterializeTemporary = false;
+
+ if (const ReferenceType *DestTypeTmp = DestType->getAs<ReferenceType>()) {
+ // C++11 5.2.11p4:
+ // if a pointer to T1 can be explicitly converted to the type "pointer to
+ // T2" using a const_cast, then the following conversions can also be
+ // made:
+ // -- an lvalue of type T1 can be explicitly converted to an lvalue of
+ // type T2 using the cast const_cast<T2&>;
+ // -- a glvalue of type T1 can be explicitly converted to an xvalue of
+ // type T2 using the cast const_cast<T2&&>; and
+ // -- if T1 is a class type, a prvalue of type T1 can be explicitly
+ // converted to an xvalue of type T2 using the cast const_cast<T2&&>.
+
+ if (isa<LValueReferenceType>(DestTypeTmp) && !SrcExpr.get()->isLValue()) {
+ // Cannot const_cast non-lvalue to lvalue reference type. But if this
+ // is C-style, static_cast might find a way, so we simply suggest a
+ // message and tell the parent to keep searching.
+ msg = diag::err_bad_cxx_cast_rvalue;
+ return TC_NotApplicable;
+ }
+
+ if (isa<RValueReferenceType>(DestTypeTmp) && SrcExpr.get()->isRValue()) {
+ if (!SrcType->isRecordType()) {
+ // Cannot const_cast non-class prvalue to rvalue reference type. But if
+ // this is C-style, static_cast can do this.
+ msg = diag::err_bad_cxx_cast_rvalue;
+ return TC_NotApplicable;
+ }
+
+ // Materialize the class prvalue so that the const_cast can bind a
+ // reference to it.
+ NeedToMaterializeTemporary = true;
+ }
+
+ // It's not completely clear under the standard whether we can
+ // const_cast bit-field glvalues. Doing so would not be
+ // intrinsically complicated, but for now, we say no for
+ // consistency with other compilers and await the word of the
+ // committee.
+ if (SrcExpr.get()->refersToBitField()) {
+ msg = diag::err_bad_cxx_cast_bitfield;
+ return TC_NotApplicable;
+ }
+
+ DestType = Self.Context.getPointerType(DestTypeTmp->getPointeeType());
+ SrcType = Self.Context.getPointerType(SrcType);
+ }
+
+ // C++ 5.2.11p5: For a const_cast involving pointers to data members [...]
+ // the rules for const_cast are the same as those used for pointers.
+
+ if (!DestType->isPointerType() &&
+ !DestType->isMemberPointerType() &&
+ !DestType->isObjCObjectPointerType()) {
+ // Cannot cast to non-pointer, non-reference type. Note that, if DestType
+ // was a reference type, we converted it to a pointer above.
+ // The status of rvalue references isn't entirely clear, but it looks like
+ // conversion to them is simply invalid.
+ // C++ 5.2.11p3: For two pointer types [...]
+ if (!CStyle)
+ msg = diag::err_bad_const_cast_dest;
+ return TC_NotApplicable;
+ }
+ if (DestType->isFunctionPointerType() ||
+ DestType->isMemberFunctionPointerType()) {
+ // Cannot cast direct function pointers.
+ // C++ 5.2.11p2: [...] where T is any object type or the void type [...]
+ // T is the ultimate pointee of source and target type.
+ if (!CStyle)
+ msg = diag::err_bad_const_cast_dest;
+ return TC_NotApplicable;
+ }
+ SrcType = Self.Context.getCanonicalType(SrcType);
+
+ // Unwrap the pointers. Ignore qualifiers. Terminate early if the types are
+ // completely equal.
+ // C++ 5.2.11p3 describes the core semantics of const_cast. All cv specifiers
+ // in multi-level pointers may change, but the level count must be the same,
+ // as must be the final pointee type.
+ while (SrcType != DestType &&
+ Self.Context.UnwrapSimilarPointerTypes(SrcType, DestType)) {
+ Qualifiers SrcQuals, DestQuals;
+ SrcType = Self.Context.getUnqualifiedArrayType(SrcType, SrcQuals);
+ DestType = Self.Context.getUnqualifiedArrayType(DestType, DestQuals);
+
+ // const_cast is permitted to strip cvr-qualifiers, only. Make sure that
+ // the other qualifiers (e.g., address spaces) are identical.
+ SrcQuals.removeCVRQualifiers();
+ DestQuals.removeCVRQualifiers();
+ if (SrcQuals != DestQuals)
+ return TC_NotApplicable;
+ }
+
+ // Since we're dealing in canonical types, the remainder must be the same.
+ if (SrcType != DestType)
+ return TC_NotApplicable;
+
+ if (NeedToMaterializeTemporary)
+ // This is a const_cast from a class prvalue to an rvalue reference type.
+ // Materialize a temporary to store the result of the conversion.
+ SrcExpr = new (Self.Context) MaterializeTemporaryExpr(
+ SrcType, SrcExpr.get(), /*IsLValueReference*/ false);
+
+ return TC_Success;
+}
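+// For example (illustrative), only cv-qualifiers may change, level by level:
+//   const int *const *p = nullptr;
+//   int **q = const_cast<int**>(p); // OK: same shape, only cv differs
+//   int *r = const_cast<int*>(p);   // rejected: pointer depth differs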
+
+// Checks for undefined behavior in reinterpret_cast.
+// The cases checked for are:
+// *reinterpret_cast<T*>(&a)
+// reinterpret_cast<T&>(a)
+// where accessing 'a' as type 'T' will result in undefined behavior.
+void Sema::CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
+ bool IsDereference,
+ SourceRange Range) {
+ unsigned DiagID = IsDereference ?
+ diag::warn_pointer_indirection_from_incompatible_type :
+ diag::warn_undefined_reinterpret_cast;
+
+ if (Diags.isIgnored(DiagID, Range.getBegin()))
+ return;
+
+ QualType SrcTy, DestTy;
+ if (IsDereference) {
+ if (!SrcType->getAs<PointerType>() || !DestType->getAs<PointerType>()) {
+ return;
+ }
+ SrcTy = SrcType->getPointeeType();
+ DestTy = DestType->getPointeeType();
+ } else {
+ if (!DestType->getAs<ReferenceType>()) {
+ return;
+ }
+ SrcTy = SrcType;
+ DestTy = DestType->getPointeeType();
+ }
+
+ // Cast is compatible if the types are the same.
+ if (Context.hasSameUnqualifiedType(DestTy, SrcTy)) {
+ return;
+ }
+ // or one of the types is a char or void type
+ if (DestTy->isAnyCharacterType() || DestTy->isVoidType() ||
+ SrcTy->isAnyCharacterType() || SrcTy->isVoidType()) {
+ return;
+ }
+ // or one of the types is a tag type.
+ if (SrcTy->getAs<TagType>() || DestTy->getAs<TagType>()) {
+ return;
+ }
+
+ // FIXME: Scoped enums?
+ if ((SrcTy->isUnsignedIntegerType() && DestTy->isSignedIntegerType()) ||
+ (SrcTy->isSignedIntegerType() && DestTy->isUnsignedIntegerType())) {
+ if (Context.getTypeSize(DestTy) == Context.getTypeSize(SrcTy)) {
+ return;
+ }
+ }
+
+ Diag(Range.getBegin(), DiagID) << SrcType << DestType << Range;
+}
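+// For example (illustrative):
+//   float f = 0;
+//   int &i = reinterpret_cast<int&>(f); // warned: accessing 'f' as 'int'
+//                                       // is undefined behavior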
+
+static void DiagnoseCastOfObjCSEL(Sema &Self, const ExprResult &SrcExpr,
+ QualType DestType) {
+ QualType SrcType = SrcExpr.get()->getType();
+ if (Self.Context.hasSameType(SrcType, DestType))
+ return;
+ if (const PointerType *SrcPtrTy = SrcType->getAs<PointerType>())
+ if (SrcPtrTy->isObjCSelType()) {
+ QualType DT = DestType;
+ if (isa<PointerType>(DestType))
+ DT = DestType->getPointeeType();
+ if (!DT.getUnqualifiedType()->isVoidType())
+ Self.Diag(SrcExpr.get()->getExprLoc(),
+ diag::warn_cast_pointer_from_sel)
+ << SrcType << DestType << SrcExpr.get()->getSourceRange();
+ }
+}
+
+static void checkIntToPointerCast(bool CStyle, SourceLocation Loc,
+ const Expr *SrcExpr, QualType DestType,
+ Sema &Self) {
+ QualType SrcType = SrcExpr->getType();
+
+ // Not warning on reinterpret_cast, boolean, constant expressions, etc. is
+ // not an explicit design choice, but is consistent with GCC's behavior.
+ // Feel free to modify this if you have reason or evidence for an alternative.
+ if (CStyle && SrcType->isIntegralType(Self.Context)
+ && !SrcType->isBooleanType()
+ && !SrcType->isEnumeralType()
+ && !SrcExpr->isIntegerConstantExpr(Self.Context)
+ && Self.Context.getTypeSize(DestType) >
+ Self.Context.getTypeSize(SrcType)) {
+ // Separate between casts to void* and non-void* pointers.
+ // Some APIs use (abuse) void* for something like a user context,
+ // and often that value is an integer even if it isn't a pointer itself.
+ // Having a separate warning flag allows users to control the warning
+ // for their workflow.
+ unsigned Diag = DestType->isVoidPointerType() ?
+ diag::warn_int_to_void_pointer_cast
+ : diag::warn_int_to_pointer_cast;
+ Self.Diag(Loc, Diag) << SrcType << DestType;
+ }
+}
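+// For example (illustrative), with 32-bit int and 64-bit pointers:
+//   int i = 42;
+//   void *ctx = (void *)i; // warn_int_to_void_pointer_cast
+//   char *p = (char *)i;   // warn_int_to_pointer_cast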
+
+static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
+ QualType DestType, bool CStyle,
+ SourceRange OpRange,
+ unsigned &msg,
+ CastKind &Kind) {
+ bool IsLValueCast = false;
+
+ DestType = Self.Context.getCanonicalType(DestType);
+ QualType SrcType = SrcExpr.get()->getType();
+
+ // Is the source an overloaded name? (i.e. &foo)
+ // If so, reinterpret_cast cannot help us here (13.4, p1, bullet 5) ...
+ if (SrcType == Self.Context.OverloadTy) {
+ // ... unless foo<int> resolves to an lvalue unambiguously.
+ // TODO: what if this fails because of DiagnoseUseOfDecl or something
+ // like it?
+ ExprResult SingleFunctionExpr = SrcExpr;
+ if (Self.ResolveAndFixSingleFunctionTemplateSpecialization(
+ SingleFunctionExpr,
+ Expr::getValueKindForType(DestType) == VK_RValue // Convert Fun to Ptr
+ ) && SingleFunctionExpr.isUsable()) {
+ SrcExpr = SingleFunctionExpr;
+ SrcType = SrcExpr.get()->getType();
+ } else {
+ return TC_NotApplicable;
+ }
+ }
+
+ if (const ReferenceType *DestTypeTmp = DestType->getAs<ReferenceType>()) {
+ if (!SrcExpr.get()->isGLValue()) {
+ // Cannot cast non-glvalue to (lvalue or rvalue) reference type. See the
+ // similar comment in const_cast.
+ msg = diag::err_bad_cxx_cast_rvalue;
+ return TC_NotApplicable;
+ }
+
+ if (!CStyle) {
+ Self.CheckCompatibleReinterpretCast(SrcType, DestType,
+ /*isDereference=*/false, OpRange);
+ }
+
+ // C++ 5.2.10p10: [...] a reference cast reinterpret_cast<T&>(x) has the
+ // same effect as the conversion *reinterpret_cast<T*>(&x) with the
+ // built-in & and * operators.
+
+ const char *inappropriate = nullptr;
+ switch (SrcExpr.get()->getObjectKind()) {
+ case OK_Ordinary:
+ break;
+ case OK_BitField: inappropriate = "bit-field"; break;
+ case OK_VectorComponent: inappropriate = "vector element"; break;
+ case OK_ObjCProperty: inappropriate = "property expression"; break;
+ case OK_ObjCSubscript: inappropriate = "container subscripting expression";
+ break;
+ }
+ if (inappropriate) {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_reinterpret_cast_reference)
+ << inappropriate << DestType
+ << OpRange << SrcExpr.get()->getSourceRange();
+ msg = 0; SrcExpr = ExprError();
+ return TC_NotApplicable;
+ }
+
+ // This code performs that transformation on the types being checked.
+ DestType = Self.Context.getPointerType(DestTypeTmp->getPointeeType());
+ SrcType = Self.Context.getPointerType(SrcType);
+
+ IsLValueCast = true;
+ }
+
+ // Canonicalize source for comparison.
+ SrcType = Self.Context.getCanonicalType(SrcType);
+
+ const MemberPointerType *DestMemPtr = DestType->getAs<MemberPointerType>(),
+ *SrcMemPtr = SrcType->getAs<MemberPointerType>();
+ if (DestMemPtr && SrcMemPtr) {
+ // C++ 5.2.10p9: An rvalue of type "pointer to member of X of type T1"
+ // can be explicitly converted to an rvalue of type "pointer to member
+ // of Y of type T2" if T1 and T2 are both function types or both object
+ // types.
+ if (DestMemPtr->isMemberFunctionPointer() !=
+ SrcMemPtr->isMemberFunctionPointer())
+ return TC_NotApplicable;
+
+ // C++ 5.2.10p2: The reinterpret_cast operator shall not cast away
+ // constness.
+ // A reinterpret_cast followed by a const_cast can, though, so in C-style,
+ // we accept it.
+ if (CastsAwayConstness(Self, SrcType, DestType, /*CheckCVR=*/!CStyle,
+ /*CheckObjCLifetime=*/CStyle)) {
+ msg = diag::err_bad_cxx_cast_qualifiers_away;
+ return TC_Failed;
+ }
+
+ if (Self.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ // We need to determine the inheritance model that the class will use if
+ // haven't yet.
+ (void)Self.isCompleteType(OpRange.getBegin(), SrcType);
+ (void)Self.isCompleteType(OpRange.getBegin(), DestType);
+ }
+
+ // Don't allow casting between member pointers of different sizes.
+ if (Self.Context.getTypeSize(DestMemPtr) !=
+ Self.Context.getTypeSize(SrcMemPtr)) {
+ msg = diag::err_bad_cxx_cast_member_pointer_size;
+ return TC_Failed;
+ }
+
+ // A valid member pointer cast.
+ assert(!IsLValueCast);
+ Kind = CK_ReinterpretMemberPointer;
+ return TC_Success;
+ }
+
+ // See below for the enumeral issue.
+ if (SrcType->isNullPtrType() && DestType->isIntegralType(Self.Context)) {
+ // C++0x 5.2.10p4: A pointer can be explicitly converted to any integral
+ // type large enough to hold it. A value of std::nullptr_t can be
+ // converted to an integral type; the conversion has the same meaning
+ // and validity as a conversion of (void*)0 to the integral type.
+ if (Self.Context.getTypeSize(SrcType) >
+ Self.Context.getTypeSize(DestType)) {
+ msg = diag::err_bad_reinterpret_cast_small_int;
+ return TC_Failed;
+ }
+ Kind = CK_PointerToIntegral;
+ return TC_Success;
+ }
+
+ // Allow reinterpret_casts between vectors of the same size and
+ // between vectors and integers of the same size.
+ bool destIsVector = DestType->isVectorType();
+ bool srcIsVector = SrcType->isVectorType();
+ if (srcIsVector || destIsVector) {
+ // The non-vector type, if any, must have integral type. This is
+ // the same rule that C vector casts use; note, however, that enum
+ // types are not integral in C++.
+ if ((!destIsVector && !DestType->isIntegralType(Self.Context)) ||
+ (!srcIsVector && !SrcType->isIntegralType(Self.Context)))
+ return TC_NotApplicable;
+
+ // The size we want to consider is eltCount * eltSize.
+ // That's exactly what the lax-conversion rules will check.
+ if (Self.areLaxCompatibleVectorTypes(SrcType, DestType)) {
+ Kind = CK_BitCast;
+ return TC_Success;
+ }
+
+ // Otherwise, pick a reasonable diagnostic.
+ if (!destIsVector)
+ msg = diag::err_bad_cxx_cast_vector_to_scalar_different_size;
+ else if (!srcIsVector)
+ msg = diag::err_bad_cxx_cast_scalar_to_vector_different_size;
+ else
+ msg = diag::err_bad_cxx_cast_vector_to_vector_different_size;
+
+ return TC_Failed;
+ }
+
+ if (SrcType == DestType) {
+ // C++ 5.2.10p2 has a note that mentions that, subject to all other
+ // restrictions, a cast to the same type is allowed so long as it does not
+ // cast away constness. In C++98, the intent was not entirely clear here,
+ // since all other paragraphs explicitly forbid casts to the same type.
+ // C++11 clarifies this case with p2.
+ //
+ // The only allowed types are: integral, enumeration, pointer, or
+ // pointer-to-member types. Obj-C pointers are not restricted either.
+ Kind = CK_NoOp;
+ TryCastResult Result = TC_NotApplicable;
+ if (SrcType->isIntegralOrEnumerationType() ||
+ SrcType->isAnyPointerType() ||
+ SrcType->isMemberPointerType() ||
+ SrcType->isBlockPointerType()) {
+ Result = TC_Success;
+ }
+ return Result;
+ }
+
+ bool destIsPtr = DestType->isAnyPointerType() ||
+ DestType->isBlockPointerType();
+ bool srcIsPtr = SrcType->isAnyPointerType() ||
+ SrcType->isBlockPointerType();
+ if (!destIsPtr && !srcIsPtr) {
+ // Except for std::nullptr_t->integer and lvalue->reference, which are
+ // handled above, at least one of the two arguments must be a pointer.
+ return TC_NotApplicable;
+ }
+
+ if (DestType->isIntegralType(Self.Context)) {
+ assert(srcIsPtr && "One type must be a pointer");
+ // C++ 5.2.10p4: A pointer can be explicitly converted to any integral
+ // type large enough to hold it; except in Microsoft mode, where the
+ // integral type size doesn't matter (except we don't allow bool).
+ bool MicrosoftException = Self.getLangOpts().MicrosoftExt &&
+ !DestType->isBooleanType();
+ if ((Self.Context.getTypeSize(SrcType) >
+ Self.Context.getTypeSize(DestType)) &&
+ !MicrosoftException) {
+ msg = diag::err_bad_reinterpret_cast_small_int;
+ return TC_Failed;
+ }
+ Kind = CK_PointerToIntegral;
+ return TC_Success;
+ }
+
+ if (SrcType->isIntegralOrEnumerationType()) {
+ assert(destIsPtr && "One type must be a pointer");
+ checkIntToPointerCast(CStyle, OpRange.getBegin(), SrcExpr.get(), DestType,
+ Self);
+ // C++ 5.2.10p5: A value of integral or enumeration type can be explicitly
+ // converted to a pointer.
+ // C++ 5.2.10p9: [Note: ...a null pointer constant of integral type is not
+ // necessarily converted to a null pointer value.]
+ Kind = CK_IntegralToPointer;
+ return TC_Success;
+ }
+
+ if (!destIsPtr || !srcIsPtr) {
+ // With the valid non-pointer conversions out of the way, we can be even
+ // more stringent.
+ return TC_NotApplicable;
+ }
+
+ // C++ 5.2.10p2: The reinterpret_cast operator shall not cast away constness.
+ // The C-style cast operator can.
+ if (CastsAwayConstness(Self, SrcType, DestType, /*CheckCVR=*/!CStyle,
+ /*CheckObjCLifetime=*/CStyle)) {
+ msg = diag::err_bad_cxx_cast_qualifiers_away;
+ return TC_Failed;
+ }
+
+ // Cannot convert between block pointers and Objective-C object pointers.
+ if ((SrcType->isBlockPointerType() && DestType->isObjCObjectPointerType()) ||
+ (DestType->isBlockPointerType() && SrcType->isObjCObjectPointerType()))
+ return TC_NotApplicable;
+
+ if (IsLValueCast) {
+ Kind = CK_LValueBitCast;
+ } else if (DestType->isObjCObjectPointerType()) {
+ Kind = Self.PrepareCastToObjCObjectPointer(SrcExpr);
+ } else if (DestType->isBlockPointerType()) {
+ if (!SrcType->isBlockPointerType()) {
+ Kind = CK_AnyPointerToBlockPointerCast;
+ } else {
+ Kind = CK_BitCast;
+ }
+ } else {
+ Kind = CK_BitCast;
+ }
+
+ // Any pointer can be cast to an Objective-C pointer type with a C-style
+ // cast.
+ if (CStyle && DestType->isObjCObjectPointerType()) {
+ return TC_Success;
+ }
+ if (CStyle)
+ DiagnoseCastOfObjCSEL(Self, SrcExpr, DestType);
+
+ // Not casting away constness, so the only remaining check is for compatible
+ // pointer categories.
+
+ if (SrcType->isFunctionPointerType()) {
+ if (DestType->isFunctionPointerType()) {
+ // C++ 5.2.10p6: A pointer to a function can be explicitly converted to
+ // a pointer to a function of a different type.
+ return TC_Success;
+ }
+
+ // C++0x 5.2.10p8: Converting a pointer to a function into a pointer to
+ // an object type or vice versa is conditionally-supported.
+ // Compilers support it in C++03 too, though, because it's necessary for
+ // casting the return value of dlsym() and GetProcAddress().
+ // FIXME: Conditionally-supported behavior should be configurable in the
+ // TargetInfo or similar.
+ Self.Diag(OpRange.getBegin(),
+ Self.getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_cast_fn_obj : diag::ext_cast_fn_obj)
+ << OpRange;
+ return TC_Success;
+ }
+
+ if (DestType->isFunctionPointerType()) {
+ // See above.
+ Self.Diag(OpRange.getBegin(),
+ Self.getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_cast_fn_obj : diag::ext_cast_fn_obj)
+ << OpRange;
+ return TC_Success;
+ }
+
+ // C++ 5.2.10p7: A pointer to an object can be explicitly converted to
+ // a pointer to an object of different type.
+ // Void pointers are not specified, but supported by every compiler out there.
+ // So we finish by allowing everything that remains - it's got to be two
+ // object pointers.
+ return TC_Success;
+}
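+// For example (illustrative):
+//   int i = 0;
+//   float &fr = reinterpret_cast<float&>(i); // CK_LValueBitCast
+//   long n = reinterpret_cast<long>(&i);     // CK_PointerToIntegral, if long
+//                                            // is wide enough to hold it
+//   int *p = reinterpret_cast<int*>(n);      // CK_IntegralToPointer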
+
+void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
+ bool ListInitialization) {
+ // Handle placeholders.
+ if (isPlaceholder()) {
+ // C-style casts can resolve __unknown_any types.
+ if (claimPlaceholder(BuiltinType::UnknownAny)) {
+ SrcExpr = Self.checkUnknownAnyCast(DestRange, DestType,
+ SrcExpr.get(), Kind,
+ ValueKind, BasePath);
+ return;
+ }
+
+ checkNonOverloadPlaceholders();
+ if (SrcExpr.isInvalid())
+ return;
+ }
+
+ // C++ 5.2.9p4: Any expression can be explicitly converted to type "cv void".
+ // This test is outside everything else because it's the only case where
+ // a non-lvalue-reference target type does not lead to decay.
+ if (DestType->isVoidType()) {
+ Kind = CK_ToVoid;
+
+ if (claimPlaceholder(BuiltinType::Overload)) {
+ Self.ResolveAndFixSingleFunctionTemplateSpecialization(
+ SrcExpr, /* Decay Function to ptr */ false,
+ /* Complain */ true, DestRange, DestType,
+ diag::err_bad_cstyle_cast_overload);
+ if (SrcExpr.isInvalid())
+ return;
+ }
+
+ SrcExpr = Self.IgnoredValueConversions(SrcExpr.get());
+ return;
+ }
+
+ // If the type is dependent, we won't do any other semantic analysis now.
+ if (DestType->isDependentType() || SrcExpr.get()->isTypeDependent() ||
+ SrcExpr.get()->isValueDependent()) {
+ assert(Kind == CK_Dependent);
+ return;
+ }
+
+ if (ValueKind == VK_RValue && !DestType->isRecordType() &&
+ !isPlaceholder(BuiltinType::Overload)) {
+ SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
+ if (SrcExpr.isInvalid())
+ return;
+ }
+
+ // AltiVec vector initialization with a single literal.
+ if (const VectorType *vecTy = DestType->getAs<VectorType>())
+ if (vecTy->getVectorKind() == VectorType::AltiVecVector
+ && (SrcExpr.get()->getType()->isIntegerType()
+ || SrcExpr.get()->getType()->isFloatingType())) {
+ Kind = CK_VectorSplat;
+ return;
+ }
+
+ // C++ [expr.cast]p5: The conversions performed by
+ // - a const_cast,
+ // - a static_cast,
+ // - a static_cast followed by a const_cast,
+ // - a reinterpret_cast, or
+ // - a reinterpret_cast followed by a const_cast,
+ // can be performed using the cast notation of explicit type conversion.
+ // [...] If a conversion can be interpreted in more than one of the ways
+ // listed above, the interpretation that appears first in the list is used,
+ // even if a cast resulting from that interpretation is ill-formed.
+ // In plain language, this means trying a const_cast ...
+ unsigned msg = diag::err_bad_cxx_cast_generic;
+ TryCastResult tcr = TryConstCast(Self, SrcExpr, DestType,
+ /*CStyle*/true, msg);
+ if (SrcExpr.isInvalid())
+ return;
+ if (tcr == TC_Success)
+ Kind = CK_NoOp;
+
+ Sema::CheckedConversionKind CCK
+ = FunctionalStyle? Sema::CCK_FunctionalCast
+ : Sema::CCK_CStyleCast;
+ if (tcr == TC_NotApplicable) {
+ // ... or if that is not possible, a static_cast, ignoring const, ...
+ tcr = TryStaticCast(Self, SrcExpr, DestType, CCK, OpRange,
+ msg, Kind, BasePath, ListInitialization);
+ if (SrcExpr.isInvalid())
+ return;
+
+ if (tcr == TC_NotApplicable) {
+ // ... and finally a reinterpret_cast, ignoring const.
+ tcr = TryReinterpretCast(Self, SrcExpr, DestType, /*CStyle*/true,
+ OpRange, msg, Kind);
+ if (SrcExpr.isInvalid())
+ return;
+ }
+ }
+
+ if (Self.getLangOpts().ObjCAutoRefCount && tcr == TC_Success)
+ checkObjCARCConversion(CCK);
+
+ if (tcr != TC_Success && msg != 0) {
+ if (SrcExpr.get()->getType() == Self.Context.OverloadTy) {
+ DeclAccessPair Found;
+ FunctionDecl *Fn = Self.ResolveAddressOfOverloadedFunction(SrcExpr.get(),
+ DestType,
+ /*Complain*/ true,
+ Found);
+ if (Fn) {
+ // If DestType is a function type (not to be confused with the function
+ // pointer type), it will be possible to resolve the function address,
+ // but the type cast should be considered as failure.
+ OverloadExpr *OE = OverloadExpr::find(SrcExpr.get()).Expression;
+ Self.Diag(OpRange.getBegin(), diag::err_bad_cstyle_cast_overload)
+ << OE->getName() << DestType << OpRange
+ << OE->getQualifierLoc().getSourceRange();
+ Self.NoteAllOverloadCandidates(SrcExpr.get());
+ }
+ } else {
+ diagnoseBadCast(Self, msg, (FunctionalStyle ? CT_Functional : CT_CStyle),
+ OpRange, SrcExpr.get(), DestType, ListInitialization);
+ }
+ } else if (Kind == CK_BitCast) {
+ checkCastAlign();
+ }
+
+ // Clear out SrcExpr if there was a fatal error.
+ if (tcr != TC_Success)
+ SrcExpr = ExprError();
+}
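+// For example (illustrative), given "const Derived *cd" with Derived : Base:
+//   (Base *)cd // const_cast alone is not applicable; interpreted as a
+//              // static_cast (ignoring const) followed by a const_cast
+//   (char *)cd // falls through to reinterpret_cast plus const_cast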
+
+/// DiagnoseBadFunctionCast - Warn whenever a function call is cast to a
+/// non-matching type, such as a call returning an enum cast to an int, or an
+/// int-returning call cast to a pointer. A cast to 'void' is an exception.
+static void DiagnoseBadFunctionCast(Sema &Self, const ExprResult &SrcExpr,
+ QualType DestType) {
+ if (Self.Diags.isIgnored(diag::warn_bad_function_cast,
+ SrcExpr.get()->getExprLoc()))
+ return;
+
+ if (!isa<CallExpr>(SrcExpr.get()))
+ return;
+
+ QualType SrcType = SrcExpr.get()->getType();
+ if (DestType.getUnqualifiedType()->isVoidType())
+ return;
+ if ((SrcType->isAnyPointerType() || SrcType->isBlockPointerType())
+ && (DestType->isAnyPointerType() || DestType->isBlockPointerType()))
+ return;
+ if (SrcType->isIntegerType() && DestType->isIntegerType() &&
+ (SrcType->isBooleanType() == DestType->isBooleanType()) &&
+ (SrcType->isEnumeralType() == DestType->isEnumeralType()))
+ return;
+ if (SrcType->isRealFloatingType() && DestType->isRealFloatingType())
+ return;
+ if (SrcType->isEnumeralType() && DestType->isEnumeralType())
+ return;
+ if (SrcType->isComplexType() && DestType->isComplexType())
+ return;
+ if (SrcType->isComplexIntegerType() && DestType->isComplexIntegerType())
+ return;
+
+ Self.Diag(SrcExpr.get()->getExprLoc(),
+ diag::warn_bad_function_cast)
+ << SrcType << DestType << SrcExpr.get()->getSourceRange();
+}
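+// For example (illustrative, C code):
+//   enum E f(void);
+//   char *p = (char *)f(); // warned: enum-returning call cast to a pointer
+//   (void)f();             // not warned: cast to void is exempt
+//   double g(void);
+//   float x = (float)g();  // not warned: both are real floating types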
+
+/// Check the semantics of a C-style cast operation, in C.
+void CastOperation::CheckCStyleCast() {
+ assert(!Self.getLangOpts().CPlusPlus);
+
+ // C-style casts can resolve __unknown_any types.
+ if (claimPlaceholder(BuiltinType::UnknownAny)) {
+ SrcExpr = Self.checkUnknownAnyCast(DestRange, DestType,
+ SrcExpr.get(), Kind,
+ ValueKind, BasePath);
+ return;
+ }
+
+ // C99 6.5.4p2: the cast type needs to be void or scalar and the expression
+ // type needs to be scalar.
+ if (DestType->isVoidType()) {
+ // We don't necessarily do lvalue-to-rvalue conversions on this.
+ SrcExpr = Self.IgnoredValueConversions(SrcExpr.get());
+ if (SrcExpr.isInvalid())
+ return;
+
+ // Cast to void allows any expr type.
+ Kind = CK_ToVoid;
+ return;
+ }
+
+ // Overloads are allowed with C extensions, so we need to support them.
+ if (SrcExpr.get()->getType() == Self.Context.OverloadTy) {
+ DeclAccessPair DAP;
+ if (FunctionDecl *FD = Self.ResolveAddressOfOverloadedFunction(
+ SrcExpr.get(), DestType, /*Complain=*/true, DAP))
+ SrcExpr = Self.FixOverloadedFunctionReference(SrcExpr.get(), DAP, FD);
+ else
+ return;
+ assert(SrcExpr.isUsable());
+ }
+ SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
+ if (SrcExpr.isInvalid())
+ return;
+ QualType SrcType = SrcExpr.get()->getType();
+
+ assert(!SrcType->isPlaceholderType());
+
+ // OpenCL v1 s6.5: Casting a pointer to address space A to a pointer to
+ // address space B is illegal.
+ if (Self.getLangOpts().OpenCL && DestType->isPointerType() &&
+ SrcType->isPointerType()) {
+ const PointerType *DestPtr = DestType->getAs<PointerType>();
+ if (!DestPtr->isAddressSpaceOverlapping(*SrcType->getAs<PointerType>())) {
+ Self.Diag(OpRange.getBegin(),
+ diag::err_typecheck_incompatible_address_space)
+ << SrcType << DestType << Sema::AA_Casting
+ << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+ }
+
+ if (Self.RequireCompleteType(OpRange.getBegin(), DestType,
+ diag::err_typecheck_cast_to_incomplete)) {
+ SrcExpr = ExprError();
+ return;
+ }
+
+ if (!DestType->isScalarType() && !DestType->isVectorType()) {
+ const RecordType *DestRecordTy = DestType->getAs<RecordType>();
+
+ if (DestRecordTy && Self.Context.hasSameUnqualifiedType(DestType, SrcType)){
+ // GCC struct/union extension: allow cast to self.
+ Self.Diag(OpRange.getBegin(), diag::ext_typecheck_cast_nonscalar)
+ << DestType << SrcExpr.get()->getSourceRange();
+ Kind = CK_NoOp;
+ return;
+ }
+
+ // GCC's cast to union extension.
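+ // Illustrative (hypothetical): given 'union U { int i; float f; };', the
+ // cast '(union U)1' is accepted because 'int' matches the member 'i'.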
+ if (DestRecordTy && DestRecordTy->getDecl()->isUnion()) {
+ RecordDecl *RD = DestRecordTy->getDecl();
+ RecordDecl::field_iterator Field, FieldEnd;
+ for (Field = RD->field_begin(), FieldEnd = RD->field_end();
+ Field != FieldEnd; ++Field) {
+ if (Self.Context.hasSameUnqualifiedType(Field->getType(), SrcType) &&
+ !Field->isUnnamedBitfield()) {
+ Self.Diag(OpRange.getBegin(), diag::ext_typecheck_cast_to_union)
+ << SrcExpr.get()->getSourceRange();
+ break;
+ }
+ }
+ if (Field == FieldEnd) {
+ Self.Diag(OpRange.getBegin(), diag::err_typecheck_cast_to_union_no_type)
+ << SrcType << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+ Kind = CK_ToUnion;
+ return;
+ }
+
+ // Reject any other conversions to non-scalar types.
+ Self.Diag(OpRange.getBegin(), diag::err_typecheck_cond_expect_scalar)
+ << DestType << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+
+ // The type we're casting to is known to be a scalar or vector.
+
+ // Require the operand to be a scalar or vector.
+ if (!SrcType->isScalarType() && !SrcType->isVectorType()) {
+ Self.Diag(SrcExpr.get()->getExprLoc(),
+ diag::err_typecheck_expect_scalar_operand)
+ << SrcType << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+
+ if (DestType->isExtVectorType()) {
+ SrcExpr = Self.CheckExtVectorCast(OpRange, DestType, SrcExpr.get(), Kind);
+ return;
+ }
+
+ if (const VectorType *DestVecTy = DestType->getAs<VectorType>()) {
+ if (DestVecTy->getVectorKind() == VectorType::AltiVecVector &&
+ (SrcType->isIntegerType() || SrcType->isFloatingType())) {
+ Kind = CK_VectorSplat;
+ } else if (Self.CheckVectorCast(OpRange, DestType, SrcType, Kind)) {
+ SrcExpr = ExprError();
+ }
+ return;
+ }
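+ // Note (illustrative, hypothetical code): with AltiVec, '(vector int)5'
+ // takes the CK_VectorSplat path above, splatting the scalar to all elements.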
+
+ if (SrcType->isVectorType()) {
+ if (Self.CheckVectorCast(OpRange, SrcType, DestType, Kind))
+ SrcExpr = ExprError();
+ return;
+ }
+
+ // The source and target types are both scalars, i.e.
+ // - arithmetic types (fundamental, enum, and complex)
+ // - all kinds of pointers
+ // Note that member pointers were already filtered out in C++ mode, above.
+
+ if (isa<ObjCSelectorExpr>(SrcExpr.get())) {
+ Self.Diag(SrcExpr.get()->getExprLoc(), diag::err_cast_selector_expr);
+ SrcExpr = ExprError();
+ return;
+ }
+
+ // If either type is a pointer, the other type has to be either an
+ // integer or a pointer.
+ if (!DestType->isArithmeticType()) {
+ if (!SrcType->isIntegralType(Self.Context) && SrcType->isArithmeticType()) {
+ Self.Diag(SrcExpr.get()->getExprLoc(),
+ diag::err_cast_pointer_from_non_pointer_int)
+ << SrcType << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+ checkIntToPointerCast(/* CStyle */ true, OpRange.getBegin(), SrcExpr.get(),
+ DestType, Self);
+ } else if (!SrcType->isArithmeticType()) {
+ if (!DestType->isIntegralType(Self.Context) &&
+ DestType->isArithmeticType()) {
+ Self.Diag(SrcExpr.get()->getLocStart(),
+ diag::err_cast_pointer_to_non_pointer_int)
+ << DestType << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+ }
+
+ if (Self.getLangOpts().OpenCL && !Self.getOpenCLOptions().cl_khr_fp16) {
+ if (DestType->isHalfType()) {
+ Self.Diag(SrcExpr.get()->getLocStart(), diag::err_opencl_cast_to_half)
+ << DestType << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+ }
+
+ // ARC imposes extra restrictions on casts.
+ if (Self.getLangOpts().ObjCAutoRefCount) {
+ checkObjCARCConversion(Sema::CCK_CStyleCast);
+ if (SrcExpr.isInvalid())
+ return;
+
+ if (const PointerType *CastPtr = DestType->getAs<PointerType>()) {
+ if (const PointerType *ExprPtr = SrcType->getAs<PointerType>()) {
+ Qualifiers CastQuals = CastPtr->getPointeeType().getQualifiers();
+ Qualifiers ExprQuals = ExprPtr->getPointeeType().getQualifiers();
+ if (CastPtr->getPointeeType()->isObjCLifetimeType() &&
+ ExprPtr->getPointeeType()->isObjCLifetimeType() &&
+ !CastQuals.compatiblyIncludesObjCLifetime(ExprQuals)) {
+ Self.Diag(SrcExpr.get()->getLocStart(),
+ diag::err_typecheck_incompatible_ownership)
+ << SrcType << DestType << Sema::AA_Casting
+ << SrcExpr.get()->getSourceRange();
+ return;
+ }
+ }
+ }
+ else if (!Self.CheckObjCARCUnavailableWeakConversion(DestType, SrcType)) {
+ Self.Diag(SrcExpr.get()->getLocStart(),
+ diag::err_arc_convesion_of_weak_unavailable)
+ << 1 << SrcType << DestType << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+ }
+
+ DiagnoseCastOfObjCSEL(Self, SrcExpr, DestType);
+ DiagnoseBadFunctionCast(Self, SrcExpr, DestType);
+ Kind = Self.PrepareScalarCast(SrcExpr, DestType);
+ if (SrcExpr.isInvalid())
+ return;
+
+ if (Kind == CK_BitCast)
+ checkCastAlign();
+
+ // -Wcast-qual
+ QualType TheOffendingSrcType, TheOffendingDestType;
+ Qualifiers CastAwayQualifiers;
+ if (SrcType->isAnyPointerType() && DestType->isAnyPointerType() &&
+ CastsAwayConstness(Self, SrcType, DestType, true, false,
+ &TheOffendingSrcType, &TheOffendingDestType,
+ &CastAwayQualifiers)) {
+ int qualifiers = -1;
+ if (CastAwayQualifiers.hasConst() && CastAwayQualifiers.hasVolatile()) {
+ qualifiers = 0;
+ } else if (CastAwayQualifiers.hasConst()) {
+ qualifiers = 1;
+ } else if (CastAwayQualifiers.hasVolatile()) {
+ qualifiers = 2;
+ }
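+ // Illustrative (hypothetical): 'const int c; int *p = (int *)&c;' reports
+ // warn_cast_qual with qualifiers == 1 (const only).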
+ // This is a variant of int **x; const int **y = (const int **)x;
+ if (qualifiers == -1)
+ Self.Diag(SrcExpr.get()->getLocStart(), diag::warn_cast_qual2) <<
+ SrcType << DestType;
+ else
+ Self.Diag(SrcExpr.get()->getLocStart(), diag::warn_cast_qual) <<
+ TheOffendingSrcType << TheOffendingDestType << qualifiers;
+ }
+}
+
+ExprResult Sema::BuildCStyleCastExpr(SourceLocation LPLoc,
+ TypeSourceInfo *CastTypeInfo,
+ SourceLocation RPLoc,
+ Expr *CastExpr) {
+ CastOperation Op(*this, CastTypeInfo->getType(), CastExpr);
+ Op.DestRange = CastTypeInfo->getTypeLoc().getSourceRange();
+ Op.OpRange = SourceRange(LPLoc, CastExpr->getLocEnd());
+
+ if (getLangOpts().CPlusPlus) {
+ Op.CheckCXXCStyleCast(/*FunctionalStyle=*/ false,
+ isa<InitListExpr>(CastExpr));
+ } else {
+ Op.CheckCStyleCast();
+ }
+
+ if (Op.SrcExpr.isInvalid())
+ return ExprError();
+
+ return Op.complete(CStyleCastExpr::Create(Context, Op.ResultType,
+ Op.ValueKind, Op.Kind, Op.SrcExpr.get(),
+ &Op.BasePath, CastTypeInfo, LPLoc, RPLoc));
+}
+
+ExprResult Sema::BuildCXXFunctionalCastExpr(TypeSourceInfo *CastTypeInfo,
+ SourceLocation LPLoc,
+ Expr *CastExpr,
+ SourceLocation RPLoc) {
+ assert(LPLoc.isValid() && "List-initialization shouldn't get here.");
+ CastOperation Op(*this, CastTypeInfo->getType(), CastExpr);
+ Op.DestRange = CastTypeInfo->getTypeLoc().getSourceRange();
+ Op.OpRange = SourceRange(Op.DestRange.getBegin(), CastExpr->getLocEnd());
+
+ Op.CheckCXXCStyleCast(/*FunctionalStyle=*/true, /*ListInit=*/false);
+ if (Op.SrcExpr.isInvalid())
+ return ExprError();
+
+ auto *SubExpr = Op.SrcExpr.get();
+ if (auto *BindExpr = dyn_cast<CXXBindTemporaryExpr>(SubExpr))
+ SubExpr = BindExpr->getSubExpr();
+ if (auto *ConstructExpr = dyn_cast<CXXConstructExpr>(SubExpr))
+ ConstructExpr->setParenOrBraceRange(SourceRange(LPLoc, RPLoc));
+
+ return Op.complete(CXXFunctionalCastExpr::Create(Context, Op.ResultType,
+ Op.ValueKind, CastTypeInfo, Op.Kind,
+ Op.SrcExpr.get(), &Op.BasePath, LPLoc, RPLoc));
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
new file mode 100644
index 0000000..cbdcb5e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
@@ -0,0 +1,9908 @@
+//===--- SemaChecking.cpp - Extra Semantic Checking -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements extra semantic analysis beyond what is enforced
+// by the C type system.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprOpenMP.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Analysis/Analyses/FormatString.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/raw_ostream.h"
+#include <limits>
+using namespace clang;
+using namespace sema;
+
+SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
+ unsigned ByteNo) const {
+ return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
+ Context.getTargetInfo());
+}
+
+/// Checks that a call expression's argument count is the desired number.
+/// This is useful when doing custom type-checking. Returns true on error.
+static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
+ unsigned argCount = call->getNumArgs();
+ if (argCount == desiredArgCount) return false;
+
+ if (argCount < desiredArgCount)
+ return S.Diag(call->getLocEnd(), diag::err_typecheck_call_too_few_args)
+ << 0 /*function call*/ << desiredArgCount << argCount
+ << call->getSourceRange();
+
+ // Highlight all the excess arguments.
+ SourceRange range(call->getArg(desiredArgCount)->getLocStart(),
+ call->getArg(argCount - 1)->getLocEnd());
+
+ return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
+ << 0 /*function call*/ << desiredArgCount << argCount
+ << range;
+}
+
+/// Check that the first argument to __builtin_annotation is an integer
+/// and the second argument is a non-wide string literal.
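+///
+/// Illustrative usage (hypothetical): 'int y = __builtin_annotation(x, "tag");'
+/// is accepted; a wide literal such as L"tag" is rejected.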
+static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
+ if (checkArgCount(S, TheCall, 2))
+ return true;
+
+ // First argument should be an integer.
+ Expr *ValArg = TheCall->getArg(0);
+ QualType Ty = ValArg->getType();
+ if (!Ty->isIntegerType()) {
+ S.Diag(ValArg->getLocStart(), diag::err_builtin_annotation_first_arg)
+ << ValArg->getSourceRange();
+ return true;
+ }
+
+ // Second argument should be a constant string.
+ Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
+ StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
+ if (!Literal || !Literal->isAscii()) {
+ S.Diag(StrArg->getLocStart(), diag::err_builtin_annotation_second_arg)
+ << StrArg->getSourceRange();
+ return true;
+ }
+
+ TheCall->setType(Ty);
+ return false;
+}
+
+/// Check that the argument to __builtin_addressof is a glvalue, and set the
+/// result type to the corresponding pointer type.
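+///
+/// Illustrative (hypothetical): for 'int i;', '__builtin_addressof(i)' has
+/// type 'int *', and it bypasses any overloaded 'operator&'.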
+static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
+ if (checkArgCount(S, TheCall, 1))
+ return true;
+
+ ExprResult Arg(TheCall->getArg(0));
+ QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getLocStart());
+ if (ResultType.isNull())
+ return true;
+
+ TheCall->setArg(0, Arg.get());
+ TheCall->setType(ResultType);
+ return false;
+}
+
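+/// Check a call to one of the __builtin_*_overflow builtins. A sketch of the
+/// shape this enforces (hypothetical code):
+/// 'bool ovf = __builtin_add_overflow(a, b, &r);'
+/// with integer 'a' and 'b' and a pointer to a non-const integer 'r'.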
+static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
+ if (checkArgCount(S, TheCall, 3))
+ return true;
+
+ // First two arguments should be integers.
+ for (unsigned I = 0; I < 2; ++I) {
+ Expr *Arg = TheCall->getArg(I);
+ QualType Ty = Arg->getType();
+ if (!Ty->isIntegerType()) {
+ S.Diag(Arg->getLocStart(), diag::err_overflow_builtin_must_be_int)
+ << Ty << Arg->getSourceRange();
+ return true;
+ }
+ }
+
+ // Third argument should be a pointer to a non-const integer.
+ // IRGen correctly handles volatile, restrict, and address spaces, and
+ // the other qualifiers aren't possible.
+ {
+ Expr *Arg = TheCall->getArg(2);
+ QualType Ty = Arg->getType();
+ const auto *PtrTy = Ty->getAs<PointerType>();
+ if (!(PtrTy && PtrTy->getPointeeType()->isIntegerType() &&
+ !PtrTy->getPointeeType().isConstQualified())) {
+ S.Diag(Arg->getLocStart(), diag::err_overflow_builtin_must_be_ptr_int)
+ << Ty << Arg->getSourceRange();
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void SemaBuiltinMemChkCall(Sema &S, FunctionDecl *FDecl,
+ CallExpr *TheCall, unsigned SizeIdx,
+ unsigned DstSizeIdx) {
+ if (TheCall->getNumArgs() <= SizeIdx ||
+ TheCall->getNumArgs() <= DstSizeIdx)
+ return;
+
+ const Expr *SizeArg = TheCall->getArg(SizeIdx);
+ const Expr *DstSizeArg = TheCall->getArg(DstSizeIdx);
+
+ llvm::APSInt Size, DstSize;
+
+ // Find out if both sizes are known at compile time.
+ if (!SizeArg->EvaluateAsInt(Size, S.Context) ||
+ !DstSizeArg->EvaluateAsInt(DstSize, S.Context))
+ return;
+
+ if (Size.ule(DstSize))
+ return;
+
+ // Confirmed overflow, so generate the diagnostic.
+ IdentifierInfo *FnName = FDecl->getIdentifier();
+ SourceLocation SL = TheCall->getLocStart();
+ SourceRange SR = TheCall->getSourceRange();
+
+ S.Diag(SL, diag::warn_memcpy_chk_overflow) << SR << FnName;
+}
+
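+/// Check __builtin_call_with_static_chain. Illustrative usage (hypothetical):
+/// '__builtin_call_with_static_chain(f(1, 2), chain)' where 'f(1, 2)' is a
+/// direct (non-block, non-builtin) call and 'chain' is a pointer.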
+static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
+ if (checkArgCount(S, BuiltinCall, 2))
+ return true;
+
+ SourceLocation BuiltinLoc = BuiltinCall->getLocStart();
+ Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
+ Expr *Call = BuiltinCall->getArg(0);
+ Expr *Chain = BuiltinCall->getArg(1);
+
+ if (Call->getStmtClass() != Stmt::CallExprClass) {
+ S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
+ << Call->getSourceRange();
+ return true;
+ }
+
+ auto CE = cast<CallExpr>(Call);
+ if (CE->getCallee()->getType()->isBlockPointerType()) {
+ S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
+ << Call->getSourceRange();
+ return true;
+ }
+
+ const Decl *TargetDecl = CE->getCalleeDecl();
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
+ if (FD->getBuiltinID()) {
+ S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
+ << Call->getSourceRange();
+ return true;
+ }
+
+ if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
+ S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
+ << Call->getSourceRange();
+ return true;
+ }
+
+ ExprResult ChainResult = S.UsualUnaryConversions(Chain);
+ if (ChainResult.isInvalid())
+ return true;
+ if (!ChainResult.get()->getType()->isPointerType()) {
+ S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
+ << Chain->getSourceRange();
+ return true;
+ }
+
+ QualType ReturnTy = CE->getCallReturnType(S.Context);
+ QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
+ QualType BuiltinTy = S.Context.getFunctionType(
+ ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
+ QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);
+
+ Builtin =
+ S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();
+
+ BuiltinCall->setType(CE->getType());
+ BuiltinCall->setValueKind(CE->getValueKind());
+ BuiltinCall->setObjectKind(CE->getObjectKind());
+ BuiltinCall->setCallee(Builtin);
+ BuiltinCall->setArg(1, ChainResult.get());
+
+ return false;
+}
+
+static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
+ Scope::ScopeFlags NeededScopeFlags,
+ unsigned DiagID) {
+ // Scopes aren't available during instantiation. Fortunately, builtin
+ // functions cannot be template args so they cannot be formed through template
+ // instantiation. Therefore checking once during the parse is sufficient.
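+ // Illustrative (hypothetical): calling '_exception_code()' outside an
+ // '__except' block fails the scope walk below and is diagnosed.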
+ if (!SemaRef.ActiveTemplateInstantiations.empty())
+ return false;
+
+ Scope *S = SemaRef.getCurScope();
+ while (S && !S->isSEHExceptScope())
+ S = S->getParent();
+ if (!S || !(S->getFlags() & NeededScopeFlags)) {
+ auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+ SemaRef.Diag(TheCall->getExprLoc(), DiagID)
+ << DRE->getDecl()->getIdentifier();
+ return true;
+ }
+
+ return false;
+}
+
+ExprResult
+Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
+ CallExpr *TheCall) {
+ ExprResult TheCallResult(TheCall);
+
+ // Find out if any arguments are required to be integer constant expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ if (Error != ASTContext::GE_None)
+ ICEArguments = 0; // Don't diagnose previously diagnosed errors.
+
+ // If any arguments are required to be ICE's, check and diagnose.
+ for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
+ // Skip arguments not required to be ICE's.
+ if ((ICEArguments & (1 << ArgNo)) == 0) continue;
+
+ llvm::APSInt Result;
+ if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
+ return true;
+ ICEArguments &= ~(1 << ArgNo);
+ }
+
+ switch (BuiltinID) {
+ case Builtin::BI__builtin___CFStringMakeConstantString:
+ assert(TheCall->getNumArgs() == 1 &&
+ "Wrong # arguments to builtin CFStringMakeConstantString");
+ if (CheckObjCString(TheCall->getArg(0)))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_stdarg_start:
+ case Builtin::BI__builtin_va_start:
+ if (SemaBuiltinVAStart(TheCall))
+ return ExprError();
+ break;
+ case Builtin::BI__va_start: {
+ switch (Context.getTargetInfo().getTriple().getArch()) {
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ if (SemaBuiltinVAStartARM(TheCall))
+ return ExprError();
+ break;
+ default:
+ if (SemaBuiltinVAStart(TheCall))
+ return ExprError();
+ break;
+ }
+ break;
+ }
+ case Builtin::BI__builtin_isgreater:
+ case Builtin::BI__builtin_isgreaterequal:
+ case Builtin::BI__builtin_isless:
+ case Builtin::BI__builtin_islessequal:
+ case Builtin::BI__builtin_islessgreater:
+ case Builtin::BI__builtin_isunordered:
+ if (SemaBuiltinUnorderedCompare(TheCall))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_fpclassify:
+ if (SemaBuiltinFPClassification(TheCall, 6))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_isfinite:
+ case Builtin::BI__builtin_isinf:
+ case Builtin::BI__builtin_isinf_sign:
+ case Builtin::BI__builtin_isnan:
+ case Builtin::BI__builtin_isnormal:
+ if (SemaBuiltinFPClassification(TheCall, 1))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_shufflevector:
+ return SemaBuiltinShuffleVector(TheCall);
+ // TheCall will be freed by the smart pointer here, but that's fine, since
+ // SemaBuiltinShuffleVector guts it without releasing it.
+ case Builtin::BI__builtin_prefetch:
+ if (SemaBuiltinPrefetch(TheCall))
+ return ExprError();
+ break;
+ case Builtin::BI__assume:
+ case Builtin::BI__builtin_assume:
+ if (SemaBuiltinAssume(TheCall))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_assume_aligned:
+ if (SemaBuiltinAssumeAligned(TheCall))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_object_size:
+ if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_longjmp:
+ if (SemaBuiltinLongjmp(TheCall))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_setjmp:
+ if (SemaBuiltinSetjmp(TheCall))
+ return ExprError();
+ break;
+ case Builtin::BI_setjmp:
+ case Builtin::BI_setjmpex:
+ if (checkArgCount(*this, TheCall, 1))
+ return true;
+ break;
+
+ case Builtin::BI__builtin_classify_type:
+ if (checkArgCount(*this, TheCall, 1)) return true;
+ TheCall->setType(Context.IntTy);
+ break;
+ case Builtin::BI__builtin_constant_p:
+ if (checkArgCount(*this, TheCall, 1)) return true;
+ TheCall->setType(Context.IntTy);
+ break;
+ case Builtin::BI__sync_fetch_and_add:
+ case Builtin::BI__sync_fetch_and_add_1:
+ case Builtin::BI__sync_fetch_and_add_2:
+ case Builtin::BI__sync_fetch_and_add_4:
+ case Builtin::BI__sync_fetch_and_add_8:
+ case Builtin::BI__sync_fetch_and_add_16:
+ case Builtin::BI__sync_fetch_and_sub:
+ case Builtin::BI__sync_fetch_and_sub_1:
+ case Builtin::BI__sync_fetch_and_sub_2:
+ case Builtin::BI__sync_fetch_and_sub_4:
+ case Builtin::BI__sync_fetch_and_sub_8:
+ case Builtin::BI__sync_fetch_and_sub_16:
+ case Builtin::BI__sync_fetch_and_or:
+ case Builtin::BI__sync_fetch_and_or_1:
+ case Builtin::BI__sync_fetch_and_or_2:
+ case Builtin::BI__sync_fetch_and_or_4:
+ case Builtin::BI__sync_fetch_and_or_8:
+ case Builtin::BI__sync_fetch_and_or_16:
+ case Builtin::BI__sync_fetch_and_and:
+ case Builtin::BI__sync_fetch_and_and_1:
+ case Builtin::BI__sync_fetch_and_and_2:
+ case Builtin::BI__sync_fetch_and_and_4:
+ case Builtin::BI__sync_fetch_and_and_8:
+ case Builtin::BI__sync_fetch_and_and_16:
+ case Builtin::BI__sync_fetch_and_xor:
+ case Builtin::BI__sync_fetch_and_xor_1:
+ case Builtin::BI__sync_fetch_and_xor_2:
+ case Builtin::BI__sync_fetch_and_xor_4:
+ case Builtin::BI__sync_fetch_and_xor_8:
+ case Builtin::BI__sync_fetch_and_xor_16:
+ case Builtin::BI__sync_fetch_and_nand:
+ case Builtin::BI__sync_fetch_and_nand_1:
+ case Builtin::BI__sync_fetch_and_nand_2:
+ case Builtin::BI__sync_fetch_and_nand_4:
+ case Builtin::BI__sync_fetch_and_nand_8:
+ case Builtin::BI__sync_fetch_and_nand_16:
+ case Builtin::BI__sync_add_and_fetch:
+ case Builtin::BI__sync_add_and_fetch_1:
+ case Builtin::BI__sync_add_and_fetch_2:
+ case Builtin::BI__sync_add_and_fetch_4:
+ case Builtin::BI__sync_add_and_fetch_8:
+ case Builtin::BI__sync_add_and_fetch_16:
+ case Builtin::BI__sync_sub_and_fetch:
+ case Builtin::BI__sync_sub_and_fetch_1:
+ case Builtin::BI__sync_sub_and_fetch_2:
+ case Builtin::BI__sync_sub_and_fetch_4:
+ case Builtin::BI__sync_sub_and_fetch_8:
+ case Builtin::BI__sync_sub_and_fetch_16:
+ case Builtin::BI__sync_and_and_fetch:
+ case Builtin::BI__sync_and_and_fetch_1:
+ case Builtin::BI__sync_and_and_fetch_2:
+ case Builtin::BI__sync_and_and_fetch_4:
+ case Builtin::BI__sync_and_and_fetch_8:
+ case Builtin::BI__sync_and_and_fetch_16:
+ case Builtin::BI__sync_or_and_fetch:
+ case Builtin::BI__sync_or_and_fetch_1:
+ case Builtin::BI__sync_or_and_fetch_2:
+ case Builtin::BI__sync_or_and_fetch_4:
+ case Builtin::BI__sync_or_and_fetch_8:
+ case Builtin::BI__sync_or_and_fetch_16:
+ case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_xor_and_fetch_1:
+ case Builtin::BI__sync_xor_and_fetch_2:
+ case Builtin::BI__sync_xor_and_fetch_4:
+ case Builtin::BI__sync_xor_and_fetch_8:
+ case Builtin::BI__sync_xor_and_fetch_16:
+ case Builtin::BI__sync_nand_and_fetch:
+ case Builtin::BI__sync_nand_and_fetch_1:
+ case Builtin::BI__sync_nand_and_fetch_2:
+ case Builtin::BI__sync_nand_and_fetch_4:
+ case Builtin::BI__sync_nand_and_fetch_8:
+ case Builtin::BI__sync_nand_and_fetch_16:
+ case Builtin::BI__sync_val_compare_and_swap:
+ case Builtin::BI__sync_val_compare_and_swap_1:
+ case Builtin::BI__sync_val_compare_and_swap_2:
+ case Builtin::BI__sync_val_compare_and_swap_4:
+ case Builtin::BI__sync_val_compare_and_swap_8:
+ case Builtin::BI__sync_val_compare_and_swap_16:
+ case Builtin::BI__sync_bool_compare_and_swap:
+ case Builtin::BI__sync_bool_compare_and_swap_1:
+ case Builtin::BI__sync_bool_compare_and_swap_2:
+ case Builtin::BI__sync_bool_compare_and_swap_4:
+ case Builtin::BI__sync_bool_compare_and_swap_8:
+ case Builtin::BI__sync_bool_compare_and_swap_16:
+ case Builtin::BI__sync_lock_test_and_set:
+ case Builtin::BI__sync_lock_test_and_set_1:
+ case Builtin::BI__sync_lock_test_and_set_2:
+ case Builtin::BI__sync_lock_test_and_set_4:
+ case Builtin::BI__sync_lock_test_and_set_8:
+ case Builtin::BI__sync_lock_test_and_set_16:
+ case Builtin::BI__sync_lock_release:
+ case Builtin::BI__sync_lock_release_1:
+ case Builtin::BI__sync_lock_release_2:
+ case Builtin::BI__sync_lock_release_4:
+ case Builtin::BI__sync_lock_release_8:
+ case Builtin::BI__sync_lock_release_16:
+ case Builtin::BI__sync_swap:
+ case Builtin::BI__sync_swap_1:
+ case Builtin::BI__sync_swap_2:
+ case Builtin::BI__sync_swap_4:
+ case Builtin::BI__sync_swap_8:
+ case Builtin::BI__sync_swap_16:
+ return SemaBuiltinAtomicOverloaded(TheCallResult);
+ case Builtin::BI__builtin_nontemporal_load:
+ case Builtin::BI__builtin_nontemporal_store:
+ return SemaBuiltinNontemporalOverloaded(TheCallResult);
+#define BUILTIN(ID, TYPE, ATTRS)
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
+ case Builtin::BI##ID: \
+ return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
+#include "clang/Basic/Builtins.def"
+ case Builtin::BI__builtin_annotation:
+ if (SemaBuiltinAnnotation(*this, TheCall))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_addressof:
+ if (SemaBuiltinAddressof(*this, TheCall))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_add_overflow:
+ case Builtin::BI__builtin_sub_overflow:
+ case Builtin::BI__builtin_mul_overflow:
+ if (SemaBuiltinOverflow(*this, TheCall))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_operator_new:
+ case Builtin::BI__builtin_operator_delete:
+ if (!getLangOpts().CPlusPlus) {
+ Diag(TheCall->getExprLoc(), diag::err_builtin_requires_language)
+ << (BuiltinID == Builtin::BI__builtin_operator_new
+ ? "__builtin_operator_new"
+ : "__builtin_operator_delete")
+ << "C++";
+ return ExprError();
+ }
+ // CodeGen assumes it can find the global new and delete to call,
+ // so ensure that they are declared.
+ DeclareGlobalNewDelete();
+ break;
+
+ // Check secure string manipulation functions where overflows
+ // are detectable at compile time.
+ case Builtin::BI__builtin___memcpy_chk:
+ case Builtin::BI__builtin___memmove_chk:
+ case Builtin::BI__builtin___memset_chk:
+ case Builtin::BI__builtin___strlcat_chk:
+ case Builtin::BI__builtin___strlcpy_chk:
+ case Builtin::BI__builtin___strncat_chk:
+ case Builtin::BI__builtin___strncpy_chk:
+ case Builtin::BI__builtin___stpncpy_chk:
+ SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3);
+ break;
+ case Builtin::BI__builtin___memccpy_chk:
+ SemaBuiltinMemChkCall(*this, FDecl, TheCall, 3, 4);
+ break;
+ case Builtin::BI__builtin___snprintf_chk:
+ case Builtin::BI__builtin___vsnprintf_chk:
+ SemaBuiltinMemChkCall(*this, FDecl, TheCall, 1, 3);
+ break;
+
+ case Builtin::BI__builtin_call_with_static_chain:
+ if (SemaBuiltinCallWithStaticChain(*this, TheCall))
+ return ExprError();
+ break;
+
+ case Builtin::BI__exception_code:
+ case Builtin::BI_exception_code: {
+ if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
+ diag::err_seh___except_block))
+ return ExprError();
+ break;
+ }
+ case Builtin::BI__exception_info:
+ case Builtin::BI_exception_info: {
+ if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
+ diag::err_seh___except_filter))
+ return ExprError();
+ break;
+ }
+
+ case Builtin::BI__GetExceptionInfo:
+ if (checkArgCount(*this, TheCall, 1))
+ return ExprError();
+
+ if (CheckCXXThrowOperand(
+ TheCall->getLocStart(),
+ Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
+ TheCall))
+ return ExprError();
+
+ TheCall->setType(Context.VoidPtrTy);
+ break;
+
+ }
+
+ // Since the target specific builtins for each arch overlap, only check those
+ // of the arch we are compiling for.
+ if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
+ switch (Context.getTargetInfo().getTriple().getArch()) {
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
+ return ExprError();
+ break;
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be:
+ if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall))
+ return ExprError();
+ break;
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall))
+ return ExprError();
+ break;
+ case llvm::Triple::systemz:
+ if (CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall))
+ return ExprError();
+ break;
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall))
+ return ExprError();
+ break;
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ if (CheckPPCBuiltinFunctionCall(BuiltinID, TheCall))
+ return ExprError();
+ break;
+ default:
+ break;
+ }
+ }
+
+ return TheCallResult;
+}
+
+// Get the maximum valid immediate (lane index or shift amount) for the
+// specified NEON type code.
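+// Illustrative (hypothetical): Int8 with the quad bit set means 16 lanes, so
+// the bound is (8 << 1) - 1 = 15; for shifts the bound is 7 regardless.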
+static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
+ NeonTypeFlags Type(t);
+ int IsQuad = ForceQuad ? true : Type.isQuad();
+ switch (Type.getEltType()) {
+ case NeonTypeFlags::Int8:
+ case NeonTypeFlags::Poly8:
+ return shift ? 7 : (8 << IsQuad) - 1;
+ case NeonTypeFlags::Int16:
+ case NeonTypeFlags::Poly16:
+ return shift ? 15 : (4 << IsQuad) - 1;
+ case NeonTypeFlags::Int32:
+ return shift ? 31 : (2 << IsQuad) - 1;
+ case NeonTypeFlags::Int64:
+ case NeonTypeFlags::Poly64:
+ return shift ? 63 : (1 << IsQuad) - 1;
+ case NeonTypeFlags::Poly128:
+ return shift ? 127 : (1 << IsQuad) - 1;
+ case NeonTypeFlags::Float16:
+ assert(!shift && "cannot shift float types!");
+ return (4 << IsQuad) - 1;
+ case NeonTypeFlags::Float32:
+ assert(!shift && "cannot shift float types!");
+ return (2 << IsQuad) - 1;
+ case NeonTypeFlags::Float64:
+ assert(!shift && "cannot shift float types!");
+ return (1 << IsQuad) - 1;
+ }
+ llvm_unreachable("Invalid NeonTypeFlag!");
+}
+
+/// getNeonEltType - Return the QualType corresponding to the elements of
+/// the vector type specified by the NeonTypeFlags. This is used to check
+/// the pointer arguments for Neon load/store intrinsics.
+static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
+ bool IsPolyUnsigned, bool IsInt64Long) {
+ switch (Flags.getEltType()) {
+ case NeonTypeFlags::Int8:
+ return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
+ case NeonTypeFlags::Int16:
+ return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
+ case NeonTypeFlags::Int32:
+ return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
+ case NeonTypeFlags::Int64:
+ if (IsInt64Long)
+ return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
+ else
+ return Flags.isUnsigned() ? Context.UnsignedLongLongTy
+ : Context.LongLongTy;
+ case NeonTypeFlags::Poly8:
+ return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
+ case NeonTypeFlags::Poly16:
+ return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
+ case NeonTypeFlags::Poly64:
+ if (IsInt64Long)
+ return Context.UnsignedLongTy;
+ else
+ return Context.UnsignedLongLongTy;
+ case NeonTypeFlags::Poly128:
+ break;
+ case NeonTypeFlags::Float16:
+ return Context.HalfTy;
+ case NeonTypeFlags::Float32:
+ return Context.FloatTy;
+ case NeonTypeFlags::Float64:
+ return Context.DoubleTy;
+ }
+ llvm_unreachable("Invalid NeonTypeFlag!");
+}
+
+bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+ llvm::APSInt Result;
+ uint64_t mask = 0;
+ unsigned TV = 0;
+ int PtrArgNum = -1;
+ bool HasConstPtr = false;
+ switch (BuiltinID) {
+#define GET_NEON_OVERLOAD_CHECK
+#include "clang/Basic/arm_neon.inc"
+#undef GET_NEON_OVERLOAD_CHECK
+ }
+
+ // For NEON intrinsics which are overloaded on vector element type, validate
+ // the immediate which specifies which variant to emit.
+ unsigned ImmArg = TheCall->getNumArgs()-1;
+ if (mask) {
+ if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
+ return true;
+
+ TV = Result.getLimitedValue(64);
+ if ((TV > 63) || (mask & (1ULL << TV)) == 0)
+ return Diag(TheCall->getLocStart(), diag::err_invalid_neon_type_code)
+ << TheCall->getArg(ImmArg)->getSourceRange();
+ }
+
+ if (PtrArgNum >= 0) {
+ // Check that pointer arguments have the specified type.
+ Expr *Arg = TheCall->getArg(PtrArgNum);
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
+ Arg = ICE->getSubExpr();
+ ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
+ QualType RHSTy = RHS.get()->getType();
+
+ llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
+ bool IsPolyUnsigned = Arch == llvm::Triple::aarch64;
+ bool IsInt64Long =
+ Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong;
+ QualType EltTy =
+ getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
+ if (HasConstPtr)
+ EltTy = EltTy.withConst();
+ QualType LHSTy = Context.getPointerType(EltTy);
+ AssignConvertType ConvTy;
+ ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
+ if (RHS.isInvalid())
+ return true;
+ if (DiagnoseAssignmentResult(ConvTy, Arg->getLocStart(), LHSTy, RHSTy,
+ RHS.get(), AA_Assigning))
+ return true;
+ }
+
+ // For NEON intrinsics which take an immediate value as part of the
+ // instruction, range check them here.
+ unsigned i = 0, l = 0, u = 0;
+ switch (BuiltinID) {
+ default:
+ return false;
+#define GET_NEON_IMMEDIATE_CHECK
+#include "clang/Basic/arm_neon.inc"
+#undef GET_NEON_IMMEDIATE_CHECK
+ }
+
+ return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
+}
+
+bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
+ unsigned MaxWidth) {
+ assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
+ BuiltinID == ARM::BI__builtin_arm_ldaex ||
+ BuiltinID == ARM::BI__builtin_arm_strex ||
+ BuiltinID == ARM::BI__builtin_arm_stlex ||
+ BuiltinID == AArch64::BI__builtin_arm_ldrex ||
+ BuiltinID == AArch64::BI__builtin_arm_ldaex ||
+ BuiltinID == AArch64::BI__builtin_arm_strex ||
+ BuiltinID == AArch64::BI__builtin_arm_stlex) &&
+ "unexpected ARM builtin");
+ bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
+ BuiltinID == ARM::BI__builtin_arm_ldaex ||
+ BuiltinID == AArch64::BI__builtin_arm_ldrex ||
+ BuiltinID == AArch64::BI__builtin_arm_ldaex;
+
+ DeclRefExpr *DRE =
+ cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+
+ // Ensure that we have the proper number of arguments.
+ if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
+ return true;
+
+ // Inspect the pointer argument of the atomic builtin. This should always be
+ // a pointer type, whose element is an integral scalar or pointer type.
+ // Because it is a pointer type, we don't have to worry about any implicit
+ // casts here.
+ Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
+ ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
+ if (PointerArgRes.isInvalid())
+ return true;
+ PointerArg = PointerArgRes.get();
+
+ const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
+ if (!pointerType) {
+ Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return true;
+ }
+
+ // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
+ // task is to insert the appropriate casts into the AST. First work out just
+ // what the appropriate type is.
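+ // Illustrative (hypothetical): for '__builtin_arm_ldrex(p)' with 'int *p',
+ // the argument is cast to 'const volatile int *'; strex uses 'volatile int *'.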
+ QualType ValType = pointerType->getPointeeType();
+ QualType AddrType = ValType.getUnqualifiedType().withVolatile();
+ if (IsLdrex)
+ AddrType.addConst();
+
+ // Issue a warning if the cast is dodgy.
+ CastKind CastNeeded = CK_NoOp;
+ if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
+ CastNeeded = CK_BitCast;
+ Diag(DRE->getLocStart(), diag::ext_typecheck_convert_discards_qualifiers)
+ << PointerArg->getType()
+ << Context.getPointerType(AddrType)
+ << AA_Passing << PointerArg->getSourceRange();
+ }
+
+ // Finally, do the cast and replace the argument with the corrected version.
+ AddrType = Context.getPointerType(AddrType);
+ PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
+ if (PointerArgRes.isInvalid())
+ return true;
+ PointerArg = PointerArgRes.get();
+
+ TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);
+
+ // In general, we allow ints, floats and pointers to be loaded and stored.
+ if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
+ !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return true;
+ }
+
+ // But ARM doesn't have instructions to deal with 128-bit versions.
+ if (Context.getTypeSize(ValType) > MaxWidth) {
+ assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
+ Diag(DRE->getLocStart(), diag::err_atomic_exclusive_builtin_pointer_size)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return true;
+ }
+
+ switch (ValType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ // okay
+ break;
+
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Autoreleasing:
+ Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership)
+ << ValType << PointerArg->getSourceRange();
+ return true;
+ }
+
+ if (IsLdrex) {
+ TheCall->setType(ValType);
+ return false;
+ }
+
+ // Initialize the argument to be stored.
+ ExprResult ValArg = TheCall->getArg(0);
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(
+ Context, ValType, /*consume*/ false);
+ ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
+ if (ValArg.isInvalid())
+ return true;
+ TheCall->setArg(0, ValArg.get());
+
+ // __builtin_arm_strex always returns an int. It's marked as such in the .def,
+ // but the custom checker bypasses all default analysis.
+ TheCall->setType(Context.IntTy);
+ return false;
+}
+
+bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+ llvm::APSInt Result;
+
+ if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
+ BuiltinID == ARM::BI__builtin_arm_ldaex ||
+ BuiltinID == ARM::BI__builtin_arm_strex ||
+ BuiltinID == ARM::BI__builtin_arm_stlex) {
+ return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
+ SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
+ BuiltinID == ARM::BI__builtin_arm_wsr64)
+ return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);
+
+ if (BuiltinID == ARM::BI__builtin_arm_rsr ||
+ BuiltinID == ARM::BI__builtin_arm_rsrp ||
+ BuiltinID == ARM::BI__builtin_arm_wsr ||
+ BuiltinID == ARM::BI__builtin_arm_wsrp)
+ return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
+
+ if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
+ return true;
+
+ // For intrinsics which take an immediate value as part of the instruction,
+ // range check them here.
+ unsigned i = 0, l = 0, u = 0;
+ switch (BuiltinID) {
+ default: return false;
+ case ARM::BI__builtin_arm_ssat: i = 1; l = 1; u = 31; break;
+ case ARM::BI__builtin_arm_usat: i = 1; u = 31; break;
+ case ARM::BI__builtin_arm_vcvtr_f:
+ case ARM::BI__builtin_arm_vcvtr_d: i = 1; u = 1; break;
+ case ARM::BI__builtin_arm_dmb:
+ case ARM::BI__builtin_arm_dsb:
+ case ARM::BI__builtin_arm_isb:
+ case ARM::BI__builtin_arm_dbg: l = 0; u = 15; break;
+ }
+
+ // FIXME: VFP Intrinsics should error if VFP not present.
+ return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
+}
+
+bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
+ CallExpr *TheCall) {
+ llvm::APSInt Result;
+
+ if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
+ BuiltinID == AArch64::BI__builtin_arm_ldaex ||
+ BuiltinID == AArch64::BI__builtin_arm_strex ||
+ BuiltinID == AArch64::BI__builtin_arm_stlex) {
+ return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
+ SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) ||
+ SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
+ SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
+ BuiltinID == AArch64::BI__builtin_arm_wsr64)
+ return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, false);
+
+ if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
+ BuiltinID == AArch64::BI__builtin_arm_rsrp ||
+ BuiltinID == AArch64::BI__builtin_arm_wsr ||
+ BuiltinID == AArch64::BI__builtin_arm_wsrp)
+ return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
+
+ if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
+ return true;
+
+ // For intrinsics which take an immediate value as part of the instruction,
+ // range check them here.
+ unsigned i = 0, l = 0, u = 0;
+ switch (BuiltinID) {
+ default: return false;
+ case AArch64::BI__builtin_arm_dmb:
+ case AArch64::BI__builtin_arm_dsb:
+ case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
+ }
+
+ return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
+}
+
+bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+ unsigned i = 0, l = 0, u = 0;
+ switch (BuiltinID) {
+ default: return false;
+ case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
+ case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
+ case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
+ case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
+ case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
+ case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
+ case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
+ }
+
+ return SemaBuiltinConstantArgRange(TheCall, i, l, u);
+}
+
+bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+ unsigned i = 0, l = 0, u = 0;
+ bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde ||
+ BuiltinID == PPC::BI__builtin_divdeu ||
+ BuiltinID == PPC::BI__builtin_bpermd;
+ bool IsTarget64Bit =
+ Context.getTargetInfo().getTypeWidth(
+ Context.getTargetInfo().getIntPtrType()) == 64;
+ bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe ||
+ BuiltinID == PPC::BI__builtin_divweu ||
+ BuiltinID == PPC::BI__builtin_divde ||
+ BuiltinID == PPC::BI__builtin_divdeu;
+
+ if (Is64BitBltin && !IsTarget64Bit)
+ return Diag(TheCall->getLocStart(), diag::err_64_bit_builtin_32_bit_tgt)
+ << TheCall->getSourceRange();
+
+ if ((IsBltinExtDiv && !Context.getTargetInfo().hasFeature("extdiv")) ||
+ (BuiltinID == PPC::BI__builtin_bpermd &&
+ !Context.getTargetInfo().hasFeature("bpermd")))
+ return Diag(TheCall->getLocStart(), diag::err_ppc_builtin_only_on_pwr7)
+ << TheCall->getSourceRange();
+
+ switch (BuiltinID) {
+ default: return false;
+ case PPC::BI__builtin_altivec_crypto_vshasigmaw:
+ case PPC::BI__builtin_altivec_crypto_vshasigmad:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
+ SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
+ case PPC::BI__builtin_tbegin:
+ case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break;
+ case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break;
+ case PPC::BI__builtin_tabortwc:
+ case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break;
+ case PPC::BI__builtin_tabortwci:
+ case PPC::BI__builtin_tabortdci:
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
+ SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
+ }
+ return SemaBuiltinConstantArgRange(TheCall, i, l, u);
+}
+
+bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
+ CallExpr *TheCall) {
+ if (BuiltinID == SystemZ::BI__builtin_tabort) {
+ Expr *Arg = TheCall->getArg(0);
+ llvm::APSInt AbortCode(32);
+ if (Arg->isIntegerConstantExpr(AbortCode, Context) &&
+ AbortCode.getSExtValue() >= 0 && AbortCode.getSExtValue() < 256)
+ return Diag(Arg->getLocStart(), diag::err_systemz_invalid_tabort_code)
+ << Arg->getSourceRange();
+ }
+
+ // For intrinsics which take an immediate value as part of the instruction,
+ // range check them here.
+ unsigned i = 0, l = 0, u = 0;
+ switch (BuiltinID) {
+ default: return false;
+ case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_verimb:
+ case SystemZ::BI__builtin_s390_verimh:
+ case SystemZ::BI__builtin_s390_verimf:
+ case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
+ case SystemZ::BI__builtin_s390_vfaeb:
+ case SystemZ::BI__builtin_s390_vfaeh:
+ case SystemZ::BI__builtin_s390_vfaef:
+ case SystemZ::BI__builtin_s390_vfaebs:
+ case SystemZ::BI__builtin_s390_vfaehs:
+ case SystemZ::BI__builtin_s390_vfaefs:
+ case SystemZ::BI__builtin_s390_vfaezb:
+ case SystemZ::BI__builtin_s390_vfaezh:
+ case SystemZ::BI__builtin_s390_vfaezf:
+ case SystemZ::BI__builtin_s390_vfaezbs:
+ case SystemZ::BI__builtin_s390_vfaezhs:
+ case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vfidb:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) ||
+ SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
+ case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
+ case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vstrcb:
+ case SystemZ::BI__builtin_s390_vstrch:
+ case SystemZ::BI__builtin_s390_vstrcf:
+ case SystemZ::BI__builtin_s390_vstrczb:
+ case SystemZ::BI__builtin_s390_vstrczh:
+ case SystemZ::BI__builtin_s390_vstrczf:
+ case SystemZ::BI__builtin_s390_vstrcbs:
+ case SystemZ::BI__builtin_s390_vstrchs:
+ case SystemZ::BI__builtin_s390_vstrcfs:
+ case SystemZ::BI__builtin_s390_vstrczbs:
+ case SystemZ::BI__builtin_s390_vstrczhs:
+ case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
+ }
+ return SemaBuiltinConstantArgRange(TheCall, i, l, u);
+}
+
+/// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
+/// This checks that the target supports __builtin_cpu_supports and
+/// that the string argument is constant and valid.
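+///
+/// Illustrative usage (hypothetical): '__builtin_cpu_supports("sse4.2")' is
+/// accepted on x86; a non-literal or unrecognized feature name is diagnosed.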
+static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) {
+ Expr *Arg = TheCall->getArg(0);
+
+ // Check if the argument is a string literal.
+ if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
+ return S.Diag(TheCall->getLocStart(), diag::err_expr_not_string_literal)
+ << Arg->getSourceRange();
+
+ // Check the contents of the string.
+ StringRef Feature =
+ cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
+ if (!S.Context.getTargetInfo().validateCpuSupports(Feature))
+ return S.Diag(TheCall->getLocStart(), diag::err_invalid_cpu_supports)
+ << Arg->getSourceRange();
+ return false;
+}
+
+bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+ unsigned i = 0, l = 0, u = 0;
+ switch (BuiltinID) {
+ default: return false;
+ case X86::BI__builtin_cpu_supports:
+ return SemaBuiltinCpuSupports(*this, TheCall);
+ case X86::BI__builtin_ms_va_start:
+ return SemaBuiltinMSVAStart(TheCall);
+ case X86::BI_mm_prefetch: i = 1; l = 0; u = 3; break;
+ case X86::BI__builtin_ia32_sha1rnds4: i = 2; l = 0; u = 3; break;
+ case X86::BI__builtin_ia32_vpermil2pd:
+ case X86::BI__builtin_ia32_vpermil2pd256:
+ case X86::BI__builtin_ia32_vpermil2ps:
+ case X86::BI__builtin_ia32_vpermil2ps256: i = 3; l = 0; u = 3; break;
+ case X86::BI__builtin_ia32_cmpb128_mask:
+ case X86::BI__builtin_ia32_cmpw128_mask:
+ case X86::BI__builtin_ia32_cmpd128_mask:
+ case X86::BI__builtin_ia32_cmpq128_mask:
+ case X86::BI__builtin_ia32_cmpb256_mask:
+ case X86::BI__builtin_ia32_cmpw256_mask:
+ case X86::BI__builtin_ia32_cmpd256_mask:
+ case X86::BI__builtin_ia32_cmpq256_mask:
+ case X86::BI__builtin_ia32_cmpb512_mask:
+ case X86::BI__builtin_ia32_cmpw512_mask:
+ case X86::BI__builtin_ia32_cmpd512_mask:
+ case X86::BI__builtin_ia32_cmpq512_mask:
+ case X86::BI__builtin_ia32_ucmpb128_mask:
+ case X86::BI__builtin_ia32_ucmpw128_mask:
+ case X86::BI__builtin_ia32_ucmpd128_mask:
+ case X86::BI__builtin_ia32_ucmpq128_mask:
+ case X86::BI__builtin_ia32_ucmpb256_mask:
+ case X86::BI__builtin_ia32_ucmpw256_mask:
+ case X86::BI__builtin_ia32_ucmpd256_mask:
+ case X86::BI__builtin_ia32_ucmpq256_mask:
+ case X86::BI__builtin_ia32_ucmpb512_mask:
+ case X86::BI__builtin_ia32_ucmpw512_mask:
+ case X86::BI__builtin_ia32_ucmpd512_mask:
+ case X86::BI__builtin_ia32_ucmpq512_mask: i = 2; l = 0; u = 7; break;
+ case X86::BI__builtin_ia32_roundps:
+ case X86::BI__builtin_ia32_roundpd:
+ case X86::BI__builtin_ia32_roundps256:
+ case X86::BI__builtin_ia32_roundpd256: i = 1; l = 0; u = 15; break;
+ case X86::BI__builtin_ia32_roundss:
+ case X86::BI__builtin_ia32_roundsd: i = 2; l = 0; u = 15; break;
+ case X86::BI__builtin_ia32_cmpps:
+ case X86::BI__builtin_ia32_cmpss:
+ case X86::BI__builtin_ia32_cmppd:
+ case X86::BI__builtin_ia32_cmpsd:
+ case X86::BI__builtin_ia32_cmpps256:
+ case X86::BI__builtin_ia32_cmppd256:
+ case X86::BI__builtin_ia32_cmpps512_mask:
+ case X86::BI__builtin_ia32_cmppd512_mask: i = 2; l = 0; u = 31; break;
+ case X86::BI__builtin_ia32_vpcomub:
+ case X86::BI__builtin_ia32_vpcomuw:
+ case X86::BI__builtin_ia32_vpcomud:
+ case X86::BI__builtin_ia32_vpcomuq:
+ case X86::BI__builtin_ia32_vpcomb:
+ case X86::BI__builtin_ia32_vpcomw:
+ case X86::BI__builtin_ia32_vpcomd:
+ case X86::BI__builtin_ia32_vpcomq: i = 2; l = 0; u = 7; break;
+ }
+ return SemaBuiltinConstantArgRange(TheCall, i, l, u);
+}
+
+/// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
+/// parameter with the FormatAttr's correct format_idx and firstDataArg.
+/// Returns true when the format fits the function and the FormatStringInfo has
+/// been populated.
+bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
+ FormatStringInfo *FSI) {
+ FSI->HasVAListArg = Format->getFirstArg() == 0;
+ FSI->FormatIdx = Format->getFormatIdx() - 1;
+ FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;
+
+ // The way the format attribute works in GCC, the implicit this argument
+ // of member functions is counted. However, it doesn't appear in our own
+ // lists, so decrement format_idx in that case.
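+ // Illustrative (hypothetical): format(printf, 2, 3) on a non-static member
+ // function names its first explicit parameter; after the adjustment,
+ // FormatIdx is 0.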
+ if (IsCXXMember) {
+ if(FSI->FormatIdx == 0)
+ return false;
+ --FSI->FormatIdx;
+ if (FSI->FirstDataArg != 0)
+ --FSI->FirstDataArg;
+ }
+ return true;
+}
+
+/// Checks if the given expression evaluates to null.
+///
+/// \brief Returns true if the value evaluates to null.
+static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
+ // If the expression has non-null type, it doesn't evaluate to null.
+ if (auto nullability
+ = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
+ if (*nullability == NullabilityKind::NonNull)
+ return false;
+ }
+
+ // As a special case, transparent unions initialized with zero are
+ // considered null for the purposes of the nonnull attribute.
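+ // Illustrative (hypothetical): passing '(union U){0}' to a parameter whose
+ // type carries __attribute__((transparent_union)) is treated as null here.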
+ if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
+ if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ if (const CompoundLiteralExpr *CLE =
+ dyn_cast<CompoundLiteralExpr>(Expr))
+ if (const InitListExpr *ILE =
+ dyn_cast<InitListExpr>(CLE->getInitializer()))
+ Expr = ILE->getInit(0);
+ }
+
+ bool Result;
+ return (!Expr->isValueDependent() &&
+ Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
+ !Result);
+}
+
+static void CheckNonNullArgument(Sema &S,
+ const Expr *ArgExpr,
+ SourceLocation CallSiteLoc) {
+ if (CheckNonNullExpr(S, ArgExpr))
+ S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
+ S.PDiag(diag::warn_null_arg) << ArgExpr->getSourceRange());
+}
+
+bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
+ FormatStringInfo FSI;
+ if ((GetFormatStringType(Format) == FST_NSString) &&
+ getFormatStringInfo(Format, false, &FSI)) {
+ Idx = FSI.FormatIdx;
+ return true;
+ }
+ return false;
+}
+/// \brief Diagnose use of a %s directive in an NSString which is being passed
+/// as a format string to a formatting method.
+static void
+DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
+ const NamedDecl *FDecl,
+ Expr **Args,
+ unsigned NumArgs) {
+ unsigned Idx = 0;
+ bool Format = false;
+ ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
+ if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
+ Idx = 2;
+ Format = true;
+ }
+ else
+ for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
+ if (S.GetFormatNSStringIdx(I, Idx)) {
+ Format = true;
+ break;
+ }
+ }
+ if (!Format || NumArgs <= Idx)
+ return;
+ const Expr *FormatExpr = Args[Idx];
+ if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
+ FormatExpr = CSCE->getSubExpr();
+ const StringLiteral *FormatString;
+ if (const ObjCStringLiteral *OSL =
+ dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
+ FormatString = OSL->getString();
+ else
+ FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
+ if (!FormatString)
+ return;
+ if (S.FormatStringHasSArg(FormatString)) {
+ S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
+ << "%s" << 1 << 1;
+ S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
+ << FDecl->getDeclName();
+ }
+}
+
+/// Determine whether the given type has a non-null nullability annotation.
+static bool isNonNullType(ASTContext &ctx, QualType type) {
+ if (auto nullability = type->getNullability(ctx))
+ return *nullability == NullabilityKind::NonNull;
+
+ return false;
+}
+
+static void CheckNonNullArguments(Sema &S,
+ const NamedDecl *FDecl,
+ const FunctionProtoType *Proto,
+ ArrayRef<const Expr *> Args,
+ SourceLocation CallSiteLoc) {
+ assert((FDecl || Proto) && "Need a function declaration or prototype");
+
+ // Check the attributes attached to the method/function itself.
+ llvm::SmallBitVector NonNullArgs;
+ if (FDecl) {
+ // Handle the nonnull attribute on the function/method declaration itself.
+ for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
+ if (!NonNull->args_size()) {
+ // Easy case: all pointer arguments are nonnull.
+ for (const auto *Arg : Args)
+ if (S.isValidPointerAttrType(Arg->getType()))
+ CheckNonNullArgument(S, Arg, CallSiteLoc);
+ return;
+ }
+
+ for (unsigned Val : NonNull->args()) {
+ if (Val >= Args.size())
+ continue;
+ if (NonNullArgs.empty())
+ NonNullArgs.resize(Args.size());
+ NonNullArgs.set(Val);
+ }
+ }
+ }
+
+ if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
+ // Handle the nonnull attribute on the parameters of the
+ // function/method.
+ ArrayRef<ParmVarDecl*> parms;
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
+ parms = FD->parameters();
+ else
+ parms = cast<ObjCMethodDecl>(FDecl)->parameters();
+
+ unsigned ParamIndex = 0;
+ for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
+ I != E; ++I, ++ParamIndex) {
+ const ParmVarDecl *PVD = *I;
+ if (PVD->hasAttr<NonNullAttr>() ||
+ isNonNullType(S.Context, PVD->getType())) {
+ if (NonNullArgs.empty())
+ NonNullArgs.resize(Args.size());
+
+ NonNullArgs.set(ParamIndex);
+ }
+ }
+ } else {
+ // If we have a non-function, non-method declaration but no
+ // function prototype, try to dig out the function prototype.
+ if (!Proto) {
+ if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
+ QualType type = VD->getType().getNonReferenceType();
+ if (auto pointerType = type->getAs<PointerType>())
+ type = pointerType->getPointeeType();
+ else if (auto blockType = type->getAs<BlockPointerType>())
+ type = blockType->getPointeeType();
+ // FIXME: data member pointers?
+
+ // Dig out the function prototype, if there is one.
+ Proto = type->getAs<FunctionProtoType>();
+ }
+ }
+
+ // Fill in non-null argument information from the nullability
+ // information on the parameter types (if we have them).
+ if (Proto) {
+ unsigned Index = 0;
+ for (auto paramType : Proto->getParamTypes()) {
+ if (isNonNullType(S.Context, paramType)) {
+ if (NonNullArgs.empty())
+ NonNullArgs.resize(Args.size());
+
+ NonNullArgs.set(Index);
+ }
+
+ ++Index;
+ }
+ }
+ }
+
+ // Check for non-null arguments.
+ for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
+ ArgIndex != ArgIndexEnd; ++ArgIndex) {
+ if (NonNullArgs[ArgIndex])
+ CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc);
+ }
+}
+
+/// Handles the checks for format strings, non-POD arguments to vararg
+/// functions, and NULL arguments passed to non-NULL parameters.
+void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
+ ArrayRef<const Expr *> Args, bool IsMemberFunction,
+ SourceLocation Loc, SourceRange Range,
+ VariadicCallType CallType) {
+ // FIXME: We should check as much as we can in the template definition.
+ if (CurContext->isDependentContext())
+ return;
+
+ // Printf and scanf checking.
+ llvm::SmallBitVector CheckedVarArgs;
+ if (FDecl) {
+ for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
+ // Only create vector if there are format attributes.
+ CheckedVarArgs.resize(Args.size());
+
+ CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
+ CheckedVarArgs);
+ }
+ }
+
+ // Refuse POD arguments that weren't caught by the format string
+ // checks above.
+ if (CallType != VariadicDoesNotApply) {
+ unsigned NumParams = Proto ? Proto->getNumParams()
+ : FDecl && isa<FunctionDecl>(FDecl)
+ ? cast<FunctionDecl>(FDecl)->getNumParams()
+ : FDecl && isa<ObjCMethodDecl>(FDecl)
+ ? cast<ObjCMethodDecl>(FDecl)->param_size()
+ : 0;
+
+ for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
+ // Args[ArgIdx] can be null in malformed code.
+ if (const Expr *Arg = Args[ArgIdx]) {
+ if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
+ checkVariadicArgument(Arg, CallType);
+ }
+ }
+ }
+
+ if (FDecl || Proto) {
+ CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);
+
+ // Type safety checking.
+ if (FDecl) {
+ for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
+ CheckArgumentWithTypeTag(I, Args.data());
+ }
+ }
+}
+
+/// CheckConstructorCall - Check a constructor call for correctness and safety
+/// properties not enforced by the C type system.
+void Sema::CheckConstructorCall(FunctionDecl *FDecl,
+ ArrayRef<const Expr *> Args,
+ const FunctionProtoType *Proto,
+ SourceLocation Loc) {
+ VariadicCallType CallType =
+ Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;
+ checkCall(FDecl, Proto, Args, /*IsMemberFunction=*/true, Loc, SourceRange(),
+ CallType);
+}
+
+/// CheckFunctionCall - Check a direct function call for various correctness
+/// and safety properties not strictly enforced by the C type system.
+bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
+ const FunctionProtoType *Proto) {
+ bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
+ isa<CXXMethodDecl>(FDecl);
+ bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) ||
+ IsMemberOperatorCall;
+ VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
+ TheCall->getCallee());
+ Expr** Args = TheCall->getArgs();
+ unsigned NumArgs = TheCall->getNumArgs();
+ if (IsMemberOperatorCall) {
+ // If this is a call to a member operator, hide the first argument
+ // from checkCall.
+ // FIXME: Our choice of AST representation here is less than ideal.
+ ++Args;
+ --NumArgs;
+ }
+ checkCall(FDecl, Proto, llvm::makeArrayRef(Args, NumArgs),
+ IsMemberFunction, TheCall->getRParenLoc(),
+ TheCall->getCallee()->getSourceRange(), CallType);
+
+ IdentifierInfo *FnInfo = FDecl->getIdentifier();
+ // None of the checks below are needed for functions that don't have
+ // simple names (e.g., C++ conversion functions).
+ if (!FnInfo)
+ return false;
+
+ CheckAbsoluteValueFunction(TheCall, FDecl, FnInfo);
+ if (getLangOpts().ObjC1)
+ DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);
+
+ unsigned CMId = FDecl->getMemoryFunctionKind();
+ if (CMId == 0)
+ return false;
+
+ // Handle memory setting and copying functions.
+ if (CMId == Builtin::BIstrlcpy || CMId == Builtin::BIstrlcat)
+ CheckStrlcpycatArguments(TheCall, FnInfo);
+ else if (CMId == Builtin::BIstrncat)
+ CheckStrncatArguments(TheCall, FnInfo);
+ else
+ CheckMemaccessArguments(TheCall, CMId, FnInfo);
+
+ return false;
+}
+
+bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
+ ArrayRef<const Expr *> Args) {
+ VariadicCallType CallType =
+ Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply;
+
+ checkCall(Method, nullptr, Args,
+ /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
+ CallType);
+
+ return false;
+}
+
+bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
+ const FunctionProtoType *Proto) {
+ QualType Ty;
+ if (const auto *V = dyn_cast<VarDecl>(NDecl))
+ Ty = V->getType().getNonReferenceType();
+ else if (const auto *F = dyn_cast<FieldDecl>(NDecl))
+ Ty = F->getType().getNonReferenceType();
+ else
+ return false;
+
+ if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() &&
+ !Ty->isFunctionProtoType())
+ return false;
+
+ VariadicCallType CallType;
+ if (!Proto || !Proto->isVariadic()) {
+ CallType = VariadicDoesNotApply;
+ } else if (Ty->isBlockPointerType()) {
+ CallType = VariadicBlock;
+ } else { // Ty->isFunctionPointerType()
+ CallType = VariadicFunction;
+ }
+
+ checkCall(NDecl, Proto,
+ llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
+ /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
+ TheCall->getCallee()->getSourceRange(), CallType);
+
+ return false;
+}
+
+/// Checks function calls when a FunctionDecl or a NamedDecl is not available,
+/// such as function pointers returned from functions.
+bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
+ VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
+ TheCall->getCallee());
+ checkCall(/*FDecl=*/nullptr, Proto,
+ llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
+ /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
+ TheCall->getCallee()->getSourceRange(), CallType);
+
+ return false;
+}
+
+static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
+ if (Ordering < AtomicExpr::AO_ABI_memory_order_relaxed ||
+ Ordering > AtomicExpr::AO_ABI_memory_order_seq_cst)
+ return false;
+
+ switch (Op) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("There is no ordering argument for an init");
+
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__atomic_load:
+ return Ordering != AtomicExpr::AO_ABI_memory_order_release &&
+ Ordering != AtomicExpr::AO_ABI_memory_order_acq_rel;
+
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_store_n:
+ return Ordering != AtomicExpr::AO_ABI_memory_order_consume &&
+ Ordering != AtomicExpr::AO_ABI_memory_order_acquire &&
+ Ordering != AtomicExpr::AO_ABI_memory_order_acq_rel;
+
+ default:
+ return true;
+ }
+}
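+
+// For instance, under the rules above a load may not use a release or
+// acq_rel ordering and a store may not use consume, acquire or acq_rel,
+// so user code such as
+//   _Atomic(int) a;
+//   __c11_atomic_load(&a, __ATOMIC_RELEASE);
+// is flagged below with warn_atomic_op_has_invalid_memory_order.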
+
+ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
+ AtomicExpr::AtomicOp Op) {
+ CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
+ DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+
+ // All these operations take one of the following forms:
+ enum {
+ // C __c11_atomic_init(A *, C)
+ Init,
+ // C __c11_atomic_load(A *, int)
+ Load,
+ // void __atomic_load(A *, CP, int)
+ Copy,
+ // C __c11_atomic_add(A *, M, int)
+ Arithmetic,
+ // C __atomic_exchange_n(A *, CP, int)
+ Xchg,
+ // void __atomic_exchange(A *, C *, CP, int)
+ GNUXchg,
+ // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
+ C11CmpXchg,
+ // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
+ GNUCmpXchg
+ } Form = Init;
+ const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 4, 5, 6 };
+ const unsigned NumVals[] = { 1, 0, 1, 1, 1, 2, 2, 3 };
+ // where:
+ // C is an appropriate type,
+ // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
+ // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
+ // M is C if C is an integer, and ptrdiff_t if C is a pointer, and
+ // the int parameters are for orderings.
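+ //
+ // For example, __atomic_compare_exchange on an int matches the GNUCmpXchg
+ // form, i.e. bool __atomic_compare_exchange(int *ptr, int *expected,
+ // int *desired, bool weak, int success_order, int failure_order),
+ // with NumArgs[GNUCmpXchg] == 6 and NumVals[GNUCmpXchg] == 3.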
+
+ static_assert(AtomicExpr::AO__c11_atomic_init == 0 &&
+ AtomicExpr::AO__c11_atomic_fetch_xor + 1 ==
+ AtomicExpr::AO__atomic_load,
+ "need to update code for modified C11 atomics");
+ bool IsC11 = Op >= AtomicExpr::AO__c11_atomic_init &&
+ Op <= AtomicExpr::AO__c11_atomic_fetch_xor;
+ bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
+ Op == AtomicExpr::AO__atomic_store_n ||
+ Op == AtomicExpr::AO__atomic_exchange_n ||
+ Op == AtomicExpr::AO__atomic_compare_exchange_n;
+ bool IsAddSub = false;
+
+ switch (Op) {
+ case AtomicExpr::AO__c11_atomic_init:
+ Form = Init;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ Form = Load;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_load:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_store_n:
+ Form = Copy;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ IsAddSub = true;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__atomic_xor_fetch:
+ case AtomicExpr::AO__atomic_nand_fetch:
+ Form = Arithmetic;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ Form = Xchg;
+ break;
+
+ case AtomicExpr::AO__atomic_exchange:
+ Form = GNUXchg;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ Form = C11CmpXchg;
+ break;
+
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ Form = GNUCmpXchg;
+ break;
+ }
+
+ // Check we have the right number of arguments.
+ if (TheCall->getNumArgs() < NumArgs[Form]) {
+ Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
+ << 0 << NumArgs[Form] << TheCall->getNumArgs()
+ << TheCall->getCallee()->getSourceRange();
+ return ExprError();
+ } else if (TheCall->getNumArgs() > NumArgs[Form]) {
+ Diag(TheCall->getArg(NumArgs[Form])->getLocStart(),
+ diag::err_typecheck_call_too_many_args)
+ << 0 << NumArgs[Form] << TheCall->getNumArgs()
+ << TheCall->getCallee()->getSourceRange();
+ return ExprError();
+ }
+
+ // Inspect the first argument of the atomic operation.
+ Expr *Ptr = TheCall->getArg(0);
+ Ptr = DefaultFunctionArrayLvalueConversion(Ptr).get();
+ const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
+ if (!pointerType) {
+ Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
+ << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
+
+ // For a __c11 builtin, this should be a pointer to an _Atomic type.
+ QualType AtomTy = pointerType->getPointeeType(); // 'A'
+ QualType ValType = AtomTy; // 'C'
+ if (IsC11) {
+ if (!AtomTy->isAtomicType()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic)
+ << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
+ if (AtomTy.isConstQualified()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_op_needs_non_const_atomic)
+ << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
+ ValType = AtomTy->getAs<AtomicType>()->getValueType();
+ } else if (Form != Load && Op != AtomicExpr::AO__atomic_load) {
+ if (ValType.isConstQualified()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_op_needs_non_const_pointer)
+ << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
+ }
+
+ // For an arithmetic operation, the implied arithmetic must be well-formed.
+ if (Form == Arithmetic) {
+ // gcc does not enforce these rules for GNU atomics, but we do so for sanity.
+ if (IsAddSub && !ValType->isIntegerType() && !ValType->isPointerType()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr)
+ << IsC11 << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
+ if (!IsAddSub && !ValType->isIntegerType()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_op_bitwise_needs_atomic_int)
+ << IsC11 << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
+ if (IsC11 && ValType->isPointerType() &&
+ RequireCompleteType(Ptr->getLocStart(), ValType->getPointeeType(),
+ diag::err_incomplete_type)) {
+ return ExprError();
+ }
+ } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
+ // For __atomic_*_n operations, the value type must be a scalar integral or
+ // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
+ Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr)
+ << IsC11 << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
+
+ if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) &&
+ !AtomTy->isScalarType()) {
+ // For GNU atomics, require a trivially-copyable type. This is not part of
+ // the GNU atomics specification, but we enforce it for sanity.
+ Diag(DRE->getLocStart(), diag::err_atomic_op_needs_trivial_copy)
+ << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
+
+ switch (ValType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ // okay
+ break;
+
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Autoreleasing:
+ // FIXME: Can this happen? By this point, ValType should be known
+ // to be trivially copyable.
+ Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership)
+ << ValType << Ptr->getSourceRange();
+ return ExprError();
+ }
+
+ // atomic_fetch_or takes a pointer to a volatile 'A'. We shouldn't let the
+ // volatile-ness of the pointee-type inject itself into the result or the
+ // other operands.
+ ValType.removeLocalVolatile();
+ QualType ResultType = ValType;
+ if (Form == Copy || Form == GNUXchg || Form == Init)
+ ResultType = Context.VoidTy;
+ else if (Form == C11CmpXchg || Form == GNUCmpXchg)
+ ResultType = Context.BoolTy;
+
+ // The type of a parameter passed 'by value'. In the GNU atomics, such
+ // arguments are actually passed as pointers.
+ QualType ByValType = ValType; // 'CP'
+ if (!IsC11 && !IsN)
+ ByValType = Ptr->getType();
+
+ // FIXME: __atomic_load allows the first argument to be a pointer to const
+ // but not the second argument. We need to manually remove possible const
+ // qualifiers.
+
+ // The first argument --- the pointer --- has a fixed type; we
+ // deduce the types of the rest of the arguments accordingly. Walk
+ // the remaining arguments, converting them to the deduced value type.
+ for (unsigned i = 1; i != NumArgs[Form]; ++i) {
+ QualType Ty;
+ if (i < NumVals[Form] + 1) {
+ switch (i) {
+ case 1:
+ // The second argument is the non-atomic operand. For arithmetic, this
+ // is always passed by value, and for a compare_exchange it is always
+ // passed by address. For the rest, GNU uses by-address and C11 uses
+ // by-value.
+ assert(Form != Load);
+ if (Form == Init || (Form == Arithmetic && ValType->isIntegerType()))
+ Ty = ValType;
+ else if (Form == Copy || Form == Xchg)
+ Ty = ByValType;
+ else if (Form == Arithmetic)
+ Ty = Context.getPointerDiffType();
+ else {
+ Expr *ValArg = TheCall->getArg(i);
+ unsigned AS = 0;
+ // Keep address space of non-atomic pointer type.
+ if (const PointerType *PtrTy =
+ ValArg->getType()->getAs<PointerType>()) {
+ AS = PtrTy->getPointeeType().getAddressSpace();
+ }
+ Ty = Context.getPointerType(
+ Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS));
+ }
+ break;
+ case 2:
+ // The third argument to compare_exchange / GNU exchange is a
+ // (pointer to a) desired value.
+ Ty = ByValType;
+ break;
+ case 3:
+ // The fourth argument to GNU compare_exchange is a 'weak' flag.
+ Ty = Context.BoolTy;
+ break;
+ }
+ } else {
+ // The order(s) are always converted to int.
+ Ty = Context.IntTy;
+ }
+
+ InitializedEntity Entity =
+ InitializedEntity::InitializeParameter(Context, Ty, false);
+ ExprResult Arg = TheCall->getArg(i);
+ Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
+ if (Arg.isInvalid())
+ return true;
+ TheCall->setArg(i, Arg.get());
+ }
+
+ // Permute the arguments into a 'consistent' order.
+ SmallVector<Expr*, 5> SubExprs;
+ SubExprs.push_back(Ptr);
+ switch (Form) {
+ case Init:
+ // Note, AtomicExpr::getVal1() has a special case for this atomic.
+ SubExprs.push_back(TheCall->getArg(1)); // Val1
+ break;
+ case Load:
+ SubExprs.push_back(TheCall->getArg(1)); // Order
+ break;
+ case Copy:
+ case Arithmetic:
+ case Xchg:
+ SubExprs.push_back(TheCall->getArg(2)); // Order
+ SubExprs.push_back(TheCall->getArg(1)); // Val1
+ break;
+ case GNUXchg:
+ // Note, AtomicExpr::getVal2() has a special case for this atomic.
+ SubExprs.push_back(TheCall->getArg(3)); // Order
+ SubExprs.push_back(TheCall->getArg(1)); // Val1
+ SubExprs.push_back(TheCall->getArg(2)); // Val2
+ break;
+ case C11CmpXchg:
+ SubExprs.push_back(TheCall->getArg(3)); // Order
+ SubExprs.push_back(TheCall->getArg(1)); // Val1
+ SubExprs.push_back(TheCall->getArg(4)); // OrderFail
+ SubExprs.push_back(TheCall->getArg(2)); // Val2
+ break;
+ case GNUCmpXchg:
+ SubExprs.push_back(TheCall->getArg(4)); // Order
+ SubExprs.push_back(TheCall->getArg(1)); // Val1
+ SubExprs.push_back(TheCall->getArg(5)); // OrderFail
+ SubExprs.push_back(TheCall->getArg(2)); // Val2
+ SubExprs.push_back(TheCall->getArg(3)); // Weak
+ break;
+ }
+
+ if (SubExprs.size() >= 2 && Form != Init) {
+ llvm::APSInt Result(32);
+ if (SubExprs[1]->isIntegerConstantExpr(Result, Context) &&
+ !isValidOrderingForOp(Result.getSExtValue(), Op))
+ Diag(SubExprs[1]->getLocStart(),
+ diag::warn_atomic_op_has_invalid_memory_order)
+ << SubExprs[1]->getSourceRange();
+ }
+
+ AtomicExpr *AE = new (Context) AtomicExpr(TheCall->getCallee()->getLocStart(),
+ SubExprs, ResultType, Op,
+ TheCall->getRParenLoc());
+
+ if ((Op == AtomicExpr::AO__c11_atomic_load ||
+ (Op == AtomicExpr::AO__c11_atomic_store)) &&
+ Context.AtomicUsesUnsupportedLibcall(AE))
+ Diag(AE->getLocStart(), diag::err_atomic_load_store_uses_lib) <<
+ ((Op == AtomicExpr::AO__c11_atomic_load) ? 0 : 1);
+
+ return AE;
+}
+
+
+/// checkBuiltinArgument - Given a call to a builtin function, perform
+/// normal type-checking on the given argument, updating the call in
+/// place. This is useful when a builtin function requires custom
+/// type-checking for some of its arguments but not necessarily all of
+/// them.
+///
+/// Returns true on error.
+static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
+ FunctionDecl *Fn = E->getDirectCallee();
+ assert(Fn && "builtin call without direct callee!");
+
+ ParmVarDecl *Param = Fn->getParamDecl(ArgIndex);
+ InitializedEntity Entity =
+ InitializedEntity::InitializeParameter(S.Context, Param);
+
+ ExprResult Arg = E->getArg(ArgIndex);
+ Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
+ if (Arg.isInvalid())
+ return true;
+
+ E->setArg(ArgIndex, Arg.get());
+ return false;
+}
+
+/// SemaBuiltinAtomicOverloaded - We have a call to a function like
+/// __sync_fetch_and_add, which is an overloaded function based on the pointer
+/// type of its first argument. The main ActOnCallExpr routines have already
+/// promoted the types of arguments because all of these calls are prototyped as
+/// void(...).
+///
+/// This function goes through and does final semantic checking for these
+/// builtins.
+ExprResult
+Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
+ CallExpr *TheCall = (CallExpr *)TheCallResult.get();
+ DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+ FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
+
+ // Ensure that we have at least one argument to do type inference from.
+ if (TheCall->getNumArgs() < 1) {
+ Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least)
+ << 0 << 1 << TheCall->getNumArgs()
+ << TheCall->getCallee()->getSourceRange();
+ return ExprError();
+ }
+
+ // Inspect the first argument of the atomic builtin. This should always be
+ // a pointer type, whose element is an integral scalar or pointer type.
+ // Because it is a pointer type, we don't have to worry about any implicit
+ // casts here.
+ // FIXME: We don't allow floating point scalars as input.
+ Expr *FirstArg = TheCall->getArg(0);
+ ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
+ if (FirstArgResult.isInvalid())
+ return ExprError();
+ FirstArg = FirstArgResult.get();
+ TheCall->setArg(0, FirstArg);
+
+ const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
+ if (!pointerType) {
+ Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
+ << FirstArg->getType() << FirstArg->getSourceRange();
+ return ExprError();
+ }
+
+ QualType ValType = pointerType->getPointeeType();
+ if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
+ !ValType->isBlockPointerType()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intptr)
+ << FirstArg->getType() << FirstArg->getSourceRange();
+ return ExprError();
+ }
+
+ switch (ValType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ // okay
+ break;
+
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Autoreleasing:
+ Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership)
+ << ValType << FirstArg->getSourceRange();
+ return ExprError();
+ }
+
+ // Strip any qualifiers off ValType.
+ ValType = ValType.getUnqualifiedType();
+
+ // The majority of builtins return a value, but a few have special return
+ // types, so allow them to override appropriately below.
+ QualType ResultType = ValType;
+
+ // We need to figure out which concrete builtin this maps onto. For example,
+ // __sync_fetch_and_add with a 2 byte object turns into
+ // __sync_fetch_and_add_2.
+#define BUILTIN_ROW(x) \
+ { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
+ Builtin::BI##x##_8, Builtin::BI##x##_16 }
+
+ static const unsigned BuiltinIndices[][5] = {
+ BUILTIN_ROW(__sync_fetch_and_add),
+ BUILTIN_ROW(__sync_fetch_and_sub),
+ BUILTIN_ROW(__sync_fetch_and_or),
+ BUILTIN_ROW(__sync_fetch_and_and),
+ BUILTIN_ROW(__sync_fetch_and_xor),
+ BUILTIN_ROW(__sync_fetch_and_nand),
+
+ BUILTIN_ROW(__sync_add_and_fetch),
+ BUILTIN_ROW(__sync_sub_and_fetch),
+ BUILTIN_ROW(__sync_and_and_fetch),
+ BUILTIN_ROW(__sync_or_and_fetch),
+ BUILTIN_ROW(__sync_xor_and_fetch),
+ BUILTIN_ROW(__sync_nand_and_fetch),
+
+ BUILTIN_ROW(__sync_val_compare_and_swap),
+ BUILTIN_ROW(__sync_bool_compare_and_swap),
+ BUILTIN_ROW(__sync_lock_test_and_set),
+ BUILTIN_ROW(__sync_lock_release),
+ BUILTIN_ROW(__sync_swap)
+ };
+#undef BUILTIN_ROW
+
+ // Determine the index of the size.
+ unsigned SizeIndex;
+ switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
+ case 1: SizeIndex = 0; break;
+ case 2: SizeIndex = 1; break;
+ case 4: SizeIndex = 2; break;
+ case 8: SizeIndex = 3; break;
+ case 16: SizeIndex = 4; break;
+ default:
+ Diag(DRE->getLocStart(), diag::err_atomic_builtin_pointer_size)
+ << FirstArg->getType() << FirstArg->getSourceRange();
+ return ExprError();
+ }
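+
+ // For example, with a 4-byte int, "int x; __sync_fetch_and_add(&x, 1);"
+ // yields SizeIndex 2, so the call is rewritten below to target the
+ // concrete builtin __sync_fetch_and_add_4.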
+
+ // Each of these builtins has one pointer argument, followed by some number
+ // of values (0, 1 or 2), followed by a potentially empty varargs list of
+ // stuff that we ignore. Find out which row of BuiltinIndices to read from
+ // as well as the number of fixed args.
+ unsigned BuiltinID = FDecl->getBuiltinID();
+ unsigned BuiltinIndex, NumFixed = 1;
+ bool WarnAboutSemanticsChange = false;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unknown overloaded atomic builtin!");
+ case Builtin::BI__sync_fetch_and_add:
+ case Builtin::BI__sync_fetch_and_add_1:
+ case Builtin::BI__sync_fetch_and_add_2:
+ case Builtin::BI__sync_fetch_and_add_4:
+ case Builtin::BI__sync_fetch_and_add_8:
+ case Builtin::BI__sync_fetch_and_add_16:
+ BuiltinIndex = 0;
+ break;
+
+ case Builtin::BI__sync_fetch_and_sub:
+ case Builtin::BI__sync_fetch_and_sub_1:
+ case Builtin::BI__sync_fetch_and_sub_2:
+ case Builtin::BI__sync_fetch_and_sub_4:
+ case Builtin::BI__sync_fetch_and_sub_8:
+ case Builtin::BI__sync_fetch_and_sub_16:
+ BuiltinIndex = 1;
+ break;
+
+ case Builtin::BI__sync_fetch_and_or:
+ case Builtin::BI__sync_fetch_and_or_1:
+ case Builtin::BI__sync_fetch_and_or_2:
+ case Builtin::BI__sync_fetch_and_or_4:
+ case Builtin::BI__sync_fetch_and_or_8:
+ case Builtin::BI__sync_fetch_and_or_16:
+ BuiltinIndex = 2;
+ break;
+
+ case Builtin::BI__sync_fetch_and_and:
+ case Builtin::BI__sync_fetch_and_and_1:
+ case Builtin::BI__sync_fetch_and_and_2:
+ case Builtin::BI__sync_fetch_and_and_4:
+ case Builtin::BI__sync_fetch_and_and_8:
+ case Builtin::BI__sync_fetch_and_and_16:
+ BuiltinIndex = 3;
+ break;
+
+ case Builtin::BI__sync_fetch_and_xor:
+ case Builtin::BI__sync_fetch_and_xor_1:
+ case Builtin::BI__sync_fetch_and_xor_2:
+ case Builtin::BI__sync_fetch_and_xor_4:
+ case Builtin::BI__sync_fetch_and_xor_8:
+ case Builtin::BI__sync_fetch_and_xor_16:
+ BuiltinIndex = 4;
+ break;
+
+ case Builtin::BI__sync_fetch_and_nand:
+ case Builtin::BI__sync_fetch_and_nand_1:
+ case Builtin::BI__sync_fetch_and_nand_2:
+ case Builtin::BI__sync_fetch_and_nand_4:
+ case Builtin::BI__sync_fetch_and_nand_8:
+ case Builtin::BI__sync_fetch_and_nand_16:
+ BuiltinIndex = 5;
+ WarnAboutSemanticsChange = true;
+ break;
+
+ case Builtin::BI__sync_add_and_fetch:
+ case Builtin::BI__sync_add_and_fetch_1:
+ case Builtin::BI__sync_add_and_fetch_2:
+ case Builtin::BI__sync_add_and_fetch_4:
+ case Builtin::BI__sync_add_and_fetch_8:
+ case Builtin::BI__sync_add_and_fetch_16:
+ BuiltinIndex = 6;
+ break;
+
+ case Builtin::BI__sync_sub_and_fetch:
+ case Builtin::BI__sync_sub_and_fetch_1:
+ case Builtin::BI__sync_sub_and_fetch_2:
+ case Builtin::BI__sync_sub_and_fetch_4:
+ case Builtin::BI__sync_sub_and_fetch_8:
+ case Builtin::BI__sync_sub_and_fetch_16:
+ BuiltinIndex = 7;
+ break;
+
+ case Builtin::BI__sync_and_and_fetch:
+ case Builtin::BI__sync_and_and_fetch_1:
+ case Builtin::BI__sync_and_and_fetch_2:
+ case Builtin::BI__sync_and_and_fetch_4:
+ case Builtin::BI__sync_and_and_fetch_8:
+ case Builtin::BI__sync_and_and_fetch_16:
+ BuiltinIndex = 8;
+ break;
+
+ case Builtin::BI__sync_or_and_fetch:
+ case Builtin::BI__sync_or_and_fetch_1:
+ case Builtin::BI__sync_or_and_fetch_2:
+ case Builtin::BI__sync_or_and_fetch_4:
+ case Builtin::BI__sync_or_and_fetch_8:
+ case Builtin::BI__sync_or_and_fetch_16:
+ BuiltinIndex = 9;
+ break;
+
+ case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_xor_and_fetch_1:
+ case Builtin::BI__sync_xor_and_fetch_2:
+ case Builtin::BI__sync_xor_and_fetch_4:
+ case Builtin::BI__sync_xor_and_fetch_8:
+ case Builtin::BI__sync_xor_and_fetch_16:
+ BuiltinIndex = 10;
+ break;
+
+ case Builtin::BI__sync_nand_and_fetch:
+ case Builtin::BI__sync_nand_and_fetch_1:
+ case Builtin::BI__sync_nand_and_fetch_2:
+ case Builtin::BI__sync_nand_and_fetch_4:
+ case Builtin::BI__sync_nand_and_fetch_8:
+ case Builtin::BI__sync_nand_and_fetch_16:
+ BuiltinIndex = 11;
+ WarnAboutSemanticsChange = true;
+ break;
+
+ case Builtin::BI__sync_val_compare_and_swap:
+ case Builtin::BI__sync_val_compare_and_swap_1:
+ case Builtin::BI__sync_val_compare_and_swap_2:
+ case Builtin::BI__sync_val_compare_and_swap_4:
+ case Builtin::BI__sync_val_compare_and_swap_8:
+ case Builtin::BI__sync_val_compare_and_swap_16:
+ BuiltinIndex = 12;
+ NumFixed = 2;
+ break;
+
+ case Builtin::BI__sync_bool_compare_and_swap:
+ case Builtin::BI__sync_bool_compare_and_swap_1:
+ case Builtin::BI__sync_bool_compare_and_swap_2:
+ case Builtin::BI__sync_bool_compare_and_swap_4:
+ case Builtin::BI__sync_bool_compare_and_swap_8:
+ case Builtin::BI__sync_bool_compare_and_swap_16:
+ BuiltinIndex = 13;
+ NumFixed = 2;
+ ResultType = Context.BoolTy;
+ break;
+
+ case Builtin::BI__sync_lock_test_and_set:
+ case Builtin::BI__sync_lock_test_and_set_1:
+ case Builtin::BI__sync_lock_test_and_set_2:
+ case Builtin::BI__sync_lock_test_and_set_4:
+ case Builtin::BI__sync_lock_test_and_set_8:
+ case Builtin::BI__sync_lock_test_and_set_16:
+ BuiltinIndex = 14;
+ break;
+
+ case Builtin::BI__sync_lock_release:
+ case Builtin::BI__sync_lock_release_1:
+ case Builtin::BI__sync_lock_release_2:
+ case Builtin::BI__sync_lock_release_4:
+ case Builtin::BI__sync_lock_release_8:
+ case Builtin::BI__sync_lock_release_16:
+ BuiltinIndex = 15;
+ NumFixed = 0;
+ ResultType = Context.VoidTy;
+ break;
+
+ case Builtin::BI__sync_swap:
+ case Builtin::BI__sync_swap_1:
+ case Builtin::BI__sync_swap_2:
+ case Builtin::BI__sync_swap_4:
+ case Builtin::BI__sync_swap_8:
+ case Builtin::BI__sync_swap_16:
+ BuiltinIndex = 16;
+ break;
+ }
+
+ // Now that we know how many fixed arguments we expect, first check that we
+ // have at least that many.
+ if (TheCall->getNumArgs() < 1+NumFixed) {
+ Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least)
+ << 0 << 1+NumFixed << TheCall->getNumArgs()
+ << TheCall->getCallee()->getSourceRange();
+ return ExprError();
+ }
+
+ if (WarnAboutSemanticsChange) {
+ Diag(TheCall->getLocEnd(), diag::warn_sync_fetch_and_nand_semantics_change)
+ << TheCall->getCallee()->getSourceRange();
+ }
+
+ // Get the decl for the concrete builtin; from this we can tell what
+ // concrete integer type we should convert to.
+ unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
+ const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID);
+ FunctionDecl *NewBuiltinDecl;
+ if (NewBuiltinID == BuiltinID)
+ NewBuiltinDecl = FDecl;
+ else {
+ // Perform builtin lookup to avoid redeclaring it.
+ DeclarationName DN(&Context.Idents.get(NewBuiltinName));
+ LookupResult Res(*this, DN, DRE->getLocStart(), LookupOrdinaryName);
+ LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
+ assert(Res.getFoundDecl());
+ NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
+ if (!NewBuiltinDecl)
+ return ExprError();
+ }
+
+ // The first argument --- the pointer --- has a fixed type; we
+ // deduce the types of the rest of the arguments accordingly. Walk
+ // the remaining arguments, converting them to the deduced value type.
+ for (unsigned i = 0; i != NumFixed; ++i) {
+ ExprResult Arg = TheCall->getArg(i+1);
+
+ // GCC does an implicit conversion to the pointer or integer ValType. This
+ // can fail in some cases (1i -> int**), check for this error case now.
+ // Initialize the argument.
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
+ ValType, /*consume*/ false);
+ Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
+ if (Arg.isInvalid())
+ return ExprError();
+
+ // Okay, we have something that *can* be converted to the right type. Check
+ // to see if there is a potentially weird extension going on here. This can
+ // happen when you do an atomic operation on something like a char* and
+ // pass in 42. The 42 gets converted to char. This is even more strange
+ // for things like 45.123 -> char, etc.
+ // FIXME: Do this check.
+ TheCall->setArg(i+1, Arg.get());
+ }
+
+ ASTContext &Context = this->getASTContext();
+
+ // Create a new DeclRefExpr to refer to the new decl.
+ DeclRefExpr* NewDRE = DeclRefExpr::Create(
+ Context,
+ DRE->getQualifierLoc(),
+ SourceLocation(),
+ NewBuiltinDecl,
+ /*enclosing*/ false,
+ DRE->getLocation(),
+ Context.BuiltinFnTy,
+ DRE->getValueKind());
+
+ // Set the callee in the CallExpr.
+ // FIXME: This loses syntactic information.
+ QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
+ ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
+ CK_BuiltinFnToFnPtr);
+ TheCall->setCallee(PromotedCall.get());
+
+ // Change the result type of the call to match the original value type. This
+ // is arbitrary, but the codegen for these builtins is designed to handle it
+ // gracefully.
+ TheCall->setType(ResultType);
+
+ return TheCallResult;
+}
+
+/// SemaBuiltinNontemporalOverloaded - We have a call to
+/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
+/// overloaded function based on the pointer type of its last argument.
+///
+/// This function goes through and does final semantic checking for these
+/// builtins.
+ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
+ CallExpr *TheCall = (CallExpr *)TheCallResult.get();
+ DeclRefExpr *DRE =
+ cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+ FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
+ unsigned BuiltinID = FDecl->getBuiltinID();
+ assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
+ BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
+ "Unexpected nontemporal load/store builtin!");
+ bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
+ unsigned numArgs = isStore ? 2 : 1;
+
+ // Ensure that we have the proper number of arguments.
+ if (checkArgCount(*this, TheCall, numArgs))
+ return ExprError();
+
+ // Inspect the last argument of the nontemporal builtin. This should always
+ // be a pointer type, from which we infer the type of the memory access.
+ // Because it is a pointer type, we don't have to worry about any implicit
+ // casts here.
+ Expr *PointerArg = TheCall->getArg(numArgs - 1);
+ ExprResult PointerArgResult =
+ DefaultFunctionArrayLvalueConversion(PointerArg);
+
+ if (PointerArgResult.isInvalid())
+ return ExprError();
+ PointerArg = PointerArgResult.get();
+ TheCall->setArg(numArgs - 1, PointerArg);
+
+ const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
+ if (!pointerType) {
+ Diag(DRE->getLocStart(), diag::err_nontemporal_builtin_must_be_pointer)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return ExprError();
+ }
+
+ QualType ValType = pointerType->getPointeeType();
+
+ // Strip any qualifiers off ValType.
+ ValType = ValType.getUnqualifiedType();
+ if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
+ !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
+ !ValType->isVectorType()) {
+ Diag(DRE->getLocStart(),
+ diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return ExprError();
+ }
+
+ if (!isStore) {
+ TheCall->setType(ValType);
+ return TheCallResult;
+ }
+
+ ExprResult ValArg = TheCall->getArg(0);
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(
+ Context, ValType, /*consume*/ false);
+ ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
+ if (ValArg.isInvalid())
+ return ExprError();
+
+ TheCall->setArg(0, ValArg.get());
+ TheCall->setType(Context.VoidTy);
+ return TheCallResult;
+}
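+
+// A usage sketch (illustrative): the access type is deduced from the
+// pointer argument in both directions, so
+//   float *fp = ...;
+//   __builtin_nontemporal_store(1.0f, fp);    // call typed as void
+//   float v = __builtin_nontemporal_load(fp); // call typed as float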
+
+/// CheckObjCString - Checks that the argument to the builtin
+/// CFString constructor is correct.
+/// Note: It might also make sense to do the UTF-16 conversion here (would
+/// simplify the backend).
+bool Sema::CheckObjCString(Expr *Arg) {
+ Arg = Arg->IgnoreParenCasts();
+ StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
+
+ if (!Literal || !Literal->isAscii()) {
+ Diag(Arg->getLocStart(), diag::err_cfstring_literal_not_string_constant)
+ << Arg->getSourceRange();
+ return true;
+ }
+
+ if (Literal->containsNonAsciiOrNull()) {
+ StringRef String = Literal->getString();
+ unsigned NumBytes = String.size();
+ SmallVector<UTF16, 128> ToBuf(NumBytes);
+ const UTF8 *FromPtr = (const UTF8 *)String.data();
+ UTF16 *ToPtr = &ToBuf[0];
+
+ ConversionResult Result = ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
+ &ToPtr, ToPtr + NumBytes,
+ strictConversion);
+ // Check for conversion failure.
+ if (Result != conversionOK)
+ Diag(Arg->getLocStart(),
+ diag::warn_cfstring_truncated) << Arg->getSourceRange();
+ }
+ return false;
+}
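+
+// For example, @"hello" (lowered through the CFString constructor builtin)
+// passes this check, while a narrow literal whose non-ASCII bytes are not
+// valid UTF-8 draws warn_cfstring_truncated when the UTF-16 conversion
+// fails.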
+
+/// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
+/// for validity. Emit an error and return true on failure; return false
+/// on success.
+bool Sema::SemaBuiltinVAStartImpl(CallExpr *TheCall) {
+ Expr *Fn = TheCall->getCallee();
+ if (TheCall->getNumArgs() > 2) {
+ Diag(TheCall->getArg(2)->getLocStart(),
+ diag::err_typecheck_call_too_many_args)
+ << 0 /*function call*/ << 2 << TheCall->getNumArgs()
+ << Fn->getSourceRange()
+ << SourceRange(TheCall->getArg(2)->getLocStart(),
+ (*(TheCall->arg_end()-1))->getLocEnd());
+ return true;
+ }
+
+ if (TheCall->getNumArgs() < 2) {
+ return Diag(TheCall->getLocEnd(),
+ diag::err_typecheck_call_too_few_args_at_least)
+ << 0 /*function call*/ << 2 << TheCall->getNumArgs();
+ }
+
+ // Type-check the first argument normally.
+ if (checkBuiltinArgument(*this, TheCall, 0))
+ return true;
+
+ // Determine whether the current function is variadic or not.
+ BlockScopeInfo *CurBlock = getCurBlock();
+ bool isVariadic;
+ if (CurBlock)
+ isVariadic = CurBlock->TheDecl->isVariadic();
+ else if (FunctionDecl *FD = getCurFunctionDecl())
+ isVariadic = FD->isVariadic();
+ else
+ isVariadic = getCurMethodDecl()->isVariadic();
+
+ if (!isVariadic) {
+ Diag(Fn->getLocStart(), diag::err_va_start_used_in_non_variadic_function);
+ return true;
+ }
+
+ // Verify that the second argument to the builtin is the last argument of the
+ // current function or method.
+ bool SecondArgIsLastNamedArgument = false;
+ const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts();
+
+ // These are valid if SecondArgIsLastNamedArgument is false after the next
+ // block.
+ QualType Type;
+ SourceLocation ParamLoc;
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) {
+ if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) {
+ // FIXME: This isn't correct for methods (results in bogus warning).
+ // Get the last formal in the current function.
+ const ParmVarDecl *LastArg;
+ if (CurBlock)
+ LastArg = *(CurBlock->TheDecl->param_end()-1);
+ else if (FunctionDecl *FD = getCurFunctionDecl())
+ LastArg = *(FD->param_end()-1);
+ else
+ LastArg = *(getCurMethodDecl()->param_end()-1);
+ SecondArgIsLastNamedArgument = PV == LastArg;
+
+ Type = PV->getType();
+ ParamLoc = PV->getLocation();
+ }
+ }
+
+ if (!SecondArgIsLastNamedArgument)
+ Diag(TheCall->getArg(1)->getLocStart(),
+ diag::warn_second_parameter_of_va_start_not_last_named_argument);
+ else if (Type->isReferenceType()) {
+ Diag(Arg->getLocStart(),
+ diag::warn_va_start_of_reference_type_is_undefined);
+ Diag(ParamLoc, diag::note_parameter_type) << Type;
+ }
+
+ TheCall->setType(Context.VoidTy);
+ return false;
+}
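+
+// A sketch of the misuse diagnosed above, assuming a hypothetical variadic
+// function f:
+//   void f(int a, int b, ...) {
+//     va_list ap;
+//     va_start(ap, a); // warns: 'a' is not the last named parameter
+//   }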
+
+/// Check the arguments to '__builtin_va_start' for validity, and that
+/// it was called from a function of the native ABI.
+/// Emit an error and return true on failure; return false on success.
+bool Sema::SemaBuiltinVAStart(CallExpr *TheCall) {
+ // On x86-64 Unix, don't allow this in Win64 ABI functions.
+ // On x64 Windows, don't allow this in System V ABI functions.
+ // (Yes, that means there's no corresponding way to support variadic
+ // System V ABI functions on Windows.)
+ if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86_64) {
+ unsigned OS = Context.getTargetInfo().getTriple().getOS();
+ clang::CallingConv CC = CC_C;
+ if (const FunctionDecl *FD = getCurFunctionDecl())
+ CC = FD->getType()->getAs<FunctionType>()->getCallConv();
+ if ((OS == llvm::Triple::Win32 && CC == CC_X86_64SysV) ||
+ (OS != llvm::Triple::Win32 && CC == CC_X86_64Win64))
+ return Diag(TheCall->getCallee()->getLocStart(),
+ diag::err_va_start_used_in_wrong_abi_function)
+ << (OS != llvm::Triple::Win32);
+ }
+ return SemaBuiltinVAStartImpl(TheCall);
+}
+
+/// Check the arguments to '__builtin_ms_va_start' for validity, and that
+/// it was called from a Win64 ABI function.
+/// Emit an error and return true on failure; return false on success.
+bool Sema::SemaBuiltinMSVAStart(CallExpr *TheCall) {
+ // This only makes sense for x86-64.
+ const llvm::Triple &TT = Context.getTargetInfo().getTriple();
+ Expr *Callee = TheCall->getCallee();
+ if (TT.getArch() != llvm::Triple::x86_64)
+ return Diag(Callee->getLocStart(), diag::err_x86_builtin_32_bit_tgt);
+ // Don't allow this in System V ABI functions.
+ clang::CallingConv CC = CC_C;
+ if (const FunctionDecl *FD = getCurFunctionDecl())
+ CC = FD->getType()->getAs<FunctionType>()->getCallConv();
+ if (CC == CC_X86_64SysV ||
+ (TT.getOS() != llvm::Triple::Win32 && CC != CC_X86_64Win64))
+ return Diag(Callee->getLocStart(),
+ diag::err_ms_va_start_used_in_sysv_function);
+ return SemaBuiltinVAStartImpl(TheCall);
+}
+
+bool Sema::SemaBuiltinVAStartARM(CallExpr *Call) {
+ // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, ...);
+
+ Expr *Func = Call->getCallee();
+
+ if (Call->getNumArgs() < 3)
+ return Diag(Call->getLocEnd(),
+ diag::err_typecheck_call_too_few_args_at_least)
+ << 0 /*function call*/ << 3 << Call->getNumArgs();
+
+ // Determine whether the current function is variadic or not.
+ bool IsVariadic;
+ if (BlockScopeInfo *CurBlock = getCurBlock())
+ IsVariadic = CurBlock->TheDecl->isVariadic();
+ else if (FunctionDecl *FD = getCurFunctionDecl())
+ IsVariadic = FD->isVariadic();
+ else if (ObjCMethodDecl *MD = getCurMethodDecl())
+ IsVariadic = MD->isVariadic();
+ else
+ llvm_unreachable("unexpected statement type");
+
+ if (!IsVariadic) {
+ Diag(Func->getLocStart(), diag::err_va_start_used_in_non_variadic_function);
+ return true;
+ }
+
+ // Type-check the first argument normally.
+ if (checkBuiltinArgument(*this, Call, 0))
+ return true;
+
+ const struct {
+ unsigned ArgNo;
+ QualType Type;
+ } ArgumentTypes[] = {
+ { 1, Context.getPointerType(Context.CharTy.withConst()) },
+ { 2, Context.getSizeType() },
+ };
+
+ for (const auto &AT : ArgumentTypes) {
+ const Expr *Arg = Call->getArg(AT.ArgNo)->IgnoreParens();
+ if (Arg->getType().getCanonicalType() == AT.Type.getCanonicalType())
+ continue;
+ Diag(Arg->getLocStart(), diag::err_typecheck_convert_incompatible)
+ << Arg->getType() << AT.Type << 1 /* different class */
+ << 0 /* qualifier difference */ << 3 /* parameter mismatch */
+ << AT.ArgNo + 1 << Arg->getType() << AT.Type;
+ }
+
+ return false;
+}
+
+/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
+/// friends. This is declared to take (...), so we have to check everything.
+bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
+ if (TheCall->getNumArgs() < 2)
+ return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
+ << 0 /*function call*/ << 2 << TheCall->getNumArgs();
+ if (TheCall->getNumArgs() > 2)
+ return Diag(TheCall->getArg(2)->getLocStart(),
+ diag::err_typecheck_call_too_many_args)
+ << 0 /*function call*/ << 2 << TheCall->getNumArgs()
+ << SourceRange(TheCall->getArg(2)->getLocStart(),
+ (*(TheCall->arg_end()-1))->getLocEnd());
+
+ ExprResult OrigArg0 = TheCall->getArg(0);
+ ExprResult OrigArg1 = TheCall->getArg(1);
+
+ // Do standard promotions between the two arguments, returning their common
+ // type.
+ QualType Res = UsualArithmeticConversions(OrigArg0, OrigArg1, false);
+ if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
+ return true;
+
+ // Make sure any conversions are pushed back into the call; this is
+ // type safe since unordered compare builtins are declared as "_Bool
+ // foo(...)".
+ TheCall->setArg(0, OrigArg0.get());
+ TheCall->setArg(1, OrigArg1.get());
+
+ if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
+ return false;
+
+ // If the common type isn't a real floating type, then the arguments were
+ // invalid for this operation.
+ if (Res.isNull() || !Res->isRealFloatingType())
+ return Diag(OrigArg0.get()->getLocStart(),
+ diag::err_typecheck_call_invalid_ordered_compare)
+ << OrigArg0.get()->getType() << OrigArg1.get()->getType()
+ << SourceRange(OrigArg0.get()->getLocStart(), OrigArg1.get()->getLocEnd());
+
+ return false;
+}
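+
+// For instance, __builtin_isgreater(1.0f, 2.0) promotes both operands to
+// double and is accepted, while __builtin_isgreater(1, 2) is rejected with
+// err_typecheck_call_invalid_ordered_compare because the common type of two
+// ints is not a real floating type.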
+
+/// SemaBuiltinFPClassification - Handle functions like
+/// __builtin_isnan and friends. This is declared to take (...), so we have
+/// to check everything. We expect the last argument to be a floating point
+/// value.
+bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
+ if (TheCall->getNumArgs() < NumArgs)
+ return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
+ << 0 /*function call*/ << NumArgs << TheCall->getNumArgs();
+ if (TheCall->getNumArgs() > NumArgs)
+ return Diag(TheCall->getArg(NumArgs)->getLocStart(),
+ diag::err_typecheck_call_too_many_args)
+ << 0 /*function call*/ << NumArgs << TheCall->getNumArgs()
+ << SourceRange(TheCall->getArg(NumArgs)->getLocStart(),
+ (*(TheCall->arg_end()-1))->getLocEnd());
+
+ Expr *OrigArg = TheCall->getArg(NumArgs-1);
+
+ if (OrigArg->isTypeDependent())
+ return false;
+
+ // This operation requires a non-_Complex floating-point number.
+ if (!OrigArg->getType()->isRealFloatingType())
+ return Diag(OrigArg->getLocStart(),
+ diag::err_typecheck_call_invalid_unary_fp)
+ << OrigArg->getType() << OrigArg->getSourceRange();
+
+ // If this is an implicit conversion from float -> double, remove it.
+ if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(OrigArg)) {
+ Expr *CastArg = Cast->getSubExpr();
+ if (CastArg->getType()->isSpecificBuiltinType(BuiltinType::Float)) {
+ assert(Cast->getType()->isSpecificBuiltinType(BuiltinType::Double) &&
+ "promotion from float to double is the only expected cast here");
+ Cast->setSubExpr(nullptr);
+ TheCall->setArg(NumArgs-1, CastArg);
+ }
+ }
+
+ return false;
+}
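+
+// Example: for a float f, __builtin_isnan(f) reaches here with f promoted
+// to double through the (...) prototype; the code above strips that
+// float-to-double cast so the classification runs at float precision.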
+
+/// SemaBuiltinShuffleVector - Handle __builtin_shufflevector.
+// This is declared to take (...), so we have to check everything.
+ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
+ if (TheCall->getNumArgs() < 2)
+ return ExprError(Diag(TheCall->getLocEnd(),
+ diag::err_typecheck_call_too_few_args_at_least)
+ << 0 /*function call*/ << 2 << TheCall->getNumArgs()
+ << TheCall->getSourceRange());
+
+ // Determine which of the following types of shufflevector we're checking:
+ // 1) unary, vector mask: (lhs, mask)
+ // 2) binary, vector mask: (lhs, rhs, mask)
+ // 3) binary, scalar mask: (lhs, rhs, index, ..., index)
+ QualType resType = TheCall->getArg(0)->getType();
+ unsigned numElements = 0;
+
+ if (!TheCall->getArg(0)->isTypeDependent() &&
+ !TheCall->getArg(1)->isTypeDependent()) {
+ QualType LHSType = TheCall->getArg(0)->getType();
+ QualType RHSType = TheCall->getArg(1)->getType();
+
+ if (!LHSType->isVectorType() || !RHSType->isVectorType())
+ return ExprError(Diag(TheCall->getLocStart(),
+ diag::err_shufflevector_non_vector)
+ << SourceRange(TheCall->getArg(0)->getLocStart(),
+ TheCall->getArg(1)->getLocEnd()));
+
+ numElements = LHSType->getAs<VectorType>()->getNumElements();
+ unsigned numResElements = TheCall->getNumArgs() - 2;
+
+ // Check to see if we have a call with 2 vector arguments, the unary shuffle
+ // with mask. If so, verify that RHS is an integer vector type with the
+ // same number of elts as lhs.
+ if (TheCall->getNumArgs() == 2) {
+ if (!RHSType->hasIntegerRepresentation() ||
+ RHSType->getAs<VectorType>()->getNumElements() != numElements)
+ return ExprError(Diag(TheCall->getLocStart(),
+ diag::err_shufflevector_incompatible_vector)
+ << SourceRange(TheCall->getArg(1)->getLocStart(),
+ TheCall->getArg(1)->getLocEnd()));
+ } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
+ return ExprError(Diag(TheCall->getLocStart(),
+ diag::err_shufflevector_incompatible_vector)
+ << SourceRange(TheCall->getArg(0)->getLocStart(),
+ TheCall->getArg(1)->getLocEnd()));
+ } else if (numElements != numResElements) {
+ QualType eltType = LHSType->getAs<VectorType>()->getElementType();
+ resType = Context.getVectorType(eltType, numResElements,
+ VectorType::GenericVector);
+ }
+ }
+
+ for (unsigned i = 2; i < TheCall->getNumArgs(); i++) {
+ if (TheCall->getArg(i)->isTypeDependent() ||
+ TheCall->getArg(i)->isValueDependent())
+ continue;
+
+ llvm::APSInt Result(32);
+ if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context))
+ return ExprError(Diag(TheCall->getLocStart(),
+ diag::err_shufflevector_nonconstant_argument)
+ << TheCall->getArg(i)->getSourceRange());
+
+ // Allow -1 which will be translated to undef in the IR.
+ if (Result.isSigned() && Result.isAllOnesValue())
+ continue;
+
+ if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2)
+ return ExprError(Diag(TheCall->getLocStart(),
+ diag::err_shufflevector_argument_too_large)
+ << TheCall->getArg(i)->getSourceRange());
+ }
+
+ SmallVector<Expr*, 32> exprs;
+
+ for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
+ exprs.push_back(TheCall->getArg(i));
+ TheCall->setArg(i, nullptr);
+ }
+
+ return new (Context) ShuffleVectorExpr(Context, exprs, resType,
+ TheCall->getCallee()->getLocStart(),
+ TheCall->getRParenLoc());
+}
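+
+// A sketch of the accepted forms, using a hypothetical vector typedef
+// (typedef int v4si __attribute__((vector_size(16)))):
+//   v4si a, b, m;
+//   __builtin_shufflevector(a, m);             // unary, vector mask
+//   __builtin_shufflevector(a, b, 0, 4, 1, 5); // binary, scalar indices
+// Scalar indices must be constants less than 2 * numElements; -1 is also
+// accepted and becomes undef in the IR.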
+
+/// SemaConvertVectorExpr - Handle __builtin_convertvector
+ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
+ SourceLocation BuiltinLoc,
+ SourceLocation RParenLoc) {
+ ExprValueKind VK = VK_RValue;
+ ExprObjectKind OK = OK_Ordinary;
+ QualType DstTy = TInfo->getType();
+ QualType SrcTy = E->getType();
+
+ if (!SrcTy->isVectorType() && !SrcTy->isDependentType())
+ return ExprError(Diag(BuiltinLoc,
+ diag::err_convertvector_non_vector)
+ << E->getSourceRange());
+ if (!DstTy->isVectorType() && !DstTy->isDependentType())
+ return ExprError(Diag(BuiltinLoc,
+ diag::err_convertvector_non_vector_type));
+
+ if (!SrcTy->isDependentType() && !DstTy->isDependentType()) {
+ unsigned SrcElts = SrcTy->getAs<VectorType>()->getNumElements();
+ unsigned DstElts = DstTy->getAs<VectorType>()->getNumElements();
+ if (SrcElts != DstElts)
+ return ExprError(Diag(BuiltinLoc,
+ diag::err_convertvector_incompatible_vector)
+ << E->getSourceRange());
+ }
+
+ return new (Context)
+ ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc);
+}
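+
+// For example (illustrative), element counts must match but element types
+// may differ:
+//   typedef float f4 __attribute__((vector_size(16)));
+//   typedef int i4 __attribute__((vector_size(16)));
+//   f4 src;
+//   i4 dst = __builtin_convertvector(src, i4); // ok: 4 -> 4 elements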
+
+/// SemaBuiltinPrefetch - Handle __builtin_prefetch.
+// This is declared to take (const void*, ...) and can take two
+// optional constant int args.
+bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
+ unsigned NumArgs = TheCall->getNumArgs();
+
+ if (NumArgs > 3)
+ return Diag(TheCall->getLocEnd(),
+ diag::err_typecheck_call_too_many_args_at_most)
+ << 0 /*function call*/ << 3 << NumArgs
+ << TheCall->getSourceRange();
+
+ // Argument 0 is checked for us and the remaining arguments must be
+ // constant integers.
+ for (unsigned i = 1; i != NumArgs; ++i)
+ if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3))
+ return true;
+
+ return false;
+}
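+
+// Per the range checks above, the optional second argument (rw) must be 0
+// or 1 and the optional third (locality) must be 0..3, e.g.:
+//   __builtin_prefetch(p);       // ok
+//   __builtin_prefetch(p, 1, 3); // ok: write, maximal locality
+//   __builtin_prefetch(p, 2);    // rejected: rw outside [0, 1]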
+
+/// SemaBuiltinAssume - Handle __assume (MS Extension).
+// __assume does not evaluate its argument, and should warn if that argument
+// has side effects.
+bool Sema::SemaBuiltinAssume(CallExpr *TheCall) {
+ Expr *Arg = TheCall->getArg(0);
+ if (Arg->isInstantiationDependent()) return false;
+
+ if (Arg->HasSideEffects(Context))
+ Diag(Arg->getLocStart(), diag::warn_assume_side_effects)
+ << Arg->getSourceRange()
+ << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier();
+
+ return false;
+}
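+
+// Example of the diagnosed pattern: __assume(x++ > 0) draws
+// warn_assume_side_effects, since the argument is never evaluated and the
+// increment would be silently dropped.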
+
+/// Handle __builtin_assume_aligned. This is declared
+/// as (const void*, size_t, ...) and can take one optional constant int arg.
+bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
+ unsigned NumArgs = TheCall->getNumArgs();
+
+ if (NumArgs > 3)
+ return Diag(TheCall->getLocEnd(),
+ diag::err_typecheck_call_too_many_args_at_most)
+ << 0 /*function call*/ << 3 << NumArgs
+ << TheCall->getSourceRange();
+
+ // The alignment must be a constant integer.
+ Expr *Arg = TheCall->getArg(1);
+
+ // We can't check the value of a dependent argument.
+ if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
+ llvm::APSInt Result;
+ if (SemaBuiltinConstantArg(TheCall, 1, Result))
+ return true;
+
+ if (!Result.isPowerOf2())
+ return Diag(TheCall->getLocStart(),
+ diag::err_alignment_not_power_of_two)
+ << Arg->getSourceRange();
+ }
+
+ if (NumArgs > 2) {
+ ExprResult Arg(TheCall->getArg(2));
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
+ Context.getSizeType(), false);
+ Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
+ if (Arg.isInvalid()) return true;
+ TheCall->setArg(2, Arg.get());
+ }
+
+ return false;
+}
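+
+// Usage sketch (illustrative):
+//   void *q = __builtin_assume_aligned(p, 16);    // ok
+//   void *r = __builtin_assume_aligned(p, 16, 8); // ok, offset form
+//   void *s = __builtin_assume_aligned(p, 24);    // error: not a power of 2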
+
+/// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
+/// TheCall is a constant expression.
+bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
+ llvm::APSInt &Result) {
+ Expr *Arg = TheCall->getArg(ArgNum);
+ DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+ FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
+
+ if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;
+
+ if (!Arg->isIntegerConstantExpr(Result, Context))
+ return Diag(TheCall->getLocStart(), diag::err_constant_integer_arg_type)
+ << FDecl->getDeclName() << Arg->getSourceRange();
+
+ return false;
+}
+
+/// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
+/// TheCall is a constant expression in the range [Low, High].
+bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
+ int Low, int High) {
+ llvm::APSInt Result;
+
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check constant-ness first.
+ if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
+ return true;
+
+ if (Result.getSExtValue() < Low || Result.getSExtValue() > High)
+ return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
+ << Low << High << Arg->getSourceRange();
+
+ return false;
+}
+
+/// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
+/// TheCall is an ARM/AArch64 special register string literal.
+bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
+ int ArgNum, unsigned ExpectedFieldNum,
+ bool AllowName) {
+ bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
+ BuiltinID == ARM::BI__builtin_arm_wsr64 ||
+ BuiltinID == ARM::BI__builtin_arm_rsr ||
+ BuiltinID == ARM::BI__builtin_arm_rsrp ||
+ BuiltinID == ARM::BI__builtin_arm_wsr ||
+ BuiltinID == ARM::BI__builtin_arm_wsrp;
+ bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
+ BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
+ BuiltinID == AArch64::BI__builtin_arm_rsr ||
+ BuiltinID == AArch64::BI__builtin_arm_rsrp ||
+ BuiltinID == AArch64::BI__builtin_arm_wsr ||
+ BuiltinID == AArch64::BI__builtin_arm_wsrp;
+ assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");
+
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check if the argument is a string literal.
+ if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
+ return Diag(TheCall->getLocStart(), diag::err_expr_not_string_literal)
+ << Arg->getSourceRange();
+
+ // Check the type of special register given.
+ StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
+ SmallVector<StringRef, 6> Fields;
+ Reg.split(Fields, ":");
+
+ if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
+ return Diag(TheCall->getLocStart(), diag::err_arm_invalid_specialreg)
+ << Arg->getSourceRange();
+
+ // If the string is the name of a register then we cannot check that it is
+ // valid here, but if the string is of one of the forms described in ACLE
+ // then we can check that the supplied fields are integers and within the
+ // valid ranges.
+ if (Fields.size() > 1) {
+ bool FiveFields = Fields.size() == 5;
+
+ bool ValidString = true;
+ if (IsARMBuiltin) {
+ ValidString &= Fields[0].startswith_lower("cp") ||
+ Fields[0].startswith_lower("p");
+ if (ValidString)
+ Fields[0] =
+ Fields[0].drop_front(Fields[0].startswith_lower("cp") ? 2 : 1);
+
+ ValidString &= Fields[2].startswith_lower("c");
+ if (ValidString)
+ Fields[2] = Fields[2].drop_front(1);
+
+ if (FiveFields) {
+ ValidString &= Fields[3].startswith_lower("c");
+ if (ValidString)
+ Fields[3] = Fields[3].drop_front(1);
+ }
+ }
+
+ SmallVector<int, 5> Ranges;
+ if (FiveFields)
+ Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 7, 15, 15});
+ else
+ Ranges.append({15, 7, 15});
+
+ for (unsigned i=0; i<Fields.size(); ++i) {
+ int IntField;
+ ValidString &= !Fields[i].getAsInteger(10, IntField);
+ ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
+ }
+
+ if (!ValidString)
+ return Diag(TheCall->getLocStart(), diag::err_arm_invalid_specialreg)
+ << Arg->getSourceRange();
+
+ } else if (IsAArch64Builtin && Fields.size() == 1) {
+ // If the register name is one of those that appear in the condition below
+ // and the special register builtin being used is one of the write builtins,
+ // then we require that the argument provided for writing to the register
+ // is an integer constant expression. This is because it will be lowered to
+ // an MSR (immediate) instruction, so we need to know the immediate at
+ // compile time.
+ if (TheCall->getNumArgs() != 2)
+ return false;
+
+ std::string RegLower = Reg.lower();
+ if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" &&
+ RegLower != "pan" && RegLower != "uao")
+ return false;
+
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
+ }
+
+ return false;
+}
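+
+// For the named-register case above: on AArch64, __builtin_arm_wsr("daifset", v)
+// lowers to an MSR (immediate) instruction, so v must be an integer constant
+// expression in [0, 15]; writes to other named registers accept any value.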
+
+/// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
+/// This checks that the target supports __builtin_longjmp and
+/// that val is a constant 1.
+bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
+ if (!Context.getTargetInfo().hasSjLjLowering())
+ return Diag(TheCall->getLocStart(), diag::err_builtin_longjmp_unsupported)
+ << SourceRange(TheCall->getLocStart(), TheCall->getLocEnd());
+
+ Expr *Arg = TheCall->getArg(1);
+ llvm::APSInt Result;
+
+ // TODO: This is less than ideal. Overload this to take a value.
+ if (SemaBuiltinConstantArg(TheCall, 1, Result))
+ return true;
+
+ if (Result != 1)
+ return Diag(TheCall->getLocStart(), diag::err_builtin_longjmp_invalid_val)
+ << SourceRange(Arg->getLocStart(), Arg->getLocEnd());
+
+ return false;
+}
+
+/// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]).
+/// This checks that the target supports __builtin_setjmp.
+bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) {
+ if (!Context.getTargetInfo().hasSjLjLowering())
+ return Diag(TheCall->getLocStart(), diag::err_builtin_setjmp_unsupported)
+ << SourceRange(TheCall->getLocStart(), TheCall->getLocEnd());
+ return false;
+}
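+
+// For illustration, on a target with SjLj lowering the only accepted shape
+// is (sketch):
+//
+//   void *env[5];
+//   if (__builtin_setjmp(env) == 0)
+//     __builtin_longjmp(env, 1);   // OK: val is the constant 1
+//   __builtin_longjmp(env, 2);     // rejected: err_builtin_longjmp_invalid_val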
+
+namespace {
+enum StringLiteralCheckType {
+ SLCT_NotALiteral,
+ SLCT_UncheckedLiteral,
+ SLCT_CheckedLiteral
+};
+}
+
+// Determine if an expression is a string literal or constant string.
+// If this function returns false on the arguments to a function expecting a
+// format string, we will usually need to emit a warning.
+// True string literals are then checked by CheckFormatString.
+static StringLiteralCheckType
+checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
+ bool HasVAListArg, unsigned format_idx,
+ unsigned firstDataArg, Sema::FormatStringType Type,
+ Sema::VariadicCallType CallType, bool InFunctionCall,
+ llvm::SmallBitVector &CheckedVarArgs) {
+ tryAgain:
+ if (E->isTypeDependent() || E->isValueDependent())
+ return SLCT_NotALiteral;
+
+ E = E->IgnoreParenCasts();
+
+ if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull))
+ // Technically -Wformat-nonliteral does not warn about this case.
+ // The behavior of printf and friends in this case is implementation
+ // dependent. Ideally if the format string cannot be null then
+ // it should have a 'nonnull' attribute in the function prototype.
+ return SLCT_UncheckedLiteral;
+
+ switch (E->getStmtClass()) {
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass: {
+ // The expression is a literal if both sub-expressions were, and it was
+ // completely checked only if both sub-expressions were checked.
+ const AbstractConditionalOperator *C =
+ cast<AbstractConditionalOperator>(E);
+ StringLiteralCheckType Left =
+ checkFormatStringExpr(S, C->getTrueExpr(), Args,
+ HasVAListArg, format_idx, firstDataArg,
+ Type, CallType, InFunctionCall, CheckedVarArgs);
+ if (Left == SLCT_NotALiteral)
+ return SLCT_NotALiteral;
+ StringLiteralCheckType Right =
+ checkFormatStringExpr(S, C->getFalseExpr(), Args,
+ HasVAListArg, format_idx, firstDataArg,
+ Type, CallType, InFunctionCall, CheckedVarArgs);
+ return Left < Right ? Left : Right;
+ }
+
+ case Stmt::ImplicitCastExprClass: {
+ E = cast<ImplicitCastExpr>(E)->getSubExpr();
+ goto tryAgain;
+ }
+
+ case Stmt::OpaqueValueExprClass:
+ if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) {
+ E = src;
+ goto tryAgain;
+ }
+ return SLCT_NotALiteral;
+
+ case Stmt::PredefinedExprClass:
+ // While __func__, etc., are technically not string literals, they
+ // cannot contain format specifiers and thus are not a security
+ // liability.
+ return SLCT_UncheckedLiteral;
+
+ case Stmt::DeclRefExprClass: {
+ const DeclRefExpr *DR = cast<DeclRefExpr>(E);
+
+ // As an exception, do not flag errors for variables binding to
+ // const string literals.
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ bool isConstant = false;
+ QualType T = DR->getType();
+
+ if (const ArrayType *AT = S.Context.getAsArrayType(T)) {
+ isConstant = AT->getElementType().isConstant(S.Context);
+ } else if (const PointerType *PT = T->getAs<PointerType>()) {
+ isConstant = T.isConstant(S.Context) &&
+ PT->getPointeeType().isConstant(S.Context);
+ } else if (T->isObjCObjectPointerType()) {
+ // In ObjC, there is usually no "const ObjectPointer" type,
+ // so don't check if the pointee type is constant.
+ isConstant = T.isConstant(S.Context);
+ }
+
+ if (isConstant) {
+ if (const Expr *Init = VD->getAnyInitializer()) {
+ // Look through initializers like const char c[] = { "foo" }
+ if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) {
+ if (InitList->isStringLiteralInit())
+ Init = InitList->getInit(0)->IgnoreParenImpCasts();
+ }
+ return checkFormatStringExpr(S, Init, Args,
+ HasVAListArg, format_idx,
+ firstDataArg, Type, CallType,
+ /*InFunctionCall*/false, CheckedVarArgs);
+ }
+ }
+
+ // For vprintf* functions (i.e., HasVAListArg==true), we add a
+ // special check to see if the format string is a function parameter
+ // of the function calling the printf function. If the function
+ // has an attribute indicating it is a printf-like function, then we
+ // should suppress warnings concerning non-literals being used in a call
+ // to a vprintf function. For example:
+ //
+ // void
+  //  logmessage(char const *fmt __attribute__((format(printf, 1, 2))), ...) {
+ // va_list ap;
+ // va_start(ap, fmt);
+ // vprintf(fmt, ap); // Do NOT emit a warning about "fmt".
+ // ...
+ // }
+ if (HasVAListArg) {
+ if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) {
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) {
+ int PVIndex = PV->getFunctionScopeIndex() + 1;
+ for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) {
+ // adjust for implicit parameter
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
+ if (MD->isInstance())
+ ++PVIndex;
+ // We also check if the formats are compatible.
+ // We can't pass a 'scanf' string to a 'printf' function.
+ if (PVIndex == PVFormat->getFormatIdx() &&
+ Type == S.GetFormatStringType(PVFormat))
+ return SLCT_UncheckedLiteral;
+ }
+ }
+ }
+ }
+ }
+
+ return SLCT_NotALiteral;
+ }
+
+ case Stmt::CallExprClass:
+ case Stmt::CXXMemberCallExprClass: {
+ const CallExpr *CE = cast<CallExpr>(E);
+ if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
+ if (const FormatArgAttr *FA = ND->getAttr<FormatArgAttr>()) {
+ unsigned ArgIndex = FA->getFormatIdx();
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
+ if (MD->isInstance())
+ --ArgIndex;
+ const Expr *Arg = CE->getArg(ArgIndex - 1);
+
+ return checkFormatStringExpr(S, Arg, Args,
+ HasVAListArg, format_idx, firstDataArg,
+ Type, CallType, InFunctionCall,
+ CheckedVarArgs);
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ unsigned BuiltinID = FD->getBuiltinID();
+ if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
+ BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
+ const Expr *Arg = CE->getArg(0);
+ return checkFormatStringExpr(S, Arg, Args,
+ HasVAListArg, format_idx,
+ firstDataArg, Type, CallType,
+ InFunctionCall, CheckedVarArgs);
+ }
+ }
+ }
+
+ return SLCT_NotALiteral;
+ }
+ case Stmt::ObjCStringLiteralClass:
+ case Stmt::StringLiteralClass: {
+ const StringLiteral *StrE = nullptr;
+
+ if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
+ StrE = ObjCFExpr->getString();
+ else
+ StrE = cast<StringLiteral>(E);
+
+ if (StrE) {
+ S.CheckFormatString(StrE, E, Args, HasVAListArg, format_idx, firstDataArg,
+ Type, InFunctionCall, CallType, CheckedVarArgs);
+ return SLCT_CheckedLiteral;
+ }
+
+ return SLCT_NotALiteral;
+ }
+
+ default:
+ return SLCT_NotALiteral;
+ }
+}
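+
+// A sketch of how the cases above classify typical call sites:
+//
+//   printf(b ? "%d" : "%s\n", x);   // both arms literal -> checked
+//   const char msg[] = "%d\n";
+//   printf(msg, x);                 // const initializer traced -> checked
+//   printf(b ? "%d" : fmt, x);      // one arm not a literal -> SLCT_NotALiteral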
+
+Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
+ return llvm::StringSwitch<FormatStringType>(Format->getType()->getName())
+ .Case("scanf", FST_Scanf)
+ .Cases("printf", "printf0", FST_Printf)
+ .Cases("NSString", "CFString", FST_NSString)
+ .Case("strftime", FST_Strftime)
+ .Case("strfmon", FST_Strfmon)
+ .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf)
+ .Case("freebsd_kprintf", FST_FreeBSDKPrintf)
+ .Case("os_trace", FST_OSTrace)
+ .Default(FST_Unknown);
+}
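+
+// The first argument of the format attribute selects the checking mode, e.g.
+// (sketch; the function names are illustrative):
+//
+//   void logf(const char *f, ...) __attribute__((format(printf, 1, 2)));
+//       // -> FST_Printf
+//   size_t mytime(char *s, size_t n, const char *f)
+//       __attribute__((format(strftime, 3, 0)));    // -> FST_Strftime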
+
+/// CheckFormatArguments - Check calls to printf and scanf (and similar
+/// functions) for correct use of format strings.
+/// Returns true if a format string has been fully checked.
+bool Sema::CheckFormatArguments(const FormatAttr *Format,
+ ArrayRef<const Expr *> Args,
+ bool IsCXXMember,
+ VariadicCallType CallType,
+ SourceLocation Loc, SourceRange Range,
+ llvm::SmallBitVector &CheckedVarArgs) {
+ FormatStringInfo FSI;
+ if (getFormatStringInfo(Format, IsCXXMember, &FSI))
+ return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx,
+ FSI.FirstDataArg, GetFormatStringType(Format),
+ CallType, Loc, Range, CheckedVarArgs);
+ return false;
+}
+
+bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args,
+ bool HasVAListArg, unsigned format_idx,
+ unsigned firstDataArg, FormatStringType Type,
+ VariadicCallType CallType,
+ SourceLocation Loc, SourceRange Range,
+ llvm::SmallBitVector &CheckedVarArgs) {
+ // CHECK: printf/scanf-like function is called with no format string.
+ if (format_idx >= Args.size()) {
+ Diag(Loc, diag::warn_missing_format_string) << Range;
+ return false;
+ }
+
+ const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts();
+
+ // CHECK: format string is not a string literal.
+ //
+ // Dynamically generated format strings are difficult to
+ // automatically vet at compile time. Requiring that format strings
+ // are string literals: (1) permits the checking of format strings by
+ // the compiler and thereby (2) can practically remove the source of
+ // many format string exploits.
+
+ // Format string can be either ObjC string (e.g. @"%d") or
+ // C string (e.g. "%d")
+ // ObjC string uses the same format specifiers as C string, so we can use
+ // the same format string checking logic for both ObjC and C strings.
+ StringLiteralCheckType CT =
+ checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg,
+ format_idx, firstDataArg, Type, CallType,
+                          /*InFunctionCall*/true, CheckedVarArgs);
+ if (CT != SLCT_NotALiteral)
+ // Literal format string found, check done!
+ return CT == SLCT_CheckedLiteral;
+
+ // Strftime is particular as it always uses a single 'time' argument,
+ // so it is safe to pass a non-literal string.
+ if (Type == FST_Strftime)
+ return false;
+
+ // Do not emit diag when the string param is a macro expansion and the
+ // format is either NSString or CFString. This is a hack to prevent
+ // diag when using the NSLocalizedString and CFCopyLocalizedString macros
+ // which are usually used in place of NS and CF string literals.
+ if (Type == FST_NSString &&
+ SourceMgr.isInSystemMacro(Args[format_idx]->getLocStart()))
+ return false;
+
+ // If there are no arguments specified, warn with -Wformat-security, otherwise
+ // warn only with -Wformat-nonliteral.
+ if (Args.size() == firstDataArg)
+ Diag(Args[format_idx]->getLocStart(),
+ diag::warn_format_nonliteral_noargs)
+ << OrigFormatExpr->getSourceRange();
+ else
+ Diag(Args[format_idx]->getLocStart(),
+ diag::warn_format_nonliteral)
+ << OrigFormatExpr->getSourceRange();
+ return false;
+}
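+
+// Sketch of the two diagnostics chosen above for a non-literal format:
+//
+//   printf(fmt);      // no data arguments -> -Wformat-security
+//   printf(fmt, n);   // data arguments    -> -Wformat-nonliteral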
+
+namespace {
+class CheckFormatHandler : public analyze_format_string::FormatStringHandler {
+protected:
+ Sema &S;
+ const StringLiteral *FExpr;
+ const Expr *OrigFormatExpr;
+ const unsigned FirstDataArg;
+ const unsigned NumDataArgs;
+ const char *Beg; // Start of format string.
+ const bool HasVAListArg;
+ ArrayRef<const Expr *> Args;
+ unsigned FormatIdx;
+ llvm::SmallBitVector CoveredArgs;
+ bool usesPositionalArgs;
+ bool atFirstArg;
+ bool inFunctionCall;
+ Sema::VariadicCallType CallType;
+ llvm::SmallBitVector &CheckedVarArgs;
+public:
+ CheckFormatHandler(Sema &s, const StringLiteral *fexpr,
+ const Expr *origFormatExpr, unsigned firstDataArg,
+ unsigned numDataArgs, const char *beg, bool hasVAListArg,
+ ArrayRef<const Expr *> Args,
+ unsigned formatIdx, bool inFunctionCall,
+ Sema::VariadicCallType callType,
+ llvm::SmallBitVector &CheckedVarArgs)
+ : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr),
+ FirstDataArg(firstDataArg), NumDataArgs(numDataArgs),
+ Beg(beg), HasVAListArg(hasVAListArg),
+ Args(Args), FormatIdx(formatIdx),
+ usesPositionalArgs(false), atFirstArg(true),
+ inFunctionCall(inFunctionCall), CallType(callType),
+ CheckedVarArgs(CheckedVarArgs) {
+ CoveredArgs.resize(numDataArgs);
+ CoveredArgs.reset();
+ }
+
+ void DoneProcessing();
+
+ void HandleIncompleteSpecifier(const char *startSpecifier,
+ unsigned specifierLen) override;
+
+ void HandleInvalidLengthModifier(
+ const analyze_format_string::FormatSpecifier &FS,
+ const analyze_format_string::ConversionSpecifier &CS,
+ const char *startSpecifier, unsigned specifierLen,
+ unsigned DiagID);
+
+ void HandleNonStandardLengthModifier(
+ const analyze_format_string::FormatSpecifier &FS,
+ const char *startSpecifier, unsigned specifierLen);
+
+ void HandleNonStandardConversionSpecifier(
+ const analyze_format_string::ConversionSpecifier &CS,
+ const char *startSpecifier, unsigned specifierLen);
+
+ void HandlePosition(const char *startPos, unsigned posLen) override;
+
+ void HandleInvalidPosition(const char *startSpecifier,
+ unsigned specifierLen,
+ analyze_format_string::PositionContext p) override;
+
+ void HandleZeroPosition(const char *startPos, unsigned posLen) override;
+
+ void HandleNullChar(const char *nullCharacter) override;
+
+ template <typename Range>
+ static void EmitFormatDiagnostic(Sema &S, bool inFunctionCall,
+ const Expr *ArgumentExpr,
+ PartialDiagnostic PDiag,
+ SourceLocation StringLoc,
+ bool IsStringLocation, Range StringRange,
+ ArrayRef<FixItHint> Fixit = None);
+
+protected:
+ bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc,
+ const char *startSpec,
+ unsigned specifierLen,
+ const char *csStart, unsigned csLen);
+
+ void HandlePositionalNonpositionalArgs(SourceLocation Loc,
+ const char *startSpec,
+ unsigned specifierLen);
+
+ SourceRange getFormatStringRange();
+ CharSourceRange getSpecifierRange(const char *startSpecifier,
+ unsigned specifierLen);
+ SourceLocation getLocationOfByte(const char *x);
+
+ const Expr *getDataArg(unsigned i) const;
+
+ bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS,
+ const analyze_format_string::ConversionSpecifier &CS,
+ const char *startSpecifier, unsigned specifierLen,
+ unsigned argIndex);
+
+ template <typename Range>
+ void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc,
+ bool IsStringLocation, Range StringRange,
+ ArrayRef<FixItHint> Fixit = None);
+};
+}
+
+SourceRange CheckFormatHandler::getFormatStringRange() {
+ return OrigFormatExpr->getSourceRange();
+}
+
+CharSourceRange CheckFormatHandler::
+getSpecifierRange(const char *startSpecifier, unsigned specifierLen) {
+ SourceLocation Start = getLocationOfByte(startSpecifier);
+ SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1);
+
+ // Advance the end SourceLocation by one due to half-open ranges.
+ End = End.getLocWithOffset(1);
+
+ return CharSourceRange::getCharRange(Start, End);
+}
+
+SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) {
+ return S.getLocationOfStringLiteralByte(FExpr, x - Beg);
+}
+
+void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier,
+ unsigned specifierLen){
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier),
+ getLocationOfByte(startSpecifier),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+}
+
+void CheckFormatHandler::HandleInvalidLengthModifier(
+ const analyze_format_string::FormatSpecifier &FS,
+ const analyze_format_string::ConversionSpecifier &CS,
+ const char *startSpecifier, unsigned specifierLen, unsigned DiagID) {
+ using namespace analyze_format_string;
+
+ const LengthModifier &LM = FS.getLengthModifier();
+ CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());
+
+ // See if we know how to fix this length modifier.
+ Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
+ if (FixedLM) {
+ EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
+ getLocationOfByte(LM.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+
+ S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
+ << FixedLM->toString()
+ << FixItHint::CreateReplacement(LMRange, FixedLM->toString());
+
+ } else {
+ FixItHint Hint;
+ if (DiagID == diag::warn_format_nonsensical_length)
+ Hint = FixItHint::CreateRemoval(LMRange);
+
+ EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
+ getLocationOfByte(LM.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen),
+ Hint);
+ }
+}
+
+void CheckFormatHandler::HandleNonStandardLengthModifier(
+ const analyze_format_string::FormatSpecifier &FS,
+ const char *startSpecifier, unsigned specifierLen) {
+ using namespace analyze_format_string;
+
+ const LengthModifier &LM = FS.getLengthModifier();
+ CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());
+
+ // See if we know how to fix this length modifier.
+ Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
+ if (FixedLM) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
+ << LM.toString() << 0,
+ getLocationOfByte(LM.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+
+ S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
+ << FixedLM->toString()
+ << FixItHint::CreateReplacement(LMRange, FixedLM->toString());
+
+ } else {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
+ << LM.toString() << 0,
+ getLocationOfByte(LM.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+ }
+}
+
+void CheckFormatHandler::HandleNonStandardConversionSpecifier(
+ const analyze_format_string::ConversionSpecifier &CS,
+ const char *startSpecifier, unsigned specifierLen) {
+ using namespace analyze_format_string;
+
+ // See if we know how to fix this conversion specifier.
+ Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier();
+ if (FixedCS) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
+ << CS.toString() << /*conversion specifier*/1,
+ getLocationOfByte(CS.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+
+ CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength());
+ S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier)
+ << FixedCS->toString()
+ << FixItHint::CreateReplacement(CSRange, FixedCS->toString());
+ } else {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
+ << CS.toString() << /*conversion specifier*/1,
+ getLocationOfByte(CS.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+ }
+}
+
+void CheckFormatHandler::HandlePosition(const char *startPos,
+ unsigned posLen) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg),
+ getLocationOfByte(startPos),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startPos, posLen));
+}
+
+void
+CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen,
+ analyze_format_string::PositionContext p) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier)
+ << (unsigned) p,
+ getLocationOfByte(startPos), /*IsStringLocation*/true,
+ getSpecifierRange(startPos, posLen));
+}
+
+void CheckFormatHandler::HandleZeroPosition(const char *startPos,
+ unsigned posLen) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier),
+ getLocationOfByte(startPos),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startPos, posLen));
+}
+
+void CheckFormatHandler::HandleNullChar(const char *nullCharacter) {
+ if (!isa<ObjCStringLiteral>(OrigFormatExpr)) {
+ // The presence of a null character is likely an error.
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_printf_format_string_contains_null_char),
+ getLocationOfByte(nullCharacter), /*IsStringLocation*/true,
+ getFormatStringRange());
+ }
+}
+
+// Note that this may return NULL if there was an error parsing or building
+// one of the argument expressions.
+const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
+ return Args[FirstDataArg + i];
+}
+
+void CheckFormatHandler::DoneProcessing() {
+ // Does the number of data arguments exceed the number of
+ // format conversions in the format string?
+ if (!HasVAListArg) {
+ // Find any arguments that weren't covered.
+ CoveredArgs.flip();
+ signed notCoveredArg = CoveredArgs.find_first();
+ if (notCoveredArg >= 0) {
+ assert((unsigned)notCoveredArg < NumDataArgs);
+ if (const Expr *E = getDataArg((unsigned) notCoveredArg)) {
+ SourceLocation Loc = E->getLocStart();
+ if (!S.getSourceManager().isInSystemMacro(Loc)) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_data_arg_not_used),
+ Loc, /*IsStringLocation*/false,
+ getFormatStringRange());
+ }
+ }
+ }
+ }
+}
+
+bool
+CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
+ SourceLocation Loc,
+ const char *startSpec,
+ unsigned specifierLen,
+ const char *csStart,
+ unsigned csLen) {
+
+ bool keepGoing = true;
+ if (argIndex < NumDataArgs) {
+    // Consider the argument covered, even though the specifier doesn't
+    // make sense.
+ CoveredArgs.set(argIndex);
+ }
+ else {
+    // If argIndex exceeds the number of data arguments we don't issue a
+    // warning because that would just be a cascade of warnings (and they may
+    // have intended '%%' anyway). We don't want to continue processing the
+    // format string after this point, however, as we will likely just get
+    // gibberish when trying to match arguments.
+ keepGoing = false;
+ }
+
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_conversion)
+ << StringRef(csStart, csLen),
+ Loc, /*IsStringLocation*/true,
+ getSpecifierRange(startSpec, specifierLen));
+
+ return keepGoing;
+}
+
+void
+CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc,
+ const char *startSpec,
+ unsigned specifierLen) {
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_format_mix_positional_nonpositional_args),
+ Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen));
+}
+
+bool
+CheckFormatHandler::CheckNumArgs(
+ const analyze_format_string::FormatSpecifier &FS,
+ const analyze_format_string::ConversionSpecifier &CS,
+ const char *startSpecifier, unsigned specifierLen, unsigned argIndex) {
+
+ if (argIndex >= NumDataArgs) {
+ PartialDiagnostic PDiag = FS.usesPositionalArg()
+ ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args)
+ << (argIndex+1) << NumDataArgs)
+ : S.PDiag(diag::warn_printf_insufficient_data_args);
+ EmitFormatDiagnostic(
+ PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+ return false;
+ }
+ return true;
+}
+
+template<typename Range>
+void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
+ SourceLocation Loc,
+ bool IsStringLocation,
+ Range StringRange,
+ ArrayRef<FixItHint> FixIt) {
+ EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag,
+ Loc, IsStringLocation, StringRange, FixIt);
+}
+
+/// \brief If the format string is not within the function call, emit a note
+/// so that both the function call and the string definition appear in the
+/// diagnostics.
+///
+/// \param InFunctionCall if true, the format string is within the function
+/// call and only one diagnostic message will be produced. Otherwise, an
+/// extra note will be emitted pointing to location of the format string.
+///
+/// \param ArgumentExpr the expression that is passed as the format string
+/// argument in the function call. Used for getting locations when two
+/// diagnostics are emitted.
+///
+/// \param PDiag the callee should already have provided any strings for the
+/// diagnostic message. This function only adds locations and fixits
+/// to diagnostics.
+///
+/// \param Loc primary location for diagnostic. If two diagnostics are
+/// required, one will be at Loc and a new SourceLocation will be created for
+/// the other one.
+///
+/// \param IsStringLocation if true, Loc points to the format string and will
+/// be used for the note. Otherwise, Loc points to the argument list and will
+/// be used with PDiag.
+///
+/// \param StringRange some or all of the string to highlight. This is
+/// templated so it can accept either a CharSourceRange or a SourceRange.
+///
+/// \param FixIt optional fix it hint for the format string.
+template<typename Range>
+void CheckFormatHandler::EmitFormatDiagnostic(Sema &S, bool InFunctionCall,
+ const Expr *ArgumentExpr,
+ PartialDiagnostic PDiag,
+ SourceLocation Loc,
+ bool IsStringLocation,
+ Range StringRange,
+ ArrayRef<FixItHint> FixIt) {
+ if (InFunctionCall) {
+ const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
+ D << StringRange;
+ D << FixIt;
+ } else {
+ S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
+ << ArgumentExpr->getSourceRange();
+
+ const Sema::SemaDiagnosticBuilder &Note =
+ S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
+ diag::note_format_string_defined);
+
+ Note << StringRange;
+ Note << FixIt;
+ }
+}
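+
+// For example, when the string is defined away from the call
+// (InFunctionCall == false), two diagnostics are produced (sketch):
+//
+//   const char fmt[] = "%s %s";   // note: format string is defined here
+//   printf(fmt, "one");           // warning is attached to the call site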
+
+//===--- CHECK: Printf format string checking ------------------------------===//
+
+namespace {
+class CheckPrintfHandler : public CheckFormatHandler {
+ bool ObjCContext;
+public:
+ CheckPrintfHandler(Sema &s, const StringLiteral *fexpr,
+ const Expr *origFormatExpr, unsigned firstDataArg,
+ unsigned numDataArgs, bool isObjC,
+ const char *beg, bool hasVAListArg,
+ ArrayRef<const Expr *> Args,
+ unsigned formatIdx, bool inFunctionCall,
+ Sema::VariadicCallType CallType,
+ llvm::SmallBitVector &CheckedVarArgs)
+ : CheckFormatHandler(s, fexpr, origFormatExpr, firstDataArg,
+ numDataArgs, beg, hasVAListArg, Args,
+ formatIdx, inFunctionCall, CallType, CheckedVarArgs),
+ ObjCContext(isObjC)
+ {}
+
+ bool HandleInvalidPrintfConversionSpecifier(
+ const analyze_printf::PrintfSpecifier &FS,
+ const char *startSpecifier,
+ unsigned specifierLen) override;
+
+ bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
+ const char *startSpecifier,
+ unsigned specifierLen) override;
+ bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
+ const char *StartSpecifier,
+ unsigned SpecifierLen,
+ const Expr *E);
+
+ bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k,
+ const char *startSpecifier, unsigned specifierLen);
+ void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS,
+ const analyze_printf::OptionalAmount &Amt,
+ unsigned type,
+ const char *startSpecifier, unsigned specifierLen);
+ void HandleFlag(const analyze_printf::PrintfSpecifier &FS,
+ const analyze_printf::OptionalFlag &flag,
+ const char *startSpecifier, unsigned specifierLen);
+ void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS,
+ const analyze_printf::OptionalFlag &ignoredFlag,
+ const analyze_printf::OptionalFlag &flag,
+ const char *startSpecifier, unsigned specifierLen);
+ bool checkForCStrMembers(const analyze_printf::ArgType &AT,
+ const Expr *E);
+
+ void HandleEmptyObjCModifierFlag(const char *startFlag,
+ unsigned flagLen) override;
+
+ void HandleInvalidObjCModifierFlag(const char *startFlag,
+ unsigned flagLen) override;
+
+ void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart,
+ const char *flagsEnd,
+ const char *conversionPosition)
+ override;
+};
+}
+
+bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier(
+ const analyze_printf::PrintfSpecifier &FS,
+ const char *startSpecifier,
+ unsigned specifierLen) {
+ const analyze_printf::PrintfConversionSpecifier &CS =
+ FS.getConversionSpecifier();
+
+ return HandleInvalidConversionSpecifier(FS.getArgIndex(),
+ getLocationOfByte(CS.getStart()),
+ startSpecifier, specifierLen,
+ CS.getStart(), CS.getLength());
+}
+
+bool CheckPrintfHandler::HandleAmount(
+ const analyze_format_string::OptionalAmount &Amt,
+ unsigned k, const char *startSpecifier,
+ unsigned specifierLen) {
+
+ if (Amt.hasDataArgument()) {
+ if (!HasVAListArg) {
+ unsigned argIndex = Amt.getArgIndex();
+ if (argIndex >= NumDataArgs) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg)
+ << k,
+ getLocationOfByte(Amt.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+ // Don't do any more checking. We will just emit
+ // spurious errors.
+ return false;
+ }
+
+ // Type check the data argument. It should be an 'int'.
+ // Although not in conformance with C99, we also allow the argument to be
+ // an 'unsigned int' as that is a reasonably safe case. GCC also
+ // doesn't emit a warning for that case.
+ CoveredArgs.set(argIndex);
+ const Expr *Arg = getDataArg(argIndex);
+ if (!Arg)
+ return false;
+
+ QualType T = Arg->getType();
+
+ const analyze_printf::ArgType &AT = Amt.getArgType(S.Context);
+ assert(AT.isValid());
+
+ if (!AT.matchesType(S.Context, T)) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type)
+ << k << AT.getRepresentativeTypeName(S.Context)
+ << T << Arg->getSourceRange(),
+ getLocationOfByte(Amt.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+ // Don't do any more checking. We will just emit
+ // spurious errors.
+ return false;
+ }
+ }
+ }
+ return true;
+}
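+
+// e.g. a '*' width or precision must be matched by an 'int' argument
+// (sketch):
+//
+//   printf("%*d", width, n);   // OK when 'width' is an int
+//   printf("%.*f", 1.5, d);    // warn_printf_asterisk_wrong_type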
+
+void CheckPrintfHandler::HandleInvalidAmount(
+ const analyze_printf::PrintfSpecifier &FS,
+ const analyze_printf::OptionalAmount &Amt,
+ unsigned type,
+ const char *startSpecifier,
+ unsigned specifierLen) {
+ const analyze_printf::PrintfConversionSpecifier &CS =
+ FS.getConversionSpecifier();
+
+ FixItHint fixit =
+ Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant
+ ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(),
+ Amt.getConstantLength()))
+ : FixItHint();
+
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount)
+ << type << CS.toString(),
+ getLocationOfByte(Amt.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen),
+ fixit);
+}
+
+void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS,
+ const analyze_printf::OptionalFlag &flag,
+ const char *startSpecifier,
+ unsigned specifierLen) {
+ // Warn about pointless flag with a fixit removal.
+ const analyze_printf::PrintfConversionSpecifier &CS =
+ FS.getConversionSpecifier();
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag)
+ << flag.toString() << CS.toString(),
+ getLocationOfByte(flag.getPosition()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen),
+ FixItHint::CreateRemoval(
+ getSpecifierRange(flag.getPosition(), 1)));
+}
+
+void CheckPrintfHandler::HandleIgnoredFlag(
+ const analyze_printf::PrintfSpecifier &FS,
+ const analyze_printf::OptionalFlag &ignoredFlag,
+ const analyze_printf::OptionalFlag &flag,
+ const char *startSpecifier,
+ unsigned specifierLen) {
+ // Warn about ignored flag with a fixit removal.
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag)
+ << ignoredFlag.toString() << flag.toString(),
+ getLocationOfByte(ignoredFlag.getPosition()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen),
+ FixItHint::CreateRemoval(
+ getSpecifierRange(ignoredFlag.getPosition(), 1)));
+}
+
+
+void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag,
+ unsigned flagLen) {
+ // Warn about an empty flag.
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag),
+ getLocationOfByte(startFlag),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startFlag, flagLen));
+}
+
+void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag,
+ unsigned flagLen) {
+ // Warn about an invalid flag.
+ auto Range = getSpecifierRange(startFlag, flagLen);
+ StringRef flag(startFlag, flagLen);
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag,
+ getLocationOfByte(startFlag),
+ /*IsStringLocation*/true,
+ Range, FixItHint::CreateRemoval(Range));
+}
+
+void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion(
+ const char *flagsStart, const char *flagsEnd, const char *conversionPosition) {
+ // Warn about using '[...]' without a '@' conversion.
+ auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1);
+ auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion;
+ EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1),
+ getLocationOfByte(conversionPosition),
+ /*IsStringLocation*/true,
+ Range, FixItHint::CreateRemoval(Range));
+}
+
+// Determines if the specified type is a C++ class or struct containing
+// a member with the specified name and kind (e.g. a CXXMethodDecl named
+// "c_str").
+template<typename MemberKind>
+static llvm::SmallPtrSet<MemberKind*, 1>
+CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
+ const RecordType *RT = Ty->getAs<RecordType>();
+ llvm::SmallPtrSet<MemberKind*, 1> Results;
+
+ if (!RT)
+ return Results;
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD || !RD->getDefinition())
+ return Results;
+
+ LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
+ Sema::LookupMemberName);
+ R.suppressDiagnostics();
+
+ // We just need to include all members of the right kind turned up by the
+ // filter, at this point.
+ if (S.LookupQualifiedName(R, RT->getDecl()))
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
+ NamedDecl *decl = (*I)->getUnderlyingDecl();
+ if (MemberKind *FK = dyn_cast<MemberKind>(decl))
+ Results.insert(FK);
+ }
+ return Results;
+}
+
+/// Check if we could call '.c_str()' on an object.
+///
+/// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
+/// allow the call, or if it would be ambiguous).
+bool Sema::hasCStrMethod(const Expr *E) {
+ typedef llvm::SmallPtrSet<CXXMethodDecl*, 1> MethodSet;
+ MethodSet Results =
+ CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType());
+ for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
+ MI != ME; ++MI)
+ if ((*MI)->getMinRequiredArguments() == 0)
+ return true;
+ return false;
+}
+
+// Check if a (w)string was passed when a (w)char* was needed, and offer a
+// better diagnostic if so. AT is assumed to be valid.
+// Returns true when a c_str() conversion method is found.
+bool CheckPrintfHandler::checkForCStrMembers(
+ const analyze_printf::ArgType &AT, const Expr *E) {
+ typedef llvm::SmallPtrSet<CXXMethodDecl*, 1> MethodSet;
+
+ MethodSet Results =
+ CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType());
+
+ for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
+ MI != ME; ++MI) {
+ const CXXMethodDecl *Method = *MI;
+ if (Method->getMinRequiredArguments() == 0 &&
+ AT.matchesType(S.Context, Method->getReturnType())) {
+ // FIXME: Suggest parens if the expression needs them.
+ SourceLocation EndLoc = S.getLocForEndOfToken(E->getLocEnd());
+ S.Diag(E->getLocStart(), diag::note_printf_c_str)
+ << "c_str()"
+ << FixItHint::CreateInsertion(EndLoc, ".c_str()");
+ return true;
+ }
+ }
+
+ return false;
+}
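+
+// e.g. passing a std::string where a char * is expected (sketch):
+//
+//   std::string name = "abc";
+//   printf("%s", name);   // note suggests appending ".c_str()"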
+
+bool
+CheckPrintfHandler::HandlePrintfSpecifier(
+    const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier,
+    unsigned specifierLen) {
+
+ using namespace analyze_format_string;
+ using namespace analyze_printf;
+ const PrintfConversionSpecifier &CS = FS.getConversionSpecifier();
+
+ if (FS.consumesDataArgument()) {
+ if (atFirstArg) {
+ atFirstArg = false;
+ usesPositionalArgs = FS.usesPositionalArg();
+ }
+ else if (usesPositionalArgs != FS.usesPositionalArg()) {
+ HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
+ startSpecifier, specifierLen);
+ return false;
+ }
+ }
+
+ // First check if the field width, precision, and conversion specifier
+ // have matching data arguments.
+ if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0,
+ startSpecifier, specifierLen)) {
+ return false;
+ }
+
+ if (!HandleAmount(FS.getPrecision(), /* precision */ 1,
+ startSpecifier, specifierLen)) {
+ return false;
+ }
+
+ if (!CS.consumesDataArgument()) {
+ // FIXME: Technically specifying a precision or field width here
+ // makes no sense. Worth issuing a warning at some point.
+ return true;
+ }
+
+ // Consume the argument.
+ unsigned argIndex = FS.getArgIndex();
+ if (argIndex < NumDataArgs) {
+ // The check to see if the argIndex is valid will come later.
+ // We set the bit here because we may exit early from this
+ // function if we encounter some other error.
+ CoveredArgs.set(argIndex);
+ }
+
+ // FreeBSD kernel extensions.
+ if (CS.getKind() == ConversionSpecifier::FreeBSDbArg ||
+ CS.getKind() == ConversionSpecifier::FreeBSDDArg) {
+ // We need at least two arguments.
+ if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1))
+ return false;
+
+ // Claim the second argument.
+ CoveredArgs.set(argIndex + 1);
+
+ // Type check the first argument (int for %b, pointer for %D)
+ const Expr *Ex = getDataArg(argIndex);
+ const analyze_printf::ArgType &AT =
+ (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ?
+ ArgType(S.Context.IntTy) : ArgType::CPointerTy;
+ if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType()))
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
+ << AT.getRepresentativeTypeName(S.Context) << Ex->getType()
+ << false << Ex->getSourceRange(),
+ Ex->getLocStart(), /*IsStringLocation*/false,
+ getSpecifierRange(startSpecifier, specifierLen));
+
+ // Type check the second argument (char * for both %b and %D)
+ Ex = getDataArg(argIndex + 1);
+ const analyze_printf::ArgType &AT2 = ArgType::CStrTy;
+ if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType()))
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
+ << AT2.getRepresentativeTypeName(S.Context) << Ex->getType()
+ << false << Ex->getSourceRange(),
+ Ex->getLocStart(), /*IsStringLocation*/false,
+ getSpecifierRange(startSpecifier, specifierLen));
+
+ return true;
+ }
+
+ // Check for using an Objective-C specific conversion specifier
+ // in a non-ObjC literal.
+ if (!ObjCContext && CS.isObjCArg()) {
+ return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
+ specifierLen);
+ }
+
+ // Check for invalid use of field width
+ if (!FS.hasValidFieldWidth()) {
+ HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0,
+ startSpecifier, specifierLen);
+ }
+
+ // Check for invalid use of precision
+ if (!FS.hasValidPrecision()) {
+ HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1,
+ startSpecifier, specifierLen);
+ }
+
+ // Check each flag does not conflict with any other component.
+ if (!FS.hasValidThousandsGroupingPrefix())
+ HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen);
+ if (!FS.hasValidLeadingZeros())
+ HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen);
+ if (!FS.hasValidPlusPrefix())
+ HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen);
+ if (!FS.hasValidSpacePrefix())
+ HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen);
+ if (!FS.hasValidAlternativeForm())
+ HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen);
+ if (!FS.hasValidLeftJustified())
+ HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen);
+
+ // Check that flags are not ignored by another flag
+ if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+'
+ HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(),
+ startSpecifier, specifierLen);
+ if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-'
+ HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(),
+ startSpecifier, specifierLen);
+
+ // Check the length modifier is valid with the given conversion specifier.
+ if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo()))
+ HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
+ diag::warn_format_nonsensical_length);
+ else if (!FS.hasStandardLengthModifier())
+ HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
+ else if (!FS.hasStandardLengthConversionCombination())
+ HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
+ diag::warn_format_non_standard_conversion_spec);
+
+ if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
+ HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
+
+ // The remaining checks depend on the data arguments.
+ if (HasVAListArg)
+ return true;
+
+ if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
+ return false;
+
+ const Expr *Arg = getDataArg(argIndex);
+ if (!Arg)
+ return true;
+
+ return checkFormatExpr(FS, startSpecifier, specifierLen, Arg);
+}
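+
+// Sketch of the FreeBSD kernel extensions handled above; both consume two
+// data arguments (the second strings are illustrative):
+//
+//   printf("flags=%b\n", v, "\10\2ASLEEP\1RUNNING");  // int + char *
+//   printf("%D", ptr, ":");                           // pointer + char *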
+
+static bool requiresParensToAddCast(const Expr *E) {
+ // FIXME: We should have a general way to reason about operator
+ // precedence and whether parens are actually needed here.
+ // Take care of a few common cases where they aren't.
+ const Expr *Inside = E->IgnoreImpCasts();
+ if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside))
+ Inside = POE->getSyntacticForm()->IgnoreImpCasts();
+
+ switch (Inside->getStmtClass()) {
+ case Stmt::ArraySubscriptExprClass:
+ case Stmt::CallExprClass:
+ case Stmt::CharacterLiteralClass:
+ case Stmt::CXXBoolLiteralExprClass:
+ case Stmt::DeclRefExprClass:
+ case Stmt::FloatingLiteralClass:
+ case Stmt::IntegerLiteralClass:
+ case Stmt::MemberExprClass:
+ case Stmt::ObjCArrayLiteralClass:
+ case Stmt::ObjCBoolLiteralExprClass:
+ case Stmt::ObjCBoxedExprClass:
+ case Stmt::ObjCDictionaryLiteralClass:
+ case Stmt::ObjCEncodeExprClass:
+ case Stmt::ObjCIvarRefExprClass:
+ case Stmt::ObjCMessageExprClass:
+ case Stmt::ObjCPropertyRefExprClass:
+ case Stmt::ObjCStringLiteralClass:
+ case Stmt::ObjCSubscriptRefExprClass:
+ case Stmt::ParenExprClass:
+ case Stmt::StringLiteralClass:
+ case Stmt::UnaryOperatorClass:
+ return false;
+ default:
+ return true;
+ }
+}
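+
+// e.g. a suggested cast only needs wrapping parens for low-precedence
+// expressions (sketch):
+//
+//   (long)x          // DeclRefExpr: insertion only
+//   (long)(x + y)    // BinaryOperator: cast plus parens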
+
+static std::pair<QualType, StringRef>
+shouldNotPrintDirectly(const ASTContext &Context,
+ QualType IntendedTy,
+ const Expr *E) {
+ // Use a 'while' to peel off layers of typedefs.
+ QualType TyTy = IntendedTy;
+ while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) {
+ StringRef Name = UserTy->getDecl()->getName();
+ QualType CastTy = llvm::StringSwitch<QualType>(Name)
+ .Case("NSInteger", Context.LongTy)
+ .Case("NSUInteger", Context.UnsignedLongTy)
+ .Case("SInt32", Context.IntTy)
+ .Case("UInt32", Context.UnsignedIntTy)
+ .Default(QualType());
+
+ if (!CastTy.isNull())
+ return std::make_pair(CastTy, Name);
+
+ TyTy = UserTy->desugar();
+ }
+
+ // Strip parens if necessary.
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
+ return shouldNotPrintDirectly(Context,
+ PE->getSubExpr()->getType(),
+ PE->getSubExpr());
+
+  // If this is a conditional expression, its result type is constructed via
+  // the usual arithmetic conversions, so any necessary typedef sugar (such
+  // as NSInteger) may have been stripped. Recurse into the operands to check
+  // for it there.
+ if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
+ QualType TrueTy, FalseTy;
+ StringRef TrueName, FalseName;
+
+ std::tie(TrueTy, TrueName) =
+ shouldNotPrintDirectly(Context,
+ CO->getTrueExpr()->getType(),
+ CO->getTrueExpr());
+ std::tie(FalseTy, FalseName) =
+ shouldNotPrintDirectly(Context,
+ CO->getFalseExpr()->getType(),
+ CO->getFalseExpr());
+
+ if (TrueTy == FalseTy)
+ return std::make_pair(TrueTy, TrueName);
+ else if (TrueTy.isNull())
+ return std::make_pair(FalseTy, FalseName);
+ else if (FalseTy.isNull())
+ return std::make_pair(TrueTy, TrueName);
+ }
+
+ return std::make_pair(QualType(), StringRef());
+}
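+
+// On Darwin this drives fix-its such as (sketch):
+//
+//   NSInteger n = 42;
+//   NSLog(@"%d", n);         // 'NSInteger' should not be printed directly;
+//                            // suggest "%ld" plus an explicit (long) cast
+//   NSLog(@"%ld", (long)n);  // suggested form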
+
+bool
+CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
+ const char *StartSpecifier,
+ unsigned SpecifierLen,
+ const Expr *E) {
+ using namespace analyze_format_string;
+ using namespace analyze_printf;
+ // Now type check the data expression that matches the
+ // format specifier.
+ const analyze_printf::ArgType &AT = FS.getArgType(S.Context,
+ ObjCContext);
+ if (!AT.isValid())
+ return true;
+
+ QualType ExprTy = E->getType();
+ while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) {
+ ExprTy = TET->getUnderlyingExpr()->getType();
+ }
+
+ analyze_printf::ArgType::MatchKind match = AT.matchesType(S.Context, ExprTy);
+
+ if (match == analyze_printf::ArgType::Match) {
+ return true;
+ }
+
+ // Look through argument promotions for our error message's reported type.
+ // This includes the integral and floating promotions, but excludes array
+ // and function pointer decay; seeing that an argument intended to be a
+ // string has type 'char [6]' is probably more confusing than 'char *'.
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() == CK_IntegralCast ||
+ ICE->getCastKind() == CK_FloatingCast) {
+ E = ICE->getSubExpr();
+ ExprTy = E->getType();
+
+ // Check if we didn't match because of an implicit cast from a 'char'
+ // or 'short' to an 'int'. This is done because printf is a varargs
+ // function.
+ if (ICE->getType() == S.Context.IntTy ||
+ ICE->getType() == S.Context.UnsignedIntTy) {
+ // All further checking is done on the subexpression.
+ if (AT.matchesType(S.Context, ExprTy))
+ return true;
+ }
+ }
+ } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) {
+ // Special case for 'a', which has type 'int' in C.
+ // Note, however, that we do /not/ want to treat multibyte constants like
+ // 'MooV' as characters! This form is deprecated but still exists.
+ if (ExprTy == S.Context.IntTy)
+ if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue()))
+ ExprTy = S.Context.CharTy;
+ }
+
+ // Look through enums to their underlying type.
+ bool IsEnum = false;
+ if (auto EnumTy = ExprTy->getAs<EnumType>()) {
+ ExprTy = EnumTy->getDecl()->getIntegerType();
+ IsEnum = true;
+ }
+
+ // %C in an Objective-C context prints a unichar, not a wchar_t.
+ // If the argument is an integer of some kind, believe the %C and suggest
+ // a cast instead of changing the conversion specifier.
+ QualType IntendedTy = ExprTy;
+ if (ObjCContext &&
+ FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) {
+ if (ExprTy->isIntegralOrUnscopedEnumerationType() &&
+ !ExprTy->isCharType()) {
+ // 'unichar' is defined as a typedef of unsigned short, but we should
+ // prefer using the typedef if it is visible.
+ IntendedTy = S.Context.UnsignedShortTy;
+
+ // While we are here, check if the value is an IntegerLiteral that happens
+ // to be within the valid range.
+ if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) {
+ const llvm::APInt &V = IL->getValue();
+ if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy))
+ return true;
+ }
+
+ LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getLocStart(),
+ Sema::LookupOrdinaryName);
+ if (S.LookupName(Result, S.getCurScope())) {
+ NamedDecl *ND = Result.getFoundDecl();
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND))
+ if (TD->getUnderlyingType() == IntendedTy)
+ IntendedTy = S.Context.getTypedefType(TD);
+ }
+ }
+ }
+
+ // Special-case some of Darwin's platform-independence types by suggesting
+ // casts to primitive types that are known to be large enough.
+ bool ShouldNotPrintDirectly = false; StringRef CastTyName;
+ if (S.Context.getTargetInfo().getTriple().isOSDarwin()) {
+ QualType CastTy;
+ std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E);
+ if (!CastTy.isNull()) {
+ IntendedTy = CastTy;
+ ShouldNotPrintDirectly = true;
+ }
+ }
+
+ // We may be able to offer a FixItHint if it is a supported type.
+ PrintfSpecifier fixedFS = FS;
+ bool success = fixedFS.fixType(IntendedTy, S.getLangOpts(),
+ S.Context, ObjCContext);
+
+ if (success) {
+ // Get the fix string from the fixed format specifier
+ SmallString<16> buf;
+ llvm::raw_svector_ostream os(buf);
+ fixedFS.toString(os);
+
+ CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen);
+
+ if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) {
+ unsigned diag = diag::warn_format_conversion_argument_type_mismatch;
+ if (match == analyze_format_string::ArgType::NoMatchPedantic) {
+ diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
+ }
+ // In this case, the specifier is wrong and should be changed to match
+ // the argument.
+ EmitFormatDiagnostic(S.PDiag(diag)
+ << AT.getRepresentativeTypeName(S.Context)
+ << IntendedTy << IsEnum << E->getSourceRange(),
+ E->getLocStart(),
+ /*IsStringLocation*/ false, SpecRange,
+ FixItHint::CreateReplacement(SpecRange, os.str()));
+
+ } else {
+ // The canonical type for formatting this value is different from the
+ // actual type of the expression. (This occurs, for example, with Darwin's
+ // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but
+ // should be printed as 'long' for 64-bit compatibility.)
+ // Rather than emitting a normal format/argument mismatch, we want to
+ // add a cast to the recommended type (and correct the format string
+ // if necessary).
+ SmallString<16> CastBuf;
+ llvm::raw_svector_ostream CastFix(CastBuf);
+ CastFix << "(";
+ IntendedTy.print(CastFix, S.Context.getPrintingPolicy());
+ CastFix << ")";
+
+ SmallVector<FixItHint,4> Hints;
+ if (!AT.matchesType(S.Context, IntendedTy))
+ Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str()));
+
+ if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) {
+ // If there's already a cast present, just replace it.
+ SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc());
+ Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str()));
+
+ } else if (!requiresParensToAddCast(E)) {
+ // If the expression has high enough precedence,
+ // just write the C-style cast.
+ Hints.push_back(FixItHint::CreateInsertion(E->getLocStart(),
+ CastFix.str()));
+ } else {
+ // Otherwise, add parens around the expression as well as the cast.
+ CastFix << "(";
+ Hints.push_back(FixItHint::CreateInsertion(E->getLocStart(),
+ CastFix.str()));
+
+ SourceLocation After = S.getLocForEndOfToken(E->getLocEnd());
+ Hints.push_back(FixItHint::CreateInsertion(After, ")"));
+ }
+
+ if (ShouldNotPrintDirectly) {
+ // The expression has a type that should not be printed directly.
+ // We extract the name from the typedef because we don't want to show
+ // the underlying type in the diagnostic.
+ StringRef Name;
+ if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy))
+ Name = TypedefTy->getDecl()->getName();
+ else
+ Name = CastTyName;
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_argument_needs_cast)
+ << Name << IntendedTy << IsEnum
+ << E->getSourceRange(),
+ E->getLocStart(), /*IsStringLocation=*/false,
+ SpecRange, Hints);
+ } else {
+ // In this case, the expression could be printed using a different
+ // specifier, but we've decided that the specifier is probably correct
+ // and we should cast instead. Just use the normal warning message.
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
+ << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum
+ << E->getSourceRange(),
+ E->getLocStart(), /*IsStringLocation*/false,
+ SpecRange, Hints);
+ }
+ }
+ } else {
+ const CharSourceRange &CSR = getSpecifierRange(StartSpecifier,
+ SpecifierLen);
+ // Since the warning for passing non-POD types to variadic functions
+ // was deferred until now, we emit a warning for non-POD
+ // arguments here.
+ switch (S.isValidVarArgType(ExprTy)) {
+ case Sema::VAK_Valid:
+ case Sema::VAK_ValidInCXX11: {
+ unsigned diag = diag::warn_format_conversion_argument_type_mismatch;
+ if (match == analyze_printf::ArgType::NoMatchPedantic) {
+ diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
+ }
+
+ EmitFormatDiagnostic(
+ S.PDiag(diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy
+ << IsEnum << CSR << E->getSourceRange(),
+ E->getLocStart(), /*IsStringLocation*/ false, CSR);
+ break;
+ }
+ case Sema::VAK_Undefined:
+ case Sema::VAK_MSVCUndefined:
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_non_pod_vararg_with_format_string)
+ << S.getLangOpts().CPlusPlus11
+ << ExprTy
+ << CallType
+ << AT.getRepresentativeTypeName(S.Context)
+ << CSR
+ << E->getSourceRange(),
+ E->getLocStart(), /*IsStringLocation*/false, CSR);
+ checkForCStrMembers(AT, E);
+ break;
+
+ case Sema::VAK_Invalid:
+ if (ExprTy->isObjCObjectType())
+ EmitFormatDiagnostic(
+ S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format)
+ << S.getLangOpts().CPlusPlus11
+ << ExprTy
+ << CallType
+ << AT.getRepresentativeTypeName(S.Context)
+ << CSR
+ << E->getSourceRange(),
+ E->getLocStart(), /*IsStringLocation*/false, CSR);
+ else
+ // FIXME: If this is an initializer list, suggest removing the braces
+ // or inserting a cast to the target type.
+ S.Diag(E->getLocStart(), diag::err_cannot_pass_to_vararg_format)
+ << isa<InitListExpr>(E) << ExprTy << CallType
+ << AT.getRepresentativeTypeName(S.Context)
+ << E->getSourceRange();
+ break;
+ }
+
+ assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
+ "format string specifier index out of range");
+ CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
+ }
+
+ return true;
+}
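+
+// e.g. the ObjC '%C' special case above keeps the specifier and suggests a
+// cast instead (sketch):
+//
+//   NSLog(@"%C", 0x2603);    // integer literal within unichar range: accepted
+//   NSLog(@"%C", someInt);   // fix-it: (unichar)someInt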
+
+//===--- CHECK: Scanf format string checking ------------------------------===//
+
+namespace {
+class CheckScanfHandler : public CheckFormatHandler {
+public:
+ CheckScanfHandler(Sema &s, const StringLiteral *fexpr,
+ const Expr *origFormatExpr, unsigned firstDataArg,
+ unsigned numDataArgs, const char *beg, bool hasVAListArg,
+ ArrayRef<const Expr *> Args,
+ unsigned formatIdx, bool inFunctionCall,
+ Sema::VariadicCallType CallType,
+ llvm::SmallBitVector &CheckedVarArgs)
+ : CheckFormatHandler(s, fexpr, origFormatExpr, firstDataArg,
+ numDataArgs, beg, hasVAListArg,
+ Args, formatIdx, inFunctionCall, CallType,
+ CheckedVarArgs)
+ {}
+
+ bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
+ const char *startSpecifier,
+ unsigned specifierLen) override;
+
+ bool HandleInvalidScanfConversionSpecifier(
+ const analyze_scanf::ScanfSpecifier &FS,
+ const char *startSpecifier,
+ unsigned specifierLen) override;
+
+ void HandleIncompleteScanList(const char *start, const char *end) override;
+};
+}
+
+void CheckScanfHandler::HandleIncompleteScanList(const char *start,
+ const char *end) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
+ getLocationOfByte(end), /*IsStringLocation*/true,
+ getSpecifierRange(start, end - start));
+}
+
+bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
+ const analyze_scanf::ScanfSpecifier &FS,
+ const char *startSpecifier,
+ unsigned specifierLen) {
+
+ const analyze_scanf::ScanfConversionSpecifier &CS =
+ FS.getConversionSpecifier();
+
+ return HandleInvalidConversionSpecifier(FS.getArgIndex(),
+ getLocationOfByte(CS.getStart()),
+ startSpecifier, specifierLen,
+ CS.getStart(), CS.getLength());
+}
+
+bool CheckScanfHandler::HandleScanfSpecifier(
+ const analyze_scanf::ScanfSpecifier &FS,
+ const char *startSpecifier,
+ unsigned specifierLen) {
+
+ using namespace analyze_scanf;
+ using namespace analyze_format_string;
+
+ const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();
+
+ // Handle case where '%' and '*' don't consume an argument. These shouldn't
+ // be used to decide if we are using positional arguments consistently.
+ if (FS.consumesDataArgument()) {
+ if (atFirstArg) {
+ atFirstArg = false;
+ usesPositionalArgs = FS.usesPositionalArg();
+ }
+ else if (usesPositionalArgs != FS.usesPositionalArg()) {
+ HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
+ startSpecifier, specifierLen);
+ return false;
+ }
+ }
+
+  // Check that the field width is non-zero.
+ const OptionalAmount &Amt = FS.getFieldWidth();
+ if (Amt.getHowSpecified() == OptionalAmount::Constant) {
+ if (Amt.getConstantAmount() == 0) {
+ const CharSourceRange &R = getSpecifierRange(Amt.getStart(),
+ Amt.getConstantLength());
+ EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
+ getLocationOfByte(Amt.getStart()),
+ /*IsStringLocation*/true, R,
+ FixItHint::CreateRemoval(R));
+ }
+ }
+
+ if (!FS.consumesDataArgument()) {
+ // FIXME: Technically specifying a precision or field width here
+ // makes no sense. Worth issuing a warning at some point.
+ return true;
+ }
+
+ // Consume the argument.
+ unsigned argIndex = FS.getArgIndex();
+ if (argIndex < NumDataArgs) {
+ // The check to see if the argIndex is valid will come later.
+ // We set the bit here because we may exit early from this
+ // function if we encounter some other error.
+ CoveredArgs.set(argIndex);
+ }
+
+  // Check that the length modifier is valid with the given conversion
+  // specifier.
+ if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo()))
+ HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
+ diag::warn_format_nonsensical_length);
+ else if (!FS.hasStandardLengthModifier())
+ HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
+ else if (!FS.hasStandardLengthConversionCombination())
+ HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
+ diag::warn_format_non_standard_conversion_spec);
+
+ if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
+ HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
+
+ // The remaining checks depend on the data arguments.
+ if (HasVAListArg)
+ return true;
+
+ if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
+ return false;
+
+ // Check that the argument type matches the format specifier.
+ const Expr *Ex = getDataArg(argIndex);
+ if (!Ex)
+ return true;
+
+ const analyze_format_string::ArgType &AT = FS.getArgType(S.Context);
+
+ if (!AT.isValid()) {
+ return true;
+ }
+
+ analyze_format_string::ArgType::MatchKind match =
+ AT.matchesType(S.Context, Ex->getType());
+ if (match == analyze_format_string::ArgType::Match) {
+ return true;
+ }
+
+ ScanfSpecifier fixedFS = FS;
+ bool success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(),
+ S.getLangOpts(), S.Context);
+
+ unsigned diag = diag::warn_format_conversion_argument_type_mismatch;
+ if (match == analyze_format_string::ArgType::NoMatchPedantic) {
+ diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
+ }
+
+ if (success) {
+ // Get the fix string from the fixed format specifier.
+ SmallString<128> buf;
+ llvm::raw_svector_ostream os(buf);
+ fixedFS.toString(os);
+
+ EmitFormatDiagnostic(
+ S.PDiag(diag) << AT.getRepresentativeTypeName(S.Context)
+ << Ex->getType() << false << Ex->getSourceRange(),
+ Ex->getLocStart(),
+ /*IsStringLocation*/ false,
+ getSpecifierRange(startSpecifier, specifierLen),
+ FixItHint::CreateReplacement(
+ getSpecifierRange(startSpecifier, specifierLen), os.str()));
+ } else {
+ EmitFormatDiagnostic(S.PDiag(diag)
+ << AT.getRepresentativeTypeName(S.Context)
+ << Ex->getType() << false << Ex->getSourceRange(),
+ Ex->getLocStart(),
+ /*IsStringLocation*/ false,
+ getSpecifierRange(startSpecifier, specifierLen));
+ }
+
+ return true;
+}
+
+void Sema::CheckFormatString(const StringLiteral *FExpr,
+ const Expr *OrigFormatExpr,
+ ArrayRef<const Expr *> Args,
+ bool HasVAListArg, unsigned format_idx,
+ unsigned firstDataArg, FormatStringType Type,
+ bool inFunctionCall, VariadicCallType CallType,
+ llvm::SmallBitVector &CheckedVarArgs) {
+
+ // CHECK: is the format string a wide literal?
+ if (!FExpr->isAscii() && !FExpr->isUTF8()) {
+ CheckFormatHandler::EmitFormatDiagnostic(
+ *this, inFunctionCall, Args[format_idx],
+ PDiag(diag::warn_format_string_is_wide_literal), FExpr->getLocStart(),
+ /*IsStringLocation*/true, OrigFormatExpr->getSourceRange());
+ return;
+ }
+
+ // Str - The format string. NOTE: this is NOT null-terminated!
+ StringRef StrRef = FExpr->getString();
+ const char *Str = StrRef.data();
+ // Account for cases where the string literal is truncated in a declaration.
+ const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
+ assert(T && "String literal not of constant array type!");
+ size_t TypeSize = T->getSize().getZExtValue();
+ size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
+ const unsigned numDataArgs = Args.size() - firstDataArg;
+
+ // Emit a warning if the string literal is truncated and does not contain an
+ // embedded null character.
+ if (TypeSize <= StrRef.size() &&
+ StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) {
+ CheckFormatHandler::EmitFormatDiagnostic(
+ *this, inFunctionCall, Args[format_idx],
+ PDiag(diag::warn_printf_format_string_not_null_terminated),
+ FExpr->getLocStart(),
+ /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange());
+ return;
+ }
+
+ // CHECK: empty format string?
+ if (StrLen == 0 && numDataArgs > 0) {
+ CheckFormatHandler::EmitFormatDiagnostic(
+ *this, inFunctionCall, Args[format_idx],
+ PDiag(diag::warn_empty_format_string), FExpr->getLocStart(),
+ /*IsStringLocation*/true, OrigFormatExpr->getSourceRange());
+ return;
+ }
+
+ if (Type == FST_Printf || Type == FST_NSString ||
+ Type == FST_FreeBSDKPrintf || Type == FST_OSTrace) {
+ CheckPrintfHandler H(*this, FExpr, OrigFormatExpr, firstDataArg,
+ numDataArgs, (Type == FST_NSString || Type == FST_OSTrace),
+ Str, HasVAListArg, Args, format_idx,
+ inFunctionCall, CallType, CheckedVarArgs);
+
+ if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
+ getLangOpts(),
+ Context.getTargetInfo(),
+ Type == FST_FreeBSDKPrintf))
+ H.DoneProcessing();
+ } else if (Type == FST_Scanf) {
+ CheckScanfHandler H(*this, FExpr, OrigFormatExpr, firstDataArg, numDataArgs,
+ Str, HasVAListArg, Args, format_idx,
+ inFunctionCall, CallType, CheckedVarArgs);
+
+ if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
+ getLangOpts(),
+ Context.getTargetInfo()))
+ H.DoneProcessing();
+ } // TODO: handle other formats
+}
+
+bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
+ // Str - The format string. NOTE: this is NOT null-terminated!
+ StringRef StrRef = FExpr->getString();
+ const char *Str = StrRef.data();
+ // Account for cases where the string literal is truncated in a declaration.
+ const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
+ assert(T && "String literal not of constant array type!");
+ size_t TypeSize = T->getSize().getZExtValue();
+ size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
+ return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
+ getLangOpts(),
+ Context.getTargetInfo());
+}
+
+//===--- CHECK: Warn on use of wrong absolute value function. -------------===//
+
+// Returns the related absolute value function that is larger, or 0 if one
+// does not exist.
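+// For example, abs() promotes to labs() and then llabs(), and fabsf()
+// promotes to fabs() and then fabsl().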
+static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) {
+ switch (AbsFunction) {
+ default:
+ return 0;
+
+ case Builtin::BI__builtin_abs:
+ return Builtin::BI__builtin_labs;
+ case Builtin::BI__builtin_labs:
+ return Builtin::BI__builtin_llabs;
+ case Builtin::BI__builtin_llabs:
+ return 0;
+
+ case Builtin::BI__builtin_fabsf:
+ return Builtin::BI__builtin_fabs;
+ case Builtin::BI__builtin_fabs:
+ return Builtin::BI__builtin_fabsl;
+ case Builtin::BI__builtin_fabsl:
+ return 0;
+
+ case Builtin::BI__builtin_cabsf:
+ return Builtin::BI__builtin_cabs;
+ case Builtin::BI__builtin_cabs:
+ return Builtin::BI__builtin_cabsl;
+ case Builtin::BI__builtin_cabsl:
+ return 0;
+
+ case Builtin::BIabs:
+ return Builtin::BIlabs;
+ case Builtin::BIlabs:
+ return Builtin::BIllabs;
+ case Builtin::BIllabs:
+ return 0;
+
+ case Builtin::BIfabsf:
+ return Builtin::BIfabs;
+ case Builtin::BIfabs:
+ return Builtin::BIfabsl;
+ case Builtin::BIfabsl:
+ return 0;
+
+ case Builtin::BIcabsf:
+ return Builtin::BIcabs;
+ case Builtin::BIcabs:
+ return Builtin::BIcabsl;
+ case Builtin::BIcabsl:
+ return 0;
+ }
+}
+
+// Returns the argument type of the absolute value function.
+static QualType getAbsoluteValueArgumentType(ASTContext &Context,
+ unsigned AbsType) {
+ if (AbsType == 0)
+ return QualType();
+
+ ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
+ QualType BuiltinType = Context.GetBuiltinType(AbsType, Error);
+ if (Error != ASTContext::GE_None)
+ return QualType();
+
+ const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>();
+ if (!FT)
+ return QualType();
+
+ if (FT->getNumParams() != 1)
+ return QualType();
+
+ return FT->getParamType(0);
+}
+
+// Returns the best absolute value function, or zero, based on type and
+// current absolute value function.
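+// For example (illustrative, on an LP64 target): for a 'long' argument and
+// abs(), the walk abs() -> labs() -> llabs() stops at labs(), the first
+// function whose parameter is at least as wide, preferring an exact match.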
+static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType,
+ unsigned AbsFunctionKind) {
+ unsigned BestKind = 0;
+ uint64_t ArgSize = Context.getTypeSize(ArgType);
+ for (unsigned Kind = AbsFunctionKind; Kind != 0;
+ Kind = getLargerAbsoluteValueFunction(Kind)) {
+ QualType ParamType = getAbsoluteValueArgumentType(Context, Kind);
+ if (Context.getTypeSize(ParamType) >= ArgSize) {
+ if (BestKind == 0)
+ BestKind = Kind;
+ else if (Context.hasSameType(ParamType, ArgType)) {
+ BestKind = Kind;
+ break;
+ }
+ }
+ }
+ return BestKind;
+}
+
+enum AbsoluteValueKind {
+ AVK_Integer,
+ AVK_Floating,
+ AVK_Complex
+};
+
+static AbsoluteValueKind getAbsoluteValueKind(QualType T) {
+ if (T->isIntegralOrEnumerationType())
+ return AVK_Integer;
+ if (T->isRealFloatingType())
+ return AVK_Floating;
+ if (T->isAnyComplexType())
+ return AVK_Complex;
+
+ llvm_unreachable("Type not integer, floating, or complex");
+}
+
+// Changes the absolute value function to a different type. Preserves whether
+// the function is a builtin.
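+// For example, fabs() used where an integer absolute value is needed maps
+// to abs(), and __builtin_fabs() maps to __builtin_abs().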
+static unsigned changeAbsFunction(unsigned AbsKind,
+ AbsoluteValueKind ValueKind) {
+ switch (ValueKind) {
+ case AVK_Integer:
+ switch (AbsKind) {
+ default:
+ return 0;
+ case Builtin::BI__builtin_fabsf:
+ case Builtin::BI__builtin_fabs:
+ case Builtin::BI__builtin_fabsl:
+ case Builtin::BI__builtin_cabsf:
+ case Builtin::BI__builtin_cabs:
+ case Builtin::BI__builtin_cabsl:
+ return Builtin::BI__builtin_abs;
+ case Builtin::BIfabsf:
+ case Builtin::BIfabs:
+ case Builtin::BIfabsl:
+ case Builtin::BIcabsf:
+ case Builtin::BIcabs:
+ case Builtin::BIcabsl:
+ return Builtin::BIabs;
+ }
+ case AVK_Floating:
+ switch (AbsKind) {
+ default:
+ return 0;
+ case Builtin::BI__builtin_abs:
+ case Builtin::BI__builtin_labs:
+ case Builtin::BI__builtin_llabs:
+ case Builtin::BI__builtin_cabsf:
+ case Builtin::BI__builtin_cabs:
+ case Builtin::BI__builtin_cabsl:
+ return Builtin::BI__builtin_fabsf;
+ case Builtin::BIabs:
+ case Builtin::BIlabs:
+ case Builtin::BIllabs:
+ case Builtin::BIcabsf:
+ case Builtin::BIcabs:
+ case Builtin::BIcabsl:
+ return Builtin::BIfabsf;
+ }
+ case AVK_Complex:
+ switch (AbsKind) {
+ default:
+ return 0;
+ case Builtin::BI__builtin_abs:
+ case Builtin::BI__builtin_labs:
+ case Builtin::BI__builtin_llabs:
+ case Builtin::BI__builtin_fabsf:
+ case Builtin::BI__builtin_fabs:
+ case Builtin::BI__builtin_fabsl:
+ return Builtin::BI__builtin_cabsf;
+ case Builtin::BIabs:
+ case Builtin::BIlabs:
+ case Builtin::BIllabs:
+ case Builtin::BIfabsf:
+ case Builtin::BIfabs:
+ case Builtin::BIfabsl:
+ return Builtin::BIcabsf;
+ }
+ }
+ llvm_unreachable("Unable to convert function");
+}
+
+static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) {
+ const IdentifierInfo *FnInfo = FDecl->getIdentifier();
+ if (!FnInfo)
+ return 0;
+
+ switch (FDecl->getBuiltinID()) {
+ default:
+ return 0;
+ case Builtin::BI__builtin_abs:
+ case Builtin::BI__builtin_fabs:
+ case Builtin::BI__builtin_fabsf:
+ case Builtin::BI__builtin_fabsl:
+ case Builtin::BI__builtin_labs:
+ case Builtin::BI__builtin_llabs:
+ case Builtin::BI__builtin_cabs:
+ case Builtin::BI__builtin_cabsf:
+ case Builtin::BI__builtin_cabsl:
+ case Builtin::BIabs:
+ case Builtin::BIlabs:
+ case Builtin::BIllabs:
+ case Builtin::BIfabs:
+ case Builtin::BIfabsf:
+ case Builtin::BIfabsl:
+ case Builtin::BIcabs:
+ case Builtin::BIcabsf:
+ case Builtin::BIcabsl:
+ return FDecl->getBuiltinID();
+ }
+ llvm_unreachable("Unknown Builtin type");
+}
+
+// If the replacement is valid, emit a note with the replacement function.
+// Additionally, suggest including the proper header if not already included.
+static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range,
+ unsigned AbsKind, QualType ArgType) {
+ bool EmitHeaderHint = true;
+ const char *HeaderName = nullptr;
+ const char *FunctionName = nullptr;
+ if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) {
+ FunctionName = "std::abs";
+ if (ArgType->isIntegralOrEnumerationType()) {
+ HeaderName = "cstdlib";
+ } else if (ArgType->isRealFloatingType()) {
+ HeaderName = "cmath";
+ } else {
+ llvm_unreachable("Invalid Type");
+ }
+
+    // Look up all overloads of std::abs.
+ if (NamespaceDecl *Std = S.getStdNamespace()) {
+ LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName);
+ R.suppressDiagnostics();
+ S.LookupQualifiedName(R, Std);
+
+ for (const auto *I : R) {
+ const FunctionDecl *FDecl = nullptr;
+ if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) {
+ FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl());
+ } else {
+ FDecl = dyn_cast<FunctionDecl>(I);
+ }
+ if (!FDecl)
+ continue;
+
+        // Found a std::abs(); check that it is the right overload.
+ if (FDecl->getNumParams() != 1)
+ continue;
+
+ // Check that the parameter type can handle the argument.
+ QualType ParamType = FDecl->getParamDecl(0)->getType();
+ if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) &&
+ S.Context.getTypeSize(ArgType) <=
+ S.Context.getTypeSize(ParamType)) {
+ // Found a function, don't need the header hint.
+ EmitHeaderHint = false;
+ break;
+ }
+ }
+ }
+ } else {
+ FunctionName = S.Context.BuiltinInfo.getName(AbsKind);
+ HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind);
+
+ if (HeaderName) {
+ DeclarationName DN(&S.Context.Idents.get(FunctionName));
+ LookupResult R(S, DN, Loc, Sema::LookupAnyName);
+ R.suppressDiagnostics();
+ S.LookupName(R, S.getCurScope());
+
+ if (R.isSingleResult()) {
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl());
+ if (FD && FD->getBuiltinID() == AbsKind) {
+ EmitHeaderHint = false;
+ } else {
+ return;
+ }
+ } else if (!R.empty()) {
+ return;
+ }
+ }
+ }
+
+ S.Diag(Loc, diag::note_replace_abs_function)
+ << FunctionName << FixItHint::CreateReplacement(Range, FunctionName);
+
+ if (!HeaderName)
+ return;
+
+ if (!EmitHeaderHint)
+ return;
+
+ S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName
+ << FunctionName;
+}
+
+static bool IsFunctionStdAbs(const FunctionDecl *FDecl) {
+ if (!FDecl)
+ return false;
+
+ if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr("abs"))
+ return false;
+
+ const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(FDecl->getDeclContext());
+
+ while (ND && ND->isInlineNamespace()) {
+ ND = dyn_cast<NamespaceDecl>(ND->getDeclContext());
+ }
+
+ if (!ND || !ND->getIdentifier() || !ND->getIdentifier()->isStr("std"))
+ return false;
+
+ if (!isa<TranslationUnitDecl>(ND->getDeclContext()))
+ return false;
+
+ return true;
+}
+
+// Warn when using the wrong abs() function.
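+// For example (illustrative): abs(-2.5) warns that an integer abs() is
+// applied to a floating-point argument and suggests fabs() (std::abs in
+// C++); abs(5u) warns and suggests removing the call, since unsigned values
+// cannot be negative.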
+void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
+ const FunctionDecl *FDecl,
+ IdentifierInfo *FnInfo) {
+ if (Call->getNumArgs() != 1)
+ return;
+
+ unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
+ bool IsStdAbs = IsFunctionStdAbs(FDecl);
+ if (AbsKind == 0 && !IsStdAbs)
+ return;
+
+ QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType();
+ QualType ParamType = Call->getArg(0)->getType();
+
+ // Unsigned types cannot be negative. Suggest removing the absolute value
+ // function call.
+ if (ArgType->isUnsignedIntegerType()) {
+ const char *FunctionName =
+ IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
+ Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
+ Diag(Call->getExprLoc(), diag::note_remove_abs)
+ << FunctionName
+ << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
+ return;
+ }
+
+  // Taking the absolute value of a pointer is very suspicious; the user
+  // probably wanted to index into an array, dereference a pointer, call a
+  // function, etc.
+ if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
+ unsigned DiagType = 0;
+ if (ArgType->isFunctionType())
+ DiagType = 1;
+ else if (ArgType->isArrayType())
+ DiagType = 2;
+
+ Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
+ return;
+ }
+
+ // std::abs has overloads which prevent most of the absolute value problems
+ // from occurring.
+ if (IsStdAbs)
+ return;
+
+ AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
+ AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);
+
+ // The argument and parameter are the same kind. Check if they are the right
+ // size.
+ if (ArgValueKind == ParamValueKind) {
+ if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
+ return;
+
+ unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
+ Diag(Call->getExprLoc(), diag::warn_abs_too_small)
+ << FDecl << ArgType << ParamType;
+
+ if (NewAbsKind == 0)
+ return;
+
+ emitReplacement(*this, Call->getExprLoc(),
+ Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
+ return;
+ }
+
+ // ArgValueKind != ParamValueKind
+ // The wrong type of absolute value function was used. Attempt to find the
+ // proper one.
+ unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
+ NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
+ if (NewAbsKind == 0)
+ return;
+
+ Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
+ << FDecl << ParamValueKind << ArgValueKind;
+
+ emitReplacement(*this, Call->getExprLoc(),
+ Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
+ return;
+}
+
+//===--- CHECK: Standard memory functions ---------------------------------===//
+
+/// \brief Takes the expression passed to the size_t parameter of functions
+/// such as memcmp, strncat, etc., and warns if it's a comparison.
+///
+/// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`.
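+/// The notes suggest moving the parenthesis, i.e. (illustrative)
+/// `if (memcmp(&a, &b, sizeof(a)) > 0)`, or casting the comparison to
+/// (size_t) to silence the warning.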
+static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E,
+ IdentifierInfo *FnName,
+ SourceLocation FnLoc,
+ SourceLocation RParenLoc) {
+ const BinaryOperator *Size = dyn_cast<BinaryOperator>(E);
+ if (!Size)
+ return false;
+
+  // Only warn if the operator is a comparison, equality, or logical
+  // operator: >, <, >=, <=, ==, !=, &&, ||.
+ if (!Size->isComparisonOp() && !Size->isEqualityOp() && !Size->isLogicalOp())
+ return false;
+
+ SourceRange SizeRange = Size->getSourceRange();
+ S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison)
+ << SizeRange << FnName;
+ S.Diag(FnLoc, diag::note_memsize_comparison_paren)
+ << FnName << FixItHint::CreateInsertion(
+ S.getLocForEndOfToken(Size->getLHS()->getLocEnd()), ")")
+ << FixItHint::CreateRemoval(RParenLoc);
+ S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence)
+ << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(")
+ << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()),
+ ")");
+
+ return true;
+}
+
+/// \brief Determine whether the given type is or contains a dynamic class type
+/// (e.g., whether it has a vtable).
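+/// For example (illustrative), given `struct S { Base b; };` where Base has
+/// a virtual function, this returns Base and sets IsContained to true.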
+static const CXXRecordDecl *getContainedDynamicClass(QualType T,
+ bool &IsContained) {
+ // Look through array types while ignoring qualifiers.
+ const Type *Ty = T->getBaseElementTypeUnsafe();
+ IsContained = false;
+
+ const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+ RD = RD ? RD->getDefinition() : nullptr;
+ if (!RD)
+ return nullptr;
+
+ if (RD->isDynamicClass())
+ return RD;
+
+  // Check all the fields; if any field's type is or contains a dynamic
+  // class, so does this one.
+ // It's impossible for a class to transitively contain itself by value, so
+ // infinite recursion is impossible.
+ for (auto *FD : RD->fields()) {
+ bool SubContained;
+ if (const CXXRecordDecl *ContainedRD =
+ getContainedDynamicClass(FD->getType(), SubContained)) {
+ IsContained = true;
+ return ContainedRD;
+ }
+ }
+
+ return nullptr;
+}
+
+/// \brief If E is a sizeof expression, returns its argument expression,
+/// otherwise returns NULL.
+static const Expr *getSizeOfExprArg(const Expr *E) {
+ if (const UnaryExprOrTypeTraitExpr *SizeOf =
+ dyn_cast<UnaryExprOrTypeTraitExpr>(E))
+ if (SizeOf->getKind() == clang::UETT_SizeOf && !SizeOf->isArgumentType())
+ return SizeOf->getArgumentExpr()->IgnoreParenImpCasts();
+
+ return nullptr;
+}
+
+/// \brief If E is a sizeof expression, returns its argument type.
+static QualType getSizeOfArgType(const Expr *E) {
+ if (const UnaryExprOrTypeTraitExpr *SizeOf =
+ dyn_cast<UnaryExprOrTypeTraitExpr>(E))
+ if (SizeOf->getKind() == clang::UETT_SizeOf)
+ return SizeOf->getTypeOfArgument();
+
+ return QualType();
+}
+
+/// \brief Check for dangerous or invalid arguments to memset().
+///
+/// This issues warnings on known problematic, dangerous or unspecified
+/// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp'
+/// function calls.
+///
+/// \param Call The call expression to diagnose.
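+///
+/// A typical catch (illustrative): memset(p, 0, sizeof(p)) on a pointer p
+/// zeroes only the pointer's own size; a note suggests dereferencing the
+/// sizeof argument.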
+void Sema::CheckMemaccessArguments(const CallExpr *Call,
+ unsigned BId,
+ IdentifierInfo *FnName) {
+ assert(BId != 0);
+
+  // It is possible to have a non-standard definition of memset. Validate
+  // that we have enough arguments, and if not, abort further checking.
+ unsigned ExpectedNumArgs = (BId == Builtin::BIstrndup ? 2 : 3);
+ if (Call->getNumArgs() < ExpectedNumArgs)
+ return;
+
+ unsigned LastArg = (BId == Builtin::BImemset ||
+ BId == Builtin::BIstrndup ? 1 : 2);
+ unsigned LenArg = (BId == Builtin::BIstrndup ? 1 : 2);
+ const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts();
+
+ if (CheckMemorySizeofForComparison(*this, LenExpr, FnName,
+ Call->getLocStart(), Call->getRParenLoc()))
+ return;
+
+ // We have special checking when the length is a sizeof expression.
+ QualType SizeOfArgTy = getSizeOfArgType(LenExpr);
+ const Expr *SizeOfArg = getSizeOfExprArg(LenExpr);
+ llvm::FoldingSetNodeID SizeOfArgID;
+
+ for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) {
+ const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts();
+ SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange();
+
+ QualType DestTy = Dest->getType();
+ QualType PointeeTy;
+ if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) {
+ PointeeTy = DestPtrTy->getPointeeType();
+
+ // Never warn about void type pointers. This can be used to suppress
+ // false positives.
+ if (PointeeTy->isVoidType())
+ continue;
+
+ // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by
+ // actually comparing the expressions for equality. Because computing the
+ // expression IDs can be expensive, we only do this if the diagnostic is
+ // enabled.
+ if (SizeOfArg &&
+ !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess,
+ SizeOfArg->getExprLoc())) {
+ // We only compute IDs for expressions if the warning is enabled, and
+ // cache the sizeof arg's ID.
+ if (SizeOfArgID == llvm::FoldingSetNodeID())
+ SizeOfArg->Profile(SizeOfArgID, Context, true);
+ llvm::FoldingSetNodeID DestID;
+ Dest->Profile(DestID, Context, true);
+ if (DestID == SizeOfArgID) {
+ // TODO: For strncpy() and friends, this could suggest sizeof(dst)
+ // over sizeof(src) as well.
+ unsigned ActionIdx = 0; // Default is to suggest dereferencing.
+ StringRef ReadableName = FnName->getName();
+
+ if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest))
+ if (UnaryOp->getOpcode() == UO_AddrOf)
+              ActionIdx = 1; // If it's an address-of operator, just remove it.
+ if (!PointeeTy->isIncompleteType() &&
+ (Context.getTypeSize(PointeeTy) == Context.getCharWidth()))
+ ActionIdx = 2; // If the pointee's size is sizeof(char),
+ // suggest an explicit length.
+
+ // If the function is defined as a builtin macro, do not show macro
+ // expansion.
+ SourceLocation SL = SizeOfArg->getExprLoc();
+ SourceRange DSR = Dest->getSourceRange();
+ SourceRange SSR = SizeOfArg->getSourceRange();
+ SourceManager &SM = getSourceManager();
+
+ if (SM.isMacroArgExpansion(SL)) {
+ ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts);
+ SL = SM.getSpellingLoc(SL);
+ DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()),
+ SM.getSpellingLoc(DSR.getEnd()));
+ SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()),
+ SM.getSpellingLoc(SSR.getEnd()));
+ }
+
+ DiagRuntimeBehavior(SL, SizeOfArg,
+ PDiag(diag::warn_sizeof_pointer_expr_memaccess)
+ << ReadableName
+ << PointeeTy
+ << DestTy
+ << DSR
+ << SSR);
+ DiagRuntimeBehavior(SL, SizeOfArg,
+ PDiag(diag::warn_sizeof_pointer_expr_memaccess_note)
+ << ActionIdx
+ << SSR);
+
+ break;
+ }
+ }
+
+ // Also check for cases where the sizeof argument is the exact same
+ // type as the memory argument, and where it points to a user-defined
+ // record type.
+ if (SizeOfArgTy != QualType()) {
+ if (PointeeTy->isRecordType() &&
+ Context.typesAreCompatible(SizeOfArgTy, DestTy)) {
+ DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest,
+ PDiag(diag::warn_sizeof_pointer_type_memaccess)
+ << FnName << SizeOfArgTy << ArgIdx
+ << PointeeTy << Dest->getSourceRange()
+ << LenExpr->getSourceRange());
+ break;
+ }
+ }
+ } else if (DestTy->isArrayType()) {
+ PointeeTy = DestTy;
+ }
+
+ if (PointeeTy == QualType())
+ continue;
+
+ // Always complain about dynamic classes.
+ bool IsContained;
+ if (const CXXRecordDecl *ContainedRD =
+ getContainedDynamicClass(PointeeTy, IsContained)) {
+
+ unsigned OperationType = 0;
+ // "overwritten" if we're warning about the destination for any call
+ // but memcmp; otherwise a verb appropriate to the call.
+ if (ArgIdx != 0 || BId == Builtin::BImemcmp) {
+ if (BId == Builtin::BImemcpy)
+ OperationType = 1;
+        else if (BId == Builtin::BImemmove)
+ OperationType = 2;
+ else if (BId == Builtin::BImemcmp)
+ OperationType = 3;
+ }
+
+ DiagRuntimeBehavior(
+ Dest->getExprLoc(), Dest,
+ PDiag(diag::warn_dyn_class_memaccess)
+ << (BId == Builtin::BImemcmp ? ArgIdx + 2 : ArgIdx)
+ << FnName << IsContained << ContainedRD << OperationType
+ << Call->getCallee()->getSourceRange());
+ } else if (PointeeTy.hasNonTrivialObjCLifetime() &&
+ BId != Builtin::BImemset)
+ DiagRuntimeBehavior(
+ Dest->getExprLoc(), Dest,
+ PDiag(diag::warn_arc_object_memaccess)
+ << ArgIdx << FnName << PointeeTy
+ << Call->getCallee()->getSourceRange());
+ else
+ continue;
+
+ DiagRuntimeBehavior(
+ Dest->getExprLoc(), Dest,
+ PDiag(diag::note_bad_memaccess_silence)
+ << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)"));
+ break;
+ }
+}
+
+// A little helper routine: ignore addition and subtraction of integer literals.
+// This intentionally does not ignore all integer constant expressions because
+// we don't want to remove sizeof().
+static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) {
+ Ex = Ex->IgnoreParenCasts();
+
+ for (;;) {
+    const BinaryOperator *BO = dyn_cast<BinaryOperator>(Ex);
+ if (!BO || !BO->isAdditiveOp())
+ break;
+
+ const Expr *RHS = BO->getRHS()->IgnoreParenCasts();
+ const Expr *LHS = BO->getLHS()->IgnoreParenCasts();
+
+ if (isa<IntegerLiteral>(RHS))
+ Ex = LHS;
+ else if (isa<IntegerLiteral>(LHS))
+ Ex = RHS;
+ else
+ break;
+ }
+
+ return Ex;
+}
+
+static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty,
+ ASTContext &Context) {
+  // Only handle constant-size arrays or VLAs, but not flexible array members.
+ if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) {
+ // Only issue the FIXIT for arrays of size > 1.
+ if (CAT->getSize().getSExtValue() <= 1)
+ return false;
+ } else if (!Ty->isVariableArrayType()) {
+ return false;
+ }
+ return true;
+}
+
+// Warn if the user has made the 'size' argument to strlcpy or strlcat
+// the size of the source instead of the destination.
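+// For example (illustrative): strlcpy(dst, src, sizeof(src)) and
+// strlcpy(dst, src, strlen(src)) both warn; when dst is a constant-size
+// array, the note suggests sizeof(dst).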
+void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
+ IdentifierInfo *FnName) {
+
+  // Don't crash if the user has the wrong number of arguments.
+ unsigned NumArgs = Call->getNumArgs();
+ if ((NumArgs != 3) && (NumArgs != 4))
+ return;
+
+ const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context);
+ const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context);
+ const Expr *CompareWithSrc = nullptr;
+
+ if (CheckMemorySizeofForComparison(*this, SizeArg, FnName,
+ Call->getLocStart(), Call->getRParenLoc()))
+ return;
+
+ // Look for 'strlcpy(dst, x, sizeof(x))'
+ if (const Expr *Ex = getSizeOfExprArg(SizeArg))
+ CompareWithSrc = Ex;
+ else {
+ // Look for 'strlcpy(dst, x, strlen(x))'
+ if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) {
+ if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen &&
+ SizeCall->getNumArgs() == 1)
+ CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context);
+ }
+ }
+
+ if (!CompareWithSrc)
+ return;
+
+ // Determine if the argument to sizeof/strlen is equal to the source
+ // argument. In principle there's all kinds of things you could do
+ // here, for instance creating an == expression and evaluating it with
+ // EvaluateAsBooleanCondition, but this uses a more direct technique:
+ const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg);
+ if (!SrcArgDRE)
+ return;
+
+ const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc);
+ if (!CompareWithSrcDRE ||
+ SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl())
+ return;
+
+ const Expr *OriginalSizeArg = Call->getArg(2);
+ Diag(CompareWithSrcDRE->getLocStart(), diag::warn_strlcpycat_wrong_size)
+ << OriginalSizeArg->getSourceRange() << FnName;
+
+ // Output a FIXIT hint if the destination is an array (rather than a
+ // pointer to an array). This could be enhanced to handle some
+ // pointers if we know the actual size, like if DstArg is 'array+2'
+ // we could say 'sizeof(array)-2'.
+ const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts();
+ if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context))
+ return;
+
+ SmallString<128> sizeString;
+ llvm::raw_svector_ostream OS(sizeString);
+ OS << "sizeof(";
+ DstArg->printPretty(OS, nullptr, getPrintingPolicy());
+ OS << ")";
+
+ Diag(OriginalSizeArg->getLocStart(), diag::note_strlcpycat_wrong_size)
+ << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(),
+ OS.str());
+}
+
+/// Check if two expressions refer to the same declaration.
+static bool referToTheSameDecl(const Expr *E1, const Expr *E2) {
+ if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1))
+ if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2))
+ return D1->getDecl() == D2->getDecl();
+ return false;
+}
+
+static const Expr *getStrlenExprArg(const Expr *E) {
+ if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen)
+ return nullptr;
+ return CE->getArg(0)->IgnoreParenCasts();
+ }
+ return nullptr;
+}
+
+// Warn on anti-patterns used as the 'size' argument to strncat.
+// The correct size argument should look like the following:
+//   strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
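+// Flagged anti-patterns (illustrative): strncat(dst, src, sizeof(dst)),
+// strncat(dst, src, sizeof(src)), and
+// strncat(dst, src, sizeof(dst) - strlen(dst)).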
+void Sema::CheckStrncatArguments(const CallExpr *CE,
+ IdentifierInfo *FnName) {
+ // Don't crash if the user has the wrong number of arguments.
+ if (CE->getNumArgs() < 3)
+ return;
+ const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
+ const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
+ const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();
+
+ if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getLocStart(),
+ CE->getRParenLoc()))
+ return;
+
+ // Identify common expressions, which are wrongly used as the size argument
+ // to strncat and may lead to buffer overflows.
+ unsigned PatternType = 0;
+ if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
+ // - sizeof(dst)
+ if (referToTheSameDecl(SizeOfArg, DstArg))
+ PatternType = 1;
+ // - sizeof(src)
+ else if (referToTheSameDecl(SizeOfArg, SrcArg))
+ PatternType = 2;
+ } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
+ if (BE->getOpcode() == BO_Sub) {
+ const Expr *L = BE->getLHS()->IgnoreParenCasts();
+ const Expr *R = BE->getRHS()->IgnoreParenCasts();
+ // - sizeof(dst) - strlen(dst)
+ if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
+ referToTheSameDecl(DstArg, getStrlenExprArg(R)))
+ PatternType = 1;
+ // - sizeof(src) - (anything)
+ else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
+ PatternType = 2;
+ }
+ }
+
+ if (PatternType == 0)
+ return;
+
+ // Generate the diagnostic.
+ SourceLocation SL = LenArg->getLocStart();
+ SourceRange SR = LenArg->getSourceRange();
+ SourceManager &SM = getSourceManager();
+
+ // If the function is defined as a builtin macro, do not show macro expansion.
+ if (SM.isMacroArgExpansion(SL)) {
+ SL = SM.getSpellingLoc(SL);
+ SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
+ SM.getSpellingLoc(SR.getEnd()));
+ }
+
+ // Check if the destination is an array (rather than a pointer to an array).
+ QualType DstTy = DstArg->getType();
+ bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy,
+ Context);
+ if (!isKnownSizeArray) {
+ if (PatternType == 1)
+ Diag(SL, diag::warn_strncat_wrong_size) << SR;
+ else
+ Diag(SL, diag::warn_strncat_src_size) << SR;
+ return;
+ }
+
+ if (PatternType == 1)
+ Diag(SL, diag::warn_strncat_large_size) << SR;
+ else
+ Diag(SL, diag::warn_strncat_src_size) << SR;
+
+ SmallString<128> sizeString;
+ llvm::raw_svector_ostream OS(sizeString);
+ OS << "sizeof(";
+ DstArg->printPretty(OS, nullptr, getPrintingPolicy());
+ OS << ") - ";
+ OS << "strlen(";
+ DstArg->printPretty(OS, nullptr, getPrintingPolicy());
+ OS << ") - 1";
+
+ Diag(SL, diag::note_strncat_wrong_size)
+ << FixItHint::CreateReplacement(SR, OS.str());
+}
+
+//===--- CHECK: Return Address of Stack Variable --------------------------===//
+
+static Expr *EvalVal(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars,
+ Decl *ParentDecl);
+static Expr *EvalAddr(Expr* E, SmallVectorImpl<DeclRefExpr *> &refVars,
+ Decl *ParentDecl);
+
+/// CheckReturnStackAddr - Check if a return statement returns the address
+/// of a stack variable.
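+///
+/// For example (illustrative): `int *f() { int x; return &x; }` warns here,
+/// as does returning a reference bound to a local temporary.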
+static void
+CheckReturnStackAddr(Sema &S, Expr *RetValExp, QualType lhsType,
+ SourceLocation ReturnLoc) {
+
+ Expr *stackE = nullptr;
+ SmallVector<DeclRefExpr *, 8> refVars;
+
+ // Perform checking for returned stack addresses, local blocks,
+ // label addresses or references to temporaries.
+ if (lhsType->isPointerType() ||
+ (!S.getLangOpts().ObjCAutoRefCount && lhsType->isBlockPointerType())) {
+ stackE = EvalAddr(RetValExp, refVars, /*ParentDecl=*/nullptr);
+ } else if (lhsType->isReferenceType()) {
+ stackE = EvalVal(RetValExp, refVars, /*ParentDecl=*/nullptr);
+ }
+
+ if (!stackE)
+ return; // Nothing suspicious was found.
+
+ SourceLocation diagLoc;
+ SourceRange diagRange;
+ if (refVars.empty()) {
+ diagLoc = stackE->getLocStart();
+ diagRange = stackE->getSourceRange();
+ } else {
+ // We followed through a reference variable. 'stackE' contains the
+ // problematic expression but we will warn at the return statement pointing
+ // at the reference variable. We will later display the "trail" of
+ // reference variables using notes.
+ diagLoc = refVars[0]->getLocStart();
+ diagRange = refVars[0]->getSourceRange();
+ }
+
+  if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(stackE)) { // address of local var.
+ S.Diag(diagLoc, diag::warn_ret_stack_addr_ref) << lhsType->isReferenceType()
+ << DR->getDecl()->getDeclName() << diagRange;
+ } else if (isa<BlockExpr>(stackE)) { // local block.
+ S.Diag(diagLoc, diag::err_ret_local_block) << diagRange;
+ } else if (isa<AddrLabelExpr>(stackE)) { // address of label.
+ S.Diag(diagLoc, diag::warn_ret_addr_label) << diagRange;
+ } else { // local temporary.
+ S.Diag(diagLoc, diag::warn_ret_local_temp_addr_ref)
+ << lhsType->isReferenceType() << diagRange;
+ }
+
+ // Display the "trail" of reference variables that we followed until we
+ // found the problematic expression using notes.
+ for (unsigned i = 0, e = refVars.size(); i != e; ++i) {
+ VarDecl *VD = cast<VarDecl>(refVars[i]->getDecl());
+ // If this var binds to another reference var, show the range of the next
+ // var, otherwise the var binds to the problematic expression, in which case
+ // show the range of the expression.
+ SourceRange range = (i < e-1) ? refVars[i+1]->getSourceRange()
+ : stackE->getSourceRange();
+ S.Diag(VD->getLocation(), diag::note_ref_var_local_bind)
+ << VD->getDeclName() << range;
+ }
+}
+
+/// EvalAddr - EvalAddr and EvalVal are mutually recursive functions that
+/// check if the expression in a return statement evaluates to an address
+/// of a location on the stack, a local block, the address of a label, or a
+/// reference to a local temporary. The recursion is used to traverse the
+/// AST of the return expression, with recursion backtracking when we
+/// encounter a subexpression that (1) clearly does not lead to one of the
+/// above problematic expressions, or (2) is something we cannot determine
+/// leads to a problematic expression based on such local checking.
+///
+/// Both EvalAddr and EvalVal follow through reference variables to evaluate
+/// the expression that they point to. Such variables are added to the
+/// 'refVars' vector so that we know what the reference variable "trail" was.
+///
+/// EvalAddr processes expressions that are pointers that are used as
+/// references (and not L-values). EvalVal handles all other values.
+/// The base case of the recursion is a check for the above problematic
+/// expressions.
+///
+/// This implementation handles:
+///
+/// * pointer-to-pointer casts
+/// * implicit conversions from array references to pointers
+/// * taking the address of fields
+/// * arbitrary interplay between "&" and "*" operators
+/// * pointer arithmetic from an address of a stack variable
+/// * taking the address of an array element where the array is on the stack
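+///
+/// For example (illustrative), `return &arr[i] + 1;` for a local array is
+/// still caught: the address of a stack array element plus pointer
+/// arithmetic.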
+static Expr *EvalAddr(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars,
+ Decl *ParentDecl) {
+ if (E->isTypeDependent())
+ return nullptr;
+
+ // We should only be called for evaluating pointer expressions.
+ assert((E->getType()->isAnyPointerType() ||
+ E->getType()->isBlockPointerType() ||
+ E->getType()->isObjCQualifiedIdType()) &&
+ "EvalAddr only works on pointers");
+
+ E = E->IgnoreParens();
+
+ // Our "symbolic interpreter" is just a dispatch off the currently
+ // viewed AST node. We then recursively traverse the AST by calling
+ // EvalAddr and EvalVal appropriately.
+ switch (E->getStmtClass()) {
+ case Stmt::DeclRefExprClass: {
+ DeclRefExpr *DR = cast<DeclRefExpr>(E);
+
+ // If we leave the immediate function, the lifetime isn't about to end.
+ if (DR->refersToEnclosingVariableOrCapture())
+ return nullptr;
+
+ if (VarDecl *V = dyn_cast<VarDecl>(DR->getDecl()))
+ // If this is a reference variable, follow through to the expression that
+ // it points to.
+ if (V->hasLocalStorage() &&
+ V->getType()->isReferenceType() && V->hasInit()) {
+ // Add the reference variable to the "trail".
+ refVars.push_back(DR);
+ return EvalAddr(V->getInit(), refVars, ParentDecl);
+ }
+
+ return nullptr;
+ }
+
+ case Stmt::UnaryOperatorClass: {
+    // The only unary operator that makes sense to handle here
+ // is AddrOf. All others don't make sense as pointers.
+ UnaryOperator *U = cast<UnaryOperator>(E);
+
+ if (U->getOpcode() == UO_AddrOf)
+ return EvalVal(U->getSubExpr(), refVars, ParentDecl);
+ else
+ return nullptr;
+ }
+
+ case Stmt::BinaryOperatorClass: {
+ // Handle pointer arithmetic. All other binary operators are not valid
+ // in this context.
+ BinaryOperator *B = cast<BinaryOperator>(E);
+ BinaryOperatorKind op = B->getOpcode();
+
+ if (op != BO_Add && op != BO_Sub)
+ return nullptr;
+
+ Expr *Base = B->getLHS();
+
+ // Determine which argument is the real pointer base. It could be
+ // the RHS argument instead of the LHS.
+ if (!Base->getType()->isPointerType()) Base = B->getRHS();
+
+    assert(Base->getType()->isPointerType());
+ return EvalAddr(Base, refVars, ParentDecl);
+ }
+
+ // For conditional operators we need to see if either the LHS or RHS are
+ // valid DeclRefExpr*s. If one of them is valid, we return it.
+ case Stmt::ConditionalOperatorClass: {
+ ConditionalOperator *C = cast<ConditionalOperator>(E);
+
+ // Handle the GNU extension for missing LHS.
+ // FIXME: That isn't a ConditionalOperator, so doesn't get here.
+ if (Expr *LHSExpr = C->getLHS()) {
+ // In C++, we can have a throw-expression, which has 'void' type.
+ if (!LHSExpr->getType()->isVoidType())
+ if (Expr *LHS = EvalAddr(LHSExpr, refVars, ParentDecl))
+ return LHS;
+ }
+
+ // In C++, we can have a throw-expression, which has 'void' type.
+ if (C->getRHS()->getType()->isVoidType())
+ return nullptr;
+
+ return EvalAddr(C->getRHS(), refVars, ParentDecl);
+ }
+
+ case Stmt::BlockExprClass:
+ if (cast<BlockExpr>(E)->getBlockDecl()->hasCaptures())
+ return E; // local block.
+ return nullptr;
+
+ case Stmt::AddrLabelExprClass:
+ return E; // address of label.
+
+ case Stmt::ExprWithCleanupsClass:
+ return EvalAddr(cast<ExprWithCleanups>(E)->getSubExpr(), refVars,
+ ParentDecl);
+
+ // For casts, we need to handle conversions from arrays to
+ // pointer values, and pointer-to-pointer conversions.
+ case Stmt::ImplicitCastExprClass:
+ case Stmt::CStyleCastExprClass:
+ case Stmt::CXXFunctionalCastExprClass:
+ case Stmt::ObjCBridgedCastExprClass:
+ case Stmt::CXXStaticCastExprClass:
+ case Stmt::CXXDynamicCastExprClass:
+ case Stmt::CXXConstCastExprClass:
+ case Stmt::CXXReinterpretCastExprClass: {
+ Expr* SubExpr = cast<CastExpr>(E)->getSubExpr();
+ switch (cast<CastExpr>(E)->getCastKind()) {
+ case CK_LValueToRValue:
+ case CK_NoOp:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ return EvalAddr(SubExpr, refVars, ParentDecl);
+
+ case CK_ArrayToPointerDecay:
+ return EvalVal(SubExpr, refVars, ParentDecl);
+
+ case CK_BitCast:
+ if (SubExpr->getType()->isAnyPointerType() ||
+ SubExpr->getType()->isBlockPointerType() ||
+ SubExpr->getType()->isObjCQualifiedIdType())
+ return EvalAddr(SubExpr, refVars, ParentDecl);
+ else
+ return nullptr;
+
+ default:
+ return nullptr;
+ }
+ }
+
+ case Stmt::MaterializeTemporaryExprClass:
+ if (Expr *Result = EvalAddr(
+ cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr(),
+ refVars, ParentDecl))
+ return Result;
+
+ return E;
+
+ // Everything else: we simply don't reason about them.
+ default:
+ return nullptr;
+ }
+}
+
+
+/// EvalVal - This function complements EvalAddr in the mutual recursion.
+/// See the comments for EvalAddr for more details.
+static Expr *EvalVal(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars,
+ Decl *ParentDecl) {
+do {
+ // We should only be called for evaluating non-pointer expressions, or
+ // expressions with a pointer type that are not used as references but instead
+ // are l-values (e.g., DeclRefExpr with a pointer type).
+
+ // Our "symbolic interpreter" is just a dispatch off the currently
+ // viewed AST node. We then recursively traverse the AST by calling
+ // EvalAddr and EvalVal appropriately.
+
+ E = E->IgnoreParens();
+ switch (E->getStmtClass()) {
+ case Stmt::ImplicitCastExprClass: {
+ ImplicitCastExpr *IE = cast<ImplicitCastExpr>(E);
+ if (IE->getValueKind() == VK_LValue) {
+ E = IE->getSubExpr();
+ continue;
+ }
+ return nullptr;
+ }
+
+ case Stmt::ExprWithCleanupsClass:
+    return EvalVal(cast<ExprWithCleanups>(E)->getSubExpr(), refVars,
+                   ParentDecl);
+
+ case Stmt::DeclRefExprClass: {
+ // When we hit a DeclRefExpr we are looking at code that refers to a
+ // variable's name. If it's not a reference variable we check if it has
+ // local storage within the function, and if so, return the expression.
+ DeclRefExpr *DR = cast<DeclRefExpr>(E);
+
+ // If we leave the immediate function, the lifetime isn't about to end.
+ if (DR->refersToEnclosingVariableOrCapture())
+ return nullptr;
+
+ if (VarDecl *V = dyn_cast<VarDecl>(DR->getDecl())) {
+ // Check if it refers to itself, e.g. "int& i = i;".
+ if (V == ParentDecl)
+ return DR;
+
+ if (V->hasLocalStorage()) {
+ if (!V->getType()->isReferenceType())
+ return DR;
+
+ // Reference variable, follow through to the expression that
+ // it points to.
+ if (V->hasInit()) {
+ // Add the reference variable to the "trail".
+ refVars.push_back(DR);
+ return EvalVal(V->getInit(), refVars, V);
+ }
+ }
+ }
+
+ return nullptr;
+ }
+
+ case Stmt::UnaryOperatorClass: {
+    // The only unary operator that makes sense to handle here
+ // is Deref. All others don't resolve to a "name." This includes
+ // handling all sorts of rvalues passed to a unary operator.
+ UnaryOperator *U = cast<UnaryOperator>(E);
+
+ if (U->getOpcode() == UO_Deref)
+ return EvalAddr(U->getSubExpr(), refVars, ParentDecl);
+
+ return nullptr;
+ }
+
+ case Stmt::ArraySubscriptExprClass: {
+ // Array subscripts are potential references to data on the stack. We
+ // retrieve the DeclRefExpr* for the array variable if it indeed
+ // has local storage.
+    return EvalAddr(cast<ArraySubscriptExpr>(E)->getBase(), refVars,
+                    ParentDecl);
+ }
+
+ case Stmt::OMPArraySectionExprClass: {
+ return EvalAddr(cast<OMPArraySectionExpr>(E)->getBase(), refVars,
+ ParentDecl);
+ }
+
+ case Stmt::ConditionalOperatorClass: {
+ // For conditional operators we need to see if either the LHS or RHS are
+ // non-NULL Expr's. If one is non-NULL, we return it.
+ ConditionalOperator *C = cast<ConditionalOperator>(E);
+
+ // Handle the GNU extension for missing LHS.
+ if (Expr *LHSExpr = C->getLHS()) {
+ // In C++, we can have a throw-expression, which has 'void' type.
+ if (!LHSExpr->getType()->isVoidType())
+ if (Expr *LHS = EvalVal(LHSExpr, refVars, ParentDecl))
+ return LHS;
+ }
+
+ // In C++, we can have a throw-expression, which has 'void' type.
+ if (C->getRHS()->getType()->isVoidType())
+ return nullptr;
+
+ return EvalVal(C->getRHS(), refVars, ParentDecl);
+ }
+
+ // Accesses to members are potential references to data on the stack.
+ case Stmt::MemberExprClass: {
+ MemberExpr *M = cast<MemberExpr>(E);
+
+ // Check for indirect access. We only want direct field accesses.
+ if (M->isArrow())
+ return nullptr;
+
+ // Check whether the member type is itself a reference, in which case
+ // we're not going to refer to the member, but to what the member refers to.
+ if (M->getMemberDecl()->getType()->isReferenceType())
+ return nullptr;
+
+ return EvalVal(M->getBase(), refVars, ParentDecl);
+ }
+
+ case Stmt::MaterializeTemporaryExprClass:
+ if (Expr *Result = EvalVal(
+ cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr(),
+ refVars, ParentDecl))
+ return Result;
+
+ return E;
+
+ default:
+ // Check that we don't return or take the address of a reference to a
+ // temporary. This is only useful in C++.
+ if (!E->isTypeDependent() && E->isRValue())
+ return E;
+
+ // Everything else: we simply don't reason about them.
+ return nullptr;
+ }
+} while (true);
+}
+
+void
+Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
+ SourceLocation ReturnLoc,
+ bool isObjCMethod,
+ const AttrVec *Attrs,
+ const FunctionDecl *FD) {
+ CheckReturnStackAddr(*this, RetValExp, lhsType, ReturnLoc);
+
+ // Check if the return value is null but should not be.
+ if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
+ (!isObjCMethod && isNonNullType(Context, lhsType))) &&
+ CheckNonNullExpr(*this, RetValExp))
+ Diag(ReturnLoc, diag::warn_null_ret)
+ << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();
+
+ // C++11 [basic.stc.dynamic.allocation]p4:
+ // If an allocation function declared with a non-throwing
+ // exception-specification fails to allocate storage, it shall return
+ // a null pointer. Any other allocation function that fails to allocate
+ // storage shall indicate failure only by throwing an exception [...]
+ if (FD) {
+ OverloadedOperatorKind Op = FD->getOverloadedOperator();
+ if (Op == OO_New || Op == OO_Array_New) {
+ const FunctionProtoType *Proto
+ = FD->getType()->castAs<FunctionProtoType>();
+ if (!Proto->isNothrow(Context, /*ResultIfDependent*/true) &&
+ CheckNonNullExpr(*this, RetValExp))
+ Diag(ReturnLoc, diag::warn_operator_new_returns_null)
+ << FD << getLangOpts().CPlusPlus11;
+ }
+ }
+}
+
+//===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//
+
+/// Check for comparisons of floating point operands using != and ==.
+/// Issue a warning if these are not self-comparisons, as they are not likely
+/// to do what the programmer intended.
+void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS) {
+  Expr *LeftExprSansParen = LHS->IgnoreParenImpCasts();
+  Expr *RightExprSansParen = RHS->IgnoreParenImpCasts();
+
+ // Special case: check for x == x (which is OK).
+ // Do not emit warnings for such cases.
+  if (DeclRefExpr *DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
+    if (DeclRefExpr *DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
+ if (DRL->getDecl() == DRR->getDecl())
+ return;
+
+ // Special case: check for comparisons against literals that can be exactly
+ // represented by APFloat. In such cases, do not emit a warning. This
+  // is a heuristic: comparisons against such literals are often used to
+ // detect if a value in a variable has not changed. This clearly can
+ // lead to false negatives.
+  if (FloatingLiteral *FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
+    if (FLL->isExact())
+      return;
+  } else if (FloatingLiteral *FLR =
+                 dyn_cast<FloatingLiteral>(RightExprSansParen)) {
+    if (FLR->isExact())
+      return;
+  }
+
+  // Check for comparisons against calls to builtin functions.
+  if (CallExpr *CL = dyn_cast<CallExpr>(LeftExprSansParen))
+    if (CL->getBuiltinCallee())
+      return;
+
+  if (CallExpr *CR = dyn_cast<CallExpr>(RightExprSansParen))
+    if (CR->getBuiltinCallee())
+      return;
+
+ // Emit the diagnostic.
+ Diag(Loc, diag::warn_floatingpoint_eq)
+ << LHS->getSourceRange() << RHS->getSourceRange();
+}
+
+//===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
+//===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//
+
+namespace {
+
+/// Structure recording the 'active' range of an integer-valued
+/// expression.
+struct IntRange {
+ /// The number of bits active in the int.
+ unsigned Width;
+
+ /// True if the int is known not to have negative values.
+ bool NonNegative;
+
+ IntRange(unsigned Width, bool NonNegative)
+ : Width(Width), NonNegative(NonNegative)
+ {}
+
+ /// Returns the range of the bool type.
+ static IntRange forBoolType() {
+ return IntRange(1, true);
+ }
+
+ /// Returns the range of an opaque value of the given integral type.
+ static IntRange forValueOfType(ASTContext &C, QualType T) {
+ return forValueOfCanonicalType(C,
+ T->getCanonicalTypeInternal().getTypePtr());
+ }
+
+ /// Returns the range of an opaque value of a canonical integral type.
+ static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) {
+ assert(T->isCanonicalUnqualified());
+
+ if (const VectorType *VT = dyn_cast<VectorType>(T))
+ T = VT->getElementType().getTypePtr();
+ if (const ComplexType *CT = dyn_cast<ComplexType>(T))
+ T = CT->getElementType().getTypePtr();
+ if (const AtomicType *AT = dyn_cast<AtomicType>(T))
+ T = AT->getValueType().getTypePtr();
+
+ // For enum types, use the known bit width of the enumerators.
+ if (const EnumType *ET = dyn_cast<EnumType>(T)) {
+ EnumDecl *Enum = ET->getDecl();
+ if (!Enum->isCompleteDefinition())
+ return IntRange(C.getIntWidth(QualType(T, 0)), false);
+
+ unsigned NumPositive = Enum->getNumPositiveBits();
+ unsigned NumNegative = Enum->getNumNegativeBits();
+
+      if (NumNegative == 0)
+        return IntRange(NumPositive, /*NonNegative=*/true);
+      else
+        return IntRange(std::max(NumPositive + 1, NumNegative),
+                        /*NonNegative=*/false);
+ }
+
+ const BuiltinType *BT = cast<BuiltinType>(T);
+ assert(BT->isInteger());
+
+ return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
+ }
+
+ /// Returns the "target" range of a canonical integral type, i.e.
+ /// the range of values expressible in the type.
+ ///
+ /// This matches forValueOfCanonicalType except that enums have the
+ /// full range of their type, not the range of their enumerators.
+ static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) {
+ assert(T->isCanonicalUnqualified());
+
+ if (const VectorType *VT = dyn_cast<VectorType>(T))
+ T = VT->getElementType().getTypePtr();
+ if (const ComplexType *CT = dyn_cast<ComplexType>(T))
+ T = CT->getElementType().getTypePtr();
+ if (const AtomicType *AT = dyn_cast<AtomicType>(T))
+ T = AT->getValueType().getTypePtr();
+ if (const EnumType *ET = dyn_cast<EnumType>(T))
+ T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();
+
+ const BuiltinType *BT = cast<BuiltinType>(T);
+ assert(BT->isInteger());
+
+ return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
+ }
+
+ /// Returns the supremum of two ranges: i.e. their conservative merge.
+ static IntRange join(IntRange L, IntRange R) {
+ return IntRange(std::max(L.Width, R.Width),
+ L.NonNegative && R.NonNegative);
+ }
+
+  /// Returns the infimum of two ranges: i.e. their aggressive merge.
+ static IntRange meet(IntRange L, IntRange R) {
+ return IntRange(std::min(L.Width, R.Width),
+ L.NonNegative || R.NonNegative);
+ }
+};
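+// For example (illustrative): the join of an 8-bit non-negative range and a
+// 16-bit possibly-negative range is 16-bit possibly-negative; their meet is
+// 8-bit non-negative.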
+
+static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
+ unsigned MaxWidth) {
+ if (value.isSigned() && value.isNegative())
+ return IntRange(value.getMinSignedBits(), false);
+
+ if (value.getBitWidth() > MaxWidth)
+ value = value.trunc(MaxWidth);
+
+ // isNonNegative() just checks the sign bit without considering
+ // signedness.
+ return IntRange(value.getActiveBits(), true);
+}
+
+static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
+ unsigned MaxWidth) {
+ if (result.isInt())
+ return GetValueRange(C, result.getInt(), MaxWidth);
+
+ if (result.isVector()) {
+ IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth);
+ for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
+ IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth);
+ R = IntRange::join(R, El);
+ }
+ return R;
+ }
+
+ if (result.isComplexInt()) {
+ IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth);
+ IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth);
+ return IntRange::join(R, I);
+ }
+
+ // This can happen with lossless casts to intptr_t of "based" lvalues.
+ // Assume it might use arbitrary bits.
+ // FIXME: The only reason we need to pass the type in here is to get
+ // the sign right on this one case. It would be nice if APValue
+ // preserved this.
+ assert(result.isLValue() || result.isAddrLabelDiff());
+ return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType());
+}
+
+static QualType GetExprType(Expr *E) {
+ QualType Ty = E->getType();
+ if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>())
+ Ty = AtomicRHS->getValueType();
+ return Ty;
+}
+
+/// Pseudo-evaluate the given integer expression, estimating the
+/// range of values it might take.
+///
+/// \param MaxWidth - the width to which the value will be truncated
+static IntRange GetExprRange(ASTContext &C, Expr *E, unsigned MaxWidth) {
+ E = E->IgnoreParens();
+
+ // Try a full evaluation first.
+ Expr::EvalResult result;
+ if (E->EvaluateAsRValue(result, C))
+ return GetValueRange(C, result.Val, GetExprType(E), MaxWidth);
+
+ // I think we only want to look through implicit casts here; if the
+ // user has an explicit widening cast, we should treat the value as
+ // being of the new, wider type.
+ if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
+ return GetExprRange(C, CE->getSubExpr(), MaxWidth);
+
+ IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE));
+
+ bool isIntegerCast = (CE->getCastKind() == CK_IntegralCast);
+
+ // Assume that non-integer casts can span the full range of the type.
+ if (!isIntegerCast)
+ return OutputTypeRange;
+
+ IntRange SubRange
+ = GetExprRange(C, CE->getSubExpr(),
+ std::min(MaxWidth, OutputTypeRange.Width));
+
+ // Bail out if the subexpr's range is as wide as the cast type.
+ if (SubRange.Width >= OutputTypeRange.Width)
+ return OutputTypeRange;
+
+ // Otherwise, we take the smaller width, and we're non-negative if
+ // either the output type or the subexpr is.
+ return IntRange(SubRange.Width,
+ SubRange.NonNegative || OutputTypeRange.NonNegative);
+ }
+
+ if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
+ // If we can fold the condition, just take that operand.
+ bool CondResult;
+ if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
+ return GetExprRange(C, CondResult ? CO->getTrueExpr()
+ : CO->getFalseExpr(),
+ MaxWidth);
+
+ // Otherwise, conservatively merge.
+ IntRange L = GetExprRange(C, CO->getTrueExpr(), MaxWidth);
+ IntRange R = GetExprRange(C, CO->getFalseExpr(), MaxWidth);
+ return IntRange::join(L, R);
+ }
+
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ switch (BO->getOpcode()) {
+
+ // Boolean-valued operations are single-bit and positive.
+ case BO_LAnd:
+ case BO_LOr:
+ case BO_LT:
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ case BO_EQ:
+ case BO_NE:
+ return IntRange::forBoolType();
+
+ // The type of the assignments is the type of the LHS, so the RHS
+ // is not necessarily the same type.
+ case BO_MulAssign:
+ case BO_DivAssign:
+ case BO_RemAssign:
+ case BO_AddAssign:
+ case BO_SubAssign:
+ case BO_XorAssign:
+ case BO_OrAssign:
+ // TODO: bitfields?
+ return IntRange::forValueOfType(C, GetExprType(E));
+
+ // Simple assignments just pass through the RHS, which will have
+ // been coerced to the LHS type.
+ case BO_Assign:
+ // TODO: bitfields?
+ return GetExprRange(C, BO->getRHS(), MaxWidth);
+
+ // Operations with opaque sources are black-listed.
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ return IntRange::forValueOfType(C, GetExprType(E));
+
+ // Bitwise-and uses the *infimum* of the two source ranges.
+ case BO_And:
+ case BO_AndAssign:
+ return IntRange::meet(GetExprRange(C, BO->getLHS(), MaxWidth),
+ GetExprRange(C, BO->getRHS(), MaxWidth));
+
+ // Left shift gets black-listed based on a judgement call.
+ case BO_Shl:
+ // ...except that we want to treat '1 << (blah)' as logically
+ // positive. It's an important idiom.
+ if (IntegerLiteral *I
+ = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) {
+ if (I->getValue() == 1) {
+ IntRange R = IntRange::forValueOfType(C, GetExprType(E));
+ return IntRange(R.Width, /*NonNegative*/ true);
+ }
+ }
+ // fallthrough
+
+ case BO_ShlAssign:
+ return IntRange::forValueOfType(C, GetExprType(E));
+
+ // Right shift by a constant can narrow its left argument.
+ case BO_Shr:
+ case BO_ShrAssign: {
+ IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth);
+
+ // If the shift amount is a positive constant, drop the width by
+ // that much.
+ llvm::APSInt shift;
+ if (BO->getRHS()->isIntegerConstantExpr(shift, C) &&
+ shift.isNonNegative()) {
+ unsigned zext = shift.getZExtValue();
+ if (zext >= L.Width)
+ L.Width = (L.NonNegative ? 0 : 1);
+ else
+ L.Width -= zext;
+ }
+
+ return L;
+ }
+
+ // Comma acts as its right operand.
+ case BO_Comma:
+ return GetExprRange(C, BO->getRHS(), MaxWidth);
+
+ // Black-list pointer subtractions.
+ case BO_Sub:
+ if (BO->getLHS()->getType()->isPointerType())
+ return IntRange::forValueOfType(C, GetExprType(E));
+ break;
+
+ // The width of a division result is mostly determined by the size
+ // of the LHS.
+ case BO_Div: {
+ // Don't 'pre-truncate' the operands.
+ unsigned opWidth = C.getIntWidth(GetExprType(E));
+ IntRange L = GetExprRange(C, BO->getLHS(), opWidth);
+
+ // If the divisor is constant, use that.
+ llvm::APSInt divisor;
+ if (BO->getRHS()->isIntegerConstantExpr(divisor, C)) {
+ unsigned log2 = divisor.logBase2(); // floor(log_2(divisor))
+ if (log2 >= L.Width)
+ L.Width = (L.NonNegative ? 0 : 1);
+ else
+ L.Width = std::min(L.Width - log2, MaxWidth);
+ return L;
+ }
+
+ // Otherwise, just use the LHS's width.
+ IntRange R = GetExprRange(C, BO->getRHS(), opWidth);
+ return IntRange(L.Width, L.NonNegative && R.NonNegative);
+ }
+
+ // The result of a remainder can't be larger than the result of
+ // either side.
+ case BO_Rem: {
+ // Don't 'pre-truncate' the operands.
+ unsigned opWidth = C.getIntWidth(GetExprType(E));
+ IntRange L = GetExprRange(C, BO->getLHS(), opWidth);
+ IntRange R = GetExprRange(C, BO->getRHS(), opWidth);
+
+ IntRange meet = IntRange::meet(L, R);
+ meet.Width = std::min(meet.Width, MaxWidth);
+ return meet;
+ }
+
+ // The default behavior is okay for these.
+ case BO_Mul:
+ case BO_Add:
+ case BO_Xor:
+ case BO_Or:
+ break;
+ }
+
+ // The default case is to treat the operation as if it were closed
+ // on the narrowest type that encompasses both operands.
+ IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth);
+ IntRange R = GetExprRange(C, BO->getRHS(), MaxWidth);
+ return IntRange::join(L, R);
+ }
+
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ switch (UO->getOpcode()) {
+ // Boolean-valued operations are white-listed.
+ case UO_LNot:
+ return IntRange::forBoolType();
+
+ // Operations with opaque sources are black-listed.
+ case UO_Deref:
+ case UO_AddrOf: // should be impossible
+ return IntRange::forValueOfType(C, GetExprType(E));
+
+ default:
+ return GetExprRange(C, UO->getSubExpr(), MaxWidth);
+ }
+ }
+
+ if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E))
+ return GetExprRange(C, OVE->getSourceExpr(), MaxWidth);
+
+ if (FieldDecl *BitField = E->getSourceBitField())
+ return IntRange(BitField->getBitWidthValue(C),
+ BitField->getType()->isUnsignedIntegerOrEnumerationType());
+
+ return IntRange::forValueOfType(C, GetExprType(E));
+}
+
+static IntRange GetExprRange(ASTContext &C, Expr *E) {
+ return GetExprRange(C, E, C.getIntWidth(GetExprType(E)));
+}
+
+/// Checks whether the given value, which currently has the given
+/// source semantics, has the same value when coerced through the
+/// target semantics.
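+///
+/// For example (illustrative): 0.5 survives a double->float->double
+/// round trip exactly, while 0.1 does not, because 0.1 has no finite
+/// binary representation and rounds differently at each precision.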
+static bool IsSameFloatAfterCast(const llvm::APFloat &value,
+ const llvm::fltSemantics &Src,
+ const llvm::fltSemantics &Tgt) {
+ llvm::APFloat truncated = value;
+
+ bool ignored;
+ truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored);
+ truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored);
+
+ return truncated.bitwiseIsEqual(value);
+}
+
+/// Checks whether the given value, which currently has the given
+/// source semantics, has the same value when coerced through the
+/// target semantics.
+///
+/// The value might be a vector of floats (or a complex number).
+static bool IsSameFloatAfterCast(const APValue &value,
+ const llvm::fltSemantics &Src,
+ const llvm::fltSemantics &Tgt) {
+ if (value.isFloat())
+ return IsSameFloatAfterCast(value.getFloat(), Src, Tgt);
+
+ if (value.isVector()) {
+ for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i)
+ if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt))
+ return false;
+ return true;
+ }
+
+ assert(value.isComplexFloat());
+ return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) &&
+ IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt));
+}
+
+static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC);
+
+static bool IsZero(Sema &S, Expr *E) {
+ // Suppress cases where we are comparing against an enum constant.
+ if (const DeclRefExpr *DR =
+ dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
+ if (isa<EnumConstantDecl>(DR->getDecl()))
+ return false;
+
+ // Suppress cases where the '0' value is expanded from a macro.
+ if (E->getLocStart().isMacroID())
+ return false;
+
+ llvm::APSInt Value;
+ return E->isIntegerConstantExpr(Value, S.Context) && Value == 0;
+}
+
+static bool HasEnumType(Expr *E) {
+ // Strip off implicit integral promotions.
+ while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() != CK_IntegralCast &&
+ ICE->getCastKind() != CK_NoOp)
+ break;
+ E = ICE->getSubExpr();
+ }
+
+ return E->getType()->isEnumeralType();
+}
+
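+/// Warn about comparisons of an unsigned expression against zero that
+/// are always true or always false; e.g. (an illustrative sketch):
+/// \code
+///   unsigned n = ...;
+///   if (n < 0)  { }   // always false
+///   if (n >= 0) { }   // always true
+/// \endcode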
+static void CheckTrivialUnsignedComparison(Sema &S, BinaryOperator *E) {
+ // Disable warning in template instantiations.
+ if (!S.ActiveTemplateInstantiations.empty())
+ return;
+
+ BinaryOperatorKind op = E->getOpcode();
+ if (E->isValueDependent())
+ return;
+
+ if (op == BO_LT && IsZero(S, E->getRHS())) {
+ S.Diag(E->getOperatorLoc(), diag::warn_lunsigned_always_true_comparison)
+ << "< 0" << "false" << HasEnumType(E->getLHS())
+ << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
+ } else if (op == BO_GE && IsZero(S, E->getRHS())) {
+ S.Diag(E->getOperatorLoc(), diag::warn_lunsigned_always_true_comparison)
+ << ">= 0" << "true" << HasEnumType(E->getLHS())
+ << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
+ } else if (op == BO_GT && IsZero(S, E->getLHS())) {
+ S.Diag(E->getOperatorLoc(), diag::warn_runsigned_always_true_comparison)
+ << "0 >" << "false" << HasEnumType(E->getRHS())
+ << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
+ } else if (op == BO_LE && IsZero(S, E->getLHS())) {
+ S.Diag(E->getOperatorLoc(), diag::warn_runsigned_always_true_comparison)
+ << "0 <=" << "true" << HasEnumType(E->getRHS())
+ << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
+ }
+}
+
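+/// Diagnose comparisons whose result is fixed because the constant
+/// operand is outside the range of the other operand's type; e.g.
+/// (an illustrative sketch):
+/// \code
+///   unsigned char c = ...;
+///   if (c == 300) { }   // always false: 300 is out of range for 'c'
+/// \endcode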
+static void DiagnoseOutOfRangeComparison(Sema &S, BinaryOperator *E,
+ Expr *Constant, Expr *Other,
+ llvm::APSInt Value,
+ bool RhsConstant) {
+ // Disable warning in template instantiations.
+ if (!S.ActiveTemplateInstantiations.empty())
+ return;
+
+ // TODO: Investigate using GetExprRange() to get tighter bounds
+ // on the bit ranges.
+ QualType OtherT = Other->getType();
+ if (const auto *AT = OtherT->getAs<AtomicType>())
+ OtherT = AT->getValueType();
+ IntRange OtherRange = IntRange::forValueOfType(S.Context, OtherT);
+ unsigned OtherWidth = OtherRange.Width;
+
+ bool OtherIsBooleanType = Other->isKnownToHaveBooleanValue();
+
+ // 0 values are handled later by CheckTrivialUnsignedComparison().
+ if ((Value == 0) && (!OtherIsBooleanType))
+ return;
+
+ BinaryOperatorKind op = E->getOpcode();
+ bool IsTrue = true;
+
+ // Used for diagnostic printout.
+ enum {
+ LiteralConstant = 0,
+ CXXBoolLiteralTrue,
+ CXXBoolLiteralFalse
+ } LiteralOrBoolConstant = LiteralConstant;
+
+ if (!OtherIsBooleanType) {
+ QualType ConstantT = Constant->getType();
+ QualType CommonT = E->getLHS()->getType();
+
+ if (S.Context.hasSameUnqualifiedType(OtherT, ConstantT))
+ return;
+ assert((OtherT->isIntegerType() && ConstantT->isIntegerType()) &&
+ "comparison with non-integer type");
+
+ bool ConstantSigned = ConstantT->isSignedIntegerType();
+ bool CommonSigned = CommonT->isSignedIntegerType();
+
+ bool EqualityOnly = false;
+
+ if (CommonSigned) {
+ // The common type is signed, therefore no signed to unsigned conversion.
+ if (!OtherRange.NonNegative) {
+ // Check that the constant is representable in type OtherT.
+ if (ConstantSigned) {
+ if (OtherWidth >= Value.getMinSignedBits())
+ return;
+ } else { // !ConstantSigned
+ if (OtherWidth >= Value.getActiveBits() + 1)
+ return;
+ }
+ } else { // !OtherSigned
+ // Check that the constant is representable in type OtherT.
+ // Negative values are out of range.
+ if (ConstantSigned) {
+ if (Value.isNonNegative() && OtherWidth >= Value.getActiveBits())
+ return;
+ } else { // !ConstantSigned
+ if (OtherWidth >= Value.getActiveBits())
+ return;
+ }
+ }
+ } else { // !CommonSigned
+ if (OtherRange.NonNegative) {
+ if (OtherWidth >= Value.getActiveBits())
+ return;
+ } else { // OtherSigned
+ assert(!ConstantSigned &&
+ "Two signed types converted to unsigned types.");
+ // Check to see if the constant is representable in OtherT.
+ if (OtherWidth > Value.getActiveBits())
+ return;
+ // Check to see if the constant is equivalent to a negative value
+ // cast to CommonT.
+ if (S.Context.getIntWidth(ConstantT) ==
+ S.Context.getIntWidth(CommonT) &&
+ Value.isNegative() && Value.getMinSignedBits() <= OtherWidth)
+ return;
+ // The constant value rests between values that OtherT can represent
+ // after conversion. Relational comparison still works, but equality
+ // comparisons will be tautological.
+ EqualityOnly = true;
+ }
+ }
+
+ bool PositiveConstant = !ConstantSigned || Value.isNonNegative();
+
+ if (op == BO_EQ || op == BO_NE) {
+ IsTrue = op == BO_NE;
+ } else if (EqualityOnly) {
+ return;
+ } else if (RhsConstant) {
+ if (op == BO_GT || op == BO_GE)
+ IsTrue = !PositiveConstant;
+ else // op == BO_LT || op == BO_LE
+ IsTrue = PositiveConstant;
+ } else {
+ if (op == BO_LT || op == BO_LE)
+ IsTrue = !PositiveConstant;
+ else // op == BO_GT || op == BO_GE
+ IsTrue = PositiveConstant;
+ }
+ } else {
+ // Other isKnownToHaveBooleanValue
+ enum CompareBoolWithConstantResult { AFals, ATrue, Unkwn };
+ enum ConstantValue { LT_Zero, Zero, One, GT_One, SizeOfConstVal };
+ enum ConstantSide { Lhs, Rhs, SizeOfConstSides };
+
+ static const struct LinkedConditions {
+ CompareBoolWithConstantResult BO_LT_OP[SizeOfConstSides][SizeOfConstVal];
+ CompareBoolWithConstantResult BO_GT_OP[SizeOfConstSides][SizeOfConstVal];
+ CompareBoolWithConstantResult BO_LE_OP[SizeOfConstSides][SizeOfConstVal];
+ CompareBoolWithConstantResult BO_GE_OP[SizeOfConstSides][SizeOfConstVal];
+ CompareBoolWithConstantResult BO_EQ_OP[SizeOfConstSides][SizeOfConstVal];
+ CompareBoolWithConstantResult BO_NE_OP[SizeOfConstSides][SizeOfConstVal];
+
+ } TruthTable = {
+ // Constant on LHS. | Constant on RHS. |
+ // LT_Zero| Zero | One |GT_One| LT_Zero| Zero | One |GT_One|
+ { { ATrue, Unkwn, AFals, AFals }, { AFals, AFals, Unkwn, ATrue } },
+ { { AFals, AFals, Unkwn, ATrue }, { ATrue, Unkwn, AFals, AFals } },
+ { { ATrue, ATrue, Unkwn, AFals }, { AFals, Unkwn, ATrue, ATrue } },
+ { { AFals, Unkwn, ATrue, ATrue }, { ATrue, ATrue, Unkwn, AFals } },
+ { { AFals, Unkwn, Unkwn, AFals }, { AFals, Unkwn, Unkwn, AFals } },
+ { { ATrue, Unkwn, Unkwn, ATrue }, { ATrue, Unkwn, Unkwn, ATrue } }
+ };
+
+ bool ConstantIsBoolLiteral = isa<CXXBoolLiteralExpr>(Constant);
+
+ enum ConstantValue ConstVal = Zero;
+ if (Value.isUnsigned() || Value.isNonNegative()) {
+ if (Value == 0) {
+ LiteralOrBoolConstant =
+ ConstantIsBoolLiteral ? CXXBoolLiteralFalse : LiteralConstant;
+ ConstVal = Zero;
+ } else if (Value == 1) {
+ LiteralOrBoolConstant =
+ ConstantIsBoolLiteral ? CXXBoolLiteralTrue : LiteralConstant;
+ ConstVal = One;
+ } else {
+ LiteralOrBoolConstant = LiteralConstant;
+ ConstVal = GT_One;
+ }
+ } else {
+ ConstVal = LT_Zero;
+ }
+
+ CompareBoolWithConstantResult CmpRes;
+
+ switch (op) {
+ case BO_LT:
+ CmpRes = TruthTable.BO_LT_OP[RhsConstant][ConstVal];
+ break;
+ case BO_GT:
+ CmpRes = TruthTable.BO_GT_OP[RhsConstant][ConstVal];
+ break;
+ case BO_LE:
+ CmpRes = TruthTable.BO_LE_OP[RhsConstant][ConstVal];
+ break;
+ case BO_GE:
+ CmpRes = TruthTable.BO_GE_OP[RhsConstant][ConstVal];
+ break;
+ case BO_EQ:
+ CmpRes = TruthTable.BO_EQ_OP[RhsConstant][ConstVal];
+ break;
+ case BO_NE:
+ CmpRes = TruthTable.BO_NE_OP[RhsConstant][ConstVal];
+ break;
+ default:
+ CmpRes = Unkwn;
+ break;
+ }
+
+ if (CmpRes == AFals) {
+ IsTrue = false;
+ } else if (CmpRes == ATrue) {
+ IsTrue = true;
+ } else {
+ return;
+ }
+ }
+
+ // If this is a comparison to an enum constant, include that
+ // constant in the diagnostic.
+ const EnumConstantDecl *ED = nullptr;
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant))
+ ED = dyn_cast<EnumConstantDecl>(DR->getDecl());
+
+ SmallString<64> PrettySourceValue;
+ llvm::raw_svector_ostream OS(PrettySourceValue);
+ if (ED)
+ OS << '\'' << *ED << "' (" << Value << ")";
+ else
+ OS << Value;
+
+ S.DiagRuntimeBehavior(
+ E->getOperatorLoc(), E,
+ S.PDiag(diag::warn_out_of_range_compare)
+ << OS.str() << LiteralOrBoolConstant
+ << OtherT << (OtherIsBooleanType && !OtherT->isBooleanType()) << IsTrue
+ << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange());
+}
+
+/// Analyze the operands of the given comparison. Implements the
+/// fallback case from AnalyzeComparison.
+static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
+ AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
+ AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
+}
+
+/// \brief Implements -Wsign-compare.
+///
+/// \param E the binary operator to check for warnings
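+///
+/// For example (illustrative):
+/// \code
+///   int i = ...; unsigned u = ...;
+///   if (i < u) { }   // -Wsign-compare: 'i' is converted to unsigned
+/// \endcode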
+static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
+ // The type the comparison is being performed in.
+ QualType T = E->getLHS()->getType();
+
+ // Only analyze comparison operators where both sides have been converted to
+ // the same type.
+ if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType()))
+ return AnalyzeImpConvsInComparison(S, E);
+
+ // Don't analyze value-dependent comparisons directly.
+ if (E->isValueDependent())
+ return AnalyzeImpConvsInComparison(S, E);
+
+ Expr *LHS = E->getLHS()->IgnoreParenImpCasts();
+ Expr *RHS = E->getRHS()->IgnoreParenImpCasts();
+
+ bool IsComparisonConstant = false;
+
+ // Check whether an integer constant comparison results in a value
+ // of 'true' or 'false'.
+ if (T->isIntegralType(S.Context)) {
+ llvm::APSInt RHSValue;
+ bool IsRHSIntegralLiteral =
+ RHS->isIntegerConstantExpr(RHSValue, S.Context);
+ llvm::APSInt LHSValue;
+ bool IsLHSIntegralLiteral =
+ LHS->isIntegerConstantExpr(LHSValue, S.Context);
+ if (IsRHSIntegralLiteral && !IsLHSIntegralLiteral)
+ DiagnoseOutOfRangeComparison(S, E, RHS, LHS, RHSValue, true);
+ else if (!IsRHSIntegralLiteral && IsLHSIntegralLiteral)
+ DiagnoseOutOfRangeComparison(S, E, LHS, RHS, LHSValue, false);
+ else
+ IsComparisonConstant =
+ (IsRHSIntegralLiteral && IsLHSIntegralLiteral);
+ } else if (!T->hasUnsignedIntegerRepresentation())
+ IsComparisonConstant = E->isIntegerConstantExpr(S.Context);
+
+ // We don't do anything special if this isn't an unsigned integral
+ // comparison: we're only interested in integral comparisons, and
+ // signed comparisons only happen in cases we don't care to warn about.
+ //
+ // We also don't care about value-dependent expressions or expressions
+ // whose result is a constant.
+ if (!T->hasUnsignedIntegerRepresentation() || IsComparisonConstant)
+ return AnalyzeImpConvsInComparison(S, E);
+
+ // Check to see if one of the (unmodified) operands is of different
+ // signedness.
+ Expr *signedOperand, *unsignedOperand;
+ if (LHS->getType()->hasSignedIntegerRepresentation()) {
+ assert(!RHS->getType()->hasSignedIntegerRepresentation() &&
+ "unsigned comparison between two signed integer expressions?");
+ signedOperand = LHS;
+ unsignedOperand = RHS;
+ } else if (RHS->getType()->hasSignedIntegerRepresentation()) {
+ signedOperand = RHS;
+ unsignedOperand = LHS;
+ } else {
+ CheckTrivialUnsignedComparison(S, E);
+ return AnalyzeImpConvsInComparison(S, E);
+ }
+
+ // Otherwise, calculate the effective range of the signed operand.
+ IntRange signedRange = GetExprRange(S.Context, signedOperand);
+
+ // Go ahead and analyze implicit conversions in the operands. Note
+ // that we skip the implicit conversions on both sides.
+ AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
+ AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());
+
+ // If the signed range is non-negative, -Wsign-compare won't fire,
+ // but we should still check for comparisons which are always true
+ // or false.
+ if (signedRange.NonNegative)
+ return CheckTrivialUnsignedComparison(S, E);
+
+ // For (in)equality comparisons, if the unsigned operand is a
+ // constant which cannot collide with an overflowed signed operand,
+ // then reinterpreting the signed operand as unsigned will not
+ // change the result of the comparison.
+ if (E->isEqualityOp()) {
+ unsigned comparisonWidth = S.Context.getIntWidth(T);
+ IntRange unsignedRange = GetExprRange(S.Context, unsignedOperand);
+
+ // We should never be unable to prove that the unsigned operand is
+ // non-negative.
+ assert(unsignedRange.NonNegative && "unsigned range includes negative?");
+
+ if (unsignedRange.Width < comparisonWidth)
+ return;
+ }
+
+ S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
+ S.PDiag(diag::warn_mixed_sign_comparison)
+ << LHS->getType() << RHS->getType()
+ << LHS->getSourceRange() << RHS->getSourceRange());
+}
+
+/// Analyzes an attempt to assign the given value to a bitfield.
+///
+/// Returns true if there was something fishy about the attempt.
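+///
+/// For example (illustrative):
+/// \code
+///   struct S { int bf : 3; };
+///   S s; s.bf = 100;   // warns: 100 truncates to -4 in a 3-bit field
+/// \endcode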
+static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
+ SourceLocation InitLoc) {
+ assert(Bitfield->isBitField());
+ if (Bitfield->isInvalidDecl())
+ return false;
+
+ // White-list bool bitfields.
+ if (Bitfield->getType()->isBooleanType())
+ return false;
+
+ // Ignore value- or type-dependent expressions.
+ if (Bitfield->getBitWidth()->isValueDependent() ||
+ Bitfield->getBitWidth()->isTypeDependent() ||
+ Init->isValueDependent() ||
+ Init->isTypeDependent())
+ return false;
+
+ Expr *OriginalInit = Init->IgnoreParenImpCasts();
+
+ llvm::APSInt Value;
+ if (!OriginalInit->EvaluateAsInt(Value, S.Context, Expr::SE_AllowSideEffects))
+ return false;
+
+ unsigned OriginalWidth = Value.getBitWidth();
+ unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);
+
+ if (OriginalWidth <= FieldWidth)
+ return false;
+
+ // Compute the value which the bitfield will contain.
+ llvm::APSInt TruncatedValue = Value.trunc(FieldWidth);
+ TruncatedValue.setIsSigned(Bitfield->getType()->isSignedIntegerType());
+
+ // Check whether the stored value is equal to the original value.
+ TruncatedValue = TruncatedValue.extend(OriginalWidth);
+ if (llvm::APSInt::isSameValue(Value, TruncatedValue))
+ return false;
+
+ // Special-case bitfields of width 1: booleans are naturally 0/1, and
+ // therefore don't strictly fit into a signed bitfield of width 1.
+ if (FieldWidth == 1 && Value == 1)
+ return false;
+
+ std::string PrettyValue = Value.toString(10);
+ std::string PrettyTrunc = TruncatedValue.toString(10);
+
+ S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant)
+ << PrettyValue << PrettyTrunc << OriginalInit->getType()
+ << Init->getSourceRange();
+
+ return true;
+}
+
+/// Analyze the given simple or compound assignment for warning-worthy
+/// operations.
+static void AnalyzeAssignment(Sema &S, BinaryOperator *E) {
+ // Just recurse on the LHS.
+ AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
+
+ // We want to recurse on the RHS as normal unless we're assigning to
+ // a bitfield.
+ if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) {
+ if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(),
+ E->getOperatorLoc())) {
+ // Recurse, ignoring any implicit conversions on the RHS.
+ return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(),
+ E->getOperatorLoc());
+ }
+ }
+
+ AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
+}
+
+/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
+static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T,
+ SourceLocation CContext, unsigned diag,
+ bool pruneControlFlow = false) {
+ if (pruneControlFlow) {
+ S.DiagRuntimeBehavior(E->getExprLoc(), E,
+ S.PDiag(diag)
+ << SourceType << T << E->getSourceRange()
+ << SourceRange(CContext));
+ return;
+ }
+ S.Diag(E->getExprLoc(), diag)
+ << SourceType << T << E->getSourceRange() << SourceRange(CContext);
+}
+
+/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
+static void DiagnoseImpCast(Sema &S, Expr *E, QualType T,
+ SourceLocation CContext, unsigned diag,
+ bool pruneControlFlow = false) {
+ DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow);
+}
+
+/// Diagnose an implicit cast from a literal expression. Does not warn when the
+/// cast wouldn't lose information.
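+///
+/// For example (illustrative): 'int i = 2.5;' warns because 2.5 is
+/// truncated to 2, while 'int i = 2.0;' converts exactly and is quiet.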
+void DiagnoseFloatingLiteralImpCast(Sema &S, FloatingLiteral *FL, QualType T,
+ SourceLocation CContext) {
+ // Try to convert the literal exactly to an integer. If we can, don't warn.
+ bool isExact = false;
+ const llvm::APFloat &Value = FL->getValue();
+ llvm::APSInt IntegerValue(S.Context.getIntWidth(T),
+ T->hasUnsignedIntegerRepresentation());
+ if (Value.convertToInteger(IntegerValue,
+ llvm::APFloat::rmTowardZero, &isExact)
+ == llvm::APFloat::opOK && isExact)
+ return;
+
+ // FIXME: Force the precision of the source value down so we don't print
+ // digits which are usually useless (we don't really care here if we
+ // truncate a digit by accident in edge cases). Ideally, APFloat::toString
+ // would automatically print the shortest representation, but it's a bit
+ // tricky to implement.
+ SmallString<16> PrettySourceValue;
+ unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics());
+ precision = (precision * 59 + 195) / 196;
+ Value.toString(PrettySourceValue, precision);
+
+ SmallString<16> PrettyTargetValue;
+ if (T->isSpecificBuiltinType(BuiltinType::Bool))
+ PrettyTargetValue = Value.isZero() ? "false" : "true";
+ else
+ IntegerValue.toString(PrettyTargetValue);
+
+ S.Diag(FL->getExprLoc(), diag::warn_impcast_literal_float_to_integer)
+ << FL->getType() << T.getUnqualifiedType() << PrettySourceValue
+ << PrettyTargetValue << FL->getSourceRange() << SourceRange(CContext);
+}
+
+std::string PrettyPrintInRange(const llvm::APSInt &Value, IntRange Range) {
+ if (!Range.Width) return "0";
+
+ llvm::APSInt ValueInRange = Value;
+ ValueInRange.setIsSigned(!Range.NonNegative);
+ ValueInRange = ValueInRange.trunc(Range.Width);
+ return ValueInRange.toString(10);
+}
+
+static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) {
+ if (!isa<ImplicitCastExpr>(Ex))
+ return false;
+
+ Expr *InnerE = Ex->IgnoreParenImpCasts();
+ const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr();
+ const Type *Source =
+ S.Context.getCanonicalType(InnerE->getType()).getTypePtr();
+ if (Target->isDependentType())
+ return false;
+
+ const BuiltinType *FloatCandidateBT =
+ dyn_cast<BuiltinType>(ToBool ? Source : Target);
+ const Type *BoolCandidateType = ToBool ? Target : Source;
+
+ return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) &&
+ FloatCandidateBT && (FloatCandidateBT->isFloatingPoint()));
+}
+
+void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall,
+ SourceLocation CC) {
+ unsigned NumArgs = TheCall->getNumArgs();
+ for (unsigned i = 0; i < NumArgs; ++i) {
+ Expr *CurrA = TheCall->getArg(i);
+ if (!IsImplicitBoolFloatConversion(S, CurrA, true))
+ continue;
+
+ bool IsSwapped = ((i > 0) &&
+ IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false));
+ IsSwapped |= ((i < (NumArgs - 1)) &&
+ IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false));
+ if (IsSwapped) {
+ // Warn on this floating-point to bool conversion.
+ DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(),
+ CurrA->getType(), CC,
+ diag::warn_impcast_floating_point_to_bool);
+ }
+ }
+}
+
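+// Warn when a null pointer constant is implicitly converted to an
+// integer type; e.g. (illustrative, C++, where NULL is __null):
+//   int n = NULL;   // warns and suggests '0' instead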
+static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
+ SourceLocation CC) {
+ if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer,
+ E->getExprLoc()))
+ return;
+
+ // Check for NULL (GNUNull) or nullptr (CXX11_nullptr).
+ const Expr::NullPointerConstantKind NullKind =
+ E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull);
+ if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr)
+ return;
+
+ // Return if the conversion to the target type is safe.
+ if (T->isAnyPointerType() || T->isBlockPointerType() ||
+ T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType())
+ return;
+
+ SourceLocation Loc = E->getSourceRange().getBegin();
+
+ // __null is usually wrapped in a macro. Go up a macro if that is the case.
+ if (NullKind == Expr::NPCK_GNUNull) {
+ if (Loc.isMacroID())
+ Loc = S.SourceMgr.getImmediateExpansionRange(Loc).first;
+ }
+
+ // Only warn if the null and context location are in the same macro expansion.
+ if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC))
+ return;
+
+ S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
+ << (NullKind == Expr::NPCK_CXX11_nullptr) << T << clang::SourceRange(CC)
+ << FixItHint::CreateReplacement(Loc,
+ S.getFixItZeroLiteralForType(T, Loc));
+}
+
+static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
+ ObjCArrayLiteral *ArrayLiteral);
+static void checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
+ ObjCDictionaryLiteral *DictionaryLiteral);
+
+/// Check a single element within a collection literal against the
+/// target element type.
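+///
+/// For example (illustrative): in 'NSArray<NSString *> *a = @[@1];'
+/// the element '@1' is an NSNumber and is diagnosed against the
+/// NSString element type.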
+static void checkObjCCollectionLiteralElement(Sema &S,
+ QualType TargetElementType,
+ Expr *Element,
+ unsigned ElementKind) {
+ // Skip a bitcast to 'id' or qualified 'id'.
+ if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) {
+ if (ICE->getCastKind() == CK_BitCast &&
+ ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>())
+ Element = ICE->getSubExpr();
+ }
+
+ QualType ElementType = Element->getType();
+ ExprResult ElementResult(Element);
+ if (ElementType->getAs<ObjCObjectPointerType>() &&
+ S.CheckSingleAssignmentConstraints(TargetElementType,
+ ElementResult,
+ false, false)
+ != Sema::Compatible) {
+ S.Diag(Element->getLocStart(),
+ diag::warn_objc_collection_literal_element)
+ << ElementType << ElementKind << TargetElementType
+ << Element->getSourceRange();
+ }
+
+ if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element))
+ checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral);
+ else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element))
+ checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral);
+}
+
+/// Check an Objective-C array literal being converted to the given
+/// target type.
+static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
+ ObjCArrayLiteral *ArrayLiteral) {
+ if (!S.NSArrayDecl)
+ return;
+
+ const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
+ if (!TargetObjCPtr)
+ return;
+
+ if (TargetObjCPtr->isUnspecialized() ||
+ TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
+ != S.NSArrayDecl->getCanonicalDecl())
+ return;
+
+ auto TypeArgs = TargetObjCPtr->getTypeArgs();
+ if (TypeArgs.size() != 1)
+ return;
+
+ QualType TargetElementType = TypeArgs[0];
+ for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) {
+ checkObjCCollectionLiteralElement(S, TargetElementType,
+ ArrayLiteral->getElement(I),
+ 0);
+ }
+}
+
+/// Check an Objective-C dictionary literal being converted to the given
+/// target type.
+static void checkObjCDictionaryLiteral(
+ Sema &S, QualType TargetType,
+ ObjCDictionaryLiteral *DictionaryLiteral) {
+ if (!S.NSDictionaryDecl)
+ return;
+
+ const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
+ if (!TargetObjCPtr)
+ return;
+
+ if (TargetObjCPtr->isUnspecialized() ||
+ TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
+ != S.NSDictionaryDecl->getCanonicalDecl())
+ return;
+
+ auto TypeArgs = TargetObjCPtr->getTypeArgs();
+ if (TypeArgs.size() != 2)
+ return;
+
+ QualType TargetKeyType = TypeArgs[0];
+ QualType TargetObjectType = TypeArgs[1];
+ for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) {
+ auto Element = DictionaryLiteral->getKeyValueElement(I);
+ checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1);
+ checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2);
+ }
+}
+
+void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
+ SourceLocation CC, bool *ICContext = nullptr) {
+ if (E->isTypeDependent() || E->isValueDependent()) return;
+
+ const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
+ const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
+ if (Source == Target) return;
+ if (Target->isDependentType()) return;
+
+ // If the conversion context location is invalid don't complain. We also
+ // don't want to emit a warning if the issue occurs from the expansion of
+ // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
+ // delay this check as long as possible. Once we detect we are in that
+ // scenario, we just return.
+ if (CC.isInvalid())
+ return;
+
+ // Diagnose implicit casts to bool.
+ if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
+ if (isa<StringLiteral>(E))
+ // Warn on string literal to bool. Checks for string literals in logical
+ // and expressions, for instance, assert(0 && "error here"), are
+ // prevented by a check in AnalyzeImplicitConversions().
+ return DiagnoseImpCast(S, E, T, CC,
+ diag::warn_impcast_string_literal_to_bool);
+ if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) ||
+ isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) {
+ // This covers the literal expressions that evaluate to Objective-C
+ // objects.
+ return DiagnoseImpCast(S, E, T, CC,
+ diag::warn_impcast_objective_c_literal_to_bool);
+ }
+ if (Source->isPointerType() || Source->canDecayToPointerType()) {
+ // Warn on pointer to bool conversion that is always true.
+ S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
+ SourceRange(CC));
+ }
+ }
+
+ // Check implicit casts from Objective-C collection literals to specialized
+ // collection types, e.g., NSArray<NSString *> *.
+ if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E))
+ checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral);
+ else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E))
+ checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral);
+
+ // Strip vector types.
+ if (isa<VectorType>(Source)) {
+ if (!isa<VectorType>(Target)) {
+ if (S.SourceMgr.isInSystemMacro(CC))
+ return;
+ return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar);
+ }
+
+ // If the vector cast is cast between two vectors of the same size, it is
+ // a bitcast, not a conversion.
+ if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target))
+ return;
+
+ Source = cast<VectorType>(Source)->getElementType().getTypePtr();
+ Target = cast<VectorType>(Target)->getElementType().getTypePtr();
+ }
+ if (auto VecTy = dyn_cast<VectorType>(Target))
+ Target = VecTy->getElementType().getTypePtr();
+
+ // Strip complex types.
+ if (isa<ComplexType>(Source)) {
+ if (!isa<ComplexType>(Target)) {
+ if (S.SourceMgr.isInSystemMacro(CC))
+ return;
+
+ return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_complex_scalar);
+ }
+
+ Source = cast<ComplexType>(Source)->getElementType().getTypePtr();
+ Target = cast<ComplexType>(Target)->getElementType().getTypePtr();
+ }
+
+ const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source);
+ const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target);
+
+ // If the source is floating point...
+ if (SourceBT && SourceBT->isFloatingPoint()) {
+ // ...and the target is floating point...
+ if (TargetBT && TargetBT->isFloatingPoint()) {
+ // ...then warn if we're dropping FP rank.
+
+ // Builtin FP kinds are ordered by increasing FP rank.
+ if (SourceBT->getKind() > TargetBT->getKind()) {
+ // Don't warn about float constants that are precisely
+ // representable in the target type.
+ Expr::EvalResult result;
+ if (E->EvaluateAsRValue(result, S.Context)) {
+ // Value might be a float, a float vector, or a float complex.
+ if (IsSameFloatAfterCast(result.Val,
+ S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)),
+ S.Context.getFloatTypeSemantics(QualType(SourceBT, 0))))
+ return;
+ }
+
+ if (S.SourceMgr.isInSystemMacro(CC))
+ return;
+
+ DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
+
+ }
+ // ... or possibly if we're increasing rank, too
+ else if (TargetBT->getKind() > SourceBT->getKind()) {
+ if (S.SourceMgr.isInSystemMacro(CC))
+ return;
+
+ DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion);
+ }
+ return;
+ }
+
+ // If the target is integral, always warn.
+ if (TargetBT && TargetBT->isInteger()) {
+ if (S.SourceMgr.isInSystemMacro(CC))
+ return;
+
+ Expr *InnerE = E->IgnoreParenImpCasts();
+ // We also want to warn on, e.g., "int i = -1.234"
+ if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE))
+ if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus)
+ InnerE = UOp->getSubExpr()->IgnoreParenImpCasts();
+
+ if (FloatingLiteral *FL = dyn_cast<FloatingLiteral>(InnerE)) {
+ DiagnoseFloatingLiteralImpCast(S, FL, T, CC);
+ } else {
+ DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_integer);
+ }
+ }
+
+ // Detect the case where a call result is converted from floating-point
+ // to bool, and the final argument to the call is converted from bool, to
+ // discover this typo:
+ //
+ // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;"
+ //
+ // FIXME: This is an incredibly special case; is there some more general
+ // way to detect this class of misplaced-parentheses bug?
+ if (Target->isBooleanType() && isa<CallExpr>(E)) {
+ // Check last argument of function call to see if it is an
+ // implicit cast from a type matching the type the result
+ // is being cast to.
+ CallExpr *CEx = cast<CallExpr>(E);
+ if (unsigned NumArgs = CEx->getNumArgs()) {
+ Expr *LastA = CEx->getArg(NumArgs - 1);
+ Expr *InnerE = LastA->IgnoreParenImpCasts();
+ if (isa<ImplicitCastExpr>(LastA) &&
+ InnerE->getType()->isBooleanType()) {
+ // Warn on this floating-point to bool conversion
+ DiagnoseImpCast(S, E, T, CC,
+ diag::warn_impcast_floating_point_to_bool);
+ }
+ }
+ }
+ return;
+ }
+
+ DiagnoseNullConversion(S, E, T, CC);
+
+ if (!Source->isIntegerType() || !Target->isIntegerType())
+ return;
+
+ // TODO: remove this early return once the false positives for constant->bool
+ // in templates, macros, etc, are reduced or removed.
+ if (Target->isSpecificBuiltinType(BuiltinType::Bool))
+ return;
+
+ IntRange SourceRange = GetExprRange(S.Context, E);
+ IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target);
+
+ if (SourceRange.Width > TargetRange.Width) {
+ // If the source is a constant, use a default-on diagnostic.
+ // TODO: this should happen for bitfield stores, too.
+ llvm::APSInt Value(32);
+ if (E->isIntegerConstantExpr(Value, S.Context)) {
+ if (S.SourceMgr.isInSystemMacro(CC))
+ return;
+
+ std::string PrettySourceValue = Value.toString(10);
+ std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
+
+ S.DiagRuntimeBehavior(E->getExprLoc(), E,
+ S.PDiag(diag::warn_impcast_integer_precision_constant)
+ << PrettySourceValue << PrettyTargetValue
+ << E->getType() << T << E->getSourceRange()
+ << clang::SourceRange(CC));
+ return;
+ }
+
+ // People want to build with -Wshorten-64-to-32 and not -Wconversion.
+ if (S.SourceMgr.isInSystemMacro(CC))
+ return;
+
+ if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
+ return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
+ /* pruneControlFlow */ true);
+ return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
+ }
+
+ if ((TargetRange.NonNegative && !SourceRange.NonNegative) ||
+ (!TargetRange.NonNegative && SourceRange.NonNegative &&
+ SourceRange.Width == TargetRange.Width)) {
+
+ if (S.SourceMgr.isInSystemMacro(CC))
+ return;
+
+ unsigned DiagID = diag::warn_impcast_integer_sign;
+
+ // Traditionally, gcc has warned about this under -Wsign-compare.
+ // We also want to warn about it in -Wconversion.
+ // So if -Wconversion is off, use a completely identical diagnostic
+ // in the sign-compare group.
+ // The conditional-checking code uses ICContext to learn that such a
+ // diagnostic was emitted for one of the conditional's branches.
+ if (ICContext) {
+ DiagID = diag::warn_impcast_integer_sign_conditional;
+ *ICContext = true;
+ }
+
+ return DiagnoseImpCast(S, E, T, CC, DiagID);
+ }
+
+ // Diagnose conversions between different enumeration types.
+ // In C, we pretend that the type of an EnumConstantDecl is its enumeration
+ // type, to give us better diagnostics.
+ QualType SourceType = E->getType();
+ if (!S.getLangOpts().CPlusPlus) {
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
+ EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext());
+ SourceType = S.Context.getTypeDeclType(Enum);
+ Source = S.Context.getCanonicalType(SourceType).getTypePtr();
+ }
+ }
+
+ if (const EnumType *SourceEnum = Source->getAs<EnumType>())
+ if (const EnumType *TargetEnum = Target->getAs<EnumType>())
+ if (SourceEnum->getDecl()->hasNameForLinkage() &&
+ TargetEnum->getDecl()->hasNameForLinkage() &&
+ SourceEnum != TargetEnum) {
+ if (S.SourceMgr.isInSystemMacro(CC))
+ return;
+
+ return DiagnoseImpCast(S, E, SourceType, T, CC,
+ diag::warn_impcast_different_enum_types);
+ }
+
+ return;
+}
+
+void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
+ SourceLocation CC, QualType T);
+
+void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
+ SourceLocation CC, bool &ICContext) {
+ E = E->IgnoreParenImpCasts();
+
+ if (isa<ConditionalOperator>(E))
+ return CheckConditionalOperator(S, cast<ConditionalOperator>(E), CC, T);
+
+ AnalyzeImplicitConversions(S, E, CC);
+ if (E->getType() != T)
+ return CheckImplicitConversion(S, E, T, CC, &ICContext);
+ return;
+}
+
+void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
+ SourceLocation CC, QualType T) {
+ AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc());
+
+ bool Suspicious = false;
+ CheckConditionalOperand(S, E->getTrueExpr(), T, CC, Suspicious);
+ CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious);
+
+ // If -Wconversion would have warned about either of the candidates
+ // for a signedness conversion to the context type...
+ if (!Suspicious) return;
+
+ // ...but it's currently ignored...
+ if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC))
+ return;
+
+ // ...then check whether it would have warned about either of the
+ // candidates for a signedness conversion to the condition type.
+ if (E->getType() == T) return;
+
+ Suspicious = false;
+ CheckImplicitConversion(S, E->getTrueExpr()->IgnoreParenImpCasts(),
+ E->getType(), CC, &Suspicious);
+ if (!Suspicious)
+ CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(),
+ E->getType(), CC, &Suspicious);
+}
+
+/// CheckBoolLikeConversion - Check conversion of given expression to boolean.
+/// Input argument E is a logical expression.
+static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) {
+ if (S.getLangOpts().Bool)
+ return;
+ CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
+}
+
+/// AnalyzeImplicitConversions - Find and report any interesting
+/// implicit conversions in the given expression. There are a couple
+/// of competing diagnostics here, -Wconversion and -Wsign-compare.
+void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC) {
+ QualType T = OrigE->getType();
+ Expr *E = OrigE->IgnoreParenImpCasts();
+
+ if (E->isTypeDependent() || E->isValueDependent())
+ return;
+
+ // For conditional operators, we analyze the arguments as if they
+ // were being fed directly into the output.
+ if (isa<ConditionalOperator>(E)) {
+ ConditionalOperator *CO = cast<ConditionalOperator>(E);
+ CheckConditionalOperator(S, CO, CC, T);
+ return;
+ }
+
+ // Check implicit argument conversions for function calls.
+ if (CallExpr *Call = dyn_cast<CallExpr>(E))
+ CheckImplicitArgumentConversions(S, Call, CC);
+
+ // Go ahead and check any implicit conversions we might have skipped.
+ // The non-canonical typecheck is just an optimization;
+ // CheckImplicitConversion will filter out dead implicit conversions.
+ if (E->getType() != T)
+ CheckImplicitConversion(S, E, T, CC);
+
+ // Now continue drilling into this expression.
+
+ if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
+ // The bound subexpressions in a PseudoObjectExpr are not reachable
+ // as transitive children.
+ // FIXME: Use a more uniform representation for this.
+ for (auto *SE : POE->semantics())
+ if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE))
+ AnalyzeImplicitConversions(S, OVE->getSourceExpr(), CC);
+ }
+
+ // Skip past explicit casts.
+ if (isa<ExplicitCastExpr>(E)) {
+ E = cast<ExplicitCastExpr>(E)->getSubExpr()->IgnoreParenImpCasts();
+ return AnalyzeImplicitConversions(S, E, CC);
+ }
+
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ // Do a somewhat different check with comparison operators.
+ if (BO->isComparisonOp())
+ return AnalyzeComparison(S, BO);
+
+ // And with simple assignments.
+ if (BO->getOpcode() == BO_Assign)
+ return AnalyzeAssignment(S, BO);
+ }
+
+ // These break the otherwise-useful invariant below. Fortunately,
+ // we don't really need to recurse into them, because any internal
+ // expressions should have been analyzed already when they were
+ // built into statements.
+ if (isa<StmtExpr>(E)) return;
+
+ // Don't descend into unevaluated contexts.
+ if (isa<UnaryExprOrTypeTraitExpr>(E)) return;
+
+ // Now just recurse over the expression's children.
+ CC = E->getExprLoc();
+ BinaryOperator *BO = dyn_cast<BinaryOperator>(E);
+ bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd;
+ for (Stmt *SubStmt : E->children()) {
+ Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt);
+ if (!ChildExpr)
+ continue;
+
+ if (IsLogicalAndOperator &&
+ isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
+ // Ignore checking string literals that are in logical and operators.
+ // This is a common pattern for asserts.
+ continue;
+ AnalyzeImplicitConversions(S, ChildExpr, CC);
+ }
+
+ if (BO && BO->isLogicalOp()) {
+ Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
+ if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
+ ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
+
+ SubExpr = BO->getRHS()->IgnoreParenImpCasts();
+ if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
+ ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
+ }
+
+ if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E))
+ if (U->getOpcode() == UO_LNot)
+ ::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
+}
+
+} // end anonymous namespace
+
+// Helper function for Sema::DiagnoseAlwaysNonNullPointer.
+// Returns true when emitting a warning about taking the address of a reference.
+static bool CheckForReference(Sema &SemaRef, const Expr *E,
+ PartialDiagnostic PD) {
+ E = E->IgnoreParenImpCasts();
+
+ const FunctionDecl *FD = nullptr;
+
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (!DRE->getDecl()->getType()->isReferenceType())
+ return false;
+ } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
+ if (!M->getMemberDecl()->getType()->isReferenceType())
+ return false;
+ } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
+ if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
+ return false;
+ FD = Call->getDirectCallee();
+ } else {
+ return false;
+ }
+
+ SemaRef.Diag(E->getExprLoc(), PD);
+
+ // If possible, point to location of function.
+ if (FD) {
+ SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
+ }
+
+ return true;
+}
+
+// Returns true if the SourceLocation is expanded from any macro body.
+// Returns false if the SourceLocation is invalid, is not in a macro
+// expansion, or is expanded from a top-level macro argument.
+static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
+ if (Loc.isInvalid())
+ return false;
+
+ while (Loc.isMacroID()) {
+ if (SM.isMacroBodyExpansion(Loc))
+ return true;
+ Loc = SM.getImmediateMacroCallerLoc(Loc);
+ }
+
+ return false;
+}
+
+/// \brief Diagnose pointers that are always non-null.
+/// \param E the expression containing the pointer
+/// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
+/// compared to a null pointer
+/// \param IsEqual True when the comparison is equal to a null pointer
+/// \param Range Extra SourceRange to highlight in the diagnostic
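+///
+/// For example (illustrative):
+/// \code
+///   void f(int &r) { if (&r == nullptr) { } }   // '&r' is never null
+/// \endcode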
+void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
+ Expr::NullPointerConstantKind NullKind,
+ bool IsEqual, SourceRange Range) {
+ if (!E)
+ return;
+
+ // Don't warn inside macros.
+ if (E->getExprLoc().isMacroID()) {
+ const SourceManager &SM = getSourceManager();
+ if (IsInAnyMacroBody(SM, E->getExprLoc()) ||
+ IsInAnyMacroBody(SM, Range.getBegin()))
+ return;
+ }
+ E = E->IgnoreImpCasts();
+
+ const bool IsCompare = NullKind != Expr::NPCK_NotNull;
+
+ if (isa<CXXThisExpr>(E)) {
+ unsigned DiagID = IsCompare ? diag::warn_this_null_compare
+ : diag::warn_this_bool_conversion;
+ Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual;
+ return;
+ }
+
+ bool IsAddressOf = false;
+
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ if (UO->getOpcode() != UO_AddrOf)
+ return;
+ IsAddressOf = true;
+ E = UO->getSubExpr();
+ }
+
+ if (IsAddressOf) {
+ unsigned DiagID = IsCompare
+ ? diag::warn_address_of_reference_null_compare
+ : diag::warn_address_of_reference_bool_conversion;
+ PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range
+ << IsEqual;
+ if (CheckForReference(*this, E, PD)) {
+ return;
+ }
+ }
+
+ auto ComplainAboutNonnullParamOrCall = [&](bool IsParam) {
+ std::string Str;
+ llvm::raw_string_ostream S(Str);
+ E->printPretty(S, nullptr, getPrintingPolicy());
+ unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare
+ : diag::warn_cast_nonnull_to_bool;
+ Diag(E->getExprLoc(), DiagID) << IsParam << S.str()
+ << E->getSourceRange() << Range << IsEqual;
+ };
+
+ // If we have a CallExpr that is tagged with returns_nonnull, we can complain.
+ if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) {
+ if (auto *Callee = Call->getDirectCallee()) {
+ if (Callee->hasAttr<ReturnsNonNullAttr>()) {
+ ComplainAboutNonnullParamOrCall(false);
+ return;
+ }
+ }
+ }
+
+ // Expect to find a single Decl. Skip anything more complicated.
+ ValueDecl *D = nullptr;
+ if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) {
+ D = R->getDecl();
+ } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) {
+ D = M->getMemberDecl();
+ }
+
+ // Weak Decls can be null.
+ if (!D || D->isWeak())
+ return;
+
+ // Check for parameter decl with nonnull attribute
+ if (const auto* PV = dyn_cast<ParmVarDecl>(D)) {
+ if (getCurFunction() &&
+ !getCurFunction()->ModifiedNonNullParams.count(PV)) {
+ if (PV->hasAttr<NonNullAttr>()) {
+ ComplainAboutNonnullParamOrCall(true);
+ return;
+ }
+
+ if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
+ auto ParamIter = std::find(FD->param_begin(), FD->param_end(), PV);
+ assert(ParamIter != FD->param_end());
+ unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);
+
+ for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
+ if (!NonNull->args_size()) {
+ ComplainAboutNonnullParamOrCall(true);
+ return;
+ }
+
+ for (unsigned ArgNo : NonNull->args()) {
+ if (ArgNo == ParamNo) {
+ ComplainAboutNonnullParamOrCall(true);
+ return;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ QualType T = D->getType();
+ const bool IsArray = T->isArrayType();
+ const bool IsFunction = T->isFunctionType();
+
+ // Taking the address of a function silences the function warning.
+ if (IsAddressOf && IsFunction) {
+ return;
+ }
+
+ // Found nothing.
+ if (!IsAddressOf && !IsFunction && !IsArray)
+ return;
+
+ // Pretty print the expression for the diagnostic.
+ std::string Str;
+ llvm::raw_string_ostream S(Str);
+ E->printPretty(S, nullptr, getPrintingPolicy());
+
+ unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
+ : diag::warn_impcast_pointer_to_bool;
+ enum {
+ AddressOf,
+ FunctionPointer,
+ ArrayPointer
+ } DiagType;
+ if (IsAddressOf)
+ DiagType = AddressOf;
+ else if (IsFunction)
+ DiagType = FunctionPointer;
+ else if (IsArray)
+ DiagType = ArrayPointer;
+ else
+ llvm_unreachable("Could not determine diagnostic.");
+ Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
+ << Range << IsEqual;
+
+ if (!IsFunction)
+ return;
+
+ // Suggest '&' to silence the function warning.
+ Diag(E->getExprLoc(), diag::note_function_warning_silence)
+ << FixItHint::CreateInsertion(E->getLocStart(), "&");
+
+ // Check to see if '()' fixit should be emitted.
+ QualType ReturnType;
+ UnresolvedSet<4> NonTemplateOverloads;
+ tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
+ if (ReturnType.isNull())
+ return;
+
+ if (IsCompare) {
+ // There are two cases here. If there is a null constant, only suggest
+ // the fixit for a pointer return type. If the null is 0, suggest the
+ // fixit if the return type is a pointer or an integer type.
+ if (!ReturnType->isPointerType()) {
+ if (NullKind == Expr::NPCK_ZeroExpression ||
+ NullKind == Expr::NPCK_ZeroLiteral) {
+ if (!ReturnType->isIntegerType())
+ return;
+ } else {
+ return;
+ }
+ }
+ } else { // !IsCompare
+ // For function to bool, only suggest if the function pointer has bool
+ // return type.
+ if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
+ return;
+ }
+ Diag(E->getExprLoc(), diag::note_function_to_function_call)
+ << FixItHint::CreateInsertion(getLocForEndOfToken(E->getLocEnd()), "()");
+}
+
+
+/// Diagnoses "dangerous" implicit conversions within the given
+/// expression (which is a full expression). Implements -Wconversion
+/// and -Wsign-compare.
+///
+/// \param CC the "context" location of the implicit conversion, i.e.
+/// the location of the syntactic entity requiring the implicit
+/// conversion
+void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
+ // Don't diagnose in unevaluated contexts.
+ if (isUnevaluatedContext())
+ return;
+
+ // Don't diagnose for value- or type-dependent expressions.
+ if (E->isTypeDependent() || E->isValueDependent())
+ return;
+
+ // Check for array bounds violations in cases where the check isn't triggered
+ // elsewhere for other Expr types (like BinaryOperators), e.g. when an
+ // ArraySubscriptExpr is on the RHS of a variable initialization.
+ CheckArrayAccess(E);
+
+ // This is not the right CC for (e.g.) a variable initialization.
+ AnalyzeImplicitConversions(*this, E, CC);
+}
+
+/// CheckBoolLikeConversion - Check conversion of given expression to boolean.
+/// Input argument E is a logical expression.
+void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
+ ::CheckBoolLikeConversion(*this, E, CC);
+}
+
+/// Diagnose when an expression is an integer constant expression whose
+/// evaluation results in integer overflow.
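+///
+/// For example (illustrative): an initializer such as 'INT_MAX + 1'
+/// is evaluated here and flagged as overflowing 'int'.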
+void Sema::CheckForIntOverflow(Expr *E) {
+ if (isa<BinaryOperator>(E->IgnoreParenCasts()))
+ E->IgnoreParenCasts()->EvaluateForOverflow(Context);
+}
+
+namespace {
+/// \brief Visitor for expressions which looks for unsequenced operations on the
+/// same object.
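+///
+/// For example (illustrative): before C++17, 'i = i++ + 1;' modifies
+/// 'i' twice without sequencing and is diagnosed here.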
+class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
+ typedef EvaluatedExprVisitor<SequenceChecker> Base;
+
+ /// \brief A tree of sequenced regions within an expression. Two regions are
+ /// unsequenced if one is an ancestor or a descendant of the other. When we
+ /// finish processing an expression with sequencing, such as a comma
+ /// expression, we fold its tree nodes into its parent, since they are
+ /// unsequenced with respect to nodes we will visit later.
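+ ///
+ /// For example (illustrative): in 'f((a, b), c)', 'a' is sequenced
+ /// before 'b' by the comma operator, but both remain unsequenced with
+ /// respect to 'c'.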
+ class SequenceTree {
+ struct Value {
+ explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {}
+ unsigned Parent : 31;
+ bool Merged : 1;
+ };
+ SmallVector<Value, 8> Values;
+
+ public:
+ /// \brief A region within an expression which may be sequenced with respect
+ /// to some other region.
+ class Seq {
+ explicit Seq(unsigned N) : Index(N) {}
+ unsigned Index;
+ friend class SequenceTree;
+ public:
+ Seq() : Index(0) {}
+ };
+
+ SequenceTree() { Values.push_back(Value(0)); }
+ Seq root() const { return Seq(0); }
+
+ /// \brief Create a new sequence of operations, which is an unsequenced
+ /// subset of \p Parent. This sequence of operations is sequenced with
+ /// respect to other children of \p Parent.
+ Seq allocate(Seq Parent) {
+ Values.push_back(Value(Parent.Index));
+ return Seq(Values.size() - 1);
+ }
+
+ /// \brief Merge a sequence of operations into its parent.
+ void merge(Seq S) {
+ Values[S.Index].Merged = true;
+ }
+
+ /// \brief Determine whether two operations are unsequenced. This operation
+ /// is asymmetric: \p Cur should be the more recent sequence, and \p Old
+ /// should have been merged into its parent as appropriate.
+ bool isUnsequenced(Seq Cur, Seq Old) {
+ unsigned C = representative(Cur.Index);
+ unsigned Target = representative(Old.Index);
+ while (C >= Target) {
+ if (C == Target)
+ return true;
+ C = Values[C].Parent;
+ }
+ return false;
+ }
+
+ private:
+ /// \brief Pick a representative for a sequence.
+ unsigned representative(unsigned K) {
+ if (Values[K].Merged)
+ // Perform path compression as we go.
+ return Values[K].Parent = representative(Values[K].Parent);
+ return K;
+ }
+ };
+
+ /// An object for which we can track unsequenced uses.
+ typedef NamedDecl *Object;
+
+ /// Different flavors of object usage which we track. We only track the
+ /// least-sequenced usage of each kind.
+ enum UsageKind {
+ /// A read of an object. Multiple unsequenced reads are OK.
+ UK_Use,
+ /// A modification of an object which is sequenced before the value
+ /// computation of the expression, such as ++n in C++.
+ UK_ModAsValue,
+ /// A modification of an object which is not sequenced before the value
+ /// computation of the expression, such as n++.
+ UK_ModAsSideEffect,
+
+ UK_Count = UK_ModAsSideEffect + 1
+ };
+
+ struct Usage {
+ Usage() : Use(nullptr), Seq() {}
+ Expr *Use;
+ SequenceTree::Seq Seq;
+ };
+
+ struct UsageInfo {
+ UsageInfo() : Diagnosed(false) {}
+ Usage Uses[UK_Count];
+ /// Have we issued a diagnostic for this variable already?
+ bool Diagnosed;
+ };
+ typedef llvm::SmallDenseMap<Object, UsageInfo, 16> UsageInfoMap;
+
+ Sema &SemaRef;
+ /// Sequenced regions within the expression.
+ SequenceTree Tree;
+ /// Declaration modifications and references which we have seen.
+ UsageInfoMap UsageMap;
+ /// The region we are currently within.
+ SequenceTree::Seq Region;
+ /// Filled in with declarations which were modified as a side-effect
+ /// (that is, post-increment operations).
+ SmallVectorImpl<std::pair<Object, Usage> > *ModAsSideEffect;
+ /// Expressions to check later. We defer checking these to reduce
+ /// stack usage.
+ SmallVectorImpl<Expr *> &WorkList;
+
+ /// RAII object wrapping the visitation of a sequenced subexpression of an
+ /// expression. At the end of this process, the side-effects of the evaluation
+ /// become sequenced with respect to the value computation of the result, so
+ /// we downgrade any UK_ModAsSideEffect within the evaluation to
+ /// UK_ModAsValue.
+ struct SequencedSubexpression {
+ SequencedSubexpression(SequenceChecker &Self)
+ : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) {
+ Self.ModAsSideEffect = &ModAsSideEffect;
+ }
+ ~SequencedSubexpression() {
+ for (auto MI = ModAsSideEffect.rbegin(), ME = ModAsSideEffect.rend();
+ MI != ME; ++MI) {
+ UsageInfo &U = Self.UsageMap[MI->first];
+ auto &SideEffectUsage = U.Uses[UK_ModAsSideEffect];
+ Self.addUsage(U, MI->first, SideEffectUsage.Use, UK_ModAsValue);
+ SideEffectUsage = MI->second;
+ }
+ Self.ModAsSideEffect = OldModAsSideEffect;
+ }
+
+ SequenceChecker &Self;
+ SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect;
+ SmallVectorImpl<std::pair<Object, Usage> > *OldModAsSideEffect;
+ };
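+
+ // For example, once the left-hand side of a comma operator has been
+ // visited, a pending 'n++' side effect recorded there is known to be
+ // sequenced before everything that follows, so it is re-recorded as
+ // UK_ModAsValue.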
+
+ /// RAII object wrapping the visitation of a subexpression which we might
+ /// choose to evaluate as a constant. If any subexpression is evaluated and
+ /// found to be non-constant, this allows us to suppress the evaluation of
+ /// the outer expression.
+ class EvaluationTracker {
+ public:
+ EvaluationTracker(SequenceChecker &Self)
+ : Self(Self), Prev(Self.EvalTracker), EvalOK(true) {
+ Self.EvalTracker = this;
+ }
+ ~EvaluationTracker() {
+ Self.EvalTracker = Prev;
+ if (Prev)
+ Prev->EvalOK &= EvalOK;
+ }
+
+ bool evaluate(const Expr *E, bool &Result) {
+ if (!EvalOK || E->isValueDependent())
+ return false;
+ EvalOK = E->EvaluateAsBooleanCondition(Result, Self.SemaRef.Context);
+ return EvalOK;
+ }
+
+ private:
+ SequenceChecker &Self;
+ EvaluationTracker *Prev;
+ bool EvalOK;
+ } *EvalTracker;
+
+ /// \brief Find the object which is produced by the specified expression,
+ /// if any.
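+ /// For example, '++x' (when looking for a modified object) maps to 'x',
+ /// 'a = b' maps to 'a', '(a, b)' maps to the object named by 'b', and
+ /// 'this->n' maps to the member 'n'.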
+ Object getObject(Expr *E, bool Mod) const {
+ E = E->IgnoreParenCasts();
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec))
+ return getObject(UO->getSubExpr(), Mod);
+ } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_Comma)
+ return getObject(BO->getRHS(), Mod);
+ if (Mod && BO->isAssignmentOp())
+ return getObject(BO->getLHS(), Mod);
+ } else if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ // FIXME: Check for more interesting cases, like "x.n = ++x.n".
+ if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts()))
+ return ME->getMemberDecl();
+ } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ // FIXME: If this is a reference, map through to its value.
+ return DRE->getDecl();
+ return nullptr;
+ }
+
+ /// \brief Note that an object was modified or used by an expression.
+ void addUsage(UsageInfo &UI, Object O, Expr *Ref, UsageKind UK) {
+ Usage &U = UI.Uses[UK];
+ if (!U.Use || !Tree.isUnsequenced(Region, U.Seq)) {
+ if (UK == UK_ModAsSideEffect && ModAsSideEffect)
+ ModAsSideEffect->push_back(std::make_pair(O, U));
+ U.Use = Ref;
+ U.Seq = Region;
+ }
+ }
+ /// \brief Check whether a modification or use conflicts with a prior usage.
+ void checkUsage(Object O, UsageInfo &UI, Expr *Ref, UsageKind OtherKind,
+ bool IsModMod) {
+ if (UI.Diagnosed)
+ return;
+
+ const Usage &U = UI.Uses[OtherKind];
+ if (!U.Use || !Tree.isUnsequenced(Region, U.Seq))
+ return;
+
+ Expr *Mod = U.Use;
+ Expr *ModOrUse = Ref;
+ if (OtherKind == UK_Use)
+ std::swap(Mod, ModOrUse);
+
+ SemaRef.Diag(Mod->getExprLoc(),
+ IsModMod ? diag::warn_unsequenced_mod_mod
+ : diag::warn_unsequenced_mod_use)
+ << O << SourceRange(ModOrUse->getExprLoc());
+ UI.Diagnosed = true;
+ }
+
+ void notePreUse(Object O, Expr *Use) {
+ UsageInfo &U = UsageMap[O];
+ // Uses conflict with other modifications.
+ checkUsage(O, U, Use, UK_ModAsValue, false);
+ }
+ void notePostUse(Object O, Expr *Use) {
+ UsageInfo &U = UsageMap[O];
+ checkUsage(O, U, Use, UK_ModAsSideEffect, false);
+ addUsage(U, O, Use, UK_Use);
+ }
+
+ void notePreMod(Object O, Expr *Mod) {
+ UsageInfo &U = UsageMap[O];
+ // Modifications conflict with other modifications and with uses.
+ checkUsage(O, U, Mod, UK_ModAsValue, true);
+ checkUsage(O, U, Mod, UK_Use, false);
+ }
+ void notePostMod(Object O, Expr *Use, UsageKind UK) {
+ UsageInfo &U = UsageMap[O];
+ checkUsage(O, U, Use, UK_ModAsSideEffect, true);
+ addUsage(U, O, Use, UK);
+ }
+
+public:
+ SequenceChecker(Sema &S, Expr *E, SmallVectorImpl<Expr *> &WorkList)
+ : Base(S.Context), SemaRef(S), Region(Tree.root()),
+ ModAsSideEffect(nullptr), WorkList(WorkList), EvalTracker(nullptr) {
+ Visit(E);
+ }
+
+ void VisitStmt(Stmt *S) {
+ // Skip all statements which aren't expressions for now.
+ }
+
+ void VisitExpr(Expr *E) {
+ // By default, just recurse to evaluated subexpressions.
+ Base::VisitStmt(E);
+ }
+
+ void VisitCastExpr(CastExpr *E) {
+ Object O = Object();
+ if (E->getCastKind() == CK_LValueToRValue)
+ O = getObject(E->getSubExpr(), false);
+
+ if (O)
+ notePreUse(O, E);
+ VisitExpr(E);
+ if (O)
+ notePostUse(O, E);
+ }
+
+ void VisitBinComma(BinaryOperator *BO) {
+ // C++11 [expr.comma]p1:
+ // Every value computation and side effect associated with the left
+ // expression is sequenced before every value computation and side
+ // effect associated with the right expression.
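+ //
+ // For example, in '(i++, i++)' the two increments are sequenced and not
+ // diagnosed, unlike in 'f(i++, i++)', where both arguments share a
+ // single region and we warn.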
+ SequenceTree::Seq LHS = Tree.allocate(Region);
+ SequenceTree::Seq RHS = Tree.allocate(Region);
+ SequenceTree::Seq OldRegion = Region;
+
+ {
+ SequencedSubexpression SeqLHS(*this);
+ Region = LHS;
+ Visit(BO->getLHS());
+ }
+
+ Region = RHS;
+ Visit(BO->getRHS());
+
+ Region = OldRegion;
+
+ // Forget that LHS and RHS are sequenced. They are both unsequenced
+ // with respect to other stuff.
+ Tree.merge(LHS);
+ Tree.merge(RHS);
+ }
+
+ void VisitBinAssign(BinaryOperator *BO) {
+ // The modification is sequenced after the value computation of the LHS
+ // and RHS, so check it before inspecting the operands and update the
+ // map afterwards.
+ Object O = getObject(BO->getLHS(), true);
+ if (!O)
+ return VisitExpr(BO);
+
+ notePreMod(O, BO);
+
+ // C++11 [expr.ass]p7:
+ // E1 op= E2 is equivalent to E1 = E1 op E2, except that E1 is evaluated
+ // only once.
+ //
+ // Therefore, for a compound assignment operator, O is considered used
+ // everywhere except within the evaluation of E1 itself.
+ if (isa<CompoundAssignOperator>(BO))
+ notePreUse(O, BO);
+
+ Visit(BO->getLHS());
+
+ if (isa<CompoundAssignOperator>(BO))
+ notePostUse(O, BO);
+
+ Visit(BO->getRHS());
+
+ // C++11 [expr.ass]p1:
+ // the assignment is sequenced [...] before the value computation of the
+ // assignment expression.
+ // C11 6.5.16/3 has no such rule.
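+ //
+ // For example, 'i = i++' is diagnosed as two unsequenced modifications
+ // of 'i', while 'i = ++i' is well-defined in C++11.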
+ notePostMod(O, BO, SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
+ : UK_ModAsSideEffect);
+ }
+ void VisitCompoundAssignOperator(CompoundAssignOperator *CAO) {
+ VisitBinAssign(CAO);
+ }
+
+ void VisitUnaryPreInc(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
+ void VisitUnaryPreDec(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
+ void VisitUnaryPreIncDec(UnaryOperator *UO) {
+ Object O = getObject(UO->getSubExpr(), true);
+ if (!O)
+ return VisitExpr(UO);
+
+ notePreMod(O, UO);
+ Visit(UO->getSubExpr());
+ // C++11 [expr.pre.incr]p1:
+ // the expression ++x is equivalent to x+=1
+ notePostMod(O, UO, SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
+ : UK_ModAsSideEffect);
+ }
+
+ void VisitUnaryPostInc(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
+ void VisitUnaryPostDec(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
+ void VisitUnaryPostIncDec(UnaryOperator *UO) {
+ Object O = getObject(UO->getSubExpr(), true);
+ if (!O)
+ return VisitExpr(UO);
+
+ notePreMod(O, UO);
+ Visit(UO->getSubExpr());
+ notePostMod(O, UO, UK_ModAsSideEffect);
+ }
+
+ /// Don't visit the RHS of '&&' or '||' if it might not be evaluated.
+ void VisitBinLOr(BinaryOperator *BO) {
+ // The side-effects of the LHS of an '||' are sequenced before the
+ // value computation of the RHS, and hence before the value computation
+ // of the '||' itself, unless the LHS evaluates to nonzero. We treat them
+ // as if they were unconditionally sequenced.
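+ //
+ // For example, 'n || n++' is never diagnosed: the read of 'n' on the
+ // left is sequenced before the increment on the right.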
+ EvaluationTracker Eval(*this);
+ {
+ SequencedSubexpression Sequenced(*this);
+ Visit(BO->getLHS());
+ }
+
+ bool Result;
+ if (Eval.evaluate(BO->getLHS(), Result)) {
+ if (!Result)
+ Visit(BO->getRHS());
+ } else {
+ // Check for unsequenced operations in the RHS, treating it as an
+ // entirely separate evaluation.
+ //
+ // FIXME: If there are operations in the RHS which are unsequenced
+ // with respect to operations outside the RHS, and those operations
+ // are unconditionally evaluated, diagnose them.
+ WorkList.push_back(BO->getRHS());
+ }
+ }
+ void VisitBinLAnd(BinaryOperator *BO) {
+ EvaluationTracker Eval(*this);
+ {
+ SequencedSubexpression Sequenced(*this);
+ Visit(BO->getLHS());
+ }
+
+ bool Result;
+ if (Eval.evaluate(BO->getLHS(), Result)) {
+ if (Result)
+ Visit(BO->getRHS());
+ } else {
+ WorkList.push_back(BO->getRHS());
+ }
+ }
+
+ // Only visit the condition, unless we can be sure which subexpression will
+ // be chosen.
+ void VisitAbstractConditionalOperator(AbstractConditionalOperator *CO) {
+ EvaluationTracker Eval(*this);
+ {
+ SequencedSubexpression Sequenced(*this);
+ Visit(CO->getCond());
+ }
+
+ bool Result;
+ if (Eval.evaluate(CO->getCond(), Result))
+ Visit(Result ? CO->getTrueExpr() : CO->getFalseExpr());
+ else {
+ WorkList.push_back(CO->getTrueExpr());
+ WorkList.push_back(CO->getFalseExpr());
+ }
+ }
+
+ void VisitCallExpr(CallExpr *CE) {
+ // C++11 [intro.execution]p15:
+ // When calling a function [...], every value computation and side effect
+ // associated with any argument expression, or with the postfix expression
+ // designating the called function, is sequenced before execution of every
+ // expression or statement in the body of the function [and thus before
+ // the value computation of its result].
+ SequencedSubexpression Sequenced(*this);
+ Base::VisitCallExpr(CE);
+
+ // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
+ }
+
+ void VisitCXXConstructExpr(CXXConstructExpr *CCE) {
+ // This is a call, so all subexpressions are sequenced before the result.
+ SequencedSubexpression Sequenced(*this);
+
+ if (!CCE->isListInitialization())
+ return VisitExpr(CCE);
+
+ // In C++11, list initializations are sequenced.
+ SmallVector<SequenceTree::Seq, 32> Elts;
+ SequenceTree::Seq Parent = Region;
+ for (CXXConstructExpr::arg_iterator I = CCE->arg_begin(),
+ E = CCE->arg_end();
+ I != E; ++I) {
+ Region = Tree.allocate(Parent);
+ Elts.push_back(Region);
+ Visit(*I);
+ }
+
+ // Forget that the initializers are sequenced.
+ Region = Parent;
+ for (unsigned I = 0; I < Elts.size(); ++I)
+ Tree.merge(Elts[I]);
+ }
+
+ void VisitInitListExpr(InitListExpr *ILE) {
+ if (!SemaRef.getLangOpts().CPlusPlus11)
+ return VisitExpr(ILE);
+
+ // In C++11, list initializations are sequenced.
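+ //
+ // For example, 'int a[2] = { i++, i++ };' is well-defined in C++11; in C
+ // the initializers are visited in a single region, so the two increments
+ // are diagnosed as unsequenced.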
+ SmallVector<SequenceTree::Seq, 32> Elts;
+ SequenceTree::Seq Parent = Region;
+ for (unsigned I = 0; I < ILE->getNumInits(); ++I) {
+ Expr *E = ILE->getInit(I);
+ if (!E) continue;
+ Region = Tree.allocate(Parent);
+ Elts.push_back(Region);
+ Visit(E);
+ }
+
+ // Forget that the initializers are sequenced.
+ Region = Parent;
+ for (unsigned I = 0; I < Elts.size(); ++I)
+ Tree.merge(Elts[I]);
+ }
+};
+}
+
+void Sema::CheckUnsequencedOperations(Expr *E) {
+ SmallVector<Expr *, 8> WorkList;
+ WorkList.push_back(E);
+ while (!WorkList.empty()) {
+ Expr *Item = WorkList.pop_back_val();
+ SequenceChecker(*this, Item, WorkList);
+ }
+}
+
+void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc,
+ bool IsConstexpr) {
+ CheckImplicitConversions(E, CheckLoc);
+ CheckUnsequencedOperations(E);
+ if (!IsConstexpr && !E->isValueDependent())
+ CheckForIntOverflow(E);
+}
+
+void Sema::CheckBitFieldInitialization(SourceLocation InitLoc,
+ FieldDecl *BitField,
+ Expr *Init) {
+ (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc);
+}
+
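+// For example:
+//   void f(int a[*]);     // OK: a declaration may use [*]
+//   void f(int a[*]) {}   // diagnosed: [*] in a function definition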
+static void diagnoseArrayStarInParamType(Sema &S, QualType PType,
+ SourceLocation Loc) {
+ if (!PType->isVariablyModifiedType())
+ return;
+ if (const auto *PointerTy = dyn_cast<PointerType>(PType)) {
+ diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc);
+ return;
+ }
+ if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) {
+ diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc);
+ return;
+ }
+ if (const auto *ParenTy = dyn_cast<ParenType>(PType)) {
+ diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc);
+ return;
+ }
+
+ const ArrayType *AT = S.Context.getAsArrayType(PType);
+ if (!AT)
+ return;
+
+ if (AT->getSizeModifier() != ArrayType::Star) {
+ diagnoseArrayStarInParamType(S, AT->getElementType(), Loc);
+ return;
+ }
+
+ S.Diag(Loc, diag::err_array_star_in_function_definition);
+}
+
+/// CheckParmsForFunctionDef - Check that the parameters of the given
+/// function are appropriate for the definition of a function. This
+/// takes care of any checks that cannot be performed on the
+/// declaration itself, e.g., that the types of each of the function
+/// parameters are complete.
+bool Sema::CheckParmsForFunctionDef(ParmVarDecl *const *P,
+ ParmVarDecl *const *PEnd,
+ bool CheckParameterNames) {
+ bool HasInvalidParm = false;
+ for (; P != PEnd; ++P) {
+ ParmVarDecl *Param = *P;
+
+ // C99 6.7.5.3p4: the parameters in a parameter type list in a
+ // function declarator that is part of a function definition of
+ // that function shall not have incomplete type.
+ //
+ // This is also C++ [dcl.fct]p6.
+ if (!Param->isInvalidDecl() &&
+ RequireCompleteType(Param->getLocation(), Param->getType(),
+ diag::err_typecheck_decl_incomplete_type)) {
+ Param->setInvalidDecl();
+ HasInvalidParm = true;
+ }
+
+ // C99 6.9.1p5: If the declarator includes a parameter type list, the
+ // declaration of each parameter shall include an identifier.
+ if (CheckParameterNames &&
+ Param->getIdentifier() == nullptr &&
+ !Param->isImplicit() &&
+ !getLangOpts().CPlusPlus)
+ Diag(Param->getLocation(), diag::err_parameter_name_omitted);
+
+ // C99 6.7.5.3p12:
+ // If the function declarator is not part of a definition of that
+ // function, parameters may have incomplete type and may use the [*]
+ // notation in their sequences of declarator specifiers to specify
+ // variable length array types.
+ QualType PType = Param->getOriginalType();
+ // FIXME: This diagnostic should point the '[*]' if source-location
+ // information is added for it.
+ diagnoseArrayStarInParamType(*this, PType, Param->getLocation());
+
+ // MSVC destroys objects passed by value in the callee. Therefore a
+ // function definition which takes such a parameter must be able to call the
+ // object's destructor. However, we don't perform any direct access check
+ // on the dtor.
+ if (getLangOpts().CPlusPlus && Context.getTargetInfo()
+ .getCXXABI()
+ .areArgsDestroyedLeftToRightInCallee()) {
+ if (!Param->isInvalidDecl()) {
+ if (const RecordType *RT = Param->getType()->getAs<RecordType>()) {
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (!ClassDecl->isInvalidDecl() &&
+ !ClassDecl->hasIrrelevantDestructor() &&
+ !ClassDecl->isDependentContext()) {
+ CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
+ MarkFunctionReferenced(Param->getLocation(), Destructor);
+ DiagnoseUseOfDecl(Destructor, Param->getLocation());
+ }
+ }
+ }
+ }
+
+ // Parameters with the pass_object_size attribute only need to be marked
+ // constant at function definitions. Because we lack information about
+ // whether we're on a declaration or definition when we're instantiating the
+ // attribute, we need to check for constness here.
+ if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>())
+ if (!Param->getType().isConstQualified())
+ Diag(Param->getLocation(), diag::err_attribute_pointers_only)
+ << Attr->getSpelling() << 1;
+ }
+
+ return HasInvalidParm;
+}
+
+/// CheckCastAlign - Implements -Wcast-align, which warns when a
+/// pointer cast increases the alignment requirements.
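+///
+/// For example:
+///   char *p;
+///   int *q = (int *)p; // warns: required alignment goes from 1 to that
+///                      // of 'int' (typically 4)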
+void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
+ // This is actually a lot of work to potentially be doing on every
+ // cast; don't do it if we're ignoring -Wcast-align (as is the default).
+ if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin()))
+ return;
+
+ // Ignore dependent types.
+ if (T->isDependentType() || Op->getType()->isDependentType())
+ return;
+
+ // Require that the destination be a pointer type.
+ const PointerType *DestPtr = T->getAs<PointerType>();
+ if (!DestPtr) return;
+
+ // If the destination has alignment 1, we're done.
+ QualType DestPointee = DestPtr->getPointeeType();
+ if (DestPointee->isIncompleteType()) return;
+ CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee);
+ if (DestAlign.isOne()) return;
+
+ // Require that the source be a pointer type.
+ const PointerType *SrcPtr = Op->getType()->getAs<PointerType>();
+ if (!SrcPtr) return;
+ QualType SrcPointee = SrcPtr->getPointeeType();
+
+ // Whitelist casts from cv void*. We already implicitly
+ // whitelisted casts to cv void*, since they have alignment 1.
+ // Also whitelist casts involving incomplete types, which implicitly
+ // include 'void'.
+ if (SrcPointee->isIncompleteType()) return;
+
+ CharUnits SrcAlign = Context.getTypeAlignInChars(SrcPointee);
+ if (SrcAlign >= DestAlign) return;
+
+ Diag(TRange.getBegin(), diag::warn_cast_align)
+ << Op->getType() << T
+ << static_cast<unsigned>(SrcAlign.getQuantity())
+ << static_cast<unsigned>(DestAlign.getQuantity())
+ << TRange << Op->getSourceRange();
+}
+
+static const Type* getElementType(const Expr *BaseExpr) {
+ const Type* EltType = BaseExpr->getType().getTypePtr();
+ if (EltType->isAnyPointerType())
+ return EltType->getPointeeType().getTypePtr();
+ else if (EltType->isArrayType())
+ return EltType->getBaseElementTypeUnsafe();
+ return EltType;
+}
+
+/// \brief Check whether this array fits the idiom of a size-one tail padded
+/// array member of a struct.
+///
+/// We avoid emitting out-of-bounds access warnings for such arrays as they are
+/// commonly used to emulate flexible arrays in C89 code.
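+///
+/// For example:
+///   struct PString { unsigned char Length; char Data[1]; };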
+static bool IsTailPaddedMemberArray(Sema &S, llvm::APInt Size,
+ const NamedDecl *ND) {
+ if (Size != 1 || !ND) return false;
+
+ const FieldDecl *FD = dyn_cast<FieldDecl>(ND);
+ if (!FD) return false;
+
+ // Don't consider sizes resulting from macro expansions or template argument
+ // substitution to form C89 tail-padded arrays.
+
+ TypeSourceInfo *TInfo = FD->getTypeSourceInfo();
+ while (TInfo) {
+ TypeLoc TL = TInfo->getTypeLoc();
+ // Look through typedefs.
+ if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) {
+ const TypedefNameDecl *TDL = TTL.getTypedefNameDecl();
+ TInfo = TDL->getTypeSourceInfo();
+ continue;
+ }
+ if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) {
+ const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr());
+ if (!SizeExpr || SizeExpr->getExprLoc().isMacroID())
+ return false;
+ }
+ break;
+ }
+
+ const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext());
+ if (!RD) return false;
+ if (RD->isUnion()) return false;
+ if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+ if (!CRD->isStandardLayout()) return false;
+ }
+
+ // See if this is the last field decl in the record.
+ const Decl *D = FD;
+ while ((D = D->getNextDeclInContext()))
+ if (isa<FieldDecl>(D))
+ return false;
+ return true;
+}
+
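+// For example, given 'int a[4]':
+//   a[4] = 0;        // warns: index 4 is past the end of the array
+//   int *p = &a[4];  // no warning: one-past-the-end is allowed here
+//   int *q = &a[5];  // warns: out of bounds even for pointer arithmetic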
+void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
+ const ArraySubscriptExpr *ASE,
+ bool AllowOnePastEnd, bool IndexNegated) {
+ IndexExpr = IndexExpr->IgnoreParenImpCasts();
+ if (IndexExpr->isValueDependent())
+ return;
+
+ const Type *EffectiveType = getElementType(BaseExpr);
+ BaseExpr = BaseExpr->IgnoreParenCasts();
+ const ConstantArrayType *ArrayTy =
+ Context.getAsConstantArrayType(BaseExpr->getType());
+ if (!ArrayTy)
+ return;
+
+ llvm::APSInt index;
+ if (!IndexExpr->EvaluateAsInt(index, Context, Expr::SE_AllowSideEffects))
+ return;
+ if (IndexNegated)
+ index = -index;
+
+ const NamedDecl *ND = nullptr;
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
+ ND = dyn_cast<NamedDecl>(DRE->getDecl());
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
+ ND = dyn_cast<NamedDecl>(ME->getMemberDecl());
+
+ if (index.isUnsigned() || !index.isNegative()) {
+ llvm::APInt size = ArrayTy->getSize();
+ if (!size.isStrictlyPositive())
+ return;
+
+ const Type* BaseType = getElementType(BaseExpr);
+ if (BaseType != EffectiveType) {
+ // Make sure we're comparing apples to apples when comparing index to size
+ uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType);
+ uint64_t array_typesize = Context.getTypeSize(BaseType);
+ // Handle ptrarith_typesize being zero, such as when casting to void*
+ if (!ptrarith_typesize) ptrarith_typesize = 1;
+ if (ptrarith_typesize != array_typesize) {
+ // There's a cast to a different size type involved
+ uint64_t ratio = array_typesize / ptrarith_typesize;
+ // TODO: Be smarter about handling cases where array_typesize is not a
+ // multiple of ptrarith_typesize
+ if (ptrarith_typesize * ratio == array_typesize)
+ size *= llvm::APInt(size.getBitWidth(), ratio);
+ }
+ }
+
+ if (size.getBitWidth() > index.getBitWidth())
+ index = index.zext(size.getBitWidth());
+ else if (size.getBitWidth() < index.getBitWidth())
+ size = size.zext(index.getBitWidth());
+
+ // For array subscripting the index must be less than size, but for pointer
+ // arithmetic also allow the index (offset) to be equal to size since
+ // computing the next address after the end of the array is legal and
+ // commonly done e.g. in C++ iterators and range-based for loops.
+ if (AllowOnePastEnd ? index.ule(size) : index.ult(size))
+ return;
+
+ // Also don't warn for arrays of size 1 which are members of some
+ // structure. These are often used to approximate flexible arrays in C89
+ // code.
+ if (IsTailPaddedMemberArray(*this, size, ND))
+ return;
+
+ // Suppress the warning if the subscript expression (as identified by the
+ // ']' location) and the index expression are both from macro expansions
+ // within a system header.
+ if (ASE) {
+ SourceLocation RBracketLoc = SourceMgr.getSpellingLoc(
+ ASE->getRBracketLoc());
+ if (SourceMgr.isInSystemHeader(RBracketLoc)) {
+ SourceLocation IndexLoc = SourceMgr.getSpellingLoc(
+ IndexExpr->getLocStart());
+ if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc))
+ return;
+ }
+ }
+
+ unsigned DiagID = diag::warn_ptr_arith_exceeds_bounds;
+ if (ASE)
+ DiagID = diag::warn_array_index_exceeds_bounds;
+
+ DiagRuntimeBehavior(BaseExpr->getLocStart(), BaseExpr,
+ PDiag(DiagID) << index.toString(10, true)
+ << size.toString(10, true)
+ << (unsigned)size.getLimitedValue(~0U)
+ << IndexExpr->getSourceRange());
+ } else {
+ unsigned DiagID = diag::warn_array_index_precedes_bounds;
+ if (!ASE) {
+ DiagID = diag::warn_ptr_arith_precedes_bounds;
+ if (index.isNegative()) index = -index;
+ }
+
+ DiagRuntimeBehavior(BaseExpr->getLocStart(), BaseExpr,
+ PDiag(DiagID) << index.toString(10, true)
+ << IndexExpr->getSourceRange());
+ }
+
+ if (!ND) {
+ // Try harder to find a NamedDecl to point at in the note.
+ while (const ArraySubscriptExpr *ASE =
+ dyn_cast<ArraySubscriptExpr>(BaseExpr))
+ BaseExpr = ASE->getBase()->IgnoreParenCasts();
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
+ ND = dyn_cast<NamedDecl>(DRE->getDecl());
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
+ ND = dyn_cast<NamedDecl>(ME->getMemberDecl());
+ }
+
+ if (ND)
+ DiagRuntimeBehavior(ND->getLocStart(), BaseExpr,
+ PDiag(diag::note_array_index_out_of_bounds)
+ << ND->getDeclName());
+}
+
+void Sema::CheckArrayAccess(const Expr *expr) {
+ int AllowOnePastEnd = 0;
+ while (expr) {
+ expr = expr->IgnoreParenImpCasts();
+ switch (expr->getStmtClass()) {
+ case Stmt::ArraySubscriptExprClass: {
+ const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr);
+ CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE,
+ AllowOnePastEnd > 0);
+ return;
+ }
+ case Stmt::OMPArraySectionExprClass: {
+ const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr);
+ if (ASE->getLowerBound())
+ CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(),
+ /*ASE=*/nullptr, AllowOnePastEnd > 0);
+ return;
+ }
+ case Stmt::UnaryOperatorClass: {
+ // Only unwrap the * and & unary operators
+ const UnaryOperator *UO = cast<UnaryOperator>(expr);
+ expr = UO->getSubExpr();
+ switch (UO->getOpcode()) {
+ case UO_AddrOf:
+ AllowOnePastEnd++;
+ break;
+ case UO_Deref:
+ AllowOnePastEnd--;
+ break;
+ default:
+ return;
+ }
+ break;
+ }
+ case Stmt::ConditionalOperatorClass: {
+ const ConditionalOperator *cond = cast<ConditionalOperator>(expr);
+ if (const Expr *lhs = cond->getLHS())
+ CheckArrayAccess(lhs);
+ if (const Expr *rhs = cond->getRHS())
+ CheckArrayAccess(rhs);
+ return;
+ }
+ default:
+ return;
+ }
+ }
+}
+
+//===--- CHECK: Objective-C retain cycles ----------------------------------//
+
+namespace {
+ struct RetainCycleOwner {
+ RetainCycleOwner() : Variable(nullptr), Indirect(false) {}
+ VarDecl *Variable;
+ SourceRange Range;
+ SourceLocation Loc;
+ bool Indirect;
+
+ void setLocsFrom(Expr *e) {
+ Loc = e->getExprLoc();
+ Range = e->getSourceRange();
+ }
+ };
+}
+
+/// Consider whether capturing the given variable can possibly lead to
+/// a retain cycle.
+static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) {
+ // In ARC, it's captured strongly iff the variable has __strong
+ // lifetime. In MRR, it's captured strongly if the variable is
+ // __block and has an appropriate type.
+ if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
+ return false;
+
+ owner.Variable = var;
+ if (ref)
+ owner.setLocsFrom(ref);
+ return true;
+}
+
+static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
+ while (true) {
+ e = e->IgnoreParens();
+ if (CastExpr *cast = dyn_cast<CastExpr>(e)) {
+ switch (cast->getCastKind()) {
+ case CK_BitCast:
+ case CK_LValueBitCast:
+ case CK_LValueToRValue:
+ case CK_ARCReclaimReturnedObject:
+ e = cast->getSubExpr();
+ continue;
+
+ default:
+ return false;
+ }
+ }
+
+ if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) {
+ ObjCIvarDecl *ivar = ref->getDecl();
+ if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
+ return false;
+
+ // Try to find a retain cycle in the base.
+ if (!findRetainCycleOwner(S, ref->getBase(), owner))
+ return false;
+
+ if (ref->isFreeIvar()) owner.setLocsFrom(ref);
+ owner.Indirect = true;
+ return true;
+ }
+
+ if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) {
+ VarDecl *var = dyn_cast<VarDecl>(ref->getDecl());
+ if (!var) return false;
+ return considerVariable(var, ref, owner);
+ }
+
+ if (MemberExpr *member = dyn_cast<MemberExpr>(e)) {
+ if (member->isArrow()) return false;
+
+ // Don't count this as an indirect ownership.
+ e = member->getBase();
+ continue;
+ }
+
+ if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
+ // Only pay attention to pseudo-objects on property references.
+ ObjCPropertyRefExpr *pre
+ = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm()
+ ->IgnoreParens());
+ if (!pre) return false;
+ if (pre->isImplicitProperty()) return false;
+ ObjCPropertyDecl *property = pre->getExplicitProperty();
+ if (!property->isRetaining() &&
+ !(property->getPropertyIvarDecl() &&
+ property->getPropertyIvarDecl()->getType()
+ .getObjCLifetime() == Qualifiers::OCL_Strong))
+ return false;
+
+ owner.Indirect = true;
+ if (pre->isSuperReceiver()) {
+ owner.Variable = S.getCurMethodDecl()->getSelfDecl();
+ if (!owner.Variable)
+ return false;
+ owner.Loc = pre->getLocation();
+ owner.Range = pre->getSourceRange();
+ return true;
+ }
+ e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase())
+ ->getSourceExpr());
+ continue;
+ }
+
+ // Array ivars?
+
+ return false;
+ }
+}
+
+namespace {
+ struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
+ FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
+ : EvaluatedExprVisitor<FindCaptureVisitor>(Context),
+ Context(Context), Variable(variable), Capturer(nullptr),
+ VarWillBeReased(false) {}
+ ASTContext &Context;
+ VarDecl *Variable;
+ Expr *Capturer;
+ bool VarWillBeReased;
+
+ void VisitDeclRefExpr(DeclRefExpr *ref) {
+ if (ref->getDecl() == Variable && !Capturer)
+ Capturer = ref;
+ }
+
+ void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) {
+ if (Capturer) return;
+ Visit(ref->getBase());
+ if (Capturer && ref->isFreeIvar())
+ Capturer = ref;
+ }
+
+ void VisitBlockExpr(BlockExpr *block) {
+ // Look inside nested blocks
+ if (block->getBlockDecl()->capturesVariable(Variable))
+ Visit(block->getBlockDecl()->getBody());
+ }
+
+ void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) {
+ if (Capturer) return;
+ if (OVE->getSourceExpr())
+ Visit(OVE->getSourceExpr());
+ }
+ void VisitBinaryOperator(BinaryOperator *BinOp) {
+ if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign)
+ return;
+ Expr *LHS = BinOp->getLHS();
+ if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) {
+ if (DRE->getDecl() != Variable)
+ return;
+ if (Expr *RHS = BinOp->getRHS()) {
+ RHS = RHS->IgnoreParenCasts();
+ llvm::APSInt Value;
+ VarWillBeReased =
+ (RHS && RHS->isIntegerConstantExpr(Value, Context) && Value == 0);
+ }
+ }
+ }
+ };
+}
+
+/// Check whether the given argument is a block which captures a
+/// variable.
+static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) {
+ assert(owner.Variable && owner.Loc.isValid());
+
+ e = e->IgnoreParenCasts();
+
+ // Look through [^{...} copy] and Block_copy(^{...}).
+ if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) {
+ Selector Cmd = ME->getSelector();
+ if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") {
+ e = ME->getInstanceReceiver();
+ if (!e)
+ return nullptr;
+ e = e->IgnoreParenCasts();
+ }
+ } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) {
+ if (CE->getNumArgs() == 1) {
+ FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl());
+ if (Fn) {
+ const IdentifierInfo *FnI = Fn->getIdentifier();
+ if (FnI && FnI->isStr("_Block_copy")) {
+ e = CE->getArg(0)->IgnoreParenCasts();
+ }
+ }
+ }
+ }
+
+ BlockExpr *block = dyn_cast<BlockExpr>(e);
+ if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable))
+ return nullptr;
+
+ FindCaptureVisitor visitor(S.Context, owner.Variable);
+ visitor.Visit(block->getBlockDecl()->getBody());
+ return visitor.VarWillBeReased ? nullptr : visitor.Capturer;
+}
+
+static void diagnoseRetainCycle(Sema &S, Expr *capturer,
+ RetainCycleOwner &owner) {
+ assert(capturer);
+ assert(owner.Variable && owner.Loc.isValid());
+
+ S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle)
+ << owner.Variable << capturer->getSourceRange();
+ S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner)
+ << owner.Indirect << owner.Range;
+}
+
+/// Check for a keyword selector that starts with the word 'add' or
+/// 'set'.
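+///
+/// For example, 'setFoo:' and 'addObject:' qualify, while 'settings:' does
+/// not, because the selector continues with a lowercase letter after the
+/// 'set' prefix.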
+static bool isSetterLikeSelector(Selector sel) {
+ if (sel.isUnarySelector()) return false;
+
+ StringRef str = sel.getNameForSlot(0);
+ while (!str.empty() && str.front() == '_') str = str.substr(1);
+ if (str.startswith("set"))
+ str = str.substr(3);
+ else if (str.startswith("add")) {
+ // Specially whitelist 'addOperationWithBlock:'.
+ if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock"))
+ return false;
+ str = str.substr(3);
+ }
+ else
+ return false;
+
+ if (str.empty()) return true;
+ return !isLowercase(str.front());
+}
+
+static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S,
+ ObjCMessageExpr *Message) {
+ bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass(
+ Message->getReceiverInterface(),
+ NSAPI::ClassId_NSMutableArray);
+ if (!IsMutableArray) {
+ return None;
+ }
+
+ Selector Sel = Message->getSelector();
+
+ Optional<NSAPI::NSArrayMethodKind> MKOpt =
+ S.NSAPIObj->getNSArrayMethodKind(Sel);
+ if (!MKOpt) {
+ return None;
+ }
+
+ NSAPI::NSArrayMethodKind MK = *MKOpt;
+
+ switch (MK) {
+ case NSAPI::NSMutableArr_addObject:
+ case NSAPI::NSMutableArr_insertObjectAtIndex:
+ case NSAPI::NSMutableArr_setObjectAtIndexedSubscript:
+ return 0;
+ case NSAPI::NSMutableArr_replaceObjectAtIndex:
+ return 1;
+
+ default:
+ return None;
+ }
+
+ return None;
+}
+
+static
+Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S,
+ ObjCMessageExpr *Message) {
+ bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass(
+ Message->getReceiverInterface(),
+ NSAPI::ClassId_NSMutableDictionary);
+ if (!IsMutableDictionary) {
+ return None;
+ }
+
+ Selector Sel = Message->getSelector();
+
+ Optional<NSAPI::NSDictionaryMethodKind> MKOpt =
+ S.NSAPIObj->getNSDictionaryMethodKind(Sel);
+ if (!MKOpt) {
+ return None;
+ }
+
+ NSAPI::NSDictionaryMethodKind MK = *MKOpt;
+
+ switch (MK) {
+ case NSAPI::NSMutableDict_setObjectForKey:
+ case NSAPI::NSMutableDict_setValueForKey:
+ case NSAPI::NSMutableDict_setObjectForKeyedSubscript:
+ return 0;
+
+ default:
+ return None;
+ }
+
+ return None;
+}
+
+static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
+ bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass(
+ Message->getReceiverInterface(),
+ NSAPI::ClassId_NSMutableSet);
+
+ bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass(
+ Message->getReceiverInterface(),
+ NSAPI::ClassId_NSMutableOrderedSet);
+ if (!IsMutableSet && !IsMutableOrderedSet) {
+ return None;
+ }
+
+ Selector Sel = Message->getSelector();
+
+ Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel);
+ if (!MKOpt) {
+ return None;
+ }
+
+ NSAPI::NSSetMethodKind MK = *MKOpt;
+
+ switch (MK) {
+ case NSAPI::NSMutableSet_addObject:
+ case NSAPI::NSOrderedSet_setObjectAtIndex:
+ case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript:
+ case NSAPI::NSOrderedSet_insertObjectAtIndex:
+ return 0;
+ case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject:
+ return 1;
+ }
+
+ return None;
+}
+
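+// Diagnose instance messages that insert a mutable collection into itself,
+// e.g. '[array addObject:array]' or '[dict setObject:dict forKey:key]'.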
+void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
+ if (!Message->isInstanceMessage()) {
+ return;
+ }
+
+ Optional<int> ArgOpt;
+
+ if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) &&
+ !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) &&
+ !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) {
+ return;
+ }
+
+ int ArgIndex = *ArgOpt;
+
+ Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts();
+ if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) {
+ Arg = OE->getSourceExpr()->IgnoreImpCasts();
+ }
+
+ if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
+ if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
+ if (ArgRE->isObjCSelfExpr()) {
+ Diag(Message->getSourceRange().getBegin(),
+ diag::warn_objc_circular_container)
+ << ArgRE->getDecl()->getName() << StringRef("super");
+ }
+ }
+ } else {
+ Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts();
+
+ if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) {
+ Receiver = OE->getSourceExpr()->IgnoreImpCasts();
+ }
+
+ if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) {
+ if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
+ if (ReceiverRE->getDecl() == ArgRE->getDecl()) {
+ ValueDecl *Decl = ReceiverRE->getDecl();
+ Diag(Message->getSourceRange().getBegin(),
+ diag::warn_objc_circular_container)
+ << Decl->getName() << Decl->getName();
+ if (!ArgRE->isObjCSelfExpr()) {
+ Diag(Decl->getLocation(),
+ diag::note_objc_circular_container_declared_here)
+ << Decl->getName();
+ }
+ }
+ }
+ } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) {
+ if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) {
+ if (IvarRE->getDecl() == IvarArgRE->getDecl()) {
+ ObjCIvarDecl *Decl = IvarRE->getDecl();
+ Diag(Message->getSourceRange().getBegin(),
+ diag::warn_objc_circular_container)
+ << Decl->getName() << Decl->getName();
+ Diag(Decl->getLocation(),
+ diag::note_objc_circular_container_declared_here)
+ << Decl->getName();
+ }
+ }
+ }
+ }
+}
+
+/// Check a message send to see if it's likely to cause a retain cycle.
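+///
+/// For example, under ARC:
+///   [self setHandler:^{ [self fire]; }];
+/// stores a block that captures (and thus retains) 'self' into 'self'.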
+void Sema::checkRetainCycles(ObjCMessageExpr *msg) {
+ // Only check instance methods whose selector looks like a setter.
+ if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector()))
+ return;
+
+ // Try to find a variable that the receiver is strongly owned by.
+ RetainCycleOwner owner;
+ if (msg->getReceiverKind() == ObjCMessageExpr::Instance) {
+ if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner))
+ return;
+ } else {
+ assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
+ owner.Variable = getCurMethodDecl()->getSelfDecl();
+ owner.Loc = msg->getSuperLoc();
+ owner.Range = msg->getSuperLoc();
+ }
+
+ // Check whether the receiver is captured by any of the arguments.
+ for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i)
+ if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner))
+ return diagnoseRetainCycle(*this, capturer, owner);
+}
+
+/// Check a property assign to see if it's likely to cause a retain cycle.
+void Sema::checkRetainCycles(Expr *receiver, Expr *argument) {
+ RetainCycleOwner owner;
+ if (!findRetainCycleOwner(*this, receiver, owner))
+ return;
+
+ if (Expr *capturer = findCapturingExpr(*this, argument, owner))
+ diagnoseRetainCycle(*this, capturer, owner);
+}
+
+void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) {
+ RetainCycleOwner Owner;
+ if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner))
+ return;
+
+ // Because we don't have an expression for the variable, we have to set the
+ // location explicitly here.
+ Owner.Loc = Var->getLocation();
+ Owner.Range = Var->getSourceRange();
+
+ if (Expr *Capturer = findCapturingExpr(*this, Init, Owner))
+ diagnoseRetainCycle(*this, Capturer, Owner);
+}
+
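+// For example, under ARC:
+//   __weak id x = @[ @1 ]; // warns: the array literal is released
+//                          // immediately, leaving 'x' nil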
+static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc,
+ Expr *RHS, bool isProperty) {
+ // Check if RHS is an Objective-C object literal, which also can get
+ // immediately zapped in a weak reference. Note that we explicitly
+ // allow ObjCStringLiterals, since those are designed to never really die.
+ RHS = RHS->IgnoreParenImpCasts();
+
+ // This enum needs to match with the 'select' in
+ // warn_arc_literal_assign (off-by-1).
+ Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS);
+ if (Kind == Sema::LK_String || Kind == Sema::LK_None)
+ return false;
+
+ S.Diag(Loc, diag::warn_arc_literal_assign)
+ << (unsigned) Kind
+ << (isProperty ? 0 : 1)
+ << RHS->getSourceRange();
+
+ return true;
+}
+
+static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc,
+ Qualifiers::ObjCLifetime LT,
+ Expr *RHS, bool isProperty) {
+ // Strip off any implicit cast added to get to the one ARC-specific.
+ while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
+ if (cast->getCastKind() == CK_ARCConsumeObject) {
+ S.Diag(Loc, diag::warn_arc_retained_assign)
+ << (LT == Qualifiers::OCL_ExplicitNone)
+ << (isProperty ? 0 : 1)
+ << RHS->getSourceRange();
+ return true;
+ }
+ RHS = cast->getSubExpr();
+ }
+
+ if (LT == Qualifiers::OCL_Weak &&
+ checkUnsafeAssignLiteral(S, Loc, RHS, isProperty))
+ return true;
+
+ return false;
+}
+
+bool Sema::checkUnsafeAssigns(SourceLocation Loc,
+ QualType LHS, Expr *RHS) {
+ Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime();
+
+ if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone)
+ return false;
+
+ if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false))
+ return true;
+
+ return false;
+}
+
+void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
+ Expr *LHS, Expr *RHS) {
+ QualType LHSType;
+ // The type of a PropertyRef on the LHS needs to be obtained directly
+ // from its declaration, as the expression itself has a pseudo type.
+ ObjCPropertyRefExpr *PRE
+ = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens());
+ if (PRE && !PRE->isImplicitProperty()) {
+ const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
+ if (PD)
+ LHSType = PD->getType();
+ }
+
+ if (LHSType.isNull())
+ LHSType = LHS->getType();
+
+ Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime();
+
+ if (LT == Qualifiers::OCL_Weak) {
+ if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc))
+ getCurFunction()->markSafeWeakUse(LHS);
+ }
+
+ if (checkUnsafeAssigns(Loc, LHSType, RHS))
+ return;
+
+ // FIXME: Check for other lifetimes.
+ if (LT != Qualifiers::OCL_None)
+ return;
+
+ if (PRE) {
+ if (PRE->isImplicitProperty())
+ return;
+ const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
+ if (!PD)
+ return;
+
+ unsigned Attributes = PD->getPropertyAttributes();
+ if (Attributes & ObjCPropertyDecl::OBJC_PR_assign) {
+ // When the 'assign' attribute was not explicitly specified by the
+ // user, ignore it and rely on the property type itself for
+ // lifetime info.
+ unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten();
+ if (!(AsWrittenAttr & ObjCPropertyDecl::OBJC_PR_assign) &&
+ LHSType->isObjCRetainableType())
+ return;
+
+ while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
+ if (cast->getCastKind() == CK_ARCConsumeObject) {
+ Diag(Loc, diag::warn_arc_retained_property_assign)
+ << RHS->getSourceRange();
+ return;
+ }
+ RHS = cast->getSubExpr();
+ }
+ }
+ else if (Attributes & ObjCPropertyDecl::OBJC_PR_weak) {
+ if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true))
+ return;
+ }
+ }
+}
+
+//===--- CHECK: Empty statement body (-Wempty-body) ---------------------===//
+
+namespace {
+bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr,
+ SourceLocation StmtLoc,
+ const NullStmt *Body) {
+ // Do not warn if the body is a macro that expands to nothing, e.g.:
+ //
+ // #define CALL(x)
+ // if (condition)
+ // CALL(0);
+ //
+ if (Body->hasLeadingEmptyMacro())
+ return false;
+
+ // Get line numbers of statement and body.
+ bool StmtLineInvalid;
+ unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc,
+ &StmtLineInvalid);
+ if (StmtLineInvalid)
+ return false;
+
+ bool BodyLineInvalid;
+ unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(),
+ &BodyLineInvalid);
+ if (BodyLineInvalid)
+ return false;
+
+ // Warn if null statement and body are on the same line.
+ if (StmtLine != BodyLine)
+ return false;
+
+ return true;
+}
+} // Unnamed namespace
+
+void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
+ const Stmt *Body,
+ unsigned DiagID) {
+ // Since this is a syntactic check, don't emit diagnostic for template
+ // instantiations, this just adds noise.
+ if (CurrentInstantiationScope)
+ return;
+
+ // The body should be a null statement.
+ const NullStmt *NBody = dyn_cast<NullStmt>(Body);
+ if (!NBody)
+ return;
+
+ // Do the usual checks.
+ if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
+ return;
+
+ Diag(NBody->getSemiLoc(), DiagID);
+ Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
+}
+
+void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
+ const Stmt *PossibleBody) {
+ assert(!CurrentInstantiationScope); // Ensured by caller
+
+ SourceLocation StmtLoc;
+ const Stmt *Body;
+ unsigned DiagID;
+ if (const ForStmt *FS = dyn_cast<ForStmt>(S)) {
+ StmtLoc = FS->getRParenLoc();
+ Body = FS->getBody();
+ DiagID = diag::warn_empty_for_body;
+ } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) {
+ StmtLoc = WS->getCond()->getSourceRange().getEnd();
+ Body = WS->getBody();
+ DiagID = diag::warn_empty_while_body;
+ } else
+ return; // Neither `for' nor `while'.
+
+ // The body should be a null statement.
+ const NullStmt *NBody = dyn_cast<NullStmt>(Body);
+ if (!NBody)
+ return;
+
+ // Skip expensive checks if diagnostic is disabled.
+ if (Diags.isIgnored(DiagID, NBody->getSemiLoc()))
+ return;
+
+ // Do the usual checks.
+ if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
+ return;
+
+ // `for(...);' and `while(...);' are popular idioms, so in order to keep
+ // noise level low, emit diagnostics only if for/while is followed by a
+ // CompoundStmt, e.g.:
+ // for (int i = 0; i < n; i++);
+ // {
+ // a(i);
+ // }
+ // or if for/while is followed by a statement with more indentation
+ // than for/while itself:
+ // for (int i = 0; i < n; i++);
+ // a(i);
+ bool ProbableTypo = isa<CompoundStmt>(PossibleBody);
+ if (!ProbableTypo) {
+ bool BodyColInvalid;
+ unsigned BodyCol = SourceMgr.getPresumedColumnNumber(
+ PossibleBody->getLocStart(),
+ &BodyColInvalid);
+ if (BodyColInvalid)
+ return;
+
+ bool StmtColInvalid;
+ unsigned StmtCol = SourceMgr.getPresumedColumnNumber(
+ S->getLocStart(),
+ &StmtColInvalid);
+ if (StmtColInvalid)
+ return;
+
+ if (BodyCol > StmtCol)
+ ProbableTypo = true;
+ }
+
+ if (ProbableTypo) {
+ Diag(NBody->getSemiLoc(), DiagID);
+ Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
+ }
+}
+
+//===--- CHECK: Warn on self move with std::move. -------------------------===//
+
+/// DiagnoseSelfMove - Emits a warning if a value is moved to itself.
+void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
+ SourceLocation OpLoc) {
+
+  if (Diags.isIgnored(diag::warn_self_move, OpLoc))
+ return;
+
+ if (!ActiveTemplateInstantiations.empty())
+ return;
+
+ // Strip parens and casts away.
+ LHSExpr = LHSExpr->IgnoreParenImpCasts();
+ RHSExpr = RHSExpr->IgnoreParenImpCasts();
+
+ // Check for a call expression
+ const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
+ if (!CE || CE->getNumArgs() != 1)
+ return;
+
+ // Check for a call to std::move
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (!FD || !FD->isInStdNamespace() || !FD->getIdentifier() ||
+ !FD->getIdentifier()->isStr("move"))
+ return;
+
+ // Get argument from std::move
+ RHSExpr = CE->getArg(0);
+
+ const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
+ const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);
+
+ // Two DeclRefExpr's, check that the decls are the same.
+ if (LHSDeclRef && RHSDeclRef) {
+ if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
+ return;
+ if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
+ RHSDeclRef->getDecl()->getCanonicalDecl())
+ return;
+
+ Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
+ << LHSExpr->getSourceRange()
+ << RHSExpr->getSourceRange();
+ return;
+ }
+
+ // Member variables require a different approach to check for self moves.
+ // MemberExpr's are the same if every nested MemberExpr refers to the same
+ // Decl and that the base Expr's are DeclRefExpr's with the same Decl or
+ // the base Expr's are CXXThisExpr's.
+ const Expr *LHSBase = LHSExpr;
+ const Expr *RHSBase = RHSExpr;
+ const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr);
+ const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr);
+ if (!LHSME || !RHSME)
+ return;
+
+ while (LHSME && RHSME) {
+ if (LHSME->getMemberDecl()->getCanonicalDecl() !=
+ RHSME->getMemberDecl()->getCanonicalDecl())
+ return;
+
+ LHSBase = LHSME->getBase();
+ RHSBase = RHSME->getBase();
+ LHSME = dyn_cast<MemberExpr>(LHSBase);
+ RHSME = dyn_cast<MemberExpr>(RHSBase);
+ }
+
+ LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase);
+ RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase);
+ if (LHSDeclRef && RHSDeclRef) {
+ if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
+ return;
+ if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
+ RHSDeclRef->getDecl()->getCanonicalDecl())
+ return;
+
+ Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
+ << LHSExpr->getSourceRange()
+ << RHSExpr->getSourceRange();
+ return;
+ }
+
+ if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
+ Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
+ << LHSExpr->getSourceRange()
+ << RHSExpr->getSourceRange();
+}
+
+//===--- Layout compatibility ----------------------------------------------//
+
+namespace {
+
+bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);
+
+/// \brief Check if two enumeration types are layout-compatible.
+bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
+ // C++11 [dcl.enum] p8:
+ // Two enumeration types are layout-compatible if they have the same
+ // underlying type.
+ return ED1->isComplete() && ED2->isComplete() &&
+ C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType());
+}
+
+/// \brief Check if two fields are layout-compatible.
+bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, FieldDecl *Field2) {
+ if (!isLayoutCompatible(C, Field1->getType(), Field2->getType()))
+ return false;
+
+ if (Field1->isBitField() != Field2->isBitField())
+ return false;
+
+ if (Field1->isBitField()) {
+ // Make sure that the bit-fields are the same length.
+ unsigned Bits1 = Field1->getBitWidthValue(C);
+ unsigned Bits2 = Field2->getBitWidthValue(C);
+
+ if (Bits1 != Bits2)
+ return false;
+ }
+
+ return true;
+}
+
+/// \brief Check if two standard-layout structs are layout-compatible.
+/// (C++11 [class.mem] p17)
+bool isLayoutCompatibleStruct(ASTContext &C,
+ RecordDecl *RD1,
+ RecordDecl *RD2) {
+ // If both records are C++ classes, check that base classes match.
+ if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) {
+ // If one of the records is a CXXRecordDecl, we are in C++ mode; the
+ // other one must therefore be a CXXRecordDecl, too.
+ const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2);
+ // Check number of base classes.
+ if (D1CXX->getNumBases() != D2CXX->getNumBases())
+ return false;
+
+ // Check the base classes.
+ for (CXXRecordDecl::base_class_const_iterator
+ Base1 = D1CXX->bases_begin(),
+ BaseEnd1 = D1CXX->bases_end(),
+ Base2 = D2CXX->bases_begin();
+ Base1 != BaseEnd1;
+ ++Base1, ++Base2) {
+ if (!isLayoutCompatible(C, Base1->getType(), Base2->getType()))
+ return false;
+ }
+ } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) {
+ // If only RD2 is a C++ class, it should have zero base classes.
+ if (D2CXX->getNumBases() > 0)
+ return false;
+ }
+
+ // Check the fields.
+ RecordDecl::field_iterator Field2 = RD2->field_begin(),
+ Field2End = RD2->field_end(),
+ Field1 = RD1->field_begin(),
+ Field1End = RD1->field_end();
+ for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) {
+ if (!isLayoutCompatible(C, *Field1, *Field2))
+ return false;
+ }
+ if (Field1 != Field1End || Field2 != Field2End)
+ return false;
+
+ return true;
+}
+
+/// \brief Check if two standard-layout unions are layout-compatible.
+/// (C++11 [class.mem] p18)
+bool isLayoutCompatibleUnion(ASTContext &C,
+ RecordDecl *RD1,
+ RecordDecl *RD2) {
+ llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields;
+ for (auto *Field2 : RD2->fields())
+ UnmatchedFields.insert(Field2);
+
+ for (auto *Field1 : RD1->fields()) {
+ llvm::SmallPtrSet<FieldDecl *, 8>::iterator
+ I = UnmatchedFields.begin(),
+ E = UnmatchedFields.end();
+
+ for ( ; I != E; ++I) {
+ if (isLayoutCompatible(C, Field1, *I)) {
+ bool Result = UnmatchedFields.erase(*I);
+ (void) Result;
+ assert(Result);
+ break;
+ }
+ }
+ if (I == E)
+ return false;
+ }
+
+ return UnmatchedFields.empty();
+}
+
+bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, RecordDecl *RD2) {
+ if (RD1->isUnion() != RD2->isUnion())
+ return false;
+
+ if (RD1->isUnion())
+ return isLayoutCompatibleUnion(C, RD1, RD2);
+ else
+ return isLayoutCompatibleStruct(C, RD1, RD2);
+}
+
+/// \brief Check if two types are layout-compatible in C++11 sense.
+bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
+ if (T1.isNull() || T2.isNull())
+ return false;
+
+ // C++11 [basic.types] p11:
+ // If two types T1 and T2 are the same type, then T1 and T2 are
+ // layout-compatible types.
+ if (C.hasSameType(T1, T2))
+ return true;
+
+ T1 = T1.getCanonicalType().getUnqualifiedType();
+ T2 = T2.getCanonicalType().getUnqualifiedType();
+
+ const Type::TypeClass TC1 = T1->getTypeClass();
+ const Type::TypeClass TC2 = T2->getTypeClass();
+
+ if (TC1 != TC2)
+ return false;
+
+ if (TC1 == Type::Enum) {
+ return isLayoutCompatible(C,
+ cast<EnumType>(T1)->getDecl(),
+ cast<EnumType>(T2)->getDecl());
+ } else if (TC1 == Type::Record) {
+ if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
+ return false;
+
+ return isLayoutCompatible(C,
+ cast<RecordType>(T1)->getDecl(),
+ cast<RecordType>(T2)->getDecl());
+ }
+
+ return false;
+}
+}
+
+//===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//
+
+namespace {
+/// \brief Given a type tag expression find the type tag itself.
+///
+/// \param TypeExpr Type tag expression, as it appears in user's code.
+///
+/// \param VD Declaration of an identifier that appears in a type tag.
+///
+/// \param MagicValue Type tag magic value.
+bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
+ const ValueDecl **VD, uint64_t *MagicValue) {
+  while (true) {
+ if (!TypeExpr)
+ return false;
+
+ TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();
+
+ switch (TypeExpr->getStmtClass()) {
+ case Stmt::UnaryOperatorClass: {
+ const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr);
+ if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
+ TypeExpr = UO->getSubExpr();
+ continue;
+ }
+ return false;
+ }
+
+ case Stmt::DeclRefExprClass: {
+ const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr);
+ *VD = DRE->getDecl();
+ return true;
+ }
+
+ case Stmt::IntegerLiteralClass: {
+ const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr);
+ llvm::APInt MagicValueAPInt = IL->getValue();
+ if (MagicValueAPInt.getActiveBits() <= 64) {
+ *MagicValue = MagicValueAPInt.getZExtValue();
+ return true;
+ } else
+ return false;
+ }
+
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass: {
+ const AbstractConditionalOperator *ACO =
+ cast<AbstractConditionalOperator>(TypeExpr);
+ bool Result;
+ if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx)) {
+ if (Result)
+ TypeExpr = ACO->getTrueExpr();
+ else
+ TypeExpr = ACO->getFalseExpr();
+ continue;
+ }
+ return false;
+ }
+
+ case Stmt::BinaryOperatorClass: {
+ const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr);
+ if (BO->getOpcode() == BO_Comma) {
+ TypeExpr = BO->getRHS();
+ continue;
+ }
+ return false;
+ }
+
+ default:
+ return false;
+ }
+ }
+}
+
+/// \brief Retrieve the C type corresponding to type tag TypeExpr.
+///
+/// \param TypeExpr Expression that specifies a type tag.
+///
+/// \param MagicValues Registered magic values.
+///
+/// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
+/// kind.
+///
+/// \param TypeInfo Information about the corresponding C type.
+///
+/// \returns true if the corresponding C type was found.
+bool GetMatchingCType(
+ const IdentifierInfo *ArgumentKind,
+ const Expr *TypeExpr, const ASTContext &Ctx,
+ const llvm::DenseMap<Sema::TypeTagMagicValue,
+ Sema::TypeTagData> *MagicValues,
+ bool &FoundWrongKind,
+ Sema::TypeTagData &TypeInfo) {
+ FoundWrongKind = false;
+
+ // Variable declaration that has type_tag_for_datatype attribute.
+ const ValueDecl *VD = nullptr;
+
+ uint64_t MagicValue;
+
+ if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue))
+ return false;
+
+ if (VD) {
+ if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
+ if (I->getArgumentKind() != ArgumentKind) {
+ FoundWrongKind = true;
+ return false;
+ }
+ TypeInfo.Type = I->getMatchingCType();
+ TypeInfo.LayoutCompatible = I->getLayoutCompatible();
+ TypeInfo.MustBeNull = I->getMustBeNull();
+ return true;
+ }
+ return false;
+ }
+
+ if (!MagicValues)
+ return false;
+
+ llvm::DenseMap<Sema::TypeTagMagicValue,
+ Sema::TypeTagData>::const_iterator I =
+ MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
+ if (I == MagicValues->end())
+ return false;
+
+ TypeInfo = I->second;
+ return true;
+}
+} // unnamed namespace
+
+void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
+ uint64_t MagicValue, QualType Type,
+ bool LayoutCompatible,
+ bool MustBeNull) {
+ if (!TypeTagForDatatypeMagicValues)
+ TypeTagForDatatypeMagicValues.reset(
+ new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);
+
+ TypeTagMagicValue Magic(ArgumentKind, MagicValue);
+ (*TypeTagForDatatypeMagicValues)[Magic] =
+ TypeTagData(Type, LayoutCompatible, MustBeNull);
+}
+
+namespace {
+bool IsSameCharType(QualType T1, QualType T2) {
+ const BuiltinType *BT1 = T1->getAs<BuiltinType>();
+ if (!BT1)
+ return false;
+
+ const BuiltinType *BT2 = T2->getAs<BuiltinType>();
+ if (!BT2)
+ return false;
+
+ BuiltinType::Kind T1Kind = BT1->getKind();
+ BuiltinType::Kind T2Kind = BT2->getKind();
+
+ return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
+ (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
+ (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
+ (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
+}
+} // unnamed namespace
+
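+// For example (in the style of the type-safety attribute documentation;
+// the 'mpi' kind and 'MPI_Send' are illustrative):
+//   static const int mpi_int
+//       __attribute__((type_tag_for_datatype(mpi, int)));
+//   int MPI_Send(void *buf, int count, int dtype)
+//       __attribute__((pointer_with_type_tag(mpi, 1, 3)));
+// Passing a 'double *' together with 'mpi_int' then warns that the
+// argument type does not match the type registered for the tag.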
+void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
+ const Expr * const *ExprArgs) {
+ const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
+ bool IsPointerAttr = Attr->getIsPointer();
+
+ const Expr *TypeTagExpr = ExprArgs[Attr->getTypeTagIdx()];
+ bool FoundWrongKind;
+ TypeTagData TypeInfo;
+ if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context,
+ TypeTagForDatatypeMagicValues.get(),
+ FoundWrongKind, TypeInfo)) {
+ if (FoundWrongKind)
+ Diag(TypeTagExpr->getExprLoc(),
+ diag::warn_type_tag_for_datatype_wrong_kind)
+ << TypeTagExpr->getSourceRange();
+ return;
+ }
+
+ const Expr *ArgumentExpr = ExprArgs[Attr->getArgumentIdx()];
+ if (IsPointerAttr) {
+ // Skip implicit cast of pointer to `void *' (as a function argument).
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr))
+ if (ICE->getType()->isVoidPointerType() &&
+ ICE->getCastKind() == CK_BitCast)
+ ArgumentExpr = ICE->getSubExpr();
+ }
+ QualType ArgumentType = ArgumentExpr->getType();
+
+ // Passing a `void*' pointer shouldn't trigger a warning.
+ if (IsPointerAttr && ArgumentType->isVoidPointerType())
+ return;
+
+ if (TypeInfo.MustBeNull) {
+ // Type tag with matching void type requires a null pointer.
+ if (!ArgumentExpr->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNotNull)) {
+ Diag(ArgumentExpr->getExprLoc(),
+ diag::warn_type_safety_null_pointer_required)
+ << ArgumentKind->getName()
+ << ArgumentExpr->getSourceRange()
+ << TypeTagExpr->getSourceRange();
+ }
+ return;
+ }
+
+ QualType RequiredType = TypeInfo.Type;
+ if (IsPointerAttr)
+ RequiredType = Context.getPointerType(RequiredType);
+
+ bool mismatch = false;
+ if (!TypeInfo.LayoutCompatible) {
+ mismatch = !Context.hasSameType(ArgumentType, RequiredType);
+
+ // C++11 [basic.fundamental] p1:
+ // Plain char, signed char, and unsigned char are three distinct types.
+ //
+ // But we treat plain `char' as equivalent to `signed char' or `unsigned
+ // char' depending on the current char signedness mode.
+ if (mismatch)
+ if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(),
+ RequiredType->getPointeeType())) ||
+ (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType)))
+ mismatch = false;
+ } else
+ if (IsPointerAttr)
+ mismatch = !isLayoutCompatible(Context,
+ ArgumentType->getPointeeType(),
+ RequiredType->getPointeeType());
+ else
+ mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType);
+
+ if (mismatch)
+ Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
+ << ArgumentType << ArgumentKind
+ << TypeInfo.LayoutCompatible << RequiredType
+ << ArgumentExpr->getSourceRange()
+ << TypeTagExpr->getSourceRange();
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp
new file mode 100644
index 0000000..21cf625
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp
@@ -0,0 +1,7518 @@
+//===---------------- SemaCodeComplete.cpp - Code Completion ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the code-completion semantic actions.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/CodeCompleteConsumer.h"
+#include "clang/Sema/ExternalSemaSource.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Overload.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Twine.h"
+#include <list>
+#include <map>
+#include <vector>
+
+using namespace clang;
+using namespace sema;
+
+namespace {
+ /// \brief A container of code-completion results.
+ class ResultBuilder {
+ public:
+ /// \brief The type of a name-lookup filter, which can be provided to the
+ /// name-lookup routines to specify which declarations should be included in
+ /// the result set (when it returns true) and which declarations should be
+ /// filtered out (returns false).
+ typedef bool (ResultBuilder::*LookupFilter)(const NamedDecl *) const;
+
+ typedef CodeCompletionResult Result;
+
+ private:
+ /// \brief The actual results we have found.
+ std::vector<Result> Results;
+
+ /// \brief A record of all of the declarations we have found and placed
+ /// into the result set, used to ensure that no declaration ever gets into
+ /// the result set twice.
+ llvm::SmallPtrSet<const Decl*, 16> AllDeclsFound;
+
+ typedef std::pair<const NamedDecl *, unsigned> DeclIndexPair;
+
+ /// \brief An entry in the shadow map, which is optimized to store
+ /// a single (declaration, index) mapping (the common case) but
+ /// can also store a list of (declaration, index) mappings.
+ class ShadowMapEntry {
+ typedef SmallVector<DeclIndexPair, 4> DeclIndexPairVector;
+
+ /// \brief Contains either the solitary NamedDecl * or a vector
+ /// of (declaration, index) pairs.
+ llvm::PointerUnion<const NamedDecl *, DeclIndexPairVector*> DeclOrVector;
+
+ /// \brief When the entry contains a single declaration, this is
+ /// the index associated with that entry.
+ unsigned SingleDeclIndex;
+
+ public:
+ ShadowMapEntry() : DeclOrVector(), SingleDeclIndex(0) { }
+
+ void Add(const NamedDecl *ND, unsigned Index) {
+ if (DeclOrVector.isNull()) {
+ // 0 -> 1 elements: just set the single element information.
+ DeclOrVector = ND;
+ SingleDeclIndex = Index;
+ return;
+ }
+
+ if (const NamedDecl *PrevND =
+ DeclOrVector.dyn_cast<const NamedDecl *>()) {
+ // 1 -> 2 elements: create the vector of results and push in the
+ // existing declaration.
+ DeclIndexPairVector *Vec = new DeclIndexPairVector;
+ Vec->push_back(DeclIndexPair(PrevND, SingleDeclIndex));
+ DeclOrVector = Vec;
+ }
+
+ // Add the new element to the end of the vector.
+ DeclOrVector.get<DeclIndexPairVector*>()->push_back(
+ DeclIndexPair(ND, Index));
+ }
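+
+ // Illustrative growth (hypothetical decls): Add(D1, 0) stores D1 and its
+ // index inline; a later Add(D2, 3) promotes the entry to a heap-allocated
+ // vector holding (D1, 0) and (D2, 3).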
+
+ void Destroy() {
+ if (DeclIndexPairVector *Vec
+ = DeclOrVector.dyn_cast<DeclIndexPairVector *>()) {
+ delete Vec;
+ DeclOrVector = ((NamedDecl *)nullptr);
+ }
+ }
+
+ // Iteration.
+ class iterator;
+ iterator begin() const;
+ iterator end() const;
+ };
+
+ /// \brief A mapping from declaration names to the declarations that have
+ /// this name within a particular scope and their index within the list of
+ /// results.
+ typedef llvm::DenseMap<DeclarationName, ShadowMapEntry> ShadowMap;
+
+ /// \brief The semantic analysis object for which results are being
+ /// produced.
+ Sema &SemaRef;
+
+ /// \brief The allocator used to allocate new code-completion strings.
+ CodeCompletionAllocator &Allocator;
+
+ CodeCompletionTUInfo &CCTUInfo;
+
+ /// \brief If non-NULL, a filter function used to remove any code-completion
+ /// results that are not desirable.
+ LookupFilter Filter;
+
+ /// \brief Whether we should allow declarations as
+ /// nested-name-specifiers that would otherwise be filtered out.
+ bool AllowNestedNameSpecifiers;
+
+ /// \brief If set, the type that we would prefer our resulting value
+ /// declarations to have.
+ ///
+ /// Closely matching the preferred type gives a boost to a result's
+ /// priority.
+ CanQualType PreferredType;
+
+ /// \brief A list of shadow maps, which is used to model name hiding at
+ /// different levels of, e.g., the inheritance hierarchy.
+ std::list<ShadowMap> ShadowMaps;
+
+ /// \brief If we're potentially referring to a C++ member function, the set
+ /// of qualifiers applied to the object type.
+ Qualifiers ObjectTypeQualifiers;
+
+ /// \brief Whether the \p ObjectTypeQualifiers field is active.
+ bool HasObjectTypeQualifiers;
+
+ /// \brief The selector that we prefer.
+ Selector PreferredSelector;
+
+ /// \brief The completion context in which we are gathering results.
+ CodeCompletionContext CompletionContext;
+
+ /// \brief If we are in an instance method definition, the \@implementation
+ /// object.
+ ObjCImplementationDecl *ObjCImplementation;
+
+ void AdjustResultPriorityForDecl(Result &R);
+
+ void MaybeAddConstructorResults(Result R);
+
+ public:
+ explicit ResultBuilder(Sema &SemaRef, CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo,
+ const CodeCompletionContext &CompletionContext,
+ LookupFilter Filter = nullptr)
+ : SemaRef(SemaRef), Allocator(Allocator), CCTUInfo(CCTUInfo),
+ Filter(Filter),
+ AllowNestedNameSpecifiers(false), HasObjectTypeQualifiers(false),
+ CompletionContext(CompletionContext),
+ ObjCImplementation(nullptr)
+ {
+ // If this is an Objective-C instance method definition, dig out the
+ // corresponding implementation.
+ switch (CompletionContext.getKind()) {
+ case CodeCompletionContext::CCC_Expression:
+ case CodeCompletionContext::CCC_ObjCMessageReceiver:
+ case CodeCompletionContext::CCC_ParenthesizedExpression:
+ case CodeCompletionContext::CCC_Statement:
+ case CodeCompletionContext::CCC_Recovery:
+ if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl())
+ if (Method->isInstanceMethod())
+ if (ObjCInterfaceDecl *Interface = Method->getClassInterface())
+ ObjCImplementation = Interface->getImplementation();
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ /// \brief Determine the priority for a reference to the given declaration.
+ unsigned getBasePriority(const NamedDecl *D);
+
+ /// \brief Whether we should include code patterns in the completion
+ /// results.
+ bool includeCodePatterns() const {
+ return SemaRef.CodeCompleter &&
+ SemaRef.CodeCompleter->includeCodePatterns();
+ }
+
+ /// \brief Set the filter used for code-completion results.
+ void setFilter(LookupFilter Filter) {
+ this->Filter = Filter;
+ }
+
+ Result *data() { return Results.empty()? nullptr : &Results.front(); }
+ unsigned size() const { return Results.size(); }
+ bool empty() const { return Results.empty(); }
+
+ /// \brief Specify the preferred type.
+ void setPreferredType(QualType T) {
+ PreferredType = SemaRef.Context.getCanonicalType(T);
+ }
+
+ /// \brief Set the cv-qualifiers on the object type, for use in filtering
+ /// calls to member functions.
+ ///
+ /// When there are qualifiers in this set, they will be used to filter
+ /// out member functions that aren't available (because there will be a
+ /// cv-qualifier mismatch) or prefer functions with an exact qualifier
+ /// match.
+ void setObjectTypeQualifiers(Qualifiers Quals) {
+ ObjectTypeQualifiers = Quals;
+ HasObjectTypeQualifiers = true;
+ }
+
+ /// \brief Set the preferred selector.
+ ///
+ /// When an Objective-C method declaration result is added, and that
+ /// method's selector matches this preferred selector, we give that method
+ /// a slight priority boost.
+ void setPreferredSelector(Selector Sel) {
+ PreferredSelector = Sel;
+ }
+
+ /// \brief Retrieve the code-completion context for which results are
+ /// being collected.
+ const CodeCompletionContext &getCompletionContext() const {
+ return CompletionContext;
+ }
+
+ /// \brief Specify whether nested-name-specifiers are allowed.
+ void allowNestedNameSpecifiers(bool Allow = true) {
+ AllowNestedNameSpecifiers = Allow;
+ }
+
+ /// \brief Return the semantic analysis object for which we are collecting
+ /// code completion results.
+ Sema &getSema() const { return SemaRef; }
+
+ /// \brief Retrieve the allocator used to allocate code completion strings.
+ CodeCompletionAllocator &getAllocator() const { return Allocator; }
+
+ CodeCompletionTUInfo &getCodeCompletionTUInfo() const { return CCTUInfo; }
+
+ /// \brief Determine whether the given declaration is at all interesting
+ /// as a code-completion result.
+ ///
+ /// \param ND the declaration that we are inspecting.
+ ///
+ /// \param AsNestedNameSpecifier will be set true if this declaration is
+ /// only interesting when it is a nested-name-specifier.
+ bool isInterestingDecl(const NamedDecl *ND,
+ bool &AsNestedNameSpecifier) const;
+
+ /// \brief Check whether the result is hidden by the Hiding declaration.
+ ///
+ /// \returns true if the result is hidden and cannot be found, false if
+ /// the hidden result could still be found. When false, \p R may be
+ /// modified to describe how the result can be found (e.g., via extra
+ /// qualification).
+ bool CheckHiddenResult(Result &R, DeclContext *CurContext,
+ const NamedDecl *Hiding);
+
+ /// \brief Add a new result to this result set (if it isn't already in one
+ /// of the shadow maps), or replace an existing result (for, e.g., a
+ /// redeclaration).
+ ///
+ /// \param R the result to add (if it is unique).
+ ///
+ /// \param CurContext the context in which this result will be named.
+ void MaybeAddResult(Result R, DeclContext *CurContext = nullptr);
+
+ /// \brief Add a new result to this result set, where we already know
+ /// the hiding declaration (if any).
+ ///
+ /// \param R the result to add (if it is unique).
+ ///
+ /// \param CurContext the context in which this result will be named.
+ ///
+ /// \param Hiding the declaration that hides the result.
+ ///
+ /// \param InBaseClass whether the result was found in a base
+ /// class of the searched context.
+ void AddResult(Result R, DeclContext *CurContext, NamedDecl *Hiding,
+ bool InBaseClass);
+
+ /// \brief Add a new non-declaration result to this result set.
+ void AddResult(Result R);
+
+ /// \brief Enter into a new scope.
+ void EnterNewScope();
+
+ /// \brief Exit from the current scope.
+ void ExitScope();
+
+ /// \brief Ignore this declaration, if it is seen again.
+ void Ignore(const Decl *D) { AllDeclsFound.insert(D->getCanonicalDecl()); }
+
+ /// \name Name lookup predicates
+ ///
+ /// These predicates can be passed to the name lookup functions to filter the
+ /// results of name lookup. All of the predicates have the same type, so that
+ /// they can be used interchangeably wherever a LookupFilter is expected.
+ ///
+ //@{
+ bool IsOrdinaryName(const NamedDecl *ND) const;
+ bool IsOrdinaryNonTypeName(const NamedDecl *ND) const;
+ bool IsIntegralConstantValue(const NamedDecl *ND) const;
+ bool IsOrdinaryNonValueName(const NamedDecl *ND) const;
+ bool IsNestedNameSpecifier(const NamedDecl *ND) const;
+ bool IsEnum(const NamedDecl *ND) const;
+ bool IsClassOrStruct(const NamedDecl *ND) const;
+ bool IsUnion(const NamedDecl *ND) const;
+ bool IsNamespace(const NamedDecl *ND) const;
+ bool IsNamespaceOrAlias(const NamedDecl *ND) const;
+ bool IsType(const NamedDecl *ND) const;
+ bool IsMember(const NamedDecl *ND) const;
+ bool IsObjCIvar(const NamedDecl *ND) const;
+ bool IsObjCMessageReceiver(const NamedDecl *ND) const;
+ bool IsObjCMessageReceiverOrLambdaCapture(const NamedDecl *ND) const;
+ bool IsObjCCollection(const NamedDecl *ND) const;
+ bool IsImpossibleToSatisfy(const NamedDecl *ND) const;
+ //@}
+ };
+}
+
+class ResultBuilder::ShadowMapEntry::iterator {
+ llvm::PointerUnion<const NamedDecl *, const DeclIndexPair *> DeclOrIterator;
+ unsigned SingleDeclIndex;
+
+public:
+ typedef DeclIndexPair value_type;
+ typedef value_type reference;
+ typedef std::ptrdiff_t difference_type;
+ typedef std::input_iterator_tag iterator_category;
+
+ class pointer {
+ DeclIndexPair Value;
+
+ public:
+ pointer(const DeclIndexPair &Value) : Value(Value) { }
+
+ const DeclIndexPair *operator->() const {
+ return &Value;
+ }
+ };
+
+ iterator() : DeclOrIterator((NamedDecl *)nullptr), SingleDeclIndex(0) {}
+
+ iterator(const NamedDecl *SingleDecl, unsigned Index)
+ : DeclOrIterator(SingleDecl), SingleDeclIndex(Index) { }
+
+ iterator(const DeclIndexPair *Iterator)
+ : DeclOrIterator(Iterator), SingleDeclIndex(0) { }
+
+ iterator &operator++() {
+ if (DeclOrIterator.is<const NamedDecl *>()) {
+ DeclOrIterator = (NamedDecl *)nullptr;
+ SingleDeclIndex = 0;
+ return *this;
+ }
+
+ const DeclIndexPair *I = DeclOrIterator.get<const DeclIndexPair*>();
+ ++I;
+ DeclOrIterator = I;
+ return *this;
+ }
+
+ /*iterator operator++(int) {
+ iterator tmp(*this);
+ ++(*this);
+ return tmp;
+ }*/
+
+ reference operator*() const {
+ if (const NamedDecl *ND = DeclOrIterator.dyn_cast<const NamedDecl *>())
+ return reference(ND, SingleDeclIndex);
+
+ return *DeclOrIterator.get<const DeclIndexPair*>();
+ }
+
+ pointer operator->() const {
+ return pointer(**this);
+ }
+
+ friend bool operator==(const iterator &X, const iterator &Y) {
+ return X.DeclOrIterator.getOpaqueValue()
+ == Y.DeclOrIterator.getOpaqueValue() &&
+ X.SingleDeclIndex == Y.SingleDeclIndex;
+ }
+
+ friend bool operator!=(const iterator &X, const iterator &Y) {
+ return !(X == Y);
+ }
+};
+
+ResultBuilder::ShadowMapEntry::iterator
+ResultBuilder::ShadowMapEntry::begin() const {
+ if (DeclOrVector.isNull())
+ return iterator();
+
+ if (const NamedDecl *ND = DeclOrVector.dyn_cast<const NamedDecl *>())
+ return iterator(ND, SingleDeclIndex);
+
+ return iterator(DeclOrVector.get<DeclIndexPairVector *>()->begin());
+}
+
+ResultBuilder::ShadowMapEntry::iterator
+ResultBuilder::ShadowMapEntry::end() const {
+ if (DeclOrVector.is<const NamedDecl *>() || DeclOrVector.isNull())
+ return iterator();
+
+ return iterator(DeclOrVector.get<DeclIndexPairVector *>()->end());
+}
+
+/// \brief Compute the qualification required to get from the current context
+/// (\p CurContext) to the target context (\p TargetContext).
+///
+/// \param Context the AST context in which the qualification will be used.
+///
+/// \param CurContext the context where an entity is being named, which is
+/// typically based on the current scope.
+///
+/// \param TargetContext the context in which the named entity actually
+/// resides.
+///
+/// \returns a nested name specifier that refers into the target context, or
+/// NULL if no qualification is needed.
+static NestedNameSpecifier *
+getRequiredQualification(ASTContext &Context,
+ const DeclContext *CurContext,
+ const DeclContext *TargetContext) {
+ SmallVector<const DeclContext *, 4> TargetParents;
+
+ for (const DeclContext *CommonAncestor = TargetContext;
+ CommonAncestor && !CommonAncestor->Encloses(CurContext);
+ CommonAncestor = CommonAncestor->getLookupParent()) {
+ if (CommonAncestor->isTransparentContext() ||
+ CommonAncestor->isFunctionOrMethod())
+ continue;
+
+ TargetParents.push_back(CommonAncestor);
+ }
+
+ NestedNameSpecifier *Result = nullptr;
+ while (!TargetParents.empty()) {
+ const DeclContext *Parent = TargetParents.pop_back_val();
+
+ if (const NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(Parent)) {
+ if (!Namespace->getIdentifier())
+ continue;
+
+ Result = NestedNameSpecifier::Create(Context, Result, Namespace);
+ }
+ else if (const TagDecl *TD = dyn_cast<TagDecl>(Parent))
+ Result = NestedNameSpecifier::Create(Context, Result,
+ false,
+ Context.getTypeDeclType(TD).getTypePtr());
+ }
+ return Result;
+}
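+
+// Illustrative (hypothetical declarations): completing a member of
+// namespace B from the global scope in
+//
+//   namespace A { namespace B { void f(); } }
+//
+// pushes B then A (neither encloses the translation unit), and the second
+// loop pops A first, producing the specifier `A::B::'.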
+
+/// Determine whether \p Id is a name reserved for the implementation (C99
+/// 7.1.3, C++ [lib.global.names]).
+static bool isReservedName(const IdentifierInfo *Id) {
+ if (Id->getLength() < 2)
+ return false;
+ const char *Name = Id->getNameStart();
+ return Name[0] == '_' &&
+ (Name[1] == '_' || (Name[1] >= 'A' && Name[1] <= 'Z'));
+}
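+
+// For example, under this check alone (illustrative):
+//   "__vector" -> reserved (leading "__")
+//   "_Bool"    -> reserved ('_' followed by an uppercase letter)
+//   "_foo"     -> not flagged here, although C99 still reserves it at
+//                 file scope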
+
+bool ResultBuilder::isInterestingDecl(const NamedDecl *ND,
+ bool &AsNestedNameSpecifier) const {
+ AsNestedNameSpecifier = false;
+
+ auto *Named = ND;
+ ND = ND->getUnderlyingDecl();
+
+ // Skip unnamed entities.
+ if (!ND->getDeclName())
+ return false;
+
+ // Friend declarations and declarations introduced due to friends are never
+ // added as results.
+ if (ND->getFriendObjectKind() == Decl::FOK_Undeclared)
+ return false;
+
+ // Class template (partial) specializations are never added as results.
+ if (isa<ClassTemplateSpecializationDecl>(ND) ||
+ isa<ClassTemplatePartialSpecializationDecl>(ND))
+ return false;
+
+ // Using declarations themselves are never added as results.
+ if (isa<UsingDecl>(ND))
+ return false;
+
+ // Some declarations have reserved names that we don't want to ever show.
+ // Filter out names reserved for the implementation if they come from a
+ // system header.
+ // TODO: Add a predicate for this.
+ if (const IdentifierInfo *Id = ND->getIdentifier())
+ if (isReservedName(Id) &&
+ (ND->getLocation().isInvalid() ||
+ SemaRef.SourceMgr.isInSystemHeader(
+ SemaRef.SourceMgr.getSpellingLoc(ND->getLocation()))))
+ return false;
+
+ if (Filter == &ResultBuilder::IsNestedNameSpecifier ||
+ (isa<NamespaceDecl>(ND) &&
+ Filter != &ResultBuilder::IsNamespace &&
+ Filter != &ResultBuilder::IsNamespaceOrAlias &&
+ Filter != nullptr))
+ AsNestedNameSpecifier = true;
+
+ // Filter out any unwanted results.
+ if (Filter && !(this->*Filter)(Named)) {
+ // Check whether it is interesting as a nested-name-specifier.
+ if (AllowNestedNameSpecifiers && SemaRef.getLangOpts().CPlusPlus &&
+ IsNestedNameSpecifier(ND) &&
+ (Filter != &ResultBuilder::IsMember ||
+ (isa<CXXRecordDecl>(ND) &&
+ cast<CXXRecordDecl>(ND)->isInjectedClassName()))) {
+ AsNestedNameSpecifier = true;
+ return true;
+ }
+
+ return false;
+ }
+ // ... then it must be interesting!
+ return true;
+}
+
+bool ResultBuilder::CheckHiddenResult(Result &R, DeclContext *CurContext,
+ const NamedDecl *Hiding) {
+ // In C, there is no way to refer to a hidden name.
+ // FIXME: This isn't true; we can find a tag name hidden by an ordinary
+ // name if we introduce the tag type.
+ if (!SemaRef.getLangOpts().CPlusPlus)
+ return true;
+
+ const DeclContext *HiddenCtx =
+ R.Declaration->getDeclContext()->getRedeclContext();
+
+ // There is no way to qualify a name declared in a function or method.
+ if (HiddenCtx->isFunctionOrMethod())
+ return true;
+
+ if (HiddenCtx == Hiding->getDeclContext()->getRedeclContext())
+ return true;
+
+ // We can refer to the result with the appropriate qualification. Do it.
+ R.Hidden = true;
+ R.QualifierIsInformative = false;
+
+ if (!R.Qualifier)
+ R.Qualifier = getRequiredQualification(SemaRef.Context,
+ CurContext,
+ R.Declaration->getDeclContext());
+ return false;
+}
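+
+// Illustrative: given `namespace N { int value; }' made visible by a
+// using-directive and a local `int value' hiding it, the N::value result
+// survives but is marked Hidden with qualifier `N::', so it can still be
+// completed as a qualified name; in C the hidden result is simply dropped.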
+
+/// \brief Determine the simplified class of the given canonical type, as
+/// used to decide whether two types are "similar enough" when adjusting
+/// result priorities.
+SimplifiedTypeClass clang::getSimplifiedTypeClass(CanQualType T) {
+ switch (T->getTypeClass()) {
+ case Type::Builtin:
+ switch (cast<BuiltinType>(T)->getKind()) {
+ case BuiltinType::Void:
+ return STC_Void;
+
+ case BuiltinType::NullPtr:
+ return STC_Pointer;
+
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ return STC_Other;
+
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ return STC_ObjectiveC;
+
+ default:
+ return STC_Arithmetic;
+ }
+
+ case Type::Complex:
+ return STC_Arithmetic;
+
+ case Type::Pointer:
+ return STC_Pointer;
+
+ case Type::BlockPointer:
+ return STC_Block;
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ return getSimplifiedTypeClass(T->getAs<ReferenceType>()->getPointeeType());
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::DependentSizedArray:
+ return STC_Array;
+
+ case Type::DependentSizedExtVector:
+ case Type::Vector:
+ case Type::ExtVector:
+ return STC_Arithmetic;
+
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ return STC_Function;
+
+ case Type::Record:
+ return STC_Record;
+
+ case Type::Enum:
+ return STC_Arithmetic;
+
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ return STC_ObjectiveC;
+
+ default:
+ return STC_Other;
+ }
+}
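+
+// A few illustrative mappings: `int' and `enum E' map to STC_Arithmetic,
+// `int *' and `nullptr_t' to STC_Pointer, `int[4]' to STC_Array, and
+// `int &' to the class of `int', since references are looked through.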
+
+/// \brief Get the type that a given expression will have if this declaration
+/// is used as an expression in its "typical" code-completion form.
+QualType clang::getDeclUsageType(ASTContext &C, const NamedDecl *ND) {
+ ND = cast<NamedDecl>(ND->getUnderlyingDecl());
+
+ if (const TypeDecl *Type = dyn_cast<TypeDecl>(ND))
+ return C.getTypeDeclType(Type);
+ if (const ObjCInterfaceDecl *Iface = dyn_cast<ObjCInterfaceDecl>(ND))
+ return C.getObjCInterfaceType(Iface);
+
+ QualType T;
+ if (const FunctionDecl *Function = ND->getAsFunction())
+ T = Function->getCallResultType();
+ else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND))
+ T = Method->getSendResultType();
+ else if (const EnumConstantDecl *Enumerator = dyn_cast<EnumConstantDecl>(ND))
+ T = C.getTypeDeclType(cast<EnumDecl>(Enumerator->getDeclContext()));
+ else if (const ObjCPropertyDecl *Property = dyn_cast<ObjCPropertyDecl>(ND))
+ T = Property->getType();
+ else if (const ValueDecl *Value = dyn_cast<ValueDecl>(ND))
+ T = Value->getType();
+ else
+ return QualType();
+
+ // Dig through references, function pointers, and block pointers to
+ // get down to the likely type of an expression when the entity is
+ // used.
+ do {
+ if (const ReferenceType *Ref = T->getAs<ReferenceType>()) {
+ T = Ref->getPointeeType();
+ continue;
+ }
+
+ if (const PointerType *Pointer = T->getAs<PointerType>()) {
+ if (Pointer->getPointeeType()->isFunctionType()) {
+ T = Pointer->getPointeeType();
+ continue;
+ }
+
+ break;
+ }
+
+ if (const BlockPointerType *Block = T->getAs<BlockPointerType>()) {
+ T = Block->getPointeeType();
+ continue;
+ }
+
+ if (const FunctionType *Function = T->getAs<FunctionType>()) {
+ T = Function->getReturnType();
+ continue;
+ }
+
+ break;
+ } while (true);
+
+ return T;
+}
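+
+// For example (illustrative): `int &r' yields `int'; `int (*fp)(void)' and
+// a block `int (^b)(void)' both dig through to the return type `int'; a
+// pointer to a non-function such as `int *p' is left as `int *'.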
+
+unsigned ResultBuilder::getBasePriority(const NamedDecl *ND) {
+ if (!ND)
+ return CCP_Unlikely;
+
+ // Context-based decisions.
+ const DeclContext *LexicalDC = ND->getLexicalDeclContext();
+ if (LexicalDC->isFunctionOrMethod()) {
+ // _cmd is relatively rare
+ if (const ImplicitParamDecl *ImplicitParam =
+ dyn_cast<ImplicitParamDecl>(ND))
+ if (ImplicitParam->getIdentifier() &&
+ ImplicitParam->getIdentifier()->isStr("_cmd"))
+ return CCP_ObjC_cmd;
+
+ return CCP_LocalDeclaration;
+ }
+
+ const DeclContext *DC = ND->getDeclContext()->getRedeclContext();
+ if (DC->isRecord() || isa<ObjCContainerDecl>(DC))
+ return CCP_MemberDeclaration;
+
+ // Content-based decisions.
+ if (isa<EnumConstantDecl>(ND))
+ return CCP_Constant;
+
+ // Use CCP_Type for type declarations unless we're in a statement,
+ // Objective-C message receiver, or parenthesized expression context,
+ // where the user is just as likely to want a non-type declaration.
+ if ((isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND)) &&
+ !(CompletionContext.getKind() == CodeCompletionContext::CCC_Statement ||
+ CompletionContext.getKind()
+ == CodeCompletionContext::CCC_ObjCMessageReceiver ||
+ CompletionContext.getKind()
+ == CodeCompletionContext::CCC_ParenthesizedExpression))
+ return CCP_Type;
+
+ return CCP_Declaration;
+}
+
+void ResultBuilder::AdjustResultPriorityForDecl(Result &R) {
+ // If this is an Objective-C method declaration whose selector matches our
+ // preferred selector, give it a priority boost.
+ if (!PreferredSelector.isNull())
+ if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(R.Declaration))
+ if (PreferredSelector == Method->getSelector())
+ R.Priority += CCD_SelectorMatch;
+
+ // If we have a preferred type, adjust the priority for results with exactly-
+ // matching or nearly-matching types.
+ if (!PreferredType.isNull()) {
+ QualType T = getDeclUsageType(SemaRef.Context, R.Declaration);
+ if (!T.isNull()) {
+ CanQualType TC = SemaRef.Context.getCanonicalType(T);
+ // Check for exactly-matching types (modulo qualifiers).
+ if (SemaRef.Context.hasSameUnqualifiedType(PreferredType, TC))
+ R.Priority /= CCF_ExactTypeMatch;
+ // Check for nearly-matching types, based on classification of each.
+ else if ((getSimplifiedTypeClass(PreferredType)
+ == getSimplifiedTypeClass(TC)) &&
+ !(PreferredType->isEnumeralType() && TC->isEnumeralType()))
+ R.Priority /= CCF_SimilarTypeMatch;
+ }
+ }
+}
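+
+// Illustrative effect: with PreferredType `bool', a `bool' result's
+// priority is divided by CCF_ExactTypeMatch, while an `int' result (same
+// simplified class, STC_Arithmetic) only receives the smaller
+// CCF_SimilarTypeMatch division; lower values rank higher, so the exact
+// match sorts first.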
+
+void ResultBuilder::MaybeAddConstructorResults(Result R) {
+ if (!SemaRef.getLangOpts().CPlusPlus || !R.Declaration ||
+ !CompletionContext.wantConstructorResults())
+ return;
+
+ ASTContext &Context = SemaRef.Context;
+ const NamedDecl *D = R.Declaration;
+ const CXXRecordDecl *Record = nullptr;
+ if (const ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(D))
+ Record = ClassTemplate->getTemplatedDecl();
+ else if ((Record = dyn_cast<CXXRecordDecl>(D))) {
+ // Skip specializations and partial specializations.
+ if (isa<ClassTemplateSpecializationDecl>(Record))
+ return;
+ } else {
+ // There are no constructors here.
+ return;
+ }
+
+ Record = Record->getDefinition();
+ if (!Record)
+ return;
+
+ QualType RecordTy = Context.getTypeDeclType(Record);
+ DeclarationName ConstructorName
+ = Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(RecordTy));
+ DeclContext::lookup_result Ctors = Record->lookup(ConstructorName);
+ for (DeclContext::lookup_iterator I = Ctors.begin(),
+ E = Ctors.end();
+ I != E; ++I) {
+ R.Declaration = *I;
+ R.CursorKind = getCursorKindForDecl(R.Declaration);
+ Results.push_back(R);
+ }
+}
+
+void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
+ assert(!ShadowMaps.empty() && "Must enter into a results scope");
+
+ if (R.Kind != Result::RK_Declaration) {
+ // For non-declaration results, just add the result.
+ Results.push_back(R);
+ return;
+ }
+
+ // Look through using declarations.
+ if (const UsingShadowDecl *Using =
+ dyn_cast<UsingShadowDecl>(R.Declaration)) {
+ MaybeAddResult(Result(Using->getTargetDecl(),
+ getBasePriority(Using->getTargetDecl()),
+ R.Qualifier),
+ CurContext);
+ return;
+ }
+
+ const Decl *CanonDecl = R.Declaration->getCanonicalDecl();
+ unsigned IDNS = CanonDecl->getIdentifierNamespace();
+
+ bool AsNestedNameSpecifier = false;
+ if (!isInterestingDecl(R.Declaration, AsNestedNameSpecifier))
+ return;
+
+ // C++ constructors are never found by name lookup.
+ if (isa<CXXConstructorDecl>(R.Declaration))
+ return;
+
+ ShadowMap &SMap = ShadowMaps.back();
+ ShadowMapEntry::iterator I, IEnd;
+ ShadowMap::iterator NamePos = SMap.find(R.Declaration->getDeclName());
+ if (NamePos != SMap.end()) {
+ I = NamePos->second.begin();
+ IEnd = NamePos->second.end();
+ }
+
+ for (; I != IEnd; ++I) {
+ const NamedDecl *ND = I->first;
+ unsigned Index = I->second;
+ if (ND->getCanonicalDecl() == CanonDecl) {
+ // This is a redeclaration. Always pick the newer declaration.
+ Results[Index].Declaration = R.Declaration;
+
+ // We're done.
+ return;
+ }
+ }
+
+ // This is a new declaration in this scope. However, check whether this
+ // declaration name is hidden by a similarly-named declaration in an outer
+ // scope.
+ std::list<ShadowMap>::iterator SM, SMEnd = ShadowMaps.end();
+ --SMEnd;
+ for (SM = ShadowMaps.begin(); SM != SMEnd; ++SM) {
+ ShadowMapEntry::iterator I, IEnd;
+ ShadowMap::iterator NamePos = SM->find(R.Declaration->getDeclName());
+ if (NamePos != SM->end()) {
+ I = NamePos->second.begin();
+ IEnd = NamePos->second.end();
+ }
+ for (; I != IEnd; ++I) {
+ // A tag declaration does not hide a non-tag declaration.
+ if (I->first->hasTagIdentifierNamespace() &&
+ (IDNS & (Decl::IDNS_Member | Decl::IDNS_Ordinary |
+ Decl::IDNS_LocalExtern | Decl::IDNS_ObjCProtocol)))
+ continue;
+
+ // Protocols are in distinct namespaces from everything else.
+ if (((I->first->getIdentifierNamespace() & Decl::IDNS_ObjCProtocol)
+ || (IDNS & Decl::IDNS_ObjCProtocol)) &&
+ I->first->getIdentifierNamespace() != IDNS)
+ continue;
+
+ // The newly-added result is hidden by an entry in the shadow map.
+ if (CheckHiddenResult(R, CurContext, I->first))
+ return;
+
+ break;
+ }
+ }
+
+ // Make sure that any given declaration only shows up in the result set once.
+ if (!AllDeclsFound.insert(CanonDecl).second)
+ return;
+
+ // If the filter is for nested-name-specifiers, then this result starts a
+ // nested-name-specifier.
+ if (AsNestedNameSpecifier) {
+ R.StartsNestedNameSpecifier = true;
+ R.Priority = CCP_NestedNameSpecifier;
+ } else
+ AdjustResultPriorityForDecl(R);
+
+ // If this result is supposed to have an informative qualifier, add one.
+ if (R.QualifierIsInformative && !R.Qualifier &&
+ !R.StartsNestedNameSpecifier) {
+ const DeclContext *Ctx = R.Declaration->getDeclContext();
+ if (const NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(Ctx))
+ R.Qualifier = NestedNameSpecifier::Create(SemaRef.Context, nullptr,
+ Namespace);
+ else if (const TagDecl *Tag = dyn_cast<TagDecl>(Ctx))
+ R.Qualifier = NestedNameSpecifier::Create(SemaRef.Context, nullptr,
+ false, SemaRef.Context.getTypeDeclType(Tag).getTypePtr());
+ else
+ R.QualifierIsInformative = false;
+ }
+
+ // Insert this result into the set of results and into the current shadow
+ // map.
+ SMap[R.Declaration->getDeclName()].Add(R.Declaration, Results.size());
+ Results.push_back(R);
+
+ if (!AsNestedNameSpecifier)
+ MaybeAddConstructorResults(R);
+}
+
+void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
+ NamedDecl *Hiding, bool InBaseClass = false) {
+ if (R.Kind != Result::RK_Declaration) {
+ // For non-declaration results, just add the result.
+ Results.push_back(R);
+ return;
+ }
+
+ // Look through using declarations.
+ if (const UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(R.Declaration)) {
+ AddResult(Result(Using->getTargetDecl(),
+ getBasePriority(Using->getTargetDecl()),
+ R.Qualifier),
+ CurContext, Hiding);
+ return;
+ }
+
+ bool AsNestedNameSpecifier = false;
+ if (!isInterestingDecl(R.Declaration, AsNestedNameSpecifier))
+ return;
+
+ // C++ constructors are never found by name lookup.
+ if (isa<CXXConstructorDecl>(R.Declaration))
+ return;
+
+ if (Hiding && CheckHiddenResult(R, CurContext, Hiding))
+ return;
+
+ // Make sure that any given declaration only shows up in the result set once.
+ if (!AllDeclsFound.insert(R.Declaration->getCanonicalDecl()).second)
+ return;
+
+ // If the filter is for nested-name-specifiers, then this result starts a
+ // nested-name-specifier.
+ if (AsNestedNameSpecifier) {
+ R.StartsNestedNameSpecifier = true;
+ R.Priority = CCP_NestedNameSpecifier;
+ }
+ else if (Filter == &ResultBuilder::IsMember && !R.Qualifier && InBaseClass &&
+ isa<CXXRecordDecl>(R.Declaration->getDeclContext()
+ ->getRedeclContext()))
+ R.QualifierIsInformative = true;
+
+ // If this result is supposed to have an informative qualifier, add one.
+ if (R.QualifierIsInformative && !R.Qualifier &&
+ !R.StartsNestedNameSpecifier) {
+ const DeclContext *Ctx = R.Declaration->getDeclContext();
+ if (const NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(Ctx))
+ R.Qualifier = NestedNameSpecifier::Create(SemaRef.Context, nullptr,
+ Namespace);
+ else if (const TagDecl *Tag = dyn_cast<TagDecl>(Ctx))
+ R.Qualifier = NestedNameSpecifier::Create(SemaRef.Context, nullptr, false,
+ SemaRef.Context.getTypeDeclType(Tag).getTypePtr());
+ else
+ R.QualifierIsInformative = false;
+ }
+
+ // Adjust the priority if this result comes from a base class.
+ if (InBaseClass)
+ R.Priority += CCD_InBaseClass;
+
+ AdjustResultPriorityForDecl(R);
+
+ if (HasObjectTypeQualifiers)
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(R.Declaration))
+ if (Method->isInstance()) {
+ Qualifiers MethodQuals
+ = Qualifiers::fromCVRMask(Method->getTypeQualifiers());
+ if (ObjectTypeQualifiers == MethodQuals)
+ R.Priority += CCD_ObjectQualifierMatch;
+ else if (ObjectTypeQualifiers - MethodQuals) {
+ // The method cannot be invoked, because doing so would drop
+ // qualifiers.
+ return;
+ }
+ }
+
+ // Insert this result into the set of results.
+ Results.push_back(R);
+
+ if (!AsNestedNameSpecifier)
+ MaybeAddConstructorResults(R);
+}
+
+void ResultBuilder::AddResult(Result R) {
+ assert(R.Kind != Result::RK_Declaration &&
+ "Declaration results need more context");
+ Results.push_back(R);
+}
+
+/// \brief Enter into a new scope.
+void ResultBuilder::EnterNewScope() { ShadowMaps.emplace_back(); }
+
+/// \brief Exit from the current scope.
+void ResultBuilder::ExitScope() {
+ for (ShadowMap::iterator E = ShadowMaps.back().begin(),
+ EEnd = ShadowMaps.back().end();
+ E != EEnd;
+ ++E)
+ E->second.Destroy();
+
+ ShadowMaps.pop_back();
+}
+
+/// \brief Determines whether the given declaration will be found by
+/// ordinary name lookup.
+bool ResultBuilder::IsOrdinaryName(const NamedDecl *ND) const {
+ ND = cast<NamedDecl>(ND->getUnderlyingDecl());
+
+ // If name lookup finds a local extern declaration, then we are in a
+ // context where it behaves like an ordinary name.
+ unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_LocalExtern;
+ if (SemaRef.getLangOpts().CPlusPlus)
+ IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace | Decl::IDNS_Member;
+ else if (SemaRef.getLangOpts().ObjC1) {
+ if (isa<ObjCIvarDecl>(ND))
+ return true;
+ }
+
+ return ND->getIdentifierNamespace() & IDNS;
+}
+
+/// \brief Determines whether the given declaration will be found by
+/// ordinary name lookup but is not a type name.
+bool ResultBuilder::IsOrdinaryNonTypeName(const NamedDecl *ND) const {
+ ND = cast<NamedDecl>(ND->getUnderlyingDecl());
+ if (isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND))
+ return false;
+
+ unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_LocalExtern;
+ if (SemaRef.getLangOpts().CPlusPlus)
+ IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace | Decl::IDNS_Member;
+ else if (SemaRef.getLangOpts().ObjC1) {
+ if (isa<ObjCIvarDecl>(ND))
+ return true;
+ }
+
+ return ND->getIdentifierNamespace() & IDNS;
+}
+
+bool ResultBuilder::IsIntegralConstantValue(const NamedDecl *ND) const {
+ if (!IsOrdinaryNonTypeName(ND))
+ return false;
+
+ if (const ValueDecl *VD = dyn_cast<ValueDecl>(ND->getUnderlyingDecl()))
+ if (VD->getType()->isIntegralOrEnumerationType())
+ return true;
+
+ return false;
+}
+
+/// \brief Determines whether the given declaration will be found by
+/// ordinary name lookup but is not a value name.
+bool ResultBuilder::IsOrdinaryNonValueName(const NamedDecl *ND) const {
+ ND = cast<NamedDecl>(ND->getUnderlyingDecl());
+
+ unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_LocalExtern;
+ if (SemaRef.getLangOpts().CPlusPlus)
+ IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace;
+
+ return (ND->getIdentifierNamespace() & IDNS) &&
+ !isa<ValueDecl>(ND) && !isa<FunctionTemplateDecl>(ND) &&
+ !isa<ObjCPropertyDecl>(ND);
+}
+
+/// \brief Determines whether the given declaration is suitable as the
+/// start of a C++ nested-name-specifier, e.g., a class or namespace.
+bool ResultBuilder::IsNestedNameSpecifier(const NamedDecl *ND) const {
+ // Allow us to find class templates, too.
+ if (const ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
+ ND = ClassTemplate->getTemplatedDecl();
+
+ return SemaRef.isAcceptableNestedNameSpecifier(ND);
+}
+
+/// \brief Determines whether the given declaration is an enumeration.
+bool ResultBuilder::IsEnum(const NamedDecl *ND) const {
+ return isa<EnumDecl>(ND);
+}
+
+/// \brief Determines whether the given declaration is a class or struct.
+bool ResultBuilder::IsClassOrStruct(const NamedDecl *ND) const {
+ // Allow us to find class templates, too.
+ if (const ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
+ ND = ClassTemplate->getTemplatedDecl();
+
+ // For purposes of this check, interfaces match too.
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(ND))
+ return RD->getTagKind() == TTK_Class ||
+ RD->getTagKind() == TTK_Struct ||
+ RD->getTagKind() == TTK_Interface;
+
+ return false;
+}
+
+/// \brief Determines whether the given declaration is a union.
+bool ResultBuilder::IsUnion(const NamedDecl *ND) const {
+ // Allow us to find class templates, too.
+ if (const ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
+ ND = ClassTemplate->getTemplatedDecl();
+
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(ND))
+ return RD->getTagKind() == TTK_Union;
+
+ return false;
+}
+
+/// \brief Determines whether the given declaration is a namespace.
+bool ResultBuilder::IsNamespace(const NamedDecl *ND) const {
+ return isa<NamespaceDecl>(ND);
+}
+
+/// \brief Determines whether the given declaration is a namespace or
+/// namespace alias.
+bool ResultBuilder::IsNamespaceOrAlias(const NamedDecl *ND) const {
+ return isa<NamespaceDecl>(ND->getUnderlyingDecl());
+}
+
+/// \brief Determines whether the given declaration is a type.
+bool ResultBuilder::IsType(const NamedDecl *ND) const {
+ ND = ND->getUnderlyingDecl();
+ return isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND);
+}
+
+/// \brief Determines which members of a class should be visible via
+/// "." or "->". Only value declarations, nested name specifiers, and
+/// using declarations thereof should show up.
+bool ResultBuilder::IsMember(const NamedDecl *ND) const {
+ ND = ND->getUnderlyingDecl();
+ return isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND) ||
+ isa<ObjCPropertyDecl>(ND);
+}
+
+static bool isObjCReceiverType(ASTContext &C, QualType T) {
+ T = C.getCanonicalType(T);
+ switch (T->getTypeClass()) {
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ return true;
+
+ case Type::Builtin:
+ switch (cast<BuiltinType>(T)->getKind()) {
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ return true;
+
+ default:
+ break;
+ }
+ return false;
+
+ default:
+ break;
+ }
+
+ if (!C.getLangOpts().CPlusPlus)
+ return false;
+
+ // FIXME: We could perform more analysis here to determine whether a
+ // particular class type has any conversions to Objective-C types. For now,
+ // just accept all class types.
+ return T->isDependentType() || T->isRecordType();
+}
+
+bool ResultBuilder::IsObjCMessageReceiver(const NamedDecl *ND) const {
+ QualType T = getDeclUsageType(SemaRef.Context, ND);
+ if (T.isNull())
+ return false;
+
+ T = SemaRef.Context.getBaseElementType(T);
+ return isObjCReceiverType(SemaRef.Context, T);
+}
+
+bool ResultBuilder::IsObjCMessageReceiverOrLambdaCapture(const NamedDecl *ND) const {
+ if (IsObjCMessageReceiver(ND))
+ return true;
+
+ const VarDecl *Var = dyn_cast<VarDecl>(ND);
+ if (!Var)
+ return false;
+
+ return Var->hasLocalStorage() && !Var->hasAttr<BlocksAttr>();
+}
+
+bool ResultBuilder::IsObjCCollection(const NamedDecl *ND) const {
+ if ((SemaRef.getLangOpts().CPlusPlus && !IsOrdinaryName(ND)) ||
+ (!SemaRef.getLangOpts().CPlusPlus && !IsOrdinaryNonTypeName(ND)))
+ return false;
+
+ QualType T = getDeclUsageType(SemaRef.Context, ND);
+ if (T.isNull())
+ return false;
+
+ T = SemaRef.Context.getBaseElementType(T);
+ return T->isObjCObjectType() || T->isObjCObjectPointerType() ||
+ T->isObjCIdType() ||
+ (SemaRef.getLangOpts().CPlusPlus && T->isRecordType());
+}
+
+bool ResultBuilder::IsImpossibleToSatisfy(const NamedDecl *ND) const {
+ return false;
+}
+
+/// \brief Determines whether the given declaration is an Objective-C
+/// instance variable.
+bool ResultBuilder::IsObjCIvar(const NamedDecl *ND) const {
+ return isa<ObjCIvarDecl>(ND);
+}
+
+namespace {
+ /// \brief Visible declaration consumer that adds a code-completion result
+ /// for each visible declaration.
+ class CodeCompletionDeclConsumer : public VisibleDeclConsumer {
+ ResultBuilder &Results;
+ DeclContext *CurContext;
+
+ public:
+ CodeCompletionDeclConsumer(ResultBuilder &Results, DeclContext *CurContext)
+ : Results(Results), CurContext(CurContext) { }
+
+ void FoundDecl(NamedDecl *ND, NamedDecl *Hiding, DeclContext *Ctx,
+ bool InBaseClass) override {
+ bool Accessible = true;
+ if (Ctx)
+ Accessible = Results.getSema().IsSimplyAccessible(ND, Ctx);
+
+ ResultBuilder::Result Result(ND, Results.getBasePriority(ND), nullptr,
+ false, Accessible);
+ Results.AddResult(Result, CurContext, Hiding, InBaseClass);
+ }
+ };
+}
+
+/// \brief Add type specifiers for the current language as keyword results.
+static void AddTypeSpecifierResults(const LangOptions &LangOpts,
+ ResultBuilder &Results) {
+ typedef CodeCompletionResult Result;
+ Results.AddResult(Result("short", CCP_Type));
+ Results.AddResult(Result("long", CCP_Type));
+ Results.AddResult(Result("signed", CCP_Type));
+ Results.AddResult(Result("unsigned", CCP_Type));
+ Results.AddResult(Result("void", CCP_Type));
+ Results.AddResult(Result("char", CCP_Type));
+ Results.AddResult(Result("int", CCP_Type));
+ Results.AddResult(Result("float", CCP_Type));
+ Results.AddResult(Result("double", CCP_Type));
+ Results.AddResult(Result("enum", CCP_Type));
+ Results.AddResult(Result("struct", CCP_Type));
+ Results.AddResult(Result("union", CCP_Type));
+ Results.AddResult(Result("const", CCP_Type));
+ Results.AddResult(Result("volatile", CCP_Type));
+
+ if (LangOpts.C99) {
+ // C99-specific
+ Results.AddResult(Result("_Complex", CCP_Type));
+ Results.AddResult(Result("_Imaginary", CCP_Type));
+ Results.AddResult(Result("_Bool", CCP_Type));
+ Results.AddResult(Result("restrict", CCP_Type));
+ }
+
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ if (LangOpts.CPlusPlus) {
+ // C++-specific
+ Results.AddResult(Result("bool", CCP_Type +
+ (LangOpts.ObjC1? CCD_bool_in_ObjC : 0)));
+ Results.AddResult(Result("class", CCP_Type));
+ Results.AddResult(Result("wchar_t", CCP_Type));
+
+ // typename qualified-id
+ Builder.AddTypedTextChunk("typename");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("qualifier");
+ Builder.AddTextChunk("::");
+ Builder.AddPlaceholderChunk("name");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ if (LangOpts.CPlusPlus11) {
+ Results.AddResult(Result("auto", CCP_Type));
+ Results.AddResult(Result("char16_t", CCP_Type));
+ Results.AddResult(Result("char32_t", CCP_Type));
+
+ Builder.AddTypedTextChunk("decltype");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+ }
+
+ // GNU extensions
+ if (LangOpts.GNUMode) {
+ // FIXME: Enable when we actually support decimal floating point.
+ // Results.AddResult(Result("_Decimal32"));
+ // Results.AddResult(Result("_Decimal64"));
+ // Results.AddResult(Result("_Decimal128"));
+
+ Builder.AddTypedTextChunk("typeof");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ Builder.AddTypedTextChunk("typeof");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ // Nullability
+ Results.AddResult(Result("_Nonnull", CCP_Type));
+ Results.AddResult(Result("_Null_unspecified", CCP_Type));
+ Results.AddResult(Result("_Nullable", CCP_Type));
+}
+
+static void AddStorageSpecifiers(Sema::ParserCompletionContext CCC,
+ const LangOptions &LangOpts,
+ ResultBuilder &Results) {
+ typedef CodeCompletionResult Result;
+ // Note: we don't suggest either "auto" or "register", because both
+ // are pointless as storage specifiers. Elsewhere, we suggest "auto"
+ // in C++0x as a type specifier.
+ Results.AddResult(Result("extern"));
+ Results.AddResult(Result("static"));
+}
+
+static void AddFunctionSpecifiers(Sema::ParserCompletionContext CCC,
+ const LangOptions &LangOpts,
+ ResultBuilder &Results) {
+ typedef CodeCompletionResult Result;
+ switch (CCC) {
+ case Sema::PCC_Class:
+ case Sema::PCC_MemberTemplate:
+ if (LangOpts.CPlusPlus) {
+ Results.AddResult(Result("explicit"));
+ Results.AddResult(Result("friend"));
+ Results.AddResult(Result("mutable"));
+ Results.AddResult(Result("virtual"));
+ }
+ // Fall through
+
+ case Sema::PCC_ObjCInterface:
+ case Sema::PCC_ObjCImplementation:
+ case Sema::PCC_Namespace:
+ case Sema::PCC_Template:
+ if (LangOpts.CPlusPlus || LangOpts.C99)
+ Results.AddResult(Result("inline"));
+ break;
+
+ case Sema::PCC_ObjCInstanceVariableList:
+ case Sema::PCC_Expression:
+ case Sema::PCC_Statement:
+ case Sema::PCC_ForInit:
+ case Sema::PCC_Condition:
+ case Sema::PCC_RecoveryInFunction:
+ case Sema::PCC_Type:
+ case Sema::PCC_ParenthesizedExpression:
+ case Sema::PCC_LocalDeclarationSpecifiers:
+ break;
+ }
+}
+
+static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt);
+static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt);
+static void AddObjCVisibilityResults(const LangOptions &LangOpts,
+ ResultBuilder &Results,
+ bool NeedAt);
+static void AddObjCImplementationResults(const LangOptions &LangOpts,
+ ResultBuilder &Results,
+ bool NeedAt);
+static void AddObjCInterfaceResults(const LangOptions &LangOpts,
+ ResultBuilder &Results,
+ bool NeedAt);
+static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt);
+
+static void AddTypedefResult(ResultBuilder &Results) {
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ Builder.AddTypedTextChunk("typedef");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("name");
+ Results.AddResult(CodeCompletionResult(Builder.TakeString()));
+}
+
+static bool WantTypesInContext(Sema::ParserCompletionContext CCC,
+ const LangOptions &LangOpts) {
+ switch (CCC) {
+ case Sema::PCC_Namespace:
+ case Sema::PCC_Class:
+ case Sema::PCC_ObjCInstanceVariableList:
+ case Sema::PCC_Template:
+ case Sema::PCC_MemberTemplate:
+ case Sema::PCC_Statement:
+ case Sema::PCC_RecoveryInFunction:
+ case Sema::PCC_Type:
+ case Sema::PCC_ParenthesizedExpression:
+ case Sema::PCC_LocalDeclarationSpecifiers:
+ return true;
+
+ case Sema::PCC_Expression:
+ case Sema::PCC_Condition:
+ return LangOpts.CPlusPlus;
+
+ case Sema::PCC_ObjCInterface:
+ case Sema::PCC_ObjCImplementation:
+ return false;
+
+ case Sema::PCC_ForInit:
+ return LangOpts.CPlusPlus || LangOpts.ObjC1 || LangOpts.C99;
+ }
+
+ llvm_unreachable("Invalid ParserCompletionContext!");
+}
+
+static PrintingPolicy getCompletionPrintingPolicy(const ASTContext &Context,
+ const Preprocessor &PP) {
+ PrintingPolicy Policy = Sema::getPrintingPolicy(Context, PP);
+ Policy.AnonymousTagLocations = false;
+ Policy.SuppressStrongLifetime = true;
+ Policy.SuppressUnwrittenScope = true;
+ return Policy;
+}
+
+/// \brief Retrieve a printing policy suitable for code completion.
+static PrintingPolicy getCompletionPrintingPolicy(Sema &S) {
+ return getCompletionPrintingPolicy(S.Context, S.PP);
+}
+
+/// \brief Retrieve the string representation of the given type as a string
+/// that has the appropriate lifetime for code completion.
+///
+/// This routine provides a fast path where we provide constant strings for
+/// common type names.
+static const char *GetCompletionTypeString(QualType T,
+ ASTContext &Context,
+ const PrintingPolicy &Policy,
+ CodeCompletionAllocator &Allocator) {
+ if (!T.getLocalQualifiers()) {
+ // Built-in type names are constant strings.
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(T))
+ return BT->getNameAsCString(Policy);
+
+ // Anonymous tag types are constant strings.
+ if (const TagType *TagT = dyn_cast<TagType>(T))
+ if (TagDecl *Tag = TagT->getDecl())
+ if (!Tag->hasNameForLinkage()) {
+ switch (Tag->getTagKind()) {
+ case TTK_Struct: return "struct <anonymous>";
+ case TTK_Interface: return "__interface <anonymous>";
+ case TTK_Class: return "class <anonymous>";
+ case TTK_Union: return "union <anonymous>";
+ case TTK_Enum: return "enum <anonymous>";
+ }
+ }
+ }
+
+ // Slow path: format the type as a string.
+ std::string Result;
+ T.getAsStringInternal(Result, Policy);
+ return Allocator.CopyString(Result);
+}
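+
+// E.g. (illustrative): `int' takes the BuiltinType fast path and returns a
+// constant string; an unnamed struct prints as "struct <anonymous>"; any
+// other type falls through to the slow path and is copied into the
+// allocator.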
+
+/// \brief Add a completion for "this", if we're in a member function.
+static void addThisCompletion(Sema &S, ResultBuilder &Results) {
+ QualType ThisTy = S.getCurrentThisType();
+ if (ThisTy.isNull())
+ return;
+
+ CodeCompletionAllocator &Allocator = Results.getAllocator();
+ CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
+ PrintingPolicy Policy = getCompletionPrintingPolicy(S);
+ Builder.AddResultTypeChunk(GetCompletionTypeString(ThisTy,
+ S.Context,
+ Policy,
+ Allocator));
+ Builder.AddTypedTextChunk("this");
+ Results.AddResult(CodeCompletionResult(Builder.TakeString()));
+}
+
+/// \brief Add language constructs that show up for "ordinary" names.
+static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
+ Scope *S,
+ Sema &SemaRef,
+ ResultBuilder &Results) {
+ CodeCompletionAllocator &Allocator = Results.getAllocator();
+ CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
+ PrintingPolicy Policy = getCompletionPrintingPolicy(SemaRef);
+
+ typedef CodeCompletionResult Result;
+ switch (CCC) {
+ case Sema::PCC_Namespace:
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ if (Results.includeCodePatterns()) {
+ // namespace <identifier> { declarations }
+ Builder.AddTypedTextChunk("namespace");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("identifier");
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("declarations");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ // namespace identifier = identifier ;
+ Builder.AddTypedTextChunk("namespace");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("name");
+ Builder.AddChunk(CodeCompletionString::CK_Equal);
+ Builder.AddPlaceholderChunk("namespace");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // Using directives
+ Builder.AddTypedTextChunk("using");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("namespace");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("identifier");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // asm(string-literal)
+ Builder.AddTypedTextChunk("asm");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("string-literal");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ if (Results.includeCodePatterns()) {
+ // Explicit template instantiation
+ Builder.AddTypedTextChunk("template");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("declaration");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+ }
+
+ if (SemaRef.getLangOpts().ObjC1)
+ AddObjCTopLevelResults(Results, true);
+
+ AddTypedefResult(Results);
+ // Fall through
+
+ case Sema::PCC_Class:
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ // Using declaration
+ Builder.AddTypedTextChunk("using");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("qualifier");
+ Builder.AddTextChunk("::");
+ Builder.AddPlaceholderChunk("name");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // using typename qualifier::name (only in a dependent context)
+ if (SemaRef.CurContext->isDependentContext()) {
+ Builder.AddTypedTextChunk("using");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("typename");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("qualifier");
+ Builder.AddTextChunk("::");
+ Builder.AddPlaceholderChunk("name");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ if (CCC == Sema::PCC_Class) {
+ AddTypedefResult(Results);
+
+ // public:
+ Builder.AddTypedTextChunk("public");
+ if (Results.includeCodePatterns())
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // protected:
+ Builder.AddTypedTextChunk("protected");
+ if (Results.includeCodePatterns())
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // private:
+ Builder.AddTypedTextChunk("private");
+ if (Results.includeCodePatterns())
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+ }
+ // Fall through
+
+ case Sema::PCC_Template:
+ case Sema::PCC_MemberTemplate:
+ if (SemaRef.getLangOpts().CPlusPlus && Results.includeCodePatterns()) {
+ // template < parameters >
+ Builder.AddTypedTextChunk("template");
+ Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
+ Builder.AddPlaceholderChunk("parameters");
+ Builder.AddChunk(CodeCompletionString::CK_RightAngle);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
+ AddFunctionSpecifiers(CCC, SemaRef.getLangOpts(), Results);
+ break;
+
+ case Sema::PCC_ObjCInterface:
+ AddObjCInterfaceResults(SemaRef.getLangOpts(), Results, true);
+ AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
+ AddFunctionSpecifiers(CCC, SemaRef.getLangOpts(), Results);
+ break;
+
+ case Sema::PCC_ObjCImplementation:
+ AddObjCImplementationResults(SemaRef.getLangOpts(), Results, true);
+ AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
+ AddFunctionSpecifiers(CCC, SemaRef.getLangOpts(), Results);
+ break;
+
+ case Sema::PCC_ObjCInstanceVariableList:
+ AddObjCVisibilityResults(SemaRef.getLangOpts(), Results, true);
+ break;
+
+ case Sema::PCC_RecoveryInFunction:
+ case Sema::PCC_Statement: {
+ AddTypedefResult(Results);
+
+ if (SemaRef.getLangOpts().CPlusPlus && Results.includeCodePatterns() &&
+ SemaRef.getLangOpts().CXXExceptions) {
+ Builder.AddTypedTextChunk("try");
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Builder.AddTextChunk("catch");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("declaration");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+ if (SemaRef.getLangOpts().ObjC1)
+ AddObjCStatementResults(Results, true);
+
+ if (Results.includeCodePatterns()) {
+ // if (condition) { statements }
+ Builder.AddTypedTextChunk("if");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ if (SemaRef.getLangOpts().CPlusPlus)
+ Builder.AddPlaceholderChunk("condition");
+ else
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // switch (condition) { }
+ Builder.AddTypedTextChunk("switch");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ if (SemaRef.getLangOpts().CPlusPlus)
+ Builder.AddPlaceholderChunk("condition");
+ else
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ // Switch-specific statements.
+ if (!SemaRef.getCurFunction()->SwitchStack.empty()) {
+ // case expression:
+ Builder.AddTypedTextChunk("case");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // default:
+ Builder.AddTypedTextChunk("default");
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ if (Results.includeCodePatterns()) {
+ // while (condition) { statements }
+ Builder.AddTypedTextChunk("while");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ if (SemaRef.getLangOpts().CPlusPlus)
+ Builder.AddPlaceholderChunk("condition");
+ else
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // do { statements } while ( expression );
+ Builder.AddTypedTextChunk("do");
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Builder.AddTextChunk("while");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // for ( for-init-statement ; condition ; expression ) { statements }
+ Builder.AddTypedTextChunk("for");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ if (SemaRef.getLangOpts().CPlusPlus || SemaRef.getLangOpts().C99)
+ Builder.AddPlaceholderChunk("init-statement");
+ else
+ Builder.AddPlaceholderChunk("init-expression");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
+ Builder.AddPlaceholderChunk("condition");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
+ Builder.AddPlaceholderChunk("inc-expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ if (S->getContinueParent()) {
+ // continue ;
+ Builder.AddTypedTextChunk("continue");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ if (S->getBreakParent()) {
+ // break ;
+ Builder.AddTypedTextChunk("break");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ // "return expression ;" or "return ;", depending on whether we
+ // know the function is void or not.
+ bool isVoid = false;
+ if (FunctionDecl *Function = dyn_cast<FunctionDecl>(SemaRef.CurContext))
+ isVoid = Function->getReturnType()->isVoidType();
+ else if (ObjCMethodDecl *Method
+ = dyn_cast<ObjCMethodDecl>(SemaRef.CurContext))
+ isVoid = Method->getReturnType()->isVoidType();
+ else if (SemaRef.getCurBlock() &&
+ !SemaRef.getCurBlock()->ReturnType.isNull())
+ isVoid = SemaRef.getCurBlock()->ReturnType->isVoidType();
+ Builder.AddTypedTextChunk("return");
+ if (!isVoid) {
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ }
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // goto identifier ;
+ Builder.AddTypedTextChunk("goto");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("label");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // Using directives
+ Builder.AddTypedTextChunk("using");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("namespace");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("identifier");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ // Fall through (for statement expressions).
+ case Sema::PCC_ForInit:
+ case Sema::PCC_Condition:
+ AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
+ // Fall through: conditions and statements can have expressions.
+
+ case Sema::PCC_ParenthesizedExpression:
+ if (SemaRef.getLangOpts().ObjCAutoRefCount &&
+ CCC == Sema::PCC_ParenthesizedExpression) {
+ // (__bridge <type>)<expression>
+ Builder.AddTypedTextChunk("__bridge");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // (__bridge_transfer <Objective-C type>)<expression>
+ Builder.AddTypedTextChunk("__bridge_transfer");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("Objective-C type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // (__bridge_retained <CF type>)<expression>
+ Builder.AddTypedTextChunk("__bridge_retained");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("CF type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+ // Fall through
+
+ case Sema::PCC_Expression: {
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ // 'this', if we're in a non-static member function.
+ addThisCompletion(SemaRef, Results);
+
+ // true
+ Builder.AddResultTypeChunk("bool");
+ Builder.AddTypedTextChunk("true");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // false
+ Builder.AddResultTypeChunk("bool");
+ Builder.AddTypedTextChunk("false");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ if (SemaRef.getLangOpts().RTTI) {
+ // dynamic_cast < type-id > ( expression )
+ Builder.AddTypedTextChunk("dynamic_cast");
+ Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightAngle);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ // static_cast < type-id > ( expression )
+ Builder.AddTypedTextChunk("static_cast");
+ Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightAngle);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // reinterpret_cast < type-id > ( expression )
+ Builder.AddTypedTextChunk("reinterpret_cast");
+ Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightAngle);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // const_cast < type-id > ( expression )
+ Builder.AddTypedTextChunk("const_cast");
+ Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightAngle);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ if (SemaRef.getLangOpts().RTTI) {
+ // typeid ( expression-or-type )
+ Builder.AddResultTypeChunk("std::type_info");
+ Builder.AddTypedTextChunk("typeid");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression-or-type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ // new T ( ... )
+ Builder.AddTypedTextChunk("new");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expressions");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // new T [ ] ( ... )
+ Builder.AddTypedTextChunk("new");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_LeftBracket);
+ Builder.AddPlaceholderChunk("size");
+ Builder.AddChunk(CodeCompletionString::CK_RightBracket);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expressions");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // delete expression
+ Builder.AddResultTypeChunk("void");
+ Builder.AddTypedTextChunk("delete");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // delete [] expression
+ Builder.AddResultTypeChunk("void");
+ Builder.AddTypedTextChunk("delete");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBracket);
+ Builder.AddChunk(CodeCompletionString::CK_RightBracket);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ if (SemaRef.getLangOpts().CXXExceptions) {
+ // throw expression
+ Builder.AddResultTypeChunk("void");
+ Builder.AddTypedTextChunk("throw");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ // FIXME: Rethrow?
+
+ if (SemaRef.getLangOpts().CPlusPlus11) {
+ // nullptr
+ Builder.AddResultTypeChunk("std::nullptr_t");
+ Builder.AddTypedTextChunk("nullptr");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // alignof
+ Builder.AddResultTypeChunk("size_t");
+ Builder.AddTypedTextChunk("alignof");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // noexcept
+ Builder.AddResultTypeChunk("bool");
+ Builder.AddTypedTextChunk("noexcept");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // sizeof... expression
+ Builder.AddResultTypeChunk("size_t");
+ Builder.AddTypedTextChunk("sizeof...");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("parameter-pack");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+ }
+
+ if (SemaRef.getLangOpts().ObjC1) {
+ // Add "super", if we're in an Objective-C class with a superclass.
+ if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl()) {
+ // The interface can be NULL.
+ if (ObjCInterfaceDecl *ID = Method->getClassInterface())
+ if (ID->getSuperClass()) {
+ std::string SuperType = ID->getSuperClass()->getNameAsString();
+ if (Method->isInstanceMethod())
+ SuperType += " *";
+
+ Builder.AddResultTypeChunk(Allocator.CopyString(SuperType));
+ Builder.AddTypedTextChunk("super");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+ }
+
+ AddObjCExpressionResults(Results, true);
+ }
+
+ if (SemaRef.getLangOpts().C11) {
+ // _Alignof
+ Builder.AddResultTypeChunk("size_t");
+ if (SemaRef.PP.isMacroDefined("alignof"))
+ Builder.AddTypedTextChunk("alignof");
+ else
+ Builder.AddTypedTextChunk("_Alignof");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ // sizeof expression
+ Builder.AddResultTypeChunk("size_t");
+ Builder.AddTypedTextChunk("sizeof");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression-or-type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+ break;
+ }
+
+ case Sema::PCC_Type:
+ case Sema::PCC_LocalDeclarationSpecifiers:
+ break;
+ }
+
+ if (WantTypesInContext(CCC, SemaRef.getLangOpts()))
+ AddTypeSpecifierResults(SemaRef.getLangOpts(), Results);
+
+ if (SemaRef.getLangOpts().CPlusPlus && CCC != Sema::PCC_Type)
+ Results.AddResult(Result("operator"));
+}
+
+/// \brief If the given declaration has an associated type, add it as a result
+/// type chunk.
+static void AddResultTypeChunk(ASTContext &Context,
+ const PrintingPolicy &Policy,
+ const NamedDecl *ND,
+ QualType BaseType,
+ CodeCompletionBuilder &Result) {
+ if (!ND)
+ return;
+
+ // Skip constructors and conversion functions, which have their return types
+ // built into their names.
+ if (isa<CXXConstructorDecl>(ND) || isa<CXXConversionDecl>(ND))
+ return;
+
+ // Determine the type of the declaration (if it has a type).
+ QualType T;
+ if (const FunctionDecl *Function = ND->getAsFunction())
+ T = Function->getReturnType();
+ else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND)) {
+ if (!BaseType.isNull())
+ T = Method->getSendResultType(BaseType);
+ else
+ T = Method->getReturnType();
+ } else if (const EnumConstantDecl *Enumerator = dyn_cast<EnumConstantDecl>(ND))
+ T = Context.getTypeDeclType(cast<TypeDecl>(Enumerator->getDeclContext()));
+ else if (isa<UnresolvedUsingValueDecl>(ND)) {
+ /* Do nothing: ignore unresolved using declarations. */
+ } else if (const ObjCIvarDecl *Ivar = dyn_cast<ObjCIvarDecl>(ND)) {
+ if (!BaseType.isNull())
+ T = Ivar->getUsageType(BaseType);
+ else
+ T = Ivar->getType();
+ } else if (const ValueDecl *Value = dyn_cast<ValueDecl>(ND)) {
+ T = Value->getType();
+ } else if (const ObjCPropertyDecl *Property = dyn_cast<ObjCPropertyDecl>(ND)) {
+ if (!BaseType.isNull())
+ T = Property->getUsageType(BaseType);
+ else
+ T = Property->getType();
+ }
+
+ if (T.isNull() || Context.hasSameType(T, Context.DependentTy))
+ return;
+
+ Result.AddResultTypeChunk(GetCompletionTypeString(T, Context, Policy,
+ Result.getAllocator()));
+}
+
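+/// \brief If the given function or method carries a sentinel attribute with
+/// position 0, append the terminating argument ("nil", "NULL", or a cast of
+/// 0) that calls to it are expected to end with.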
+static void MaybeAddSentinel(Preprocessor &PP,
+ const NamedDecl *FunctionOrMethod,
+ CodeCompletionBuilder &Result) {
+ if (SentinelAttr *Sentinel = FunctionOrMethod->getAttr<SentinelAttr>())
+ if (Sentinel->getSentinel() == 0) {
+ if (PP.getLangOpts().ObjC1 && PP.isMacroDefined("nil"))
+ Result.AddTextChunk(", nil");
+ else if (PP.isMacroDefined("NULL"))
+ Result.AddTextChunk(", NULL");
+ else
+ Result.AddTextChunk(", (void*)0");
+ }
+}
+
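+/// \brief Render an Objective-C parameter's qualifiers (in/out/inout,
+/// bycopy/byref, oneway, and any nullability annotation) in their source
+/// spellings.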
+static std::string formatObjCParamQualifiers(unsigned ObjCQuals,
+ QualType &Type) {
+ std::string Result;
+ if (ObjCQuals & Decl::OBJC_TQ_In)
+ Result += "in ";
+ else if (ObjCQuals & Decl::OBJC_TQ_Inout)
+ Result += "inout ";
+ else if (ObjCQuals & Decl::OBJC_TQ_Out)
+ Result += "out ";
+ if (ObjCQuals & Decl::OBJC_TQ_Bycopy)
+ Result += "bycopy ";
+ else if (ObjCQuals & Decl::OBJC_TQ_Byref)
+ Result += "byref ";
+ if (ObjCQuals & Decl::OBJC_TQ_Oneway)
+ Result += "oneway ";
+ if (ObjCQuals & Decl::OBJC_TQ_CSNullability) {
+ if (auto nullability = AttributedType::stripOuterNullability(Type)) {
+ switch (*nullability) {
+ case NullabilityKind::NonNull:
+ Result += "nonnull ";
+ break;
+
+ case NullabilityKind::Nullable:
+ Result += "nullable ";
+ break;
+
+ case NullabilityKind::Unspecified:
+ Result += "null_unspecified ";
+ break;
+ }
+ }
+ }
+ return Result;
+}
+
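+/// \brief Format a single function parameter as a placeholder string; a
+/// block-pointer parameter is expanded into a block-literal skeleton with
+/// the appropriate return type and parameter list.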
+static std::string FormatFunctionParameter(const PrintingPolicy &Policy,
+ const ParmVarDecl *Param,
+ bool SuppressName = false,
+ bool SuppressBlock = false,
+ Optional<ArrayRef<QualType>> ObjCSubsts = None) {
+ bool ObjCMethodParam = isa<ObjCMethodDecl>(Param->getDeclContext());
+ if (Param->getType()->isDependentType() ||
+ !Param->getType()->isBlockPointerType()) {
+ // The argument for a dependent or non-block parameter is a placeholder
+ // containing that parameter's type.
+ std::string Result;
+
+ if (Param->getIdentifier() && !ObjCMethodParam && !SuppressName)
+ Result = Param->getIdentifier()->getName();
+
+ QualType Type = Param->getType();
+ if (ObjCSubsts)
+ Type = Type.substObjCTypeArgs(Param->getASTContext(), *ObjCSubsts,
+ ObjCSubstitutionContext::Parameter);
+ if (ObjCMethodParam) {
+ Result = "(" + formatObjCParamQualifiers(Param->getObjCDeclQualifier(),
+ Type);
+ Result += Type.getAsString(Policy) + ")";
+ if (Param->getIdentifier() && !SuppressName)
+ Result += Param->getIdentifier()->getName();
+ } else {
+ Type.getAsStringInternal(Result, Policy);
+ }
+ return Result;
+ }
+
+ // The argument for a block pointer parameter is a block literal with
+ // the appropriate type.
+ FunctionTypeLoc Block;
+ FunctionProtoTypeLoc BlockProto;
+ TypeLoc TL;
+ if (TypeSourceInfo *TSInfo = Param->getTypeSourceInfo()) {
+ TL = TSInfo->getTypeLoc().getUnqualifiedLoc();
+ while (true) {
+ // Look through typedefs.
+ if (!SuppressBlock) {
+ if (TypedefTypeLoc TypedefTL = TL.getAs<TypedefTypeLoc>()) {
+ if (TypeSourceInfo *InnerTSInfo =
+ TypedefTL.getTypedefNameDecl()->getTypeSourceInfo()) {
+ TL = InnerTSInfo->getTypeLoc().getUnqualifiedLoc();
+ continue;
+ }
+ }
+
+ // Look through qualified types
+ if (QualifiedTypeLoc QualifiedTL = TL.getAs<QualifiedTypeLoc>()) {
+ TL = QualifiedTL.getUnqualifiedLoc();
+ continue;
+ }
+
+ if (AttributedTypeLoc AttrTL = TL.getAs<AttributedTypeLoc>()) {
+ TL = AttrTL.getModifiedLoc();
+ continue;
+ }
+ }
+
+ // Try to get the function prototype behind the block pointer type,
+ // then we're done.
+ if (BlockPointerTypeLoc BlockPtr = TL.getAs<BlockPointerTypeLoc>()) {
+ TL = BlockPtr.getPointeeLoc().IgnoreParens();
+ Block = TL.getAs<FunctionTypeLoc>();
+ BlockProto = TL.getAs<FunctionProtoTypeLoc>();
+ }
+ break;
+ }
+ }
+
+ if (!Block) {
+ // We were unable to find a FunctionProtoTypeLoc with parameter names
+ // for the block; just use the parameter type as a placeholder.
+ std::string Result;
+ if (!ObjCMethodParam && Param->getIdentifier())
+ Result = Param->getIdentifier()->getName();
+
+ QualType Type = Param->getType().getUnqualifiedType();
+
+ if (ObjCMethodParam) {
+ Result = "(" + formatObjCParamQualifiers(Param->getObjCDeclQualifier(),
+ Type);
+ Result += Type.getAsString(Policy) + ")";
+ if (Param->getIdentifier())
+ Result += Param->getIdentifier()->getName();
+ } else {
+ Type.getAsStringInternal(Result, Policy);
+ }
+
+ return Result;
+ }
+
+ // We have the function prototype behind the block pointer type, as it was
+ // written in the source.
+ std::string Result;
+ QualType ResultType = Block.getTypePtr()->getReturnType();
+ if (ObjCSubsts)
+ ResultType = ResultType.substObjCTypeArgs(Param->getASTContext(),
+ *ObjCSubsts,
+ ObjCSubstitutionContext::Result);
+ if (!ResultType->isVoidType() || SuppressBlock)
+ ResultType.getAsStringInternal(Result, Policy);
+
+ // Format the parameter list.
+ std::string Params;
+ if (!BlockProto || Block.getNumParams() == 0) {
+ if (BlockProto && BlockProto.getTypePtr()->isVariadic())
+ Params = "(...)";
+ else
+ Params = "(void)";
+ } else {
+ Params += "(";
+ for (unsigned I = 0, N = Block.getNumParams(); I != N; ++I) {
+ if (I)
+ Params += ", ";
+ Params += FormatFunctionParameter(Policy, Block.getParam(I),
+ /*SuppressName=*/false,
+ /*SuppressBlock=*/true,
+ ObjCSubsts);
+
+ if (I == N - 1 && BlockProto.getTypePtr()->isVariadic())
+ Params += ", ...";
+ }
+ Params += ")";
+ }
+
+ if (SuppressBlock) {
+ // Format as a parameter.
+ Result = Result + " (^";
+ if (Param->getIdentifier())
+ Result += Param->getIdentifier()->getName();
+ Result += ")";
+ Result += Params;
+ } else {
+ // Format as a block literal argument.
+ Result = '^' + Result;
+ Result += Params;
+
+ if (Param->getIdentifier())
+ Result += Param->getIdentifier()->getName();
+ }
+
+ return Result;
+}
+
+/// \brief Add function parameter chunks to the given code completion string.
+static void AddFunctionParameterChunks(Preprocessor &PP,
+ const PrintingPolicy &Policy,
+ const FunctionDecl *Function,
+ CodeCompletionBuilder &Result,
+ unsigned Start = 0,
+ bool InOptional = false) {
+ bool FirstParameter = true;
+
+ for (unsigned P = Start, N = Function->getNumParams(); P != N; ++P) {
+ const ParmVarDecl *Param = Function->getParamDecl(P);
+
+ if (Param->hasDefaultArg() && !InOptional) {
+ // When we see an optional default argument, put that argument and
+ // the remaining default arguments into a new, optional string.
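+ // For "void f(int a, int b = 0)", for example, this emits the placeholder
+ // "int a" followed by a single optional chunk holding ", int b".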
+ CodeCompletionBuilder Opt(Result.getAllocator(),
+ Result.getCodeCompletionTUInfo());
+ if (!FirstParameter)
+ Opt.AddChunk(CodeCompletionString::CK_Comma);
+ AddFunctionParameterChunks(PP, Policy, Function, Opt, P, true);
+ Result.AddOptionalChunk(Opt.TakeString());
+ break;
+ }
+
+ if (FirstParameter)
+ FirstParameter = false;
+ else
+ Result.AddChunk(CodeCompletionString::CK_Comma);
+
+ InOptional = false;
+
+ // Format the placeholder string.
+ std::string PlaceholderStr = FormatFunctionParameter(Policy, Param);
+
+ if (Function->isVariadic() && P == N - 1)
+ PlaceholderStr += ", ...";
+
+ // Add the placeholder string.
+ Result.AddPlaceholderChunk(
+ Result.getAllocator().CopyString(PlaceholderStr));
+ }
+
+ if (const FunctionProtoType *Proto
+ = Function->getType()->getAs<FunctionProtoType>())
+ if (Proto->isVariadic()) {
+ if (Proto->getNumParams() == 0)
+ Result.AddPlaceholderChunk("...");
+
+ MaybeAddSentinel(PP, Function, Result);
+ }
+}
+
+/// \brief Add template parameter chunks to the given code completion string.
+static void AddTemplateParameterChunks(ASTContext &Context,
+ const PrintingPolicy &Policy,
+ const TemplateDecl *Template,
+ CodeCompletionBuilder &Result,
+ unsigned MaxParameters = 0,
+ unsigned Start = 0,
+ bool InDefaultArg = false) {
+ bool FirstParameter = true;
+
+ // Prefer to take the template parameter names from the first declaration of
+ // the template.
+ Template = cast<TemplateDecl>(Template->getCanonicalDecl());
+
+ TemplateParameterList *Params = Template->getTemplateParameters();
+ TemplateParameterList::iterator PEnd = Params->end();
+ if (MaxParameters)
+ PEnd = Params->begin() + MaxParameters;
+ for (TemplateParameterList::iterator P = Params->begin() + Start;
+ P != PEnd; ++P) {
+ bool HasDefaultArg = false;
+ std::string PlaceholderStr;
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
+ if (TTP->wasDeclaredWithTypename())
+ PlaceholderStr = "typename";
+ else
+ PlaceholderStr = "class";
+
+ if (TTP->getIdentifier()) {
+ PlaceholderStr += ' ';
+ PlaceholderStr += TTP->getIdentifier()->getName();
+ }
+
+ HasDefaultArg = TTP->hasDefaultArgument();
+ } else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ if (NTTP->getIdentifier())
+ PlaceholderStr = NTTP->getIdentifier()->getName();
+ NTTP->getType().getAsStringInternal(PlaceholderStr, Policy);
+ HasDefaultArg = NTTP->hasDefaultArgument();
+ } else {
+ assert(isa<TemplateTemplateParmDecl>(*P));
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*P);
+
+ // Since putting the template argument list into the placeholder would
+ // be very, very long, we just use an abbreviation.
+ PlaceholderStr = "template<...> class";
+ if (TTP->getIdentifier()) {
+ PlaceholderStr += ' ';
+ PlaceholderStr += TTP->getIdentifier()->getName();
+ }
+
+ HasDefaultArg = TTP->hasDefaultArgument();
+ }
+
+ if (HasDefaultArg && !InDefaultArg) {
+ // When we see an optional default argument, put that argument and
+ // the remaining default arguments into a new, optional string.
+ CodeCompletionBuilder Opt(Result.getAllocator(),
+ Result.getCodeCompletionTUInfo());
+ if (!FirstParameter)
+ Opt.AddChunk(CodeCompletionString::CK_Comma);
+ AddTemplateParameterChunks(Context, Policy, Template, Opt, MaxParameters,
+ P - Params->begin(), true);
+ Result.AddOptionalChunk(Opt.TakeString());
+ break;
+ }
+
+ InDefaultArg = false;
+
+ if (FirstParameter)
+ FirstParameter = false;
+ else
+ Result.AddChunk(CodeCompletionString::CK_Comma);
+
+ // Add the placeholder string.
+ Result.AddPlaceholderChunk(
+ Result.getAllocator().CopyString(PlaceholderStr));
+ }
+}
+
+/// \brief Add a qualifier to the given code-completion string, if the
+/// provided nested-name-specifier is non-NULL.
+static void
+AddQualifierToCompletionString(CodeCompletionBuilder &Result,
+ NestedNameSpecifier *Qualifier,
+ bool QualifierIsInformative,
+ ASTContext &Context,
+ const PrintingPolicy &Policy) {
+ if (!Qualifier)
+ return;
+
+ std::string PrintedNNS;
+ {
+ llvm::raw_string_ostream OS(PrintedNNS);
+ Qualifier->print(OS, Policy);
+ }
+ if (QualifierIsInformative)
+ Result.AddInformativeChunk(Result.getAllocator().CopyString(PrintedNNS));
+ else
+ Result.AddTextChunk(Result.getAllocator().CopyString(PrintedNNS));
+}
+
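+/// \brief Append a member function's cv-qualifiers (const, volatile,
+/// restrict) to the code-completion string as informative text.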
+static void
+AddFunctionTypeQualsToCompletionString(CodeCompletionBuilder &Result,
+ const FunctionDecl *Function) {
+ const FunctionProtoType *Proto
+ = Function->getType()->getAs<FunctionProtoType>();
+ if (!Proto || !Proto->getTypeQuals())
+ return;
+
+ // FIXME: Add ref-qualifier!
+
+ // Handle single qualifiers without copying
+ if (Proto->getTypeQuals() == Qualifiers::Const) {
+ Result.AddInformativeChunk(" const");
+ return;
+ }
+
+ if (Proto->getTypeQuals() == Qualifiers::Volatile) {
+ Result.AddInformativeChunk(" volatile");
+ return;
+ }
+
+ if (Proto->getTypeQuals() == Qualifiers::Restrict) {
+ Result.AddInformativeChunk(" restrict");
+ return;
+ }
+
+ // Handle multiple qualifiers.
+ std::string QualsStr;
+ if (Proto->isConst())
+ QualsStr += " const";
+ if (Proto->isVolatile())
+ QualsStr += " volatile";
+ if (Proto->isRestrict())
+ QualsStr += " restrict";
+ Result.AddInformativeChunk(Result.getAllocator().CopyString(QualsStr));
+}
+
+/// \brief Add the name of the given declaration to the code-completion string.
+static void AddTypedNameChunk(ASTContext &Context, const PrintingPolicy &Policy,
+ const NamedDecl *ND,
+ CodeCompletionBuilder &Result) {
+ DeclarationName Name = ND->getDeclName();
+ if (!Name)
+ return;
+
+ switch (Name.getNameKind()) {
+ case DeclarationName::CXXOperatorName: {
+ const char *OperatorName = nullptr;
+ switch (Name.getCXXOverloadedOperator()) {
+ case OO_None:
+ case OO_Conditional:
+ case NUM_OVERLOADED_OPERATORS:
+ OperatorName = "operator";
+ break;
+
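+ // Pull the spelling of each single-token overloaded operator from
+ // OperatorKinds.def; multi-token spellings are handled explicitly below.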
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ case OO_##Name: OperatorName = "operator" Spelling; break;
+#define OVERLOADED_OPERATOR_MULTI(Name,Spelling,Unary,Binary,MemberOnly)
+#include "clang/Basic/OperatorKinds.def"
+
+ case OO_New: OperatorName = "operator new"; break;
+ case OO_Delete: OperatorName = "operator delete"; break;
+ case OO_Array_New: OperatorName = "operator new[]"; break;
+ case OO_Array_Delete: OperatorName = "operator delete[]"; break;
+ case OO_Call: OperatorName = "operator()"; break;
+ case OO_Subscript: OperatorName = "operator[]"; break;
+ }
+ Result.AddTypedTextChunk(OperatorName);
+ break;
+ }
+
+ case DeclarationName::Identifier:
+ case DeclarationName::CXXConversionFunctionName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(ND->getNameAsString()));
+ break;
+
+ case DeclarationName::CXXUsingDirective:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ break;
+
+ case DeclarationName::CXXConstructorName: {
+ CXXRecordDecl *Record = nullptr;
+ QualType Ty = Name.getCXXNameType();
+ if (const RecordType *RecordTy = Ty->getAs<RecordType>())
+ Record = cast<CXXRecordDecl>(RecordTy->getDecl());
+ else if (const InjectedClassNameType *InjectedTy
+ = Ty->getAs<InjectedClassNameType>())
+ Record = InjectedTy->getDecl();
+ else {
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(ND->getNameAsString()));
+ break;
+ }
+
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(Record->getNameAsString()));
+ if (ClassTemplateDecl *Template = Record->getDescribedClassTemplate()) {
+ Result.AddChunk(CodeCompletionString::CK_LeftAngle);
+ AddTemplateParameterChunks(Context, Policy, Template, Result);
+ Result.AddChunk(CodeCompletionString::CK_RightAngle);
+ }
+ break;
+ }
+ }
+}
+
+CodeCompletionString *CodeCompletionResult::CreateCodeCompletionString(Sema &S,
+ const CodeCompletionContext &CCContext,
+ CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo,
+ bool IncludeBriefComments) {
+ return CreateCodeCompletionString(S.Context, S.PP, CCContext, Allocator,
+ CCTUInfo, IncludeBriefComments);
+}
+
+/// \brief If possible, create a new code completion string for the given
+/// result.
+///
+/// \returns Either a new, heap-allocated code completion string describing
+/// how to use this result, or NULL to indicate that the string or name of the
+/// result is all that is needed.
+CodeCompletionString *
+CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
+ Preprocessor &PP,
+ const CodeCompletionContext &CCContext,
+ CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo,
+ bool IncludeBriefComments) {
+ CodeCompletionBuilder Result(Allocator, CCTUInfo, Priority, Availability);
+
+ PrintingPolicy Policy = getCompletionPrintingPolicy(Ctx, PP);
+ if (Kind == RK_Pattern) {
+ Pattern->Priority = Priority;
+ Pattern->Availability = Availability;
+
+ if (Declaration) {
+ Result.addParentContext(Declaration->getDeclContext());
+ Pattern->ParentName = Result.getParentName();
+ // Provide a code completion comment for self.GetterName, where
+ // GetterName is the getter method for a property with a name
+ // different from the property name (declared via a property
+ // getter attribute).
+ const NamedDecl *ND = Declaration;
+ if (const ObjCMethodDecl *M = dyn_cast<ObjCMethodDecl>(ND))
+ if (M->isPropertyAccessor())
+ if (const ObjCPropertyDecl *PDecl = M->findPropertyDecl())
+ if (PDecl->getGetterName() == M->getSelector() &&
+ PDecl->getIdentifier() != M->getIdentifier()) {
+ if (const RawComment *RC =
+ Ctx.getRawCommentForAnyRedecl(M)) {
+ Result.addBriefComment(RC->getBriefText(Ctx));
+ Pattern->BriefComment = Result.getBriefComment();
+ }
+ else if (const RawComment *RC =
+ Ctx.getRawCommentForAnyRedecl(PDecl)) {
+ Result.addBriefComment(RC->getBriefText(Ctx));
+ Pattern->BriefComment = Result.getBriefComment();
+ }
+ }
+ }
+
+ return Pattern;
+ }
+
+ if (Kind == RK_Keyword) {
+ Result.AddTypedTextChunk(Keyword);
+ return Result.TakeString();
+ }
+
+ if (Kind == RK_Macro) {
+ const MacroInfo *MI = PP.getMacroInfo(Macro);
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(Macro->getName()));
+
+ if (!MI || !MI->isFunctionLike())
+ return Result.TakeString();
+
+ // Format a function-like macro with placeholders for the arguments.
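+ // For "#define MIN(x, y)", for example, this yields "MIN(x, y)" with "x"
+ // and "y" emitted as placeholder chunks.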
+ Result.AddChunk(CodeCompletionString::CK_LeftParen);
+ MacroInfo::arg_iterator A = MI->arg_begin(), AEnd = MI->arg_end();
+
+ // C99 variadic macros add __VA_ARGS__ at the end. Skip it.
+ if (MI->isC99Varargs()) {
+ --AEnd;
+
+ if (A == AEnd) {
+ Result.AddPlaceholderChunk("...");
+ }
+ }
+
+ for (MacroInfo::arg_iterator A = MI->arg_begin(); A != AEnd; ++A) {
+ if (A != MI->arg_begin())
+ Result.AddChunk(CodeCompletionString::CK_Comma);
+
+ if (MI->isVariadic() && (A+1) == AEnd) {
+ SmallString<32> Arg = (*A)->getName();
+ if (MI->isC99Varargs())
+ Arg += ", ...";
+ else
+ Arg += "...";
+ Result.AddPlaceholderChunk(Result.getAllocator().CopyString(Arg));
+ break;
+ }
+
+ // Non-variadic macros are simple.
+ Result.AddPlaceholderChunk(
+ Result.getAllocator().CopyString((*A)->getName()));
+ }
+ Result.AddChunk(CodeCompletionString::CK_RightParen);
+ return Result.TakeString();
+ }
+
+ assert(Kind == RK_Declaration && "Missed a result kind?");
+ const NamedDecl *ND = Declaration;
+ Result.addParentContext(ND->getDeclContext());
+
+ if (IncludeBriefComments) {
+ // Add documentation comment, if it exists.
+ if (const RawComment *RC = Ctx.getRawCommentForAnyRedecl(ND)) {
+ Result.addBriefComment(RC->getBriefText(Ctx));
+ }
+ else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(ND))
+ if (OMD->isPropertyAccessor())
+ if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
+ if (const RawComment *RC = Ctx.getRawCommentForAnyRedecl(PDecl))
+ Result.addBriefComment(RC->getBriefText(Ctx));
+ }
+
+ if (StartsNestedNameSpecifier) {
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(ND->getNameAsString()));
+ Result.AddTextChunk("::");
+ return Result.TakeString();
+ }
+
+ for (const auto *I : ND->specific_attrs<AnnotateAttr>())
+ Result.AddAnnotation(Result.getAllocator().CopyString(I->getAnnotation()));
+
+ AddResultTypeChunk(Ctx, Policy, ND, CCContext.getBaseType(), Result);
+
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(ND)) {
+ AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
+ Ctx, Policy);
+ AddTypedNameChunk(Ctx, Policy, ND, Result);
+ Result.AddChunk(CodeCompletionString::CK_LeftParen);
+ AddFunctionParameterChunks(PP, Policy, Function, Result);
+ Result.AddChunk(CodeCompletionString::CK_RightParen);
+ AddFunctionTypeQualsToCompletionString(Result, Function);
+ return Result.TakeString();
+ }
+
+ if (const FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(ND)) {
+ AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
+ Ctx, Policy);
+ FunctionDecl *Function = FunTmpl->getTemplatedDecl();
+ AddTypedNameChunk(Ctx, Policy, Function, Result);
+
+ // Figure out which template parameters are deduced (or have default
+ // arguments).
+ llvm::SmallBitVector Deduced;
+ Sema::MarkDeducedTemplateParameters(Ctx, FunTmpl, Deduced);
+ unsigned LastDeducibleArgument;
+ for (LastDeducibleArgument = Deduced.size(); LastDeducibleArgument > 0;
+ --LastDeducibleArgument) {
+ if (!Deduced[LastDeducibleArgument - 1]) {
+ // C++0x: Figure out if the template argument has a default. If so,
+ // the user doesn't need to type this argument.
+ // FIXME: We need to abstract template parameters better!
+ bool HasDefaultArg = false;
+ NamedDecl *Param = FunTmpl->getTemplateParameters()->getParam(
+ LastDeducibleArgument - 1);
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Param))
+ HasDefaultArg = TTP->hasDefaultArgument();
+ else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(Param))
+ HasDefaultArg = NTTP->hasDefaultArgument();
+ else {
+ assert(isa<TemplateTemplateParmDecl>(Param));
+ HasDefaultArg
+ = cast<TemplateTemplateParmDecl>(Param)->hasDefaultArgument();
+ }
+
+ if (!HasDefaultArg)
+ break;
+ }
+ }
+
+ if (LastDeducibleArgument) {
+ // Some of the function template arguments cannot be deduced from a
+ // function call, so we introduce an explicit template argument list
+ // containing all of the arguments up to the first deducible argument.
+ Result.AddChunk(CodeCompletionString::CK_LeftAngle);
+ AddTemplateParameterChunks(Ctx, Policy, FunTmpl, Result,
+ LastDeducibleArgument);
+ Result.AddChunk(CodeCompletionString::CK_RightAngle);
+ }
+
+ // Add the function parameters
+ Result.AddChunk(CodeCompletionString::CK_LeftParen);
+ AddFunctionParameterChunks(PP, Policy, Function, Result);
+ Result.AddChunk(CodeCompletionString::CK_RightParen);
+ AddFunctionTypeQualsToCompletionString(Result, Function);
+ return Result.TakeString();
+ }
+
+ if (const TemplateDecl *Template = dyn_cast<TemplateDecl>(ND)) {
+ AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
+ Ctx, Policy);
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(Template->getNameAsString()));
+ Result.AddChunk(CodeCompletionString::CK_LeftAngle);
+ AddTemplateParameterChunks(Ctx, Policy, Template, Result);
+ Result.AddChunk(CodeCompletionString::CK_RightAngle);
+ return Result.TakeString();
+ }
+
+ if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND)) {
+ Selector Sel = Method->getSelector();
+ if (Sel.isUnarySelector()) {
+ Result.AddTypedTextChunk(Result.getAllocator().CopyString(
+ Sel.getNameForSlot(0)));
+ return Result.TakeString();
+ }
+
+ std::string SelName = Sel.getNameForSlot(0).str();
+ SelName += ':';
+ if (StartParameter == 0)
+ Result.AddTypedTextChunk(Result.getAllocator().CopyString(SelName));
+ else {
+ Result.AddInformativeChunk(Result.getAllocator().CopyString(SelName));
+
+ // If there is only one parameter, and we're past it, add an empty
+ // typed-text chunk since there is nothing to type.
+ if (Method->param_size() == 1)
+ Result.AddTypedTextChunk("");
+ }
+ unsigned Idx = 0;
+ for (ObjCMethodDecl::param_const_iterator P = Method->param_begin(),
+ PEnd = Method->param_end();
+ P != PEnd; (void)++P, ++Idx) {
+ if (Idx > 0) {
+ std::string Keyword;
+ if (Idx > StartParameter)
+ Result.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ if (IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Idx))
+ Keyword += II->getName();
+ Keyword += ":";
+ if (Idx < StartParameter || AllParametersAreInformative)
+ Result.AddInformativeChunk(Result.getAllocator().CopyString(Keyword));
+ else
+ Result.AddTypedTextChunk(Result.getAllocator().CopyString(Keyword));
+ }
+
+ // If we're before the starting parameter, skip the placeholder.
+ if (Idx < StartParameter)
+ continue;
+
+ std::string Arg;
+ QualType ParamType = (*P)->getType();
+ Optional<ArrayRef<QualType>> ObjCSubsts;
+ if (!CCContext.getBaseType().isNull())
+ ObjCSubsts = CCContext.getBaseType()->getObjCSubstitutions(Method);
+
+ if (ParamType->isBlockPointerType() && !DeclaringEntity)
+ Arg = FormatFunctionParameter(Policy, *P, true,
+ /*SuppressBlock=*/false,
+ ObjCSubsts);
+ else {
+ if (ObjCSubsts)
+ ParamType = ParamType.substObjCTypeArgs(Ctx, *ObjCSubsts,
+ ObjCSubstitutionContext::Parameter);
+ Arg = "(" + formatObjCParamQualifiers((*P)->getObjCDeclQualifier(),
+ ParamType);
+ Arg += ParamType.getAsString(Policy) + ")";
+ if (IdentifierInfo *II = (*P)->getIdentifier())
+ if (DeclaringEntity || AllParametersAreInformative)
+ Arg += II->getName();
+ }
+
+ if (Method->isVariadic() && (P + 1) == PEnd)
+ Arg += ", ...";
+
+ if (DeclaringEntity)
+ Result.AddTextChunk(Result.getAllocator().CopyString(Arg));
+ else if (AllParametersAreInformative)
+ Result.AddInformativeChunk(Result.getAllocator().CopyString(Arg));
+ else
+ Result.AddPlaceholderChunk(Result.getAllocator().CopyString(Arg));
+ }
+
+ if (Method->isVariadic()) {
+ if (Method->param_size() == 0) {
+ if (DeclaringEntity)
+ Result.AddTextChunk(", ...");
+ else if (AllParametersAreInformative)
+ Result.AddInformativeChunk(", ...");
+ else
+ Result.AddPlaceholderChunk(", ...");
+ }
+
+ MaybeAddSentinel(PP, Method, Result);
+ }
+
+ return Result.TakeString();
+ }
+
+ if (Qualifier)
+ AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
+ Ctx, Policy);
+
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(ND->getNameAsString()));
+ return Result.TakeString();
+}
+
+/// \brief Add function overload parameter chunks to the given code completion
+/// string.
+static void AddOverloadParameterChunks(ASTContext &Context,
+ const PrintingPolicy &Policy,
+ const FunctionDecl *Function,
+ const FunctionProtoType *Prototype,
+ CodeCompletionBuilder &Result,
+ unsigned CurrentArg,
+ unsigned Start = 0,
+ bool InOptional = false) {
+ bool FirstParameter = true;
+ unsigned NumParams = Function ? Function->getNumParams()
+ : Prototype->getNumParams();
+
+ for (unsigned P = Start; P != NumParams; ++P) {
+ if (Function && Function->getParamDecl(P)->hasDefaultArg() && !InOptional) {
+ // When we see an optional default argument, put that argument and
+ // the remaining default arguments into a new, optional string.
+ CodeCompletionBuilder Opt(Result.getAllocator(),
+ Result.getCodeCompletionTUInfo());
+ if (!FirstParameter)
+ Opt.AddChunk(CodeCompletionString::CK_Comma);
+ // Optional sections are nested.
+ AddOverloadParameterChunks(Context, Policy, Function, Prototype, Opt,
+ CurrentArg, P, /*InOptional=*/true);
+ Result.AddOptionalChunk(Opt.TakeString());
+ return;
+ }
+
+ if (FirstParameter)
+ FirstParameter = false;
+ else
+ Result.AddChunk(CodeCompletionString::CK_Comma);
+
+ InOptional = false;
+
+ // Format the placeholder string.
+ std::string Placeholder;
+ if (Function)
+ Placeholder = FormatFunctionParameter(Policy, Function->getParamDecl(P));
+ else
+ Placeholder = Prototype->getParamType(P).getAsString(Policy);
+
+ if (P == CurrentArg)
+ Result.AddCurrentParameterChunk(
+ Result.getAllocator().CopyString(Placeholder));
+ else
+ Result.AddPlaceholderChunk(Result.getAllocator().CopyString(Placeholder));
+ }
+
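+ // For a variadic prototype, add a trailing "..." in an optional chunk and
+ // mark it as the current parameter once every declared parameter has been
+ // consumed.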
+ if (Prototype && Prototype->isVariadic()) {
+ CodeCompletionBuilder Opt(Result.getAllocator(),
+ Result.getCodeCompletionTUInfo());
+ if (!FirstParameter)
+ Opt.AddChunk(CodeCompletionString::CK_Comma);
+
+ if (CurrentArg < NumParams)
+ Opt.AddPlaceholderChunk("...");
+ else
+ Opt.AddCurrentParameterChunk("...");
+
+ Result.AddOptionalChunk(Opt.TakeString());
+ }
+}
+
+CodeCompletionString *
+CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
+ unsigned CurrentArg, Sema &S,
+ CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo,
+ bool IncludeBriefComments) const {
+ PrintingPolicy Policy = getCompletionPrintingPolicy(S);
+
+ // FIXME: Set priority, availability appropriately.
+ CodeCompletionBuilder Result(Allocator,CCTUInfo, 1, CXAvailability_Available);
+ FunctionDecl *FDecl = getFunction();
+ const FunctionProtoType *Proto
+ = dyn_cast<FunctionProtoType>(getFunctionType());
+ if (!FDecl && !Proto) {
+ // Function without a prototype. Just give the return type and a
+ // highlighted ellipsis.
+ const FunctionType *FT = getFunctionType();
+ Result.AddResultTypeChunk(Result.getAllocator().CopyString(
+ FT->getReturnType().getAsString(Policy)));
+ Result.AddChunk(CodeCompletionString::CK_LeftParen);
+ Result.AddChunk(CodeCompletionString::CK_CurrentParameter, "...");
+ Result.AddChunk(CodeCompletionString::CK_RightParen);
+ return Result.TakeString();
+ }
+
+ if (FDecl) {
+ if (IncludeBriefComments && CurrentArg < FDecl->getNumParams())
+ if (auto RC = S.getASTContext().getRawCommentForAnyRedecl(
+ FDecl->getParamDecl(CurrentArg)))
+ Result.addBriefComment(RC->getBriefText(S.getASTContext()));
+ AddResultTypeChunk(S.Context, Policy, FDecl, QualType(), Result);
+ Result.AddTextChunk(
+ Result.getAllocator().CopyString(FDecl->getNameAsString()));
+ } else {
+ Result.AddResultTypeChunk(
+ Result.getAllocator().CopyString(
+ Proto->getReturnType().getAsString(Policy)));
+ }
+
+ Result.AddChunk(CodeCompletionString::CK_LeftParen);
+ AddOverloadParameterChunks(S.getASTContext(), Policy, FDecl, Proto, Result,
+ CurrentArg);
+ Result.AddChunk(CodeCompletionString::CK_RightParen);
+
+ return Result.TakeString();
+}
+
+unsigned clang::getMacroUsagePriority(StringRef MacroName,
+ const LangOptions &LangOpts,
+ bool PreferredTypeIsPointer) {
+ unsigned Priority = CCP_Macro;
+
+ // Treat the "nil", "Nil" and "NULL" macros as null pointer constants.
+ if (MacroName.equals("nil") || MacroName.equals("NULL") ||
+ MacroName.equals("Nil")) {
+ Priority = CCP_Constant;
+ if (PreferredTypeIsPointer)
+ Priority = Priority / CCF_SimilarTypeMatch;
+ }
+ // Treat "YES", "NO", "true", and "false" as constants.
+ else if (MacroName.equals("YES") || MacroName.equals("NO") ||
+ MacroName.equals("true") || MacroName.equals("false"))
+ Priority = CCP_Constant;
+ // Treat "bool" as a type.
+ else if (MacroName.equals("bool"))
+ Priority = CCP_Type + (LangOpts.ObjC1 ? CCD_bool_in_ObjC : 0);
+
+ return Priority;
+}
+
+CXCursorKind clang::getCursorKindForDecl(const Decl *D) {
+ if (!D)
+ return CXCursor_UnexposedDecl;
+
+ switch (D->getKind()) {
+ case Decl::Enum: return CXCursor_EnumDecl;
+ case Decl::EnumConstant: return CXCursor_EnumConstantDecl;
+ case Decl::Field: return CXCursor_FieldDecl;
+ case Decl::Function:
+ return CXCursor_FunctionDecl;
+ case Decl::ObjCCategory: return CXCursor_ObjCCategoryDecl;
+ case Decl::ObjCCategoryImpl: return CXCursor_ObjCCategoryImplDecl;
+ case Decl::ObjCImplementation: return CXCursor_ObjCImplementationDecl;
+
+ case Decl::ObjCInterface: return CXCursor_ObjCInterfaceDecl;
+ case Decl::ObjCIvar: return CXCursor_ObjCIvarDecl;
+ case Decl::ObjCMethod:
+ return cast<ObjCMethodDecl>(D)->isInstanceMethod()
+ ? CXCursor_ObjCInstanceMethodDecl : CXCursor_ObjCClassMethodDecl;
+ case Decl::CXXMethod: return CXCursor_CXXMethod;
+ case Decl::CXXConstructor: return CXCursor_Constructor;
+ case Decl::CXXDestructor: return CXCursor_Destructor;
+ case Decl::CXXConversion: return CXCursor_ConversionFunction;
+ case Decl::ObjCProperty: return CXCursor_ObjCPropertyDecl;
+ case Decl::ObjCProtocol: return CXCursor_ObjCProtocolDecl;
+ case Decl::ParmVar: return CXCursor_ParmDecl;
+ case Decl::Typedef: return CXCursor_TypedefDecl;
+ case Decl::TypeAlias: return CXCursor_TypeAliasDecl;
+ case Decl::TypeAliasTemplate: return CXCursor_TypeAliasTemplateDecl;
+ case Decl::Var: return CXCursor_VarDecl;
+ case Decl::Namespace: return CXCursor_Namespace;
+ case Decl::NamespaceAlias: return CXCursor_NamespaceAlias;
+ case Decl::TemplateTypeParm: return CXCursor_TemplateTypeParameter;
+ case Decl::NonTypeTemplateParm:return CXCursor_NonTypeTemplateParameter;
+ case Decl::TemplateTemplateParm:return CXCursor_TemplateTemplateParameter;
+ case Decl::FunctionTemplate: return CXCursor_FunctionTemplate;
+ case Decl::ClassTemplate: return CXCursor_ClassTemplate;
+ case Decl::AccessSpec: return CXCursor_CXXAccessSpecifier;
+ case Decl::ClassTemplatePartialSpecialization:
+ return CXCursor_ClassTemplatePartialSpecialization;
+ case Decl::UsingDirective: return CXCursor_UsingDirective;
+ case Decl::TranslationUnit: return CXCursor_TranslationUnit;
+
+ case Decl::Using:
+ case Decl::UnresolvedUsingValue:
+ case Decl::UnresolvedUsingTypename:
+ return CXCursor_UsingDeclaration;
+
+ case Decl::ObjCPropertyImpl:
+ switch (cast<ObjCPropertyImplDecl>(D)->getPropertyImplementation()) {
+ case ObjCPropertyImplDecl::Dynamic:
+ return CXCursor_ObjCDynamicDecl;
+
+ case ObjCPropertyImplDecl::Synthesize:
+ return CXCursor_ObjCSynthesizeDecl;
+ }
+ llvm_unreachable("Unexpected ObjCPropertyImpl kind!");
+
+ case Decl::Import:
+ return CXCursor_ModuleImportDecl;
+
+ case Decl::ObjCTypeParam: return CXCursor_TemplateTypeParameter;
+
+ default:
+ if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ switch (TD->getTagKind()) {
+ case TTK_Interface: // fall through
+ case TTK_Struct: return CXCursor_StructDecl;
+ case TTK_Class: return CXCursor_ClassDecl;
+ case TTK_Union: return CXCursor_UnionDecl;
+ case TTK_Enum: return CXCursor_EnumDecl;
+ }
+ }
+ }
+
+ return CXCursor_UnexposedDecl;
+}
+
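+/// \brief Add a completion result for each macro known to the preprocessor,
+/// skipping macros used as header guards; if \p IncludeUndefined is true,
+/// macros whose definitions have since been removed are offered as well.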
+static void AddMacroResults(Preprocessor &PP, ResultBuilder &Results,
+ bool IncludeUndefined,
+ bool TargetTypeIsPointer = false) {
+ typedef CodeCompletionResult Result;
+
+ Results.EnterNewScope();
+
+ for (Preprocessor::macro_iterator M = PP.macro_begin(),
+ MEnd = PP.macro_end();
+ M != MEnd; ++M) {
+ auto MD = PP.getMacroDefinition(M->first);
+ if (IncludeUndefined || MD) {
+ if (MacroInfo *MI = MD.getMacroInfo())
+ if (MI->isUsedForHeaderGuard())
+ continue;
+
+ Results.AddResult(Result(M->first,
+ getMacroUsagePriority(M->first->getName(),
+ PP.getLangOpts(),
+ TargetTypeIsPointer)));
+ }
+ }
+
+ Results.ExitScope();
+}
+
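+/// \brief Add the predefined identifiers that expand to the current function
+/// name: __PRETTY_FUNCTION__, __FUNCTION__, and (in C99 and C++11) __func__.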
+static void AddPrettyFunctionResults(const LangOptions &LangOpts,
+ ResultBuilder &Results) {
+ typedef CodeCompletionResult Result;
+
+ Results.EnterNewScope();
+
+ Results.AddResult(Result("__PRETTY_FUNCTION__", CCP_Constant));
+ Results.AddResult(Result("__FUNCTION__", CCP_Constant));
+ if (LangOpts.C99 || LangOpts.CPlusPlus11)
+ Results.AddResult(Result("__func__", CCP_Constant));
+ Results.ExitScope();
+}
+
+static void HandleCodeCompleteResults(Sema *S,
+ CodeCompleteConsumer *CodeCompleter,
+ CodeCompletionContext Context,
+ CodeCompletionResult *Results,
+ unsigned NumResults) {
+ if (CodeCompleter)
+ CodeCompleter->ProcessCodeCompleteResults(*S, Context, Results, NumResults);
+}
+
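+/// \brief Map the parser's completion context onto the corresponding
+/// code-completion context kind reported to consumers.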
+static enum CodeCompletionContext::Kind mapCodeCompletionContext(Sema &S,
+ Sema::ParserCompletionContext PCC) {
+ switch (PCC) {
+ case Sema::PCC_Namespace:
+ return CodeCompletionContext::CCC_TopLevel;
+
+ case Sema::PCC_Class:
+ return CodeCompletionContext::CCC_ClassStructUnion;
+
+ case Sema::PCC_ObjCInterface:
+ return CodeCompletionContext::CCC_ObjCInterface;
+
+ case Sema::PCC_ObjCImplementation:
+ return CodeCompletionContext::CCC_ObjCImplementation;
+
+ case Sema::PCC_ObjCInstanceVariableList:
+ return CodeCompletionContext::CCC_ObjCIvarList;
+
+ case Sema::PCC_Template:
+ case Sema::PCC_MemberTemplate:
+ if (S.CurContext->isFileContext())
+ return CodeCompletionContext::CCC_TopLevel;
+ if (S.CurContext->isRecord())
+ return CodeCompletionContext::CCC_ClassStructUnion;
+ return CodeCompletionContext::CCC_Other;
+
+ case Sema::PCC_RecoveryInFunction:
+ return CodeCompletionContext::CCC_Recovery;
+
+ case Sema::PCC_ForInit:
+ if (S.getLangOpts().CPlusPlus || S.getLangOpts().C99 ||
+ S.getLangOpts().ObjC1)
+ return CodeCompletionContext::CCC_ParenthesizedExpression;
+ else
+ return CodeCompletionContext::CCC_Expression;
+
+ case Sema::PCC_Expression:
+ case Sema::PCC_Condition:
+ return CodeCompletionContext::CCC_Expression;
+
+ case Sema::PCC_Statement:
+ return CodeCompletionContext::CCC_Statement;
+
+ case Sema::PCC_Type:
+ return CodeCompletionContext::CCC_Type;
+
+ case Sema::PCC_ParenthesizedExpression:
+ return CodeCompletionContext::CCC_ParenthesizedExpression;
+
+ case Sema::PCC_LocalDeclarationSpecifiers:
+ return CodeCompletionContext::CCC_Type;
+ }
+
+ llvm_unreachable("Invalid ParserCompletionContext!");
+}
+
+/// \brief If we're in a C++ virtual member function, add completion results
+/// that invoke the functions we override, since it's common to invoke the
+/// overridden function as well as add new functionality.
+///
+/// \param S The semantic analysis object for which we are generating results.
+///
+/// \param InContext The context designated by the nested-name-specifier
+/// that precedes the code-completion point, if any.
+static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
+ ResultBuilder &Results) {
+ // Look through blocks.
+ DeclContext *CurContext = S.CurContext;
+ while (isa<BlockDecl>(CurContext))
+ CurContext = CurContext->getParent();
+
+ CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(CurContext);
+ if (!Method || !Method->isVirtual())
+ return;
+
+ // We need to have names for all of the parameters, if we're going to
+ // generate a forwarding call.
+ for (auto P : Method->params())
+ if (!P->getDeclName())
+ return;
+
+ PrintingPolicy Policy = getCompletionPrintingPolicy(S);
+ for (CXXMethodDecl::method_iterator M = Method->begin_overridden_methods(),
+ MEnd = Method->end_overridden_methods();
+ M != MEnd; ++M) {
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ const CXXMethodDecl *Overridden = *M;
+ if (Overridden->getCanonicalDecl() == Method->getCanonicalDecl())
+ continue;
+
+ // If we need a nested-name-specifier, add one now.
+ if (!InContext) {
+ NestedNameSpecifier *NNS
+ = getRequiredQualification(S.Context, CurContext,
+ Overridden->getDeclContext());
+ if (NNS) {
+ std::string Str;
+ llvm::raw_string_ostream OS(Str);
+ NNS->print(OS, Policy);
+ Builder.AddTextChunk(Results.getAllocator().CopyString(OS.str()));
+ }
+ } else if (!InContext->Equals(Overridden->getDeclContext()))
+ continue;
+
+ Builder.AddTypedTextChunk(Results.getAllocator().CopyString(
+ Overridden->getNameAsString()));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ bool FirstParam = true;
+ for (auto P : Method->params()) {
+ if (FirstParam)
+ FirstParam = false;
+ else
+ Builder.AddChunk(CodeCompletionString::CK_Comma);
+
+ Builder.AddPlaceholderChunk(
+ Results.getAllocator().CopyString(P->getIdentifier()->getName()));
+ }
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(),
+ CCP_SuperCompletion,
+ CXCursor_CXXMethod,
+ CXAvailability_Available,
+ Overridden));
+ Results.Ignore(Overridden);
+ }
+}
+
+void Sema::CodeCompleteModuleImport(SourceLocation ImportLoc,
+ ModuleIdPath Path) {
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+
+ CodeCompletionAllocator &Allocator = Results.getAllocator();
+ CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
+ if (Path.empty()) {
+ // Enumerate all top-level modules.
+ SmallVector<Module *, 8> Modules;
+ PP.getHeaderSearchInfo().collectAllModules(Modules);
+ for (unsigned I = 0, N = Modules.size(); I != N; ++I) {
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(Modules[I]->Name));
+ Results.AddResult(Result(Builder.TakeString(),
+ CCP_Declaration,
+ CXCursor_ModuleImportDecl,
+ Modules[I]->isAvailable()
+ ? CXAvailability_Available
+ : CXAvailability_NotAvailable));
+ }
+ } else if (getLangOpts().Modules) {
+ // Load the named module.
+ Module *Mod = PP.getModuleLoader().loadModule(ImportLoc, Path,
+ Module::AllVisible,
+ /*IsInclusionDirective=*/false);
+ // Enumerate submodules.
+ if (Mod) {
+ for (Module::submodule_iterator Sub = Mod->submodule_begin(),
+ SubEnd = Mod->submodule_end();
+ Sub != SubEnd; ++Sub) {
+
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString((*Sub)->Name));
+ Results.AddResult(Result(Builder.TakeString(),
+ CCP_Declaration,
+ CXCursor_ModuleImportDecl,
+ (*Sub)->isAvailable()
+ ? CXAvailability_Available
+ : CXAvailability_NotAvailable));
+ }
+ }
+ }
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteOrdinaryName(Scope *S,
+ ParserCompletionContext CompletionContext) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ mapCodeCompletionContext(*this, CompletionContext));
+ Results.EnterNewScope();
+
+ // Determine how to filter results, e.g., so that the names of
+ // values (functions, enumerators, function templates, etc.) are
+ // only allowed where we can have an expression.
+ switch (CompletionContext) {
+ case PCC_Namespace:
+ case PCC_Class:
+ case PCC_ObjCInterface:
+ case PCC_ObjCImplementation:
+ case PCC_ObjCInstanceVariableList:
+ case PCC_Template:
+ case PCC_MemberTemplate:
+ case PCC_Type:
+ case PCC_LocalDeclarationSpecifiers:
+ Results.setFilter(&ResultBuilder::IsOrdinaryNonValueName);
+ break;
+
+ case PCC_Statement:
+ case PCC_ParenthesizedExpression:
+ case PCC_Expression:
+ case PCC_ForInit:
+ case PCC_Condition:
+ if (WantTypesInContext(CompletionContext, getLangOpts()))
+ Results.setFilter(&ResultBuilder::IsOrdinaryName);
+ else
+ Results.setFilter(&ResultBuilder::IsOrdinaryNonTypeName);
+
+ if (getLangOpts().CPlusPlus)
+ MaybeAddOverrideCalls(*this, /*InContext=*/nullptr, Results);
+ break;
+
+ case PCC_RecoveryInFunction:
+ // Unfiltered
+ break;
+ }
+
+ // If we are in a C++ non-static member function, check the qualifiers on
+ // the member function to filter/prioritize the results list.
+ if (CXXMethodDecl *CurMethod = dyn_cast<CXXMethodDecl>(CurContext))
+ if (CurMethod->isInstance())
+ Results.setObjectTypeQualifiers(
+ Qualifiers::fromCVRMask(CurMethod->getTypeQualifiers()));
+
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals());
+
+ AddOrdinaryNameResults(CompletionContext, S, *this, Results);
+ Results.ExitScope();
+
+ switch (CompletionContext) {
+ case PCC_ParenthesizedExpression:
+ case PCC_Expression:
+ case PCC_Statement:
+ case PCC_RecoveryInFunction:
+ if (S->getFnParent())
+ AddPrettyFunctionResults(getLangOpts(), Results);
+ break;
+
+ case PCC_Namespace:
+ case PCC_Class:
+ case PCC_ObjCInterface:
+ case PCC_ObjCImplementation:
+ case PCC_ObjCInstanceVariableList:
+ case PCC_Template:
+ case PCC_MemberTemplate:
+ case PCC_ForInit:
+ case PCC_Condition:
+ case PCC_Type:
+ case PCC_LocalDeclarationSpecifiers:
+ break;
+ }
+
+ if (CodeCompleter->includeMacros())
+ AddMacroResults(PP, Results, false);
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(),Results.size());
+}
+
+static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
+ ParsedType Receiver,
+ ArrayRef<IdentifierInfo *> SelIdents,
+ bool AtArgumentExpression,
+ bool IsSuper,
+ ResultBuilder &Results);
+
+void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
+ bool AllowNonIdentifiers,
+ bool AllowNestedNameSpecifiers) {
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ AllowNestedNameSpecifiers
+ ? CodeCompletionContext::CCC_PotentiallyQualifiedName
+ : CodeCompletionContext::CCC_Name);
+ Results.EnterNewScope();
+
+ // Type qualifiers can come after names.
+ Results.AddResult(Result("const"));
+ Results.AddResult(Result("volatile"));
+ if (getLangOpts().C99)
+ Results.AddResult(Result("restrict"));
+
+ if (getLangOpts().CPlusPlus) {
+ if (AllowNonIdentifiers) {
+ Results.AddResult(Result("operator"));
+ }
+
+ // Add nested-name-specifiers.
+ if (AllowNestedNameSpecifiers) {
+ Results.allowNestedNameSpecifiers();
+ Results.setFilter(&ResultBuilder::IsImpossibleToSatisfy);
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(S, LookupNestedNameSpecifierName, Consumer,
+ CodeCompleter->includeGlobals());
+ Results.setFilter(nullptr);
+ }
+ }
+ Results.ExitScope();
+
+ // If we're in a context where we might have an expression (rather than a
+ // declaration), and what we've seen so far is an Objective-C type that could
+ // be a receiver of a class message, this may be a class message send with
+ // the initial opening bracket '[' missing. Add appropriate completions.
+ if (AllowNonIdentifiers && !AllowNestedNameSpecifiers &&
+ DS.getParsedSpecifiers() == DeclSpec::PQ_TypeSpecifier &&
+ DS.getTypeSpecType() == DeclSpec::TST_typename &&
+ DS.getTypeSpecComplex() == DeclSpec::TSC_unspecified &&
+ DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
+ !DS.isTypeAltiVecVector() &&
+ S &&
+ (S->getFlags() & Scope::DeclScope) != 0 &&
+ (S->getFlags() & (Scope::ClassScope | Scope::TemplateParamScope |
+ Scope::FunctionPrototypeScope |
+ Scope::AtCatchScope)) == 0) {
+ ParsedType T = DS.getRepAsType();
+ if (!T.get().isNull() && T.get()->isObjCObjectOrInterfaceType())
+ AddClassMessageCompletions(*this, S, T, None, false, false, Results);
+ }
+
+ // Note that we intentionally suppress macro results here, since we do not
+ // encourage using macros to produce the names of entities.
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
+
+struct Sema::CodeCompleteExpressionData {
+ CodeCompleteExpressionData(QualType PreferredType = QualType())
+ : PreferredType(PreferredType), IntegralConstantExpression(false),
+ ObjCCollection(false) { }
+
+ QualType PreferredType;
+ bool IntegralConstantExpression;
+ bool ObjCCollection;
+ SmallVector<Decl *, 4> IgnoreDecls;
+};
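+
+// For example, CodeCompleteCase below fills in this struct when the switch
+// condition is not of enumeration type, requesting only integral constant
+// expressions of the condition's type:
+//
+//   CodeCompleteExpressionData Data(type);
+//   Data.IntegralConstantExpression = true;
+//   CodeCompleteExpression(S, Data);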
+
+/// \brief Perform code-completion in an expression context when we know what
+/// type we're looking for.
+void Sema::CodeCompleteExpression(Scope *S,
+ const CodeCompleteExpressionData &Data) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Expression);
+ if (Data.ObjCCollection)
+ Results.setFilter(&ResultBuilder::IsObjCCollection);
+ else if (Data.IntegralConstantExpression)
+ Results.setFilter(&ResultBuilder::IsIntegralConstantValue);
+ else if (WantTypesInContext(PCC_Expression, getLangOpts()))
+ Results.setFilter(&ResultBuilder::IsOrdinaryName);
+ else
+ Results.setFilter(&ResultBuilder::IsOrdinaryNonTypeName);
+
+ if (!Data.PreferredType.isNull())
+ Results.setPreferredType(Data.PreferredType.getNonReferenceType());
+
+ // Ignore any declarations that we were told that we don't care about.
+ for (unsigned I = 0, N = Data.IgnoreDecls.size(); I != N; ++I)
+ Results.Ignore(Data.IgnoreDecls[I]);
+
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals());
+
+ Results.EnterNewScope();
+ AddOrdinaryNameResults(PCC_Expression, S, *this, Results);
+ Results.ExitScope();
+
+ bool PreferredTypeIsPointer = false;
+ if (!Data.PreferredType.isNull())
+ PreferredTypeIsPointer = Data.PreferredType->isAnyPointerType()
+ || Data.PreferredType->isMemberPointerType()
+ || Data.PreferredType->isBlockPointerType();
+
+ if (S->getFnParent() &&
+ !Data.ObjCCollection &&
+ !Data.IntegralConstantExpression)
+ AddPrettyFunctionResults(getLangOpts(), Results);
+
+ if (CodeCompleter->includeMacros())
+ AddMacroResults(PP, Results, false, PreferredTypeIsPointer);
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext(CodeCompletionContext::CCC_Expression,
+ Data.PreferredType),
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompletePostfixExpression(Scope *S, ExprResult E) {
+ if (E.isInvalid())
+ CodeCompleteOrdinaryName(S, PCC_RecoveryInFunction);
+ else if (getLangOpts().ObjC1)
+ CodeCompleteObjCInstanceMessage(S, E.get(), None, false);
+}
+
+/// \brief The set of properties that have already been added, referenced by
+/// property name.
+typedef llvm::SmallPtrSet<IdentifierInfo*, 16> AddedPropertiesSet;
+
+/// \brief Retrieve the container definition, if any.
+static ObjCContainerDecl *getContainerDef(ObjCContainerDecl *Container) {
+ if (ObjCInterfaceDecl *Interface = dyn_cast<ObjCInterfaceDecl>(Container)) {
+ if (Interface->hasDefinition())
+ return Interface->getDefinition();
+
+ return Interface;
+ }
+
+ if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
+ if (Protocol->hasDefinition())
+ return Protocol->getDefinition();
+
+ return Protocol;
+ }
+ return Container;
+}
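+
+// For example (hypothetical names): given a forward declaration "@class Foo;"
+// and a full "@interface Foo ... @end" elsewhere, passing the forward-declared
+// interface here yields the defining @interface; a container that has no
+// definition is returned unchanged.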
+
+static void AddObjCProperties(const CodeCompletionContext &CCContext,
+ ObjCContainerDecl *Container,
+ bool AllowCategories,
+ bool AllowNullaryMethods,
+ DeclContext *CurContext,
+ AddedPropertiesSet &AddedProperties,
+ ResultBuilder &Results) {
+ typedef CodeCompletionResult Result;
+
+ // Retrieve the definition.
+ Container = getContainerDef(Container);
+
+ // Add properties in this container.
+ for (const auto *P : Container->properties())
+ if (AddedProperties.insert(P->getIdentifier()).second)
+ Results.MaybeAddResult(Result(P, Results.getBasePriority(P), nullptr),
+ CurContext);
+
+ // Add nullary methods
+ if (AllowNullaryMethods) {
+ ASTContext &Context = Container->getASTContext();
+ PrintingPolicy Policy = getCompletionPrintingPolicy(Results.getSema());
+ for (auto *M : Container->methods()) {
+ if (M->getSelector().isUnarySelector())
+ if (IdentifierInfo *Name = M->getSelector().getIdentifierInfoForSlot(0))
+ if (AddedProperties.insert(Name).second) {
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ AddResultTypeChunk(Context, Policy, M, CCContext.getBaseType(),
+ Builder);
+ Builder.AddTypedTextChunk(
+ Results.getAllocator().CopyString(Name->getName()));
+
+ Results.MaybeAddResult(Result(Builder.TakeString(), M,
+ CCP_MemberDeclaration + CCD_MethodAsProperty),
+ CurContext);
+ }
+ }
+ }
+
+ // Add properties in referenced protocols.
+ if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
+ for (auto *P : Protocol->protocols())
+ AddObjCProperties(CCContext, P, AllowCategories, AllowNullaryMethods,
+ CurContext, AddedProperties, Results);
+ } else if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container)){
+ if (AllowCategories) {
+ // Look through categories.
+ for (auto *Cat : IFace->known_categories())
+ AddObjCProperties(CCContext, Cat, AllowCategories, AllowNullaryMethods,
+ CurContext, AddedProperties, Results);
+ }
+
+ // Look through protocols.
+ for (auto *I : IFace->all_referenced_protocols())
+ AddObjCProperties(CCContext, I, AllowCategories, AllowNullaryMethods,
+ CurContext, AddedProperties, Results);
+
+ // Look in the superclass.
+ if (IFace->getSuperClass())
+ AddObjCProperties(CCContext, IFace->getSuperClass(), AllowCategories,
+ AllowNullaryMethods, CurContext,
+ AddedProperties, Results);
+ } else if (const ObjCCategoryDecl *Category
+ = dyn_cast<ObjCCategoryDecl>(Container)) {
+ // Look through protocols.
+ for (auto *P : Category->protocols())
+ AddObjCProperties(CCContext, P, AllowCategories, AllowNullaryMethods,
+ CurContext, AddedProperties, Results);
+ }
+}
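+
+// A worked example (interface is hypothetical): given
+//
+//   @interface Foo : NSObject <Bar>
+//   @property (readonly) int size;
+//   - (id)owner;
+//   @end
+//
+// completing "foo." offers 'size' directly and, because -owner is a nullary
+// method, offers 'owner' in property syntax as well; properties from protocol
+// Bar, from categories of Foo, and from superclasses are added by the
+// recursive calls above, deduplicated by name through AddedProperties.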
+
+void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
+ SourceLocation OpLoc,
+ bool IsArrow) {
+ if (!Base || !CodeCompleter)
+ return;
+
+ ExprResult ConvertedBase = PerformMemberExprBaseConversion(Base, IsArrow);
+ if (ConvertedBase.isInvalid())
+ return;
+ Base = ConvertedBase.get();
+
+ typedef CodeCompletionResult Result;
+
+ QualType BaseType = Base->getType();
+
+ if (IsArrow) {
+ if (const PointerType *Ptr = BaseType->getAs<PointerType>())
+ BaseType = Ptr->getPointeeType();
+ else if (BaseType->isObjCObjectPointerType())
+ /*Do nothing*/ ;
+ else
+ return;
+ }
+
+ enum CodeCompletionContext::Kind contextKind;
+
+ if (IsArrow) {
+ contextKind = CodeCompletionContext::CCC_ArrowMemberAccess;
+ }
+ else {
+ if (BaseType->isObjCObjectPointerType() ||
+ BaseType->isObjCObjectOrInterfaceType()) {
+ contextKind = CodeCompletionContext::CCC_ObjCPropertyAccess;
+ }
+ else {
+ contextKind = CodeCompletionContext::CCC_DotMemberAccess;
+ }
+ }
+
+ CodeCompletionContext CCContext(contextKind, BaseType);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CCContext,
+ &ResultBuilder::IsMember);
+ Results.EnterNewScope();
+ if (const RecordType *Record = BaseType->getAs<RecordType>()) {
+ // Indicate that we are performing a member access, and the cv-qualifiers
+ // for the base object type.
+ Results.setObjectTypeQualifiers(BaseType.getQualifiers());
+
+ // Access to a C/C++ class, struct, or union.
+ Results.allowNestedNameSpecifiers();
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(Record->getDecl(), LookupMemberName, Consumer,
+ CodeCompleter->includeGlobals());
+
+ if (getLangOpts().CPlusPlus) {
+ if (!Results.empty()) {
+ // The "template" keyword can follow "->" or "." in the grammar.
+ // However, we only want to suggest the template keyword if something
+ // is dependent.
+ bool IsDependent = BaseType->isDependentType();
+ if (!IsDependent) {
+ for (Scope *DepScope = S; DepScope; DepScope = DepScope->getParent())
+ if (DeclContext *Ctx = DepScope->getEntity()) {
+ IsDependent = Ctx->isDependentContext();
+ break;
+ }
+ }
+
+ if (IsDependent)
+ Results.AddResult(Result("template"));
+ }
+ }
+ } else if (!IsArrow && BaseType->getAsObjCInterfacePointerType()) {
+ // Objective-C property reference.
+ AddedPropertiesSet AddedProperties;
+
+ // Add property results based on our interface.
+ const ObjCObjectPointerType *ObjCPtr
+ = BaseType->getAsObjCInterfacePointerType();
+ assert(ObjCPtr && "Non-NULL pointer guaranteed above!");
+ AddObjCProperties(CCContext, ObjCPtr->getInterfaceDecl(), true,
+ /*AllowNullaryMethods=*/true, CurContext,
+ AddedProperties, Results);
+
+ // Add properties from the protocols in a qualified interface.
+ for (auto *I : ObjCPtr->quals())
+ AddObjCProperties(CCContext, I, true, /*AllowNullaryMethods=*/true,
+ CurContext, AddedProperties, Results);
+ } else if ((IsArrow && BaseType->isObjCObjectPointerType()) ||
+ (!IsArrow && BaseType->isObjCObjectType())) {
+ // Objective-C instance variable access.
+ ObjCInterfaceDecl *Class = nullptr;
+ if (const ObjCObjectPointerType *ObjCPtr
+ = BaseType->getAs<ObjCObjectPointerType>())
+ Class = ObjCPtr->getInterfaceDecl();
+ else
+ Class = BaseType->getAs<ObjCObjectType>()->getInterface();
+
+ // Add all ivars from this class and its superclasses.
+ if (Class) {
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ Results.setFilter(&ResultBuilder::IsObjCIvar);
+ LookupVisibleDecls(Class, LookupMemberName, Consumer,
+ CodeCompleter->includeGlobals());
+ }
+ }
+
+ // FIXME: How do we cope with isa?
+
+ Results.ExitScope();
+
+ // Hand off the results found for code completion.
+ HandleCodeCompleteResults(this, CodeCompleter,
+ Results.getCompletionContext(),
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteTag(Scope *S, unsigned TagSpec) {
+ if (!CodeCompleter)
+ return;
+
+ ResultBuilder::LookupFilter Filter = nullptr;
+ enum CodeCompletionContext::Kind ContextKind
+ = CodeCompletionContext::CCC_Other;
+ switch ((DeclSpec::TST)TagSpec) {
+ case DeclSpec::TST_enum:
+ Filter = &ResultBuilder::IsEnum;
+ ContextKind = CodeCompletionContext::CCC_EnumTag;
+ break;
+
+ case DeclSpec::TST_union:
+ Filter = &ResultBuilder::IsUnion;
+ ContextKind = CodeCompletionContext::CCC_UnionTag;
+ break;
+
+ case DeclSpec::TST_struct:
+ case DeclSpec::TST_class:
+ case DeclSpec::TST_interface:
+ Filter = &ResultBuilder::IsClassOrStruct;
+ ContextKind = CodeCompletionContext::CCC_ClassOrStructTag;
+ break;
+
+ default:
+ llvm_unreachable("Unknown type specifier kind in CodeCompleteTag");
+ }
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(), ContextKind);
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+
+ // First pass: look for tags.
+ Results.setFilter(Filter);
+ LookupVisibleDecls(S, LookupTagName, Consumer,
+ CodeCompleter->includeGlobals());
+
+ if (CodeCompleter->includeGlobals()) {
+ // Second pass: look for nested name specifiers.
+ Results.setFilter(&ResultBuilder::IsNestedNameSpecifier);
+ LookupVisibleDecls(S, LookupNestedNameSpecifierName, Consumer);
+ }
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteTypeQualifiers(DeclSpec &DS) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_TypeQualifiers);
+ Results.EnterNewScope();
+ if (!(DS.getTypeQualifiers() & DeclSpec::TQ_const))
+ Results.AddResult("const");
+ if (!(DS.getTypeQualifiers() & DeclSpec::TQ_volatile))
+ Results.AddResult("volatile");
+ if (getLangOpts().C99 &&
+ !(DS.getTypeQualifiers() & DeclSpec::TQ_restrict))
+ Results.AddResult("restrict");
+ if (getLangOpts().C11 &&
+ !(DS.getTypeQualifiers() & DeclSpec::TQ_atomic))
+ Results.AddResult("_Atomic");
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter,
+ Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
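+
+// For example, once the declaration specifiers already contain 'const', only
+// 'volatile' (plus 'restrict' under C99 and '_Atomic' under C11) remains in
+// the result set, since each qualifier is suggested at most once.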
+
+void Sema::CodeCompleteCase(Scope *S) {
+ if (getCurFunction()->SwitchStack.empty() || !CodeCompleter)
+ return;
+
+ SwitchStmt *Switch = getCurFunction()->SwitchStack.back();
+ QualType type = Switch->getCond()->IgnoreImplicit()->getType();
+ if (!type->isEnumeralType()) {
+ CodeCompleteExpressionData Data(type);
+ Data.IntegralConstantExpression = true;
+ CodeCompleteExpression(S, Data);
+ return;
+ }
+
+ // Code-complete the cases of a switch statement over an enumeration type
+ // by providing the list of enumerators that have not yet been mentioned.
+ EnumDecl *Enum = type->castAs<EnumType>()->getDecl();
+ if (EnumDecl *Def = Enum->getDefinition())
+ Enum = Def;
+
+ // Determine which enumerators we have already seen in the switch statement.
+ // FIXME: Ideally, we would also be able to look *past* the code-completion
+ // token, in case we are code-completing in the middle of the switch and not
+ // at the end. However, we aren't able to do so at the moment.
+ llvm::SmallPtrSet<EnumConstantDecl *, 8> EnumeratorsSeen;
+ NestedNameSpecifier *Qualifier = nullptr;
+ for (SwitchCase *SC = Switch->getSwitchCaseList(); SC;
+ SC = SC->getNextSwitchCase()) {
+ CaseStmt *Case = dyn_cast<CaseStmt>(SC);
+ if (!Case)
+ continue;
+
+ Expr *CaseVal = Case->getLHS()->IgnoreParenCasts();
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CaseVal))
+ if (EnumConstantDecl *Enumerator
+ = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
+ // We look into the AST of the case statement to determine which
+ // enumerator was named. Alternatively, we could compute the value of
+ // the integral constant expression, then compare it against the
+ // values of each enumerator. However, the value-based approach would not
+ // work as well with C++ templates where enumerators declared within a
+ // template are type- and value-dependent.
+ EnumeratorsSeen.insert(Enumerator);
+
+ // If this is a qualified-id, keep track of the nested-name-specifier
+ // so that we can reproduce it as part of code completion, e.g.,
+ //
+ // switch (TagD.getKind()) {
+ // case TagDecl::TK_enum:
+ // break;
+ // case XXX
+ //
+ // At the XXX, our completions are TagDecl::TK_union,
+ // TagDecl::TK_struct, and TagDecl::TK_class, rather than TK_union,
+ // TK_struct, and TK_class.
+ Qualifier = DRE->getQualifier();
+ }
+ }
+
+ if (getLangOpts().CPlusPlus && !Qualifier && EnumeratorsSeen.empty()) {
+ // If there are no prior enumerators in C++, check whether we have to
+ // qualify the names of the enumerators that we suggest, because they
+ // may not be visible in this scope.
+ Qualifier = getRequiredQualification(Context, CurContext, Enum);
+ }
+
+ // Add any enumerators that have not yet been mentioned.
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Expression);
+ Results.EnterNewScope();
+ for (auto *E : Enum->enumerators()) {
+ if (EnumeratorsSeen.count(E))
+ continue;
+
+ CodeCompletionResult R(E, CCP_EnumInCase, Qualifier);
+ Results.AddResult(R, CurContext, nullptr, false);
+ }
+ Results.ExitScope();
+
+ // We need to make sure we're setting the right context,
+ // so only say we include macros if the code completer says we do.
+ enum CodeCompletionContext::Kind kind = CodeCompletionContext::CCC_Other;
+ if (CodeCompleter->includeMacros()) {
+ AddMacroResults(PP, Results, false);
+ kind = CodeCompletionContext::CCC_OtherWithMacros;
+ }
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ kind,
+ Results.data(),Results.size());
+}
+
+static bool anyNullArguments(ArrayRef<Expr *> Args) {
+ if (Args.size() && !Args.data())
+ return true;
+
+ for (unsigned I = 0; I != Args.size(); ++I)
+ if (!Args[I])
+ return true;
+
+ return false;
+}
+
+typedef CodeCompleteConsumer::OverloadCandidate ResultCandidate;
+
+static void mergeCandidatesWithResults(Sema &SemaRef,
+ SmallVectorImpl<ResultCandidate> &Results,
+ OverloadCandidateSet &CandidateSet,
+ SourceLocation Loc) {
+ if (!CandidateSet.empty()) {
+ // Sort the overload candidate set by placing the best overloads first.
+ std::stable_sort(
+ CandidateSet.begin(), CandidateSet.end(),
+ [&](const OverloadCandidate &X, const OverloadCandidate &Y) {
+ return isBetterOverloadCandidate(SemaRef, X, Y, Loc);
+ });
+
+ // Add the remaining viable overload candidates as code-completion results.
+ for (auto &Candidate : CandidateSet)
+ if (Candidate.Viable)
+ Results.push_back(ResultCandidate(Candidate.Function));
+ }
+}
+
+/// \brief Get the type of the Nth parameter from a given set of overload
+/// candidates.
+static QualType getParamType(Sema &SemaRef,
+ ArrayRef<ResultCandidate> Candidates,
+ unsigned N) {
+
+ // Given the overloads 'Candidates' for a function call matching all arguments
+ // up to N, return the type of the Nth parameter if it is the same for all
+ // overload candidates.
+ QualType ParamType;
+ for (auto &Candidate : Candidates) {
+ if (auto FType = Candidate.getFunctionType())
+ if (auto Proto = dyn_cast<FunctionProtoType>(FType))
+ if (N < Proto->getNumParams()) {
+ if (ParamType.isNull())
+ ParamType = Proto->getParamType(N);
+ else if (!SemaRef.Context.hasSameUnqualifiedType(
+ ParamType.getNonReferenceType(),
+ Proto->getParamType(N).getNonReferenceType()))
+ // Otherwise return a default-constructed QualType.
+ return QualType();
+ }
+ }
+
+ return ParamType;
+}
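+
+// An illustrative sketch (overload set is hypothetical): for the candidates
+//
+//   void f(int, float);
+//   void f(int, double);
+//
+// getParamType(SemaRef, Candidates, 0) yields 'int', because every overload
+// agrees on parameter 0, whereas getParamType(SemaRef, Candidates, 1) yields
+// a null QualType, because 'float' and 'double' disagree.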
+
+static void CodeCompleteOverloadResults(Sema &SemaRef, Scope *S,
+ MutableArrayRef<ResultCandidate> Candidates,
+ unsigned CurrentArg,
+ bool CompleteExpressionWithCurrentArg = true) {
+ QualType ParamType;
+ if (CompleteExpressionWithCurrentArg)
+ ParamType = getParamType(SemaRef, Candidates, CurrentArg);
+
+ if (ParamType.isNull())
+ SemaRef.CodeCompleteOrdinaryName(S, Sema::PCC_Expression);
+ else
+ SemaRef.CodeCompleteExpression(S, ParamType);
+
+ if (!Candidates.empty())
+ SemaRef.CodeCompleter->ProcessOverloadCandidates(SemaRef, CurrentArg,
+ Candidates.data(),
+ Candidates.size());
+}
+
+void Sema::CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args) {
+ if (!CodeCompleter)
+ return;
+
+ // When we're code-completing for a call, we fall back to ordinary
+ // name code-completion whenever we can't produce specific
+ // results. We may want to revisit this strategy in the future,
+ // e.g., by merging the two kinds of results.
+
+ // FIXME: Provide support for variadic template functions.
+
+ // Ignore type-dependent call expressions entirely.
+ if (!Fn || Fn->isTypeDependent() || anyNullArguments(Args) ||
+ Expr::hasAnyTypeDependentArguments(Args)) {
+ CodeCompleteOrdinaryName(S, PCC_Expression);
+ return;
+ }
+
+ // Build an overload candidate set based on the functions we find.
+ SourceLocation Loc = Fn->getExprLoc();
+ OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
+
+ SmallVector<ResultCandidate, 8> Results;
+
+ Expr *NakedFn = Fn->IgnoreParenCasts();
+ if (auto ULE = dyn_cast<UnresolvedLookupExpr>(NakedFn))
+ AddOverloadedCallCandidates(ULE, Args, CandidateSet,
+ /*PartialOverloading=*/true);
+ else if (auto UME = dyn_cast<UnresolvedMemberExpr>(NakedFn)) {
+ TemplateArgumentListInfo TemplateArgsBuffer, *TemplateArgs = nullptr;
+ if (UME->hasExplicitTemplateArgs()) {
+ UME->copyTemplateArgumentsInto(TemplateArgsBuffer);
+ TemplateArgs = &TemplateArgsBuffer;
+ }
+ SmallVector<Expr *, 12> ArgExprs(1, UME->getBase());
+ ArgExprs.append(Args.begin(), Args.end());
+ UnresolvedSet<8> Decls;
+ Decls.append(UME->decls_begin(), UME->decls_end());
+ AddFunctionCandidates(Decls, ArgExprs, CandidateSet, TemplateArgs,
+ /*SuppressUsedConversions=*/false,
+ /*PartialOverloading=*/true);
+ } else {
+ FunctionDecl *FD = nullptr;
+ if (auto MCE = dyn_cast<MemberExpr>(NakedFn))
+ FD = dyn_cast<FunctionDecl>(MCE->getMemberDecl());
+ else if (auto DRE = dyn_cast<DeclRefExpr>(NakedFn))
+ FD = dyn_cast<FunctionDecl>(DRE->getDecl());
+ if (FD) { // We check whether it's a resolved function declaration.
+ if (!getLangOpts().CPlusPlus ||
+ !FD->getType()->getAs<FunctionProtoType>())
+ Results.push_back(ResultCandidate(FD));
+ else
+ AddOverloadCandidate(FD, DeclAccessPair::make(FD, FD->getAccess()),
+ Args, CandidateSet,
+ /*SuppressUsedConversions=*/false,
+ /*PartialOverloading=*/true);
+
+ } else if (auto DC = NakedFn->getType()->getAsCXXRecordDecl()) {
+ // If the expression's type is a CXXRecordDecl, it may overload the function
+ // call operator, so we check whether it does and add any candidates.
+ // A complete type is needed to look up member function call operators.
+ if (isCompleteType(Loc, NakedFn->getType())) {
+ DeclarationName OpName = Context.DeclarationNames
+ .getCXXOperatorName(OO_Call);
+ LookupResult R(*this, OpName, Loc, LookupOrdinaryName);
+ LookupQualifiedName(R, DC);
+ R.suppressDiagnostics();
+ SmallVector<Expr *, 12> ArgExprs(1, NakedFn);
+ ArgExprs.append(Args.begin(), Args.end());
+ AddFunctionCandidates(R.asUnresolvedSet(), ArgExprs, CandidateSet,
+ /*ExplicitArgs=*/nullptr,
+ /*SuppressUsedConversions=*/false,
+ /*PartialOverloading=*/true);
+ }
+ } else {
+ // Lastly, we check whether the expression's type is a function pointer or
+ // a function.
+ QualType T = NakedFn->getType();
+ if (!T->getPointeeType().isNull())
+ T = T->getPointeeType();
+
+ if (auto FP = T->getAs<FunctionProtoType>()) {
+ if (!TooManyArguments(FP->getNumParams(), Args.size(),
+ /*PartialOverloading=*/true) ||
+ FP->isVariadic())
+ Results.push_back(ResultCandidate(FP));
+ } else if (auto FT = T->getAs<FunctionType>())
+ // No prototype or declaration; it may be a K&R-style function.
+ Results.push_back(ResultCandidate(FT));
+ }
+ }
+
+ mergeCandidatesWithResults(*this, Results, CandidateSet, Loc);
+ CodeCompleteOverloadResults(*this, S, Results, Args.size(),
+ !CandidateSet.empty());
+}
+
+void Sema::CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc,
+ ArrayRef<Expr *> Args) {
+ if (!CodeCompleter)
+ return;
+
+ // A complete type is needed to look up constructors.
+ if (!isCompleteType(Loc, Type))
+ return;
+
+ CXXRecordDecl *RD = Type->getAsCXXRecordDecl();
+ if (!RD) {
+ CodeCompleteExpression(S, Type);
+ return;
+ }
+
+ // FIXME: Provide support for member initializers.
+ // FIXME: Provide support for variadic template constructors.
+
+ OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
+
+ for (auto C : LookupConstructors(RD)) {
+ if (auto FD = dyn_cast<FunctionDecl>(C)) {
+ AddOverloadCandidate(FD, DeclAccessPair::make(FD, C->getAccess()),
+ Args, CandidateSet,
+ /*SuppressUsedConversions=*/false,
+ /*PartialOverloading=*/true);
+ } else if (auto FTD = dyn_cast<FunctionTemplateDecl>(C)) {
+ AddTemplateOverloadCandidate(FTD,
+ DeclAccessPair::make(FTD, C->getAccess()),
+ /*ExplicitTemplateArgs=*/nullptr,
+ Args, CandidateSet,
+ /*SuppressUsedConversions=*/false,
+ /*PartialOverloading=*/true);
+ }
+ }
+
+ SmallVector<ResultCandidate, 8> Results;
+ mergeCandidatesWithResults(*this, Results, CandidateSet, Loc);
+ CodeCompleteOverloadResults(*this, S, Results, Args.size());
+}
+
+void Sema::CodeCompleteInitializer(Scope *S, Decl *D) {
+ ValueDecl *VD = dyn_cast_or_null<ValueDecl>(D);
+ if (!VD) {
+ CodeCompleteOrdinaryName(S, PCC_Expression);
+ return;
+ }
+
+ CodeCompleteExpression(S, VD->getType());
+}
+
+void Sema::CodeCompleteReturn(Scope *S) {
+ QualType ResultType;
+ if (isa<BlockDecl>(CurContext)) {
+ if (BlockScopeInfo *BSI = getCurBlock())
+ ResultType = BSI->ReturnType;
+ } else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(CurContext))
+ ResultType = Function->getReturnType();
+ else if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(CurContext))
+ ResultType = Method->getReturnType();
+
+ if (ResultType.isNull())
+ CodeCompleteOrdinaryName(S, PCC_Expression);
+ else
+ CodeCompleteExpression(S, ResultType);
+}
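+
+// For example, completing after "return " in a function declared as "int f()"
+// (name hypothetical) completes expressions with preferred type 'int'; when
+// no result type can be determined, this falls back to ordinary name
+// completion.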
+
+void Sema::CodeCompleteAfterIf(Scope *S) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ mapCodeCompletionContext(*this, PCC_Statement));
+ Results.setFilter(&ResultBuilder::IsOrdinaryName);
+ Results.EnterNewScope();
+
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals());
+
+ AddOrdinaryNameResults(PCC_Statement, S, *this, Results);
+
+ // "else" block
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ Builder.AddTypedTextChunk("else");
+ if (Results.includeCodePatterns()) {
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ }
+ Results.AddResult(Builder.TakeString());
+
+ // "else if" block
+ Builder.AddTypedTextChunk("else");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("if");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ if (getLangOpts().CPlusPlus)
+ Builder.AddPlaceholderChunk("condition");
+ else
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ if (Results.includeCodePatterns()) {
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ }
+ Results.AddResult(Builder.TakeString());
+
+ Results.ExitScope();
+
+ if (S->getFnParent())
+ AddPrettyFunctionResults(getLangOpts(), Results);
+
+ if (CodeCompleter->includeMacros())
+ AddMacroResults(PP, Results, false);
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteAssignmentRHS(Scope *S, Expr *LHS) {
+ if (LHS)
+ CodeCompleteExpression(S, static_cast<Expr *>(LHS)->getType());
+ else
+ CodeCompleteOrdinaryName(S, PCC_Expression);
+}
+
+void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
+ bool EnteringContext) {
+ if (!SS.getScopeRep() || !CodeCompleter)
+ return;
+
+ DeclContext *Ctx = computeDeclContext(SS, EnteringContext);
+ if (!Ctx)
+ return;
+
+ // Try to instantiate any non-dependent declaration contexts before
+ // we look in them.
+ if (!isDependentScopeSpecifier(SS) && RequireCompleteDeclContext(SS, Ctx))
+ return;
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Name);
+ Results.EnterNewScope();
+
+ // The "template" keyword can follow "::" in the grammar, but only
+ // put it into the grammar if the nested-name-specifier is dependent.
+ NestedNameSpecifier *NNS = SS.getScopeRep();
+ if (!Results.empty() && NNS->isDependent())
+ Results.AddResult("template");
+
+ // Add calls to overridden virtual functions, if there are any.
+ //
+ // FIXME: This isn't wonderful, because we don't know whether we're actually
+ // in a context that permits expressions. This is a general issue with
+ // qualified-id completions.
+ if (!EnteringContext)
+ MaybeAddOverrideCalls(*this, Ctx, Results);
+ Results.ExitScope();
+
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(Ctx, LookupOrdinaryName, Consumer);
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ Results.getCompletionContext(),
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteUsing(Scope *S) {
+ if (!CodeCompleter)
+ return;
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_PotentiallyQualifiedName,
+ &ResultBuilder::IsNestedNameSpecifier);
+ Results.EnterNewScope();
+
+ // If we aren't in class scope, we could see the "namespace" keyword.
+ if (!S->isClassScope())
+ Results.AddResult(CodeCompletionResult("namespace"));
+
+ // After "using", we can see anything that would start a
+ // nested-name-specifier.
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals());
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_PotentiallyQualifiedName,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteUsingDirective(Scope *S) {
+ if (!CodeCompleter)
+ return;
+
+ // After "using namespace", we expect to see a namespace name or namespace
+ // alias.
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Namespace,
+ &ResultBuilder::IsNamespaceOrAlias);
+ Results.EnterNewScope();
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals());
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Namespace,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteNamespaceDecl(Scope *S) {
+ if (!CodeCompleter)
+ return;
+
+ DeclContext *Ctx = S->getEntity();
+ if (!S->getParent())
+ Ctx = Context.getTranslationUnitDecl();
+
+ bool SuppressedGlobalResults
+ = Ctx && !CodeCompleter->includeGlobals() && isa<TranslationUnitDecl>(Ctx);
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ SuppressedGlobalResults
+ ? CodeCompletionContext::CCC_Namespace
+ : CodeCompletionContext::CCC_Other,
+ &ResultBuilder::IsNamespace);
+
+ if (Ctx && Ctx->isFileContext() && !SuppressedGlobalResults) {
+ // We only want to see those namespaces that have already been defined
+ // within this scope, because it's likely that the user is creating an
+ // extended namespace declaration. Keep track of the most recent
+ // definition of each namespace.
+ std::map<NamespaceDecl *, NamespaceDecl *> OrigToLatest;
+ for (DeclContext::specific_decl_iterator<NamespaceDecl>
+ NS(Ctx->decls_begin()), NSEnd(Ctx->decls_end());
+ NS != NSEnd; ++NS)
+ OrigToLatest[NS->getOriginalNamespace()] = *NS;
+
+ // Add the most recent definition (or extended definition) of each
+ // namespace to the list of results.
+ Results.EnterNewScope();
+ for (std::map<NamespaceDecl *, NamespaceDecl *>::iterator
+ NS = OrigToLatest.begin(),
+ NSEnd = OrigToLatest.end();
+ NS != NSEnd; ++NS)
+ Results.AddResult(CodeCompletionResult(
+ NS->second, Results.getBasePriority(NS->second),
+ nullptr),
+ CurContext, nullptr, false);
+ Results.ExitScope();
+ }
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ Results.getCompletionContext(),
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteNamespaceAliasDecl(Scope *S) {
+ if (!CodeCompleter)
+ return;
+
+ // After "namespace", we expect to see a namespace or alias.
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Namespace,
+ &ResultBuilder::IsNamespaceOrAlias);
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals());
+ HandleCodeCompleteResults(this, CodeCompleter,
+ Results.getCompletionContext(),
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteOperatorName(Scope *S) {
+ if (!CodeCompleter)
+ return;
+
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Type,
+ &ResultBuilder::IsType);
+ Results.EnterNewScope();
+
+ // Add the names of overloadable operators.
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ if (std::strcmp(Spelling, "?")) \
+ Results.AddResult(Result(Spelling));
+#include "clang/Basic/OperatorKinds.def"
+
+ // Add any type names visible from the current scope
+ Results.allowNestedNameSpecifiers();
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals());
+
+ // Add any type specifiers
+ AddTypeSpecifierResults(getLangOpts(), Results);
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Type,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteConstructorInitializer(
+ Decl *ConstructorD,
+ ArrayRef <CXXCtorInitializer *> Initializers) {
+ if (!ConstructorD)
+ return;
+
+ AdjustDeclIfTemplate(ConstructorD);
+
+ CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(ConstructorD);
+ if (!Constructor)
+ return;
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_PotentiallyQualifiedName);
+ Results.EnterNewScope();
+
+ // Fill in any already-initialized fields or base classes.
+ llvm::SmallPtrSet<FieldDecl *, 4> InitializedFields;
+ llvm::SmallPtrSet<CanQualType, 4> InitializedBases;
+ for (unsigned I = 0, E = Initializers.size(); I != E; ++I) {
+ if (Initializers[I]->isBaseInitializer())
+ InitializedBases.insert(
+ Context.getCanonicalType(QualType(Initializers[I]->getBaseClass(), 0)));
+ else
+ InitializedFields.insert(cast<FieldDecl>(
+ Initializers[I]->getAnyMember()));
+ }
+
+ // Add completions for base classes.
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
+ bool SawLastInitializer = Initializers.empty();
+ CXXRecordDecl *ClassDecl = Constructor->getParent();
+ for (const auto &Base : ClassDecl->bases()) {
+ if (!InitializedBases.insert(Context.getCanonicalType(Base.getType()))
+ .second) {
+ SawLastInitializer
+ = !Initializers.empty() &&
+ Initializers.back()->isBaseInitializer() &&
+ Context.hasSameUnqualifiedType(Base.getType(),
+ QualType(Initializers.back()->getBaseClass(), 0));
+ continue;
+ }
+
+ Builder.AddTypedTextChunk(
+ Results.getAllocator().CopyString(
+ Base.getType().getAsString(Policy)));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("args");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(),
+ SawLastInitializer? CCP_NextInitializer
+ : CCP_MemberDeclaration));
+ SawLastInitializer = false;
+ }
+
+ // Add completions for virtual base classes.
+ for (const auto &Base : ClassDecl->vbases()) {
+ if (!InitializedBases.insert(Context.getCanonicalType(Base.getType()))
+ .second) {
+ SawLastInitializer
+ = !Initializers.empty() &&
+ Initializers.back()->isBaseInitializer() &&
+ Context.hasSameUnqualifiedType(Base.getType(),
+ QualType(Initializers.back()->getBaseClass(), 0));
+ continue;
+ }
+
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(
+ Base.getType().getAsString(Policy)));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("args");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(),
+ SawLastInitializer? CCP_NextInitializer
+ : CCP_MemberDeclaration));
+ SawLastInitializer = false;
+ }
+
+ // Add completions for members.
+ for (auto *Field : ClassDecl->fields()) {
+ if (!InitializedFields.insert(cast<FieldDecl>(Field->getCanonicalDecl()))
+ .second) {
+ SawLastInitializer
+ = !Initializers.empty() &&
+ Initializers.back()->isAnyMemberInitializer() &&
+ Initializers.back()->getAnyMember() == Field;
+ continue;
+ }
+
+ if (!Field->getDeclName())
+ continue;
+
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
+ Field->getIdentifier()->getName()));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("args");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(),
+ SawLastInitializer? CCP_NextInitializer
+ : CCP_MemberDeclaration,
+ CXCursor_MemberRef,
+ CXAvailability_Available,
+ Field));
+ SawLastInitializer = false;
+ }
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
+
+/// \brief Determine whether this scope denotes a namespace.
+static bool isNamespaceScope(Scope *S) {
+ DeclContext *DC = S->getEntity();
+ if (!DC)
+ return false;
+
+ return DC->isFileContext();
+}
+
+void Sema::CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
+ bool AfterAmpersand) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+
+ // Note what has already been captured.
+ llvm::SmallPtrSet<IdentifierInfo *, 4> Known;
+ bool IncludedThis = false;
+ for (const auto &C : Intro.Captures) {
+ if (C.Kind == LCK_This) {
+ IncludedThis = true;
+ continue;
+ }
+
+ Known.insert(C.Id);
+ }
+
+ // Look for other capturable variables.
+ for (; S && !isNamespaceScope(S); S = S->getParent()) {
+ for (const auto *D : S->decls()) {
+ const auto *Var = dyn_cast<VarDecl>(D);
+ if (!Var ||
+ !Var->hasLocalStorage() ||
+ Var->hasAttr<BlocksAttr>())
+ continue;
+
+ if (Known.insert(Var->getIdentifier()).second)
+ Results.AddResult(CodeCompletionResult(Var, CCP_LocalDeclaration),
+ CurContext, nullptr, false);
+ }
+ }
+
+ // Add 'this', if it would be valid.
+ if (!IncludedThis && !AfterAmpersand && Intro.Default != LCD_ByCopy)
+ addThisCompletion(*this, Results);
+
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
+
+/// Macro that optionally prepends an "@" to the string literal passed in via
+/// Keyword, depending on whether NeedAt is true or false.
+#define OBJC_AT_KEYWORD_NAME(NeedAt,Keyword) ((NeedAt)? "@" Keyword : Keyword)
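+// For example, OBJC_AT_KEYWORD_NAME(true, "end") evaluates to the string
+// "@end" (via literal concatenation), while OBJC_AT_KEYWORD_NAME(false, "end")
+// evaluates to "end".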
+
+static void AddObjCImplementationResults(const LangOptions &LangOpts,
+ ResultBuilder &Results,
+ bool NeedAt) {
+ typedef CodeCompletionResult Result;
+ // Since we have an implementation, we can end it.
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"end")));
+
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ if (LangOpts.ObjC2) {
+ // @dynamic
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"dynamic"));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("property");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // @synthesize
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"synthesize"));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("property");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+}
+
+static void AddObjCInterfaceResults(const LangOptions &LangOpts,
+ ResultBuilder &Results,
+ bool NeedAt) {
+ typedef CodeCompletionResult Result;
+
+ // Since we have an interface or protocol, we can end it.
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"end")));
+
+ if (LangOpts.ObjC2) {
+ // @property
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"property")));
+
+ // @required
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"required")));
+
+ // @optional
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"optional")));
+ }
+}
+
+static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt) {
+ typedef CodeCompletionResult Result;
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+
+ // @class name ;
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"class"));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("name");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ if (Results.includeCodePatterns()) {
+ // @interface name
+ // FIXME: Could introduce the whole pattern, including superclasses and
+ // such.
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"interface"));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("class");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // @protocol name
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"protocol"));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("protocol");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // @implementation name
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"implementation"));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("class");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ // @compatibility_alias name
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"compatibility_alias"));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("alias");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("class");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ if (Results.getSema().getLangOpts().Modules) {
+ // @import name
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "import"));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("module");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+}
+
+void Sema::CodeCompleteObjCAtDirective(Scope *S) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+ if (isa<ObjCImplDecl>(CurContext))
+ AddObjCImplementationResults(getLangOpts(), Results, false);
+ else if (CurContext->isObjCContainer())
+ AddObjCInterfaceResults(getLangOpts(), Results, false);
+ else
+ AddObjCTopLevelResults(Results, false);
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(),Results.size());
+}
+
+static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
+ typedef CodeCompletionResult Result;
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+
+ // @encode ( type-name )
+ const char *EncodeType = "char[]";
+ if (Results.getSema().getLangOpts().CPlusPlus ||
+ Results.getSema().getLangOpts().ConstStrings)
+ EncodeType = "const char[]";
+ Builder.AddResultTypeChunk(EncodeType);
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"encode"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("type-name");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // @protocol ( protocol-name )
+ Builder.AddResultTypeChunk("Protocol *");
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"protocol"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("protocol-name");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // @selector ( selector )
+ Builder.AddResultTypeChunk("SEL");
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"selector"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("selector");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // @"string"
+ Builder.AddResultTypeChunk("NSString *");
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"\""));
+ Builder.AddPlaceholderChunk("string");
+ Builder.AddTextChunk("\"");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // @[objects, ...]
+ Builder.AddResultTypeChunk("NSArray *");
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"["));
+ Builder.AddPlaceholderChunk("objects, ...");
+ Builder.AddChunk(CodeCompletionString::CK_RightBracket);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // @{key : object, ...}
+ Builder.AddResultTypeChunk("NSDictionary *");
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"{"));
+ Builder.AddPlaceholderChunk("key");
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("object, ...");
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // @(expression)
+ Builder.AddResultTypeChunk("id");
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "("));
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+}
+
+static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt) {
+ typedef CodeCompletionResult Result;
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+
+ if (Results.includeCodePatterns()) {
+ // @try { statements } @catch ( declaration ) { statements } @finally
+ // { statements }
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"try"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Builder.AddTextChunk("@catch");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("parameter");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Builder.AddTextChunk("@finally");
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ // @throw
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"throw"));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ if (Results.includeCodePatterns()) {
+ // @synchronized ( expression ) { statements }
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"synchronized"));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+}
+
+static void AddObjCVisibilityResults(const LangOptions &LangOpts,
+ ResultBuilder &Results,
+ bool NeedAt) {
+ typedef CodeCompletionResult Result;
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"private")));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"protected")));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"public")));
+ if (LangOpts.ObjC2)
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"package")));
+}
+
+void Sema::CodeCompleteObjCAtVisibility(Scope *S) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+ AddObjCVisibilityResults(getLangOpts(), Results, false);
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCAtStatement(Scope *S) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+ AddObjCStatementResults(Results, false);
+ AddObjCExpressionResults(Results, false);
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCAtExpression(Scope *S) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+ AddObjCExpressionResults(Results, false);
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(),Results.size());
+}
+
+/// \brief Determine whether the addition of the given flag to an Objective-C
+/// property's attributes will cause a conflict.
+static bool ObjCPropertyFlagConflicts(unsigned Attributes, unsigned NewFlag) {
+ // Check if we've already added this flag.
+ if (Attributes & NewFlag)
+ return true;
+
+ Attributes |= NewFlag;
+
+ // Check for collisions with "readonly".
+ if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
+ (Attributes & ObjCDeclSpec::DQ_PR_readwrite))
+ return true;
+
+ // Check for more than one of { assign, copy, retain, strong, weak }.
+ unsigned AssignCopyRetMask = Attributes & (ObjCDeclSpec::DQ_PR_assign |
+ ObjCDeclSpec::DQ_PR_unsafe_unretained |
+ ObjCDeclSpec::DQ_PR_copy |
+ ObjCDeclSpec::DQ_PR_retain |
+ ObjCDeclSpec::DQ_PR_strong |
+ ObjCDeclSpec::DQ_PR_weak);
+ if (AssignCopyRetMask &&
+ AssignCopyRetMask != ObjCDeclSpec::DQ_PR_assign &&
+ AssignCopyRetMask != ObjCDeclSpec::DQ_PR_unsafe_unretained &&
+ AssignCopyRetMask != ObjCDeclSpec::DQ_PR_copy &&
+ AssignCopyRetMask != ObjCDeclSpec::DQ_PR_retain &&
+ AssignCopyRetMask != ObjCDeclSpec::DQ_PR_strong &&
+ AssignCopyRetMask != ObjCDeclSpec::DQ_PR_weak)
+ return true;
+
+ return false;
+}
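+
+// For instance, adding DQ_PR_readwrite when Attributes already contains
+// DQ_PR_readonly is reported as a conflict, as is adding DQ_PR_copy alongside
+// DQ_PR_retain, since at most one of { assign, unsafe_unretained, copy,
+// retain, strong, weak } may be set.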
+
+void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
+ if (!CodeCompleter)
+ return;
+
+ unsigned Attributes = ODS.getPropertyAttributes();
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_readonly))
+ Results.AddResult(CodeCompletionResult("readonly"));
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_assign))
+ Results.AddResult(CodeCompletionResult("assign"));
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCDeclSpec::DQ_PR_unsafe_unretained))
+ Results.AddResult(CodeCompletionResult("unsafe_unretained"));
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_readwrite))
+ Results.AddResult(CodeCompletionResult("readwrite"));
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_retain))
+ Results.AddResult(CodeCompletionResult("retain"));
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_strong))
+ Results.AddResult(CodeCompletionResult("strong"));
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_copy))
+ Results.AddResult(CodeCompletionResult("copy"));
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_nonatomic))
+ Results.AddResult(CodeCompletionResult("nonatomic"));
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_atomic))
+ Results.AddResult(CodeCompletionResult("atomic"));
+
+ // Only suggest "weak" if we're compiling for ARC-with-weak-references or GC.
+ if (getLangOpts().ObjCWeak || getLangOpts().getGC() != LangOptions::NonGC)
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_weak))
+ Results.AddResult(CodeCompletionResult("weak"));
+
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_setter)) {
+ CodeCompletionBuilder Setter(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ Setter.AddTypedTextChunk("setter");
+ Setter.AddTextChunk("=");
+ Setter.AddPlaceholderChunk("method");
+ Results.AddResult(CodeCompletionResult(Setter.TakeString()));
+ }
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_getter)) {
+ CodeCompletionBuilder Getter(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ Getter.AddTypedTextChunk("getter");
+ Getter.AddTextChunk("=");
+ Getter.AddPlaceholderChunk("method");
+ Results.AddResult(CodeCompletionResult(Getter.TakeString()));
+ }
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_nullability)) {
+ Results.AddResult(CodeCompletionResult("nonnull"));
+ Results.AddResult(CodeCompletionResult("nullable"));
+ Results.AddResult(CodeCompletionResult("null_unspecified"));
+ Results.AddResult(CodeCompletionResult("null_resettable"));
+ }
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(),Results.size());
+}
+
+/// \brief Describes the kind of Objective-C method that we want to find
+/// via code completion.
+enum ObjCMethodKind {
+ MK_Any, ///< Any kind of method, provided it meets the other criteria.
+ MK_ZeroArgSelector, ///< Zero-argument (unary) selector.
+ MK_OneArgSelector ///< One-argument selector.
+};
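+
+// Illustrative examples (hypothetical selectors): "description" is a
+// zero-argument (unary) selector that matches MK_ZeroArgSelector, while
+// "setName:" is a one-argument selector that matches MK_OneArgSelector.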
+
+static bool isAcceptableObjCSelector(Selector Sel,
+ ObjCMethodKind WantKind,
+ ArrayRef<IdentifierInfo *> SelIdents,
+ bool AllowSameLength = true) {
+ unsigned NumSelIdents = SelIdents.size();
+ if (NumSelIdents > Sel.getNumArgs())
+ return false;
+
+ switch (WantKind) {
+ case MK_Any: break;
+ case MK_ZeroArgSelector: return Sel.isUnarySelector();
+ case MK_OneArgSelector: return Sel.getNumArgs() == 1;
+ }
+
+ if (!AllowSameLength && NumSelIdents && NumSelIdents == Sel.getNumArgs())
+ return false;
+
+ for (unsigned I = 0; I != NumSelIdents; ++I)
+ if (SelIdents[I] != Sel.getIdentifierInfoForSlot(I))
+ return false;
+
+ return true;
+}
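+
+// Illustrative: with SelIdents = { "insertObject" }, the two-slot selector
+// insertObject:atIndex: is acceptable because the given identifiers form a
+// prefix of its slots, whereas objectAtIndex: is not. When AllowSameLength
+// is false, insertObject: itself is also rejected, since it has exactly as
+// many slots as identifiers were provided.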
+
+static bool isAcceptableObjCMethod(ObjCMethodDecl *Method,
+ ObjCMethodKind WantKind,
+ ArrayRef<IdentifierInfo *> SelIdents,
+ bool AllowSameLength = true) {
+ return isAcceptableObjCSelector(Method->getSelector(), WantKind, SelIdents,
+ AllowSameLength);
+}
+
+namespace {
+ /// \brief A set of selectors, which is used to avoid introducing multiple
+ /// completions with the same selector into the result set.
+ typedef llvm::SmallPtrSet<Selector, 16> VisitedSelectorSet;
+}
+
+/// \brief Add all of the Objective-C methods in the given Objective-C
+/// container to the set of results.
+///
+/// The container will be a class, protocol, category, or implementation of
+/// any of the above. This method will recurse to include methods from
+/// the superclasses of classes along with their categories, protocols, and
+/// implementations.
+///
+/// \param Container the container in which we'll look to find methods.
+///
+/// \param WantInstanceMethods If true, add only instance methods; if false,
+/// add only factory methods.
+///
+/// \param CurContext the context in which we're performing the lookup that
+/// finds methods.
+///
+/// \param AllowSameLength Whether we allow a method to be added to the list
+/// when it has the same number of parameters as we have selector identifiers.
+///
+/// \param Results the structure into which we'll add results.
+static void AddObjCMethods(ObjCContainerDecl *Container,
+ bool WantInstanceMethods,
+ ObjCMethodKind WantKind,
+ ArrayRef<IdentifierInfo *> SelIdents,
+ DeclContext *CurContext,
+ VisitedSelectorSet &Selectors,
+ bool AllowSameLength,
+ ResultBuilder &Results,
+ bool InOriginalClass = true) {
+ typedef CodeCompletionResult Result;
+ Container = getContainerDef(Container);
+ ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container);
+ bool isRootClass = IFace && !IFace->getSuperClass();
+ for (auto *M : Container->methods()) {
+ // The instance methods on the root class can be messaged via the
+ // metaclass.
+ if (M->isInstanceMethod() == WantInstanceMethods ||
+ (isRootClass && !WantInstanceMethods)) {
+ // Check whether the selector identifiers we've been given are a
+ // subset of the identifiers for this particular method.
+ if (!isAcceptableObjCMethod(M, WantKind, SelIdents, AllowSameLength))
+ continue;
+
+ if (!Selectors.insert(M->getSelector()).second)
+ continue;
+
+ Result R = Result(M, Results.getBasePriority(M), nullptr);
+ R.StartParameter = SelIdents.size();
+ R.AllParametersAreInformative = (WantKind != MK_Any);
+ if (!InOriginalClass)
+ R.Priority += CCD_InBaseClass;
+ Results.MaybeAddResult(R, CurContext);
+ }
+ }
+
+ // Visit the protocols of protocols.
+ if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
+ if (Protocol->hasDefinition()) {
+ const ObjCList<ObjCProtocolDecl> &Protocols
+ = Protocol->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end();
+ I != E; ++I)
+ AddObjCMethods(*I, WantInstanceMethods, WantKind, SelIdents,
+ CurContext, Selectors, AllowSameLength, Results, false);
+ }
+ }
+
+ if (!IFace || !IFace->hasDefinition())
+ return;
+
+ // Add methods in protocols.
+ for (auto *I : IFace->protocols())
+ AddObjCMethods(I, WantInstanceMethods, WantKind, SelIdents,
+ CurContext, Selectors, AllowSameLength, Results, false);
+
+ // Add methods in categories.
+ for (auto *CatDecl : IFace->known_categories()) {
+ AddObjCMethods(CatDecl, WantInstanceMethods, WantKind, SelIdents,
+ CurContext, Selectors, AllowSameLength,
+ Results, InOriginalClass);
+
+ // Add the category's protocol methods.
+ const ObjCList<ObjCProtocolDecl> &Protocols
+ = CatDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end();
+ I != E; ++I)
+ AddObjCMethods(*I, WantInstanceMethods, WantKind, SelIdents,
+ CurContext, Selectors, AllowSameLength,
+ Results, false);
+
+ // Add methods in category implementations.
+ if (ObjCCategoryImplDecl *Impl = CatDecl->getImplementation())
+ AddObjCMethods(Impl, WantInstanceMethods, WantKind, SelIdents,
+ CurContext, Selectors, AllowSameLength,
+ Results, InOriginalClass);
+ }
+
+ // Add methods in superclass.
+ if (IFace->getSuperClass())
+ AddObjCMethods(IFace->getSuperClass(), WantInstanceMethods, WantKind,
+ SelIdents, CurContext, Selectors,
+ AllowSameLength, Results, false);
+
+ // Add methods in our implementation, if any.
+ if (ObjCImplementationDecl *Impl = IFace->getImplementation())
+ AddObjCMethods(Impl, WantInstanceMethods, WantKind, SelIdents,
+ CurContext, Selectors, AllowSameLength,
+ Results, InOriginalClass);
+}
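+
+// Illustrative: for a class Foo with a category Foo(Extras) and superclass
+// Bar, calling AddObjCMethods on Foo adds Foo's own methods first, then
+// those of its protocols and categories, and finally Bar's (passed with
+// InOriginalClass = false, so inherited results receive the CCD_InBaseClass
+// priority penalty).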
+
+void Sema::CodeCompleteObjCPropertyGetter(Scope *S) {
+ // Try to find the interface where getters might live.
+ ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurContext);
+ if (!Class) {
+ if (ObjCCategoryDecl *Category
+ = dyn_cast_or_null<ObjCCategoryDecl>(CurContext))
+ Class = Category->getClassInterface();
+
+ if (!Class)
+ return;
+ }
+
+ // Find all of the potential getters.
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+
+ VisitedSelectorSet Selectors;
+ AddObjCMethods(Class, true, MK_ZeroArgSelector, None, CurContext, Selectors,
+ /*AllowSameLength=*/true, Results);
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCPropertySetter(Scope *S) {
+ // Try to find the interface where setters might live.
+ ObjCInterfaceDecl *Class
+ = dyn_cast_or_null<ObjCInterfaceDecl>(CurContext);
+ if (!Class) {
+ if (ObjCCategoryDecl *Category
+ = dyn_cast_or_null<ObjCCategoryDecl>(CurContext))
+ Class = Category->getClassInterface();
+
+ if (!Class)
+ return;
+ }
+
+ // Find all of the potential setters.
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+
+ VisitedSelectorSet Selectors;
+ AddObjCMethods(Class, true, MK_OneArgSelector, None, CurContext,
+ Selectors, /*AllowSameLength=*/true, Results);
+
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
+ bool IsParameter) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Type);
+ Results.EnterNewScope();
+
+ // Add context-sensitive, Objective-C parameter-passing keywords.
+ bool AddedInOut = false;
+ if ((DS.getObjCDeclQualifier() &
+ (ObjCDeclSpec::DQ_In | ObjCDeclSpec::DQ_Inout)) == 0) {
+ Results.AddResult("in");
+ Results.AddResult("inout");
+ AddedInOut = true;
+ }
+ if ((DS.getObjCDeclQualifier() &
+ (ObjCDeclSpec::DQ_Out | ObjCDeclSpec::DQ_Inout)) == 0) {
+ Results.AddResult("out");
+ if (!AddedInOut)
+ Results.AddResult("inout");
+ }
+ if ((DS.getObjCDeclQualifier() &
+ (ObjCDeclSpec::DQ_Bycopy | ObjCDeclSpec::DQ_Byref |
+ ObjCDeclSpec::DQ_Oneway)) == 0) {
+ Results.AddResult("bycopy");
+ Results.AddResult("byref");
+ Results.AddResult("oneway");
+ }
+ if ((DS.getObjCDeclQualifier() & ObjCDeclSpec::DQ_CSNullability) == 0) {
+ Results.AddResult("nonnull");
+ Results.AddResult("nullable");
+ Results.AddResult("null_unspecified");
+ }
+
+ // If we're completing the return type of an Objective-C method and the
+ // identifier IBAction refers to a macro, provide a completion item for
+ // an action, e.g.,
+ // IBAction)<#selector#>:(id)sender
+ if (DS.getObjCDeclQualifier() == 0 && !IsParameter &&
+ PP.isMacroDefined("IBAction")) {
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo(),
+ CCP_CodePattern, CXAvailability_Available);
+ Builder.AddTypedTextChunk("IBAction");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddPlaceholderChunk("selector");
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("id");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("sender");
+ Results.AddResult(CodeCompletionResult(Builder.TakeString()));
+ }
+
+ // If we're completing the return type, provide 'instancetype'.
+ if (!IsParameter) {
+ Results.AddResult(CodeCompletionResult("instancetype"));
+ }
+
+ // Add various builtin type names and specifiers.
+ AddOrdinaryNameResults(PCC_Type, S, *this, Results);
+ Results.ExitScope();
+
+ // Add the various type names
+ Results.setFilter(&ResultBuilder::IsOrdinaryNonValueName);
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals());
+
+ if (CodeCompleter->includeMacros())
+ AddMacroResults(PP, Results, false);
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Type,
+ Results.data(), Results.size());
+}
+
+/// \brief When we have an expression with type "id", we may assume
+/// that it has some more-specific class type based on knowledge of
+/// common uses of Objective-C. This routine returns that class type,
+/// or NULL if no better result could be determined.
+static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) {
+ ObjCMessageExpr *Msg = dyn_cast_or_null<ObjCMessageExpr>(E);
+ if (!Msg)
+ return nullptr;
+
+ Selector Sel = Msg->getSelector();
+ if (Sel.isNull())
+ return nullptr;
+
+ IdentifierInfo *Id = Sel.getIdentifierInfoForSlot(0);
+ if (!Id)
+ return nullptr;
+
+ ObjCMethodDecl *Method = Msg->getMethodDecl();
+ if (!Method)
+ return nullptr;
+
+ // Determine the class that we're sending the message to.
+ ObjCInterfaceDecl *IFace = nullptr;
+ switch (Msg->getReceiverKind()) {
+ case ObjCMessageExpr::Class:
+ if (const ObjCObjectType *ObjType
+ = Msg->getClassReceiver()->getAs<ObjCObjectType>())
+ IFace = ObjType->getInterface();
+ break;
+
+ case ObjCMessageExpr::Instance: {
+ QualType T = Msg->getInstanceReceiver()->getType();
+ if (const ObjCObjectPointerType *Ptr = T->getAs<ObjCObjectPointerType>())
+ IFace = Ptr->getInterfaceDecl();
+ break;
+ }
+
+ case ObjCMessageExpr::SuperInstance:
+ case ObjCMessageExpr::SuperClass:
+ break;
+ }
+
+ if (!IFace)
+ return nullptr;
+
+ ObjCInterfaceDecl *Super = IFace->getSuperClass();
+ if (Method->isInstanceMethod())
+ return llvm::StringSwitch<ObjCInterfaceDecl *>(Id->getName())
+ .Case("retain", IFace)
+ .Case("strong", IFace)
+ .Case("autorelease", IFace)
+ .Case("copy", IFace)
+ .Case("copyWithZone", IFace)
+ .Case("mutableCopy", IFace)
+ .Case("mutableCopyWithZone", IFace)
+ .Case("awakeFromCoder", IFace)
+ .Case("replacementObjectFromCoder", IFace)
+ .Case("class", IFace)
+ .Case("classForCoder", IFace)
+ .Case("superclass", Super)
+ .Default(nullptr);
+
+ return llvm::StringSwitch<ObjCInterfaceDecl *>(Id->getName())
+ .Case("new", IFace)
+ .Case("alloc", IFace)
+ .Case("allocWithZone", IFace)
+ .Case("class", IFace)
+ .Case("superclass", Super)
+ .Default(nullptr);
+}
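+
+// Illustrative: for the message send [NSMutableArray alloc], the static type
+// is "id", but the "alloc" case above lets us assume the more specific type
+// NSMutableArray, so later completions on the result are ranked against the
+// right class.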
+
+/// \brief Add a special completion for a message send to "super", which
+/// fills in the most likely case of forwarding all of our arguments to the
+/// superclass function.
+///
+/// \param S The semantic analysis object.
+///
+/// \param NeedSuperKeyword Whether we need to prefix this completion with
+/// the "super" keyword. Otherwise, we just need to provide the arguments.
+///
+/// \param SelIdents The identifiers in the selector that have already been
+/// provided as arguments for a send to "super".
+///
+/// \param Results The set of results to augment.
+///
+/// \returns the Objective-C method declaration that would be invoked by
+/// this "super" completion. If NULL, no completion was added.
+static ObjCMethodDecl *AddSuperSendCompletion(
+ Sema &S, bool NeedSuperKeyword,
+ ArrayRef<IdentifierInfo *> SelIdents,
+ ResultBuilder &Results) {
+ ObjCMethodDecl *CurMethod = S.getCurMethodDecl();
+ if (!CurMethod)
+ return nullptr;
+
+ ObjCInterfaceDecl *Class = CurMethod->getClassInterface();
+ if (!Class)
+ return nullptr;
+
+ // Try to find a superclass method with the same selector.
+ ObjCMethodDecl *SuperMethod = nullptr;
+ while ((Class = Class->getSuperClass()) && !SuperMethod) {
+ // Check in the class itself.
+ SuperMethod = Class->getMethod(CurMethod->getSelector(),
+ CurMethod->isInstanceMethod());
+
+ // Check in categories or class extensions.
+ if (!SuperMethod) {
+ for (const auto *Cat : Class->known_categories()) {
+ if ((SuperMethod = Cat->getMethod(CurMethod->getSelector(),
+ CurMethod->isInstanceMethod())))
+ break;
+ }
+ }
+ }
+
+ if (!SuperMethod)
+ return nullptr;
+
+ // Check whether the superclass method has the same signature.
+ if (CurMethod->param_size() != SuperMethod->param_size() ||
+ CurMethod->isVariadic() != SuperMethod->isVariadic())
+ return nullptr;
+
+ for (ObjCMethodDecl::param_iterator CurP = CurMethod->param_begin(),
+ CurPEnd = CurMethod->param_end(),
+ SuperP = SuperMethod->param_begin();
+ CurP != CurPEnd; ++CurP, ++SuperP) {
+ // Make sure the parameter types are compatible.
+ if (!S.Context.hasSameUnqualifiedType((*CurP)->getType(),
+ (*SuperP)->getType()))
+ return nullptr;
+
+ // Make sure we have a parameter name to forward!
+ if (!(*CurP)->getIdentifier())
+ return nullptr;
+ }
+
+ // We have a superclass method. Now, form the send-to-super completion.
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+
+ // Give this completion a return type.
+ AddResultTypeChunk(S.Context, getCompletionPrintingPolicy(S), SuperMethod,
+ Results.getCompletionContext().getBaseType(),
+ Builder);
+
+ // If we need the "super" keyword, add it (plus some spacing).
+ if (NeedSuperKeyword) {
+ Builder.AddTypedTextChunk("super");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ }
+
+ Selector Sel = CurMethod->getSelector();
+ if (Sel.isUnarySelector()) {
+ if (NeedSuperKeyword)
+ Builder.AddTextChunk(Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(0)));
+ else
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(0)));
+ } else {
+ ObjCMethodDecl::param_iterator CurP = CurMethod->param_begin();
+ for (unsigned I = 0, N = Sel.getNumArgs(); I != N; ++I, ++CurP) {
+ if (I > SelIdents.size())
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+
+ if (I < SelIdents.size())
+ Builder.AddInformativeChunk(
+ Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(I) + ":"));
+ else if (NeedSuperKeyword || I > SelIdents.size()) {
+ Builder.AddTextChunk(
+ Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(I) + ":"));
+ Builder.AddPlaceholderChunk(Builder.getAllocator().CopyString(
+ (*CurP)->getIdentifier()->getName()));
+ } else {
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(I) + ":"));
+ Builder.AddPlaceholderChunk(Builder.getAllocator().CopyString(
+ (*CurP)->getIdentifier()->getName()));
+ }
+ }
+ }
+
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(), SuperMethod,
+ CCP_SuperCompletion));
+ return SuperMethod;
+}
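+
+// Illustrative: inside -(void)setFrame:(NSRect)frame of a class whose
+// superclass also declares setFrame: with a matching signature, this adds
+// the single completion "super setFrame:frame" (or just "setFrame:frame"
+// when the "super" keyword has already been typed), forwarding the current
+// method's parameter by name.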
+
+void Sema::CodeCompleteObjCMessageReceiver(Scope *S) {
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_ObjCMessageReceiver,
+ getLangOpts().CPlusPlus11
+ ? &ResultBuilder::IsObjCMessageReceiverOrLambdaCapture
+ : &ResultBuilder::IsObjCMessageReceiver);
+
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ Results.EnterNewScope();
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals());
+
+ // If we are in an Objective-C method inside a class that has a superclass,
+ // add "super" as an option.
+ if (ObjCMethodDecl *Method = getCurMethodDecl())
+ if (ObjCInterfaceDecl *Iface = Method->getClassInterface())
+ if (Iface->getSuperClass()) {
+ Results.AddResult(Result("super"));
+
+ AddSuperSendCompletion(*this, /*NeedSuperKeyword=*/true, None, Results);
+ }
+
+ if (getLangOpts().CPlusPlus11)
+ addThisCompletion(*this, Results);
+
+ Results.ExitScope();
+
+ if (CodeCompleter->includeMacros())
+ AddMacroResults(PP, Results, false);
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
+
+void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
+ ArrayRef<IdentifierInfo *> SelIdents,
+ bool AtArgumentExpression) {
+ ObjCInterfaceDecl *CDecl = nullptr;
+ if (ObjCMethodDecl *CurMethod = getCurMethodDecl()) {
+ // Figure out which interface we're in.
+ CDecl = CurMethod->getClassInterface();
+ if (!CDecl)
+ return;
+
+ // Find the superclass of this class.
+ CDecl = CDecl->getSuperClass();
+ if (!CDecl)
+ return;
+
+ if (CurMethod->isInstanceMethod()) {
+ // We are inside an instance method, which means that the message
+ // send [super ...] is actually calling an instance method on the
+ // current object.
+ return CodeCompleteObjCInstanceMessage(S, nullptr, SelIdents,
+ AtArgumentExpression,
+ CDecl);
+ }
+
+ // Fall through to send to the superclass in CDecl.
+ } else {
+ // "super" may be the name of a type or variable. Figure out which
+ // it is.
+ IdentifierInfo *Super = getSuperIdentifier();
+ NamedDecl *ND = LookupSingleName(S, Super, SuperLoc,
+ LookupOrdinaryName);
+ if ((CDecl = dyn_cast_or_null<ObjCInterfaceDecl>(ND))) {
+ // "super" names an interface. Use it.
+ } else if (TypeDecl *TD = dyn_cast_or_null<TypeDecl>(ND)) {
+ if (const ObjCObjectType *Iface
+ = Context.getTypeDeclType(TD)->getAs<ObjCObjectType>())
+ CDecl = Iface->getInterface();
+ } else if (ND && isa<UnresolvedUsingTypenameDecl>(ND)) {
+ // "super" names an unresolved type; we can't be more specific.
+ } else {
+ // Assume that "super" names some kind of value and parse that way.
+ CXXScopeSpec SS;
+ SourceLocation TemplateKWLoc;
+ UnqualifiedId id;
+ id.setIdentifier(Super, SuperLoc);
+ ExprResult SuperExpr = ActOnIdExpression(S, SS, TemplateKWLoc, id,
+ false, false);
+ return CodeCompleteObjCInstanceMessage(S, SuperExpr.get(), SelIdents,
+ AtArgumentExpression);
+ }
+
+ // Fall through
+ }
+
+ ParsedType Receiver;
+ if (CDecl)
+ Receiver = ParsedType::make(Context.getObjCInterfaceType(CDecl));
+ return CodeCompleteObjCClassMessage(S, Receiver, SelIdents,
+ AtArgumentExpression,
+ /*IsSuper=*/true);
+}
+
+/// \brief Given a set of code-completion results for the argument of a message
+/// send, determine the preferred type (if any) for that argument expression.
+static QualType getPreferredArgumentTypeForMessageSend(ResultBuilder &Results,
+ unsigned NumSelIdents) {
+ typedef CodeCompletionResult Result;
+ ASTContext &Context = Results.getSema().Context;
+
+ QualType PreferredType;
+ unsigned BestPriority = CCP_Unlikely * 2;
+ Result *ResultsData = Results.data();
+ for (unsigned I = 0, N = Results.size(); I != N; ++I) {
+ Result &R = ResultsData[I];
+ if (R.Kind == Result::RK_Declaration &&
+ isa<ObjCMethodDecl>(R.Declaration)) {
+ if (R.Priority <= BestPriority) {
+ const ObjCMethodDecl *Method = cast<ObjCMethodDecl>(R.Declaration);
+ if (NumSelIdents <= Method->param_size()) {
+ QualType MyPreferredType = Method->parameters()[NumSelIdents - 1]
+ ->getType();
+ if (R.Priority < BestPriority || PreferredType.isNull()) {
+ BestPriority = R.Priority;
+ PreferredType = MyPreferredType;
+ } else if (!Context.hasSameUnqualifiedType(PreferredType,
+ MyPreferredType)) {
+ PreferredType = QualType();
+ }
+ }
+ }
+ }
+ }
+
+ return PreferredType;
+}
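+
+// Illustrative: when completing the argument in [dict setObject:<#expr#>],
+// if every best-ranked setObject:... candidate takes "id" in that slot, the
+// preferred type is "id"; if the candidates disagree on the unqualified
+// type, no preference is recorded and ordinary expression completion is
+// used instead.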
+
+static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
+ ParsedType Receiver,
+ ArrayRef<IdentifierInfo *> SelIdents,
+ bool AtArgumentExpression,
+ bool IsSuper,
+ ResultBuilder &Results) {
+ typedef CodeCompletionResult Result;
+ ObjCInterfaceDecl *CDecl = nullptr;
+
+ // If the given name refers to an interface type, retrieve the
+ // corresponding declaration.
+ if (Receiver) {
+ QualType T = SemaRef.GetTypeFromParser(Receiver, nullptr);
+ if (!T.isNull())
+ if (const ObjCObjectType *Interface = T->getAs<ObjCObjectType>())
+ CDecl = Interface->getInterface();
+ }
+
+ // Add all of the factory methods in this Objective-C class, its protocols,
+ // superclasses, categories, implementation, etc.
+ Results.EnterNewScope();
+
+ // If this is a send-to-super, try to add the special "super" send
+ // completion.
+ if (IsSuper) {
+ if (ObjCMethodDecl *SuperMethod
+ = AddSuperSendCompletion(SemaRef, false, SelIdents, Results))
+ Results.Ignore(SuperMethod);
+ }
+
+ // If we're inside an Objective-C method definition, prefer its selector to
+ // others.
+ if (ObjCMethodDecl *CurMethod = SemaRef.getCurMethodDecl())
+ Results.setPreferredSelector(CurMethod->getSelector());
+
+ VisitedSelectorSet Selectors;
+ if (CDecl)
+ AddObjCMethods(CDecl, false, MK_Any, SelIdents,
+ SemaRef.CurContext, Selectors, AtArgumentExpression,
+ Results);
+ else {
+ // We're messaging "id" as a type; provide all class/factory methods.
+
+ // If we have an external source, load the entire class method
+ // pool from the AST file.
+ if (SemaRef.getExternalSource()) {
+ for (uint32_t I = 0,
+ N = SemaRef.getExternalSource()->GetNumExternalSelectors();
+ I != N; ++I) {
+ Selector Sel = SemaRef.getExternalSource()->GetExternalSelector(I);
+ if (Sel.isNull() || SemaRef.MethodPool.count(Sel))
+ continue;
+
+ SemaRef.ReadMethodPool(Sel);
+ }
+ }
+
+ for (Sema::GlobalMethodPool::iterator M = SemaRef.MethodPool.begin(),
+ MEnd = SemaRef.MethodPool.end();
+ M != MEnd; ++M) {
+ for (ObjCMethodList *MethList = &M->second.second;
+ MethList && MethList->getMethod();
+ MethList = MethList->getNext()) {
+ if (!isAcceptableObjCMethod(MethList->getMethod(), MK_Any, SelIdents))
+ continue;
+
+ Result R(MethList->getMethod(),
+ Results.getBasePriority(MethList->getMethod()), nullptr);
+ R.StartParameter = SelIdents.size();
+ R.AllParametersAreInformative = false;
+ Results.MaybeAddResult(R, SemaRef.CurContext);
+ }
+ }
+ }
+
+ Results.ExitScope();
+}
+
+void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
+ ArrayRef<IdentifierInfo *> SelIdents,
+ bool AtArgumentExpression,
+ bool IsSuper) {
+
+ QualType T = this->GetTypeFromParser(Receiver);
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext(CodeCompletionContext::CCC_ObjCClassMessage,
+ T, SelIdents));
+
+ AddClassMessageCompletions(*this, S, Receiver, SelIdents,
+ AtArgumentExpression, IsSuper, Results);
+
+ // If we're actually at the argument expression (rather than prior to the
+ // selector), we're actually performing code completion for an expression.
+ // Determine whether we have a single, best method. If so, we can
+ // code-complete the expression using the corresponding parameter type as
+ // our preferred type, improving completion results.
+ if (AtArgumentExpression) {
+ QualType PreferredType = getPreferredArgumentTypeForMessageSend(Results,
+ SelIdents.size());
+ if (PreferredType.isNull())
+ CodeCompleteOrdinaryName(S, PCC_Expression);
+ else
+ CodeCompleteExpression(S, PreferredType);
+ return;
+ }
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
+
+void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
+ ArrayRef<IdentifierInfo *> SelIdents,
+ bool AtArgumentExpression,
+ ObjCInterfaceDecl *Super) {
+ typedef CodeCompletionResult Result;
+
+ Expr *RecExpr = Receiver;
+
+ // If necessary, apply function/array conversion to the receiver.
+ // C99 6.7.5.3p[7,8].
+ if (RecExpr) {
+ ExprResult Conv = DefaultFunctionArrayLvalueConversion(RecExpr);
+ if (Conv.isInvalid()) // conversion failed. bail.
+ return;
+ RecExpr = Conv.get();
+ }
+ QualType ReceiverType = RecExpr? RecExpr->getType()
+ : Super? Context.getObjCObjectPointerType(
+ Context.getObjCInterfaceType(Super))
+ : Context.getObjCIdType();
+
+ // If we're messaging an expression with type "id" or "Class", check
+ // whether we know something special about the receiver that allows
+ // us to assume a more-specific receiver type.
+ if (ReceiverType->isObjCIdType() || ReceiverType->isObjCClassType()) {
+ if (ObjCInterfaceDecl *IFace = GetAssumedMessageSendExprType(RecExpr)) {
+ if (ReceiverType->isObjCClassType())
+ return CodeCompleteObjCClassMessage(S,
+ ParsedType::make(Context.getObjCInterfaceType(IFace)),
+ SelIdents,
+ AtArgumentExpression, Super);
+
+ ReceiverType = Context.getObjCObjectPointerType(
+ Context.getObjCInterfaceType(IFace));
+ }
+ } else if (RecExpr && getLangOpts().CPlusPlus) {
+ ExprResult Conv = PerformContextuallyConvertToObjCPointer(RecExpr);
+ if (Conv.isUsable()) {
+ RecExpr = Conv.get();
+ ReceiverType = RecExpr->getType();
+ }
+ }
+
+ // Build the set of methods we can see.
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext(CodeCompletionContext::CCC_ObjCInstanceMessage,
+ ReceiverType, SelIdents));
+
+ Results.EnterNewScope();
+
+ // If this is a send-to-super, try to add the special "super" send
+ // completion.
+ if (Super) {
+ if (ObjCMethodDecl *SuperMethod
+ = AddSuperSendCompletion(*this, false, SelIdents, Results))
+ Results.Ignore(SuperMethod);
+ }
+
+ // If we're inside an Objective-C method definition, prefer its selector to
+ // others.
+ if (ObjCMethodDecl *CurMethod = getCurMethodDecl())
+ Results.setPreferredSelector(CurMethod->getSelector());
+
+ // Keep track of the selectors we've already added.
+ VisitedSelectorSet Selectors;
+
+ // Handle messages to Class. This really isn't a message to an instance
+ // method, so we treat it the same way we would treat a message send to a
+ // class method.
+ if (ReceiverType->isObjCClassType() ||
+ ReceiverType->isObjCQualifiedClassType()) {
+ if (ObjCMethodDecl *CurMethod = getCurMethodDecl()) {
+ if (ObjCInterfaceDecl *ClassDecl = CurMethod->getClassInterface())
+ AddObjCMethods(ClassDecl, false, MK_Any, SelIdents,
+ CurContext, Selectors, AtArgumentExpression, Results);
+ }
+ }
+ // Handle messages to a qualified ID ("id<foo>").
+ else if (const ObjCObjectPointerType *QualID
+ = ReceiverType->getAsObjCQualifiedIdType()) {
+ // Search protocols for instance methods.
+ for (auto *I : QualID->quals())
+ AddObjCMethods(I, true, MK_Any, SelIdents, CurContext,
+ Selectors, AtArgumentExpression, Results);
+ }
+ // Handle messages to a pointer to interface type.
+ else if (const ObjCObjectPointerType *IFacePtr
+ = ReceiverType->getAsObjCInterfacePointerType()) {
+ // Search the class, its superclasses, etc., for instance methods.
+ AddObjCMethods(IFacePtr->getInterfaceDecl(), true, MK_Any, SelIdents,
+ CurContext, Selectors, AtArgumentExpression,
+ Results);
+
+ // Search protocols for instance methods.
+ for (auto *I : IFacePtr->quals())
+ AddObjCMethods(I, true, MK_Any, SelIdents, CurContext,
+ Selectors, AtArgumentExpression, Results);
+ }
+ // Handle messages to "id".
+ else if (ReceiverType->isObjCIdType()) {
+ // We're messaging "id", so provide all instance methods we know
+ // about as code-completion results.
+
+ // If we have an external source, load the entire class method
+ // pool from the AST file.
+ if (ExternalSource) {
+ for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors();
+ I != N; ++I) {
+ Selector Sel = ExternalSource->GetExternalSelector(I);
+ if (Sel.isNull() || MethodPool.count(Sel))
+ continue;
+
+ ReadMethodPool(Sel);
+ }
+ }
+
+ for (GlobalMethodPool::iterator M = MethodPool.begin(),
+ MEnd = MethodPool.end();
+ M != MEnd; ++M) {
+ for (ObjCMethodList *MethList = &M->second.first;
+ MethList && MethList->getMethod();
+ MethList = MethList->getNext()) {
+ if (!isAcceptableObjCMethod(MethList->getMethod(), MK_Any, SelIdents))
+ continue;
+
+ if (!Selectors.insert(MethList->getMethod()->getSelector()).second)
+ continue;
+
+ Result R(MethList->getMethod(),
+ Results.getBasePriority(MethList->getMethod()), nullptr);
+ R.StartParameter = SelIdents.size();
+ R.AllParametersAreInformative = false;
+ Results.MaybeAddResult(R, CurContext);
+ }
+ }
+ }
+ Results.ExitScope();
+
+ // If we're actually at the argument expression (rather than prior to the
+ // selector), we're actually performing code completion for an expression.
+ // Determine whether we have a single, best method. If so, we can
+ // code-complete the expression using the corresponding parameter type as
+ // our preferred type, improving completion results.
+ if (AtArgumentExpression) {
+ QualType PreferredType = getPreferredArgumentTypeForMessageSend(Results,
+ SelIdents.size());
+ if (PreferredType.isNull())
+ CodeCompleteOrdinaryName(S, PCC_Expression);
+ else
+ CodeCompleteExpression(S, PreferredType);
+ return;
+ }
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ Results.getCompletionContext(),
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCForCollection(Scope *S,
+ DeclGroupPtrTy IterationVar) {
+ CodeCompleteExpressionData Data;
+ Data.ObjCCollection = true;
+
+ if (IterationVar.getAsOpaquePtr()) {
+ DeclGroupRef DG = IterationVar.get();
+ for (DeclGroupRef::iterator I = DG.begin(), End = DG.end(); I != End; ++I) {
+ if (*I)
+ Data.IgnoreDecls.push_back(*I);
+ }
+ }
+
+ CodeCompleteExpression(S, Data);
+}
+
+void Sema::CodeCompleteObjCSelector(Scope *S,
+ ArrayRef<IdentifierInfo *> SelIdents) {
+ // If we have an external source, load the entire class method
+ // pool from the AST file.
+ if (ExternalSource) {
+ for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors();
+ I != N; ++I) {
+ Selector Sel = ExternalSource->GetExternalSelector(I);
+ if (Sel.isNull() || MethodPool.count(Sel))
+ continue;
+
+ ReadMethodPool(Sel);
+ }
+ }
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_SelectorName);
+ Results.EnterNewScope();
+ for (GlobalMethodPool::iterator M = MethodPool.begin(),
+ MEnd = MethodPool.end();
+ M != MEnd; ++M) {
+
+ Selector Sel = M->first;
+ if (!isAcceptableObjCSelector(Sel, MK_Any, SelIdents))
+ continue;
+
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ if (Sel.isUnarySelector()) {
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(0)));
+ Results.AddResult(Builder.TakeString());
+ continue;
+ }
+
+ std::string Accumulator;
+ for (unsigned I = 0, N = Sel.getNumArgs(); I != N; ++I) {
+ if (I == SelIdents.size()) {
+ if (!Accumulator.empty()) {
+ Builder.AddInformativeChunk(Builder.getAllocator().CopyString(
+ Accumulator));
+ Accumulator.clear();
+ }
+ }
+
+ Accumulator += Sel.getNameForSlot(I);
+ Accumulator += ':';
+ }
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(Accumulator));
+ Results.AddResult(Builder.TakeString());
+ }
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_SelectorName,
+ Results.data(), Results.size());
+}
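+
+// Illustrative: completing @selector(insertObject:<#...#>) above lists every
+// selector in the method pool whose first slot is "insertObject", rendering
+// the already-typed slots as informative chunks and the remaining slots as
+// the typed-text chunk.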
+
+/// \brief Add all of the protocol declarations that we find in the given
+/// (translation unit) context.
+static void AddProtocolResults(DeclContext *Ctx, DeclContext *CurContext,
+ bool OnlyForwardDeclarations,
+ ResultBuilder &Results) {
+ typedef CodeCompletionResult Result;
+
+ for (const auto *D : Ctx->decls()) {
+ // Record any protocols we find.
+ if (const auto *Proto = dyn_cast<ObjCProtocolDecl>(D))
+ if (!OnlyForwardDeclarations || !Proto->hasDefinition())
+ Results.AddResult(Result(Proto, Results.getBasePriority(Proto),nullptr),
+ CurContext, nullptr, false);
+ }
+}
+
+void Sema::CodeCompleteObjCProtocolReferences(
+ ArrayRef<IdentifierLocPair> Protocols) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_ObjCProtocolName);
+
+ if (CodeCompleter && CodeCompleter->includeGlobals()) {
+ Results.EnterNewScope();
+
+ // Tell the result set to ignore all of the protocols we have
+ // already seen.
+ // FIXME: This doesn't work when caching code-completion results.
+ for (const IdentifierLocPair &Pair : Protocols)
+ if (ObjCProtocolDecl *Protocol = LookupProtocol(Pair.first,
+ Pair.second))
+ Results.Ignore(Protocol);
+
+ // Add all protocols.
+ AddProtocolResults(Context.getTranslationUnitDecl(), CurContext, false,
+ Results);
+
+ Results.ExitScope();
+ }
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_ObjCProtocolName,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCProtocolDecl(Scope *) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_ObjCProtocolName);
+
+ if (CodeCompleter && CodeCompleter->includeGlobals()) {
+ Results.EnterNewScope();
+
+ // Add all protocols.
+ AddProtocolResults(Context.getTranslationUnitDecl(), CurContext, true,
+ Results);
+
+ Results.ExitScope();
+ }
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_ObjCProtocolName,
+ Results.data(),Results.size());
+}
+
+/// \brief Add all of the Objective-C interface declarations that we find in
+/// the given (translation unit) context.
+static void AddInterfaceResults(DeclContext *Ctx, DeclContext *CurContext,
+ bool OnlyForwardDeclarations,
+ bool OnlyUnimplemented,
+ ResultBuilder &Results) {
+ typedef CodeCompletionResult Result;
+
+ for (const auto *D : Ctx->decls()) {
+ // Record any interfaces we find.
+ if (const auto *Class = dyn_cast<ObjCInterfaceDecl>(D))
+ if ((!OnlyForwardDeclarations || !Class->hasDefinition()) &&
+ (!OnlyUnimplemented || !Class->getImplementation()))
+ Results.AddResult(Result(Class, Results.getBasePriority(Class),nullptr),
+ CurContext, nullptr, false);
+ }
+}
+
+void Sema::CodeCompleteObjCInterfaceDecl(Scope *S) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+
+ if (CodeCompleter->includeGlobals()) {
+ // Add all classes.
+ AddInterfaceResults(Context.getTranslationUnitDecl(), CurContext, false,
+ false, Results);
+ }
+
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_ObjCInterfaceName,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
+ SourceLocation ClassNameLoc) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_ObjCInterfaceName);
+ Results.EnterNewScope();
+
+ // Make sure that we ignore the class we're currently defining.
+ NamedDecl *CurClass
+ = LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
+ if (CurClass && isa<ObjCInterfaceDecl>(CurClass))
+ Results.Ignore(CurClass);
+
+ if (CodeCompleter->includeGlobals()) {
+ // Add all classes.
+ AddInterfaceResults(Context.getTranslationUnitDecl(), CurContext, false,
+ false, Results);
+ }
+
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_ObjCInterfaceName,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCImplementationDecl(Scope *S) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+
+ if (CodeCompleter->includeGlobals()) {
+ // Add all unimplemented classes.
+ AddInterfaceResults(Context.getTranslationUnitDecl(), CurContext, false,
+ true, Results);
+ }
+
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_ObjCInterfaceName,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCInterfaceCategory(Scope *S,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassNameLoc) {
+ typedef CodeCompletionResult Result;
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_ObjCCategoryName);
+
+ // Ignore any categories that have already been declared for this
+ // interface.
+ llvm::SmallPtrSet<IdentifierInfo *, 16> CategoryNames;
+ NamedDecl *CurClass
+ = LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
+ if (ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurClass)){
+ for (const auto *Cat : Class->visible_categories())
+ CategoryNames.insert(Cat->getIdentifier());
+ }
+
+ // Add all of the categories we know about.
+ Results.EnterNewScope();
+ TranslationUnitDecl *TU = Context.getTranslationUnitDecl();
+ for (const auto *D : TU->decls())
+ if (const auto *Category = dyn_cast<ObjCCategoryDecl>(D))
+ if (CategoryNames.insert(Category->getIdentifier()).second)
+ Results.AddResult(Result(Category, Results.getBasePriority(Category),
+ nullptr),
+ CurContext, nullptr, false);
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_ObjCCategoryName,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassNameLoc) {
+ typedef CodeCompletionResult Result;
+
+ // Find the corresponding interface. If we couldn't find the interface, the
+ // program itself is ill-formed. However, we still try to be helpful by
+ // providing the list of all of the categories we know about.
+ NamedDecl *CurClass
+ = LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
+ ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurClass);
+ if (!Class)
+ return CodeCompleteObjCInterfaceCategory(S, ClassName, ClassNameLoc);
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_ObjCCategoryName);
+
+ // Add all of the categories that have corresponding interface
+ // declarations in this class and any of its superclasses, except for
+ // already-implemented categories in the class itself.
+ llvm::SmallPtrSet<IdentifierInfo *, 16> CategoryNames;
+ Results.EnterNewScope();
+ bool IgnoreImplemented = true;
+ while (Class) {
+ for (const auto *Cat : Class->visible_categories()) {
+ if ((!IgnoreImplemented || !Cat->getImplementation()) &&
+ CategoryNames.insert(Cat->getIdentifier()).second)
+ Results.AddResult(Result(Cat, Results.getBasePriority(Cat), nullptr),
+ CurContext, nullptr, false);
+ }
+
+ Class = Class->getSuperClass();
+ IgnoreImplemented = false;
+ }
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_ObjCCategoryName,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCPropertyDefinition(Scope *S) {
+ CodeCompletionContext CCContext(CodeCompletionContext::CCC_Other);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CCContext);
+
+ // Figure out where this @synthesize lives.
+ ObjCContainerDecl *Container
+ = dyn_cast_or_null<ObjCContainerDecl>(CurContext);
+ if (!Container ||
+ (!isa<ObjCImplementationDecl>(Container) &&
+ !isa<ObjCCategoryImplDecl>(Container)))
+ return;
+
+ // Ignore any properties that have already been implemented.
+ Container = getContainerDef(Container);
+ for (const auto *D : Container->decls())
+ if (const auto *PropertyImpl = dyn_cast<ObjCPropertyImplDecl>(D))
+ Results.Ignore(PropertyImpl->getPropertyDecl());
+
+ // Add any properties that we find.
+ AddedPropertiesSet AddedProperties;
+ Results.EnterNewScope();
+ if (ObjCImplementationDecl *ClassImpl
+ = dyn_cast<ObjCImplementationDecl>(Container))
+ AddObjCProperties(CCContext, ClassImpl->getClassInterface(), false,
+ /*AllowNullaryMethods=*/false, CurContext,
+ AddedProperties, Results);
+ else
+ AddObjCProperties(CCContext,
+ cast<ObjCCategoryImplDecl>(Container)->getCategoryDecl(),
+ false, /*AllowNullaryMethods=*/false, CurContext,
+ AddedProperties, Results);
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
+ IdentifierInfo *PropertyName) {
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+
+ // Figure out where this @synthesize lives.
+ ObjCContainerDecl *Container
+ = dyn_cast_or_null<ObjCContainerDecl>(CurContext);
+ if (!Container ||
+ (!isa<ObjCImplementationDecl>(Container) &&
+ !isa<ObjCCategoryImplDecl>(Container)))
+ return;
+
+ // Figure out which interface we're looking into.
+ ObjCInterfaceDecl *Class = nullptr;
+ if (ObjCImplementationDecl *ClassImpl
+ = dyn_cast<ObjCImplementationDecl>(Container))
+ Class = ClassImpl->getClassInterface();
+ else
+ Class = cast<ObjCCategoryImplDecl>(Container)->getCategoryDecl()
+ ->getClassInterface();
+
+ // Determine the type of the property we're synthesizing.
+ QualType PropertyType = Context.getObjCIdType();
+ if (Class) {
+ if (ObjCPropertyDecl *Property
+ = Class->FindPropertyDeclaration(PropertyName)) {
+ PropertyType
+ = Property->getType().getNonReferenceType().getUnqualifiedType();
+
+ // Give preference to ivars whose type matches the property's type.
+ Results.setPreferredType(PropertyType);
+ }
+ }
+
+ // Add all of the instance variables in this class and its superclasses.
+ Results.EnterNewScope();
+ bool SawSimilarlyNamedIvar = false;
+ std::string NameWithPrefix;
+ NameWithPrefix += '_';
+ NameWithPrefix += PropertyName->getName();
+ std::string NameWithSuffix = PropertyName->getName().str();
+ NameWithSuffix += '_';
+ for(; Class; Class = Class->getSuperClass()) {
+ for (ObjCIvarDecl *Ivar = Class->all_declared_ivar_begin(); Ivar;
+ Ivar = Ivar->getNextIvar()) {
+ Results.AddResult(Result(Ivar, Results.getBasePriority(Ivar), nullptr),
+ CurContext, nullptr, false);
+
+ // Determine whether we've seen an ivar with a name similar to the
+ // property.
+ if ((PropertyName == Ivar->getIdentifier() ||
+ NameWithPrefix == Ivar->getName() ||
+ NameWithSuffix == Ivar->getName())) {
+ SawSimilarlyNamedIvar = true;
+
+ // Reduce the priority of this result by one, to give it a slight
+ // advantage over other results whose names don't match so closely.
+ if (Results.size() &&
+ Results.data()[Results.size() - 1].Kind
+ == CodeCompletionResult::RK_Declaration &&
+ Results.data()[Results.size() - 1].Declaration == Ivar)
+ Results.data()[Results.size() - 1].Priority--;
+ }
+ }
+ }
+
+ if (!SawSimilarlyNamedIvar) {
+ // Create the ivar result _propName, which the user can use to
+ // synthesize an ivar of the appropriate type.
+ unsigned Priority = CCP_MemberDeclaration + 1;
+ typedef CodeCompletionResult Result;
+ CodeCompletionAllocator &Allocator = Results.getAllocator();
+ CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo(),
+ Priority,CXAvailability_Available);
+
+ PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
+ Builder.AddResultTypeChunk(GetCompletionTypeString(PropertyType, Context,
+ Policy, Allocator));
+ Builder.AddTypedTextChunk(Allocator.CopyString(NameWithPrefix));
+ Results.AddResult(Result(Builder.TakeString(), Priority,
+ CXCursor_ObjCIvarDecl));
+ }
+
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(),Results.size());
+}
+
+// Mapping from selectors to the methods that implement that selector, along
+// with the "in original class" flag.
+typedef llvm::DenseMap<
+ Selector, llvm::PointerIntPair<ObjCMethodDecl *, 1, bool> > KnownMethodsMap;
+
+/// \brief Find all of the methods that reside in the given container
+/// (and its superclasses, protocols, etc.) that meet the given
+/// criteria. Insert those methods into the map of known methods,
+/// indexed by selector so they can be easily found.
+static void FindImplementableMethods(ASTContext &Context,
+ ObjCContainerDecl *Container,
+ bool WantInstanceMethods,
+ QualType ReturnType,
+ KnownMethodsMap &KnownMethods,
+ bool InOriginalClass = true) {
+ if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container)) {
+ // Make sure we have a definition; that's what we'll walk.
+ if (!IFace->hasDefinition())
+ return;
+
+ IFace = IFace->getDefinition();
+ Container = IFace;
+
+ const ObjCList<ObjCProtocolDecl> &Protocols
+ = IFace->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end();
+ I != E; ++I)
+ FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
+ KnownMethods, InOriginalClass);
+
+ // Add methods from any class extensions and categories.
+ for (auto *Cat : IFace->visible_categories()) {
+ FindImplementableMethods(Context, Cat, WantInstanceMethods, ReturnType,
+ KnownMethods, false);
+ }
+
+ // Visit the superclass.
+ if (IFace->getSuperClass())
+ FindImplementableMethods(Context, IFace->getSuperClass(),
+ WantInstanceMethods, ReturnType,
+ KnownMethods, false);
+ }
+
+ if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(Container)) {
+ // Recurse into protocols.
+ const ObjCList<ObjCProtocolDecl> &Protocols
+ = Category->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end();
+ I != E; ++I)
+ FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
+ KnownMethods, InOriginalClass);
+
+ // If this category is the original class, jump to the interface.
+ if (InOriginalClass && Category->getClassInterface())
+ FindImplementableMethods(Context, Category->getClassInterface(),
+ WantInstanceMethods, ReturnType, KnownMethods,
+ false);
+ }
+
+ if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
+ // Make sure we have a definition; that's what we'll walk.
+ if (!Protocol->hasDefinition())
+ return;
+ Protocol = Protocol->getDefinition();
+ Container = Protocol;
+
+ // Recurse into protocols.
+ const ObjCList<ObjCProtocolDecl> &Protocols
+ = Protocol->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end();
+ I != E; ++I)
+ FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
+ KnownMethods, false);
+ }
+
+ // Add methods in this container. This operation occurs last because
+ // we want the methods from this container to override any methods
+ // we've previously seen with the same selector.
+ for (auto *M : Container->methods()) {
+ if (M->isInstanceMethod() == WantInstanceMethods) {
+ if (!ReturnType.isNull() &&
+ !Context.hasSameUnqualifiedType(ReturnType, M->getReturnType()))
+ continue;
+
+ KnownMethods[M->getSelector()] =
+ KnownMethodsMap::mapped_type(M, InOriginalClass);
+ }
+ }
+}
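+
+// Illustrative: because the container's own methods are inserted last, a
+// selector such as description declared both in a protocol and redeclared
+// in the class itself ends up mapped to the class's declaration, so the
+// suggested stub reflects the nearest redeclaration.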
+
+/// \brief Add the parenthesized return or parameter type chunk to a code
+/// completion string.
+static void AddObjCPassingTypeChunk(QualType Type,
+ unsigned ObjCDeclQuals,
+ ASTContext &Context,
+ const PrintingPolicy &Policy,
+ CodeCompletionBuilder &Builder) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ std::string Quals = formatObjCParamQualifiers(ObjCDeclQuals, Type);
+ if (!Quals.empty())
+ Builder.AddTextChunk(Builder.getAllocator().CopyString(Quals));
+ Builder.AddTextChunk(GetCompletionTypeString(Type, Context, Policy,
+ Builder.getAllocator()));
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+}
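+
+// Illustrative: for an NSString * parameter carrying the "inout" qualifier,
+// the chunk sequence built above renders as "(inout NSString *)".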
+
+/// \brief Determine whether the given class is or inherits from a class by
+/// the given name.
+static bool InheritsFromClassNamed(ObjCInterfaceDecl *Class,
+ StringRef Name) {
+ if (!Class)
+ return false;
+
+ if (Class->getIdentifier() && Class->getIdentifier()->getName() == Name)
+ return true;
+
+ return InheritsFromClassNamed(Class->getSuperClass(), Name);
+}
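+
+// Illustrative: for a property named "students", the KVC/KVO patterns below
+// suggest accessors such as countOfStudents, objectInStudentsAtIndex:,
+// studentsAtIndexes:, insertObject:inStudentsAtIndex: and
+// insertStudents:atIndexes:, with lower priority when the property's type
+// does not look like an NSArray/NSSet collection.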
+
+/// \brief Add code completions for Objective-C Key-Value Coding (KVC) and
+/// Key-Value Observing (KVO).
+static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
+ bool IsInstanceMethod,
+ QualType ReturnType,
+ ASTContext &Context,
+ VisitedSelectorSet &KnownSelectors,
+ ResultBuilder &Results) {
+ IdentifierInfo *PropName = Property->getIdentifier();
+ if (!PropName || PropName->getLength() == 0)
+ return;
+
+ PrintingPolicy Policy = getCompletionPrintingPolicy(Results.getSema());
+
+ // Builder that will create each code completion.
+ typedef CodeCompletionResult Result;
+ CodeCompletionAllocator &Allocator = Results.getAllocator();
+ CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
+
+ // The selector table.
+ SelectorTable &Selectors = Context.Selectors;
+
+ // The property name, copied into the code completion allocation region
+ // on demand.
+ struct KeyHolder {
+ CodeCompletionAllocator &Allocator;
+ StringRef Key;
+ const char *CopiedKey;
+
+ KeyHolder(CodeCompletionAllocator &Allocator, StringRef Key)
+ : Allocator(Allocator), Key(Key), CopiedKey(nullptr) {}
+
+ operator const char *() {
+ if (CopiedKey)
+ return CopiedKey;
+
+ return CopiedKey = Allocator.CopyString(Key);
+ }
+ } Key(Allocator, PropName->getName());
+
+ // The uppercased name of the property name.
+ std::string UpperKey = PropName->getName();
+ if (!UpperKey.empty())
+ UpperKey[0] = toUppercase(UpperKey[0]);
+
+ bool ReturnTypeMatchesProperty = ReturnType.isNull() ||
+ Context.hasSameUnqualifiedType(ReturnType.getNonReferenceType(),
+ Property->getType());
+ bool ReturnTypeMatchesVoid
+ = ReturnType.isNull() || ReturnType->isVoidType();
+
+ // Add the normal accessor -(type)key.
+ if (IsInstanceMethod &&
+ KnownSelectors.insert(Selectors.getNullarySelector(PropName)).second &&
+ ReturnTypeMatchesProperty && !Property->getGetterMethodDecl()) {
+ if (ReturnType.isNull())
+ AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0,
+ Context, Policy, Builder);
+
+ Builder.AddTypedTextChunk(Key);
+ Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+
+ // If we have an integral or boolean property (or the user has provided
+ // an integral or boolean return type), add the accessor -(type)isKey.
+ if (IsInstanceMethod &&
+ ((!ReturnType.isNull() &&
+ (ReturnType->isIntegerType() || ReturnType->isBooleanType())) ||
+ (ReturnType.isNull() &&
+ (Property->getType()->isIntegerType() ||
+ Property->getType()->isBooleanType())))) {
+ std::string SelectorName = (Twine("is") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
+ .second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("BOOL");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(
+ Allocator.CopyString(SelectorId->getName()));
+ Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Add the normal mutator.
+ if (IsInstanceMethod && ReturnTypeMatchesVoid &&
+ !Property->getSetterMethodDecl()) {
+ std::string SelectorName = (Twine("set") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(
+ Allocator.CopyString(SelectorId->getName()));
+ Builder.AddTypedTextChunk(":");
+ AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0,
+ Context, Policy, Builder);
+ Builder.AddTextChunk(Key);
+ Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Indexed and unordered accessors
+ unsigned IndexedGetterPriority = CCP_CodePattern;
+ unsigned IndexedSetterPriority = CCP_CodePattern;
+ unsigned UnorderedGetterPriority = CCP_CodePattern;
+ unsigned UnorderedSetterPriority = CCP_CodePattern;
+ if (const ObjCObjectPointerType *ObjCPointer
+ = Property->getType()->getAs<ObjCObjectPointerType>()) {
+ if (ObjCInterfaceDecl *IFace = ObjCPointer->getInterfaceDecl()) {
+ // If this interface type is not provably derived from a known
+ // collection, penalize the corresponding completions.
+ if (!InheritsFromClassNamed(IFace, "NSMutableArray")) {
+ IndexedSetterPriority += CCD_ProbablyNotObjCCollection;
+ if (!InheritsFromClassNamed(IFace, "NSArray"))
+ IndexedGetterPriority += CCD_ProbablyNotObjCCollection;
+ }
+
+ if (!InheritsFromClassNamed(IFace, "NSMutableSet")) {
+ UnorderedSetterPriority += CCD_ProbablyNotObjCCollection;
+ if (!InheritsFromClassNamed(IFace, "NSSet"))
+ UnorderedGetterPriority += CCD_ProbablyNotObjCCollection;
+ }
+ }
+ } else {
+ IndexedGetterPriority += CCD_ProbablyNotObjCCollection;
+ IndexedSetterPriority += CCD_ProbablyNotObjCCollection;
+ UnorderedGetterPriority += CCD_ProbablyNotObjCCollection;
+ UnorderedSetterPriority += CCD_ProbablyNotObjCCollection;
+ }
+
+ // Add -(NSUInteger)countOf<key>
+ if (IsInstanceMethod &&
+ (ReturnType.isNull() || ReturnType->isIntegerType())) {
+ std::string SelectorName = (Twine("countOf") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
+ .second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSUInteger");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(
+ Allocator.CopyString(SelectorId->getName()));
+ Results.AddResult(Result(Builder.TakeString(),
+ std::min(IndexedGetterPriority,
+ UnorderedGetterPriority),
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Indexed getters
+ // Add -(id)objectInKeyAtIndex:(NSUInteger)index
+ if (IsInstanceMethod &&
+ (ReturnType.isNull() || ReturnType->isObjCObjectPointerType())) {
+ std::string SelectorName
+ = (Twine("objectIn") + UpperKey + "AtIndex").str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("id");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSUInteger");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("index");
+ Results.AddResult(Result(Builder.TakeString(), IndexedGetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Add -(NSArray *)keyAtIndexes:(NSIndexSet *)indexes
+ if (IsInstanceMethod &&
+ (ReturnType.isNull() ||
+ (ReturnType->isObjCObjectPointerType() &&
+ ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
+ ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
+ ->getName() == "NSArray"))) {
+ std::string SelectorName
+ = (Twine(Property->getName()) + "AtIndexes").str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSArray *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSIndexSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("indexes");
+ Results.AddResult(Result(Builder.TakeString(), IndexedGetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Add -(void)getKey:(type **)buffer range:(NSRange)inRange
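+ // e.g. '- (void)getEmployees:(object-type **)buffer range:(NSRange)inRange'.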
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName = (Twine("get") + UpperKey).str();
+ IdentifierInfo *SelectorIds[2] = {
+ &Context.Idents.get(SelectorName),
+ &Context.Idents.get("range")
+ };
+
+ if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("object-type");
+ Builder.AddTextChunk(" **");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("buffer");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk("range:");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSRange");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("inRange");
+ Results.AddResult(Result(Builder.TakeString(), IndexedGetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Mutable indexed accessors
+
+ // - (void)insertObject:(type *)object inKeyAtIndex:(NSUInteger)index
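+ // e.g. '- (void)insertObject:(object-type *)object
+ //        inEmployeesAtIndex:(NSUInteger)index'.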
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName = (Twine("in") + UpperKey + "AtIndex").str();
+ IdentifierInfo *SelectorIds[2] = {
+ &Context.Idents.get("insertObject"),
+ &Context.Idents.get(SelectorName)
+ };
+
+ if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk("insertObject:");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("object-type");
+ Builder.AddTextChunk(" *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("object");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("NSUInteger");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("index");
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)insertKey:(NSArray *)array atIndexes:(NSIndexSet *)indexes
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName = (Twine("insert") + UpperKey).str();
+ IdentifierInfo *SelectorIds[2] = {
+ &Context.Idents.get(SelectorName),
+ &Context.Idents.get("atIndexes")
+ };
+
+ if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSArray *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("array");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk("atIndexes:");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("NSIndexSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("indexes");
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // -(void)removeObjectFromKeyAtIndex:(NSUInteger)index
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName
+ = (Twine("removeObjectFrom") + UpperKey + "AtIndex").str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSUInteger");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("index");
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // -(void)removeKeyAtIndexes:(NSIndexSet *)indexes
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName
+ = (Twine("remove") + UpperKey + "AtIndexes").str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSIndexSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("indexes");
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)replaceObjectInKeyAtIndex:(NSUInteger)index withObject:(id)object
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName
+ = (Twine("replaceObjectIn") + UpperKey + "AtIndex").str();
+ IdentifierInfo *SelectorIds[2] = {
+ &Context.Idents.get(SelectorName),
+ &Context.Idents.get("withObject")
+ };
+
+ if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("NSUInteger");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("index");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk("withObject:");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("id");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("object");
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)replaceKeyAtIndexes:(NSIndexSet *)indexes withKey:(NSArray *)array
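+ // e.g. '- (void)replaceEmployeesAtIndexes:(NSIndexSet *)indexes
+ //        withEmployees:(NSArray *)array'.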
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName1
+ = (Twine("replace") + UpperKey + "AtIndexes").str();
+ std::string SelectorName2 = (Twine("with") + UpperKey).str();
+ IdentifierInfo *SelectorIds[2] = {
+ &Context.Idents.get(SelectorName1),
+ &Context.Idents.get(SelectorName2)
+ };
+
+ if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName1 + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("NSIndexSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("indexes");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName2 + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSArray *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("array");
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Unordered getters
+ // - (NSEnumerator *)enumeratorOfKey
+ if (IsInstanceMethod &&
+ (ReturnType.isNull() ||
+ (ReturnType->isObjCObjectPointerType() &&
+ ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
+ ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
+ ->getName() == "NSEnumerator"))) {
+ std::string SelectorName = (Twine("enumeratorOf") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
+ .second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSEnumerator *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName));
+ Results.AddResult(Result(Builder.TakeString(), UnorderedGetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (type *)memberOfKey:(type *)object
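+ // e.g. '- (Employee *)memberOfEmployees:(Employee *)object' when the
+ // declared return type is 'Employee *' (hypothetical); with no declared
+ // return type, both positions become object-type placeholders.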
+ if (IsInstanceMethod &&
+ (ReturnType.isNull() || ReturnType->isObjCObjectPointerType())) {
+ std::string SelectorName = (Twine("memberOf") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("object-type");
+ Builder.AddTextChunk(" *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ if (ReturnType.isNull()) {
+ Builder.AddPlaceholderChunk("object-type");
+ Builder.AddTextChunk(" *");
+ } else {
+ Builder.AddTextChunk(GetCompletionTypeString(ReturnType, Context,
+ Policy,
+ Builder.getAllocator()));
+ }
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("object");
+ Results.AddResult(Result(Builder.TakeString(), UnorderedGetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Mutable unordered accessors
+ // - (void)addKeyObject:(type *)object
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName
+ = (Twine("add") + UpperKey + Twine("Object")).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("object-type");
+ Builder.AddTextChunk(" *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("object");
+ Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)addKey:(NSSet *)objects
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName = (Twine("add") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("objects");
+ Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)removeKeyObject:(type *)object
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName
+ = (Twine("remove") + UpperKey + Twine("Object")).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("object-type");
+ Builder.AddTextChunk(" *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("object");
+ Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)removeKey:(NSSet *)objects
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName = (Twine("remove") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("objects");
+ Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)intersectKey:(NSSet *)objects
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName = (Twine("intersect") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("objects");
+ Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Key-Value Observing
+ // + (NSSet *)keyPathsForValuesAffectingKey
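+ // e.g. the class method '+ (NSSet *)keyPathsForValuesAffectingEmployees'.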
+ if (!IsInstanceMethod &&
+ (ReturnType.isNull() ||
+ (ReturnType->isObjCObjectPointerType() &&
+ ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
+ ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
+ ->getName() == "NSSet"))) {
+ std::string SelectorName
+ = (Twine("keyPathsForValuesAffecting") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
+ .second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName));
+ Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
+ CXCursor_ObjCClassMethodDecl));
+ }
+ }
+
+ // + (BOOL)automaticallyNotifiesObserversForKey
+ if (!IsInstanceMethod &&
+ (ReturnType.isNull() ||
+ ReturnType->isIntegerType() ||
+ ReturnType->isBooleanType())) {
+ std::string SelectorName
+ = (Twine("automaticallyNotifiesObserversOf") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
+ .second) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("BOOL");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName));
+ Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
+ CXCursor_ObjCClassMethodDecl));
+ }
+ }
+}
+
+void Sema::CodeCompleteObjCMethodDecl(Scope *S,
+ bool IsInstanceMethod,
+ ParsedType ReturnTy) {
+ // Determine the return type of the method we're declaring, if
+ // provided.
+ QualType ReturnType = GetTypeFromParser(ReturnTy);
+ Decl *IDecl = nullptr;
+ if (CurContext->isObjCContainer()) {
+ ObjCContainerDecl *OCD = dyn_cast<ObjCContainerDecl>(CurContext);
+ IDecl = cast<Decl>(OCD);
+ }
+ // Determine where we should start searching for methods.
+ ObjCContainerDecl *SearchDecl = nullptr;
+ bool IsInImplementation = false;
+ if (Decl *D = IDecl) {
+ if (ObjCImplementationDecl *Impl = dyn_cast<ObjCImplementationDecl>(D)) {
+ SearchDecl = Impl->getClassInterface();
+ IsInImplementation = true;
+ } else if (ObjCCategoryImplDecl *CatImpl
+ = dyn_cast<ObjCCategoryImplDecl>(D)) {
+ SearchDecl = CatImpl->getCategoryDecl();
+ IsInImplementation = true;
+ } else
+ SearchDecl = dyn_cast<ObjCContainerDecl>(D);
+ }
+
+ if (!SearchDecl && S) {
+ if (DeclContext *DC = S->getEntity())
+ SearchDecl = dyn_cast<ObjCContainerDecl>(DC);
+ }
+
+ if (!SearchDecl) {
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ nullptr, 0);
+ return;
+ }
+
+ // Find all of the methods that we could declare/implement here.
+ KnownMethodsMap KnownMethods;
+ FindImplementableMethods(Context, SearchDecl, IsInstanceMethod,
+ ReturnType, KnownMethods);
+
+ // Add declarations or definitions for each of the known methods.
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+ PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
+ for (KnownMethodsMap::iterator M = KnownMethods.begin(),
+ MEnd = KnownMethods.end();
+ M != MEnd; ++M) {
+ ObjCMethodDecl *Method = M->second.getPointer();
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+
+ // If the result type was not already provided, add it to the
+ // pattern as (type).
+ if (ReturnType.isNull()) {
+ QualType ResTy = Method->getSendResultType().stripObjCKindOfType(Context);
+ AttributedType::stripOuterNullability(ResTy);
+ AddObjCPassingTypeChunk(ResTy,
+ Method->getObjCDeclQualifier(), Context, Policy,
+ Builder);
+ }
+
+ Selector Sel = Method->getSelector();
+
+ // Add the first part of the selector to the pattern.
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(0)));
+
+ // Add parameters to the pattern.
+ unsigned I = 0;
+ for (ObjCMethodDecl::param_iterator P = Method->param_begin(),
+ PEnd = Method->param_end();
+ P != PEnd; (void)++P, ++I) {
+ // Add the part of the selector name.
+ if (I == 0)
+ Builder.AddTypedTextChunk(":");
+ else if (I < Sel.getNumArgs()) {
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(I) + ":"));
+ } else
+ break;
+
+ // Add the parameter type.
+ QualType ParamType;
+ if ((*P)->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability)
+ ParamType = (*P)->getType();
+ else
+ ParamType = (*P)->getOriginalType();
+ ParamType = ParamType.substObjCTypeArgs(Context, {},
+ ObjCSubstitutionContext::Parameter);
+ AttributedType::stripOuterNullability(ParamType);
+ AddObjCPassingTypeChunk(ParamType,
+ (*P)->getObjCDeclQualifier(),
+ Context, Policy,
+ Builder);
+
+ if (IdentifierInfo *Id = (*P)->getIdentifier())
+ Builder.AddTextChunk(Builder.getAllocator().CopyString(Id->getName()));
+ }
+
+ if (Method->isVariadic()) {
+ if (Method->param_size() > 0)
+ Builder.AddChunk(CodeCompletionString::CK_Comma);
+ Builder.AddTextChunk("...");
+ }
+
+ if (IsInImplementation && Results.includeCodePatterns()) {
+ // We will be defining the method here, so add a compound statement.
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ if (!Method->getReturnType()->isVoidType()) {
+ // If the result type is not void, add a return clause.
+ Builder.AddTextChunk("return");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
+ } else
+ Builder.AddPlaceholderChunk("statements");
+
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ }
+
+ unsigned Priority = CCP_CodePattern;
+ if (!M->second.getInt())
+ Priority += CCD_InBaseClass;
+
+ Results.AddResult(Result(Builder.TakeString(), Method, Priority));
+ }
+
+ // Add Key-Value-Coding and Key-Value-Observing accessor methods for all of
+ // the properties in this class and its categories.
+ if (Context.getLangOpts().ObjC2) {
+ SmallVector<ObjCContainerDecl *, 4> Containers;
+ Containers.push_back(SearchDecl);
+
+ VisitedSelectorSet KnownSelectors;
+ for (KnownMethodsMap::iterator M = KnownMethods.begin(),
+ MEnd = KnownMethods.end();
+ M != MEnd; ++M)
+ KnownSelectors.insert(M->first);
+
+ ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(SearchDecl);
+ if (!IFace)
+ if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(SearchDecl))
+ IFace = Category->getClassInterface();
+
+ if (IFace)
+ for (auto *Cat : IFace->visible_categories())
+ Containers.push_back(Cat);
+
+ for (unsigned I = 0, N = Containers.size(); I != N; ++I)
+ for (auto *P : Containers[I]->properties())
+ AddObjCKeyValueCompletions(P, IsInstanceMethod, ReturnType, Context,
+ KnownSelectors, Results);
+ }
+
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(), Results.size());
+}
+
+void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
+ bool IsInstanceMethod,
+ bool AtParameterName,
+ ParsedType ReturnTy,
+ ArrayRef<IdentifierInfo *> SelIdents) {
+ // If we have an external source, load the entire class method
+ // pool from the AST file.
+ if (ExternalSource) {
+ for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors();
+ I != N; ++I) {
+ Selector Sel = ExternalSource->GetExternalSelector(I);
+ if (Sel.isNull() || MethodPool.count(Sel))
+ continue;
+
+ ReadMethodPool(Sel);
+ }
+ }
+
+ // Build the set of methods we can see.
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+
+ if (ReturnTy)
+ Results.setPreferredType(GetTypeFromParser(ReturnTy).getNonReferenceType());
+
+ Results.EnterNewScope();
+ for (GlobalMethodPool::iterator M = MethodPool.begin(),
+ MEnd = MethodPool.end();
+ M != MEnd; ++M) {
+ for (ObjCMethodList *MethList = IsInstanceMethod ? &M->second.first :
+ &M->second.second;
+ MethList && MethList->getMethod();
+ MethList = MethList->getNext()) {
+ if (!isAcceptableObjCMethod(MethList->getMethod(), MK_Any, SelIdents))
+ continue;
+
+ if (AtParameterName) {
+ // Suggest parameter names we've seen before.
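+ // e.g. (illustrative) if the pool contains '-insertObject:atIndex:' whose
+ // second parameter is named 'index', then completing the parameter name
+ // after 'atIndex:(NSUInteger)' suggests 'index'.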
+ unsigned NumSelIdents = SelIdents.size();
+ if (NumSelIdents &&
+ NumSelIdents <= MethList->getMethod()->param_size()) {
+ ParmVarDecl *Param =
+ MethList->getMethod()->parameters()[NumSelIdents - 1];
+ if (Param->getIdentifier()) {
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
+ Param->getIdentifier()->getName()));
+ Results.AddResult(Builder.TakeString());
+ }
+ }
+
+ continue;
+ }
+
+ Result R(MethList->getMethod(),
+ Results.getBasePriority(MethList->getMethod()), nullptr);
+ R.StartParameter = SelIdents.size();
+ R.AllParametersAreInformative = false;
+ R.DeclaringEntity = true;
+ Results.MaybeAddResult(R, CurContext);
+ }
+ }
+
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(), Results.size());
+}
+
+void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_PreprocessorDirective);
+ Results.EnterNewScope();
+
+ // #if <condition>
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ Builder.AddTypedTextChunk("if");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("condition");
+ Results.AddResult(Builder.TakeString());
+
+ // #ifdef <macro>
+ Builder.AddTypedTextChunk("ifdef");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("macro");
+ Results.AddResult(Builder.TakeString());
+
+ // #ifndef <macro>
+ Builder.AddTypedTextChunk("ifndef");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("macro");
+ Results.AddResult(Builder.TakeString());
+
+ if (InConditional) {
+ // #elif <condition>
+ Builder.AddTypedTextChunk("elif");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("condition");
+ Results.AddResult(Builder.TakeString());
+
+ // #else
+ Builder.AddTypedTextChunk("else");
+ Results.AddResult(Builder.TakeString());
+
+ // #endif
+ Builder.AddTypedTextChunk("endif");
+ Results.AddResult(Builder.TakeString());
+ }
+
+ // #include "header"
+ Builder.AddTypedTextChunk("include");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("\"");
+ Builder.AddPlaceholderChunk("header");
+ Builder.AddTextChunk("\"");
+ Results.AddResult(Builder.TakeString());
+
+ // #include <header>
+ Builder.AddTypedTextChunk("include");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("<");
+ Builder.AddPlaceholderChunk("header");
+ Builder.AddTextChunk(">");
+ Results.AddResult(Builder.TakeString());
+
+ // #define <macro>
+ Builder.AddTypedTextChunk("define");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("macro");
+ Results.AddResult(Builder.TakeString());
+
+ // #define <macro>(<args>)
+ Builder.AddTypedTextChunk("define");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("macro");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("args");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Builder.TakeString());
+
+ // #undef <macro>
+ Builder.AddTypedTextChunk("undef");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("macro");
+ Results.AddResult(Builder.TakeString());
+
+ // #line <number>
+ Builder.AddTypedTextChunk("line");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("number");
+ Results.AddResult(Builder.TakeString());
+
+ // #line <number> "filename"
+ Builder.AddTypedTextChunk("line");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("number");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("\"");
+ Builder.AddPlaceholderChunk("filename");
+ Builder.AddTextChunk("\"");
+ Results.AddResult(Builder.TakeString());
+
+ // #error <message>
+ Builder.AddTypedTextChunk("error");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("message");
+ Results.AddResult(Builder.TakeString());
+
+ // #pragma <arguments>
+ Builder.AddTypedTextChunk("pragma");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("arguments");
+ Results.AddResult(Builder.TakeString());
+
+ if (getLangOpts().ObjC1) {
+ // #import "header"
+ Builder.AddTypedTextChunk("import");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("\"");
+ Builder.AddPlaceholderChunk("header");
+ Builder.AddTextChunk("\"");
+ Results.AddResult(Builder.TakeString());
+
+ // #import <header>
+ Builder.AddTypedTextChunk("import");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("<");
+ Builder.AddPlaceholderChunk("header");
+ Builder.AddTextChunk(">");
+ Results.AddResult(Builder.TakeString());
+ }
+
+ // #include_next "header"
+ Builder.AddTypedTextChunk("include_next");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("\"");
+ Builder.AddPlaceholderChunk("header");
+ Builder.AddTextChunk("\"");
+ Results.AddResult(Builder.TakeString());
+
+ // #include_next <header>
+ Builder.AddTypedTextChunk("include_next");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("<");
+ Builder.AddPlaceholderChunk("header");
+ Builder.AddTextChunk(">");
+ Results.AddResult(Builder.TakeString());
+
+ // #warning <message>
+ Builder.AddTypedTextChunk("warning");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("message");
+ Results.AddResult(Builder.TakeString());
+
+ // Note: #ident and #sccs are such crazy anachronisms that we don't provide
+ // completions for them. And __include_macros is a Clang-internal extension
+ // that we don't want to encourage anyone to use.
+
+ // FIXME: we don't support #assert or #unassert, so don't suggest them.
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_PreprocessorDirective,
+ Results.data(), Results.size());
+}
+
+void Sema::CodeCompleteInPreprocessorConditionalExclusion(Scope *S) {
+ CodeCompleteOrdinaryName(S,
+ S->getFnParent()? Sema::PCC_RecoveryInFunction
+ : Sema::PCC_Namespace);
+}
+
+void Sema::CodeCompletePreprocessorMacroName(bool IsDefinition) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ IsDefinition? CodeCompletionContext::CCC_MacroName
+ : CodeCompletionContext::CCC_MacroNameUse);
+ if (!IsDefinition && (!CodeCompleter || CodeCompleter->includeMacros())) {
+ // Add just the names of macros, not their arguments.
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ Results.EnterNewScope();
+ for (Preprocessor::macro_iterator M = PP.macro_begin(),
+ MEnd = PP.macro_end();
+ M != MEnd; ++M) {
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
+ M->first->getName()));
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(),
+ CCP_CodePattern,
+ CXCursor_MacroDefinition));
+ }
+ Results.ExitScope();
+ } else if (IsDefinition) {
+ // FIXME: Can we detect when the user just wrote an include guard above?
+ }
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
+
+void Sema::CodeCompletePreprocessorExpression() {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_PreprocessorExpression);
+
+ if (!CodeCompleter || CodeCompleter->includeMacros())
+ AddMacroResults(PP, Results, true);
+
+ // defined (<macro>)
+ Results.EnterNewScope();
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ Builder.AddTypedTextChunk("defined");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("macro");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Builder.TakeString());
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_PreprocessorExpression,
+ Results.data(), Results.size());
+}
+
+void Sema::CodeCompletePreprocessorMacroArgument(Scope *S,
+ IdentifierInfo *Macro,
+ MacroInfo *MacroInfo,
+ unsigned Argument) {
+ // FIXME: In the future, we could provide "overload" results, much like we
+ // do for function calls.
+
+ // For now, just ignore this; there will be another code-completion
+ // callback for the expanded tokens.
+}
+
+void Sema::CodeCompleteNaturalLanguage() {
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_NaturalLanguage,
+ nullptr, 0);
+}
+
+void Sema::GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo,
+ SmallVectorImpl<CodeCompletionResult> &Results) {
+ ResultBuilder Builder(*this, Allocator, CCTUInfo,
+ CodeCompletionContext::CCC_Recovery);
+ if (!CodeCompleter || CodeCompleter->includeGlobals()) {
+ CodeCompletionDeclConsumer Consumer(Builder,
+ Context.getTranslationUnitDecl());
+ LookupVisibleDecls(Context.getTranslationUnitDecl(), LookupAnyName,
+ Consumer);
+ }
+
+ if (!CodeCompleter || CodeCompleter->includeMacros())
+ AddMacroResults(PP, Builder, true);
+
+ Results.clear();
+ Results.insert(Results.end(),
+ Builder.data(), Builder.data() + Builder.size());
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaConsumer.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaConsumer.cpp
new file mode 100644
index 0000000..d83a13e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaConsumer.cpp
@@ -0,0 +1,14 @@
+//===-- SemaConsumer.cpp - Abstract interface for AST semantics -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaConsumer.h"
+
+using namespace clang;
+
+void SemaConsumer::anchor() { }
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCoroutine.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCoroutine.cpp
new file mode 100644
index 0000000..4b4fd6b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCoroutine.cpp
@@ -0,0 +1,448 @@
+//===--- SemaCoroutine.cpp - Semantic Analysis for Coroutines -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for C++ Coroutines.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Overload.h"
+using namespace clang;
+using namespace sema;
+
+/// Look up the std::coroutine_traits<...>::promise_type for the given
+/// function type.
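+/// e.g. for a coroutine with signature 'Task f(int, char)' (illustrative),
+/// this resolves 'std::coroutine_traits<Task, int, char>::promise_type'.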
+static QualType lookupPromiseType(Sema &S, const FunctionProtoType *FnType,
+ SourceLocation Loc) {
+ // FIXME: Cache std::coroutine_traits once we've found it.
+ NamespaceDecl *Std = S.getStdNamespace();
+ if (!Std) {
+ S.Diag(Loc, diag::err_implied_std_coroutine_traits_not_found);
+ return QualType();
+ }
+
+ LookupResult Result(S, &S.PP.getIdentifierTable().get("coroutine_traits"),
+ Loc, Sema::LookupOrdinaryName);
+ if (!S.LookupQualifiedName(Result, Std)) {
+ S.Diag(Loc, diag::err_implied_std_coroutine_traits_not_found);
+ return QualType();
+ }
+
+ ClassTemplateDecl *CoroTraits = Result.getAsSingle<ClassTemplateDecl>();
+ if (!CoroTraits) {
+ Result.suppressDiagnostics();
+ // We found something weird. Complain about the first thing we found.
+ NamedDecl *Found = *Result.begin();
+ S.Diag(Found->getLocation(), diag::err_malformed_std_coroutine_traits);
+ return QualType();
+ }
+
+ // Form template argument list for coroutine_traits<R, P1, P2, ...>.
+ TemplateArgumentListInfo Args(Loc, Loc);
+ Args.addArgument(TemplateArgumentLoc(
+ TemplateArgument(FnType->getReturnType()),
+ S.Context.getTrivialTypeSourceInfo(FnType->getReturnType(), Loc)));
+ // FIXME: If the function is a non-static member function, add the type
+ // of the implicit object parameter before the formal parameters.
+ for (QualType T : FnType->getParamTypes())
+ Args.addArgument(TemplateArgumentLoc(
+ TemplateArgument(T), S.Context.getTrivialTypeSourceInfo(T, Loc)));
+
+ // Build the template-id.
+ QualType CoroTrait =
+ S.CheckTemplateIdType(TemplateName(CoroTraits), Loc, Args);
+ if (CoroTrait.isNull())
+ return QualType();
+ if (S.RequireCompleteType(Loc, CoroTrait,
+ diag::err_coroutine_traits_missing_specialization))
+ return QualType();
+
+ CXXRecordDecl *RD = CoroTrait->getAsCXXRecordDecl();
+ assert(RD && "specialization of class template is not a class?");
+
+ // Look up the ::promise_type member.
+ LookupResult R(S, &S.PP.getIdentifierTable().get("promise_type"), Loc,
+ Sema::LookupOrdinaryName);
+ S.LookupQualifiedName(R, RD);
+ auto *Promise = R.getAsSingle<TypeDecl>();
+ if (!Promise) {
+ S.Diag(Loc, diag::err_implied_std_coroutine_traits_promise_type_not_found)
+ << RD;
+ return QualType();
+ }
+
+ // The promise type is required to be a class type.
+ QualType PromiseType = S.Context.getTypeDeclType(Promise);
+ if (!PromiseType->getAsCXXRecordDecl()) {
+ // Use the fully-qualified name of the type.
+ auto *NNS = NestedNameSpecifier::Create(S.Context, nullptr, Std);
+ NNS = NestedNameSpecifier::Create(S.Context, NNS, false,
+ CoroTrait.getTypePtr());
+ PromiseType = S.Context.getElaboratedType(ETK_None, NNS, PromiseType);
+
+ S.Diag(Loc, diag::err_implied_std_coroutine_traits_promise_type_not_class)
+ << PromiseType;
+ return QualType();
+ }
+
+ return PromiseType;
+}
+
+/// Check that this is a context in which a coroutine suspension can appear.
+static FunctionScopeInfo *
+checkCoroutineContext(Sema &S, SourceLocation Loc, StringRef Keyword) {
+ // 'co_await' and 'co_yield' are not permitted in unevaluated operands.
+ if (S.isUnevaluatedContext()) {
+ S.Diag(Loc, diag::err_coroutine_unevaluated_context) << Keyword;
+ return nullptr;
+ }
+
+ // Any other usage must be within a function.
+ // FIXME: Reject a coroutine with a deduced return type.
+ auto *FD = dyn_cast<FunctionDecl>(S.CurContext);
+ if (!FD) {
+ S.Diag(Loc, isa<ObjCMethodDecl>(S.CurContext)
+ ? diag::err_coroutine_objc_method
+ : diag::err_coroutine_outside_function) << Keyword;
+ } else if (isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD)) {
+ // Coroutines TS [special]/6:
+ // A special member function shall not be a coroutine.
+ //
+ // FIXME: We assume that this really means that a coroutine cannot
+ // be a constructor or destructor.
+ S.Diag(Loc, diag::err_coroutine_ctor_dtor)
+ << isa<CXXDestructorDecl>(FD) << Keyword;
+ } else if (FD->isConstexpr()) {
+ S.Diag(Loc, diag::err_coroutine_constexpr) << Keyword;
+ } else if (FD->isVariadic()) {
+ S.Diag(Loc, diag::err_coroutine_varargs) << Keyword;
+ } else {
+ auto *ScopeInfo = S.getCurFunction();
+ assert(ScopeInfo && "missing function scope for function");
+
+ // If we don't have a promise variable, build one now.
+ if (!ScopeInfo->CoroutinePromise) {
+ QualType T =
+ FD->getType()->isDependentType()
+ ? S.Context.DependentTy
+ : lookupPromiseType(S, FD->getType()->castAs<FunctionProtoType>(),
+ Loc);
+ if (T.isNull())
+ return nullptr;
+
+ // Create and default-initialize the promise.
+ ScopeInfo->CoroutinePromise =
+ VarDecl::Create(S.Context, FD, FD->getLocation(), FD->getLocation(),
+ &S.PP.getIdentifierTable().get("__promise"), T,
+ S.Context.getTrivialTypeSourceInfo(T, Loc), SC_None);
+ S.CheckVariableDeclarationType(ScopeInfo->CoroutinePromise);
+ if (!ScopeInfo->CoroutinePromise->isInvalidDecl())
+ S.ActOnUninitializedDecl(ScopeInfo->CoroutinePromise, false);
+ }
+
+ return ScopeInfo;
+ }
+
+ return nullptr;
+}
+
+/// Build a call to 'operator co_await' if there is a suitable operator for
+/// the given expression.
+static ExprResult buildOperatorCoawaitCall(Sema &SemaRef, Scope *S,
+ SourceLocation Loc, Expr *E) {
+ UnresolvedSet<16> Functions;
+ SemaRef.LookupOverloadedOperatorName(OO_Coawait, S, E->getType(), QualType(),
+ Functions);
+ return SemaRef.CreateOverloadedUnaryOp(Loc, UO_Coawait, Functions, E);
+}
+
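+/// The result of building the await_ready/await_suspend/await_resume calls
+/// for an awaitable: a validity flag plus the three call expressions.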
+struct ReadySuspendResumeResult {
+ bool IsInvalid;
+ Expr *Results[3];
+};
+
+static ExprResult buildMemberCall(Sema &S, Expr *Base, SourceLocation Loc,
+ StringRef Name,
+ MutableArrayRef<Expr *> Args) {
+ DeclarationNameInfo NameInfo(&S.PP.getIdentifierTable().get(Name), Loc);
+
+ // FIXME: Fix BuildMemberReferenceExpr to take a const CXXScopeSpec&.
+ CXXScopeSpec SS;
+ ExprResult Result = S.BuildMemberReferenceExpr(
+ Base, Base->getType(), Loc, /*IsPtr=*/false, SS,
+ SourceLocation(), nullptr, NameInfo, /*TemplateArgs=*/nullptr,
+ /*Scope=*/nullptr);
+ if (Result.isInvalid())
+ return ExprError();
+
+ return S.ActOnCallExpr(nullptr, Result.get(), Loc, Args, Loc, nullptr);
+}
+
+/// Build calls to await_ready, await_suspend, and await_resume for a co_await
+/// expression.
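+/// e.g. for 'co_await e' this produces 'e.await_ready()', 'e.await_suspend()'
+/// and 'e.await_resume()'; the coroutine handle is not yet passed to
+/// await_suspend (see the FIXME below).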
+static ReadySuspendResumeResult buildCoawaitCalls(Sema &S, SourceLocation Loc,
+ Expr *E) {
+ // Assume invalid until we see otherwise.
+ ReadySuspendResumeResult Calls = {true, {}};
+
+ const StringRef Funcs[] = {"await_ready", "await_suspend", "await_resume"};
+ for (size_t I = 0, N = llvm::array_lengthof(Funcs); I != N; ++I) {
+ Expr *Operand = new (S.Context) OpaqueValueExpr(
+ Loc, E->getType(), VK_LValue, E->getObjectKind(), E);
+
+ // FIXME: Pass coroutine handle to await_suspend.
+ ExprResult Result = buildMemberCall(S, Operand, Loc, Funcs[I], None);
+ if (Result.isInvalid())
+ return Calls;
+ Calls.Results[I] = Result.get();
+ }
+
+ Calls.IsInvalid = false;
+ return Calls;
+}
+
+ExprResult Sema::ActOnCoawaitExpr(Scope *S, SourceLocation Loc, Expr *E) {
+ if (E->getType()->isPlaceholderType()) {
+ ExprResult R = CheckPlaceholderExpr(E);
+ if (R.isInvalid()) return ExprError();
+ E = R.get();
+ }
+
+ ExprResult Awaitable = buildOperatorCoawaitCall(*this, S, Loc, E);
+ if (Awaitable.isInvalid())
+ return ExprError();
+ return BuildCoawaitExpr(Loc, Awaitable.get());
+}
+ExprResult Sema::BuildCoawaitExpr(SourceLocation Loc, Expr *E) {
+ auto *Coroutine = checkCoroutineContext(*this, Loc, "co_await");
+ if (!Coroutine)
+ return ExprError();
+
+ if (E->getType()->isPlaceholderType()) {
+ ExprResult R = CheckPlaceholderExpr(E);
+ if (R.isInvalid()) return ExprError();
+ E = R.get();
+ }
+
+ if (E->getType()->isDependentType()) {
+ Expr *Res = new (Context) CoawaitExpr(Loc, Context.DependentTy, E);
+ Coroutine->CoroutineStmts.push_back(Res);
+ return Res;
+ }
+
+ // If the expression is a temporary, materialize it as an lvalue so that we
+ // can use it multiple times.
+ if (E->getValueKind() == VK_RValue)
+ E = new (Context) MaterializeTemporaryExpr(E->getType(), E, true);
+
+ // Build the await_ready, await_suspend, await_resume calls.
+ ReadySuspendResumeResult RSS = buildCoawaitCalls(*this, Loc, E);
+ if (RSS.IsInvalid)
+ return ExprError();
+
+ Expr *Res = new (Context) CoawaitExpr(Loc, E, RSS.Results[0], RSS.Results[1],
+ RSS.Results[2]);
+ Coroutine->CoroutineStmts.push_back(Res);
+ return Res;
+}
+
+static ExprResult buildPromiseCall(Sema &S, FunctionScopeInfo *Coroutine,
+ SourceLocation Loc, StringRef Name,
+ MutableArrayRef<Expr *> Args) {
+ assert(Coroutine->CoroutinePromise && "no promise for coroutine");
+
+ // Form a reference to the promise.
+ auto *Promise = Coroutine->CoroutinePromise;
+ ExprResult PromiseRef = S.BuildDeclRefExpr(
+ Promise, Promise->getType().getNonReferenceType(), VK_LValue, Loc);
+ if (PromiseRef.isInvalid())
+ return ExprError();
+
+ // Call the 'Name' member of the promise, passing in Args.
+ return buildMemberCall(S, PromiseRef.get(), Loc, Name, Args);
+}
+
+ExprResult Sema::ActOnCoyieldExpr(Scope *S, SourceLocation Loc, Expr *E) {
+ auto *Coroutine = checkCoroutineContext(*this, Loc, "co_yield");
+ if (!Coroutine)
+ return ExprError();
+
+ // Build yield_value call.
+ ExprResult Awaitable =
+ buildPromiseCall(*this, Coroutine, Loc, "yield_value", E);
+ if (Awaitable.isInvalid())
+ return ExprError();
+
+ // Build 'operator co_await' call.
+ Awaitable = buildOperatorCoawaitCall(*this, S, Loc, Awaitable.get());
+ if (Awaitable.isInvalid())
+ return ExprError();
+
+ return BuildCoyieldExpr(Loc, Awaitable.get());
+}
+ExprResult Sema::BuildCoyieldExpr(SourceLocation Loc, Expr *E) {
+ auto *Coroutine = checkCoroutineContext(*this, Loc, "co_yield");
+ if (!Coroutine)
+ return ExprError();
+
+ if (E->getType()->isPlaceholderType()) {
+ ExprResult R = CheckPlaceholderExpr(E);
+ if (R.isInvalid()) return ExprError();
+ E = R.get();
+ }
+
+ if (E->getType()->isDependentType()) {
+ Expr *Res = new (Context) CoyieldExpr(Loc, Context.DependentTy, E);
+ Coroutine->CoroutineStmts.push_back(Res);
+ return Res;
+ }
+
+ // If the expression is a temporary, materialize it as an lvalue so that we
+ // can use it multiple times.
+ if (E->getValueKind() == VK_RValue)
+ E = new (Context) MaterializeTemporaryExpr(E->getType(), E, true);
+
+ // Build the await_ready, await_suspend, await_resume calls.
+ ReadySuspendResumeResult RSS = buildCoawaitCalls(*this, Loc, E);
+ if (RSS.IsInvalid)
+ return ExprError();
+
+ Expr *Res = new (Context) CoyieldExpr(Loc, E, RSS.Results[0], RSS.Results[1],
+ RSS.Results[2]);
+ Coroutine->CoroutineStmts.push_back(Res);
+ return Res;
+}
+
+StmtResult Sema::ActOnCoreturnStmt(SourceLocation Loc, Expr *E) {
+ return BuildCoreturnStmt(Loc, E);
+}
+StmtResult Sema::BuildCoreturnStmt(SourceLocation Loc, Expr *E) {
+ auto *Coroutine = checkCoroutineContext(*this, Loc, "co_return");
+ if (!Coroutine)
+ return StmtError();
+
+ if (E && E->getType()->isPlaceholderType() &&
+ !E->getType()->isSpecificPlaceholderType(BuiltinType::Overload)) {
+ ExprResult R = CheckPlaceholderExpr(E);
+ if (R.isInvalid()) return StmtError();
+ E = R.get();
+ }
+
+ // FIXME: If the operand is a reference to a variable that's about to go out
+ // of scope, we should treat the operand as an xvalue for this overload
+ // resolution.
+ ExprResult PC;
+ if (E && !E->getType()->isVoidType()) {
+ PC = buildPromiseCall(*this, Coroutine, Loc, "return_value", E);
+ } else {
+ E = MakeFullDiscardedValueExpr(E).get();
+ PC = buildPromiseCall(*this, Coroutine, Loc, "return_void", None);
+ }
+ if (PC.isInvalid())
+ return StmtError();
+
+ Expr *PCE = ActOnFinishFullExpr(PC.get()).get();
+
+ Stmt *Res = new (Context) CoreturnStmt(Loc, E, PCE);
+ Coroutine->CoroutineStmts.push_back(Res);
+ return Res;
+}
+
+void Sema::CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body) {
+ FunctionScopeInfo *Fn = getCurFunction();
+ assert(Fn && !Fn->CoroutineStmts.empty() && "not a coroutine");
+
+ // Coroutines [stmt.return]p1:
+ // A return statement shall not appear in a coroutine.
+ if (Fn->FirstReturnLoc.isValid()) {
+ Diag(Fn->FirstReturnLoc, diag::err_return_in_coroutine);
+ auto *First = Fn->CoroutineStmts[0];
+ Diag(First->getLocStart(), diag::note_declared_coroutine_here)
+ << (isa<CoawaitExpr>(First) ? 0 :
+ isa<CoyieldExpr>(First) ? 1 : 2);
+ }
+
+ bool AnyCoawaits = false;
+ bool AnyCoyields = false;
+ for (auto *CoroutineStmt : Fn->CoroutineStmts) {
+ AnyCoawaits |= isa<CoawaitExpr>(CoroutineStmt);
+ AnyCoyields |= isa<CoyieldExpr>(CoroutineStmt);
+ }
+
+ if (!AnyCoawaits && !AnyCoyields)
+ Diag(Fn->CoroutineStmts.front()->getLocStart(),
+ diag::ext_coroutine_without_co_await_co_yield);
+
+ SourceLocation Loc = FD->getLocation();
+
+ // Form a declaration statement for the promise declaration, so that AST
+ // visitors can more easily find it.
+ StmtResult PromiseStmt =
+ ActOnDeclStmt(ConvertDeclToDeclGroup(Fn->CoroutinePromise), Loc, Loc);
+ if (PromiseStmt.isInvalid())
+ return FD->setInvalidDecl();
+
+ // Form and check implicit 'co_await p.initial_suspend();' statement.
+ ExprResult InitialSuspend =
+ buildPromiseCall(*this, Fn, Loc, "initial_suspend", None);
+ // FIXME: Support operator co_await here.
+ if (!InitialSuspend.isInvalid())
+ InitialSuspend = BuildCoawaitExpr(Loc, InitialSuspend.get());
+ InitialSuspend = ActOnFinishFullExpr(InitialSuspend.get());
+ if (InitialSuspend.isInvalid())
+ return FD->setInvalidDecl();
+
+ // Form and check implicit 'co_await p.final_suspend();' statement.
+ ExprResult FinalSuspend =
+ buildPromiseCall(*this, Fn, Loc, "final_suspend", None);
+ // FIXME: Support operator co_await here.
+ if (!FinalSuspend.isInvalid())
+ FinalSuspend = BuildCoawaitExpr(Loc, FinalSuspend.get());
+ FinalSuspend = ActOnFinishFullExpr(FinalSuspend.get());
+ if (FinalSuspend.isInvalid())
+ return FD->setInvalidDecl();
+
+ // FIXME: Perform analysis of set_exception call.
+
+ // FIXME: Try to form 'p.return_void();' expression statement to handle
+ // control flowing off the end of the coroutine.
+
+ // Build implicit 'p.get_return_object()' expression and form initialization
+ // of return type from it.
+ ExprResult ReturnObject =
+ buildPromiseCall(*this, Fn, Loc, "get_return_object", None);
+ if (ReturnObject.isInvalid())
+ return FD->setInvalidDecl();
+ QualType RetType = FD->getReturnType();
+ if (!RetType->isDependentType()) {
+ InitializedEntity Entity =
+ InitializedEntity::InitializeResult(Loc, RetType, false);
+ ReturnObject = PerformMoveOrCopyInitialization(Entity, nullptr, RetType,
+ ReturnObject.get());
+ if (ReturnObject.isInvalid())
+ return FD->setInvalidDecl();
+ }
+ ReturnObject = ActOnFinishFullExpr(ReturnObject.get(), Loc);
+ if (ReturnObject.isInvalid())
+ return FD->setInvalidDecl();
+
+ // FIXME: Perform move-initialization of parameters into frame-local copies.
+ SmallVector<Expr*, 16> ParamMoves;
+
+ // Build body for the coroutine wrapper statement.
+ Body = new (Context) CoroutineBodyStmt(
+ Body, PromiseStmt.get(), InitialSuspend.get(), FinalSuspend.get(),
+ /*SetException*/nullptr, /*Fallthrough*/nullptr,
+ ReturnObject.get(), ParamMoves);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
new file mode 100644
index 0000000..f27fb2b1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
@@ -0,0 +1,14810 @@
+//===--- SemaDecl.cpp - Semantic Analysis for Declarations ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "TypeLocBuilder.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTLambda.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/CommentDiagnostic.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/HeaderSearch.h" // TODO: Sema shouldn't depend on Lex
+#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
+#include "clang/Lex/ModuleLoader.h" // TODO: Sema shouldn't depend on Lex
+#include "clang/Lex/Preprocessor.h" // Included for isCodeCompletionEnabled()
+#include "clang/Sema/CXXFieldCollector.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/DelayedDiagnostic.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/Template.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Triple.h"
+#include <algorithm>
+#include <cstring>
+#include <functional>
+using namespace clang;
+using namespace sema;
+
+Sema::DeclGroupPtrTy Sema::ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType) {
+ if (OwnedType) {
+ Decl *Group[2] = { OwnedType, Ptr };
+ return DeclGroupPtrTy::make(DeclGroupRef::Create(Context, Group, 2));
+ }
+
+ return DeclGroupPtrTy::make(DeclGroupRef(Ptr));
+}
+
+namespace {
+
+class TypeNameValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ TypeNameValidatorCCC(bool AllowInvalid, bool WantClass=false,
+ bool AllowTemplates=false)
+ : AllowInvalidDecl(AllowInvalid), WantClassName(WantClass),
+ AllowClassTemplates(AllowTemplates) {
+ WantExpressionKeywords = false;
+ WantCXXNamedCasts = false;
+ WantRemainingKeywords = false;
+ }
+
+ bool ValidateCandidate(const TypoCorrection &candidate) override {
+ if (NamedDecl *ND = candidate.getCorrectionDecl()) {
+ bool IsType = isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND);
+ bool AllowedTemplate = AllowClassTemplates && isa<ClassTemplateDecl>(ND);
+ return (IsType || AllowedTemplate) &&
+ (AllowInvalidDecl || !ND->isInvalidDecl());
+ }
+ return !WantClassName && candidate.isKeyword();
+ }
+
+ private:
+ bool AllowInvalidDecl;
+ bool WantClassName;
+ bool AllowClassTemplates;
+};
+
+}
+
+/// \brief Determine whether the token kind starts a simple-type-specifier.
+bool Sema::isSimpleTypeSpecifier(tok::TokenKind Kind) const {
+ switch (Kind) {
+ // FIXME: Take into account the current language when deciding whether a
+ // token kind is a valid type specifier
+ case tok::kw_short:
+ case tok::kw_long:
+ case tok::kw___int64:
+ case tok::kw___int128:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw_void:
+ case tok::kw_char:
+ case tok::kw_int:
+ case tok::kw_half:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_wchar_t:
+ case tok::kw_bool:
+ case tok::kw___underlying_type:
+ case tok::kw___auto_type:
+ return true;
+
+ case tok::annot_typename:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+ case tok::kw_typeof:
+ case tok::annot_decltype:
+ case tok::kw_decltype:
+ return getLangOpts().CPlusPlus;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+namespace {
+enum class UnqualifiedTypeNameLookupResult {
+ NotFound,
+ FoundNonType,
+ FoundType
+};
+} // namespace
+
+/// \brief Tries to perform unqualified lookup of the type decls in bases of a
+/// dependent class.
+/// \return \a NotFound if no decls are found, \a FoundNonType if a non-type
+/// decl is found, \a FoundType if only type decls are found.
+static UnqualifiedTypeNameLookupResult
+lookupUnqualifiedTypeNameInBase(Sema &S, const IdentifierInfo &II,
+ SourceLocation NameLoc,
+ const CXXRecordDecl *RD) {
+ if (!RD->hasDefinition())
+ return UnqualifiedTypeNameLookupResult::NotFound;
+ // Look for type decls in base classes.
+ UnqualifiedTypeNameLookupResult FoundTypeDecl =
+ UnqualifiedTypeNameLookupResult::NotFound;
+ for (const auto &Base : RD->bases()) {
+ const CXXRecordDecl *BaseRD = nullptr;
+ if (auto *BaseTT = Base.getType()->getAs<TagType>())
+ BaseRD = BaseTT->getAsCXXRecordDecl();
+ else if (auto *TST = Base.getType()->getAs<TemplateSpecializationType>()) {
+ // Look for type decls in dependent base classes that have known primary
+ // templates.
+ if (!TST || !TST->isDependentType())
+ continue;
+ auto *TD = TST->getTemplateName().getAsTemplateDecl();
+ if (!TD)
+ continue;
+ auto *BasePrimaryTemplate =
+ dyn_cast_or_null<CXXRecordDecl>(TD->getTemplatedDecl());
+ if (!BasePrimaryTemplate)
+ continue;
+ BaseRD = BasePrimaryTemplate;
+ }
+ if (BaseRD) {
+ for (NamedDecl *ND : BaseRD->lookup(&II)) {
+ if (!isa<TypeDecl>(ND))
+ return UnqualifiedTypeNameLookupResult::FoundNonType;
+ FoundTypeDecl = UnqualifiedTypeNameLookupResult::FoundType;
+ }
+ if (FoundTypeDecl == UnqualifiedTypeNameLookupResult::NotFound) {
+ switch (lookupUnqualifiedTypeNameInBase(S, II, NameLoc, BaseRD)) {
+ case UnqualifiedTypeNameLookupResult::FoundNonType:
+ return UnqualifiedTypeNameLookupResult::FoundNonType;
+ case UnqualifiedTypeNameLookupResult::FoundType:
+ FoundTypeDecl = UnqualifiedTypeNameLookupResult::FoundType;
+ break;
+ case UnqualifiedTypeNameLookupResult::NotFound:
+ break;
+ }
+ }
+ }
+ }
+
+ return FoundTypeDecl;
+}
+
+static ParsedType recoverFromTypeInKnownDependentBase(Sema &S,
+ const IdentifierInfo &II,
+ SourceLocation NameLoc) {
+ // Lookup in the parent class template context, if any.
+ const CXXRecordDecl *RD = nullptr;
+ UnqualifiedTypeNameLookupResult FoundTypeDecl =
+ UnqualifiedTypeNameLookupResult::NotFound;
+ for (DeclContext *DC = S.CurContext;
+ DC && FoundTypeDecl == UnqualifiedTypeNameLookupResult::NotFound;
+ DC = DC->getParent()) {
+ // Look for type decls in dependent base classes that have known primary
+ // templates.
+ RD = dyn_cast<CXXRecordDecl>(DC);
+ if (RD && RD->getDescribedClassTemplate())
+ FoundTypeDecl = lookupUnqualifiedTypeNameInBase(S, II, NameLoc, RD);
+ }
+ if (FoundTypeDecl != UnqualifiedTypeNameLookupResult::FoundType)
+ return ParsedType();
+
+ // We found some types in dependent base classes. Recover as if the user
+ // wrote 'typename MyClass::II' instead of 'II'. We'll fully resolve the
+ // lookup during template instantiation.
+ S.Diag(NameLoc, diag::ext_found_via_dependent_bases_lookup) << &II;
+
+ ASTContext &Context = S.Context;
+ auto *NNS = NestedNameSpecifier::Create(Context, nullptr, false,
+ cast<Type>(Context.getRecordType(RD)));
+ QualType T = Context.getDependentNameType(ETK_Typename, NNS, &II);
+
+ CXXScopeSpec SS;
+ SS.MakeTrivial(Context, NNS, SourceRange(NameLoc));
+
+ TypeLocBuilder Builder;
+ DependentNameTypeLoc DepTL = Builder.push<DependentNameTypeLoc>(T);
+ DepTL.setNameLoc(NameLoc);
+ DepTL.setElaboratedKeywordLoc(SourceLocation());
+ DepTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ return S.CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
+}
+
+/// \brief If the identifier refers to a type name within this scope,
+/// return the declaration of that type.
+///
+/// This routine performs ordinary name lookup of the identifier II
+/// within the given scope, with optional C++ scope specifier SS, to
+/// determine whether the name refers to a type. If so, returns an
+/// opaque pointer (actually a QualType) corresponding to that
+/// type. Otherwise, returns NULL.
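+///
+/// For example (illustrative):
+/// @code
+/// typedef int Length;
+/// Length l; // the lookup of 'Length' here yields the typedef's type
+/// @endcode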
+ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
+ Scope *S, CXXScopeSpec *SS,
+ bool isClassName, bool HasTrailingDot,
+ ParsedType ObjectTypePtr,
+ bool IsCtorOrDtorName,
+ bool WantNontrivialTypeSourceInfo,
+ IdentifierInfo **CorrectedII) {
+ // Determine where we will perform name lookup.
+ DeclContext *LookupCtx = nullptr;
+ if (ObjectTypePtr) {
+ QualType ObjectType = ObjectTypePtr.get();
+ if (ObjectType->isRecordType())
+ LookupCtx = computeDeclContext(ObjectType);
+ } else if (SS && SS->isNotEmpty()) {
+ LookupCtx = computeDeclContext(*SS, false);
+
+ if (!LookupCtx) {
+ if (isDependentScopeSpecifier(*SS)) {
+ // C++ [temp.res]p3:
+ // A qualified-id that refers to a type and in which the
+ // nested-name-specifier depends on a template-parameter (14.6.2)
+ // shall be prefixed by the keyword typename to indicate that the
+ // qualified-id denotes a type, forming an
+ // elaborated-type-specifier (7.1.5.3).
+ //
+ // We therefore do not perform any name lookup if the result would
+ // refer to a member of an unknown specialization.
+ if (!isClassName && !IsCtorOrDtorName)
+ return ParsedType();
+
+ // We know from the grammar that this name refers to a type,
+ // so build a dependent node to describe the type.
+ if (WantNontrivialTypeSourceInfo)
+ return ActOnTypenameType(S, SourceLocation(), *SS, II, NameLoc).get();
+
+ NestedNameSpecifierLoc QualifierLoc = SS->getWithLocInContext(Context);
+ QualType T = CheckTypenameType(ETK_None, SourceLocation(), QualifierLoc,
+ II, NameLoc);
+ return ParsedType::make(T);
+ }
+
+ return ParsedType();
+ }
+
+ if (!LookupCtx->isDependentContext() &&
+ RequireCompleteDeclContext(*SS, LookupCtx))
+ return ParsedType();
+ }
+
+ // FIXME: LookupNestedNameSpecifierName isn't the right kind of
+ // lookup for class-names.
+ LookupNameKind Kind = isClassName ? LookupNestedNameSpecifierName :
+ LookupOrdinaryName;
+ LookupResult Result(*this, &II, NameLoc, Kind);
+ if (LookupCtx) {
+ // Perform "qualified" name lookup into the declaration context we
+ // computed, which is either the type of the base of a member access
+ // expression or the declaration context associated with a prior
+ // nested-name-specifier.
+ LookupQualifiedName(Result, LookupCtx);
+
+ if (ObjectTypePtr && Result.empty()) {
+ // C++ [basic.lookup.classref]p3:
+ // If the unqualified-id is ~type-name, the type-name is looked up
+ // in the context of the entire postfix-expression. If the type T of
+ // the object expression is of a class type C, the type-name is also
+ // looked up in the scope of class C. At least one of the lookups shall
+ // find a name that refers to (possibly cv-qualified) T.
+ LookupName(Result, S);
+ }
+ } else {
+ // Perform unqualified name lookup.
+ LookupName(Result, S);
+
+ // For unqualified lookup in a class template in MSVC mode, look into
+ // dependent base classes where the primary class template is known.
+ if (Result.empty() && getLangOpts().MSVCCompat && (!SS || SS->isEmpty())) {
+ if (ParsedType TypeInBase =
+ recoverFromTypeInKnownDependentBase(*this, II, NameLoc))
+ return TypeInBase;
+ }
+ }
+
+ NamedDecl *IIDecl = nullptr;
+ switch (Result.getResultKind()) {
+ case LookupResult::NotFound:
+ case LookupResult::NotFoundInCurrentInstantiation:
+ if (CorrectedII) {
+ TypoCorrection Correction = CorrectTypo(
+ Result.getLookupNameInfo(), Kind, S, SS,
+ llvm::make_unique<TypeNameValidatorCCC>(true, isClassName),
+ CTK_ErrorRecovery);
+ IdentifierInfo *NewII = Correction.getCorrectionAsIdentifierInfo();
+ TemplateTy Template;
+ bool MemberOfUnknownSpecialization;
+ UnqualifiedId TemplateName;
+ TemplateName.setIdentifier(NewII, NameLoc);
+ NestedNameSpecifier *NNS = Correction.getCorrectionSpecifier();
+ CXXScopeSpec NewSS, *NewSSPtr = SS;
+ if (SS && NNS) {
+ NewSS.MakeTrivial(Context, NNS, SourceRange(NameLoc));
+ NewSSPtr = &NewSS;
+ }
+ if (Correction && (NNS || NewII != &II) &&
+ // Ignore a correction to a template type as the to-be-corrected
+ // identifier is not a template (typo correction for template names
+ // is handled elsewhere).
+ !(getLangOpts().CPlusPlus && NewSSPtr &&
+ isTemplateName(S, *NewSSPtr, false, TemplateName, ParsedType(),
+ false, Template, MemberOfUnknownSpecialization))) {
+ ParsedType Ty = getTypeName(*NewII, NameLoc, S, NewSSPtr,
+ isClassName, HasTrailingDot, ObjectTypePtr,
+ IsCtorOrDtorName,
+ WantNontrivialTypeSourceInfo);
+ if (Ty) {
+ diagnoseTypo(Correction,
+ PDiag(diag::err_unknown_type_or_class_name_suggest)
+ << Result.getLookupName() << isClassName);
+ if (SS && NNS)
+ SS->MakeTrivial(Context, NNS, SourceRange(NameLoc));
+ *CorrectedII = NewII;
+ return Ty;
+ }
+ }
+ }
+ // If typo correction failed or was not performed, fall through
+ case LookupResult::FoundOverloaded:
+ case LookupResult::FoundUnresolvedValue:
+ Result.suppressDiagnostics();
+ return ParsedType();
+
+ case LookupResult::Ambiguous:
+ // Recover from type-hiding ambiguities by hiding the type. We'll
+ // do the lookup again when looking for an object, and we can
+ // diagnose the error then. If we don't do this, then the error
+ // about hiding the type will be immediately followed by an error
+ // that only makes sense if the identifier was treated like a type.
+ if (Result.getAmbiguityKind() == LookupResult::AmbiguousTagHiding) {
+ Result.suppressDiagnostics();
+ return ParsedType();
+ }
+
+ // Look to see if we have a type anywhere in the list of results.
+ for (LookupResult::iterator Res = Result.begin(), ResEnd = Result.end();
+ Res != ResEnd; ++Res) {
+ if (isa<TypeDecl>(*Res) || isa<ObjCInterfaceDecl>(*Res)) {
+ if (!IIDecl ||
+ (*Res)->getLocation().getRawEncoding() <
+ IIDecl->getLocation().getRawEncoding())
+ IIDecl = *Res;
+ }
+ }
+
+ if (!IIDecl) {
+ // None of the entities we found is a type, so there is no way
+ // to even assume that the result is a type. In this case, don't
+ // complain about the ambiguity. The parser will either try to
+ // perform this lookup again (e.g., as an object name), which
+ // will produce the ambiguity, or will complain that it expected
+ // a type name.
+ Result.suppressDiagnostics();
+ return ParsedType();
+ }
+
+ // We found a type within the ambiguous lookup; diagnose the
+ // ambiguity and then return that type. This might be the right
+ // answer, or it might not be, but it suppresses any attempt to
+ // perform the name lookup again.
+ break;
+
+ case LookupResult::Found:
+ IIDecl = Result.getFoundDecl();
+ break;
+ }
+
+ assert(IIDecl && "Didn't find decl");
+
+ QualType T;
+ if (TypeDecl *TD = dyn_cast<TypeDecl>(IIDecl)) {
+ DiagnoseUseOfDecl(IIDecl, NameLoc);
+
+ T = Context.getTypeDeclType(TD);
+ MarkAnyDeclReferenced(TD->getLocation(), TD, /*OdrUse=*/false);
+
+ // NOTE: avoid constructing an ElaboratedType(Loc) if this is a
+ // constructor or destructor name (in such a case, the scope specifier
+ // will be attached to the enclosing Expr or Decl node).
+ if (SS && SS->isNotEmpty() && !IsCtorOrDtorName) {
+ if (WantNontrivialTypeSourceInfo) {
+ // Construct a type with type-source information.
+ TypeLocBuilder Builder;
+ Builder.pushTypeSpec(T).setNameLoc(NameLoc);
+
+ T = getElaboratedType(ETK_None, *SS, T);
+ ElaboratedTypeLoc ElabTL = Builder.push<ElaboratedTypeLoc>(T);
+ ElabTL.setElaboratedKeywordLoc(SourceLocation());
+ ElabTL.setQualifierLoc(SS->getWithLocInContext(Context));
+ return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
+ } else {
+ T = getElaboratedType(ETK_None, *SS, T);
+ }
+ }
+ } else if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(IIDecl)) {
+ (void)DiagnoseUseOfDecl(IDecl, NameLoc);
+ if (!HasTrailingDot)
+ T = Context.getObjCInterfaceType(IDecl);
+ }
+
+ if (T.isNull()) {
+ // If it's not plausibly a type, suppress diagnostics.
+ Result.suppressDiagnostics();
+ return ParsedType();
+ }
+ return ParsedType::make(T);
+}
+
+// Builds a fake NNS for the given decl context.
+static NestedNameSpecifier *
+synthesizeCurrentNestedNameSpecifier(ASTContext &Context, DeclContext *DC) {
+ for (;; DC = DC->getLookupParent()) {
+ DC = DC->getPrimaryContext();
+ auto *ND = dyn_cast<NamespaceDecl>(DC);
+ if (ND && !ND->isInline() && !ND->isAnonymousNamespace())
+ return NestedNameSpecifier::Create(Context, nullptr, ND);
+ else if (auto *RD = dyn_cast<CXXRecordDecl>(DC))
+ return NestedNameSpecifier::Create(Context, nullptr, RD->isTemplateDecl(),
+ RD->getTypeForDecl());
+ else if (isa<TranslationUnitDecl>(DC))
+ return NestedNameSpecifier::GlobalSpecifier(Context);
+ }
+ llvm_unreachable("something isn't in TU scope?");
+}
+
+ParsedType Sema::ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II,
+ SourceLocation NameLoc) {
+ // Accepting an undeclared identifier as a default argument for a template
+ // type parameter is a Microsoft extension.
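+ // For example, MSVC accepts something like:
+ //   template <typename T = NotYetDeclared> struct S;
+ //   struct NotYetDeclared { };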
+ Diag(NameLoc, diag::ext_ms_delayed_template_argument) << &II;
+
+ // Build a fake DependentNameType that will perform lookup into CurContext at
+ // instantiation time. The name specifier isn't dependent, so template
+ // instantiation won't transform it. It will retry the lookup, however.
+ NestedNameSpecifier *NNS =
+ synthesizeCurrentNestedNameSpecifier(Context, CurContext);
+ QualType T = Context.getDependentNameType(ETK_None, NNS, &II);
+
+ // Build type location information. We synthesized the qualifier, so we have
+ // to build a fake NestedNameSpecifierLoc.
+ NestedNameSpecifierLocBuilder NNSLocBuilder;
+ NNSLocBuilder.MakeTrivial(Context, NNS, SourceRange(NameLoc));
+ NestedNameSpecifierLoc QualifierLoc = NNSLocBuilder.getWithLocInContext(Context);
+
+ TypeLocBuilder Builder;
+ DependentNameTypeLoc DepTL = Builder.push<DependentNameTypeLoc>(T);
+ DepTL.setNameLoc(NameLoc);
+ DepTL.setElaboratedKeywordLoc(SourceLocation());
+ DepTL.setQualifierLoc(QualifierLoc);
+ return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
+}
+
+/// isTagName() - This method is called *for error recovery purposes only*
+/// to determine if the specified name is a valid tag name ("struct foo"). If
+/// so, this returns the TST for the tag corresponding to it (TST_enum,
+/// TST_union, TST_struct, TST_interface, TST_class). This is used to diagnose
+/// cases in C where the user forgot to specify the tag.
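+///
+/// For example, in C:
+/// @code
+/// struct foo { int x; };
+/// foo f; // error: the user must write 'struct foo'; isTagName
+///        // returns TST_struct to drive the fix-it.
+/// @endcode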
+DeclSpec::TST Sema::isTagName(IdentifierInfo &II, Scope *S) {
+ // Do a tag name lookup in this scope.
+ LookupResult R(*this, &II, SourceLocation(), LookupTagName);
+ LookupName(R, S, false);
+ R.suppressDiagnostics();
+ if (R.getResultKind() == LookupResult::Found)
+ if (const TagDecl *TD = R.getAsSingle<TagDecl>()) {
+ switch (TD->getTagKind()) {
+ case TTK_Struct: return DeclSpec::TST_struct;
+ case TTK_Interface: return DeclSpec::TST_interface;
+ case TTK_Union: return DeclSpec::TST_union;
+ case TTK_Class: return DeclSpec::TST_class;
+ case TTK_Enum: return DeclSpec::TST_enum;
+ }
+ }
+
+ return DeclSpec::TST_unspecified;
+}
+
+/// isMicrosoftMissingTypename - In Microsoft mode, within class scope,
+/// if a CXXScopeSpec's type is equal to the type of one of the base classes
+/// then downgrade the missing typename error to a warning.
+/// This is needed for MSVC compatibility; Example:
+/// @code
+/// template<class T> class A {
+/// public:
+/// typedef int TYPE;
+/// };
+/// template<class T> class B : public A<T> {
+/// public:
+/// A<T>::TYPE a; // no typename required because A<T> is a base class.
+/// };
+/// @endcode
+bool Sema::isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S) {
+ if (CurContext->isRecord()) {
+ if (SS->getScopeRep()->getKind() == NestedNameSpecifier::Super)
+ return true;
+
+ const Type *Ty = SS->getScopeRep()->getAsType();
+
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(CurContext);
+ for (const auto &Base : RD->bases())
+ if (Context.hasSameUnqualifiedType(QualType(Ty, 1), Base.getType()))
+ return true;
+ return S->isFunctionPrototypeScope();
+ }
+ return CurContext->isFunctionOrMethod() || S->isFunctionPrototypeScope();
+}
+
+void Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II,
+ SourceLocation IILoc,
+ Scope *S,
+ CXXScopeSpec *SS,
+ ParsedType &SuggestedType,
+ bool AllowClassTemplates) {
+ // We don't have anything to suggest (yet).
+ SuggestedType = ParsedType();
+
+ // There may have been a typo in the name of the type. Look up typo
+ // results, in case we have something that we can suggest.
+ if (TypoCorrection Corrected =
+ CorrectTypo(DeclarationNameInfo(II, IILoc), LookupOrdinaryName, S, SS,
+ llvm::make_unique<TypeNameValidatorCCC>(
+ false, false, AllowClassTemplates),
+ CTK_ErrorRecovery)) {
+ if (Corrected.isKeyword()) {
+ // We corrected to a keyword.
+ diagnoseTypo(Corrected, PDiag(diag::err_unknown_typename_suggest) << II);
+ II = Corrected.getCorrectionAsIdentifierInfo();
+ } else {
+ // We found a similarly-named type or interface; suggest that.
+ if (!SS || !SS->isSet()) {
+ diagnoseTypo(Corrected,
+ PDiag(diag::err_unknown_typename_suggest) << II);
+ } else if (DeclContext *DC = computeDeclContext(*SS, false)) {
+ std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
+ bool DroppedSpecifier = Corrected.WillReplaceSpecifier() &&
+ II->getName().equals(CorrectedStr);
+ diagnoseTypo(Corrected,
+ PDiag(diag::err_unknown_nested_typename_suggest)
+ << II << DC << DroppedSpecifier << SS->getRange());
+ } else {
+ llvm_unreachable("could not have corrected a typo here");
+ }
+
+ CXXScopeSpec tmpSS;
+ if (Corrected.getCorrectionSpecifier())
+ tmpSS.MakeTrivial(Context, Corrected.getCorrectionSpecifier(),
+ SourceRange(IILoc));
+ SuggestedType = getTypeName(*Corrected.getCorrectionAsIdentifierInfo(),
+ IILoc, S, tmpSS.isSet() ? &tmpSS : SS, false,
+ false, ParsedType(),
+ /*IsCtorOrDtorName=*/false,
+ /*NonTrivialTypeSourceInfo=*/true);
+ }
+ return;
+ }
+
+ if (getLangOpts().CPlusPlus) {
+ // See if II is a class template that the user forgot to pass arguments to.
+ UnqualifiedId Name;
+ Name.setIdentifier(II, IILoc);
+ CXXScopeSpec EmptySS;
+ TemplateTy TemplateResult;
+ bool MemberOfUnknownSpecialization;
+ if (isTemplateName(S, SS ? *SS : EmptySS, /*hasTemplateKeyword=*/false,
+ Name, ParsedType(), true, TemplateResult,
+ MemberOfUnknownSpecialization) == TNK_Type_template) {
+ TemplateName TplName = TemplateResult.get();
+ Diag(IILoc, diag::err_template_missing_args) << TplName;
+ if (TemplateDecl *TplDecl = TplName.getAsTemplateDecl()) {
+ Diag(TplDecl->getLocation(), diag::note_template_decl_here)
+ << TplDecl->getTemplateParameters()->getSourceRange();
+ }
+ return;
+ }
+ }
+
+ // FIXME: Should we move the logic that tries to recover from a missing tag
+ // (struct, union, enum) from Parser::ParseImplicitInt here, instead?
+
+ if (!SS || (!SS->isSet() && !SS->isInvalid()))
+ Diag(IILoc, diag::err_unknown_typename) << II;
+ else if (DeclContext *DC = computeDeclContext(*SS, false))
+ Diag(IILoc, diag::err_typename_nested_not_found)
+ << II << DC << SS->getRange();
+ else if (isDependentScopeSpecifier(*SS)) {
+ unsigned DiagID = diag::err_typename_missing;
+ if (getLangOpts().MSVCCompat && isMicrosoftMissingTypename(SS, S))
+ DiagID = diag::ext_typename_missing;
+
+ Diag(SS->getRange().getBegin(), DiagID)
+ << SS->getScopeRep() << II->getName()
+ << SourceRange(SS->getRange().getBegin(), IILoc)
+ << FixItHint::CreateInsertion(SS->getRange().getBegin(), "typename ");
+ SuggestedType = ActOnTypenameType(S, SourceLocation(),
+ *SS, *II, IILoc).get();
+ } else {
+ assert(SS && SS->isInvalid() &&
+ "Invalid scope specifier has already been diagnosed");
+ }
+}
+
+/// \brief Determine whether the given result set contains either a type name
+/// or a template name (the latter only in C++ when the next token is '<').
+static bool isResultTypeOrTemplate(LookupResult &R, const Token &NextToken) {
+ bool CheckTemplate = R.getSema().getLangOpts().CPlusPlus &&
+ NextToken.is(tok::less);
+
+ for (LookupResult::iterator I = R.begin(), IEnd = R.end(); I != IEnd; ++I) {
+ if (isa<TypeDecl>(*I) || isa<ObjCInterfaceDecl>(*I))
+ return true;
+
+ if (CheckTemplate && isa<TemplateDecl>(*I))
+ return true;
+ }
+
+ return false;
+}
+
+static bool isTagTypeWithMissingTag(Sema &SemaRef, LookupResult &Result,
+ Scope *S, CXXScopeSpec &SS,
+ IdentifierInfo *&Name,
+ SourceLocation NameLoc) {
+ LookupResult R(SemaRef, Name, NameLoc, Sema::LookupTagName);
+ SemaRef.LookupParsedName(R, S, &SS);
+ if (TagDecl *Tag = R.getAsSingle<TagDecl>()) {
+ StringRef FixItTagName;
+ switch (Tag->getTagKind()) {
+ case TTK_Class:
+ FixItTagName = "class ";
+ break;
+
+ case TTK_Enum:
+ FixItTagName = "enum ";
+ break;
+
+ case TTK_Struct:
+ FixItTagName = "struct ";
+ break;
+
+ case TTK_Interface:
+ FixItTagName = "__interface ";
+ break;
+
+ case TTK_Union:
+ FixItTagName = "union ";
+ break;
+ }
+
+ StringRef TagName = FixItTagName.drop_back();
+ SemaRef.Diag(NameLoc, diag::err_use_of_tag_name_without_tag)
+ << Name << TagName << SemaRef.getLangOpts().CPlusPlus
+ << FixItHint::CreateInsertion(NameLoc, FixItTagName);
+
+ for (LookupResult::iterator I = Result.begin(), IEnd = Result.end();
+ I != IEnd; ++I)
+ SemaRef.Diag((*I)->getLocation(), diag::note_decl_hiding_tag_type)
+ << Name << TagName;
+
+ // Replace lookup results with just the tag decl.
+ Result.clear(Sema::LookupTagName);
+ SemaRef.LookupParsedName(Result, S, &SS);
+ return true;
+ }
+
+ return false;
+}
+
+/// Build a ParsedType for a simple-type-specifier with a nested-name-specifier.
+static ParsedType buildNestedType(Sema &S, CXXScopeSpec &SS,
+ QualType T, SourceLocation NameLoc) {
+ ASTContext &Context = S.Context;
+
+ TypeLocBuilder Builder;
+ Builder.pushTypeSpec(T).setNameLoc(NameLoc);
+
+ T = S.getElaboratedType(ETK_None, SS, T);
+ ElaboratedTypeLoc ElabTL = Builder.push<ElaboratedTypeLoc>(T);
+ ElabTL.setElaboratedKeywordLoc(SourceLocation());
+ ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ return S.CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
+}
+
+Sema::NameClassification
+Sema::ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
+ SourceLocation NameLoc, const Token &NextToken,
+ bool IsAddressOfOperand,
+ std::unique_ptr<CorrectionCandidateCallback> CCC) {
+ DeclarationNameInfo NameInfo(Name, NameLoc);
+ ObjCMethodDecl *CurMethod = getCurMethodDecl();
+
+ if (NextToken.is(tok::coloncolon)) {
+ BuildCXXNestedNameSpecifier(S, *Name, NameLoc, NextToken.getLocation(),
+ QualType(), false, SS, nullptr, false);
+ }
+
+ LookupResult Result(*this, Name, NameLoc, LookupOrdinaryName);
+ LookupParsedName(Result, S, &SS, !CurMethod);
+
+ // For unqualified lookup in a class template in MSVC mode, look into
+ // dependent base classes where the primary class template is known.
+ if (Result.empty() && SS.isEmpty() && getLangOpts().MSVCCompat) {
+ if (ParsedType TypeInBase =
+ recoverFromTypeInKnownDependentBase(*this, *Name, NameLoc))
+ return TypeInBase;
+ }
+
+ // Perform lookup for Objective-C instance variables (including automatically
+ // synthesized instance variables), if we're in an Objective-C method.
+ // FIXME: This lookup really, really needs to be folded in to the normal
+ // unqualified lookup mechanism.
+ if (!SS.isSet() && CurMethod && !isResultTypeOrTemplate(Result, NextToken)) {
+ ExprResult E = LookupInObjCMethod(Result, S, Name, true);
+ if (E.get() || E.isInvalid())
+ return E;
+ }
+
+ bool SecondTry = false;
+ bool IsFilteredTemplateName = false;
+
+Corrected:
+ switch (Result.getResultKind()) {
+ case LookupResult::NotFound:
+ // If an unqualified-id is followed by a '(', then we have a function
+ // call.
+ if (!SS.isSet() && NextToken.is(tok::l_paren)) {
+ // In C++, this is an ADL-only call.
+ // FIXME: Reference?
+ if (getLangOpts().CPlusPlus)
+ return BuildDeclarationNameExpr(SS, Result, /*ADL=*/true);
+
+ // C90 6.3.2.2:
+ // If the expression that precedes the parenthesized argument list in a
+ // function call consists solely of an identifier, and if no
+ // declaration is visible for this identifier, the identifier is
+ // implicitly declared exactly as if, in the innermost block containing
+ // the function call, the declaration
+ //
+ // extern int identifier ();
+ //
+ // appeared.
+ //
+ // We also allow this in C99 as an extension.
+ if (NamedDecl *D = ImplicitlyDefineFunction(NameLoc, *Name, S)) {
+ Result.addDecl(D);
+ Result.resolveKind();
+ return BuildDeclarationNameExpr(SS, Result, /*ADL=*/false);
+ }
+ }
+
+ // In C, we first see whether there is a tag type by the same name, in
+ // which case it's likely that the user just forgot to write "enum",
+ // "struct", or "union".
+ if (!getLangOpts().CPlusPlus && !SecondTry &&
+ isTagTypeWithMissingTag(*this, Result, S, SS, Name, NameLoc)) {
+ break;
+ }
+
+ // Perform typo correction to determine if there is another name that is
+ // close to this name.
+ if (!SecondTry && CCC) {
+ SecondTry = true;
+ if (TypoCorrection Corrected = CorrectTypo(Result.getLookupNameInfo(),
+ Result.getLookupKind(), S,
+ &SS, std::move(CCC),
+ CTK_ErrorRecovery)) {
+ unsigned UnqualifiedDiag = diag::err_undeclared_var_use_suggest;
+ unsigned QualifiedDiag = diag::err_no_member_suggest;
+
+ NamedDecl *FirstDecl = Corrected.getFoundDecl();
+ NamedDecl *UnderlyingFirstDecl = Corrected.getCorrectionDecl();
+ if (getLangOpts().CPlusPlus && NextToken.is(tok::less) &&
+ UnderlyingFirstDecl && isa<TemplateDecl>(UnderlyingFirstDecl)) {
+ UnqualifiedDiag = diag::err_no_template_suggest;
+ QualifiedDiag = diag::err_no_member_template_suggest;
+ } else if (UnderlyingFirstDecl &&
+ (isa<TypeDecl>(UnderlyingFirstDecl) ||
+ isa<ObjCInterfaceDecl>(UnderlyingFirstDecl) ||
+ isa<ObjCCompatibleAliasDecl>(UnderlyingFirstDecl))) {
+ UnqualifiedDiag = diag::err_unknown_typename_suggest;
+ QualifiedDiag = diag::err_unknown_nested_typename_suggest;
+ }
+
+ if (SS.isEmpty()) {
+ diagnoseTypo(Corrected, PDiag(UnqualifiedDiag) << Name);
+ } else { // FIXME: is this even reachable? Test it.
+ std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
+ bool DroppedSpecifier = Corrected.WillReplaceSpecifier() &&
+ Name->getName().equals(CorrectedStr);
+ diagnoseTypo(Corrected, PDiag(QualifiedDiag)
+ << Name << computeDeclContext(SS, false)
+ << DroppedSpecifier << SS.getRange());
+ }
+
+ // Update the name, so that the caller has the new name.
+ Name = Corrected.getCorrectionAsIdentifierInfo();
+
+ // Typo correction corrected to a keyword.
+ if (Corrected.isKeyword())
+ return Name;
+
+ // Also update the LookupResult...
+ // FIXME: This should probably go away at some point
+ Result.clear();
+ Result.setLookupName(Corrected.getCorrection());
+ if (FirstDecl)
+ Result.addDecl(FirstDecl);
+
+ // If we found an Objective-C instance variable, let
+ // LookupInObjCMethod build the appropriate expression to
+ // reference the ivar.
+ // FIXME: This is a gross hack.
+ if (ObjCIvarDecl *Ivar = Result.getAsSingle<ObjCIvarDecl>()) {
+ Result.clear();
+ ExprResult E(LookupInObjCMethod(Result, S, Ivar->getIdentifier()));
+ return E;
+ }
+
+ goto Corrected;
+ }
+ }
+
+ // We failed to correct; just fall through and let the parser deal with it.
+ Result.suppressDiagnostics();
+ return NameClassification::Unknown();
+
+ case LookupResult::NotFoundInCurrentInstantiation: {
+ // We performed name lookup into the current instantiation, and there were
+ // dependent bases, so we treat this result the same way as any other
+ // dependent nested-name-specifier.
+
+ // C++ [temp.res]p2:
+ // A name used in a template declaration or definition and that is
+ // dependent on a template-parameter is assumed not to name a type
+ // unless the applicable name lookup finds a type name or the name is
+ // qualified by the keyword typename.
+ //
+ // FIXME: If the next token is '<', we might want to ask the parser to
+ // perform some heroics to see if we actually have a
+ // template-argument-list, which would indicate a missing 'template'
+ // keyword here.
+ return ActOnDependentIdExpression(SS, /*TemplateKWLoc=*/SourceLocation(),
+ NameInfo, IsAddressOfOperand,
+ /*TemplateArgs=*/nullptr);
+ }
+
+ case LookupResult::Found:
+ case LookupResult::FoundOverloaded:
+ case LookupResult::FoundUnresolvedValue:
+ break;
+
+ case LookupResult::Ambiguous:
+ if (getLangOpts().CPlusPlus && NextToken.is(tok::less) &&
+ hasAnyAcceptableTemplateNames(Result)) {
+ // C++ [temp.local]p3:
+ // A lookup that finds an injected-class-name (10.2) can result in an
+ // ambiguity in certain cases (for example, if it is found in more than
+ // one base class). If all of the injected-class-names that are found
+ // refer to specializations of the same class template, and if the name
+ // is followed by a template-argument-list, the reference refers to the
+ // class template itself and not a specialization thereof, and is not
+ // ambiguous.
+ //
+ // This filtering can make an ambiguous result into an unambiguous one,
+ // so try again after filtering out template names.
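+ //
+ // For example (illustrative):
+ //   template <typename T> struct Base { };
+ //   struct Derived : Base<int>, Base<char> {
+ //     Base<float> *p; // 'Base' names the template itself; not ambiguous
+ //   };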
+ FilterAcceptableTemplateNames(Result);
+ if (!Result.isAmbiguous()) {
+ IsFilteredTemplateName = true;
+ break;
+ }
+ }
+
+ // Diagnose the ambiguity and return an error.
+ return NameClassification::Error();
+ }
+
+ if (getLangOpts().CPlusPlus && NextToken.is(tok::less) &&
+ (IsFilteredTemplateName || hasAnyAcceptableTemplateNames(Result))) {
+ // C++ [temp.names]p3:
+ // After name lookup (3.4) finds that a name is a template-name or that
+ // an operator-function-id or a literal- operator-id refers to a set of
+ // overloaded functions any member of which is a function template if
+ // this is followed by a <, the < is always taken as the delimiter of a
+ // template-argument-list and never as the less-than operator.
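+ //
+ // For example, in 'vec<int> v;' the '<' after the template-name 'vec'
+ // begins a template-argument-list rather than a less-than comparison.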
+ if (!IsFilteredTemplateName)
+ FilterAcceptableTemplateNames(Result);
+
+ if (!Result.empty()) {
+ bool IsFunctionTemplate;
+ bool IsVarTemplate = false; // only meaningful on the single-result path
+ TemplateName Template;
+ if (Result.end() - Result.begin() > 1) {
+ IsFunctionTemplate = true;
+ Template = Context.getOverloadedTemplateName(Result.begin(),
+ Result.end());
+ } else {
+ TemplateDecl *TD
+ = cast<TemplateDecl>((*Result.begin())->getUnderlyingDecl());
+ IsFunctionTemplate = isa<FunctionTemplateDecl>(TD);
+ IsVarTemplate = isa<VarTemplateDecl>(TD);
+
+ if (SS.isSet() && !SS.isInvalid())
+ Template = Context.getQualifiedTemplateName(SS.getScopeRep(),
+ /*TemplateKeyword=*/false,
+ TD);
+ else
+ Template = TemplateName(TD);
+ }
+
+ if (IsFunctionTemplate) {
+ // Function templates always go through overload resolution, at which
+ // point we'll perform the various checks (e.g., accessibility) we need
+ // to based on which function we selected.
+ Result.suppressDiagnostics();
+
+ return NameClassification::FunctionTemplate(Template);
+ }
+
+ return IsVarTemplate ? NameClassification::VarTemplate(Template)
+ : NameClassification::TypeTemplate(Template);
+ }
+ }
+
+ NamedDecl *FirstDecl = (*Result.begin())->getUnderlyingDecl();
+ if (TypeDecl *Type = dyn_cast<TypeDecl>(FirstDecl)) {
+ DiagnoseUseOfDecl(Type, NameLoc);
+ MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
+ QualType T = Context.getTypeDeclType(Type);
+ if (SS.isNotEmpty())
+ return buildNestedType(*this, SS, T, NameLoc);
+ return ParsedType::make(T);
+ }
+
+ ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(FirstDecl);
+ if (!Class) {
+ // FIXME: It's unfortunate that we don't have a Type node for handling this.
+ if (ObjCCompatibleAliasDecl *Alias =
+ dyn_cast<ObjCCompatibleAliasDecl>(FirstDecl))
+ Class = Alias->getClassInterface();
+ }
+
+ if (Class) {
+ DiagnoseUseOfDecl(Class, NameLoc);
+
+ if (NextToken.is(tok::period)) {
+ // Interface.<something> is parsed as a property reference expression.
+ // Just return "unknown" as a fall-through for now.
+ Result.suppressDiagnostics();
+ return NameClassification::Unknown();
+ }
+
+ QualType T = Context.getObjCInterfaceType(Class);
+ return ParsedType::make(T);
+ }
+
+ // We can have a type template here if we're classifying a template argument.
+ if (isa<TemplateDecl>(FirstDecl) && !isa<FunctionTemplateDecl>(FirstDecl))
+ return NameClassification::TypeTemplate(
+ TemplateName(cast<TemplateDecl>(FirstDecl)));
+
+ // Check for a tag type hidden by a non-type decl in a few cases where it
+ // seems likely a type is wanted instead of the non-type that was found.
+ bool NextIsOp = NextToken.isOneOf(tok::amp, tok::star);
+ if ((NextToken.is(tok::identifier) ||
+ (NextIsOp &&
+ FirstDecl->getUnderlyingDecl()->isFunctionOrFunctionTemplate())) &&
+ isTagTypeWithMissingTag(*this, Result, S, SS, Name, NameLoc)) {
+ TypeDecl *Type = Result.getAsSingle<TypeDecl>();
+ DiagnoseUseOfDecl(Type, NameLoc);
+ QualType T = Context.getTypeDeclType(Type);
+ if (SS.isNotEmpty())
+ return buildNestedType(*this, SS, T, NameLoc);
+ return ParsedType::make(T);
+ }
+
+ if (FirstDecl->isCXXClassMember())
+ return BuildPossibleImplicitMemberExpr(SS, SourceLocation(), Result,
+ nullptr, S);
+
+ bool ADL = UseArgumentDependentLookup(SS, Result, NextToken.is(tok::l_paren));
+ return BuildDeclarationNameExpr(SS, Result, ADL);
+}
+
+// Determines the context to return to after temporarily entering a
+// context. This depends in an unnecessarily complicated way on the
+// exact ordering of callbacks from the parser.
+DeclContext *Sema::getContainingDC(DeclContext *DC) {
+
+ // Functions defined inline within classes aren't parsed until we've
+ // finished parsing the top-level class, so the top-level class is
+ // the context we'll need to return to.
+ // A Lambda call operator whose parent is a class must not be treated
+ // as an inline member function. A Lambda can be used legally
+ // either as an in-class member initializer or a default argument. These
+ // are parsed once the class has been marked complete and so the containing
+ // context would be the nested class (when the lambda is defined in one);
+ // If the class is not complete, then the lambda is being used in an
+ // ill-formed fashion (such as to specify the width of a bit-field, or
+ // in an array-bound) - in which case we still want to return the
+ // lexically containing DC (which could be a nested class).
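+ //
+ // For example, in
+ //   struct Outer { struct Inner { void f() {} }; };
+ // the body of f() is parsed once Outer is complete, so the context to
+ // return to afterwards is Outer, not Inner.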
+ if (isa<FunctionDecl>(DC) && !isLambdaCallOperator(DC)) {
+ DC = DC->getLexicalParent();
+
+ // A function not defined within a class will always return to its
+ // lexical context.
+ if (!isa<CXXRecordDecl>(DC))
+ return DC;
+
+ // A C++ inline method/friend is parsed *after* the topmost class
+ // it was declared in is fully parsed ("complete"); the topmost
+ // class is the context we need to return to.
+ while (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC->getLexicalParent()))
+ DC = RD;
+
+ // Return the declaration context of the topmost class the inline method is
+ // declared in.
+ return DC;
+ }
+
+ return DC->getLexicalParent();
+}
+
+void Sema::PushDeclContext(Scope *S, DeclContext *DC) {
+ assert(getContainingDC(DC) == CurContext &&
+ "The next DeclContext should be lexically contained in the current one.");
+ CurContext = DC;
+ S->setEntity(DC);
+}
+
+void Sema::PopDeclContext() {
+ assert(CurContext && "DeclContext imbalance!");
+
+ CurContext = getContainingDC(CurContext);
+ assert(CurContext && "Popped translation unit!");
+}
+
+Sema::SkippedDefinitionContext Sema::ActOnTagStartSkippedDefinition(Scope *S,
+ Decl *D) {
+ // Unlike PushDeclContext, the context to which we return is not necessarily
+ // the containing DC of TD, because the new context will be some pre-existing
+ // TagDecl definition instead of a fresh one.
+ auto Result = static_cast<SkippedDefinitionContext>(CurContext);
+ CurContext = cast<TagDecl>(D)->getDefinition();
+ assert(CurContext && "skipping definition of undefined tag");
+ // Start lookups from the parent of the current context; we don't want to look
+ // into the pre-existing complete definition.
+ S->setEntity(CurContext->getLookupParent());
+ return Result;
+}
+
+void Sema::ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context) {
+ CurContext = static_cast<decltype(CurContext)>(Context);
+}
+
+/// EnterDeclaratorContext - Used when we must lookup names in the context
+/// of a declarator's nested name specifier.
+///
+void Sema::EnterDeclaratorContext(Scope *S, DeclContext *DC) {
+ // C++0x [basic.lookup.unqual]p13:
+ // A name used in the definition of a static data member of class
+ // X (after the qualified-id of the static member) is looked up as
+ // if the name was used in a member function of X.
+ // C++0x [basic.lookup.unqual]p14:
+ // If a variable member of a namespace is defined outside of the
+ // scope of its namespace then any name used in the definition of
+ // the variable member (after the declarator-id) is looked up as
+ // if the definition of the variable member occurred in its
+ // namespace.
+ // Both of these imply that we should push a scope whose context
+ // is the semantic context of the declaration. We can't use
+ // PushDeclContext here because that context is not necessarily
+ // lexically contained in the current context. Fortunately,
+ // the containing scope should have the appropriate information.
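+ //
+ // For example (illustrative):
+ //   struct X { typedef int type; static int n; };
+ //   int X::n = sizeof(type); // 'type' is found as if used in a member of X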
+
+ assert(!S->getEntity() && "scope already has entity");
+
+#ifndef NDEBUG
+ Scope *Ancestor = S->getParent();
+ while (!Ancestor->getEntity()) Ancestor = Ancestor->getParent();
+ assert(Ancestor->getEntity() == CurContext && "ancestor context mismatch");
+#endif
+
+ CurContext = DC;
+ S->setEntity(DC);
+}
+
+void Sema::ExitDeclaratorContext(Scope *S) {
+ assert(S->getEntity() == CurContext && "Context imbalance!");
+
+ // Switch back to the lexical context. The safety of this is
+ // enforced by an assert in EnterDeclaratorContext.
+ Scope *Ancestor = S->getParent();
+ while (!Ancestor->getEntity()) Ancestor = Ancestor->getParent();
+ CurContext = Ancestor->getEntity();
+
+ // We don't need to do anything with the scope, which is going to
+ // disappear.
+}
+
+
+void Sema::ActOnReenterFunctionContext(Scope *S, Decl *D) {
+ // We assume that the caller has already called
+ // ActOnReenterTemplateScope so getTemplatedDecl() works.
+ FunctionDecl *FD = D->getAsFunction();
+ if (!FD)
+ return;
+
+ // Same implementation as PushDeclContext, but enters the context
+ // from the lexical parent, rather than the top-level class.
+ assert(CurContext == FD->getLexicalParent() &&
+ "The next DeclContext should be lexically contained in the current one.");
+ CurContext = FD;
+ S->setEntity(CurContext);
+
+ for (unsigned P = 0, NumParams = FD->getNumParams(); P < NumParams; ++P) {
+ ParmVarDecl *Param = FD->getParamDecl(P);
+ // If the parameter has an identifier, then add it to the scope
+ if (Param->getIdentifier()) {
+ S->AddDecl(Param);
+ IdResolver.AddDecl(Param);
+ }
+ }
+}
+
+
+void Sema::ActOnExitFunctionContext() {
+ // Same implementation as PopDeclContext, but returns to the lexical parent,
+ // rather than the top-level class.
+ assert(CurContext && "DeclContext imbalance!");
+ CurContext = CurContext->getLexicalParent();
+ assert(CurContext && "Popped translation unit!");
+}
+
+
+/// \brief Determine whether we allow overloading of the function
+/// PrevDecl with another declaration.
+///
+/// This routine determines whether overloading is possible, not
+/// whether some new function is actually an overload. It will return
+/// true in C++ (where we can always provide overloads) or, as an
+/// extension, in C when the previous function is already an
+/// overloaded function declaration or has the "overloadable"
+/// attribute.
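+///
+/// For example, in C:
+/// @code
+/// int f(int) __attribute__((overloadable));
+/// float f(float) __attribute__((overloadable)); // OK: overloading allowed
+/// @endcode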
+static bool AllowOverloadingOfFunction(LookupResult &Previous,
+ ASTContext &Context) {
+ if (Context.getLangOpts().CPlusPlus)
+ return true;
+
+ if (Previous.getResultKind() == LookupResult::FoundOverloaded)
+ return true;
+
+ return (Previous.getResultKind() == LookupResult::Found
+ && Previous.getFoundDecl()->hasAttr<OverloadableAttr>());
+}
+
+/// Add this decl to the scope shadowed decl chains.
+void Sema::PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext) {
+ // Move up the scope chain until we find the nearest enclosing
+ // non-transparent context. The declaration will be introduced into this
+ // scope.
+ while (S->getEntity() && S->getEntity()->isTransparentContext())
+ S = S->getParent();
+
+ // Add scoped declarations into their context, so that they can be
+ // found later. Declarations without a context won't be inserted
+ // into any context.
+ if (AddToContext)
+ CurContext->addDecl(D);
+
+ // Out-of-line definitions shouldn't be pushed into scope in C++, unless they
+ // are function-local declarations.
+ if (getLangOpts().CPlusPlus && D->isOutOfLine() &&
+ !D->getDeclContext()->getRedeclContext()->Equals(
+ D->getLexicalDeclContext()->getRedeclContext()) &&
+ !D->getLexicalDeclContext()->isFunctionOrMethod())
+ return;
+
+ // Template instantiations should also not be pushed into scope.
+ if (isa<FunctionDecl>(D) &&
+ cast<FunctionDecl>(D)->isFunctionTemplateSpecialization())
+ return;
+
+ // If this replaces anything in the current scope, remove the old
+ // declaration from the scope and from the identifier resolver chain.
+ IdentifierResolver::iterator I = IdResolver.begin(D->getDeclName()),
+ IEnd = IdResolver.end();
+ for (; I != IEnd; ++I) {
+ if (S->isDeclScope(*I) && D->declarationReplaces(*I)) {
+ S->RemoveDecl(*I);
+ IdResolver.RemoveDecl(*I);
+
+ // Should only need to replace one decl.
+ break;
+ }
+ }
+
+ S->AddDecl(D);
+
+ if (isa<LabelDecl>(D) && !cast<LabelDecl>(D)->isGnuLocal()) {
+ // Implicitly-generated labels may end up getting generated in an order that
+ // isn't strictly lexical, which breaks name lookup. Be careful to insert
+ // the label at the appropriate place in the identifier chain.
+ for (I = IdResolver.begin(D->getDeclName()); I != IEnd; ++I) {
+ DeclContext *IDC = (*I)->getLexicalDeclContext()->getRedeclContext();
+ if (IDC == CurContext) {
+ if (!S->isDeclScope(*I))
+ continue;
+ } else if (IDC->Encloses(CurContext))
+ break;
+ }
+
+ IdResolver.InsertDeclAfter(I, D);
+ } else {
+ IdResolver.AddDecl(D);
+ }
+}
+
+void Sema::pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name) {
+ if (IdResolver.tryAddTopLevelDecl(D, Name) && TUScope)
+ TUScope->AddDecl(D);
+}
+
+bool Sema::isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S,
+ bool AllowInlineNamespace) {
+ return IdResolver.isDeclInScope(D, Ctx, S, AllowInlineNamespace);
+}
+
+Scope *Sema::getScopeForDeclContext(Scope *S, DeclContext *DC) {
+ DeclContext *TargetDC = DC->getPrimaryContext();
+ do {
+ if (DeclContext *ScopeDC = S->getEntity())
+ if (ScopeDC->getPrimaryContext() == TargetDC)
+ return S;
+ } while ((S = S->getParent()));
+
+ return nullptr;
+}
+
+static bool isOutOfScopePreviousDeclaration(NamedDecl *,
+ DeclContext*,
+ ASTContext&);
+
+/// Filters out lookup results that don't fall within the given scope
+/// as determined by isDeclInScope.
+void Sema::FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
+ bool ConsiderLinkage,
+ bool AllowInlineNamespace) {
+ LookupResult::Filter F = R.makeFilter();
+ while (F.hasNext()) {
+ NamedDecl *D = F.next();
+
+ if (isDeclInScope(D, Ctx, S, AllowInlineNamespace))
+ continue;
+
+ if (ConsiderLinkage && isOutOfScopePreviousDeclaration(D, Ctx, Context))
+ continue;
+
+ F.erase();
+ }
+
+ F.done();
+}
+
+static bool isUsingDecl(NamedDecl *D) {
+ return isa<UsingShadowDecl>(D) ||
+ isa<UnresolvedUsingTypenameDecl>(D) ||
+ isa<UnresolvedUsingValueDecl>(D);
+}
+
+/// Removes using shadow declarations from the lookup results.
+static void RemoveUsingDecls(LookupResult &R) {
+ LookupResult::Filter F = R.makeFilter();
+ while (F.hasNext())
+ if (isUsingDecl(F.next()))
+ F.erase();
+
+ F.done();
+}
+
+/// \brief Check for this common pattern:
+/// @code
+/// class S {
+/// S(const S&); // DO NOT IMPLEMENT
+/// void operator=(const S&); // DO NOT IMPLEMENT
+/// };
+/// @endcode
+static bool IsDisallowedCopyOrAssign(const CXXMethodDecl *D) {
+ // FIXME: Should check for private access too but access is set after we get
+ // the decl here.
+ if (D->doesThisDeclarationHaveABody())
+ return false;
+
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
+ return CD->isCopyConstructor();
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
+ return Method->isCopyAssignmentOperator();
+ return false;
+}
+
+// We need this to handle
+//
+// typedef struct {
+// void *foo() { return 0; }
+// } A;
+//
+// When we see foo we don't know if after the typedef we will get 'A' or '*A'
+// for example. If 'A', foo will have external linkage. If we have '*A',
+// foo will have no linkage. Since we can't know until we get to the end
+// of the typedef, this function finds out if D might have non-external linkage.
+// Callers should verify at the end of the TU whether D has external linkage
+// or not.
+bool Sema::mightHaveNonExternalLinkage(const DeclaratorDecl *D) {
+ const DeclContext *DC = D->getDeclContext();
+ while (!DC->isTranslationUnit()) {
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(DC)){
+ if (!RD->hasNameForLinkage())
+ return true;
+ }
+ DC = DC->getParent();
+ }
+
+ return !D->isExternallyVisible();
+}
+
+// FIXME: This needs to be refactored; some other isInMainFile users want
+// these semantics.
+static bool isMainFileLoc(const Sema &S, SourceLocation Loc) {
+ if (S.TUKind != TU_Complete)
+ return false;
+ return S.SourceMgr.isInMainFile(Loc);
+}
+
+bool Sema::ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const {
+ assert(D);
+
+ if (D->isInvalidDecl() || D->isUsed() || D->hasAttr<UnusedAttr>())
+ return false;
+
+ // Ignore all entities declared within templates, and out-of-line definitions
+ // of members of class templates.
+ if (D->getDeclContext()->isDependentContext() ||
+ D->getLexicalDeclContext()->isDependentContext())
+ return false;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
+ return false;
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ if (MD->isVirtual() || IsDisallowedCopyOrAssign(MD))
+ return false;
+ } else {
+ // 'static inline' functions are defined in headers; don't warn.
+ if (FD->isInlined() && !isMainFileLoc(*this, FD->getLocation()))
+ return false;
+ }
+
+ if (FD->doesThisDeclarationHaveABody() &&
+ Context.DeclMustBeEmitted(FD))
+ return false;
+ } else if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ // Constants and utility variables are defined in headers with internal
+ // linkage; don't warn. (Unlike functions, there isn't a convenient marker
+ // like "inline".)
+ if (!isMainFileLoc(*this, VD->getLocation()))
+ return false;
+
+ if (Context.DeclMustBeEmitted(VD))
+ return false;
+
+ if (VD->isStaticDataMember() &&
+ VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
+ return false;
+ } else {
+ return false;
+ }
+
+ // Only warn for unused decls internal to the translation unit.
+ // FIXME: This seems like a bogus check; it suppresses -Wunused-function
+ // for inline functions defined in the main source file, for instance.
+ return mightHaveNonExternalLinkage(D);
+}
+
+void Sema::MarkUnusedFileScopedDecl(const DeclaratorDecl *D) {
+ if (!D)
+ return;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ const FunctionDecl *First = FD->getFirstDecl();
+ if (FD != First && ShouldWarnIfUnusedFileScopedDecl(First))
+ return; // First should already be in the vector.
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ const VarDecl *First = VD->getFirstDecl();
+ if (VD != First && ShouldWarnIfUnusedFileScopedDecl(First))
+ return; // First should already be in the vector.
+ }
+
+ if (ShouldWarnIfUnusedFileScopedDecl(D))
+ UnusedFileScopedDecls.push_back(D);
+}
+
+static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
+ if (D->isInvalidDecl())
+ return false;
+
+ if (D->isReferenced() || D->isUsed() || D->hasAttr<UnusedAttr>() ||
+ D->hasAttr<ObjCPreciseLifetimeAttr>())
+ return false;
+
+ if (isa<LabelDecl>(D))
+ return true;
+
+ // Except for labels, we only care about unused decls that are local to
+ // functions.
+ bool WithinFunction = D->getDeclContext()->isFunctionOrMethod();
+ if (const auto *R = dyn_cast<CXXRecordDecl>(D->getDeclContext()))
+ // For dependent types, the diagnostic is deferred.
+ WithinFunction =
+ WithinFunction || (R->isLocalClass() && !R->isDependentType());
+ if (!WithinFunction)
+ return false;
+
+ if (isa<TypedefNameDecl>(D))
+ return true;
+
+ // White-list anything that isn't a local variable.
+ if (!isa<VarDecl>(D) || isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D))
+ return false;
+
+ // Types of valid local variables should be complete, so this should succeed.
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+
+ // White-list anything with an __attribute__((unused)) type.
+ QualType Ty = VD->getType();
+
+ // Only look at the outermost level of typedef.
+ if (const TypedefType *TT = Ty->getAs<TypedefType>()) {
+ if (TT->getDecl()->hasAttr<UnusedAttr>())
+ return false;
+ }
+
+ // If we failed to complete the type for some reason, or if the type is
+ // dependent, don't diagnose the variable.
+ if (Ty->isIncompleteType() || Ty->isDependentType())
+ return false;
+
+ if (const TagType *TT = Ty->getAs<TagType>()) {
+ const TagDecl *Tag = TT->getDecl();
+ if (Tag->hasAttr<UnusedAttr>())
+ return false;
+
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Tag)) {
+ if (!RD->hasTrivialDestructor() && !RD->hasAttr<WarnUnusedAttr>())
+ return false;
+
+ if (const Expr *Init = VD->getInit()) {
+ if (const ExprWithCleanups *Cleanups =
+ dyn_cast<ExprWithCleanups>(Init))
+ Init = Cleanups->getSubExpr();
+ const CXXConstructExpr *Construct =
+ dyn_cast<CXXConstructExpr>(Init);
+ if (Construct && !Construct->isElidable()) {
+ CXXConstructorDecl *CD = Construct->getConstructor();
+ if (!CD->isTrivial() && !RD->hasAttr<WarnUnusedAttr>())
+ return false;
+ }
+ }
+ }
+ }
+
+ // TODO: __attribute__((unused)) templates?
+ }
+
+ return true;
+}
+
+static void GenerateFixForUnusedDecl(const NamedDecl *D, ASTContext &Ctx,
+ FixItHint &Hint) {
+ if (isa<LabelDecl>(D)) {
+ SourceLocation AfterColon = Lexer::findLocationAfterToken(D->getLocEnd(),
+ tok::colon, Ctx.getSourceManager(), Ctx.getLangOpts(), true);
+ if (AfterColon.isInvalid())
+ return;
+ Hint = FixItHint::CreateRemoval(CharSourceRange::
+ getCharRange(D->getLocStart(), AfterColon));
+ }
+ return;
+}
+
+void Sema::DiagnoseUnusedNestedTypedefs(const RecordDecl *D) {
+ if (D->getTypeForDecl()->isDependentType())
+ return;
+
+ for (auto *TmpD : D->decls()) {
+ if (const auto *T = dyn_cast<TypedefNameDecl>(TmpD))
+ DiagnoseUnusedDecl(T);
+ else if (const auto *R = dyn_cast<RecordDecl>(TmpD))
+ DiagnoseUnusedNestedTypedefs(R);
+ }
+}
+
+/// DiagnoseUnusedDecl - Emit warnings about declarations that are not used
+/// unless they are marked attr(unused).
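+///
+/// For example:
+/// @code
+/// void f() {
+///   int x; // warning: unused variable 'x'
+/// }
+/// @endcode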
+void Sema::DiagnoseUnusedDecl(const NamedDecl *D) {
+ if (!ShouldDiagnoseUnusedDecl(D))
+ return;
+
+ if (auto *TD = dyn_cast<TypedefNameDecl>(D)) {
+ // typedefs can be referenced later on, so the diagnostics are emitted
+ // at end-of-translation-unit.
+ UnusedLocalTypedefNameCandidates.insert(TD);
+ return;
+ }
+
+ FixItHint Hint;
+ GenerateFixForUnusedDecl(D, Context, Hint);
+
+ unsigned DiagID;
+ if (isa<VarDecl>(D) && cast<VarDecl>(D)->isExceptionVariable())
+ DiagID = diag::warn_unused_exception_param;
+ else if (isa<LabelDecl>(D))
+ DiagID = diag::warn_unused_label;
+ else
+ DiagID = diag::warn_unused_variable;
+
+ Diag(D->getLocation(), DiagID) << D->getDeclName() << Hint;
+}
+
+static void CheckPoppedLabel(LabelDecl *L, Sema &S) {
+ // Verify that we have no forward references left. If so, there was a goto
+ // or address of a label taken, but no definition of it. Label fwd
+ // definitions are indicated with a null substmt which is also not a resolved
+ // MS inline assembly label name.
+ bool Diagnose = false;
+ if (L->isMSAsmLabel())
+ Diagnose = !L->isResolvedMSAsmLabel();
+ else
+ Diagnose = L->getStmt() == nullptr;
+ if (Diagnose)
+ S.Diag(L->getLocation(), diag::err_undeclared_label_use) <<L->getDeclName();
+}
+
+void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
+ S->mergeNRVOIntoParent();
+
+ if (S->decl_empty()) return;
+ assert((S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope)) &&
+ "Scope shouldn't contain decls!");
+
+ for (auto *TmpD : S->decls()) {
+ assert(TmpD && "This decl didn't get pushed??");
+
+ assert(isa<NamedDecl>(TmpD) && "Decl isn't NamedDecl?");
+ NamedDecl *D = cast<NamedDecl>(TmpD);
+
+ if (!D->getDeclName()) continue;
+
+ // Diagnose unused variables in this scope.
+ if (!S->hasUnrecoverableErrorOccurred()) {
+ DiagnoseUnusedDecl(D);
+ if (const auto *RD = dyn_cast<RecordDecl>(D))
+ DiagnoseUnusedNestedTypedefs(RD);
+ }
+
+ // If this was a forward reference to a label, verify it was defined.
+ if (LabelDecl *LD = dyn_cast<LabelDecl>(D))
+ CheckPoppedLabel(LD, *this);
+
+ // Remove this name from our lexical scope.
+ IdResolver.RemoveDecl(D);
+ }
+}
+
+/// \brief Look for an Objective-C class in the translation unit.
+///
+/// \param Id The name of the Objective-C class we're looking for. If
+/// typo-correction fixes this name, the Id will be updated
+/// to the fixed name.
+///
+/// \param IdLoc The location of the name in the translation unit.
+///
+/// \param DoTypoCorrection If true, this routine will attempt typo correction
+/// if there is no class with the given name.
+///
+/// \returns The declaration of the named Objective-C class, or NULL if the
+/// class could not be found.
+ObjCInterfaceDecl *Sema::getObjCInterfaceDecl(IdentifierInfo *&Id,
+ SourceLocation IdLoc,
+ bool DoTypoCorrection) {
+ // Perform an ordinary name lookup at translation unit scope; we aren't
+ // enabling lazy built-in creation from this context.
+ NamedDecl *IDecl = LookupSingleName(TUScope, Id, IdLoc, LookupOrdinaryName);
+
+ if (!IDecl && DoTypoCorrection) {
+ // Perform typo correction at the given location, but only if we
+ // find an Objective-C class name.
+ if (TypoCorrection C = CorrectTypo(
+ DeclarationNameInfo(Id, IdLoc), LookupOrdinaryName, TUScope, nullptr,
+ llvm::make_unique<DeclFilterCCC<ObjCInterfaceDecl>>(),
+ CTK_ErrorRecovery)) {
+ diagnoseTypo(C, PDiag(diag::err_undef_interface_suggest) << Id);
+ IDecl = C.getCorrectionDeclAs<ObjCInterfaceDecl>();
+ Id = IDecl->getIdentifier();
+ }
+ }
+ ObjCInterfaceDecl *Def = dyn_cast_or_null<ObjCInterfaceDecl>(IDecl);
+ // This routine must always return a class definition, if any.
+ if (Def && Def->getDefinition())
+ Def = Def->getDefinition();
+ return Def;
+}
+
+/// getNonFieldDeclScope - Retrieves the innermost scope, starting
+/// from S, where a non-field would be declared. This routine copes
+/// with the difference between C and C++ scoping rules in structs and
+/// unions. For example, the following code is well-formed in C but
+/// ill-formed in C++:
+/// @code
+/// struct S6 {
+/// enum { BAR } e;
+/// };
+///
+/// void test_S6() {
+/// struct S6 a;
+/// a.e = BAR;
+/// }
+/// @endcode
+/// For the declaration of BAR, this routine will return a different
+/// scope. The scope S will be the scope of the unnamed enumeration
+/// within S6. In C++, this routine will return the scope associated
+/// with S6, because the enumeration's scope is a transparent
+/// context but structures can contain non-field names. In C, this
+/// routine will return the translation unit scope, since the
+/// enumeration's scope is a transparent context and structures cannot
+/// contain non-field names.
+Scope *Sema::getNonFieldDeclScope(Scope *S) {
+ while (((S->getFlags() & Scope::DeclScope) == 0) ||
+ (S->getEntity() && S->getEntity()->isTransparentContext()) ||
+ (S->isClassScope() && !getLangOpts().CPlusPlus))
+ S = S->getParent();
+ return S;
+}
+
+/// \brief Looks up the declaration of "struct objc_super" and
+/// saves it for later use in building builtin declaration of
+/// objc_msgSendSuper and objc_msgSendSuper_stret. If no such
+/// pre-existing declaration exists no action takes place.
+static void LookupPredefedObjCSuperType(Sema &ThisSema, Scope *S,
+ IdentifierInfo *II) {
+ if (!II->isStr("objc_msgSendSuper"))
+ return;
+ ASTContext &Context = ThisSema.Context;
+
+ LookupResult Result(ThisSema, &Context.Idents.get("objc_super"),
+ SourceLocation(), Sema::LookupTagName);
+ ThisSema.LookupName(Result, S);
+ if (Result.getResultKind() == LookupResult::Found)
+ if (const TagDecl *TD = Result.getAsSingle<TagDecl>())
+ Context.setObjCSuperType(Context.getTagDeclType(TD));
+}
+
+static StringRef getHeaderName(ASTContext::GetBuiltinTypeError Error) {
+ switch (Error) {
+ case ASTContext::GE_None:
+ return "";
+ case ASTContext::GE_Missing_stdio:
+ return "stdio.h";
+ case ASTContext::GE_Missing_setjmp:
+ return "setjmp.h";
+ case ASTContext::GE_Missing_ucontext:
+ return "ucontext.h";
+ }
+ llvm_unreachable("unhandled error kind");
+}
+
+/// LazilyCreateBuiltin - The specified Builtin-ID was first used at
+/// file scope. Lazily create a decl for it. ForRedeclaration is true
+/// if we're creating this built-in in anticipation of redeclaring the
+/// built-in.
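+///
+/// For example, in C a call to 'printf' with no declaration in scope makes
+/// this routine synthesize a built-in declaration roughly equivalent to:
+/// @code
+/// extern int printf(const char *, ...);
+/// @endcode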
+NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
+ Scope *S, bool ForRedeclaration,
+ SourceLocation Loc) {
+ LookupPredefedObjCSuperType(*this, S, II);
+
+ ASTContext::GetBuiltinTypeError Error;
+ QualType R = Context.GetBuiltinType(ID, Error);
+ if (Error) {
+ if (ForRedeclaration)
+ Diag(Loc, diag::warn_implicit_decl_requires_sysheader)
+ << getHeaderName(Error) << Context.BuiltinInfo.getName(ID);
+ return nullptr;
+ }
+
+ if (!ForRedeclaration && Context.BuiltinInfo.isPredefinedLibFunction(ID)) {
+ Diag(Loc, diag::ext_implicit_lib_function_decl)
+ << Context.BuiltinInfo.getName(ID) << R;
+ if (Context.BuiltinInfo.getHeaderName(ID) &&
+ !Diags.isIgnored(diag::ext_implicit_lib_function_decl, Loc))
+ Diag(Loc, diag::note_include_header_or_declare)
+ << Context.BuiltinInfo.getHeaderName(ID)
+ << Context.BuiltinInfo.getName(ID);
+ }
+
+ DeclContext *Parent = Context.getTranslationUnitDecl();
+ if (getLangOpts().CPlusPlus) {
+ LinkageSpecDecl *CLinkageDecl =
+ LinkageSpecDecl::Create(Context, Parent, Loc, Loc,
+ LinkageSpecDecl::lang_c, false);
+ CLinkageDecl->setImplicit();
+ Parent->addDecl(CLinkageDecl);
+ Parent = CLinkageDecl;
+ }
+
+ FunctionDecl *New = FunctionDecl::Create(Context,
+ Parent,
+ Loc, Loc, II, R, /*TInfo=*/nullptr,
+ SC_Extern,
+ false,
+ R->isFunctionProtoType());
+ New->setImplicit();
+
+ // Create Decl objects for each parameter, adding them to the
+ // FunctionDecl.
+ if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(R)) {
+ SmallVector<ParmVarDecl*, 16> Params;
+ for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
+ ParmVarDecl *parm =
+ ParmVarDecl::Create(Context, New, SourceLocation(), SourceLocation(),
+ nullptr, FT->getParamType(i), /*TInfo=*/nullptr,
+ SC_None, nullptr);
+ parm->setScopeInfo(0, i);
+ Params.push_back(parm);
+ }
+ New->setParams(Params);
+ }
+
+ AddKnownFunctionAttributes(New);
+ RegisterLocallyScopedExternCDecl(New, S);
+
+ // TUScope is the translation-unit scope to insert this function into.
+ // FIXME: This is hideous. We need to teach PushOnScopeChains to
+ // relate Scopes to DeclContexts, and probably eliminate CurContext
+ // entirely, but we're not there yet.
+ DeclContext *SavedContext = CurContext;
+ CurContext = Parent;
+ PushOnScopeChains(New, TUScope);
+ CurContext = SavedContext;
+ return New;
+}
+
+/// Typedef declarations don't have linkage, but they still denote the same
+/// entity if their types are the same.
+/// FIXME: This is notionally doing the same thing as ASTReaderDecl's
+/// isSameEntity.
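+///
+/// For example, a local 'typedef float T;' should not conflict with a
+/// non-visible 'typedef int T;' from an unimported module, while a
+/// non-visible but identical 'typedef float T;' is kept and merged.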
+static void filterNonConflictingPreviousTypedefDecls(Sema &S,
+ TypedefNameDecl *Decl,
+ LookupResult &Previous) {
+ // This is only interesting when modules are enabled.
+ if (!S.getLangOpts().Modules && !S.getLangOpts().ModulesLocalVisibility)
+ return;
+
+ // Empty sets are uninteresting.
+ if (Previous.empty())
+ return;
+
+ LookupResult::Filter Filter = Previous.makeFilter();
+ while (Filter.hasNext()) {
+ NamedDecl *Old = Filter.next();
+
+ // Non-hidden declarations are never ignored.
+ if (S.isVisible(Old))
+ continue;
+
+ // Declarations of the same entity are not ignored, even if they have
+ // different linkages.
+ if (auto *OldTD = dyn_cast<TypedefNameDecl>(Old)) {
+ if (S.Context.hasSameType(OldTD->getUnderlyingType(),
+ Decl->getUnderlyingType()))
+ continue;
+
+ // If both declarations give a tag declaration a typedef name for linkage
+ // purposes, then they declare the same entity.
+ if (S.getLangOpts().CPlusPlus &&
+ OldTD->getAnonDeclWithTypedefName(/*AnyRedecl*/true) &&
+ Decl->getAnonDeclWithTypedefName())
+ continue;
+ }
+
+ Filter.erase();
+ }
+
+ Filter.done();
+}
+
+bool Sema::isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New) {
+ QualType OldType;
+ if (TypedefNameDecl *OldTypedef = dyn_cast<TypedefNameDecl>(Old))
+ OldType = OldTypedef->getUnderlyingType();
+ else
+ OldType = Context.getTypeDeclType(Old);
+ QualType NewType = New->getUnderlyingType();
+
+ if (NewType->isVariablyModifiedType()) {
+ // Must not redefine a typedef with a variably-modified type.
+ int Kind = isa<TypeAliasDecl>(Old) ? 1 : 0;
+ Diag(New->getLocation(), diag::err_redefinition_variably_modified_typedef)
+ << Kind << NewType;
+ if (Old->getLocation().isValid())
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ New->setInvalidDecl();
+ return true;
+ }
+
+ if (OldType != NewType &&
+ !OldType->isDependentType() &&
+ !NewType->isDependentType() &&
+ !Context.hasSameType(OldType, NewType)) {
+ int Kind = isa<TypeAliasDecl>(Old) ? 1 : 0;
+ Diag(New->getLocation(), diag::err_redefinition_different_typedef)
+ << Kind << NewType << OldType;
+ if (Old->getLocation().isValid())
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ New->setInvalidDecl();
+ return true;
+ }
+ return false;
+}
+
+/// MergeTypedefNameDecl - We just parsed a typedef 'New' which has the
+/// same name and scope as a previous declaration 'Old'. Figure out
+/// how to resolve this situation, merging decls or emitting
+/// diagnostics as appropriate. If there was an error, set New to be invalid.
+///
+void Sema::MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
+ LookupResult &OldDecls) {
+ // If the new decl is known invalid already, don't bother doing any
+ // merging checks.
+ if (New->isInvalidDecl()) return;
+
+ // Allow multiple definitions for ObjC built-in typedefs.
+ // FIXME: Verify the underlying types are equivalent!
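+ // (For instance, <objc/objc.h> declares 'typedef struct objc_object *id;',
+ // which redefines the built-in 'id' handled below.)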
+ if (getLangOpts().ObjC1) {
+ const IdentifierInfo *TypeID = New->getIdentifier();
+ switch (TypeID->getLength()) {
+ default: break;
+ case 2:
+ {
+ if (!TypeID->isStr("id"))
+ break;
+ QualType T = New->getUnderlyingType();
+ if (!T->isPointerType())
+ break;
+ if (!T->isVoidPointerType()) {
+ QualType PT = T->getAs<PointerType>()->getPointeeType();
+ if (!PT->isStructureType())
+ break;
+ }
+ Context.setObjCIdRedefinitionType(T);
+ // Install the built-in type for 'id', ignoring the current definition.
+ New->setTypeForDecl(Context.getObjCIdType().getTypePtr());
+ return;
+ }
+ case 5:
+ if (!TypeID->isStr("Class"))
+ break;
+ Context.setObjCClassRedefinitionType(New->getUnderlyingType());
+ // Install the built-in type for 'Class', ignoring the current definition.
+ New->setTypeForDecl(Context.getObjCClassType().getTypePtr());
+ return;
+ case 3:
+ if (!TypeID->isStr("SEL"))
+ break;
+ Context.setObjCSelRedefinitionType(New->getUnderlyingType());
+ // Install the built-in type for 'SEL', ignoring the current definition.
+ New->setTypeForDecl(Context.getObjCSelType().getTypePtr());
+ return;
+ }
+ // Fall through - the typedef name was not a builtin type.
+ }
+
+ // Verify the old decl was also a type.
+ TypeDecl *Old = OldDecls.getAsSingle<TypeDecl>();
+ if (!Old) {
+ Diag(New->getLocation(), diag::err_redefinition_different_kind)
+ << New->getDeclName();
+
+ NamedDecl *OldD = OldDecls.getRepresentativeDecl();
+ if (OldD->getLocation().isValid())
+ Diag(OldD->getLocation(), diag::note_previous_definition);
+
+ return New->setInvalidDecl();
+ }
+
+ // If the old declaration is invalid, just give up here.
+ if (Old->isInvalidDecl())
+ return New->setInvalidDecl();
+
+ if (auto *OldTD = dyn_cast<TypedefNameDecl>(Old)) {
+ auto *OldTag = OldTD->getAnonDeclWithTypedefName(/*AnyRedecl*/true);
+ auto *NewTag = New->getAnonDeclWithTypedefName();
+ NamedDecl *Hidden = nullptr;
+ if (getLangOpts().CPlusPlus && OldTag && NewTag &&
+ OldTag->getCanonicalDecl() != NewTag->getCanonicalDecl() &&
+ !hasVisibleDefinition(OldTag, &Hidden)) {
+ // There is a definition of this tag, but it is not visible. Use it
+ // instead of our tag.
+ New->setTypeForDecl(OldTD->getTypeForDecl());
+ if (OldTD->isModed())
+ New->setModedTypeSourceInfo(OldTD->getTypeSourceInfo(),
+ OldTD->getUnderlyingType());
+ else
+ New->setTypeSourceInfo(OldTD->getTypeSourceInfo());
+
+ // Make the old tag definition visible.
+ makeMergedDefinitionVisible(Hidden, NewTag->getLocation());
+
+ // If this was an unscoped enumeration, yank all of its enumerators
+ // out of the scope.
+ if (isa<EnumDecl>(NewTag)) {
+ Scope *EnumScope = getNonFieldDeclScope(S);
+ for (auto *D : NewTag->decls()) {
+ auto *ED = cast<EnumConstantDecl>(D);
+ assert(EnumScope->isDeclScope(ED));
+ EnumScope->RemoveDecl(ED);
+ IdResolver.RemoveDecl(ED);
+ ED->getLexicalDeclContext()->removeDecl(ED);
+ }
+ }
+ }
+ }
+
+ // If the typedef types are not identical, reject them in all languages and
+ // with any extensions enabled.
+ if (isIncompatibleTypedef(Old, New))
+ return;
+
+ // The types match. Link up the redeclaration chain and merge attributes if
+ // the old declaration was a typedef.
+ if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Old)) {
+ New->setPreviousDecl(Typedef);
+ mergeDeclAttributes(New, Old);
+ }
+
+ if (getLangOpts().MicrosoftExt)
+ return;
+
+ if (getLangOpts().CPlusPlus) {
+ // C++ [dcl.typedef]p2:
+ // In a given non-class scope, a typedef specifier can be used to
+ // redefine the name of any type declared in that scope to refer
+ // to the type to which it already refers.
+ if (!isa<CXXRecordDecl>(CurContext))
+ return;
+
+ // C++0x [dcl.typedef]p4:
+ // In a given class scope, a typedef specifier can be used to redefine
+ // any class-name declared in that scope that is not also a typedef-name
+ // to refer to the type to which it already refers.
+ //
+ // This wording came in via DR424, which was a correction to the
+ // wording in DR56, which accidentally banned code like:
+ //
+ // struct S {
+ // typedef struct A { } A;
+ // };
+ //
+ // in the C++03 standard. We implement the C++0x semantics, which
+ // allow the above but disallow
+ //
+ // struct S {
+ // typedef int I;
+ // typedef int I;
+ // };
+ //
+ // since that was the intent of DR56.
+ if (!isa<TypedefNameDecl>(Old))
+ return;
+
+ Diag(New->getLocation(), diag::err_redefinition)
+ << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+
+ // Modules always permit redefinition of typedefs, as does C11.
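+ // (C11 6.7p3 allows a typedef name to be redeclared to denote the same
+ // type, e.g. 'typedef int T; typedef int T;'.)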
+ if (getLangOpts().Modules || getLangOpts().C11)
+ return;
+
+ // If we have a redefinition of a typedef in C, emit a warning. This warning
+ // is normally mapped to an error, but can be controlled with
+ // -Wtypedef-redefinition. If either the original or the redefinition is
+ // in a system header, don't emit this for compatibility with GCC.
+ if (getDiagnostics().getSuppressSystemWarnings() &&
+ (Context.getSourceManager().isInSystemHeader(Old->getLocation()) ||
+ Context.getSourceManager().isInSystemHeader(New->getLocation())))
+ return;
+
+ Diag(New->getLocation(), diag::ext_redefinition_of_typedef)
+ << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+}
+
+/// DeclHasAttr - returns true if the decl \p D already has the target
+/// attribute.
+static bool DeclHasAttr(const Decl *D, const Attr *A) {
+ const OwnershipAttr *OA = dyn_cast<OwnershipAttr>(A);
+ const AnnotateAttr *Ann = dyn_cast<AnnotateAttr>(A);
+ for (const auto *i : D->attrs())
+ if (i->getKind() == A->getKind()) {
+ if (Ann) {
+ if (Ann->getAnnotation() == cast<AnnotateAttr>(i)->getAnnotation())
+ return true;
+ continue;
+ }
+ // FIXME: Don't hardcode this check
+ if (OA && isa<OwnershipAttr>(i))
+ return OA->getOwnKind() == cast<OwnershipAttr>(i)->getOwnKind();
+ return true;
+ }
+
+ return false;
+}
+
+static bool isAttributeTargetADefinition(Decl *D) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(D))
+ return VD->isThisDeclarationADefinition();
+ if (TagDecl *TD = dyn_cast<TagDecl>(D))
+ return TD->isCompleteDefinition() || TD->isBeingDefined();
+ return true;
+}
+
+/// Merge alignment attributes from \p Old to \p New, taking into account the
+/// special semantics of C11's _Alignas specifier and C++11's alignas attribute.
+///
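+/// For example, 'alignas(8) extern int x;' followed by 'alignas(4) extern
+/// int x;' is diagnosed as a mismatch. A non-defining redeclaration that
+/// omits alignas inherits the stricter alignment, while a definition that
+/// drops the specifier is diagnosed.
+///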
+/// \return \c true if any attributes were added to \p New.
+static bool mergeAlignedAttrs(Sema &S, NamedDecl *New, Decl *Old) {
+ // Look for alignas attributes on Old, and pick out whichever attribute
+ // specifies the strictest alignment requirement.
+ AlignedAttr *OldAlignasAttr = nullptr;
+ AlignedAttr *OldStrictestAlignAttr = nullptr;
+ unsigned OldAlign = 0;
+ for (auto *I : Old->specific_attrs<AlignedAttr>()) {
+ // FIXME: We have no way of representing inherited dependent alignments
+ // in a case like:
+ // template<int A, int B> struct alignas(A) X;
+ // template<int A, int B> struct alignas(B) X {};
+ // For now, we just ignore any alignas attributes which are not on the
+ // definition in such a case.
+ if (I->isAlignmentDependent())
+ return false;
+
+ if (I->isAlignas())
+ OldAlignasAttr = I;
+
+ unsigned Align = I->getAlignment(S.Context);
+ if (Align > OldAlign) {
+ OldAlign = Align;
+ OldStrictestAlignAttr = I;
+ }
+ }
+
+ // Look for alignas attributes on New.
+ AlignedAttr *NewAlignasAttr = nullptr;
+ unsigned NewAlign = 0;
+ for (auto *I : New->specific_attrs<AlignedAttr>()) {
+ if (I->isAlignmentDependent())
+ return false;
+
+ if (I->isAlignas())
+ NewAlignasAttr = I;
+
+ unsigned Align = I->getAlignment(S.Context);
+ if (Align > NewAlign)
+ NewAlign = Align;
+ }
+
+ if (OldAlignasAttr && NewAlignasAttr && OldAlign != NewAlign) {
+ // Both declarations have 'alignas' attributes. We require them to match.
+ // C++11 [dcl.align]p6 and C11 6.7.5/7 both come close to saying this, but
+ // fall short. (If two declarations both have alignas, they must both match
+ // every definition, and so must match each other if there is a definition.)
+
+ // If either declaration only contains 'alignas(0)' specifiers, then it
+ // specifies the natural alignment for the type.
+ if (OldAlign == 0 || NewAlign == 0) {
+ QualType Ty;
+ if (ValueDecl *VD = dyn_cast<ValueDecl>(New))
+ Ty = VD->getType();
+ else
+ Ty = S.Context.getTagDeclType(cast<TagDecl>(New));
+
+ if (OldAlign == 0)
+ OldAlign = S.Context.getTypeAlign(Ty);
+ if (NewAlign == 0)
+ NewAlign = S.Context.getTypeAlign(Ty);
+ }
+
+ if (OldAlign != NewAlign) {
+ S.Diag(NewAlignasAttr->getLocation(), diag::err_alignas_mismatch)
+ << (unsigned)S.Context.toCharUnitsFromBits(OldAlign).getQuantity()
+ << (unsigned)S.Context.toCharUnitsFromBits(NewAlign).getQuantity();
+ S.Diag(OldAlignasAttr->getLocation(), diag::note_previous_declaration);
+ }
+ }
+
+ if (OldAlignasAttr && !NewAlignasAttr && isAttributeTargetADefinition(New)) {
+ // C++11 [dcl.align]p6:
+ // if any declaration of an entity has an alignment-specifier,
+ // every defining declaration of that entity shall specify an
+ // equivalent alignment.
+ // C11 6.7.5/7:
+ // If the definition of an object does not have an alignment
+ // specifier, any other declaration of that object shall also
+ // have no alignment specifier.
+ S.Diag(New->getLocation(), diag::err_alignas_missing_on_definition)
+ << OldAlignasAttr;
+ S.Diag(OldAlignasAttr->getLocation(), diag::note_alignas_on_declaration)
+ << OldAlignasAttr;
+ }
+
+ bool AnyAdded = false;
+
+ // Ensure we have an attribute representing the strictest alignment.
+ if (OldAlign > NewAlign) {
+ AlignedAttr *Clone = OldStrictestAlignAttr->clone(S.Context);
+ Clone->setInherited(true);
+ New->addAttr(Clone);
+ AnyAdded = true;
+ }
+
+ // Ensure we have an alignas attribute if the old declaration had one.
+ if (OldAlignasAttr && !NewAlignasAttr &&
+ !(AnyAdded && OldStrictestAlignAttr->isAlignas())) {
+ AlignedAttr *Clone = OldAlignasAttr->clone(S.Context);
+ Clone->setInherited(true);
+ New->addAttr(Clone);
+ AnyAdded = true;
+ }
+
+ return AnyAdded;
+}
+
+static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
+ const InheritableAttr *Attr,
+ Sema::AvailabilityMergeKind AMK) {
+ InheritableAttr *NewAttr = nullptr;
+ unsigned AttrSpellingListIndex = Attr->getSpellingListIndex();
+ if (const auto *AA = dyn_cast<AvailabilityAttr>(Attr))
+ NewAttr = S.mergeAvailabilityAttr(D, AA->getRange(), AA->getPlatform(),
+ AA->getIntroduced(), AA->getDeprecated(),
+ AA->getObsoleted(), AA->getUnavailable(),
+ AA->getMessage(), AMK,
+ AttrSpellingListIndex);
+ else if (const auto *VA = dyn_cast<VisibilityAttr>(Attr))
+ NewAttr = S.mergeVisibilityAttr(D, VA->getRange(), VA->getVisibility(),
+ AttrSpellingListIndex);
+ else if (const auto *VA = dyn_cast<TypeVisibilityAttr>(Attr))
+ NewAttr = S.mergeTypeVisibilityAttr(D, VA->getRange(), VA->getVisibility(),
+ AttrSpellingListIndex);
+ else if (const auto *ImportA = dyn_cast<DLLImportAttr>(Attr))
+ NewAttr = S.mergeDLLImportAttr(D, ImportA->getRange(),
+ AttrSpellingListIndex);
+ else if (const auto *ExportA = dyn_cast<DLLExportAttr>(Attr))
+ NewAttr = S.mergeDLLExportAttr(D, ExportA->getRange(),
+ AttrSpellingListIndex);
+ else if (const auto *FA = dyn_cast<FormatAttr>(Attr))
+ NewAttr = S.mergeFormatAttr(D, FA->getRange(), FA->getType(),
+ FA->getFormatIdx(), FA->getFirstArg(),
+ AttrSpellingListIndex);
+ else if (const auto *SA = dyn_cast<SectionAttr>(Attr))
+ NewAttr = S.mergeSectionAttr(D, SA->getRange(), SA->getName(),
+ AttrSpellingListIndex);
+ else if (const auto *IA = dyn_cast<MSInheritanceAttr>(Attr))
+ NewAttr = S.mergeMSInheritanceAttr(D, IA->getRange(), IA->getBestCase(),
+ AttrSpellingListIndex,
+ IA->getSemanticSpelling());
+ else if (const auto *AA = dyn_cast<AlwaysInlineAttr>(Attr))
+ NewAttr = S.mergeAlwaysInlineAttr(D, AA->getRange(),
+ &S.Context.Idents.get(AA->getSpelling()),
+ AttrSpellingListIndex);
+ else if (const auto *MA = dyn_cast<MinSizeAttr>(Attr))
+ NewAttr = S.mergeMinSizeAttr(D, MA->getRange(), AttrSpellingListIndex);
+ else if (const auto *OA = dyn_cast<OptimizeNoneAttr>(Attr))
+ NewAttr = S.mergeOptimizeNoneAttr(D, OA->getRange(), AttrSpellingListIndex);
+ else if (const auto *InternalLinkageA = dyn_cast<InternalLinkageAttr>(Attr))
+ NewAttr = S.mergeInternalLinkageAttr(
+ D, InternalLinkageA->getRange(),
+ &S.Context.Idents.get(InternalLinkageA->getSpelling()),
+ AttrSpellingListIndex);
+ else if (const auto *CommonA = dyn_cast<CommonAttr>(Attr))
+ NewAttr = S.mergeCommonAttr(D, CommonA->getRange(),
+ &S.Context.Idents.get(CommonA->getSpelling()),
+ AttrSpellingListIndex);
+ else if (isa<AlignedAttr>(Attr))
+ // AlignedAttrs are handled separately, because we need to handle all
+ // such attributes on a declaration at the same time.
+ NewAttr = nullptr;
+ else if ((isa<DeprecatedAttr>(Attr) || isa<UnavailableAttr>(Attr)) &&
+ (AMK == Sema::AMK_Override ||
+ AMK == Sema::AMK_ProtocolImplementation))
+ NewAttr = nullptr;
+ else if (Attr->duplicatesAllowed() || !DeclHasAttr(D, Attr))
+ NewAttr = cast<InheritableAttr>(Attr->clone(S.Context));
+
+ if (NewAttr) {
+ NewAttr->setInherited(true);
+ D->addAttr(NewAttr);
+ return true;
+ }
+
+ return false;
+}
+
+static const Decl *getDefinition(const Decl *D) {
+ if (const TagDecl *TD = dyn_cast<TagDecl>(D))
+ return TD->getDefinition();
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ const VarDecl *Def = VD->getDefinition();
+ if (Def)
+ return Def;
+ return VD->getActingDefinition();
+ }
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ const FunctionDecl* Def;
+ if (FD->isDefined(Def))
+ return Def;
+ }
+ return nullptr;
+}
+
+static bool hasAttribute(const Decl *D, attr::Kind Kind) {
+ for (const auto *Attribute : D->attrs())
+ if (Attribute->getKind() == Kind)
+ return true;
+ return false;
+}
+
+/// checkNewAttributesAfterDef - If we already have a definition, check that
+/// there are no new attributes in this declaration.
+static void checkNewAttributesAfterDef(Sema &S, Decl *New, const Decl *Old) {
+ if (!New->hasAttrs())
+ return;
+
+ const Decl *Def = getDefinition(Old);
+ if (!Def || Def == New)
+ return;
+
+ AttrVec &NewAttributes = New->getAttrs();
+ for (unsigned I = 0, E = NewAttributes.size(); I != E;) {
+ const Attr *NewAttribute = NewAttributes[I];
+
+ if (isa<AliasAttr>(NewAttribute)) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(New)) {
+ Sema::SkipBodyInfo SkipBody;
+ S.CheckForFunctionRedefinition(FD, cast<FunctionDecl>(Def), &SkipBody);
+
+ // If we're skipping this definition, drop the "alias" attribute.
+ if (SkipBody.ShouldSkip) {
+ NewAttributes.erase(NewAttributes.begin() + I);
+ --E;
+ continue;
+ }
+ } else {
+ VarDecl *VD = cast<VarDecl>(New);
+ unsigned Diag = cast<VarDecl>(Def)->isThisDeclarationADefinition() ==
+ VarDecl::TentativeDefinition
+ ? diag::err_alias_after_tentative
+ : diag::err_redefinition;
+ S.Diag(VD->getLocation(), Diag) << VD->getDeclName();
+ S.Diag(Def->getLocation(), diag::note_previous_definition);
+ VD->setInvalidDecl();
+ }
+ ++I;
+ continue;
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(Def)) {
+ // Tentative definitions are only interesting for the alias check above.
+ if (VD->isThisDeclarationADefinition() != VarDecl::Definition) {
+ ++I;
+ continue;
+ }
+ }
+
+ if (hasAttribute(Def, NewAttribute->getKind())) {
+ ++I;
+ continue; // regular attr merging will take care of validating this.
+ }
+
+ if (isa<C11NoReturnAttr>(NewAttribute)) {
+ // C's _Noreturn is allowed to be added to a function after it is defined.
+ ++I;
+ continue;
+ } else if (const AlignedAttr *AA = dyn_cast<AlignedAttr>(NewAttribute)) {
+ if (AA->isAlignas()) {
+ // C++11 [dcl.align]p6:
+ // if any declaration of an entity has an alignment-specifier,
+ // every defining declaration of that entity shall specify an
+ // equivalent alignment.
+ // C11 6.7.5/7:
+ // If the definition of an object does not have an alignment
+ // specifier, any other declaration of that object shall also
+ // have no alignment specifier.
+ S.Diag(Def->getLocation(), diag::err_alignas_missing_on_definition)
+ << AA;
+ S.Diag(NewAttribute->getLocation(), diag::note_alignas_on_declaration)
+ << AA;
+ NewAttributes.erase(NewAttributes.begin() + I);
+ --E;
+ continue;
+ }
+ }
+
+ S.Diag(NewAttribute->getLocation(),
+ diag::warn_attribute_precede_definition);
+ S.Diag(Def->getLocation(), diag::note_previous_definition);
+ NewAttributes.erase(NewAttributes.begin() + I);
+ --E;
+ }
+}
+
+/// mergeDeclAttributes - Copy attributes from the Old decl to the New one.
+void Sema::mergeDeclAttributes(NamedDecl *New, Decl *Old,
+ AvailabilityMergeKind AMK) {
+ if (UsedAttr *OldAttr = Old->getMostRecentDecl()->getAttr<UsedAttr>()) {
+ UsedAttr *NewAttr = OldAttr->clone(Context);
+ NewAttr->setInherited(true);
+ New->addAttr(NewAttr);
+ }
+
+ if (!Old->hasAttrs() && !New->hasAttrs())
+ return;
+
+ // Attributes declared post-definition are currently ignored.
+ checkNewAttributesAfterDef(*this, New, Old);
+
+ if (AsmLabelAttr *NewA = New->getAttr<AsmLabelAttr>()) {
+ if (AsmLabelAttr *OldA = Old->getAttr<AsmLabelAttr>()) {
+ if (OldA->getLabel() != NewA->getLabel()) {
+ // This redeclaration changes __asm__ label.
+ Diag(New->getLocation(), diag::err_different_asm_label);
+ Diag(OldA->getLocation(), diag::note_previous_declaration);
+ }
+ } else if (Old->isUsed()) {
+ // This redeclaration adds an __asm__ label to a declaration that has
+ // already been ODR-used.
+ Diag(New->getLocation(), diag::err_late_asm_label_name)
+ << isa<FunctionDecl>(Old) << New->getAttr<AsmLabelAttr>()->getRange();
+ }
+ }
+
+ if (!Old->hasAttrs())
+ return;
+
+ bool foundAny = New->hasAttrs();
+
+ // Ensure that any moving of objects within the allocated map is done before
+ // we process them.
+ if (!foundAny) New->setAttrs(AttrVec());
+
+ for (auto *I : Old->specific_attrs<InheritableAttr>()) {
+ // Ignore deprecated/unavailable/availability attributes if requested.
+ AvailabilityMergeKind LocalAMK = AMK_None;
+ if (isa<DeprecatedAttr>(I) ||
+ isa<UnavailableAttr>(I) ||
+ isa<AvailabilityAttr>(I)) {
+ switch (AMK) {
+ case AMK_None:
+ continue;
+
+ case AMK_Redeclaration:
+ case AMK_Override:
+ case AMK_ProtocolImplementation:
+ LocalAMK = AMK;
+ break;
+ }
+ }
+
+ // Already handled.
+ if (isa<UsedAttr>(I))
+ continue;
+
+ if (mergeDeclAttribute(*this, New, I, LocalAMK))
+ foundAny = true;
+ }
+
+ if (mergeAlignedAttrs(*this, New, Old))
+ foundAny = true;
+
+ if (!foundAny) New->dropAttrs();
+}
+
+/// mergeParamDeclAttributes - Copy attributes from the old parameter
+/// to the new one.
+static void mergeParamDeclAttributes(ParmVarDecl *newDecl,
+ const ParmVarDecl *oldDecl,
+ Sema &S) {
+ // C++11 [dcl.attr.depend]p2:
+ // The first declaration of a function shall specify the
+ // carries_dependency attribute for its declarator-id if any declaration
+ // of the function specifies the carries_dependency attribute.
+ const CarriesDependencyAttr *CDA = newDecl->getAttr<CarriesDependencyAttr>();
+ if (CDA && !oldDecl->hasAttr<CarriesDependencyAttr>()) {
+ S.Diag(CDA->getLocation(),
+ diag::err_carries_dependency_missing_on_first_decl) << 1/*Param*/;
+ // Find the first declaration of the parameter.
+ // FIXME: Should we build redeclaration chains for function parameters?
+ const FunctionDecl *FirstFD =
+ cast<FunctionDecl>(oldDecl->getDeclContext())->getFirstDecl();
+ const ParmVarDecl *FirstVD =
+ FirstFD->getParamDecl(oldDecl->getFunctionScopeIndex());
+ S.Diag(FirstVD->getLocation(),
+ diag::note_carries_dependency_missing_first_decl) << 1/*Param*/;
+ }
+
+ if (!oldDecl->hasAttrs())
+ return;
+
+ bool foundAny = newDecl->hasAttrs();
+
+ // Ensure that any moving of objects within the allocated map is
+ // done before we process them.
+ if (!foundAny) newDecl->setAttrs(AttrVec());
+
+ for (const auto *I : oldDecl->specific_attrs<InheritableParamAttr>()) {
+ if (!DeclHasAttr(newDecl, I)) {
+ InheritableAttr *newAttr =
+ cast<InheritableParamAttr>(I->clone(S.Context));
+ newAttr->setInherited(true);
+ newDecl->addAttr(newAttr);
+ foundAny = true;
+ }
+ }
+
+ if (!foundAny) newDecl->dropAttrs();
+}
+
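+/// Merge nullability from an old parameter's type into the corresponding
+/// new parameter. For instance, if the old parameter was declared
+/// 'int * _Nonnull p' and the new one plain 'int *p', the new parameter
+/// inherits _Nonnull; conflicting explicit nullability is diagnosed as a
+/// mismatch instead.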
+static void mergeParamDeclTypes(ParmVarDecl *NewParam,
+ const ParmVarDecl *OldParam,
+ Sema &S) {
+ if (auto Oldnullability = OldParam->getType()->getNullability(S.Context)) {
+ if (auto Newnullability = NewParam->getType()->getNullability(S.Context)) {
+ if (*Oldnullability != *Newnullability) {
+ S.Diag(NewParam->getLocation(), diag::warn_mismatched_nullability_attr)
+ << DiagNullabilityKind(
+ *Newnullability,
+ ((NewParam->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability)
+ != 0))
+ << DiagNullabilityKind(
+ *Oldnullability,
+ ((OldParam->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability)
+ != 0));
+ S.Diag(OldParam->getLocation(), diag::note_previous_declaration);
+ }
+ } else {
+ QualType NewT = NewParam->getType();
+ NewT = S.Context.getAttributedType(
+ AttributedType::getNullabilityAttrKind(*Oldnullability),
+ NewT, NewT);
+ NewParam->setType(NewT);
+ }
+ }
+}
+
+namespace {
+
+/// Used in MergeFunctionDecl to keep track of function parameters in
+/// C.
+struct GNUCompatibleParamWarning {
+ ParmVarDecl *OldParm;
+ ParmVarDecl *NewParm;
+ QualType PromotedType;
+};
+
+}
+
+/// getSpecialMember - get the special member enum for a method.
+Sema::CXXSpecialMember Sema::getSpecialMember(const CXXMethodDecl *MD) {
+ if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
+ if (Ctor->isDefaultConstructor())
+ return Sema::CXXDefaultConstructor;
+
+ if (Ctor->isCopyConstructor())
+ return Sema::CXXCopyConstructor;
+
+ if (Ctor->isMoveConstructor())
+ return Sema::CXXMoveConstructor;
+ } else if (isa<CXXDestructorDecl>(MD)) {
+ return Sema::CXXDestructor;
+ } else if (MD->isCopyAssignmentOperator()) {
+ return Sema::CXXCopyAssignment;
+ } else if (MD->isMoveAssignmentOperator()) {
+ return Sema::CXXMoveAssignment;
+ }
+
+ return Sema::CXXInvalid;
+}
+
+// Determine whether the previous declaration was a definition, an implicit
+// declaration, or an ordinary declaration.
+template <typename T>
+static std::pair<diag::kind, SourceLocation>
+getNoteDiagForInvalidRedeclaration(const T *Old, const T *New) {
+ diag::kind PrevDiag;
+ SourceLocation OldLocation = Old->getLocation();
+ if (Old->isThisDeclarationADefinition())
+ PrevDiag = diag::note_previous_definition;
+ else if (Old->isImplicit()) {
+ PrevDiag = diag::note_previous_implicit_declaration;
+ if (OldLocation.isInvalid())
+ OldLocation = New->getLocation();
+ } else
+ PrevDiag = diag::note_previous_declaration;
+ return std::make_pair(PrevDiag, OldLocation);
+}
+
+/// canRedefineFunction - checks if a function can be redefined. Currently,
+/// only extern inline functions can be redefined, and even then only in
+/// GNU89 mode.
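+/// For instance, in gnu89:
+///   extern inline int f(void) { return 0; }  // inline definition only
+///   int f(void) { return 1; }                // OK: the real definition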
+static bool canRedefineFunction(const FunctionDecl *FD,
+ const LangOptions& LangOpts) {
+ return ((FD->hasAttr<GNUInlineAttr>() || LangOpts.GNUInline) &&
+ !LangOpts.CPlusPlus &&
+ FD->isInlineSpecified() &&
+ FD->getStorageClass() == SC_Extern);
+}
+
+const AttributedType *Sema::getCallingConvAttributedType(QualType T) const {
+ const AttributedType *AT = T->getAs<AttributedType>();
+ while (AT && !AT->isCallingConv())
+ AT = AT->getModifiedType()->getAs<AttributedType>();
+ return AT;
+}
+
+template <typename T>
+static bool haveIncompatibleLanguageLinkages(const T *Old, const T *New) {
+ const DeclContext *DC = Old->getDeclContext();
+ if (DC->isRecord())
+ return false;
+
+ LanguageLinkage OldLinkage = Old->getLanguageLinkage();
+ if (OldLinkage == CXXLanguageLinkage && New->isInExternCContext())
+ return true;
+ if (OldLinkage == CLanguageLinkage && New->isInExternCXXContext())
+ return true;
+ return false;
+}
+
+template<typename T> static bool isExternC(T *D) { return D->isExternC(); }
+static bool isExternC(VarTemplateDecl *) { return false; }
+
+/// \brief Check whether a redeclaration of an entity introduced by a
+/// using-declaration is valid, given that we know it's not an overload
+/// (nor a hidden tag declaration).
+template<typename ExpectedDecl>
+static bool checkUsingShadowRedecl(Sema &S, UsingShadowDecl *OldS,
+ ExpectedDecl *New) {
+ // C++11 [basic.scope.declarative]p4:
+ // Given a set of declarations in a single declarative region, each of
+ // which specifies the same unqualified name,
+ // -- they shall all refer to the same entity, or all refer to functions
+ // and function templates; or
+ // -- exactly one declaration shall declare a class name or enumeration
+ // name that is not a typedef name and the other declarations shall all
+ // refer to the same variable or enumerator, or all refer to functions
+ // and function templates; in this case the class name or enumeration
+ // name is hidden (3.3.10).
+
+ // C++11 [namespace.udecl]p14:
+ // If a function declaration in namespace scope or block scope has the
+ // same name and the same parameter-type-list as a function introduced
+ // by a using-declaration, and the declarations do not declare the same
+ // function, the program is ill-formed.
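+ //
+ // For example:
+ //   namespace N { void f(int); }
+ //   using N::f;
+ //   void f(int);  // ill-formed: conflicts with the using-declaration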
+
+ auto *Old = dyn_cast<ExpectedDecl>(OldS->getTargetDecl());
+ if (Old &&
+ !Old->getDeclContext()->getRedeclContext()->Equals(
+ New->getDeclContext()->getRedeclContext()) &&
+ !(isExternC(Old) && isExternC(New)))
+ Old = nullptr;
+
+ if (!Old) {
+ S.Diag(New->getLocation(), diag::err_using_decl_conflict_reverse);
+ S.Diag(OldS->getTargetDecl()->getLocation(), diag::note_using_decl_target);
+ S.Diag(OldS->getUsingDecl()->getLocation(), diag::note_using_decl) << 0;
+ return true;
+ }
+ return false;
+}
+
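+/// Determine whether two function declarations have identical
+/// pass_object_size attributes on each corresponding parameter; e.g. a
+/// parameter declared 'void *p __attribute__((pass_object_size(0)))' does
+/// not match a plain 'void *p' in a redeclaration.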
+static bool hasIdenticalPassObjectSizeAttrs(const FunctionDecl *A,
+ const FunctionDecl *B) {
+ assert(A->getNumParams() == B->getNumParams());
+
+ auto AttrEq = [](const ParmVarDecl *A, const ParmVarDecl *B) {
+ const auto *AttrA = A->getAttr<PassObjectSizeAttr>();
+ const auto *AttrB = B->getAttr<PassObjectSizeAttr>();
+ if (AttrA == AttrB)
+ return true;
+ return AttrA && AttrB && AttrA->getType() == AttrB->getType();
+ };
+
+ return std::equal(A->param_begin(), A->param_end(), B->param_begin(), AttrEq);
+}
+
+/// MergeFunctionDecl - We just parsed a function 'New' from
+/// declarator D which has the same name and scope as a previous
+/// declaration 'Old'. Figure out how to resolve this situation,
+/// merging decls or emitting diagnostics as appropriate.
+///
+/// In C++, New and Old must be declarations that are not
+/// overloaded. Use IsOverload to determine whether New and Old are
+/// overloaded, and to select the Old declaration that New should be
+/// merged with.
+///
+/// Returns true if there was an error, false otherwise.
+bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
+ Scope *S, bool MergeTypeWithOld) {
+ // Verify the old decl was also a function.
+ FunctionDecl *Old = OldD->getAsFunction();
+ if (!Old) {
+ if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(OldD)) {
+ if (New->getFriendObjectKind()) {
+ Diag(New->getLocation(), diag::err_using_decl_friend);
+ Diag(Shadow->getTargetDecl()->getLocation(),
+ diag::note_using_decl_target);
+ Diag(Shadow->getUsingDecl()->getLocation(),
+ diag::note_using_decl) << 0;
+ return true;
+ }
+
+ // Check whether the two declarations might declare the same function.
+ if (checkUsingShadowRedecl<FunctionDecl>(*this, Shadow, New))
+ return true;
+ OldD = Old = cast<FunctionDecl>(Shadow->getTargetDecl());
+ } else {
+ Diag(New->getLocation(), diag::err_redefinition_different_kind)
+ << New->getDeclName();
+ Diag(OldD->getLocation(), diag::note_previous_definition);
+ return true;
+ }
+ }
+
+ // If the old declaration is invalid, just give up here.
+ if (Old->isInvalidDecl())
+ return true;
+
+ diag::kind PrevDiag;
+ SourceLocation OldLocation;
+ std::tie(PrevDiag, OldLocation) =
+ getNoteDiagForInvalidRedeclaration(Old, New);
+
+ // Don't complain about this if we're in GNU89 mode and the old function
+ // is an extern inline function.
+ // Don't complain about specializations. They are not supposed to have
+ // storage classes.
+ if (!isa<CXXMethodDecl>(New) && !isa<CXXMethodDecl>(Old) &&
+ New->getStorageClass() == SC_Static &&
+ Old->hasExternalFormalLinkage() &&
+ !New->getTemplateSpecializationInfo() &&
+ !canRedefineFunction(Old, getLangOpts())) {
+ if (getLangOpts().MicrosoftExt) {
+ Diag(New->getLocation(), diag::ext_static_non_static) << New;
+ Diag(OldLocation, PrevDiag);
+ } else {
+ Diag(New->getLocation(), diag::err_static_non_static) << New;
+ Diag(OldLocation, PrevDiag);
+ return true;
+ }
+ }
+
+ if (New->hasAttr<InternalLinkageAttr>() &&
+ !Old->hasAttr<InternalLinkageAttr>()) {
+ Diag(New->getLocation(), diag::err_internal_linkage_redeclaration)
+ << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ New->dropAttr<InternalLinkageAttr>();
+ }
+
+ // If a function is first declared with a calling convention, but is later
+ // declared or defined without one, all following decls assume the calling
+ // convention of the first.
+ //
+ // It's OK if a function is first declared without a calling convention,
+ // but is later declared or defined with the default calling convention.
+ //
+ // To test if either decl has an explicit calling convention, we look for
+ // AttributedType sugar nodes on the type as written. If they are missing or
+ // were canonicalized away, we assume the calling convention was implicit.
+ //
+ // Note also that we DO NOT return at this point, because we still have
+ // other tests to run.
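+ //
+ // For instance, on a target where __stdcall is meaningful:
+ //   void __stdcall f(void);  // explicit calling convention
+ //   void f(void);            // OK: inherits __stdcall
+ //   void __cdecl f(void);    // error: calling convention changed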
+ QualType OldQType = Context.getCanonicalType(Old->getType());
+ QualType NewQType = Context.getCanonicalType(New->getType());
+ const FunctionType *OldType = cast<FunctionType>(OldQType);
+ const FunctionType *NewType = cast<FunctionType>(NewQType);
+ FunctionType::ExtInfo OldTypeInfo = OldType->getExtInfo();
+ FunctionType::ExtInfo NewTypeInfo = NewType->getExtInfo();
+ bool RequiresAdjustment = false;
+
+ if (OldTypeInfo.getCC() != NewTypeInfo.getCC()) {
+ FunctionDecl *First = Old->getFirstDecl();
+ const FunctionType *FT =
+ First->getType().getCanonicalType()->castAs<FunctionType>();
+ FunctionType::ExtInfo FI = FT->getExtInfo();
+ bool NewCCExplicit = getCallingConvAttributedType(New->getType());
+ if (!NewCCExplicit) {
+ // Inherit the CC from the previous declaration if it was specified
+ // there but not here.
+ NewTypeInfo = NewTypeInfo.withCallingConv(OldTypeInfo.getCC());
+ RequiresAdjustment = true;
+ } else {
+ // Calling conventions aren't compatible, so complain.
+ bool FirstCCExplicit = getCallingConvAttributedType(First->getType());
+ Diag(New->getLocation(), diag::err_cconv_change)
+ << FunctionType::getNameForCallConv(NewTypeInfo.getCC())
+ << !FirstCCExplicit
+ << (!FirstCCExplicit ? "" :
+ FunctionType::getNameForCallConv(FI.getCC()));
+
+ // Put the note on the first decl, since it is the one that matters.
+ Diag(First->getLocation(), diag::note_previous_declaration);
+ return true;
+ }
+ }
+
+ // FIXME: diagnose the other way around?
+ if (OldTypeInfo.getNoReturn() && !NewTypeInfo.getNoReturn()) {
+ NewTypeInfo = NewTypeInfo.withNoReturn(true);
+ RequiresAdjustment = true;
+ }
+
+ // Merge regparm attribute.
+ if (OldTypeInfo.getHasRegParm() != NewTypeInfo.getHasRegParm() ||
+ OldTypeInfo.getRegParm() != NewTypeInfo.getRegParm()) {
+ if (NewTypeInfo.getHasRegParm()) {
+ Diag(New->getLocation(), diag::err_regparm_mismatch)
+ << NewType->getRegParmType()
+ << OldType->getRegParmType();
+ Diag(OldLocation, diag::note_previous_declaration);
+ return true;
+ }
+
+ NewTypeInfo = NewTypeInfo.withRegParm(OldTypeInfo.getRegParm());
+ RequiresAdjustment = true;
+ }
+
+ // Merge ns_returns_retained attribute.
+ if (OldTypeInfo.getProducesResult() != NewTypeInfo.getProducesResult()) {
+ if (NewTypeInfo.getProducesResult()) {
+ Diag(New->getLocation(), diag::err_returns_retained_mismatch);
+ Diag(OldLocation, diag::note_previous_declaration);
+ return true;
+ }
+
+ NewTypeInfo = NewTypeInfo.withProducesResult(true);
+ RequiresAdjustment = true;
+ }
+
+ if (RequiresAdjustment) {
+ const FunctionType *AdjustedType = New->getType()->getAs<FunctionType>();
+ AdjustedType = Context.adjustFunctionType(AdjustedType, NewTypeInfo);
+ New->setType(QualType(AdjustedType, 0));
+ NewQType = Context.getCanonicalType(New->getType());
+ NewType = cast<FunctionType>(NewQType);
+ }
+
+ // If this redeclaration makes the function inline, we may need to add it to
+ // UndefinedButUsed.
+ if (!Old->isInlined() && New->isInlined() &&
+ !New->hasAttr<GNUInlineAttr>() &&
+ !getLangOpts().GNUInline &&
+ Old->isUsed(false) &&
+ !Old->isDefined() && !New->isThisDeclarationADefinition())
+ UndefinedButUsed.insert(std::make_pair(Old->getCanonicalDecl(),
+ SourceLocation()));
+
+ // If this redeclaration makes it newly gnu_inline, we don't want to warn
+ // about it.
+ if (New->hasAttr<GNUInlineAttr>() &&
+ Old->isInlined() && !Old->hasAttr<GNUInlineAttr>()) {
+ UndefinedButUsed.erase(Old->getCanonicalDecl());
+ }
+
+ // If pass_object_size params don't match up perfectly, this isn't a valid
+ // redeclaration.
+ if (Old->getNumParams() > 0 && Old->getNumParams() == New->getNumParams() &&
+ !hasIdenticalPassObjectSizeAttrs(Old, New)) {
+ Diag(New->getLocation(), diag::err_different_pass_object_size_params)
+ << New->getDeclName();
+ Diag(OldLocation, PrevDiag) << Old << Old->getType();
+ return true;
+ }
+
+ if (getLangOpts().CPlusPlus) {
+ // (C++98 13.1p2):
+ // Certain function declarations cannot be overloaded:
+ // -- Function declarations that differ only in the return type
+ // cannot be overloaded.
+
+ // Go back to the type source info to compare the declared return types,
+ // per C++1y [dcl.type.auto]p13:
+ // Redeclarations or specializations of a function or function template
+ // with a declared return type that uses a placeholder type shall also
+ // use that placeholder, not a deduced type.
+ QualType OldDeclaredReturnType =
+ (Old->getTypeSourceInfo()
+ ? Old->getTypeSourceInfo()->getType()->castAs<FunctionType>()
+ : OldType)->getReturnType();
+ QualType NewDeclaredReturnType =
+ (New->getTypeSourceInfo()
+ ? New->getTypeSourceInfo()->getType()->castAs<FunctionType>()
+ : NewType)->getReturnType();
+ QualType ResQT;
+ if (!Context.hasSameType(OldDeclaredReturnType, NewDeclaredReturnType) &&
+ !((NewQType->isDependentType() || OldQType->isDependentType()) &&
+ New->isLocalExternDecl())) {
+ if (NewDeclaredReturnType->isObjCObjectPointerType() &&
+ OldDeclaredReturnType->isObjCObjectPointerType())
+ ResQT = Context.mergeObjCGCQualifiers(NewQType, OldQType);
+ if (ResQT.isNull()) {
+ if (New->isCXXClassMember() && New->isOutOfLine())
+ Diag(New->getLocation(), diag::err_member_def_does_not_match_ret_type)
+ << New << New->getReturnTypeSourceRange();
+ else
+ Diag(New->getLocation(), diag::err_ovl_diff_return_type)
+ << New->getReturnTypeSourceRange();
+ Diag(OldLocation, PrevDiag) << Old << Old->getType()
+ << Old->getReturnTypeSourceRange();
+ return true;
+ } else
+ NewQType = ResQT;
+ }
+
+ QualType OldReturnType = OldType->getReturnType();
+ QualType NewReturnType = cast<FunctionType>(NewQType)->getReturnType();
+ if (OldReturnType != NewReturnType) {
+ // If this function has a deduced return type and has already been
+ // defined, copy the deduced value from the old declaration.
+ AutoType *OldAT = Old->getReturnType()->getContainedAutoType();
+ if (OldAT && OldAT->isDeduced()) {
+ New->setType(
+ SubstAutoType(New->getType(),
+ OldAT->isDependentType() ? Context.DependentTy
+ : OldAT->getDeducedType()));
+ NewQType = Context.getCanonicalType(
+ SubstAutoType(NewQType,
+ OldAT->isDependentType() ? Context.DependentTy
+ : OldAT->getDeducedType()));
+ }
+ }
+
+ const CXXMethodDecl *OldMethod = dyn_cast<CXXMethodDecl>(Old);
+ CXXMethodDecl *NewMethod = dyn_cast<CXXMethodDecl>(New);
+ if (OldMethod && NewMethod) {
+ // Preserve triviality.
+ NewMethod->setTrivial(OldMethod->isTrivial());
+
+ // MSVC allows explicit template specialization at class scope:
+ // 2 CXXMethodDecls referring to the same function will be injected.
+ // We don't want a redeclaration error.
+ bool IsClassScopeExplicitSpecialization =
+ OldMethod->isFunctionTemplateSpecialization() &&
+ NewMethod->isFunctionTemplateSpecialization();
+ bool isFriend = NewMethod->getFriendObjectKind();
+
+ if (!isFriend && NewMethod->getLexicalDeclContext()->isRecord() &&
+ !IsClassScopeExplicitSpecialization) {
+ // -- Member function declarations with the same name and the
+ // same parameter types cannot be overloaded if any of them
+ // is a static member function declaration.
+ if (OldMethod->isStatic() != NewMethod->isStatic()) {
+ Diag(New->getLocation(), diag::err_ovl_static_nonstatic_member);
+ Diag(OldLocation, PrevDiag) << Old << Old->getType();
+ return true;
+ }
+
+ // C++ [class.mem]p1:
+ // [...] A member shall not be declared twice in the
+ // member-specification, except that a nested class or member
+ // class template can be declared and then later defined.
+ if (ActiveTemplateInstantiations.empty()) {
+ unsigned NewDiag;
+ if (isa<CXXConstructorDecl>(OldMethod))
+ NewDiag = diag::err_constructor_redeclared;
+ else if (isa<CXXDestructorDecl>(NewMethod))
+ NewDiag = diag::err_destructor_redeclared;
+ else if (isa<CXXConversionDecl>(NewMethod))
+ NewDiag = diag::err_conv_function_redeclared;
+ else
+ NewDiag = diag::err_member_redeclared;
+
+ Diag(New->getLocation(), NewDiag);
+ } else {
+ Diag(New->getLocation(), diag::err_member_redeclared_in_instantiation)
+ << New << New->getType();
+ }
+ Diag(OldLocation, PrevDiag) << Old << Old->getType();
+ return true;
+
+ // Complain if this is an explicit declaration of a special
+ // member that was initially declared implicitly.
+ //
+ // As an exception, it's okay to befriend such methods in order
+ // to permit the implicit constructor/destructor/operator calls.
+ } else if (OldMethod->isImplicit()) {
+ if (isFriend) {
+ NewMethod->setImplicit();
+ } else {
+ Diag(NewMethod->getLocation(),
+ diag::err_definition_of_implicitly_declared_member)
+ << New << getSpecialMember(OldMethod);
+ return true;
+ }
+ } else if (OldMethod->isExplicitlyDefaulted() && !isFriend) {
+ Diag(NewMethod->getLocation(),
+ diag::err_definition_of_explicitly_defaulted_member)
+ << getSpecialMember(OldMethod);
+ return true;
+ }
+ }
+
+ // C++11 [dcl.attr.noreturn]p1:
+ // The first declaration of a function shall specify the noreturn
+ // attribute if any declaration of that function specifies the noreturn
+ // attribute.
+ const CXX11NoReturnAttr *NRA = New->getAttr<CXX11NoReturnAttr>();
+ if (NRA && !Old->hasAttr<CXX11NoReturnAttr>()) {
+ Diag(NRA->getLocation(), diag::err_noreturn_missing_on_first_decl);
+ Diag(Old->getFirstDecl()->getLocation(),
+ diag::note_noreturn_missing_first_decl);
+ }
+
+ // C++11 [dcl.attr.depend]p2:
+ // The first declaration of a function shall specify the
+ // carries_dependency attribute for its declarator-id if any declaration
+ // of the function specifies the carries_dependency attribute.
+ const CarriesDependencyAttr *CDA = New->getAttr<CarriesDependencyAttr>();
+ if (CDA && !Old->hasAttr<CarriesDependencyAttr>()) {
+ Diag(CDA->getLocation(),
+ diag::err_carries_dependency_missing_on_first_decl) << 0/*Function*/;
+ Diag(Old->getFirstDecl()->getLocation(),
+ diag::note_carries_dependency_missing_first_decl) << 0/*Function*/;
+ }
+
+ // (C++98 8.3.5p3):
+ // All declarations for a function shall agree exactly in both the
+ // return type and the parameter-type-list.
+ // We also want to respect all the extended bits except noreturn.
+
+ // noreturn should now match unless the old type info didn't have it.
+ QualType OldQTypeForComparison = OldQType;
+ if (!OldTypeInfo.getNoReturn() && NewTypeInfo.getNoReturn()) {
+ assert(OldQType == QualType(OldType, 0));
+ const FunctionType *OldTypeForComparison
+ = Context.adjustFunctionType(OldType, OldTypeInfo.withNoReturn(true));
+ OldQTypeForComparison = QualType(OldTypeForComparison, 0);
+ assert(OldQTypeForComparison.isCanonical());
+ }
+
+ if (haveIncompatibleLanguageLinkages(Old, New)) {
+ // As a special case, retain the language linkage from previous
+ // declarations of a friend function as an extension.
+ //
+ // This liberal interpretation of C++ [class.friend]p3 matches GCC/MSVC
+ // and is useful because there's otherwise no way to specify language
+ // linkage within class scope.
+ //
+ // Check cautiously as the friend object kind isn't yet complete.
+ if (New->getFriendObjectKind() != Decl::FOK_None) {
+ Diag(New->getLocation(), diag::ext_retained_language_linkage) << New;
+ Diag(OldLocation, PrevDiag);
+ } else {
+ Diag(New->getLocation(), diag::err_different_language_linkage) << New;
+ Diag(OldLocation, PrevDiag);
+ return true;
+ }
+ }
+
+ if (OldQTypeForComparison == NewQType)
+ return MergeCompatibleFunctionDecls(New, Old, S, MergeTypeWithOld);
+
+ if ((NewQType->isDependentType() || OldQType->isDependentType()) &&
+ New->isLocalExternDecl()) {
+ // It's OK if we couldn't merge types for a local function declaration
+ // if either the old or new type is dependent. We'll merge the types
+ // when we instantiate the function.
+ return false;
+ }
+
+ // Fall through for conflicting redeclarations and redefinitions.
+ }
+
+ // C: Function types need to be compatible, not identical. This handles
+ // duplicate function decls like "void f(int); void f(enum X);" properly.
+ if (!getLangOpts().CPlusPlus &&
+ Context.typesAreCompatible(OldQType, NewQType)) {
+ const FunctionType *OldFuncType = OldQType->getAs<FunctionType>();
+ const FunctionType *NewFuncType = NewQType->getAs<FunctionType>();
+ const FunctionProtoType *OldProto = nullptr;
+ if (MergeTypeWithOld && isa<FunctionNoProtoType>(NewFuncType) &&
+ (OldProto = dyn_cast<FunctionProtoType>(OldFuncType))) {
+ // The old declaration provided a function prototype, but the
+ // new declaration does not. Merge in the prototype.
+ assert(!OldProto->hasExceptionSpec() && "Exception spec in C");
+ SmallVector<QualType, 16> ParamTypes(OldProto->param_types());
+ NewQType =
+ Context.getFunctionType(NewFuncType->getReturnType(), ParamTypes,
+ OldProto->getExtProtoInfo());
+ New->setType(NewQType);
+ New->setHasInheritedPrototype();
+
+ // Synthesize parameters with the same types.
+ SmallVector<ParmVarDecl*, 16> Params;
+ for (const auto &ParamType : OldProto->param_types()) {
+ ParmVarDecl *Param = ParmVarDecl::Create(Context, New, SourceLocation(),
+ SourceLocation(), nullptr,
+ ParamType, /*TInfo=*/nullptr,
+ SC_None, nullptr);
+ Param->setScopeInfo(0, Params.size());
+ Param->setImplicit();
+ Params.push_back(Param);
+ }
+
+ New->setParams(Params);
+ }
+
+ return MergeCompatibleFunctionDecls(New, Old, S, MergeTypeWithOld);
+ }
+
+ // GNU C permits a K&R definition to follow a prototype declaration
+ // if the declared types of the parameters in the K&R definition
+ // match the types in the prototype declaration, even when the
+ // promoted types of the parameters from the K&R definition differ
+ // from the types in the prototype. GCC then keeps the types from
+ // the prototype.
+ //
+ // If a variadic prototype is followed by a non-variadic K&R definition,
+ // the K&R definition becomes variadic. This is sort of an edge case, but
+ // it's legal per the standard depending on how you read C99 6.7.5.3p15 and
+ // C99 6.9.1p8.
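+ //
+ // For example, 'int f(short x);' may be followed by the K&R definition
+ // 'int f(x) short x; { ... }': the declared type 'short' matches the
+ // prototype even though its promoted type is 'int'.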
+ if (!getLangOpts().CPlusPlus &&
+ Old->hasPrototype() && !New->hasPrototype() &&
+ New->getType()->getAs<FunctionProtoType>() &&
+ Old->getNumParams() == New->getNumParams()) {
+ SmallVector<QualType, 16> ArgTypes;
+ SmallVector<GNUCompatibleParamWarning, 16> Warnings;
+ const FunctionProtoType *OldProto
+ = Old->getType()->getAs<FunctionProtoType>();
+ const FunctionProtoType *NewProto
+ = New->getType()->getAs<FunctionProtoType>();
+
+ // Determine whether this is the GNU C extension.
+ QualType MergedReturn = Context.mergeTypes(OldProto->getReturnType(),
+ NewProto->getReturnType());
+ bool LooseCompatible = !MergedReturn.isNull();
+ for (unsigned Idx = 0, End = Old->getNumParams();
+ LooseCompatible && Idx != End; ++Idx) {
+ ParmVarDecl *OldParm = Old->getParamDecl(Idx);
+ ParmVarDecl *NewParm = New->getParamDecl(Idx);
+ if (Context.typesAreCompatible(OldParm->getType(),
+ NewProto->getParamType(Idx))) {
+ ArgTypes.push_back(NewParm->getType());
+ } else if (Context.typesAreCompatible(OldParm->getType(),
+ NewParm->getType(),
+ /*CompareUnqualified=*/true)) {
+ GNUCompatibleParamWarning Warn = { OldParm, NewParm,
+ NewProto->getParamType(Idx) };
+ Warnings.push_back(Warn);
+ ArgTypes.push_back(NewParm->getType());
+ } else
+ LooseCompatible = false;
+ }
+
+ if (LooseCompatible) {
+ for (unsigned Warn = 0; Warn < Warnings.size(); ++Warn) {
+ Diag(Warnings[Warn].NewParm->getLocation(),
+ diag::ext_param_promoted_not_compatible_with_prototype)
+ << Warnings[Warn].PromotedType
+ << Warnings[Warn].OldParm->getType();
+ if (Warnings[Warn].OldParm->getLocation().isValid())
+ Diag(Warnings[Warn].OldParm->getLocation(),
+ diag::note_previous_declaration);
+ }
+
+ if (MergeTypeWithOld)
+ New->setType(Context.getFunctionType(MergedReturn, ArgTypes,
+ OldProto->getExtProtoInfo()));
+ return MergeCompatibleFunctionDecls(New, Old, S, MergeTypeWithOld);
+ }
+
+ // Fall through to diagnose conflicting types.
+ }
+
+ // A function that has already been declared has been redeclared or
+ // defined with a different type; show an appropriate diagnostic.
+
+ // If the previous declaration was an implicitly-generated builtin
+ // declaration, then at the very least we should use a specialized note.
+ unsigned BuiltinID;
+ if (Old->isImplicit() && (BuiltinID = Old->getBuiltinID())) {
+ // If it's actually a library-defined builtin function like 'malloc'
+ // or 'printf', just warn about the incompatible redeclaration.
+ if (Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID)) {
+ Diag(New->getLocation(), diag::warn_redecl_library_builtin) << New;
+ Diag(OldLocation, diag::note_previous_builtin_declaration)
+ << Old << Old->getType();
+
+ // If this is a global redeclaration, just forget hereafter
+ // about the "builtin-ness" of the function.
+ //
+ // Doing this for local extern declarations is problematic. If
+ // the builtin declaration remains visible, a second invalid
+ // local declaration will produce a hard error; if it doesn't
+ // remain visible, a single bogus local redeclaration (which is
+ // actually only a warning) could break all the downstream code.
+ if (!New->getLexicalDeclContext()->isFunctionOrMethod())
+ New->getIdentifier()->revertBuiltin();
+
+ return false;
+ }
+
+ PrevDiag = diag::note_previous_builtin_declaration;
+ }
+
+ Diag(New->getLocation(), diag::err_conflicting_types) << New->getDeclName();
+ Diag(OldLocation, PrevDiag) << Old << Old->getType();
+ return true;
+}
+
+/// \brief Completes the merge of two function declarations that are
+/// known to be compatible.
+///
+/// This routine handles the merging of attributes and other
+/// properties of function declarations from the old declaration to
+/// the new declaration, once we know that New is in fact a
+/// redeclaration of Old.
+///
+/// \returns false
+bool Sema::MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
+ Scope *S, bool MergeTypeWithOld) {
+ // Merge the attributes
+ mergeDeclAttributes(New, Old);
+
+ // Merge "pure" flag.
+ if (Old->isPure())
+ New->setPure();
+
+ // Merge "used" flag.
+ if (Old->getMostRecentDecl()->isUsed(false))
+ New->setIsUsed();
+
+ // Merge attributes from the parameters. These can mismatch with K&R
+ // declarations.
+ if (New->getNumParams() == Old->getNumParams())
+ for (unsigned i = 0, e = New->getNumParams(); i != e; ++i) {
+ ParmVarDecl *NewParam = New->getParamDecl(i);
+ ParmVarDecl *OldParam = Old->getParamDecl(i);
+ mergeParamDeclAttributes(NewParam, OldParam, *this);
+ mergeParamDeclTypes(NewParam, OldParam, *this);
+ }
+
+ if (getLangOpts().CPlusPlus)
+ return MergeCXXFunctionDecl(New, Old, S);
+
+ // Merge the function types so that we get the composite types for the return
+ // and argument types. Per C11 6.2.7/4, only update the type if the old decl
+ // was visible.
+ QualType Merged = Context.mergeTypes(Old->getType(), New->getType());
+ if (!Merged.isNull() && MergeTypeWithOld)
+ New->setType(Merged);
+
+ return false;
+}
+
+void Sema::mergeObjCMethodDecls(ObjCMethodDecl *newMethod,
+ ObjCMethodDecl *oldMethod) {
+
+ // Merge the attributes, including deprecated/unavailable
+ AvailabilityMergeKind MergeKind =
+ isa<ObjCProtocolDecl>(oldMethod->getDeclContext())
+ ? AMK_ProtocolImplementation
+ : isa<ObjCImplDecl>(newMethod->getDeclContext()) ? AMK_Redeclaration
+ : AMK_Override;
+
+ mergeDeclAttributes(newMethod, oldMethod, MergeKind);
+
+ // Merge attributes from the parameters.
+ ObjCMethodDecl::param_const_iterator oi = oldMethod->param_begin(),
+ oe = oldMethod->param_end();
+ for (ObjCMethodDecl::param_iterator
+ ni = newMethod->param_begin(), ne = newMethod->param_end();
+ ni != ne && oi != oe; ++ni, ++oi)
+ mergeParamDeclAttributes(*ni, *oi, *this);
+
+ CheckObjCMethodOverride(newMethod, oldMethod);
+}
+
+/// MergeVarDeclTypes - We parsed a variable 'New' which has the same name and
+/// scope as a previous declaration 'Old'. Figure out how to merge their types,
+/// emitting diagnostics as appropriate.
+///
+/// Declarations using the auto type specifier (C++ [decl.spec.auto]) call back
+/// to here in AddInitializerToDecl. We can't check them before the initializer
+/// is attached.
+void Sema::MergeVarDeclTypes(VarDecl *New, VarDecl *Old,
+ bool MergeTypeWithOld) {
+ if (New->isInvalidDecl() || Old->isInvalidDecl())
+ return;
+
+ QualType MergedT;
+ if (getLangOpts().CPlusPlus) {
+ if (New->getType()->isUndeducedType()) {
+ // We don't know what the new type is until the initializer is attached.
+ return;
+ } else if (Context.hasSameType(New->getType(), Old->getType())) {
+ // These could still be something that needs exception specs checked.
+ return MergeVarDeclExceptionSpecs(New, Old);
+ }
+ // C++ [basic.link]p10:
+ // [...] the types specified by all declarations referring to a given
+ // object or function shall be identical, except that declarations for an
+ // array object can specify array types that differ by the presence or
+ // absence of a major array bound (8.3.4).
+ else if (Old->getType()->isIncompleteArrayType() &&
+ New->getType()->isArrayType()) {
+ const ArrayType *OldArray = Context.getAsArrayType(Old->getType());
+ const ArrayType *NewArray = Context.getAsArrayType(New->getType());
+ if (Context.hasSameType(OldArray->getElementType(),
+ NewArray->getElementType()))
+ MergedT = New->getType();
+ } else if (Old->getType()->isArrayType() &&
+ New->getType()->isIncompleteArrayType()) {
+ const ArrayType *OldArray = Context.getAsArrayType(Old->getType());
+ const ArrayType *NewArray = Context.getAsArrayType(New->getType());
+ if (Context.hasSameType(OldArray->getElementType(),
+ NewArray->getElementType()))
+ MergedT = Old->getType();
+ } else if (New->getType()->isObjCObjectPointerType() &&
+ Old->getType()->isObjCObjectPointerType()) {
+ MergedT = Context.mergeObjCGCQualifiers(New->getType(),
+ Old->getType());
+ }
+ } else {
+ // C 6.2.7p2:
+ // All declarations that refer to the same object or function shall have
+ // compatible type.
+ MergedT = Context.mergeTypes(New->getType(), Old->getType());
+ }
+ if (MergedT.isNull()) {
+ // It's OK if we couldn't merge types if either type is dependent, for a
+ // block-scope variable. In other cases (static data members of class
+ // templates, variable templates, ...), we require the types to be
+ // equivalent.
+ // FIXME: The C++ standard doesn't say anything about this.
+ if ((New->getType()->isDependentType() ||
+ Old->getType()->isDependentType()) && New->isLocalVarDecl()) {
+ // If the old type was dependent, we can't merge with it, so the new type
+ // becomes dependent for now. We'll reproduce the original type when we
+ // instantiate the TypeSourceInfo for the variable.
+ if (!New->getType()->isDependentType() && MergeTypeWithOld)
+ New->setType(Context.DependentTy);
+ return;
+ }
+
+ // FIXME: Even if this merging succeeds, some other non-visible declaration
+ // of this variable might have an incompatible type. For instance:
+ //
+ // extern int arr[];
+ // void f() { extern int arr[2]; }
+ // void g() { extern int arr[3]; }
+ //
+ // Neither C nor C++ requires a diagnostic for this, but we should still try
+ // to diagnose it.
+ Diag(New->getLocation(), New->isThisDeclarationADefinition()
+ ? diag::err_redefinition_different_type
+ : diag::err_redeclaration_different_type)
+ << New->getDeclName() << New->getType() << Old->getType();
+
+ diag::kind PrevDiag;
+ SourceLocation OldLocation;
+ std::tie(PrevDiag, OldLocation) =
+ getNoteDiagForInvalidRedeclaration(Old, New);
+ Diag(OldLocation, PrevDiag);
+ return New->setInvalidDecl();
+ }
+
+ // Don't actually update the type on the new declaration if the old
+ // declaration was an extern declaration in a different scope.
+ if (MergeTypeWithOld)
+ New->setType(MergedT);
+}
+
+static bool mergeTypeWithPrevious(Sema &S, VarDecl *NewVD, VarDecl *OldVD,
+ LookupResult &Previous) {
+ // C11 6.2.7p4:
+ // For an identifier with internal or external linkage declared
+ // in a scope in which a prior declaration of that identifier is
+ // visible, if the prior declaration specifies internal or
+ // external linkage, the type of the identifier at the later
+ // declaration becomes the composite type.
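+ //
+ // For example, after 'extern int a[];' a subsequent 'extern int a[10];'
+ // gives the identifier the composite type 'int[10]'.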
+ //
+ // If the variable isn't visible, we do not merge with its type.
+ if (Previous.isShadowed())
+ return false;
+
+ if (S.getLangOpts().CPlusPlus) {
+ // C++11 [dcl.array]p3:
+ // If there is a preceding declaration of the entity in the same
+ // scope in which the bound was specified, an omitted array bound
+ // is taken to be the same as in that earlier declaration.
+ return NewVD->isPreviousDeclInSameBlockScope() ||
+ (!OldVD->getLexicalDeclContext()->isFunctionOrMethod() &&
+ !NewVD->getLexicalDeclContext()->isFunctionOrMethod());
+ } else {
+ // If the old declaration was function-local, don't merge with its
+ // type unless we're in the same function.
+ return !OldVD->getLexicalDeclContext()->isFunctionOrMethod() ||
+ OldVD->getLexicalDeclContext() == NewVD->getLexicalDeclContext();
+ }
+}
+
+/// MergeVarDecl - We just parsed a variable 'New' which has the same name
+/// and scope as a previous declaration 'Old'. Figure out how to resolve this
+/// situation, merging decls or emitting diagnostics as appropriate.
+///
+/// Tentative definition rules (C99 6.9.2p2) are checked by
+/// FinalizeDeclaratorGroup. Unfortunately, we can't analyze tentative
+/// definitions here, since the initializer hasn't been attached.
+///
+void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
+ // If the new decl is already invalid, don't do any other checking.
+ if (New->isInvalidDecl())
+ return;
+
+ if (!shouldLinkPossiblyHiddenDecl(Previous, New))
+ return;
+
+ VarTemplateDecl *NewTemplate = New->getDescribedVarTemplate();
+
+ // Verify the old decl was also a variable or variable template.
+ VarDecl *Old = nullptr;
+ VarTemplateDecl *OldTemplate = nullptr;
+ if (Previous.isSingleResult()) {
+ if (NewTemplate) {
+ OldTemplate = dyn_cast<VarTemplateDecl>(Previous.getFoundDecl());
+ Old = OldTemplate ? OldTemplate->getTemplatedDecl() : nullptr;
+
+ if (auto *Shadow =
+ dyn_cast<UsingShadowDecl>(Previous.getRepresentativeDecl()))
+ if (checkUsingShadowRedecl<VarTemplateDecl>(*this, Shadow, NewTemplate))
+ return New->setInvalidDecl();
+ } else {
+ Old = dyn_cast<VarDecl>(Previous.getFoundDecl());
+
+ if (auto *Shadow =
+ dyn_cast<UsingShadowDecl>(Previous.getRepresentativeDecl()))
+ if (checkUsingShadowRedecl<VarDecl>(*this, Shadow, New))
+ return New->setInvalidDecl();
+ }
+ }
+ if (!Old) {
+ Diag(New->getLocation(), diag::err_redefinition_different_kind)
+ << New->getDeclName();
+ Diag(Previous.getRepresentativeDecl()->getLocation(),
+ diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+
+ // Ensure the template parameters are compatible.
+ if (NewTemplate &&
+ !TemplateParameterListsAreEqual(NewTemplate->getTemplateParameters(),
+ OldTemplate->getTemplateParameters(),
+ /*Complain=*/true, TPL_TemplateMatch))
+ return New->setInvalidDecl();
+
+ // C++ [class.mem]p1:
+ // A member shall not be declared twice in the member-specification [...]
+ //
+ // Here, we need only consider static data members.
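+ //
+ // For instance:
+ //   struct S {
+ //     static int x;
+ //     static int x;  // ill-formed: member declared twice
+ //   };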
+ if (Old->isStaticDataMember() && !New->isOutOfLine()) {
+ Diag(New->getLocation(), diag::err_duplicate_member)
+ << New->getIdentifier();
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ New->setInvalidDecl();
+ }
+
+ mergeDeclAttributes(New, Old);
+ // Warn if an already-declared variable is made a weak_import in a subsequent
+ // declaration
+ if (New->hasAttr<WeakImportAttr>() &&
+ Old->getStorageClass() == SC_None &&
+ !Old->hasAttr<WeakImportAttr>()) {
+ Diag(New->getLocation(), diag::warn_weak_import) << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ // Remove weak_import attribute on new declaration.
+ New->dropAttr<WeakImportAttr>();
+ }
+
+ if (New->hasAttr<InternalLinkageAttr>() &&
+ !Old->hasAttr<InternalLinkageAttr>()) {
+ Diag(New->getLocation(), diag::err_internal_linkage_redeclaration)
+ << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ New->dropAttr<InternalLinkageAttr>();
+ }
+
+ // Merge the types.
+ VarDecl *MostRecent = Old->getMostRecentDecl();
+ if (MostRecent != Old) {
+ MergeVarDeclTypes(New, MostRecent,
+ mergeTypeWithPrevious(*this, New, MostRecent, Previous));
+ if (New->isInvalidDecl())
+ return;
+ }
+
+ MergeVarDeclTypes(New, Old, mergeTypeWithPrevious(*this, New, Old, Previous));
+ if (New->isInvalidDecl())
+ return;
+
+ diag::kind PrevDiag;
+ SourceLocation OldLocation;
+ std::tie(PrevDiag, OldLocation) =
+ getNoteDiagForInvalidRedeclaration(Old, New);
+
+ // [dcl.stc]p8: Check if we have a non-static decl followed by a static.
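+ // For instance:
+ //   extern int n;
+ //   static int n;  // error; accepted as an extension in Microsoft mode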
+ if (New->getStorageClass() == SC_Static &&
+ !New->isStaticDataMember() &&
+ Old->hasExternalFormalLinkage()) {
+ if (getLangOpts().MicrosoftExt) {
+ Diag(New->getLocation(), diag::ext_static_non_static)
+ << New->getDeclName();
+ Diag(OldLocation, PrevDiag);
+ } else {
+ Diag(New->getLocation(), diag::err_static_non_static)
+ << New->getDeclName();
+ Diag(OldLocation, PrevDiag);
+ return New->setInvalidDecl();
+ }
+ }
+ // C99 6.2.2p4:
+ // For an identifier declared with the storage-class specifier
+ // extern in a scope in which a prior declaration of that
+ // identifier is visible,23) if the prior declaration specifies
+ // internal or external linkage, the linkage of the identifier at
+ // the later declaration is the same as the linkage specified at
+ // the prior declaration. If no prior declaration is visible, or
+ // if the prior declaration specifies no linkage, then the
+ // identifier has external linkage.
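+ //
+ // For instance:
+ //   static int n;  // internal linkage
+ //   extern int n;  // OK: takes on the prior internal linkage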
+ if (New->hasExternalStorage() && Old->hasLinkage())
+ /* Okay */;
+ else if (New->getCanonicalDecl()->getStorageClass() != SC_Static &&
+ !New->isStaticDataMember() &&
+ Old->getCanonicalDecl()->getStorageClass() == SC_Static) {
+ Diag(New->getLocation(), diag::err_non_static_static) << New->getDeclName();
+ Diag(OldLocation, PrevDiag);
+ return New->setInvalidDecl();
+ }
+
+ // Check if extern is followed by non-extern and vice-versa.
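+ // For instance:
+ //   void f() {
+ //     int k;
+ //     extern int k;  // error: extern declaration follows non-extern
+ //   }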
+ if (New->hasExternalStorage() &&
+ !Old->hasLinkage() && Old->isLocalVarDeclOrParm()) {
+ Diag(New->getLocation(), diag::err_extern_non_extern) << New->getDeclName();
+ Diag(OldLocation, PrevDiag);
+ return New->setInvalidDecl();
+ }
+ if (Old->hasLinkage() && New->isLocalVarDeclOrParm() &&
+ !New->hasExternalStorage()) {
+ Diag(New->getLocation(), diag::err_non_extern_extern) << New->getDeclName();
+ Diag(OldLocation, PrevDiag);
+ return New->setInvalidDecl();
+ }
+
+ // Variables with external linkage are analyzed in FinalizeDeclaratorGroup.
+
+ // FIXME: The test for external storage here seems wrong? We still
+ // need to check for mismatches.
+ if (!New->hasExternalStorage() && !New->isFileVarDecl() &&
+ // Don't complain about out-of-line definitions of static members.
+ !(Old->getLexicalDeclContext()->isRecord() &&
+ !New->getLexicalDeclContext()->isRecord())) {
+ Diag(New->getLocation(), diag::err_redefinition) << New->getDeclName();
+ Diag(OldLocation, PrevDiag);
+ return New->setInvalidDecl();
+ }
+
+ if (New->getTLSKind() != Old->getTLSKind()) {
+ if (!Old->getTLSKind()) {
+ Diag(New->getLocation(), diag::err_thread_non_thread) << New->getDeclName();
+ Diag(OldLocation, PrevDiag);
+ } else if (!New->getTLSKind()) {
+ Diag(New->getLocation(), diag::err_non_thread_thread) << New->getDeclName();
+ Diag(OldLocation, PrevDiag);
+ } else {
+ // Do not allow redeclaration to change the variable between requiring
+ // static and dynamic initialization.
+ // FIXME: GCC allows this, but uses the TLS keyword on the first
+ // declaration to determine the kind. Do we need to be compatible here?
+ Diag(New->getLocation(), diag::err_thread_thread_different_kind)
+ << New->getDeclName() << (New->getTLSKind() == VarDecl::TLS_Dynamic);
+ Diag(OldLocation, PrevDiag);
+ }
+ }
+
+ // C++ doesn't have tentative definitions, so go right ahead and check here.
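+ // For instance:
+ //   int i;
+ //   int i;  // tentative definition in C, but a redefinition error in C++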
+ VarDecl *Def;
+ if (getLangOpts().CPlusPlus &&
+ New->isThisDeclarationADefinition() == VarDecl::Definition &&
+ (Def = Old->getDefinition())) {
+ NamedDecl *Hidden = nullptr;
+ if (!hasVisibleDefinition(Def, &Hidden) &&
+ (New->getFormalLinkage() == InternalLinkage ||
+ New->getDescribedVarTemplate() ||
+ New->getNumTemplateParameterLists() ||
+ New->getDeclContext()->isDependentContext())) {
+ // The previous definition is hidden, and multiple definitions are
+ // permitted (in separate TUs). Form another definition of it.
+ } else {
+ Diag(New->getLocation(), diag::err_redefinition) << New;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ New->setInvalidDecl();
+ return;
+ }
+ }
+
+ if (haveIncompatibleLanguageLinkages(Old, New)) {
+ Diag(New->getLocation(), diag::err_different_language_linkage) << New;
+ Diag(OldLocation, PrevDiag);
+ New->setInvalidDecl();
+ return;
+ }
+
+ // Merge "used" flag.
+ if (Old->getMostRecentDecl()->isUsed(false))
+ New->setIsUsed();
+
+ // Keep a chain of previous declarations.
+ New->setPreviousDecl(Old);
+ if (NewTemplate)
+ NewTemplate->setPreviousDecl(OldTemplate);
+
+ // Inherit access appropriately.
+ New->setAccess(Old->getAccess());
+ if (NewTemplate)
+ NewTemplate->setAccess(New->getAccess());
+}
+
+/// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
+/// no declarator (e.g. "struct foo;") is parsed.
+Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
+ DeclSpec &DS) {
+ return ParsedFreeStandingDeclSpec(S, AS, DS, MultiTemplateParamsArg());
+}
+
+// The MS ABI changed between VS2013 and VS2015 with regard to numbers used to
+// disambiguate entities defined in different scopes.
+// While the VS2015 ABI fixes potential miscompiles, it also breaks
+// compatibility.
+// We will pick our mangling number depending on which version of MSVC is being
+// targeted.
+static unsigned getMSManglingNumber(const LangOptions &LO, Scope *S) {
+ return LO.isCompatibleWithMSVC(LangOptions::MSVC2015)
+ ? S->getMSCurManglingNumber()
+ : S->getMSLastManglingNumber();
+}
+
+void Sema::handleTagNumbering(const TagDecl *Tag, Scope *TagScope) {
+ if (!Context.getLangOpts().CPlusPlus)
+ return;
+
+ if (isa<CXXRecordDecl>(Tag->getParent())) {
+ // If this tag is the direct child of a class, number it if
+ // it is anonymous.
+ if (!Tag->getName().empty() || Tag->getTypedefNameForAnonDecl())
+ return;
+ MangleNumberingContext &MCtx =
+ Context.getManglingNumberContext(Tag->getParent());
+ Context.setManglingNumber(
+ Tag, MCtx.getManglingNumber(
+ Tag, getMSManglingNumber(getLangOpts(), TagScope)));
+ return;
+ }
+
+ // If this tag isn't a direct child of a class, number it if it is local.
+ Decl *ManglingContextDecl;
+ if (MangleNumberingContext *MCtx = getCurrentMangleNumberContext(
+ Tag->getDeclContext(), ManglingContextDecl)) {
+ Context.setManglingNumber(
+ Tag, MCtx->getManglingNumber(
+ Tag, getMSManglingNumber(getLangOpts(), TagScope)));
+ }
+}
+
+void Sema::setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
+ TypedefNameDecl *NewTD) {
+ if (TagFromDeclSpec->isInvalidDecl())
+ return;
+
+ // Do nothing if the tag already has a name for linkage purposes.
+ if (TagFromDeclSpec->hasNameForLinkage())
+ return;
+
+ // A well-formed anonymous tag must always be a TUK_Definition.
+ assert(TagFromDeclSpec->isThisDeclarationADefinition());
+
+ // The type must match the tag exactly; no qualifiers allowed.
+ if (!Context.hasSameType(NewTD->getUnderlyingType(),
+ Context.getTagDeclType(TagFromDeclSpec))) {
+ if (getLangOpts().CPlusPlus)
+ Context.addTypedefNameForUnnamedTagDecl(TagFromDeclSpec, NewTD);
+ return;
+ }
+
+ // If we've already computed linkage for the anonymous tag, then
+ // adding a typedef name for the anonymous decl can change that
+ // linkage, which might be a serious problem. Diagnose this as
+ // unsupported and ignore the typedef name. TODO: we should
+ // pursue this as a language defect and establish a formal rule
+ // for how to handle it.
+ if (TagFromDeclSpec->hasLinkageBeenComputed()) {
+ Diag(NewTD->getLocation(), diag::err_typedef_changes_linkage);
+
+ SourceLocation tagLoc = TagFromDeclSpec->getInnerLocStart();
+ tagLoc = getLocForEndOfToken(tagLoc);
+
+ llvm::SmallString<40> textToInsert;
+ textToInsert += ' ';
+ textToInsert += NewTD->getIdentifier()->getName();
+ Diag(tagLoc, diag::note_typedef_changes_linkage)
+ << FixItHint::CreateInsertion(tagLoc, textToInsert);
+ return;
+ }
+
+ // Otherwise, set this as the anon-decl typedef for the tag.
+ TagFromDeclSpec->setTypedefNameForAnonDecl(NewTD);
+}
+
+static unsigned GetDiagnosticTypeSpecifierID(DeclSpec::TST T) {
+ switch (T) {
+ case DeclSpec::TST_class:
+ return 0;
+ case DeclSpec::TST_struct:
+ return 1;
+ case DeclSpec::TST_interface:
+ return 2;
+ case DeclSpec::TST_union:
+ return 3;
+ case DeclSpec::TST_enum:
+ return 4;
+ default:
+ llvm_unreachable("unexpected type specifier");
+ }
+}
+
+/// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
+/// no declarator (e.g. "struct foo;") is parsed. It also accepts template
+/// parameters to cope with template friend declarations.
+Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
+ DeclSpec &DS,
+ MultiTemplateParamsArg TemplateParams,
+ bool IsExplicitInstantiation) {
+ Decl *TagD = nullptr;
+ TagDecl *Tag = nullptr;
+ if (DS.getTypeSpecType() == DeclSpec::TST_class ||
+ DS.getTypeSpecType() == DeclSpec::TST_struct ||
+ DS.getTypeSpecType() == DeclSpec::TST_interface ||
+ DS.getTypeSpecType() == DeclSpec::TST_union ||
+ DS.getTypeSpecType() == DeclSpec::TST_enum) {
+ TagD = DS.getRepAsDecl();
+
+ if (!TagD) // We probably had an error
+ return nullptr;
+
+ // Note that the above type specs guarantee that the
+ // type rep is a Decl, whereas in many of the others
+ // it's a Type.
+ if (isa<TagDecl>(TagD))
+ Tag = cast<TagDecl>(TagD);
+ else if (ClassTemplateDecl *CTD = dyn_cast<ClassTemplateDecl>(TagD))
+ Tag = CTD->getTemplatedDecl();
+ }
+
+ if (Tag) {
+ handleTagNumbering(Tag, S);
+ Tag->setFreeStanding();
+ if (Tag->isInvalidDecl())
+ return Tag;
+ }
+
+ if (unsigned TypeQuals = DS.getTypeQualifiers()) {
+ // Enforce C99 6.7.3p2: "Types other than pointer types derived from object
+ // or incomplete types shall not be restrict-qualified."
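+ // For instance, "restrict struct S;" applies restrict to a non-pointer.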
+ if (TypeQuals & DeclSpec::TQ_restrict)
+ Diag(DS.getRestrictSpecLoc(),
+ diag::err_typecheck_invalid_restrict_not_pointer_noarg)
+ << DS.getSourceRange();
+ }
+
+ if (DS.isConstexprSpecified()) {
+ // C++0x [dcl.constexpr]p1: constexpr can only be applied to declarations
+ // and definitions of functions and variables.
+ if (Tag)
+ Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_tag)
+ << GetDiagnosticTypeSpecifierID(DS.getTypeSpecType());
+ else
+ Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_no_declarators);
+ // Don't emit warnings after this error.
+ return TagD;
+ }
+
+ if (DS.isConceptSpecified()) {
+ // C++ Concepts TS [dcl.spec.concept]p1: A concept definition refers to
+ // either a function concept and its definition or a variable concept and
+ // its initializer.
+ Diag(DS.getConceptSpecLoc(), diag::err_concept_wrong_decl_kind);
+ return TagD;
+ }
+
+ DiagnoseFunctionSpecifiers(DS);
+
+ if (DS.isFriendSpecified()) {
+ // If we're dealing with a decl but not a TagDecl, assume that
+ // whatever routines created it handled the friendship aspect.
+ if (TagD && !Tag)
+ return nullptr;
+ return ActOnFriendTypeDecl(S, DS, TemplateParams);
+ }
+
+ const CXXScopeSpec &SS = DS.getTypeSpecScope();
+ bool IsExplicitSpecialization =
+ !TemplateParams.empty() && TemplateParams.back()->size() == 0;
+ if (Tag && SS.isNotEmpty() && !Tag->isCompleteDefinition() &&
+ !IsExplicitInstantiation && !IsExplicitSpecialization &&
+ !isa<ClassTemplatePartialSpecializationDecl>(Tag)) {
+ // Per C++ [dcl.type.elab]p1, a class declaration cannot have a
+ // nested-name-specifier unless it is an explicit instantiation
+ // or an explicit specialization.
+ //
+ // FIXME: We allow class template partial specializations here too, per the
+ // obvious intent of DR1819.
+ //
+ // Per C++ [dcl.enum]p1, an opaque-enum-declaration can't either.
+ Diag(SS.getBeginLoc(), diag::err_standalone_class_nested_name_specifier)
+ << GetDiagnosticTypeSpecifierID(DS.getTypeSpecType()) << SS.getRange();
+ return nullptr;
+ }
+
+ // Track whether this decl-specifier declares anything.
+ bool DeclaresAnything = true;
+
+ // Handle anonymous struct definitions.
+ if (RecordDecl *Record = dyn_cast_or_null<RecordDecl>(Tag)) {
+ if (!Record->getDeclName() && Record->isCompleteDefinition() &&
+ DS.getStorageClassSpec() != DeclSpec::SCS_typedef) {
+ if (getLangOpts().CPlusPlus ||
+ Record->getDeclContext()->isRecord())
+ return BuildAnonymousStructOrUnion(S, DS, AS, Record,
+ Context.getPrintingPolicy());
+
+ DeclaresAnything = false;
+ }
+ }
+
+ // C11 6.7.2.1p2:
+ // A struct-declaration that does not declare an anonymous structure or
+ // anonymous union shall contain a struct-declarator-list.
+ //
+ // This rule also existed in C89 and C99; the grammar for struct-declaration
+ // did not permit a struct-declaration without a struct-declarator-list.
+ if (!getLangOpts().CPlusPlus && CurContext->isRecord() &&
+ DS.getStorageClassSpec() == DeclSpec::SCS_unspecified) {
+ // Check for Microsoft C extension: anonymous struct/union member.
+ // Handle 2 kinds of anonymous struct/union:
+ // struct STRUCT;
+ // union UNION;
+ // and
+ // STRUCT_TYPE; <- where STRUCT_TYPE is a typedef struct.
+ // UNION_TYPE; <- where UNION_TYPE is a typedef union.
+ if ((Tag && Tag->getDeclName()) ||
+ DS.getTypeSpecType() == DeclSpec::TST_typename) {
+ RecordDecl *Record = nullptr;
+ if (Tag)
+ Record = dyn_cast<RecordDecl>(Tag);
+ else if (const RecordType *RT =
+ DS.getRepAsType().get()->getAsStructureType())
+ Record = RT->getDecl();
+ else if (const RecordType *UT = DS.getRepAsType().get()->getAsUnionType())
+ Record = UT->getDecl();
+
+ if (Record && getLangOpts().MicrosoftExt) {
+ Diag(DS.getLocStart(), diag::ext_ms_anonymous_record)
+ << Record->isUnion() << DS.getSourceRange();
+ return BuildMicrosoftCAnonymousStruct(S, DS, Record);
+ }
+
+ DeclaresAnything = false;
+ }
+ }
+
+ // Skip all the checks below if we have a type error.
+ if (DS.getTypeSpecType() == DeclSpec::TST_error ||
+ (TagD && TagD->isInvalidDecl()))
+ return TagD;
+
+ if (getLangOpts().CPlusPlus &&
+ DS.getStorageClassSpec() != DeclSpec::SCS_typedef)
+ if (EnumDecl *Enum = dyn_cast_or_null<EnumDecl>(Tag))
+ if (Enum->enumerator_begin() == Enum->enumerator_end() &&
+ !Enum->getIdentifier() && !Enum->isInvalidDecl())
+ DeclaresAnything = false;
+
+ if (!DS.isMissingDeclaratorOk()) {
+ // Customize diagnostic for a typedef missing a name.
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef)
+ Diag(DS.getLocStart(), diag::ext_typedef_without_a_name)
+ << DS.getSourceRange();
+ else
+ DeclaresAnything = false;
+ }
+
+ if (DS.isModulePrivateSpecified() &&
+ Tag && Tag->getDeclContext()->isFunctionOrMethod())
+ Diag(DS.getModulePrivateSpecLoc(), diag::err_module_private_local_class)
+ << Tag->getTagKind()
+ << FixItHint::CreateRemoval(DS.getModulePrivateSpecLoc());
+
+ ActOnDocumentableDecl(TagD);
+
+ // C 6.7/2:
+ // A declaration [...] shall declare at least a declarator [...], a tag,
+ // or the members of an enumeration.
+ // C++ [dcl.dcl]p3:
+ // [If there are no declarators], and except for the declaration of an
+ // unnamed bit-field, the decl-specifier-seq shall introduce one or more
+ // names into the program, or shall redeclare a name introduced by a
+ // previous declaration.
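+ //
+ // For instance, "int;" declares nothing and is diagnosed below.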
+ if (!DeclaresAnything) {
+ // In C, we allow this as a (popular) extension / bug. Don't bother
+ // producing further diagnostics for redundant qualifiers after this.
+ Diag(DS.getLocStart(), diag::ext_no_declarators) << DS.getSourceRange();
+ return TagD;
+ }
+
+ // C++ [dcl.stc]p1:
+ // If a storage-class-specifier appears in a decl-specifier-seq, [...] the
+ // init-declarator-list of the declaration shall not be empty.
+ // C++ [dcl.fct.spec]p1:
+ // If a cv-qualifier appears in a decl-specifier-seq, the
+ // init-declarator-list of the declaration shall not be empty.
+ //
+ // Spurious qualifiers here appear to be valid in C.
+ unsigned DiagID = diag::warn_standalone_specifier;
+ if (getLangOpts().CPlusPlus)
+ DiagID = diag::ext_standalone_specifier;
+
+ // Note that a linkage-specification sets a storage class, but
+ // 'extern "C" struct foo;' is actually valid and not theoretically
+ // useless.
+ if (DeclSpec::SCS SCS = DS.getStorageClassSpec()) {
+ if (SCS == DeclSpec::SCS_mutable)
+ // Since mutable is not a viable storage class specifier in C, there is
+ // no reason to treat it as an extension. Instead, diagnose as an error.
+ Diag(DS.getStorageClassSpecLoc(), diag::err_mutable_nonmember);
+ else if (!DS.isExternInLinkageSpec() && SCS != DeclSpec::SCS_typedef)
+ Diag(DS.getStorageClassSpecLoc(), DiagID)
+ << DeclSpec::getSpecifierName(SCS);
+ }
+
+ if (DeclSpec::TSCS TSCS = DS.getThreadStorageClassSpec())
+ Diag(DS.getThreadStorageClassSpecLoc(), DiagID)
+ << DeclSpec::getSpecifierName(TSCS);
+ if (DS.getTypeQualifiers()) {
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
+ Diag(DS.getConstSpecLoc(), DiagID) << "const";
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_volatile)
+ Diag(DS.getVolatileSpecLoc(), DiagID) << "volatile";
+ // Restrict is covered above.
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_atomic)
+ Diag(DS.getAtomicSpecLoc(), DiagID) << "_Atomic";
+ }
+
+ // Warn about ignored type attributes, for example:
+ // __attribute__((aligned)) struct A;
+ // Attributes should be placed after the tag to apply to the type declaration.
+ if (!DS.getAttributes().empty()) {
+ DeclSpec::TST TypeSpecType = DS.getTypeSpecType();
+ if (TypeSpecType == DeclSpec::TST_class ||
+ TypeSpecType == DeclSpec::TST_struct ||
+ TypeSpecType == DeclSpec::TST_interface ||
+ TypeSpecType == DeclSpec::TST_union ||
+ TypeSpecType == DeclSpec::TST_enum) {
+ for (AttributeList* attrs = DS.getAttributes().getList(); attrs;
+ attrs = attrs->getNext())
+ Diag(attrs->getLoc(), diag::warn_declspec_attribute_ignored)
+ << attrs->getName() << GetDiagnosticTypeSpecifierID(TypeSpecType);
+ }
+ }
+
+ return TagD;
+}
+
+/// We are trying to inject an anonymous member into the given scope;
+/// check if there's an existing declaration that can't be overloaded.
+///
+/// \return true if this is a forbidden redeclaration
+static bool CheckAnonMemberRedeclaration(Sema &SemaRef,
+ Scope *S,
+ DeclContext *Owner,
+ DeclarationName Name,
+ SourceLocation NameLoc,
+ bool IsUnion) {
+ LookupResult R(SemaRef, Name, NameLoc, Sema::LookupMemberName,
+ Sema::ForRedeclaration);
+ if (!SemaRef.LookupName(R, S)) return false;
+
+ if (R.getAsSingle<TagDecl>())
+ return false;
+
+ // Pick a representative declaration.
+ NamedDecl *PrevDecl = R.getRepresentativeDecl()->getUnderlyingDecl();
+ assert(PrevDecl && "Expected a non-null Decl");
+
+ if (!SemaRef.isDeclInScope(PrevDecl, Owner, S))
+ return false;
+
+ SemaRef.Diag(NameLoc, diag::err_anonymous_record_member_redecl)
+ << IsUnion << Name;
+ SemaRef.Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
+
+ return true;
+}
+
+/// InjectAnonymousStructOrUnionMembers - Inject the members of the
+/// anonymous struct or union AnonRecord into the owning context Owner
+/// and scope S. This routine will be invoked just after we realize
+/// that an unnamed union or struct is actually an anonymous union or
+/// struct, e.g.,
+///
+/// @code
+/// union {
+/// int i;
+/// float f;
+/// }; // InjectAnonymousStructOrUnionMembers called here to inject i and
+/// // f into the surrounding scope.
+/// @endcode
+///
+/// This routine is recursive, injecting the names of nested anonymous
+/// structs/unions into the owning context and scope as well.
+static bool InjectAnonymousStructOrUnionMembers(Sema &SemaRef, Scope *S,
+ DeclContext *Owner,
+ RecordDecl *AnonRecord,
+ AccessSpecifier AS,
+ SmallVectorImpl<NamedDecl *> &Chaining,
+ bool MSAnonStruct) {
+ bool Invalid = false;
+
+ // Look at every FieldDecl and IndirectFieldDecl with a name.
+ for (auto *D : AnonRecord->decls()) {
+ if ((isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D)) &&
+ cast<NamedDecl>(D)->getDeclName()) {
+ ValueDecl *VD = cast<ValueDecl>(D);
+ if (CheckAnonMemberRedeclaration(SemaRef, S, Owner, VD->getDeclName(),
+ VD->getLocation(),
+ AnonRecord->isUnion())) {
+ // C++ [class.union]p2:
+ // The names of the members of an anonymous union shall be
+ // distinct from the names of any other entity in the
+ // scope in which the anonymous union is declared.
+ Invalid = true;
+ } else {
+ // C++ [class.union]p2:
+ // For the purpose of name lookup, after the anonymous union
+ // definition, the members of the anonymous union are
+ // considered to have been defined in the scope in which the
+ // anonymous union is declared.
+ unsigned OldChainingSize = Chaining.size();
+ if (IndirectFieldDecl *IF = dyn_cast<IndirectFieldDecl>(VD))
+ Chaining.append(IF->chain_begin(), IF->chain_end());
+ else
+ Chaining.push_back(VD);
+
+ assert(Chaining.size() >= 2);
+ NamedDecl **NamedChain =
+ new (SemaRef.Context) NamedDecl*[Chaining.size()];
+ for (unsigned i = 0; i < Chaining.size(); i++)
+ NamedChain[i] = Chaining[i];
+
+ IndirectFieldDecl *IndirectField = IndirectFieldDecl::Create(
+ SemaRef.Context, Owner, VD->getLocation(), VD->getIdentifier(),
+ VD->getType(), NamedChain, Chaining.size());
+
+ for (const auto *Attr : VD->attrs())
+ IndirectField->addAttr(Attr->clone(SemaRef.Context));
+
+ IndirectField->setAccess(AS);
+ IndirectField->setImplicit();
+ SemaRef.PushOnScopeChains(IndirectField, S);
+
+ // That includes picking up the appropriate access specifier.
+ if (AS != AS_none) IndirectField->setAccess(AS);
+
+ Chaining.resize(OldChainingSize);
+ }
+ }
+ }
+
+ return Invalid;
+}
+
+/// StorageClassSpecToVarDeclStorageClass - Maps a DeclSpec::SCS to
+/// a VarDecl::StorageClass. Any error reporting is up to the caller:
+/// illegal input values are mapped to SC_None.
+static StorageClass
+StorageClassSpecToVarDeclStorageClass(const DeclSpec &DS) {
+ DeclSpec::SCS StorageClassSpec = DS.getStorageClassSpec();
+ assert(StorageClassSpec != DeclSpec::SCS_typedef &&
+ "Parser allowed 'typedef' as storage class VarDecl.");
+ switch (StorageClassSpec) {
+ case DeclSpec::SCS_unspecified: return SC_None;
+ case DeclSpec::SCS_extern:
+ if (DS.isExternInLinkageSpec())
+ return SC_None;
+ return SC_Extern;
+ case DeclSpec::SCS_static: return SC_Static;
+ case DeclSpec::SCS_auto: return SC_Auto;
+ case DeclSpec::SCS_register: return SC_Register;
+ case DeclSpec::SCS_private_extern: return SC_PrivateExtern;
+ // Illegal SCSs map to None: error reporting is up to the caller.
+ case DeclSpec::SCS_mutable: // Fall through.
+ case DeclSpec::SCS_typedef: return SC_None;
+ }
+ llvm_unreachable("unknown storage class specifier");
+}
+
+static SourceLocation findDefaultInitializer(const CXXRecordDecl *Record) {
+ assert(Record->hasInClassInitializer());
+
+ for (const auto *I : Record->decls()) {
+ const auto *FD = dyn_cast<FieldDecl>(I);
+ if (const auto *IFD = dyn_cast<IndirectFieldDecl>(I))
+ FD = IFD->getAnonField();
+ if (FD && FD->hasInClassInitializer())
+ return FD->getLocation();
+ }
+
+ llvm_unreachable("couldn't find in-class initializer");
+}
+
+static void checkDuplicateDefaultInit(Sema &S, CXXRecordDecl *Parent,
+ SourceLocation DefaultInitLoc) {
+ if (!Parent->isUnion() || !Parent->hasInClassInitializer())
+ return;
+
+ S.Diag(DefaultInitLoc, diag::err_multiple_mem_union_initialization);
+ S.Diag(findDefaultInitializer(Parent), diag::note_previous_initializer) << 0;
+}
+
+static void checkDuplicateDefaultInit(Sema &S, CXXRecordDecl *Parent,
+ CXXRecordDecl *AnonUnion) {
+ if (!Parent->isUnion() || !Parent->hasInClassInitializer())
+ return;
+
+ checkDuplicateDefaultInit(S, Parent, findDefaultInitializer(AnonUnion));
+}
+
+/// BuildAnonymousStructOrUnion - Handle the declaration of an
+/// anonymous structure or union. Anonymous unions are a C++ feature
+/// (C++ [class.union]) and a C11 feature; anonymous structures
+/// are a C11 feature and a GNU C++ extension.
+Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
+ AccessSpecifier AS,
+ RecordDecl *Record,
+ const PrintingPolicy &Policy) {
+ DeclContext *Owner = Record->getDeclContext();
+
+ // Diagnose whether this anonymous struct/union is an extension.
+ if (Record->isUnion() && !getLangOpts().CPlusPlus && !getLangOpts().C11)
+ Diag(Record->getLocation(), diag::ext_anonymous_union);
+ else if (!Record->isUnion() && getLangOpts().CPlusPlus)
+ Diag(Record->getLocation(), diag::ext_gnu_anonymous_struct);
+ else if (!Record->isUnion() && !getLangOpts().C11)
+ Diag(Record->getLocation(), diag::ext_c11_anonymous_struct);
+
+ // C and C++ require different kinds of checks for anonymous
+ // structs/unions.
+ bool Invalid = false;
+ if (getLangOpts().CPlusPlus) {
+ const char *PrevSpec = nullptr;
+ unsigned DiagID;
+ if (Record->isUnion()) {
+ // C++ [class.union]p6:
+ // Anonymous unions declared in a named namespace or in the
+ // global namespace shall be declared static.
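+ //
+ // For instance, at namespace scope:
+ //   static union { int i; float f; };  // OK
+ //   union { int j; };                  // ill-formed without 'static'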
+ if (DS.getStorageClassSpec() != DeclSpec::SCS_static &&
+ (isa<TranslationUnitDecl>(Owner) ||
+ (isa<NamespaceDecl>(Owner) &&
+ cast<NamespaceDecl>(Owner)->getDeclName()))) {
+ Diag(Record->getLocation(), diag::err_anonymous_union_not_static)
+ << FixItHint::CreateInsertion(Record->getLocation(), "static ");
+
+ // Recover by adding 'static'.
+ DS.SetStorageClassSpec(*this, DeclSpec::SCS_static, SourceLocation(),
+ PrevSpec, DiagID, Policy);
+ }
+ // C++ [class.union]p6:
+ // A storage class is not allowed in a declaration of an
+ // anonymous union in a class scope.
+ else if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified &&
+ isa<RecordDecl>(Owner)) {
+ Diag(DS.getStorageClassSpecLoc(),
+ diag::err_anonymous_union_with_storage_spec)
+ << FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
+
+ // Recover by removing the storage specifier.
+ DS.SetStorageClassSpec(*this, DeclSpec::SCS_unspecified,
+ SourceLocation(),
+ PrevSpec, DiagID, Context.getPrintingPolicy());
+ }
+ }
+
+ // Ignore const/volatile/restrict qualifiers.
+ if (DS.getTypeQualifiers()) {
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
+ Diag(DS.getConstSpecLoc(), diag::ext_anonymous_struct_union_qualified)
+ << Record->isUnion() << "const"
+ << FixItHint::CreateRemoval(DS.getConstSpecLoc());
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_volatile)
+ Diag(DS.getVolatileSpecLoc(),
+ diag::ext_anonymous_struct_union_qualified)
+ << Record->isUnion() << "volatile"
+ << FixItHint::CreateRemoval(DS.getVolatileSpecLoc());
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_restrict)
+ Diag(DS.getRestrictSpecLoc(),
+ diag::ext_anonymous_struct_union_qualified)
+ << Record->isUnion() << "restrict"
+ << FixItHint::CreateRemoval(DS.getRestrictSpecLoc());
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_atomic)
+ Diag(DS.getAtomicSpecLoc(),
+ diag::ext_anonymous_struct_union_qualified)
+ << Record->isUnion() << "_Atomic"
+ << FixItHint::CreateRemoval(DS.getAtomicSpecLoc());
+
+ DS.ClearTypeQualifiers();
+ }
+
+ // C++ [class.union]p2:
+ // The member-specification of an anonymous union shall only
+ // define non-static data members. [Note: nested types and
+ // functions cannot be declared within an anonymous union. ]
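+ //
+ // For instance:
+ //   union { int n; void f(); };  // ill-formed: function member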
+ for (auto *Mem : Record->decls()) {
+ if (auto *FD = dyn_cast<FieldDecl>(Mem)) {
+ // C++ [class.union]p3:
+ // An anonymous union shall not have private or protected
+ // members (clause 11).
+ assert(FD->getAccess() != AS_none);
+ if (FD->getAccess() != AS_public) {
+ Diag(FD->getLocation(), diag::err_anonymous_record_nonpublic_member)
+ << Record->isUnion() << (FD->getAccess() == AS_protected);
+ Invalid = true;
+ }
+
+ // C++ [class.union]p1
+ // An object of a class with a non-trivial constructor, a non-trivial
+ // copy constructor, a non-trivial destructor, or a non-trivial copy
+ // assignment operator cannot be a member of a union, nor can an
+ // array of such objects.
+ if (CheckNontrivialField(FD))
+ Invalid = true;
+ } else if (Mem->isImplicit()) {
+ // Any implicit members are fine.
+ } else if (isa<TagDecl>(Mem) && Mem->getDeclContext() != Record) {
+ // This is a type that showed up in an
+ // elaborated-type-specifier inside the anonymous struct or
+ // union, but which actually declares a type outside of the
+ // anonymous struct or union. It's okay.
+ } else if (auto *MemRecord = dyn_cast<RecordDecl>(Mem)) {
+ if (!MemRecord->isAnonymousStructOrUnion() &&
+ MemRecord->getDeclName()) {
+ // Visual C++ allows type definition in anonymous struct or union.
+ if (getLangOpts().MicrosoftExt)
+ Diag(MemRecord->getLocation(), diag::ext_anonymous_record_with_type)
+ << Record->isUnion();
+ else {
+ // This is a nested type declaration.
+ Diag(MemRecord->getLocation(), diag::err_anonymous_record_with_type)
+ << Record->isUnion();
+ Invalid = true;
+ }
+ } else {
+ // This is an anonymous type definition within another anonymous type.
+ // This is a popular extension, provided by Plan9, MSVC and GCC, but
+ // not part of standard C++.
+ Diag(MemRecord->getLocation(),
+ diag::ext_anonymous_record_with_anonymous_type)
+ << Record->isUnion();
+ }
+ } else if (isa<AccessSpecDecl>(Mem)) {
+ // Any access specifier is fine.
+ } else if (isa<StaticAssertDecl>(Mem)) {
+ // In C++1z, static_assert declarations are also fine.
+ } else {
+ // We have something that isn't a non-static data
+ // member. Complain about it.
+ unsigned DK = diag::err_anonymous_record_bad_member;
+ if (isa<TypeDecl>(Mem))
+ DK = diag::err_anonymous_record_with_type;
+ else if (isa<FunctionDecl>(Mem))
+ DK = diag::err_anonymous_record_with_function;
+ else if (isa<VarDecl>(Mem))
+ DK = diag::err_anonymous_record_with_static;
+
+ // Visual C++ allows type definition in anonymous struct or union.
+ if (getLangOpts().MicrosoftExt &&
+ DK == diag::err_anonymous_record_with_type)
+ Diag(Mem->getLocation(), diag::ext_anonymous_record_with_type)
+ << Record->isUnion();
+ else {
+ Diag(Mem->getLocation(), DK) << Record->isUnion();
+ Invalid = true;
+ }
+ }
+ }
+
+ // C++11 [class.union]p8 (DR1460):
+ // At most one variant member of a union may have a
+ // brace-or-equal-initializer.
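+ //
+ // For instance:
+ //   union U { int a = 0; int b = 1; };  // ill-formed: two initializers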
+ if (cast<CXXRecordDecl>(Record)->hasInClassInitializer() &&
+ Owner->isRecord())
+ checkDuplicateDefaultInit(*this, cast<CXXRecordDecl>(Owner),
+ cast<CXXRecordDecl>(Record));
+ }
+
+ if (!Record->isUnion() && !Owner->isRecord()) {
+ Diag(Record->getLocation(), diag::err_anonymous_struct_not_member)
+ << getLangOpts().CPlusPlus;
+ Invalid = true;
+ }
+
+ // Mock up a declarator.
+ Declarator Dc(DS, Declarator::MemberContext);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
+ assert(TInfo && "couldn't build declarator info for anonymous struct/union");
+
+ // Create a declaration for this anonymous struct/union.
+ NamedDecl *Anon = nullptr;
+ if (RecordDecl *OwningClass = dyn_cast<RecordDecl>(Owner)) {
+ Anon = FieldDecl::Create(Context, OwningClass,
+ DS.getLocStart(),
+ Record->getLocation(),
+ /*IdentifierInfo=*/nullptr,
+ Context.getTypeDeclType(Record),
+ TInfo,
+ /*BitWidth=*/nullptr, /*Mutable=*/false,
+ /*InitStyle=*/ICIS_NoInit);
+ Anon->setAccess(AS);
+ if (getLangOpts().CPlusPlus)
+ FieldCollector->Add(cast<FieldDecl>(Anon));
+ } else {
+ DeclSpec::SCS SCSpec = DS.getStorageClassSpec();
+ StorageClass SC = StorageClassSpecToVarDeclStorageClass(DS);
+ if (SCSpec == DeclSpec::SCS_mutable) {
+ // mutable can only appear on non-static class members, so it's always
+ // an error here
+ Diag(Record->getLocation(), diag::err_mutable_nonmember);
+ Invalid = true;
+ SC = SC_None;
+ }
+
+ Anon = VarDecl::Create(Context, Owner,
+ DS.getLocStart(),
+ Record->getLocation(), /*IdentifierInfo=*/nullptr,
+ Context.getTypeDeclType(Record),
+ TInfo, SC);
+
+ // Default-initialize the implicit variable. This initialization will be
+ // trivial in almost all cases, except if a union member has an in-class
+ // initializer:
+ // union { int n = 0; };
+ ActOnUninitializedDecl(Anon, /*TypeMayContainAuto=*/false);
+ }
+ Anon->setImplicit();
+
+ // Mark this as an anonymous struct/union type.
+ Record->setAnonymousStructOrUnion(true);
+
+ // Add the anonymous struct/union object to the current
+ // context. We'll be referencing this object when we refer to one of
+ // its members.
+ Owner->addDecl(Anon);
+
+ // Inject the members of the anonymous struct/union into the owning
+ // context and into the identifier resolver chain for name lookup
+ // purposes.
+ SmallVector<NamedDecl*, 2> Chain;
+ Chain.push_back(Anon);
+
+ if (InjectAnonymousStructOrUnionMembers(*this, S, Owner, Record, AS,
+ Chain, false))
+ Invalid = true;
+
+ if (VarDecl *NewVD = dyn_cast<VarDecl>(Anon)) {
+ if (getLangOpts().CPlusPlus && NewVD->isStaticLocal()) {
+ Decl *ManglingContextDecl;
+ if (MangleNumberingContext *MCtx = getCurrentMangleNumberContext(
+ NewVD->getDeclContext(), ManglingContextDecl)) {
+ Context.setManglingNumber(
+ NewVD, MCtx->getManglingNumber(
+ NewVD, getMSManglingNumber(getLangOpts(), S)));
+ Context.setStaticLocalNumber(NewVD, MCtx->getStaticLocalNumber(NewVD));
+ }
+ }
+ }
+
+ if (Invalid)
+ Anon->setInvalidDecl();
+
+ return Anon;
+}
+
+/// BuildMicrosoftCAnonymousStruct - Handle the declaration of a
+/// Microsoft C anonymous structure.
+/// Ref: http://msdn.microsoft.com/en-us/library/z2cx9y4f.aspx
+/// Example:
+///
+/// struct A { int a; };
+/// struct B { struct A; int b; };
+///
+/// void foo() {
+/// B var;
+/// var.a = 3;
+/// }
+///
+Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
+ RecordDecl *Record) {
+ assert(Record && "expected a record!");
+
+ // Mock up a declarator.
+ Declarator Dc(DS, Declarator::TypeNameContext);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
+ assert(TInfo && "couldn't build declarator info for anonymous struct");
+
+ auto *ParentDecl = cast<RecordDecl>(CurContext);
+ QualType RecTy = Context.getTypeDeclType(Record);
+
+ // Create a declaration for this anonymous struct.
+ NamedDecl *Anon = FieldDecl::Create(Context,
+ ParentDecl,
+ DS.getLocStart(),
+ DS.getLocStart(),
+ /*IdentifierInfo=*/nullptr,
+ RecTy,
+ TInfo,
+ /*BitWidth=*/nullptr, /*Mutable=*/false,
+ /*InitStyle=*/ICIS_NoInit);
+ Anon->setImplicit();
+
+ // Add the anonymous struct object to the current context.
+ CurContext->addDecl(Anon);
+
+ // Inject the members of the anonymous struct into the current
+ // context and into the identifier resolver chain for name lookup
+ // purposes.
+ SmallVector<NamedDecl*, 2> Chain;
+ Chain.push_back(Anon);
+
+ RecordDecl *RecordDef = Record->getDefinition();
+ if (RequireCompleteType(Anon->getLocation(), RecTy,
+ diag::err_field_incomplete) ||
+ InjectAnonymousStructOrUnionMembers(*this, S, CurContext, RecordDef,
+ AS_none, Chain, true)) {
+ Anon->setInvalidDecl();
+ ParentDecl->setInvalidDecl();
+ }
+
+ return Anon;
+}
+
+/// GetNameForDeclarator - Determine the full declaration name for the
+/// given Declarator.
+DeclarationNameInfo Sema::GetNameForDeclarator(Declarator &D) {
+ return GetNameFromUnqualifiedId(D.getName());
+}
+
+/// \brief Retrieves the declaration name from a parsed unqualified-id.
+DeclarationNameInfo
+Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
+ DeclarationNameInfo NameInfo;
+ NameInfo.setLoc(Name.StartLocation);
+
+ switch (Name.getKind()) {
+
+ case UnqualifiedId::IK_ImplicitSelfParam:
+ case UnqualifiedId::IK_Identifier:
+ NameInfo.setName(Name.Identifier);
+ NameInfo.setLoc(Name.StartLocation);
+ return NameInfo;
+
+ case UnqualifiedId::IK_OperatorFunctionId:
+ NameInfo.setName(Context.DeclarationNames.getCXXOperatorName(
+ Name.OperatorFunctionId.Operator));
+ NameInfo.setLoc(Name.StartLocation);
+ NameInfo.getInfo().CXXOperatorName.BeginOpNameLoc
+ = Name.OperatorFunctionId.SymbolLocations[0];
+ NameInfo.getInfo().CXXOperatorName.EndOpNameLoc
+ = Name.EndLocation.getRawEncoding();
+ return NameInfo;
+
+ case UnqualifiedId::IK_LiteralOperatorId:
+ NameInfo.setName(Context.DeclarationNames.getCXXLiteralOperatorName(
+ Name.Identifier));
+ NameInfo.setLoc(Name.StartLocation);
+ NameInfo.setCXXLiteralOperatorNameLoc(Name.EndLocation);
+ return NameInfo;
+
+ case UnqualifiedId::IK_ConversionFunctionId: {
+ TypeSourceInfo *TInfo;
+ QualType Ty = GetTypeFromParser(Name.ConversionFunctionId, &TInfo);
+ if (Ty.isNull())
+ return DeclarationNameInfo();
+ NameInfo.setName(Context.DeclarationNames.getCXXConversionFunctionName(
+ Context.getCanonicalType(Ty)));
+ NameInfo.setLoc(Name.StartLocation);
+ NameInfo.setNamedTypeInfo(TInfo);
+ return NameInfo;
+ }
+
+ case UnqualifiedId::IK_ConstructorName: {
+ TypeSourceInfo *TInfo;
+ QualType Ty = GetTypeFromParser(Name.ConstructorName, &TInfo);
+ if (Ty.isNull())
+ return DeclarationNameInfo();
+ NameInfo.setName(Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(Ty)));
+ NameInfo.setLoc(Name.StartLocation);
+ NameInfo.setNamedTypeInfo(TInfo);
+ return NameInfo;
+ }
+
+ case UnqualifiedId::IK_ConstructorTemplateId: {
+ // In well-formed code, we can only have a constructor
+ // template-id that refers to the current context, so go there
+ // to find the actual type being constructed.
+ CXXRecordDecl *CurClass = dyn_cast<CXXRecordDecl>(CurContext);
+ if (!CurClass || CurClass->getIdentifier() != Name.TemplateId->Name)
+ return DeclarationNameInfo();
+
+ // Determine the type of the class being constructed.
+ QualType CurClassType = Context.getTypeDeclType(CurClass);
+
+ // FIXME: Check two things: that the template-id names the same type as
+ // CurClassType, and that the template-id does not occur when the name
+ // was qualified.
+
+ NameInfo.setName(Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(CurClassType)));
+ NameInfo.setLoc(Name.StartLocation);
+ // FIXME: should we retrieve TypeSourceInfo?
+ NameInfo.setNamedTypeInfo(nullptr);
+ return NameInfo;
+ }
+
+ case UnqualifiedId::IK_DestructorName: {
+ TypeSourceInfo *TInfo;
+ QualType Ty = GetTypeFromParser(Name.DestructorName, &TInfo);
+ if (Ty.isNull())
+ return DeclarationNameInfo();
+ NameInfo.setName(Context.DeclarationNames.getCXXDestructorName(
+ Context.getCanonicalType(Ty)));
+ NameInfo.setLoc(Name.StartLocation);
+ NameInfo.setNamedTypeInfo(TInfo);
+ return NameInfo;
+ }
+
+ case UnqualifiedId::IK_TemplateId: {
+ TemplateName TName = Name.TemplateId->Template.get();
+ SourceLocation TNameLoc = Name.TemplateId->TemplateNameLoc;
+ return Context.getNameForTemplate(TName, TNameLoc);
+ }
+
+ } // switch (Name.getKind())
+
+ llvm_unreachable("Unknown name kind");
+}
+
+static QualType getCoreType(QualType Ty) {
+ do {
+ if (Ty->isPointerType() || Ty->isReferenceType())
+ Ty = Ty->getPointeeType();
+ else if (Ty->isArrayType())
+ Ty = Ty->castAsArrayTypeUnsafe()->getElementType();
+ else
+ return Ty.withoutLocalFastQualifiers();
+ } while (true);
+}
+
+/// hasSimilarParameters - Determine whether the C++ functions Declaration
+/// and Definition have "nearly" matching parameters. This heuristic is
+/// used to improve diagnostics in the case where an out-of-line function
+/// definition doesn't match any declaration within the class or namespace.
+/// Also sets Params to the list of indices to the parameters that differ
+/// between the declaration and the definition. If hasSimilarParameters
+/// returns true and Params is empty, then all of the parameters match.
+static bool hasSimilarParameters(ASTContext &Context,
+ FunctionDecl *Declaration,
+ FunctionDecl *Definition,
+ SmallVectorImpl<unsigned> &Params) {
+ Params.clear();
+ if (Declaration->param_size() != Definition->param_size())
+ return false;
+ for (unsigned Idx = 0; Idx < Declaration->param_size(); ++Idx) {
+ QualType DeclParamTy = Declaration->getParamDecl(Idx)->getType();
+ QualType DefParamTy = Definition->getParamDecl(Idx)->getType();
+
+ // The parameter types are identical
+ if (Context.hasSameType(DefParamTy, DeclParamTy))
+ continue;
+
+ QualType DeclParamBaseTy = getCoreType(DeclParamTy);
+ QualType DefParamBaseTy = getCoreType(DefParamTy);
+ const IdentifierInfo *DeclTyName = DeclParamBaseTy.getBaseTypeIdentifier();
+ const IdentifierInfo *DefTyName = DefParamBaseTy.getBaseTypeIdentifier();
+
+ if (Context.hasSameUnqualifiedType(DeclParamBaseTy, DefParamBaseTy) ||
+ (DeclTyName && DeclTyName == DefTyName))
+ Params.push_back(Idx);
+ else // The two parameters aren't even close
+ return false;
+ }
+
+ return true;
+}
+
+/// RebuildDeclaratorInCurrentInstantiation - Checks whether the given
+/// declarator needs to be rebuilt in the current instantiation and, if so,
+/// rebuilds it in place; returns true on failure.
+/// Any bits of the declarator which appear before the name are valid for
+/// consideration here. That's specifically the type in the decl spec
+/// and the base type in any member-pointer chunks.
+static bool RebuildDeclaratorInCurrentInstantiation(Sema &S, Declarator &D,
+ DeclarationName Name) {
+ // The types we specifically need to rebuild are:
+ // - typenames, typeofs, and decltypes
+ // - types which will become injected class names
+ // Of course, we also need to rebuild any type referencing such a
+ // type. It's safest to just say "dependent", but we call out a
+ // few cases here.
+
+ DeclSpec &DS = D.getMutableDeclSpec();
+ switch (DS.getTypeSpecType()) {
+ case DeclSpec::TST_typename:
+ case DeclSpec::TST_typeofType:
+ case DeclSpec::TST_underlyingType:
+ case DeclSpec::TST_atomic: {
+ // Grab the type from the parser.
+ TypeSourceInfo *TSI = nullptr;
+ QualType T = S.GetTypeFromParser(DS.getRepAsType(), &TSI);
+ if (T.isNull() || !T->isDependentType()) break;
+
+ // Make sure there's a type source info. This isn't really much
+ // of a waste; most dependent types should have type source info
+ // attached already.
+ if (!TSI)
+ TSI = S.Context.getTrivialTypeSourceInfo(T, DS.getTypeSpecTypeLoc());
+
+ // Rebuild the type in the current instantiation.
+ TSI = S.RebuildTypeInCurrentInstantiation(TSI, D.getIdentifierLoc(), Name);
+ if (!TSI) return true;
+
+ // Store the new type back in the decl spec.
+ ParsedType LocType = S.CreateParsedType(TSI->getType(), TSI);
+ DS.UpdateTypeRep(LocType);
+ break;
+ }
+
+ case DeclSpec::TST_decltype:
+ case DeclSpec::TST_typeofExpr: {
+ Expr *E = DS.getRepAsExpr();
+ ExprResult Result = S.RebuildExprInCurrentInstantiation(E);
+ if (Result.isInvalid()) return true;
+ DS.UpdateExprRep(Result.get());
+ break;
+ }
+
+ default:
+ // Nothing to do for these decl specs.
+ break;
+ }
+
+ // It doesn't matter what order we do this in.
+ for (unsigned I = 0, E = D.getNumTypeObjects(); I != E; ++I) {
+ DeclaratorChunk &Chunk = D.getTypeObject(I);
+
+ // The only type information in the declarator which can come
+ // before the declaration name is the base type of a member
+ // pointer.
+ if (Chunk.Kind != DeclaratorChunk::MemberPointer)
+ continue;
+
+ // Rebuild the scope specifier in-place.
+ CXXScopeSpec &SS = Chunk.Mem.Scope();
+ if (S.RebuildNestedNameSpecifierInCurrentInstantiation(SS))
+ return true;
+ }
+
+ return false;
+}
+
+Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) {
+ D.setFunctionDefinitionKind(FDK_Declaration);
+ Decl *Dcl = HandleDeclarator(S, D, MultiTemplateParamsArg());
+
+ if (OriginalLexicalContext && OriginalLexicalContext->isObjCContainer() &&
+ Dcl && Dcl->getDeclContext()->isFileContext())
+ Dcl->setTopLevelDeclInObjCContainer();
+
+ return Dcl;
+}
+
+/// DiagnoseClassNameShadow - Implement C++ [class.mem]p13:
+/// If T is the name of a class, then each of the following shall have a
+/// name different from T:
+/// - every static data member of class T;
+/// - every member function of class T
+/// - every member of class T that is itself a type;
+/// \returns true if the declaration name violates these rules.
+bool Sema::DiagnoseClassNameShadow(DeclContext *DC,
+ DeclarationNameInfo NameInfo) {
+ DeclarationName Name = NameInfo.getName();
+
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC))
+ if (Record->getIdentifier() && Record->getDeclName() == Name) {
+ Diag(NameInfo.getLoc(), diag::err_member_name_of_class) << Name;
+ return true;
+ }
+
+ return false;
+}
+
+/// \brief Diagnose a declaration whose declarator-id has the given
+/// nested-name-specifier.
+///
+/// \param SS The nested-name-specifier of the declarator-id.
+///
+/// \param DC The declaration context to which the nested-name-specifier
+/// resolves.
+///
+/// \param Name The name of the entity being declared.
+///
+/// \param Loc The location of the name of the entity being declared.
+///
+/// \returns true if we cannot safely recover from this error, false otherwise.
+bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
+ DeclarationName Name,
+ SourceLocation Loc) {
+ DeclContext *Cur = CurContext;
+ while (isa<LinkageSpecDecl>(Cur) || isa<CapturedDecl>(Cur))
+ Cur = Cur->getParent();
+
+ // If the user provided a superfluous scope specifier that refers back to the
+ // class in which the entity is already declared, diagnose and ignore it.
+ //
+ // class X {
+ // void X::f();
+ // };
+ //
+ // Note, it was once ill-formed to give redundant qualification in all
+ // contexts, but that rule was removed by DR482.
+ if (Cur->Equals(DC)) {
+ if (Cur->isRecord()) {
+ Diag(Loc, LangOpts.MicrosoftExt ? diag::warn_member_extra_qualification
+ : diag::err_member_extra_qualification)
+ << Name << FixItHint::CreateRemoval(SS.getRange());
+ SS.clear();
+ } else {
+ Diag(Loc, diag::warn_namespace_member_extra_qualification) << Name;
+ }
+ return false;
+ }
+
+ // Check whether the qualifying scope encloses the scope of the original
+ // declaration.
+ if (!Cur->Encloses(DC)) {
+ if (Cur->isRecord())
+ Diag(Loc, diag::err_member_qualification)
+ << Name << SS.getRange();
+ else if (isa<TranslationUnitDecl>(DC))
+ Diag(Loc, diag::err_invalid_declarator_global_scope)
+ << Name << SS.getRange();
+ else if (isa<FunctionDecl>(Cur))
+ Diag(Loc, diag::err_invalid_declarator_in_function)
+ << Name << SS.getRange();
+ else if (isa<BlockDecl>(Cur))
+ Diag(Loc, diag::err_invalid_declarator_in_block)
+ << Name << SS.getRange();
+ else
+ Diag(Loc, diag::err_invalid_declarator_scope)
+ << Name << cast<NamedDecl>(Cur) << cast<NamedDecl>(DC) << SS.getRange();
+
+ return true;
+ }
+
+ if (Cur->isRecord()) {
+ // Cannot qualify members within a class.
+ Diag(Loc, diag::err_member_qualification)
+ << Name << SS.getRange();
+ SS.clear();
+
+ // C++ constructors and destructors with incorrect scopes can break
+ // our AST invariants by having the wrong underlying types. If
+ // that's the case, then drop this declaration entirely.
+ if ((Name.getNameKind() == DeclarationName::CXXConstructorName ||
+ Name.getNameKind() == DeclarationName::CXXDestructorName) &&
+ !Context.hasSameType(Name.getCXXNameType(),
+ Context.getTypeDeclType(cast<CXXRecordDecl>(Cur))))
+ return true;
+
+ return false;
+ }
+
+ // C++11 [dcl.meaning]p1:
+ // [...] "The nested-name-specifier of the qualified declarator-id shall
+ // not begin with a decltype-specifier"
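+ //
+ // For instance:
+ //   struct A { void f(); };
+ //   A a;
+ //   void decltype(a)::f() {}  // ill-formed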
+ NestedNameSpecifierLoc SpecLoc(SS.getScopeRep(), SS.location_data());
+ while (SpecLoc.getPrefix())
+ SpecLoc = SpecLoc.getPrefix();
+ if (dyn_cast_or_null<DecltypeType>(
+ SpecLoc.getNestedNameSpecifier()->getAsType()))
+ Diag(Loc, diag::err_decltype_in_declarator)
+ << SpecLoc.getTypeLoc().getSourceRange();
+
+ return false;
+}
+
+NamedDecl *Sema::HandleDeclarator(Scope *S, Declarator &D,
+ MultiTemplateParamsArg TemplateParamLists) {
+ // TODO: consider using NameInfo for diagnostic.
+ DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
+ DeclarationName Name = NameInfo.getName();
+
+ // All of these full declarators require an identifier. If it doesn't have
+ // one, the ParsedFreeStandingDeclSpec action should be used.
+ if (!Name) {
+ if (!D.isInvalidType()) // Reject this if we think it is valid.
+ Diag(D.getDeclSpec().getLocStart(),
+ diag::err_declarator_need_ident)
+ << D.getDeclSpec().getSourceRange() << D.getSourceRange();
+ return nullptr;
+ } else if (DiagnoseUnexpandedParameterPack(NameInfo, UPPC_DeclarationType))
+ return nullptr;
+
+ // The scope passed in may not be a decl scope. Zip up the scope tree until
+ // we find one that is.
+ while ((S->getFlags() & Scope::DeclScope) == 0 ||
+ (S->getFlags() & Scope::TemplateParamScope) != 0)
+ S = S->getParent();
+
+ DeclContext *DC = CurContext;
+ if (D.getCXXScopeSpec().isInvalid())
+ D.setInvalidType();
+ else if (D.getCXXScopeSpec().isSet()) {
+ if (DiagnoseUnexpandedParameterPack(D.getCXXScopeSpec(),
+ UPPC_DeclarationQualifier))
+ return nullptr;
+
+ bool EnteringContext = !D.getDeclSpec().isFriendSpecified();
+ DC = computeDeclContext(D.getCXXScopeSpec(), EnteringContext);
+ if (!DC || isa<EnumDecl>(DC)) {
+ // If we could not compute the declaration context, it's because the
+ // declaration context is dependent but does not refer to a class,
+ // class template, or class template partial specialization. Complain
+ // and return early, to avoid the coming semantic disaster.
+ Diag(D.getIdentifierLoc(),
+ diag::err_template_qualified_declarator_no_match)
+ << D.getCXXScopeSpec().getScopeRep()
+ << D.getCXXScopeSpec().getRange();
+ return nullptr;
+ }
+ bool IsDependentContext = DC->isDependentContext();
+
+ if (!IsDependentContext &&
+ RequireCompleteDeclContext(D.getCXXScopeSpec(), DC))
+ return nullptr;
+
+ // If a class is incomplete, do not parse entities inside it.
+ if (isa<CXXRecordDecl>(DC) && !cast<CXXRecordDecl>(DC)->hasDefinition()) {
+ Diag(D.getIdentifierLoc(),
+ diag::err_member_def_undefined_record)
+ << Name << DC << D.getCXXScopeSpec().getRange();
+ return nullptr;
+ }
+ if (!D.getDeclSpec().isFriendSpecified()) {
+ if (diagnoseQualifiedDeclaration(D.getCXXScopeSpec(), DC,
+ Name, D.getIdentifierLoc())) {
+ if (DC->isRecord())
+ return nullptr;
+
+ D.setInvalidType();
+ }
+ }
+
+ // Check whether we need to rebuild the type of the given
+ // declaration in the current instantiation.
+ if (EnteringContext && IsDependentContext &&
+ TemplateParamLists.size() != 0) {
+ ContextRAII SavedContext(*this, DC);
+ if (RebuildDeclaratorInCurrentInstantiation(*this, D, Name))
+ D.setInvalidType();
+ }
+ }
+
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType R = TInfo->getType();
+
+ if (!R->isFunctionType() && DiagnoseClassNameShadow(DC, NameInfo))
+ // If this is a typedef, we'll end up spewing multiple diagnostics.
+ // Just return early; it's safer. If this is a function, let the
+ // "constructor cannot have a return type" diagnostic handle it.
+ if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef)
+ return nullptr;
+
+ if (DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo,
+ UPPC_DeclarationType))
+ D.setInvalidType();
+
+ LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
+ ForRedeclaration);
+
+ // See if this is a redefinition of a variable in the same scope.
+ if (!D.getCXXScopeSpec().isSet()) {
+ bool IsLinkageLookup = false;
+ bool CreateBuiltins = false;
+
+ // If the declaration we're planning to build will be a function
+ // or object with linkage, then look for another declaration with
+ // linkage (C99 6.2.2p4-5 and C++ [basic.link]p6).
+ //
+ // If the declaration we're planning to build will be declared with
+ // external linkage in the translation unit, create any builtin with
+ // the same name.
+ if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef)
+ /* Do nothing */;
+ else if (CurContext->isFunctionOrMethod() &&
+ (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_extern ||
+ R->isFunctionType())) {
+ IsLinkageLookup = true;
+ CreateBuiltins =
+ CurContext->getEnclosingNamespaceContext()->isTranslationUnit();
+ } else if (CurContext->getRedeclContext()->isTranslationUnit() &&
+ D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static)
+ CreateBuiltins = true;
+
+ if (IsLinkageLookup)
+ Previous.clear(LookupRedeclarationWithLinkage);
+
+ LookupName(Previous, S, CreateBuiltins);
+ } else { // Something like "int foo::x;"
+ LookupQualifiedName(Previous, DC);
+
+ // C++ [dcl.meaning]p1:
+ // When the declarator-id is qualified, the declaration shall refer to a
+ // previously declared member of the class or namespace to which the
+ // qualifier refers (or, in the case of a namespace, of an element of the
+ // inline namespace set of that namespace (7.3.1)) or to a specialization
+ // thereof; [...]
+ //
+ // Note that we already checked the context above, and that we do not have
+ // enough information to make sure that Previous contains the declaration
+ // we want to match. For example, given:
+ //
+ // class X {
+ // void f();
+ // void f(float);
+ // };
+ //
+ // void X::f(int) { } // ill-formed
+ //
+ // In this case, Previous will point to the overload set
+ // containing the two f's declared in X, but neither of them
+ // matches.
+
+ // C++ [dcl.meaning]p1:
+ // [...] the member shall not merely have been introduced by a
+ // using-declaration in the scope of the class or namespace nominated by
+ // the nested-name-specifier of the declarator-id.
+ RemoveUsingDecls(Previous);
+ }
+
+ if (Previous.isSingleResult() &&
+ Previous.getFoundDecl()->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ if (!D.isInvalidType())
+ DiagnoseTemplateParameterShadow(D.getIdentifierLoc(),
+ Previous.getFoundDecl());
+
+ // Just pretend that we didn't see the previous declaration.
+ Previous.clear();
+ }
+
+ // In C++, the previous declaration we find might be a tag type
+ // (class or enum). In this case, the new declaration will hide the
+  // tag type. Note that this does not apply if we're declaring a
+ // typedef (C++ [dcl.typedef]p4).
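+  //
+  // For example:
+  //
+  //   struct S {};
+  //   int S;         // OK: the variable hides the tag 'S'
+  //   typedef int S; // ill-formed: a typedef does not hide the tag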
+ if (Previous.isSingleTagDecl() &&
+ D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef)
+ Previous.clear();
+
+ // Check that there are no default arguments other than in the parameters
+ // of a function declaration (C++ only).
+ if (getLangOpts().CPlusPlus)
+ CheckExtraCXXDefaultArguments(D);
+
+ if (D.getDeclSpec().isConceptSpecified()) {
+ // C++ Concepts TS [dcl.spec.concept]p1: The concept specifier shall be
+ // applied only to the definition of a function template or variable
+ // template, declared in namespace scope
+ if (!TemplateParamLists.size()) {
+ Diag(D.getDeclSpec().getConceptSpecLoc(),
+           diag::err_concept_wrong_decl_kind);
+ return nullptr;
+ }
+
+ if (!DC->getRedeclContext()->isFileContext()) {
+ Diag(D.getIdentifierLoc(),
+ diag::err_concept_decls_may_only_appear_in_namespace_scope);
+ return nullptr;
+ }
+ }
+
+ NamedDecl *New;
+
+ bool AddToScope = true;
+ if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef) {
+ if (TemplateParamLists.size()) {
+ Diag(D.getIdentifierLoc(), diag::err_template_typedef);
+ return nullptr;
+ }
+
+ New = ActOnTypedefDeclarator(S, D, DC, TInfo, Previous);
+ } else if (R->isFunctionType()) {
+ New = ActOnFunctionDeclarator(S, D, DC, TInfo, Previous,
+ TemplateParamLists,
+ AddToScope);
+ } else {
+ New = ActOnVariableDeclarator(S, D, DC, TInfo, Previous, TemplateParamLists,
+ AddToScope);
+ }
+
+ if (!New)
+ return nullptr;
+
+ // If this has an identifier and is not an invalid redeclaration or
+ // function template specialization, add it to the scope stack.
+ if (New->getDeclName() && AddToScope &&
+ !(D.isRedeclaration() && New->isInvalidDecl())) {
+ // Only make a locally-scoped extern declaration visible if it is the first
+ // declaration of this entity. Qualified lookup for such an entity should
+ // only find this declaration if there is no visible declaration of it.
+ bool AddToContext = !D.isRedeclaration() || !New->isLocalExternDecl();
+ PushOnScopeChains(New, S, AddToContext);
+ if (!AddToContext)
+ CurContext->addHiddenDecl(New);
+ }
+
+ return New;
+}
+
+/// Helper method to turn variable array types into constant array
+/// types in certain situations which would otherwise be errors (for
+/// GCC compatibility).
+static QualType TryToFixInvalidVariablyModifiedType(QualType T,
+ ASTContext &Context,
+ bool &SizeIsNegative,
+ llvm::APSInt &Oversized) {
+ // This method tries to turn a variable array into a constant
+ // array even when the size isn't an ICE. This is necessary
+ // for compatibility with code that depends on gcc's buggy
+ // constant expression folding, like struct {char x[(int)(char*)2];}
+ SizeIsNegative = false;
+ Oversized = 0;
+
+ if (T->isDependentType())
+ return QualType();
+
+ QualifierCollector Qs;
+ const Type *Ty = Qs.strip(T);
+
+ if (const PointerType* PTy = dyn_cast<PointerType>(Ty)) {
+ QualType Pointee = PTy->getPointeeType();
+ QualType FixedType =
+ TryToFixInvalidVariablyModifiedType(Pointee, Context, SizeIsNegative,
+ Oversized);
+ if (FixedType.isNull()) return FixedType;
+ FixedType = Context.getPointerType(FixedType);
+ return Qs.apply(Context, FixedType);
+ }
+ if (const ParenType* PTy = dyn_cast<ParenType>(Ty)) {
+ QualType Inner = PTy->getInnerType();
+ QualType FixedType =
+ TryToFixInvalidVariablyModifiedType(Inner, Context, SizeIsNegative,
+ Oversized);
+ if (FixedType.isNull()) return FixedType;
+ FixedType = Context.getParenType(FixedType);
+ return Qs.apply(Context, FixedType);
+ }
+
+ const VariableArrayType* VLATy = dyn_cast<VariableArrayType>(T);
+ if (!VLATy)
+ return QualType();
+ // FIXME: We should probably handle this case
+ if (VLATy->getElementType()->isVariablyModifiedType())
+ return QualType();
+
+ llvm::APSInt Res;
+ if (!VLATy->getSizeExpr() ||
+ !VLATy->getSizeExpr()->EvaluateAsInt(Res, Context))
+ return QualType();
+
+ // Check whether the array size is negative.
+ if (Res.isSigned() && Res.isNegative()) {
+ SizeIsNegative = true;
+ return QualType();
+ }
+
+ // Check whether the array is too large to be addressed.
+ unsigned ActiveSizeBits
+ = ConstantArrayType::getNumAddressingBits(Context, VLATy->getElementType(),
+ Res);
+ if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) {
+ Oversized = Res;
+ return QualType();
+ }
+
+ return Context.getConstantArrayType(VLATy->getElementType(),
+ Res, ArrayType::Normal, 0);
+}
+
+static void
+FixInvalidVariablyModifiedTypeLoc(TypeLoc SrcTL, TypeLoc DstTL) {
+ SrcTL = SrcTL.getUnqualifiedLoc();
+ DstTL = DstTL.getUnqualifiedLoc();
+ if (PointerTypeLoc SrcPTL = SrcTL.getAs<PointerTypeLoc>()) {
+ PointerTypeLoc DstPTL = DstTL.castAs<PointerTypeLoc>();
+ FixInvalidVariablyModifiedTypeLoc(SrcPTL.getPointeeLoc(),
+ DstPTL.getPointeeLoc());
+ DstPTL.setStarLoc(SrcPTL.getStarLoc());
+ return;
+ }
+ if (ParenTypeLoc SrcPTL = SrcTL.getAs<ParenTypeLoc>()) {
+ ParenTypeLoc DstPTL = DstTL.castAs<ParenTypeLoc>();
+ FixInvalidVariablyModifiedTypeLoc(SrcPTL.getInnerLoc(),
+ DstPTL.getInnerLoc());
+ DstPTL.setLParenLoc(SrcPTL.getLParenLoc());
+ DstPTL.setRParenLoc(SrcPTL.getRParenLoc());
+ return;
+ }
+ ArrayTypeLoc SrcATL = SrcTL.castAs<ArrayTypeLoc>();
+ ArrayTypeLoc DstATL = DstTL.castAs<ArrayTypeLoc>();
+ TypeLoc SrcElemTL = SrcATL.getElementLoc();
+ TypeLoc DstElemTL = DstATL.getElementLoc();
+ DstElemTL.initializeFullCopy(SrcElemTL);
+ DstATL.setLBracketLoc(SrcATL.getLBracketLoc());
+ DstATL.setSizeExpr(SrcATL.getSizeExpr());
+ DstATL.setRBracketLoc(SrcATL.getRBracketLoc());
+}
+
+/// Helper method to turn variable array types into constant array
+/// types in certain situations which would otherwise be errors (for
+/// GCC compatibility).
+static TypeSourceInfo*
+TryToFixInvalidVariablyModifiedTypeSourceInfo(TypeSourceInfo *TInfo,
+ ASTContext &Context,
+ bool &SizeIsNegative,
+ llvm::APSInt &Oversized) {
+ QualType FixedTy
+ = TryToFixInvalidVariablyModifiedType(TInfo->getType(), Context,
+ SizeIsNegative, Oversized);
+ if (FixedTy.isNull())
+ return nullptr;
+ TypeSourceInfo *FixedTInfo = Context.getTrivialTypeSourceInfo(FixedTy);
+ FixInvalidVariablyModifiedTypeLoc(TInfo->getTypeLoc(),
+ FixedTInfo->getTypeLoc());
+ return FixedTInfo;
+}
+
+/// \brief Register the given locally-scoped extern "C" declaration so
+/// that it can be found later for redeclarations. We include any extern "C"
+/// declaration that is not visible in the translation unit here, not just
+/// function-scope declarations.
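+///
+/// For example, in C:
+///
+///   void f() { extern int n; } // 'n' is registered here...
+///   int n;                     // ...and found again for this redeclaration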
+void
+Sema::RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S) {
+ if (!getLangOpts().CPlusPlus &&
+ ND->getLexicalDeclContext()->getRedeclContext()->isTranslationUnit())
+ // Don't need to track declarations in the TU in C.
+ return;
+
+ // Note that we have a locally-scoped external with this name.
+ Context.getExternCContextDecl()->makeDeclVisibleInContext(ND);
+}
+
+NamedDecl *Sema::findLocallyScopedExternCDecl(DeclarationName Name) {
+ // FIXME: We can have multiple results via __attribute__((overloadable)).
+ auto Result = Context.getExternCContextDecl()->lookup(Name);
+ return Result.empty() ? nullptr : *Result.begin();
+}
+
+/// \brief Diagnose function specifiers on a declaration of an identifier that
+/// does not identify a function.
+void Sema::DiagnoseFunctionSpecifiers(const DeclSpec &DS) {
+ // FIXME: We should probably indicate the identifier in question to avoid
+ // confusion for constructs like "inline int a(), b;"
+ if (DS.isInlineSpecified())
+ Diag(DS.getInlineSpecLoc(),
+ diag::err_inline_non_function);
+
+ if (DS.isVirtualSpecified())
+ Diag(DS.getVirtualSpecLoc(),
+ diag::err_virtual_non_function);
+
+ if (DS.isExplicitSpecified())
+ Diag(DS.getExplicitSpecLoc(),
+ diag::err_explicit_non_function);
+
+ if (DS.isNoreturnSpecified())
+ Diag(DS.getNoreturnSpecLoc(),
+ diag::err_noreturn_non_function);
+}
+
+NamedDecl*
+Sema::ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
+ TypeSourceInfo *TInfo, LookupResult &Previous) {
+ // Typedef declarators cannot be qualified (C++ [dcl.meaning]p1).
+ if (D.getCXXScopeSpec().isSet()) {
+ Diag(D.getIdentifierLoc(), diag::err_qualified_typedef_declarator)
+ << D.getCXXScopeSpec().getRange();
+ D.setInvalidType();
+ // Pretend we didn't see the scope specifier.
+ DC = CurContext;
+ Previous.clear();
+ }
+
+ DiagnoseFunctionSpecifiers(D.getDeclSpec());
+
+ if (D.getDeclSpec().isConstexprSpecified())
+ Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_invalid_constexpr)
+ << 1;
+ if (D.getDeclSpec().isConceptSpecified())
+ Diag(D.getDeclSpec().getConceptSpecLoc(),
+ diag::err_concept_wrong_decl_kind);
+
+ if (D.getName().Kind != UnqualifiedId::IK_Identifier) {
+ Diag(D.getName().StartLocation, diag::err_typedef_not_identifier)
+ << D.getName().getSourceRange();
+ return nullptr;
+ }
+
+ TypedefDecl *NewTD = ParseTypedefDecl(S, D, TInfo->getType(), TInfo);
+ if (!NewTD) return nullptr;
+
+  // Handle attributes prior to checking for duplicates in
+  // MergeTypedefNameDecl.
+ ProcessDeclAttributes(S, NewTD, D);
+
+ CheckTypedefForVariablyModifiedType(S, NewTD);
+
+ bool Redeclaration = D.isRedeclaration();
+ NamedDecl *ND = ActOnTypedefNameDecl(S, DC, NewTD, Previous, Redeclaration);
+ D.setRedeclaration(Redeclaration);
+ return ND;
+}
+
+void
+Sema::CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *NewTD) {
+ // C99 6.7.7p2: If a typedef name specifies a variably modified type
+ // then it shall have block scope.
+ // Note that variably modified types must be fixed before merging the decl so
+ // that redeclarations will match.
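+  //
+  // For example, at file scope:
+  //
+  //   int n = 10;
+  //   typedef int A[n]; // ill-formed: VLA typedef outside block scope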
+ TypeSourceInfo *TInfo = NewTD->getTypeSourceInfo();
+ QualType T = TInfo->getType();
+ if (T->isVariablyModifiedType()) {
+ getCurFunction()->setHasBranchProtectedScope();
+
+ if (S->getFnParent() == nullptr) {
+ bool SizeIsNegative;
+ llvm::APSInt Oversized;
+ TypeSourceInfo *FixedTInfo =
+ TryToFixInvalidVariablyModifiedTypeSourceInfo(TInfo, Context,
+ SizeIsNegative,
+ Oversized);
+ if (FixedTInfo) {
+ Diag(NewTD->getLocation(), diag::warn_illegal_constant_array_size);
+ NewTD->setTypeSourceInfo(FixedTInfo);
+ } else {
+ if (SizeIsNegative)
+ Diag(NewTD->getLocation(), diag::err_typecheck_negative_array_size);
+ else if (T->isVariableArrayType())
+ Diag(NewTD->getLocation(), diag::err_vla_decl_in_file_scope);
+ else if (Oversized.getBoolValue())
+ Diag(NewTD->getLocation(), diag::err_array_too_large)
+ << Oversized.toString(10);
+ else
+ Diag(NewTD->getLocation(), diag::err_vm_decl_in_file_scope);
+ NewTD->setInvalidDecl();
+ }
+ }
+ }
+}
+
+
+/// ActOnTypedefNameDecl - Perform semantic checking for a declaration which
+/// declares a typedef-name, either using the 'typedef' type specifier or via
+/// a C++0x [dcl.typedef]p2 alias-declaration: 'using T = A;'.
+NamedDecl*
+Sema::ActOnTypedefNameDecl(Scope *S, DeclContext *DC, TypedefNameDecl *NewTD,
+ LookupResult &Previous, bool &Redeclaration) {
+ // Merge the decl with the existing one if appropriate. If the decl is
+ // in an outer scope, it isn't the same thing.
+ FilterLookupForScope(Previous, DC, S, /*ConsiderLinkage*/false,
+ /*AllowInlineNamespace*/false);
+ filterNonConflictingPreviousTypedefDecls(*this, NewTD, Previous);
+ if (!Previous.empty()) {
+ Redeclaration = true;
+ MergeTypedefNameDecl(S, NewTD, Previous);
+ }
+
+ // If this is the C FILE type, notify the AST context.
+ if (IdentifierInfo *II = NewTD->getIdentifier())
+ if (!NewTD->isInvalidDecl() &&
+ NewTD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
+ if (II->isStr("FILE"))
+ Context.setFILEDecl(NewTD);
+ else if (II->isStr("jmp_buf"))
+ Context.setjmp_bufDecl(NewTD);
+ else if (II->isStr("sigjmp_buf"))
+ Context.setsigjmp_bufDecl(NewTD);
+ else if (II->isStr("ucontext_t"))
+ Context.setucontext_tDecl(NewTD);
+ }
+
+ return NewTD;
+}
+
+/// \brief Determines whether the given declaration is an out-of-scope
+/// previous declaration.
+///
+/// This routine should be invoked when name lookup has found a
+/// previous declaration (PrevDecl) that is not in the scope where a
+/// new declaration by the same name is being introduced. If the new
+/// declaration occurs in a local scope, previous declarations with
+/// linkage may still be considered previous declarations (C99
+/// 6.2.2p4-5, C++ [basic.link]p6).
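+///
+/// For example:
+///
+///   void f();
+///   void g() {
+///     void f(); // ::f is the out-of-scope previous declaration
+///   }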
+///
+/// \param PrevDecl the previous declaration found by name
+/// lookup
+///
+/// \param DC the context in which the new declaration is being
+/// declared.
+///
+/// \returns true if PrevDecl is an out-of-scope previous declaration
+/// for a new declaration with the same name.
+static bool
+isOutOfScopePreviousDeclaration(NamedDecl *PrevDecl, DeclContext *DC,
+ ASTContext &Context) {
+ if (!PrevDecl)
+ return false;
+
+ if (!PrevDecl->hasLinkage())
+ return false;
+
+ if (Context.getLangOpts().CPlusPlus) {
+ // C++ [basic.link]p6:
+ // If there is a visible declaration of an entity with linkage
+ // having the same name and type, ignoring entities declared
+ // outside the innermost enclosing namespace scope, the block
+ // scope declaration declares that same entity and receives the
+ // linkage of the previous declaration.
+ DeclContext *OuterContext = DC->getRedeclContext();
+ if (!OuterContext->isFunctionOrMethod())
+ // This rule only applies to block-scope declarations.
+ return false;
+
+ DeclContext *PrevOuterContext = PrevDecl->getDeclContext();
+ if (PrevOuterContext->isRecord())
+ // We found a member function: ignore it.
+ return false;
+
+ // Find the innermost enclosing namespace for the new and
+ // previous declarations.
+ OuterContext = OuterContext->getEnclosingNamespaceContext();
+ PrevOuterContext = PrevOuterContext->getEnclosingNamespaceContext();
+
+ // The previous declaration is in a different namespace, so it
+ // isn't the same function.
+ if (!OuterContext->Equals(PrevOuterContext))
+ return false;
+ }
+
+ return true;
+}
+
+static void SetNestedNameSpecifier(DeclaratorDecl *DD, Declarator &D) {
+ CXXScopeSpec &SS = D.getCXXScopeSpec();
+ if (!SS.isSet()) return;
+ DD->setQualifierInfo(SS.getWithLocInContext(DD->getASTContext()));
+}
+
+bool Sema::inferObjCARCLifetime(ValueDecl *decl) {
+ QualType type = decl->getType();
+ Qualifiers::ObjCLifetime lifetime = type.getObjCLifetime();
+ if (lifetime == Qualifiers::OCL_Autoreleasing) {
+ // Various kinds of declaration aren't allowed to be __autoreleasing.
+ unsigned kind = -1U;
+ if (VarDecl *var = dyn_cast<VarDecl>(decl)) {
+ if (var->hasAttr<BlocksAttr>())
+ kind = 0; // __block
+ else if (!var->hasLocalStorage())
+ kind = 1; // global
+ } else if (isa<ObjCIvarDecl>(decl)) {
+ kind = 3; // ivar
+ } else if (isa<FieldDecl>(decl)) {
+ kind = 2; // field
+ }
+
+ if (kind != -1U) {
+ Diag(decl->getLocation(), diag::err_arc_autoreleasing_var)
+ << kind;
+ }
+ } else if (lifetime == Qualifiers::OCL_None) {
+ // Try to infer lifetime.
+ if (!type->isObjCLifetimeType())
+ return false;
+
+ lifetime = type->getObjCARCImplicitLifetime();
+ type = Context.getLifetimeQualifiedType(type, lifetime);
+ decl->setType(type);
+ }
+
+ if (VarDecl *var = dyn_cast<VarDecl>(decl)) {
+ // Thread-local variables cannot have lifetime.
+ if (lifetime && lifetime != Qualifiers::OCL_ExplicitNone &&
+ var->getTLSKind()) {
+ Diag(var->getLocation(), diag::err_arc_thread_ownership)
+ << var->getType();
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void checkAttributesAfterMerging(Sema &S, NamedDecl &ND) {
+ // Ensure that an auto decl is deduced otherwise the checks below might cache
+ // the wrong linkage.
+ assert(S.ParsingInitForAutoVars.count(&ND) == 0);
+
+ // 'weak' only applies to declarations with external linkage.
+ if (WeakAttr *Attr = ND.getAttr<WeakAttr>()) {
+ if (!ND.isExternallyVisible()) {
+ S.Diag(Attr->getLocation(), diag::err_attribute_weak_static);
+ ND.dropAttr<WeakAttr>();
+ }
+ }
+ if (WeakRefAttr *Attr = ND.getAttr<WeakRefAttr>()) {
+ if (ND.isExternallyVisible()) {
+ S.Diag(Attr->getLocation(), diag::err_attribute_weakref_not_static);
+ ND.dropAttr<WeakRefAttr>();
+ ND.dropAttr<AliasAttr>();
+ }
+ }
+
+ if (auto *VD = dyn_cast<VarDecl>(&ND)) {
+ if (VD->hasInit()) {
+ if (const auto *Attr = VD->getAttr<AliasAttr>()) {
+ assert(VD->isThisDeclarationADefinition() &&
+ !VD->isExternallyVisible() && "Broken AliasAttr handled late!");
+ S.Diag(Attr->getLocation(), diag::err_alias_is_definition) << VD;
+ VD->dropAttr<AliasAttr>();
+ }
+ }
+ }
+
+ // 'selectany' only applies to externally visible variable declarations.
+ // It does not apply to functions.
+ if (SelectAnyAttr *Attr = ND.getAttr<SelectAnyAttr>()) {
+ if (isa<FunctionDecl>(ND) || !ND.isExternallyVisible()) {
+ S.Diag(Attr->getLocation(),
+ diag::err_attribute_selectany_non_extern_data);
+ ND.dropAttr<SelectAnyAttr>();
+ }
+ }
+
+ if (const InheritableAttr *Attr = getDLLAttr(&ND)) {
+ // dll attributes require external linkage. Static locals may have external
+ // linkage but still cannot be explicitly imported or exported.
+ auto *VD = dyn_cast<VarDecl>(&ND);
+ if (!ND.isExternallyVisible() || (VD && VD->isStaticLocal())) {
+ S.Diag(ND.getLocation(), diag::err_attribute_dll_not_extern)
+ << &ND << Attr;
+ ND.setInvalidDecl();
+ }
+ }
+
+ // Virtual functions cannot be marked as 'notail'.
+ if (auto *Attr = ND.getAttr<NotTailCalledAttr>())
+ if (auto *MD = dyn_cast<CXXMethodDecl>(&ND))
+ if (MD->isVirtual()) {
+ S.Diag(ND.getLocation(),
+ diag::err_invalid_attribute_on_virtual_function)
+ << Attr;
+ ND.dropAttr<NotTailCalledAttr>();
+ }
+}
+
+static void checkDLLAttributeRedeclaration(Sema &S, NamedDecl *OldDecl,
+ NamedDecl *NewDecl,
+ bool IsSpecialization) {
+ if (TemplateDecl *OldTD = dyn_cast<TemplateDecl>(OldDecl))
+ OldDecl = OldTD->getTemplatedDecl();
+ if (TemplateDecl *NewTD = dyn_cast<TemplateDecl>(NewDecl))
+ NewDecl = NewTD->getTemplatedDecl();
+
+ if (!OldDecl || !NewDecl)
+ return;
+
+ const DLLImportAttr *OldImportAttr = OldDecl->getAttr<DLLImportAttr>();
+ const DLLExportAttr *OldExportAttr = OldDecl->getAttr<DLLExportAttr>();
+ const DLLImportAttr *NewImportAttr = NewDecl->getAttr<DLLImportAttr>();
+ const DLLExportAttr *NewExportAttr = NewDecl->getAttr<DLLExportAttr>();
+
+ // dllimport and dllexport are inheritable attributes so we have to exclude
+ // inherited attribute instances.
+ bool HasNewAttr = (NewImportAttr && !NewImportAttr->isInherited()) ||
+ (NewExportAttr && !NewExportAttr->isInherited());
+
+ // A redeclaration is not allowed to add a dllimport or dllexport attribute,
+ // the only exception being explicit specializations.
+ // Implicitly generated declarations are also excluded for now because there
+ // is no other way to switch these to use dllimport or dllexport.
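+  //
+  // For example:
+  //
+  //   void f();
+  //   __declspec(dllexport) void f(); // diagnosed: the redeclaration adds
+  //                                   // 'dllexport' (a warning for free
+  //                                   // functions, an error otherwise)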
+ bool AddsAttr = !(OldImportAttr || OldExportAttr) && HasNewAttr;
+
+ if (AddsAttr && !IsSpecialization && !OldDecl->isImplicit()) {
+ // Allow with a warning for free functions and global variables.
+ bool JustWarn = false;
+ if (!OldDecl->isCXXClassMember()) {
+ auto *VD = dyn_cast<VarDecl>(OldDecl);
+ if (VD && !VD->getDescribedVarTemplate())
+ JustWarn = true;
+ auto *FD = dyn_cast<FunctionDecl>(OldDecl);
+ if (FD && FD->getTemplatedKind() == FunctionDecl::TK_NonTemplate)
+ JustWarn = true;
+ }
+
+ // We cannot change a declaration that's been used because IR has already
+ // been emitted. Dllimported functions will still work though (modulo
+ // address equality) as they can use the thunk.
+ if (OldDecl->isUsed())
+ if (!isa<FunctionDecl>(OldDecl) || !NewImportAttr)
+ JustWarn = false;
+
+ unsigned DiagID = JustWarn ? diag::warn_attribute_dll_redeclaration
+ : diag::err_attribute_dll_redeclaration;
+ S.Diag(NewDecl->getLocation(), DiagID)
+ << NewDecl
+ << (NewImportAttr ? (const Attr *)NewImportAttr : NewExportAttr);
+ S.Diag(OldDecl->getLocation(), diag::note_previous_declaration);
+ if (!JustWarn) {
+ NewDecl->setInvalidDecl();
+ return;
+ }
+ }
+
+ // A redeclaration is not allowed to drop a dllimport attribute, the only
+ // exceptions being inline function definitions, local extern declarations,
+ // and qualified friend declarations.
+ // NB: MSVC converts such a declaration to dllexport.
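+  //
+  // For example:
+  //
+  //   __declspec(dllimport) void f();
+  //   void f(); // warning: 'dllimport' is dropped from the redeclaration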
+ bool IsInline = false, IsStaticDataMember = false, IsQualifiedFriend = false;
+ if (const auto *VD = dyn_cast<VarDecl>(NewDecl))
+ // Ignore static data because out-of-line definitions are diagnosed
+ // separately.
+ IsStaticDataMember = VD->isStaticDataMember();
+ else if (const auto *FD = dyn_cast<FunctionDecl>(NewDecl)) {
+ IsInline = FD->isInlined();
+ IsQualifiedFriend = FD->getQualifier() &&
+ FD->getFriendObjectKind() == Decl::FOK_Declared;
+ }
+
+ if (OldImportAttr && !HasNewAttr && !IsInline && !IsStaticDataMember &&
+ !NewDecl->isLocalExternDecl() && !IsQualifiedFriend) {
+ S.Diag(NewDecl->getLocation(),
+ diag::warn_redeclaration_without_attribute_prev_attribute_ignored)
+ << NewDecl << OldImportAttr;
+ S.Diag(OldDecl->getLocation(), diag::note_previous_declaration);
+ S.Diag(OldImportAttr->getLocation(), diag::note_previous_attribute);
+ OldDecl->dropAttr<DLLImportAttr>();
+ NewDecl->dropAttr<DLLImportAttr>();
+ } else if (IsInline && OldImportAttr &&
+ !S.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+    // In MinGW, seeing a function declared inline drops the dllimport
+    // attribute.
+ OldDecl->dropAttr<DLLImportAttr>();
+ NewDecl->dropAttr<DLLImportAttr>();
+ S.Diag(NewDecl->getLocation(),
+ diag::warn_dllimport_dropped_from_inline_function)
+ << NewDecl << OldImportAttr;
+ }
+}
+
+/// Given that we are within the definition of the given function,
+/// will that definition behave like C99's 'inline', where the
+/// definition is discarded except for optimization purposes?
+static bool isFunctionDefinitionDiscarded(Sema &S, FunctionDecl *FD) {
+ // Try to avoid calling GetGVALinkageForFunction.
+
+ // All cases of this require the 'inline' keyword.
+ if (!FD->isInlined()) return false;
+
+ // This is only possible in C++ with the gnu_inline attribute.
+ if (S.getLangOpts().CPlusPlus && !FD->hasAttr<GNUInlineAttr>())
+ return false;
+
+ // Okay, go ahead and call the relatively-more-expensive function.
+
+#ifndef NDEBUG
+ // AST quite reasonably asserts that it's working on a function
+ // definition. We don't really have a way to tell it that we're
+ // currently defining the function, so just lie to it in +Asserts
+ // builds. This is an awful hack.
+ FD->setLazyBody(1);
+#endif
+
+ bool isC99Inline =
+ S.Context.GetGVALinkageForFunction(FD) == GVA_AvailableExternally;
+
+#ifndef NDEBUG
+ FD->setLazyBody(0);
+#endif
+
+ return isC99Inline;
+}
+
+/// Determine whether a variable is extern "C" prior to attaching
+/// an initializer. We can't just call isExternC() here, because that
+/// will also compute and cache whether the declaration is externally
+/// visible, which might change when we attach the initializer.
+///
+/// This can only be used if the declaration is known to not be a
+/// redeclaration of an internal linkage declaration.
+///
+/// For instance:
+///
+/// auto x = []{};
+///
+/// Attaching the initializer here makes this declaration not externally
+/// visible, because its type has internal linkage.
+///
+/// FIXME: This is a hack.
+template<typename T>
+static bool isIncompleteDeclExternC(Sema &S, const T *D) {
+ if (S.getLangOpts().CPlusPlus) {
+ // In C++, the overloadable attribute negates the effects of extern "C".
+ if (!D->isInExternCContext() || D->template hasAttr<OverloadableAttr>())
+ return false;
+
+ // So do CUDA's host/device attributes if overloading is enabled.
+ if (S.getLangOpts().CUDA && S.getLangOpts().CUDATargetOverloads &&
+ (D->template hasAttr<CUDADeviceAttr>() ||
+ D->template hasAttr<CUDAHostAttr>()))
+ return false;
+ }
+ return D->isExternC();
+}
+
+static bool shouldConsiderLinkage(const VarDecl *VD) {
+ const DeclContext *DC = VD->getDeclContext()->getRedeclContext();
+ if (DC->isFunctionOrMethod())
+ return VD->hasExternalStorage();
+ if (DC->isFileContext())
+ return true;
+ if (DC->isRecord())
+ return false;
+ llvm_unreachable("Unexpected context");
+}
+
+static bool shouldConsiderLinkage(const FunctionDecl *FD) {
+ const DeclContext *DC = FD->getDeclContext()->getRedeclContext();
+ if (DC->isFileContext() || DC->isFunctionOrMethod())
+ return true;
+ if (DC->isRecord())
+ return false;
+ llvm_unreachable("Unexpected context");
+}
+
+static bool hasParsedAttr(Scope *S, const AttributeList *AttrList,
+ AttributeList::Kind Kind) {
+ for (const AttributeList *L = AttrList; L; L = L->getNext())
+ if (L->getKind() == Kind)
+ return true;
+ return false;
+}
+
+static bool hasParsedAttr(Scope *S, const Declarator &PD,
+ AttributeList::Kind Kind) {
+ // Check decl attributes on the DeclSpec.
+ if (hasParsedAttr(S, PD.getDeclSpec().getAttributes().getList(), Kind))
+ return true;
+
+ // Walk the declarator structure, checking decl attributes that were in a type
+ // position to the decl itself.
+ for (unsigned I = 0, E = PD.getNumTypeObjects(); I != E; ++I) {
+ if (hasParsedAttr(S, PD.getTypeObject(I).getAttrs(), Kind))
+ return true;
+ }
+
+ // Finally, check attributes on the decl itself.
+ return hasParsedAttr(S, PD.getAttributes(), Kind);
+}
+
+/// Adjust the \c DeclContext for a function or variable that might be a
+/// function-local external declaration.
+bool Sema::adjustContextForLocalExternDecl(DeclContext *&DC) {
+ if (!DC->isFunctionOrMethod())
+ return false;
+
+ // If this is a local extern function or variable declared within a function
+ // template, don't add it into the enclosing namespace scope until it is
+ // instantiated; it might have a dependent type right now.
+ if (DC->isDependentContext())
+ return true;
+
+ // C++11 [basic.link]p7:
+ // When a block scope declaration of an entity with linkage is not found to
+ // refer to some other declaration, then that entity is a member of the
+ // innermost enclosing namespace.
+ //
+ // Per C++11 [namespace.def]p6, the innermost enclosing namespace is a
+ // semantically-enclosing namespace, not a lexically-enclosing one.
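+  //
+  // For example:
+  //
+  //   namespace N { void f() { extern int n; } } // 'n' is a member of N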
+ while (!DC->isFileContext() && !isa<LinkageSpecDecl>(DC))
+ DC = DC->getParent();
+ return true;
+}
+
+/// \brief Returns true if given declaration has external C language linkage.
+static bool isDeclExternC(const Decl *D) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D))
+ return FD->isExternC();
+ if (const auto *VD = dyn_cast<VarDecl>(D))
+ return VD->isExternC();
+
+ llvm_unreachable("Unknown type of decl!");
+}
+
+NamedDecl *
+Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
+ TypeSourceInfo *TInfo, LookupResult &Previous,
+ MultiTemplateParamsArg TemplateParamLists,
+ bool &AddToScope) {
+ QualType R = TInfo->getType();
+ DeclarationName Name = GetNameForDeclarator(D).getName();
+
+ DeclSpec::SCS SCSpec = D.getDeclSpec().getStorageClassSpec();
+ StorageClass SC = StorageClassSpecToVarDeclStorageClass(D.getDeclSpec());
+
+ // dllimport globals without explicit storage class are treated as extern. We
+ // have to change the storage class this early to get the right DeclContext.
+ if (SC == SC_None && !DC->isRecord() &&
+ hasParsedAttr(S, D, AttributeList::AT_DLLImport) &&
+ !hasParsedAttr(S, D, AttributeList::AT_DLLExport))
+ SC = SC_Extern;
+
+ DeclContext *OriginalDC = DC;
+ bool IsLocalExternDecl = SC == SC_Extern &&
+ adjustContextForLocalExternDecl(DC);
+
+ if (getLangOpts().OpenCL) {
+ // OpenCL v1.0 s6.8.a.3: Pointers to functions are not allowed.
+ QualType NR = R;
+ while (NR->isPointerType()) {
+ if (NR->isFunctionPointerType()) {
+ Diag(D.getIdentifierLoc(), diag::err_opencl_function_pointer_variable);
+ D.setInvalidType();
+ break;
+ }
+ NR = NR->getPointeeType();
+ }
+
+ if (!getOpenCLOptions().cl_khr_fp16) {
+ // OpenCL v1.2 s6.1.1.1: reject declaring variables of the half and
+ // half array type (unless the cl_khr_fp16 extension is enabled).
+ if (Context.getBaseElementType(R)->isHalfType()) {
+ Diag(D.getIdentifierLoc(), diag::err_opencl_half_declaration) << R;
+ D.setInvalidType();
+ }
+ }
+ }
+
+ if (SCSpec == DeclSpec::SCS_mutable) {
+ // mutable can only appear on non-static class members, so it's always
+    // an error here.
+ Diag(D.getIdentifierLoc(), diag::err_mutable_nonmember);
+ D.setInvalidType();
+ SC = SC_None;
+ }
+
+ if (getLangOpts().CPlusPlus11 && SCSpec == DeclSpec::SCS_register &&
+ !D.getAsmLabel() && !getSourceManager().isInSystemMacro(
+ D.getDeclSpec().getStorageClassSpecLoc())) {
+ // In C++11, the 'register' storage class specifier is deprecated.
+    // Suppress the warning in system macros; it's used in macros in some
+ // popular C system headers, such as in glibc's htonl() macro.
+ Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ getLangOpts().CPlusPlus1z ? diag::ext_register_storage_class
+ : diag::warn_deprecated_register)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
+ }
+
+ IdentifierInfo *II = Name.getAsIdentifierInfo();
+ if (!II) {
+ Diag(D.getIdentifierLoc(), diag::err_bad_variable_name)
+ << Name;
+ return nullptr;
+ }
+
+ DiagnoseFunctionSpecifiers(D.getDeclSpec());
+
+ if (!DC->isRecord() && S->getFnParent() == nullptr) {
+ // C99 6.9p2: The storage-class specifiers auto and register shall not
+ // appear in the declaration specifiers in an external declaration.
+ // Global Register+Asm is a GNU extension we support.
+ if (SC == SC_Auto || (SC == SC_Register && !D.getAsmLabel())) {
+ Diag(D.getIdentifierLoc(), diag::err_typecheck_sclass_fscope);
+ D.setInvalidType();
+ }
+ }
+
+ if (getLangOpts().OpenCL) {
+ // OpenCL v1.2 s6.9.b p4:
+ // The sampler type cannot be used with the __local and __global address
+ // space qualifiers.
+ if (R->isSamplerT() && (R.getAddressSpace() == LangAS::opencl_local ||
+ R.getAddressSpace() == LangAS::opencl_global)) {
+ Diag(D.getIdentifierLoc(), diag::err_wrong_sampler_addressspace);
+ }
+
+    // OpenCL v1.2 s6.9.r:
+ // The event type cannot be used to declare a program scope variable.
+ // The event type cannot be used with the __local, __constant and __global
+ // address space qualifiers.
+ if (R->isEventT()) {
+ if (S->getParent() == nullptr) {
+ Diag(D.getLocStart(), diag::err_event_t_global_var);
+ D.setInvalidType();
+ }
+
+ if (R.getAddressSpace()) {
+ Diag(D.getLocStart(), diag::err_event_t_addr_space_qual);
+ D.setInvalidType();
+ }
+ }
+ }
+
+ bool IsExplicitSpecialization = false;
+ bool IsVariableTemplateSpecialization = false;
+ bool IsPartialSpecialization = false;
+ bool IsVariableTemplate = false;
+ VarDecl *NewVD = nullptr;
+ VarTemplateDecl *NewTemplate = nullptr;
+ TemplateParameterList *TemplateParams = nullptr;
+ if (!getLangOpts().CPlusPlus) {
+ NewVD = VarDecl::Create(Context, DC, D.getLocStart(),
+ D.getIdentifierLoc(), II,
+ R, TInfo, SC);
+
+ if (D.getDeclSpec().containsPlaceholderType() && R->getContainedAutoType())
+ ParsingInitForAutoVars.insert(NewVD);
+
+ if (D.isInvalidType())
+ NewVD->setInvalidDecl();
+ } else {
+ bool Invalid = false;
+
+ if (DC->isRecord() && !CurContext->isRecord()) {
+ // This is an out-of-line definition of a static data member.
+ switch (SC) {
+ case SC_None:
+ break;
+ case SC_Static:
+ Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ diag::err_static_out_of_line)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
+ break;
+ case SC_Auto:
+ case SC_Register:
+ case SC_Extern:
+ // [dcl.stc] p2: The auto or register specifiers shall be applied only
+ // to names of variables declared in a block or to function parameters.
+ // [dcl.stc] p6: The extern specifier cannot be used in the declaration
+        // of class members.
+
+ Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ diag::err_storage_class_for_static_member)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
+ break;
+ case SC_PrivateExtern:
+ llvm_unreachable("C storage class in c++!");
+ }
+ }
+
+ if (SC == SC_Static && CurContext->isRecord()) {
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) {
+ if (RD->isLocalClass())
+ Diag(D.getIdentifierLoc(),
+ diag::err_static_data_member_not_allowed_in_local_class)
+ << Name << RD->getDeclName();
+
+ // C++98 [class.union]p1: If a union contains a static data member,
+ // the program is ill-formed. C++11 drops this restriction.
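+        //
+        // For example:
+        //
+        //   union U { static int n; }; // ill-formed in C++98, OK in C++11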
+ if (RD->isUnion())
+ Diag(D.getIdentifierLoc(),
+ getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_static_data_member_in_union
+ : diag::ext_static_data_member_in_union) << Name;
+ // We conservatively disallow static data members in anonymous structs.
+ else if (!RD->getDeclName())
+ Diag(D.getIdentifierLoc(),
+ diag::err_static_data_member_not_allowed_in_anon_struct)
+ << Name << RD->isUnion();
+ }
+ }
+
+ // Match up the template parameter lists with the scope specifier, then
+ // determine whether we have a template or a template specialization.
+ TemplateParams = MatchTemplateParametersToScopeSpecifier(
+ D.getDeclSpec().getLocStart(), D.getIdentifierLoc(),
+ D.getCXXScopeSpec(),
+ D.getName().getKind() == UnqualifiedId::IK_TemplateId
+ ? D.getName().TemplateId
+ : nullptr,
+ TemplateParamLists,
+ /*never a friend*/ false, IsExplicitSpecialization, Invalid);
+
+ if (TemplateParams) {
+ if (!TemplateParams->size() &&
+ D.getName().getKind() != UnqualifiedId::IK_TemplateId) {
+ // There is an extraneous 'template<>' for this variable. Complain
+ // about it, but allow the declaration of the variable.
+ Diag(TemplateParams->getTemplateLoc(),
+ diag::err_template_variable_noparams)
+ << II
+ << SourceRange(TemplateParams->getTemplateLoc(),
+ TemplateParams->getRAngleLoc());
+ TemplateParams = nullptr;
+ } else {
+ if (D.getName().getKind() == UnqualifiedId::IK_TemplateId) {
+ // This is an explicit specialization or a partial specialization.
+ // FIXME: Check that we can declare a specialization here.
+ IsVariableTemplateSpecialization = true;
+ IsPartialSpecialization = TemplateParams->size() > 0;
+ } else { // if (TemplateParams->size() > 0)
+ // This is a template declaration.
+ IsVariableTemplate = true;
+
+ // Check that we can declare a template here.
+ if (CheckTemplateDeclScope(S, TemplateParams))
+ return nullptr;
+
+ // Only C++1y supports variable templates (N3651).
+ Diag(D.getIdentifierLoc(),
+ getLangOpts().CPlusPlus14
+ ? diag::warn_cxx11_compat_variable_template
+ : diag::ext_variable_template);
+ }
+ }
+ } else {
+ assert(
+ (Invalid || D.getName().getKind() != UnqualifiedId::IK_TemplateId) &&
+ "should have a 'template<>' for this decl");
+ }
+
+ if (IsVariableTemplateSpecialization) {
+ SourceLocation TemplateKWLoc =
+ TemplateParamLists.size() > 0
+ ? TemplateParamLists[0]->getTemplateLoc()
+ : SourceLocation();
+ DeclResult Res = ActOnVarTemplateSpecialization(
+ S, D, TInfo, TemplateKWLoc, TemplateParams, SC,
+ IsPartialSpecialization);
+ if (Res.isInvalid())
+ return nullptr;
+ NewVD = cast<VarDecl>(Res.get());
+ AddToScope = false;
+ } else
+ NewVD = VarDecl::Create(Context, DC, D.getLocStart(),
+ D.getIdentifierLoc(), II, R, TInfo, SC);
+
+ // If this is supposed to be a variable template, create it as such.
+ if (IsVariableTemplate) {
+ NewTemplate =
+ VarTemplateDecl::Create(Context, DC, D.getIdentifierLoc(), Name,
+ TemplateParams, NewVD);
+ NewVD->setDescribedVarTemplate(NewTemplate);
+ }
+
+ // If this decl has an auto type in need of deduction, make a note of the
+ // Decl so we can diagnose uses of it in its own initializer.
+ if (D.getDeclSpec().containsPlaceholderType() && R->getContainedAutoType())
+ ParsingInitForAutoVars.insert(NewVD);
+
+ if (D.isInvalidType() || Invalid) {
+ NewVD->setInvalidDecl();
+ if (NewTemplate)
+ NewTemplate->setInvalidDecl();
+ }
+
+ SetNestedNameSpecifier(NewVD, D);
+
+ // If we have any template parameter lists that don't directly belong to
+ // the variable (matching the scope specifier), store them.
+ unsigned VDTemplateParamLists = TemplateParams ? 1 : 0;
+ if (TemplateParamLists.size() > VDTemplateParamLists)
+ NewVD->setTemplateParameterListsInfo(
+ Context, TemplateParamLists.drop_back(VDTemplateParamLists));
+
+ if (D.getDeclSpec().isConstexprSpecified())
+ NewVD->setConstexpr(true);
+
+ if (D.getDeclSpec().isConceptSpecified()) {
+ NewVD->setConcept(true);
+
+ // C++ Concepts TS [dcl.spec.concept]p2: A concept definition shall not
+ // be declared with the thread_local, inline, friend, or constexpr
+ // specifiers, [...]
+ if (D.getDeclSpec().getThreadStorageClassSpec() == TSCS_thread_local) {
+ Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
+ diag::err_concept_decl_invalid_specifiers)
+ << 0 << 0;
+ NewVD->setInvalidDecl(true);
+ }
+
+ if (D.getDeclSpec().isConstexprSpecified()) {
+ Diag(D.getDeclSpec().getConstexprSpecLoc(),
+ diag::err_concept_decl_invalid_specifiers)
+ << 0 << 3;
+ NewVD->setInvalidDecl(true);
+ }
+ }
+ }
+
+ // Set the lexical context. If the declarator has a C++ scope specifier, the
+ // lexical context will be different from the semantic context.
+ NewVD->setLexicalDeclContext(CurContext);
+ if (NewTemplate)
+ NewTemplate->setLexicalDeclContext(CurContext);
+
+ if (IsLocalExternDecl)
+ NewVD->setLocalExternDecl();
+
+ bool EmitTLSUnsupportedError = false;
+ if (DeclSpec::TSCS TSCS = D.getDeclSpec().getThreadStorageClassSpec()) {
+ // C++11 [dcl.stc]p4:
+ // When thread_local is applied to a variable of block scope the
+ // storage-class-specifier static is implied if it does not appear
+ // explicitly.
+ // Core issue: 'static' is not implied if the variable is declared
+ // 'extern'.
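+    //
+    // For example:
+    //
+    //   void f() {
+    //     thread_local int x;        // 'static' is implied
+    //     extern thread_local int y; // 'static' is not implied
+    //   }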
+ if (NewVD->hasLocalStorage() &&
+ (SCSpec != DeclSpec::SCS_unspecified ||
+ TSCS != DeclSpec::TSCS_thread_local ||
+ !DC->isFunctionOrMethod()))
+ Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
+ diag::err_thread_non_global)
+ << DeclSpec::getSpecifierName(TSCS);
+ else if (!Context.getTargetInfo().isTLSSupported()) {
+ if (getLangOpts().CUDA) {
+ // Postpone error emission until we've collected attributes required to
+ // figure out whether it's a host or device variable and whether the
+ // error should be ignored.
+ EmitTLSUnsupportedError = true;
+ // We still need to mark the variable as TLS so it shows up in AST with
+ // proper storage class for other tools to use even if we're not going
+ // to emit any code for it.
+ NewVD->setTSCSpec(TSCS);
+ } else
+ Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
+ diag::err_thread_unsupported);
+ } else
+ NewVD->setTSCSpec(TSCS);
+ }
+
+ // C99 6.7.4p3
+ // An inline definition of a function with external linkage shall
+ // not contain a definition of a modifiable object with static or
+ // thread storage duration...
+ // We only apply this when the function is required to be defined
+ // elsewhere, i.e. when the function is not 'extern inline'. Note
+ // that a local variable with thread storage duration still has to
+ // be marked 'static'. Also note that it's possible to get these
+ // semantics in C++ using __attribute__((gnu_inline)).
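+  //
+  // For example, in C99:
+  //
+  //   inline void f(void) {
+  //     static int n; // warning: 'n' may differ between translation units
+  //   }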
+ if (SC == SC_Static && S->getFnParent() != nullptr &&
+ !NewVD->getType().isConstQualified()) {
+ FunctionDecl *CurFD = getCurFunctionDecl();
+ if (CurFD && isFunctionDefinitionDiscarded(*this, CurFD)) {
+ Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ diag::warn_static_local_in_extern_inline);
+ MaybeSuggestAddingStaticToDecl(CurFD);
+ }
+ }
+
+ if (D.getDeclSpec().isModulePrivateSpecified()) {
+ if (IsVariableTemplateSpecialization)
+ Diag(NewVD->getLocation(), diag::err_module_private_specialization)
+ << (IsPartialSpecialization ? 1 : 0)
+ << FixItHint::CreateRemoval(
+ D.getDeclSpec().getModulePrivateSpecLoc());
+ else if (IsExplicitSpecialization)
+ Diag(NewVD->getLocation(), diag::err_module_private_specialization)
+ << 2
+ << FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
+ else if (NewVD->hasLocalStorage())
+ Diag(NewVD->getLocation(), diag::err_module_private_local)
+ << 0 << NewVD->getDeclName()
+ << SourceRange(D.getDeclSpec().getModulePrivateSpecLoc())
+ << FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
+ else {
+ NewVD->setModulePrivate();
+ if (NewTemplate)
+ NewTemplate->setModulePrivate();
+ }
+ }
+
+ // Handle attributes prior to checking for duplicates in MergeVarDecl
+ ProcessDeclAttributes(S, NewVD, D);
+
+ if (getLangOpts().CUDA) {
+ if (EmitTLSUnsupportedError && DeclAttrsMatchCUDAMode(getLangOpts(), NewVD))
+ Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
+ diag::err_thread_unsupported);
+ // CUDA B.2.5: "__shared__ and __constant__ variables have implied static
+ // storage [duration]."
+ if (SC == SC_None && S->getFnParent() != nullptr &&
+ (NewVD->hasAttr<CUDASharedAttr>() ||
+ NewVD->hasAttr<CUDAConstantAttr>())) {
+ NewVD->setStorageClass(SC_Static);
+ }
+ }
+
+ // Ensure that dllimport globals without explicit storage class are treated as
+ // extern. The storage class is set above using parsed attributes. Now we can
+ // check the VarDecl itself.
+ assert(!NewVD->hasAttr<DLLImportAttr>() ||
+ NewVD->getAttr<DLLImportAttr>()->isInherited() ||
+ NewVD->isStaticDataMember() || NewVD->getStorageClass() != SC_None);
+
+  // In auto-retain/release, infer strong retention for variables of
+ // retainable type.
+ if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewVD))
+ NewVD->setInvalidDecl();
+
+ // Handle GNU asm-label extension (encoded as an attribute).
+ if (Expr *E = (Expr*)D.getAsmLabel()) {
+ // The parser guarantees this is a string.
+ StringLiteral *SE = cast<StringLiteral>(E);
+ StringRef Label = SE->getString();
+ if (S->getFnParent() != nullptr) {
+ switch (SC) {
+ case SC_None:
+ case SC_Auto:
+ Diag(E->getExprLoc(), diag::warn_asm_label_on_auto_decl) << Label;
+ break;
+ case SC_Register:
+ // Local Named register
+ if (!Context.getTargetInfo().isValidGCCRegisterName(Label) &&
+ DeclAttrsMatchCUDAMode(getLangOpts(), getCurFunctionDecl()))
+ Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
+ break;
+ case SC_Static:
+ case SC_Extern:
+ case SC_PrivateExtern:
+ break;
+ }
+ } else if (SC == SC_Register) {
+ // Global Named register
+ if (DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) {
+ const auto &TI = Context.getTargetInfo();
+ bool HasSizeMismatch;
+
+ if (!TI.isValidGCCRegisterName(Label))
+ Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
+ else if (!TI.validateGlobalRegisterVariable(Label,
+ Context.getTypeSize(R),
+ HasSizeMismatch))
+ Diag(E->getExprLoc(), diag::err_asm_invalid_global_var_reg) << Label;
+ else if (HasSizeMismatch)
+ Diag(E->getExprLoc(), diag::err_asm_register_size_mismatch) << Label;
+ }
+
+ if (!R->isIntegralType(Context) && !R->isPointerType()) {
+ Diag(D.getLocStart(), diag::err_asm_bad_register_type);
+ NewVD->setInvalidDecl(true);
+ }
+ }
+
+ NewVD->addAttr(::new (Context) AsmLabelAttr(SE->getStrTokenLoc(0),
+ Context, Label, 0));
+ } else if (!ExtnameUndeclaredIdentifiers.empty()) {
+ llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*>::iterator I =
+ ExtnameUndeclaredIdentifiers.find(NewVD->getIdentifier());
+ if (I != ExtnameUndeclaredIdentifiers.end()) {
+ if (isDeclExternC(NewVD)) {
+ NewVD->addAttr(I->second);
+ ExtnameUndeclaredIdentifiers.erase(I);
+ } else
+ Diag(NewVD->getLocation(), diag::warn_redefine_extname_not_applied)
+ << /*Variable*/1 << NewVD;
+ }
+ }
+
+ // Diagnose shadowed variables before filtering for scope.
+ if (D.getCXXScopeSpec().isEmpty())
+ CheckShadow(S, NewVD, Previous);
+
+ // Don't consider existing declarations that are in a different
+ // scope and are out-of-semantic-context declarations (if the new
+ // declaration has linkage).
+ FilterLookupForScope(Previous, OriginalDC, S, shouldConsiderLinkage(NewVD),
+ D.getCXXScopeSpec().isNotEmpty() ||
+ IsExplicitSpecialization ||
+ IsVariableTemplateSpecialization);
+
+ // Check whether the previous declaration is in the same block scope. This
+ // affects whether we merge types with it, per C++11 [dcl.array]p3.
+ if (getLangOpts().CPlusPlus &&
+ NewVD->isLocalVarDecl() && NewVD->hasExternalStorage())
+ NewVD->setPreviousDeclInSameBlockScope(
+ Previous.isSingleResult() && !Previous.isShadowed() &&
+ isDeclInScope(Previous.getFoundDecl(), OriginalDC, S, false));
+
+ if (!getLangOpts().CPlusPlus) {
+ D.setRedeclaration(CheckVariableDeclaration(NewVD, Previous));
+ } else {
+ // If this is an explicit specialization of a static data member, check it.
+ if (IsExplicitSpecialization && !NewVD->isInvalidDecl() &&
+ CheckMemberSpecialization(NewVD, Previous))
+ NewVD->setInvalidDecl();
+
+ // Merge the decl with the existing one if appropriate.
+ if (!Previous.empty()) {
+ if (Previous.isSingleResult() &&
+ isa<FieldDecl>(Previous.getFoundDecl()) &&
+ D.getCXXScopeSpec().isSet()) {
+ // The user tried to define a non-static data member
+ // out-of-line (C++ [dcl.meaning]p1).
+ Diag(NewVD->getLocation(), diag::err_nonstatic_member_out_of_line)
+ << D.getCXXScopeSpec().getRange();
+ Previous.clear();
+ NewVD->setInvalidDecl();
+ }
+ } else if (D.getCXXScopeSpec().isSet()) {
+ // No previous declaration in the qualifying scope.
+ Diag(D.getIdentifierLoc(), diag::err_no_member)
+ << Name << computeDeclContext(D.getCXXScopeSpec(), true)
+ << D.getCXXScopeSpec().getRange();
+ NewVD->setInvalidDecl();
+ }
+
+ if (!IsVariableTemplateSpecialization)
+ D.setRedeclaration(CheckVariableDeclaration(NewVD, Previous));
+
+ if (NewTemplate) {
+ VarTemplateDecl *PrevVarTemplate =
+ NewVD->getPreviousDecl()
+ ? NewVD->getPreviousDecl()->getDescribedVarTemplate()
+ : nullptr;
+
+ // Check the template parameter list of this declaration, possibly
+ // merging in the template parameter list from the previous variable
+ // template declaration.
+ if (CheckTemplateParameterList(
+ TemplateParams,
+ PrevVarTemplate ? PrevVarTemplate->getTemplateParameters()
+ : nullptr,
+ (D.getCXXScopeSpec().isSet() && DC && DC->isRecord() &&
+ DC->isDependentContext())
+ ? TPC_ClassTemplateMember
+ : TPC_VarTemplate))
+ NewVD->setInvalidDecl();
+
+ // If we are providing an explicit specialization of a static variable
+ // template, make a note of that.
+ if (PrevVarTemplate &&
+ PrevVarTemplate->getInstantiatedFromMemberTemplate())
+ PrevVarTemplate->setMemberSpecialization();
+ }
+ }
+
+ ProcessPragmaWeak(S, NewVD);
+
+ // If this is the first declaration of an extern C variable, update
+ // the map of such variables.
+ if (NewVD->isFirstDecl() && !NewVD->isInvalidDecl() &&
+ isIncompleteDeclExternC(*this, NewVD))
+ RegisterLocallyScopedExternCDecl(NewVD, S);
+
+ if (getLangOpts().CPlusPlus && NewVD->isStaticLocal()) {
+ Decl *ManglingContextDecl;
+ if (MangleNumberingContext *MCtx = getCurrentMangleNumberContext(
+ NewVD->getDeclContext(), ManglingContextDecl)) {
+ Context.setManglingNumber(
+ NewVD, MCtx->getManglingNumber(
+ NewVD, getMSManglingNumber(getLangOpts(), S)));
+ Context.setStaticLocalNumber(NewVD, MCtx->getStaticLocalNumber(NewVD));
+ }
+ }
+
+  // Special handling of a variable named 'main'.
+ if (Name.isIdentifier() && Name.getAsIdentifierInfo()->isStr("main") &&
+ NewVD->getDeclContext()->getRedeclContext()->isTranslationUnit() &&
+ !getLangOpts().Freestanding && !NewVD->getDescribedVarTemplate()) {
+
+ // C++ [basic.start.main]p3
+ // A program that declares a variable main at global scope is ill-formed.
+ if (getLangOpts().CPlusPlus)
+ Diag(D.getLocStart(), diag::err_main_global_variable);
+
+    // In C, an external-linkage variable named main results in undefined
+ // behavior.
+ else if (NewVD->hasExternalFormalLinkage())
+ Diag(D.getLocStart(), diag::warn_main_redefined);
+ }
+
+ if (D.isRedeclaration() && !Previous.empty()) {
+ checkDLLAttributeRedeclaration(
+ *this, dyn_cast<NamedDecl>(Previous.getRepresentativeDecl()), NewVD,
+ IsExplicitSpecialization);
+ }
+
+ if (NewTemplate) {
+ if (NewVD->isInvalidDecl())
+ NewTemplate->setInvalidDecl();
+ ActOnDocumentableDecl(NewTemplate);
+ return NewTemplate;
+ }
+
+ return NewVD;
+}
+
+/// \brief Diagnose variable or built-in function shadowing. Implements
+/// -Wshadow.
+///
+/// This method is called whenever a VarDecl is added to a "useful"
+/// scope.
+///
+/// \param S the scope in which the shadowing name is being declared
+/// \param R the lookup of the name
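+///
+/// For example:
+///
+///   int x;
+///   void f() {
+///     int x; // -Wshadow: shadows the global 'x'
+///   }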
+///
+void Sema::CheckShadow(Scope *S, VarDecl *D, const LookupResult& R) {
+ // Return if warning is ignored.
+ if (Diags.isIgnored(diag::warn_decl_shadow, R.getNameLoc()))
+ return;
+
+ // Don't diagnose declarations at file scope.
+ if (D->hasGlobalStorage())
+ return;
+
+ DeclContext *NewDC = D->getDeclContext();
+
+ // Only diagnose if we're shadowing an unambiguous field or variable.
+ if (R.getResultKind() != LookupResult::Found)
+ return;
+
+ NamedDecl* ShadowedDecl = R.getFoundDecl();
+ if (!isa<VarDecl>(ShadowedDecl) && !isa<FieldDecl>(ShadowedDecl))
+ return;
+
+ // Fields are not shadowed by variables in C++ static methods.
+ if (isa<FieldDecl>(ShadowedDecl))
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewDC))
+ if (MD->isStatic())
+ return;
+
+ if (VarDecl *shadowedVar = dyn_cast<VarDecl>(ShadowedDecl))
+ if (shadowedVar->isExternC()) {
+ // For shadowing external vars, make sure that we point to the global
+ // declaration, not a locally scoped extern declaration.
+ for (auto I : shadowedVar->redecls())
+ if (I->isFileVarDecl()) {
+ ShadowedDecl = I;
+ break;
+ }
+ }
+
+ DeclContext *OldDC = ShadowedDecl->getDeclContext();
+
+ // Only warn about certain kinds of shadowing for class members.
+ if (NewDC && NewDC->isRecord()) {
+ // In particular, don't warn about shadowing non-class members.
+ if (!OldDC->isRecord())
+ return;
+
+ // TODO: should we warn about static data members shadowing
+ // static data members from base classes?
+
+ // TODO: don't diagnose for inaccessible shadowed members.
+ // This is hard to do perfectly because we might friend the
+ // shadowing context, but that's just a false negative.
+ }
+
+ // Determine what kind of declaration we're shadowing.
+ unsigned Kind;
+ if (isa<RecordDecl>(OldDC)) {
+ if (isa<FieldDecl>(ShadowedDecl))
+ Kind = 3; // field
+ else
+ Kind = 2; // static data member
+ } else if (OldDC->isFileContext())
+ Kind = 1; // global
+ else
+ Kind = 0; // local
+
+ DeclarationName Name = R.getLookupName();
+
+ // Emit warning and note.
+ if (getSourceManager().isInSystemMacro(R.getNameLoc()))
+ return;
+ Diag(R.getNameLoc(), diag::warn_decl_shadow) << Name << Kind << OldDC;
+ Diag(ShadowedDecl->getLocation(), diag::note_previous_declaration);
+}
+
+/// \brief Check -Wshadow without the advantage of a previous lookup.
+void Sema::CheckShadow(Scope *S, VarDecl *D) {
+ if (Diags.isIgnored(diag::warn_decl_shadow, D->getLocation()))
+ return;
+
+ LookupResult R(*this, D->getDeclName(), D->getLocation(),
+ Sema::LookupOrdinaryName, Sema::ForRedeclaration);
+ LookupName(R, S);
+ CheckShadow(S, D, R);
+}
+
+/// Check for conflict between this global or extern "C" declaration and
+/// previous global or extern "C" declarations. This is only used in C++.
+template<typename T>
+static bool checkGlobalOrExternCConflict(
+ Sema &S, const T *ND, bool IsGlobal, LookupResult &Previous) {
+ assert(S.getLangOpts().CPlusPlus && "only C++ has extern \"C\"");
+ NamedDecl *Prev = S.findLocallyScopedExternCDecl(ND->getDeclName());
+
+ if (!Prev && IsGlobal && !isIncompleteDeclExternC(S, ND)) {
+ // The common case: this global doesn't conflict with any extern "C"
+ // declaration.
+ return false;
+ }
+
+ if (Prev) {
+ if (!IsGlobal || isIncompleteDeclExternC(S, ND)) {
+ // Both the old and new declarations have C language linkage. This is a
+ // redeclaration.
+ Previous.clear();
+ Previous.addDecl(Prev);
+ return true;
+ }
+
+ // This is a global, non-extern "C" declaration, and there is a previous
+ // non-global extern "C" declaration. Diagnose if this is a variable
+ // declaration.
+ if (!isa<VarDecl>(ND))
+ return false;
+ } else {
+ // The declaration is extern "C". Check for any declaration in the
+ // translation unit which might conflict.
+ if (IsGlobal) {
+ // We have already performed the lookup into the translation unit.
+ IsGlobal = false;
+ for (LookupResult::iterator I = Previous.begin(), E = Previous.end();
+ I != E; ++I) {
+ if (isa<VarDecl>(*I)) {
+ Prev = *I;
+ break;
+ }
+ }
+ } else {
+ DeclContext::lookup_result R =
+ S.Context.getTranslationUnitDecl()->lookup(ND->getDeclName());
+ for (DeclContext::lookup_result::iterator I = R.begin(), E = R.end();
+ I != E; ++I) {
+ if (isa<VarDecl>(*I)) {
+ Prev = *I;
+ break;
+ }
+ // FIXME: If we have any other entity with this name in global scope,
+ // the declaration is ill-formed, but that is a defect: it breaks the
+ // 'stat' hack, for instance. Only variables can have mangled name
+ // clashes with extern "C" declarations, so only they deserve a
+ // diagnostic.
+ }
+ }
+
+ if (!Prev)
+ return false;
+ }
+
+ // Use the first declaration's location to ensure we point at something which
+ // is lexically inside an extern "C" linkage-spec.
+ assert(Prev && "should have found a previous declaration to diagnose");
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Prev))
+ Prev = FD->getFirstDecl();
+ else
+ Prev = cast<VarDecl>(Prev)->getFirstDecl();
+
+ S.Diag(ND->getLocation(), diag::err_extern_c_global_conflict)
+ << IsGlobal << ND;
+ S.Diag(Prev->getLocation(), diag::note_extern_c_global_conflict)
+ << IsGlobal;
+ return false;
+}
+
+/// Apply special rules for handling extern "C" declarations. Returns \c true
+/// if we have found that this is a redeclaration of some prior entity.
+///
+/// Per C++ [dcl.link]p6:
+/// Two declarations [for a function or variable] with C language linkage
+/// with the same name that appear in different scopes refer to the same
+/// [entity]. An entity with C language linkage shall not be declared with
+/// the same name as an entity in global scope.
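+///
+/// For example:
+///
+///   int n;
+///   namespace A { extern "C" int n; } // error: conflicts with global 'n'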
+template<typename T>
+static bool checkForConflictWithNonVisibleExternC(Sema &S, const T *ND,
+ LookupResult &Previous) {
+ if (!S.getLangOpts().CPlusPlus) {
+ // In C, when declaring a global variable, look for a corresponding 'extern'
+ // variable declared in function scope. We don't need this in C++, because
+ // we find local extern decls in the surrounding file-scope DeclContext.
+ if (ND->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
+ if (NamedDecl *Prev = S.findLocallyScopedExternCDecl(ND->getDeclName())) {
+ Previous.clear();
+ Previous.addDecl(Prev);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // A declaration in the translation unit can conflict with an extern "C"
+ // declaration.
+ if (ND->getDeclContext()->getRedeclContext()->isTranslationUnit())
+ return checkGlobalOrExternCConflict(S, ND, /*IsGlobal*/true, Previous);
+
+ // An extern "C" declaration can conflict with a declaration in the
+ // translation unit or can be a redeclaration of an extern "C" declaration
+ // in another scope.
+ if (isIncompleteDeclExternC(S,ND))
+ return checkGlobalOrExternCConflict(S, ND, /*IsGlobal*/false, Previous);
+
+ // Neither global nor extern "C": nothing to do.
+ return false;
+}
+
+void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
+ // If the decl is already known invalid, don't check it.
+ if (NewVD->isInvalidDecl())
+ return;
+
+ TypeSourceInfo *TInfo = NewVD->getTypeSourceInfo();
+ QualType T = TInfo->getType();
+
+ // Defer checking an 'auto' type until its initializer is attached.
+ if (T->isUndeducedType())
+ return;
+
+ if (NewVD->hasAttrs())
+ CheckAlignasUnderalignment(NewVD);
+
+ if (T->isObjCObjectType()) {
+ Diag(NewVD->getLocation(), diag::err_statically_allocated_object)
+ << FixItHint::CreateInsertion(NewVD->getLocation(), "*");
+ T = Context.getObjCObjectPointerType(T);
+ NewVD->setType(T);
+ }
+
+  // Emit an error if an address space was applied to a decl with local
+  // storage.
+ // This includes arrays of objects with address space qualifiers, but not
+ // automatic variables that point to other address spaces.
+ // ISO/IEC TR 18037 S5.1.2
+ if (!getLangOpts().OpenCL
+ && NewVD->hasLocalStorage() && T.getAddressSpace() != 0) {
+ Diag(NewVD->getLocation(), diag::err_as_qualified_auto_decl);
+ NewVD->setInvalidDecl();
+ return;
+ }
+
+ // OpenCL v1.2 s6.8 -- The static qualifier is valid only in program
+ // scope.
+ if (getLangOpts().OpenCLVersion == 120 &&
+ !getOpenCLOptions().cl_clang_storage_class_specifiers &&
+ NewVD->isStaticLocal()) {
+ Diag(NewVD->getLocation(), diag::err_static_function_scope);
+ NewVD->setInvalidDecl();
+ return;
+ }
+
+ // OpenCL v1.2 s6.5 - All program scope variables must be declared in the
+ // __constant address space.
+ // OpenCL v2.0 s6.5.1 - Variables defined at program scope and static
+ // variables inside a function can also be declared in the global
+ // address space.
+ if (getLangOpts().OpenCL) {
+ if (NewVD->isFileVarDecl()) {
+ if (!T->isSamplerT() &&
+ !(T.getAddressSpace() == LangAS::opencl_constant ||
+ (T.getAddressSpace() == LangAS::opencl_global &&
+ getLangOpts().OpenCLVersion == 200))) {
+ if (getLangOpts().OpenCLVersion == 200)
+ Diag(NewVD->getLocation(), diag::err_opencl_global_invalid_addr_space)
+ << "global or constant";
+ else
+ Diag(NewVD->getLocation(), diag::err_opencl_global_invalid_addr_space)
+ << "constant";
+ NewVD->setInvalidDecl();
+ return;
+ }
+ } else {
+ // OpenCL v2.0 s6.5.1 - Variables defined at program scope and static
+ // variables inside a function can also be declared in the global
+ // address space.
+ if (NewVD->isStaticLocal() &&
+ !(T.getAddressSpace() == LangAS::opencl_constant ||
+ (T.getAddressSpace() == LangAS::opencl_global &&
+ getLangOpts().OpenCLVersion == 200))) {
+ if (getLangOpts().OpenCLVersion == 200)
+ Diag(NewVD->getLocation(), diag::err_opencl_global_invalid_addr_space)
+ << "global or constant";
+ else
+ Diag(NewVD->getLocation(), diag::err_opencl_global_invalid_addr_space)
+ << "constant";
+ NewVD->setInvalidDecl();
+ return;
+ }
+ // OpenCL v1.1 s6.5.2 and s6.5.3 no local or constant variables
+ // in functions.
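+ // e.g. void helper() { __local int x; } // error: 'helper' is not a kernel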
+ if (T.getAddressSpace() == LangAS::opencl_constant ||
+ T.getAddressSpace() == LangAS::opencl_local) {
+ FunctionDecl *FD = getCurFunctionDecl();
+ if (FD && !FD->hasAttr<OpenCLKernelAttr>()) {
+ if (T.getAddressSpace() == LangAS::opencl_constant)
+ Diag(NewVD->getLocation(), diag::err_opencl_non_kernel_variable)
+ << "constant";
+ else
+ Diag(NewVD->getLocation(), diag::err_opencl_non_kernel_variable)
+ << "local";
+ NewVD->setInvalidDecl();
+ return;
+ }
+ }
+ }
+ }
+
+ if (NewVD->hasLocalStorage() && T.isObjCGCWeak()
+ && !NewVD->hasAttr<BlocksAttr>()) {
+ if (getLangOpts().getGC() != LangOptions::NonGC)
+ Diag(NewVD->getLocation(), diag::warn_gc_attribute_weak_on_local);
+ else {
+ assert(!getLangOpts().ObjCAutoRefCount);
+ Diag(NewVD->getLocation(), diag::warn_attribute_weak_on_local);
+ }
+ }
+
+ bool isVM = T->isVariablyModifiedType();
+ if (isVM || NewVD->hasAttr<CleanupAttr>() ||
+ NewVD->hasAttr<BlocksAttr>())
+ getCurFunction()->setHasBranchProtectedScope();
+
+ if ((isVM && NewVD->hasLinkage()) ||
+ (T->isVariableArrayType() && NewVD->hasGlobalStorage())) {
+ bool SizeIsNegative;
+ llvm::APSInt Oversized;
+ TypeSourceInfo *FixedTInfo =
+ TryToFixInvalidVariablyModifiedTypeSourceInfo(TInfo, Context,
+ SizeIsNegative, Oversized);
+ if (!FixedTInfo && T->isVariableArrayType()) {
+ const VariableArrayType *VAT = Context.getAsVariableArrayType(T);
+ // FIXME: This won't give the correct result for
+ // int a[10][n];
+ SourceRange SizeRange = VAT->getSizeExpr()->getSourceRange();
+
+ if (NewVD->isFileVarDecl())
+ Diag(NewVD->getLocation(), diag::err_vla_decl_in_file_scope)
+ << SizeRange;
+ else if (NewVD->isStaticLocal())
+ Diag(NewVD->getLocation(), diag::err_vla_decl_has_static_storage)
+ << SizeRange;
+ else
+ Diag(NewVD->getLocation(), diag::err_vla_decl_has_extern_linkage)
+ << SizeRange;
+ NewVD->setInvalidDecl();
+ return;
+ }
+
+ if (!FixedTInfo) {
+ if (NewVD->isFileVarDecl())
+ Diag(NewVD->getLocation(), diag::err_vm_decl_in_file_scope);
+ else
+ Diag(NewVD->getLocation(), diag::err_vm_decl_has_extern_linkage);
+ NewVD->setInvalidDecl();
+ return;
+ }
+
+ Diag(NewVD->getLocation(), diag::warn_illegal_constant_array_size);
+ NewVD->setType(FixedTInfo->getType());
+ NewVD->setTypeSourceInfo(FixedTInfo);
+ }
+
+ if (T->isVoidType()) {
+ // C++98 [dcl.stc]p5: The extern specifier can be applied only to the names
+ // of objects and functions.
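+ // e.g. 'extern void v;' is not diagnosed here in C (it is a declaration,
+ // not a definition) but is always an error in C++.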
+ if (NewVD->isThisDeclarationADefinition() || getLangOpts().CPlusPlus) {
+ Diag(NewVD->getLocation(), diag::err_typecheck_decl_incomplete_type)
+ << T;
+ NewVD->setInvalidDecl();
+ return;
+ }
+ }
+
+ if (!NewVD->hasLocalStorage() && NewVD->hasAttr<BlocksAttr>()) {
+ Diag(NewVD->getLocation(), diag::err_block_on_nonlocal);
+ NewVD->setInvalidDecl();
+ return;
+ }
+
+ if (isVM && NewVD->hasAttr<BlocksAttr>()) {
+ Diag(NewVD->getLocation(), diag::err_block_on_vm);
+ NewVD->setInvalidDecl();
+ return;
+ }
+
+ if (NewVD->isConstexpr() && !T->isDependentType() &&
+ RequireLiteralType(NewVD->getLocation(), T,
+ diag::err_constexpr_var_non_literal)) {
+ NewVD->setInvalidDecl();
+ return;
+ }
+}
+
+/// \brief Perform semantic checking on a newly-created variable
+/// declaration.
+///
+/// This routine performs all of the type-checking required for a
+/// variable declaration once it has been built. It is used both to
+/// check variables after they have been parsed and their declarators
+/// have been translated into a declaration, and to check variables
+/// that have been instantiated from a template.
+///
+/// Sets NewVD->isInvalidDecl() if an error was encountered.
+///
+/// Returns true if the variable declaration is a redeclaration.
+bool Sema::CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous) {
+ CheckVariableDeclarationType(NewVD);
+
+ // If the decl is already known invalid, don't check it.
+ if (NewVD->isInvalidDecl())
+ return false;
+
+ // If we did not find anything by this name, look for a non-visible
+ // extern "C" declaration with the same name.
+ if (Previous.empty() &&
+ checkForConflictWithNonVisibleExternC(*this, NewVD, Previous))
+ Previous.setShadowed();
+
+ if (!Previous.empty()) {
+ MergeVarDecl(NewVD, Previous);
+ return true;
+ }
+ return false;
+}
+
+namespace {
+struct FindOverriddenMethod {
+ Sema *S;
+ CXXMethodDecl *Method;
+
+ /// Member lookup function that determines whether a given C++
+ /// method overrides a method in a base class, to be used with
+ /// CXXRecordDecl::lookupInBases().
+ bool operator()(const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
+ RecordDecl *BaseRecord =
+ Specifier->getType()->getAs<RecordType>()->getDecl();
+
+ DeclarationName Name = Method->getDeclName();
+
+ // FIXME: Do we care about other names here too?
+ if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
+ // We really want to find the base class destructor here.
+ QualType T = S->Context.getTypeDeclType(BaseRecord);
+ CanQualType CT = S->Context.getCanonicalType(T);
+
+ Name = S->Context.DeclarationNames.getCXXDestructorName(CT);
+ }
+
+ for (Path.Decls = BaseRecord->lookup(Name); !Path.Decls.empty();
+ Path.Decls = Path.Decls.slice(1)) {
+ NamedDecl *D = Path.Decls.front();
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (MD->isVirtual() && !S->IsOverload(Method, MD, false))
+ return true;
+ }
+ }
+
+ return false;
+ }
+};
+
+enum OverrideErrorKind { OEK_All, OEK_NonDeleted, OEK_Deleted };
+} // end anonymous namespace
+
+/// \brief Report an error regarding overriding, along with any relevant
+/// overridden methods.
+///
+/// \param DiagID the primary error to report.
+/// \param MD the overriding method.
+/// \param OEK which overrides to include as notes.
+static void ReportOverrides(Sema& S, unsigned DiagID, const CXXMethodDecl *MD,
+ OverrideErrorKind OEK = OEK_All) {
+ S.Diag(MD->getLocation(), DiagID) << MD->getDeclName();
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods();
+ I != E; ++I) {
+ // This check (& the OEK parameter) could be replaced by a predicate, but
+ // without lambdas that would be overkill. This is still nicer than writing
+ // out the diag loop 3 times.
+ if ((OEK == OEK_All) ||
+ (OEK == OEK_NonDeleted && !(*I)->isDeleted()) ||
+ (OEK == OEK_Deleted && (*I)->isDeleted()))
+ S.Diag((*I)->getLocation(), diag::note_overridden_virtual_function);
+ }
+}
+
+/// AddOverriddenMethods - See if a method overrides any in the base classes,
+/// and if so, check that it's a valid override and remember it.
+bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
+ // Look for methods in base classes that this method might override.
+ CXXBasePaths Paths;
+ FindOverriddenMethod FOM;
+ FOM.Method = MD;
+ FOM.S = this;
+ bool hasDeletedOverriddenMethods = false;
+ bool hasNonDeletedOverriddenMethods = false;
+ bool AddedAny = false;
+ if (DC->lookupInBases(FOM, Paths)) {
+ for (auto *I : Paths.found_decls()) {
+ if (CXXMethodDecl *OldMD = dyn_cast<CXXMethodDecl>(I)) {
+ MD->addOverriddenMethod(OldMD->getCanonicalDecl());
+ if (!CheckOverridingFunctionReturnType(MD, OldMD) &&
+ !CheckOverridingFunctionAttributes(MD, OldMD) &&
+ !CheckOverridingFunctionExceptionSpec(MD, OldMD) &&
+ !CheckIfOverriddenFunctionIsMarkedFinal(MD, OldMD)) {
+ hasDeletedOverriddenMethods |= OldMD->isDeleted();
+ hasNonDeletedOverriddenMethods |= !OldMD->isDeleted();
+ AddedAny = true;
+ }
+ }
+ }
+ }
+
+ if (hasDeletedOverriddenMethods && !MD->isDeleted()) {
+ ReportOverrides(*this, diag::err_non_deleted_override, MD, OEK_Deleted);
+ }
+ if (hasNonDeletedOverriddenMethods && MD->isDeleted()) {
+ ReportOverrides(*this, diag::err_deleted_override, MD, OEK_NonDeleted);
+ }
+
+ return AddedAny;
+}
+
+namespace {
+ // Struct for holding all of the extra arguments needed by
+ // DiagnoseInvalidRedeclaration to call Sema::ActOnFunctionDeclarator.
+ struct ActOnFDArgs {
+ Scope *S;
+ Declarator &D;
+ MultiTemplateParamsArg TemplateParamLists;
+ bool AddToScope;
+ };
+}
+
+namespace {
+
+// Callback to only accept typo corrections that have a non-zero edit distance.
+// Also only accept corrections that have the same parent decl.
+class DifferentNameValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ DifferentNameValidatorCCC(ASTContext &Context, FunctionDecl *TypoFD,
+ CXXRecordDecl *Parent)
+ : Context(Context), OriginalFD(TypoFD),
+ ExpectedParent(Parent ? Parent->getCanonicalDecl() : nullptr) {}
+
+ bool ValidateCandidate(const TypoCorrection &candidate) override {
+ if (candidate.getEditDistance() == 0)
+ return false;
+
+ SmallVector<unsigned, 1> MismatchedParams;
+ for (TypoCorrection::const_decl_iterator CDecl = candidate.begin(),
+ CDeclEnd = candidate.end();
+ CDecl != CDeclEnd; ++CDecl) {
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(*CDecl);
+
+ if (FD && !FD->hasBody() &&
+ hasSimilarParameters(Context, FD, OriginalFD, MismatchedParams)) {
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ CXXRecordDecl *Parent = MD->getParent();
+ if (Parent && Parent->getCanonicalDecl() == ExpectedParent)
+ return true;
+ } else if (!ExpectedParent) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ private:
+ ASTContext &Context;
+ FunctionDecl *OriginalFD;
+ CXXRecordDecl *ExpectedParent;
+};
+
+}
+
+/// \brief Generate diagnostics for an invalid function redeclaration.
+///
+/// This routine handles generating the diagnostic messages for an invalid
+/// function redeclaration, including finding possible similar declarations
+/// or performing typo correction if there are no previous declarations with
+/// the same name.
+///
+/// Returns a NamedDecl iff typo correction was performed and substituting in
+/// the new declaration name does not cause new errors.
+static NamedDecl *DiagnoseInvalidRedeclaration(
+ Sema &SemaRef, LookupResult &Previous, FunctionDecl *NewFD,
+ ActOnFDArgs &ExtraArgs, bool IsLocalFriend, Scope *S) {
+ DeclarationName Name = NewFD->getDeclName();
+ DeclContext *NewDC = NewFD->getDeclContext();
+ SmallVector<unsigned, 1> MismatchedParams;
+ SmallVector<std::pair<FunctionDecl *, unsigned>, 1> NearMatches;
+ TypoCorrection Correction;
+ bool IsDefinition = ExtraArgs.D.isFunctionDefinition();
+ unsigned DiagMsg = IsLocalFriend ? diag::err_no_matching_local_friend
+ : diag::err_member_decl_does_not_match;
+ LookupResult Prev(SemaRef, Name, NewFD->getLocation(),
+ IsLocalFriend ? Sema::LookupLocalFriendName
+ : Sema::LookupOrdinaryName,
+ Sema::ForRedeclaration);
+
+ NewFD->setInvalidDecl();
+ if (IsLocalFriend)
+ SemaRef.LookupName(Prev, S);
+ else
+ SemaRef.LookupQualifiedName(Prev, NewDC);
+ assert(!Prev.isAmbiguous() &&
+ "Cannot have an ambiguity in previous-declaration lookup");
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD);
+ if (!Prev.empty()) {
+ for (LookupResult::iterator Func = Prev.begin(), FuncEnd = Prev.end();
+ Func != FuncEnd; ++Func) {
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(*Func);
+ if (FD &&
+ hasSimilarParameters(SemaRef.Context, FD, NewFD, MismatchedParams)) {
+ // Add 1 to the index so that 0 can mean the mismatch didn't
+ // involve a parameter.
+ unsigned ParamNum =
+ MismatchedParams.empty() ? 0 : MismatchedParams.front() + 1;
+ NearMatches.push_back(std::make_pair(FD, ParamNum));
+ }
+ }
+ // If the qualified name lookup yielded nothing, try typo correction
+ } else if ((Correction = SemaRef.CorrectTypo(
+ Prev.getLookupNameInfo(), Prev.getLookupKind(), S,
+ &ExtraArgs.D.getCXXScopeSpec(),
+ llvm::make_unique<DifferentNameValidatorCCC>(
+ SemaRef.Context, NewFD, MD ? MD->getParent() : nullptr),
+ Sema::CTK_ErrorRecovery, IsLocalFriend ? nullptr : NewDC))) {
+ // Set up everything for the call to ActOnFunctionDeclarator
+ ExtraArgs.D.SetIdentifier(Correction.getCorrectionAsIdentifierInfo(),
+ ExtraArgs.D.getIdentifierLoc());
+ Previous.clear();
+ Previous.setLookupName(Correction.getCorrection());
+ for (TypoCorrection::decl_iterator CDecl = Correction.begin(),
+ CDeclEnd = Correction.end();
+ CDecl != CDeclEnd; ++CDecl) {
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(*CDecl);
+ if (FD && !FD->hasBody() &&
+ hasSimilarParameters(SemaRef.Context, FD, NewFD, MismatchedParams)) {
+ Previous.addDecl(FD);
+ }
+ }
+ bool wasRedeclaration = ExtraArgs.D.isRedeclaration();
+
+ NamedDecl *Result;
+ // Retry building the function declaration with the new previous
+ // declarations, and with errors suppressed.
+ {
+ // Trap errors.
+ Sema::SFINAETrap Trap(SemaRef);
+
+ // TODO: Refactor ActOnFunctionDeclarator so that we can call only the
+ // pieces needed to verify the typo-corrected C++ declaration and hopefully
+ // eliminate the need for the parameter pack ExtraArgs.
+ Result = SemaRef.ActOnFunctionDeclarator(
+ ExtraArgs.S, ExtraArgs.D,
+ Correction.getCorrectionDecl()->getDeclContext(),
+ NewFD->getTypeSourceInfo(), Previous, ExtraArgs.TemplateParamLists,
+ ExtraArgs.AddToScope);
+
+ if (Trap.hasErrorOccurred())
+ Result = nullptr;
+ }
+
+ if (Result) {
+ // Determine which correction we picked.
+ Decl *Canonical = Result->getCanonicalDecl();
+ for (LookupResult::iterator I = Previous.begin(), E = Previous.end();
+ I != E; ++I)
+ if ((*I)->getCanonicalDecl() == Canonical)
+ Correction.setCorrectionDecl(*I);
+
+ SemaRef.diagnoseTypo(
+ Correction,
+ SemaRef.PDiag(IsLocalFriend
+ ? diag::err_no_matching_local_friend_suggest
+ : diag::err_member_decl_does_not_match_suggest)
+ << Name << NewDC << IsDefinition);
+ return Result;
+ }
+
+ // Pretend the typo correction never occurred
+ ExtraArgs.D.SetIdentifier(Name.getAsIdentifierInfo(),
+ ExtraArgs.D.getIdentifierLoc());
+ ExtraArgs.D.setRedeclaration(wasRedeclaration);
+ Previous.clear();
+ Previous.setLookupName(Name);
+ }
+
+ SemaRef.Diag(NewFD->getLocation(), DiagMsg)
+ << Name << NewDC << IsDefinition << NewFD->getLocation();
+
+ bool NewFDisConst = false;
+ if (CXXMethodDecl *NewMD = dyn_cast<CXXMethodDecl>(NewFD))
+ NewFDisConst = NewMD->isConst();
+
+ for (SmallVectorImpl<std::pair<FunctionDecl *, unsigned> >::iterator
+ NearMatch = NearMatches.begin(), NearMatchEnd = NearMatches.end();
+ NearMatch != NearMatchEnd; ++NearMatch) {
+ FunctionDecl *FD = NearMatch->first;
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
+ bool FDisConst = MD && MD->isConst();
+ bool IsMember = MD || !IsLocalFriend;
+
+ // FIXME: These notes are poorly worded for the local friend case.
+ if (unsigned Idx = NearMatch->second) {
+ ParmVarDecl *FDParam = FD->getParamDecl(Idx-1);
+ SourceLocation Loc = FDParam->getTypeSpecStartLoc();
+ if (Loc.isInvalid()) Loc = FD->getLocation();
+ SemaRef.Diag(Loc, IsMember ? diag::note_member_def_close_param_match
+ : diag::note_local_decl_close_param_match)
+ << Idx << FDParam->getType()
+ << NewFD->getParamDecl(Idx - 1)->getType();
+ } else if (FDisConst != NewFDisConst) {
+ SemaRef.Diag(FD->getLocation(), diag::note_member_def_close_const_match)
+ << NewFDisConst << FD->getSourceRange().getEnd();
+ } else
+ SemaRef.Diag(FD->getLocation(),
+ IsMember ? diag::note_member_def_close_match
+ : diag::note_local_decl_close_match);
+ }
+ return nullptr;
+}
+
+static StorageClass getFunctionStorageClass(Sema &SemaRef, Declarator &D) {
+ switch (D.getDeclSpec().getStorageClassSpec()) {
+ default: llvm_unreachable("Unknown storage class!");
+ case DeclSpec::SCS_auto:
+ case DeclSpec::SCS_register:
+ case DeclSpec::SCS_mutable:
+ SemaRef.Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ diag::err_typecheck_sclass_func);
+ D.setInvalidType();
+ break;
+ case DeclSpec::SCS_unspecified: break;
+ case DeclSpec::SCS_extern:
+ if (D.getDeclSpec().isExternInLinkageSpec())
+ return SC_None;
+ return SC_Extern;
+ case DeclSpec::SCS_static: {
+ if (SemaRef.CurContext->getRedeclContext()->isFunctionOrMethod()) {
+ // C99 6.7.1p5:
+ // The declaration of an identifier for a function that has
+ // block scope shall have no explicit storage-class specifier
+ // other than extern
+ // See also (C++ [dcl.stc]p4).
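+ // e.g. void f(void) { static int g(void); } // error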
+ SemaRef.Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ diag::err_static_block_func);
+ break;
+ } else
+ return SC_Static;
+ }
+ case DeclSpec::SCS_private_extern: return SC_PrivateExtern;
+ }
+
+ // Any explicit storage class was already returned above; default to none.
+ return SC_None;
+}
+
+static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
+ DeclContext *DC, QualType &R,
+ TypeSourceInfo *TInfo,
+ StorageClass SC,
+ bool &IsVirtualOkay) {
+ DeclarationNameInfo NameInfo = SemaRef.GetNameForDeclarator(D);
+ DeclarationName Name = NameInfo.getName();
+
+ FunctionDecl *NewFD = nullptr;
+ bool isInline = D.getDeclSpec().isInlineSpecified();
+
+ if (!SemaRef.getLangOpts().CPlusPlus) {
+ // Determine whether the function was written with a
+ // prototype. This is true when:
+ // - there is a prototype in the declarator, or
+ // - the type R of the function is some kind of typedef or other reference
+ // to a type name (which eventually refers to a function type).
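+ //
+ // For example:
+ //   typedef int fn(int);
+ //   fn f;      // has a prototype, via the typedef
+ //   int g();   // no prototype in C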
+ bool HasPrototype =
+ (D.isFunctionDeclarator() && D.getFunctionTypeInfo().hasPrototype) ||
+ (!isa<FunctionType>(R.getTypePtr()) && R->isFunctionProtoType());
+
+ NewFD = FunctionDecl::Create(SemaRef.Context, DC,
+ D.getLocStart(), NameInfo, R,
+ TInfo, SC, isInline,
+ HasPrototype, false);
+ if (D.isInvalidType())
+ NewFD->setInvalidDecl();
+
+ return NewFD;
+ }
+
+ bool isExplicit = D.getDeclSpec().isExplicitSpecified();
+ bool isConstexpr = D.getDeclSpec().isConstexprSpecified();
+
+ // Check that the return type is not an abstract class type.
+ // For record types, this is done by the AbstractClassUsageDiagnoser once
+ // the class has been completely parsed.
+ if (!DC->isRecord() &&
+ SemaRef.RequireNonAbstractType(
+ D.getIdentifierLoc(), R->getAs<FunctionType>()->getReturnType(),
+ diag::err_abstract_type_in_decl, SemaRef.AbstractReturnType))
+ D.setInvalidType();
+
+ if (Name.getNameKind() == DeclarationName::CXXConstructorName) {
+ // This is a C++ constructor declaration.
+ assert(DC->isRecord() &&
+ "Constructors can only be declared in a member context");
+
+ R = SemaRef.CheckConstructorDeclarator(D, R, SC);
+ return CXXConstructorDecl::Create(SemaRef.Context, cast<CXXRecordDecl>(DC),
+ D.getLocStart(), NameInfo,
+ R, TInfo, isExplicit, isInline,
+ /*isImplicitlyDeclared=*/false,
+ isConstexpr);
+
+ } else if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
+ // This is a C++ destructor declaration.
+ if (DC->isRecord()) {
+ R = SemaRef.CheckDestructorDeclarator(D, R, SC);
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(DC);
+ CXXDestructorDecl *NewDD = CXXDestructorDecl::Create(
+ SemaRef.Context, Record,
+ D.getLocStart(),
+ NameInfo, R, TInfo, isInline,
+ /*isImplicitlyDeclared=*/false);
+
+ // If the class is complete, then we now create the implicit exception
+ // specification. If the class is incomplete or dependent, we can't do
+ // it yet.
+ if (SemaRef.getLangOpts().CPlusPlus11 && !Record->isDependentType() &&
+ Record->getDefinition() && !Record->isBeingDefined() &&
+ R->getAs<FunctionProtoType>()->getExceptionSpecType() == EST_None) {
+ SemaRef.AdjustDestructorExceptionSpec(Record, NewDD);
+ }
+
+ IsVirtualOkay = true;
+ return NewDD;
+
+ } else {
+ SemaRef.Diag(D.getIdentifierLoc(), diag::err_destructor_not_member);
+ D.setInvalidType();
+
+ // Create a FunctionDecl to satisfy the function definition parsing
+ // code path.
+ return FunctionDecl::Create(SemaRef.Context, DC,
+ D.getLocStart(),
+ D.getIdentifierLoc(), Name, R, TInfo,
+ SC, isInline,
+ /*hasPrototype=*/true, isConstexpr);
+ }
+
+ } else if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName) {
+ if (!DC->isRecord()) {
+ SemaRef.Diag(D.getIdentifierLoc(),
+ diag::err_conv_function_not_member);
+ return nullptr;
+ }
+
+ SemaRef.CheckConversionDeclarator(D, R, SC);
+ IsVirtualOkay = true;
+ return CXXConversionDecl::Create(SemaRef.Context, cast<CXXRecordDecl>(DC),
+ D.getLocStart(), NameInfo,
+ R, TInfo, isInline, isExplicit,
+ isConstexpr, SourceLocation());
+
+ } else if (DC->isRecord()) {
+ // If the name of the function is the same as the name of the record,
+ // then this must be an invalid constructor that has a return type.
+ // (The parser checks for a return type and makes the declarator a
+ // constructor if it has no return type).
+ if (Name.getAsIdentifierInfo() &&
+ Name.getAsIdentifierInfo() == cast<CXXRecordDecl>(DC)->getIdentifier()){
+ SemaRef.Diag(D.getIdentifierLoc(), diag::err_constructor_return_type)
+ << SourceRange(D.getDeclSpec().getTypeSpecTypeLoc())
+ << SourceRange(D.getIdentifierLoc());
+ return nullptr;
+ }
+
+ // This is a C++ method declaration.
+ CXXMethodDecl *Ret = CXXMethodDecl::Create(SemaRef.Context,
+ cast<CXXRecordDecl>(DC),
+ D.getLocStart(), NameInfo, R,
+ TInfo, SC, isInline,
+ isConstexpr, SourceLocation());
+ IsVirtualOkay = !Ret->isStatic();
+ return Ret;
+ } else {
+ bool isFriend =
+ SemaRef.getLangOpts().CPlusPlus && D.getDeclSpec().isFriendSpecified();
+ if (!isFriend && SemaRef.CurContext->isRecord())
+ return nullptr;
+
+ // Determine whether the function was written with a
+ // prototype. This is true when:
+ // - we're in C++ (where every function has a prototype).
+ return FunctionDecl::Create(SemaRef.Context, DC,
+ D.getLocStart(),
+ NameInfo, R, TInfo, SC, isInline,
+ true/*HasPrototype*/, isConstexpr);
+ }
+}
+
+enum OpenCLParamType {
+ ValidKernelParam,
+ PtrPtrKernelParam,
+ PtrKernelParam,
+ PrivatePtrKernelParam,
+ InvalidKernelParam,
+ RecordKernelParam
+};
+
+static OpenCLParamType getOpenCLKernelParameterType(QualType PT) {
+ if (PT->isPointerType()) {
+ QualType PointeeType = PT->getPointeeType();
+ if (PointeeType->isPointerType())
+ return PtrPtrKernelParam;
+ return PointeeType.getAddressSpace() == 0 ? PrivatePtrKernelParam
+ : PtrKernelParam;
+ }
+
+ // TODO: Forbid the other integer types (size_t, ptrdiff_t...) when they can
+ // be used as builtin types.
+
+ if (PT->isImageType())
+ return PtrKernelParam;
+
+ if (PT->isBooleanType())
+ return InvalidKernelParam;
+
+ if (PT->isEventT())
+ return InvalidKernelParam;
+
+ if (PT->isHalfType())
+ return InvalidKernelParam;
+
+ if (PT->isRecordType())
+ return RecordKernelParam;
+
+ return ValidKernelParam;
+}
+
+static void checkIsValidOpenCLKernelParameter(
+ Sema &S,
+ Declarator &D,
+ ParmVarDecl *Param,
+ llvm::SmallPtrSetImpl<const Type *> &ValidTypes) {
+ QualType PT = Param->getType();
+
+ // Cache the valid types we encounter to avoid rechecking structs that are
+ // used again
+ if (ValidTypes.count(PT.getTypePtr()))
+ return;
+
+ switch (getOpenCLKernelParameterType(PT)) {
+ case PtrPtrKernelParam:
+ // OpenCL v1.2 s6.9.a:
+ // A kernel function argument cannot be declared as a
+ // pointer to a pointer type.
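+ // e.g. kernel void k(global int **p); // rejected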
+ S.Diag(Param->getLocation(), diag::err_opencl_ptrptr_kernel_param);
+ D.setInvalidType();
+ return;
+
+ case PrivatePtrKernelParam:
+ // OpenCL v1.2 s6.9.a:
+ // A kernel function argument cannot be declared as a
+ // pointer to the private address space.
+ S.Diag(Param->getLocation(), diag::err_opencl_private_ptr_kernel_param);
+ D.setInvalidType();
+ return;
+
+ // OpenCL v1.2 s6.9.k:
+ // Arguments to kernel functions in a program cannot be declared with the
+ // built-in scalar types bool, half, size_t, ptrdiff_t, intptr_t, and
+ // uintptr_t or a struct and/or union that contain fields declared to be
+ // one of these built-in scalar types.
+
+ case InvalidKernelParam:
+ // OpenCL v1.2 s6.8 n:
+ // A kernel function argument cannot be declared
+ // of event_t type.
+ S.Diag(Param->getLocation(), diag::err_bad_kernel_param_type) << PT;
+ D.setInvalidType();
+ return;
+
+ case PtrKernelParam:
+ case ValidKernelParam:
+ ValidTypes.insert(PT.getTypePtr());
+ return;
+
+ case RecordKernelParam:
+ break;
+ }
+
+ // Track nested structs we will inspect
+ SmallVector<const Decl *, 4> VisitStack;
+
+ // Track where we are in the nested structs. Items will migrate from
+ // VisitStack to HistoryStack as we do the DFS for a bad field.
+ SmallVector<const FieldDecl *, 4> HistoryStack;
+ HistoryStack.push_back(nullptr);
+
+ const RecordDecl *PD = PT->castAs<RecordType>()->getDecl();
+ VisitStack.push_back(PD);
+
+ assert(VisitStack.back() && "First decl null?");
+
+ do {
+ const Decl *Next = VisitStack.pop_back_val();
+ if (!Next) {
+ assert(!HistoryStack.empty());
+ // Found a marker, we have gone up a level
+ if (const FieldDecl *Hist = HistoryStack.pop_back_val())
+ ValidTypes.insert(Hist->getType().getTypePtr());
+
+ continue;
+ }
+
+ // Add everything except the original parameter declaration (which is not a
+ // field itself) to the history stack.
+ const RecordDecl *RD;
+ if (const FieldDecl *Field = dyn_cast<FieldDecl>(Next)) {
+ HistoryStack.push_back(Field);
+ RD = Field->getType()->castAs<RecordType>()->getDecl();
+ } else {
+ RD = cast<RecordDecl>(Next);
+ }
+
+ // Add a null marker so we know when we've gone back up a level
+ VisitStack.push_back(nullptr);
+
+ for (const auto *FD : RD->fields()) {
+ QualType QT = FD->getType();
+
+ if (ValidTypes.count(QT.getTypePtr()))
+ continue;
+
+ OpenCLParamType ParamType = getOpenCLKernelParameterType(QT);
+ if (ParamType == ValidKernelParam)
+ continue;
+
+ if (ParamType == RecordKernelParam) {
+ VisitStack.push_back(FD);
+ continue;
+ }
+
+ // OpenCL v1.2 s6.9.p:
+ // Arguments to kernel functions that are declared to be a struct or union
+ // do not allow OpenCL objects to be passed as elements of the struct or
+ // union.
+ if (ParamType == PtrKernelParam || ParamType == PtrPtrKernelParam ||
+ ParamType == PrivatePtrKernelParam) {
+ S.Diag(Param->getLocation(),
+ diag::err_record_with_pointers_kernel_param)
+ << PT->isUnionType()
+ << PT;
+ } else {
+ S.Diag(Param->getLocation(), diag::err_bad_kernel_param_type) << PT;
+ }
+
+ S.Diag(PD->getLocation(), diag::note_within_field_of_type)
+ << PD->getDeclName();
+
+ // We have an error, now let's go back up through history and show where
+ // the offending field came from
+ for (ArrayRef<const FieldDecl *>::const_iterator
+ I = HistoryStack.begin() + 1,
+ E = HistoryStack.end();
+ I != E; ++I) {
+ const FieldDecl *OuterField = *I;
+ S.Diag(OuterField->getLocation(), diag::note_within_field_of_type)
+ << OuterField->getType();
+ }
+
+ S.Diag(FD->getLocation(), diag::note_illegal_field_declared_here)
+ << QT->isPointerType()
+ << QT;
+ D.setInvalidType();
+ return;
+ }
+ } while (!VisitStack.empty());
+}
+
+NamedDecl*
+Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
+ TypeSourceInfo *TInfo, LookupResult &Previous,
+ MultiTemplateParamsArg TemplateParamLists,
+ bool &AddToScope) {
+ QualType R = TInfo->getType();
+
+ assert(R.getTypePtr()->isFunctionType());
+
+ // TODO: consider using NameInfo for diagnostic.
+ DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
+ DeclarationName Name = NameInfo.getName();
+ StorageClass SC = getFunctionStorageClass(*this, D);
+
+ if (DeclSpec::TSCS TSCS = D.getDeclSpec().getThreadStorageClassSpec())
+ Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
+ diag::err_invalid_thread)
+ << DeclSpec::getSpecifierName(TSCS);
+
+ if (D.isFirstDeclarationOfMember())
+ adjustMemberFunctionCC(R, D.isStaticMember(), D.isCtorOrDtor(),
+ D.getIdentifierLoc());
+
+ bool isFriend = false;
+ FunctionTemplateDecl *FunctionTemplate = nullptr;
+ bool isExplicitSpecialization = false;
+ bool isFunctionTemplateSpecialization = false;
+
+ bool isDependentClassScopeExplicitSpecialization = false;
+ bool HasExplicitTemplateArgs = false;
+ TemplateArgumentListInfo TemplateArgs;
+
+ bool isVirtualOkay = false;
+
+ DeclContext *OriginalDC = DC;
+ bool IsLocalExternDecl = adjustContextForLocalExternDecl(DC);
+
+ FunctionDecl *NewFD = CreateNewFunctionDecl(*this, D, DC, R, TInfo, SC,
+ isVirtualOkay);
+ if (!NewFD) return nullptr;
+
+ if (OriginalLexicalContext && OriginalLexicalContext->isObjCContainer())
+ NewFD->setTopLevelDeclInObjCContainer();
+
+ // Set the lexical context. If this is a function-scope declaration, or has a
+ // C++ scope specifier, or is the object of a friend declaration, the lexical
+ // context will be different from the semantic context.
+ NewFD->setLexicalDeclContext(CurContext);
+
+ if (IsLocalExternDecl)
+ NewFD->setLocalExternDecl();
+
+ if (getLangOpts().CPlusPlus) {
+ bool isInline = D.getDeclSpec().isInlineSpecified();
+ bool isVirtual = D.getDeclSpec().isVirtualSpecified();
+ bool isExplicit = D.getDeclSpec().isExplicitSpecified();
+ bool isConstexpr = D.getDeclSpec().isConstexprSpecified();
+ bool isConcept = D.getDeclSpec().isConceptSpecified();
+ isFriend = D.getDeclSpec().isFriendSpecified();
+ if (isFriend && !isInline && D.isFunctionDefinition()) {
+ // C++ [class.friend]p5
+ // A function can be defined in a friend declaration of a
+ // class . . . . Such a function is implicitly inline.
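+ // e.g. struct A { friend void f() {} }; // f is implicitly inline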
+ NewFD->setImplicitlyInline();
+ }
+
+ // If this is a method defined in an __interface, and is not a constructor
+ // or an overloaded operator, then set the pure flag (isVirtual will already
+ // return true).
+ if (const CXXRecordDecl *Parent =
+ dyn_cast<CXXRecordDecl>(NewFD->getDeclContext())) {
+ if (Parent->isInterface() && cast<CXXMethodDecl>(NewFD)->isUserProvided())
+ NewFD->setPure(true);
+
+ // C++ [class.union]p2
+ // A union can have member functions, but not virtual functions.
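+ // e.g. union U { virtual void f(); }; // error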
+ if (isVirtual && Parent->isUnion())
+ Diag(D.getDeclSpec().getVirtualSpecLoc(), diag::err_virtual_in_union);
+ }
+
+ SetNestedNameSpecifier(NewFD, D);
+ isExplicitSpecialization = false;
+ isFunctionTemplateSpecialization = false;
+ if (D.isInvalidType())
+ NewFD->setInvalidDecl();
+
+ // Match up the template parameter lists with the scope specifier, then
+ // determine whether we have a template or a template specialization.
+ bool Invalid = false;
+ if (TemplateParameterList *TemplateParams =
+ MatchTemplateParametersToScopeSpecifier(
+ D.getDeclSpec().getLocStart(), D.getIdentifierLoc(),
+ D.getCXXScopeSpec(),
+ D.getName().getKind() == UnqualifiedId::IK_TemplateId
+ ? D.getName().TemplateId
+ : nullptr,
+ TemplateParamLists, isFriend, isExplicitSpecialization,
+ Invalid)) {
+ if (TemplateParams->size() > 0) {
+ // This is a function template
+
+ // Check that we can declare a template here.
+ if (CheckTemplateDeclScope(S, TemplateParams))
+ NewFD->setInvalidDecl();
+
+ // A destructor cannot be a template.
+ if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
+ Diag(NewFD->getLocation(), diag::err_destructor_template);
+ NewFD->setInvalidDecl();
+ }
+
+ // If we're adding a template to a dependent context, we may need to
+ // rebuild some of the types used within the template parameter list,
+ // now that we know what the current instantiation is.
+ if (DC->isDependentContext()) {
+ ContextRAII SavedContext(*this, DC);
+ if (RebuildTemplateParamsInCurrentInstantiation(TemplateParams))
+ Invalid = true;
+ }
+
+ FunctionTemplate = FunctionTemplateDecl::Create(Context, DC,
+ NewFD->getLocation(),
+ Name, TemplateParams,
+ NewFD);
+ FunctionTemplate->setLexicalDeclContext(CurContext);
+ NewFD->setDescribedFunctionTemplate(FunctionTemplate);
+
+ // For source fidelity, store the other template param lists.
+ if (TemplateParamLists.size() > 1) {
+ NewFD->setTemplateParameterListsInfo(Context,
+ TemplateParamLists.drop_back(1));
+ }
+ } else {
+ // This is a function template specialization.
+ isFunctionTemplateSpecialization = true;
+ // For source fidelity, store all the template param lists.
+ if (TemplateParamLists.size() > 0)
+ NewFD->setTemplateParameterListsInfo(Context, TemplateParamLists);
+
+ // C++0x [temp.expl.spec]p20 forbids "template<> friend void foo(int);".
+ if (isFriend) {
+ // We want to remove the "template<>", found here.
+ SourceRange RemoveRange = TemplateParams->getSourceRange();
+
+ // If we remove the template<> and the name is not a
+ // template-id, we're actually silently creating a problem:
+ // the friend declaration will refer to an untemplated decl,
+ // and clearly the user wants a template specialization. So
+ // we need to insert '<>' after the name.
+ SourceLocation InsertLoc;
+ if (D.getName().getKind() != UnqualifiedId::IK_TemplateId) {
+ InsertLoc = D.getName().getSourceRange().getEnd();
+ InsertLoc = getLocForEndOfToken(InsertLoc);
+ }
+
+ Diag(D.getIdentifierLoc(), diag::err_template_spec_decl_friend)
+ << Name << RemoveRange
+ << FixItHint::CreateRemoval(RemoveRange)
+ << FixItHint::CreateInsertion(InsertLoc, "<>");
+ }
+ }
+ } else {
+ // All template param lists were matched against the scope specifier:
+ // this is NOT (an explicit specialization of) a template.
+ if (TemplateParamLists.size() > 0)
+ // For source fidelity, store all the template param lists.
+ NewFD->setTemplateParameterListsInfo(Context, TemplateParamLists);
+ }
+
+ if (Invalid) {
+ NewFD->setInvalidDecl();
+ if (FunctionTemplate)
+ FunctionTemplate->setInvalidDecl();
+ }
+
+ // C++ [dcl.fct.spec]p5:
+ // The virtual specifier shall only be used in declarations of
+ // nonstatic class member functions that appear within a
+ // member-specification of a class declaration; see 10.3.
+ //
+ if (isVirtual && !NewFD->isInvalidDecl()) {
+ if (!isVirtualOkay) {
+ Diag(D.getDeclSpec().getVirtualSpecLoc(),
+ diag::err_virtual_non_function);
+ } else if (!CurContext->isRecord()) {
+ // 'virtual' was specified outside of the class.
+ Diag(D.getDeclSpec().getVirtualSpecLoc(),
+ diag::err_virtual_out_of_class)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getVirtualSpecLoc());
+ } else if (NewFD->getDescribedFunctionTemplate()) {
+ // C++ [temp.mem]p3:
+ // A member function template shall not be virtual.
+ Diag(D.getDeclSpec().getVirtualSpecLoc(),
+ diag::err_virtual_member_function_template)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getVirtualSpecLoc());
+ } else {
+ // Okay: Add virtual to the method.
+ NewFD->setVirtualAsWritten(true);
+ }
+
+ if (getLangOpts().CPlusPlus14 &&
+ NewFD->getReturnType()->isUndeducedType())
+ Diag(D.getDeclSpec().getVirtualSpecLoc(), diag::err_auto_fn_virtual);
+ }
+
+ if (getLangOpts().CPlusPlus14 &&
+ (NewFD->isDependentContext() ||
+ (isFriend && CurContext->isDependentContext())) &&
+ NewFD->getReturnType()->isUndeducedType()) {
+ // If the function template is referenced directly (for instance, as a
+ // member of the current instantiation), pretend it has a dependent type.
+ // This is not really justified by the standard, but is the only sane
+ // thing to do.
+ // FIXME: For a friend function, we have not marked the function as being
+ // a friend yet, so 'isDependentContext' on the FD doesn't work.
+ const FunctionProtoType *FPT =
+ NewFD->getType()->castAs<FunctionProtoType>();
+ QualType Result =
+ SubstAutoType(FPT->getReturnType(), Context.DependentTy);
+ NewFD->setType(Context.getFunctionType(Result, FPT->getParamTypes(),
+ FPT->getExtProtoInfo()));
+ }
+
+ // C++ [dcl.fct.spec]p3:
+ // The inline specifier shall not appear on a block scope function
+ // declaration.
+ if (isInline && !NewFD->isInvalidDecl()) {
+ if (CurContext->isFunctionOrMethod()) {
+ // 'inline' is not allowed on block scope function declaration.
+ Diag(D.getDeclSpec().getInlineSpecLoc(),
+ diag::err_inline_declaration_block_scope) << Name
+ << FixItHint::CreateRemoval(D.getDeclSpec().getInlineSpecLoc());
+ }
+ }
+
+ // C++ [dcl.fct.spec]p6:
+ // The explicit specifier shall be used only in the declaration of a
+ // constructor or conversion function within its class definition;
+ // see 12.3.1 and 12.3.2.
+ if (isExplicit && !NewFD->isInvalidDecl()) {
+ if (!CurContext->isRecord()) {
+ // 'explicit' was specified outside of the class.
+ Diag(D.getDeclSpec().getExplicitSpecLoc(),
+ diag::err_explicit_out_of_class)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getExplicitSpecLoc());
+ } else if (!isa<CXXConstructorDecl>(NewFD) &&
+ !isa<CXXConversionDecl>(NewFD)) {
+ // 'explicit' was specified on a function that wasn't a constructor
+ // or conversion function.
+ Diag(D.getDeclSpec().getExplicitSpecLoc(),
+ diag::err_explicit_non_ctor_or_conv_function)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getExplicitSpecLoc());
+ }
+ }
+
+ if (isConstexpr) {
+ // C++11 [dcl.constexpr]p2: constexpr functions and constexpr constructors
+ // are implicitly inline.
+ NewFD->setImplicitlyInline();
+
+ // C++11 [dcl.constexpr]p3: functions declared constexpr are required to
+ // be either constructors or to return a literal type. Therefore,
+ // destructors cannot be declared constexpr.
+ if (isa<CXXDestructorDecl>(NewFD))
+ Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_constexpr_dtor);
+ }
+
+ if (isConcept) {
+ // C++ Concepts TS [dcl.spec.concept]p1: The concept specifier shall be
+ // applied only to the definition of a function template [...]
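+ // e.g. template<typename T> concept bool C(); // error: not a definition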
+ if (!D.isFunctionDefinition()) {
+ Diag(D.getDeclSpec().getConceptSpecLoc(),
+ diag::err_function_concept_not_defined);
+ NewFD->setInvalidDecl();
+ }
+
+ // C++ Concepts TS [dcl.spec.concept]p1: [...] A function concept shall
+ // have no exception-specification and is treated as if it were specified
+ // with noexcept(true) (15.4). [...]
+ if (const FunctionProtoType *FPT = R->getAs<FunctionProtoType>()) {
+ if (FPT->hasExceptionSpec()) {
+ SourceRange Range;
+ if (D.isFunctionDeclarator())
+ Range = D.getFunctionTypeInfo().getExceptionSpecRange();
+ Diag(NewFD->getLocation(), diag::err_function_concept_exception_spec)
+ << FixItHint::CreateRemoval(Range);
+ NewFD->setInvalidDecl();
+ } else {
+ Context.adjustExceptionSpec(NewFD, EST_BasicNoexcept);
+ }
+
+ // C++ Concepts TS [dcl.spec.concept]p5: A function concept has the
+ // following restrictions:
+ // - The declaration's parameter list shall be equivalent to an empty
+ // parameter list.
+ if (FPT->getNumParams() > 0 || FPT->isVariadic())
+ Diag(NewFD->getLocation(), diag::err_function_concept_with_params);
+ }
+
+ // C++ Concepts TS [dcl.spec.concept]p2: Every concept definition is
+ // implicitly defined to be a constexpr declaration (implicitly inline).
+ NewFD->setImplicitlyInline();
+
+ // C++ Concepts TS [dcl.spec.concept]p2: A concept definition shall not
+ // be declared with the thread_local, inline, friend, or constexpr
+ // specifiers, [...]
+ if (isInline) {
+ Diag(D.getDeclSpec().getInlineSpecLoc(),
+ diag::err_concept_decl_invalid_specifiers)
+ << 1 << 1;
+ NewFD->setInvalidDecl(true);
+ }
+
+ if (isFriend) {
+ Diag(D.getDeclSpec().getFriendSpecLoc(),
+ diag::err_concept_decl_invalid_specifiers)
+ << 1 << 2;
+ NewFD->setInvalidDecl(true);
+ }
+
+ if (isConstexpr) {
+ Diag(D.getDeclSpec().getConstexprSpecLoc(),
+ diag::err_concept_decl_invalid_specifiers)
+ << 1 << 3;
+ NewFD->setInvalidDecl(true);
+ }
+ }
+
+ // If __module_private__ was specified, mark the function accordingly.
+ if (D.getDeclSpec().isModulePrivateSpecified()) {
+ if (isFunctionTemplateSpecialization) {
+ SourceLocation ModulePrivateLoc
+ = D.getDeclSpec().getModulePrivateSpecLoc();
+ Diag(ModulePrivateLoc, diag::err_module_private_specialization)
+ << 0
+ << FixItHint::CreateRemoval(ModulePrivateLoc);
+ } else {
+ NewFD->setModulePrivate();
+ if (FunctionTemplate)
+ FunctionTemplate->setModulePrivate();
+ }
+ }
+
+ if (isFriend) {
+ if (FunctionTemplate) {
+ FunctionTemplate->setObjectOfFriendDecl();
+ FunctionTemplate->setAccess(AS_public);
+ }
+ NewFD->setObjectOfFriendDecl();
+ NewFD->setAccess(AS_public);
+ }
+
+ // If a function is defined as defaulted or deleted, mark it as such now.
+ // FIXME: Does this ever happen? ActOnStartOfFunctionDef forces the function
+ // definition kind to FDK_Definition.
+ switch (D.getFunctionDefinitionKind()) {
+ case FDK_Declaration:
+ case FDK_Definition:
+ break;
+
+ case FDK_Defaulted:
+ NewFD->setDefaulted();
+ break;
+
+ case FDK_Deleted:
+ NewFD->setDeletedAsWritten();
+ break;
+ }
+
+ if (isa<CXXMethodDecl>(NewFD) && DC == CurContext &&
+ D.isFunctionDefinition()) {
+ // C++ [class.mfct]p2:
+ // A member function may be defined (8.4) in its class definition, in
+ // which case it is an inline member function (7.1.2)
+ NewFD->setImplicitlyInline();
+ }
+
+ if (SC == SC_Static && isa<CXXMethodDecl>(NewFD) &&
+ !CurContext->isRecord()) {
+ // C++ [class.static]p1:
+ // A data or function member of a class may be declared static
+ // in a class definition, in which case it is a static member of
+ // the class.
+
+ // Complain about the 'static' specifier if it's on an out-of-line
+ // member function definition.
+ Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ diag::err_static_out_of_line)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
+ }
+
+ // C++11 [except.spec]p15:
+ // A deallocation function with no exception-specification is treated
+ // as if it were specified with noexcept(true).
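+ // e.g. void operator delete(void *); // treated as noexcept(true)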
+ const FunctionProtoType *FPT = R->getAs<FunctionProtoType>();
+ if ((Name.getCXXOverloadedOperator() == OO_Delete ||
+ Name.getCXXOverloadedOperator() == OO_Array_Delete) &&
+ getLangOpts().CPlusPlus11 && FPT && !FPT->hasExceptionSpec())
+ NewFD->setType(Context.getFunctionType(
+ FPT->getReturnType(), FPT->getParamTypes(),
+ FPT->getExtProtoInfo().withExceptionSpec(EST_BasicNoexcept)));
+ }
+
+ // Filter out previous declarations that don't match the scope.
+ FilterLookupForScope(Previous, OriginalDC, S, shouldConsiderLinkage(NewFD),
+ D.getCXXScopeSpec().isNotEmpty() ||
+ isExplicitSpecialization ||
+ isFunctionTemplateSpecialization);
+
+ // Handle GNU asm-label extension (encoded as an attribute).
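+ // e.g. int f() asm("real_f"); // attaches an AsmLabelAttr for "real_f"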
+ if (Expr *E = (Expr*) D.getAsmLabel()) {
+ // The parser guarantees this is a string.
+ StringLiteral *SE = cast<StringLiteral>(E);
+ NewFD->addAttr(::new (Context) AsmLabelAttr(SE->getStrTokenLoc(0), Context,
+ SE->getString(), 0));
+ } else if (!ExtnameUndeclaredIdentifiers.empty()) {
+ llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*>::iterator I =
+ ExtnameUndeclaredIdentifiers.find(NewFD->getIdentifier());
+ if (I != ExtnameUndeclaredIdentifiers.end()) {
+ if (isDeclExternC(NewFD)) {
+ NewFD->addAttr(I->second);
+ ExtnameUndeclaredIdentifiers.erase(I);
+ } else
+ Diag(NewFD->getLocation(), diag::warn_redefine_extname_not_applied)
+ << /*Variable*/0 << NewFD;
+ }
+ }
+
+ // Copy the parameter declarations from the declarator D to the function
+ // declaration NewFD, if they are available. First scavenge them into Params.
+ SmallVector<ParmVarDecl*, 16> Params;
+ if (D.isFunctionDeclarator()) {
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
+
+ // Check for C99 6.7.5.3p10 - foo(void) is a non-varargs
+ // function that takes no arguments, not a function that takes a
+ // single void argument.
+ // We let through "const void" here because Sema::GetTypeForDeclarator
+ // already checks for that case.
+ if (FTIHasNonVoidParameters(FTI) && FTI.Params[0].Param) {
+ for (unsigned i = 0, e = FTI.NumParams; i != e; ++i) {
+ ParmVarDecl *Param = cast<ParmVarDecl>(FTI.Params[i].Param);
+ assert(Param->getDeclContext() != NewFD && "Was set before ?");
+ Param->setDeclContext(NewFD);
+ Params.push_back(Param);
+
+ if (Param->isInvalidDecl())
+ NewFD->setInvalidDecl();
+ }
+ }
+
+ } else if (const FunctionProtoType *FT = R->getAs<FunctionProtoType>()) {
+ // When we're declaring a function with a typedef, typeof, etc as in the
+ // following example, we'll need to synthesize (unnamed)
+ // parameters for use in the declaration.
+ //
+ // @code
+ // typedef void fn(int);
+ // fn f;
+ // @endcode
+
+ // Synthesize a parameter for each argument type.
+ for (const auto &AI : FT->param_types()) {
+ ParmVarDecl *Param =
+ BuildParmVarDeclForTypedef(NewFD, D.getIdentifierLoc(), AI);
+ Param->setScopeInfo(0, Params.size());
+ Params.push_back(Param);
+ }
+ } else {
+ assert(R->isFunctionNoProtoType() && NewFD->getNumParams() == 0 &&
+ "Should not need args for typedef of non-prototype fn");
+ }
+
+ // Finally, we know we have the right number of parameters, install them.
+ NewFD->setParams(Params);
+
+ // Find all anonymous symbols defined during the declaration of this function
+ // and add them to NewFD. This lets us track decls such as 'enum Y' in:
+ //
+ // void f(enum Y {AA} x) {}
+ //
+ // which would otherwise incorrectly end up in the translation unit scope.
+ NewFD->setDeclsInPrototypeScope(DeclsInPrototypeScope);
+ DeclsInPrototypeScope.clear();
+
+ if (D.getDeclSpec().isNoreturnSpecified())
+ NewFD->addAttr(
+ ::new(Context) C11NoReturnAttr(D.getDeclSpec().getNoreturnSpecLoc(),
+ Context, 0));
+
+ // Functions returning a variably modified type violate C99 6.7.5.2p2
+ // because all functions have linkage.
+ if (!NewFD->isInvalidDecl() &&
+ NewFD->getReturnType()->isVariablyModifiedType()) {
+ Diag(NewFD->getLocation(), diag::err_vm_func_decl);
+ NewFD->setInvalidDecl();
+ }
+
+ // Apply an implicit SectionAttr if #pragma code_seg is active.
+ if (CodeSegStack.CurrentValue && D.isFunctionDefinition() &&
+ !NewFD->hasAttr<SectionAttr>()) {
+ NewFD->addAttr(
+ SectionAttr::CreateImplicit(Context, SectionAttr::Declspec_allocate,
+ CodeSegStack.CurrentValue->getString(),
+ CodeSegStack.CurrentPragmaLocation));
+ if (UnifySection(CodeSegStack.CurrentValue->getString(),
+ ASTContext::PSF_Implicit | ASTContext::PSF_Execute |
+ ASTContext::PSF_Read,
+ NewFD))
+ NewFD->dropAttr<SectionAttr>();
+ }
+
+ // Handle attributes.
+ ProcessDeclAttributes(S, NewFD, D);
+
+ if (getLangOpts().OpenCL) {
+ // OpenCL v1.1 s6.5: Using an address space qualifier in a function return
+ // type declaration will generate a compilation error.
+ unsigned AddressSpace = NewFD->getReturnType().getAddressSpace();
+ if (AddressSpace == LangAS::opencl_local ||
+ AddressSpace == LangAS::opencl_global ||
+ AddressSpace == LangAS::opencl_constant) {
+ Diag(NewFD->getLocation(),
+ diag::err_opencl_return_value_with_address_space);
+ NewFD->setInvalidDecl();
+ }
+ }
+
+ if (!getLangOpts().CPlusPlus) {
+ // Perform semantic checking on the function declaration.
+ bool isExplicitSpecialization = false;
+ if (!NewFD->isInvalidDecl() && NewFD->isMain())
+ CheckMain(NewFD, D.getDeclSpec());
+
+ if (!NewFD->isInvalidDecl() && NewFD->isMSVCRTEntryPoint())
+ CheckMSVCRTEntryPoint(NewFD);
+
+ if (!NewFD->isInvalidDecl())
+ D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous,
+ isExplicitSpecialization));
+ else if (!Previous.empty())
+ // Recover gracefully from an invalid redeclaration.
+ D.setRedeclaration(true);
+ assert((NewFD->isInvalidDecl() || !D.isRedeclaration() ||
+ Previous.getResultKind() != LookupResult::FoundOverloaded) &&
+ "previous declaration set still overloaded");
+
+ // Diagnose no-prototype function declarations with calling conventions that
+ // don't support variadic calls. Only do this in C and do it after merging
+ // possibly prototyped redeclarations.
+ const FunctionType *FT = NewFD->getType()->castAs<FunctionType>();
+ if (isa<FunctionNoProtoType>(FT) && !D.isFunctionDefinition()) {
+ CallingConv CC = FT->getExtInfo().getCC();
+ if (!supportsVariadicCall(CC)) {
+ // Windows system headers sometimes accidentally use stdcall without
+ // (void) parameters, so we relax this to a warning.
+ int DiagID =
+ CC == CC_X86StdCall ? diag::warn_cconv_knr : diag::err_cconv_knr;
+ Diag(NewFD->getLocation(), DiagID)
+ << FunctionType::getNameForCallConv(CC);
+ }
+ }
+ } else {
+ // C++11 [replacement.functions]p3:
+ // The program's definitions shall not be specified as inline.
+ //
+ // N.B. We diagnose declarations instead of definitions per LWG issue 2340.
+ //
+ // Suppress the diagnostic if the function is __attribute__((used)), since
+ // that forces an external definition to be emitted.
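+ //
+ // e.g. inline void *operator new(std::size_t); // warned as an extension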
+ if (D.getDeclSpec().isInlineSpecified() &&
+ NewFD->isReplaceableGlobalAllocationFunction() &&
+ !NewFD->hasAttr<UsedAttr>())
+ Diag(D.getDeclSpec().getInlineSpecLoc(),
+ diag::ext_operator_new_delete_declared_inline)
+ << NewFD->getDeclName();
+
+ // If the declarator is a template-id, translate the parser's template
+ // argument list into our AST format.
+ if (D.getName().getKind() == UnqualifiedId::IK_TemplateId) {
+ TemplateIdAnnotation *TemplateId = D.getName().TemplateId;
+ TemplateArgs.setLAngleLoc(TemplateId->LAngleLoc);
+ TemplateArgs.setRAngleLoc(TemplateId->RAngleLoc);
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+ translateTemplateArguments(TemplateArgsPtr,
+ TemplateArgs);
+
+ HasExplicitTemplateArgs = true;
+
+ if (NewFD->isInvalidDecl()) {
+ HasExplicitTemplateArgs = false;
+ } else if (FunctionTemplate) {
+ // Function template with explicit template arguments.
+ Diag(D.getIdentifierLoc(), diag::err_function_template_partial_spec)
+ << SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc);
+
+ HasExplicitTemplateArgs = false;
+ } else {
+ assert((isFunctionTemplateSpecialization ||
+ D.getDeclSpec().isFriendSpecified()) &&
+ "should have a 'template<>' for this decl");
+ // "friend void foo<>(int);" is an implicit specialization decl.
+ isFunctionTemplateSpecialization = true;
+ }
+ } else if (isFriend && isFunctionTemplateSpecialization) {
+ // This combination is only possible in a recovery case; the user
+ // wrote something like:
+ // template <> friend void foo(int);
+ // which we're recovering from as if the user had written:
+ // friend void foo<>(int);
+ // Go ahead and fake up a template id.
+ HasExplicitTemplateArgs = true;
+ TemplateArgs.setLAngleLoc(D.getIdentifierLoc());
+ TemplateArgs.setRAngleLoc(D.getIdentifierLoc());
+ }
+
+ // If it's a friend (and only if it's a friend), it's possible
+ // that either the specialized function type or the specialized
+ // template is dependent, and therefore matching will fail. In
+ // this case, don't check the specialization yet.
+ bool InstantiationDependent = false;
+ if (isFunctionTemplateSpecialization && isFriend &&
+ (NewFD->getType()->isDependentType() || DC->isDependentContext() ||
+ TemplateSpecializationType::anyDependentTemplateArguments(
+ TemplateArgs.getArgumentArray(), TemplateArgs.size(),
+ InstantiationDependent))) {
+ assert(HasExplicitTemplateArgs &&
+ "friend function specialization without template args");
+ if (CheckDependentFunctionTemplateSpecialization(NewFD, TemplateArgs,
+ Previous))
+ NewFD->setInvalidDecl();
+ } else if (isFunctionTemplateSpecialization) {
+ if (CurContext->isDependentContext() && CurContext->isRecord()
+ && !isFriend) {
+ isDependentClassScopeExplicitSpecialization = true;
+ Diag(NewFD->getLocation(), getLangOpts().MicrosoftExt ?
+ diag::ext_function_specialization_in_class :
+ diag::err_function_specialization_in_class)
+ << NewFD->getDeclName();
+ } else if (CheckFunctionTemplateSpecialization(NewFD,
+ (HasExplicitTemplateArgs ? &TemplateArgs
+ : nullptr),
+ Previous))
+ NewFD->setInvalidDecl();
+
+ // C++ [dcl.stc]p1:
+ // A storage-class-specifier shall not be specified in an explicit
+ // specialization (14.7.3)
+ FunctionTemplateSpecializationInfo *Info =
+ NewFD->getTemplateSpecializationInfo();
+ if (Info && SC != SC_None) {
+ if (SC != Info->getTemplate()->getTemplatedDecl()->getStorageClass())
+ Diag(NewFD->getLocation(),
+ diag::err_explicit_specialization_inconsistent_storage_class)
+ << SC
+ << FixItHint::CreateRemoval(
+ D.getDeclSpec().getStorageClassSpecLoc());
+
+ else
+ Diag(NewFD->getLocation(),
+ diag::ext_explicit_specialization_storage_class)
+ << FixItHint::CreateRemoval(
+ D.getDeclSpec().getStorageClassSpecLoc());
+ }
+
+ } else if (isExplicitSpecialization && isa<CXXMethodDecl>(NewFD)) {
+ if (CheckMemberSpecialization(NewFD, Previous))
+ NewFD->setInvalidDecl();
+ }
+
+ // Perform semantic checking on the function declaration.
+ if (!isDependentClassScopeExplicitSpecialization) {
+ if (!NewFD->isInvalidDecl() && NewFD->isMain())
+ CheckMain(NewFD, D.getDeclSpec());
+
+ if (!NewFD->isInvalidDecl() && NewFD->isMSVCRTEntryPoint())
+ CheckMSVCRTEntryPoint(NewFD);
+
+ if (!NewFD->isInvalidDecl())
+ D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous,
+ isExplicitSpecialization));
+ else if (!Previous.empty())
+ // Recover gracefully from an invalid redeclaration.
+ D.setRedeclaration(true);
+ }
+
+ assert((NewFD->isInvalidDecl() || !D.isRedeclaration() ||
+ Previous.getResultKind() != LookupResult::FoundOverloaded) &&
+ "previous declaration set still overloaded");
+
+ NamedDecl *PrincipalDecl = (FunctionTemplate
+ ? cast<NamedDecl>(FunctionTemplate)
+ : NewFD);
+
+ if (isFriend && D.isRedeclaration()) {
+ AccessSpecifier Access = AS_public;
+ if (!NewFD->isInvalidDecl())
+ Access = NewFD->getPreviousDecl()->getAccess();
+
+ NewFD->setAccess(Access);
+ if (FunctionTemplate) FunctionTemplate->setAccess(Access);
+ }
+
+ if (NewFD->isOverloadedOperator() && !DC->isRecord() &&
+ PrincipalDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+ PrincipalDecl->setNonMemberOperator();
+
+ // If we have a function template, check the template parameter
+ // list. This will check and merge default template arguments.
+ if (FunctionTemplate) {
+ FunctionTemplateDecl *PrevTemplate =
+ FunctionTemplate->getPreviousDecl();
+ CheckTemplateParameterList(FunctionTemplate->getTemplateParameters(),
+ PrevTemplate ? PrevTemplate->getTemplateParameters()
+ : nullptr,
+ D.getDeclSpec().isFriendSpecified()
+ ? (D.isFunctionDefinition()
+ ? TPC_FriendFunctionTemplateDefinition
+ : TPC_FriendFunctionTemplate)
+ : (D.getCXXScopeSpec().isSet() &&
+ DC && DC->isRecord() &&
+ DC->isDependentContext())
+ ? TPC_ClassTemplateMember
+ : TPC_FunctionTemplate);
+ }
+
+ if (NewFD->isInvalidDecl()) {
+ // Ignore all the rest of this.
+ } else if (!D.isRedeclaration()) {
+ struct ActOnFDArgs ExtraArgs = { S, D, TemplateParamLists,
+ AddToScope };
+ // Fake up an access specifier if it's supposed to be a class member.
+ if (isa<CXXRecordDecl>(NewFD->getDeclContext()))
+ NewFD->setAccess(AS_public);
+
+ // Qualified decls generally require a previous declaration.
+ if (D.getCXXScopeSpec().isSet()) {
+ // ...with the major exception of templated-scope or
+ // dependent-scope friend declarations.
+
+ // TODO: we currently also suppress this check in dependent
+ // contexts because (1) the parameter depth will be off when
+ // matching friend templates and (2) we might actually be
+ // selecting a friend based on a dependent factor. But there
+ // are situations where these conditions don't apply and we
+ // can actually do this check immediately.
+ if (isFriend &&
+ (TemplateParamLists.size() ||
+ D.getCXXScopeSpec().getScopeRep()->isDependent() ||
+ CurContext->isDependentContext())) {
+ // ignore these
+ } else {
+ // The user tried to provide an out-of-line definition for a
+ // function that is a member of a class or namespace, but there
+ // was no such member function declared (C++ [class.mfct]p2,
+ // C++ [namespace.memdef]p2). For example:
+ //
+ // class X {
+ // void f() const;
+ // };
+ //
+ // void X::f() { } // ill-formed
+ //
+ // Complain about this problem, and attempt to suggest close
+ // matches (e.g., those that differ only in cv-qualifiers and
+ // whether the parameter types are references).
+
+ if (NamedDecl *Result = DiagnoseInvalidRedeclaration(
+ *this, Previous, NewFD, ExtraArgs, false, nullptr)) {
+ AddToScope = ExtraArgs.AddToScope;
+ return Result;
+ }
+ }
+
+ // Unqualified local friend declarations are required to resolve
+ // to something.
+ } else if (isFriend && cast<CXXRecordDecl>(CurContext)->isLocalClass()) {
+ if (NamedDecl *Result = DiagnoseInvalidRedeclaration(
+ *this, Previous, NewFD, ExtraArgs, true, S)) {
+ AddToScope = ExtraArgs.AddToScope;
+ return Result;
+ }
+ }
+
+ } else if (!D.isFunctionDefinition() &&
+ isa<CXXMethodDecl>(NewFD) && NewFD->isOutOfLine() &&
+ !isFriend && !isFunctionTemplateSpecialization &&
+ !isExplicitSpecialization) {
+ // An out-of-line member function declaration must also be a
+ // definition (C++ [class.mfct]p2).
+ // Note that this is not the case for explicit specializations of
+ // function templates or member functions of class templates, per
+ // C++ [temp.expl.spec]p2. We also allow these declarations as an
+ // extension for compatibility with old SWIG code which likes to
+ // generate them.
+ Diag(NewFD->getLocation(), diag::ext_out_of_line_declaration)
+ << D.getCXXScopeSpec().getRange();
+ }
+ }
+
+ ProcessPragmaWeak(S, NewFD);
+ checkAttributesAfterMerging(*this, *NewFD);
+
+ AddKnownFunctionAttributes(NewFD);
+
+ if (NewFD->hasAttr<OverloadableAttr>() &&
+ !NewFD->getType()->getAs<FunctionProtoType>()) {
+ Diag(NewFD->getLocation(),
+ diag::err_attribute_overloadable_no_prototype)
+ << NewFD;
+
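+    // For example, in C:
+    //   void f() __attribute__((overloadable));  // error: no prototype
+    //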
+ // Turn this into a variadic function with no parameters.
+ const FunctionType *FT = NewFD->getType()->getAs<FunctionType>();
+ FunctionProtoType::ExtProtoInfo EPI(
+ Context.getDefaultCallingConvention(true, false));
+ EPI.Variadic = true;
+ EPI.ExtInfo = FT->getExtInfo();
+
+ QualType R = Context.getFunctionType(FT->getReturnType(), None, EPI);
+ NewFD->setType(R);
+ }
+
+ // If there's a #pragma GCC visibility in scope, and this isn't a class
+ // member, set the visibility of this function.
+ if (!DC->isRecord() && NewFD->isExternallyVisible())
+ AddPushedVisibilityAttribute(NewFD);
+
+ // If there's a #pragma clang arc_cf_code_audited in scope, consider
+ // marking the function.
+ AddCFAuditedAttribute(NewFD);
+
+ // If this is a function definition, check if we have to apply optnone due to
+ // a pragma.
+  if (D.isFunctionDefinition())
+ AddRangeBasedOptnone(NewFD);
+
+ // If this is the first declaration of an extern C variable, update
+ // the map of such variables.
+ if (NewFD->isFirstDecl() && !NewFD->isInvalidDecl() &&
+ isIncompleteDeclExternC(*this, NewFD))
+ RegisterLocallyScopedExternCDecl(NewFD, S);
+
+ // Set this FunctionDecl's range up to the right paren.
+ NewFD->setRangeEnd(D.getSourceRange().getEnd());
+
+ if (D.isRedeclaration() && !Previous.empty()) {
+ checkDLLAttributeRedeclaration(
+ *this, dyn_cast<NamedDecl>(Previous.getRepresentativeDecl()), NewFD,
+ isExplicitSpecialization || isFunctionTemplateSpecialization);
+ }
+
+ if (getLangOpts().CPlusPlus) {
+ if (FunctionTemplate) {
+ if (NewFD->isInvalidDecl())
+ FunctionTemplate->setInvalidDecl();
+ return FunctionTemplate;
+ }
+ }
+
+ if (NewFD->hasAttr<OpenCLKernelAttr>()) {
+ // OpenCL v1.2 s6.8 static is invalid for kernel functions.
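+    //   e.g.: static kernel void f() {}  // ill-formed in OpenCL 1.2 and later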
+ if ((getLangOpts().OpenCLVersion >= 120)
+ && (SC == SC_Static)) {
+ Diag(D.getIdentifierLoc(), diag::err_static_kernel);
+ D.setInvalidType();
+ }
+
+ // OpenCL v1.2, s6.9 -- Kernels can only have return type void.
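+    //   e.g.: kernel int f() {}  // ill-formed; kernels must return void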
+ if (!NewFD->getReturnType()->isVoidType()) {
+ SourceRange RTRange = NewFD->getReturnTypeSourceRange();
+ Diag(D.getIdentifierLoc(), diag::err_expected_kernel_void_return_type)
+ << (RTRange.isValid() ? FixItHint::CreateReplacement(RTRange, "void")
+ : FixItHint());
+ D.setInvalidType();
+ }
+
+ llvm::SmallPtrSet<const Type *, 16> ValidTypes;
+ for (auto Param : NewFD->params())
+ checkIsValidOpenCLKernelParameter(*this, D, Param, ValidTypes);
+ }
+
+ MarkUnusedFileScopedDecl(NewFD);
+
+ if (getLangOpts().CUDA)
+ if (IdentifierInfo *II = NewFD->getIdentifier())
+ if (!NewFD->isInvalidDecl() &&
+ NewFD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
+ if (II->isStr("cudaConfigureCall")) {
+ if (!R->getAs<FunctionType>()->getReturnType()->isScalarType())
+ Diag(NewFD->getLocation(), diag::err_config_scalar_return);
+
+ Context.setcudaConfigureCallDecl(NewFD);
+ }
+ }
+
+  // Here we have a function template explicit specialization at class scope.
+  // The actual specialization will be postponed to template instantiation
+  // time via the ClassScopeFunctionSpecializationDecl node.
+ if (isDependentClassScopeExplicitSpecialization) {
+ ClassScopeFunctionSpecializationDecl *NewSpec =
+ ClassScopeFunctionSpecializationDecl::Create(
+ Context, CurContext, SourceLocation(),
+ cast<CXXMethodDecl>(NewFD),
+ HasExplicitTemplateArgs, TemplateArgs);
+ CurContext->addDecl(NewSpec);
+ AddToScope = false;
+ }
+
+ return NewFD;
+}
+
+/// \brief Perform semantic checking of a new function declaration.
+///
+/// Performs semantic analysis of the new function declaration
+/// NewFD. This routine performs all semantic checking that does not
+/// require the actual declarator involved in the declaration, and is
+/// used both for the declaration of functions as they are parsed
+/// (called via ActOnDeclarator) and for the declaration of functions
+/// that have been instantiated via C++ template instantiation (called
+/// via InstantiateDecl).
+///
+/// \param IsExplicitSpecialization whether this new function declaration is
+/// an explicit specialization of the previous declaration.
+///
+/// This sets NewFD->isInvalidDecl() to true if there was an error.
+///
+/// \returns true if the function declaration is a redeclaration.
+bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
+ LookupResult &Previous,
+ bool IsExplicitSpecialization) {
+ assert(!NewFD->getReturnType()->isVariablyModifiedType() &&
+ "Variably modified return types are not handled here");
+
+ // Determine whether the type of this function should be merged with
+ // a previous visible declaration. This never happens for functions in C++,
+ // and always happens in C if the previous declaration was visible.
+ bool MergeTypeWithPrevious = !getLangOpts().CPlusPlus &&
+ !Previous.isShadowed();
+
+ bool Redeclaration = false;
+ NamedDecl *OldDecl = nullptr;
+
+ // Merge or overload the declaration with an existing declaration of
+ // the same name, if appropriate.
+ if (!Previous.empty()) {
+ // Determine whether NewFD is an overload of PrevDecl or
+ // a declaration that requires merging. If it's an overload,
+ // there's no more work to do here; we'll just add the new
+ // function to the scope.
+ if (!AllowOverloadingOfFunction(Previous, Context)) {
+ NamedDecl *Candidate = Previous.getRepresentativeDecl();
+ if (shouldLinkPossiblyHiddenDecl(Candidate, NewFD)) {
+ Redeclaration = true;
+ OldDecl = Candidate;
+ }
+ } else {
+ switch (CheckOverload(S, NewFD, Previous, OldDecl,
+ /*NewIsUsingDecl*/ false)) {
+ case Ovl_Match:
+ Redeclaration = true;
+ break;
+
+ case Ovl_NonFunction:
+ Redeclaration = true;
+ break;
+
+ case Ovl_Overload:
+ Redeclaration = false;
+ break;
+ }
+
+ if (!getLangOpts().CPlusPlus && !NewFD->hasAttr<OverloadableAttr>()) {
+ // If a function name is overloadable in C, then every function
+ // with that name must be marked "overloadable".
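+        // For example, in C:
+        //   void f(int) __attribute__((overloadable));
+        //   void f(float);  // error: must also be marked 'overloadable'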
+ Diag(NewFD->getLocation(), diag::err_attribute_overloadable_missing)
+ << Redeclaration << NewFD;
+ NamedDecl *OverloadedDecl = nullptr;
+ if (Redeclaration)
+ OverloadedDecl = OldDecl;
+ else if (!Previous.empty())
+ OverloadedDecl = Previous.getRepresentativeDecl();
+ if (OverloadedDecl)
+ Diag(OverloadedDecl->getLocation(),
+ diag::note_attribute_overloadable_prev_overload);
+ NewFD->addAttr(OverloadableAttr::CreateImplicit(Context));
+ }
+ }
+ }
+
+ // Check for a previous extern "C" declaration with this name.
+ if (!Redeclaration &&
+ checkForConflictWithNonVisibleExternC(*this, NewFD, Previous)) {
+ if (!Previous.empty()) {
+ // This is an extern "C" declaration with the same name as a previous
+ // declaration, and thus redeclares that entity...
+ Redeclaration = true;
+ OldDecl = Previous.getFoundDecl();
+ MergeTypeWithPrevious = false;
+
+ // ... except in the presence of __attribute__((overloadable)).
+ if (OldDecl->hasAttr<OverloadableAttr>()) {
+ if (!getLangOpts().CPlusPlus && !NewFD->hasAttr<OverloadableAttr>()) {
+ Diag(NewFD->getLocation(), diag::err_attribute_overloadable_missing)
+ << Redeclaration << NewFD;
+ Diag(Previous.getFoundDecl()->getLocation(),
+ diag::note_attribute_overloadable_prev_overload);
+ NewFD->addAttr(OverloadableAttr::CreateImplicit(Context));
+ }
+ if (IsOverload(NewFD, cast<FunctionDecl>(OldDecl), false)) {
+ Redeclaration = false;
+ OldDecl = nullptr;
+ }
+ }
+ }
+ }
+
+ // C++11 [dcl.constexpr]p8:
+ // A constexpr specifier for a non-static member function that is not
+ // a constructor declares that member function to be const.
+ //
+ // This needs to be delayed until we know whether this is an out-of-line
+ // definition of a static member function.
+ //
+ // This rule is not present in C++1y, so we produce a backwards
+ // compatibility warning whenever it happens in C++11.
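+  // For example:
+  //   struct S { constexpr int f(); };  // in C++11, 'f' is implicitly const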
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD);
+ if (!getLangOpts().CPlusPlus14 && MD && MD->isConstexpr() &&
+ !MD->isStatic() && !isa<CXXConstructorDecl>(MD) &&
+ (MD->getTypeQualifiers() & Qualifiers::Const) == 0) {
+ CXXMethodDecl *OldMD = nullptr;
+ if (OldDecl)
+ OldMD = dyn_cast_or_null<CXXMethodDecl>(OldDecl->getAsFunction());
+ if (!OldMD || !OldMD->isStatic()) {
+ const FunctionProtoType *FPT =
+ MD->getType()->castAs<FunctionProtoType>();
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ EPI.TypeQuals |= Qualifiers::Const;
+ MD->setType(Context.getFunctionType(FPT->getReturnType(),
+ FPT->getParamTypes(), EPI));
+
+ // Warn that we did this, if we're not performing template instantiation.
+ // In that case, we'll have warned already when the template was defined.
+ if (ActiveTemplateInstantiations.empty()) {
+ SourceLocation AddConstLoc;
+ if (FunctionTypeLoc FTL = MD->getTypeSourceInfo()->getTypeLoc()
+ .IgnoreParens().getAs<FunctionTypeLoc>())
+ AddConstLoc = getLocForEndOfToken(FTL.getRParenLoc());
+
+ Diag(MD->getLocation(), diag::warn_cxx14_compat_constexpr_not_const)
+ << FixItHint::CreateInsertion(AddConstLoc, " const");
+ }
+ }
+ }
+
+ if (Redeclaration) {
+ // NewFD and OldDecl represent declarations that need to be
+ // merged.
+ if (MergeFunctionDecl(NewFD, OldDecl, S, MergeTypeWithPrevious)) {
+ NewFD->setInvalidDecl();
+ return Redeclaration;
+ }
+
+ Previous.clear();
+ Previous.addDecl(OldDecl);
+
+ if (FunctionTemplateDecl *OldTemplateDecl
+ = dyn_cast<FunctionTemplateDecl>(OldDecl)) {
+ NewFD->setPreviousDeclaration(OldTemplateDecl->getTemplatedDecl());
+ FunctionTemplateDecl *NewTemplateDecl
+ = NewFD->getDescribedFunctionTemplate();
+ assert(NewTemplateDecl && "Template/non-template mismatch");
+ if (CXXMethodDecl *Method
+ = dyn_cast<CXXMethodDecl>(NewTemplateDecl->getTemplatedDecl())) {
+ Method->setAccess(OldTemplateDecl->getAccess());
+ NewTemplateDecl->setAccess(OldTemplateDecl->getAccess());
+ }
+
+ // If this is an explicit specialization of a member that is a function
+ // template, mark it as a member specialization.
+ if (IsExplicitSpecialization &&
+ NewTemplateDecl->getInstantiatedFromMemberTemplate()) {
+ NewTemplateDecl->setMemberSpecialization();
+ assert(OldTemplateDecl->isMemberSpecialization());
+ }
+
+ } else {
+ // This needs to happen first so that 'inline' propagates.
+ NewFD->setPreviousDeclaration(cast<FunctionDecl>(OldDecl));
+
+ if (isa<CXXMethodDecl>(NewFD))
+ NewFD->setAccess(OldDecl->getAccess());
+ }
+ }
+
+ // Semantic checking for this function declaration (in isolation).
+
+ if (getLangOpts().CPlusPlus) {
+ // C++-specific checks.
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(NewFD)) {
+ CheckConstructor(Constructor);
+ } else if (CXXDestructorDecl *Destructor =
+ dyn_cast<CXXDestructorDecl>(NewFD)) {
+ CXXRecordDecl *Record = Destructor->getParent();
+ QualType ClassType = Context.getTypeDeclType(Record);
+
+ // FIXME: Shouldn't we be able to perform this check even when the class
+ // type is dependent? Both gcc and edg can handle that.
+ if (!ClassType->isDependentType()) {
+ DeclarationName Name
+ = Context.DeclarationNames.getCXXDestructorName(
+ Context.getCanonicalType(ClassType));
+ if (NewFD->getDeclName() != Name) {
+ Diag(NewFD->getLocation(), diag::err_destructor_name);
+ NewFD->setInvalidDecl();
+ return Redeclaration;
+ }
+ }
+ } else if (CXXConversionDecl *Conversion
+ = dyn_cast<CXXConversionDecl>(NewFD)) {
+ ActOnConversionDeclarator(Conversion);
+ }
+
+ // Find any virtual functions that this function overrides.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(NewFD)) {
+ if (!Method->isFunctionTemplateSpecialization() &&
+ !Method->getDescribedFunctionTemplate() &&
+ Method->isCanonicalDecl()) {
+ if (AddOverriddenMethods(Method->getParent(), Method)) {
+ // If the function was marked as "static", we have a problem.
+ if (NewFD->getStorageClass() == SC_Static) {
+ ReportOverrides(*this, diag::err_static_overrides_virtual, Method);
+ }
+ }
+ }
+
+ if (Method->isStatic())
+ checkThisInStaticMemberFunctionType(Method);
+ }
+
+ // Extra checking for C++ overloaded operators (C++ [over.oper]).
+ if (NewFD->isOverloadedOperator() &&
+ CheckOverloadedOperatorDeclaration(NewFD)) {
+ NewFD->setInvalidDecl();
+ return Redeclaration;
+ }
+
+ // Extra checking for C++0x literal operators (C++0x [over.literal]).
+ if (NewFD->getLiteralIdentifier() &&
+ CheckLiteralOperatorDeclaration(NewFD)) {
+ NewFD->setInvalidDecl();
+ return Redeclaration;
+ }
+
+ // In C++, check default arguments now that we have merged decls. Unless
+ // the lexical context is the class, because in this case this is done
+ // during delayed parsing anyway.
+ if (!CurContext->isRecord())
+ CheckCXXDefaultArguments(NewFD);
+
+ // If this function declares a builtin function, check the type of this
+ // declaration against the expected type for the builtin.
+ if (unsigned BuiltinID = NewFD->getBuiltinID()) {
+ ASTContext::GetBuiltinTypeError Error;
+ LookupPredefedObjCSuperType(*this, S, NewFD->getIdentifier());
+ QualType T = Context.GetBuiltinType(BuiltinID, Error);
+ if (!T.isNull() && !Context.hasSameType(T, NewFD->getType())) {
+ // The type of this function differs from the type of the builtin,
+ // so forget about the builtin entirely.
+ Context.BuiltinInfo.forgetBuiltin(BuiltinID, Context.Idents);
+ }
+ }
+
+ // If this function is declared as being extern "C", then check to see if
+ // the function returns a UDT (class, struct, or union type) that is not C
+ // compatible, and if it does, warn the user.
+ // But, issue any diagnostic on the first declaration only.
+ if (Previous.empty() && NewFD->isExternC()) {
+ QualType R = NewFD->getReturnType();
+ if (R->isIncompleteType() && !R->isVoidType())
+ Diag(NewFD->getLocation(), diag::warn_return_value_udt_incomplete)
+ << NewFD << R;
+ else if (!R.isPODType(Context) && !R->isVoidType() &&
+ !R->isObjCObjectPointerType())
+ Diag(NewFD->getLocation(), diag::warn_return_value_udt) << NewFD << R;
+ }
+ }
+ return Redeclaration;
+}
+
+void Sema::CheckMain(FunctionDecl* FD, const DeclSpec& DS) {
+ // C++11 [basic.start.main]p3:
+ // A program that [...] declares main to be inline, static or
+ // constexpr is ill-formed.
+ // C11 6.7.4p4: In a hosted environment, no function specifier(s) shall
+ // appear in a declaration of main.
+ // static main is not an error under C99, but we should warn about it.
+ // We accept _Noreturn main as an extension.
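+  // For example:
+  //   static int main() {}     // error in C++, warning in C
+  //   inline int main() {}     // error
+  //   constexpr int main() {}  // error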
+ if (FD->getStorageClass() == SC_Static)
+ Diag(DS.getStorageClassSpecLoc(), getLangOpts().CPlusPlus
+ ? diag::err_static_main : diag::warn_static_main)
+ << FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
+ if (FD->isInlineSpecified())
+ Diag(DS.getInlineSpecLoc(), diag::err_inline_main)
+ << FixItHint::CreateRemoval(DS.getInlineSpecLoc());
+ if (DS.isNoreturnSpecified()) {
+ SourceLocation NoreturnLoc = DS.getNoreturnSpecLoc();
+ SourceRange NoreturnRange(NoreturnLoc, getLocForEndOfToken(NoreturnLoc));
+ Diag(NoreturnLoc, diag::ext_noreturn_main);
+ Diag(NoreturnLoc, diag::note_main_remove_noreturn)
+ << FixItHint::CreateRemoval(NoreturnRange);
+ }
+ if (FD->isConstexpr()) {
+ Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_main)
+ << FixItHint::CreateRemoval(DS.getConstexprSpecLoc());
+ FD->setConstexpr(false);
+ }
+
+ if (getLangOpts().OpenCL) {
+ Diag(FD->getLocation(), diag::err_opencl_no_main)
+ << FD->hasAttr<OpenCLKernelAttr>();
+ FD->setInvalidDecl();
+ return;
+ }
+
+ QualType T = FD->getType();
+ assert(T->isFunctionType() && "function decl is not of function type");
+ const FunctionType* FT = T->castAs<FunctionType>();
+
+ if (getLangOpts().GNUMode && !getLangOpts().CPlusPlus) {
+ // In C with GNU extensions we allow main() to have non-integer return
+ // type, but we should warn about the extension, and we disable the
+ // implicit-return-zero rule.
+
+ // GCC in C mode accepts qualified 'int'.
+ if (Context.hasSameUnqualifiedType(FT->getReturnType(), Context.IntTy))
+ FD->setHasImplicitReturnZero(true);
+ else {
+ Diag(FD->getTypeSpecStartLoc(), diag::ext_main_returns_nonint);
+ SourceRange RTRange = FD->getReturnTypeSourceRange();
+ if (RTRange.isValid())
+ Diag(RTRange.getBegin(), diag::note_main_change_return_type)
+ << FixItHint::CreateReplacement(RTRange, "int");
+ }
+ } else {
+ // In C and C++, main magically returns 0 if you fall off the end;
+ // set the flag which tells us that.
+ // This is C++ [basic.start.main]p5 and C99 5.1.2.2.3.
+
+ // All the standards say that main() should return 'int'.
+ if (Context.hasSameType(FT->getReturnType(), Context.IntTy))
+ FD->setHasImplicitReturnZero(true);
+ else {
+ // Otherwise, this is just a flat-out error.
+ SourceRange RTRange = FD->getReturnTypeSourceRange();
+ Diag(FD->getTypeSpecStartLoc(), diag::err_main_returns_nonint)
+ << (RTRange.isValid() ? FixItHint::CreateReplacement(RTRange, "int")
+ : FixItHint());
+ FD->setInvalidDecl(true);
+ }
+ }
+
+ // Treat protoless main() as nullary.
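+  //   e.g. in C:  int main();  // no prototype, so no parameters to check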
+ if (isa<FunctionNoProtoType>(FT)) return;
+
+ const FunctionProtoType* FTP = cast<const FunctionProtoType>(FT);
+ unsigned nparams = FTP->getNumParams();
+ assert(FD->getNumParams() == nparams);
+
+ bool HasExtraParameters = (nparams > 3);
+
+ if (FTP->isVariadic()) {
+ Diag(FD->getLocation(), diag::ext_variadic_main);
+ // FIXME: if we had information about the location of the ellipsis, we
+ // could add a FixIt hint to remove it as a parameter.
+ }
+
+ // Darwin passes an undocumented fourth argument of type char**. If
+ // other platforms start sprouting these, the logic below will start
+ // getting shifty.
+ if (nparams == 4 && Context.getTargetInfo().getTriple().isOSDarwin())
+ HasExtraParameters = false;
+
+ if (HasExtraParameters) {
+ Diag(FD->getLocation(), diag::err_main_surplus_args) << nparams;
+ FD->setInvalidDecl(true);
+ nparams = 3;
+ }
+
+ // FIXME: a lot of the following diagnostics would be improved
+ // if we had some location information about types.
+
+ QualType CharPP =
+ Context.getPointerType(Context.getPointerType(Context.CharTy));
+ QualType Expected[] = { Context.IntTy, CharPP, CharPP, CharPP };
+
+ for (unsigned i = 0; i < nparams; ++i) {
+ QualType AT = FTP->getParamType(i);
+
+ bool mismatch = true;
+
+ if (Context.hasSameUnqualifiedType(AT, Expected[i]))
+ mismatch = false;
+ else if (Expected[i] == CharPP) {
+ // As an extension, the following forms are okay:
+ // char const **
+ // char const * const *
+ // char * const *
+
+ QualifierCollector qs;
+ const PointerType* PT;
+ if ((PT = qs.strip(AT)->getAs<PointerType>()) &&
+ (PT = qs.strip(PT->getPointeeType())->getAs<PointerType>()) &&
+ Context.hasSameType(QualType(qs.strip(PT->getPointeeType()), 0),
+ Context.CharTy)) {
+ qs.removeConst();
+ mismatch = !qs.empty();
+ }
+ }
+
+ if (mismatch) {
+ Diag(FD->getLocation(), diag::err_main_arg_wrong) << i << Expected[i];
+ // TODO: suggest replacing given type with expected type
+ FD->setInvalidDecl(true);
+ }
+ }
+
+ if (nparams == 1 && !FD->isInvalidDecl()) {
+ Diag(FD->getLocation(), diag::warn_main_one_arg);
+ }
+
+ if (!FD->isInvalidDecl() && FD->getDescribedFunctionTemplate()) {
+ Diag(FD->getLocation(), diag::err_mainlike_template_decl) << FD;
+ FD->setInvalidDecl();
+ }
+}
+
+void Sema::CheckMSVCRTEntryPoint(FunctionDecl *FD) {
+ QualType T = FD->getType();
+ assert(T->isFunctionType() && "function decl is not of function type");
+ const FunctionType *FT = T->castAs<FunctionType>();
+
+ // Set an implicit return of 'zero' if the function can return some integral,
+ // enumeration, pointer or nullptr type.
+ if (FT->getReturnType()->isIntegralOrEnumerationType() ||
+ FT->getReturnType()->isAnyPointerType() ||
+ FT->getReturnType()->isNullPtrType())
+ // DllMain is exempt because a return value of zero means it failed.
+ if (FD->getName() != "DllMain")
+ FD->setHasImplicitReturnZero(true);
+
+ if (!FD->isInvalidDecl() && FD->getDescribedFunctionTemplate()) {
+ Diag(FD->getLocation(), diag::err_mainlike_template_decl) << FD;
+ FD->setInvalidDecl();
+ }
+}
+
+bool Sema::CheckForConstantInitializer(Expr *Init, QualType DclT) {
+ // FIXME: Need strict checking. In C89, we need to check for
+ // any assignment, increment, decrement, function-calls, or
+ // commas outside of a sizeof. In C99, it's the same list,
+ // except that the aforementioned are allowed in unevaluated
+ // expressions. Everything else falls under the
+ // "may accept other forms of constant expressions" exception.
+ // (We never end up here for C++, so the constant expression
+ // rules there don't matter.)
+ const Expr *Culprit;
+ if (Init->isConstantInitializer(Context, false, &Culprit))
+ return false;
+ Diag(Culprit->getExprLoc(), diag::err_init_element_not_constant)
+ << Culprit->getSourceRange();
+ return true;
+}
+
+namespace {
+  // Visits an initialization expression to see if OrigDecl is evaluated in
+  // its own initialization and emits a warning if it is.
+ class SelfReferenceChecker
+ : public EvaluatedExprVisitor<SelfReferenceChecker> {
+ Sema &S;
+ Decl *OrigDecl;
+ bool isRecordType;
+ bool isPODType;
+ bool isReferenceType;
+
+ bool isInitList;
+ llvm::SmallVector<unsigned, 4> InitFieldIndex;
+ public:
+ typedef EvaluatedExprVisitor<SelfReferenceChecker> Inherited;
+
+ SelfReferenceChecker(Sema &S, Decl *OrigDecl) : Inherited(S.Context),
+ S(S), OrigDecl(OrigDecl) {
+ isPODType = false;
+ isRecordType = false;
+ isReferenceType = false;
+ isInitList = false;
+ if (ValueDecl *VD = dyn_cast<ValueDecl>(OrigDecl)) {
+ isPODType = VD->getType().isPODType(S.Context);
+ isRecordType = VD->getType()->isRecordType();
+ isReferenceType = VD->getType()->isReferenceType();
+ }
+ }
+
+    // For most expressions, just call the visitor. For initializer lists,
+    // track the index of the field being initialized, since fields are
+    // initialized in order, allowing use of previously initialized fields.
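+    // For example, the checker treats the use of an already-initialized
+    // field as safe:
+    //   struct P { int x, y; };
+    //   P p = { 1, p.x };  // 'x' is initialized before 'y' is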
+ void CheckExpr(Expr *E) {
+ InitListExpr *InitList = dyn_cast<InitListExpr>(E);
+ if (!InitList) {
+ Visit(E);
+ return;
+ }
+
+ // Track and increment the index here.
+ isInitList = true;
+ InitFieldIndex.push_back(0);
+ for (auto Child : InitList->children()) {
+ CheckExpr(cast<Expr>(Child));
+ ++InitFieldIndex.back();
+ }
+ InitFieldIndex.pop_back();
+ }
+
+    // Returns true if MemberExpr is checked and no further checking is needed.
+ // Returns false if additional checking is required.
+ bool CheckInitListMemberExpr(MemberExpr *E, bool CheckReference) {
+ llvm::SmallVector<FieldDecl*, 4> Fields;
+ Expr *Base = E;
+ bool ReferenceField = false;
+
+      // Get the field members used.
+ while (MemberExpr *ME = dyn_cast<MemberExpr>(Base)) {
+ FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
+ if (!FD)
+ return false;
+ Fields.push_back(FD);
+ if (FD->getType()->isReferenceType())
+ ReferenceField = true;
+ Base = ME->getBase()->IgnoreParenImpCasts();
+ }
+
+ // Keep checking only if the base Decl is the same.
+ DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base);
+ if (!DRE || DRE->getDecl() != OrigDecl)
+ return false;
+
+      // A reference field can be bound to an uninitialized field.
+ if (CheckReference && !ReferenceField)
+ return true;
+
+ // Convert FieldDecls to their index number.
+ llvm::SmallVector<unsigned, 4> UsedFieldIndex;
+ for (const FieldDecl *I : llvm::reverse(Fields))
+ UsedFieldIndex.push_back(I->getFieldIndex());
+
+      // See if a warning is needed by checking the first difference in index
+      // numbers. If the field being used has a lower index than the field
+      // being initialized, then the use is safe.
+ for (auto UsedIter = UsedFieldIndex.begin(),
+ UsedEnd = UsedFieldIndex.end(),
+ OrigIter = InitFieldIndex.begin(),
+ OrigEnd = InitFieldIndex.end();
+ UsedIter != UsedEnd && OrigIter != OrigEnd; ++UsedIter, ++OrigIter) {
+ if (*UsedIter < *OrigIter)
+ return true;
+ if (*UsedIter > *OrigIter)
+ break;
+ }
+
+ // TODO: Add a different warning which will print the field names.
+ HandleDeclRefExpr(DRE);
+ return true;
+ }
+
+ // For most expressions, the cast is directly above the DeclRefExpr.
+ // For conditional operators, the cast can be outside the conditional
+ // operator if both expressions are DeclRefExpr's.
+ void HandleValue(Expr *E) {
+ E = E->IgnoreParens();
+ if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(E)) {
+ HandleDeclRefExpr(DRE);
+ return;
+ }
+
+ if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
+ Visit(CO->getCond());
+ HandleValue(CO->getTrueExpr());
+ HandleValue(CO->getFalseExpr());
+ return;
+ }
+
+ if (BinaryConditionalOperator *BCO =
+ dyn_cast<BinaryConditionalOperator>(E)) {
+ Visit(BCO->getCond());
+ HandleValue(BCO->getFalseExpr());
+ return;
+ }
+
+ if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) {
+ HandleValue(OVE->getSourceExpr());
+ return;
+ }
+
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_Comma) {
+ Visit(BO->getLHS());
+ HandleValue(BO->getRHS());
+ return;
+ }
+ }
+
+ if (isa<MemberExpr>(E)) {
+ if (isInitList) {
+ if (CheckInitListMemberExpr(cast<MemberExpr>(E),
+ false /*CheckReference*/))
+ return;
+ }
+
+ Expr *Base = E->IgnoreParenImpCasts();
+ while (MemberExpr *ME = dyn_cast<MemberExpr>(Base)) {
+ // Check for static member variables and don't warn on them.
+ if (!isa<FieldDecl>(ME->getMemberDecl()))
+ return;
+ Base = ME->getBase()->IgnoreParenImpCasts();
+ }
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base))
+ HandleDeclRefExpr(DRE);
+ return;
+ }
+
+ Visit(E);
+ }
+
+ // Reference types not handled in HandleValue are handled here since all
+ // uses of references are bad, not just r-value uses.
+ void VisitDeclRefExpr(DeclRefExpr *E) {
+ if (isReferenceType)
+ HandleDeclRefExpr(E);
+ }
+
+ void VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ if (E->getCastKind() == CK_LValueToRValue) {
+ HandleValue(E->getSubExpr());
+ return;
+ }
+
+ Inherited::VisitImplicitCastExpr(E);
+ }
+
+ void VisitMemberExpr(MemberExpr *E) {
+ if (isInitList) {
+ if (CheckInitListMemberExpr(E, true /*CheckReference*/))
+ return;
+ }
+
+ // Don't warn on arrays since they can be treated as pointers.
+ if (E->getType()->canDecayToPointerType()) return;
+
+      // Warn when a non-static method call is followed by non-static member
+      // field accesses, which are followed by a DeclRefExpr.
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(E->getMemberDecl());
+ bool Warn = (MD && !MD->isStatic());
+ Expr *Base = E->getBase()->IgnoreParenImpCasts();
+ while (MemberExpr *ME = dyn_cast<MemberExpr>(Base)) {
+ if (!isa<FieldDecl>(ME->getMemberDecl()))
+ Warn = false;
+ Base = ME->getBase()->IgnoreParenImpCasts();
+ }
+
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
+ if (Warn)
+ HandleDeclRefExpr(DRE);
+ return;
+ }
+
+ // The base of a MemberExpr is not a MemberExpr or a DeclRefExpr.
+ // Visit that expression.
+ Visit(Base);
+ }
+
+ void VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
+ Expr *Callee = E->getCallee();
+
+ if (isa<UnresolvedLookupExpr>(Callee))
+ return Inherited::VisitCXXOperatorCallExpr(E);
+
+ Visit(Callee);
+ for (auto Arg: E->arguments())
+ HandleValue(Arg->IgnoreParenImpCasts());
+ }
+
+ void VisitUnaryOperator(UnaryOperator *E) {
+      // For POD record types, addresses of their own members are well-defined.
+ if (E->getOpcode() == UO_AddrOf && isRecordType &&
+ isa<MemberExpr>(E->getSubExpr()->IgnoreParens())) {
+ if (!isPODType)
+ HandleValue(E->getSubExpr());
+ return;
+ }
+
+ if (E->isIncrementDecrementOp()) {
+ HandleValue(E->getSubExpr());
+ return;
+ }
+
+ Inherited::VisitUnaryOperator(E);
+ }
+
+ void VisitObjCMessageExpr(ObjCMessageExpr *E) { return; }
+
+ void VisitCXXConstructExpr(CXXConstructExpr *E) {
+ if (E->getConstructor()->isCopyConstructor()) {
+ Expr *ArgExpr = E->getArg(0);
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(ArgExpr))
+ if (ILE->getNumInits() == 1)
+ ArgExpr = ILE->getInit(0);
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgExpr))
+ if (ICE->getCastKind() == CK_NoOp)
+ ArgExpr = ICE->getSubExpr();
+ HandleValue(ArgExpr);
+ return;
+ }
+ Inherited::VisitCXXConstructExpr(E);
+ }
+
+ void VisitCallExpr(CallExpr *E) {
+ // Treat std::move as a use.
+ if (E->getNumArgs() == 1) {
+ if (FunctionDecl *FD = E->getDirectCallee()) {
+ if (FD->isInStdNamespace() && FD->getIdentifier() &&
+ FD->getIdentifier()->isStr("move")) {
+ HandleValue(E->getArg(0));
+ return;
+ }
+ }
+ }
+
+ Inherited::VisitCallExpr(E);
+ }
+
+ void VisitBinaryOperator(BinaryOperator *E) {
+ if (E->isCompoundAssignmentOp()) {
+ HandleValue(E->getLHS());
+ Visit(E->getRHS());
+ return;
+ }
+
+ Inherited::VisitBinaryOperator(E);
+ }
+
+    // A custom visitor for BinaryConditionalOperator is needed because the
+    // regular visitor would check the condition and true expression
+    // separately, but both point to the same place, giving duplicate
+    // diagnostics.
+ void VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
+ Visit(E->getCond());
+ Visit(E->getFalseExpr());
+ }
+
+ void HandleDeclRefExpr(DeclRefExpr *DRE) {
+ Decl* ReferenceDecl = DRE->getDecl();
+ if (OrigDecl != ReferenceDecl) return;
+ unsigned diag;
+ if (isReferenceType) {
+ diag = diag::warn_uninit_self_reference_in_reference_init;
+ } else if (cast<VarDecl>(OrigDecl)->isStaticLocal()) {
+ diag = diag::warn_static_self_reference_in_init;
+ } else if (isa<TranslationUnitDecl>(OrigDecl->getDeclContext()) ||
+ isa<NamespaceDecl>(OrigDecl->getDeclContext()) ||
+ DRE->getDecl()->getType()->isRecordType()) {
+ diag = diag::warn_uninit_self_reference_in_init;
+ } else {
+ // Local variables will be handled by the CFG analysis.
+ return;
+ }
+
+ S.DiagRuntimeBehavior(DRE->getLocStart(), DRE,
+ S.PDiag(diag)
+ << DRE->getNameInfo().getName()
+ << OrigDecl->getLocation()
+ << DRE->getSourceRange());
+ }
+ };
+
+ /// CheckSelfReference - Warns if OrigDecl is used in expression E.
+ static void CheckSelfReference(Sema &S, Decl* OrigDecl, Expr *E,
+ bool DirectInit) {
+    // Parameter arguments are occasionally constructed from themselves,
+    // for instance, in recursive functions. Skip them.
+ if (isa<ParmVarDecl>(OrigDecl))
+ return;
+
+ E = E->IgnoreParens();
+
+ // Skip checking T a = a where T is not a record or reference type.
+ // Doing so is a way to silence uninitialized warnings.
+ if (!DirectInit && !cast<VarDecl>(OrigDecl)->getType()->isRecordType())
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
+ if (ICE->getCastKind() == CK_LValueToRValue)
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr()))
+ if (DRE->getDecl() == OrigDecl)
+ return;
+
+ SelfReferenceChecker(S, OrigDecl).CheckExpr(E);
+ }
+}
+
+QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
+ DeclarationName Name, QualType Type,
+ TypeSourceInfo *TSI,
+ SourceRange Range, bool DirectInit,
+ Expr *Init) {
+ bool IsInitCapture = !VDecl;
+ assert((!VDecl || !VDecl->isInitCapture()) &&
+ "init captures are expected to be deduced prior to initialization");
+
+ ArrayRef<Expr *> DeduceInits = Init;
+ if (DirectInit) {
+ if (auto *PL = dyn_cast<ParenListExpr>(Init))
+ DeduceInits = PL->exprs();
+ else if (auto *IL = dyn_cast<InitListExpr>(Init))
+ DeduceInits = IL->inits();
+ }
+
+ // Deduction only works if we have exactly one source expression.
+ if (DeduceInits.empty()) {
+ // It isn't possible to write this directly, but it is possible to
+ // end up in this situation with "auto x(some_pack...);"
+ Diag(Init->getLocStart(), IsInitCapture
+ ? diag::err_init_capture_no_expression
+ : diag::err_auto_var_init_no_expression)
+ << Name << Type << Range;
+ return QualType();
+ }
+
+ if (DeduceInits.size() > 1) {
+ Diag(DeduceInits[1]->getLocStart(),
+ IsInitCapture ? diag::err_init_capture_multiple_expressions
+ : diag::err_auto_var_init_multiple_expressions)
+ << Name << Type << Range;
+ return QualType();
+ }
+
+ Expr *DeduceInit = DeduceInits[0];
+ if (DirectInit && isa<InitListExpr>(DeduceInit)) {
+ Diag(Init->getLocStart(), IsInitCapture
+ ? diag::err_init_capture_paren_braces
+ : diag::err_auto_var_init_paren_braces)
+ << isa<InitListExpr>(Init) << Name << Type << Range;
+ return QualType();
+ }
+
+ // Expressions default to 'id' when we're in a debugger.
+ bool DefaultedAnyToId = false;
+ if (getLangOpts().DebuggerCastResultToId &&
+ Init->getType() == Context.UnknownAnyTy && !IsInitCapture) {
+ ExprResult Result = forceUnknownAnyToType(Init, Context.getObjCIdType());
+ if (Result.isInvalid()) {
+ return QualType();
+ }
+ Init = Result.get();
+ DefaultedAnyToId = true;
+ }
+
+ QualType DeducedType;
+ if (DeduceAutoType(TSI, DeduceInit, DeducedType) == DAR_Failed) {
+ if (!IsInitCapture)
+ DiagnoseAutoDeductionFailure(VDecl, DeduceInit);
+ else if (isa<InitListExpr>(Init))
+ Diag(Range.getBegin(),
+ diag::err_init_capture_deduction_failure_from_init_list)
+ << Name
+ << (DeduceInit->getType().isNull() ? TSI->getType()
+ : DeduceInit->getType())
+ << DeduceInit->getSourceRange();
+ else
+ Diag(Range.getBegin(), diag::err_init_capture_deduction_failure)
+ << Name << TSI->getType()
+ << (DeduceInit->getType().isNull() ? TSI->getType()
+ : DeduceInit->getType())
+ << DeduceInit->getSourceRange();
+ }
+
+ // Warn if we deduced 'id'. 'auto' usually implies type-safety, but using
+ // 'id' instead of a specific object type prevents most of our usual
+ // checks.
+ // We only want to warn outside of template instantiations, though:
+ // inside a template, the 'id' could have come from a parameter.
+ if (ActiveTemplateInstantiations.empty() && !DefaultedAnyToId &&
+ !IsInitCapture && !DeducedType.isNull() && DeducedType->isObjCIdType()) {
+ SourceLocation Loc = TSI->getTypeLoc().getBeginLoc();
+ Diag(Loc, diag::warn_auto_var_is_id) << Name << Range;
+ }
+
+ return DeducedType;
+}
+
+/// AddInitializerToDecl - Adds the initializer Init to the
+/// declaration dcl. If DirectInit is true, this is C++ direct
+/// initialization rather than copy initialization.
+void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
+ bool DirectInit, bool TypeMayContainAuto) {
+ // If there is no declaration, there was an error parsing it. Just ignore
+ // the initializer.
+ if (!RealDecl || RealDecl->isInvalidDecl()) {
+ CorrectDelayedTyposInExpr(Init, dyn_cast_or_null<VarDecl>(RealDecl));
+ return;
+ }
+
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(RealDecl)) {
+ // Pure-specifiers are handled in ActOnPureSpecifier.
+ Diag(Method->getLocation(), diag::err_member_function_initialization)
+ << Method->getDeclName() << Init->getSourceRange();
+ Method->setInvalidDecl();
+ return;
+ }
+
+ VarDecl *VDecl = dyn_cast<VarDecl>(RealDecl);
+ if (!VDecl) {
+ assert(!isa<FieldDecl>(RealDecl) && "field init shouldn't get here");
+ Diag(RealDecl->getLocation(), diag::err_illegal_initializer);
+ RealDecl->setInvalidDecl();
+ return;
+ }
+
+ // C++11 [decl.spec.auto]p6. Deduce the type which 'auto' stands in for.
+ if (TypeMayContainAuto && VDecl->getType()->isUndeducedType()) {
+ // Attempt typo correction early so that the type of the init expression can
+ // be deduced based on the chosen correction if the original init contains a
+ // TypoExpr.
+ ExprResult Res = CorrectDelayedTyposInExpr(Init, VDecl);
+ if (!Res.isUsable()) {
+ RealDecl->setInvalidDecl();
+ return;
+ }
+ Init = Res.get();
+
+ QualType DeducedType = deduceVarTypeFromInitializer(
+ VDecl, VDecl->getDeclName(), VDecl->getType(),
+ VDecl->getTypeSourceInfo(), VDecl->getSourceRange(), DirectInit, Init);
+ if (DeducedType.isNull()) {
+ RealDecl->setInvalidDecl();
+ return;
+ }
+
+ VDecl->setType(DeducedType);
+ assert(VDecl->isLinkageValid());
+
+ // In ARC, infer lifetime.
+ if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(VDecl))
+ VDecl->setInvalidDecl();
+
+ // If this is a redeclaration, check that the type we just deduced matches
+ // the previously declared type.
+ if (VarDecl *Old = VDecl->getPreviousDecl()) {
+ // We never need to merge the type, because we cannot form an incomplete
+ // array of auto, nor deduce such a type.
+ MergeVarDeclTypes(VDecl, Old, /*MergeTypeWithPrevious*/ false);
+ }
+
+ // Check the deduced type is valid for a variable declaration.
+ CheckVariableDeclarationType(VDecl);
+ if (VDecl->isInvalidDecl())
+ return;
+ }
+
+ // dllimport cannot be used on variable definitions.
+ if (VDecl->hasAttr<DLLImportAttr>() && !VDecl->isStaticDataMember()) {
+ Diag(VDecl->getLocation(), diag::err_attribute_dllimport_data_definition);
+ VDecl->setInvalidDecl();
+ return;
+ }
+
+ if (VDecl->isLocalVarDecl() && VDecl->hasExternalStorage()) {
+ // C99 6.7.8p5. C++ has no such restriction, but that is a defect.
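+    //   e.g.: void f() { extern int x = 1; }  // ill-formed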
+ Diag(VDecl->getLocation(), diag::err_block_extern_cant_init);
+ VDecl->setInvalidDecl();
+ return;
+ }
+
+ if (!VDecl->getType()->isDependentType()) {
+ // A definition must end up with a complete type, which means it must be
+ // complete with the restriction that an array type might be completed by
+ // the initializer; note that later code assumes this restriction.
+ QualType BaseDeclType = VDecl->getType();
+ if (const ArrayType *Array = Context.getAsIncompleteArrayType(BaseDeclType))
+ BaseDeclType = Array->getElementType();
+ if (RequireCompleteType(VDecl->getLocation(), BaseDeclType,
+ diag::err_typecheck_decl_incomplete_type)) {
+ RealDecl->setInvalidDecl();
+ return;
+ }
+
+    // The variable cannot have an abstract class type.
+ if (RequireNonAbstractType(VDecl->getLocation(), VDecl->getType(),
+ diag::err_abstract_type_in_decl,
+ AbstractVariableType))
+ VDecl->setInvalidDecl();
+ }
+
+ VarDecl *Def;
+ if ((Def = VDecl->getDefinition()) && Def != VDecl) {
+ NamedDecl *Hidden = nullptr;
+ if (!hasVisibleDefinition(Def, &Hidden) &&
+ (VDecl->getFormalLinkage() == InternalLinkage ||
+ VDecl->getDescribedVarTemplate() ||
+ VDecl->getNumTemplateParameterLists() ||
+ VDecl->getDeclContext()->isDependentContext())) {
+ // The previous definition is hidden, and multiple definitions are
+ // permitted (in separate TUs). Form another definition of it.
+ } else {
+ Diag(VDecl->getLocation(), diag::err_redefinition)
+ << VDecl->getDeclName();
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ VDecl->setInvalidDecl();
+ return;
+ }
+ }
+
+ if (getLangOpts().CPlusPlus) {
+ // C++ [class.static.data]p4
+ // If a static data member is of const integral or const
+ // enumeration type, its declaration in the class definition can
+ // specify a constant-initializer which shall be an integral
+ // constant expression (5.19). In that case, the member can appear
+ // in integral constant expressions. The member shall still be
+ // defined in a namespace scope if it is used in the program and the
+ // namespace scope definition shall not contain an initializer.
+ //
+ // We already performed a redefinition check above, but for static
+ // data members we also need to check whether there was an in-class
+ // declaration with an initializer.
+ if (VDecl->isStaticDataMember() && VDecl->getCanonicalDecl()->hasInit()) {
+ Diag(Init->getExprLoc(), diag::err_static_data_member_reinitialization)
+ << VDecl->getDeclName();
+ Diag(VDecl->getCanonicalDecl()->getInit()->getExprLoc(),
+ diag::note_previous_initializer)
+ << 0;
+ return;
+ }
+
+ if (VDecl->hasLocalStorage())
+ getCurFunction()->setHasBranchProtectedScope();
+
+ if (DiagnoseUnexpandedParameterPack(Init, UPPC_Initializer)) {
+ VDecl->setInvalidDecl();
+ return;
+ }
+ }
+
+ // OpenCL 1.1 6.5.2: "Variables allocated in the __local address space inside
+ // a kernel function cannot be initialized."
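+  //   e.g.: kernel void f() { local int x = 1; }  // ill-formed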
+ if (VDecl->getType().getAddressSpace() == LangAS::opencl_local) {
+ Diag(VDecl->getLocation(), diag::err_local_cant_init);
+ VDecl->setInvalidDecl();
+ return;
+ }
+
+  // Get the decl's type and save a reference for later, since
+  // CheckInitializerTypes may change it.
+ QualType DclT = VDecl->getType(), SavT = DclT;
+
+ // Expressions default to 'id' when we're in a debugger
+ // and we are assigning it to a variable of Objective-C pointer type.
+ if (getLangOpts().DebuggerCastResultToId && DclT->isObjCObjectPointerType() &&
+ Init->getType() == Context.UnknownAnyTy) {
+ ExprResult Result = forceUnknownAnyToType(Init, Context.getObjCIdType());
+ if (Result.isInvalid()) {
+ VDecl->setInvalidDecl();
+ return;
+ }
+ Init = Result.get();
+ }
+
+ // Perform the initialization.
+ ParenListExpr *CXXDirectInit = dyn_cast<ParenListExpr>(Init);
+ if (!VDecl->isInvalidDecl()) {
+ InitializedEntity Entity = InitializedEntity::InitializeVariable(VDecl);
+ InitializationKind Kind =
+ DirectInit
+ ? CXXDirectInit
+ ? InitializationKind::CreateDirect(VDecl->getLocation(),
+ Init->getLocStart(),
+ Init->getLocEnd())
+ : InitializationKind::CreateDirectList(VDecl->getLocation())
+ : InitializationKind::CreateCopy(VDecl->getLocation(),
+ Init->getLocStart());
+
+ MultiExprArg Args = Init;
+ if (CXXDirectInit)
+ Args = MultiExprArg(CXXDirectInit->getExprs(),
+ CXXDirectInit->getNumExprs());
+
+ // Try to correct any TypoExprs in the initialization arguments.
+ for (size_t Idx = 0; Idx < Args.size(); ++Idx) {
+ ExprResult Res = CorrectDelayedTyposInExpr(
+ Args[Idx], VDecl, [this, Entity, Kind](Expr *E) {
+ InitializationSequence Init(*this, Entity, Kind, MultiExprArg(E));
+ return Init.Failed() ? ExprError() : E;
+ });
+ if (Res.isInvalid()) {
+ VDecl->setInvalidDecl();
+ } else if (Res.get() != Args[Idx]) {
+ Args[Idx] = Res.get();
+ }
+ }
+ if (VDecl->isInvalidDecl())
+ return;
+
+ InitializationSequence InitSeq(*this, Entity, Kind, Args);
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Args, &DclT);
+ if (Result.isInvalid()) {
+ VDecl->setInvalidDecl();
+ return;
+ }
+
+ Init = Result.getAs<Expr>();
+ }
+
+ // Check for self-references within variable initializers.
+ // Variables declared within a function/method body (except for references)
+ // are handled by a dataflow analysis.
+ if (!VDecl->hasLocalStorage() || VDecl->getType()->isRecordType() ||
+ VDecl->getType()->isReferenceType()) {
+ CheckSelfReference(*this, RealDecl, Init, DirectInit);
+ }
+
+ // If the type changed, it means we had an incomplete type that was
+ // completed by the initializer. For example:
+ // int ary[] = { 1, 3, 5 };
+ // "ary" transitions from an IncompleteArrayType to a ConstantArrayType.
+ if (!VDecl->isInvalidDecl() && (DclT != SavT))
+ VDecl->setType(DclT);
+
+ if (!VDecl->isInvalidDecl()) {
+ checkUnsafeAssigns(VDecl->getLocation(), VDecl->getType(), Init);
+
+ if (VDecl->hasAttr<BlocksAttr>())
+ checkRetainCycles(VDecl, Init);
+
+ // It is safe to assign a weak reference into a strong variable.
+ // Although this code can still have problems:
+ // id x = self.weakProp;
+ // id y = self.weakProp;
+    // we do not want to warn spuriously when 'x' and 'y' are on separate
+ // paths through the function. This should be revisited if
+ // -Wrepeated-use-of-weak is made flow-sensitive.
+ if (VDecl->getType().getObjCLifetime() == Qualifiers::OCL_Strong &&
+ !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak,
+ Init->getLocStart()))
+ getCurFunction()->markSafeWeakUse(Init);
+ }
+
+ // The initialization is usually a full-expression.
+ //
+ // FIXME: If this is a braced initialization of an aggregate, it is not
+ // an expression, and each individual field initializer is a separate
+ // full-expression. For instance, in:
+ //
+ // struct Temp { ~Temp(); };
+ // struct S { S(Temp); };
+ // struct T { S a, b; } t = { Temp(), Temp() }
+ //
+ // we should destroy the first Temp before constructing the second.
+ ExprResult Result = ActOnFinishFullExpr(Init, VDecl->getLocation(),
+ false,
+ VDecl->isConstexpr());
+ if (Result.isInvalid()) {
+ VDecl->setInvalidDecl();
+ return;
+ }
+ Init = Result.get();
+
+ // Attach the initializer to the decl.
+ VDecl->setInit(Init);
+
+ if (VDecl->isLocalVarDecl()) {
+ // C99 6.7.8p4: All the expressions in an initializer for an object that has
+ // static storage duration shall be constant expressions or string literals.
+ // C++ does not have this restriction.
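+    //   e.g.: static int x = f();  // ill-formed in C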
+ if (!getLangOpts().CPlusPlus && !VDecl->isInvalidDecl()) {
+ const Expr *Culprit;
+ if (VDecl->getStorageClass() == SC_Static)
+ CheckForConstantInitializer(Init, DclT);
+ // C89 is stricter than C99 for non-static aggregate types.
+ // C89 6.5.7p3: All the expressions [...] in an initializer list
+ // for an object that has aggregate or union type shall be
+ // constant expressions.
+ else if (!getLangOpts().C99 && VDecl->getType()->isAggregateType() &&
+ isa<InitListExpr>(Init) &&
+ !Init->isConstantInitializer(Context, false, &Culprit))
+ Diag(Culprit->getExprLoc(),
+ diag::ext_aggregate_init_not_constant)
+ << Culprit->getSourceRange();
+ }
+ } else if (VDecl->isStaticDataMember() &&
+ VDecl->getLexicalDeclContext()->isRecord()) {
+ // This is an in-class initialization for a static data member, e.g.,
+ //
+ // struct S {
+ // static const int value = 17;
+ // };
+
+ // C++ [class.mem]p4:
+ // A member-declarator can contain a constant-initializer only
+ // if it declares a static member (9.4) of const integral or
+ // const enumeration type, see 9.4.2.
+ //
+ // C++11 [class.static.data]p3:
+ // If a non-volatile const static data member is of integral or
+ // enumeration type, its declaration in the class definition can
+    //   specify a brace-or-equal-initializer in which every initializer-clause
+ // that is an assignment-expression is a constant expression. A static
+ // data member of literal type can be declared in the class definition
+ // with the constexpr specifier; if so, its declaration shall specify a
+ // brace-or-equal-initializer in which every initializer-clause that is
+ // an assignment-expression is a constant expression.
+
+ // Do nothing on dependent types.
+ if (DclT->isDependentType()) {
+
+ // Allow any 'static constexpr' members, whether or not they are of literal
+ // type. We separately check that every constexpr variable is of literal
+ // type.
+ } else if (VDecl->isConstexpr()) {
+
+ // Require constness.
+ } else if (!DclT.isConstQualified()) {
+ Diag(VDecl->getLocation(), diag::err_in_class_initializer_non_const)
+ << Init->getSourceRange();
+ VDecl->setInvalidDecl();
+
+ // We allow integer constant expressions in all cases.
+ } else if (DclT->isIntegralOrEnumerationType()) {
+ // Check whether the expression is a constant expression.
+ SourceLocation Loc;
+ if (getLangOpts().CPlusPlus11 && DclT.isVolatileQualified())
+ // In C++11, a non-constexpr const static data member with an
+ // in-class initializer cannot be volatile.
+ Diag(VDecl->getLocation(), diag::err_in_class_initializer_volatile);
+ else if (Init->isValueDependent())
+ ; // Nothing to check.
+ else if (Init->isIntegerConstantExpr(Context, &Loc))
+ ; // Ok, it's an ICE!
+ else if (Init->isEvaluatable(Context)) {
+ // If we can constant fold the initializer through heroics, accept it,
+ // but report this as a use of an extension for -pedantic.
+ Diag(Loc, diag::ext_in_class_initializer_non_constant)
+ << Init->getSourceRange();
+ } else {
+        // Otherwise, this is some crazy unknown case. Report the issue at the
+        // location provided by the failed isIntegerConstantExpr check.
+ Diag(Loc, diag::err_in_class_initializer_non_constant)
+ << Init->getSourceRange();
+ VDecl->setInvalidDecl();
+ }
+
+ // We allow foldable floating-point constants as an extension.
+ } else if (DclT->isFloatingType()) { // also permits complex, which is ok
+ // In C++98, this is a GNU extension. In C++11, it is not, but we support
+ // it anyway and provide a fixit to add the 'constexpr'.
+ if (getLangOpts().CPlusPlus11) {
+ Diag(VDecl->getLocation(),
+ diag::ext_in_class_initializer_float_type_cxx11)
+ << DclT << Init->getSourceRange();
+ Diag(VDecl->getLocStart(),
+ diag::note_in_class_initializer_float_type_cxx11)
+ << FixItHint::CreateInsertion(VDecl->getLocStart(), "constexpr ");
+ } else {
+ Diag(VDecl->getLocation(), diag::ext_in_class_initializer_float_type)
+ << DclT << Init->getSourceRange();
+
+ if (!Init->isValueDependent() && !Init->isEvaluatable(Context)) {
+ Diag(Init->getExprLoc(), diag::err_in_class_initializer_non_constant)
+ << Init->getSourceRange();
+ VDecl->setInvalidDecl();
+ }
+ }
+
+ // Suggest adding 'constexpr' in C++11 for literal types.
+ } else if (getLangOpts().CPlusPlus11 && DclT->isLiteralType(Context)) {
+ Diag(VDecl->getLocation(), diag::err_in_class_initializer_literal_type)
+ << DclT << Init->getSourceRange()
+ << FixItHint::CreateInsertion(VDecl->getLocStart(), "constexpr ");
+ VDecl->setConstexpr(true);
+
+ } else {
+ Diag(VDecl->getLocation(), diag::err_in_class_initializer_bad_type)
+ << DclT << Init->getSourceRange();
+ VDecl->setInvalidDecl();
+ }
+ } else if (VDecl->isFileVarDecl()) {
+ if (VDecl->getStorageClass() == SC_Extern &&
+ (!getLangOpts().CPlusPlus ||
+ !(Context.getBaseElementType(VDecl->getType()).isConstQualified() ||
+ VDecl->isExternC())) &&
+ !isTemplateInstantiation(VDecl->getTemplateSpecializationKind()))
+ Diag(VDecl->getLocation(), diag::warn_extern_init);
+
+ // C99 6.7.8p4. All file scoped initializers need to be constant.
+ if (!getLangOpts().CPlusPlus && !VDecl->isInvalidDecl())
+ CheckForConstantInitializer(Init, DclT);
+ }
+
+ // We will represent direct-initialization similarly to copy-initialization:
+ // int x(1); -as-> int x = 1;
+ // ClassType x(a,b,c); -as-> ClassType x = ClassType(a,b,c);
+ //
+  // Clients that want to distinguish between the two forms can check for
+  // a direct initializer using VarDecl::getInitStyle().
+  // A major benefit is that clients that don't particularly care which
+  // form it was (like CodeGen) can handle both cases without
+  // special-case code.
+
+ // C++ 8.5p11:
+ // The form of initialization (using parentheses or '=') is generally
+ // insignificant, but does matter when the entity being initialized has a
+ // class type.
+ if (CXXDirectInit) {
+ assert(DirectInit && "Call-style initializer must be direct init.");
+ VDecl->setInitStyle(VarDecl::CallInit);
+ } else if (DirectInit) {
+ // This must be list-initialization. No other way is direct-initialization.
+ VDecl->setInitStyle(VarDecl::ListInit);
+ }
+
+ CheckCompleteVariableDeclaration(VDecl);
+}
+
+/// ActOnInitializerError - Given that there was an error parsing an
+/// initializer for the given declaration, try to return to some form
+/// of sanity.
+void Sema::ActOnInitializerError(Decl *D) {
+ // Our main concern here is re-establishing invariants like "a
+ // variable's type is either dependent or complete".
+ if (!D || D->isInvalidDecl()) return;
+
+ VarDecl *VD = dyn_cast<VarDecl>(D);
+ if (!VD) return;
+
+ // Auto types are meaningless if we can't make sense of the initializer.
+ if (ParsingInitForAutoVars.count(D)) {
+ D->setInvalidDecl();
+ return;
+ }
+
+ QualType Ty = VD->getType();
+ if (Ty->isDependentType()) return;
+
+ // Require a complete type.
+ if (RequireCompleteType(VD->getLocation(),
+ Context.getBaseElementType(Ty),
+ diag::err_typecheck_decl_incomplete_type)) {
+ VD->setInvalidDecl();
+ return;
+ }
+
+ // Require a non-abstract type.
+ if (RequireNonAbstractType(VD->getLocation(), Ty,
+ diag::err_abstract_type_in_decl,
+ AbstractVariableType)) {
+ VD->setInvalidDecl();
+ return;
+ }
+
+ // Don't bother complaining about constructors or destructors,
+ // though.
+}
+
+void Sema::ActOnUninitializedDecl(Decl *RealDecl,
+ bool TypeMayContainAuto) {
+ // If there is no declaration, there was an error parsing it. Just ignore it.
+ if (!RealDecl)
+ return;
+
+ if (VarDecl *Var = dyn_cast<VarDecl>(RealDecl)) {
+ QualType Type = Var->getType();
+
+ // C++11 [dcl.spec.auto]p3
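+    //   e.g.: auto x;  // ill-formed: no initializer from which to deduce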
+ if (TypeMayContainAuto && Type->getContainedAutoType()) {
+ Diag(Var->getLocation(), diag::err_auto_var_requires_init)
+ << Var->getDeclName() << Type;
+ Var->setInvalidDecl();
+ return;
+ }
+
+ // C++11 [class.static.data]p3: A static data member can be declared with
+ // the constexpr specifier; if so, its declaration shall specify
+ // a brace-or-equal-initializer.
+ // C++11 [dcl.constexpr]p1: The constexpr specifier shall be applied only to
+ // the definition of a variable [...] or the declaration of a static data
+ // member.
+ if (Var->isConstexpr() && !Var->isThisDeclarationADefinition()) {
+ if (Var->isStaticDataMember())
+ Diag(Var->getLocation(),
+ diag::err_constexpr_static_mem_var_requires_init)
+ << Var->getDeclName();
+ else
+ Diag(Var->getLocation(), diag::err_invalid_constexpr_var_decl);
+ Var->setInvalidDecl();
+ return;
+ }
+
+ // C++ Concepts TS [dcl.spec.concept]p1: [...] A variable template
+ // definition having the concept specifier is called a variable concept. A
+ // concept definition refers to [...] a variable concept and its initializer.
+ if (Var->isConcept()) {
+ Diag(Var->getLocation(), diag::err_var_concept_not_initialized);
+ Var->setInvalidDecl();
+ return;
+ }
+
+ // OpenCL v1.1 s6.5.3: variables declared in the constant address space must
+ // be initialized.
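+    //   e.g.: constant int c;  // ill-formed: must be initialized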
+ if (!Var->isInvalidDecl() &&
+ Var->getType().getAddressSpace() == LangAS::opencl_constant &&
+ Var->getStorageClass() != SC_Extern && !Var->getInit()) {
+ Diag(Var->getLocation(), diag::err_opencl_constant_no_init);
+ Var->setInvalidDecl();
+ return;
+ }
+
+ switch (Var->isThisDeclarationADefinition()) {
+ case VarDecl::Definition:
+ if (!Var->isStaticDataMember() || !Var->getAnyInitializer())
+ break;
+
+ // We have an out-of-line definition of a static data member
+ // that has an in-class initializer, so we type-check this like
+ // a declaration.
+ //
+ // Fall through
+
+ case VarDecl::DeclarationOnly:
+ // It's only a declaration.
+
+ // Block scope. C99 6.7p7: If an identifier for an object is
+ // declared with no linkage (C99 6.2.2p6), the type for the
+ // object shall be complete.
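+      //   e.g.: void f() { struct S s; }  // ill-formed if 'struct S' is
+      //                                   // incomplete here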
+ if (!Type->isDependentType() && Var->isLocalVarDecl() &&
+ !Var->hasLinkage() && !Var->isInvalidDecl() &&
+ RequireCompleteType(Var->getLocation(), Type,
+ diag::err_typecheck_decl_incomplete_type))
+ Var->setInvalidDecl();
+
+ // Make sure that the type is not abstract.
+ if (!Type->isDependentType() && !Var->isInvalidDecl() &&
+ RequireNonAbstractType(Var->getLocation(), Type,
+ diag::err_abstract_type_in_decl,
+ AbstractVariableType))
+ Var->setInvalidDecl();
+ if (!Type->isDependentType() && !Var->isInvalidDecl() &&
+ Var->getStorageClass() == SC_PrivateExtern) {
+ Diag(Var->getLocation(), diag::warn_private_extern);
+ Diag(Var->getLocation(), diag::note_private_extern);
+ }
+
+ return;
+
+ case VarDecl::TentativeDefinition:
+ // File scope. C99 6.9.2p2: A declaration of an identifier for an
+ // object that has file scope without an initializer, and without a
+ // storage-class specifier or with the storage-class specifier "static",
+ // constitutes a tentative definition. Note: A tentative definition with
+ // external linkage is valid (C99 6.2.2p5).
+ if (!Var->isInvalidDecl()) {
+ if (const IncompleteArrayType *ArrayT
+ = Context.getAsIncompleteArrayType(Type)) {
+ if (RequireCompleteType(Var->getLocation(),
+ ArrayT->getElementType(),
+ diag::err_illegal_decl_array_incomplete_type))
+ Var->setInvalidDecl();
+ } else if (Var->getStorageClass() == SC_Static) {
+ // C99 6.9.2p3: If the declaration of an identifier for an object is
+ // a tentative definition and has internal linkage (C99 6.2.2p3), the
+ // declared type shall not be an incomplete type.
+ // NOTE: code such as the following
+ // static struct s;
+ // struct s { int a; };
+ // is accepted by gcc. Hence here we issue a warning instead of
+ // an error and we do not invalidate the static declaration.
+ // NOTE: to avoid multiple warnings, only check the first declaration.
+ if (Var->isFirstDecl())
+ RequireCompleteType(Var->getLocation(), Type,
+ diag::ext_typecheck_decl_incomplete_type);
+ }
+ }
+
+ // Record the tentative definition; we're done.
+ if (!Var->isInvalidDecl())
+ TentativeDefinitions.push_back(Var);
+ return;
+ }
+
+ // Provide a specific diagnostic for uninitialized variable
+ // definitions with incomplete array type.
+ if (Type->isIncompleteArrayType()) {
+ Diag(Var->getLocation(),
+ diag::err_typecheck_incomplete_array_needs_initializer);
+ Var->setInvalidDecl();
+ return;
+ }
+
+ // Provide a specific diagnostic for uninitialized variable
+ // definitions with reference type.
+ if (Type->isReferenceType()) {
+ Diag(Var->getLocation(), diag::err_reference_var_requires_init)
+ << Var->getDeclName()
+ << SourceRange(Var->getLocation(), Var->getLocation());
+ Var->setInvalidDecl();
+ return;
+ }
+
+ // Do not attempt to type-check the default initializer for a
+ // variable with dependent type.
+ if (Type->isDependentType())
+ return;
+
+ if (Var->isInvalidDecl())
+ return;
+
+ if (!Var->hasAttr<AliasAttr>()) {
+ if (RequireCompleteType(Var->getLocation(),
+ Context.getBaseElementType(Type),
+ diag::err_typecheck_decl_incomplete_type)) {
+ Var->setInvalidDecl();
+ return;
+ }
+ } else {
+ return;
+ }
+
+    // The variable cannot have an abstract class type.
+ if (RequireNonAbstractType(Var->getLocation(), Type,
+ diag::err_abstract_type_in_decl,
+ AbstractVariableType)) {
+ Var->setInvalidDecl();
+ return;
+ }
+
+ // Check for jumps past the implicit initializer. C++0x
+ // clarifies that this applies to a "variable with automatic
+ // storage duration", not a "local variable".
+ // C++11 [stmt.dcl]p3
+ // A program that jumps from a point where a variable with automatic
+ // storage duration is not in scope to a point where it is in scope is
+ // ill-formed unless the variable has scalar type, class type with a
+ // trivial default constructor and a trivial destructor, a cv-qualified
+ // version of one of these types, or an array of one of the preceding
+ // types and is declared without an initializer.
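+  // e.g. jumping via 'goto' over 'S s;' where S has a non-trivial default
+  // constructor is ill-formed; jumping over a plain 'int i;' is fine.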
+ if (getLangOpts().CPlusPlus && Var->hasLocalStorage()) {
+ if (const RecordType *Record
+ = Context.getBaseElementType(Type)->getAs<RecordType>()) {
+ CXXRecordDecl *CXXRecord = cast<CXXRecordDecl>(Record->getDecl());
+ // Mark the function for further checking even if the looser rules of
+ // C++11 do not require such checks, so that we can diagnose
+ // incompatibilities with C++98.
+ if (!CXXRecord->isPOD())
+ getCurFunction()->setHasBranchProtectedScope();
+ }
+ }
+
+ // C++03 [dcl.init]p9:
+ // If no initializer is specified for an object, and the
+ // object is of (possibly cv-qualified) non-POD class type (or
+ // array thereof), the object shall be default-initialized; if
+ // the object is of const-qualified type, the underlying class
+ // type shall have a user-declared default
+ // constructor. Otherwise, if no initializer is specified for
+  //   a non-static object, the object and its subobjects, if
+  //   any, have an indeterminate initial value; if the object
+ // or any of its subobjects are of const-qualified type, the
+ // program is ill-formed.
+ // C++0x [dcl.init]p11:
+ // If no initializer is specified for an object, the object is
+ // default-initialized; [...].
+ InitializedEntity Entity = InitializedEntity::InitializeVariable(Var);
+ InitializationKind Kind
+ = InitializationKind::CreateDefault(Var->getLocation());
+
+ InitializationSequence InitSeq(*this, Entity, Kind, None);
+ ExprResult Init = InitSeq.Perform(*this, Entity, Kind, None);
+ if (Init.isInvalid())
+ Var->setInvalidDecl();
+ else if (Init.get()) {
+ Var->setInit(MaybeCreateExprWithCleanups(Init.get()));
+ // This is important for template substitution.
+ Var->setInitStyle(VarDecl::CallInit);
+ }
+
+ CheckCompleteVariableDeclaration(Var);
+ }
+}
+
+void Sema::ActOnCXXForRangeDecl(Decl *D) {
+ VarDecl *VD = dyn_cast<VarDecl>(D);
+ if (!VD) {
+ Diag(D->getLocation(), diag::err_for_range_decl_must_be_var);
+ D->setInvalidDecl();
+ return;
+ }
+
+ VD->setCXXForRangeDecl(true);
+
+ // for-range-declaration cannot be given a storage class specifier.
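+  // e.g. 'for (static auto x : xs)' is diagnosed here.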
+ int Error = -1;
+ switch (VD->getStorageClass()) {
+ case SC_None:
+ break;
+ case SC_Extern:
+ Error = 0;
+ break;
+ case SC_Static:
+ Error = 1;
+ break;
+ case SC_PrivateExtern:
+ Error = 2;
+ break;
+ case SC_Auto:
+ Error = 3;
+ break;
+ case SC_Register:
+ Error = 4;
+ break;
+ }
+ if (Error != -1) {
+ Diag(VD->getOuterLocStart(), diag::err_for_range_storage_class)
+ << VD->getDeclName() << Error;
+ D->setInvalidDecl();
+ }
+}
+
+StmtResult
+Sema::ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
+ IdentifierInfo *Ident,
+ ParsedAttributes &Attrs,
+ SourceLocation AttrEnd) {
+ // C++1y [stmt.iter]p1:
+ // A range-based for statement of the form
+ // for ( for-range-identifier : for-range-initializer ) statement
+ // is equivalent to
+ // for ( auto&& for-range-identifier : for-range-initializer ) statement
+ DeclSpec DS(Attrs.getPool().getFactory());
+
+ const char *PrevSpec;
+ unsigned DiagID;
+ DS.SetTypeSpecType(DeclSpec::TST_auto, IdentLoc, PrevSpec, DiagID,
+ getPrintingPolicy());
+
+ Declarator D(DS, Declarator::ForContext);
+ D.SetIdentifier(Ident, IdentLoc);
+ D.takeAttributes(Attrs, AttrEnd);
+
+ ParsedAttributes EmptyAttrs(Attrs.getPool().getFactory());
+ D.AddTypeInfo(DeclaratorChunk::getReference(0, IdentLoc, /*lvalue*/false),
+ EmptyAttrs, IdentLoc);
+ Decl *Var = ActOnDeclarator(S, D);
+ cast<VarDecl>(Var)->setCXXForRangeDecl(true);
+ FinalizeDeclaration(Var);
+ return ActOnDeclStmt(FinalizeDeclaratorGroup(S, DS, Var), IdentLoc,
+ AttrEnd.isValid() ? AttrEnd : IdentLoc);
+}
+
+void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
+ if (var->isInvalidDecl()) return;
+
+ // In Objective-C, don't allow jumps past the implicit initialization of a
+ // local retaining variable.
+ if (getLangOpts().ObjC1 &&
+ var->hasLocalStorage()) {
+ switch (var->getType().getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Strong:
+ getCurFunction()->setHasBranchProtectedScope();
+ break;
+ }
+ }
+
+ // Warn about externally-visible variables being defined without a
+ // prior declaration. We only want to do this for global
+ // declarations, but we also specifically need to avoid doing it for
+ // class members because the linkage of an anonymous class can
+ // change if it's later given a typedef name.
+ if (var->isThisDeclarationADefinition() &&
+ var->getDeclContext()->getRedeclContext()->isFileContext() &&
+ var->isExternallyVisible() && var->hasLinkage() &&
+ !getDiagnostics().isIgnored(diag::warn_missing_variable_declarations,
+ var->getLocation())) {
+ // Find a previous declaration that's not a definition.
+ VarDecl *prev = var->getPreviousDecl();
+ while (prev && prev->isThisDeclarationADefinition())
+ prev = prev->getPreviousDecl();
+
+ if (!prev)
+ Diag(var->getLocation(), diag::warn_missing_variable_declarations) << var;
+ }
+
+ if (var->getTLSKind() == VarDecl::TLS_Static) {
+ const Expr *Culprit;
+ if (var->getType().isDestructedType()) {
+ // GNU C++98 edits for __thread, [basic.start.term]p3:
+ // The type of an object with thread storage duration shall not
+ // have a non-trivial destructor.
+ Diag(var->getLocation(), diag::err_thread_nontrivial_dtor);
+ if (getLangOpts().CPlusPlus11)
+ Diag(var->getLocation(), diag::note_use_thread_local);
+ } else if (getLangOpts().CPlusPlus && var->hasInit() &&
+ !var->getInit()->isConstantInitializer(
+ Context, var->getType()->isReferenceType(), &Culprit)) {
+ // GNU C++98 edits for __thread, [basic.start.init]p4:
+ // An object of thread storage duration shall not require dynamic
+ // initialization.
+ // FIXME: Need strict checking here.
+ Diag(Culprit->getExprLoc(), diag::err_thread_dynamic_init)
+ << Culprit->getSourceRange();
+ if (getLangOpts().CPlusPlus11)
+ Diag(var->getLocation(), diag::note_use_thread_local);
+ }
+
+ }
+
+ // Apply section attributes and pragmas to global variables.
+ bool GlobalStorage = var->hasGlobalStorage();
+ if (GlobalStorage && var->isThisDeclarationADefinition() &&
+ ActiveTemplateInstantiations.empty()) {
+ PragmaStack<StringLiteral *> *Stack = nullptr;
+ int SectionFlags = ASTContext::PSF_Implicit | ASTContext::PSF_Read;
+ if (var->getType().isConstQualified())
+ Stack = &ConstSegStack;
+ else if (!var->getInit()) {
+ Stack = &BSSSegStack;
+ SectionFlags |= ASTContext::PSF_Write;
+ } else {
+ Stack = &DataSegStack;
+ SectionFlags |= ASTContext::PSF_Write;
+ }
+ if (Stack->CurrentValue && !var->hasAttr<SectionAttr>()) {
+ var->addAttr(SectionAttr::CreateImplicit(
+ Context, SectionAttr::Declspec_allocate,
+ Stack->CurrentValue->getString(), Stack->CurrentPragmaLocation));
+ }
+ if (const SectionAttr *SA = var->getAttr<SectionAttr>())
+ if (UnifySection(SA->getName(), SectionFlags, var))
+ var->dropAttr<SectionAttr>();
+
+ // Apply the init_seg attribute if this has an initializer. If the
+ // initializer turns out to not be dynamic, we'll end up ignoring this
+ // attribute.
+ if (CurInitSeg && var->getInit())
+ var->addAttr(InitSegAttr::CreateImplicit(Context, CurInitSeg->getString(),
+ CurInitSegLoc));
+ }
+
+ // All the following checks are C++ only.
+ if (!getLangOpts().CPlusPlus) return;
+
+ QualType type = var->getType();
+ if (type->isDependentType()) return;
+
+ // __block variables might require us to capture a copy-initializer.
+ if (var->hasAttr<BlocksAttr>()) {
+ // It's currently invalid to ever have a __block variable with an
+ // array type; should we diagnose that here?
+
+ // Regardless, we don't want to ignore array nesting when
+ // constructing this copy.
+ if (type->isStructureOrClassType()) {
+ EnterExpressionEvaluationContext scope(*this, PotentiallyEvaluated);
+ SourceLocation poi = var->getLocation();
+      Expr *varRef =
+          new (Context) DeclRefExpr(var, false, type, VK_LValue, poi);
+ ExprResult result
+ = PerformMoveOrCopyInitialization(
+ InitializedEntity::InitializeBlock(poi, type, false),
+ var, var->getType(), varRef, /*AllowNRVO=*/true);
+ if (!result.isInvalid()) {
+ result = MaybeCreateExprWithCleanups(result);
+ Expr *init = result.getAs<Expr>();
+ Context.setBlockVarCopyInits(var, init);
+ }
+ }
+ }
+
+ Expr *Init = var->getInit();
+ bool IsGlobal = GlobalStorage && !var->isStaticLocal();
+ QualType baseType = Context.getBaseElementType(type);
+
+ if (!var->getDeclContext()->isDependentContext() &&
+ Init && !Init->isValueDependent()) {
+ if (IsGlobal && !var->isConstexpr() &&
+ !getDiagnostics().isIgnored(diag::warn_global_constructor,
+ var->getLocation())) {
+ // Warn about globals which don't have a constant initializer. Don't
+ // warn about globals with a non-trivial destructor because we already
+ // warned about them.
+ CXXRecordDecl *RD = baseType->getAsCXXRecordDecl();
+ if (!(RD && !RD->hasTrivialDestructor()) &&
+ !Init->isConstantInitializer(Context, baseType->isReferenceType()))
+ Diag(var->getLocation(), diag::warn_global_constructor)
+ << Init->getSourceRange();
+ }
+
+ if (var->isConstexpr()) {
+ SmallVector<PartialDiagnosticAt, 8> Notes;
+ if (!var->evaluateValue(Notes) || !var->isInitICE()) {
+ SourceLocation DiagLoc = var->getLocation();
+ // If the note doesn't add any useful information other than a source
+ // location, fold it into the primary diagnostic.
+ if (Notes.size() == 1 && Notes[0].second.getDiagID() ==
+ diag::note_invalid_subexpr_in_const_expr) {
+ DiagLoc = Notes[0].first;
+ Notes.clear();
+ }
+ Diag(DiagLoc, diag::err_constexpr_var_requires_const_init)
+ << var << Init->getSourceRange();
+ for (unsigned I = 0, N = Notes.size(); I != N; ++I)
+ Diag(Notes[I].first, Notes[I].second);
+ }
+ } else if (var->isUsableInConstantExpressions(Context)) {
+ // Check whether the initializer of a const variable of integral or
+ // enumeration type is an ICE now, since we can't tell whether it was
+ // initialized by a constant expression if we check later.
+ var->checkInitIsICE();
+ }
+ }
+
+ // Require the destructor.
+ if (const RecordType *recordType = baseType->getAs<RecordType>())
+ FinalizeVarWithDestructor(var, recordType);
+}
+
+/// \brief Determines if a variable's alignment is dependent.
+static bool hasDependentAlignment(VarDecl *VD) {
+ if (VD->getType()->isDependentType())
+ return true;
+ for (auto *I : VD->specific_attrs<AlignedAttr>())
+ if (I->isAlignmentDependent())
+ return true;
+ return false;
+}
+
+/// FinalizeDeclaration - called by ParseDeclarationAfterDeclarator to perform
+/// any semantic actions necessary after any initializer has been attached.
+void
+Sema::FinalizeDeclaration(Decl *ThisDecl) {
+ // Note that we are no longer parsing the initializer for this declaration.
+ ParsingInitForAutoVars.erase(ThisDecl);
+
+ VarDecl *VD = dyn_cast_or_null<VarDecl>(ThisDecl);
+ if (!VD)
+ return;
+
+ checkAttributesAfterMerging(*this, *VD);
+
+  // Perform the TLS alignment check here, after attributes that may affect
+  // the alignment have been attached to the variable. Only perform the check
+  // if the target has a maximum TLS alignment (zero means no constraints).
+ if (unsigned MaxAlign = Context.getTargetInfo().getMaxTLSAlign()) {
+ // Protect the check so that it's not performed on dependent types and
+ // dependent alignments (we can't determine the alignment in that case).
+ if (VD->getTLSKind() && !hasDependentAlignment(VD)) {
+ CharUnits MaxAlignChars = Context.toCharUnitsFromBits(MaxAlign);
+ if (Context.getDeclAlign(VD) > MaxAlignChars) {
+ Diag(VD->getLocation(), diag::err_tls_var_aligned_over_maximum)
+ << (unsigned)Context.getDeclAlign(VD).getQuantity() << VD
+ << (unsigned)MaxAlignChars.getQuantity();
+ }
+ }
+ }
+
+ // Static locals inherit dll attributes from their function.
+ if (VD->isStaticLocal()) {
+ if (FunctionDecl *FD =
+ dyn_cast_or_null<FunctionDecl>(VD->getParentFunctionOrMethod())) {
+ if (Attr *A = getDLLAttr(FD)) {
+ auto *NewAttr = cast<InheritableAttr>(A->clone(getASTContext()));
+ NewAttr->setInherited(true);
+ VD->addAttr(NewAttr);
+ }
+ }
+ }
+
+ // Grab the dllimport or dllexport attribute off of the VarDecl.
+ const InheritableAttr *DLLAttr = getDLLAttr(VD);
+
+ // Imported static data members cannot be defined out-of-line.
+ if (const auto *IA = dyn_cast_or_null<DLLImportAttr>(DLLAttr)) {
+ if (VD->isStaticDataMember() && VD->isOutOfLine() &&
+ VD->isThisDeclarationADefinition()) {
+ // We allow definitions of dllimport class template static data members
+ // with a warning.
+ CXXRecordDecl *Context =
+ cast<CXXRecordDecl>(VD->getFirstDecl()->getDeclContext());
+ bool IsClassTemplateMember =
+ isa<ClassTemplatePartialSpecializationDecl>(Context) ||
+ Context->getDescribedClassTemplate();
+
+ Diag(VD->getLocation(),
+ IsClassTemplateMember
+ ? diag::warn_attribute_dllimport_static_field_definition
+ : diag::err_attribute_dllimport_static_field_definition);
+ Diag(IA->getLocation(), diag::note_attribute);
+ if (!IsClassTemplateMember)
+ VD->setInvalidDecl();
+ }
+ }
+
+  // dllimport/dllexport variables cannot be thread local; their TLS index
+  // isn't exported with the variable.
+ if (DLLAttr && VD->getTLSKind()) {
+ auto *F = dyn_cast_or_null<FunctionDecl>(VD->getParentFunctionOrMethod());
+ if (F && getDLLAttr(F)) {
+ assert(VD->isStaticLocal());
+      // But if this is a static local in a dllimport/dllexport function, the
+ // function will never be inlined, which means the var would never be
+ // imported, so having it marked import/export is safe.
+ } else {
+ Diag(VD->getLocation(), diag::err_attribute_dll_thread_local) << VD
+ << DLLAttr;
+ VD->setInvalidDecl();
+ }
+ }
+
+ if (UsedAttr *Attr = VD->getAttr<UsedAttr>()) {
+ if (!Attr->isInherited() && !VD->isThisDeclarationADefinition()) {
+ Diag(Attr->getLocation(), diag::warn_attribute_ignored) << Attr;
+ VD->dropAttr<UsedAttr>();
+ }
+ }
+
+ const DeclContext *DC = VD->getDeclContext();
+ // If there's a #pragma GCC visibility in scope, and this isn't a class
+ // member, set the visibility of this variable.
+ if (DC->getRedeclContext()->isFileContext() && VD->isExternallyVisible())
+ AddPushedVisibilityAttribute(VD);
+
+ // FIXME: Warn on unused templates.
+ if (VD->isFileVarDecl() && !VD->getDescribedVarTemplate() &&
+ !isa<VarTemplatePartialSpecializationDecl>(VD))
+ MarkUnusedFileScopedDecl(VD);
+
+ // Now we have parsed the initializer and can update the table of magic
+ // tag values.
+ if (!VD->hasAttr<TypeTagForDatatypeAttr>() ||
+ !VD->getType()->isIntegralOrEnumerationType())
+ return;
+
+ for (const auto *I : ThisDecl->specific_attrs<TypeTagForDatatypeAttr>()) {
+ const Expr *MagicValueExpr = VD->getInit();
+ if (!MagicValueExpr) {
+ continue;
+ }
+ llvm::APSInt MagicValueInt;
+ if (!MagicValueExpr->isIntegerConstantExpr(MagicValueInt, Context)) {
+ Diag(I->getRange().getBegin(),
+ diag::err_type_tag_for_datatype_not_ice)
+ << LangOpts.CPlusPlus << MagicValueExpr->getSourceRange();
+ continue;
+ }
+ if (MagicValueInt.getActiveBits() > 64) {
+ Diag(I->getRange().getBegin(),
+ diag::err_type_tag_for_datatype_too_large)
+ << LangOpts.CPlusPlus << MagicValueExpr->getSourceRange();
+ continue;
+ }
+ uint64_t MagicValue = MagicValueInt.getZExtValue();
+ RegisterTypeTagForDatatype(I->getArgumentKind(),
+ MagicValue,
+ I->getMatchingCType(),
+ I->getLayoutCompatible(),
+ I->getMustBeNull());
+ }
+}
+
+Sema::DeclGroupPtrTy Sema::FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
+ ArrayRef<Decl *> Group) {
+ SmallVector<Decl*, 8> Decls;
+
+ if (DS.isTypeSpecOwned())
+ Decls.push_back(DS.getRepAsDecl());
+
+ DeclaratorDecl *FirstDeclaratorInGroup = nullptr;
+ for (unsigned i = 0, e = Group.size(); i != e; ++i)
+ if (Decl *D = Group[i]) {
+ if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D))
+ if (!FirstDeclaratorInGroup)
+ FirstDeclaratorInGroup = DD;
+ Decls.push_back(D);
+ }
+
+ if (DeclSpec::isDeclRep(DS.getTypeSpecType())) {
+ if (TagDecl *Tag = dyn_cast_or_null<TagDecl>(DS.getRepAsDecl())) {
+ handleTagNumbering(Tag, S);
+ if (FirstDeclaratorInGroup && !Tag->hasNameForLinkage() &&
+ getLangOpts().CPlusPlus)
+ Context.addDeclaratorForUnnamedTagDecl(Tag, FirstDeclaratorInGroup);
+ }
+ }
+
+ return BuildDeclaratorGroup(Decls, DS.containsPlaceholderType());
+}
+
+/// BuildDeclaratorGroup - convert a list of declarations into a declaration
+/// group, performing any necessary semantic checking.
+Sema::DeclGroupPtrTy
+Sema::BuildDeclaratorGroup(MutableArrayRef<Decl *> Group,
+ bool TypeMayContainAuto) {
+ // C++0x [dcl.spec.auto]p7:
+ // If the type deduced for the template parameter U is not the same in each
+ // deduction, the program is ill-formed.
+ // FIXME: When initializer-list support is added, a distinction is needed
+ // between the deduced type U and the deduced type which 'auto' stands for.
+ // auto a = 0, b = { 1, 2, 3 };
+ // is legal because the deduced type U is 'int' in both cases.
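+  // By contrast, 'auto i = 0, d = 0.5;' is ill-formed because 'auto' is
+  // deduced as 'int' for 'i' but as 'double' for 'd'.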
+ if (TypeMayContainAuto && Group.size() > 1) {
+ QualType Deduced;
+ CanQualType DeducedCanon;
+ VarDecl *DeducedDecl = nullptr;
+ for (unsigned i = 0, e = Group.size(); i != e; ++i) {
+ if (VarDecl *D = dyn_cast<VarDecl>(Group[i])) {
+ AutoType *AT = D->getType()->getContainedAutoType();
+ // Don't reissue diagnostics when instantiating a template.
+ if (AT && D->isInvalidDecl())
+ break;
+ QualType U = AT ? AT->getDeducedType() : QualType();
+ if (!U.isNull()) {
+ CanQualType UCanon = Context.getCanonicalType(U);
+ if (Deduced.isNull()) {
+ Deduced = U;
+ DeducedCanon = UCanon;
+ DeducedDecl = D;
+ } else if (DeducedCanon != UCanon) {
+ Diag(D->getTypeSourceInfo()->getTypeLoc().getBeginLoc(),
+ diag::err_auto_different_deductions)
+ << (unsigned)AT->getKeyword()
+ << Deduced << DeducedDecl->getDeclName()
+ << U << D->getDeclName()
+ << DeducedDecl->getInit()->getSourceRange()
+ << D->getInit()->getSourceRange();
+ D->setInvalidDecl();
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ ActOnDocumentableDecls(Group);
+
+ return DeclGroupPtrTy::make(
+ DeclGroupRef::Create(Context, Group.data(), Group.size()));
+}
+
+void Sema::ActOnDocumentableDecl(Decl *D) {
+ ActOnDocumentableDecls(D);
+}
+
+void Sema::ActOnDocumentableDecls(ArrayRef<Decl *> Group) {
+ // Don't parse the comment if Doxygen diagnostics are ignored.
+ if (Group.empty() || !Group[0])
+ return;
+
+ if (Diags.isIgnored(diag::warn_doc_param_not_found,
+ Group[0]->getLocation()) &&
+ Diags.isIgnored(diag::warn_unknown_comment_command_name,
+ Group[0]->getLocation()))
+ return;
+
+ if (Group.size() >= 2) {
+ // This is a decl group. Normally it will contain only declarations
+    // produced from the declarator list. But in case we have any definitions
+    // or additional declaration references:
+ // 'typedef struct S {} S;'
+ // 'typedef struct S *S;'
+ // 'struct S *pS;'
+ // FinalizeDeclaratorGroup adds these as separate declarations.
+ Decl *MaybeTagDecl = Group[0];
+ if (MaybeTagDecl && isa<TagDecl>(MaybeTagDecl)) {
+ Group = Group.slice(1);
+ }
+ }
+
+ // See if there are any new comments that are not attached to a decl.
+ ArrayRef<RawComment *> Comments = Context.getRawCommentList().getComments();
+ if (!Comments.empty() &&
+ !Comments.back()->isAttached()) {
+    // There is at least one comment that is not attached to a decl.
+ // Maybe it should be attached to one of these decls?
+ //
+ // Note that this way we pick up not only comments that precede the
+ // declaration, but also comments that *follow* the declaration -- thanks to
+ // the lookahead in the lexer: we've consumed the semicolon and looked
+ // ahead through comments.
+ for (unsigned i = 0, e = Group.size(); i != e; ++i)
+ Context.getCommentForDecl(Group[i], &PP);
+ }
+}
+
+/// ActOnParamDeclarator - Called from Parser::ParseFunctionDeclarator()
+/// to introduce parameters into function prototype scope.
+Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
+ const DeclSpec &DS = D.getDeclSpec();
+
+ // Verify C99 6.7.5.3p2: The only SCS allowed is 'register'.
+
+ // C++03 [dcl.stc]p2 also permits 'auto'.
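+  // e.g. 'void f(register int n);' is accepted, while
+  // 'void f(static int n);' is diagnosed and the specifier dropped.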
+ StorageClass SC = SC_None;
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_register) {
+ SC = SC_Register;
+ } else if (getLangOpts().CPlusPlus &&
+ DS.getStorageClassSpec() == DeclSpec::SCS_auto) {
+ SC = SC_Auto;
+ } else if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified) {
+ Diag(DS.getStorageClassSpecLoc(),
+ diag::err_invalid_storage_class_in_func_decl);
+ D.getMutableDeclSpec().ClearStorageClassSpecs();
+ }
+
+ if (DeclSpec::TSCS TSCS = DS.getThreadStorageClassSpec())
+ Diag(DS.getThreadStorageClassSpecLoc(), diag::err_invalid_thread)
+ << DeclSpec::getSpecifierName(TSCS);
+ if (DS.isConstexprSpecified())
+ Diag(DS.getConstexprSpecLoc(), diag::err_invalid_constexpr)
+ << 0;
+ if (DS.isConceptSpecified())
+ Diag(DS.getConceptSpecLoc(), diag::err_concept_wrong_decl_kind);
+
+ DiagnoseFunctionSpecifiers(DS);
+
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType parmDeclType = TInfo->getType();
+
+ if (getLangOpts().CPlusPlus) {
+ // Check that there are no default arguments inside the type of this
+ // parameter.
+ CheckExtraCXXDefaultArguments(D);
+
+ // Parameter declarators cannot be qualified (C++ [dcl.meaning]p1).
+ if (D.getCXXScopeSpec().isSet()) {
+ Diag(D.getIdentifierLoc(), diag::err_qualified_param_declarator)
+ << D.getCXXScopeSpec().getRange();
+ D.getCXXScopeSpec().clear();
+ }
+ }
+
+ // Ensure we have a valid name
+ IdentifierInfo *II = nullptr;
+ if (D.hasName()) {
+ II = D.getIdentifier();
+ if (!II) {
+ Diag(D.getIdentifierLoc(), diag::err_bad_parameter_name)
+ << GetNameForDeclarator(D).getName();
+ D.setInvalidType(true);
+ }
+ }
+
+ // Check for redeclaration of parameters, e.g. int foo(int x, int x);
+ if (II) {
+ LookupResult R(*this, II, D.getIdentifierLoc(), LookupOrdinaryName,
+ ForRedeclaration);
+ LookupName(R, S);
+ if (R.isSingleResult()) {
+ NamedDecl *PrevDecl = R.getFoundDecl();
+ if (PrevDecl->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl);
+ // Just pretend that we didn't see the previous declaration.
+ PrevDecl = nullptr;
+ } else if (S->isDeclScope(PrevDecl)) {
+ Diag(D.getIdentifierLoc(), diag::err_param_redefinition) << II;
+ Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
+
+ // Recover by removing the name
+ II = nullptr;
+ D.SetIdentifier(nullptr, D.getIdentifierLoc());
+ D.setInvalidType(true);
+ }
+ }
+ }
+
+ // Temporarily put parameter variables in the translation unit, not
+ // the enclosing context. This prevents them from accidentally
+ // looking like class members in C++.
+ ParmVarDecl *New = CheckParameter(Context.getTranslationUnitDecl(),
+ D.getLocStart(),
+ D.getIdentifierLoc(), II,
+ parmDeclType, TInfo,
+ SC);
+
+ if (D.isInvalidType())
+ New->setInvalidDecl();
+
+ assert(S->isFunctionPrototypeScope());
+ assert(S->getFunctionPrototypeDepth() >= 1);
+ New->setScopeInfo(S->getFunctionPrototypeDepth() - 1,
+ S->getNextFunctionPrototypeIndex());
+
+ // Add the parameter declaration into this scope.
+ S->AddDecl(New);
+ if (II)
+ IdResolver.AddDecl(New);
+
+ ProcessDeclAttributes(S, New, D);
+
+ if (D.getDeclSpec().isModulePrivateSpecified())
+ Diag(New->getLocation(), diag::err_module_private_local)
+ << 1 << New->getDeclName()
+ << SourceRange(D.getDeclSpec().getModulePrivateSpecLoc())
+ << FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
+
+ if (New->hasAttr<BlocksAttr>()) {
+ Diag(New->getLocation(), diag::err_block_on_nonlocal);
+ }
+ return New;
+}
+
+/// \brief Synthesizes a variable for a parameter arising from a
+/// typedef.
+ParmVarDecl *Sema::BuildParmVarDeclForTypedef(DeclContext *DC,
+ SourceLocation Loc,
+ QualType T) {
+ /* FIXME: setting StartLoc == Loc.
+     Would it be worthwhile to modify callers to provide a proper source
+     location for the unnamed parameters, embedding the parameter's type? */
+ ParmVarDecl *Param = ParmVarDecl::Create(Context, DC, Loc, Loc, nullptr,
+ T, Context.getTrivialTypeSourceInfo(T, Loc),
+ SC_None, nullptr);
+ Param->setImplicit();
+ return Param;
+}
+
+void Sema::DiagnoseUnusedParameters(ParmVarDecl * const *Param,
+ ParmVarDecl * const *ParamEnd) {
+ // Don't diagnose unused-parameter errors in template instantiations; we
+ // will already have done so in the template itself.
+ if (!ActiveTemplateInstantiations.empty())
+ return;
+
+ for (; Param != ParamEnd; ++Param) {
+ if (!(*Param)->isReferenced() && (*Param)->getDeclName() &&
+ !(*Param)->hasAttr<UnusedAttr>()) {
+ Diag((*Param)->getLocation(), diag::warn_unused_parameter)
+ << (*Param)->getDeclName();
+ }
+ }
+}
+
+void Sema::DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Param,
+ ParmVarDecl * const *ParamEnd,
+ QualType ReturnTy,
+ NamedDecl *D) {
+ if (LangOpts.NumLargeByValueCopy == 0) // No check.
+ return;
+
+ // Warn if the return value is pass-by-value and larger than the specified
+ // threshold.
+ if (!ReturnTy->isDependentType() && ReturnTy.isPODType(Context)) {
+ unsigned Size = Context.getTypeSizeInChars(ReturnTy).getQuantity();
+ if (Size > LangOpts.NumLargeByValueCopy)
+ Diag(D->getLocation(), diag::warn_return_value_size)
+ << D->getDeclName() << Size;
+ }
+
+ // Warn if any parameter is pass-by-value and larger than the specified
+ // threshold.
+ for (; Param != ParamEnd; ++Param) {
+ QualType T = (*Param)->getType();
+ if (T->isDependentType() || !T.isPODType(Context))
+ continue;
+ unsigned Size = Context.getTypeSizeInChars(T).getQuantity();
+ if (Size > LangOpts.NumLargeByValueCopy)
+ Diag((*Param)->getLocation(), diag::warn_parameter_size)
+ << (*Param)->getDeclName() << Size;
+ }
+}
+
+ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation NameLoc, IdentifierInfo *Name,
+ QualType T, TypeSourceInfo *TSInfo,
+ StorageClass SC) {
+ // In ARC, infer a lifetime qualifier for appropriate parameter types.
+ if (getLangOpts().ObjCAutoRefCount &&
+ T.getObjCLifetime() == Qualifiers::OCL_None &&
+ T->isObjCLifetimeType()) {
+
+ Qualifiers::ObjCLifetime lifetime;
+
+ // Special cases for arrays:
+ // - if it's const, use __unsafe_unretained
+ // - otherwise, it's an error
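+    // e.g. 'void f(id objs[]);' is diagnosed under ARC, while
+    // 'void f(id const objs[]);' is treated as __unsafe_unretained.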
+ if (T->isArrayType()) {
+ if (!T.isConstQualified()) {
+ DelayedDiagnostics.add(
+ sema::DelayedDiagnostic::makeForbiddenType(
+ NameLoc, diag::err_arc_array_param_no_ownership, T, false));
+ }
+ lifetime = Qualifiers::OCL_ExplicitNone;
+ } else {
+ lifetime = T->getObjCARCImplicitLifetime();
+ }
+ T = Context.getLifetimeQualifiedType(T, lifetime);
+ }
+
+ ParmVarDecl *New = ParmVarDecl::Create(Context, DC, StartLoc, NameLoc, Name,
+ Context.getAdjustedParameterType(T),
+ TSInfo, SC, nullptr);
+
+  // Parameters cannot be abstract class types.
+ // For record types, this is done by the AbstractClassUsageDiagnoser once
+ // the class has been completely parsed.
+ if (!CurContext->isRecord() &&
+ RequireNonAbstractType(NameLoc, T, diag::err_abstract_type_in_decl,
+ AbstractParamType))
+ New->setInvalidDecl();
+
+ // Parameter declarators cannot be interface types. All ObjC objects are
+ // passed by reference.
+ if (T->isObjCObjectType()) {
+ SourceLocation TypeEndLoc = TSInfo->getTypeLoc().getLocEnd();
+ Diag(NameLoc,
+ diag::err_object_cannot_be_passed_returned_by_value) << 1 << T
+ << FixItHint::CreateInsertion(TypeEndLoc, "*");
+ T = Context.getObjCObjectPointerType(T);
+ New->setType(T);
+ }
+
+ // ISO/IEC TR 18037 S6.7.3: "The type of an object with automatic storage
+ // duration shall not be qualified by an address-space qualifier."
+  // Since all parameters have automatic storage duration, they cannot have
+  // an address space.
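+  // e.g. 'void f(__attribute__((address_space(1))) int x);' is rejected,
+  // except for the OpenCL array case handled below.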
+ if (T.getAddressSpace() != 0) {
+ // OpenCL allows function arguments declared to be an array of a type
+ // to be qualified with an address space.
+ if (!(getLangOpts().OpenCL && T->isArrayType())) {
+ Diag(NameLoc, diag::err_arg_with_address_space);
+ New->setInvalidDecl();
+ }
+ }
+
+ return New;
+}
+
+void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
+ SourceLocation LocAfterDecls) {
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
+
+ // Verify 6.9.1p6: 'every identifier in the identifier list shall be declared'
+ // for a K&R function.
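+  // e.g. in 'int f(a, b) int a; { return a + b; }' the identifier 'b' is
+  // not declared, so it is implicitly given type 'int' below.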
+ if (!FTI.hasPrototype) {
+ for (int i = FTI.NumParams; i != 0; /* decrement in loop */) {
+ --i;
+ if (FTI.Params[i].Param == nullptr) {
+ SmallString<256> Code;
+ llvm::raw_svector_ostream(Code)
+ << " int " << FTI.Params[i].Ident->getName() << ";\n";
+ Diag(FTI.Params[i].IdentLoc, diag::ext_param_not_declared)
+ << FTI.Params[i].Ident
+ << FixItHint::CreateInsertion(LocAfterDecls, Code);
+
+ // Implicitly declare the argument as type 'int' for lack of a better
+ // type.
+ AttributeFactory attrs;
+ DeclSpec DS(attrs);
+ const char* PrevSpec; // unused
+ unsigned DiagID; // unused
+ DS.SetTypeSpecType(DeclSpec::TST_int, FTI.Params[i].IdentLoc, PrevSpec,
+ DiagID, Context.getPrintingPolicy());
+ // Use the identifier location for the type source range.
+ DS.SetRangeStart(FTI.Params[i].IdentLoc);
+ DS.SetRangeEnd(FTI.Params[i].IdentLoc);
+ Declarator ParamD(DS, Declarator::KNRTypeListContext);
+ ParamD.SetIdentifier(FTI.Params[i].Ident, FTI.Params[i].IdentLoc);
+ FTI.Params[i].Param = ActOnParamDeclarator(S, ParamD);
+ }
+ }
+ }
+}
+
+Decl *
+Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Declarator &D,
+ MultiTemplateParamsArg TemplateParameterLists,
+ SkipBodyInfo *SkipBody) {
+ assert(getCurFunctionDecl() == nullptr && "Function parsing confused");
+ assert(D.isFunctionDeclarator() && "Not a function declarator!");
+ Scope *ParentScope = FnBodyScope->getParent();
+
+ D.setFunctionDefinitionKind(FDK_Definition);
+ Decl *DP = HandleDeclarator(ParentScope, D, TemplateParameterLists);
+ return ActOnStartOfFunctionDef(FnBodyScope, DP, SkipBody);
+}
+
+void Sema::ActOnFinishInlineMethodDef(CXXMethodDecl *D) {
+ Consumer.HandleInlineMethodDefinition(D);
+}
+
+static bool ShouldWarnAboutMissingPrototype(const FunctionDecl *FD,
+ const FunctionDecl*& PossibleZeroParamPrototype) {
+ // Don't warn about invalid declarations.
+ if (FD->isInvalidDecl())
+ return false;
+
+ // Or declarations that aren't global.
+ if (!FD->isGlobal())
+ return false;
+
+ // Don't warn about C++ member functions.
+ if (isa<CXXMethodDecl>(FD))
+ return false;
+
+ // Don't warn about 'main'.
+ if (FD->isMain())
+ return false;
+
+ // Don't warn about inline functions.
+ if (FD->isInlined())
+ return false;
+
+ // Don't warn about function templates.
+ if (FD->getDescribedFunctionTemplate())
+ return false;
+
+ // Don't warn about function template specializations.
+ if (FD->isFunctionTemplateSpecialization())
+ return false;
+
+ // Don't warn for OpenCL kernels.
+ if (FD->hasAttr<OpenCLKernelAttr>())
+ return false;
+
+ // Don't warn on explicitly deleted functions.
+ if (FD->isDeleted())
+ return false;
+
+ bool MissingPrototype = true;
+ for (const FunctionDecl *Prev = FD->getPreviousDecl();
+ Prev; Prev = Prev->getPreviousDecl()) {
+ // Ignore any declarations that occur in function or method
+ // scope, because they aren't visible from the header.
+ if (Prev->getLexicalDeclContext()->isFunctionOrMethod())
+ continue;
+
+ MissingPrototype = !Prev->getType()->isFunctionProtoType();
+ if (FD->getNumParams() == 0)
+ PossibleZeroParamPrototype = Prev;
+ break;
+ }
+
+ return MissingPrototype;
+}
+
+void
+Sema::CheckForFunctionRedefinition(FunctionDecl *FD,
+ const FunctionDecl *EffectiveDefinition,
+ SkipBodyInfo *SkipBody) {
+ // Don't complain if we're in GNU89 mode and the previous definition
+ // was an extern inline function.
+ const FunctionDecl *Definition = EffectiveDefinition;
+ if (!Definition)
+ if (!FD->isDefined(Definition))
+ return;
+
+ if (canRedefineFunction(Definition, getLangOpts()))
+ return;
+
+ // If we don't have a visible definition of the function, and it's inline or
+ // a template, skip the new definition.
+ if (SkipBody && !hasVisibleDefinition(Definition) &&
+ (Definition->getFormalLinkage() == InternalLinkage ||
+ Definition->isInlined() ||
+ Definition->getDescribedFunctionTemplate() ||
+ Definition->getNumTemplateParameterLists())) {
+ SkipBody->ShouldSkip = true;
+ if (auto *TD = Definition->getDescribedFunctionTemplate())
+ makeMergedDefinitionVisible(TD, FD->getLocation());
+ else
+ makeMergedDefinitionVisible(const_cast<FunctionDecl*>(Definition),
+ FD->getLocation());
+ return;
+ }
+
+ if (getLangOpts().GNUMode && Definition->isInlineSpecified() &&
+ Definition->getStorageClass() == SC_Extern)
+ Diag(FD->getLocation(), diag::err_redefinition_extern_inline)
+ << FD->getDeclName() << getLangOpts().CPlusPlus;
+ else
+ Diag(FD->getLocation(), diag::err_redefinition) << FD->getDeclName();
+
+ Diag(Definition->getLocation(), diag::note_previous_definition);
+ FD->setInvalidDecl();
+}
+
+
+static void RebuildLambdaScopeInfo(CXXMethodDecl *CallOperator,
+ Sema &S) {
+ CXXRecordDecl *const LambdaClass = CallOperator->getParent();
+
+ LambdaScopeInfo *LSI = S.PushLambdaScope();
+ LSI->CallOperator = CallOperator;
+ LSI->Lambda = LambdaClass;
+ LSI->ReturnType = CallOperator->getReturnType();
+ const LambdaCaptureDefault LCD = LambdaClass->getLambdaCaptureDefault();
+
+ if (LCD == LCD_None)
+ LSI->ImpCaptureStyle = CapturingScopeInfo::ImpCap_None;
+ else if (LCD == LCD_ByCopy)
+ LSI->ImpCaptureStyle = CapturingScopeInfo::ImpCap_LambdaByval;
+ else if (LCD == LCD_ByRef)
+ LSI->ImpCaptureStyle = CapturingScopeInfo::ImpCap_LambdaByref;
+ DeclarationNameInfo DNI = CallOperator->getNameInfo();
+
+ LSI->IntroducerRange = DNI.getCXXOperatorNameRange();
+ LSI->Mutable = !CallOperator->isConst();
+
+ // Add the captures to the LSI so they can be noted as already
+ // captured within tryCaptureVar.
+ auto I = LambdaClass->field_begin();
+ for (const auto &C : LambdaClass->captures()) {
+ if (C.capturesVariable()) {
+ VarDecl *VD = C.getCapturedVar();
+ if (VD->isInitCapture())
+ S.CurrentInstantiationScope->InstantiatedLocal(VD, VD);
+ QualType CaptureType = VD->getType();
+ const bool ByRef = C.getCaptureKind() == LCK_ByRef;
+ LSI->addCapture(VD, /*IsBlock*/false, ByRef,
+ /*RefersToEnclosingVariableOrCapture*/true, C.getLocation(),
+ /*EllipsisLoc*/C.isPackExpansion()
+ ? C.getEllipsisLoc() : SourceLocation(),
+ CaptureType, /*Expr*/ nullptr);
+
+ } else if (C.capturesThis()) {
+ LSI->addThisCapture(/*Nested*/ false, C.getLocation(),
+ S.getCurrentThisType(), /*Expr*/ nullptr);
+ } else {
+ LSI->addVLATypeCapture(C.getLocation(), I->getType());
+ }
+ ++I;
+ }
+}
+
+Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
+ SkipBodyInfo *SkipBody) {
+ // Clear the last template instantiation error context.
+ LastTemplateInstantiationErrorContext = ActiveTemplateInstantiation();
+
+ if (!D)
+ return D;
+ FunctionDecl *FD = nullptr;
+
+ if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D))
+ FD = FunTmpl->getTemplatedDecl();
+ else
+ FD = cast<FunctionDecl>(D);
+
+ // See if this is a redefinition.
+ if (!FD->isLateTemplateParsed()) {
+ CheckForFunctionRedefinition(FD, nullptr, SkipBody);
+
+ // If we're skipping the body, we're done. Don't enter the scope.
+ if (SkipBody && SkipBody->ShouldSkip)
+ return D;
+ }
+
+ // If we are instantiating a generic lambda call operator, push
+ // a LambdaScopeInfo onto the function stack. But use the information
+ // that's already been calculated (ActOnLambdaExpr) to prime the current
+ // LambdaScopeInfo.
+  // When the template operator is being specialized, the LambdaScopeInfo
+  // has to be properly restored so that tryCaptureVariable doesn't try
+  // to capture any new variables. In addition, when calculating potential
+ // captures during transformation of nested lambdas, it is necessary to
+ // have the LSI properly restored.
+ if (isGenericLambdaCallOperatorSpecialization(FD)) {
+ assert(ActiveTemplateInstantiations.size() &&
+ "There should be an active template instantiation on the stack "
+ "when instantiating a generic lambda!");
+ RebuildLambdaScopeInfo(cast<CXXMethodDecl>(D), *this);
+ }
+ else
+ // Enter a new function scope
+ PushFunctionScope();
+
+ // Builtin functions cannot be defined.
+ if (unsigned BuiltinID = FD->getBuiltinID()) {
+ if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID) &&
+ !Context.BuiltinInfo.isPredefinedRuntimeFunction(BuiltinID)) {
+ Diag(FD->getLocation(), diag::err_builtin_definition) << FD;
+ FD->setInvalidDecl();
+ }
+ }
+
+ // The return type of a function definition must be complete
+ // (C99 6.9.1p3, C++ [dcl.fct]p6).
+ QualType ResultType = FD->getReturnType();
+ if (!ResultType->isDependentType() && !ResultType->isVoidType() &&
+ !FD->isInvalidDecl() &&
+ RequireCompleteType(FD->getLocation(), ResultType,
+ diag::err_func_def_incomplete_result))
+ FD->setInvalidDecl();
+
+ if (FnBodyScope)
+ PushDeclContext(FnBodyScope, FD);
+
+ // Check the validity of our function parameters
+ CheckParmsForFunctionDef(FD->param_begin(), FD->param_end(),
+ /*CheckParameterNames=*/true);
+
+ // Introduce our parameters into the function scope
+ for (auto Param : FD->params()) {
+ Param->setOwningFunction(FD);
+
+ // If this has an identifier, add it to the scope stack.
+ if (Param->getIdentifier() && FnBodyScope) {
+ CheckShadow(FnBodyScope, Param);
+
+ PushOnScopeChains(Param, FnBodyScope);
+ }
+ }
+
+ // If we had any tags defined in the function prototype,
+ // introduce them into the function scope.
+ if (FnBodyScope) {
+ for (ArrayRef<NamedDecl *>::iterator
+ I = FD->getDeclsInPrototypeScope().begin(),
+ E = FD->getDeclsInPrototypeScope().end();
+ I != E; ++I) {
+ NamedDecl *D = *I;
+
+ // Some of these decls (like enums) may have been pinned to the
+      // translation unit for lack of a real context earlier. If so, remove
+      // them from the translation unit and reattach to the current context.
+ if (D->getLexicalDeclContext() == Context.getTranslationUnitDecl()) {
+ // Is the decl actually in the context?
+ if (Context.getTranslationUnitDecl()->containsDecl(D))
+ Context.getTranslationUnitDecl()->removeDecl(D);
+ // Either way, reassign the lexical decl context to our FunctionDecl.
+ D->setLexicalDeclContext(CurContext);
+ }
+
+      // If the decl has a non-null name, make it accessible in this scope.
+ if (!D->getName().empty())
+ PushOnScopeChains(D, FnBodyScope, /*AddToContext=*/false);
+
+ // Similarly, dive into enums and fish their constants out, making them
+ // accessible in this scope.
+ if (auto *ED = dyn_cast<EnumDecl>(D)) {
+ for (auto *EI : ED->enumerators())
+ PushOnScopeChains(EI, FnBodyScope, /*AddToContext=*/false);
+ }
+ }
+ }
+
+ // Ensure that the function's exception specification is instantiated.
+ if (const FunctionProtoType *FPT = FD->getType()->getAs<FunctionProtoType>())
+ ResolveExceptionSpec(D->getLocation(), FPT);
+
+ // dllimport cannot be applied to non-inline function definitions.
+ if (FD->hasAttr<DLLImportAttr>() && !FD->isInlined() &&
+ !FD->isTemplateInstantiation()) {
+ assert(!FD->hasAttr<DLLExportAttr>());
+ Diag(FD->getLocation(), diag::err_attribute_dllimport_function_definition);
+ FD->setInvalidDecl();
+ return D;
+ }
+ // We want to attach documentation to original Decl (which might be
+ // a function template).
+ ActOnDocumentableDecl(D);
+ if (getCurLexicalContext()->isObjCContainer() &&
+ getCurLexicalContext()->getDeclKind() != Decl::ObjCCategoryImpl &&
+ getCurLexicalContext()->getDeclKind() != Decl::ObjCImplementation)
+ Diag(FD->getLocation(), diag::warn_function_def_in_objc_container);
+
+ return D;
+}
+
+/// \brief Given the set of return statements within a function body,
+/// compute the variables that are subject to the named return value
+/// optimization.
+///
+/// Each of the variables that is subject to the named return value
+/// optimization will be marked as NRVO variables in the AST, and any
+/// return statement that has a marked NRVO variable as its NRVO candidate can
+/// use the named return value optimization.
+///
+/// This function applies a very simplistic algorithm for NRVO: if every return
+/// statement in the scope of a variable has the same NRVO candidate, that
+/// candidate is an NRVO variable.
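+/// For example, in 'S f() { S s; ...; return s; }' every return statement
+/// returns 's', so 's' is an NRVO variable.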
+void Sema::computeNRVO(Stmt *Body, FunctionScopeInfo *Scope) {
+ ReturnStmt **Returns = Scope->Returns.data();
+
+ for (unsigned I = 0, E = Scope->Returns.size(); I != E; ++I) {
+ if (const VarDecl *NRVOCandidate = Returns[I]->getNRVOCandidate()) {
+ if (!NRVOCandidate->isNRVOVariable())
+ Returns[I]->setNRVOCandidate(nullptr);
+ }
+ }
+}
+
+bool Sema::canDelayFunctionBody(const Declarator &D) {
+ // We can't delay parsing the body of a constexpr function template (yet).
+ if (D.getDeclSpec().isConstexprSpecified())
+ return false;
+
+ // We can't delay parsing the body of a function template with a deduced
+ // return type (yet).
+ if (D.getDeclSpec().containsPlaceholderType()) {
+ // If the placeholder introduces a non-deduced trailing return type,
+ // we can still delay parsing it.
+ if (D.getNumTypeObjects()) {
+ const auto &Outer = D.getTypeObject(D.getNumTypeObjects() - 1);
+ if (Outer.Kind == DeclaratorChunk::Function &&
+ Outer.Fun.hasTrailingReturnType()) {
+ QualType Ty = GetTypeFromParser(Outer.Fun.getTrailingReturnType());
+ return Ty.isNull() || !Ty->isUndeducedType();
+ }
+ }
+ return false;
+ }
+
+ return true;
+}
+
+bool Sema::canSkipFunctionBody(Decl *D) {
+ // We cannot skip the body of a function (or function template) which is
+ // constexpr, since we may need to evaluate its body in order to parse the
+ // rest of the file.
+ // We cannot skip the body of a function with an undeduced return type,
+ // because any callers of that function need to know the type.
+ if (const FunctionDecl *FD = D->getAsFunction())
+ if (FD->isConstexpr() || FD->getReturnType()->isUndeducedType())
+ return false;
+ return Consumer.shouldSkipFunctionBody(D);
+}
+
+Decl *Sema::ActOnSkippedFunctionBody(Decl *Decl) {
+ if (FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Decl))
+ FD->setHasSkippedBody();
+ else if (ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(Decl))
+ MD->setHasSkippedBody();
+ return ActOnFinishFunctionBody(Decl, nullptr);
+}
+
+Decl *Sema::ActOnFinishFunctionBody(Decl *D, Stmt *BodyArg) {
+ return ActOnFinishFunctionBody(D, BodyArg, false);
+}
+
+Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
+ bool IsInstantiation) {
+ FunctionDecl *FD = dcl ? dcl->getAsFunction() : nullptr;
+
+ sema::AnalysisBasedWarnings::Policy WP = AnalysisWarnings.getDefaultPolicy();
+ sema::AnalysisBasedWarnings::Policy *ActivePolicy = nullptr;
+
+ if (getLangOpts().Coroutines && !getCurFunction()->CoroutineStmts.empty())
+ CheckCompletedCoroutineBody(FD, Body);
+
+ if (FD) {
+ FD->setBody(Body);
+
+ if (getLangOpts().CPlusPlus14 && !FD->isInvalidDecl() && Body &&
+ !FD->isDependentContext() && FD->getReturnType()->isUndeducedType()) {
+ // If the function has a deduced result type but contains no 'return'
+ // statements, the result type as written must be exactly 'auto', and
+ // the deduced result type is 'void'.
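+      // e.g. 'auto f() {}' deduces a return type of 'void', but
+      // 'auto *f() {}' with no return statement is ill-formed.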
+ if (!FD->getReturnType()->getAs<AutoType>()) {
+ Diag(dcl->getLocation(), diag::err_auto_fn_no_return_but_not_auto)
+ << FD->getReturnType();
+ FD->setInvalidDecl();
+ } else {
+ // Substitute 'void' for the 'auto' in the type.
+ TypeLoc ResultType = getReturnTypeLoc(FD);
+ Context.adjustDeducedFunctionResultType(
+ FD, SubstAutoType(ResultType.getType(), Context.VoidTy));
+ }
+ } else if (getLangOpts().CPlusPlus11 && isLambdaCallOperator(FD)) {
+ auto *LSI = getCurLambda();
+ if (LSI->HasImplicitReturnType) {
+ deduceClosureReturnType(*LSI);
+
+ // C++11 [expr.prim.lambda]p4:
+ // [...] if there are no return statements in the compound-statement
+ // [the deduced type is] the type void
+ QualType RetType =
+ LSI->ReturnType.isNull() ? Context.VoidTy : LSI->ReturnType;
+
+ // Update the return type to the deduced type.
+ const FunctionProtoType *Proto =
+ FD->getType()->getAs<FunctionProtoType>();
+ FD->setType(Context.getFunctionType(RetType, Proto->getParamTypes(),
+ Proto->getExtProtoInfo()));
+ }
+ }
+
+ // The only way to be included in UndefinedButUsed is if there is an
+ // ODR use before the definition. Avoid the expensive map lookup if this
+ // is the first declaration.
+ if (!FD->isFirstDecl() && FD->getPreviousDecl()->isUsed()) {
+ if (!FD->isExternallyVisible())
+ UndefinedButUsed.erase(FD);
+ else if (FD->isInlined() &&
+ !LangOpts.GNUInline &&
+ (!FD->getPreviousDecl()->hasAttr<GNUInlineAttr>()))
+ UndefinedButUsed.erase(FD);
+ }
+
+ // If the function implicitly returns zero (like 'main') or is naked,
+ // don't complain about missing return statements.
+ if (FD->hasImplicitReturnZero() || FD->hasAttr<NakedAttr>())
+ WP.disableCheckFallThrough();
+
+    // MSVC permits the use of a pure specifier (=0) on a function definition
+    // at class scope; warn about this non-standard construct.
+ if (getLangOpts().MicrosoftExt && FD->isPure() && FD->isCanonicalDecl())
+ Diag(FD->getLocation(), diag::ext_pure_function_definition);
+
+ if (!FD->isInvalidDecl()) {
+ // Don't diagnose unused parameters of defaulted or deleted functions.
+ if (!FD->isDeleted() && !FD->isDefaulted())
+ DiagnoseUnusedParameters(FD->param_begin(), FD->param_end());
+ DiagnoseSizeOfParametersAndReturnValue(FD->param_begin(), FD->param_end(),
+ FD->getReturnType(), FD);
+
+ // If this is a structor, we need a vtable.
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(FD))
+ MarkVTableUsed(FD->getLocation(), Constructor->getParent());
+ else if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(FD))
+ MarkVTableUsed(FD->getLocation(), Destructor->getParent());
+
+ // Try to apply the named return value optimization. We have to check
+ // if we can do this here because lambdas keep return statements around
+ // to deduce an implicit return type.
+ if (getLangOpts().CPlusPlus && FD->getReturnType()->isRecordType() &&
+ !FD->isDependentContext())
+ computeNRVO(Body, getCurFunction());
+ }
+
+ // GNU warning -Wmissing-prototypes:
+ // Warn if a global function is defined without a previous
+ // prototype declaration. This warning is issued even if the
+ // definition itself provides a prototype. The aim is to detect
+ // global functions that fail to be declared in header files.
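+    // e.g. a file-scope 'int f() { return 0; }' with no prior
+    // 'int f(void);' declaration triggers this warning.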
+ const FunctionDecl *PossibleZeroParamPrototype = nullptr;
+ if (ShouldWarnAboutMissingPrototype(FD, PossibleZeroParamPrototype)) {
+ Diag(FD->getLocation(), diag::warn_missing_prototype) << FD;
+
+ if (PossibleZeroParamPrototype) {
+ // We found a declaration that is not a prototype,
+ // but that could be a zero-parameter prototype
+ if (TypeSourceInfo *TI =
+ PossibleZeroParamPrototype->getTypeSourceInfo()) {
+ TypeLoc TL = TI->getTypeLoc();
+ if (FunctionNoProtoTypeLoc FTL = TL.getAs<FunctionNoProtoTypeLoc>())
+ Diag(PossibleZeroParamPrototype->getLocation(),
+ diag::note_declaration_not_a_prototype)
+ << PossibleZeroParamPrototype
+ << FixItHint::CreateInsertion(FTL.getRParenLoc(), "void");
+ }
+ }
+ }
+
+ if (auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ const CXXMethodDecl *KeyFunction;
+ if (MD->isOutOfLine() && (MD = MD->getCanonicalDecl()) &&
+ MD->isVirtual() &&
+ (KeyFunction = Context.getCurrentKeyFunction(MD->getParent())) &&
+ MD == KeyFunction->getCanonicalDecl()) {
+ // Update the key-function state if necessary for this ABI.
+ if (FD->isInlined() &&
+ !Context.getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
+ Context.setNonKeyFunction(MD);
+
+ // If the newly-chosen key function is already defined, then we
+ // need to mark the vtable as used retroactively.
+ KeyFunction = Context.getCurrentKeyFunction(MD->getParent());
+ const FunctionDecl *Definition;
+ if (KeyFunction && KeyFunction->isDefined(Definition))
+ MarkVTableUsed(Definition->getLocation(), MD->getParent(), true);
+ } else {
+        // We just defined the key function; mark the vtable as used.
+ MarkVTableUsed(FD->getLocation(), MD->getParent(), true);
+ }
+ }
+ }
+
+ assert((FD == getCurFunctionDecl() || getCurLambda()->CallOperator == FD) &&
+ "Function parsing confused");
+ } else if (ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(dcl)) {
+ assert(MD == getCurMethodDecl() && "Method parsing confused");
+ MD->setBody(Body);
+ if (!MD->isInvalidDecl()) {
+ DiagnoseUnusedParameters(MD->param_begin(), MD->param_end());
+ DiagnoseSizeOfParametersAndReturnValue(MD->param_begin(), MD->param_end(),
+ MD->getReturnType(), MD);
+
+ if (Body)
+ computeNRVO(Body, getCurFunction());
+ }
+ if (getCurFunction()->ObjCShouldCallSuper) {
+ Diag(MD->getLocEnd(), diag::warn_objc_missing_super_call)
+ << MD->getSelector().getAsString();
+ getCurFunction()->ObjCShouldCallSuper = false;
+ }
+ if (getCurFunction()->ObjCWarnForNoDesignatedInitChain) {
+ const ObjCMethodDecl *InitMethod = nullptr;
+ bool isDesignated =
+ MD->isDesignatedInitializerForTheInterface(&InitMethod);
+ assert(isDesignated && InitMethod);
+ (void)isDesignated;
+
+ auto superIsNSObject = [&](const ObjCMethodDecl *MD) {
+ auto IFace = MD->getClassInterface();
+ if (!IFace)
+ return false;
+ auto SuperD = IFace->getSuperClass();
+ if (!SuperD)
+ return false;
+ return SuperD->getIdentifier() ==
+ NSAPIObj->getNSClassId(NSAPI::ClassId_NSObject);
+ };
+ // Don't issue this warning for unavailable inits or direct subclasses
+ // of NSObject.
+ if (!MD->isUnavailable() && !superIsNSObject(MD)) {
+ Diag(MD->getLocation(),
+ diag::warn_objc_designated_init_missing_super_call);
+ Diag(InitMethod->getLocation(),
+ diag::note_objc_designated_init_marked_here);
+ }
+ getCurFunction()->ObjCWarnForNoDesignatedInitChain = false;
+ }
+ if (getCurFunction()->ObjCWarnForNoInitDelegation) {
+      // Don't issue this warning for unavailable inits.
+ if (!MD->isUnavailable())
+ Diag(MD->getLocation(),
+ diag::warn_objc_secondary_init_missing_init_call);
+ getCurFunction()->ObjCWarnForNoInitDelegation = false;
+ }
+ } else {
+ return nullptr;
+ }
+
+ assert(!getCurFunction()->ObjCShouldCallSuper &&
+ "This should only be set for ObjC methods, which should have been "
+ "handled in the block above.");
+
+ // Verify and clean out per-function state.
+ if (Body && (!FD || !FD->isDefaulted())) {
+ // C++ constructors that have function-try-blocks can't have return
+ // statements in the handlers of that block. (C++ [except.handle]p14)
+ // Verify this.
+ if (FD && isa<CXXConstructorDecl>(FD) && isa<CXXTryStmt>(Body))
+ DiagnoseReturnInConstructorExceptionHandler(cast<CXXTryStmt>(Body));
+
+ // Verify that gotos and switch cases don't jump into scopes illegally.
+ if (getCurFunction()->NeedsScopeChecking() &&
+ !PP.isCodeCompletionEnabled())
+ DiagnoseInvalidJumps(Body);
+
+ if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(dcl)) {
+ if (!Destructor->getParent()->isDependentType())
+ CheckDestructor(Destructor);
+
+ MarkBaseAndMemberDestructorsReferenced(Destructor->getLocation(),
+ Destructor->getParent());
+ }
+
+ // If any errors have occurred, clear out any temporaries that may have
+    // been left over. This ensures that these temporaries won't be picked
+    // up for deletion in some later function.
+ if (getDiagnostics().hasErrorOccurred() ||
+ getDiagnostics().getSuppressAllDiagnostics()) {
+ DiscardCleanupsInEvaluationContext();
+ }
+ if (!getDiagnostics().hasUncompilableErrorOccurred() &&
+ !isa<FunctionTemplateDecl>(dcl)) {
+ // Since the body is valid, issue any analysis-based warnings that are
+ // enabled.
+ ActivePolicy = &WP;
+ }
+
+ if (!IsInstantiation && FD && FD->isConstexpr() && !FD->isInvalidDecl() &&
+ (!CheckConstexprFunctionDecl(FD) ||
+ !CheckConstexprFunctionBody(FD, Body)))
+ FD->setInvalidDecl();
+
+ if (FD && FD->hasAttr<NakedAttr>()) {
+ for (const Stmt *S : Body->children()) {
+ if (!isa<AsmStmt>(S) && !isa<NullStmt>(S)) {
+ Diag(S->getLocStart(), diag::err_non_asm_stmt_in_naked_function);
+ Diag(FD->getAttr<NakedAttr>()->getLocation(), diag::note_attribute);
+ FD->setInvalidDecl();
+ break;
+ }
+ }
+ }
+
+ assert(ExprCleanupObjects.size() ==
+ ExprEvalContexts.back().NumCleanupObjects &&
+ "Leftover temporaries in function");
+ assert(!ExprNeedsCleanups && "Unaccounted cleanups in function");
+ assert(MaybeODRUseExprs.empty() &&
+ "Leftover expressions for odr-use checking");
+ }
+
+ if (!IsInstantiation)
+ PopDeclContext();
+
+ PopFunctionScopeInfo(ActivePolicy, dcl);
+ // If any errors have occurred, clear out any temporaries that may have
+  // been left over. This ensures that these temporaries won't be picked up
+  // for deletion in some later function.
+ if (getDiagnostics().hasErrorOccurred()) {
+ DiscardCleanupsInEvaluationContext();
+ }
+
+ return dcl;
+}
+
+
+/// When we finish delayed parsing of an attribute, we must attach it to the
+/// relevant Decl.
+void Sema::ActOnFinishDelayedAttribute(Scope *S, Decl *D,
+ ParsedAttributes &Attrs) {
+ // Always attach attributes to the underlying decl.
+ if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D))
+ D = TD->getTemplatedDecl();
+ ProcessDeclAttributeList(S, D, Attrs.getList());
+
+ if (CXXMethodDecl *Method = dyn_cast_or_null<CXXMethodDecl>(D))
+ if (Method->isStatic())
+ checkThisInStaticMemberFunctionAttributes(Method);
+}
+
+
+/// ImplicitlyDefineFunction - An undeclared identifier was used in a function
+/// call, forming a call to an implicitly defined function (per C99 6.5.1p2).
+NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
+ IdentifierInfo &II, Scope *S) {
+ // Before we produce a declaration for an implicitly defined
+ // function, see whether there was a locally-scoped declaration of
+ // this name as a function or variable. If so, use that
+ // (non-visible) declaration, and complain about it.
+ if (NamedDecl *ExternCPrev = findLocallyScopedExternCDecl(&II)) {
+ Diag(Loc, diag::warn_use_out_of_scope_declaration) << ExternCPrev;
+ Diag(ExternCPrev->getLocation(), diag::note_previous_declaration);
+ return ExternCPrev;
+ }
+
+ // Extension in C99. Legal in C90, but warn about it.
+ unsigned diag_id;
+ if (II.getName().startswith("__builtin_"))
+ diag_id = diag::warn_builtin_unknown;
+ else if (getLangOpts().C99)
+ diag_id = diag::ext_implicit_function_decl;
+ else
+ diag_id = diag::warn_implicit_function_decl;
+ Diag(Loc, diag_id) << &II;
+
+ // Because typo correction is expensive, only do it if the implicit
+ // function declaration is going to be treated as an error.
+ if (Diags.getDiagnosticLevel(diag_id, Loc) >= DiagnosticsEngine::Error) {
+ TypoCorrection Corrected;
+ if (S &&
+ (Corrected = CorrectTypo(
+ DeclarationNameInfo(&II, Loc), LookupOrdinaryName, S, nullptr,
+ llvm::make_unique<DeclFilterCCC<FunctionDecl>>(), CTK_NonError)))
+ diagnoseTypo(Corrected, PDiag(diag::note_function_suggestion),
+ /*ErrorRecovery*/false);
+ }
+
+ // Set a Declarator for the implicit definition: int foo();
+ const char *Dummy;
+ AttributeFactory attrFactory;
+ DeclSpec DS(attrFactory);
+ unsigned DiagID;
+ bool Error = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, Dummy, DiagID,
+ Context.getPrintingPolicy());
+ (void)Error; // Silence warning.
+ assert(!Error && "Error setting up implicit decl!");
+ SourceLocation NoLoc;
+ Declarator D(DS, Declarator::BlockContext);
+ D.AddTypeInfo(DeclaratorChunk::getFunction(/*HasProto=*/false,
+ /*IsAmbiguous=*/false,
+ /*LParenLoc=*/NoLoc,
+ /*Params=*/nullptr,
+ /*NumParams=*/0,
+ /*EllipsisLoc=*/NoLoc,
+ /*RParenLoc=*/NoLoc,
+ /*TypeQuals=*/0,
+ /*RefQualifierIsLvalueRef=*/true,
+ /*RefQualifierLoc=*/NoLoc,
+ /*ConstQualifierLoc=*/NoLoc,
+ /*VolatileQualifierLoc=*/NoLoc,
+ /*RestrictQualifierLoc=*/NoLoc,
+ /*MutableLoc=*/NoLoc,
+ EST_None,
+ /*ESpecRange=*/SourceRange(),
+ /*Exceptions=*/nullptr,
+ /*ExceptionRanges=*/nullptr,
+ /*NumExceptions=*/0,
+ /*NoexceptExpr=*/nullptr,
+ /*ExceptionSpecTokens=*/nullptr,
+ Loc, Loc, D),
+ DS.getAttributes(),
+ SourceLocation());
+ D.SetIdentifier(&II, Loc);
+
+ // Insert this function into translation-unit scope.
+
+ DeclContext *PrevDC = CurContext;
+ CurContext = Context.getTranslationUnitDecl();
+
+ FunctionDecl *FD = cast<FunctionDecl>(ActOnDeclarator(TUScope, D));
+ FD->setImplicit();
+
+ CurContext = PrevDC;
+
+ AddKnownFunctionAttributes(FD);
+
+ return FD;
+}
+
+/// \brief Adds any function attributes that we know a priori based on
+/// the declaration of this function.
+///
+/// These attributes can apply both to implicitly-declared builtins
+/// (like __builtin___printf_chk) and to library-declared functions
+/// like NSLog or printf.
+///
+/// We need to check for duplicate attributes both here and where user-written
+/// attributes are applied to declarations.
+void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
+ if (FD->isInvalidDecl())
+ return;
+
+ // If this is a built-in function, map its builtin attributes to
+ // actual attributes.
+ if (unsigned BuiltinID = FD->getBuiltinID()) {
+ // Handle printf-formatting attributes.
+ unsigned FormatIdx;
+ bool HasVAListArg;
+ if (Context.BuiltinInfo.isPrintfLike(BuiltinID, FormatIdx, HasVAListArg)) {
+ if (!FD->hasAttr<FormatAttr>()) {
+ const char *fmt = "printf";
+ unsigned int NumParams = FD->getNumParams();
+ if (FormatIdx < NumParams && // NumParams may be 0 (e.g. vfprintf)
+ FD->getParamDecl(FormatIdx)->getType()->isObjCObjectPointerType())
+ fmt = "NSString";
+ FD->addAttr(FormatAttr::CreateImplicit(Context,
+ &Context.Idents.get(fmt),
+ FormatIdx+1,
+ HasVAListArg ? 0 : FormatIdx+2,
+ FD->getLocation()));
+ }
+ }
+ if (Context.BuiltinInfo.isScanfLike(BuiltinID, FormatIdx,
+ HasVAListArg)) {
+ if (!FD->hasAttr<FormatAttr>())
+ FD->addAttr(FormatAttr::CreateImplicit(Context,
+ &Context.Idents.get("scanf"),
+ FormatIdx+1,
+ HasVAListArg ? 0 : FormatIdx+2,
+ FD->getLocation()));
+ }
+
+ // Mark const if we don't care about errno and that is the only
+ // thing preventing the function from being const. This allows
+ // IRgen to use LLVM intrinsics for such functions.
+ if (!getLangOpts().MathErrno &&
+ Context.BuiltinInfo.isConstWithoutErrno(BuiltinID)) {
+ if (!FD->hasAttr<ConstAttr>())
+ FD->addAttr(ConstAttr::CreateImplicit(Context, FD->getLocation()));
+ }
+
+ if (Context.BuiltinInfo.isReturnsTwice(BuiltinID) &&
+ !FD->hasAttr<ReturnsTwiceAttr>())
+ FD->addAttr(ReturnsTwiceAttr::CreateImplicit(Context,
+ FD->getLocation()));
+ if (Context.BuiltinInfo.isNoThrow(BuiltinID) && !FD->hasAttr<NoThrowAttr>())
+ FD->addAttr(NoThrowAttr::CreateImplicit(Context, FD->getLocation()));
+ if (Context.BuiltinInfo.isConst(BuiltinID) && !FD->hasAttr<ConstAttr>())
+ FD->addAttr(ConstAttr::CreateImplicit(Context, FD->getLocation()));
+ if (getLangOpts().CUDA && getLangOpts().CUDATargetOverloads &&
+ Context.BuiltinInfo.isTSBuiltin(BuiltinID) &&
+ !FD->hasAttr<CUDADeviceAttr>() && !FD->hasAttr<CUDAHostAttr>()) {
+ // Assign the appropriate attribute depending on the CUDA compilation
+ // mode and on which target the builtin belongs to. E.g. during host
+ // compilation, aux builtins are __device__ and the rest are __host__.
+ if (getLangOpts().CUDAIsDevice !=
+ Context.BuiltinInfo.isAuxBuiltinID(BuiltinID))
+ FD->addAttr(CUDADeviceAttr::CreateImplicit(Context, FD->getLocation()));
+ else
+ FD->addAttr(CUDAHostAttr::CreateImplicit(Context, FD->getLocation()));
+ }
+ }
+
+ IdentifierInfo *Name = FD->getIdentifier();
+ if (!Name)
+ return;
+ if ((!getLangOpts().CPlusPlus &&
+ FD->getDeclContext()->isTranslationUnit()) ||
+ (isa<LinkageSpecDecl>(FD->getDeclContext()) &&
+ cast<LinkageSpecDecl>(FD->getDeclContext())->getLanguage() ==
+ LinkageSpecDecl::lang_c)) {
+ // Okay: this could be a libc/libm/Objective-C function we know
+ // about.
+ } else
+ return;
+
+ if (Name->isStr("asprintf") || Name->isStr("vasprintf")) {
+ // FIXME: asprintf and vasprintf aren't C99 functions. Should they be
+ // target-specific builtins, perhaps?
+ if (!FD->hasAttr<FormatAttr>())
+ FD->addAttr(FormatAttr::CreateImplicit(Context,
+ &Context.Idents.get("printf"), 2,
+ Name->isStr("vasprintf") ? 0 : 3,
+ FD->getLocation()));
+ }
+
+ if (Name->isStr("__CFStringMakeConstantString")) {
+ // We already have a __builtin___CFStringMakeConstantString,
+ // but builds that use -fno-constant-cfstrings don't go through that.
+ if (!FD->hasAttr<FormatArgAttr>())
+ FD->addAttr(FormatArgAttr::CreateImplicit(Context, 1,
+ FD->getLocation()));
+ }
+}
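+
+ // Illustrative sketch (editor's example, not part of this change): after
+ //
+ //   extern "C" int printf(const char *, ...);
+ //
+ // this routine implicitly attaches format(printf, 1, 2), so a call such as
+ //
+ //   printf("%s", 123);
+ //
+ // is caught by -Wformat even though the declaration carries no explicit
+ // attribute.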
+
+TypedefDecl *Sema::ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
+ TypeSourceInfo *TInfo) {
+ assert(D.getIdentifier() && "Wrong callback for declspec without declarator");
+ assert(!T.isNull() && "GetTypeForDeclarator() returned null type");
+
+ if (!TInfo) {
+ assert(D.isInvalidType() && "no declarator info for valid type");
+ TInfo = Context.getTrivialTypeSourceInfo(T);
+ }
+
+ // Scope manipulation handled by caller.
+ TypedefDecl *NewTD = TypedefDecl::Create(Context, CurContext,
+ D.getLocStart(),
+ D.getIdentifierLoc(),
+ D.getIdentifier(),
+ TInfo);
+
+ // Bail out immediately if we have an invalid declaration.
+ if (D.isInvalidType()) {
+ NewTD->setInvalidDecl();
+ return NewTD;
+ }
+
+ if (D.getDeclSpec().isModulePrivateSpecified()) {
+ if (CurContext->isFunctionOrMethod())
+ Diag(NewTD->getLocation(), diag::err_module_private_local)
+ << 2 << NewTD->getDeclName()
+ << SourceRange(D.getDeclSpec().getModulePrivateSpecLoc())
+ << FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
+ else
+ NewTD->setModulePrivate();
+ }
+
+ // C++ [dcl.typedef]p8:
+ // If the typedef declaration defines an unnamed class (or
+ // enum), the first typedef-name declared by the declaration
+ // to be that class type (or enum type) is used to denote the
+ // class type (or enum type) for linkage purposes only.
+ // We need to check whether the type was declared in the declaration.
+ switch (D.getDeclSpec().getTypeSpecType()) {
+ case TST_enum:
+ case TST_struct:
+ case TST_interface:
+ case TST_union:
+ case TST_class: {
+ TagDecl *tagFromDeclSpec = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
+ setTagNameForLinkagePurposes(tagFromDeclSpec, NewTD);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ return NewTD;
+}
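+
+ // Illustrative sketch (editor's example, not part of this change): the
+ // [dcl.typedef]p8 case above covers declarations such as
+ //
+ //   typedef struct { int x, y; } Point;
+ //
+ // where 'Point' is the first typedef-name for the unnamed struct and is
+ // therefore used to denote it for linkage purposes.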
+
+/// \brief Check that this is a valid underlying type for an enum declaration.
+bool Sema::CheckEnumUnderlyingType(TypeSourceInfo *TI) {
+ SourceLocation UnderlyingLoc = TI->getTypeLoc().getBeginLoc();
+ QualType T = TI->getType();
+
+ if (T->isDependentType())
+ return false;
+
+ if (const BuiltinType *BT = T->getAs<BuiltinType>())
+ if (BT->isInteger())
+ return false;
+
+ Diag(UnderlyingLoc, diag::err_enum_invalid_underlying) << T;
+ return true;
+}
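+
+ // Illustrative sketch (editor's example, not part of this change):
+ //
+ //   enum E1 : long {};    // OK: integral underlying type
+ //   enum E2 : float {};   // error: non-integral underlying type
+ //
+ // The second declaration is rejected by the check above.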
+
+/// Check whether this is a valid redeclaration of a previous enumeration.
+/// \return true if the redeclaration was invalid.
+bool Sema::CheckEnumRedeclaration(
+ SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy,
+ bool EnumUnderlyingIsImplicit, const EnumDecl *Prev) {
+ bool IsFixed = !EnumUnderlyingTy.isNull();
+
+ if (IsScoped != Prev->isScoped()) {
+ Diag(EnumLoc, diag::err_enum_redeclare_scoped_mismatch)
+ << Prev->isScoped();
+ Diag(Prev->getLocation(), diag::note_previous_declaration);
+ return true;
+ }
+
+ if (IsFixed && Prev->isFixed()) {
+ if (!EnumUnderlyingTy->isDependentType() &&
+ !Prev->getIntegerType()->isDependentType() &&
+ !Context.hasSameUnqualifiedType(EnumUnderlyingTy,
+ Prev->getIntegerType())) {
+ // TODO: Highlight the underlying type of the redeclaration.
+ Diag(EnumLoc, diag::err_enum_redeclare_type_mismatch)
+ << EnumUnderlyingTy << Prev->getIntegerType();
+ Diag(Prev->getLocation(), diag::note_previous_declaration)
+ << Prev->getIntegerTypeRange();
+ return true;
+ }
+ } else if (IsFixed && !Prev->isFixed() && EnumUnderlyingIsImplicit) {
+ ;
+ } else if (!IsFixed && Prev->isFixed() && !Prev->getIntegerTypeSourceInfo()) {
+ ;
+ } else if (IsFixed != Prev->isFixed()) {
+ Diag(EnumLoc, diag::err_enum_redeclare_fixed_mismatch)
+ << Prev->isFixed();
+ Diag(Prev->getLocation(), diag::note_previous_declaration);
+ return true;
+ }
+
+ return false;
+}
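+
+ // Illustrative sketch (editor's example, not part of this change):
+ //
+ //   enum class E : int;
+ //   enum E;               // error: scoped/unscoped mismatch
+ //   enum class E : long;  // error: underlying type differs from 'int'
+ //
+ // Both redeclarations are rejected by the checks above.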
+
+/// \brief Get diagnostic %select index for tag kind for
+/// redeclaration diagnostic message.
+/// WARNING: Indexes apply to particular diagnostics only!
+///
+/// \returns diagnostic %select index.
+static unsigned getRedeclDiagFromTagKind(TagTypeKind Tag) {
+ switch (Tag) {
+ case TTK_Struct: return 0;
+ case TTK_Interface: return 1;
+ case TTK_Class: return 2;
+ default: llvm_unreachable("Invalid tag kind for redecl diagnostic!");
+ }
+}
+
+/// \brief Determine if tag kind is a class-key compatible with
+/// class for redeclaration (class, struct, or __interface).
+///
+/// \returns true iff the tag kind is compatible.
+static bool isClassCompatTagKind(TagTypeKind Tag) {
+ return Tag == TTK_Struct || Tag == TTK_Class || Tag == TTK_Interface;
+}
+
+/// \brief Determine whether a tag with a given kind is acceptable
+/// as a redeclaration of the given tag declaration.
+///
+/// \returns true if the new tag kind is acceptable, false otherwise.
+bool Sema::isAcceptableTagRedeclaration(const TagDecl *Previous,
+ TagTypeKind NewTag, bool isDefinition,
+ SourceLocation NewTagLoc,
+ const IdentifierInfo *Name) {
+ // C++ [dcl.type.elab]p3:
+ // The class-key or enum keyword present in the
+ // elaborated-type-specifier shall agree in kind with the
+ // declaration to which the name in the elaborated-type-specifier
+ // refers. This rule also applies to the form of
+ // elaborated-type-specifier that declares a class-name or
+ // friend class since it can be construed as referring to the
+ // definition of the class. Thus, in any
+ // elaborated-type-specifier, the enum keyword shall be used to
+ // refer to an enumeration (7.2), the union class-key shall be
+ // used to refer to a union (clause 9), and either the class or
+ // struct class-key shall be used to refer to a class (clause 9)
+ // declared using the class or struct class-key.
+ TagTypeKind OldTag = Previous->getTagKind();
+ if (!isDefinition || !isClassCompatTagKind(NewTag))
+ if (OldTag == NewTag)
+ return true;
+
+ if (isClassCompatTagKind(OldTag) && isClassCompatTagKind(NewTag)) {
+ // Warn about the struct/class tag mismatch.
+ bool isTemplate = false;
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Previous))
+ isTemplate = Record->getDescribedClassTemplate();
+
+ if (!ActiveTemplateInstantiations.empty()) {
+ // In a template instantiation, do not offer fix-its for tag mismatches
+ // since they usually mess up the template instead of fixing the problem.
+ Diag(NewTagLoc, diag::warn_struct_class_tag_mismatch)
+ << getRedeclDiagFromTagKind(NewTag) << isTemplate << Name
+ << getRedeclDiagFromTagKind(OldTag);
+ return true;
+ }
+
+ if (isDefinition) {
+ // On definitions, check previous tags and issue a fix-it for each
+ // one that doesn't match the current tag.
+ if (Previous->getDefinition()) {
+ // Don't suggest fix-its for redefinitions.
+ return true;
+ }
+
+ bool previousMismatch = false;
+ for (auto I : Previous->redecls()) {
+ if (I->getTagKind() != NewTag) {
+ if (!previousMismatch) {
+ previousMismatch = true;
+ Diag(NewTagLoc, diag::warn_struct_class_previous_tag_mismatch)
+ << getRedeclDiagFromTagKind(NewTag) << isTemplate << Name
+ << getRedeclDiagFromTagKind(I->getTagKind());
+ }
+ Diag(I->getInnerLocStart(), diag::note_struct_class_suggestion)
+ << getRedeclDiagFromTagKind(NewTag)
+ << FixItHint::CreateReplacement(I->getInnerLocStart(),
+ TypeWithKeyword::getTagTypeKindName(NewTag));
+ }
+ }
+ return true;
+ }
+
+ // Check for a previous definition. If the current tag and the definition
+ // are the same type, do nothing. If there is no definition but we disagree
+ // with the previous tag type, give a warning, but no fix-it.
+ const TagDecl *Redecl = Previous->getDefinition() ?
+ Previous->getDefinition() : Previous;
+ if (Redecl->getTagKind() == NewTag) {
+ return true;
+ }
+
+ Diag(NewTagLoc, diag::warn_struct_class_tag_mismatch)
+ << getRedeclDiagFromTagKind(NewTag) << isTemplate << Name
+ << getRedeclDiagFromTagKind(OldTag);
+ Diag(Redecl->getLocation(), diag::note_previous_use);
+
+ // If there is a previous definition, suggest a fix-it.
+ if (Previous->getDefinition()) {
+ Diag(NewTagLoc, diag::note_struct_class_suggestion)
+ << getRedeclDiagFromTagKind(Redecl->getTagKind())
+ << FixItHint::CreateReplacement(SourceRange(NewTagLoc),
+ TypeWithKeyword::getTagTypeKindName(Redecl->getTagKind()));
+ }
+
+ return true;
+ }
+ return false;
+}
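+
+ // Illustrative sketch (editor's example, not part of this change):
+ //
+ //   class X;
+ //   struct X { int n; };  // warning: struct/class mismatch
+ //
+ // Here the definition triggers -Wmismatched-tags with a fix-it that
+ // rewrites the earlier 'class X;' to use 'struct'. A struct/enum mismatch,
+ // by contrast, makes this function return false and the caller issues a
+ // hard error.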
+
+/// Add a minimal nested name specifier fixit hint to allow lookup of a tag name
+/// from an outer enclosing namespace or file scope inside a friend declaration.
+/// This should provide the commented out code in the following snippet:
+/// namespace N {
+/// struct X;
+/// namespace M {
+/// struct Y { friend struct /*N::*/ X; };
+/// }
+/// }
+static FixItHint createFriendTagNNSFixIt(Sema &SemaRef, NamedDecl *ND, Scope *S,
+ SourceLocation NameLoc) {
+ // While the decl is in a namespace, do repeated lookup of that name and see
+ // if we get the same namespace back. If we do not, continue until
+ // translation unit scope, at which point we have a fully qualified NNS.
+ SmallVector<IdentifierInfo *, 4> Namespaces;
+ DeclContext *DC = ND->getDeclContext()->getRedeclContext();
+ for (; !DC->isTranslationUnit(); DC = DC->getParent()) {
+ // This tag should be declared in a namespace, which can only be enclosed by
+ // other namespaces. Bail if there's an anonymous namespace in the chain.
+ NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(DC);
+ if (!Namespace || Namespace->isAnonymousNamespace())
+ return FixItHint();
+ IdentifierInfo *II = Namespace->getIdentifier();
+ Namespaces.push_back(II);
+ NamedDecl *Lookup = SemaRef.LookupSingleName(
+ S, II, NameLoc, Sema::LookupNestedNameSpecifierName);
+ if (Lookup == Namespace)
+ break;
+ }
+
+ // Once we have all the namespaces, reverse them to go outermost first, and
+ // build an NNS.
+ SmallString<64> Insertion;
+ llvm::raw_svector_ostream OS(Insertion);
+ if (DC->isTranslationUnit())
+ OS << "::";
+ std::reverse(Namespaces.begin(), Namespaces.end());
+ for (auto *II : Namespaces)
+ OS << II->getName() << "::";
+ return FixItHint::CreateInsertion(NameLoc, Insertion);
+}
+
+/// \brief Determine whether a tag originally declared in context \p OldDC can
+/// be redeclared with an unqualified name in \p NewDC (assuming name lookup
+/// found a declaration in \p OldDC as a previous decl, perhaps through a
+/// using-declaration).
+static bool isAcceptableTagRedeclContext(Sema &S, DeclContext *OldDC,
+ DeclContext *NewDC) {
+ OldDC = OldDC->getRedeclContext();
+ NewDC = NewDC->getRedeclContext();
+
+ if (OldDC->Equals(NewDC))
+ return true;
+
+ // In MSVC mode, we allow a redeclaration if the contexts are related (either
+ // encloses the other).
+ if (S.getLangOpts().MSVCCompat &&
+ (OldDC->Encloses(NewDC) || NewDC->Encloses(OldDC)))
+ return true;
+
+ return false;
+}
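+
+ // Illustrative sketch (editor's example, not part of this change):
+ //
+ //   namespace N { struct S; }
+ //   using N::S;
+ //   struct S;   // error by default; accepted under -fms-compatibility,
+ //               // since the global scope encloses N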
+
+/// \brief This is invoked when we see 'struct foo' or 'struct {'. In the
+/// former case, Name will be non-null. In the latter case, Name will be null.
+/// TagSpec indicates what kind of tag this is. TUK indicates whether this is a
+/// reference/declaration/definition of a tag.
+///
+/// \param IsTypeSpecifier \c true if this is a type-specifier (or
+/// trailing-type-specifier) other than one in an alias-declaration.
+///
+/// \param SkipBody If non-null, will be set to indicate if the caller should
+/// skip the definition of this tag and treat it as if it were a declaration.
+Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
+ SourceLocation KWLoc, CXXScopeSpec &SS,
+ IdentifierInfo *Name, SourceLocation NameLoc,
+ AttributeList *Attr, AccessSpecifier AS,
+ SourceLocation ModulePrivateLoc,
+ MultiTemplateParamsArg TemplateParameterLists,
+ bool &OwnedDecl, bool &IsDependent,
+ SourceLocation ScopedEnumKWLoc,
+ bool ScopedEnumUsesClassTag,
+ TypeResult UnderlyingType,
+ bool IsTypeSpecifier, SkipBodyInfo *SkipBody) {
+ // If this is not a definition, it must have a name.
+ IdentifierInfo *OrigName = Name;
+ assert((Name != nullptr || TUK == TUK_Definition) &&
+ "Nameless record must be a definition!");
+ assert(TemplateParameterLists.size() == 0 || TUK != TUK_Reference);
+
+ OwnedDecl = false;
+ TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
+ bool ScopedEnum = ScopedEnumKWLoc.isValid();
+
+ // FIXME: Check explicit specializations more carefully.
+ bool isExplicitSpecialization = false;
+ bool Invalid = false;
+
+ // We only need to do this matching if we have template parameters
+ // or a scope specifier, which also conveniently avoids this work
+ // for non-C++ cases.
+ if (TemplateParameterLists.size() > 0 ||
+ (SS.isNotEmpty() && TUK != TUK_Reference)) {
+ if (TemplateParameterList *TemplateParams =
+ MatchTemplateParametersToScopeSpecifier(
+ KWLoc, NameLoc, SS, nullptr, TemplateParameterLists,
+ TUK == TUK_Friend, isExplicitSpecialization, Invalid)) {
+ if (Kind == TTK_Enum) {
+ Diag(KWLoc, diag::err_enum_template);
+ return nullptr;
+ }
+
+ if (TemplateParams->size() > 0) {
+ // This is a declaration or definition of a class template (which may
+ // be a member of another template).
+
+ if (Invalid)
+ return nullptr;
+
+ OwnedDecl = false;
+ DeclResult Result = CheckClassTemplate(S, TagSpec, TUK, KWLoc,
+ SS, Name, NameLoc, Attr,
+ TemplateParams, AS,
+ ModulePrivateLoc,
+ /*FriendLoc*/SourceLocation(),
+ TemplateParameterLists.size()-1,
+ TemplateParameterLists.data(),
+ SkipBody);
+ return Result.get();
+ } else {
+ // The "template<>" header is extraneous.
+ Diag(TemplateParams->getTemplateLoc(), diag::err_template_tag_noparams)
+ << TypeWithKeyword::getTagTypeKindName(Kind) << Name;
+ isExplicitSpecialization = true;
+ }
+ }
+ }
+
+ // Figure out the underlying type if this is an enum declaration. We need to
+ // do this early, because it's needed to detect if this is an incompatible
+ // redeclaration.
+ llvm::PointerUnion<const Type*, TypeSourceInfo*> EnumUnderlying;
+ bool EnumUnderlyingIsImplicit = false;
+
+ if (Kind == TTK_Enum) {
+ if (UnderlyingType.isInvalid() || (!UnderlyingType.get() && ScopedEnum))
+ // No underlying type explicitly specified, or we failed to parse the
+ // type, default to int.
+ EnumUnderlying = Context.IntTy.getTypePtr();
+ else if (UnderlyingType.get()) {
+ // C++0x 7.2p2: The type-specifier-seq of an enum-base shall name an
+ // integral type; any cv-qualification is ignored.
+ TypeSourceInfo *TI = nullptr;
+ GetTypeFromParser(UnderlyingType.get(), &TI);
+ EnumUnderlying = TI;
+
+ if (CheckEnumUnderlyingType(TI))
+ // Recover by falling back to int.
+ EnumUnderlying = Context.IntTy.getTypePtr();
+
+ if (DiagnoseUnexpandedParameterPack(TI->getTypeLoc().getBeginLoc(), TI,
+ UPPC_FixedUnderlyingType))
+ EnumUnderlying = Context.IntTy.getTypePtr();
+
+ } else if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ if (getLangOpts().MSVCCompat || TUK == TUK_Definition) {
+ // Microsoft enums are always of int type.
+ EnumUnderlying = Context.IntTy.getTypePtr();
+ EnumUnderlyingIsImplicit = true;
+ }
+ }
+ }
+
+ DeclContext *SearchDC = CurContext;
+ DeclContext *DC = CurContext;
+ bool isStdBadAlloc = false;
+
+ RedeclarationKind Redecl = ForRedeclaration;
+ if (TUK == TUK_Friend || TUK == TUK_Reference)
+ Redecl = NotForRedeclaration;
+
+ LookupResult Previous(*this, Name, NameLoc, LookupTagName, Redecl);
+ if (Name && SS.isNotEmpty()) {
+ // We have a nested-name tag ('struct foo::bar').
+
+ // Check for invalid 'foo::'.
+ if (SS.isInvalid()) {
+ Name = nullptr;
+ goto CreateNewDecl;
+ }
+
+ // If this is a friend or a reference to a class in a dependent
+ // context, don't try to make a decl for it.
+ if (TUK == TUK_Friend || TUK == TUK_Reference) {
+ DC = computeDeclContext(SS, false);
+ if (!DC) {
+ IsDependent = true;
+ return nullptr;
+ }
+ } else {
+ DC = computeDeclContext(SS, true);
+ if (!DC) {
+ Diag(SS.getRange().getBegin(), diag::err_dependent_nested_name_spec)
+ << SS.getRange();
+ return nullptr;
+ }
+ }
+
+ if (RequireCompleteDeclContext(SS, DC))
+ return nullptr;
+
+ SearchDC = DC;
+ // Look-up name inside 'foo::'.
+ LookupQualifiedName(Previous, DC);
+
+ if (Previous.isAmbiguous())
+ return nullptr;
+
+ if (Previous.empty()) {
+ // Name lookup did not find anything. However, if the
+ // nested-name-specifier refers to the current instantiation,
+ // and that current instantiation has any dependent base
+ // classes, we might find something at instantiation time: treat
+ // this as a dependent elaborated-type-specifier.
+ // But this only makes any sense for reference-like lookups.
+ if (Previous.wasNotFoundInCurrentInstantiation() &&
+ (TUK == TUK_Reference || TUK == TUK_Friend)) {
+ IsDependent = true;
+ return nullptr;
+ }
+
+ // A tag 'foo::bar' must already exist.
+ Diag(NameLoc, diag::err_not_tag_in_scope)
+ << Kind << Name << DC << SS.getRange();
+ Name = nullptr;
+ Invalid = true;
+ goto CreateNewDecl;
+ }
+ } else if (Name) {
+ // C++14 [class.mem]p14:
+ // If T is the name of a class, then each of the following shall have a
+ // name different from T:
+ // -- every member of class T that is itself a type
+ if (TUK != TUK_Reference && TUK != TUK_Friend &&
+ DiagnoseClassNameShadow(SearchDC, DeclarationNameInfo(Name, NameLoc)))
+ return nullptr;
+
+ // If this is a named struct, check to see if there was a previous forward
+ // declaration or definition.
+ // FIXME: We're looking into outer scopes here, even when we
+ // shouldn't be. Doing so can result in ambiguities that we
+ // shouldn't be diagnosing.
+ LookupName(Previous, S);
+
+ // When declaring or defining a tag, ignore ambiguities introduced
+ // by types using'ed into this scope.
+ if (Previous.isAmbiguous() &&
+ (TUK == TUK_Definition || TUK == TUK_Declaration)) {
+ LookupResult::Filter F = Previous.makeFilter();
+ while (F.hasNext()) {
+ NamedDecl *ND = F.next();
+ if (ND->getDeclContext()->getRedeclContext() != SearchDC)
+ F.erase();
+ }
+ F.done();
+ }
+
+ // C++11 [namespace.memdef]p3:
+ // If the name in a friend declaration is neither qualified nor
+ // a template-id and the declaration is a function or an
+ // elaborated-type-specifier, the lookup to determine whether
+ // the entity has been previously declared shall not consider
+ // any scopes outside the innermost enclosing namespace.
+ //
+ // MSVC doesn't implement the above rule for types, so a friend tag
+ // declaration may be a redeclaration of a type declared in an enclosing
+ // scope. They do implement this rule for friend functions.
+ //
+ // Does it matter that this should be by scope instead of by
+ // semantic context?
+ if (!Previous.empty() && TUK == TUK_Friend) {
+ DeclContext *EnclosingNS = SearchDC->getEnclosingNamespaceContext();
+ LookupResult::Filter F = Previous.makeFilter();
+ bool FriendSawTagOutsideEnclosingNamespace = false;
+ while (F.hasNext()) {
+ NamedDecl *ND = F.next();
+ DeclContext *DC = ND->getDeclContext()->getRedeclContext();
+ if (DC->isFileContext() &&
+ !EnclosingNS->Encloses(ND->getDeclContext())) {
+ if (getLangOpts().MSVCCompat)
+ FriendSawTagOutsideEnclosingNamespace = true;
+ else
+ F.erase();
+ }
+ }
+ F.done();
+
+ // Diagnose this MSVC extension in the easy case where lookup would have
+ // unambiguously found something outside the enclosing namespace.
+ if (Previous.isSingleResult() && FriendSawTagOutsideEnclosingNamespace) {
+ NamedDecl *ND = Previous.getFoundDecl();
+ Diag(NameLoc, diag::ext_friend_tag_redecl_outside_namespace)
+ << createFriendTagNNSFixIt(*this, ND, S, NameLoc);
+ }
+ }
+
+ // Note: there used to be some attempt at recovery here.
+ if (Previous.isAmbiguous())
+ return nullptr;
+
+ if (!getLangOpts().CPlusPlus && TUK != TUK_Reference) {
+ // FIXME: This makes sure that we ignore the contexts associated
+ // with C structs, unions, and enums when looking for a matching
+ // tag declaration or definition. See the similar lookup tweak
+ // in Sema::LookupName; is there a better way to deal with this?
+ while (isa<RecordDecl>(SearchDC) || isa<EnumDecl>(SearchDC))
+ SearchDC = SearchDC->getParent();
+ }
+ }
+
+ if (Previous.isSingleResult() &&
+ Previous.getFoundDecl()->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(NameLoc, Previous.getFoundDecl());
+ // Just pretend that we didn't see the previous declaration.
+ Previous.clear();
+ }
+
+ if (getLangOpts().CPlusPlus && Name && DC && StdNamespace &&
+ DC->Equals(getStdNamespace()) && Name->isStr("bad_alloc")) {
+ // This is a declaration of or a reference to "std::bad_alloc".
+ isStdBadAlloc = true;
+
+ if (Previous.empty() && StdBadAlloc) {
+ // std::bad_alloc has been implicitly declared (but made invisible to
+ // name lookup). Fill in this implicit declaration as the previous
+ // declaration, so that the declarations get chained appropriately.
+ Previous.addDecl(getStdBadAlloc());
+ }
+ }
+
+ // If we didn't find a previous declaration, and this is a reference
+ // (or friend reference), move to the correct scope. In C++, we
+ // also need to do a redeclaration lookup there, just in case
+ // there's a shadow friend decl.
+ if (Name && Previous.empty() &&
+ (TUK == TUK_Reference || TUK == TUK_Friend)) {
+ if (Invalid) goto CreateNewDecl;
+ assert(SS.isEmpty());
+
+ if (TUK == TUK_Reference) {
+ // C++ [basic.scope.pdecl]p5:
+ // -- for an elaborated-type-specifier of the form
+ //
+ // class-key identifier
+ //
+ // if the elaborated-type-specifier is used in the
+ // decl-specifier-seq or parameter-declaration-clause of a
+ // function defined in namespace scope, the identifier is
+ // declared as a class-name in the namespace that contains
+ // the declaration; otherwise, except as a friend
+ // declaration, the identifier is declared in the smallest
+ // non-class, non-function-prototype scope that contains the
+ // declaration.
+ //
+ // C99 6.7.2.3p8 has a similar (but not identical!) provision for
+ // C structs and unions.
+ //
+ // It is an error in C++ to declare (rather than define) an enum
+ // type, including via an elaborated type specifier. We'll
+ // diagnose that later; for now, declare the enum in the same
+ // scope as we would have picked for any other tag type.
+ //
+ // GNU C also supports this behavior as part of its incomplete
+ // enum types extension, while GNU C++ does not.
+ //
+ // Find the context where we'll be declaring the tag.
+ // FIXME: We would like to maintain the current DeclContext as the
+ // lexical context.
+ while (!SearchDC->isFileContext() && !SearchDC->isFunctionOrMethod())
+ SearchDC = SearchDC->getParent();
+
+ // Find the scope where we'll be declaring the tag.
+ while (S->isClassScope() ||
+ (getLangOpts().CPlusPlus &&
+ S->isFunctionPrototypeScope()) ||
+ ((S->getFlags() & Scope::DeclScope) == 0) ||
+ (S->getEntity() && S->getEntity()->isTransparentContext()))
+ S = S->getParent();
+ } else {
+ assert(TUK == TUK_Friend);
+ // C++ [namespace.memdef]p3:
+ // If a friend declaration in a non-local class first declares a
+ // class or function, the friend class or function is a member of
+ // the innermost enclosing namespace.
+ SearchDC = SearchDC->getEnclosingNamespaceContext();
+ }
+
+ // In C++, we need to do a redeclaration lookup to properly
+ // diagnose some problems.
+ // FIXME: redeclaration lookup is also used (with and without C++) to find a
+ // hidden declaration so that we don't get ambiguity errors when using a
+ // type declared by an elaborated-type-specifier. In C that is not correct
+ // and we should instead merge compatible types found by lookup.
+ if (getLangOpts().CPlusPlus) {
+ Previous.setRedeclarationKind(ForRedeclaration);
+ LookupQualifiedName(Previous, SearchDC);
+ } else {
+ Previous.setRedeclarationKind(ForRedeclaration);
+ LookupName(Previous, S);
+ }
+ }
+
+ // If we have a known previous declaration to use, then use it.
+ if (Previous.empty() && SkipBody && SkipBody->Previous)
+ Previous.addDecl(SkipBody->Previous);
+
+ if (!Previous.empty()) {
+ NamedDecl *PrevDecl = Previous.getFoundDecl();
+ NamedDecl *DirectPrevDecl = Previous.getRepresentativeDecl();
+
+ // It's okay to have a tag decl in the same scope as a typedef
+ // which hides a tag decl in the same scope. Finding this
+ // insanity with a redeclaration lookup can only actually happen
+ // in C++.
+ //
+ // This is also okay for elaborated-type-specifiers, which are
+ // technically forbidden by the current standard but are
+ // okay according to the likely resolution of an open issue;
+ // see http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#407
+ if (getLangOpts().CPlusPlus) {
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(PrevDecl)) {
+ if (const TagType *TT = TD->getUnderlyingType()->getAs<TagType>()) {
+ TagDecl *Tag = TT->getDecl();
+ if (Tag->getDeclName() == Name &&
+ Tag->getDeclContext()->getRedeclContext()
+ ->Equals(TD->getDeclContext()->getRedeclContext())) {
+ PrevDecl = Tag;
+ Previous.clear();
+ Previous.addDecl(Tag);
+ Previous.resolveKind();
+ }
+ }
+ }
+ }
+
+ // If this is a redeclaration of a using shadow declaration, it must
+ // declare a tag in the same context. In MSVC mode, we allow a
+ // redefinition if either context is within the other.
+ if (auto *Shadow = dyn_cast<UsingShadowDecl>(DirectPrevDecl)) {
+ auto *OldTag = dyn_cast<TagDecl>(PrevDecl);
+ if (SS.isEmpty() && TUK != TUK_Reference && TUK != TUK_Friend &&
+ isDeclInScope(Shadow, SearchDC, S, isExplicitSpecialization) &&
+ !(OldTag && isAcceptableTagRedeclContext(
+ *this, OldTag->getDeclContext(), SearchDC))) {
+ Diag(KWLoc, diag::err_using_decl_conflict_reverse);
+ Diag(Shadow->getTargetDecl()->getLocation(),
+ diag::note_using_decl_target);
+ Diag(Shadow->getUsingDecl()->getLocation(), diag::note_using_decl)
+ << 0;
+ // Recover by ignoring the old declaration.
+ Previous.clear();
+ goto CreateNewDecl;
+ }
+ }
+
+ if (TagDecl *PrevTagDecl = dyn_cast<TagDecl>(PrevDecl)) {
+ // If this is a use of a previous tag, or if the tag is already declared
+ // in the same scope (so that the definition/declaration completes or
+ // re-mentions the tag), reuse the decl.
+ if (TUK == TUK_Reference || TUK == TUK_Friend ||
+ isDeclInScope(DirectPrevDecl, SearchDC, S,
+ SS.isNotEmpty() || isExplicitSpecialization)) {
+ // Make sure that this wasn't declared as an enum and now used as a
+ // struct or something similar.
+ if (!isAcceptableTagRedeclaration(PrevTagDecl, Kind,
+ TUK == TUK_Definition, KWLoc,
+ Name)) {
+ bool SafeToContinue
+ = (PrevTagDecl->getTagKind() != TTK_Enum &&
+ Kind != TTK_Enum);
+ if (SafeToContinue)
+ Diag(KWLoc, diag::err_use_with_wrong_tag)
+ << Name
+ << FixItHint::CreateReplacement(SourceRange(KWLoc),
+ PrevTagDecl->getKindName());
+ else
+ Diag(KWLoc, diag::err_use_with_wrong_tag) << Name;
+ Diag(PrevTagDecl->getLocation(), diag::note_previous_use);
+
+ if (SafeToContinue)
+ Kind = PrevTagDecl->getTagKind();
+ else {
+ // Recover by making this an anonymous redefinition.
+ Name = nullptr;
+ Previous.clear();
+ Invalid = true;
+ }
+ }
+
+ if (Kind == TTK_Enum && PrevTagDecl->getTagKind() == TTK_Enum) {
+ const EnumDecl *PrevEnum = cast<EnumDecl>(PrevTagDecl);
+
+ // If this is an elaborated-type-specifier for a scoped enumeration,
+ // the 'class' keyword is not necessary and not permitted.
+ if (TUK == TUK_Reference || TUK == TUK_Friend) {
+ if (ScopedEnum)
+ Diag(ScopedEnumKWLoc, diag::err_enum_class_reference)
+ << PrevEnum->isScoped()
+ << FixItHint::CreateRemoval(ScopedEnumKWLoc);
+ return PrevTagDecl;
+ }
+
+ QualType EnumUnderlyingTy;
+ if (TypeSourceInfo *TI = EnumUnderlying.dyn_cast<TypeSourceInfo*>())
+ EnumUnderlyingTy = TI->getType().getUnqualifiedType();
+ else if (const Type *T = EnumUnderlying.dyn_cast<const Type*>())
+ EnumUnderlyingTy = QualType(T, 0);
+
+ // All conflicts with previous declarations are recovered by
+ // returning the previous declaration, unless this is a definition,
+ // in which case we want the caller to bail out.
+ if (CheckEnumRedeclaration(NameLoc.isValid() ? NameLoc : KWLoc,
+ ScopedEnum, EnumUnderlyingTy,
+ EnumUnderlyingIsImplicit, PrevEnum))
+ return TUK == TUK_Declaration ? PrevTagDecl : nullptr;
+ }
+
+ // C++11 [class.mem]p1:
+ // A member shall not be declared twice in the member-specification,
+ // except that a nested class or member class template can be declared
+ // and then later defined.
+ if (TUK == TUK_Declaration && PrevDecl->isCXXClassMember() &&
+ S->isDeclScope(PrevDecl)) {
+ Diag(NameLoc, diag::ext_member_redeclared);
+ Diag(PrevTagDecl->getLocation(), diag::note_previous_declaration);
+ }
+
+ if (!Invalid) {
+ // If this is a use, just return the declaration we found, unless
+ // we have attributes.
+ if (TUK == TUK_Reference || TUK == TUK_Friend) {
+ if (Attr) {
+ // FIXME: Diagnose these attributes. For now, we create a new
+ // declaration to hold them.
+ } else if (TUK == TUK_Reference &&
+ (PrevTagDecl->getFriendObjectKind() ==
+ Decl::FOK_Undeclared ||
+ getOwningModule(PrevDecl) !=
+ PP.getModuleContainingLocation(KWLoc)) &&
+ SS.isEmpty()) {
+ // This declaration is a reference to an existing entity, but
+ // has different visibility from that entity: it either makes
+ // a friend visible or it makes a type visible in a new module.
+ // In either case, create a new declaration. We only do this if
+ // the declaration would have meant the same thing if no prior
+ // declaration were found, that is, if it was found in the same
+ // scope where we would have injected a declaration.
+ DeclContext *InjectedDC = CurContext;
+ while (!InjectedDC->isFileContext() &&
+ !InjectedDC->isFunctionOrMethod())
+ InjectedDC = InjectedDC->getParent();
+ if (!InjectedDC->getRedeclContext()->Equals(
+ PrevDecl->getDeclContext()->getRedeclContext()))
+ return PrevTagDecl;
+ // This is in the injected scope, create a new declaration.
+ } else {
+ return PrevTagDecl;
+ }
+ }
+
+ // Diagnose attempts to redefine a tag.
+ if (TUK == TUK_Definition) {
+ if (NamedDecl *Def = PrevTagDecl->getDefinition()) {
+ // If we're defining a specialization and the previous definition
+ // is from an implicit instantiation, don't emit an error
+ // here; we'll catch this in the general case below.
+ bool IsExplicitSpecializationAfterInstantiation = false;
+ if (isExplicitSpecialization) {
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Def))
+ IsExplicitSpecializationAfterInstantiation =
+ RD->getTemplateSpecializationKind() !=
+ TSK_ExplicitSpecialization;
+ else if (EnumDecl *ED = dyn_cast<EnumDecl>(Def))
+ IsExplicitSpecializationAfterInstantiation =
+ ED->getTemplateSpecializationKind() !=
+ TSK_ExplicitSpecialization;
+ }
+
+ NamedDecl *Hidden = nullptr;
+ if (SkipBody && getLangOpts().CPlusPlus &&
+ !hasVisibleDefinition(Def, &Hidden)) {
+ // There is a definition of this tag, but it is not visible. We
+ // explicitly make use of C++'s one definition rule here, and
+ // assume that this definition is identical to the hidden one
+ // we already have. Make the existing definition visible and
+ // use it in place of this one.
+ SkipBody->ShouldSkip = true;
+ makeMergedDefinitionVisible(Hidden, KWLoc);
+ return Def;
+ } else if (!IsExplicitSpecializationAfterInstantiation) {
+ // A redeclaration in function prototype scope in C isn't
+ // visible elsewhere, so merely issue a warning.
+ if (!getLangOpts().CPlusPlus && S->containedInPrototypeScope())
+ Diag(NameLoc, diag::warn_redefinition_in_param_list) << Name;
+ else
+ Diag(NameLoc, diag::err_redefinition) << Name;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ // If this is a redefinition, recover by making this
+ // struct be anonymous, which will make any later
+ // references get the previous definition.
+ Name = nullptr;
+ Previous.clear();
+ Invalid = true;
+ }
+ } else {
+ // If the type is currently being defined, complain
+ // about a nested redefinition.
+ auto *TD = Context.getTagDeclType(PrevTagDecl)->getAsTagDecl();
+ if (TD->isBeingDefined()) {
+ Diag(NameLoc, diag::err_nested_redefinition) << Name;
+ Diag(PrevTagDecl->getLocation(),
+ diag::note_previous_definition);
+ Name = nullptr;
+ Previous.clear();
+ Invalid = true;
+ }
+ }
+
+ // Okay, this is a definition of a previously declared or referenced
+ // tag. We're going to create a new Decl for it.
+ }
+
+ // Okay, we're going to make a redeclaration. If this is some kind
+ // of reference, make sure we build the redeclaration in the same DC
+ // as the original, and ignore the current access specifier.
+ if (TUK == TUK_Friend || TUK == TUK_Reference) {
+ SearchDC = PrevTagDecl->getDeclContext();
+ AS = AS_none;
+ }
+ }
+ // If we get here we have (another) forward declaration or we
+ // have a definition. Just create a new decl.
+
+ } else {
+ // If we get here, this is a definition of a new tag type in a nested
+ // scope, e.g. "struct foo; void bar() { struct foo; }", just create a
+ // new decl/type. We set PrevDecl to NULL so that the entities
+ // have distinct types.
+ Previous.clear();
+ }
+ // If we get here, we're going to create a new Decl. If PrevDecl
+ // is non-NULL, it's a definition of the tag declared by
+ // PrevDecl. If it's NULL, we have a new definition.
+
+ // Otherwise, PrevDecl is not a tag, but was found with tag
+ // lookup. This is only actually possible in C++, where a few
+ // things like templates still live in the tag namespace.
+ } else {
+ // Use a better diagnostic if an elaborated-type-specifier
+ // found the wrong kind of type on the first
+ // (non-redeclaration) lookup.
+ if ((TUK == TUK_Reference || TUK == TUK_Friend) &&
+ !Previous.isForRedeclaration()) {
+ unsigned Kind = 0;
+ if (isa<TypedefDecl>(PrevDecl)) Kind = 1;
+ else if (isa<TypeAliasDecl>(PrevDecl)) Kind = 2;
+ else if (isa<ClassTemplateDecl>(PrevDecl)) Kind = 3;
+ Diag(NameLoc, diag::err_tag_reference_non_tag) << Kind;
+ Diag(PrevDecl->getLocation(), diag::note_declared_at);
+ Invalid = true;
+
+ // Otherwise, only diagnose if the declaration is in scope.
+ } else if (!isDeclInScope(DirectPrevDecl, SearchDC, S,
+ SS.isNotEmpty() || isExplicitSpecialization)) {
+ // do nothing
+
+ // Diagnose implicit declarations introduced by elaborated types.
+ } else if (TUK == TUK_Reference || TUK == TUK_Friend) {
+ unsigned Kind = 0;
+ if (isa<TypedefDecl>(PrevDecl)) Kind = 1;
+ else if (isa<TypeAliasDecl>(PrevDecl)) Kind = 2;
+ else if (isa<ClassTemplateDecl>(PrevDecl)) Kind = 3;
+ Diag(NameLoc, diag::err_tag_reference_conflict) << Kind;
+ Diag(PrevDecl->getLocation(), diag::note_previous_decl) << PrevDecl;
+ Invalid = true;
+
+ // Otherwise it's a declaration. Call out a particularly common
+ // case here.
+ } else if (TypedefNameDecl *TND = dyn_cast<TypedefNameDecl>(PrevDecl)) {
+ unsigned Kind = 0;
+ if (isa<TypeAliasDecl>(PrevDecl)) Kind = 1;
+ Diag(NameLoc, diag::err_tag_definition_of_typedef)
+ << Name << Kind << TND->getUnderlyingType();
+ Diag(PrevDecl->getLocation(), diag::note_previous_decl) << PrevDecl;
+ Invalid = true;
+
+ // Otherwise, diagnose.
+ } else {
+ // The tag name clashes with something else in the target scope,
+ // issue an error and recover by making this tag be anonymous.
+ Diag(NameLoc, diag::err_redefinition_different_kind) << Name;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ Name = nullptr;
+ Invalid = true;
+ }
+
+ // The existing declaration isn't relevant to us; we're in a
+ // new scope, so clear out the previous declaration.
+ Previous.clear();
+ }
+ }
+
+CreateNewDecl:
+
+ TagDecl *PrevDecl = nullptr;
+ if (Previous.isSingleResult())
+ PrevDecl = cast<TagDecl>(Previous.getFoundDecl());
+
+ // If there is an identifier, use the location of the identifier as the
+ // location of the decl, otherwise use the location of the struct/union
+ // keyword.
+ SourceLocation Loc = NameLoc.isValid() ? NameLoc : KWLoc;
+
+ // Otherwise, create a new declaration. If there is a previous
+ // declaration of the same entity, the two will be linked via
+ // PrevDecl.
+ TagDecl *New;
+
+ bool IsForwardReference = false;
+ if (Kind == TTK_Enum) {
+ // FIXME: Tag decls should be chained to any simultaneous vardecls, e.g.:
+ // enum X { A, B, C } D; D should chain to X.
+ New = EnumDecl::Create(Context, SearchDC, KWLoc, Loc, Name,
+ cast_or_null<EnumDecl>(PrevDecl), ScopedEnum,
+ ScopedEnumUsesClassTag, !EnumUnderlying.isNull());
+ // If this is an undefined enum, warn.
+ if (TUK != TUK_Definition && !Invalid) {
+ TagDecl *Def;
+ if ((getLangOpts().CPlusPlus11 || getLangOpts().ObjC2) &&
+ cast<EnumDecl>(New)->isFixed()) {
+ // C++0x: 7.2p2: opaque-enum-declaration.
+ // Conflicts are diagnosed above. Do nothing.
+ }
+ else if (PrevDecl && (Def = cast<EnumDecl>(PrevDecl)->getDefinition())) {
+ Diag(Loc, diag::ext_forward_ref_enum_def)
+ << New;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ } else {
+ unsigned DiagID = diag::ext_forward_ref_enum;
+ if (getLangOpts().MSVCCompat)
+ DiagID = diag::ext_ms_forward_ref_enum;
+ else if (getLangOpts().CPlusPlus)
+ DiagID = diag::err_forward_ref_enum;
+ Diag(Loc, DiagID);
+
+ // If this is a forward-declared reference to an enumeration, make a
+ // note of it; we won't actually be introducing the declaration into
+ // the declaration context.
+ if (TUK == TUK_Reference)
+ IsForwardReference = true;
+ }
+ }
+
+ if (EnumUnderlying) {
+ EnumDecl *ED = cast<EnumDecl>(New);
+ if (TypeSourceInfo *TI = EnumUnderlying.dyn_cast<TypeSourceInfo*>())
+ ED->setIntegerTypeSourceInfo(TI);
+ else
+ ED->setIntegerType(QualType(EnumUnderlying.get<const Type*>(), 0));
+ ED->setPromotionType(ED->getIntegerType());
+ }
+
+ } else {
+ // struct/union/class
+
+ // FIXME: Tag decls should be chained to any simultaneous vardecls, e.g.:
+ // struct X { int A; } D; D should chain to X.
+ if (getLangOpts().CPlusPlus) {
+ // FIXME: Look for a way to use RecordDecl for simple structs.
+ New = CXXRecordDecl::Create(Context, Kind, SearchDC, KWLoc, Loc, Name,
+ cast_or_null<CXXRecordDecl>(PrevDecl));
+
+ if (isStdBadAlloc && (!StdBadAlloc || getStdBadAlloc()->isImplicit()))
+ StdBadAlloc = cast<CXXRecordDecl>(New);
+ } else
+ New = RecordDecl::Create(Context, Kind, SearchDC, KWLoc, Loc, Name,
+ cast_or_null<RecordDecl>(PrevDecl));
+ }
+
+ // C++11 [dcl.type]p3:
+ // A type-specifier-seq shall not define a class or enumeration [...].
+ if (getLangOpts().CPlusPlus && IsTypeSpecifier && TUK == TUK_Definition) {
+ Diag(New->getLocation(), diag::err_type_defined_in_type_specifier)
+ << Context.getTagDeclType(New);
+ Invalid = true;
+ }
+
+ // Maybe add qualifier info.
+ if (SS.isNotEmpty()) {
+ if (SS.isSet()) {
+ // If this is either a declaration or a definition, check the
+ // nested-name-specifier against the current context. We don't do this
+ // for explicit specializations, because they have similar checking
+ // (with more specific diagnostics) in the call to
+ // CheckMemberSpecialization, below.
+ if (!isExplicitSpecialization &&
+ (TUK == TUK_Definition || TUK == TUK_Declaration) &&
+ diagnoseQualifiedDeclaration(SS, DC, OrigName, Loc))
+ Invalid = true;
+
+ New->setQualifierInfo(SS.getWithLocInContext(Context));
+ if (TemplateParameterLists.size() > 0) {
+ New->setTemplateParameterListsInfo(Context, TemplateParameterLists);
+ }
+ }
+ else
+ Invalid = true;
+ }
+
+ if (RecordDecl *RD = dyn_cast<RecordDecl>(New)) {
+ // Add alignment attributes if necessary; these attributes are checked when
+ // the ASTContext lays out the structure.
+ //
+ // It is important for implementing the correct semantics that this
+ // happen here (in ActOnTag). The #pragma pack stack is
+ // maintained as a result of parser callbacks which can occur at
+ // many points during the parsing of a struct declaration (because
+ // the #pragma tokens are effectively skipped over during the
+ // parsing of the struct).
+ if (TUK == TUK_Definition) {
+ AddAlignmentAttributesForRecord(RD);
+ AddMsStructLayoutForRecord(RD);
+ }
+ }
+
+ if (ModulePrivateLoc.isValid()) {
+ if (isExplicitSpecialization)
+ Diag(New->getLocation(), diag::err_module_private_specialization)
+ << 2
+ << FixItHint::CreateRemoval(ModulePrivateLoc);
+ // __module_private__ does not apply to local classes. However, we only
+ // diagnose this as an error when the declaration specifiers are
+ // freestanding. Here, we just ignore the __module_private__.
+ else if (!SearchDC->isFunctionOrMethod())
+ New->setModulePrivate();
+ }
+
+ // If this is a specialization of a member class (of a class template),
+ // check the specialization.
+ if (isExplicitSpecialization && CheckMemberSpecialization(New, Previous))
+ Invalid = true;
+
+ // If we're declaring or defining a tag in function prototype scope in C,
+ // note that this type can only be used within the function and add it to
+ // the list of decls to inject into the function definition scope.
+ if ((Name || Kind == TTK_Enum) &&
+ getNonFieldDeclScope(S)->isFunctionPrototypeScope()) {
+ if (getLangOpts().CPlusPlus) {
+ // C++ [dcl.fct]p6:
+ // Types shall not be defined in return or parameter types.
+ if (TUK == TUK_Definition && !IsTypeSpecifier) {
+ Diag(Loc, diag::err_type_defined_in_param_type)
+ << Name;
+ Invalid = true;
+ }
+ } else {
+ Diag(Loc, diag::warn_decl_in_param_list) << Context.getTagDeclType(New);
+ }
+ DeclsInPrototypeScope.push_back(New);
+ }
+
+ if (Invalid)
+ New->setInvalidDecl();
+
+ if (Attr)
+ ProcessDeclAttributeList(S, New, Attr);
+
+ // Set the lexical context. If the tag has a C++ scope specifier, the
+ // lexical context will be different from the semantic context.
+ New->setLexicalDeclContext(CurContext);
+
+ // Mark this as a friend decl if applicable.
+ // In Microsoft mode, a friend declaration also acts as a forward
+ // declaration so we always pass true to setObjectOfFriendDecl to make
+ // the tag name visible.
+ if (TUK == TUK_Friend)
+ New->setObjectOfFriendDecl(getLangOpts().MSVCCompat);
+
+ // Set the access specifier.
+ if (!Invalid && SearchDC->isRecord())
+ SetMemberAccessSpecifier(New, PrevDecl, AS);
+
+ if (TUK == TUK_Definition)
+ New->startDefinition();
+
+ // If this has an identifier, add it to the scope stack.
+ if (TUK == TUK_Friend) {
+ // We might be replacing an existing declaration in the lookup tables;
+ // if so, borrow its access specifier.
+ if (PrevDecl)
+ New->setAccess(PrevDecl->getAccess());
+
+ DeclContext *DC = New->getDeclContext()->getRedeclContext();
+ DC->makeDeclVisibleInContext(New);
+ if (Name) // can be null along some error paths
+ if (Scope *EnclosingScope = getScopeForDeclContext(S, DC))
+ PushOnScopeChains(New, EnclosingScope, /* AddToContext = */ false);
+ } else if (Name) {
+ S = getNonFieldDeclScope(S);
+ PushOnScopeChains(New, S, !IsForwardReference);
+ if (IsForwardReference)
+ SearchDC->makeDeclVisibleInContext(New);
+
+ } else {
+ CurContext->addDecl(New);
+ }
+
+ // If this is the C FILE type, notify the AST context.
+ if (IdentifierInfo *II = New->getIdentifier())
+ if (!New->isInvalidDecl() &&
+ New->getDeclContext()->getRedeclContext()->isTranslationUnit() &&
+ II->isStr("FILE"))
+ Context.setFILEDecl(New);
+
+ if (PrevDecl)
+ mergeDeclAttributes(New, PrevDecl);
+
+ // If there's a #pragma GCC visibility in scope, set the visibility of this
+ // record.
+ AddPushedVisibilityAttribute(New);
+
+ OwnedDecl = true;
+ // In C++, don't return an invalid declaration. We can't recover well from
+ // the cases where we make the type anonymous.
+ return (Invalid && getLangOpts().CPlusPlus) ? nullptr : New;
+}
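+
+ // Illustrative sketch (editor's example, not part of this change): the
+ // TagUseKind values correspond to source forms roughly as follows:
+ //
+ //   struct S;          // TUK_Declaration
+ //   struct S { };      // TUK_Definition
+ //   struct S *p;       // TUK_Reference (elaborated-type-specifier)
+ //   friend struct S;   // TUK_Friend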
+
+void Sema::ActOnTagStartDefinition(Scope *S, Decl *TagD) {
+ AdjustDeclIfTemplate(TagD);
+ TagDecl *Tag = cast<TagDecl>(TagD);
+
+ // Enter the tag context.
+ PushDeclContext(S, Tag);
+
+ ActOnDocumentableDecl(TagD);
+
+ // If there's a #pragma GCC visibility in scope, set the visibility of this
+ // record.
+ AddPushedVisibilityAttribute(Tag);
+}
+
+Decl *Sema::ActOnObjCContainerStartDefinition(Decl *IDecl) {
+ assert(isa<ObjCContainerDecl>(IDecl) &&
+ "ActOnObjCContainerStartDefinition - Not ObjCContainerDecl");
+ DeclContext *OCD = cast<DeclContext>(IDecl);
+ assert(getContainingDC(OCD) == CurContext &&
+ "The next DeclContext should be lexically contained in the current one.");
+ CurContext = OCD;
+ return IDecl;
+}
+
+void Sema::ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagD,
+ SourceLocation FinalLoc,
+ bool IsFinalSpelledSealed,
+ SourceLocation LBraceLoc) {
+ AdjustDeclIfTemplate(TagD);
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(TagD);
+
+ FieldCollector->StartClass();
+
+ if (!Record->getIdentifier())
+ return;
+
+ if (FinalLoc.isValid())
+ Record->addAttr(new (Context)
+ FinalAttr(FinalLoc, Context, IsFinalSpelledSealed));
+
+ // C++ [class]p2:
+ // [...] The class-name is also inserted into the scope of the
+ // class itself; this is known as the injected-class-name. For
+ // purposes of access checking, the injected-class-name is treated
+ // as if it were a public member name.
+ CXXRecordDecl *InjectedClassName
+ = CXXRecordDecl::Create(Context, Record->getTagKind(), CurContext,
+ Record->getLocStart(), Record->getLocation(),
+ Record->getIdentifier(),
+ /*PrevDecl=*/nullptr,
+ /*DelayTypeCreation=*/true);
+ Context.getTypeDeclType(InjectedClassName, Record);
+ InjectedClassName->setImplicit();
+ InjectedClassName->setAccess(AS_public);
+ if (ClassTemplateDecl *Template = Record->getDescribedClassTemplate())
+ InjectedClassName->setDescribedClassTemplate(Template);
+ PushOnScopeChains(InjectedClassName, S);
+ assert(InjectedClassName->isInjectedClassName() &&
+ "Broken injected-class-name");
+}
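+
+ // Illustrative sketch (editor's example, not part of this change): the
+ // injected-class-name is what lets a class refer to itself inside its own
+ // braces, e.g.
+ //
+ //   struct Node {
+ //     Node *next;   // 'Node' resolves to the injected-class-name
+ //   };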
+
+void Sema::ActOnTagFinishDefinition(Scope *S, Decl *TagD,
+ SourceLocation RBraceLoc) {
+ AdjustDeclIfTemplate(TagD);
+ TagDecl *Tag = cast<TagDecl>(TagD);
+ Tag->setRBraceLoc(RBraceLoc);
+
+ // Make sure we "complete" the definition even if it is invalid.
+ if (Tag->isBeingDefined()) {
+ assert(Tag->isInvalidDecl() && "We should already have completed it");
+ if (RecordDecl *RD = dyn_cast<RecordDecl>(Tag))
+ RD->completeDefinition();
+ }
+
+ if (isa<CXXRecordDecl>(Tag))
+ FieldCollector->FinishClass();
+
+ // Exit the scope of this tag's definition.
+ PopDeclContext();
+
+ if (getCurLexicalContext()->isObjCContainer() &&
+ Tag->getDeclContext()->isFileContext())
+ Tag->setTopLevelDeclInObjCContainer();
+
+ // Notify the consumer that we've defined a tag.
+ if (!Tag->isInvalidDecl())
+ Consumer.HandleTagDeclDefinition(Tag);
+}
+
+void Sema::ActOnObjCContainerFinishDefinition() {
+ // Exit the scope of this interface definition.
+ PopDeclContext();
+}
+
+void Sema::ActOnObjCTemporaryExitContainerContext(DeclContext *DC) {
+ assert(DC == CurContext && "Mismatch of container contexts");
+ OriginalLexicalContext = DC;
+ ActOnObjCContainerFinishDefinition();
+}
+
+void Sema::ActOnObjCReenterContainerContext(DeclContext *DC) {
+ ActOnObjCContainerStartDefinition(cast<Decl>(DC));
+ OriginalLexicalContext = nullptr;
+}
+
+void Sema::ActOnTagDefinitionError(Scope *S, Decl *TagD) {
+ AdjustDeclIfTemplate(TagD);
+ TagDecl *Tag = cast<TagDecl>(TagD);
+ Tag->setInvalidDecl();
+
+ // Make sure we "complete" the definition even if it is invalid.
+ if (Tag->isBeingDefined()) {
+ if (RecordDecl *RD = dyn_cast<RecordDecl>(Tag))
+ RD->completeDefinition();
+ }
+
+ // We're undoing ActOnTagStartDefinition here, not
+ // ActOnStartCXXMemberDeclarations, so we don't have to mess with
+ // the FieldCollector.
+
+ PopDeclContext();
+}
+
+// Note that FieldName may be null for anonymous bitfields.
+ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
+ IdentifierInfo *FieldName,
+ QualType FieldTy, bool IsMsStruct,
+ Expr *BitWidth, bool *ZeroWidth) {
+ // Default to true; that shouldn't confuse checks for emptiness.
+ if (ZeroWidth)
+ *ZeroWidth = true;
+
+ // C99 6.7.2.1p4 - verify the field type.
+ // C++ 9.6p3: A bit-field shall have integral or enumeration type.
+ if (!FieldTy->isDependentType() && !FieldTy->isIntegralOrEnumerationType()) {
+ // Handle incomplete types with a specific error.
+ if (RequireCompleteType(FieldLoc, FieldTy, diag::err_field_incomplete))
+ return ExprError();
+ if (FieldName)
+ return Diag(FieldLoc, diag::err_not_integral_type_bitfield)
+ << FieldName << FieldTy << BitWidth->getSourceRange();
+ return Diag(FieldLoc, diag::err_not_integral_type_anon_bitfield)
+ << FieldTy << BitWidth->getSourceRange();
+ } else if (DiagnoseUnexpandedParameterPack(const_cast<Expr *>(BitWidth),
+ UPPC_BitFieldWidth))
+ return ExprError();
+
+ // If the bit-width is type- or value-dependent, don't try to check
+ // it now.
+ if (BitWidth->isValueDependent() || BitWidth->isTypeDependent())
+ return BitWidth;
+
+ llvm::APSInt Value;
+ ExprResult ICE = VerifyIntegerConstantExpression(BitWidth, &Value);
+ if (ICE.isInvalid())
+ return ICE;
+ BitWidth = ICE.get();
+
+ if (Value != 0 && ZeroWidth)
+ *ZeroWidth = false;
+
+ // Zero-width bitfield is ok for anonymous field.
+ if (Value == 0 && FieldName)
+ return Diag(FieldLoc, diag::err_bitfield_has_zero_width) << FieldName;
+
+ if (Value.isSigned() && Value.isNegative()) {
+ if (FieldName)
+ return Diag(FieldLoc, diag::err_bitfield_has_negative_width)
+ << FieldName << Value.toString(10);
+ return Diag(FieldLoc, diag::err_anon_bitfield_has_negative_width)
+ << Value.toString(10);
+ }
+
+ if (!FieldTy->isDependentType()) {
+ uint64_t TypeStorageSize = Context.getTypeSize(FieldTy);
+ uint64_t TypeWidth = Context.getIntWidth(FieldTy);
+ bool BitfieldIsOverwide = Value.ugt(TypeWidth);
+
+ // Over-wide bitfields are an error in C or when using the MSVC bitfield
+ // ABI.
+ bool CStdConstraintViolation =
+ BitfieldIsOverwide && !getLangOpts().CPlusPlus;
+ bool MSBitfieldViolation =
+ Value.ugt(TypeStorageSize) &&
+ (IsMsStruct || Context.getTargetInfo().getCXXABI().isMicrosoft());
+ if (CStdConstraintViolation || MSBitfieldViolation) {
+ unsigned DiagWidth =
+ CStdConstraintViolation ? TypeWidth : TypeStorageSize;
+ if (FieldName)
+ return Diag(FieldLoc, diag::err_bitfield_width_exceeds_type_width)
+ << FieldName << (unsigned)Value.getZExtValue()
+ << !CStdConstraintViolation << DiagWidth;
+
+ return Diag(FieldLoc, diag::err_anon_bitfield_width_exceeds_type_width)
+ << (unsigned)Value.getZExtValue() << !CStdConstraintViolation
+ << DiagWidth;
+ }
+
+ // Warn on types where the user might conceivably expect to get all
+ // specified bits as value bits: that's all integral types other than
+ // 'bool'.
+ if (BitfieldIsOverwide && !FieldTy->isBooleanType()) {
+ if (FieldName)
+ Diag(FieldLoc, diag::warn_bitfield_width_exceeds_type_width)
+ << FieldName << (unsigned)Value.getZExtValue()
+ << (unsigned)TypeWidth;
+ else
+ Diag(FieldLoc, diag::warn_anon_bitfield_width_exceeds_type_width)
+ << (unsigned)Value.getZExtValue() << (unsigned)TypeWidth;
+ }
+ }
+
+ return BitWidth;
+}
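+
+ // Illustrative sketch (editor's example, not part of this change),
+ // assuming a 32-bit 'int':
+ //
+ //   struct B {
+ //     int a : 0;    // error: named zero-width bit-field
+ //     int   : 0;    // OK: anonymous zero-width bit-field
+ //     int b : 40;   // error in C; warning in (non-Microsoft) C++, where
+ //                   // the excess bits become padding
+ //   };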
+
+/// ActOnField - Each field of a C struct/union is passed into this in order
+/// to create a FieldDecl object for it.
+Decl *Sema::ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
+ Declarator &D, Expr *BitfieldWidth) {
+ FieldDecl *Res = HandleField(S, cast_or_null<RecordDecl>(TagD),
+ DeclStart, D, static_cast<Expr*>(BitfieldWidth),
+ /*InitStyle=*/ICIS_NoInit, AS_public);
+ return Res;
+}
+
+/// HandleField - Analyze a field of a C struct or a C++ data member.
+///
+FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
+ SourceLocation DeclStart,
+ Declarator &D, Expr *BitWidth,
+ InClassInitStyle InitStyle,
+ AccessSpecifier AS) {
+ IdentifierInfo *II = D.getIdentifier();
+ SourceLocation Loc = DeclStart;
+ if (II) Loc = D.getIdentifierLoc();
+
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType T = TInfo->getType();
+ if (getLangOpts().CPlusPlus) {
+ CheckExtraCXXDefaultArguments(D);
+
+ if (DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo,
+ UPPC_DataMemberType)) {
+ D.setInvalidType();
+ T = Context.IntTy;
+ TInfo = Context.getTrivialTypeSourceInfo(T, Loc);
+ }
+ }
+
+ // TR 18037 does not allow fields to be declared with address spaces.
+ if (T.getQualifiers().hasAddressSpace()) {
+ Diag(Loc, diag::err_field_with_address_space);
+ D.setInvalidType();
+ }
+
+ // OpenCL 1.2 spec, s6.9 r:
+ // The event type cannot be used to declare a structure or union field.
+ if (LangOpts.OpenCL && T->isEventT()) {
+ Diag(Loc, diag::err_event_t_struct_field);
+ D.setInvalidType();
+ }
+
+ DiagnoseFunctionSpecifiers(D.getDeclSpec());
+
+ if (DeclSpec::TSCS TSCS = D.getDeclSpec().getThreadStorageClassSpec())
+ Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
+ diag::err_invalid_thread)
+ << DeclSpec::getSpecifierName(TSCS);
+
+ // Check to see if this name was declared as a member previously.
+ NamedDecl *PrevDecl = nullptr;
+ LookupResult Previous(*this, II, Loc, LookupMemberName, ForRedeclaration);
+ LookupName(Previous, S);
+ switch (Previous.getResultKind()) {
+ case LookupResult::Found:
+ case LookupResult::FoundUnresolvedValue:
+ PrevDecl = Previous.getAsSingle<NamedDecl>();
+ break;
+
+ case LookupResult::FoundOverloaded:
+ PrevDecl = Previous.getRepresentativeDecl();
+ break;
+
+ case LookupResult::NotFound:
+ case LookupResult::NotFoundInCurrentInstantiation:
+ case LookupResult::Ambiguous:
+ break;
+ }
+ Previous.suppressDiagnostics();
+
+ if (PrevDecl && PrevDecl->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl);
+ // Just pretend that we didn't see the previous declaration.
+ PrevDecl = nullptr;
+ }
+
+ if (PrevDecl && !isDeclInScope(PrevDecl, Record, S))
+ PrevDecl = nullptr;
+
+ bool Mutable
+ = (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_mutable);
+ SourceLocation TSSL = D.getLocStart();
+ FieldDecl *NewFD
+ = CheckFieldDecl(II, T, TInfo, Record, Loc, Mutable, BitWidth, InitStyle,
+ TSSL, AS, PrevDecl, &D);
+
+ if (NewFD->isInvalidDecl())
+ Record->setInvalidDecl();
+
+ if (D.getDeclSpec().isModulePrivateSpecified())
+ NewFD->setModulePrivate();
+
+ if (NewFD->isInvalidDecl() && PrevDecl) {
+ // Don't introduce NewFD into scope; there's already something
+ // with the same name in the same scope.
+ } else if (II) {
+ PushOnScopeChains(NewFD, S);
+ } else
+ Record->addDecl(NewFD);
+
+ return NewFD;
+}
+
+/// \brief Build a new FieldDecl and check its well-formedness.
+///
+/// This routine builds a new FieldDecl given the fields name, type,
+/// record, etc. \p PrevDecl should refer to any previous declaration
+/// with the same name and in the same scope as the field to be
+/// created.
+///
+/// \returns a new FieldDecl.
+///
+/// \todo The Declarator argument is a hack. It will be removed once
+FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
+ TypeSourceInfo *TInfo,
+ RecordDecl *Record, SourceLocation Loc,
+ bool Mutable, Expr *BitWidth,
+ InClassInitStyle InitStyle,
+ SourceLocation TSSL,
+ AccessSpecifier AS, NamedDecl *PrevDecl,
+ Declarator *D) {
+ IdentifierInfo *II = Name.getAsIdentifierInfo();
+ bool InvalidDecl = false;
+ if (D) InvalidDecl = D->isInvalidType();
+
+ // If we receive a broken type, recover by assuming 'int' and
+ // marking this declaration as invalid.
+ if (T.isNull()) {
+ InvalidDecl = true;
+ T = Context.IntTy;
+ }
+
+ QualType EltTy = Context.getBaseElementType(T);
+ if (!EltTy->isDependentType()) {
+ if (RequireCompleteType(Loc, EltTy, diag::err_field_incomplete)) {
+ // Fields of incomplete type force their record to be invalid.
+ Record->setInvalidDecl();
+ InvalidDecl = true;
+ } else {
+ NamedDecl *Def;
+ EltTy->isIncompleteType(&Def);
+ if (Def && Def->isInvalidDecl()) {
+ Record->setInvalidDecl();
+ InvalidDecl = true;
+ }
+ }
+ }
+
+ // OpenCL v1.2 s6.9.c: bitfields are not supported.
+ if (BitWidth && getLangOpts().OpenCL) {
+ Diag(Loc, diag::err_opencl_bitfields);
+ InvalidDecl = true;
+ }
+
+ // C99 6.7.2.1p8: A member of a structure or union may have any type other
+ // than a variably modified type.
+ if (!InvalidDecl && T->isVariablyModifiedType()) {
+ bool SizeIsNegative;
+ llvm::APSInt Oversized;
+
+ TypeSourceInfo *FixedTInfo =
+ TryToFixInvalidVariablyModifiedTypeSourceInfo(TInfo, Context,
+ SizeIsNegative,
+ Oversized);
+ if (FixedTInfo) {
+ Diag(Loc, diag::warn_illegal_constant_array_size);
+ TInfo = FixedTInfo;
+ T = FixedTInfo->getType();
+ } else {
+ if (SizeIsNegative)
+ Diag(Loc, diag::err_typecheck_negative_array_size);
+ else if (Oversized.getBoolValue())
+ Diag(Loc, diag::err_array_too_large)
+ << Oversized.toString(10);
+ else
+ Diag(Loc, diag::err_typecheck_field_variable_size);
+ InvalidDecl = true;
+ }
+ }
+
+  // Fields cannot have abstract class types.
+ if (!InvalidDecl && RequireNonAbstractType(Loc, T,
+ diag::err_abstract_type_in_decl,
+ AbstractFieldType))
+ InvalidDecl = true;
+
+ bool ZeroWidth = false;
+ if (InvalidDecl)
+ BitWidth = nullptr;
+ // If this is declared as a bit-field, check the bit-field.
+ if (BitWidth) {
+ BitWidth = VerifyBitField(Loc, II, T, Record->isMsStruct(Context), BitWidth,
+ &ZeroWidth).get();
+ if (!BitWidth) {
+ InvalidDecl = true;
+ BitWidth = nullptr;
+ ZeroWidth = false;
+ }
+ }
+
+ // Check that 'mutable' is consistent with the type of the declaration.
+ if (!InvalidDecl && Mutable) {
+ unsigned DiagID = 0;
+ if (T->isReferenceType())
+ DiagID = getLangOpts().MSVCCompat ? diag::ext_mutable_reference
+ : diag::err_mutable_reference;
+ else if (T.isConstQualified())
+ DiagID = diag::err_mutable_const;
+
+ if (DiagID) {
+ SourceLocation ErrLoc = Loc;
+ if (D && D->getDeclSpec().getStorageClassSpecLoc().isValid())
+ ErrLoc = D->getDeclSpec().getStorageClassSpecLoc();
+ Diag(ErrLoc, DiagID);
+ if (DiagID != diag::ext_mutable_reference) {
+ Mutable = false;
+ InvalidDecl = true;
+ }
+ }
+ }
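+
+  // For illustration (editor's note, not from this change), the checks above
+  // reject, for example:
+  //   struct S { mutable const int x; };  // 'mutable' and 'const' conflict
+  //   struct T { mutable int &r; };       // error, unless MSVC compat mode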
+
+ // C++11 [class.union]p8 (DR1460):
+ // At most one variant member of a union may have a
+ // brace-or-equal-initializer.
+ if (InitStyle != ICIS_NoInit)
+ checkDuplicateDefaultInit(*this, cast<CXXRecordDecl>(Record), Loc);
+
+ FieldDecl *NewFD = FieldDecl::Create(Context, Record, TSSL, Loc, II, T, TInfo,
+ BitWidth, Mutable, InitStyle);
+ if (InvalidDecl)
+ NewFD->setInvalidDecl();
+
+ if (PrevDecl && !isa<TagDecl>(PrevDecl)) {
+ Diag(Loc, diag::err_duplicate_member) << II;
+ Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ }
+
+ if (!InvalidDecl && getLangOpts().CPlusPlus) {
+ if (Record->isUnion()) {
+ if (const RecordType *RT = EltTy->getAs<RecordType>()) {
+ CXXRecordDecl* RDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (RDecl->getDefinition()) {
+ // C++ [class.union]p1: An object of a class with a non-trivial
+ // constructor, a non-trivial copy constructor, a non-trivial
+ // destructor, or a non-trivial copy assignment operator
+ // cannot be a member of a union, nor can an array of such
+ // objects.
+ if (CheckNontrivialField(NewFD))
+ NewFD->setInvalidDecl();
+ }
+ }
+
+ // C++ [class.union]p1: If a union contains a member of reference type,
+ // the program is ill-formed, except when compiling with MSVC extensions
+ // enabled.
+ if (EltTy->isReferenceType()) {
+ Diag(NewFD->getLocation(), getLangOpts().MicrosoftExt ?
+ diag::ext_union_member_of_reference_type :
+ diag::err_union_member_of_reference_type)
+ << NewFD->getDeclName() << EltTy;
+ if (!getLangOpts().MicrosoftExt)
+ NewFD->setInvalidDecl();
+ }
+ }
+ }
+
+ // FIXME: We need to pass in the attributes given an AST
+ // representation, not a parser representation.
+ if (D) {
+ // FIXME: The current scope is almost... but not entirely... correct here.
+ ProcessDeclAttributes(getCurScope(), NewFD, *D);
+
+ if (NewFD->hasAttrs())
+ CheckAlignasUnderalignment(NewFD);
+ }
+
+  // In auto-retain/release, infer strong retention for fields of
+  // retainable type.
+ if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewFD))
+ NewFD->setInvalidDecl();
+
+ if (T.isObjCGCWeak())
+ Diag(Loc, diag::warn_attribute_weak_on_field);
+
+ NewFD->setAccess(AS);
+ return NewFD;
+}
+
+bool Sema::CheckNontrivialField(FieldDecl *FD) {
+ assert(FD);
+ assert(getLangOpts().CPlusPlus && "valid check only for C++");
+
+ if (FD->isInvalidDecl() || FD->getType()->isDependentType())
+ return false;
+
+ QualType EltTy = Context.getBaseElementType(FD->getType());
+ if (const RecordType *RT = EltTy->getAs<RecordType>()) {
+ CXXRecordDecl *RDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (RDecl->getDefinition()) {
+ // We check for copy constructors before constructors
+ // because otherwise we'll never get complaints about
+ // copy constructors.
+
+ CXXSpecialMember member = CXXInvalid;
+ // We're required to check for any non-trivial constructors. Since the
+ // implicit default constructor is suppressed if there are any
+ // user-declared constructors, we just need to check that there is a
+ // trivial default constructor and a trivial copy constructor. (We don't
+ // worry about move constructors here, since this is a C++98 check.)
+ if (RDecl->hasNonTrivialCopyConstructor())
+ member = CXXCopyConstructor;
+ else if (!RDecl->hasTrivialDefaultConstructor())
+ member = CXXDefaultConstructor;
+ else if (RDecl->hasNonTrivialCopyAssignment())
+ member = CXXCopyAssignment;
+ else if (RDecl->hasNonTrivialDestructor())
+ member = CXXDestructor;
+
+ if (member != CXXInvalid) {
+ if (!getLangOpts().CPlusPlus11 &&
+ getLangOpts().ObjCAutoRefCount && RDecl->hasObjectMember()) {
+ // Objective-C++ ARC: it is an error to have a non-trivial field of
+ // a union. However, system headers in Objective-C programs
+ // occasionally have Objective-C lifetime objects within unions,
+ // and rather than cause the program to fail, we make those
+ // members unavailable.
+ SourceLocation Loc = FD->getLocation();
+ if (getSourceManager().isInSystemHeader(Loc)) {
+ if (!FD->hasAttr<UnavailableAttr>())
+ FD->addAttr(UnavailableAttr::CreateImplicit(Context, "",
+ UnavailableAttr::IR_ARCFieldWithOwnership, Loc));
+ return false;
+ }
+ }
+
+ Diag(FD->getLocation(), getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_nontrivial_union_or_anon_struct_member :
+ diag::err_illegal_union_or_anon_struct_member)
+ << FD->getParent()->isUnion() << FD->getDeclName() << member;
+ DiagnoseNontrivial(RDecl, member);
+ return !getLangOpts().CPlusPlus11;
+ }
+ }
+ }
+
+ return false;
+}
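+
+// e.g. (illustrative, editor's sketch): in C++98,
+//   union U { std::string s; };  // error: non-trivial member of a union
+// whereas C++11 permits it and only a -Wc++98-compat warning is emitted.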
+
+/// TranslateIvarVisibility - Translate visibility from a token ID to an
+/// AST enum value.
+static ObjCIvarDecl::AccessControl
+TranslateIvarVisibility(tok::ObjCKeywordKind ivarVisibility) {
+ switch (ivarVisibility) {
+  default: llvm_unreachable("Unknown visibility kind");
+ case tok::objc_private: return ObjCIvarDecl::Private;
+ case tok::objc_public: return ObjCIvarDecl::Public;
+ case tok::objc_protected: return ObjCIvarDecl::Protected;
+ case tok::objc_package: return ObjCIvarDecl::Package;
+ }
+}
+
+/// ActOnIvar - Each ivar field of an Objective-C class is passed into this
+/// in order to create an IvarDecl object for it.
+Decl *Sema::ActOnIvar(Scope *S,
+ SourceLocation DeclStart,
+ Declarator &D, Expr *BitfieldWidth,
+ tok::ObjCKeywordKind Visibility) {
+
+ IdentifierInfo *II = D.getIdentifier();
+ Expr *BitWidth = (Expr*)BitfieldWidth;
+ SourceLocation Loc = DeclStart;
+ if (II) Loc = D.getIdentifierLoc();
+
+ // FIXME: Unnamed fields can be handled in various different ways, for
+ // example, unnamed unions inject all members into the struct namespace!
+
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType T = TInfo->getType();
+
+ if (BitWidth) {
+ // 6.7.2.1p3, 6.7.2.1p4
+ BitWidth = VerifyBitField(Loc, II, T, /*IsMsStruct*/false, BitWidth).get();
+ if (!BitWidth)
+ D.setInvalidType();
+  } else {
+    // Not a bitfield.
+    // FIXME: validate II.
+  }
+ if (T->isReferenceType()) {
+ Diag(Loc, diag::err_ivar_reference_type);
+ D.setInvalidType();
+ }
+ // C99 6.7.2.1p8: A member of a structure or union may have any type other
+ // than a variably modified type.
+ else if (T->isVariablyModifiedType()) {
+ Diag(Loc, diag::err_typecheck_ivar_variable_size);
+ D.setInvalidType();
+ }
+
+ // Get the visibility (access control) for this ivar.
+ ObjCIvarDecl::AccessControl ac =
+ Visibility != tok::objc_not_keyword ? TranslateIvarVisibility(Visibility)
+ : ObjCIvarDecl::None;
+ // Must set ivar's DeclContext to its enclosing interface.
+ ObjCContainerDecl *EnclosingDecl = cast<ObjCContainerDecl>(CurContext);
+ if (!EnclosingDecl || EnclosingDecl->isInvalidDecl())
+ return nullptr;
+ ObjCContainerDecl *EnclosingContext;
+ if (ObjCImplementationDecl *IMPDecl =
+ dyn_cast<ObjCImplementationDecl>(EnclosingDecl)) {
+ if (LangOpts.ObjCRuntime.isFragile()) {
+ // Case of ivar declared in an implementation. Context is that of its class.
+ EnclosingContext = IMPDecl->getClassInterface();
+ assert(EnclosingContext && "Implementation has no class interface!");
+ }
+ else
+ EnclosingContext = EnclosingDecl;
+ } else {
+ if (ObjCCategoryDecl *CDecl =
+ dyn_cast<ObjCCategoryDecl>(EnclosingDecl)) {
+ if (LangOpts.ObjCRuntime.isFragile() || !CDecl->IsClassExtension()) {
+ Diag(Loc, diag::err_misplaced_ivar) << CDecl->IsClassExtension();
+ return nullptr;
+ }
+ }
+ EnclosingContext = EnclosingDecl;
+ }
+
+ // Construct the decl.
+ ObjCIvarDecl *NewID = ObjCIvarDecl::Create(Context, EnclosingContext,
+ DeclStart, Loc, II, T,
+ TInfo, ac, (Expr *)BitfieldWidth);
+
+ if (II) {
+ NamedDecl *PrevDecl = LookupSingleName(S, II, Loc, LookupMemberName,
+ ForRedeclaration);
+ if (PrevDecl && isDeclInScope(PrevDecl, EnclosingContext, S)
+ && !isa<TagDecl>(PrevDecl)) {
+ Diag(Loc, diag::err_duplicate_member) << II;
+ Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
+ NewID->setInvalidDecl();
+ }
+ }
+
+ // Process attributes attached to the ivar.
+ ProcessDeclAttributes(S, NewID, D);
+
+ if (D.isInvalidType())
+ NewID->setInvalidDecl();
+
+ // In ARC, infer 'retaining' for ivars of retainable type.
+ if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewID))
+ NewID->setInvalidDecl();
+
+ if (D.getDeclSpec().isModulePrivateSpecified())
+ NewID->setModulePrivate();
+
+ if (II) {
+ // FIXME: When interfaces are DeclContexts, we'll need to add
+ // these to the interface.
+ S->AddDecl(NewID);
+ IdResolver.AddDecl(NewID);
+ }
+
+ if (LangOpts.ObjCRuntime.isNonFragile() &&
+ !NewID->isInvalidDecl() && isa<ObjCInterfaceDecl>(EnclosingDecl))
+ Diag(Loc, diag::warn_ivars_in_interface);
+
+ return NewID;
+}
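+
+// Illustrative inputs for the checks above (editor's sketch): in
+// Objective-C++, '@interface I { int &r; }' trips the reference-type check,
+// and a variably modified ivar such as 'int a[n];' trips the variable-size
+// check.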
+
+/// ActOnLastBitfield - This routine handles the rules for synthesized
+/// bitfields in classes and class extensions. For every class \@interface and
+/// class extension \@interface, if the last ivar is a bitfield of any type,
+/// then add an implicit `char :0` ivar to the end of that interface.
+void Sema::ActOnLastBitfield(SourceLocation DeclLoc,
+ SmallVectorImpl<Decl *> &AllIvarDecls) {
+ if (LangOpts.ObjCRuntime.isFragile() || AllIvarDecls.empty())
+ return;
+
+ Decl *ivarDecl = AllIvarDecls[AllIvarDecls.size()-1];
+ ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(ivarDecl);
+
+ if (!Ivar->isBitField() || Ivar->getBitWidthValue(Context) == 0)
+ return;
+ ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(CurContext);
+ if (!ID) {
+    if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(CurContext)) {
+      if (!CD->IsClassExtension())
+        return;
+    } else {
+      // No need to add this to the end of an @implementation.
+      return;
+    }
+ }
+ // All conditions are met. Add a new bitfield to the tail end of ivars.
+ llvm::APInt Zero(Context.getTypeSize(Context.IntTy), 0);
+ Expr * BW = IntegerLiteral::Create(Context, Zero, Context.IntTy, DeclLoc);
+
+ Ivar = ObjCIvarDecl::Create(Context, cast<ObjCContainerDecl>(CurContext),
+ DeclLoc, DeclLoc, nullptr,
+ Context.CharTy,
+ Context.getTrivialTypeSourceInfo(Context.CharTy,
+ DeclLoc),
+ ObjCIvarDecl::Private, BW,
+ true);
+ AllIvarDecls.push_back(Ivar);
+}
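+
+// e.g. (illustrative, editor's sketch): given a class @interface or class
+// extension whose last ivar is 'unsigned flags : 4;', an implicit trailing
+// 'char : 0' ivar is appended; the zero-width bitfield closes the current
+// storage unit after the last declared bitfield.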
+
+void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
+ ArrayRef<Decl *> Fields, SourceLocation LBrac,
+ SourceLocation RBrac, AttributeList *Attr) {
+ assert(EnclosingDecl && "missing record or interface decl");
+
+ // If this is an Objective-C @implementation or category and we have
+ // new fields here we should reset the layout of the interface since
+ // it will now change.
+ if (!Fields.empty() && isa<ObjCContainerDecl>(EnclosingDecl)) {
+ ObjCContainerDecl *DC = cast<ObjCContainerDecl>(EnclosingDecl);
+ switch (DC->getKind()) {
+ default: break;
+ case Decl::ObjCCategory:
+ Context.ResetObjCLayout(cast<ObjCCategoryDecl>(DC)->getClassInterface());
+ break;
+ case Decl::ObjCImplementation:
+ Context.
+ ResetObjCLayout(cast<ObjCImplementationDecl>(DC)->getClassInterface());
+ break;
+ }
+ }
+
+ RecordDecl *Record = dyn_cast<RecordDecl>(EnclosingDecl);
+
+ // Start counting up the number of named members; make sure to include
+ // members of anonymous structs and unions in the total.
+ unsigned NumNamedMembers = 0;
+ if (Record) {
+ for (const auto *I : Record->decls()) {
+ if (const auto *IFD = dyn_cast<IndirectFieldDecl>(I))
+ if (IFD->getDeclName())
+ ++NumNamedMembers;
+ }
+ }
+
+ // Verify that all the fields are okay.
+ SmallVector<FieldDecl*, 32> RecFields;
+
+ bool ARCErrReported = false;
+ for (ArrayRef<Decl *>::iterator i = Fields.begin(), end = Fields.end();
+ i != end; ++i) {
+ FieldDecl *FD = cast<FieldDecl>(*i);
+
+ // Get the type for the field.
+ const Type *FDTy = FD->getType().getTypePtr();
+
+ if (!FD->isAnonymousStructOrUnion()) {
+ // Remember all fields written by the user.
+ RecFields.push_back(FD);
+ }
+
+ // If the field is already invalid for some reason, don't emit more
+ // diagnostics about it.
+ if (FD->isInvalidDecl()) {
+ EnclosingDecl->setInvalidDecl();
+ continue;
+ }
+
+ // C99 6.7.2.1p2:
+ // A structure or union shall not contain a member with
+ // incomplete or function type (hence, a structure shall not
+ // contain an instance of itself, but may contain a pointer to
+ // an instance of itself), except that the last member of a
+ // structure with more than one named member may have incomplete
+ // array type; such a structure (and any union containing,
+ // possibly recursively, a member that is such a structure)
+ // shall not be a member of a structure or an element of an
+ // array.
+ if (FDTy->isFunctionType()) {
+ // Field declared as a function.
+ Diag(FD->getLocation(), diag::err_field_declared_as_function)
+ << FD->getDeclName();
+ FD->setInvalidDecl();
+ EnclosingDecl->setInvalidDecl();
+ continue;
+ } else if (FDTy->isIncompleteArrayType() && Record &&
+ ((i + 1 == Fields.end() && !Record->isUnion()) ||
+ ((getLangOpts().MicrosoftExt ||
+ getLangOpts().CPlusPlus) &&
+ (i + 1 == Fields.end() || Record->isUnion())))) {
+ // Flexible array member.
+      // Microsoft and g++ are more permissive regarding flexible arrays:
+      // they accept a flexible array in a union and also as the sole
+      // element of a struct/class.
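+      // Illustrative cases (editor's sketch, not from this change):
+      //   struct S { int n; int fam[]; };  // OK in C99
+      //   union U { int fam[]; };          // MS/GNU extension, else error
+      //   struct T { int fam[]; };         // as sole member: ext or error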
+ unsigned DiagID = 0;
+ if (Record->isUnion())
+ DiagID = getLangOpts().MicrosoftExt
+ ? diag::ext_flexible_array_union_ms
+ : getLangOpts().CPlusPlus
+ ? diag::ext_flexible_array_union_gnu
+ : diag::err_flexible_array_union;
+ else if (Fields.size() == 1)
+ DiagID = getLangOpts().MicrosoftExt
+ ? diag::ext_flexible_array_empty_aggregate_ms
+ : getLangOpts().CPlusPlus
+ ? diag::ext_flexible_array_empty_aggregate_gnu
+ : NumNamedMembers < 1
+ ? diag::err_flexible_array_empty_aggregate
+ : 0;
+
+ if (DiagID)
+ Diag(FD->getLocation(), DiagID) << FD->getDeclName()
+ << Record->getTagKind();
+ // While the layout of types that contain virtual bases is not specified
+ // by the C++ standard, both the Itanium and Microsoft C++ ABIs place
+ // virtual bases after the derived members. This would make a flexible
+ // array member declared at the end of an object not adjacent to the end
+ // of the type.
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Record))
+ if (RD->getNumVBases() != 0)
+ Diag(FD->getLocation(), diag::err_flexible_array_virtual_base)
+ << FD->getDeclName() << Record->getTagKind();
+ if (!getLangOpts().C99)
+ Diag(FD->getLocation(), diag::ext_c99_flexible_array_member)
+ << FD->getDeclName() << Record->getTagKind();
+
+ // If the element type has a non-trivial destructor, we would not
+ // implicitly destroy the elements, so disallow it for now.
+ //
+ // FIXME: GCC allows this. We should probably either implicitly delete
+ // the destructor of the containing class, or just allow this.
+ QualType BaseElem = Context.getBaseElementType(FD->getType());
+ if (!BaseElem->isDependentType() && BaseElem.isDestructedType()) {
+ Diag(FD->getLocation(), diag::err_flexible_array_has_nontrivial_dtor)
+ << FD->getDeclName() << FD->getType();
+ FD->setInvalidDecl();
+ EnclosingDecl->setInvalidDecl();
+ continue;
+ }
+ // Okay, we have a legal flexible array member at the end of the struct.
+ Record->setHasFlexibleArrayMember(true);
+ } else if (!FDTy->isDependentType() &&
+ RequireCompleteType(FD->getLocation(), FD->getType(),
+ diag::err_field_incomplete)) {
+ // Incomplete type
+ FD->setInvalidDecl();
+ EnclosingDecl->setInvalidDecl();
+ continue;
+ } else if (const RecordType *FDTTy = FDTy->getAs<RecordType>()) {
+ if (Record && FDTTy->getDecl()->hasFlexibleArrayMember()) {
+ // A type which contains a flexible array member is considered to be a
+ // flexible array member.
+ Record->setHasFlexibleArrayMember(true);
+ if (!Record->isUnion()) {
+ // If this is a struct/class and this is not the last element, reject
+ // it. Note that GCC supports variable sized arrays in the middle of
+ // structures.
+ if (i + 1 != Fields.end())
+ Diag(FD->getLocation(), diag::ext_variable_sized_type_in_struct)
+ << FD->getDeclName() << FD->getType();
+ else {
+ // We support flexible arrays at the end of structs in
+ // other structs as an extension.
+ Diag(FD->getLocation(), diag::ext_flexible_array_in_struct)
+ << FD->getDeclName();
+ }
+ }
+ }
+ if (isa<ObjCContainerDecl>(EnclosingDecl) &&
+ RequireNonAbstractType(FD->getLocation(), FD->getType(),
+ diag::err_abstract_type_in_decl,
+ AbstractIvarType)) {
+      // Ivars cannot have abstract class types.
+ FD->setInvalidDecl();
+ }
+ if (Record && FDTTy->getDecl()->hasObjectMember())
+ Record->setHasObjectMember(true);
+ if (Record && FDTTy->getDecl()->hasVolatileMember())
+ Record->setHasVolatileMember(true);
+ } else if (FDTy->isObjCObjectType()) {
+      // A field cannot be an Objective-C object.
+ Diag(FD->getLocation(), diag::err_statically_allocated_object)
+ << FixItHint::CreateInsertion(FD->getLocation(), "*");
+ QualType T = Context.getObjCObjectPointerType(FD->getType());
+ FD->setType(T);
+ } else if (getLangOpts().ObjCAutoRefCount && Record && !ARCErrReported &&
+ (!getLangOpts().CPlusPlus || Record->isUnion())) {
+ // It's an error in ARC if a field has lifetime.
+ // We don't want to report this in a system header, though,
+ // so we just make the field unavailable.
+ // FIXME: that's really not sufficient; we need to make the type
+ // itself invalid to, say, initialize or copy.
+ QualType T = FD->getType();
+ Qualifiers::ObjCLifetime lifetime = T.getObjCLifetime();
+ if (lifetime && lifetime != Qualifiers::OCL_ExplicitNone) {
+ SourceLocation loc = FD->getLocation();
+ if (getSourceManager().isInSystemHeader(loc)) {
+ if (!FD->hasAttr<UnavailableAttr>()) {
+ FD->addAttr(UnavailableAttr::CreateImplicit(Context, "",
+ UnavailableAttr::IR_ARCFieldWithOwnership, loc));
+ }
+ } else {
+ Diag(FD->getLocation(), diag::err_arc_objc_object_in_tag)
+ << T->isBlockPointerType() << Record->getTagKind();
+ }
+ ARCErrReported = true;
+ }
+ } else if (getLangOpts().ObjC1 &&
+ getLangOpts().getGC() != LangOptions::NonGC &&
+ Record && !Record->hasObjectMember()) {
+ if (FD->getType()->isObjCObjectPointerType() ||
+ FD->getType().isObjCGCStrong())
+ Record->setHasObjectMember(true);
+ else if (Context.getAsArrayType(FD->getType())) {
+ QualType BaseType = Context.getBaseElementType(FD->getType());
+ if (BaseType->isRecordType() &&
+ BaseType->getAs<RecordType>()->getDecl()->hasObjectMember())
+ Record->setHasObjectMember(true);
+ else if (BaseType->isObjCObjectPointerType() ||
+ BaseType.isObjCGCStrong())
+ Record->setHasObjectMember(true);
+ }
+ }
+ if (Record && FD->getType().isVolatileQualified())
+ Record->setHasVolatileMember(true);
+ // Keep track of the number of named members.
+ if (FD->getIdentifier())
+ ++NumNamedMembers;
+ }
+
+ // Okay, we successfully defined 'Record'.
+ if (Record) {
+ bool Completed = false;
+ if (CXXRecordDecl *CXXRecord = dyn_cast<CXXRecordDecl>(Record)) {
+ if (!CXXRecord->isInvalidDecl()) {
+ // Set access bits correctly on the directly-declared conversions.
+ for (CXXRecordDecl::conversion_iterator
+ I = CXXRecord->conversion_begin(),
+ E = CXXRecord->conversion_end(); I != E; ++I)
+ I.setAccess((*I)->getAccess());
+
+ if (!CXXRecord->isDependentType()) {
+ if (CXXRecord->hasUserDeclaredDestructor()) {
+ // Adjust user-defined destructor exception spec.
+ if (getLangOpts().CPlusPlus11)
+ AdjustDestructorExceptionSpec(CXXRecord,
+ CXXRecord->getDestructor());
+ }
+
+ // Add any implicitly-declared members to this class.
+ AddImplicitlyDeclaredMembersToClass(CXXRecord);
+
+ // If we have virtual base classes, we may end up finding multiple
+ // final overriders for a given virtual function. Check for this
+ // problem now.
+ if (CXXRecord->getNumVBases()) {
+ CXXFinalOverriderMap FinalOverriders;
+ CXXRecord->getFinalOverriders(FinalOverriders);
+
+ for (CXXFinalOverriderMap::iterator M = FinalOverriders.begin(),
+ MEnd = FinalOverriders.end();
+ M != MEnd; ++M) {
+ for (OverridingMethods::iterator SO = M->second.begin(),
+ SOEnd = M->second.end();
+ SO != SOEnd; ++SO) {
+ assert(SO->second.size() > 0 &&
+                   "Virtual function without overriding functions?");
+ if (SO->second.size() == 1)
+ continue;
+
+ // C++ [class.virtual]p2:
+ // In a derived class, if a virtual member function of a base
+ // class subobject has more than one final overrider the
+ // program is ill-formed.
+ Diag(Record->getLocation(), diag::err_multiple_final_overriders)
+ << (const NamedDecl *)M->first << Record;
+ Diag(M->first->getLocation(),
+ diag::note_overridden_virtual_function);
+ for (OverridingMethods::overriding_iterator
+ OM = SO->second.begin(),
+ OMEnd = SO->second.end();
+ OM != OMEnd; ++OM)
+ Diag(OM->Method->getLocation(), diag::note_final_overrider)
+ << (const NamedDecl *)M->first << OM->Method->getParent();
+
+ Record->setInvalidDecl();
+ }
+ }
+ CXXRecord->completeDefinition(&FinalOverriders);
+ Completed = true;
+ }
+ }
+ }
+ }
+
+ if (!Completed)
+ Record->completeDefinition();
+
+ if (Record->hasAttrs()) {
+ CheckAlignasUnderalignment(Record);
+
+ if (const MSInheritanceAttr *IA = Record->getAttr<MSInheritanceAttr>())
+ checkMSInheritanceAttrOnDefinition(cast<CXXRecordDecl>(Record),
+ IA->getRange(), IA->getBestCase(),
+ IA->getSemanticSpelling());
+ }
+
+ // Check if the structure/union declaration is a type that can have zero
+ // size in C. For C this is a language extension, for C++ it may cause
+ // compatibility problems.
+ bool CheckForZeroSize;
+ if (!getLangOpts().CPlusPlus) {
+ CheckForZeroSize = true;
+ } else {
+ // For C++ filter out types that cannot be referenced in C code.
+ CXXRecordDecl *CXXRecord = cast<CXXRecordDecl>(Record);
+ CheckForZeroSize =
+ CXXRecord->getLexicalDeclContext()->isExternCContext() &&
+ !CXXRecord->isDependentType() &&
+ CXXRecord->isCLike();
+ }
+ if (CheckForZeroSize) {
+ bool ZeroSize = true;
+ bool IsEmpty = true;
+ unsigned NonBitFields = 0;
+ for (RecordDecl::field_iterator I = Record->field_begin(),
+ E = Record->field_end();
+ (NonBitFields == 0 || ZeroSize) && I != E; ++I) {
+ IsEmpty = false;
+ if (I->isUnnamedBitfield()) {
+ if (I->getBitWidthValue(Context) > 0)
+ ZeroSize = false;
+ } else {
+ ++NonBitFields;
+ QualType FieldType = I->getType();
+ if (FieldType->isIncompleteType() ||
+ !Context.getTypeSizeInChars(FieldType).isZero())
+ ZeroSize = false;
+ }
+ }
+
+      // Empty structs are an extension in C (C99 6.7.2.1p7). They are
+      // allowed in C++, but we warn if the declaration appears inside an
+      // extern "C" block.
+ if (ZeroSize) {
+ Diag(RecLoc, getLangOpts().CPlusPlus ?
+ diag::warn_zero_size_struct_union_in_extern_c :
+ diag::warn_zero_size_struct_union_compat)
+ << IsEmpty << Record->isUnion() << (NonBitFields > 1);
+ }
+
+      // Structs without named members are an extension in C (C99 6.7.2.1p7),
+      // but are accepted by GCC.
+ if (NonBitFields == 0 && !getLangOpts().CPlusPlus) {
+ Diag(RecLoc, IsEmpty ? diag::ext_empty_struct_union :
+ diag::ext_no_named_members_in_struct_union)
+ << Record->isUnion();
+ }
+ }
+ } else {
+ ObjCIvarDecl **ClsFields =
+ reinterpret_cast<ObjCIvarDecl**>(RecFields.data());
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(EnclosingDecl)) {
+ ID->setEndOfDefinitionLoc(RBrac);
+ // Add ivar's to class's DeclContext.
+ for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
+ ClsFields[i]->setLexicalDeclContext(ID);
+ ID->addDecl(ClsFields[i]);
+ }
+ // Must enforce the rule that ivars in the base classes may not be
+ // duplicates.
+ if (ID->getSuperClass())
+ DiagnoseDuplicateIvars(ID, ID->getSuperClass());
+ } else if (ObjCImplementationDecl *IMPDecl =
+ dyn_cast<ObjCImplementationDecl>(EnclosingDecl)) {
+ assert(IMPDecl && "ActOnFields - missing ObjCImplementationDecl");
+ for (unsigned I = 0, N = RecFields.size(); I != N; ++I)
+        // An ivar declared in an @implementation never belongs to the
+        // implementation; it only lives in the implementation's lexical
+        // context.
+ ClsFields[I]->setLexicalDeclContext(IMPDecl);
+ CheckImplementationIvars(IMPDecl, ClsFields, RecFields.size(), RBrac);
+ IMPDecl->setIvarLBraceLoc(LBrac);
+ IMPDecl->setIvarRBraceLoc(RBrac);
+ } else if (ObjCCategoryDecl *CDecl =
+ dyn_cast<ObjCCategoryDecl>(EnclosingDecl)) {
+      // Case of ivars in a class extension; all other cases have been
+      // reported as errors elsewhere.
+ // FIXME. Class extension does not have a LocEnd field.
+ // CDecl->setLocEnd(RBrac);
+ // Add ivar's to class extension's DeclContext.
+ // Diagnose redeclaration of private ivars.
+ ObjCInterfaceDecl *IDecl = CDecl->getClassInterface();
+ for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
+ if (IDecl) {
+ if (const ObjCIvarDecl *ClsIvar =
+ IDecl->getIvarDecl(ClsFields[i]->getIdentifier())) {
+ Diag(ClsFields[i]->getLocation(),
+ diag::err_duplicate_ivar_declaration);
+ Diag(ClsIvar->getLocation(), diag::note_previous_definition);
+ continue;
+ }
+ for (const auto *Ext : IDecl->known_extensions()) {
+ if (const ObjCIvarDecl *ClsExtIvar
+ = Ext->getIvarDecl(ClsFields[i]->getIdentifier())) {
+ Diag(ClsFields[i]->getLocation(),
+ diag::err_duplicate_ivar_declaration);
+ Diag(ClsExtIvar->getLocation(), diag::note_previous_definition);
+ continue;
+ }
+ }
+ }
+ ClsFields[i]->setLexicalDeclContext(CDecl);
+ CDecl->addDecl(ClsFields[i]);
+ }
+ CDecl->setIvarLBraceLoc(LBrac);
+ CDecl->setIvarRBraceLoc(RBrac);
+ }
+ }
+
+ if (Attr)
+ ProcessDeclAttributeList(S, Record, Attr);
+}
+
+/// \brief Determine whether the given integral value is representable within
+/// the given type T.
+static bool isRepresentableIntegerValue(ASTContext &Context,
+ llvm::APSInt &Value,
+ QualType T) {
+ assert(T->isIntegralType(Context) && "Integral type required!");
+ unsigned BitWidth = Context.getIntWidth(T);
+
+ if (Value.isUnsigned() || Value.isNonNegative()) {
+ if (T->isSignedIntegerOrEnumerationType())
+ --BitWidth;
+ return Value.getActiveBits() <= BitWidth;
+ }
+ return Value.getMinSignedBits() <= BitWidth;
+}
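+
+// e.g. (illustrative): with 8-bit chars, the value 255 is representable in
+// 'unsigned char' (8 value bits) but not in 'signed char' (7 value bits
+// after reserving the sign bit).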
+
+/// \brief Given an integral type, return the next larger integral type
+/// (or a null type if no such type exists).
+static QualType getNextLargerIntegralType(ASTContext &Context, QualType T) {
+ // FIXME: Int128/UInt128 support, which also needs to be introduced into
+ // enum checking below.
+ assert(T->isIntegralType(Context) && "Integral type required!");
+ const unsigned NumTypes = 4;
+ QualType SignedIntegralTypes[NumTypes] = {
+ Context.ShortTy, Context.IntTy, Context.LongTy, Context.LongLongTy
+ };
+ QualType UnsignedIntegralTypes[NumTypes] = {
+ Context.UnsignedShortTy, Context.UnsignedIntTy, Context.UnsignedLongTy,
+ Context.UnsignedLongLongTy
+ };
+
+ unsigned BitWidth = Context.getTypeSize(T);
+ QualType *Types = T->isSignedIntegerOrEnumerationType()? SignedIntegralTypes
+ : UnsignedIntegralTypes;
+ for (unsigned I = 0; I != NumTypes; ++I)
+ if (Context.getTypeSize(Types[I]) > BitWidth)
+ return Types[I];
+
+ return QualType();
+}
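+
+// e.g. (illustrative): for a signed 'short' the ladder above yields 'int',
+// then 'long', then 'long long'; past 'long long' a null QualType comes
+// back ('__int128' is not considered yet, per the FIXME).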
+
+EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
+ EnumConstantDecl *LastEnumConst,
+ SourceLocation IdLoc,
+ IdentifierInfo *Id,
+ Expr *Val) {
+ unsigned IntWidth = Context.getTargetInfo().getIntWidth();
+ llvm::APSInt EnumVal(IntWidth);
+ QualType EltTy;
+
+ if (Val && DiagnoseUnexpandedParameterPack(Val, UPPC_EnumeratorValue))
+ Val = nullptr;
+
+ if (Val)
+ Val = DefaultLvalueConversion(Val).get();
+
+ if (Val) {
+ if (Enum->isDependentType() || Val->isTypeDependent())
+ EltTy = Context.DependentTy;
+ else {
+ SourceLocation ExpLoc;
+ if (getLangOpts().CPlusPlus11 && Enum->isFixed() &&
+ !getLangOpts().MSVCCompat) {
+ // C++11 [dcl.enum]p5: If the underlying type is fixed, [...] the
+ // constant-expression in the enumerator-definition shall be a converted
+ // constant expression of the underlying type.
+ EltTy = Enum->getIntegerType();
+ ExprResult Converted =
+ CheckConvertedConstantExpression(Val, EltTy, EnumVal,
+ CCEK_Enumerator);
+ if (Converted.isInvalid())
+ Val = nullptr;
+ else
+ Val = Converted.get();
+ } else if (!Val->isValueDependent() &&
+ !(Val = VerifyIntegerConstantExpression(Val,
+ &EnumVal).get())) {
+ // C99 6.7.2.2p2: Make sure we have an integer constant expression.
+ } else {
+ if (Enum->isFixed()) {
+ EltTy = Enum->getIntegerType();
+
+ // In Obj-C and Microsoft mode, require the enumeration value to be
+ // representable in the underlying type of the enumeration. In C++11,
+ // we perform a non-narrowing conversion as part of converted constant
+ // expression checking.
+ if (!isRepresentableIntegerValue(Context, EnumVal, EltTy)) {
+ if (getLangOpts().MSVCCompat) {
+ Diag(IdLoc, diag::ext_enumerator_too_large) << EltTy;
+ Val = ImpCastExprToType(Val, EltTy, CK_IntegralCast).get();
+ } else
+ Diag(IdLoc, diag::err_enumerator_too_large) << EltTy;
+ } else
+ Val = ImpCastExprToType(Val, EltTy, CK_IntegralCast).get();
+ } else if (getLangOpts().CPlusPlus) {
+ // C++11 [dcl.enum]p5:
+ // If the underlying type is not fixed, the type of each enumerator
+ // is the type of its initializing value:
+ // - If an initializer is specified for an enumerator, the
+ // initializing value has the same type as the expression.
+ EltTy = Val->getType();
+ } else {
+ // C99 6.7.2.2p2:
+ // The expression that defines the value of an enumeration constant
+ // shall be an integer constant expression that has a value
+ // representable as an int.
+
+ // Complain if the value is not representable in an int.
+ if (!isRepresentableIntegerValue(Context, EnumVal, Context.IntTy))
+ Diag(IdLoc, diag::ext_enum_value_not_int)
+ << EnumVal.toString(10) << Val->getSourceRange()
+ << (EnumVal.isUnsigned() || EnumVal.isNonNegative());
+ else if (!Context.hasSameType(Val->getType(), Context.IntTy)) {
+ // Force the type of the expression to 'int'.
+ Val = ImpCastExprToType(Val, Context.IntTy, CK_IntegralCast).get();
+ }
+ EltTy = Val->getType();
+ }
+ }
+ }
+ }
+
+ if (!Val) {
+ if (Enum->isDependentType())
+ EltTy = Context.DependentTy;
+ else if (!LastEnumConst) {
+ // C++0x [dcl.enum]p5:
+ // If the underlying type is not fixed, the type of each enumerator
+ // is the type of its initializing value:
+ // - If no initializer is specified for the first enumerator, the
+ // initializing value has an unspecified integral type.
+ //
+ // GCC uses 'int' for its unspecified integral type, as does
+ // C99 6.7.2.2p3.
+ if (Enum->isFixed()) {
+ EltTy = Enum->getIntegerType();
+ }
+ else {
+ EltTy = Context.IntTy;
+ }
+ } else {
+ // Assign the last value + 1.
+ EnumVal = LastEnumConst->getInitVal();
+ ++EnumVal;
+ EltTy = LastEnumConst->getType();
+
+ // Check for overflow on increment.
+ if (EnumVal < LastEnumConst->getInitVal()) {
+ // C++0x [dcl.enum]p5:
+ // If the underlying type is not fixed, the type of each enumerator
+ // is the type of its initializing value:
+ //
+ // - Otherwise the type of the initializing value is the same as
+ // the type of the initializing value of the preceding enumerator
+ // unless the incremented value is not representable in that type,
+ // in which case the type is an unspecified integral type
+ // sufficient to contain the incremented value. If no such type
+ // exists, the program is ill-formed.
+ QualType T = getNextLargerIntegralType(Context, EltTy);
+ if (T.isNull() || Enum->isFixed()) {
+        // There is no integral type large enough to represent this
+        // value. Complain, then allow the value to wrap around.
+ EnumVal = LastEnumConst->getInitVal();
+ EnumVal = EnumVal.zext(EnumVal.getBitWidth() * 2);
+ ++EnumVal;
+ if (Enum->isFixed())
+ // When the underlying type is fixed, this is ill-formed.
+ Diag(IdLoc, diag::err_enumerator_wrapped)
+ << EnumVal.toString(10)
+ << EltTy;
+ else
+ Diag(IdLoc, diag::ext_enumerator_increment_too_large)
+ << EnumVal.toString(10);
+ } else {
+ EltTy = T;
+ }
+
+      // Retrieve the last enumerator's value, extend it to the type that is
+      // supposed to be large enough to represent the incremented value, then
+      // increment.
+ EnumVal = LastEnumConst->getInitVal();
+ EnumVal.setIsSigned(EltTy->isSignedIntegerOrEnumerationType());
+ EnumVal = EnumVal.zextOrTrunc(Context.getIntWidth(EltTy));
+ ++EnumVal;
+
+ // If we're not in C++, diagnose the overflow of enumerator values,
+ // which in C99 means that the enumerator value is not representable in
+ // an int (C99 6.7.2.2p2). However, we support GCC's extension that
+ // permits enumerator values that are representable in some larger
+ // integral type.
+ if (!getLangOpts().CPlusPlus && !T.isNull())
+ Diag(IdLoc, diag::warn_enum_value_overflow);
+ } else if (!getLangOpts().CPlusPlus &&
+ !isRepresentableIntegerValue(Context, EnumVal, EltTy)) {
+ // Enforce C99 6.7.2.2p2 even when we compute the next value.
+ Diag(IdLoc, diag::ext_enum_value_not_int)
+ << EnumVal.toString(10) << 1;
+ }
+ }
+ }
+
+ if (!EltTy->isDependentType()) {
+ // Make the enumerator value match the signedness and size of the
+ // enumerator's type.
+ EnumVal = EnumVal.extOrTrunc(Context.getIntWidth(EltTy));
+ EnumVal.setIsSigned(EltTy->isSignedIntegerOrEnumerationType());
+ }
+
+ return EnumConstantDecl::Create(Context, Enum, IdLoc, Id, EltTy,
+ Val, EnumVal);
+}
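+
+// e.g. (illustrative, editor's sketch): for 'enum E { A = INT_MAX, B }',
+// incrementing A's value overflows 'int'; in C this is diagnosed and B is
+// given the next larger integral type as a GCC-style extension, while a
+// fixed underlying type makes the wrap-around a hard error.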
+
+Sema::SkipBodyInfo Sema::shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
+ SourceLocation IILoc) {
+ if (!(getLangOpts().Modules || getLangOpts().ModulesLocalVisibility) ||
+ !getLangOpts().CPlusPlus)
+ return SkipBodyInfo();
+
+ // We have an anonymous enum definition. Look up the first enumerator to
+ // determine if we should merge the definition with an existing one and
+ // skip the body.
+ NamedDecl *PrevDecl = LookupSingleName(S, II, IILoc, LookupOrdinaryName,
+ ForRedeclaration);
+ auto *PrevECD = dyn_cast_or_null<EnumConstantDecl>(PrevDecl);
+ if (!PrevECD)
+ return SkipBodyInfo();
+
+ EnumDecl *PrevED = cast<EnumDecl>(PrevECD->getDeclContext());
+ NamedDecl *Hidden;
+ if (!PrevED->getDeclName() && !hasVisibleDefinition(PrevED, &Hidden)) {
+ SkipBodyInfo Skip;
+ Skip.Previous = Hidden;
+ return Skip;
+ }
+
+ return SkipBodyInfo();
+}
+
+Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ AttributeList *Attr,
+ SourceLocation EqualLoc, Expr *Val) {
+ EnumDecl *TheEnumDecl = cast<EnumDecl>(theEnumDecl);
+ EnumConstantDecl *LastEnumConst =
+ cast_or_null<EnumConstantDecl>(lastEnumConst);
+
+ // The scope passed in may not be a decl scope. Zip up the scope tree until
+ // we find one that is.
+ S = getNonFieldDeclScope(S);
+
+ // Verify that there isn't already something declared with this name in this
+ // scope.
+ NamedDecl *PrevDecl = LookupSingleName(S, Id, IdLoc, LookupOrdinaryName,
+ ForRedeclaration);
+ if (PrevDecl && PrevDecl->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(IdLoc, PrevDecl);
+ // Just pretend that we didn't see the previous declaration.
+ PrevDecl = nullptr;
+ }
+
+ // C++ [class.mem]p15:
+ // If T is the name of a class, then each of the following shall have a name
+ // different from T:
+ // - every enumerator of every member of class T that is an unscoped
+ // enumerated type
+ if (!TheEnumDecl->isScoped())
+ DiagnoseClassNameShadow(TheEnumDecl->getDeclContext(),
+ DeclarationNameInfo(Id, IdLoc));
+
+ EnumConstantDecl *New =
+ CheckEnumConstant(TheEnumDecl, LastEnumConst, IdLoc, Id, Val);
+ if (!New)
+ return nullptr;
+
+ if (PrevDecl) {
+ // When in C++, we may get a TagDecl with the same name; in this case the
+ // enum constant will 'hide' the tag.
+ assert((getLangOpts().CPlusPlus || !isa<TagDecl>(PrevDecl)) &&
+ "Received TagDecl when not in C++!");
+ if (!isa<TagDecl>(PrevDecl) && isDeclInScope(PrevDecl, CurContext, S) &&
+ shouldLinkPossiblyHiddenDecl(PrevDecl, New)) {
+ if (isa<EnumConstantDecl>(PrevDecl))
+ Diag(IdLoc, diag::err_redefinition_of_enumerator) << Id;
+ else
+ Diag(IdLoc, diag::err_redefinition) << Id;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ return nullptr;
+ }
+ }
+
+ // Process attributes.
+ if (Attr) ProcessDeclAttributeList(S, New, Attr);
+
+ // Register this decl in the current scope stack.
+ New->setAccess(TheEnumDecl->getAccess());
+ PushOnScopeChains(New, S);
+
+ ActOnDocumentableDecl(New);
+
+ return New;
+}
+
+// Returns false when the enum initial expression matches one of a few
+// common patterns that are exempted from the duplicate-enum warning:
+// Element2 = Element1
+// Element2 = Element1 + 1
+// Element2 = Element1 - 1
+// where Element2 and Element1 are from the same enum. Returns true
+// otherwise, so the caller still checks the element for duplicates.
+static bool ValidDuplicateEnum(EnumConstantDecl *ECD, EnumDecl *Enum) {
+ Expr *InitExpr = ECD->getInitExpr();
+ if (!InitExpr)
+ return true;
+ InitExpr = InitExpr->IgnoreImpCasts();
+
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(InitExpr)) {
+ if (!BO->isAdditiveOp())
+ return true;
+ IntegerLiteral *IL = dyn_cast<IntegerLiteral>(BO->getRHS());
+ if (!IL)
+ return true;
+ if (IL->getValue() != 1)
+ return true;
+
+ InitExpr = BO->getLHS();
+ }
+
+ // This checks if the elements are from the same enum.
+ DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(InitExpr);
+ if (!DRE)
+ return true;
+
+ EnumConstantDecl *EnumConstant = dyn_cast<EnumConstantDecl>(DRE->getDecl());
+ if (!EnumConstant)
+ return true;
+
+ if (cast<EnumDecl>(TagDecl::castFromDeclContext(ECD->getDeclContext())) !=
+ Enum)
+ return true;
+
+ return false;
+}
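+
+// e.g. (illustrative): in 'enum E { A = 1, B = A, C = A + 1 };', B and C
+// match the exempted patterns above and are skipped by the duplicate-value
+// check.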
+
+namespace {
+struct DupKey {
+ int64_t val;
+ bool isTombstoneOrEmptyKey;
+ DupKey(int64_t val, bool isTombstoneOrEmptyKey)
+ : val(val), isTombstoneOrEmptyKey(isTombstoneOrEmptyKey) {}
+};
+
+static DupKey GetDupKey(const llvm::APSInt& Val) {
+ return DupKey(Val.isSigned() ? Val.getSExtValue() : Val.getZExtValue(),
+ false);
+}
+
+struct DenseMapInfoDupKey {
+ static DupKey getEmptyKey() { return DupKey(0, true); }
+ static DupKey getTombstoneKey() { return DupKey(1, true); }
+ static unsigned getHashValue(const DupKey Key) {
+ return (unsigned)(Key.val * 37);
+ }
+ static bool isEqual(const DupKey& LHS, const DupKey& RHS) {
+ return LHS.isTombstoneOrEmptyKey == RHS.isTombstoneOrEmptyKey &&
+ LHS.val == RHS.val;
+ }
+};
+} // end anonymous namespace
+
+// Emits a warning when an element is implicitly given a value that a
+// previous element has already been assigned.
+static void CheckForDuplicateEnumValues(Sema &S, ArrayRef<Decl *> Elements,
+ EnumDecl *Enum,
+ QualType EnumType) {
+ if (S.Diags.isIgnored(diag::warn_duplicate_enum_values, Enum->getLocation()))
+ return;
+ // Avoid anonymous enums
+ if (!Enum->getIdentifier())
+ return;
+
+ // Only check for small enums.
+ if (Enum->getNumPositiveBits() > 63 || Enum->getNumNegativeBits() > 64)
+ return;
+
+ typedef SmallVector<EnumConstantDecl *, 3> ECDVector;
+ typedef SmallVector<ECDVector *, 3> DuplicatesVector;
+
+ typedef llvm::PointerUnion<EnumConstantDecl*, ECDVector*> DeclOrVector;
+ typedef llvm::DenseMap<DupKey, DeclOrVector, DenseMapInfoDupKey>
+ ValueToVectorMap;
+
+ DuplicatesVector DupVector;
+ ValueToVectorMap EnumMap;
+
+  // Populate the EnumMap with all values represented by enum constants without
+  // an initializer.
+ for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
+ EnumConstantDecl *ECD = cast_or_null<EnumConstantDecl>(Elements[i]);
+
+ // Null EnumConstantDecl means a previous diagnostic has been emitted for
+ // this constant. Skip this enum since it may be ill-formed.
+ if (!ECD) {
+ return;
+ }
+
+ if (ECD->getInitExpr())
+ continue;
+
+ DupKey Key = GetDupKey(ECD->getInitVal());
+ DeclOrVector &Entry = EnumMap[Key];
+
+ // First time encountering this value.
+ if (Entry.isNull())
+ Entry = ECD;
+ }
+
+  // Create vectors for any values that have duplicates.
+ for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
+ EnumConstantDecl *ECD = cast<EnumConstantDecl>(Elements[i]);
+ if (!ValidDuplicateEnum(ECD, Enum))
+ continue;
+
+ DupKey Key = GetDupKey(ECD->getInitVal());
+
+ DeclOrVector& Entry = EnumMap[Key];
+ if (Entry.isNull())
+ continue;
+
+ if (EnumConstantDecl *D = Entry.dyn_cast<EnumConstantDecl*>()) {
+ // Ensure constants are different.
+ if (D == ECD)
+ continue;
+
+ // Create new vector and push values onto it.
+ ECDVector *Vec = new ECDVector();
+ Vec->push_back(D);
+ Vec->push_back(ECD);
+
+ // Update entry to point to the duplicates vector.
+ Entry = Vec;
+
+ // Store the vector somewhere we can consult later for quick emission of
+ // diagnostics.
+ DupVector.push_back(Vec);
+ continue;
+ }
+
+ ECDVector *Vec = Entry.get<ECDVector*>();
+ // Make sure constants are not added more than once.
+ if (*Vec->begin() == ECD)
+ continue;
+
+ Vec->push_back(ECD);
+ }
+
+ // Emit diagnostics.
+ for (DuplicatesVector::iterator DupVectorIter = DupVector.begin(),
+ DupVectorEnd = DupVector.end();
+ DupVectorIter != DupVectorEnd; ++DupVectorIter) {
+ ECDVector *Vec = *DupVectorIter;
+ assert(Vec->size() > 1 && "ECDVector should have at least 2 elements.");
+
+ // Emit warning for one enum constant.
+ ECDVector::iterator I = Vec->begin();
+ S.Diag((*I)->getLocation(), diag::warn_duplicate_enum_values)
+ << (*I)->getName() << (*I)->getInitVal().toString(10)
+ << (*I)->getSourceRange();
+ ++I;
+
+ // Emit one note for each of the remaining enum constants with
+ // the same value.
+ for (ECDVector::iterator E = Vec->end(); I != E; ++I)
+ S.Diag((*I)->getLocation(), diag::note_duplicate_element)
+ << (*I)->getName() << (*I)->getInitVal().toString(10)
+ << (*I)->getSourceRange();
+ delete Vec;
+ }
+}
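+
+// e.g. (illustrative): 'enum E { A, B = 0 };' warns that B duplicates the
+// implicitly assigned value of A, while 'enum E { A, B = A };' stays quiet
+// thanks to the exemption above.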
+
+bool Sema::IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
+ bool AllowMask) const {
+ assert(ED->hasAttr<FlagEnumAttr>() && "looking for value in non-flag enum");
+ assert(ED->isCompleteDefinition() && "expected enum definition");
+
+ auto R = FlagBitsCache.insert(std::make_pair(ED, llvm::APInt()));
+ llvm::APInt &FlagBits = R.first->second;
+
+ if (R.second) {
+ for (auto *E : ED->enumerators()) {
+ const auto &EVal = E->getInitVal();
+ // Only single-bit enumerators introduce new flag values.
+ if (EVal.isPowerOf2())
+ FlagBits = FlagBits.zextOrSelf(EVal.getBitWidth()) | EVal;
+ }
+ }
+
+ // A value is in a flag enum if either its bits are a subset of the enum's
+ // flag bits (the first condition) or we are allowing masks and the same is
+ // true of its complement (the second condition). When masks are allowed, we
+ // allow the common idiom of ~(enum1 | enum2) to be a valid enum value.
+ //
+ // While it's true that any value could be used as a mask, the assumption is
+ // that a mask will have all of the insignificant bits set. Anything else is
+ // likely a logic error.
+ llvm::APInt FlagMask = ~FlagBits.zextOrTrunc(Val.getBitWidth());
+ return !(FlagMask & Val) || (AllowMask && !(FlagMask & ~Val));
+}
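+
+// e.g. (illustrative): for a flag enum whose enumerators are 1, 2, and 4,
+// the value 6 is accepted (a subset of the flag bits), 8 is rejected, and
+// with AllowMask the mask value ~7 is accepted as well.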
+
+void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
+ SourceLocation RBraceLoc, Decl *EnumDeclX,
+ ArrayRef<Decl *> Elements,
+ Scope *S, AttributeList *Attr) {
+ EnumDecl *Enum = cast<EnumDecl>(EnumDeclX);
+ QualType EnumType = Context.getTypeDeclType(Enum);
+
+ if (Attr)
+ ProcessDeclAttributeList(S, Enum, Attr);
+
+ if (Enum->isDependentType()) {
+ for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
+ EnumConstantDecl *ECD =
+ cast_or_null<EnumConstantDecl>(Elements[i]);
+ if (!ECD) continue;
+
+ ECD->setType(EnumType);
+ }
+
+ Enum->completeDefinition(Context.DependentTy, Context.DependentTy, 0, 0);
+ return;
+ }
+
+  // TODO: If the result value doesn't fit in an int, it must be a long or
+  // long long value. ISO C does not support this, but GCC does as an
+  // extension; emit a warning.
+ unsigned IntWidth = Context.getTargetInfo().getIntWidth();
+ unsigned CharWidth = Context.getTargetInfo().getCharWidth();
+ unsigned ShortWidth = Context.getTargetInfo().getShortWidth();
+
+ // Verify that all the values are okay, compute the size of the values, and
+ // reverse the list.
+ unsigned NumNegativeBits = 0;
+ unsigned NumPositiveBits = 0;
+
+ // Keep track of whether all elements have type int.
+ bool AllElementsInt = true;
+
+ for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
+ EnumConstantDecl *ECD =
+ cast_or_null<EnumConstantDecl>(Elements[i]);
+ if (!ECD) continue; // Already issued a diagnostic.
+
+ const llvm::APSInt &InitVal = ECD->getInitVal();
+
+ // Keep track of the size of positive and negative values.
+ if (InitVal.isUnsigned() || InitVal.isNonNegative())
+ NumPositiveBits = std::max(NumPositiveBits,
+ (unsigned)InitVal.getActiveBits());
+ else
+ NumNegativeBits = std::max(NumNegativeBits,
+ (unsigned)InitVal.getMinSignedBits());
+
+    // Keep track of whether every enum element has type int (very common).
+ if (AllElementsInt)
+ AllElementsInt = ECD->getType() == Context.IntTy;
+ }
+
+ // Figure out the type that should be used for this enum.
+ QualType BestType;
+ unsigned BestWidth;
+
+ // C++0x N3000 [conv.prom]p3:
+ // An rvalue of an unscoped enumeration type whose underlying
+ // type is not fixed can be converted to an rvalue of the first
+ // of the following types that can represent all the values of
+ // the enumeration: int, unsigned int, long int, unsigned long
+ // int, long long int, or unsigned long long int.
+ // C99 6.4.4.3p2:
+ // An identifier declared as an enumeration constant has type int.
+  // The C99 rule is modified by a GCC extension.
+ QualType BestPromotionType;
+
+ bool Packed = Enum->hasAttr<PackedAttr>();
+ // -fshort-enums is the equivalent to specifying the packed attribute on all
+ // enum definitions.
+ if (LangOpts.ShortEnums)
+ Packed = true;
+
+ if (Enum->isFixed()) {
+ BestType = Enum->getIntegerType();
+ if (BestType->isPromotableIntegerType())
+ BestPromotionType = Context.getPromotedIntegerType(BestType);
+ else
+ BestPromotionType = BestType;
+
+ BestWidth = Context.getIntWidth(BestType);
+ }
+ else if (NumNegativeBits) {
+ // If there is a negative value, figure out the smallest integer type (of
+ // int/long/longlong) that fits.
+ // If it's packed, check also if it fits a char or a short.
+ if (Packed && NumNegativeBits <= CharWidth && NumPositiveBits < CharWidth) {
+ BestType = Context.SignedCharTy;
+ BestWidth = CharWidth;
+ } else if (Packed && NumNegativeBits <= ShortWidth &&
+ NumPositiveBits < ShortWidth) {
+ BestType = Context.ShortTy;
+ BestWidth = ShortWidth;
+ } else if (NumNegativeBits <= IntWidth && NumPositiveBits < IntWidth) {
+ BestType = Context.IntTy;
+ BestWidth = IntWidth;
+ } else {
+ BestWidth = Context.getTargetInfo().getLongWidth();
+
+ if (NumNegativeBits <= BestWidth && NumPositiveBits < BestWidth) {
+ BestType = Context.LongTy;
+ } else {
+ BestWidth = Context.getTargetInfo().getLongLongWidth();
+
+ if (NumNegativeBits > BestWidth || NumPositiveBits >= BestWidth)
+ Diag(Enum->getLocation(), diag::ext_enum_too_large);
+ BestType = Context.LongLongTy;
+ }
+ }
+ BestPromotionType = (BestWidth <= IntWidth ? Context.IntTy : BestType);
+ } else {
+ // If there is no negative value, figure out the smallest type that fits
+ // all of the enumerator values.
+ // If it's packed, check also if it fits a char or a short.
+ if (Packed && NumPositiveBits <= CharWidth) {
+ BestType = Context.UnsignedCharTy;
+ BestPromotionType = Context.IntTy;
+ BestWidth = CharWidth;
+ } else if (Packed && NumPositiveBits <= ShortWidth) {
+ BestType = Context.UnsignedShortTy;
+ BestPromotionType = Context.IntTy;
+ BestWidth = ShortWidth;
+ } else if (NumPositiveBits <= IntWidth) {
+ BestType = Context.UnsignedIntTy;
+ BestWidth = IntWidth;
+ BestPromotionType
+ = (NumPositiveBits == BestWidth || !getLangOpts().CPlusPlus)
+ ? Context.UnsignedIntTy : Context.IntTy;
+ } else if (NumPositiveBits <=
+ (BestWidth = Context.getTargetInfo().getLongWidth())) {
+ BestType = Context.UnsignedLongTy;
+ BestPromotionType
+ = (NumPositiveBits == BestWidth || !getLangOpts().CPlusPlus)
+ ? Context.UnsignedLongTy : Context.LongTy;
+ } else {
+ BestWidth = Context.getTargetInfo().getLongLongWidth();
+ assert(NumPositiveBits <= BestWidth &&
+ "How could an initializer get larger than ULL?");
+ BestType = Context.UnsignedLongLongTy;
+ BestPromotionType
+ = (NumPositiveBits == BestWidth || !getLangOpts().CPlusPlus)
+ ? Context.UnsignedLongLongTy : Context.LongLongTy;
+ }
+ }
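+  // For illustration (editor's sketch, not from this change), with 32-bit
+  // 'int': 'enum { A = 250 }' is laid out as 'unsigned char' when packed and
+  // as 'unsigned int' otherwise, while 'enum { B = -1, C = 200 }' needs a
+  // signed type and gets 'int'.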
+
+ // Loop over all of the enumerator constants, changing their types to match
+ // the type of the enum if needed.
+ for (auto *D : Elements) {
+ auto *ECD = cast_or_null<EnumConstantDecl>(D);
+ if (!ECD) continue; // Already issued a diagnostic.
+
+ // Standard C says the enumerators have int type, but we allow, as an
+ // extension, the enumerators to be larger than int size. If each
+ // enumerator value fits in an int, type it as an int, otherwise type it the
+ // same as the enumerator decl itself. This means that in "enum { X = 1U }"
+ // that X has type 'int', not 'unsigned'.
+
+ // Determine whether the value fits into an int.
+ llvm::APSInt InitVal = ECD->getInitVal();
+
+ // If it fits into an integer type, force it. Otherwise force it to match
+ // the enum decl type.
+ QualType NewTy;
+ unsigned NewWidth;
+ bool NewSign;
+ if (!getLangOpts().CPlusPlus &&
+ !Enum->isFixed() &&
+ isRepresentableIntegerValue(Context, InitVal, Context.IntTy)) {
+ NewTy = Context.IntTy;
+ NewWidth = IntWidth;
+ NewSign = true;
+ } else if (ECD->getType() == BestType) {
+ // Already the right type!
+ if (getLangOpts().CPlusPlus)
+ // C++ [dcl.enum]p4: Following the closing brace of an
+ // enum-specifier, each enumerator has the type of its
+ // enumeration.
+ ECD->setType(EnumType);
+ continue;
+ } else {
+ NewTy = BestType;
+ NewWidth = BestWidth;
+ NewSign = BestType->isSignedIntegerOrEnumerationType();
+ }
+
+ // Adjust the APSInt value.
+ InitVal = InitVal.extOrTrunc(NewWidth);
+ InitVal.setIsSigned(NewSign);
+ ECD->setInitVal(InitVal);
+
+ // Adjust the Expr initializer and type.
+ if (ECD->getInitExpr() &&
+ !Context.hasSameType(NewTy, ECD->getInitExpr()->getType()))
+ ECD->setInitExpr(ImplicitCastExpr::Create(Context, NewTy,
+ CK_IntegralCast,
+ ECD->getInitExpr(),
+ /*base paths*/ nullptr,
+ VK_RValue));
+ if (getLangOpts().CPlusPlus)
+ // C++ [dcl.enum]p4: Following the closing brace of an
+ // enum-specifier, each enumerator has the type of its
+ // enumeration.
+ ECD->setType(EnumType);
+ else
+ ECD->setType(NewTy);
+ }
+
+ Enum->completeDefinition(BestType, BestPromotionType,
+ NumPositiveBits, NumNegativeBits);
+
+ CheckForDuplicateEnumValues(*this, Elements, Enum, EnumType);
+
+ if (Enum->hasAttr<FlagEnumAttr>()) {
+ for (Decl *D : Elements) {
+ EnumConstantDecl *ECD = cast_or_null<EnumConstantDecl>(D);
+ if (!ECD) continue; // Already issued a diagnostic.
+
+ llvm::APSInt InitVal = ECD->getInitVal();
+ if (InitVal != 0 && !InitVal.isPowerOf2() &&
+ !IsValueInFlagEnum(Enum, InitVal, true))
+ Diag(ECD->getLocation(), diag::warn_flag_enum_constant_out_of_range)
+ << ECD << Enum;
+ }
+ }
+
+ // Now that the enum type is defined, ensure it's not been underaligned.
+ if (Enum->hasAttrs())
+ CheckAlignasUnderalignment(Enum);
+}
+
+Decl *Sema::ActOnFileScopeAsmDecl(Expr *expr,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ StringLiteral *AsmString = cast<StringLiteral>(expr);
+
+ FileScopeAsmDecl *New = FileScopeAsmDecl::Create(Context, CurContext,
+ AsmString, StartLoc,
+ EndLoc);
+ CurContext->addDecl(New);
+ return New;
+}
+
+static void checkModuleImportContext(Sema &S, Module *M,
+ SourceLocation ImportLoc, DeclContext *DC,
+ bool FromInclude = false) {
+ SourceLocation ExternCLoc;
+
+ if (auto *LSD = dyn_cast<LinkageSpecDecl>(DC)) {
+ switch (LSD->getLanguage()) {
+ case LinkageSpecDecl::lang_c:
+ if (ExternCLoc.isInvalid())
+ ExternCLoc = LSD->getLocStart();
+ break;
+ case LinkageSpecDecl::lang_cxx:
+ break;
+ }
+ DC = LSD->getParent();
+ }
+
+ while (isa<LinkageSpecDecl>(DC))
+ DC = DC->getParent();
+
+ if (!isa<TranslationUnitDecl>(DC)) {
+ S.Diag(ImportLoc, (FromInclude && S.isModuleVisible(M))
+ ? diag::ext_module_import_not_at_top_level_noop
+ : diag::err_module_import_not_at_top_level_fatal)
+ << M->getFullModuleName() << DC;
+ S.Diag(cast<Decl>(DC)->getLocStart(),
+ diag::note_module_import_not_at_top_level) << DC;
+ } else if (!M->IsExternC && ExternCLoc.isValid()) {
+ S.Diag(ImportLoc, diag::ext_module_import_in_extern_c)
+ << M->getFullModuleName();
+ S.Diag(ExternCLoc, diag::note_module_import_in_extern_c);
+ }
+}
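+
+// For illustration (editor's note, not from this change): a module import
+// nested inside a namespace or function is diagnosed by the context check
+// above, and an import of a non-extern-C module that appears lexically
+// inside 'extern "C" { ... }' gets the extern-C diagnostic instead.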
+
+void Sema::diagnoseMisplacedModuleImport(Module *M, SourceLocation ImportLoc) {
+ return checkModuleImportContext(*this, M, ImportLoc, CurContext);
+}
+
+DeclResult Sema::ActOnModuleImport(SourceLocation AtLoc,
+ SourceLocation ImportLoc,
+ ModuleIdPath Path) {
+ Module *Mod =
+ getModuleLoader().loadModule(ImportLoc, Path, Module::AllVisible,
+ /*IsIncludeDirective=*/false);
+ if (!Mod)
+ return true;
+
+ VisibleModules.setVisible(Mod, ImportLoc);
+
+ checkModuleImportContext(*this, Mod, ImportLoc, CurContext);
+
+ // FIXME: we should support importing a submodule within a different submodule
+ // of the same top-level module. Until we do, make it an error rather than
+ // silently ignoring the import.
+ if (Mod->getTopLevelModuleName() == getLangOpts().CurrentModule)
+ Diag(ImportLoc, diag::err_module_self_import)
+ << Mod->getFullModuleName() << getLangOpts().CurrentModule;
+ else if (Mod->getTopLevelModuleName() == getLangOpts().ImplementationOfModule)
+ Diag(ImportLoc, diag::err_module_import_in_implementation)
+ << Mod->getFullModuleName() << getLangOpts().ImplementationOfModule;
+
+ SmallVector<SourceLocation, 2> IdentifierLocs;
+ Module *ModCheck = Mod;
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ // If we've run out of module parents, just drop the remaining identifiers.
+ // We need the length to be consistent.
+ if (!ModCheck)
+ break;
+ ModCheck = ModCheck->Parent;
+
+ IdentifierLocs.push_back(Path[I].second);
+ }
+
+ ImportDecl *Import = ImportDecl::Create(Context,
+ Context.getTranslationUnitDecl(),
+ AtLoc.isValid()? AtLoc : ImportLoc,
+ Mod, IdentifierLocs);
+ Context.getTranslationUnitDecl()->addDecl(Import);
+ return Import;
+}
+
+void Sema::ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
+ checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext, true);
+
+ // Determine whether we're in the #include buffer for a module. The #includes
+ // in that buffer do not qualify as module imports; they're just an
+ // implementation detail of us building the module.
+ //
+ // FIXME: Should we even get ActOnModuleInclude calls for those?
+ bool IsInModuleIncludes =
+ TUKind == TU_Module &&
+ getSourceManager().isWrittenInMainFile(DirectiveLoc);
+
+ // If this module import was due to an inclusion directive, create an
+ // implicit import declaration to capture it in the AST.
+ if (!IsInModuleIncludes) {
+ TranslationUnitDecl *TU = getASTContext().getTranslationUnitDecl();
+ ImportDecl *ImportD = ImportDecl::CreateImplicit(getASTContext(), TU,
+ DirectiveLoc, Mod,
+ DirectiveLoc);
+ TU->addDecl(ImportD);
+ Consumer.HandleImplicitImportDecl(ImportD);
+ }
+
+ getModuleLoader().makeModuleVisible(Mod, Module::AllVisible, DirectiveLoc);
+ VisibleModules.setVisible(Mod, DirectiveLoc);
+}
+
+void Sema::ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod) {
+ checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext);
+
+ if (getLangOpts().ModulesLocalVisibility)
+ VisibleModulesStack.push_back(std::move(VisibleModules));
+ VisibleModules.setVisible(Mod, DirectiveLoc);
+}
+
+void Sema::ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod) {
+ checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext);
+
+ if (getLangOpts().ModulesLocalVisibility) {
+ VisibleModules = std::move(VisibleModulesStack.back());
+ VisibleModulesStack.pop_back();
+ VisibleModules.setVisible(Mod, DirectiveLoc);
+ }
+}
+
+void Sema::createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
+ Module *Mod) {
+ // Bail if we're not allowed to implicitly import a module here.
+ if (isSFINAEContext() || !getLangOpts().ModulesErrorRecovery)
+ return;
+
+ // Create the implicit import declaration.
+ TranslationUnitDecl *TU = getASTContext().getTranslationUnitDecl();
+ ImportDecl *ImportD = ImportDecl::CreateImplicit(getASTContext(), TU,
+ Loc, Mod, Loc);
+ TU->addDecl(ImportD);
+ Consumer.HandleImplicitImportDecl(ImportD);
+
+ // Make the module visible.
+ getModuleLoader().makeModuleVisible(Mod, Module::AllVisible, Loc);
+ VisibleModules.setVisible(Mod, Loc);
+}
+
+void Sema::ActOnPragmaRedefineExtname(IdentifierInfo* Name,
+ IdentifierInfo* AliasName,
+ SourceLocation PragmaLoc,
+ SourceLocation NameLoc,
+ SourceLocation AliasNameLoc) {
+ NamedDecl *PrevDecl = LookupSingleName(TUScope, Name, NameLoc,
+ LookupOrdinaryName);
+ AsmLabelAttr *Attr =
+ AsmLabelAttr::CreateImplicit(Context, AliasName->getName(), AliasNameLoc);
+
+ // If a declaration that:
+ // 1) declares a function or a variable
+ // 2) has external linkage
+ // already exists, add a label attribute to it.
+ if (PrevDecl && (isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl))) {
+ if (isDeclExternC(PrevDecl))
+ PrevDecl->addAttr(Attr);
+ else
+ Diag(PrevDecl->getLocation(), diag::warn_redefine_extname_not_applied)
+ << /*Variable*/(isa<FunctionDecl>(PrevDecl) ? 0 : 1) << PrevDecl;
+  // Otherwise, add a label attribute to ExtnameUndeclaredIdentifiers.
+ } else
+ (void)ExtnameUndeclaredIdentifiers.insert(std::make_pair(Name, Attr));
+}
+
+void Sema::ActOnPragmaWeakID(IdentifierInfo* Name,
+ SourceLocation PragmaLoc,
+ SourceLocation NameLoc) {
+ Decl *PrevDecl = LookupSingleName(TUScope, Name, NameLoc, LookupOrdinaryName);
+
+ if (PrevDecl) {
+ PrevDecl->addAttr(WeakAttr::CreateImplicit(Context, PragmaLoc));
+ } else {
+ (void)WeakUndeclaredIdentifiers.insert(
+ std::pair<IdentifierInfo*,WeakInfo>
+ (Name, WeakInfo((IdentifierInfo*)nullptr, NameLoc)));
+ }
+}
+
+void Sema::ActOnPragmaWeakAlias(IdentifierInfo* Name,
+ IdentifierInfo* AliasName,
+ SourceLocation PragmaLoc,
+ SourceLocation NameLoc,
+ SourceLocation AliasNameLoc) {
+ Decl *PrevDecl = LookupSingleName(TUScope, AliasName, AliasNameLoc,
+ LookupOrdinaryName);
+ WeakInfo W = WeakInfo(Name, NameLoc);
+
+ if (PrevDecl && (isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl))) {
+ if (!PrevDecl->hasAttr<AliasAttr>())
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(PrevDecl))
+ DeclApplyPragmaWeak(TUScope, ND, W);
+ } else {
+ (void)WeakUndeclaredIdentifiers.insert(
+ std::pair<IdentifierInfo*,WeakInfo>(AliasName, W));
+ }
+}
+
+Decl *Sema::getObjCDeclContext() const {
+ return (dyn_cast_or_null<ObjCContainerDecl>(CurContext));
+}
+
+AvailabilityResult Sema::getCurContextAvailability() const {
+ const Decl *D = cast_or_null<Decl>(getCurObjCLexicalContext());
+ if (!D)
+ return AR_Available;
+
+  // If we are within an Objective-C method, we should consult
+  // both the availability of the method and that of the
+  // enclosing class. If the class is (say) deprecated,
+  // the entire method is considered deprecated for the
+  // purpose of checking whether the current context is deprecated.
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ AvailabilityResult R = MD->getAvailability();
+ if (R != AR_Available)
+ return R;
+ D = MD->getClassInterface();
+ }
+  // If we are within an Objective-C @implementation, it
+ // gets the same availability context as the @interface.
+ else if (const ObjCImplementationDecl *ID =
+ dyn_cast<ObjCImplementationDecl>(D)) {
+ D = ID->getClassInterface();
+ }
+ // Recover from user error.
+ return D ? D->getAvailability() : AR_Available;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
new file mode 100644
index 0000000..5a0f0f8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
@@ -0,0 +1,5950 @@
+//===--- SemaDeclAttr.cpp - Declaration Attribute Handling ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements decl-related attribute processing.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Mangle.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/DelayedDiagnostic.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Scope.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/MathExtras.h"
+using namespace clang;
+using namespace sema;
+
+namespace AttributeLangSupport {
+ enum LANG {
+ C,
+ Cpp,
+ ObjC
+ };
+}
+
+//===----------------------------------------------------------------------===//
+// Helper functions
+//===----------------------------------------------------------------------===//
+
+/// isFunctionOrMethod - Return true if the given decl has function
+/// type (function or function-typed variable) or an Objective-C
+/// method.
+static bool isFunctionOrMethod(const Decl *D) {
+ return (D->getFunctionType() != nullptr) || isa<ObjCMethodDecl>(D);
+}
+/// \brief Return true if the given decl has function type (function or
+/// function-typed variable) or an Objective-C method or a block.
+static bool isFunctionOrMethodOrBlock(const Decl *D) {
+ return isFunctionOrMethod(D) || isa<BlockDecl>(D);
+}
+
+/// Return true if the given decl has a declarator that should have
+/// been processed by Sema::GetTypeForDeclarator.
+static bool hasDeclarator(const Decl *D) {
+ // In some sense, TypedefDecl really *ought* to be a DeclaratorDecl.
+ return isa<DeclaratorDecl>(D) || isa<BlockDecl>(D) || isa<TypedefNameDecl>(D) ||
+ isa<ObjCPropertyDecl>(D);
+}
+
+/// hasFunctionProto - Return true if the given decl has argument
+/// information. This decl should have already passed
+/// isFunctionOrMethod or isFunctionOrMethodOrBlock.
+static bool hasFunctionProto(const Decl *D) {
+ if (const FunctionType *FnTy = D->getFunctionType())
+ return isa<FunctionProtoType>(FnTy);
+ return isa<ObjCMethodDecl>(D) || isa<BlockDecl>(D);
+}
+
+/// getFunctionOrMethodNumParams - Return number of function or method
+/// parameters. It is an error to call this on a K&R function (use
+/// hasFunctionProto first).
+static unsigned getFunctionOrMethodNumParams(const Decl *D) {
+ if (const FunctionType *FnTy = D->getFunctionType())
+ return cast<FunctionProtoType>(FnTy)->getNumParams();
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
+ return BD->getNumParams();
+ return cast<ObjCMethodDecl>(D)->param_size();
+}
+
+static QualType getFunctionOrMethodParamType(const Decl *D, unsigned Idx) {
+ if (const FunctionType *FnTy = D->getFunctionType())
+ return cast<FunctionProtoType>(FnTy)->getParamType(Idx);
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
+ return BD->getParamDecl(Idx)->getType();
+
+ return cast<ObjCMethodDecl>(D)->parameters()[Idx]->getType();
+}
+
+static SourceRange getFunctionOrMethodParamRange(const Decl *D, unsigned Idx) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D))
+ return FD->getParamDecl(Idx)->getSourceRange();
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
+ return MD->parameters()[Idx]->getSourceRange();
+ if (const auto *BD = dyn_cast<BlockDecl>(D))
+ return BD->getParamDecl(Idx)->getSourceRange();
+ return SourceRange();
+}
+
+static QualType getFunctionOrMethodResultType(const Decl *D) {
+ if (const FunctionType *FnTy = D->getFunctionType())
+ return cast<FunctionType>(FnTy)->getReturnType();
+ return cast<ObjCMethodDecl>(D)->getReturnType();
+}
+
+static SourceRange getFunctionOrMethodResultSourceRange(const Decl *D) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D))
+ return FD->getReturnTypeSourceRange();
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
+ return MD->getReturnTypeSourceRange();
+ return SourceRange();
+}
+
+static bool isFunctionOrMethodVariadic(const Decl *D) {
+ if (const FunctionType *FnTy = D->getFunctionType()) {
+ const FunctionProtoType *proto = cast<FunctionProtoType>(FnTy);
+ return proto->isVariadic();
+ }
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
+ return BD->isVariadic();
+
+ return cast<ObjCMethodDecl>(D)->isVariadic();
+}
+
+static bool isInstanceMethod(const Decl *D) {
+ if (const CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(D))
+ return MethodDecl->isInstance();
+ return false;
+}
+
+static inline bool isNSStringType(QualType T, ASTContext &Ctx) {
+ const ObjCObjectPointerType *PT = T->getAs<ObjCObjectPointerType>();
+ if (!PT)
+ return false;
+
+ ObjCInterfaceDecl *Cls = PT->getObjectType()->getInterface();
+ if (!Cls)
+ return false;
+
+ IdentifierInfo* ClsName = Cls->getIdentifier();
+
+ // FIXME: Should we walk the chain of classes?
+ return ClsName == &Ctx.Idents.get("NSString") ||
+ ClsName == &Ctx.Idents.get("NSMutableString");
+}
+
+static inline bool isCFStringType(QualType T, ASTContext &Ctx) {
+ const PointerType *PT = T->getAs<PointerType>();
+ if (!PT)
+ return false;
+
+ const RecordType *RT = PT->getPointeeType()->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->getTagKind() != TTK_Struct)
+ return false;
+
+ return RD->getIdentifier() == &Ctx.Idents.get("__CFString");
+}
+
+static unsigned getNumAttributeArgs(const AttributeList &Attr) {
+ // FIXME: Include the type in the argument list.
+ return Attr.getNumArgs() + Attr.hasParsedType();
+}
+
+template <typename Compare>
+static bool checkAttributeNumArgsImpl(Sema &S, const AttributeList &Attr,
+ unsigned Num, unsigned Diag,
+ Compare Comp) {
+ if (Comp(getNumAttributeArgs(Attr), Num)) {
+ S.Diag(Attr.getLoc(), Diag) << Attr.getName() << Num;
+ return false;
+ }
+
+ return true;
+}
+
+/// \brief Check if the attribute has exactly as many args as Num. May
+/// output an error.
+static bool checkAttributeNumArgs(Sema &S, const AttributeList &Attr,
+ unsigned Num) {
+ return checkAttributeNumArgsImpl(S, Attr, Num,
+ diag::err_attribute_wrong_number_arguments,
+ std::not_equal_to<unsigned>());
+}
+
+/// \brief Check if the attribute has at least as many args as Num. May
+/// output an error.
+static bool checkAttributeAtLeastNumArgs(Sema &S, const AttributeList &Attr,
+ unsigned Num) {
+ return checkAttributeNumArgsImpl(S, Attr, Num,
+ diag::err_attribute_too_few_arguments,
+ std::less<unsigned>());
+}
+
+/// \brief Check if the attribute has at most as many args as Num. May
+/// output an error.
+static bool checkAttributeAtMostNumArgs(Sema &S, const AttributeList &Attr,
+ unsigned Num) {
+ return checkAttributeNumArgsImpl(S, Attr, Num,
+ diag::err_attribute_too_many_arguments,
+ std::greater<unsigned>());
+}
+
+/// \brief If Expr is a valid integer constant expression, get the value of
+/// the expression and return true; otherwise, return false. May output an
+/// error.
+static bool checkUInt32Argument(Sema &S, const AttributeList &Attr,
+ const Expr *Expr, uint32_t &Val,
+ unsigned Idx = UINT_MAX) {
+ llvm::APSInt I(32);
+ if (Expr->isTypeDependent() || Expr->isValueDependent() ||
+ !Expr->isIntegerConstantExpr(I, S.Context)) {
+ if (Idx != UINT_MAX)
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
+ << Attr.getName() << Idx << AANT_ArgumentIntegerConstant
+ << Expr->getSourceRange();
+ else
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
+ << Attr.getName() << AANT_ArgumentIntegerConstant
+ << Expr->getSourceRange();
+ return false;
+ }
+
+ if (!I.isIntN(32)) {
+ S.Diag(Expr->getExprLoc(), diag::err_ice_too_large)
+ << I.toString(10, false) << 32 << /* Unsigned */ 1;
+ return false;
+ }
+
+ Val = (uint32_t)I.getZExtValue();
+ return true;
+}
+
+/// \brief Diagnose mutually exclusive attributes when present on a given
+/// declaration. Returns true if diagnosed.
+template <typename AttrTy>
+static bool checkAttrMutualExclusion(Sema &S, Decl *D, SourceRange Range,
+ IdentifierInfo *Ident) {
+ if (AttrTy *A = D->getAttr<AttrTy>()) {
+ S.Diag(Range.getBegin(), diag::err_attributes_are_not_compatible) << Ident
+ << A;
+ S.Diag(A->getLocation(), diag::note_conflicting_attribute);
+ return true;
+ }
+ return false;
+}
+
+/// \brief Check if IdxExpr is a valid parameter index for a function or
+/// instance method D. May output an error.
+///
+/// \returns true if IdxExpr is a valid index.
+static bool checkFunctionOrMethodParameterIndex(Sema &S, const Decl *D,
+ const AttributeList &Attr,
+ unsigned AttrArgNum,
+ const Expr *IdxExpr,
+ uint64_t &Idx) {
+ assert(isFunctionOrMethodOrBlock(D));
+
+ // In C++ the implicit 'this' function parameter also counts.
+ // Parameters are counted from one.
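+  // (Illustration, not from the original: for a C++ instance method
+  // 'int C::f(int a)', index 1 would denote the implicit 'this' -- which
+  // these attributes may not reference, see below -- and index 2 denotes
+  // 'a'.)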
+ bool HP = hasFunctionProto(D);
+ bool HasImplicitThisParam = isInstanceMethod(D);
+ bool IV = HP && isFunctionOrMethodVariadic(D);
+ unsigned NumParams =
+ (HP ? getFunctionOrMethodNumParams(D) : 0) + HasImplicitThisParam;
+
+ llvm::APSInt IdxInt;
+ if (IdxExpr->isTypeDependent() || IdxExpr->isValueDependent() ||
+ !IdxExpr->isIntegerConstantExpr(IdxInt, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
+ << Attr.getName() << AttrArgNum << AANT_ArgumentIntegerConstant
+ << IdxExpr->getSourceRange();
+ return false;
+ }
+
+ Idx = IdxInt.getLimitedValue();
+ if (Idx < 1 || (!IV && Idx > NumParams)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << Attr.getName() << AttrArgNum << IdxExpr->getSourceRange();
+ return false;
+ }
+ Idx--; // Convert to zero-based.
+ if (HasImplicitThisParam) {
+ if (Idx == 0) {
+ S.Diag(Attr.getLoc(),
+ diag::err_attribute_invalid_implicit_this_argument)
+ << Attr.getName() << IdxExpr->getSourceRange();
+ return false;
+ }
+ --Idx;
+ }
+
+ return true;
+}
+
+/// \brief Check if the argument \p ArgNum of \p Attr is an ASCII string
+/// literal. If not, emit an error and return false. If the argument is an
+/// identifier, emit an error with a fixit hint and treat it as if it were a
+/// string literal.
+bool Sema::checkStringLiteralArgumentAttr(const AttributeList &Attr,
+ unsigned ArgNum, StringRef &Str,
+ SourceLocation *ArgLocation) {
+ // Look for identifiers. If we have one emit a hint to fix it to a literal.
+ if (Attr.isArgIdent(ArgNum)) {
+ IdentifierLoc *Loc = Attr.getArgAsIdent(ArgNum);
+ Diag(Loc->Loc, diag::err_attribute_argument_type)
+ << Attr.getName() << AANT_ArgumentString
+ << FixItHint::CreateInsertion(Loc->Loc, "\"")
+ << FixItHint::CreateInsertion(getLocForEndOfToken(Loc->Loc), "\"");
+ Str = Loc->Ident->getName();
+ if (ArgLocation)
+ *ArgLocation = Loc->Loc;
+ return true;
+ }
+
+ // Now check for an actual string literal.
+ Expr *ArgExpr = Attr.getArgAsExpr(ArgNum);
+ StringLiteral *Literal = dyn_cast<StringLiteral>(ArgExpr->IgnoreParenCasts());
+ if (ArgLocation)
+ *ArgLocation = ArgExpr->getLocStart();
+
+ if (!Literal || !Literal->isAscii()) {
+ Diag(ArgExpr->getLocStart(), diag::err_attribute_argument_type)
+ << Attr.getName() << AANT_ArgumentString;
+ return false;
+ }
+
+ Str = Literal->getString();
+ return true;
+}
+
+/// \brief Applies the given attribute to the Decl without performing any
+/// additional semantic checking.
+template <typename AttrType>
+static void handleSimpleAttribute(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ D->addAttr(::new (S.Context) AttrType(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+/// \brief Check if the passed-in expression is of type int or bool.
+static bool isIntOrBool(Expr *Exp) {
+ QualType QT = Exp->getType();
+ return QT->isBooleanType() || QT->isIntegerType();
+}
+
+
+// Check to see if the type is a smart pointer of some kind. We assume
+// it's a smart pointer if it defines both operator-> and operator*.
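+// (For instance, a class shaped like std::shared_ptr, which declares both
+// operators, passes this check.)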
+static bool threadSafetyCheckIsSmartPointer(Sema &S, const RecordType* RT) {
+ DeclContextLookupResult Res1 = RT->getDecl()->lookup(
+ S.Context.DeclarationNames.getCXXOperatorName(OO_Star));
+ if (Res1.empty())
+ return false;
+
+ DeclContextLookupResult Res2 = RT->getDecl()->lookup(
+ S.Context.DeclarationNames.getCXXOperatorName(OO_Arrow));
+ if (Res2.empty())
+ return false;
+
+ return true;
+}
+
+/// \brief Check if passed in Decl is a pointer type.
+/// Note that this function may produce an error message.
+/// \return true if the Decl is a pointer type; false otherwise
+static bool threadSafetyCheckIsPointer(Sema &S, const Decl *D,
+ const AttributeList &Attr) {
+ const ValueDecl *vd = cast<ValueDecl>(D);
+ QualType QT = vd->getType();
+ if (QT->isAnyPointerType())
+ return true;
+
+ if (const RecordType *RT = QT->getAs<RecordType>()) {
+ // If it's an incomplete type, it could be a smart pointer; skip it.
+ // (We don't want to force template instantiation if we can avoid it,
+ // since that would alter the order in which templates are instantiated.)
+ if (RT->isIncompleteType())
+ return true;
+
+ if (threadSafetyCheckIsSmartPointer(S, RT))
+ return true;
+ }
+
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_decl_not_pointer)
+ << Attr.getName() << QT;
+ return false;
+}
+
+/// \brief Checks that the passed-in QualType either is of RecordType or
+/// points to RecordType. Returns the relevant RecordType, or null if it does
+/// not exist.
+static const RecordType *getRecordType(QualType QT) {
+ if (const RecordType *RT = QT->getAs<RecordType>())
+ return RT;
+
+ // Now check if we point to record type.
+ if (const PointerType *PT = QT->getAs<PointerType>())
+ return PT->getPointeeType()->getAs<RecordType>();
+
+ return nullptr;
+}
+
+static bool checkRecordTypeForCapability(Sema &S, QualType Ty) {
+ const RecordType *RT = getRecordType(Ty);
+
+ if (!RT)
+ return false;
+
+ // Don't check for the capability if the class hasn't been defined yet.
+ if (RT->isIncompleteType())
+ return true;
+
+ // Allow smart pointers to be used as capability objects.
+ // FIXME -- Check the type that the smart pointer points to.
+ if (threadSafetyCheckIsSmartPointer(S, RT))
+ return true;
+
+ // Check if the record itself has a capability.
+ RecordDecl *RD = RT->getDecl();
+ if (RD->hasAttr<CapabilityAttr>())
+ return true;
+
+ // Else check if any base classes have a capability.
+ if (CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+ CXXBasePaths BPaths(false, false);
+ if (CRD->lookupInBases([](const CXXBaseSpecifier *BS, CXXBasePath &) {
+ const auto *Type = BS->getType()->getAs<RecordType>();
+ return Type->getDecl()->hasAttr<CapabilityAttr>();
+ }, BPaths))
+ return true;
+ }
+ return false;
+}
+
+static bool checkTypedefTypeForCapability(QualType Ty) {
+ const auto *TD = Ty->getAs<TypedefType>();
+ if (!TD)
+ return false;
+
+ TypedefNameDecl *TN = TD->getDecl();
+ if (!TN)
+ return false;
+
+ return TN->hasAttr<CapabilityAttr>();
+}
+
+static bool typeHasCapability(Sema &S, QualType Ty) {
+ if (checkTypedefTypeForCapability(Ty))
+ return true;
+
+ if (checkRecordTypeForCapability(S, Ty))
+ return true;
+
+ return false;
+}
+
+static bool isCapabilityExpr(Sema &S, const Expr *Ex) {
+ // Capability expressions are simple expressions involving the boolean logic
+ // operators &&, || or !, a simple DeclRefExpr, CastExpr or a ParenExpr. Once
+ // a DeclRefExpr is found, its type should be checked to determine whether it
+ // is a capability or not.
+
+ if (const auto *E = dyn_cast<DeclRefExpr>(Ex))
+ return typeHasCapability(S, E->getType());
+ else if (const auto *E = dyn_cast<CastExpr>(Ex))
+ return isCapabilityExpr(S, E->getSubExpr());
+ else if (const auto *E = dyn_cast<ParenExpr>(Ex))
+ return isCapabilityExpr(S, E->getSubExpr());
+ else if (const auto *E = dyn_cast<UnaryOperator>(Ex)) {
+ if (E->getOpcode() == UO_LNot)
+ return isCapabilityExpr(S, E->getSubExpr());
+ return false;
+ } else if (const auto *E = dyn_cast<BinaryOperator>(Ex)) {
+ if (E->getOpcode() == BO_LAnd || E->getOpcode() == BO_LOr)
+ return isCapabilityExpr(S, E->getLHS()) &&
+ isCapabilityExpr(S, E->getRHS());
+ return false;
+ }
+
+ return false;
+}
+
+/// \brief Checks that all attribute arguments, starting from Sidx, resolve to
+/// a capability object.
+/// \param Sidx The attribute argument index to start checking with.
+/// \param ParamIdxOk Whether an argument can be indexing into a function
+/// parameter list.
+static void checkAttrArgsAreCapabilityObjs(Sema &S, Decl *D,
+ const AttributeList &Attr,
+ SmallVectorImpl<Expr *> &Args,
+ int Sidx = 0,
+ bool ParamIdxOk = false) {
+ for (unsigned Idx = Sidx; Idx < Attr.getNumArgs(); ++Idx) {
+ Expr *ArgExp = Attr.getArgAsExpr(Idx);
+
+ if (ArgExp->isTypeDependent()) {
+ // FIXME -- need to check this again on template instantiation
+ Args.push_back(ArgExp);
+ continue;
+ }
+
+ if (StringLiteral *StrLit = dyn_cast<StringLiteral>(ArgExp)) {
+ if (StrLit->getLength() == 0 ||
+ (StrLit->isAscii() && StrLit->getString() == StringRef("*"))) {
+ // Pass empty strings to the analyzer without warnings.
+ // Treat "*" as the universal lock.
+ Args.push_back(ArgExp);
+ continue;
+ }
+
+ // We allow constant strings to be used as a placeholder for expressions
+ // that are not valid C++ syntax, but warn that they are ignored.
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_ignored) <<
+ Attr.getName();
+ Args.push_back(ArgExp);
+ continue;
+ }
+
+ QualType ArgTy = ArgExp->getType();
+
+ // A pointer to member expression of the form &MyClass::mu is treated
+ // specially -- we need to look at the type of the member.
+ if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(ArgExp))
+ if (UOp->getOpcode() == UO_AddrOf)
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(UOp->getSubExpr()))
+ if (DRE->getDecl()->isCXXInstanceMember())
+ ArgTy = DRE->getDecl()->getType();
+
+ // First see if we can just cast to record type, or pointer to record type.
+ const RecordType *RT = getRecordType(ArgTy);
+
+ // Now check if we index into a record type function param.
+    if (!RT && ParamIdxOk) {
+      FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+      IntegerLiteral *IL = dyn_cast<IntegerLiteral>(ArgExp);
+      if (FD && IL) {
+        unsigned int NumParams = FD->getNumParams();
+        llvm::APInt ArgValue = IL->getValue();
+        uint64_t ParamIdxFromOne = ArgValue.getZExtValue();
+        uint64_t ParamIdxFromZero = ParamIdxFromOne - 1;
+        if (!ArgValue.isStrictlyPositive() || ParamIdxFromOne > NumParams) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_range)
+ << Attr.getName() << Idx + 1 << NumParams;
+ continue;
+ }
+ ArgTy = FD->getParamDecl(ParamIdxFromZero)->getType();
+ }
+ }
+
+ // If the type does not have a capability, see if the components of the
+ // expression have capabilities. This allows for writing C code where the
+ // capability may be on the type, and the expression is a capability
+ // boolean logic expression. Eg) requires_capability(A || B && !C)
+ if (!typeHasCapability(S, ArgTy) && !isCapabilityExpr(S, ArgExp))
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_argument_not_lockable)
+ << Attr.getName() << ArgTy;
+
+ Args.push_back(ArgExp);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Attribute Implementations
+//===----------------------------------------------------------------------===//
+
+static void handlePtGuardedVarAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!threadSafetyCheckIsPointer(S, D, Attr))
+ return;
+
+ D->addAttr(::new (S.Context)
+ PtGuardedVarAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static bool checkGuardedByAttrCommon(Sema &S, Decl *D,
+ const AttributeList &Attr,
+ Expr* &Arg) {
+ SmallVector<Expr*, 1> Args;
+ // check that all arguments are lockable objects
+ checkAttrArgsAreCapabilityObjs(S, D, Attr, Args);
+ unsigned Size = Args.size();
+ if (Size != 1)
+ return false;
+
+ Arg = Args[0];
+
+ return true;
+}
+
+static void handleGuardedByAttr(Sema &S, Decl *D, const AttributeList &Attr) {
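+  // Illustrative (hypothetical) user code for this attribute, assuming
+  // 'Mutex' is a type annotated as a capability:
+  //   Mutex mu;
+  //   int data __attribute__((guarded_by(mu)));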
+ Expr *Arg = nullptr;
+ if (!checkGuardedByAttrCommon(S, D, Attr, Arg))
+ return;
+
+ D->addAttr(::new (S.Context) GuardedByAttr(Attr.getRange(), S.Context, Arg,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handlePtGuardedByAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ Expr *Arg = nullptr;
+ if (!checkGuardedByAttrCommon(S, D, Attr, Arg))
+ return;
+
+ if (!threadSafetyCheckIsPointer(S, D, Attr))
+ return;
+
+ D->addAttr(::new (S.Context) PtGuardedByAttr(Attr.getRange(),
+ S.Context, Arg,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static bool checkAcquireOrderAttrCommon(Sema &S, Decl *D,
+ const AttributeList &Attr,
+ SmallVectorImpl<Expr *> &Args) {
+ if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+ return false;
+
+ // Check that this attribute only applies to lockable types.
+ QualType QT = cast<ValueDecl>(D)->getType();
+ if (!QT->isDependentType() && !typeHasCapability(S, QT)) {
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_decl_not_lockable)
+ << Attr.getName();
+ return false;
+ }
+
+ // Check that all arguments are lockable objects.
+ checkAttrArgsAreCapabilityObjs(S, D, Attr, Args);
+ if (Args.empty())
+ return false;
+
+ return true;
+}
+
+static void handleAcquiredAfterAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ SmallVector<Expr*, 1> Args;
+ if (!checkAcquireOrderAttrCommon(S, D, Attr, Args))
+ return;
+
+ Expr **StartArg = &Args[0];
+ D->addAttr(::new (S.Context)
+ AcquiredAfterAttr(Attr.getRange(), S.Context,
+ StartArg, Args.size(),
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleAcquiredBeforeAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ SmallVector<Expr*, 1> Args;
+ if (!checkAcquireOrderAttrCommon(S, D, Attr, Args))
+ return;
+
+ Expr **StartArg = &Args[0];
+ D->addAttr(::new (S.Context)
+ AcquiredBeforeAttr(Attr.getRange(), S.Context,
+ StartArg, Args.size(),
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static bool checkLockFunAttrCommon(Sema &S, Decl *D,
+ const AttributeList &Attr,
+ SmallVectorImpl<Expr *> &Args) {
+ // zero or more arguments ok
+ // check that all arguments are lockable objects
+ checkAttrArgsAreCapabilityObjs(S, D, Attr, Args, 0, /*ParamIdxOk=*/true);
+
+ return true;
+}
+
+static void handleAssertSharedLockAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ SmallVector<Expr*, 1> Args;
+ if (!checkLockFunAttrCommon(S, D, Attr, Args))
+ return;
+
+ unsigned Size = Args.size();
+ Expr **StartArg = Size == 0 ? nullptr : &Args[0];
+ D->addAttr(::new (S.Context)
+ AssertSharedLockAttr(Attr.getRange(), S.Context, StartArg, Size,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleAssertExclusiveLockAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ SmallVector<Expr*, 1> Args;
+ if (!checkLockFunAttrCommon(S, D, Attr, Args))
+ return;
+
+ unsigned Size = Args.size();
+ Expr **StartArg = Size == 0 ? nullptr : &Args[0];
+ D->addAttr(::new (S.Context)
+ AssertExclusiveLockAttr(Attr.getRange(), S.Context,
+ StartArg, Size,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+
+static bool checkTryLockFunAttrCommon(Sema &S, Decl *D,
+ const AttributeList &Attr,
+ SmallVectorImpl<Expr *> &Args) {
+ if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+ return false;
+
+ if (!isIntOrBool(Attr.getArgAsExpr(0))) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
+ << Attr.getName() << 1 << AANT_ArgumentIntOrBool;
+ return false;
+ }
+
+ // check that all arguments are lockable objects
+ checkAttrArgsAreCapabilityObjs(S, D, Attr, Args, 1);
+
+ return true;
+}
+
+static void handleSharedTrylockFunctionAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ SmallVector<Expr*, 2> Args;
+ if (!checkTryLockFunAttrCommon(S, D, Attr, Args))
+ return;
+
+ D->addAttr(::new (S.Context)
+ SharedTrylockFunctionAttr(Attr.getRange(), S.Context,
+ Attr.getArgAsExpr(0),
+ Args.data(), Args.size(),
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleExclusiveTrylockFunctionAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ SmallVector<Expr*, 2> Args;
+ if (!checkTryLockFunAttrCommon(S, D, Attr, Args))
+ return;
+
+ D->addAttr(::new (S.Context) ExclusiveTrylockFunctionAttr(
+ Attr.getRange(), S.Context, Attr.getArgAsExpr(0), Args.data(),
+ Args.size(), Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleLockReturnedAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+  // check that the argument is a lockable object
+ SmallVector<Expr*, 1> Args;
+ checkAttrArgsAreCapabilityObjs(S, D, Attr, Args);
+ unsigned Size = Args.size();
+ if (Size == 0)
+ return;
+
+ D->addAttr(::new (S.Context)
+ LockReturnedAttr(Attr.getRange(), S.Context, Args[0],
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleLocksExcludedAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+ return;
+
+ // check that all arguments are lockable objects
+ SmallVector<Expr*, 1> Args;
+ checkAttrArgsAreCapabilityObjs(S, D, Attr, Args);
+ unsigned Size = Args.size();
+ if (Size == 0)
+ return;
+ Expr **StartArg = &Args[0];
+
+ D->addAttr(::new (S.Context)
+ LocksExcludedAttr(Attr.getRange(), S.Context, StartArg, Size,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleEnableIfAttr(Sema &S, Decl *D, const AttributeList &Attr) {
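+  // Illustrative (hypothetical) user code:
+  //   void f(int n) __attribute__((enable_if(n > 0, "n must be positive")));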
+ Expr *Cond = Attr.getArgAsExpr(0);
+ if (!Cond->isTypeDependent()) {
+ ExprResult Converted = S.PerformContextuallyConvertToBool(Cond);
+ if (Converted.isInvalid())
+ return;
+ Cond = Converted.get();
+ }
+
+ StringRef Msg;
+ if (!S.checkStringLiteralArgumentAttr(Attr, 1, Msg))
+ return;
+
+ SmallVector<PartialDiagnosticAt, 8> Diags;
+ if (!Cond->isValueDependent() &&
+ !Expr::isPotentialConstantExprUnevaluated(Cond, cast<FunctionDecl>(D),
+ Diags)) {
+ S.Diag(Attr.getLoc(), diag::err_enable_if_never_constant_expr);
+ for (int I = 0, N = Diags.size(); I != N; ++I)
+ S.Diag(Diags[I].first, Diags[I].second);
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ EnableIfAttr(Attr.getRange(), S.Context, Cond, Msg,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handlePassObjectSizeAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (D->hasAttr<PassObjectSizeAttr>()) {
+ S.Diag(D->getLocStart(), diag::err_attribute_only_once_per_parameter)
+ << Attr.getName();
+ return;
+ }
+
+ Expr *E = Attr.getArgAsExpr(0);
+ uint32_t Type;
+ if (!checkUInt32Argument(S, Attr, E, Type, /*Idx=*/1))
+ return;
+
+ // pass_object_size's argument is passed in as the second argument of
+ // __builtin_object_size. So, it has the same constraints as that second
+ // argument; namely, it must be in the range [0, 3].
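+  // Illustrative (hypothetical) user code:
+  //   void f(void *const p __attribute__((pass_object_size(0))));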
+ if (Type > 3) {
+ S.Diag(E->getLocStart(), diag::err_attribute_argument_outof_range)
+ << Attr.getName() << 0 << 3 << E->getSourceRange();
+ return;
+ }
+
+ // pass_object_size is only supported on constant pointer parameters; as a
+ // kindness to users, we allow the parameter to be non-const for declarations.
+ // At this point, we have no clue if `D` belongs to a function declaration or
+ // definition, so we defer the constness check until later.
+ if (!cast<ParmVarDecl>(D)->getType()->isPointerType()) {
+ S.Diag(D->getLocStart(), diag::err_attribute_pointers_only)
+ << Attr.getName() << 1;
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ PassObjectSizeAttr(Attr.getRange(), S.Context, (int)Type,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleConsumableAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ ConsumableAttr::ConsumedState DefaultState;
+
+ if (Attr.isArgIdent(0)) {
+ IdentifierLoc *IL = Attr.getArgAsIdent(0);
+ if (!ConsumableAttr::ConvertStrToConsumedState(IL->Ident->getName(),
+ DefaultState)) {
+ S.Diag(IL->Loc, diag::warn_attribute_type_not_supported)
+ << Attr.getName() << IL->Ident;
+ return;
+ }
+ } else {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
+ << Attr.getName() << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ ConsumableAttr(Attr.getRange(), S.Context, DefaultState,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+
+static bool checkForConsumableClass(Sema &S, const CXXMethodDecl *MD,
+ const AttributeList &Attr) {
+ ASTContext &CurrContext = S.getASTContext();
+ QualType ThisType = MD->getThisType(CurrContext)->getPointeeType();
+
+ if (const CXXRecordDecl *RD = ThisType->getAsCXXRecordDecl()) {
+ if (!RD->hasAttr<ConsumableAttr>()) {
+ S.Diag(Attr.getLoc(), diag::warn_attr_on_unconsumable_class) <<
+ RD->getNameAsString();
+
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+static void handleCallableWhenAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
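+  // Illustrative (hypothetical) user code, on a method of a class that
+  // carries the 'consumable' attribute:
+  //   void use() __attribute__((callable_when("unconsumed")));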
+ if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+ return;
+
+ if (!checkForConsumableClass(S, cast<CXXMethodDecl>(D), Attr))
+ return;
+
+ SmallVector<CallableWhenAttr::ConsumedState, 3> States;
+ for (unsigned ArgIndex = 0; ArgIndex < Attr.getNumArgs(); ++ArgIndex) {
+ CallableWhenAttr::ConsumedState CallableState;
+
+ StringRef StateString;
+ SourceLocation Loc;
+ if (Attr.isArgIdent(ArgIndex)) {
+ IdentifierLoc *Ident = Attr.getArgAsIdent(ArgIndex);
+ StateString = Ident->Ident->getName();
+ Loc = Ident->Loc;
+ } else {
+ if (!S.checkStringLiteralArgumentAttr(Attr, ArgIndex, StateString, &Loc))
+ return;
+ }
+
+ if (!CallableWhenAttr::ConvertStrToConsumedState(StateString,
+ CallableState)) {
+ S.Diag(Loc, diag::warn_attribute_type_not_supported)
+ << Attr.getName() << StateString;
+ return;
+ }
+
+ States.push_back(CallableState);
+ }
+
+ D->addAttr(::new (S.Context)
+ CallableWhenAttr(Attr.getRange(), S.Context, States.data(),
+ States.size(), Attr.getAttributeSpellingListIndex()));
+}
+
+
+static void handleParamTypestateAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ ParamTypestateAttr::ConsumedState ParamState;
+
+ if (Attr.isArgIdent(0)) {
+ IdentifierLoc *Ident = Attr.getArgAsIdent(0);
+ StringRef StateString = Ident->Ident->getName();
+
+ if (!ParamTypestateAttr::ConvertStrToConsumedState(StateString,
+ ParamState)) {
+ S.Diag(Ident->Loc, diag::warn_attribute_type_not_supported)
+ << Attr.getName() << StateString;
+ return;
+ }
+ } else {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) <<
+ Attr.getName() << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ // FIXME: This check is currently being done in the analysis. It can be
+ // enabled here only after the parser propagates attributes at
+ // template specialization definition, not declaration.
+ //QualType ReturnType = cast<ParmVarDecl>(D)->getType();
+ //const CXXRecordDecl *RD = ReturnType->getAsCXXRecordDecl();
+ //
+ //if (!RD || !RD->hasAttr<ConsumableAttr>()) {
+ // S.Diag(Attr.getLoc(), diag::warn_return_state_for_unconsumable_type) <<
+ // ReturnType.getAsString();
+ // return;
+ //}
+
+ D->addAttr(::new (S.Context)
+ ParamTypestateAttr(Attr.getRange(), S.Context, ParamState,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+
+static void handleReturnTypestateAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ ReturnTypestateAttr::ConsumedState ReturnState;
+
+ if (Attr.isArgIdent(0)) {
+ IdentifierLoc *IL = Attr.getArgAsIdent(0);
+ if (!ReturnTypestateAttr::ConvertStrToConsumedState(IL->Ident->getName(),
+ ReturnState)) {
+ S.Diag(IL->Loc, diag::warn_attribute_type_not_supported)
+ << Attr.getName() << IL->Ident;
+ return;
+ }
+ } else {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) <<
+ Attr.getName() << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ // FIXME: This check is currently being done in the analysis. It can be
+ // enabled here only after the parser propagates attributes at
+ // template specialization definition, not declaration.
+ //QualType ReturnType;
+ //
+ //if (const ParmVarDecl *Param = dyn_cast<ParmVarDecl>(D)) {
+ // ReturnType = Param->getType();
+ //
+ //} else if (const CXXConstructorDecl *Constructor =
+ // dyn_cast<CXXConstructorDecl>(D)) {
+ // ReturnType = Constructor->getThisType(S.getASTContext())->getPointeeType();
+ //
+ //} else {
+ //
+ // ReturnType = cast<FunctionDecl>(D)->getCallResultType();
+ //}
+ //
+ //const CXXRecordDecl *RD = ReturnType->getAsCXXRecordDecl();
+ //
+ //if (!RD || !RD->hasAttr<ConsumableAttr>()) {
+ // S.Diag(Attr.getLoc(), diag::warn_return_state_for_unconsumable_type) <<
+ // ReturnType.getAsString();
+ // return;
+ //}
+
+ D->addAttr(::new (S.Context)
+ ReturnTypestateAttr(Attr.getRange(), S.Context, ReturnState,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+
+static void handleSetTypestateAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!checkForConsumableClass(S, cast<CXXMethodDecl>(D), Attr))
+ return;
+
+ SetTypestateAttr::ConsumedState NewState;
+ if (Attr.isArgIdent(0)) {
+ IdentifierLoc *Ident = Attr.getArgAsIdent(0);
+ StringRef Param = Ident->Ident->getName();
+ if (!SetTypestateAttr::ConvertStrToConsumedState(Param, NewState)) {
+ S.Diag(Ident->Loc, diag::warn_attribute_type_not_supported)
+ << Attr.getName() << Param;
+ return;
+ }
+ } else {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) <<
+ Attr.getName() << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ SetTypestateAttr(Attr.getRange(), S.Context, NewState,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleTestTypestateAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!checkForConsumableClass(S, cast<CXXMethodDecl>(D), Attr))
+ return;
+
+ TestTypestateAttr::ConsumedState TestState;
+ if (Attr.isArgIdent(0)) {
+ IdentifierLoc *Ident = Attr.getArgAsIdent(0);
+ StringRef Param = Ident->Ident->getName();
+ if (!TestTypestateAttr::ConvertStrToConsumedState(Param, TestState)) {
+ S.Diag(Ident->Loc, diag::warn_attribute_type_not_supported)
+ << Attr.getName() << Param;
+ return;
+ }
+ } else {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) <<
+ Attr.getName() << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ TestTypestateAttr(Attr.getRange(), S.Context, TestState,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleExtVectorTypeAttr(Sema &S, Scope *scope, Decl *D,
+ const AttributeList &Attr) {
+  // Remember this typedef decl; we will need it later for diagnostics.
+ S.ExtVectorDecls.push_back(cast<TypedefNameDecl>(D));
+}
+
+static void handlePackedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
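+  // Illustrative (hypothetical) user code:
+  //   struct __attribute__((packed)) S { char c; int i; };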
+ if (TagDecl *TD = dyn_cast<TagDecl>(D))
+ TD->addAttr(::new (S.Context) PackedAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+ else if (FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
+ // Report warning about changed offset in the newer compiler versions.
+ if (!FD->getType()->isDependentType() &&
+ !FD->getType()->isIncompleteType() && FD->isBitField() &&
+ S.Context.getTypeAlign(FD->getType()) <= 8)
+ S.Diag(Attr.getLoc(), diag::warn_attribute_packed_for_bitfield);
+
+ FD->addAttr(::new (S.Context) PackedAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ } else
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+}
+
+static bool checkIBOutletCommon(Sema &S, Decl *D, const AttributeList &Attr) {
+ // The IBOutlet/IBOutletCollection attributes only apply to instance
+ // variables or properties of Objective-C classes. The outlet must also
+ // have an object reference type.
+ if (const ObjCIvarDecl *VD = dyn_cast<ObjCIvarDecl>(D)) {
+ if (!VD->getType()->getAs<ObjCObjectPointerType>()) {
+ S.Diag(Attr.getLoc(), diag::warn_iboutlet_object_type)
+ << Attr.getName() << VD->getType() << 0;
+ return false;
+ }
+ }
+ else if (const ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D)) {
+ if (!PD->getType()->getAs<ObjCObjectPointerType>()) {
+ S.Diag(Attr.getLoc(), diag::warn_iboutlet_object_type)
+ << Attr.getName() << PD->getType() << 1;
+ return false;
+ }
+ }
+ else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_iboutlet) << Attr.getName();
+ return false;
+ }
+
+ return true;
+}
+
+static void handleIBOutlet(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!checkIBOutletCommon(S, D, Attr))
+ return;
+
+ D->addAttr(::new (S.Context)
+ IBOutletAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleIBOutletCollection(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+
+  // The iboutletcollection attribute can have zero or one argument.
+ if (Attr.getNumArgs() > 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << Attr.getName() << 1;
+ return;
+ }
+
+ if (!checkIBOutletCommon(S, D, Attr))
+ return;
+
+ ParsedType PT;
+
+ if (Attr.hasParsedType())
+ PT = Attr.getTypeArg();
+ else {
+ PT = S.getTypeName(S.Context.Idents.get("NSObject"), Attr.getLoc(),
+ S.getScopeForContext(D->getDeclContext()->getParent()));
+ if (!PT) {
+ S.Diag(Attr.getLoc(), diag::err_iboutletcollection_type) << "NSObject";
+ return;
+ }
+ }
+
+ TypeSourceInfo *QTLoc = nullptr;
+ QualType QT = S.GetTypeFromParser(PT, &QTLoc);
+ if (!QTLoc)
+ QTLoc = S.Context.getTrivialTypeSourceInfo(QT, Attr.getLoc());
+
+ // Diagnose use of non-object type in iboutletcollection attribute.
+  // FIXME: The GNU attribute extension ignores use of builtin types in
+  // attributes. So, __attribute__((iboutletcollection(char))) will be
+  // treated as __attribute__((iboutletcollection())).
+ if (!QT->isObjCIdType() && !QT->isObjCObjectType()) {
+ S.Diag(Attr.getLoc(),
+ QT->isBuiltinType() ? diag::err_iboutletcollection_builtintype
+ : diag::err_iboutletcollection_type) << QT;
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ IBOutletCollectionAttr(Attr.getRange(), S.Context, QTLoc,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+bool Sema::isValidPointerAttrType(QualType T, bool RefOkay) {
+ if (RefOkay) {
+ if (T->isReferenceType())
+ return true;
+ } else {
+ T = T.getNonReferenceType();
+ }
+
+ // The nonnull attribute, and other similar attributes, can be applied to a
+ // transparent union that contains a pointer type.
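+  // (Illustration, hypothetical: 'typedef union { int *ip; float *fp; } U
+  // __attribute__((transparent_union));' qualifies because of its pointer
+  // members.)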
+ if (const RecordType *UT = T->getAsUnionType()) {
+ if (UT && UT->getDecl()->hasAttr<TransparentUnionAttr>()) {
+ RecordDecl *UD = UT->getDecl();
+ for (const auto *I : UD->fields()) {
+ QualType QT = I->getType();
+ if (QT->isAnyPointerType() || QT->isBlockPointerType())
+ return true;
+ }
+ }
+ }
+
+ return T->isAnyPointerType() || T->isBlockPointerType();
+}
+
+static bool attrNonNullArgCheck(Sema &S, QualType T, const AttributeList &Attr,
+ SourceRange AttrParmRange,
+ SourceRange TypeRange,
+ bool isReturnValue = false) {
+ if (!S.isValidPointerAttrType(T)) {
+ if (isReturnValue)
+ S.Diag(Attr.getLoc(), diag::warn_attribute_return_pointers_only)
+ << Attr.getName() << AttrParmRange << TypeRange;
+ else
+ S.Diag(Attr.getLoc(), diag::warn_attribute_pointers_only)
+ << Attr.getName() << AttrParmRange << TypeRange << 0;
+ return false;
+ }
+ return true;
+}
+
+static void handleNonNullAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ SmallVector<unsigned, 8> NonNullArgs;
+ for (unsigned I = 0; I < Attr.getNumArgs(); ++I) {
+ Expr *Ex = Attr.getArgAsExpr(I);
+ uint64_t Idx;
+ if (!checkFunctionOrMethodParameterIndex(S, D, Attr, I + 1, Ex, Idx))
+ return;
+
+ // Is the function argument a pointer type?
+ if (Idx < getFunctionOrMethodNumParams(D) &&
+ !attrNonNullArgCheck(S, getFunctionOrMethodParamType(D, Idx), Attr,
+ Ex->getSourceRange(),
+ getFunctionOrMethodParamRange(D, Idx)))
+ continue;
+
+ NonNullArgs.push_back(Idx);
+ }
+
+ // If no arguments were specified to __attribute__((nonnull)) then all pointer
+ // arguments have a nonnull attribute; warn if there aren't any. Skip this
+ // check if the attribute came from a macro expansion or a template
+ // instantiation.
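+  // (Illustration, hypothetical: 'void f(int *, char *)
+  // __attribute__((nonnull));' marks both pointer parameters as nonnull.)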
+ if (NonNullArgs.empty() && Attr.getLoc().isFileID() &&
+ S.ActiveTemplateInstantiations.empty()) {
+ bool AnyPointers = isFunctionOrMethodVariadic(D);
+ for (unsigned I = 0, E = getFunctionOrMethodNumParams(D);
+ I != E && !AnyPointers; ++I) {
+ QualType T = getFunctionOrMethodParamType(D, I);
+ if (T->isDependentType() || S.isValidPointerAttrType(T))
+ AnyPointers = true;
+ }
+
+ if (!AnyPointers)
+ S.Diag(Attr.getLoc(), diag::warn_attribute_nonnull_no_pointers);
+ }
+
+ unsigned *Start = NonNullArgs.data();
+ unsigned Size = NonNullArgs.size();
+ llvm::array_pod_sort(Start, Start + Size);
+ D->addAttr(::new (S.Context)
+ NonNullAttr(Attr.getRange(), S.Context, Start, Size,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleNonNullAttrParameter(Sema &S, ParmVarDecl *D,
+ const AttributeList &Attr) {
+ if (Attr.getNumArgs() > 0) {
+ if (D->getFunctionType()) {
+ handleNonNullAttr(S, D, Attr);
+ } else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_nonnull_parm_no_args)
+ << D->getSourceRange();
+ }
+ return;
+ }
+
+ // Is the argument a pointer type?
+ if (!attrNonNullArgCheck(S, D->getType(), Attr, SourceRange(),
+ D->getSourceRange()))
+ return;
+
+ D->addAttr(::new (S.Context)
+ NonNullAttr(Attr.getRange(), S.Context, nullptr, 0,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleReturnsNonNullAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ QualType ResultType = getFunctionOrMethodResultType(D);
+ SourceRange SR = getFunctionOrMethodResultSourceRange(D);
+ if (!attrNonNullArgCheck(S, ResultType, Attr, SourceRange(), SR,
+ /* isReturnValue */ true))
+ return;
+
+ D->addAttr(::new (S.Context)
+ ReturnsNonNullAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleAssumeAlignedAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
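+  // Illustrative (hypothetical) user code:
+  //   void *my_alloc(void) __attribute__((assume_aligned(64)));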
+ Expr *E = Attr.getArgAsExpr(0),
+ *OE = Attr.getNumArgs() > 1 ? Attr.getArgAsExpr(1) : nullptr;
+ S.AddAssumeAlignedAttr(Attr.getRange(), D, E, OE,
+ Attr.getAttributeSpellingListIndex());
+}
+
+void Sema::AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
+ Expr *OE, unsigned SpellingListIndex) {
+ QualType ResultType = getFunctionOrMethodResultType(D);
+ SourceRange SR = getFunctionOrMethodResultSourceRange(D);
+
+ AssumeAlignedAttr TmpAttr(AttrRange, Context, E, OE, SpellingListIndex);
+ SourceLocation AttrLoc = AttrRange.getBegin();
+
+ if (!isValidPointerAttrType(ResultType, /* RefOkay */ true)) {
+ Diag(AttrLoc, diag::warn_attribute_return_pointers_refs_only)
+ << &TmpAttr << AttrRange << SR;
+ return;
+ }
+
+ if (!E->isValueDependent()) {
+ llvm::APSInt I(64);
+ if (!E->isIntegerConstantExpr(I, Context)) {
+ if (OE)
+ Diag(AttrLoc, diag::err_attribute_argument_n_type)
+ << &TmpAttr << 1 << AANT_ArgumentIntegerConstant
+ << E->getSourceRange();
+ else
+ Diag(AttrLoc, diag::err_attribute_argument_type)
+ << &TmpAttr << AANT_ArgumentIntegerConstant
+ << E->getSourceRange();
+ return;
+ }
+
+ if (!I.isPowerOf2()) {
+ Diag(AttrLoc, diag::err_alignment_not_power_of_two)
+ << E->getSourceRange();
+ return;
+ }
+ }
+
+ if (OE) {
+ if (!OE->isValueDependent()) {
+ llvm::APSInt I(64);
+ if (!OE->isIntegerConstantExpr(I, Context)) {
+ Diag(AttrLoc, diag::err_attribute_argument_n_type)
+ << &TmpAttr << 2 << AANT_ArgumentIntegerConstant
+ << OE->getSourceRange();
+ return;
+ }
+ }
+ }
+
+ D->addAttr(::new (Context)
+ AssumeAlignedAttr(AttrRange, Context, E, OE, SpellingListIndex));
+}
+
+/// Normalize the attribute name: __foo__ becomes foo.
+/// Returns true if normalization was applied.
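+/// For example, "__noreturn__" normalizes to "noreturn".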
+static bool normalizeName(StringRef &AttrName) {
+ if (AttrName.size() > 4 && AttrName.startswith("__") &&
+ AttrName.endswith("__")) {
+ AttrName = AttrName.drop_front(2).drop_back(2);
+ return true;
+ }
+ return false;
+}
+
+static void handleOwnershipAttr(Sema &S, Decl *D, const AttributeList &AL) {
+  // This attribute must be applied to a function declaration. The first
+  // argument to the attribute must be an identifier, the name of the resource,
+  // for example: malloc. The following arguments must be argument indexes;
+  // the arguments must be of integer type for Returns, otherwise of pointer
+  // type. The difference between Holds and Takes is that a pointer may still
+  // be used after being held. free() should be
+  // __attribute__((ownership_takes)), whereas a list append function may well
+  // be __attribute__((ownership_holds)).
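+  //
+  // Illustrative (hypothetical) user code:
+  //   void *my_alloc(size_t) __attribute__((ownership_returns(my_alloc)));
+  //   void my_free(void *p) __attribute__((ownership_takes(my_alloc, 1)));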
+
+ if (!AL.isArgIdent(0)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
+ << AL.getName() << 1 << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ // Figure out our Kind.
+ OwnershipAttr::OwnershipKind K =
+ OwnershipAttr(AL.getLoc(), S.Context, nullptr, nullptr, 0,
+ AL.getAttributeSpellingListIndex()).getOwnKind();
+
+ // Check arguments.
+ switch (K) {
+ case OwnershipAttr::Takes:
+ case OwnershipAttr::Holds:
+ if (AL.getNumArgs() < 2) {
+ S.Diag(AL.getLoc(), diag::err_attribute_too_few_arguments)
+ << AL.getName() << 2;
+ return;
+ }
+ break;
+ case OwnershipAttr::Returns:
+ if (AL.getNumArgs() > 2) {
+ S.Diag(AL.getLoc(), diag::err_attribute_too_many_arguments)
+ << AL.getName() << 1;
+ return;
+ }
+ break;
+ }
+
+ IdentifierInfo *Module = AL.getArgAsIdent(0)->Ident;
+
+ StringRef ModuleName = Module->getName();
+ if (normalizeName(ModuleName)) {
+ Module = &S.PP.getIdentifierTable().get(ModuleName);
+ }
+
+ SmallVector<unsigned, 8> OwnershipArgs;
+ for (unsigned i = 1; i < AL.getNumArgs(); ++i) {
+ Expr *Ex = AL.getArgAsExpr(i);
+ uint64_t Idx;
+ if (!checkFunctionOrMethodParameterIndex(S, D, AL, i, Ex, Idx))
+ return;
+
+ // Is the function argument a pointer type?
+ QualType T = getFunctionOrMethodParamType(D, Idx);
+ int Err = -1; // No error
+ switch (K) {
+ case OwnershipAttr::Takes:
+ case OwnershipAttr::Holds:
+ if (!T->isAnyPointerType() && !T->isBlockPointerType())
+ Err = 0;
+ break;
+ case OwnershipAttr::Returns:
+ if (!T->isIntegerType())
+ Err = 1;
+ break;
+ }
+    if (Err != -1) {
+ S.Diag(AL.getLoc(), diag::err_ownership_type) << AL.getName() << Err
+ << Ex->getSourceRange();
+ return;
+ }
+
+ // Check we don't have a conflict with another ownership attribute.
+ for (const auto *I : D->specific_attrs<OwnershipAttr>()) {
+ // Cannot have two ownership attributes of different kinds for the same
+ // index.
+ if (I->getOwnKind() != K && I->args_end() !=
+ std::find(I->args_begin(), I->args_end(), Idx)) {
+ S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)
+ << AL.getName() << I;
+ return;
+ } else if (K == OwnershipAttr::Returns &&
+ I->getOwnKind() == OwnershipAttr::Returns) {
+ // A returns attribute conflicts with any other returns attribute using
+ // a different index. Note, diagnostic reporting is 1-based, but stored
+ // argument indexes are 0-based.
+ if (std::find(I->args_begin(), I->args_end(), Idx) == I->args_end()) {
+ S.Diag(I->getLocation(), diag::err_ownership_returns_index_mismatch)
+ << *(I->args_begin()) + 1;
+ if (I->args_size())
+ S.Diag(AL.getLoc(), diag::note_ownership_returns_index_mismatch)
+ << (unsigned)Idx + 1 << Ex->getSourceRange();
+ return;
+ }
+ }
+ }
+ OwnershipArgs.push_back(Idx);
+ }
+
+  unsigned *Start = OwnershipArgs.data();
+  unsigned Size = OwnershipArgs.size();
+  llvm::array_pod_sort(Start, Start + Size);
+
+  D->addAttr(::new (S.Context)
+             OwnershipAttr(AL.getLoc(), S.Context, Module, Start, Size,
+                           AL.getAttributeSpellingListIndex()));
+}
+
+static void handleWeakRefAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() > 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << Attr.getName() << 1;
+ return;
+ }
+
+ NamedDecl *nd = cast<NamedDecl>(D);
+
+ // gcc rejects
+ // class c {
+ // static int a __attribute__((weakref ("v2")));
+ // static int b() __attribute__((weakref ("f3")));
+ // };
+ // and ignores the attributes of
+ // void f(void) {
+ // static int a __attribute__((weakref ("v2")));
+ // }
+ // we reject them
+ const DeclContext *Ctx = D->getDeclContext()->getRedeclContext();
+ if (!Ctx->isFileContext()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_weakref_not_global_context)
+ << nd;
+ return;
+ }
+
+ // The GCC manual says
+ //
+ // At present, a declaration to which `weakref' is attached can only
+ // be `static'.
+ //
+ // It also says
+ //
+ // Without a TARGET,
+ // given as an argument to `weakref' or to `alias', `weakref' is
+ // equivalent to `weak'.
+ //
+ // gcc 4.4.1 will accept
+ // int a7 __attribute__((weakref));
+ // as
+ // int a7 __attribute__((weak));
+ // This looks like a bug in gcc. We reject that for now. We should revisit
+ // it if this behaviour is actually used.
+
+ // GCC rejects
+ // static ((alias ("y"), weakref)).
+ // Should we? How to check that weakref is before or after alias?
+
+ // FIXME: it would be good for us to keep the WeakRefAttr as-written instead
+ // of transforming it into an AliasAttr. The WeakRefAttr never uses the
+ // StringRef parameter it was given anyway.
+ StringRef Str;
+ if (Attr.getNumArgs() && S.checkStringLiteralArgumentAttr(Attr, 0, Str))
+ // GCC will accept anything as the argument of weakref. Should we
+ // check for an existing decl?
+ D->addAttr(::new (S.Context) AliasAttr(Attr.getRange(), S.Context, Str,
+ Attr.getAttributeSpellingListIndex()));
+
+ D->addAttr(::new (S.Context)
+ WeakRefAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleAliasAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ StringRef Str;
+ if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str))
+ return;
+
+ if (S.Context.getTargetInfo().getTriple().isOSDarwin()) {
+ S.Diag(Attr.getLoc(), diag::err_alias_not_supported_on_darwin);
+ return;
+ }
+
+ // Aliases should be on declarations, not definitions.
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isThisDeclarationADefinition()) {
+ S.Diag(Attr.getLoc(), diag::err_alias_is_definition) << FD;
+ return;
+ }
+ } else {
+ const auto *VD = cast<VarDecl>(D);
+ if (VD->isThisDeclarationADefinition() && VD->isExternallyVisible()) {
+ S.Diag(Attr.getLoc(), diag::err_alias_is_definition) << VD;
+ return;
+ }
+ }
+
+ // FIXME: check if target symbol exists in current file
+
+ D->addAttr(::new (S.Context) AliasAttr(Attr.getRange(), S.Context, Str,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleColdAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (checkAttrMutualExclusion<HotAttr>(S, D, Attr.getRange(), Attr.getName()))
+ return;
+
+ D->addAttr(::new (S.Context) ColdAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleHotAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (checkAttrMutualExclusion<ColdAttr>(S, D, Attr.getRange(), Attr.getName()))
+ return;
+
+ D->addAttr(::new (S.Context) HotAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleTLSModelAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ StringRef Model;
+ SourceLocation LiteralLoc;
+ // Check that it is a string.
+ if (!S.checkStringLiteralArgumentAttr(Attr, 0, Model, &LiteralLoc))
+ return;
+
+  // Check that the value is one of the supported TLS models.
+ if (Model != "global-dynamic" && Model != "local-dynamic"
+ && Model != "initial-exec" && Model != "local-exec") {
+ S.Diag(LiteralLoc, diag::err_attr_tlsmodel_arg);
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ TLSModelAttr(Attr.getRange(), S.Context, Model,
+ Attr.getAttributeSpellingListIndex()));
+}
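+
+// Illustrative usage of 'tls_model' (hypothetical variable): the argument
+// must be one of the four models checked above:
+//
+//   static __thread int counter __attribute__((tls_model("initial-exec")));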
+
+static void handleRestrictAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ QualType ResultType = getFunctionOrMethodResultType(D);
+ if (ResultType->isAnyPointerType() || ResultType->isBlockPointerType()) {
+ D->addAttr(::new (S.Context) RestrictAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ return;
+ }
+
+ S.Diag(Attr.getLoc(), diag::warn_attribute_return_pointers_only)
+ << Attr.getName() << getFunctionOrMethodResultSourceRange(D);
+}
+
+static void handleCommonAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (S.LangOpts.CPlusPlus) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_not_supported_in_lang)
+ << Attr.getName() << AttributeLangSupport::Cpp;
+ return;
+ }
+
+ if (CommonAttr *CA = S.mergeCommonAttr(D, Attr.getRange(), Attr.getName(),
+ Attr.getAttributeSpellingListIndex()))
+ D->addAttr(CA);
+}
+
+static void handleNakedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (checkAttrMutualExclusion<DisableTailCallsAttr>(S, D, Attr.getRange(),
+ Attr.getName()))
+ return;
+
+ D->addAttr(::new (S.Context) NakedAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleNoReturnAttr(Sema &S, Decl *D, const AttributeList &attr) {
+ if (hasDeclarator(D)) return;
+
+ if (S.CheckNoReturnAttr(attr)) return;
+
+ if (!isa<ObjCMethodDecl>(D)) {
+ S.Diag(attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << attr.getName() << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ NoReturnAttr(attr.getRange(), S.Context,
+ attr.getAttributeSpellingListIndex()));
+}
+
+bool Sema::CheckNoReturnAttr(const AttributeList &attr) {
+ if (!checkAttributeNumArgs(*this, attr, 0)) {
+ attr.setInvalid();
+ return true;
+ }
+
+ return false;
+}
+
+static void handleAnalyzerNoReturnAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+  // The checking paths for 'noreturn' and 'analyzer_noreturn' are different
+  // because 'analyzer_noreturn' does not impact the type.
+ if (!isFunctionOrMethodOrBlock(D)) {
+ ValueDecl *VD = dyn_cast<ValueDecl>(D);
+ if (!VD || (!VD->getType()->isBlockPointerType() &&
+ !VD->getType()->isFunctionPointerType())) {
+ S.Diag(Attr.getLoc(),
+ Attr.isCXX11Attribute() ? diag::err_attribute_wrong_decl_type
+ : diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionMethodOrBlock;
+ return;
+ }
+ }
+
+ D->addAttr(::new (S.Context)
+ AnalyzerNoReturnAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
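+
+// Illustrative usage of 'analyzer_noreturn' (a hypothetical declaration, not
+// from the test suite): it marks a handler as noreturn for the static
+// analyzer only, without changing the function's type:
+//
+//   void report_fatal(const char *msg) __attribute__((analyzer_noreturn));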
+
+// PS3 PPU-specific.
+static void handleVecReturnAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+/*
+ Returning a Vector Class in Registers
+
+ According to the PPU ABI specifications, a class with a single member of
+ vector type is returned in memory when used as the return value of a function.
+ This results in inefficient code when implementing vector classes. To return
+ the value in a single vector register, add the vecreturn attribute to the
+ class definition. This attribute is also applicable to struct types.
+
+ Example:
+
+ struct Vector
+ {
+ __vector float xyzw;
+ } __attribute__((vecreturn));
+
+ Vector Add(Vector lhs, Vector rhs)
+ {
+ Vector result;
+ result.xyzw = vec_add(lhs.xyzw, rhs.xyzw);
+ return result; // This will be returned in a register
+ }
+*/
+ if (VecReturnAttr *A = D->getAttr<VecReturnAttr>()) {
+ S.Diag(Attr.getLoc(), diag::err_repeat_attribute) << A;
+ return;
+ }
+
+ RecordDecl *record = cast<RecordDecl>(D);
+ int count = 0;
+
+ if (!isa<CXXRecordDecl>(record)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_vecreturn_only_vector_member);
+ return;
+ }
+
+ if (!cast<CXXRecordDecl>(record)->isPOD()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_vecreturn_only_pod_record);
+ return;
+ }
+
+ for (const auto *I : record->fields()) {
+ if ((count == 1) || !I->getType()->isVectorType()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_vecreturn_only_vector_member);
+ return;
+ }
+ count++;
+ }
+
+ D->addAttr(::new (S.Context)
+ VecReturnAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleDependencyAttr(Sema &S, Scope *Scope, Decl *D,
+ const AttributeList &Attr) {
+ if (isa<ParmVarDecl>(D)) {
+ // [[carries_dependency]] can only be applied to a parameter if it is a
+ // parameter of a function declaration or lambda.
+ if (!(Scope->getFlags() & clang::Scope::FunctionDeclarationScope)) {
+ S.Diag(Attr.getLoc(),
+ diag::err_carries_dependency_param_not_function_decl);
+ return;
+ }
+ }
+
+ D->addAttr(::new (S.Context) CarriesDependencyAttr(
+ Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
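+
+// Illustrative usage of [[carries_dependency]] (hypothetical declaration): it
+// may appear on a function declaration and on its parameters:
+//
+//   [[carries_dependency]] int *lookup(int *p [[carries_dependency]]);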
+
+static void handleNotTailCalledAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (checkAttrMutualExclusion<AlwaysInlineAttr>(S, D, Attr.getRange(),
+ Attr.getName()))
+ return;
+
+ D->addAttr(::new (S.Context) NotTailCalledAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleDisableTailCallsAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (checkAttrMutualExclusion<NakedAttr>(S, D, Attr.getRange(),
+ Attr.getName()))
+ return;
+
+ D->addAttr(::new (S.Context) DisableTailCallsAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleUsedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->hasLocalStorage()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+ return;
+ }
+ } else if (!isFunctionOrMethod(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedVariableOrFunction;
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ UsedAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleConstructorAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ uint32_t priority = ConstructorAttr::DefaultPriority;
+ if (Attr.getNumArgs() &&
+ !checkUInt32Argument(S, Attr, Attr.getArgAsExpr(0), priority))
+ return;
+
+ D->addAttr(::new (S.Context)
+ ConstructorAttr(Attr.getRange(), S.Context, priority,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleDestructorAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ uint32_t priority = DestructorAttr::DefaultPriority;
+ if (Attr.getNumArgs() &&
+ !checkUInt32Argument(S, Attr, Attr.getArgAsExpr(0), priority))
+ return;
+
+ D->addAttr(::new (S.Context)
+ DestructorAttr(Attr.getRange(), S.Context, priority,
+ Attr.getAttributeSpellingListIndex()));
+}
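+
+// Illustrative usage of 'constructor'/'destructor' (hypothetical functions);
+// the optional argument is the priority checked above:
+//
+//   static void setup(void) __attribute__((constructor(200)));
+//   static void teardown(void) __attribute__((destructor(200)));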
+
+template <typename AttrTy>
+static void handleAttrWithMessage(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ // Handle the case where the attribute has a text message.
+ StringRef Str;
+ if (Attr.getNumArgs() == 1 && !S.checkStringLiteralArgumentAttr(Attr, 0, Str))
+ return;
+
+ D->addAttr(::new (S.Context) AttrTy(Attr.getRange(), S.Context, Str,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleObjCSuppresProtocolAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!cast<ObjCProtocolDecl>(D)->isThisDeclarationADefinition()) {
+ S.Diag(Attr.getLoc(), diag::err_objc_attr_protocol_requires_definition)
+ << Attr.getName() << Attr.getRange();
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ ObjCExplicitProtocolImplAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static bool checkAvailabilityAttr(Sema &S, SourceRange Range,
+ IdentifierInfo *Platform,
+ VersionTuple Introduced,
+ VersionTuple Deprecated,
+ VersionTuple Obsoleted) {
+ StringRef PlatformName
+ = AvailabilityAttr::getPrettyPlatformName(Platform->getName());
+ if (PlatformName.empty())
+ PlatformName = Platform->getName();
+
+ // Ensure that Introduced <= Deprecated <= Obsoleted (although not all
+ // of these steps are needed).
+ if (!Introduced.empty() && !Deprecated.empty() &&
+ !(Introduced <= Deprecated)) {
+ S.Diag(Range.getBegin(), diag::warn_availability_version_ordering)
+ << 1 << PlatformName << Deprecated.getAsString()
+ << 0 << Introduced.getAsString();
+ return true;
+ }
+
+ if (!Introduced.empty() && !Obsoleted.empty() &&
+ !(Introduced <= Obsoleted)) {
+ S.Diag(Range.getBegin(), diag::warn_availability_version_ordering)
+ << 2 << PlatformName << Obsoleted.getAsString()
+ << 0 << Introduced.getAsString();
+ return true;
+ }
+
+ if (!Deprecated.empty() && !Obsoleted.empty() &&
+ !(Deprecated <= Obsoleted)) {
+ S.Diag(Range.getBegin(), diag::warn_availability_version_ordering)
+ << 2 << PlatformName << Obsoleted.getAsString()
+ << 1 << Deprecated.getAsString();
+ return true;
+ }
+
+ return false;
+}
+
+/// \brief Check whether the two versions match.
+///
+/// If either version tuple is empty, then they are assumed to match. If
+/// \p BeforeIsOkay is true, then \p X can be less than or equal to \p Y.
+static bool versionsMatch(const VersionTuple &X, const VersionTuple &Y,
+ bool BeforeIsOkay) {
+ if (X.empty() || Y.empty())
+ return true;
+
+ if (X == Y)
+ return true;
+
+ if (BeforeIsOkay && X < Y)
+ return true;
+
+ return false;
+}
+
+AvailabilityAttr *Sema::mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
+ IdentifierInfo *Platform,
+ VersionTuple Introduced,
+ VersionTuple Deprecated,
+ VersionTuple Obsoleted,
+ bool IsUnavailable,
+ StringRef Message,
+ AvailabilityMergeKind AMK,
+ unsigned AttrSpellingListIndex) {
+ VersionTuple MergedIntroduced = Introduced;
+ VersionTuple MergedDeprecated = Deprecated;
+ VersionTuple MergedObsoleted = Obsoleted;
+ bool FoundAny = false;
+ bool OverrideOrImpl = false;
+ switch (AMK) {
+ case AMK_None:
+ case AMK_Redeclaration:
+ OverrideOrImpl = false;
+ break;
+
+ case AMK_Override:
+ case AMK_ProtocolImplementation:
+ OverrideOrImpl = true;
+ break;
+ }
+
+ if (D->hasAttrs()) {
+ AttrVec &Attrs = D->getAttrs();
+ for (unsigned i = 0, e = Attrs.size(); i != e;) {
+ const AvailabilityAttr *OldAA = dyn_cast<AvailabilityAttr>(Attrs[i]);
+ if (!OldAA) {
+ ++i;
+ continue;
+ }
+
+ IdentifierInfo *OldPlatform = OldAA->getPlatform();
+ if (OldPlatform != Platform) {
+ ++i;
+ continue;
+ }
+
+      // If there is an existing availability attribute for this platform that
+      // is explicit and the new one is implicit, use the explicit one and
+      // discard the new implicit attribute.
+ if (OldAA->getRange().isValid() && Range.isInvalid()) {
+ return nullptr;
+ }
+
+      // If there is an existing attribute for this platform that is implicit
+      // and the new attribute is explicit, then erase the old one and
+      // continue processing the attributes.
+ if (Range.isValid() && OldAA->getRange().isInvalid()) {
+ Attrs.erase(Attrs.begin() + i);
+ --e;
+ continue;
+ }
+
+ FoundAny = true;
+ VersionTuple OldIntroduced = OldAA->getIntroduced();
+ VersionTuple OldDeprecated = OldAA->getDeprecated();
+ VersionTuple OldObsoleted = OldAA->getObsoleted();
+ bool OldIsUnavailable = OldAA->getUnavailable();
+
+ if (!versionsMatch(OldIntroduced, Introduced, OverrideOrImpl) ||
+ !versionsMatch(Deprecated, OldDeprecated, OverrideOrImpl) ||
+ !versionsMatch(Obsoleted, OldObsoleted, OverrideOrImpl) ||
+ !(OldIsUnavailable == IsUnavailable ||
+ (OverrideOrImpl && !OldIsUnavailable && IsUnavailable))) {
+ if (OverrideOrImpl) {
+ int Which = -1;
+ VersionTuple FirstVersion;
+ VersionTuple SecondVersion;
+ if (!versionsMatch(OldIntroduced, Introduced, OverrideOrImpl)) {
+ Which = 0;
+ FirstVersion = OldIntroduced;
+ SecondVersion = Introduced;
+ } else if (!versionsMatch(Deprecated, OldDeprecated, OverrideOrImpl)) {
+ Which = 1;
+ FirstVersion = Deprecated;
+ SecondVersion = OldDeprecated;
+ } else if (!versionsMatch(Obsoleted, OldObsoleted, OverrideOrImpl)) {
+ Which = 2;
+ FirstVersion = Obsoleted;
+ SecondVersion = OldObsoleted;
+ }
+
+ if (Which == -1) {
+ Diag(OldAA->getLocation(),
+ diag::warn_mismatched_availability_override_unavail)
+ << AvailabilityAttr::getPrettyPlatformName(Platform->getName())
+ << (AMK == AMK_Override);
+ } else {
+ Diag(OldAA->getLocation(),
+ diag::warn_mismatched_availability_override)
+ << Which
+ << AvailabilityAttr::getPrettyPlatformName(Platform->getName())
+ << FirstVersion.getAsString() << SecondVersion.getAsString()
+ << (AMK == AMK_Override);
+ }
+ if (AMK == AMK_Override)
+ Diag(Range.getBegin(), diag::note_overridden_method);
+ else
+ Diag(Range.getBegin(), diag::note_protocol_method);
+ } else {
+ Diag(OldAA->getLocation(), diag::warn_mismatched_availability);
+ Diag(Range.getBegin(), diag::note_previous_attribute);
+ }
+
+ Attrs.erase(Attrs.begin() + i);
+ --e;
+ continue;
+ }
+
+ VersionTuple MergedIntroduced2 = MergedIntroduced;
+ VersionTuple MergedDeprecated2 = MergedDeprecated;
+ VersionTuple MergedObsoleted2 = MergedObsoleted;
+
+ if (MergedIntroduced2.empty())
+ MergedIntroduced2 = OldIntroduced;
+ if (MergedDeprecated2.empty())
+ MergedDeprecated2 = OldDeprecated;
+ if (MergedObsoleted2.empty())
+ MergedObsoleted2 = OldObsoleted;
+
+ if (checkAvailabilityAttr(*this, OldAA->getRange(), Platform,
+ MergedIntroduced2, MergedDeprecated2,
+ MergedObsoleted2)) {
+ Attrs.erase(Attrs.begin() + i);
+ --e;
+ continue;
+ }
+
+ MergedIntroduced = MergedIntroduced2;
+ MergedDeprecated = MergedDeprecated2;
+ MergedObsoleted = MergedObsoleted2;
+ ++i;
+ }
+ }
+
+ if (FoundAny &&
+ MergedIntroduced == Introduced &&
+ MergedDeprecated == Deprecated &&
+ MergedObsoleted == Obsoleted)
+ return nullptr;
+
+  // Only create a new attribute if !OverrideOrImpl, but we still want to
+  // perform the checking.
+ if (!checkAvailabilityAttr(*this, Range, Platform, MergedIntroduced,
+ MergedDeprecated, MergedObsoleted) &&
+ !OverrideOrImpl) {
+ return ::new (Context) AvailabilityAttr(Range, Context, Platform,
+ Introduced, Deprecated,
+ Obsoleted, IsUnavailable, Message,
+ AttrSpellingListIndex);
+ }
+ return nullptr;
+}
+
+static void handleAvailabilityAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!checkAttributeNumArgs(S, Attr, 1))
+ return;
+ IdentifierLoc *Platform = Attr.getArgAsIdent(0);
+ unsigned Index = Attr.getAttributeSpellingListIndex();
+
+ IdentifierInfo *II = Platform->Ident;
+ if (AvailabilityAttr::getPrettyPlatformName(II->getName()).empty())
+ S.Diag(Platform->Loc, diag::warn_availability_unknown_platform)
+ << Platform->Ident;
+
+ NamedDecl *ND = dyn_cast<NamedDecl>(D);
+ if (!ND) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+ return;
+ }
+
+ AvailabilityChange Introduced = Attr.getAvailabilityIntroduced();
+ AvailabilityChange Deprecated = Attr.getAvailabilityDeprecated();
+ AvailabilityChange Obsoleted = Attr.getAvailabilityObsoleted();
+ bool IsUnavailable = Attr.getUnavailableLoc().isValid();
+ StringRef Str;
+ if (const StringLiteral *SE =
+ dyn_cast_or_null<StringLiteral>(Attr.getMessageExpr()))
+ Str = SE->getString();
+
+ AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(ND, Attr.getRange(), II,
+ Introduced.Version,
+ Deprecated.Version,
+ Obsoleted.Version,
+ IsUnavailable, Str,
+ Sema::AMK_None,
+ Index);
+ if (NewAttr)
+ D->addAttr(NewAttr);
+
+  // Transcribe "ios" to "watchos" (and add a new attribute), mapping the iOS
+  // version numbers onto the watchOS numbering scheme.
+ if (S.Context.getTargetInfo().getTriple().isWatchOS()) {
+ IdentifierInfo *NewII = nullptr;
+ if (II->getName() == "ios")
+ NewII = &S.Context.Idents.get("watchos");
+ else if (II->getName() == "ios_app_extension")
+ NewII = &S.Context.Idents.get("watchos_app_extension");
+
+ if (NewII) {
+ auto adjustWatchOSVersion = [](VersionTuple Version) -> VersionTuple {
+ if (Version.empty())
+ return Version;
+ auto Major = Version.getMajor();
+ auto NewMajor = Major >= 9 ? Major - 7 : 0;
+ if (NewMajor >= 2) {
+ if (Version.getMinor().hasValue()) {
+ if (Version.getSubminor().hasValue())
+ return VersionTuple(NewMajor, Version.getMinor().getValue(),
+ Version.getSubminor().getValue());
+ else
+ return VersionTuple(NewMajor, Version.getMinor().getValue());
+ }
+ }
+
+ return VersionTuple(2, 0);
+ };
+
+ auto NewIntroduced = adjustWatchOSVersion(Introduced.Version);
+ auto NewDeprecated = adjustWatchOSVersion(Deprecated.Version);
+ auto NewObsoleted = adjustWatchOSVersion(Obsoleted.Version);
+
+ AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(ND,
+ SourceRange(),
+ NewII,
+ NewIntroduced,
+ NewDeprecated,
+ NewObsoleted,
+ IsUnavailable, Str,
+ Sema::AMK_None,
+ Index);
+ if (NewAttr)
+ D->addAttr(NewAttr);
+ }
+ } else if (S.Context.getTargetInfo().getTriple().isTvOS()) {
+ // Transcribe "ios" to "tvos" (and add a new attribute) if the versioning
+ // matches before the start of the tvOS platform.
+ IdentifierInfo *NewII = nullptr;
+ if (II->getName() == "ios")
+ NewII = &S.Context.Idents.get("tvos");
+ else if (II->getName() == "ios_app_extension")
+ NewII = &S.Context.Idents.get("tvos_app_extension");
+
+ if (NewII) {
+ AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(ND,
+ SourceRange(),
+ NewII,
+ Introduced.Version,
+ Deprecated.Version,
+ Obsoleted.Version,
+ IsUnavailable, Str,
+ Sema::AMK_None,
+ Index);
+ if (NewAttr)
+ D->addAttr(NewAttr);
+ }
+ }
+}
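+
+// Illustrative usage of 'availability' (hypothetical declaration); the merge
+// logic above enforces introduced <= deprecated <= obsoleted:
+//
+//   void api(void) __attribute__((availability(ios, introduced=8.0,
+//                                 deprecated=9.0, obsoleted=10.0,
+//                                 message="use newApi()")));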
+
+template <class T>
+static T *mergeVisibilityAttr(Sema &S, Decl *D, SourceRange range,
+ typename T::VisibilityType value,
+ unsigned attrSpellingListIndex) {
+ T *existingAttr = D->getAttr<T>();
+ if (existingAttr) {
+ typename T::VisibilityType existingValue = existingAttr->getVisibility();
+ if (existingValue == value)
+ return nullptr;
+ S.Diag(existingAttr->getLocation(), diag::err_mismatched_visibility);
+ S.Diag(range.getBegin(), diag::note_previous_attribute);
+ D->dropAttr<T>();
+ }
+ return ::new (S.Context) T(range, S.Context, value, attrSpellingListIndex);
+}
+
+VisibilityAttr *Sema::mergeVisibilityAttr(Decl *D, SourceRange Range,
+ VisibilityAttr::VisibilityType Vis,
+ unsigned AttrSpellingListIndex) {
+ return ::mergeVisibilityAttr<VisibilityAttr>(*this, D, Range, Vis,
+ AttrSpellingListIndex);
+}
+
+TypeVisibilityAttr *Sema::mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
+ TypeVisibilityAttr::VisibilityType Vis,
+ unsigned AttrSpellingListIndex) {
+ return ::mergeVisibilityAttr<TypeVisibilityAttr>(*this, D, Range, Vis,
+ AttrSpellingListIndex);
+}
+
+static void handleVisibilityAttr(Sema &S, Decl *D, const AttributeList &Attr,
+ bool isTypeVisibility) {
+ // Visibility attributes don't mean anything on a typedef.
+ if (isa<TypedefNameDecl>(D)) {
+ S.Diag(Attr.getRange().getBegin(), diag::warn_attribute_ignored)
+ << Attr.getName();
+ return;
+ }
+
+ // 'type_visibility' can only go on a type or namespace.
+ if (isTypeVisibility &&
+ !(isa<TagDecl>(D) ||
+ isa<ObjCInterfaceDecl>(D) ||
+ isa<NamespaceDecl>(D))) {
+ S.Diag(Attr.getRange().getBegin(), diag::err_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedTypeOrNamespace;
+ return;
+ }
+
+ // Check that the argument is a string literal.
+ StringRef TypeStr;
+ SourceLocation LiteralLoc;
+ if (!S.checkStringLiteralArgumentAttr(Attr, 0, TypeStr, &LiteralLoc))
+ return;
+
+ VisibilityAttr::VisibilityType type;
+ if (!VisibilityAttr::ConvertStrToVisibilityType(TypeStr, type)) {
+ S.Diag(LiteralLoc, diag::warn_attribute_type_not_supported)
+ << Attr.getName() << TypeStr;
+ return;
+ }
+
+ // Complain about attempts to use protected visibility on targets
+ // (like Darwin) that don't support it.
+ if (type == VisibilityAttr::Protected &&
+ !S.Context.getTargetInfo().hasProtectedVisibility()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_protected_visibility);
+ type = VisibilityAttr::Default;
+ }
+
+ unsigned Index = Attr.getAttributeSpellingListIndex();
+ clang::Attr *newAttr;
+ if (isTypeVisibility) {
+ newAttr = S.mergeTypeVisibilityAttr(D, Attr.getRange(),
+ (TypeVisibilityAttr::VisibilityType) type,
+ Index);
+ } else {
+ newAttr = S.mergeVisibilityAttr(D, Attr.getRange(), type, Index);
+ }
+ if (newAttr)
+ D->addAttr(newAttr);
+}
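+
+// Illustrative usage (hypothetical declarations): 'visibility' applies to
+// most decls, while 'type_visibility' is restricted to types and namespaces:
+//
+//   int helper(void) __attribute__((visibility("hidden")));
+//   struct S {} __attribute__((type_visibility("default")));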
+
+static void handleObjCMethodFamilyAttr(Sema &S, Decl *decl,
+ const AttributeList &Attr) {
+ ObjCMethodDecl *method = cast<ObjCMethodDecl>(decl);
+ if (!Attr.isArgIdent(0)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
+ << Attr.getName() << 1 << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ IdentifierLoc *IL = Attr.getArgAsIdent(0);
+ ObjCMethodFamilyAttr::FamilyKind F;
+ if (!ObjCMethodFamilyAttr::ConvertStrToFamilyKind(IL->Ident->getName(), F)) {
+ S.Diag(IL->Loc, diag::warn_attribute_type_not_supported) << Attr.getName()
+ << IL->Ident;
+ return;
+ }
+
+ if (F == ObjCMethodFamilyAttr::OMF_init &&
+ !method->getReturnType()->isObjCObjectPointerType()) {
+ S.Diag(method->getLocation(), diag::err_init_method_bad_return_type)
+ << method->getReturnType();
+ // Ignore the attribute.
+ return;
+ }
+
+ method->addAttr(new (S.Context) ObjCMethodFamilyAttr(Attr.getRange(),
+ S.Context, F,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleObjCNSObject(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+ QualType T = TD->getUnderlyingType();
+ if (!T->isCARCBridgableType()) {
+ S.Diag(TD->getLocation(), diag::err_nsobject_attribute);
+ return;
+ }
+ }
+ else if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D)) {
+ QualType T = PD->getType();
+ if (!T->isCARCBridgableType()) {
+ S.Diag(PD->getLocation(), diag::err_nsobject_attribute);
+ return;
+ }
+ }
+ else {
+ // It is okay to include this attribute on properties, e.g.:
+ //
+ // @property (retain, nonatomic) struct Bork *Q __attribute__((NSObject));
+ //
+ // In this case it follows tradition and suppresses an error in the above
+ // case.
+ S.Diag(D->getLocation(), diag::warn_nsobject_attribute);
+ }
+ D->addAttr(::new (S.Context)
+ ObjCNSObjectAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleObjCIndependentClass(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+ QualType T = TD->getUnderlyingType();
+ if (!T->isObjCObjectPointerType()) {
+ S.Diag(TD->getLocation(), diag::warn_ptr_independentclass_attribute);
+ return;
+ }
+ } else {
+ S.Diag(D->getLocation(), diag::warn_independentclass_attribute);
+ return;
+ }
+ D->addAttr(::new (S.Context)
+ ObjCIndependentClassAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleBlocksAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!Attr.isArgIdent(0)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
+ << Attr.getName() << 1 << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ IdentifierInfo *II = Attr.getArgAsIdent(0)->Ident;
+ BlocksAttr::BlockType type;
+ if (!BlocksAttr::ConvertStrToBlockType(II->getName(), type)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
+ << Attr.getName() << II;
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ BlocksAttr(Attr.getRange(), S.Context, type,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleSentinelAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ unsigned sentinel = (unsigned)SentinelAttr::DefaultSentinel;
+ if (Attr.getNumArgs() > 0) {
+ Expr *E = Attr.getArgAsExpr(0);
+ llvm::APSInt Idx(32);
+ if (E->isTypeDependent() || E->isValueDependent() ||
+ !E->isIntegerConstantExpr(Idx, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
+ << Attr.getName() << 1 << AANT_ArgumentIntegerConstant
+ << E->getSourceRange();
+ return;
+ }
+
+ if (Idx.isSigned() && Idx.isNegative()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_sentinel_less_than_zero)
+ << E->getSourceRange();
+ return;
+ }
+
+ sentinel = Idx.getZExtValue();
+ }
+
+ unsigned nullPos = (unsigned)SentinelAttr::DefaultNullPos;
+ if (Attr.getNumArgs() > 1) {
+ Expr *E = Attr.getArgAsExpr(1);
+ llvm::APSInt Idx(32);
+ if (E->isTypeDependent() || E->isValueDependent() ||
+ !E->isIntegerConstantExpr(Idx, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
+ << Attr.getName() << 2 << AANT_ArgumentIntegerConstant
+ << E->getSourceRange();
+ return;
+ }
+ nullPos = Idx.getZExtValue();
+
+ if ((Idx.isSigned() && Idx.isNegative()) || nullPos > 1) {
+ // FIXME: This error message could be improved, it would be nice
+ // to say what the bounds actually are.
+ S.Diag(Attr.getLoc(), diag::err_attribute_sentinel_not_zero_or_one)
+ << E->getSourceRange();
+ return;
+ }
+ }
+
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ const FunctionType *FT = FD->getType()->castAs<FunctionType>();
+ if (isa<FunctionNoProtoType>(FT)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_named_arguments);
+ return;
+ }
+
+ if (!cast<FunctionProtoType>(FT)->isVariadic()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 0;
+ return;
+ }
+ } else if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ if (!MD->isVariadic()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 0;
+ return;
+ }
+ } else if (BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ if (!BD->isVariadic()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 1;
+ return;
+ }
+ } else if (const VarDecl *V = dyn_cast<VarDecl>(D)) {
+ QualType Ty = V->getType();
+ if (Ty->isBlockPointerType() || Ty->isFunctionPointerType()) {
+ const FunctionType *FT = Ty->isFunctionPointerType()
+ ? D->getFunctionType()
+ : Ty->getAs<BlockPointerType>()->getPointeeType()->getAs<FunctionType>();
+ if (!cast<FunctionProtoType>(FT)->isVariadic()) {
+ int m = Ty->isFunctionPointerType() ? 0 : 1;
+ S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << m;
+ return;
+ }
+ } else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionMethodOrBlock;
+ return;
+ }
+ } else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionMethodOrBlock;
+ return;
+ }
+ D->addAttr(::new (S.Context)
+ SentinelAttr(Attr.getRange(), S.Context, sentinel, nullPos,
+ Attr.getAttributeSpellingListIndex()));
+}
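+
+// Illustrative usage of 'sentinel' (hypothetical function): the callee must
+// be variadic, and callers must terminate the argument list with a null
+// pointer:
+//
+//   void execlp_like(const char *path, ...) __attribute__((sentinel));
+//   // e.g. execlp_like("/bin/ls", "ls", "-l", (char *)0);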
+
+static void handleWarnUnusedResult(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (D->getFunctionType() &&
+ D->getFunctionType()->getReturnType()->isVoidType()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_void_function_method)
+ << Attr.getName() << 0;
+ return;
+ }
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ if (MD->getReturnType()->isVoidType()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_void_function_method)
+ << Attr.getName() << 1;
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ WarnUnusedResultAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleWeakImportAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // weak_import only applies to variable & function declarations.
+ bool isDef = false;
+ if (!D->canBeWeakImported(isDef)) {
+ if (isDef)
+ S.Diag(Attr.getLoc(), diag::warn_attribute_invalid_on_definition)
+ << "weak_import";
+ else if (isa<ObjCPropertyDecl>(D) || isa<ObjCMethodDecl>(D) ||
+ (S.Context.getTargetInfo().getTriple().isOSDarwin() &&
+ (isa<ObjCInterfaceDecl>(D) || isa<EnumDecl>(D)))) {
+ // Nothing to warn about here.
+ } else
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedVariableOrFunction;
+
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ WeakImportAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+// Handles reqd_work_group_size and work_group_size_hint.
+template <typename WorkGroupAttr>
+static void handleWorkGroupSize(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ uint32_t WGSize[3];
+ for (unsigned i = 0; i < 3; ++i) {
+ const Expr *E = Attr.getArgAsExpr(i);
+ if (!checkUInt32Argument(S, Attr, E, WGSize[i], i))
+ return;
+ if (WGSize[i] == 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_is_zero)
+ << Attr.getName() << E->getSourceRange();
+ return;
+ }
+ }
+
+ WorkGroupAttr *Existing = D->getAttr<WorkGroupAttr>();
+ if (Existing && !(Existing->getXDim() == WGSize[0] &&
+ Existing->getYDim() == WGSize[1] &&
+ Existing->getZDim() == WGSize[2]))
+ S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute) << Attr.getName();
+
+ D->addAttr(::new (S.Context) WorkGroupAttr(Attr.getRange(), S.Context,
+ WGSize[0], WGSize[1], WGSize[2],
+ Attr.getAttributeSpellingListIndex()));
+}
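+
+// Illustrative OpenCL usage (hypothetical kernel): all three dimensions must
+// be non-zero, as checked above:
+//
+//   __kernel __attribute__((reqd_work_group_size(64, 1, 1)))
+//   void k(__global int *p);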
+
+static void handleVecTypeHint(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!Attr.hasParsedType()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << Attr.getName() << 1;
+ return;
+ }
+
+ TypeSourceInfo *ParmTSI = nullptr;
+ QualType ParmType = S.GetTypeFromParser(Attr.getTypeArg(), &ParmTSI);
+ assert(ParmTSI && "no type source info for attribute argument");
+
+ if (!ParmType->isExtVectorType() && !ParmType->isFloatingType() &&
+ (ParmType->isBooleanType() ||
+ !ParmType->isIntegralType(S.getASTContext()))) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_vec_type_hint)
+ << ParmType;
+ return;
+ }
+
+ if (VecTypeHintAttr *A = D->getAttr<VecTypeHintAttr>()) {
+ if (!S.Context.hasSameType(A->getTypeHint(), ParmType)) {
+ S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute) << Attr.getName();
+ return;
+ }
+ }
+
+ D->addAttr(::new (S.Context) VecTypeHintAttr(Attr.getLoc(), S.Context,
+ ParmTSI,
+ Attr.getAttributeSpellingListIndex()));
+}
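+
+// Illustrative OpenCL usage of 'vec_type_hint' (hypothetical kernel); the
+// argument must be an integral, floating, or extended vector type:
+//
+//   __kernel __attribute__((vec_type_hint(float4)))
+//   void k(__global float4 *p);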
+
+SectionAttr *Sema::mergeSectionAttr(Decl *D, SourceRange Range,
+ StringRef Name,
+ unsigned AttrSpellingListIndex) {
+ if (SectionAttr *ExistingAttr = D->getAttr<SectionAttr>()) {
+ if (ExistingAttr->getName() == Name)
+ return nullptr;
+ Diag(ExistingAttr->getLocation(), diag::warn_mismatched_section);
+ Diag(Range.getBegin(), diag::note_previous_attribute);
+ return nullptr;
+ }
+ return ::new (Context) SectionAttr(Range, Context, Name,
+ AttrSpellingListIndex);
+}
+
+bool Sema::checkSectionName(SourceLocation LiteralLoc, StringRef SecName) {
+ std::string Error = Context.getTargetInfo().isValidSectionSpecifier(SecName);
+ if (!Error.empty()) {
+ Diag(LiteralLoc, diag::err_attribute_section_invalid_for_target) << Error;
+ return false;
+ }
+ return true;
+}
+
+static void handleSectionAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+  // Make sure that there is a string literal as the section's single
+ // argument.
+ StringRef Str;
+ SourceLocation LiteralLoc;
+ if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str, &LiteralLoc))
+ return;
+
+ if (!S.checkSectionName(LiteralLoc, Str))
+ return;
+
+ // If the target wants to validate the section specifier, make it happen.
+ std::string Error = S.Context.getTargetInfo().isValidSectionSpecifier(Str);
+ if (!Error.empty()) {
+ S.Diag(LiteralLoc, diag::err_attribute_section_invalid_for_target)
+ << Error;
+ return;
+ }
+
+ unsigned Index = Attr.getAttributeSpellingListIndex();
+ SectionAttr *NewAttr = S.mergeSectionAttr(D, Attr.getRange(), Str, Index);
+ if (NewAttr)
+ D->addAttr(NewAttr);
+}
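+
+// Illustrative usage of 'section' (hypothetical variable); the string must be
+// a valid section specifier for the target:
+//
+//   int counter __attribute__((section(".mydata")));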
+
+// Check for things we'd like to warn about; no errors or validation for now.
+// TODO: Validation should use a backend target library that specifies
+// the allowable subtarget features and cpus. We could use something like a
+// TargetCodeGenInfo hook here to do validation.
+void Sema::checkTargetAttr(SourceLocation LiteralLoc, StringRef AttrStr) {
+ for (auto Str : {"tune=", "fpmath="})
+ if (AttrStr.find(Str) != StringRef::npos)
+ Diag(LiteralLoc, diag::warn_unsupported_target_attribute) << Str;
+}
+
+static void handleTargetAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ StringRef Str;
+ SourceLocation LiteralLoc;
+ if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str, &LiteralLoc))
+ return;
+ S.checkTargetAttr(LiteralLoc, Str);
+ unsigned Index = Attr.getAttributeSpellingListIndex();
+ TargetAttr *NewAttr =
+ ::new (S.Context) TargetAttr(Attr.getRange(), S.Context, Str, Index);
+ D->addAttr(NewAttr);
+}
+
+static void handleCleanupAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ VarDecl *VD = cast<VarDecl>(D);
+ if (!VD->hasLocalStorage()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+ return;
+ }
+
+ Expr *E = Attr.getArgAsExpr(0);
+ SourceLocation Loc = E->getExprLoc();
+ FunctionDecl *FD = nullptr;
+ DeclarationNameInfo NI;
+
+ // gcc only allows for simple identifiers. Since we support more than gcc, we
+ // will warn the user.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (DRE->hasQualifier())
+ S.Diag(Loc, diag::warn_cleanup_ext);
+ FD = dyn_cast<FunctionDecl>(DRE->getDecl());
+ NI = DRE->getNameInfo();
+ if (!FD) {
+ S.Diag(Loc, diag::err_attribute_cleanup_arg_not_function) << 1
+ << NI.getName();
+ return;
+ }
+ } else if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
+ if (ULE->hasExplicitTemplateArgs())
+ S.Diag(Loc, diag::warn_cleanup_ext);
+ FD = S.ResolveSingleFunctionTemplateSpecialization(ULE, true);
+ NI = ULE->getNameInfo();
+ if (!FD) {
+ S.Diag(Loc, diag::err_attribute_cleanup_arg_not_function) << 2
+ << NI.getName();
+ if (ULE->getType() == S.Context.OverloadTy)
+ S.NoteAllOverloadCandidates(ULE);
+ return;
+ }
+ } else {
+ S.Diag(Loc, diag::err_attribute_cleanup_arg_not_function) << 0;
+ return;
+ }
+
+ if (FD->getNumParams() != 1) {
+ S.Diag(Loc, diag::err_attribute_cleanup_func_must_take_one_arg)
+ << NI.getName();
+ return;
+ }
+
+ // We're currently more strict than GCC about what function types we accept.
+ // If this ever proves to be a problem it should be easy to fix.
+ QualType Ty = S.Context.getPointerType(VD->getType());
+ QualType ParamTy = FD->getParamDecl(0)->getType();
+ if (S.CheckAssignmentConstraints(FD->getParamDecl(0)->getLocation(),
+ ParamTy, Ty) != Sema::Compatible) {
+ S.Diag(Loc, diag::err_attribute_cleanup_func_arg_incompatible_type)
+ << NI.getName() << ParamTy << Ty;
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ CleanupAttr(Attr.getRange(), S.Context, FD,
+ Attr.getAttributeSpellingListIndex()));
+}
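+
+// Illustrative usage of 'cleanup' (hypothetical names): the cleanup function
+// takes a pointer to the variable's type and runs when the variable goes out
+// of scope:
+//
+//   static void close_file(FILE **fp) { if (*fp) fclose(*fp); }
+//   void use(void) {
+//     FILE *f __attribute__((cleanup(close_file))) = fopen("log", "r");
+//   } // close_file(&f) is called here.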
+
+/// Handle __attribute__((format_arg(idx))) attribute based on
+/// http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
+static void handleFormatArgAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ Expr *IdxExpr = Attr.getArgAsExpr(0);
+ uint64_t Idx;
+ if (!checkFunctionOrMethodParameterIndex(S, D, Attr, 1, IdxExpr, Idx))
+ return;
+
+ // Make sure the format string is really a string.
+ QualType Ty = getFunctionOrMethodParamType(D, Idx);
+
+ bool NotNSStringTy = !isNSStringType(Ty, S.Context);
+ if (NotNSStringTy &&
+ !isCFStringType(Ty, S.Context) &&
+ (!Ty->isPointerType() ||
+ !Ty->getAs<PointerType>()->getPointeeType()->isCharType())) {
+ S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
+ << "a string type" << IdxExpr->getSourceRange()
+ << getFunctionOrMethodParamRange(D, 0);
+ return;
+ }
+ Ty = getFunctionOrMethodResultType(D);
+ if (!isNSStringType(Ty, S.Context) &&
+ !isCFStringType(Ty, S.Context) &&
+ (!Ty->isPointerType() ||
+ !Ty->getAs<PointerType>()->getPointeeType()->isCharType())) {
+ S.Diag(Attr.getLoc(), diag::err_format_attribute_result_not)
+ << (NotNSStringTy ? "string type" : "NSString")
+ << IdxExpr->getSourceRange() << getFunctionOrMethodParamRange(D, 0);
+ return;
+ }
+
+ // We cannot use the Idx returned from checkFunctionOrMethodParameterIndex
+ // because that has corrected for the implicit this parameter, and is zero-
+ // based. The attribute expects what the user wrote explicitly.
+ llvm::APSInt Val;
+ IdxExpr->EvaluateAsInt(Val, S.Context);
+
+ D->addAttr(::new (S.Context)
+ FormatArgAttr(Attr.getRange(), S.Context, Val.getZExtValue(),
+ Attr.getAttributeSpellingListIndex()));
+}
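+
+// Illustrative usage of 'format_arg' (hypothetical function): the result
+// carries the format-string property of parameter 1, so format checking flows
+// through calls such as printf(my_gettext("%d files"), n):
+//
+//   char *my_gettext(const char *msgid) __attribute__((format_arg(1)));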
+
+enum FormatAttrKind {
+ CFStringFormat,
+ NSStringFormat,
+ StrftimeFormat,
+ SupportedFormat,
+ IgnoredFormat,
+ InvalidFormat
+};
+
+/// getFormatAttrKind - Map from format attribute names to supported format
+/// types.
+static FormatAttrKind getFormatAttrKind(StringRef Format) {
+ return llvm::StringSwitch<FormatAttrKind>(Format)
+ // Check for formats that get handled specially.
+ .Case("NSString", NSStringFormat)
+ .Case("CFString", CFStringFormat)
+ .Case("strftime", StrftimeFormat)
+
+ // Otherwise, check for supported formats.
+ .Cases("scanf", "printf", "printf0", "strfmon", SupportedFormat)
+ .Cases("cmn_err", "vcmn_err", "zcmn_err", SupportedFormat)
+ .Case("kprintf", SupportedFormat) // OpenBSD.
+ .Case("freebsd_kprintf", SupportedFormat) // FreeBSD.
+ .Case("os_trace", SupportedFormat)
+
+ .Cases("gcc_diag", "gcc_cdiag", "gcc_cxxdiag", "gcc_tdiag", IgnoredFormat)
+ .Default(InvalidFormat);
+}
+
+/// Handle __attribute__((init_priority(priority))) attributes based on
+/// http://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Attributes.html
+static void handleInitPriorityAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!S.getLangOpts().CPlusPlus) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+ return;
+ }
+
+ if (S.getCurFunctionOrMethodDecl()) {
+ S.Diag(Attr.getLoc(), diag::err_init_priority_object_attr);
+ Attr.setInvalid();
+ return;
+ }
+ QualType T = cast<VarDecl>(D)->getType();
+ if (S.Context.getAsArrayType(T))
+ T = S.Context.getBaseElementType(T);
+ if (!T->getAs<RecordType>()) {
+ S.Diag(Attr.getLoc(), diag::err_init_priority_object_attr);
+ Attr.setInvalid();
+ return;
+ }
+
+ Expr *E = Attr.getArgAsExpr(0);
+ uint32_t prioritynum;
+ if (!checkUInt32Argument(S, Attr, E, prioritynum)) {
+ Attr.setInvalid();
+ return;
+ }
+
+ if (prioritynum < 101 || prioritynum > 65535) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_outof_range)
+ << E->getSourceRange() << Attr.getName() << 101 << 65535;
+ Attr.setInvalid();
+ return;
+ }
+ D->addAttr(::new (S.Context)
+ InitPriorityAttr(Attr.getRange(), S.Context, prioritynum,
+ Attr.getAttributeSpellingListIndex()));
+}
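+
+// Illustrative C++ usage of 'init_priority' (hypothetical names): the
+// priority must lie in the [101, 65535] range checked above:
+//
+//   struct Logger { Logger(); };
+//   static Logger log_obj __attribute__((init_priority(200)));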
+
+FormatAttr *Sema::mergeFormatAttr(Decl *D, SourceRange Range,
+ IdentifierInfo *Format, int FormatIdx,
+ int FirstArg,
+ unsigned AttrSpellingListIndex) {
+ // Check whether we already have an equivalent format attribute.
+ for (auto *F : D->specific_attrs<FormatAttr>()) {
+ if (F->getType() == Format &&
+ F->getFormatIdx() == FormatIdx &&
+ F->getFirstArg() == FirstArg) {
+ // If we don't have a valid location for this attribute, adopt the
+ // location.
+ if (F->getLocation().isInvalid())
+ F->setRange(Range);
+ return nullptr;
+ }
+ }
+
+ return ::new (Context) FormatAttr(Range, Context, Format, FormatIdx,
+ FirstArg, AttrSpellingListIndex);
+}
+
+/// Handle __attribute__((format(type,idx,firstarg))) attributes based on
+/// http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
+static void handleFormatAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!Attr.isArgIdent(0)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
+ << Attr.getName() << 1 << AANT_ArgumentIdentifier;
+ return;
+ }
+
+  // In C++ the implicit 'this' function parameter also counts, and parameters
+  // are counted from one.
+ bool HasImplicitThisParam = isInstanceMethod(D);
+ unsigned NumArgs = getFunctionOrMethodNumParams(D) + HasImplicitThisParam;
+
+ IdentifierInfo *II = Attr.getArgAsIdent(0)->Ident;
+ StringRef Format = II->getName();
+
+ if (normalizeName(Format)) {
+ // If we've modified the string name, we need a new identifier for it.
+ II = &S.Context.Idents.get(Format);
+ }
+
+ // Check for supported formats.
+ FormatAttrKind Kind = getFormatAttrKind(Format);
+
+ if (Kind == IgnoredFormat)
+ return;
+
+ if (Kind == InvalidFormat) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
+ << Attr.getName() << II->getName();
+ return;
+ }
+
+  // Check the 2nd argument.
+ Expr *IdxExpr = Attr.getArgAsExpr(1);
+ uint32_t Idx;
+ if (!checkUInt32Argument(S, Attr, IdxExpr, Idx, 2))
+ return;
+
+ if (Idx < 1 || Idx > NumArgs) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << Attr.getName() << 2 << IdxExpr->getSourceRange();
+ return;
+ }
+
+ // FIXME: Do we need to bounds check?
+ unsigned ArgIdx = Idx - 1;
+
+ if (HasImplicitThisParam) {
+ if (ArgIdx == 0) {
+ S.Diag(Attr.getLoc(),
+ diag::err_format_attribute_implicit_this_format_string)
+ << IdxExpr->getSourceRange();
+ return;
+ }
+ ArgIdx--;
+ }
+
+  // Make sure the format string parameter is really a string.
+ QualType Ty = getFunctionOrMethodParamType(D, ArgIdx);
+
+ if (Kind == CFStringFormat) {
+ if (!isCFStringType(Ty, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
+ << "a CFString" << IdxExpr->getSourceRange()
+ << getFunctionOrMethodParamRange(D, ArgIdx);
+ return;
+ }
+ } else if (Kind == NSStringFormat) {
+ // FIXME: do we need to check if the type is NSString*? What are the
+ // semantics?
+ if (!isNSStringType(Ty, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
+ << "an NSString" << IdxExpr->getSourceRange()
+ << getFunctionOrMethodParamRange(D, ArgIdx);
+ return;
+ }
+ } else if (!Ty->isPointerType() ||
+ !Ty->getAs<PointerType>()->getPointeeType()->isCharType()) {
+ S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
+ << "a string type" << IdxExpr->getSourceRange()
+ << getFunctionOrMethodParamRange(D, ArgIdx);
+ return;
+ }
+
+  // Check the 3rd argument.
+ Expr *FirstArgExpr = Attr.getArgAsExpr(2);
+ uint32_t FirstArg;
+ if (!checkUInt32Argument(S, Attr, FirstArgExpr, FirstArg, 3))
+ return;
+
+  // Check whether the function is variadic if the 3rd argument is non-zero.
+ if (FirstArg != 0) {
+ if (isFunctionOrMethodVariadic(D)) {
+ ++NumArgs; // +1 for ...
+ } else {
+ S.Diag(D->getLocation(), diag::err_format_attribute_requires_variadic);
+ return;
+ }
+ }
+
+  // strftime requires FirstArg to be 0 because it doesn't read from any
+  // variable; the input is just the current time plus the format string.
+ if (Kind == StrftimeFormat) {
+ if (FirstArg != 0) {
+ S.Diag(Attr.getLoc(), diag::err_format_strftime_third_parameter)
+ << FirstArgExpr->getSourceRange();
+ return;
+ }
+    // A FirstArg of 0 disables parameter checking (for use with e.g. va_list).
+ } else if (FirstArg != 0 && FirstArg != NumArgs) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << Attr.getName() << 3 << FirstArgExpr->getSourceRange();
+ return;
+ }
+
+ FormatAttr *NewAttr = S.mergeFormatAttr(D, Attr.getRange(), II,
+ Idx, FirstArg,
+ Attr.getAttributeSpellingListIndex());
+ if (NewAttr)
+ D->addAttr(NewAttr);
+}
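+
+// Illustrative usage of 'format' (hypothetical function): argument 1 is the
+// format string and checking starts at argument 2:
+//
+//   void log_msg(const char *fmt, ...) __attribute__((format(printf, 1, 2)));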
+
+static void handleTransparentUnionAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ // Try to find the underlying union declaration.
+ RecordDecl *RD = nullptr;
+ TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D);
+ if (TD && TD->getUnderlyingType()->isUnionType())
+ RD = TD->getUnderlyingType()->getAsUnionType()->getDecl();
+ else
+ RD = dyn_cast<RecordDecl>(D);
+
+ if (!RD || !RD->isUnion()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedUnion;
+ return;
+ }
+
+ if (!RD->isCompleteDefinition()) {
+ S.Diag(Attr.getLoc(),
+ diag::warn_transparent_union_attribute_not_definition);
+ return;
+ }
+
+ RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end();
+ if (Field == FieldEnd) {
+ S.Diag(Attr.getLoc(), diag::warn_transparent_union_attribute_zero_fields);
+ return;
+ }
+
+ FieldDecl *FirstField = *Field;
+ QualType FirstType = FirstField->getType();
+ if (FirstType->hasFloatingRepresentation() || FirstType->isVectorType()) {
+ S.Diag(FirstField->getLocation(),
+ diag::warn_transparent_union_attribute_floating)
+ << FirstType->isVectorType() << FirstType;
+ return;
+ }
+
+ uint64_t FirstSize = S.Context.getTypeSize(FirstType);
+ uint64_t FirstAlign = S.Context.getTypeAlign(FirstType);
+ for (; Field != FieldEnd; ++Field) {
+ QualType FieldType = Field->getType();
+ // FIXME: this isn't fully correct; we also need to test whether the
+ // members of the union would all have the same calling convention as the
+ // first member of the union. Checking just the size and alignment isn't
+ // sufficient (consider structs passed on the stack instead of in registers
+ // as an example).
+ if (S.Context.getTypeSize(FieldType) != FirstSize ||
+ S.Context.getTypeAlign(FieldType) > FirstAlign) {
+ // Warn if we drop the attribute.
+ bool isSize = S.Context.getTypeSize(FieldType) != FirstSize;
+ unsigned FieldBits = isSize? S.Context.getTypeSize(FieldType)
+ : S.Context.getTypeAlign(FieldType);
+ S.Diag(Field->getLocation(),
+ diag::warn_transparent_union_attribute_field_size_align)
+ << isSize << Field->getDeclName() << FieldBits;
+ unsigned FirstBits = isSize? FirstSize : FirstAlign;
+ S.Diag(FirstField->getLocation(),
+ diag::note_transparent_union_first_field_size_align)
+ << isSize << FirstBits;
+ return;
+ }
+ }
+
+ RD->addAttr(::new (S.Context)
+ TransparentUnionAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
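+
+// Illustrative usage of 'transparent_union' (hypothetical names): all members
+// must have the same size and alignment, and a parameter of the union type
+// accepts any member type without a cast:
+//
+//   typedef union {
+//     int *ip;
+//     const char *cp;
+//   } arg_t __attribute__((transparent_union));
+//   void f(arg_t a); // callable as f(&i) or f("str")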
+
+static void handleAnnotateAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // Make sure that there is a string literal as the annotation's single
+ // argument.
+ StringRef Str;
+ if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str))
+ return;
+
+ // Don't duplicate annotations that are already set.
+ for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
+ if (I->getAnnotation() == Str)
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ AnnotateAttr(Attr.getRange(), S.Context, Str,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleAlignValueAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ S.AddAlignValueAttr(Attr.getRange(), D, Attr.getArgAsExpr(0),
+ Attr.getAttributeSpellingListIndex());
+}
+
+void Sema::AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
+ unsigned SpellingListIndex) {
+ AlignValueAttr TmpAttr(AttrRange, Context, E, SpellingListIndex);
+ SourceLocation AttrLoc = AttrRange.getBegin();
+
+ QualType T;
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D))
+ T = TD->getUnderlyingType();
+ else if (ValueDecl *VD = dyn_cast<ValueDecl>(D))
+ T = VD->getType();
+ else
+ llvm_unreachable("Unknown decl type for align_value");
+
+ if (!T->isDependentType() && !T->isAnyPointerType() &&
+ !T->isReferenceType() && !T->isMemberPointerType()) {
+ Diag(AttrLoc, diag::warn_attribute_pointer_or_reference_only)
+ << &TmpAttr /*TmpAttr.getName()*/ << T << D->getSourceRange();
+ return;
+ }
+
+ if (!E->isValueDependent()) {
+ llvm::APSInt Alignment;
+ ExprResult ICE
+ = VerifyIntegerConstantExpression(E, &Alignment,
+ diag::err_align_value_attribute_argument_not_int,
+ /*AllowFold*/ false);
+ if (ICE.isInvalid())
+ return;
+
+ if (!Alignment.isPowerOf2()) {
+ Diag(AttrLoc, diag::err_alignment_not_power_of_two)
+ << E->getSourceRange();
+ return;
+ }
+
+ D->addAttr(::new (Context)
+ AlignValueAttr(AttrRange, Context, ICE.get(),
+ SpellingListIndex));
+ return;
+ }
+
+ // Save dependent expressions in the AST to be instantiated.
+ D->addAttr(::new (Context) AlignValueAttr(TmpAttr));
+ return;
+}
+
+static void handleAlignedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+  // Check the attribute arguments.
+ if (Attr.getNumArgs() > 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << Attr.getName() << 1;
+ return;
+ }
+
+ if (Attr.getNumArgs() == 0) {
+ D->addAttr(::new (S.Context) AlignedAttr(Attr.getRange(), S.Context,
+ true, nullptr, Attr.getAttributeSpellingListIndex()));
+ return;
+ }
+
+ Expr *E = Attr.getArgAsExpr(0);
+ if (Attr.isPackExpansion() && !E->containsUnexpandedParameterPack()) {
+ S.Diag(Attr.getEllipsisLoc(),
+ diag::err_pack_expansion_without_parameter_packs);
+ return;
+ }
+
+ if (!Attr.isPackExpansion() && S.DiagnoseUnexpandedParameterPack(E))
+ return;
+
+ if (E->isValueDependent()) {
+ if (const auto *TND = dyn_cast<TypedefNameDecl>(D)) {
+ if (!TND->getUnderlyingType()->isDependentType()) {
+ S.Diag(Attr.getLoc(), diag::err_alignment_dependent_typedef_name)
+ << E->getSourceRange();
+ return;
+ }
+ }
+ }
+
+ S.AddAlignedAttr(Attr.getRange(), D, E, Attr.getAttributeSpellingListIndex(),
+ Attr.isPackExpansion());
+}
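+
+// Illustrative usage (hypothetical declarations): both the GNU spelling and
+// the C++11/C11 spellings are routed through AddAlignedAttr below:
+//
+//   int buf[256] __attribute__((aligned(64)));
+//   alignas(16) float vec4[4];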
+
+void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
+ unsigned SpellingListIndex, bool IsPackExpansion) {
+ AlignedAttr TmpAttr(AttrRange, Context, true, E, SpellingListIndex);
+ SourceLocation AttrLoc = AttrRange.getBegin();
+
+ // C++11 alignas(...) and C11 _Alignas(...) have additional requirements.
+ if (TmpAttr.isAlignas()) {
+ // C++11 [dcl.align]p1:
+ // An alignment-specifier may be applied to a variable or to a class
+ // data member, but it shall not be applied to a bit-field, a function
+ // parameter, the formal parameter of a catch clause, or a variable
+ // declared with the register storage class specifier. An
+ // alignment-specifier may also be applied to the declaration of a class
+ // or enumeration type.
+ // C11 6.7.5/2:
+ // An alignment attribute shall not be specified in a declaration of
+ // a typedef, or a bit-field, or a function, or a parameter, or an
+ // object declared with the register storage-class specifier.
+ int DiagKind = -1;
+ if (isa<ParmVarDecl>(D)) {
+ DiagKind = 0;
+ } else if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->getStorageClass() == SC_Register)
+ DiagKind = 1;
+ if (VD->isExceptionVariable())
+ DiagKind = 2;
+ } else if (FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
+ if (FD->isBitField())
+ DiagKind = 3;
+ } else if (!isa<TagDecl>(D)) {
+ Diag(AttrLoc, diag::err_attribute_wrong_decl_type) << &TmpAttr
+ << (TmpAttr.isC11() ? ExpectedVariableOrField
+ : ExpectedVariableFieldOrTag);
+ return;
+ }
+ if (DiagKind != -1) {
+ Diag(AttrLoc, diag::err_alignas_attribute_wrong_decl_type)
+ << &TmpAttr << DiagKind;
+ return;
+ }
+ }
+
+ if (E->isTypeDependent() || E->isValueDependent()) {
+ // Save dependent expressions in the AST to be instantiated.
+ AlignedAttr *AA = ::new (Context) AlignedAttr(TmpAttr);
+ AA->setPackExpansion(IsPackExpansion);
+ D->addAttr(AA);
+ return;
+ }
+
+ // FIXME: Cache the number on the Attr object?
+ llvm::APSInt Alignment;
+ ExprResult ICE
+ = VerifyIntegerConstantExpression(E, &Alignment,
+ diag::err_aligned_attribute_argument_not_int,
+ /*AllowFold*/ false);
+ if (ICE.isInvalid())
+ return;
+
+ uint64_t AlignVal = Alignment.getZExtValue();
+
+ // C++11 [dcl.align]p2:
+ // -- if the constant expression evaluates to zero, the alignment
+ // specifier shall have no effect
+ // C11 6.7.5p6:
+ // An alignment specification of zero has no effect.
+ if (!(TmpAttr.isAlignas() && !Alignment)) {
+ if (!llvm::isPowerOf2_64(AlignVal)) {
+ Diag(AttrLoc, diag::err_alignment_not_power_of_two)
+ << E->getSourceRange();
+ return;
+ }
+ }
+
+  // Alignment calculations can wrap around if the requested alignment is
+  // greater than 2**28.
+ unsigned MaxValidAlignment =
+ Context.getTargetInfo().getTriple().isOSBinFormatCOFF() ? 8192
+ : 268435456;
+ if (AlignVal > MaxValidAlignment) {
+ Diag(AttrLoc, diag::err_attribute_aligned_too_great) << MaxValidAlignment
+ << E->getSourceRange();
+ return;
+ }
+
+ if (Context.getTargetInfo().isTLSSupported()) {
+ unsigned MaxTLSAlign =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getMaxTLSAlign())
+ .getQuantity();
+ auto *VD = dyn_cast<VarDecl>(D);
+ if (MaxTLSAlign && AlignVal > MaxTLSAlign && VD &&
+ VD->getTLSKind() != VarDecl::TLS_None) {
+ Diag(VD->getLocation(), diag::err_tls_var_aligned_over_maximum)
+ << (unsigned)AlignVal << VD << MaxTLSAlign;
+ return;
+ }
+ }
+
+ AlignedAttr *AA = ::new (Context) AlignedAttr(AttrRange, Context, true,
+ ICE.get(), SpellingListIndex);
+ AA->setPackExpansion(IsPackExpansion);
+ D->addAttr(AA);
+}
+
+void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *TS,
+ unsigned SpellingListIndex, bool IsPackExpansion) {
+ // FIXME: Cache the number on the Attr object if non-dependent?
+ // FIXME: Perform checking of type validity
+ AlignedAttr *AA = ::new (Context) AlignedAttr(AttrRange, Context, false, TS,
+ SpellingListIndex);
+ AA->setPackExpansion(IsPackExpansion);
+ D->addAttr(AA);
+}
+
+void Sema::CheckAlignasUnderalignment(Decl *D) {
+ assert(D->hasAttrs() && "no attributes on decl");
+
+ QualType UnderlyingTy, DiagTy;
+ if (ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
+ UnderlyingTy = DiagTy = VD->getType();
+ } else {
+ UnderlyingTy = DiagTy = Context.getTagDeclType(cast<TagDecl>(D));
+ if (EnumDecl *ED = dyn_cast<EnumDecl>(D))
+ UnderlyingTy = ED->getIntegerType();
+ }
+ if (DiagTy->isDependentType() || DiagTy->isIncompleteType())
+ return;
+
+ // C++11 [dcl.align]p5, C11 6.7.5/4:
+ // The combined effect of all alignment attributes in a declaration shall
+ // not specify an alignment that is less strict than the alignment that
+ // would otherwise be required for the entity being declared.
+ AlignedAttr *AlignasAttr = nullptr;
+ unsigned Align = 0;
+ for (auto *I : D->specific_attrs<AlignedAttr>()) {
+ if (I->isAlignmentDependent())
+ return;
+ if (I->isAlignas())
+ AlignasAttr = I;
+ Align = std::max(Align, I->getAlignment(Context));
+ }
+
+ if (AlignasAttr && Align) {
+ CharUnits RequestedAlign = Context.toCharUnitsFromBits(Align);
+ CharUnits NaturalAlign = Context.getTypeAlignInChars(UnderlyingTy);
+ if (NaturalAlign > RequestedAlign)
+ Diag(AlignasAttr->getLocation(), diag::err_alignas_underaligned)
+ << DiagTy << (unsigned)NaturalAlign.getQuantity();
+ }
+}
+
+bool Sema::checkMSInheritanceAttrOnDefinition(
+ CXXRecordDecl *RD, SourceRange Range, bool BestCase,
+ MSInheritanceAttr::Spelling SemanticSpelling) {
+ assert(RD->hasDefinition() && "RD has no definition!");
+
+ // We may not have seen base specifiers or any virtual methods yet. We will
+ // have to wait until the record is defined to catch any mismatches.
+ if (!RD->getDefinition()->isCompleteDefinition())
+ return false;
+
+ // The unspecified model never matches what a definition could need.
+ if (SemanticSpelling == MSInheritanceAttr::Keyword_unspecified_inheritance)
+ return false;
+
+ if (BestCase) {
+ if (RD->calculateInheritanceModel() == SemanticSpelling)
+ return false;
+ } else {
+ if (RD->calculateInheritanceModel() <= SemanticSpelling)
+ return false;
+ }
+
+ Diag(Range.getBegin(), diag::err_mismatched_ms_inheritance)
+ << 0 /*definition*/;
+ Diag(RD->getDefinition()->getLocation(), diag::note_defined_here)
+ << RD->getNameAsString();
+ return true;
+}
+
+/// parseModeAttrArg - Parses an attribute mode string and sets the width and
+/// mode flags via the output parameters.
+static void parseModeAttrArg(Sema &S, StringRef Str, unsigned &DestWidth,
+ bool &IntegerMode, bool &ComplexMode) {
+ switch (Str.size()) {
+ case 2:
+ switch (Str[0]) {
+ case 'Q':
+ DestWidth = 8;
+ break;
+ case 'H':
+ DestWidth = 16;
+ break;
+ case 'S':
+ DestWidth = 32;
+ break;
+ case 'D':
+ DestWidth = 64;
+ break;
+ case 'X':
+ DestWidth = 96;
+ break;
+ case 'T':
+ DestWidth = 128;
+ break;
+ }
+ if (Str[1] == 'F') {
+ IntegerMode = false;
+ } else if (Str[1] == 'C') {
+ IntegerMode = false;
+ ComplexMode = true;
+ } else if (Str[1] != 'I') {
+ DestWidth = 0;
+ }
+ break;
+ case 4:
+ // FIXME: glibc uses 'word' to define register_t; this is narrower than a
+ // pointer on PIC16 and other embedded platforms.
+ if (Str == "word")
+ DestWidth = S.Context.getTargetInfo().getPointerWidth(0);
+ else if (Str == "byte")
+ DestWidth = S.Context.getTargetInfo().getCharWidth();
+ break;
+ case 7:
+ if (Str == "pointer")
+ DestWidth = S.Context.getTargetInfo().getPointerWidth(0);
+ break;
+ case 11:
+ if (Str == "unwind_word")
+ DestWidth = S.Context.getTargetInfo().getUnwindWordWidth();
+ break;
+ }
+}
+
+/// handleModeAttr - This attribute modifies the width of a decl with primitive
+/// type.
+///
+/// Despite what would be logical, the mode attribute is a decl attribute, not a
+/// type attribute: 'int ** __attribute((mode(HI))) *G;' tries to make 'G' be
+/// HImode, not an intermediate pointer.
+static void handleModeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // This attribute isn't documented, but glibc uses it. It changes
+ // the width of an int or unsigned int to the specified size.
+ if (!Attr.isArgIdent(0)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) << Attr.getName()
+ << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ IdentifierInfo *Name = Attr.getArgAsIdent(0)->Ident;
+ StringRef Str = Name->getName();
+
+ normalizeName(Str);
+
+ unsigned DestWidth = 0;
+ bool IntegerMode = true;
+ bool ComplexMode = false;
+ llvm::APInt VectorSize(64, 0);
+ if (Str.size() >= 4 && Str[0] == 'V') {
+ // Minimal length of vector mode is 4: 'V' + NUMBER(>=1) + TYPE(>=2).
+ size_t StrSize = Str.size();
+ size_t VectorStringLength = 0;
+ while ((VectorStringLength + 1) < StrSize &&
+ isdigit(Str[VectorStringLength + 1]))
+ ++VectorStringLength;
+ if (VectorStringLength &&
+ !Str.substr(1, VectorStringLength).getAsInteger(10, VectorSize) &&
+ VectorSize.isPowerOf2()) {
+ parseModeAttrArg(S, Str.substr(VectorStringLength + 1), DestWidth,
+ IntegerMode, ComplexMode);
+ S.Diag(Attr.getLoc(), diag::warn_vector_mode_deprecated);
+ } else {
+ VectorSize = 0;
+ }
+ }
+
+ if (!VectorSize)
+ parseModeAttrArg(S, Str, DestWidth, IntegerMode, ComplexMode);
+
+ QualType OldTy;
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D))
+ OldTy = TD->getUnderlyingType();
+ else if (ValueDecl *VD = dyn_cast<ValueDecl>(D))
+ OldTy = VD->getType();
+ else {
+ S.Diag(D->getLocation(), diag::err_attr_wrong_decl)
+ << Attr.getName() << Attr.getRange();
+ return;
+ }
+
+ // Base type can also be a vector type (see PR17453).
+ // Distinguish between base type and base element type.
+ QualType OldElemTy = OldTy;
+ if (const VectorType *VT = OldTy->getAs<VectorType>())
+ OldElemTy = VT->getElementType();
+
+ if (!OldElemTy->getAs<BuiltinType>() && !OldElemTy->isComplexType())
+ S.Diag(Attr.getLoc(), diag::err_mode_not_primitive);
+ else if (IntegerMode) {
+ if (!OldElemTy->isIntegralOrEnumerationType())
+ S.Diag(Attr.getLoc(), diag::err_mode_wrong_type);
+ } else if (ComplexMode) {
+ if (!OldElemTy->isComplexType())
+ S.Diag(Attr.getLoc(), diag::err_mode_wrong_type);
+ } else {
+ if (!OldElemTy->isFloatingType())
+ S.Diag(Attr.getLoc(), diag::err_mode_wrong_type);
+ }
+
+ // FIXME: Sync this with InitializePredefinedMacros; we need to match int8_t
+ // and friends, at least with glibc.
+ // FIXME: Make sure floating-point mappings are accurate
+ // FIXME: Support XF and TF types
+ if (!DestWidth) {
+ S.Diag(Attr.getLoc(), diag::err_machine_mode) << 0 /*Unknown*/ << Name;
+ return;
+ }
+
+ QualType NewElemTy;
+
+ if (IntegerMode)
+ NewElemTy = S.Context.getIntTypeForBitwidth(
+ DestWidth, OldElemTy->isSignedIntegerType());
+ else
+ NewElemTy = S.Context.getRealTypeForBitwidth(DestWidth);
+
+ if (NewElemTy.isNull()) {
+ S.Diag(Attr.getLoc(), diag::err_machine_mode) << 1 /*Unsupported*/ << Name;
+ return;
+ }
+
+ if (ComplexMode) {
+ NewElemTy = S.Context.getComplexType(NewElemTy);
+ }
+
+ QualType NewTy = NewElemTy;
+ if (VectorSize.getBoolValue()) {
+ NewTy = S.Context.getVectorType(NewTy, VectorSize.getZExtValue(),
+ VectorType::GenericVector);
+ } else if (const VectorType *OldVT = OldTy->getAs<VectorType>()) {
+ // Complex machine mode does not support base vector types.
+ if (ComplexMode) {
+ S.Diag(Attr.getLoc(), diag::err_complex_mode_vector_type);
+ return;
+ }
+ unsigned NumElements = S.Context.getTypeSize(OldElemTy) *
+ OldVT->getNumElements() /
+ S.Context.getTypeSize(NewElemTy);
+ NewTy =
+ S.Context.getVectorType(NewElemTy, NumElements, OldVT->getVectorKind());
+ }
+
+ if (NewTy.isNull()) {
+ S.Diag(Attr.getLoc(), diag::err_mode_wrong_type);
+ return;
+ }
+
+ // Install the new type.
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D))
+ TD->setModedTypeSourceInfo(TD->getTypeSourceInfo(), NewTy);
+ else
+ cast<ValueDecl>(D)->setType(NewTy);
+
+ D->addAttr(::new (S.Context)
+ ModeAttr(Attr.getRange(), S.Context, Name,
+ Attr.getAttributeSpellingListIndex()));
+}
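+
+// A vector mode sketch (hypothetical): 'typedef int v4si
+// __attribute__((mode(V4SI)));' yields a generic vector of four 32-bit
+// integers, with the deprecation warning emitted above.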
+
+static void handleNoDebugAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (!VD->hasGlobalStorage())
+ S.Diag(Attr.getLoc(),
+ diag::warn_attribute_requires_functions_or_static_globals)
+ << Attr.getName();
+ } else if (!isFunctionOrMethod(D)) {
+ S.Diag(Attr.getLoc(),
+ diag::warn_attribute_requires_functions_or_static_globals)
+ << Attr.getName();
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ NoDebugAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+AlwaysInlineAttr *Sema::mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
+ IdentifierInfo *Ident,
+ unsigned AttrSpellingListIndex) {
+ if (OptimizeNoneAttr *Optnone = D->getAttr<OptimizeNoneAttr>()) {
+ Diag(Range.getBegin(), diag::warn_attribute_ignored) << Ident;
+ Diag(Optnone->getLocation(), diag::note_conflicting_attribute);
+ return nullptr;
+ }
+
+ if (D->hasAttr<AlwaysInlineAttr>())
+ return nullptr;
+
+ return ::new (Context) AlwaysInlineAttr(Range, Context,
+ AttrSpellingListIndex);
+}
+
+CommonAttr *Sema::mergeCommonAttr(Decl *D, SourceRange Range,
+ IdentifierInfo *Ident,
+ unsigned AttrSpellingListIndex) {
+ if (checkAttrMutualExclusion<InternalLinkageAttr>(*this, D, Range, Ident))
+ return nullptr;
+
+ return ::new (Context) CommonAttr(Range, Context, AttrSpellingListIndex);
+}
+
+InternalLinkageAttr *
+Sema::mergeInternalLinkageAttr(Decl *D, SourceRange Range,
+ IdentifierInfo *Ident,
+ unsigned AttrSpellingListIndex) {
+ if (auto VD = dyn_cast<VarDecl>(D)) {
+ // Attribute applies to Var but not any subclass of it (like ParmVar,
+ // ImplicitParm or VarTemplateSpecialization).
+ if (VD->getKind() != Decl::Var) {
+ Diag(Range.getBegin(), diag::warn_attribute_wrong_decl_type)
+ << Ident << (getLangOpts().CPlusPlus ? ExpectedFunctionVariableOrClass
+ : ExpectedVariableOrFunction);
+ return nullptr;
+ }
+ // Attribute does not apply to non-static local variables.
+ if (VD->hasLocalStorage()) {
+ Diag(VD->getLocation(), diag::warn_internal_linkage_local_storage);
+ return nullptr;
+ }
+ }
+
+ if (checkAttrMutualExclusion<CommonAttr>(*this, D, Range, Ident))
+ return nullptr;
+
+ return ::new (Context)
+ InternalLinkageAttr(Range, Context, AttrSpellingListIndex);
+}
+
+MinSizeAttr *Sema::mergeMinSizeAttr(Decl *D, SourceRange Range,
+ unsigned AttrSpellingListIndex) {
+ if (OptimizeNoneAttr *Optnone = D->getAttr<OptimizeNoneAttr>()) {
+ Diag(Range.getBegin(), diag::warn_attribute_ignored) << "'minsize'";
+ Diag(Optnone->getLocation(), diag::note_conflicting_attribute);
+ return nullptr;
+ }
+
+ if (D->hasAttr<MinSizeAttr>())
+ return nullptr;
+
+ return ::new (Context) MinSizeAttr(Range, Context, AttrSpellingListIndex);
+}
+
+OptimizeNoneAttr *Sema::mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
+ unsigned AttrSpellingListIndex) {
+ if (AlwaysInlineAttr *Inline = D->getAttr<AlwaysInlineAttr>()) {
+ Diag(Inline->getLocation(), diag::warn_attribute_ignored) << Inline;
+ Diag(Range.getBegin(), diag::note_conflicting_attribute);
+ D->dropAttr<AlwaysInlineAttr>();
+ }
+ if (MinSizeAttr *MinSize = D->getAttr<MinSizeAttr>()) {
+ Diag(MinSize->getLocation(), diag::warn_attribute_ignored) << MinSize;
+ Diag(Range.getBegin(), diag::note_conflicting_attribute);
+ D->dropAttr<MinSizeAttr>();
+ }
+
+ if (D->hasAttr<OptimizeNoneAttr>())
+ return nullptr;
+
+ return ::new (Context) OptimizeNoneAttr(Range, Context,
+ AttrSpellingListIndex);
+}
+
+static void handleAlwaysInlineAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (checkAttrMutualExclusion<NotTailCalledAttr>(S, D, Attr.getRange(),
+ Attr.getName()))
+ return;
+
+ if (AlwaysInlineAttr *Inline = S.mergeAlwaysInlineAttr(
+ D, Attr.getRange(), Attr.getName(),
+ Attr.getAttributeSpellingListIndex()))
+ D->addAttr(Inline);
+}
+
+static void handleMinSizeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (MinSizeAttr *MinSize = S.mergeMinSizeAttr(
+ D, Attr.getRange(), Attr.getAttributeSpellingListIndex()))
+ D->addAttr(MinSize);
+}
+
+static void handleOptimizeNoneAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (OptimizeNoneAttr *Optnone = S.mergeOptimizeNoneAttr(
+ D, Attr.getRange(), Attr.getAttributeSpellingListIndex()))
+ D->addAttr(Optnone);
+}
+
+static void handleGlobalAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ FunctionDecl *FD = cast<FunctionDecl>(D);
+ if (!FD->getReturnType()->isVoidType()) {
+ SourceRange RTRange = FD->getReturnTypeSourceRange();
+ S.Diag(FD->getTypeSpecStartLoc(), diag::err_kern_type_not_void_return)
+ << FD->getType()
+ << (RTRange.isValid() ? FixItHint::CreateReplacement(RTRange, "void")
+ : FixItHint());
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ CUDAGlobalAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleGNUInlineAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ FunctionDecl *Fn = cast<FunctionDecl>(D);
+ if (!Fn->isInlineSpecified()) {
+ S.Diag(Attr.getLoc(), diag::warn_gnu_inline_attribute_requires_inline);
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ GNUInlineAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
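+
+// e.g. 'inline int f(void) __attribute__((gnu_inline));' is accepted;
+// without the 'inline' keyword the attribute only draws the warning above.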
+
+static void handleCallConvAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (hasDeclarator(D)) return;
+
+ // Diagnostic is emitted elsewhere: here we store the (valid) Attr
+ // in the Decl node for syntactic reasoning, e.g., pretty-printing.
+ CallingConv CC;
+ if (S.CheckCallingConvAttr(Attr, CC, /*FD*/nullptr))
+ return;
+
+ if (!isa<ObjCMethodDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ switch (Attr.getKind()) {
+ case AttributeList::AT_FastCall:
+ D->addAttr(::new (S.Context)
+ FastCallAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+ return;
+ case AttributeList::AT_StdCall:
+ D->addAttr(::new (S.Context)
+ StdCallAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+ return;
+ case AttributeList::AT_ThisCall:
+ D->addAttr(::new (S.Context)
+ ThisCallAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+ return;
+ case AttributeList::AT_CDecl:
+ D->addAttr(::new (S.Context)
+ CDeclAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+ return;
+ case AttributeList::AT_Pascal:
+ D->addAttr(::new (S.Context)
+ PascalAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+ return;
+ case AttributeList::AT_VectorCall:
+ D->addAttr(::new (S.Context)
+ VectorCallAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+ return;
+ case AttributeList::AT_MSABI:
+ D->addAttr(::new (S.Context)
+ MSABIAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+ return;
+ case AttributeList::AT_SysVABI:
+ D->addAttr(::new (S.Context)
+ SysVABIAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+ return;
+ case AttributeList::AT_Pcs: {
+ PcsAttr::PCSType PCS;
+ switch (CC) {
+ case CC_AAPCS:
+ PCS = PcsAttr::AAPCS;
+ break;
+ case CC_AAPCS_VFP:
+ PCS = PcsAttr::AAPCS_VFP;
+ break;
+ default:
+ llvm_unreachable("unexpected calling convention in pcs attribute");
+ }
+
+ D->addAttr(::new (S.Context)
+ PcsAttr(Attr.getRange(), S.Context, PCS,
+ Attr.getAttributeSpellingListIndex()));
+ return;
+ }
+ case AttributeList::AT_IntelOclBicc:
+ D->addAttr(::new (S.Context)
+ IntelOclBiccAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+ return;
+
+ default:
+ llvm_unreachable("unexpected attribute kind");
+ }
+}
+
+bool Sema::CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
+ const FunctionDecl *FD) {
+ if (attr.isInvalid())
+ return true;
+
+ unsigned ReqArgs = attr.getKind() == AttributeList::AT_Pcs ? 1 : 0;
+ if (!checkAttributeNumArgs(*this, attr, ReqArgs)) {
+ attr.setInvalid();
+ return true;
+ }
+
+ // TODO: diagnose uses of these conventions on the wrong target.
+ switch (attr.getKind()) {
+ case AttributeList::AT_CDecl: CC = CC_C; break;
+ case AttributeList::AT_FastCall: CC = CC_X86FastCall; break;
+ case AttributeList::AT_StdCall: CC = CC_X86StdCall; break;
+ case AttributeList::AT_ThisCall: CC = CC_X86ThisCall; break;
+ case AttributeList::AT_Pascal: CC = CC_X86Pascal; break;
+ case AttributeList::AT_VectorCall: CC = CC_X86VectorCall; break;
+ case AttributeList::AT_MSABI:
+ CC = Context.getTargetInfo().getTriple().isOSWindows() ? CC_C :
+ CC_X86_64Win64;
+ break;
+ case AttributeList::AT_SysVABI:
+ CC = Context.getTargetInfo().getTriple().isOSWindows() ? CC_X86_64SysV :
+ CC_C;
+ break;
+ case AttributeList::AT_Pcs: {
+ StringRef StrRef;
+ if (!checkStringLiteralArgumentAttr(attr, 0, StrRef)) {
+ attr.setInvalid();
+ return true;
+ }
+ if (StrRef == "aapcs") {
+ CC = CC_AAPCS;
+ break;
+ } else if (StrRef == "aapcs-vfp") {
+ CC = CC_AAPCS_VFP;
+ break;
+ }
+
+ attr.setInvalid();
+ Diag(attr.getLoc(), diag::err_invalid_pcs);
+ return true;
+ }
+ case AttributeList::AT_IntelOclBicc: CC = CC_IntelOclBicc; break;
+ default: llvm_unreachable("unexpected attribute kind");
+ }
+
+ const TargetInfo &TI = Context.getTargetInfo();
+ TargetInfo::CallingConvCheckResult A = TI.checkCallingConvention(CC);
+ if (A != TargetInfo::CCCR_OK) {
+ if (A == TargetInfo::CCCR_Warning)
+ Diag(attr.getLoc(), diag::warn_cconv_ignored) << attr.getName();
+
+ // This convention is not valid for the target. Use the default function or
+ // method calling convention.
+ TargetInfo::CallingConvMethodType MT = TargetInfo::CCMT_Unknown;
+ if (FD)
+ MT = FD->isCXXInstanceMember() ? TargetInfo::CCMT_Member :
+ TargetInfo::CCMT_NonMember;
+ CC = TI.getDefaultCallingConv(MT);
+ }
+
+ return false;
+}
+
+/// Checks a regparm attribute, returning true if it is ill-formed and
+/// otherwise setting numParams to the appropriate value.
+bool Sema::CheckRegparmAttr(const AttributeList &Attr, unsigned &numParams) {
+ if (Attr.isInvalid())
+ return true;
+
+ if (!checkAttributeNumArgs(*this, Attr, 1)) {
+ Attr.setInvalid();
+ return true;
+ }
+
+ uint32_t NP;
+ Expr *NumParamsExpr = Attr.getArgAsExpr(0);
+ if (!checkUInt32Argument(*this, Attr, NumParamsExpr, NP)) {
+ Attr.setInvalid();
+ return true;
+ }
+
+ if (Context.getTargetInfo().getRegParmMax() == 0) {
+ Diag(Attr.getLoc(), diag::err_attribute_regparm_wrong_platform)
+ << NumParamsExpr->getSourceRange();
+ Attr.setInvalid();
+ return true;
+ }
+
+ numParams = NP;
+ if (numParams > Context.getTargetInfo().getRegParmMax()) {
+ Diag(Attr.getLoc(), diag::err_attribute_regparm_invalid_number)
+ << Context.getTargetInfo().getRegParmMax() << NumParamsExpr->getSourceRange();
+ Attr.setInvalid();
+ return true;
+ }
+
+ return false;
+}
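+
+// e.g. on i386, 'void f(int a, int b) __attribute__((regparm(2)));' passes
+// the first two integer arguments in registers; a count above
+// getRegParmMax() (3 on i386) is rejected above.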
+
+// Checks whether an argument of the launch_bounds attribute is acceptable.
+// May emit an error.
+static bool checkLaunchBoundsArgument(Sema &S, Expr *E,
+ const CUDALaunchBoundsAttr &Attr,
+ const unsigned Idx) {
+
+ if (S.DiagnoseUnexpandedParameterPack(E))
+ return false;
+
+ // Accept template arguments for now as they depend on something else.
+ // We'll get to check them when they eventually get instantiated.
+ if (E->isValueDependent())
+ return true;
+
+ llvm::APSInt I(64);
+ if (!E->isIntegerConstantExpr(I, S.Context)) {
+ S.Diag(E->getExprLoc(), diag::err_attribute_argument_n_type)
+ << &Attr << Idx << AANT_ArgumentIntegerConstant << E->getSourceRange();
+ return false;
+ }
+ // Make sure we can fit it in 32 bits.
+ if (!I.isIntN(32)) {
+ S.Diag(E->getExprLoc(), diag::err_ice_too_large) << I.toString(10, false)
+ << 32 << /* Unsigned */ 1;
+ return false;
+ }
+ if (I < 0)
+ S.Diag(E->getExprLoc(), diag::warn_attribute_argument_n_negative)
+ << &Attr << Idx << E->getSourceRange();
+
+ return true;
+}
+
+void Sema::AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
+ Expr *MinBlocks, unsigned SpellingListIndex) {
+ CUDALaunchBoundsAttr TmpAttr(AttrRange, Context, MaxThreads, MinBlocks,
+ SpellingListIndex);
+
+ if (!checkLaunchBoundsArgument(*this, MaxThreads, TmpAttr, 0))
+ return;
+
+ if (MinBlocks && !checkLaunchBoundsArgument(*this, MinBlocks, TmpAttr, 1))
+ return;
+
+ D->addAttr(::new (Context) CUDALaunchBoundsAttr(
+ AttrRange, Context, MaxThreads, MinBlocks, SpellingListIndex));
+}
+
+static void handleLaunchBoundsAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!checkAttributeAtLeastNumArgs(S, Attr, 1) ||
+ !checkAttributeAtMostNumArgs(S, Attr, 2))
+ return;
+
+ S.AddLaunchBoundsAttr(Attr.getRange(), D, Attr.getArgAsExpr(0),
+ Attr.getNumArgs() > 1 ? Attr.getArgAsExpr(1) : nullptr,
+ Attr.getAttributeSpellingListIndex());
+}
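+
+// e.g. '__global__ void __launch_bounds__(256, 2) kern();' caps the kernel
+// at 256 threads per block and requests at least two resident blocks per
+// multiprocessor; the second argument is optional, as handled above.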
+
+static void handleArgumentWithTypeTagAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!Attr.isArgIdent(0)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
+ << Attr.getName() << /* arg num = */ 1 << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ if (!checkAttributeNumArgs(S, Attr, 3))
+ return;
+
+ IdentifierInfo *ArgumentKind = Attr.getArgAsIdent(0)->Ident;
+
+ if (!isFunctionOrMethod(D) || !hasFunctionProto(D)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ uint64_t ArgumentIdx;
+ if (!checkFunctionOrMethodParameterIndex(S, D, Attr, 2, Attr.getArgAsExpr(1),
+ ArgumentIdx))
+ return;
+
+ uint64_t TypeTagIdx;
+ if (!checkFunctionOrMethodParameterIndex(S, D, Attr, 3, Attr.getArgAsExpr(2),
+ TypeTagIdx))
+ return;
+
+ bool IsPointer = (Attr.getName()->getName() == "pointer_with_type_tag");
+ if (IsPointer) {
+ // Ensure that buffer has a pointer type.
+ QualType BufferTy = getFunctionOrMethodParamType(D, ArgumentIdx);
+ if (!BufferTy->isPointerType()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_pointers_only)
+ << Attr.getName() << 0;
+ }
+ }
+
+ D->addAttr(::new (S.Context)
+ ArgumentWithTypeTagAttr(Attr.getRange(), S.Context, ArgumentKind,
+ ArgumentIdx, TypeTagIdx, IsPointer,
+ Attr.getAttributeSpellingListIndex()));
+}
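+
+// e.g. (following the Clang type-safety docs, identifiers illustrative):
+//   void MPI_Send(void *buf, int count, MPI_Datatype type)
+//       __attribute__((pointer_with_type_tag(mpi, 1, 3)));
+// marks argument 1 as a buffer whose pointee type must match the type tag
+// passed as argument 3.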
+
+static void handleTypeTagForDatatypeAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!Attr.isArgIdent(0)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
+ << Attr.getName() << 1 << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ if (!checkAttributeNumArgs(S, Attr, 1))
+ return;
+
+ if (!isa<VarDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedVariable;
+ return;
+ }
+
+ IdentifierInfo *PointerKind = Attr.getArgAsIdent(0)->Ident;
+ TypeSourceInfo *MatchingCTypeLoc = nullptr;
+ S.GetTypeFromParser(Attr.getMatchingCType(), &MatchingCTypeLoc);
+ assert(MatchingCTypeLoc && "no type source info for attribute argument");
+
+ D->addAttr(::new (S.Context)
+ TypeTagForDatatypeAttr(Attr.getRange(), S.Context, PointerKind,
+ MatchingCTypeLoc,
+ Attr.getLayoutCompatible(),
+ Attr.getMustBeNull(),
+ Attr.getAttributeSpellingListIndex()));
+}
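+
+// e.g. (same illustrative MPI scheme):
+//   static const MPI_Datatype mpi_datatype_int
+//       __attribute__((type_tag_for_datatype(mpi, int)));
+// associates the tag constant with the C type 'int'.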
+
+//===----------------------------------------------------------------------===//
+// Checker-specific attribute handlers.
+//===----------------------------------------------------------------------===//
+
+static bool isValidSubjectOfNSReturnsRetainedAttribute(QualType type) {
+ return type->isDependentType() ||
+ type->isObjCRetainableType();
+}
+
+static bool isValidSubjectOfNSAttribute(Sema &S, QualType type) {
+ return type->isDependentType() ||
+ type->isObjCObjectPointerType() ||
+ S.Context.isObjCNSObjectType(type);
+}
+static bool isValidSubjectOfCFAttribute(Sema &S, QualType type) {
+ return type->isDependentType() ||
+ type->isPointerType() ||
+ isValidSubjectOfNSAttribute(S, type);
+}
+
+static void handleNSConsumedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ ParmVarDecl *param = cast<ParmVarDecl>(D);
+ bool typeOK, cf;
+
+ if (Attr.getKind() == AttributeList::AT_NSConsumed) {
+ typeOK = isValidSubjectOfNSAttribute(S, param->getType());
+ cf = false;
+ } else {
+ typeOK = isValidSubjectOfCFAttribute(S, param->getType());
+ cf = true;
+ }
+
+ if (!typeOK) {
+ S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_parameter_type)
+ << Attr.getRange() << Attr.getName() << cf;
+ return;
+ }
+
+ if (cf)
+ param->addAttr(::new (S.Context)
+ CFConsumedAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+ else
+ param->addAttr(::new (S.Context)
+ NSConsumedAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+
+ QualType returnType;
+
+ if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ returnType = MD->getReturnType();
+ else if (S.getLangOpts().ObjCAutoRefCount && hasDeclarator(D) &&
+ (Attr.getKind() == AttributeList::AT_NSReturnsRetained))
+ return; // ignore: was handled as a type attribute
+ else if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D))
+ returnType = PD->getType();
+ else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ returnType = FD->getReturnType();
+ else if (auto *Param = dyn_cast<ParmVarDecl>(D)) {
+ returnType = Param->getType()->getPointeeType();
+ if (returnType.isNull()) {
+ S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_parameter_type)
+ << Attr.getName() << /*pointer-to-CF*/2
+ << Attr.getRange();
+ return;
+ }
+ } else {
+ AttributeDeclKind ExpectedDeclKind;
+ switch (Attr.getKind()) {
+ default: llvm_unreachable("invalid ownership attribute");
+ case AttributeList::AT_NSReturnsRetained:
+ case AttributeList::AT_NSReturnsAutoreleased:
+ case AttributeList::AT_NSReturnsNotRetained:
+ ExpectedDeclKind = ExpectedFunctionOrMethod;
+ break;
+
+ case AttributeList::AT_CFReturnsRetained:
+ case AttributeList::AT_CFReturnsNotRetained:
+ ExpectedDeclKind = ExpectedFunctionMethodOrParameter;
+ break;
+ }
+ S.Diag(D->getLocStart(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getRange() << Attr.getName() << ExpectedDeclKind;
+ return;
+ }
+
+ bool typeOK;
+ bool cf;
+ switch (Attr.getKind()) {
+ default: llvm_unreachable("invalid ownership attribute");
+ case AttributeList::AT_NSReturnsRetained:
+ typeOK = isValidSubjectOfNSReturnsRetainedAttribute(returnType);
+ cf = false;
+ break;
+
+ case AttributeList::AT_NSReturnsAutoreleased:
+ case AttributeList::AT_NSReturnsNotRetained:
+ typeOK = isValidSubjectOfNSAttribute(S, returnType);
+ cf = false;
+ break;
+
+ case AttributeList::AT_CFReturnsRetained:
+ case AttributeList::AT_CFReturnsNotRetained:
+ typeOK = isValidSubjectOfCFAttribute(S, returnType);
+ cf = true;
+ break;
+ }
+
+ if (!typeOK) {
+ if (isa<ParmVarDecl>(D)) {
+ S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_parameter_type)
+ << Attr.getName() << /*pointer-to-CF*/2
+ << Attr.getRange();
+ } else {
+ // Needs to be kept in sync with warn_ns_attribute_wrong_return_type.
+ enum : unsigned {
+ Function,
+ Method,
+ Property
+ } SubjectKind = Function;
+ if (isa<ObjCMethodDecl>(D))
+ SubjectKind = Method;
+ else if (isa<ObjCPropertyDecl>(D))
+ SubjectKind = Property;
+ S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_return_type)
+ << Attr.getName() << SubjectKind << cf
+ << Attr.getRange();
+ }
+ return;
+ }
+
+ switch (Attr.getKind()) {
+ default:
+ llvm_unreachable("invalid ownership attribute");
+ case AttributeList::AT_NSReturnsAutoreleased:
+ D->addAttr(::new (S.Context) NSReturnsAutoreleasedAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ return;
+ case AttributeList::AT_CFReturnsNotRetained:
+ D->addAttr(::new (S.Context) CFReturnsNotRetainedAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ return;
+ case AttributeList::AT_NSReturnsNotRetained:
+ D->addAttr(::new (S.Context) NSReturnsNotRetainedAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ return;
+ case AttributeList::AT_CFReturnsRetained:
+ D->addAttr(::new (S.Context) CFReturnsRetainedAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ return;
+ case AttributeList::AT_NSReturnsRetained:
+ D->addAttr(::new (S.Context) NSReturnsRetainedAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ return;
+  }
+}
+
+static void handleObjCReturnsInnerPointerAttr(Sema &S, Decl *D,
+ const AttributeList &attr) {
+ const int EP_ObjCMethod = 1;
+ const int EP_ObjCProperty = 2;
+
+ SourceLocation loc = attr.getLoc();
+ QualType resultType;
+ if (isa<ObjCMethodDecl>(D))
+ resultType = cast<ObjCMethodDecl>(D)->getReturnType();
+ else
+ resultType = cast<ObjCPropertyDecl>(D)->getType();
+
+ if (!resultType->isReferenceType() &&
+ (!resultType->isPointerType() || resultType->isObjCRetainableType())) {
+ S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_return_type)
+ << SourceRange(loc)
+ << attr.getName()
+ << (isa<ObjCMethodDecl>(D) ? EP_ObjCMethod : EP_ObjCProperty)
+ << /*non-retainable pointer*/ 2;
+
+ // Drop the attribute.
+ return;
+ }
+
+ D->addAttr(::new (S.Context) ObjCReturnsInnerPointerAttr(
+ attr.getRange(), S.Context, attr.getAttributeSpellingListIndex()));
+}
+
+static void handleObjCRequiresSuperAttr(Sema &S, Decl *D,
+ const AttributeList &attr) {
+ ObjCMethodDecl *method = cast<ObjCMethodDecl>(D);
+
+ DeclContext *DC = method->getDeclContext();
+ if (const ObjCProtocolDecl *PDecl = dyn_cast_or_null<ObjCProtocolDecl>(DC)) {
+ S.Diag(D->getLocStart(), diag::warn_objc_requires_super_protocol)
+ << attr.getName() << 0;
+ S.Diag(PDecl->getLocation(), diag::note_protocol_decl);
+ return;
+ }
+ if (method->getMethodFamily() == OMF_dealloc) {
+ S.Diag(D->getLocStart(), diag::warn_objc_requires_super_protocol)
+ << attr.getName() << 1;
+ return;
+ }
+
+ method->addAttr(::new (S.Context)
+ ObjCRequiresSuperAttr(attr.getRange(), S.Context,
+ attr.getAttributeSpellingListIndex()));
+}
+
+static void handleCFAuditedTransferAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (checkAttrMutualExclusion<CFUnknownTransferAttr>(S, D, Attr.getRange(),
+ Attr.getName()))
+ return;
+
+ D->addAttr(::new (S.Context)
+ CFAuditedTransferAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleCFUnknownTransferAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (checkAttrMutualExclusion<CFAuditedTransferAttr>(S, D, Attr.getRange(),
+ Attr.getName()))
+ return;
+
+ D->addAttr(::new (S.Context)
+ CFUnknownTransferAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleObjCBridgeAttr(Sema &S, Scope *Sc, Decl *D,
+ const AttributeList &Attr) {
+ IdentifierLoc * Parm = Attr.isArgIdent(0) ? Attr.getArgAsIdent(0) : nullptr;
+
+ if (!Parm) {
+ S.Diag(D->getLocStart(), diag::err_objc_attr_not_id) << Attr.getName() << 0;
+ return;
+ }
+
+ // Typedefs only allow objc_bridge(id) and have some additional checking.
+ if (auto TD = dyn_cast<TypedefNameDecl>(D)) {
+ if (!Parm->Ident->isStr("id")) {
+ S.Diag(Attr.getLoc(), diag::err_objc_attr_typedef_not_id)
+ << Attr.getName();
+ return;
+ }
+
+ // Only allow 'cv void *'.
+ QualType T = TD->getUnderlyingType();
+ if (!T->isVoidPointerType()) {
+ S.Diag(Attr.getLoc(), diag::err_objc_attr_typedef_not_void_pointer);
+ return;
+ }
+ }
+
+ D->addAttr(::new (S.Context)
+ ObjCBridgeAttr(Attr.getRange(), S.Context, Parm->Ident,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleObjCBridgeMutableAttr(Sema &S, Scope *Sc, Decl *D,
+ const AttributeList &Attr) {
+ IdentifierLoc * Parm = Attr.isArgIdent(0) ? Attr.getArgAsIdent(0) : nullptr;
+
+ if (!Parm) {
+ S.Diag(D->getLocStart(), diag::err_objc_attr_not_id) << Attr.getName() << 0;
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ ObjCBridgeMutableAttr(Attr.getRange(), S.Context, Parm->Ident,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleObjCBridgeRelatedAttr(Sema &S, Scope *Sc, Decl *D,
+ const AttributeList &Attr) {
+ IdentifierInfo *RelatedClass =
+ Attr.isArgIdent(0) ? Attr.getArgAsIdent(0)->Ident : nullptr;
+ if (!RelatedClass) {
+ S.Diag(D->getLocStart(), diag::err_objc_attr_not_id) << Attr.getName() << 0;
+ return;
+ }
+ IdentifierInfo *ClassMethod =
+ Attr.getArgAsIdent(1) ? Attr.getArgAsIdent(1)->Ident : nullptr;
+ IdentifierInfo *InstanceMethod =
+ Attr.getArgAsIdent(2) ? Attr.getArgAsIdent(2)->Ident : nullptr;
+ D->addAttr(::new (S.Context)
+ ObjCBridgeRelatedAttr(Attr.getRange(), S.Context, RelatedClass,
+ ClassMethod, InstanceMethod,
+ Attr.getAttributeSpellingListIndex()));
+}
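+
+// e.g. (from the attribute's documentation):
+//   typedef struct CGColor *CGColorRef
+//       __attribute__((objc_bridge_related(UIColor, colorWithCGColor:, CGColor)));
+// relates CGColorRef to UIColor through the named class and instance
+// methods.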
+
+static void handleObjCDesignatedInitializer(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ ObjCInterfaceDecl *IFace;
+ if (ObjCCategoryDecl *CatDecl =
+ dyn_cast<ObjCCategoryDecl>(D->getDeclContext()))
+ IFace = CatDecl->getClassInterface();
+ else
+ IFace = cast<ObjCInterfaceDecl>(D->getDeclContext());
+
+ if (!IFace)
+ return;
+
+ IFace->setHasDesignatedInitializers();
+ D->addAttr(::new (S.Context)
+ ObjCDesignatedInitializerAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleObjCRuntimeName(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ StringRef MetaDataName;
+ if (!S.checkStringLiteralArgumentAttr(Attr, 0, MetaDataName))
+ return;
+ D->addAttr(::new (S.Context)
+ ObjCRuntimeNameAttr(Attr.getRange(), S.Context,
+ MetaDataName,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+// When a user wants to use objc_boxable with a union or struct
+// but doesn't have access to the declaration (legacy/third-party code),
+// the feature can be enabled with a typedef trick, e.g.:
+// e.g.:
+// typedef struct __attribute((objc_boxable)) legacy_struct legacy_struct;
+static void handleObjCBoxable(Sema &S, Decl *D, const AttributeList &Attr) {
+ bool notify = false;
+
+ RecordDecl *RD = dyn_cast<RecordDecl>(D);
+ if (RD && RD->getDefinition()) {
+ RD = RD->getDefinition();
+ notify = true;
+ }
+
+ if (RD) {
+ ObjCBoxableAttr *BoxableAttr = ::new (S.Context)
+ ObjCBoxableAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex());
+ RD->addAttr(BoxableAttr);
+ if (notify) {
+      // We need to notify the ASTReader/ASTWriter about the
+      // modification of an existing declaration.
+ if (ASTMutationListener *L = S.getASTMutationListener())
+ L->AddedAttributeToRecord(BoxableAttr, RD);
+ }
+ }
+}
+
+static void handleObjCOwnershipAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (hasDeclarator(D)) return;
+
+ S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
+ << Attr.getRange() << Attr.getName() << ExpectedVariable;
+}
+
+static void handleObjCPreciseLifetimeAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ ValueDecl *vd = cast<ValueDecl>(D);
+ QualType type = vd->getType();
+
+ if (!type->isDependentType() &&
+ !type->isObjCLifetimeType()) {
+ S.Diag(Attr.getLoc(), diag::err_objc_precise_lifetime_bad_type)
+ << type;
+ return;
+ }
+
+ Qualifiers::ObjCLifetime lifetime = type.getObjCLifetime();
+
+ // If we have no lifetime yet, check the lifetime we're presumably
+ // going to infer.
+ if (lifetime == Qualifiers::OCL_None && !type->isDependentType())
+ lifetime = type->getObjCARCImplicitLifetime();
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ assert(type->isDependentType() &&
+ "didn't infer lifetime for non-dependent type?");
+ break;
+
+ case Qualifiers::OCL_Weak: // meaningful
+ case Qualifiers::OCL_Strong: // meaningful
+ break;
+
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ S.Diag(Attr.getLoc(), diag::warn_objc_precise_lifetime_meaningless)
+ << (lifetime == Qualifiers::OCL_Autoreleasing);
+ break;
+ }
+
+ D->addAttr(::new (S.Context)
+ ObjCPreciseLifetimeAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+//===----------------------------------------------------------------------===//
+// Microsoft specific attribute handlers.
+//===----------------------------------------------------------------------===//
+
+static void handleUuidAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!S.LangOpts.CPlusPlus) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_not_supported_in_lang)
+ << Attr.getName() << AttributeLangSupport::C;
+ return;
+ }
+
+ if (!isa<CXXRecordDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedClass;
+ return;
+ }
+
+ StringRef StrRef;
+ SourceLocation LiteralLoc;
+ if (!S.checkStringLiteralArgumentAttr(Attr, 0, StrRef, &LiteralLoc))
+ return;
+
+ // GUID format is "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" or
+ // "{XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}", normalize to the former.
+ if (StrRef.size() == 38 && StrRef.front() == '{' && StrRef.back() == '}')
+ StrRef = StrRef.drop_front().drop_back();
+
+ // Validate GUID length.
+ if (StrRef.size() != 36) {
+ S.Diag(LiteralLoc, diag::err_attribute_uuid_malformed_guid);
+ return;
+ }
+
+ for (unsigned i = 0; i < 36; ++i) {
+ if (i == 8 || i == 13 || i == 18 || i == 23) {
+ if (StrRef[i] != '-') {
+ S.Diag(LiteralLoc, diag::err_attribute_uuid_malformed_guid);
+ return;
+ }
+ } else if (!isHexDigit(StrRef[i])) {
+ S.Diag(LiteralLoc, diag::err_attribute_uuid_malformed_guid);
+ return;
+ }
+ }
+
+ D->addAttr(::new (S.Context) UuidAttr(Attr.getRange(), S.Context, StrRef,
+ Attr.getAttributeSpellingListIndex()));
+}
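+
+// e.g. 'struct __declspec(uuid("00000000-0000-0000-C000-000000000046"))
+// IUnknown;' is accepted, as is the same GUID wrapped in braces, per the
+// normalization above.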
+
+static void handleMSInheritanceAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!S.LangOpts.CPlusPlus) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_not_supported_in_lang)
+ << Attr.getName() << AttributeLangSupport::C;
+ return;
+ }
+ MSInheritanceAttr *IA = S.mergeMSInheritanceAttr(
+ D, Attr.getRange(), /*BestCase=*/true,
+ Attr.getAttributeSpellingListIndex(),
+ (MSInheritanceAttr::Spelling)Attr.getSemanticSpelling());
+ if (IA)
+ D->addAttr(IA);
+}
+
+static void handleDeclspecThreadAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ VarDecl *VD = cast<VarDecl>(D);
+ if (!S.Context.getTargetInfo().isTLSSupported()) {
+ S.Diag(Attr.getLoc(), diag::err_thread_unsupported);
+ return;
+ }
+ if (VD->getTSCSpec() != TSCS_unspecified) {
+ S.Diag(Attr.getLoc(), diag::err_declspec_thread_on_thread_variable);
+ return;
+ }
+ if (VD->hasLocalStorage()) {
+ S.Diag(Attr.getLoc(), diag::err_thread_non_global) << "__declspec(thread)";
+ return;
+ }
+ VD->addAttr(::new (S.Context) ThreadAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+}
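+
+// e.g. '__declspec(thread) int counter;' at global scope is accepted; the
+// same annotation on a local variable is rejected above.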
+
+static void handleARMInterruptAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() > 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments)
+ << Attr.getName() << 1;
+ return;
+ }
+
+ StringRef Str;
+ SourceLocation ArgLoc;
+
+ if (Attr.getNumArgs() == 0)
+ Str = "";
+ else if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str, &ArgLoc))
+ return;
+
+ ARMInterruptAttr::InterruptType Kind;
+ if (!ARMInterruptAttr::ConvertStrToInterruptType(Str, Kind)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
+ << Attr.getName() << Str << ArgLoc;
+ return;
+ }
+
+ unsigned Index = Attr.getAttributeSpellingListIndex();
+ D->addAttr(::new (S.Context)
+ ARMInterruptAttr(Attr.getLoc(), S.Context, Kind, Index));
+}
+
+static void handleMSP430InterruptAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!checkAttributeNumArgs(S, Attr, 1))
+ return;
+
+ if (!Attr.isArgExpr(0)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) << Attr.getName()
+ << AANT_ArgumentIntegerConstant;
+ return;
+ }
+
+ // FIXME: Check for decl - it should be void ()(void).
+
+ Expr *NumParamsExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
+ llvm::APSInt NumParams(32);
+ if (!NumParamsExpr->isIntegerConstantExpr(NumParams, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
+ << Attr.getName() << AANT_ArgumentIntegerConstant
+ << NumParamsExpr->getSourceRange();
+ return;
+ }
+
+ unsigned Num = NumParams.getLimitedValue(255);
+ if ((Num & 1) || Num > 30) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << Attr.getName() << (int)NumParams.getSExtValue()
+ << NumParamsExpr->getSourceRange();
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ MSP430InterruptAttr(Attr.getLoc(), S.Context, Num,
+ Attr.getAttributeSpellingListIndex()));
+ D->addAttr(UsedAttr::CreateImplicit(S.Context));
+}
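+
+// e.g. 'void __attribute__((interrupt(2))) isr(void);' installs the handler
+// in vector slot 2; odd or out-of-range (> 30) vector numbers are rejected
+// above.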
+
+static void handleMipsInterruptAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ // Only one optional argument permitted.
+ if (Attr.getNumArgs() > 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments)
+ << Attr.getName() << 1;
+ return;
+ }
+
+ StringRef Str;
+ SourceLocation ArgLoc;
+
+ if (Attr.getNumArgs() == 0)
+ Str = "";
+ else if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str, &ArgLoc))
+ return;
+
+ // Semantic checks for a function with the 'interrupt' attribute for MIPS:
+ // a) Must be a function.
+ // b) Must have no parameters.
+ // c) Must have the 'void' return type.
+ // d) Cannot have the 'mips16' attribute, as that instruction set
+ // lacks the 'eret' instruction.
+ // e) The attribute itself must either have no argument or one of the
+ // valid interrupt types, see [MipsInterruptDocs].
+
+ if (!isFunctionOrMethod(D)) {
+ S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
+ << "'interrupt'" << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ if (hasFunctionProto(D) && getFunctionOrMethodNumParams(D) != 0) {
+ S.Diag(D->getLocation(), diag::warn_mips_interrupt_attribute)
+ << 0;
+ return;
+ }
+
+ if (!getFunctionOrMethodResultType(D)->isVoidType()) {
+ S.Diag(D->getLocation(), diag::warn_mips_interrupt_attribute)
+ << 1;
+ return;
+ }
+
+ if (checkAttrMutualExclusion<Mips16Attr>(S, D, Attr.getRange(),
+ Attr.getName()))
+ return;
+
+ MipsInterruptAttr::InterruptType Kind;
+ if (!MipsInterruptAttr::ConvertStrToInterruptType(Str, Kind)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
+ << Attr.getName() << "'" + std::string(Str) + "'";
+ return;
+ }
+
+ D->addAttr(::new (S.Context) MipsInterruptAttr(
+ Attr.getLoc(), S.Context, Kind, Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleInterruptAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // Dispatch the interrupt attribute based on the current target.
+ if (S.Context.getTargetInfo().getTriple().getArch() == llvm::Triple::msp430)
+ handleMSP430InterruptAttr(S, D, Attr);
+ else if (S.Context.getTargetInfo().getTriple().getArch() ==
+ llvm::Triple::mipsel ||
+ S.Context.getTargetInfo().getTriple().getArch() ==
+ llvm::Triple::mips)
+ handleMipsInterruptAttr(S, D, Attr);
+ else
+ handleARMInterruptAttr(S, D, Attr);
+}
+
+static void handleMips16Attribute(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (checkAttrMutualExclusion<MipsInterruptAttr>(S, D, Attr.getRange(),
+ Attr.getName()))
+ return;
+
+ handleSimpleAttribute<Mips16Attr>(S, D, Attr);
+}
+
+static void handleAMDGPUNumVGPRAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ uint32_t NumRegs;
+ Expr *NumRegsExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
+ if (!checkUInt32Argument(S, Attr, NumRegsExpr, NumRegs))
+ return;
+
+ D->addAttr(::new (S.Context)
+ AMDGPUNumVGPRAttr(Attr.getLoc(), S.Context,
+ NumRegs,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleAMDGPUNumSGPRAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ uint32_t NumRegs;
+ Expr *NumRegsExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
+ if (!checkUInt32Argument(S, Attr, NumRegsExpr, NumRegs))
+ return;
+
+ D->addAttr(::new (S.Context)
+ AMDGPUNumSGPRAttr(Attr.getLoc(), S.Context,
+ NumRegs,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleX86ForceAlignArgPointerAttr(Sema &S, Decl *D,
+ const AttributeList& Attr) {
+ // If we try to apply it to a function pointer, don't warn, but don't
+ // do anything, either. It doesn't matter anyway, because there's nothing
+ // special about calling a force_align_arg_pointer function.
+ ValueDecl *VD = dyn_cast<ValueDecl>(D);
+ if (VD && VD->getType()->isFunctionPointerType())
+ return;
+ // Also don't warn on function pointer typedefs.
+ TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D);
+ if (TD && (TD->getUnderlyingType()->isFunctionPointerType() ||
+ TD->getUnderlyingType()->isFunctionType()))
+ return;
+ // Attribute can only be applied to function types.
+ if (!isa<FunctionDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << /* function */0;
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ X86ForceAlignArgPointerAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+DLLImportAttr *Sema::mergeDLLImportAttr(Decl *D, SourceRange Range,
+ unsigned AttrSpellingListIndex) {
+ if (D->hasAttr<DLLExportAttr>()) {
+ Diag(Range.getBegin(), diag::warn_attribute_ignored) << "'dllimport'";
+ return nullptr;
+ }
+
+ if (D->hasAttr<DLLImportAttr>())
+ return nullptr;
+
+ return ::new (Context) DLLImportAttr(Range, Context, AttrSpellingListIndex);
+}
+
+DLLExportAttr *Sema::mergeDLLExportAttr(Decl *D, SourceRange Range,
+ unsigned AttrSpellingListIndex) {
+ if (DLLImportAttr *Import = D->getAttr<DLLImportAttr>()) {
+ Diag(Import->getLocation(), diag::warn_attribute_ignored) << Import;
+ D->dropAttr<DLLImportAttr>();
+ }
+
+ if (D->hasAttr<DLLExportAttr>())
+ return nullptr;
+
+ return ::new (Context) DLLExportAttr(Range, Context, AttrSpellingListIndex);
+}
+
+static void handleDLLAttr(Sema &S, Decl *D, const AttributeList &A) {
+ if (isa<ClassTemplatePartialSpecializationDecl>(D) &&
+ S.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ S.Diag(A.getRange().getBegin(), diag::warn_attribute_ignored)
+ << A.getName();
+ return;
+ }
+
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isInlined() && A.getKind() == AttributeList::AT_DLLImport &&
+ !S.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ // MinGW doesn't allow dllimport on inline functions.
+ S.Diag(A.getRange().getBegin(), diag::warn_attribute_ignored_on_inline)
+ << A.getName();
+ return;
+ }
+ }
+
+ if (auto *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (S.Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ MD->getParent()->isLambda()) {
+ S.Diag(A.getRange().getBegin(), diag::err_attribute_dll_lambda) << A.getName();
+ return;
+ }
+ }
+
+ unsigned Index = A.getAttributeSpellingListIndex();
+ Attr *NewAttr = A.getKind() == AttributeList::AT_DLLExport
+ ? (Attr *)S.mergeDLLExportAttr(D, A.getRange(), Index)
+ : (Attr *)S.mergeDLLImportAttr(D, A.getRange(), Index);
+ if (NewAttr)
+ D->addAttr(NewAttr);
+}
+
+MSInheritanceAttr *
+Sema::mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
+ unsigned AttrSpellingListIndex,
+ MSInheritanceAttr::Spelling SemanticSpelling) {
+ if (MSInheritanceAttr *IA = D->getAttr<MSInheritanceAttr>()) {
+ if (IA->getSemanticSpelling() == SemanticSpelling)
+ return nullptr;
+ Diag(IA->getLocation(), diag::err_mismatched_ms_inheritance)
+ << 1 /*previous declaration*/;
+ Diag(Range.getBegin(), diag::note_previous_ms_inheritance);
+ D->dropAttr<MSInheritanceAttr>();
+ }
+
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(D);
+ if (RD->hasDefinition()) {
+ if (checkMSInheritanceAttrOnDefinition(RD, Range, BestCase,
+ SemanticSpelling)) {
+ return nullptr;
+ }
+ } else {
+ if (isa<ClassTemplatePartialSpecializationDecl>(RD)) {
+ Diag(Range.getBegin(), diag::warn_ignored_ms_inheritance)
+ << 1 /*partial specialization*/;
+ return nullptr;
+ }
+ if (RD->getDescribedClassTemplate()) {
+ Diag(Range.getBegin(), diag::warn_ignored_ms_inheritance)
+ << 0 /*primary template*/;
+ return nullptr;
+ }
+ }
+
+ return ::new (Context)
+ MSInheritanceAttr(Range, Context, BestCase, AttrSpellingListIndex);
+}
+
+static void handleCapabilityAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // The capability attributes take a single string parameter for the name of
+ // the capability they represent. The lockable attribute does not take any
+ // parameters. However, semantically, both attributes represent the same
+ // concept, and so they use the same semantic attribute. Eventually, the
+ // lockable attribute will be removed.
+ //
+ // For backward compatibility, any capability which has no specified string
+ // literal will be considered a "mutex."
+ StringRef N("mutex");
+ SourceLocation LiteralLoc;
+ if (Attr.getKind() == AttributeList::AT_Capability &&
+ !S.checkStringLiteralArgumentAttr(Attr, 0, N, &LiteralLoc))
+ return;
+
+ // Currently, there are only two names allowed for a capability: role and
+ // mutex (case insensitive). Diagnose other capability names.
+ if (!N.equals_lower("mutex") && !N.equals_lower("role"))
+ S.Diag(LiteralLoc, diag::warn_invalid_capability_name) << N;
+
+ D->addAttr(::new (S.Context) CapabilityAttr(Attr.getRange(), S.Context, N,
+ Attr.getAttributeSpellingListIndex()));
+}
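+
+// e.g. (from the thread-safety analysis docs):
+//   struct __attribute__((capability("mutex"))) Mutex { /* ... */ };
+// Any name other than "mutex" or "role" draws the warning above.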
+
+static void handleAssertCapabilityAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ D->addAttr(::new (S.Context) AssertCapabilityAttr(Attr.getRange(), S.Context,
+ Attr.getArgAsExpr(0),
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleAcquireCapabilityAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ SmallVector<Expr*, 1> Args;
+ if (!checkLockFunAttrCommon(S, D, Attr, Args))
+ return;
+
+ D->addAttr(::new (S.Context) AcquireCapabilityAttr(Attr.getRange(),
+ S.Context,
+ Args.data(), Args.size(),
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleTryAcquireCapabilityAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ SmallVector<Expr*, 2> Args;
+ if (!checkTryLockFunAttrCommon(S, D, Attr, Args))
+ return;
+
+ D->addAttr(::new (S.Context) TryAcquireCapabilityAttr(Attr.getRange(),
+ S.Context,
+ Attr.getArgAsExpr(0),
+ Args.data(),
+ Args.size(),
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleReleaseCapabilityAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ // Check that all arguments are lockable objects.
+ SmallVector<Expr *, 1> Args;
+ checkAttrArgsAreCapabilityObjs(S, D, Attr, Args, 0, true);
+
+ D->addAttr(::new (S.Context) ReleaseCapabilityAttr(
+ Attr.getRange(), S.Context, Args.data(), Args.size(),
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleRequiresCapabilityAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+ return;
+
+ // check that all arguments are lockable objects
+ SmallVector<Expr*, 1> Args;
+ checkAttrArgsAreCapabilityObjs(S, D, Attr, Args);
+ if (Args.empty())
+ return;
+
+ RequiresCapabilityAttr *RCA = ::new (S.Context)
+ RequiresCapabilityAttr(Attr.getRange(), S.Context, Args.data(),
+ Args.size(), Attr.getAttributeSpellingListIndex());
+
+ D->addAttr(RCA);
+}
+
+static void handleDeprecatedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (auto *NSD = dyn_cast<NamespaceDecl>(D)) {
+ if (NSD->isAnonymousNamespace()) {
+ S.Diag(Attr.getLoc(), diag::warn_deprecated_anonymous_namespace);
+ // Do not want to attach the attribute to the namespace because that will
+ // cause confusing diagnostic reports for uses of declarations within the
+ // namespace.
+ return;
+ }
+ }
+
+ if (!S.getLangOpts().CPlusPlus14)
+ if (Attr.isCXX11Attribute() &&
+ !(Attr.hasScope() && Attr.getScopeName()->isStr("gnu")))
+ S.Diag(Attr.getLoc(), diag::ext_deprecated_attr_is_a_cxx14_extension);
+
+ handleAttrWithMessage<DeprecatedAttr>(S, D, Attr);
+}
+
+static void handleNoSanitizeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+ return;
+
+ std::vector<std::string> Sanitizers;
+
+ for (unsigned I = 0, E = Attr.getNumArgs(); I != E; ++I) {
+ StringRef SanitizerName;
+ SourceLocation LiteralLoc;
+
+ if (!S.checkStringLiteralArgumentAttr(Attr, I, SanitizerName, &LiteralLoc))
+ return;
+
+ if (parseSanitizerValue(SanitizerName, /*AllowGroups=*/true) == 0)
+ S.Diag(LiteralLoc, diag::warn_unknown_sanitizer_ignored) << SanitizerName;
+
+ Sanitizers.push_back(SanitizerName);
+ }
+
+ D->addAttr(::new (S.Context) NoSanitizeAttr(
+ Attr.getRange(), S.Context, Sanitizers.data(), Sanitizers.size(),
+ Attr.getAttributeSpellingListIndex()));
+}
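+
+// e.g. '__attribute__((no_sanitize("address", "thread")))' suppresses both
+// sanitizers for the annotated function; unknown sanitizer names are warned
+// about but still recorded, as above.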
+
+static void handleNoSanitizeSpecificAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ StringRef AttrName = Attr.getName()->getName();
+ normalizeName(AttrName);
+ std::string SanitizerName =
+ llvm::StringSwitch<std::string>(AttrName)
+ .Case("no_address_safety_analysis", "address")
+ .Case("no_sanitize_address", "address")
+ .Case("no_sanitize_thread", "thread")
+ .Case("no_sanitize_memory", "memory");
+ D->addAttr(::new (S.Context)
+ NoSanitizeAttr(Attr.getRange(), S.Context, &SanitizerName, 1,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleInternalLinkageAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (InternalLinkageAttr *Internal =
+ S.mergeInternalLinkageAttr(D, Attr.getRange(), Attr.getName(),
+ Attr.getAttributeSpellingListIndex()))
+ D->addAttr(Internal);
+}
+
+/// Handles semantic checking for features that are common to all attributes,
+/// such as checking whether a parameter was properly specified, whether the
+/// correct number of arguments were passed, etc.
+static bool handleCommonAttributeFeatures(Sema &S, Scope *scope, Decl *D,
+ const AttributeList &Attr) {
+ // Several attributes carry different semantics than the parsing requires, so
+ // those are opted out of the common handling.
+ //
+ // We also bail on unknown and ignored attributes because those are handled
+ // as part of the target-specific handling logic.
+ if (Attr.hasCustomParsing() ||
+ Attr.getKind() == AttributeList::UnknownAttribute)
+ return false;
+
+ // Check whether the attribute requires specific language extensions to be
+ // enabled.
+ if (!Attr.diagnoseLangOpts(S))
+ return true;
+
+ if (Attr.getMinArgs() == Attr.getMaxArgs()) {
+ // If there are no optional arguments, then checking for the argument count
+ // is trivial.
+ if (!checkAttributeNumArgs(S, Attr, Attr.getMinArgs()))
+ return true;
+ } else {
+ // There are optional arguments, so checking is slightly more involved.
+ if (Attr.getMinArgs() &&
+ !checkAttributeAtLeastNumArgs(S, Attr, Attr.getMinArgs()))
+ return true;
+ else if (!Attr.hasVariadicArg() && Attr.getMaxArgs() &&
+ !checkAttributeAtMostNumArgs(S, Attr, Attr.getMaxArgs()))
+ return true;
+ }
+
+ // Check whether the attribute appertains to the given subject.
+ if (!Attr.diagnoseAppertainsTo(S, D))
+ return true;
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Top Level Sema Entry Points
+//===----------------------------------------------------------------------===//
+
+/// ProcessDeclAttribute - Apply the specific attribute to the specified decl
+/// if the attribute applies to decls. If the attribute is a type attribute,
+/// just silently ignore it if it is a GNU attribute.
+static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
+ const AttributeList &Attr,
+ bool IncludeCXX11Attributes) {
+ if (Attr.isInvalid() || Attr.getKind() == AttributeList::IgnoredAttribute)
+ return;
+
+ // Ignore C++11 attributes on declarator chunks: they appertain to the type
+ // instead.
+ if (Attr.isCXX11Attribute() && !IncludeCXX11Attributes)
+ return;
+
+ // Unknown attributes are automatically warned on. Target-specific attributes
+ // which do not apply to the current target architecture are treated as
+ // though they were unknown attributes.
+ if (Attr.getKind() == AttributeList::UnknownAttribute ||
+ !Attr.existsInTarget(S.Context.getTargetInfo())) {
+ S.Diag(Attr.getLoc(), Attr.isDeclspecAttribute()
+ ? diag::warn_unhandled_ms_attribute_ignored
+ : diag::warn_unknown_attribute_ignored)
+ << Attr.getName();
+ return;
+ }
+
+ if (handleCommonAttributeFeatures(S, scope, D, Attr))
+ return;
+
+ switch (Attr.getKind()) {
+ default:
+ // Type attributes are handled elsewhere; silently move on.
+ assert(Attr.isTypeAttr() && "Non-type attribute not handled");
+ break;
+ case AttributeList::AT_Interrupt:
+ handleInterruptAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_X86ForceAlignArgPointer:
+ handleX86ForceAlignArgPointerAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_DLLExport:
+ case AttributeList::AT_DLLImport:
+ handleDLLAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_Mips16:
+ handleMips16Attribute(S, D, Attr);
+ break;
+ case AttributeList::AT_NoMips16:
+ handleSimpleAttribute<NoMips16Attr>(S, D, Attr);
+ break;
+ case AttributeList::AT_AMDGPUNumVGPR:
+ handleAMDGPUNumVGPRAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_AMDGPUNumSGPR:
+ handleAMDGPUNumSGPRAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_IBAction:
+ handleSimpleAttribute<IBActionAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_IBOutlet:
+ handleIBOutlet(S, D, Attr);
+ break;
+ case AttributeList::AT_IBOutletCollection:
+ handleIBOutletCollection(S, D, Attr);
+ break;
+ case AttributeList::AT_Alias:
+ handleAliasAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_Aligned:
+ handleAlignedAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_AlignValue:
+ handleAlignValueAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_AlwaysInline:
+ handleAlwaysInlineAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_AnalyzerNoReturn:
+ handleAnalyzerNoReturnAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_TLSModel:
+ handleTLSModelAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_Annotate:
+ handleAnnotateAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_Availability:
+ handleAvailabilityAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_CarriesDependency:
+ handleDependencyAttr(S, scope, D, Attr);
+ break;
+ case AttributeList::AT_Common:
+ handleCommonAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_CUDAConstant:
+ handleSimpleAttribute<CUDAConstantAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_PassObjectSize:
+ handlePassObjectSizeAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_Constructor:
+ handleConstructorAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_CXX11NoReturn:
+ handleSimpleAttribute<CXX11NoReturnAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_Deprecated:
+ handleDeprecatedAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_Destructor:
+ handleDestructorAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_EnableIf:
+ handleEnableIfAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_ExtVectorType:
+ handleExtVectorTypeAttr(S, scope, D, Attr);
+ break;
+ case AttributeList::AT_MinSize:
+ handleMinSizeAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_OptimizeNone:
+ handleOptimizeNoneAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_FlagEnum:
+ handleSimpleAttribute<FlagEnumAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_Flatten:
+ handleSimpleAttribute<FlattenAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_Format:
+ handleFormatAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_FormatArg:
+ handleFormatArgAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_CUDAGlobal:
+ handleGlobalAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_CUDADevice:
+ handleSimpleAttribute<CUDADeviceAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_CUDAHost:
+ handleSimpleAttribute<CUDAHostAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_GNUInline:
+ handleGNUInlineAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_CUDALaunchBounds:
+ handleLaunchBoundsAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_Restrict:
+ handleRestrictAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_MayAlias:
+ handleSimpleAttribute<MayAliasAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_Mode:
+ handleModeAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_NoAlias:
+ handleSimpleAttribute<NoAliasAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_NoCommon:
+ handleSimpleAttribute<NoCommonAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_NoSplitStack:
+ handleSimpleAttribute<NoSplitStackAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_NonNull:
+ if (ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(D))
+ handleNonNullAttrParameter(S, PVD, Attr);
+ else
+ handleNonNullAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_ReturnsNonNull:
+ handleReturnsNonNullAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_AssumeAligned:
+ handleAssumeAlignedAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_Overloadable:
+ handleSimpleAttribute<OverloadableAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_Ownership:
+ handleOwnershipAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_Cold:
+ handleColdAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_Hot:
+ handleHotAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_Naked:
+ handleNakedAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_NoReturn:
+ handleNoReturnAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_NoThrow:
+ handleSimpleAttribute<NoThrowAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_CUDAShared:
+ handleSimpleAttribute<CUDASharedAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_VecReturn:
+ handleVecReturnAttr(S, D, Attr);
+ break;
+
+ case AttributeList::AT_ObjCOwnership:
+ handleObjCOwnershipAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_ObjCPreciseLifetime:
+ handleObjCPreciseLifetimeAttr(S, D, Attr);
+ break;
+
+ case AttributeList::AT_ObjCReturnsInnerPointer:
+ handleObjCReturnsInnerPointerAttr(S, D, Attr);
+ break;
+
+ case AttributeList::AT_ObjCRequiresSuper:
+ handleObjCRequiresSuperAttr(S, D, Attr);
+ break;
+
+ case AttributeList::AT_ObjCBridge:
+ handleObjCBridgeAttr(S, scope, D, Attr);
+ break;
+
+ case AttributeList::AT_ObjCBridgeMutable:
+ handleObjCBridgeMutableAttr(S, scope, D, Attr);
+ break;
+
+ case AttributeList::AT_ObjCBridgeRelated:
+ handleObjCBridgeRelatedAttr(S, scope, D, Attr);
+ break;
+
+ case AttributeList::AT_ObjCDesignatedInitializer:
+ handleObjCDesignatedInitializer(S, D, Attr);
+ break;
+
+ case AttributeList::AT_ObjCRuntimeName:
+ handleObjCRuntimeName(S, D, Attr);
+ break;
+
+ case AttributeList::AT_ObjCBoxable:
+ handleObjCBoxable(S, D, Attr);
+ break;
+
+ case AttributeList::AT_CFAuditedTransfer:
+ handleCFAuditedTransferAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_CFUnknownTransfer:
+ handleCFUnknownTransferAttr(S, D, Attr);
+ break;
+
+ case AttributeList::AT_CFConsumed:
+ case AttributeList::AT_NSConsumed:
+ handleNSConsumedAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_NSConsumesSelf:
+ handleSimpleAttribute<NSConsumesSelfAttr>(S, D, Attr);
+ break;
+
+ case AttributeList::AT_NSReturnsAutoreleased:
+ case AttributeList::AT_NSReturnsNotRetained:
+ case AttributeList::AT_CFReturnsNotRetained:
+ case AttributeList::AT_NSReturnsRetained:
+ case AttributeList::AT_CFReturnsRetained:
+ handleNSReturnsRetainedAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_WorkGroupSizeHint:
+ handleWorkGroupSize<WorkGroupSizeHintAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_ReqdWorkGroupSize:
+ handleWorkGroupSize<ReqdWorkGroupSizeAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_VecTypeHint:
+ handleVecTypeHint(S, D, Attr);
+ break;
+
+ case AttributeList::AT_InitPriority:
+ handleInitPriorityAttr(S, D, Attr);
+ break;
+
+ case AttributeList::AT_Packed:
+ handlePackedAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_Section:
+ handleSectionAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_Target:
+ handleTargetAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_Unavailable:
+ handleAttrWithMessage<UnavailableAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_ArcWeakrefUnavailable:
+ handleSimpleAttribute<ArcWeakrefUnavailableAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_ObjCRootClass:
+ handleSimpleAttribute<ObjCRootClassAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_ObjCExplicitProtocolImpl:
+ handleObjCSuppresProtocolAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_ObjCRequiresPropertyDefs:
+ handleSimpleAttribute<ObjCRequiresPropertyDefsAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_Unused:
+ handleSimpleAttribute<UnusedAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_ReturnsTwice:
+ handleSimpleAttribute<ReturnsTwiceAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_NotTailCalled:
+ handleNotTailCalledAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_DisableTailCalls:
+ handleDisableTailCallsAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_Used:
+ handleUsedAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_Visibility:
+ handleVisibilityAttr(S, D, Attr, false);
+ break;
+ case AttributeList::AT_TypeVisibility:
+ handleVisibilityAttr(S, D, Attr, true);
+ break;
+ case AttributeList::AT_WarnUnused:
+ handleSimpleAttribute<WarnUnusedAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_WarnUnusedResult:
+ handleWarnUnusedResult(S, D, Attr);
+ break;
+ case AttributeList::AT_Weak:
+ handleSimpleAttribute<WeakAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_WeakRef:
+ handleWeakRefAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_WeakImport:
+ handleWeakImportAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_TransparentUnion:
+ handleTransparentUnionAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_ObjCException:
+ handleSimpleAttribute<ObjCExceptionAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_ObjCMethodFamily:
+ handleObjCMethodFamilyAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_ObjCNSObject:
+ handleObjCNSObject(S, D, Attr);
+ break;
+ case AttributeList::AT_ObjCIndependentClass:
+ handleObjCIndependentClass(S, D, Attr);
+ break;
+ case AttributeList::AT_Blocks:
+ handleBlocksAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_Sentinel:
+ handleSentinelAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_Const:
+ handleSimpleAttribute<ConstAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_Pure:
+ handleSimpleAttribute<PureAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_Cleanup:
+ handleCleanupAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_NoDebug:
+ handleNoDebugAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_NoDuplicate:
+ handleSimpleAttribute<NoDuplicateAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_NoInline:
+ handleSimpleAttribute<NoInlineAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_NoInstrumentFunction: // Interacts with -pg.
+ handleSimpleAttribute<NoInstrumentFunctionAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_StdCall:
+ case AttributeList::AT_CDecl:
+ case AttributeList::AT_FastCall:
+ case AttributeList::AT_ThisCall:
+ case AttributeList::AT_Pascal:
+ case AttributeList::AT_VectorCall:
+ case AttributeList::AT_MSABI:
+ case AttributeList::AT_SysVABI:
+ case AttributeList::AT_Pcs:
+ case AttributeList::AT_IntelOclBicc:
+ handleCallConvAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_OpenCLKernel:
+ handleSimpleAttribute<OpenCLKernelAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_OpenCLImageAccess:
+ handleSimpleAttribute<OpenCLImageAccessAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_InternalLinkage:
+ handleInternalLinkageAttr(S, D, Attr);
+ break;
+
+ // Microsoft attributes:
+ case AttributeList::AT_MSNoVTable:
+ handleSimpleAttribute<MSNoVTableAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_MSStruct:
+ handleSimpleAttribute<MSStructAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_Uuid:
+ handleUuidAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_MSInheritance:
+ handleMSInheritanceAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_SelectAny:
+ handleSimpleAttribute<SelectAnyAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_Thread:
+ handleDeclspecThreadAttr(S, D, Attr);
+ break;
+
+ // Thread safety attributes:
+ case AttributeList::AT_AssertExclusiveLock:
+ handleAssertExclusiveLockAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_AssertSharedLock:
+ handleAssertSharedLockAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_GuardedVar:
+ handleSimpleAttribute<GuardedVarAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_PtGuardedVar:
+ handlePtGuardedVarAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_ScopedLockable:
+ handleSimpleAttribute<ScopedLockableAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_NoSanitize:
+ handleNoSanitizeAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_NoSanitizeSpecific:
+ handleNoSanitizeSpecificAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_NoThreadSafetyAnalysis:
+ handleSimpleAttribute<NoThreadSafetyAnalysisAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_GuardedBy:
+ handleGuardedByAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_PtGuardedBy:
+ handlePtGuardedByAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_ExclusiveTrylockFunction:
+ handleExclusiveTrylockFunctionAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_LockReturned:
+ handleLockReturnedAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_LocksExcluded:
+ handleLocksExcludedAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_SharedTrylockFunction:
+ handleSharedTrylockFunctionAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_AcquiredBefore:
+ handleAcquiredBeforeAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_AcquiredAfter:
+ handleAcquiredAfterAttr(S, D, Attr);
+ break;
+
+ // Capability analysis attributes.
+ case AttributeList::AT_Capability:
+ case AttributeList::AT_Lockable:
+ handleCapabilityAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_RequiresCapability:
+ handleRequiresCapabilityAttr(S, D, Attr);
+ break;
+
+ case AttributeList::AT_AssertCapability:
+ handleAssertCapabilityAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_AcquireCapability:
+ handleAcquireCapabilityAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_ReleaseCapability:
+ handleReleaseCapabilityAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_TryAcquireCapability:
+ handleTryAcquireCapabilityAttr(S, D, Attr);
+ break;
+
+ // Consumed analysis attributes.
+ case AttributeList::AT_Consumable:
+ handleConsumableAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_ConsumableAutoCast:
+ handleSimpleAttribute<ConsumableAutoCastAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_ConsumableSetOnRead:
+ handleSimpleAttribute<ConsumableSetOnReadAttr>(S, D, Attr);
+ break;
+ case AttributeList::AT_CallableWhen:
+ handleCallableWhenAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_ParamTypestate:
+ handleParamTypestateAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_ReturnTypestate:
+ handleReturnTypestateAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_SetTypestate:
+ handleSetTypestateAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_TestTypestate:
+ handleTestTypestateAttr(S, D, Attr);
+ break;
+
+ // Type safety attributes.
+ case AttributeList::AT_ArgumentWithTypeTag:
+ handleArgumentWithTypeTagAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_TypeTagForDatatype:
+ handleTypeTagForDatatypeAttr(S, D, Attr);
+ break;
+ }
+}
+
+/// ProcessDeclAttributeList - Apply all the decl attributes in the specified
+/// attribute list to the specified decl, ignoring any type attributes.
+void Sema::ProcessDeclAttributeList(Scope *S, Decl *D,
+ const AttributeList *AttrList,
+ bool IncludeCXX11Attributes) {
+ for (const AttributeList* l = AttrList; l; l = l->getNext())
+ ProcessDeclAttribute(*this, S, D, *l, IncludeCXX11Attributes);
+
+ // FIXME: We should be able to handle these cases in TableGen.
+ // GCC accepts
+ // static int a9 __attribute__((weakref));
+ // but that looks really pointless. We reject it.
+ if (D->hasAttr<WeakRefAttr>() && !D->hasAttr<AliasAttr>()) {
+ Diag(AttrList->getLoc(), diag::err_attribute_weakref_without_alias)
+ << cast<NamedDecl>(D);
+ D->dropAttr<WeakRefAttr>();
+ return;
+ }
+
+ // FIXME: We should be able to handle this in TableGen as well. It would be
+  // good to have a way to specify "these attributes must appear as a group"
+  // for these. Additionally, it would be good to have a way to specify "these
+  // attributes must never appear as a group" for attributes like cold and hot.
+ if (!D->hasAttr<OpenCLKernelAttr>()) {
+ // These attributes cannot be applied to a non-kernel function.
+ if (Attr *A = D->getAttr<ReqdWorkGroupSizeAttr>()) {
+ // FIXME: This emits a different error message than
+ // diag::err_attribute_wrong_decl_type + ExpectedKernelFunction.
+ Diag(D->getLocation(), diag::err_opencl_kernel_attr) << A;
+ D->setInvalidDecl();
+ } else if (Attr *A = D->getAttr<WorkGroupSizeHintAttr>()) {
+ Diag(D->getLocation(), diag::err_opencl_kernel_attr) << A;
+ D->setInvalidDecl();
+ } else if (Attr *A = D->getAttr<VecTypeHintAttr>()) {
+ Diag(D->getLocation(), diag::err_opencl_kernel_attr) << A;
+ D->setInvalidDecl();
+ } else if (Attr *A = D->getAttr<AMDGPUNumVGPRAttr>()) {
+ Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
+ << A << ExpectedKernelFunction;
+ D->setInvalidDecl();
+ } else if (Attr *A = D->getAttr<AMDGPUNumSGPRAttr>()) {
+ Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
+ << A << ExpectedKernelFunction;
+ D->setInvalidDecl();
+ }
+ }
+}
+
+// Annotation attributes are the only attributes allowed after an access
+// specifier.
+bool Sema::ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
+ const AttributeList *AttrList) {
+ for (const AttributeList* l = AttrList; l; l = l->getNext()) {
+ if (l->getKind() == AttributeList::AT_Annotate) {
+ ProcessDeclAttribute(*this, nullptr, ASDecl, *l, l->isCXX11Attribute());
+ } else {
+ Diag(l->getLoc(), diag::err_only_annotate_after_access_spec);
+ return true;
+ }
+ }
+
+ return false;
+}
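+// For illustration, a hypothetical input (GNU attribute placement between
+// the access-specifier keyword and the colon):
+//   class C {
+//   public __attribute__((annotate("locked"))):   // OK: annotate only
+//   private __attribute__((unused)):              // error: not annotate
+//     int n;
+//   };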
+
+/// checkUnusedDeclAttributes - Check a list of attributes to see if it
+/// contains any decl attributes that we should warn about.
+static void checkUnusedDeclAttributes(Sema &S, const AttributeList *A) {
+ for ( ; A; A = A->getNext()) {
+ // Only warn if the attribute is an unignored, non-type attribute.
+ if (A->isUsedAsTypeAttr() || A->isInvalid()) continue;
+ if (A->getKind() == AttributeList::IgnoredAttribute) continue;
+
+ if (A->getKind() == AttributeList::UnknownAttribute) {
+ S.Diag(A->getLoc(), diag::warn_unknown_attribute_ignored)
+ << A->getName() << A->getRange();
+ } else {
+ S.Diag(A->getLoc(), diag::warn_attribute_not_on_decl)
+ << A->getName() << A->getRange();
+ }
+ }
+}
+
+/// checkUnusedDeclAttributes - Given a declarator which is not being
+/// used to build a declaration, complain about any decl attributes
+/// which might be lying around on it.
+void Sema::checkUnusedDeclAttributes(Declarator &D) {
+ ::checkUnusedDeclAttributes(*this, D.getDeclSpec().getAttributes().getList());
+ ::checkUnusedDeclAttributes(*this, D.getAttributes());
+ for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i)
+ ::checkUnusedDeclAttributes(*this, D.getTypeObject(i).getAttrs());
+}
+
+/// DeclClonePragmaWeak - clone existing decl (maybe definition),
+/// \#pragma weak needs a non-definition decl and source may not have one.
+NamedDecl * Sema::DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
+ SourceLocation Loc) {
+ assert(isa<FunctionDecl>(ND) || isa<VarDecl>(ND));
+ NamedDecl *NewD = nullptr;
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ FunctionDecl *NewFD;
+ // FIXME: Missing call to CheckFunctionDeclaration().
+ // FIXME: Mangling?
+ // FIXME: Is the qualifier info correct?
+ // FIXME: Is the DeclContext correct?
+ NewFD = FunctionDecl::Create(FD->getASTContext(), FD->getDeclContext(),
+ Loc, Loc, DeclarationName(II),
+ FD->getType(), FD->getTypeSourceInfo(),
+ SC_None, false/*isInlineSpecified*/,
+ FD->hasPrototype(),
+ false/*isConstexprSpecified*/);
+ NewD = NewFD;
+
+ if (FD->getQualifier())
+ NewFD->setQualifierInfo(FD->getQualifierLoc());
+
+ // Fake up parameter variables; they are declared as if this were
+ // a typedef.
+ QualType FDTy = FD->getType();
+ if (const FunctionProtoType *FT = FDTy->getAs<FunctionProtoType>()) {
+ SmallVector<ParmVarDecl*, 16> Params;
+ for (const auto &AI : FT->param_types()) {
+ ParmVarDecl *Param = BuildParmVarDeclForTypedef(NewFD, Loc, AI);
+ Param->setScopeInfo(0, Params.size());
+ Params.push_back(Param);
+ }
+ NewFD->setParams(Params);
+ }
+ } else if (VarDecl *VD = dyn_cast<VarDecl>(ND)) {
+ NewD = VarDecl::Create(VD->getASTContext(), VD->getDeclContext(),
+ VD->getInnerLocStart(), VD->getLocation(), II,
+ VD->getType(), VD->getTypeSourceInfo(),
+ VD->getStorageClass());
+ if (VD->getQualifier()) {
+ VarDecl *NewVD = cast<VarDecl>(NewD);
+ NewVD->setQualifierInfo(VD->getQualifierLoc());
+ }
+ }
+ return NewD;
+}
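+// For illustration, a hypothetical use of the alias form of the pragma; the
+// source has no declaration of 'g', so one is cloned from 'f' here:
+//   void f(void) {}
+//   #pragma weak g = f   // 'g' becomes a weak alias for 'f'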
+
+/// DeclApplyPragmaWeak - A declaration (maybe definition) needs \#pragma weak
+/// applied to it, possibly with an alias.
+void Sema::DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W) {
+ if (W.getUsed()) return; // only do this once
+ W.setUsed(true);
+ if (W.getAlias()) { // clone decl, impersonate __attribute(weak,alias(...))
+ IdentifierInfo *NDId = ND->getIdentifier();
+ NamedDecl *NewD = DeclClonePragmaWeak(ND, W.getAlias(), W.getLocation());
+ NewD->addAttr(AliasAttr::CreateImplicit(Context, NDId->getName(),
+ W.getLocation()));
+ NewD->addAttr(WeakAttr::CreateImplicit(Context, W.getLocation()));
+ WeakTopLevelDecl.push_back(NewD);
+ // FIXME: "hideous" code from Sema::LazilyCreateBuiltin
+ // to insert Decl at TU scope, sorry.
+ DeclContext *SavedContext = CurContext;
+ CurContext = Context.getTranslationUnitDecl();
+ NewD->setDeclContext(CurContext);
+ NewD->setLexicalDeclContext(CurContext);
+ PushOnScopeChains(NewD, S);
+ CurContext = SavedContext;
+ } else { // just add weak to existing
+ ND->addAttr(WeakAttr::CreateImplicit(Context, W.getLocation()));
+ }
+}
+
+void Sema::ProcessPragmaWeak(Scope *S, Decl *D) {
+  // It's valid to "forward-declare" #pragma weak, in which case we have to
+  // check each later extern "C" declaration against the pending weak info.
+ LoadExternalWeakUndeclaredIdentifiers();
+ if (!WeakUndeclaredIdentifiers.empty()) {
+ NamedDecl *ND = nullptr;
+ if (VarDecl *VD = dyn_cast<VarDecl>(D))
+ if (VD->isExternC())
+ ND = VD;
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isExternC())
+ ND = FD;
+ if (ND) {
+ if (IdentifierInfo *Id = ND->getIdentifier()) {
+ auto I = WeakUndeclaredIdentifiers.find(Id);
+ if (I != WeakUndeclaredIdentifiers.end()) {
+ WeakInfo W = I->second;
+ DeclApplyPragmaWeak(S, ND, W);
+ WeakUndeclaredIdentifiers[Id] = W;
+ }
+ }
+ }
+ }
+}
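+// For illustration, a hypothetical forward-declared pragma; the weak info is
+// recorded first and applied when the matching declaration appears:
+//   #pragma weak f
+//   void f(void) {}   // 'f' is marked weak here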
+
+/// ProcessDeclAttributes - Given a declarator (PD) with attributes indicated in
+/// it, apply them to D. This is a bit tricky because PD can have attributes
+/// specified in many different places, and we need to find and apply them all.
+void Sema::ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD) {
+ // Apply decl attributes from the DeclSpec if present.
+ if (const AttributeList *Attrs = PD.getDeclSpec().getAttributes().getList())
+ ProcessDeclAttributeList(S, D, Attrs);
+
+ // Walk the declarator structure, applying decl attributes that were in a type
+ // position to the decl itself. This handles cases like:
+ // int *__attr__(x)** D;
+  //   when x is a decl attribute.
+ for (unsigned i = 0, e = PD.getNumTypeObjects(); i != e; ++i)
+ if (const AttributeList *Attrs = PD.getTypeObject(i).getAttrs())
+ ProcessDeclAttributeList(S, D, Attrs, /*IncludeCXX11Attributes=*/false);
+
+ // Finally, apply any attributes on the decl itself.
+ if (const AttributeList *Attrs = PD.getAttributes())
+ ProcessDeclAttributeList(S, D, Attrs);
+}
+
+/// Is the given declaration allowed to use a forbidden type?
+/// If so, it'll still be annotated with an attribute that makes it
+/// illegal to actually use.
+static bool isForbiddenTypeAllowed(Sema &S, Decl *decl,
+ const DelayedDiagnostic &diag,
+ UnavailableAttr::ImplicitReason &reason) {
+ // Private ivars are always okay. Unfortunately, people don't
+ // always properly make their ivars private, even in system headers.
+ // Plus we need to make fields okay, too.
+ if (!isa<FieldDecl>(decl) && !isa<ObjCPropertyDecl>(decl) &&
+ !isa<FunctionDecl>(decl))
+ return false;
+
+ // Silently accept unsupported uses of __weak in both user and system
+ // declarations when it's been disabled, for ease of integration with
+ // -fno-objc-arc files. We do have to take some care against attempts
+ // to define such things; for now, we've only done that for ivars
+ // and properties.
+ if ((isa<ObjCIvarDecl>(decl) || isa<ObjCPropertyDecl>(decl))) {
+ if (diag.getForbiddenTypeDiagnostic() == diag::err_arc_weak_disabled ||
+ diag.getForbiddenTypeDiagnostic() == diag::err_arc_weak_no_runtime) {
+ reason = UnavailableAttr::IR_ForbiddenWeak;
+ return true;
+ }
+ }
+
+ // Allow all sorts of things in system headers.
+ if (S.Context.getSourceManager().isInSystemHeader(decl->getLocation())) {
+ // Currently, all the failures dealt with this way are due to ARC
+ // restrictions.
+ reason = UnavailableAttr::IR_ARCForbiddenType;
+ return true;
+ }
+
+ return false;
+}
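+// For illustration, a hypothetical input: when __weak is unsupported, an
+// ivar declaration using it is accepted but marked implicitly unavailable:
+//   @interface I { __weak id w; }   // decl OK; any later use of 'w' errors
+//   @end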
+
+/// Handle a delayed forbidden-type diagnostic.
+static void handleDelayedForbiddenType(Sema &S, DelayedDiagnostic &diag,
+ Decl *decl) {
+ auto reason = UnavailableAttr::IR_None;
+ if (decl && isForbiddenTypeAllowed(S, decl, diag, reason)) {
+ assert(reason && "didn't set reason?");
+ decl->addAttr(UnavailableAttr::CreateImplicit(S.Context, "", reason,
+ diag.Loc));
+ return;
+ }
+ if (S.getLangOpts().ObjCAutoRefCount)
+    // Note that 'decl' may be null here, so use dyn_cast_or_null.
+    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(decl)) {
+      // FIXME: we may want to suppress diagnostics for all kinds of
+      // forbidden-type messages on unavailable functions.
+ if (FD->hasAttr<UnavailableAttr>() &&
+ diag.getForbiddenTypeDiagnostic() ==
+ diag::err_arc_array_param_no_ownership) {
+ diag.Triggered = true;
+ return;
+ }
+ }
+
+ S.Diag(diag.Loc, diag.getForbiddenTypeDiagnostic())
+ << diag.getForbiddenTypeOperand() << diag.getForbiddenTypeArgument();
+ diag.Triggered = true;
+}
+
+
+static bool isDeclDeprecated(Decl *D) {
+ do {
+ if (D->isDeprecated())
+ return true;
+ // A category implicitly has the availability of the interface.
+ if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(D))
+ if (const ObjCInterfaceDecl *Interface = CatD->getClassInterface())
+ return Interface->isDeprecated();
+ } while ((D = cast_or_null<Decl>(D->getDeclContext())));
+ return false;
+}
+
+static bool isDeclUnavailable(Decl *D) {
+ do {
+ if (D->isUnavailable())
+ return true;
+ // A category implicitly has the availability of the interface.
+ if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(D))
+ if (const ObjCInterfaceDecl *Interface = CatD->getClassInterface())
+ return Interface->isUnavailable();
+ } while ((D = cast_or_null<Decl>(D->getDeclContext())));
+ return false;
+}
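+// For illustration, a hypothetical input: a category has no availability of
+// its own and inherits it from the interface:
+//   __attribute__((deprecated)) @interface I @end
+//   @interface I (Cat) @end   // uses via Cat warn as deprecated too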
+
+static void DoEmitAvailabilityWarning(Sema &S, Sema::AvailabilityDiagnostic K,
+ Decl *Ctx, const NamedDecl *D,
+ StringRef Message, SourceLocation Loc,
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ const ObjCPropertyDecl *ObjCProperty,
+ bool ObjCPropertyAccess) {
+ // Diagnostics for deprecated or unavailable.
+ unsigned diag, diag_message, diag_fwdclass_message;
+ unsigned diag_available_here = diag::note_availability_specified_here;
+
+ // Matches 'diag::note_property_attribute' options.
+ unsigned property_note_select;
+
+ // Matches diag::note_availability_specified_here.
+ unsigned available_here_select_kind;
+
+ // Don't warn if our current context is deprecated or unavailable.
+ switch (K) {
+ case Sema::AD_Deprecation:
+ if (isDeclDeprecated(Ctx) || isDeclUnavailable(Ctx))
+ return;
+ diag = !ObjCPropertyAccess ? diag::warn_deprecated
+ : diag::warn_property_method_deprecated;
+ diag_message = diag::warn_deprecated_message;
+ diag_fwdclass_message = diag::warn_deprecated_fwdclass_message;
+ property_note_select = /* deprecated */ 0;
+ available_here_select_kind = /* deprecated */ 2;
+ break;
+
+ case Sema::AD_Unavailable:
+ if (isDeclUnavailable(Ctx))
+ return;
+ diag = !ObjCPropertyAccess ? diag::err_unavailable
+ : diag::err_property_method_unavailable;
+ diag_message = diag::err_unavailable_message;
+ diag_fwdclass_message = diag::warn_unavailable_fwdclass_message;
+ property_note_select = /* unavailable */ 1;
+ available_here_select_kind = /* unavailable */ 0;
+
+ if (auto attr = D->getAttr<UnavailableAttr>()) {
+ if (attr->isImplicit() && attr->getImplicitReason()) {
+ // Most of these failures are due to extra restrictions in ARC;
+ // reflect that in the primary diagnostic when applicable.
+ auto flagARCError = [&] {
+ if (S.getLangOpts().ObjCAutoRefCount &&
+ S.getSourceManager().isInSystemHeader(D->getLocation()))
+ diag = diag::err_unavailable_in_arc;
+ };
+
+ switch (attr->getImplicitReason()) {
+ case UnavailableAttr::IR_None: break;
+
+ case UnavailableAttr::IR_ARCForbiddenType:
+ flagARCError();
+ diag_available_here = diag::note_arc_forbidden_type;
+ break;
+
+ case UnavailableAttr::IR_ForbiddenWeak:
+ if (S.getLangOpts().ObjCWeakRuntime)
+ diag_available_here = diag::note_arc_weak_disabled;
+ else
+ diag_available_here = diag::note_arc_weak_no_runtime;
+ break;
+
+ case UnavailableAttr::IR_ARCForbiddenConversion:
+ flagARCError();
+ diag_available_here = diag::note_performs_forbidden_arc_conversion;
+ break;
+
+ case UnavailableAttr::IR_ARCInitReturnsUnrelated:
+ flagARCError();
+ diag_available_here = diag::note_arc_init_returns_unrelated;
+ break;
+
+ case UnavailableAttr::IR_ARCFieldWithOwnership:
+ flagARCError();
+ diag_available_here = diag::note_arc_field_with_ownership;
+ break;
+ }
+ }
+ }
+
+ break;
+
+ case Sema::AD_Partial:
+ diag = diag::warn_partial_availability;
+ diag_message = diag::warn_partial_message;
+ diag_fwdclass_message = diag::warn_partial_fwdclass_message;
+ property_note_select = /* partial */ 2;
+ available_here_select_kind = /* partial */ 3;
+ break;
+ }
+
+ if (!Message.empty()) {
+ S.Diag(Loc, diag_message) << D << Message;
+ if (ObjCProperty)
+ S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute)
+ << ObjCProperty->getDeclName() << property_note_select;
+ } else if (!UnknownObjCClass) {
+ S.Diag(Loc, diag) << D;
+ if (ObjCProperty)
+ S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute)
+ << ObjCProperty->getDeclName() << property_note_select;
+ } else {
+ S.Diag(Loc, diag_fwdclass_message) << D;
+ S.Diag(UnknownObjCClass->getLocation(), diag::note_forward_class);
+ }
+
+ S.Diag(D->getLocation(), diag_available_here)
+ << D << available_here_select_kind;
+ if (K == Sema::AD_Partial)
+ S.Diag(Loc, diag::note_partial_availability_silence) << D;
+}
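+// For illustration, a hypothetical input exercising the message path above:
+//   void f() __attribute__((deprecated("use g() instead")));
+//   void h() { f(); }   // warning: 'f' is deprecated: use g() instead
+//                       // note: availability specified here (at 'f')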
+
+static void handleDelayedAvailabilityCheck(Sema &S, DelayedDiagnostic &DD,
+ Decl *Ctx) {
+ assert(DD.Kind == DelayedDiagnostic::Deprecation ||
+ DD.Kind == DelayedDiagnostic::Unavailable);
+ Sema::AvailabilityDiagnostic AD = DD.Kind == DelayedDiagnostic::Deprecation
+ ? Sema::AD_Deprecation
+ : Sema::AD_Unavailable;
+ DD.Triggered = true;
+ DoEmitAvailabilityWarning(
+ S, AD, Ctx, DD.getDeprecationDecl(), DD.getDeprecationMessage(), DD.Loc,
+ DD.getUnknownObjCClass(), DD.getObjCProperty(), false);
+}
+
+void Sema::PopParsingDeclaration(ParsingDeclState state, Decl *decl) {
+ assert(DelayedDiagnostics.getCurrentPool());
+ DelayedDiagnosticPool &poppedPool = *DelayedDiagnostics.getCurrentPool();
+ DelayedDiagnostics.popWithoutEmitting(state);
+
+ // When delaying diagnostics to run in the context of a parsed
+ // declaration, we only want to actually emit anything if parsing
+ // succeeds.
+ if (!decl) return;
+
+ // We emit all the active diagnostics in this pool or any of its
+ // parents. In general, we'll get one pool for the decl spec
+ // and a child pool for each declarator; in a decl group like:
+ // deprecated_typedef foo, *bar, baz();
+ // only the declarator pops will be passed decls. This is correct;
+ // we really do need to consider delayed diagnostics from the decl spec
+ // for each of the different declarations.
+ const DelayedDiagnosticPool *pool = &poppedPool;
+ do {
+ for (DelayedDiagnosticPool::pool_iterator
+ i = pool->pool_begin(), e = pool->pool_end(); i != e; ++i) {
+ // This const_cast is a bit lame. Really, Triggered should be mutable.
+ DelayedDiagnostic &diag = const_cast<DelayedDiagnostic&>(*i);
+ if (diag.Triggered)
+ continue;
+
+ switch (diag.Kind) {
+ case DelayedDiagnostic::Deprecation:
+ case DelayedDiagnostic::Unavailable:
+ // Don't bother giving deprecation/unavailable diagnostics if
+ // the decl is invalid.
+ if (!decl->isInvalidDecl())
+ handleDelayedAvailabilityCheck(*this, diag, decl);
+ break;
+
+ case DelayedDiagnostic::Access:
+ HandleDelayedAccessCheck(diag, decl);
+ break;
+
+ case DelayedDiagnostic::ForbiddenType:
+ handleDelayedForbiddenType(*this, diag, decl);
+ break;
+ }
+ }
+ } while ((pool = pool->getParent()));
+}
+
+/// Given a set of delayed diagnostics, re-emit them as if they had
+/// been delayed in the current context instead of in the given pool.
+/// Essentially, this just moves them to the current pool.
+void Sema::redelayDiagnostics(DelayedDiagnosticPool &pool) {
+ DelayedDiagnosticPool *curPool = DelayedDiagnostics.getCurrentPool();
+ assert(curPool && "re-emitting in undelayed context not supported");
+ curPool->steal(pool);
+}
+
+void Sema::EmitAvailabilityWarning(AvailabilityDiagnostic AD,
+ NamedDecl *D, StringRef Message,
+ SourceLocation Loc,
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ const ObjCPropertyDecl *ObjCProperty,
+ bool ObjCPropertyAccess) {
+ // Delay if we're currently parsing a declaration.
+ if (DelayedDiagnostics.shouldDelayDiagnostics() && AD != AD_Partial) {
+ DelayedDiagnostics.add(DelayedDiagnostic::makeAvailability(
+ AD, Loc, D, UnknownObjCClass, ObjCProperty, Message,
+ ObjCPropertyAccess));
+ return;
+ }
+
+ Decl *Ctx = cast<Decl>(getCurLexicalContext());
+ DoEmitAvailabilityWarning(*this, AD, Ctx, D, Message, Loc, UnknownObjCClass,
+ ObjCProperty, ObjCPropertyAccess);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
new file mode 100644
index 0000000..02091a7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
@@ -0,0 +1,13837 @@
+//===------ SemaDeclCXX.cpp - Semantic Analysis for C++ Declarations ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for C++ declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTLambda.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/TypeOrdering.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/CXXFieldCollector.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/Template.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include <map>
+#include <set>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// CheckDefaultArgumentVisitor
+//===----------------------------------------------------------------------===//
+
+namespace {
+  /// CheckDefaultArgumentVisitor - Traverses the default argument of a
+  /// parameter (C++ [dcl.fct.default]) to determine whether it contains
+  /// any ill-formed subexpressions. For example, this will diagnose the
+  /// use of local variables or parameters within the default argument
+  /// expression.
+ class CheckDefaultArgumentVisitor
+ : public StmtVisitor<CheckDefaultArgumentVisitor, bool> {
+ Expr *DefaultArg;
+ Sema *S;
+
+ public:
+ CheckDefaultArgumentVisitor(Expr *defarg, Sema *s)
+ : DefaultArg(defarg), S(s) {}
+
+ bool VisitExpr(Expr *Node);
+ bool VisitDeclRefExpr(DeclRefExpr *DRE);
+ bool VisitCXXThisExpr(CXXThisExpr *ThisE);
+ bool VisitLambdaExpr(LambdaExpr *Lambda);
+ bool VisitPseudoObjectExpr(PseudoObjectExpr *POE);
+ };
+
+ /// VisitExpr - Visit all of the children of this expression.
+ bool CheckDefaultArgumentVisitor::VisitExpr(Expr *Node) {
+ bool IsInvalid = false;
+ for (Stmt *SubStmt : Node->children())
+ IsInvalid |= Visit(SubStmt);
+ return IsInvalid;
+ }
+
+ /// VisitDeclRefExpr - Visit a reference to a declaration, to
+ /// determine whether this declaration can be used in the default
+ /// argument expression.
+ bool CheckDefaultArgumentVisitor::VisitDeclRefExpr(DeclRefExpr *DRE) {
+ NamedDecl *Decl = DRE->getDecl();
+ if (ParmVarDecl *Param = dyn_cast<ParmVarDecl>(Decl)) {
+ // C++ [dcl.fct.default]p9
+ // Default arguments are evaluated each time the function is
+ // called. The order of evaluation of function arguments is
+ // unspecified. Consequently, parameters of a function shall not
+ // be used in default argument expressions, even if they are not
+ // evaluated. Parameters of a function declared before a default
+ // argument expression are in scope and can hide namespace and
+ // class member names.
+ return S->Diag(DRE->getLocStart(),
+ diag::err_param_default_argument_references_param)
+ << Param->getDeclName() << DefaultArg->getSourceRange();
+ } else if (VarDecl *VDecl = dyn_cast<VarDecl>(Decl)) {
+ // C++ [dcl.fct.default]p7
+ // Local variables shall not be used in default argument
+ // expressions.
+ if (VDecl->isLocalVarDecl())
+ return S->Diag(DRE->getLocStart(),
+ diag::err_param_default_argument_references_local)
+ << VDecl->getDeclName() << DefaultArg->getSourceRange();
+ }
+
+ return false;
+ }
+
+ /// VisitCXXThisExpr - Visit a C++ "this" expression.
+ bool CheckDefaultArgumentVisitor::VisitCXXThisExpr(CXXThisExpr *ThisE) {
+ // C++ [dcl.fct.default]p8:
+ // The keyword this shall not be used in a default argument of a
+ // member function.
+ return S->Diag(ThisE->getLocStart(),
+ diag::err_param_default_argument_references_this)
+ << ThisE->getSourceRange();
+ }
+
+ bool CheckDefaultArgumentVisitor::VisitPseudoObjectExpr(PseudoObjectExpr *POE) {
+ bool Invalid = false;
+ for (PseudoObjectExpr::semantics_iterator
+ i = POE->semantics_begin(), e = POE->semantics_end(); i != e; ++i) {
+ Expr *E = *i;
+
+ // Look through bindings.
+ if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) {
+ E = OVE->getSourceExpr();
+ assert(E && "pseudo-object binding without source expression?");
+ }
+
+ Invalid |= Visit(E);
+ }
+ return Invalid;
+ }
+
+ bool CheckDefaultArgumentVisitor::VisitLambdaExpr(LambdaExpr *Lambda) {
+ // C++11 [expr.lambda.prim]p13:
+ // A lambda-expression appearing in a default argument shall not
+ // implicitly or explicitly capture any entity.
+ if (Lambda->capture_begin() == Lambda->capture_end())
+ return false;
+
+ return S->Diag(Lambda->getLocStart(),
+ diag::err_lambda_capture_default_arg);
+ }
+}
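+// For illustration, hypothetical inputs rejected by the visitor above:
+//   int f(int a, int b = a);                    // refers to a parameter
+//   void g() { int n; extern void h(int = n); } // refers to a local variable
+//   struct S { void m(S *p = this); };          // uses 'this'
+//   void k() { int i = 1; extern void m(int = ([i]{ return i; })()); }
+//                                               // lambda captures 'i'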
+
+void
+Sema::ImplicitExceptionSpecification::CalledDecl(SourceLocation CallLoc,
+ const CXXMethodDecl *Method) {
+ // If we have an MSAny spec already, don't bother.
+ if (!Method || ComputedEST == EST_MSAny)
+ return;
+
+ const FunctionProtoType *Proto
+ = Method->getType()->getAs<FunctionProtoType>();
+ Proto = Self->ResolveExceptionSpec(CallLoc, Proto);
+ if (!Proto)
+ return;
+
+ ExceptionSpecificationType EST = Proto->getExceptionSpecType();
+
+ // If we have a throw-all spec at this point, ignore the function.
+ if (ComputedEST == EST_None)
+ return;
+
+  switch (EST) {
+ // If this function can throw any exceptions, make a note of that.
+ case EST_MSAny:
+ case EST_None:
+ ClearExceptions();
+ ComputedEST = EST;
+ return;
+ // FIXME: If the call to this decl is using any of its default arguments, we
+ // need to search them for potentially-throwing calls.
+ // If this function has a basic noexcept, it doesn't affect the outcome.
+ case EST_BasicNoexcept:
+ return;
+  // If we're still at noexcept(true) and there's a throw() callee,
+ // change to that specification.
+ case EST_DynamicNone:
+ if (ComputedEST == EST_BasicNoexcept)
+ ComputedEST = EST_DynamicNone;
+ return;
+ // Check out noexcept specs.
+  case EST_ComputedNoexcept: {
+ FunctionProtoType::NoexceptResult NR =
+ Proto->getNoexceptSpec(Self->Context);
+ assert(NR != FunctionProtoType::NR_NoNoexcept &&
+ "Must have noexcept result for EST_ComputedNoexcept.");
+ assert(NR != FunctionProtoType::NR_Dependent &&
+ "Should not generate implicit declarations for dependent cases, "
+ "and don't know how to handle them anyway.");
+ // noexcept(false) -> no spec on the new function
+ if (NR == FunctionProtoType::NR_Throw) {
+ ClearExceptions();
+ ComputedEST = EST_None;
+ }
+ // noexcept(true) won't change anything either.
+ return;
+ }
+ default:
+ break;
+ }
+ assert(EST == EST_Dynamic && "EST case not considered earlier.");
+ assert(ComputedEST != EST_None &&
+ "Shouldn't collect exceptions when throw-all is guaranteed.");
+ ComputedEST = EST_Dynamic;
+ // Record the exceptions in this function's exception specification.
+ for (const auto &E : Proto->exceptions())
+ if (ExceptionsSeen.insert(Self->Context.getCanonicalType(E)).second)
+ Exceptions.push_back(E);
+}
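+// For illustration, a hypothetical input: the implicit destructor of 'B'
+// collects the dynamic exception specification of the member it destroys:
+//   struct A { ~A() throw(int); };
+//   struct B { A a; };   // implicit ~B() is treated as throw(int)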
+
+void Sema::ImplicitExceptionSpecification::CalledExpr(Expr *E) {
+ if (!E || ComputedEST == EST_MSAny)
+ return;
+
+ // FIXME:
+ //
+ // C++0x [except.spec]p14:
+ // [An] implicit exception-specification specifies the type-id T if and
+ // only if T is allowed by the exception-specification of a function directly
+ // invoked by f's implicit definition; f shall allow all exceptions if any
+ // function it directly invokes allows all exceptions, and f shall allow no
+ // exceptions if every function it directly invokes allows no exceptions.
+ //
+ // Note in particular that if an implicit exception-specification is generated
+ // for a function containing a throw-expression, that specification can still
+ // be noexcept(true).
+ //
+ // Note also that 'directly invoked' is not defined in the standard, and there
+ // is no indication that we should only consider potentially-evaluated calls.
+ //
+ // Ultimately we should implement the intent of the standard: the exception
+ // specification should be the set of exceptions which can be thrown by the
+ // implicit definition. For now, we assume that any non-nothrow expression can
+ // throw any exception.
+
+ if (Self->canThrow(E))
+ ComputedEST = EST_None;
+}
+
+bool
+Sema::SetParamDefaultArgument(ParmVarDecl *Param, Expr *Arg,
+ SourceLocation EqualLoc) {
+ if (RequireCompleteType(Param->getLocation(), Param->getType(),
+ diag::err_typecheck_decl_incomplete_type)) {
+ Param->setInvalidDecl();
+ return true;
+ }
+
+ // C++ [dcl.fct.default]p5
+ // A default argument expression is implicitly converted (clause
+ // 4) to the parameter type. The default argument expression has
+ // the same semantic constraints as the initializer expression in
+ // a declaration of a variable of the parameter type, using the
+ // copy-initialization semantics (8.5).
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
+ Param);
+ InitializationKind Kind = InitializationKind::CreateCopy(Param->getLocation(),
+ EqualLoc);
+ InitializationSequence InitSeq(*this, Entity, Kind, Arg);
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Arg);
+ if (Result.isInvalid())
+ return true;
+ Arg = Result.getAs<Expr>();
+
+ CheckCompletedExpr(Arg, EqualLoc);
+ Arg = MaybeCreateExprWithCleanups(Arg);
+
+ // Okay: add the default argument to the parameter
+ Param->setDefaultArg(Arg);
+
+ // We have already instantiated this parameter; provide each of the
+ // instantiations with the uninstantiated default argument.
+ UnparsedDefaultArgInstantiationsMap::iterator InstPos
+ = UnparsedDefaultArgInstantiations.find(Param);
+ if (InstPos != UnparsedDefaultArgInstantiations.end()) {
+ for (unsigned I = 0, N = InstPos->second.size(); I != N; ++I)
+ InstPos->second[I]->setUninstantiatedDefaultArg(Arg);
+
+ // We're done tracking this parameter's instantiations.
+ UnparsedDefaultArgInstantiations.erase(InstPos);
+ }
+
+ return false;
+}
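+// For illustration, a hypothetical input: copy-initialization semantics mean
+// an 'explicit' constructor cannot convert the default argument:
+//   struct X { explicit X(int); };
+//   void f(X x = 42);   // error: no implicit conversion from 'int' to 'X'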
+
+/// ActOnParamDefaultArgument - Check whether the default argument
+/// provided for a function parameter is well-formed. If so, attach it
+/// to the parameter declaration.
+void
+Sema::ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
+ Expr *DefaultArg) {
+ if (!param || !DefaultArg)
+ return;
+
+ ParmVarDecl *Param = cast<ParmVarDecl>(param);
+ UnparsedDefaultArgLocs.erase(Param);
+
+ // Default arguments are only permitted in C++
+ if (!getLangOpts().CPlusPlus) {
+ Diag(EqualLoc, diag::err_param_default_argument)
+ << DefaultArg->getSourceRange();
+ Param->setInvalidDecl();
+ return;
+ }
+
+ // Check for unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(DefaultArg, UPPC_DefaultArgument)) {
+ Param->setInvalidDecl();
+ return;
+ }
+
+ // C++11 [dcl.fct.default]p3
+ // A default argument expression [...] shall not be specified for a
+ // parameter pack.
+ if (Param->isParameterPack()) {
+ Diag(EqualLoc, diag::err_param_default_argument_on_parameter_pack)
+ << DefaultArg->getSourceRange();
+ return;
+ }
+
+ // Check that the default argument is well-formed
+ CheckDefaultArgumentVisitor DefaultArgChecker(DefaultArg, this);
+ if (DefaultArgChecker.Visit(DefaultArg)) {
+ Param->setInvalidDecl();
+ return;
+ }
+
+ SetParamDefaultArgument(Param, DefaultArg, EqualLoc);
+}
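+// For illustration, a hypothetical input for the parameter-pack rule above:
+//   template <typename... T> void f(T... t = 0);   // error: default
+//                                                   // argument on a pack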
+
+/// ActOnParamUnparsedDefaultArgument - We've seen a default
+/// argument for a function parameter, but we can't parse it yet
+/// because we're inside a class definition. Note that this default
+/// argument will be parsed later.
+void Sema::ActOnParamUnparsedDefaultArgument(Decl *param,
+ SourceLocation EqualLoc,
+ SourceLocation ArgLoc) {
+ if (!param)
+ return;
+
+ ParmVarDecl *Param = cast<ParmVarDecl>(param);
+ Param->setUnparsedDefaultArg();
+ UnparsedDefaultArgLocs[Param] = ArgLoc;
+}
+
+/// ActOnParamDefaultArgumentError - Parsing or semantic analysis of
+/// the default argument for the parameter param failed.
+void Sema::ActOnParamDefaultArgumentError(Decl *param,
+ SourceLocation EqualLoc) {
+ if (!param)
+ return;
+
+ ParmVarDecl *Param = cast<ParmVarDecl>(param);
+ Param->setInvalidDecl();
+ UnparsedDefaultArgLocs.erase(Param);
+ Param->setDefaultArg(new(Context)
+ OpaqueValueExpr(EqualLoc,
+ Param->getType().getNonReferenceType(),
+ VK_RValue));
+}
+
+/// CheckExtraCXXDefaultArguments - Check for any extra default
+/// arguments in the declarator, which is not a function declaration
+/// or definition and therefore is not permitted to have default
+/// arguments. This routine should be invoked for every declarator
+/// that is not a function declaration or definition.
+void Sema::CheckExtraCXXDefaultArguments(Declarator &D) {
+ // C++ [dcl.fct.default]p3
+ // A default argument expression shall be specified only in the
+ // parameter-declaration-clause of a function declaration or in a
+ // template-parameter (14.1). It shall not be specified for a
+ // parameter pack. If it is specified in a
+ // parameter-declaration-clause, it shall not occur within a
+ // declarator or abstract-declarator of a parameter-declaration.
+ bool MightBeFunction = D.isFunctionDeclarationContext();
+ for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
+ DeclaratorChunk &chunk = D.getTypeObject(i);
+ if (chunk.Kind == DeclaratorChunk::Function) {
+ if (MightBeFunction) {
+ // This is a function declaration. It can have default arguments, but
+ // keep looking in case its return type is a function type with default
+ // arguments.
+ MightBeFunction = false;
+ continue;
+ }
+ for (unsigned argIdx = 0, e = chunk.Fun.NumParams; argIdx != e;
+ ++argIdx) {
+ ParmVarDecl *Param = cast<ParmVarDecl>(chunk.Fun.Params[argIdx].Param);
+ if (Param->hasUnparsedDefaultArg()) {
+ CachedTokens *Toks = chunk.Fun.Params[argIdx].DefaultArgTokens;
+ SourceRange SR;
+ if (Toks->size() > 1)
+ SR = SourceRange((*Toks)[1].getLocation(),
+ Toks->back().getLocation());
+ else
+ SR = UnparsedDefaultArgLocs[Param];
+ Diag(Param->getLocation(), diag::err_param_default_argument_nonfunc)
+ << SR;
+ delete Toks;
+ chunk.Fun.Params[argIdx].DefaultArgTokens = nullptr;
+ } else if (Param->getDefaultArg()) {
+ Diag(Param->getLocation(), diag::err_param_default_argument_nonfunc)
+ << Param->getDefaultArg()->getSourceRange();
+ Param->setDefaultArg(nullptr);
+ }
+ }
+ } else if (chunk.Kind != DeclaratorChunk::Paren) {
+ MightBeFunction = false;
+ }
+ }
+}
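+// For illustration, hypothetical declarators rejected here:
+//   int (*pf)(int = 10);          // error: not a function declaration
+//   void g(int (*fp)(int = 5));   // error: inside a parameter's declarator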
+
+static bool functionDeclHasDefaultArgument(const FunctionDecl *FD) {
+ for (unsigned NumParams = FD->getNumParams(); NumParams > 0; --NumParams) {
+ const ParmVarDecl *PVD = FD->getParamDecl(NumParams-1);
+ if (!PVD->hasDefaultArg())
+ return false;
+ if (!PVD->hasInheritedDefaultArg())
+ return true;
+ }
+ return false;
+}
+
+/// MergeCXXFunctionDecl - Merge two declarations of the same C++
+/// function, once we already know that they have the same
+/// type. Subroutine of MergeFunctionDecl. Returns true if there was an
+/// error, false otherwise.
+bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old,
+ Scope *S) {
+ bool Invalid = false;
+
+ // The declaration context corresponding to the scope is the semantic
+ // parent, unless this is a local function declaration, in which case
+ // it is that surrounding function.
+ DeclContext *ScopeDC = New->isLocalExternDecl()
+ ? New->getLexicalDeclContext()
+ : New->getDeclContext();
+
+ // Find the previous declaration for the purpose of default arguments.
+ FunctionDecl *PrevForDefaultArgs = Old;
+ for (/**/; PrevForDefaultArgs;
+ // Don't bother looking back past the latest decl if this is a local
+ // extern declaration; nothing else could work.
+ PrevForDefaultArgs = New->isLocalExternDecl()
+ ? nullptr
+ : PrevForDefaultArgs->getPreviousDecl()) {
+ // Ignore hidden declarations.
+ if (!LookupResult::isVisible(*this, PrevForDefaultArgs))
+ continue;
+
+ if (S && !isDeclInScope(PrevForDefaultArgs, ScopeDC, S) &&
+ !New->isCXXClassMember()) {
+ // Ignore default arguments of old decl if they are not in
+ // the same scope and this is not an out-of-line definition of
+ // a member function.
+ continue;
+ }
+
+ if (PrevForDefaultArgs->isLocalExternDecl() != New->isLocalExternDecl()) {
+ // If only one of these is a local function declaration, then they are
+ // declared in different scopes, even though isDeclInScope may think
+ // they're in the same scope. (If both are local, the scope check is
+      // sufficient, and if neither is local, then they are in the same scope.)
+ continue;
+ }
+
+ // We found our guy.
+ break;
+ }
+
+ // C++ [dcl.fct.default]p4:
+ // For non-template functions, default arguments can be added in
+ // later declarations of a function in the same
+ // scope. Declarations in different scopes have completely
+ // distinct sets of default arguments. That is, declarations in
+ // inner scopes do not acquire default arguments from
+ // declarations in outer scopes, and vice versa. In a given
+ // function declaration, all parameters subsequent to a
+ // parameter with a default argument shall have default
+ // arguments supplied in this or previous declarations. A
+ // default argument shall not be redefined by a later
+ // declaration (not even to the same value).
+ //
+ // C++ [dcl.fct.default]p6:
+ // Except for member functions of class templates, the default arguments
+ // in a member function definition that appears outside of the class
+ // definition are added to the set of default arguments provided by the
+ // member function declaration in the class definition.
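+  // For illustration, hypothetical redeclarations in the same scope:
+  //   void f(int, int);
+  //   void f(int, int = 7);   // OK: adds a default argument
+  //   void g(int = 1);
+  //   void g(int = 1);        // error: redefinition, even to the same value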
+ for (unsigned p = 0, NumParams = PrevForDefaultArgs
+ ? PrevForDefaultArgs->getNumParams()
+ : 0;
+ p < NumParams; ++p) {
+ ParmVarDecl *OldParam = PrevForDefaultArgs->getParamDecl(p);
+ ParmVarDecl *NewParam = New->getParamDecl(p);
+
+ bool OldParamHasDfl = OldParam ? OldParam->hasDefaultArg() : false;
+ bool NewParamHasDfl = NewParam->hasDefaultArg();
+
+ if (OldParamHasDfl && NewParamHasDfl) {
+ unsigned DiagDefaultParamID =
+ diag::err_param_default_argument_redefinition;
+
+      // MSVC allows default parameters to be redefined for member functions
+      // of class templates. The new default parameter's value is ignored.
+ Invalid = true;
+ if (getLangOpts().MicrosoftExt) {
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(New);
+ if (MD && MD->getParent()->getDescribedClassTemplate()) {
+ // Merge the old default argument into the new parameter.
+ NewParam->setHasInheritedDefaultArg();
+ if (OldParam->hasUninstantiatedDefaultArg())
+ NewParam->setUninstantiatedDefaultArg(
+ OldParam->getUninstantiatedDefaultArg());
+ else
+ NewParam->setDefaultArg(OldParam->getInit());
+ DiagDefaultParamID = diag::ext_param_default_argument_redefinition;
+ Invalid = false;
+ }
+ }
+
+ // FIXME: If we knew where the '=' was, we could easily provide a fix-it
+ // hint here. Alternatively, we could walk the type-source information
+ // for NewParam to find the last source location in the type... but it
+ // isn't worth the effort right now. This is the kind of test case that
+ // is hard to get right:
+ // int f(int);
+ // void g(int (*fp)(int) = f);
+ // void g(int (*fp)(int) = &f);
+ Diag(NewParam->getLocation(), DiagDefaultParamID)
+ << NewParam->getDefaultArgRange();
+
+ // Look for the function declaration where the default argument was
+ // actually written, which may be a declaration prior to Old.
+ for (auto Older = PrevForDefaultArgs;
+ OldParam->hasInheritedDefaultArg(); /**/) {
+ Older = Older->getPreviousDecl();
+ OldParam = Older->getParamDecl(p);
+ }
+
+ Diag(OldParam->getLocation(), diag::note_previous_definition)
+ << OldParam->getDefaultArgRange();
+ } else if (OldParamHasDfl) {
+ // Merge the old default argument into the new parameter.
+ // It's important to use getInit() here; getDefaultArg()
+ // strips off any top-level ExprWithCleanups.
+ NewParam->setHasInheritedDefaultArg();
+ if (OldParam->hasUnparsedDefaultArg())
+ NewParam->setUnparsedDefaultArg();
+ else if (OldParam->hasUninstantiatedDefaultArg())
+ NewParam->setUninstantiatedDefaultArg(
+ OldParam->getUninstantiatedDefaultArg());
+ else
+ NewParam->setDefaultArg(OldParam->getInit());
+ } else if (NewParamHasDfl) {
+ if (New->getDescribedFunctionTemplate()) {
+ // Paragraph 4, quoted above, only applies to non-template functions.
+ Diag(NewParam->getLocation(),
+ diag::err_param_default_argument_template_redecl)
+ << NewParam->getDefaultArgRange();
+ Diag(PrevForDefaultArgs->getLocation(),
+ diag::note_template_prev_declaration)
+ << false;
+ } else if (New->getTemplateSpecializationKind()
+ != TSK_ImplicitInstantiation &&
+ New->getTemplateSpecializationKind() != TSK_Undeclared) {
+ // C++ [temp.expr.spec]p21:
+ // Default function arguments shall not be specified in a declaration
+ // or a definition for one of the following explicit specializations:
+ // - the explicit specialization of a function template;
+ // - the explicit specialization of a member function template;
+ // - the explicit specialization of a member function of a class
+ // template where the class template specialization to which the
+ // member function specialization belongs is implicitly
+ // instantiated.
+ Diag(NewParam->getLocation(), diag::err_template_spec_default_arg)
+        << (New->getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
+ << New->getDeclName()
+ << NewParam->getDefaultArgRange();
+ } else if (New->getDeclContext()->isDependentContext()) {
+ // C++ [dcl.fct.default]p6 (DR217):
+ // Default arguments for a member function of a class template shall
+ // be specified on the initial declaration of the member function
+ // within the class template.
+ //
+ // Reading the tea leaves a bit in DR217 and its reference to DR205
+ // leads me to the conclusion that one cannot add default function
+ // arguments for an out-of-line definition of a member function of a
+ // dependent type.
+ int WhichKind = 2;
+ if (CXXRecordDecl *Record
+ = dyn_cast<CXXRecordDecl>(New->getDeclContext())) {
+ if (Record->getDescribedClassTemplate())
+ WhichKind = 0;
+ else if (isa<ClassTemplatePartialSpecializationDecl>(Record))
+ WhichKind = 1;
+ else
+ WhichKind = 2;
+ }
+
+ Diag(NewParam->getLocation(),
+ diag::err_param_default_argument_member_template_redecl)
+ << WhichKind
+ << NewParam->getDefaultArgRange();
+ }
+ }
+ }
+
+ // DR1344: If a default argument is added outside a class definition and that
+ // default argument makes the function a special member function, the program
+ // is ill-formed. This can only happen for constructors.
+ if (isa<CXXConstructorDecl>(New) &&
+ New->getMinRequiredArguments() < Old->getMinRequiredArguments()) {
+ CXXSpecialMember NewSM = getSpecialMember(cast<CXXMethodDecl>(New)),
+ OldSM = getSpecialMember(cast<CXXMethodDecl>(Old));
+ if (NewSM != OldSM) {
+ ParmVarDecl *NewParam = New->getParamDecl(New->getMinRequiredArguments());
+ assert(NewParam->hasDefaultArg());
+ Diag(NewParam->getLocation(), diag::err_default_arg_makes_ctor_special)
+ << NewParam->getDefaultArgRange() << NewSM;
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ }
+ }
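+  // For illustration, a hypothetical input for the DR1344 check above:
+  //   struct S { S(int); };
+  //   S::S(int i = 0) {}   // error: the added default argument makes
+  //                        // S(int) a default constructor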
+
+ const FunctionDecl *Def;
+ // C++11 [dcl.constexpr]p1: If any declaration of a function or function
+ // template has a constexpr specifier then all its declarations shall
+ // contain the constexpr specifier.
+ if (New->isConstexpr() != Old->isConstexpr()) {
+ Diag(New->getLocation(), diag::err_constexpr_redecl_mismatch)
+ << New << New->isConstexpr();
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ Invalid = true;
+ } else if (!Old->getMostRecentDecl()->isInlined() && New->isInlined() &&
+ Old->isDefined(Def)) {
+ // C++11 [dcl.fcn.spec]p4:
+ // If the definition of a function appears in a translation unit before its
+ // first declaration as inline, the program is ill-formed.
+ Diag(New->getLocation(), diag::err_inline_decl_follows_def) << New;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ Invalid = true;
+ }
+
+ // C++11 [dcl.fct.default]p4: If a friend declaration specifies a default
+ // argument expression, that declaration shall be a definition and shall be
+ // the only declaration of the function or function template in the
+ // translation unit.
+ if (Old->getFriendObjectKind() == Decl::FOK_Undeclared &&
+ functionDeclHasDefaultArgument(Old)) {
+ Diag(New->getLocation(), diag::err_friend_decl_with_def_arg_redeclared);
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ Invalid = true;
+ }
+
+ if (CheckEquivalentExceptionSpec(Old, New))
+ Invalid = true;
+
+ return Invalid;
+}
+
+/// \brief Merge the exception specifications of two variable declarations.
+///
+/// This is called when there's a redeclaration of a VarDecl. The function
+/// checks if the redeclaration might have an exception specification and
+/// validates compatibility and merges the specs if necessary.
+void Sema::MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old) {
+ // Shortcut if exceptions are disabled.
+ if (!getLangOpts().CXXExceptions)
+ return;
+
+ assert(Context.hasSameType(New->getType(), Old->getType()) &&
+ "Should only be called if types are otherwise the same.");
+
+ QualType NewType = New->getType();
+ QualType OldType = Old->getType();
+
+ // We're only interested in pointers and references to functions, as well
+ // as pointers to member functions.
+ if (const ReferenceType *R = NewType->getAs<ReferenceType>()) {
+ NewType = R->getPointeeType();
+ OldType = OldType->getAs<ReferenceType>()->getPointeeType();
+ } else if (const PointerType *P = NewType->getAs<PointerType>()) {
+ NewType = P->getPointeeType();
+ OldType = OldType->getAs<PointerType>()->getPointeeType();
+ } else if (const MemberPointerType *M = NewType->getAs<MemberPointerType>()) {
+ NewType = M->getPointeeType();
+ OldType = OldType->getAs<MemberPointerType>()->getPointeeType();
+ }
+
+ if (!NewType->isFunctionProtoType())
+ return;
+
+  // There are lots of special cases for functions. For function pointers,
+  // system libraries are hopefully not as broken, so we don't need those
+  // workarounds here.
+ if (CheckEquivalentExceptionSpec(
+ OldType->getAs<FunctionProtoType>(), Old->getLocation(),
+ NewType->getAs<FunctionProtoType>(), New->getLocation())) {
+ New->setInvalidDecl();
+ }
+}
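+// For illustration, a hypothetical redeclaration this validates; the types
+// match (the exception specification is not part of the type here), but the
+// specifications must still agree:
+//   extern void (*pf)() throw(int);
+//   extern void (*pf)() throw();   // error: incompatible specifications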
+
+/// CheckCXXDefaultArguments - Verify that the default arguments for a
+/// function declaration are well-formed according to C++
+/// [dcl.fct.default].
+void Sema::CheckCXXDefaultArguments(FunctionDecl *FD) {
+ unsigned NumParams = FD->getNumParams();
+ unsigned p;
+
+ // Find first parameter with a default argument
+ for (p = 0; p < NumParams; ++p) {
+ ParmVarDecl *Param = FD->getParamDecl(p);
+ if (Param->hasDefaultArg())
+ break;
+ }
+
+ // C++11 [dcl.fct.default]p4:
+ // In a given function declaration, each parameter subsequent to a parameter
+ // with a default argument shall have a default argument supplied in this or
+ // a previous declaration or shall be a function parameter pack. A default
+ // argument shall not be redefined by a later declaration (not even to the
+ // same value).
+ unsigned LastMissingDefaultArg = 0;
+ for (; p < NumParams; ++p) {
+ ParmVarDecl *Param = FD->getParamDecl(p);
+ if (!Param->hasDefaultArg() && !Param->isParameterPack()) {
+ if (Param->isInvalidDecl())
+ /* We already complained about this parameter. */;
+ else if (Param->getIdentifier())
+ Diag(Param->getLocation(),
+ diag::err_param_default_argument_missing_name)
+ << Param->getIdentifier();
+ else
+ Diag(Param->getLocation(),
+ diag::err_param_default_argument_missing);
+
+ LastMissingDefaultArg = p;
+ }
+ }
+
+ if (LastMissingDefaultArg > 0) {
+ // Some default arguments were missing. Clear out all of the
+ // default arguments up to (and including) the last missing
+ // default argument, so that we leave the function parameters
+ // in a semantically valid state.
+ for (p = 0; p <= LastMissingDefaultArg; ++p) {
+ ParmVarDecl *Param = FD->getParamDecl(p);
+ if (Param->hasDefaultArg()) {
+ Param->setDefaultArg(nullptr);
+ }
+ }
+ }
+}
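+// For illustration, a hypothetical input: every parameter after one with a
+// default argument needs its own default (or must be a parameter pack):
+//   void f(int a = 1, int b);   // error: 'b' is missing a default argument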
+
+// CheckConstexprParameterTypes - Check whether a function's parameter types
+// are all literal types. If so, return true. If not, produce a suitable
+// diagnostic and return false.
+static bool CheckConstexprParameterTypes(Sema &SemaRef,
+ const FunctionDecl *FD) {
+ unsigned ArgIndex = 0;
+ const FunctionProtoType *FT = FD->getType()->getAs<FunctionProtoType>();
+ for (FunctionProtoType::param_type_iterator i = FT->param_type_begin(),
+ e = FT->param_type_end();
+ i != e; ++i, ++ArgIndex) {
+ const ParmVarDecl *PD = FD->getParamDecl(ArgIndex);
+ SourceLocation ParamLoc = PD->getLocation();
+ if (!(*i)->isDependentType() &&
+ SemaRef.RequireLiteralType(ParamLoc, *i,
+ diag::err_constexpr_non_literal_param,
+ ArgIndex+1, PD->getSourceRange(),
+ isa<CXXConstructorDecl>(FD)))
+ return false;
+ }
+ return true;
+}
+
+/// \brief Get diagnostic %select index for tag kind for
+/// record diagnostic message.
+/// WARNING: Indexes apply to particular diagnostics only!
+///
+/// \returns diagnostic %select index.
+static unsigned getRecordDiagFromTagKind(TagTypeKind Tag) {
+ switch (Tag) {
+ case TTK_Struct: return 0;
+ case TTK_Interface: return 1;
+ case TTK_Class: return 2;
+ default: llvm_unreachable("Invalid tag kind for record diagnostic!");
+ }
+}
+
+// CheckConstexprFunctionDecl - Check whether a function declaration satisfies
+// the requirements of a constexpr function definition or a constexpr
+// constructor definition. If so, return true. If not, produce appropriate
+// diagnostics and return false.
+//
+// This implements C++11 [dcl.constexpr]p3,4, as amended by DR1360.
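+//
+// For example, a constexpr constructor is rejected if the class has a
+// virtual base:
+//   struct B {};
+//   struct D : virtual B { constexpr D() {} };  // error: virtual base class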
+bool Sema::CheckConstexprFunctionDecl(const FunctionDecl *NewFD) {
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD);
+ if (MD && MD->isInstance()) {
+ // C++11 [dcl.constexpr]p4:
+ // The definition of a constexpr constructor shall satisfy the following
+ // constraints:
+ // - the class shall not have any virtual base classes;
+ const CXXRecordDecl *RD = MD->getParent();
+ if (RD->getNumVBases()) {
+ Diag(NewFD->getLocation(), diag::err_constexpr_virtual_base)
+ << isa<CXXConstructorDecl>(NewFD)
+ << getRecordDiagFromTagKind(RD->getTagKind()) << RD->getNumVBases();
+ for (const auto &I : RD->vbases())
+ Diag(I.getLocStart(),
+ diag::note_constexpr_virtual_base_here) << I.getSourceRange();
+ return false;
+ }
+ }
+
+ if (!isa<CXXConstructorDecl>(NewFD)) {
+ // C++11 [dcl.constexpr]p3:
+ // The definition of a constexpr function shall satisfy the following
+ // constraints:
+ // - it shall not be virtual;
+ const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(NewFD);
+ if (Method && Method->isVirtual()) {
+ Method = Method->getCanonicalDecl();
+ Diag(Method->getLocation(), diag::err_constexpr_virtual);
+
+ // If it's not obvious why this function is virtual, find an overridden
+ // function which uses the 'virtual' keyword.
+ const CXXMethodDecl *WrittenVirtual = Method;
+ while (!WrittenVirtual->isVirtualAsWritten())
+ WrittenVirtual = *WrittenVirtual->begin_overridden_methods();
+ if (WrittenVirtual != Method)
+ Diag(WrittenVirtual->getLocation(),
+ diag::note_overridden_virtual_function);
+ return false;
+ }
+
+ // - its return type shall be a literal type;
+ QualType RT = NewFD->getReturnType();
+ if (!RT->isDependentType() &&
+ RequireLiteralType(NewFD->getLocation(), RT,
+ diag::err_constexpr_non_literal_return))
+ return false;
+ }
+
+ // - each of its parameter types shall be a literal type;
+ if (!CheckConstexprParameterTypes(*this, NewFD))
+ return false;
+
+ return true;
+}
+
+/// Check that the given declaration statement is legal within a constexpr
+/// function body. C++11 [dcl.constexpr]p3,p4, and C++1y [dcl.constexpr]p3.
+///
+/// \return true if the body is OK (maybe only as an extension), false if we
+/// have diagnosed a problem.
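+///
+/// For example, a static local variable is rejected even in C++1y:
+/// @code
+///   constexpr int f() { static int n = 1; return n; }  // error
+/// @endcode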
+static bool CheckConstexprDeclStmt(Sema &SemaRef, const FunctionDecl *Dcl,
+ DeclStmt *DS, SourceLocation &Cxx1yLoc) {
+ // C++11 [dcl.constexpr]p3 and p4:
+ // The definition of a constexpr function(p3) or constructor(p4) [...] shall
+ // contain only
+ for (const auto *DclIt : DS->decls()) {
+ switch (DclIt->getKind()) {
+ case Decl::StaticAssert:
+ case Decl::Using:
+ case Decl::UsingShadow:
+ case Decl::UsingDirective:
+ case Decl::UnresolvedUsingTypename:
+ case Decl::UnresolvedUsingValue:
+ // - static_assert-declarations
+ // - using-declarations,
+ // - using-directives,
+ continue;
+
+ case Decl::Typedef:
+ case Decl::TypeAlias: {
+ // - typedef declarations and alias-declarations that do not define
+ // classes or enumerations,
+ const auto *TN = cast<TypedefNameDecl>(DclIt);
+ if (TN->getUnderlyingType()->isVariablyModifiedType()) {
+ // Don't allow variably-modified types in constexpr functions.
+ TypeLoc TL = TN->getTypeSourceInfo()->getTypeLoc();
+ SemaRef.Diag(TL.getBeginLoc(), diag::err_constexpr_vla)
+ << TL.getSourceRange() << TL.getType()
+ << isa<CXXConstructorDecl>(Dcl);
+ return false;
+ }
+ continue;
+ }
+
+ case Decl::Enum:
+ case Decl::CXXRecord:
+ // C++1y allows types to be defined, not just declared.
+ if (cast<TagDecl>(DclIt)->isThisDeclarationADefinition())
+ SemaRef.Diag(DS->getLocStart(),
+ SemaRef.getLangOpts().CPlusPlus14
+ ? diag::warn_cxx11_compat_constexpr_type_definition
+ : diag::ext_constexpr_type_definition)
+ << isa<CXXConstructorDecl>(Dcl);
+ continue;
+
+ case Decl::EnumConstant:
+ case Decl::IndirectField:
+ case Decl::ParmVar:
+ // These can only appear with other declarations which are banned in
+ // C++11 and permitted in C++1y, so ignore them.
+ continue;
+
+ case Decl::Var: {
+ // C++1y [dcl.constexpr]p3 allows anything except:
+ // a definition of a variable of non-literal type or of static or
+ // thread storage duration or for which no initialization is performed.
+ const auto *VD = cast<VarDecl>(DclIt);
+ if (VD->isThisDeclarationADefinition()) {
+ if (VD->isStaticLocal()) {
+ SemaRef.Diag(VD->getLocation(),
+ diag::err_constexpr_local_var_static)
+ << isa<CXXConstructorDecl>(Dcl)
+ << (VD->getTLSKind() == VarDecl::TLS_Dynamic);
+ return false;
+ }
+ if (!VD->getType()->isDependentType() &&
+ SemaRef.RequireLiteralType(
+ VD->getLocation(), VD->getType(),
+ diag::err_constexpr_local_var_non_literal_type,
+ isa<CXXConstructorDecl>(Dcl)))
+ return false;
+ if (!VD->getType()->isDependentType() &&
+ !VD->hasInit() && !VD->isCXXForRangeDecl()) {
+ SemaRef.Diag(VD->getLocation(),
+ diag::err_constexpr_local_var_no_init)
+ << isa<CXXConstructorDecl>(Dcl);
+ return false;
+ }
+ }
+ SemaRef.Diag(VD->getLocation(),
+ SemaRef.getLangOpts().CPlusPlus14
+ ? diag::warn_cxx11_compat_constexpr_local_var
+ : diag::ext_constexpr_local_var)
+ << isa<CXXConstructorDecl>(Dcl);
+ continue;
+ }
+
+ case Decl::NamespaceAlias:
+ case Decl::Function:
+ // These are disallowed in C++11 and permitted in C++1y. Allow them
+ // everywhere as an extension.
+ if (!Cxx1yLoc.isValid())
+ Cxx1yLoc = DS->getLocStart();
+ continue;
+
+ default:
+ SemaRef.Diag(DS->getLocStart(), diag::err_constexpr_body_invalid_stmt)
+ << isa<CXXConstructorDecl>(Dcl);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/// Check that the given field is initialized within a constexpr constructor.
+///
+/// \param Dcl The constexpr constructor being checked.
+/// \param Field The field being checked. This may be a member of an anonymous
+/// struct or union nested within the class being checked.
+/// \param Inits All declarations, including anonymous struct/union members and
+/// indirect members, for which any initialization was provided.
+/// \param Diagnosed Set to true if an error is produced.
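+///
+/// For example, 'y' is diagnosed here because it is never initialized:
+/// @code
+///   struct X { int x, y; constexpr X() : x(0) {} };  // error: 'y' uninitialized
+/// @endcode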
+static void CheckConstexprCtorInitializer(Sema &SemaRef,
+ const FunctionDecl *Dcl,
+ FieldDecl *Field,
+ llvm::SmallSet<Decl*, 16> &Inits,
+ bool &Diagnosed) {
+ if (Field->isInvalidDecl())
+ return;
+
+ if (Field->isUnnamedBitfield())
+ return;
+
+ // Anonymous unions with no variant members and empty anonymous structs do not
+ // need to be explicitly initialized. FIXME: Anonymous structs that contain no
+ // indirect fields don't need initializing.
+ if (Field->isAnonymousStructOrUnion() &&
+ (Field->getType()->isUnionType()
+ ? !Field->getType()->getAsCXXRecordDecl()->hasVariantMembers()
+ : Field->getType()->getAsCXXRecordDecl()->isEmpty()))
+ return;
+
+ if (!Inits.count(Field)) {
+ if (!Diagnosed) {
+ SemaRef.Diag(Dcl->getLocation(), diag::err_constexpr_ctor_missing_init);
+ Diagnosed = true;
+ }
+ SemaRef.Diag(Field->getLocation(), diag::note_constexpr_ctor_missing_init);
+ } else if (Field->isAnonymousStructOrUnion()) {
+ const RecordDecl *RD = Field->getType()->castAs<RecordType>()->getDecl();
+ for (auto *I : RD->fields())
+ // If an anonymous union contains an anonymous struct of which any member
+ // is initialized, all members must be initialized.
+ if (!RD->isUnion() || Inits.count(I))
+ CheckConstexprCtorInitializer(SemaRef, Dcl, I, Inits, Diagnosed);
+ }
+}
+
+/// Check that the provided statement is allowed in a constexpr function
+/// definition.
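+///
+/// For example, a loop is rejected in C++11 but accepted in C++1y:
+/// @code
+///   constexpr int f(int n) {
+///     int r = 0;                            // extension in C++11
+///     for (int i = 0; i != n; ++i) r += i;  // error in C++11, OK in C++1y
+///     return r;
+///   }
+/// @endcode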
+static bool
+CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
+ SmallVectorImpl<SourceLocation> &ReturnStmts,
+ SourceLocation &Cxx1yLoc) {
+ // - its function-body shall be [...] a compound-statement that contains only
+ switch (S->getStmtClass()) {
+ case Stmt::NullStmtClass:
+ // - null statements,
+ return true;
+
+ case Stmt::DeclStmtClass:
+ // - static_assert-declarations
+ // - using-declarations,
+ // - using-directives,
+ // - typedef declarations and alias-declarations that do not define
+ // classes or enumerations,
+ if (!CheckConstexprDeclStmt(SemaRef, Dcl, cast<DeclStmt>(S), Cxx1yLoc))
+ return false;
+ return true;
+
+ case Stmt::ReturnStmtClass:
+ // - and exactly one return statement;
+ if (isa<CXXConstructorDecl>(Dcl)) {
+ // C++1y allows return statements in constexpr constructors.
+ if (!Cxx1yLoc.isValid())
+ Cxx1yLoc = S->getLocStart();
+ return true;
+ }
+
+ ReturnStmts.push_back(S->getLocStart());
+ return true;
+
+ case Stmt::CompoundStmtClass: {
+ // C++1y allows compound-statements.
+ if (!Cxx1yLoc.isValid())
+ Cxx1yLoc = S->getLocStart();
+
+ CompoundStmt *CompStmt = cast<CompoundStmt>(S);
+ for (auto *BodyIt : CompStmt->body()) {
+ if (!CheckConstexprFunctionStmt(SemaRef, Dcl, BodyIt, ReturnStmts,
+ Cxx1yLoc))
+ return false;
+ }
+ return true;
+ }
+
+ case Stmt::AttributedStmtClass:
+ if (!Cxx1yLoc.isValid())
+ Cxx1yLoc = S->getLocStart();
+ return true;
+
+ case Stmt::IfStmtClass: {
+ // C++1y allows if-statements.
+ if (!Cxx1yLoc.isValid())
+ Cxx1yLoc = S->getLocStart();
+
+ IfStmt *If = cast<IfStmt>(S);
+ if (!CheckConstexprFunctionStmt(SemaRef, Dcl, If->getThen(), ReturnStmts,
+ Cxx1yLoc))
+ return false;
+ if (If->getElse() &&
+ !CheckConstexprFunctionStmt(SemaRef, Dcl, If->getElse(), ReturnStmts,
+ Cxx1yLoc))
+ return false;
+ return true;
+ }
+
+ case Stmt::WhileStmtClass:
+ case Stmt::DoStmtClass:
+ case Stmt::ForStmtClass:
+ case Stmt::CXXForRangeStmtClass:
+ case Stmt::ContinueStmtClass:
+ // C++1y allows all of these. We don't allow them as extensions in C++11,
+ // because they don't make sense without variable mutation.
+ if (!SemaRef.getLangOpts().CPlusPlus14)
+ break;
+ if (!Cxx1yLoc.isValid())
+ Cxx1yLoc = S->getLocStart();
+ for (Stmt *SubStmt : S->children())
+ if (SubStmt &&
+ !CheckConstexprFunctionStmt(SemaRef, Dcl, SubStmt, ReturnStmts,
+ Cxx1yLoc))
+ return false;
+ return true;
+
+ case Stmt::SwitchStmtClass:
+ case Stmt::CaseStmtClass:
+ case Stmt::DefaultStmtClass:
+ case Stmt::BreakStmtClass:
+ // C++1y allows switch-statements, and since they don't need variable
+ // mutation, we can reasonably allow them in C++11 as an extension.
+ if (!Cxx1yLoc.isValid())
+ Cxx1yLoc = S->getLocStart();
+ for (Stmt *SubStmt : S->children())
+ if (SubStmt &&
+ !CheckConstexprFunctionStmt(SemaRef, Dcl, SubStmt, ReturnStmts,
+ Cxx1yLoc))
+ return false;
+ return true;
+
+ default:
+ if (!isa<Expr>(S))
+ break;
+
+ // C++1y allows expression-statements.
+ if (!Cxx1yLoc.isValid())
+ Cxx1yLoc = S->getLocStart();
+ return true;
+ }
+
+ SemaRef.Diag(S->getLocStart(), diag::err_constexpr_body_invalid_stmt)
+ << isa<CXXConstructorDecl>(Dcl);
+ return false;
+}
+
+/// Check that the body of the given constexpr function declaration contains
+/// only the permitted types of statement. C++11 [dcl.constexpr]p3,p4.
+///
+/// \return true if the body is OK, false if we have diagnosed a problem.
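+///
+/// For example, a function-try-block is never permitted:
+/// @code
+///   constexpr int f() try { return 0; } catch (...) { return 1; }  // error
+/// @endcode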
+bool Sema::CheckConstexprFunctionBody(const FunctionDecl *Dcl, Stmt *Body) {
+ if (isa<CXXTryStmt>(Body)) {
+ // C++11 [dcl.constexpr]p3:
+ // The definition of a constexpr function shall satisfy the following
+ // constraints: [...]
+ // - its function-body shall be = delete, = default, or a
+ // compound-statement
+ //
+ // C++11 [dcl.constexpr]p4:
+ // In the definition of a constexpr constructor, [...]
+ // - its function-body shall not be a function-try-block;
+ Diag(Body->getLocStart(), diag::err_constexpr_function_try_block)
+ << isa<CXXConstructorDecl>(Dcl);
+ return false;
+ }
+
+ SmallVector<SourceLocation, 4> ReturnStmts;
+
+ // - its function-body shall be [...] a compound-statement that contains only
+ // [... list of cases ...]
+ CompoundStmt *CompBody = cast<CompoundStmt>(Body);
+ SourceLocation Cxx1yLoc;
+ for (auto *BodyIt : CompBody->body()) {
+ if (!CheckConstexprFunctionStmt(*this, Dcl, BodyIt, ReturnStmts, Cxx1yLoc))
+ return false;
+ }
+
+ if (Cxx1yLoc.isValid())
+ Diag(Cxx1yLoc,
+ getLangOpts().CPlusPlus14
+ ? diag::warn_cxx11_compat_constexpr_body_invalid_stmt
+ : diag::ext_constexpr_body_invalid_stmt)
+ << isa<CXXConstructorDecl>(Dcl);
+
+ if (const CXXConstructorDecl *Constructor
+ = dyn_cast<CXXConstructorDecl>(Dcl)) {
+ const CXXRecordDecl *RD = Constructor->getParent();
+ // DR1359:
+ // - every non-variant non-static data member and base class sub-object
+ // shall be initialized;
+ // DR1460:
+ // - if the class is a union having variant members, exactly one of them
+ // shall be initialized;
+ if (RD->isUnion()) {
+ if (Constructor->getNumCtorInitializers() == 0 &&
+ RD->hasVariantMembers()) {
+ Diag(Dcl->getLocation(), diag::err_constexpr_union_ctor_no_init);
+ return false;
+ }
+ } else if (!Constructor->isDependentContext() &&
+ !Constructor->isDelegatingConstructor()) {
+ assert(RD->getNumVBases() == 0 && "constexpr ctor with virtual bases");
+
+ // Skip detailed checking if we have enough initializers, and we would
+ // allow at most one initializer per member.
+ bool AnyAnonStructUnionMembers = false;
+ unsigned Fields = 0;
+ for (CXXRecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I != E; ++I, ++Fields) {
+ if (I->isAnonymousStructOrUnion()) {
+ AnyAnonStructUnionMembers = true;
+ break;
+ }
+ }
+ // DR1460:
+ // - if the class is a union-like class, but is not a union, for each of
+ // its anonymous union members having variant members, exactly one of
+ // them shall be initialized;
+ if (AnyAnonStructUnionMembers ||
+ Constructor->getNumCtorInitializers() != RD->getNumBases() + Fields) {
+ // Check initialization of non-static data members. Base classes are
+ // always initialized so do not need to be checked. Dependent bases
+ // might not have initializers in the member initializer list.
+ llvm::SmallSet<Decl*, 16> Inits;
+ for (const auto *I: Constructor->inits()) {
+ if (FieldDecl *FD = I->getMember())
+ Inits.insert(FD);
+ else if (IndirectFieldDecl *ID = I->getIndirectMember())
+ Inits.insert(ID->chain_begin(), ID->chain_end());
+ }
+
+ bool Diagnosed = false;
+ for (auto *I : RD->fields())
+ CheckConstexprCtorInitializer(*this, Dcl, I, Inits, Diagnosed);
+ if (Diagnosed)
+ return false;
+ }
+ }
+ } else {
+ if (ReturnStmts.empty()) {
+ // C++1y doesn't require constexpr functions to contain a 'return'
+ // statement. We still do, unless the return type might be void, because
+ // otherwise if there's no return statement, the function cannot
+ // be used in a core constant expression.
+ bool OK = getLangOpts().CPlusPlus14 &&
+ (Dcl->getReturnType()->isVoidType() ||
+ Dcl->getReturnType()->isDependentType());
+ Diag(Dcl->getLocation(),
+ OK ? diag::warn_cxx11_compat_constexpr_body_no_return
+ : diag::err_constexpr_body_no_return);
+ if (!OK)
+ return false;
+ } else if (ReturnStmts.size() > 1) {
+ Diag(ReturnStmts.back(),
+ getLangOpts().CPlusPlus14
+ ? diag::warn_cxx11_compat_constexpr_body_multiple_return
+ : diag::ext_constexpr_body_multiple_return);
+ for (unsigned I = 0; I < ReturnStmts.size() - 1; ++I)
+ Diag(ReturnStmts[I], diag::note_constexpr_body_previous_return);
+ }
+ }
+
+ // C++11 [dcl.constexpr]p5:
+ // if no function argument values exist such that the function invocation
+ // substitution would produce a constant expression, the program is
+ // ill-formed; no diagnostic required.
+ // C++11 [dcl.constexpr]p3:
+ // - every constructor call and implicit conversion used in initializing the
+ // return value shall be one of those allowed in a constant expression.
+ // C++11 [dcl.constexpr]p4:
+ // - every constructor involved in initializing non-static data members and
+ // base class sub-objects shall be a constexpr constructor.
+ SmallVector<PartialDiagnosticAt, 8> Diags;
+ if (!Expr::isPotentialConstantExpr(Dcl, Diags)) {
+ Diag(Dcl->getLocation(), diag::ext_constexpr_function_never_constant_expr)
+ << isa<CXXConstructorDecl>(Dcl);
+ for (size_t I = 0, N = Diags.size(); I != N; ++I)
+ Diag(Diags[I].first, Diags[I].second);
+ // Don't return false here: we allow this for compatibility in
+ // system headers.
+ }
+
+ return true;
+}
+
+/// isCurrentClassName - Determine whether the identifier II is the
+/// name of the class type currently being defined. In the case of
+/// nested classes, this will only return true if II is the name of
+/// the innermost class.
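+///
+/// For example:
+/// @code
+///   struct Outer {
+///     struct Inner {
+///       // Within 'Inner', only 'Inner' is the current class name.
+///     };
+///   };
+/// @endcode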
+bool Sema::isCurrentClassName(const IdentifierInfo &II, Scope *,
+ const CXXScopeSpec *SS) {
+ assert(getLangOpts().CPlusPlus && "No class names in C!");
+
+ CXXRecordDecl *CurDecl;
+ if (SS && SS->isSet() && !SS->isInvalid()) {
+ DeclContext *DC = computeDeclContext(*SS, true);
+ CurDecl = dyn_cast_or_null<CXXRecordDecl>(DC);
+ } else
+ CurDecl = dyn_cast_or_null<CXXRecordDecl>(CurContext);
+
+ if (CurDecl && CurDecl->getIdentifier())
+ return &II == CurDecl->getIdentifier();
+ return false;
+}
+
+/// \brief Determine whether the identifier II is a typo for the name of
+/// the class type currently being defined. If so, update it to the identifier
+/// that should have been used.
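+///
+/// For example, under the edit-distance heuristic below, 'Vektor' is close
+/// enough to be treated as a typo for the class name 'Vector':
+/// @code
+///   struct Vector {
+///     Vektor(int);  // corrected to the constructor 'Vector(int)'
+///   };
+/// @endcode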
+bool Sema::isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS) {
+ assert(getLangOpts().CPlusPlus && "No class names in C!");
+
+ if (!getLangOpts().SpellChecking)
+ return false;
+
+ CXXRecordDecl *CurDecl;
+ if (SS && SS->isSet() && !SS->isInvalid()) {
+ DeclContext *DC = computeDeclContext(*SS, true);
+ CurDecl = dyn_cast_or_null<CXXRecordDecl>(DC);
+ } else
+ CurDecl = dyn_cast_or_null<CXXRecordDecl>(CurContext);
+
+ if (CurDecl && CurDecl->getIdentifier() && II != CurDecl->getIdentifier() &&
+ 3 * II->getName().edit_distance(CurDecl->getIdentifier()->getName())
+ < II->getLength()) {
+ II = CurDecl->getIdentifier();
+ return true;
+ }
+
+ return false;
+}
+
+/// \brief Determine whether the given class is a base class of the given
+/// class, including looking at dependent bases.
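+///
+/// For example (illustrative), this is diagnosed as circular even though
+/// the base is dependent:
+/// @code
+///   template <typename T> struct X : X<T> {};  // error: circular inheritance
+/// @endcode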
+static bool findCircularInheritance(const CXXRecordDecl *Class,
+ const CXXRecordDecl *Current) {
+ SmallVector<const CXXRecordDecl*, 8> Queue;
+
+ Class = Class->getCanonicalDecl();
+ while (true) {
+ for (const auto &I : Current->bases()) {
+ CXXRecordDecl *Base = I.getType()->getAsCXXRecordDecl();
+ if (!Base)
+ continue;
+
+ Base = Base->getDefinition();
+ if (!Base)
+ continue;
+
+ if (Base->getCanonicalDecl() == Class)
+ return true;
+
+ Queue.push_back(Base);
+ }
+
+ if (Queue.empty())
+ return false;
+
+ Current = Queue.pop_back_val();
+ }
+
+ return false;
+}
+
+/// \brief Check the validity of a C++ base class specifier.
+///
+/// \returns a new CXXBaseSpecifier if well-formed, emits diagnostics
+/// and returns NULL otherwise.
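+///
+/// For example, deriving from a final class is diagnosed here:
+/// @code
+///   struct B final {};
+///   struct D : B {};  // error: base 'B' is marked 'final'
+/// @endcode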
+CXXBaseSpecifier *
+Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
+ SourceRange SpecifierRange,
+ bool Virtual, AccessSpecifier Access,
+ TypeSourceInfo *TInfo,
+ SourceLocation EllipsisLoc) {
+ QualType BaseType = TInfo->getType();
+
+ // C++ [class.union]p1:
+ // A union shall not have base classes.
+ if (Class->isUnion()) {
+ Diag(Class->getLocation(), diag::err_base_clause_on_union)
+ << SpecifierRange;
+ return nullptr;
+ }
+
+ if (EllipsisLoc.isValid() &&
+ !TInfo->getType()->containsUnexpandedParameterPack()) {
+ Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << TInfo->getTypeLoc().getSourceRange();
+ EllipsisLoc = SourceLocation();
+ }
+
+ SourceLocation BaseLoc = TInfo->getTypeLoc().getBeginLoc();
+
+ if (BaseType->isDependentType()) {
+ // Make sure that we don't have circular inheritance among our dependent
+ // bases. For non-dependent bases, the check for completeness below handles
+ // this.
+ if (CXXRecordDecl *BaseDecl = BaseType->getAsCXXRecordDecl()) {
+ if (BaseDecl->getCanonicalDecl() == Class->getCanonicalDecl() ||
+ ((BaseDecl = BaseDecl->getDefinition()) &&
+ findCircularInheritance(Class, BaseDecl))) {
+ Diag(BaseLoc, diag::err_circular_inheritance)
+ << BaseType << Context.getTypeDeclType(Class);
+
+ if (BaseDecl->getCanonicalDecl() != Class->getCanonicalDecl())
+ Diag(BaseDecl->getLocation(), diag::note_previous_decl)
+ << BaseType;
+
+ return nullptr;
+ }
+ }
+
+ return new (Context) CXXBaseSpecifier(SpecifierRange, Virtual,
+ Class->getTagKind() == TTK_Class,
+ Access, TInfo, EllipsisLoc);
+ }
+
+ // Base specifiers must be record types.
+ if (!BaseType->isRecordType()) {
+ Diag(BaseLoc, diag::err_base_must_be_class) << SpecifierRange;
+ return nullptr;
+ }
+
+ // C++ [class.union]p1:
+ // A union shall not be used as a base class.
+ if (BaseType->isUnionType()) {
+ Diag(BaseLoc, diag::err_union_as_base_class) << SpecifierRange;
+ return nullptr;
+ }
+
+ // For the MS ABI, propagate DLL attributes to base class templates.
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ if (Attr *ClassAttr = getDLLAttr(Class)) {
+ if (auto *BaseTemplate = dyn_cast_or_null<ClassTemplateSpecializationDecl>(
+ BaseType->getAsCXXRecordDecl())) {
+ propagateDLLAttrToBaseClassTemplate(Class, ClassAttr, BaseTemplate,
+ BaseLoc);
+ }
+ }
+ }
+
+ // C++ [class.derived]p2:
+ // The class-name in a base-specifier shall not be an incompletely
+ // defined class.
+ if (RequireCompleteType(BaseLoc, BaseType,
+ diag::err_incomplete_base_class, SpecifierRange)) {
+ Class->setInvalidDecl();
+ return nullptr;
+ }
+
+ // If the base class is polymorphic or isn't empty, the new one is/isn't, too.
+ RecordDecl *BaseDecl = BaseType->getAs<RecordType>()->getDecl();
+ assert(BaseDecl && "Record type has no declaration");
+ BaseDecl = BaseDecl->getDefinition();
+ assert(BaseDecl && "Base type is not incomplete, but has no definition");
+ CXXRecordDecl *CXXBaseDecl = cast<CXXRecordDecl>(BaseDecl);
+ assert(CXXBaseDecl && "Base type is not a C++ type");
+
+ // A class which contains a flexible array member is not suitable for use as a
+ // base class:
+ // - If the layout determines that a base comes before another base,
+ // the flexible array member would index into the subsequent base.
+ // - If the layout determines that a base comes before the derived class,
+ // the flexible array member would index into the derived class.
+ if (CXXBaseDecl->hasFlexibleArrayMember()) {
+ Diag(BaseLoc, diag::err_base_class_has_flexible_array_member)
+ << CXXBaseDecl->getDeclName();
+ return nullptr;
+ }
+
+ // C++ [class]p3:
+ // If a class is marked final and it appears as a base-type-specifier in
+ // base-clause, the program is ill-formed.
+ if (FinalAttr *FA = CXXBaseDecl->getAttr<FinalAttr>()) {
+ Diag(BaseLoc, diag::err_class_marked_final_used_as_base)
+ << CXXBaseDecl->getDeclName()
+ << FA->isSpelledAsSealed();
+ Diag(CXXBaseDecl->getLocation(), diag::note_entity_declared_at)
+ << CXXBaseDecl->getDeclName() << FA->getRange();
+ return nullptr;
+ }
+
+ if (BaseDecl->isInvalidDecl())
+ Class->setInvalidDecl();
+
+ // Create the base specifier.
+ return new (Context) CXXBaseSpecifier(SpecifierRange, Virtual,
+ Class->getTagKind() == TTK_Class,
+ Access, TInfo, EllipsisLoc);
+}
+
+/// ActOnBaseSpecifier - Parsed a base specifier. A base specifier is
+/// one entry in the base class list of a class specifier, for
+/// example:
+/// class foo : public bar, virtual private baz {
+/// 'public bar' and 'virtual private baz' are each base-specifiers.
+BaseResult
+Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
+ ParsedAttributes &Attributes,
+ bool Virtual, AccessSpecifier Access,
+ ParsedType basetype, SourceLocation BaseLoc,
+ SourceLocation EllipsisLoc) {
+ if (!classdecl)
+ return true;
+
+ AdjustDeclIfTemplate(classdecl);
+ CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(classdecl);
+ if (!Class)
+ return true;
+
+ // We haven't yet attached the base specifiers.
+ Class->setIsParsingBaseSpecifiers();
+
+ // We do not support any C++11 attributes on base-specifiers yet.
+ // Diagnose any attributes we see.
+ if (!Attributes.empty()) {
+ for (AttributeList *Attr = Attributes.getList(); Attr;
+ Attr = Attr->getNext()) {
+ if (Attr->isInvalid() ||
+ Attr->getKind() == AttributeList::IgnoredAttribute)
+ continue;
+ Diag(Attr->getLoc(),
+ Attr->getKind() == AttributeList::UnknownAttribute
+ ? diag::warn_unknown_attribute_ignored
+ : diag::err_base_specifier_attribute)
+ << Attr->getName();
+ }
+ }
+
+ TypeSourceInfo *TInfo = nullptr;
+ GetTypeFromParser(basetype, &TInfo);
+
+ if (EllipsisLoc.isInvalid() &&
+ DiagnoseUnexpandedParameterPack(SpecifierRange.getBegin(), TInfo,
+ UPPC_BaseType))
+ return true;
+
+ if (CXXBaseSpecifier *BaseSpec = CheckBaseSpecifier(Class, SpecifierRange,
+ Virtual, Access, TInfo,
+ EllipsisLoc))
+ return BaseSpec;
+ else
+ Class->setInvalidDecl();
+
+ return true;
+}
+
+/// Use a small set to collect indirect bases. As this is only used
+/// locally, there's no need to abstract the small size parameter.
+typedef llvm::SmallPtrSet<QualType, 4> IndirectBaseSet;
+
+/// \brief Recursively add the bases of Type. Don't add Type itself.
+static void
+NoteIndirectBases(ASTContext &Context, IndirectBaseSet &Set,
+ const QualType &Type)
+{
+ // Even though the incoming type is a base, it might not be
+ // a class -- it could be a template parm, for instance.
+ if (auto Rec = Type->getAs<RecordType>()) {
+ auto Decl = Rec->getAsCXXRecordDecl();
+
+ // Iterate over its bases.
+ for (const auto &BaseSpec : Decl->bases()) {
+ QualType Base = Context.getCanonicalType(BaseSpec.getType())
+ .getUnqualifiedType();
+ if (Set.insert(Base).second)
+ // If we've not already seen it, recurse.
+ NoteIndirectBases(Context, Set, Base);
+ }
+ }
+}
+
+/// \brief Performs the actual work of attaching the given base class
+/// specifiers to a C++ class.
+bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class,
+ MutableArrayRef<CXXBaseSpecifier *> Bases) {
+ if (Bases.empty())
+ return false;
+
+ // Used to keep track of which base types we have already seen, so
+ // that we can properly diagnose redundant direct base types. Note
+ // that the key is always the unqualified canonical type of the base
+ // class.
+ std::map<QualType, CXXBaseSpecifier*, QualTypeOrdering> KnownBaseTypes;
+
+ // Used to track indirect bases so we can see if a direct base is
+ // ambiguous.
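+ // For example:
+ //   struct A {};
+ //   struct B : A {};
+ //   struct C : A, B {};  // direct base 'A' is inaccessible due to ambiguity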
+ IndirectBaseSet IndirectBaseTypes;
+
+ // Copy non-redundant base specifiers into permanent storage.
+ unsigned NumGoodBases = 0;
+ bool Invalid = false;
+ for (unsigned idx = 0; idx < Bases.size(); ++idx) {
+ QualType NewBaseType
+ = Context.getCanonicalType(Bases[idx]->getType());
+ NewBaseType = NewBaseType.getLocalUnqualifiedType();
+
+ CXXBaseSpecifier *&KnownBase = KnownBaseTypes[NewBaseType];
+ if (KnownBase) {
+ // C++ [class.mi]p3:
+ // A class shall not be specified as a direct base class of a
+ // derived class more than once.
+ Diag(Bases[idx]->getLocStart(),
+ diag::err_duplicate_base_class)
+ << KnownBase->getType()
+ << Bases[idx]->getSourceRange();
+
+ // Delete the duplicate base class specifier; we're going to
+ // overwrite its pointer later.
+ Context.Deallocate(Bases[idx]);
+
+ Invalid = true;
+ } else {
+ // Okay, add this new base class.
+ KnownBase = Bases[idx];
+ Bases[NumGoodBases++] = Bases[idx];
+
+ // Note this base's direct & indirect bases, if there could be ambiguity.
+ if (Bases.size() > 1)
+ NoteIndirectBases(Context, IndirectBaseTypes, NewBaseType);
+
+ if (const RecordType *Record = NewBaseType->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
+ if (Class->isInterface() &&
+ (!RD->isInterface() ||
+ KnownBase->getAccessSpecifier() != AS_public)) {
+ // The Microsoft extension __interface does not permit bases that
+ // are not themselves public interfaces.
+ Diag(KnownBase->getLocStart(), diag::err_invalid_base_in_interface)
+ << getRecordDiagFromTagKind(RD->getTagKind()) << RD->getName()
+ << RD->getSourceRange();
+ Invalid = true;
+ }
+ if (RD->hasAttr<WeakAttr>())
+ Class->addAttr(WeakAttr::CreateImplicit(Context));
+ }
+ }
+ }
+
+ // Attach the remaining base class specifiers to the derived class.
+ Class->setBases(Bases.data(), NumGoodBases);
+
+ for (unsigned idx = 0; idx < NumGoodBases; ++idx) {
+ // Check whether this direct base is inaccessible due to ambiguity.
+ QualType BaseType = Bases[idx]->getType();
+ CanQualType CanonicalBase = Context.getCanonicalType(BaseType)
+ .getUnqualifiedType();
+
+ if (IndirectBaseTypes.count(CanonicalBase)) {
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/true);
+ bool found
+ = Class->isDerivedFrom(CanonicalBase->getAsCXXRecordDecl(), Paths);
+ assert(found);
+ (void)found;
+
+ if (Paths.isAmbiguous(CanonicalBase))
+ Diag(Bases[idx]->getLocStart(), diag::warn_inaccessible_base_class)
+ << BaseType << getAmbiguousPathsDisplayString(Paths)
+ << Bases[idx]->getSourceRange();
+ else
+ assert(Bases[idx]->isVirtual());
+ }
+
+ // Delete the base class specifier, since its data has been copied
+ // into the CXXRecordDecl.
+ Context.Deallocate(Bases[idx]);
+ }
+
+ return Invalid;
+}
+
+/// ActOnBaseSpecifiers - Attach the given base specifiers to the
+/// class, after checking whether there are any duplicate base
+/// classes.
+void Sema::ActOnBaseSpecifiers(Decl *ClassDecl,
+ MutableArrayRef<CXXBaseSpecifier *> Bases) {
+ if (!ClassDecl || Bases.empty())
+ return;
+
+ AdjustDeclIfTemplate(ClassDecl);
+ AttachBaseSpecifiers(cast<CXXRecordDecl>(ClassDecl), Bases);
+}
+
+/// \brief Determine whether the type \p Derived is a C++ class that is
+/// derived from the type \p Base.
+bool Sema::IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base) {
+ if (!getLangOpts().CPlusPlus)
+ return false;
+
+ CXXRecordDecl *DerivedRD = Derived->getAsCXXRecordDecl();
+ if (!DerivedRD)
+ return false;
+
+ CXXRecordDecl *BaseRD = Base->getAsCXXRecordDecl();
+ if (!BaseRD)
+ return false;
+
+ // If either the base or the derived type is invalid, don't try to
+ // check whether one is derived from the other.
+ if (BaseRD->isInvalidDecl() || DerivedRD->isInvalidDecl())
+ return false;
+
+ // FIXME: In a modules build, do we need the entire path to be visible for us
+ // to be able to use the inheritance relationship?
+ if (!isCompleteType(Loc, Derived) && !DerivedRD->isBeingDefined())
+ return false;
+
+ return DerivedRD->isDerivedFrom(BaseRD);
+}
+
+/// \brief Determine whether the type \p Derived is a C++ class that is
+/// derived from the type \p Base.
+bool Sema::IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
+ CXXBasePaths &Paths) {
+ if (!getLangOpts().CPlusPlus)
+ return false;
+
+ CXXRecordDecl *DerivedRD = Derived->getAsCXXRecordDecl();
+ if (!DerivedRD)
+ return false;
+
+ CXXRecordDecl *BaseRD = Base->getAsCXXRecordDecl();
+ if (!BaseRD)
+ return false;
+
+ if (!isCompleteType(Loc, Derived) && !DerivedRD->isBeingDefined())
+ return false;
+
+ return DerivedRD->isDerivedFrom(BaseRD, Paths);
+}
+
+void Sema::BuildBasePathArray(const CXXBasePaths &Paths,
+ CXXCastPath &BasePathArray) {
+ assert(BasePathArray.empty() && "Base path array must be empty!");
+ assert(Paths.isRecordingPaths() && "Must record paths!");
+
+ const CXXBasePath &Path = Paths.front();
+
+ // We first go backward and check if we have a virtual base.
+ // FIXME: It would be better if CXXBasePath had the base specifier for
+ // the nearest virtual base.
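+ // For example, given a path D -> A -> V -> B where V is a virtual base,
+ // the D -> A step is dropped: a virtual base can be reached directly from
+ // the most-derived object, so only the steps from the virtual base onward
+ // matter for the cast.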
+ unsigned Start = 0;
+ for (unsigned I = Path.size(); I != 0; --I) {
+ if (Path[I - 1].Base->isVirtual()) {
+ Start = I - 1;
+ break;
+ }
+ }
+
+ // Now add all bases.
+ for (unsigned I = Start, E = Path.size(); I != E; ++I)
+ BasePathArray.push_back(const_cast<CXXBaseSpecifier*>(Path[I].Base));
+}
+
+/// CheckDerivedToBaseConversion - Check whether the Derived-to-Base
+/// conversion (where Derived and Base are class types) is
+/// well-formed, meaning that the conversion is unambiguous (and
+/// that all of the base classes are accessible). Returns true
+/// and emits a diagnostic if the code is ill-formed, returns false
+/// otherwise. Loc is the location where this routine should point to
+/// if there is an error, and Range is the source range to highlight
+/// if there is an error.
+bool
+Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
+ unsigned InaccessibleBaseID,
+ unsigned AmbigiousBaseConvID,
+ SourceLocation Loc, SourceRange Range,
+ DeclarationName Name,
+ CXXCastPath *BasePath) {
+ // First, determine whether the path from Derived to Base is
+ // ambiguous. This is slightly more expensive than checking whether
+ // the Derived to Base conversion exists, because here we need to
+ // explore multiple paths to determine if there is an ambiguity.
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+ bool DerivationOkay = IsDerivedFrom(Loc, Derived, Base, Paths);
+ assert(DerivationOkay &&
+ "Can only be used with a derived-to-base conversion");
+ (void)DerivationOkay;
+
+ if (!Paths.isAmbiguous(Context.getCanonicalType(Base).getUnqualifiedType())) {
+ if (InaccessibleBaseID) {
+ // Check that the base class can be accessed.
+ switch (CheckBaseClassAccess(Loc, Base, Derived, Paths.front(),
+ InaccessibleBaseID)) {
+ case AR_inaccessible:
+ return true;
+ case AR_accessible:
+ case AR_dependent:
+ case AR_delayed:
+ break;
+ }
+ }
+
+ // Build a base path if necessary.
+ if (BasePath)
+ BuildBasePathArray(Paths, *BasePath);
+ return false;
+ }
+
+ if (AmbigiousBaseConvID) {
+ // We know that the derived-to-base conversion is ambiguous, and
+ // we're going to produce a diagnostic. Perform the derived-to-base
+ // search just one more time to compute all of the possible paths so
+ // that we can print them out. This is more expensive than any of
+ // the previous derived-to-base checks we've done, but at this point
+ // performance isn't as much of an issue.
+ Paths.clear();
+ Paths.setRecordingPaths(true);
+ bool StillOkay = IsDerivedFrom(Loc, Derived, Base, Paths);
+ assert(StillOkay && "Can only be used with a derived-to-base conversion");
+ (void)StillOkay;
+
+ // Build up a textual representation of the ambiguous paths, e.g.,
+ // D -> B -> A, that will be used to illustrate the ambiguous
+ // conversions in the diagnostic. We only print one of the paths
+ // to each base class subobject.
+ std::string PathDisplayStr = getAmbiguousPathsDisplayString(Paths);
+
+ Diag(Loc, AmbigiousBaseConvID)
+ << Derived << Base << PathDisplayStr << Range << Name;
+ }
+ return true;
+}
+
+bool
+Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
+ SourceLocation Loc, SourceRange Range,
+ CXXCastPath *BasePath,
+ bool IgnoreAccess) {
+ return CheckDerivedToBaseConversion(Derived, Base,
+ IgnoreAccess ? 0
+ : diag::err_upcast_to_inaccessible_base,
+ diag::err_ambiguous_derived_to_base_conv,
+ Loc, Range, DeclarationName(),
+ BasePath);
+}
+
+/// @brief Builds a string representing ambiguous paths from a
+/// specific derived class to different subobjects of the same base
+/// class.
+///
+/// This function builds a string that can be used in error messages
+/// to show the different paths that one can take through the
+/// inheritance hierarchy to go from the derived class to different
+/// subobjects of a base class. The result looks something like this:
+/// @code
+/// struct D -> struct B -> struct A
+/// struct D -> struct C -> struct A
+/// @endcode
+std::string Sema::getAmbiguousPathsDisplayString(CXXBasePaths &Paths) {
+ std::string PathDisplayStr;
+ std::set<unsigned> DisplayedPaths;
+ for (CXXBasePaths::paths_iterator Path = Paths.begin();
+ Path != Paths.end(); ++Path) {
+ if (DisplayedPaths.insert(Path->back().SubobjectNumber).second) {
+ // We haven't displayed a path to this particular base
+ // class subobject yet.
+ PathDisplayStr += "\n ";
+ PathDisplayStr += Context.getTypeDeclType(Paths.getOrigin()).getAsString();
+ for (CXXBasePath::const_iterator Element = Path->begin();
+ Element != Path->end(); ++Element)
+ PathDisplayStr += " -> " + Element->Base->getType().getAsString();
+ }
+ }
+
+ return PathDisplayStr;
+}
+
+//===----------------------------------------------------------------------===//
+// C++ class member Handling
+//===----------------------------------------------------------------------===//
+
+/// ActOnAccessSpecifier - Parsed an access specifier followed by a colon.
+bool Sema::ActOnAccessSpecifier(AccessSpecifier Access,
+ SourceLocation ASLoc,
+ SourceLocation ColonLoc,
+ AttributeList *Attrs) {
+ assert(Access != AS_none && "Invalid kind for syntactic access specifier!");
+ AccessSpecDecl *ASDecl = AccessSpecDecl::Create(Context, Access, CurContext,
+ ASLoc, ColonLoc);
+ CurContext->addHiddenDecl(ASDecl);
+ return ProcessAccessDeclAttributeList(ASDecl, Attrs);
+}
+
+/// CheckOverrideControl - Check C++11 override control semantics.
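+///
+/// For example:
+/// @code
+///   struct B { virtual void f(int); };
+///   struct D : B {
+///     virtual void f(long) override;  // error: does not override B::f(int)
+///   };
+/// @endcode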
+void Sema::CheckOverrideControl(NamedDecl *D) {
+ if (D->isInvalidDecl())
+ return;
+
+ // We only care about "override" and "final" declarations.
+ if (!D->hasAttr<OverrideAttr>() && !D->hasAttr<FinalAttr>())
+ return;
+
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D);
+
+ // We can't check dependent instance methods.
+ if (MD && MD->isInstance() &&
+ (MD->getParent()->hasAnyDependentBases() ||
+ MD->getType()->isDependentType()))
+ return;
+
+ if (MD && !MD->isVirtual()) {
+ // If we have a non-virtual method, check if it hides a virtual method.
+ // (In that case, it's most likely the method has the wrong type.)
+ SmallVector<CXXMethodDecl *, 8> OverloadedMethods;
+ FindHiddenVirtualMethods(MD, OverloadedMethods);
+
+ if (!OverloadedMethods.empty()) {
+ if (OverrideAttr *OA = D->getAttr<OverrideAttr>()) {
+ Diag(OA->getLocation(),
+ diag::override_keyword_hides_virtual_member_function)
+ << "override" << (OverloadedMethods.size() > 1);
+ } else if (FinalAttr *FA = D->getAttr<FinalAttr>()) {
+ Diag(FA->getLocation(),
+ diag::override_keyword_hides_virtual_member_function)
+ << (FA->isSpelledAsSealed() ? "sealed" : "final")
+ << (OverloadedMethods.size() > 1);
+ }
+ NoteHiddenVirtualMethods(MD, OverloadedMethods);
+ MD->setInvalidDecl();
+ return;
+ }
+ // Fall through into the general case diagnostic.
+ // FIXME: We might want to attempt typo correction here.
+ }
+
+ if (!MD || !MD->isVirtual()) {
+ if (OverrideAttr *OA = D->getAttr<OverrideAttr>()) {
+ Diag(OA->getLocation(),
+ diag::override_keyword_only_allowed_on_virtual_member_functions)
+ << "override" << FixItHint::CreateRemoval(OA->getLocation());
+ D->dropAttr<OverrideAttr>();
+ }
+ if (FinalAttr *FA = D->getAttr<FinalAttr>()) {
+ Diag(FA->getLocation(),
+ diag::override_keyword_only_allowed_on_virtual_member_functions)
+ << (FA->isSpelledAsSealed() ? "sealed" : "final")
+ << FixItHint::CreateRemoval(FA->getLocation());
+ D->dropAttr<FinalAttr>();
+ }
+ return;
+ }
+
+ // C++11 [class.virtual]p5:
+ // If a function is marked with the virt-specifier override and
+ // does not override a member function of a base class, the program is
+ // ill-formed.
+ bool HasOverriddenMethods =
+ MD->begin_overridden_methods() != MD->end_overridden_methods();
+ if (MD->hasAttr<OverrideAttr>() && !HasOverriddenMethods)
+ Diag(MD->getLocation(), diag::err_function_marked_override_not_overriding)
+ << MD->getDeclName();
+}
+
+void Sema::DiagnoseAbsenceOfOverrideControl(NamedDecl *D) {
+ if (D->isInvalidDecl() || D->hasAttr<OverrideAttr>())
+ return;
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D);
+ if (!MD || MD->isImplicit() || MD->hasAttr<FinalAttr>() ||
+ isa<CXXDestructorDecl>(MD))
+ return;
+
+ SourceLocation Loc = MD->getLocation();
+ SourceLocation SpellingLoc = Loc;
+ if (getSourceManager().isMacroArgExpansion(Loc))
+ SpellingLoc = getSourceManager().getImmediateExpansionRange(Loc).first;
+ SpellingLoc = getSourceManager().getSpellingLoc(SpellingLoc);
+ if (SpellingLoc.isValid() && getSourceManager().isInSystemHeader(SpellingLoc))
+ return;
+
+ if (MD->size_overridden_methods() > 0) {
+ Diag(MD->getLocation(), diag::warn_function_marked_not_override_overriding)
+ << MD->getDeclName();
+ const CXXMethodDecl *OMD = *MD->begin_overridden_methods();
+ Diag(OMD->getLocation(), diag::note_overridden_virtual_function);
+ }
+}
+
+/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
+/// function overrides a virtual member function marked 'final', according to
+/// C++11 [class.virtual]p4.
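+///
+/// For example:
+/// @code
+///   struct B { virtual void f() final; };
+///   struct D : B { void f(); };  // error: overrides a 'final' function
+/// @endcode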
+bool Sema::CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
+ const CXXMethodDecl *Old) {
+ FinalAttr *FA = Old->getAttr<FinalAttr>();
+ if (!FA)
+ return false;
+
+ Diag(New->getLocation(), diag::err_final_function_overridden)
+ << New->getDeclName()
+ << FA->isSpelledAsSealed();
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function);
+ return true;
+}
+
+static bool InitializationHasSideEffects(const FieldDecl &FD) {
+ const Type *T = FD.getType()->getBaseElementTypeUnsafe();
+ // FIXME: Destruction of ObjC lifetime types has side-effects.
+ if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
+ return !RD->isCompleteDefinition() ||
+ !RD->hasTrivialDefaultConstructor() ||
+ !RD->hasTrivialDestructor();
+ return false;
+}
+
+static AttributeList *getMSPropertyAttr(AttributeList *list) {
+ for (AttributeList *it = list; it != nullptr; it = it->getNext())
+ if (it->isDeclspecPropertyAttribute())
+ return it;
+ return nullptr;
+}
+
+/// ActOnCXXMemberDeclarator - This is invoked when a C++ class member
+/// declarator is parsed. 'AS' is the access specifier, 'BW' specifies the
+/// bit-field width if there is one, and 'InitStyle' is set if an in-class
+/// initializer is present (but parsing it has been deferred).
+NamedDecl *
+Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
+ MultiTemplateParamsArg TemplateParameterLists,
+ Expr *BW, const VirtSpecifiers &VS,
+ InClassInitStyle InitStyle) {
+ const DeclSpec &DS = D.getDeclSpec();
+ DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
+ DeclarationName Name = NameInfo.getName();
+ SourceLocation Loc = NameInfo.getLoc();
+
+ // For anonymous bitfields, the location should point to the type.
+ if (Loc.isInvalid())
+ Loc = D.getLocStart();
+
+ Expr *BitWidth = static_cast<Expr*>(BW);
+
+ assert(isa<CXXRecordDecl>(CurContext));
+ assert(!DS.isFriendSpecified());
+
+ bool isFunc = D.isDeclarationOfFunction();
+
+ if (cast<CXXRecordDecl>(CurContext)->isInterface()) {
+ // The Microsoft extension __interface only permits public member functions
+ // and prohibits constructors, destructors, operators, non-public member
+ // functions, static methods and data members.
+ unsigned InvalidDecl;
+ bool ShowDeclName = true;
+ if (!isFunc)
+ InvalidDecl = (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) ? 0 : 1;
+ else if (AS != AS_public)
+ InvalidDecl = 2;
+ else if (DS.getStorageClassSpec() == DeclSpec::SCS_static)
+ InvalidDecl = 3;
+ else switch (Name.getNameKind()) {
+ case DeclarationName::CXXConstructorName:
+ InvalidDecl = 4;
+ ShowDeclName = false;
+ break;
+
+ case DeclarationName::CXXDestructorName:
+ InvalidDecl = 5;
+ ShowDeclName = false;
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ case DeclarationName::CXXConversionFunctionName:
+ InvalidDecl = 6;
+ break;
+
+ default:
+ InvalidDecl = 0;
+ break;
+ }
+
+ if (InvalidDecl) {
+ if (ShowDeclName)
+ Diag(Loc, diag::err_invalid_member_in_interface)
+ << (InvalidDecl-1) << Name;
+ else
+ Diag(Loc, diag::err_invalid_member_in_interface)
+ << (InvalidDecl-1) << "";
+ return nullptr;
+ }
+ }
+
+ // C++ 9.2p6: A member shall not be declared to have automatic storage
+ // duration (auto, register) or with the extern storage-class-specifier.
+ // C++ 7.1.1p8: The mutable specifier can be applied only to names of class
+ // data members and cannot be applied to names declared const or static,
+ // and cannot be applied to reference members.
+ switch (DS.getStorageClassSpec()) {
+ case DeclSpec::SCS_unspecified:
+ case DeclSpec::SCS_typedef:
+ case DeclSpec::SCS_static:
+ break;
+ case DeclSpec::SCS_mutable:
+ if (isFunc) {
+ Diag(DS.getStorageClassSpecLoc(), diag::err_mutable_function);
+
+ // FIXME: It would be nicer if the keyword was ignored only for this
+ // declarator. Otherwise we could get follow-up errors.
+ D.getMutableDeclSpec().ClearStorageClassSpecs();
+ }
+ break;
+ default:
+ Diag(DS.getStorageClassSpecLoc(),
+ diag::err_storageclass_invalid_for_member);
+ D.getMutableDeclSpec().ClearStorageClassSpecs();
+ break;
+ }
+
+ bool isInstField = ((DS.getStorageClassSpec() == DeclSpec::SCS_unspecified ||
+ DS.getStorageClassSpec() == DeclSpec::SCS_mutable) &&
+ !isFunc);
+
+ if (DS.isConstexprSpecified() && isInstField) {
+ SemaDiagnosticBuilder B =
+ Diag(DS.getConstexprSpecLoc(), diag::err_invalid_constexpr_member);
+ SourceLocation ConstexprLoc = DS.getConstexprSpecLoc();
+ if (InitStyle == ICIS_NoInit) {
+ B << 0 << 0;
+ if (D.getDeclSpec().getTypeQualifiers() & DeclSpec::TQ_const)
+ B << FixItHint::CreateRemoval(ConstexprLoc);
+ else {
+ B << FixItHint::CreateReplacement(ConstexprLoc, "const");
+ D.getMutableDeclSpec().ClearConstexprSpec();
+ const char *PrevSpec;
+ unsigned DiagID;
+ bool Failed = D.getMutableDeclSpec().SetTypeQual(
+ DeclSpec::TQ_const, ConstexprLoc, PrevSpec, DiagID, getLangOpts());
+ (void)Failed;
+ assert(!Failed && "Making a constexpr member const shouldn't fail");
+ }
+ } else {
+ B << 1;
+ const char *PrevSpec;
+ unsigned DiagID;
+ if (D.getMutableDeclSpec().SetStorageClassSpec(
+ *this, DeclSpec::SCS_static, ConstexprLoc, PrevSpec, DiagID,
+ Context.getPrintingPolicy())) {
+ assert(DS.getStorageClassSpec() == DeclSpec::SCS_mutable &&
+ "This is the only DeclSpec that should fail to be applied");
+ B << 1;
+ } else {
+ B << 0 << FixItHint::CreateInsertion(ConstexprLoc, "static ");
+ isInstField = false;
+ }
+ }
+ }
+
+ NamedDecl *Member;
+ if (isInstField) {
+ CXXScopeSpec &SS = D.getCXXScopeSpec();
+
+ // Data members must have identifiers for names.
+ if (!Name.isIdentifier()) {
+ Diag(Loc, diag::err_bad_variable_name)
+ << Name;
+ return nullptr;
+ }
+
+ IdentifierInfo *II = Name.getAsIdentifierInfo();
+
+ // Member field could not be with "template" keyword.
+ // So TemplateParameterLists should be empty in this case.
+ if (TemplateParameterLists.size()) {
+ TemplateParameterList* TemplateParams = TemplateParameterLists[0];
+ if (TemplateParams->size()) {
+ // There is no such thing as a member field template.
+ Diag(D.getIdentifierLoc(), diag::err_template_member)
+ << II
+ << SourceRange(TemplateParams->getTemplateLoc(),
+ TemplateParams->getRAngleLoc());
+ } else {
+ // There is an extraneous 'template<>' for this member.
+ Diag(TemplateParams->getTemplateLoc(),
+ diag::err_template_member_noparams)
+ << II
+ << SourceRange(TemplateParams->getTemplateLoc(),
+ TemplateParams->getRAngleLoc());
+ }
+ return nullptr;
+ }
+
+ if (SS.isSet() && !SS.isInvalid()) {
+ // The user provided a superfluous scope specifier inside a class
+ // definition:
+ //
+ // class X {
+ // int X::member;
+ // };
+ if (DeclContext *DC = computeDeclContext(SS, false))
+ diagnoseQualifiedDeclaration(SS, DC, Name, D.getIdentifierLoc());
+ else
+ Diag(D.getIdentifierLoc(), diag::err_member_qualification)
+ << Name << SS.getRange();
+
+ SS.clear();
+ }
+
+ AttributeList *MSPropertyAttr =
+ getMSPropertyAttr(D.getDeclSpec().getAttributes().getList());
+ if (MSPropertyAttr) {
+ Member = HandleMSProperty(S, cast<CXXRecordDecl>(CurContext), Loc, D,
+ BitWidth, InitStyle, AS, MSPropertyAttr);
+ if (!Member)
+ return nullptr;
+ isInstField = false;
+ } else {
+ Member = HandleField(S, cast<CXXRecordDecl>(CurContext), Loc, D,
+ BitWidth, InitStyle, AS);
+ assert(Member && "HandleField never returns null");
+ }
+ } else {
+ Member = HandleDeclarator(S, D, TemplateParameterLists);
+ if (!Member)
+ return nullptr;
+
+ // Only instance fields (non-static data members) can be bit-fields.
+ if (BitWidth) {
+ if (Member->isInvalidDecl()) {
+ // don't emit another diagnostic.
+ } else if (isa<VarDecl>(Member) || isa<VarTemplateDecl>(Member)) {
+ // C++ 9.6p3: A bit-field shall not be a static member.
+ // "static member 'A' cannot be a bit-field"
+ Diag(Loc, diag::err_static_not_bitfield)
+ << Name << BitWidth->getSourceRange();
+ } else if (isa<TypedefDecl>(Member)) {
+ // "typedef member 'x' cannot be a bit-field"
+ Diag(Loc, diag::err_typedef_not_bitfield)
+ << Name << BitWidth->getSourceRange();
+ } else {
+ // A function typedef ("typedef int f(); f a;").
+ // C++ 9.6p3: A bit-field shall have integral or enumeration type.
+ Diag(Loc, diag::err_not_integral_type_bitfield)
+ << Name << cast<ValueDecl>(Member)->getType()
+ << BitWidth->getSourceRange();
+ }
+
+ BitWidth = nullptr;
+ Member->setInvalidDecl();
+ }
+
+ Member->setAccess(AS);
+
+ // If we have declared a member function template or static data member
+ // template, set the access of the templated declaration as well.
+ if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(Member))
+ FunTmpl->getTemplatedDecl()->setAccess(AS);
+ else if (VarTemplateDecl *VarTmpl = dyn_cast<VarTemplateDecl>(Member))
+ VarTmpl->getTemplatedDecl()->setAccess(AS);
+ }
+
+ if (VS.isOverrideSpecified())
+ Member->addAttr(new (Context) OverrideAttr(VS.getOverrideLoc(), Context, 0));
+ if (VS.isFinalSpecified())
+ Member->addAttr(new (Context) FinalAttr(VS.getFinalLoc(), Context,
+ VS.isFinalSpelledSealed()));
+
+ if (VS.getLastLocation().isValid()) {
+ // Update the end location of a method that has virt-specifiers.
+ if (CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(Member))
+ MD->setRangeEnd(VS.getLastLocation());
+ }
+
+ CheckOverrideControl(Member);
+
+ assert((Name || isInstField) && "No identifier for non-field ?");
+
+ if (isInstField) {
+ FieldDecl *FD = cast<FieldDecl>(Member);
+ FieldCollector->Add(FD);
+
+ if (!Diags.isIgnored(diag::warn_unused_private_field, FD->getLocation())) {
+ // Remember all explicit private FieldDecls that have a name, no side
+ // effects and are not part of a dependent type declaration.
+ if (!FD->isImplicit() && FD->getDeclName() &&
+ FD->getAccess() == AS_private &&
+ !FD->hasAttr<UnusedAttr>() &&
+ !FD->getParent()->isDependentContext() &&
+ !InitializationHasSideEffects(*FD))
+ UnusedPrivateFields.insert(FD);
+ }
+ }
+
+ return Member;
+}
+
+namespace {
+ class UninitializedFieldVisitor
+ : public EvaluatedExprVisitor<UninitializedFieldVisitor> {
+ Sema &S;
+ // List of Decls to generate a warning on. Decls are removed from the
+ // list once they become initialized.
+ llvm::SmallPtrSetImpl<ValueDecl*> &Decls;
+ // List of base classes of the record. Bases are removed from the list
+ // after their initializers have been processed.
+ llvm::SmallPtrSetImpl<QualType> &BaseClasses;
+ // Vector of decls to be removed from the Decl set prior to visiting the
+ // nodes. These Decls may have been initialized in the prior initializer.
+ llvm::SmallVector<ValueDecl*, 4> DeclsToRemove;
+ // If non-null, add a note to the warning pointing back to the constructor.
+ const CXXConstructorDecl *Constructor;
+ // Variables to hold state when processing an initializer list. When
+ // InitList is true, special case initialization of FieldDecls matching
+ // InitListFieldDecl.
+ bool InitList;
+ FieldDecl *InitListFieldDecl;
+ llvm::SmallVector<unsigned, 4> InitFieldIndex;
+
+ public:
+ typedef EvaluatedExprVisitor<UninitializedFieldVisitor> Inherited;
+ UninitializedFieldVisitor(Sema &S,
+ llvm::SmallPtrSetImpl<ValueDecl*> &Decls,
+ llvm::SmallPtrSetImpl<QualType> &BaseClasses)
+ : Inherited(S.Context), S(S), Decls(Decls), BaseClasses(BaseClasses),
+ Constructor(nullptr), InitList(false), InitListFieldDecl(nullptr) {}
+
+ // Returns true if the use of ME is not an uninitialized use.
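+ // For example (illustrative), given 'struct S { int a, b; } s;', in
+ // 'X() : s{1, s.a}' the read of 's.a' happens after element 0 of 's'
+ // has been initialized, so it is not an uninitialized use.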
+ bool IsInitListMemberExprInitialized(MemberExpr *ME,
+ bool CheckReferenceOnly) {
+ llvm::SmallVector<FieldDecl*, 4> Fields;
+ bool ReferenceField = false;
+ while (ME) {
+ FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
+ if (!FD)
+ return false;
+ Fields.push_back(FD);
+ if (FD->getType()->isReferenceType())
+ ReferenceField = true;
+ ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParenImpCasts());
+ }
+
+ // Binding a reference to an uninitialized field is not an
+ // uninitialized use.
+ if (CheckReferenceOnly && !ReferenceField)
+ return true;
+
+ llvm::SmallVector<unsigned, 4> UsedFieldIndex;
+ // Discard the first field since it is the field decl that is being
+ // initialized.
+ for (auto I = Fields.rbegin() + 1, E = Fields.rend(); I != E; ++I) {
+ UsedFieldIndex.push_back((*I)->getFieldIndex());
+ }
+
+ for (auto UsedIter = UsedFieldIndex.begin(),
+ UsedEnd = UsedFieldIndex.end(),
+ OrigIter = InitFieldIndex.begin(),
+ OrigEnd = InitFieldIndex.end();
+ UsedIter != UsedEnd && OrigIter != OrigEnd; ++UsedIter, ++OrigIter) {
+ if (*UsedIter < *OrigIter)
+ return true;
+ if (*UsedIter > *OrigIter)
+ break;
+ }
+
+ return false;
+ }
+
+ void HandleMemberExpr(MemberExpr *ME, bool CheckReferenceOnly,
+ bool AddressOf) {
+ if (isa<EnumConstantDecl>(ME->getMemberDecl()))
+ return;
+
+ // FieldME is the inner-most MemberExpr that is not an anonymous struct
+ // or union.
+ MemberExpr *FieldME = ME;
+
+ bool AllPODFields = FieldME->getType().isPODType(S.Context);
+
+ Expr *Base = ME;
+ while (MemberExpr *SubME =
+ dyn_cast<MemberExpr>(Base->IgnoreParenImpCasts())) {
+
+ if (isa<VarDecl>(SubME->getMemberDecl()))
+ return;
+
+ if (FieldDecl *FD = dyn_cast<FieldDecl>(SubME->getMemberDecl()))
+ if (!FD->isAnonymousStructOrUnion())
+ FieldME = SubME;
+
+ if (!FieldME->getType().isPODType(S.Context))
+ AllPODFields = false;
+
+ Base = SubME->getBase();
+ }
+
+ if (!isa<CXXThisExpr>(Base->IgnoreParenImpCasts()))
+ return;
+
+ if (AddressOf && AllPODFields)
+ return;
+
+ ValueDecl* FoundVD = FieldME->getMemberDecl();
+
+ if (ImplicitCastExpr *BaseCast = dyn_cast<ImplicitCastExpr>(Base)) {
+ while (isa<ImplicitCastExpr>(BaseCast->getSubExpr())) {
+ BaseCast = cast<ImplicitCastExpr>(BaseCast->getSubExpr());
+ }
+
+ if (BaseCast->getCastKind() == CK_UncheckedDerivedToBase) {
+ QualType T = BaseCast->getType();
+ if (T->isPointerType() &&
+ BaseClasses.count(T->getPointeeType())) {
+ S.Diag(FieldME->getExprLoc(), diag::warn_base_class_is_uninit)
+ << T->getPointeeType() << FoundVD;
+ }
+ }
+ }
+
+ if (!Decls.count(FoundVD))
+ return;
+
+ const bool IsReference = FoundVD->getType()->isReferenceType();
+
+ if (InitList && !AddressOf && FoundVD == InitListFieldDecl) {
+ // Special checking for initializer lists.
+ if (IsInitListMemberExprInitialized(ME, CheckReferenceOnly)) {
+ return;
+ }
+ } else {
+ // Prevent double warnings on use of unbound references.
+ if (CheckReferenceOnly && !IsReference)
+ return;
+ }
+
+ unsigned diag = IsReference
+ ? diag::warn_reference_field_is_uninit
+ : diag::warn_field_is_uninit;
+ S.Diag(FieldME->getExprLoc(), diag) << FoundVD;
+ if (Constructor)
+ S.Diag(Constructor->getLocation(),
+ diag::note_uninit_in_this_constructor)
+ << (Constructor->isDefaultConstructor() && Constructor->isImplicit());
+ }
+
+ void HandleValue(Expr *E, bool AddressOf) {
+ E = E->IgnoreParens();
+
+ if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ HandleMemberExpr(ME, false /*CheckReferenceOnly*/,
+ AddressOf /*AddressOf*/);
+ return;
+ }
+
+ if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
+ Visit(CO->getCond());
+ HandleValue(CO->getTrueExpr(), AddressOf);
+ HandleValue(CO->getFalseExpr(), AddressOf);
+ return;
+ }
+
+ if (BinaryConditionalOperator *BCO =
+ dyn_cast<BinaryConditionalOperator>(E)) {
+ Visit(BCO->getCond());
+ HandleValue(BCO->getFalseExpr(), AddressOf);
+ return;
+ }
+
+ if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) {
+ HandleValue(OVE->getSourceExpr(), AddressOf);
+ return;
+ }
+
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ switch (BO->getOpcode()) {
+ default:
+ break;
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ HandleValue(BO->getLHS(), AddressOf);
+ Visit(BO->getRHS());
+ return;
+ case BO_Comma:
+ Visit(BO->getLHS());
+ HandleValue(BO->getRHS(), AddressOf);
+ return;
+ }
+ }
+
+ Visit(E);
+ }
+
+ void CheckInitListExpr(InitListExpr *ILE) {
+ InitFieldIndex.push_back(0);
+ for (auto Child : ILE->children()) {
+ if (InitListExpr *SubList = dyn_cast<InitListExpr>(Child)) {
+ CheckInitListExpr(SubList);
+ } else {
+ Visit(Child);
+ }
+ ++InitFieldIndex.back();
+ }
+ InitFieldIndex.pop_back();
+ }
+
+ void CheckInitializer(Expr *E, const CXXConstructorDecl *FieldConstructor,
+ FieldDecl *Field, const Type *BaseClass) {
+ // Remove Decls that may have been initialized in the previous
+ // initializer.
+ for (ValueDecl* VD : DeclsToRemove)
+ Decls.erase(VD);
+ DeclsToRemove.clear();
+
+ Constructor = FieldConstructor;
+ InitListExpr *ILE = dyn_cast<InitListExpr>(E);
+
+ if (ILE && Field) {
+ InitList = true;
+ InitListFieldDecl = Field;
+ InitFieldIndex.clear();
+ CheckInitListExpr(ILE);
+ } else {
+ InitList = false;
+ Visit(E);
+ }
+
+ if (Field)
+ Decls.erase(Field);
+ if (BaseClass)
+ BaseClasses.erase(BaseClass->getCanonicalTypeInternal());
+ }
+
+ void VisitMemberExpr(MemberExpr *ME) {
+ // All uses of unbound reference fields will warn.
+ HandleMemberExpr(ME, true /*CheckReferenceOnly*/, false /*AddressOf*/);
+ }
+
+ void VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ if (E->getCastKind() == CK_LValueToRValue) {
+ HandleValue(E->getSubExpr(), false /*AddressOf*/);
+ return;
+ }
+
+ Inherited::VisitImplicitCastExpr(E);
+ }
+
+ void VisitCXXConstructExpr(CXXConstructExpr *E) {
+ if (E->getConstructor()->isCopyConstructor()) {
+ Expr *ArgExpr = E->getArg(0);
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(ArgExpr))
+ if (ILE->getNumInits() == 1)
+ ArgExpr = ILE->getInit(0);
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgExpr))
+ if (ICE->getCastKind() == CK_NoOp)
+ ArgExpr = ICE->getSubExpr();
+ HandleValue(ArgExpr, false /*AddressOf*/);
+ return;
+ }
+ Inherited::VisitCXXConstructExpr(E);
+ }
+
+ void VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
+ Expr *Callee = E->getCallee();
+ if (isa<MemberExpr>(Callee)) {
+ HandleValue(Callee, false /*AddressOf*/);
+ for (auto Arg : E->arguments())
+ Visit(Arg);
+ return;
+ }
+
+ Inherited::VisitCXXMemberCallExpr(E);
+ }
+
+ void VisitCallExpr(CallExpr *E) {
+ // Treat std::move as a use.
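+ // (Illustrative: in a mem-initializer such as 'x(std::move(y))', the call
+ // reads 'y', so the argument is treated as a value use of the field.)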
+ if (E->getNumArgs() == 1) {
+ if (FunctionDecl *FD = E->getDirectCallee()) {
+ if (FD->isInStdNamespace() && FD->getIdentifier() &&
+ FD->getIdentifier()->isStr("move")) {
+ HandleValue(E->getArg(0), false /*AddressOf*/);
+ return;
+ }
+ }
+ }
+
+ Inherited::VisitCallExpr(E);
+ }
+
+ void VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
+ Expr *Callee = E->getCallee();
+
+ if (isa<UnresolvedLookupExpr>(Callee))
+ return Inherited::VisitCXXOperatorCallExpr(E);
+
+ Visit(Callee);
+ for (auto Arg : E->arguments())
+ HandleValue(Arg->IgnoreParenImpCasts(), false /*AddressOf*/);
+ }
+
+ void VisitBinaryOperator(BinaryOperator *E) {
+ // If a field assignment is detected, remove the field from the
+ // uninitialized field set.
+ if (E->getOpcode() == BO_Assign)
+ if (MemberExpr *ME = dyn_cast<MemberExpr>(E->getLHS()))
+ if (FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()))
+ if (!FD->getType()->isReferenceType())
+ DeclsToRemove.push_back(FD);
+
+ if (E->isCompoundAssignmentOp()) {
+ HandleValue(E->getLHS(), false /*AddressOf*/);
+ Visit(E->getRHS());
+ return;
+ }
+
+ Inherited::VisitBinaryOperator(E);
+ }
+
+ void VisitUnaryOperator(UnaryOperator *E) {
+ if (E->isIncrementDecrementOp()) {
+ HandleValue(E->getSubExpr(), false /*AddressOf*/);
+ return;
+ }
+ if (E->getOpcode() == UO_AddrOf) {
+ if (MemberExpr *ME = dyn_cast<MemberExpr>(E->getSubExpr())) {
+ HandleValue(ME->getBase(), true /*AddressOf*/);
+ return;
+ }
+ }
+
+ Inherited::VisitUnaryOperator(E);
+ }
+ };
+
+ // Diagnose value-uses of fields to initialize themselves, e.g.
+ // foo(foo)
+ // where foo is not also a parameter to the constructor.
+ // Also diagnose cross-field uninitialized use, such as
+ // x(y), y(x)
+ // TODO: implement -Wuninitialized and fold this into that framework.
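+ // Illustrative sketch: in
+ //   struct A { int x; int y; A() : x(y), y(x) {} };
+ // the read of 'y' in 'x(y)' happens before 'y' is initialized and warns;
+ // by the time 'y(x)' runs, 'x' has already been initialized, so it is not
+ // diagnosed.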
+ static void DiagnoseUninitializedFields(
+ Sema &SemaRef, const CXXConstructorDecl *Constructor) {
+
+ if (SemaRef.getDiagnostics().isIgnored(diag::warn_field_is_uninit,
+ Constructor->getLocation())) {
+ return;
+ }
+
+ if (Constructor->isInvalidDecl())
+ return;
+
+ const CXXRecordDecl *RD = Constructor->getParent();
+
+ if (RD->getDescribedClassTemplate())
+ return;
+
+ // Holds fields that are uninitialized.
+ llvm::SmallPtrSet<ValueDecl*, 4> UninitializedFields;
+
+ // At the beginning, all fields are uninitialized.
+ for (auto *I : RD->decls()) {
+ if (auto *FD = dyn_cast<FieldDecl>(I)) {
+ UninitializedFields.insert(FD);
+ } else if (auto *IFD = dyn_cast<IndirectFieldDecl>(I)) {
+ UninitializedFields.insert(IFD->getAnonField());
+ }
+ }
+
+ llvm::SmallPtrSet<QualType, 4> UninitializedBaseClasses;
+ for (auto I : RD->bases())
+ UninitializedBaseClasses.insert(I.getType().getCanonicalType());
+
+ if (UninitializedFields.empty() && UninitializedBaseClasses.empty())
+ return;
+
+ UninitializedFieldVisitor UninitializedChecker(SemaRef,
+ UninitializedFields,
+ UninitializedBaseClasses);
+
+ for (const auto *FieldInit : Constructor->inits()) {
+ if (UninitializedFields.empty() && UninitializedBaseClasses.empty())
+ break;
+
+ Expr *InitExpr = FieldInit->getInit();
+ if (!InitExpr)
+ continue;
+
+ if (CXXDefaultInitExpr *Default =
+ dyn_cast<CXXDefaultInitExpr>(InitExpr)) {
+ InitExpr = Default->getExpr();
+ if (!InitExpr)
+ continue;
+ // In-class initializers will point to the constructor.
+ UninitializedChecker.CheckInitializer(InitExpr, Constructor,
+ FieldInit->getAnyMember(),
+ FieldInit->getBaseClass());
+ } else {
+ UninitializedChecker.CheckInitializer(InitExpr, nullptr,
+ FieldInit->getAnyMember(),
+ FieldInit->getBaseClass());
+ }
+ }
+ }
+} // namespace
+
+/// \brief Enter a new C++ default initializer scope. After calling this, the
+/// caller must call \ref ActOnFinishCXXInClassMemberInitializer, even if
+/// parsing or instantiating the initializer failed.
+void Sema::ActOnStartCXXInClassMemberInitializer() {
+ // Create a synthetic function scope to represent the call to the constructor
+ // that notionally surrounds a use of this initializer.
+ PushFunctionScope();
+}
+
+/// \brief This is invoked after parsing an in-class initializer for a
+/// non-static C++ class member, and after instantiating an in-class initializer
+/// in a class template. Such actions are deferred until the class is complete.
+void Sema::ActOnFinishCXXInClassMemberInitializer(Decl *D,
+ SourceLocation InitLoc,
+ Expr *InitExpr) {
+ // Pop the notional constructor scope we created earlier.
+ PopFunctionScopeInfo(nullptr, D);
+
+ FieldDecl *FD = dyn_cast<FieldDecl>(D);
+ assert((isa<MSPropertyDecl>(D) || FD->getInClassInitStyle() != ICIS_NoInit) &&
+ "must set init style when field is created");
+
+ if (!InitExpr) {
+ D->setInvalidDecl();
+ if (FD)
+ FD->removeInClassInitializer();
+ return;
+ }
+
+ if (DiagnoseUnexpandedParameterPack(InitExpr, UPPC_Initializer)) {
+ FD->setInvalidDecl();
+ FD->removeInClassInitializer();
+ return;
+ }
+
+ ExprResult Init = InitExpr;
+ if (!FD->getType()->isDependentType() && !InitExpr->isTypeDependent()) {
+ InitializedEntity Entity = InitializedEntity::InitializeMember(FD);
+ InitializationKind Kind = FD->getInClassInitStyle() == ICIS_ListInit
+ ? InitializationKind::CreateDirectList(InitExpr->getLocStart())
+ : InitializationKind::CreateCopy(InitExpr->getLocStart(), InitLoc);
+ InitializationSequence Seq(*this, Entity, Kind, InitExpr);
+ Init = Seq.Perform(*this, Entity, Kind, InitExpr);
+ if (Init.isInvalid()) {
+ FD->setInvalidDecl();
+ return;
+ }
+ }
+
+ // C++11 [class.base.init]p7:
+ // The initialization of each base and member constitutes a
+ // full-expression.
+ Init = ActOnFinishFullExpr(Init.get(), InitLoc);
+ if (Init.isInvalid()) {
+ FD->setInvalidDecl();
+ return;
+ }
+
+ InitExpr = Init.get();
+
+ FD->setInClassInitializer(InitExpr);
+}
+
+/// \brief Find the direct and/or virtual base specifiers that
+/// correspond to the given base type, for use in base initialization
+/// within a constructor.
+static bool FindBaseInitializer(Sema &SemaRef,
+ CXXRecordDecl *ClassDecl,
+ QualType BaseType,
+ const CXXBaseSpecifier *&DirectBaseSpec,
+ const CXXBaseSpecifier *&VirtualBaseSpec) {
+ // First, check for a direct base class.
+ DirectBaseSpec = nullptr;
+ for (const auto &Base : ClassDecl->bases()) {
+ if (SemaRef.Context.hasSameUnqualifiedType(BaseType, Base.getType())) {
+ // We found a direct base of this type. That's what we're
+ // initializing.
+ DirectBaseSpec = &Base;
+ break;
+ }
+ }
+
+ // Check for a virtual base class.
+ // FIXME: We might be able to short-circuit this if we know in advance that
+ // there are no virtual bases.
+ VirtualBaseSpec = nullptr;
+ if (!DirectBaseSpec || !DirectBaseSpec->isVirtual()) {
+ // We haven't found a base yet; search the class hierarchy for a
+ // virtual base class.
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+ if (SemaRef.IsDerivedFrom(ClassDecl->getLocation(),
+ SemaRef.Context.getTypeDeclType(ClassDecl),
+ BaseType, Paths)) {
+ for (CXXBasePaths::paths_iterator Path = Paths.begin();
+ Path != Paths.end(); ++Path) {
+ if (Path->back().Base->isVirtual()) {
+ VirtualBaseSpec = Path->back().Base;
+ break;
+ }
+ }
+ }
+ }
+
+ return DirectBaseSpec || VirtualBaseSpec;
+}
+
+/// \brief Handle a C++ member initializer using braced-init-list syntax.
+MemInitResult
+Sema::ActOnMemInitializer(Decl *ConstructorD,
+ Scope *S,
+ CXXScopeSpec &SS,
+ IdentifierInfo *MemberOrBase,
+ ParsedType TemplateTypeTy,
+ const DeclSpec &DS,
+ SourceLocation IdLoc,
+ Expr *InitList,
+ SourceLocation EllipsisLoc) {
+ return BuildMemInitializer(ConstructorD, S, SS, MemberOrBase, TemplateTypeTy,
+ DS, IdLoc, InitList,
+ EllipsisLoc);
+}
+
+/// \brief Handle a C++ member initializer using parentheses syntax.
+MemInitResult
+Sema::ActOnMemInitializer(Decl *ConstructorD,
+ Scope *S,
+ CXXScopeSpec &SS,
+ IdentifierInfo *MemberOrBase,
+ ParsedType TemplateTypeTy,
+ const DeclSpec &DS,
+ SourceLocation IdLoc,
+ SourceLocation LParenLoc,
+ ArrayRef<Expr *> Args,
+ SourceLocation RParenLoc,
+ SourceLocation EllipsisLoc) {
+ Expr *List = new (Context) ParenListExpr(Context, LParenLoc,
+ Args, RParenLoc);
+ return BuildMemInitializer(ConstructorD, S, SS, MemberOrBase, TemplateTypeTy,
+ DS, IdLoc, List, EllipsisLoc);
+}
+
+namespace {
+
+// Callback to only accept typo corrections that can be a valid C++ member
+// initializer: either a non-static data member or a base class.
+class MemInitializerValidatorCCC : public CorrectionCandidateCallback {
+public:
+ explicit MemInitializerValidatorCCC(CXXRecordDecl *ClassDecl)
+ : ClassDecl(ClassDecl) {}
+
+ bool ValidateCandidate(const TypoCorrection &candidate) override {
+ if (NamedDecl *ND = candidate.getCorrectionDecl()) {
+ if (FieldDecl *Member = dyn_cast<FieldDecl>(ND))
+ return Member->getDeclContext()->getRedeclContext()->Equals(ClassDecl);
+ return isa<TypeDecl>(ND);
+ }
+ return false;
+ }
+
+private:
+ CXXRecordDecl *ClassDecl;
+};
+
+} // namespace
+
+/// \brief Handle a C++ member initializer.
+MemInitResult
+Sema::BuildMemInitializer(Decl *ConstructorD,
+ Scope *S,
+ CXXScopeSpec &SS,
+ IdentifierInfo *MemberOrBase,
+ ParsedType TemplateTypeTy,
+ const DeclSpec &DS,
+ SourceLocation IdLoc,
+ Expr *Init,
+ SourceLocation EllipsisLoc) {
+ ExprResult Res = CorrectDelayedTyposInExpr(Init);
+ if (!Res.isUsable())
+ return true;
+ Init = Res.get();
+
+ if (!ConstructorD)
+ return true;
+
+ AdjustDeclIfTemplate(ConstructorD);
+
+ CXXConstructorDecl *Constructor
+ = dyn_cast<CXXConstructorDecl>(ConstructorD);
+ if (!Constructor) {
+ // The user wrote a constructor initializer on a function that is
+ // not a C++ constructor. Ignore the error for now, because we may
+ // have more member initializers coming; we'll diagnose it just
+ // once in ActOnMemInitializers.
+ return true;
+ }
+
+ CXXRecordDecl *ClassDecl = Constructor->getParent();
+
+ // C++ [class.base.init]p2:
+ // Names in a mem-initializer-id are looked up in the scope of the
+ // constructor's class and, if not found in that scope, are looked
+ // up in the scope containing the constructor's definition.
+ // [Note: if the constructor's class contains a member with the
+ // same name as a direct or virtual base class of the class, a
+ // mem-initializer-id naming the member or base class and composed
+ // of a single identifier refers to the class member. A
+ // mem-initializer-id for the hidden base class may be specified
+ // using a qualified name. ]
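+ // Sketch of the hidden-base case: in
+ //   struct B {}; struct D : B { int B; D() : B(0) {} };
+ // the bare identifier 'B' names the data member; initializing the hidden
+ // base class would require a qualified name.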
+ if (!SS.getScopeRep() && !TemplateTypeTy) {
+ // Look for a member, first.
+ DeclContext::lookup_result Result = ClassDecl->lookup(MemberOrBase);
+ if (!Result.empty()) {
+ ValueDecl *Member;
+ if ((Member = dyn_cast<FieldDecl>(Result.front())) ||
+ (Member = dyn_cast<IndirectFieldDecl>(Result.front()))) {
+ if (EllipsisLoc.isValid())
+ Diag(EllipsisLoc, diag::err_pack_expansion_member_init)
+ << MemberOrBase
+ << SourceRange(IdLoc, Init->getSourceRange().getEnd());
+
+ return BuildMemberInitializer(Member, Init, IdLoc);
+ }
+ }
+ }
+ // It didn't name a member, so see if it names a class.
+ QualType BaseType;
+ TypeSourceInfo *TInfo = nullptr;
+
+ if (TemplateTypeTy) {
+ BaseType = GetTypeFromParser(TemplateTypeTy, &TInfo);
+ } else if (DS.getTypeSpecType() == TST_decltype) {
+ BaseType = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc());
+ } else {
+ LookupResult R(*this, MemberOrBase, IdLoc, LookupOrdinaryName);
+ LookupParsedName(R, S, &SS);
+
+ TypeDecl *TyD = R.getAsSingle<TypeDecl>();
+ if (!TyD) {
+ if (R.isAmbiguous()) return true;
+
+ // We don't want access-control diagnostics here.
+ R.suppressDiagnostics();
+
+ if (SS.isSet() && isDependentScopeSpecifier(SS)) {
+ bool NotUnknownSpecialization = false;
+ DeclContext *DC = computeDeclContext(SS, false);
+ if (CXXRecordDecl *Record = dyn_cast_or_null<CXXRecordDecl>(DC))
+ NotUnknownSpecialization = !Record->hasAnyDependentBases();
+
+ if (!NotUnknownSpecialization) {
+ // When the scope specifier can refer to a member of an unknown
+ // specialization, we take it as a type name.
+ BaseType = CheckTypenameType(ETK_None, SourceLocation(),
+ SS.getWithLocInContext(Context),
+ *MemberOrBase, IdLoc);
+ if (BaseType.isNull())
+ return true;
+
+ R.clear();
+ R.setLookupName(MemberOrBase);
+ }
+ }
+
+ // If no results were found, try to correct typos.
+ TypoCorrection Corr;
+ if (R.empty() && BaseType.isNull() &&
+ (Corr = CorrectTypo(
+ R.getLookupNameInfo(), R.getLookupKind(), S, &SS,
+ llvm::make_unique<MemInitializerValidatorCCC>(ClassDecl),
+ CTK_ErrorRecovery, ClassDecl))) {
+ if (FieldDecl *Member = Corr.getCorrectionDeclAs<FieldDecl>()) {
+ // We have found a non-static data member with a similar
+ // name to what was typed; complain and initialize that
+ // member.
+ diagnoseTypo(Corr,
+ PDiag(diag::err_mem_init_not_member_or_class_suggest)
+ << MemberOrBase << true);
+ return BuildMemberInitializer(Member, Init, IdLoc);
+ } else if (TypeDecl *Type = Corr.getCorrectionDeclAs<TypeDecl>()) {
+ const CXXBaseSpecifier *DirectBaseSpec;
+ const CXXBaseSpecifier *VirtualBaseSpec;
+ if (FindBaseInitializer(*this, ClassDecl,
+ Context.getTypeDeclType(Type),
+ DirectBaseSpec, VirtualBaseSpec)) {
+ // We have found a direct or virtual base class with a
+ // similar name to what was typed; complain and initialize
+ // that base class.
+ diagnoseTypo(Corr,
+ PDiag(diag::err_mem_init_not_member_or_class_suggest)
+ << MemberOrBase << false,
+ PDiag() /*Suppress note, we provide our own.*/);
+
+ const CXXBaseSpecifier *BaseSpec = DirectBaseSpec ? DirectBaseSpec
+ : VirtualBaseSpec;
+ Diag(BaseSpec->getLocStart(),
+ diag::note_base_class_specified_here)
+ << BaseSpec->getType()
+ << BaseSpec->getSourceRange();
+
+ TyD = Type;
+ }
+ }
+ }
+
+ if (!TyD && BaseType.isNull()) {
+ Diag(IdLoc, diag::err_mem_init_not_member_or_class)
+ << MemberOrBase << SourceRange(IdLoc, Init->getSourceRange().getEnd());
+ return true;
+ }
+ }
+
+ if (BaseType.isNull()) {
+ BaseType = Context.getTypeDeclType(TyD);
+ MarkAnyDeclReferenced(TyD->getLocation(), TyD, /*OdrUse=*/false);
+ if (SS.isSet()) {
+ BaseType = Context.getElaboratedType(ETK_None, SS.getScopeRep(),
+ BaseType);
+ TInfo = Context.CreateTypeSourceInfo(BaseType);
+ ElaboratedTypeLoc TL = TInfo->getTypeLoc().castAs<ElaboratedTypeLoc>();
+ TL.getNamedTypeLoc().castAs<TypeSpecTypeLoc>().setNameLoc(IdLoc);
+ TL.setElaboratedKeywordLoc(SourceLocation());
+ TL.setQualifierLoc(SS.getWithLocInContext(Context));
+ }
+ }
+ }
+
+ if (!TInfo)
+ TInfo = Context.getTrivialTypeSourceInfo(BaseType, IdLoc);
+
+ return BuildBaseInitializer(BaseType, TInfo, Init, ClassDecl, EllipsisLoc);
+}
+
+/// Checks a member initializer expression for cases where reference (or
+/// pointer) members are bound to by-value parameters (or their addresses).
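+/// A hypothetical case this catches:
+///   struct S { const int &R; S(int P) : R(P) {} };
+/// where 'P' is destroyed when the constructor returns, leaving 'R'
+/// dangling.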
+static void CheckForDanglingReferenceOrPointer(Sema &S, ValueDecl *Member,
+ Expr *Init,
+ SourceLocation IdLoc) {
+ QualType MemberTy = Member->getType();
+
+ // We only handle pointers and references currently.
+ // FIXME: Would this be relevant for ObjC object pointers? Or block pointers?
+ if (!MemberTy->isReferenceType() && !MemberTy->isPointerType())
+ return;
+
+ const bool IsPointer = MemberTy->isPointerType();
+ if (IsPointer) {
+ if (const UnaryOperator *Op
+ = dyn_cast<UnaryOperator>(Init->IgnoreParenImpCasts())) {
+ // The only case we're worried about with pointers requires taking the
+ // address.
+ if (Op->getOpcode() != UO_AddrOf)
+ return;
+
+ Init = Op->getSubExpr();
+ } else {
+ // We only handle address-of expression initializers for pointers.
+ return;
+ }
+ }
+
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Init->IgnoreParens())) {
+ // We only warn when referring to a non-reference parameter declaration.
+ const ParmVarDecl *Parameter = dyn_cast<ParmVarDecl>(DRE->getDecl());
+ if (!Parameter || Parameter->getType()->isReferenceType())
+ return;
+
+ S.Diag(Init->getExprLoc(),
+ IsPointer ? diag::warn_init_ptr_member_to_parameter_addr
+ : diag::warn_bind_ref_member_to_parameter)
+ << Member << Parameter << Init->getSourceRange();
+ } else {
+ // Other initializers are fine.
+ return;
+ }
+
+ S.Diag(Member->getLocation(), diag::note_ref_or_ptr_member_declared_here)
+ << (unsigned)IsPointer;
+}
+
+MemInitResult
+Sema::BuildMemberInitializer(ValueDecl *Member, Expr *Init,
+ SourceLocation IdLoc) {
+ FieldDecl *DirectMember = dyn_cast<FieldDecl>(Member);
+ IndirectFieldDecl *IndirectMember = dyn_cast<IndirectFieldDecl>(Member);
+ assert((DirectMember || IndirectMember) &&
+ "Member must be a FieldDecl or IndirectFieldDecl");
+
+ if (DiagnoseUnexpandedParameterPack(Init, UPPC_Initializer))
+ return true;
+
+ if (Member->isInvalidDecl())
+ return true;
+
+ MultiExprArg Args;
+ if (ParenListExpr *ParenList = dyn_cast<ParenListExpr>(Init)) {
+ Args = MultiExprArg(ParenList->getExprs(), ParenList->getNumExprs());
+ } else if (InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) {
+ Args = MultiExprArg(InitList->getInits(), InitList->getNumInits());
+ } else {
+ // Template instantiation doesn't reconstruct ParenListExprs for us.
+ Args = Init;
+ }
+
+ SourceRange InitRange = Init->getSourceRange();
+
+ if (Member->getType()->isDependentType() || Init->isTypeDependent()) {
+ // Can't check initialization for a member of dependent type or when
+ // any of the arguments are type-dependent expressions.
+ DiscardCleanupsInEvaluationContext();
+ } else {
+ bool InitList = false;
+ if (isa<InitListExpr>(Init)) {
+ InitList = true;
+ Args = Init;
+ }
+
+ // Initialize the member.
+ InitializedEntity MemberEntity =
+ DirectMember ? InitializedEntity::InitializeMember(DirectMember, nullptr)
+ : InitializedEntity::InitializeMember(IndirectMember,
+ nullptr);
+ InitializationKind Kind =
+ InitList ? InitializationKind::CreateDirectList(IdLoc)
+ : InitializationKind::CreateDirect(IdLoc, InitRange.getBegin(),
+ InitRange.getEnd());
+
+ InitializationSequence InitSeq(*this, MemberEntity, Kind, Args);
+ ExprResult MemberInit = InitSeq.Perform(*this, MemberEntity, Kind, Args,
+ nullptr);
+ if (MemberInit.isInvalid())
+ return true;
+
+ CheckForDanglingReferenceOrPointer(*this, Member, MemberInit.get(), IdLoc);
+
+ // C++11 [class.base.init]p7:
+ // The initialization of each base and member constitutes a
+ // full-expression.
+ MemberInit = ActOnFinishFullExpr(MemberInit.get(), InitRange.getBegin());
+ if (MemberInit.isInvalid())
+ return true;
+
+ Init = MemberInit.get();
+ }
+
+ if (DirectMember) {
+ return new (Context) CXXCtorInitializer(Context, DirectMember, IdLoc,
+ InitRange.getBegin(), Init,
+ InitRange.getEnd());
+ } else {
+ return new (Context) CXXCtorInitializer(Context, IndirectMember, IdLoc,
+ InitRange.getBegin(), Init,
+ InitRange.getEnd());
+ }
+}
+
+MemInitResult
+Sema::BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init,
+ CXXRecordDecl *ClassDecl) {
+ SourceLocation NameLoc = TInfo->getTypeLoc().getLocalSourceRange().getBegin();
+ if (!LangOpts.CPlusPlus11)
+ return Diag(NameLoc, diag::err_delegating_ctor)
+ << TInfo->getTypeLoc().getLocalSourceRange();
+ Diag(NameLoc, diag::warn_cxx98_compat_delegating_ctor);
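+ // Illustrative: 'struct S { S() : S(42) {} S(int); };' delegates S() to
+ // S(int); this is ill-formed before C++11, hence the error and the
+ // compatibility warning above.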
+
+ bool InitList = true;
+ MultiExprArg Args = Init;
+ if (ParenListExpr *ParenList = dyn_cast<ParenListExpr>(Init)) {
+ InitList = false;
+ Args = MultiExprArg(ParenList->getExprs(), ParenList->getNumExprs());
+ }
+
+ SourceRange InitRange = Init->getSourceRange();
+ // Initialize the object.
+ InitializedEntity DelegationEntity = InitializedEntity::InitializeDelegation(
+ QualType(ClassDecl->getTypeForDecl(), 0));
+ InitializationKind Kind =
+ InitList ? InitializationKind::CreateDirectList(NameLoc)
+ : InitializationKind::CreateDirect(NameLoc, InitRange.getBegin(),
+ InitRange.getEnd());
+ InitializationSequence InitSeq(*this, DelegationEntity, Kind, Args);
+ ExprResult DelegationInit = InitSeq.Perform(*this, DelegationEntity, Kind,
+ Args, nullptr);
+ if (DelegationInit.isInvalid())
+ return true;
+
+ assert(cast<CXXConstructExpr>(DelegationInit.get())->getConstructor() &&
+ "Delegating constructor with no target?");
+
+ // C++11 [class.base.init]p7:
+ // The initialization of each base and member constitutes a
+ // full-expression.
+ DelegationInit = ActOnFinishFullExpr(DelegationInit.get(),
+ InitRange.getBegin());
+ if (DelegationInit.isInvalid())
+ return true;
+
+ // If we are in a dependent context, template instantiation will
+ // perform this type-checking again. Just save the arguments that we
+ // received in a ParenListExpr.
+ // FIXME: This isn't quite ideal, since our ASTs don't capture all
+ // of the information that we have about the base
+ // initializer. However, deconstructing the ASTs is a dicey process,
+ // and this approach is far more likely to get the corner cases right.
+ if (CurContext->isDependentContext())
+ DelegationInit = Init;
+
+ return new (Context) CXXCtorInitializer(Context, TInfo, InitRange.getBegin(),
+ DelegationInit.getAs<Expr>(),
+ InitRange.getEnd());
+}
+
+MemInitResult
+Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
+ Expr *Init, CXXRecordDecl *ClassDecl,
+ SourceLocation EllipsisLoc) {
+ SourceLocation BaseLoc
+ = BaseTInfo->getTypeLoc().getLocalSourceRange().getBegin();
+
+ if (!BaseType->isDependentType() && !BaseType->isRecordType())
+ return Diag(BaseLoc, diag::err_base_init_does_not_name_class)
+ << BaseType << BaseTInfo->getTypeLoc().getLocalSourceRange();
+
+ // C++ [class.base.init]p2:
+ // [...] Unless the mem-initializer-id names a nonstatic data
+ // member of the constructor's class or a direct or virtual base
+ // of that class, the mem-initializer is ill-formed. A
+ // mem-initializer-list can initialize a base class using any
+ // name that denotes that base class type.
+ bool Dependent = BaseType->isDependentType() || Init->isTypeDependent();
+
+ SourceRange InitRange = Init->getSourceRange();
+ if (EllipsisLoc.isValid()) {
+ // This is a pack expansion.
+ if (!BaseType->containsUnexpandedParameterPack()) {
+ Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << SourceRange(BaseLoc, InitRange.getEnd());
+
+ EllipsisLoc = SourceLocation();
+ }
+ } else {
+ // Check for any unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(BaseLoc, BaseTInfo, UPPC_Initializer))
+ return true;
+
+ if (DiagnoseUnexpandedParameterPack(Init, UPPC_Initializer))
+ return true;
+ }
+
+ // Check for direct and virtual base classes.
+ const CXXBaseSpecifier *DirectBaseSpec = nullptr;
+ const CXXBaseSpecifier *VirtualBaseSpec = nullptr;
+ if (!Dependent) {
+ if (Context.hasSameUnqualifiedType(QualType(ClassDecl->getTypeForDecl(),0),
+ BaseType))
+ return BuildDelegatingInitializer(BaseTInfo, Init, ClassDecl);
+
+ FindBaseInitializer(*this, ClassDecl, BaseType, DirectBaseSpec,
+ VirtualBaseSpec);
+
+ // C++ [class.base.init]p2:
+ // Unless the mem-initializer-id names a nonstatic data member of the
+ // constructor's class or a direct or virtual base of that class, the
+ // mem-initializer is ill-formed.
+ if (!DirectBaseSpec && !VirtualBaseSpec) {
+ // If the class has any dependent bases, then it's possible that
+ // one of those types will resolve to the same type as
+ // BaseType. Therefore, just treat this as a dependent base
+ // class initialization. FIXME: Should we try to check the
+ // initialization anyway? It seems odd.
+ if (ClassDecl->hasAnyDependentBases())
+ Dependent = true;
+ else
+ return Diag(BaseLoc, diag::err_not_direct_base_or_virtual)
+ << BaseType << Context.getTypeDeclType(ClassDecl)
+ << BaseTInfo->getTypeLoc().getLocalSourceRange();
+ }
+ }
+
+ if (Dependent) {
+ DiscardCleanupsInEvaluationContext();
+
+ return new (Context) CXXCtorInitializer(Context, BaseTInfo,
+ /*IsVirtual=*/false,
+ InitRange.getBegin(), Init,
+ InitRange.getEnd(), EllipsisLoc);
+ }
+
+ // C++ [class.base.init]p2:
+ // If a mem-initializer-id is ambiguous because it designates both
+ // a direct non-virtual base class and an inherited virtual base
+ // class, the mem-initializer is ill-formed.
+ if (DirectBaseSpec && VirtualBaseSpec)
+ return Diag(BaseLoc, diag::err_base_init_direct_and_virtual)
+ << BaseType << BaseTInfo->getTypeLoc().getLocalSourceRange();
+
+ const CXXBaseSpecifier *BaseSpec = DirectBaseSpec;
+ if (!BaseSpec)
+ BaseSpec = VirtualBaseSpec;
+
+ // Initialize the base.
+ bool InitList = true;
+ MultiExprArg Args = Init;
+ if (ParenListExpr *ParenList = dyn_cast<ParenListExpr>(Init)) {
+ InitList = false;
+ Args = MultiExprArg(ParenList->getExprs(), ParenList->getNumExprs());
+ }
+
+ InitializedEntity BaseEntity =
+ InitializedEntity::InitializeBase(Context, BaseSpec, VirtualBaseSpec);
+ InitializationKind Kind =
+ InitList ? InitializationKind::CreateDirectList(BaseLoc)
+ : InitializationKind::CreateDirect(BaseLoc, InitRange.getBegin(),
+ InitRange.getEnd());
+ InitializationSequence InitSeq(*this, BaseEntity, Kind, Args);
+ ExprResult BaseInit = InitSeq.Perform(*this, BaseEntity, Kind, Args, nullptr);
+ if (BaseInit.isInvalid())
+ return true;
+
+ // C++11 [class.base.init]p7:
+ // The initialization of each base and member constitutes a
+ // full-expression.
+ BaseInit = ActOnFinishFullExpr(BaseInit.get(), InitRange.getBegin());
+ if (BaseInit.isInvalid())
+ return true;
+
+ // If we are in a dependent context, template instantiation will
+ // perform this type-checking again. Just save the arguments that we
+ // received in a ParenListExpr.
+ // FIXME: This isn't quite ideal, since our ASTs don't capture all
+ // of the information that we have about the base
+ // initializer. However, deconstructing the ASTs is a dicey process,
+ // and this approach is far more likely to get the corner cases right.
+ if (CurContext->isDependentContext())
+ BaseInit = Init;
+
+ return new (Context) CXXCtorInitializer(Context, BaseTInfo,
+ BaseSpec->isVirtual(),
+ InitRange.getBegin(),
+ BaseInit.getAs<Expr>(),
+ InitRange.getEnd(), EllipsisLoc);
+}
+
+// Create a static_cast<T&&>(expr).
+static Expr *CastForMoving(Sema &SemaRef, Expr *E, QualType T = QualType()) {
+ if (T.isNull()) T = E->getType();
+ QualType TargetType = SemaRef.BuildReferenceType(
+ T, /*SpelledAsLValue*/false, SourceLocation(), DeclarationName());
+ SourceLocation ExprLoc = E->getLocStart();
+ TypeSourceInfo *TargetLoc = SemaRef.Context.getTrivialTypeSourceInfo(
+ TargetType, ExprLoc);
+
+ return SemaRef.BuildCXXNamedCast(ExprLoc, tok::kw_static_cast, TargetLoc, E,
+ SourceRange(ExprLoc, ExprLoc),
+ E->getSourceRange()).get();
+}
+
+/// ImplicitInitializerKind - How an implicit base or member initializer should
+/// initialize its base or member.
+enum ImplicitInitializerKind {
+ IIK_Default,
+ IIK_Copy,
+ IIK_Move,
+ IIK_Inherit
+};
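+// For example (see BaseAndFieldInfo below): an implicitly-defined or
+// defaulted copy constructor uses IIK_Copy, a move constructor IIK_Move, a
+// constructor inherited via a using-declaration IIK_Inherit, and anything
+// else IIK_Default.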
+
+static bool
+BuildImplicitBaseInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
+ ImplicitInitializerKind ImplicitInitKind,
+ CXXBaseSpecifier *BaseSpec,
+ bool IsInheritedVirtualBase,
+ CXXCtorInitializer *&CXXBaseInit) {
+ InitializedEntity InitEntity
+ = InitializedEntity::InitializeBase(SemaRef.Context, BaseSpec,
+ IsInheritedVirtualBase);
+
+ ExprResult BaseInit;
+
+ switch (ImplicitInitKind) {
+ case IIK_Inherit: {
+ const CXXRecordDecl *Inherited =
+ Constructor->getInheritedConstructor()->getParent();
+ const CXXRecordDecl *Base = BaseSpec->getType()->getAsCXXRecordDecl();
+ if (Base && Inherited->getCanonicalDecl() == Base->getCanonicalDecl()) {
+ // C++11 [class.inhctor]p8:
+ // Each expression in the expression-list is of the form
+ // static_cast<T&&>(p), where p is the name of the corresponding
+ // constructor parameter and T is the declared type of p.
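+ // Sketch: for 'struct D : B { using B::B; };' with 'B(int &&p)', the
+ // inheriting constructor initializes its B base as if by
+ // 'B(static_cast<int &&>(p))'.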
+ SmallVector<Expr*, 16> Args;
+ for (unsigned I = 0, E = Constructor->getNumParams(); I != E; ++I) {
+ ParmVarDecl *PD = Constructor->getParamDecl(I);
+ ExprResult ArgExpr =
+ SemaRef.BuildDeclRefExpr(PD, PD->getType().getNonReferenceType(),
+ VK_LValue, SourceLocation());
+ if (ArgExpr.isInvalid())
+ return true;
+ Args.push_back(CastForMoving(SemaRef, ArgExpr.get(), PD->getType()));
+ }
+
+ InitializationKind InitKind = InitializationKind::CreateDirect(
+ Constructor->getLocation(), SourceLocation(), SourceLocation());
+ InitializationSequence InitSeq(SemaRef, InitEntity, InitKind, Args);
+ BaseInit = InitSeq.Perform(SemaRef, InitEntity, InitKind, Args);
+ break;
+ }
+ }
+ // Fall through.
+ case IIK_Default: {
+ InitializationKind InitKind
+ = InitializationKind::CreateDefault(Constructor->getLocation());
+ InitializationSequence InitSeq(SemaRef, InitEntity, InitKind, None);
+ BaseInit = InitSeq.Perform(SemaRef, InitEntity, InitKind, None);
+ break;
+ }
+
+ case IIK_Move:
+ case IIK_Copy: {
+ bool Moving = ImplicitInitKind == IIK_Move;
+ ParmVarDecl *Param = Constructor->getParamDecl(0);
+ QualType ParamType = Param->getType().getNonReferenceType();
+
+ Expr *CopyCtorArg =
+ DeclRefExpr::Create(SemaRef.Context, NestedNameSpecifierLoc(),
+ SourceLocation(), Param, false,
+ Constructor->getLocation(), ParamType,
+ VK_LValue, nullptr);
+
+ SemaRef.MarkDeclRefReferenced(cast<DeclRefExpr>(CopyCtorArg));
+
+ // Cast to the base class to avoid ambiguities.
+ QualType ArgTy =
+ SemaRef.Context.getQualifiedType(BaseSpec->getType().getUnqualifiedType(),
+ ParamType.getQualifiers());
+
+ if (Moving) {
+ CopyCtorArg = CastForMoving(SemaRef, CopyCtorArg);
+ }
+
+ CXXCastPath BasePath;
+ BasePath.push_back(BaseSpec);
+ CopyCtorArg = SemaRef.ImpCastExprToType(CopyCtorArg, ArgTy,
+ CK_UncheckedDerivedToBase,
+ Moving ? VK_XValue : VK_LValue,
+ &BasePath).get();
+
+ InitializationKind InitKind
+ = InitializationKind::CreateDirect(Constructor->getLocation(),
+ SourceLocation(), SourceLocation());
+ InitializationSequence InitSeq(SemaRef, InitEntity, InitKind, CopyCtorArg);
+ BaseInit = InitSeq.Perform(SemaRef, InitEntity, InitKind, CopyCtorArg);
+ break;
+ }
+ }
+
+ BaseInit = SemaRef.MaybeCreateExprWithCleanups(BaseInit);
+ if (BaseInit.isInvalid())
+ return true;
+
+ CXXBaseInit =
+ new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context,
+ SemaRef.Context.getTrivialTypeSourceInfo(BaseSpec->getType(),
+ SourceLocation()),
+ BaseSpec->isVirtual(),
+ SourceLocation(),
+ BaseInit.getAs<Expr>(),
+ SourceLocation(),
+ SourceLocation());
+
+ return false;
+}
+
+static bool RefersToRValueRef(Expr *MemRef) {
+ ValueDecl *Referenced = cast<MemberExpr>(MemRef)->getMemberDecl();
+ return Referenced->getType()->isRValueReferenceType();
+}
+
+static bool
+BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
+ ImplicitInitializerKind ImplicitInitKind,
+ FieldDecl *Field, IndirectFieldDecl *Indirect,
+ CXXCtorInitializer *&CXXMemberInit) {
+ if (Field->isInvalidDecl())
+ return true;
+
+ SourceLocation Loc = Constructor->getLocation();
+
+ if (ImplicitInitKind == IIK_Copy || ImplicitInitKind == IIK_Move) {
+ bool Moving = ImplicitInitKind == IIK_Move;
+ ParmVarDecl *Param = Constructor->getParamDecl(0);
+ QualType ParamType = Param->getType().getNonReferenceType();
+
+ // Suppress copying zero-width bitfields.
+ if (Field->isBitField() && Field->getBitWidthValue(SemaRef.Context) == 0)
+ return false;
+
+ Expr *MemberExprBase =
+ DeclRefExpr::Create(SemaRef.Context, NestedNameSpecifierLoc(),
+ SourceLocation(), Param, false,
+ Loc, ParamType, VK_LValue, nullptr);
+
+ SemaRef.MarkDeclRefReferenced(cast<DeclRefExpr>(MemberExprBase));
+
+ if (Moving) {
+ MemberExprBase = CastForMoving(SemaRef, MemberExprBase);
+ }
+
+ // Build a reference to this field within the parameter.
+ CXXScopeSpec SS;
+ LookupResult MemberLookup(SemaRef, Field->getDeclName(), Loc,
+ Sema::LookupMemberName);
+ MemberLookup.addDecl(Indirect ? cast<ValueDecl>(Indirect)
+ : cast<ValueDecl>(Field), AS_public);
+ MemberLookup.resolveKind();
+ ExprResult CtorArg
+ = SemaRef.BuildMemberReferenceExpr(MemberExprBase,
+ ParamType, Loc,
+ /*IsArrow=*/false,
+ SS,
+ /*TemplateKWLoc=*/SourceLocation(),
+ /*FirstQualifierInScope=*/nullptr,
+ MemberLookup,
+ /*TemplateArgs=*/nullptr,
+ /*S*/nullptr);
+ if (CtorArg.isInvalid())
+ return true;
+
+ // C++11 [class.copy]p15:
+ // - if a member m has rvalue reference type T&&, it is direct-initialized
+ // with static_cast<T&&>(x.m);
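+ // (Sketch: for 'struct S { T &&M; };' the implicit move constructor
+ // initializes M from 'static_cast<T&&>(x.M)' rather than binding it to
+ // the lvalue member access.)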
+ if (RefersToRValueRef(CtorArg.get())) {
+ CtorArg = CastForMoving(SemaRef, CtorArg.get());
+ }
+
+ // When the field we are copying is an array, create index variables for
+ // each dimension of the array. We use these index variables to subscript
+ // the source array, and other clients (e.g., CodeGen) will perform the
+ // necessary iteration with these index variables.
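+ // Sketch: for a member 'T A[2][3]' this invents '__i0' and '__i1' and
+ // builds the element initializer from 'x.A[__i0][__i1]'; CodeGen supplies
+ // the surrounding loops.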
+ SmallVector<VarDecl *, 4> IndexVariables;
+ QualType BaseType = Field->getType();
+ QualType SizeType = SemaRef.Context.getSizeType();
+ bool InitializingArray = false;
+ while (const ConstantArrayType *Array
+ = SemaRef.Context.getAsConstantArrayType(BaseType)) {
+ InitializingArray = true;
+ // Create the iteration variable for this array index.
+ IdentifierInfo *IterationVarName = nullptr;
+ {
+ SmallString<8> Str;
+ llvm::raw_svector_ostream OS(Str);
+ OS << "__i" << IndexVariables.size();
+ IterationVarName = &SemaRef.Context.Idents.get(OS.str());
+ }
+ VarDecl *IterationVar
+ = VarDecl::Create(SemaRef.Context, SemaRef.CurContext, Loc, Loc,
+ IterationVarName, SizeType,
+ SemaRef.Context.getTrivialTypeSourceInfo(SizeType, Loc),
+ SC_None);
+ IndexVariables.push_back(IterationVar);
+
+ // Create a reference to the iteration variable.
+ ExprResult IterationVarRef
+ = SemaRef.BuildDeclRefExpr(IterationVar, SizeType, VK_LValue, Loc);
+ assert(!IterationVarRef.isInvalid() &&
+ "Reference to invented variable cannot fail!");
+ IterationVarRef = SemaRef.DefaultLvalueConversion(IterationVarRef.get());
+ assert(!IterationVarRef.isInvalid() &&
+ "Conversion of invented variable cannot fail!");
+
+ // Subscript the array with this iteration variable.
+ CtorArg = SemaRef.CreateBuiltinArraySubscriptExpr(CtorArg.get(), Loc,
+ IterationVarRef.get(),
+ Loc);
+ if (CtorArg.isInvalid())
+ return true;
+
+ BaseType = Array->getElementType();
+ }
+
+ // The array subscript expression is an lvalue, which is wrong for moving.
+ if (Moving && InitializingArray)
+ CtorArg = CastForMoving(SemaRef, CtorArg.get());
+
+ // Construct the entity that we will be initializing. For an array, this
+ // will be first element in the array, which may require several levels
+ // of array-subscript entities.
+ SmallVector<InitializedEntity, 4> Entities;
+ Entities.reserve(1 + IndexVariables.size());
+ if (Indirect)
+ Entities.push_back(InitializedEntity::InitializeMember(Indirect));
+ else
+ Entities.push_back(InitializedEntity::InitializeMember(Field));
+ for (unsigned I = 0, N = IndexVariables.size(); I != N; ++I)
+ Entities.push_back(InitializedEntity::InitializeElement(SemaRef.Context,
+ 0,
+ Entities.back()));
+
+ // Direct-initialize to use the copy constructor.
+ InitializationKind InitKind =
+ InitializationKind::CreateDirect(Loc, SourceLocation(), SourceLocation());
+
+ Expr *CtorArgE = CtorArg.getAs<Expr>();
+ InitializationSequence InitSeq(SemaRef, Entities.back(), InitKind,
+ CtorArgE);
+
+ ExprResult MemberInit
+ = InitSeq.Perform(SemaRef, Entities.back(), InitKind,
+ MultiExprArg(&CtorArgE, 1));
+ MemberInit = SemaRef.MaybeCreateExprWithCleanups(MemberInit);
+ if (MemberInit.isInvalid())
+ return true;
+
+ if (Indirect) {
+ assert(IndexVariables.size() == 0 &&
+ "Indirect field improperly initialized");
+ CXXMemberInit
+ = new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context, Indirect,
+ Loc, Loc,
+ MemberInit.getAs<Expr>(),
+ Loc);
+ } else
+ CXXMemberInit = CXXCtorInitializer::Create(SemaRef.Context, Field, Loc,
+ Loc, MemberInit.getAs<Expr>(),
+ Loc,
+ IndexVariables.data(),
+ IndexVariables.size());
+ return false;
+ }
+
+ assert((ImplicitInitKind == IIK_Default || ImplicitInitKind == IIK_Inherit) &&
+ "Unhandled implicit init kind!");
+
+ QualType FieldBaseElementType =
+ SemaRef.Context.getBaseElementType(Field->getType());
+
+ if (FieldBaseElementType->isRecordType()) {
+ InitializedEntity InitEntity
+ = Indirect? InitializedEntity::InitializeMember(Indirect)
+ : InitializedEntity::InitializeMember(Field);
+ InitializationKind InitKind =
+ InitializationKind::CreateDefault(Loc);
+
+ InitializationSequence InitSeq(SemaRef, InitEntity, InitKind, None);
+ ExprResult MemberInit =
+ InitSeq.Perform(SemaRef, InitEntity, InitKind, None);
+
+ MemberInit = SemaRef.MaybeCreateExprWithCleanups(MemberInit);
+ if (MemberInit.isInvalid())
+ return true;
+
+ if (Indirect)
+ CXXMemberInit = new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context,
+ Indirect, Loc,
+ Loc,
+ MemberInit.get(),
+ Loc);
+ else
+ CXXMemberInit = new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context,
+ Field, Loc, Loc,
+ MemberInit.get(),
+ Loc);
+ return false;
+ }
+
+ if (!Field->getParent()->isUnion()) {
+ if (FieldBaseElementType->isReferenceType()) {
+ SemaRef.Diag(Constructor->getLocation(),
+ diag::err_uninitialized_member_in_ctor)
+ << (int)Constructor->isImplicit()
+ << SemaRef.Context.getTagDeclType(Constructor->getParent())
+ << 0 << Field->getDeclName();
+ SemaRef.Diag(Field->getLocation(), diag::note_declared_at);
+ return true;
+ }
+
+ if (FieldBaseElementType.isConstQualified()) {
+ SemaRef.Diag(Constructor->getLocation(),
+ diag::err_uninitialized_member_in_ctor)
+ << (int)Constructor->isImplicit()
+ << SemaRef.Context.getTagDeclType(Constructor->getParent())
+ << 1 << Field->getDeclName();
+ SemaRef.Diag(Field->getLocation(), diag::note_declared_at);
+ return true;
+ }
+ }
+
+ if (SemaRef.getLangOpts().ObjCAutoRefCount &&
+ FieldBaseElementType->isObjCRetainableType() &&
+ FieldBaseElementType.getObjCLifetime() != Qualifiers::OCL_None &&
+ FieldBaseElementType.getObjCLifetime() != Qualifiers::OCL_ExplicitNone) {
+ // ARC:
+ // Default-initialize Objective-C pointers to NULL.
+ CXXMemberInit
+ = new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context, Field,
+ Loc, Loc,
+ new (SemaRef.Context) ImplicitValueInitExpr(Field->getType()),
+ Loc);
+ return false;
+ }
+
+ // Nothing to initialize.
+ CXXMemberInit = nullptr;
+ return false;
+}
+
+namespace {
+struct BaseAndFieldInfo {
+ Sema &S;
+ CXXConstructorDecl *Ctor;
+ bool AnyErrorsInInits;
+ ImplicitInitializerKind IIK;
+ llvm::DenseMap<const void *, CXXCtorInitializer*> AllBaseFields;
+ SmallVector<CXXCtorInitializer*, 8> AllToInit;
+ llvm::DenseMap<TagDecl*, FieldDecl*> ActiveUnionMember;
+
+ BaseAndFieldInfo(Sema &S, CXXConstructorDecl *Ctor, bool ErrorsInInits)
+ : S(S), Ctor(Ctor), AnyErrorsInInits(ErrorsInInits) {
+ bool Generated = Ctor->isImplicit() || Ctor->isDefaulted();
+ if (Generated && Ctor->isCopyConstructor())
+ IIK = IIK_Copy;
+ else if (Generated && Ctor->isMoveConstructor())
+ IIK = IIK_Move;
+ else if (Ctor->getInheritedConstructor())
+ IIK = IIK_Inherit;
+ else
+ IIK = IIK_Default;
+ }
+
+ bool isImplicitCopyOrMove() const {
+ switch (IIK) {
+ case IIK_Copy:
+ case IIK_Move:
+ return true;
+
+ case IIK_Default:
+ case IIK_Inherit:
+ return false;
+ }
+
+ llvm_unreachable("Invalid ImplicitInitializerKind!");
+ }
+
+ bool addFieldInitializer(CXXCtorInitializer *Init) {
+ AllToInit.push_back(Init);
+
+ // Check whether this initializer makes the field "used".
+ if (Init->getInit()->HasSideEffects(S.Context))
+ S.UnusedPrivateFields.remove(Init->getAnyMember());
+
+ return false;
+ }
+
+ bool isInactiveUnionMember(FieldDecl *Field) {
+ RecordDecl *Record = Field->getParent();
+ if (!Record->isUnion())
+ return false;
+
+ if (FieldDecl *Active =
+ ActiveUnionMember.lookup(Record->getCanonicalDecl()))
+ return Active != Field->getCanonicalDecl();
+
+ // In an implicit copy or move constructor, ignore any in-class initializer.
+ if (isImplicitCopyOrMove())
+ return true;
+
+ // If there's no explicit initialization, the field is active only if it
+ // has an in-class initializer...
+ if (Field->hasInClassInitializer())
+ return false;
+ // ... or it's an anonymous struct or union whose class has an in-class
+ // initializer.
+ if (!Field->isAnonymousStructOrUnion())
+ return true;
+ CXXRecordDecl *FieldRD = Field->getType()->getAsCXXRecordDecl();
+ return !FieldRD->hasInClassInitializer();
+ }
+
+ /// \brief Determine whether the given field is, or is within, a union member
+ /// that is inactive (because there was an initializer given for a different
+ /// member of the union, or because the union was not initialized at all).
+ bool isWithinInactiveUnionMember(FieldDecl *Field,
+ IndirectFieldDecl *Indirect) {
+ if (!Indirect)
+ return isInactiveUnionMember(Field);
+
+ for (auto *C : Indirect->chain()) {
+ FieldDecl *Field = dyn_cast<FieldDecl>(C);
+ if (Field && isInactiveUnionMember(Field))
+ return true;
+ }
+ return false;
+ }
+};
+} // namespace
+
+/// \brief Determine whether the given type is an incomplete or zero-length
+/// array type.
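+/// (For instance, 'int A[]' is incomplete and 'int A[0][8]' contains a
+/// zero-length dimension; the callers below neither initialize nor destroy
+/// such members.)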
+static bool isIncompleteOrZeroLengthArrayType(ASTContext &Context, QualType T) {
+ if (T->isIncompleteArrayType())
+ return true;
+
+ while (const ConstantArrayType *ArrayT = Context.getAsConstantArrayType(T)) {
+ if (!ArrayT->getSize())
+ return true;
+
+ T = ArrayT->getElementType();
+ }
+
+ return false;
+}
+
+static bool CollectFieldInitializer(Sema &SemaRef, BaseAndFieldInfo &Info,
+ FieldDecl *Field,
+ IndirectFieldDecl *Indirect = nullptr) {
+ if (Field->isInvalidDecl())
+ return false;
+
+ // Overwhelmingly common case: we have a direct initializer for this field.
+ if (CXXCtorInitializer *Init =
+ Info.AllBaseFields.lookup(Field->getCanonicalDecl()))
+ return Info.addFieldInitializer(Init);
+
+ // C++11 [class.base.init]p8:
+ // if the entity is a non-static data member that has a
+ // brace-or-equal-initializer and either
+ // -- the constructor's class is a union and no other variant member of that
+ // union is designated by a mem-initializer-id or
+ // -- the constructor's class is not a union, and, if the entity is a member
+ // of an anonymous union, no other member of that union is designated by
+ // a mem-initializer-id,
+ // the entity is initialized as specified in [dcl.init].
+ //
+ // We also apply the same rules to handle anonymous structs within anonymous
+ // unions.
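+ // Hypothetical example: for an anonymous 'union { int A = 0; float B; };'
+ // member, a constructor whose mem-initializer list designates 'B' makes
+ // 'B' the active member, so the in-class initializer of 'A' is not used.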
+ if (Info.isWithinInactiveUnionMember(Field, Indirect))
+ return false;
+
+ if (Field->hasInClassInitializer() && !Info.isImplicitCopyOrMove()) {
+ ExprResult DIE =
+ SemaRef.BuildCXXDefaultInitExpr(Info.Ctor->getLocation(), Field);
+ if (DIE.isInvalid())
+ return true;
+ CXXCtorInitializer *Init;
+ if (Indirect)
+ Init = new (SemaRef.Context)
+ CXXCtorInitializer(SemaRef.Context, Indirect, SourceLocation(),
+ SourceLocation(), DIE.get(), SourceLocation());
+ else
+ Init = new (SemaRef.Context)
+ CXXCtorInitializer(SemaRef.Context, Field, SourceLocation(),
+ SourceLocation(), DIE.get(), SourceLocation());
+ return Info.addFieldInitializer(Init);
+ }
+
+ // Don't initialize incomplete or zero-length arrays.
+ if (isIncompleteOrZeroLengthArrayType(SemaRef.Context, Field->getType()))
+ return false;
+
+ // Don't try to build an implicit initializer if there were semantic
+ // errors in any of the initializers (and therefore we might be
+ // missing some that the user actually wrote).
+ if (Info.AnyErrorsInInits)
+ return false;
+
+ CXXCtorInitializer *Init = nullptr;
+ if (BuildImplicitMemberInitializer(Info.S, Info.Ctor, Info.IIK, Field,
+ Indirect, Init))
+ return true;
+
+ if (!Init)
+ return false;
+
+ return Info.addFieldInitializer(Init);
+}
+
+bool
+Sema::SetDelegatingInitializer(CXXConstructorDecl *Constructor,
+ CXXCtorInitializer *Initializer) {
+ assert(Initializer->isDelegatingInitializer());
+ Constructor->setNumCtorInitializers(1);
+ CXXCtorInitializer **initializer =
+ new (Context) CXXCtorInitializer*[1];
+ memcpy(initializer, &Initializer, sizeof (CXXCtorInitializer*));
+ Constructor->setCtorInitializers(initializer);
+
+ if (CXXDestructorDecl *Dtor = LookupDestructor(Constructor->getParent())) {
+ MarkFunctionReferenced(Initializer->getSourceLocation(), Dtor);
+ DiagnoseUseOfDecl(Dtor, Initializer->getSourceLocation());
+ }
+
+ DelegatingCtorDecls.push_back(Constructor);
+
+ DiagnoseUninitializedFields(*this, Constructor);
+
+ return false;
+}
+
+bool Sema::SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
+ ArrayRef<CXXCtorInitializer *> Initializers) {
+ if (Constructor->isDependentContext()) {
+ // Just store the initializers as written, they will be checked during
+ // instantiation.
+ if (!Initializers.empty()) {
+ Constructor->setNumCtorInitializers(Initializers.size());
+ CXXCtorInitializer **baseOrMemberInitializers =
+ new (Context) CXXCtorInitializer*[Initializers.size()];
+ memcpy(baseOrMemberInitializers, Initializers.data(),
+ Initializers.size() * sizeof(CXXCtorInitializer*));
+ Constructor->setCtorInitializers(baseOrMemberInitializers);
+ }
+
+ // Let template instantiation know whether we had errors.
+ if (AnyErrors)
+ Constructor->setInvalidDecl();
+
+ return false;
+ }
+
+ BaseAndFieldInfo Info(*this, Constructor, AnyErrors);
+
+ // We need to build the initializer AST according to the order of
+ // construction, not the order the user specified in the Initializers list.
+ CXXRecordDecl *ClassDecl = Constructor->getParent()->getDefinition();
+ if (!ClassDecl)
+ return true;
+
+ bool HadError = false;
+
+ for (unsigned i = 0; i < Initializers.size(); i++) {
+ CXXCtorInitializer *Member = Initializers[i];
+
+ if (Member->isBaseInitializer())
+ Info.AllBaseFields[Member->getBaseClass()->getAs<RecordType>()] = Member;
+ else {
+ Info.AllBaseFields[Member->getAnyMember()->getCanonicalDecl()] = Member;
+
+ if (IndirectFieldDecl *F = Member->getIndirectMember()) {
+ for (auto *C : F->chain()) {
+ FieldDecl *FD = dyn_cast<FieldDecl>(C);
+ if (FD && FD->getParent()->isUnion())
+ Info.ActiveUnionMember.insert(std::make_pair(
+ FD->getParent()->getCanonicalDecl(), FD->getCanonicalDecl()));
+ }
+ } else if (FieldDecl *FD = Member->getMember()) {
+ if (FD->getParent()->isUnion())
+ Info.ActiveUnionMember.insert(std::make_pair(
+ FD->getParent()->getCanonicalDecl(), FD->getCanonicalDecl()));
+ }
+ }
+ }
+
+ // Keep track of the direct virtual bases.
+ llvm::SmallPtrSet<CXXBaseSpecifier *, 16> DirectVBases;
+ for (auto &I : ClassDecl->bases()) {
+ if (I.isVirtual())
+ DirectVBases.insert(&I);
+ }
+
+ // Push virtual bases before others.
+ for (auto &VBase : ClassDecl->vbases()) {
+ if (CXXCtorInitializer *Value
+ = Info.AllBaseFields.lookup(VBase.getType()->getAs<RecordType>())) {
+ // [class.base.init]p7, per DR257:
+ // A mem-initializer where the mem-initializer-id names a virtual base
+ // class is ignored during execution of a constructor of any class that
+ // is not the most derived class.
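+ // Sketch: if 'struct A : virtual V { A() : V(1) {} virtual void f() = 0; };'
+ // then A is never the most derived class, so 'V(1)' can never execute and
+ // is warned about below.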
+ if (ClassDecl->isAbstract()) {
+ // FIXME: Provide a fixit to remove the base specifier. This requires
+ // tracking the location of the associated comma for a base specifier.
+ Diag(Value->getSourceLocation(), diag::warn_abstract_vbase_init_ignored)
+ << VBase.getType() << ClassDecl;
+ DiagnoseAbstractType(ClassDecl);
+ }
+
+ Info.AllToInit.push_back(Value);
+ } else if (!AnyErrors && !ClassDecl->isAbstract()) {
+ // [class.base.init]p8, per DR257:
+ // If a given [...] base class is not named by a mem-initializer-id
+ // [...] and the entity is not a virtual base class of an abstract
+ // class, then [...] the entity is default-initialized.
+ bool IsInheritedVirtualBase = !DirectVBases.count(&VBase);
+ CXXCtorInitializer *CXXBaseInit;
+ if (BuildImplicitBaseInitializer(*this, Constructor, Info.IIK,
+ &VBase, IsInheritedVirtualBase,
+ CXXBaseInit)) {
+ HadError = true;
+ continue;
+ }
+
+ Info.AllToInit.push_back(CXXBaseInit);
+ }
+ }
+
+ // Non-virtual bases.
+ for (auto &Base : ClassDecl->bases()) {
+ // Virtuals are in the virtual base list and already constructed.
+ if (Base.isVirtual())
+ continue;
+
+ if (CXXCtorInitializer *Value
+ = Info.AllBaseFields.lookup(Base.getType()->getAs<RecordType>())) {
+ Info.AllToInit.push_back(Value);
+ } else if (!AnyErrors) {
+ CXXCtorInitializer *CXXBaseInit;
+ if (BuildImplicitBaseInitializer(*this, Constructor, Info.IIK,
+ &Base, /*IsInheritedVirtualBase=*/false,
+ CXXBaseInit)) {
+ HadError = true;
+ continue;
+ }
+
+ Info.AllToInit.push_back(CXXBaseInit);
+ }
+ }
+
+ // Fields.
+ for (auto *Mem : ClassDecl->decls()) {
+ if (auto *F = dyn_cast<FieldDecl>(Mem)) {
+ // C++ [class.bit]p2:
+ // A declaration for a bit-field that omits the identifier declares an
+ // unnamed bit-field. Unnamed bit-fields are not members and cannot be
+ // initialized.
+ if (F->isUnnamedBitfield())
+ continue;
+
+ // If we're not generating the implicit copy/move constructor, then we'll
+ // handle anonymous struct/union fields based on their individual
+ // indirect fields.
+ if (F->isAnonymousStructOrUnion() && !Info.isImplicitCopyOrMove())
+ continue;
+
+ if (CollectFieldInitializer(*this, Info, F))
+ HadError = true;
+ continue;
+ }
+
+ // Beyond this point, we only consider default initialization.
+ if (Info.isImplicitCopyOrMove())
+ continue;
+
+ if (auto *F = dyn_cast<IndirectFieldDecl>(Mem)) {
+ if (F->getType()->isIncompleteArrayType()) {
+ assert(ClassDecl->hasFlexibleArrayMember() &&
+ "Incomplete array type is not valid");
+ continue;
+ }
+
+ // Initialize each field of an anonymous struct individually.
+ if (CollectFieldInitializer(*this, Info, F->getAnonField(), F))
+ HadError = true;
+
+ continue;
+ }
+ }
+
+ unsigned NumInitializers = Info.AllToInit.size();
+ if (NumInitializers > 0) {
+ Constructor->setNumCtorInitializers(NumInitializers);
+ CXXCtorInitializer **baseOrMemberInitializers =
+ new (Context) CXXCtorInitializer*[NumInitializers];
+ memcpy(baseOrMemberInitializers, Info.AllToInit.data(),
+ NumInitializers * sizeof(CXXCtorInitializer*));
+ Constructor->setCtorInitializers(baseOrMemberInitializers);
+
+ // Constructors implicitly reference the base and member
+ // destructors.
+ MarkBaseAndMemberDestructorsReferenced(Constructor->getLocation(),
+ Constructor->getParent());
+ }
+
+ return HadError;
+}
+
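+// Flatten a field into its initialization-order keys: an anonymous struct or
+// union contributes its nested fields recursively; any other field
+// contributes its own canonical declaration.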
+static void PopulateKeysForFields(FieldDecl *Field,
+ SmallVectorImpl<const void*> &IdealInits) {
+ if (const RecordType *RT = Field->getType()->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->isAnonymousStructOrUnion()) {
+ for (auto *Field : RD->fields())
+ PopulateKeysForFields(Field, IdealInits);
+ return;
+ }
+ }
+ IdealInits.push_back(Field->getCanonicalDecl());
+}
+
+static const void *GetKeyForBase(ASTContext &Context, QualType BaseType) {
+ return Context.getCanonicalType(BaseType).getTypePtr();
+}
+
+static const void *GetKeyForMember(ASTContext &Context,
+ CXXCtorInitializer *Member) {
+ if (!Member->isAnyMemberInitializer())
+ return GetKeyForBase(Context, QualType(Member->getBaseClass(), 0));
+
+ return Member->getAnyMember()->getCanonicalDecl();
+}
+
+static void DiagnoseBaseOrMemInitializerOrder(
+ Sema &SemaRef, const CXXConstructorDecl *Constructor,
+ ArrayRef<CXXCtorInitializer *> Inits) {
+ if (Constructor->getDeclContext()->isDependentContext())
+ return;
+
+ // Don't check initializers order unless the warning is enabled at the
+ // location of at least one initializer.
+ bool ShouldCheckOrder = false;
+ for (unsigned InitIndex = 0; InitIndex != Inits.size(); ++InitIndex) {
+ CXXCtorInitializer *Init = Inits[InitIndex];
+ if (!SemaRef.Diags.isIgnored(diag::warn_initializer_out_of_order,
+ Init->getSourceLocation())) {
+ ShouldCheckOrder = true;
+ break;
+ }
+ }
+ if (!ShouldCheckOrder)
+ return;
+
+ // Build the list of bases and members in the order that they'll
+ // actually be initialized. The explicit initializers should be in
+ // this same order but may be missing things.
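+ // Illustrative: for 'struct S { int A, B; S() : B(0), A(0) {} };' the
+ // ideal order is {A, B}; the scan below matches 'B' first, so 'A' is then
+ // found out of order and warn_initializer_out_of_order fires on the pair.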
+ SmallVector<const void*, 32> IdealInitKeys;
+
+ const CXXRecordDecl *ClassDecl = Constructor->getParent();
+
+ // 1. Virtual bases.
+ for (const auto &VBase : ClassDecl->vbases())
+ IdealInitKeys.push_back(GetKeyForBase(SemaRef.Context, VBase.getType()));
+
+ // 2. Non-virtual bases.
+ for (const auto &Base : ClassDecl->bases()) {
+ if (Base.isVirtual())
+ continue;
+ IdealInitKeys.push_back(GetKeyForBase(SemaRef.Context, Base.getType()));
+ }
+
+ // 3. Direct fields.
+ for (auto *Field : ClassDecl->fields()) {
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ PopulateKeysForFields(Field, IdealInitKeys);
+ }
+
+ unsigned NumIdealInits = IdealInitKeys.size();
+ unsigned IdealIndex = 0;
+
+ CXXCtorInitializer *PrevInit = nullptr;
+ for (unsigned InitIndex = 0; InitIndex != Inits.size(); ++InitIndex) {
+ CXXCtorInitializer *Init = Inits[InitIndex];
+ const void *InitKey = GetKeyForMember(SemaRef.Context, Init);
+
+ // Scan forward to try to find this initializer in the idealized
+ // initializers list.
+ for (; IdealIndex != NumIdealInits; ++IdealIndex)
+ if (InitKey == IdealInitKeys[IdealIndex])
+ break;
+
+ // If we didn't find this initializer, it must be because we
+ // scanned past it on a previous iteration. That can only
+ // happen if we're out of order; emit a warning.
+ if (IdealIndex == NumIdealInits && PrevInit) {
+ Sema::SemaDiagnosticBuilder D =
+ SemaRef.Diag(PrevInit->getSourceLocation(),
+ diag::warn_initializer_out_of_order);
+
+ if (PrevInit->isAnyMemberInitializer())
+ D << 0 << PrevInit->getAnyMember()->getDeclName();
+ else
+ D << 1 << PrevInit->getTypeSourceInfo()->getType();
+
+ if (Init->isAnyMemberInitializer())
+ D << 0 << Init->getAnyMember()->getDeclName();
+ else
+ D << 1 << Init->getTypeSourceInfo()->getType();
+
+ // Move back to the initializer's location in the ideal list.
+ for (IdealIndex = 0; IdealIndex != NumIdealInits; ++IdealIndex)
+ if (InitKey == IdealInitKeys[IdealIndex])
+ break;
+
+ assert(IdealIndex < NumIdealInits &&
+ "initializer not found in initializer list");
+ }
+
+ PrevInit = Init;
+ }
+}
+
+namespace {
+bool CheckRedundantInit(Sema &S,
+ CXXCtorInitializer *Init,
+ CXXCtorInitializer *&PrevInit) {
+ if (!PrevInit) {
+ PrevInit = Init;
+ return false;
+ }
+
+ if (FieldDecl *Field = Init->getAnyMember())
+ S.Diag(Init->getSourceLocation(),
+ diag::err_multiple_mem_initialization)
+ << Field->getDeclName()
+ << Init->getSourceRange();
+ else {
+ const Type *BaseClass = Init->getBaseClass();
+ assert(BaseClass && "neither field nor base");
+ S.Diag(Init->getSourceLocation(),
+ diag::err_multiple_base_initialization)
+ << QualType(BaseClass, 0)
+ << Init->getSourceRange();
+ }
+ S.Diag(PrevInit->getSourceLocation(), diag::note_previous_initializer)
+ << 0 << PrevInit->getSourceRange();
+
+ return true;
+}
+
+typedef std::pair<NamedDecl *, CXXCtorInitializer *> UnionEntry;
+typedef llvm::DenseMap<RecordDecl*, UnionEntry> RedundantUnionMap;
+
+bool CheckRedundantUnionInit(Sema &S,
+ CXXCtorInitializer *Init,
+ RedundantUnionMap &Unions) {
+ FieldDecl *Field = Init->getAnyMember();
+ RecordDecl *Parent = Field->getParent();
+ NamedDecl *Child = Field;
+
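+ // Walk outward through the enclosing anonymous structs/unions; at each
+ // union level, record the first-initialized child and diagnose a later
+ // initializer for a different child of the same union.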
+ while (Parent->isAnonymousStructOrUnion() || Parent->isUnion()) {
+ if (Parent->isUnion()) {
+ UnionEntry &En = Unions[Parent];
+ if (En.first && En.first != Child) {
+ S.Diag(Init->getSourceLocation(),
+ diag::err_multiple_mem_union_initialization)
+ << Field->getDeclName()
+ << Init->getSourceRange();
+ S.Diag(En.second->getSourceLocation(), diag::note_previous_initializer)
+ << 0 << En.second->getSourceRange();
+ return true;
+ }
+ if (!En.first) {
+ En.first = Child;
+ En.second = Init;
+ }
+ if (!Parent->isAnonymousStructOrUnion())
+ return false;
+ }
+
+ Child = Parent;
+ Parent = cast<RecordDecl>(Parent->getDeclContext());
+ }
+
+ return false;
+}
+}
+
+/// ActOnMemInitializers - Handle the member initializers for a constructor.
+void Sema::ActOnMemInitializers(Decl *ConstructorDecl,
+ SourceLocation ColonLoc,
+ ArrayRef<CXXCtorInitializer*> MemInits,
+ bool AnyErrors) {
+ if (!ConstructorDecl)
+ return;
+
+ AdjustDeclIfTemplate(ConstructorDecl);
+
+ CXXConstructorDecl *Constructor
+ = dyn_cast<CXXConstructorDecl>(ConstructorDecl);
+
+ if (!Constructor) {
+ Diag(ColonLoc, diag::err_only_constructors_take_base_inits);
+ return;
+ }
+
+ // Mapping for the duplicate initializers check.
+ // For member initializers, this is keyed with a FieldDecl*.
+ // For base initializers, this is keyed with a Type*.
+ llvm::DenseMap<const void *, CXXCtorInitializer *> Members;
+
+ // Mapping for the inconsistent anonymous-union initializers check.
+ RedundantUnionMap MemberUnions;
+
+ bool HadError = false;
+ for (unsigned i = 0; i < MemInits.size(); i++) {
+ CXXCtorInitializer *Init = MemInits[i];
+
+ // Set the source order index.
+ Init->setSourceOrder(i);
+
+ if (Init->isAnyMemberInitializer()) {
+ const void *Key = GetKeyForMember(Context, Init);
+ if (CheckRedundantInit(*this, Init, Members[Key]) ||
+ CheckRedundantUnionInit(*this, Init, MemberUnions))
+ HadError = true;
+ } else if (Init->isBaseInitializer()) {
+ const void *Key = GetKeyForMember(Context, Init);
+ if (CheckRedundantInit(*this, Init, Members[Key]))
+ HadError = true;
+ } else {
+ assert(Init->isDelegatingInitializer());
+      // This must be the only initializer.
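+      // For example (illustrative), 'S() : S(0), x(1) {}' is diagnosed
+      // because the delegating initializer must stand alone.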
+ if (MemInits.size() != 1) {
+ Diag(Init->getSourceLocation(),
+ diag::err_delegating_initializer_alone)
+ << Init->getSourceRange() << MemInits[i ? 0 : 1]->getSourceRange();
+ // We will treat this as being the only initializer.
+ }
+ SetDelegatingInitializer(Constructor, MemInits[i]);
+ // Return immediately as the initializer is set.
+ return;
+ }
+ }
+
+ if (HadError)
+ return;
+
+ DiagnoseBaseOrMemInitializerOrder(*this, Constructor, MemInits);
+
+ SetCtorInitializers(Constructor, AnyErrors, MemInits);
+
+ DiagnoseUninitializedFields(*this, Constructor);
+}
+
+void
+Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location,
+ CXXRecordDecl *ClassDecl) {
+ // Ignore dependent contexts. Also ignore unions, since their members never
+ // have destructors implicitly called.
+ if (ClassDecl->isDependentContext() || ClassDecl->isUnion())
+ return;
+
+ // FIXME: all the access-control diagnostics are positioned on the
+ // field/base declaration. That's probably good; that said, the
+ // user might reasonably want to know why the destructor is being
+ // emitted, and we currently don't say.
+
+ // Non-static data members.
+ for (auto *Field : ClassDecl->fields()) {
+ if (Field->isInvalidDecl())
+ continue;
+
+ // Don't destroy incomplete or zero-length arrays.
+ if (isIncompleteOrZeroLengthArrayType(Context, Field->getType()))
+ continue;
+
+ QualType FieldType = Context.getBaseElementType(Field->getType());
+
+ const RecordType* RT = FieldType->getAs<RecordType>();
+ if (!RT)
+ continue;
+
+ CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (FieldClassDecl->isInvalidDecl())
+ continue;
+ if (FieldClassDecl->hasIrrelevantDestructor())
+ continue;
+ // The destructor for an implicit anonymous union member is never invoked.
+ if (FieldClassDecl->isUnion() && FieldClassDecl->isAnonymousStructOrUnion())
+ continue;
+
+ CXXDestructorDecl *Dtor = LookupDestructor(FieldClassDecl);
+ assert(Dtor && "No dtor found for FieldClassDecl!");
+ CheckDestructorAccess(Field->getLocation(), Dtor,
+ PDiag(diag::err_access_dtor_field)
+ << Field->getDeclName()
+ << FieldType);
+
+ MarkFunctionReferenced(Location, Dtor);
+ DiagnoseUseOfDecl(Dtor, Location);
+ }
+
+ llvm::SmallPtrSet<const RecordType *, 8> DirectVirtualBases;
+
+ // Bases.
+ for (const auto &Base : ClassDecl->bases()) {
+ // Bases are always records in a well-formed non-dependent class.
+ const RecordType *RT = Base.getType()->getAs<RecordType>();
+
+ // Remember direct virtual bases.
+ if (Base.isVirtual())
+ DirectVirtualBases.insert(RT);
+
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ // If our base class is invalid, we probably can't get its dtor anyway.
+ if (BaseClassDecl->isInvalidDecl())
+ continue;
+ if (BaseClassDecl->hasIrrelevantDestructor())
+ continue;
+
+ CXXDestructorDecl *Dtor = LookupDestructor(BaseClassDecl);
+ assert(Dtor && "No dtor found for BaseClassDecl!");
+
+ // FIXME: caret should be on the start of the class name
+ CheckDestructorAccess(Base.getLocStart(), Dtor,
+ PDiag(diag::err_access_dtor_base)
+ << Base.getType()
+ << Base.getSourceRange(),
+ Context.getTypeDeclType(ClassDecl));
+
+ MarkFunctionReferenced(Location, Dtor);
+ DiagnoseUseOfDecl(Dtor, Location);
+ }
+
+ // Virtual bases.
+ for (const auto &VBase : ClassDecl->vbases()) {
+ // Bases are always records in a well-formed non-dependent class.
+ const RecordType *RT = VBase.getType()->castAs<RecordType>();
+
+ // Ignore direct virtual bases.
+ if (DirectVirtualBases.count(RT))
+ continue;
+
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ // If our base class is invalid, we probably can't get its dtor anyway.
+ if (BaseClassDecl->isInvalidDecl())
+ continue;
+ if (BaseClassDecl->hasIrrelevantDestructor())
+ continue;
+
+ CXXDestructorDecl *Dtor = LookupDestructor(BaseClassDecl);
+ assert(Dtor && "No dtor found for BaseClassDecl!");
+ if (CheckDestructorAccess(
+ ClassDecl->getLocation(), Dtor,
+ PDiag(diag::err_access_dtor_vbase)
+ << Context.getTypeDeclType(ClassDecl) << VBase.getType(),
+ Context.getTypeDeclType(ClassDecl)) ==
+ AR_accessible) {
+ CheckDerivedToBaseConversion(
+ Context.getTypeDeclType(ClassDecl), VBase.getType(),
+ diag::err_access_dtor_vbase, 0, ClassDecl->getLocation(),
+ SourceRange(), DeclarationName(), nullptr);
+ }
+
+ MarkFunctionReferenced(Location, Dtor);
+ DiagnoseUseOfDecl(Dtor, Location);
+ }
+}
+
+void Sema::ActOnDefaultCtorInitializers(Decl *CDtorDecl) {
+ if (!CDtorDecl)
+ return;
+
+ if (CXXConstructorDecl *Constructor
+ = dyn_cast<CXXConstructorDecl>(CDtorDecl)) {
+ SetCtorInitializers(Constructor, /*AnyErrors=*/false);
+ DiagnoseUninitializedFields(*this, Constructor);
+ }
+}
+
+bool Sema::isAbstractType(SourceLocation Loc, QualType T) {
+ if (!getLangOpts().CPlusPlus)
+ return false;
+
+ const auto *RD = Context.getBaseElementType(T)->getAsCXXRecordDecl();
+ if (!RD)
+ return false;
+
+ // FIXME: Per [temp.inst]p1, we are supposed to trigger instantiation of a
+ // class template specialization here, but doing so breaks a lot of code.
+
+ // We can't answer whether something is abstract until it has a
+ // definition. If it's currently being defined, we'll walk back
+ // over all the declarations when we have a full definition.
+ const CXXRecordDecl *Def = RD->getDefinition();
+ if (!Def || Def->isBeingDefined())
+ return false;
+
+ return RD->isAbstract();
+}
+
+bool Sema::RequireNonAbstractType(SourceLocation Loc, QualType T,
+ TypeDiagnoser &Diagnoser) {
+ if (!isAbstractType(Loc, T))
+ return false;
+
+ T = Context.getBaseElementType(T);
+ Diagnoser.diagnose(*this, Loc, T);
+ DiagnoseAbstractType(T->getAsCXXRecordDecl());
+ return true;
+}
+
+void Sema::DiagnoseAbstractType(const CXXRecordDecl *RD) {
+ // Check if we've already emitted the list of pure virtual functions
+ // for this class.
+ if (PureVirtualClassDiagSet && PureVirtualClassDiagSet->count(RD))
+ return;
+
+ // If the diagnostic is suppressed, don't emit the notes. We're only
+ // going to emit them once, so try to attach them to a diagnostic we're
+ // actually going to show.
+ if (Diags.isLastDiagnosticIgnored())
+ return;
+
+ CXXFinalOverriderMap FinalOverriders;
+ RD->getFinalOverriders(FinalOverriders);
+
+ // Keep a set of seen pure methods so we won't diagnose the same method
+ // more than once.
+ llvm::SmallPtrSet<const CXXMethodDecl *, 8> SeenPureMethods;
+
+ for (CXXFinalOverriderMap::iterator M = FinalOverriders.begin(),
+ MEnd = FinalOverriders.end();
+ M != MEnd;
+ ++M) {
+ for (OverridingMethods::iterator SO = M->second.begin(),
+ SOEnd = M->second.end();
+ SO != SOEnd; ++SO) {
+ // C++ [class.abstract]p4:
+ // A class is abstract if it contains or inherits at least one
+ // pure virtual function for which the final overrider is pure
+ // virtual.
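+      // For example (illustrative):
+      //   struct A { virtual void f() = 0; };
+      //   struct B : A { void f() override = 0; };
+      // B is abstract because the final overrider of 'f' is pure.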
+
+ if (SO->second.size() != 1)
+ continue;
+
+ if (!SO->second.front().Method->isPure())
+ continue;
+
+ if (!SeenPureMethods.insert(SO->second.front().Method).second)
+ continue;
+
+ Diag(SO->second.front().Method->getLocation(),
+ diag::note_pure_virtual_function)
+ << SO->second.front().Method->getDeclName() << RD->getDeclName();
+ }
+ }
+
+ if (!PureVirtualClassDiagSet)
+ PureVirtualClassDiagSet.reset(new RecordDeclSetTy);
+ PureVirtualClassDiagSet->insert(RD);
+}
+
+namespace {
+struct AbstractUsageInfo {
+ Sema &S;
+ CXXRecordDecl *Record;
+ CanQualType AbstractType;
+ bool Invalid;
+
+ AbstractUsageInfo(Sema &S, CXXRecordDecl *Record)
+ : S(S), Record(Record),
+ AbstractType(S.Context.getCanonicalType(
+ S.Context.getTypeDeclType(Record))),
+ Invalid(false) {}
+
+ void DiagnoseAbstractType() {
+ if (Invalid) return;
+ S.DiagnoseAbstractType(Record);
+ Invalid = true;
+ }
+
+ void CheckType(const NamedDecl *D, TypeLoc TL, Sema::AbstractDiagSelID Sel);
+};
+
+struct CheckAbstractUsage {
+ AbstractUsageInfo &Info;
+ const NamedDecl *Ctx;
+
+ CheckAbstractUsage(AbstractUsageInfo &Info, const NamedDecl *Ctx)
+ : Info(Info), Ctx(Ctx) {}
+
+ void Visit(TypeLoc TL, Sema::AbstractDiagSelID Sel) {
+ switch (TL.getTypeLocClass()) {
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ case TypeLoc::CLASS: Check(TL.castAs<CLASS##TypeLoc>(), Sel); break;
+#include "clang/AST/TypeLocNodes.def"
+ }
+ }
+
+ void Check(FunctionProtoTypeLoc TL, Sema::AbstractDiagSelID Sel) {
+ Visit(TL.getReturnLoc(), Sema::AbstractReturnType);
+ for (unsigned I = 0, E = TL.getNumParams(); I != E; ++I) {
+ if (!TL.getParam(I))
+ continue;
+
+ TypeSourceInfo *TSI = TL.getParam(I)->getTypeSourceInfo();
+ if (TSI) Visit(TSI->getTypeLoc(), Sema::AbstractParamType);
+ }
+ }
+
+ void Check(ArrayTypeLoc TL, Sema::AbstractDiagSelID Sel) {
+ Visit(TL.getElementLoc(), Sema::AbstractArrayType);
+ }
+
+ void Check(TemplateSpecializationTypeLoc TL, Sema::AbstractDiagSelID Sel) {
+ // Visit the type parameters from a permissive context.
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
+ TemplateArgumentLoc TAL = TL.getArgLoc(I);
+ if (TAL.getArgument().getKind() == TemplateArgument::Type)
+ if (TypeSourceInfo *TSI = TAL.getTypeSourceInfo())
+ Visit(TSI->getTypeLoc(), Sema::AbstractNone);
+ // TODO: other template argument types?
+ }
+ }
+
+ // Visit pointee types from a permissive context.
+#define CheckPolymorphic(Type) \
+ void Check(Type TL, Sema::AbstractDiagSelID Sel) { \
+ Visit(TL.getNextTypeLoc(), Sema::AbstractNone); \
+ }
+ CheckPolymorphic(PointerTypeLoc)
+ CheckPolymorphic(ReferenceTypeLoc)
+ CheckPolymorphic(MemberPointerTypeLoc)
+ CheckPolymorphic(BlockPointerTypeLoc)
+ CheckPolymorphic(AtomicTypeLoc)
+
+ /// Handle all the types we haven't given a more specific
+ /// implementation for above.
+ void Check(TypeLoc TL, Sema::AbstractDiagSelID Sel) {
+ // Every other kind of type that we haven't called out already
+ // that has an inner type is either (1) sugar or (2) contains that
+ // inner type in some way as a subobject.
+ if (TypeLoc Next = TL.getNextTypeLoc())
+ return Visit(Next, Sel);
+
+ // If there's no inner type and we're in a permissive context,
+ // don't diagnose.
+ if (Sel == Sema::AbstractNone) return;
+
+ // Check whether the type matches the abstract type.
+ QualType T = TL.getType();
+ if (T->isArrayType()) {
+ Sel = Sema::AbstractArrayType;
+ T = Info.S.Context.getBaseElementType(T);
+ }
+ CanQualType CT = T->getCanonicalTypeUnqualified().getUnqualifiedType();
+ if (CT != Info.AbstractType) return;
+
+ // It matched; do some magic.
+ if (Sel == Sema::AbstractArrayType) {
+ Info.S.Diag(Ctx->getLocation(), diag::err_array_of_abstract_type)
+ << T << TL.getSourceRange();
+ } else {
+ Info.S.Diag(Ctx->getLocation(), diag::err_abstract_type_in_decl)
+ << Sel << T << TL.getSourceRange();
+ }
+ Info.DiagnoseAbstractType();
+ }
+};
+
+void AbstractUsageInfo::CheckType(const NamedDecl *D, TypeLoc TL,
+ Sema::AbstractDiagSelID Sel) {
+ CheckAbstractUsage(*this, D).Visit(TL, Sel);
+}
+
+}
+
+/// Check for invalid uses of an abstract type in a method declaration.
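+/// For example (illustrative), within
+///   struct A { virtual void f() = 0; A g(); };
+/// the declaration of 'g' returning A by value is diagnosed here.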
+static void CheckAbstractClassUsage(AbstractUsageInfo &Info,
+ CXXMethodDecl *MD) {
+ // No need to do the check on definitions, which require that
+ // the return/param types be complete.
+ if (MD->doesThisDeclarationHaveABody())
+ return;
+
+ // For safety's sake, just ignore it if we don't have type source
+ // information. This should never happen for non-implicit methods,
+ // but...
+ if (TypeSourceInfo *TSI = MD->getTypeSourceInfo())
+ Info.CheckType(MD, TSI->getTypeLoc(), Sema::AbstractNone);
+}
+
+/// Check for invalid uses of an abstract type within a class definition.
+static void CheckAbstractClassUsage(AbstractUsageInfo &Info,
+ CXXRecordDecl *RD) {
+ for (auto *D : RD->decls()) {
+ if (D->isImplicit()) continue;
+
+ // Methods and method templates.
+ if (isa<CXXMethodDecl>(D)) {
+ CheckAbstractClassUsage(Info, cast<CXXMethodDecl>(D));
+ } else if (isa<FunctionTemplateDecl>(D)) {
+ FunctionDecl *FD = cast<FunctionTemplateDecl>(D)->getTemplatedDecl();
+ CheckAbstractClassUsage(Info, cast<CXXMethodDecl>(FD));
+
+ // Fields and static variables.
+ } else if (isa<FieldDecl>(D)) {
+ FieldDecl *FD = cast<FieldDecl>(D);
+ if (TypeSourceInfo *TSI = FD->getTypeSourceInfo())
+ Info.CheckType(FD, TSI->getTypeLoc(), Sema::AbstractFieldType);
+ } else if (isa<VarDecl>(D)) {
+ VarDecl *VD = cast<VarDecl>(D);
+ if (TypeSourceInfo *TSI = VD->getTypeSourceInfo())
+ Info.CheckType(VD, TSI->getTypeLoc(), Sema::AbstractVariableType);
+
+ // Nested classes and class templates.
+ } else if (isa<CXXRecordDecl>(D)) {
+ CheckAbstractClassUsage(Info, cast<CXXRecordDecl>(D));
+ } else if (isa<ClassTemplateDecl>(D)) {
+ CheckAbstractClassUsage(Info,
+ cast<ClassTemplateDecl>(D)->getTemplatedDecl());
+ }
+ }
+}
+
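+// For a dllexported class, e.g. (illustrative):
+//   struct __declspec(dllexport) S { int f() { return 0; } };
+// member functions are referenced here so that they get emitted and
+// exported even if this translation unit never calls them.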
+static void ReferenceDllExportedMethods(Sema &S, CXXRecordDecl *Class) {
+ Attr *ClassAttr = getDLLAttr(Class);
+ if (!ClassAttr)
+ return;
+
+ assert(ClassAttr->getKind() == attr::DLLExport);
+
+ TemplateSpecializationKind TSK = Class->getTemplateSpecializationKind();
+
+ if (TSK == TSK_ExplicitInstantiationDeclaration)
+ // Don't go any further if this is just an explicit instantiation
+ // declaration.
+ return;
+
+ for (Decl *Member : Class->decls()) {
+ auto *MD = dyn_cast<CXXMethodDecl>(Member);
+ if (!MD)
+ continue;
+
+ if (Member->getAttr<DLLExportAttr>()) {
+ if (MD->isUserProvided()) {
+ // Instantiate non-default class member functions ...
+
+        // ... except for certain kinds of template specializations.
+ if (TSK == TSK_ImplicitInstantiation && !ClassAttr->isInherited())
+ continue;
+
+ S.MarkFunctionReferenced(Class->getLocation(), MD);
+
+ // The function will be passed to the consumer when its definition is
+ // encountered.
+ } else if (!MD->isTrivial() || MD->isExplicitlyDefaulted() ||
+ MD->isCopyAssignmentOperator() ||
+ MD->isMoveAssignmentOperator()) {
+ // Synthesize and instantiate non-trivial implicit methods, explicitly
+ // defaulted methods, and the copy and move assignment operators. The
+ // latter are exported even if they are trivial, because the address of
+        // an operator can be taken and should compare equal across libraries.
+ DiagnosticErrorTrap Trap(S.Diags);
+ S.MarkFunctionReferenced(Class->getLocation(), MD);
+ if (Trap.hasErrorOccurred()) {
+ S.Diag(ClassAttr->getLocation(), diag::note_due_to_dllexported_class)
+ << Class->getName() << !S.getLangOpts().CPlusPlus11;
+ break;
+ }
+
+ // There is no later point when we will see the definition of this
+ // function, so pass it to the consumer now.
+ S.Consumer.HandleTopLevelDecl(DeclGroupRef(MD));
+ }
+ }
+ }
+}
+
+/// \brief Check class-level dllimport/dllexport attribute.
+void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
+ Attr *ClassAttr = getDLLAttr(Class);
+
+  // MSVC propagates DLL attributes to partial class template specializations.
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft() && !ClassAttr) {
+ if (auto *Spec = dyn_cast<ClassTemplatePartialSpecializationDecl>(Class)) {
+ if (Attr *TemplateAttr =
+ getDLLAttr(Spec->getSpecializedTemplate()->getTemplatedDecl())) {
+ auto *A = cast<InheritableAttr>(TemplateAttr->clone(getASTContext()));
+ A->setInherited(true);
+ ClassAttr = A;
+ }
+ }
+ }
+
+ if (!ClassAttr)
+ return;
+
+ if (!Class->isExternallyVisible()) {
+ Diag(Class->getLocation(), diag::err_attribute_dll_not_extern)
+ << Class << ClassAttr;
+ return;
+ }
+
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ !ClassAttr->isInherited()) {
+    // Diagnose dll attributes on members of a class with a dll attribute.
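+    // For example (illustrative):
+    //   struct __declspec(dllexport) S { __declspec(dllimport) void f(); };
+    // the attribute on 'f' is rejected here.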
+ for (Decl *Member : Class->decls()) {
+ if (!isa<VarDecl>(Member) && !isa<CXXMethodDecl>(Member))
+ continue;
+ InheritableAttr *MemberAttr = getDLLAttr(Member);
+ if (!MemberAttr || MemberAttr->isInherited() || Member->isInvalidDecl())
+ continue;
+
+ Diag(MemberAttr->getLocation(),
+ diag::err_attribute_dll_member_of_dll_class)
+ << MemberAttr << ClassAttr;
+ Diag(ClassAttr->getLocation(), diag::note_previous_attribute);
+ Member->setInvalidDecl();
+ }
+ }
+
+ if (Class->getDescribedClassTemplate())
+ // Don't inherit dll attribute until the template is instantiated.
+ return;
+
+ // The class is either imported or exported.
+ const bool ClassExported = ClassAttr->getKind() == attr::DLLExport;
+ const bool ClassImported = !ClassExported;
+
+ TemplateSpecializationKind TSK = Class->getTemplateSpecializationKind();
+
+  // Ignore explicit dllexport on explicit class template instantiation
+  // declarations.
+ if (ClassExported && !ClassAttr->isInherited() &&
+ TSK == TSK_ExplicitInstantiationDeclaration) {
+ Class->dropAttr<DLLExportAttr>();
+ return;
+ }
+
+ // Force declaration of implicit members so they can inherit the attribute.
+ ForceDeclarationOfImplicitMembers(Class);
+
+ // FIXME: MSVC's docs say all bases must be exportable, but this doesn't
+ // seem to be true in practice?
+
+ for (Decl *Member : Class->decls()) {
+ VarDecl *VD = dyn_cast<VarDecl>(Member);
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Member);
+
+ // Only methods and static fields inherit the attributes.
+ if (!VD && !MD)
+ continue;
+
+ if (MD) {
+ // Don't process deleted methods.
+ if (MD->isDeleted())
+ continue;
+
+ if (MD->isInlined()) {
+ // MinGW does not import or export inline methods.
+ if (!Context.getTargetInfo().getCXXABI().isMicrosoft())
+ continue;
+
+ // MSVC versions before 2015 don't export the move assignment operators,
+ // so don't attempt to import them if we have a definition.
+ if (ClassImported && MD->isMoveAssignmentOperator() &&
+ !getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2015))
+ continue;
+ }
+ }
+
+ if (!cast<NamedDecl>(Member)->isExternallyVisible())
+ continue;
+
+ if (!getDLLAttr(Member)) {
+ auto *NewAttr =
+ cast<InheritableAttr>(ClassAttr->clone(getASTContext()));
+ NewAttr->setInherited(true);
+ Member->addAttr(NewAttr);
+ }
+ }
+
+ if (ClassExported)
+ DelayedDllExportClasses.push_back(Class);
+}
+
+/// \brief Perform propagation of DLL attributes from a derived class to a
+/// templated base class for MS compatibility.
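+///
+/// For example (illustrative):
+///   template <typename T> struct Base {};
+///   struct __declspec(dllexport) Derived : Base<int> {};
+/// propagates dllexport to the Base<int> specialization.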
+void Sema::propagateDLLAttrToBaseClassTemplate(
+ CXXRecordDecl *Class, Attr *ClassAttr,
+ ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc) {
+ if (getDLLAttr(
+ BaseTemplateSpec->getSpecializedTemplate()->getTemplatedDecl())) {
+ // If the base class template has a DLL attribute, don't try to change it.
+ return;
+ }
+
+ auto TSK = BaseTemplateSpec->getSpecializationKind();
+ if (!getDLLAttr(BaseTemplateSpec) &&
+ (TSK == TSK_Undeclared || TSK == TSK_ExplicitInstantiationDeclaration ||
+ TSK == TSK_ImplicitInstantiation)) {
+ // The template hasn't been instantiated yet (or it has, but only as an
+ // explicit instantiation declaration or implicit instantiation, which means
+ // we haven't codegenned any members yet), so propagate the attribute.
+ auto *NewAttr = cast<InheritableAttr>(ClassAttr->clone(getASTContext()));
+ NewAttr->setInherited(true);
+ BaseTemplateSpec->addAttr(NewAttr);
+
+      // If the template is already instantiated,
+      // checkDLLAttributeRedeclaration() needs to be run again to see the
+      // new attribute. Otherwise this will get run whenever the template is
+      // instantiated.
+ if (TSK != TSK_Undeclared)
+ checkClassLevelDLLAttribute(BaseTemplateSpec);
+
+ return;
+ }
+
+ if (getDLLAttr(BaseTemplateSpec)) {
+ // The template has already been specialized or instantiated with an
+ // attribute, explicitly or through propagation. We should not try to change
+ // it.
+ return;
+ }
+
+  // The template was previously instantiated or explicitly specialized without
+  // a dll attribute. It's too late for us to add an attribute, so warn that
+  // this is unsupported.
+ Diag(BaseLoc, diag::warn_attribute_dll_instantiated_base_class)
+ << BaseTemplateSpec->isExplicitSpecialization();
+ Diag(ClassAttr->getLocation(), diag::note_attribute);
+ if (BaseTemplateSpec->isExplicitSpecialization()) {
+ Diag(BaseTemplateSpec->getLocation(),
+ diag::note_template_class_explicit_specialization_was_here)
+ << BaseTemplateSpec;
+ } else {
+ Diag(BaseTemplateSpec->getPointOfInstantiation(),
+ diag::note_template_class_instantiation_was_here)
+ << BaseTemplateSpec;
+ }
+}
+
+/// \brief Perform semantic checks on a class definition that has been
+/// completed, introducing implicitly-declared members, checking for
+/// abstract types, etc.
+void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
+ if (!Record)
+ return;
+
+ if (Record->isAbstract() && !Record->isInvalidDecl()) {
+ AbstractUsageInfo Info(*this, Record);
+ CheckAbstractClassUsage(Info, Record);
+ }
+
+ // If this is not an aggregate type and has no user-declared constructor,
+ // complain about any non-static data members of reference or const scalar
+ // type, since they will never get initializers.
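+  // For example (illustrative):
+  //   struct S { virtual void f(); const int c; };
+  // warns because 'c' can never be initialized.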
+ if (!Record->isInvalidDecl() && !Record->isDependentType() &&
+ !Record->isAggregate() && !Record->hasUserDeclaredConstructor() &&
+ !Record->isLambda()) {
+ bool Complained = false;
+ for (const auto *F : Record->fields()) {
+ if (F->hasInClassInitializer() || F->isUnnamedBitfield())
+ continue;
+
+ if (F->getType()->isReferenceType() ||
+ (F->getType().isConstQualified() && F->getType()->isScalarType())) {
+ if (!Complained) {
+ Diag(Record->getLocation(), diag::warn_no_constructor_for_refconst)
+ << Record->getTagKind() << Record;
+ Complained = true;
+ }
+
+ Diag(F->getLocation(), diag::note_refconst_member_not_initialized)
+ << F->getType()->isReferenceType()
+ << F->getDeclName();
+ }
+ }
+ }
+
+ if (Record->getIdentifier()) {
+ // C++ [class.mem]p13:
+ // If T is the name of a class, then each of the following shall have a
+ // name different from T:
+ // - every member of every anonymous union that is a member of class T.
+ //
+ // C++ [class.mem]p14:
+ // In addition, if class T has a user-declared constructor (12.1), every
+ // non-static data member of class T shall have a name different from T.
+ DeclContext::lookup_result R = Record->lookup(Record->getDeclName());
+ for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E;
+ ++I) {
+ NamedDecl *D = *I;
+ if ((isa<FieldDecl>(D) && Record->hasUserDeclaredConstructor()) ||
+ isa<IndirectFieldDecl>(D)) {
+ Diag(D->getLocation(), diag::err_member_name_of_class)
+ << D->getDeclName();
+ break;
+ }
+ }
+ }
+
+  // Warn if the class has virtual methods but a non-virtual public destructor.
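+  // For example (illustrative), 'struct S { virtual void f(); };' warns:
+  // deleting a derived object through an 'S *' would skip the derived
+  // class's destructor.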
+ if (Record->isPolymorphic() && !Record->isDependentType()) {
+ CXXDestructorDecl *dtor = Record->getDestructor();
+ if ((!dtor || (!dtor->isVirtual() && dtor->getAccess() == AS_public)) &&
+ !Record->hasAttr<FinalAttr>())
+ Diag(dtor ? dtor->getLocation() : Record->getLocation(),
+ diag::warn_non_virtual_dtor) << Context.getRecordType(Record);
+ }
+
+ if (Record->isAbstract()) {
+ if (FinalAttr *FA = Record->getAttr<FinalAttr>()) {
+ Diag(Record->getLocation(), diag::warn_abstract_final_class)
+ << FA->isSpelledAsSealed();
+ DiagnoseAbstractType(Record);
+ }
+ }
+
+ bool HasMethodWithOverrideControl = false,
+ HasOverridingMethodWithoutOverrideControl = false;
+ if (!Record->isDependentType()) {
+ for (auto *M : Record->methods()) {
+ // See if a method overloads virtual methods in a base
+ // class without overriding any.
+ if (!M->isStatic())
+ DiagnoseHiddenVirtualMethods(M);
+ if (M->hasAttr<OverrideAttr>())
+ HasMethodWithOverrideControl = true;
+ else if (M->size_overridden_methods() > 0)
+ HasOverridingMethodWithoutOverrideControl = true;
+ // Check whether the explicitly-defaulted special members are valid.
+ if (!M->isInvalidDecl() && M->isExplicitlyDefaulted())
+ CheckExplicitlyDefaultedSpecialMember(M);
+
+ // For an explicitly defaulted or deleted special member, we defer
+ // determining triviality until the class is complete. That time is now!
+ if (!M->isImplicit() && !M->isUserProvided()) {
+ CXXSpecialMember CSM = getSpecialMember(M);
+ if (CSM != CXXInvalid) {
+ M->setTrivial(SpecialMemberIsTrivial(M, CSM));
+
+ // Inform the class that we've finished declaring this member.
+ Record->finishedDefaultedOrDeletedMember(M);
+ }
+ }
+ }
+ }
+
+ if (HasMethodWithOverrideControl &&
+ HasOverridingMethodWithoutOverrideControl) {
+    // At least one method has the 'override' control declared.
+    // Diagnose all other overriding methods which do not have 'override'
+    // specified on them.
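+    // For example (illustrative):
+    //   struct B { virtual void f(); virtual void g(); };
+    //   struct D : B { void f() override; void g(); };
+    // 'D::g' is diagnosed for the missing 'override'.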
+ for (auto *M : Record->methods())
+ DiagnoseAbsenceOfOverrideControl(M);
+ }
+
+ // ms_struct is a request to use the same ABI rules as MSVC. Check
+ // whether this class uses any C++ features that are implemented
+ // completely differently in MSVC, and if so, emit a diagnostic.
+ // That diagnostic defaults to an error, but we allow projects to
+ // map it down to a warning (or ignore it). It's a fairly common
+ // practice among users of the ms_struct pragma to mass-annotate
+ // headers, sweeping up a bunch of types that the project doesn't
+ // really rely on MSVC-compatible layout for. We must therefore
+ // support "ms_struct except for C++ stuff" as a secondary ABI.
+ if (Record->isMsStruct(Context) &&
+ (Record->isPolymorphic() || Record->getNumBases())) {
+ Diag(Record->getLocation(), diag::warn_cxx_ms_struct);
+ }
+
+ // Declare inheriting constructors. We do this eagerly here because:
+ // - The standard requires an eager diagnostic for conflicting inheriting
+ // constructors from different classes.
+ // - The lazy declaration of the other implicit constructors is so as to not
+ // waste space and performance on classes that are not meant to be
+ // instantiated (e.g. meta-functions). This doesn't apply to classes that
+ // have inheriting constructors.
+ DeclareInheritingConstructors(Record);
+
+ checkClassLevelDLLAttribute(Record);
+}
+
+/// Look up the special member function that would be called by a special
+/// member function for a subobject of class type.
+///
+/// \param Class The class type of the subobject.
+/// \param CSM The kind of special member function.
+/// \param FieldQuals If the subobject is a field, its cv-qualifiers.
+/// \param ConstRHS True if this is a copy operation with a const object
+/// on its RHS, that is, if the argument to the outer special member
+/// function is 'const' and this is not a field marked 'mutable'.
+static Sema::SpecialMemberOverloadResult *lookupCallFromSpecialMember(
+ Sema &S, CXXRecordDecl *Class, Sema::CXXSpecialMember CSM,
+ unsigned FieldQuals, bool ConstRHS) {
+ unsigned LHSQuals = 0;
+ if (CSM == Sema::CXXCopyAssignment || CSM == Sema::CXXMoveAssignment)
+ LHSQuals = FieldQuals;
+
+ unsigned RHSQuals = FieldQuals;
+ if (CSM == Sema::CXXDefaultConstructor || CSM == Sema::CXXDestructor)
+ RHSQuals = 0;
+ else if (ConstRHS)
+ RHSQuals |= Qualifiers::Const;
+
+ return S.LookupSpecialMember(Class, CSM,
+ RHSQuals & Qualifiers::Const,
+ RHSQuals & Qualifiers::Volatile,
+ false,
+ LHSQuals & Qualifiers::Const,
+ LHSQuals & Qualifiers::Volatile);
+}
+
+/// Is the special member function which would be selected to perform the
+/// specified operation on the specified class type a constexpr constructor?
+static bool specialMemberIsConstexpr(Sema &S, CXXRecordDecl *ClassDecl,
+ Sema::CXXSpecialMember CSM,
+ unsigned Quals, bool ConstRHS) {
+ Sema::SpecialMemberOverloadResult *SMOR =
+ lookupCallFromSpecialMember(S, ClassDecl, CSM, Quals, ConstRHS);
+ if (!SMOR || !SMOR->getMethod())
+ // A constructor we wouldn't select can't be "involved in initializing"
+ // anything.
+ return true;
+ return SMOR->getMethod()->isConstexpr();
+}
+
+/// Determine whether the specified special member function would be constexpr
+/// if it were implicitly defined.
+static bool defaultedSpecialMemberIsConstexpr(Sema &S, CXXRecordDecl *ClassDecl,
+ Sema::CXXSpecialMember CSM,
+ bool ConstArg) {
+ if (!S.getLangOpts().CPlusPlus11)
+ return false;
+
+ // C++11 [dcl.constexpr]p4:
+ // In the definition of a constexpr constructor [...]
+ bool Ctor = true;
+ switch (CSM) {
+ case Sema::CXXDefaultConstructor:
+ // Since default constructor lookup is essentially trivial (and cannot
+ // involve, for instance, template instantiation), we compute whether a
+ // defaulted default constructor is constexpr directly within CXXRecordDecl.
+ //
+ // This is important for performance; we need to know whether the default
+ // constructor is constexpr to determine whether the type is a literal type.
+ return ClassDecl->defaultedDefaultConstructorIsConstexpr();
+
+ case Sema::CXXCopyConstructor:
+ case Sema::CXXMoveConstructor:
+ // For copy or move constructors, we need to perform overload resolution.
+ break;
+
+ case Sema::CXXCopyAssignment:
+ case Sema::CXXMoveAssignment:
+ if (!S.getLangOpts().CPlusPlus14)
+ return false;
+ // In C++1y, we need to perform overload resolution.
+ Ctor = false;
+ break;
+
+ case Sema::CXXDestructor:
+ case Sema::CXXInvalid:
+ return false;
+ }
+
+ // -- if the class is a non-empty union, or for each non-empty anonymous
+ // union member of a non-union class, exactly one non-static data member
+ // shall be initialized; [DR1359]
+ //
+  // If we squint, this is guaranteed, since exactly one non-static data member
+  // will be initialized (if the constructor isn't deleted); we just don't know
+  // which one.
+ if (Ctor && ClassDecl->isUnion())
+ return true;
+
+ // -- the class shall not have any virtual base classes;
+ if (Ctor && ClassDecl->getNumVBases())
+ return false;
+
+ // C++1y [class.copy]p26:
+ // -- [the class] is a literal type, and
+ if (!Ctor && !ClassDecl->isLiteral())
+ return false;
+
+ // -- every constructor involved in initializing [...] base class
+ // sub-objects shall be a constexpr constructor;
+ // -- the assignment operator selected to copy/move each direct base
+ // class is a constexpr function, and
+ for (const auto &B : ClassDecl->bases()) {
+ const RecordType *BaseType = B.getType()->getAs<RecordType>();
+ if (!BaseType) continue;
+
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ if (!specialMemberIsConstexpr(S, BaseClassDecl, CSM, 0, ConstArg))
+ return false;
+ }
+
+ // -- every constructor involved in initializing non-static data members
+ // [...] shall be a constexpr constructor;
+ // -- every non-static data member and base class sub-object shall be
+ // initialized
+ // -- for each non-static data member of X that is of class type (or array
+ // thereof), the assignment operator selected to copy/move that member is
+ // a constexpr function
+ for (const auto *F : ClassDecl->fields()) {
+ if (F->isInvalidDecl())
+ continue;
+ QualType BaseType = S.Context.getBaseElementType(F->getType());
+ if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
+ CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (!specialMemberIsConstexpr(S, FieldRecDecl, CSM,
+ BaseType.getCVRQualifiers(),
+ ConstArg && !F->isMutable()))
+ return false;
+ }
+ }
+
+ // All OK, it's constexpr!
+ return true;
+}
+
+static Sema::ImplicitExceptionSpecification
+computeImplicitExceptionSpec(Sema &S, SourceLocation Loc, CXXMethodDecl *MD) {
+ switch (S.getSpecialMember(MD)) {
+ case Sema::CXXDefaultConstructor:
+ return S.ComputeDefaultedDefaultCtorExceptionSpec(Loc, MD);
+ case Sema::CXXCopyConstructor:
+ return S.ComputeDefaultedCopyCtorExceptionSpec(MD);
+ case Sema::CXXCopyAssignment:
+ return S.ComputeDefaultedCopyAssignmentExceptionSpec(MD);
+ case Sema::CXXMoveConstructor:
+ return S.ComputeDefaultedMoveCtorExceptionSpec(MD);
+ case Sema::CXXMoveAssignment:
+ return S.ComputeDefaultedMoveAssignmentExceptionSpec(MD);
+ case Sema::CXXDestructor:
+ return S.ComputeDefaultedDtorExceptionSpec(MD);
+ case Sema::CXXInvalid:
+ break;
+ }
+ assert(cast<CXXConstructorDecl>(MD)->getInheritedConstructor() &&
+ "only special members have implicit exception specs");
+ return S.ComputeInheritingCtorExceptionSpec(cast<CXXConstructorDecl>(MD));
+}
+
+static FunctionProtoType::ExtProtoInfo getImplicitMethodEPI(Sema &S,
+ CXXMethodDecl *MD) {
+ FunctionProtoType::ExtProtoInfo EPI;
+
+ // Build an exception specification pointing back at this member.
+ EPI.ExceptionSpec.Type = EST_Unevaluated;
+ EPI.ExceptionSpec.SourceDecl = MD;
+
+ // Set the calling convention to the default for C++ instance methods.
+ EPI.ExtInfo = EPI.ExtInfo.withCallingConv(
+ S.Context.getDefaultCallingConvention(/*IsVariadic=*/false,
+ /*IsCXXMethod=*/true));
+ return EPI;
+}
+
+void Sema::EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD) {
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
+ if (FPT->getExceptionSpecType() != EST_Unevaluated)
+ return;
+
+ // Evaluate the exception specification.
+ auto ESI = computeImplicitExceptionSpec(*this, Loc, MD).getExceptionSpec();
+
+ // Update the type of the special member to use it.
+ UpdateExceptionSpec(MD, ESI);
+
+ // A user-provided destructor can be defined outside the class. When that
+ // happens, be sure to update the exception specification on both
+ // declarations.
+ const FunctionProtoType *CanonicalFPT =
+ MD->getCanonicalDecl()->getType()->castAs<FunctionProtoType>();
+ if (CanonicalFPT->getExceptionSpecType() == EST_Unevaluated)
+ UpdateExceptionSpec(MD->getCanonicalDecl(), ESI);
+}
+
+void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
+ CXXRecordDecl *RD = MD->getParent();
+ CXXSpecialMember CSM = getSpecialMember(MD);
+
+ assert(MD->isExplicitlyDefaulted() && CSM != CXXInvalid &&
+ "not an explicitly-defaulted special member");
+
+ // Whether this was the first-declared instance of the constructor.
+ // This affects whether we implicitly add an exception spec and constexpr.
+ bool First = MD == MD->getCanonicalDecl();
+
+ bool HadError = false;
+
+ // C++11 [dcl.fct.def.default]p1:
+ // A function that is explicitly defaulted shall
+ // -- be a special member function (checked elsewhere),
+ // -- have the same type (except for ref-qualifiers, and except that a
+ // copy operation can take a non-const reference) as an implicit
+ // declaration, and
+ // -- not have default arguments.
+ unsigned ExpectedParams = 1;
+ if (CSM == CXXDefaultConstructor || CSM == CXXDestructor)
+ ExpectedParams = 0;
+ if (MD->getNumParams() != ExpectedParams) {
+    // This also checks for default arguments: a copy or move constructor
+    // with a default argument is classified as a default constructor, and
+    // assignment operations and destructors can't have default arguments.
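+    // For example (illustrative), 'S(const S &, int = 0) = default;' is
+    // classified as a default constructor and is rejected here because it
+    // takes parameters.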
+ Diag(MD->getLocation(), diag::err_defaulted_special_member_params)
+ << CSM << MD->getSourceRange();
+ HadError = true;
+ } else if (MD->isVariadic()) {
+ Diag(MD->getLocation(), diag::err_defaulted_special_member_variadic)
+ << CSM << MD->getSourceRange();
+ HadError = true;
+ }
+
+ const FunctionProtoType *Type = MD->getType()->getAs<FunctionProtoType>();
+
+ bool CanHaveConstParam = false;
+ if (CSM == CXXCopyConstructor)
+ CanHaveConstParam = RD->implicitCopyConstructorHasConstParam();
+ else if (CSM == CXXCopyAssignment)
+ CanHaveConstParam = RD->implicitCopyAssignmentHasConstParam();
+
+ QualType ReturnType = Context.VoidTy;
+ if (CSM == CXXCopyAssignment || CSM == CXXMoveAssignment) {
+ // Check for return type matching.
+ ReturnType = Type->getReturnType();
+ QualType ExpectedReturnType =
+ Context.getLValueReferenceType(Context.getTypeDeclType(RD));
+ if (!Context.hasSameType(ReturnType, ExpectedReturnType)) {
+ Diag(MD->getLocation(), diag::err_defaulted_special_member_return_type)
+ << (CSM == CXXMoveAssignment) << ExpectedReturnType;
+ HadError = true;
+ }
+
+ // A defaulted special member cannot have cv-qualifiers.
+ if (Type->getTypeQuals()) {
+ Diag(MD->getLocation(), diag::err_defaulted_special_member_quals)
+ << (CSM == CXXMoveAssignment) << getLangOpts().CPlusPlus14;
+ HadError = true;
+ }
+ }
+
+ // Check for parameter type matching.
+ QualType ArgType = ExpectedParams ? Type->getParamType(0) : QualType();
+ bool HasConstParam = false;
+ if (ExpectedParams && ArgType->isReferenceType()) {
+ // Argument must be reference to possibly-const T.
+ QualType ReferentType = ArgType->getPointeeType();
+ HasConstParam = ReferentType.isConstQualified();
+
+ if (ReferentType.isVolatileQualified()) {
+ Diag(MD->getLocation(),
+ diag::err_defaulted_special_member_volatile_param) << CSM;
+ HadError = true;
+ }
+
+ if (HasConstParam && !CanHaveConstParam) {
+ if (CSM == CXXCopyConstructor || CSM == CXXCopyAssignment) {
+ Diag(MD->getLocation(),
+ diag::err_defaulted_special_member_copy_const_param)
+ << (CSM == CXXCopyAssignment);
+ // FIXME: Explain why this special member can't be const.
+ } else {
+ Diag(MD->getLocation(),
+ diag::err_defaulted_special_member_move_const_param)
+ << (CSM == CXXMoveAssignment);
+ }
+ HadError = true;
+ }
+ } else if (ExpectedParams) {
+ // A copy assignment operator can take its argument by value, but a
+ // defaulted one cannot.
+ assert(CSM == CXXCopyAssignment && "unexpected non-ref argument");
+ Diag(MD->getLocation(), diag::err_defaulted_copy_assign_not_ref);
+ HadError = true;
+ }
+
+ // C++11 [dcl.fct.def.default]p2:
+ // An explicitly-defaulted function may be declared constexpr only if it
+ // would have been implicitly declared as constexpr,
+ // Do not apply this rule to members of class templates, since core issue 1358
+ // makes such functions always instantiate to constexpr functions. For
+ // functions which cannot be constexpr (for non-constructors in C++11 and for
+ // destructors in C++1y), this is checked elsewhere.
+ bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, RD, CSM,
+ HasConstParam);
+ if ((getLangOpts().CPlusPlus14 ? !isa<CXXDestructorDecl>(MD)
+ : isa<CXXConstructorDecl>(MD)) &&
+ MD->isConstexpr() && !Constexpr &&
+ MD->getTemplatedKind() == FunctionDecl::TK_NonTemplate) {
+ Diag(MD->getLocStart(), diag::err_incorrect_defaulted_constexpr) << CSM;
+ // FIXME: Explain why the special member can't be constexpr.
+ HadError = true;
+ }
+
+ // and may have an explicit exception-specification only if it is compatible
+ // with the exception-specification on the implicit declaration.
+ if (Type->hasExceptionSpec()) {
+ // Delay the check if this is the first declaration of the special member,
+ // since we may not have parsed some necessary in-class initializers yet.
+ if (First) {
+ // If the exception specification needs to be instantiated, do so now,
+ // before we clobber it with an EST_Unevaluated specification below.
+ if (Type->getExceptionSpecType() == EST_Uninstantiated) {
+ InstantiateExceptionSpec(MD->getLocStart(), MD);
+ Type = MD->getType()->getAs<FunctionProtoType>();
+ }
+ DelayedDefaultedMemberExceptionSpecs.push_back(std::make_pair(MD, Type));
+ } else
+ CheckExplicitlyDefaultedMemberExceptionSpec(MD, Type);
+ }
+
+ // If a function is explicitly defaulted on its first declaration,
+ if (First) {
+ // -- it is implicitly considered to be constexpr if the implicit
+ // definition would be,
+ MD->setConstexpr(Constexpr);
+
+ // -- it is implicitly considered to have the same exception-specification
+ // as if it had been implicitly declared,
+ FunctionProtoType::ExtProtoInfo EPI = Type->getExtProtoInfo();
+ EPI.ExceptionSpec.Type = EST_Unevaluated;
+ EPI.ExceptionSpec.SourceDecl = MD;
+ MD->setType(Context.getFunctionType(ReturnType,
+ llvm::makeArrayRef(&ArgType,
+ ExpectedParams),
+ EPI));
+ }
+
+ if (ShouldDeleteSpecialMember(MD, CSM)) {
+ if (First) {
+ SetDeclDeleted(MD, MD->getLocation());
+ } else {
+ // C++11 [dcl.fct.def.default]p4:
+ // [For a] user-provided explicitly-defaulted function [...] if such a
+ // function is implicitly defined as deleted, the program is ill-formed.
+ Diag(MD->getLocation(), diag::err_out_of_line_default_deletes) << CSM;
+ ShouldDeleteSpecialMember(MD, CSM, /*Diagnose*/true);
+ HadError = true;
+ }
+ }
+
+ if (HadError)
+ MD->setInvalidDecl();
+}
+
+/// Check whether the exception specification provided for an
+/// explicitly-defaulted special member matches the exception specification
+/// that would have been generated for an implicit special member, per
+/// C++11 [dcl.fct.def.default]p2.
+void Sema::CheckExplicitlyDefaultedMemberExceptionSpec(
+ CXXMethodDecl *MD, const FunctionProtoType *SpecifiedType) {
+ // If the exception specification was explicitly specified but hadn't been
+ // parsed when the method was defaulted, grab it now.
+ if (SpecifiedType->getExceptionSpecType() == EST_Unparsed)
+ SpecifiedType =
+ MD->getTypeSourceInfo()->getType()->castAs<FunctionProtoType>();
+
+ // Compute the implicit exception specification.
+ CallingConv CC = Context.getDefaultCallingConvention(/*IsVariadic=*/false,
+ /*IsCXXMethod=*/true);
+ FunctionProtoType::ExtProtoInfo EPI(CC);
+ EPI.ExceptionSpec = computeImplicitExceptionSpec(*this, MD->getLocation(), MD)
+ .getExceptionSpec();
+ const FunctionProtoType *ImplicitType = cast<FunctionProtoType>(
+ Context.getFunctionType(Context.VoidTy, None, EPI));
+
+ // Ensure that it matches.
+ CheckEquivalentExceptionSpec(
+ PDiag(diag::err_incorrect_defaulted_exception_spec)
+ << getSpecialMember(MD), PDiag(),
+ ImplicitType, SourceLocation(),
+ SpecifiedType, MD->getLocation());
+}
+
+void Sema::CheckDelayedMemberExceptionSpecs() {
+ decltype(DelayedExceptionSpecChecks) Checks;
+ decltype(DelayedDefaultedMemberExceptionSpecs) Specs;
+
+ std::swap(Checks, DelayedExceptionSpecChecks);
+ std::swap(Specs, DelayedDefaultedMemberExceptionSpecs);
+
+ // Perform any deferred checking of exception specifications for virtual
+ // destructors.
+ for (auto &Check : Checks)
+ CheckOverridingFunctionExceptionSpec(Check.first, Check.second);
+
+ // Check that any explicitly-defaulted methods have exception specifications
+ // compatible with their implicit exception specifications.
+ for (auto &Spec : Specs)
+ CheckExplicitlyDefaultedMemberExceptionSpec(Spec.first, Spec.second);
+}
+
+namespace {
+struct SpecialMemberDeletionInfo {
+ Sema &S;
+ CXXMethodDecl *MD;
+ Sema::CXXSpecialMember CSM;
+ bool Diagnose;
+
+ // Properties of the special member, computed for convenience.
+ bool IsConstructor, IsAssignment, IsMove, ConstArg;
+ SourceLocation Loc;
+
+ bool AllFieldsAreConst;
+
+ SpecialMemberDeletionInfo(Sema &S, CXXMethodDecl *MD,
+ Sema::CXXSpecialMember CSM, bool Diagnose)
+ : S(S), MD(MD), CSM(CSM), Diagnose(Diagnose),
+ IsConstructor(false), IsAssignment(false), IsMove(false),
+ ConstArg(false), Loc(MD->getLocation()),
+ AllFieldsAreConst(true) {
+ switch (CSM) {
+ case Sema::CXXDefaultConstructor:
+ case Sema::CXXCopyConstructor:
+ IsConstructor = true;
+ break;
+ case Sema::CXXMoveConstructor:
+ IsConstructor = true;
+ IsMove = true;
+ break;
+ case Sema::CXXCopyAssignment:
+ IsAssignment = true;
+ break;
+ case Sema::CXXMoveAssignment:
+ IsAssignment = true;
+ IsMove = true;
+ break;
+ case Sema::CXXDestructor:
+ break;
+ case Sema::CXXInvalid:
+ llvm_unreachable("invalid special member kind");
+ }
+
+ if (MD->getNumParams()) {
+ if (const ReferenceType *RT =
+ MD->getParamDecl(0)->getType()->getAs<ReferenceType>())
+ ConstArg = RT->getPointeeType().isConstQualified();
+ }
+ }
+
+ bool inUnion() const { return MD->getParent()->isUnion(); }
+
+ /// Look up the corresponding special member in the given class.
+ Sema::SpecialMemberOverloadResult *lookupIn(CXXRecordDecl *Class,
+ unsigned Quals, bool IsMutable) {
+ return lookupCallFromSpecialMember(S, Class, CSM, Quals,
+ ConstArg && !IsMutable);
+ }
+
+ typedef llvm::PointerUnion<CXXBaseSpecifier*, FieldDecl*> Subobject;
+
+ bool shouldDeleteForBase(CXXBaseSpecifier *Base);
+ bool shouldDeleteForField(FieldDecl *FD);
+ bool shouldDeleteForAllConstMembers();
+
+ bool shouldDeleteForClassSubobject(CXXRecordDecl *Class, Subobject Subobj,
+ unsigned Quals);
+ bool shouldDeleteForSubobjectCall(Subobject Subobj,
+ Sema::SpecialMemberOverloadResult *SMOR,
+ bool IsDtorCallInCtor);
+
+ bool isAccessible(Subobject Subobj, CXXMethodDecl *D);
+};
+}
+
+/// Is the given special member accessible when used on the given
+/// sub-object?
+bool SpecialMemberDeletionInfo::isAccessible(Subobject Subobj,
+ CXXMethodDecl *target) {
+ /// If we're operating on a base class, the object type is the
+ /// type of this special member.
+ QualType objectTy;
+ AccessSpecifier access = target->getAccess();
+ if (CXXBaseSpecifier *base = Subobj.dyn_cast<CXXBaseSpecifier*>()) {
+ objectTy = S.Context.getTypeDeclType(MD->getParent());
+ access = CXXRecordDecl::MergeAccess(base->getAccessSpecifier(), access);
+
+ // If we're operating on a field, the object type is the type of the field.
+ } else {
+ objectTy = S.Context.getTypeDeclType(target->getParent());
+ }
+
+ return S.isSpecialMemberAccessibleForDeletion(target, access, objectTy);
+}
+
+/// Check whether we should delete a special member due to the implicit
+/// definition containing a call to a special member of a subobject.
+bool SpecialMemberDeletionInfo::shouldDeleteForSubobjectCall(
+ Subobject Subobj, Sema::SpecialMemberOverloadResult *SMOR,
+ bool IsDtorCallInCtor) {
+ CXXMethodDecl *Decl = SMOR->getMethod();
+ FieldDecl *Field = Subobj.dyn_cast<FieldDecl*>();
+
+ int DiagKind = -1;
+
+ if (SMOR->getKind() == Sema::SpecialMemberOverloadResult::NoMemberOrDeleted)
+ DiagKind = !Decl ? 0 : 1;
+ else if (SMOR->getKind() == Sema::SpecialMemberOverloadResult::Ambiguous)
+ DiagKind = 2;
+ else if (!isAccessible(Subobj, Decl))
+ DiagKind = 3;
+ else if (!IsDtorCallInCtor && Field && Field->getParent()->isUnion() &&
+ !Decl->isTrivial()) {
+ // A member of a union must have a trivial corresponding special member.
+ // As a weird special case, a destructor call from a union's constructor
+ // must be accessible and non-deleted, but need not be trivial. Such a
+ // destructor is never actually called, but is semantically checked as
+ // if it were.
+ DiagKind = 4;
+ }
+
+ if (DiagKind == -1)
+ return false;
+
+ if (Diagnose) {
+ if (Field) {
+ S.Diag(Field->getLocation(),
+ diag::note_deleted_special_member_class_subobject)
+ << CSM << MD->getParent() << /*IsField*/true
+ << Field << DiagKind << IsDtorCallInCtor;
+ } else {
+ CXXBaseSpecifier *Base = Subobj.get<CXXBaseSpecifier*>();
+ S.Diag(Base->getLocStart(),
+ diag::note_deleted_special_member_class_subobject)
+ << CSM << MD->getParent() << /*IsField*/false
+ << Base->getType() << DiagKind << IsDtorCallInCtor;
+ }
+
+ if (DiagKind == 1)
+ S.NoteDeletedFunction(Decl);
+ // FIXME: Explain inaccessibility if DiagKind == 3.
+ }
+
+ return true;
+}
+
+/// Check whether we should delete a special member function due to having a
+/// direct or virtual base class or non-static data member of class type M.
+bool SpecialMemberDeletionInfo::shouldDeleteForClassSubobject(
+ CXXRecordDecl *Class, Subobject Subobj, unsigned Quals) {
+ FieldDecl *Field = Subobj.dyn_cast<FieldDecl*>();
+ bool IsMutable = Field && Field->isMutable();
+
+ // C++11 [class.ctor]p5:
+ // -- any direct or virtual base class, or non-static data member with no
+ // brace-or-equal-initializer, has class type M (or array thereof) and
+ // either M has no default constructor or overload resolution as applied
+ // to M's default constructor results in an ambiguity or in a function
+ // that is deleted or inaccessible
+ // C++11 [class.copy]p11, C++11 [class.copy]p23:
+ // -- a direct or virtual base class B that cannot be copied/moved because
+ // overload resolution, as applied to B's corresponding special member,
+ // results in an ambiguity or a function that is deleted or inaccessible
+ // from the defaulted special member
+ // C++11 [class.dtor]p5:
+ // -- any direct or virtual base class [...] has a type with a destructor
+ // that is deleted or inaccessible
+ if (!(CSM == Sema::CXXDefaultConstructor &&
+ Field && Field->hasInClassInitializer()) &&
+ shouldDeleteForSubobjectCall(Subobj, lookupIn(Class, Quals, IsMutable),
+ false))
+ return true;
+
+ // C++11 [class.ctor]p5, C++11 [class.copy]p11:
+ // -- any direct or virtual base class or non-static data member has a
+ // type with a destructor that is deleted or inaccessible
+ if (IsConstructor) {
+ Sema::SpecialMemberOverloadResult *SMOR =
+ S.LookupSpecialMember(Class, Sema::CXXDestructor,
+ false, false, false, false, false);
+ if (shouldDeleteForSubobjectCall(Subobj, SMOR, true))
+ return true;
+ }
+
+ return false;
+}
+
+/// Check whether we should delete a special member function due to the class
+/// having a particular direct or virtual base class.
+bool SpecialMemberDeletionInfo::shouldDeleteForBase(CXXBaseSpecifier *Base) {
+ CXXRecordDecl *BaseClass = Base->getType()->getAsCXXRecordDecl();
+  // If the program is correct, BaseClass cannot be null, but if it is, the
+  // error must be reported elsewhere.
+ return BaseClass && shouldDeleteForClassSubobject(BaseClass, Base, 0);
+}
+
+/// Check whether we should delete a special member function due to the class
+/// having a particular non-static data member.
+bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) {
+ QualType FieldType = S.Context.getBaseElementType(FD->getType());
+ CXXRecordDecl *FieldRecord = FieldType->getAsCXXRecordDecl();
+
+ if (CSM == Sema::CXXDefaultConstructor) {
+ // For a default constructor, all references must be initialized in-class
+ // and, if a union, it must have a non-const member.
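+    // For example (illustrative), 'struct S { int &r; };' gets a deleted
+    // default constructor because 'r' would be left unbound.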
+ if (FieldType->isReferenceType() && !FD->hasInClassInitializer()) {
+ if (Diagnose)
+ S.Diag(FD->getLocation(), diag::note_deleted_default_ctor_uninit_field)
+ << MD->getParent() << FD << FieldType << /*Reference*/0;
+ return true;
+ }
+ // C++11 [class.ctor]p5: any non-variant non-static data member of
+ // const-qualified type (or array thereof) with no
+ // brace-or-equal-initializer does not have a user-provided default
+ // constructor.
+ if (!inUnion() && FieldType.isConstQualified() &&
+ !FD->hasInClassInitializer() &&
+ (!FieldRecord || !FieldRecord->hasUserProvidedDefaultConstructor())) {
+ if (Diagnose)
+ S.Diag(FD->getLocation(), diag::note_deleted_default_ctor_uninit_field)
+ << MD->getParent() << FD << FD->getType() << /*Const*/1;
+ return true;
+ }
+
+ if (inUnion() && !FieldType.isConstQualified())
+ AllFieldsAreConst = false;
+ } else if (CSM == Sema::CXXCopyConstructor) {
+ // For a copy constructor, data members must not be of rvalue reference
+ // type.
+ if (FieldType->isRValueReferenceType()) {
+ if (Diagnose)
+ S.Diag(FD->getLocation(), diag::note_deleted_copy_ctor_rvalue_reference)
+ << MD->getParent() << FD << FieldType;
+ return true;
+ }
+ } else if (IsAssignment) {
+ // For an assignment operator, data members must not be of reference type.
+ if (FieldType->isReferenceType()) {
+ if (Diagnose)
+ S.Diag(FD->getLocation(), diag::note_deleted_assign_field)
+ << IsMove << MD->getParent() << FD << FieldType << /*Reference*/0;
+ return true;
+ }
+ if (!FieldRecord && FieldType.isConstQualified()) {
+ // C++11 [class.copy]p23:
+ // -- a non-static data member of const non-class type (or array thereof)
+ if (Diagnose)
+ S.Diag(FD->getLocation(), diag::note_deleted_assign_field)
+ << IsMove << MD->getParent() << FD << FD->getType() << /*Const*/1;
+ return true;
+ }
+ }
+
+ if (FieldRecord) {
+ // Some additional restrictions exist on the variant members.
+ if (!inUnion() && FieldRecord->isUnion() &&
+ FieldRecord->isAnonymousStructOrUnion()) {
+ bool AllVariantFieldsAreConst = true;
+
+ // FIXME: Handle anonymous unions declared within anonymous unions.
+ for (auto *UI : FieldRecord->fields()) {
+ QualType UnionFieldType = S.Context.getBaseElementType(UI->getType());
+
+ if (!UnionFieldType.isConstQualified())
+ AllVariantFieldsAreConst = false;
+
+ CXXRecordDecl *UnionFieldRecord = UnionFieldType->getAsCXXRecordDecl();
+ if (UnionFieldRecord &&
+ shouldDeleteForClassSubobject(UnionFieldRecord, UI,
+ UnionFieldType.getCVRQualifiers()))
+ return true;
+ }
+
+ // At least one member in each anonymous union must be non-const
+ if (CSM == Sema::CXXDefaultConstructor && AllVariantFieldsAreConst &&
+ !FieldRecord->field_empty()) {
+ if (Diagnose)
+ S.Diag(FieldRecord->getLocation(),
+ diag::note_deleted_default_ctor_all_const)
+ << MD->getParent() << /*anonymous union*/1;
+ return true;
+ }
+
+ // Don't check the implicit member of the anonymous union type.
+ // This is technically non-conformant, but sanity demands it.
+ return false;
+ }
+
+ if (shouldDeleteForClassSubobject(FieldRecord, FD,
+ FieldType.getCVRQualifiers()))
+ return true;
+ }
+
+ return false;
+}
+
+/// C++11 [class.ctor]p5:
+///   A defaulted default constructor for a class X is defined as deleted if
+///   X is a union and all of its variant members are of const-qualified type.
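+///
+/// For example (illustrative), 'union U { const int c; };' gets a deleted
+/// defaulted default constructor.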
+bool SpecialMemberDeletionInfo::shouldDeleteForAllConstMembers() {
+ // This is a silly definition, because it gives an empty union a deleted
+ // default constructor. Don't do that.
+ if (CSM == Sema::CXXDefaultConstructor && inUnion() && AllFieldsAreConst &&
+ !MD->getParent()->field_empty()) {
+ if (Diagnose)
+ S.Diag(MD->getParent()->getLocation(),
+ diag::note_deleted_default_ctor_all_const)
+ << MD->getParent() << /*not anonymous union*/0;
+ return true;
+ }
+ return false;
+}
+
+/// Determine whether a defaulted special member function should be defined as
+/// deleted, as specified in C++11 [class.ctor]p5, C++11 [class.copy]p11,
+/// C++11 [class.copy]p23, and C++11 [class.dtor]p5.
+bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
+ bool Diagnose) {
+ if (MD->isInvalidDecl())
+ return false;
+ CXXRecordDecl *RD = MD->getParent();
+ assert(!RD->isDependentType() && "do deletion after instantiation");
+ if (!LangOpts.CPlusPlus11 || RD->isInvalidDecl())
+ return false;
+
+ // C++11 [expr.lambda.prim]p19:
+ // The closure type associated with a lambda-expression has a
+ // deleted (8.4.3) default constructor and a deleted copy
+ // assignment operator.
+ if (RD->isLambda() &&
+ (CSM == CXXDefaultConstructor || CSM == CXXCopyAssignment)) {
+ if (Diagnose)
+ Diag(RD->getLocation(), diag::note_lambda_decl);
+ return true;
+ }
+
+ // For an anonymous struct or union, the copy and assignment special members
+ // will never be used, so skip the check. For an anonymous union declared at
+ // namespace scope, the constructor and destructor are used.
+ if (CSM != CXXDefaultConstructor && CSM != CXXDestructor &&
+ RD->isAnonymousStructOrUnion())
+ return false;
+
+ // C++11 [class.copy]p7, p18:
+ // If the class definition declares a move constructor or move assignment
+ // operator, an implicitly declared copy constructor or copy assignment
+ // operator is defined as deleted.
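+  // For example (illustrative):
+  //   struct S { S(S &&); };
+  // has a deleted implicit copy constructor.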
+ if (MD->isImplicit() &&
+ (CSM == CXXCopyConstructor || CSM == CXXCopyAssignment)) {
+ CXXMethodDecl *UserDeclaredMove = nullptr;
+
+ // In Microsoft mode, a user-declared move only causes the deletion of the
+ // corresponding copy operation, not both copy operations.
+ if (RD->hasUserDeclaredMoveConstructor() &&
+ (!getLangOpts().MSVCCompat || CSM == CXXCopyConstructor)) {
+ if (!Diagnose) return true;
+
+ // Find any user-declared move constructor.
+ for (auto *I : RD->ctors()) {
+ if (I->isMoveConstructor()) {
+ UserDeclaredMove = I;
+ break;
+ }
+ }
+ assert(UserDeclaredMove);
+ } else if (RD->hasUserDeclaredMoveAssignment() &&
+ (!getLangOpts().MSVCCompat || CSM == CXXCopyAssignment)) {
+ if (!Diagnose) return true;
+
+ // Find any user-declared move assignment operator.
+ for (auto *I : RD->methods()) {
+ if (I->isMoveAssignmentOperator()) {
+ UserDeclaredMove = I;
+ break;
+ }
+ }
+ assert(UserDeclaredMove);
+ }
+
+ if (UserDeclaredMove) {
+ Diag(UserDeclaredMove->getLocation(),
+ diag::note_deleted_copy_user_declared_move)
+ << (CSM == CXXCopyAssignment) << RD
+ << UserDeclaredMove->isMoveAssignmentOperator();
+ return true;
+ }
+ }
+
+ // Do access control from the special member function
+ ContextRAII MethodContext(*this, MD);
+
+ // C++11 [class.dtor]p5:
+ // -- for a virtual destructor, lookup of the non-array deallocation function
+ // results in an ambiguity or in a function that is deleted or inaccessible
+ if (CSM == CXXDestructor && MD->isVirtual()) {
+ FunctionDecl *OperatorDelete = nullptr;
+ DeclarationName Name =
+ Context.DeclarationNames.getCXXOperatorName(OO_Delete);
+ if (FindDeallocationFunction(MD->getLocation(), MD->getParent(), Name,
+ OperatorDelete, false)) {
+ if (Diagnose)
+ Diag(RD->getLocation(), diag::note_deleted_dtor_no_operator_delete);
+ return true;
+ }
+ }
+
+ SpecialMemberDeletionInfo SMI(*this, MD, CSM, Diagnose);
+
+ for (auto &BI : RD->bases())
+ if (!BI.isVirtual() &&
+ SMI.shouldDeleteForBase(&BI))
+ return true;
+
+ // Per DR1611, do not consider virtual bases of constructors of abstract
+ // classes, since we are not going to construct them.
+ if (!RD->isAbstract() || !SMI.IsConstructor) {
+ for (auto &BI : RD->vbases())
+ if (SMI.shouldDeleteForBase(&BI))
+ return true;
+ }
+
+ for (auto *FI : RD->fields())
+ if (!FI->isInvalidDecl() && !FI->isUnnamedBitfield() &&
+ SMI.shouldDeleteForField(FI))
+ return true;
+
+ if (SMI.shouldDeleteForAllConstMembers())
+ return true;
+
+ if (getLangOpts().CUDA) {
+ // We should delete the special member in CUDA mode if target inference
+ // failed.
+ return inferCUDATargetForImplicitSpecialMember(RD, CSM, MD, SMI.ConstArg,
+ Diagnose);
+ }
+
+ return false;
+}
+
+/// Perform lookup for a special member of the specified kind, and determine
+/// whether it is trivial. If the triviality can be determined without the
+/// lookup, skip it. This is intended for use when determining whether a
+/// special member of a containing object is trivial, and thus does not ever
+/// perform overload resolution for default constructors.
+///
+/// If \p Selected is not \c NULL, \c *Selected will be filled in with the
+/// member that was most likely to be intended to be trivial, if any.
+static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD,
+ Sema::CXXSpecialMember CSM, unsigned Quals,
+ bool ConstRHS, CXXMethodDecl **Selected) {
+ if (Selected)
+ *Selected = nullptr;
+
+ switch (CSM) {
+ case Sema::CXXInvalid:
+ llvm_unreachable("not a special member");
+
+ case Sema::CXXDefaultConstructor:
+ // C++11 [class.ctor]p5:
+ // A default constructor is trivial if:
+ // - all the [direct subobjects] have trivial default constructors
+ //
+ // Note, no overload resolution is performed in this case.
+ if (RD->hasTrivialDefaultConstructor())
+ return true;
+
+ if (Selected) {
+ // If there's a default constructor which could have been trivial, dig it
+ // out. Otherwise, if there's any user-provided default constructor, point
+ // to that as an example of why there's not a trivial one.
+ CXXConstructorDecl *DefCtor = nullptr;
+ if (RD->needsImplicitDefaultConstructor())
+ S.DeclareImplicitDefaultConstructor(RD);
+ for (auto *CI : RD->ctors()) {
+ if (!CI->isDefaultConstructor())
+ continue;
+ DefCtor = CI;
+ if (!DefCtor->isUserProvided())
+ break;
+ }
+
+ *Selected = DefCtor;
+ }
+
+ return false;
+
+ case Sema::CXXDestructor:
+ // C++11 [class.dtor]p5:
+ // A destructor is trivial if:
+ // - all the direct [subobjects] have trivial destructors
+ if (RD->hasTrivialDestructor())
+ return true;
+
+ if (Selected) {
+ if (RD->needsImplicitDestructor())
+ S.DeclareImplicitDestructor(RD);
+ *Selected = RD->getDestructor();
+ }
+
+ return false;
+
+ case Sema::CXXCopyConstructor:
+ // C++11 [class.copy]p12:
+ // A copy constructor is trivial if:
+ // - the constructor selected to copy each direct [subobject] is trivial
+ if (RD->hasTrivialCopyConstructor()) {
+ if (Quals == Qualifiers::Const)
+ // We must either select the trivial copy constructor or reach an
+ // ambiguity; no need to actually perform overload resolution.
+ return true;
+ } else if (!Selected) {
+ return false;
+ }
+ // In C++98, we are not supposed to perform overload resolution here, but we
+ // treat that as a language defect, as suggested on cxx-abi-dev, and treat
+ // cases like B as having a non-trivial copy constructor:
+ // struct A { template<typename T> A(T&); };
+ // struct B { mutable A a; };
+ goto NeedOverloadResolution;
+
+ case Sema::CXXCopyAssignment:
+ // C++11 [class.copy]p25:
+ // A copy assignment operator is trivial if:
+ // - the assignment operator selected to copy each direct [subobject] is
+ // trivial
+ if (RD->hasTrivialCopyAssignment()) {
+ if (Quals == Qualifiers::Const)
+ return true;
+ } else if (!Selected) {
+ return false;
+ }
+ // In C++98, we are not supposed to perform overload resolution here, but we
+ // treat that as a language defect.
+ goto NeedOverloadResolution;
+
+ case Sema::CXXMoveConstructor:
+ case Sema::CXXMoveAssignment:
+ NeedOverloadResolution:
+ Sema::SpecialMemberOverloadResult *SMOR =
+ lookupCallFromSpecialMember(S, RD, CSM, Quals, ConstRHS);
+
+ // The standard doesn't describe how to behave if the lookup is ambiguous.
+ // We treat it as not making the member non-trivial, just like the standard
+ // mandates for the default constructor. This should rarely matter, because
+ // the member will also be deleted.
+ if (SMOR->getKind() == Sema::SpecialMemberOverloadResult::Ambiguous)
+ return true;
+
+ if (!SMOR->getMethod()) {
+ assert(SMOR->getKind() ==
+ Sema::SpecialMemberOverloadResult::NoMemberOrDeleted);
+ return false;
+ }
+
+ // We deliberately don't check if we found a deleted special member. We're
+ // not supposed to!
+ if (Selected)
+ *Selected = SMOR->getMethod();
+ return SMOR->getMethod()->isTrivial();
+ }
+
+ llvm_unreachable("unknown special method kind");
+}
+
+static CXXConstructorDecl *findUserDeclaredCtor(CXXRecordDecl *RD) {
+ for (auto *CI : RD->ctors())
+ if (!CI->isImplicit())
+ return CI;
+
+ // Look for constructor templates.
+ typedef CXXRecordDecl::specific_decl_iterator<FunctionTemplateDecl> tmpl_iter;
+ for (tmpl_iter TI(RD->decls_begin()), TE(RD->decls_end()); TI != TE; ++TI) {
+ if (CXXConstructorDecl *CD =
+ dyn_cast<CXXConstructorDecl>(TI->getTemplatedDecl()))
+ return CD;
+ }
+
+ return nullptr;
+}
+
+/// The kind of subobject we are checking for triviality. The values of this
+/// enumeration are used in diagnostics.
+enum TrivialSubobjectKind {
+ /// The subobject is a base class.
+ TSK_BaseClass,
+ /// The subobject is a non-static data member.
+ TSK_Field,
+ /// The object is actually the complete object.
+ TSK_CompleteObject
+};
+
+/// Check whether the special member selected for a given type would be trivial.
+static bool checkTrivialSubobjectCall(Sema &S, SourceLocation SubobjLoc,
+ QualType SubType, bool ConstRHS,
+ Sema::CXXSpecialMember CSM,
+ TrivialSubobjectKind Kind,
+ bool Diagnose) {
+ CXXRecordDecl *SubRD = SubType->getAsCXXRecordDecl();
+ if (!SubRD)
+ return true;
+
+ CXXMethodDecl *Selected;
+ if (findTrivialSpecialMember(S, SubRD, CSM, SubType.getCVRQualifiers(),
+ ConstRHS, Diagnose ? &Selected : nullptr))
+ return true;
+
+ if (Diagnose) {
+ if (ConstRHS)
+ SubType.addConst();
+
+ if (!Selected && CSM == Sema::CXXDefaultConstructor) {
+ S.Diag(SubobjLoc, diag::note_nontrivial_no_def_ctor)
+ << Kind << SubType.getUnqualifiedType();
+ if (CXXConstructorDecl *CD = findUserDeclaredCtor(SubRD))
+ S.Diag(CD->getLocation(), diag::note_user_declared_ctor);
+ } else if (!Selected)
+ S.Diag(SubobjLoc, diag::note_nontrivial_no_copy)
+ << Kind << SubType.getUnqualifiedType() << CSM << SubType;
+ else if (Selected->isUserProvided()) {
+ if (Kind == TSK_CompleteObject)
+ S.Diag(Selected->getLocation(), diag::note_nontrivial_user_provided)
+ << Kind << SubType.getUnqualifiedType() << CSM;
+ else {
+ S.Diag(SubobjLoc, diag::note_nontrivial_user_provided)
+ << Kind << SubType.getUnqualifiedType() << CSM;
+ S.Diag(Selected->getLocation(), diag::note_declared_at);
+ }
+ } else {
+ if (Kind != TSK_CompleteObject)
+ S.Diag(SubobjLoc, diag::note_nontrivial_subobject)
+ << Kind << SubType.getUnqualifiedType() << CSM;
+
+ // Explain why the defaulted or deleted special member isn't trivial.
+ S.SpecialMemberIsTrivial(Selected, CSM, Diagnose);
+ }
+ }
+
+ return false;
+}
+
+/// Check whether the members of a class type allow a special member to be
+/// trivial.
+static bool checkTrivialClassMembers(Sema &S, CXXRecordDecl *RD,
+ Sema::CXXSpecialMember CSM,
+ bool ConstArg, bool Diagnose) {
+ for (const auto *FI : RD->fields()) {
+ if (FI->isInvalidDecl() || FI->isUnnamedBitfield())
+ continue;
+
+ QualType FieldType = S.Context.getBaseElementType(FI->getType());
+
+ // Pretend anonymous struct or union members are members of this class.
+ if (FI->isAnonymousStructOrUnion()) {
+ if (!checkTrivialClassMembers(S, FieldType->getAsCXXRecordDecl(),
+ CSM, ConstArg, Diagnose))
+ return false;
+ continue;
+ }
+
+ // C++11 [class.ctor]p5:
+ // A default constructor is trivial if [...]
+ // -- no non-static data member of its class has a
+ // brace-or-equal-initializer
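+ // For example, an in-class initializer alone is enough:
+ //   struct S { int n = 0; };   // S's default constructor is not trivial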
+ if (CSM == Sema::CXXDefaultConstructor && FI->hasInClassInitializer()) {
+ if (Diagnose)
+ S.Diag(FI->getLocation(), diag::note_nontrivial_in_class_init) << FI;
+ return false;
+ }
+
+ // Objective-C ARC 4.3.5:
+ // [...] nontrivially ownership-qualified types are [...] not trivially
+ // default constructible, copy constructible, move constructible, copy
+ // assignable, move assignable, or destructible [...]
+ if (S.getLangOpts().ObjCAutoRefCount &&
+ FieldType.hasNonTrivialObjCLifetime()) {
+ if (Diagnose)
+ S.Diag(FI->getLocation(), diag::note_nontrivial_objc_ownership)
+ << RD << FieldType.getObjCLifetime();
+ return false;
+ }
+
+ bool ConstRHS = ConstArg && !FI->isMutable();
+ if (!checkTrivialSubobjectCall(S, FI->getLocation(), FieldType, ConstRHS,
+ CSM, TSK_Field, Diagnose))
+ return false;
+ }
+
+ return true;
+}
+
+/// Diagnose why the specified class does not have a trivial special member of
+/// the given kind.
+void Sema::DiagnoseNontrivial(const CXXRecordDecl *RD, CXXSpecialMember CSM) {
+ QualType Ty = Context.getRecordType(RD);
+
+ bool ConstArg = (CSM == CXXCopyConstructor || CSM == CXXCopyAssignment);
+ checkTrivialSubobjectCall(*this, RD->getLocation(), Ty, ConstArg, CSM,
+ TSK_CompleteObject, /*Diagnose*/true);
+}
+
+/// Determine whether a defaulted or deleted special member function is trivial,
+/// as specified in C++11 [class.ctor]p5, C++11 [class.copy]p12,
+/// C++11 [class.copy]p25, and C++11 [class.dtor]p5.
+bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
+ bool Diagnose) {
+ assert(!MD->isUserProvided() && CSM != CXXInvalid && "not special enough");
+
+ CXXRecordDecl *RD = MD->getParent();
+
+ bool ConstArg = false;
+
+ // C++11 [class.copy]p12, p25: [DR1593]
+ // A [special member] is trivial if [...] its parameter-type-list is
+ // equivalent to the parameter-type-list of an implicit declaration [...]
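+ // For example, under the rule implemented here:
+ //   struct S { S(S&) = default; };   // not trivial: the parameter type is
+ //                                    // S&, not the implicit 'const S&'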
+ switch (CSM) {
+ case CXXDefaultConstructor:
+ case CXXDestructor:
+ // Trivial default constructors and destructors cannot have parameters.
+ break;
+
+ case CXXCopyConstructor:
+ case CXXCopyAssignment: {
+ // Trivial copy operations always have const, non-volatile parameter types.
+ ConstArg = true;
+ const ParmVarDecl *Param0 = MD->getParamDecl(0);
+ const ReferenceType *RT = Param0->getType()->getAs<ReferenceType>();
+ if (!RT || RT->getPointeeType().getCVRQualifiers() != Qualifiers::Const) {
+ if (Diagnose)
+ Diag(Param0->getLocation(), diag::note_nontrivial_param_type)
+ << Param0->getSourceRange() << Param0->getType()
+ << Context.getLValueReferenceType(
+ Context.getRecordType(RD).withConst());
+ return false;
+ }
+ break;
+ }
+
+ case CXXMoveConstructor:
+ case CXXMoveAssignment: {
+ // Trivial move operations always have non-cv-qualified parameters.
+ const ParmVarDecl *Param0 = MD->getParamDecl(0);
+ const RValueReferenceType *RT =
+ Param0->getType()->getAs<RValueReferenceType>();
+ if (!RT || RT->getPointeeType().getCVRQualifiers()) {
+ if (Diagnose)
+ Diag(Param0->getLocation(), diag::note_nontrivial_param_type)
+ << Param0->getSourceRange() << Param0->getType()
+ << Context.getRValueReferenceType(Context.getRecordType(RD));
+ return false;
+ }
+ break;
+ }
+
+ case CXXInvalid:
+ llvm_unreachable("not a special member");
+ }
+
+ if (MD->getMinRequiredArguments() < MD->getNumParams()) {
+ if (Diagnose)
+ Diag(MD->getParamDecl(MD->getMinRequiredArguments())->getLocation(),
+ diag::note_nontrivial_default_arg)
+ << MD->getParamDecl(MD->getMinRequiredArguments())->getSourceRange();
+ return false;
+ }
+ if (MD->isVariadic()) {
+ if (Diagnose)
+ Diag(MD->getLocation(), diag::note_nontrivial_variadic);
+ return false;
+ }
+
+ // C++11 [class.ctor]p5, C++11 [class.dtor]p5:
+ // A copy/move [constructor or assignment operator] is trivial if
+ // -- the [member] selected to copy/move each direct base class subobject
+ // is trivial
+ //
+ // C++11 [class.copy]p12, C++11 [class.copy]p25:
+ // A [default constructor or destructor] is trivial if
+ // -- all the direct base classes have trivial [default constructors or
+ // destructors]
+ for (const auto &BI : RD->bases())
+ if (!checkTrivialSubobjectCall(*this, BI.getLocStart(), BI.getType(),
+ ConstArg, CSM, TSK_BaseClass, Diagnose))
+ return false;
+
+ // C++11 [class.ctor]p5, C++11 [class.dtor]p5:
+ // A copy/move [constructor or assignment operator] for a class X is
+ // trivial if
+ // -- for each non-static data member of X that is of class type (or array
+ // thereof), the constructor selected to copy/move that member is
+ // trivial
+ //
+ // C++11 [class.copy]p12, C++11 [class.copy]p25:
+ // A [default constructor or destructor] is trivial if
+ // -- for all of the non-static data members of its class that are of class
+ // type (or array thereof), each such class has a trivial [default
+ // constructor or destructor]
+ if (!checkTrivialClassMembers(*this, RD, CSM, ConstArg, Diagnose))
+ return false;
+
+ // C++11 [class.dtor]p5:
+ // A destructor is trivial if [...]
+ // -- the destructor is not virtual
+ if (CSM == CXXDestructor && MD->isVirtual()) {
+ if (Diagnose)
+ Diag(MD->getLocation(), diag::note_nontrivial_virtual_dtor) << RD;
+ return false;
+ }
+
+ // C++11 [class.ctor]p5, C++11 [class.copy]p12, C++11 [class.copy]p25:
+ // A [special member] for class X is trivial if [...]
+ // -- class X has no virtual functions and no virtual base classes
+ if (CSM != CXXDestructor && MD->getParent()->isDynamicClass()) {
+ if (!Diagnose)
+ return false;
+
+ if (RD->getNumVBases()) {
+ // Check for virtual bases. We already know that the corresponding
+ // member in all bases is trivial, so vbases must all be direct.
+ CXXBaseSpecifier &BS = *RD->vbases_begin();
+ assert(BS.isVirtual());
+ Diag(BS.getLocStart(), diag::note_nontrivial_has_virtual) << RD << 1;
+ return false;
+ }
+
+ // Must have a virtual method.
+ for (const auto *MI : RD->methods()) {
+ if (MI->isVirtual()) {
+ SourceLocation MLoc = MI->getLocStart();
+ Diag(MLoc, diag::note_nontrivial_has_virtual) << RD << 0;
+ return false;
+ }
+ }
+
+ llvm_unreachable("dynamic class with no vbases and no virtual functions");
+ }
+
+ // Looks like it's trivial!
+ return true;
+}
+
+namespace {
+struct FindHiddenVirtualMethod {
+ Sema *S;
+ CXXMethodDecl *Method;
+ llvm::SmallPtrSet<const CXXMethodDecl *, 8> OverridenAndUsingBaseMethods;
+ SmallVector<CXXMethodDecl *, 8> OverloadedMethods;
+
+private:
+ /// Check whether any of MD's most overridden methods is in the Methods set.
+ static bool CheckMostOverridenMethods(
+ const CXXMethodDecl *MD,
+ const llvm::SmallPtrSetImpl<const CXXMethodDecl *> &Methods) {
+ if (MD->size_overridden_methods() == 0)
+ return Methods.count(MD->getCanonicalDecl());
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods();
+ I != E; ++I)
+ if (CheckMostOverridenMethods(*I, Methods))
+ return true;
+ return false;
+ }
+
+public:
+ /// Member lookup function that determines whether a given C++
+ /// method overloads virtual methods in a base class without overriding any,
+ /// to be used with CXXRecordDecl::lookupInBases().
+ bool operator()(const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
+ RecordDecl *BaseRecord =
+ Specifier->getType()->getAs<RecordType>()->getDecl();
+
+ DeclarationName Name = Method->getDeclName();
+ assert(Name.getNameKind() == DeclarationName::Identifier);
+
+ bool foundSameNameMethod = false;
+ SmallVector<CXXMethodDecl *, 8> overloadedMethods;
+ for (Path.Decls = BaseRecord->lookup(Name); !Path.Decls.empty();
+ Path.Decls = Path.Decls.slice(1)) {
+ NamedDecl *D = Path.Decls.front();
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ MD = MD->getCanonicalDecl();
+ foundSameNameMethod = true;
+ // Interested only in hidden virtual methods.
+ if (!MD->isVirtual())
+ continue;
+ // If the method we are checking overrides a method from its base
+ // don't warn about the other overloaded methods. Clang deviates from
+ // GCC by only diagnosing overloads of inherited virtual functions that
+ // do not override any other virtual functions in the base. GCC's
+ // -Woverloaded-virtual diagnoses any derived function hiding a virtual
+ // function from a base class. These cases may be better served by a
+ // warning (not specific to virtual functions) on call sites when the
+ // call would select a different function from the base class, were it
+ // visible.
+ // See FIXME in test/SemaCXX/warn-overload-virtual.cpp for an example.
+ if (!S->IsOverload(Method, MD, false))
+ return true;
+ // Collect the overload only if it's hidden.
+ if (!CheckMostOverridenMethods(MD, OverridenAndUsingBaseMethods))
+ overloadedMethods.push_back(MD);
+ }
+ }
+
+ if (foundSameNameMethod)
+ OverloadedMethods.append(overloadedMethods.begin(),
+ overloadedMethods.end());
+ return foundSameNameMethod;
+ }
+};
+} // end anonymous namespace
+
+/// \brief Add the most overridden methods from MD to Methods
+static void AddMostOverridenMethods(const CXXMethodDecl *MD,
+ llvm::SmallPtrSetImpl<const CXXMethodDecl *>& Methods) {
+ if (MD->size_overridden_methods() == 0)
+ Methods.insert(MD->getCanonicalDecl());
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods();
+ I != E; ++I)
+ AddMostOverridenMethods(*I, Methods);
+}
+
+/// \brief Check if a method overloads virtual methods in a base class without
+/// overriding any.
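+/// For example:
+///   struct Base { virtual void f(int); };
+///   struct Derived : Base {
+///     void f(double);   // hides Base::f(int) without overriding it
+///   };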
+void Sema::FindHiddenVirtualMethods(CXXMethodDecl *MD,
+ SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods) {
+ if (!MD->getDeclName().isIdentifier())
+ return;
+
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, // true to look in all bases.
+ /*bool RecordPaths=*/false,
+ /*bool DetectVirtual=*/false);
+ FindHiddenVirtualMethod FHVM;
+ FHVM.Method = MD;
+ FHVM.S = this;
+
+ // Keep the base methods that were overridden or introduced in the subclass
+ // by 'using' in a set. A base method not in this set is hidden.
+ CXXRecordDecl *DC = MD->getParent();
+ DeclContext::lookup_result R = DC->lookup(MD->getDeclName());
+ for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E; ++I) {
+ NamedDecl *ND = *I;
+ if (UsingShadowDecl *shad = dyn_cast<UsingShadowDecl>(*I))
+ ND = shad->getTargetDecl();
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
+ AddMostOverridenMethods(MD, FHVM.OverridenAndUsingBaseMethods);
+ }
+
+ if (DC->lookupInBases(FHVM, Paths))
+ OverloadedMethods = FHVM.OverloadedMethods;
+}
+
+void Sema::NoteHiddenVirtualMethods(CXXMethodDecl *MD,
+ SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods) {
+ for (unsigned i = 0, e = OverloadedMethods.size(); i != e; ++i) {
+ CXXMethodDecl *overloadedMD = OverloadedMethods[i];
+ PartialDiagnostic PD = PDiag(
+ diag::note_hidden_overloaded_virtual_declared_here) << overloadedMD;
+ HandleFunctionTypeMismatch(PD, MD->getType(), overloadedMD->getType());
+ Diag(overloadedMD->getLocation(), PD);
+ }
+}
+
+/// \brief Diagnose methods which overload virtual methods in a base class
+/// without overriding any.
+void Sema::DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD) {
+ if (MD->isInvalidDecl())
+ return;
+
+ if (Diags.isIgnored(diag::warn_overloaded_virtual, MD->getLocation()))
+ return;
+
+ SmallVector<CXXMethodDecl *, 8> OverloadedMethods;
+ FindHiddenVirtualMethods(MD, OverloadedMethods);
+ if (!OverloadedMethods.empty()) {
+ Diag(MD->getLocation(), diag::warn_overloaded_virtual)
+ << MD << (OverloadedMethods.size() > 1);
+
+ NoteHiddenVirtualMethods(MD, OverloadedMethods);
+ }
+}
+
+void Sema::ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
+ Decl *TagDecl,
+ SourceLocation LBrac,
+ SourceLocation RBrac,
+ AttributeList *AttrList) {
+ if (!TagDecl)
+ return;
+
+ AdjustDeclIfTemplate(TagDecl);
+
+ for (const AttributeList* l = AttrList; l; l = l->getNext()) {
+ if (l->getKind() != AttributeList::AT_Visibility)
+ continue;
+ l->setInvalid();
+ Diag(l->getLoc(), diag::warn_attribute_after_definition_ignored) <<
+ l->getName();
+ }
+
+ ActOnFields(S, RLoc, TagDecl, llvm::makeArrayRef(
+ // strict aliasing violation!
+ reinterpret_cast<Decl**>(FieldCollector->getCurFields()),
+ FieldCollector->getCurNumFields()), LBrac, RBrac, AttrList);
+
+ CheckCompletedCXXClass(
+ dyn_cast_or_null<CXXRecordDecl>(TagDecl));
+}
+
+/// AddImplicitlyDeclaredMembersToClass - Adds any implicitly-declared
+/// special functions, such as the default constructor, copy
+/// constructor, or destructor, to the given C++ class (C++
+/// [special]p1). This routine can only be executed just before the
+/// definition of the class is complete.
+void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
+ if (!ClassDecl->hasUserDeclaredConstructor())
+ ++ASTContext::NumImplicitDefaultConstructors;
+
+ if (!ClassDecl->hasUserDeclaredCopyConstructor()) {
+ ++ASTContext::NumImplicitCopyConstructors;
+
+ // If the properties or semantics of the copy constructor couldn't be
+ // determined while the class was being declared, force a declaration
+ // of it now.
+ if (ClassDecl->needsOverloadResolutionForCopyConstructor())
+ DeclareImplicitCopyConstructor(ClassDecl);
+ }
+
+ if (getLangOpts().CPlusPlus11 && ClassDecl->needsImplicitMoveConstructor()) {
+ ++ASTContext::NumImplicitMoveConstructors;
+
+ if (ClassDecl->needsOverloadResolutionForMoveConstructor())
+ DeclareImplicitMoveConstructor(ClassDecl);
+ }
+
+ if (!ClassDecl->hasUserDeclaredCopyAssignment()) {
+ ++ASTContext::NumImplicitCopyAssignmentOperators;
+
+ // If we have a dynamic class, then the copy assignment operator may be
+ // virtual, so we have to declare it immediately. This ensures that, e.g.,
+ // it shows up in the right place in the vtable and that we diagnose
+ // problems with the implicit exception specification.
+ if (ClassDecl->isDynamicClass() ||
+ ClassDecl->needsOverloadResolutionForCopyAssignment())
+ DeclareImplicitCopyAssignment(ClassDecl);
+ }
+
+ if (getLangOpts().CPlusPlus11 && ClassDecl->needsImplicitMoveAssignment()) {
+ ++ASTContext::NumImplicitMoveAssignmentOperators;
+
+ // Likewise for the move assignment operator.
+ if (ClassDecl->isDynamicClass() ||
+ ClassDecl->needsOverloadResolutionForMoveAssignment())
+ DeclareImplicitMoveAssignment(ClassDecl);
+ }
+
+ if (!ClassDecl->hasUserDeclaredDestructor()) {
+ ++ASTContext::NumImplicitDestructors;
+
+ // If we have a dynamic class, then the destructor may be virtual, so we
+ // have to declare the destructor immediately. This ensures that, e.g., it
+ // shows up in the right place in the vtable and that we diagnose problems
+ // with the implicit exception specification.
+ if (ClassDecl->isDynamicClass() ||
+ ClassDecl->needsOverloadResolutionForDestructor())
+ DeclareImplicitDestructor(ClassDecl);
+ }
+}
+
+unsigned Sema::ActOnReenterTemplateScope(Scope *S, Decl *D) {
+ if (!D)
+ return 0;
+
+ // The order of template parameters is not important here. All names
+ // get added to the same scope.
+ SmallVector<TemplateParameterList *, 4> ParameterLists;
+
+ if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D))
+ D = TD->getTemplatedDecl();
+
+ if (auto *PSD = dyn_cast<ClassTemplatePartialSpecializationDecl>(D))
+ ParameterLists.push_back(PSD->getTemplateParameters());
+
+ if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D)) {
+ for (unsigned i = 0; i < DD->getNumTemplateParameterLists(); ++i)
+ ParameterLists.push_back(DD->getTemplateParameterList(i));
+
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
+ ParameterLists.push_back(FTD->getTemplateParameters());
+ }
+ }
+
+ if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ for (unsigned i = 0; i < TD->getNumTemplateParameterLists(); ++i)
+ ParameterLists.push_back(TD->getTemplateParameterList(i));
+
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(TD)) {
+ if (ClassTemplateDecl *CTD = RD->getDescribedClassTemplate())
+ ParameterLists.push_back(CTD->getTemplateParameters());
+ }
+ }
+
+ unsigned Count = 0;
+ for (TemplateParameterList *Params : ParameterLists) {
+ if (Params->size() > 0)
+ // Ignore explicit specializations; they don't contribute to the template
+ // depth.
+ ++Count;
+ for (NamedDecl *Param : *Params) {
+ if (Param->getDeclName()) {
+ S->AddDecl(Param);
+ IdResolver.AddDecl(Param);
+ }
+ }
+ }
+
+ return Count;
+}
+
+void Sema::ActOnStartDelayedMemberDeclarations(Scope *S, Decl *RecordD) {
+ if (!RecordD) return;
+ AdjustDeclIfTemplate(RecordD);
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordD);
+ PushDeclContext(S, Record);
+}
+
+void Sema::ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *RecordD) {
+ if (!RecordD) return;
+ PopDeclContext();
+}
+
+/// This is used to implement the constant expression evaluation part of the
+/// attribute enable_if extension. There is nothing in standard C++ which would
+/// require reentering parameters.
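+/// For example, the enable_if condition below names the parameter 'n', so
+/// 'n' must be re-entered into scope before the condition can be parsed:
+///   void f(int n) __attribute__((enable_if(n > 0, "requires positive n")));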
+void Sema::ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param) {
+ if (!Param)
+ return;
+
+ S->AddDecl(Param);
+ if (Param->getDeclName())
+ IdResolver.AddDecl(Param);
+}
+
+/// ActOnStartDelayedCXXMethodDeclaration - We have completed
+/// parsing a top-level (non-nested) C++ class, and we are now
+/// parsing those parts of the given Method declaration that could
+/// not be parsed earlier (C++ [class.mem]p2), such as default
+/// arguments. This action should enter the scope of the given
+/// Method declaration as if we had just parsed the qualified method
+/// name. However, it should not bring the parameters into scope;
+/// that will be performed by ActOnDelayedCXXMethodParameter.
+void Sema::ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *MethodD) {
+}
+
+/// ActOnDelayedCXXMethodParameter - We've already started a delayed
+/// C++ method declaration. We're (re-)introducing the given
+/// function parameter into scope for use in parsing later parts of
+/// the method declaration. For example, we could see an
+/// ActOnParamDefaultArgument event for this parameter.
+void Sema::ActOnDelayedCXXMethodParameter(Scope *S, Decl *ParamD) {
+ if (!ParamD)
+ return;
+
+ ParmVarDecl *Param = cast<ParmVarDecl>(ParamD);
+
+ // If this parameter has an unparsed default argument, clear it out
+ // to make way for the parsed default argument.
+ if (Param->hasUnparsedDefaultArg())
+ Param->setDefaultArg(nullptr);
+
+ S->AddDecl(Param);
+ if (Param->getDeclName())
+ IdResolver.AddDecl(Param);
+}
+
+/// ActOnFinishDelayedCXXMethodDeclaration - We have finished
+/// processing the delayed method declaration for Method. The method
+/// declaration is now considered finished. There may be a separate
+/// ActOnStartOfFunctionDef action later (not necessarily
+/// immediately!) for this method, if it was also defined inside the
+/// class body.
+void Sema::ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *MethodD) {
+ if (!MethodD)
+ return;
+
+ AdjustDeclIfTemplate(MethodD);
+
+ FunctionDecl *Method = cast<FunctionDecl>(MethodD);
+
+ // Now that we have our default arguments, check the constructor
+ // again. It could produce additional diagnostics or affect whether
+ // the class has implicitly-declared destructors, among other
+ // things.
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Method))
+ CheckConstructor(Constructor);
+
+ // Check the default arguments, which we may have added.
+ if (!Method->isInvalidDecl())
+ CheckCXXDefaultArguments(Method);
+}
+
+/// CheckConstructorDeclarator - Called by ActOnDeclarator to check
+/// the well-formedness of the constructor declarator @p D with type @p
+/// R. If there are any errors in the declarator, this routine will
+/// emit diagnostics and set the invalid bit to true. In any case, the type
+/// will be updated to reflect a well-formed type for the constructor and
+/// returned.
+QualType Sema::CheckConstructorDeclarator(Declarator &D, QualType R,
+ StorageClass &SC) {
+ bool isVirtual = D.getDeclSpec().isVirtualSpecified();
+
+ // C++ [class.ctor]p3:
+ // A constructor shall not be virtual (10.3) or static (9.4). A
+ // constructor can be invoked for a const, volatile or const
+ // volatile object. A constructor shall not be declared const,
+ // volatile, or const volatile (9.3.2).
+ if (isVirtual) {
+ if (!D.isInvalidType())
+ Diag(D.getIdentifierLoc(), diag::err_constructor_cannot_be)
+ << "virtual" << SourceRange(D.getDeclSpec().getVirtualSpecLoc())
+ << SourceRange(D.getIdentifierLoc());
+ D.setInvalidType();
+ }
+ if (SC == SC_Static) {
+ if (!D.isInvalidType())
+ Diag(D.getIdentifierLoc(), diag::err_constructor_cannot_be)
+ << "static" << SourceRange(D.getDeclSpec().getStorageClassSpecLoc())
+ << SourceRange(D.getIdentifierLoc());
+ D.setInvalidType();
+ SC = SC_None;
+ }
+
+ if (unsigned TypeQuals = D.getDeclSpec().getTypeQualifiers()) {
+ diagnoseIgnoredQualifiers(
+ diag::err_constructor_return_type, TypeQuals, SourceLocation(),
+ D.getDeclSpec().getConstSpecLoc(), D.getDeclSpec().getVolatileSpecLoc(),
+ D.getDeclSpec().getRestrictSpecLoc(),
+ D.getDeclSpec().getAtomicSpecLoc());
+ D.setInvalidType();
+ }
+
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
+ if (FTI.TypeQuals != 0) {
+ if (FTI.TypeQuals & Qualifiers::Const)
+ Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_constructor)
+ << "const" << SourceRange(D.getIdentifierLoc());
+ if (FTI.TypeQuals & Qualifiers::Volatile)
+ Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_constructor)
+ << "volatile" << SourceRange(D.getIdentifierLoc());
+ if (FTI.TypeQuals & Qualifiers::Restrict)
+ Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_constructor)
+ << "restrict" << SourceRange(D.getIdentifierLoc());
+ D.setInvalidType();
+ }
+
+ // C++0x [class.ctor]p4:
+ // A constructor shall not be declared with a ref-qualifier.
+ if (FTI.hasRefQualifier()) {
+ Diag(FTI.getRefQualifierLoc(), diag::err_ref_qualifier_constructor)
+ << FTI.RefQualifierIsLValueRef
+ << FixItHint::CreateRemoval(FTI.getRefQualifierLoc());
+ D.setInvalidType();
+ }
+
+ // Rebuild the function type "R" without any type qualifiers (in
+ // case any of the errors above fired) and with "void" as the
+ // return type, since constructors don't have return types.
+ const FunctionProtoType *Proto = R->getAs<FunctionProtoType>();
+ if (Proto->getReturnType() == Context.VoidTy && !D.isInvalidType())
+ return R;
+
+ FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
+ EPI.TypeQuals = 0;
+ EPI.RefQualifier = RQ_None;
+
+ return Context.getFunctionType(Context.VoidTy, Proto->getParamTypes(), EPI);
+}
+
+/// CheckConstructor - Checks a fully-formed constructor for
+/// well-formedness, issuing any diagnostics required. Marks the
+/// constructor invalid if any errors are found.
+void Sema::CheckConstructor(CXXConstructorDecl *Constructor) {
+ CXXRecordDecl *ClassDecl
+ = dyn_cast<CXXRecordDecl>(Constructor->getDeclContext());
+ if (!ClassDecl)
+ return Constructor->setInvalidDecl();
+
+ // C++ [class.copy]p3:
+ // A declaration of a constructor for a class X is ill-formed if
+ // its first parameter is of type (optionally cv-qualified) X and
+ // either there are no other parameters or else all other
+ // parameters have default arguments.
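+ // For example:
+ //   struct X {
+ //     X(X x);   // error: the fix-it suggests 'X(X const &x)'
+ //   };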
+ if (!Constructor->isInvalidDecl() &&
+ ((Constructor->getNumParams() == 1) ||
+ (Constructor->getNumParams() > 1 &&
+ Constructor->getParamDecl(1)->hasDefaultArg())) &&
+ Constructor->getTemplateSpecializationKind()
+ != TSK_ImplicitInstantiation) {
+ QualType ParamType = Constructor->getParamDecl(0)->getType();
+ QualType ClassTy = Context.getTagDeclType(ClassDecl);
+ if (Context.getCanonicalType(ParamType).getUnqualifiedType() == ClassTy) {
+ SourceLocation ParamLoc = Constructor->getParamDecl(0)->getLocation();
+ const char *ConstRef
+ = Constructor->getParamDecl(0)->getIdentifier() ? "const &"
+ : " const &";
+ Diag(ParamLoc, diag::err_constructor_byvalue_arg)
+ << FixItHint::CreateInsertion(ParamLoc, ConstRef);
+
+ // FIXME: Rather than making the constructor invalid, we should endeavor
+ // to fix the type.
+ Constructor->setInvalidDecl();
+ }
+ }
+}
+
+/// CheckDestructor - Checks a fully-formed destructor definition for
+/// well-formedness, issuing any diagnostics required. Returns true
+/// on error.
+bool Sema::CheckDestructor(CXXDestructorDecl *Destructor) {
+ CXXRecordDecl *RD = Destructor->getParent();
+
+ if (!Destructor->getOperatorDelete() && Destructor->isVirtual()) {
+ SourceLocation Loc;
+
+ if (!Destructor->isImplicit())
+ Loc = Destructor->getLocation();
+ else
+ Loc = RD->getLocation();
+
+ // If we have a virtual destructor, look up the deallocation function
+ FunctionDecl *OperatorDelete = nullptr;
+ DeclarationName Name =
+ Context.DeclarationNames.getCXXOperatorName(OO_Delete);
+ if (FindDeallocationFunction(Loc, RD, Name, OperatorDelete))
+ return true;
+ // If there's no class-specific operator delete, look up the global
+ // non-array delete.
+ if (!OperatorDelete)
+ OperatorDelete = FindUsualDeallocationFunction(Loc, true, Name);
+
+ MarkFunctionReferenced(Loc, OperatorDelete);
+
+ Destructor->setOperatorDelete(OperatorDelete);
+ }
+
+ return false;
+}
+
+/// CheckDestructorDeclarator - Called by ActOnDeclarator to check
+/// the well-formedness of the destructor declarator @p D with type @p
+/// R. If there are any errors in the declarator, this routine will
+/// emit diagnostics and set the declarator to invalid. Even if this
+/// happens, the type will be updated to reflect a well-formed type
+/// for the destructor and returned.
+QualType Sema::CheckDestructorDeclarator(Declarator &D, QualType R,
+ StorageClass& SC) {
+ // C++ [class.dtor]p1:
+ // [...] A typedef-name that names a class is a class-name
+ // (7.1.3); however, a typedef-name that names a class shall not
+ // be used as the identifier in the declarator for a destructor
+ // declaration.
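+ // For example:
+ //   struct S;
+ //   typedef S T;
+ //   struct S {
+ //     ~T();   // error: typedef-name 'T' used as destructor name
+ //   };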
+ QualType DeclaratorType = GetTypeFromParser(D.getName().DestructorName);
+ if (const TypedefType *TT = DeclaratorType->getAs<TypedefType>())
+ Diag(D.getIdentifierLoc(), diag::err_destructor_typedef_name)
+ << DeclaratorType << isa<TypeAliasDecl>(TT->getDecl());
+ else if (const TemplateSpecializationType *TST =
+ DeclaratorType->getAs<TemplateSpecializationType>())
+ if (TST->isTypeAlias())
+ Diag(D.getIdentifierLoc(), diag::err_destructor_typedef_name)
+ << DeclaratorType << 1;
+
+ // C++ [class.dtor]p2:
+ // A destructor is used to destroy objects of its class type. A
+ // destructor takes no parameters, and no return type can be
+ // specified for it (not even void). The address of a destructor
+ // shall not be taken. A destructor shall not be static. A
+ // destructor can be invoked for a const, volatile or const
+ // volatile object. A destructor shall not be declared const,
+ // volatile or const volatile (9.3.2).
+ if (SC == SC_Static) {
+ if (!D.isInvalidType())
+ Diag(D.getIdentifierLoc(), diag::err_destructor_cannot_be)
+ << "static" << SourceRange(D.getDeclSpec().getStorageClassSpecLoc())
+ << SourceRange(D.getIdentifierLoc())
+ << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
+
+ SC = SC_None;
+ }
+ if (!D.isInvalidType()) {
+ // Destructors don't have return types, but the parser will
+ // happily parse something like:
+ //
+ // class X {
+ // float ~X();
+ // };
+ //
+ // The return type will be eliminated later.
+ if (D.getDeclSpec().hasTypeSpecifier())
+ Diag(D.getIdentifierLoc(), diag::err_destructor_return_type)
+ << SourceRange(D.getDeclSpec().getTypeSpecTypeLoc())
+ << SourceRange(D.getIdentifierLoc());
+ else if (unsigned TypeQuals = D.getDeclSpec().getTypeQualifiers()) {
+ diagnoseIgnoredQualifiers(diag::err_destructor_return_type, TypeQuals,
+ SourceLocation(),
+ D.getDeclSpec().getConstSpecLoc(),
+ D.getDeclSpec().getVolatileSpecLoc(),
+ D.getDeclSpec().getRestrictSpecLoc(),
+ D.getDeclSpec().getAtomicSpecLoc());
+ D.setInvalidType();
+ }
+ }
+
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
+ if (FTI.TypeQuals != 0 && !D.isInvalidType()) {
+ if (FTI.TypeQuals & Qualifiers::Const)
+ Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_destructor)
+ << "const" << SourceRange(D.getIdentifierLoc());
+ if (FTI.TypeQuals & Qualifiers::Volatile)
+ Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_destructor)
+ << "volatile" << SourceRange(D.getIdentifierLoc());
+ if (FTI.TypeQuals & Qualifiers::Restrict)
+ Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_destructor)
+ << "restrict" << SourceRange(D.getIdentifierLoc());
+ D.setInvalidType();
+ }
+
+ // C++0x [class.dtor]p2:
+ // A destructor shall not be declared with a ref-qualifier.
+ if (FTI.hasRefQualifier()) {
+ Diag(FTI.getRefQualifierLoc(), diag::err_ref_qualifier_destructor)
+ << FTI.RefQualifierIsLValueRef
+ << FixItHint::CreateRemoval(FTI.getRefQualifierLoc());
+ D.setInvalidType();
+ }
+
+ // Make sure we don't have any parameters.
+ if (FTIHasNonVoidParameters(FTI)) {
+ Diag(D.getIdentifierLoc(), diag::err_destructor_with_params);
+
+ // Delete the parameters.
+ FTI.freeParams();
+ D.setInvalidType();
+ }
+
+ // Make sure the destructor isn't variadic.
+ if (FTI.isVariadic) {
+ Diag(D.getIdentifierLoc(), diag::err_destructor_variadic);
+ D.setInvalidType();
+ }
+
+ // Rebuild the function type "R" without any type qualifiers or
+ // parameters (in case any of the errors above fired) and with
+ // "void" as the return type, since destructors don't have return
+ // types.
+ if (!D.isInvalidType())
+ return R;
+
+ const FunctionProtoType *Proto = R->getAs<FunctionProtoType>();
+ FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
+ EPI.Variadic = false;
+ EPI.TypeQuals = 0;
+ EPI.RefQualifier = RQ_None;
+ return Context.getFunctionType(Context.VoidTy, None, EPI);
+}
+
+static void extendLeft(SourceRange &R, SourceRange Before) {
+ if (Before.isInvalid())
+ return;
+ R.setBegin(Before.getBegin());
+ if (R.getEnd().isInvalid())
+ R.setEnd(Before.getEnd());
+}
+
+static void extendRight(SourceRange &R, SourceRange After) {
+ if (After.isInvalid())
+ return;
+ if (R.getBegin().isInvalid())
+ R.setBegin(After.getBegin());
+ R.setEnd(After.getEnd());
+}
+
+/// CheckConversionDeclarator - Called by ActOnDeclarator to check the
+/// well-formedness of the conversion function declarator @p D with
+/// type @p R. If there are any errors in the declarator, this routine
+/// will emit diagnostics and mark the declarator invalid. Either way,
+/// the type @p R will be updated to reflect a well-formed type for
+/// the conversion operator.
+void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
+ StorageClass& SC) {
+ // C++ [class.conv.fct]p1:
+ // Neither parameter types nor return type can be specified. The
+ // type of a conversion function (8.3.5) is "function taking no
+ // parameter returning conversion-type-id."
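+ // For example:
+ //   struct S {
+ //     operator int();       // OK
+ //     int operator int();   // error: return type specified
+ //     operator int(int);    // error: parameter specified
+ //   };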
+ if (SC == SC_Static) {
+ if (!D.isInvalidType())
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_not_member)
+ << SourceRange(D.getDeclSpec().getStorageClassSpecLoc())
+ << D.getName().getSourceRange();
+ D.setInvalidType();
+ SC = SC_None;
+ }
+
+ TypeSourceInfo *ConvTSI = nullptr;
+ QualType ConvType =
+ GetTypeFromParser(D.getName().ConversionFunctionId, &ConvTSI);
+
+ if (D.getDeclSpec().hasTypeSpecifier() && !D.isInvalidType()) {
+ // Conversion functions don't have return types, but the parser will
+ // happily parse something like:
+ //
+ // class X {
+ // float operator bool();
+ // };
+ //
+ // The return type will be changed later anyway.
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_return_type)
+ << SourceRange(D.getDeclSpec().getTypeSpecTypeLoc())
+ << SourceRange(D.getIdentifierLoc());
+ D.setInvalidType();
+ }
+
+ const FunctionProtoType *Proto = R->getAs<FunctionProtoType>();
+
+ // Make sure we don't have any parameters.
+ if (Proto->getNumParams() > 0) {
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_with_params);
+
+ // Delete the parameters.
+ D.getFunctionTypeInfo().freeParams();
+ D.setInvalidType();
+ } else if (Proto->isVariadic()) {
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_variadic);
+ D.setInvalidType();
+ }
+
+ // Diagnose "&operator bool()" and other such nonsense. This
+ // is actually a gcc extension which we don't support.
+ if (Proto->getReturnType() != ConvType) {
+ bool NeedsTypedef = false;
+ SourceRange Before, After;
+
+ // Walk the chunks and extract information on them for our diagnostic.
+ bool PastFunctionChunk = false;
+ for (auto &Chunk : D.type_objects()) {
+ switch (Chunk.Kind) {
+ case DeclaratorChunk::Function:
+ if (!PastFunctionChunk) {
+ if (Chunk.Fun.HasTrailingReturnType) {
+ TypeSourceInfo *TRT = nullptr;
+ GetTypeFromParser(Chunk.Fun.getTrailingReturnType(), &TRT);
+ if (TRT) extendRight(After, TRT->getTypeLoc().getSourceRange());
+ }
+ PastFunctionChunk = true;
+ break;
+ }
+ // Fall through.
+ case DeclaratorChunk::Array:
+ NeedsTypedef = true;
+ extendRight(After, Chunk.getSourceRange());
+ break;
+
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::BlockPointer:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::MemberPointer:
+ extendLeft(Before, Chunk.getSourceRange());
+ break;
+
+ case DeclaratorChunk::Paren:
+ extendLeft(Before, Chunk.Loc);
+ extendRight(After, Chunk.EndLoc);
+ break;
+ }
+ }
+
+ SourceLocation Loc = Before.isValid() ? Before.getBegin() :
+ After.isValid() ? After.getBegin() :
+ D.getIdentifierLoc();
+ auto &&DB = Diag(Loc, diag::err_conv_function_with_complex_decl);
+ DB << Before << After;
+
+ if (!NeedsTypedef) {
+ DB << /*don't need a typedef*/0;
+
+ // If we can provide a correct fix-it hint, do so.
+ if (After.isInvalid() && ConvTSI) {
+ SourceLocation InsertLoc =
+ getLocForEndOfToken(ConvTSI->getTypeLoc().getLocEnd());
+ DB << FixItHint::CreateInsertion(InsertLoc, " ")
+ << FixItHint::CreateInsertionFromRange(
+ InsertLoc, CharSourceRange::getTokenRange(Before))
+ << FixItHint::CreateRemoval(Before);
+ }
+ } else if (!Proto->getReturnType()->isDependentType()) {
+ DB << /*typedef*/1 << Proto->getReturnType();
+ } else if (getLangOpts().CPlusPlus11) {
+ DB << /*alias template*/2 << Proto->getReturnType();
+ } else {
+ DB << /*might not be fixable*/3;
+ }
+
+ // Recover by incorporating the other type chunks into the result type.
+ // Note, this does *not* change the name of the function. This is compatible
+ // with the GCC extension:
+ // struct S { &operator int(); } s;
+ // int &r = s.operator int(); // ok in GCC
+ // S::operator int&() {} // error in GCC, function name is 'operator int'.
+ ConvType = Proto->getReturnType();
+ }
+
+ // C++ [class.conv.fct]p4:
+ // The conversion-type-id shall not represent a function type nor
+ // an array type.
+ if (ConvType->isArrayType()) {
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_to_array);
+ ConvType = Context.getPointerType(ConvType);
+ D.setInvalidType();
+ } else if (ConvType->isFunctionType()) {
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_to_function);
+ ConvType = Context.getPointerType(ConvType);
+ D.setInvalidType();
+ }
+
+ // Rebuild the function type "R" without any parameters (in case any
+ // of the errors above fired) and with the conversion type as the
+ // return type.
+ if (D.isInvalidType())
+ R = Context.getFunctionType(ConvType, None, Proto->getExtProtoInfo());
+
+ // C++0x explicit conversion operators.
+ if (D.getDeclSpec().isExplicitSpecified())
+ Diag(D.getDeclSpec().getExplicitSpecLoc(),
+ getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_explicit_conversion_functions :
+ diag::ext_explicit_conversion_functions)
+ << SourceRange(D.getDeclSpec().getExplicitSpecLoc());
+}
+
+/// ActOnConversionDeclarator - Called by ActOnDeclarator to complete
+/// the declaration of the given C++ conversion function. This routine
+/// is responsible for recording the conversion function in the C++
+/// class, if possible.
+Decl *Sema::ActOnConversionDeclarator(CXXConversionDecl *Conversion) {
+ assert(Conversion && "Expected to receive a conversion function declaration");
+
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(Conversion->getDeclContext());
+
+ // Make sure we aren't redeclaring the conversion function.
+ QualType ConvType = Context.getCanonicalType(Conversion->getConversionType());
+
+ // C++ [class.conv.fct]p1:
+ // [...] A conversion function is never used to convert a
+ // (possibly cv-qualified) object to the (possibly cv-qualified)
+ // same object type (or a reference to it), to a (possibly
+ // cv-qualified) base class of that type (or a reference to it),
+ // or to (possibly cv-qualified) void.
+ // FIXME: Suppress this warning if the conversion function ends up being a
+ // virtual function that overrides a virtual function in a base class.
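+ // For example:
+ //   struct B {};
+ //   struct D : B {
+ //     operator D();      // warning: will never be used (same type)
+ //     operator B&();     // warning: will never be used (base class)
+ //     operator void();   // warning: will never be used (void)
+ //   };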
+ QualType ClassType
+ = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
+ if (const ReferenceType *ConvTypeRef = ConvType->getAs<ReferenceType>())
+ ConvType = ConvTypeRef->getPointeeType();
+ if (Conversion->getTemplateSpecializationKind() != TSK_Undeclared &&
+ Conversion->getTemplateSpecializationKind() != TSK_ExplicitSpecialization)
+ /* Suppress diagnostics for instantiations. */;
+ else if (ConvType->isRecordType()) {
+ ConvType = Context.getCanonicalType(ConvType).getUnqualifiedType();
+ if (ConvType == ClassType)
+ Diag(Conversion->getLocation(), diag::warn_conv_to_self_not_used)
+ << ClassType;
+ else if (IsDerivedFrom(Conversion->getLocation(), ClassType, ConvType))
+ Diag(Conversion->getLocation(), diag::warn_conv_to_base_not_used)
+ << ClassType << ConvType;
+ } else if (ConvType->isVoidType()) {
+ Diag(Conversion->getLocation(), diag::warn_conv_to_void_not_used)
+ << ClassType << ConvType;
+ }
+
+ if (FunctionTemplateDecl *ConversionTemplate
+ = Conversion->getDescribedFunctionTemplate())
+ return ConversionTemplate;
+
+ return Conversion;
+}
+
+//===----------------------------------------------------------------------===//
+// Namespace Handling
+//===----------------------------------------------------------------------===//
+
+/// \brief Diagnose a mismatch in 'inline' qualifiers when a namespace is
+/// reopened.
+static void DiagnoseNamespaceInlineMismatch(Sema &S, SourceLocation KeywordLoc,
+ SourceLocation Loc,
+ IdentifierInfo *II, bool *IsInline,
+ NamespaceDecl *PrevNS) {
+ assert(*IsInline != PrevNS->isInline());
+
+ // HACK: Work around a bug in libstdc++4.6's <atomic>, where
+ // std::__atomic[0,1,2] are defined as non-inline namespaces, then reopened as
+ // inline namespaces, with the intention of bringing names into namespace std.
+ //
+ // We support this just well enough to get that case working; this is not
+ // sufficient to support reopening namespaces as inline in general.
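+ // The pattern in question looks roughly like:
+ //   namespace std { namespace __atomic0 { /*...*/ } }         // non-inline
+ //   namespace std { inline namespace __atomic0 { /*...*/ } }  // reopened inline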
+ if (*IsInline && II && II->getName().startswith("__atomic") &&
+ S.getSourceManager().isInSystemHeader(Loc)) {
+ // Mark all prior declarations of the namespace as inline.
+ for (NamespaceDecl *NS = PrevNS->getMostRecentDecl(); NS;
+ NS = NS->getPreviousDecl())
+ NS->setInline(*IsInline);
+ // Patch up the lookup table for the containing namespace. This isn't really
+ // correct, but it's good enough for this particular case.
+ for (auto *I : PrevNS->decls())
+ if (auto *ND = dyn_cast<NamedDecl>(I))
+ PrevNS->getParent()->makeDeclVisibleInContext(ND);
+ return;
+ }
+
+ if (PrevNS->isInline())
+ // The user probably just forgot the 'inline', so suggest that it
+ // be added back.
+ S.Diag(Loc, diag::warn_inline_namespace_reopened_noninline)
+ << FixItHint::CreateInsertion(KeywordLoc, "inline ");
+ else
+ S.Diag(Loc, diag::err_inline_namespace_mismatch) << *IsInline;
+
+ S.Diag(PrevNS->getLocation(), diag::note_previous_definition);
+ *IsInline = PrevNS->isInline();
+}
+
+/// ActOnStartNamespaceDef - This is called at the start of a namespace
+/// definition.
+Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
+ SourceLocation InlineLoc,
+ SourceLocation NamespaceLoc,
+ SourceLocation IdentLoc,
+ IdentifierInfo *II,
+ SourceLocation LBrace,
+ AttributeList *AttrList,
+ UsingDirectiveDecl *&UD) {
+ SourceLocation StartLoc = InlineLoc.isValid() ? InlineLoc : NamespaceLoc;
+ // For anonymous namespace, take the location of the left brace.
+ SourceLocation Loc = II ? IdentLoc : LBrace;
+ bool IsInline = InlineLoc.isValid();
+ bool IsInvalid = false;
+ bool IsStd = false;
+ bool AddToKnown = false;
+ Scope *DeclRegionScope = NamespcScope->getParent();
+
+ NamespaceDecl *PrevNS = nullptr;
+ if (II) {
+ // C++ [namespace.def]p2:
+ // The identifier in an original-namespace-definition shall not
+ // have been previously defined in the declarative region in
+ // which the original-namespace-definition appears. The
+ // identifier in an original-namespace-definition is the name of
+ // the namespace. Subsequently in that declarative region, it is
+ // treated as an original-namespace-name.
+ //
+ // Since namespace names are unique in their scope, and we don't
+ // look through using directives, just look for any ordinary names
+ // as if by qualified name lookup.
+ LookupResult R(*this, II, IdentLoc, LookupOrdinaryName, ForRedeclaration);
+ LookupQualifiedName(R, CurContext->getRedeclContext());
+ NamedDecl *PrevDecl =
+ R.isSingleResult() ? R.getRepresentativeDecl() : nullptr;
+ PrevNS = dyn_cast_or_null<NamespaceDecl>(PrevDecl);
+
+ if (PrevNS) {
+ // This is an extended namespace definition.
+ if (IsInline != PrevNS->isInline())
+ DiagnoseNamespaceInlineMismatch(*this, NamespaceLoc, Loc, II,
+ &IsInline, PrevNS);
+ } else if (PrevDecl) {
+ // This is an invalid name redefinition.
+ Diag(Loc, diag::err_redefinition_different_kind)
+ << II;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ IsInvalid = true;
+ // Continue on to push Namespc as current DeclContext and return it.
+ } else if (II->isStr("std") &&
+ CurContext->getRedeclContext()->isTranslationUnit()) {
+ // This is the first "real" definition of the namespace "std", so update
+ // our cache of the "std" namespace to point at this definition.
+ PrevNS = getStdNamespace();
+ IsStd = true;
+ AddToKnown = !IsInline;
+ } else {
+ // We've seen this namespace for the first time.
+ AddToKnown = !IsInline;
+ }
+ } else {
+ // Anonymous namespaces.
+
+ // Determine whether the parent already has an anonymous namespace.
+ DeclContext *Parent = CurContext->getRedeclContext();
+ if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(Parent)) {
+ PrevNS = TU->getAnonymousNamespace();
+ } else {
+ NamespaceDecl *ND = cast<NamespaceDecl>(Parent);
+ PrevNS = ND->getAnonymousNamespace();
+ }
+
+ if (PrevNS && IsInline != PrevNS->isInline())
+ DiagnoseNamespaceInlineMismatch(*this, NamespaceLoc, NamespaceLoc, II,
+ &IsInline, PrevNS);
+ }
+
+ NamespaceDecl *Namespc = NamespaceDecl::Create(Context, CurContext, IsInline,
+ StartLoc, Loc, II, PrevNS);
+ if (IsInvalid)
+ Namespc->setInvalidDecl();
+
+ ProcessDeclAttributeList(DeclRegionScope, Namespc, AttrList);
+
+ // FIXME: Should we be merging attributes?
+ if (const VisibilityAttr *Attr = Namespc->getAttr<VisibilityAttr>())
+ PushNamespaceVisibilityAttr(Attr, Loc);
+
+ if (IsStd)
+ StdNamespace = Namespc;
+ if (AddToKnown)
+ KnownNamespaces[Namespc] = false;
+
+ if (II) {
+ PushOnScopeChains(Namespc, DeclRegionScope);
+ } else {
+ // Link the anonymous namespace into its parent.
+ DeclContext *Parent = CurContext->getRedeclContext();
+ if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(Parent)) {
+ TU->setAnonymousNamespace(Namespc);
+ } else {
+ cast<NamespaceDecl>(Parent)->setAnonymousNamespace(Namespc);
+ }
+
+ CurContext->addDecl(Namespc);
+
+ // C++ [namespace.unnamed]p1. An unnamed-namespace-definition
+ // behaves as if it were replaced by
+ // namespace unique { /* empty body */ }
+ // using namespace unique;
+ // namespace unique { namespace-body }
+ // where all occurrences of 'unique' in a translation unit are
+ // replaced by the same identifier and this identifier differs
+ // from all other identifiers in the entire program.
+
+ // We just create the namespace with an empty name and then add an
+ // implicit using declaration, just like the standard suggests.
+ //
+ // CodeGen enforces the "universally unique" aspect by giving all
+ // declarations semantically contained within an anonymous
+ // namespace internal linkage.
+
+ if (!PrevNS) {
+ UD = UsingDirectiveDecl::Create(Context, Parent,
+ /* 'using' */ LBrace,
+ /* 'namespace' */ SourceLocation(),
+ /* qualifier */ NestedNameSpecifierLoc(),
+ /* identifier */ SourceLocation(),
+ Namespc,
+ /* Ancestor */ Parent);
+ UD->setImplicit();
+ Parent->addDecl(UD);
+ }
+ }
+
+ ActOnDocumentableDecl(Namespc);
+
+ // Although we could have an invalid decl (i.e. the namespace name is a
+ // redefinition), push it as current DeclContext and try to continue parsing.
+ // FIXME: We should be able to push Namespc here, so that each DeclContext
+ // for the namespace has the declarations that showed up in that particular
+ // namespace definition.
+ PushDeclContext(NamespcScope, Namespc);
+ return Namespc;
+}
+
+/// getNamespaceDecl - Returns the namespace a decl represents. If the decl
+/// is a namespace alias, returns the namespace it points to.
+static inline NamespaceDecl *getNamespaceDecl(NamedDecl *D) {
+ if (NamespaceAliasDecl *AD = dyn_cast_or_null<NamespaceAliasDecl>(D))
+ return AD->getNamespace();
+ return dyn_cast_or_null<NamespaceDecl>(D);
+}
+
+/// ActOnFinishNamespaceDef - This callback is called after a namespace is
+/// exited. Decl is the DeclTy returned by ActOnStartNamespaceDef.
+void Sema::ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace) {
+ NamespaceDecl *Namespc = dyn_cast_or_null<NamespaceDecl>(Dcl);
+ assert(Namespc && "Invalid parameter, expected NamespaceDecl");
+ Namespc->setRBraceLoc(RBrace);
+ PopDeclContext();
+ if (Namespc->hasAttr<VisibilityAttr>())
+ PopPragmaVisibility(true, RBrace);
+}
+
+CXXRecordDecl *Sema::getStdBadAlloc() const {
+ return cast_or_null<CXXRecordDecl>(
+ StdBadAlloc.get(Context.getExternalSource()));
+}
+
+NamespaceDecl *Sema::getStdNamespace() const {
+ return cast_or_null<NamespaceDecl>(
+ StdNamespace.get(Context.getExternalSource()));
+}
+
+/// \brief Retrieve the special "std" namespace, which may require us to
+/// implicitly define the namespace.
+NamespaceDecl *Sema::getOrCreateStdNamespace() {
+ if (!StdNamespace) {
+ // The "std" namespace has not yet been defined, so build one implicitly.
+ StdNamespace = NamespaceDecl::Create(Context,
+ Context.getTranslationUnitDecl(),
+ /*Inline=*/false,
+ SourceLocation(), SourceLocation(),
+ &PP.getIdentifierTable().get("std"),
+ /*PrevDecl=*/nullptr);
+ getStdNamespace()->setImplicit(true);
+ }
+
+ return getStdNamespace();
+}
+
+bool Sema::isStdInitializerList(QualType Ty, QualType *Element) {
+ assert(getLangOpts().CPlusPlus &&
+ "Looking for std::initializer_list outside of C++.");
+
+ // We're looking for implicit instantiations of
+ // template <typename E> class std::initializer_list.
+
+ if (!StdNamespace) // If we haven't seen namespace std yet, this can't be it.
+ return false;
+
+ ClassTemplateDecl *Template = nullptr;
+ const TemplateArgument *Arguments = nullptr;
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+
+ ClassTemplateSpecializationDecl *Specialization =
+ dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
+ if (!Specialization)
+ return false;
+
+ Template = Specialization->getSpecializedTemplate();
+ Arguments = Specialization->getTemplateArgs().data();
+ } else if (const TemplateSpecializationType *TST =
+ Ty->getAs<TemplateSpecializationType>()) {
+ Template = dyn_cast_or_null<ClassTemplateDecl>(
+ TST->getTemplateName().getAsTemplateDecl());
+ Arguments = TST->getArgs();
+ }
+ if (!Template)
+ return false;
+
+ if (!StdInitializerList) {
+ // Haven't recognized std::initializer_list yet, maybe this is it.
+ CXXRecordDecl *TemplateClass = Template->getTemplatedDecl();
+ if (TemplateClass->getIdentifier() !=
+ &PP.getIdentifierTable().get("initializer_list") ||
+ !getStdNamespace()->InEnclosingNamespaceSetOf(
+ TemplateClass->getDeclContext()))
+ return false;
+ // This is a template called std::initializer_list, but is it the right
+ // template?
+ TemplateParameterList *Params = Template->getTemplateParameters();
+ if (Params->getMinRequiredArguments() != 1)
+ return false;
+ if (!isa<TemplateTypeParmDecl>(Params->getParam(0)))
+ return false;
+
+ // It's the right template.
+ StdInitializerList = Template;
+ }
+
+ if (Template->getCanonicalDecl() != StdInitializerList->getCanonicalDecl())
+ return false;
+
+ // This is an instance of std::initializer_list. Find the argument type.
+ if (Element)
+ *Element = Arguments[0].getAsType();
+ return true;
+}
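+
+// For illustration (hypothetical code): the deduced type of a braced list is
+// an implicit instantiation that this recognizer accepts, reporting the
+// element type through the out-parameter:
+//   auto IL = {1, 2, 3};  // decltype(IL) is std::initializer_list<int>;
+//                         //   isStdInitializerList on it yields true and
+//                         //   sets *Element to 'int'.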
+
+static ClassTemplateDecl *LookupStdInitializerList(Sema &S, SourceLocation Loc){
+ NamespaceDecl *Std = S.getStdNamespace();
+ if (!Std) {
+ S.Diag(Loc, diag::err_implied_std_initializer_list_not_found);
+ return nullptr;
+ }
+
+ LookupResult Result(S, &S.PP.getIdentifierTable().get("initializer_list"),
+ Loc, Sema::LookupOrdinaryName);
+ if (!S.LookupQualifiedName(Result, Std)) {
+ S.Diag(Loc, diag::err_implied_std_initializer_list_not_found);
+ return nullptr;
+ }
+ ClassTemplateDecl *Template = Result.getAsSingle<ClassTemplateDecl>();
+ if (!Template) {
+ Result.suppressDiagnostics();
+ // We found something that is not a class template. Complain about the first
+ // declaration we found.
+ NamedDecl *Found = *Result.begin();
+ S.Diag(Found->getLocation(), diag::err_malformed_std_initializer_list);
+ return nullptr;
+ }
+
+ // We found some template called std::initializer_list. Now verify that it's
+ // correct.
+ TemplateParameterList *Params = Template->getTemplateParameters();
+ if (Params->getMinRequiredArguments() != 1 ||
+ !isa<TemplateTypeParmDecl>(Params->getParam(0))) {
+ S.Diag(Template->getLocation(), diag::err_malformed_std_initializer_list);
+ return nullptr;
+ }
+
+ return Template;
+}
+
+QualType Sema::BuildStdInitializerList(QualType Element, SourceLocation Loc) {
+ if (!StdInitializerList) {
+ StdInitializerList = LookupStdInitializerList(*this, Loc);
+ if (!StdInitializerList)
+ return QualType();
+ }
+
+ TemplateArgumentListInfo Args(Loc, Loc);
+ Args.addArgument(TemplateArgumentLoc(TemplateArgument(Element),
+ Context.getTrivialTypeSourceInfo(Element,
+ Loc)));
+ return Context.getCanonicalType(
+ CheckTemplateIdType(TemplateName(StdInitializerList), Loc, Args));
+}
+
+bool Sema::isInitListConstructor(const CXXConstructorDecl* Ctor) {
+ // C++ [dcl.init.list]p2:
+ // A constructor is an initializer-list constructor if its first parameter
+ // is of type std::initializer_list<E> or reference to possibly cv-qualified
+ // std::initializer_list<E> for some type E, and either there are no other
+ // parameters or else all other parameters have default arguments.
+ if (Ctor->getNumParams() < 1 ||
+ (Ctor->getNumParams() > 1 && !Ctor->getParamDecl(1)->hasDefaultArg()))
+ return false;
+
+ QualType ArgType = Ctor->getParamDecl(0)->getType();
+ if (const ReferenceType *RT = ArgType->getAs<ReferenceType>())
+ ArgType = RT->getPointeeType().getUnqualifiedType();
+
+ return isStdInitializerList(ArgType, nullptr);
+}
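+
+// For illustration (hypothetical code), the classification above accepts:
+//   struct S {
+//     S(std::initializer_list<int>);                  // init-list constructor
+//     S(const std::initializer_list<int> &, int = 0); // also one: reference,
+//                                                     //   trailing default arg
+//     S(int, std::initializer_list<int>);             // not one: wrong first
+//   };                                                //   parameter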
+
+/// \brief Determine whether a using-directive is in a context where it will
+/// apply throughout the entire translation unit.
+static bool IsUsingDirectiveInToplevelContext(DeclContext *CurContext) {
+ switch (CurContext->getDeclKind()) {
+ case Decl::TranslationUnit:
+ return true;
+ case Decl::LinkageSpec:
+ return IsUsingDirectiveInToplevelContext(CurContext->getParent());
+ default:
+ return false;
+ }
+}
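+
+// For illustration (hypothetical code):
+//   using namespace std;                   // top-level: TranslationUnit
+//   extern "C++" { using namespace std; }  // still top-level via LinkageSpec
+//   namespace N { using namespace std; }   // not top-level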
+
+namespace {
+
+// Callback to only accept typo corrections that are namespaces.
+class NamespaceValidatorCCC : public CorrectionCandidateCallback {
+public:
+ bool ValidateCandidate(const TypoCorrection &candidate) override {
+ if (NamedDecl *ND = candidate.getCorrectionDecl())
+ return isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND);
+ return false;
+ }
+};
+
+}
+
+static bool TryNamespaceTypoCorrection(Sema &S, LookupResult &R, Scope *Sc,
+ CXXScopeSpec &SS,
+ SourceLocation IdentLoc,
+ IdentifierInfo *Ident) {
+ R.clear();
+ if (TypoCorrection Corrected =
+ S.CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), Sc, &SS,
+ llvm::make_unique<NamespaceValidatorCCC>(),
+ Sema::CTK_ErrorRecovery)) {
+ if (DeclContext *DC = S.computeDeclContext(SS, false)) {
+ std::string CorrectedStr(Corrected.getAsString(S.getLangOpts()));
+ bool DroppedSpecifier = Corrected.WillReplaceSpecifier() &&
+ Ident->getName().equals(CorrectedStr);
+ S.diagnoseTypo(Corrected,
+ S.PDiag(diag::err_using_directive_member_suggest)
+ << Ident << DC << DroppedSpecifier << SS.getRange(),
+ S.PDiag(diag::note_namespace_defined_here));
+ } else {
+ S.diagnoseTypo(Corrected,
+ S.PDiag(diag::err_using_directive_suggest) << Ident,
+ S.PDiag(diag::note_namespace_defined_here));
+ }
+ R.addDecl(Corrected.getFoundDecl());
+ return true;
+ }
+ return false;
+}
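+
+// For illustration (hypothetical code): with a namespace 'filesystem' in
+// scope, a directive such as
+//   using namespace filesysten;  // typo
+// recovers by suggesting 'filesystem', since the callback above accepts only
+// namespaces (and namespace aliases) as correction candidates.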
+
+Decl *Sema::ActOnUsingDirective(Scope *S,
+ SourceLocation UsingLoc,
+ SourceLocation NamespcLoc,
+ CXXScopeSpec &SS,
+ SourceLocation IdentLoc,
+ IdentifierInfo *NamespcName,
+ AttributeList *AttrList) {
+ assert(!SS.isInvalid() && "Invalid CXXScopeSpec.");
+ assert(NamespcName && "Invalid NamespcName.");
+ assert(IdentLoc.isValid() && "Invalid NamespcName location.");
+
+ // This can only happen along a recovery path.
+ while (S->isTemplateParamScope())
+ S = S->getParent();
+ assert(S->getFlags() & Scope::DeclScope && "Invalid Scope.");
+
+ UsingDirectiveDecl *UDir = nullptr;
+ NestedNameSpecifier *Qualifier = nullptr;
+ if (SS.isSet())
+ Qualifier = SS.getScopeRep();
+
+ // Lookup namespace name.
+ LookupResult R(*this, NamespcName, IdentLoc, LookupNamespaceName);
+ LookupParsedName(R, S, &SS);
+ if (R.isAmbiguous())
+ return nullptr;
+
+ if (R.empty()) {
+ R.clear();
+ // Allow "using namespace std;" or "using namespace ::std;" even if
+ // "std" hasn't been defined yet, for GCC compatibility.
+ if ((!Qualifier || Qualifier->getKind() == NestedNameSpecifier::Global) &&
+ NamespcName->isStr("std")) {
+ Diag(IdentLoc, diag::ext_using_undefined_std);
+ R.addDecl(getOrCreateStdNamespace());
+ R.resolveKind();
+ }
+ // Otherwise, attempt typo correction.
+ else TryNamespaceTypoCorrection(*this, R, S, SS, IdentLoc, NamespcName);
+ }
+
+ if (!R.empty()) {
+ NamedDecl *Named = R.getRepresentativeDecl();
+ NamespaceDecl *NS = R.getAsSingle<NamespaceDecl>();
+ assert(NS && "expected namespace decl");
+
+ // The use of a nested name specifier may trigger deprecation warnings.
+ DiagnoseUseOfDecl(Named, IdentLoc);
+
+ // C++ [namespace.udir]p1:
+ // A using-directive specifies that the names in the nominated
+ // namespace can be used in the scope in which the
+ // using-directive appears after the using-directive. During
+ // unqualified name lookup (3.4.1), the names appear as if they
+ // were declared in the nearest enclosing namespace which
+ // contains both the using-directive and the nominated
+ // namespace. [Note: in this context, "contains" means "contains
+ // directly or indirectly". ]
+
+ // Find enclosing context containing both using-directive and
+ // nominated namespace.
+ DeclContext *CommonAncestor = cast<DeclContext>(NS);
+ while (CommonAncestor && !CommonAncestor->Encloses(CurContext))
+ CommonAncestor = CommonAncestor->getParent();
+
+ UDir = UsingDirectiveDecl::Create(Context, CurContext, UsingLoc, NamespcLoc,
+ SS.getWithLocInContext(Context),
+ IdentLoc, Named, CommonAncestor);
+
+ if (IsUsingDirectiveInToplevelContext(CurContext) &&
+ !SourceMgr.isInMainFile(SourceMgr.getExpansionLoc(IdentLoc))) {
+ Diag(IdentLoc, diag::warn_using_directive_in_header);
+ }
+
+ PushUsingDirective(S, UDir);
+ } else {
+ Diag(IdentLoc, diag::err_expected_namespace_name) << SS.getRange();
+ }
+
+ if (UDir)
+ ProcessDeclAttributeList(S, UDir, AttrList);
+
+ return UDir;
+}
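+
+// For illustration (hypothetical code) of the common-ancestor rule:
+//   namespace A { namespace B { int x; } }
+//   namespace A {
+//     using namespace B;  // CommonAncestor is A; B's names behave as if
+//   }                     //   declared in A during unqualified lookup
+//   int y = A::x;         // OK: qualified lookup into A also finds B::x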
+
+void Sema::PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir) {
+ // If the scope has an associated entity and the using directive is at
+ // namespace or translation unit scope, add the UsingDirectiveDecl into
+ // its lookup structure so qualified name lookup can find it.
+ DeclContext *Ctx = S->getEntity();
+ if (Ctx && !Ctx->isFunctionOrMethod())
+ Ctx->addDecl(UDir);
+ else
+ // Otherwise, it is at block scope. The using-directives will affect lookup
+ // only to the end of the scope.
+ S->PushUsingDirective(UDir);
+}
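+
+// For illustration (hypothetical code): at block scope the directive lives on
+// the Scope rather than in a DeclContext, so its effect ends with the block:
+//   void f() {
+//     { using namespace std; string s; }  // OK inside the braces
+//     string t;                           // error: 'string' not visible here
+//   }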
+
+Decl *Sema::ActOnUsingDeclaration(Scope *S,
+ AccessSpecifier AS,
+ bool HasUsingKeyword,
+ SourceLocation UsingLoc,
+ CXXScopeSpec &SS,
+ UnqualifiedId &Name,
+ AttributeList *AttrList,
+ bool HasTypenameKeyword,
+ SourceLocation TypenameLoc) {
+ assert(S->getFlags() & Scope::DeclScope && "Invalid Scope.");
+
+ switch (Name.getKind()) {
+ case UnqualifiedId::IK_ImplicitSelfParam:
+ case UnqualifiedId::IK_Identifier:
+ case UnqualifiedId::IK_OperatorFunctionId:
+ case UnqualifiedId::IK_LiteralOperatorId:
+ case UnqualifiedId::IK_ConversionFunctionId:
+ break;
+
+ case UnqualifiedId::IK_ConstructorName:
+ case UnqualifiedId::IK_ConstructorTemplateId:
+ // C++11 inheriting constructors.
+ Diag(Name.getLocStart(),
+ getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_using_decl_constructor :
+ diag::err_using_decl_constructor)
+ << SS.getRange();
+
+ if (getLangOpts().CPlusPlus11) break;
+
+ return nullptr;
+
+ case UnqualifiedId::IK_DestructorName:
+ Diag(Name.getLocStart(), diag::err_using_decl_destructor)
+ << SS.getRange();
+ return nullptr;
+
+ case UnqualifiedId::IK_TemplateId:
+ Diag(Name.getLocStart(), diag::err_using_decl_template_id)
+ << SourceRange(Name.TemplateId->LAngleLoc, Name.TemplateId->RAngleLoc);
+ return nullptr;
+ }
+
+ DeclarationNameInfo TargetNameInfo = GetNameFromUnqualifiedId(Name);
+ DeclarationName TargetName = TargetNameInfo.getName();
+ if (!TargetName)
+ return nullptr;
+
+ // Warn about access declarations.
+ if (!HasUsingKeyword) {
+ Diag(Name.getLocStart(),
+ getLangOpts().CPlusPlus11 ? diag::err_access_decl
+ : diag::warn_access_decl_deprecated)
+ << FixItHint::CreateInsertion(SS.getRange().getBegin(), "using ");
+ }
+
+ if (DiagnoseUnexpandedParameterPack(SS, UPPC_UsingDeclaration) ||
+ DiagnoseUnexpandedParameterPack(TargetNameInfo, UPPC_UsingDeclaration))
+ return nullptr;
+
+ NamedDecl *UD = BuildUsingDeclaration(S, AS, UsingLoc, SS,
+ TargetNameInfo, AttrList,
+ /* IsInstantiation */ false,
+ HasTypenameKeyword, TypenameLoc);
+ if (UD)
+ PushOnScopeChains(UD, S, /*AddToContext*/ false);
+
+ return UD;
+}
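+
+// For illustration (hypothetical code) of the name-kind restrictions above:
+//   struct B { B(int); };
+//   struct D : B {
+//     using B::B;    // OK in C++11: inheriting constructor
+//     using B::~B;   // error: a using-declaration cannot name a destructor
+//   };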
+
+/// \brief Determine whether a using declaration considers the given
+/// declarations as "equivalent", e.g., if they are redeclarations of
+/// the same entity or are both typedefs of the same type.
+static bool
+IsEquivalentForUsingDecl(ASTContext &Context, NamedDecl *D1, NamedDecl *D2) {
+ if (D1->getCanonicalDecl() == D2->getCanonicalDecl())
+ return true;
+
+ if (TypedefNameDecl *TD1 = dyn_cast<TypedefNameDecl>(D1))
+ if (TypedefNameDecl *TD2 = dyn_cast<TypedefNameDecl>(D2))
+ return Context.hasSameType(TD1->getUnderlyingType(),
+ TD2->getUnderlyingType());
+
+ return false;
+}
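+
+// For illustration (hypothetical code): two typedefs for the same type are
+// "equivalent", so the second using-declaration below is accepted:
+//   namespace A { typedef int T; }
+//   namespace B { typedef int T; }
+//   using A::T;
+//   using B::T;   // OK: equivalent to the previous shadow declaration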
+
+/// Determines whether to create a using shadow decl for a particular
+/// decl, given the set of decls existing prior to this using lookup.
+bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
+ const LookupResult &Previous,
+ UsingShadowDecl *&PrevShadow) {
+ // Diagnose finding a decl which is not from a base class of the
+ // current class. We do this now because there are cases where this
+ // function will silently decide not to build a shadow decl, which
+ // will pre-empt further diagnostics.
+ //
+ // We don't need to do this in C++0x because we do the check once on
+ // the qualifier.
+ //
+ // FIXME: diagnose the following if we care enough:
+ // struct A { int foo; };
+ // struct B : A { using A::foo; };
+ // template <class T> struct C : A {};
+ // template <class T> struct D : C<T> { using B::foo; } // <---
+ // This is invalid (during instantiation) in C++03 because B::foo
+ // resolves to the using decl in B, which is not a base class of D<T>.
+ // We can't diagnose it immediately because C<T> is an unknown
+ // specialization. The UsingShadowDecl in D<T> then points directly
+ // to A::foo, which will look well-formed when we instantiate.
+ // The right solution is to not collapse the shadow-decl chain.
+ if (!getLangOpts().CPlusPlus11 && CurContext->isRecord()) {
+ DeclContext *OrigDC = Orig->getDeclContext();
+
+ // Handle enums and anonymous structs.
+ if (isa<EnumDecl>(OrigDC)) OrigDC = OrigDC->getParent();
+ CXXRecordDecl *OrigRec = cast<CXXRecordDecl>(OrigDC);
+ while (OrigRec->isAnonymousStructOrUnion())
+ OrigRec = cast<CXXRecordDecl>(OrigRec->getDeclContext());
+
+ if (cast<CXXRecordDecl>(CurContext)->isProvablyNotDerivedFrom(OrigRec)) {
+ if (OrigDC == CurContext) {
+ Diag(Using->getLocation(),
+ diag::err_using_decl_nested_name_specifier_is_current_class)
+ << Using->getQualifierLoc().getSourceRange();
+ Diag(Orig->getLocation(), diag::note_using_decl_target);
+ return true;
+ }
+
+ Diag(Using->getQualifierLoc().getBeginLoc(),
+ diag::err_using_decl_nested_name_specifier_is_not_base_class)
+ << Using->getQualifier()
+ << cast<CXXRecordDecl>(CurContext)
+ << Using->getQualifierLoc().getSourceRange();
+ Diag(Orig->getLocation(), diag::note_using_decl_target);
+ return true;
+ }
+ }
+
+ if (Previous.empty()) return false;
+
+ NamedDecl *Target = Orig;
+ if (isa<UsingShadowDecl>(Target))
+ Target = cast<UsingShadowDecl>(Target)->getTargetDecl();
+
+ // If the target happens to be one of the previous declarations, we
+ // don't have a conflict.
+ //
+ // FIXME: but we might be increasing its access, in which case we
+ // should redeclare it.
+ NamedDecl *NonTag = nullptr, *Tag = nullptr;
+ bool FoundEquivalentDecl = false;
+ for (LookupResult::iterator I = Previous.begin(), E = Previous.end();
+ I != E; ++I) {
+ NamedDecl *D = (*I)->getUnderlyingDecl();
+ if (IsEquivalentForUsingDecl(Context, D, Target)) {
+ if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(*I))
+ PrevShadow = Shadow;
+ FoundEquivalentDecl = true;
+ }
+
+ if (isVisible(D))
+ (isa<TagDecl>(D) ? Tag : NonTag) = D;
+ }
+
+ if (FoundEquivalentDecl)
+ return false;
+
+ if (FunctionDecl *FD = Target->getAsFunction()) {
+ NamedDecl *OldDecl = nullptr;
+ switch (CheckOverload(nullptr, FD, Previous, OldDecl,
+ /*IsForUsingDecl*/ true)) {
+ case Ovl_Overload:
+ return false;
+
+ case Ovl_NonFunction:
+ Diag(Using->getLocation(), diag::err_using_decl_conflict);
+ break;
+
+ // We found a decl with the exact signature.
+ case Ovl_Match:
+ // If we're in a record, we want to hide the target, so we
+ // return true (without a diagnostic) to tell the caller not to
+ // build a shadow decl.
+ if (CurContext->isRecord())
+ return true;
+
+ // If we're not in a record, this is an error.
+ Diag(Using->getLocation(), diag::err_using_decl_conflict);
+ break;
+ }
+
+ Diag(Target->getLocation(), diag::note_using_decl_target);
+ Diag(OldDecl->getLocation(), diag::note_using_decl_conflict);
+ return true;
+ }
+
+ // Target is not a function.
+
+ if (isa<TagDecl>(Target)) {
+ // No conflict between a tag and a non-tag.
+ if (!Tag) return false;
+
+ Diag(Using->getLocation(), diag::err_using_decl_conflict);
+ Diag(Target->getLocation(), diag::note_using_decl_target);
+ Diag(Tag->getLocation(), diag::note_using_decl_conflict);
+ return true;
+ }
+
+ // No conflict between a tag and a non-tag.
+ if (!NonTag) return false;
+
+ Diag(Using->getLocation(), diag::err_using_decl_conflict);
+ Diag(Target->getLocation(), diag::note_using_decl_target);
+ Diag(NonTag->getLocation(), diag::note_using_decl_conflict);
+ return true;
+}
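+
+// For illustration (hypothetical code) of the conflict rules above:
+//   namespace N { void f(int); struct S {}; }
+//   void f(int);
+//   using N::f;   // error: exact signature match (Ovl_Match) outside a class
+//   int S;
+//   using N::S;   // OK: a tag does not conflict with a non-tag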
+
+/// Builds a shadow declaration corresponding to a 'using' declaration.
+UsingShadowDecl *Sema::BuildUsingShadowDecl(Scope *S,
+ UsingDecl *UD,
+ NamedDecl *Orig,
+ UsingShadowDecl *PrevDecl) {
+
+ // If we resolved to another shadow declaration, just coalesce them.
+ NamedDecl *Target = Orig;
+ if (isa<UsingShadowDecl>(Target)) {
+ Target = cast<UsingShadowDecl>(Target)->getTargetDecl();
+ assert(!isa<UsingShadowDecl>(Target) && "nested shadow declaration");
+ }
+
+ UsingShadowDecl *Shadow
+ = UsingShadowDecl::Create(Context, CurContext,
+ UD->getLocation(), UD, Target);
+ UD->addShadowDecl(Shadow);
+
+ Shadow->setAccess(UD->getAccess());
+ if (Orig->isInvalidDecl() || UD->isInvalidDecl())
+ Shadow->setInvalidDecl();
+
+ Shadow->setPreviousDecl(PrevDecl);
+
+ if (S)
+ PushOnScopeChains(Shadow, S);
+ else
+ CurContext->addDecl(Shadow);
+
+ return Shadow;
+}
+
+/// Hides a using shadow declaration. This is required by the current
+/// using-decl implementation when a resolvable using declaration in a
+/// class is followed by a declaration which would hide or override
+/// one or more of the using decl's targets; for example:
+///
+/// struct Base { void foo(int); };
+/// struct Derived : Base {
+/// using Base::foo;
+/// void foo(int);
+/// };
+///
+/// The governing language is C++03 [namespace.udecl]p12:
+///
+/// When a using-declaration brings names from a base class into a
+/// derived class scope, member functions in the derived class
+/// override and/or hide member functions with the same name and
+/// parameter types in a base class (rather than conflicting).
+///
+/// There are two ways to implement this:
+/// (1) optimistically create shadow decls when they're not hidden
+/// by existing declarations, or
+/// (2) don't create any shadow decls (or at least don't make them
+/// visible) until we've fully parsed/instantiated the class.
+/// The problem with (1) is that we might have to retroactively remove
+/// a shadow decl, which requires several O(n) operations because the
+/// decl structures are (very reasonably) not designed for removal.
+/// (2) avoids this but is very fiddly and phase-dependent.
+void Sema::HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow) {
+ if (Shadow->getDeclName().getNameKind() ==
+ DeclarationName::CXXConversionFunctionName)
+ cast<CXXRecordDecl>(Shadow->getDeclContext())->removeConversion(Shadow);
+
+ // Remove it from the DeclContext...
+ Shadow->getDeclContext()->removeDecl(Shadow);
+
+ // ...and the scope, if applicable...
+ if (S) {
+ S->RemoveDecl(Shadow);
+ IdResolver.RemoveDecl(Shadow);
+ }
+
+ // ...and the using decl.
+ Shadow->getUsingDecl()->removeShadowDecl(Shadow);
+
+ // TODO: complain somehow if Shadow was used. It should not be possible
+ // for that to happen, but the invariant guaranteeing it is not obvious.
+}
+
+/// Find the base specifier for a base class with the given type.
+static CXXBaseSpecifier *findDirectBaseWithType(CXXRecordDecl *Derived,
+ QualType DesiredBase,
+ bool &AnyDependentBases) {
+ // Check whether the named type is a direct base class.
+ CanQualType CanonicalDesiredBase = DesiredBase->getCanonicalTypeUnqualified();
+ for (auto &Base : Derived->bases()) {
+ CanQualType BaseType = Base.getType()->getCanonicalTypeUnqualified();
+ if (CanonicalDesiredBase == BaseType)
+ return &Base;
+ if (BaseType->isDependentType())
+ AnyDependentBases = true;
+ }
+ return nullptr;
+}
+
+namespace {
+class UsingValidatorCCC : public CorrectionCandidateCallback {
+public:
+ UsingValidatorCCC(bool HasTypenameKeyword, bool IsInstantiation,
+ NestedNameSpecifier *NNS, CXXRecordDecl *RequireMemberOf)
+ : HasTypenameKeyword(HasTypenameKeyword),
+ IsInstantiation(IsInstantiation), OldNNS(NNS),
+ RequireMemberOf(RequireMemberOf) {}
+
+ bool ValidateCandidate(const TypoCorrection &Candidate) override {
+ NamedDecl *ND = Candidate.getCorrectionDecl();
+
+ // Keywords are not valid here.
+ if (!ND || isa<NamespaceDecl>(ND))
+ return false;
+
+ // Completely unqualified names are invalid for a 'using' declaration.
+ if (Candidate.WillReplaceSpecifier() && !Candidate.getCorrectionSpecifier())
+ return false;
+
+ if (RequireMemberOf) {
+ auto *FoundRecord = dyn_cast<CXXRecordDecl>(ND);
+ if (FoundRecord && FoundRecord->isInjectedClassName()) {
+ // No-one ever wants a using-declaration to name an injected-class-name
+ // of a base class, unless they're declaring an inheriting constructor.
+ ASTContext &Ctx = ND->getASTContext();
+ if (!Ctx.getLangOpts().CPlusPlus11)
+ return false;
+ QualType FoundType = Ctx.getRecordType(FoundRecord);
+
+ // Check that the injected-class-name is named as a member of its own
+ // type; we don't want to suggest 'using Derived::Base;', since that
+ // means something else.
+ NestedNameSpecifier *Specifier =
+ Candidate.WillReplaceSpecifier()
+ ? Candidate.getCorrectionSpecifier()
+ : OldNNS;
+ if (!Specifier->getAsType() ||
+ !Ctx.hasSameType(QualType(Specifier->getAsType(), 0), FoundType))
+ return false;
+
+ // Check that this inheriting constructor declaration actually names a
+ // direct base class of the current class.
+ bool AnyDependentBases = false;
+ if (!findDirectBaseWithType(RequireMemberOf,
+ Ctx.getRecordType(FoundRecord),
+ AnyDependentBases) &&
+ !AnyDependentBases)
+ return false;
+ } else {
+ auto *RD = dyn_cast<CXXRecordDecl>(ND->getDeclContext());
+ if (!RD || RequireMemberOf->isProvablyNotDerivedFrom(RD))
+ return false;
+
+ // FIXME: Check that the base class member is accessible?
+ }
+ } else {
+ auto *FoundRecord = dyn_cast<CXXRecordDecl>(ND);
+ if (FoundRecord && FoundRecord->isInjectedClassName())
+ return false;
+ }
+
+ if (isa<TypeDecl>(ND))
+ return HasTypenameKeyword || !IsInstantiation;
+
+ return !HasTypenameKeyword;
+ }
+
+private:
+ bool HasTypenameKeyword;
+ bool IsInstantiation;
+ NestedNameSpecifier *OldNNS;
+ CXXRecordDecl *RequireMemberOf;
+};
+} // end anonymous namespace
+
+/// Builds a using declaration.
+///
+/// \param IsInstantiation - Whether this call arises from an
+/// instantiation of an unresolved using declaration. We treat
+/// the lookup differently for these declarations.
+NamedDecl *Sema::BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
+ SourceLocation UsingLoc,
+ CXXScopeSpec &SS,
+ DeclarationNameInfo NameInfo,
+ AttributeList *AttrList,
+ bool IsInstantiation,
+ bool HasTypenameKeyword,
+ SourceLocation TypenameLoc) {
+ assert(!SS.isInvalid() && "Invalid CXXScopeSpec.");
+ SourceLocation IdentLoc = NameInfo.getLoc();
+ assert(IdentLoc.isValid() && "Invalid TargetName location.");
+
+ // FIXME: We ignore attributes for now.
+
+ if (SS.isEmpty()) {
+ Diag(IdentLoc, diag::err_using_requires_qualname);
+ return nullptr;
+ }
+
+ // Do the redeclaration lookup in the current scope.
+ LookupResult Previous(*this, NameInfo, LookupUsingDeclName,
+ ForRedeclaration);
+ Previous.setHideTags(false);
+ if (S) {
+ LookupName(Previous, S);
+
+ // LookupName can also return declarations from enclosing scopes; filter
+ // those out, since only the current scope matters for redeclaration.
+ LookupResult::Filter F = Previous.makeFilter();
+ while (F.hasNext()) {
+ NamedDecl *D = F.next();
+ if (!isDeclInScope(D, CurContext, S))
+ F.erase();
+ // If we found a local extern declaration that's not ordinarily visible,
+ // and this declaration is being added to a non-block scope, ignore it.
+ // We're only checking for scope conflicts here, not also for violations
+ // of the linkage rules.
+ else if (!CurContext->isFunctionOrMethod() && D->isLocalExternDecl() &&
+ !(D->getIdentifierNamespace() & Decl::IDNS_Ordinary))
+ F.erase();
+ }
+ F.done();
+ } else {
+ assert(IsInstantiation && "no scope in non-instantiation");
+ assert(CurContext->isRecord() && "scope not record in instantiation");
+ LookupQualifiedName(Previous, CurContext);
+ }
+
+ // Check for invalid redeclarations.
+ if (CheckUsingDeclRedeclaration(UsingLoc, HasTypenameKeyword,
+ SS, IdentLoc, Previous))
+ return nullptr;
+
+ // Check for bad qualifiers.
+ if (CheckUsingDeclQualifier(UsingLoc, SS, NameInfo, IdentLoc))
+ return nullptr;
+
+ DeclContext *LookupContext = computeDeclContext(SS);
+ NamedDecl *D;
+ NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
+ if (!LookupContext) {
+ if (HasTypenameKeyword) {
+ // FIXME: not all declaration name kinds are legal here
+ D = UnresolvedUsingTypenameDecl::Create(Context, CurContext,
+ UsingLoc, TypenameLoc,
+ QualifierLoc,
+ IdentLoc, NameInfo.getName());
+ } else {
+ D = UnresolvedUsingValueDecl::Create(Context, CurContext, UsingLoc,
+ QualifierLoc, NameInfo);
+ }
+ D->setAccess(AS);
+ CurContext->addDecl(D);
+ return D;
+ }
+
+ auto Build = [&](bool Invalid) {
+ UsingDecl *UD =
+ UsingDecl::Create(Context, CurContext, UsingLoc, QualifierLoc, NameInfo,
+ HasTypenameKeyword);
+ UD->setAccess(AS);
+ CurContext->addDecl(UD);
+ UD->setInvalidDecl(Invalid);
+ return UD;
+ };
+ auto BuildInvalid = [&]{ return Build(true); };
+ auto BuildValid = [&]{ return Build(false); };
+
+ if (RequireCompleteDeclContext(SS, LookupContext))
+ return BuildInvalid();
+
+ // Look up the target name.
+ LookupResult R(*this, NameInfo, LookupOrdinaryName);
+
+ // Unlike most lookups, we don't always want to hide tag
+ // declarations: tag names are visible through the using declaration
+ // even if hidden by ordinary names, *except* in a dependent context
+ // where it's important for the sanity of two-phase lookup.
+ if (!IsInstantiation)
+ R.setHideTags(false);
+
+ // For the purposes of this lookup, we have a base object type
+ // equal to that of the current context.
+ if (CurContext->isRecord()) {
+ R.setBaseObjectType(
+ Context.getTypeDeclType(cast<CXXRecordDecl>(CurContext)));
+ }
+
+ LookupQualifiedName(R, LookupContext);
+
+ // Try to correct typos if possible. If constructor name lookup finds no
+ // results, that means the named class has no explicit constructors, and we
+ // suppressed declaring implicit ones (probably because it's dependent or
+ // invalid).
+ if (R.empty() &&
+ NameInfo.getName().getNameKind() != DeclarationName::CXXConstructorName) {
+ if (TypoCorrection Corrected = CorrectTypo(
+ R.getLookupNameInfo(), R.getLookupKind(), S, &SS,
+ llvm::make_unique<UsingValidatorCCC>(
+ HasTypenameKeyword, IsInstantiation, SS.getScopeRep(),
+ dyn_cast<CXXRecordDecl>(CurContext)),
+ CTK_ErrorRecovery)) {
+ // We reject any correction for which ND would be NULL.
+ NamedDecl *ND = Corrected.getCorrectionDecl();
+
+ // We reject candidates where DroppedSpecifier == true, hence the
+ // literal '0' below.
+ diagnoseTypo(Corrected, PDiag(diag::err_no_member_suggest)
+ << NameInfo.getName() << LookupContext << 0
+ << SS.getRange());
+
+ // If we corrected to an inheriting constructor, handle it as one.
+ auto *RD = dyn_cast<CXXRecordDecl>(ND);
+ if (RD && RD->isInjectedClassName()) {
+ // Fix up the information we'll use to build the using declaration.
+ if (Corrected.WillReplaceSpecifier()) {
+ NestedNameSpecifierLocBuilder Builder;
+ Builder.MakeTrivial(Context, Corrected.getCorrectionSpecifier(),
+ QualifierLoc.getSourceRange());
+ QualifierLoc = Builder.getWithLocInContext(Context);
+ }
+
+ NameInfo.setName(Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(Context.getRecordType(RD))));
+ NameInfo.setNamedTypeInfo(nullptr);
+ for (auto *Ctor : LookupConstructors(RD))
+ R.addDecl(Ctor);
+ } else {
+ // FIXME: Pick up all the declarations if we found an overloaded function.
+ R.addDecl(ND);
+ }
+ } else {
+ Diag(IdentLoc, diag::err_no_member)
+ << NameInfo.getName() << LookupContext << SS.getRange();
+ return BuildInvalid();
+ }
+ }
+
+ if (R.isAmbiguous())
+ return BuildInvalid();
+
+ if (HasTypenameKeyword) {
+ // If we asked for a typename and got a non-type decl, error out.
+ if (!R.getAsSingle<TypeDecl>()) {
+ Diag(IdentLoc, diag::err_using_typename_non_type);
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I)
+ Diag((*I)->getUnderlyingDecl()->getLocation(),
+ diag::note_using_decl_target);
+ return BuildInvalid();
+ }
+ } else {
+ // If we asked for a non-typename and we got a type, error out,
+ // but only if this is an instantiation of an unresolved using
+ // decl. Otherwise just silently find the type name.
+ if (IsInstantiation && R.getAsSingle<TypeDecl>()) {
+ Diag(IdentLoc, diag::err_using_dependent_value_is_type);
+ Diag(R.getFoundDecl()->getLocation(), diag::note_using_decl_target);
+ return BuildInvalid();
+ }
+ }
+
+ // C++0x N2914 [namespace.udecl]p6:
+ // A using-declaration shall not name a namespace.
+ if (R.getAsSingle<NamespaceDecl>()) {
+ Diag(IdentLoc, diag::err_using_decl_can_not_refer_to_namespace)
+ << SS.getRange();
+ return BuildInvalid();
+ }
+
+ UsingDecl *UD = BuildValid();
+
+ // The normal rules do not apply to inheriting constructor declarations.
+ if (NameInfo.getName().getNameKind() == DeclarationName::CXXConstructorName) {
+ // Suppress access diagnostics; the access check is instead performed at the
+ // point of use for an inheriting constructor.
+ R.suppressDiagnostics();
+ CheckInheritingConstructorUsingDecl(UD);
+ return UD;
+ }
+
+ // Otherwise, build shadow declarations for the entities we found.
+
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
+ UsingShadowDecl *PrevDecl = nullptr;
+ if (!CheckUsingShadowDecl(UD, *I, Previous, PrevDecl))
+ BuildUsingShadowDecl(S, UD, *I, PrevDecl);
+ }
+
+ return UD;
+}
+
+/// Additional checks for a using declaration referring to a constructor name.
+bool Sema::CheckInheritingConstructorUsingDecl(UsingDecl *UD) {
+ assert(!UD->hasTypename() && "expecting a constructor name");
+
+ const Type *SourceType = UD->getQualifier()->getAsType();
+ assert(SourceType &&
+ "Using decl naming constructor doesn't have type in scope spec.");
+ CXXRecordDecl *TargetClass = cast<CXXRecordDecl>(CurContext);
+
+ // Check whether the named type is a direct base class.
+ bool AnyDependentBases = false;
+ auto *Base = findDirectBaseWithType(TargetClass, QualType(SourceType, 0),
+ AnyDependentBases);
+ if (!Base && !AnyDependentBases) {
+ Diag(UD->getUsingLoc(),
+ diag::err_using_decl_constructor_not_in_direct_base)
+ << UD->getNameInfo().getSourceRange()
+ << QualType(SourceType, 0) << TargetClass;
+ UD->setInvalidDecl();
+ return true;
+ }
+
+ if (Base)
+ Base->setInheritConstructors();
+
+ return false;
+}
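+
+// For illustration (hypothetical code):
+//   struct A { A(int); };
+//   struct B : A {};
+//   struct C : B { using A::A; };  // error: A is not a direct base of C
+//   template <class T>
+//   struct D : T { using A::A; };  // not diagnosed here: the base is dependent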
+
+/// Checks that the given using declaration is not an invalid
+/// redeclaration. Note that this is checking only for the using decl
+/// itself, not for any ill-formedness among the UsingShadowDecls.
+bool Sema::CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
+ bool HasTypenameKeyword,
+ const CXXScopeSpec &SS,
+ SourceLocation NameLoc,
+ const LookupResult &Prev) {
+ // C++03 [namespace.udecl]p8:
+ // C++0x [namespace.udecl]p10:
+ // A using-declaration is a declaration and can therefore be used
+ // repeatedly where (and only where) multiple declarations are
+ // allowed.
+ //
+ // That's in non-member contexts.
+ if (!CurContext->getRedeclContext()->isRecord())
+ return false;
+
+ NestedNameSpecifier *Qual = SS.getScopeRep();
+
+ for (LookupResult::iterator I = Prev.begin(), E = Prev.end(); I != E; ++I) {
+ NamedDecl *D = *I;
+
+ bool DTypename;
+ NestedNameSpecifier *DQual;
+ if (UsingDecl *UD = dyn_cast<UsingDecl>(D)) {
+ DTypename = UD->hasTypename();
+ DQual = UD->getQualifier();
+ } else if (UnresolvedUsingValueDecl *UD
+ = dyn_cast<UnresolvedUsingValueDecl>(D)) {
+ DTypename = false;
+ DQual = UD->getQualifier();
+ } else if (UnresolvedUsingTypenameDecl *UD
+ = dyn_cast<UnresolvedUsingTypenameDecl>(D)) {
+ DTypename = true;
+ DQual = UD->getQualifier();
+ } else continue;
+
+ // using decls differ if one says 'typename' and the other doesn't.
+ // FIXME: non-dependent using decls?
+ if (HasTypenameKeyword != DTypename) continue;
+
+ // using decls differ if they name different scopes (but note that
+ // template instantiation can cause this check to trigger when it
+ // didn't before instantiation).
+ if (Context.getCanonicalNestedNameSpecifier(Qual) !=
+ Context.getCanonicalNestedNameSpecifier(DQual))
+ continue;
+
+ Diag(NameLoc, diag::err_using_decl_redeclaration) << SS.getRange();
+ Diag(D->getLocation(), diag::note_using_decl) << 1;
+ return true;
+ }
+
+ return false;
+}
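+
+// For illustration (hypothetical code):
+//   namespace N { int x; }
+//   using N::x;
+//   using N::x;    // OK: repeatable at namespace scope
+//   struct B { int m; };
+//   struct D : B {
+//     using B::m;
+//     using B::m;  // error: redeclaration within a class
+//   };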
+
+/// Checks that the given nested-name qualifier used in a using decl
+/// in the current context is appropriately related to the current
+/// scope. If an error is found, diagnoses it and returns true.
+bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
+ const CXXScopeSpec &SS,
+ const DeclarationNameInfo &NameInfo,
+ SourceLocation NameLoc) {
+ DeclContext *NamedContext = computeDeclContext(SS);
+
+ if (!CurContext->isRecord()) {
+ // C++03 [namespace.udecl]p3:
+ // C++0x [namespace.udecl]p8:
+ // A using-declaration for a class member shall be a member-declaration.
+
+ // If we weren't able to compute a valid scope, it must be a
+ // dependent class scope.
+ if (!NamedContext || NamedContext->isRecord()) {
+ auto *RD = dyn_cast_or_null<CXXRecordDecl>(NamedContext);
+ if (RD && RequireCompleteDeclContext(const_cast<CXXScopeSpec&>(SS), RD))
+ RD = nullptr;
+
+ Diag(NameLoc, diag::err_using_decl_can_not_refer_to_class_member)
+ << SS.getRange();
+
+ // If we have a complete, non-dependent source type, try to suggest a
+ // way to get the same effect.
+ if (!RD)
+ return true;
+
+ // Find what this using-declaration was referring to.
+ LookupResult R(*this, NameInfo, LookupOrdinaryName);
+ R.setHideTags(false);
+ R.suppressDiagnostics();
+ LookupQualifiedName(R, RD);
+
+ if (R.getAsSingle<TypeDecl>()) {
+ if (getLangOpts().CPlusPlus11) {
+ // Convert 'using X::Y;' to 'using Y = X::Y;'.
+ Diag(SS.getBeginLoc(), diag::note_using_decl_class_member_workaround)
+ << 0 // alias declaration
+ << FixItHint::CreateInsertion(SS.getBeginLoc(),
+ NameInfo.getName().getAsString() +
+ " = ");
+ } else {
+ // Convert 'using X::Y;' to 'typedef X::Y Y;'.
+ SourceLocation InsertLoc =
+ getLocForEndOfToken(NameInfo.getLocEnd());
+ Diag(InsertLoc, diag::note_using_decl_class_member_workaround)
+ << 1 // typedef declaration
+ << FixItHint::CreateReplacement(UsingLoc, "typedef")
+ << FixItHint::CreateInsertion(
+ InsertLoc, " " + NameInfo.getName().getAsString());
+ }
+ } else if (R.getAsSingle<VarDecl>()) {
+ // Don't provide a fixit outside C++11 mode; we don't want to suggest
+ // repeating the type of the static data member here.
+ FixItHint FixIt;
+ if (getLangOpts().CPlusPlus11) {
+ // Convert 'using X::Y;' to 'auto &Y = X::Y;'.
+ FixIt = FixItHint::CreateReplacement(
+ UsingLoc, "auto &" + NameInfo.getName().getAsString() + " = ");
+ }
+
+ Diag(UsingLoc, diag::note_using_decl_class_member_workaround)
+ << 2 // reference declaration
+ << FixIt;
+ }
+ return true;
+ }
+
+ // Otherwise, everything is known to be fine.
+ return false;
+ }
+
+ // The current scope is a record.
+
+ // If the named context is dependent, we can't decide much.
+ if (!NamedContext) {
+ // FIXME: in C++0x, we can diagnose if we can prove that the
+ // nested-name-specifier does not refer to a base class, which is
+ // still possible in some cases.
+
+ // Otherwise we have to conservatively report that things might be
+ // okay.
+ return false;
+ }
+
+ if (!NamedContext->isRecord()) {
+ // Ideally this would point at the last name in the specifier,
+ // but we don't have that level of source info.
+ Diag(SS.getRange().getBegin(),
+ diag::err_using_decl_nested_name_specifier_is_not_class)
+ << SS.getScopeRep() << SS.getRange();
+ return true;
+ }
+
+ if (!NamedContext->isDependentContext() &&
+ RequireCompleteDeclContext(const_cast<CXXScopeSpec&>(SS), NamedContext))
+ return true;
+
+ if (getLangOpts().CPlusPlus11) {
+ // C++0x [namespace.udecl]p3:
+ // In a using-declaration used as a member-declaration, the
+ // nested-name-specifier shall name a base class of the class
+ // being defined.
+
+ if (cast<CXXRecordDecl>(CurContext)->isProvablyNotDerivedFrom(
+ cast<CXXRecordDecl>(NamedContext))) {
+ if (CurContext == NamedContext) {
+ Diag(NameLoc,
+ diag::err_using_decl_nested_name_specifier_is_current_class)
+ << SS.getRange();
+ return true;
+ }
+
+ Diag(SS.getRange().getBegin(),
+ diag::err_using_decl_nested_name_specifier_is_not_base_class)
+ << SS.getScopeRep()
+ << cast<CXXRecordDecl>(CurContext)
+ << SS.getRange();
+ return true;
+ }
+
+ return false;
+ }
+
+ // C++03 [namespace.udecl]p4:
+ // A using-declaration used as a member-declaration shall refer
+ // to a member of a base class of the class being defined [etc.].
+
+ // Salient point: SS doesn't have to name a base class as long as
+ // lookup only finds members from base classes. Therefore we can
+ // diagnose here only if we can prove that that can't happen,
+ // i.e. if the class hierarchies provably don't intersect.
+
+ // TODO: it would be nice if "definitely valid" results were cached
+ // in the UsingDecl and UsingShadowDecl so that these checks didn't
+ // need to be repeated.
+
+ llvm::SmallPtrSet<const CXXRecordDecl *, 4> Bases;
+ auto Collect = [&Bases](const CXXRecordDecl *Base) {
+ Bases.insert(Base);
+ return true;
+ };
+
+ // Collect all bases. Return false if we find a dependent base.
+ if (!cast<CXXRecordDecl>(CurContext)->forallBases(Collect))
+ return false;
+
+ // Continue the walk (by returning true) only while the base is not one of
+ // the accumulated base classes; forallBases itself fails on dependent bases.
+ auto IsNotBase = [&Bases](const CXXRecordDecl *Base) {
+ return !Bases.count(Base);
+ };
+
+ // Return false if the class has a dependent base or if it or one
+ // of its bases is present in the base set of the current context.
+ if (Bases.count(cast<CXXRecordDecl>(NamedContext)) ||
+ !cast<CXXRecordDecl>(NamedContext)->forallBases(IsNotBase))
+ return false;
+
+ Diag(SS.getRange().getBegin(),
+ diag::err_using_decl_nested_name_specifier_is_not_base_class)
+ << SS.getScopeRep()
+ << cast<CXXRecordDecl>(CurContext)
+ << SS.getRange();
+
+ return true;
+}
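+
+// For illustration (hypothetical code) of the C++03 intersection check:
+//   struct A { int x; };
+//   struct B { int y; };
+//   struct D : A {
+//     using B::y;   // error: the A and B hierarchies provably don't intersect
+//   };
+//   template <class T>
+//   struct E : T {
+//     using A::x;   // not diagnosed here: T might derive from A
+//   };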
+
+Decl *Sema::ActOnAliasDeclaration(Scope *S,
+ AccessSpecifier AS,
+ MultiTemplateParamsArg TemplateParamLists,
+ SourceLocation UsingLoc,
+ UnqualifiedId &Name,
+ AttributeList *AttrList,
+ TypeResult Type,
+ Decl *DeclFromDeclSpec) {
+ // Skip up to the relevant declaration scope.
+ while (S->isTemplateParamScope())
+ S = S->getParent();
+ assert((S->getFlags() & Scope::DeclScope) &&
+ "got alias-declaration outside of declaration scope");
+
+ if (Type.isInvalid())
+ return nullptr;
+
+ bool Invalid = false;
+ DeclarationNameInfo NameInfo = GetNameFromUnqualifiedId(Name);
+ TypeSourceInfo *TInfo = nullptr;
+ GetTypeFromParser(Type.get(), &TInfo);
+
+ if (DiagnoseClassNameShadow(CurContext, NameInfo))
+ return nullptr;
+
+ if (DiagnoseUnexpandedParameterPack(Name.StartLocation, TInfo,
+ UPPC_DeclarationType)) {
+ Invalid = true;
+ TInfo = Context.getTrivialTypeSourceInfo(Context.IntTy,
+ TInfo->getTypeLoc().getBeginLoc());
+ }
+
+ LookupResult Previous(*this, NameInfo, LookupOrdinaryName, ForRedeclaration);
+ LookupName(Previous, S);
+
+ // Warn about shadowing the name of a template parameter.
+ if (Previous.isSingleResult() &&
+ Previous.getFoundDecl()->isTemplateParameter()) {
+ DiagnoseTemplateParameterShadow(Name.StartLocation,Previous.getFoundDecl());
+ Previous.clear();
+ }
+
+ assert(Name.Kind == UnqualifiedId::IK_Identifier &&
+ "name in alias declaration must be an identifier");
+ TypeAliasDecl *NewTD = TypeAliasDecl::Create(Context, CurContext, UsingLoc,
+ Name.StartLocation,
+ Name.Identifier, TInfo);
+
+ NewTD->setAccess(AS);
+
+ if (Invalid)
+ NewTD->setInvalidDecl();
+
+ ProcessDeclAttributeList(S, NewTD, AttrList);
+
+ CheckTypedefForVariablyModifiedType(S, NewTD);
+ Invalid |= NewTD->isInvalidDecl();
+
+ bool Redeclaration = false;
+
+ NamedDecl *NewND;
+ if (TemplateParamLists.size()) {
+ TypeAliasTemplateDecl *OldDecl = nullptr;
+ TemplateParameterList *OldTemplateParams = nullptr;
+
+ if (TemplateParamLists.size() != 1) {
+ Diag(UsingLoc, diag::err_alias_template_extra_headers)
+ << SourceRange(TemplateParamLists[1]->getTemplateLoc(),
+ TemplateParamLists[TemplateParamLists.size()-1]->getRAngleLoc());
+ }
+ TemplateParameterList *TemplateParams = TemplateParamLists[0];
+
+ // Only consider previous declarations in the same scope.
+ FilterLookupForScope(Previous, CurContext, S, /*ConsiderLinkage*/false,
+ /*ExplicitInstantiationOrSpecialization*/false);
+ if (!Previous.empty()) {
+ Redeclaration = true;
+
+ OldDecl = Previous.getAsSingle<TypeAliasTemplateDecl>();
+ if (!OldDecl && !Invalid) {
+ Diag(UsingLoc, diag::err_redefinition_different_kind)
+ << Name.Identifier;
+
+ NamedDecl *OldD = Previous.getRepresentativeDecl();
+ if (OldD->getLocation().isValid())
+ Diag(OldD->getLocation(), diag::note_previous_definition);
+
+ Invalid = true;
+ }
+
+ if (!Invalid && OldDecl && !OldDecl->isInvalidDecl()) {
+ if (TemplateParameterListsAreEqual(TemplateParams,
+ OldDecl->getTemplateParameters(),
+ /*Complain=*/true,
+ TPL_TemplateMatch))
+ OldTemplateParams = OldDecl->getTemplateParameters();
+ else
+ Invalid = true;
+
+ TypeAliasDecl *OldTD = OldDecl->getTemplatedDecl();
+ if (!Invalid &&
+ !Context.hasSameType(OldTD->getUnderlyingType(),
+ NewTD->getUnderlyingType())) {
+ // FIXME: The C++0x standard does not clearly say this is ill-formed,
+ // but we can't reasonably accept it.
+ Diag(NewTD->getLocation(), diag::err_redefinition_different_typedef)
+ << 2 << NewTD->getUnderlyingType() << OldTD->getUnderlyingType();
+ if (OldTD->getLocation().isValid())
+ Diag(OldTD->getLocation(), diag::note_previous_definition);
+ Invalid = true;
+ }
+ }
+ }
+
+ // Merge any previous default template arguments into our parameters,
+ // and check the parameter list.
+ if (CheckTemplateParameterList(TemplateParams, OldTemplateParams,
+ TPC_TypeAliasTemplate))
+ return nullptr;
+
+ TypeAliasTemplateDecl *NewDecl =
+ TypeAliasTemplateDecl::Create(Context, CurContext, UsingLoc,
+ Name.Identifier, TemplateParams,
+ NewTD);
+ NewTD->setDescribedAliasTemplate(NewDecl);
+
+ NewDecl->setAccess(AS);
+
+ if (Invalid)
+ NewDecl->setInvalidDecl();
+ else if (OldDecl)
+ NewDecl->setPreviousDecl(OldDecl);
+
+ NewND = NewDecl;
+ } else {
+ if (auto *TD = dyn_cast_or_null<TagDecl>(DeclFromDeclSpec)) {
+ setTagNameForLinkagePurposes(TD, NewTD);
+ handleTagNumbering(TD, S);
+ }
+ ActOnTypedefNameDecl(S, CurContext, NewTD, Previous, Redeclaration);
+ NewND = NewTD;
+ }
+
+ if (!Redeclaration)
+ PushOnScopeChains(NewND, S);
+
+ ActOnDocumentableDecl(NewND);
+ return NewND;
+}
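+
+// For illustration (hypothetical code):
+//   using Int = int;              // alias-declaration, like 'typedef int Int;'
+//   template <class T>
+//   using Vec = std::vector<T>;   // alias template
+//   template <class T>
+//   using Vec = std::list<T>;     // error: redefinition with a different type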
+
+Decl *Sema::ActOnNamespaceAliasDef(Scope *S, SourceLocation NamespaceLoc,
+ SourceLocation AliasLoc,
+ IdentifierInfo *Alias, CXXScopeSpec &SS,
+ SourceLocation IdentLoc,
+ IdentifierInfo *Ident) {
+
+ // Lookup the namespace name.
+ LookupResult R(*this, Ident, IdentLoc, LookupNamespaceName);
+ LookupParsedName(R, S, &SS);
+
+ if (R.isAmbiguous())
+ return nullptr;
+
+ if (R.empty()) {
+ if (!TryNamespaceTypoCorrection(*this, R, S, SS, IdentLoc, Ident)) {
+ Diag(IdentLoc, diag::err_expected_namespace_name) << SS.getRange();
+ return nullptr;
+ }
+ }
+ assert(!R.isAmbiguous() && !R.empty());
+ NamedDecl *ND = R.getRepresentativeDecl();
+
+ // Check if we have a previous declaration with the same name.
+ LookupResult PrevR(*this, Alias, AliasLoc, LookupOrdinaryName,
+ ForRedeclaration);
+ LookupName(PrevR, S);
+
+ // Check we're not shadowing a template parameter.
+ if (PrevR.isSingleResult() && PrevR.getFoundDecl()->isTemplateParameter()) {
+ DiagnoseTemplateParameterShadow(AliasLoc, PrevR.getFoundDecl());
+ PrevR.clear();
+ }
+
+ // Filter out any other lookup result from an enclosing scope.
+ FilterLookupForScope(PrevR, CurContext, S, /*ConsiderLinkage*/false,
+ /*AllowInlineNamespace*/false);
+
+ // Find the previous declaration and check that we can redeclare it.
+ NamespaceAliasDecl *Prev = nullptr;
+ if (PrevR.isSingleResult()) {
+ NamedDecl *PrevDecl = PrevR.getRepresentativeDecl();
+ if (NamespaceAliasDecl *AD = dyn_cast<NamespaceAliasDecl>(PrevDecl)) {
+ // We already have an alias with the same name that points to the same
+ // namespace; check that it matches.
+ if (AD->getNamespace()->Equals(getNamespaceDecl(ND))) {
+ Prev = AD;
+ } else if (isVisible(PrevDecl)) {
+ Diag(AliasLoc, diag::err_redefinition_different_namespace_alias)
+ << Alias;
+ Diag(AD->getLocation(), diag::note_previous_namespace_alias)
+ << AD->getNamespace();
+ return nullptr;
+ }
+ } else if (isVisible(PrevDecl)) {
+ unsigned DiagID = isa<NamespaceDecl>(PrevDecl->getUnderlyingDecl())
+ ? diag::err_redefinition
+ : diag::err_redefinition_different_kind;
+ Diag(AliasLoc, DiagID) << Alias;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ return nullptr;
+ }
+ }
+
+ // The use of a nested name specifier may trigger deprecation warnings.
+ DiagnoseUseOfDecl(ND, IdentLoc);
+
+ NamespaceAliasDecl *AliasDecl =
+ NamespaceAliasDecl::Create(Context, CurContext, NamespaceLoc, AliasLoc,
+ Alias, SS.getWithLocInContext(Context),
+ IdentLoc, ND);
+ if (Prev)
+ AliasDecl->setPreviousDecl(Prev);
+
+ PushOnScopeChains(AliasDecl, S);
+ return AliasDecl;
+}
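+
+// For illustration (hypothetical code):
+//   namespace proj { namespace detail { int x; } }
+//   namespace pd = proj::detail;  // namespace alias
+//   namespace pd = proj::detail;  // OK: redeclaration naming the same target
+//   namespace pd = proj;          // error: names a different namespace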
+
+Sema::ImplicitExceptionSpecification
+Sema::ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
+ CXXMethodDecl *MD) {
+ CXXRecordDecl *ClassDecl = MD->getParent();
+
+ // C++ [except.spec]p14:
+ // An implicitly declared special member function (Clause 12) shall have an
+ // exception-specification. [...]
+ ImplicitExceptionSpecification ExceptSpec(*this);
+ if (ClassDecl->isInvalidDecl())
+ return ExceptSpec;
+
+ // Direct base-class constructors.
+ for (const auto &B : ClassDecl->bases()) {
+ if (B.isVirtual()) // Handled below.
+ continue;
+
+ if (const RecordType *BaseType = B.getType()->getAs<RecordType>()) {
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ CXXConstructorDecl *Constructor = LookupDefaultConstructor(BaseClassDecl);
+ // If this is a deleted function, add it anyway; whether doing so is
+ // conformant is unclear, and it may not matter.
+ if (Constructor)
+ ExceptSpec.CalledDecl(B.getLocStart(), Constructor);
+ }
+ }
+
+ // Virtual base-class constructors.
+ for (const auto &B : ClassDecl->vbases()) {
+ if (const RecordType *BaseType = B.getType()->getAs<RecordType>()) {
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ CXXConstructorDecl *Constructor = LookupDefaultConstructor(BaseClassDecl);
+ // If this is a deleted function, add it anyway; whether doing so is
+ // conformant is unclear, and it may not matter.
+ if (Constructor)
+ ExceptSpec.CalledDecl(B.getLocStart(), Constructor);
+ }
+ }
+
+ // Field constructors.
+ for (const auto *F : ClassDecl->fields()) {
+ if (F->hasInClassInitializer()) {
+ if (Expr *E = F->getInClassInitializer())
+ ExceptSpec.CalledExpr(E);
+ } else if (const RecordType *RecordTy
+ = Context.getBaseElementType(F->getType())->getAs<RecordType>()) {
+ CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
+ CXXConstructorDecl *Constructor = LookupDefaultConstructor(FieldRecDecl);
+ // If this is a deleted function, add it anyway; whether doing so is
+ // conformant is unclear, and it may not matter, since this function is
+ // never actually called. The program might simply be ill-formed because
+ // it refers to a deleted function here.
+ if (Constructor)
+ ExceptSpec.CalledDecl(F->getLocation(), Constructor);
+ }
+ }
+
+ return ExceptSpec;
+}
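+
+// For illustration (hypothetical code) of the computed specification:
+//   struct Throws  { Throws(); };            // may throw
+//   struct NoThrow { NoThrow() noexcept; };
+//   struct S1 { NoThrow a; };   // implicit S1::S1() is noexcept(true)
+//   struct S2 { Throws b; };    // implicit S2::S2() is noexcept(false)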
+
+Sema::ImplicitExceptionSpecification
+Sema::ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD) {
+ CXXRecordDecl *ClassDecl = CD->getParent();
+
+ // C++ [except.spec]p14:
+ // An inheriting constructor [...] shall have an exception-specification. [...]
+ ImplicitExceptionSpecification ExceptSpec(*this);
+ if (ClassDecl->isInvalidDecl())
+ return ExceptSpec;
+
+ // Inherited constructor.
+ const CXXConstructorDecl *InheritedCD = CD->getInheritedConstructor();
+ const CXXRecordDecl *InheritedDecl = InheritedCD->getParent();
+ // FIXME: Copying or moving the parameters could add extra exceptions to the
+ // set, as could the default arguments for the inherited constructor. This
+ // will be addressed when we implement the resolution of core issue 1351.
+ ExceptSpec.CalledDecl(CD->getLocStart(), InheritedCD);
+
+ // Direct base-class constructors.
+ for (const auto &B : ClassDecl->bases()) {
+ if (B.isVirtual()) // Handled below.
+ continue;
+
+ if (const RecordType *BaseType = B.getType()->getAs<RecordType>()) {
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ if (BaseClassDecl == InheritedDecl)
+ continue;
+ CXXConstructorDecl *Constructor = LookupDefaultConstructor(BaseClassDecl);
+ if (Constructor)
+ ExceptSpec.CalledDecl(B.getLocStart(), Constructor);
+ }
+ }
+
+ // Virtual base-class constructors.
+ for (const auto &B : ClassDecl->vbases()) {
+ if (const RecordType *BaseType = B.getType()->getAs<RecordType>()) {
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ if (BaseClassDecl == InheritedDecl)
+ continue;
+ CXXConstructorDecl *Constructor = LookupDefaultConstructor(BaseClassDecl);
+ if (Constructor)
+ ExceptSpec.CalledDecl(B.getLocStart(), Constructor);
+ }
+ }
+
+ // Field constructors.
+ for (const auto *F : ClassDecl->fields()) {
+ if (F->hasInClassInitializer()) {
+ if (Expr *E = F->getInClassInitializer())
+ ExceptSpec.CalledExpr(E);
+ } else if (const RecordType *RecordTy
+ = Context.getBaseElementType(F->getType())->getAs<RecordType>()) {
+ CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
+ CXXConstructorDecl *Constructor = LookupDefaultConstructor(FieldRecDecl);
+ if (Constructor)
+ ExceptSpec.CalledDecl(F->getLocation(), Constructor);
+ }
+ }
+
+ return ExceptSpec;
+}
+
+namespace {
+/// RAII object to register a special member as being currently declared.
+struct DeclaringSpecialMember {
+ Sema &S;
+ Sema::SpecialMemberDecl D;
+ bool WasAlreadyBeingDeclared;
+
+ DeclaringSpecialMember(Sema &S, CXXRecordDecl *RD, Sema::CXXSpecialMember CSM)
+ : S(S), D(RD, CSM) {
+ WasAlreadyBeingDeclared = !S.SpecialMembersBeingDeclared.insert(D).second;
+ if (WasAlreadyBeingDeclared)
+ // This almost never happens, but if it does, ensure that our cache
+ // doesn't contain a stale result.
+ S.SpecialMemberCache.clear();
+
+ // FIXME: Register a note to be produced if we encounter an error while
+ // declaring the special member.
+ }
+ ~DeclaringSpecialMember() {
+ if (!WasAlreadyBeingDeclared)
+ S.SpecialMembersBeingDeclared.erase(D);
+ }
+
+ /// \brief Are we already trying to declare this special member?
+ bool isAlreadyBeingDeclared() const {
+ return WasAlreadyBeingDeclared;
+ }
+};
+}
+
+CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
+ CXXRecordDecl *ClassDecl) {
+ // C++ [class.ctor]p5:
+ // A default constructor for a class X is a constructor of class X
+ // that can be called without an argument. If there is no
+ // user-declared constructor for class X, a default constructor is
+ // implicitly declared. An implicitly-declared default constructor
+ // is an inline public member of its class.
+ assert(ClassDecl->needsImplicitDefaultConstructor() &&
+ "Should not build implicit default constructor!");
+
+ DeclaringSpecialMember DSM(*this, ClassDecl, CXXDefaultConstructor);
+ if (DSM.isAlreadyBeingDeclared())
+ return nullptr;
+
+ bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl,
+ CXXDefaultConstructor,
+ false);
+
+ // Create the actual constructor declaration.
+ CanQualType ClassType
+ = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
+ SourceLocation ClassLoc = ClassDecl->getLocation();
+ DeclarationName Name
+ = Context.DeclarationNames.getCXXConstructorName(ClassType);
+ DeclarationNameInfo NameInfo(Name, ClassLoc);
+ CXXConstructorDecl *DefaultCon = CXXConstructorDecl::Create(
+ Context, ClassDecl, ClassLoc, NameInfo, /*Type*/QualType(),
+ /*TInfo=*/nullptr, /*isExplicit=*/false, /*isInline=*/true,
+ /*isImplicitlyDeclared=*/true, Constexpr);
+ DefaultCon->setAccess(AS_public);
+ DefaultCon->setDefaulted();
+
+ if (getLangOpts().CUDA) {
+ inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXDefaultConstructor,
+ DefaultCon,
+ /* ConstRHS */ false,
+ /* Diagnose */ false);
+ }
+
+ // Build an exception specification pointing back at this constructor.
+ FunctionProtoType::ExtProtoInfo EPI = getImplicitMethodEPI(*this, DefaultCon);
+ DefaultCon->setType(Context.getFunctionType(Context.VoidTy, None, EPI));
+
+ // We don't need to use SpecialMemberIsTrivial here; triviality for default
+ // constructors is easy to compute.
+ DefaultCon->setTrivial(ClassDecl->hasTrivialDefaultConstructor());
+
+ if (ShouldDeleteSpecialMember(DefaultCon, CXXDefaultConstructor))
+ SetDeclDeleted(DefaultCon, ClassLoc);
+
+ // Note that we have declared this constructor.
+ ++ASTContext::NumImplicitDefaultConstructorsDeclared;
+
+ if (Scope *S = getScopeForContext(ClassDecl))
+ PushOnScopeChains(DefaultCon, S, false);
+ ClassDecl->addDecl(DefaultCon);
+
+ return DefaultCon;
+}
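+
+// For illustration (hypothetical code):
+//   struct X { };           // implicit default ctor: inline public member
+//   struct Y { Y(int); };   // user-declared ctor suppresses the implicit one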
+
+void Sema::DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
+ CXXConstructorDecl *Constructor) {
+ assert((Constructor->isDefaulted() && Constructor->isDefaultConstructor() &&
+ !Constructor->doesThisDeclarationHaveABody() &&
+ !Constructor->isDeleted()) &&
+ "DefineImplicitDefaultConstructor - call it for implicit default ctor");
+
+ CXXRecordDecl *ClassDecl = Constructor->getParent();
+ assert(ClassDecl && "DefineImplicitDefaultConstructor - invalid constructor");
+
+ SynthesizedFunctionScope Scope(*this, Constructor);
+ DiagnosticErrorTrap Trap(Diags);
+ if (SetCtorInitializers(Constructor, /*AnyErrors=*/false) ||
+ Trap.hasErrorOccurred()) {
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXDefaultConstructor << Context.getTagDeclType(ClassDecl);
+ Constructor->setInvalidDecl();
+ return;
+ }
+
+ // The exception specification is needed because we are defining the
+ // function.
+ ResolveExceptionSpec(CurrentLocation,
+ Constructor->getType()->castAs<FunctionProtoType>());
+
+ SourceLocation Loc = Constructor->getLocEnd().isValid()
+ ? Constructor->getLocEnd()
+ : Constructor->getLocation();
+ Constructor->setBody(new (Context) CompoundStmt(Loc));
+
+ Constructor->markUsed(Context);
+ MarkVTableUsed(CurrentLocation, ClassDecl);
+
+ if (ASTMutationListener *L = getASTMutationListener()) {
+ L->CompletedImplicitDefinition(Constructor);
+ }
+
+ DiagnoseUninitializedFields(*this, Constructor);
+}
+
+void Sema::ActOnFinishDelayedMemberInitializers(Decl *D) {
+ // Perform any delayed checks on exception specifications.
+ CheckDelayedMemberExceptionSpecs();
+}
+
+namespace {
+/// Information on inheriting constructors to declare.
+class InheritingConstructorInfo {
+public:
+ InheritingConstructorInfo(Sema &SemaRef, CXXRecordDecl *Derived)
+ : SemaRef(SemaRef), Derived(Derived) {
+ // Mark the constructors that we already have in the derived class.
+ //
+ // C++11 [class.inhctor]p3: [...] a constructor is implicitly declared [...]
+ // unless there is a user-declared constructor with the same signature in
+ // the class where the using-declaration appears.
+ visitAll(Derived, &InheritingConstructorInfo::noteDeclaredInDerived);
+ }
+
+ void inheritAll(CXXRecordDecl *RD) {
+ visitAll(RD, &InheritingConstructorInfo::inherit);
+ }
+
+private:
+ /// Information about an inheriting constructor.
+ struct InheritingConstructor {
+ InheritingConstructor()
+ : DeclaredInDerived(false), BaseCtor(nullptr), DerivedCtor(nullptr) {}
+
+ /// If \c true, a constructor with this signature is already declared
+ /// in the derived class.
+ bool DeclaredInDerived;
+
+ /// The constructor which is inherited.
+ const CXXConstructorDecl *BaseCtor;
+
+ /// The derived constructor we declared.
+ CXXConstructorDecl *DerivedCtor;
+ };
+
+ /// Inheriting constructors with a given canonical type. There can be at
+ /// most one such non-template constructor, and any number of templated
+ /// constructors.
+ struct InheritingConstructorsForType {
+ InheritingConstructor NonTemplate;
+ SmallVector<std::pair<TemplateParameterList *, InheritingConstructor>, 4>
+ Templates;
+
+ InheritingConstructor &getEntry(Sema &S, const CXXConstructorDecl *Ctor) {
+ if (FunctionTemplateDecl *FTD = Ctor->getDescribedFunctionTemplate()) {
+ TemplateParameterList *ParamList = FTD->getTemplateParameters();
+ for (unsigned I = 0, N = Templates.size(); I != N; ++I)
+ if (S.TemplateParameterListsAreEqual(ParamList, Templates[I].first,
+ false, S.TPL_TemplateMatch))
+ return Templates[I].second;
+ Templates.push_back(std::make_pair(ParamList, InheritingConstructor()));
+ return Templates.back().second;
+ }
+
+ return NonTemplate;
+ }
+ };
+
+ /// Get or create the inheriting constructor record for a constructor.
+ InheritingConstructor &getEntry(const CXXConstructorDecl *Ctor,
+ QualType CtorType) {
+ return Map[CtorType.getCanonicalType()->castAs<FunctionProtoType>()]
+ .getEntry(SemaRef, Ctor);
+ }
+
+ typedef void (InheritingConstructorInfo::*VisitFn)(const CXXConstructorDecl*);
+
+ /// Process all constructors for a class.
+ void visitAll(const CXXRecordDecl *RD, VisitFn Callback) {
+ for (const auto *Ctor : RD->ctors())
+ (this->*Callback)(Ctor);
+ for (CXXRecordDecl::specific_decl_iterator<FunctionTemplateDecl>
+ I(RD->decls_begin()), E(RD->decls_end());
+ I != E; ++I) {
+ const FunctionDecl *FD = (*I)->getTemplatedDecl();
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
+ (this->*Callback)(CD);
+ }
+ }
+
+ /// Note that a constructor (or constructor template) was declared in Derived.
+ void noteDeclaredInDerived(const CXXConstructorDecl *Ctor) {
+ getEntry(Ctor, Ctor->getType()).DeclaredInDerived = true;
+ }
+
+ /// Inherit a single constructor.
+ void inherit(const CXXConstructorDecl *Ctor) {
+ const FunctionProtoType *CtorType =
+ Ctor->getType()->castAs<FunctionProtoType>();
+ ArrayRef<QualType> ArgTypes = CtorType->getParamTypes();
+ FunctionProtoType::ExtProtoInfo EPI = CtorType->getExtProtoInfo();
+
+ SourceLocation UsingLoc = getUsingLoc(Ctor->getParent());
+
+ // Core issue (no number yet): the ellipsis is always discarded.
+ if (EPI.Variadic) {
+ SemaRef.Diag(UsingLoc, diag::warn_using_decl_constructor_ellipsis);
+ SemaRef.Diag(Ctor->getLocation(),
+ diag::note_using_decl_constructor_ellipsis);
+ EPI.Variadic = false;
+ }
+
+ // Declare a constructor for each number of parameters.
+ //
+ // C++11 [class.inhctor]p1:
+ // The candidate set of inherited constructors from the class X named in
+ // the using-declaration consists of [... modulo defects ...] for each
+ // constructor or constructor template of X, the set of constructors or
+ // constructor templates that results from omitting any ellipsis parameter
+ // specification and successively omitting parameters with a default
+ // argument from the end of the parameter-type-list
+ unsigned MinParams = minParamsToInherit(Ctor);
+ unsigned Params = Ctor->getNumParams();
+ if (Params >= MinParams) {
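+      // Declare one inheriting constructor per arity, successively dropping
+      // trailing parameters that have default arguments. E.g., inheriting
+      // 'Base(int, int = 0)' declares 'Derived(int, int)' and 'Derived(int)'.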
+ do
+ declareCtor(UsingLoc, Ctor,
+ SemaRef.Context.getFunctionType(
+ Ctor->getReturnType(), ArgTypes.slice(0, Params), EPI));
+ while (Params > MinParams &&
+ Ctor->getParamDecl(--Params)->hasDefaultArg());
+ }
+ }
+
+ /// Find the using-declaration which specified that we should inherit the
+ /// constructors of \p Base.
+ SourceLocation getUsingLoc(const CXXRecordDecl *Base) {
+ // No fancy lookup required; just look for the base constructor name
+ // directly within the derived class.
+ ASTContext &Context = SemaRef.Context;
+ DeclarationName Name = Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(Context.getRecordType(Base)));
+ DeclContext::lookup_result Decls = Derived->lookup(Name);
+ return Decls.empty() ? Derived->getLocation() : Decls[0]->getLocation();
+ }
+
+ unsigned minParamsToInherit(const CXXConstructorDecl *Ctor) {
+ // C++11 [class.inhctor]p3:
+ // [F]or each constructor template in the candidate set of inherited
+ // constructors, a constructor template is implicitly declared
+ if (Ctor->getDescribedFunctionTemplate())
+ return 0;
+
+ // For each non-template constructor in the candidate set of inherited
+ // constructors other than a constructor having no parameters or a
+ // copy/move constructor having a single parameter, a constructor is
+ // implicitly declared [...]
+ if (Ctor->getNumParams() == 0)
+ return 1;
+ if (Ctor->isCopyOrMoveConstructor())
+ return 2;
+
+ // Per discussion on core reflector, never inherit a constructor which
+ // would become a default, copy, or move constructor of Derived either.
+ const ParmVarDecl *PD = Ctor->getParamDecl(0);
+ const ReferenceType *RT = PD->getType()->getAs<ReferenceType>();
+ return (RT && RT->getPointeeCXXRecordDecl() == Derived) ? 2 : 1;
+ }
+
+ /// Declare a single inheriting constructor, inheriting the specified
+ /// constructor, with the given type.
+ void declareCtor(SourceLocation UsingLoc, const CXXConstructorDecl *BaseCtor,
+ QualType DerivedType) {
+ InheritingConstructor &Entry = getEntry(BaseCtor, DerivedType);
+
+ // C++11 [class.inhctor]p3:
+ // ... a constructor is implicitly declared with the same constructor
+ // characteristics unless there is a user-declared constructor with
+ // the same signature in the class where the using-declaration appears
+ if (Entry.DeclaredInDerived)
+ return;
+
+ // C++11 [class.inhctor]p7:
+ // If two using-declarations declare inheriting constructors with the
+ // same signature, the program is ill-formed
+ if (Entry.DerivedCtor) {
+ if (BaseCtor->getParent() != Entry.BaseCtor->getParent()) {
+ // Only diagnose this once per constructor.
+ if (Entry.DerivedCtor->isInvalidDecl())
+ return;
+ Entry.DerivedCtor->setInvalidDecl();
+
+ SemaRef.Diag(UsingLoc, diag::err_using_decl_constructor_conflict);
+ SemaRef.Diag(BaseCtor->getLocation(),
+ diag::note_using_decl_constructor_conflict_current_ctor);
+ SemaRef.Diag(Entry.BaseCtor->getLocation(),
+ diag::note_using_decl_constructor_conflict_previous_ctor);
+ SemaRef.Diag(Entry.DerivedCtor->getLocation(),
+ diag::note_using_decl_constructor_conflict_previous_using);
+ } else {
+ // Core issue (no number): if the same inheriting constructor is
+ // produced by multiple base class constructors from the same base
+ // class, the inheriting constructor is defined as deleted.
+ SemaRef.SetDeclDeleted(Entry.DerivedCtor, UsingLoc);
+ }
+
+ return;
+ }
+
+ ASTContext &Context = SemaRef.Context;
+ DeclarationName Name = Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(Context.getRecordType(Derived)));
+ DeclarationNameInfo NameInfo(Name, UsingLoc);
+
+ TemplateParameterList *TemplateParams = nullptr;
+ if (const FunctionTemplateDecl *FTD =
+ BaseCtor->getDescribedFunctionTemplate()) {
+ TemplateParams = FTD->getTemplateParameters();
+ // We're reusing template parameters from a different DeclContext. This
+ // is questionable at best, but works out because the template depth in
+ // both places is guaranteed to be 0.
+ // FIXME: Rebuild the template parameters in the new context, and
+ // transform the function type to refer to them.
+ }
+
+ // Build type source info pointing at the using-declaration. This is
+ // required by template instantiation.
+ TypeSourceInfo *TInfo =
+ Context.getTrivialTypeSourceInfo(DerivedType, UsingLoc);
+ FunctionProtoTypeLoc ProtoLoc =
+ TInfo->getTypeLoc().IgnoreParens().castAs<FunctionProtoTypeLoc>();
+
+ CXXConstructorDecl *DerivedCtor = CXXConstructorDecl::Create(
+ Context, Derived, UsingLoc, NameInfo, DerivedType,
+ TInfo, BaseCtor->isExplicit(), /*Inline=*/true,
+ /*ImplicitlyDeclared=*/true, /*Constexpr=*/BaseCtor->isConstexpr());
+
+ // Build an unevaluated exception specification for this constructor.
+ const FunctionProtoType *FPT = DerivedType->castAs<FunctionProtoType>();
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ EPI.ExceptionSpec.Type = EST_Unevaluated;
+ EPI.ExceptionSpec.SourceDecl = DerivedCtor;
+ DerivedCtor->setType(Context.getFunctionType(FPT->getReturnType(),
+ FPT->getParamTypes(), EPI));
+
+ // Build the parameter declarations.
+ SmallVector<ParmVarDecl *, 16> ParamDecls;
+ for (unsigned I = 0, N = FPT->getNumParams(); I != N; ++I) {
+ TypeSourceInfo *TInfo =
+ Context.getTrivialTypeSourceInfo(FPT->getParamType(I), UsingLoc);
+ ParmVarDecl *PD = ParmVarDecl::Create(
+ Context, DerivedCtor, UsingLoc, UsingLoc, /*IdentifierInfo=*/nullptr,
+ FPT->getParamType(I), TInfo, SC_None, /*DefaultArg=*/nullptr);
+ PD->setScopeInfo(0, I);
+ PD->setImplicit();
+ ParamDecls.push_back(PD);
+ ProtoLoc.setParam(I, PD);
+ }
+
+ // Set up the new constructor.
+ DerivedCtor->setAccess(BaseCtor->getAccess());
+ DerivedCtor->setParams(ParamDecls);
+ DerivedCtor->setInheritedConstructor(BaseCtor);
+ if (BaseCtor->isDeleted())
+ SemaRef.SetDeclDeleted(DerivedCtor, UsingLoc);
+
+ // If this is a constructor template, build the template declaration.
+ if (TemplateParams) {
+ FunctionTemplateDecl *DerivedTemplate =
+ FunctionTemplateDecl::Create(SemaRef.Context, Derived, UsingLoc, Name,
+ TemplateParams, DerivedCtor);
+ DerivedTemplate->setAccess(BaseCtor->getAccess());
+ DerivedCtor->setDescribedFunctionTemplate(DerivedTemplate);
+ Derived->addDecl(DerivedTemplate);
+ } else {
+ Derived->addDecl(DerivedCtor);
+ }
+
+ Entry.BaseCtor = BaseCtor;
+ Entry.DerivedCtor = DerivedCtor;
+ }
+
+ Sema &SemaRef;
+ CXXRecordDecl *Derived;
+ typedef llvm::DenseMap<const Type *, InheritingConstructorsForType> MapType;
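+  /// Inheriting constructors seen so far, keyed by the canonical
+  /// FunctionProtoType of their signature.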
+ MapType Map;
+};
+}
+
+void Sema::DeclareInheritingConstructors(CXXRecordDecl *ClassDecl) {
+ // Defer declaring the inheriting constructors until the class is
+ // instantiated.
+ if (ClassDecl->isDependentContext())
+ return;
+
+ // Find base classes from which we might inherit constructors.
+ SmallVector<CXXRecordDecl*, 4> InheritedBases;
+ for (const auto &BaseIt : ClassDecl->bases())
+ if (BaseIt.getInheritConstructors())
+ InheritedBases.push_back(BaseIt.getType()->getAsCXXRecordDecl());
+
+ // Go no further if we're not inheriting any constructors.
+ if (InheritedBases.empty())
+ return;
+
+ // Declare the inherited constructors.
+ InheritingConstructorInfo ICI(*this, ClassDecl);
+ for (unsigned I = 0, N = InheritedBases.size(); I != N; ++I)
+ ICI.inheritAll(InheritedBases[I]);
+}
+
+void Sema::DefineInheritingConstructor(SourceLocation CurrentLocation,
+ CXXConstructorDecl *Constructor) {
+ CXXRecordDecl *ClassDecl = Constructor->getParent();
+ assert(Constructor->getInheritedConstructor() &&
+ !Constructor->doesThisDeclarationHaveABody() &&
+ !Constructor->isDeleted());
+
+ SynthesizedFunctionScope Scope(*this, Constructor);
+ DiagnosticErrorTrap Trap(Diags);
+ if (SetCtorInitializers(Constructor, /*AnyErrors=*/false) ||
+ Trap.hasErrorOccurred()) {
+ Diag(CurrentLocation, diag::note_inhctor_synthesized_at)
+ << Context.getTagDeclType(ClassDecl);
+ Constructor->setInvalidDecl();
+ return;
+ }
+
+ SourceLocation Loc = Constructor->getLocation();
+ Constructor->setBody(new (Context) CompoundStmt(Loc));
+
+ Constructor->markUsed(Context);
+ MarkVTableUsed(CurrentLocation, ClassDecl);
+
+ if (ASTMutationListener *L = getASTMutationListener()) {
+ L->CompletedImplicitDefinition(Constructor);
+ }
+}
+
+Sema::ImplicitExceptionSpecification
+Sema::ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD) {
+ CXXRecordDecl *ClassDecl = MD->getParent();
+
+ // C++ [except.spec]p14:
+ // An implicitly declared special member function (Clause 12) shall have
+ // an exception-specification.
+ ImplicitExceptionSpecification ExceptSpec(*this);
+ if (ClassDecl->isInvalidDecl())
+ return ExceptSpec;
+
+ // Direct base-class destructors.
+ for (const auto &B : ClassDecl->bases()) {
+ if (B.isVirtual()) // Handled below.
+ continue;
+
+ if (const RecordType *BaseType = B.getType()->getAs<RecordType>())
+ ExceptSpec.CalledDecl(B.getLocStart(),
+ LookupDestructor(cast<CXXRecordDecl>(BaseType->getDecl())));
+ }
+
+ // Virtual base-class destructors.
+ for (const auto &B : ClassDecl->vbases()) {
+ if (const RecordType *BaseType = B.getType()->getAs<RecordType>())
+ ExceptSpec.CalledDecl(B.getLocStart(),
+ LookupDestructor(cast<CXXRecordDecl>(BaseType->getDecl())));
+ }
+
+ // Field destructors.
+ for (const auto *F : ClassDecl->fields()) {
+ if (const RecordType *RecordTy
+ = Context.getBaseElementType(F->getType())->getAs<RecordType>())
+ ExceptSpec.CalledDecl(F->getLocation(),
+ LookupDestructor(cast<CXXRecordDecl>(RecordTy->getDecl())));
+ }
+
+ return ExceptSpec;
+}
+
+CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
+ // C++ [class.dtor]p2:
+ // If a class has no user-declared destructor, a destructor is
+ // declared implicitly. An implicitly-declared destructor is an
+ // inline public member of its class.
+ assert(ClassDecl->needsImplicitDestructor());
+
+ DeclaringSpecialMember DSM(*this, ClassDecl, CXXDestructor);
+ if (DSM.isAlreadyBeingDeclared())
+ return nullptr;
+
+ // Create the actual destructor declaration.
+ CanQualType ClassType
+ = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
+ SourceLocation ClassLoc = ClassDecl->getLocation();
+ DeclarationName Name
+ = Context.DeclarationNames.getCXXDestructorName(ClassType);
+ DeclarationNameInfo NameInfo(Name, ClassLoc);
+ CXXDestructorDecl *Destructor
+ = CXXDestructorDecl::Create(Context, ClassDecl, ClassLoc, NameInfo,
+ QualType(), nullptr, /*isInline=*/true,
+ /*isImplicitlyDeclared=*/true);
+ Destructor->setAccess(AS_public);
+ Destructor->setDefaulted();
+
+ if (getLangOpts().CUDA) {
+ inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXDestructor,
+ Destructor,
+ /* ConstRHS */ false,
+ /* Diagnose */ false);
+ }
+
+ // Build an exception specification pointing back at this destructor.
+ FunctionProtoType::ExtProtoInfo EPI = getImplicitMethodEPI(*this, Destructor);
+ Destructor->setType(Context.getFunctionType(Context.VoidTy, None, EPI));
+
+ AddOverriddenMethods(ClassDecl, Destructor);
+
+ // We don't need to use SpecialMemberIsTrivial here; triviality for
+ // destructors is easy to compute.
+ Destructor->setTrivial(ClassDecl->hasTrivialDestructor());
+
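+  // The destructor is implicitly deleted if, for example, a base class or
+  // non-static data member has a deleted or inaccessible destructor.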
+ if (ShouldDeleteSpecialMember(Destructor, CXXDestructor))
+ SetDeclDeleted(Destructor, ClassLoc);
+
+ // Note that we have declared this destructor.
+ ++ASTContext::NumImplicitDestructorsDeclared;
+
+ // Introduce this destructor into its scope.
+ if (Scope *S = getScopeForContext(ClassDecl))
+ PushOnScopeChains(Destructor, S, false);
+ ClassDecl->addDecl(Destructor);
+
+ return Destructor;
+}
+
+void Sema::DefineImplicitDestructor(SourceLocation CurrentLocation,
+ CXXDestructorDecl *Destructor) {
+ assert((Destructor->isDefaulted() &&
+ !Destructor->doesThisDeclarationHaveABody() &&
+ !Destructor->isDeleted()) &&
+ "DefineImplicitDestructor - call it for implicit default dtor");
+ CXXRecordDecl *ClassDecl = Destructor->getParent();
+ assert(ClassDecl && "DefineImplicitDestructor - invalid destructor");
+
+ if (Destructor->isInvalidDecl())
+ return;
+
+ SynthesizedFunctionScope Scope(*this, Destructor);
+
+ DiagnosticErrorTrap Trap(Diags);
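+  // Referencing the base and member destructors may emit diagnostics; the
+  // trap above lets us detect that and mark this destructor invalid.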
+ MarkBaseAndMemberDestructorsReferenced(Destructor->getLocation(),
+ Destructor->getParent());
+
+ if (CheckDestructor(Destructor) || Trap.hasErrorOccurred()) {
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXDestructor << Context.getTagDeclType(ClassDecl);
+
+ Destructor->setInvalidDecl();
+ return;
+ }
+
+ // The exception specification is needed because we are defining the
+ // function.
+ ResolveExceptionSpec(CurrentLocation,
+ Destructor->getType()->castAs<FunctionProtoType>());
+
+ SourceLocation Loc = Destructor->getLocEnd().isValid()
+ ? Destructor->getLocEnd()
+ : Destructor->getLocation();
+ Destructor->setBody(new (Context) CompoundStmt(Loc));
+ Destructor->markUsed(Context);
+ MarkVTableUsed(CurrentLocation, ClassDecl);
+
+ if (ASTMutationListener *L = getASTMutationListener()) {
+ L->CompletedImplicitDefinition(Destructor);
+ }
+}
+
+/// \brief Perform any semantic analysis which needs to be delayed until all
+/// pending class member declarations have been parsed.
+void Sema::ActOnFinishCXXMemberDecls() {
+ // If the context is an invalid C++ class, just suppress these checks.
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(CurContext)) {
+ if (Record->isInvalidDecl()) {
+ DelayedDefaultedMemberExceptionSpecs.clear();
+ DelayedExceptionSpecChecks.clear();
+ return;
+ }
+ }
+}
+
+static void getDefaultArgExprsForConstructors(Sema &S, CXXRecordDecl *Class) {
+ // Don't do anything for template patterns.
+ if (Class->getDescribedClassTemplate())
+ return;
+
+ CallingConv ExpectedCallingConv = S.Context.getDefaultCallingConvention(
+ /*IsVariadic=*/false, /*IsCXXMethod=*/true);
+
+ CXXConstructorDecl *LastExportedDefaultCtor = nullptr;
+ for (Decl *Member : Class->decls()) {
+ auto *CD = dyn_cast<CXXConstructorDecl>(Member);
+ if (!CD) {
+ // Recurse on nested classes.
+ if (auto *NestedRD = dyn_cast<CXXRecordDecl>(Member))
+ getDefaultArgExprsForConstructors(S, NestedRD);
+ continue;
+ } else if (!CD->isDefaultConstructor() || !CD->hasAttr<DLLExportAttr>()) {
+ continue;
+ }
+
+ CallingConv ActualCallingConv =
+ CD->getType()->getAs<FunctionProtoType>()->getCallConv();
+
+ // Skip default constructors with typical calling conventions and no default
+ // arguments.
+ unsigned NumParams = CD->getNumParams();
+ if (ExpectedCallingConv == ActualCallingConv && NumParams == 0)
+ continue;
+
+ if (LastExportedDefaultCtor) {
+ S.Diag(LastExportedDefaultCtor->getLocation(),
+ diag::err_attribute_dll_ambiguous_default_ctor) << Class;
+ S.Diag(CD->getLocation(), diag::note_entity_declared_at)
+ << CD->getDeclName();
+ return;
+ }
+ LastExportedDefaultCtor = CD;
+
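+    // Instantiate and cache the default-argument expressions now; they are
+    // needed later when the default constructor closure thunk is emitted.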
+ for (unsigned I = 0; I != NumParams; ++I) {
+ // Skip any default arguments that we've already instantiated.
+ if (S.Context.getDefaultArgExprForConstructor(CD, I))
+ continue;
+
+ Expr *DefaultArg = S.BuildCXXDefaultArgExpr(Class->getLocation(), CD,
+ CD->getParamDecl(I)).get();
+ S.DiscardCleanupsInEvaluationContext();
+ S.Context.addDefaultArgExprForConstructor(CD, I, DefaultArg);
+ }
+ }
+}
+
+void Sema::ActOnFinishCXXNonNestedClass(Decl *D) {
+ auto *RD = dyn_cast<CXXRecordDecl>(D);
+
+  // Default constructors annotated with __declspec(dllexport) that have
+  // default arguments or a non-standard calling convention are wrapped with
+  // a thunk called the default constructor closure.
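+  // E.g., for 'struct __declspec(dllexport) S { S(int = 0); };' the closure
+  // is a parameterless function that calls S::S(0).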
+ if (RD && Context.getTargetInfo().getCXXABI().isMicrosoft())
+ getDefaultArgExprsForConstructors(*this, RD);
+
+ if (!DelayedDllExportClasses.empty()) {
+ // Calling ReferenceDllExportedMethods might cause the current function to
+ // be called again, so use a local copy of DelayedDllExportClasses.
+ SmallVector<CXXRecordDecl *, 4> WorkList;
+ std::swap(DelayedDllExportClasses, WorkList);
+ for (CXXRecordDecl *Class : WorkList)
+ ReferenceDllExportedMethods(*this, Class);
+ }
+}
+
+void Sema::AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
+ CXXDestructorDecl *Destructor) {
+ assert(getLangOpts().CPlusPlus11 &&
+ "adjusting dtor exception specs was introduced in c++11");
+
+ // C++11 [class.dtor]p3:
+ // A declaration of a destructor that does not have an exception-
+ // specification is implicitly considered to have the same exception-
+ // specification as an implicit declaration.
+ const FunctionProtoType *DtorType = Destructor->getType()->
+ getAs<FunctionProtoType>();
+ if (DtorType->hasExceptionSpec())
+ return;
+
+ // Replace the destructor's type, building off the existing one. Fortunately,
+ // the only thing of interest in the destructor type is its extended info.
+ // The return and arguments are fixed.
+ FunctionProtoType::ExtProtoInfo EPI = DtorType->getExtProtoInfo();
+ EPI.ExceptionSpec.Type = EST_Unevaluated;
+ EPI.ExceptionSpec.SourceDecl = Destructor;
+ Destructor->setType(Context.getFunctionType(Context.VoidTy, None, EPI));
+
+ // FIXME: If the destructor has a body that could throw, and the newly created
+ // spec doesn't allow exceptions, we should emit a warning, because this
+ // change in behavior can break conforming C++03 programs at runtime.
+ // However, we don't have a body or an exception specification yet, so it
+ // needs to be done somewhere else.
+}
+
+namespace {
+/// \brief An abstract base class for all helper classes used in building the
+/// copy/move operators. These classes serve as factory functions and help us
+/// avoid using the same Expr* in the AST twice.
+class ExprBuilder {
+ ExprBuilder(const ExprBuilder&) = delete;
+ ExprBuilder &operator=(const ExprBuilder&) = delete;
+
+protected:
+ static Expr *assertNotNull(Expr *E) {
+ assert(E && "Expression construction must not fail.");
+ return E;
+ }
+
+public:
+ ExprBuilder() {}
+ virtual ~ExprBuilder() {}
+
+ virtual Expr *build(Sema &S, SourceLocation Loc) const = 0;
+};
+
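+/// Builds a DeclRefExpr to the given variable, with the given type.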
+class RefBuilder: public ExprBuilder {
+ VarDecl *Var;
+ QualType VarType;
+
+public:
+ Expr *build(Sema &S, SourceLocation Loc) const override {
+ return assertNotNull(S.BuildDeclRefExpr(Var, VarType, VK_LValue, Loc).get());
+ }
+
+ RefBuilder(VarDecl *Var, QualType VarType)
+ : Var(Var), VarType(VarType) {}
+};
+
+class ThisBuilder: public ExprBuilder {
+public:
+ Expr *build(Sema &S, SourceLocation Loc) const override {
+ return assertNotNull(S.ActOnCXXThis(Loc).getAs<Expr>());
+ }
+};
+
+class CastBuilder: public ExprBuilder {
+ const ExprBuilder &Builder;
+ QualType Type;
+ ExprValueKind Kind;
+ const CXXCastPath &Path;
+
+public:
+ Expr *build(Sema &S, SourceLocation Loc) const override {
+ return assertNotNull(S.ImpCastExprToType(Builder.build(S, Loc), Type,
+ CK_UncheckedDerivedToBase, Kind,
+ &Path).get());
+ }
+
+ CastBuilder(const ExprBuilder &Builder, QualType Type, ExprValueKind Kind,
+ const CXXCastPath &Path)
+ : Builder(Builder), Type(Type), Kind(Kind), Path(Path) {}
+};
+
+class DerefBuilder: public ExprBuilder {
+ const ExprBuilder &Builder;
+
+public:
+ Expr *build(Sema &S, SourceLocation Loc) const override {
+ return assertNotNull(
+ S.CreateBuiltinUnaryOp(Loc, UO_Deref, Builder.build(S, Loc)).get());
+ }
+
+ DerefBuilder(const ExprBuilder &Builder) : Builder(Builder) {}
+};
+
+class MemberBuilder: public ExprBuilder {
+ const ExprBuilder &Builder;
+ QualType Type;
+ CXXScopeSpec SS;
+ bool IsArrow;
+ LookupResult &MemberLookup;
+
+public:
+ Expr *build(Sema &S, SourceLocation Loc) const override {
+ return assertNotNull(S.BuildMemberReferenceExpr(
+ Builder.build(S, Loc), Type, Loc, IsArrow, SS, SourceLocation(),
+ nullptr, MemberLookup, nullptr, nullptr).get());
+ }
+
+ MemberBuilder(const ExprBuilder &Builder, QualType Type, bool IsArrow,
+ LookupResult &MemberLookup)
+ : Builder(Builder), Type(Type), IsArrow(IsArrow),
+ MemberLookup(MemberLookup) {}
+};
+
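+/// Casts the built expression to an xvalue, as if for a move.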
+class MoveCastBuilder: public ExprBuilder {
+ const ExprBuilder &Builder;
+
+public:
+ Expr *build(Sema &S, SourceLocation Loc) const override {
+ return assertNotNull(CastForMoving(S, Builder.build(S, Loc)));
+ }
+
+ MoveCastBuilder(const ExprBuilder &Builder) : Builder(Builder) {}
+};
+
+class LvalueConvBuilder: public ExprBuilder {
+ const ExprBuilder &Builder;
+
+public:
+ Expr *build(Sema &S, SourceLocation Loc) const override {
+ return assertNotNull(
+ S.DefaultLvalueConversion(Builder.build(S, Loc)).get());
+ }
+
+ LvalueConvBuilder(const ExprBuilder &Builder) : Builder(Builder) {}
+};
+
+class SubscriptBuilder: public ExprBuilder {
+ const ExprBuilder &Base;
+ const ExprBuilder &Index;
+
+public:
+ Expr *build(Sema &S, SourceLocation Loc) const override {
+ return assertNotNull(S.CreateBuiltinArraySubscriptExpr(
+ Base.build(S, Loc), Loc, Index.build(S, Loc), Loc).get());
+ }
+
+ SubscriptBuilder(const ExprBuilder &Base, const ExprBuilder &Index)
+ : Base(Base), Index(Index) {}
+};
+
+} // end anonymous namespace
+
+/// When generating a defaulted copy or move assignment operator, if a field
+/// should be copied with __builtin_memcpy rather than via explicit assignments,
+/// do so. This optimization only applies for arrays of scalars, and for arrays
+/// of class type where the selected copy/move-assignment operator is trivial.
+static StmtResult
+buildMemcpyForAssignmentOp(Sema &S, SourceLocation Loc, QualType T,
+ const ExprBuilder &ToB, const ExprBuilder &FromB) {
+ // Compute the size of the memory buffer to be copied.
+ QualType SizeType = S.Context.getSizeType();
+ llvm::APInt Size(S.Context.getTypeSize(SizeType),
+ S.Context.getTypeSizeInChars(T).getQuantity());
+
+ // Take the address of the field references for "from" and "to". We
+ // directly construct UnaryOperators here because semantic analysis
+ // does not permit us to take the address of an xvalue.
+ Expr *From = FromB.build(S, Loc);
+ From = new (S.Context) UnaryOperator(From, UO_AddrOf,
+ S.Context.getPointerType(From->getType()),
+ VK_RValue, OK_Ordinary, Loc);
+ Expr *To = ToB.build(S, Loc);
+ To = new (S.Context) UnaryOperator(To, UO_AddrOf,
+ S.Context.getPointerType(To->getType()),
+ VK_RValue, OK_Ordinary, Loc);
+
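+  // Under Objective-C garbage collection, records containing object-pointer
+  // members must be copied with the collector-aware memmove builtin rather
+  // than a plain memcpy.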
+ const Type *E = T->getBaseElementTypeUnsafe();
+ bool NeedsCollectableMemCpy =
+ E->isRecordType() && E->getAs<RecordType>()->getDecl()->hasObjectMember();
+
+ // Create a reference to the __builtin_objc_memmove_collectable function
+ StringRef MemCpyName = NeedsCollectableMemCpy ?
+ "__builtin_objc_memmove_collectable" :
+ "__builtin_memcpy";
+ LookupResult R(S, &S.Context.Idents.get(MemCpyName), Loc,
+ Sema::LookupOrdinaryName);
+ S.LookupName(R, S.TUScope, true);
+
+ FunctionDecl *MemCpy = R.getAsSingle<FunctionDecl>();
+ if (!MemCpy)
+ // Something went horribly wrong earlier, and we will have complained
+ // about it.
+ return StmtError();
+
+ ExprResult MemCpyRef = S.BuildDeclRefExpr(MemCpy, S.Context.BuiltinFnTy,
+ VK_RValue, Loc, nullptr);
+ assert(MemCpyRef.isUsable() && "Builtin reference cannot fail");
+
+ Expr *CallArgs[] = {
+ To, From, IntegerLiteral::Create(S.Context, Size, SizeType, Loc)
+ };
+ ExprResult Call = S.ActOnCallExpr(/*Scope=*/nullptr, MemCpyRef.get(),
+ Loc, CallArgs, Loc);
+
+ assert(!Call.isInvalid() && "Call to __builtin_memcpy cannot fail!");
+ return Call.getAs<Stmt>();
+}
+
+/// \brief Builds a statement that copies/moves the given entity from \p From
+/// to \p To.
+///
+/// This routine is used to copy/move the members of a class with an
+/// implicitly-declared copy/move assignment operator. When the entities being
+/// copied are arrays, this routine builds for loops to copy them.
+///
+/// \param S The Sema object used for type-checking.
+///
+/// \param Loc The location where the implicit copy/move is being generated.
+///
+/// \param T The type of the expressions being copied/moved. Both expressions
+/// must have this type.
+///
+/// \param To The expression we are copying/moving to.
+///
+/// \param From The expression we are copying/moving from.
+///
+/// \param CopyingBaseSubobject Whether we're copying/moving a base subobject.
+/// Otherwise, it's a non-static member subobject.
+///
+/// \param Copying Whether we're copying or moving.
+///
+/// \param Depth Internal parameter recording the depth of the recursion.
+///
+/// \returns A statement or a loop that copies the expressions, or a null
+/// StmtResult if a memcpy should be used instead.
+static StmtResult
+buildSingleCopyAssignRecursively(Sema &S, SourceLocation Loc, QualType T,
+ const ExprBuilder &To, const ExprBuilder &From,
+ bool CopyingBaseSubobject, bool Copying,
+ unsigned Depth = 0) {
+ // C++11 [class.copy]p28:
+ // Each subobject is assigned in the manner appropriate to its type:
+ //
+ // - if the subobject is of class type, as if by a call to operator= with
+ // the subobject as the object expression and the corresponding
+ // subobject of x as a single function argument (as if by explicit
+ // qualification; that is, ignoring any possible virtual overriding
+ // functions in more derived classes);
+ //
+ // C++03 [class.copy]p13:
+ // - if the subobject is of class type, the copy assignment operator for
+ // the class is used (as if by explicit qualification; that is,
+ // ignoring any possible virtual overriding functions in more derived
+ // classes);
+ if (const RecordType *RecordTy = T->getAs<RecordType>()) {
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
+
+ // Look for operator=.
+ DeclarationName Name
+ = S.Context.DeclarationNames.getCXXOperatorName(OO_Equal);
+ LookupResult OpLookup(S, Name, Loc, Sema::LookupOrdinaryName);
+ S.LookupQualifiedName(OpLookup, ClassDecl, false);
+
+ // Prior to C++11, filter out any result that isn't a copy/move-assignment
+ // operator.
+ if (!S.getLangOpts().CPlusPlus11) {
+ LookupResult::Filter F = OpLookup.makeFilter();
+ while (F.hasNext()) {
+ NamedDecl *D = F.next();
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
+ if (Method->isCopyAssignmentOperator() ||
+ (!Copying && Method->isMoveAssignmentOperator()))
+ continue;
+
+ F.erase();
+ }
+ F.done();
+ }
+
+ // Suppress the protected check (C++ [class.protected]) for each of the
+ // assignment operators we found. This strange dance is required when
+    // we're assigning via a base class's copy-assignment operator. To
+ // ensure that we're getting the right base class subobject (without
+ // ambiguities), we need to cast "this" to that subobject type; to
+ // ensure that we don't go through the virtual call mechanism, we need
+ // to qualify the operator= name with the base class (see below). However,
+ // this means that if the base class has a protected copy assignment
+ // operator, the protected member access check will fail. So, we
+ // rewrite "protected" access to "public" access in this case, since we
+ // know by construction that we're calling from a derived class.
+ if (CopyingBaseSubobject) {
+ for (LookupResult::iterator L = OpLookup.begin(), LEnd = OpLookup.end();
+ L != LEnd; ++L) {
+ if (L.getAccess() == AS_protected)
+ L.setAccess(AS_public);
+ }
+ }
+
+ // Create the nested-name-specifier that will be used to qualify the
+ // reference to operator=; this is required to suppress the virtual
+ // call mechanism.
+ CXXScopeSpec SS;
+ const Type *CanonicalT = S.Context.getCanonicalType(T.getTypePtr());
+ SS.MakeTrivial(S.Context,
+ NestedNameSpecifier::Create(S.Context, nullptr, false,
+ CanonicalT),
+ Loc);
+
+ // Create the reference to operator=.
+ ExprResult OpEqualRef
+ = S.BuildMemberReferenceExpr(To.build(S, Loc), T, Loc, /*isArrow=*/false,
+ SS, /*TemplateKWLoc=*/SourceLocation(),
+ /*FirstQualifierInScope=*/nullptr,
+ OpLookup,
+ /*TemplateArgs=*/nullptr, /*S*/nullptr,
+ /*SuppressQualifierCheck=*/true);
+ if (OpEqualRef.isInvalid())
+ return StmtError();
+
+ // Build the call to the assignment operator.
+ Expr *FromInst = From.build(S, Loc);
+ ExprResult Call = S.BuildCallToMemberFunction(/*Scope=*/nullptr,
+ OpEqualRef.getAs<Expr>(),
+ Loc, FromInst, Loc);
+ if (Call.isInvalid())
+ return StmtError();
+
+ // If we built a call to a trivial 'operator=' while copying an array,
+ // bail out. We'll replace the whole shebang with a memcpy.
+ CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(Call.get());
+ if (CE && CE->getMethodDecl()->isTrivial() && Depth)
+ return StmtResult((Stmt*)nullptr);
+
+ // Convert to an expression-statement, and clean up any produced
+ // temporaries.
+ return S.ActOnExprStmt(Call);
+ }
+
+ // - if the subobject is of scalar type, the built-in assignment
+ // operator is used.
+ const ConstantArrayType *ArrayTy = S.Context.getAsConstantArrayType(T);
+ if (!ArrayTy) {
+ ExprResult Assignment = S.CreateBuiltinBinOp(
+ Loc, BO_Assign, To.build(S, Loc), From.build(S, Loc));
+ if (Assignment.isInvalid())
+ return StmtError();
+ return S.ActOnExprStmt(Assignment);
+ }
+
+ // - if the subobject is an array, each element is assigned, in the
+ // manner appropriate to the element type;
+
+ // Construct a loop over the array bounds, e.g.,
+ //
+ // for (__SIZE_TYPE__ i0 = 0; i0 != array-size; ++i0)
+ //
+ // that will copy each of the array elements.
+ QualType SizeType = S.Context.getSizeType();
+
+ // Create the iteration variable.
+ IdentifierInfo *IterationVarName = nullptr;
+ {
+ SmallString<8> Str;
+ llvm::raw_svector_ostream OS(Str);
+ OS << "__i" << Depth;
+ IterationVarName = &S.Context.Idents.get(OS.str());
+ }
+ VarDecl *IterationVar = VarDecl::Create(S.Context, S.CurContext, Loc, Loc,
+ IterationVarName, SizeType,
+ S.Context.getTrivialTypeSourceInfo(SizeType, Loc),
+ SC_None);
+
+ // Initialize the iteration variable to zero.
+ llvm::APInt Zero(S.Context.getTypeSize(SizeType), 0);
+ IterationVar->setInit(IntegerLiteral::Create(S.Context, Zero, SizeType, Loc));
+
+  // Create a reference to the iteration variable.
+ RefBuilder IterationVarRef(IterationVar, SizeType);
+ LvalueConvBuilder IterationVarRefRVal(IterationVarRef);
+
+ // Create the DeclStmt that holds the iteration variable.
+ Stmt *InitStmt = new (S.Context) DeclStmt(DeclGroupRef(IterationVar),Loc,Loc);
+
+ // Subscript the "from" and "to" expressions with the iteration variable.
+ SubscriptBuilder FromIndexCopy(From, IterationVarRefRVal);
+ MoveCastBuilder FromIndexMove(FromIndexCopy);
+ const ExprBuilder *FromIndex;
+ if (Copying)
+ FromIndex = &FromIndexCopy;
+ else
+ FromIndex = &FromIndexMove;
+
+ SubscriptBuilder ToIndex(To, IterationVarRefRVal);
+
+ // Build the copy/move for an individual element of the array.
+ StmtResult Copy =
+ buildSingleCopyAssignRecursively(S, Loc, ArrayTy->getElementType(),
+ ToIndex, *FromIndex, CopyingBaseSubobject,
+ Copying, Depth + 1);
+ // Bail out if copying fails or if we determined that we should use memcpy.
+ if (Copy.isInvalid() || !Copy.get())
+ return Copy;
+
+ // Create the comparison against the array bound.
+ llvm::APInt Upper
+ = ArrayTy->getSize().zextOrTrunc(S.Context.getTypeSize(SizeType));
+ Expr *Comparison
+ = new (S.Context) BinaryOperator(IterationVarRefRVal.build(S, Loc),
+ IntegerLiteral::Create(S.Context, Upper, SizeType, Loc),
+ BO_NE, S.Context.BoolTy,
+ VK_RValue, OK_Ordinary, Loc, false);
+
+ // Create the pre-increment of the iteration variable.
+ Expr *Increment
+ = new (S.Context) UnaryOperator(IterationVarRef.build(S, Loc), UO_PreInc,
+ SizeType, VK_LValue, OK_Ordinary, Loc);
+
+ // Construct the loop that copies all elements of this array.
+ return S.ActOnForStmt(Loc, Loc, InitStmt,
+ S.MakeFullExpr(Comparison),
+ nullptr, S.MakeFullDiscardedValueExpr(Increment),
+ Loc, Copy.get());
+}
+
+static StmtResult
+buildSingleCopyAssign(Sema &S, SourceLocation Loc, QualType T,
+ const ExprBuilder &To, const ExprBuilder &From,
+ bool CopyingBaseSubobject, bool Copying) {
+ // Maybe we should use a memcpy?
+ if (T->isArrayType() && !T.isConstQualified() && !T.isVolatileQualified() &&
+ T.isTriviallyCopyableType(S.Context))
+ return buildMemcpyForAssignmentOp(S, Loc, T, To, From);
+
+ StmtResult Result(buildSingleCopyAssignRecursively(S, Loc, T, To, From,
+ CopyingBaseSubobject,
+ Copying, 0));
+
+ // If we ended up picking a trivial assignment operator for an array of a
+ // non-trivially-copyable class type, just emit a memcpy.
+ if (!Result.isInvalid() && !Result.get())
+ return buildMemcpyForAssignmentOp(S, Loc, T, To, From);
+
+ return Result;
+}
+
+Sema::ImplicitExceptionSpecification
+Sema::ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD) {
+ CXXRecordDecl *ClassDecl = MD->getParent();
+
+ ImplicitExceptionSpecification ExceptSpec(*this);
+ if (ClassDecl->isInvalidDecl())
+ return ExceptSpec;
+
+ const FunctionProtoType *T = MD->getType()->castAs<FunctionProtoType>();
+ assert(T->getNumParams() == 1 && "not a copy assignment op");
+ unsigned ArgQuals =
+ T->getParamType(0).getNonReferenceType().getCVRQualifiers();
+
+ // C++ [except.spec]p14:
+ // An implicitly declared special member function (Clause 12) shall have an
+ // exception-specification. [...]
+
+  // It is unspecified whether or not an implicit copy assignment operator
+  // attempts to deduplicate calls to the assignment operators of virtual
+  // bases. As such, this exception specification is effectively unspecified.
+ // Based on a similar decision made for constness in C++0x, we're erring on
+ // the side of assuming such calls to be made regardless of whether they
+ // actually happen.
+ for (const auto &Base : ClassDecl->bases()) {
+ if (Base.isVirtual())
+ continue;
+
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
+ if (CXXMethodDecl *CopyAssign = LookupCopyingAssignment(BaseClassDecl,
+ ArgQuals, false, 0))
+ ExceptSpec.CalledDecl(Base.getLocStart(), CopyAssign);
+ }
+
+ for (const auto &Base : ClassDecl->vbases()) {
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
+ if (CXXMethodDecl *CopyAssign = LookupCopyingAssignment(BaseClassDecl,
+ ArgQuals, false, 0))
+ ExceptSpec.CalledDecl(Base.getLocStart(), CopyAssign);
+ }
+
+ for (const auto *Field : ClassDecl->fields()) {
+ QualType FieldType = Context.getBaseElementType(Field->getType());
+ if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
+ if (CXXMethodDecl *CopyAssign =
+ LookupCopyingAssignment(FieldClassDecl,
+ ArgQuals | FieldType.getCVRQualifiers(),
+ false, 0))
+ ExceptSpec.CalledDecl(Field->getLocation(), CopyAssign);
+ }
+ }
+
+ return ExceptSpec;
+}
+
+CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
+  // Note: The following rules are largely analogous to the copy constructor
+  // rules. Virtual bases are not taken into account when determining the
+  // argument type of the operator, and operators taking an object instead of
+  // a reference are allowed.
+ assert(ClassDecl->needsImplicitCopyAssignment());
+
+ DeclaringSpecialMember DSM(*this, ClassDecl, CXXCopyAssignment);
+ if (DSM.isAlreadyBeingDeclared())
+ return nullptr;
+
+ QualType ArgType = Context.getTypeDeclType(ClassDecl);
+ QualType RetType = Context.getLValueReferenceType(ArgType);
+ bool Const = ClassDecl->implicitCopyAssignmentHasConstParam();
+ if (Const)
+ ArgType = ArgType.withConst();
+ ArgType = Context.getLValueReferenceType(ArgType);
+
+ bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl,
+ CXXCopyAssignment,
+ Const);
+
+ // An implicitly-declared copy assignment operator is an inline public
+ // member of its class.
+ DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
+ SourceLocation ClassLoc = ClassDecl->getLocation();
+ DeclarationNameInfo NameInfo(Name, ClassLoc);
+ CXXMethodDecl *CopyAssignment =
+ CXXMethodDecl::Create(Context, ClassDecl, ClassLoc, NameInfo, QualType(),
+ /*TInfo=*/nullptr, /*StorageClass=*/SC_None,
+ /*isInline=*/true, Constexpr, SourceLocation());
+ CopyAssignment->setAccess(AS_public);
+ CopyAssignment->setDefaulted();
+ CopyAssignment->setImplicit();
+
+ if (getLangOpts().CUDA) {
+ inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXCopyAssignment,
+ CopyAssignment,
+ /* ConstRHS */ Const,
+ /* Diagnose */ false);
+ }
+
+ // Build an exception specification pointing back at this member.
+ FunctionProtoType::ExtProtoInfo EPI =
+ getImplicitMethodEPI(*this, CopyAssignment);
+ CopyAssignment->setType(Context.getFunctionType(RetType, ArgType, EPI));
+
+ // Add the parameter to the operator.
+ ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyAssignment,
+ ClassLoc, ClassLoc,
+ /*Id=*/nullptr, ArgType,
+ /*TInfo=*/nullptr, SC_None,
+ nullptr);
+ CopyAssignment->setParams(FromParam);
+
+ AddOverriddenMethods(ClassDecl, CopyAssignment);
+
+ CopyAssignment->setTrivial(
+ ClassDecl->needsOverloadResolutionForCopyAssignment()
+ ? SpecialMemberIsTrivial(CopyAssignment, CXXCopyAssignment)
+ : ClassDecl->hasTrivialCopyAssignment());
+
+ if (ShouldDeleteSpecialMember(CopyAssignment, CXXCopyAssignment))
+ SetDeclDeleted(CopyAssignment, ClassLoc);
+
+ // Note that we have added this copy-assignment operator.
+ ++ASTContext::NumImplicitCopyAssignmentOperatorsDeclared;
+
+ if (Scope *S = getScopeForContext(ClassDecl))
+ PushOnScopeChains(CopyAssignment, S, false);
+ ClassDecl->addDecl(CopyAssignment);
+
+ return CopyAssignment;
+}
+
+/// Diagnose an implicit copy operation which is odr-used, but which is
+/// deprecated because the class has a user-declared copy constructor, copy
+/// assignment operator, or destructor.
+static void diagnoseDeprecatedCopyOperation(Sema &S, CXXMethodDecl *CopyOp,
+ SourceLocation UseLoc) {
+ assert(CopyOp->isImplicit());
+
+ CXXRecordDecl *RD = CopyOp->getParent();
+ CXXMethodDecl *UserDeclaredOperation = nullptr;
+
+ // In Microsoft mode, assignment operations don't affect constructors and
+ // vice versa.
+ if (RD->hasUserDeclaredDestructor()) {
+ UserDeclaredOperation = RD->getDestructor();
+ } else if (!isa<CXXConstructorDecl>(CopyOp) &&
+ RD->hasUserDeclaredCopyConstructor() &&
+ !S.getLangOpts().MSVCCompat) {
+ // Find any user-declared copy constructor.
+ for (auto *I : RD->ctors()) {
+ if (I->isCopyConstructor()) {
+ UserDeclaredOperation = I;
+ break;
+ }
+ }
+ assert(UserDeclaredOperation);
+ } else if (isa<CXXConstructorDecl>(CopyOp) &&
+ RD->hasUserDeclaredCopyAssignment() &&
+ !S.getLangOpts().MSVCCompat) {
+    // Find any user-declared copy assignment operator.
+ for (auto *I : RD->methods()) {
+ if (I->isCopyAssignmentOperator()) {
+ UserDeclaredOperation = I;
+ break;
+ }
+ }
+ assert(UserDeclaredOperation);
+ }
+
+ if (UserDeclaredOperation) {
+ S.Diag(UserDeclaredOperation->getLocation(),
+ diag::warn_deprecated_copy_operation)
+ << RD << /*copy assignment*/!isa<CXXConstructorDecl>(CopyOp)
+ << /*destructor*/isa<CXXDestructorDecl>(UserDeclaredOperation);
+ S.Diag(UseLoc, diag::note_member_synthesized_at)
+ << (isa<CXXConstructorDecl>(CopyOp) ? Sema::CXXCopyConstructor
+ : Sema::CXXCopyAssignment)
+ << RD;
+ }
+}
+
+void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
+ CXXMethodDecl *CopyAssignOperator) {
+ assert((CopyAssignOperator->isDefaulted() &&
+ CopyAssignOperator->isOverloadedOperator() &&
+ CopyAssignOperator->getOverloadedOperator() == OO_Equal &&
+ !CopyAssignOperator->doesThisDeclarationHaveABody() &&
+ !CopyAssignOperator->isDeleted()) &&
+ "DefineImplicitCopyAssignment called for wrong function");
+
+ CXXRecordDecl *ClassDecl = CopyAssignOperator->getParent();
+
+ if (ClassDecl->isInvalidDecl() || CopyAssignOperator->isInvalidDecl()) {
+ CopyAssignOperator->setInvalidDecl();
+ return;
+ }
+
+ // C++11 [class.copy]p18:
+ // The [definition of an implicitly declared copy assignment operator] is
+ // deprecated if the class has a user-declared copy constructor or a
+ // user-declared destructor.
+ if (getLangOpts().CPlusPlus11 && CopyAssignOperator->isImplicit())
+ diagnoseDeprecatedCopyOperation(*this, CopyAssignOperator, CurrentLocation);
+
+ CopyAssignOperator->markUsed(Context);
+
+ SynthesizedFunctionScope Scope(*this, CopyAssignOperator);
+ DiagnosticErrorTrap Trap(Diags);
+
+ // C++0x [class.copy]p30:
+ // The implicitly-defined or explicitly-defaulted copy assignment operator
+ // for a non-union class X performs memberwise copy assignment of its
+ // subobjects. The direct base classes of X are assigned first, in the
+ // order of their declaration in the base-specifier-list, and then the
+ // immediate non-static data members of X are assigned, in the order in
+ // which they were declared in the class definition.
+
+ // The statements that form the synthesized function body.
+ SmallVector<Stmt*, 8> Statements;
+
+ // The parameter for the "other" object, which we are copying from.
+ ParmVarDecl *Other = CopyAssignOperator->getParamDecl(0);
+ Qualifiers OtherQuals = Other->getType().getQualifiers();
+ QualType OtherRefType = Other->getType();
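+  // If the parameter is a reference (the common case), work with the
+  // referenced type and its qualifiers instead.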
+ if (const LValueReferenceType *OtherRef
+ = OtherRefType->getAs<LValueReferenceType>()) {
+ OtherRefType = OtherRef->getPointeeType();
+ OtherQuals = OtherRefType.getQualifiers();
+ }
+
+ // Our location for everything implicitly-generated.
+ SourceLocation Loc = CopyAssignOperator->getLocEnd().isValid()
+ ? CopyAssignOperator->getLocEnd()
+ : CopyAssignOperator->getLocation();
+
+ // Builds a DeclRefExpr for the "other" object.
+ RefBuilder OtherRef(Other, OtherRefType);
+
+ // Builds the "this" pointer.
+ ThisBuilder This;
+
+ // Assign base classes.
+ bool Invalid = false;
+ for (auto &Base : ClassDecl->bases()) {
+ // Form the assignment:
+ // static_cast<Base*>(this)->Base::operator=(static_cast<Base&>(other));
+ QualType BaseType = Base.getType().getUnqualifiedType();
+ if (!BaseType->isRecordType()) {
+ Invalid = true;
+ continue;
+ }
+
+ CXXCastPath BasePath;
+ BasePath.push_back(&Base);
+
+ // Construct the "from" expression, which is an implicit cast to the
+ // appropriately-qualified base type.
+ CastBuilder From(OtherRef, Context.getQualifiedType(BaseType, OtherQuals),
+ VK_LValue, BasePath);
+
+ // Dereference "this".
+ DerefBuilder DerefThis(This);
+ CastBuilder To(DerefThis,
+ Context.getCVRQualifiedType(
+ BaseType, CopyAssignOperator->getTypeQualifiers()),
+ VK_LValue, BasePath);
+
+ // Build the copy.
+ StmtResult Copy = buildSingleCopyAssign(*this, Loc, BaseType,
+ To, From,
+ /*CopyingBaseSubobject=*/true,
+ /*Copying=*/true);
+ if (Copy.isInvalid()) {
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXCopyAssignment << Context.getTagDeclType(ClassDecl);
+ CopyAssignOperator->setInvalidDecl();
+ return;
+ }
+
+ // Success! Record the copy.
+ Statements.push_back(Copy.getAs<Expr>());
+ }
+
+ // Assign non-static members.
+ for (auto *Field : ClassDecl->fields()) {
+ // FIXME: We should form some kind of AST representation for the implied
+ // memcpy in a union copy operation.
+ if (Field->isUnnamedBitfield() || Field->getParent()->isUnion())
+ continue;
+
+ if (Field->isInvalidDecl()) {
+ Invalid = true;
+ continue;
+ }
+
+ // Check for members of reference type; we can't copy those.
+ if (Field->getType()->isReferenceType()) {
+ Diag(ClassDecl->getLocation(), diag::err_uninitialized_member_for_assign)
+ << Context.getTagDeclType(ClassDecl) << 0 << Field->getDeclName();
+ Diag(Field->getLocation(), diag::note_declared_at);
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXCopyAssignment << Context.getTagDeclType(ClassDecl);
+ Invalid = true;
+ continue;
+ }
+
+ // Check for members of const-qualified, non-class type.
+ QualType BaseType = Context.getBaseElementType(Field->getType());
+ if (!BaseType->getAs<RecordType>() && BaseType.isConstQualified()) {
+ Diag(ClassDecl->getLocation(), diag::err_uninitialized_member_for_assign)
+ << Context.getTagDeclType(ClassDecl) << 1 << Field->getDeclName();
+ Diag(Field->getLocation(), diag::note_declared_at);
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXCopyAssignment << Context.getTagDeclType(ClassDecl);
+ Invalid = true;
+ continue;
+ }
+
+ // Suppress assigning zero-width bitfields.
+ if (Field->isBitField() && Field->getBitWidthValue(Context) == 0)
+ continue;
+
+ QualType FieldType = Field->getType().getNonReferenceType();
+ if (FieldType->isIncompleteArrayType()) {
+ assert(ClassDecl->hasFlexibleArrayMember() &&
+ "Incomplete array type is not valid");
+ continue;
+ }
+
+ // Build references to the field in the object we're copying from and to.
+ CXXScopeSpec SS; // Intentionally empty
+ LookupResult MemberLookup(*this, Field->getDeclName(), Loc,
+ LookupMemberName);
+ MemberLookup.addDecl(Field);
+ MemberLookup.resolveKind();
+
+ MemberBuilder From(OtherRef, OtherRefType, /*IsArrow=*/false, MemberLookup);
+
+ MemberBuilder To(This, getCurrentThisType(), /*IsArrow=*/true, MemberLookup);
+
+ // Build the copy of this field.
+ StmtResult Copy = buildSingleCopyAssign(*this, Loc, FieldType,
+ To, From,
+ /*CopyingBaseSubobject=*/false,
+ /*Copying=*/true);
+ if (Copy.isInvalid()) {
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXCopyAssignment << Context.getTagDeclType(ClassDecl);
+ CopyAssignOperator->setInvalidDecl();
+ return;
+ }
+
+ // Success! Record the copy.
+ Statements.push_back(Copy.getAs<Stmt>());
+ }
+
+ if (!Invalid) {
+ // Add a "return *this;"
+    ExprResult ThisObj =
+        CreateBuiltinUnaryOp(Loc, UO_Deref, This.build(*this, Loc));
+
+ StmtResult Return = BuildReturnStmt(Loc, ThisObj.get());
+ if (Return.isInvalid())
+ Invalid = true;
+ else {
+ Statements.push_back(Return.getAs<Stmt>());
+
+ if (Trap.hasErrorOccurred()) {
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXCopyAssignment << Context.getTagDeclType(ClassDecl);
+ Invalid = true;
+ }
+ }
+ }
+
+ // The exception specification is needed because we are defining the
+ // function.
+ ResolveExceptionSpec(CurrentLocation,
+ CopyAssignOperator->getType()->castAs<FunctionProtoType>());
+
+ if (Invalid) {
+ CopyAssignOperator->setInvalidDecl();
+ return;
+ }
+
+ StmtResult Body;
+ {
+ CompoundScopeRAII CompoundScope(*this);
+ Body = ActOnCompoundStmt(Loc, Loc, Statements,
+ /*isStmtExpr=*/false);
+ assert(!Body.isInvalid() && "Compound statement creation cannot fail");
+ }
+ CopyAssignOperator->setBody(Body.getAs<Stmt>());
+
+ if (ASTMutationListener *L = getASTMutationListener()) {
+ L->CompletedImplicitDefinition(CopyAssignOperator);
+ }
+}
+
+Sema::ImplicitExceptionSpecification
+Sema::ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD) {
+ CXXRecordDecl *ClassDecl = MD->getParent();
+
+ ImplicitExceptionSpecification ExceptSpec(*this);
+ if (ClassDecl->isInvalidDecl())
+ return ExceptSpec;
+
+ // C++0x [except.spec]p14:
+ // An implicitly declared special member function (Clause 12) shall have an
+ // exception-specification. [...]
+
+  // It is unspecified whether or not an implicit move assignment operator
+  // attempts to deduplicate calls to the assignment operators of virtual
+  // bases. As such, this exception specification is effectively unspecified.
+ // Based on a similar decision made for constness in C++0x, we're erring on
+ // the side of assuming such calls to be made regardless of whether they
+ // actually happen.
+ // Note that a move constructor is not implicitly declared when there are
+ // virtual bases, but it can still be user-declared and explicitly defaulted.
+ for (const auto &Base : ClassDecl->bases()) {
+ if (Base.isVirtual())
+ continue;
+
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
+ if (CXXMethodDecl *MoveAssign = LookupMovingAssignment(BaseClassDecl,
+ 0, false, 0))
+ ExceptSpec.CalledDecl(Base.getLocStart(), MoveAssign);
+ }
+
+ for (const auto &Base : ClassDecl->vbases()) {
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
+ if (CXXMethodDecl *MoveAssign = LookupMovingAssignment(BaseClassDecl,
+ 0, false, 0))
+ ExceptSpec.CalledDecl(Base.getLocStart(), MoveAssign);
+ }
+
+ for (const auto *Field : ClassDecl->fields()) {
+ QualType FieldType = Context.getBaseElementType(Field->getType());
+ if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
+ if (CXXMethodDecl *MoveAssign =
+ LookupMovingAssignment(FieldClassDecl,
+ FieldType.getCVRQualifiers(),
+ false, 0))
+ ExceptSpec.CalledDecl(Field->getLocation(), MoveAssign);
+ }
+ }
+
+ return ExceptSpec;
+}
+
+CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
+ assert(ClassDecl->needsImplicitMoveAssignment());
+
+ DeclaringSpecialMember DSM(*this, ClassDecl, CXXMoveAssignment);
+ if (DSM.isAlreadyBeingDeclared())
+ return nullptr;
+
+  // Note: The following rules are largely analogous to the move
+  // constructor rules.
+
+ QualType ArgType = Context.getTypeDeclType(ClassDecl);
+ QualType RetType = Context.getLValueReferenceType(ArgType);
+ ArgType = Context.getRValueReferenceType(ArgType);
+
+ bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl,
+ CXXMoveAssignment,
+ false);
+
+ // An implicitly-declared move assignment operator is an inline public
+ // member of its class.
+ DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
+ SourceLocation ClassLoc = ClassDecl->getLocation();
+ DeclarationNameInfo NameInfo(Name, ClassLoc);
+ CXXMethodDecl *MoveAssignment =
+ CXXMethodDecl::Create(Context, ClassDecl, ClassLoc, NameInfo, QualType(),
+ /*TInfo=*/nullptr, /*StorageClass=*/SC_None,
+ /*isInline=*/true, Constexpr, SourceLocation());
+ MoveAssignment->setAccess(AS_public);
+ MoveAssignment->setDefaulted();
+ MoveAssignment->setImplicit();
+
+ if (getLangOpts().CUDA) {
+ inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXMoveAssignment,
+ MoveAssignment,
+ /* ConstRHS */ false,
+ /* Diagnose */ false);
+ }
+
+ // Build an exception specification pointing back at this member.
+ FunctionProtoType::ExtProtoInfo EPI =
+ getImplicitMethodEPI(*this, MoveAssignment);
+ MoveAssignment->setType(Context.getFunctionType(RetType, ArgType, EPI));
+
+ // Add the parameter to the operator.
+ ParmVarDecl *FromParam = ParmVarDecl::Create(Context, MoveAssignment,
+ ClassLoc, ClassLoc,
+ /*Id=*/nullptr, ArgType,
+ /*TInfo=*/nullptr, SC_None,
+ nullptr);
+ MoveAssignment->setParams(FromParam);
+
+ AddOverriddenMethods(ClassDecl, MoveAssignment);
+
+ MoveAssignment->setTrivial(
+ ClassDecl->needsOverloadResolutionForMoveAssignment()
+ ? SpecialMemberIsTrivial(MoveAssignment, CXXMoveAssignment)
+ : ClassDecl->hasTrivialMoveAssignment());
+
+ if (ShouldDeleteSpecialMember(MoveAssignment, CXXMoveAssignment)) {
+ ClassDecl->setImplicitMoveAssignmentIsDeleted();
+ SetDeclDeleted(MoveAssignment, ClassLoc);
+ }
+
+  // Note that we have added this move-assignment operator.
+ ++ASTContext::NumImplicitMoveAssignmentOperatorsDeclared;
+
+ if (Scope *S = getScopeForContext(ClassDecl))
+ PushOnScopeChains(MoveAssignment, S, false);
+ ClassDecl->addDecl(MoveAssignment);
+
+ return MoveAssignment;
+}
+
+/// Check if we're implicitly defining a move assignment operator for a class
+/// with virtual bases. Such a move assignment might move-assign the virtual
+/// base multiple times.
+static void checkMoveAssignmentForRepeatedMove(Sema &S, CXXRecordDecl *Class,
+ SourceLocation CurrentLocation) {
+ assert(!Class->isDependentContext() && "should not define dependent move");
+
+ // Only a virtual base could get implicitly move-assigned multiple times.
+ // Only a non-trivial move assignment can observe this. We only want to
+ // diagnose if we implicitly define an assignment operator that assigns
+ // two base classes, both of which move-assign the same virtual base.
+ if (Class->getNumVBases() == 0 || Class->hasTrivialMoveAssignment() ||
+ Class->getNumBases() < 2)
+ return;
+
+ llvm::SmallVector<CXXBaseSpecifier *, 16> Worklist;
+ typedef llvm::DenseMap<CXXRecordDecl*, CXXBaseSpecifier*> VBaseMap;
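+  // Maps each virtual base to the direct base specifier whose move
+  // assignment will (transitively) move-assign it first.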
+ VBaseMap VBases;
+
+ for (auto &BI : Class->bases()) {
+ Worklist.push_back(&BI);
+ while (!Worklist.empty()) {
+ CXXBaseSpecifier *BaseSpec = Worklist.pop_back_val();
+ CXXRecordDecl *Base = BaseSpec->getType()->getAsCXXRecordDecl();
+
+ // If the base has no non-trivial move assignment operators,
+ // we don't care about moves from it.
+ if (!Base->hasNonTrivialMoveAssignment())
+ continue;
+
+ // If there's nothing virtual here, skip it.
+ if (!BaseSpec->isVirtual() && !Base->getNumVBases())
+ continue;
+
+ // If we're not actually going to call a move assignment for this base,
+ // or the selected move assignment is trivial, skip it.
+ Sema::SpecialMemberOverloadResult *SMOR =
+ S.LookupSpecialMember(Base, Sema::CXXMoveAssignment,
+ /*ConstArg*/false, /*VolatileArg*/false,
+ /*RValueThis*/true, /*ConstThis*/false,
+ /*VolatileThis*/false);
+ if (!SMOR->getMethod() || SMOR->getMethod()->isTrivial() ||
+ !SMOR->getMethod()->isMoveAssignmentOperator())
+ continue;
+
+ if (BaseSpec->isVirtual()) {
+ // We're going to move-assign this virtual base, and its move
+ // assignment operator is not trivial. If this can happen for
+ // multiple distinct direct bases of Class, diagnose it. (If it
+ // only happens in one base, we'll diagnose it when synthesizing
+ // that base class's move assignment operator.)
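+        // insert() returns the existing entry if another direct base has
+        // already claimed this virtual base.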
+ CXXBaseSpecifier *&Existing =
+ VBases.insert(std::make_pair(Base->getCanonicalDecl(), &BI))
+ .first->second;
+ if (Existing && Existing != &BI) {
+ S.Diag(CurrentLocation, diag::warn_vbase_moved_multiple_times)
+ << Class << Base;
+ S.Diag(Existing->getLocStart(), diag::note_vbase_moved_here)
+ << (Base->getCanonicalDecl() ==
+ Existing->getType()->getAsCXXRecordDecl()->getCanonicalDecl())
+ << Base << Existing->getType() << Existing->getSourceRange();
+ S.Diag(BI.getLocStart(), diag::note_vbase_moved_here)
+ << (Base->getCanonicalDecl() ==
+ BI.getType()->getAsCXXRecordDecl()->getCanonicalDecl())
+ << Base << BI.getType() << BaseSpec->getSourceRange();
+
+ // Only diagnose each vbase once.
+ Existing = nullptr;
+ }
+ } else {
+ // Only walk over bases that have defaulted move assignment operators.
+ // We assume that any user-provided move assignment operator handles
+ // the multiple-moves-of-vbase case itself somehow.
+ if (!SMOR->getMethod()->isDefaulted())
+ continue;
+
+ // We're going to move the base classes of Base. Add them to the list.
+ for (auto &BI : Base->bases())
+ Worklist.push_back(&BI);
+ }
+ }
+ }
+}
+
+void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
+ CXXMethodDecl *MoveAssignOperator) {
+ assert((MoveAssignOperator->isDefaulted() &&
+ MoveAssignOperator->isOverloadedOperator() &&
+ MoveAssignOperator->getOverloadedOperator() == OO_Equal &&
+ !MoveAssignOperator->doesThisDeclarationHaveABody() &&
+ !MoveAssignOperator->isDeleted()) &&
+ "DefineImplicitMoveAssignment called for wrong function");
+
+ CXXRecordDecl *ClassDecl = MoveAssignOperator->getParent();
+
+ if (ClassDecl->isInvalidDecl() || MoveAssignOperator->isInvalidDecl()) {
+ MoveAssignOperator->setInvalidDecl();
+ return;
+ }
+
+ MoveAssignOperator->markUsed(Context);
+
+ SynthesizedFunctionScope Scope(*this, MoveAssignOperator);
+ DiagnosticErrorTrap Trap(Diags);
+
+ // C++0x [class.copy]p28:
+  //   The implicitly-defined or explicitly-defaulted move assignment operator
+  //   for a non-union class X performs memberwise move assignment of its
+  //   subobjects. The direct base classes of X are assigned first, in the
+  //   order of their declaration in the base-specifier-list, and then the
+  //   immediate non-static data members of X are assigned, in the order in
+  //   which they were declared in the class definition.
+
+ // Issue a warning if our implicit move assignment operator will move
+ // from a virtual base more than once.
+ checkMoveAssignmentForRepeatedMove(*this, ClassDecl, CurrentLocation);
+
+ // The statements that form the synthesized function body.
+ SmallVector<Stmt*, 8> Statements;
+
+ // The parameter for the "other" object, which we are moving from.
+ ParmVarDecl *Other = MoveAssignOperator->getParamDecl(0);
+ QualType OtherRefType = Other->getType()->
+ getAs<RValueReferenceType>()->getPointeeType();
+ assert(!OtherRefType.getQualifiers() &&
+ "Bad argument type of defaulted move assignment");
+
+ // Our location for everything implicitly-generated.
+ SourceLocation Loc = MoveAssignOperator->getLocEnd().isValid()
+ ? MoveAssignOperator->getLocEnd()
+ : MoveAssignOperator->getLocation();
+
+ // Builds a reference to the "other" object.
+ RefBuilder OtherRef(Other, OtherRefType);
+ // Cast to rvalue.
+ MoveCastBuilder MoveOther(OtherRef);
+
+ // Builds the "this" pointer.
+ ThisBuilder This;
+
+ // Assign base classes.
+ bool Invalid = false;
+ for (auto &Base : ClassDecl->bases()) {
+ // C++11 [class.copy]p28:
+ // It is unspecified whether subobjects representing virtual base classes
+ // are assigned more than once by the implicitly-defined copy assignment
+ // operator.
+ // FIXME: Do not assign to a vbase that will be assigned by some other base
+ // class. For a move-assignment, this can result in the vbase being moved
+ // multiple times.
+
+ // Form the assignment:
+ // static_cast<Base*>(this)->Base::operator=(static_cast<Base&&>(other));
+ QualType BaseType = Base.getType().getUnqualifiedType();
+ if (!BaseType->isRecordType()) {
+ Invalid = true;
+ continue;
+ }
+
+ CXXCastPath BasePath;
+ BasePath.push_back(&Base);
+
+ // Construct the "from" expression, which is an implicit cast to the
+ // appropriately-qualified base type.
+ CastBuilder From(OtherRef, BaseType, VK_XValue, BasePath);
+
+ // Dereference "this".
+ DerefBuilder DerefThis(This);
+
+ // Implicitly cast "this" to the appropriately-qualified base type.
+ CastBuilder To(DerefThis,
+ Context.getCVRQualifiedType(
+ BaseType, MoveAssignOperator->getTypeQualifiers()),
+ VK_LValue, BasePath);
+
+ // Build the move.
+ StmtResult Move = buildSingleCopyAssign(*this, Loc, BaseType,
+ To, From,
+ /*CopyingBaseSubobject=*/true,
+ /*Copying=*/false);
+ if (Move.isInvalid()) {
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXMoveAssignment << Context.getTagDeclType(ClassDecl);
+ MoveAssignOperator->setInvalidDecl();
+ return;
+ }
+
+ // Success! Record the move.
+ Statements.push_back(Move.getAs<Expr>());
+ }
+
+ // Assign non-static members.
+ for (auto *Field : ClassDecl->fields()) {
+ // FIXME: We should form some kind of AST representation for the implied
+ // memcpy in a union copy operation.
+ if (Field->isUnnamedBitfield() || Field->getParent()->isUnion())
+ continue;
+
+ if (Field->isInvalidDecl()) {
+ Invalid = true;
+ continue;
+ }
+
+ // Check for members of reference type; we can't move those.
+ if (Field->getType()->isReferenceType()) {
+ Diag(ClassDecl->getLocation(), diag::err_uninitialized_member_for_assign)
+ << Context.getTagDeclType(ClassDecl) << 0 << Field->getDeclName();
+ Diag(Field->getLocation(), diag::note_declared_at);
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXMoveAssignment << Context.getTagDeclType(ClassDecl);
+ Invalid = true;
+ continue;
+ }
+
+ // Check for members of const-qualified, non-class type.
+ QualType BaseType = Context.getBaseElementType(Field->getType());
+ if (!BaseType->getAs<RecordType>() && BaseType.isConstQualified()) {
+ Diag(ClassDecl->getLocation(), diag::err_uninitialized_member_for_assign)
+ << Context.getTagDeclType(ClassDecl) << 1 << Field->getDeclName();
+ Diag(Field->getLocation(), diag::note_declared_at);
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXMoveAssignment << Context.getTagDeclType(ClassDecl);
+ Invalid = true;
+ continue;
+ }
+
+ // Suppress assigning zero-width bitfields.
+ if (Field->isBitField() && Field->getBitWidthValue(Context) == 0)
+ continue;
+
+ QualType FieldType = Field->getType().getNonReferenceType();
+ if (FieldType->isIncompleteArrayType()) {
+ assert(ClassDecl->hasFlexibleArrayMember() &&
+ "Incomplete array type is not valid");
+ continue;
+ }
+
+ // Build references to the field in the object we're copying from and to.
+ LookupResult MemberLookup(*this, Field->getDeclName(), Loc,
+ LookupMemberName);
+ MemberLookup.addDecl(Field);
+ MemberLookup.resolveKind();
+ MemberBuilder From(MoveOther, OtherRefType,
+ /*IsArrow=*/false, MemberLookup);
+ MemberBuilder To(This, getCurrentThisType(),
+ /*IsArrow=*/true, MemberLookup);
+
+ assert(!From.build(*this, Loc)->isLValue() && // could be xvalue or prvalue
+ "Member reference with rvalue base must be rvalue except for reference "
+ "members, which aren't allowed for move assignment.");
+
+ // Build the move of this field.
+ StmtResult Move = buildSingleCopyAssign(*this, Loc, FieldType,
+ To, From,
+ /*CopyingBaseSubobject=*/false,
+ /*Copying=*/false);
+ if (Move.isInvalid()) {
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXMoveAssignment << Context.getTagDeclType(ClassDecl);
+ MoveAssignOperator->setInvalidDecl();
+ return;
+ }
+
+ // Success! Record the copy.
+ Statements.push_back(Move.getAs<Stmt>());
+ }
+
+ if (!Invalid) {
+ // Add a "return *this;"
+ ExprResult ThisObj =
+ CreateBuiltinUnaryOp(Loc, UO_Deref, This.build(*this, Loc));
+
+ StmtResult Return = BuildReturnStmt(Loc, ThisObj.get());
+ if (Return.isInvalid())
+ Invalid = true;
+ else {
+ Statements.push_back(Return.getAs<Stmt>());
+
+ if (Trap.hasErrorOccurred()) {
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXMoveAssignment << Context.getTagDeclType(ClassDecl);
+ Invalid = true;
+ }
+ }
+ }
+
+ // The exception specification is needed because we are defining the
+ // function.
+ ResolveExceptionSpec(CurrentLocation,
+ MoveAssignOperator->getType()->castAs<FunctionProtoType>());
+
+ if (Invalid) {
+ MoveAssignOperator->setInvalidDecl();
+ return;
+ }
+
+ StmtResult Body;
+ {
+ CompoundScopeRAII CompoundScope(*this);
+ Body = ActOnCompoundStmt(Loc, Loc, Statements,
+ /*isStmtExpr=*/false);
+ assert(!Body.isInvalid() && "Compound statement creation cannot fail");
+ }
+ MoveAssignOperator->setBody(Body.getAs<Stmt>());
+
+ if (ASTMutationListener *L = getASTMutationListener()) {
+ L->CompletedImplicitDefinition(MoveAssignOperator);
+ }
+}
+
+Sema::ImplicitExceptionSpecification
+Sema::ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD) {
+ CXXRecordDecl *ClassDecl = MD->getParent();
+
+ ImplicitExceptionSpecification ExceptSpec(*this);
+ if (ClassDecl->isInvalidDecl())
+ return ExceptSpec;
+
+ const FunctionProtoType *T = MD->getType()->castAs<FunctionProtoType>();
+ assert(T->getNumParams() >= 1 && "not a copy ctor");
+ unsigned Quals = T->getParamType(0).getNonReferenceType().getCVRQualifiers();
+
+ // C++ [except.spec]p14:
+ // An implicitly declared special member function (Clause 12) shall have an
+ // exception-specification. [...]
+ for (const auto &Base : ClassDecl->bases()) {
+ // Virtual bases are handled below.
+ if (Base.isVirtual())
+ continue;
+
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
+ if (CXXConstructorDecl *CopyConstructor =
+ LookupCopyingConstructor(BaseClassDecl, Quals))
+ ExceptSpec.CalledDecl(Base.getLocStart(), CopyConstructor);
+ }
+ for (const auto &Base : ClassDecl->vbases()) {
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
+ if (CXXConstructorDecl *CopyConstructor =
+ LookupCopyingConstructor(BaseClassDecl, Quals))
+ ExceptSpec.CalledDecl(Base.getLocStart(), CopyConstructor);
+ }
+ for (const auto *Field : ClassDecl->fields()) {
+ QualType FieldType = Context.getBaseElementType(Field->getType());
+ if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
+ if (CXXConstructorDecl *CopyConstructor =
+ LookupCopyingConstructor(FieldClassDecl,
+ Quals | FieldType.getCVRQualifiers()))
+ ExceptSpec.CalledDecl(Field->getLocation(), CopyConstructor);
+ }
+ }
+
+ return ExceptSpec;
+}
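+
+ // An illustrative sketch (type names here are hypothetical): the computed
+ // specification is the union of everything the implicit copy constructor
+ // may call, so given
+ //
+ //   struct B { B(const B &) noexcept; };
+ //   struct D : B { std::string s; };
+ //
+ // D's implicit copy constructor is not noexcept, because the specification
+ // must also cover std::string's potentially-throwing copy constructor.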
+
+CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
+ CXXRecordDecl *ClassDecl) {
+ // C++ [class.copy]p4:
+ // If the class definition does not explicitly declare a copy
+ // constructor, one is declared implicitly.
+ assert(ClassDecl->needsImplicitCopyConstructor());
+
+ DeclaringSpecialMember DSM(*this, ClassDecl, CXXCopyConstructor);
+ if (DSM.isAlreadyBeingDeclared())
+ return nullptr;
+
+ QualType ClassType = Context.getTypeDeclType(ClassDecl);
+ QualType ArgType = ClassType;
+ bool Const = ClassDecl->implicitCopyConstructorHasConstParam();
+ if (Const)
+ ArgType = ArgType.withConst();
+ ArgType = Context.getLValueReferenceType(ArgType);
+
+ bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl,
+ CXXCopyConstructor,
+ Const);
+
+ DeclarationName Name
+ = Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(ClassType));
+ SourceLocation ClassLoc = ClassDecl->getLocation();
+ DeclarationNameInfo NameInfo(Name, ClassLoc);
+
+ // An implicitly-declared copy constructor is an inline public
+ // member of its class.
+ CXXConstructorDecl *CopyConstructor = CXXConstructorDecl::Create(
+ Context, ClassDecl, ClassLoc, NameInfo, QualType(), /*TInfo=*/nullptr,
+ /*isExplicit=*/false, /*isInline=*/true, /*isImplicitlyDeclared=*/true,
+ Constexpr);
+ CopyConstructor->setAccess(AS_public);
+ CopyConstructor->setDefaulted();
+
+ if (getLangOpts().CUDA) {
+ inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXCopyConstructor,
+ CopyConstructor,
+ /* ConstRHS */ Const,
+ /* Diagnose */ false);
+ }
+
+ // Build an exception specification pointing back at this member.
+ FunctionProtoType::ExtProtoInfo EPI =
+ getImplicitMethodEPI(*this, CopyConstructor);
+ CopyConstructor->setType(
+ Context.getFunctionType(Context.VoidTy, ArgType, EPI));
+
+ // Add the parameter to the constructor.
+ ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyConstructor,
+ ClassLoc, ClassLoc,
+ /*IdentifierInfo=*/nullptr,
+ ArgType, /*TInfo=*/nullptr,
+ SC_None, nullptr);
+ CopyConstructor->setParams(FromParam);
+
+ CopyConstructor->setTrivial(
+ ClassDecl->needsOverloadResolutionForCopyConstructor()
+ ? SpecialMemberIsTrivial(CopyConstructor, CXXCopyConstructor)
+ : ClassDecl->hasTrivialCopyConstructor());
+
+ if (ShouldDeleteSpecialMember(CopyConstructor, CXXCopyConstructor))
+ SetDeclDeleted(CopyConstructor, ClassLoc);
+
+ // Note that we have declared this constructor.
+ ++ASTContext::NumImplicitCopyConstructorsDeclared;
+
+ if (Scope *S = getScopeForContext(ClassDecl))
+ PushOnScopeChains(CopyConstructor, S, false);
+ ClassDecl->addDecl(CopyConstructor);
+
+ return CopyConstructor;
+}
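+
+ // For reference (hypothetical classes): the parameter is only const when
+ // every subobject can be copied from a const source.
+ //
+ //   struct M { M(M &); };   // copy constructor takes a non-const reference
+ //   struct X { M m; };      // implicit copy ctor is X(X &)
+ //   struct Y { int i; };    // implicit copy ctor is Y(const Y &)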
+
+void Sema::DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
+ CXXConstructorDecl *CopyConstructor) {
+ assert((CopyConstructor->isDefaulted() &&
+ CopyConstructor->isCopyConstructor() &&
+ !CopyConstructor->doesThisDeclarationHaveABody() &&
+ !CopyConstructor->isDeleted()) &&
+ "DefineImplicitCopyConstructor - call it for implicit copy ctor");
+
+ CXXRecordDecl *ClassDecl = CopyConstructor->getParent();
+ assert(ClassDecl && "DefineImplicitCopyConstructor - invalid constructor");
+
+ // C++11 [class.copy]p7:
+ // The [definition of an implicitly declared copy constructor] is
+ // deprecated if the class has a user-declared copy assignment operator
+ // or a user-declared destructor.
+ if (getLangOpts().CPlusPlus11 && CopyConstructor->isImplicit())
+ diagnoseDeprecatedCopyOperation(*this, CopyConstructor, CurrentLocation);
+
+ SynthesizedFunctionScope Scope(*this, CopyConstructor);
+ DiagnosticErrorTrap Trap(Diags);
+
+ if (SetCtorInitializers(CopyConstructor, /*AnyErrors=*/false) ||
+ Trap.hasErrorOccurred()) {
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXCopyConstructor << Context.getTagDeclType(ClassDecl);
+ CopyConstructor->setInvalidDecl();
+ } else {
+ SourceLocation Loc = CopyConstructor->getLocEnd().isValid()
+ ? CopyConstructor->getLocEnd()
+ : CopyConstructor->getLocation();
+ Sema::CompoundScopeRAII CompoundScope(*this);
+ CopyConstructor->setBody(
+ ActOnCompoundStmt(Loc, Loc, None, /*isStmtExpr=*/false).getAs<Stmt>());
+ }
+
+ // The exception specification is needed because we are defining the
+ // function.
+ ResolveExceptionSpec(CurrentLocation,
+ CopyConstructor->getType()->castAs<FunctionProtoType>());
+
+ CopyConstructor->markUsed(Context);
+ MarkVTableUsed(CurrentLocation, ClassDecl);
+
+ if (ASTMutationListener *L = getASTMutationListener()) {
+ L->CompletedImplicitDefinition(CopyConstructor);
+ }
+}
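+
+ // A sketch of the deprecation noted above (hypothetical class):
+ //
+ //   struct S { ~S(); };
+ //   S a;
+ //   S b = a;  // uses the implicit copy constructor; deprecated in C++11
+ //             // because S has a user-declared destructor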
+
+Sema::ImplicitExceptionSpecification
+Sema::ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD) {
+ CXXRecordDecl *ClassDecl = MD->getParent();
+
+ // C++ [except.spec]p14:
+ // An implicitly declared special member function (Clause 12) shall have an
+ // exception-specification. [...]
+ ImplicitExceptionSpecification ExceptSpec(*this);
+ if (ClassDecl->isInvalidDecl())
+ return ExceptSpec;
+
+ // Direct base-class constructors.
+ for (const auto &B : ClassDecl->bases()) {
+ if (B.isVirtual()) // Handled below.
+ continue;
+
+ if (const RecordType *BaseType = B.getType()->getAs<RecordType>()) {
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ CXXConstructorDecl *Constructor =
+ LookupMovingConstructor(BaseClassDecl, 0);
+ // If this is a deleted function, add it anyway; whether that is
+ // conformant with the standard is unclear, and it may not matter.
+ if (Constructor)
+ ExceptSpec.CalledDecl(B.getLocStart(), Constructor);
+ }
+ }
+
+ // Virtual base-class constructors.
+ for (const auto &B : ClassDecl->vbases()) {
+ if (const RecordType *BaseType = B.getType()->getAs<RecordType>()) {
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ CXXConstructorDecl *Constructor =
+ LookupMovingConstructor(BaseClassDecl, 0);
+ // If this is a deleted function, add it anyway; whether that is
+ // conformant with the standard is unclear, and it may not matter.
+ if (Constructor)
+ ExceptSpec.CalledDecl(B.getLocStart(), Constructor);
+ }
+ }
+
+ // Field constructors.
+ for (const auto *F : ClassDecl->fields()) {
+ QualType FieldType = Context.getBaseElementType(F->getType());
+ if (CXXRecordDecl *FieldRecDecl = FieldType->getAsCXXRecordDecl()) {
+ CXXConstructorDecl *Constructor =
+ LookupMovingConstructor(FieldRecDecl, FieldType.getCVRQualifiers());
+ // If this is a deleted function, add it anyway; whether that is
+ // conformant with the standard is unclear, and it may not matter,
+ // since a deleted constructor never actually gets called. The program
+ // might simply be ill-formed because the exception specification
+ // refers to a deleted function here.
+ if (Constructor)
+ ExceptSpec.CalledDecl(F->getLocation(), Constructor);
+ }
+ }
+
+ return ExceptSpec;
+}
+
+CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
+ CXXRecordDecl *ClassDecl) {
+ assert(ClassDecl->needsImplicitMoveConstructor());
+
+ DeclaringSpecialMember DSM(*this, ClassDecl, CXXMoveConstructor);
+ if (DSM.isAlreadyBeingDeclared())
+ return nullptr;
+
+ QualType ClassType = Context.getTypeDeclType(ClassDecl);
+ QualType ArgType = Context.getRValueReferenceType(ClassType);
+
+ bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl,
+ CXXMoveConstructor,
+ false);
+
+ DeclarationName Name
+ = Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(ClassType));
+ SourceLocation ClassLoc = ClassDecl->getLocation();
+ DeclarationNameInfo NameInfo(Name, ClassLoc);
+
+ // C++11 [class.copy]p11:
+ // An implicitly-declared copy/move constructor is an inline public
+ // member of its class.
+ CXXConstructorDecl *MoveConstructor = CXXConstructorDecl::Create(
+ Context, ClassDecl, ClassLoc, NameInfo, QualType(), /*TInfo=*/nullptr,
+ /*isExplicit=*/false, /*isInline=*/true, /*isImplicitlyDeclared=*/true,
+ Constexpr);
+ MoveConstructor->setAccess(AS_public);
+ MoveConstructor->setDefaulted();
+
+ if (getLangOpts().CUDA) {
+ inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXMoveConstructor,
+ MoveConstructor,
+ /* ConstRHS */ false,
+ /* Diagnose */ false);
+ }
+
+ // Build an exception specification pointing back at this member.
+ FunctionProtoType::ExtProtoInfo EPI =
+ getImplicitMethodEPI(*this, MoveConstructor);
+ MoveConstructor->setType(
+ Context.getFunctionType(Context.VoidTy, ArgType, EPI));
+
+ // Add the parameter to the constructor.
+ ParmVarDecl *FromParam = ParmVarDecl::Create(Context, MoveConstructor,
+ ClassLoc, ClassLoc,
+ /*IdentifierInfo=*/nullptr,
+ ArgType, /*TInfo=*/nullptr,
+ SC_None, nullptr);
+ MoveConstructor->setParams(FromParam);
+
+ MoveConstructor->setTrivial(
+ ClassDecl->needsOverloadResolutionForMoveConstructor()
+ ? SpecialMemberIsTrivial(MoveConstructor, CXXMoveConstructor)
+ : ClassDecl->hasTrivialMoveConstructor());
+
+ if (ShouldDeleteSpecialMember(MoveConstructor, CXXMoveConstructor)) {
+ ClassDecl->setImplicitMoveConstructorIsDeleted();
+ SetDeclDeleted(MoveConstructor, ClassLoc);
+ }
+
+ // Note that we have declared this constructor.
+ ++ASTContext::NumImplicitMoveConstructorsDeclared;
+
+ if (Scope *S = getScopeForContext(ClassDecl))
+ PushOnScopeChains(MoveConstructor, S, false);
+ ClassDecl->addDecl(MoveConstructor);
+
+ return MoveConstructor;
+}
+
+void Sema::DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
+ CXXConstructorDecl *MoveConstructor) {
+ assert((MoveConstructor->isDefaulted() &&
+ MoveConstructor->isMoveConstructor() &&
+ !MoveConstructor->doesThisDeclarationHaveABody() &&
+ !MoveConstructor->isDeleted()) &&
+ "DefineImplicitMoveConstructor - call it for implicit move ctor");
+
+ CXXRecordDecl *ClassDecl = MoveConstructor->getParent();
+ assert(ClassDecl && "DefineImplicitMoveConstructor - invalid constructor");
+
+ SynthesizedFunctionScope Scope(*this, MoveConstructor);
+ DiagnosticErrorTrap Trap(Diags);
+
+ if (SetCtorInitializers(MoveConstructor, /*AnyErrors=*/false) ||
+ Trap.hasErrorOccurred()) {
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXMoveConstructor << Context.getTagDeclType(ClassDecl);
+ MoveConstructor->setInvalidDecl();
+ } else {
+ SourceLocation Loc = MoveConstructor->getLocEnd().isValid()
+ ? MoveConstructor->getLocEnd()
+ : MoveConstructor->getLocation();
+ Sema::CompoundScopeRAII CompoundScope(*this);
+ MoveConstructor->setBody(ActOnCompoundStmt(
+ Loc, Loc, None, /*isStmtExpr=*/ false).getAs<Stmt>());
+ }
+
+ // The exception specification is needed because we are defining the
+ // function.
+ ResolveExceptionSpec(CurrentLocation,
+ MoveConstructor->getType()->castAs<FunctionProtoType>());
+
+ MoveConstructor->markUsed(Context);
+ MarkVTableUsed(CurrentLocation, ClassDecl);
+
+ if (ASTMutationListener *L = getASTMutationListener()) {
+ L->CompletedImplicitDefinition(MoveConstructor);
+ }
+}
+
+bool Sema::isImplicitlyDeleted(FunctionDecl *FD) {
+ return FD->isDeleted() && FD->isDefaulted() && isa<CXXMethodDecl>(FD);
+}
+
+void Sema::DefineImplicitLambdaToFunctionPointerConversion(
+ SourceLocation CurrentLocation,
+ CXXConversionDecl *Conv) {
+ CXXRecordDecl *Lambda = Conv->getParent();
+ CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
+ // If we are defining a specialization of a conversion to function-ptr,
+ // cache the deduced template arguments for this specialization so that
+ // we can use them to retrieve the corresponding call-operator and
+ // static-invoker.
+ const TemplateArgumentList *DeducedTemplateArgs = nullptr;
+
+ // Retrieve the corresponding call-operator specialization.
+ if (Lambda->isGenericLambda()) {
+ assert(Conv->isFunctionTemplateSpecialization());
+ FunctionTemplateDecl *CallOpTemplate =
+ CallOp->getDescribedFunctionTemplate();
+ DeducedTemplateArgs = Conv->getTemplateSpecializationArgs();
+ void *InsertPos = nullptr;
+ FunctionDecl *CallOpSpec = CallOpTemplate->findSpecialization(
+ DeducedTemplateArgs->asArray(),
+ InsertPos);
+ assert(CallOpSpec &&
+ "Conversion operator must have a corresponding call operator");
+ CallOp = cast<CXXMethodDecl>(CallOpSpec);
+ }
+ // Mark the call operator referenced (and add to pending instantiations
+ // if necessary).
+ // For both the conversion and static-invoker template specializations,
+ // we construct their bodies in this function, so there is no need to add
+ // them to the PendingInstantiations.
+ MarkFunctionReferenced(CurrentLocation, CallOp);
+
+ SynthesizedFunctionScope Scope(*this, Conv);
+ DiagnosticErrorTrap Trap(Diags);
+
+ // Retrieve the static invoker...
+ CXXMethodDecl *Invoker = Lambda->getLambdaStaticInvoker();
+ // ... and get the corresponding specialization for a generic lambda.
+ if (Lambda->isGenericLambda()) {
+ assert(DeducedTemplateArgs &&
+ "Must have deduced template arguments from Conversion Operator");
+ FunctionTemplateDecl *InvokeTemplate =
+ Invoker->getDescribedFunctionTemplate();
+ void *InsertPos = nullptr;
+ FunctionDecl *InvokeSpec = InvokeTemplate->findSpecialization(
+ DeducedTemplateArgs->asArray(),
+ InsertPos);
+ assert(InvokeSpec &&
+ "Must have a corresponding static invoker specialization");
+ Invoker = cast<CXXMethodDecl>(InvokeSpec);
+ }
+ // Construct the body of the conversion function { return __invoke; }.
+ Expr *FunctionRef = BuildDeclRefExpr(Invoker, Invoker->getType(),
+ VK_LValue, Conv->getLocation()).get();
+ assert(FunctionRef && "Can't refer to __invoke function?");
+ Stmt *Return = BuildReturnStmt(Conv->getLocation(), FunctionRef).get();
+ Conv->setBody(new (Context) CompoundStmt(Context, Return,
+ Conv->getLocation(),
+ Conv->getLocation()));
+
+ Conv->markUsed(Context);
+ Conv->setReferenced();
+
+ // Fill in the __invoke function with a dummy implementation. IR generation
+ // will fill in the actual details.
+ Invoker->markUsed(Context);
+ Invoker->setReferenced();
+ Invoker->setBody(new (Context) CompoundStmt(Conv->getLocation()));
+
+ if (ASTMutationListener *L = getASTMutationListener()) {
+ L->CompletedImplicitDefinition(Conv);
+ L->CompletedImplicitDefinition(Invoker);
+ }
+}
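+
+ // Conceptually, for
+ //
+ //   auto L = [](int n) { return n; };
+ //   int (*fp)(int) = L;
+ //
+ // the conversion operator synthesized above simply returns the address of
+ // the lambda's static __invoke function; IR generation later gives
+ // __invoke a real body that forwards to the call operator.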
+
+void Sema::DefineImplicitLambdaToBlockPointerConversion(
+ SourceLocation CurrentLocation,
+ CXXConversionDecl *Conv)
+{
+ assert(!Conv->getParent()->isGenericLambda());
+
+ Conv->markUsed(Context);
+
+ SynthesizedFunctionScope Scope(*this, Conv);
+ DiagnosticErrorTrap Trap(Diags);
+
+ // Copy-initialize the lambda object as needed to capture it.
+ Expr *This = ActOnCXXThis(CurrentLocation).get();
+ Expr *DerefThis = CreateBuiltinUnaryOp(CurrentLocation, UO_Deref, This).get();
+
+ ExprResult BuildBlock = BuildBlockForLambdaConversion(CurrentLocation,
+ Conv->getLocation(),
+ Conv, DerefThis);
+
+ // If we're not under ARC, make sure we still get the _Block_copy/autorelease
+ // behavior. Note that only the general conversion function does this
+ // (since it's unusable otherwise); in the case where we inline the
+ // block literal, it has block literal lifetime semantics.
+ if (!BuildBlock.isInvalid() && !getLangOpts().ObjCAutoRefCount)
+ BuildBlock = ImplicitCastExpr::Create(Context, BuildBlock.get()->getType(),
+ CK_CopyAndAutoreleaseBlockObject,
+ BuildBlock.get(), nullptr, VK_RValue);
+
+ if (BuildBlock.isInvalid()) {
+ Diag(CurrentLocation, diag::note_lambda_to_block_conv);
+ Conv->setInvalidDecl();
+ return;
+ }
+
+ // Create the return statement that returns the block from the conversion
+ // function.
+ StmtResult Return = BuildReturnStmt(Conv->getLocation(), BuildBlock.get());
+ if (Return.isInvalid()) {
+ Diag(CurrentLocation, diag::note_lambda_to_block_conv);
+ Conv->setInvalidDecl();
+ return;
+ }
+
+ // Set the body of the conversion function.
+ Stmt *ReturnS = Return.get();
+ Conv->setBody(new (Context) CompoundStmt(Context, ReturnS,
+ Conv->getLocation(),
+ Conv->getLocation()));
+
+ // We're done; notify the mutation listener, if any.
+ if (ASTMutationListener *L = getASTMutationListener()) {
+ L->CompletedImplicitDefinition(Conv);
+ }
+}
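+
+ // A sketch of the conversion being defined (Objective-C++ with blocks):
+ //
+ //   auto L = [] { };
+ //   void (^blk)(void) = L;  // copy-initializes a block from the lambda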
+
+/// \brief Determine whether the given list of arguments contains exactly one
+/// "real" (non-default) argument.
+static bool hasOneRealArgument(MultiExprArg Args) {
+ switch (Args.size()) {
+ case 0:
+ return false;
+
+ default:
+ if (!Args[1]->isDefaultArgument())
+ return false;
+
+ // fall through
+ case 1:
+ return !Args[0]->isDefaultArgument();
+ }
+
+ return false;
+}
+
+ExprResult
+Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
+ CXXConstructorDecl *Constructor,
+ MultiExprArg ExprArgs,
+ bool HadMultipleCandidates,
+ bool IsListInitialization,
+ bool IsStdInitListInitialization,
+ bool RequiresZeroInit,
+ unsigned ConstructKind,
+ SourceRange ParenRange) {
+ bool Elidable = false;
+
+ // C++0x [class.copy]p34:
+ // When certain criteria are met, an implementation is allowed to
+ // omit the copy/move construction of a class object, even if the
+ // copy/move constructor and/or destructor for the object have
+ // side effects. [...]
+ // - when a temporary class object that has not been bound to a
+ // reference (12.2) would be copied/moved to a class object
+ // with the same cv-unqualified type, the copy/move operation
+ // can be omitted by constructing the temporary object
+ // directly into the target of the omitted copy/move
+ if (ConstructKind == CXXConstructExpr::CK_Complete &&
+ Constructor->isCopyOrMoveConstructor() && hasOneRealArgument(ExprArgs)) {
+ Expr *SubExpr = ExprArgs[0];
+ Elidable = SubExpr->isTemporaryObject(Context, Constructor->getParent());
+ }
+
+ return BuildCXXConstructExpr(ConstructLoc, DeclInitType, Constructor,
+ Elidable, ExprArgs, HadMultipleCandidates,
+ IsListInitialization,
+ IsStdInitListInitialization, RequiresZeroInit,
+ ConstructKind, ParenRange);
+}
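+
+ // A sketch of the elidable case (hypothetical type):
+ //
+ //   struct T { T(); T(const T &); };
+ //   T t = T();  // the temporary is a candidate for copy elision, so the
+ //               // CXXConstructExpr built here is marked Elidable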
+
+/// BuildCXXConstructExpr - Creates a complete call to a constructor,
+/// including handling of its default argument expressions.
+ExprResult
+Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
+ CXXConstructorDecl *Constructor, bool Elidable,
+ MultiExprArg ExprArgs,
+ bool HadMultipleCandidates,
+ bool IsListInitialization,
+ bool IsStdInitListInitialization,
+ bool RequiresZeroInit,
+ unsigned ConstructKind,
+ SourceRange ParenRange) {
+ MarkFunctionReferenced(ConstructLoc, Constructor);
+ return CXXConstructExpr::Create(
+ Context, DeclInitType, ConstructLoc, Constructor, Elidable, ExprArgs,
+ HadMultipleCandidates, IsListInitialization, IsStdInitListInitialization,
+ RequiresZeroInit,
+ static_cast<CXXConstructExpr::ConstructionKind>(ConstructKind),
+ ParenRange);
+}
+
+ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
+ assert(Field->hasInClassInitializer());
+
+ // If we already have the in-class initializer nothing needs to be done.
+ if (Field->getInClassInitializer())
+ return CXXDefaultInitExpr::Create(Context, Loc, Field);
+
+ // Maybe we haven't instantiated the in-class initializer. Go check the
+ // pattern FieldDecl to see if it has one.
+ CXXRecordDecl *ParentRD = cast<CXXRecordDecl>(Field->getParent());
+
+ if (isTemplateInstantiation(ParentRD->getTemplateSpecializationKind())) {
+ CXXRecordDecl *ClassPattern = ParentRD->getTemplateInstantiationPattern();
+ DeclContext::lookup_result Lookup =
+ ClassPattern->lookup(Field->getDeclName());
+ assert(Lookup.size() == 1);
+ FieldDecl *Pattern = cast<FieldDecl>(Lookup[0]);
+ if (InstantiateInClassInitializer(Loc, Field, Pattern,
+ getTemplateInstantiationArgs(Field)))
+ return ExprError();
+ return CXXDefaultInitExpr::Create(Context, Loc, Field);
+ }
+
+ // DR1351:
+ // If the brace-or-equal-initializer of a non-static data member
+ // invokes a defaulted default constructor of its class or of an
+ // enclosing class in a potentially evaluated subexpression, the
+ // program is ill-formed.
+ //
+ // This resolution is unworkable: the exception specification of the
+ // default constructor can be needed in an unevaluated context, in
+ // particular, in the operand of a noexcept-expression, and we can be
+ // unable to compute an exception specification for an enclosed class.
+ //
+ // Any attempt to resolve the exception specification of a defaulted default
+ // constructor before the initializer is lexically complete will ultimately
+ // come here, at which point we can diagnose it.
+ RecordDecl *OutermostClass = ParentRD->getOuterLexicalRecordContext();
+ if (OutermostClass == ParentRD) {
+ Diag(Field->getLocEnd(), diag::err_in_class_initializer_not_yet_parsed)
+ << ParentRD << Field;
+ } else {
+ Diag(Field->getLocEnd(),
+ diag::err_in_class_initializer_not_yet_parsed_outer_class)
+ << ParentRD << OutermostClass << Field;
+ }
+
+ return ExprError();
+}
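+
+ // The unworkable case described above looks roughly like this
+ // (hypothetical class):
+ //
+ //   struct S {
+ //     int n = noexcept(S());  // needs the exception specification of
+ //   };                        // S::S(), which needs this very initializer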
+
+void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
+ if (VD->isInvalidDecl()) return;
+
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(Record->getDecl());
+ if (ClassDecl->isInvalidDecl()) return;
+ if (ClassDecl->hasIrrelevantDestructor()) return;
+ if (ClassDecl->isDependentContext()) return;
+
+ CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
+ MarkFunctionReferenced(VD->getLocation(), Destructor);
+ CheckDestructorAccess(VD->getLocation(), Destructor,
+ PDiag(diag::err_access_dtor_var)
+ << VD->getDeclName()
+ << VD->getType());
+ DiagnoseUseOfDecl(Destructor, VD->getLocation());
+
+ if (Destructor->isTrivial()) return;
+ if (!VD->hasGlobalStorage()) return;
+
+ // Emit warning for non-trivial dtor in global scope (a real global,
+ // class-static, function-static).
+ Diag(VD->getLocation(), diag::warn_exit_time_destructor);
+
+ // TODO: this should be re-enabled for static locals by !CXAAtExit
+ if (!VD->isStaticLocal())
+ Diag(VD->getLocation(), diag::warn_global_destructor);
+}
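+
+ // For instance (hypothetical class):
+ //
+ //   struct D { ~D(); };
+ //   D global_d;                // warns: exit-time and global destructor
+ //   void f() { static D d; }   // static local: only the exit-time warning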
+
+/// \brief Given a constructor and the set of arguments provided for the
+/// constructor, convert the arguments and add any required default arguments
+/// to form a proper call to this constructor.
+///
+/// \returns true if an error occurred, false otherwise.
+bool
+Sema::CompleteConstructorCall(CXXConstructorDecl *Constructor,
+ MultiExprArg ArgsPtr,
+ SourceLocation Loc,
+ SmallVectorImpl<Expr*> &ConvertedArgs,
+ bool AllowExplicit,
+ bool IsListInitialization) {
+ // FIXME: This duplicates a lot of code from Sema::ConvertArgumentsForCall.
+ unsigned NumArgs = ArgsPtr.size();
+ Expr **Args = ArgsPtr.data();
+
+ const FunctionProtoType *Proto
+ = Constructor->getType()->getAs<FunctionProtoType>();
+ assert(Proto && "Constructor without a prototype?");
+ unsigned NumParams = Proto->getNumParams();
+
+ // If too few arguments are available, we'll fill in the rest with defaults.
+ if (NumArgs < NumParams)
+ ConvertedArgs.reserve(NumParams);
+ else
+ ConvertedArgs.reserve(NumArgs);
+
+ VariadicCallType CallType =
+ Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;
+ SmallVector<Expr *, 8> AllArgs;
+ bool Invalid = GatherArgumentsForCall(Loc, Constructor,
+ Proto, 0,
+ llvm::makeArrayRef(Args, NumArgs),
+ AllArgs,
+ CallType, AllowExplicit,
+ IsListInitialization);
+ ConvertedArgs.append(AllArgs.begin(), AllArgs.end());
+
+ DiagnoseSentinelCalls(Constructor, Loc, AllArgs);
+
+ CheckConstructorCall(Constructor,
+ llvm::makeArrayRef(AllArgs.data(), AllArgs.size()),
+ Proto, Loc);
+
+ return Invalid;
+}
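+
+ // A sketch (hypothetical constructor): given
+ //
+ //   struct W { W(int n, const char *tag = "w"); };
+ //   W w(1);
+ //
+ // ConvertedArgs ends up holding the converted '1' plus the default
+ // argument expression for 'tag'.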
+
+static inline bool
+CheckOperatorNewDeleteDeclarationScope(Sema &SemaRef,
+ const FunctionDecl *FnDecl) {
+ const DeclContext *DC = FnDecl->getDeclContext()->getRedeclContext();
+ if (isa<NamespaceDecl>(DC)) {
+ return SemaRef.Diag(FnDecl->getLocation(),
+ diag::err_operator_new_delete_declared_in_namespace)
+ << FnDecl->getDeclName();
+ }
+
+ if (isa<TranslationUnitDecl>(DC) &&
+ FnDecl->getStorageClass() == SC_Static) {
+ return SemaRef.Diag(FnDecl->getLocation(),
+ diag::err_operator_new_delete_declared_static)
+ << FnDecl->getDeclName();
+ }
+
+ return false;
+}
+
+static inline bool
+CheckOperatorNewDeleteTypes(Sema &SemaRef, const FunctionDecl *FnDecl,
+ CanQualType ExpectedResultType,
+ CanQualType ExpectedFirstParamType,
+ unsigned DependentParamTypeDiag,
+ unsigned InvalidParamTypeDiag) {
+ QualType ResultType =
+ FnDecl->getType()->getAs<FunctionType>()->getReturnType();
+
+ // Check that the result type is not dependent.
+ if (ResultType->isDependentType())
+ return SemaRef.Diag(FnDecl->getLocation(),
+ diag::err_operator_new_delete_dependent_result_type)
+ << FnDecl->getDeclName() << ExpectedResultType;
+
+ // Check that the result type is what we expect.
+ if (SemaRef.Context.getCanonicalType(ResultType) != ExpectedResultType)
+ return SemaRef.Diag(FnDecl->getLocation(),
+ diag::err_operator_new_delete_invalid_result_type)
+ << FnDecl->getDeclName() << ExpectedResultType;
+
+ // A function template must have at least 2 parameters.
+ if (FnDecl->getDescribedFunctionTemplate() && FnDecl->getNumParams() < 2)
+ return SemaRef.Diag(FnDecl->getLocation(),
+ diag::err_operator_new_delete_template_too_few_parameters)
+ << FnDecl->getDeclName();
+
+ // The function decl must have at least 1 parameter.
+ if (FnDecl->getNumParams() == 0)
+ return SemaRef.Diag(FnDecl->getLocation(),
+ diag::err_operator_new_delete_too_few_parameters)
+ << FnDecl->getDeclName();
+
+ // Check the first parameter type is not dependent.
+ QualType FirstParamType = FnDecl->getParamDecl(0)->getType();
+ if (FirstParamType->isDependentType())
+ return SemaRef.Diag(FnDecl->getLocation(), DependentParamTypeDiag)
+ << FnDecl->getDeclName() << ExpectedFirstParamType;
+
+ // Check that the first parameter type is what we expect.
+ if (SemaRef.Context.getCanonicalType(FirstParamType).getUnqualifiedType() !=
+ ExpectedFirstParamType)
+ return SemaRef.Diag(FnDecl->getLocation(), InvalidParamTypeDiag)
+ << FnDecl->getDeclName() << ExpectedFirstParamType;
+
+ return false;
+}
+
+static bool
+CheckOperatorNewDeclaration(Sema &SemaRef, const FunctionDecl *FnDecl) {
+ // C++ [basic.stc.dynamic.allocation]p1:
+ // A program is ill-formed if an allocation function is declared in a
+ // namespace scope other than global scope or declared static in global
+ // scope.
+ if (CheckOperatorNewDeleteDeclarationScope(SemaRef, FnDecl))
+ return true;
+
+ CanQualType SizeTy =
+ SemaRef.Context.getCanonicalType(SemaRef.Context.getSizeType());
+
+ // C++ [basic.stc.dynamic.allocation]p1:
+ // The return type shall be void*. The first parameter shall have type
+ // std::size_t.
+ if (CheckOperatorNewDeleteTypes(SemaRef, FnDecl, SemaRef.Context.VoidPtrTy,
+ SizeTy,
+ diag::err_operator_new_dependent_param_type,
+ diag::err_operator_new_param_type))
+ return true;
+
+ // C++ [basic.stc.dynamic.allocation]p1:
+ // The first parameter shall not have an associated default argument.
+ if (FnDecl->getParamDecl(0)->hasDefaultArg())
+ return SemaRef.Diag(FnDecl->getLocation(),
+ diag::err_operator_new_default_arg)
+ << FnDecl->getDeclName() << FnDecl->getParamDecl(0)->getDefaultArgRange();
+
+ return false;
+}
+
+static bool
+CheckOperatorDeleteDeclaration(Sema &SemaRef, FunctionDecl *FnDecl) {
+ // C++ [basic.stc.dynamic.deallocation]p1:
+ // A program is ill-formed if deallocation functions are declared in a
+ // namespace scope other than global scope or declared static in global
+ // scope.
+ if (CheckOperatorNewDeleteDeclarationScope(SemaRef, FnDecl))
+ return true;
+
+ // C++ [basic.stc.dynamic.deallocation]p2:
+ // Each deallocation function shall return void and its first parameter
+ // shall be void*.
+ if (CheckOperatorNewDeleteTypes(SemaRef, FnDecl, SemaRef.Context.VoidTy,
+ SemaRef.Context.VoidPtrTy,
+ diag::err_operator_delete_dependent_param_type,
+ diag::err_operator_delete_param_type))
+ return true;
+
+ return false;
+}
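+
+ // Declarations exercising the checks above (illustrative only):
+ //
+ //   void *operator new(std::size_t);               // ok
+ //   static void *operator new(std::size_t);        // error: static at
+ //                                                  // global scope
+ //   namespace N { void operator delete(void *); }  // error: namespace scope
+ //   void *operator new(std::size_t sz = 16);       // error: default argument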
+
+/// CheckOverloadedOperatorDeclaration - Check whether the declaration
+/// of this overloaded operator is well-formed. If so, returns false;
+/// otherwise, emits appropriate diagnostics and returns true.
+bool Sema::CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl) {
+ assert(FnDecl && FnDecl->isOverloadedOperator() &&
+ "Expected an overloaded operator declaration");
+
+ OverloadedOperatorKind Op = FnDecl->getOverloadedOperator();
+
+ // C++ [over.oper]p5:
+ // The allocation and deallocation functions, operator new,
+ // operator new[], operator delete and operator delete[], are
+ // described completely in 3.7.3. The attributes and restrictions
+ // found in the rest of this subclause do not apply to them unless
+ // explicitly stated in 3.7.3.
+ if (Op == OO_Delete || Op == OO_Array_Delete)
+ return CheckOperatorDeleteDeclaration(*this, FnDecl);
+
+ if (Op == OO_New || Op == OO_Array_New)
+ return CheckOperatorNewDeclaration(*this, FnDecl);
+
+ // C++ [over.oper]p6:
+ // An operator function shall either be a non-static member
+ // function or be a non-member function and have at least one
+ // parameter whose type is a class, a reference to a class, an
+ // enumeration, or a reference to an enumeration.
+ if (CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(FnDecl)) {
+ if (MethodDecl->isStatic())
+ return Diag(FnDecl->getLocation(),
+ diag::err_operator_overload_static) << FnDecl->getDeclName();
+ } else {
+ bool ClassOrEnumParam = false;
+ for (auto Param : FnDecl->params()) {
+ QualType ParamType = Param->getType().getNonReferenceType();
+ if (ParamType->isDependentType() || ParamType->isRecordType() ||
+ ParamType->isEnumeralType()) {
+ ClassOrEnumParam = true;
+ break;
+ }
+ }
+
+ if (!ClassOrEnumParam)
+ return Diag(FnDecl->getLocation(),
+ diag::err_operator_overload_needs_class_or_enum)
+ << FnDecl->getDeclName();
+ }
+
+ // C++ [over.oper]p8:
+ // An operator function cannot have default arguments (8.3.6),
+ // except where explicitly stated below.
+ //
+ // Only the function-call operator allows default arguments
+ // (C++ [over.call]p1).
+ if (Op != OO_Call) {
+ for (auto Param : FnDecl->params()) {
+ if (Param->hasDefaultArg())
+ return Diag(Param->getLocation(),
+ diag::err_operator_overload_default_arg)
+ << FnDecl->getDeclName() << Param->getDefaultArgRange();
+ }
+ }
+
+ static const bool OperatorUses[NUM_OVERLOADED_OPERATORS][3] = {
+ { false, false, false }
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ , { Unary, Binary, MemberOnly }
+#include "clang/Basic/OperatorKinds.def"
+ };
+
+ bool CanBeUnaryOperator = OperatorUses[Op][0];
+ bool CanBeBinaryOperator = OperatorUses[Op][1];
+ bool MustBeMemberOperator = OperatorUses[Op][2];
+
+ // C++ [over.oper]p8:
+ // [...] Operator functions cannot have more or fewer parameters
+ // than the number required for the corresponding operator, as
+ // described in the rest of this subclause.
+ unsigned NumParams = FnDecl->getNumParams()
+ + (isa<CXXMethodDecl>(FnDecl)? 1 : 0);
+ if (Op != OO_Call &&
+ ((NumParams == 1 && !CanBeUnaryOperator) ||
+ (NumParams == 2 && !CanBeBinaryOperator) ||
+ (NumParams < 1) || (NumParams > 2))) {
+ // We have the wrong number of parameters.
+ unsigned ErrorKind;
+ if (CanBeUnaryOperator && CanBeBinaryOperator) {
+ ErrorKind = 2; // 2 -> unary or binary.
+ } else if (CanBeUnaryOperator) {
+ ErrorKind = 0; // 0 -> unary
+ } else {
+ assert(CanBeBinaryOperator &&
+ "All non-call overloaded operators are unary or binary!");
+ ErrorKind = 1; // 1 -> binary
+ }
+
+ return Diag(FnDecl->getLocation(), diag::err_operator_overload_must_be)
+ << FnDecl->getDeclName() << NumParams << ErrorKind;
+ }
+
+ // Overloaded operators other than operator() cannot be variadic.
+ if (Op != OO_Call &&
+ FnDecl->getType()->getAs<FunctionProtoType>()->isVariadic()) {
+ return Diag(FnDecl->getLocation(), diag::err_operator_overload_variadic)
+ << FnDecl->getDeclName();
+ }
+
+ // Some operators must be non-static member functions.
+ if (MustBeMemberOperator && !isa<CXXMethodDecl>(FnDecl)) {
+ return Diag(FnDecl->getLocation(),
+ diag::err_operator_overload_must_be_member)
+ << FnDecl->getDeclName();
+ }
+
+ // C++ [over.inc]p1:
+ // The user-defined function called operator++ implements the
+ // prefix and postfix ++ operator. If this function is a member
+ // function with no parameters, or a non-member function with one
+ // parameter of class or enumeration type, it defines the prefix
+ // increment operator ++ for objects of that type. If the function
+ // is a member function with one parameter (which shall be of type
+ // int) or a non-member function with two parameters (the second
+ // of which shall be of type int), it defines the postfix
+ // increment operator ++ for objects of that type.
+ if ((Op == OO_PlusPlus || Op == OO_MinusMinus) && NumParams == 2) {
+ ParmVarDecl *LastParam = FnDecl->getParamDecl(FnDecl->getNumParams() - 1);
+ QualType ParamType = LastParam->getType();
+
+ if (!ParamType->isSpecificBuiltinType(BuiltinType::Int) &&
+ !ParamType->isDependentType())
+ return Diag(LastParam->getLocation(),
+ diag::err_operator_overload_post_incdec_must_be_int)
+ << LastParam->getType() << (Op == OO_MinusMinus);
+ }
+
+ return false;
+}
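+
+ // A few declarations the checks above accept or reject (illustrative):
+ //
+ //   struct P {
+ //     P &operator++();       // prefix: one (implicit) parameter
+ //     P operator++(int);     // postfix: trailing parameter must be int
+ //   };
+ //   P operator+(const P &, const P &);  // ok: non-member, class parameters
+ //   int operator+(int, int);            // error: no class or enum parameter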
+
+/// CheckLiteralOperatorDeclaration - Check whether the declaration
+/// of this literal operator function is well-formed. If so, returns
+/// false; otherwise, emits appropriate diagnostics and returns true.
+bool Sema::CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl) {
+ if (isa<CXXMethodDecl>(FnDecl)) {
+ Diag(FnDecl->getLocation(), diag::err_literal_operator_outside_namespace)
+ << FnDecl->getDeclName();
+ return true;
+ }
+
+ if (FnDecl->isExternC()) {
+ Diag(FnDecl->getLocation(), diag::err_literal_operator_extern_c);
+ return true;
+ }
+
+ bool Valid = false;
+
+ // This might be the definition of a literal operator template.
+ FunctionTemplateDecl *TpDecl = FnDecl->getDescribedFunctionTemplate();
+ // This might be a specialization of a literal operator template.
+ if (!TpDecl)
+ TpDecl = FnDecl->getPrimaryTemplate();
+
+ // template <char...> type operator "" name() and
+ // template <class T, T...> type operator "" name() are the only valid
+ // template signatures, and the only valid signatures with no parameters.
+ if (TpDecl) {
+ if (FnDecl->param_size() == 0) {
+ // Must have one or two template parameters
+ TemplateParameterList *Params = TpDecl->getTemplateParameters();
+ if (Params->size() == 1) {
+ NonTypeTemplateParmDecl *PmDecl =
+ dyn_cast<NonTypeTemplateParmDecl>(Params->getParam(0));
+
+ // The template parameter must be a char parameter pack.
+ if (PmDecl && PmDecl->isTemplateParameterPack() &&
+ Context.hasSameType(PmDecl->getType(), Context.CharTy))
+ Valid = true;
+ } else if (Params->size() == 2) {
+ TemplateTypeParmDecl *PmType =
+ dyn_cast<TemplateTypeParmDecl>(Params->getParam(0));
+ NonTypeTemplateParmDecl *PmArgs =
+ dyn_cast<NonTypeTemplateParmDecl>(Params->getParam(1));
+
+ // The second template parameter must be a parameter pack with the
+ // first template parameter as its type.
+ if (PmType && PmArgs &&
+ !PmType->isTemplateParameterPack() &&
+ PmArgs->isTemplateParameterPack()) {
+ const TemplateTypeParmType *TArgs =
+ PmArgs->getType()->getAs<TemplateTypeParmType>();
+ if (TArgs && TArgs->getDepth() == PmType->getDepth() &&
+ TArgs->getIndex() == PmType->getIndex()) {
+ Valid = true;
+ if (ActiveTemplateInstantiations.empty())
+ Diag(FnDecl->getLocation(),
+ diag::ext_string_literal_operator_template);
+ }
+ }
+ }
+ }
+ } else if (FnDecl->param_size()) {
+ // Check the first parameter
+ FunctionDecl::param_iterator Param = FnDecl->param_begin();
+
+ QualType T = (*Param)->getType().getUnqualifiedType();
+
+ // unsigned long long int, long double, and any character type are allowed
+ // as the only parameters.
+ if (Context.hasSameType(T, Context.UnsignedLongLongTy) ||
+ Context.hasSameType(T, Context.LongDoubleTy) ||
+ Context.hasSameType(T, Context.CharTy) ||
+ Context.hasSameType(T, Context.WideCharTy) ||
+ Context.hasSameType(T, Context.Char16Ty) ||
+ Context.hasSameType(T, Context.Char32Ty)) {
+ if (++Param == FnDecl->param_end())
+ Valid = true;
+ goto FinishedParams;
+ }
+
+ // Otherwise it must be a pointer to const; let's strip those qualifiers.
+ const PointerType *PT = T->getAs<PointerType>();
+ if (!PT)
+ goto FinishedParams;
+ T = PT->getPointeeType();
+ if (!T.isConstQualified() || T.isVolatileQualified())
+ goto FinishedParams;
+ T = T.getUnqualifiedType();
+
+ // Move on to the second parameter.
+ ++Param;
+
+ // If there is no second parameter, the first must be a const char *
+ if (Param == FnDecl->param_end()) {
+ if (Context.hasSameType(T, Context.CharTy))
+ Valid = true;
+ goto FinishedParams;
+ }
+
+ // const char *, const wchar_t*, const char16_t*, and const char32_t*
+ // are allowed as the first parameter to a two-parameter function
+ if (!(Context.hasSameType(T, Context.CharTy) ||
+ Context.hasSameType(T, Context.WideCharTy) ||
+ Context.hasSameType(T, Context.Char16Ty) ||
+ Context.hasSameType(T, Context.Char32Ty)))
+ goto FinishedParams;
+
+ // The second and final parameter must be a std::size_t.
+ T = (*Param)->getType().getUnqualifiedType();
+ if (Context.hasSameType(T, Context.getSizeType()) &&
+ ++Param == FnDecl->param_end())
+ Valid = true;
+ }
+
+ // FIXME: This diagnostic is absolutely terrible.
+FinishedParams:
+ if (!Valid) {
+ Diag(FnDecl->getLocation(), diag::err_literal_operator_params)
+ << FnDecl->getDeclName();
+ return true;
+ }
+
+ // A parameter-declaration-clause containing a default argument is not
+ // equivalent to any of the permitted forms.
+ for (auto Param : FnDecl->params()) {
+ if (Param->hasDefaultArg()) {
+ Diag(Param->getDefaultArgRange().getBegin(),
+ diag::err_literal_operator_default_argument)
+ << Param->getDefaultArgRange();
+ break;
+ }
+ }
+
+ StringRef LiteralName
+ = FnDecl->getDeclName().getCXXLiteralIdentifier()->getName();
+ if (LiteralName[0] != '_') {
+ // C++11 [usrlit.suffix]p1:
+ // Literal suffix identifiers that do not start with an underscore
+ // are reserved for future standardization.
+ Diag(FnDecl->getLocation(), diag::warn_user_literal_reserved)
+ << NumericLiteralParser::isValidUDSuffix(getLangOpts(), LiteralName);
+ }
+
+ return false;
+}
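+
+ // Signatures exercising the rules above (illustrative):
+ //
+ //   int operator"" _i(unsigned long long);         // ok: numeric literal
+ //   int operator"" _s(const char *, std::size_t);  // ok: string literal
+ //   template <char...> int operator"" _b();        // ok: template form
+ //   int operator"" mi(long double);                // warning: suffixes not
+ //                                                  // starting with '_' are
+ //                                                  // reserved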
+
+/// ActOnStartLinkageSpecification - Parsed the beginning of a C++
+/// linkage specification, including the language and (if present)
+/// the '{'. ExternLoc is the location of the 'extern', Lang is the
+/// language string literal. LBraceLoc, if valid, provides the location of
+/// the '{' brace. Otherwise, this linkage specification does not
+/// have any braces.
+Decl *Sema::ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc,
+ Expr *LangStr,
+ SourceLocation LBraceLoc) {
+ StringLiteral *Lit = cast<StringLiteral>(LangStr);
+ if (!Lit->isAscii()) {
+ Diag(LangStr->getExprLoc(), diag::err_language_linkage_spec_not_ascii)
+ << LangStr->getSourceRange();
+ return nullptr;
+ }
+
+ StringRef Lang = Lit->getString();
+ LinkageSpecDecl::LanguageIDs Language;
+ if (Lang == "C")
+ Language = LinkageSpecDecl::lang_c;
+ else if (Lang == "C++")
+ Language = LinkageSpecDecl::lang_cxx;
+ else {
+ Diag(LangStr->getExprLoc(), diag::err_language_linkage_spec_unknown)
+ << LangStr->getSourceRange();
+ return nullptr;
+ }
+
+ // FIXME: Add all the various semantics of linkage specifications
+
+ LinkageSpecDecl *D = LinkageSpecDecl::Create(Context, CurContext, ExternLoc,
+ LangStr->getExprLoc(), Language,
+ LBraceLoc.isValid());
+ CurContext->addDecl(D);
+ PushDeclContext(S, D);
+ return D;
+}
+
+/// ActOnFinishLinkageSpecification - Complete the definition of
+/// the C++ linkage specification LinkageSpec. If RBraceLoc is
+/// valid, it's the position of the closing '}' brace in a linkage
+/// specification that uses braces.
+Decl *Sema::ActOnFinishLinkageSpecification(Scope *S,
+ Decl *LinkageSpec,
+ SourceLocation RBraceLoc) {
+ if (RBraceLoc.isValid()) {
+ LinkageSpecDecl* LSDecl = cast<LinkageSpecDecl>(LinkageSpec);
+ LSDecl->setRBraceLoc(RBraceLoc);
+ }
+ PopDeclContext();
+ return LinkageSpec;
+}
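+
+ // The forms handled here (illustrative):
+ //
+ //   extern "C" void f();      // no braces: LBraceLoc invalid
+ //   extern "C" { void g(); }  // braced form
+ //   extern "D" void h();      // error: unknown language linkage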
+
+Decl *Sema::ActOnEmptyDeclaration(Scope *S,
+ AttributeList *AttrList,
+ SourceLocation SemiLoc) {
+ Decl *ED = EmptyDecl::Create(Context, CurContext, SemiLoc);
+ // Attribute declarations appertain to the empty declaration, so we
+ // handle them here.
+ if (AttrList)
+ ProcessDeclAttributeList(S, ED, AttrList);
+
+ CurContext->addDecl(ED);
+ return ED;
+}
+
+/// \brief Perform semantic analysis for the variable declaration that
+/// occurs within a C++ catch clause, returning the newly-created
+/// variable.
+VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
+ TypeSourceInfo *TInfo,
+ SourceLocation StartLoc,
+ SourceLocation Loc,
+ IdentifierInfo *Name) {
+ bool Invalid = false;
+ QualType ExDeclType = TInfo->getType();
+
+ // Arrays and functions decay.
+ if (ExDeclType->isArrayType())
+ ExDeclType = Context.getArrayDecayedType(ExDeclType);
+ else if (ExDeclType->isFunctionType())
+ ExDeclType = Context.getPointerType(ExDeclType);
+
+ // C++ 15.3p1: The exception-declaration shall not denote an incomplete type.
+ // The exception-declaration shall not denote a pointer or reference to an
+ // incomplete type, other than [cv] void*.
+ // N2844 forbids rvalue references.
+ if (!ExDeclType->isDependentType() && ExDeclType->isRValueReferenceType()) {
+ Diag(Loc, diag::err_catch_rvalue_ref);
+ Invalid = true;
+ }
+
+ QualType BaseType = ExDeclType;
+ int Mode = 0; // 0 for direct type, 1 for pointer, 2 for reference
+ unsigned DK = diag::err_catch_incomplete;
+ if (const PointerType *Ptr = BaseType->getAs<PointerType>()) {
+ BaseType = Ptr->getPointeeType();
+ Mode = 1;
+ DK = diag::err_catch_incomplete_ptr;
+ } else if (const ReferenceType *Ref = BaseType->getAs<ReferenceType>()) {
+ // For the purpose of error recovery, we treat rvalue refs like lvalue refs.
+ BaseType = Ref->getPointeeType();
+ Mode = 2;
+ DK = diag::err_catch_incomplete_ref;
+ }
+ if (!Invalid && (Mode == 0 || !BaseType->isVoidType()) &&
+ !BaseType->isDependentType() && RequireCompleteType(Loc, BaseType, DK))
+ Invalid = true;
+
+ if (!Invalid && !ExDeclType->isDependentType() &&
+ RequireNonAbstractType(Loc, ExDeclType,
+ diag::err_abstract_type_in_decl,
+ AbstractVariableType))
+ Invalid = true;
+
+ // Only the non-fragile NeXT runtime currently supports C++ catches
+ // of ObjC types, and no runtime supports catching ObjC types by value.
+ if (!Invalid && getLangOpts().ObjC1) {
+ QualType T = ExDeclType;
+ if (const ReferenceType *RT = T->getAs<ReferenceType>())
+ T = RT->getPointeeType();
+
+ if (T->isObjCObjectType()) {
+ Diag(Loc, diag::err_objc_object_catch);
+ Invalid = true;
+ } else if (T->isObjCObjectPointerType()) {
+ // FIXME: should this be a test for macosx-fragile specifically?
+ if (getLangOpts().ObjCRuntime.isFragile())
+ Diag(Loc, diag::warn_objc_pointer_cxx_catch_fragile);
+ }
+ }
+
+ VarDecl *ExDecl = VarDecl::Create(Context, CurContext, StartLoc, Loc, Name,
+ ExDeclType, TInfo, SC_None);
+ ExDecl->setExceptionVariable(true);
+
+ // In ARC, infer 'retaining' for variables of retainable type.
+ if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(ExDecl))
+ Invalid = true;
+
+ if (!Invalid && !ExDeclType->isDependentType()) {
+ if (const RecordType *recordType = ExDeclType->getAs<RecordType>()) {
+ // Insulate this from anything else we might currently be parsing.
+ EnterExpressionEvaluationContext scope(*this, PotentiallyEvaluated);
+
+ // C++ [except.handle]p16:
+ // The object declared in an exception-declaration or, if the
+ // exception-declaration does not specify a name, a temporary (12.2) is
+ // copy-initialized (8.5) from the exception object. [...]
+ // The object is destroyed when the handler exits, after the destruction
+ // of any automatic objects initialized within the handler.
+ //
+ // We just pretend to initialize the object with itself, then make sure
+ // it can be destroyed later.
+ QualType initType = Context.getExceptionObjectType(ExDeclType);
+
+ InitializedEntity entity =
+ InitializedEntity::InitializeVariable(ExDecl);
+ InitializationKind initKind =
+ InitializationKind::CreateCopy(Loc, SourceLocation());
+
+ Expr *opaqueValue =
+ new (Context) OpaqueValueExpr(Loc, initType, VK_LValue, OK_Ordinary);
+ InitializationSequence sequence(*this, entity, initKind, opaqueValue);
+ ExprResult result = sequence.Perform(*this, entity, initKind, opaqueValue);
+ if (result.isInvalid())
+ Invalid = true;
+ else {
+ // If the constructor used was non-trivial, set this as the
+ // "initializer".
+ CXXConstructExpr *construct = result.getAs<CXXConstructExpr>();
+ if (!construct->getConstructor()->isTrivial()) {
+ Expr *init = MaybeCreateExprWithCleanups(construct);
+ ExDecl->setInit(init);
+ }
+
+ // And make sure it's destructible.
+ FinalizeVarWithDestructor(ExDecl, recordType);
+ }
+ }
+ }
+
+ if (Invalid)
+ ExDecl->setInvalidDecl();
+
+ return ExDecl;
+}
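+
+ // Handlers exercising the checks above ('Base' and 'Incomplete' are
+ // hypothetical types; Incomplete is only forward-declared):
+ //
+ //   catch (int a[4])      { }  // array decays: 'a' has type int*
+ //   catch (Base &&b)      { }  // error: rvalue reference (N2844)
+ //   catch (Incomplete *p) { }  // error: pointer to incomplete type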
+
+/// ActOnExceptionDeclarator - Parsed the exception-declarator in a C++ catch
+/// handler.
+Decl *Sema::ActOnExceptionDeclarator(Scope *S, Declarator &D) {
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ bool Invalid = D.isInvalidType();
+
+ // Check for unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo,
+ UPPC_ExceptionType)) {
+ TInfo = Context.getTrivialTypeSourceInfo(Context.IntTy,
+ D.getIdentifierLoc());
+ Invalid = true;
+ }
+
+ IdentifierInfo *II = D.getIdentifier();
+ if (NamedDecl *PrevDecl = LookupSingleName(S, II, D.getIdentifierLoc(),
+ LookupOrdinaryName,
+ ForRedeclaration)) {
+ // The scope should be freshly made just for us. There is just no way
+ // it contains any previous declaration, except for function parameters in
+ // a function-try-block's catch statement.
+ assert(!S->isDeclScope(PrevDecl));
+ if (isDeclInScope(PrevDecl, CurContext, S)) {
+ Diag(D.getIdentifierLoc(), diag::err_redefinition)
+ << D.getIdentifier();
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ Invalid = true;
+ } else if (PrevDecl->isTemplateParameter())
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl);
+ }
+
+ if (D.getCXXScopeSpec().isSet() && !Invalid) {
+ Diag(D.getIdentifierLoc(), diag::err_qualified_catch_declarator)
+ << D.getCXXScopeSpec().getRange();
+ Invalid = true;
+ }
+
+ VarDecl *ExDecl = BuildExceptionDeclaration(S, TInfo,
+ D.getLocStart(),
+ D.getIdentifierLoc(),
+ D.getIdentifier());
+ if (Invalid)
+ ExDecl->setInvalidDecl();
+
+ // Add the exception declaration into this scope.
+ if (II)
+ PushOnScopeChains(ExDecl, S);
+ else
+ CurContext->addDecl(ExDecl);
+
+ ProcessDeclAttributes(S, ExDecl, D);
+ return ExDecl;
+}
+
+Decl *Sema::ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
+ Expr *AssertExpr,
+ Expr *AssertMessageExpr,
+ SourceLocation RParenLoc) {
+ StringLiteral *AssertMessage =
+ AssertMessageExpr ? cast<StringLiteral>(AssertMessageExpr) : nullptr;
+
+ if (DiagnoseUnexpandedParameterPack(AssertExpr, UPPC_StaticAssertExpression))
+ return nullptr;
+
+ return BuildStaticAssertDeclaration(StaticAssertLoc, AssertExpr,
+ AssertMessage, RParenLoc, false);
+}
+
+Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
+ Expr *AssertExpr,
+ StringLiteral *AssertMessage,
+ SourceLocation RParenLoc,
+ bool Failed) {
+ assert(AssertExpr != nullptr && "Expected non-null condition");
+ if (!AssertExpr->isTypeDependent() && !AssertExpr->isValueDependent() &&
+ !Failed) {
+ // In a static_assert-declaration, the constant-expression shall be a
+ // constant expression that can be contextually converted to bool.
+ ExprResult Converted = PerformContextuallyConvertToBool(AssertExpr);
+ if (Converted.isInvalid())
+ Failed = true;
+
+ llvm::APSInt Cond;
+ if (!Failed && VerifyIntegerConstantExpression(Converted.get(), &Cond,
+ diag::err_static_assert_expression_is_not_constant,
+ /*AllowFold=*/false).isInvalid())
+ Failed = true;
+
+ if (!Failed && !Cond) {
+ SmallString<256> MsgBuffer;
+ llvm::raw_svector_ostream Msg(MsgBuffer);
+ if (AssertMessage)
+ AssertMessage->printPretty(Msg, nullptr, getPrintingPolicy());
+ Diag(StaticAssertLoc, diag::err_static_assert_failed)
+ << !AssertMessage << Msg.str() << AssertExpr->getSourceRange();
+ Failed = true;
+ }
+ }
+
+ Decl *Decl = StaticAssertDecl::Create(Context, CurContext, StaticAssertLoc,
+ AssertExpr, AssertMessage, RParenLoc,
+ Failed);
+
+ CurContext->addDecl(Decl);
+ return Decl;
+}
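+
+ // Both paths above in one sketch:
+ //
+ //   static_assert(sizeof(int) >= 2, "int too small");  // evaluated here
+ //
+ //   template <int N> struct Pos {
+ //     static_assert(N > 0, "N must be positive");  // value-dependent:
+ //   };                                             // checked at
+ //                                                  // instantiation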
+
+/// \brief Perform semantic analysis of the given friend type declaration.
+///
+/// \returns A friend declaration for the given type.
+FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation LocStart,
+ SourceLocation FriendLoc,
+ TypeSourceInfo *TSInfo) {
+ assert(TSInfo && "NULL TypeSourceInfo for friend type declaration");
+
+ QualType T = TSInfo->getType();
+ SourceRange TypeRange = TSInfo->getTypeLoc().getLocalSourceRange();
+
+ // C++03 [class.friend]p2:
+ // An elaborated-type-specifier shall be used in a friend declaration
+ // for a class.*
+ //
+ // * The class-key of the elaborated-type-specifier is required.
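+ //
+ // For example (illustrative):
+ //   class C;
+ //   struct S { friend class C; };  // OK: elaborated-type-specifier
+ //   struct T { friend C; };        // extension in C++03 (OK in C++11)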
+ if (!ActiveTemplateInstantiations.empty()) {
+ // Do not complain about the form of friend template types during
+ // template instantiation; we will already have complained when the
+ // template was declared.
+ } else {
+ if (!T->isElaboratedTypeSpecifier()) {
+ // If we evaluated the type to a record type, suggest putting
+ // a tag in front.
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ RecordDecl *RD = RT->getDecl();
+
+ SmallString<16> InsertionText(" ");
+ InsertionText += RD->getKindName();
+
+ Diag(TypeRange.getBegin(),
+ getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_unelaborated_friend_type :
+ diag::ext_unelaborated_friend_type)
+ << (unsigned) RD->getTagKind()
+ << T
+ << FixItHint::CreateInsertion(getLocForEndOfToken(FriendLoc),
+ InsertionText);
+ } else {
+ Diag(FriendLoc,
+ getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_nonclass_type_friend :
+ diag::ext_nonclass_type_friend)
+ << T
+ << TypeRange;
+ }
+ } else if (T->getAs<EnumType>()) {
+ Diag(FriendLoc,
+ getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_enum_friend :
+ diag::ext_enum_friend)
+ << T
+ << TypeRange;
+ }
+
+ // C++11 [class.friend]p3:
+ // A friend declaration that does not declare a function shall have one
+ // of the following forms:
+ // friend elaborated-type-specifier ;
+ // friend simple-type-specifier ;
+ // friend typename-specifier ;
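+ // For example (illustrative), "class A friend;" is ill-formed in C++11
+ // because 'friend' does not come first; the required form is
+ // "friend class A;".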
+ if (getLangOpts().CPlusPlus11 && LocStart != FriendLoc)
+ Diag(FriendLoc, diag::err_friend_not_first_in_declaration) << T;
+ }
+
+ // If the type specifier in a friend declaration designates a (possibly
+ // cv-qualified) class type, that class is declared as a friend; otherwise,
+ // the friend declaration is ignored.
+ return FriendDecl::Create(Context, CurContext,
+ TSInfo->getTypeLoc().getLocStart(), TSInfo,
+ FriendLoc);
+}
+
+/// Handle a friend tag declaration where the scope specifier was
+/// templated.
+Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
+ unsigned TagSpec, SourceLocation TagLoc,
+ CXXScopeSpec &SS,
+ IdentifierInfo *Name,
+ SourceLocation NameLoc,
+ AttributeList *Attr,
+ MultiTemplateParamsArg TempParamLists) {
+ TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
+
+ bool isExplicitSpecialization = false;
+ bool Invalid = false;
+
+ if (TemplateParameterList *TemplateParams =
+ MatchTemplateParametersToScopeSpecifier(
+ TagLoc, NameLoc, SS, nullptr, TempParamLists, /*friend*/ true,
+ isExplicitSpecialization, Invalid)) {
+ if (TemplateParams->size() > 0) {
+ // This is a declaration of a class template.
+ if (Invalid)
+ return nullptr;
+
+ return CheckClassTemplate(S, TagSpec, TUK_Friend, TagLoc, SS, Name,
+ NameLoc, Attr, TemplateParams, AS_public,
+ /*ModulePrivateLoc=*/SourceLocation(),
+ FriendLoc, TempParamLists.size() - 1,
+ TempParamLists.data()).get();
+ } else {
+ // The "template<>" header is extraneous.
+ Diag(TemplateParams->getTemplateLoc(), diag::err_template_tag_noparams)
+ << TypeWithKeyword::getTagTypeKindName(Kind) << Name;
+ isExplicitSpecialization = true;
+ }
+ }
+
+ if (Invalid) return nullptr;
+
+ bool isAllExplicitSpecializations = true;
+ for (unsigned I = TempParamLists.size(); I-- > 0; ) {
+ if (TempParamLists[I]->size()) {
+ isAllExplicitSpecializations = false;
+ break;
+ }
+ }
+
+ // FIXME: don't ignore attributes.
+
+ // If it's explicit specializations all the way down, just forget
+ // about the template header and build an appropriate non-templated
+ // friend. TODO: for source fidelity, remember the headers.
+ if (isAllExplicitSpecializations) {
+ if (SS.isEmpty()) {
+ bool Owned = false;
+ bool IsDependent = false;
+ return ActOnTag(S, TagSpec, TUK_Friend, TagLoc, SS, Name, NameLoc,
+ Attr, AS_public,
+ /*ModulePrivateLoc=*/SourceLocation(),
+ MultiTemplateParamsArg(), Owned, IsDependent,
+ /*ScopedEnumKWLoc=*/SourceLocation(),
+ /*ScopedEnumUsesClassTag=*/false,
+ /*UnderlyingType=*/TypeResult(),
+ /*IsTypeSpecifier=*/false);
+ }
+
+ NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
+ ElaboratedTypeKeyword Keyword
+ = TypeWithKeyword::getKeywordForTagTypeKind(Kind);
+ QualType T = CheckTypenameType(Keyword, TagLoc, QualifierLoc,
+ *Name, NameLoc);
+ if (T.isNull())
+ return nullptr;
+
+ TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T);
+ if (isa<DependentNameType>(T)) {
+ DependentNameTypeLoc TL =
+ TSI->getTypeLoc().castAs<DependentNameTypeLoc>();
+ TL.setElaboratedKeywordLoc(TagLoc);
+ TL.setQualifierLoc(QualifierLoc);
+ TL.setNameLoc(NameLoc);
+ } else {
+ ElaboratedTypeLoc TL = TSI->getTypeLoc().castAs<ElaboratedTypeLoc>();
+ TL.setElaboratedKeywordLoc(TagLoc);
+ TL.setQualifierLoc(QualifierLoc);
+ TL.getNamedTypeLoc().castAs<TypeSpecTypeLoc>().setNameLoc(NameLoc);
+ }
+
+ FriendDecl *Friend = FriendDecl::Create(Context, CurContext, NameLoc,
+ TSI, FriendLoc, TempParamLists);
+ Friend->setAccess(AS_public);
+ CurContext->addDecl(Friend);
+ return Friend;
+ }
+
+ assert(SS.isNotEmpty() && "valid templated tag with no SS and no direct?");
+
+ // Handle the case of a templated-scope friend class. e.g.
+ // template <class T> class A<T>::B;
+ // FIXME: we don't support these right now.
+ Diag(NameLoc, diag::warn_template_qualified_friend_unsupported)
+ << SS.getScopeRep() << SS.getRange() << cast<CXXRecordDecl>(CurContext);
+ ElaboratedTypeKeyword ETK = TypeWithKeyword::getKeywordForTagTypeKind(Kind);
+ QualType T = Context.getDependentNameType(ETK, SS.getScopeRep(), Name);
+ TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T);
+ DependentNameTypeLoc TL = TSI->getTypeLoc().castAs<DependentNameTypeLoc>();
+ TL.setElaboratedKeywordLoc(TagLoc);
+ TL.setQualifierLoc(SS.getWithLocInContext(Context));
+ TL.setNameLoc(NameLoc);
+
+ FriendDecl *Friend = FriendDecl::Create(Context, CurContext, NameLoc,
+ TSI, FriendLoc, TempParamLists);
+ Friend->setAccess(AS_public);
+ Friend->setUnsupportedFriend(true);
+ CurContext->addDecl(Friend);
+ return Friend;
+}
+
+/// Handle a friend type declaration. This works in tandem with
+/// ActOnTag.
+///
+/// Notes on friend class templates:
+///
+/// We generally treat friend class declarations as if they were
+/// declaring a class. So, for example, the elaborated type specifier
+/// in a friend declaration is required to obey the restrictions of a
+/// class-head (i.e. no typedefs in the scope chain), template
+/// parameters are required to match up with simple template-ids, &c.
+/// However, unlike when declaring a template specialization, it's
+/// okay to refer to a template specialization without an empty
+/// template parameter declaration, e.g.
+/// friend class A<T>::B<unsigned>;
+/// We permit this as a special case; if there are any template
+/// parameters present at all, require proper matching, i.e.
+/// template <> template \<class T> friend class A<int>::B;
+Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
+ MultiTemplateParamsArg TempParams) {
+ SourceLocation Loc = DS.getLocStart();
+
+ assert(DS.isFriendSpecified());
+ assert(DS.getStorageClassSpec() == DeclSpec::SCS_unspecified);
+
+ // Try to convert the decl specifier to a type. This works for
+ // friend templates because ActOnTag never produces a ClassTemplateDecl
+ // for a TUK_Friend.
+ Declarator TheDeclarator(DS, Declarator::MemberContext);
+ TypeSourceInfo *TSI = GetTypeForDeclarator(TheDeclarator, S);
+ QualType T = TSI->getType();
+ if (TheDeclarator.isInvalidType())
+ return nullptr;
+
+ if (DiagnoseUnexpandedParameterPack(Loc, TSI, UPPC_FriendDeclaration))
+ return nullptr;
+
+ // This is definitely an error in C++98. It's probably meant to
+ // be forbidden in C++0x, too, but the specification is just
+ // poorly written.
+ //
+ // The problem is with declarations like the following:
+ // template <T> friend A<T>::foo;
+ // where deciding whether a class C is a friend or not now hinges
+ // on whether there exists an instantiation of A that causes
+ // 'foo' to equal C. There are restrictions on class-heads
+ // (which we declare (by fiat) elaborated friend declarations to
+ // be) that makes this tractable.
+ //
+ // FIXME: handle "template <> friend class A<T>;", which
+ // is possibly well-formed? Who even knows?
+ if (TempParams.size() && !T->isElaboratedTypeSpecifier()) {
+ Diag(Loc, diag::err_tagless_friend_type_template)
+ << DS.getSourceRange();
+ return nullptr;
+ }
+
+ // C++98 [class.friend]p1: A friend of a class is a function
+ // or class that is not a member of the class . . .
+ // This is fixed in DR77, which just barely didn't make the C++03
+ // deadline. It's also a very silly restriction that seriously
+ // affects inner classes and which nobody else seems to implement;
+ // thus we never diagnose it, not even in -pedantic.
+ //
+ // But note that we could warn about it: it's always useless to
+ // friend one of your own members (it's not, however, worthless to
+ // friend a member of an arbitrary specialization of your template).
+
+ Decl *D;
+ if (unsigned NumTempParamLists = TempParams.size())
+ D = FriendTemplateDecl::Create(Context, CurContext, Loc,
+ NumTempParamLists,
+ TempParams.data(),
+ TSI,
+ DS.getFriendSpecLoc());
+ else
+ D = CheckFriendTypeDecl(Loc, DS.getFriendSpecLoc(), TSI);
+
+ if (!D)
+ return nullptr;
+
+ D->setAccess(AS_public);
+ CurContext->addDecl(D);
+
+ return D;
+}
+
+NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
+ MultiTemplateParamsArg TemplateParams) {
+ const DeclSpec &DS = D.getDeclSpec();
+
+ assert(DS.isFriendSpecified());
+ assert(DS.getStorageClassSpec() == DeclSpec::SCS_unspecified);
+
+ SourceLocation Loc = D.getIdentifierLoc();
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+
+ // C++ [class.friend]p1
+ // A friend of a class is a function or class....
+ // Note that this sees through typedefs, which is intended.
+ // It *doesn't* see through dependent types, which is correct
+ // according to [temp.arg.type]p3:
+ // If a declaration acquires a function type through a
+ // type dependent on a template-parameter and this causes
+ // a declaration that does not use the syntactic form of a
+ // function declarator to have a function type, the program
+ // is ill-formed.
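+ //
+ // For example (illustrative):
+ //   typedef void F();
+ //   struct S { friend F f; };                     // OK: typedef seen through
+ //   template <class T> struct X { friend T f; };  // error: 'f' would get a
+ //                                                 // function type only via T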
+ if (!TInfo->getType()->isFunctionType()) {
+ Diag(Loc, diag::err_unexpected_friend);
+
+ // It might be worthwhile to try to recover by creating an
+ // appropriate declaration.
+ return nullptr;
+ }
+
+ // C++ [namespace.memdef]p3
+ // - If a friend declaration in a non-local class first declares a
+ // class or function, the friend class or function is a member
+ // of the innermost enclosing namespace.
+ // - The name of the friend is not found by simple name lookup
+ // until a matching declaration is provided in that namespace
+ // scope (either before or after the class declaration granting
+ // friendship).
+ // - If a friend function is called, its name may be found by the
+ // name lookup that considers functions from namespaces and
+ // classes associated with the types of the function arguments.
+ // - When looking for a prior declaration of a class or a function
+ // declared as a friend, scopes outside the innermost enclosing
+ // namespace scope are not considered.
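+ //
+ // For example (illustrative):
+ //   namespace N {
+ //     struct S { friend void f(); };  // f becomes a member of N, but
+ //   }                                 // is not found by simple lookup
+ //   // until a matching declaration appears in N.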
+
+ CXXScopeSpec &SS = D.getCXXScopeSpec();
+ DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
+ DeclarationName Name = NameInfo.getName();
+ assert(Name);
+
+ // Check for unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(Loc, TInfo, UPPC_FriendDeclaration) ||
+ DiagnoseUnexpandedParameterPack(NameInfo, UPPC_FriendDeclaration) ||
+ DiagnoseUnexpandedParameterPack(SS, UPPC_FriendDeclaration))
+ return nullptr;
+
+ // The context we found the declaration in, or in which we should
+ // create the declaration.
+ DeclContext *DC;
+ Scope *DCScope = S;
+ LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
+ ForRedeclaration);
+
+ // There are five cases here.
+ // - There's no scope specifier and we're in a local class. Only look
+ // for functions declared in the immediately-enclosing block scope.
+ // We recover from invalid scope qualifiers as if they just weren't there.
+ FunctionDecl *FunctionContainingLocalClass = nullptr;
+ if ((SS.isInvalid() || !SS.isSet()) &&
+ (FunctionContainingLocalClass =
+ cast<CXXRecordDecl>(CurContext)->isLocalClass())) {
+ // C++11 [class.friend]p11:
+ // If a friend declaration appears in a local class and the name
+ // specified is an unqualified name, a prior declaration is
+ // looked up without considering scopes that are outside the
+ // innermost enclosing non-class scope. For a friend function
+ // declaration, if there is no prior declaration, the program is
+ // ill-formed.
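+
+ // For example (illustrative):
+ //   void f() {
+ //     void g();
+ //     struct L { friend void g(); };  // OK: prior block-scope declaration
+ //     struct M { friend void h(); };  // ill-formed: no prior declaration
+ //   }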
+
+ // Find the innermost enclosing non-class scope. This is the block
+ // scope containing the local class definition (or for a nested class,
+ // the outer local class).
+ DCScope = S->getFnParent();
+
+ // Look up the function name in the scope.
+ Previous.clear(LookupLocalFriendName);
+ LookupName(Previous, S, /*AllowBuiltinCreation*/false);
+
+ if (!Previous.empty()) {
+ // All possible previous declarations must have the same context:
+ // either they were declared at block scope or they are members of
+ // one of the enclosing local classes.
+ DC = Previous.getRepresentativeDecl()->getDeclContext();
+ } else {
+ // This is ill-formed, but provide the context that we would have
+ // declared the function in, if we were permitted to, for error recovery.
+ DC = FunctionContainingLocalClass;
+ }
+ adjustContextForLocalExternDecl(DC);
+
+ // C++ [class.friend]p6:
+ // A function can be defined in a friend declaration of a class if and
+ // only if the class is a non-local class (9.8), the function name is
+ // unqualified, and the function has namespace scope.
+ if (D.isFunctionDefinition()) {
+ Diag(NameInfo.getBeginLoc(), diag::err_friend_def_in_local_class);
+ }
+
+ // - There's no scope specifier, in which case we just go to the
+ // appropriate scope and look for a function or function template
+ // there as appropriate.
+ } else if (SS.isInvalid() || !SS.isSet()) {
+ // C++11 [namespace.memdef]p3:
+ // If the name in a friend declaration is neither qualified nor
+ // a template-id and the declaration is a function or an
+ // elaborated-type-specifier, the lookup to determine whether
+ // the entity has been previously declared shall not consider
+ // any scopes outside the innermost enclosing namespace.
+ bool isTemplateId = D.getName().getKind() == UnqualifiedId::IK_TemplateId;
+
+ // Find the appropriate context according to the above.
+ DC = CurContext;
+
+ // Skip class contexts. If someone can cite chapter and verse
+ // for this behavior, that would be nice --- it's what GCC and
+ // EDG do, and it seems like a reasonable intent, but the spec
+ // really only says that checks for unqualified existing
+ // declarations should stop at the nearest enclosing namespace,
+ // not that they should only consider the nearest enclosing
+ // namespace.
+ while (DC->isRecord())
+ DC = DC->getParent();
+
+ DeclContext *LookupDC = DC;
+ while (LookupDC->isTransparentContext())
+ LookupDC = LookupDC->getParent();
+
+ while (true) {
+ LookupQualifiedName(Previous, LookupDC);
+
+ if (!Previous.empty()) {
+ DC = LookupDC;
+ break;
+ }
+
+ if (isTemplateId) {
+ if (isa<TranslationUnitDecl>(LookupDC)) break;
+ } else {
+ if (LookupDC->isFileContext()) break;
+ }
+ LookupDC = LookupDC->getParent();
+ }
+
+ DCScope = getScopeForDeclContext(S, DC);
+
+ // - There's a non-dependent scope specifier, in which case we
+ // compute it and do a previous lookup there for a function
+ // or function template.
+ } else if (!SS.getScopeRep()->isDependent()) {
+ DC = computeDeclContext(SS);
+ if (!DC) return nullptr;
+
+ if (RequireCompleteDeclContext(SS, DC)) return nullptr;
+
+ LookupQualifiedName(Previous, DC);
+
+ // Ignore things found implicitly in the wrong scope.
+ // TODO: better diagnostics for this case. Suggesting the right
+ // qualified scope would be nice...
+ LookupResult::Filter F = Previous.makeFilter();
+ while (F.hasNext()) {
+ NamedDecl *D = F.next();
+ if (!DC->InEnclosingNamespaceSetOf(
+ D->getDeclContext()->getRedeclContext()))
+ F.erase();
+ }
+ F.done();
+
+ if (Previous.empty()) {
+ D.setInvalidType();
+ Diag(Loc, diag::err_qualified_friend_not_found)
+ << Name << TInfo->getType();
+ return nullptr;
+ }
+
+ // C++ [class.friend]p1: A friend of a class is a function or
+ // class that is not a member of the class . . .
+ if (DC->Equals(CurContext))
+ Diag(DS.getFriendSpecLoc(),
+ getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_friend_is_member :
+ diag::err_friend_is_member);
+
+ if (D.isFunctionDefinition()) {
+ // C++ [class.friend]p6:
+ // A function can be defined in a friend declaration of a class if and
+ // only if the class is a non-local class (9.8), the function name is
+ // unqualified, and the function has namespace scope.
+ SemaDiagnosticBuilder DB
+ = Diag(SS.getRange().getBegin(), diag::err_qualified_friend_def);
+
+ DB << SS.getScopeRep();
+ if (DC->isFileContext())
+ DB << FixItHint::CreateRemoval(SS.getRange());
+ SS.clear();
+ }
+
+ // - There's a scope specifier that does not match any template
+ // parameter lists, in which case we use some arbitrary context,
+ // create a method or method template, and wait for instantiation.
+ // - There's a scope specifier that does match some template
+ // parameter lists, which we don't handle right now.
+ } else {
+ if (D.isFunctionDefinition()) {
+ // C++ [class.friend]p6:
+ // A function can be defined in a friend declaration of a class if and
+ // only if the class is a non-local class (9.8), the function name is
+ // unqualified, and the function has namespace scope.
+ Diag(SS.getRange().getBegin(), diag::err_qualified_friend_def)
+ << SS.getScopeRep();
+ }
+
+ DC = CurContext;
+ assert(isa<CXXRecordDecl>(DC) && "friend declaration not in class?");
+ }
+
+ if (!DC->isRecord()) {
+ int DiagArg = -1;
+ switch (D.getName().getKind()) {
+ case UnqualifiedId::IK_ConstructorTemplateId:
+ case UnqualifiedId::IK_ConstructorName:
+ DiagArg = 0;
+ break;
+ case UnqualifiedId::IK_DestructorName:
+ DiagArg = 1;
+ break;
+ case UnqualifiedId::IK_ConversionFunctionId:
+ DiagArg = 2;
+ break;
+ case UnqualifiedId::IK_Identifier:
+ case UnqualifiedId::IK_ImplicitSelfParam:
+ case UnqualifiedId::IK_LiteralOperatorId:
+ case UnqualifiedId::IK_OperatorFunctionId:
+ case UnqualifiedId::IK_TemplateId:
+ break;
+ }
+ // This implies that it has to be an operator or function.
+ if (DiagArg >= 0) {
+ Diag(Loc, diag::err_introducing_special_friend) << DiagArg;
+ return nullptr;
+ }
+ }
+
+ // FIXME: This is an egregious hack to cope with cases where the scope stack
+ // does not contain the declaration context, i.e., in an out-of-line
+ // definition of a class.
+ Scope FakeDCScope(S, Scope::DeclScope, Diags);
+ if (!DCScope) {
+ FakeDCScope.setEntity(DC);
+ DCScope = &FakeDCScope;
+ }
+
+ bool AddToScope = true;
+ NamedDecl *ND = ActOnFunctionDeclarator(DCScope, D, DC, TInfo, Previous,
+ TemplateParams, AddToScope);
+ if (!ND) return nullptr;
+
+ assert(ND->getLexicalDeclContext() == CurContext);
+
+ // If we performed typo correction, we might have added a scope specifier
+ // and changed the decl context.
+ DC = ND->getDeclContext();
+
+ // Add the function declaration to the appropriate lookup tables,
+ // adjusting the redeclarations list as necessary. We don't
+ // want to do this yet if the friending class is dependent.
+ //
+ // Also update the scope-based lookup if the target context's
+ // lookup context is in lexical scope.
+ if (!CurContext->isDependentContext()) {
+ DC = DC->getRedeclContext();
+ DC->makeDeclVisibleInContext(ND);
+ if (Scope *EnclosingScope = getScopeForDeclContext(S, DC))
+ PushOnScopeChains(ND, EnclosingScope, /*AddToContext=*/ false);
+ }
+
+ FriendDecl *FrD = FriendDecl::Create(Context, CurContext,
+ D.getIdentifierLoc(), ND,
+ DS.getFriendSpecLoc());
+ FrD->setAccess(AS_public);
+ CurContext->addDecl(FrD);
+
+ if (ND->isInvalidDecl()) {
+ FrD->setInvalidDecl();
+ } else {
+ if (DC->isRecord()) CheckFriendAccess(ND);
+
+ FunctionDecl *FD;
+ if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(ND))
+ FD = FTD->getTemplatedDecl();
+ else
+ FD = cast<FunctionDecl>(ND);
+
+ // C++11 [dcl.fct.default]p4: If a friend declaration specifies a
+ // default argument expression, that declaration shall be a definition
+ // and shall be the only declaration of the function or function
+ // template in the translation unit.
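+ //
+ // For example (illustrative):
+ //   struct A { friend void f(int = 0) {} };  // OK: definition, only decl
+ //   struct B { friend void g(int = 0); };    // error: must be a definition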
+ if (functionDeclHasDefaultArgument(FD)) {
+ if (FunctionDecl *OldFD = FD->getPreviousDecl()) {
+ Diag(FD->getLocation(), diag::err_friend_decl_with_def_arg_redeclared);
+ Diag(OldFD->getLocation(), diag::note_previous_declaration);
+ } else if (!D.isFunctionDefinition())
+ Diag(FD->getLocation(), diag::err_friend_decl_with_def_arg_must_be_def);
+ }
+
+ // Mark templated-scope function declarations as unsupported.
+ if (FD->getNumTemplateParameterLists() && SS.isValid()) {
+ Diag(FD->getLocation(), diag::warn_template_qualified_friend_unsupported)
+ << SS.getScopeRep() << SS.getRange()
+ << cast<CXXRecordDecl>(CurContext);
+ FrD->setUnsupportedFriend(true);
+ }
+ }
+
+ return ND;
+}
+
+void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc) {
+ AdjustDeclIfTemplate(Dcl);
+
+ FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(Dcl);
+ if (!Fn) {
+ Diag(DelLoc, diag::err_deleted_non_function);
+ return;
+ }
+
+ if (const FunctionDecl *Prev = Fn->getPreviousDecl()) {
+ // Don't consider the implicit declaration we generate for explicit
+ // specializations. FIXME: Do not generate these implicit declarations.
+ if ((Prev->getTemplateSpecializationKind() != TSK_ExplicitSpecialization ||
+ Prev->getPreviousDecl()) &&
+ !Prev->isDefined()) {
+ Diag(DelLoc, diag::err_deleted_decl_not_first);
+ Diag(Prev->getLocation().isInvalid() ? DelLoc : Prev->getLocation(),
+ Prev->isImplicit() ? diag::note_previous_implicit_declaration
+ : diag::note_previous_declaration);
+ }
+ // If the declaration wasn't the first, we delete the function anyway for
+ // recovery.
+ Fn = Fn->getCanonicalDecl();
+ }
+
+ // dllimport/dllexport cannot be deleted.
+ if (const InheritableAttr *DLLAttr = getDLLAttr(Fn)) {
+ Diag(Fn->getLocation(), diag::err_attribute_dll_deleted) << DLLAttr;
+ Fn->setInvalidDecl();
+ }
+
+ if (Fn->isDeleted())
+ return;
+
+ // See if we're deleting a function which is already known to override a
+ // non-deleted virtual function.
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn)) {
+ bool IssuedDiagnostic = false;
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods();
+ I != E; ++I) {
+ if (!(*I)->isDeleted()) {
+ if (!IssuedDiagnostic) {
+ Diag(DelLoc, diag::err_deleted_override) << MD->getDeclName();
+ IssuedDiagnostic = true;
+ }
+ Diag((*I)->getLocation(), diag::note_overridden_virtual_function);
+ }
+ }
+ }
+
+ // C++11 [basic.start.main]p3:
+ // A program that defines main as deleted [...] is ill-formed.
+ if (Fn->isMain())
+ Diag(DelLoc, diag::err_deleted_main);
+
+ Fn->setDeletedAsWritten();
+}
+
+void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
+ CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(Dcl);
+
+ if (MD) {
+ if (MD->getParent()->isDependentType()) {
+ MD->setDefaulted();
+ MD->setExplicitlyDefaulted();
+ return;
+ }
+
+ CXXSpecialMember Member = getSpecialMember(MD);
+ if (Member == CXXInvalid) {
+ if (!MD->isInvalidDecl())
+ Diag(DefaultLoc, diag::err_default_special_members);
+ return;
+ }
+
+ MD->setDefaulted();
+ MD->setExplicitlyDefaulted();
+
+ // If this definition appears within the record, do the checking when
+ // the record is complete.
+ const FunctionDecl *Primary = MD;
+ if (const FunctionDecl *Pattern = MD->getTemplateInstantiationPattern())
+ // Find the uninstantiated declaration that actually had the '= default'
+ // on it.
+ Pattern->isDefined(Primary);
+
+ // If the method was defaulted on its first declaration, we will have
+ // already performed the checking in CheckCompletedCXXClass. Such a
+ // declaration doesn't trigger an implicit definition.
+ if (Primary == Primary->getCanonicalDecl())
+ return;
+
+ CheckExplicitlyDefaultedSpecialMember(MD);
+
+ if (MD->isInvalidDecl())
+ return;
+
+ switch (Member) {
+ case CXXDefaultConstructor:
+ DefineImplicitDefaultConstructor(DefaultLoc,
+ cast<CXXConstructorDecl>(MD));
+ break;
+ case CXXCopyConstructor:
+ DefineImplicitCopyConstructor(DefaultLoc, cast<CXXConstructorDecl>(MD));
+ break;
+ case CXXCopyAssignment:
+ DefineImplicitCopyAssignment(DefaultLoc, MD);
+ break;
+ case CXXDestructor:
+ DefineImplicitDestructor(DefaultLoc, cast<CXXDestructorDecl>(MD));
+ break;
+ case CXXMoveConstructor:
+ DefineImplicitMoveConstructor(DefaultLoc, cast<CXXConstructorDecl>(MD));
+ break;
+ case CXXMoveAssignment:
+ DefineImplicitMoveAssignment(DefaultLoc, MD);
+ break;
+ case CXXInvalid:
+ llvm_unreachable("Invalid special member.");
+ }
+ } else {
+ Diag(DefaultLoc, diag::err_default_special_members);
+ }
+}
+
+static void SearchForReturnInStmt(Sema &Self, Stmt *S) {
+ for (Stmt *SubStmt : S->children()) {
+ if (!SubStmt)
+ continue;
+ if (isa<ReturnStmt>(SubStmt))
+ Self.Diag(SubStmt->getLocStart(),
+ diag::err_return_in_constructor_handler);
+ if (!isa<Expr>(SubStmt))
+ SearchForReturnInStmt(Self, SubStmt);
+ }
+}
+
+void Sema::DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock) {
+ for (unsigned I = 0, E = TryBlock->getNumHandlers(); I != E; ++I) {
+ CXXCatchStmt *Handler = TryBlock->getHandler(I);
+ SearchForReturnInStmt(*this, Handler);
+ }
+}
+
+bool Sema::CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
+ const CXXMethodDecl *Old) {
+ const FunctionType *NewFT = New->getType()->getAs<FunctionType>();
+ const FunctionType *OldFT = Old->getType()->getAs<FunctionType>();
+
+ CallingConv NewCC = NewFT->getCallConv(), OldCC = OldFT->getCallConv();
+
+ // If the calling conventions match, everything is fine
+ if (NewCC == OldCC)
+ return false;
+
+ // If the calling conventions mismatch because the new function is static,
+ // suppress the calling convention mismatch error; the error about static
+ // function override (err_static_overrides_virtual from
+ // Sema::CheckFunctionDeclaration) is more clear.
+ if (New->getStorageClass() == SC_Static)
+ return false;
+
+ Diag(New->getLocation(),
+ diag::err_conflicting_overriding_cc_attributes)
+ << New->getDeclName() << New->getType() << Old->getType();
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function);
+ return true;
+}
+
+bool Sema::CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
+ const CXXMethodDecl *Old) {
+ QualType NewTy = New->getType()->getAs<FunctionType>()->getReturnType();
+ QualType OldTy = Old->getType()->getAs<FunctionType>()->getReturnType();
+
+ if (Context.hasSameType(NewTy, OldTy) ||
+ NewTy->isDependentType() || OldTy->isDependentType())
+ return false;
+
+ // Check if the return types are covariant
+ QualType NewClassTy, OldClassTy;
+
+ // Both types must be pointers or references to classes.
+ if (const PointerType *NewPT = NewTy->getAs<PointerType>()) {
+ if (const PointerType *OldPT = OldTy->getAs<PointerType>()) {
+ NewClassTy = NewPT->getPointeeType();
+ OldClassTy = OldPT->getPointeeType();
+ }
+ } else if (const ReferenceType *NewRT = NewTy->getAs<ReferenceType>()) {
+ if (const ReferenceType *OldRT = OldTy->getAs<ReferenceType>()) {
+ if (NewRT->getTypeClass() == OldRT->getTypeClass()) {
+ NewClassTy = NewRT->getPointeeType();
+ OldClassTy = OldRT->getPointeeType();
+ }
+ }
+ }
+
+ // The return types are neither both pointers nor both references to a
+ // class type.
+ if (NewClassTy.isNull()) {
+ Diag(New->getLocation(),
+ diag::err_different_return_type_for_overriding_virtual_function)
+ << New->getDeclName() << NewTy << OldTy
+ << New->getReturnTypeSourceRange();
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function)
+ << Old->getReturnTypeSourceRange();
+
+ return true;
+ }
+
+ // C++ [class.virtual]p6:
+ // If the return type of D::f differs from the return type of B::f, the
+ // class type in the return type of D::f shall be complete at the point of
+ // declaration of D::f or shall be the class type D.
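+ //
+ // For example (illustrative):
+ //   struct D;                           // not yet complete
+ //   struct B { virtual B *f(); };
+ //   struct X : B { D *f() override; };  // error: D must be complete here
+ //   struct Y : B { Y *f() override; };  // OK: the class type Y itself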
+ if (const RecordType *RT = NewClassTy->getAs<RecordType>()) {
+ if (!RT->isBeingDefined() &&
+ RequireCompleteType(New->getLocation(), NewClassTy,
+ diag::err_covariant_return_incomplete,
+ New->getDeclName()))
+ return true;
+ }
+
+ if (!Context.hasSameUnqualifiedType(NewClassTy, OldClassTy)) {
+ // Check if the new class derives from the old class.
+ if (!IsDerivedFrom(New->getLocation(), NewClassTy, OldClassTy)) {
+ Diag(New->getLocation(), diag::err_covariant_return_not_derived)
+ << New->getDeclName() << NewTy << OldTy
+ << New->getReturnTypeSourceRange();
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function)
+ << Old->getReturnTypeSourceRange();
+ return true;
+ }
+
+ // Check if the conversion from derived to base is valid.
+ if (CheckDerivedToBaseConversion(
+ NewClassTy, OldClassTy,
+ diag::err_covariant_return_inaccessible_base,
+ diag::err_covariant_return_ambiguous_derived_to_base_conv,
+ New->getLocation(), New->getReturnTypeSourceRange(),
+ New->getDeclName(), nullptr)) {
+ // FIXME: this note won't trigger for delayed access control
+ // diagnostics, and it's impossible to get an undelayed error
+ // here from access control during the original parse because
+ // the ParsingDeclSpec/ParsingDeclarator are still in scope.
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function)
+ << Old->getReturnTypeSourceRange();
+ return true;
+ }
+ }
+
+ // The qualifiers of the return types must be the same.
+ if (NewTy.getLocalCVRQualifiers() != OldTy.getLocalCVRQualifiers()) {
+ Diag(New->getLocation(),
+ diag::err_covariant_return_type_different_qualifications)
+ << New->getDeclName() << NewTy << OldTy
+ << New->getReturnTypeSourceRange();
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function)
+ << Old->getReturnTypeSourceRange();
+ return true;
+ }
+
+ // The new class type must have the same qualifiers as, or fewer than, the
+ // old type.
+ if (NewClassTy.isMoreQualifiedThan(OldClassTy)) {
+ Diag(New->getLocation(),
+ diag::err_covariant_return_type_class_type_more_qualified)
+ << New->getDeclName() << NewTy << OldTy
+ << New->getReturnTypeSourceRange();
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function)
+ << Old->getReturnTypeSourceRange();
+ return true;
+ }
+
+ return false;
+}
+
+/// \brief Mark the given method pure.
+///
+/// \param Method the method to be marked pure.
+///
+/// \param InitRange the source range that covers the "0" initializer.
+bool Sema::CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange) {
+ SourceLocation EndLoc = InitRange.getEnd();
+ if (EndLoc.isValid())
+ Method->setRangeEnd(EndLoc);
+
+ if (Method->isVirtual() || Method->getParent()->isDependentContext()) {
+ Method->setPure();
+ return false;
+ }
+
+ if (!Method->isInvalidDecl())
+ Diag(Method->getLocation(), diag::err_non_virtual_pure)
+ << Method->getDeclName() << InitRange;
+ return true;
+}
+
+void Sema::ActOnPureSpecifier(Decl *D, SourceLocation ZeroLoc) {
+ if (D->getFriendObjectKind())
+ Diag(D->getLocation(), diag::err_pure_friend);
+ else if (auto *M = dyn_cast<CXXMethodDecl>(D))
+ CheckPureMethod(M, ZeroLoc);
+ else
+ Diag(D->getLocation(), diag::err_illegal_initializer);
+}
+
+/// \brief Determine whether the given declaration is a static data member.
+static bool isStaticDataMember(const Decl *D) {
+ if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(D))
+ return Var->isStaticDataMember();
+
+ return false;
+}
+
+/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse
+/// an initializer for the out-of-line declaration 'Dcl'. The scope
+/// is a fresh scope pushed for just this purpose.
+///
+/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
+/// static data member of class X, names should be looked up in the scope of
+/// class X.
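+///
+/// For example (illustrative):
+/// \code
+///   struct X { static const int k = 1; static int n; };
+///   int X::n = k;  // 'k' is found in the scope of X
+/// \endcode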
+void Sema::ActOnCXXEnterDeclInitializer(Scope *S, Decl *D) {
+ // If there is no declaration, there was an error parsing it.
+ if (!D || D->isInvalidDecl())
+ return;
+
+ // We will always have a nested name specifier here, but this declaration
+ // might not be out of line if the specifier names the current namespace:
+ // extern int n;
+ // int ::n = 0;
+ if (D->isOutOfLine())
+ EnterDeclaratorContext(S, D->getDeclContext());
+
+ // If we are parsing the initializer for a static data member, push a
+ // new expression evaluation context that is associated with this static
+ // data member.
+ if (isStaticDataMember(D))
+ PushExpressionEvaluationContext(PotentiallyEvaluated, D);
+}
+
+/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
+/// initializer for the out-of-line declaration 'D'.
+void Sema::ActOnCXXExitDeclInitializer(Scope *S, Decl *D) {
+ // If there is no declaration, there was an error parsing it.
+ if (!D || D->isInvalidDecl())
+ return;
+
+ if (isStaticDataMember(D))
+ PopExpressionEvaluationContext();
+
+ if (D->isOutOfLine())
+ ExitDeclaratorContext(S);
+}
+
+/// ActOnCXXConditionDeclarationExpr - Parsed a condition declaration of a
+/// C++ if/switch/while/for statement.
+/// e.g: "if (int x = f()) {...}"
+DeclResult Sema::ActOnCXXConditionDeclaration(Scope *S, Declarator &D) {
+ // C++ 6.4p2:
+ // The declarator shall not specify a function or an array.
+ // The type-specifier-seq shall not contain typedef and shall not declare a
+ // new class or enumeration.
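+ //
+ // For example (illustrative):
+ //   if (int x = f()) { ... }  // OK
+ //   if (int g() = 0) { ... }  // error: the declarator specifies a function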
+ assert(D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef &&
+ "Parser allowed 'typedef' as storage class of condition decl.");
+
+ Decl *Dcl = ActOnDeclarator(S, D);
+ if (!Dcl)
+ return true;
+
+ if (isa<FunctionDecl>(Dcl)) { // The declarator shall not specify a function.
+ Diag(Dcl->getLocation(), diag::err_invalid_use_of_function_type)
+ << D.getSourceRange();
+ return true;
+ }
+
+ return Dcl;
+}
+
+void Sema::LoadExternalVTableUses() {
+ if (!ExternalSource)
+ return;
+
+ SmallVector<ExternalVTableUse, 4> VTables;
+ ExternalSource->ReadUsedVTables(VTables);
+ SmallVector<VTableUse, 4> NewUses;
+ for (unsigned I = 0, N = VTables.size(); I != N; ++I) {
+ llvm::DenseMap<CXXRecordDecl *, bool>::iterator Pos
+ = VTablesUsed.find(VTables[I].Record);
+ // Even if a definition wasn't required before, it may be required now.
+ if (Pos != VTablesUsed.end()) {
+ if (!Pos->second && VTables[I].DefinitionRequired)
+ Pos->second = true;
+ continue;
+ }
+
+ VTablesUsed[VTables[I].Record] = VTables[I].DefinitionRequired;
+ NewUses.push_back(VTableUse(VTables[I].Record, VTables[I].Location));
+ }
+
+ VTableUses.insert(VTableUses.begin(), NewUses.begin(), NewUses.end());
+}
+
+void Sema::MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
+ bool DefinitionRequired) {
+ // Ignore any vtable uses in unevaluated operands or for classes that do
+ // not have a vtable.
+ if (!Class->isDynamicClass() || Class->isDependentContext() ||
+ CurContext->isDependentContext() || isUnevaluatedContext())
+ return;
+
+ // Try to insert this class into the map.
+ LoadExternalVTableUses();
+ Class = cast<CXXRecordDecl>(Class->getCanonicalDecl());
+ std::pair<llvm::DenseMap<CXXRecordDecl *, bool>::iterator, bool>
+ Pos = VTablesUsed.insert(std::make_pair(Class, DefinitionRequired));
+ if (!Pos.second) {
+ // If we already had an entry, check to see if we are promoting this vtable
+ // to require a definition. If so, we need to reappend to the VTableUses
+ // list, since we may have already processed the first entry.
+ if (DefinitionRequired && !Pos.first->second) {
+ Pos.first->second = true;
+ } else {
+ // Otherwise, we can early exit.
+ return;
+ }
+ } else {
+ // The Microsoft ABI requires that we perform the destructor body
+ // checks (i.e. operator delete() lookup) when the vtable is marked used, as
+ // the deleting destructor is emitted with the vtable, not with the
+ // destructor definition as in the Itanium ABI.
+ // If it has a definition, we do the check at that point instead.
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ Class->hasUserDeclaredDestructor() &&
+ !Class->getDestructor()->isDefined() &&
+ !Class->getDestructor()->isDeleted()) {
+ CXXDestructorDecl *DD = Class->getDestructor();
+ ContextRAII SavedContext(*this, DD);
+ CheckDestructor(DD);
+ }
+ }
+
+ // Local classes need to have their virtual members marked
+ // immediately. For all other classes, we mark their virtual members
+ // at the end of the translation unit.
+ if (Class->isLocalClass())
+ MarkVirtualMembersReferenced(Loc, Class);
+ else
+ VTableUses.push_back(std::make_pair(Class, Loc));
+}
+
+bool Sema::DefineUsedVTables() {
+ LoadExternalVTableUses();
+ if (VTableUses.empty())
+ return false;
+
+ // Note: The VTableUses vector could grow as a result of marking
+ // the members of a class as "used", so we check the size each
+ // time through the loop and prefer indices (which are stable) to
+ // iterators (which are not).
+ bool DefinedAnything = false;
+ for (unsigned I = 0; I != VTableUses.size(); ++I) {
+ CXXRecordDecl *Class = VTableUses[I].first->getDefinition();
+ if (!Class)
+ continue;
+
+ SourceLocation Loc = VTableUses[I].second;
+
+ bool DefineVTable = true;
+
+ // If this class has a key function, but that key function is
+ // defined in another translation unit, we don't need to emit the
+ // vtable even though we're using it.
+ const CXXMethodDecl *KeyFunction = Context.getCurrentKeyFunction(Class);
+ if (KeyFunction && !KeyFunction->hasBody()) {
+ // The key function is in another translation unit.
+ DefineVTable = false;
+ TemplateSpecializationKind TSK =
+ KeyFunction->getTemplateSpecializationKind();
+ assert(TSK != TSK_ExplicitInstantiationDefinition &&
+ TSK != TSK_ImplicitInstantiation &&
+ "Instantiations don't have key functions");
+ (void)TSK;
+ } else if (!KeyFunction) {
+ // If we have a class with no key function that is the subject
+ // of an explicit instantiation declaration, suppress the
+ // vtable; it will live with the explicit instantiation
+ // definition.
+ bool IsExplicitInstantiationDeclaration
+ = Class->getTemplateSpecializationKind()
+ == TSK_ExplicitInstantiationDeclaration;
+ for (auto R : Class->redecls()) {
+ TemplateSpecializationKind TSK
+ = cast<CXXRecordDecl>(R)->getTemplateSpecializationKind();
+ if (TSK == TSK_ExplicitInstantiationDeclaration)
+ IsExplicitInstantiationDeclaration = true;
+ else if (TSK == TSK_ExplicitInstantiationDefinition) {
+ IsExplicitInstantiationDeclaration = false;
+ break;
+ }
+ }
+
+ if (IsExplicitInstantiationDeclaration)
+ DefineVTable = false;
+ }
+
+ // The exception specifications for all virtual members may be needed even
+ // if we are not providing an authoritative form of the vtable in this TU.
+ // We may choose to emit it available_externally anyway.
+ if (!DefineVTable) {
+ MarkVirtualMemberExceptionSpecsNeeded(Loc, Class);
+ continue;
+ }
+
+ // Mark all of the virtual members of this class as referenced, so
+ // that we can build a vtable. Then, tell the AST consumer that a
+ // vtable for this class is required.
+ DefinedAnything = true;
+ MarkVirtualMembersReferenced(Loc, Class);
+ CXXRecordDecl *Canonical = cast<CXXRecordDecl>(Class->getCanonicalDecl());
+ if (VTablesUsed[Canonical])
+ Consumer.HandleVTable(Class);
+
+ // Optionally warn if we're emitting a weak vtable.
+ if (Class->isExternallyVisible() &&
+ Class->getTemplateSpecializationKind() != TSK_ImplicitInstantiation) {
+ const FunctionDecl *KeyFunctionDef = nullptr;
+ if (!KeyFunction ||
+ (KeyFunction->hasBody(KeyFunctionDef) &&
+ KeyFunctionDef->isInlined()))
+ Diag(Class->getLocation(), Class->getTemplateSpecializationKind() ==
+ TSK_ExplicitInstantiationDefinition
+ ? diag::warn_weak_template_vtable : diag::warn_weak_vtable)
+ << Class;
+ }
+ }
+ VTableUses.clear();
+
+ return DefinedAnything;
+}
+
+void Sema::MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
+ const CXXRecordDecl *RD) {
+ for (const auto *I : RD->methods())
+ if (I->isVirtual() && !I->isPure())
+ ResolveExceptionSpec(Loc, I->getType()->castAs<FunctionProtoType>());
+}
+
+void Sema::MarkVirtualMembersReferenced(SourceLocation Loc,
+ const CXXRecordDecl *RD) {
+ // Mark all functions which will appear in RD's vtable as used.
+ CXXFinalOverriderMap FinalOverriders;
+ RD->getFinalOverriders(FinalOverriders);
+ for (CXXFinalOverriderMap::const_iterator I = FinalOverriders.begin(),
+ E = FinalOverriders.end();
+ I != E; ++I) {
+ for (OverridingMethods::const_iterator OI = I->second.begin(),
+ OE = I->second.end();
+ OI != OE; ++OI) {
+ assert(OI->second.size() > 0 && "no final overrider");
+ CXXMethodDecl *Overrider = OI->second.front().Method;
+
+ // C++ [basic.def.odr]p2:
+ // [...] A virtual member function is used if it is not pure. [...]
+ if (!Overrider->isPure())
+ MarkFunctionReferenced(Loc, Overrider);
+ }
+ }
+
+ // Only classes that have virtual bases need a VTT.
+ if (RD->getNumVBases() == 0)
+ return;
+
+ for (const auto &I : RD->bases()) {
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
+ if (Base->getNumVBases() == 0)
+ continue;
+ MarkVirtualMembersReferenced(Loc, Base);
+ }
+}
+
+/// SetIvarInitializers - This routine builds initialization ASTs for the
+/// Objective-C implementation whose ivars need be initialized.
+void Sema::SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation) {
+ if (!getLangOpts().CPlusPlus)
+ return;
+ if (ObjCInterfaceDecl *OID = ObjCImplementation->getClassInterface()) {
+ SmallVector<ObjCIvarDecl*, 8> ivars;
+ CollectIvarsToConstructOrDestruct(OID, ivars);
+ if (ivars.empty())
+ return;
+ SmallVector<CXXCtorInitializer*, 32> AllToInit;
+ for (unsigned i = 0; i < ivars.size(); i++) {
+ FieldDecl *Field = ivars[i];
+ if (Field->isInvalidDecl())
+ continue;
+
+ CXXCtorInitializer *Member;
+ InitializedEntity InitEntity = InitializedEntity::InitializeMember(Field);
+ InitializationKind InitKind =
+ InitializationKind::CreateDefault(ObjCImplementation->getLocation());
+
+ InitializationSequence InitSeq(*this, InitEntity, InitKind, None);
+ ExprResult MemberInit =
+ InitSeq.Perform(*this, InitEntity, InitKind, None);
+ MemberInit = MaybeCreateExprWithCleanups(MemberInit);
+ // Note, MemberInit could actually come back empty if no initialization
+ // is required (e.g., because it would call a trivial default constructor).
+ if (!MemberInit.get() || MemberInit.isInvalid())
+ continue;
+
+ Member =
+ new (Context) CXXCtorInitializer(Context, Field, SourceLocation(),
+ SourceLocation(),
+ MemberInit.getAs<Expr>(),
+ SourceLocation());
+ AllToInit.push_back(Member);
+
+ // Be sure that the destructor is accessible and is marked as referenced.
+ if (const RecordType *RecordTy =
+ Context.getBaseElementType(Field->getType())
+ ->getAs<RecordType>()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (CXXDestructorDecl *Destructor = LookupDestructor(RD)) {
+ MarkFunctionReferenced(Field->getLocation(), Destructor);
+ CheckDestructorAccess(Field->getLocation(), Destructor,
+ PDiag(diag::err_access_dtor_ivar)
+ << Context.getBaseElementType(Field->getType()));
+ }
+ }
+ }
+ ObjCImplementation->setIvarInitializers(Context,
+ AllToInit.data(), AllToInit.size());
+ }
+}
+
+static
+void DelegatingCycleHelper(CXXConstructorDecl* Ctor,
+ llvm::SmallSet<CXXConstructorDecl*, 4> &Valid,
+ llvm::SmallSet<CXXConstructorDecl*, 4> &Invalid,
+ llvm::SmallSet<CXXConstructorDecl*, 4> &Current,
+ Sema &S) {
+ if (Ctor->isInvalidDecl())
+ return;
+
+ CXXConstructorDecl *Target = Ctor->getTargetConstructor();
+
+ // Target may not be determinable yet, for instance if this is a dependent
+ // call in an uninstantiated template.
+ if (Target) {
+ const FunctionDecl *FNTarget = nullptr;
+ (void)Target->hasBody(FNTarget);
+ Target = const_cast<CXXConstructorDecl*>(
+ cast_or_null<CXXConstructorDecl>(FNTarget));
+ }
+
+ CXXConstructorDecl *Canonical = Ctor->getCanonicalDecl(),
+ // Avoid dereferencing a null pointer here.
+ *TCanonical = Target? Target->getCanonicalDecl() : nullptr;
+
+ if (!Current.insert(Canonical).second)
+ return;
+
+ // We know that beyond here, we aren't chaining into a cycle.
+ if (!Target || !Target->isDelegatingConstructor() ||
+ Target->isInvalidDecl() || Valid.count(TCanonical)) {
+ Valid.insert(Current.begin(), Current.end());
+ Current.clear();
+ // We've hit a cycle.
+ } else if (TCanonical == Canonical || Invalid.count(TCanonical) ||
+ Current.count(TCanonical)) {
+ // If we haven't diagnosed this cycle yet, do so now.
+ if (!Invalid.count(TCanonical)) {
+ S.Diag((*Ctor->init_begin())->getSourceLocation(),
+ diag::warn_delegating_ctor_cycle)
+ << Ctor;
+
+ // Don't add a note for a function delegating directly to itself.
+ if (TCanonical != Canonical)
+ S.Diag(Target->getLocation(), diag::note_it_delegates_to);
+
+ CXXConstructorDecl *C = Target;
+ while (C->getCanonicalDecl() != Canonical) {
+ const FunctionDecl *FNTarget = nullptr;
+ (void)C->getTargetConstructor()->hasBody(FNTarget);
+ assert(FNTarget && "Ctor cycle through bodiless function");
+
+ C = const_cast<CXXConstructorDecl*>(
+ cast<CXXConstructorDecl>(FNTarget));
+ S.Diag(C->getLocation(), diag::note_which_delegates_to);
+ }
+ }
+
+ Invalid.insert(Current.begin(), Current.end());
+ Current.clear();
+ } else {
+ DelegatingCycleHelper(Target, Valid, Invalid, Current, S);
+ }
+}
+
+void Sema::CheckDelegatingCtorCycles() {
+ llvm::SmallSet<CXXConstructorDecl*, 4> Valid, Invalid, Current;
+
+ for (DelegatingCtorDeclsType::iterator
+ I = DelegatingCtorDecls.begin(ExternalSource),
+ E = DelegatingCtorDecls.end();
+ I != E; ++I)
+ DelegatingCycleHelper(*I, Valid, Invalid, Current, *this);
+
+ for (llvm::SmallSet<CXXConstructorDecl *, 4>::iterator CI = Invalid.begin(),
+ CE = Invalid.end();
+ CI != CE; ++CI)
+ (*CI)->setInvalidDecl();
+}
+
+namespace {
+ /// \brief AST visitor that finds references to the 'this' expression.
+ class FindCXXThisExpr : public RecursiveASTVisitor<FindCXXThisExpr> {
+ Sema &S;
+
+ public:
+ explicit FindCXXThisExpr(Sema &S) : S(S) { }
+
+ bool VisitCXXThisExpr(CXXThisExpr *E) {
+ S.Diag(E->getLocation(), diag::err_this_static_member_func)
+ << E->isImplicit();
+ return false;
+ }
+ };
+}
+
+bool Sema::checkThisInStaticMemberFunctionType(CXXMethodDecl *Method) {
+ TypeSourceInfo *TSInfo = Method->getTypeSourceInfo();
+ if (!TSInfo)
+ return false;
+
+ TypeLoc TL = TSInfo->getTypeLoc();
+ FunctionProtoTypeLoc ProtoTL = TL.getAs<FunctionProtoTypeLoc>();
+ if (!ProtoTL)
+ return false;
+
+ // C++11 [expr.prim.general]p3:
+ // [The expression this] shall not appear before the optional
+ // cv-qualifier-seq and it shall not appear within the declaration of a
+ // static member function (although its type and value category are defined
+ // within a static member function as they are within a non-static member
+ // function). [ Note: this is because declaration matching does not occur
+ // until the complete declarator is known. - end note ]
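+ //
+ // For example (illustrative):
+ //   struct S { static auto f() -> decltype(this); };  // ill-formed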
+ const FunctionProtoType *Proto = ProtoTL.getTypePtr();
+ FindCXXThisExpr Finder(*this);
+
+ // If the return type came after the cv-qualifier-seq, check it now.
+ if (Proto->hasTrailingReturn() &&
+ !Finder.TraverseTypeLoc(ProtoTL.getReturnLoc()))
+ return true;
+
+ // Check the exception specification.
+ if (checkThisInStaticMemberFunctionExceptionSpec(Method))
+ return true;
+
+ return checkThisInStaticMemberFunctionAttributes(Method);
+}
+
+bool Sema::checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method) {
+ TypeSourceInfo *TSInfo = Method->getTypeSourceInfo();
+ if (!TSInfo)
+ return false;
+
+ TypeLoc TL = TSInfo->getTypeLoc();
+ FunctionProtoTypeLoc ProtoTL = TL.getAs<FunctionProtoTypeLoc>();
+ if (!ProtoTL)
+ return false;
+
+ const FunctionProtoType *Proto = ProtoTL.getTypePtr();
+ FindCXXThisExpr Finder(*this);
+
+ switch (Proto->getExceptionSpecType()) {
+ case EST_Unparsed:
+ case EST_Uninstantiated:
+ case EST_Unevaluated:
+ case EST_BasicNoexcept:
+ case EST_DynamicNone:
+ case EST_MSAny:
+ case EST_None:
+ break;
+
+ case EST_ComputedNoexcept:
+ if (!Finder.TraverseStmt(Proto->getNoexceptExpr()))
+ return true;
+ // fall through (the exception list below is empty for computed noexcept).
+
+ case EST_Dynamic:
+ for (const auto &E : Proto->exceptions()) {
+ if (!Finder.TraverseType(E))
+ return true;
+ }
+ break;
+ }
+
+ return false;
+}
+
+bool Sema::checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method) {
+ FindCXXThisExpr Finder(*this);
+
+ // Check attributes.
+ for (const auto *A : Method->attrs()) {
+ // FIXME: This should be emitted by tblgen.
+ Expr *Arg = nullptr;
+ ArrayRef<Expr *> Args;
+ if (const auto *G = dyn_cast<GuardedByAttr>(A))
+ Arg = G->getArg();
+ else if (const auto *G = dyn_cast<PtGuardedByAttr>(A))
+ Arg = G->getArg();
+ else if (const auto *AA = dyn_cast<AcquiredAfterAttr>(A))
+ Args = llvm::makeArrayRef(AA->args_begin(), AA->args_size());
+ else if (const auto *AB = dyn_cast<AcquiredBeforeAttr>(A))
+ Args = llvm::makeArrayRef(AB->args_begin(), AB->args_size());
+ else if (const auto *ETLF = dyn_cast<ExclusiveTrylockFunctionAttr>(A)) {
+ Arg = ETLF->getSuccessValue();
+ Args = llvm::makeArrayRef(ETLF->args_begin(), ETLF->args_size());
+ } else if (const auto *STLF = dyn_cast<SharedTrylockFunctionAttr>(A)) {
+ Arg = STLF->getSuccessValue();
+ Args = llvm::makeArrayRef(STLF->args_begin(), STLF->args_size());
+ } else if (const auto *LR = dyn_cast<LockReturnedAttr>(A))
+ Arg = LR->getArg();
+ else if (const auto *LE = dyn_cast<LocksExcludedAttr>(A))
+ Args = llvm::makeArrayRef(LE->args_begin(), LE->args_size());
+ else if (const auto *RC = dyn_cast<RequiresCapabilityAttr>(A))
+ Args = llvm::makeArrayRef(RC->args_begin(), RC->args_size());
+ else if (const auto *AC = dyn_cast<AcquireCapabilityAttr>(A))
+ Args = llvm::makeArrayRef(AC->args_begin(), AC->args_size());
+ else if (const auto *AC = dyn_cast<TryAcquireCapabilityAttr>(A))
+ Args = llvm::makeArrayRef(AC->args_begin(), AC->args_size());
+ else if (const auto *RC = dyn_cast<ReleaseCapabilityAttr>(A))
+ Args = llvm::makeArrayRef(RC->args_begin(), RC->args_size());
+
+ if (Arg && !Finder.TraverseStmt(Arg))
+ return true;
+
+ for (unsigned I = 0, N = Args.size(); I != N; ++I) {
+ if (!Finder.TraverseStmt(Args[I]))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void Sema::checkExceptionSpecification(
+ bool IsTopLevel, ExceptionSpecificationType EST,
+ ArrayRef<ParsedType> DynamicExceptions,
+ ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr,
+ SmallVectorImpl<QualType> &Exceptions,
+ FunctionProtoType::ExceptionSpecInfo &ESI) {
+ Exceptions.clear();
+ ESI.Type = EST;
+ if (EST == EST_Dynamic) {
+ Exceptions.reserve(DynamicExceptions.size());
+ for (unsigned ei = 0, ee = DynamicExceptions.size(); ei != ee; ++ei) {
+ // FIXME: Preserve type source info.
+ QualType ET = GetTypeFromParser(DynamicExceptions[ei]);
+
+ if (IsTopLevel) {
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ collectUnexpandedParameterPacks(ET, Unexpanded);
+ if (!Unexpanded.empty()) {
+ DiagnoseUnexpandedParameterPacks(
+ DynamicExceptionRanges[ei].getBegin(), UPPC_ExceptionType,
+ Unexpanded);
+ continue;
+ }
+ }
+
+ // Check that the type is valid for an exception spec, and
+ // drop it if not.
+ if (!CheckSpecifiedExceptionType(ET, DynamicExceptionRanges[ei]))
+ Exceptions.push_back(ET);
+ }
+ ESI.Exceptions = Exceptions;
+ return;
+ }
+
+ if (EST == EST_ComputedNoexcept) {
+ // If an error occurred, there's no expression here.
+ if (NoexceptExpr) {
+ assert((NoexceptExpr->isTypeDependent() ||
+ NoexceptExpr->getType()->getCanonicalTypeUnqualified() ==
+ Context.BoolTy) &&
+ "Parser should have made sure that the expression is boolean");
+ if (IsTopLevel && DiagnoseUnexpandedParameterPack(NoexceptExpr)) {
+ ESI.Type = EST_BasicNoexcept;
+ return;
+ }
+
+ if (!NoexceptExpr->isValueDependent())
+ NoexceptExpr = VerifyIntegerConstantExpression(NoexceptExpr, nullptr,
+ diag::err_noexcept_needs_constant_expression,
+ /*AllowFold*/ false).get();
+ ESI.NoexceptExpr = NoexceptExpr;
+ }
+ return;
+ }
+}
+
+void Sema::actOnDelayedExceptionSpecification(Decl *MethodD,
+ ExceptionSpecificationType EST,
+ SourceRange SpecificationRange,
+ ArrayRef<ParsedType> DynamicExceptions,
+ ArrayRef<SourceRange> DynamicExceptionRanges,
+ Expr *NoexceptExpr) {
+ if (!MethodD)
+ return;
+
+ // Dig out the method we're referring to.
+ if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(MethodD))
+ MethodD = FunTmpl->getTemplatedDecl();
+
+ CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(MethodD);
+ if (!Method)
+ return;
+
+ // Check the exception specification.
+ llvm::SmallVector<QualType, 4> Exceptions;
+ FunctionProtoType::ExceptionSpecInfo ESI;
+ checkExceptionSpecification(/*IsTopLevel*/true, EST, DynamicExceptions,
+ DynamicExceptionRanges, NoexceptExpr, Exceptions,
+ ESI);
+
+ // Update the exception specification on the function type.
+ Context.adjustExceptionSpec(Method, ESI, /*AsWritten*/true);
+
+ if (Method->isStatic())
+ checkThisInStaticMemberFunctionExceptionSpec(Method);
+
+ if (Method->isVirtual()) {
+ // Check overrides, which we previously had to delay.
+ for (CXXMethodDecl::method_iterator O = Method->begin_overridden_methods(),
+ OEnd = Method->end_overridden_methods();
+ O != OEnd; ++O)
+ CheckOverridingFunctionExceptionSpec(Method, *O);
+ }
+}
+
+/// HandleMSProperty - Analyze a __declspec(property) field of a C++ class.
+///
+MSPropertyDecl *Sema::HandleMSProperty(Scope *S, RecordDecl *Record,
+ SourceLocation DeclStart,
+ Declarator &D, Expr *BitWidth,
+ InClassInitStyle InitStyle,
+ AccessSpecifier AS,
+ AttributeList *MSPropertyAttr) {
+ IdentifierInfo *II = D.getIdentifier();
+ if (!II) {
+ Diag(DeclStart, diag::err_anonymous_property);
+ return nullptr;
+ }
+ SourceLocation Loc = D.getIdentifierLoc();
+
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType T = TInfo->getType();
+ if (getLangOpts().CPlusPlus) {
+ CheckExtraCXXDefaultArguments(D);
+
+ if (DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo,
+ UPPC_DataMemberType)) {
+ D.setInvalidType();
+ T = Context.IntTy;
+ TInfo = Context.getTrivialTypeSourceInfo(T, Loc);
+ }
+ }
+
+ DiagnoseFunctionSpecifiers(D.getDeclSpec());
+
+ if (DeclSpec::TSCS TSCS = D.getDeclSpec().getThreadStorageClassSpec())
+ Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
+ diag::err_invalid_thread)
+ << DeclSpec::getSpecifierName(TSCS);
+
+ // Check to see if this name was declared as a member previously
+ NamedDecl *PrevDecl = nullptr;
+ LookupResult Previous(*this, II, Loc, LookupMemberName, ForRedeclaration);
+ LookupName(Previous, S);
+ switch (Previous.getResultKind()) {
+ case LookupResult::Found:
+ case LookupResult::FoundUnresolvedValue:
+ PrevDecl = Previous.getAsSingle<NamedDecl>();
+ break;
+
+ case LookupResult::FoundOverloaded:
+ PrevDecl = Previous.getRepresentativeDecl();
+ break;
+
+ case LookupResult::NotFound:
+ case LookupResult::NotFoundInCurrentInstantiation:
+ case LookupResult::Ambiguous:
+ break;
+ }
+
+ if (PrevDecl && PrevDecl->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl);
+ // Just pretend that we didn't see the previous declaration.
+ PrevDecl = nullptr;
+ }
+
+ if (PrevDecl && !isDeclInScope(PrevDecl, Record, S))
+ PrevDecl = nullptr;
+
+ SourceLocation TSSL = D.getLocStart();
+ const AttributeList::PropertyData &Data = MSPropertyAttr->getPropertyData();
+ MSPropertyDecl *NewPD = MSPropertyDecl::Create(
+ Context, Record, Loc, II, T, TInfo, TSSL, Data.GetterId, Data.SetterId);
+ ProcessDeclAttributes(TUScope, NewPD, D);
+ NewPD->setAccess(AS);
+
+ if (NewPD->isInvalidDecl())
+ Record->setInvalidDecl();
+
+ if (D.getDeclSpec().isModulePrivateSpecified())
+ NewPD->setModulePrivate();
+
+ if (NewPD->isInvalidDecl() && PrevDecl) {
+ // Don't introduce NewFD into scope; there's already something
+ // with the same name in the same scope.
+ } else if (II) {
+ PushOnScopeChains(NewPD, S);
+ } else
+ Record->addDecl(NewPD);
+
+ return NewPD;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
new file mode 100644
index 0000000..a2f41a7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
@@ -0,0 +1,4634 @@
+//===--- SemaDeclObjC.cpp - Semantic Analysis for ObjC Declarations -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for Objective C declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/ExternalSemaSource.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "TypeLocBuilder.h"
+
+using namespace clang;
+
+/// Check whether the given method, which must be in the 'init'
+/// family, is a valid member of that family.
+///
+/// \param receiverTypeIfCall - if null, check this as if declaring it;
+/// if non-null, check this as if making a call to it with the given
+/// receiver type
+///
+/// \return true to indicate that there was an error and appropriate
+/// actions were taken
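+///
+/// For example, assuming unrelated (hypothetical) classes Foo and Bar:
+/// \code
+/// @interface Foo : NSObject
+/// - (Bar *)init; // rejected: result type is unrelated to Foo
+/// @end
+/// \endcode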
+bool Sema::checkInitMethod(ObjCMethodDecl *method,
+ QualType receiverTypeIfCall) {
+ if (method->isInvalidDecl()) return true;
+
+ // This castAs is safe: methods that don't return an object
+ // pointer won't be inferred as inits and will reject an explicit
+ // objc_method_family(init).
+
+ // We ignore protocols here. Should we? What about Class?
+
+ const ObjCObjectType *result =
+ method->getReturnType()->castAs<ObjCObjectPointerType>()->getObjectType();
+
+ if (result->isObjCId()) {
+ return false;
+ } else if (result->isObjCClass()) {
+ // fall through: always an error
+ } else {
+ ObjCInterfaceDecl *resultClass = result->getInterface();
+ assert(resultClass && "unexpected object type!");
+
+ // It's okay for the result type to still be a forward declaration
+ // if we're checking an interface declaration.
+ if (!resultClass->hasDefinition()) {
+ if (receiverTypeIfCall.isNull() &&
+ !isa<ObjCImplementationDecl>(method->getDeclContext()))
+ return false;
+
+ // Otherwise, we try to compare class types.
+ } else {
+ // If this method was declared in a protocol, we can't check
+ // anything unless we have a receiver type that's an interface.
+ const ObjCInterfaceDecl *receiverClass = nullptr;
+ if (isa<ObjCProtocolDecl>(method->getDeclContext())) {
+ if (receiverTypeIfCall.isNull())
+ return false;
+
+ receiverClass = receiverTypeIfCall->castAs<ObjCObjectPointerType>()
+ ->getInterfaceDecl();
+
+ // This can be null for calls to e.g. id<Foo>.
+ if (!receiverClass) return false;
+ } else {
+ receiverClass = method->getClassInterface();
+ assert(receiverClass && "method not associated with a class!");
+ }
+
+ // If either class is a subclass of the other, it's fine.
+ if (receiverClass->isSuperClassOf(resultClass) ||
+ resultClass->isSuperClassOf(receiverClass))
+ return false;
+ }
+ }
+
+ SourceLocation loc = method->getLocation();
+
+ // If we're in a system header, and this is not a call, just make
+ // the method unusable.
+ if (receiverTypeIfCall.isNull() && getSourceManager().isInSystemHeader(loc)) {
+ method->addAttr(UnavailableAttr::CreateImplicit(Context, "",
+ UnavailableAttr::IR_ARCInitReturnsUnrelated, loc));
+ return true;
+ }
+
+ // Otherwise, it's an error.
+ Diag(loc, diag::err_arc_init_method_unrelated_result_type);
+ method->setInvalidDecl();
+ return true;
+}
+
+void Sema::CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
+ const ObjCMethodDecl *Overridden) {
+ if (Overridden->hasRelatedResultType() &&
+ !NewMethod->hasRelatedResultType()) {
+ // This can only happen when the method follows a naming convention that
+ // implies a related result type, and the original (overridden) method has
+ // a suitable return type, but the new (overriding) method does not have
+ // a suitable return type.
+ QualType ResultType = NewMethod->getReturnType();
+ SourceRange ResultTypeRange = NewMethod->getReturnTypeSourceRange();
+
+ // Figure out which class this method is part of, if any.
+ ObjCInterfaceDecl *CurrentClass
+ = dyn_cast<ObjCInterfaceDecl>(NewMethod->getDeclContext());
+ if (!CurrentClass) {
+ DeclContext *DC = NewMethod->getDeclContext();
+ if (ObjCCategoryDecl *Cat = dyn_cast<ObjCCategoryDecl>(DC))
+ CurrentClass = Cat->getClassInterface();
+ else if (ObjCImplDecl *Impl = dyn_cast<ObjCImplDecl>(DC))
+ CurrentClass = Impl->getClassInterface();
+ else if (ObjCCategoryImplDecl *CatImpl
+ = dyn_cast<ObjCCategoryImplDecl>(DC))
+ CurrentClass = CatImpl->getClassInterface();
+ }
+
+ if (CurrentClass) {
+ Diag(NewMethod->getLocation(),
+ diag::warn_related_result_type_compatibility_class)
+ << Context.getObjCInterfaceType(CurrentClass)
+ << ResultType
+ << ResultTypeRange;
+ } else {
+ Diag(NewMethod->getLocation(),
+ diag::warn_related_result_type_compatibility_protocol)
+ << ResultType
+ << ResultTypeRange;
+ }
+
+ if (ObjCMethodFamily Family = Overridden->getMethodFamily())
+ Diag(Overridden->getLocation(),
+ diag::note_related_result_type_family)
+ << /*overridden method*/ 0
+ << Family;
+ else
+ Diag(Overridden->getLocation(),
+ diag::note_related_result_type_overridden);
+ }
+ if (getLangOpts().ObjCAutoRefCount) {
+ if ((NewMethod->hasAttr<NSReturnsRetainedAttr>() !=
+ Overridden->hasAttr<NSReturnsRetainedAttr>())) {
+ Diag(NewMethod->getLocation(),
+ diag::err_nsreturns_retained_attribute_mismatch) << 1;
+ Diag(Overridden->getLocation(), diag::note_previous_decl)
+ << "method";
+ }
+ if ((NewMethod->hasAttr<NSReturnsNotRetainedAttr>() !=
+ Overridden->hasAttr<NSReturnsNotRetainedAttr>())) {
+ Diag(NewMethod->getLocation(),
+ diag::err_nsreturns_retained_attribute_mismatch) << 0;
+ Diag(Overridden->getLocation(), diag::note_previous_decl)
+ << "method";
+ }
+ ObjCMethodDecl::param_const_iterator oi = Overridden->param_begin(),
+ oe = Overridden->param_end();
+ for (ObjCMethodDecl::param_iterator
+ ni = NewMethod->param_begin(), ne = NewMethod->param_end();
+ ni != ne && oi != oe; ++ni, ++oi) {
+ const ParmVarDecl *oldDecl = (*oi);
+ ParmVarDecl *newDecl = (*ni);
+ if (newDecl->hasAttr<NSConsumedAttr>() !=
+ oldDecl->hasAttr<NSConsumedAttr>()) {
+ Diag(newDecl->getLocation(),
+ diag::err_nsconsumed_attribute_mismatch);
+ Diag(oldDecl->getLocation(), diag::note_previous_decl)
+ << "parameter";
+ }
+ }
+ }
+}
+
+/// \brief Check a method declaration for compatibility with the Objective-C
+/// ARC conventions.
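+///
+/// For example, a method in the 'copy' family that carries no explicit
+/// ns_returns_* attribute, such as
+/// \code
+/// - (id)copyWithZone:(NSZone *)zone;
+/// \endcode
+/// is implicitly given the ns_returns_retained attribute below.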
+bool Sema::CheckARCMethodDecl(ObjCMethodDecl *method) {
+ ObjCMethodFamily family = method->getMethodFamily();
+ switch (family) {
+ case OMF_None:
+ case OMF_finalize:
+ case OMF_retain:
+ case OMF_release:
+ case OMF_autorelease:
+ case OMF_retainCount:
+ case OMF_self:
+ case OMF_initialize:
+ case OMF_performSelector:
+ return false;
+
+ case OMF_dealloc:
+ if (!Context.hasSameType(method->getReturnType(), Context.VoidTy)) {
+ SourceRange ResultTypeRange = method->getReturnTypeSourceRange();
+ if (ResultTypeRange.isInvalid())
+ Diag(method->getLocation(), diag::error_dealloc_bad_result_type)
+ << method->getReturnType()
+ << FixItHint::CreateInsertion(method->getSelectorLoc(0), "(void)");
+ else
+ Diag(method->getLocation(), diag::error_dealloc_bad_result_type)
+ << method->getReturnType()
+ << FixItHint::CreateReplacement(ResultTypeRange, "void");
+ return true;
+ }
+ return false;
+
+ case OMF_init:
+ // If the method doesn't obey the init rules, don't bother annotating it.
+ if (checkInitMethod(method, QualType()))
+ return true;
+
+ method->addAttr(NSConsumesSelfAttr::CreateImplicit(Context));
+
+ // Don't add a second copy of this attribute, but otherwise don't
+ // let it be suppressed.
+ if (method->hasAttr<NSReturnsRetainedAttr>())
+ return false;
+ break;
+
+ case OMF_alloc:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ case OMF_new:
+ if (method->hasAttr<NSReturnsRetainedAttr>() ||
+ method->hasAttr<NSReturnsNotRetainedAttr>() ||
+ method->hasAttr<NSReturnsAutoreleasedAttr>())
+ return false;
+ break;
+ }
+
+ method->addAttr(NSReturnsRetainedAttr::CreateImplicit(Context));
+ return false;
+}
+
+static void DiagnoseObjCImplementedDeprecations(Sema &S,
+ NamedDecl *ND,
+ SourceLocation ImplLoc,
+ int select) {
+ if (ND && ND->isDeprecated()) {
+ S.Diag(ImplLoc, diag::warn_deprecated_def) << select;
+ if (select == 0)
+ S.Diag(ND->getLocation(), diag::note_method_declared_at)
+ << ND->getDeclName();
+ else
+ S.Diag(ND->getLocation(), diag::note_previous_decl) << "class";
+ }
+}
+
+/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
+/// global pool.
+void Sema::AddAnyMethodToGlobalPool(Decl *D) {
+ ObjCMethodDecl *MDecl = dyn_cast_or_null<ObjCMethodDecl>(D);
+
+ // If we don't have a valid method decl, simply return.
+ if (!MDecl)
+ return;
+ if (MDecl->isInstanceMethod())
+ AddInstanceMethodToGlobalPool(MDecl, true);
+ else
+ AddFactoryMethodToGlobalPool(MDecl, true);
+}
+
+/// HasExplicitOwnershipAttr - returns true when a pointer to an ObjC pointer
+/// has an explicit ownership attribute; false otherwise.
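+///
+/// For example, under ARC the out-parameter in (illustrative)
+/// \code
+/// - (BOOL)save:(NSError **)outError;
+/// \endcode
+/// has its pointee lifetime inferred rather than written, so this returns
+/// false and the definition is flagged in ActOnStartOfObjCMethodDef.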
+static bool
+HasExplicitOwnershipAttr(Sema &S, ParmVarDecl *Param) {
+ QualType T = Param->getType();
+
+ if (const PointerType *PT = T->getAs<PointerType>()) {
+ T = PT->getPointeeType();
+ } else if (const ReferenceType *RT = T->getAs<ReferenceType>()) {
+ T = RT->getPointeeType();
+ } else {
+ return true;
+ }
+
+ // If we have a lifetime qualifier, but it's local, we must have
+ // inferred it. So, it is implicit.
+ return !T.getLocalQualifiers().hasObjCLifetime();
+}
+
+/// ActOnStartOfObjCMethodDef - This routine sets up the parameters, both
+/// invisible and user-declared, in the method definition's AST.
+void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
+  assert((getCurMethodDecl() == nullptr) && "Method parsing confused");
+ ObjCMethodDecl *MDecl = dyn_cast_or_null<ObjCMethodDecl>(D);
+
+ // If we don't have a valid method decl, simply return.
+ if (!MDecl)
+ return;
+
+ // Allow all of Sema to see that we are entering a method definition.
+ PushDeclContext(FnBodyScope, MDecl);
+ PushFunctionScope();
+
+  // Create Decl objects for each parameter, entering them in the scope for
+  // binding to their use.
+
+ // Insert the invisible arguments, self and _cmd!
+ MDecl->createImplicitParams(Context, MDecl->getClassInterface());
+
+ PushOnScopeChains(MDecl->getSelfDecl(), FnBodyScope);
+ PushOnScopeChains(MDecl->getCmdDecl(), FnBodyScope);
+
+ // The ObjC parser requires parameter names so there's no need to check.
+ CheckParmsForFunctionDef(MDecl->param_begin(), MDecl->param_end(),
+ /*CheckParameterNames=*/false);
+
+ // Introduce all of the other parameters into this scope.
+ for (auto *Param : MDecl->params()) {
+ if (!Param->isInvalidDecl() &&
+ getLangOpts().ObjCAutoRefCount &&
+ !HasExplicitOwnershipAttr(*this, Param))
+ Diag(Param->getLocation(), diag::warn_arc_strong_pointer_objc_pointer) <<
+ Param->getType();
+
+ if (Param->getIdentifier())
+ PushOnScopeChains(Param, FnBodyScope);
+ }
+
+ // In ARC, disallow definition of retain/release/autorelease/retainCount
+ if (getLangOpts().ObjCAutoRefCount) {
+ switch (MDecl->getMethodFamily()) {
+ case OMF_retain:
+ case OMF_retainCount:
+ case OMF_release:
+ case OMF_autorelease:
+ Diag(MDecl->getLocation(), diag::err_arc_illegal_method_def)
+ << 0 << MDecl->getSelector();
+ break;
+
+ case OMF_None:
+ case OMF_dealloc:
+ case OMF_finalize:
+ case OMF_alloc:
+ case OMF_init:
+ case OMF_mutableCopy:
+ case OMF_copy:
+ case OMF_new:
+ case OMF_self:
+ case OMF_initialize:
+ case OMF_performSelector:
+ break;
+ }
+ }
+
+ // Warn on deprecated methods under -Wdeprecated-implementations,
+ // and prepare for warning on missing super calls.
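+  // For example (illustrative names):
+  //   @interface Base
+  //   - (void)m __attribute__((deprecated));
+  //   @end
+  //   @interface Sub : Base @end
+  //   @implementation Sub
+  //   - (void)m {} // warned under -Wdeprecated-implementations
+  //   @end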
+ if (ObjCInterfaceDecl *IC = MDecl->getClassInterface()) {
+ ObjCMethodDecl *IMD =
+ IC->lookupMethod(MDecl->getSelector(), MDecl->isInstanceMethod());
+
+ if (IMD) {
+ ObjCImplDecl *ImplDeclOfMethodDef =
+ dyn_cast<ObjCImplDecl>(MDecl->getDeclContext());
+ ObjCContainerDecl *ContDeclOfMethodDecl =
+ dyn_cast<ObjCContainerDecl>(IMD->getDeclContext());
+ ObjCImplDecl *ImplDeclOfMethodDecl = nullptr;
+ if (ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(ContDeclOfMethodDecl))
+ ImplDeclOfMethodDecl = OID->getImplementation();
+ else if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(ContDeclOfMethodDecl)) {
+ if (CD->IsClassExtension()) {
+ if (ObjCInterfaceDecl *OID = CD->getClassInterface())
+ ImplDeclOfMethodDecl = OID->getImplementation();
+ } else
+ ImplDeclOfMethodDecl = CD->getImplementation();
+ }
+      // No need to issue a deprecated warning if the deprecated method in a
+      // class/category is being implemented in its own implementation (no
+      // overriding is involved).
+ if (!ImplDeclOfMethodDecl || ImplDeclOfMethodDecl != ImplDeclOfMethodDef)
+ DiagnoseObjCImplementedDeprecations(*this,
+ dyn_cast<NamedDecl>(IMD),
+ MDecl->getLocation(), 0);
+ }
+
+ if (MDecl->getMethodFamily() == OMF_init) {
+ if (MDecl->isDesignatedInitializerForTheInterface()) {
+ getCurFunction()->ObjCIsDesignatedInit = true;
+ getCurFunction()->ObjCWarnForNoDesignatedInitChain =
+ IC->getSuperClass() != nullptr;
+ } else if (IC->hasDesignatedInitializers()) {
+ getCurFunction()->ObjCIsSecondaryInit = true;
+ getCurFunction()->ObjCWarnForNoInitDelegation = true;
+ }
+ }
+
+ // If this is "dealloc" or "finalize", set some bit here.
+ // Then in ActOnSuperMessage() (SemaExprObjC), set it back to false.
+ // Finally, in ActOnFinishFunctionBody() (SemaDecl), warn if flag is set.
+ // Only do this if the current class actually has a superclass.
+ if (const ObjCInterfaceDecl *SuperClass = IC->getSuperClass()) {
+ ObjCMethodFamily Family = MDecl->getMethodFamily();
+ if (Family == OMF_dealloc) {
+ if (!(getLangOpts().ObjCAutoRefCount ||
+ getLangOpts().getGC() == LangOptions::GCOnly))
+ getCurFunction()->ObjCShouldCallSuper = true;
+
+ } else if (Family == OMF_finalize) {
+ if (Context.getLangOpts().getGC() != LangOptions::NonGC)
+ getCurFunction()->ObjCShouldCallSuper = true;
+
+ } else {
+ const ObjCMethodDecl *SuperMethod =
+ SuperClass->lookupMethod(MDecl->getSelector(),
+ MDecl->isInstanceMethod());
+ getCurFunction()->ObjCShouldCallSuper =
+ (SuperMethod && SuperMethod->hasAttr<ObjCRequiresSuperAttr>());
+ }
+ }
+ }
+}
+
+namespace {
+
+// Callback to only accept typo corrections that are Objective-C classes.
+// If an ObjCInterfaceDecl* is given to the constructor, then the validation
+// function will reject corrections to that class.
+class ObjCInterfaceValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ ObjCInterfaceValidatorCCC() : CurrentIDecl(nullptr) {}
+ explicit ObjCInterfaceValidatorCCC(ObjCInterfaceDecl *IDecl)
+ : CurrentIDecl(IDecl) {}
+
+ bool ValidateCandidate(const TypoCorrection &candidate) override {
+ ObjCInterfaceDecl *ID = candidate.getCorrectionDeclAs<ObjCInterfaceDecl>();
+ return ID && !declaresSameEntity(ID, CurrentIDecl);
+ }
+
+ private:
+ ObjCInterfaceDecl *CurrentIDecl;
+};
+
+} // end anonymous namespace
+
+static void diagnoseUseOfProtocols(Sema &TheSema,
+ ObjCContainerDecl *CD,
+ ObjCProtocolDecl *const *ProtoRefs,
+ unsigned NumProtoRefs,
+ const SourceLocation *ProtoLocs) {
+ assert(ProtoRefs);
+ // Diagnose availability in the context of the ObjC container.
+ Sema::ContextRAII SavedContext(TheSema, CD);
+ for (unsigned i = 0; i < NumProtoRefs; ++i) {
+ (void)TheSema.DiagnoseUseOfDecl(ProtoRefs[i], ProtoLocs[i]);
+ }
+}
+
+void Sema::
+ActOnSuperClassOfClassInterface(Scope *S,
+ SourceLocation AtInterfaceLoc,
+ ObjCInterfaceDecl *IDecl,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassLoc,
+ IdentifierInfo *SuperName,
+ SourceLocation SuperLoc,
+ ArrayRef<ParsedType> SuperTypeArgs,
+ SourceRange SuperTypeArgsRange) {
+  // Check if a different kind of symbol is declared in this scope.
+ NamedDecl *PrevDecl = LookupSingleName(TUScope, SuperName, SuperLoc,
+ LookupOrdinaryName);
+
+ if (!PrevDecl) {
+ // Try to correct for a typo in the superclass name without correcting
+ // to the class we're defining.
+ if (TypoCorrection Corrected = CorrectTypo(
+ DeclarationNameInfo(SuperName, SuperLoc),
+ LookupOrdinaryName, TUScope,
+ nullptr, llvm::make_unique<ObjCInterfaceValidatorCCC>(IDecl),
+ CTK_ErrorRecovery)) {
+ diagnoseTypo(Corrected, PDiag(diag::err_undef_superclass_suggest)
+ << SuperName << ClassName);
+ PrevDecl = Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>();
+ }
+ }
+
+ if (declaresSameEntity(PrevDecl, IDecl)) {
+ Diag(SuperLoc, diag::err_recursive_superclass)
+ << SuperName << ClassName << SourceRange(AtInterfaceLoc, ClassLoc);
+ IDecl->setEndOfDefinitionLoc(ClassLoc);
+ } else {
+ ObjCInterfaceDecl *SuperClassDecl =
+ dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);
+ QualType SuperClassType;
+
+ // Diagnose classes that inherit from deprecated classes.
+ if (SuperClassDecl) {
+ (void)DiagnoseUseOfDecl(SuperClassDecl, SuperLoc);
+ SuperClassType = Context.getObjCInterfaceType(SuperClassDecl);
+ }
+
+ if (PrevDecl && !SuperClassDecl) {
+ // The previous declaration was not a class decl. Check if we have a
+ // typedef. If we do, get the underlying class type.
+ if (const TypedefNameDecl *TDecl =
+ dyn_cast_or_null<TypedefNameDecl>(PrevDecl)) {
+ QualType T = TDecl->getUnderlyingType();
+ if (T->isObjCObjectType()) {
+ if (NamedDecl *IDecl = T->getAs<ObjCObjectType>()->getInterface()) {
+ SuperClassDecl = dyn_cast<ObjCInterfaceDecl>(IDecl);
+ SuperClassType = Context.getTypeDeclType(TDecl);
+
+ // This handles the following case:
+ // @interface NewI @end
+ // typedef NewI DeprI __attribute__((deprecated("blah")))
+ // @interface SI : DeprI /* warn here */ @end
+ (void)DiagnoseUseOfDecl(const_cast<TypedefNameDecl*>(TDecl), SuperLoc);
+ }
+ }
+ }
+
+ // This handles the following case:
+ //
+ // typedef int SuperClass;
+ // @interface MyClass : SuperClass {} @end
+ //
+ if (!SuperClassDecl) {
+ Diag(SuperLoc, diag::err_redefinition_different_kind) << SuperName;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ }
+ }
+
+ if (!dyn_cast_or_null<TypedefNameDecl>(PrevDecl)) {
+ if (!SuperClassDecl)
+ Diag(SuperLoc, diag::err_undef_superclass)
+ << SuperName << ClassName << SourceRange(AtInterfaceLoc, ClassLoc);
+ else if (RequireCompleteType(SuperLoc,
+ SuperClassType,
+ diag::err_forward_superclass,
+ SuperClassDecl->getDeclName(),
+ ClassName,
+ SourceRange(AtInterfaceLoc, ClassLoc))) {
+ SuperClassDecl = nullptr;
+ SuperClassType = QualType();
+ }
+ }
+
+ if (SuperClassType.isNull()) {
+ assert(!SuperClassDecl && "Failed to set SuperClassType?");
+ return;
+ }
+
+ // Handle type arguments on the superclass.
+ TypeSourceInfo *SuperClassTInfo = nullptr;
+ if (!SuperTypeArgs.empty()) {
+ TypeResult fullSuperClassType = actOnObjCTypeArgsAndProtocolQualifiers(
+ S,
+ SuperLoc,
+ CreateParsedType(SuperClassType,
+ nullptr),
+ SuperTypeArgsRange.getBegin(),
+ SuperTypeArgs,
+ SuperTypeArgsRange.getEnd(),
+ SourceLocation(),
+ { },
+ { },
+ SourceLocation());
+ if (!fullSuperClassType.isUsable())
+ return;
+
+ SuperClassType = GetTypeFromParser(fullSuperClassType.get(),
+ &SuperClassTInfo);
+ }
+
+ if (!SuperClassTInfo) {
+ SuperClassTInfo = Context.getTrivialTypeSourceInfo(SuperClassType,
+ SuperLoc);
+ }
+
+ IDecl->setSuperClass(SuperClassTInfo);
+ IDecl->setEndOfDefinitionLoc(SuperClassTInfo->getTypeLoc().getLocEnd());
+ }
+}
+
+DeclResult Sema::actOnObjCTypeParam(Scope *S,
+ ObjCTypeParamVariance variance,
+ SourceLocation varianceLoc,
+ unsigned index,
+ IdentifierInfo *paramName,
+ SourceLocation paramLoc,
+ SourceLocation colonLoc,
+ ParsedType parsedTypeBound) {
+ // If there was an explicitly-provided type bound, check it.
+ TypeSourceInfo *typeBoundInfo = nullptr;
+ if (parsedTypeBound) {
+ // The type bound can be any Objective-C pointer type.
+ QualType typeBound = GetTypeFromParser(parsedTypeBound, &typeBoundInfo);
+ if (typeBound->isObjCObjectPointerType()) {
+ // okay
+ } else if (typeBound->isObjCObjectType()) {
+ // The user forgot the * on an Objective-C pointer type, e.g.,
+ // "T : NSView".
+ SourceLocation starLoc = getLocForEndOfToken(
+ typeBoundInfo->getTypeLoc().getEndLoc());
+ Diag(typeBoundInfo->getTypeLoc().getBeginLoc(),
+ diag::err_objc_type_param_bound_missing_pointer)
+ << typeBound << paramName
+ << FixItHint::CreateInsertion(starLoc, " *");
+
+ // Create a new type location builder so we can update the type
+ // location information we have.
+ TypeLocBuilder builder;
+ builder.pushFullCopy(typeBoundInfo->getTypeLoc());
+
+ // Create the Objective-C pointer type.
+ typeBound = Context.getObjCObjectPointerType(typeBound);
+ ObjCObjectPointerTypeLoc newT
+ = builder.push<ObjCObjectPointerTypeLoc>(typeBound);
+ newT.setStarLoc(starLoc);
+
+ // Form the new type source information.
+ typeBoundInfo = builder.getTypeSourceInfo(Context, typeBound);
+ } else {
+ // Not a valid type bound.
+ Diag(typeBoundInfo->getTypeLoc().getBeginLoc(),
+ diag::err_objc_type_param_bound_nonobject)
+ << typeBound << paramName;
+
+ // Forget the bound; we'll default to id later.
+ typeBoundInfo = nullptr;
+ }
+
+ // Type bounds cannot have qualifiers (even indirectly) or explicit
+ // nullability.
+ if (typeBoundInfo) {
+ QualType typeBound = typeBoundInfo->getType();
+ TypeLoc qual = typeBoundInfo->getTypeLoc().findExplicitQualifierLoc();
+ if (qual || typeBound.hasQualifiers()) {
+ bool diagnosed = false;
+ SourceRange rangeToRemove;
+ if (qual) {
+ if (auto attr = qual.getAs<AttributedTypeLoc>()) {
+ rangeToRemove = attr.getLocalSourceRange();
+ if (attr.getTypePtr()->getImmediateNullability()) {
+ Diag(attr.getLocStart(),
+ diag::err_objc_type_param_bound_explicit_nullability)
+ << paramName << typeBound
+ << FixItHint::CreateRemoval(rangeToRemove);
+ diagnosed = true;
+ }
+ }
+ }
+
+ if (!diagnosed) {
+ Diag(qual ? qual.getLocStart()
+ : typeBoundInfo->getTypeLoc().getLocStart(),
+ diag::err_objc_type_param_bound_qualified)
+ << paramName << typeBound << typeBound.getQualifiers().getAsString()
+ << FixItHint::CreateRemoval(rangeToRemove);
+ }
+
+ // If the type bound has qualifiers other than CVR, we need to strip
+ // them or we'll probably assert later when trying to apply new
+ // qualifiers.
+ Qualifiers quals = typeBound.getQualifiers();
+ quals.removeCVRQualifiers();
+ if (!quals.empty()) {
+ typeBoundInfo =
+ Context.getTrivialTypeSourceInfo(typeBound.getUnqualifiedType());
+ }
+ }
+ }
+ }
+
+ // If there was no explicit type bound (or we removed it due to an error),
+ // use 'id' instead.
+ if (!typeBoundInfo) {
+ colonLoc = SourceLocation();
+ typeBoundInfo = Context.getTrivialTypeSourceInfo(Context.getObjCIdType());
+ }
+
+ // Create the type parameter.
+ return ObjCTypeParamDecl::Create(Context, CurContext, variance, varianceLoc,
+ index, paramLoc, paramName, colonLoc,
+ typeBoundInfo);
+}
+
+ObjCTypeParamList *Sema::actOnObjCTypeParamList(Scope *S,
+ SourceLocation lAngleLoc,
+ ArrayRef<Decl *> typeParamsIn,
+ SourceLocation rAngleLoc) {
+ // We know that the array only contains Objective-C type parameters.
+ ArrayRef<ObjCTypeParamDecl *>
+ typeParams(
+ reinterpret_cast<ObjCTypeParamDecl * const *>(typeParamsIn.data()),
+ typeParamsIn.size());
+
+ // Diagnose redeclarations of type parameters.
+ // We do this now because Objective-C type parameters aren't pushed into
+ // scope until later (after the instance variable block), but we want the
+ // diagnostics to occur right after we parse the type parameter list.
+ llvm::SmallDenseMap<IdentifierInfo *, ObjCTypeParamDecl *> knownParams;
+ for (auto typeParam : typeParams) {
+ auto known = knownParams.find(typeParam->getIdentifier());
+ if (known != knownParams.end()) {
+ Diag(typeParam->getLocation(), diag::err_objc_type_param_redecl)
+ << typeParam->getIdentifier()
+ << SourceRange(known->second->getLocation());
+
+ typeParam->setInvalidDecl();
+ } else {
+ knownParams.insert(std::make_pair(typeParam->getIdentifier(), typeParam));
+
+ // Push the type parameter into scope.
+ PushOnScopeChains(typeParam, S, /*AddToContext=*/false);
+ }
+ }
+
+ // Create the parameter list.
+ return ObjCTypeParamList::create(Context, lAngleLoc, typeParams, rAngleLoc);
+}
+
+void Sema::popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList) {
+ for (auto typeParam : *typeParamList) {
+ if (!typeParam->isInvalidDecl()) {
+ S->RemoveDecl(typeParam);
+ IdResolver.RemoveDecl(typeParam);
+ }
+ }
+}
+
+namespace {
+ /// The context in which an Objective-C type parameter list occurs, for use
+ /// in diagnostics.
+ enum class TypeParamListContext {
+ ForwardDeclaration,
+ Definition,
+ Category,
+ Extension
+ };
+} // end anonymous namespace
+
+/// Check consistency between two Objective-C type parameter lists, e.g.,
+/// between a category/extension and an \@interface or between an \@class and an
+/// \@interface.
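+///
+/// For example (illustrative names), a category must repeat the arity of the
+/// interface's type parameter list:
+/// \code
+/// @interface Container<T> : NSObject @end
+/// @interface Container<T, U> (Extras) @end // error: arity mismatch
+/// \endcode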
+static bool checkTypeParamListConsistency(Sema &S,
+ ObjCTypeParamList *prevTypeParams,
+ ObjCTypeParamList *newTypeParams,
+ TypeParamListContext newContext) {
+ // If the sizes don't match, complain about that.
+ if (prevTypeParams->size() != newTypeParams->size()) {
+ SourceLocation diagLoc;
+ if (newTypeParams->size() > prevTypeParams->size()) {
+ diagLoc = newTypeParams->begin()[prevTypeParams->size()]->getLocation();
+ } else {
+ diagLoc = S.getLocForEndOfToken(newTypeParams->back()->getLocEnd());
+ }
+
+ S.Diag(diagLoc, diag::err_objc_type_param_arity_mismatch)
+ << static_cast<unsigned>(newContext)
+ << (newTypeParams->size() > prevTypeParams->size())
+ << prevTypeParams->size()
+ << newTypeParams->size();
+
+ return true;
+ }
+
+ // Match up the type parameters.
+ for (unsigned i = 0, n = prevTypeParams->size(); i != n; ++i) {
+ ObjCTypeParamDecl *prevTypeParam = prevTypeParams->begin()[i];
+ ObjCTypeParamDecl *newTypeParam = newTypeParams->begin()[i];
+
+ // Check for consistency of the variance.
+ if (newTypeParam->getVariance() != prevTypeParam->getVariance()) {
+ if (newTypeParam->getVariance() == ObjCTypeParamVariance::Invariant &&
+ newContext != TypeParamListContext::Definition) {
+ // When the new type parameter is invariant and is not part
+ // of the definition, just propagate the variance.
+ newTypeParam->setVariance(prevTypeParam->getVariance());
+ } else if (prevTypeParam->getVariance()
+ == ObjCTypeParamVariance::Invariant &&
+ !(isa<ObjCInterfaceDecl>(prevTypeParam->getDeclContext()) &&
+ cast<ObjCInterfaceDecl>(prevTypeParam->getDeclContext())
+ ->getDefinition() == prevTypeParam->getDeclContext())) {
+ // When the old parameter is invariant and was not part of the
+ // definition, just ignore the difference because it doesn't
+ // matter.
+ } else {
+ {
+ // Diagnose the conflict and update the second declaration.
+ SourceLocation diagLoc = newTypeParam->getVarianceLoc();
+ if (diagLoc.isInvalid())
+ diagLoc = newTypeParam->getLocStart();
+
+ auto diag = S.Diag(diagLoc,
+ diag::err_objc_type_param_variance_conflict)
+ << static_cast<unsigned>(newTypeParam->getVariance())
+ << newTypeParam->getDeclName()
+ << static_cast<unsigned>(prevTypeParam->getVariance())
+ << prevTypeParam->getDeclName();
+ switch (prevTypeParam->getVariance()) {
+ case ObjCTypeParamVariance::Invariant:
+ diag << FixItHint::CreateRemoval(newTypeParam->getVarianceLoc());
+ break;
+
+ case ObjCTypeParamVariance::Covariant:
+ case ObjCTypeParamVariance::Contravariant: {
+ StringRef newVarianceStr
+ = prevTypeParam->getVariance() == ObjCTypeParamVariance::Covariant
+ ? "__covariant"
+ : "__contravariant";
+ if (newTypeParam->getVariance()
+ == ObjCTypeParamVariance::Invariant) {
+ diag << FixItHint::CreateInsertion(newTypeParam->getLocStart(),
+ (newVarianceStr + " ").str());
+ } else {
+ diag << FixItHint::CreateReplacement(newTypeParam->getVarianceLoc(),
+ newVarianceStr);
+ }
+ }
+ }
+ }
+
+ S.Diag(prevTypeParam->getLocation(), diag::note_objc_type_param_here)
+ << prevTypeParam->getDeclName();
+
+ // Override the variance.
+ newTypeParam->setVariance(prevTypeParam->getVariance());
+ }
+ }
+
+ // If the bound types match, there's nothing to do.
+ if (S.Context.hasSameType(prevTypeParam->getUnderlyingType(),
+ newTypeParam->getUnderlyingType()))
+ continue;
+
+ // If the new type parameter's bound was explicit, complain about it being
+ // different from the original.
+ if (newTypeParam->hasExplicitBound()) {
+ SourceRange newBoundRange = newTypeParam->getTypeSourceInfo()
+ ->getTypeLoc().getSourceRange();
+ S.Diag(newBoundRange.getBegin(), diag::err_objc_type_param_bound_conflict)
+ << newTypeParam->getUnderlyingType()
+ << newTypeParam->getDeclName()
+ << prevTypeParam->hasExplicitBound()
+ << prevTypeParam->getUnderlyingType()
+ << (newTypeParam->getDeclName() == prevTypeParam->getDeclName())
+ << prevTypeParam->getDeclName()
+ << FixItHint::CreateReplacement(
+ newBoundRange,
+ prevTypeParam->getUnderlyingType().getAsString(
+ S.Context.getPrintingPolicy()));
+
+ S.Diag(prevTypeParam->getLocation(), diag::note_objc_type_param_here)
+ << prevTypeParam->getDeclName();
+
+ // Override the new type parameter's bound type with the previous type,
+ // so that it's consistent.
+ newTypeParam->setTypeSourceInfo(
+ S.Context.getTrivialTypeSourceInfo(prevTypeParam->getUnderlyingType()));
+ continue;
+ }
+
+ // The new type parameter got the implicit bound of 'id'. That's okay for
+ // categories and extensions (overwrite it later), but not for forward
+ // declarations and @interfaces, because those must be standalone.
+ if (newContext == TypeParamListContext::ForwardDeclaration ||
+ newContext == TypeParamListContext::Definition) {
+ // Diagnose this problem for forward declarations and definitions.
+ SourceLocation insertionLoc
+ = S.getLocForEndOfToken(newTypeParam->getLocation());
+ std::string newCode
+ = " : " + prevTypeParam->getUnderlyingType().getAsString(
+ S.Context.getPrintingPolicy());
+ S.Diag(newTypeParam->getLocation(),
+ diag::err_objc_type_param_bound_missing)
+ << prevTypeParam->getUnderlyingType()
+ << newTypeParam->getDeclName()
+ << (newContext == TypeParamListContext::ForwardDeclaration)
+ << FixItHint::CreateInsertion(insertionLoc, newCode);
+
+ S.Diag(prevTypeParam->getLocation(), diag::note_objc_type_param_here)
+ << prevTypeParam->getDeclName();
+ }
+
+ // Update the new type parameter's bound to match the previous one.
+ newTypeParam->setTypeSourceInfo(
+ S.Context.getTrivialTypeSourceInfo(prevTypeParam->getUnderlyingType()));
+ }
+
+ return false;
+}
+
+Decl *Sema::
+ActOnStartClassInterface(Scope *S, SourceLocation AtInterfaceLoc,
+ IdentifierInfo *ClassName, SourceLocation ClassLoc,
+ ObjCTypeParamList *typeParamList,
+ IdentifierInfo *SuperName, SourceLocation SuperLoc,
+ ArrayRef<ParsedType> SuperTypeArgs,
+ SourceRange SuperTypeArgsRange,
+ Decl * const *ProtoRefs, unsigned NumProtoRefs,
+ const SourceLocation *ProtoLocs,
+ SourceLocation EndProtoLoc, AttributeList *AttrList) {
+ assert(ClassName && "Missing class identifier");
+
+ // Check for another declaration kind with the same name.
+ NamedDecl *PrevDecl = LookupSingleName(TUScope, ClassName, ClassLoc,
+ LookupOrdinaryName, ForRedeclaration);
+
+ if (PrevDecl && !isa<ObjCInterfaceDecl>(PrevDecl)) {
+ Diag(ClassLoc, diag::err_redefinition_different_kind) << ClassName;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ }
+
+ // Create a declaration to describe this @interface.
+ ObjCInterfaceDecl* PrevIDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);
+
+ if (PrevIDecl && PrevIDecl->getIdentifier() != ClassName) {
+ // A previous decl with a different name is because of
+ // @compatibility_alias, for example:
+ // \code
+ // @class NewImage;
+ // @compatibility_alias OldImage NewImage;
+ // \endcode
+ // A lookup for 'OldImage' will return the 'NewImage' decl.
+ //
+ // In such a case use the real declaration name, instead of the alias one,
+ // otherwise we will break IdentifierResolver and redecls-chain invariants.
+ // FIXME: If necessary, add a bit to indicate that this ObjCInterfaceDecl
+ // has been aliased.
+ ClassName = PrevIDecl->getIdentifier();
+ }
+
+ // If there was a forward declaration with type parameters, check
+ // for consistency.
+ if (PrevIDecl) {
+ if (ObjCTypeParamList *prevTypeParamList = PrevIDecl->getTypeParamList()) {
+ if (typeParamList) {
+ // Both have type parameter lists; check for consistency.
+ if (checkTypeParamListConsistency(*this, prevTypeParamList,
+ typeParamList,
+ TypeParamListContext::Definition)) {
+ typeParamList = nullptr;
+ }
+ } else {
+ Diag(ClassLoc, diag::err_objc_parameterized_forward_class_first)
+ << ClassName;
+ Diag(prevTypeParamList->getLAngleLoc(), diag::note_previous_decl)
+ << ClassName;
+
+ // Clone the type parameter list.
+ SmallVector<ObjCTypeParamDecl *, 4> clonedTypeParams;
+ for (auto typeParam : *prevTypeParamList) {
+ clonedTypeParams.push_back(
+ ObjCTypeParamDecl::Create(
+ Context,
+ CurContext,
+ typeParam->getVariance(),
+ SourceLocation(),
+ typeParam->getIndex(),
+ SourceLocation(),
+ typeParam->getIdentifier(),
+ SourceLocation(),
+ Context.getTrivialTypeSourceInfo(typeParam->getUnderlyingType())));
+ }
+
+ typeParamList = ObjCTypeParamList::create(Context,
+ SourceLocation(),
+ clonedTypeParams,
+ SourceLocation());
+ }
+ }
+ }
+
+ ObjCInterfaceDecl *IDecl
+ = ObjCInterfaceDecl::Create(Context, CurContext, AtInterfaceLoc, ClassName,
+ typeParamList, PrevIDecl, ClassLoc);
+ if (PrevIDecl) {
+ // Class already seen. Was it a definition?
+ if (ObjCInterfaceDecl *Def = PrevIDecl->getDefinition()) {
+ Diag(AtInterfaceLoc, diag::err_duplicate_class_def)
+ << PrevIDecl->getDeclName();
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ IDecl->setInvalidDecl();
+ }
+ }
+
+ if (AttrList)
+ ProcessDeclAttributeList(TUScope, IDecl, AttrList);
+ PushOnScopeChains(IDecl, TUScope);
+
+ // Start the definition of this class. If we're in a redefinition case, there
+ // may already be a definition, so we'll end up adding to it.
+ if (!IDecl->hasDefinition())
+ IDecl->startDefinition();
+
+ if (SuperName) {
+ // Diagnose availability in the context of the @interface.
+ ContextRAII SavedContext(*this, IDecl);
+
+ ActOnSuperClassOfClassInterface(S, AtInterfaceLoc, IDecl,
+ ClassName, ClassLoc,
+ SuperName, SuperLoc, SuperTypeArgs,
+ SuperTypeArgsRange);
+ } else { // we have a root class.
+ IDecl->setEndOfDefinitionLoc(ClassLoc);
+ }
+
+ // Check then save referenced protocols.
+ if (NumProtoRefs) {
+ diagnoseUseOfProtocols(*this, IDecl, (ObjCProtocolDecl*const*)ProtoRefs,
+ NumProtoRefs, ProtoLocs);
+ IDecl->setProtocolList((ObjCProtocolDecl*const*)ProtoRefs, NumProtoRefs,
+ ProtoLocs, Context);
+ IDecl->setEndOfDefinitionLoc(EndProtoLoc);
+ }
+
+ CheckObjCDeclScope(IDecl);
+ return ActOnObjCContainerStartDefinition(IDecl);
+}
+
+/// ActOnTypedefedProtocols - this action finds the protocol list that is
+/// part of a typedef'ed use of a qualified super class and adds those
+/// protocols to the given list.
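+///
+/// For example (illustrative):
+/// \code
+/// typedef NSObject<NSCopying> CopyableObject;
+/// @interface C : CopyableObject @end // C also picks up NSCopying
+/// \endcode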
+void Sema::ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
+ IdentifierInfo *SuperName,
+ SourceLocation SuperLoc) {
+ if (!SuperName)
+ return;
+ NamedDecl* IDecl = LookupSingleName(TUScope, SuperName, SuperLoc,
+ LookupOrdinaryName);
+ if (!IDecl)
+ return;
+
+ if (const TypedefNameDecl *TDecl = dyn_cast_or_null<TypedefNameDecl>(IDecl)) {
+ QualType T = TDecl->getUnderlyingType();
+ if (T->isObjCObjectType())
+ if (const ObjCObjectType *OPT = T->getAs<ObjCObjectType>())
+ ProtocolRefs.append(OPT->qual_begin(), OPT->qual_end());
+ }
+}
+
+/// ActOnCompatibilityAlias - this action is called after complete parsing of
+/// a \@compatibility_alias declaration. It sets up the alias relationships.
+Decl *Sema::ActOnCompatibilityAlias(SourceLocation AtLoc,
+ IdentifierInfo *AliasName,
+ SourceLocation AliasLocation,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassLocation) {
+ // Look for previous declaration of alias name
+ NamedDecl *ADecl = LookupSingleName(TUScope, AliasName, AliasLocation,
+ LookupOrdinaryName, ForRedeclaration);
+ if (ADecl) {
+ Diag(AliasLocation, diag::err_conflicting_aliasing_type) << AliasName;
+ Diag(ADecl->getLocation(), diag::note_previous_declaration);
+ return nullptr;
+ }
+ // Check for class declaration
+ NamedDecl *CDeclU = LookupSingleName(TUScope, ClassName, ClassLocation,
+ LookupOrdinaryName, ForRedeclaration);
+ if (const TypedefNameDecl *TDecl =
+ dyn_cast_or_null<TypedefNameDecl>(CDeclU)) {
+ QualType T = TDecl->getUnderlyingType();
+ if (T->isObjCObjectType()) {
+ if (NamedDecl *IDecl = T->getAs<ObjCObjectType>()->getInterface()) {
+ ClassName = IDecl->getIdentifier();
+ CDeclU = LookupSingleName(TUScope, ClassName, ClassLocation,
+ LookupOrdinaryName, ForRedeclaration);
+ }
+ }
+ }
+ ObjCInterfaceDecl *CDecl = dyn_cast_or_null<ObjCInterfaceDecl>(CDeclU);
+ if (!CDecl) {
+ Diag(ClassLocation, diag::warn_undef_interface) << ClassName;
+ if (CDeclU)
+ Diag(CDeclU->getLocation(), diag::note_previous_declaration);
+ return nullptr;
+ }
+
+ // Everything checked out, instantiate a new alias declaration AST.
+ ObjCCompatibleAliasDecl *AliasDecl =
+ ObjCCompatibleAliasDecl::Create(Context, CurContext, AtLoc, AliasName, CDecl);
+
+ if (!CheckObjCDeclScope(AliasDecl))
+ PushOnScopeChains(AliasDecl, TUScope);
+
+ return AliasDecl;
+}
+
+bool Sema::CheckForwardProtocolDeclarationForCircularDependency(
+ IdentifierInfo *PName,
+ SourceLocation &Ploc, SourceLocation PrevLoc,
+ const ObjCList<ObjCProtocolDecl> &PList) {
+
+ bool res = false;
+ for (ObjCList<ObjCProtocolDecl>::iterator I = PList.begin(),
+ E = PList.end(); I != E; ++I) {
+ if (ObjCProtocolDecl *PDecl = LookupProtocol((*I)->getIdentifier(),
+ Ploc)) {
+ if (PDecl->getIdentifier() == PName) {
+ Diag(Ploc, diag::err_protocol_has_circular_dependency);
+ Diag(PrevLoc, diag::note_previous_definition);
+ res = true;
+ }
+
+ if (!PDecl->hasDefinition())
+ continue;
+
+ if (CheckForwardProtocolDeclarationForCircularDependency(PName, Ploc,
+ PDecl->getLocation(), PDecl->getReferencedProtocols()))
+ res = true;
+ }
+ }
+ return res;
+}
+
+Decl *
+Sema::ActOnStartProtocolInterface(SourceLocation AtProtoInterfaceLoc,
+ IdentifierInfo *ProtocolName,
+ SourceLocation ProtocolLoc,
+ Decl * const *ProtoRefs,
+ unsigned NumProtoRefs,
+ const SourceLocation *ProtoLocs,
+ SourceLocation EndProtoLoc,
+ AttributeList *AttrList) {
+ bool err = false;
+ // FIXME: Deal with AttrList.
+ assert(ProtocolName && "Missing protocol identifier");
+ ObjCProtocolDecl *PrevDecl = LookupProtocol(ProtocolName, ProtocolLoc,
+ ForRedeclaration);
+ ObjCProtocolDecl *PDecl = nullptr;
+ if (ObjCProtocolDecl *Def = PrevDecl? PrevDecl->getDefinition() : nullptr) {
+ // If we already have a definition, complain.
+ Diag(ProtocolLoc, diag::warn_duplicate_protocol_def) << ProtocolName;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+
+ // Create a new protocol that is completely distinct from previous
+ // declarations, and do not make this protocol available for name lookup.
+ // That way, we'll end up completely ignoring the duplicate.
+ // FIXME: Can we turn this into an error?
+ PDecl = ObjCProtocolDecl::Create(Context, CurContext, ProtocolName,
+ ProtocolLoc, AtProtoInterfaceLoc,
+ /*PrevDecl=*/nullptr);
+ PDecl->startDefinition();
+ } else {
+ if (PrevDecl) {
+ // Check for circular dependencies among protocol declarations. This can
+ // only happen if this protocol was forward-declared.
+ ObjCList<ObjCProtocolDecl> PList;
+ PList.set((ObjCProtocolDecl *const*)ProtoRefs, NumProtoRefs, Context);
+ err = CheckForwardProtocolDeclarationForCircularDependency(
+ ProtocolName, ProtocolLoc, PrevDecl->getLocation(), PList);
+ }
+
+ // Create the new declaration.
+ PDecl = ObjCProtocolDecl::Create(Context, CurContext, ProtocolName,
+ ProtocolLoc, AtProtoInterfaceLoc,
+ /*PrevDecl=*/PrevDecl);
+
+ PushOnScopeChains(PDecl, TUScope);
+ PDecl->startDefinition();
+ }
+
+ if (AttrList)
+ ProcessDeclAttributeList(TUScope, PDecl, AttrList);
+
+ // Merge attributes from previous declarations.
+ if (PrevDecl)
+ mergeDeclAttributes(PDecl, PrevDecl);
+
+  if (!err && NumProtoRefs) {
+ /// Check then save referenced protocols.
+ diagnoseUseOfProtocols(*this, PDecl, (ObjCProtocolDecl*const*)ProtoRefs,
+ NumProtoRefs, ProtoLocs);
+ PDecl->setProtocolList((ObjCProtocolDecl*const*)ProtoRefs, NumProtoRefs,
+ ProtoLocs, Context);
+ }
+
+ CheckObjCDeclScope(PDecl);
+ return ActOnObjCContainerStartDefinition(PDecl);
+}
+
+static bool NestedProtocolHasNoDefinition(ObjCProtocolDecl *PDecl,
+ ObjCProtocolDecl *&UndefinedProtocol) {
+ if (!PDecl->hasDefinition() || PDecl->getDefinition()->isHidden()) {
+ UndefinedProtocol = PDecl;
+ return true;
+ }
+
+ for (auto *PI : PDecl->protocols())
+ if (NestedProtocolHasNoDefinition(PI, UndefinedProtocol)) {
+ UndefinedProtocol = PI;
+ return true;
+ }
+ return false;
+}
+
+/// FindProtocolDeclaration - This routine looks up protocols and issues an
+/// error if they are not declared. It returns the list of protocol
+/// declarations in its 'Protocols' argument.
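+///
+/// For example, with only a forward declaration in scope (illustrative):
+/// \code
+/// @protocol P;
+/// @interface C <P> @end
+/// \endcode
+/// a warning is emitted for the use of P when WarnOnDeclarations is set.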
+void
+Sema::FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
+ ArrayRef<IdentifierLocPair> ProtocolId,
+ SmallVectorImpl<Decl *> &Protocols) {
+ for (const IdentifierLocPair &Pair : ProtocolId) {
+ ObjCProtocolDecl *PDecl = LookupProtocol(Pair.first, Pair.second);
+ if (!PDecl) {
+ TypoCorrection Corrected = CorrectTypo(
+ DeclarationNameInfo(Pair.first, Pair.second),
+ LookupObjCProtocolName, TUScope, nullptr,
+ llvm::make_unique<DeclFilterCCC<ObjCProtocolDecl>>(),
+ CTK_ErrorRecovery);
+ if ((PDecl = Corrected.getCorrectionDeclAs<ObjCProtocolDecl>()))
+ diagnoseTypo(Corrected, PDiag(diag::err_undeclared_protocol_suggest)
+ << Pair.first);
+ }
+
+ if (!PDecl) {
+ Diag(Pair.second, diag::err_undeclared_protocol) << Pair.first;
+ continue;
+ }
+ // If this is a forward protocol declaration, get its definition.
+ if (!PDecl->isThisDeclarationADefinition() && PDecl->getDefinition())
+ PDecl = PDecl->getDefinition();
+
+ // For an objc container, delay protocol reference checking until after we
+ // can set the objc decl as the availability context, otherwise check now.
+ if (!ForObjCContainer) {
+ (void)DiagnoseUseOfDecl(PDecl, Pair.second);
+ }
+
+ // If this is a forward declaration and we are supposed to warn in this
+ // case, do it.
+ // FIXME: Recover nicely in the hidden case.
+ ObjCProtocolDecl *UndefinedProtocol;
+
+ if (WarnOnDeclarations &&
+ NestedProtocolHasNoDefinition(PDecl, UndefinedProtocol)) {
+ Diag(Pair.second, diag::warn_undef_protocolref) << Pair.first;
+ Diag(UndefinedProtocol->getLocation(), diag::note_protocol_decl_undefined)
+ << UndefinedProtocol;
+ }
+ Protocols.push_back(PDecl);
+ }
+}
+
+namespace {
+// Callback to only accept typo corrections that are either
+// Objective-C protocols or valid Objective-C type arguments.
+class ObjCTypeArgOrProtocolValidatorCCC : public CorrectionCandidateCallback {
+ ASTContext &Context;
+ Sema::LookupNameKind LookupKind;
+ public:
+ ObjCTypeArgOrProtocolValidatorCCC(ASTContext &context,
+ Sema::LookupNameKind lookupKind)
+ : Context(context), LookupKind(lookupKind) { }
+
+ bool ValidateCandidate(const TypoCorrection &candidate) override {
+ // If we're allowed to find protocols and we have a protocol, accept it.
+ if (LookupKind != Sema::LookupOrdinaryName) {
+ if (candidate.getCorrectionDeclAs<ObjCProtocolDecl>())
+ return true;
+ }
+
+ // If we're allowed to find type names and we have one, accept it.
+ if (LookupKind != Sema::LookupObjCProtocolName) {
+ // If we have a type declaration, we might accept this result.
+ if (auto typeDecl = candidate.getCorrectionDeclAs<TypeDecl>()) {
+ // If we found a tag declaration outside of C++, skip it. This
+          // can happen because we look for any name when there is no
+ // bias to protocol or type names.
+ if (isa<RecordDecl>(typeDecl) && !Context.getLangOpts().CPlusPlus)
+ return false;
+
+ // Make sure the type is something we would accept as a type
+ // argument.
+ auto type = Context.getTypeDeclType(typeDecl);
+ if (type->isObjCObjectPointerType() ||
+ type->isBlockPointerType() ||
+ type->isDependentType() ||
+ type->isObjCObjectType())
+ return true;
+
+ return false;
+ }
+
+ // If we have an Objective-C class type, accept it; there will
+ // be another fix to add the '*'.
+ if (candidate.getCorrectionDeclAs<ObjCInterfaceDecl>())
+ return true;
+
+ return false;
+ }
+
+ return false;
+ }
+};
+} // end anonymous namespace
+
+void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
+ Scope *S,
+ ParsedType baseType,
+ SourceLocation lAngleLoc,
+ ArrayRef<IdentifierInfo *> identifiers,
+ ArrayRef<SourceLocation> identifierLocs,
+ SourceLocation rAngleLoc,
+ SourceLocation &typeArgsLAngleLoc,
+ SmallVectorImpl<ParsedType> &typeArgs,
+ SourceLocation &typeArgsRAngleLoc,
+ SourceLocation &protocolLAngleLoc,
+ SmallVectorImpl<Decl *> &protocols,
+ SourceLocation &protocolRAngleLoc,
+ bool warnOnIncompleteProtocols) {
+ // Local function that updates the declaration specifiers with
+ // protocol information.
+ unsigned numProtocolsResolved = 0;
+ auto resolvedAsProtocols = [&] {
+ assert(numProtocolsResolved == identifiers.size() && "Unresolved protocols");
+
+ // Determine whether the base type is a parameterized class, in
+ // which case we want to warn about typos such as
+ // "NSArray<NSObject>" (that should be NSArray<NSObject *>).
+ ObjCInterfaceDecl *baseClass = nullptr;
+ QualType base = GetTypeFromParser(baseType, nullptr);
+ bool allAreTypeNames = false;
+ SourceLocation firstClassNameLoc;
+ if (!base.isNull()) {
+ if (const auto *objcObjectType = base->getAs<ObjCObjectType>()) {
+ baseClass = objcObjectType->getInterface();
+ if (baseClass) {
+ if (auto typeParams = baseClass->getTypeParamList()) {
+ if (typeParams->size() == numProtocolsResolved) {
+ // Note that we should be looking for type names, too.
+ allAreTypeNames = true;
+ }
+ }
+ }
+ }
+ }
+
+ for (unsigned i = 0, n = protocols.size(); i != n; ++i) {
+ ObjCProtocolDecl *&proto
+ = reinterpret_cast<ObjCProtocolDecl *&>(protocols[i]);
+ // For an objc container, delay protocol reference checking until after we
+ // can set the objc decl as the availability context, otherwise check now.
+ if (!warnOnIncompleteProtocols) {
+ (void)DiagnoseUseOfDecl(proto, identifierLocs[i]);
+ }
+
+ // If this is a forward protocol declaration, get its definition.
+ if (!proto->isThisDeclarationADefinition() && proto->getDefinition())
+ proto = proto->getDefinition();
+
+ // If this is a forward declaration and we are supposed to warn in this
+ // case, do it.
+ // FIXME: Recover nicely in the hidden case.
+ ObjCProtocolDecl *forwardDecl = nullptr;
+ if (warnOnIncompleteProtocols &&
+ NestedProtocolHasNoDefinition(proto, forwardDecl)) {
+ Diag(identifierLocs[i], diag::warn_undef_protocolref)
+ << proto->getDeclName();
+ Diag(forwardDecl->getLocation(), diag::note_protocol_decl_undefined)
+ << forwardDecl;
+ }
+
+      // If everything so far has been a type name (and we care about such
+      // things), check whether this name refers to a type as well.
+ if (allAreTypeNames) {
+ if (auto *decl = LookupSingleName(S, identifiers[i], identifierLocs[i],
+ LookupOrdinaryName)) {
+ if (isa<ObjCInterfaceDecl>(decl)) {
+ if (firstClassNameLoc.isInvalid())
+ firstClassNameLoc = identifierLocs[i];
+ } else if (!isa<TypeDecl>(decl)) {
+ // Not a type.
+ allAreTypeNames = false;
+ }
+ } else {
+ allAreTypeNames = false;
+ }
+ }
+ }
+
+    // If all of the protocols listed also have type names, and at least
+    // one is an Objective-C class name, check whether all of the protocol
+    // conformances are declared by the base class itself; in that case,
+    // warn.
+ if (allAreTypeNames && firstClassNameLoc.isValid()) {
+ llvm::SmallPtrSet<ObjCProtocolDecl*, 8> knownProtocols;
+ Context.CollectInheritedProtocols(baseClass, knownProtocols);
+ bool allProtocolsDeclared = true;
+ for (auto proto : protocols) {
+ if (knownProtocols.count(static_cast<ObjCProtocolDecl *>(proto)) == 0) {
+ allProtocolsDeclared = false;
+ break;
+ }
+ }
+
+ if (allProtocolsDeclared) {
+ Diag(firstClassNameLoc, diag::warn_objc_redundant_qualified_class_type)
+ << baseClass->getDeclName() << SourceRange(lAngleLoc, rAngleLoc)
+ << FixItHint::CreateInsertion(getLocForEndOfToken(firstClassNameLoc),
+ " *");
+ }
+ }
+
+ protocolLAngleLoc = lAngleLoc;
+ protocolRAngleLoc = rAngleLoc;
+ assert(protocols.size() == identifierLocs.size());
+ };
+
+ // Attempt to resolve all of the identifiers as protocols.
+ for (unsigned i = 0, n = identifiers.size(); i != n; ++i) {
+ ObjCProtocolDecl *proto = LookupProtocol(identifiers[i], identifierLocs[i]);
+ protocols.push_back(proto);
+ if (proto)
+ ++numProtocolsResolved;
+ }
+
+ // If all of the names were protocols, these were protocol qualifiers.
+ if (numProtocolsResolved == identifiers.size())
+ return resolvedAsProtocols();
+
+ // Attempt to resolve all of the identifiers as type names or
+ // Objective-C class names. The latter is technically ill-formed,
+ // but is probably something like \c NSArray<NSView *> missing the
+  // \c *.
+ typedef llvm::PointerUnion<TypeDecl *, ObjCInterfaceDecl *> TypeOrClassDecl;
+ SmallVector<TypeOrClassDecl, 4> typeDecls;
+ unsigned numTypeDeclsResolved = 0;
+ for (unsigned i = 0, n = identifiers.size(); i != n; ++i) {
+ NamedDecl *decl = LookupSingleName(S, identifiers[i], identifierLocs[i],
+ LookupOrdinaryName);
+ if (!decl) {
+ typeDecls.push_back(TypeOrClassDecl());
+ continue;
+ }
+
+ if (auto typeDecl = dyn_cast<TypeDecl>(decl)) {
+ typeDecls.push_back(typeDecl);
+ ++numTypeDeclsResolved;
+ continue;
+ }
+
+ if (auto objcClass = dyn_cast<ObjCInterfaceDecl>(decl)) {
+ typeDecls.push_back(objcClass);
+ ++numTypeDeclsResolved;
+ continue;
+ }
+
+ typeDecls.push_back(TypeOrClassDecl());
+ }
+
+ AttributeFactory attrFactory;
+
+ // Local function that forms a reference to the given type or
+ // Objective-C class declaration.
+ auto resolveTypeReference = [&](TypeOrClassDecl typeDecl, SourceLocation loc)
+ -> TypeResult {
+ // Form declaration specifiers. They simply refer to the type.
+ DeclSpec DS(attrFactory);
+ const char* prevSpec; // unused
+ unsigned diagID; // unused
+ QualType type;
+ if (auto *actualTypeDecl = typeDecl.dyn_cast<TypeDecl *>())
+ type = Context.getTypeDeclType(actualTypeDecl);
+ else
+ type = Context.getObjCInterfaceType(typeDecl.get<ObjCInterfaceDecl *>());
+ TypeSourceInfo *parsedTSInfo = Context.getTrivialTypeSourceInfo(type, loc);
+ ParsedType parsedType = CreateParsedType(type, parsedTSInfo);
+ DS.SetTypeSpecType(DeclSpec::TST_typename, loc, prevSpec, diagID,
+ parsedType, Context.getPrintingPolicy());
+ // Use the identifier location for the type source range.
+ DS.SetRangeStart(loc);
+ DS.SetRangeEnd(loc);
+
+ // Form the declarator.
+ Declarator D(DS, Declarator::TypeNameContext);
+
+ // If we have a typedef of an Objective-C class type that is missing a '*',
+ // add the '*'.
+ if (type->getAs<ObjCInterfaceType>()) {
+ SourceLocation starLoc = getLocForEndOfToken(loc);
+ ParsedAttributes parsedAttrs(attrFactory);
+ D.AddTypeInfo(DeclaratorChunk::getPointer(/*typeQuals=*/0, starLoc,
+ SourceLocation(),
+ SourceLocation(),
+ SourceLocation(),
+ SourceLocation()),
+ parsedAttrs,
+ starLoc);
+
+ // Diagnose the missing '*'.
+ Diag(loc, diag::err_objc_type_arg_missing_star)
+ << type
+ << FixItHint::CreateInsertion(starLoc, " *");
+ }
+
+ // Convert this to a type.
+ return ActOnTypeName(S, D);
+ };
+
+ // Local function that updates the declaration specifiers with
+ // type argument information.
+ auto resolvedAsTypeDecls = [&] {
+ // We did not resolve these as protocols.
+ protocols.clear();
+
+ assert(numTypeDeclsResolved == identifiers.size() && "Unresolved type decl");
+ // Map type declarations to type arguments.
+ for (unsigned i = 0, n = identifiers.size(); i != n; ++i) {
+ // Map type reference to a type.
+ TypeResult type = resolveTypeReference(typeDecls[i], identifierLocs[i]);
+ if (!type.isUsable()) {
+ typeArgs.clear();
+ return;
+ }
+
+ typeArgs.push_back(type.get());
+ }
+
+ typeArgsLAngleLoc = lAngleLoc;
+ typeArgsRAngleLoc = rAngleLoc;
+ };
+
+ // If all of the identifiers can be resolved as type names or
+ // Objective-C class names, we have type arguments.
+ if (numTypeDeclsResolved == identifiers.size())
+ return resolvedAsTypeDecls();
+
+ // Error recovery: some names weren't found, or we have a mix of
+ // type and protocol names. Go resolve all of the unresolved names
+ // and complain if we can't find a consistent answer.
+ LookupNameKind lookupKind = LookupAnyName;
+ for (unsigned i = 0, n = identifiers.size(); i != n; ++i) {
+    // If we already have a protocol or type, check whether it is the
+    // right thing.
+ if (protocols[i] || typeDecls[i]) {
+ // If we haven't figured out whether we want types or protocols
+ // yet, try to figure it out from this name.
+ if (lookupKind == LookupAnyName) {
+ // If this name refers to both a protocol and a type (e.g., \c
+ // NSObject), don't conclude anything yet.
+ if (protocols[i] && typeDecls[i])
+ continue;
+
+ // Otherwise, let this name decide whether we'll be correcting
+ // toward types or protocols.
+ lookupKind = protocols[i] ? LookupObjCProtocolName
+ : LookupOrdinaryName;
+ continue;
+ }
+
+ // If we want protocols and we have a protocol, there's nothing
+ // more to do.
+ if (lookupKind == LookupObjCProtocolName && protocols[i])
+ continue;
+
+ // If we want types and we have a type declaration, there's
+ // nothing more to do.
+ if (lookupKind == LookupOrdinaryName && typeDecls[i])
+ continue;
+
+ // We have a conflict: some names refer to protocols and others
+ // refer to types.
+ Diag(identifierLocs[i], diag::err_objc_type_args_and_protocols)
+ << (protocols[i] != nullptr)
+ << identifiers[i]
+ << identifiers[0]
+ << SourceRange(identifierLocs[0]);
+
+ protocols.clear();
+ typeArgs.clear();
+ return;
+ }
+
+ // Perform typo correction on the name.
+ TypoCorrection corrected = CorrectTypo(
+ DeclarationNameInfo(identifiers[i], identifierLocs[i]), lookupKind, S,
+ nullptr,
+ llvm::make_unique<ObjCTypeArgOrProtocolValidatorCCC>(Context,
+ lookupKind),
+ CTK_ErrorRecovery);
+ if (corrected) {
+ // Did we find a protocol?
+ if (auto proto = corrected.getCorrectionDeclAs<ObjCProtocolDecl>()) {
+ diagnoseTypo(corrected,
+ PDiag(diag::err_undeclared_protocol_suggest)
+ << identifiers[i]);
+ lookupKind = LookupObjCProtocolName;
+ protocols[i] = proto;
+ ++numProtocolsResolved;
+ continue;
+ }
+
+ // Did we find a type?
+ if (auto typeDecl = corrected.getCorrectionDeclAs<TypeDecl>()) {
+ diagnoseTypo(corrected,
+ PDiag(diag::err_unknown_typename_suggest)
+ << identifiers[i]);
+ lookupKind = LookupOrdinaryName;
+ typeDecls[i] = typeDecl;
+ ++numTypeDeclsResolved;
+ continue;
+ }
+
+ // Did we find an Objective-C class?
+ if (auto objcClass = corrected.getCorrectionDeclAs<ObjCInterfaceDecl>()) {
+ diagnoseTypo(corrected,
+ PDiag(diag::err_unknown_type_or_class_name_suggest)
+ << identifiers[i] << true);
+ lookupKind = LookupOrdinaryName;
+ typeDecls[i] = objcClass;
+ ++numTypeDeclsResolved;
+ continue;
+ }
+ }
+
+ // We couldn't find anything.
+ Diag(identifierLocs[i],
+ (lookupKind == LookupAnyName ? diag::err_objc_type_arg_missing
+ : lookupKind == LookupObjCProtocolName ? diag::err_undeclared_protocol
+ : diag::err_unknown_typename))
+ << identifiers[i];
+ protocols.clear();
+ typeArgs.clear();
+ return;
+ }
+
+ // If all of the names were (corrected to) protocols, these were
+ // protocol qualifiers.
+ if (numProtocolsResolved == identifiers.size())
+ return resolvedAsProtocols();
+
+ // Otherwise, all of the names were (corrected to) types.
+ assert(numTypeDeclsResolved == identifiers.size() && "Not all types?");
+ return resolvedAsTypeDecls();
+}
+
+/// DiagnoseClassExtensionDupMethods - Check for duplicate declaration of
+/// a class method in its extension.
+///
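+/// For example (illustrative), redeclaring a method with a different
+/// signature in a class extension is diagnosed:
+/// \code
+/// @interface C
+/// - (void)m;
+/// @end
+/// @interface C ()
+/// - (int)m; // error: duplicate declaration of method 'm'
+/// @end
+/// \endcode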
+void Sema::DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
+ ObjCInterfaceDecl *ID) {
+ if (!ID)
+ return; // Possibly due to previous error
+
+ llvm::DenseMap<Selector, const ObjCMethodDecl*> MethodMap;
+ for (auto *MD : ID->methods())
+ MethodMap[MD->getSelector()] = MD;
+
+ if (MethodMap.empty())
+ return;
+ for (const auto *Method : CAT->methods()) {
+ const ObjCMethodDecl *&PrevMethod = MethodMap[Method->getSelector()];
+ if (PrevMethod &&
+ (PrevMethod->isInstanceMethod() == Method->isInstanceMethod()) &&
+ !MatchTwoMethodDeclarations(Method, PrevMethod)) {
+ Diag(Method->getLocation(), diag::err_duplicate_method_decl)
+ << Method->getDeclName();
+ Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
+ }
+ }
+}
+
+/// ActOnForwardProtocolDeclaration - Handle \@protocol foo;
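+///
+/// For example (names illustrative), "\@protocol A, B;" creates an
+/// ObjCProtocolDecl for each of A and B without attaching a definition.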
+Sema::DeclGroupPtrTy
+Sema::ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
+ ArrayRef<IdentifierLocPair> IdentList,
+ AttributeList *attrList) {
+ SmallVector<Decl *, 8> DeclsInGroup;
+ for (const IdentifierLocPair &IdentPair : IdentList) {
+ IdentifierInfo *Ident = IdentPair.first;
+ ObjCProtocolDecl *PrevDecl = LookupProtocol(Ident, IdentPair.second,
+ ForRedeclaration);
+ ObjCProtocolDecl *PDecl
+ = ObjCProtocolDecl::Create(Context, CurContext, Ident,
+ IdentPair.second, AtProtocolLoc,
+ PrevDecl);
+
+ PushOnScopeChains(PDecl, TUScope);
+ CheckObjCDeclScope(PDecl);
+
+ if (attrList)
+ ProcessDeclAttributeList(TUScope, PDecl, attrList);
+
+ if (PrevDecl)
+ mergeDeclAttributes(PDecl, PrevDecl);
+
+ DeclsInGroup.push_back(PDecl);
+ }
+
+ return BuildDeclaratorGroup(DeclsInGroup, false);
+}
+
+Decl *Sema::
+ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
+ IdentifierInfo *ClassName, SourceLocation ClassLoc,
+ ObjCTypeParamList *typeParamList,
+ IdentifierInfo *CategoryName,
+ SourceLocation CategoryLoc,
+ Decl * const *ProtoRefs,
+ unsigned NumProtoRefs,
+ const SourceLocation *ProtoLocs,
+ SourceLocation EndProtoLoc) {
+ ObjCCategoryDecl *CDecl;
+ ObjCInterfaceDecl *IDecl = getObjCInterfaceDecl(ClassName, ClassLoc, true);
+
+  /// Check that the class of this category is already completely declared.
+
+ if (!IDecl
+ || RequireCompleteType(ClassLoc, Context.getObjCInterfaceType(IDecl),
+ diag::err_category_forward_interface,
+ CategoryName == nullptr)) {
+ // Create an invalid ObjCCategoryDecl to serve as context for
+ // the enclosing method declarations. We mark the decl invalid
+ // to make it clear that this isn't a valid AST.
+ CDecl = ObjCCategoryDecl::Create(Context, CurContext, AtInterfaceLoc,
+ ClassLoc, CategoryLoc, CategoryName,
+ IDecl, typeParamList);
+ CDecl->setInvalidDecl();
+ CurContext->addDecl(CDecl);
+
+ if (!IDecl)
+ Diag(ClassLoc, diag::err_undef_interface) << ClassName;
+ return ActOnObjCContainerStartDefinition(CDecl);
+ }
+
+ if (!CategoryName && IDecl->getImplementation()) {
+ Diag(ClassLoc, diag::err_class_extension_after_impl) << ClassName;
+ Diag(IDecl->getImplementation()->getLocation(),
+ diag::note_implementation_declared);
+ }
+
+ if (CategoryName) {
+    /// Check for a duplicate interface declaration for this category.
+ if (ObjCCategoryDecl *Previous
+ = IDecl->FindCategoryDeclaration(CategoryName)) {
+ // Class extensions can be declared multiple times, categories cannot.
+ Diag(CategoryLoc, diag::warn_dup_category_def)
+ << ClassName << CategoryName;
+ Diag(Previous->getLocation(), diag::note_previous_definition);
+ }
+ }
+
+ // If we have a type parameter list, check it.
+ if (typeParamList) {
+ if (auto prevTypeParamList = IDecl->getTypeParamList()) {
+ if (checkTypeParamListConsistency(*this, prevTypeParamList, typeParamList,
+ CategoryName
+ ? TypeParamListContext::Category
+ : TypeParamListContext::Extension))
+ typeParamList = nullptr;
+ } else {
+ Diag(typeParamList->getLAngleLoc(),
+ diag::err_objc_parameterized_category_nonclass)
+ << (CategoryName != nullptr)
+ << ClassName
+ << typeParamList->getSourceRange();
+
+ typeParamList = nullptr;
+ }
+ }
+
+ CDecl = ObjCCategoryDecl::Create(Context, CurContext, AtInterfaceLoc,
+ ClassLoc, CategoryLoc, CategoryName, IDecl,
+ typeParamList);
+ // FIXME: PushOnScopeChains?
+ CurContext->addDecl(CDecl);
+
+ if (NumProtoRefs) {
+ diagnoseUseOfProtocols(*this, CDecl, (ObjCProtocolDecl*const*)ProtoRefs,
+ NumProtoRefs, ProtoLocs);
+ CDecl->setProtocolList((ObjCProtocolDecl*const*)ProtoRefs, NumProtoRefs,
+ ProtoLocs, Context);
+ // Protocols in the class extension belong to the class.
+ if (CDecl->IsClassExtension())
+ IDecl->mergeClassExtensionProtocolList((ObjCProtocolDecl*const*)ProtoRefs,
+ NumProtoRefs, Context);
+ }
+
+ CheckObjCDeclScope(CDecl);
+ return ActOnObjCContainerStartDefinition(CDecl);
+}
+
+/// ActOnStartCategoryImplementation - Perform semantic checks on the
+/// category implementation declaration and build an ObjCCategoryImplDecl
+/// object.
+Decl *Sema::ActOnStartCategoryImplementation(
+ SourceLocation AtCatImplLoc,
+ IdentifierInfo *ClassName, SourceLocation ClassLoc,
+ IdentifierInfo *CatName, SourceLocation CatLoc) {
+ ObjCInterfaceDecl *IDecl = getObjCInterfaceDecl(ClassName, ClassLoc, true);
+ ObjCCategoryDecl *CatIDecl = nullptr;
+ if (IDecl && IDecl->hasDefinition()) {
+ CatIDecl = IDecl->FindCategoryDeclaration(CatName);
+ if (!CatIDecl) {
+ // Category @implementation with no corresponding @interface.
+ // Create and install one.
+ CatIDecl = ObjCCategoryDecl::Create(Context, CurContext, AtCatImplLoc,
+ ClassLoc, CatLoc,
+ CatName, IDecl,
+ /*typeParamList=*/nullptr);
+ CatIDecl->setImplicit();
+ }
+ }
+
+ ObjCCategoryImplDecl *CDecl =
+ ObjCCategoryImplDecl::Create(Context, CurContext, CatName, IDecl,
+ ClassLoc, AtCatImplLoc, CatLoc);
+  /// Check that the class of this category is already completely declared.
+ if (!IDecl) {
+ Diag(ClassLoc, diag::err_undef_interface) << ClassName;
+ CDecl->setInvalidDecl();
+ } else if (RequireCompleteType(ClassLoc, Context.getObjCInterfaceType(IDecl),
+ diag::err_undef_interface)) {
+ CDecl->setInvalidDecl();
+ }
+
+ // FIXME: PushOnScopeChains?
+ CurContext->addDecl(CDecl);
+
+ // If the interface is deprecated/unavailable, warn/error about it.
+ if (IDecl)
+ DiagnoseUseOfDecl(IDecl, ClassLoc);
+
+  /// Check that the category name, CatName, is not used in another
+  /// implementation.
+ if (CatIDecl) {
+ if (CatIDecl->getImplementation()) {
+ Diag(ClassLoc, diag::err_dup_implementation_category) << ClassName
+ << CatName;
+ Diag(CatIDecl->getImplementation()->getLocation(),
+ diag::note_previous_definition);
+ CDecl->setInvalidDecl();
+ } else {
+ CatIDecl->setImplementation(CDecl);
+      // Warn when implementing a category of a deprecated class under the
+      // -Wdeprecated-implementations flag.
+ DiagnoseObjCImplementedDeprecations(*this,
+ dyn_cast<NamedDecl>(IDecl),
+ CDecl->getLocation(), 2);
+ }
+ }
+
+ CheckObjCDeclScope(CDecl);
+ return ActOnObjCContainerStartDefinition(CDecl);
+}
+
+Decl *Sema::ActOnStartClassImplementation(
+ SourceLocation AtClassImplLoc,
+ IdentifierInfo *ClassName, SourceLocation ClassLoc,
+ IdentifierInfo *SuperClassname,
+ SourceLocation SuperClassLoc) {
+ ObjCInterfaceDecl *IDecl = nullptr;
+ // Check for another declaration kind with the same name.
+ NamedDecl *PrevDecl
+ = LookupSingleName(TUScope, ClassName, ClassLoc, LookupOrdinaryName,
+ ForRedeclaration);
+ if (PrevDecl && !isa<ObjCInterfaceDecl>(PrevDecl)) {
+ Diag(ClassLoc, diag::err_redefinition_different_kind) << ClassName;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ } else if ((IDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl))) {
+ // FIXME: This will produce an error if the definition of the interface has
+ // been imported from a module but is not visible.
+ RequireCompleteType(ClassLoc, Context.getObjCInterfaceType(IDecl),
+ diag::warn_undef_interface);
+ } else {
+ // We did not find anything with the name ClassName; try to correct for
+ // typos in the class name.
+ TypoCorrection Corrected = CorrectTypo(
+ DeclarationNameInfo(ClassName, ClassLoc), LookupOrdinaryName, TUScope,
+ nullptr, llvm::make_unique<ObjCInterfaceValidatorCCC>(), CTK_NonError);
+ if (Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>()) {
+ // Suggest the (potentially) correct interface name. Don't provide a
+ // code-modification hint or use the typo name for recovery, because
+ // this is just a warning. The program may actually be correct.
+ diagnoseTypo(Corrected,
+ PDiag(diag::warn_undef_interface_suggest) << ClassName,
+ /*ErrorRecovery*/false);
+ } else {
+ Diag(ClassLoc, diag::warn_undef_interface) << ClassName;
+ }
+ }
+
+  // Check that the superclass name is a valid class name.
+ ObjCInterfaceDecl *SDecl = nullptr;
+ if (SuperClassname) {
+    // Check if a different kind of symbol is declared in this scope.
+ PrevDecl = LookupSingleName(TUScope, SuperClassname, SuperClassLoc,
+ LookupOrdinaryName);
+ if (PrevDecl && !isa<ObjCInterfaceDecl>(PrevDecl)) {
+ Diag(SuperClassLoc, diag::err_redefinition_different_kind)
+ << SuperClassname;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ } else {
+ SDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);
+ if (SDecl && !SDecl->hasDefinition())
+ SDecl = nullptr;
+ if (!SDecl)
+ Diag(SuperClassLoc, diag::err_undef_superclass)
+ << SuperClassname << ClassName;
+ else if (IDecl && !declaresSameEntity(IDecl->getSuperClass(), SDecl)) {
+ // This implementation and its interface do not have the same
+ // super class.
+ Diag(SuperClassLoc, diag::err_conflicting_super_class)
+ << SDecl->getDeclName();
+ Diag(SDecl->getLocation(), diag::note_previous_definition);
+ }
+ }
+ }
+
+ if (!IDecl) {
+ // Legacy case of @implementation with no corresponding @interface.
+ // Build, chain & install the interface decl into the identifier.
+
+ // FIXME: Do we support attributes on the @implementation? If so we should
+ // copy them over.
+ IDecl = ObjCInterfaceDecl::Create(Context, CurContext, AtClassImplLoc,
+ ClassName, /*typeParamList=*/nullptr,
+ /*PrevDecl=*/nullptr, ClassLoc,
+ true);
+ IDecl->startDefinition();
+ if (SDecl) {
+ IDecl->setSuperClass(Context.getTrivialTypeSourceInfo(
+ Context.getObjCInterfaceType(SDecl),
+ SuperClassLoc));
+ IDecl->setEndOfDefinitionLoc(SuperClassLoc);
+ } else {
+ IDecl->setEndOfDefinitionLoc(ClassLoc);
+ }
+
+ PushOnScopeChains(IDecl, TUScope);
+ } else {
+    // Mark the interface as being completed, even if it was just a
+    //   @class ...;
+    // declaration; the user cannot reopen it.
+ if (!IDecl->hasDefinition())
+ IDecl->startDefinition();
+ }
+
+ ObjCImplementationDecl* IMPDecl =
+ ObjCImplementationDecl::Create(Context, CurContext, IDecl, SDecl,
+ ClassLoc, AtClassImplLoc, SuperClassLoc);
+
+ if (CheckObjCDeclScope(IMPDecl))
+ return ActOnObjCContainerStartDefinition(IMPDecl);
+
+ // Check that there is no duplicate implementation of this class.
+ if (IDecl->getImplementation()) {
+ // FIXME: Don't leak everything!
+ Diag(ClassLoc, diag::err_dup_implementation_class) << ClassName;
+ Diag(IDecl->getImplementation()->getLocation(),
+ diag::note_previous_definition);
+ IMPDecl->setInvalidDecl();
+ } else { // add it to the list.
+ IDecl->setImplementation(IMPDecl);
+ PushOnScopeChains(IMPDecl, TUScope);
+    // Warn when implementing a deprecated class under the
+    // -Wdeprecated-implementations flag.
+ DiagnoseObjCImplementedDeprecations(*this,
+ dyn_cast<NamedDecl>(IDecl),
+ IMPDecl->getLocation(), 1);
+ }
+ return ActOnObjCContainerStartDefinition(IMPDecl);
+}
+
+Sema::DeclGroupPtrTy
+Sema::ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls) {
+ SmallVector<Decl *, 64> DeclsInGroup;
+ DeclsInGroup.reserve(Decls.size() + 1);
+
+ for (unsigned i = 0, e = Decls.size(); i != e; ++i) {
+ Decl *Dcl = Decls[i];
+ if (!Dcl)
+ continue;
+ if (Dcl->getDeclContext()->isFileContext())
+ Dcl->setTopLevelDeclInObjCContainer();
+ DeclsInGroup.push_back(Dcl);
+ }
+
+ DeclsInGroup.push_back(ObjCImpDecl);
+
+ return BuildDeclaratorGroup(DeclsInGroup, false);
+}
+
+void Sema::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
+ ObjCIvarDecl **ivars, unsigned numIvars,
+ SourceLocation RBrace) {
+ assert(ImpDecl && "missing implementation decl");
+ ObjCInterfaceDecl* IDecl = ImpDecl->getClassInterface();
+ if (!IDecl)
+ return;
+  /// Check the case of a non-existent \@interface decl (a legacy
+  /// Objective-C \@implementation decl without an \@interface decl).
+  /// Add the implementation's ivars to the synthesized class's ivar list.
+ if (IDecl->isImplicitInterfaceDecl()) {
+ IDecl->setEndOfDefinitionLoc(RBrace);
+ // Add ivar's to class's DeclContext.
+ for (unsigned i = 0, e = numIvars; i != e; ++i) {
+ ivars[i]->setLexicalDeclContext(ImpDecl);
+ IDecl->makeDeclVisibleInContext(ivars[i]);
+ ImpDecl->addDecl(ivars[i]);
+ }
+
+ return;
+ }
+  // If the implementation has an empty ivar list, just return.
+ if (numIvars == 0)
+ return;
+
+ assert(ivars && "missing @implementation ivars");
+ if (LangOpts.ObjCRuntime.isNonFragile()) {
+ if (ImpDecl->getSuperClass())
+ Diag(ImpDecl->getLocation(), diag::warn_on_superclass_use);
+ for (unsigned i = 0; i < numIvars; i++) {
+ ObjCIvarDecl* ImplIvar = ivars[i];
+ if (const ObjCIvarDecl *ClsIvar =
+ IDecl->getIvarDecl(ImplIvar->getIdentifier())) {
+ Diag(ImplIvar->getLocation(), diag::err_duplicate_ivar_declaration);
+ Diag(ClsIvar->getLocation(), diag::note_previous_definition);
+ continue;
+ }
+ // Check class extensions (unnamed categories) for duplicate ivars.
+ for (const auto *CDecl : IDecl->visible_extensions()) {
+ if (const ObjCIvarDecl *ClsExtIvar =
+ CDecl->getIvarDecl(ImplIvar->getIdentifier())) {
+ Diag(ImplIvar->getLocation(), diag::err_duplicate_ivar_declaration);
+ Diag(ClsExtIvar->getLocation(), diag::note_previous_definition);
+ continue;
+ }
+ }
+      // Add the instance ivar to the implementation's DeclContext.
+ ImplIvar->setLexicalDeclContext(ImpDecl);
+ IDecl->makeDeclVisibleInContext(ImplIvar);
+ ImpDecl->addDecl(ImplIvar);
+ }
+ return;
+ }
+  // Check the interface's ivar list against the one in the implementation;
+  // names and types must match.
+ unsigned j = 0;
+ ObjCInterfaceDecl::ivar_iterator
+ IVI = IDecl->ivar_begin(), IVE = IDecl->ivar_end();
+ for (; numIvars > 0 && IVI != IVE; ++IVI) {
+ ObjCIvarDecl* ImplIvar = ivars[j++];
+ ObjCIvarDecl* ClsIvar = *IVI;
+ assert (ImplIvar && "missing implementation ivar");
+ assert (ClsIvar && "missing class ivar");
+
+ // First, make sure the types match.
+ if (!Context.hasSameType(ImplIvar->getType(), ClsIvar->getType())) {
+ Diag(ImplIvar->getLocation(), diag::err_conflicting_ivar_type)
+ << ImplIvar->getIdentifier()
+ << ImplIvar->getType() << ClsIvar->getType();
+ Diag(ClsIvar->getLocation(), diag::note_previous_definition);
+ } else if (ImplIvar->isBitField() && ClsIvar->isBitField() &&
+ ImplIvar->getBitWidthValue(Context) !=
+ ClsIvar->getBitWidthValue(Context)) {
+ Diag(ImplIvar->getBitWidth()->getLocStart(),
+ diag::err_conflicting_ivar_bitwidth) << ImplIvar->getIdentifier();
+ Diag(ClsIvar->getBitWidth()->getLocStart(),
+ diag::note_previous_definition);
+ }
+ // Make sure the names are identical.
+ if (ImplIvar->getIdentifier() != ClsIvar->getIdentifier()) {
+ Diag(ImplIvar->getLocation(), diag::err_conflicting_ivar_name)
+ << ImplIvar->getIdentifier() << ClsIvar->getIdentifier();
+ Diag(ClsIvar->getLocation(), diag::note_previous_definition);
+ }
+ --numIvars;
+ }
+
+ if (numIvars > 0)
+ Diag(ivars[j]->getLocation(), diag::err_inconsistent_ivar_count);
+ else if (IVI != IVE)
+ Diag(IVI->getLocation(), diag::err_inconsistent_ivar_count);
+}
+
+static void WarnUndefinedMethod(Sema &S, SourceLocation ImpLoc,
+ ObjCMethodDecl *method,
+ bool &IncompleteImpl,
+ unsigned DiagID,
+ NamedDecl *NeededFor = nullptr) {
+  // There is no point in warning about a missing definition of a method
+  // that is 'unavailable'.
+ switch (method->getAvailability()) {
+ case AR_Available:
+ case AR_Deprecated:
+ break;
+
+ // Don't warn about unavailable or not-yet-introduced methods.
+ case AR_NotYetIntroduced:
+ case AR_Unavailable:
+ return;
+ }
+
+ // FIXME: For now ignore 'IncompleteImpl'.
+ // Previously we grouped all unimplemented methods under a single
+ // warning, but some users strongly voiced that they would prefer
+ // separate warnings. We will give that approach a try, as that
+ // matches what we do with protocols.
+ {
+ const Sema::SemaDiagnosticBuilder &B = S.Diag(ImpLoc, DiagID);
+ B << method;
+ if (NeededFor)
+ B << NeededFor;
+ }
+
+ // Issue a note to the original declaration.
+ SourceLocation MethodLoc = method->getLocStart();
+ if (MethodLoc.isValid())
+ S.Diag(MethodLoc, diag::note_method_declared_at) << method;
+}
+
+/// Determines if type B can be substituted for type A. Returns true if we can
+/// guarantee that anything that the user will do to an object of type A can
+/// also be done to an object of type B. This is trivially true if the two
+/// types are the same, or if B is a subclass of A. It becomes more complex
+/// in cases where protocols are involved.
+///
+/// Object types in Objective-C describe the minimum requirements for an
+/// object, rather than providing a complete description of a type. For
+/// example, if A is a subclass of B, then B* may refer to an instance of A.
+/// The principle of substitutability means that we may use an instance of A
+/// anywhere that we may use an instance of B - it will implement all of the
+/// ivars of B and all of the methods of B.
+///
+/// This substitutability is important when type checking methods, because
+/// the implementation may have stricter type definitions than the interface.
+/// The interface specifies minimum requirements, but the implementation may
+/// have more accurate ones. For example, a method may privately accept
+/// instances of B, but only publish that it accepts instances of A. Any
+/// object passed to it will be type checked against B, and so will implicitly
+/// be a valid A*. Similarly, a method may return a subclass of the class that
+/// it is declared as returning.
+///
+/// This is most important when considering subclassing. A method in a
+/// subclass must accept any object as an argument that its superclass's
+/// implementation accepts. It may, however, accept a more general type
+/// without breaking substitutability (i.e. you can still use the subclass
+/// anywhere that you can use the superclass, but not vice versa). The
+/// converse requirement applies to return types: the return type for a
+/// subclass method must be a valid object of the kind that the superclass
+/// advertises, but it may be specified more accurately. This avoids the need
+/// for explicit down-casting by callers.
+///
+/// Note: This is a stricter requirement than for assignment.
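+///
+/// For illustration, with hypothetical classes where A is a subclass of B:
+/// \code
+/// // Declared:     - (B *)produce;   - (void)consume:(A *)obj;
+/// // Implemented:  - (A *)produce;   - (void)consume:(B *)obj;
+/// \endcode
+/// Both pass: the implementation returns a more specific type and accepts
+/// a more general one, so substitutability is preserved in each direction.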
+static bool isObjCTypeSubstitutable(ASTContext &Context,
+ const ObjCObjectPointerType *A,
+ const ObjCObjectPointerType *B,
+ bool rejectId) {
+ // Reject a protocol-unqualified id.
+ if (rejectId && B->isObjCIdType()) return false;
+
+ // If B is a qualified id, then A must also be a qualified id and it must
+ // implement all of the protocols in B. It may not be a qualified class.
+ // For example, MyClass<A> can be assigned to id<A>, but MyClass<A> is a
+ // stricter definition so it is not substitutable for id<A>.
+ if (B->isObjCQualifiedIdType()) {
+ return A->isObjCQualifiedIdType() &&
+ Context.ObjCQualifiedIdTypesAreCompatible(QualType(A, 0),
+ QualType(B,0),
+ false);
+ }
+
+ /*
+ // id is a special type that bypasses type checking completely. We want a
+ // warning when it is used in one place but not another.
+ if (C.isObjCIdType(A) || C.isObjCIdType(B)) return false;
+
+
+ // If B is a qualified id, then A must also be a qualified id (which it isn't
+ // if we've got this far)
+ if (B->isObjCQualifiedIdType()) return false;
+ */
+
+ // Now we know that A and B are (potentially-qualified) class types. The
+ // normal rules for assignment apply.
+ return Context.canAssignObjCInterfaces(A, B);
+}
+
+static SourceRange getTypeRange(TypeSourceInfo *TSI) {
+ return (TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange());
+}
+
+/// Determine whether two sets of Objective-C declaration qualifiers conflict.
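+///
+/// For illustration (hypothetical protocol method):
+/// \code
+/// - (void)fill:(inout NSError **)err; // declaration
+/// - (void)fill:(out NSError **)err;   // conflicting qualifier
+/// \endcode
+/// A difference only in nullability (masked out below) does not conflict.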
+static bool objcModifiersConflict(Decl::ObjCDeclQualifier x,
+ Decl::ObjCDeclQualifier y) {
+ return (x & ~Decl::OBJC_TQ_CSNullability) !=
+ (y & ~Decl::OBJC_TQ_CSNullability);
+}
+
+static bool CheckMethodOverrideReturn(Sema &S,
+ ObjCMethodDecl *MethodImpl,
+ ObjCMethodDecl *MethodDecl,
+ bool IsProtocolMethodDecl,
+ bool IsOverridingMode,
+ bool Warn) {
+ if (IsProtocolMethodDecl &&
+ objcModifiersConflict(MethodDecl->getObjCDeclQualifier(),
+ MethodImpl->getObjCDeclQualifier())) {
+ if (Warn) {
+ S.Diag(MethodImpl->getLocation(),
+ (IsOverridingMode
+ ? diag::warn_conflicting_overriding_ret_type_modifiers
+ : diag::warn_conflicting_ret_type_modifiers))
+ << MethodImpl->getDeclName()
+ << MethodImpl->getReturnTypeSourceRange();
+ S.Diag(MethodDecl->getLocation(), diag::note_previous_declaration)
+ << MethodDecl->getReturnTypeSourceRange();
+ }
+ else
+ return false;
+ }
+ if (Warn && IsOverridingMode &&
+ !isa<ObjCImplementationDecl>(MethodImpl->getDeclContext()) &&
+ !S.Context.hasSameNullabilityTypeQualifier(MethodImpl->getReturnType(),
+ MethodDecl->getReturnType(),
+ false)) {
+ auto nullabilityMethodImpl =
+ *MethodImpl->getReturnType()->getNullability(S.Context);
+ auto nullabilityMethodDecl =
+ *MethodDecl->getReturnType()->getNullability(S.Context);
+ S.Diag(MethodImpl->getLocation(),
+ diag::warn_conflicting_nullability_attr_overriding_ret_types)
+ << DiagNullabilityKind(
+ nullabilityMethodImpl,
+ ((MethodImpl->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability)
+ != 0))
+ << DiagNullabilityKind(
+ nullabilityMethodDecl,
+ ((MethodDecl->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability)
+ != 0));
+ S.Diag(MethodDecl->getLocation(), diag::note_previous_declaration);
+ }
+
+ if (S.Context.hasSameUnqualifiedType(MethodImpl->getReturnType(),
+ MethodDecl->getReturnType()))
+ return true;
+ if (!Warn)
+ return false;
+
+ unsigned DiagID =
+ IsOverridingMode ? diag::warn_conflicting_overriding_ret_types
+ : diag::warn_conflicting_ret_types;
+
+ // Mismatches between ObjC pointers go into a different warning
+ // category, and sometimes they're even completely whitelisted.
+ if (const ObjCObjectPointerType *ImplPtrTy =
+ MethodImpl->getReturnType()->getAs<ObjCObjectPointerType>()) {
+ if (const ObjCObjectPointerType *IfacePtrTy =
+ MethodDecl->getReturnType()->getAs<ObjCObjectPointerType>()) {
+ // Allow non-matching return types as long as they don't violate
+ // the principle of substitutability. Specifically, we permit
+ // return types that are subclasses of the declared return type,
+ // or that are more-qualified versions of the declared type.
+ if (isObjCTypeSubstitutable(S.Context, IfacePtrTy, ImplPtrTy, false))
+ return false;
+
+ DiagID =
+ IsOverridingMode ? diag::warn_non_covariant_overriding_ret_types
+ : diag::warn_non_covariant_ret_types;
+ }
+ }
+
+ S.Diag(MethodImpl->getLocation(), DiagID)
+ << MethodImpl->getDeclName() << MethodDecl->getReturnType()
+ << MethodImpl->getReturnType()
+ << MethodImpl->getReturnTypeSourceRange();
+ S.Diag(MethodDecl->getLocation(), IsOverridingMode
+ ? diag::note_previous_declaration
+ : diag::note_previous_definition)
+ << MethodDecl->getReturnTypeSourceRange();
+ return false;
+}
+
+static bool CheckMethodOverrideParam(Sema &S,
+ ObjCMethodDecl *MethodImpl,
+ ObjCMethodDecl *MethodDecl,
+ ParmVarDecl *ImplVar,
+ ParmVarDecl *IfaceVar,
+ bool IsProtocolMethodDecl,
+ bool IsOverridingMode,
+ bool Warn) {
+ if (IsProtocolMethodDecl &&
+ objcModifiersConflict(ImplVar->getObjCDeclQualifier(),
+ IfaceVar->getObjCDeclQualifier())) {
+ if (Warn) {
+ if (IsOverridingMode)
+ S.Diag(ImplVar->getLocation(),
+ diag::warn_conflicting_overriding_param_modifiers)
+ << getTypeRange(ImplVar->getTypeSourceInfo())
+ << MethodImpl->getDeclName();
+ else S.Diag(ImplVar->getLocation(),
+ diag::warn_conflicting_param_modifiers)
+ << getTypeRange(ImplVar->getTypeSourceInfo())
+ << MethodImpl->getDeclName();
+ S.Diag(IfaceVar->getLocation(), diag::note_previous_declaration)
+ << getTypeRange(IfaceVar->getTypeSourceInfo());
+ }
+ else
+ return false;
+ }
+
+ QualType ImplTy = ImplVar->getType();
+ QualType IfaceTy = IfaceVar->getType();
+ if (Warn && IsOverridingMode &&
+ !isa<ObjCImplementationDecl>(MethodImpl->getDeclContext()) &&
+ !S.Context.hasSameNullabilityTypeQualifier(ImplTy, IfaceTy, true)) {
+ S.Diag(ImplVar->getLocation(),
+ diag::warn_conflicting_nullability_attr_overriding_param_types)
+ << DiagNullabilityKind(
+ *ImplTy->getNullability(S.Context),
+ ((ImplVar->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability)
+ != 0))
+ << DiagNullabilityKind(
+ *IfaceTy->getNullability(S.Context),
+ ((IfaceVar->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability)
+ != 0));
+ S.Diag(IfaceVar->getLocation(), diag::note_previous_declaration);
+ }
+ if (S.Context.hasSameUnqualifiedType(ImplTy, IfaceTy))
+ return true;
+
+ if (!Warn)
+ return false;
+ unsigned DiagID =
+ IsOverridingMode ? diag::warn_conflicting_overriding_param_types
+ : diag::warn_conflicting_param_types;
+
+ // Mismatches between ObjC pointers go into a different warning
+ // category, and sometimes they're even completely whitelisted.
+ if (const ObjCObjectPointerType *ImplPtrTy =
+ ImplTy->getAs<ObjCObjectPointerType>()) {
+ if (const ObjCObjectPointerType *IfacePtrTy =
+ IfaceTy->getAs<ObjCObjectPointerType>()) {
+ // Allow non-matching argument types as long as they don't
+ // violate the principle of substitutability. Specifically, the
+ // implementation must accept any objects that the superclass
+ // accepts, however it may also accept others.
+ if (isObjCTypeSubstitutable(S.Context, ImplPtrTy, IfacePtrTy, true))
+ return false;
+
+ DiagID =
+ IsOverridingMode ? diag::warn_non_contravariant_overriding_param_types
+ : diag::warn_non_contravariant_param_types;
+ }
+ }
+
+ S.Diag(ImplVar->getLocation(), DiagID)
+ << getTypeRange(ImplVar->getTypeSourceInfo())
+ << MethodImpl->getDeclName() << IfaceTy << ImplTy;
+ S.Diag(IfaceVar->getLocation(),
+ (IsOverridingMode ? diag::note_previous_declaration
+ : diag::note_previous_definition))
+ << getTypeRange(IfaceVar->getTypeSourceInfo());
+ return false;
+}
+
+/// In ARC, check whether the conventional meanings of the two methods
+/// match. If they don't, it's a hard error.
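+///
+/// A sketch of such a mismatch, using a hypothetical method: a declaration
+/// \code
+/// - (instancetype)newWidget; // 'new' family: returns a retained object
+/// \endcode
+/// paired with a redeclaration whose result type is not an object pointer
+/// falls out of the 'new' family, changing ownership conventions under ARC.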
+static bool checkMethodFamilyMismatch(Sema &S, ObjCMethodDecl *impl,
+ ObjCMethodDecl *decl) {
+ ObjCMethodFamily implFamily = impl->getMethodFamily();
+ ObjCMethodFamily declFamily = decl->getMethodFamily();
+ if (implFamily == declFamily) return false;
+
+ // Since conventions are sorted by selector, the only possibility is
+ // that the types differ enough to cause one selector or the other
+ // to fall out of the family.
+ assert(implFamily == OMF_None || declFamily == OMF_None);
+
+ // No further diagnostics required on invalid declarations.
+ if (impl->isInvalidDecl() || decl->isInvalidDecl()) return true;
+
+ const ObjCMethodDecl *unmatched = impl;
+ ObjCMethodFamily family = declFamily;
+ unsigned errorID = diag::err_arc_lost_method_convention;
+ unsigned noteID = diag::note_arc_lost_method_convention;
+ if (declFamily == OMF_None) {
+ unmatched = decl;
+ family = implFamily;
+ errorID = diag::err_arc_gained_method_convention;
+ noteID = diag::note_arc_gained_method_convention;
+ }
+
+ // Indexes into a %select clause in the diagnostic.
+ enum FamilySelector {
+ F_alloc, F_copy, F_mutableCopy = F_copy, F_init, F_new
+ };
+ FamilySelector familySelector = FamilySelector();
+
+ switch (family) {
+ case OMF_None: llvm_unreachable("logic error, no method convention");
+ case OMF_retain:
+ case OMF_release:
+ case OMF_autorelease:
+ case OMF_dealloc:
+ case OMF_finalize:
+ case OMF_retainCount:
+ case OMF_self:
+ case OMF_initialize:
+ case OMF_performSelector:
+ // Mismatches for these methods don't change ownership
+ // conventions, so we don't care.
+ return false;
+
+ case OMF_init: familySelector = F_init; break;
+ case OMF_alloc: familySelector = F_alloc; break;
+ case OMF_copy: familySelector = F_copy; break;
+ case OMF_mutableCopy: familySelector = F_mutableCopy; break;
+ case OMF_new: familySelector = F_new; break;
+ }
+
+ enum ReasonSelector { R_NonObjectReturn, R_UnrelatedReturn };
+ ReasonSelector reasonSelector;
+
+ // The only reason these methods don't fall within their families is
+ // due to unusual result types.
+ if (unmatched->getReturnType()->isObjCObjectPointerType()) {
+ reasonSelector = R_UnrelatedReturn;
+ } else {
+ reasonSelector = R_NonObjectReturn;
+ }
+
+  S.Diag(impl->getLocation(), errorID) << int(familySelector)
+                                       << int(reasonSelector);
+  S.Diag(decl->getLocation(), noteID) << int(familySelector)
+                                      << int(reasonSelector);
+
+ return true;
+}
+
+void Sema::WarnConflictingTypedMethods(ObjCMethodDecl *ImpMethodDecl,
+ ObjCMethodDecl *MethodDecl,
+ bool IsProtocolMethodDecl) {
+ if (getLangOpts().ObjCAutoRefCount &&
+ checkMethodFamilyMismatch(*this, ImpMethodDecl, MethodDecl))
+ return;
+
+ CheckMethodOverrideReturn(*this, ImpMethodDecl, MethodDecl,
+ IsProtocolMethodDecl, false,
+ true);
+
+ for (ObjCMethodDecl::param_iterator IM = ImpMethodDecl->param_begin(),
+ IF = MethodDecl->param_begin(), EM = ImpMethodDecl->param_end(),
+ EF = MethodDecl->param_end();
+ IM != EM && IF != EF; ++IM, ++IF) {
+ CheckMethodOverrideParam(*this, ImpMethodDecl, MethodDecl, *IM, *IF,
+ IsProtocolMethodDecl, false, true);
+ }
+
+ if (ImpMethodDecl->isVariadic() != MethodDecl->isVariadic()) {
+ Diag(ImpMethodDecl->getLocation(),
+ diag::warn_conflicting_variadic);
+ Diag(MethodDecl->getLocation(), diag::note_previous_declaration);
+ }
+}
+
+void Sema::CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
+ ObjCMethodDecl *Overridden,
+ bool IsProtocolMethodDecl) {
+
+ CheckMethodOverrideReturn(*this, Method, Overridden,
+ IsProtocolMethodDecl, true,
+ true);
+
+ for (ObjCMethodDecl::param_iterator IM = Method->param_begin(),
+ IF = Overridden->param_begin(), EM = Method->param_end(),
+ EF = Overridden->param_end();
+ IM != EM && IF != EF; ++IM, ++IF) {
+ CheckMethodOverrideParam(*this, Method, Overridden, *IM, *IF,
+ IsProtocolMethodDecl, true, true);
+ }
+
+ if (Method->isVariadic() != Overridden->isVariadic()) {
+ Diag(Method->getLocation(),
+ diag::warn_conflicting_overriding_variadic);
+ Diag(Overridden->getLocation(), diag::note_previous_declaration);
+ }
+}
+
+/// WarnExactTypedMethods - This routine issues a warning if a method
+/// implementation's type matches exactly that of its declaration.
+void Sema::WarnExactTypedMethods(ObjCMethodDecl *ImpMethodDecl,
+ ObjCMethodDecl *MethodDecl,
+ bool IsProtocolMethodDecl) {
+  // Don't issue a warning when the protocol method is optional: the primary
+  // class is not required to implement it, and it is safe for the category
+  // to implement it.
+ if (MethodDecl->getImplementationControl() == ObjCMethodDecl::Optional)
+ return;
+  // Don't issue a warning when the primary class's method is
+  // deprecated/unavailable.
+ if (MethodDecl->hasAttr<UnavailableAttr>() ||
+ MethodDecl->hasAttr<DeprecatedAttr>())
+ return;
+
+ bool match = CheckMethodOverrideReturn(*this, ImpMethodDecl, MethodDecl,
+ IsProtocolMethodDecl, false, false);
+ if (match)
+ for (ObjCMethodDecl::param_iterator IM = ImpMethodDecl->param_begin(),
+ IF = MethodDecl->param_begin(), EM = ImpMethodDecl->param_end(),
+ EF = MethodDecl->param_end();
+ IM != EM && IF != EF; ++IM, ++IF) {
+ match = CheckMethodOverrideParam(*this, ImpMethodDecl, MethodDecl,
+ *IM, *IF,
+ IsProtocolMethodDecl, false, false);
+ if (!match)
+ break;
+ }
+ if (match)
+ match = (ImpMethodDecl->isVariadic() == MethodDecl->isVariadic());
+ if (match)
+ match = !(MethodDecl->isClassMethod() &&
+ MethodDecl->getSelector() == GetNullarySelector("load", Context));
+
+ if (match) {
+ Diag(ImpMethodDecl->getLocation(),
+ diag::warn_category_method_impl_match);
+ Diag(MethodDecl->getLocation(), diag::note_method_declared_at)
+ << MethodDecl->getDeclName();
+ }
+}
+
+/// FIXME: Type hierarchies in Objective-C can be deep. We could most likely
+/// improve the efficiency of selector lookups and type checking by associating
+/// with each protocol / interface / category the flattened instance tables. If
+/// we used an immutable set to keep the table then it wouldn't add significant
+/// memory cost and it would be handy for lookups.
+
+typedef llvm::DenseSet<IdentifierInfo*> ProtocolNameSet;
+typedef std::unique_ptr<ProtocolNameSet> LazyProtocolNameSet;
+
+static void findProtocolsWithExplicitImpls(const ObjCProtocolDecl *PDecl,
+ ProtocolNameSet &PNS) {
+ if (PDecl->hasAttr<ObjCExplicitProtocolImplAttr>())
+ PNS.insert(PDecl->getIdentifier());
+ for (const auto *PI : PDecl->protocols())
+ findProtocolsWithExplicitImpls(PI, PNS);
+}
+
+/// Recursively populates a set with all conformed protocols in a class
+/// hierarchy that have the 'objc_protocol_requires_explicit_implementation'
+/// attribute.
+static void findProtocolsWithExplicitImpls(const ObjCInterfaceDecl *Super,
+ ProtocolNameSet &PNS) {
+ if (!Super)
+ return;
+
+ for (const auto *I : Super->all_referenced_protocols())
+ findProtocolsWithExplicitImpls(I, PNS);
+
+ findProtocolsWithExplicitImpls(Super->getSuperClass(), PNS);
+}
+
+/// CheckProtocolMethodDefs - This routine checks for unimplemented methods
+/// declared in a protocol, and in the protocols it references.
+static void CheckProtocolMethodDefs(Sema &S,
+ SourceLocation ImpLoc,
+ ObjCProtocolDecl *PDecl,
+ bool& IncompleteImpl,
+ const Sema::SelectorSet &InsMap,
+ const Sema::SelectorSet &ClsMap,
+ ObjCContainerDecl *CDecl,
+ LazyProtocolNameSet &ProtocolsExplictImpl) {
+ ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl);
+ ObjCInterfaceDecl *IDecl = C ? C->getClassInterface()
+ : dyn_cast<ObjCInterfaceDecl>(CDecl);
+ assert (IDecl && "CheckProtocolMethodDefs - IDecl is null");
+
+ ObjCInterfaceDecl *Super = IDecl->getSuperClass();
+ ObjCInterfaceDecl *NSIDecl = nullptr;
+
+ // If this protocol is marked 'objc_protocol_requires_explicit_implementation'
+ // then we should check if any class in the super class hierarchy also
+ // conforms to this protocol, either directly or via protocol inheritance.
+ // If so, we can skip checking this protocol completely because we
+ // know that a parent class already satisfies this protocol.
+ //
+ // Note: we could generalize this logic for all protocols, and merely
+ // add the limit on looking at the super class chain for just
+ // specially marked protocols. This may be a good optimization. This
+ // change is restricted to 'objc_protocol_requires_explicit_implementation'
+ // protocols for now for controlled evaluation.
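+  //
+  // For illustration, such a protocol is declared (hypothetical name) as:
+  //   __attribute__((objc_protocol_requires_explicit_implementation))
+  //   @protocol Drawable
+  //   - (void)draw;
+  //   @end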
+ if (PDecl->hasAttr<ObjCExplicitProtocolImplAttr>()) {
+ if (!ProtocolsExplictImpl) {
+ ProtocolsExplictImpl.reset(new ProtocolNameSet);
+ findProtocolsWithExplicitImpls(Super, *ProtocolsExplictImpl);
+ }
+ if (ProtocolsExplictImpl->find(PDecl->getIdentifier()) !=
+ ProtocolsExplictImpl->end())
+ return;
+
+ // If no super class conforms to the protocol, we should not search
+ // for methods in the super class to implicitly satisfy the protocol.
+ Super = nullptr;
+ }
+
+ if (S.getLangOpts().ObjCRuntime.isNeXTFamily()) {
+    // Check to see if the class implements the forwardInvocation method
+    // and derives from 'NSProxy', in which case it forwards requests
+    // from one object to another.
+    // Under such conditions every possible method is effectively
+    // implemented in the class, so we should not issue "Method definition
+    // not found" warnings.
+ // FIXME: Use a general GetUnarySelector method for this.
+ IdentifierInfo* II = &S.Context.Idents.get("forwardInvocation");
+ Selector fISelector = S.Context.Selectors.getSelector(1, &II);
+ if (InsMap.count(fISelector))
+ // Is IDecl derived from 'NSProxy'? If so, no instance methods
+ // need be implemented in the implementation.
+ NSIDecl = IDecl->lookupInheritedClass(&S.Context.Idents.get("NSProxy"));
+ }
+
+ // If this is a forward protocol declaration, get its definition.
+ if (!PDecl->isThisDeclarationADefinition() &&
+ PDecl->getDefinition())
+ PDecl = PDecl->getDefinition();
+
+  // If a method lookup fails locally we still need to look and see if
+  // the method was implemented by a base class or an inherited
+  // protocol. This lookup is slow, but it occurs rarely in correct code
+  // and otherwise results in a warning.
+
+  // Check for unimplemented instance methods.
+ if (!NSIDecl)
+ for (auto *method : PDecl->instance_methods()) {
+ if (method->getImplementationControl() != ObjCMethodDecl::Optional &&
+ !method->isPropertyAccessor() &&
+ !InsMap.count(method->getSelector()) &&
+ (!Super || !Super->lookupMethod(method->getSelector(),
+ true /* instance */,
+ false /* shallowCategory */,
+ true /* followsSuper */,
+ nullptr /* category */))) {
+        // If a method is not implemented in the category implementation but
+        // has been declared in its primary class, superclass, or in one of
+        // their protocols, there is no need to issue the warning, because
+        // the method will be implemented in the primary class or one of its
+        // superclass implementations.
+
+        // Ugly, but necessary. A method declared in a protocol might have
+        // been synthesized due to a property declared in the class that
+        // uses the protocol.
+ if (ObjCMethodDecl *MethodInClass =
+ IDecl->lookupMethod(method->getSelector(),
+ true /* instance */,
+ true /* shallowCategoryLookup */,
+ false /* followSuper */))
+ if (C || MethodInClass->isPropertyAccessor())
+ continue;
+ unsigned DIAG = diag::warn_unimplemented_protocol_method;
+ if (!S.Diags.isIgnored(DIAG, ImpLoc)) {
+ WarnUndefinedMethod(S, ImpLoc, method, IncompleteImpl, DIAG,
+ PDecl);
+ }
+ }
+ }
+  // Check for unimplemented class methods.
+ for (auto *method : PDecl->class_methods()) {
+ if (method->getImplementationControl() != ObjCMethodDecl::Optional &&
+ !ClsMap.count(method->getSelector()) &&
+ (!Super || !Super->lookupMethod(method->getSelector(),
+ false /* class method */,
+ false /* shallowCategoryLookup */,
+ true /* followSuper */,
+ nullptr /* category */))) {
+ // See above comment for instance method lookups.
+ if (C && IDecl->lookupMethod(method->getSelector(),
+ false /* class */,
+ true /* shallowCategoryLookup */,
+ false /* followSuper */))
+ continue;
+
+ unsigned DIAG = diag::warn_unimplemented_protocol_method;
+ if (!S.Diags.isIgnored(DIAG, ImpLoc)) {
+ WarnUndefinedMethod(S, ImpLoc, method, IncompleteImpl, DIAG, PDecl);
+ }
+ }
+ }
+  // Check this protocol's referenced protocols, recursively.
+ for (auto *PI : PDecl->protocols())
+ CheckProtocolMethodDefs(S, ImpLoc, PI, IncompleteImpl, InsMap, ClsMap,
+ CDecl, ProtocolsExplictImpl);
+}
+
+/// MatchAllMethodDeclarations - Check methods declared in an interface
+/// or protocol against those declared in their implementations.
+///
+void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap,
+ const SelectorSet &ClsMap,
+ SelectorSet &InsMapSeen,
+ SelectorSet &ClsMapSeen,
+ ObjCImplDecl* IMPDecl,
+ ObjCContainerDecl* CDecl,
+ bool &IncompleteImpl,
+ bool ImmediateClass,
+ bool WarnCategoryMethodImpl) {
+  // Check and see if instance methods in the class interface have been
+  // implemented in the implementation class. If so, their types must match.
+ for (auto *I : CDecl->instance_methods()) {
+ if (!InsMapSeen.insert(I->getSelector()).second)
+ continue;
+ if (!I->isPropertyAccessor() &&
+ !InsMap.count(I->getSelector())) {
+ if (ImmediateClass)
+ WarnUndefinedMethod(*this, IMPDecl->getLocation(), I, IncompleteImpl,
+ diag::warn_undef_method_impl);
+ continue;
+ } else {
+ ObjCMethodDecl *ImpMethodDecl =
+ IMPDecl->getInstanceMethod(I->getSelector());
+ assert(CDecl->getInstanceMethod(I->getSelector()) &&
+ "Expected to find the method through lookup as well");
+ // ImpMethodDecl may be null as in a @dynamic property.
+ if (ImpMethodDecl) {
+ if (!WarnCategoryMethodImpl)
+ WarnConflictingTypedMethods(ImpMethodDecl, I,
+ isa<ObjCProtocolDecl>(CDecl));
+ else if (!I->isPropertyAccessor())
+ WarnExactTypedMethods(ImpMethodDecl, I, isa<ObjCProtocolDecl>(CDecl));
+ }
+ }
+ }
+
+  // Check and see if class methods in the class interface have been
+  // implemented in the implementation class. If so, their types must match.
+ for (auto *I : CDecl->class_methods()) {
+ if (!ClsMapSeen.insert(I->getSelector()).second)
+ continue;
+ if (!ClsMap.count(I->getSelector())) {
+ if (ImmediateClass)
+ WarnUndefinedMethod(*this, IMPDecl->getLocation(), I, IncompleteImpl,
+ diag::warn_undef_method_impl);
+ } else {
+ ObjCMethodDecl *ImpMethodDecl =
+ IMPDecl->getClassMethod(I->getSelector());
+ assert(CDecl->getClassMethod(I->getSelector()) &&
+ "Expected to find the method through lookup as well");
+ if (!WarnCategoryMethodImpl)
+ WarnConflictingTypedMethods(ImpMethodDecl, I,
+ isa<ObjCProtocolDecl>(CDecl));
+ else
+ WarnExactTypedMethods(ImpMethodDecl, I,
+ isa<ObjCProtocolDecl>(CDecl));
+ }
+ }
+
+ if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl> (CDecl)) {
+ // Also, check for methods declared in protocols inherited by
+ // this protocol.
+ for (auto *PI : PD->protocols())
+ MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
+ IMPDecl, PI, IncompleteImpl, false,
+ WarnCategoryMethodImpl);
+ }
+
+ if (ObjCInterfaceDecl *I = dyn_cast<ObjCInterfaceDecl> (CDecl)) {
+    // When checking that methods in the implementation match their
+    // declarations, i.e. when WarnCategoryMethodImpl is false, check
+    // declarations in class extensions as well as those in categories.
+ if (!WarnCategoryMethodImpl) {
+ for (auto *Cat : I->visible_categories())
+ MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
+ IMPDecl, Cat, IncompleteImpl,
+ ImmediateClass && Cat->IsClassExtension(),
+ WarnCategoryMethodImpl);
+ } else {
+      // Methods in class extensions also need to be looked at next.
+ for (auto *Ext : I->visible_extensions())
+ MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
+ IMPDecl, Ext, IncompleteImpl, false,
+ WarnCategoryMethodImpl);
+ }
+
+    // Check for any implementation of methods declared in protocols.
+ for (auto *PI : I->all_referenced_protocols())
+ MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
+ IMPDecl, PI, IncompleteImpl, false,
+ WarnCategoryMethodImpl);
+
+    // FIXME: For now, we are not checking for an exact match of methods
+    // between a category implementation and its primary class's superclass.
+ if (!WarnCategoryMethodImpl && I->getSuperClass())
+ MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
+ IMPDecl,
+ I->getSuperClass(), IncompleteImpl, false);
+ }
+}
+
+/// CheckCategoryVsClassMethodMatches - Checks whether methods implemented in
+/// a category match those implemented in its primary class, and warns each
+/// time an exact match is found.
+void Sema::CheckCategoryVsClassMethodMatches(
+ ObjCCategoryImplDecl *CatIMPDecl) {
+ // Get category's primary class.
+ ObjCCategoryDecl *CatDecl = CatIMPDecl->getCategoryDecl();
+ if (!CatDecl)
+ return;
+ ObjCInterfaceDecl *IDecl = CatDecl->getClassInterface();
+ if (!IDecl)
+ return;
+ ObjCInterfaceDecl *SuperIDecl = IDecl->getSuperClass();
+ SelectorSet InsMap, ClsMap;
+
+ for (const auto *I : CatIMPDecl->instance_methods()) {
+ Selector Sel = I->getSelector();
+    // When checking for methods implemented in the category, skip over
+    // those declared in the category class's superclass. This is because
+    // the superclass must implement the method.
+ if (SuperIDecl && SuperIDecl->lookupMethod(Sel, true))
+ continue;
+ InsMap.insert(Sel);
+ }
+
+ for (const auto *I : CatIMPDecl->class_methods()) {
+ Selector Sel = I->getSelector();
+ if (SuperIDecl && SuperIDecl->lookupMethod(Sel, false))
+ continue;
+ ClsMap.insert(Sel);
+ }
+ if (InsMap.empty() && ClsMap.empty())
+ return;
+
+ SelectorSet InsMapSeen, ClsMapSeen;
+ bool IncompleteImpl = false;
+ MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
+ CatIMPDecl, IDecl,
+ IncompleteImpl, false,
+ true /*WarnCategoryMethodImpl*/);
+}
+
+void Sema::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
+ ObjCContainerDecl* CDecl,
+ bool IncompleteImpl) {
+ SelectorSet InsMap;
+  // Check and see if instance methods in the class interface have been
+  // implemented in the implementation class.
+ for (const auto *I : IMPDecl->instance_methods())
+ InsMap.insert(I->getSelector());
+
+ // Add the selectors for getters/setters of @dynamic properties.
+ for (const auto *PImpl : IMPDecl->property_impls()) {
+ // We only care about @dynamic implementations.
+ if (PImpl->getPropertyImplementation() != ObjCPropertyImplDecl::Dynamic)
+ continue;
+
+ const auto *P = PImpl->getPropertyDecl();
+ if (!P) continue;
+
+ InsMap.insert(P->getGetterName());
+ if (!P->getSetterName().isNull())
+ InsMap.insert(P->getSetterName());
+ }
+
+  // Check that properties declared in the interface have either 1) an
+  // implementation or 2) a @synthesize/@dynamic implementation of the
+  // property in the @implementation.
+ if (const ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
+ bool SynthesizeProperties = LangOpts.ObjCDefaultSynthProperties &&
+ LangOpts.ObjCRuntime.isNonFragile() &&
+ !IDecl->isObjCRequiresPropertyDefs();
+ DiagnoseUnimplementedProperties(S, IMPDecl, CDecl, SynthesizeProperties);
+ }
+
+ // Diagnose null-resettable synthesized setters.
+ diagnoseNullResettableSynthesizedSetters(IMPDecl);
+
+ SelectorSet ClsMap;
+ for (const auto *I : IMPDecl->class_methods())
+ ClsMap.insert(I->getSelector());
+
+  // Check for type conflicts, if any, between methods declared in a
+  // class/protocol and its implementation.
+ SelectorSet InsMapSeen, ClsMapSeen;
+ MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
+ IMPDecl, CDecl,
+ IncompleteImpl, true);
+
+  // Check all methods implemented in the category against those declared
+  // in its primary class.
+ if (ObjCCategoryImplDecl *CatDecl =
+ dyn_cast<ObjCCategoryImplDecl>(IMPDecl))
+ CheckCategoryVsClassMethodMatches(CatDecl);
+
+ // Check the protocol list for unimplemented methods in the @implementation
+ // class.
+  // Check and see if class methods in the class interface have been
+  // implemented in the implementation class.
+
+ LazyProtocolNameSet ExplicitImplProtocols;
+
+ if (ObjCInterfaceDecl *I = dyn_cast<ObjCInterfaceDecl> (CDecl)) {
+ for (auto *PI : I->all_referenced_protocols())
+ CheckProtocolMethodDefs(*this, IMPDecl->getLocation(), PI, IncompleteImpl,
+ InsMap, ClsMap, I, ExplicitImplProtocols);
+ } else if (ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl)) {
+    // For an extended class, unimplemented methods in its protocols will
+    // be reported in the primary class.
+ if (!C->IsClassExtension()) {
+ for (auto *P : C->protocols())
+ CheckProtocolMethodDefs(*this, IMPDecl->getLocation(), P,
+ IncompleteImpl, InsMap, ClsMap, CDecl,
+ ExplicitImplProtocols);
+ DiagnoseUnimplementedProperties(S, IMPDecl, CDecl,
+ /*SynthesizeProperties=*/false);
+ }
+ } else
+ llvm_unreachable("invalid ObjCContainerDecl type.");
+}
+
+Sema::DeclGroupPtrTy
+Sema::ActOnForwardClassDeclaration(SourceLocation AtClassLoc,
+ IdentifierInfo **IdentList,
+ SourceLocation *IdentLocs,
+ ArrayRef<ObjCTypeParamList *> TypeParamLists,
+ unsigned NumElts) {
+ SmallVector<Decl *, 8> DeclsInGroup;
+ for (unsigned i = 0; i != NumElts; ++i) {
+ // Check for another declaration kind with the same name.
+ NamedDecl *PrevDecl
+ = LookupSingleName(TUScope, IdentList[i], IdentLocs[i],
+ LookupOrdinaryName, ForRedeclaration);
+ if (PrevDecl && !isa<ObjCInterfaceDecl>(PrevDecl)) {
+ // GCC apparently allows the following idiom:
+ //
+ // typedef NSObject < XCElementTogglerP > XCElementToggler;
+ // @class XCElementToggler;
+ //
+      // Here we have chosen to ignore the forward class declaration
+      // with a warning, since this is the implied behavior.
+ TypedefNameDecl *TDD = dyn_cast<TypedefNameDecl>(PrevDecl);
+ if (!TDD || !TDD->getUnderlyingType()->isObjCObjectType()) {
+ Diag(AtClassLoc, diag::err_redefinition_different_kind) << IdentList[i];
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ } else {
+        // A forward class declaration matching a typedef name of a class
+        // refers to the underlying class. Just ignore the forward class with
+        // a warning, as this forces the intended behavior, which is to look
+        // up the typedef name.
+ if (isa<ObjCObjectType>(TDD->getUnderlyingType())) {
+ Diag(AtClassLoc, diag::warn_forward_class_redefinition)
+ << IdentList[i];
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ continue;
+ }
+ }
+ }
+
+ // Create a declaration to describe this forward declaration.
+ ObjCInterfaceDecl *PrevIDecl
+ = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);
+
+ IdentifierInfo *ClassName = IdentList[i];
+ if (PrevIDecl && PrevIDecl->getIdentifier() != ClassName) {
+      // A previous decl with a different name comes from
+      // @compatibility_alias, for example:
+ // \code
+ // @class NewImage;
+ // @compatibility_alias OldImage NewImage;
+ // \endcode
+ // A lookup for 'OldImage' will return the 'NewImage' decl.
+ //
+ // In such a case use the real declaration name, instead of the alias one,
+ // otherwise we will break IdentifierResolver and redecls-chain invariants.
+ // FIXME: If necessary, add a bit to indicate that this ObjCInterfaceDecl
+ // has been aliased.
+ ClassName = PrevIDecl->getIdentifier();
+ }
+
+ // If this forward declaration has type parameters, compare them with the
+ // type parameters of the previous declaration.
+ ObjCTypeParamList *TypeParams = TypeParamLists[i];
+ if (PrevIDecl && TypeParams) {
+ if (ObjCTypeParamList *PrevTypeParams = PrevIDecl->getTypeParamList()) {
+ // Check for consistency with the previous declaration.
+ if (checkTypeParamListConsistency(
+ *this, PrevTypeParams, TypeParams,
+ TypeParamListContext::ForwardDeclaration)) {
+ TypeParams = nullptr;
+ }
+ } else if (ObjCInterfaceDecl *Def = PrevIDecl->getDefinition()) {
+ // The @interface does not have type parameters. Complain.
+ Diag(IdentLocs[i], diag::err_objc_parameterized_forward_class)
+ << ClassName
+ << TypeParams->getSourceRange();
+ Diag(Def->getLocation(), diag::note_defined_here)
+ << ClassName;
+
+ TypeParams = nullptr;
+ }
+ }
+
+ ObjCInterfaceDecl *IDecl
+ = ObjCInterfaceDecl::Create(Context, CurContext, AtClassLoc,
+ ClassName, TypeParams, PrevIDecl,
+ IdentLocs[i]);
+ IDecl->setAtEndRange(IdentLocs[i]);
+
+ PushOnScopeChains(IDecl, TUScope);
+ CheckObjCDeclScope(IDecl);
+ DeclsInGroup.push_back(IDecl);
+ }
+
+ return BuildDeclaratorGroup(DeclsInGroup, false);
+}
+
+static bool tryMatchRecordTypes(ASTContext &Context,
+ Sema::MethodMatchStrategy strategy,
+ const Type *left, const Type *right);
+
+static bool matchTypes(ASTContext &Context, Sema::MethodMatchStrategy strategy,
+ QualType leftQT, QualType rightQT) {
+ const Type *left =
+ Context.getCanonicalType(leftQT).getUnqualifiedType().getTypePtr();
+ const Type *right =
+ Context.getCanonicalType(rightQT).getUnqualifiedType().getTypePtr();
+
+ if (left == right) return true;
+
+ // If we're doing a strict match, the types have to match exactly.
+ if (strategy == Sema::MMS_strict) return false;
+
+ if (left->isIncompleteType() || right->isIncompleteType()) return false;
+
+ // Otherwise, use this absurdly complicated algorithm to try to
+ // validate the basic, low-level compatibility of the two types.
+
+ // As a minimum, require the sizes and alignments to match.
+ TypeInfo LeftTI = Context.getTypeInfo(left);
+ TypeInfo RightTI = Context.getTypeInfo(right);
+ if (LeftTI.Width != RightTI.Width)
+ return false;
+
+ if (LeftTI.Align != RightTI.Align)
+ return false;
+
+ // Consider all the kinds of non-dependent canonical types:
+ // - functions and arrays aren't possible as return and parameter types
+
+ // - vector types of equal size can be arbitrarily mixed
+ if (isa<VectorType>(left)) return isa<VectorType>(right);
+ if (isa<VectorType>(right)) return false;
+
+ // - references should only match references of identical type
+ // - structs, unions, and Objective-C objects must match more-or-less
+ // exactly
+ // - everything else should be a scalar
+ if (!left->isScalarType() || !right->isScalarType())
+ return tryMatchRecordTypes(Context, strategy, left, right);
+
+ // Make scalars agree in kind, except count bools as chars, and group
+ // all non-member pointers together.
+ Type::ScalarTypeKind leftSK = left->getScalarTypeKind();
+ Type::ScalarTypeKind rightSK = right->getScalarTypeKind();
+ if (leftSK == Type::STK_Bool) leftSK = Type::STK_Integral;
+ if (rightSK == Type::STK_Bool) rightSK = Type::STK_Integral;
+ if (leftSK == Type::STK_CPointer || leftSK == Type::STK_BlockPointer)
+ leftSK = Type::STK_ObjCObjectPointer;
+ if (rightSK == Type::STK_CPointer || rightSK == Type::STK_BlockPointer)
+ rightSK = Type::STK_ObjCObjectPointer;
+
+ // Note that data member pointers and function member pointers don't
+ // intermix because of the size differences.
+
+ return (leftSK == rightSK);
+}
+
+static bool tryMatchRecordTypes(ASTContext &Context,
+ Sema::MethodMatchStrategy strategy,
+ const Type *lt, const Type *rt) {
+ assert(lt && rt && lt != rt);
+
+ if (!isa<RecordType>(lt) || !isa<RecordType>(rt)) return false;
+ RecordDecl *left = cast<RecordType>(lt)->getDecl();
+ RecordDecl *right = cast<RecordType>(rt)->getDecl();
+
+ // Require union-hood to match.
+ if (left->isUnion() != right->isUnion()) return false;
+
+ // Require an exact match if either is non-POD.
+ if ((isa<CXXRecordDecl>(left) && !cast<CXXRecordDecl>(left)->isPOD()) ||
+ (isa<CXXRecordDecl>(right) && !cast<CXXRecordDecl>(right)->isPOD()))
+ return false;
+
+ // Require size and alignment to match.
+ TypeInfo LeftTI = Context.getTypeInfo(lt);
+ TypeInfo RightTI = Context.getTypeInfo(rt);
+ if (LeftTI.Width != RightTI.Width)
+ return false;
+
+ if (LeftTI.Align != RightTI.Align)
+ return false;
+
+ // Require fields to match.
+ RecordDecl::field_iterator li = left->field_begin(), le = left->field_end();
+ RecordDecl::field_iterator ri = right->field_begin(), re = right->field_end();
+ for (; li != le && ri != re; ++li, ++ri) {
+ if (!matchTypes(Context, strategy, li->getType(), ri->getType()))
+ return false;
+ }
+ return (li == le && ri == re);
+}
+
+/// MatchTwoMethodDeclarations - Checks whether two methods have matching
+/// types, and returns true or false accordingly.
+/// TODO: Handle protocol lists, such as id<p1,p2>, in type comparisons.
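+///
+/// For illustration (hypothetical selectors), under the loose strategy
+/// \code
+/// - (id)objectForKey:(id)key;
+/// - (NSString *)objectForKey:(NSObject *)key;
+/// \endcode
+/// match, because all object pointers are grouped into one scalar kind,
+/// while the strict strategy requires identical canonical types.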
+bool Sema::MatchTwoMethodDeclarations(const ObjCMethodDecl *left,
+ const ObjCMethodDecl *right,
+ MethodMatchStrategy strategy) {
+ if (!matchTypes(Context, strategy, left->getReturnType(),
+ right->getReturnType()))
+ return false;
+
+ // If either is hidden, it is not considered to match.
+ if (left->isHidden() || right->isHidden())
+ return false;
+
+ if (getLangOpts().ObjCAutoRefCount &&
+ (left->hasAttr<NSReturnsRetainedAttr>()
+ != right->hasAttr<NSReturnsRetainedAttr>() ||
+ left->hasAttr<NSConsumesSelfAttr>()
+ != right->hasAttr<NSConsumesSelfAttr>()))
+ return false;
+
+ ObjCMethodDecl::param_const_iterator
+ li = left->param_begin(), le = left->param_end(), ri = right->param_begin(),
+ re = right->param_end();
+
+ for (; li != le && ri != re; ++li, ++ri) {
+ assert(ri != right->param_end() && "Param mismatch");
+ const ParmVarDecl *lparm = *li, *rparm = *ri;
+
+ if (!matchTypes(Context, strategy, lparm->getType(), rparm->getType()))
+ return false;
+
+ if (getLangOpts().ObjCAutoRefCount &&
+ lparm->hasAttr<NSConsumedAttr>() != rparm->hasAttr<NSConsumedAttr>())
+ return false;
+ }
+ return true;
+}
+
+void Sema::addMethodToGlobalList(ObjCMethodList *List,
+ ObjCMethodDecl *Method) {
+ // Record at the head of the list whether there were 0, 1, or >= 2 methods
+ // inside categories.
+ if (ObjCCategoryDecl *CD =
+ dyn_cast<ObjCCategoryDecl>(Method->getDeclContext()))
+ if (!CD->IsClassExtension() && List->getBits() < 2)
+ List->setBits(List->getBits() + 1);
+
+ // If the list is empty, make it a singleton list.
+ if (List->getMethod() == nullptr) {
+ List->setMethod(Method);
+ List->setNext(nullptr);
+ return;
+ }
+
+  // We've seen a method with this name; see if we have already seen this
+  // type signature.
+ ObjCMethodList *Previous = List;
+ for (; List; Previous = List, List = List->getNext()) {
+ // If we are building a module, keep all of the methods.
+ if (getLangOpts().Modules && !getLangOpts().CurrentModule.empty())
+ continue;
+
+ if (!MatchTwoMethodDeclarations(Method, List->getMethod())) {
+      // Even if the two method types do not match, we would like to record
+      // that there is more than one declaration, so that the
+      // unavailability/deprecation warning is not too noisy.
+ if (!Method->isDefined())
+ List->setHasMoreThanOneDecl(true);
+ continue;
+ }
+
+ ObjCMethodDecl *PrevObjCMethod = List->getMethod();
+
+ // Propagate the 'defined' bit.
+ if (Method->isDefined())
+ PrevObjCMethod->setDefined(true);
+ else {
+ // Objective-C doesn't allow an @interface for a class after its
+ // @implementation. So if Method is not defined and there already is
+ // an entry for this type signature, Method has to be for a different
+ // class than PrevObjCMethod.
+ List->setHasMoreThanOneDecl(true);
+ }
+
+    // If a method is deprecated, push it into the global pool.
+ // This is used for better diagnostics.
+ if (Method->isDeprecated()) {
+ if (!PrevObjCMethod->isDeprecated())
+ List->setMethod(Method);
+ }
+    // If the new method is unavailable, push it into the global pool
+    // unless the previous one is deprecated.
+ if (Method->isUnavailable()) {
+ if (PrevObjCMethod->getAvailability() < AR_Deprecated)
+ List->setMethod(Method);
+ }
+
+ return;
+ }
+
+ // We have a new signature for an existing method - add it.
+ // This is extremely rare. Only 1% of Cocoa selectors are "overloaded".
+ ObjCMethodList *Mem = BumpAlloc.Allocate<ObjCMethodList>();
+ Previous->setNext(new (Mem) ObjCMethodList(Method));
+}
+
+/// \brief Read the contents of the method pool for a given selector from
+/// external storage.
+void Sema::ReadMethodPool(Selector Sel) {
+ assert(ExternalSource && "We need an external AST source");
+ ExternalSource->ReadMethodPool(Sel);
+}
+
+void Sema::AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl,
+ bool instance) {
+ // Ignore methods of invalid containers.
+ if (cast<Decl>(Method->getDeclContext())->isInvalidDecl())
+ return;
+
+ if (ExternalSource)
+ ReadMethodPool(Method->getSelector());
+
+ GlobalMethodPool::iterator Pos = MethodPool.find(Method->getSelector());
+ if (Pos == MethodPool.end())
+ Pos = MethodPool.insert(std::make_pair(Method->getSelector(),
+ GlobalMethods())).first;
+
+ Method->setDefined(impl);
+
+ ObjCMethodList &Entry = instance ? Pos->second.first : Pos->second.second;
+ addMethodToGlobalList(&Entry, Method);
+}
+
+/// Determines if this is an "acceptable" loose mismatch in the global
+/// method pool. This exists mostly as a hack to get around certain
+/// global mismatches which we can't afford to make warnings / errors.
+/// Really, what we want is a way to take a method out of the global
+/// method pool.
+static bool isAcceptableMethodMismatch(ObjCMethodDecl *chosen,
+ ObjCMethodDecl *other) {
+ if (!chosen->isInstanceMethod())
+ return false;
+
+ Selector sel = chosen->getSelector();
+ if (!sel.isUnarySelector() || sel.getNameForSlot(0) != "length")
+ return false;
+
+ // Don't complain about mismatches for -length if the method we
+ // chose has an integral result type.
+ return (chosen->getReturnType()->isIntegerType());
+}
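+
+// For illustration: the carve-out above tolerates, say, Foundation's
+//   - (NSUInteger)length;
+// coexisting in the pool with an unrelated, hypothetical
+//   - (int)length;
+// because the chosen method still has an integral result type, so messaging
+// 'length' on an 'id' receiver stays sound enough not to warrant a
+// diagnostic.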
+
+bool Sema::CollectMultipleMethodsInGlobalPool(
+ Selector Sel, SmallVectorImpl<ObjCMethodDecl *> &Methods, bool instance) {
+ if (ExternalSource)
+ ReadMethodPool(Sel);
+
+ GlobalMethodPool::iterator Pos = MethodPool.find(Sel);
+ if (Pos == MethodPool.end())
+ return false;
+ // Gather the non-hidden methods.
+ ObjCMethodList &MethList = instance ? Pos->second.first : Pos->second.second;
+ for (ObjCMethodList *M = &MethList; M; M = M->getNext())
+ if (M->getMethod() && !M->getMethod()->isHidden())
+ Methods.push_back(M->getMethod());
+ return Methods.size() > 1;
+}
+
+bool Sema::AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
+ SourceRange R,
+ bool receiverIdOrClass) {
+ GlobalMethodPool::iterator Pos = MethodPool.find(Sel);
+  // If there is no method in the pool at all, the caller should not emit
+  // any warning.
+ if (Pos == MethodPool.end())
+ return true;
+ ObjCMethodList &MethList =
+ BestMethod->isInstanceMethod() ? Pos->second.first : Pos->second.second;
+
+ // Diagnose finding more than one method in global pool
+ SmallVector<ObjCMethodDecl *, 4> Methods;
+ Methods.push_back(BestMethod);
+ for (ObjCMethodList *ML = &MethList; ML; ML = ML->getNext())
+ if (ObjCMethodDecl *M = ML->getMethod())
+ if (!M->isHidden() && M != BestMethod && !M->hasAttr<UnavailableAttr>())
+ Methods.push_back(M);
+ if (Methods.size() > 1)
+ DiagnoseMultipleMethodInGlobalPool(Methods, Sel, R, receiverIdOrClass);
+
+ return MethList.hasMoreThanOneDecl();
+}
+
+ObjCMethodDecl *Sema::LookupMethodInGlobalPool(Selector Sel, SourceRange R,
+ bool receiverIdOrClass,
+ bool instance) {
+ if (ExternalSource)
+ ReadMethodPool(Sel);
+
+ GlobalMethodPool::iterator Pos = MethodPool.find(Sel);
+ if (Pos == MethodPool.end())
+ return nullptr;
+
+  // Return the first non-hidden method.
+  ObjCMethodList &MethList = instance ? Pos->second.first : Pos->second.second;
+ for (ObjCMethodList *M = &MethList; M; M = M->getNext()) {
+ if (M->getMethod() && !M->getMethod()->isHidden())
+ return M->getMethod();
+ }
+ return nullptr;
+}
+
+void Sema::DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
+ Selector Sel, SourceRange R,
+ bool receiverIdOrClass) {
+ // We found multiple methods, so we may have to complain.
+ bool issueDiagnostic = false, issueError = false;
+
+ // We support a warning which complains about *any* difference in
+ // method signature.
+ bool strictSelectorMatch =
+ receiverIdOrClass &&
+ !Diags.isIgnored(diag::warn_strict_multiple_method_decl, R.getBegin());
+ if (strictSelectorMatch) {
+ for (unsigned I = 1, N = Methods.size(); I != N; ++I) {
+ if (!MatchTwoMethodDeclarations(Methods[0], Methods[I], MMS_strict)) {
+ issueDiagnostic = true;
+ break;
+ }
+ }
+ }
+
+ // If we didn't see any strict differences, we won't see any loose
+ // differences. In ARC, however, we also need to check for loose
+ // mismatches, because most of them are errors.
+ if (!strictSelectorMatch ||
+ (issueDiagnostic && getLangOpts().ObjCAutoRefCount))
+ for (unsigned I = 1, N = Methods.size(); I != N; ++I) {
+      // Check whether the methods' types mismatch under loose matching.
+ if (!MatchTwoMethodDeclarations(Methods[0], Methods[I], MMS_loose) &&
+ !isAcceptableMethodMismatch(Methods[0], Methods[I])) {
+ issueDiagnostic = true;
+ if (getLangOpts().ObjCAutoRefCount)
+ issueError = true;
+ break;
+ }
+ }
+
+ if (issueDiagnostic) {
+ if (issueError)
+ Diag(R.getBegin(), diag::err_arc_multiple_method_decl) << Sel << R;
+ else if (strictSelectorMatch)
+ Diag(R.getBegin(), diag::warn_strict_multiple_method_decl) << Sel << R;
+ else
+ Diag(R.getBegin(), diag::warn_multiple_method_decl) << Sel << R;
+
+ Diag(Methods[0]->getLocStart(),
+ issueError ? diag::note_possibility : diag::note_using)
+ << Methods[0]->getSourceRange();
+ for (unsigned I = 1, N = Methods.size(); I != N; ++I) {
+ Diag(Methods[I]->getLocStart(), diag::note_also_found)
+ << Methods[I]->getSourceRange();
+ }
+ }
+}
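+
+// A sketch of the situation diagnosed above (hypothetical declarations):
+//
+//   @interface A : NSObject
+//   - (void)setup:(int)x;
+//   @end
+//   @interface B : NSObject
+//   - (void)setup:(float)x;
+//   @end
+//   ...
+//   id obj = ...;
+//   [obj setup:1]; // warn_multiple_method_decl: either signature could apply
+//
+// Under ARC, a loose mismatch is promoted to err_arc_multiple_method_decl.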
+
+ObjCMethodDecl *Sema::LookupImplementedMethodInGlobalPool(Selector Sel) {
+ GlobalMethodPool::iterator Pos = MethodPool.find(Sel);
+ if (Pos == MethodPool.end())
+ return nullptr;
+
+ GlobalMethods &Methods = Pos->second;
+ for (const ObjCMethodList *Method = &Methods.first; Method;
+ Method = Method->getNext())
+ if (Method->getMethod() &&
+ (Method->getMethod()->isDefined() ||
+ Method->getMethod()->isPropertyAccessor()))
+ return Method->getMethod();
+
+ for (const ObjCMethodList *Method = &Methods.second; Method;
+ Method = Method->getNext())
+ if (Method->getMethod() &&
+ (Method->getMethod()->isDefined() ||
+ Method->getMethod()->isPropertyAccessor()))
+ return Method->getMethod();
+ return nullptr;
+}
+
+static void
+HelperSelectorsForTypoCorrection(
+ SmallVectorImpl<const ObjCMethodDecl *> &BestMethod,
+ StringRef Typo, const ObjCMethodDecl * Method) {
+ const unsigned MaxEditDistance = 1;
+ unsigned BestEditDistance = MaxEditDistance + 1;
+ std::string MethodName = Method->getSelector().getAsString();
+
+ unsigned MinPossibleEditDistance = abs((int)MethodName.size() - (int)Typo.size());
+ if (MinPossibleEditDistance > 0 &&
+ Typo.size() / MinPossibleEditDistance < 1)
+ return;
+ unsigned EditDistance = Typo.edit_distance(MethodName, true, MaxEditDistance);
+ if (EditDistance > MaxEditDistance)
+ return;
+ if (EditDistance == BestEditDistance)
+ BestMethod.push_back(Method);
+ else if (EditDistance < BestEditDistance) {
+ BestMethod.clear();
+ BestMethod.push_back(Method);
+ }
+}
+
+static bool HelperIsMethodInObjCType(Sema &S, Selector Sel,
+ QualType ObjectType) {
+ if (ObjectType.isNull())
+ return true;
+ if (S.LookupMethodInObjectType(Sel, ObjectType, true/*Instance method*/))
+ return true;
+ return S.LookupMethodInObjectType(Sel, ObjectType, false/*Class method*/) !=
+ nullptr;
+}
+
+const ObjCMethodDecl *
+Sema::SelectorsForTypoCorrection(Selector Sel,
+ QualType ObjectType) {
+ unsigned NumArgs = Sel.getNumArgs();
+ SmallVector<const ObjCMethodDecl *, 8> Methods;
+ bool ObjectIsId = true, ObjectIsClass = true;
+ if (ObjectType.isNull())
+ ObjectIsId = ObjectIsClass = false;
+ else if (!ObjectType->isObjCObjectPointerType())
+ return nullptr;
+ else if (const ObjCObjectPointerType *ObjCPtr =
+ ObjectType->getAsObjCInterfacePointerType()) {
+ ObjectType = QualType(ObjCPtr->getInterfaceType(), 0);
+ ObjectIsId = ObjectIsClass = false;
+ }
+ else if (ObjectType->isObjCIdType() || ObjectType->isObjCQualifiedIdType())
+ ObjectIsClass = false;
+ else if (ObjectType->isObjCClassType() || ObjectType->isObjCQualifiedClassType())
+ ObjectIsId = false;
+ else
+ return nullptr;
+
+ for (GlobalMethodPool::iterator b = MethodPool.begin(),
+ e = MethodPool.end(); b != e; b++) {
+ // instance methods
+ for (ObjCMethodList *M = &b->second.first; M; M=M->getNext())
+ if (M->getMethod() &&
+ (M->getMethod()->getSelector().getNumArgs() == NumArgs) &&
+ (M->getMethod()->getSelector() != Sel)) {
+ if (ObjectIsId)
+ Methods.push_back(M->getMethod());
+ else if (!ObjectIsClass &&
+ HelperIsMethodInObjCType(*this, M->getMethod()->getSelector(),
+ ObjectType))
+ Methods.push_back(M->getMethod());
+ }
+ // class methods
+ for (ObjCMethodList *M = &b->second.second; M; M=M->getNext())
+ if (M->getMethod() &&
+ (M->getMethod()->getSelector().getNumArgs() == NumArgs) &&
+ (M->getMethod()->getSelector() != Sel)) {
+ if (ObjectIsClass)
+ Methods.push_back(M->getMethod());
+ else if (!ObjectIsId &&
+ HelperIsMethodInObjCType(*this, M->getMethod()->getSelector(),
+ ObjectType))
+ Methods.push_back(M->getMethod());
+ }
+ }
+
+ SmallVector<const ObjCMethodDecl *, 8> SelectedMethods;
+ for (unsigned i = 0, e = Methods.size(); i < e; i++) {
+ HelperSelectorsForTypoCorrection(SelectedMethods,
+ Sel.getAsString(), Methods[i]);
+ }
+ return (SelectedMethods.size() == 1) ? SelectedMethods[0] : nullptr;
+}
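+
+// For illustration, with a hypothetical misspelled message send such as
+//   [container removeObjct:x];
+// the loops above scan the pool for selectors of the same arity within an
+// edit distance of 1 (e.g. 'removeObject:') and return a correction only
+// when it is unique; ties and larger typos yield no suggestion.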
+
+/// DiagnoseDuplicateIvars -
+/// Check for duplicate ivars in the entire class at the start of
+/// \@implementation. This becomes necessary because class extensions can
+/// add ivars to a class in any order, which is not known until the
+/// class's \@implementation is seen.
+void Sema::DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID,
+ ObjCInterfaceDecl *SID) {
+ for (auto *Ivar : ID->ivars()) {
+ if (Ivar->isInvalidDecl())
+ continue;
+ if (IdentifierInfo *II = Ivar->getIdentifier()) {
+ ObjCIvarDecl* prevIvar = SID->lookupInstanceVariable(II);
+ if (prevIvar) {
+ Diag(Ivar->getLocation(), diag::err_duplicate_member) << II;
+ Diag(prevIvar->getLocation(), diag::note_previous_declaration);
+ Ivar->setInvalidDecl();
+ }
+ }
+ }
+}
+
+/// Diagnose attempts to define ARC-__weak ivars when __weak is disabled.
+static void DiagnoseWeakIvars(Sema &S, ObjCImplementationDecl *ID) {
+ if (S.getLangOpts().ObjCWeak) return;
+
+ for (auto ivar = ID->getClassInterface()->all_declared_ivar_begin();
+ ivar; ivar = ivar->getNextIvar()) {
+ if (ivar->isInvalidDecl()) continue;
+ if (ivar->getType().getObjCLifetime() == Qualifiers::OCL_Weak) {
+ if (S.getLangOpts().ObjCWeakRuntime) {
+ S.Diag(ivar->getLocation(), diag::err_arc_weak_disabled);
+ } else {
+ S.Diag(ivar->getLocation(), diag::err_arc_weak_no_runtime);
+ }
+ }
+ }
+}
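+
+// A sketch of the two cases diagnosed above (hypothetical ivar, ARC mode):
+//
+//   @implementation Foo { __weak id _observer; } @end
+//
+// If the targeted runtime supports weak references but __weak was disabled
+// (e.g. -fno-objc-weak), this is err_arc_weak_disabled; if the runtime
+// lacks weak-reference support, it is err_arc_weak_no_runtime.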
+
+Sema::ObjCContainerKind Sema::getObjCContainerKind() const {
+ switch (CurContext->getDeclKind()) {
+ case Decl::ObjCInterface:
+ return Sema::OCK_Interface;
+ case Decl::ObjCProtocol:
+ return Sema::OCK_Protocol;
+ case Decl::ObjCCategory:
+ if (cast<ObjCCategoryDecl>(CurContext)->IsClassExtension())
+ return Sema::OCK_ClassExtension;
+ return Sema::OCK_Category;
+ case Decl::ObjCImplementation:
+ return Sema::OCK_Implementation;
+ case Decl::ObjCCategoryImpl:
+ return Sema::OCK_CategoryImplementation;
+
+ default:
+ return Sema::OCK_None;
+ }
+}
+
+// Note: For class/category implementations, allMethods is always empty.
+Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
+ ArrayRef<DeclGroupPtrTy> allTUVars) {
+ if (getObjCContainerKind() == Sema::OCK_None)
+ return nullptr;
+
+ assert(AtEnd.isValid() && "Invalid location for '@end'");
+
+ ObjCContainerDecl *OCD = dyn_cast<ObjCContainerDecl>(CurContext);
+ Decl *ClassDecl = cast<Decl>(OCD);
+
+ bool isInterfaceDeclKind =
+ isa<ObjCInterfaceDecl>(ClassDecl) || isa<ObjCCategoryDecl>(ClassDecl)
+ || isa<ObjCProtocolDecl>(ClassDecl);
+ bool checkIdenticalMethods = isa<ObjCImplementationDecl>(ClassDecl);
+
+ // FIXME: Remove these and use the ObjCContainerDecl/DeclContext.
+ llvm::DenseMap<Selector, const ObjCMethodDecl*> InsMap;
+ llvm::DenseMap<Selector, const ObjCMethodDecl*> ClsMap;
+
+ for (unsigned i = 0, e = allMethods.size(); i != e; i++ ) {
+ ObjCMethodDecl *Method =
+ cast_or_null<ObjCMethodDecl>(allMethods[i]);
+
+ if (!Method) continue; // Already issued a diagnostic.
+ if (Method->isInstanceMethod()) {
+ /// Check for instance method of the same name with incompatible types
+ const ObjCMethodDecl *&PrevMethod = InsMap[Method->getSelector()];
+ bool match = PrevMethod ? MatchTwoMethodDeclarations(Method, PrevMethod)
+ : false;
+ if ((isInterfaceDeclKind && PrevMethod && !match)
+ || (checkIdenticalMethods && match)) {
+ Diag(Method->getLocation(), diag::err_duplicate_method_decl)
+ << Method->getDeclName();
+ Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
+ Method->setInvalidDecl();
+ } else {
+ if (PrevMethod) {
+ Method->setAsRedeclaration(PrevMethod);
+ if (!Context.getSourceManager().isInSystemHeader(
+ Method->getLocation()))
+ Diag(Method->getLocation(), diag::warn_duplicate_method_decl)
+ << Method->getDeclName();
+ Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
+ }
+ InsMap[Method->getSelector()] = Method;
+ /// The following allows us to typecheck messages to "id".
+ AddInstanceMethodToGlobalPool(Method);
+ }
+ } else {
+ /// Check for class method of the same name with incompatible types
+ const ObjCMethodDecl *&PrevMethod = ClsMap[Method->getSelector()];
+ bool match = PrevMethod ? MatchTwoMethodDeclarations(Method, PrevMethod)
+ : false;
+ if ((isInterfaceDeclKind && PrevMethod && !match)
+ || (checkIdenticalMethods && match)) {
+ Diag(Method->getLocation(), diag::err_duplicate_method_decl)
+ << Method->getDeclName();
+ Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
+ Method->setInvalidDecl();
+ } else {
+ if (PrevMethod) {
+ Method->setAsRedeclaration(PrevMethod);
+ if (!Context.getSourceManager().isInSystemHeader(
+ Method->getLocation()))
+ Diag(Method->getLocation(), diag::warn_duplicate_method_decl)
+ << Method->getDeclName();
+ Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
+ }
+ ClsMap[Method->getSelector()] = Method;
+ AddFactoryMethodToGlobalPool(Method);
+ }
+ }
+ }
+ if (isa<ObjCInterfaceDecl>(ClassDecl)) {
+ // Nothing to do here.
+ } else if (ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(ClassDecl)) {
+ // Categories are used to extend the class by declaring new methods.
+ // By the same token, they are also used to add new properties. No
+ // need to compare the added property to those in the class.
+
+ if (C->IsClassExtension()) {
+ ObjCInterfaceDecl *CCPrimary = C->getClassInterface();
+ DiagnoseClassExtensionDupMethods(C, CCPrimary);
+ }
+ }
+ if (ObjCContainerDecl *CDecl = dyn_cast<ObjCContainerDecl>(ClassDecl)) {
+ if (CDecl->getIdentifier())
+ // ProcessPropertyDecl is responsible for diagnosing conflicts with any
+ // user-defined setter/getter. It also synthesizes setter/getter methods
+ // and adds them to the DeclContext and global method pools.
+ for (auto *I : CDecl->properties())
+ ProcessPropertyDecl(I);
+ CDecl->setAtEndRange(AtEnd);
+ }
+ if (ObjCImplementationDecl *IC=dyn_cast<ObjCImplementationDecl>(ClassDecl)) {
+ IC->setAtEndRange(AtEnd);
+ if (ObjCInterfaceDecl* IDecl = IC->getClassInterface()) {
+      // Any property declared in a class extension might have a
+      // user-declared setter or getter in the current class extension or
+      // one of the other class extensions. Mark them as property accessors,
+      // since the property will be synthesized when a property with the
+      // same name is seen in the @implementation.
+ for (const auto *Ext : IDecl->visible_extensions()) {
+ for (const auto *Property : Ext->properties()) {
+ // Skip over properties declared @dynamic
+ if (const ObjCPropertyImplDecl *PIDecl
+ = IC->FindPropertyImplDecl(Property->getIdentifier()))
+ if (PIDecl->getPropertyImplementation()
+ == ObjCPropertyImplDecl::Dynamic)
+ continue;
+
+ for (const auto *Ext : IDecl->visible_extensions()) {
+ if (ObjCMethodDecl *GetterMethod
+ = Ext->getInstanceMethod(Property->getGetterName()))
+ GetterMethod->setPropertyAccessor(true);
+ if (!Property->isReadOnly())
+ if (ObjCMethodDecl *SetterMethod
+ = Ext->getInstanceMethod(Property->getSetterName()))
+ SetterMethod->setPropertyAccessor(true);
+ }
+ }
+ }
+ ImplMethodsVsClassMethods(S, IC, IDecl);
+ AtomicPropertySetterGetterRules(IC, IDecl);
+ DiagnoseOwningPropertyGetterSynthesis(IC);
+ DiagnoseUnusedBackingIvarInAccessor(S, IC);
+ if (IDecl->hasDesignatedInitializers())
+ DiagnoseMissingDesignatedInitOverrides(IC, IDecl);
+ DiagnoseWeakIvars(*this, IC);
+
+ bool HasRootClassAttr = IDecl->hasAttr<ObjCRootClassAttr>();
+ if (IDecl->getSuperClass() == nullptr) {
+ // This class has no superclass, so check that it has been marked with
+ // __attribute((objc_root_class)).
+ if (!HasRootClassAttr) {
+ SourceLocation DeclLoc(IDecl->getLocation());
+ SourceLocation SuperClassLoc(getLocForEndOfToken(DeclLoc));
+ Diag(DeclLoc, diag::warn_objc_root_class_missing)
+ << IDecl->getIdentifier();
+ // See if NSObject is in the current scope, and if it is, suggest
+ // adding " : NSObject " to the class declaration.
+ NamedDecl *IF = LookupSingleName(TUScope,
+ NSAPIObj->getNSClassId(NSAPI::ClassId_NSObject),
+ DeclLoc, LookupOrdinaryName);
+ ObjCInterfaceDecl *NSObjectDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
+ if (NSObjectDecl && NSObjectDecl->getDefinition()) {
+ Diag(SuperClassLoc, diag::note_objc_needs_superclass)
+ << FixItHint::CreateInsertion(SuperClassLoc, " : NSObject ");
+ } else {
+ Diag(SuperClassLoc, diag::note_objc_needs_superclass);
+ }
+ }
+ } else if (HasRootClassAttr) {
+ // Complain that only root classes may have this attribute.
+ Diag(IDecl->getLocation(), diag::err_objc_root_class_subclass);
+ }
+
+ if (LangOpts.ObjCRuntime.isNonFragile()) {
+ while (IDecl->getSuperClass()) {
+ DiagnoseDuplicateIvars(IDecl, IDecl->getSuperClass());
+ IDecl = IDecl->getSuperClass();
+ }
+ }
+ }
+ SetIvarInitializers(IC);
+ } else if (ObjCCategoryImplDecl* CatImplClass =
+ dyn_cast<ObjCCategoryImplDecl>(ClassDecl)) {
+ CatImplClass->setAtEndRange(AtEnd);
+
+ // Find category interface decl and then check that all methods declared
+ // in this interface are implemented in the category @implementation.
+ if (ObjCInterfaceDecl* IDecl = CatImplClass->getClassInterface()) {
+ if (ObjCCategoryDecl *Cat
+ = IDecl->FindCategoryDeclaration(CatImplClass->getIdentifier())) {
+ ImplMethodsVsClassMethods(S, CatImplClass, Cat);
+ }
+ }
+ }
+ if (isInterfaceDeclKind) {
+ // Reject invalid vardecls.
+ for (unsigned i = 0, e = allTUVars.size(); i != e; i++) {
+ DeclGroupRef DG = allTUVars[i].get();
+ for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
+ if (VarDecl *VDecl = dyn_cast<VarDecl>(*I)) {
+ if (!VDecl->hasExternalStorage())
+ Diag(VDecl->getLocation(), diag::err_objc_var_decl_inclass);
+ }
+ }
+ }
+ ActOnObjCContainerFinishDefinition();
+
+ for (unsigned i = 0, e = allTUVars.size(); i != e; i++) {
+ DeclGroupRef DG = allTUVars[i].get();
+ for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
+ (*I)->setTopLevelDeclInObjCContainer();
+ Consumer.HandleTopLevelDeclInObjCContainer(DG);
+ }
+
+ ActOnDocumentableDecl(ClassDecl);
+ return ClassDecl;
+}
+
+/// CvtQTToAstBitMask - utility routine to produce an AST bitmask for
+/// Objective-C's type qualifiers from the parser version of the same info.
+static Decl::ObjCDeclQualifier
+CvtQTToAstBitMask(ObjCDeclSpec::ObjCDeclQualifier PQTVal) {
+ return (Decl::ObjCDeclQualifier) (unsigned) PQTVal;
+}
+
+/// \brief Check whether the declared result type of the given Objective-C
+/// method declaration is compatible with the method's class.
+///
+static Sema::ResultTypeCompatibilityKind
+CheckRelatedResultTypeCompatibility(Sema &S, ObjCMethodDecl *Method,
+ ObjCInterfaceDecl *CurrentClass) {
+ QualType ResultType = Method->getReturnType();
+
+ // If an Objective-C method inherits its related result type, then its
+ // declared result type must be compatible with its own class type. The
+ // declared result type is compatible if:
+ if (const ObjCObjectPointerType *ResultObjectType
+ = ResultType->getAs<ObjCObjectPointerType>()) {
+ // - it is id or qualified id, or
+ if (ResultObjectType->isObjCIdType() ||
+ ResultObjectType->isObjCQualifiedIdType())
+ return Sema::RTC_Compatible;
+
+ if (CurrentClass) {
+ if (ObjCInterfaceDecl *ResultClass
+ = ResultObjectType->getInterfaceDecl()) {
+ // - it is the same as the method's class type, or
+ if (declaresSameEntity(CurrentClass, ResultClass))
+ return Sema::RTC_Compatible;
+
+ // - it is a superclass of the method's class type
+ if (ResultClass->isSuperClassOf(CurrentClass))
+ return Sema::RTC_Compatible;
+ }
+ } else {
+ // Any Objective-C pointer type might be acceptable for a protocol
+ // method; we just don't know.
+ return Sema::RTC_Unknown;
+ }
+ }
+
+ return Sema::RTC_Incompatible;
+}
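+
+// For illustration, given a hypothetical class:
+//
+//   @interface Widget : NSObject
+//   + (Widget *)widget;  // RTC_Compatible: same as the method's class
+//   + (NSObject *)other; // RTC_Compatible: a superclass of Widget
+//   + (NSString *)bad;   // RTC_Incompatible
+//   @end
+//
+// Only the compatible cases may participate in related-result-type
+// ("instancetype") inference later on.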
+
+namespace {
+/// A helper class for searching for methods which a particular method
+/// overrides.
+class OverrideSearch {
+public:
+ Sema &S;
+ ObjCMethodDecl *Method;
+ llvm::SmallPtrSet<ObjCMethodDecl*, 4> Overridden;
+ bool Recursive;
+
+public:
+ OverrideSearch(Sema &S, ObjCMethodDecl *method) : S(S), Method(method) {
+ Selector selector = method->getSelector();
+
+ // Bypass this search if we've never seen an instance/class method
+ // with this selector before.
+ Sema::GlobalMethodPool::iterator it = S.MethodPool.find(selector);
+ if (it == S.MethodPool.end()) {
+ if (!S.getExternalSource()) return;
+ S.ReadMethodPool(selector);
+
+ it = S.MethodPool.find(selector);
+ if (it == S.MethodPool.end())
+ return;
+ }
+ ObjCMethodList &list =
+ method->isInstanceMethod() ? it->second.first : it->second.second;
+ if (!list.getMethod()) return;
+
+ ObjCContainerDecl *container
+ = cast<ObjCContainerDecl>(method->getDeclContext());
+
+ // Prevent the search from reaching this container again. This is
+ // important with categories, which override methods from the
+ // interface and each other.
+ if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(container)) {
+ searchFromContainer(container);
+ if (ObjCInterfaceDecl *Interface = Category->getClassInterface())
+ searchFromContainer(Interface);
+ } else {
+ searchFromContainer(container);
+ }
+ }
+
+ typedef llvm::SmallPtrSet<ObjCMethodDecl*, 128>::iterator iterator;
+ iterator begin() const { return Overridden.begin(); }
+ iterator end() const { return Overridden.end(); }
+
+private:
+ void searchFromContainer(ObjCContainerDecl *container) {
+ if (container->isInvalidDecl()) return;
+
+ switch (container->getDeclKind()) {
+#define OBJCCONTAINER(type, base) \
+ case Decl::type: \
+ searchFrom(cast<type##Decl>(container)); \
+ break;
+#define ABSTRACT_DECL(expansion)
+#define DECL(type, base) \
+ case Decl::type:
+#include "clang/AST/DeclNodes.inc"
+ llvm_unreachable("not an ObjC container!");
+ }
+ }
+
+ void searchFrom(ObjCProtocolDecl *protocol) {
+ if (!protocol->hasDefinition())
+ return;
+
+ // A method in a protocol declaration overrides declarations from
+ // referenced ("parent") protocols.
+ search(protocol->getReferencedProtocols());
+ }
+
+ void searchFrom(ObjCCategoryDecl *category) {
+ // A method in a category declaration overrides declarations from
+ // the main class and from protocols the category references.
+ // The main class is handled in the constructor.
+ search(category->getReferencedProtocols());
+ }
+
+ void searchFrom(ObjCCategoryImplDecl *impl) {
+ // A method in a category definition that has a category
+ // declaration overrides declarations from the category
+ // declaration.
+ if (ObjCCategoryDecl *category = impl->getCategoryDecl()) {
+ search(category);
+ if (ObjCInterfaceDecl *Interface = category->getClassInterface())
+ search(Interface);
+
+ // Otherwise it overrides declarations from the class.
+ } else if (ObjCInterfaceDecl *Interface = impl->getClassInterface()) {
+ search(Interface);
+ }
+ }
+
+ void searchFrom(ObjCInterfaceDecl *iface) {
+ // A method in a class declaration overrides declarations from
+ if (!iface->hasDefinition())
+ return;
+
+ // - categories,
+ for (auto *Cat : iface->known_categories())
+ search(Cat);
+
+ // - the super class, and
+ if (ObjCInterfaceDecl *super = iface->getSuperClass())
+ search(super);
+
+ // - any referenced protocols.
+ search(iface->getReferencedProtocols());
+ }
+
+ void searchFrom(ObjCImplementationDecl *impl) {
+ // A method in a class implementation overrides declarations from
+ // the class interface.
+ if (ObjCInterfaceDecl *Interface = impl->getClassInterface())
+ search(Interface);
+ }
+
+ void search(const ObjCProtocolList &protocols) {
+ for (ObjCProtocolList::iterator i = protocols.begin(), e = protocols.end();
+ i != e; ++i)
+ search(*i);
+ }
+
+ void search(ObjCContainerDecl *container) {
+ // Check for a method in this container which matches this selector.
+ ObjCMethodDecl *meth = container->getMethod(Method->getSelector(),
+ Method->isInstanceMethod(),
+ /*AllowHidden=*/true);
+
+ // If we find one, record it and bail out.
+ if (meth) {
+ Overridden.insert(meth);
+ return;
+ }
+
+ // Otherwise, search for methods that a hypothetical method here
+ // would have overridden.
+
+ // Note that we're now in a recursive case.
+ Recursive = true;
+
+ searchFromContainer(container);
+ }
+};
+} // end anonymous namespace
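+
+// A rough sketch of what the search visits for a hypothetical hierarchy:
+//
+//   @protocol P
+//   - (void)run;
+//   @end
+//   @interface Base : NSObject
+//   - (void)run;
+//   @end
+//   @interface Derived : Base <P> @end
+//   @interface Derived (Extra)
+//   - (void)run;
+//   @end
+//
+// Searching from the category's -run reaches Base's -run via the superclass
+// and P's -run via the referenced protocols, seeding the merge logic below.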
+
+void Sema::CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
+ ObjCInterfaceDecl *CurrentClass,
+ ResultTypeCompatibilityKind RTC) {
+ // Search for overridden methods and merge information down from them.
+ OverrideSearch overrides(*this, ObjCMethod);
+ // Keep track if the method overrides any method in the class's base classes,
+ // its protocols, or its categories' protocols; we will keep that info
+ // in the ObjCMethodDecl.
+ // For this info, a method in an implementation is not considered as
+ // overriding the same method in the interface or its categories.
+ bool hasOverriddenMethodsInBaseOrProtocol = false;
+ for (OverrideSearch::iterator
+ i = overrides.begin(), e = overrides.end(); i != e; ++i) {
+ ObjCMethodDecl *overridden = *i;
+
+ if (!hasOverriddenMethodsInBaseOrProtocol) {
+ if (isa<ObjCProtocolDecl>(overridden->getDeclContext()) ||
+ CurrentClass != overridden->getClassInterface() ||
+ overridden->isOverriding()) {
+ hasOverriddenMethodsInBaseOrProtocol = true;
+
+ } else if (isa<ObjCImplDecl>(ObjCMethod->getDeclContext())) {
+ // OverrideSearch will return as "overridden" the same method in the
+ // interface. For hasOverriddenMethodsInBaseOrProtocol, we need to
+ // check whether a category of a base class introduced a method with the
+ // same selector, after the interface method declaration.
+ // To avoid unnecessary lookups in the majority of cases, we use the
+ // extra info bits in GlobalMethodPool to check whether there were any
+ // category methods with this selector.
+ GlobalMethodPool::iterator It =
+ MethodPool.find(ObjCMethod->getSelector());
+ if (It != MethodPool.end()) {
+ ObjCMethodList &List =
+ ObjCMethod->isInstanceMethod()? It->second.first: It->second.second;
+ unsigned CategCount = List.getBits();
+ if (CategCount > 0) {
+ // If the method is in a category we'll do lookup if there were at
+ // least 2 category methods recorded, otherwise only one will do.
+ if (CategCount > 1 ||
+ !isa<ObjCCategoryImplDecl>(overridden->getDeclContext())) {
+ OverrideSearch overrides(*this, overridden);
+ for (OverrideSearch::iterator
+ OI= overrides.begin(), OE= overrides.end(); OI!=OE; ++OI) {
+ ObjCMethodDecl *SuperOverridden = *OI;
+ if (isa<ObjCProtocolDecl>(SuperOverridden->getDeclContext()) ||
+ CurrentClass != SuperOverridden->getClassInterface()) {
+ hasOverriddenMethodsInBaseOrProtocol = true;
+ overridden->setOverriding(true);
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Propagate down the 'related result type' bit from overridden methods.
+ if (RTC != Sema::RTC_Incompatible && overridden->hasRelatedResultType())
+ ObjCMethod->SetRelatedResultType();
+
+ // Then merge the declarations.
+ mergeObjCMethodDecls(ObjCMethod, overridden);
+
+ if (ObjCMethod->isImplicit() && overridden->isImplicit())
+ continue; // Conflicting properties are detected elsewhere.
+
+ // Check for overriding methods
+ if (isa<ObjCInterfaceDecl>(ObjCMethod->getDeclContext()) ||
+ isa<ObjCImplementationDecl>(ObjCMethod->getDeclContext()))
+ CheckConflictingOverridingMethod(ObjCMethod, overridden,
+ isa<ObjCProtocolDecl>(overridden->getDeclContext()));
+
+ if (CurrentClass && overridden->getDeclContext() != CurrentClass &&
+ isa<ObjCInterfaceDecl>(overridden->getDeclContext()) &&
+ !overridden->isImplicit() /* not meant for properties */) {
+ ObjCMethodDecl::param_iterator ParamI = ObjCMethod->param_begin(),
+ E = ObjCMethod->param_end();
+ ObjCMethodDecl::param_iterator PrevI = overridden->param_begin(),
+ PrevE = overridden->param_end();
+ for (; ParamI != E && PrevI != PrevE; ++ParamI, ++PrevI) {
+ assert(PrevI != overridden->param_end() && "Param mismatch");
+ QualType T1 = Context.getCanonicalType((*ParamI)->getType());
+ QualType T2 = Context.getCanonicalType((*PrevI)->getType());
+ // If type of argument of method in this class does not match its
+ // respective argument type in the super class method, issue warning;
+ if (!Context.typesAreCompatible(T1, T2)) {
+ Diag((*ParamI)->getLocation(), diag::ext_typecheck_base_super)
+ << T1 << T2;
+ Diag(overridden->getLocation(), diag::note_previous_declaration);
+ break;
+ }
+ }
+ }
+ }
+
+ ObjCMethod->setOverriding(hasOverriddenMethodsInBaseOrProtocol);
+}
+
+/// Merge type nullability for a redeclaration of the same entity,
+/// producing the updated type of the redeclared entity.
+static QualType mergeTypeNullabilityForRedecl(Sema &S, SourceLocation loc,
+ QualType type,
+ bool usesCSKeyword,
+ SourceLocation prevLoc,
+ QualType prevType,
+ bool prevUsesCSKeyword) {
+ // Determine the nullability of both types.
+ auto nullability = type->getNullability(S.Context);
+ auto prevNullability = prevType->getNullability(S.Context);
+
+  // Easy case: both have nullability, or neither does.
+ if (nullability.hasValue() == prevNullability.hasValue()) {
+ // Neither has nullability; continue.
+ if (!nullability)
+ return type;
+
+ // The nullabilities are equivalent; do nothing.
+ if (*nullability == *prevNullability)
+ return type;
+
+ // Complain about mismatched nullability.
+ S.Diag(loc, diag::err_nullability_conflicting)
+ << DiagNullabilityKind(*nullability, usesCSKeyword)
+ << DiagNullabilityKind(*prevNullability, prevUsesCSKeyword);
+ return type;
+ }
+
+ // If it's the redeclaration that has nullability, don't change anything.
+ if (nullability)
+ return type;
+
+ // Otherwise, provide the result with the same nullability.
+ return S.Context.getAttributedType(
+ AttributedType::getNullabilityAttrKind(*prevNullability),
+ type, type);
+}
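+
+// For illustration, with a hypothetical method: if the @interface declares
+//   - (nullable NSString *)name;
+// and the redeclaration is spelled
+//   - (NSString *)name;
+// the redeclaration's return type is rewritten here to carry 'nullable'.
+// Conflicting explicit nullability (nonnull vs. nullable) is instead
+// diagnosed with err_nullability_conflicting.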
+
+/// Merge information from the declaration of a method in the \@interface
+/// (or a category/extension) into the corresponding method in the
+/// \@implementation (for a class or category).
+static void mergeInterfaceMethodToImpl(Sema &S,
+ ObjCMethodDecl *method,
+ ObjCMethodDecl *prevMethod) {
+ // Merge the objc_requires_super attribute.
+ if (prevMethod->hasAttr<ObjCRequiresSuperAttr>() &&
+ !method->hasAttr<ObjCRequiresSuperAttr>()) {
+ // merge the attribute into implementation.
+ method->addAttr(
+ ObjCRequiresSuperAttr::CreateImplicit(S.Context,
+ method->getLocation()));
+ }
+
+ // Merge nullability of the result type.
+ QualType newReturnType
+ = mergeTypeNullabilityForRedecl(
+ S, method->getReturnTypeSourceRange().getBegin(),
+ method->getReturnType(),
+ method->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability,
+ prevMethod->getReturnTypeSourceRange().getBegin(),
+ prevMethod->getReturnType(),
+ prevMethod->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability);
+ method->setReturnType(newReturnType);
+
+ // Handle each of the parameters.
+ unsigned numParams = method->param_size();
+ unsigned numPrevParams = prevMethod->param_size();
+ for (unsigned i = 0, n = std::min(numParams, numPrevParams); i != n; ++i) {
+ ParmVarDecl *param = method->param_begin()[i];
+ ParmVarDecl *prevParam = prevMethod->param_begin()[i];
+
+ // Merge nullability.
+ QualType newParamType
+ = mergeTypeNullabilityForRedecl(
+ S, param->getLocation(), param->getType(),
+ param->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability,
+ prevParam->getLocation(), prevParam->getType(),
+ prevParam->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability);
+ param->setType(newParamType);
+ }
+}
+
+Decl *Sema::ActOnMethodDeclaration(
+ Scope *S,
+ SourceLocation MethodLoc, SourceLocation EndLoc,
+ tok::TokenKind MethodType,
+ ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
+ ArrayRef<SourceLocation> SelectorLocs,
+ Selector Sel,
+    // Optional arguments. The number of types/arguments is obtained
+    // from Sel.getNumArgs().
+ ObjCArgInfo *ArgInfo,
+ DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
+ AttributeList *AttrList, tok::ObjCKeywordKind MethodDeclKind,
+ bool isVariadic, bool MethodDefinition) {
+ // Make sure we can establish a context for the method.
+ if (!CurContext->isObjCContainer()) {
+ Diag(MethodLoc, diag::error_missing_method_context);
+ return nullptr;
+ }
+ ObjCContainerDecl *OCD = dyn_cast<ObjCContainerDecl>(CurContext);
+ Decl *ClassDecl = cast<Decl>(OCD);
+ QualType resultDeclType;
+
+ bool HasRelatedResultType = false;
+ TypeSourceInfo *ReturnTInfo = nullptr;
+ if (ReturnType) {
+ resultDeclType = GetTypeFromParser(ReturnType, &ReturnTInfo);
+
+ if (CheckFunctionReturnType(resultDeclType, MethodLoc))
+ return nullptr;
+
+ QualType bareResultType = resultDeclType;
+ (void)AttributedType::stripOuterNullability(bareResultType);
+ HasRelatedResultType = (bareResultType == Context.getObjCInstanceType());
+ } else { // get the type for "id".
+ resultDeclType = Context.getObjCIdType();
+ Diag(MethodLoc, diag::warn_missing_method_return_type)
+ << FixItHint::CreateInsertion(SelectorLocs.front(), "(id)");
+ }
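+
+  // For illustration, a hypothetical declaration written as
+  //   - foo;
+  // defaults to returning 'id' and receives a fix-it that inserts "(id)"
+  // before the selector, yielding "- (id)foo;".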
+
+ ObjCMethodDecl *ObjCMethod = ObjCMethodDecl::Create(
+ Context, MethodLoc, EndLoc, Sel, resultDeclType, ReturnTInfo, CurContext,
+ MethodType == tok::minus, isVariadic,
+ /*isPropertyAccessor=*/false,
+ /*isImplicitlyDeclared=*/false, /*isDefined=*/false,
+ MethodDeclKind == tok::objc_optional ? ObjCMethodDecl::Optional
+ : ObjCMethodDecl::Required,
+ HasRelatedResultType);
+
+ SmallVector<ParmVarDecl*, 16> Params;
+
+ for (unsigned i = 0, e = Sel.getNumArgs(); i != e; ++i) {
+ QualType ArgType;
+ TypeSourceInfo *DI;
+
+ if (!ArgInfo[i].Type) {
+ ArgType = Context.getObjCIdType();
+ DI = nullptr;
+ } else {
+ ArgType = GetTypeFromParser(ArgInfo[i].Type, &DI);
+ }
+
+ LookupResult R(*this, ArgInfo[i].Name, ArgInfo[i].NameLoc,
+ LookupOrdinaryName, ForRedeclaration);
+ LookupName(R, S);
+ if (R.isSingleResult()) {
+ NamedDecl *PrevDecl = R.getFoundDecl();
+ if (S->isDeclScope(PrevDecl)) {
+ Diag(ArgInfo[i].NameLoc,
+ (MethodDefinition ? diag::warn_method_param_redefinition
+ : diag::warn_method_param_declaration))
+ << ArgInfo[i].Name;
+ Diag(PrevDecl->getLocation(),
+ diag::note_previous_declaration);
+ }
+ }
+
+ SourceLocation StartLoc = DI
+ ? DI->getTypeLoc().getBeginLoc()
+ : ArgInfo[i].NameLoc;
+
+ ParmVarDecl* Param = CheckParameter(ObjCMethod, StartLoc,
+ ArgInfo[i].NameLoc, ArgInfo[i].Name,
+ ArgType, DI, SC_None);
+
+ Param->setObjCMethodScopeInfo(i);
+
+ Param->setObjCDeclQualifier(
+ CvtQTToAstBitMask(ArgInfo[i].DeclSpec.getObjCDeclQualifier()));
+
+ // Apply the attributes to the parameter.
+ ProcessDeclAttributeList(TUScope, Param, ArgInfo[i].ArgAttrs);
+
+ if (Param->hasAttr<BlocksAttr>()) {
+ Diag(Param->getLocation(), diag::err_block_on_nonlocal);
+ Param->setInvalidDecl();
+ }
+ S->AddDecl(Param);
+ IdResolver.AddDecl(Param);
+
+ Params.push_back(Param);
+ }
+
+ for (unsigned i = 0, e = CNumArgs; i != e; ++i) {
+ ParmVarDecl *Param = cast<ParmVarDecl>(CParamInfo[i].Param);
+ QualType ArgType = Param->getType();
+ if (ArgType.isNull())
+ ArgType = Context.getObjCIdType();
+ else
+ // Perform the default array/function conversions (C99 6.7.5.3p[7,8]).
+ ArgType = Context.getAdjustedParameterType(ArgType);
+
+ Param->setDeclContext(ObjCMethod);
+ Params.push_back(Param);
+ }
+
+ ObjCMethod->setMethodParams(Context, Params, SelectorLocs);
+ ObjCMethod->setObjCDeclQualifier(
+ CvtQTToAstBitMask(ReturnQT.getObjCDeclQualifier()));
+
+ if (AttrList)
+ ProcessDeclAttributeList(TUScope, ObjCMethod, AttrList);
+
+ // Add the method now.
+ const ObjCMethodDecl *PrevMethod = nullptr;
+ if (ObjCImplDecl *ImpDecl = dyn_cast<ObjCImplDecl>(ClassDecl)) {
+ if (MethodType == tok::minus) {
+ PrevMethod = ImpDecl->getInstanceMethod(Sel);
+ ImpDecl->addInstanceMethod(ObjCMethod);
+ } else {
+ PrevMethod = ImpDecl->getClassMethod(Sel);
+ ImpDecl->addClassMethod(ObjCMethod);
+ }
+
+ // Merge information from the @interface declaration into the
+ // @implementation.
+ if (ObjCInterfaceDecl *IDecl = ImpDecl->getClassInterface()) {
+ if (auto *IMD = IDecl->lookupMethod(ObjCMethod->getSelector(),
+ ObjCMethod->isInstanceMethod())) {
+ mergeInterfaceMethodToImpl(*this, ObjCMethod, IMD);
+
+ // Warn about defining -dealloc in a category.
+ if (isa<ObjCCategoryImplDecl>(ImpDecl) && IMD->isOverriding() &&
+ ObjCMethod->getSelector().getMethodFamily() == OMF_dealloc) {
+ Diag(ObjCMethod->getLocation(), diag::warn_dealloc_in_category)
+ << ObjCMethod->getDeclName();
+ }
+ }
+ }
+ } else {
+ cast<DeclContext>(ClassDecl)->addDecl(ObjCMethod);
+ }
+
+ if (PrevMethod) {
+ // You can never have two method definitions with the same name.
+ Diag(ObjCMethod->getLocation(), diag::err_duplicate_method_decl)
+ << ObjCMethod->getDeclName();
+ Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
+ ObjCMethod->setInvalidDecl();
+ return ObjCMethod;
+ }
+
+ // If this Objective-C method does not have a related result type, but we
+ // are allowed to infer related result types, try to do so based on the
+ // method family.
+ ObjCInterfaceDecl *CurrentClass = dyn_cast<ObjCInterfaceDecl>(ClassDecl);
+ if (!CurrentClass) {
+ if (ObjCCategoryDecl *Cat = dyn_cast<ObjCCategoryDecl>(ClassDecl))
+ CurrentClass = Cat->getClassInterface();
+ else if (ObjCImplDecl *Impl = dyn_cast<ObjCImplDecl>(ClassDecl))
+ CurrentClass = Impl->getClassInterface();
+ else if (ObjCCategoryImplDecl *CatImpl
+ = dyn_cast<ObjCCategoryImplDecl>(ClassDecl))
+ CurrentClass = CatImpl->getClassInterface();
+ }
+
+ ResultTypeCompatibilityKind RTC
+ = CheckRelatedResultTypeCompatibility(*this, ObjCMethod, CurrentClass);
+
+ CheckObjCMethodOverrides(ObjCMethod, CurrentClass, RTC);
+
+ bool ARCError = false;
+ if (getLangOpts().ObjCAutoRefCount)
+ ARCError = CheckARCMethodDecl(ObjCMethod);
+
+ // Infer the related result type when possible.
+ if (!ARCError && RTC == Sema::RTC_Compatible &&
+ !ObjCMethod->hasRelatedResultType() &&
+ LangOpts.ObjCInferRelatedResultType) {
+ bool InferRelatedResultType = false;
+ switch (ObjCMethod->getMethodFamily()) {
+ case OMF_None:
+ case OMF_copy:
+ case OMF_dealloc:
+ case OMF_finalize:
+ case OMF_mutableCopy:
+ case OMF_release:
+ case OMF_retainCount:
+ case OMF_initialize:
+ case OMF_performSelector:
+ break;
+
+ case OMF_alloc:
+ case OMF_new:
+ InferRelatedResultType = ObjCMethod->isClassMethod();
+ break;
+
+ case OMF_init:
+ case OMF_autorelease:
+ case OMF_retain:
+ case OMF_self:
+ InferRelatedResultType = ObjCMethod->isInstanceMethod();
+ break;
+ }
+
+ if (InferRelatedResultType &&
+ !ObjCMethod->getReturnType()->isObjCIndependentClassType())
+ ObjCMethod->SetRelatedResultType();
+ }
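+
+  // For illustration (hypothetical declarations): with inference enabled,
+  //   + (id)new;   // OMF_new on a class method
+  //   - (id)init;  // OMF_init on an instance method
+  // are treated as if they returned 'instancetype', whereas families in the
+  // first group of the switch above (e.g. OMF_copy) never infer.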
+
+ ActOnDocumentableDecl(ObjCMethod);
+
+ return ObjCMethod;
+}
+
+bool Sema::CheckObjCDeclScope(Decl *D) {
+  // The following is also an error, but it is caused by a missing @end,
+  // and the diagnostic is issued elsewhere.
+ if (isa<ObjCContainerDecl>(CurContext->getRedeclContext()))
+ return false;
+
+ // If we switched context to translation unit while we are still lexically in
+ // an objc container, it means the parser missed emitting an error.
+ if (isa<TranslationUnitDecl>(getCurLexicalContext()->getRedeclContext()))
+ return false;
+
+ Diag(D->getLocation(), diag::err_objc_decls_may_only_appear_in_global_scope);
+ D->setInvalidDecl();
+
+ return true;
+}
+
+/// Called whenever \@defs(ClassName) is encountered in the source. Inserts the
+/// instance variables of ClassName into Decls.
+void Sema::ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
+ IdentifierInfo *ClassName,
+ SmallVectorImpl<Decl*> &Decls) {
+ // Check that ClassName is a valid class
+ ObjCInterfaceDecl *Class = getObjCInterfaceDecl(ClassName, DeclStart);
+ if (!Class) {
+ Diag(DeclStart, diag::err_undef_interface) << ClassName;
+ return;
+ }
+ if (LangOpts.ObjCRuntime.isNonFragile()) {
+ Diag(DeclStart, diag::err_atdef_nonfragile_interface);
+ return;
+ }
+
+ // Collect the instance variables
+ SmallVector<const ObjCIvarDecl*, 32> Ivars;
+ Context.DeepCollectObjCIvars(Class, true, Ivars);
+ // For each ivar, create a fresh ObjCAtDefsFieldDecl.
+ for (unsigned i = 0; i < Ivars.size(); i++) {
+ const FieldDecl* ID = cast<FieldDecl>(Ivars[i]);
+ RecordDecl *Record = dyn_cast<RecordDecl>(TagD);
+ Decl *FD = ObjCAtDefsFieldDecl::Create(Context, Record,
+ /*FIXME: StartL=*/ID->getLocation(),
+ ID->getLocation(),
+ ID->getIdentifier(), ID->getType(),
+ ID->getBitWidth());
+ Decls.push_back(FD);
+ }
+
+ // Introduce all of these fields into the appropriate scope.
+ for (SmallVectorImpl<Decl*>::iterator D = Decls.begin();
+ D != Decls.end(); ++D) {
+ FieldDecl *FD = cast<FieldDecl>(*D);
+ if (getLangOpts().CPlusPlus)
+ PushOnScopeChains(cast<FieldDecl>(FD), S);
+ else if (RecordDecl *Record = dyn_cast<RecordDecl>(TagD))
+ Record->addDecl(FD);
+ }
+}
+
+/// \brief Build and type-check a new Objective-C exception variable
+/// declaration.
+VarDecl *Sema::BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType T,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc,
+ IdentifierInfo *Id,
+ bool Invalid) {
+ // ISO/IEC TR 18037 S6.7.3: "The type of an object with automatic storage
+ // duration shall not be qualified by an address-space qualifier."
+  // Since all parameters have automatic storage duration, they cannot have
+  // an address space.
+ if (T.getAddressSpace() != 0) {
+ Diag(IdLoc, diag::err_arg_with_address_space);
+ Invalid = true;
+ }
+
+ // An @catch parameter must be an unqualified object pointer type;
+ // FIXME: Recover from "NSObject foo" by inserting the * in "NSObject *foo"?
+ if (Invalid) {
+ // Don't do any further checking.
+ } else if (T->isDependentType()) {
+ // Okay: we don't know what this type will instantiate to.
+ } else if (!T->isObjCObjectPointerType()) {
+ Invalid = true;
+    Diag(IdLoc, diag::err_catch_param_not_objc_type);
+ } else if (T->isObjCQualifiedIdType()) {
+ Invalid = true;
+ Diag(IdLoc, diag::err_illegal_qualifiers_on_catch_parm);
+ }
+
+ VarDecl *New = VarDecl::Create(Context, CurContext, StartLoc, IdLoc, Id,
+ T, TInfo, SC_None);
+ New->setExceptionVariable(true);
+
+ // In ARC, infer 'retaining' for variables of retainable type.
+ if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(New))
+ Invalid = true;
+
+ if (Invalid)
+ New->setInvalidDecl();
+ return New;
+}
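+
+// A sketch of the checks above on hypothetical @catch parameters:
+//
+//   @catch (int i)           { } // err_catch_param_not_objc_type
+//   @catch (id<NSCopying> e) { } // err_illegal_qualifiers_on_catch_parm
+//   @catch (NSException *e)  { } // OK: unqualified object pointer type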
+
+Decl *Sema::ActOnObjCExceptionDecl(Scope *S, Declarator &D) {
+ const DeclSpec &DS = D.getDeclSpec();
+
+ // We allow the "register" storage class on exception variables because
+ // GCC did, but we drop it completely. Any other storage class is an error.
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_register) {
+ Diag(DS.getStorageClassSpecLoc(), diag::warn_register_objc_catch_parm)
+ << FixItHint::CreateRemoval(SourceRange(DS.getStorageClassSpecLoc()));
+ } else if (DeclSpec::SCS SCS = DS.getStorageClassSpec()) {
+ Diag(DS.getStorageClassSpecLoc(), diag::err_storage_spec_on_catch_parm)
+ << DeclSpec::getSpecifierName(SCS);
+ }
+ if (DeclSpec::TSCS TSCS = D.getDeclSpec().getThreadStorageClassSpec())
+ Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
+ diag::err_invalid_thread)
+ << DeclSpec::getSpecifierName(TSCS);
+ D.getMutableDeclSpec().ClearStorageClassSpecs();
+
+ DiagnoseFunctionSpecifiers(D.getDeclSpec());
+
+ // Check that there are no default arguments inside the type of this
+ // exception object (C++ only).
+ if (getLangOpts().CPlusPlus)
+ CheckExtraCXXDefaultArguments(D);
+
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType ExceptionType = TInfo->getType();
+
+ VarDecl *New = BuildObjCExceptionDecl(TInfo, ExceptionType,
+ D.getSourceRange().getBegin(),
+ D.getIdentifierLoc(),
+ D.getIdentifier(),
+ D.isInvalidType());
+
+ // Parameter declarators cannot be qualified (C++ [dcl.meaning]p1).
+ if (D.getCXXScopeSpec().isSet()) {
+ Diag(D.getIdentifierLoc(), diag::err_qualified_objc_catch_parm)
+ << D.getCXXScopeSpec().getRange();
+ New->setInvalidDecl();
+ }
+
+ // Add the parameter declaration into this scope.
+ S->AddDecl(New);
+ if (D.getIdentifier())
+ IdResolver.AddDecl(New);
+
+ ProcessDeclAttributes(S, New, D);
+
+ if (New->hasAttr<BlocksAttr>())
+ Diag(New->getLocation(), diag::err_block_on_nonlocal);
+ return New;
+}
+
+/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
+/// initialization.
+void Sema::CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
+ SmallVectorImpl<ObjCIvarDecl*> &Ivars) {
+ for (ObjCIvarDecl *Iv = OI->all_declared_ivar_begin(); Iv;
+ Iv= Iv->getNextIvar()) {
+ QualType QT = Context.getBaseElementType(Iv->getType());
+ if (QT->isRecordType())
+ Ivars.push_back(Iv);
+ }
+}
+
+void Sema::DiagnoseUseOfUnimplementedSelectors() {
+ // Load referenced selectors from the external source.
+ if (ExternalSource) {
+ SmallVector<std::pair<Selector, SourceLocation>, 4> Sels;
+ ExternalSource->ReadReferencedSelectors(Sels);
+ for (unsigned I = 0, N = Sels.size(); I != N; ++I)
+ ReferencedSelectors[Sels[I].first] = Sels[I].second;
+ }
+
+  // The warning is issued only when the selector table is
+  // generated (which means there is at least one implementation
+  // in the TU). This matches GCC's behavior.
+ if (ReferencedSelectors.empty() ||
+ !Context.AnyObjCImplementation())
+ return;
+ for (auto &SelectorAndLocation : ReferencedSelectors) {
+ Selector Sel = SelectorAndLocation.first;
+ SourceLocation Loc = SelectorAndLocation.second;
+ if (!LookupImplementedMethodInGlobalPool(Sel))
+ Diag(Loc, diag::warn_unimplemented_selector) << Sel;
+ }
+}
+
+ObjCIvarDecl *
+Sema::GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
+ const ObjCPropertyDecl *&PDecl) const {
+ if (Method->isClassMethod())
+ return nullptr;
+ const ObjCInterfaceDecl *IDecl = Method->getClassInterface();
+ if (!IDecl)
+ return nullptr;
+ Method = IDecl->lookupMethod(Method->getSelector(), /*isInstance=*/true,
+ /*shallowCategoryLookup=*/false,
+ /*followSuper=*/false);
+ if (!Method || !Method->isPropertyAccessor())
+ return nullptr;
+ if ((PDecl = Method->findPropertyDecl()))
+ if (ObjCIvarDecl *IV = PDecl->getPropertyIvarDecl()) {
+      // The property's backing ivar must belong to the property's class
+      // or be a private ivar in the class's implementation.
+      // FIXME: fix the const-ness issue.
+ IV = const_cast<ObjCInterfaceDecl *>(IDecl)->lookupInstanceVariable(
+ IV->getIdentifier());
+ return IV;
+ }
+ return nullptr;
+}
+
+namespace {
+ /// Used by Sema::DiagnoseUnusedBackingIvarInAccessor to check if a property
+ /// accessor references the backing ivar.
+ class UnusedBackingIvarChecker :
+ public RecursiveASTVisitor<UnusedBackingIvarChecker> {
+ public:
+ Sema &S;
+ const ObjCMethodDecl *Method;
+ const ObjCIvarDecl *IvarD;
+ bool AccessedIvar;
+ bool InvokedSelfMethod;
+
+ UnusedBackingIvarChecker(Sema &S, const ObjCMethodDecl *Method,
+ const ObjCIvarDecl *IvarD)
+ : S(S), Method(Method), IvarD(IvarD),
+ AccessedIvar(false), InvokedSelfMethod(false) {
+ assert(IvarD);
+ }
+
+ bool VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ if (E->getDecl() == IvarD) {
+ AccessedIvar = true;
+ return false;
+ }
+ return true;
+ }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ if (E->getReceiverKind() == ObjCMessageExpr::Instance &&
+ S.isSelfExpr(E->getInstanceReceiver(), Method)) {
+ InvokedSelfMethod = true;
+ }
+ return true;
+ }
+ };
+} // end anonymous namespace
+
+void Sema::DiagnoseUnusedBackingIvarInAccessor(Scope *S,
+ const ObjCImplementationDecl *ImplD) {
+ if (S->hasUnrecoverableErrorOccurred())
+ return;
+
+ for (const auto *CurMethod : ImplD->instance_methods()) {
+ unsigned DIAG = diag::warn_unused_property_backing_ivar;
+ SourceLocation Loc = CurMethod->getLocation();
+ if (Diags.isIgnored(DIAG, Loc))
+ continue;
+
+ const ObjCPropertyDecl *PDecl;
+ const ObjCIvarDecl *IV = GetIvarBackingPropertyAccessor(CurMethod, PDecl);
+ if (!IV)
+ continue;
+
+ UnusedBackingIvarChecker Checker(*this, CurMethod, IV);
+ Checker.TraverseStmt(CurMethod->getBody());
+ if (Checker.AccessedIvar)
+ continue;
+
+    // Do not issue this warning if the backing ivar is used somewhere and
+    // the accessor implementation makes a self call. This is to prevent
+    // false positives in cases where the ivar is accessed by another method
+    // that the accessor delegates to.
+ if (!IV->isReferenced() || !Checker.InvokedSelfMethod) {
+ Diag(Loc, DIAG) << IV;
+ Diag(PDecl->getLocation(), diag::note_property_declare);
+ }
+ }
+}
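+
+// For illustration, a hypothetical property with a hand-written getter:
+//
+//   @property (nonatomic, copy) NSString *title; // backed by _title
+//   - (NSString *)title { return @"fixed"; }     // never touches _title
+//
+// The getter triggers warn_unused_property_backing_ivar unless _title is
+// referenced elsewhere and the accessor makes a call on self.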
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp
new file mode 100644
index 0000000..f12bf24
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -0,0 +1,1197 @@
+//===--- SemaExceptionSpec.cpp - C++ Exception Specifications ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Sema routines for C++ exception specification testing.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+
+namespace clang {
+
+static const FunctionProtoType *GetUnderlyingFunction(QualType T)
+{
+ if (const PointerType *PtrTy = T->getAs<PointerType>())
+ T = PtrTy->getPointeeType();
+ else if (const ReferenceType *RefTy = T->getAs<ReferenceType>())
+ T = RefTy->getPointeeType();
+ else if (const MemberPointerType *MPTy = T->getAs<MemberPointerType>())
+ T = MPTy->getPointeeType();
+ return T->getAs<FunctionProtoType>();
+}
+
+/// HACK: libstdc++ has a bug where it shadows std::swap with a member
+/// swap function and then tries to call std::swap unqualified from the
+/// exception specification of that function. This function detects whether
+/// we're in such a case and turns off delay-parsing of exception
+/// specifications.
+bool Sema::isLibstdcxxEagerExceptionSpecHack(const Declarator &D) {
+ auto *RD = dyn_cast<CXXRecordDecl>(CurContext);
+
+ // All the problem cases are member functions named "swap" within class
+ // templates declared directly within namespace std.
+ if (!RD || RD->getEnclosingNamespaceContext() != getStdNamespace() ||
+ !RD->getIdentifier() || !RD->getDescribedClassTemplate() ||
+ !D.getIdentifier() || !D.getIdentifier()->isStr("swap"))
+ return false;
+
+ // Only apply this hack within a system header.
+ if (!Context.getSourceManager().isInSystemHeader(D.getLocStart()))
+ return false;
+
+ return llvm::StringSwitch<bool>(RD->getIdentifier()->getName())
+ .Case("array", true)
+ .Case("pair", true)
+ .Case("priority_queue", true)
+ .Case("stack", true)
+ .Case("queue", true)
+ .Default(false);
+}
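+
+// The problematic libstdc++ pattern looks roughly like this (simplified
+// from memory, not the actual header text):
+//
+//   namespace std {
+//     template<typename T> struct pair {
+//       T first, second;
+//       void swap(pair &p) noexcept(noexcept(swap(first, p.first)));
+//     };
+//   }
+//
+// The unqualified 'swap' in the noexcept expression is intended to be
+// std::swap but is shadowed by the member, so for these class templates the
+// exception specification is parsed eagerly instead of being delayed.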
+
+/// CheckSpecifiedExceptionType - Check if the given type is valid in an
+/// exception specification. Incomplete types, or pointers to incomplete types
+/// other than void are not allowed.
+///
+/// \param[in,out] T The exception type. This will be decayed to a pointer type
+/// when the input is an array or a function type.
+bool Sema::CheckSpecifiedExceptionType(QualType &T, SourceRange Range) {
+ // C++11 [except.spec]p2:
+ // A type cv T, "array of T", or "function returning T" denoted
+ // in an exception-specification is adjusted to type T, "pointer to T", or
+ // "pointer to function returning T", respectively.
+ //
+ // We also apply this rule in C++98.
+ if (T->isArrayType())
+ T = Context.getArrayDecayedType(T);
+ else if (T->isFunctionType())
+ T = Context.getPointerType(T);
+
+ int Kind = 0;
+ QualType PointeeT = T;
+ if (const PointerType *PT = T->getAs<PointerType>()) {
+ PointeeT = PT->getPointeeType();
+ Kind = 1;
+
+ // cv void* is explicitly permitted, despite being a pointer to an
+ // incomplete type.
+ if (PointeeT->isVoidType())
+ return false;
+ } else if (const ReferenceType *RT = T->getAs<ReferenceType>()) {
+ PointeeT = RT->getPointeeType();
+ Kind = 2;
+
+ if (RT->isRValueReferenceType()) {
+ // C++11 [except.spec]p2:
+ // A type denoted in an exception-specification shall not denote [...]
+ // an rvalue reference type.
+ Diag(Range.getBegin(), diag::err_rref_in_exception_spec)
+ << T << Range;
+ return true;
+ }
+ }
+
+ // C++11 [except.spec]p2:
+ // A type denoted in an exception-specification shall not denote an
+ // incomplete type other than a class currently being defined [...].
+ // A type denoted in an exception-specification shall not denote a
+ // pointer or reference to an incomplete type, other than (cv) void* or a
+ // pointer or reference to a class currently being defined.
+ if (!(PointeeT->isRecordType() &&
+ PointeeT->getAs<RecordType>()->isBeingDefined()) &&
+ RequireCompleteType(Range.getBegin(), PointeeT,
+ diag::err_incomplete_in_exception_spec, Kind, Range))
+ return true;
+
+ return false;
+}
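+
+// For illustration, hypothetical C++98-style dynamic specifications:
+//
+//   struct Incomplete;
+//   void f() throw(int[4]);      // adjusted to throw(int*)
+//   void g() throw(void*);       // OK: cv void* is explicitly permitted
+//   void h() throw(Incomplete*); // error: pointer to an incomplete type
+//   void i() throw(int&&);       // C++11 error: rvalue reference type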
+
+/// CheckDistantExceptionSpec - Check if the given type is a pointer or
+/// pointer-to-member to a function with an exception specification. If so,
+/// it is invalid to add another level of indirection.
+bool Sema::CheckDistantExceptionSpec(QualType T) {
+ if (const PointerType *PT = T->getAs<PointerType>())
+ T = PT->getPointeeType();
+ else if (const MemberPointerType *PT = T->getAs<MemberPointerType>())
+ T = PT->getPointeeType();
+ else
+ return false;
+
+ const FunctionProtoType *FnT = T->getAs<FunctionProtoType>();
+ if (!FnT)
+ return false;
+
+ return FnT->hasExceptionSpec();
+}
+
+const FunctionProtoType *
+Sema::ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT) {
+ if (FPT->getExceptionSpecType() == EST_Unparsed) {
+ Diag(Loc, diag::err_exception_spec_not_parsed);
+ return nullptr;
+ }
+
+ if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()))
+ return FPT;
+
+ FunctionDecl *SourceDecl = FPT->getExceptionSpecDecl();
+ const FunctionProtoType *SourceFPT =
+ SourceDecl->getType()->castAs<FunctionProtoType>();
+
+ // If the exception specification has already been resolved, just return it.
+ if (!isUnresolvedExceptionSpec(SourceFPT->getExceptionSpecType()))
+ return SourceFPT;
+
+ // Compute or instantiate the exception specification now.
+ if (SourceFPT->getExceptionSpecType() == EST_Unevaluated)
+ EvaluateImplicitExceptionSpec(Loc, cast<CXXMethodDecl>(SourceDecl));
+ else
+ InstantiateExceptionSpec(Loc, SourceDecl);
+
+ const FunctionProtoType *Proto =
+ SourceDecl->getType()->castAs<FunctionProtoType>();
+ if (Proto->getExceptionSpecType() == clang::EST_Unparsed) {
+ Diag(Loc, diag::err_exception_spec_not_parsed);
+ Proto = nullptr;
+ }
+ return Proto;
+}
+
+void
+Sema::UpdateExceptionSpec(FunctionDecl *FD,
+ const FunctionProtoType::ExceptionSpecInfo &ESI) {
+ // If we've fully resolved the exception specification, notify listeners.
+ if (!isUnresolvedExceptionSpec(ESI.Type))
+ if (auto *Listener = getASTMutationListener())
+ Listener->ResolvedExceptionSpec(FD);
+
+ for (auto *Redecl : FD->redecls())
+ Context.adjustExceptionSpec(cast<FunctionDecl>(Redecl), ESI);
+}
+
+/// Determine whether a function has an implicitly-generated exception
+/// specification.
+static bool hasImplicitExceptionSpec(FunctionDecl *Decl) {
+ if (!isa<CXXDestructorDecl>(Decl) &&
+ Decl->getDeclName().getCXXOverloadedOperator() != OO_Delete &&
+ Decl->getDeclName().getCXXOverloadedOperator() != OO_Array_Delete)
+ return false;
+
+ // For a function that the user didn't declare:
+ // - if this is a destructor, its exception specification is implicit.
+ // - if this is 'operator delete' or 'operator delete[]', the exception
+ // specification is as-if an explicit exception specification was given
+ // (per [basic.stc.dynamic]p2).
+ if (!Decl->getTypeSourceInfo())
+ return isa<CXXDestructorDecl>(Decl);
+
+ const FunctionProtoType *Ty =
+ Decl->getTypeSourceInfo()->getType()->getAs<FunctionProtoType>();
+ return !Ty->hasExceptionSpec();
+}
+
+bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
+ OverloadedOperatorKind OO = New->getDeclName().getCXXOverloadedOperator();
+ bool IsOperatorNew = OO == OO_New || OO == OO_Array_New;
+ bool MissingExceptionSpecification = false;
+ bool MissingEmptyExceptionSpecification = false;
+
+ unsigned DiagID = diag::err_mismatched_exception_spec;
+ bool ReturnValueOnError = true;
+ if (getLangOpts().MicrosoftExt) {
+ DiagID = diag::ext_mismatched_exception_spec;
+ ReturnValueOnError = false;
+ }
+
+ // Check the types as written: they must match before any exception
+ // specification adjustment is applied.
+ if (!CheckEquivalentExceptionSpec(
+ PDiag(DiagID), PDiag(diag::note_previous_declaration),
+ Old->getType()->getAs<FunctionProtoType>(), Old->getLocation(),
+ New->getType()->getAs<FunctionProtoType>(), New->getLocation(),
+ &MissingExceptionSpecification, &MissingEmptyExceptionSpecification,
+ /*AllowNoexceptAllMatchWithNoSpec=*/true, IsOperatorNew)) {
+ // C++11 [except.spec]p4 [DR1492]:
+ // If a declaration of a function has an implicit
+ // exception-specification, other declarations of the function shall
+ // not specify an exception-specification.
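+ //
+ // For example (illustrative only):
+ //   struct A { ~A(); };        // implicit exception specification
+ //   A::~A() noexcept(true) {}  // other declarations must not add one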
+ if (getLangOpts().CPlusPlus11 &&
+ hasImplicitExceptionSpec(Old) != hasImplicitExceptionSpec(New)) {
+ Diag(New->getLocation(), diag::ext_implicit_exception_spec_mismatch)
+ << hasImplicitExceptionSpec(Old);
+ if (Old->getLocation().isValid())
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ }
+ return false;
+ }
+
+ // The failure was something other than a missing exception
+ // specification; return an error, except in MS mode where this is a warning.
+ if (!MissingExceptionSpecification)
+ return ReturnValueOnError;
+
+ const FunctionProtoType *NewProto =
+ New->getType()->castAs<FunctionProtoType>();
+
+ // The new function declaration is only missing an empty exception
+ // specification "throw()". If the throw() specification came from a
+ // function in a system header that has C linkage, just add an empty
+ // exception specification to the "new" declaration. This is an
+ // egregious workaround for glibc, which adds throw() specifications
+ // to many libc functions as an optimization. Unfortunately, that
+ // optimization isn't permitted by the C++ standard, so we're forced
+ // to work around it here.
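+ //
+ // For example (illustrative only), glibc's headers declare
+ //   extern char *strdup(const char *) throw();
+ // with C linkage, and a later user redeclaration
+ //   char *strdup(const char *);
+ // is accepted by silently inheriting the throw() specification.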
+ if (MissingEmptyExceptionSpecification && NewProto &&
+ (Old->getLocation().isInvalid() ||
+ Context.getSourceManager().isInSystemHeader(Old->getLocation())) &&
+ Old->isExternC()) {
+ New->setType(Context.getFunctionType(
+ NewProto->getReturnType(), NewProto->getParamTypes(),
+ NewProto->getExtProtoInfo().withExceptionSpec(EST_DynamicNone)));
+ return false;
+ }
+
+ const FunctionProtoType *OldProto =
+ Old->getType()->castAs<FunctionProtoType>();
+
+ FunctionProtoType::ExceptionSpecInfo ESI = OldProto->getExceptionSpecType();
+ if (ESI.Type == EST_Dynamic) {
+ ESI.Exceptions = OldProto->exceptions();
+ }
+
+ if (ESI.Type == EST_ComputedNoexcept) {
+ // For computed noexcept, we can't just take the expression from the old
+ // prototype. It likely contains references to the old prototype's
+ // parameters.
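+ //
+ // For example (illustrative, with a hypothetical function g):
+ //   void g(int);
+ //   void f(int i) noexcept(noexcept(g(i)));
+ //   void f(int i);  // noexcept(g(i)) mentions the old declaration's 'i',
+ //                   // so the expression cannot simply be reused here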
+ New->setInvalidDecl();
+ } else {
+ // Update the type of the function with the appropriate exception
+ // specification.
+ New->setType(Context.getFunctionType(
+ NewProto->getReturnType(), NewProto->getParamTypes(),
+ NewProto->getExtProtoInfo().withExceptionSpec(ESI)));
+ }
+
+ if (getLangOpts().MicrosoftExt && ESI.Type != EST_ComputedNoexcept) {
+ // Allow missing exception specifications in redeclarations as an extension.
+ DiagID = diag::ext_ms_missing_exception_specification;
+ ReturnValueOnError = false;
+ } else if (New->isReplaceableGlobalAllocationFunction() &&
+ ESI.Type != EST_ComputedNoexcept) {
+ // Allow missing exception specifications in redeclarations as an extension,
+ // when declaring a replaceable global allocation function.
+ DiagID = diag::ext_missing_exception_specification;
+ ReturnValueOnError = false;
+ } else {
+ DiagID = diag::err_missing_exception_specification;
+ ReturnValueOnError = true;
+ }
+
+ // Warn about the lack of exception specification.
+ SmallString<128> ExceptionSpecString;
+ llvm::raw_svector_ostream OS(ExceptionSpecString);
+ switch (OldProto->getExceptionSpecType()) {
+ case EST_DynamicNone:
+ OS << "throw()";
+ break;
+
+ case EST_Dynamic: {
+ OS << "throw(";
+ bool OnFirstException = true;
+ for (const auto &E : OldProto->exceptions()) {
+ if (OnFirstException)
+ OnFirstException = false;
+ else
+ OS << ", ";
+
+ OS << E.getAsString(getPrintingPolicy());
+ }
+ OS << ")";
+ break;
+ }
+
+ case EST_BasicNoexcept:
+ OS << "noexcept";
+ break;
+
+ case EST_ComputedNoexcept:
+ OS << "noexcept(";
+ assert(OldProto->getNoexceptExpr() != nullptr && "Expected non-null Expr");
+ OldProto->getNoexceptExpr()->printPretty(OS, nullptr, getPrintingPolicy());
+ OS << ")";
+ break;
+
+ default:
+ llvm_unreachable("This spec type is compatible with none.");
+ }
+
+ SourceLocation FixItLoc;
+ if (TypeSourceInfo *TSInfo = New->getTypeSourceInfo()) {
+ TypeLoc TL = TSInfo->getTypeLoc().IgnoreParens();
+ // FIXME: Preserve enough information so that we can produce a correct fixit
+ // location when there is a trailing return type.
+ if (auto FTLoc = TL.getAs<FunctionProtoTypeLoc>())
+ if (!FTLoc.getTypePtr()->hasTrailingReturn())
+ FixItLoc = getLocForEndOfToken(FTLoc.getLocalRangeEnd());
+ }
+
+ if (FixItLoc.isInvalid())
+ Diag(New->getLocation(), DiagID)
+ << New << OS.str();
+ else {
+ Diag(New->getLocation(), DiagID)
+ << New << OS.str()
+ << FixItHint::CreateInsertion(FixItLoc, " " + OS.str().str());
+ }
+
+ if (Old->getLocation().isValid())
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+
+ return ReturnValueOnError;
+}
+
+/// CheckEquivalentExceptionSpec - Check if the two types have equivalent
+/// exception specifications. Exception specifications are equivalent if
+/// they allow exactly the same set of exception types. It does not matter how
+/// that is achieved. See C++ [except.spec]p2.
+bool Sema::CheckEquivalentExceptionSpec(
+ const FunctionProtoType *Old, SourceLocation OldLoc,
+ const FunctionProtoType *New, SourceLocation NewLoc) {
+ unsigned DiagID = diag::err_mismatched_exception_spec;
+ if (getLangOpts().MicrosoftExt)
+ DiagID = diag::ext_mismatched_exception_spec;
+ bool Result = CheckEquivalentExceptionSpec(PDiag(DiagID),
+ PDiag(diag::note_previous_declaration), Old, OldLoc, New, NewLoc);
+
+ // In Microsoft mode, mismatching exception specifications just cause a warning.
+ if (getLangOpts().MicrosoftExt)
+ return false;
+ return Result;
+}
+
+/// CheckEquivalentExceptionSpec - Check if the two types have compatible
+/// exception specifications. See C++ [except.spec]p3.
+///
+/// \return \c false if the exception specifications match, \c true if there is
+/// a problem. If \c true is returned, either a diagnostic has already been
+/// produced or \c *MissingExceptionSpecification is set to \c true.
+bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID,
+ const PartialDiagnostic &NoteID,
+ const FunctionProtoType *Old,
+ SourceLocation OldLoc,
+ const FunctionProtoType *New,
+ SourceLocation NewLoc,
+ bool *MissingExceptionSpecification,
+ bool *MissingEmptyExceptionSpecification,
+ bool AllowNoexceptAllMatchWithNoSpec,
+ bool IsOperatorNew) {
+ // Just completely ignore this under -fno-exceptions.
+ if (!getLangOpts().CXXExceptions)
+ return false;
+
+ if (MissingExceptionSpecification)
+ *MissingExceptionSpecification = false;
+
+ if (MissingEmptyExceptionSpecification)
+ *MissingEmptyExceptionSpecification = false;
+
+ Old = ResolveExceptionSpec(NewLoc, Old);
+ if (!Old)
+ return false;
+ New = ResolveExceptionSpec(NewLoc, New);
+ if (!New)
+ return false;
+
+ // C++0x [except.spec]p3: Two exception-specifications are compatible if:
+ // - both are non-throwing, regardless of their form,
+ // - both have the form noexcept(constant-expression) and the constant-
+ // expressions are equivalent,
+ // - both are dynamic-exception-specifications that have the same set of
+ // adjusted types.
+ //
+ // C++0x [except.spec]p12: An exception-specification is non-throwing if it is
+ // of the form throw(), noexcept, or noexcept(constant-expression) where the
+ // constant-expression yields true.
+ //
+ // C++0x [except.spec]p4: If any declaration of a function has an exception-
+ // specifier that is not a noexcept-specification allowing all exceptions,
+ // all declarations [...] of that function shall have a compatible
+ // exception-specification.
+ //
+ // That last point basically means that noexcept(false) matches no spec.
+ // It's considered when AllowNoexceptAllMatchWithNoSpec is true.
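+ //
+ // For example (illustrative only):
+ //   void f();                  // no exception-specification
+ //   void f() noexcept(false);  // compatible when that allowance is enabled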
+
+ ExceptionSpecificationType OldEST = Old->getExceptionSpecType();
+ ExceptionSpecificationType NewEST = New->getExceptionSpecType();
+
+ assert(!isUnresolvedExceptionSpec(OldEST) &&
+ !isUnresolvedExceptionSpec(NewEST) &&
+ "Shouldn't see unknown exception specifications here");
+
+ // Shortcut the case where both have no spec.
+ if (OldEST == EST_None && NewEST == EST_None)
+ return false;
+
+ FunctionProtoType::NoexceptResult OldNR = Old->getNoexceptSpec(Context);
+ FunctionProtoType::NoexceptResult NewNR = New->getNoexceptSpec(Context);
+ if (OldNR == FunctionProtoType::NR_BadNoexcept ||
+ NewNR == FunctionProtoType::NR_BadNoexcept)
+ return false;
+
+ // Dependent noexcept specifiers are compatible with each other, but nothing
+ // else. Otherwise, one noexcept specifier is compatible with another only if
+ // their expressions evaluate to the same result.
+ if (OldNR == NewNR &&
+ OldNR != FunctionProtoType::NR_NoNoexcept &&
+ NewNR != FunctionProtoType::NR_NoNoexcept)
+ return false;
+ if (OldNR != NewNR &&
+ OldNR != FunctionProtoType::NR_NoNoexcept &&
+ NewNR != FunctionProtoType::NR_NoNoexcept) {
+ Diag(NewLoc, DiagID);
+ if (NoteID.getDiagID() != 0 && OldLoc.isValid())
+ Diag(OldLoc, NoteID);
+ return true;
+ }
+
+ // The MS extension throw(...) is compatible with itself.
+ if (OldEST == EST_MSAny && NewEST == EST_MSAny)
+ return false;
+
+ // It's also compatible with no spec.
+ if ((OldEST == EST_None && NewEST == EST_MSAny) ||
+ (OldEST == EST_MSAny && NewEST == EST_None))
+ return false;
+
+ // It's also compatible with noexcept(false).
+ if (OldEST == EST_MSAny && NewNR == FunctionProtoType::NR_Throw)
+ return false;
+ if (NewEST == EST_MSAny && OldNR == FunctionProtoType::NR_Throw)
+ return false;
+
+ // As described above, noexcept(false) matches no spec only for functions.
+ if (AllowNoexceptAllMatchWithNoSpec) {
+ if (OldEST == EST_None && NewNR == FunctionProtoType::NR_Throw)
+ return false;
+ if (NewEST == EST_None && OldNR == FunctionProtoType::NR_Throw)
+ return false;
+ }
+
+ // Any non-throwing specifications are compatible.
+ bool OldNonThrowing = OldNR == FunctionProtoType::NR_Nothrow ||
+ OldEST == EST_DynamicNone;
+ bool NewNonThrowing = NewNR == FunctionProtoType::NR_Nothrow ||
+ NewEST == EST_DynamicNone;
+ if (OldNonThrowing && NewNonThrowing)
+ return false;
+
+ // As a special compatibility feature, under C++0x we accept no spec and
+ // throw(std::bad_alloc) as equivalent for operator new and operator new[].
+ // This is because the implicit declaration changed in C++11, and existing
+ // code written against the old declaration would otherwise break.
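+ //
+ // For example (illustrative only), both declarations are accepted:
+ //   void *operator new(std::size_t);                        // C++11 form
+ //   void *operator new(std::size_t) throw(std::bad_alloc);  // C++98 form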
+ if (getLangOpts().CPlusPlus11 && IsOperatorNew) {
+ const FunctionProtoType *WithExceptions = nullptr;
+ if (OldEST == EST_None && NewEST == EST_Dynamic)
+ WithExceptions = New;
+ else if (OldEST == EST_Dynamic && NewEST == EST_None)
+ WithExceptions = Old;
+ if (WithExceptions && WithExceptions->getNumExceptions() == 1) {
+ // One has no spec, the other throw(something). If that something is
+ // std::bad_alloc, all conditions are met.
+ QualType Exception = *WithExceptions->exception_begin();
+ if (CXXRecordDecl *ExRecord = Exception->getAsCXXRecordDecl()) {
+ IdentifierInfo* Name = ExRecord->getIdentifier();
+ if (Name && Name->getName() == "bad_alloc") {
+ // It's called bad_alloc, but is it in std?
+ if (ExRecord->isInStdNamespace()) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+
+ // At this point, the only remaining valid case is two matching dynamic
+ // specifications. We return here unless both specifications are dynamic.
+ if (OldEST != EST_Dynamic || NewEST != EST_Dynamic) {
+ if (MissingExceptionSpecification && Old->hasExceptionSpec() &&
+ !New->hasExceptionSpec()) {
+ // The old type has an exception specification of some sort, but
+ // the new type does not.
+ *MissingExceptionSpecification = true;
+
+ if (MissingEmptyExceptionSpecification && OldNonThrowing) {
+ // The old type has a throw() or noexcept(true) exception specification
+ // and the new type has no exception specification, and the caller asked
+ // to handle this itself.
+ *MissingEmptyExceptionSpecification = true;
+ }
+
+ return true;
+ }
+
+ Diag(NewLoc, DiagID);
+ if (NoteID.getDiagID() != 0 && OldLoc.isValid())
+ Diag(OldLoc, NoteID);
+ return true;
+ }
+
+ assert(OldEST == EST_Dynamic && NewEST == EST_Dynamic &&
+ "Exception compatibility logic error: non-dynamic spec slipped through.");
+
+ bool Success = true;
+ // Both have a dynamic exception spec. Collect the first set, then compare
+ // to the second.
+ llvm::SmallPtrSet<CanQualType, 8> OldTypes, NewTypes;
+ for (const auto &I : Old->exceptions())
+ OldTypes.insert(Context.getCanonicalType(I).getUnqualifiedType());
+
+ for (const auto &I : New->exceptions()) {
+ CanQualType TypePtr = Context.getCanonicalType(I).getUnqualifiedType();
+ if (OldTypes.count(TypePtr))
+ NewTypes.insert(TypePtr);
+ else
+ Success = false;
+ }
+
+ Success = Success && OldTypes.size() == NewTypes.size();
+
+ if (Success)
+ return false;
+ Diag(NewLoc, DiagID);
+ if (NoteID.getDiagID() != 0 && OldLoc.isValid())
+ Diag(OldLoc, NoteID);
+ return true;
+}
+
+/// CheckExceptionSpecSubset - Check whether the second function type's
+/// exception specification is a subset (or equivalent) of the first function
+/// type. This is used by override and pointer assignment checks.
+bool Sema::CheckExceptionSpecSubset(
+ const PartialDiagnostic &DiagID, const PartialDiagnostic &NoteID,
+ const FunctionProtoType *Superset, SourceLocation SuperLoc,
+ const FunctionProtoType *Subset, SourceLocation SubLoc) {
+
+ // Just auto-succeed under -fno-exceptions.
+ if (!getLangOpts().CXXExceptions)
+ return false;
+
+ // FIXME: As usual, we could be more specific in our error messages, but
+ // that better waits until we've got types with source locations.
+
+ if (!SubLoc.isValid())
+ SubLoc = SuperLoc;
+
+ // Resolve the exception specifications, if needed.
+ Superset = ResolveExceptionSpec(SuperLoc, Superset);
+ if (!Superset)
+ return false;
+ Subset = ResolveExceptionSpec(SubLoc, Subset);
+ if (!Subset)
+ return false;
+
+ ExceptionSpecificationType SuperEST = Superset->getExceptionSpecType();
+
+ // If superset contains everything, we're done.
+ if (SuperEST == EST_None || SuperEST == EST_MSAny)
+ return CheckParamExceptionSpec(NoteID, Superset, SuperLoc, Subset, SubLoc);
+
+ // If there are dependent noexcept specs, assume everything is fine. Unlike
+ // with the equivalency check, this is safe in this case, because we don't
+ // want to merge declarations. Checks after instantiation will catch any
+ // omissions we make here.
+ // We also shortcut checking if a noexcept expression was bad.
+
+ FunctionProtoType::NoexceptResult SuperNR =
+ Superset->getNoexceptSpec(Context);
+ if (SuperNR == FunctionProtoType::NR_BadNoexcept ||
+ SuperNR == FunctionProtoType::NR_Dependent)
+ return false;
+
+ // Another case of the superset containing everything.
+ if (SuperNR == FunctionProtoType::NR_Throw)
+ return CheckParamExceptionSpec(NoteID, Superset, SuperLoc, Subset, SubLoc);
+
+ ExceptionSpecificationType SubEST = Subset->getExceptionSpecType();
+
+ assert(!isUnresolvedExceptionSpec(SuperEST) &&
+ !isUnresolvedExceptionSpec(SubEST) &&
+ "Shouldn't see unknown exception specifications here");
+
+ // It does not. If the subset contains everything, we've failed.
+ if (SubEST == EST_None || SubEST == EST_MSAny) {
+ Diag(SubLoc, DiagID);
+ if (NoteID.getDiagID() != 0)
+ Diag(SuperLoc, NoteID);
+ return true;
+ }
+
+ FunctionProtoType::NoexceptResult SubNR = Subset->getNoexceptSpec(Context);
+ if (SubNR == FunctionProtoType::NR_BadNoexcept ||
+ SubNR == FunctionProtoType::NR_Dependent)
+ return false;
+
+ // Another case of the subset containing everything.
+ if (SubNR == FunctionProtoType::NR_Throw) {
+ Diag(SubLoc, DiagID);
+ if (NoteID.getDiagID() != 0)
+ Diag(SuperLoc, NoteID);
+ return true;
+ }
+
+ // If the subset contains nothing, we're done.
+ if (SubEST == EST_DynamicNone || SubNR == FunctionProtoType::NR_Nothrow)
+ return CheckParamExceptionSpec(NoteID, Superset, SuperLoc, Subset, SubLoc);
+
+ // Otherwise, if the superset contains nothing, we've failed.
+ if (SuperEST == EST_DynamicNone || SuperNR == FunctionProtoType::NR_Nothrow) {
+ Diag(SubLoc, DiagID);
+ if (NoteID.getDiagID() != 0)
+ Diag(SuperLoc, NoteID);
+ return true;
+ }
+
+ assert(SuperEST == EST_Dynamic && SubEST == EST_Dynamic &&
+ "Exception spec subset: non-dynamic case slipped through.");
+
+ // Neither contains everything or nothing. Do a proper comparison.
+ for (const auto &SubI : Subset->exceptions()) {
+ // Take one type from the subset.
+ QualType CanonicalSubT = Context.getCanonicalType(SubI);
+ // Unwrap pointers and references so that we can do checks within a class
+ // hierarchy. Don't unwrap member pointers; they don't have hierarchy
+ // conversions on the pointee.
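+ //
+ // For example (illustrative only):
+ //   struct Base {}; struct Derived : Base {};
+ //   void f() throw(Derived*);
+ //   void (*p)() throw(Base*) = &f;  // OK: Derived* is covered by Base*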
+ bool SubIsPointer = false;
+ if (const ReferenceType *RefTy = CanonicalSubT->getAs<ReferenceType>())
+ CanonicalSubT = RefTy->getPointeeType();
+ if (const PointerType *PtrTy = CanonicalSubT->getAs<PointerType>()) {
+ CanonicalSubT = PtrTy->getPointeeType();
+ SubIsPointer = true;
+ }
+ bool SubIsClass = CanonicalSubT->isRecordType();
+ CanonicalSubT = CanonicalSubT.getLocalUnqualifiedType();
+
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+
+ bool Contained = false;
+ // Make sure it's in the superset.
+ for (const auto &SuperI : Superset->exceptions()) {
+ QualType CanonicalSuperT = Context.getCanonicalType(SuperI);
+ // SubT must be SuperT or derived from it, or pointer or reference to
+ // such types.
+ if (const ReferenceType *RefTy = CanonicalSuperT->getAs<ReferenceType>())
+ CanonicalSuperT = RefTy->getPointeeType();
+ if (SubIsPointer) {
+ if (const PointerType *PtrTy = CanonicalSuperT->getAs<PointerType>())
+ CanonicalSuperT = PtrTy->getPointeeType();
+ else {
+ continue;
+ }
+ }
+ CanonicalSuperT = CanonicalSuperT.getLocalUnqualifiedType();
+ // If the types are the same, move on to the next type in the subset.
+ if (CanonicalSubT == CanonicalSuperT) {
+ Contained = true;
+ break;
+ }
+
+ // Otherwise we need to check the inheritance.
+ if (!SubIsClass || !CanonicalSuperT->isRecordType())
+ continue;
+
+ Paths.clear();
+ if (!IsDerivedFrom(SubLoc, CanonicalSubT, CanonicalSuperT, Paths))
+ continue;
+
+ if (Paths.isAmbiguous(Context.getCanonicalType(CanonicalSuperT)))
+ continue;
+
+ // Do this check from a context without privileges.
+ switch (CheckBaseClassAccess(SourceLocation(),
+ CanonicalSuperT, CanonicalSubT,
+ Paths.front(),
+ /*Diagnostic*/ 0,
+ /*ForceCheck*/ true,
+ /*ForceUnprivileged*/ true)) {
+ case AR_accessible: break;
+ case AR_inaccessible: continue;
+ case AR_dependent:
+ llvm_unreachable("access check dependent for unprivileged context");
+ case AR_delayed:
+ llvm_unreachable("access check delayed in non-declaration");
+ }
+
+ Contained = true;
+ break;
+ }
+ if (!Contained) {
+ Diag(SubLoc, DiagID);
+ if (NoteID.getDiagID() != 0)
+ Diag(SuperLoc, NoteID);
+ return true;
+ }
+ }
+ // We've run half the gauntlet.
+ return CheckParamExceptionSpec(NoteID, Superset, SuperLoc, Subset, SubLoc);
+}
+
+static bool CheckSpecForTypesEquivalent(
+ Sema &S, const PartialDiagnostic &DiagID, const PartialDiagnostic &NoteID,
+ QualType Target, SourceLocation TargetLoc, QualType Source,
+ SourceLocation SourceLoc) {
+ const FunctionProtoType *TFunc = GetUnderlyingFunction(Target);
+ if (!TFunc)
+ return false;
+ const FunctionProtoType *SFunc = GetUnderlyingFunction(Source);
+ if (!SFunc)
+ return false;
+
+ return S.CheckEquivalentExceptionSpec(DiagID, NoteID, TFunc, TargetLoc,
+ SFunc, SourceLoc);
+}
+
+/// CheckParamExceptionSpec - Check if the parameter and return types of the
+/// two functions have equivalent exception specs. This is part of the
+/// assignment and override compatibility check. We do not check the parameters
+/// of parameter function pointers recursively, as no sane programmer would
+/// even be able to write such a function type.
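+///
+/// For example (illustrative only):
+///   void (*p)(void (*)() throw(int));
+///   void (*q)(void (*)() throw(float)) = p;  // error: the parameters'
+///                                            // exception specs differ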
+bool Sema::CheckParamExceptionSpec(const PartialDiagnostic &NoteID,
+ const FunctionProtoType *Target,
+ SourceLocation TargetLoc,
+ const FunctionProtoType *Source,
+ SourceLocation SourceLoc) {
+ if (CheckSpecForTypesEquivalent(
+ *this, PDiag(diag::err_deep_exception_specs_differ) << 0, PDiag(),
+ Target->getReturnType(), TargetLoc, Source->getReturnType(),
+ SourceLoc))
+ return true;
+
+ // We shouldn't even be testing this unless the arguments are otherwise
+ // compatible.
+ assert(Target->getNumParams() == Source->getNumParams() &&
+ "Functions have different argument counts.");
+ for (unsigned i = 0, E = Target->getNumParams(); i != E; ++i) {
+ if (CheckSpecForTypesEquivalent(
+ *this, PDiag(diag::err_deep_exception_specs_differ) << 1, PDiag(),
+ Target->getParamType(i), TargetLoc, Source->getParamType(i),
+ SourceLoc))
+ return true;
+ }
+ return false;
+}
+
+bool Sema::CheckExceptionSpecCompatibility(Expr *From, QualType ToType) {
+ // First we check for applicability.
+ // Target type must be a function, function pointer or function reference.
+ const FunctionProtoType *ToFunc = GetUnderlyingFunction(ToType);
+ if (!ToFunc || ToFunc->hasDependentExceptionSpec())
+ return false;
+
+ // SourceType must be a function or function pointer.
+ const FunctionProtoType *FromFunc = GetUnderlyingFunction(From->getType());
+ if (!FromFunc || FromFunc->hasDependentExceptionSpec())
+ return false;
+
+ // Now we've got the correct types on both sides, check their compatibility.
+ // This means that the source of the conversion can only throw a subset of
+ // the exceptions of the target, and any exception specs on arguments or
+ // return types must be equivalent.
+ //
+ // FIXME: If there is a nested dependent exception specification, we should
+ // not be checking it here. This is fine:
+ // template<typename T> void f() {
+ // void (*p)(void (*) throw(T));
+ // void (*q)(void (*) throw(int)) = p;
+ // }
+ // ... because it might be instantiated with T=int.
+ return CheckExceptionSpecSubset(PDiag(diag::err_incompatible_exception_specs),
+ PDiag(), ToFunc,
+ From->getSourceRange().getBegin(),
+ FromFunc, SourceLocation());
+}
+
+bool Sema::CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
+ const CXXMethodDecl *Old) {
+ // If the new exception specification hasn't been parsed yet, skip the check.
+ // We'll get called again once it's been parsed.
+ if (New->getType()->castAs<FunctionProtoType>()->getExceptionSpecType() ==
+ EST_Unparsed)
+ return false;
+ if (getLangOpts().CPlusPlus11 && isa<CXXDestructorDecl>(New)) {
+ // Don't check uninstantiated template destructors at all. We can only
+ // synthesize correct specs after the template is instantiated.
+ if (New->getParent()->isDependentType())
+ return false;
+ if (New->getParent()->isBeingDefined()) {
+ // The destructor might be updated once the definition is finished. So
+ // remember it and check later.
+ DelayedExceptionSpecChecks.push_back(std::make_pair(New, Old));
+ return false;
+ }
+ }
+ // If the old exception specification hasn't been parsed yet, remember that
+ // we need to perform this check when we get to the end of the outermost
+ // lexically-surrounding class.
+ if (Old->getType()->castAs<FunctionProtoType>()->getExceptionSpecType() ==
+ EST_Unparsed) {
+ DelayedExceptionSpecChecks.push_back(std::make_pair(New, Old));
+ return false;
+ }
+ unsigned DiagID = diag::err_override_exception_spec;
+ if (getLangOpts().MicrosoftExt)
+ DiagID = diag::ext_override_exception_spec;
+ return CheckExceptionSpecSubset(PDiag(DiagID),
+ PDiag(diag::note_overridden_virtual_function),
+ Old->getType()->getAs<FunctionProtoType>(),
+ Old->getLocation(),
+ New->getType()->getAs<FunctionProtoType>(),
+ New->getLocation());
+}
+
+static CanThrowResult canSubExprsThrow(Sema &S, const Expr *E) {
+ CanThrowResult R = CT_Cannot;
+ for (const Stmt *SubStmt : E->children()) {
+ R = mergeCanThrow(R, S.canThrow(cast<Expr>(SubStmt)));
+ if (R == CT_Can)
+ break;
+ }
+ return R;
+}
+
+static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D) {
+ assert(D && "Expected decl");
+
+ // See if we can get a function type from the decl somehow.
+ const ValueDecl *VD = dyn_cast<ValueDecl>(D);
+ if (!VD) // If we have no clue what we're calling, assume the worst.
+ return CT_Can;
+
+ // As an extension, we assume that __attribute__((nothrow)) functions don't
+ // throw.
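+ //
+ // For example (illustrative, with a hypothetical function ext_fn):
+ //   extern void ext_fn() __attribute__((nothrow));  // treated as CT_Cannot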
+ if (isa<FunctionDecl>(D) && D->hasAttr<NoThrowAttr>())
+ return CT_Cannot;
+
+ QualType T = VD->getType();
+ const FunctionProtoType *FT = T->getAs<FunctionProtoType>();
+ if (!FT) {
+ if (const PointerType *PT = T->getAs<PointerType>())
+ FT = PT->getPointeeType()->getAs<FunctionProtoType>();
+ else if (const ReferenceType *RT = T->getAs<ReferenceType>())
+ FT = RT->getPointeeType()->getAs<FunctionProtoType>();
+ else if (const MemberPointerType *MT = T->getAs<MemberPointerType>())
+ FT = MT->getPointeeType()->getAs<FunctionProtoType>();
+ else if (const BlockPointerType *BT = T->getAs<BlockPointerType>())
+ FT = BT->getPointeeType()->getAs<FunctionProtoType>();
+ }
+
+ if (!FT)
+ return CT_Can;
+
+ FT = S.ResolveExceptionSpec(E->getLocStart(), FT);
+ if (!FT)
+ return CT_Can;
+
+ return FT->isNothrow(S.Context) ? CT_Cannot : CT_Can;
+}
+
+static CanThrowResult canDynamicCastThrow(const CXXDynamicCastExpr *DC) {
+ if (DC->isTypeDependent())
+ return CT_Dependent;
+
+ if (!DC->getTypeAsWritten()->isReferenceType())
+ return CT_Cannot;
+
+ if (DC->getSubExpr()->isTypeDependent())
+ return CT_Dependent;
+
+ return DC->getCastKind() == clang::CK_Dynamic ? CT_Can : CT_Cannot;
+}
+
+static CanThrowResult canTypeidThrow(Sema &S, const CXXTypeidExpr *DC) {
+ if (DC->isTypeOperand())
+ return CT_Cannot;
+
+ Expr *Op = DC->getExprOperand();
+ if (Op->isTypeDependent())
+ return CT_Dependent;
+
+ const RecordType *RT = Op->getType()->getAs<RecordType>();
+ if (!RT)
+ return CT_Cannot;
+
+ if (!cast<CXXRecordDecl>(RT->getDecl())->isPolymorphic())
+ return CT_Cannot;
+
+ if (Op->Classify(S.Context).isPRValue())
+ return CT_Cannot;
+
+ return CT_Can;
+}
+
+CanThrowResult Sema::canThrow(const Expr *E) {
+ // C++ [expr.unary.noexcept]p3:
+ // [Can throw] if in a potentially-evaluated context the expression would
+ // contain:
+ switch (E->getStmtClass()) {
+ case Expr::CXXThrowExprClass:
+ // - a potentially evaluated throw-expression
+ return CT_Can;
+
+ case Expr::CXXDynamicCastExprClass: {
+ // - a potentially evaluated dynamic_cast expression dynamic_cast<T>(v),
+ // where T is a reference type, that requires a run-time check
+ CanThrowResult CT = canDynamicCastThrow(cast<CXXDynamicCastExpr>(E));
+ if (CT == CT_Can)
+ return CT;
+ return mergeCanThrow(CT, canSubExprsThrow(*this, E));
+ }
+
+ case Expr::CXXTypeidExprClass:
+ // - a potentially evaluated typeid expression applied to a glvalue
+ // expression whose type is a polymorphic class type
+ return canTypeidThrow(*this, cast<CXXTypeidExpr>(E));
+
+ // - a potentially evaluated call to a function, member function, function
+ // pointer, or member function pointer that does not have a non-throwing
+ // exception-specification
+ case Expr::CallExprClass:
+ case Expr::CXXMemberCallExprClass:
+ case Expr::CXXOperatorCallExprClass:
+ case Expr::UserDefinedLiteralClass: {
+ const CallExpr *CE = cast<CallExpr>(E);
+ CanThrowResult CT;
+ if (E->isTypeDependent())
+ CT = CT_Dependent;
+ else if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens()))
+ CT = CT_Cannot;
+ else if (CE->getCalleeDecl())
+ CT = canCalleeThrow(*this, E, CE->getCalleeDecl());
+ else
+ CT = CT_Can;
+ if (CT == CT_Can)
+ return CT;
+ return mergeCanThrow(CT, canSubExprsThrow(*this, E));
+ }
+
+ case Expr::CXXConstructExprClass:
+ case Expr::CXXTemporaryObjectExprClass: {
+ CanThrowResult CT = canCalleeThrow(*this, E,
+ cast<CXXConstructExpr>(E)->getConstructor());
+ if (CT == CT_Can)
+ return CT;
+ return mergeCanThrow(CT, canSubExprsThrow(*this, E));
+ }
+
+ case Expr::LambdaExprClass: {
+ const LambdaExpr *Lambda = cast<LambdaExpr>(E);
+ CanThrowResult CT = CT_Cannot;
+ for (LambdaExpr::const_capture_init_iterator
+ Cap = Lambda->capture_init_begin(),
+ CapEnd = Lambda->capture_init_end();
+ Cap != CapEnd; ++Cap)
+ CT = mergeCanThrow(CT, canThrow(*Cap));
+ return CT;
+ }
+
+ case Expr::CXXNewExprClass: {
+ CanThrowResult CT;
+ if (E->isTypeDependent())
+ CT = CT_Dependent;
+ else
+ CT = canCalleeThrow(*this, E, cast<CXXNewExpr>(E)->getOperatorNew());
+ if (CT == CT_Can)
+ return CT;
+ return mergeCanThrow(CT, canSubExprsThrow(*this, E));
+ }
+
+ case Expr::CXXDeleteExprClass: {
+ CanThrowResult CT;
+ QualType DTy = cast<CXXDeleteExpr>(E)->getDestroyedType();
+ if (DTy.isNull() || DTy->isDependentType()) {
+ CT = CT_Dependent;
+ } else {
+ CT = canCalleeThrow(*this, E,
+ cast<CXXDeleteExpr>(E)->getOperatorDelete());
+ if (const RecordType *RT = DTy->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const CXXDestructorDecl *DD = RD->getDestructor();
+ if (DD)
+ CT = mergeCanThrow(CT, canCalleeThrow(*this, E, DD));
+ }
+ if (CT == CT_Can)
+ return CT;
+ }
+ return mergeCanThrow(CT, canSubExprsThrow(*this, E));
+ }
+
+ case Expr::CXXBindTemporaryExprClass: {
+ // The bound temporary has to be destroyed again, which might throw.
+ CanThrowResult CT = canCalleeThrow(*this, E,
+ cast<CXXBindTemporaryExpr>(E)->getTemporary()->getDestructor());
+ if (CT == CT_Can)
+ return CT;
+ return mergeCanThrow(CT, canSubExprsThrow(*this, E));
+ }
+
+ // ObjC message sends are like function calls, but never have exception
+ // specs.
+ case Expr::ObjCMessageExprClass:
+ case Expr::ObjCPropertyRefExprClass:
+ case Expr::ObjCSubscriptRefExprClass:
+ return CT_Can;
+
+ // All the ObjC literals that are implemented as calls are
+ // potentially throwing unless we decide to close off that
+ // possibility.
+ case Expr::ObjCArrayLiteralClass:
+ case Expr::ObjCDictionaryLiteralClass:
+ case Expr::ObjCBoxedExprClass:
+ return CT_Can;
+
+ // Many other things have subexpressions, so we have to test those.
+ // Some are simple:
+ case Expr::CoawaitExprClass:
+ case Expr::ConditionalOperatorClass:
+ case Expr::CompoundLiteralExprClass:
+ case Expr::CoyieldExprClass:
+ case Expr::CXXConstCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXStdInitializerListExprClass:
+ case Expr::DesignatedInitExprClass:
+ case Expr::DesignatedInitUpdateExprClass:
+ case Expr::ExprWithCleanupsClass:
+ case Expr::ExtVectorElementExprClass:
+ case Expr::InitListExprClass:
+ case Expr::MemberExprClass:
+ case Expr::ObjCIsaExprClass:
+ case Expr::ObjCIvarRefExprClass:
+ case Expr::ParenExprClass:
+ case Expr::ParenListExprClass:
+ case Expr::ShuffleVectorExprClass:
+ case Expr::ConvertVectorExprClass:
+ case Expr::VAArgExprClass:
+ return canSubExprsThrow(*this, E);
+
+ // Some might be dependent for other reasons.
+ case Expr::ArraySubscriptExprClass:
+ case Expr::OMPArraySectionExprClass:
+ case Expr::BinaryOperatorClass:
+ case Expr::CompoundAssignOperatorClass:
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXFunctionalCastExprClass:
+ case Expr::ImplicitCastExprClass:
+ case Expr::MaterializeTemporaryExprClass:
+ case Expr::UnaryOperatorClass: {
+ CanThrowResult CT = E->isTypeDependent() ? CT_Dependent : CT_Cannot;
+ return mergeCanThrow(CT, canSubExprsThrow(*this, E));
+ }
+
+ // FIXME: We should handle StmtExpr, but that opens a MASSIVE can of worms.
+ case Expr::StmtExprClass:
+ return CT_Can;
+
+ case Expr::CXXDefaultArgExprClass:
+ return canThrow(cast<CXXDefaultArgExpr>(E)->getExpr());
+
+ case Expr::CXXDefaultInitExprClass:
+ return canThrow(cast<CXXDefaultInitExpr>(E)->getExpr());
+
+ case Expr::ChooseExprClass:
+ if (E->isTypeDependent() || E->isValueDependent())
+ return CT_Dependent;
+ return canThrow(cast<ChooseExpr>(E)->getChosenSubExpr());
+
+ case Expr::GenericSelectionExprClass:
+ if (cast<GenericSelectionExpr>(E)->isResultDependent())
+ return CT_Dependent;
+ return canThrow(cast<GenericSelectionExpr>(E)->getResultExpr());
+
+ // Some expressions are always dependent.
+ case Expr::CXXDependentScopeMemberExprClass:
+ case Expr::CXXUnresolvedConstructExprClass:
+ case Expr::DependentScopeDeclRefExprClass:
+ case Expr::CXXFoldExprClass:
+ return CT_Dependent;
+
+ case Expr::AsTypeExprClass:
+ case Expr::BinaryConditionalOperatorClass:
+ case Expr::BlockExprClass:
+ case Expr::CUDAKernelCallExprClass:
+ case Expr::DeclRefExprClass:
+ case Expr::ObjCBridgedCastExprClass:
+ case Expr::ObjCIndirectCopyRestoreExprClass:
+ case Expr::ObjCProtocolExprClass:
+ case Expr::ObjCSelectorExprClass:
+ case Expr::OffsetOfExprClass:
+ case Expr::PackExpansionExprClass:
+ case Expr::PseudoObjectExprClass:
+ case Expr::SubstNonTypeTemplateParmExprClass:
+ case Expr::SubstNonTypeTemplateParmPackExprClass:
+ case Expr::FunctionParmPackExprClass:
+ case Expr::UnaryExprOrTypeTraitExprClass:
+ case Expr::UnresolvedLookupExprClass:
+ case Expr::UnresolvedMemberExprClass:
+ case Expr::TypoExprClass:
+ // FIXME: Can any of the above throw? If so, when?
+ return CT_Cannot;
+
+ case Expr::AddrLabelExprClass:
+ case Expr::ArrayTypeTraitExprClass:
+ case Expr::AtomicExprClass:
+ case Expr::TypeTraitExprClass:
+ case Expr::CXXBoolLiteralExprClass:
+ case Expr::CXXNoexceptExprClass:
+ case Expr::CXXNullPtrLiteralExprClass:
+ case Expr::CXXPseudoDestructorExprClass:
+ case Expr::CXXScalarValueInitExprClass:
+ case Expr::CXXThisExprClass:
+ case Expr::CXXUuidofExprClass:
+ case Expr::CharacterLiteralClass:
+ case Expr::ExpressionTraitExprClass:
+ case Expr::FloatingLiteralClass:
+ case Expr::GNUNullExprClass:
+ case Expr::ImaginaryLiteralClass:
+ case Expr::ImplicitValueInitExprClass:
+ case Expr::IntegerLiteralClass:
+ case Expr::NoInitExprClass:
+ case Expr::ObjCEncodeExprClass:
+ case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCBoolLiteralExprClass:
+ case Expr::OpaqueValueExprClass:
+ case Expr::PredefinedExprClass:
+ case Expr::SizeOfPackExprClass:
+ case Expr::StringLiteralClass:
+ // These expressions can never throw.
+ return CT_Cannot;
+
+ case Expr::MSPropertyRefExprClass:
+ case Expr::MSPropertySubscriptExprClass:
+ llvm_unreachable("Invalid class for expression");
+
+#define STMT(CLASS, PARENT) case Expr::CLASS##Class:
+#define STMT_RANGE(Base, First, Last)
+#define LAST_STMT_RANGE(BASE, FIRST, LAST)
+#define EXPR(CLASS, PARENT)
+#define ABSTRACT_STMT(STMT)
+#include "clang/AST/StmtNodes.inc"
+ case Expr::NoStmtClass:
+ llvm_unreachable("Invalid class for expression");
+ }
+ llvm_unreachable("Bogus StmtClass");
+}
+
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
new file mode 100644
index 0000000..5d0c605
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
@@ -0,0 +1,14594 @@
+//===--- SemaExpr.cpp - Semantic Analysis for Expressions -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "TreeTransform.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTLambda.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprOpenMP.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/AnalysisBasedWarnings.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/DelayedDiagnostic.h"
+#include "clang/Sema/Designator.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaFixItUtils.h"
+#include "clang/Sema/Template.h"
+#include "llvm/Support/ConvertUTF.h"
+using namespace clang;
+using namespace sema;
+
+/// \brief Determine whether the use of this declaration is valid, without
+/// emitting diagnostics.
+bool Sema::CanUseDecl(NamedDecl *D) {
+ // See if this is an auto-typed variable whose initializer we are parsing.
+ if (ParsingInitForAutoVars.count(D))
+ return false;
+
+ // See if this is a deleted function.
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isDeleted())
+ return false;
+
+ // If the function has a deduced return type, and we can't deduce it,
+ // then we can't use it either.
+ if (getLangOpts().CPlusPlus14 && FD->getReturnType()->isUndeducedType() &&
+ DeduceReturnType(FD, SourceLocation(), /*Diagnose*/ false))
+ return false;
+ }
+
+ // See if this function is unavailable.
+ if (D->getAvailability() == AR_Unavailable &&
+ cast<Decl>(CurContext)->getAvailability() != AR_Unavailable)
+ return false;
+
+ return true;
+}
+
+static void DiagnoseUnusedOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc) {
+ // Warn if this is used but marked unused.
+ if (D->hasAttr<UnusedAttr>()) {
+ const Decl *DC = cast_or_null<Decl>(S.getCurObjCLexicalContext());
+ if (DC && !DC->hasAttr<UnusedAttr>())
+ S.Diag(Loc, diag::warn_used_but_marked_unused) << D->getDeclName();
+ }
+}
+
+static bool HasRedeclarationWithoutAvailabilityInCategory(const Decl *D) {
+ const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
+ if (!OMD)
+ return false;
+ const ObjCInterfaceDecl *OID = OMD->getClassInterface();
+ if (!OID)
+ return false;
+
+ for (const ObjCCategoryDecl *Cat : OID->visible_categories())
+ if (ObjCMethodDecl *CatMeth =
+ Cat->getMethod(OMD->getSelector(), OMD->isInstanceMethod()))
+ if (!CatMeth->hasAttr<AvailabilityAttr>())
+ return true;
+ return false;
+}
+
+static AvailabilityResult
+DiagnoseAvailabilityOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc,
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ bool ObjCPropertyAccess) {
+ // See if this declaration is unavailable or deprecated.
+ std::string Message;
+ AvailabilityResult Result = D->getAvailability(&Message);
+
+ // For typedefs, if the typedef declaration appears available look
+ // to the underlying type to see if it is more restrictive.
+ while (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+ if (Result == AR_Available) {
+ if (const TagType *TT = TD->getUnderlyingType()->getAs<TagType>()) {
+ D = TT->getDecl();
+ Result = D->getAvailability(&Message);
+ continue;
+ }
+ }
+ break;
+ }
+
+ // Forward class declarations get their attributes from their definition.
+ if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(D)) {
+ if (IDecl->getDefinition()) {
+ D = IDecl->getDefinition();
+ Result = D->getAvailability(&Message);
+ }
+ }
+
+ if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(D))
+ if (Result == AR_Available) {
+ const DeclContext *DC = ECD->getDeclContext();
+ if (const EnumDecl *TheEnumDecl = dyn_cast<EnumDecl>(DC))
+ Result = TheEnumDecl->getAvailability(&Message);
+ }
+
+ const ObjCPropertyDecl *ObjCPDecl = nullptr;
+ if (Result == AR_Deprecated || Result == AR_Unavailable ||
+ Result == AR_NotYetIntroduced) {
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ if (const ObjCPropertyDecl *PD = MD->findPropertyDecl()) {
+ AvailabilityResult PDeclResult = PD->getAvailability(nullptr);
+ if (PDeclResult == Result)
+ ObjCPDecl = PD;
+ }
+ }
+ }
+
+ switch (Result) {
+ case AR_Available:
+ break;
+
+ case AR_Deprecated:
+ if (S.getCurContextAvailability() != AR_Deprecated)
+ S.EmitAvailabilityWarning(Sema::AD_Deprecation,
+ D, Message, Loc, UnknownObjCClass, ObjCPDecl,
+ ObjCPropertyAccess);
+ break;
+
+ case AR_NotYetIntroduced: {
+ // Don't do this for enums, they can't be redeclared.
+ if (isa<EnumConstantDecl>(D) || isa<EnumDecl>(D))
+ break;
+
+ bool Warn = !D->getAttr<AvailabilityAttr>()->isInherited();
+ // Objective-C method declarations in categories are not modelled as
+ // redeclarations, so manually look for a redeclaration in a category
+ // if necessary.
+ if (Warn && HasRedeclarationWithoutAvailabilityInCategory(D))
+ Warn = false;
+ // In general, D will point to the most recent redeclaration. However,
+ // for `@class A;` decls, this isn't true -- manually go through the
+ // redecl chain in that case.
+ if (Warn && isa<ObjCInterfaceDecl>(D))
+ for (Decl *Redecl = D->getMostRecentDecl(); Redecl && Warn;
+ Redecl = Redecl->getPreviousDecl())
+ if (!Redecl->hasAttr<AvailabilityAttr>() ||
+ Redecl->getAttr<AvailabilityAttr>()->isInherited())
+ Warn = false;
+
+ if (Warn)
+ S.EmitAvailabilityWarning(Sema::AD_Partial, D, Message, Loc,
+ UnknownObjCClass, ObjCPDecl,
+ ObjCPropertyAccess);
+ break;
+ }
+
+ case AR_Unavailable:
+ if (S.getCurContextAvailability() != AR_Unavailable)
+ S.EmitAvailabilityWarning(Sema::AD_Unavailable,
+ D, Message, Loc, UnknownObjCClass, ObjCPDecl,
+ ObjCPropertyAccess);
+ break;
+
+ }
+ return Result;
+}
+
+/// \brief Emit a note explaining that this function is deleted.
+void Sema::NoteDeletedFunction(FunctionDecl *Decl) {
+ assert(Decl->isDeleted());
+
+ CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Decl);
+
+ if (Method && Method->isDeleted() && Method->isDefaulted()) {
+ // If the method was explicitly defaulted, point at that declaration.
+ if (!Method->isImplicit())
+ Diag(Decl->getLocation(), diag::note_implicitly_deleted);
+
+ // Try to diagnose why this special member function was implicitly
+ // deleted. This might fail, if that reason no longer applies.
+ CXXSpecialMember CSM = getSpecialMember(Method);
+ if (CSM != CXXInvalid)
+ ShouldDeleteSpecialMember(Method, CSM, /*Diagnose=*/true);
+
+ return;
+ }
+
+ if (CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Decl)) {
+ if (CXXConstructorDecl *BaseCD =
+ const_cast<CXXConstructorDecl*>(CD->getInheritedConstructor())) {
+ Diag(Decl->getLocation(), diag::note_inherited_deleted_here);
+ if (BaseCD->isDeleted()) {
+ NoteDeletedFunction(BaseCD);
+ } else {
+ // FIXME: An explanation of why exactly it can't be inherited
+ // would be nice.
+ Diag(BaseCD->getLocation(), diag::note_cannot_inherit);
+ }
+ return;
+ }
+ }
+
+ Diag(Decl->getLocation(), diag::note_availability_specified_here)
+ << Decl << true;
+}
+
+/// \brief Determine whether a FunctionDecl was ever declared with an
+/// explicit storage class.
+static bool hasAnyExplicitStorageClass(const FunctionDecl *D) {
+ for (auto I : D->redecls()) {
+ if (I->getStorageClass() != SC_None)
+ return true;
+ }
+ return false;
+}
+
+/// \brief Check whether we're in an extern inline function and referring to a
+/// variable or function with internal linkage (C11 6.7.4p3).
+///
+/// This is only a warning because we used to silently accept this code, but
+/// in many cases it will not behave correctly. This is not enabled in C++ mode
+/// because the restriction language is a bit weaker (C++11 [basic.def.odr]p6)
+/// and so while there may still be user mistakes, most of the time we can't
+/// prove that there are errors.
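+///
+/// For example (illustrative only, in C11):
+///   static int helper(void) { return 0; }
+///   inline int wrapper(void) { return helper(); }  // warns: 'helper' has
+///                                                  // internal linkage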
+static void diagnoseUseOfInternalDeclInInlineFunction(Sema &S,
+ const NamedDecl *D,
+ SourceLocation Loc) {
+ // This is disabled under C++; there are too many ways for this to fire in
+ // contexts where the warning is a false positive, or where it is technically
+ // correct but benign.
+ if (S.getLangOpts().CPlusPlus)
+ return;
+
+ // Check if this is an inlined function or method.
+ FunctionDecl *Current = S.getCurFunctionDecl();
+ if (!Current)
+ return;
+ if (!Current->isInlined())
+ return;
+ if (!Current->isExternallyVisible())
+ return;
+
+ // Check if the decl has internal linkage.
+ if (D->getFormalLinkage() != InternalLinkage)
+ return;
+
+ // Downgrade from ExtWarn to Extension if
+ // (1) the supposedly external inline function is in the main file,
+ // and probably won't be included anywhere else.
+ // (2) the thing we're referencing is a pure function.
+ // (3) the thing we're referencing is another inline function.
+ // This last can give us false negatives, but it's better than warning on
+ // wrappers for simple C library functions.
+ const FunctionDecl *UsedFn = dyn_cast<FunctionDecl>(D);
+ bool DowngradeWarning = S.getSourceManager().isInMainFile(Loc);
+ if (!DowngradeWarning && UsedFn)
+ DowngradeWarning = UsedFn->isInlined() || UsedFn->hasAttr<ConstAttr>();
+
+ S.Diag(Loc, DowngradeWarning ? diag::ext_internal_in_extern_inline_quiet
+ : diag::ext_internal_in_extern_inline)
+ << /*IsVar=*/!UsedFn << D;
+
+ S.MaybeSuggestAddingStaticToDecl(Current);
+
+ S.Diag(D->getCanonicalDecl()->getLocation(), diag::note_entity_declared_at)
+ << D;
+}
+
+void Sema::MaybeSuggestAddingStaticToDecl(const FunctionDecl *Cur) {
+ const FunctionDecl *First = Cur->getFirstDecl();
+
+ // Suggest "static" on the function, if possible.
+ if (!hasAnyExplicitStorageClass(First)) {
+ SourceLocation DeclBegin = First->getSourceRange().getBegin();
+ Diag(DeclBegin, diag::note_convert_inline_to_static)
+ << Cur << FixItHint::CreateInsertion(DeclBegin, "static ");
+ }
+}
+
+/// \brief Determine whether the use of this declaration is valid, and
+/// emit any corresponding diagnostics.
+///
+/// This routine diagnoses various problems with referencing
+/// declarations that can occur when using a declaration. For example,
+/// it might warn if a deprecated or unavailable declaration is being
+/// used, or produce an error (and return true) if a C++0x deleted
+/// function is being used.
+///
+/// \returns true if there was an error (this declaration cannot be
+/// referenced), false otherwise.
+///
+bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ bool ObjCPropertyAccess) {
+ if (getLangOpts().CPlusPlus && isa<FunctionDecl>(D)) {
+ // If there were any diagnostics suppressed by template argument deduction,
+ // emit them now.
+ auto Pos = SuppressedDiagnostics.find(D->getCanonicalDecl());
+ if (Pos != SuppressedDiagnostics.end()) {
+ for (const PartialDiagnosticAt &Suppressed : Pos->second)
+ Diag(Suppressed.first, Suppressed.second);
+
+ // Clear out the list of suppressed diagnostics, so that we don't emit
+ // them again for this specialization. However, we don't obsolete this
+ // entry from the table, because we want to avoid ever emitting these
+ // diagnostics again.
+ Pos->second.clear();
+ }
+
+ // C++ [basic.start.main]p3:
+ // The function 'main' shall not be used within a program.
+ if (cast<FunctionDecl>(D)->isMain())
+ Diag(Loc, diag::ext_main_used);
+ }
+
+ // See if this is an auto-typed variable whose initializer we are parsing.
+ if (ParsingInitForAutoVars.count(D)) {
+ const AutoType *AT = cast<VarDecl>(D)->getType()->getContainedAutoType();
+
+ Diag(Loc, diag::err_auto_variable_cannot_appear_in_own_initializer)
+ << D->getDeclName() << (unsigned)AT->getKeyword();
+ return true;
+ }
+
+ // See if this is a deleted function.
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isDeleted()) {
+ Diag(Loc, diag::err_deleted_function_use);
+ NoteDeletedFunction(FD);
+ return true;
+ }
+
+ // If the function has a deduced return type, and we can't deduce it,
+ // then we can't use it either.
+ if (getLangOpts().CPlusPlus14 && FD->getReturnType()->isUndeducedType() &&
+ DeduceReturnType(FD, Loc))
+ return true;
+ }
+ DiagnoseAvailabilityOfDecl(*this, D, Loc, UnknownObjCClass,
+ ObjCPropertyAccess);
+
+ DiagnoseUnusedOfDecl(*this, D, Loc);
+
+ diagnoseUseOfInternalDeclInInlineFunction(*this, D, Loc);
+
+ return false;
+}
+
+/// \brief Retrieve the message suffix that should be added to a
+/// diagnostic complaining about the given function being deleted or
+/// unavailable.
+std::string Sema::getDeletedOrUnavailableSuffix(const FunctionDecl *FD) {
+ std::string Message;
+ if (FD->getAvailability(&Message))
+ return ": " + Message;
+
+ return std::string();
+}
+
+/// DiagnoseSentinelCalls - This routine checks whether a call or
+/// message-send is to a declaration with the sentinel attribute, and
+/// if so, it checks that the requirements of the sentinel are
+/// satisfied.
+void Sema::DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
+ ArrayRef<Expr *> Args) {
+ const SentinelAttr *attr = D->getAttr<SentinelAttr>();
+ if (!attr)
+ return;
+
+ // The number of formal parameters of the declaration.
+ unsigned numFormalParams;
+
+ // The kind of declaration. This is also an index into a %select in
+ // the diagnostic.
+ enum CalleeType { CT_Function, CT_Method, CT_Block } calleeType;
+
+ if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ numFormalParams = MD->param_size();
+ calleeType = CT_Method;
+ } else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ numFormalParams = FD->param_size();
+ calleeType = CT_Function;
+ } else if (isa<VarDecl>(D)) {
+ QualType type = cast<ValueDecl>(D)->getType();
+ const FunctionType *fn = nullptr;
+ if (const PointerType *ptr = type->getAs<PointerType>()) {
+ fn = ptr->getPointeeType()->getAs<FunctionType>();
+ if (!fn) return;
+ calleeType = CT_Function;
+ } else if (const BlockPointerType *ptr = type->getAs<BlockPointerType>()) {
+ fn = ptr->getPointeeType()->castAs<FunctionType>();
+ calleeType = CT_Block;
+ } else {
+ return;
+ }
+
+ if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fn)) {
+ numFormalParams = proto->getNumParams();
+ } else {
+ numFormalParams = 0;
+ }
+ } else {
+ return;
+ }
+
+ // "nullPos" is the number of formal parameters at the end which
+ // effectively count as part of the variadic arguments. This is
+ // useful if you would prefer to not have *any* formal parameters,
+ // but the language forces you to have at least one.
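+ //
+ // For example (illustrative only):
+ //   void f(const char *first, ...) __attribute__((sentinel(0, 1)));
+ // declares 'first' to effectively count as one of the variadic arguments
+ // for the purposes of this check.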
+ unsigned nullPos = attr->getNullPos();
+ assert((nullPos == 0 || nullPos == 1) && "invalid null position on sentinel");
+ numFormalParams = (nullPos > numFormalParams ? 0 : numFormalParams - nullPos);
+
+ // The number of arguments which should follow the sentinel.
+ unsigned numArgsAfterSentinel = attr->getSentinel();
+
+ // If there aren't enough arguments for all the formal parameters,
+ // the sentinel, and the args after the sentinel, complain.
+ if (Args.size() < numFormalParams + numArgsAfterSentinel + 1) {
+ Diag(Loc, diag::warn_not_enough_argument) << D->getDeclName();
+ Diag(D->getLocation(), diag::note_sentinel_here) << int(calleeType);
+ return;
+ }
+
+ // Otherwise, find the sentinel expression.
+ Expr *sentinelExpr = Args[Args.size() - numArgsAfterSentinel - 1];
+ if (!sentinelExpr) return;
+ if (sentinelExpr->isValueDependent()) return;
+ if (Context.isSentinelNullExpr(sentinelExpr)) return;
+
+ // Pick a reasonable string to insert. Optimistically use 'nil', 'nullptr',
+ // or 'NULL' if those are actually defined in the context. Only use
+ // 'nil' for ObjC methods, where it's much more likely that the
+ // variadic arguments form a list of object pointers.
+ SourceLocation MissingNilLoc =
+ getLocForEndOfToken(sentinelExpr->getLocEnd());
+ std::string NullValue;
+ if (calleeType == CT_Method && PP.isMacroDefined("nil"))
+ NullValue = "nil";
+ else if (getLangOpts().CPlusPlus11)
+ NullValue = "nullptr";
+ else if (PP.isMacroDefined("NULL"))
+ NullValue = "NULL";
+ else
+ NullValue = "(void*) 0";
+
+ if (MissingNilLoc.isInvalid())
+ Diag(Loc, diag::warn_missing_sentinel) << int(calleeType);
+ else
+ Diag(MissingNilLoc, diag::warn_missing_sentinel)
+ << int(calleeType)
+ << FixItHint::CreateInsertion(MissingNilLoc, ", " + NullValue);
+ Diag(D->getLocation(), diag::note_sentinel_here) << int(calleeType);
+}
+
+SourceRange Sema::getExprRange(Expr *E) const {
+ return E ? E->getSourceRange() : SourceRange();
+}
+
+//===----------------------------------------------------------------------===//
+// Standard Promotions and Conversions
+//===----------------------------------------------------------------------===//
+
+/// DefaultFunctionArrayConversion (C99 6.3.2.1p3, C99 6.3.2.1p4).
+ExprResult Sema::DefaultFunctionArrayConversion(Expr *E, bool Diagnose) {
+ // Handle any placeholder expressions which made it here.
+ if (E->getType()->isPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(E);
+ if (result.isInvalid()) return ExprError();
+ E = result.get();
+ }
+
+ QualType Ty = E->getType();
+ assert(!Ty.isNull() && "DefaultFunctionArrayConversion - missing type");
+
+ if (Ty->isFunctionType()) {
+ // If we are here, we are not calling a function but taking
+ // its address (which is not allowed in OpenCL v1.0 s6.8.a.3).
+ if (getLangOpts().OpenCL) {
+ if (Diagnose)
+ Diag(E->getExprLoc(), diag::err_opencl_taking_function_address);
+ return ExprError();
+ }
+
+ if (auto *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()))
+ if (auto *FD = dyn_cast<FunctionDecl>(DRE->getDecl()))
+ if (!checkAddressOfFunctionIsAvailable(FD, Diagnose, E->getExprLoc()))
+ return ExprError();
+
+ E = ImpCastExprToType(E, Context.getPointerType(Ty),
+ CK_FunctionToPointerDecay).get();
+ } else if (Ty->isArrayType()) {
+ // In C90 mode, arrays only promote to pointers if the array expression is
+ // an lvalue. The relevant legalese is C90 6.2.2.1p3: "an lvalue that has
+ // type 'array of type' is converted to an expression that has type 'pointer
+ // to type'...". In C99 this was changed to: C99 6.3.2.1p3: "an expression
+ // that has type 'array of type' ...". The relevant change is "an lvalue"
+ // (C90) to "an expression" (C99).
+ //
+ // C++ 4.2p1:
+ // An lvalue or rvalue of type "array of N T" or "array of unknown bound of
+ // T" can be converted to an rvalue of type "pointer to T".
+ //
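+ // For example (illustrative only):
+ //   int a[4];
+ //   int *p = a;  // decays; 'a' is an lvalue, so this is valid even in C90
+ //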
+ if (getLangOpts().C99 || getLangOpts().CPlusPlus || E->isLValue())
+ E = ImpCastExprToType(E, Context.getArrayDecayedType(Ty),
+ CK_ArrayToPointerDecay).get();
+ }
+ return E;
+}
+
+static void CheckForNullPointerDereference(Sema &S, Expr *E) {
+ // Check to see if we are dereferencing a null pointer. If so,
+ // and if not volatile-qualified, this is undefined behavior that the
+ // optimizer will delete, so warn about it. People sometimes try to use this
+ // to get a deterministic trap and are surprised by clang's behavior. This
+ // only handles the pattern "*null", which is a very syntactic check.
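+ //
+ // For example (illustrative only):
+ //   *(int *)0 = 0;           // warned: UB that the optimizer may delete
+ //   *(volatile int *)0 = 0;  // not warned: volatile access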
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParenCasts()))
+ if (UO->getOpcode() == UO_Deref &&
+ UO->getSubExpr()->IgnoreParenCasts()->
+ isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull) &&
+ !UO->getType().isVolatileQualified()) {
+ S.DiagRuntimeBehavior(UO->getOperatorLoc(), UO,
+ S.PDiag(diag::warn_indirection_through_null)
+ << UO->getSubExpr()->getSourceRange());
+ S.DiagRuntimeBehavior(UO->getOperatorLoc(), UO,
+ S.PDiag(diag::note_indirection_through_null));
+ }
+}
+
+static void DiagnoseDirectIsaAccess(Sema &S, const ObjCIvarRefExpr *OIRE,
+ SourceLocation AssignLoc,
+ const Expr* RHS) {
+ const ObjCIvarDecl *IV = OIRE->getDecl();
+ if (!IV)
+ return;
+
+ DeclarationName MemberName = IV->getDeclName();
+ IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
+ if (!Member || !Member->isStr("isa"))
+ return;
+
+ const Expr *Base = OIRE->getBase();
+ QualType BaseType = Base->getType();
+ if (OIRE->isArrow())
+ BaseType = BaseType->getPointeeType();
+ if (const ObjCObjectType *OTy = BaseType->getAs<ObjCObjectType>())
+ if (ObjCInterfaceDecl *IDecl = OTy->getInterface()) {
+ ObjCInterfaceDecl *ClassDeclared = nullptr;
+ ObjCIvarDecl *IV = IDecl->lookupInstanceVariable(Member, ClassDeclared);
+ if (!ClassDeclared->getSuperClass() &&
+ (*ClassDeclared->ivar_begin()) == IV) {
+ if (RHS) {
+ NamedDecl *ObjectSetClass =
+ S.LookupSingleName(S.TUScope,
+ &S.Context.Idents.get("object_setClass"),
+ SourceLocation(), S.LookupOrdinaryName);
+ if (ObjectSetClass) {
+ SourceLocation RHSLocEnd = S.getLocForEndOfToken(RHS->getLocEnd());
+ S.Diag(OIRE->getExprLoc(), diag::warn_objc_isa_assign) <<
+ FixItHint::CreateInsertion(OIRE->getLocStart(), "object_setClass(") <<
+ FixItHint::CreateReplacement(SourceRange(OIRE->getOpLoc(),
+ AssignLoc), ",") <<
+ FixItHint::CreateInsertion(RHSLocEnd, ")");
+ }
+ else
+ S.Diag(OIRE->getLocation(), diag::warn_objc_isa_assign);
+ } else {
+ NamedDecl *ObjectGetClass =
+ S.LookupSingleName(S.TUScope,
+ &S.Context.Idents.get("object_getClass"),
+ SourceLocation(), S.LookupOrdinaryName);
+ if (ObjectGetClass)
+ S.Diag(OIRE->getExprLoc(), diag::warn_objc_isa_use) <<
+ FixItHint::CreateInsertion(OIRE->getLocStart(), "object_getClass(") <<
+ FixItHint::CreateReplacement(
+ SourceRange(OIRE->getOpLoc(),
+ OIRE->getLocEnd()), ")");
+ else
+ S.Diag(OIRE->getLocation(), diag::warn_objc_isa_use);
+ }
+ S.Diag(IV->getLocation(), diag::note_ivar_decl);
+ }
+ }
+}
+
+ExprResult Sema::DefaultLvalueConversion(Expr *E) {
+ // Handle any placeholder expressions which made it here.
+ if (E->getType()->isPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(E);
+ if (result.isInvalid()) return ExprError();
+ E = result.get();
+ }
+
+ // C++ [conv.lval]p1:
+ // A glvalue of a non-function, non-array type T can be
+ // converted to a prvalue.
+ if (!E->isGLValue()) return E;
+
+ QualType T = E->getType();
+ assert(!T.isNull() && "r-value conversion on typeless expression?");
+
+ // We don't want to throw lvalue-to-rvalue casts on top of
+ // expressions of certain types in C++.
+ if (getLangOpts().CPlusPlus &&
+ (E->getType() == Context.OverloadTy ||
+ T->isDependentType() ||
+ T->isRecordType()))
+ return E;
+
+ // The C standard is actually really unclear on this point, and
+ // DR106 tells us what the result should be but not why. It's
+  // generally best to say that void types just don't undergo
+ // lvalue-to-rvalue at all. Note that expressions of unqualified
+ // 'void' type are never l-values, but qualified void can be.
+ if (T->isVoidType())
+ return E;
+
+ // OpenCL usually rejects direct accesses to values of 'half' type.
+ if (getLangOpts().OpenCL && !getOpenCLOptions().cl_khr_fp16 &&
+ T->isHalfType()) {
+ Diag(E->getExprLoc(), diag::err_opencl_half_load_store)
+ << 0 << T;
+ return ExprError();
+ }
+
+ CheckForNullPointerDereference(*this, E);
+ if (const ObjCIsaExpr *OISA = dyn_cast<ObjCIsaExpr>(E->IgnoreParenCasts())) {
+ NamedDecl *ObjectGetClass = LookupSingleName(TUScope,
+ &Context.Idents.get("object_getClass"),
+ SourceLocation(), LookupOrdinaryName);
+ if (ObjectGetClass)
+ Diag(E->getExprLoc(), diag::warn_objc_isa_use) <<
+ FixItHint::CreateInsertion(OISA->getLocStart(), "object_getClass(") <<
+ FixItHint::CreateReplacement(
+ SourceRange(OISA->getOpLoc(), OISA->getIsaMemberLoc()), ")");
+ else
+ Diag(E->getExprLoc(), diag::warn_objc_isa_use);
+ }
+ else if (const ObjCIvarRefExpr *OIRE =
+ dyn_cast<ObjCIvarRefExpr>(E->IgnoreParenCasts()))
+ DiagnoseDirectIsaAccess(*this, OIRE, SourceLocation(), /* Expr*/nullptr);
+
+ // C++ [conv.lval]p1:
+ // [...] If T is a non-class type, the type of the prvalue is the
+ // cv-unqualified version of T. Otherwise, the type of the
+ // rvalue is T.
+ //
+ // C99 6.3.2.1p2:
+ // If the lvalue has qualified type, the value has the unqualified
+ // version of the type of the lvalue; otherwise, the value has the
+ // type of the lvalue.
+ if (T.hasQualifiers())
+ T = T.getUnqualifiedType();
+
+ // Under the MS ABI, lock down the inheritance model now.
+ if (T->isMemberPointerType() &&
+ Context.getTargetInfo().getCXXABI().isMicrosoft())
+ (void)isCompleteType(E->getExprLoc(), T);
+
+ UpdateMarkingForLValueToRValue(E);
+
+ // Loading a __weak object implicitly retains the value, so we need a cleanup to
+ // balance that.
+ if (getLangOpts().ObjCAutoRefCount &&
+ E->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
+ ExprNeedsCleanups = true;
+
+ ExprResult Res = ImplicitCastExpr::Create(Context, T, CK_LValueToRValue, E,
+ nullptr, VK_RValue);
+
+ // C11 6.3.2.1p2:
+ // ... if the lvalue has atomic type, the value has the non-atomic version
+ // of the type of the lvalue ...
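+  // Illustrative example (assumed): a load from '_Atomic int a;' yields a
+  // plain 'int', so 'a + 1' has type 'int', not '_Atomic int'.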
+ if (const AtomicType *Atomic = T->getAs<AtomicType>()) {
+ T = Atomic->getValueType().getUnqualifiedType();
+ Res = ImplicitCastExpr::Create(Context, T, CK_AtomicToNonAtomic, Res.get(),
+ nullptr, VK_RValue);
+ }
+
+ return Res;
+}
+
+ExprResult Sema::DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose) {
+ ExprResult Res = DefaultFunctionArrayConversion(E, Diagnose);
+ if (Res.isInvalid())
+ return ExprError();
+ Res = DefaultLvalueConversion(Res.get());
+ if (Res.isInvalid())
+ return ExprError();
+ return Res;
+}
+
+/// CallExprUnaryConversions - a special case of a unary conversion
+/// performed on a function designator of a call expression.
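+/// Illustrative example (assumed): for 'f(x)' the designator 'f' has
+/// function type and decays to a pointer here, while for 'pf(x)' with a
+/// function pointer 'pf' only the lvalue conversion below is applied.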
+ExprResult Sema::CallExprUnaryConversions(Expr *E) {
+ QualType Ty = E->getType();
+ ExprResult Res = E;
+ // Only do implicit cast for a function type, but not for a pointer
+ // to function type.
+ if (Ty->isFunctionType()) {
+ Res = ImpCastExprToType(E, Context.getPointerType(Ty),
+ CK_FunctionToPointerDecay).get();
+ if (Res.isInvalid())
+ return ExprError();
+ }
+ Res = DefaultLvalueConversion(Res.get());
+ if (Res.isInvalid())
+ return ExprError();
+ return Res.get();
+}
+
+/// UsualUnaryConversions - Performs various conversions that are common to most
+/// operators (C99 6.3). The conversions of array and function types are
+/// sometimes suppressed. For example, the array->pointer conversion doesn't
+/// apply if the array is an argument to the sizeof or address (&) operators.
+/// In these instances, this routine should *not* be called.
+ExprResult Sema::UsualUnaryConversions(Expr *E) {
+ // First, convert to an r-value.
+ ExprResult Res = DefaultFunctionArrayLvalueConversion(E);
+ if (Res.isInvalid())
+ return ExprError();
+ E = Res.get();
+
+ QualType Ty = E->getType();
+ assert(!Ty.isNull() && "UsualUnaryConversions - missing type");
+
+  // Half FP has to be promoted to float unless it is natively supported
+ if (Ty->isHalfType() && !getLangOpts().NativeHalfType)
+ return ImpCastExprToType(Res.get(), Context.FloatTy, CK_FloatingCast);
+
+ // Try to perform integral promotions if the object has a theoretically
+ // promotable type.
+ if (Ty->isIntegralOrUnscopedEnumerationType()) {
+ // C99 6.3.1.1p2:
+ //
+ // The following may be used in an expression wherever an int or
+ // unsigned int may be used:
+ // - an object or expression with an integer type whose integer
+ // conversion rank is less than or equal to the rank of int
+ // and unsigned int.
+ // - A bit-field of type _Bool, int, signed int, or unsigned int.
+ //
+ // If an int can represent all values of the original type, the
+ // value is converted to an int; otherwise, it is converted to an
+ // unsigned int. These are called the integer promotions. All
+ // other types are unchanged by the integer promotions.
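+    // Illustrative example (assumed, typical targets): for
+    //   unsigned char c = 255;
+    // 'c + 1' promotes c to int, so the result is 256 rather than 0.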
+
+ QualType PTy = Context.isPromotableBitField(E);
+ if (!PTy.isNull()) {
+ E = ImpCastExprToType(E, PTy, CK_IntegralCast).get();
+ return E;
+ }
+ if (Ty->isPromotableIntegerType()) {
+ QualType PT = Context.getPromotedIntegerType(Ty);
+ E = ImpCastExprToType(E, PT, CK_IntegralCast).get();
+ return E;
+ }
+ }
+ return E;
+}
+
+/// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
+/// do not have a prototype. Arguments that have type float or __fp16
+/// are promoted to double. All other argument types are converted by
+/// UsualUnaryConversions().
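+///
+/// Illustrative example (assumed): for an unprototyped call 'f(1.0f, c)'
+/// with 'char c', the float argument is passed as double and the char as
+/// int; variadic arguments such as printf's receive the same promotions.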
+ExprResult Sema::DefaultArgumentPromotion(Expr *E) {
+ QualType Ty = E->getType();
+ assert(!Ty.isNull() && "DefaultArgumentPromotion - missing type");
+
+ ExprResult Res = UsualUnaryConversions(E);
+ if (Res.isInvalid())
+ return ExprError();
+ E = Res.get();
+
+ // If this is a 'float' or '__fp16' (CVR qualified or typedef) promote to
+ // double.
+ const BuiltinType *BTy = Ty->getAs<BuiltinType>();
+ if (BTy && (BTy->getKind() == BuiltinType::Half ||
+ BTy->getKind() == BuiltinType::Float))
+ E = ImpCastExprToType(E, Context.DoubleTy, CK_FloatingCast).get();
+
+ // C++ performs lvalue-to-rvalue conversion as a default argument
+ // promotion, even on class types, but note:
+ // C++11 [conv.lval]p2:
+ // When an lvalue-to-rvalue conversion occurs in an unevaluated
+ // operand or a subexpression thereof the value contained in the
+ // referenced object is not accessed. Otherwise, if the glvalue
+ // has a class type, the conversion copy-initializes a temporary
+ // of type T from the glvalue and the result of the conversion
+ // is a prvalue for the temporary.
+ // FIXME: add some way to gate this entire thing for correctness in
+  // potentially potentially-evaluated contexts.
+ if (getLangOpts().CPlusPlus && E->isGLValue() && !isUnevaluatedContext()) {
+ ExprResult Temp = PerformCopyInitialization(
+ InitializedEntity::InitializeTemporary(E->getType()),
+ E->getExprLoc(), E);
+ if (Temp.isInvalid())
+ return ExprError();
+ E = Temp.get();
+ }
+
+ return E;
+}
+
+/// Determine the degree of POD-ness for an expression.
+/// Incomplete types are considered POD, since this check can be performed
+/// when we're in an unevaluated context.
+Sema::VarArgKind Sema::isValidVarArgType(const QualType &Ty) {
+ if (Ty->isIncompleteType()) {
+ // C++11 [expr.call]p7:
+ // After these conversions, if the argument does not have arithmetic,
+ // enumeration, pointer, pointer to member, or class type, the program
+ // is ill-formed.
+ //
+ // Since we've already performed array-to-pointer and function-to-pointer
+ // decay, the only such type in C++ is cv void. This also handles
+ // initializer lists as variadic arguments.
+ if (Ty->isVoidType())
+ return VAK_Invalid;
+
+ if (Ty->isObjCObjectType())
+ return VAK_Invalid;
+ return VAK_Valid;
+ }
+
+ if (Ty.isCXX98PODType(Context))
+ return VAK_Valid;
+
+ // C++11 [expr.call]p7:
+ // Passing a potentially-evaluated argument of class type (Clause 9)
+ // having a non-trivial copy constructor, a non-trivial move constructor,
+ // or a non-trivial destructor, with no corresponding parameter,
+ // is conditionally-supported with implementation-defined semantics.
+ if (getLangOpts().CPlusPlus11 && !Ty->isDependentType())
+ if (CXXRecordDecl *Record = Ty->getAsCXXRecordDecl())
+ if (!Record->hasNonTrivialCopyConstructor() &&
+ !Record->hasNonTrivialMoveConstructor() &&
+ !Record->hasNonTrivialDestructor())
+ return VAK_ValidInCXX11;
+
+ if (getLangOpts().ObjCAutoRefCount && Ty->isObjCLifetimeType())
+ return VAK_Valid;
+
+ if (Ty->isObjCObjectType())
+ return VAK_Invalid;
+
+ if (getLangOpts().MSVCCompat)
+ return VAK_MSVCUndefined;
+
+ // FIXME: In C++11, these cases are conditionally-supported, meaning we're
+ // permitted to reject them. We should consider doing so.
+ return VAK_Undefined;
+}
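+
+// Illustrative examples (assumed): passing a non-POD class such as
+// std::string through a '...' parameter is VAK_Undefined, a C++11 class
+// whose copy/move constructors and destructor are all trivial is
+// VAK_ValidInCXX11, and an Objective-C object type (as opposed to a
+// pointer to one) is always VAK_Invalid.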
+
+void Sema::checkVariadicArgument(const Expr *E, VariadicCallType CT) {
+ // Don't allow one to pass an Objective-C interface to a vararg.
+ const QualType &Ty = E->getType();
+ VarArgKind VAK = isValidVarArgType(Ty);
+
+ // Complain about passing non-POD types through varargs.
+ switch (VAK) {
+ case VAK_ValidInCXX11:
+ DiagRuntimeBehavior(
+ E->getLocStart(), nullptr,
+ PDiag(diag::warn_cxx98_compat_pass_non_pod_arg_to_vararg)
+ << Ty << CT);
+ // Fall through.
+ case VAK_Valid:
+ if (Ty->isRecordType()) {
+ // This is unlikely to be what the user intended. If the class has a
+ // 'c_str' member function, the user probably meant to call that.
+ DiagRuntimeBehavior(E->getLocStart(), nullptr,
+ PDiag(diag::warn_pass_class_arg_to_vararg)
+ << Ty << CT << hasCStrMethod(E) << ".c_str()");
+ }
+ break;
+
+ case VAK_Undefined:
+ case VAK_MSVCUndefined:
+ DiagRuntimeBehavior(
+ E->getLocStart(), nullptr,
+ PDiag(diag::warn_cannot_pass_non_pod_arg_to_vararg)
+ << getLangOpts().CPlusPlus11 << Ty << CT);
+ break;
+
+ case VAK_Invalid:
+ if (Ty->isObjCObjectType())
+ DiagRuntimeBehavior(
+ E->getLocStart(), nullptr,
+ PDiag(diag::err_cannot_pass_objc_interface_to_vararg)
+ << Ty << CT);
+ else
+ Diag(E->getLocStart(), diag::err_cannot_pass_to_vararg)
+ << isa<InitListExpr>(E) << Ty << CT;
+ break;
+ }
+}
+
+/// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
+/// will create a trap if the resulting type is not a POD type.
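+///
+/// Illustrative example (assumed): in C++98, 'printf("%s", s)' with a
+/// non-POD 's' has its argument rewritten to '(__builtin_trap(), s)' so
+/// that the undefined call traps deterministically at runtime.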
+ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
+ FunctionDecl *FDecl) {
+ if (const BuiltinType *PlaceholderTy = E->getType()->getAsPlaceholderType()) {
+ // Strip the unbridged-cast placeholder expression off, if applicable.
+ if (PlaceholderTy->getKind() == BuiltinType::ARCUnbridgedCast &&
+ (CT == VariadicMethod ||
+ (FDecl && FDecl->hasAttr<CFAuditedTransferAttr>()))) {
+ E = stripARCUnbridgedCast(E);
+
+ // Otherwise, do normal placeholder checking.
+ } else {
+ ExprResult ExprRes = CheckPlaceholderExpr(E);
+ if (ExprRes.isInvalid())
+ return ExprError();
+ E = ExprRes.get();
+ }
+ }
+
+ ExprResult ExprRes = DefaultArgumentPromotion(E);
+ if (ExprRes.isInvalid())
+ return ExprError();
+ E = ExprRes.get();
+
+ // Diagnostics regarding non-POD argument types are
+ // emitted along with format string checking in Sema::CheckFunctionCall().
+ if (isValidVarArgType(E->getType()) == VAK_Undefined) {
+ // Turn this into a trap.
+ CXXScopeSpec SS;
+ SourceLocation TemplateKWLoc;
+ UnqualifiedId Name;
+ Name.setIdentifier(PP.getIdentifierInfo("__builtin_trap"),
+ E->getLocStart());
+ ExprResult TrapFn = ActOnIdExpression(TUScope, SS, TemplateKWLoc,
+ Name, true, false);
+ if (TrapFn.isInvalid())
+ return ExprError();
+
+ ExprResult Call = ActOnCallExpr(TUScope, TrapFn.get(),
+ E->getLocStart(), None,
+ E->getLocEnd());
+ if (Call.isInvalid())
+ return ExprError();
+
+ ExprResult Comma = ActOnBinOp(TUScope, E->getLocStart(), tok::comma,
+ Call.get(), E);
+ if (Comma.isInvalid())
+ return ExprError();
+ return Comma.get();
+ }
+
+ if (!getLangOpts().CPlusPlus &&
+ RequireCompleteType(E->getExprLoc(), E->getType(),
+ diag::err_call_incomplete_argument))
+ return ExprError();
+
+ return E;
+}
+
+/// \brief Converts an integer to complex float type. Helper function of
+/// UsualArithmeticConversions()
+///
+/// \return false if the integer expression has an integer or complex-integer
+/// type; it is then converted to the complex type unless \p SkipCast is set.
+static bool handleIntegerToComplexFloatConversion(Sema &S, ExprResult &IntExpr,
+ ExprResult &ComplexExpr,
+ QualType IntTy,
+ QualType ComplexTy,
+ bool SkipCast) {
+ if (IntTy->isComplexType() || IntTy->isRealFloatingType()) return true;
+ if (SkipCast) return false;
+ if (IntTy->isIntegerType()) {
+ QualType fpTy = cast<ComplexType>(ComplexTy)->getElementType();
+ IntExpr = S.ImpCastExprToType(IntExpr.get(), fpTy, CK_IntegralToFloating);
+ IntExpr = S.ImpCastExprToType(IntExpr.get(), ComplexTy,
+ CK_FloatingRealToComplex);
+ } else {
+ assert(IntTy->isComplexIntegerType());
+ IntExpr = S.ImpCastExprToType(IntExpr.get(), ComplexTy,
+ CK_IntegralComplexToFloatingComplex);
+ }
+ return false;
+}
+
+/// \brief Handle arithmetic conversion with complex types. Helper function of
+/// UsualArithmeticConversions()
+static QualType handleComplexFloatConversion(Sema &S, ExprResult &LHS,
+ ExprResult &RHS, QualType LHSType,
+ QualType RHSType,
+ bool IsCompAssign) {
+  // If we have an integer operand, the result is the complex type.
+ if (!handleIntegerToComplexFloatConversion(S, RHS, LHS, RHSType, LHSType,
+ /*skipCast*/false))
+ return LHSType;
+ if (!handleIntegerToComplexFloatConversion(S, LHS, RHS, LHSType, RHSType,
+ /*skipCast*/IsCompAssign))
+ return RHSType;
+
+ // This handles complex/complex, complex/float, or float/complex.
+ // When both operands are complex, the shorter operand is converted to the
+ // type of the longer, and that is the type of the result. This corresponds
+ // to what is done when combining two real floating-point operands.
+  // The fun begins when size promotion occurs across type domains.
+  // From H&S 6.3.4: When one operand is complex and the other is a real
+  // floating-point type, the less precise type is converted, within its
+ // real or complex domain, to the precision of the other type. For example,
+ // when combining a "long double" with a "double _Complex", the
+ // "double _Complex" is promoted to "long double _Complex".
+
+ // Compute the rank of the two types, regardless of whether they are complex.
+ int Order = S.Context.getFloatingTypeOrder(LHSType, RHSType);
+
+ auto *LHSComplexType = dyn_cast<ComplexType>(LHSType);
+ auto *RHSComplexType = dyn_cast<ComplexType>(RHSType);
+ QualType LHSElementType =
+ LHSComplexType ? LHSComplexType->getElementType() : LHSType;
+ QualType RHSElementType =
+ RHSComplexType ? RHSComplexType->getElementType() : RHSType;
+
+ QualType ResultType = S.Context.getComplexType(LHSElementType);
+ if (Order < 0) {
+ // Promote the precision of the LHS if not an assignment.
+ ResultType = S.Context.getComplexType(RHSElementType);
+ if (!IsCompAssign) {
+ if (LHSComplexType)
+ LHS =
+ S.ImpCastExprToType(LHS.get(), ResultType, CK_FloatingComplexCast);
+ else
+ LHS = S.ImpCastExprToType(LHS.get(), RHSElementType, CK_FloatingCast);
+ }
+ } else if (Order > 0) {
+ // Promote the precision of the RHS.
+ if (RHSComplexType)
+ RHS = S.ImpCastExprToType(RHS.get(), ResultType, CK_FloatingComplexCast);
+ else
+ RHS = S.ImpCastExprToType(RHS.get(), LHSElementType, CK_FloatingCast);
+ }
+ return ResultType;
+}
+
+/// \brief Handle arithmetic conversion from integer to float. Helper function
+/// of UsualArithmeticConversions()
+static QualType handleIntToFloatConversion(Sema &S, ExprResult &FloatExpr,
+ ExprResult &IntExpr,
+ QualType FloatTy, QualType IntTy,
+ bool ConvertFloat, bool ConvertInt) {
+ if (IntTy->isIntegerType()) {
+ if (ConvertInt)
+ // Convert intExpr to the lhs floating point type.
+ IntExpr = S.ImpCastExprToType(IntExpr.get(), FloatTy,
+ CK_IntegralToFloating);
+ return FloatTy;
+ }
+
+ // Convert both sides to the appropriate complex float.
+ assert(IntTy->isComplexIntegerType());
+ QualType result = S.Context.getComplexType(FloatTy);
+
+ // _Complex int -> _Complex float
+ if (ConvertInt)
+ IntExpr = S.ImpCastExprToType(IntExpr.get(), result,
+ CK_IntegralComplexToFloatingComplex);
+
+ // float -> _Complex float
+ if (ConvertFloat)
+ FloatExpr = S.ImpCastExprToType(FloatExpr.get(), result,
+ CK_FloatingRealToComplex);
+
+ return result;
+}
+
+/// \brief Handle arithmetic conversion with floating point types. Helper
+/// function of UsualArithmeticConversions()
+static QualType handleFloatConversion(Sema &S, ExprResult &LHS,
+ ExprResult &RHS, QualType LHSType,
+ QualType RHSType, bool IsCompAssign) {
+ bool LHSFloat = LHSType->isRealFloatingType();
+ bool RHSFloat = RHSType->isRealFloatingType();
+
+ // If we have two real floating types, convert the smaller operand
+ // to the bigger result.
+ if (LHSFloat && RHSFloat) {
+ int order = S.Context.getFloatingTypeOrder(LHSType, RHSType);
+ if (order > 0) {
+ RHS = S.ImpCastExprToType(RHS.get(), LHSType, CK_FloatingCast);
+ return LHSType;
+ }
+
+ assert(order < 0 && "illegal float comparison");
+ if (!IsCompAssign)
+ LHS = S.ImpCastExprToType(LHS.get(), RHSType, CK_FloatingCast);
+ return RHSType;
+ }
+
+ if (LHSFloat) {
+ // Half FP has to be promoted to float unless it is natively supported
+ if (LHSType->isHalfType() && !S.getLangOpts().NativeHalfType)
+ LHSType = S.Context.FloatTy;
+
+ return handleIntToFloatConversion(S, LHS, RHS, LHSType, RHSType,
+ /*convertFloat=*/!IsCompAssign,
+ /*convertInt=*/ true);
+ }
+ assert(RHSFloat);
+ return handleIntToFloatConversion(S, RHS, LHS, RHSType, LHSType,
+ /*convertInt=*/ true,
+ /*convertFloat=*/!IsCompAssign);
+}
+
+typedef ExprResult PerformCastFn(Sema &S, Expr *operand, QualType toType);
+
+namespace {
+/// These helper callbacks are placed in an anonymous namespace to
+/// permit their use as function template parameters.
+ExprResult doIntegralCast(Sema &S, Expr *op, QualType toType) {
+ return S.ImpCastExprToType(op, toType, CK_IntegralCast);
+}
+
+ExprResult doComplexIntegralCast(Sema &S, Expr *op, QualType toType) {
+ return S.ImpCastExprToType(op, S.Context.getComplexType(toType),
+ CK_IntegralComplexCast);
+}
+}
+
+/// \brief Handle integer arithmetic conversions. Helper function of
+/// UsualArithmeticConversions()
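+///
+/// Illustrative examples (assumed): 'int + unsigned int' yields unsigned
+/// int (equal rank, so the unsigned type wins); on ILP32 targets
+/// 'unsigned int + long' yields unsigned long (long is higher-ranked but
+/// no wider), while on LP64 targets it yields long.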
+template <PerformCastFn doLHSCast, PerformCastFn doRHSCast>
+static QualType handleIntegerConversion(Sema &S, ExprResult &LHS,
+ ExprResult &RHS, QualType LHSType,
+ QualType RHSType, bool IsCompAssign) {
+ // The rules for this case are in C99 6.3.1.8
+ int order = S.Context.getIntegerTypeOrder(LHSType, RHSType);
+ bool LHSSigned = LHSType->hasSignedIntegerRepresentation();
+ bool RHSSigned = RHSType->hasSignedIntegerRepresentation();
+ if (LHSSigned == RHSSigned) {
+ // Same signedness; use the higher-ranked type
+ if (order >= 0) {
+ RHS = (*doRHSCast)(S, RHS.get(), LHSType);
+ return LHSType;
+ } else if (!IsCompAssign)
+ LHS = (*doLHSCast)(S, LHS.get(), RHSType);
+ return RHSType;
+ } else if (order != (LHSSigned ? 1 : -1)) {
+    // The unsigned type has rank greater than or equal to that of the
+    // signed type, so use the unsigned type.
+ if (RHSSigned) {
+ RHS = (*doRHSCast)(S, RHS.get(), LHSType);
+ return LHSType;
+ } else if (!IsCompAssign)
+ LHS = (*doLHSCast)(S, LHS.get(), RHSType);
+ return RHSType;
+ } else if (S.Context.getIntWidth(LHSType) != S.Context.getIntWidth(RHSType)) {
+ // The two types are different widths; if we are here, that
+ // means the signed type is larger than the unsigned type, so
+ // use the signed type.
+ if (LHSSigned) {
+ RHS = (*doRHSCast)(S, RHS.get(), LHSType);
+ return LHSType;
+ } else if (!IsCompAssign)
+ LHS = (*doLHSCast)(S, LHS.get(), RHSType);
+ return RHSType;
+ } else {
+ // The signed type is higher-ranked than the unsigned type,
+ // but isn't actually any bigger (like unsigned int and long
+ // on most 32-bit systems). Use the unsigned type corresponding
+ // to the signed type.
+ QualType result =
+ S.Context.getCorrespondingUnsignedType(LHSSigned ? LHSType : RHSType);
+ RHS = (*doRHSCast)(S, RHS.get(), result);
+ if (!IsCompAssign)
+ LHS = (*doLHSCast)(S, LHS.get(), result);
+ return result;
+ }
+}
+
+/// \brief Handle conversions with GCC complex int extension. Helper function
+/// of UsualArithmeticConversions()
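+///
+/// Illustrative example (assumed): with this extension, '_Complex int +
+/// long' converts both operands to '_Complex long'.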
+static QualType handleComplexIntConversion(Sema &S, ExprResult &LHS,
+ ExprResult &RHS, QualType LHSType,
+ QualType RHSType,
+ bool IsCompAssign) {
+ const ComplexType *LHSComplexInt = LHSType->getAsComplexIntegerType();
+ const ComplexType *RHSComplexInt = RHSType->getAsComplexIntegerType();
+
+ if (LHSComplexInt && RHSComplexInt) {
+ QualType LHSEltType = LHSComplexInt->getElementType();
+ QualType RHSEltType = RHSComplexInt->getElementType();
+ QualType ScalarType =
+ handleIntegerConversion<doComplexIntegralCast, doComplexIntegralCast>
+ (S, LHS, RHS, LHSEltType, RHSEltType, IsCompAssign);
+
+ return S.Context.getComplexType(ScalarType);
+ }
+
+ if (LHSComplexInt) {
+ QualType LHSEltType = LHSComplexInt->getElementType();
+ QualType ScalarType =
+ handleIntegerConversion<doComplexIntegralCast, doIntegralCast>
+ (S, LHS, RHS, LHSEltType, RHSType, IsCompAssign);
+ QualType ComplexType = S.Context.getComplexType(ScalarType);
+ RHS = S.ImpCastExprToType(RHS.get(), ComplexType,
+ CK_IntegralRealToComplex);
+
+ return ComplexType;
+ }
+
+ assert(RHSComplexInt);
+
+ QualType RHSEltType = RHSComplexInt->getElementType();
+ QualType ScalarType =
+ handleIntegerConversion<doIntegralCast, doComplexIntegralCast>
+ (S, LHS, RHS, LHSType, RHSEltType, IsCompAssign);
+ QualType ComplexType = S.Context.getComplexType(ScalarType);
+
+ if (!IsCompAssign)
+ LHS = S.ImpCastExprToType(LHS.get(), ComplexType,
+ CK_IntegralRealToComplex);
+ return ComplexType;
+}
+
+/// UsualArithmeticConversions - Performs various conversions that are common to
+/// binary operators (C99 6.3.1.8). If either operand is not arithmetic, this
+/// routine returns a null QualType. The client is
+/// responsible for emitting appropriate error diagnostics.
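+///
+/// Illustrative example (assumed): for 'int + double' the int operand is
+/// converted to double and the common result type is double.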
+QualType Sema::UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
+ bool IsCompAssign) {
+ if (!IsCompAssign) {
+ LHS = UsualUnaryConversions(LHS.get());
+ if (LHS.isInvalid())
+ return QualType();
+ }
+
+ RHS = UsualUnaryConversions(RHS.get());
+ if (RHS.isInvalid())
+ return QualType();
+
+ // For conversion purposes, we ignore any qualifiers.
+ // For example, "const float" and "float" are equivalent.
+ QualType LHSType =
+ Context.getCanonicalType(LHS.get()->getType()).getUnqualifiedType();
+ QualType RHSType =
+ Context.getCanonicalType(RHS.get()->getType()).getUnqualifiedType();
+
+ // For conversion purposes, we ignore any atomic qualifier on the LHS.
+ if (const AtomicType *AtomicLHS = LHSType->getAs<AtomicType>())
+ LHSType = AtomicLHS->getValueType();
+
+ // If both types are identical, no conversion is needed.
+ if (LHSType == RHSType)
+ return LHSType;
+
+ // If either side is a non-arithmetic type (e.g. a pointer), we are done.
+ // The caller can deal with this (e.g. pointer + int).
+ if (!LHSType->isArithmeticType() || !RHSType->isArithmeticType())
+ return QualType();
+
+ // Apply unary and bitfield promotions to the LHS's type.
+ QualType LHSUnpromotedType = LHSType;
+ if (LHSType->isPromotableIntegerType())
+ LHSType = Context.getPromotedIntegerType(LHSType);
+ QualType LHSBitfieldPromoteTy = Context.isPromotableBitField(LHS.get());
+ if (!LHSBitfieldPromoteTy.isNull())
+ LHSType = LHSBitfieldPromoteTy;
+ if (LHSType != LHSUnpromotedType && !IsCompAssign)
+ LHS = ImpCastExprToType(LHS.get(), LHSType, CK_IntegralCast);
+
+ // If both types are identical, no conversion is needed.
+ if (LHSType == RHSType)
+ return LHSType;
+
+ // At this point, we have two different arithmetic types.
+
+ // Handle complex types first (C99 6.3.1.8p1).
+ if (LHSType->isComplexType() || RHSType->isComplexType())
+ return handleComplexFloatConversion(*this, LHS, RHS, LHSType, RHSType,
+ IsCompAssign);
+
+ // Now handle "real" floating types (i.e. float, double, long double).
+ if (LHSType->isRealFloatingType() || RHSType->isRealFloatingType())
+ return handleFloatConversion(*this, LHS, RHS, LHSType, RHSType,
+ IsCompAssign);
+
+ // Handle GCC complex int extension.
+ if (LHSType->isComplexIntegerType() || RHSType->isComplexIntegerType())
+ return handleComplexIntConversion(*this, LHS, RHS, LHSType, RHSType,
+ IsCompAssign);
+
+ // Finally, we have two differing integer types.
+ return handleIntegerConversion<doIntegralCast, doIntegralCast>
+ (*this, LHS, RHS, LHSType, RHSType, IsCompAssign);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Semantic Analysis for various Expression Types
+//===----------------------------------------------------------------------===//
+
+
+ExprResult
+Sema::ActOnGenericSelectionExpr(SourceLocation KeyLoc,
+ SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ Expr *ControllingExpr,
+ ArrayRef<ParsedType> ArgTypes,
+ ArrayRef<Expr *> ArgExprs) {
+ unsigned NumAssocs = ArgTypes.size();
+ assert(NumAssocs == ArgExprs.size());
+
+ TypeSourceInfo **Types = new TypeSourceInfo*[NumAssocs];
+ for (unsigned i = 0; i < NumAssocs; ++i) {
+ if (ArgTypes[i])
+ (void) GetTypeFromParser(ArgTypes[i], &Types[i]);
+ else
+ Types[i] = nullptr;
+ }
+
+ ExprResult ER = CreateGenericSelectionExpr(KeyLoc, DefaultLoc, RParenLoc,
+ ControllingExpr,
+ llvm::makeArrayRef(Types, NumAssocs),
+ ArgExprs);
+ delete [] Types;
+ return ER;
+}
+
+ExprResult
+Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
+ SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ Expr *ControllingExpr,
+ ArrayRef<TypeSourceInfo *> Types,
+ ArrayRef<Expr *> Exprs) {
+ unsigned NumAssocs = Types.size();
+ assert(NumAssocs == Exprs.size());
+
+ // Decay and strip qualifiers for the controlling expression type, and handle
+ // placeholder type replacement. See committee discussion from WG14 DR423.
+ ExprResult R = DefaultFunctionArrayLvalueConversion(ControllingExpr);
+ if (R.isInvalid())
+ return ExprError();
+ ControllingExpr = R.get();
+
+ // The controlling expression is an unevaluated operand, so side effects are
+ // likely unintended.
+ if (ActiveTemplateInstantiations.empty() &&
+ ControllingExpr->HasSideEffects(Context, false))
+ Diag(ControllingExpr->getExprLoc(),
+ diag::warn_side_effects_unevaluated_context);
+
+ bool TypeErrorFound = false,
+ IsResultDependent = ControllingExpr->isTypeDependent(),
+ ContainsUnexpandedParameterPack
+ = ControllingExpr->containsUnexpandedParameterPack();
+
+ for (unsigned i = 0; i < NumAssocs; ++i) {
+ if (Exprs[i]->containsUnexpandedParameterPack())
+ ContainsUnexpandedParameterPack = true;
+
+ if (Types[i]) {
+ if (Types[i]->getType()->containsUnexpandedParameterPack())
+ ContainsUnexpandedParameterPack = true;
+
+ if (Types[i]->getType()->isDependentType()) {
+ IsResultDependent = true;
+ } else {
+ // C11 6.5.1.1p2 "The type name in a generic association shall specify a
+ // complete object type other than a variably modified type."
+ unsigned D = 0;
+ if (Types[i]->getType()->isIncompleteType())
+ D = diag::err_assoc_type_incomplete;
+ else if (!Types[i]->getType()->isObjectType())
+ D = diag::err_assoc_type_nonobject;
+ else if (Types[i]->getType()->isVariablyModifiedType())
+ D = diag::err_assoc_type_variably_modified;
+
+ if (D != 0) {
+ Diag(Types[i]->getTypeLoc().getBeginLoc(), D)
+ << Types[i]->getTypeLoc().getSourceRange()
+ << Types[i]->getType();
+ TypeErrorFound = true;
+ }
+
+ // C11 6.5.1.1p2 "No two generic associations in the same generic
+ // selection shall specify compatible types."
+ for (unsigned j = i+1; j < NumAssocs; ++j)
+ if (Types[j] && !Types[j]->getType()->isDependentType() &&
+ Context.typesAreCompatible(Types[i]->getType(),
+ Types[j]->getType())) {
+ Diag(Types[j]->getTypeLoc().getBeginLoc(),
+ diag::err_assoc_compatible_types)
+ << Types[j]->getTypeLoc().getSourceRange()
+ << Types[j]->getType()
+ << Types[i]->getType();
+ Diag(Types[i]->getTypeLoc().getBeginLoc(),
+ diag::note_compat_assoc)
+ << Types[i]->getTypeLoc().getSourceRange()
+ << Types[i]->getType();
+ TypeErrorFound = true;
+ }
+ }
+ }
+ }
+ if (TypeErrorFound)
+ return ExprError();
+
+ // If we determined that the generic selection is result-dependent, don't
+ // try to compute the result expression.
+ if (IsResultDependent)
+ return new (Context) GenericSelectionExpr(
+ Context, KeyLoc, ControllingExpr, Types, Exprs, DefaultLoc, RParenLoc,
+ ContainsUnexpandedParameterPack);
+
+ SmallVector<unsigned, 1> CompatIndices;
+ unsigned DefaultIndex = -1U;
+ for (unsigned i = 0; i < NumAssocs; ++i) {
+ if (!Types[i])
+ DefaultIndex = i;
+ else if (Context.typesAreCompatible(ControllingExpr->getType(),
+ Types[i]->getType()))
+ CompatIndices.push_back(i);
+ }
+
+ // C11 6.5.1.1p2 "The controlling expression of a generic selection shall have
+ // type compatible with at most one of the types named in its generic
+ // association list."
+ if (CompatIndices.size() > 1) {
+ // We strip parens here because the controlling expression is typically
+ // parenthesized in macro definitions.
+ ControllingExpr = ControllingExpr->IgnoreParens();
+ Diag(ControllingExpr->getLocStart(), diag::err_generic_sel_multi_match)
+ << ControllingExpr->getSourceRange() << ControllingExpr->getType()
+ << (unsigned) CompatIndices.size();
+ for (unsigned I : CompatIndices) {
+ Diag(Types[I]->getTypeLoc().getBeginLoc(),
+ diag::note_compat_assoc)
+ << Types[I]->getTypeLoc().getSourceRange()
+ << Types[I]->getType();
+ }
+ return ExprError();
+ }
+
+ // C11 6.5.1.1p2 "If a generic selection has no default generic association,
+ // its controlling expression shall have type compatible with exactly one of
+ // the types named in its generic association list."
+ if (DefaultIndex == -1U && CompatIndices.size() == 0) {
+ // We strip parens here because the controlling expression is typically
+ // parenthesized in macro definitions.
+ ControllingExpr = ControllingExpr->IgnoreParens();
+ Diag(ControllingExpr->getLocStart(), diag::err_generic_sel_no_match)
+ << ControllingExpr->getSourceRange() << ControllingExpr->getType();
+ return ExprError();
+ }
+
+ // C11 6.5.1.1p3 "If a generic selection has a generic association with a
+ // type name that is compatible with the type of the controlling expression,
+ // then the result expression of the generic selection is the expression
+ // in that generic association. Otherwise, the result expression of the
+ // generic selection is the expression in the default generic association."
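+  // Illustrative example (assumed):
+  //   _Generic(1.0f, int: 1, float: 2, default: 3)
+  // selects the 'float' association and yields 2; without a 'float'
+  // association the 'default' expression would be chosen instead.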
+ unsigned ResultIndex =
+ CompatIndices.size() ? CompatIndices[0] : DefaultIndex;
+
+ return new (Context) GenericSelectionExpr(
+ Context, KeyLoc, ControllingExpr, Types, Exprs, DefaultLoc, RParenLoc,
+ ContainsUnexpandedParameterPack, ResultIndex);
+}
+
+/// getUDSuffixLoc - Create a SourceLocation for a ud-suffix, given the
+/// location of the token and the offset of the ud-suffix within it.
+static SourceLocation getUDSuffixLoc(Sema &S, SourceLocation TokLoc,
+ unsigned Offset) {
+ return Lexer::AdvanceToTokenCharacter(TokLoc, Offset, S.getSourceManager(),
+ S.getLangOpts());
+}
+
+/// BuildCookedLiteralOperatorCall - A user-defined literal was found. Look up
+/// the corresponding cooked (non-raw) literal operator, and build a call to it.
+static ExprResult BuildCookedLiteralOperatorCall(Sema &S, Scope *Scope,
+ IdentifierInfo *UDSuffix,
+ SourceLocation UDSuffixLoc,
+ ArrayRef<Expr*> Args,
+ SourceLocation LitEndLoc) {
+ assert(Args.size() <= 2 && "too many arguments for literal operator");
+
+ QualType ArgTy[2];
+ for (unsigned ArgIdx = 0; ArgIdx != Args.size(); ++ArgIdx) {
+ ArgTy[ArgIdx] = Args[ArgIdx]->getType();
+ if (ArgTy[ArgIdx]->isArrayType())
+ ArgTy[ArgIdx] = S.Context.getArrayDecayedType(ArgTy[ArgIdx]);
+ }
+
+ DeclarationName OpName =
+ S.Context.DeclarationNames.getCXXLiteralOperatorName(UDSuffix);
+ DeclarationNameInfo OpNameInfo(OpName, UDSuffixLoc);
+ OpNameInfo.setCXXLiteralOperatorNameLoc(UDSuffixLoc);
+
+ LookupResult R(S, OpName, UDSuffixLoc, Sema::LookupOrdinaryName);
+ if (S.LookupLiteralOperator(Scope, R, llvm::makeArrayRef(ArgTy, Args.size()),
+ /*AllowRaw*/false, /*AllowTemplate*/false,
+ /*AllowStringTemplate*/false) == Sema::LOLR_Error)
+ return ExprError();
+
+ return S.BuildLiteralOperatorCall(R, OpNameInfo, Args, LitEndLoc);
+}
+
+/// ActOnStringLiteral - The specified tokens were lexed as pasted string
+/// fragments (e.g. "foo" "bar" L"baz"). The result string has to handle string
+/// concatenation ([C99 5.1.1.2, translation phase #6]), so it may come from
+/// multiple tokens. However, the common case is that StringToks points to one
+/// string.
+///
+ExprResult
+Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
+ assert(!StringToks.empty() && "Must have at least one string!");
+
+ StringLiteralParser Literal(StringToks, PP);
+ if (Literal.hadError)
+ return ExprError();
+
+ SmallVector<SourceLocation, 4> StringTokLocs;
+ for (const Token &Tok : StringToks)
+ StringTokLocs.push_back(Tok.getLocation());
+
+ QualType CharTy = Context.CharTy;
+ StringLiteral::StringKind Kind = StringLiteral::Ascii;
+ if (Literal.isWide()) {
+ CharTy = Context.getWideCharType();
+ Kind = StringLiteral::Wide;
+ } else if (Literal.isUTF8()) {
+ Kind = StringLiteral::UTF8;
+ } else if (Literal.isUTF16()) {
+ CharTy = Context.Char16Ty;
+ Kind = StringLiteral::UTF16;
+ } else if (Literal.isUTF32()) {
+ CharTy = Context.Char32Ty;
+ Kind = StringLiteral::UTF32;
+ } else if (Literal.isPascal()) {
+ CharTy = Context.UnsignedCharTy;
+ }
+
+ QualType CharTyConst = CharTy;
+ // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
+ if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
+ CharTyConst.addConst();
+
+ // Get an array type for the string, according to C99 6.4.5. This includes
+ // the nul terminator character as well as the string length for pascal
+ // strings.
+ QualType StrTy = Context.getConstantArrayType(CharTyConst,
+ llvm::APInt(32, Literal.GetNumStringChars()+1),
+ ArrayType::Normal, 0);
+
+ // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
+ if (getLangOpts().OpenCL) {
+ StrTy = Context.getAddrSpaceQualType(StrTy, LangAS::opencl_constant);
+ }
+
+ // Pass &StringTokLocs[0], StringTokLocs.size() to factory!
+ StringLiteral *Lit = StringLiteral::Create(Context, Literal.GetString(),
+ Kind, Literal.Pascal, StrTy,
+ &StringTokLocs[0],
+ StringTokLocs.size());
+ if (Literal.getUDSuffix().empty())
+ return Lit;
+
+ // We're building a user-defined literal.
+ IdentifierInfo *UDSuffix = &Context.Idents.get(Literal.getUDSuffix());
+ SourceLocation UDSuffixLoc =
+ getUDSuffixLoc(*this, StringTokLocs[Literal.getUDSuffixToken()],
+ Literal.getUDSuffixOffset());
+
+ // Make sure we're allowed user-defined literals here.
+ if (!UDLScope)
+ return ExprError(Diag(UDSuffixLoc, diag::err_invalid_string_udl));
+
+ // C++11 [lex.ext]p5: The literal L is treated as a call of the form
+ // operator "" X (str, len)
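+  // Illustrative example (assumed): given a cooked operator
+  //   MyStr operator""_ms(const char *str, size_t len);
+  // the literal "abc"_ms is built as the call operator""_ms("abc", 3).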
+ QualType SizeType = Context.getSizeType();
+
+ DeclarationName OpName =
+ Context.DeclarationNames.getCXXLiteralOperatorName(UDSuffix);
+ DeclarationNameInfo OpNameInfo(OpName, UDSuffixLoc);
+ OpNameInfo.setCXXLiteralOperatorNameLoc(UDSuffixLoc);
+
+ QualType ArgTy[] = {
+ Context.getArrayDecayedType(StrTy), SizeType
+ };
+
+ LookupResult R(*this, OpName, UDSuffixLoc, LookupOrdinaryName);
+ switch (LookupLiteralOperator(UDLScope, R, ArgTy,
+ /*AllowRaw*/false, /*AllowTemplate*/false,
+ /*AllowStringTemplate*/true)) {
+
+ case LOLR_Cooked: {
+ llvm::APInt Len(Context.getIntWidth(SizeType), Literal.GetNumStringChars());
+ IntegerLiteral *LenArg = IntegerLiteral::Create(Context, Len, SizeType,
+ StringTokLocs[0]);
+ Expr *Args[] = { Lit, LenArg };
+
+ return BuildLiteralOperatorCall(R, OpNameInfo, Args, StringTokLocs.back());
+ }
+
+ case LOLR_StringTemplate: {
+ TemplateArgumentListInfo ExplicitArgs;
+
+ unsigned CharBits = Context.getIntWidth(CharTy);
+ bool CharIsUnsigned = CharTy->isUnsignedIntegerType();
+ llvm::APSInt Value(CharBits, CharIsUnsigned);
+
+ TemplateArgument TypeArg(CharTy);
+ TemplateArgumentLocInfo TypeArgInfo(Context.getTrivialTypeSourceInfo(CharTy));
+ ExplicitArgs.addArgument(TemplateArgumentLoc(TypeArg, TypeArgInfo));
+
+ for (unsigned I = 0, N = Lit->getLength(); I != N; ++I) {
+ Value = Lit->getCodeUnit(I);
+ TemplateArgument Arg(Context, Value, CharTy);
+ TemplateArgumentLocInfo ArgInfo;
+ ExplicitArgs.addArgument(TemplateArgumentLoc(Arg, ArgInfo));
+ }
+ return BuildLiteralOperatorCall(R, OpNameInfo, None, StringTokLocs.back(),
+ &ExplicitArgs);
+ }
+ case LOLR_Raw:
+ case LOLR_Template:
+ llvm_unreachable("unexpected literal operator lookup result");
+ case LOLR_Error:
+ return ExprError();
+ }
+ llvm_unreachable("unexpected literal operator lookup result");
+}
+
+ExprResult
+Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
+ SourceLocation Loc,
+ const CXXScopeSpec *SS) {
+ DeclarationNameInfo NameInfo(D->getDeclName(), Loc);
+ return BuildDeclRefExpr(D, Ty, VK, NameInfo, SS);
+}
+
+/// BuildDeclRefExpr - Build an expression that references a
+/// declaration that does not require a closure capture.
+ExprResult
+Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
+ const DeclarationNameInfo &NameInfo,
+ const CXXScopeSpec *SS, NamedDecl *FoundD,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ if (getLangOpts().CUDA)
+ if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext))
+ if (const FunctionDecl *Callee = dyn_cast<FunctionDecl>(D)) {
+ if (CheckCUDATarget(Caller, Callee)) {
+ Diag(NameInfo.getLoc(), diag::err_ref_bad_target)
+ << IdentifyCUDATarget(Callee) << D->getIdentifier()
+ << IdentifyCUDATarget(Caller);
+ Diag(D->getLocation(), diag::note_previous_decl)
+ << D->getIdentifier();
+ return ExprError();
+ }
+ }
+
+ bool RefersToCapturedVariable =
+ isa<VarDecl>(D) &&
+ NeedToCaptureVariable(cast<VarDecl>(D), NameInfo.getLoc());
+
+ DeclRefExpr *E;
+ if (isa<VarTemplateSpecializationDecl>(D)) {
+ VarTemplateSpecializationDecl *VarSpec =
+ cast<VarTemplateSpecializationDecl>(D);
+
+ E = DeclRefExpr::Create(Context, SS ? SS->getWithLocInContext(Context)
+ : NestedNameSpecifierLoc(),
+ VarSpec->getTemplateKeywordLoc(), D,
+ RefersToCapturedVariable, NameInfo.getLoc(), Ty, VK,
+ FoundD, TemplateArgs);
+ } else {
+ assert(!TemplateArgs && "No template arguments for non-variable"
+ " template specialization references");
+ E = DeclRefExpr::Create(Context, SS ? SS->getWithLocInContext(Context)
+ : NestedNameSpecifierLoc(),
+ SourceLocation(), D, RefersToCapturedVariable,
+ NameInfo, Ty, VK, FoundD);
+ }
+
+ MarkDeclRefReferenced(E);
+
+ if (getLangOpts().ObjCWeak && isa<VarDecl>(D) &&
+ Ty.getObjCLifetime() == Qualifiers::OCL_Weak &&
+ !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, E->getLocStart()))
+ recordUseOfEvaluatedWeak(E);
+
+ // Just in case we're building an illegal pointer-to-member.
+ FieldDecl *FD = dyn_cast<FieldDecl>(D);
+ if (FD && FD->isBitField())
+ E->setObjectKind(OK_BitField);
+
+ return E;
+}
+
+/// Decomposes the given name into a DeclarationNameInfo, its location, and
+/// possibly a list of template arguments.
+///
+/// If this produces template arguments, it is permitted to call
+/// DecomposeTemplateName.
+///
+/// This actually loses a lot of source location information for
+/// non-standard name kinds; we should consider preserving that in
+/// some way.
+void
+Sema::DecomposeUnqualifiedId(const UnqualifiedId &Id,
+ TemplateArgumentListInfo &Buffer,
+ DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *&TemplateArgs) {
+ if (Id.getKind() == UnqualifiedId::IK_TemplateId) {
+ Buffer.setLAngleLoc(Id.TemplateId->LAngleLoc);
+ Buffer.setRAngleLoc(Id.TemplateId->RAngleLoc);
+
+ ASTTemplateArgsPtr TemplateArgsPtr(Id.TemplateId->getTemplateArgs(),
+ Id.TemplateId->NumArgs);
+ translateTemplateArguments(TemplateArgsPtr, Buffer);
+
+ TemplateName TName = Id.TemplateId->Template.get();
+ SourceLocation TNameLoc = Id.TemplateId->TemplateNameLoc;
+ NameInfo = Context.getNameForTemplate(TName, TNameLoc);
+ TemplateArgs = &Buffer;
+ } else {
+ NameInfo = GetNameFromUnqualifiedId(Id);
+ TemplateArgs = nullptr;
+ }
+}
+
+static void emitEmptyLookupTypoDiagnostic(
+ const TypoCorrection &TC, Sema &SemaRef, const CXXScopeSpec &SS,
+ DeclarationName Typo, SourceLocation TypoLoc, ArrayRef<Expr *> Args,
+ unsigned DiagnosticID, unsigned DiagnosticSuggestID) {
+ DeclContext *Ctx =
+ SS.isEmpty() ? nullptr : SemaRef.computeDeclContext(SS, false);
+ if (!TC) {
+ // Emit a special diagnostic for failed member lookups.
+ // FIXME: computing the declaration context might fail here (?)
+ if (Ctx)
+ SemaRef.Diag(TypoLoc, diag::err_no_member) << Typo << Ctx
+ << SS.getRange();
+ else
+ SemaRef.Diag(TypoLoc, DiagnosticID) << Typo;
+ return;
+ }
+
+ std::string CorrectedStr = TC.getAsString(SemaRef.getLangOpts());
+ bool DroppedSpecifier =
+ TC.WillReplaceSpecifier() && Typo.getAsString() == CorrectedStr;
+ unsigned NoteID = TC.getCorrectionDeclAs<ImplicitParamDecl>()
+ ? diag::note_implicit_param_decl
+ : diag::note_previous_decl;
+ if (!Ctx)
+ SemaRef.diagnoseTypo(TC, SemaRef.PDiag(DiagnosticSuggestID) << Typo,
+ SemaRef.PDiag(NoteID));
+ else
+ SemaRef.diagnoseTypo(TC, SemaRef.PDiag(diag::err_no_member_suggest)
+ << Typo << Ctx << DroppedSpecifier
+ << SS.getRange(),
+ SemaRef.PDiag(NoteID));
+}
+
+/// Diagnose an empty lookup.
+///
+/// \return false if new lookup candidates were found
+bool
+Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
+ std::unique_ptr<CorrectionCandidateCallback> CCC,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ ArrayRef<Expr *> Args, TypoExpr **Out) {
+ DeclarationName Name = R.getLookupName();
+
+ unsigned diagnostic = diag::err_undeclared_var_use;
+ unsigned diagnostic_suggest = diag::err_undeclared_var_use_suggest;
+ if (Name.getNameKind() == DeclarationName::CXXOperatorName ||
+ Name.getNameKind() == DeclarationName::CXXLiteralOperatorName ||
+ Name.getNameKind() == DeclarationName::CXXConversionFunctionName) {
+ diagnostic = diag::err_undeclared_use;
+ diagnostic_suggest = diag::err_undeclared_use_suggest;
+ }
+
+ // If the original lookup was an unqualified lookup, fake an
+ // unqualified lookup. This is useful when (for example) the
+ // original lookup would not have found something because it was a
+ // dependent name.
+ DeclContext *DC = SS.isEmpty() ? CurContext : nullptr;
+ while (DC) {
+ if (isa<CXXRecordDecl>(DC)) {
+ LookupQualifiedName(R, DC);
+
+ if (!R.empty()) {
+ // Don't give errors about ambiguities in this lookup.
+ R.suppressDiagnostics();
+
+ // During a default argument instantiation the CurContext points
+ // to a CXXMethodDecl; but we can't apply a this-> fixit inside a
+ // function parameter list, hence add an explicit check.
+ bool isDefaultArgument = !ActiveTemplateInstantiations.empty() &&
+ ActiveTemplateInstantiations.back().Kind ==
+ ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation;
+ CXXMethodDecl *CurMethod = dyn_cast<CXXMethodDecl>(CurContext);
+ bool isInstance = CurMethod &&
+ CurMethod->isInstance() &&
+ DC == CurMethod->getParent() && !isDefaultArgument;
+
+ // Give a code modification hint to insert 'this->'.
+ // TODO: fixit for inserting 'Base<T>::' in the other cases.
+ // Actually quite difficult!
+ if (getLangOpts().MSVCCompat)
+ diagnostic = diag::ext_found_via_dependent_bases_lookup;
+ if (isInstance) {
+ Diag(R.getNameLoc(), diagnostic) << Name
+ << FixItHint::CreateInsertion(R.getNameLoc(), "this->");
+ CheckCXXThisCapture(R.getNameLoc());
+ } else {
+ Diag(R.getNameLoc(), diagnostic) << Name;
+ }
+
+ // Do we really want to note all of these?
+ for (NamedDecl *D : R)
+ Diag(D->getLocation(), diag::note_dependent_var_use);
+
+      // Return true if we are inside a default argument instantiation
+      // and the found name refers to an instance member function; otherwise
+      // the function calling DiagnoseEmptyLookup would try to create an
+      // implicit member call, which is wrong for a default argument.
+ if (isDefaultArgument && ((*R.begin())->isCXXInstanceMember())) {
+ Diag(R.getNameLoc(), diag::err_member_call_without_object);
+ return true;
+ }
+
+ // Tell the callee to try to recover.
+ return false;
+ }
+
+ R.clear();
+ }
+
+ // In Microsoft mode, if we are performing lookup from within a friend
+ // function definition declared at class scope then we must set
+ // DC to the lexical parent to be able to search into the parent
+ // class.
+ if (getLangOpts().MSVCCompat && isa<FunctionDecl>(DC) &&
+ cast<FunctionDecl>(DC)->getFriendObjectKind() &&
+ DC->getLexicalParent()->isRecord())
+ DC = DC->getLexicalParent();
+ else
+ DC = DC->getParent();
+ }
+
+ // We didn't find anything, so try to correct for a typo.
+ TypoCorrection Corrected;
+ if (S && Out) {
+ SourceLocation TypoLoc = R.getNameLoc();
+ assert(!ExplicitTemplateArgs &&
+ "Diagnosing an empty lookup with explicit template args!");
+ *Out = CorrectTypoDelayed(
+ R.getLookupNameInfo(), R.getLookupKind(), S, &SS, std::move(CCC),
+ [=](const TypoCorrection &TC) {
+ emitEmptyLookupTypoDiagnostic(TC, *this, SS, Name, TypoLoc, Args,
+ diagnostic, diagnostic_suggest);
+ },
+ nullptr, CTK_ErrorRecovery);
+ if (*Out)
+ return true;
+ } else if (S && (Corrected =
+ CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S,
+ &SS, std::move(CCC), CTK_ErrorRecovery))) {
+ std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
+ bool DroppedSpecifier =
+ Corrected.WillReplaceSpecifier() && Name.getAsString() == CorrectedStr;
+ R.setLookupName(Corrected.getCorrection());
+
+ bool AcceptableWithRecovery = false;
+ bool AcceptableWithoutRecovery = false;
+ NamedDecl *ND = Corrected.getFoundDecl();
+ if (ND) {
+ if (Corrected.isOverloaded()) {
+ OverloadCandidateSet OCS(R.getNameLoc(),
+ OverloadCandidateSet::CSK_Normal);
+ OverloadCandidateSet::iterator Best;
+ for (NamedDecl *CD : Corrected) {
+ if (FunctionTemplateDecl *FTD =
+ dyn_cast<FunctionTemplateDecl>(CD))
+ AddTemplateOverloadCandidate(
+ FTD, DeclAccessPair::make(FTD, AS_none), ExplicitTemplateArgs,
+ Args, OCS);
+ else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(CD))
+ if (!ExplicitTemplateArgs || ExplicitTemplateArgs->size() == 0)
+ AddOverloadCandidate(FD, DeclAccessPair::make(FD, AS_none),
+ Args, OCS);
+ }
+ switch (OCS.BestViableFunction(*this, R.getNameLoc(), Best)) {
+ case OR_Success:
+ ND = Best->FoundDecl;
+ Corrected.setCorrectionDecl(ND);
+ break;
+ default:
+ // FIXME: Arbitrarily pick the first declaration for the note.
+ Corrected.setCorrectionDecl(ND);
+ break;
+ }
+ }
+ R.addDecl(ND);
+ if (getLangOpts().CPlusPlus && ND->isCXXClassMember()) {
+ CXXRecordDecl *Record = nullptr;
+ if (Corrected.getCorrectionSpecifier()) {
+ const Type *Ty = Corrected.getCorrectionSpecifier()->getAsType();
+ Record = Ty->getAsCXXRecordDecl();
+ }
+ if (!Record)
+ Record = cast<CXXRecordDecl>(
+ ND->getDeclContext()->getRedeclContext());
+ R.setNamingClass(Record);
+ }
+
+ auto *UnderlyingND = ND->getUnderlyingDecl();
+ AcceptableWithRecovery = isa<ValueDecl>(UnderlyingND) ||
+ isa<FunctionTemplateDecl>(UnderlyingND);
+ // FIXME: If we ended up with a typo for a type name or
+ // Objective-C class name, we're in trouble because the parser
+ // is in the wrong place to recover. Suggest the typo
+ // correction, but don't make it a fix-it since we're not going
+ // to recover well anyway.
+ AcceptableWithoutRecovery =
+ isa<TypeDecl>(UnderlyingND) || isa<ObjCInterfaceDecl>(UnderlyingND);
+ } else {
+ // FIXME: We found a keyword. Suggest it, but don't provide a fix-it
+ // because we aren't able to recover.
+ AcceptableWithoutRecovery = true;
+ }
+
+ if (AcceptableWithRecovery || AcceptableWithoutRecovery) {
+ unsigned NoteID = Corrected.getCorrectionDeclAs<ImplicitParamDecl>()
+ ? diag::note_implicit_param_decl
+ : diag::note_previous_decl;
+ if (SS.isEmpty())
+ diagnoseTypo(Corrected, PDiag(diagnostic_suggest) << Name,
+ PDiag(NoteID), AcceptableWithRecovery);
+ else
+ diagnoseTypo(Corrected, PDiag(diag::err_no_member_suggest)
+ << Name << computeDeclContext(SS, false)
+ << DroppedSpecifier << SS.getRange(),
+ PDiag(NoteID), AcceptableWithRecovery);
+
+ // Tell the callee whether to try to recover.
+ return !AcceptableWithRecovery;
+ }
+ }
+ R.clear();
+
+ // Emit a special diagnostic for failed member lookups.
+ // FIXME: computing the declaration context might fail here (?)
+ if (!SS.isEmpty()) {
+ Diag(R.getNameLoc(), diag::err_no_member)
+ << Name << computeDeclContext(SS, false)
+ << SS.getRange();
+ return true;
+ }
+
+ // Give up, we can't recover.
+ Diag(R.getNameLoc(), diagnostic) << Name;
+ return true;
+}
+
+/// In Microsoft mode, if we are inside a template class whose parent class has
+/// dependent base classes, and we can't resolve an unqualified identifier, then
+/// assume the identifier is a member of a dependent base class. We can only
+/// recover successfully in static methods, instance methods, and other contexts
+/// where 'this' is available. This doesn't precisely match MSVC's
+/// instantiation model, but it's close enough.
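+///
+/// Illustrative example (assumed):
+///   template <typename T> struct D : T { void f() { undeclared(); } };
+/// MSVC resolves 'undeclared' at instantiation time, so we recover as if
+/// the user had written 'this->undeclared()'.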
+static Expr *
+recoverFromMSUnqualifiedLookup(Sema &S, ASTContext &Context,
+ DeclarationNameInfo &NameInfo,
+ SourceLocation TemplateKWLoc,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ // Only try to recover from lookup into dependent bases in static methods or
+ // contexts where 'this' is available.
+ QualType ThisType = S.getCurrentThisType();
+ const CXXRecordDecl *RD = nullptr;
+ if (!ThisType.isNull())
+ RD = ThisType->getPointeeType()->getAsCXXRecordDecl();
+ else if (auto *MD = dyn_cast<CXXMethodDecl>(S.CurContext))
+ RD = MD->getParent();
+ if (!RD || !RD->hasAnyDependentBases())
+ return nullptr;
+
+ // Diagnose this as unqualified lookup into a dependent base class. If 'this'
+ // is available, suggest inserting 'this->' as a fixit.
+ SourceLocation Loc = NameInfo.getLoc();
+ auto DB = S.Diag(Loc, diag::ext_undeclared_unqual_id_with_dependent_base);
+ DB << NameInfo.getName() << RD;
+
+ if (!ThisType.isNull()) {
+ DB << FixItHint::CreateInsertion(Loc, "this->");
+ return CXXDependentScopeMemberExpr::Create(
+ Context, /*This=*/nullptr, ThisType, /*IsArrow=*/true,
+ /*Op=*/SourceLocation(), NestedNameSpecifierLoc(), TemplateKWLoc,
+ /*FirstQualifierInScope=*/nullptr, NameInfo, TemplateArgs);
+ }
+
+ // Synthesize a fake NNS that points to the derived class. This will
+ // perform name lookup during template instantiation.
+ CXXScopeSpec SS;
+ auto *NNS =
+ NestedNameSpecifier::Create(Context, nullptr, true, RD->getTypeForDecl());
+ SS.MakeTrivial(Context, NNS, SourceRange(Loc, Loc));
+ return DependentScopeDeclRefExpr::Create(
+ Context, SS.getWithLocInContext(Context), TemplateKWLoc, NameInfo,
+ TemplateArgs);
+}
+
+ExprResult
+Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc, UnqualifiedId &Id,
+ bool HasTrailingLParen, bool IsAddressOfOperand,
+ std::unique_ptr<CorrectionCandidateCallback> CCC,
+ bool IsInlineAsmIdentifier, Token *KeywordReplacement) {
+ assert(!(IsAddressOfOperand && HasTrailingLParen) &&
+ "cannot be direct & operand and have a trailing lparen");
+ if (SS.isInvalid())
+ return ExprError();
+
+ TemplateArgumentListInfo TemplateArgsBuffer;
+
+ // Decompose the UnqualifiedId into the following data.
+ DeclarationNameInfo NameInfo;
+ const TemplateArgumentListInfo *TemplateArgs;
+ DecomposeUnqualifiedId(Id, TemplateArgsBuffer, NameInfo, TemplateArgs);
+
+ DeclarationName Name = NameInfo.getName();
+ IdentifierInfo *II = Name.getAsIdentifierInfo();
+ SourceLocation NameLoc = NameInfo.getLoc();
+
+ // C++ [temp.dep.expr]p3:
+ // An id-expression is type-dependent if it contains:
+ // -- an identifier that was declared with a dependent type,
+ // (note: handled after lookup)
+ // -- a template-id that is dependent,
+ // (note: handled in BuildTemplateIdExpr)
+ // -- a conversion-function-id that specifies a dependent type,
+ // -- a nested-name-specifier that contains a class-name that
+ // names a dependent type.
+ // Determine whether this is a member of an unknown specialization;
+ // we need to handle these differently.
+ bool DependentID = false;
+ if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName &&
+ Name.getCXXNameType()->isDependentType()) {
+ DependentID = true;
+ } else if (SS.isSet()) {
+ if (DeclContext *DC = computeDeclContext(SS, false)) {
+ if (RequireCompleteDeclContext(SS, DC))
+ return ExprError();
+ } else {
+ DependentID = true;
+ }
+ }
+
+ if (DependentID)
+ return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
+ IsAddressOfOperand, TemplateArgs);
+
+ // Perform the required lookup.
+ LookupResult R(*this, NameInfo,
+ (Id.getKind() == UnqualifiedId::IK_ImplicitSelfParam)
+ ? LookupObjCImplicitSelfParam : LookupOrdinaryName);
+ if (TemplateArgs) {
+ // Lookup the template name again to correctly establish the context in
+ // which it was found. This is really unfortunate as we already did the
+ // lookup to determine that it was a template name in the first place. If
+ // this becomes a performance hit, we can work harder to preserve those
+ // results until we get here but it's likely not worth it.
+ bool MemberOfUnknownSpecialization;
+ LookupTemplateName(R, S, SS, QualType(), /*EnteringContext=*/false,
+ MemberOfUnknownSpecialization);
+
+ if (MemberOfUnknownSpecialization ||
+ (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation))
+ return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
+ IsAddressOfOperand, TemplateArgs);
+ } else {
+ bool IvarLookupFollowUp = II && !SS.isSet() && getCurMethodDecl();
+ LookupParsedName(R, S, &SS, !IvarLookupFollowUp);
+
+ // If the result might be in a dependent base class, this is a dependent
+ // id-expression.
+ if (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation)
+ return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
+ IsAddressOfOperand, TemplateArgs);
+
+ // If this reference is in an Objective-C method, then we need to do
+ // some special Objective-C lookup, too.
+ if (IvarLookupFollowUp) {
+ ExprResult E(LookupInObjCMethod(R, S, II, true));
+ if (E.isInvalid())
+ return ExprError();
+
+ if (Expr *Ex = E.getAs<Expr>())
+ return Ex;
+ }
+ }
+
+ if (R.isAmbiguous())
+ return ExprError();
+
+ // This could be an implicitly declared function reference (legal in C90,
+ // extension in C99, forbidden in C++).
+ if (R.empty() && HasTrailingLParen && II && !getLangOpts().CPlusPlus) {
+ NamedDecl *D = ImplicitlyDefineFunction(NameLoc, *II, S);
+ if (D) R.addDecl(D);
+ }
+
+ // Determine whether this name might be a candidate for
+ // argument-dependent lookup.
+ bool ADL = UseArgumentDependentLookup(SS, R, HasTrailingLParen);
+
+ if (R.empty() && !ADL) {
+ if (SS.isEmpty() && getLangOpts().MSVCCompat) {
+ if (Expr *E = recoverFromMSUnqualifiedLookup(*this, Context, NameInfo,
+ TemplateKWLoc, TemplateArgs))
+ return E;
+ }
+
+ // Don't diagnose an empty lookup for inline assembly.
+ if (IsInlineAsmIdentifier)
+ return ExprError();
+
+ // If this name wasn't predeclared and if this is not a function
+ // call, diagnose the problem.
+ TypoExpr *TE = nullptr;
+ auto DefaultValidator = llvm::make_unique<CorrectionCandidateCallback>(
+ II, SS.isValid() ? SS.getScopeRep() : nullptr);
+ DefaultValidator->IsAddressOfOperand = IsAddressOfOperand;
+ assert((!CCC || CCC->IsAddressOfOperand == IsAddressOfOperand) &&
+ "Typo correction callback misconfigured");
+ if (CCC) {
+ // Make sure the callback knows what the typo being diagnosed is.
+ CCC->setTypoName(II);
+ if (SS.isValid())
+ CCC->setTypoNNS(SS.getScopeRep());
+ }
+ if (DiagnoseEmptyLookup(S, SS, R,
+ CCC ? std::move(CCC) : std::move(DefaultValidator),
+ nullptr, None, &TE)) {
+ if (TE && KeywordReplacement) {
+ auto &State = getTypoExprState(TE);
+ auto BestTC = State.Consumer->getNextCorrection();
+ if (BestTC.isKeyword()) {
+ auto *II = BestTC.getCorrectionAsIdentifierInfo();
+ if (State.DiagHandler)
+ State.DiagHandler(BestTC);
+ KeywordReplacement->startToken();
+ KeywordReplacement->setKind(II->getTokenID());
+ KeywordReplacement->setIdentifierInfo(II);
+ KeywordReplacement->setLocation(BestTC.getCorrectionRange().getBegin());
+ // Clean up the state associated with the TypoExpr, since it has
+ // now been diagnosed (without a call to CorrectDelayedTyposInExpr).
+ clearDelayedTypo(TE);
+ // Signal that a correction to a keyword was performed by returning a
+ // valid-but-null ExprResult.
+ return (Expr*)nullptr;
+ }
+ State.Consumer->resetCorrectionStream();
+ }
+ return TE ? TE : ExprError();
+ }
+
+ assert(!R.empty() &&
+ "DiagnoseEmptyLookup returned false but added no results");
+
+ // If we found an Objective-C instance variable, let
+ // LookupInObjCMethod build the appropriate expression to
+ // reference the ivar.
+ if (ObjCIvarDecl *Ivar = R.getAsSingle<ObjCIvarDecl>()) {
+ R.clear();
+ ExprResult E(LookupInObjCMethod(R, S, Ivar->getIdentifier()));
+      // In hopelessly buggy code, Objective-C instance variable lookup can
+      // fail, in which case no expression is built to reference it.
+ if (!E.isInvalid() && !E.get())
+ return ExprError();
+ return E;
+ }
+ }
+
+ // This is guaranteed from this point on.
+ assert(!R.empty() || ADL);
+
+ // Check whether this might be a C++ implicit instance member access.
+ // C++ [class.mfct.non-static]p3:
+ // When an id-expression that is not part of a class member access
+ // syntax and not used to form a pointer to member is used in the
+ // body of a non-static member function of class X, if name lookup
+ // resolves the name in the id-expression to a non-static non-type
+ // member of some class C, the id-expression is transformed into a
+ // class member access expression using (*this) as the
+ // postfix-expression to the left of the . operator.
+ //
+ // But we don't actually need to do this for '&' operands if R
+ // resolved to a function or overloaded function set, because the
+ // expression is ill-formed if it actually works out to be a
+ // non-static member function:
+ //
+ // C++ [expr.ref]p4:
+  //   Otherwise, if E1.E2 refers to a non-static member function ...
+ // [t]he expression can be used only as the left-hand operand of a
+ // member function call.
+ //
+ // There are other safeguards against such uses, but it's important
+ // to get this right here so that we don't end up making a
+ // spuriously dependent expression if we're inside a dependent
+ // instance method.
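+  // A sketch of the transformation (illustrative):
+  //   struct X {
+  //     int n;
+  //     void f() { n = 3; }    // rewritten as (*this).n = 3
+  //     void g() { &X::n; }    // pointer-to-member: not rewritten
+  //   };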
+ if (!R.empty() && (*R.begin())->isCXXClassMember()) {
+ bool MightBeImplicitMember;
+ if (!IsAddressOfOperand)
+ MightBeImplicitMember = true;
+ else if (!SS.isEmpty())
+ MightBeImplicitMember = false;
+ else if (R.isOverloadedResult())
+ MightBeImplicitMember = false;
+ else if (R.isUnresolvableResult())
+ MightBeImplicitMember = true;
+ else
+ MightBeImplicitMember = isa<FieldDecl>(R.getFoundDecl()) ||
+ isa<IndirectFieldDecl>(R.getFoundDecl()) ||
+ isa<MSPropertyDecl>(R.getFoundDecl());
+
+ if (MightBeImplicitMember)
+ return BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc,
+ R, TemplateArgs, S);
+ }
+
+ if (TemplateArgs || TemplateKWLoc.isValid()) {
+
+ // In C++1y, if this is a variable template id, then check it
+ // in BuildTemplateIdExpr().
+ // The single lookup result must be a variable template declaration.
+ if (Id.getKind() == UnqualifiedId::IK_TemplateId && Id.TemplateId &&
+ Id.TemplateId->Kind == TNK_Var_template) {
+ assert(R.getAsSingle<VarTemplateDecl>() &&
+ "There should only be one declaration found.");
+ }
+
+ return BuildTemplateIdExpr(SS, TemplateKWLoc, R, ADL, TemplateArgs);
+ }
+
+ return BuildDeclarationNameExpr(SS, R, ADL);
+}
+
+/// BuildQualifiedDeclarationNameExpr - Build a C++ qualified
+/// declaration name, generally during template instantiation.
+/// There are a large number of things that don't need to be done along
+/// this path.
+ExprResult Sema::BuildQualifiedDeclarationNameExpr(
+ CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
+ bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI) {
+ DeclContext *DC = computeDeclContext(SS, false);
+ if (!DC)
+ return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
+ NameInfo, /*TemplateArgs=*/nullptr);
+
+ if (RequireCompleteDeclContext(SS, DC))
+ return ExprError();
+
+ LookupResult R(*this, NameInfo, LookupOrdinaryName);
+ LookupQualifiedName(R, DC);
+
+ if (R.isAmbiguous())
+ return ExprError();
+
+ if (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation)
+ return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
+ NameInfo, /*TemplateArgs=*/nullptr);
+
+ if (R.empty()) {
+ Diag(NameInfo.getLoc(), diag::err_no_member)
+ << NameInfo.getName() << DC << SS.getRange();
+ return ExprError();
+ }
+
+ if (const TypeDecl *TD = R.getAsSingle<TypeDecl>()) {
+ // Diagnose a missing typename if this resolved unambiguously to a type in
+ // a dependent context. If we can recover with a type, downgrade this to
+ // a warning in Microsoft compatibility mode.
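+    // Illustrative case: in 'template <typename T> void f() { T::type x; }',
+    // 'T::type' can resolve to a type during instantiation even though it
+    // was written without the required 'typename'.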
+ unsigned DiagID = diag::err_typename_missing;
+ if (RecoveryTSI && getLangOpts().MSVCCompat)
+ DiagID = diag::ext_typename_missing;
+ SourceLocation Loc = SS.getBeginLoc();
+ auto D = Diag(Loc, DiagID);
+ D << SS.getScopeRep() << NameInfo.getName().getAsString()
+ << SourceRange(Loc, NameInfo.getEndLoc());
+
+ // Don't recover if the caller isn't expecting us to or if we're in a SFINAE
+ // context.
+ if (!RecoveryTSI)
+ return ExprError();
+
+ // Only issue the fixit if we're prepared to recover.
+ D << FixItHint::CreateInsertion(Loc, "typename ");
+
+ // Recover by pretending this was an elaborated type.
+ QualType Ty = Context.getTypeDeclType(TD);
+ TypeLocBuilder TLB;
+ TLB.pushTypeSpec(Ty).setNameLoc(NameInfo.getLoc());
+
+ QualType ET = getElaboratedType(ETK_None, SS, Ty);
+ ElaboratedTypeLoc QTL = TLB.push<ElaboratedTypeLoc>(ET);
+ QTL.setElaboratedKeywordLoc(SourceLocation());
+ QTL.setQualifierLoc(SS.getWithLocInContext(Context));
+
+ *RecoveryTSI = TLB.getTypeSourceInfo(Context, ET);
+
+ return ExprEmpty();
+ }
+
+ // Defend against this resolving to an implicit member access. We usually
+  // won't get here if this might be a legitimate class member (we end up in
+ // BuildMemberReferenceExpr instead), but this can be valid if we're forming
+ // a pointer-to-member or in an unevaluated context in C++11.
+ if (!R.empty() && (*R.begin())->isCXXClassMember() && !IsAddressOfOperand)
+ return BuildPossibleImplicitMemberExpr(SS,
+ /*TemplateKWLoc=*/SourceLocation(),
+ R, /*TemplateArgs=*/nullptr, S);
+
+ return BuildDeclarationNameExpr(SS, R, /* ADL */ false);
+}
+
+/// LookupInObjCMethod - The parser has read a name in, and Sema has
+/// detected that we're currently inside an ObjC method. Perform some
+/// additional lookup.
+///
+/// Ideally, most of this would be done by lookup, but there's
+/// actually quite a lot of extra work involved.
+///
+/// Returns a null sentinel to indicate trivial success.
+ExprResult
+Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
+ IdentifierInfo *II, bool AllowBuiltinCreation) {
+ SourceLocation Loc = Lookup.getNameLoc();
+ ObjCMethodDecl *CurMethod = getCurMethodDecl();
+
+ // Check for error condition which is already reported.
+ if (!CurMethod)
+ return ExprError();
+
+  // There are two cases to handle here. 1) scoped lookup could have failed,
+  // in which case we should look for an ivar. 2) scoped lookup could have
+  // found a decl, but that decl is outside the current instance method (i.e.
+  // a global variable). In these two cases, we do a lookup for an ivar with
+  // this name; if the lookup succeeds, we replace the decl we found with it.
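+  //
+  // Illustrative case for 2): with a file-scope 'int val;' and an instance
+  // method of a class whose interface declares an ivar 'val', an unqualified
+  // use of 'val' in the method body finds the global first, and we replace
+  // it with the ivar here.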
+
+ // If we're in a class method, we don't normally want to look for
+ // ivars. But if we don't find anything else, and there's an
+ // ivar, that's an error.
+ bool IsClassMethod = CurMethod->isClassMethod();
+
+ bool LookForIvars;
+ if (Lookup.empty())
+ LookForIvars = true;
+ else if (IsClassMethod)
+ LookForIvars = false;
+ else
+ LookForIvars = (Lookup.isSingleResult() &&
+ Lookup.getFoundDecl()->isDefinedOutsideFunctionOrMethod());
+ ObjCInterfaceDecl *IFace = nullptr;
+ if (LookForIvars) {
+ IFace = CurMethod->getClassInterface();
+ ObjCInterfaceDecl *ClassDeclared;
+ ObjCIvarDecl *IV = nullptr;
+ if (IFace && (IV = IFace->lookupInstanceVariable(II, ClassDeclared))) {
+ // Diagnose using an ivar in a class method.
+ if (IsClassMethod)
+ return ExprError(Diag(Loc, diag::error_ivar_use_in_class_method)
+ << IV->getDeclName());
+
+ // If we're referencing an invalid decl, just return this as a silent
+ // error node. The error diagnostic was already emitted on the decl.
+ if (IV->isInvalidDecl())
+ return ExprError();
+
+ // Check if referencing a field with __attribute__((deprecated)).
+ if (DiagnoseUseOfDecl(IV, Loc))
+ return ExprError();
+
+ // Diagnose the use of an ivar outside of the declaring class.
+ if (IV->getAccessControl() == ObjCIvarDecl::Private &&
+ !declaresSameEntity(ClassDeclared, IFace) &&
+ !getLangOpts().DebuggerSupport)
+ Diag(Loc, diag::error_private_ivar_access) << IV->getDeclName();
+
+      // FIXME: This should use a new expr for a direct reference; don't
+ // turn this into Self->ivar, just return a BareIVarExpr or something.
+ IdentifierInfo &II = Context.Idents.get("self");
+ UnqualifiedId SelfName;
+ SelfName.setIdentifier(&II, SourceLocation());
+ SelfName.setKind(UnqualifiedId::IK_ImplicitSelfParam);
+ CXXScopeSpec SelfScopeSpec;
+ SourceLocation TemplateKWLoc;
+ ExprResult SelfExpr = ActOnIdExpression(S, SelfScopeSpec, TemplateKWLoc,
+ SelfName, false, false);
+ if (SelfExpr.isInvalid())
+ return ExprError();
+
+ SelfExpr = DefaultLvalueConversion(SelfExpr.get());
+ if (SelfExpr.isInvalid())
+ return ExprError();
+
+ MarkAnyDeclReferenced(Loc, IV, true);
+
+ ObjCMethodFamily MF = CurMethod->getMethodFamily();
+ if (MF != OMF_init && MF != OMF_dealloc && MF != OMF_finalize &&
+ !IvarBacksCurrentMethodAccessor(IFace, CurMethod, IV))
+ Diag(Loc, diag::warn_direct_ivar_access) << IV->getDeclName();
+
+ ObjCIvarRefExpr *Result = new (Context)
+ ObjCIvarRefExpr(IV, IV->getUsageType(SelfExpr.get()->getType()), Loc,
+ IV->getLocation(), SelfExpr.get(), true, true);
+
+ if (getLangOpts().ObjCAutoRefCount) {
+ if (IV->getType().getObjCLifetime() == Qualifiers::OCL_Weak) {
+ if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc))
+ recordUseOfEvaluatedWeak(Result);
+ }
+ if (CurContext->isClosure())
+ Diag(Loc, diag::warn_implicitly_retains_self)
+ << FixItHint::CreateInsertion(Loc, "self->");
+ }
+
+ return Result;
+ }
+ } else if (CurMethod->isInstanceMethod()) {
+ // We should warn if a local variable hides an ivar.
+ if (ObjCInterfaceDecl *IFace = CurMethod->getClassInterface()) {
+ ObjCInterfaceDecl *ClassDeclared;
+ if (ObjCIvarDecl *IV = IFace->lookupInstanceVariable(II, ClassDeclared)) {
+ if (IV->getAccessControl() != ObjCIvarDecl::Private ||
+ declaresSameEntity(IFace, ClassDeclared))
+ Diag(Loc, diag::warn_ivar_use_hidden) << IV->getDeclName();
+ }
+ }
+ } else if (Lookup.isSingleResult() &&
+ Lookup.getFoundDecl()->isDefinedOutsideFunctionOrMethod()) {
+ // If accessing a stand-alone ivar in a class method, this is an error.
+ if (const ObjCIvarDecl *IV = dyn_cast<ObjCIvarDecl>(Lookup.getFoundDecl()))
+ return ExprError(Diag(Loc, diag::error_ivar_use_in_class_method)
+ << IV->getDeclName());
+ }
+
+ if (Lookup.empty() && II && AllowBuiltinCreation) {
+ // FIXME. Consolidate this with similar code in LookupName.
+ if (unsigned BuiltinID = II->getBuiltinID()) {
+ if (!(getLangOpts().CPlusPlus &&
+ Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))) {
+ NamedDecl *D = LazilyCreateBuiltin((IdentifierInfo *)II, BuiltinID,
+ S, Lookup.isForRedeclaration(),
+ Lookup.getNameLoc());
+ if (D) Lookup.addDecl(D);
+ }
+ }
+ }
+ // Sentinel value saying that we didn't do anything special.
+ return ExprResult((Expr *)nullptr);
+}
+
+/// \brief Cast a base object to a member's actual type.
+///
+/// Logically this happens in three phases:
+///
+/// * First we cast from the base type to the naming class.
+/// The naming class is the class into which we were looking
+/// when we found the member; it's the qualifier type if a
+/// qualifier was provided, and otherwise it's the base type.
+///
+/// * Next we cast from the naming class to the declaring class.
+/// If the member we found was brought into a class's scope by
+/// a using declaration, this is that class; otherwise it's
+/// the class declaring the member.
+///
+/// * Finally we cast from the declaring class to the "true"
+/// declaring class of the member. This conversion does not
+/// obey access control.
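+///
+/// An illustrative hierarchy (not from the original source):
+///   struct A { int x; };
+///   struct B : A { using A::x; };
+///   struct C : B { };
+/// For 'C c; c.B::x', the naming class is B (the qualifier), the class
+/// through whose using-declaration we found 'x' is B, and the true
+/// declaring class is A.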
+ExprResult
+Sema::PerformObjectMemberConversion(Expr *From,
+ NestedNameSpecifier *Qualifier,
+ NamedDecl *FoundDecl,
+ NamedDecl *Member) {
+ CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Member->getDeclContext());
+ if (!RD)
+ return From;
+
+ QualType DestRecordType;
+ QualType DestType;
+ QualType FromRecordType;
+ QualType FromType = From->getType();
+ bool PointerConversions = false;
+ if (isa<FieldDecl>(Member)) {
+ DestRecordType = Context.getCanonicalType(Context.getTypeDeclType(RD));
+
+ if (FromType->getAs<PointerType>()) {
+ DestType = Context.getPointerType(DestRecordType);
+ FromRecordType = FromType->getPointeeType();
+ PointerConversions = true;
+ } else {
+ DestType = DestRecordType;
+ FromRecordType = FromType;
+ }
+ } else if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Member)) {
+ if (Method->isStatic())
+ return From;
+
+ DestType = Method->getThisType(Context);
+ DestRecordType = DestType->getPointeeType();
+
+ if (FromType->getAs<PointerType>()) {
+ FromRecordType = FromType->getPointeeType();
+ PointerConversions = true;
+ } else {
+ FromRecordType = FromType;
+ DestType = DestRecordType;
+ }
+ } else {
+ // No conversion necessary.
+ return From;
+ }
+
+ if (DestType->isDependentType() || FromType->isDependentType())
+ return From;
+
+ // If the unqualified types are the same, no conversion is necessary.
+ if (Context.hasSameUnqualifiedType(FromRecordType, DestRecordType))
+ return From;
+
+ SourceRange FromRange = From->getSourceRange();
+ SourceLocation FromLoc = FromRange.getBegin();
+
+ ExprValueKind VK = From->getValueKind();
+
+ // C++ [class.member.lookup]p8:
+ // [...] Ambiguities can often be resolved by qualifying a name with its
+ // class name.
+ //
+  // If the member was a qualified name and the qualifier referred to a
+ // specific base subobject type, we'll cast to that intermediate type
+ // first and then to the object in which the member is declared. That allows
+ // one to resolve ambiguities in, e.g., a diamond-shaped hierarchy such as:
+ //
+ // class Base { public: int x; };
+ // class Derived1 : public Base { };
+ // class Derived2 : public Base { };
+ // class VeryDerived : public Derived1, public Derived2 { void f(); };
+ //
+ // void VeryDerived::f() {
+ // x = 17; // error: ambiguous base subobjects
+ // Derived1::x = 17; // okay, pick the Base subobject of Derived1
+ // }
+ if (Qualifier && Qualifier->getAsType()) {
+ QualType QType = QualType(Qualifier->getAsType(), 0);
+ assert(QType->isRecordType() && "lookup done with non-record type");
+
+ QualType QRecordType = QualType(QType->getAs<RecordType>(), 0);
+
+ // In C++98, the qualifier type doesn't actually have to be a base
+ // type of the object type, in which case we just ignore it.
+ // Otherwise build the appropriate casts.
+ if (IsDerivedFrom(FromLoc, FromRecordType, QRecordType)) {
+ CXXCastPath BasePath;
+ if (CheckDerivedToBaseConversion(FromRecordType, QRecordType,
+ FromLoc, FromRange, &BasePath))
+ return ExprError();
+
+ if (PointerConversions)
+ QType = Context.getPointerType(QType);
+ From = ImpCastExprToType(From, QType, CK_UncheckedDerivedToBase,
+ VK, &BasePath).get();
+
+ FromType = QType;
+ FromRecordType = QRecordType;
+
+ // If the qualifier type was the same as the destination type,
+ // we're done.
+ if (Context.hasSameUnqualifiedType(FromRecordType, DestRecordType))
+ return From;
+ }
+ }
+
+ bool IgnoreAccess = false;
+
+ // If we actually found the member through a using declaration, cast
+ // down to the using declaration's type.
+ //
+ // Pointer equality is fine here because only one declaration of a
+ // class ever has member declarations.
+ if (FoundDecl->getDeclContext() != Member->getDeclContext()) {
+ assert(isa<UsingShadowDecl>(FoundDecl));
+ QualType URecordType = Context.getTypeDeclType(
+ cast<CXXRecordDecl>(FoundDecl->getDeclContext()));
+
+ // We only need to do this if the naming-class to declaring-class
+ // conversion is non-trivial.
+ if (!Context.hasSameUnqualifiedType(FromRecordType, URecordType)) {
+ assert(IsDerivedFrom(FromLoc, FromRecordType, URecordType));
+ CXXCastPath BasePath;
+ if (CheckDerivedToBaseConversion(FromRecordType, URecordType,
+ FromLoc, FromRange, &BasePath))
+ return ExprError();
+
+ QualType UType = URecordType;
+ if (PointerConversions)
+ UType = Context.getPointerType(UType);
+ From = ImpCastExprToType(From, UType, CK_UncheckedDerivedToBase,
+ VK, &BasePath).get();
+ FromType = UType;
+ FromRecordType = URecordType;
+ }
+
+ // We don't do access control for the conversion from the
+ // declaring class to the true declaring class.
+ IgnoreAccess = true;
+ }
+
+ CXXCastPath BasePath;
+ if (CheckDerivedToBaseConversion(FromRecordType, DestRecordType,
+ FromLoc, FromRange, &BasePath,
+ IgnoreAccess))
+ return ExprError();
+
+ return ImpCastExprToType(From, DestType, CK_UncheckedDerivedToBase,
+ VK, &BasePath);
+}
+
+bool Sema::UseArgumentDependentLookup(const CXXScopeSpec &SS,
+ const LookupResult &R,
+ bool HasTrailingLParen) {
+ // Only when used directly as the postfix-expression of a call.
+ if (!HasTrailingLParen)
+ return false;
+
+ // Never if a scope specifier was provided.
+ if (SS.isSet())
+ return false;
+
+ // Only in C++ or ObjC++.
+ if (!getLangOpts().CPlusPlus)
+ return false;
+
+ // Turn off ADL when we find certain kinds of declarations during
+ // normal lookup:
+ for (NamedDecl *D : R) {
+ // C++0x [basic.lookup.argdep]p3:
+ // -- a declaration of a class member
+ // Since using decls preserve this property, we check this on the
+ // original decl.
+ if (D->isCXXClassMember())
+ return false;
+
+ // C++0x [basic.lookup.argdep]p3:
+ // -- a block-scope function declaration that is not a
+ // using-declaration
+ // NOTE: we also trigger this for function templates (in fact, we
+ // don't check the decl type at all, since all other decl types
+ // turn off ADL anyway).
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+ else if (D->getLexicalDeclContext()->isFunctionOrMethod())
+ return false;
+
+ // C++0x [basic.lookup.argdep]p3:
+    //     -- a declaration that is neither a function nor a function
+ // template
+ // And also for builtin functions.
+ if (isa<FunctionDecl>(D)) {
+ FunctionDecl *FDecl = cast<FunctionDecl>(D);
+
+ // But also builtin functions.
+ if (FDecl->getBuiltinID() && FDecl->isImplicit())
+ return false;
+ } else if (!isa<FunctionTemplateDecl>(D))
+ return false;
+ }
+
+ return true;
+}
+
+
+/// Diagnoses obvious problems with the use of the given declaration
+/// as an expression. This is only actually called for lookups that
+/// were not overloaded, and it doesn't promise that the declaration
+/// will in fact be used.
+static bool CheckDeclInExpr(Sema &S, SourceLocation Loc, NamedDecl *D) {
+ if (isa<TypedefNameDecl>(D)) {
+ S.Diag(Loc, diag::err_unexpected_typedef) << D->getDeclName();
+ return true;
+ }
+
+ if (isa<ObjCInterfaceDecl>(D)) {
+ S.Diag(Loc, diag::err_unexpected_interface) << D->getDeclName();
+ return true;
+ }
+
+ if (isa<NamespaceDecl>(D)) {
+ S.Diag(Loc, diag::err_unexpected_namespace) << D->getDeclName();
+ return true;
+ }
+
+ return false;
+}
+
+ExprResult Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
+ LookupResult &R, bool NeedsADL,
+ bool AcceptInvalidDecl) {
+ // If this is a single, fully-resolved result and we don't need ADL,
+ // just build an ordinary singleton decl ref.
+ if (!NeedsADL && R.isSingleResult() && !R.getAsSingle<FunctionTemplateDecl>())
+ return BuildDeclarationNameExpr(SS, R.getLookupNameInfo(), R.getFoundDecl(),
+ R.getRepresentativeDecl(), nullptr,
+ AcceptInvalidDecl);
+
+ // We only need to check the declaration if there's exactly one
+ // result, because in the overloaded case the results can only be
+ // functions and function templates.
+ if (R.isSingleResult() &&
+ CheckDeclInExpr(*this, R.getNameLoc(), R.getFoundDecl()))
+ return ExprError();
+
+ // Otherwise, just build an unresolved lookup expression. Suppress
+ // any lookup-related diagnostics; we'll hash these out later, when
+ // we've picked a target.
+ R.suppressDiagnostics();
+
+ UnresolvedLookupExpr *ULE
+ = UnresolvedLookupExpr::Create(Context, R.getNamingClass(),
+ SS.getWithLocInContext(Context),
+ R.getLookupNameInfo(),
+ NeedsADL, R.isOverloadedResult(),
+ R.begin(), R.end());
+
+ return ULE;
+}
+
+/// \brief Complete semantic analysis for a reference to the given declaration.
+ExprResult Sema::BuildDeclarationNameExpr(
+ const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
+ NamedDecl *FoundD, const TemplateArgumentListInfo *TemplateArgs,
+ bool AcceptInvalidDecl) {
+ assert(D && "Cannot refer to a NULL declaration");
+ assert(!isa<FunctionTemplateDecl>(D) &&
+ "Cannot refer unambiguously to a function template");
+
+ SourceLocation Loc = NameInfo.getLoc();
+ if (CheckDeclInExpr(*this, Loc, D))
+ return ExprError();
+
+ if (TemplateDecl *Template = dyn_cast<TemplateDecl>(D)) {
+ // Specifically diagnose references to class templates that are missing
+ // a template argument list.
+ Diag(Loc, diag::err_template_decl_ref) << (isa<VarTemplateDecl>(D) ? 1 : 0)
+ << Template << SS.getRange();
+ Diag(Template->getLocation(), diag::note_template_decl_here);
+ return ExprError();
+ }
+
+ // Make sure that we're referring to a value.
+ ValueDecl *VD = dyn_cast<ValueDecl>(D);
+ if (!VD) {
+ Diag(Loc, diag::err_ref_non_value)
+ << D << SS.getRange();
+ Diag(D->getLocation(), diag::note_declared_at);
+ return ExprError();
+ }
+
+ // Check whether this declaration can be used. Note that we suppress
+ // this check when we're going to perform argument-dependent lookup
+ // on this function name, because this might not be the function
+ // that overload resolution actually selects.
+ if (DiagnoseUseOfDecl(VD, Loc))
+ return ExprError();
+
+  // Only create DeclRefExprs for valid Decls.
+ if (VD->isInvalidDecl() && !AcceptInvalidDecl)
+ return ExprError();
+
+ // Handle members of anonymous structs and unions. If we got here,
+ // and the reference is to a class member indirect field, then this
+ // must be the subject of a pointer-to-member expression.
+ if (IndirectFieldDecl *indirectField = dyn_cast<IndirectFieldDecl>(VD))
+ if (!indirectField->isCXXClassMember())
+ return BuildAnonymousStructUnionMemberReference(SS, NameInfo.getLoc(),
+ indirectField);
+
+ {
+ QualType type = VD->getType();
+ ExprValueKind valueKind = VK_RValue;
+
+ switch (D->getKind()) {
+ // Ignore all the non-ValueDecl kinds.
+#define ABSTRACT_DECL(kind)
+#define VALUE(type, base)
+#define DECL(type, base) \
+ case Decl::type:
+#include "clang/AST/DeclNodes.inc"
+ llvm_unreachable("invalid value decl kind");
+
+ // These shouldn't make it here.
+ case Decl::ObjCAtDefsField:
+ case Decl::ObjCIvar:
+ llvm_unreachable("forming non-member reference to ivar?");
+
+ // Enum constants are always r-values and never references.
+ // Unresolved using declarations are dependent.
+ case Decl::EnumConstant:
+ case Decl::UnresolvedUsingValue:
+ valueKind = VK_RValue;
+ break;
+
+ // Fields and indirect fields that got here must be for
+ // pointer-to-member expressions; we just call them l-values for
+ // internal consistency, because this subexpression doesn't really
+ // exist in the high-level semantics.
+ case Decl::Field:
+ case Decl::IndirectField:
+ assert(getLangOpts().CPlusPlus &&
+ "building reference to field in C?");
+
+ // These can't have reference type in well-formed programs, but
+ // for internal consistency we do this anyway.
+ type = type.getNonReferenceType();
+ valueKind = VK_LValue;
+ break;
+
+ // Non-type template parameters are either l-values or r-values
+ // depending on the type.
+ case Decl::NonTypeTemplateParm: {
+ if (const ReferenceType *reftype = type->getAs<ReferenceType>()) {
+ type = reftype->getPointeeType();
+ valueKind = VK_LValue; // even if the parameter is an r-value reference
+ break;
+ }
+
+ // For non-references, we need to strip qualifiers just in case
+ // the template parameter was declared as 'const int' or whatever.
+ valueKind = VK_RValue;
+ type = type.getUnqualifiedType();
+ break;
+ }
+
+ case Decl::Var:
+ case Decl::VarTemplateSpecialization:
+ case Decl::VarTemplatePartialSpecialization:
+ // In C, "extern void blah;" is valid and is an r-value.
+ if (!getLangOpts().CPlusPlus &&
+ !type.hasQualifiers() &&
+ type->isVoidType()) {
+ valueKind = VK_RValue;
+ break;
+ }
+ // fallthrough
+
+ case Decl::ImplicitParam:
+ case Decl::ParmVar: {
+ // These are always l-values.
+ valueKind = VK_LValue;
+ type = type.getNonReferenceType();
+
+ // FIXME: Does the addition of const really only apply in
+ // potentially-evaluated contexts? Since the variable isn't actually
+ // captured in an unevaluated context, it seems that the answer is no.
+ if (!isUnevaluatedContext()) {
+ QualType CapturedType = getCapturedDeclRefType(cast<VarDecl>(VD), Loc);
+ if (!CapturedType.isNull())
+ type = CapturedType;
+ }
+
+ break;
+ }
+
+ case Decl::Function: {
+ if (unsigned BID = cast<FunctionDecl>(VD)->getBuiltinID()) {
+ if (!Context.BuiltinInfo.isPredefinedLibFunction(BID)) {
+ type = Context.BuiltinFnTy;
+ valueKind = VK_RValue;
+ break;
+ }
+ }
+
+ const FunctionType *fty = type->castAs<FunctionType>();
+
+ // If we're referring to a function with an __unknown_anytype
+ // result type, make the entire expression __unknown_anytype.
+ if (fty->getReturnType() == Context.UnknownAnyTy) {
+ type = Context.UnknownAnyTy;
+ valueKind = VK_RValue;
+ break;
+ }
+
+ // Functions are l-values in C++.
+ if (getLangOpts().CPlusPlus) {
+ valueKind = VK_LValue;
+ break;
+ }
+
+ // C99 DR 316 says that, if a function type comes from a
+ // function definition (without a prototype), that type is only
+ // used for checking compatibility. Therefore, when referencing
+ // the function, we pretend that we don't have the full function
+ // type.
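+      // Illustrative example:
+      //   int f(x) int x; { return x; }   // K&R definition, no prototype
+      // A reference to 'f' here is given type 'int ()' rather than
+      // 'int (int)'.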
+ if (!cast<FunctionDecl>(VD)->hasPrototype() &&
+ isa<FunctionProtoType>(fty))
+ type = Context.getFunctionNoProtoType(fty->getReturnType(),
+ fty->getExtInfo());
+
+ // Functions are r-values in C.
+ valueKind = VK_RValue;
+ break;
+ }
+
+ case Decl::MSProperty:
+ valueKind = VK_LValue;
+ break;
+
+ case Decl::CXXMethod:
+ // If we're referring to a method with an __unknown_anytype
+ // result type, make the entire expression __unknown_anytype.
+ // This should only be possible with a type written directly.
+ if (const FunctionProtoType *proto
+ = dyn_cast<FunctionProtoType>(VD->getType()))
+ if (proto->getReturnType() == Context.UnknownAnyTy) {
+ type = Context.UnknownAnyTy;
+ valueKind = VK_RValue;
+ break;
+ }
+
+ // C++ methods are l-values if static, r-values if non-static.
+ if (cast<CXXMethodDecl>(VD)->isStatic()) {
+ valueKind = VK_LValue;
+ break;
+ }
+ // fallthrough
+
+ case Decl::CXXConversion:
+ case Decl::CXXDestructor:
+ case Decl::CXXConstructor:
+ valueKind = VK_RValue;
+ break;
+ }
+
+ return BuildDeclRefExpr(VD, type, valueKind, NameInfo, &SS, FoundD,
+ TemplateArgs);
+ }
+}
+
+static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source,
+ SmallString<32> &Target) {
+ Target.resize(CharByteWidth * (Source.size() + 1));
+ char *ResultPtr = &Target[0];
+ const UTF8 *ErrorPtr;
+ bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr, ErrorPtr);
+ (void)success;
+ assert(success);
+ Target.resize(ResultPtr - &Target[0]);
+}
+
+ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
+ PredefinedExpr::IdentType IT) {
+ // Pick the current block, lambda, captured statement or function.
+ Decl *currentDecl = nullptr;
+ if (const BlockScopeInfo *BSI = getCurBlock())
+ currentDecl = BSI->TheDecl;
+ else if (const LambdaScopeInfo *LSI = getCurLambda())
+ currentDecl = LSI->CallOperator;
+ else if (const CapturedRegionScopeInfo *CSI = getCurCapturedRegion())
+ currentDecl = CSI->TheCapturedDecl;
+ else
+ currentDecl = getCurFunctionOrMethodDecl();
+
+ if (!currentDecl) {
+ Diag(Loc, diag::ext_predef_outside_function);
+ currentDecl = Context.getTranslationUnitDecl();
+ }
+
+ QualType ResTy;
+ StringLiteral *SL = nullptr;
+ if (cast<DeclContext>(currentDecl)->isDependentContext())
+ ResTy = Context.DependentTy;
+ else {
+ // Pre-defined identifiers are of type char[x], where x is the length of
+ // the string.
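+    // e.g. inside 'void foo()', __func__ behaves as if declared
+    //   static const char __func__[] = "foo";   // type char[4]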
+ auto Str = PredefinedExpr::ComputeName(IT, currentDecl);
+ unsigned Length = Str.length();
+
+ llvm::APInt LengthI(32, Length + 1);
+ if (IT == PredefinedExpr::LFunction) {
+ ResTy = Context.WideCharTy.withConst();
+ SmallString<32> RawChars;
+ ConvertUTF8ToWideString(Context.getTypeSizeInChars(ResTy).getQuantity(),
+ Str, RawChars);
+ ResTy = Context.getConstantArrayType(ResTy, LengthI, ArrayType::Normal,
+ /*IndexTypeQuals*/ 0);
+ SL = StringLiteral::Create(Context, RawChars, StringLiteral::Wide,
+ /*Pascal*/ false, ResTy, Loc);
+ } else {
+ ResTy = Context.CharTy.withConst();
+ ResTy = Context.getConstantArrayType(ResTy, LengthI, ArrayType::Normal,
+ /*IndexTypeQuals*/ 0);
+ SL = StringLiteral::Create(Context, Str, StringLiteral::Ascii,
+ /*Pascal*/ false, ResTy, Loc);
+ }
+ }
+
+ return new (Context) PredefinedExpr(Loc, ResTy, IT, SL);
+}
+
+ExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind) {
+ PredefinedExpr::IdentType IT;
+
+ switch (Kind) {
+ default: llvm_unreachable("Unknown simple primary expr!");
+ case tok::kw___func__: IT = PredefinedExpr::Func; break; // [C99 6.4.2.2]
+ case tok::kw___FUNCTION__: IT = PredefinedExpr::Function; break;
+ case tok::kw___FUNCDNAME__: IT = PredefinedExpr::FuncDName; break; // [MS]
+ case tok::kw___FUNCSIG__: IT = PredefinedExpr::FuncSig; break; // [MS]
+ case tok::kw_L__FUNCTION__: IT = PredefinedExpr::LFunction; break;
+ case tok::kw___PRETTY_FUNCTION__: IT = PredefinedExpr::PrettyFunction; break;
+ }
+
+ return BuildPredefinedExpr(Loc, IT);
+}
+
+ExprResult Sema::ActOnCharacterConstant(const Token &Tok, Scope *UDLScope) {
+ SmallString<16> CharBuffer;
+ bool Invalid = false;
+ StringRef ThisTok = PP.getSpelling(Tok, CharBuffer, &Invalid);
+ if (Invalid)
+ return ExprError();
+
+ CharLiteralParser Literal(ThisTok.begin(), ThisTok.end(), Tok.getLocation(),
+ PP, Tok.getKind());
+ if (Literal.hadError())
+ return ExprError();
+
+ QualType Ty;
+ if (Literal.isWide())
+ Ty = Context.WideCharTy; // L'x' -> wchar_t in C and C++.
+ else if (Literal.isUTF16())
+ Ty = Context.Char16Ty; // u'x' -> char16_t in C11 and C++11.
+ else if (Literal.isUTF32())
+ Ty = Context.Char32Ty; // U'x' -> char32_t in C11 and C++11.
+ else if (!getLangOpts().CPlusPlus || Literal.isMultiChar())
+ Ty = Context.IntTy; // 'x' -> int in C, 'wxyz' -> int in C++.
+ else
+ Ty = Context.CharTy; // 'x' -> char in C++
+
+ CharacterLiteral::CharacterKind Kind = CharacterLiteral::Ascii;
+ if (Literal.isWide())
+ Kind = CharacterLiteral::Wide;
+ else if (Literal.isUTF16())
+ Kind = CharacterLiteral::UTF16;
+ else if (Literal.isUTF32())
+ Kind = CharacterLiteral::UTF32;
+
+ Expr *Lit = new (Context) CharacterLiteral(Literal.getValue(), Kind, Ty,
+ Tok.getLocation());
+
+ if (Literal.getUDSuffix().empty())
+ return Lit;
+
+ // We're building a user-defined literal.
+ IdentifierInfo *UDSuffix = &Context.Idents.get(Literal.getUDSuffix());
+ SourceLocation UDSuffixLoc =
+ getUDSuffixLoc(*this, Tok.getLocation(), Literal.getUDSuffixOffset());
+
+ // Make sure we're allowed user-defined literals here.
+ if (!UDLScope)
+ return ExprError(Diag(UDSuffixLoc, diag::err_invalid_character_udl));
+
+ // C++11 [lex.ext]p6: The literal L is treated as a call of the form
+ // operator "" X (ch)
+ return BuildCookedLiteralOperatorCall(*this, UDLScope, UDSuffix, UDSuffixLoc,
+ Lit, Tok.getLocation());
+}
+
+ExprResult Sema::ActOnIntegerConstant(SourceLocation Loc, uint64_t Val) {
+ unsigned IntSize = Context.getTargetInfo().getIntWidth();
+ return IntegerLiteral::Create(Context, llvm::APInt(IntSize, Val),
+ Context.IntTy, Loc);
+}
+
+static Expr *BuildFloatingLiteral(Sema &S, NumericLiteralParser &Literal,
+ QualType Ty, SourceLocation Loc) {
+ const llvm::fltSemantics &Format = S.Context.getFloatTypeSemantics(Ty);
+
+ using llvm::APFloat;
+ APFloat Val(Format);
+
+ APFloat::opStatus result = Literal.GetFloatValue(Val);
+
+ // Overflow is always an error, but underflow is only an error if
+ // we underflowed to zero (APFloat reports denormals as underflow).
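+  // e.g. (illustrative): '1e99999' overflows and is diagnosed; '1e-99999'
+  // underflows to zero and is diagnosed; a denormal result such as 1e-310
+  // for double is accepted silently.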
+ if ((result & APFloat::opOverflow) ||
+ ((result & APFloat::opUnderflow) && Val.isZero())) {
+ unsigned diagnostic;
+ SmallString<20> buffer;
+ if (result & APFloat::opOverflow) {
+ diagnostic = diag::warn_float_overflow;
+ APFloat::getLargest(Format).toString(buffer);
+ } else {
+ diagnostic = diag::warn_float_underflow;
+ APFloat::getSmallest(Format).toString(buffer);
+ }
+
+ S.Diag(Loc, diagnostic)
+ << Ty
+ << StringRef(buffer.data(), buffer.size());
+ }
+
+ bool isExact = (result == APFloat::opOK);
+ return FloatingLiteral::Create(S.Context, Val, isExact, Ty, Loc);
+}
+
+bool Sema::CheckLoopHintExpr(Expr *E, SourceLocation Loc) {
+ assert(E && "Invalid expression");
+
+ if (E->isValueDependent())
+ return false;
+
+ QualType QT = E->getType();
+ if (!QT->isIntegerType() || QT->isBooleanType() || QT->isCharType()) {
+ Diag(E->getExprLoc(), diag::err_pragma_loop_invalid_argument_type) << QT;
+ return true;
+ }
+
+ llvm::APSInt ValueAPS;
+ ExprResult R = VerifyIntegerConstantExpression(E, &ValueAPS);
+
+ if (R.isInvalid())
+ return true;
+
+ bool ValueIsPositive = ValueAPS.isStrictlyPositive();
+ if (!ValueIsPositive || ValueAPS.getActiveBits() > 31) {
+ Diag(E->getExprLoc(), diag::err_pragma_loop_invalid_argument_value)
+ << ValueAPS.toString(10) << ValueIsPositive;
+ return true;
+ }
+
+ return false;
+}
+
+ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
+ // Fast path for a single digit (which is quite common). A single digit
+ // cannot have a trigraph, escaped newline, radix prefix, or suffix.
+ if (Tok.getLength() == 1) {
+ const char Val = PP.getSpellingOfSingleCharacterNumericConstant(Tok);
+ return ActOnIntegerConstant(Tok.getLocation(), Val-'0');
+ }
+
+ SmallString<128> SpellingBuffer;
+ // NumericLiteralParser wants to overread by one character. Add padding to
+ // the buffer in case the token is copied to the buffer. If getSpelling()
+  // returns a StringRef into the memory buffer instead, that buffer already
+  // ends in a null character at EOF, so the overread is safe there as well.
+ SpellingBuffer.resize(Tok.getLength() + 1);
+
+ // Get the spelling of the token, which eliminates trigraphs, etc.
+ bool Invalid = false;
+ StringRef TokSpelling = PP.getSpelling(Tok, SpellingBuffer, &Invalid);
+ if (Invalid)
+ return ExprError();
+
+ NumericLiteralParser Literal(TokSpelling, Tok.getLocation(), PP);
+ if (Literal.hadError)
+ return ExprError();
+
+ if (Literal.hasUDSuffix()) {
+ // We're building a user-defined literal.
+ IdentifierInfo *UDSuffix = &Context.Idents.get(Literal.getUDSuffix());
+ SourceLocation UDSuffixLoc =
+ getUDSuffixLoc(*this, Tok.getLocation(), Literal.getUDSuffixOffset());
+
+ // Make sure we're allowed user-defined literals here.
+ if (!UDLScope)
+ return ExprError(Diag(UDSuffixLoc, diag::err_invalid_numeric_udl));
+
+ QualType CookedTy;
+ if (Literal.isFloatingLiteral()) {
+ // C++11 [lex.ext]p4: If S contains a literal operator with parameter type
+ // long double, the literal is treated as a call of the form
+ // operator "" X (f L)
+ CookedTy = Context.LongDoubleTy;
+ } else {
+ // C++11 [lex.ext]p3: If S contains a literal operator with parameter type
+ // unsigned long long, the literal is treated as a call of the form
+ // operator "" X (n ULL)
+ CookedTy = Context.UnsignedLongLongTy;
+ }
+
+ DeclarationName OpName =
+ Context.DeclarationNames.getCXXLiteralOperatorName(UDSuffix);
+ DeclarationNameInfo OpNameInfo(OpName, UDSuffixLoc);
+ OpNameInfo.setCXXLiteralOperatorNameLoc(UDSuffixLoc);
+
+ SourceLocation TokLoc = Tok.getLocation();
+
+ // Perform literal operator lookup to determine if we're building a raw
+ // literal or a cooked one.
+ LookupResult R(*this, OpName, UDSuffixLoc, LookupOrdinaryName);
+ switch (LookupLiteralOperator(UDLScope, R, CookedTy,
+ /*AllowRaw*/true, /*AllowTemplate*/true,
+ /*AllowStringTemplate*/false)) {
+ case LOLR_Error:
+ return ExprError();
+
+ case LOLR_Cooked: {
+ Expr *Lit;
+ if (Literal.isFloatingLiteral()) {
+ Lit = BuildFloatingLiteral(*this, Literal, CookedTy, Tok.getLocation());
+ } else {
+ llvm::APInt ResultVal(Context.getTargetInfo().getLongLongWidth(), 0);
+ if (Literal.GetIntegerValue(ResultVal))
+ Diag(Tok.getLocation(), diag::err_integer_literal_too_large)
+ << /* Unsigned */ 1;
+ Lit = IntegerLiteral::Create(Context, ResultVal, CookedTy,
+ Tok.getLocation());
+ }
+ return BuildLiteralOperatorCall(R, OpNameInfo, Lit, TokLoc);
+ }
+
+ case LOLR_Raw: {
+      // C++11 [lex.ext]p3, p4: If S contains a raw literal operator, the
+ // literal is treated as a call of the form
+ // operator "" X ("n")
+ unsigned Length = Literal.getUDSuffixOffset();
+ QualType StrTy = Context.getConstantArrayType(
+ Context.CharTy.withConst(), llvm::APInt(32, Length + 1),
+ ArrayType::Normal, 0);
+ Expr *Lit = StringLiteral::Create(
+ Context, StringRef(TokSpelling.data(), Length), StringLiteral::Ascii,
+ /*Pascal*/false, StrTy, &TokLoc, 1);
+ return BuildLiteralOperatorCall(R, OpNameInfo, Lit, TokLoc);
+ }
+
+ case LOLR_Template: {
+      // C++11 [lex.ext]p3, p4: Otherwise (S contains a literal operator
+      // template), L is treated as a call of the form
+ // operator "" X <'c1', 'c2', ... 'ck'>()
+ // where n is the source character sequence c1 c2 ... ck.
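+      // e.g. '123_x' with a raw literal operator template becomes
+      //   operator "" _x<'1', '2', '3'>()   (illustrative)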
+ TemplateArgumentListInfo ExplicitArgs;
+ unsigned CharBits = Context.getIntWidth(Context.CharTy);
+ bool CharIsUnsigned = Context.CharTy->isUnsignedIntegerType();
+ llvm::APSInt Value(CharBits, CharIsUnsigned);
+ for (unsigned I = 0, N = Literal.getUDSuffixOffset(); I != N; ++I) {
+ Value = TokSpelling[I];
+ TemplateArgument Arg(Context, Value, Context.CharTy);
+ TemplateArgumentLocInfo ArgInfo;
+ ExplicitArgs.addArgument(TemplateArgumentLoc(Arg, ArgInfo));
+ }
+ return BuildLiteralOperatorCall(R, OpNameInfo, None, TokLoc,
+ &ExplicitArgs);
+ }
+ case LOLR_StringTemplate:
+ llvm_unreachable("unexpected literal operator lookup result");
+ }
+ }
+
+ Expr *Res;
+
+ if (Literal.isFloatingLiteral()) {
+ QualType Ty;
+ if (Literal.isFloat)
+ Ty = Context.FloatTy;
+ else if (!Literal.isLong)
+ Ty = Context.DoubleTy;
+ else
+ Ty = Context.LongDoubleTy;
+
+ Res = BuildFloatingLiteral(*this, Literal, Ty, Tok.getLocation());
+
+ if (Ty == Context.DoubleTy) {
+ if (getLangOpts().SinglePrecisionConstants) {
+ Res = ImpCastExprToType(Res, Context.FloatTy, CK_FloatingCast).get();
+ } else if (getLangOpts().OpenCL &&
+ !((getLangOpts().OpenCLVersion >= 120) ||
+ getOpenCLOptions().cl_khr_fp64)) {
+ Diag(Tok.getLocation(), diag::warn_double_const_requires_fp64);
+ Res = ImpCastExprToType(Res, Context.FloatTy, CK_FloatingCast).get();
+ }
+ }
+ } else if (!Literal.isIntegerLiteral()) {
+ return ExprError();
+ } else {
+ QualType Ty;
+
+ // 'long long' is a C99 or C++11 feature.
+ if (!getLangOpts().C99 && Literal.isLongLong) {
+ if (getLangOpts().CPlusPlus)
+ Diag(Tok.getLocation(),
+ getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong);
+ else
+ Diag(Tok.getLocation(), diag::ext_c99_longlong);
+ }
+
+ // Get the value in the widest-possible width.
+ unsigned MaxWidth = Context.getTargetInfo().getIntMaxTWidth();
+ llvm::APInt ResultVal(MaxWidth, 0);
+
+ if (Literal.GetIntegerValue(ResultVal)) {
+ // If this value didn't fit into uintmax_t, error and force to ull.
+ Diag(Tok.getLocation(), diag::err_integer_literal_too_large)
+ << /* Unsigned */ 1;
+ Ty = Context.UnsignedLongLongTy;
+ assert(Context.getTypeSize(Ty) == ResultVal.getBitWidth() &&
+ "long long is not intmax_t?");
+ } else {
+ // If this value fits into a ULL, try to figure out what else it fits into
+ // according to the rules of C99 6.4.4.1p5.
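+      // e.g. with 32-bit int and 64-bit long (illustrative): '0x80000000'
+      // may become 'unsigned int', while decimal '2147483648' without a
+      // suffix skips 'unsigned int' and becomes 'long'.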
+
+ // Octal, Hexadecimal, and integers with a U suffix are allowed to
+ // be an unsigned int.
+ bool AllowUnsigned = Literal.isUnsigned || Literal.getRadix() != 10;
+
+ // Check from smallest to largest, picking the smallest type we can.
+ unsigned Width = 0;
+
+ // Microsoft specific integer suffixes are explicitly sized.
+ if (Literal.MicrosoftInteger) {
+ if (Literal.MicrosoftInteger == 8 && !Literal.isUnsigned) {
+ Width = 8;
+ Ty = Context.CharTy;
+ } else {
+ Width = Literal.MicrosoftInteger;
+ Ty = Context.getIntTypeForBitwidth(Width,
+ /*Signed=*/!Literal.isUnsigned);
+ }
+ }
+
+ if (Ty.isNull() && !Literal.isLong && !Literal.isLongLong) {
+ // Are int/unsigned possibilities?
+ unsigned IntSize = Context.getTargetInfo().getIntWidth();
+
+      // Does it fit in an unsigned int?
+ if (ResultVal.isIntN(IntSize)) {
+ // Does it fit in a signed int?
+ if (!Literal.isUnsigned && ResultVal[IntSize-1] == 0)
+ Ty = Context.IntTy;
+ else if (AllowUnsigned)
+ Ty = Context.UnsignedIntTy;
+ Width = IntSize;
+ }
+ }
+
+ // Are long/unsigned long possibilities?
+ if (Ty.isNull() && !Literal.isLongLong) {
+ unsigned LongSize = Context.getTargetInfo().getLongWidth();
+
+        // Does it fit in an unsigned long?
+ if (ResultVal.isIntN(LongSize)) {
+ // Does it fit in a signed long?
+ if (!Literal.isUnsigned && ResultVal[LongSize-1] == 0)
+ Ty = Context.LongTy;
+ else if (AllowUnsigned)
+ Ty = Context.UnsignedLongTy;
+ // Check according to the rules of C90 6.1.3.2p5. C++03 [lex.icon]p2
+ // is compatible.
+ else if (!getLangOpts().C99 && !getLangOpts().CPlusPlus11) {
+ const unsigned LongLongSize =
+ Context.getTargetInfo().getLongLongWidth();
+ Diag(Tok.getLocation(),
+ getLangOpts().CPlusPlus
+ ? Literal.isLong
+ ? diag::warn_old_implicitly_unsigned_long_cxx
+ : /*C++98 UB*/ diag::
+ ext_old_implicitly_unsigned_long_cxx
+ : diag::warn_old_implicitly_unsigned_long)
+ << (LongLongSize > LongSize ? /*will have type 'long long'*/ 0
+ : /*will be ill-formed*/ 1);
+ Ty = Context.UnsignedLongTy;
+ }
+ Width = LongSize;
+ }
+ }
+
+ // Check long long if needed.
+ if (Ty.isNull()) {
+ unsigned LongLongSize = Context.getTargetInfo().getLongLongWidth();
+
+        // Does it fit in an unsigned long long?
+ if (ResultVal.isIntN(LongLongSize)) {
+ // Does it fit in a signed long long?
+ // To be compatible with MSVC, hex integer literals ending with the
+ // LL or i64 suffix are always signed in Microsoft mode.
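+          // e.g. 0x8000000000000000LL is 'long long' under the Microsoft
+          // extension, 'unsigned long long' otherwise (illustrative).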
+ if (!Literal.isUnsigned && (ResultVal[LongLongSize-1] == 0 ||
+ (getLangOpts().MicrosoftExt && Literal.isLongLong)))
+ Ty = Context.LongLongTy;
+ else if (AllowUnsigned)
+ Ty = Context.UnsignedLongLongTy;
+ Width = LongLongSize;
+ }
+ }
+
+ // If we still couldn't decide a type, we probably have something that
+ // does not fit in a signed long long, but has no U suffix.
+ if (Ty.isNull()) {
+ Diag(Tok.getLocation(), diag::ext_integer_literal_too_large_for_signed);
+ Ty = Context.UnsignedLongLongTy;
+ Width = Context.getTargetInfo().getLongLongWidth();
+ }
+
+ if (ResultVal.getBitWidth() != Width)
+ ResultVal = ResultVal.trunc(Width);
+ }
+ Res = IntegerLiteral::Create(Context, ResultVal, Ty, Tok.getLocation());
+ }
+
+ // If this is an imaginary literal, create the ImaginaryLiteral wrapper.
+ if (Literal.isImaginary)
+ Res = new (Context) ImaginaryLiteral(Res,
+ Context.getComplexType(Res->getType()));
+
+ return Res;
+}
+
+ExprResult Sema::ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E) {
+ assert(E && "ActOnParenExpr() missing expr");
+ return new (Context) ParenExpr(L, R, E);
+}
+
+static bool CheckVecStepTraitOperandType(Sema &S, QualType T,
+ SourceLocation Loc,
+ SourceRange ArgRange) {
+ // [OpenCL 1.1 6.11.12] "The vec_step built-in function takes a built-in
+ // scalar or vector data type argument..."
+ // Every built-in scalar type (OpenCL 1.1 6.1.1) is either an arithmetic
+ // type (C99 6.2.5p18) or void.
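+  // e.g. vec_step(float4) == 4 and vec_step(float3) == 4 (3-component
+  // vectors are sized as 4-component ones), while vec_step(int) == 1
+  // (illustrative).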
+ if (!(T->isArithmeticType() || T->isVoidType() || T->isVectorType())) {
+ S.Diag(Loc, diag::err_vecstep_non_scalar_vector_type)
+ << T << ArgRange;
+ return true;
+ }
+
+ assert((T->isVoidType() || !T->isIncompleteType()) &&
+ "Scalar types should always be complete");
+ return false;
+}
+
+static bool CheckExtensionTraitOperandType(Sema &S, QualType T,
+ SourceLocation Loc,
+ SourceRange ArgRange,
+ UnaryExprOrTypeTrait TraitKind) {
+ // Invalid types must be hard errors for SFINAE in C++.
+ if (S.LangOpts.CPlusPlus)
+ return true;
+
+ // C99 6.5.3.4p1:
+ if (T->isFunctionType() &&
+ (TraitKind == UETT_SizeOf || TraitKind == UETT_AlignOf)) {
+ // sizeof(function)/alignof(function) is allowed as an extension.
+ S.Diag(Loc, diag::ext_sizeof_alignof_function_type)
+ << TraitKind << ArgRange;
+ return false;
+ }
+
+ // Allow sizeof(void)/alignof(void) as an extension, unless in OpenCL where
+ // this is an error (OpenCL v1.1 s6.3.k)
+ if (T->isVoidType()) {
+ unsigned DiagID = S.LangOpts.OpenCL ? diag::err_opencl_sizeof_alignof_type
+ : diag::ext_sizeof_alignof_void_type;
+ S.Diag(Loc, DiagID) << TraitKind << ArgRange;
+ return false;
+ }
+
+ return true;
+}
+
+static bool CheckObjCTraitOperandConstraints(Sema &S, QualType T,
+ SourceLocation Loc,
+ SourceRange ArgRange,
+ UnaryExprOrTypeTrait TraitKind) {
+ // Reject sizeof(interface) and sizeof(interface<proto>) if the
+ // runtime doesn't allow it.
+ if (!S.LangOpts.ObjCRuntime.allowsSizeofAlignof() && T->isObjCObjectType()) {
+ S.Diag(Loc, diag::err_sizeof_nonfragile_interface)
+ << T << (TraitKind == UETT_SizeOf)
+ << ArgRange;
+ return true;
+ }
+
+ return false;
+}
+
+/// \brief Check whether E is a pointer from a decayed array type (the decayed
+/// pointer type is equal to T) and emit a warning if it is.
+static void warnOnSizeofOnArrayDecay(Sema &S, SourceLocation Loc, QualType T,
+ Expr *E) {
+ // Don't warn if the operation changed the type.
+ if (T != E->getType())
+ return;
+
+ // Now look for array decays.
+ ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E);
+ if (!ICE || ICE->getCastKind() != CK_ArrayToPointerDecay)
+ return;
+
+ S.Diag(Loc, diag::warn_sizeof_array_decay) << ICE->getSourceRange()
+ << ICE->getType()
+ << ICE->getSubExpr()->getType();
+}
+
+/// \brief Check the constraints on expression operands to unary type expression
+/// and type traits.
+///
+/// Completes any types necessary and validates the constraints on the operand
+/// expression. The logic mostly mirrors the type-based overload, but may modify
+/// the expression as it completes the type for that expression through template
+/// instantiation, etc.
+bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
+ UnaryExprOrTypeTrait ExprKind) {
+ QualType ExprTy = E->getType();
+ assert(!ExprTy->isReferenceType());
+
+ if (ExprKind == UETT_VecStep)
+ return CheckVecStepTraitOperandType(*this, ExprTy, E->getExprLoc(),
+ E->getSourceRange());
+
+ // Whitelist some types as extensions
+ if (!CheckExtensionTraitOperandType(*this, ExprTy, E->getExprLoc(),
+ E->getSourceRange(), ExprKind))
+ return false;
+
+ // 'alignof' applied to an expression only requires the base element type of
+ // the expression to be complete. 'sizeof' requires the expression's type to
+ // be complete (and will attempt to complete it if it's an array of unknown
+ // bound).
+ if (ExprKind == UETT_AlignOf) {
+ if (RequireCompleteType(E->getExprLoc(),
+ Context.getBaseElementType(E->getType()),
+ diag::err_sizeof_alignof_incomplete_type, ExprKind,
+ E->getSourceRange()))
+ return true;
+ } else {
+ if (RequireCompleteExprType(E, diag::err_sizeof_alignof_incomplete_type,
+ ExprKind, E->getSourceRange()))
+ return true;
+ }
+
+ // Completing the expression's type may have changed it.
+ ExprTy = E->getType();
+ assert(!ExprTy->isReferenceType());
+
+ if (ExprTy->isFunctionType()) {
+ Diag(E->getExprLoc(), diag::err_sizeof_alignof_function_type)
+ << ExprKind << E->getSourceRange();
+ return true;
+ }
+
+ // The operand for sizeof and alignof is in an unevaluated expression context,
+ // so side effects could result in unintended consequences.
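+  // e.g. 'sizeof(i++)' never increments 'i', which is rarely intended.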
+ if ((ExprKind == UETT_SizeOf || ExprKind == UETT_AlignOf) &&
+ ActiveTemplateInstantiations.empty() && E->HasSideEffects(Context, false))
+ Diag(E->getExprLoc(), diag::warn_side_effects_unevaluated_context);
+
+ if (CheckObjCTraitOperandConstraints(*this, ExprTy, E->getExprLoc(),
+ E->getSourceRange(), ExprKind))
+ return true;
+
+ if (ExprKind == UETT_SizeOf) {
+ if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParens())) {
+ if (ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(DeclRef->getFoundDecl())) {
+ QualType OType = PVD->getOriginalType();
+ QualType Type = PVD->getType();
+ if (Type->isPointerType() && OType->isArrayType()) {
+ Diag(E->getExprLoc(), diag::warn_sizeof_array_param)
+ << Type << OType;
+ Diag(PVD->getLocation(), diag::note_declared_at);
+ }
+ }
+ }
+
+ // Warn on "sizeof(array op x)" and "sizeof(x op array)", where the array
+ // decays into a pointer and returns an unintended result. This is most
+ // likely a typo for "sizeof(array) op x".
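+    // e.g. for 'int arr[N]', 'sizeof(arr + 10)' yields sizeof(int *), where
+    // 'sizeof(arr) + 10' was almost certainly intended (illustrative).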
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E->IgnoreParens())) {
+ warnOnSizeofOnArrayDecay(*this, BO->getOperatorLoc(), BO->getType(),
+ BO->getLHS());
+ warnOnSizeofOnArrayDecay(*this, BO->getOperatorLoc(), BO->getType(),
+ BO->getRHS());
+ }
+ }
+
+ return false;
+}
+
+/// \brief Check the constraints on operands to unary expression and type
+/// traits.
+///
+/// This will complete any types necessary, and validate the various constraints
+/// on those operands.
+///
+/// The UsualUnaryConversions() function is *not* called by this routine.
+/// C99 6.3.2.1p[2-4] all state:
+/// Except when it is the operand of the sizeof operator ...
+///
+/// C++ [expr.sizeof]p4
+/// The lvalue-to-rvalue, array-to-pointer, and function-to-pointer
+/// standard conversions are not applied to the operand of sizeof.
+///
+/// This policy is followed for all of the unary trait expressions.
+bool Sema::CheckUnaryExprOrTypeTraitOperand(QualType ExprType,
+ SourceLocation OpLoc,
+ SourceRange ExprRange,
+ UnaryExprOrTypeTrait ExprKind) {
+ if (ExprType->isDependentType())
+ return false;
+
+ // C++ [expr.sizeof]p2:
+ // When applied to a reference or a reference type, the result
+ // is the size of the referenced type.
+ // C++11 [expr.alignof]p3:
+ // When alignof is applied to a reference type, the result
+ // shall be the alignment of the referenced type.
+ if (const ReferenceType *Ref = ExprType->getAs<ReferenceType>())
+ ExprType = Ref->getPointeeType();
+
+ // C11 6.5.3.4/3, C++11 [expr.alignof]p3:
+ // When alignof or _Alignof is applied to an array type, the result
+ // is the alignment of the element type.
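+  // e.g. alignof(int(&)[8]) and alignof(int[8]) both yield alignof(int)
+  // (illustrative).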
+ if (ExprKind == UETT_AlignOf || ExprKind == UETT_OpenMPRequiredSimdAlign)
+ ExprType = Context.getBaseElementType(ExprType);
+
+ if (ExprKind == UETT_VecStep)
+ return CheckVecStepTraitOperandType(*this, ExprType, OpLoc, ExprRange);
+
+ // Whitelist some types as extensions
+ if (!CheckExtensionTraitOperandType(*this, ExprType, OpLoc, ExprRange,
+ ExprKind))
+ return false;
+
+ if (RequireCompleteType(OpLoc, ExprType,
+ diag::err_sizeof_alignof_incomplete_type,
+ ExprKind, ExprRange))
+ return true;
+
+ if (ExprType->isFunctionType()) {
+ Diag(OpLoc, diag::err_sizeof_alignof_function_type)
+ << ExprKind << ExprRange;
+ return true;
+ }
+
+ if (CheckObjCTraitOperandConstraints(*this, ExprType, OpLoc, ExprRange,
+ ExprKind))
+ return true;
+
+ return false;
+}
+
+static bool CheckAlignOfExpr(Sema &S, Expr *E) {
+ E = E->IgnoreParens();
+
+ // Cannot know anything else if the expression is dependent.
+ if (E->isTypeDependent())
+ return false;
+
+ if (E->getObjectKind() == OK_BitField) {
+ S.Diag(E->getExprLoc(), diag::err_sizeof_alignof_typeof_bitfield)
+ << 1 << E->getSourceRange();
+ return true;
+ }
+
+ ValueDecl *D = nullptr;
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ D = DRE->getDecl();
+ } else if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ D = ME->getMemberDecl();
+ }
+
+ // If it's a field, require the containing struct to have a
+ // complete definition so that we can compute the layout.
+ //
+ // This can happen in C++11 onwards, either by naming the member
+ // in a way that is not transformed into a member access expression
+ // (in an unevaluated operand, for instance), or by naming the member
+ // in a trailing-return-type.
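+  // An illustrative C++11 case:
+  //   struct S { int i; };
+  //   unsigned k = __alignof__(S::i);   // names the member in an
+  //                                     // unevaluated operand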
+ //
+ // For the record, since __alignof__ on expressions is a GCC
+ // extension, GCC seems to permit this but always gives the
+ // nonsensical answer 0.
+ //
+ // We don't really need the layout here --- we could instead just
+  // directly check for all the appropriate alignment-lowering
+ // attributes --- but that would require duplicating a lot of
+ // logic that just isn't worth duplicating for such a marginal
+ // use-case.
+ if (FieldDecl *FD = dyn_cast_or_null<FieldDecl>(D)) {
+ // Fast path this check, since we at least know the record has a
+ // definition if we can find a member of it.
+ if (!FD->getParent()->isCompleteDefinition()) {
+ S.Diag(E->getExprLoc(), diag::err_alignof_member_of_incomplete_type)
+ << E->getSourceRange();
+ return true;
+ }
+
+ // Otherwise, if it's a field, and the field doesn't have
+ // reference type, then it must have a complete type (or be a
+ // flexible array member, which we explicitly want to
+ // white-list anyway), which makes the following checks trivial.
+ if (!FD->getType()->isReferenceType())
+ return false;
+ }
+
+ return S.CheckUnaryExprOrTypeTraitOperand(E, UETT_AlignOf);
+}
+
+bool Sema::CheckVecStepExpr(Expr *E) {
+ E = E->IgnoreParens();
+
+ // Cannot know anything else if the expression is dependent.
+ if (E->isTypeDependent())
+ return false;
+
+ return CheckUnaryExprOrTypeTraitOperand(E, UETT_VecStep);
+}
+
+/// \brief Build a sizeof or alignof expression given a type operand.
+ExprResult
+Sema::CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
+ SourceLocation OpLoc,
+ UnaryExprOrTypeTrait ExprKind,
+ SourceRange R) {
+ if (!TInfo)
+ return ExprError();
+
+ QualType T = TInfo->getType();
+
+ if (!T->isDependentType() &&
+ CheckUnaryExprOrTypeTraitOperand(T, OpLoc, R, ExprKind))
+ return ExprError();
+
+ // C99 6.5.3.4p4: the type (an unsigned integer type) is size_t.
+ return new (Context) UnaryExprOrTypeTraitExpr(
+ ExprKind, TInfo, Context.getSizeType(), OpLoc, R.getEnd());
+}
+
+/// \brief Build a sizeof or alignof expression given an expression
+/// operand.
+ExprResult
+Sema::CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
+ UnaryExprOrTypeTrait ExprKind) {
+ ExprResult PE = CheckPlaceholderExpr(E);
+ if (PE.isInvalid())
+ return ExprError();
+
+ E = PE.get();
+
+ // Verify that the operand is valid.
+ bool isInvalid = false;
+ if (E->isTypeDependent()) {
+ // Delay type-checking for type-dependent expressions.
+ } else if (ExprKind == UETT_AlignOf) {
+ isInvalid = CheckAlignOfExpr(*this, E);
+ } else if (ExprKind == UETT_VecStep) {
+ isInvalid = CheckVecStepExpr(E);
+ } else if (ExprKind == UETT_OpenMPRequiredSimdAlign) {
+ Diag(E->getExprLoc(), diag::err_openmp_default_simd_align_expr);
+ isInvalid = true;
+ } else if (E->refersToBitField()) { // C99 6.5.3.4p1.
+ Diag(E->getExprLoc(), diag::err_sizeof_alignof_typeof_bitfield) << 0;
+ isInvalid = true;
+ } else {
+ isInvalid = CheckUnaryExprOrTypeTraitOperand(E, UETT_SizeOf);
+ }
+
+ if (isInvalid)
+ return ExprError();
+
+ if (ExprKind == UETT_SizeOf && E->getType()->isVariableArrayType()) {
+ PE = TransformToPotentiallyEvaluated(E);
+ if (PE.isInvalid()) return ExprError();
+ E = PE.get();
+ }
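+ // (E.g., given "int a[n]; sizeof(a)", the operand is evaluated at run
+ // time per C99 6.5.3.4p2, which is why it is rebuilt as potentially
+ // evaluated above.)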
+
+ // C99 6.5.3.4p4: the type (an unsigned integer type) is size_t.
+ return new (Context) UnaryExprOrTypeTraitExpr(
+ ExprKind, E, Context.getSizeType(), OpLoc, E->getSourceRange().getEnd());
+}
+
+/// ActOnUnaryExprOrTypeTraitExpr - Handle @c sizeof(type) and @c sizeof @c
+/// expr, and the same for @c alignof and @c __alignof.
+/// Note that the ArgRange is invalid if IsType is false.
+ExprResult
+Sema::ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
+ UnaryExprOrTypeTrait ExprKind, bool IsType,
+ void *TyOrEx, SourceRange ArgRange) {
+ // If error parsing type, ignore.
+ if (!TyOrEx) return ExprError();
+
+ if (IsType) {
+ TypeSourceInfo *TInfo;
+ (void) GetTypeFromParser(ParsedType::getFromOpaquePtr(TyOrEx), &TInfo);
+ return CreateUnaryExprOrTypeTraitExpr(TInfo, OpLoc, ExprKind, ArgRange);
+ }
+
+ Expr *ArgEx = (Expr *)TyOrEx;
+ ExprResult Result = CreateUnaryExprOrTypeTraitExpr(ArgEx, OpLoc, ExprKind);
+ return Result;
+}
+
+static QualType CheckRealImagOperand(Sema &S, ExprResult &V, SourceLocation Loc,
+ bool IsReal) {
+ if (V.get()->isTypeDependent())
+ return S.Context.DependentTy;
+
+ // _Real and _Imag are only l-values for normal l-values.
+ if (V.get()->getObjectKind() != OK_Ordinary) {
+ V = S.DefaultLvalueConversion(V.get());
+ if (V.isInvalid())
+ return QualType();
+ }
+
+ // These operators return the element type of a complex type.
+ if (const ComplexType *CT = V.get()->getType()->getAs<ComplexType>())
+ return CT->getElementType();
+
+ // Otherwise they pass through real integer and floating point types here.
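+ // (E.g., "__real d" on a plain double d is just d and "__imag d" is
+ // zero, while on "_Complex double z", "__real z" has type double.)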
+ if (V.get()->getType()->isArithmeticType())
+ return V.get()->getType();
+
+ // Test for placeholders.
+ ExprResult PR = S.CheckPlaceholderExpr(V.get());
+ if (PR.isInvalid()) return QualType();
+ if (PR.get() != V.get()) {
+ V = PR;
+ return CheckRealImagOperand(S, V, Loc, IsReal);
+ }
+
+ // Reject anything else.
+ S.Diag(Loc, diag::err_realimag_invalid_type) << V.get()->getType()
+ << (IsReal ? "__real" : "__imag");
+ return QualType();
+}
+
+ExprResult
+Sema::ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
+ tok::TokenKind Kind, Expr *Input) {
+ UnaryOperatorKind Opc;
+ switch (Kind) {
+ default: llvm_unreachable("Unknown unary op!");
+ case tok::plusplus: Opc = UO_PostInc; break;
+ case tok::minusminus: Opc = UO_PostDec; break;
+ }
+
+ // Since this might be a postfix expression, get rid of ParenListExprs.
+ ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Input);
+ if (Result.isInvalid()) return ExprError();
+ Input = Result.get();
+
+ return BuildUnaryOp(S, OpLoc, Opc, Input);
+}
+
+/// \brief Diagnose if arithmetic on the given ObjC pointer is illegal.
+///
+/// \return true on error
+static bool checkArithmeticOnObjCPointer(Sema &S,
+ SourceLocation opLoc,
+ Expr *op) {
+ assert(op->getType()->isObjCObjectPointerType());
+ if (S.LangOpts.ObjCRuntime.allowsPointerArithmetic() &&
+ !S.LangOpts.ObjCSubscriptingLegacyRuntime)
+ return false;
+
+ S.Diag(opLoc, diag::err_arithmetic_nonfragile_interface)
+ << op->getType()->castAs<ObjCObjectPointerType>()->getPointeeType()
+ << op->getSourceRange();
+ return true;
+}
+
+static bool isMSPropertySubscriptExpr(Sema &S, Expr *Base) {
+ auto *BaseNoParens = Base->IgnoreParens();
+ if (auto *MSProp = dyn_cast<MSPropertyRefExpr>(BaseNoParens))
+ return MSProp->getPropertyDecl()->getType()->isArrayType();
+ return isa<MSPropertySubscriptExpr>(BaseNoParens);
+}
+
+ExprResult
+Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
+ Expr *idx, SourceLocation rbLoc) {
+ if (base && !base->getType().isNull() &&
+ base->getType()->isSpecificPlaceholderType(BuiltinType::OMPArraySection))
+ return ActOnOMPArraySectionExpr(base, lbLoc, idx, SourceLocation(),
+ /*Length=*/nullptr, rbLoc);
+
+ // Since this might be a postfix expression, get rid of ParenListExprs.
+ if (isa<ParenListExpr>(base)) {
+ ExprResult result = MaybeConvertParenListExprToParenExpr(S, base);
+ if (result.isInvalid()) return ExprError();
+ base = result.get();
+ }
+
+ // Handle any non-overload placeholder types in the base and index
+ // expressions. We can't handle overloads here because the other
+ // operand might be an overloadable type, in which case the overload
+ // resolution for the operator overload should get the first crack
+ // at the overload.
+ bool IsMSPropertySubscript = false;
+ if (base->getType()->isNonOverloadPlaceholderType()) {
+ IsMSPropertySubscript = isMSPropertySubscriptExpr(*this, base);
+ if (!IsMSPropertySubscript) {
+ ExprResult result = CheckPlaceholderExpr(base);
+ if (result.isInvalid())
+ return ExprError();
+ base = result.get();
+ }
+ }
+ if (idx->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(idx);
+ if (result.isInvalid()) return ExprError();
+ idx = result.get();
+ }
+
+ // Build an unanalyzed expression if either operand is type-dependent.
+ if (getLangOpts().CPlusPlus &&
+ (base->isTypeDependent() || idx->isTypeDependent())) {
+ return new (Context) ArraySubscriptExpr(base, idx, Context.DependentTy,
+ VK_LValue, OK_Ordinary, rbLoc);
+ }
+
+ // MSDN, property (C++)
+ // https://msdn.microsoft.com/en-us/library/yhfk0thd(v=vs.120).aspx
+ // This attribute can also be used in the declaration of an empty array in a
+ // class or structure definition. For example:
+ // __declspec(property(get=GetX, put=PutX)) int x[];
+ // The above statement indicates that x[] can be used with one or more array
+ // indices. In this case, i=p->x[a][b] will be turned into i=p->GetX(a, b),
+ // and p->x[a][b] = i will be turned into p->PutX(a, b, i);
+ if (IsMSPropertySubscript) {
+ // Build MS property subscript expression if base is MS property reference
+ // or MS property subscript.
+ return new (Context) MSPropertySubscriptExpr(
+ base, idx, Context.PseudoObjectTy, VK_LValue, OK_Ordinary, rbLoc);
+ }
+
+ // Use C++ overloaded-operator rules if either operand has record
+ // type. The spec says to do this if either type is *overloadable*,
+ // but enum types can't declare subscript operators or conversion
+ // operators, so there's nothing interesting for overload resolution
+ // to do if there aren't any record types involved.
+ //
+ // ObjC pointers have their own subscripting logic that is not tied
+ // to overload resolution and so should not take this path.
+ if (getLangOpts().CPlusPlus &&
+ (base->getType()->isRecordType() ||
+ (!base->getType()->isObjCObjectPointerType() &&
+ idx->getType()->isRecordType()))) {
+ return CreateOverloadedArraySubscriptExpr(lbLoc, rbLoc, base, idx);
+ }
+
+ return CreateBuiltinArraySubscriptExpr(base, lbLoc, idx, rbLoc);
+}
+
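+/// (Illustrative: an OpenMP array section "a[lb:len]" designates "len"
+/// elements of "a" starting at index "lb"; both bounds are converted
+/// to integer type below, and negative constants are rejected.)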
+ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
+ Expr *LowerBound,
+ SourceLocation ColonLoc, Expr *Length,
+ SourceLocation RBLoc) {
+ if (Base->getType()->isPlaceholderType() &&
+ !Base->getType()->isSpecificPlaceholderType(
+ BuiltinType::OMPArraySection)) {
+ ExprResult Result = CheckPlaceholderExpr(Base);
+ if (Result.isInvalid())
+ return ExprError();
+ Base = Result.get();
+ }
+ if (LowerBound && LowerBound->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult Result = CheckPlaceholderExpr(LowerBound);
+ if (Result.isInvalid())
+ return ExprError();
+ LowerBound = Result.get();
+ }
+ if (Length && Length->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult Result = CheckPlaceholderExpr(Length);
+ if (Result.isInvalid())
+ return ExprError();
+ Length = Result.get();
+ }
+
+ // Build an unanalyzed expression if either operand is type-dependent.
+ if (Base->isTypeDependent() ||
+ (LowerBound &&
+ (LowerBound->isTypeDependent() || LowerBound->isValueDependent())) ||
+ (Length && (Length->isTypeDependent() || Length->isValueDependent()))) {
+ return new (Context)
+ OMPArraySectionExpr(Base, LowerBound, Length, Context.DependentTy,
+ VK_LValue, OK_Ordinary, ColonLoc, RBLoc);
+ }
+
+ // Perform default conversions.
+ QualType OriginalTy = OMPArraySectionExpr::getBaseOriginalType(Base);
+ QualType ResultTy;
+ if (OriginalTy->isAnyPointerType()) {
+ ResultTy = OriginalTy->getPointeeType();
+ } else if (OriginalTy->isArrayType()) {
+ ResultTy = OriginalTy->getAsArrayTypeUnsafe()->getElementType();
+ } else {
+ return ExprError(
+ Diag(Base->getExprLoc(), diag::err_omp_typecheck_section_value)
+ << Base->getSourceRange());
+ }
+ // C99 6.5.2.1p1
+ if (LowerBound) {
+ auto Res = PerformOpenMPImplicitIntegerConversion(LowerBound->getExprLoc(),
+ LowerBound);
+ if (Res.isInvalid())
+ return ExprError(Diag(LowerBound->getExprLoc(),
+ diag::err_omp_typecheck_section_not_integer)
+ << 0 << LowerBound->getSourceRange());
+ LowerBound = Res.get();
+
+ if (LowerBound->getType()->isSpecificBuiltinType(BuiltinType::Char_S) ||
+ LowerBound->getType()->isSpecificBuiltinType(BuiltinType::Char_U))
+ Diag(LowerBound->getExprLoc(), diag::warn_omp_section_is_char)
+ << 0 << LowerBound->getSourceRange();
+ }
+ if (Length) {
+ auto Res =
+ PerformOpenMPImplicitIntegerConversion(Length->getExprLoc(), Length);
+ if (Res.isInvalid())
+ return ExprError(Diag(Length->getExprLoc(),
+ diag::err_omp_typecheck_section_not_integer)
+ << 1 << Length->getSourceRange());
+ Length = Res.get();
+
+ if (Length->getType()->isSpecificBuiltinType(BuiltinType::Char_S) ||
+ Length->getType()->isSpecificBuiltinType(BuiltinType::Char_U))
+ Diag(Length->getExprLoc(), diag::warn_omp_section_is_char)
+ << 1 << Length->getSourceRange();
+ }
+
+ // C99 6.5.2.1p1: "shall have type "pointer to *object* type". Similarly,
+ // C++ [expr.sub]p1: The type "T" shall be a completely-defined object
+ // type. Note that functions are not objects, and that (in C99 parlance)
+ // incomplete types are not object types.
+ if (ResultTy->isFunctionType()) {
+ Diag(Base->getExprLoc(), diag::err_omp_section_function_type)
+ << ResultTy << Base->getSourceRange();
+ return ExprError();
+ }
+
+ if (RequireCompleteType(Base->getExprLoc(), ResultTy,
+ diag::err_omp_section_incomplete_type, Base))
+ return ExprError();
+
+ if (LowerBound) {
+ llvm::APSInt LowerBoundValue;
+ if (LowerBound->EvaluateAsInt(LowerBoundValue, Context)) {
+ // OpenMP 4.0, [2.4 Array Sections]
+ // The lower-bound and length must evaluate to non-negative integers.
+ if (LowerBoundValue.isNegative()) {
+ Diag(LowerBound->getExprLoc(), diag::err_omp_section_negative)
+ << 0 << LowerBoundValue.toString(/*Radix=*/10, /*Signed=*/true)
+ << LowerBound->getSourceRange();
+ return ExprError();
+ }
+ }
+ }
+
+ if (Length) {
+ llvm::APSInt LengthValue;
+ if (Length->EvaluateAsInt(LengthValue, Context)) {
+ // OpenMP 4.0, [2.4 Array Sections]
+ // The lower-bound and length must evaluate to non-negative integers.
+ if (LengthValue.isNegative()) {
+ Diag(Length->getExprLoc(), diag::err_omp_section_negative)
+ << 1 << LengthValue.toString(/*Radix=*/10, /*Signed=*/true)
+ << Length->getSourceRange();
+ return ExprError();
+ }
+ }
+ } else if (ColonLoc.isValid() &&
+ (OriginalTy.isNull() || (!OriginalTy->isConstantArrayType() &&
+ !OriginalTy->isVariableArrayType()))) {
+ // OpenMP 4.0, [2.4 Array Sections]
+ // When the size of the array dimension is not known, the length must be
+ // specified explicitly.
+ Diag(ColonLoc, diag::err_omp_section_length_undefined)
+ << (!OriginalTy.isNull() && OriginalTy->isArrayType());
+ return ExprError();
+ }
+
+ return new (Context)
+ OMPArraySectionExpr(Base, LowerBound, Length, Context.OMPArraySectionTy,
+ VK_LValue, OK_Ordinary, ColonLoc, RBLoc);
+}
+
+ExprResult
+Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
+ Expr *Idx, SourceLocation RLoc) {
+ Expr *LHSExp = Base;
+ Expr *RHSExp = Idx;
+
+ // Perform default conversions.
+ if (!LHSExp->getType()->getAs<VectorType>()) {
+ ExprResult Result = DefaultFunctionArrayLvalueConversion(LHSExp);
+ if (Result.isInvalid())
+ return ExprError();
+ LHSExp = Result.get();
+ }
+ ExprResult Result = DefaultFunctionArrayLvalueConversion(RHSExp);
+ if (Result.isInvalid())
+ return ExprError();
+ RHSExp = Result.get();
+
+ QualType LHSTy = LHSExp->getType(), RHSTy = RHSExp->getType();
+ ExprValueKind VK = VK_LValue;
+ ExprObjectKind OK = OK_Ordinary;
+
+ // C99 6.5.2.1p2: the expression e1[e2] is by definition precisely equivalent
+ // to the expression *((e1)+(e2)). This means the array "Base" may actually be
+ // in the subscript position. As a result, we need to derive the array base
+ // and index from the expression types.
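+ //
+ // (E.g., "4[a]" is legal and identical to "a[4]", so the checks below
+ // look for the pointer-typed operand on either side.)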
+ Expr *BaseExpr, *IndexExpr;
+ QualType ResultType;
+ if (LHSTy->isDependentType() || RHSTy->isDependentType()) {
+ BaseExpr = LHSExp;
+ IndexExpr = RHSExp;
+ ResultType = Context.DependentTy;
+ } else if (const PointerType *PTy = LHSTy->getAs<PointerType>()) {
+ BaseExpr = LHSExp;
+ IndexExpr = RHSExp;
+ ResultType = PTy->getPointeeType();
+ } else if (const ObjCObjectPointerType *PTy =
+ LHSTy->getAs<ObjCObjectPointerType>()) {
+ BaseExpr = LHSExp;
+ IndexExpr = RHSExp;
+
+ // Use custom logic if this should be the pseudo-object subscript
+ // expression.
+ if (!LangOpts.isSubscriptPointerArithmetic())
+ return BuildObjCSubscriptExpression(RLoc, BaseExpr, IndexExpr, nullptr,
+ nullptr);
+
+ ResultType = PTy->getPointeeType();
+ } else if (const PointerType *PTy = RHSTy->getAs<PointerType>()) {
+ // Handle the uncommon case of "123[Ptr]".
+ BaseExpr = RHSExp;
+ IndexExpr = LHSExp;
+ ResultType = PTy->getPointeeType();
+ } else if (const ObjCObjectPointerType *PTy =
+ RHSTy->getAs<ObjCObjectPointerType>()) {
+ // Handle the uncommon case of "123[Ptr]".
+ BaseExpr = RHSExp;
+ IndexExpr = LHSExp;
+ ResultType = PTy->getPointeeType();
+ if (!LangOpts.isSubscriptPointerArithmetic()) {
+ Diag(LLoc, diag::err_subscript_nonfragile_interface)
+ << ResultType << BaseExpr->getSourceRange();
+ return ExprError();
+ }
+ } else if (const VectorType *VTy = LHSTy->getAs<VectorType>()) {
+ BaseExpr = LHSExp; // vectors: V[123]
+ IndexExpr = RHSExp;
+ VK = LHSExp->getValueKind();
+ if (VK != VK_RValue)
+ OK = OK_VectorComponent;
+
+ // FIXME: need to deal with const...
+ ResultType = VTy->getElementType();
+ } else if (LHSTy->isArrayType()) {
+ // If we see an array that wasn't promoted by
+ // DefaultFunctionArrayLvalueConversion, it must have been skipped
+ // because of the C90 rule that doesn't allow promoting non-lvalue
+ // arrays. Warn, then force the promotion here.
+ Diag(LHSExp->getLocStart(), diag::ext_subscript_non_lvalue) <<
+ LHSExp->getSourceRange();
+ LHSExp = ImpCastExprToType(LHSExp, Context.getArrayDecayedType(LHSTy),
+ CK_ArrayToPointerDecay).get();
+ LHSTy = LHSExp->getType();
+
+ BaseExpr = LHSExp;
+ IndexExpr = RHSExp;
+ ResultType = LHSTy->getAs<PointerType>()->getPointeeType();
+ } else if (RHSTy->isArrayType()) {
+ // Same as previous, except for the 123[f().a] case
+ Diag(RHSExp->getLocStart(), diag::ext_subscript_non_lvalue) <<
+ RHSExp->getSourceRange();
+ RHSExp = ImpCastExprToType(RHSExp, Context.getArrayDecayedType(RHSTy),
+ CK_ArrayToPointerDecay).get();
+ RHSTy = RHSExp->getType();
+
+ BaseExpr = RHSExp;
+ IndexExpr = LHSExp;
+ ResultType = RHSTy->getAs<PointerType>()->getPointeeType();
+ } else {
+ return ExprError(Diag(LLoc, diag::err_typecheck_subscript_value)
+ << LHSExp->getSourceRange() << RHSExp->getSourceRange());
+ }
+ // C99 6.5.2.1p1
+ if (!IndexExpr->getType()->isIntegerType() && !IndexExpr->isTypeDependent())
+ return ExprError(Diag(LLoc, diag::err_typecheck_subscript_not_integer)
+ << IndexExpr->getSourceRange());
+
+ if ((IndexExpr->getType()->isSpecificBuiltinType(BuiltinType::Char_S) ||
+ IndexExpr->getType()->isSpecificBuiltinType(BuiltinType::Char_U))
+ && !IndexExpr->isTypeDependent())
+ Diag(LLoc, diag::warn_subscript_is_char) << IndexExpr->getSourceRange();
+
+ // C99 6.5.2.1p1: "shall have type "pointer to *object* type". Similarly,
+ // C++ [expr.sub]p1: The type "T" shall be a completely-defined object
+ // type. Note that functions are not objects, and that (in C99 parlance)
+ // incomplete types are not object types.
+ if (ResultType->isFunctionType()) {
+ Diag(BaseExpr->getLocStart(), diag::err_subscript_function_type)
+ << ResultType << BaseExpr->getSourceRange();
+ return ExprError();
+ }
+
+ if (ResultType->isVoidType() && !getLangOpts().CPlusPlus) {
+ // GNU extension: subscripting on pointer to void
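+ // (Sketch: given "void *p", GNU C accepts "p[i]" as if p pointed to
+ // char, i.e. void is treated as having size 1.)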
+ Diag(LLoc, diag::ext_gnu_subscript_void_type)
+ << BaseExpr->getSourceRange();
+
+ // C forbids expressions of unqualified void type from being l-values.
+ // See IsCForbiddenLValueType.
+ if (!ResultType.hasQualifiers()) VK = VK_RValue;
+ } else if (!ResultType->isDependentType() &&
+ RequireCompleteType(LLoc, ResultType,
+ diag::err_subscript_incomplete_type, BaseExpr))
+ return ExprError();
+
+ assert(VK == VK_RValue || LangOpts.CPlusPlus ||
+ !ResultType.isCForbiddenLValueType());
+
+ return new (Context)
+ ArraySubscriptExpr(LHSExp, RHSExp, ResultType, VK, OK, RLoc);
+}
+
+ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
+ FunctionDecl *FD,
+ ParmVarDecl *Param) {
+ if (Param->hasUnparsedDefaultArg()) {
+ Diag(CallLoc,
+ diag::err_use_of_default_argument_to_function_declared_later) <<
+ FD << cast<CXXRecordDecl>(FD->getDeclContext())->getDeclName();
+ Diag(UnparsedDefaultArgLocs[Param],
+ diag::note_default_argument_declared_here);
+ return ExprError();
+ }
+
+ if (Param->hasUninstantiatedDefaultArg()) {
+ Expr *UninstExpr = Param->getUninstantiatedDefaultArg();
+
+ EnterExpressionEvaluationContext EvalContext(*this, PotentiallyEvaluated,
+ Param);
+
+ // Instantiate the expression.
+ MultiLevelTemplateArgumentList MutiLevelArgList
+ = getTemplateInstantiationArgs(FD, nullptr, /*RelativeToPrimary=*/true);
+
+ InstantiatingTemplate Inst(*this, CallLoc, Param,
+ MutiLevelArgList.getInnermost());
+ if (Inst.isInvalid())
+ return ExprError();
+
+ ExprResult Result;
+ {
+ // C++ [dcl.fct.default]p5:
+ // The names in the [default argument] expression are bound, and
+ // the semantic constraints are checked, at the point where the
+ // default argument expression appears.
+ ContextRAII SavedContext(*this, FD);
+ LocalInstantiationScope Local(*this);
+ Result = SubstExpr(UninstExpr, MutiLevelArgList);
+ }
+ if (Result.isInvalid())
+ return ExprError();
+
+ // Check the expression as an initializer for the parameter.
+ InitializedEntity Entity
+ = InitializedEntity::InitializeParameter(Context, Param);
+ InitializationKind Kind
+ = InitializationKind::CreateCopy(Param->getLocation(),
+ /*FIXME:EqualLoc*/UninstExpr->getLocStart());
+ Expr *ResultE = Result.getAs<Expr>();
+
+ InitializationSequence InitSeq(*this, Entity, Kind, ResultE);
+ Result = InitSeq.Perform(*this, Entity, Kind, ResultE);
+ if (Result.isInvalid())
+ return ExprError();
+
+ Expr *Arg = Result.getAs<Expr>();
+ CheckCompletedExpr(Arg, Param->getOuterLocStart());
+ // Build the default argument expression.
+ return CXXDefaultArgExpr::Create(Context, CallLoc, Param, Arg);
+ }
+
+ // If the default expression creates temporaries, we need to
+ // push them to the current stack of expression temporaries so they'll
+ // be properly destroyed.
+ // FIXME: We should really be rebuilding the default argument with new
+ // bound temporaries; see the comment in PR5810.
+ // We don't need to do that with block decls, though, because
+ // blocks in default argument expressions can never capture anything.
+ if (isa<ExprWithCleanups>(Param->getInit())) {
+ // Set the "needs cleanups" bit regardless of whether there are
+ // any explicit objects.
+ ExprNeedsCleanups = true;
+
+ // Append all the objects to the cleanup list. Right now, this
+ // should always be a no-op, because blocks in default argument
+ // expressions should never be able to capture anything.
+ assert(!cast<ExprWithCleanups>(Param->getInit())->getNumObjects() &&
+ "default argument expression has capturing blocks?");
+ }
+
+ // We already type-checked the argument, so we know it works.
+ // Just mark all of the declarations in this potentially-evaluated expression
+ // as being "referenced".
+ MarkDeclarationsReferencedInExpr(Param->getDefaultArg(),
+ /*SkipLocalVariables=*/true);
+ return CXXDefaultArgExpr::Create(Context, CallLoc, Param);
+}
+
+Sema::VariadicCallType
+Sema::getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto,
+ Expr *Fn) {
+ if (Proto && Proto->isVariadic()) {
+ if (dyn_cast_or_null<CXXConstructorDecl>(FDecl))
+ return VariadicConstructor;
+ else if (Fn && Fn->getType()->isBlockPointerType())
+ return VariadicBlock;
+ else if (FDecl) {
+ if (CXXMethodDecl *Method = dyn_cast_or_null<CXXMethodDecl>(FDecl))
+ if (Method->isInstance())
+ return VariadicMethod;
+ } else if (Fn && Fn->getType() == Context.BoundMemberTy)
+ return VariadicMethod;
+ return VariadicFunction;
+ }
+ return VariadicDoesNotApply;
+}
+
+namespace {
+class FunctionCallCCC : public FunctionCallFilterCCC {
+public:
+ FunctionCallCCC(Sema &SemaRef, const IdentifierInfo *FuncName,
+ unsigned NumArgs, MemberExpr *ME)
+ : FunctionCallFilterCCC(SemaRef, NumArgs, false, ME),
+ FunctionName(FuncName) {}
+
+ bool ValidateCandidate(const TypoCorrection &candidate) override {
+ if (!candidate.getCorrectionSpecifier() ||
+ candidate.getCorrectionAsIdentifierInfo() != FunctionName) {
+ return false;
+ }
+
+ return FunctionCallFilterCCC::ValidateCandidate(candidate);
+ }
+
+private:
+ const IdentifierInfo *const FunctionName;
+};
+}
+
+static TypoCorrection TryTypoCorrectionForCall(Sema &S, Expr *Fn,
+ FunctionDecl *FDecl,
+ ArrayRef<Expr *> Args) {
+ MemberExpr *ME = dyn_cast<MemberExpr>(Fn);
+ DeclarationName FuncName = FDecl->getDeclName();
+ SourceLocation NameLoc = ME ? ME->getMemberLoc() : Fn->getLocStart();
+
+ if (TypoCorrection Corrected = S.CorrectTypo(
+ DeclarationNameInfo(FuncName, NameLoc), Sema::LookupOrdinaryName,
+ S.getScopeForContext(S.CurContext), nullptr,
+ llvm::make_unique<FunctionCallCCC>(S, FuncName.getAsIdentifierInfo(),
+ Args.size(), ME),
+ Sema::CTK_ErrorRecovery)) {
+ if (NamedDecl *ND = Corrected.getFoundDecl()) {
+ if (Corrected.isOverloaded()) {
+ OverloadCandidateSet OCS(NameLoc, OverloadCandidateSet::CSK_Normal);
+ OverloadCandidateSet::iterator Best;
+ for (NamedDecl *CD : Corrected) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(CD))
+ S.AddOverloadCandidate(FD, DeclAccessPair::make(FD, AS_none), Args,
+ OCS);
+ }
+ switch (OCS.BestViableFunction(S, NameLoc, Best)) {
+ case OR_Success:
+ ND = Best->FoundDecl;
+ Corrected.setCorrectionDecl(ND);
+ break;
+ default:
+ break;
+ }
+ }
+ ND = ND->getUnderlyingDecl();
+ if (isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND))
+ return Corrected;
+ }
+ }
+ return TypoCorrection();
+}
+
+/// ConvertArgumentsForCall - Converts the arguments specified in
+/// Args/NumArgs to the parameter types of the function FDecl with
+/// function prototype Proto. Call is the call expression itself, and
+/// Fn is the function expression. For a C++ member function, this
+/// routine does not attempt to convert the object argument. Returns
+/// true if the call is ill-formed.
+bool
+Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
+ FunctionDecl *FDecl,
+ const FunctionProtoType *Proto,
+ ArrayRef<Expr *> Args,
+ SourceLocation RParenLoc,
+ bool IsExecConfig) {
+ // Bail out early if calling a builtin with custom typechecking.
+ if (FDecl)
+ if (unsigned ID = FDecl->getBuiltinID())
+ if (Context.BuiltinInfo.hasCustomTypechecking(ID))
+ return false;
+
+ // C99 6.5.2.2p7 - the arguments are implicitly converted, as if by
+ // assignment, to the types of the corresponding parameter, ...
+ unsigned NumParams = Proto->getNumParams();
+ bool Invalid = false;
+ unsigned MinArgs = FDecl ? FDecl->getMinRequiredArguments() : NumParams;
+ unsigned FnKind = Fn->getType()->isBlockPointerType()
+ ? 1 /* block */
+ : (IsExecConfig ? 3 /* kernel function (exec config) */
+ : 0 /* function */);
+
+ // If too few arguments are available (and we don't have default
+ // arguments for the remaining parameters), don't make the call.
+ if (Args.size() < NumParams) {
+ if (Args.size() < MinArgs) {
+ TypoCorrection TC;
+ if (FDecl && (TC = TryTypoCorrectionForCall(*this, Fn, FDecl, Args))) {
+ unsigned diag_id =
+ MinArgs == NumParams && !Proto->isVariadic()
+ ? diag::err_typecheck_call_too_few_args_suggest
+ : diag::err_typecheck_call_too_few_args_at_least_suggest;
+ diagnoseTypo(TC, PDiag(diag_id) << FnKind << MinArgs
+ << static_cast<unsigned>(Args.size())
+ << TC.getCorrectionRange());
+ } else if (MinArgs == 1 && FDecl && FDecl->getParamDecl(0)->getDeclName())
+ Diag(RParenLoc,
+ MinArgs == NumParams && !Proto->isVariadic()
+ ? diag::err_typecheck_call_too_few_args_one
+ : diag::err_typecheck_call_too_few_args_at_least_one)
+ << FnKind << FDecl->getParamDecl(0) << Fn->getSourceRange();
+ else
+ Diag(RParenLoc, MinArgs == NumParams && !Proto->isVariadic()
+ ? diag::err_typecheck_call_too_few_args
+ : diag::err_typecheck_call_too_few_args_at_least)
+ << FnKind << MinArgs << static_cast<unsigned>(Args.size())
+ << Fn->getSourceRange();
+
+ // Emit the location of the prototype.
+ if (!TC && FDecl && !FDecl->getBuiltinID() && !IsExecConfig)
+ Diag(FDecl->getLocStart(), diag::note_callee_decl)
+ << FDecl;
+
+ return true;
+ }
+ Call->setNumArgs(Context, NumParams);
+ }
+
+ // If too many are passed and not variadic, error on the extras and drop
+ // them.
+ if (Args.size() > NumParams) {
+ if (!Proto->isVariadic()) {
+ TypoCorrection TC;
+ if (FDecl && (TC = TryTypoCorrectionForCall(*this, Fn, FDecl, Args))) {
+ unsigned diag_id =
+ MinArgs == NumParams && !Proto->isVariadic()
+ ? diag::err_typecheck_call_too_many_args_suggest
+ : diag::err_typecheck_call_too_many_args_at_most_suggest;
+ diagnoseTypo(TC, PDiag(diag_id) << FnKind << NumParams
+ << static_cast<unsigned>(Args.size())
+ << TC.getCorrectionRange());
+ } else if (NumParams == 1 && FDecl &&
+ FDecl->getParamDecl(0)->getDeclName())
+ Diag(Args[NumParams]->getLocStart(),
+ MinArgs == NumParams
+ ? diag::err_typecheck_call_too_many_args_one
+ : diag::err_typecheck_call_too_many_args_at_most_one)
+ << FnKind << FDecl->getParamDecl(0)
+ << static_cast<unsigned>(Args.size()) << Fn->getSourceRange()
+ << SourceRange(Args[NumParams]->getLocStart(),
+ Args.back()->getLocEnd());
+ else
+ Diag(Args[NumParams]->getLocStart(),
+ MinArgs == NumParams
+ ? diag::err_typecheck_call_too_many_args
+ : diag::err_typecheck_call_too_many_args_at_most)
+ << FnKind << NumParams << static_cast<unsigned>(Args.size())
+ << Fn->getSourceRange()
+ << SourceRange(Args[NumParams]->getLocStart(),
+ Args.back()->getLocEnd());
+
+ // Emit the location of the prototype.
+ if (!TC && FDecl && !FDecl->getBuiltinID() && !IsExecConfig)
+ Diag(FDecl->getLocStart(), diag::note_callee_decl)
+ << FDecl;
+
+ // This deletes the extra arguments.
+ Call->setNumArgs(Context, NumParams);
+ return true;
+ }
+ }
+ SmallVector<Expr *, 8> AllArgs;
+ VariadicCallType CallType = getVariadicCallType(FDecl, Proto, Fn);
+
+ Invalid = GatherArgumentsForCall(Call->getLocStart(), FDecl,
+ Proto, 0, Args, AllArgs, CallType);
+ if (Invalid)
+ return true;
+ unsigned TotalNumArgs = AllArgs.size();
+ for (unsigned i = 0; i < TotalNumArgs; ++i)
+ Call->setArg(i, AllArgs[i]);
+
+ return false;
+}
+
+bool Sema::GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
+ const FunctionProtoType *Proto,
+ unsigned FirstParam, ArrayRef<Expr *> Args,
+ SmallVectorImpl<Expr *> &AllArgs,
+ VariadicCallType CallType, bool AllowExplicit,
+ bool IsListInitialization) {
+ unsigned NumParams = Proto->getNumParams();
+ bool Invalid = false;
+ size_t ArgIx = 0;
+ // Continue to check argument types (even if we have too few/many args).
+ for (unsigned i = FirstParam; i < NumParams; i++) {
+ QualType ProtoArgType = Proto->getParamType(i);
+
+ Expr *Arg;
+ ParmVarDecl *Param = FDecl ? FDecl->getParamDecl(i) : nullptr;
+ if (ArgIx < Args.size()) {
+ Arg = Args[ArgIx++];
+
+ if (RequireCompleteType(Arg->getLocStart(),
+ ProtoArgType,
+ diag::err_call_incomplete_argument, Arg))
+ return true;
+
+ // Strip the unbridged-cast placeholder expression off, if applicable.
+ bool CFAudited = false;
+ if (Arg->getType() == Context.ARCUnbridgedCastTy &&
+ FDecl && FDecl->hasAttr<CFAuditedTransferAttr>() &&
+ (!Param || !Param->hasAttr<CFConsumedAttr>()))
+ Arg = stripARCUnbridgedCast(Arg);
+ else if (getLangOpts().ObjCAutoRefCount &&
+ FDecl && FDecl->hasAttr<CFAuditedTransferAttr>() &&
+ (!Param || !Param->hasAttr<CFConsumedAttr>()))
+ CFAudited = true;
+
+ InitializedEntity Entity =
+ Param ? InitializedEntity::InitializeParameter(Context, Param,
+ ProtoArgType)
+ : InitializedEntity::InitializeParameter(
+ Context, ProtoArgType, Proto->isParamConsumed(i));
+
+ // Remember that this parameter belongs to a CF-audited API.
+ if (CFAudited)
+ Entity.setParameterCFAudited();
+
+ ExprResult ArgE = PerformCopyInitialization(
+ Entity, SourceLocation(), Arg, IsListInitialization, AllowExplicit);
+ if (ArgE.isInvalid())
+ return true;
+
+ Arg = ArgE.getAs<Expr>();
+ } else {
+ assert(Param && "can't use default arguments without a known callee");
+
+ ExprResult ArgExpr =
+ BuildCXXDefaultArgExpr(CallLoc, FDecl, Param);
+ if (ArgExpr.isInvalid())
+ return true;
+
+ Arg = ArgExpr.getAs<Expr>();
+ }
+
+ // Check for array bounds violations for each argument to the call. This
+ // check only triggers warnings when the argument isn't a more complex Expr
+ // with its own checking, such as a BinaryOperator.
+ CheckArrayAccess(Arg);
+
+ // Check for violations of C99 static array rules (C99 6.7.5.3p7).
+ CheckStaticArrayArgument(CallLoc, Param, Arg);
+
+ AllArgs.push_back(Arg);
+ }
+
+ // If this is a variadic call, handle args passed through "...".
+ if (CallType != VariadicDoesNotApply) {
+ // Assume that extern "C" functions with variadic arguments that
+ // return __unknown_anytype aren't *really* variadic.
+ if (Proto->getReturnType() == Context.UnknownAnyTy && FDecl &&
+ FDecl->isExternC()) {
+ for (Expr *A : Args.slice(ArgIx)) {
+ QualType paramType; // ignored
+ ExprResult arg = checkUnknownAnyArg(CallLoc, A, paramType);
+ Invalid |= arg.isInvalid();
+ AllArgs.push_back(arg.get());
+ }
+
+ // Otherwise do argument promotion (C99 6.5.2.2p7).
+ } else {
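+ // (E.g., a float argument passed through "..." is promoted to
+ // double, and char/short arguments promote to int.)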
+ for (Expr *A : Args.slice(ArgIx)) {
+ ExprResult Arg = DefaultVariadicArgumentPromotion(A, CallType, FDecl);
+ Invalid |= Arg.isInvalid();
+ AllArgs.push_back(Arg.get());
+ }
+ }
+
+ // Check for array bounds violations.
+ for (Expr *A : Args.slice(ArgIx))
+ CheckArrayAccess(A);
+ }
+ return Invalid;
+}
+
+static void DiagnoseCalleeStaticArrayParam(Sema &S, ParmVarDecl *PVD) {
+ TypeLoc TL = PVD->getTypeSourceInfo()->getTypeLoc();
+ if (DecayedTypeLoc DTL = TL.getAs<DecayedTypeLoc>())
+ TL = DTL.getOriginalLoc();
+ if (ArrayTypeLoc ATL = TL.getAs<ArrayTypeLoc>())
+ S.Diag(PVD->getLocation(), diag::note_callee_static_array)
+ << ATL.getLocalSourceRange();
+}
+
+/// CheckStaticArrayArgument - If the given argument corresponds to a static
+/// array parameter, check that it is non-null, and that if it is formed by
+/// array-to-pointer decay, the underlying array is sufficiently large.
+///
+/// C99 6.7.5.3p7: If the keyword static also appears within the [ and ] of the
+/// array type derivation, then for each call to the function, the value of the
+/// corresponding actual argument shall provide access to the first element of
+/// an array with at least as many elements as specified by the size expression.
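+///
+/// (Illustrative: given "void f(int a[static 10])", passing an array
+/// of 5 ints draws warn_static_array_too_small below, and a null
+/// pointer constant draws warn_null_arg.)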
+void
+Sema::CheckStaticArrayArgument(SourceLocation CallLoc,
+ ParmVarDecl *Param,
+ const Expr *ArgExpr) {
+ // Static array parameters are not supported in C++.
+ if (!Param || getLangOpts().CPlusPlus)
+ return;
+
+ QualType OrigTy = Param->getOriginalType();
+
+ const ArrayType *AT = Context.getAsArrayType(OrigTy);
+ if (!AT || AT->getSizeModifier() != ArrayType::Static)
+ return;
+
+ if (ArgExpr->isNullPointerConstant(Context,
+ Expr::NPC_NeverValueDependent)) {
+ Diag(CallLoc, diag::warn_null_arg) << ArgExpr->getSourceRange();
+ DiagnoseCalleeStaticArrayParam(*this, Param);
+ return;
+ }
+
+ const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT);
+ if (!CAT)
+ return;
+
+ const ConstantArrayType *ArgCAT =
+ Context.getAsConstantArrayType(ArgExpr->IgnoreParenImpCasts()->getType());
+ if (!ArgCAT)
+ return;
+
+ if (ArgCAT->getSize().ult(CAT->getSize())) {
+ Diag(CallLoc, diag::warn_static_array_too_small)
+ << ArgExpr->getSourceRange()
+ << (unsigned) ArgCAT->getSize().getZExtValue()
+ << (unsigned) CAT->getSize().getZExtValue();
+ DiagnoseCalleeStaticArrayParam(*this, Param);
+ }
+}
+
+/// Given a function expression of unknown-any type, try to rebuild it
+/// to have a function type.
+static ExprResult rebuildUnknownAnyFunction(Sema &S, Expr *fn);
+
+/// Is the given type a placeholder that we need to lower out
+/// immediately during argument processing?
+static bool isPlaceholderToRemoveAsArg(QualType type) {
+ // Placeholders are never sugared.
+ const BuiltinType *placeholder = dyn_cast<BuiltinType>(type);
+ if (!placeholder) return false;
+
+ switch (placeholder->getKind()) {
+ // Ignore all the non-placeholder types.
+#define PLACEHOLDER_TYPE(ID, SINGLETON_ID)
+#define BUILTIN_TYPE(ID, SINGLETON_ID) case BuiltinType::ID:
+#include "clang/AST/BuiltinTypes.def"
+ return false;
+
+ // We cannot lower out overload sets; they might validly be resolved
+ // by the call machinery.
+ case BuiltinType::Overload:
+ return false;
+
+ // Unbridged casts in ARC can be handled in some call positions and
+ // should be left in place.
+ case BuiltinType::ARCUnbridgedCast:
+ return false;
+
+ // Pseudo-objects should be converted as soon as possible.
+ case BuiltinType::PseudoObject:
+ return true;
+
+ // The debugger mode could theoretically try to resolve unknown-typed
+ // arguments based on known parameter types, but currently does not.
+ case BuiltinType::UnknownAny:
+ return true;
+
+ // These are always invalid as call arguments and should be reported.
+ case BuiltinType::BoundMember:
+ case BuiltinType::BuiltinFn:
+ case BuiltinType::OMPArraySection:
+ return true;
+
+ }
+ llvm_unreachable("bad builtin type kind");
+}
+
+/// Check an argument list for placeholders that we won't try to
+/// handle later.
+static bool checkArgsForPlaceholders(Sema &S, MultiExprArg args) {
+ // Apply this processing to all the arguments at once instead of
+ // dying at the first failure.
+ bool hasInvalid = false;
+ for (size_t i = 0, e = args.size(); i != e; i++) {
+ if (isPlaceholderToRemoveAsArg(args[i]->getType())) {
+ ExprResult result = S.CheckPlaceholderExpr(args[i]);
+ if (result.isInvalid()) hasInvalid = true;
+ else args[i] = result.get();
+ } else if (hasInvalid) {
+ (void)S.CorrectDelayedTyposInExpr(args[i]);
+ }
+ }
+ return hasInvalid;
+}
+
+/// If a builtin function has a pointer argument with no explicit address
+/// space, then it should be able to accept a pointer to any address
+/// space as input. In order to do this, we need to replace the
+/// standard builtin declaration with one that uses the same address space
+/// as the call.
+///
+/// \returns nullptr if this builtin is not a candidate for a rewrite,
+/// i.e. if it does not contain any pointer arguments without
+/// an address space qualifier. Otherwise the rewritten
+/// FunctionDecl is returned.
+/// TODO: Handle pointer return types.
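+///
+/// (Illustrative, OpenCL-flavored: for a builtin declared to take
+/// "int *", a call site passing "__local int *" gets a rewritten
+/// declaration whose parameter type is "__local int *".)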
+static FunctionDecl *rewriteBuiltinFunctionDecl(Sema *Sema, ASTContext &Context,
+ const FunctionDecl *FDecl,
+ MultiExprArg ArgExprs) {
+
+ QualType DeclType = FDecl->getType();
+ const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(DeclType);
+
+ if (!Context.BuiltinInfo.hasPtrArgsOrResult(FDecl->getBuiltinID()) ||
+ !FT || FT->isVariadic() || ArgExprs.size() != FT->getNumParams())
+ return nullptr;
+
+ bool NeedsNewDecl = false;
+ unsigned i = 0;
+ SmallVector<QualType, 8> OverloadParams;
+
+ for (QualType ParamType : FT->param_types()) {
+
+ // Convert array arguments to pointer to simplify type lookup.
+ Expr *Arg = Sema->DefaultFunctionArrayLvalueConversion(ArgExprs[i++]).get();
+ QualType ArgType = Arg->getType();
+ if (!ParamType->isPointerType() ||
+ ParamType.getQualifiers().hasAddressSpace() ||
+ !ArgType->isPointerType() ||
+ !ArgType->getPointeeType().getQualifiers().hasAddressSpace()) {
+ OverloadParams.push_back(ParamType);
+ continue;
+ }
+
+ NeedsNewDecl = true;
+ unsigned AS = ArgType->getPointeeType().getQualifiers().getAddressSpace();
+
+ QualType PointeeType = ParamType->getPointeeType();
+ PointeeType = Context.getAddrSpaceQualType(PointeeType, AS);
+ OverloadParams.push_back(Context.getPointerType(PointeeType));
+ }
+
+ if (!NeedsNewDecl)
+ return nullptr;
+
+ FunctionProtoType::ExtProtoInfo EPI;
+ QualType OverloadTy = Context.getFunctionType(FT->getReturnType(),
+ OverloadParams, EPI);
+ DeclContext *Parent = Context.getTranslationUnitDecl();
+ FunctionDecl *OverloadDecl = FunctionDecl::Create(Context, Parent,
+ FDecl->getLocation(),
+ FDecl->getLocation(),
+ FDecl->getIdentifier(),
+ OverloadTy,
+ /*TInfo=*/nullptr,
+ SC_Extern, false,
+ /*hasPrototype=*/true);
+ SmallVector<ParmVarDecl*, 16> Params;
+ FT = cast<FunctionProtoType>(OverloadTy);
+ for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
+ QualType ParamType = FT->getParamType(i);
+ ParmVarDecl *Parm =
+ ParmVarDecl::Create(Context, OverloadDecl, SourceLocation(),
+ SourceLocation(), nullptr, ParamType,
+ /*TInfo=*/nullptr, SC_None, nullptr);
+ Parm->setScopeInfo(0, i);
+ Params.push_back(Parm);
+ }
+ OverloadDecl->setParams(Params);
+ return OverloadDecl;
+}
+
+/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
+/// This provides the location of the left/right parens and a list of comma
+/// locations.
+ExprResult
+Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
+ MultiExprArg ArgExprs, SourceLocation RParenLoc,
+ Expr *ExecConfig, bool IsExecConfig) {
+ // Since this might be a postfix expression, get rid of ParenListExprs.
+ ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Fn);
+ if (Result.isInvalid()) return ExprError();
+ Fn = Result.get();
+
+ if (checkArgsForPlaceholders(*this, ArgExprs))
+ return ExprError();
+
+ if (getLangOpts().CPlusPlus) {
+ // If this is a pseudo-destructor expression, build the call immediately.
+ if (isa<CXXPseudoDestructorExpr>(Fn)) {
+ if (!ArgExprs.empty()) {
+ // Pseudo-destructor calls should not have any arguments.
+ Diag(Fn->getLocStart(), diag::err_pseudo_dtor_call_with_args)
+ << FixItHint::CreateRemoval(
+ SourceRange(ArgExprs.front()->getLocStart(),
+ ArgExprs.back()->getLocEnd()));
+ }
+
+ return new (Context)
+ CallExpr(Context, Fn, None, Context.VoidTy, VK_RValue, RParenLoc);
+ }
+ if (Fn->getType() == Context.PseudoObjectTy) {
+ ExprResult result = CheckPlaceholderExpr(Fn);
+ if (result.isInvalid()) return ExprError();
+ Fn = result.get();
+ }
+
+ // Determine whether this is a dependent call inside a C++ template,
+ // in which case we won't do any semantic analysis now.
+ // FIXME: Will need to cache the results of name lookup (including ADL) in
+ // Fn.
+ bool Dependent = false;
+ if (Fn->isTypeDependent())
+ Dependent = true;
+ else if (Expr::hasAnyTypeDependentArguments(ArgExprs))
+ Dependent = true;
+
+ if (Dependent) {
+ if (ExecConfig) {
+ return new (Context) CUDAKernelCallExpr(
+ Context, Fn, cast<CallExpr>(ExecConfig), ArgExprs,
+ Context.DependentTy, VK_RValue, RParenLoc);
+ } else {
+ return new (Context) CallExpr(
+ Context, Fn, ArgExprs, Context.DependentTy, VK_RValue, RParenLoc);
+ }
+ }
+
+ // Determine whether this is a call to an object (C++ [over.call.object]).
+ if (Fn->getType()->isRecordType())
+ return BuildCallToObjectOfClassType(S, Fn, LParenLoc, ArgExprs,
+ RParenLoc);
+
+ if (Fn->getType() == Context.UnknownAnyTy) {
+ ExprResult result = rebuildUnknownAnyFunction(*this, Fn);
+ if (result.isInvalid()) return ExprError();
+ Fn = result.get();
+ }
+
+ if (Fn->getType() == Context.BoundMemberTy) {
+ return BuildCallToMemberFunction(S, Fn, LParenLoc, ArgExprs, RParenLoc);
+ }
+ }
+
+ // Check for overloaded calls. This can happen even in C due to extensions.
+ if (Fn->getType() == Context.OverloadTy) {
+ OverloadExpr::FindResult find = OverloadExpr::find(Fn);
+
+ // We aren't supposed to apply this logic if there's an '&' involved.
+ if (!find.HasFormOfMemberPointer) {
+ OverloadExpr *ovl = find.Expression;
+ if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(ovl))
+ return BuildOverloadedCallExpr(S, Fn, ULE, LParenLoc, ArgExprs,
+ RParenLoc, ExecConfig);
+ return BuildCallToMemberFunction(S, Fn, LParenLoc, ArgExprs, RParenLoc);
+ }
+ }
+
+ // If we're directly calling a function, get the appropriate declaration.
+ if (Fn->getType() == Context.UnknownAnyTy) {
+ ExprResult result = rebuildUnknownAnyFunction(*this, Fn);
+ if (result.isInvalid()) return ExprError();
+ Fn = result.get();
+ }
+
+ Expr *NakedFn = Fn->IgnoreParens();
+
+ NamedDecl *NDecl = nullptr;
+ if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(NakedFn))
+ if (UnOp->getOpcode() == UO_AddrOf)
+ NakedFn = UnOp->getSubExpr()->IgnoreParens();
+
+ if (isa<DeclRefExpr>(NakedFn)) {
+ NDecl = cast<DeclRefExpr>(NakedFn)->getDecl();
+
+ FunctionDecl *FDecl = dyn_cast<FunctionDecl>(NDecl);
+ if (FDecl && FDecl->getBuiltinID()) {
+ // Rewrite the function decl for this builtin by replacing parameters
+ // with no explicit address space with the address space of the arguments
+ // in ArgExprs.
+ if ((FDecl = rewriteBuiltinFunctionDecl(this, Context, FDecl, ArgExprs))) {
+ NDecl = FDecl;
+ Fn = DeclRefExpr::Create(Context, FDecl->getQualifierLoc(),
+ SourceLocation(), FDecl, false,
+ SourceLocation(), FDecl->getType(),
+ Fn->getValueKind(), FDecl);
+ }
+ }
+ } else if (isa<MemberExpr>(NakedFn))
+ NDecl = cast<MemberExpr>(NakedFn)->getMemberDecl();
+
+ if (FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(NDecl)) {
+ if (FD->hasAttr<EnableIfAttr>()) {
+ if (const EnableIfAttr *Attr = CheckEnableIf(FD, ArgExprs, true)) {
+ Diag(Fn->getLocStart(),
+ isa<CXXMethodDecl>(FD) ?
+ diag::err_ovl_no_viable_member_function_in_call :
+ diag::err_ovl_no_viable_function_in_call)
+ << FD << FD->getSourceRange();
+ Diag(FD->getLocation(),
+ diag::note_ovl_candidate_disabled_by_enable_if_attr)
+ << Attr->getCond()->getSourceRange() << Attr->getMessage();
+ }
+ }
+ }
+
+ return BuildResolvedCallExpr(Fn, NDecl, LParenLoc, ArgExprs, RParenLoc,
+ ExecConfig, IsExecConfig);
+}
+
+/// ActOnAsTypeExpr - create a new asType (bitcast) from the arguments.
+///
+/// __builtin_astype( value, dst type )
+///
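+/// (Illustrative, OpenCL: "int4 i = __builtin_astype(f, int4);" for a
+/// float4 f reinterprets the bits; source and destination must have
+/// the same size, as checked below.)
+///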
+ExprResult Sema::ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
+ SourceLocation BuiltinLoc,
+ SourceLocation RParenLoc) {
+ ExprValueKind VK = VK_RValue;
+ ExprObjectKind OK = OK_Ordinary;
+ QualType DstTy = GetTypeFromParser(ParsedDestTy);
+ QualType SrcTy = E->getType();
+ if (Context.getTypeSize(DstTy) != Context.getTypeSize(SrcTy))
+ return ExprError(Diag(BuiltinLoc,
+ diag::err_invalid_astype_of_different_size)
+ << DstTy
+ << SrcTy
+ << E->getSourceRange());
+ return new (Context) AsTypeExpr(E, DstTy, VK, OK, BuiltinLoc, RParenLoc);
+}
+
+/// ActOnConvertVectorExpr - create a new convert-vector expression from the
+/// provided arguments.
+///
+/// __builtin_convertvector( value, dst type )
+///
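+/// (Illustrative: unlike __builtin_astype, this converts element-wise,
+/// e.g. each float lane of the source vector to an int lane of the
+/// destination vector type.)
+///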
+ExprResult Sema::ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
+ SourceLocation BuiltinLoc,
+ SourceLocation RParenLoc) {
+ TypeSourceInfo *TInfo;
+ GetTypeFromParser(ParsedDestTy, &TInfo);
+ return SemaConvertVectorExpr(E, TInfo, BuiltinLoc, RParenLoc);
+}
+
+/// BuildResolvedCallExpr - Build a call to a resolved expression,
+/// i.e. an expression not of \p OverloadTy. The expression should
+/// unary-convert to an expression of function-pointer or
+/// block-pointer type.
+///
+/// \param NDecl the declaration being called, if available
+ExprResult
+Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
+ SourceLocation LParenLoc,
+ ArrayRef<Expr *> Args,
+ SourceLocation RParenLoc,
+ Expr *Config, bool IsExecConfig) {
+ FunctionDecl *FDecl = dyn_cast_or_null<FunctionDecl>(NDecl);
+ unsigned BuiltinID = (FDecl ? FDecl->getBuiltinID() : 0);
+
+ // Promote the function operand.
+ // We special-case function promotion here because we only allow promoting
+ // builtin functions to function pointers in the callee of a call.
+ ExprResult Result;
+ if (BuiltinID &&
+ Fn->getType()->isSpecificBuiltinType(BuiltinType::BuiltinFn)) {
+ Result = ImpCastExprToType(Fn, Context.getPointerType(FDecl->getType()),
+ CK_BuiltinFnToFnPtr).get();
+ } else {
+ Result = CallExprUnaryConversions(Fn);
+ }
+ if (Result.isInvalid())
+ return ExprError();
+ Fn = Result.get();
+
+ // Make the call expr early, before semantic checks. This guarantees cleanup
+ // of arguments and function on error.
+ CallExpr *TheCall;
+ if (Config)
+ TheCall = new (Context) CUDAKernelCallExpr(Context, Fn,
+ cast<CallExpr>(Config), Args,
+ Context.BoolTy, VK_RValue,
+ RParenLoc);
+ else
+ TheCall = new (Context) CallExpr(Context, Fn, Args, Context.BoolTy,
+ VK_RValue, RParenLoc);
+
+ if (!getLangOpts().CPlusPlus) {
+ // C cannot always handle TypoExpr nodes in builtin calls and direct
+ // function calls, as their argument checking doesn't necessarily
+ // handle dependent types properly, so make sure any TypoExprs have
+ // been dealt with.
+ ExprResult Result = CorrectDelayedTyposInExpr(TheCall);
+ if (!Result.isUsable()) return ExprError();
+ TheCall = dyn_cast<CallExpr>(Result.get());
+ if (!TheCall) return Result;
+ Args = llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs());
+ }
+
+ // Bail out early if calling a builtin with custom typechecking.
+ if (BuiltinID && Context.BuiltinInfo.hasCustomTypechecking(BuiltinID))
+ return CheckBuiltinFunctionCall(FDecl, BuiltinID, TheCall);
+
+ retry:
+ const FunctionType *FuncT;
+ if (const PointerType *PT = Fn->getType()->getAs<PointerType>()) {
+ // C99 6.5.2.2p1 - "The expression that denotes the called function shall
+ // have type pointer to function".
+ FuncT = PT->getPointeeType()->getAs<FunctionType>();
+ if (!FuncT)
+ return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function)
+ << Fn->getType() << Fn->getSourceRange());
+ } else if (const BlockPointerType *BPT =
+ Fn->getType()->getAs<BlockPointerType>()) {
+ FuncT = BPT->getPointeeType()->castAs<FunctionType>();
+ } else {
+ // Handle calls to expressions of unknown-any type.
+ if (Fn->getType() == Context.UnknownAnyTy) {
+ ExprResult rewrite = rebuildUnknownAnyFunction(*this, Fn);
+ if (rewrite.isInvalid()) return ExprError();
+ Fn = rewrite.get();
+ TheCall->setCallee(Fn);
+ goto retry;
+ }
+
+ return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function)
+ << Fn->getType() << Fn->getSourceRange());
+ }
+
+ if (getLangOpts().CUDA) {
+ if (Config) {
+ // CUDA: Kernel calls must be to global functions
+ if (FDecl && !FDecl->hasAttr<CUDAGlobalAttr>())
+ return ExprError(Diag(LParenLoc,diag::err_kern_call_not_global_function)
+ << FDecl->getName() << Fn->getSourceRange());
+
+ // CUDA: Kernel function must have 'void' return type
+ if (!FuncT->getReturnType()->isVoidType())
+ return ExprError(Diag(LParenLoc, diag::err_kern_type_not_void_return)
+ << Fn->getType() << Fn->getSourceRange());
+ } else {
+ // CUDA: Calls to global functions must be configured
+ if (FDecl && FDecl->hasAttr<CUDAGlobalAttr>())
+ return ExprError(Diag(LParenLoc, diag::err_global_call_not_config)
+ << FDecl->getName() << Fn->getSourceRange());
+ }
+ }
+
+ // Check for a valid return type
+ if (CheckCallReturnType(FuncT->getReturnType(), Fn->getLocStart(), TheCall,
+ FDecl))
+ return ExprError();
+
+ // We know the result type of the call, set it.
+ TheCall->setType(FuncT->getCallResultType(Context));
+ TheCall->setValueKind(Expr::getValueKindForType(FuncT->getReturnType()));
+
+ const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FuncT);
+ if (Proto) {
+ if (ConvertArgumentsForCall(TheCall, Fn, FDecl, Proto, Args, RParenLoc,
+ IsExecConfig))
+ return ExprError();
+ } else {
+ assert(isa<FunctionNoProtoType>(FuncT) && "Unknown FunctionType!");
+
+ if (FDecl) {
+ // Check if we have too few/too many arguments, based
+ // on our knowledge of the function definition.
+ const FunctionDecl *Def = nullptr;
+ if (FDecl->hasBody(Def) && Args.size() != Def->param_size()) {
+ Proto = Def->getType()->getAs<FunctionProtoType>();
+ if (!Proto || !(Proto->isVariadic() && Args.size() >= Def->param_size()))
+ Diag(RParenLoc, diag::warn_call_wrong_number_of_arguments)
+ << (Args.size() > Def->param_size()) << FDecl << Fn->getSourceRange();
+ }
+
+ // If the function we're calling doesn't have a prototype, but we have
+ // a function prototype from a prior declaration, use that prototype.
+ if (!FDecl->hasPrototype())
+ Proto = FDecl->getType()->getAs<FunctionProtoType>();
+ }
+
+ // Promote the arguments (C99 6.5.2.2p6).
+ for (unsigned i = 0, e = Args.size(); i != e; i++) {
+ Expr *Arg = Args[i];
+
+ if (Proto && i < Proto->getNumParams()) {
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(
+ Context, Proto->getParamType(i), Proto->isParamConsumed(i));
+ ExprResult ArgE =
+ PerformCopyInitialization(Entity, SourceLocation(), Arg);
+ if (ArgE.isInvalid())
+ return true;
+
+ Arg = ArgE.getAs<Expr>();
+
+ } else {
+ ExprResult ArgE = DefaultArgumentPromotion(Arg);
+
+ if (ArgE.isInvalid())
+ return true;
+
+ Arg = ArgE.getAs<Expr>();
+ }
+
+ if (RequireCompleteType(Arg->getLocStart(),
+ Arg->getType(),
+ diag::err_call_incomplete_argument, Arg))
+ return ExprError();
+
+ TheCall->setArg(i, Arg);
+ }
+ }
+
+ if (CXXMethodDecl *Method = dyn_cast_or_null<CXXMethodDecl>(FDecl))
+ if (!Method->isStatic())
+ return ExprError(Diag(LParenLoc, diag::err_member_call_without_object)
+ << Fn->getSourceRange());
+
+ // Check for sentinels
+ if (NDecl)
+ DiagnoseSentinelCalls(NDecl, LParenLoc, Args);
+
+ // Do special checking on direct calls to functions.
+ if (FDecl) {
+ if (CheckFunctionCall(FDecl, TheCall, Proto))
+ return ExprError();
+
+ if (BuiltinID)
+ return CheckBuiltinFunctionCall(FDecl, BuiltinID, TheCall);
+ } else if (NDecl) {
+ if (CheckPointerCall(NDecl, TheCall, Proto))
+ return ExprError();
+ } else {
+ if (CheckOtherCall(TheCall, Proto))
+ return ExprError();
+ }
+
+ return MaybeBindToTemporary(TheCall);
+}
+
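+/// (Illustrative: a C99 compound literal such as "(int[]){1, 2, 3}"
+/// arrives here; note below that in C the result is an l-value while
+/// in C++ it is an r-value.)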
+ExprResult
+Sema::ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty,
+ SourceLocation RParenLoc, Expr *InitExpr) {
+ assert(Ty && "ActOnCompoundLiteral(): missing type");
+ assert(InitExpr && "ActOnCompoundLiteral(): missing expression");
+
+ TypeSourceInfo *TInfo;
+ QualType literalType = GetTypeFromParser(Ty, &TInfo);
+ if (!TInfo)
+ TInfo = Context.getTrivialTypeSourceInfo(literalType);
+
+ return BuildCompoundLiteralExpr(LParenLoc, TInfo, RParenLoc, InitExpr);
+}
+
+ExprResult
+Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
+ SourceLocation RParenLoc, Expr *LiteralExpr) {
+ QualType literalType = TInfo->getType();
+
+ if (literalType->isArrayType()) {
+ if (RequireCompleteType(LParenLoc, Context.getBaseElementType(literalType),
+ diag::err_illegal_decl_array_incomplete_type,
+ SourceRange(LParenLoc,
+ LiteralExpr->getSourceRange().getEnd())))
+ return ExprError();
+ if (literalType->isVariableArrayType())
+ return ExprError(Diag(LParenLoc, diag::err_variable_object_no_init)
+ << SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd()));
+ } else if (!literalType->isDependentType() &&
+ RequireCompleteType(LParenLoc, literalType,
+ diag::err_typecheck_decl_incomplete_type,
+ SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd())))
+ return ExprError();
+
+ InitializedEntity Entity
+ = InitializedEntity::InitializeCompoundLiteralInit(TInfo);
+ InitializationKind Kind
+ = InitializationKind::CreateCStyleCast(LParenLoc,
+ SourceRange(LParenLoc, RParenLoc),
+ /*InitList=*/true);
+ InitializationSequence InitSeq(*this, Entity, Kind, LiteralExpr);
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind, LiteralExpr,
+ &literalType);
+ if (Result.isInvalid())
+ return ExprError();
+ LiteralExpr = Result.get();
+
+ bool isFileScope = getCurFunctionOrMethodDecl() == nullptr;
+ if (isFileScope &&
+ !LiteralExpr->isTypeDependent() &&
+ !LiteralExpr->isValueDependent() &&
+ !literalType->isDependentType()) { // 6.5.2.5p3
+ if (CheckForConstantInitializer(LiteralExpr, literalType))
+ return ExprError();
+ }
+
+ // In C, compound literals are l-values for some reason.
+ ExprValueKind VK = getLangOpts().CPlusPlus ? VK_RValue : VK_LValue;
+
+ return MaybeBindToTemporary(
+ new (Context) CompoundLiteralExpr(LParenLoc, TInfo, literalType,
+ VK, LiteralExpr, isFileScope));
+}
+
+ExprResult
+Sema::ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
+ SourceLocation RBraceLoc) {
+ // Immediately handle non-overload placeholders. Overloads can be
+ // resolved contextually, but everything else here can't.
+ for (unsigned I = 0, E = InitArgList.size(); I != E; ++I) {
+ if (InitArgList[I]->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(InitArgList[I]);
+
+ // Ignore failures; dropping the entire initializer list because
+ // of one failure would be terrible for indexing/etc.
+ if (result.isInvalid()) continue;
+
+ InitArgList[I] = result.get();
+ }
+ }
+
+ // Semantic analysis for initializers is done by ActOnDeclarator() and
+ // CheckInitializer(); it requires knowledge of the object being
+ // initialized.
+
+ InitListExpr *E = new (Context) InitListExpr(Context, LBraceLoc, InitArgList,
+ RBraceLoc);
+ E->setType(Context.VoidTy); // FIXME: just a placeholder for now.
+ return E;
+}
+
+/// Do an explicit extend of the given block pointer if we're in ARC.
+void Sema::maybeExtendBlockObject(ExprResult &E) {
+ assert(E.get()->getType()->isBlockPointerType());
+ assert(E.get()->isRValue());
+
+ // Only do this in an r-value context.
+ if (!getLangOpts().ObjCAutoRefCount) return;
+
+ E = ImplicitCastExpr::Create(Context, E.get()->getType(),
+ CK_ARCExtendBlockObject, E.get(),
+ /*base path*/ nullptr, VK_RValue);
+ ExprNeedsCleanups = true;
+}
+
+/// Prepare a conversion of the given expression to an ObjC object
+/// pointer type.
+CastKind Sema::PrepareCastToObjCObjectPointer(ExprResult &E) {
+ QualType type = E.get()->getType();
+ if (type->isObjCObjectPointerType()) {
+ return CK_BitCast;
+ } else if (type->isBlockPointerType()) {
+ maybeExtendBlockObject(E);
+ return CK_BlockPointerToObjCPointerCast;
+ } else {
+ assert(type->isPointerType());
+ return CK_CPointerToObjCPointerCast;
+ }
+}
+
+/// Prepares for a scalar cast, performing all the necessary stages
+/// except the final cast and returning the kind required.
+CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
+ // Both Src and Dest are scalar types, i.e. arithmetic or pointer.
+ // Also, callers should have filtered out the invalid cases with
+ // pointers. Everything else should be possible.
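+ //
+ // (E.g., casting an int to _Complex double first converts the operand
+ // to double with CK_IntegralToFloating and then returns
+ // CK_FloatingRealToComplex, as in the STK_Integral case below.)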
+
+ QualType SrcTy = Src.get()->getType();
+ if (Context.hasSameUnqualifiedType(SrcTy, DestTy))
+ return CK_NoOp;
+
+ switch (Type::ScalarTypeKind SrcKind = SrcTy->getScalarTypeKind()) {
+ case Type::STK_MemberPointer:
+ llvm_unreachable("member pointer type in C");
+
+ case Type::STK_CPointer:
+ case Type::STK_BlockPointer:
+ case Type::STK_ObjCObjectPointer:
+ switch (DestTy->getScalarTypeKind()) {
+ case Type::STK_CPointer: {
+ unsigned SrcAS = SrcTy->getPointeeType().getAddressSpace();
+ unsigned DestAS = DestTy->getPointeeType().getAddressSpace();
+ if (SrcAS != DestAS)
+ return CK_AddressSpaceConversion;
+ return CK_BitCast;
+ }
+ case Type::STK_BlockPointer:
+ return (SrcKind == Type::STK_BlockPointer
+ ? CK_BitCast : CK_AnyPointerToBlockPointerCast);
+ case Type::STK_ObjCObjectPointer:
+ if (SrcKind == Type::STK_ObjCObjectPointer)
+ return CK_BitCast;
+ if (SrcKind == Type::STK_CPointer)
+ return CK_CPointerToObjCPointerCast;
+ maybeExtendBlockObject(Src);
+ return CK_BlockPointerToObjCPointerCast;
+ case Type::STK_Bool:
+ return CK_PointerToBoolean;
+ case Type::STK_Integral:
+ return CK_PointerToIntegral;
+ case Type::STK_Floating:
+ case Type::STK_FloatingComplex:
+ case Type::STK_IntegralComplex:
+ case Type::STK_MemberPointer:
+ llvm_unreachable("illegal cast from pointer");
+ }
+ llvm_unreachable("Should have returned before this");
+
+ case Type::STK_Bool: // casting from bool is like casting from an integer
+ case Type::STK_Integral:
+ switch (DestTy->getScalarTypeKind()) {
+ case Type::STK_CPointer:
+ case Type::STK_ObjCObjectPointer:
+ case Type::STK_BlockPointer:
+ if (Src.get()->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull))
+ return CK_NullToPointer;
+ return CK_IntegralToPointer;
+ case Type::STK_Bool:
+ return CK_IntegralToBoolean;
+ case Type::STK_Integral:
+ return CK_IntegralCast;
+ case Type::STK_Floating:
+ return CK_IntegralToFloating;
+ case Type::STK_IntegralComplex:
+ Src = ImpCastExprToType(Src.get(),
+ DestTy->castAs<ComplexType>()->getElementType(),
+ CK_IntegralCast);
+ return CK_IntegralRealToComplex;
+ case Type::STK_FloatingComplex:
+ Src = ImpCastExprToType(Src.get(),
+ DestTy->castAs<ComplexType>()->getElementType(),
+ CK_IntegralToFloating);
+ return CK_FloatingRealToComplex;
+ case Type::STK_MemberPointer:
+ llvm_unreachable("member pointer type in C");
+ }
+ llvm_unreachable("Should have returned before this");
+
+ case Type::STK_Floating:
+ switch (DestTy->getScalarTypeKind()) {
+ case Type::STK_Floating:
+ return CK_FloatingCast;
+ case Type::STK_Bool:
+ return CK_FloatingToBoolean;
+ case Type::STK_Integral:
+ return CK_FloatingToIntegral;
+ case Type::STK_FloatingComplex:
+ Src = ImpCastExprToType(Src.get(),
+ DestTy->castAs<ComplexType>()->getElementType(),
+ CK_FloatingCast);
+ return CK_FloatingRealToComplex;
+ case Type::STK_IntegralComplex:
+ Src = ImpCastExprToType(Src.get(),
+ DestTy->castAs<ComplexType>()->getElementType(),
+ CK_FloatingToIntegral);
+ return CK_IntegralRealToComplex;
+ case Type::STK_CPointer:
+ case Type::STK_ObjCObjectPointer:
+ case Type::STK_BlockPointer:
+ llvm_unreachable("valid float->pointer cast?");
+ case Type::STK_MemberPointer:
+ llvm_unreachable("member pointer type in C");
+ }
+ llvm_unreachable("Should have returned before this");
+
+ case Type::STK_FloatingComplex:
+ switch (DestTy->getScalarTypeKind()) {
+ case Type::STK_FloatingComplex:
+ return CK_FloatingComplexCast;
+ case Type::STK_IntegralComplex:
+ return CK_FloatingComplexToIntegralComplex;
+ case Type::STK_Floating: {
+ QualType ET = SrcTy->castAs<ComplexType>()->getElementType();
+ if (Context.hasSameType(ET, DestTy))
+ return CK_FloatingComplexToReal;
+ Src = ImpCastExprToType(Src.get(), ET, CK_FloatingComplexToReal);
+ return CK_FloatingCast;
+ }
+ case Type::STK_Bool:
+ return CK_FloatingComplexToBoolean;
+ case Type::STK_Integral:
+ Src = ImpCastExprToType(Src.get(),
+ SrcTy->castAs<ComplexType>()->getElementType(),
+ CK_FloatingComplexToReal);
+ return CK_FloatingToIntegral;
+ case Type::STK_CPointer:
+ case Type::STK_ObjCObjectPointer:
+ case Type::STK_BlockPointer:
+ llvm_unreachable("valid complex float->pointer cast?");
+ case Type::STK_MemberPointer:
+ llvm_unreachable("member pointer type in C");
+ }
+ llvm_unreachable("Should have returned before this");
+
+ case Type::STK_IntegralComplex:
+ switch (DestTy->getScalarTypeKind()) {
+ case Type::STK_FloatingComplex:
+ return CK_IntegralComplexToFloatingComplex;
+ case Type::STK_IntegralComplex:
+ return CK_IntegralComplexCast;
+ case Type::STK_Integral: {
+ QualType ET = SrcTy->castAs<ComplexType>()->getElementType();
+ if (Context.hasSameType(ET, DestTy))
+ return CK_IntegralComplexToReal;
+ Src = ImpCastExprToType(Src.get(), ET, CK_IntegralComplexToReal);
+ return CK_IntegralCast;
+ }
+ case Type::STK_Bool:
+ return CK_IntegralComplexToBoolean;
+ case Type::STK_Floating:
+ Src = ImpCastExprToType(Src.get(),
+ SrcTy->castAs<ComplexType>()->getElementType(),
+ CK_IntegralComplexToReal);
+ return CK_IntegralToFloating;
+ case Type::STK_CPointer:
+ case Type::STK_ObjCObjectPointer:
+ case Type::STK_BlockPointer:
+ llvm_unreachable("valid complex int->pointer cast?");
+ case Type::STK_MemberPointer:
+ llvm_unreachable("member pointer type in C");
+ }
+ llvm_unreachable("Should have returned before this");
+ }
+
+ llvm_unreachable("Unhandled scalar cast");
+}
+
+static bool breakDownVectorType(QualType type, uint64_t &len,
+ QualType &eltType) {
+ // Vectors are simple.
+ if (const VectorType *vecType = type->getAs<VectorType>()) {
+ len = vecType->getNumElements();
+ eltType = vecType->getElementType();
+ assert(eltType->isScalarType());
+ return true;
+ }
+
+ // We allow lax conversion to and from non-vector types, but only if
+ // they're real types (i.e. non-complex, non-pointer scalar types).
+ if (!type->isRealType()) return false;
+
+ len = 1;
+ eltType = type;
+ return true;
+}
+
+/// Are the two types lax-compatible vector types? That is, given
+/// that one of them is a vector, do they have equal storage sizes,
+/// where the storage size is the number of elements times the element
+/// size?
+///
+/// This will also return false if either of the types is neither a
+/// vector nor a real type.
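+///
+/// For example (illustrative, using the GCC vector extension):
+///   typedef int    v4si __attribute__((vector_size(16)));
+///   typedef double v2df __attribute__((vector_size(16)));
+/// v4si and v2df are lax-compatible (4 * 32 == 2 * 64 bits), while v4si and
+/// a scalar 'long long' are not (128 != 64 bits).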
+bool Sema::areLaxCompatibleVectorTypes(QualType srcTy, QualType destTy) {
+ assert(destTy->isVectorType() || srcTy->isVectorType());
+
+ // Disallow lax conversions between scalars and ExtVectors (these
+ // conversions are allowed for other vector types because common headers
+ // depend on them). Most scalar OP ExtVector cases are handled by the
+ // splat path anyway, which does what we want (convert, not bitcast).
+ // What this rules out for ExtVectors is crazy things like char4*float.
+ if (srcTy->isScalarType() && destTy->isExtVectorType()) return false;
+ if (destTy->isScalarType() && srcTy->isExtVectorType()) return false;
+
+ uint64_t srcLen, destLen;
+ QualType srcEltTy, destEltTy;
+ if (!breakDownVectorType(srcTy, srcLen, srcEltTy)) return false;
+ if (!breakDownVectorType(destTy, destLen, destEltTy)) return false;
+
+ // ASTContext::getTypeSize will return the size rounded up to a
+ // power of 2, so instead of using that, we need to use the raw
+ // element size multiplied by the element count.
+ uint64_t srcEltSize = Context.getTypeSize(srcEltTy);
+ uint64_t destEltSize = Context.getTypeSize(destEltTy);
+
+ return (srcLen * srcEltSize == destLen * destEltSize);
+}
+
+/// Is this a legal conversion between two types, one of which is
+/// known to be a vector type?
+bool Sema::isLaxVectorConversion(QualType srcTy, QualType destTy) {
+ assert(destTy->isVectorType() || srcTy->isVectorType());
+
+ if (!Context.getLangOpts().LaxVectorConversions)
+ return false;
+ return areLaxCompatibleVectorTypes(srcTy, destTy);
+}
+
+bool Sema::CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
+ CastKind &Kind) {
+ assert(VectorTy->isVectorType() && "Not a vector type!");
+
+ if (Ty->isVectorType() || Ty->isIntegralType(Context)) {
+ if (!areLaxCompatibleVectorTypes(Ty, VectorTy))
+ return Diag(R.getBegin(),
+ Ty->isVectorType() ?
+ diag::err_invalid_conversion_between_vectors :
+ diag::err_invalid_conversion_between_vector_and_integer)
+ << VectorTy << Ty << R;
+ } else
+ return Diag(R.getBegin(),
+ diag::err_invalid_conversion_between_vector_and_scalar)
+ << VectorTy << Ty << R;
+
+ Kind = CK_BitCast;
+ return false;
+}
+
+ExprResult Sema::CheckExtVectorCast(SourceRange R, QualType DestTy,
+ Expr *CastExpr, CastKind &Kind) {
+ assert(DestTy->isExtVectorType() && "Not an extended vector type!");
+
+ QualType SrcTy = CastExpr->getType();
+
+ // If SrcTy is a VectorType, the total size must match to explicitly cast to
+ // an ExtVectorType.
+ // In OpenCL, casts between vectors of different types are not allowed.
+ // (See OpenCL 6.2).
+ if (SrcTy->isVectorType()) {
+ if (!areLaxCompatibleVectorTypes(SrcTy, DestTy)
+ || (getLangOpts().OpenCL &&
+ (DestTy.getCanonicalType() != SrcTy.getCanonicalType()))) {
+ Diag(R.getBegin(),diag::err_invalid_conversion_between_ext_vectors)
+ << DestTy << SrcTy << R;
+ return ExprError();
+ }
+ Kind = CK_BitCast;
+ return CastExpr;
+ }
+
+ // All non-pointer scalars can be cast to ExtVector type. The appropriate
+ // conversion will take place first from scalar to elt type, and then
+ // splat from elt type to vector.
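+ // For example (illustrative), given
+ //   typedef float float4 __attribute__((ext_vector_type(4)));
+ // '(float4)2' converts 2 to 2.0f and then splats it to all four elements.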
+ if (SrcTy->isPointerType())
+ return Diag(R.getBegin(),
+ diag::err_invalid_conversion_between_vector_and_scalar)
+ << DestTy << SrcTy << R;
+
+ QualType DestElemTy = DestTy->getAs<ExtVectorType>()->getElementType();
+ ExprResult CastExprRes = CastExpr;
+ CastKind CK = PrepareScalarCast(CastExprRes, DestElemTy);
+ if (CastExprRes.isInvalid())
+ return ExprError();
+ CastExpr = ImpCastExprToType(CastExprRes.get(), DestElemTy, CK).get();
+
+ Kind = CK_VectorSplat;
+ return CastExpr;
+}
+
+ExprResult
+Sema::ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
+ Declarator &D, ParsedType &Ty,
+ SourceLocation RParenLoc, Expr *CastExpr) {
+ assert(!D.isInvalidType() && (CastExpr != nullptr) &&
+ "ActOnCastExpr(): missing type or expr");
+
+ TypeSourceInfo *castTInfo = GetTypeForDeclaratorCast(D, CastExpr->getType());
+ if (D.isInvalidType())
+ return ExprError();
+
+ if (getLangOpts().CPlusPlus) {
+ // Check that there are no default arguments (C++ only).
+ CheckExtraCXXDefaultArguments(D);
+ } else {
+ // Make sure any TypoExprs have been dealt with.
+ ExprResult Res = CorrectDelayedTyposInExpr(CastExpr);
+ if (!Res.isUsable())
+ return ExprError();
+ CastExpr = Res.get();
+ }
+
+ checkUnusedDeclAttributes(D);
+
+ QualType castType = castTInfo->getType();
+ Ty = CreateParsedType(castType, castTInfo);
+
+ bool isVectorLiteral = false;
+
+ // Check for an AltiVec, ZVector, or OpenCL vector literal,
+ // i.e. all the elements are integer constants.
+ ParenExpr *PE = dyn_cast<ParenExpr>(CastExpr);
+ ParenListExpr *PLE = dyn_cast<ParenListExpr>(CastExpr);
+ if ((getLangOpts().AltiVec || getLangOpts().ZVector || getLangOpts().OpenCL)
+ && castType->isVectorType() && (PE || PLE)) {
+ if (PLE && PLE->getNumExprs() == 0) {
+ Diag(PLE->getExprLoc(), diag::err_altivec_empty_initializer);
+ return ExprError();
+ }
+ if (PE || PLE->getNumExprs() == 1) {
+ Expr *E = (PE ? PE->getSubExpr() : PLE->getExpr(0));
+ if (!E->getType()->isVectorType())
+ isVectorLiteral = true;
+ }
+ else
+ isVectorLiteral = true;
+ }
+
+ // If this is a vector initializer, '(' type ')' '(' init, ..., init ')'
+ // then handle it as such.
+ if (isVectorLiteral)
+ return BuildVectorLiteral(LParenLoc, RParenLoc, CastExpr, castTInfo);
+
+ // If the Expr being cast is a ParenListExpr, handle it specially.
+ // This is not an AltiVec-style cast, so turn the ParenListExpr into a
+ // sequence of BinOp comma operators.
+ if (isa<ParenListExpr>(CastExpr)) {
+ ExprResult Result = MaybeConvertParenListExprToParenExpr(S, CastExpr);
+ if (Result.isInvalid()) return ExprError();
+ CastExpr = Result.get();
+ }
+
+ if (getLangOpts().CPlusPlus && !castType->isVoidType() &&
+ !getSourceManager().isInSystemMacro(LParenLoc))
+ Diag(LParenLoc, diag::warn_old_style_cast) << CastExpr->getSourceRange();
+
+ CheckTollFreeBridgeCast(castType, CastExpr);
+
+ CheckObjCBridgeRelatedCast(castType, CastExpr);
+
+ return BuildCStyleCastExpr(LParenLoc, castTInfo, RParenLoc, CastExpr);
+}
+
+ExprResult Sema::BuildVectorLiteral(SourceLocation LParenLoc,
+ SourceLocation RParenLoc, Expr *E,
+ TypeSourceInfo *TInfo) {
+ assert((isa<ParenListExpr>(E) || isa<ParenExpr>(E)) &&
+ "Expected paren or paren list expression");
+
+ Expr **exprs;
+ unsigned numExprs;
+ Expr *subExpr;
+ SourceLocation LiteralLParenLoc, LiteralRParenLoc;
+ if (ParenListExpr *PE = dyn_cast<ParenListExpr>(E)) {
+ LiteralLParenLoc = PE->getLParenLoc();
+ LiteralRParenLoc = PE->getRParenLoc();
+ exprs = PE->getExprs();
+ numExprs = PE->getNumExprs();
+ } else { // isa<ParenExpr> by assertion at function entrance
+ LiteralLParenLoc = cast<ParenExpr>(E)->getLParen();
+ LiteralRParenLoc = cast<ParenExpr>(E)->getRParen();
+ subExpr = cast<ParenExpr>(E)->getSubExpr();
+ exprs = &subExpr;
+ numExprs = 1;
+ }
+
+ QualType Ty = TInfo->getType();
+ assert(Ty->isVectorType() && "Expected vector type");
+
+ SmallVector<Expr *, 8> initExprs;
+ const VectorType *VTy = Ty->getAs<VectorType>();
+ unsigned numElems = Ty->getAs<VectorType>()->getNumElements();
+
+ // '(...)' form of vector initialization in AltiVec: the number of
+ // initializers must be one or must match the size of the vector.
+ // If a single value is specified in the initializer then it will be
+ // replicated to all the components of the vector.
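+ // For example (illustrative):
+ //   (vector int)(1)          splats 1 to all four elements
+ //   (vector int)(1, 2, 3, 4) initializes the elements in order
+ //   (vector int)(1, 2)       is diagnosed: too few initializers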
+ if (VTy->getVectorKind() == VectorType::AltiVecVector) {
+ if (numExprs == 1) {
+ QualType ElemTy = Ty->getAs<VectorType>()->getElementType();
+ ExprResult Literal = DefaultLvalueConversion(exprs[0]);
+ if (Literal.isInvalid())
+ return ExprError();
+ Literal = ImpCastExprToType(Literal.get(), ElemTy,
+ PrepareScalarCast(Literal, ElemTy));
+ return BuildCStyleCastExpr(LParenLoc, TInfo, RParenLoc, Literal.get());
+ }
+ else if (numExprs < numElems) {
+ Diag(E->getExprLoc(),
+ diag::err_incorrect_number_of_vector_initializers);
+ return ExprError();
+ }
+ else
+ initExprs.append(exprs, exprs + numExprs);
+ }
+ else {
+ // For OpenCL, when the number of initializers is a single value,
+ // it will be replicated to all components of the vector.
+ if (getLangOpts().OpenCL &&
+ VTy->getVectorKind() == VectorType::GenericVector &&
+ numExprs == 1) {
+ QualType ElemTy = Ty->getAs<VectorType>()->getElementType();
+ ExprResult Literal = DefaultLvalueConversion(exprs[0]);
+ if (Literal.isInvalid())
+ return ExprError();
+ Literal = ImpCastExprToType(Literal.get(), ElemTy,
+ PrepareScalarCast(Literal, ElemTy));
+ return BuildCStyleCastExpr(LParenLoc, TInfo, RParenLoc, Literal.get());
+ }
+
+ initExprs.append(exprs, exprs + numExprs);
+ }
+ // FIXME: This means that pretty-printing the final AST will produce curly
+ // braces instead of the original commas.
+ InitListExpr *initE = new (Context) InitListExpr(Context, LiteralLParenLoc,
+ initExprs, LiteralRParenLoc);
+ initE->setType(Ty);
+ return BuildCompoundLiteralExpr(LParenLoc, TInfo, RParenLoc, initE);
+}
+
+/// This is not an AltiVec-style cast or C++ direct-initialization, so turn
+/// the ParenListExpr into a sequence of comma binary operators.
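+/// For example, the '(a, b, c)' in '(int)(a, b, c)' is rebuilt as
+/// '((a, b), c)' before the cast is applied.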
+ExprResult
+Sema::MaybeConvertParenListExprToParenExpr(Scope *S, Expr *OrigExpr) {
+ ParenListExpr *E = dyn_cast<ParenListExpr>(OrigExpr);
+ if (!E)
+ return OrigExpr;
+
+ ExprResult Result(E->getExpr(0));
+
+ for (unsigned i = 1, e = E->getNumExprs(); i != e && !Result.isInvalid(); ++i)
+ Result = ActOnBinOp(S, E->getExprLoc(), tok::comma, Result.get(),
+ E->getExpr(i));
+
+ if (Result.isInvalid()) return ExprError();
+
+ return ActOnParenExpr(E->getLParenLoc(), E->getRParenLoc(), Result.get());
+}
+
+ExprResult Sema::ActOnParenListExpr(SourceLocation L,
+ SourceLocation R,
+ MultiExprArg Val) {
+ Expr *expr = new (Context) ParenListExpr(Context, L, Val, R);
+ return expr;
+}
+
+/// \brief Emit a specialized diagnostic when one expression is a null pointer
+/// constant and the other is not a pointer. Returns true if a diagnostic is
+/// emitted.
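+///
+/// For example (illustrative), given 'int i;', the conditional expression
+/// 'flag ? NULL : i' is diagnosed here; the user most likely meant
+/// 'flag ? NULL : &i'.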
+bool Sema::DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
+ SourceLocation QuestionLoc) {
+ Expr *NullExpr = LHSExpr;
+ Expr *NonPointerExpr = RHSExpr;
+ Expr::NullPointerConstantKind NullKind =
+ NullExpr->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNotNull);
+
+ if (NullKind == Expr::NPCK_NotNull) {
+ NullExpr = RHSExpr;
+ NonPointerExpr = LHSExpr;
+ NullKind =
+ NullExpr->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNotNull);
+ }
+
+ if (NullKind == Expr::NPCK_NotNull)
+ return false;
+
+ if (NullKind == Expr::NPCK_ZeroExpression)
+ return false;
+
+ if (NullKind == Expr::NPCK_ZeroLiteral) {
+ // In this case, check to make sure that we got here from a "NULL"
+ // string in the source code.
+ NullExpr = NullExpr->IgnoreParenImpCasts();
+ SourceLocation loc = NullExpr->getExprLoc();
+ if (!findMacroSpelling(loc, "NULL"))
+ return false;
+ }
+
+ int DiagType = (NullKind == Expr::NPCK_CXX11_nullptr);
+ Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands_null)
+ << NonPointerExpr->getType() << DiagType
+ << NonPointerExpr->getSourceRange();
+ return true;
+}
+
+/// \brief Return false if the condition expression is valid, true otherwise.
+static bool checkCondition(Sema &S, Expr *Cond, SourceLocation QuestionLoc) {
+ QualType CondTy = Cond->getType();
+
+ // OpenCL v1.1 s6.3.i says the condition cannot be a floating point type.
+ if (S.getLangOpts().OpenCL && CondTy->isFloatingType()) {
+ S.Diag(QuestionLoc, diag::err_typecheck_cond_expect_nonfloat)
+ << CondTy << Cond->getSourceRange();
+ return true;
+ }
+
+ // C99 6.5.15p2
+ if (CondTy->isScalarType()) return false;
+
+ S.Diag(QuestionLoc, diag::err_typecheck_cond_expect_scalar)
+ << CondTy << Cond->getSourceRange();
+ return true;
+}
+
+/// \brief Handle when one or both operands are void type.
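+/// As a GCC extension, a single void operand is permitted with a warning;
+/// both operands are then cast to void and the result has type void, e.g.
+/// for 'flag ? (void)0 : 1' (illustrative example).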
+static QualType checkConditionalVoidType(Sema &S, ExprResult &LHS,
+ ExprResult &RHS) {
+ Expr *LHSExpr = LHS.get();
+ Expr *RHSExpr = RHS.get();
+
+ if (!LHSExpr->getType()->isVoidType())
+ S.Diag(RHSExpr->getLocStart(), diag::ext_typecheck_cond_one_void)
+ << RHSExpr->getSourceRange();
+ if (!RHSExpr->getType()->isVoidType())
+ S.Diag(LHSExpr->getLocStart(), diag::ext_typecheck_cond_one_void)
+ << LHSExpr->getSourceRange();
+ LHS = S.ImpCastExprToType(LHS.get(), S.Context.VoidTy, CK_ToVoid);
+ RHS = S.ImpCastExprToType(RHS.get(), S.Context.VoidTy, CK_ToVoid);
+ return S.Context.VoidTy;
+}
+
+/// \brief Return false if the NullExpr can be promoted to PointerTy,
+/// true otherwise.
+static bool checkConditionalNullPointer(Sema &S, ExprResult &NullExpr,
+ QualType PointerTy) {
+ if ((!PointerTy->isAnyPointerType() && !PointerTy->isBlockPointerType()) ||
+ !NullExpr.get()->isNullPointerConstant(S.Context,
+ Expr::NPC_ValueDependentIsNull))
+ return true;
+
+ NullExpr = S.ImpCastExprToType(NullExpr.get(), PointerTy, CK_NullToPointer);
+ return false;
+}
+
+/// \brief Checks compatibility between two pointers and return the resulting
+/// type.
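+///
+/// For example (illustrative), 'const int *' and 'volatile int *' merge to
+/// 'const volatile int *'; pointers with incompatible pointees fall back to
+/// 'void *' with a warning.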
+static QualType checkConditionalPointerCompatibility(Sema &S, ExprResult &LHS,
+ ExprResult &RHS,
+ SourceLocation Loc) {
+ QualType LHSTy = LHS.get()->getType();
+ QualType RHSTy = RHS.get()->getType();
+
+ if (S.Context.hasSameType(LHSTy, RHSTy)) {
+ // Two identical pointer types are always compatible.
+ return LHSTy;
+ }
+
+ QualType lhptee, rhptee;
+
+ // Get the pointee types.
+ bool IsBlockPointer = false;
+ if (const BlockPointerType *LHSBTy = LHSTy->getAs<BlockPointerType>()) {
+ lhptee = LHSBTy->getPointeeType();
+ rhptee = RHSTy->castAs<BlockPointerType>()->getPointeeType();
+ IsBlockPointer = true;
+ } else {
+ lhptee = LHSTy->castAs<PointerType>()->getPointeeType();
+ rhptee = RHSTy->castAs<PointerType>()->getPointeeType();
+ }
+
+ // C99 6.5.15p6: If both operands are pointers to compatible types or to
+ // differently qualified versions of compatible types, the result type is
+ // a pointer to an appropriately qualified version of the composite
+ // type.
+
+ // Only CVR-qualifiers exist in the standard, and the differently-qualified
+ // clause doesn't make sense for our extensions. E.g. address space 2 should
+ // be incompatible with address space 3: they may, for instance, live on
+ // different devices.
+ Qualifiers lhQual = lhptee.getQualifiers();
+ Qualifiers rhQual = rhptee.getQualifiers();
+
+ unsigned MergedCVRQual = lhQual.getCVRQualifiers() | rhQual.getCVRQualifiers();
+ lhQual.removeCVRQualifiers();
+ rhQual.removeCVRQualifiers();
+
+ lhptee = S.Context.getQualifiedType(lhptee.getUnqualifiedType(), lhQual);
+ rhptee = S.Context.getQualifiedType(rhptee.getUnqualifiedType(), rhQual);
+
+ QualType CompositeTy = S.Context.mergeTypes(lhptee, rhptee);
+
+ if (CompositeTy.isNull()) {
+ S.Diag(Loc, diag::ext_typecheck_cond_incompatible_pointers)
+ << LHSTy << RHSTy << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ // In this situation, we assume void* type. No especially good
+ // reason, but this is what gcc does, and we do have to pick
+ // something to get a consistent AST.
+ QualType incompatTy = S.Context.getPointerType(S.Context.VoidTy);
+ LHS = S.ImpCastExprToType(LHS.get(), incompatTy, CK_BitCast);
+ RHS = S.ImpCastExprToType(RHS.get(), incompatTy, CK_BitCast);
+ return incompatTy;
+ }
+
+ // The pointer types are compatible.
+ QualType ResultTy = CompositeTy.withCVRQualifiers(MergedCVRQual);
+ if (IsBlockPointer)
+ ResultTy = S.Context.getBlockPointerType(ResultTy);
+ else
+ ResultTy = S.Context.getPointerType(ResultTy);
+
+ LHS = S.ImpCastExprToType(LHS.get(), ResultTy, CK_BitCast);
+ RHS = S.ImpCastExprToType(RHS.get(), ResultTy, CK_BitCast);
+ return ResultTy;
+}
+
+/// \brief Return the resulting type when the operands are both block pointers.
+static QualType checkConditionalBlockPointerCompatibility(Sema &S,
+ ExprResult &LHS,
+ ExprResult &RHS,
+ SourceLocation Loc) {
+ QualType LHSTy = LHS.get()->getType();
+ QualType RHSTy = RHS.get()->getType();
+
+ if (!LHSTy->isBlockPointerType() || !RHSTy->isBlockPointerType()) {
+ if (LHSTy->isVoidPointerType() || RHSTy->isVoidPointerType()) {
+ QualType destType = S.Context.getPointerType(S.Context.VoidTy);
+ LHS = S.ImpCastExprToType(LHS.get(), destType, CK_BitCast);
+ RHS = S.ImpCastExprToType(RHS.get(), destType, CK_BitCast);
+ return destType;
+ }
+ S.Diag(Loc, diag::err_typecheck_cond_incompatible_operands)
+ << LHSTy << RHSTy << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ // We have 2 block pointer types.
+ return checkConditionalPointerCompatibility(S, LHS, RHS, Loc);
+}
+
+/// \brief Return the resulting type when the operands are both pointers.
+static QualType
+checkConditionalObjectPointersCompatibility(Sema &S, ExprResult &LHS,
+ ExprResult &RHS,
+ SourceLocation Loc) {
+ // get the pointer types
+ QualType LHSTy = LHS.get()->getType();
+ QualType RHSTy = RHS.get()->getType();
+
+ // get the "pointed to" types
+ QualType lhptee = LHSTy->getAs<PointerType>()->getPointeeType();
+ QualType rhptee = RHSTy->getAs<PointerType>()->getPointeeType();
+
+ // ignore qualifiers on void (C99 6.5.15p3, clause 6)
+ if (lhptee->isVoidType() && rhptee->isIncompleteOrObjectType()) {
+ // Figure out necessary qualifiers (C99 6.5.15p6)
+ QualType destPointee
+ = S.Context.getQualifiedType(lhptee, rhptee.getQualifiers());
+ QualType destType = S.Context.getPointerType(destPointee);
+ // Add qualifiers if necessary.
+ LHS = S.ImpCastExprToType(LHS.get(), destType, CK_NoOp);
+ // Promote to void*.
+ RHS = S.ImpCastExprToType(RHS.get(), destType, CK_BitCast);
+ return destType;
+ }
+ if (rhptee->isVoidType() && lhptee->isIncompleteOrObjectType()) {
+ QualType destPointee
+ = S.Context.getQualifiedType(rhptee, lhptee.getQualifiers());
+ QualType destType = S.Context.getPointerType(destPointee);
+ // Add qualifiers if necessary.
+ RHS = S.ImpCastExprToType(RHS.get(), destType, CK_NoOp);
+ // Promote to void*.
+ LHS = S.ImpCastExprToType(LHS.get(), destType, CK_BitCast);
+ return destType;
+ }
+
+ return checkConditionalPointerCompatibility(S, LHS, RHS, Loc);
+}
+
+/// \brief Return false unless one expression is an integer and the other a
+/// pointer; in that case, diagnose the mismatch, convert the integer operand
+/// to the pointer type, and return true.
+static bool checkPointerIntegerMismatch(Sema &S, ExprResult &Int,
+ Expr* PointerExpr, SourceLocation Loc,
+ bool IsIntFirstExpr) {
+ if (!PointerExpr->getType()->isPointerType() ||
+ !Int.get()->getType()->isIntegerType())
+ return false;
+
+ Expr *Expr1 = IsIntFirstExpr ? Int.get() : PointerExpr;
+ Expr *Expr2 = IsIntFirstExpr ? PointerExpr : Int.get();
+
+ S.Diag(Loc, diag::ext_typecheck_cond_pointer_integer_mismatch)
+ << Expr1->getType() << Expr2->getType()
+ << Expr1->getSourceRange() << Expr2->getSourceRange();
+ Int = S.ImpCastExprToType(Int.get(), PointerExpr->getType(),
+ CK_IntegralToPointer);
+ return true;
+}
+
+/// \brief Simple conversion between integer and floating point types.
+///
+/// Used when handling the OpenCL conditional operator where the
+/// condition is a vector while the other operands are scalar.
+///
+/// OpenCL v1.1 s6.3.i and s6.11.6 together require that the scalar
+/// types are either integer or floating type. Between the two
+/// operands, the type with the higher rank is defined as the "result
+/// type". The other operand needs to be promoted to the same type. No
+/// other type promotion is allowed. We cannot use
+/// UsualArithmeticConversions() for this purpose, since it always
+/// promotes promotable types.
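+///
+/// For example (illustrative), an 'int' and a 'float' operand both convert
+/// to 'float'; a 'char' and an 'int' operand both convert to 'int'.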
+static QualType OpenCLArithmeticConversions(Sema &S, ExprResult &LHS,
+ ExprResult &RHS,
+ SourceLocation QuestionLoc) {
+ LHS = S.DefaultFunctionArrayLvalueConversion(LHS.get());
+ if (LHS.isInvalid())
+ return QualType();
+ RHS = S.DefaultFunctionArrayLvalueConversion(RHS.get());
+ if (RHS.isInvalid())
+ return QualType();
+
+ // For conversion purposes, we ignore any qualifiers.
+ // For example, "const float" and "float" are equivalent.
+ QualType LHSType =
+ S.Context.getCanonicalType(LHS.get()->getType()).getUnqualifiedType();
+ QualType RHSType =
+ S.Context.getCanonicalType(RHS.get()->getType()).getUnqualifiedType();
+
+ if (!LHSType->isIntegerType() && !LHSType->isRealFloatingType()) {
+ S.Diag(QuestionLoc, diag::err_typecheck_cond_expect_int_float)
+ << LHSType << LHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ if (!RHSType->isIntegerType() && !RHSType->isRealFloatingType()) {
+ S.Diag(QuestionLoc, diag::err_typecheck_cond_expect_int_float)
+ << RHSType << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ // If both types are identical, no conversion is needed.
+ if (LHSType == RHSType)
+ return LHSType;
+
+ // Now handle "real" floating types (i.e. float, double, long double).
+ if (LHSType->isRealFloatingType() || RHSType->isRealFloatingType())
+ return handleFloatConversion(S, LHS, RHS, LHSType, RHSType,
+ /*IsCompAssign = */ false);
+
+ // Finally, we have two differing integer types.
+ return handleIntegerConversion<doIntegralCast, doIntegralCast>
+ (S, LHS, RHS, LHSType, RHSType, /*IsCompAssign = */ false);
+}
+
+/// \brief Convert scalar operands to a vector that matches the
+/// condition in length.
+///
+/// Used when handling the OpenCL conditional operator where the
+/// condition is a vector while the other operands are scalar.
+///
+/// We first compute the "result type" for the scalar operands
+/// according to OpenCL v1.1 s6.3.i. Both operands are then converted
+/// into a vector of that type where the length matches the condition
+/// vector type. s6.11.6 requires that the element types of the result
+/// and the condition must have the same number of bits.
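+///
+/// For example (illustrative), an 'int4' condition with 'int' and 'float'
+/// scalar operands produces 'float4'; a 'char4' condition with 'int'
+/// operands is diagnosed because the element sizes differ (8 vs. 32 bits).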
+static QualType
+OpenCLConvertScalarsToVectors(Sema &S, ExprResult &LHS, ExprResult &RHS,
+ QualType CondTy, SourceLocation QuestionLoc) {
+ QualType ResTy = OpenCLArithmeticConversions(S, LHS, RHS, QuestionLoc);
+ if (ResTy.isNull()) return QualType();
+
+ const VectorType *CV = CondTy->getAs<VectorType>();
+ assert(CV);
+
+ // Determine the vector result type
+ unsigned NumElements = CV->getNumElements();
+ QualType VectorTy = S.Context.getExtVectorType(ResTy, NumElements);
+
+ // Ensure that all types have the same number of bits
+ if (S.Context.getTypeSize(CV->getElementType())
+ != S.Context.getTypeSize(ResTy)) {
+ // Since VectorTy is created internally, it does not pretty print
+ // with an OpenCL name. Instead, we just print a description.
+ std::string EleTyName = ResTy.getUnqualifiedType().getAsString();
+ SmallString<64> Str;
+ llvm::raw_svector_ostream OS(Str);
+ OS << "(vector of " << NumElements << " '" << EleTyName << "' values)";
+ S.Diag(QuestionLoc, diag::err_conditional_vector_element_size)
+ << CondTy << OS.str();
+ return QualType();
+ }
+
+ // Convert operands to the vector result type
+ LHS = S.ImpCastExprToType(LHS.get(), VectorTy, CK_VectorSplat);
+ RHS = S.ImpCastExprToType(RHS.get(), VectorTy, CK_VectorSplat);
+
+ return VectorTy;
+}
+
+/// \brief Return false if this is a valid OpenCL condition vector.
+static bool checkOpenCLConditionVector(Sema &S, Expr *Cond,
+ SourceLocation QuestionLoc) {
+ // OpenCL v1.1 s6.11.6 says the elements of the vector must be of
+ // integral type.
+ const VectorType *CondTy = Cond->getType()->getAs<VectorType>();
+ assert(CondTy);
+ QualType EleTy = CondTy->getElementType();
+ if (EleTy->isIntegerType()) return false;
+
+ S.Diag(QuestionLoc, diag::err_typecheck_cond_expect_nonfloat)
+ << Cond->getType() << Cond->getSourceRange();
+ return true;
+}
+
+/// \brief Return false if the vector condition type and the vector
+/// result type are compatible.
+///
+/// OpenCL v1.1 s6.11.6 requires that both vector types have the same
+/// number of elements, and their element types have the same number
+/// of bits.
+static bool checkVectorResult(Sema &S, QualType CondTy, QualType VecResTy,
+ SourceLocation QuestionLoc) {
+ const VectorType *CV = CondTy->getAs<VectorType>();
+ const VectorType *RV = VecResTy->getAs<VectorType>();
+ assert(CV && RV);
+
+ if (CV->getNumElements() != RV->getNumElements()) {
+ S.Diag(QuestionLoc, diag::err_conditional_vector_size)
+ << CondTy << VecResTy;
+ return true;
+ }
+
+ QualType CVE = CV->getElementType();
+ QualType RVE = RV->getElementType();
+
+ if (S.Context.getTypeSize(CVE) != S.Context.getTypeSize(RVE)) {
+ S.Diag(QuestionLoc, diag::err_conditional_vector_element_size)
+ << CondTy << VecResTy;
+ return true;
+ }
+
+ return false;
+}
+
+/// \brief Return the resulting type for the conditional operator in
+/// OpenCL (aka "ternary selection operator", OpenCL v1.1
+/// s6.3.i) when the condition is a vector type.
+static QualType
+OpenCLCheckVectorConditional(Sema &S, ExprResult &Cond,
+ ExprResult &LHS, ExprResult &RHS,
+ SourceLocation QuestionLoc) {
+ Cond = S.DefaultFunctionArrayLvalueConversion(Cond.get());
+ if (Cond.isInvalid())
+ return QualType();
+ QualType CondTy = Cond.get()->getType();
+
+ if (checkOpenCLConditionVector(S, Cond.get(), QuestionLoc))
+ return QualType();
+
+ // If either operand is a vector then find the vector type of the
+ // result as specified in OpenCL v1.1 s6.3.i.
+ if (LHS.get()->getType()->isVectorType() ||
+ RHS.get()->getType()->isVectorType()) {
+ QualType VecResTy = S.CheckVectorOperands(LHS, RHS, QuestionLoc,
+ /*isCompAssign*/false,
+ /*AllowBothBool*/true,
+ /*AllowBoolConversions*/false);
+ if (VecResTy.isNull()) return QualType();
+ // The result type must match the condition type as specified in
+ // OpenCL v1.1 s6.11.6.
+ if (checkVectorResult(S, CondTy, VecResTy, QuestionLoc))
+ return QualType();
+ return VecResTy;
+ }
+
+ // Both operands are scalar.
+ return OpenCLConvertScalarsToVectors(S, LHS, RHS, CondTy, QuestionLoc);
+}
+
+/// Note that LHS is not null here, even if this is the GNU "x ?: y" extension.
+/// In that case, LHS = cond.
+/// C99 6.5.15
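+///
+/// For example (illustrative), 'c ? 1 : 2.0' has type 'double' after the
+/// usual arithmetic conversions, and 'c ? p : 0' has the type of 'p', since
+/// '0' is a null pointer constant.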
+QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
+ ExprResult &RHS, ExprValueKind &VK,
+ ExprObjectKind &OK,
+ SourceLocation QuestionLoc) {
+
+ ExprResult LHSResult = CheckPlaceholderExpr(LHS.get());
+ if (!LHSResult.isUsable()) return QualType();
+ LHS = LHSResult;
+
+ ExprResult RHSResult = CheckPlaceholderExpr(RHS.get());
+ if (!RHSResult.isUsable()) return QualType();
+ RHS = RHSResult;
+
+ // C++ is sufficiently different to merit its own checker.
+ if (getLangOpts().CPlusPlus)
+ return CXXCheckConditionalOperands(Cond, LHS, RHS, VK, OK, QuestionLoc);
+
+ VK = VK_RValue;
+ OK = OK_Ordinary;
+
+ // The OpenCL operator with a vector condition is sufficiently
+ // different to merit its own checker.
+ if (getLangOpts().OpenCL && Cond.get()->getType()->isVectorType())
+ return OpenCLCheckVectorConditional(*this, Cond, LHS, RHS, QuestionLoc);
+
+ // First, check the condition.
+ Cond = UsualUnaryConversions(Cond.get());
+ if (Cond.isInvalid())
+ return QualType();
+ if (checkCondition(*this, Cond.get(), QuestionLoc))
+ return QualType();
+
+ // Now check the two expressions.
+ if (LHS.get()->getType()->isVectorType() ||
+ RHS.get()->getType()->isVectorType())
+ return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false,
+ /*AllowBothBool*/true,
+ /*AllowBoolConversions*/false);
+
+ QualType ResTy = UsualArithmeticConversions(LHS, RHS);
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+
+ QualType LHSTy = LHS.get()->getType();
+ QualType RHSTy = RHS.get()->getType();
+
+ // If both operands have arithmetic type, do the usual arithmetic conversions
+ // to find a common type: C99 6.5.15p3,5.
+ if (LHSTy->isArithmeticType() && RHSTy->isArithmeticType()) {
+ LHS = ImpCastExprToType(LHS.get(), ResTy, PrepareScalarCast(LHS, ResTy));
+ RHS = ImpCastExprToType(RHS.get(), ResTy, PrepareScalarCast(RHS, ResTy));
+
+ return ResTy;
+ }
+
+ // If both operands are the same structure or union type, the result is that
+ // type.
+ if (const RecordType *LHSRT = LHSTy->getAs<RecordType>()) { // C99 6.5.15p3
+ if (const RecordType *RHSRT = RHSTy->getAs<RecordType>())
+ if (LHSRT->getDecl() == RHSRT->getDecl())
+ // "If both the operands have structure or union type, the result has
+ // that type." This implies that CV qualifiers are dropped.
+ return LHSTy.getUnqualifiedType();
+ // FIXME: Type of conditional expression must be complete in C mode.
+ }
+
+ // C99 6.5.15p5: "If both operands have void type, the result has void type."
+ // The following || allows only one side to be void (a GCC-ism).
+ if (LHSTy->isVoidType() || RHSTy->isVoidType()) {
+ return checkConditionalVoidType(*this, LHS, RHS);
+ }
+
+ // C99 6.5.15p6 - "if one operand is a null pointer constant, the result has
+ // the type of the other operand."
+ if (!checkConditionalNullPointer(*this, RHS, LHSTy)) return LHSTy;
+ if (!checkConditionalNullPointer(*this, LHS, RHSTy)) return RHSTy;
+
+ // All objective-c pointer type analysis is done here.
+ QualType compositeType = FindCompositeObjCPointerType(LHS, RHS,
+ QuestionLoc);
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+ if (!compositeType.isNull())
+ return compositeType;
+
+ // Handle block pointer types.
+ if (LHSTy->isBlockPointerType() || RHSTy->isBlockPointerType())
+ return checkConditionalBlockPointerCompatibility(*this, LHS, RHS,
+ QuestionLoc);
+
+ // Check constraints for C object pointers types (C99 6.5.15p3,6).
+ if (LHSTy->isPointerType() && RHSTy->isPointerType())
+ return checkConditionalObjectPointersCompatibility(*this, LHS, RHS,
+ QuestionLoc);
+
+ // GCC compatibility: soften pointer/integer mismatch. Note that
+ // null pointers have been filtered out by this point.
+ if (checkPointerIntegerMismatch(*this, LHS, RHS.get(), QuestionLoc,
+ /*isIntFirstExpr=*/true))
+ return RHSTy;
+ if (checkPointerIntegerMismatch(*this, RHS, LHS.get(), QuestionLoc,
+ /*isIntFirstExpr=*/false))
+ return LHSTy;
+
+ // Emit a better diagnostic if one of the expressions is a null pointer
+ // constant and the other is not a pointer type. In this case, the user most
+ // likely forgot to take the address of the other expression.
+ if (DiagnoseConditionalForNull(LHS.get(), RHS.get(), QuestionLoc))
+ return QualType();
+
+ // Otherwise, the operands are not compatible.
+ Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands)
+ << LHSTy << RHSTy << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ return QualType();
+}
+
+/// FindCompositeObjCPointerType - Helper method to find composite type of
+/// two objective-c pointer types of the two input expressions.
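+///
+/// For example (illustrative), 'flag ? (B *)b : (A *)a' has composite type
+/// 'A *' when B is a subclass of A; incompatible object pointer types
+/// devolve to 'id' with a warning.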
+QualType Sema::FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation QuestionLoc) {
+ QualType LHSTy = LHS.get()->getType();
+ QualType RHSTy = RHS.get()->getType();
+
+ // Handle things like Class and struct objc_class*. Here we cast the result
+ // to the pseudo-builtin, because that will be implicitly cast back to the
+ // redefinition type if an attempt is made to access its fields.
+ if (LHSTy->isObjCClassType() &&
+ (Context.hasSameType(RHSTy, Context.getObjCClassRedefinitionType()))) {
+ RHS = ImpCastExprToType(RHS.get(), LHSTy, CK_CPointerToObjCPointerCast);
+ return LHSTy;
+ }
+ if (RHSTy->isObjCClassType() &&
+ (Context.hasSameType(LHSTy, Context.getObjCClassRedefinitionType()))) {
+ LHS = ImpCastExprToType(LHS.get(), RHSTy, CK_CPointerToObjCPointerCast);
+ return RHSTy;
+ }
+ // And the same for struct objc_object* / id
+ if (LHSTy->isObjCIdType() &&
+ (Context.hasSameType(RHSTy, Context.getObjCIdRedefinitionType()))) {
+ RHS = ImpCastExprToType(RHS.get(), LHSTy, CK_CPointerToObjCPointerCast);
+ return LHSTy;
+ }
+ if (RHSTy->isObjCIdType() &&
+ (Context.hasSameType(LHSTy, Context.getObjCIdRedefinitionType()))) {
+ LHS = ImpCastExprToType(LHS.get(), RHSTy, CK_CPointerToObjCPointerCast);
+ return RHSTy;
+ }
+ // And the same for struct objc_selector* / SEL
+ if (Context.isObjCSelType(LHSTy) &&
+ (Context.hasSameType(RHSTy, Context.getObjCSelRedefinitionType()))) {
+ RHS = ImpCastExprToType(RHS.get(), LHSTy, CK_BitCast);
+ return LHSTy;
+ }
+ if (Context.isObjCSelType(RHSTy) &&
+ (Context.hasSameType(LHSTy, Context.getObjCSelRedefinitionType()))) {
+ LHS = ImpCastExprToType(LHS.get(), RHSTy, CK_BitCast);
+ return RHSTy;
+ }
+ // Check constraints for Objective-C object pointers types.
+ if (LHSTy->isObjCObjectPointerType() && RHSTy->isObjCObjectPointerType()) {
+
+ if (Context.getCanonicalType(LHSTy) == Context.getCanonicalType(RHSTy)) {
+ // Two identical object pointer types are always compatible.
+ return LHSTy;
+ }
+ const ObjCObjectPointerType *LHSOPT = LHSTy->castAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *RHSOPT = RHSTy->castAs<ObjCObjectPointerType>();
+ QualType compositeType = LHSTy;
+
+ // If both operands are interfaces and either operand can be
+ // assigned to the other, use that type as the composite
+ // type. This allows
+ // xxx ? (A*) a : (B*) b
+ // where B is a subclass of A.
+ //
+ // Additionally, as for assignment, if either type is 'id'
+ // allow silent coercion. Finally, if the types are
+ // incompatible then make sure to use 'id' as the composite
+ // type so the result is acceptable for sending messages to.
+
+ // FIXME: Consider unifying with 'areComparableObjCPointerTypes'.
+ // It could return the composite type.
+ if (!(compositeType =
+ Context.areCommonBaseCompatible(LHSOPT, RHSOPT)).isNull()) {
+ // Nothing more to do.
+ } else if (Context.canAssignObjCInterfaces(LHSOPT, RHSOPT)) {
+ compositeType = RHSOPT->isObjCBuiltinType() ? RHSTy : LHSTy;
+ } else if (Context.canAssignObjCInterfaces(RHSOPT, LHSOPT)) {
+ compositeType = LHSOPT->isObjCBuiltinType() ? LHSTy : RHSTy;
+ } else if ((LHSTy->isObjCQualifiedIdType() ||
+ RHSTy->isObjCQualifiedIdType()) &&
+ Context.ObjCQualifiedIdTypesAreCompatible(LHSTy, RHSTy, true)) {
+ // Need to handle "id<xx>" explicitly.
+ // GCC allows qualified id and any Objective-C type to devolve to
+ // id. Currently localizing to here until clear this should be
+ // part of ObjCQualifiedIdTypesAreCompatible.
+ compositeType = Context.getObjCIdType();
+ } else if (LHSTy->isObjCIdType() || RHSTy->isObjCIdType()) {
+ compositeType = Context.getObjCIdType();
+ } else {
+ Diag(QuestionLoc, diag::ext_typecheck_cond_incompatible_operands)
+ << LHSTy << RHSTy
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ QualType incompatTy = Context.getObjCIdType();
+ LHS = ImpCastExprToType(LHS.get(), incompatTy, CK_BitCast);
+ RHS = ImpCastExprToType(RHS.get(), incompatTy, CK_BitCast);
+ return incompatTy;
+ }
+ // The object pointer types are compatible.
+ LHS = ImpCastExprToType(LHS.get(), compositeType, CK_BitCast);
+ RHS = ImpCastExprToType(RHS.get(), compositeType, CK_BitCast);
+ return compositeType;
+ }
+ // Check Objective-C object pointer types and 'void *'
+ if (LHSTy->isVoidPointerType() && RHSTy->isObjCObjectPointerType()) {
+ if (getLangOpts().ObjCAutoRefCount) {
+ // ARC forbids the implicit conversion of object pointers to 'void *',
+ // so these types are not compatible.
+ Diag(QuestionLoc, diag::err_cond_voidptr_arc) << LHSTy << RHSTy
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ LHS = RHS = true;
+ return QualType();
+ }
+ QualType lhptee = LHSTy->getAs<PointerType>()->getPointeeType();
+ QualType rhptee = RHSTy->getAs<ObjCObjectPointerType>()->getPointeeType();
+ QualType destPointee
+ = Context.getQualifiedType(lhptee, rhptee.getQualifiers());
+ QualType destType = Context.getPointerType(destPointee);
+ // Add qualifiers if necessary.
+ LHS = ImpCastExprToType(LHS.get(), destType, CK_NoOp);
+ // Promote to void*.
+ RHS = ImpCastExprToType(RHS.get(), destType, CK_BitCast);
+ return destType;
+ }
+ if (LHSTy->isObjCObjectPointerType() && RHSTy->isVoidPointerType()) {
+ if (getLangOpts().ObjCAutoRefCount) {
+ // ARC forbids the implicit conversion of object pointers to 'void *',
+ // so these types are not compatible.
+ Diag(QuestionLoc, diag::err_cond_voidptr_arc) << LHSTy << RHSTy
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ LHS = RHS = true;
+ return QualType();
+ }
+ QualType lhptee = LHSTy->getAs<ObjCObjectPointerType>()->getPointeeType();
+ QualType rhptee = RHSTy->getAs<PointerType>()->getPointeeType();
+ QualType destPointee
+ = Context.getQualifiedType(rhptee, lhptee.getQualifiers());
+ QualType destType = Context.getPointerType(destPointee);
+ // Add qualifiers if necessary.
+ RHS = ImpCastExprToType(RHS.get(), destType, CK_NoOp);
+ // Promote to void*.
+ LHS = ImpCastExprToType(LHS.get(), destType, CK_BitCast);
+ return destType;
+ }
+ return QualType();
+}
+
+/// SuggestParentheses - Emit a note with a fixit hint that wraps
+/// ParenRange in parentheses.
+static void SuggestParentheses(Sema &Self, SourceLocation Loc,
+ const PartialDiagnostic &Note,
+ SourceRange ParenRange) {
+ SourceLocation EndLoc = Self.getLocForEndOfToken(ParenRange.getEnd());
+ if (ParenRange.getBegin().isFileID() && ParenRange.getEnd().isFileID() &&
+ EndLoc.isValid()) {
+ Self.Diag(Loc, Note)
+ << FixItHint::CreateInsertion(ParenRange.getBegin(), "(")
+ << FixItHint::CreateInsertion(EndLoc, ")");
+ } else {
+ // We can't display the parentheses, so just show the bare note.
+ Self.Diag(Loc, Note) << ParenRange;
+ }
+}
+
+static bool IsArithmeticOp(BinaryOperatorKind Opc) {
+ return BinaryOperator::isAdditiveOp(Opc) ||
+ BinaryOperator::isMultiplicativeOp(Opc) ||
+ BinaryOperator::isShiftOp(Opc);
+}
+
+/// IsArithmeticBinaryExpr - Returns true if E is an arithmetic binary
+/// expression, either using a built-in or overloaded operator,
+/// and sets *OpCode to the opcode and *RHSExprs to the right-hand side
+/// expression.
+static bool IsArithmeticBinaryExpr(Expr *E, BinaryOperatorKind *Opcode,
+ Expr **RHSExprs) {
+ // Don't strip parentheses: we should not warn if E is in parentheses.
+ E = E->IgnoreImpCasts();
+ E = E->IgnoreConversionOperator();
+ E = E->IgnoreImpCasts();
+
+ // Built-in binary operator.
+ if (BinaryOperator *OP = dyn_cast<BinaryOperator>(E)) {
+ if (IsArithmeticOp(OP->getOpcode())) {
+ *Opcode = OP->getOpcode();
+ *RHSExprs = OP->getRHS();
+ return true;
+ }
+ }
+
+ // Overloaded operator.
+ if (CXXOperatorCallExpr *Call = dyn_cast<CXXOperatorCallExpr>(E)) {
+ if (Call->getNumArgs() != 2)
+ return false;
+
+ // Make sure this is really a binary operator that is safe to pass into
+ // BinaryOperator::getOverloadedOpcode(), e.g. it's not a subscript op.
+ OverloadedOperatorKind OO = Call->getOperator();
+ if (OO < OO_Plus || OO > OO_Arrow ||
+ OO == OO_PlusPlus || OO == OO_MinusMinus)
+ return false;
+
+ BinaryOperatorKind OpKind = BinaryOperator::getOverloadedOpcode(OO);
+ if (IsArithmeticOp(OpKind)) {
+ *Opcode = OpKind;
+ *RHSExprs = Call->getArg(1);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/// ExprLooksBoolean - Returns true if E looks boolean, i.e. it has boolean type
+/// or is a logical expression such as (x==y) which has int type, but is
+/// commonly interpreted as boolean.
+static bool ExprLooksBoolean(Expr *E) {
+ E = E->IgnoreParenImpCasts();
+
+ if (E->getType()->isBooleanType())
+ return true;
+ if (BinaryOperator *OP = dyn_cast<BinaryOperator>(E))
+ return OP->isComparisonOp() || OP->isLogicalOp();
+ if (UnaryOperator *OP = dyn_cast<UnaryOperator>(E))
+ return OP->getOpcode() == UO_LNot;
+ if (E->getType()->isPointerType())
+ return true;
+
+ return false;
+}
+
+/// DiagnoseConditionalPrecedence - Emit a warning when a conditional operator
+/// and binary operator are mixed in a way that suggests the programmer assumed
+/// the conditional operator has higher precedence, for example:
+/// "int x = a + someBinaryCondition ? 1 : 2".
+static void DiagnoseConditionalPrecedence(Sema &Self,
+ SourceLocation OpLoc,
+ Expr *Condition,
+ Expr *LHSExpr,
+ Expr *RHSExpr) {
+ BinaryOperatorKind CondOpcode;
+ Expr *CondRHS;
+
+ if (!IsArithmeticBinaryExpr(Condition, &CondOpcode, &CondRHS))
+ return;
+ if (!ExprLooksBoolean(CondRHS))
+ return;
+
+ // The condition is an arithmetic binary expression, with a right-
+ // hand side that looks boolean, so warn.
+
+ Self.Diag(OpLoc, diag::warn_precedence_conditional)
+ << Condition->getSourceRange()
+ << BinaryOperator::getOpcodeStr(CondOpcode);
+
+ SuggestParentheses(Self, OpLoc,
+ Self.PDiag(diag::note_precedence_silence)
+ << BinaryOperator::getOpcodeStr(CondOpcode),
+ SourceRange(Condition->getLocStart(), Condition->getLocEnd()));
+
+ SuggestParentheses(Self, OpLoc,
+ Self.PDiag(diag::note_precedence_conditional_first),
+ SourceRange(CondRHS->getLocStart(), RHSExpr->getLocEnd()));
+}
+
+/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
+/// in the case of the GNU conditional expr extension.
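+///
+/// For example (illustrative), 'x ?: y' is built as a BinaryConditionalOperator
+/// whose common expression 'x' is wrapped in an OpaqueValueExpr, so 'x' is
+/// evaluated only once.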
+ExprResult Sema::ActOnConditionalOp(SourceLocation QuestionLoc,
+ SourceLocation ColonLoc,
+ Expr *CondExpr, Expr *LHSExpr,
+ Expr *RHSExpr) {
+ if (!getLangOpts().CPlusPlus) {
+ // C cannot handle TypoExpr nodes in the condition because it
+ // doesn't handle dependent types properly, so make sure any TypoExprs have
+ // been dealt with before checking the operands.
+ ExprResult CondResult = CorrectDelayedTyposInExpr(CondExpr);
+ if (!CondResult.isUsable()) return ExprError();
+ CondExpr = CondResult.get();
+ }
+
+ // If this is the GNU "x ?: y" extension, analyze the types as though the LHS
+ // was the condition.
+ OpaqueValueExpr *opaqueValue = nullptr;
+ Expr *commonExpr = nullptr;
+ if (!LHSExpr) {
+ commonExpr = CondExpr;
+ // Lower out placeholder types first. This is important so that we don't
+ // try to capture a placeholder. This happens in a few cases in C++, such
+ // as Objective-C++'s dictionary subscripting syntax.
+ if (commonExpr->hasPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(commonExpr);
+ if (!result.isUsable()) return ExprError();
+ commonExpr = result.get();
+ }
+ // We usually want to apply unary conversions *before* saving, except
+ // in the special case of a C++ l-value conditional.
+ if (!(getLangOpts().CPlusPlus
+ && !commonExpr->isTypeDependent()
+ && commonExpr->getValueKind() == RHSExpr->getValueKind()
+ && commonExpr->isGLValue()
+ && commonExpr->isOrdinaryOrBitFieldObject()
+ && RHSExpr->isOrdinaryOrBitFieldObject()
+ && Context.hasSameType(commonExpr->getType(), RHSExpr->getType()))) {
+ ExprResult commonRes = UsualUnaryConversions(commonExpr);
+ if (commonRes.isInvalid())
+ return ExprError();
+ commonExpr = commonRes.get();
+ }
+
+ opaqueValue = new (Context) OpaqueValueExpr(commonExpr->getExprLoc(),
+ commonExpr->getType(),
+ commonExpr->getValueKind(),
+ commonExpr->getObjectKind(),
+ commonExpr);
+ LHSExpr = CondExpr = opaqueValue;
+ }
+
+ ExprValueKind VK = VK_RValue;
+ ExprObjectKind OK = OK_Ordinary;
+ ExprResult Cond = CondExpr, LHS = LHSExpr, RHS = RHSExpr;
+ QualType result = CheckConditionalOperands(Cond, LHS, RHS,
+ VK, OK, QuestionLoc);
+ if (result.isNull() || Cond.isInvalid() || LHS.isInvalid() ||
+ RHS.isInvalid())
+ return ExprError();
+
+ DiagnoseConditionalPrecedence(*this, QuestionLoc, Cond.get(), LHS.get(),
+ RHS.get());
+
+ CheckBoolLikeConversion(Cond.get(), QuestionLoc);
+
+ if (!commonExpr)
+ return new (Context)
+ ConditionalOperator(Cond.get(), QuestionLoc, LHS.get(), ColonLoc,
+ RHS.get(), result, VK, OK);
+
+ return new (Context) BinaryConditionalOperator(
+ commonExpr, opaqueValue, Cond.get(), LHS.get(), RHS.get(), QuestionLoc,
+ ColonLoc, result, VK, OK);
+}
+
+// checkPointerTypesForAssignment - This is a very tricky routine (despite
+// being closely modeled after the C99 spec:-). The odd characteristic of this
+// routine is it effectively ignores the qualifiers on the top level pointee.
+// This circumvents the usual type rules specified in 6.2.7p1 & 6.7.5.[1-3].
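+// A couple of examples (illustrative):
+//   int *ip; const int *cip;
+//   cip = ip;    // Compatible: the LHS pointee has all the RHS qualifiers.
+//   ip = cip;    // CompatiblePointerDiscardsQualifiers (warning: 'const' lost).
+//   char **pp; const char **cpp;
+//   cpp = pp;    // IncompatibleNestedPointerQualifiers: qualifiers may only
+//                // be added at the top level of indirection.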
+static Sema::AssignConvertType
+checkPointerTypesForAssignment(Sema &S, QualType LHSType, QualType RHSType) {
+ assert(LHSType.isCanonical() && "LHS not canonicalized!");
+ assert(RHSType.isCanonical() && "RHS not canonicalized!");
+
+ // get the "pointed to" type (ignoring qualifiers at the top level)
+ const Type *lhptee, *rhptee;
+ Qualifiers lhq, rhq;
+ std::tie(lhptee, lhq) =
+ cast<PointerType>(LHSType)->getPointeeType().split().asPair();
+ std::tie(rhptee, rhq) =
+ cast<PointerType>(RHSType)->getPointeeType().split().asPair();
+
+ Sema::AssignConvertType ConvTy = Sema::Compatible;
+
+ // C99 6.5.16.1p1: The following citation is common to constraints
+ // 3 & 4 (below). ...and the type *pointed to* by the left has all the
+ // qualifiers of the type *pointed to* by the right;
+
+ // As a special case, 'non-__weak A *' -> 'non-__weak const A *' is okay.
+ if (lhq.getObjCLifetime() != rhq.getObjCLifetime() &&
+ lhq.compatiblyIncludesObjCLifetime(rhq)) {
+ // Ignore lifetime for further calculation.
+ lhq.removeObjCLifetime();
+ rhq.removeObjCLifetime();
+ }
+
+ if (!lhq.compatiblyIncludes(rhq)) {
+ // Treat address-space mismatches as fatal. TODO: address subspaces
+ if (!lhq.isAddressSpaceSupersetOf(rhq))
+ ConvTy = Sema::IncompatiblePointerDiscardsQualifiers;
+
+ // It's okay to add or remove GC or lifetime qualifiers when converting to
+ // and from void*.
+ else if (lhq.withoutObjCGCAttr().withoutObjCLifetime()
+ .compatiblyIncludes(
+ rhq.withoutObjCGCAttr().withoutObjCLifetime())
+ && (lhptee->isVoidType() || rhptee->isVoidType()))
+ ; // keep old
+
+ // Treat lifetime mismatches as fatal.
+ else if (lhq.getObjCLifetime() != rhq.getObjCLifetime())
+ ConvTy = Sema::IncompatiblePointerDiscardsQualifiers;
+
+ // For GCC compatibility, other qualifier mismatches are treated
+ // as still compatible in C.
+ else ConvTy = Sema::CompatiblePointerDiscardsQualifiers;
+ }
+
+ // C99 6.5.16.1p1 (constraint 4): If one operand is a pointer to an object or
+ // incomplete type and the other is a pointer to a qualified or unqualified
+ // version of void...
+ if (lhptee->isVoidType()) {
+ if (rhptee->isIncompleteOrObjectType())
+ return ConvTy;
+
+ // As an extension, we allow casts between void* and function pointer types.
+ assert(rhptee->isFunctionType());
+ return Sema::FunctionVoidPointer;
+ }
+
+ if (rhptee->isVoidType()) {
+ if (lhptee->isIncompleteOrObjectType())
+ return ConvTy;
+
+ // As an extension, we allow casts between void* and function pointer types.
+ assert(lhptee->isFunctionType());
+ return Sema::FunctionVoidPointer;
+ }
+
+ // C99 6.5.16.1p1 (constraint 3): both operands are pointers to qualified or
+ // unqualified versions of compatible types, ...
+ QualType ltrans = QualType(lhptee, 0), rtrans = QualType(rhptee, 0);
+ if (!S.Context.typesAreCompatible(ltrans, rtrans)) {
+ // Check if the pointee types are compatible ignoring the sign.
+ // We explicitly check for char so that we catch "char" vs
+ // "unsigned char" on systems where "char" is unsigned.
+ if (lhptee->isCharType())
+ ltrans = S.Context.UnsignedCharTy;
+ else if (lhptee->hasSignedIntegerRepresentation())
+ ltrans = S.Context.getCorrespondingUnsignedType(ltrans);
+
+ if (rhptee->isCharType())
+ rtrans = S.Context.UnsignedCharTy;
+ else if (rhptee->hasSignedIntegerRepresentation())
+ rtrans = S.Context.getCorrespondingUnsignedType(rtrans);
+
+ if (ltrans == rtrans) {
+ // Types are compatible ignoring the sign. Qualifier incompatibility
+ // takes priority over sign incompatibility because the sign
+ // warning can be disabled.
+ if (ConvTy != Sema::Compatible)
+ return ConvTy;
+
+ return Sema::IncompatiblePointerSign;
+ }
+
+ // If we are a multi-level pointer, it's possible that our issue is simply
+ // one of qualification - e.g. char ** -> const char ** is not allowed. If
+ // the eventual target type is the same and the pointers have the same
+ // level of indirection, this must be the issue.
+ if (isa<PointerType>(lhptee) && isa<PointerType>(rhptee)) {
+ do {
+ lhptee = cast<PointerType>(lhptee)->getPointeeType().getTypePtr();
+ rhptee = cast<PointerType>(rhptee)->getPointeeType().getTypePtr();
+ } while (isa<PointerType>(lhptee) && isa<PointerType>(rhptee));
+
+ if (lhptee == rhptee)
+ return Sema::IncompatibleNestedPointerQualifiers;
+ }
+
+ // General pointer incompatibility takes priority over qualifiers.
+ return Sema::IncompatiblePointer;
+ }
+ if (!S.getLangOpts().CPlusPlus &&
+ S.IsNoReturnConversion(ltrans, rtrans, ltrans))
+ return Sema::IncompatiblePointer;
+ return ConvTy;
+}
+
+/// checkBlockPointerTypesForAssignment - This routine determines whether two
+/// block pointer types are compatible or whether a block and normal pointer
+/// are compatible. It is more restrictive than comparing two function pointer
+/// types.
+static Sema::AssignConvertType
+checkBlockPointerTypesForAssignment(Sema &S, QualType LHSType,
+ QualType RHSType) {
+ assert(LHSType.isCanonical() && "LHS not canonicalized!");
+ assert(RHSType.isCanonical() && "RHS not canonicalized!");
+
+ QualType lhptee, rhptee;
+
+ // get the "pointed to" type (ignoring qualifiers at the top level)
+ lhptee = cast<BlockPointerType>(LHSType)->getPointeeType();
+ rhptee = cast<BlockPointerType>(RHSType)->getPointeeType();
+
+ // In C++, the types have to match exactly.
+ if (S.getLangOpts().CPlusPlus)
+ return Sema::IncompatibleBlockPointer;
+
+ Sema::AssignConvertType ConvTy = Sema::Compatible;
+
+ // For blocks we enforce that qualifiers are identical.
+ if (lhptee.getLocalQualifiers() != rhptee.getLocalQualifiers())
+ ConvTy = Sema::CompatiblePointerDiscardsQualifiers;
+
+ if (!S.Context.typesAreBlockPointerCompatible(LHSType, RHSType))
+ return Sema::IncompatibleBlockPointer;
+
+ return ConvTy;
+}
+
+/// checkObjCPointerTypesForAssignment - Compares two objective-c pointer types
+/// for assignment compatibility.
+static Sema::AssignConvertType
+checkObjCPointerTypesForAssignment(Sema &S, QualType LHSType,
+ QualType RHSType) {
+ assert(LHSType.isCanonical() && "LHS was not canonicalized!");
+ assert(RHSType.isCanonical() && "RHS was not canonicalized!");
+
+ if (LHSType->isObjCBuiltinType()) {
+ // Class is not compatible with ObjC object pointers.
+ if (LHSType->isObjCClassType() && !RHSType->isObjCBuiltinType() &&
+ !RHSType->isObjCQualifiedClassType())
+ return Sema::IncompatiblePointer;
+ return Sema::Compatible;
+ }
+ if (RHSType->isObjCBuiltinType()) {
+ if (RHSType->isObjCClassType() && !LHSType->isObjCBuiltinType() &&
+ !LHSType->isObjCQualifiedClassType())
+ return Sema::IncompatiblePointer;
+ return Sema::Compatible;
+ }
+ QualType lhptee = LHSType->getAs<ObjCObjectPointerType>()->getPointeeType();
+ QualType rhptee = RHSType->getAs<ObjCObjectPointerType>()->getPointeeType();
+
+ if (!lhptee.isAtLeastAsQualifiedAs(rhptee) &&
+ // make an exception for id<P>
+ !LHSType->isObjCQualifiedIdType())
+ return Sema::CompatiblePointerDiscardsQualifiers;
+
+ if (S.Context.typesAreCompatible(LHSType, RHSType))
+ return Sema::Compatible;
+ if (LHSType->isObjCQualifiedIdType() || RHSType->isObjCQualifiedIdType())
+ return Sema::IncompatibleObjCQualifiedId;
+ return Sema::IncompatiblePointer;
+}
+
+Sema::AssignConvertType
+Sema::CheckAssignmentConstraints(SourceLocation Loc,
+ QualType LHSType, QualType RHSType) {
+ // Fake up an opaque expression. We don't actually care about what
+ // cast operations are required, so if CheckAssignmentConstraints
+ // adds casts to this they'll be wasted, but fortunately that doesn't
+ // usually happen on valid code.
+ OpaqueValueExpr RHSExpr(Loc, RHSType, VK_RValue);
+ ExprResult RHSPtr = &RHSExpr;
+ CastKind K = CK_Invalid;
+
+ return CheckAssignmentConstraints(LHSType, RHSPtr, K, /*ConvertRHS=*/false);
+}
+
+/// CheckAssignmentConstraints (C99 6.5.16) - This routine currently
+/// has code to accommodate several GCC extensions when type checking
+/// pointers. Here are some objectionable examples that GCC considers warnings:
+///
+/// int a, *pint;
+/// short *pshort;
+/// struct foo *pfoo;
+///
+/// pint = pshort; // warning: assignment from incompatible pointer type
+/// a = pint; // warning: assignment makes integer from pointer without a cast
+/// pint = a; // warning: assignment makes pointer from integer without a cast
+/// pint = pfoo; // warning: assignment from incompatible pointer type
+///
+/// As a result, the code for dealing with pointers is more complex than the
+/// C99 spec dictates.
+///
+/// Sets 'Kind' for any result kind except Incompatible.
+Sema::AssignConvertType
+Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
+ CastKind &Kind, bool ConvertRHS) {
+ QualType RHSType = RHS.get()->getType();
+ QualType OrigLHSType = LHSType;
+
+ // Get canonical types. We're not formatting these types, just comparing
+ // them.
+ LHSType = Context.getCanonicalType(LHSType).getUnqualifiedType();
+ RHSType = Context.getCanonicalType(RHSType).getUnqualifiedType();
+
+ // Common case: no conversion required.
+ if (LHSType == RHSType) {
+ Kind = CK_NoOp;
+ return Compatible;
+ }
+
+ // If we have an atomic type, try a non-atomic assignment, then just add an
+ // atomic qualification step.
+ if (const AtomicType *AtomicTy = dyn_cast<AtomicType>(LHSType)) {
+ Sema::AssignConvertType result =
+ CheckAssignmentConstraints(AtomicTy->getValueType(), RHS, Kind);
+ if (result != Compatible)
+ return result;
+ if (Kind != CK_NoOp && ConvertRHS)
+ RHS = ImpCastExprToType(RHS.get(), AtomicTy->getValueType(), Kind);
+ Kind = CK_NonAtomicToAtomic;
+ return Compatible;
+ }
+
+ // If the left-hand side is a reference type, then we are in a
+ // (rare!) case where we've allowed the use of references in C,
+ // e.g., as a parameter type in a built-in function. In this case,
+ // just make sure that the type referenced is compatible with the
+ // right-hand side type. The caller is responsible for adjusting
+ // LHSType so that the resulting expression does not have reference
+ // type.
+ if (const ReferenceType *LHSTypeRef = LHSType->getAs<ReferenceType>()) {
+ if (Context.typesAreCompatible(LHSTypeRef->getPointeeType(), RHSType)) {
+ Kind = CK_LValueBitCast;
+ return Compatible;
+ }
+ return Incompatible;
+ }
+
+ // Allow scalar to ExtVector assignments, and assignments of an ExtVector type
+ // to the same ExtVector type.
+ if (LHSType->isExtVectorType()) {
+ if (RHSType->isExtVectorType())
+ return Incompatible;
+ if (RHSType->isArithmeticType()) {
+ // CK_VectorSplat does T -> vector T, so first cast to the
+ // element type.
+ QualType elType = cast<ExtVectorType>(LHSType)->getElementType();
+ if (elType != RHSType && ConvertRHS) {
+ Kind = PrepareScalarCast(RHS, elType);
+ RHS = ImpCastExprToType(RHS.get(), elType, Kind);
+ }
+ Kind = CK_VectorSplat;
+ return Compatible;
+ }
+ }
+
+ // Conversions to or from vector type.
+ if (LHSType->isVectorType() || RHSType->isVectorType()) {
+ if (LHSType->isVectorType() && RHSType->isVectorType()) {
+ // Allow assignments of an AltiVec vector type to an equivalent GCC
+ // vector type and vice versa
+ if (Context.areCompatibleVectorTypes(LHSType, RHSType)) {
+ Kind = CK_BitCast;
+ return Compatible;
+ }
+
+ // If we are allowing lax vector conversions, and LHS and RHS are both
+ // vectors, the total size only needs to be the same. This is a bitcast;
+ // no bits are changed but the result type is different.
+ if (isLaxVectorConversion(RHSType, LHSType)) {
+ Kind = CK_BitCast;
+ return IncompatibleVectors;
+ }
+ }
+ return Incompatible;
+ }
+
+ // Arithmetic conversions.
+ if (LHSType->isArithmeticType() && RHSType->isArithmeticType() &&
+ !(getLangOpts().CPlusPlus && LHSType->isEnumeralType())) {
+ if (ConvertRHS)
+ Kind = PrepareScalarCast(RHS, LHSType);
+ return Compatible;
+ }
+
+ // Conversions to normal pointers.
+ if (const PointerType *LHSPointer = dyn_cast<PointerType>(LHSType)) {
+ // U* -> T*
+ if (isa<PointerType>(RHSType)) {
+ unsigned AddrSpaceL = LHSPointer->getPointeeType().getAddressSpace();
+ unsigned AddrSpaceR = RHSType->getPointeeType().getAddressSpace();
+ Kind = AddrSpaceL != AddrSpaceR ? CK_AddressSpaceConversion : CK_BitCast;
+ return checkPointerTypesForAssignment(*this, LHSType, RHSType);
+ }
+
+ // int -> T*
+ if (RHSType->isIntegerType()) {
+ Kind = CK_IntegralToPointer; // FIXME: null?
+ return IntToPointer;
+ }
+
+ // C pointers are not compatible with ObjC object pointers,
+ // with two exceptions:
+ if (isa<ObjCObjectPointerType>(RHSType)) {
+ // - conversions to void*
+ if (LHSPointer->getPointeeType()->isVoidType()) {
+ Kind = CK_BitCast;
+ return Compatible;
+ }
+
+ // - conversions from 'Class' to the redefinition type
+ if (RHSType->isObjCClassType() &&
+ Context.hasSameType(LHSType,
+ Context.getObjCClassRedefinitionType())) {
+ Kind = CK_BitCast;
+ return Compatible;
+ }
+
+ Kind = CK_BitCast;
+ return IncompatiblePointer;
+ }
+
+ // U^ -> void*
+ if (RHSType->getAs<BlockPointerType>()) {
+ if (LHSPointer->getPointeeType()->isVoidType()) {
+ Kind = CK_BitCast;
+ return Compatible;
+ }
+ }
+
+ return Incompatible;
+ }
+
+ // Conversions to block pointers.
+ if (isa<BlockPointerType>(LHSType)) {
+ // U^ -> T^
+ if (RHSType->isBlockPointerType()) {
+ Kind = CK_BitCast;
+ return checkBlockPointerTypesForAssignment(*this, LHSType, RHSType);
+ }
+
+ // int or null -> T^
+ if (RHSType->isIntegerType()) {
+ Kind = CK_IntegralToPointer; // FIXME: null
+ return IntToBlockPointer;
+ }
+
+ // id -> T^
+ if (getLangOpts().ObjC1 && RHSType->isObjCIdType()) {
+ Kind = CK_AnyPointerToBlockPointerCast;
+ return Compatible;
+ }
+
+ // void* -> T^
+ if (const PointerType *RHSPT = RHSType->getAs<PointerType>())
+ if (RHSPT->getPointeeType()->isVoidType()) {
+ Kind = CK_AnyPointerToBlockPointerCast;
+ return Compatible;
+ }
+
+ return Incompatible;
+ }
+
+ // Conversions to Objective-C pointers.
+ if (isa<ObjCObjectPointerType>(LHSType)) {
+ // A* -> B*
+ if (RHSType->isObjCObjectPointerType()) {
+ Kind = CK_BitCast;
+ Sema::AssignConvertType result =
+ checkObjCPointerTypesForAssignment(*this, LHSType, RHSType);
+ if (getLangOpts().ObjCAutoRefCount &&
+ result == Compatible &&
+ !CheckObjCARCUnavailableWeakConversion(OrigLHSType, RHSType))
+ result = IncompatibleObjCWeakRef;
+ return result;
+ }
+
+ // int or null -> A*
+ if (RHSType->isIntegerType()) {
+ Kind = CK_IntegralToPointer; // FIXME: null
+ return IntToPointer;
+ }
+
+ // In general, C pointers are not compatible with ObjC object pointers,
+ // with two exceptions:
+ if (isa<PointerType>(RHSType)) {
+ Kind = CK_CPointerToObjCPointerCast;
+
+ // - conversions from 'void*'
+ if (RHSType->isVoidPointerType()) {
+ return Compatible;
+ }
+
+ // - conversions to 'Class' from its redefinition type
+ if (LHSType->isObjCClassType() &&
+ Context.hasSameType(RHSType,
+ Context.getObjCClassRedefinitionType())) {
+ return Compatible;
+ }
+
+ return IncompatiblePointer;
+ }
+
+ // Only under strict conditions is T^ compatible with an Objective-C pointer.
+ if (RHSType->isBlockPointerType() &&
+ LHSType->isBlockCompatibleObjCPointerType(Context)) {
+ if (ConvertRHS)
+ maybeExtendBlockObject(RHS);
+ Kind = CK_BlockPointerToObjCPointerCast;
+ return Compatible;
+ }
+
+ return Incompatible;
+ }
+
+ // Conversions from pointers that are not covered by the above.
+ if (isa<PointerType>(RHSType)) {
+ // T* -> _Bool
+ if (LHSType == Context.BoolTy) {
+ Kind = CK_PointerToBoolean;
+ return Compatible;
+ }
+
+ // T* -> int
+ if (LHSType->isIntegerType()) {
+ Kind = CK_PointerToIntegral;
+ return PointerToInt;
+ }
+
+ return Incompatible;
+ }
+
+ // Conversions from Objective-C pointers that are not covered by the above.
+ if (isa<ObjCObjectPointerType>(RHSType)) {
+ // T* -> _Bool
+ if (LHSType == Context.BoolTy) {
+ Kind = CK_PointerToBoolean;
+ return Compatible;
+ }
+
+ // T* -> int
+ if (LHSType->isIntegerType()) {
+ Kind = CK_PointerToIntegral;
+ return PointerToInt;
+ }
+
+ return Incompatible;
+ }
+
+ // struct A -> struct B
+ if (isa<TagType>(LHSType) && isa<TagType>(RHSType)) {
+ if (Context.typesAreCompatible(LHSType, RHSType)) {
+ Kind = CK_NoOp;
+ return Compatible;
+ }
+ }
+
+ return Incompatible;
+}
+
+/// \brief Constructs a transparent union from an expression that is
+/// used to initialize the transparent union.
+static void ConstructTransparentUnion(Sema &S, ASTContext &C,
+ ExprResult &EResult, QualType UnionType,
+ FieldDecl *Field) {
+ // Build an initializer list that designates the appropriate member
+ // of the transparent union.
+ Expr *E = EResult.get();
+ InitListExpr *Initializer = new (C) InitListExpr(C, SourceLocation(),
+ E, SourceLocation());
+ Initializer->setType(UnionType);
+ Initializer->setInitializedFieldInUnion(Field);
+
+ // Build a compound literal constructing a value of the transparent
+ // union type from this initializer list.
+ TypeSourceInfo *unionTInfo = C.getTrivialTypeSourceInfo(UnionType);
+ EResult = new (C) CompoundLiteralExpr(SourceLocation(), unionTInfo, UnionType,
+ VK_RValue, Initializer, false);
+}
+
+Sema::AssignConvertType
+Sema::CheckTransparentUnionArgumentConstraints(QualType ArgType,
+ ExprResult &RHS) {
+ QualType RHSType = RHS.get()->getType();
+
+ // If the ArgType is a Union type, we want to handle a potential
+ // transparent_union GCC extension.
+ const RecordType *UT = ArgType->getAsUnionType();
+ if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ return Incompatible;
+
+ // The field to initialize within the transparent union.
+ RecordDecl *UD = UT->getDecl();
+ FieldDecl *InitField = nullptr;
+ // It's compatible if the expression matches any of the fields.
+ for (auto *it : UD->fields()) {
+ if (it->getType()->isPointerType()) {
+ // If the transparent union contains a pointer type, we allow:
+ // 1) void pointer
+ // 2) null pointer constant
+ if (RHSType->isPointerType())
+ if (RHSType->castAs<PointerType>()->getPointeeType()->isVoidType()) {
+ RHS = ImpCastExprToType(RHS.get(), it->getType(), CK_BitCast);
+ InitField = it;
+ break;
+ }
+
+ if (RHS.get()->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull)) {
+ RHS = ImpCastExprToType(RHS.get(), it->getType(),
+ CK_NullToPointer);
+ InitField = it;
+ break;
+ }
+ }
+
+ CastKind Kind = CK_Invalid;
+ if (CheckAssignmentConstraints(it->getType(), RHS, Kind)
+ == Compatible) {
+ RHS = ImpCastExprToType(RHS.get(), it->getType(), Kind);
+ InitField = it;
+ break;
+ }
+ }
+
+ if (!InitField)
+ return Incompatible;
+
+ ConstructTransparentUnion(*this, Context, RHS, ArgType, InitField);
+ return Compatible;
+}
+
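+// An illustrative sketch of the GCC extension handled above (hypothetical
+// declarations):
+//   typedef union {
+//     int *ip;
+//     float *fp;
+//   } arg_t __attribute__((transparent_union));
+//   void f(arg_t);
+//   int i;
+//   f(&i);   // compatible: initializes the 'ip' member
+//   f(0);    // a null pointer constant also matches a pointer field
+// The argument is then wrapped in an InitListExpr/CompoundLiteralExpr that
+// designates the matched field.
+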
+Sema::AssignConvertType
+Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
+ bool Diagnose,
+ bool DiagnoseCFAudited,
+ bool ConvertRHS) {
+ // If ConvertRHS is false, we want to leave the caller's RHS untouched. Sadly,
+ // we can't avoid *all* modifications at the moment, so we need somewhere
+ // to put the updated value.
+ ExprResult LocalRHS = CallerRHS;
+ ExprResult &RHS = ConvertRHS ? CallerRHS : LocalRHS;
+
+ if (getLangOpts().CPlusPlus) {
+ if (!LHSType->isRecordType() && !LHSType->isAtomicType()) {
+ // C++ 5.17p3: If the left operand is not of class type, the
+ // expression is implicitly converted (C++ 4) to the
+ // cv-unqualified type of the left operand.
+ ExprResult Res;
+ if (Diagnose) {
+ Res = PerformImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(),
+ AA_Assigning);
+ } else {
+ ImplicitConversionSequence ICS =
+ TryImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(),
+ /*SuppressUserConversions=*/false,
+ /*AllowExplicit=*/false,
+ /*InOverloadResolution=*/false,
+ /*CStyle=*/false,
+ /*AllowObjCWritebackConversion=*/false);
+ if (ICS.isFailure())
+ return Incompatible;
+ Res = PerformImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(),
+ ICS, AA_Assigning);
+ }
+ if (Res.isInvalid())
+ return Incompatible;
+ Sema::AssignConvertType result = Compatible;
+ if (getLangOpts().ObjCAutoRefCount &&
+ !CheckObjCARCUnavailableWeakConversion(LHSType,
+ RHS.get()->getType()))
+ result = IncompatibleObjCWeakRef;
+ RHS = Res;
+ return result;
+ }
+
+ // FIXME: Currently, we fall through and treat C++ classes like C
+ // structures.
+ // FIXME: We also fall through for atomics; not sure what should
+ // happen there, though.
+ } else if (RHS.get()->getType() == Context.OverloadTy) {
+ // As a set of extensions to C, we support overloading on functions. These
+ // functions need to be resolved here.
+ DeclAccessPair DAP;
+ if (FunctionDecl *FD = ResolveAddressOfOverloadedFunction(
+ RHS.get(), LHSType, /*Complain=*/false, DAP))
+ RHS = FixOverloadedFunctionReference(RHS.get(), DAP, FD);
+ else
+ return Incompatible;
+ }
+
+ // C99 6.5.16.1p1: the left operand is a pointer and the right is
+ // a null pointer constant.
+ if ((LHSType->isPointerType() || LHSType->isObjCObjectPointerType() ||
+ LHSType->isBlockPointerType()) &&
+ RHS.get()->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull)) {
+ CastKind Kind;
+ CXXCastPath Path;
+ CheckPointerConversion(RHS.get(), LHSType, Kind, Path, false);
+ if (ConvertRHS)
+ RHS = ImpCastExprToType(RHS.get(), LHSType, Kind, VK_RValue, &Path);
+ return Compatible;
+ }
+
+ // This check seems unnatural, but it is necessary to ensure the proper
+ // conversion of functions/arrays. If the conversion were done for all
+ // DeclRefExpr's (created by ActOnIdExpression), it would mess up the unary
+ // expressions that suppress this implicit conversion (&, sizeof).
+ //
+ // Suppress this for references: C++ 8.5.3p5.
+ if (!LHSType->isReferenceType()) {
+ // FIXME: We potentially allocate here even if ConvertRHS is false.
+ RHS = DefaultFunctionArrayLvalueConversion(RHS.get(), Diagnose);
+ if (RHS.isInvalid())
+ return Incompatible;
+ }
+
+ Expr *PRE = RHS.get()->IgnoreParenCasts();
+ if (ObjCProtocolExpr *OPE = dyn_cast<ObjCProtocolExpr>(PRE)) {
+ ObjCProtocolDecl *PDecl = OPE->getProtocol();
+ if (PDecl && !PDecl->hasDefinition()) {
+ Diag(PRE->getExprLoc(), diag::warn_atprotocol_protocol) << PDecl->getName();
+ Diag(PDecl->getLocation(), diag::note_entity_declared_at) << PDecl;
+ }
+ }
+
+ CastKind Kind = CK_Invalid;
+ Sema::AssignConvertType result =
+ CheckAssignmentConstraints(LHSType, RHS, Kind, ConvertRHS);
+
+ // C99 6.5.16.1p2: The value of the right operand is converted to the
+ // type of the assignment expression.
+ // CheckAssignmentConstraints allows the left-hand side to be a reference,
+ // so that we can use references in built-in functions even in C.
+ // The getNonReferenceType() call makes sure that the resulting expression
+ // does not have reference type.
+ if (result != Incompatible && RHS.get()->getType() != LHSType) {
+ QualType Ty = LHSType.getNonLValueExprType(Context);
+ Expr *E = RHS.get();
+ if (getLangOpts().ObjCAutoRefCount)
+ CheckObjCARCConversion(SourceRange(), Ty, E, CCK_ImplicitConversion,
+ DiagnoseCFAudited);
+ if (getLangOpts().ObjC1 &&
+ (CheckObjCBridgeRelatedConversions(E->getLocStart(),
+ LHSType, E->getType(), E) ||
+ ConversionToObjCStringLiteralCheck(LHSType, E))) {
+ RHS = E;
+ return Compatible;
+ }
+
+ if (ConvertRHS)
+ RHS = ImpCastExprToType(E, Ty, Kind);
+ }
+ return result;
+}
+
+QualType Sema::InvalidOperands(SourceLocation Loc, ExprResult &LHS,
+ ExprResult &RHS) {
+ Diag(Loc, diag::err_typecheck_invalid_operands)
+ << LHS.get()->getType() << RHS.get()->getType()
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return QualType();
+}
+
+/// Try to convert a value of non-vector type to a vector type by converting
+/// the type to the element type of the vector and then performing a splat.
+/// If the language is OpenCL, we only use conversions that promote scalar
+/// rank; for C, Obj-C, and C++ we allow any real scalar conversion except
+/// for float->int.
+///
+/// \param scalar - if non-null, actually perform the conversions
+/// \return true if the operation fails (but without diagnosing the failure)
+static bool tryVectorConvertAndSplat(Sema &S, ExprResult *scalar,
+ QualType scalarTy,
+ QualType vectorEltTy,
+ QualType vectorTy) {
+ // The conversion to apply to the scalar before splatting it,
+ // if necessary.
+ CastKind scalarCast = CK_Invalid;
+
+ if (vectorEltTy->isIntegralType(S.Context)) {
+ if (!scalarTy->isIntegralType(S.Context))
+ return true;
+ if (S.getLangOpts().OpenCL &&
+ S.Context.getIntegerTypeOrder(vectorEltTy, scalarTy) < 0)
+ return true;
+ scalarCast = CK_IntegralCast;
+ } else if (vectorEltTy->isRealFloatingType()) {
+ if (scalarTy->isRealFloatingType()) {
+ if (S.getLangOpts().OpenCL &&
+ S.Context.getFloatingTypeOrder(vectorEltTy, scalarTy) < 0)
+ return true;
+ scalarCast = CK_FloatingCast;
+ }
+ else if (scalarTy->isIntegralType(S.Context))
+ scalarCast = CK_IntegralToFloating;
+ else
+ return true;
+ } else {
+ return true;
+ }
+
+ // Adjust scalar if desired.
+ if (scalar) {
+ if (scalarCast != CK_Invalid)
+ *scalar = S.ImpCastExprToType(scalar->get(), vectorEltTy, scalarCast);
+ *scalar = S.ImpCastExprToType(scalar->get(), vectorTy, CK_VectorSplat);
+ }
+ return false;
+}
+
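+// An illustrative sketch (assuming an ext_vector_type typedef such as
+// 'typedef float float4 __attribute__((ext_vector_type(4)));'): for
+// 'float4 v; int i;', the operand 'i' in 'v + i' is first converted with
+// CK_IntegralToFloating and then splatted with CK_VectorSplat. Under
+// OpenCL only rank-promoting conversions are used, so e.g. a 'double'
+// scalar combined with a float4 vector fails here and is diagnosed by the
+// caller.
+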
+QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc, bool IsCompAssign,
+ bool AllowBothBool,
+ bool AllowBoolConversions) {
+ if (!IsCompAssign) {
+ LHS = DefaultFunctionArrayLvalueConversion(LHS.get());
+ if (LHS.isInvalid())
+ return QualType();
+ }
+ RHS = DefaultFunctionArrayLvalueConversion(RHS.get());
+ if (RHS.isInvalid())
+ return QualType();
+
+ // For conversion purposes, we ignore any qualifiers.
+ // For example, "const float" and "float" are equivalent.
+ QualType LHSType = LHS.get()->getType().getUnqualifiedType();
+ QualType RHSType = RHS.get()->getType().getUnqualifiedType();
+
+ const VectorType *LHSVecType = LHSType->getAs<VectorType>();
+ const VectorType *RHSVecType = RHSType->getAs<VectorType>();
+ assert(LHSVecType || RHSVecType);
+
+ // AltiVec-style "vector bool op vector bool" combinations are allowed
+ // for some operators but not others.
+ if (!AllowBothBool &&
+ LHSVecType && LHSVecType->getVectorKind() == VectorType::AltiVecBool &&
+ RHSVecType && RHSVecType->getVectorKind() == VectorType::AltiVecBool)
+ return InvalidOperands(Loc, LHS, RHS);
+
+ // If the vector types are identical, return.
+ if (Context.hasSameType(LHSType, RHSType))
+ return LHSType;
+
+ // If we have compatible AltiVec and GCC vector types, use the AltiVec type.
+ if (LHSVecType && RHSVecType &&
+ Context.areCompatibleVectorTypes(LHSType, RHSType)) {
+ if (isa<ExtVectorType>(LHSVecType)) {
+ RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast);
+ return LHSType;
+ }
+
+ if (!IsCompAssign)
+ LHS = ImpCastExprToType(LHS.get(), RHSType, CK_BitCast);
+ return RHSType;
+ }
+
+ // AllowBoolConversions says that bool and non-bool AltiVec vectors
+ // can be mixed, with the result being the non-bool type. The non-bool
+ // operand must have integer element type.
+ if (AllowBoolConversions && LHSVecType && RHSVecType &&
+ LHSVecType->getNumElements() == RHSVecType->getNumElements() &&
+ (Context.getTypeSize(LHSVecType->getElementType()) ==
+ Context.getTypeSize(RHSVecType->getElementType()))) {
+ if (LHSVecType->getVectorKind() == VectorType::AltiVecVector &&
+ LHSVecType->getElementType()->isIntegerType() &&
+ RHSVecType->getVectorKind() == VectorType::AltiVecBool) {
+ RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast);
+ return LHSType;
+ }
+ if (!IsCompAssign &&
+ LHSVecType->getVectorKind() == VectorType::AltiVecBool &&
+ RHSVecType->getVectorKind() == VectorType::AltiVecVector &&
+ RHSVecType->getElementType()->isIntegerType()) {
+ LHS = ImpCastExprToType(LHS.get(), RHSType, CK_BitCast);
+ return RHSType;
+ }
+ }
+
+ // If there's an ext-vector type and a scalar, try to convert the scalar to
+ // the vector element type and splat.
+ if (!RHSVecType && isa<ExtVectorType>(LHSVecType)) {
+ if (!tryVectorConvertAndSplat(*this, &RHS, RHSType,
+ LHSVecType->getElementType(), LHSType))
+ return LHSType;
+ }
+ if (!LHSVecType && isa<ExtVectorType>(RHSVecType)) {
+ if (!tryVectorConvertAndSplat(*this, (IsCompAssign ? nullptr : &LHS),
+ LHSType, RHSVecType->getElementType(),
+ RHSType))
+ return RHSType;
+ }
+
+ // If we're allowing lax vector conversions, only the total (data) size
+ // needs to be the same.
+ // FIXME: Should we really be allowing this?
+ // FIXME: We really just pick the LHS type arbitrarily?
+ if (isLaxVectorConversion(RHSType, LHSType)) {
+ QualType resultType = LHSType;
+ RHS = ImpCastExprToType(RHS.get(), resultType, CK_BitCast);
+ return resultType;
+ }
+
+ // Okay, the expression is invalid.
+
+ // If there's a non-vector, non-real operand, diagnose that.
+ if ((!RHSVecType && !RHSType->isRealType()) ||
+ (!LHSVecType && !LHSType->isRealType())) {
+ Diag(Loc, diag::err_typecheck_vector_not_convertable_non_scalar)
+ << LHSType << RHSType
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ // OpenCL V1.1 6.2.6.p1:
+ // If the operands are of more than one vector type, then an error shall
+ // occur. Implicit conversions between vector types are not permitted, per
+ // section 6.2.1.
+ if (getLangOpts().OpenCL &&
+ RHSVecType && isa<ExtVectorType>(RHSVecType) &&
+ LHSVecType && isa<ExtVectorType>(LHSVecType)) {
+ Diag(Loc, diag::err_opencl_implicit_vector_conversion) << LHSType
+ << RHSType;
+ return QualType();
+ }
+
+ // Otherwise, use the generic diagnostic.
+ Diag(Loc, diag::err_typecheck_vector_not_convertable)
+ << LHSType << RHSType
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return QualType();
+}
+
+// checkArithmeticNull - Detect when a NULL constant is used improperly in an
+// expression. These are mainly cases where the null pointer is used as an
+// integer instead of a pointer.
+static void checkArithmeticNull(Sema &S, ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc, bool IsCompare) {
+ // The canonical way to check for a GNU null is with isNullPointerConstant,
+ // but we use a bit of a hack here for speed; this is a relatively
+ // hot path, and isNullPointerConstant is slow.
+ bool LHSNull = isa<GNUNullExpr>(LHS.get()->IgnoreParenImpCasts());
+ bool RHSNull = isa<GNUNullExpr>(RHS.get()->IgnoreParenImpCasts());
+
+ QualType NonNullType = LHSNull ? RHS.get()->getType() : LHS.get()->getType();
+
+ // Avoid analyzing cases where the result will either be invalid (and
+ // diagnosed as such) or entirely valid and not something to warn about.
+ if ((!LHSNull && !RHSNull) || NonNullType->isBlockPointerType() ||
+ NonNullType->isMemberPointerType() || NonNullType->isFunctionType())
+ return;
+
+ // Arithmetic (non-comparison) operations never make sense with a null
+ // pointer, no matter what the other expression is.
+ if (!IsCompare) {
+ S.Diag(Loc, diag::warn_null_in_arithmetic_operation)
+ << (LHSNull ? LHS.get()->getSourceRange() : SourceRange())
+ << (RHSNull ? RHS.get()->getSourceRange() : SourceRange());
+ return;
+ }
+
+ // The rest of the operations only make sense with a null pointer
+ // if the other expression is a pointer.
+ if (LHSNull == RHSNull || NonNullType->isAnyPointerType() ||
+ NonNullType->canDecayToPointerType())
+ return;
+
+ S.Diag(Loc, diag::warn_null_in_comparison_operation)
+ << LHSNull /* LHS is NULL */ << NonNullType
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+}
+
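+// For illustration (in C++, where NULL is typically the GNU '__null'):
+// 'int n = 1 + NULL;' triggers warn_null_in_arithmetic_operation, while
+// 'if (p == NULL)' with a pointer 'p' stays silent, and 'if (i > NULL)'
+// with an integer 'i' triggers warn_null_in_comparison_operation.
+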
+static void DiagnoseBadDivideOrRemainderValues(Sema& S, ExprResult &LHS,
+ ExprResult &RHS,
+ SourceLocation Loc, bool IsDiv) {
+ // Check for division/remainder by zero.
+ llvm::APSInt RHSValue;
+ if (!RHS.get()->isValueDependent() &&
+ RHS.get()->EvaluateAsInt(RHSValue, S.Context) && RHSValue == 0)
+ S.DiagRuntimeBehavior(Loc, RHS.get(),
+ S.PDiag(diag::warn_remainder_division_by_zero)
+ << IsDiv << RHS.get()->getSourceRange());
+}
+
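+// For illustration: a constant zero divisor, as in 'x / 0' or 'x % 0',
+// reaches DiagRuntimeBehavior with warn_remainder_division_by_zero, the
+// 'IsDiv' flag selecting the division or remainder wording; value-dependent
+// divisors in templates are deliberately skipped.
+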
+QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc,
+ bool IsCompAssign, bool IsDiv) {
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
+
+ if (LHS.get()->getType()->isVectorType() ||
+ RHS.get()->getType()->isVectorType())
+ return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign,
+ /*AllowBothBool*/getLangOpts().AltiVec,
+ /*AllowBoolConversions*/false);
+
+ QualType compType = UsualArithmeticConversions(LHS, RHS, IsCompAssign);
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+
+ if (compType.isNull() || !compType->isArithmeticType())
+ return InvalidOperands(Loc, LHS, RHS);
+ if (IsDiv)
+ DiagnoseBadDivideOrRemainderValues(*this, LHS, RHS, Loc, IsDiv);
+ return compType;
+}
+
+QualType Sema::CheckRemainderOperands(
+ ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign) {
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
+
+ if (LHS.get()->getType()->isVectorType() ||
+ RHS.get()->getType()->isVectorType()) {
+ if (LHS.get()->getType()->hasIntegerRepresentation() &&
+ RHS.get()->getType()->hasIntegerRepresentation())
+ return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign,
+ /*AllowBothBool*/getLangOpts().AltiVec,
+ /*AllowBoolConversions*/false);
+ return InvalidOperands(Loc, LHS, RHS);
+ }
+
+ QualType compType = UsualArithmeticConversions(LHS, RHS, IsCompAssign);
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+
+ if (compType.isNull() || !compType->isIntegerType())
+ return InvalidOperands(Loc, LHS, RHS);
+ DiagnoseBadDivideOrRemainderValues(*this, LHS, RHS, Loc, false /* IsDiv */);
+ return compType;
+}
+
+/// \brief Diagnose invalid arithmetic on two void pointers.
+static void diagnoseArithmeticOnTwoVoidPointers(Sema &S, SourceLocation Loc,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ S.Diag(Loc, S.getLangOpts().CPlusPlus
+ ? diag::err_typecheck_pointer_arith_void_type
+ : diag::ext_gnu_void_ptr)
+ << 1 /* two pointers */ << LHSExpr->getSourceRange()
+ << RHSExpr->getSourceRange();
+}
+
+/// \brief Diagnose invalid arithmetic on a void pointer.
+static void diagnoseArithmeticOnVoidPointer(Sema &S, SourceLocation Loc,
+ Expr *Pointer) {
+ S.Diag(Loc, S.getLangOpts().CPlusPlus
+ ? diag::err_typecheck_pointer_arith_void_type
+ : diag::ext_gnu_void_ptr)
+ << 0 /* one pointer */ << Pointer->getSourceRange();
+}
+
+/// \brief Diagnose invalid arithmetic on two function pointers.
+static void diagnoseArithmeticOnTwoFunctionPointers(Sema &S, SourceLocation Loc,
+ Expr *LHS, Expr *RHS) {
+ assert(LHS->getType()->isAnyPointerType());
+ assert(RHS->getType()->isAnyPointerType());
+ S.Diag(Loc, S.getLangOpts().CPlusPlus
+ ? diag::err_typecheck_pointer_arith_function_type
+ : diag::ext_gnu_ptr_func_arith)
+ << 1 /* two pointers */ << LHS->getType()->getPointeeType()
+ // We only show the second type if it differs from the first.
+ << (unsigned)!S.Context.hasSameUnqualifiedType(LHS->getType(),
+ RHS->getType())
+ << RHS->getType()->getPointeeType()
+ << LHS->getSourceRange() << RHS->getSourceRange();
+}
+
+/// \brief Diagnose invalid arithmetic on a function pointer.
+static void diagnoseArithmeticOnFunctionPointer(Sema &S, SourceLocation Loc,
+ Expr *Pointer) {
+ assert(Pointer->getType()->isAnyPointerType());
+ S.Diag(Loc, S.getLangOpts().CPlusPlus
+ ? diag::err_typecheck_pointer_arith_function_type
+ : diag::ext_gnu_ptr_func_arith)
+ << 0 /* one pointer */ << Pointer->getType()->getPointeeType()
+ << 0 /* one pointer, so only one type */
+ << Pointer->getSourceRange();
+}
+
+/// \brief Emit an error if Operand is a pointer to an incomplete type.
+///
+/// \returns True if the pointee type is incomplete.
+static bool checkArithmeticIncompletePointerType(Sema &S, SourceLocation Loc,
+ Expr *Operand) {
+ QualType ResType = Operand->getType();
+ if (const AtomicType *ResAtomicType = ResType->getAs<AtomicType>())
+ ResType = ResAtomicType->getValueType();
+
+ assert(ResType->isAnyPointerType() && !ResType->isDependentType());
+ QualType PointeeTy = ResType->getPointeeType();
+ return S.RequireCompleteType(Loc, PointeeTy,
+ diag::err_typecheck_arithmetic_incomplete_type,
+ PointeeTy, Operand->getSourceRange());
+}
+
+/// \brief Check the validity of an arithmetic pointer operand.
+///
+/// If the operand has pointer type, this code will check for pointer types
+/// which are invalid in arithmetic operations. These will be diagnosed
+/// appropriately, including whether or not the use is supported as an
+/// extension.
+///
+/// \returns True when the operand is valid to use (even if as an extension).
+static bool checkArithmeticOpPointerOperand(Sema &S, SourceLocation Loc,
+ Expr *Operand) {
+ QualType ResType = Operand->getType();
+ if (const AtomicType *ResAtomicType = ResType->getAs<AtomicType>())
+ ResType = ResAtomicType->getValueType();
+
+ if (!ResType->isAnyPointerType()) return true;
+
+ QualType PointeeTy = ResType->getPointeeType();
+ if (PointeeTy->isVoidType()) {
+ diagnoseArithmeticOnVoidPointer(S, Loc, Operand);
+ return !S.getLangOpts().CPlusPlus;
+ }
+ if (PointeeTy->isFunctionType()) {
+ diagnoseArithmeticOnFunctionPointer(S, Loc, Operand);
+ return !S.getLangOpts().CPlusPlus;
+ }
+
+ if (checkArithmeticIncompletePointerType(S, Loc, Operand)) return false;
+
+ return true;
+}
+
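+// For illustration: with 'void *vp;' and 'void (*fp)(void);', both
+// 'vp + 1' and 'fp + 1' are accepted as GNU extensions in C
+// (ext_gnu_void_ptr / ext_gnu_ptr_func_arith) but are hard errors in C++,
+// and arithmetic on a pointer to an incomplete 'struct S' is always an
+// error.
+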
+/// \brief Check the validity of a binary arithmetic operation w.r.t. pointer
+/// operands.
+///
+/// This routine will diagnose any invalid arithmetic on pointer operands much
+/// like \see checkArithmeticOpPointerOperand. However, it has special logic
+/// for emitting a single diagnostic even for operations where both LHS and RHS
+/// are (potentially problematic) pointers.
+///
+/// \returns True when the operand is valid to use (even if as an extension).
+static bool checkArithmeticBinOpPointerOperands(Sema &S, SourceLocation Loc,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ bool isLHSPointer = LHSExpr->getType()->isAnyPointerType();
+ bool isRHSPointer = RHSExpr->getType()->isAnyPointerType();
+ if (!isLHSPointer && !isRHSPointer) return true;
+
+ QualType LHSPointeeTy, RHSPointeeTy;
+ if (isLHSPointer) LHSPointeeTy = LHSExpr->getType()->getPointeeType();
+ if (isRHSPointer) RHSPointeeTy = RHSExpr->getType()->getPointeeType();
+
+ // If both operands are pointers, check that the operation is valid with
+ // respect to address spaces.
+ if (S.getLangOpts().OpenCL && isLHSPointer && isRHSPointer) {
+ const PointerType *lhsPtr = LHSExpr->getType()->getAs<PointerType>();
+ const PointerType *rhsPtr = RHSExpr->getType()->getAs<PointerType>();
+ if (!lhsPtr->isAddressSpaceOverlapping(*rhsPtr)) {
+ S.Diag(Loc,
+ diag::err_typecheck_op_on_nonoverlapping_address_space_pointers)
+ << LHSExpr->getType() << RHSExpr->getType() << 1 /*arithmetic op*/
+ << LHSExpr->getSourceRange() << RHSExpr->getSourceRange();
+ return false;
+ }
+ }
+
+ // Check for arithmetic on pointers to incomplete types.
+ bool isLHSVoidPtr = isLHSPointer && LHSPointeeTy->isVoidType();
+ bool isRHSVoidPtr = isRHSPointer && RHSPointeeTy->isVoidType();
+ if (isLHSVoidPtr || isRHSVoidPtr) {
+ if (!isRHSVoidPtr) diagnoseArithmeticOnVoidPointer(S, Loc, LHSExpr);
+ else if (!isLHSVoidPtr) diagnoseArithmeticOnVoidPointer(S, Loc, RHSExpr);
+ else diagnoseArithmeticOnTwoVoidPointers(S, Loc, LHSExpr, RHSExpr);
+
+ return !S.getLangOpts().CPlusPlus;
+ }
+
+ bool isLHSFuncPtr = isLHSPointer && LHSPointeeTy->isFunctionType();
+ bool isRHSFuncPtr = isRHSPointer && RHSPointeeTy->isFunctionType();
+ if (isLHSFuncPtr || isRHSFuncPtr) {
+ if (!isRHSFuncPtr) diagnoseArithmeticOnFunctionPointer(S, Loc, LHSExpr);
+ else if (!isLHSFuncPtr) diagnoseArithmeticOnFunctionPointer(S, Loc,
+ RHSExpr);
+ else diagnoseArithmeticOnTwoFunctionPointers(S, Loc, LHSExpr, RHSExpr);
+
+ return !S.getLangOpts().CPlusPlus;
+ }
+
+ if (isLHSPointer && checkArithmeticIncompletePointerType(S, Loc, LHSExpr))
+ return false;
+ if (isRHSPointer && checkArithmeticIncompletePointerType(S, Loc, RHSExpr))
+ return false;
+
+ return true;
+}
+
+/// diagnoseStringPlusInt - Emit a warning when adding an integer to a string
+/// literal.
+static void diagnoseStringPlusInt(Sema &Self, SourceLocation OpLoc,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ StringLiteral* StrExpr = dyn_cast<StringLiteral>(LHSExpr->IgnoreImpCasts());
+ Expr* IndexExpr = RHSExpr;
+ if (!StrExpr) {
+ StrExpr = dyn_cast<StringLiteral>(RHSExpr->IgnoreImpCasts());
+ IndexExpr = LHSExpr;
+ }
+
+ bool IsStringPlusInt = StrExpr &&
+ IndexExpr->getType()->isIntegralOrUnscopedEnumerationType();
+ if (!IsStringPlusInt || IndexExpr->isValueDependent())
+ return;
+
+ llvm::APSInt index;
+ if (IndexExpr->EvaluateAsInt(index, Self.getASTContext())) {
+ unsigned StrLenWithNull = StrExpr->getLength() + 1;
+ if (index.isNonNegative() &&
+ index <= llvm::APSInt(llvm::APInt(index.getBitWidth(), StrLenWithNull),
+ index.isUnsigned()))
+ return;
+ }
+
+ SourceRange DiagRange(LHSExpr->getLocStart(), RHSExpr->getLocEnd());
+ Self.Diag(OpLoc, diag::warn_string_plus_int)
+ << DiagRange << IndexExpr->IgnoreImpCasts()->getType();
+
+ // Only print a fixit for "str" + int, not for int + "str".
+ if (IndexExpr == RHSExpr) {
+ SourceLocation EndLoc = Self.getLocForEndOfToken(RHSExpr->getLocEnd());
+ Self.Diag(OpLoc, diag::note_string_plus_scalar_silence)
+ << FixItHint::CreateInsertion(LHSExpr->getLocStart(), "&")
+ << FixItHint::CreateReplacement(SourceRange(OpLoc), "[")
+ << FixItHint::CreateInsertion(EndLoc, "]");
+ } else
+ Self.Diag(OpLoc, diag::note_string_plus_scalar_silence);
+}
+
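+// An illustrative sketch (hypothetical snippet): '"clang" + 7' and
+// '"clang" + i' with a non-constant 'i' trigger warn_string_plus_int,
+// since the result is a pointer past the literal rather than a
+// concatenation; for the string-on-the-left form the note's fixit offers
+// '&"clang"[7]'. A constant index within the literal (including its NUL),
+// such as '"clang" + 2', stays silent.
+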
+/// \brief Emit a warning when adding a char literal to a string.
+static void diagnoseStringPlusChar(Sema &Self, SourceLocation OpLoc,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ const Expr *StringRefExpr = LHSExpr;
+ const CharacterLiteral *CharExpr =
+ dyn_cast<CharacterLiteral>(RHSExpr->IgnoreImpCasts());
+
+ if (!CharExpr) {
+ CharExpr = dyn_cast<CharacterLiteral>(LHSExpr->IgnoreImpCasts());
+ StringRefExpr = RHSExpr;
+ }
+
+ if (!CharExpr || !StringRefExpr)
+ return;
+
+ const QualType StringType = StringRefExpr->getType();
+
+ // Return if not a PointerType.
+ if (!StringType->isAnyPointerType())
+ return;
+
+ // Return if not a CharacterType.
+ if (!StringType->getPointeeType()->isAnyCharacterType())
+ return;
+
+ ASTContext &Ctx = Self.getASTContext();
+ SourceRange DiagRange(LHSExpr->getLocStart(), RHSExpr->getLocEnd());
+
+ const QualType CharType = CharExpr->getType();
+ if (!CharType->isAnyCharacterType() &&
+ CharType->isIntegerType() &&
+ llvm::isUIntN(Ctx.getCharWidth(), CharExpr->getValue())) {
+ Self.Diag(OpLoc, diag::warn_string_plus_char)
+ << DiagRange << Ctx.CharTy;
+ } else {
+ Self.Diag(OpLoc, diag::warn_string_plus_char)
+ << DiagRange << CharExpr->getType();
+ }
+
+ // Only print a fixit for str + char, not for char + str.
+ if (isa<CharacterLiteral>(RHSExpr->IgnoreImpCasts())) {
+ SourceLocation EndLoc = Self.getLocForEndOfToken(RHSExpr->getLocEnd());
+ Self.Diag(OpLoc, diag::note_string_plus_scalar_silence)
+ << FixItHint::CreateInsertion(LHSExpr->getLocStart(), "&")
+ << FixItHint::CreateReplacement(SourceRange(OpLoc), "[")
+ << FixItHint::CreateInsertion(EndLoc, "]");
+ } else {
+ Self.Diag(OpLoc, diag::note_string_plus_scalar_silence);
+ }
+}
+
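+// For illustration: '"ab" + 'c'' triggers warn_string_plus_char and gets
+// the same silencing fixit, '&"ab"['c']'. In C, where a character literal
+// has type int, the diagnostic still names 'char' (Ctx.CharTy) as long as
+// the value fits in a char; in C++ the literal's own type is reported.
+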
+/// \brief Emit error when two pointers are incompatible.
+static void diagnosePointerIncompatibility(Sema &S, SourceLocation Loc,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ assert(LHSExpr->getType()->isAnyPointerType());
+ assert(RHSExpr->getType()->isAnyPointerType());
+ S.Diag(Loc, diag::err_typecheck_sub_ptr_compatible)
+ << LHSExpr->getType() << RHSExpr->getType() << LHSExpr->getSourceRange()
+ << RHSExpr->getSourceRange();
+}
+
+// C99 6.5.6
+QualType Sema::CheckAdditionOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc, BinaryOperatorKind Opc,
+ QualType* CompLHSTy) {
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
+
+ if (LHS.get()->getType()->isVectorType() ||
+ RHS.get()->getType()->isVectorType()) {
+ QualType compType = CheckVectorOperands(
+ LHS, RHS, Loc, CompLHSTy,
+ /*AllowBothBool*/getLangOpts().AltiVec,
+ /*AllowBoolConversions*/getLangOpts().ZVector);
+ if (CompLHSTy) *CompLHSTy = compType;
+ return compType;
+ }
+
+ QualType compType = UsualArithmeticConversions(LHS, RHS, CompLHSTy);
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+
+ // Diagnose "string literal" '+' int and string '+' "char literal".
+ if (Opc == BO_Add) {
+ diagnoseStringPlusInt(*this, Loc, LHS.get(), RHS.get());
+ diagnoseStringPlusChar(*this, Loc, LHS.get(), RHS.get());
+ }
+
+ // Handle the common case first (both operands are arithmetic).
+ if (!compType.isNull() && compType->isArithmeticType()) {
+ if (CompLHSTy) *CompLHSTy = compType;
+ return compType;
+ }
+
+ // Type-checking. Ultimately the pointer's going to be in PExp;
+ // note that we bias towards the LHS being the pointer.
+ Expr *PExp = LHS.get(), *IExp = RHS.get();
+
+ bool isObjCPointer;
+ if (PExp->getType()->isPointerType()) {
+ isObjCPointer = false;
+ } else if (PExp->getType()->isObjCObjectPointerType()) {
+ isObjCPointer = true;
+ } else {
+ std::swap(PExp, IExp);
+ if (PExp->getType()->isPointerType()) {
+ isObjCPointer = false;
+ } else if (PExp->getType()->isObjCObjectPointerType()) {
+ isObjCPointer = true;
+ } else {
+ return InvalidOperands(Loc, LHS, RHS);
+ }
+ }
+ assert(PExp->getType()->isAnyPointerType());
+
+ if (!IExp->getType()->isIntegerType())
+ return InvalidOperands(Loc, LHS, RHS);
+
+ if (!checkArithmeticOpPointerOperand(*this, Loc, PExp))
+ return QualType();
+
+ if (isObjCPointer && checkArithmeticOnObjCPointer(*this, Loc, PExp))
+ return QualType();
+
+ // Check array bounds for pointer arithmetic
+ CheckArrayAccess(PExp, IExp);
+
+ if (CompLHSTy) {
+ QualType LHSTy = Context.isPromotableBitField(LHS.get());
+ if (LHSTy.isNull()) {
+ LHSTy = LHS.get()->getType();
+ if (LHSTy->isPromotableIntegerType())
+ LHSTy = Context.getPromotedIntegerType(LHSTy);
+ }
+ *CompLHSTy = LHSTy;
+ }
+
+ return PExp->getType();
+}
+
+// C99 6.5.6
+QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc,
+ QualType* CompLHSTy) {
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
+
+ if (LHS.get()->getType()->isVectorType() ||
+ RHS.get()->getType()->isVectorType()) {
+ QualType compType = CheckVectorOperands(
+ LHS, RHS, Loc, CompLHSTy,
+ /*AllowBothBool*/getLangOpts().AltiVec,
+ /*AllowBoolConversions*/getLangOpts().ZVector);
+ if (CompLHSTy) *CompLHSTy = compType;
+ return compType;
+ }
+
+ QualType compType = UsualArithmeticConversions(LHS, RHS, CompLHSTy);
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+
+ // Enforce type constraints: C99 6.5.6p3.
+
+ // Handle the common case first (both operands are arithmetic).
+ if (!compType.isNull() && compType->isArithmeticType()) {
+ if (CompLHSTy) *CompLHSTy = compType;
+ return compType;
+ }
+
+ // Either ptr - int or ptr - ptr.
+ if (LHS.get()->getType()->isAnyPointerType()) {
+ QualType lpointee = LHS.get()->getType()->getPointeeType();
+
+ // Diagnose bad cases where we step over interface counts.
+ if (LHS.get()->getType()->isObjCObjectPointerType() &&
+ checkArithmeticOnObjCPointer(*this, Loc, LHS.get()))
+ return QualType();
+
+ // The result type of a pointer-int computation is the pointer type.
+ if (RHS.get()->getType()->isIntegerType()) {
+ if (!checkArithmeticOpPointerOperand(*this, Loc, LHS.get()))
+ return QualType();
+
+ // Check array bounds for pointer arithmetic
+ CheckArrayAccess(LHS.get(), RHS.get(), /*ArraySubscriptExpr*/nullptr,
+ /*AllowOnePastEnd*/true, /*IndexNegated*/true);
+
+ if (CompLHSTy) *CompLHSTy = LHS.get()->getType();
+ return LHS.get()->getType();
+ }
+
+ // Handle pointer-pointer subtractions.
+ if (const PointerType *RHSPTy
+ = RHS.get()->getType()->getAs<PointerType>()) {
+ QualType rpointee = RHSPTy->getPointeeType();
+
+ if (getLangOpts().CPlusPlus) {
+ // Pointee types must be the same: C++ [expr.add]
+ if (!Context.hasSameUnqualifiedType(lpointee, rpointee)) {
+ diagnosePointerIncompatibility(*this, Loc, LHS.get(), RHS.get());
+ }
+ } else {
+ // Pointee types must be compatible C99 6.5.6p3
+ if (!Context.typesAreCompatible(
+ Context.getCanonicalType(lpointee).getUnqualifiedType(),
+ Context.getCanonicalType(rpointee).getUnqualifiedType())) {
+ diagnosePointerIncompatibility(*this, Loc, LHS.get(), RHS.get());
+ return QualType();
+ }
+ }
+
+ if (!checkArithmeticBinOpPointerOperands(*this, Loc,
+ LHS.get(), RHS.get()))
+ return QualType();
+
+ // The pointee type may have zero size. As an extension, a structure or
+ // union may have zero size or an array may have zero length. In this
+ // case subtraction does not make sense.
+ if (!rpointee->isVoidType() && !rpointee->isFunctionType()) {
+ CharUnits ElementSize = Context.getTypeSizeInChars(rpointee);
+ if (ElementSize.isZero()) {
+ Diag(Loc,diag::warn_sub_ptr_zero_size_types)
+ << rpointee.getUnqualifiedType()
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ }
+ }
+
+ if (CompLHSTy) *CompLHSTy = LHS.get()->getType();
+ return Context.getPointerDiffType();
+ }
+ }
+
+ return InvalidOperands(Loc, LHS, RHS);
+}
+
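+// For illustration: with 'int *p, *q;', 'p - q' has the platform's
+// ptrdiff_t type (Context.getPointerDiffType()) while 'p - 1' keeps the
+// pointer type; subtracting pointers to incompatible pointees, e.g. 'p'
+// minus a 'float *', is diagnosed via err_typecheck_sub_ptr_compatible.
+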
+static bool isScopedEnumerationType(QualType T) {
+ if (const EnumType *ET = T->getAs<EnumType>())
+ return ET->getDecl()->isScoped();
+ return false;
+}
+
+static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc, BinaryOperatorKind Opc,
+ QualType LHSType) {
+ // OpenCL 6.3j: shift values are effectively taken modulo the word size of
+ // the LHS (i.e. the behavior is defined), so skip the remaining warnings;
+ // we don't want to modify values within Sema.
+ if (S.getLangOpts().OpenCL)
+ return;
+
+ llvm::APSInt Right;
+ // Check right/shifter operand
+ if (RHS.get()->isValueDependent() ||
+ !RHS.get()->EvaluateAsInt(Right, S.Context))
+ return;
+
+ if (Right.isNegative()) {
+ S.DiagRuntimeBehavior(Loc, RHS.get(),
+ S.PDiag(diag::warn_shift_negative)
+ << RHS.get()->getSourceRange());
+ return;
+ }
+ llvm::APInt LeftBits(Right.getBitWidth(),
+ S.Context.getTypeSize(LHS.get()->getType()));
+ if (Right.uge(LeftBits)) {
+ S.DiagRuntimeBehavior(Loc, RHS.get(),
+ S.PDiag(diag::warn_shift_gt_typewidth)
+ << RHS.get()->getSourceRange());
+ return;
+ }
+ if (Opc != BO_Shl)
+ return;
+
+ // When left shifting an ICE which is signed, we can check for overflow which
+ // according to C++ has undefined behavior ([expr.shift] 5.8/2). Unsigned
+ // integers have defined behavior modulo one more than the maximum value
+ // representable in the result type, so never warn for those.
+ llvm::APSInt Left;
+ if (LHS.get()->isValueDependent() ||
+ LHSType->hasUnsignedIntegerRepresentation() ||
+ !LHS.get()->EvaluateAsInt(Left, S.Context))
+ return;
+
+ // The LHS is known to have a signed type here; if its value is negative,
+ // the behavior of the left shift is undefined. Warn about it.
+ if (Left.isNegative()) {
+ S.DiagRuntimeBehavior(Loc, LHS.get(),
+ S.PDiag(diag::warn_shift_lhs_negative)
+ << LHS.get()->getSourceRange());
+ return;
+ }
+
+ llvm::APInt ResultBits =
+ static_cast<llvm::APInt&>(Right) + Left.getMinSignedBits();
+ if (LeftBits.uge(ResultBits))
+ return;
+ llvm::APSInt Result = Left.extend(ResultBits.getLimitedValue());
+ Result = Result.shl(Right);
+
+ // Print the bit representation of the signed integer as an unsigned
+ // hexadecimal number.
+ SmallString<40> HexResult;
+ Result.toString(HexResult, 16, /*Signed =*/false, /*Literal =*/true);
+
+ // If we are only missing a sign bit, this is less likely to result in actual
+ // bugs -- if the result is cast back to an unsigned type, it will have the
+ // expected value. Thus we place this behind a different warning that can be
+ // turned off separately if needed.
+ if (LeftBits == ResultBits - 1) {
+ S.Diag(Loc, diag::warn_shift_result_sets_sign_bit)
+ << HexResult << LHSType
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return;
+ }
+
+ S.Diag(Loc, diag::warn_shift_result_gt_typewidth)
+ << HexResult.str() << Result.getMinSignedBits() << LHSType
+ << Left.getBitWidth() << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+}
+
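+// For illustration, with a 32-bit int: '1 << -1' triggers
+// warn_shift_negative, '1 << 32' triggers warn_shift_gt_typewidth,
+// '0x7fffffff << 1' triggers warn_shift_result_sets_sign_bit (only the
+// sign bit is lost, hence the separately controllable warning), and
+// '0x7fffffff << 8' triggers warn_shift_result_gt_typewidth; the
+// overflowing result is printed as an unsigned hexadecimal number.
+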
+/// \brief Return the resulting type when an OpenCL vector is shifted
+/// by a scalar or vector shift amount.
+static QualType checkOpenCLVectorShift(Sema &S,
+ ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc, bool IsCompAssign) {
+ // OpenCL v1.1 s6.3.j says RHS can be a vector only if LHS is a vector.
+ if (!LHS.get()->getType()->isVectorType()) {
+ S.Diag(Loc, diag::err_shift_rhs_only_vector)
+ << RHS.get()->getType() << LHS.get()->getType()
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ if (!IsCompAssign) {
+ LHS = S.UsualUnaryConversions(LHS.get());
+ if (LHS.isInvalid()) return QualType();
+ }
+
+ RHS = S.UsualUnaryConversions(RHS.get());
+ if (RHS.isInvalid()) return QualType();
+
+ QualType LHSType = LHS.get()->getType();
+ const VectorType *LHSVecTy = LHSType->getAs<VectorType>();
+ QualType LHSEleType = LHSVecTy->getElementType();
+
+ // Note that RHS might not be a vector.
+ QualType RHSType = RHS.get()->getType();
+ const VectorType *RHSVecTy = RHSType->getAs<VectorType>();
+ QualType RHSEleType = RHSVecTy ? RHSVecTy->getElementType() : RHSType;
+
+ // OpenCL v1.1 s6.3.j says that the operands need to be integers.
+ if (!LHSEleType->isIntegerType()) {
+ S.Diag(Loc, diag::err_typecheck_expect_int)
+ << LHS.get()->getType() << LHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ if (!RHSEleType->isIntegerType()) {
+ S.Diag(Loc, diag::err_typecheck_expect_int)
+ << RHS.get()->getType() << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ if (RHSVecTy) {
+ // OpenCL v1.1 s6.3.j says that for vector types, the operators
+ // are applied component-wise. So if RHS is a vector, then ensure
+ // that the number of elements is the same as LHS...
+ if (RHSVecTy->getNumElements() != LHSVecTy->getNumElements()) {
+ S.Diag(Loc, diag::err_typecheck_vector_lengths_not_equal)
+ << LHS.get()->getType() << RHS.get()->getType()
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return QualType();
+ }
+ } else {
+ // ...else expand RHS to match the number of elements in LHS.
+ QualType VecTy =
+ S.Context.getExtVectorType(RHSEleType, LHSVecTy->getNumElements());
+ RHS = S.ImpCastExprToType(RHS.get(), VecTy, CK_VectorSplat);
+ }
+
+ return LHSType;
+}
+
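+// An illustrative OpenCL sketch (hypothetical kernel code): with
+// 'int4 v; int s;', the scalar in 'v << s' is splatted to int4 with
+// CK_VectorSplat before the component-wise shift; 's << v' is rejected
+// with err_shift_rhs_only_vector, and mismatched lengths such as an int4
+// shifted by an int2 produce err_typecheck_vector_lengths_not_equal.
+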
+// C99 6.5.7
+QualType Sema::CheckShiftOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc, BinaryOperatorKind Opc,
+ bool IsCompAssign) {
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
+
+ // Vector shifts promote their scalar inputs to vector type.
+ if (LHS.get()->getType()->isVectorType() ||
+ RHS.get()->getType()->isVectorType()) {
+ if (LangOpts.OpenCL)
+ return checkOpenCLVectorShift(*this, LHS, RHS, Loc, IsCompAssign);
+ if (LangOpts.ZVector) {
+ // The shift operators for the z vector extensions work basically
+ // like OpenCL shifts, except that neither the LHS nor the RHS is
+ // allowed to be a "vector bool".
+ if (auto LHSVecType = LHS.get()->getType()->getAs<VectorType>())
+ if (LHSVecType->getVectorKind() == VectorType::AltiVecBool)
+ return InvalidOperands(Loc, LHS, RHS);
+ if (auto RHSVecType = RHS.get()->getType()->getAs<VectorType>())
+ if (RHSVecType->getVectorKind() == VectorType::AltiVecBool)
+ return InvalidOperands(Loc, LHS, RHS);
+ return checkOpenCLVectorShift(*this, LHS, RHS, Loc, IsCompAssign);
+ }
+ return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign,
+ /*AllowBothBool*/true,
+ /*AllowBoolConversions*/false);
+ }
+
+ // Shifts don't perform usual arithmetic conversions, they just do integer
+ // promotions on each operand. C99 6.5.7p3
+
+ // For the LHS, do usual unary conversions, but then reset them away
+ // if this is a compound assignment.
+ ExprResult OldLHS = LHS;
+ LHS = UsualUnaryConversions(LHS.get());
+ if (LHS.isInvalid())
+ return QualType();
+ QualType LHSType = LHS.get()->getType();
+ if (IsCompAssign) LHS = OldLHS;
+
+ // The RHS is simpler.
+ RHS = UsualUnaryConversions(RHS.get());
+ if (RHS.isInvalid())
+ return QualType();
+ QualType RHSType = RHS.get()->getType();
+
+ // C99 6.5.7p2: Each of the operands shall have integer type.
+ if (!LHSType->hasIntegerRepresentation() ||
+ !RHSType->hasIntegerRepresentation())
+ return InvalidOperands(Loc, LHS, RHS);
+
+ // C++0x: Don't allow scoped enums. FIXME: Use something better than
+ // hasIntegerRepresentation() above instead of this.
+ if (isScopedEnumerationType(LHSType) ||
+ isScopedEnumerationType(RHSType)) {
+ return InvalidOperands(Loc, LHS, RHS);
+ }
+ // Sanity-check shift operands
+ DiagnoseBadShiftValues(*this, LHS, RHS, Loc, Opc, LHSType);
+
+ // "The type of the result is that of the promoted left operand."
+ return LHSType;
+}
+
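+// For illustration: in 'short s; s << 1;' the LHS is promoted to int and
+// the result has type int (the type of the promoted left operand, not a
+// common type of both operands); scoped enumeration operands such as
+// 'enum class E' values are rejected outright.
+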
+static bool IsWithinTemplateSpecialization(Decl *D) {
+ if (DeclContext *DC = D->getDeclContext()) {
+ if (isa<ClassTemplateSpecializationDecl>(DC))
+ return true;
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(DC))
+ return FD->isFunctionTemplateSpecialization();
+ }
+ return false;
+}
+
+/// If two different enums are compared, raise a warning.
+static void checkEnumComparison(Sema &S, SourceLocation Loc, Expr *LHS,
+ Expr *RHS) {
+ QualType LHSStrippedType = LHS->IgnoreParenImpCasts()->getType();
+ QualType RHSStrippedType = RHS->IgnoreParenImpCasts()->getType();
+
+ const EnumType *LHSEnumType = LHSStrippedType->getAs<EnumType>();
+ if (!LHSEnumType)
+ return;
+ const EnumType *RHSEnumType = RHSStrippedType->getAs<EnumType>();
+ if (!RHSEnumType)
+ return;
+
+ // Ignore anonymous enums.
+ if (!LHSEnumType->getDecl()->getIdentifier())
+ return;
+ if (!RHSEnumType->getDecl()->getIdentifier())
+ return;
+
+ if (S.Context.hasSameUnqualifiedType(LHSStrippedType, RHSStrippedType))
+ return;
+
+ S.Diag(Loc, diag::warn_comparison_of_mixed_enum_types)
+ << LHSStrippedType << RHSStrippedType
+ << LHS->getSourceRange() << RHS->getSourceRange();
+}
+
+/// \brief Diagnose bad pointer comparisons.
+static void diagnoseDistinctPointerComparison(Sema &S, SourceLocation Loc,
+ ExprResult &LHS, ExprResult &RHS,
+ bool IsError) {
+ S.Diag(Loc, IsError ? diag::err_typecheck_comparison_of_distinct_pointers
+ : diag::ext_typecheck_comparison_of_distinct_pointers)
+ << LHS.get()->getType() << RHS.get()->getType()
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+}
+
+/// \brief Returns false if the pointers are converted to a composite type,
+/// true otherwise.
+static bool convertPointersToCompositeType(Sema &S, SourceLocation Loc,
+ ExprResult &LHS, ExprResult &RHS) {
+ // C++ [expr.rel]p2:
+ // [...] Pointer conversions (4.10) and qualification
+ // conversions (4.4) are performed on pointer operands (or on
+ // a pointer operand and a null pointer constant) to bring
+ // them to their composite pointer type. [...]
+ //
+ // C++ [expr.eq]p1 uses the same notion for (in)equality
+ // comparisons of pointers.
+
+ // C++ [expr.eq]p2:
+ // In addition, pointers to members can be compared, or a pointer to
+ // member and a null pointer constant. Pointer to member conversions
+ // (4.11) and qualification conversions (4.4) are performed to bring
+ // them to a common type. If one operand is a null pointer constant,
+ // the common type is the type of the other operand. Otherwise, the
+ // common type is a pointer to member type similar (4.4) to the type
+ // of one of the operands, with a cv-qualification signature (4.4)
+ // that is the union of the cv-qualification signatures of the operand
+ // types.
+
+ QualType LHSType = LHS.get()->getType();
+ QualType RHSType = RHS.get()->getType();
+ assert((LHSType->isPointerType() && RHSType->isPointerType()) ||
+ (LHSType->isMemberPointerType() && RHSType->isMemberPointerType()));
+
+ bool NonStandardCompositeType = false;
+ bool *BoolPtr = S.isSFINAEContext() ? nullptr : &NonStandardCompositeType;
+ QualType T = S.FindCompositePointerType(Loc, LHS, RHS, BoolPtr);
+ if (T.isNull()) {
+ diagnoseDistinctPointerComparison(S, Loc, LHS, RHS, /*isError*/true);
+ return true;
+ }
+
+ if (NonStandardCompositeType)
+ S.Diag(Loc, diag::ext_typecheck_comparison_of_distinct_pointers_nonstandard)
+ << LHSType << RHSType << T << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+
+ LHS = S.ImpCastExprToType(LHS.get(), T, CK_BitCast);
+ RHS = S.ImpCastExprToType(RHS.get(), T, CK_BitCast);
+ return false;
+}
+
+static void diagnoseFunctionPointerToVoidComparison(Sema &S, SourceLocation Loc,
+ ExprResult &LHS,
+ ExprResult &RHS,
+ bool IsError) {
+ S.Diag(Loc, IsError ? diag::err_typecheck_comparison_of_fptr_to_void
+ : diag::ext_typecheck_comparison_of_fptr_to_void)
+ << LHS.get()->getType() << RHS.get()->getType()
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+}
+
+static bool isObjCObjectLiteral(ExprResult &E) {
+ switch (E.get()->IgnoreParenImpCasts()->getStmtClass()) {
+ case Stmt::ObjCArrayLiteralClass:
+ case Stmt::ObjCDictionaryLiteralClass:
+ case Stmt::ObjCStringLiteralClass:
+ case Stmt::ObjCBoxedExprClass:
+ return true;
+ default:
+ // Note that ObjCBoolLiteral is NOT an object literal!
+ return false;
+ }
+}
+
+static bool hasIsEqualMethod(Sema &S, const Expr *LHS, const Expr *RHS) {
+ const ObjCObjectPointerType *Type =
+ LHS->getType()->getAs<ObjCObjectPointerType>();
+
+ // If this is not actually an Objective-C object, bail out.
+ if (!Type)
+ return false;
+
+ // Get the LHS object's interface type.
+ QualType InterfaceType = Type->getPointeeType();
+
+ // If the RHS isn't an Objective-C object, bail out.
+ if (!RHS->getType()->isObjCObjectPointerType())
+ return false;
+
+ // Try to find the -isEqual: method.
+ Selector IsEqualSel = S.NSAPIObj->getIsEqualSelector();
+ ObjCMethodDecl *Method = S.LookupMethodInObjectType(IsEqualSel,
+ InterfaceType,
+ /*instance=*/true);
+ if (!Method) {
+ if (Type->isObjCIdType()) {
+ // For 'id', just check the global pool.
+ Method = S.LookupInstanceMethodInGlobalPool(IsEqualSel, SourceRange(),
+ /*receiverId=*/true);
+ } else {
+ // Check protocols.
+ Method = S.LookupMethodInQualifiedType(IsEqualSel, Type,
+ /*instance=*/true);
+ }
+ }
+
+ if (!Method)
+ return false;
+
+ QualType T = Method->parameters()[0]->getType();
+ if (!T->isObjCObjectPointerType())
+ return false;
+
+ QualType R = Method->getReturnType();
+ if (!R->isScalarType())
+ return false;
+
+ return true;
+}
+
+Sema::ObjCLiteralKind Sema::CheckLiteralKind(Expr *FromE) {
+ FromE = FromE->IgnoreParenImpCasts();
+ switch (FromE->getStmtClass()) {
+ default:
+ break;
+ case Stmt::ObjCStringLiteralClass:
+ // "string literal"
+ return LK_String;
+ case Stmt::ObjCArrayLiteralClass:
+ // "array literal"
+ return LK_Array;
+ case Stmt::ObjCDictionaryLiteralClass:
+ // "dictionary literal"
+ return LK_Dictionary;
+ case Stmt::BlockExprClass:
+ return LK_Block;
+ case Stmt::ObjCBoxedExprClass: {
+ Expr *Inner = cast<ObjCBoxedExpr>(FromE)->getSubExpr()->IgnoreParens();
+ switch (Inner->getStmtClass()) {
+ case Stmt::IntegerLiteralClass:
+ case Stmt::FloatingLiteralClass:
+ case Stmt::CharacterLiteralClass:
+ case Stmt::ObjCBoolLiteralExprClass:
+ case Stmt::CXXBoolLiteralExprClass:
+ // "numeric literal"
+ return LK_Numeric;
+ case Stmt::ImplicitCastExprClass: {
+ CastKind CK = cast<CastExpr>(Inner)->getCastKind();
+ // Boolean literals can be represented by implicit casts.
+ if (CK == CK_IntegralToBoolean || CK == CK_IntegralCast)
+ return LK_Numeric;
+ break;
+ }
+ default:
+ break;
+ }
+ return LK_Boxed;
+ }
+ }
+ return LK_None;
+}
+
+static void diagnoseObjCLiteralComparison(Sema &S, SourceLocation Loc,
+ ExprResult &LHS, ExprResult &RHS,
+ BinaryOperator::Opcode Opc){
+ Expr *Literal;
+ Expr *Other;
+ if (isObjCObjectLiteral(LHS)) {
+ Literal = LHS.get();
+ Other = RHS.get();
+ } else {
+ Literal = RHS.get();
+ Other = LHS.get();
+ }
+
+ // Don't warn on comparisons against nil.
+ Other = Other->IgnoreParenCasts();
+ if (Other->isNullPointerConstant(S.getASTContext(),
+ Expr::NPC_ValueDependentIsNotNull))
+ return;
+
+ // This should be kept in sync with warn_objc_literal_comparison.
+ // LK_String should always be after the other literals, since it has its own
+ // warning flag.
+ Sema::ObjCLiteralKind LiteralKind = S.CheckLiteralKind(Literal);
+ assert(LiteralKind != Sema::LK_Block);
+ if (LiteralKind == Sema::LK_None) {
+ llvm_unreachable("Unknown Objective-C object literal kind");
+ }
+
+ if (LiteralKind == Sema::LK_String)
+ S.Diag(Loc, diag::warn_objc_string_literal_comparison)
+ << Literal->getSourceRange();
+ else
+ S.Diag(Loc, diag::warn_objc_literal_comparison)
+ << LiteralKind << Literal->getSourceRange();
+
+ if (BinaryOperator::isEqualityOp(Opc) &&
+ hasIsEqualMethod(S, LHS.get(), RHS.get())) {
+ SourceLocation Start = LHS.get()->getLocStart();
+ SourceLocation End = S.getLocForEndOfToken(RHS.get()->getLocEnd());
+ CharSourceRange OpRange =
+ CharSourceRange::getCharRange(Loc, S.getLocForEndOfToken(Loc));
+
+ S.Diag(Loc, diag::note_objc_literal_comparison_isequal)
+ << FixItHint::CreateInsertion(Start, Opc == BO_EQ ? "[" : "![")
+ << FixItHint::CreateReplacement(OpRange, " isEqual:")
+ << FixItHint::CreateInsertion(End, "]");
+ }
+}
+
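+// For illustration (hypothetical ObjC snippet): 'if (s == @"hello")'
+// triggers warn_objc_string_literal_comparison, since '==' compares
+// pointers rather than contents; when an -isEqual: method is visible, the
+// note's fixit rewrites the test to 'if ([s isEqual:@"hello"])' (with a
+// leading '!' for the BO_NE case). Comparisons against nil are exempt.
+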
+static void diagnoseLogicalNotOnLHSofComparison(Sema &S, ExprResult &LHS,
+ ExprResult &RHS,
+ SourceLocation Loc,
+ BinaryOperatorKind Opc) {
+ // Check that left hand side is !something.
+ UnaryOperator *UO = dyn_cast<UnaryOperator>(LHS.get()->IgnoreImpCasts());
+ if (!UO || UO->getOpcode() != UO_LNot) return;
+
+ // Only check if the right hand side is non-bool arithmetic type.
+ if (RHS.get()->isKnownToHaveBooleanValue()) return;
+
+ // Make sure that the something in !something is not bool.
+ Expr *SubExpr = UO->getSubExpr()->IgnoreImpCasts();
+ if (SubExpr->isKnownToHaveBooleanValue()) return;
+
+ // Emit warning.
+ S.Diag(UO->getOperatorLoc(), diag::warn_logical_not_on_lhs_of_comparison)
+ << Loc;
+
+ // First note suggest !(x < y)
+ SourceLocation FirstOpen = SubExpr->getLocStart();
+ SourceLocation FirstClose = RHS.get()->getLocEnd();
+ FirstClose = S.getLocForEndOfToken(FirstClose);
+ if (FirstClose.isInvalid())
+ FirstOpen = SourceLocation();
+ S.Diag(UO->getOperatorLoc(), diag::note_logical_not_fix)
+ << FixItHint::CreateInsertion(FirstOpen, "(")
+ << FixItHint::CreateInsertion(FirstClose, ")");
+
+ // Second note suggests (!x) < y
+ SourceLocation SecondOpen = LHS.get()->getLocStart();
+ SourceLocation SecondClose = LHS.get()->getLocEnd();
+ SecondClose = S.getLocForEndOfToken(SecondClose);
+ if (SecondClose.isInvalid())
+ SecondOpen = SourceLocation();
+ S.Diag(UO->getOperatorLoc(), diag::note_logical_not_silence_with_parens)
+ << FixItHint::CreateInsertion(SecondOpen, "(")
+ << FixItHint::CreateInsertion(SecondClose, ")");
+}
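+
+// For example (illustrative): in
+//   if (!x < y) { ... }
+// the ! binds to x alone, so the warning fires and the two notes offer
+// fix-its for both plausible intents:
+//   if (!(x < y)) { ... }   // negate the whole comparison
+//   if ((!x) < y) { ... }   // keep the current meaning, silence the warning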
+
+// Get the decl for a simple expression: a reference to a variable,
+// an implicit C++ field reference, or an implicit ObjC ivar reference.
+static ValueDecl *getCompareDecl(Expr *E) {
+ if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(E))
+ return DR->getDecl();
+ if (ObjCIvarRefExpr* Ivar = dyn_cast<ObjCIvarRefExpr>(E)) {
+ if (Ivar->isFreeIvar())
+ return Ivar->getDecl();
+ }
+ if (MemberExpr* Mem = dyn_cast<MemberExpr>(E)) {
+ if (Mem->isImplicitAccess())
+ return Mem->getMemberDecl();
+ }
+ return nullptr;
+}
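+
+// For example (illustrative): this resolves `x` (a DeclRefExpr), `field`
+// written inside a member function (an implicit this->field MemberExpr), and
+// `_ivar` inside an Objective-C method (a free ivar reference) to their
+// decls, so a self-comparison such as `x == x` can be detected below.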
+
+// C99 6.5.8, C++ [expr.rel]
+QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc, BinaryOperatorKind Opc,
+ bool IsRelational) {
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/true);
+
+ // Handle vector comparisons separately.
+ if (LHS.get()->getType()->isVectorType() ||
+ RHS.get()->getType()->isVectorType())
+ return CheckVectorCompareOperands(LHS, RHS, Loc, IsRelational);
+
+ QualType LHSType = LHS.get()->getType();
+ QualType RHSType = RHS.get()->getType();
+
+ Expr *LHSStripped = LHS.get()->IgnoreParenImpCasts();
+ Expr *RHSStripped = RHS.get()->IgnoreParenImpCasts();
+
+ checkEnumComparison(*this, Loc, LHS.get(), RHS.get());
+ diagnoseLogicalNotOnLHSofComparison(*this, LHS, RHS, Loc, Opc);
+
+ if (!LHSType->hasFloatingRepresentation() &&
+ !(LHSType->isBlockPointerType() && IsRelational) &&
+ !LHS.get()->getLocStart().isMacroID() &&
+ !RHS.get()->getLocStart().isMacroID() &&
+ ActiveTemplateInstantiations.empty()) {
+ // For non-floating point types, check for self-comparisons of the form
+ // x == x, x != x, x < x, etc. These always evaluate to a constant, and
+ // often indicate logic errors in the program.
+ //
+ // NOTE: Don't warn about comparison expressions resulting from macro
+ // expansion. Also don't warn about comparisons which are only self-
+ // comparisons within a template specialization. The warnings should catch
+ // obvious cases in the definition of the template anyway. The idea is to
+ // warn when the typed comparison operator will always evaluate to the same
+ // result.
+ ValueDecl *DL = getCompareDecl(LHSStripped);
+ ValueDecl *DR = getCompareDecl(RHSStripped);
+ if (DL && DR && DL == DR && !IsWithinTemplateSpecialization(DL)) {
+ DiagRuntimeBehavior(Loc, nullptr, PDiag(diag::warn_comparison_always)
+ << 0 // self-
+ << (Opc == BO_EQ
+ || Opc == BO_LE
+ || Opc == BO_GE));
+ } else if (DL && DR && LHSType->isArrayType() && RHSType->isArrayType() &&
+ !DL->getType()->isReferenceType() &&
+ !DR->getType()->isReferenceType()) {
+ // What is the comparison always going to evaluate to?
+ char always_evals_to;
+ switch (Opc) {
+ case BO_EQ: // e.g. array1 == array2
+ always_evals_to = 0; // false
+ break;
+ case BO_NE: // e.g. array1 != array2
+ always_evals_to = 1; // true
+ break;
+ default:
+ // best we can say is 'a constant'
+ always_evals_to = 2; // e.g. array1 <= array2
+ break;
+ }
+ DiagRuntimeBehavior(Loc, nullptr, PDiag(diag::warn_comparison_always)
+ << 1 // array
+ << always_evals_to);
+ }
+
+ if (isa<CastExpr>(LHSStripped))
+ LHSStripped = LHSStripped->IgnoreParenCasts();
+ if (isa<CastExpr>(RHSStripped))
+ RHSStripped = RHSStripped->IgnoreParenCasts();
+
+ // Warn about comparisons against a string constant (unless the other
+ // operand is null), the user probably wants strcmp.
+ Expr *literalString = nullptr;
+ Expr *literalStringStripped = nullptr;
+ if ((isa<StringLiteral>(LHSStripped) || isa<ObjCEncodeExpr>(LHSStripped)) &&
+ !RHSStripped->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull)) {
+ literalString = LHS.get();
+ literalStringStripped = LHSStripped;
+ } else if ((isa<StringLiteral>(RHSStripped) ||
+ isa<ObjCEncodeExpr>(RHSStripped)) &&
+ !LHSStripped->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull)) {
+ literalString = RHS.get();
+ literalStringStripped = RHSStripped;
+ }
+
+ if (literalString) {
+ DiagRuntimeBehavior(Loc, nullptr,
+ PDiag(diag::warn_stringcompare)
+ << isa<ObjCEncodeExpr>(literalStringStripped)
+ << literalString->getSourceRange());
+ }
+ }
+
+ // C99 6.5.8p3 / C99 6.5.9p4
+ UsualArithmeticConversions(LHS, RHS);
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+
+ LHSType = LHS.get()->getType();
+ RHSType = RHS.get()->getType();
+
+ // The result of comparisons is 'bool' in C++, 'int' in C.
+ QualType ResultTy = Context.getLogicalOperationType();
+
+ if (IsRelational) {
+ if (LHSType->isRealType() && RHSType->isRealType())
+ return ResultTy;
+ } else {
+ // Check for comparisons of floating point operands using != and ==.
+ if (LHSType->hasFloatingRepresentation())
+ CheckFloatComparison(Loc, LHS.get(), RHS.get());
+
+ if (LHSType->isArithmeticType() && RHSType->isArithmeticType())
+ return ResultTy;
+ }
+
+ const Expr::NullPointerConstantKind LHSNullKind =
+ LHS.get()->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull);
+ const Expr::NullPointerConstantKind RHSNullKind =
+ RHS.get()->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull);
+ bool LHSIsNull = LHSNullKind != Expr::NPCK_NotNull;
+ bool RHSIsNull = RHSNullKind != Expr::NPCK_NotNull;
+
+ if (!IsRelational && LHSIsNull != RHSIsNull) {
+ bool IsEquality = Opc == BO_EQ;
+ if (RHSIsNull)
+ DiagnoseAlwaysNonNullPointer(LHS.get(), RHSNullKind, IsEquality,
+ RHS.get()->getSourceRange());
+ else
+ DiagnoseAlwaysNonNullPointer(RHS.get(), LHSNullKind, IsEquality,
+ LHS.get()->getSourceRange());
+ }
+
+ // All of the following pointer-related warnings are GCC extensions, except
+ // when handling null pointer constants.
+ if (LHSType->isPointerType() && RHSType->isPointerType()) { // C99 6.5.8p2
+ QualType LCanPointeeTy =
+ LHSType->castAs<PointerType>()->getPointeeType().getCanonicalType();
+ QualType RCanPointeeTy =
+ RHSType->castAs<PointerType>()->getPointeeType().getCanonicalType();
+
+ if (getLangOpts().CPlusPlus) {
+ if (LCanPointeeTy == RCanPointeeTy)
+ return ResultTy;
+ if (!IsRelational &&
+ (LCanPointeeTy->isVoidType() || RCanPointeeTy->isVoidType())) {
+ // Valid unless this is a comparison between a non-null pointer and a
+ // function pointer. That case is allowed only as a GCC-compatibility
+ // extension.
+ // In a SFINAE context, we treat this as a hard error to maintain
+ // conformance with the C++ standard.
+ if ((LCanPointeeTy->isFunctionType() || RCanPointeeTy->isFunctionType())
+ && !LHSIsNull && !RHSIsNull) {
+ diagnoseFunctionPointerToVoidComparison(
+ *this, Loc, LHS, RHS, /*isError*/ (bool)isSFINAEContext());
+
+ if (isSFINAEContext())
+ return QualType();
+
+ RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast);
+ return ResultTy;
+ }
+ }
+
+ if (convertPointersToCompositeType(*this, Loc, LHS, RHS))
+ return QualType();
+ else
+ return ResultTy;
+ }
+ // C99 6.5.9p2 and C99 6.5.8p2
+ if (Context.typesAreCompatible(LCanPointeeTy.getUnqualifiedType(),
+ RCanPointeeTy.getUnqualifiedType())) {
+ // Valid unless a relational comparison of function pointers
+ if (IsRelational && LCanPointeeTy->isFunctionType()) {
+ Diag(Loc, diag::ext_typecheck_ordered_comparison_of_function_pointers)
+ << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ }
+ } else if (!IsRelational &&
+ (LCanPointeeTy->isVoidType() || RCanPointeeTy->isVoidType())) {
+ // Valid unless this is a comparison between a non-null pointer and a
+ // function pointer.
+ if ((LCanPointeeTy->isFunctionType() || RCanPointeeTy->isFunctionType())
+ && !LHSIsNull && !RHSIsNull)
+ diagnoseFunctionPointerToVoidComparison(*this, Loc, LHS, RHS,
+ /*isError*/false);
+ } else {
+ // Invalid
+ diagnoseDistinctPointerComparison(*this, Loc, LHS, RHS, /*isError*/false);
+ }
+ if (LCanPointeeTy != RCanPointeeTy) {
+ // Treat NULL constant as a special case in OpenCL.
+ if (getLangOpts().OpenCL && !LHSIsNull && !RHSIsNull) {
+ const PointerType *LHSPtr = LHSType->getAs<PointerType>();
+ if (!LHSPtr->isAddressSpaceOverlapping(*RHSType->getAs<PointerType>())) {
+ Diag(Loc,
+ diag::err_typecheck_op_on_nonoverlapping_address_space_pointers)
+ << LHSType << RHSType << 0 /* comparison */
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ }
+ }
+ unsigned AddrSpaceL = LCanPointeeTy.getAddressSpace();
+ unsigned AddrSpaceR = RCanPointeeTy.getAddressSpace();
+ CastKind Kind = AddrSpaceL != AddrSpaceR ? CK_AddressSpaceConversion
+ : CK_BitCast;
+ if (LHSIsNull && !RHSIsNull)
+ LHS = ImpCastExprToType(LHS.get(), RHSType, Kind);
+ else
+ RHS = ImpCastExprToType(RHS.get(), LHSType, Kind);
+ }
+ return ResultTy;
+ }
+
+ if (getLangOpts().CPlusPlus) {
+ // Comparison of nullptr_t with itself.
+ if (LHSType->isNullPtrType() && RHSType->isNullPtrType())
+ return ResultTy;
+
+ // Comparison of pointers with null pointer constants and equality
+ // comparisons of member pointers to null pointer constants.
+ if (RHSIsNull &&
+ ((LHSType->isAnyPointerType() || LHSType->isNullPtrType()) ||
+ (!IsRelational &&
+ (LHSType->isMemberPointerType() || LHSType->isBlockPointerType())))) {
+ RHS = ImpCastExprToType(RHS.get(), LHSType,
+ LHSType->isMemberPointerType()
+ ? CK_NullToMemberPointer
+ : CK_NullToPointer);
+ return ResultTy;
+ }
+ if (LHSIsNull &&
+ ((RHSType->isAnyPointerType() || RHSType->isNullPtrType()) ||
+ (!IsRelational &&
+ (RHSType->isMemberPointerType() || RHSType->isBlockPointerType())))) {
+ LHS = ImpCastExprToType(LHS.get(), RHSType,
+ RHSType->isMemberPointerType()
+ ? CK_NullToMemberPointer
+ : CK_NullToPointer);
+ return ResultTy;
+ }
+
+ // Comparison of member pointers.
+ if (!IsRelational &&
+ LHSType->isMemberPointerType() && RHSType->isMemberPointerType()) {
+ if (convertPointersToCompositeType(*this, Loc, LHS, RHS))
+ return QualType();
+ else
+ return ResultTy;
+ }
+
+ // Handle scoped enumeration types specifically, since they don't promote
+ // to integers.
+ if (LHS.get()->getType()->isEnumeralType() &&
+ Context.hasSameUnqualifiedType(LHS.get()->getType(),
+ RHS.get()->getType()))
+ return ResultTy;
+ }
+
+ // Handle block pointer types.
+ if (!IsRelational && LHSType->isBlockPointerType() &&
+ RHSType->isBlockPointerType()) {
+ QualType lpointee = LHSType->castAs<BlockPointerType>()->getPointeeType();
+ QualType rpointee = RHSType->castAs<BlockPointerType>()->getPointeeType();
+
+ if (!LHSIsNull && !RHSIsNull &&
+ !Context.typesAreCompatible(lpointee, rpointee)) {
+ Diag(Loc, diag::err_typecheck_comparison_of_distinct_blocks)
+ << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ }
+ RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast);
+ return ResultTy;
+ }
+
+ // Allow block pointers to be compared with null pointer constants.
+ if (!IsRelational
+ && ((LHSType->isBlockPointerType() && RHSType->isPointerType())
+ || (LHSType->isPointerType() && RHSType->isBlockPointerType()))) {
+ if (!LHSIsNull && !RHSIsNull) {
+ if (!((RHSType->isPointerType() && RHSType->castAs<PointerType>()
+ ->getPointeeType()->isVoidType())
+ || (LHSType->isPointerType() && LHSType->castAs<PointerType>()
+ ->getPointeeType()->isVoidType())))
+ Diag(Loc, diag::err_typecheck_comparison_of_distinct_blocks)
+ << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ }
+ if (LHSIsNull && !RHSIsNull)
+ LHS = ImpCastExprToType(LHS.get(), RHSType,
+ RHSType->isPointerType() ? CK_BitCast
+ : CK_AnyPointerToBlockPointerCast);
+ else
+ RHS = ImpCastExprToType(RHS.get(), LHSType,
+ LHSType->isPointerType() ? CK_BitCast
+ : CK_AnyPointerToBlockPointerCast);
+ return ResultTy;
+ }
+
+ if (LHSType->isObjCObjectPointerType() ||
+ RHSType->isObjCObjectPointerType()) {
+ const PointerType *LPT = LHSType->getAs<PointerType>();
+ const PointerType *RPT = RHSType->getAs<PointerType>();
+ if (LPT || RPT) {
+ bool LPtrToVoid = LPT ? LPT->getPointeeType()->isVoidType() : false;
+ bool RPtrToVoid = RPT ? RPT->getPointeeType()->isVoidType() : false;
+
+ if (!LPtrToVoid && !RPtrToVoid &&
+ !Context.typesAreCompatible(LHSType, RHSType)) {
+ diagnoseDistinctPointerComparison(*this, Loc, LHS, RHS,
+ /*isError*/false);
+ }
+ if (LHSIsNull && !RHSIsNull) {
+ Expr *E = LHS.get();
+ if (getLangOpts().ObjCAutoRefCount)
+ CheckObjCARCConversion(SourceRange(), RHSType, E, CCK_ImplicitConversion);
+ LHS = ImpCastExprToType(E, RHSType,
+ RPT ? CK_BitCast : CK_CPointerToObjCPointerCast);
+ }
+ else {
+ Expr *E = RHS.get();
+ if (getLangOpts().ObjCAutoRefCount)
+ CheckObjCARCConversion(SourceRange(), LHSType, E, CCK_ImplicitConversion, false,
+ Opc);
+ RHS = ImpCastExprToType(E, LHSType,
+ LPT ? CK_BitCast : CK_CPointerToObjCPointerCast);
+ }
+ return ResultTy;
+ }
+ if (LHSType->isObjCObjectPointerType() &&
+ RHSType->isObjCObjectPointerType()) {
+ if (!Context.areComparableObjCPointerTypes(LHSType, RHSType))
+ diagnoseDistinctPointerComparison(*this, Loc, LHS, RHS,
+ /*isError*/false);
+ if (isObjCObjectLiteral(LHS) || isObjCObjectLiteral(RHS))
+ diagnoseObjCLiteralComparison(*this, Loc, LHS, RHS, Opc);
+
+ if (LHSIsNull && !RHSIsNull)
+ LHS = ImpCastExprToType(LHS.get(), RHSType, CK_BitCast);
+ else
+ RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast);
+ return ResultTy;
+ }
+ }
+ if ((LHSType->isAnyPointerType() && RHSType->isIntegerType()) ||
+ (LHSType->isIntegerType() && RHSType->isAnyPointerType())) {
+ unsigned DiagID = 0;
+ bool isError = false;
+ if (LangOpts.DebuggerSupport) {
+ // Under a debugger, allow the comparison of pointers to integers,
+ // since users tend to want to compare addresses.
+ } else if ((LHSIsNull && LHSType->isIntegerType()) ||
+ (RHSIsNull && RHSType->isIntegerType())) {
+ if (IsRelational && !getLangOpts().CPlusPlus)
+ DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_and_zero;
+ } else if (IsRelational && !getLangOpts().CPlusPlus)
+ DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_integer;
+ else if (getLangOpts().CPlusPlus) {
+ DiagID = diag::err_typecheck_comparison_of_pointer_integer;
+ isError = true;
+ } else
+ DiagID = diag::ext_typecheck_comparison_of_pointer_integer;
+
+ if (DiagID) {
+ Diag(Loc, DiagID)
+ << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ if (isError)
+ return QualType();
+ }
+
+ if (LHSType->isIntegerType())
+ LHS = ImpCastExprToType(LHS.get(), RHSType,
+ LHSIsNull ? CK_NullToPointer : CK_IntegralToPointer);
+ else
+ RHS = ImpCastExprToType(RHS.get(), LHSType,
+ RHSIsNull ? CK_NullToPointer : CK_IntegralToPointer);
+ return ResultTy;
+ }
+
+ // Allow block pointers to be compared with integer null pointer constants.
+ if (!IsRelational && RHSIsNull
+ && LHSType->isBlockPointerType() && RHSType->isIntegerType()) {
+ RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer);
+ return ResultTy;
+ }
+ if (!IsRelational && LHSIsNull
+ && LHSType->isIntegerType() && RHSType->isBlockPointerType()) {
+ LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer);
+ return ResultTy;
+ }
+
+ return InvalidOperands(Loc, LHS, RHS);
+}
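+
+// A few comparisons the routine above diagnoses (illustrative user code,
+// hypothetical identifiers):
+//   if (name == "abc") ...            // warn_stringcompare: suggests using a
+//                                     // string comparison function instead
+//   int a[4], b[4]; if (a == b) ...   // array comparison always evaluates false
+//   int *p; if (p > 0) ...            // C: ordered comparison of pointer and zero
+//   void (*f)(void); void *v; f == v; // GCC-extension comparison in C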
+
+// Return a signed type that is of identical size and number of elements.
+// For floating point vectors, return an integer type of identical size
+// and number of elements.
+QualType Sema::GetSignedVectorType(QualType V) {
+ const VectorType *VTy = V->getAs<VectorType>();
+ unsigned TypeSize = Context.getTypeSize(VTy->getElementType());
+ if (TypeSize == Context.getTypeSize(Context.CharTy))
+ return Context.getExtVectorType(Context.CharTy, VTy->getNumElements());
+ else if (TypeSize == Context.getTypeSize(Context.ShortTy))
+ return Context.getExtVectorType(Context.ShortTy, VTy->getNumElements());
+ else if (TypeSize == Context.getTypeSize(Context.IntTy))
+ return Context.getExtVectorType(Context.IntTy, VTy->getNumElements());
+ else if (TypeSize == Context.getTypeSize(Context.LongTy))
+ return Context.getExtVectorType(Context.LongTy, VTy->getNumElements());
+ assert(TypeSize == Context.getTypeSize(Context.LongLongTy) &&
+ "Unhandled vector element size in vector compare");
+ return Context.getExtVectorType(Context.LongLongTy, VTy->getNumElements());
+}
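+
+// For example (illustrative, assuming a typical 64-bit target): a float
+// ext-vector of 4 elements maps to an int vector of 4 elements (both 32 bits
+// per element), and a vector of 2 doubles maps to a vector of 2 longs.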
+
+/// CheckVectorCompareOperands - vector comparisons are a clang extension that
+/// operates on extended vector types. Instead of producing an IntTy result,
+/// like a scalar comparison, a vector comparison produces a vector of integer
+/// types.
+QualType Sema::CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc,
+ bool IsRelational) {
+ // Check to make sure we're operating on vectors of the same type and width,
+ // allowing one side to be a scalar of the element type.
+ QualType vType = CheckVectorOperands(LHS, RHS, Loc, /*isCompAssign*/false,
+ /*AllowBothBool*/true,
+ /*AllowBoolConversions*/getLangOpts().ZVector);
+ if (vType.isNull())
+ return vType;
+
+ QualType LHSType = LHS.get()->getType();
+
+ // If AltiVec, the comparison results in a numeric type, i.e.
+ // bool for C++, int for C
+ if (getLangOpts().AltiVec &&
+ vType->getAs<VectorType>()->getVectorKind() == VectorType::AltiVecVector)
+ return Context.getLogicalOperationType();
+
+ // For non-floating point types, check for self-comparisons of the form
+ // x == x, x != x, x < x, etc. These always evaluate to a constant, and
+ // often indicate logic errors in the program.
+ if (!LHSType->hasFloatingRepresentation() &&
+ ActiveTemplateInstantiations.empty()) {
+ if (DeclRefExpr* DRL
+ = dyn_cast<DeclRefExpr>(LHS.get()->IgnoreParenImpCasts()))
+ if (DeclRefExpr* DRR
+ = dyn_cast<DeclRefExpr>(RHS.get()->IgnoreParenImpCasts()))
+ if (DRL->getDecl() == DRR->getDecl())
+ DiagRuntimeBehavior(Loc, nullptr,
+ PDiag(diag::warn_comparison_always)
+ << 0 // self-
+ << 2 // "a constant"
+ );
+ }
+
+ // Check for comparisons of floating point operands using != and ==.
+ if (!IsRelational && LHSType->hasFloatingRepresentation()) {
+ assert(RHS.get()->getType()->hasFloatingRepresentation());
+ CheckFloatComparison(Loc, LHS.get(), RHS.get());
+ }
+
+ // Return a signed type for the vector.
+ return GetSignedVectorType(LHSType);
+}
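+
+// For example (illustrative): with two ext_vector_type(4) float values a, b,
+//   a < b
+// yields a 4-element signed integer vector whose lanes are -1 where the
+// comparison holds and 0 where it does not, instead of a scalar bool/int.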
+
+QualType Sema::CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc) {
+ // Ensure that either both operands are of the same vector type, or
+ // one operand is of a vector type and the other is of its element type.
+ QualType vType = CheckVectorOperands(LHS, RHS, Loc, false,
+ /*AllowBothBool*/true,
+ /*AllowBoolConversions*/false);
+ if (vType.isNull())
+ return InvalidOperands(Loc, LHS, RHS);
+ if (getLangOpts().OpenCL && getLangOpts().OpenCLVersion < 120 &&
+ vType->hasFloatingRepresentation())
+ return InvalidOperands(Loc, LHS, RHS);
+
+ return GetSignedVectorType(LHS.get()->getType());
+}
+
+inline QualType Sema::CheckBitwiseOperands(
+ ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign) {
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
+
+ if (LHS.get()->getType()->isVectorType() ||
+ RHS.get()->getType()->isVectorType()) {
+ if (LHS.get()->getType()->hasIntegerRepresentation() &&
+ RHS.get()->getType()->hasIntegerRepresentation())
+ return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign,
+ /*AllowBothBool*/true,
+ /*AllowBoolConversions*/getLangOpts().ZVector);
+ return InvalidOperands(Loc, LHS, RHS);
+ }
+
+ ExprResult LHSResult = LHS, RHSResult = RHS;
+ QualType compType = UsualArithmeticConversions(LHSResult, RHSResult,
+ IsCompAssign);
+ if (LHSResult.isInvalid() || RHSResult.isInvalid())
+ return QualType();
+ LHS = LHSResult.get();
+ RHS = RHSResult.get();
+
+ if (!compType.isNull() && compType->isIntegralOrUnscopedEnumerationType())
+ return compType;
+ return InvalidOperands(Loc, LHS, RHS);
+}
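+
+// For example (illustrative): `iv & jv` on two integer vectors is checked as
+// a vector operation, `flag & 1u` goes through the usual arithmetic
+// conversions to a common integer type, and `1.5 & 1` is rejected because
+// the common type is not integral.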
+
+// C99 6.5.[13,14]
+inline QualType Sema::CheckLogicalOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc,
+ BinaryOperatorKind Opc) {
+ // Check vector operands differently.
+ if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType())
+ return CheckVectorLogicalOperands(LHS, RHS, Loc);
+
+ // Diagnose cases where the user writes a logical and/or but probably meant a
+ // bitwise one. We do this when the LHS is a non-bool integer and the RHS
+ // is a constant.
+ if (LHS.get()->getType()->isIntegerType() &&
+ !LHS.get()->getType()->isBooleanType() &&
+ RHS.get()->getType()->isIntegerType() && !RHS.get()->isValueDependent() &&
+ // Don't warn in macros or template instantiations.
+ !Loc.isMacroID() && ActiveTemplateInstantiations.empty()) {
+ // If the RHS can be constant folded, and if it constant folds to something
+ // that isn't 0 or 1 (which indicate a potential logical operation that
+ // happened to fold to true/false) then warn.
+ // Parens on the RHS are ignored.
+ llvm::APSInt Result;
+ if (RHS.get()->EvaluateAsInt(Result, Context))
+ if ((getLangOpts().Bool && !RHS.get()->getType()->isBooleanType() &&
+ !RHS.get()->getExprLoc().isMacroID()) ||
+ (Result != 0 && Result != 1)) {
+ Diag(Loc, diag::warn_logical_instead_of_bitwise)
+ << RHS.get()->getSourceRange()
+ << (Opc == BO_LAnd ? "&&" : "||");
+ // Suggest replacing the logical operator with the bitwise version
+ Diag(Loc, diag::note_logical_instead_of_bitwise_change_operator)
+ << (Opc == BO_LAnd ? "&" : "|")
+ << FixItHint::CreateReplacement(SourceRange(
+ Loc, getLocForEndOfToken(Loc)),
+ Opc == BO_LAnd ? "&" : "|");
+ if (Opc == BO_LAnd)
+ // Suggest replacing "Foo() && kNonZero" with "Foo()"
+ Diag(Loc, diag::note_logical_instead_of_bitwise_remove_constant)
+ << FixItHint::CreateRemoval(
+ SourceRange(getLocForEndOfToken(LHS.get()->getLocEnd()),
+ RHS.get()->getLocEnd()));
+ }
+ }
+
+ if (!Context.getLangOpts().CPlusPlus) {
+ // OpenCL v1.1 s6.3.g: The logical operators and (&&), or (||) do
+ // not operate on the built-in scalar and vector float types.
+ if (Context.getLangOpts().OpenCL &&
+ Context.getLangOpts().OpenCLVersion < 120) {
+ if (LHS.get()->getType()->isFloatingType() ||
+ RHS.get()->getType()->isFloatingType())
+ return InvalidOperands(Loc, LHS, RHS);
+ }
+
+ LHS = UsualUnaryConversions(LHS.get());
+ if (LHS.isInvalid())
+ return QualType();
+
+ RHS = UsualUnaryConversions(RHS.get());
+ if (RHS.isInvalid())
+ return QualType();
+
+ if (!LHS.get()->getType()->isScalarType() ||
+ !RHS.get()->getType()->isScalarType())
+ return InvalidOperands(Loc, LHS, RHS);
+
+ return Context.IntTy;
+ }
+
+ // The following is safe because we only use this method for
+ // non-overloadable operands.
+
+ // C++ [expr.log.and]p1
+ // C++ [expr.log.or]p1
+ // The operands are both contextually converted to type bool.
+ ExprResult LHSRes = PerformContextuallyConvertToBool(LHS.get());
+ if (LHSRes.isInvalid())
+ return InvalidOperands(Loc, LHS, RHS);
+ LHS = LHSRes;
+
+ ExprResult RHSRes = PerformContextuallyConvertToBool(RHS.get());
+ if (RHSRes.isInvalid())
+ return InvalidOperands(Loc, LHS, RHS);
+ RHS = RHSRes;
+
+ // C++ [expr.log.and]p2
+ // C++ [expr.log.or]p2
+ // The result is a bool.
+ return Context.BoolTy;
+}
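+
+// For example (illustrative): in
+//   if (x && 0xFF) ...
+// the RHS folds to a constant other than 0 or 1, so the code above warns
+// that a bitwise '&' was probably intended and offers fix-its to replace
+// '&&' with '&' or, for '&&' specifically, to drop the constant operand.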
+
+static bool IsReadonlyMessage(Expr *E, Sema &S) {
+ const MemberExpr *ME = dyn_cast<MemberExpr>(E);
+ if (!ME) return false;
+ if (!isa<FieldDecl>(ME->getMemberDecl())) return false;
+ ObjCMessageExpr *Base =
+ dyn_cast<ObjCMessageExpr>(ME->getBase()->IgnoreParenImpCasts());
+ if (!Base) return false;
+ return Base->getMethodDecl() != nullptr;
+}
+
+/// Is the given expression (which must be 'const') a reference to a
+/// variable which was originally non-const, but which has become
+/// 'const' due to being captured within a block or lambda?
+enum NonConstCaptureKind { NCCK_None, NCCK_Block, NCCK_Lambda };
+static NonConstCaptureKind isReferenceToNonConstCapture(Sema &S, Expr *E) {
+ assert(E->isLValue() && E->getType().isConstQualified());
+ E = E->IgnoreParens();
+
+ // Must be a reference to a declaration from an enclosing scope.
+ DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E);
+ if (!DRE) return NCCK_None;
+ if (!DRE->refersToEnclosingVariableOrCapture()) return NCCK_None;
+
+ // The declaration must be a variable which is not declared 'const'.
+ VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
+ if (!var) return NCCK_None;
+ if (var->getType().isConstQualified()) return NCCK_None;
+ assert(var->hasLocalStorage() && "capture added 'const' to non-local?");
+
+ // Decide whether the first capture was for a block or a lambda.
+ DeclContext *DC = S.CurContext, *Prev = nullptr;
+ while (DC != var->getDeclContext()) {
+ Prev = DC;
+ DC = DC->getParent();
+ }
+ // Unless we have an init-capture, we've gone one step too far.
+ if (!var->isInitCapture())
+ DC = Prev;
+ return (isa<BlockDecl>(DC) ? NCCK_Block : NCCK_Lambda);
+}
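+
+// For example (illustrative):
+//   int i = 0;
+//   void (^b)(void) = ^{ i = 1; };  // error: 'i' is captured by copy and is
+//                                   // const inside the block
+// Here the classification yields NCCK_Block, selecting the diagnostic that
+// suggests the __block specifier; a by-copy lambda capture yields NCCK_Lambda.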
+
+static bool IsTypeModifiable(QualType Ty, bool IsDereference) {
+ Ty = Ty.getNonReferenceType();
+ if (IsDereference && Ty->isPointerType())
+ Ty = Ty->getPointeeType();
+ return !Ty.isConstQualified();
+}
+
+/// Emit the "read-only variable not assignable" error and print notes to give
+/// more information about why the variable is not assignable, such as pointing
+/// to the declaration of a const variable, showing that a method is const, or
+/// that the function is returning a const reference.
+static void DiagnoseConstAssignment(Sema &S, const Expr *E,
+ SourceLocation Loc) {
+ // Update err_typecheck_assign_const and note_typecheck_assign_const
+ // when this enum is changed.
+ enum {
+ ConstFunction,
+ ConstVariable,
+ ConstMember,
+ ConstMethod,
+ ConstUnknown, // Keep as last element
+ };
+
+ SourceRange ExprRange = E->getSourceRange();
+
+ // Only emit one error, on the first const found. Every other const found
+ // contributes a note attached to that error.
+ bool DiagnosticEmitted = false;
+
+ // Track if the current expression is the result of a dereference, and if the
+ // next checked expression is the result of a dereference.
+ bool IsDereference = false;
+ bool NextIsDereference = false;
+
+ // Loop to process MemberExpr chains.
+ while (true) {
+ IsDereference = NextIsDereference;
+ NextIsDereference = false;
+
+ E = E->IgnoreParenImpCasts();
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ NextIsDereference = ME->isArrow();
+ const ValueDecl *VD = ME->getMemberDecl();
+ if (const FieldDecl *Field = dyn_cast<FieldDecl>(VD)) {
+ // Mutable fields can be modified even if the class is const.
+ if (Field->isMutable()) {
+ assert(DiagnosticEmitted && "Expected diagnostic not emitted.");
+ break;
+ }
+
+ if (!IsTypeModifiable(Field->getType(), IsDereference)) {
+ if (!DiagnosticEmitted) {
+ S.Diag(Loc, diag::err_typecheck_assign_const)
+ << ExprRange << ConstMember << false /*static*/ << Field
+ << Field->getType();
+ DiagnosticEmitted = true;
+ }
+ S.Diag(VD->getLocation(), diag::note_typecheck_assign_const)
+ << ConstMember << false /*static*/ << Field << Field->getType()
+ << Field->getSourceRange();
+ }
+ E = ME->getBase();
+ continue;
+ } else if (const VarDecl *VDecl = dyn_cast<VarDecl>(VD)) {
+ if (VDecl->getType().isConstQualified()) {
+ if (!DiagnosticEmitted) {
+ S.Diag(Loc, diag::err_typecheck_assign_const)
+ << ExprRange << ConstMember << true /*static*/ << VDecl
+ << VDecl->getType();
+ DiagnosticEmitted = true;
+ }
+ S.Diag(VD->getLocation(), diag::note_typecheck_assign_const)
+ << ConstMember << true /*static*/ << VDecl << VDecl->getType()
+ << VDecl->getSourceRange();
+ }
+ // Static fields do not inherit constness from parents.
+ break;
+ }
+ break;
+ } // End MemberExpr
+ break;
+ }
+
+ if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
+ // Function calls
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (FD && !IsTypeModifiable(FD->getReturnType(), IsDereference)) {
+ if (!DiagnosticEmitted) {
+ S.Diag(Loc, diag::err_typecheck_assign_const) << ExprRange
+ << ConstFunction << FD;
+ DiagnosticEmitted = true;
+ }
+ S.Diag(FD->getReturnTypeSourceRange().getBegin(),
+ diag::note_typecheck_assign_const)
+ << ConstFunction << FD << FD->getReturnType()
+ << FD->getReturnTypeSourceRange();
+ }
+ } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ // Point to variable declaration.
+ if (const ValueDecl *VD = DRE->getDecl()) {
+ if (!IsTypeModifiable(VD->getType(), IsDereference)) {
+ if (!DiagnosticEmitted) {
+ S.Diag(Loc, diag::err_typecheck_assign_const)
+ << ExprRange << ConstVariable << VD << VD->getType();
+ DiagnosticEmitted = true;
+ }
+ S.Diag(VD->getLocation(), diag::note_typecheck_assign_const)
+ << ConstVariable << VD << VD->getType() << VD->getSourceRange();
+ }
+ }
+ } else if (isa<CXXThisExpr>(E)) {
+ if (const DeclContext *DC = S.getFunctionLevelDeclContext()) {
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(DC)) {
+ if (MD->isConst()) {
+ if (!DiagnosticEmitted) {
+ S.Diag(Loc, diag::err_typecheck_assign_const) << ExprRange
+ << ConstMethod << MD;
+ DiagnosticEmitted = true;
+ }
+ S.Diag(MD->getLocation(), diag::note_typecheck_assign_const)
+ << ConstMethod << MD << MD->getSourceRange();
+ }
+ }
+ }
+ }
+
+ if (DiagnosticEmitted)
+ return;
+
+ // Can't determine a more specific message, so display the generic error.
+ S.Diag(Loc, diag::err_typecheck_assign_const) << ExprRange << ConstUnknown;
+}
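+
+// For example (illustrative):
+//   struct S { const int n; };
+//   void f(S *s) { s->n = 0; }   // error names the const member 'n', and a
+//                                // note points at its declaration
+// For a chain like a.b.c, each const link that is found adds its own note.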
+
+/// CheckForModifiableLvalue - Verify that E is a modifiable lvalue. If not,
+/// emit an error and return true. If so, return false.
+static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
+ assert(!E->hasPlaceholderType(BuiltinType::PseudoObject));
+ SourceLocation OrigLoc = Loc;
+ Expr::isModifiableLvalueResult IsLV = E->isModifiableLvalue(S.Context,
+ &Loc);
+ if (IsLV == Expr::MLV_ClassTemporary && IsReadonlyMessage(E, S))
+ IsLV = Expr::MLV_InvalidMessageExpression;
+ if (IsLV == Expr::MLV_Valid)
+ return false;
+
+ unsigned DiagID = 0;
+ bool NeedType = false;
+ switch (IsLV) { // C99 6.5.16p2
+ case Expr::MLV_ConstQualified:
+ // Use a specialized diagnostic when we're assigning to an object
+ // from an enclosing function or block.
+ if (NonConstCaptureKind NCCK = isReferenceToNonConstCapture(S, E)) {
+ if (NCCK == NCCK_Block)
+ DiagID = diag::err_block_decl_ref_not_modifiable_lvalue;
+ else
+ DiagID = diag::err_lambda_decl_ref_not_modifiable_lvalue;
+ break;
+ }
+
+ // In ARC, use some specialized diagnostics for occasions where we
+ // infer 'const'. These are always pseudo-strong variables.
+ if (S.getLangOpts().ObjCAutoRefCount) {
+ DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts());
+ if (declRef && isa<VarDecl>(declRef->getDecl())) {
+ VarDecl *var = cast<VarDecl>(declRef->getDecl());
+
+ // Use the normal diagnostic if it's pseudo-__strong but the
+ // user actually wrote 'const'.
+ if (var->isARCPseudoStrong() &&
+ (!var->getTypeSourceInfo() ||
+ !var->getTypeSourceInfo()->getType().isConstQualified())) {
+ // There are two pseudo-strong cases:
+ // - self
+ ObjCMethodDecl *method = S.getCurMethodDecl();
+ if (method && var == method->getSelfDecl())
+ DiagID = method->isClassMethod()
+ ? diag::err_typecheck_arc_assign_self_class_method
+ : diag::err_typecheck_arc_assign_self;
+
+ // - fast enumeration variables
+ else
+ DiagID = diag::err_typecheck_arr_assign_enumeration;
+
+ SourceRange Assign;
+ if (Loc != OrigLoc)
+ Assign = SourceRange(OrigLoc, OrigLoc);
+ S.Diag(Loc, DiagID) << E->getSourceRange() << Assign;
+ // We need to preserve the AST regardless, so the migration tool
+ // can do its job.
+ return false;
+ }
+ }
+ }
+
+ // If none of the special cases above are triggered, then this is a
+ // simple const assignment.
+ if (DiagID == 0) {
+ DiagnoseConstAssignment(S, E, Loc);
+ return true;
+ }
+
+ break;
+ case Expr::MLV_ConstAddrSpace:
+ DiagnoseConstAssignment(S, E, Loc);
+ return true;
+ case Expr::MLV_ArrayType:
+ case Expr::MLV_ArrayTemporary:
+ DiagID = diag::err_typecheck_array_not_modifiable_lvalue;
+ NeedType = true;
+ break;
+ case Expr::MLV_NotObjectType:
+ DiagID = diag::err_typecheck_non_object_not_modifiable_lvalue;
+ NeedType = true;
+ break;
+ case Expr::MLV_LValueCast:
+ DiagID = diag::err_typecheck_lvalue_casts_not_supported;
+ break;
+ case Expr::MLV_Valid:
+ llvm_unreachable("did not take early return for MLV_Valid");
+ case Expr::MLV_InvalidExpression:
+ case Expr::MLV_MemberFunction:
+ case Expr::MLV_ClassTemporary:
+ DiagID = diag::err_typecheck_expression_not_modifiable_lvalue;
+ break;
+ case Expr::MLV_IncompleteType:
+ case Expr::MLV_IncompleteVoidType:
+ return S.RequireCompleteType(Loc, E->getType(),
+ diag::err_typecheck_incomplete_type_not_modifiable_lvalue, E);
+ case Expr::MLV_DuplicateVectorComponents:
+ DiagID = diag::err_typecheck_duplicate_vector_components_not_mlvalue;
+ break;
+ case Expr::MLV_NoSetterProperty:
+ llvm_unreachable("readonly properties should be processed differently");
+ case Expr::MLV_InvalidMessageExpression:
+ DiagID = diag::error_readonly_message_assignment;
+ break;
+ case Expr::MLV_SubObjCPropertySetting:
+ DiagID = diag::error_no_subobject_property_setting;
+ break;
+ }
+
+ SourceRange Assign;
+ if (Loc != OrigLoc)
+ Assign = SourceRange(OrigLoc, OrigLoc);
+ if (NeedType)
+ S.Diag(Loc, DiagID) << E->getType() << E->getSourceRange() << Assign;
+ else
+ S.Diag(Loc, DiagID) << E->getSourceRange() << Assign;
+ return true;
+}
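+
+// For example (illustrative):
+//   int a[4], b[4]; a = b;       // error: array type is not assignable
+//   struct Inc;
+//   void f(struct Inc *p) { *p = *p; }   // error: incomplete type is not
+//                                        // assignable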
+
+static void CheckIdentityFieldAssignment(Expr *LHSExpr, Expr *RHSExpr,
+ SourceLocation Loc,
+ Sema &Sema) {
+ // C / C++ fields
+ MemberExpr *ML = dyn_cast<MemberExpr>(LHSExpr);
+ MemberExpr *MR = dyn_cast<MemberExpr>(RHSExpr);
+ if (ML && MR && ML->getMemberDecl() == MR->getMemberDecl()) {
+ if (isa<CXXThisExpr>(ML->getBase()) && isa<CXXThisExpr>(MR->getBase()))
+ Sema.Diag(Loc, diag::warn_identity_field_assign) << 0;
+ }
+
+ // Objective-C instance variables
+ ObjCIvarRefExpr *OL = dyn_cast<ObjCIvarRefExpr>(LHSExpr);
+ ObjCIvarRefExpr *OR = dyn_cast<ObjCIvarRefExpr>(RHSExpr);
+ if (OL && OR && OL->getDecl() == OR->getDecl()) {
+ DeclRefExpr *RL = dyn_cast<DeclRefExpr>(OL->getBase()->IgnoreImpCasts());
+ DeclRefExpr *RR = dyn_cast<DeclRefExpr>(OR->getBase()->IgnoreImpCasts());
+ if (RL && RR && RL->getDecl() == RR->getDecl())
+ Sema.Diag(Loc, diag::warn_identity_field_assign) << 1;
+ }
+}
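+
+// For example (illustrative): inside a C++ member function,
+//   this->count = this->count;   // warns about assigning a field to itself
+// and inside an Objective-C method,
+//   self->_ivar = self->_ivar;   // same warning, instance-variable form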
+
+// C99 6.5.16.1
+QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
+ SourceLocation Loc,
+ QualType CompoundType) {
+ assert(!LHSExpr->hasPlaceholderType(BuiltinType::PseudoObject));
+
+ // Verify that LHS is a modifiable lvalue, and emit error if not.
+ if (CheckForModifiableLvalue(LHSExpr, Loc, *this))
+ return QualType();
+
+ QualType LHSType = LHSExpr->getType();
+ QualType RHSType = CompoundType.isNull() ? RHS.get()->getType() :
+ CompoundType;
+ AssignConvertType ConvTy;
+ if (CompoundType.isNull()) {
+ Expr *RHSCheck = RHS.get();
+
+ CheckIdentityFieldAssignment(LHSExpr, RHSCheck, Loc, *this);
+
+ QualType LHSTy(LHSType);
+ ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
+ if (RHS.isInvalid())
+ return QualType();
+ // Special case of NSObject attributes on c-style pointer types.
+ if (ConvTy == IncompatiblePointer &&
+ ((Context.isObjCNSObjectType(LHSType) &&
+ RHSType->isObjCObjectPointerType()) ||
+ (Context.isObjCNSObjectType(RHSType) &&
+ LHSType->isObjCObjectPointerType())))
+ ConvTy = Compatible;
+
+ if (ConvTy == Compatible &&
+ LHSType->isObjCObjectType())
+ Diag(Loc, diag::err_objc_object_assignment)
+ << LHSType;
+
+ // If the RHS is a unary plus or minus, check to see if the = and + are
+ // right next to each other. If so, the user may have typo'd "x =+ 4"
+ // instead of "x += 4".
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(RHSCheck))
+ RHSCheck = ICE->getSubExpr();
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(RHSCheck)) {
+ if ((UO->getOpcode() == UO_Plus ||
+ UO->getOpcode() == UO_Minus) &&
+ Loc.isFileID() && UO->getOperatorLoc().isFileID() &&
+ // Only if the two operators are exactly adjacent.
+ Loc.getLocWithOffset(1) == UO->getOperatorLoc() &&
+ // And there is a space or other character before the subexpr of the
+ // unary +/-. We don't want to warn on "x=-1".
+ Loc.getLocWithOffset(2) != UO->getSubExpr()->getLocStart() &&
+ UO->getSubExpr()->getLocStart().isFileID()) {
+ Diag(Loc, diag::warn_not_compound_assign)
+ << (UO->getOpcode() == UO_Plus ? "+" : "-")
+ << SourceRange(UO->getOperatorLoc(), UO->getOperatorLoc());
+ }
+ }
+
+ if (ConvTy == Compatible) {
+ if (LHSType.getObjCLifetime() == Qualifiers::OCL_Strong) {
+ // Warn about retain cycles where a block captures the LHS, but
+ // not if the LHS is a simple variable into which the block is
+ // being stored...unless that variable can be captured by reference!
+ const Expr *InnerLHS = LHSExpr->IgnoreParenCasts();
+ const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(InnerLHS);
+ if (!DRE || DRE->getDecl()->hasAttr<BlocksAttr>())
+ checkRetainCycles(LHSExpr, RHS.get());
+
+ // It is safe to assign a weak reference into a strong variable.
+ // Although this code can still have problems:
+ // id x = self.weakProp;
+ // id y = self.weakProp;
+ // we do not want to warn spuriously when 'x' and 'y' are on separate
+ // paths through the function. This should be revisited if
+ // -Wrepeated-use-of-weak is made flow-sensitive.
+ if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak,
+ RHS.get()->getLocStart()))
+ getCurFunction()->markSafeWeakUse(RHS.get());
+
+ } else if (getLangOpts().ObjCAutoRefCount) {
+ checkUnsafeExprAssigns(Loc, LHSExpr, RHS.get());
+ }
+ }
+ } else {
+ // Compound assignment "x += y"
+ ConvTy = CheckAssignmentConstraints(Loc, LHSType, RHSType);
+ }
+
+ if (DiagnoseAssignmentResult(ConvTy, Loc, LHSType, RHSType,
+ RHS.get(), AA_Assigning))
+ return QualType();
+
+ CheckForNullPointerDereference(*this, LHSExpr);
+
+ // C99 6.5.16p3: The type of an assignment expression is the type of the
+ // left operand unless the left operand has qualified type, in which case
+ // it is the unqualified version of the type of the left operand.
+ // C99 6.5.16.1p2: In simple assignment, the value of the right operand
+ // is converted to the type of the assignment expression (above).
+ // C++ 5.17p1: the type of the assignment expression is that of its left
+ // operand.
+ return (getLangOpts().CPlusPlus
+ ? LHSType : LHSType.getUnqualifiedType());
+}
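+
+// For example (illustrative), the adjacency checks above make
+//   x =+ 4;      // warning: did you mean x += 4?
+// warn, while `x = +4` (operators not adjacent) and `x=-1` (no gap before
+// the operand) stay silent.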
+
+// C99 6.5.17
+static QualType CheckCommaOperands(Sema &S, ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc) {
+ LHS = S.CheckPlaceholderExpr(LHS.get());
+ RHS = S.CheckPlaceholderExpr(RHS.get());
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+
+ // C's comma performs lvalue conversion (C99 6.3.2.1) on both its
+ // operands, but not unary promotions.
+ // C++'s comma does not do any conversions at all (C++ [expr.comma]p1).
+
+ // So we treat the LHS as an ignored value, and in C++ we allow the
+ // containing site to determine what should be done with the RHS.
+ LHS = S.IgnoredValueConversions(LHS.get());
+ if (LHS.isInvalid())
+ return QualType();
+
+ S.DiagnoseUnusedExprResult(LHS.get());
+
+ if (!S.getLangOpts().CPlusPlus) {
+ RHS = S.DefaultFunctionArrayLvalueConversion(RHS.get());
+ if (RHS.isInvalid())
+ return QualType();
+ if (!RHS.get()->getType()->isVoidType())
+ S.RequireCompleteType(Loc, RHS.get()->getType(),
+ diag::err_incomplete_type);
+ }
+
+ return RHS.get()->getType();
+}
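+
+// For example (illustrative): in C,
+//   int r = (f(), g());   // the expression has g()'s (converted) type, and
+//                         // f()'s value is simply discarded
+// while in C++ the RHS is left unconverted for the enclosing context.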
+
+/// CheckIncrementDecrementOperand - unlike most "Check" methods, this routine
+/// doesn't need to call UsualUnaryConversions or UsualArithmeticConversions.
+static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
+ ExprValueKind &VK,
+ ExprObjectKind &OK,
+ SourceLocation OpLoc,
+ bool IsInc, bool IsPrefix) {
+ if (Op->isTypeDependent())
+ return S.Context.DependentTy;
+
+ QualType ResType = Op->getType();
+ // Atomic types can be used for increment / decrement where the non-atomic
+ // versions can, so ignore the _Atomic() specifier for the purpose of
+ // checking.
+ if (const AtomicType *ResAtomicType = ResType->getAs<AtomicType>())
+ ResType = ResAtomicType->getValueType();
+
+ assert(!ResType.isNull() && "no type for increment/decrement expression");
+
+ if (S.getLangOpts().CPlusPlus && ResType->isBooleanType()) {
+ // Decrement of bool is not allowed.
+ if (!IsInc) {
+ S.Diag(OpLoc, diag::err_decrement_bool) << Op->getSourceRange();
+ return QualType();
+ }
+ // Increment of bool sets it to true, but is deprecated.
+ S.Diag(OpLoc, S.getLangOpts().CPlusPlus1z ? diag::ext_increment_bool
+ : diag::warn_increment_bool)
+ << Op->getSourceRange();
+ } else if (S.getLangOpts().CPlusPlus && ResType->isEnumeralType()) {
+ // Error on enum increments and decrements in C++ mode
+ S.Diag(OpLoc, diag::err_increment_decrement_enum) << IsInc << ResType;
+ return QualType();
+ } else if (ResType->isRealType()) {
+ // OK!
+ } else if (ResType->isPointerType()) {
+ // C99 6.5.2.4p2, 6.5.6p2
+ if (!checkArithmeticOpPointerOperand(S, OpLoc, Op))
+ return QualType();
+ } else if (ResType->isObjCObjectPointerType()) {
+ // On modern runtimes, ObjC pointer arithmetic is forbidden.
+ // Otherwise, we just need a complete type.
+ if (checkArithmeticIncompletePointerType(S, OpLoc, Op) ||
+ checkArithmeticOnObjCPointer(S, OpLoc, Op))
+ return QualType();
+ } else if (ResType->isAnyComplexType()) {
+ // C99 does not support ++/-- on complex types; we allow it as an extension.
+ S.Diag(OpLoc, diag::ext_integer_increment_complex)
+ << ResType << Op->getSourceRange();
+ } else if (ResType->isPlaceholderType()) {
+ ExprResult PR = S.CheckPlaceholderExpr(Op);
+ if (PR.isInvalid()) return QualType();
+ return CheckIncrementDecrementOperand(S, PR.get(), VK, OK, OpLoc,
+ IsInc, IsPrefix);
+ } else if (S.getLangOpts().AltiVec && ResType->isVectorType()) {
+ // OK! (C/C++ Language Extensions for CBEA, Version 2.6, section 10.3)
+ } else if (S.getLangOpts().ZVector && ResType->isVectorType() &&
+ (ResType->getAs<VectorType>()->getVectorKind() !=
+ VectorType::AltiVecBool)) {
+ // The z vector extensions allow ++ and -- for non-bool vectors.
+ } else if (S.getLangOpts().OpenCL && ResType->isVectorType() &&
+ ResType->getAs<VectorType>()->getElementType()->isIntegerType()) {
+ // OpenCL V1.2 6.3 says dec/inc ops operate on integer vector types.
+ } else {
+ S.Diag(OpLoc, diag::err_typecheck_illegal_increment_decrement)
+ << ResType << int(IsInc) << Op->getSourceRange();
+ return QualType();
+ }
+ // At this point, we know we have a real, complex or pointer type.
+ // Now make sure the operand is a modifiable lvalue.
+ if (CheckForModifiableLvalue(Op, OpLoc, S))
+ return QualType();
+ // In C++, a prefix increment has the same type as the operand. Otherwise
+ // (in C, or with postfix), the result has the unqualified type of the
+ // operand.
+ if (IsPrefix && S.getLangOpts().CPlusPlus) {
+ VK = VK_LValue;
+ OK = Op->getObjectKind();
+ return ResType;
+ } else {
+ VK = VK_RValue;
+ return ResType.getUnqualifiedType();
+ }
+}
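+
+// For example (illustrative):
+//   bool b = true; b--;   // error: decrement of bool is never allowed
+//   bool c = true; c++;   // warns: deprecated, and in C++17 accepted only
+//                         // as an extension
+//   int x[2]; int *p = x; p++;   // fine: pointer arithmetic, C99 6.5.2.4p2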
+
+/// getPrimaryDecl - Helper function for CheckAddressOfOperand().
+/// This routine allows us to typecheck complex/recursive expressions
+/// where the declaration is needed for type checking. We only need to
+/// handle cases when the expression references a function designator
+/// or is an lvalue. Here are some examples:
+/// - &(x) -> x
+/// - &*****f -> f, for f a function designator
+/// - &s.xx -> s
+/// - &s.zz[1].yy -> s, if zz is an array
+/// - *(x + 1) -> x, if x is an array
+/// - &"123"[2] -> 0
+/// - & __real__ x -> x
+static ValueDecl *getPrimaryDecl(Expr *E) {
+ switch (E->getStmtClass()) {
+ case Stmt::DeclRefExprClass:
+ return cast<DeclRefExpr>(E)->getDecl();
+ case Stmt::MemberExprClass:
+ // If this is an arrow operator, the address is an offset from
+ // the base's value, so the object the base refers to is
+ // irrelevant.
+ if (cast<MemberExpr>(E)->isArrow())
+ return nullptr;
+ // Otherwise, the expression refers to a part of the base.
+ return getPrimaryDecl(cast<MemberExpr>(E)->getBase());
+ case Stmt::ArraySubscriptExprClass: {
+ // FIXME: This code shouldn't be necessary! We should catch the implicit
+ // promotion of register arrays earlier.
+ Expr* Base = cast<ArraySubscriptExpr>(E)->getBase();
+ if (ImplicitCastExpr* ICE = dyn_cast<ImplicitCastExpr>(Base)) {
+ if (ICE->getSubExpr()->getType()->isArrayType())
+ return getPrimaryDecl(ICE->getSubExpr());
+ }
+ return nullptr;
+ }
+ case Stmt::UnaryOperatorClass: {
+ UnaryOperator *UO = cast<UnaryOperator>(E);
+
+ switch (UO->getOpcode()) {
+ case UO_Real:
+ case UO_Imag:
+ case UO_Extension:
+ return getPrimaryDecl(UO->getSubExpr());
+ default:
+ return nullptr;
+ }
+ }
+ case Stmt::ParenExprClass:
+ return getPrimaryDecl(cast<ParenExpr>(E)->getSubExpr());
+ case Stmt::ImplicitCastExprClass:
+ // If the result of an implicit cast is an l-value, we care about
+ // the sub-expression; otherwise, the result here doesn't matter.
+ return getPrimaryDecl(cast<ImplicitCastExpr>(E)->getSubExpr());
+ default:
+ return nullptr;
+ }
+}
+
+namespace {
+ enum {
+ AO_Bit_Field = 0,
+ AO_Vector_Element = 1,
+ AO_Property_Expansion = 2,
+ AO_Register_Variable = 3,
+ AO_No_Error = 4
+ };
+}
+/// \brief Diagnose invalid operand for address of operations.
+///
+/// \param Type One of the AO_* codes above, identifying the kind of operand
+/// which cannot have its address taken.
+static void diagnoseAddressOfInvalidType(Sema &S, SourceLocation Loc,
+ Expr *E, unsigned Type) {
+ S.Diag(Loc, diag::err_typecheck_address_of) << Type << E->getSourceRange();
+}
+
+/// CheckAddressOfOperand - The operand of & must be either a function
+/// designator or an lvalue designating an object. If it is an lvalue, the
+/// object cannot be declared with storage class register or be a bit field.
+/// Note: The usual conversions are *not* applied to the operand of the &
+/// operator (C99 6.3.2.1p[2-4]), and its result is never an lvalue.
+/// In C++, the operand might be an overloaded function name, in which case
+/// we allow the '&' but retain the overloaded-function type.
+QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
+ if (const BuiltinType *PTy = OrigOp.get()->getType()->getAsPlaceholderType()){
+ if (PTy->getKind() == BuiltinType::Overload) {
+ Expr *E = OrigOp.get()->IgnoreParens();
+ if (!isa<OverloadExpr>(E)) {
+ assert(cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf);
+ Diag(OpLoc, diag::err_typecheck_invalid_lvalue_addrof_addrof_function)
+ << OrigOp.get()->getSourceRange();
+ return QualType();
+ }
+
+ OverloadExpr *Ovl = cast<OverloadExpr>(E);
+ if (isa<UnresolvedMemberExpr>(Ovl))
+ if (!ResolveSingleFunctionTemplateSpecialization(Ovl)) {
+ Diag(OpLoc, diag::err_invalid_form_pointer_member_function)
+ << OrigOp.get()->getSourceRange();
+ return QualType();
+ }
+
+ return Context.OverloadTy;
+ }
+
+ if (PTy->getKind() == BuiltinType::UnknownAny)
+ return Context.UnknownAnyTy;
+
+ if (PTy->getKind() == BuiltinType::BoundMember) {
+ Diag(OpLoc, diag::err_invalid_form_pointer_member_function)
+ << OrigOp.get()->getSourceRange();
+ return QualType();
+ }
+
+ OrigOp = CheckPlaceholderExpr(OrigOp.get());
+ if (OrigOp.isInvalid()) return QualType();
+ }
+
+ if (OrigOp.get()->isTypeDependent())
+ return Context.DependentTy;
+
+ assert(!OrigOp.get()->getType()->isPlaceholderType());
+
+ // Make sure to ignore parentheses in subsequent checks
+ Expr *op = OrigOp.get()->IgnoreParens();
+
+ // OpenCL v1.0 s6.8.a.3: Pointers to functions are not allowed.
+ if (LangOpts.OpenCL && op->getType()->isFunctionType()) {
+ Diag(op->getExprLoc(), diag::err_opencl_taking_function_address);
+ return QualType();
+ }
+
+ if (getLangOpts().C99) {
+ // Implement C99-only parts of addressof rules.
+ if (UnaryOperator* uOp = dyn_cast<UnaryOperator>(op)) {
+ if (uOp->getOpcode() == UO_Deref)
+ // Per C99 6.5.3.2, the address of a deref always returns a valid result
+ // (assuming the deref expression is valid).
+ return uOp->getSubExpr()->getType();
+ }
+ // Technically, there should be a check for array subscript
+ // expressions here, but the result of one is always an lvalue anyway.
+ }
+ ValueDecl *dcl = getPrimaryDecl(op);
+
+ if (auto *FD = dyn_cast_or_null<FunctionDecl>(dcl))
+ if (!checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
+ op->getLocStart()))
+ return QualType();
+
+ Expr::LValueClassification lval = op->ClassifyLValue(Context);
+ unsigned AddressOfError = AO_No_Error;
+
+ if (lval == Expr::LV_ClassTemporary || lval == Expr::LV_ArrayTemporary) {
+ bool sfinae = (bool)isSFINAEContext();
+ Diag(OpLoc, isSFINAEContext() ? diag::err_typecheck_addrof_temporary
+ : diag::ext_typecheck_addrof_temporary)
+ << op->getType() << op->getSourceRange();
+ if (sfinae)
+ return QualType();
+ // Materialize the temporary as an lvalue so that we can take its address.
+ OrigOp = op = new (Context)
+ MaterializeTemporaryExpr(op->getType(), OrigOp.get(), true);
+ } else if (isa<ObjCSelectorExpr>(op)) {
+ return Context.getPointerType(op->getType());
+ } else if (lval == Expr::LV_MemberFunction) {
+ // If it's an instance method, make a member pointer.
+ // The expression must have exactly the form &A::foo.
+
+ // If the underlying expression isn't a decl ref, give up.
+ if (!isa<DeclRefExpr>(op)) {
+ Diag(OpLoc, diag::err_invalid_form_pointer_member_function)
+ << OrigOp.get()->getSourceRange();
+ return QualType();
+ }
+ DeclRefExpr *DRE = cast<DeclRefExpr>(op);
+ CXXMethodDecl *MD = cast<CXXMethodDecl>(DRE->getDecl());
+
+ // The id-expression was parenthesized.
+ if (OrigOp.get() != DRE) {
+ Diag(OpLoc, diag::err_parens_pointer_member_function)
+ << OrigOp.get()->getSourceRange();
+
+ // The method was named without a qualifier.
+ } else if (!DRE->getQualifier()) {
+ if (MD->getParent()->getName().empty())
+ Diag(OpLoc, diag::err_unqualified_pointer_member_function)
+ << op->getSourceRange();
+ else {
+ SmallString<32> Str;
+ StringRef Qual = (MD->getParent()->getName() + "::").toStringRef(Str);
+ Diag(OpLoc, diag::err_unqualified_pointer_member_function)
+ << op->getSourceRange()
+ << FixItHint::CreateInsertion(op->getSourceRange().getBegin(), Qual);
+ }
+ }
+
+ // Taking the address of a dtor is illegal per C++ [class.dtor]p2.
+ if (isa<CXXDestructorDecl>(MD))
+ Diag(OpLoc, diag::err_typecheck_addrof_dtor) << op->getSourceRange();
+
+ QualType MPTy = Context.getMemberPointerType(
+ op->getType(), Context.getTypeDeclType(MD->getParent()).getTypePtr());
+ // Under the MS ABI, lock down the inheritance model now.
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft())
+ (void)isCompleteType(OpLoc, MPTy);
+ return MPTy;
+ } else if (lval != Expr::LV_Valid && lval != Expr::LV_IncompleteVoidType) {
+ // C99 6.5.3.2p1
+ // The operand must be either an l-value or a function designator
+ if (!op->getType()->isFunctionType()) {
+ // Use a special diagnostic for loads from property references.
+ if (isa<PseudoObjectExpr>(op)) {
+ AddressOfError = AO_Property_Expansion;
+ } else {
+ Diag(OpLoc, diag::err_typecheck_invalid_lvalue_addrof)
+ << op->getType() << op->getSourceRange();
+ return QualType();
+ }
+ }
+ } else if (op->getObjectKind() == OK_BitField) { // C99 6.5.3.2p1
+ // The operand cannot be a bit-field
+ AddressOfError = AO_Bit_Field;
+ } else if (op->getObjectKind() == OK_VectorComponent) {
+ // The operand cannot be an element of a vector
+ AddressOfError = AO_Vector_Element;
+ } else if (dcl) { // C99 6.5.3.2p1
+ // We have an lvalue with a decl. Make sure the decl is not declared
+ // with the register storage-class specifier.
+ if (const VarDecl *vd = dyn_cast<VarDecl>(dcl)) {
+ // In C++ it is not an error to take the address of a register
+ // variable (C++03 7.1.1p3).
+ if (vd->getStorageClass() == SC_Register &&
+ !getLangOpts().CPlusPlus) {
+ AddressOfError = AO_Register_Variable;
+ }
+ } else if (isa<MSPropertyDecl>(dcl)) {
+ AddressOfError = AO_Property_Expansion;
+ } else if (isa<FunctionTemplateDecl>(dcl)) {
+ return Context.OverloadTy;
+ } else if (isa<FieldDecl>(dcl) || isa<IndirectFieldDecl>(dcl)) {
+ // Okay: we can take the address of a field.
+ // Could be a pointer to member, though, if there is an explicit
+ // scope qualifier for the class.
+ if (isa<DeclRefExpr>(op) && cast<DeclRefExpr>(op)->getQualifier()) {
+ DeclContext *Ctx = dcl->getDeclContext();
+ if (Ctx && Ctx->isRecord()) {
+ if (dcl->getType()->isReferenceType()) {
+ Diag(OpLoc,
+ diag::err_cannot_form_pointer_to_member_of_reference_type)
+ << dcl->getDeclName() << dcl->getType();
+ return QualType();
+ }
+
+ while (cast<RecordDecl>(Ctx)->isAnonymousStructOrUnion())
+ Ctx = Ctx->getParent();
+
+ QualType MPTy = Context.getMemberPointerType(
+ op->getType(),
+ Context.getTypeDeclType(cast<RecordDecl>(Ctx)).getTypePtr());
+ // Under the MS ABI, lock down the inheritance model now.
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft())
+ (void)isCompleteType(OpLoc, MPTy);
+ return MPTy;
+ }
+ }
+ } else if (!isa<FunctionDecl>(dcl) && !isa<NonTypeTemplateParmDecl>(dcl))
+ llvm_unreachable("Unknown/unexpected decl type");
+ }
+
+ if (AddressOfError != AO_No_Error) {
+ diagnoseAddressOfInvalidType(*this, OpLoc, op, AddressOfError);
+ return QualType();
+ }
+
+ if (lval == Expr::LV_IncompleteVoidType) {
+ // Taking the address of a void variable is technically illegal, but we
+ // allow it in cases which are otherwise valid.
+ // Example: "extern void x; void* y = &x;".
+ Diag(OpLoc, diag::ext_typecheck_addrof_void) << op->getSourceRange();
+ }
+
+ // If the operand has type "type", the result has type "pointer to type".
+ if (op->getType()->isObjCObjectType())
+ return Context.getObjCObjectPointerType(op->getType());
+ return Context.getPointerType(op->getType());
+}
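+
+// For example (illustrative):
+//   struct A { void f(); };
+//   auto p = &A::f;     // OK: forms a pointer to member function
+//   auto q = &(A::f);   // error: the id-expression must not be parenthesized
+//   struct B { int n : 3; } x; &x.n;   // error: address of bit-field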
+
+static void RecordModifiableNonNullParam(Sema &S, const Expr *Exp) {
+ const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp);
+ if (!DRE)
+ return;
+ const Decl *D = DRE->getDecl();
+ if (!D)
+ return;
+ const ParmVarDecl *Param = dyn_cast<ParmVarDecl>(D);
+ if (!Param)
+ return;
+ if (const FunctionDecl* FD = dyn_cast<FunctionDecl>(Param->getDeclContext()))
+ if (!FD->hasAttr<NonNullAttr>() && !Param->hasAttr<NonNullAttr>())
+ return;
+ if (FunctionScopeInfo *FD = S.getCurFunction())
+ if (!FD->ModifiedNonNullParams.count(Param))
+ FD->ModifiedNonNullParams.insert(Param);
+}
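+
+// For example (illustrative): in
+//   void f(int *p __attribute__((nonnull))) { p = 0; /* ... */ }
+// the assignment records 'p' in ModifiedNonNullParams, so later null-pointer
+// diagnostics do not keep assuming 'p' is non-null.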
+
+/// CheckIndirectionOperand - Type check unary indirection (prefix '*').
+static QualType CheckIndirectionOperand(Sema &S, Expr *Op, ExprValueKind &VK,
+ SourceLocation OpLoc) {
+ if (Op->isTypeDependent())
+ return S.Context.DependentTy;
+
+ ExprResult ConvResult = S.UsualUnaryConversions(Op);
+ if (ConvResult.isInvalid())
+ return QualType();
+ Op = ConvResult.get();
+ QualType OpTy = Op->getType();
+ QualType Result;
+
+ if (isa<CXXReinterpretCastExpr>(Op)) {
+ QualType OpOrigType = Op->IgnoreParenCasts()->getType();
+ S.CheckCompatibleReinterpretCast(OpOrigType, OpTy, /*IsDereference*/true,
+ Op->getSourceRange());
+ }
+
+ if (const PointerType *PT = OpTy->getAs<PointerType>())
+ Result = PT->getPointeeType();
+ else if (const ObjCObjectPointerType *OPT =
+ OpTy->getAs<ObjCObjectPointerType>())
+ Result = OPT->getPointeeType();
+ else {
+ ExprResult PR = S.CheckPlaceholderExpr(Op);
+ if (PR.isInvalid()) return QualType();
+ if (PR.get() != Op)
+ return CheckIndirectionOperand(S, PR.get(), VK, OpLoc);
+ }
+
+ if (Result.isNull()) {
+ S.Diag(OpLoc, diag::err_typecheck_indirection_requires_pointer)
+ << OpTy << Op->getSourceRange();
+ return QualType();
+ }
+
+ // Note that per both C89 and C99, indirection is always legal, even if Result
+ // is an incomplete type or void. It would be possible to warn about
+ // dereferencing a void pointer, but it's completely well-defined, and such a
+ // warning is unlikely to catch any mistakes. In C++, indirection is not valid
+ // for pointers to 'void' but is fine for any other pointer type:
+ //
+ // C++ [expr.unary.op]p1:
+ // [...] the expression to which [the unary * operator] is applied shall
+ // be a pointer to an object type, or a pointer to a function type
+ if (S.getLangOpts().CPlusPlus && Result->isVoidType())
+ S.Diag(OpLoc, diag::ext_typecheck_indirection_through_void_pointer)
+ << OpTy << Op->getSourceRange();
+
+ // Dereferences are usually l-values...
+ VK = VK_LValue;
+
+ // ...except that certain expressions are never l-values in C.
+ if (!S.getLangOpts().CPlusPlus && Result.isCForbiddenLValueType())
+ VK = VK_RValue;
+
+ return Result;
+}
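+
+// For example (illustrative): in C++,
+//   void *vp = 0; *vp;   // warns: indirection through a void pointer is
+//                        // accepted only as an extension
+// while the same dereference in C is accepted silently, as explained above.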
+
+BinaryOperatorKind Sema::ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind) {
+ BinaryOperatorKind Opc;
+ switch (Kind) {
+ default: llvm_unreachable("Unknown binop!");
+ case tok::periodstar: Opc = BO_PtrMemD; break;
+ case tok::arrowstar: Opc = BO_PtrMemI; break;
+ case tok::star: Opc = BO_Mul; break;
+ case tok::slash: Opc = BO_Div; break;
+ case tok::percent: Opc = BO_Rem; break;
+ case tok::plus: Opc = BO_Add; break;
+ case tok::minus: Opc = BO_Sub; break;
+ case tok::lessless: Opc = BO_Shl; break;
+ case tok::greatergreater: Opc = BO_Shr; break;
+ case tok::lessequal: Opc = BO_LE; break;
+ case tok::less: Opc = BO_LT; break;
+ case tok::greaterequal: Opc = BO_GE; break;
+ case tok::greater: Opc = BO_GT; break;
+ case tok::exclaimequal: Opc = BO_NE; break;
+ case tok::equalequal: Opc = BO_EQ; break;
+ case tok::amp: Opc = BO_And; break;
+ case tok::caret: Opc = BO_Xor; break;
+ case tok::pipe: Opc = BO_Or; break;
+ case tok::ampamp: Opc = BO_LAnd; break;
+ case tok::pipepipe: Opc = BO_LOr; break;
+ case tok::equal: Opc = BO_Assign; break;
+ case tok::starequal: Opc = BO_MulAssign; break;
+ case tok::slashequal: Opc = BO_DivAssign; break;
+ case tok::percentequal: Opc = BO_RemAssign; break;
+ case tok::plusequal: Opc = BO_AddAssign; break;
+ case tok::minusequal: Opc = BO_SubAssign; break;
+ case tok::lesslessequal: Opc = BO_ShlAssign; break;
+ case tok::greatergreaterequal: Opc = BO_ShrAssign; break;
+ case tok::ampequal: Opc = BO_AndAssign; break;
+ case tok::caretequal: Opc = BO_XorAssign; break;
+ case tok::pipeequal: Opc = BO_OrAssign; break;
+ case tok::comma: Opc = BO_Comma; break;
+ }
+ return Opc;
+}
+
+static inline UnaryOperatorKind ConvertTokenKindToUnaryOpcode(
+ tok::TokenKind Kind) {
+ UnaryOperatorKind Opc;
+ switch (Kind) {
+ default: llvm_unreachable("Unknown unary op!");
+ case tok::plusplus: Opc = UO_PreInc; break;
+ case tok::minusminus: Opc = UO_PreDec; break;
+ case tok::amp: Opc = UO_AddrOf; break;
+ case tok::star: Opc = UO_Deref; break;
+ case tok::plus: Opc = UO_Plus; break;
+ case tok::minus: Opc = UO_Minus; break;
+ case tok::tilde: Opc = UO_Not; break;
+ case tok::exclaim: Opc = UO_LNot; break;
+ case tok::kw___real: Opc = UO_Real; break;
+ case tok::kw___imag: Opc = UO_Imag; break;
+ case tok::kw___extension__: Opc = UO_Extension; break;
+ }
+ return Opc;
+}
+
+/// DiagnoseSelfAssignment - Emits a warning if a value is assigned to itself.
+/// This warning is only emitted for builtin assignment operations. It is also
+/// suppressed in the event of macro expansions.
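+/// For example, "x = x;" is diagnosed, unless 'x' is volatile or the
+/// assignment was produced by a macro expansion.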
+static void DiagnoseSelfAssignment(Sema &S, Expr *LHSExpr, Expr *RHSExpr,
+ SourceLocation OpLoc) {
+ if (!S.ActiveTemplateInstantiations.empty())
+ return;
+ if (OpLoc.isInvalid() || OpLoc.isMacroID())
+ return;
+ LHSExpr = LHSExpr->IgnoreParenImpCasts();
+ RHSExpr = RHSExpr->IgnoreParenImpCasts();
+ const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
+ const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);
+ if (!LHSDeclRef || !RHSDeclRef ||
+ LHSDeclRef->getLocation().isMacroID() ||
+ RHSDeclRef->getLocation().isMacroID())
+ return;
+ const ValueDecl *LHSDecl =
+ cast<ValueDecl>(LHSDeclRef->getDecl()->getCanonicalDecl());
+ const ValueDecl *RHSDecl =
+ cast<ValueDecl>(RHSDeclRef->getDecl()->getCanonicalDecl());
+ if (LHSDecl != RHSDecl)
+ return;
+ if (LHSDecl->getType().isVolatileQualified())
+ return;
+ if (const ReferenceType *RefTy = LHSDecl->getType()->getAs<ReferenceType>())
+ if (RefTy->getPointeeType().isVolatileQualified())
+ return;
+
+ S.Diag(OpLoc, diag::warn_self_assignment)
+ << LHSDeclRef->getType()
+ << LHSExpr->getSourceRange() << RHSExpr->getSourceRange();
+}
+
+/// Check if a bitwise-& is performed on an Objective-C pointer. This
+/// is usually indicative of introspection within the Objective-C pointer.
+static void checkObjCPointerIntrospection(Sema &S, ExprResult &L, ExprResult &R,
+ SourceLocation OpLoc) {
+ if (!S.getLangOpts().ObjC1)
+ return;
+
+ const Expr *ObjCPointerExpr = nullptr, *OtherExpr = nullptr;
+ const Expr *LHS = L.get();
+ const Expr *RHS = R.get();
+
+ if (LHS->IgnoreParenCasts()->getType()->isObjCObjectPointerType()) {
+ ObjCPointerExpr = LHS;
+ OtherExpr = RHS;
+  } else if (RHS->IgnoreParenCasts()->getType()->isObjCObjectPointerType()) {
+ ObjCPointerExpr = RHS;
+ OtherExpr = LHS;
+ }
+
+  // This warning is deliberately made very specific to reduce false
+  // positives with logic that uses '&' for hashing. This logic mainly
+  // looks for code trying to introspect into tagged pointers, which
+  // user code should generally never do.
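+  // A typical offender looks like "(uintptr_t)obj & 0x1", probing a tag bit.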
+ if (ObjCPointerExpr && isa<IntegerLiteral>(OtherExpr->IgnoreParenCasts())) {
+ unsigned Diag = diag::warn_objc_pointer_masking;
+ // Determine if we are introspecting the result of performSelectorXXX.
+ const Expr *Ex = ObjCPointerExpr->IgnoreParenCasts();
+ // Special case messages to -performSelector and friends, which
+ // can return non-pointer values boxed in a pointer value.
+ // Some clients may wish to silence warnings in this subcase.
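+    // e.g. "(NSUInteger)[obj performSelector:sel] & 0x1".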
+ if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(Ex)) {
+ Selector S = ME->getSelector();
+ StringRef SelArg0 = S.getNameForSlot(0);
+ if (SelArg0.startswith("performSelector"))
+ Diag = diag::warn_objc_pointer_masking_performSelector;
+ }
+
+ S.Diag(OpLoc, Diag)
+ << ObjCPointerExpr->getSourceRange();
+ }
+}
+
+static NamedDecl *getDeclFromExpr(Expr *E) {
+ if (!E)
+ return nullptr;
+ if (auto *DRE = dyn_cast<DeclRefExpr>(E))
+ return DRE->getDecl();
+ if (auto *ME = dyn_cast<MemberExpr>(E))
+ return ME->getMemberDecl();
+ if (auto *IRE = dyn_cast<ObjCIvarRefExpr>(E))
+ return IRE->getDecl();
+ return nullptr;
+}
+
+/// CreateBuiltinBinOp - Creates a new built-in binary operation with
+/// operator @p Opc at location @c TokLoc. This routine only supports
+/// built-in operations; ActOnBinOp handles overloaded operators.
+ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
+ BinaryOperatorKind Opc,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ if (getLangOpts().CPlusPlus11 && isa<InitListExpr>(RHSExpr)) {
+ // The syntax only allows initializer lists on the RHS of assignment,
+ // so we don't need to worry about accepting invalid code for
+ // non-assignment operators.
+ // C++11 5.17p9:
+ // The meaning of x = {v} [...] is that of x = T(v) [...]. The meaning
+ // of x = {} is x = T().
+ InitializationKind Kind =
+ InitializationKind::CreateDirectList(RHSExpr->getLocStart());
+ InitializedEntity Entity =
+ InitializedEntity::InitializeTemporary(LHSExpr->getType());
+ InitializationSequence InitSeq(*this, Entity, Kind, RHSExpr);
+ ExprResult Init = InitSeq.Perform(*this, Entity, Kind, RHSExpr);
+ if (Init.isInvalid())
+ return Init;
+ RHSExpr = Init.get();
+ }
+
+ ExprResult LHS = LHSExpr, RHS = RHSExpr;
+ QualType ResultTy; // Result type of the binary operator.
+  // The following two variables are used for compound assignment operators.
+  QualType CompLHSTy;    // Type of LHS after promotions for computation.
+  QualType CompResultTy; // Type of computation result.
+ ExprValueKind VK = VK_RValue;
+ ExprObjectKind OK = OK_Ordinary;
+
+ if (!getLangOpts().CPlusPlus) {
+ // C cannot handle TypoExpr nodes on either side of a binop because it
+ // doesn't handle dependent types properly, so make sure any TypoExprs have
+ // been dealt with before checking the operands.
+ LHS = CorrectDelayedTyposInExpr(LHSExpr);
+ RHS = CorrectDelayedTyposInExpr(RHSExpr, [Opc, LHS](Expr *E) {
+ if (Opc != BO_Assign)
+ return ExprResult(E);
+ // Avoid correcting the RHS to the same Expr as the LHS.
+ Decl *D = getDeclFromExpr(E);
+ return (D && D == getDeclFromExpr(LHS.get())) ? ExprError() : E;
+ });
+ if (!LHS.isUsable() || !RHS.isUsable())
+ return ExprError();
+ }
+
+ if (getLangOpts().OpenCL) {
+    // OpenCL C v2.0 s6.13.11.1 allows atomic variables to be initialized by
+ // the ATOMIC_VAR_INIT macro.
+ if (LHSExpr->getType()->isAtomicType() ||
+ RHSExpr->getType()->isAtomicType()) {
+ SourceRange SR(LHSExpr->getLocStart(), RHSExpr->getLocEnd());
+ if (BO_Assign == Opc)
+ Diag(OpLoc, diag::err_atomic_init_constant) << SR;
+ else
+ ResultTy = InvalidOperands(OpLoc, LHS, RHS);
+ return ExprError();
+ }
+ }
+
+ switch (Opc) {
+ case BO_Assign:
+ ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, QualType());
+ if (getLangOpts().CPlusPlus &&
+ LHS.get()->getObjectKind() != OK_ObjCProperty) {
+ VK = LHS.get()->getValueKind();
+ OK = LHS.get()->getObjectKind();
+ }
+ if (!ResultTy.isNull()) {
+ DiagnoseSelfAssignment(*this, LHS.get(), RHS.get(), OpLoc);
+ DiagnoseSelfMove(LHS.get(), RHS.get(), OpLoc);
+ }
+ RecordModifiableNonNullParam(*this, LHS.get());
+ break;
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ ResultTy = CheckPointerToMemberOperands(LHS, RHS, VK, OpLoc,
+ Opc == BO_PtrMemI);
+ break;
+ case BO_Mul:
+ case BO_Div:
+ ResultTy = CheckMultiplyDivideOperands(LHS, RHS, OpLoc, false,
+ Opc == BO_Div);
+ break;
+ case BO_Rem:
+ ResultTy = CheckRemainderOperands(LHS, RHS, OpLoc);
+ break;
+ case BO_Add:
+ ResultTy = CheckAdditionOperands(LHS, RHS, OpLoc, Opc);
+ break;
+ case BO_Sub:
+ ResultTy = CheckSubtractionOperands(LHS, RHS, OpLoc);
+ break;
+ case BO_Shl:
+ case BO_Shr:
+ ResultTy = CheckShiftOperands(LHS, RHS, OpLoc, Opc);
+ break;
+ case BO_LE:
+ case BO_LT:
+ case BO_GE:
+ case BO_GT:
+ ResultTy = CheckCompareOperands(LHS, RHS, OpLoc, Opc, true);
+ break;
+ case BO_EQ:
+ case BO_NE:
+ ResultTy = CheckCompareOperands(LHS, RHS, OpLoc, Opc, false);
+ break;
+ case BO_And:
+ checkObjCPointerIntrospection(*this, LHS, RHS, OpLoc);
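+    // fallthrough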
+ case BO_Xor:
+ case BO_Or:
+ ResultTy = CheckBitwiseOperands(LHS, RHS, OpLoc);
+ break;
+ case BO_LAnd:
+ case BO_LOr:
+ ResultTy = CheckLogicalOperands(LHS, RHS, OpLoc, Opc);
+ break;
+ case BO_MulAssign:
+ case BO_DivAssign:
+ CompResultTy = CheckMultiplyDivideOperands(LHS, RHS, OpLoc, true,
+ Opc == BO_DivAssign);
+ CompLHSTy = CompResultTy;
+ if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
+ ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ break;
+ case BO_RemAssign:
+ CompResultTy = CheckRemainderOperands(LHS, RHS, OpLoc, true);
+ CompLHSTy = CompResultTy;
+ if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
+ ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ break;
+ case BO_AddAssign:
+ CompResultTy = CheckAdditionOperands(LHS, RHS, OpLoc, Opc, &CompLHSTy);
+ if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
+ ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ break;
+ case BO_SubAssign:
+ CompResultTy = CheckSubtractionOperands(LHS, RHS, OpLoc, &CompLHSTy);
+ if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
+ ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ break;
+ case BO_ShlAssign:
+ case BO_ShrAssign:
+ CompResultTy = CheckShiftOperands(LHS, RHS, OpLoc, Opc, true);
+ CompLHSTy = CompResultTy;
+ if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
+ ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ break;
+  case BO_AndAssign:
+  case BO_OrAssign:
+    DiagnoseSelfAssignment(*this, LHS.get(), RHS.get(), OpLoc);
+    // fallthrough
+  case BO_XorAssign:
+ CompResultTy = CheckBitwiseOperands(LHS, RHS, OpLoc, true);
+ CompLHSTy = CompResultTy;
+ if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
+ ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ break;
+ case BO_Comma:
+ ResultTy = CheckCommaOperands(*this, LHS, RHS, OpLoc);
+ if (getLangOpts().CPlusPlus && !RHS.isInvalid()) {
+ VK = RHS.get()->getValueKind();
+ OK = RHS.get()->getObjectKind();
+ }
+ break;
+ }
+ if (ResultTy.isNull() || LHS.isInvalid() || RHS.isInvalid())
+ return ExprError();
+
+ // Check for array bounds violations for both sides of the BinaryOperator
+ CheckArrayAccess(LHS.get());
+ CheckArrayAccess(RHS.get());
+
+ if (const ObjCIsaExpr *OISA = dyn_cast<ObjCIsaExpr>(LHS.get()->IgnoreParenCasts())) {
+ NamedDecl *ObjectSetClass = LookupSingleName(TUScope,
+ &Context.Idents.get("object_setClass"),
+ SourceLocation(), LookupOrdinaryName);
+ if (ObjectSetClass && isa<ObjCIsaExpr>(LHS.get())) {
+ SourceLocation RHSLocEnd = getLocForEndOfToken(RHS.get()->getLocEnd());
+ Diag(LHS.get()->getExprLoc(), diag::warn_objc_isa_assign) <<
+ FixItHint::CreateInsertion(LHS.get()->getLocStart(), "object_setClass(") <<
+ FixItHint::CreateReplacement(SourceRange(OISA->getOpLoc(), OpLoc), ",") <<
+ FixItHint::CreateInsertion(RHSLocEnd, ")");
+    } else
+      Diag(LHS.get()->getExprLoc(), diag::warn_objc_isa_assign);
+  } else if (const ObjCIvarRefExpr *OIRE =
+                 dyn_cast<ObjCIvarRefExpr>(LHS.get()->IgnoreParenCasts()))
+ DiagnoseDirectIsaAccess(*this, OIRE, OpLoc, RHS.get());
+
+ if (CompResultTy.isNull())
+ return new (Context) BinaryOperator(LHS.get(), RHS.get(), Opc, ResultTy, VK,
+ OK, OpLoc, FPFeatures.fp_contract);
+ if (getLangOpts().CPlusPlus && LHS.get()->getObjectKind() !=
+ OK_ObjCProperty) {
+ VK = VK_LValue;
+ OK = LHS.get()->getObjectKind();
+ }
+ return new (Context) CompoundAssignOperator(
+ LHS.get(), RHS.get(), Opc, ResultTy, VK, OK, CompLHSTy, CompResultTy,
+ OpLoc, FPFeatures.fp_contract);
+}
+
+/// DiagnoseBitwisePrecedence - Emit a warning when bitwise and comparison
+/// operators are mixed in a way that suggests that the programmer forgot that
+/// comparison operators have higher precedence. The most typical example of
+/// such code is "flags & 0x0020 != 0", which is equivalent to "flags & 1".
+static void DiagnoseBitwisePrecedence(Sema &Self, BinaryOperatorKind Opc,
+ SourceLocation OpLoc, Expr *LHSExpr,
+ Expr *RHSExpr) {
+ BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHSExpr);
+ BinaryOperator *RHSBO = dyn_cast<BinaryOperator>(RHSExpr);
+
+ // Check that one of the sides is a comparison operator and the other isn't.
+ bool isLeftComp = LHSBO && LHSBO->isComparisonOp();
+ bool isRightComp = RHSBO && RHSBO->isComparisonOp();
+ if (isLeftComp == isRightComp)
+ return;
+
+ // Bitwise operations are sometimes used as eager logical ops.
+ // Don't diagnose this.
+ bool isLeftBitwise = LHSBO && LHSBO->isBitwiseOp();
+ bool isRightBitwise = RHSBO && RHSBO->isBitwiseOp();
+ if (isLeftBitwise || isRightBitwise)
+ return;
+
+ SourceRange DiagRange = isLeftComp ? SourceRange(LHSExpr->getLocStart(),
+ OpLoc)
+ : SourceRange(OpLoc, RHSExpr->getLocEnd());
+ StringRef OpStr = isLeftComp ? LHSBO->getOpcodeStr() : RHSBO->getOpcodeStr();
+ SourceRange ParensRange = isLeftComp ?
+ SourceRange(LHSBO->getRHS()->getLocStart(), RHSExpr->getLocEnd())
+ : SourceRange(LHSExpr->getLocStart(), RHSBO->getLHS()->getLocEnd());
+
+ Self.Diag(OpLoc, diag::warn_precedence_bitwise_rel)
+ << DiagRange << BinaryOperator::getOpcodeStr(Opc) << OpStr;
+ SuggestParentheses(Self, OpLoc,
+ Self.PDiag(diag::note_precedence_silence) << OpStr,
+ (isLeftComp ? LHSExpr : RHSExpr)->getSourceRange());
+ SuggestParentheses(Self, OpLoc,
+ Self.PDiag(diag::note_precedence_bitwise_first)
+ << BinaryOperator::getOpcodeStr(Opc),
+ ParensRange);
+}
+
+/// \brief Diagnose a '&&' expression nested inside a '||' one.
+/// Emit a diagnostic together with a fixit hint that wraps the '&&' expression
+/// in parentheses.
+static void
+EmitDiagnosticForLogicalAndInLogicalOr(Sema &Self, SourceLocation OpLoc,
+ BinaryOperator *Bop) {
+ assert(Bop->getOpcode() == BO_LAnd);
+ Self.Diag(Bop->getOperatorLoc(), diag::warn_logical_and_in_logical_or)
+ << Bop->getSourceRange() << OpLoc;
+ SuggestParentheses(Self, Bop->getOperatorLoc(),
+ Self.PDiag(diag::note_precedence_silence)
+ << Bop->getOpcodeStr(),
+ Bop->getSourceRange());
+}
+
+/// \brief Returns true if the given expression can be evaluated as a constant
+/// 'true'.
+static bool EvaluatesAsTrue(Sema &S, Expr *E) {
+ bool Res;
+ return !E->isValueDependent() &&
+ E->EvaluateAsBooleanCondition(Res, S.getASTContext()) && Res;
+}
+
+/// \brief Returns true if the given expression can be evaluated as a constant
+/// 'false'.
+static bool EvaluatesAsFalse(Sema &S, Expr *E) {
+ bool Res;
+ return !E->isValueDependent() &&
+ E->EvaluateAsBooleanCondition(Res, S.getASTContext()) && !Res;
+}
+
+/// \brief Look for '&&' in the left hand of a '||' expr.
+static void DiagnoseLogicalAndInLogicalOrLHS(Sema &S, SourceLocation OpLoc,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(LHSExpr)) {
+ if (Bop->getOpcode() == BO_LAnd) {
+ // If it's "a && b || 0" don't warn since the precedence doesn't matter.
+ if (EvaluatesAsFalse(S, RHSExpr))
+ return;
+ // If it's "1 && a || b" don't warn since the precedence doesn't matter.
+ if (!EvaluatesAsTrue(S, Bop->getLHS()))
+ return EmitDiagnosticForLogicalAndInLogicalOr(S, OpLoc, Bop);
+ } else if (Bop->getOpcode() == BO_LOr) {
+ if (BinaryOperator *RBop = dyn_cast<BinaryOperator>(Bop->getRHS())) {
+ // If it's "a || b && 1 || c" we didn't warn earlier for
+ // "a || b && 1", but warn now.
+ if (RBop->getOpcode() == BO_LAnd && EvaluatesAsTrue(S, RBop->getRHS()))
+ return EmitDiagnosticForLogicalAndInLogicalOr(S, OpLoc, RBop);
+ }
+ }
+ }
+}
+
+/// \brief Look for '&&' in the right hand of a '||' expr.
+static void DiagnoseLogicalAndInLogicalOrRHS(Sema &S, SourceLocation OpLoc,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(RHSExpr)) {
+ if (Bop->getOpcode() == BO_LAnd) {
+ // If it's "0 || a && b" don't warn since the precedence doesn't matter.
+ if (EvaluatesAsFalse(S, LHSExpr))
+ return;
+ // If it's "a || b && 1" don't warn since the precedence doesn't matter.
+ if (!EvaluatesAsTrue(S, Bop->getRHS()))
+ return EmitDiagnosticForLogicalAndInLogicalOr(S, OpLoc, Bop);
+ }
+ }
+}
+
+/// \brief Look for bitwise op in the left or right hand of a bitwise op with
+/// lower precedence and emit a diagnostic together with a fixit hint that wraps
+/// the '&' expression in parentheses.
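+/// For example, in "a | b & c" the '&' binds tighter; parentheses as in
+/// "a | (b & c)" make the grouping explicit.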
+static void DiagnoseBitwiseOpInBitwiseOp(Sema &S, BinaryOperatorKind Opc,
+ SourceLocation OpLoc, Expr *SubExpr) {
+ if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(SubExpr)) {
+ if (Bop->isBitwiseOp() && Bop->getOpcode() < Opc) {
+ S.Diag(Bop->getOperatorLoc(), diag::warn_bitwise_op_in_bitwise_op)
+ << Bop->getOpcodeStr() << BinaryOperator::getOpcodeStr(Opc)
+ << Bop->getSourceRange() << OpLoc;
+ SuggestParentheses(S, Bop->getOperatorLoc(),
+ S.PDiag(diag::note_precedence_silence)
+ << Bop->getOpcodeStr(),
+ Bop->getSourceRange());
+ }
+ }
+}
+
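+/// Diagnose "x << a + b", which parses as "x << (a + b)" even though
+/// "(x << a) + b" may have been intended.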
+static void DiagnoseAdditionInShift(Sema &S, SourceLocation OpLoc,
+ Expr *SubExpr, StringRef Shift) {
+ if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(SubExpr)) {
+ if (Bop->getOpcode() == BO_Add || Bop->getOpcode() == BO_Sub) {
+ StringRef Op = Bop->getOpcodeStr();
+ S.Diag(Bop->getOperatorLoc(), diag::warn_addition_in_bitshift)
+ << Bop->getSourceRange() << OpLoc << Shift << Op;
+ SuggestParentheses(S, Bop->getOperatorLoc(),
+ S.PDiag(diag::note_precedence_silence) << Op,
+ Bop->getSourceRange());
+ }
+ }
+}
+
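+/// Diagnose a comparison of an overloaded shift, e.g. "cout << x == y",
+/// which parses as "(cout << x) == y".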
+static void DiagnoseShiftCompare(Sema &S, SourceLocation OpLoc,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ CXXOperatorCallExpr *OCE = dyn_cast<CXXOperatorCallExpr>(LHSExpr);
+ if (!OCE)
+ return;
+
+ FunctionDecl *FD = OCE->getDirectCallee();
+ if (!FD || !FD->isOverloadedOperator())
+ return;
+
+ OverloadedOperatorKind Kind = FD->getOverloadedOperator();
+ if (Kind != OO_LessLess && Kind != OO_GreaterGreater)
+ return;
+
+ S.Diag(OpLoc, diag::warn_overloaded_shift_in_comparison)
+ << LHSExpr->getSourceRange() << RHSExpr->getSourceRange()
+ << (Kind == OO_LessLess);
+ SuggestParentheses(S, OCE->getOperatorLoc(),
+ S.PDiag(diag::note_precedence_silence)
+ << (Kind == OO_LessLess ? "<<" : ">>"),
+ OCE->getSourceRange());
+ SuggestParentheses(S, OpLoc,
+ S.PDiag(diag::note_evaluate_comparison_first),
+ SourceRange(OCE->getArg(1)->getLocStart(),
+ RHSExpr->getLocEnd()));
+}
+
+/// DiagnoseBinOpPrecedence - Emit warnings for expressions with tricky
+/// precedence.
+static void DiagnoseBinOpPrecedence(Sema &Self, BinaryOperatorKind Opc,
+ SourceLocation OpLoc, Expr *LHSExpr,
+ Expr *RHSExpr){
+ // Diagnose "arg1 'bitwise' arg2 'eq' arg3".
+ if (BinaryOperator::isBitwiseOp(Opc))
+ DiagnoseBitwisePrecedence(Self, Opc, OpLoc, LHSExpr, RHSExpr);
+
+ // Diagnose "arg1 & arg2 | arg3"
+ if ((Opc == BO_Or || Opc == BO_Xor) &&
+ !OpLoc.isMacroID()/* Don't warn in macros. */) {
+ DiagnoseBitwiseOpInBitwiseOp(Self, Opc, OpLoc, LHSExpr);
+ DiagnoseBitwiseOpInBitwiseOp(Self, Opc, OpLoc, RHSExpr);
+ }
+
+ // Warn about arg1 || arg2 && arg3, as GCC 4.3+ does.
+ // We don't warn for 'assert(a || b && "bad")' since this is safe.
+ if (Opc == BO_LOr && !OpLoc.isMacroID()/* Don't warn in macros. */) {
+ DiagnoseLogicalAndInLogicalOrLHS(Self, OpLoc, LHSExpr, RHSExpr);
+ DiagnoseLogicalAndInLogicalOrRHS(Self, OpLoc, LHSExpr, RHSExpr);
+ }
+
+ if ((Opc == BO_Shl && LHSExpr->getType()->isIntegralType(Self.getASTContext()))
+ || Opc == BO_Shr) {
+ StringRef Shift = BinaryOperator::getOpcodeStr(Opc);
+ DiagnoseAdditionInShift(Self, OpLoc, LHSExpr, Shift);
+ DiagnoseAdditionInShift(Self, OpLoc, RHSExpr, Shift);
+ }
+
+ // Warn on overloaded shift operators and comparisons, such as:
+ // cout << 5 == 4;
+ if (BinaryOperator::isComparisonOp(Opc))
+ DiagnoseShiftCompare(Self, OpLoc, LHSExpr, RHSExpr);
+}
+
+// Binary Operators. 'Tok' is the token for the operator.
+ExprResult Sema::ActOnBinOp(Scope *S, SourceLocation TokLoc,
+ tok::TokenKind Kind,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ BinaryOperatorKind Opc = ConvertTokenKindToBinaryOpcode(Kind);
+ assert(LHSExpr && "ActOnBinOp(): missing left expression");
+ assert(RHSExpr && "ActOnBinOp(): missing right expression");
+
+ // Emit warnings for tricky precedence issues, e.g. "bitfield & 0x4 == 0"
+ DiagnoseBinOpPrecedence(*this, Opc, TokLoc, LHSExpr, RHSExpr);
+
+ return BuildBinOp(S, TokLoc, Opc, LHSExpr, RHSExpr);
+}
+
+/// Build an overloaded binary operator expression in the given scope.
+static ExprResult BuildOverloadedBinOp(Sema &S, Scope *Sc, SourceLocation OpLoc,
+ BinaryOperatorKind Opc,
+ Expr *LHS, Expr *RHS) {
+ // Find all of the overloaded operators visible from this
+ // point. We perform both an operator-name lookup from the local
+ // scope and an argument-dependent lookup based on the types of
+ // the arguments.
+ UnresolvedSet<16> Functions;
+ OverloadedOperatorKind OverOp
+ = BinaryOperator::getOverloadedOperator(Opc);
+ if (Sc && OverOp != OO_None && OverOp != OO_Equal)
+ S.LookupOverloadedOperatorName(OverOp, Sc, LHS->getType(),
+ RHS->getType(), Functions);
+
+ // Build the (potentially-overloaded, potentially-dependent)
+ // binary operation.
+ return S.CreateOverloadedBinOp(OpLoc, Opc, Functions, LHS, RHS);
+}
+
+ExprResult Sema::BuildBinOp(Scope *S, SourceLocation OpLoc,
+ BinaryOperatorKind Opc,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ // We want to end up calling one of checkPseudoObjectAssignment
+ // (if the LHS is a pseudo-object), BuildOverloadedBinOp (if
+ // both expressions are overloadable or either is type-dependent),
+ // or CreateBuiltinBinOp (in any other case). We also want to get
+ // any placeholder types out of the way.
+
+ // Handle pseudo-objects in the LHS.
+ if (const BuiltinType *pty = LHSExpr->getType()->getAsPlaceholderType()) {
+ // Assignments with a pseudo-object l-value need special analysis.
+ if (pty->getKind() == BuiltinType::PseudoObject &&
+ BinaryOperator::isAssignmentOp(Opc))
+ return checkPseudoObjectAssignment(S, OpLoc, Opc, LHSExpr, RHSExpr);
+
+ // Don't resolve overloads if the other type is overloadable.
+ if (pty->getKind() == BuiltinType::Overload) {
+ // We can't actually test that if we still have a placeholder,
+ // though. Fortunately, none of the exceptions we see in that
+ // code below are valid when the LHS is an overload set. Note
+ // that an overload set can be dependently-typed, but it never
+ // instantiates to having an overloadable type.
+ ExprResult resolvedRHS = CheckPlaceholderExpr(RHSExpr);
+ if (resolvedRHS.isInvalid()) return ExprError();
+ RHSExpr = resolvedRHS.get();
+
+ if (RHSExpr->isTypeDependent() ||
+ RHSExpr->getType()->isOverloadableType())
+ return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
+ }
+
+ ExprResult LHS = CheckPlaceholderExpr(LHSExpr);
+ if (LHS.isInvalid()) return ExprError();
+ LHSExpr = LHS.get();
+ }
+
+ // Handle pseudo-objects in the RHS.
+ if (const BuiltinType *pty = RHSExpr->getType()->getAsPlaceholderType()) {
+ // An overload in the RHS can potentially be resolved by the type
+ // being assigned to.
+ if (Opc == BO_Assign && pty->getKind() == BuiltinType::Overload) {
+ if (LHSExpr->isTypeDependent() || RHSExpr->isTypeDependent())
+ return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
+
+ if (LHSExpr->getType()->isOverloadableType())
+ return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
+
+ return CreateBuiltinBinOp(OpLoc, Opc, LHSExpr, RHSExpr);
+ }
+
+ // Don't resolve overloads if the other type is overloadable.
+ if (pty->getKind() == BuiltinType::Overload &&
+ LHSExpr->getType()->isOverloadableType())
+ return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
+
+ ExprResult resolvedRHS = CheckPlaceholderExpr(RHSExpr);
+ if (!resolvedRHS.isUsable()) return ExprError();
+ RHSExpr = resolvedRHS.get();
+ }
+
+ if (getLangOpts().CPlusPlus) {
+ // If either expression is type-dependent, always build an
+ // overloaded op.
+ if (LHSExpr->isTypeDependent() || RHSExpr->isTypeDependent())
+ return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
+
+ // Otherwise, build an overloaded op if either expression has an
+ // overloadable type.
+ if (LHSExpr->getType()->isOverloadableType() ||
+ RHSExpr->getType()->isOverloadableType())
+ return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
+ }
+
+ // Build a built-in binary operation.
+ return CreateBuiltinBinOp(OpLoc, Opc, LHSExpr, RHSExpr);
+}
+
+ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
+ UnaryOperatorKind Opc,
+ Expr *InputExpr) {
+ ExprResult Input = InputExpr;
+ ExprValueKind VK = VK_RValue;
+ ExprObjectKind OK = OK_Ordinary;
+ QualType resultType;
+ if (getLangOpts().OpenCL) {
+ // The only legal unary operation for atomics is '&'.
+ if (Opc != UO_AddrOf && InputExpr->getType()->isAtomicType()) {
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << InputExpr->getType()
+ << Input.get()->getSourceRange());
+ }
+ }
+ switch (Opc) {
+ case UO_PreInc:
+ case UO_PreDec:
+ case UO_PostInc:
+ case UO_PostDec:
+ resultType = CheckIncrementDecrementOperand(*this, Input.get(), VK, OK,
+ OpLoc,
+ Opc == UO_PreInc ||
+ Opc == UO_PostInc,
+ Opc == UO_PreInc ||
+ Opc == UO_PreDec);
+ break;
+ case UO_AddrOf:
+ resultType = CheckAddressOfOperand(Input, OpLoc);
+ RecordModifiableNonNullParam(*this, InputExpr);
+ break;
+ case UO_Deref: {
+ Input = DefaultFunctionArrayLvalueConversion(Input.get());
+ if (Input.isInvalid()) return ExprError();
+ resultType = CheckIndirectionOperand(*this, Input.get(), VK, OpLoc);
+ break;
+ }
+ case UO_Plus:
+ case UO_Minus:
+ Input = UsualUnaryConversions(Input.get());
+ if (Input.isInvalid()) return ExprError();
+ resultType = Input.get()->getType();
+ if (resultType->isDependentType())
+ break;
+ if (resultType->isArithmeticType()) // C99 6.5.3.3p1
+ break;
+ else if (resultType->isVectorType() &&
+ // The z vector extensions don't allow + or - with bool vectors.
+ (!Context.getLangOpts().ZVector ||
+ resultType->getAs<VectorType>()->getVectorKind() !=
+ VectorType::AltiVecBool))
+ break;
+ else if (getLangOpts().CPlusPlus && // C++ [expr.unary.op]p6
+ Opc == UO_Plus &&
+ resultType->isPointerType())
+ break;
+
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input.get()->getSourceRange());
+
+ case UO_Not: // bitwise complement
+ Input = UsualUnaryConversions(Input.get());
+ if (Input.isInvalid())
+ return ExprError();
+ resultType = Input.get()->getType();
+ if (resultType->isDependentType())
+ break;
+ // C99 6.5.3.3p1. We allow complex int and float as a GCC extension.
+ if (resultType->isComplexType() || resultType->isComplexIntegerType())
+ // C99 does not support '~' for complex conjugation.
+ Diag(OpLoc, diag::ext_integer_complement_complex)
+ << resultType << Input.get()->getSourceRange();
+ else if (resultType->hasIntegerRepresentation())
+ break;
+ else if (resultType->isExtVectorType()) {
+ if (Context.getLangOpts().OpenCL) {
+ // OpenCL v1.1 s6.3.f: The bitwise operator not (~) does not operate
+ // on vector float types.
+ QualType T = resultType->getAs<ExtVectorType>()->getElementType();
+ if (!T->isIntegerType())
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input.get()->getSourceRange());
+ }
+ break;
+ } else {
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input.get()->getSourceRange());
+ }
+ break;
+
+ case UO_LNot: // logical negation
+ // Unlike +/-/~, integer promotions aren't done here (C99 6.5.3.3p5).
+ Input = DefaultFunctionArrayLvalueConversion(Input.get());
+ if (Input.isInvalid()) return ExprError();
+ resultType = Input.get()->getType();
+
+ // Though we still have to promote half FP to float...
+ if (resultType->isHalfType() && !Context.getLangOpts().NativeHalfType) {
+ Input = ImpCastExprToType(Input.get(), Context.FloatTy, CK_FloatingCast).get();
+ resultType = Context.FloatTy;
+ }
+
+ if (resultType->isDependentType())
+ break;
+ if (resultType->isScalarType() && !isScopedEnumerationType(resultType)) {
+ // C99 6.5.3.3p1: ok, fallthrough;
+ if (Context.getLangOpts().CPlusPlus) {
+ // C++03 [expr.unary.op]p8, C++0x [expr.unary.op]p9:
+ // operand contextually converted to bool.
+ Input = ImpCastExprToType(Input.get(), Context.BoolTy,
+ ScalarTypeToBooleanCastKind(resultType));
+ } else if (Context.getLangOpts().OpenCL &&
+ Context.getLangOpts().OpenCLVersion < 120) {
+ // OpenCL v1.1 6.3.h: The logical operator not (!) does not
+ // operate on scalar float types.
+ if (!resultType->isIntegerType())
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input.get()->getSourceRange());
+ }
+ } else if (resultType->isExtVectorType()) {
+ if (Context.getLangOpts().OpenCL &&
+ Context.getLangOpts().OpenCLVersion < 120) {
+ // OpenCL v1.1 6.3.h: The logical operator not (!) does not
+ // operate on vector float types.
+ QualType T = resultType->getAs<ExtVectorType>()->getElementType();
+ if (!T->isIntegerType())
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input.get()->getSourceRange());
+ }
+ // Vector logical not returns the signed variant of the operand type.
+ resultType = GetSignedVectorType(resultType);
+ break;
+ } else {
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input.get()->getSourceRange());
+ }
+
+ // LNot always has type int. C99 6.5.3.3p5.
+ // In C++, it's bool. C++ 5.3.1p8
+ resultType = Context.getLogicalOperationType();
+ break;
+ case UO_Real:
+ case UO_Imag:
+ resultType = CheckRealImagOperand(*this, Input, OpLoc, Opc == UO_Real);
+ // _Real maps ordinary l-values into ordinary l-values. _Imag maps ordinary
+ // complex l-values to ordinary l-values and all other values to r-values.
+ if (Input.isInvalid()) return ExprError();
+ if (Opc == UO_Real || Input.get()->getType()->isAnyComplexType()) {
+ if (Input.get()->getValueKind() != VK_RValue &&
+ Input.get()->getObjectKind() == OK_Ordinary)
+ VK = Input.get()->getValueKind();
+ } else if (!getLangOpts().CPlusPlus) {
+ // In C, a volatile scalar is read by __imag. In C++, it is not.
+ Input = DefaultLvalueConversion(Input.get());
+ }
+ break;
+ case UO_Extension:
+ case UO_Coawait:
+ resultType = Input.get()->getType();
+ VK = Input.get()->getValueKind();
+ OK = Input.get()->getObjectKind();
+ break;
+ }
+ if (resultType.isNull() || Input.isInvalid())
+ return ExprError();
+
+ // Check for array bounds violations in the operand of the UnaryOperator,
+ // except for the '*' and '&' operators that have to be handled specially
+ // by CheckArrayAccess (as there are special cases like &array[arraysize]
+ // that are explicitly defined as valid by the standard).
+ if (Opc != UO_AddrOf && Opc != UO_Deref)
+ CheckArrayAccess(Input.get());
+
+ return new (Context)
+ UnaryOperator(Input.get(), Opc, resultType, VK, OK, OpLoc);
+}
+
+/// \brief Determine whether the given expression is a qualified member
+/// access expression, of a form that could be turned into a pointer to member
+/// with the address-of operator.
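+/// For example, "&C::f" where 'f' is a field or a non-static member function.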
+static bool isQualifiedMemberAccess(Expr *E) {
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (!DRE->getQualifier())
+ return false;
+
+ ValueDecl *VD = DRE->getDecl();
+ if (!VD->isCXXClassMember())
+ return false;
+
+ if (isa<FieldDecl>(VD) || isa<IndirectFieldDecl>(VD))
+ return true;
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(VD))
+ return Method->isInstance();
+
+ return false;
+ }
+
+ if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
+ if (!ULE->getQualifier())
+ return false;
+
+ for (NamedDecl *D : ULE->decls()) {
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (Method->isInstance())
+ return true;
+ } else {
+ // Overload set does not contain methods.
+ break;
+ }
+ }
+
+ return false;
+ }
+
+ return false;
+}
+
+ExprResult Sema::BuildUnaryOp(Scope *S, SourceLocation OpLoc,
+ UnaryOperatorKind Opc, Expr *Input) {
+ // First things first: handle placeholders so that the
+ // overloaded-operator check considers the right type.
+ if (const BuiltinType *pty = Input->getType()->getAsPlaceholderType()) {
+ // Increment and decrement of pseudo-object references.
+ if (pty->getKind() == BuiltinType::PseudoObject &&
+ UnaryOperator::isIncrementDecrementOp(Opc))
+ return checkPseudoObjectIncDec(S, OpLoc, Opc, Input);
+
+    // __extension__ is always a builtin operator.
+ if (Opc == UO_Extension)
+ return CreateBuiltinUnaryOp(OpLoc, Opc, Input);
+
+ // & gets special logic for several kinds of placeholder.
+ // The builtin code knows what to do.
+ if (Opc == UO_AddrOf &&
+ (pty->getKind() == BuiltinType::Overload ||
+ pty->getKind() == BuiltinType::UnknownAny ||
+ pty->getKind() == BuiltinType::BoundMember))
+ return CreateBuiltinUnaryOp(OpLoc, Opc, Input);
+
+ // Anything else needs to be handled now.
+ ExprResult Result = CheckPlaceholderExpr(Input);
+ if (Result.isInvalid()) return ExprError();
+ Input = Result.get();
+ }
+
+ if (getLangOpts().CPlusPlus && Input->getType()->isOverloadableType() &&
+ UnaryOperator::getOverloadedOperator(Opc) != OO_None &&
+ !(Opc == UO_AddrOf && isQualifiedMemberAccess(Input))) {
+ // Find all of the overloaded operators visible from this
+ // point. We perform both an operator-name lookup from the local
+ // scope and an argument-dependent lookup based on the types of
+ // the arguments.
+ UnresolvedSet<16> Functions;
+ OverloadedOperatorKind OverOp = UnaryOperator::getOverloadedOperator(Opc);
+ if (S && OverOp != OO_None)
+ LookupOverloadedOperatorName(OverOp, S, Input->getType(), QualType(),
+ Functions);
+
+ return CreateOverloadedUnaryOp(OpLoc, Opc, Functions, Input);
+ }
+
+ return CreateBuiltinUnaryOp(OpLoc, Opc, Input);
+}
+
+// Unary Operators. 'Tok' is the token for the operator.
+ExprResult Sema::ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
+ tok::TokenKind Op, Expr *Input) {
+ return BuildUnaryOp(S, OpLoc, ConvertTokenKindToUnaryOpcode(Op), Input);
+}
+
+/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
+ExprResult Sema::ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
+ LabelDecl *TheDecl) {
+ TheDecl->markUsed(Context);
+ // Create the AST node. The address of a label always has type 'void*'.
+ return new (Context) AddrLabelExpr(OpLoc, LabLoc, TheDecl,
+ Context.getPointerType(Context.VoidTy));
+}
+
+/// Given the last statement in a statement-expression, check whether
+/// the result is a producing expression (like a call to an
+/// ns_returns_retained function) and, if so, rebuild it to hoist the
+/// release out of the full-expression. Otherwise, return null.
+/// Cannot fail.
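+/// For example, in "({ [Foo new]; })" the +new message produces a retained
+/// object; splicing out the consuming cast lets the result be bound at +1.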
+static Expr *maybeRebuildARCConsumingStmt(Stmt *Statement) {
+ // Should always be wrapped with one of these.
+ ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(Statement);
+ if (!cleanups) return nullptr;
+
+ ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(cleanups->getSubExpr());
+ if (!cast || cast->getCastKind() != CK_ARCConsumeObject)
+ return nullptr;
+
+ // Splice out the cast. This shouldn't modify any interesting
+ // features of the statement.
+ Expr *producer = cast->getSubExpr();
+ assert(producer->getType() == cast->getType());
+ assert(producer->getValueKind() == cast->getValueKind());
+ cleanups->setSubExpr(producer);
+ return cleanups;
+}
+
+void Sema::ActOnStartStmtExpr() {
+ PushExpressionEvaluationContext(ExprEvalContexts.back().Context);
+}
+
+void Sema::ActOnStmtExprError() {
+ // Note that function is also called by TreeTransform when leaving a
+ // StmtExpr scope without rebuilding anything.
+
+ DiscardCleanupsInEvaluationContext();
+ PopExpressionEvaluationContext();
+}
+
+ExprResult
+Sema::ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
+ SourceLocation RPLoc) { // "({..})"
+ assert(SubStmt && isa<CompoundStmt>(SubStmt) && "Invalid action invocation!");
+ CompoundStmt *Compound = cast<CompoundStmt>(SubStmt);
+
+ if (hasAnyUnrecoverableErrorsInThisFunction())
+ DiscardCleanupsInEvaluationContext();
+ assert(!ExprNeedsCleanups && "cleanups within StmtExpr not correctly bound!");
+ PopExpressionEvaluationContext();
+
+  // FIXME: there are a variety of strange constraints to enforce here; for
+  // example, it is apparently not possible to goto into a statement
+  // expression. More semantic analysis is needed.
+
+ // If there are sub-stmts in the compound stmt, take the type of the last one
+ // as the type of the stmtexpr.
+ QualType Ty = Context.VoidTy;
+ bool StmtExprMayBindToTemp = false;
+ if (!Compound->body_empty()) {
+ Stmt *LastStmt = Compound->body_back();
+ LabelStmt *LastLabelStmt = nullptr;
+ // If LastStmt is a label, skip down through into the body.
+ while (LabelStmt *Label = dyn_cast<LabelStmt>(LastStmt)) {
+ LastLabelStmt = Label;
+ LastStmt = Label->getSubStmt();
+ }
+
+ if (Expr *LastE = dyn_cast<Expr>(LastStmt)) {
+ // Do function/array conversion on the last expression, but not
+ // lvalue-to-rvalue. However, initialize an unqualified type.
+ ExprResult LastExpr = DefaultFunctionArrayConversion(LastE);
+ if (LastExpr.isInvalid())
+ return ExprError();
+ Ty = LastExpr.get()->getType().getUnqualifiedType();
+
+ if (!Ty->isDependentType() && !LastExpr.get()->isTypeDependent()) {
+ // In ARC, if the final expression ends in a consume, splice
+ // the consume out and bind it later. In the alternate case
+ // (when dealing with a retainable type), the result
+ // initialization will create a produce. In both cases the
+ // result will be +1, and we'll need to balance that out with
+ // a bind.
+ if (Expr *rebuiltLastStmt
+ = maybeRebuildARCConsumingStmt(LastExpr.get())) {
+ LastExpr = rebuiltLastStmt;
+ } else {
+ LastExpr = PerformCopyInitialization(
+ InitializedEntity::InitializeResult(LPLoc,
+ Ty,
+ false),
+ SourceLocation(),
+ LastExpr);
+ }
+
+ if (LastExpr.isInvalid())
+ return ExprError();
+ if (LastExpr.get() != nullptr) {
+ if (!LastLabelStmt)
+ Compound->setLastStmt(LastExpr.get());
+ else
+ LastLabelStmt->setSubStmt(LastExpr.get());
+ StmtExprMayBindToTemp = true;
+ }
+ }
+ }
+ }
+
+ // FIXME: Check that expression type is complete/non-abstract; statement
+ // expressions are not lvalues.
+ Expr *ResStmtExpr = new (Context) StmtExpr(Compound, Ty, LPLoc, RPLoc);
+ if (StmtExprMayBindToTemp)
+ return MaybeBindToTemporary(ResStmtExpr);
+ return ResStmtExpr;
+}
+
+ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
+ TypeSourceInfo *TInfo,
+ ArrayRef<OffsetOfComponent> Components,
+ SourceLocation RParenLoc) {
+ QualType ArgTy = TInfo->getType();
+ bool Dependent = ArgTy->isDependentType();
+ SourceRange TypeRange = TInfo->getTypeLoc().getLocalSourceRange();
+
+ // We must have at least one component that refers to the type, and the first
+ // one is known to be a field designator. Verify that the ArgTy represents
+ // a struct/union/class.
+ if (!Dependent && !ArgTy->isRecordType())
+ return ExprError(Diag(BuiltinLoc, diag::err_offsetof_record_type)
+ << ArgTy << TypeRange);
+
+  // The type must be complete per C99 7.17p3, because declaring a variable
+  // with an incomplete type would be ill-formed.
+ if (!Dependent
+ && RequireCompleteType(BuiltinLoc, ArgTy,
+ diag::err_offsetof_incomplete_type, TypeRange))
+ return ExprError();
+
+  // offsetof with non-identifier designators (e.g. "offsetof(x, a.b[c])") is a
+  // GCC extension; diagnose it.
+ // FIXME: This diagnostic isn't actually visible because the location is in
+ // a system header!
+ if (Components.size() != 1)
+ Diag(BuiltinLoc, diag::ext_offsetof_extended_field_designator)
+ << SourceRange(Components[1].LocStart, Components.back().LocEnd);
+
+ bool DidWarnAboutNonPOD = false;
+ QualType CurrentType = ArgTy;
+ SmallVector<OffsetOfNode, 4> Comps;
+ SmallVector<Expr*, 4> Exprs;
+ for (const OffsetOfComponent &OC : Components) {
+ if (OC.isBrackets) {
+ // Offset of an array sub-field. TODO: Should we allow vector elements?
+ if (!CurrentType->isDependentType()) {
+ const ArrayType *AT = Context.getAsArrayType(CurrentType);
+        if (!AT)
+ return ExprError(Diag(OC.LocEnd, diag::err_offsetof_array_type)
+ << CurrentType);
+ CurrentType = AT->getElementType();
+ } else
+ CurrentType = Context.DependentTy;
+
+ ExprResult IdxRval = DefaultLvalueConversion(static_cast<Expr*>(OC.U.E));
+ if (IdxRval.isInvalid())
+ return ExprError();
+ Expr *Idx = IdxRval.get();
+
+ // The expression must be an integral expression.
+ // FIXME: An integral constant expression?
+ if (!Idx->isTypeDependent() && !Idx->isValueDependent() &&
+ !Idx->getType()->isIntegerType())
+ return ExprError(Diag(Idx->getLocStart(),
+ diag::err_typecheck_subscript_not_integer)
+ << Idx->getSourceRange());
+
+ // Record this array index.
+ Comps.push_back(OffsetOfNode(OC.LocStart, Exprs.size(), OC.LocEnd));
+ Exprs.push_back(Idx);
+ continue;
+ }
+
+ // Offset of a field.
+ if (CurrentType->isDependentType()) {
+ // We have the offset of a field, but we can't look into the dependent
+ // type. Just record the identifier of the field.
+ Comps.push_back(OffsetOfNode(OC.LocStart, OC.U.IdentInfo, OC.LocEnd));
+ CurrentType = Context.DependentTy;
+ continue;
+ }
+
+ // We need to have a complete type to look into.
+ if (RequireCompleteType(OC.LocStart, CurrentType,
+ diag::err_offsetof_incomplete_type))
+ return ExprError();
+
+ // Look for the designated field.
+ const RecordType *RC = CurrentType->getAs<RecordType>();
+ if (!RC)
+ return ExprError(Diag(OC.LocEnd, diag::err_offsetof_record_type)
+ << CurrentType);
+ RecordDecl *RD = RC->getDecl();
+
+ // C++ [lib.support.types]p5:
+ // The macro offsetof accepts a restricted set of type arguments in this
+ // International Standard. type shall be a POD structure or a POD union
+ // (clause 9).
+ // C++11 [support.types]p4:
+ // If type is not a standard-layout class (Clause 9), the results are
+ // undefined.
+ if (CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+      bool IsSafe =
+          LangOpts.CPlusPlus11 ? CRD->isStandardLayout() : CRD->isPOD();
+      unsigned DiagID =
+          LangOpts.CPlusPlus11 ? diag::ext_offsetof_non_standardlayout_type
+                               : diag::ext_offsetof_non_pod_type;
+
+ if (!IsSafe && !DidWarnAboutNonPOD &&
+ DiagRuntimeBehavior(BuiltinLoc, nullptr,
+ PDiag(DiagID)
+ << SourceRange(Components[0].LocStart, OC.LocEnd)
+ << CurrentType))
+ DidWarnAboutNonPOD = true;
+ }
+
+ // Look for the field.
+ LookupResult R(*this, OC.U.IdentInfo, OC.LocStart, LookupMemberName);
+ LookupQualifiedName(R, RD);
+ FieldDecl *MemberDecl = R.getAsSingle<FieldDecl>();
+ IndirectFieldDecl *IndirectMemberDecl = nullptr;
+ if (!MemberDecl) {
+ if ((IndirectMemberDecl = R.getAsSingle<IndirectFieldDecl>()))
+ MemberDecl = IndirectMemberDecl->getAnonField();
+ }
+
+ if (!MemberDecl)
+ return ExprError(Diag(BuiltinLoc, diag::err_no_member)
+ << OC.U.IdentInfo << RD << SourceRange(OC.LocStart,
+ OC.LocEnd));
+
+ // C99 7.17p3:
+ // (If the specified member is a bit-field, the behavior is undefined.)
+ //
+ // We diagnose this as an error.
+ if (MemberDecl->isBitField()) {
+ Diag(OC.LocEnd, diag::err_offsetof_bitfield)
+ << MemberDecl->getDeclName()
+ << SourceRange(BuiltinLoc, RParenLoc);
+ Diag(MemberDecl->getLocation(), diag::note_bitfield_decl);
+ return ExprError();
+ }
+
+ RecordDecl *Parent = MemberDecl->getParent();
+ if (IndirectMemberDecl)
+ Parent = cast<RecordDecl>(IndirectMemberDecl->getDeclContext());
+
+ // If the member was found in a base class, introduce OffsetOfNodes for
+ // the base class indirections.
+ CXXBasePaths Paths;
+ if (IsDerivedFrom(OC.LocStart, CurrentType, Context.getTypeDeclType(Parent),
+ Paths)) {
+ if (Paths.getDetectedVirtual()) {
+ Diag(OC.LocEnd, diag::err_offsetof_field_of_virtual_base)
+ << MemberDecl->getDeclName()
+ << SourceRange(BuiltinLoc, RParenLoc);
+ return ExprError();
+ }
+
+ CXXBasePath &Path = Paths.front();
+ for (const CXXBasePathElement &B : Path)
+ Comps.push_back(OffsetOfNode(B.Base));
+ }
+
+ if (IndirectMemberDecl) {
+ for (auto *FI : IndirectMemberDecl->chain()) {
+ assert(isa<FieldDecl>(FI));
+ Comps.push_back(OffsetOfNode(OC.LocStart,
+ cast<FieldDecl>(FI), OC.LocEnd));
+ }
+ } else
+ Comps.push_back(OffsetOfNode(OC.LocStart, MemberDecl, OC.LocEnd));
+
+ CurrentType = MemberDecl->getType().getNonReferenceType();
+ }
+
+ return OffsetOfExpr::Create(Context, Context.getSizeType(), BuiltinLoc, TInfo,
+ Comps, Exprs, RParenLoc);
+}
+
+ExprResult Sema::ActOnBuiltinOffsetOf(Scope *S,
+ SourceLocation BuiltinLoc,
+ SourceLocation TypeLoc,
+ ParsedType ParsedArgTy,
+ ArrayRef<OffsetOfComponent> Components,
+ SourceLocation RParenLoc) {
+
+ TypeSourceInfo *ArgTInfo;
+ QualType ArgTy = GetTypeFromParser(ParsedArgTy, &ArgTInfo);
+ if (ArgTy.isNull())
+ return ExprError();
+
+ if (!ArgTInfo)
+ ArgTInfo = Context.getTrivialTypeSourceInfo(ArgTy, TypeLoc);
+
+ return BuildBuiltinOffsetOf(BuiltinLoc, ArgTInfo, Components, RParenLoc);
+}
+
+
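+/// __builtin_choose_expr(C, LHS, RHS) has the type and value of LHS when the
+/// constant condition C is nonzero, and of RHS otherwise.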
+ExprResult Sema::ActOnChooseExpr(SourceLocation BuiltinLoc,
+ Expr *CondExpr,
+ Expr *LHSExpr, Expr *RHSExpr,
+ SourceLocation RPLoc) {
+ assert((CondExpr && LHSExpr && RHSExpr) && "Missing type argument(s)");
+
+ ExprValueKind VK = VK_RValue;
+ ExprObjectKind OK = OK_Ordinary;
+ QualType resType;
+ bool ValueDependent = false;
+ bool CondIsTrue = false;
+ if (CondExpr->isTypeDependent() || CondExpr->isValueDependent()) {
+ resType = Context.DependentTy;
+ ValueDependent = true;
+ } else {
+ // The conditional expression is required to be a constant expression.
+ llvm::APSInt condEval(32);
+ ExprResult CondICE
+ = VerifyIntegerConstantExpression(CondExpr, &condEval,
+ diag::err_typecheck_choose_expr_requires_constant, false);
+ if (CondICE.isInvalid())
+ return ExprError();
+ CondExpr = CondICE.get();
+ CondIsTrue = condEval.getZExtValue();
+
+    // If the condition is nonzero, the AST type is the same as the LHSExpr's.
+ Expr *ActiveExpr = CondIsTrue ? LHSExpr : RHSExpr;
+
+ resType = ActiveExpr->getType();
+ ValueDependent = ActiveExpr->isValueDependent();
+ VK = ActiveExpr->getValueKind();
+ OK = ActiveExpr->getObjectKind();
+ }
+
+ return new (Context)
+ ChooseExpr(BuiltinLoc, CondExpr, LHSExpr, RHSExpr, resType, VK, OK, RPLoc,
+ CondIsTrue, resType->isDependentType(), ValueDependent);
+}
+
+//===----------------------------------------------------------------------===//
+// Clang Extensions.
+//===----------------------------------------------------------------------===//
+
+/// ActOnBlockStart - This callback is invoked when a block literal is started.
+void Sema::ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope) {
+ BlockDecl *Block = BlockDecl::Create(Context, CurContext, CaretLoc);
+
+ if (LangOpts.CPlusPlus) {
+ Decl *ManglingContextDecl;
+ if (MangleNumberingContext *MCtx =
+ getCurrentMangleNumberContext(Block->getDeclContext(),
+ ManglingContextDecl)) {
+ unsigned ManglingNumber = MCtx->getManglingNumber(Block);
+ Block->setBlockMangling(ManglingNumber, ManglingContextDecl);
+ }
+ }
+
+ PushBlockScope(CurScope, Block);
+ CurContext->addDecl(Block);
+ if (CurScope)
+ PushDeclContext(CurScope, Block);
+ else
+ CurContext = Block;
+
+ getCurBlock()->HasImplicitReturnType = true;
+
+ // Enter a new evaluation context to insulate the block from any
+ // cleanups from the enclosing full-expression.
+ PushExpressionEvaluationContext(PotentiallyEvaluated);
+}
+
+void Sema::ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
+ Scope *CurScope) {
+ assert(ParamInfo.getIdentifier() == nullptr &&
+ "block-id should have no identifier!");
+ assert(ParamInfo.getContext() == Declarator::BlockLiteralContext);
+ BlockScopeInfo *CurBlock = getCurBlock();
+
+ TypeSourceInfo *Sig = GetTypeForDeclarator(ParamInfo, CurScope);
+ QualType T = Sig->getType();
+
+ // FIXME: We should allow unexpanded parameter packs here, but that would,
+ // in turn, make the block expression contain unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(CaretLoc, Sig, UPPC_Block)) {
+ // Drop the parameters.
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.HasTrailingReturn = false;
+ EPI.TypeQuals |= DeclSpec::TQ_const;
+ T = Context.getFunctionType(Context.DependentTy, None, EPI);
+ Sig = Context.getTrivialTypeSourceInfo(T);
+ }
+
+ // GetTypeForDeclarator always produces a function type for a block
+ // literal signature. Furthermore, it is always a FunctionProtoType
+ // unless the function was written with a typedef.
+ assert(T->isFunctionType() &&
+ "GetTypeForDeclarator made a non-function block signature");
+
+ // Look for an explicit signature in that function type.
+ FunctionProtoTypeLoc ExplicitSignature;
+
+ TypeLoc tmp = Sig->getTypeLoc().IgnoreParens();
+ if ((ExplicitSignature = tmp.getAs<FunctionProtoTypeLoc>())) {
+
+ // Check whether that explicit signature was synthesized by
+ // GetTypeForDeclarator. If so, don't save that as part of the
+ // written signature.
+ if (ExplicitSignature.getLocalRangeBegin() ==
+ ExplicitSignature.getLocalRangeEnd()) {
+ // This would be much cheaper if we stored TypeLocs instead of
+ // TypeSourceInfos.
+ TypeLoc Result = ExplicitSignature.getReturnLoc();
+ unsigned Size = Result.getFullDataSize();
+ Sig = Context.CreateTypeSourceInfo(Result.getType(), Size);
+ Sig->getTypeLoc().initializeFullCopy(Result, Size);
+
+ ExplicitSignature = FunctionProtoTypeLoc();
+ }
+ }
+
+ CurBlock->TheDecl->setSignatureAsWritten(Sig);
+ CurBlock->FunctionType = T;
+
+ const FunctionType *Fn = T->getAs<FunctionType>();
+ QualType RetTy = Fn->getReturnType();
+ bool isVariadic =
+ (isa<FunctionProtoType>(Fn) && cast<FunctionProtoType>(Fn)->isVariadic());
+
+ CurBlock->TheDecl->setIsVariadic(isVariadic);
+
+ // Context.DependentTy is used as a placeholder for a missing block
+ // return type. TODO: what should we do with declarators like:
+ // ^ * { ... }
+ // If the answer is "apply template argument deduction"....
+ if (RetTy != Context.DependentTy) {
+ CurBlock->ReturnType = RetTy;
+ CurBlock->TheDecl->setBlockMissingReturnType(false);
+ CurBlock->HasImplicitReturnType = false;
+ }
+
+ // Push block parameters from the declarator if we had them.
+ SmallVector<ParmVarDecl*, 8> Params;
+ if (ExplicitSignature) {
+ for (unsigned I = 0, E = ExplicitSignature.getNumParams(); I != E; ++I) {
+ ParmVarDecl *Param = ExplicitSignature.getParam(I);
+ if (Param->getIdentifier() == nullptr &&
+ !Param->isImplicit() &&
+ !Param->isInvalidDecl() &&
+ !getLangOpts().CPlusPlus)
+ Diag(Param->getLocation(), diag::err_parameter_name_omitted);
+ Params.push_back(Param);
+ }
+
+ // Fake up parameter variables if we have a typedef, like
+ // ^ fntype { ... }
+ } else if (const FunctionProtoType *Fn = T->getAs<FunctionProtoType>()) {
+ for (const auto &I : Fn->param_types()) {
+ ParmVarDecl *Param = BuildParmVarDeclForTypedef(
+ CurBlock->TheDecl, ParamInfo.getLocStart(), I);
+ Params.push_back(Param);
+ }
+ }
+
+ // Set the parameters on the block decl.
+ if (!Params.empty()) {
+ CurBlock->TheDecl->setParams(Params);
+ CheckParmsForFunctionDef(CurBlock->TheDecl->param_begin(),
+ CurBlock->TheDecl->param_end(),
+ /*CheckParameterNames=*/false);
+ }
+
+ // Finally we can process decl attributes.
+ ProcessDeclAttributes(CurScope, CurBlock->TheDecl, ParamInfo);
+
+ // Put the parameter variables in scope.
+ for (auto AI : CurBlock->TheDecl->params()) {
+ AI->setOwningFunction(CurBlock->TheDecl);
+
+ // If this has an identifier, add it to the scope stack.
+ if (AI->getIdentifier()) {
+ CheckShadow(CurBlock->TheScope, AI);
+
+ PushOnScopeChains(AI, CurBlock->TheScope);
+ }
+ }
+}
+
+/// ActOnBlockError - If there is an error parsing a block, this callback
+/// is invoked to pop the information about the block from the action impl.
+void Sema::ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope) {
+ // Leave the expression-evaluation context.
+ DiscardCleanupsInEvaluationContext();
+ PopExpressionEvaluationContext();
+
+ // Pop off CurBlock, handle nested blocks.
+ PopDeclContext();
+ PopFunctionScopeInfo();
+}
+
+/// ActOnBlockStmtExpr - This is called when the body of a block statement
+/// literal was successfully completed. ^(int x){...}
+ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
+ Stmt *Body, Scope *CurScope) {
+ // If blocks are disabled, emit an error.
+ if (!LangOpts.Blocks)
+ Diag(CaretLoc, diag::err_blocks_disable);
+
+ // Leave the expression-evaluation context.
+ if (hasAnyUnrecoverableErrorsInThisFunction())
+ DiscardCleanupsInEvaluationContext();
+ assert(!ExprNeedsCleanups && "cleanups within block not correctly bound!");
+ PopExpressionEvaluationContext();
+
+ BlockScopeInfo *BSI = cast<BlockScopeInfo>(FunctionScopes.back());
+
+ if (BSI->HasImplicitReturnType)
+ deduceClosureReturnType(*BSI);
+
+ PopDeclContext();
+
+ QualType RetTy = Context.VoidTy;
+ if (!BSI->ReturnType.isNull())
+ RetTy = BSI->ReturnType;
+
+ bool NoReturn = BSI->TheDecl->hasAttr<NoReturnAttr>();
+ QualType BlockTy;
+
+ // Set the captured variables on the block.
+ // FIXME: Share capture structure between BlockDecl and CapturingScopeInfo!
+ SmallVector<BlockDecl::Capture, 4> Captures;
+ for (CapturingScopeInfo::Capture &Cap : BSI->Captures) {
+ if (Cap.isThisCapture())
+ continue;
+ BlockDecl::Capture NewCap(Cap.getVariable(), Cap.isBlockCapture(),
+ Cap.isNested(), Cap.getInitExpr());
+ Captures.push_back(NewCap);
+ }
+ BSI->TheDecl->setCaptures(Context, Captures, BSI->CXXThisCaptureIndex != 0);
+
+ // If the user wrote a function type in some form, try to use that.
+ if (!BSI->FunctionType.isNull()) {
+ const FunctionType *FTy = BSI->FunctionType->getAs<FunctionType>();
+
+ FunctionType::ExtInfo Ext = FTy->getExtInfo();
+ if (NoReturn && !Ext.getNoReturn()) Ext = Ext.withNoReturn(true);
+
+ // Turn protoless block types into nullary block types.
+ if (isa<FunctionNoProtoType>(FTy)) {
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExtInfo = Ext;
+ BlockTy = Context.getFunctionType(RetTy, None, EPI);
+
+ // Otherwise, if we don't need to change anything about the function type,
+ // preserve its sugar structure.
+ } else if (FTy->getReturnType() == RetTy &&
+ (!NoReturn || FTy->getNoReturnAttr())) {
+ BlockTy = BSI->FunctionType;
+
+ // Otherwise, make the minimal modifications to the function type.
+ } else {
+ const FunctionProtoType *FPT = cast<FunctionProtoType>(FTy);
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ EPI.TypeQuals = 0; // FIXME: silently?
+ EPI.ExtInfo = Ext;
+ BlockTy = Context.getFunctionType(RetTy, FPT->getParamTypes(), EPI);
+ }
+
+ // If we don't have a function type, just build one from nothing.
+ } else {
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExtInfo = FunctionType::ExtInfo().withNoReturn(NoReturn);
+ BlockTy = Context.getFunctionType(RetTy, None, EPI);
+ }
+
+ DiagnoseUnusedParameters(BSI->TheDecl->param_begin(),
+ BSI->TheDecl->param_end());
+ BlockTy = Context.getBlockPointerType(BlockTy);
+
+ // If needed, diagnose invalid gotos and switches in the block.
+ if (getCurFunction()->NeedsScopeChecking() &&
+ !PP.isCodeCompletionEnabled())
+ DiagnoseInvalidJumps(cast<CompoundStmt>(Body));
+
+ BSI->TheDecl->setBody(cast<CompoundStmt>(Body));
+
+ // Try to apply the named return value optimization. We have to check again
+ // if we can do this, though, because blocks keep return statements around
+ // to deduce an implicit return type.
+ if (getLangOpts().CPlusPlus && RetTy->isRecordType() &&
+ !BSI->TheDecl->isDependentContext())
+ computeNRVO(Body, BSI);
+
+ BlockExpr *Result = new (Context) BlockExpr(BSI->TheDecl, BlockTy);
+ AnalysisBasedWarnings::Policy WP = AnalysisWarnings.getDefaultPolicy();
+ PopFunctionScopeInfo(&WP, Result->getBlockDecl(), Result);
+
+ // If the block isn't obviously global, i.e. it captures anything at
+ // all, then we need to do a few things in the surrounding context:
+ if (Result->getBlockDecl()->hasCaptures()) {
+ // First, this expression has a new cleanup object.
+ ExprCleanupObjects.push_back(Result->getBlockDecl());
+ ExprNeedsCleanups = true;
+
+ // It also gets a branch-protected scope if any of the captured
+ // variables needs destruction.
+ for (const auto &CI : Result->getBlockDecl()->captures()) {
+ const VarDecl *var = CI.getVariable();
+ if (var->getType().isDestructedType() != QualType::DK_none) {
+ getCurFunction()->setHasBranchProtectedScope();
+ break;
+ }
+ }
+ }
+
+ return Result;
+}
+
+ExprResult Sema::ActOnVAArg(SourceLocation BuiltinLoc,
+ Expr *E, ParsedType Ty,
+ SourceLocation RPLoc) {
+ TypeSourceInfo *TInfo;
+ GetTypeFromParser(Ty, &TInfo);
+ return BuildVAArgExpr(BuiltinLoc, E, TInfo, RPLoc);
+}
+
+ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc,
+ Expr *E, TypeSourceInfo *TInfo,
+ SourceLocation RPLoc) {
+ Expr *OrigExpr = E;
+ bool IsMS = false;
+
+ // It might be a __builtin_ms_va_list. (But don't ever mark a va_arg()
+ // as Microsoft ABI on an actual Microsoft platform, where
+ // __builtin_ms_va_list and __builtin_va_list are the same.)
+ if (!E->isTypeDependent() && Context.getTargetInfo().hasBuiltinMSVaList() &&
+ Context.getTargetInfo().getBuiltinVaListKind() != TargetInfo::CharPtrBuiltinVaList) {
+ QualType MSVaListType = Context.getBuiltinMSVaListType();
+ if (Context.hasSameType(MSVaListType, E->getType())) {
+ if (CheckForModifiableLvalue(E, BuiltinLoc, *this))
+ return ExprError();
+ IsMS = true;
+ }
+ }
+
+ // Get the va_list type
+ QualType VaListType = Context.getBuiltinVaListType();
+ if (!IsMS) {
+ if (VaListType->isArrayType()) {
+ // Deal with implicit array decay; for example, on x86-64,
+ // va_list is an array, but it's supposed to decay to
+ // a pointer for va_arg.
+ VaListType = Context.getArrayDecayedType(VaListType);
+ // Make sure the input expression also decays appropriately.
+ ExprResult Result = UsualUnaryConversions(E);
+ if (Result.isInvalid())
+ return ExprError();
+ E = Result.get();
+ } else if (VaListType->isRecordType() && getLangOpts().CPlusPlus) {
+ // If va_list is a record type and we are compiling in C++ mode,
+ // check the argument using reference binding.
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(
+ Context, Context.getLValueReferenceType(VaListType), false);
+ ExprResult Init = PerformCopyInitialization(Entity, SourceLocation(), E);
+ if (Init.isInvalid())
+ return ExprError();
+ E = Init.getAs<Expr>();
+ } else {
+ // Otherwise, the va_list argument must be an l-value because
+ // it is modified by va_arg.
+ if (!E->isTypeDependent() &&
+ CheckForModifiableLvalue(E, BuiltinLoc, *this))
+ return ExprError();
+ }
+ }
+
+ if (!IsMS && !E->isTypeDependent() &&
+ !Context.hasSameType(VaListType, E->getType()))
+ return ExprError(Diag(E->getLocStart(),
+ diag::err_first_argument_to_va_arg_not_of_type_va_list)
+ << OrigExpr->getType() << E->getSourceRange());
+
+ if (!TInfo->getType()->isDependentType()) {
+ if (RequireCompleteType(TInfo->getTypeLoc().getBeginLoc(), TInfo->getType(),
+ diag::err_second_parameter_to_va_arg_incomplete,
+ TInfo->getTypeLoc()))
+ return ExprError();
+
+ if (RequireNonAbstractType(TInfo->getTypeLoc().getBeginLoc(),
+ TInfo->getType(),
+ diag::err_second_parameter_to_va_arg_abstract,
+ TInfo->getTypeLoc()))
+ return ExprError();
+
+ if (!TInfo->getType().isPODType(Context)) {
+ Diag(TInfo->getTypeLoc().getBeginLoc(),
+ TInfo->getType()->isObjCLifetimeType()
+ ? diag::warn_second_parameter_to_va_arg_ownership_qualified
+ : diag::warn_second_parameter_to_va_arg_not_pod)
+ << TInfo->getType()
+ << TInfo->getTypeLoc().getSourceRange();
+ }
+
+ // Check for va_arg where arguments of the given type will be promoted
+ // (i.e. this va_arg is guaranteed to have undefined behavior).
+ QualType PromoteType;
+ if (TInfo->getType()->isPromotableIntegerType()) {
+ PromoteType = Context.getPromotedIntegerType(TInfo->getType());
+ if (Context.typesAreCompatible(PromoteType, TInfo->getType()))
+ PromoteType = QualType();
+ }
+ if (TInfo->getType()->isSpecificBuiltinType(BuiltinType::Float))
+ PromoteType = Context.DoubleTy;
+ if (!PromoteType.isNull())
+ DiagRuntimeBehavior(TInfo->getTypeLoc().getBeginLoc(), E,
+ PDiag(diag::warn_second_parameter_to_va_arg_never_compatible)
+ << TInfo->getType()
+ << PromoteType
+ << TInfo->getTypeLoc().getSourceRange());
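+    // For instance (illustrative): 'va_arg(ap, float)' can never match an
+    // argument, because variadic float arguments are promoted to double;
+    // the same holds for 'va_arg(ap, char)' versus the promoted 'int'.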
+ }
+
+ QualType T = TInfo->getType().getNonLValueExprType(Context);
+ return new (Context) VAArgExpr(BuiltinLoc, E, TInfo, RPLoc, T, IsMS);
+}
+
+ExprResult Sema::ActOnGNUNullExpr(SourceLocation TokenLoc) {
+ // The type of __null will be int or long, depending on the size of
+ // pointers on the target.
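+  // (e.g. on a typical LP64 target __null is 'long'; on an ILP32 target,
+  // where int and pointers are both 32 bits wide, it is 'int').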
+ QualType Ty;
+ unsigned pw = Context.getTargetInfo().getPointerWidth(0);
+ if (pw == Context.getTargetInfo().getIntWidth())
+ Ty = Context.IntTy;
+ else if (pw == Context.getTargetInfo().getLongWidth())
+ Ty = Context.LongTy;
+ else if (pw == Context.getTargetInfo().getLongLongWidth())
+ Ty = Context.LongLongTy;
+ else {
+ llvm_unreachable("I don't know size of pointer!");
+ }
+
+ return new (Context) GNUNullExpr(Ty, TokenLoc);
+}
+
+bool
+Sema::ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&Exp) {
+ if (!getLangOpts().ObjC1)
+ return false;
+
+ const ObjCObjectPointerType *PT = DstType->getAs<ObjCObjectPointerType>();
+ if (!PT)
+ return false;
+
+ if (!PT->isObjCIdType()) {
+ // Check if the destination is the 'NSString' interface.
+ const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
+ if (!ID || !ID->getIdentifier()->isStr("NSString"))
+ return false;
+ }
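+  // For example (illustrative): 'NSString *s = "hello";' reaches this
+  // check and is recovered below by prepending '@' to form @"hello".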
+
+ // Ignore any parens, implicit casts (should only be
+ // array-to-pointer decays), and not-so-opaque values. The last is
+ // important for making this trigger for property assignments.
+ Expr *SrcExpr = Exp->IgnoreParenImpCasts();
+ if (OpaqueValueExpr *OV = dyn_cast<OpaqueValueExpr>(SrcExpr))
+ if (OV->getSourceExpr())
+ SrcExpr = OV->getSourceExpr()->IgnoreParenImpCasts();
+
+ StringLiteral *SL = dyn_cast<StringLiteral>(SrcExpr);
+ if (!SL || !SL->isAscii())
+ return false;
+ Diag(SL->getLocStart(), diag::err_missing_atsign_prefix)
+ << FixItHint::CreateInsertion(SL->getLocStart(), "@");
+ Exp = BuildObjCStringLiteral(SL->getLocStart(), SL).get();
+ return true;
+}
+
+static bool maybeDiagnoseAssignmentToFunction(Sema &S, QualType DstType,
+ const Expr *SrcExpr) {
+ if (!DstType->isFunctionPointerType() ||
+ !SrcExpr->getType()->isFunctionType())
+ return false;
+
+ auto *DRE = dyn_cast<DeclRefExpr>(SrcExpr->IgnoreParenImpCasts());
+ if (!DRE)
+ return false;
+
+ auto *FD = dyn_cast<FunctionDecl>(DRE->getDecl());
+ if (!FD)
+ return false;
+
+ return !S.checkAddressOfFunctionIsAvailable(FD,
+ /*Complain=*/true,
+ SrcExpr->getLocStart());
+}
+
+bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
+ SourceLocation Loc,
+ QualType DstType, QualType SrcType,
+ Expr *SrcExpr, AssignmentAction Action,
+ bool *Complained) {
+ if (Complained)
+ *Complained = false;
+
+ // Decode the result (notice that AST's are still created for extensions).
+ bool CheckInferredResultType = false;
+ bool isInvalid = false;
+ unsigned DiagKind = 0;
+ FixItHint Hint;
+ ConversionFixItGenerator ConvHints;
+ bool MayHaveConvFixit = false;
+ bool MayHaveFunctionDiff = false;
+ const ObjCInterfaceDecl *IFace = nullptr;
+ const ObjCProtocolDecl *PDecl = nullptr;
+
+ switch (ConvTy) {
+ case Compatible:
+ DiagnoseAssignmentEnum(DstType, SrcType, SrcExpr);
+ return false;
+
+ case PointerToInt:
+ DiagKind = diag::ext_typecheck_convert_pointer_int;
+ ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
+ MayHaveConvFixit = true;
+ break;
+ case IntToPointer:
+ DiagKind = diag::ext_typecheck_convert_int_pointer;
+ ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
+ MayHaveConvFixit = true;
+ break;
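+  // For instance (illustrative): 'int *p = 42;' lands in the IntToPointer
+  // case above, and the fix-it generator may suggest an explicit cast.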
+ case IncompatiblePointer:
+ DiagKind =
+ (Action == AA_Passing_CFAudited ?
+ diag::err_arc_typecheck_convert_incompatible_pointer :
+ diag::ext_typecheck_convert_incompatible_pointer);
+ CheckInferredResultType = DstType->isObjCObjectPointerType() &&
+ SrcType->isObjCObjectPointerType();
+ if (Hint.isNull() && !CheckInferredResultType) {
+ ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
+ }
+ else if (CheckInferredResultType) {
+ SrcType = SrcType.getUnqualifiedType();
+ DstType = DstType.getUnqualifiedType();
+ }
+ MayHaveConvFixit = true;
+ break;
+ case IncompatiblePointerSign:
+ DiagKind = diag::ext_typecheck_convert_incompatible_pointer_sign;
+ break;
+ case FunctionVoidPointer:
+ DiagKind = diag::ext_typecheck_convert_pointer_void_func;
+ break;
+ case IncompatiblePointerDiscardsQualifiers: {
+ // Perform array-to-pointer decay if necessary.
+ if (SrcType->isArrayType()) SrcType = Context.getArrayDecayedType(SrcType);
+
+ Qualifiers lhq = SrcType->getPointeeType().getQualifiers();
+ Qualifiers rhq = DstType->getPointeeType().getQualifiers();
+ if (lhq.getAddressSpace() != rhq.getAddressSpace()) {
+ DiagKind = diag::err_typecheck_incompatible_address_space;
+ break;
+ } else if (lhq.getObjCLifetime() != rhq.getObjCLifetime()) {
+ DiagKind = diag::err_typecheck_incompatible_ownership;
+ break;
+ }
+
+ llvm_unreachable("unknown error case for discarding qualifiers!");
+ // fallthrough
+ }
+ case CompatiblePointerDiscardsQualifiers:
+ // If the qualifiers lost were because we were applying the
+ // (deprecated) C++ conversion from a string literal to a char*
+ // (or wchar_t*), then there was no error (C++ 4.2p2). FIXME:
+ // Ideally, this check would be performed in
+ // checkPointerTypesForAssignment. However, that would require a
+ // bit of refactoring (so that the second argument is an
+ // expression, rather than a type), which should be done as part
+ // of a larger effort to fix checkPointerTypesForAssignment for
+ // C++ semantics.
+ if (getLangOpts().CPlusPlus &&
+ IsStringLiteralToNonConstPointerConversion(SrcExpr, DstType))
+ return false;
+ DiagKind = diag::ext_typecheck_convert_discards_qualifiers;
+ break;
+ case IncompatibleNestedPointerQualifiers:
+ DiagKind = diag::ext_nested_pointer_qualifier_mismatch;
+ break;
+ case IntToBlockPointer:
+ DiagKind = diag::err_int_to_block_pointer;
+ break;
+ case IncompatibleBlockPointer:
+ DiagKind = diag::err_typecheck_convert_incompatible_block_pointer;
+ break;
+ case IncompatibleObjCQualifiedId: {
+ if (SrcType->isObjCQualifiedIdType()) {
+ const ObjCObjectPointerType *srcOPT =
+ SrcType->getAs<ObjCObjectPointerType>();
+ for (auto *srcProto : srcOPT->quals()) {
+ PDecl = srcProto;
+ break;
+ }
+ if (const ObjCInterfaceType *IFaceT =
+ DstType->getAs<ObjCObjectPointerType>()->getInterfaceType())
+ IFace = IFaceT->getDecl();
+ }
+ else if (DstType->isObjCQualifiedIdType()) {
+ const ObjCObjectPointerType *dstOPT =
+ DstType->getAs<ObjCObjectPointerType>();
+ for (auto *dstProto : dstOPT->quals()) {
+ PDecl = dstProto;
+ break;
+ }
+ if (const ObjCInterfaceType *IFaceT =
+ SrcType->getAs<ObjCObjectPointerType>()->getInterfaceType())
+ IFace = IFaceT->getDecl();
+ }
+ DiagKind = diag::warn_incompatible_qualified_id;
+ break;
+ }
+ case IncompatibleVectors:
+ DiagKind = diag::warn_incompatible_vectors;
+ break;
+ case IncompatibleObjCWeakRef:
+ DiagKind = diag::err_arc_weak_unavailable_assign;
+ break;
+ case Incompatible:
+ if (maybeDiagnoseAssignmentToFunction(*this, DstType, SrcExpr)) {
+ if (Complained)
+ *Complained = true;
+ return true;
+ }
+
+ DiagKind = diag::err_typecheck_convert_incompatible;
+ ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
+ MayHaveConvFixit = true;
+ isInvalid = true;
+ MayHaveFunctionDiff = true;
+ break;
+ }
+
+ QualType FirstType, SecondType;
+ switch (Action) {
+ case AA_Assigning:
+ case AA_Initializing:
+ // The destination type comes first.
+ FirstType = DstType;
+ SecondType = SrcType;
+ break;
+
+ case AA_Returning:
+ case AA_Passing:
+ case AA_Passing_CFAudited:
+ case AA_Converting:
+ case AA_Sending:
+ case AA_Casting:
+ // The source type comes first.
+ FirstType = SrcType;
+ SecondType = DstType;
+ break;
+ }
+
+ PartialDiagnostic FDiag = PDiag(DiagKind);
+ if (Action == AA_Passing_CFAudited)
+ FDiag << FirstType << SecondType << AA_Passing << SrcExpr->getSourceRange();
+ else
+ FDiag << FirstType << SecondType << Action << SrcExpr->getSourceRange();
+
+ // If we can fix the conversion, suggest the FixIts.
+ assert(ConvHints.isNull() || Hint.isNull());
+ if (!ConvHints.isNull()) {
+ for (FixItHint &H : ConvHints.Hints)
+ FDiag << H;
+ } else {
+ FDiag << Hint;
+ }
+ if (MayHaveConvFixit) { FDiag << (unsigned) (ConvHints.Kind); }
+
+ if (MayHaveFunctionDiff)
+ HandleFunctionTypeMismatch(FDiag, SecondType, FirstType);
+
+ Diag(Loc, FDiag);
+ if (DiagKind == diag::warn_incompatible_qualified_id &&
+ PDecl && IFace && !IFace->hasDefinition())
+ Diag(IFace->getLocation(), diag::not_incomplete_class_and_qualified_id)
+ << IFace->getName() << PDecl->getName();
+
+ if (SecondType == Context.OverloadTy)
+ NoteAllOverloadCandidates(OverloadExpr::find(SrcExpr).Expression,
+ FirstType, /*TakingAddress=*/true);
+
+ if (CheckInferredResultType)
+ EmitRelatedResultTypeNote(SrcExpr);
+
+ if (Action == AA_Returning && ConvTy == IncompatiblePointer)
+ EmitRelatedResultTypeNoteForReturn(DstType);
+
+ if (Complained)
+ *Complained = true;
+ return isInvalid;
+}
+
+ExprResult Sema::VerifyIntegerConstantExpression(Expr *E,
+ llvm::APSInt *Result) {
+ class SimpleICEDiagnoser : public VerifyICEDiagnoser {
+ public:
+ void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) override {
+ S.Diag(Loc, diag::err_expr_not_ice) << S.LangOpts.CPlusPlus << SR;
+ }
+ } Diagnoser;
+
+ return VerifyIntegerConstantExpression(E, Result, Diagnoser);
+}
+
+ExprResult Sema::VerifyIntegerConstantExpression(Expr *E,
+ llvm::APSInt *Result,
+ unsigned DiagID,
+ bool AllowFold) {
+ class IDDiagnoser : public VerifyICEDiagnoser {
+ unsigned DiagID;
+
+ public:
+ IDDiagnoser(unsigned DiagID)
+ : VerifyICEDiagnoser(DiagID == 0), DiagID(DiagID) { }
+
+ void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) override {
+ S.Diag(Loc, DiagID) << SR;
+ }
+ } Diagnoser(DiagID);
+
+ return VerifyIntegerConstantExpression(E, Result, Diagnoser, AllowFold);
+}
+
+void Sema::VerifyICEDiagnoser::diagnoseFold(Sema &S, SourceLocation Loc,
+ SourceRange SR) {
+ S.Diag(Loc, diag::ext_expr_not_ice) << SR << S.LangOpts.CPlusPlus;
+}
+
+ExprResult
+Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
+ VerifyICEDiagnoser &Diagnoser,
+ bool AllowFold) {
+ SourceLocation DiagLoc = E->getLocStart();
+
+ if (getLangOpts().CPlusPlus11) {
+ // C++11 [expr.const]p5:
+ // If an expression of literal class type is used in a context where an
+ // integral constant expression is required, then that class type shall
+ // have a single non-explicit conversion function to an integral or
+ // unscoped enumeration type
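+    // For example (illustrative): 'struct S { constexpr operator int()
+    // const { return 4; } };' makes 'int a[S()];' well-formed here.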
+ ExprResult Converted;
+ class CXX11ConvertDiagnoser : public ICEConvertDiagnoser {
+ public:
+ CXX11ConvertDiagnoser(bool Silent)
+ : ICEConvertDiagnoser(/*AllowScopedEnumerations*/false,
+ Silent, true) {}
+
+ SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
+ QualType T) override {
+ return S.Diag(Loc, diag::err_ice_not_integral) << T;
+ }
+
+ SemaDiagnosticBuilder diagnoseIncomplete(
+ Sema &S, SourceLocation Loc, QualType T) override {
+ return S.Diag(Loc, diag::err_ice_incomplete_type) << T;
+ }
+
+ SemaDiagnosticBuilder diagnoseExplicitConv(
+ Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
+ return S.Diag(Loc, diag::err_ice_explicit_conversion) << T << ConvTy;
+ }
+
+ SemaDiagnosticBuilder noteExplicitConv(
+ Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
+ return S.Diag(Conv->getLocation(), diag::note_ice_conversion_here)
+ << ConvTy->isEnumeralType() << ConvTy;
+ }
+
+ SemaDiagnosticBuilder diagnoseAmbiguous(
+ Sema &S, SourceLocation Loc, QualType T) override {
+ return S.Diag(Loc, diag::err_ice_ambiguous_conversion) << T;
+ }
+
+ SemaDiagnosticBuilder noteAmbiguous(
+ Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
+ return S.Diag(Conv->getLocation(), diag::note_ice_conversion_here)
+ << ConvTy->isEnumeralType() << ConvTy;
+ }
+
+ SemaDiagnosticBuilder diagnoseConversion(
+ Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
+ llvm_unreachable("conversion functions are permitted");
+ }
+ } ConvertDiagnoser(Diagnoser.Suppress);
+
+ Converted = PerformContextualImplicitConversion(DiagLoc, E,
+ ConvertDiagnoser);
+ if (Converted.isInvalid())
+ return Converted;
+ E = Converted.get();
+ if (!E->getType()->isIntegralOrUnscopedEnumerationType())
+ return ExprError();
+ } else if (!E->getType()->isIntegralOrUnscopedEnumerationType()) {
+ // An ICE must be of integral or unscoped enumeration type.
+ if (!Diagnoser.Suppress)
+ Diagnoser.diagnoseNotICE(*this, DiagLoc, E->getSourceRange());
+ return ExprError();
+ }
+
+ // Circumvent ICE checking in C++11 to avoid evaluating the expression twice
+ // in the non-ICE case.
+ if (!getLangOpts().CPlusPlus11 && E->isIntegerConstantExpr(Context)) {
+ if (Result)
+ *Result = E->EvaluateKnownConstInt(Context);
+ return E;
+ }
+
+ Expr::EvalResult EvalResult;
+ SmallVector<PartialDiagnosticAt, 8> Notes;
+ EvalResult.Diag = &Notes;
+
+  // Try to evaluate the expression and, as a side-effect, produce
+  // diagnostics explaining why it's not a constant expression.
+ bool Folded = E->EvaluateAsRValue(EvalResult, Context) &&
+ EvalResult.Val.isInt() && !EvalResult.HasSideEffects;
+
+ // In C++11, we can rely on diagnostics being produced for any expression
+ // which is not a constant expression. If no diagnostics were produced, then
+ // this is a constant expression.
+ if (Folded && getLangOpts().CPlusPlus11 && Notes.empty()) {
+ if (Result)
+ *Result = EvalResult.Val.getInt();
+ return E;
+ }
+
+ // If our only note is the usual "invalid subexpression" note, just point
+ // the caret at its location rather than producing an essentially
+ // redundant note.
+ if (Notes.size() == 1 && Notes[0].second.getDiagID() ==
+ diag::note_invalid_subexpr_in_const_expr) {
+ DiagLoc = Notes[0].first;
+ Notes.clear();
+ }
+
+ if (!Folded || !AllowFold) {
+ if (!Diagnoser.Suppress) {
+ Diagnoser.diagnoseNotICE(*this, DiagLoc, E->getSourceRange());
+ for (const PartialDiagnosticAt &Note : Notes)
+ Diag(Note.first, Note.second);
+ }
+
+ return ExprError();
+ }
+
+ Diagnoser.diagnoseFold(*this, DiagLoc, E->getSourceRange());
+ for (const PartialDiagnosticAt &Note : Notes)
+ Diag(Note.first, Note.second);
+
+ if (Result)
+ *Result = EvalResult.Val.getInt();
+ return E;
+}
+
+namespace {
+  // Handle the case where we conclude that an expression which we
+  // speculatively considered to be unevaluated is actually evaluated.
+ class TransformToPE : public TreeTransform<TransformToPE> {
+ typedef TreeTransform<TransformToPE> BaseTransform;
+
+ public:
+ TransformToPE(Sema &SemaRef) : BaseTransform(SemaRef) { }
+
+ // Make sure we redo semantic analysis
+ bool AlwaysRebuild() { return true; }
+
+ // Make sure we handle LabelStmts correctly.
+ // FIXME: This does the right thing, but maybe we need a more general
+ // fix to TreeTransform?
+ StmtResult TransformLabelStmt(LabelStmt *S) {
+ S->getDecl()->setStmt(nullptr);
+ return BaseTransform::TransformLabelStmt(S);
+ }
+
+ // We need to special-case DeclRefExprs referring to FieldDecls which
+ // are not part of a member pointer formation; normal TreeTransforming
+ // doesn't catch this case because of the way we represent them in the AST.
+ // FIXME: This is a bit ugly; is it really the best way to handle this
+ // case?
+ //
+ // Error on DeclRefExprs referring to FieldDecls.
+ ExprResult TransformDeclRefExpr(DeclRefExpr *E) {
+ if (isa<FieldDecl>(E->getDecl()) &&
+ !SemaRef.isUnevaluatedContext())
+ return SemaRef.Diag(E->getLocation(),
+ diag::err_invalid_non_static_member_use)
+ << E->getDecl() << E->getSourceRange();
+
+ return BaseTransform::TransformDeclRefExpr(E);
+ }
+
+ // Exception: filter out member pointer formation
+ ExprResult TransformUnaryOperator(UnaryOperator *E) {
+ if (E->getOpcode() == UO_AddrOf && E->getType()->isMemberPointerType())
+ return E;
+
+ return BaseTransform::TransformUnaryOperator(E);
+ }
+
+ ExprResult TransformLambdaExpr(LambdaExpr *E) {
+ // Lambdas never need to be transformed.
+ return E;
+ }
+ };
+}
+
+ExprResult Sema::TransformToPotentiallyEvaluated(Expr *E) {
+ assert(isUnevaluatedContext() &&
+ "Should only transform unevaluated expressions");
+ ExprEvalContexts.back().Context =
+ ExprEvalContexts[ExprEvalContexts.size()-2].Context;
+ if (isUnevaluatedContext())
+ return E;
+ return TransformToPE(*this).TransformExpr(E);
+}
+
+void
+Sema::PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
+ Decl *LambdaContextDecl,
+ bool IsDecltype) {
+ ExprEvalContexts.emplace_back(NewContext, ExprCleanupObjects.size(),
+ ExprNeedsCleanups, LambdaContextDecl,
+ IsDecltype);
+ ExprNeedsCleanups = false;
+ if (!MaybeODRUseExprs.empty())
+ std::swap(MaybeODRUseExprs, ExprEvalContexts.back().SavedMaybeODRUseExprs);
+}
+
+void
+Sema::PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
+ ReuseLambdaContextDecl_t,
+ bool IsDecltype) {
+ Decl *ClosureContextDecl = ExprEvalContexts.back().ManglingContextDecl;
+ PushExpressionEvaluationContext(NewContext, ClosureContextDecl, IsDecltype);
+}
+
+void Sema::PopExpressionEvaluationContext() {
+ ExpressionEvaluationContextRecord& Rec = ExprEvalContexts.back();
+ unsigned NumTypos = Rec.NumTypos;
+
+ if (!Rec.Lambdas.empty()) {
+ if (Rec.isUnevaluated() || Rec.Context == ConstantEvaluated) {
+ unsigned D;
+ if (Rec.isUnevaluated()) {
+ // C++11 [expr.prim.lambda]p2:
+ // A lambda-expression shall not appear in an unevaluated operand
+ // (Clause 5).
+ D = diag::err_lambda_unevaluated_operand;
+ } else {
+ // C++1y [expr.const]p2:
+ // A conditional-expression e is a core constant expression unless the
+ // evaluation of e, following the rules of the abstract machine, would
+ // evaluate [...] a lambda-expression.
+ D = diag::err_lambda_in_constant_expression;
+ }
+ for (const auto *L : Rec.Lambdas)
+ Diag(L->getLocStart(), D);
+ } else {
+ // Mark the capture expressions odr-used. This was deferred
+ // during lambda expression creation.
+ for (auto *Lambda : Rec.Lambdas) {
+ for (auto *C : Lambda->capture_inits())
+ MarkDeclarationsReferencedInExpr(C);
+ }
+ }
+ }
+
+  // When we are coming out of an unevaluated context, clear out any
+ // temporaries that we may have created as part of the evaluation of
+ // the expression in that context: they aren't relevant because they
+ // will never be constructed.
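+  // (e.g. a temporary created in an unevaluated 'sizeof' operand is never
+  // constructed, so its pending cleanup can simply be dropped).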
+ if (Rec.isUnevaluated() || Rec.Context == ConstantEvaluated) {
+ ExprCleanupObjects.erase(ExprCleanupObjects.begin() + Rec.NumCleanupObjects,
+ ExprCleanupObjects.end());
+ ExprNeedsCleanups = Rec.ParentNeedsCleanups;
+ CleanupVarDeclMarking();
+ std::swap(MaybeODRUseExprs, Rec.SavedMaybeODRUseExprs);
+ // Otherwise, merge the contexts together.
+ } else {
+ ExprNeedsCleanups |= Rec.ParentNeedsCleanups;
+ MaybeODRUseExprs.insert(Rec.SavedMaybeODRUseExprs.begin(),
+ Rec.SavedMaybeODRUseExprs.end());
+ }
+
+ // Pop the current expression evaluation context off the stack.
+ ExprEvalContexts.pop_back();
+
+ if (!ExprEvalContexts.empty())
+ ExprEvalContexts.back().NumTypos += NumTypos;
+ else
+ assert(NumTypos == 0 && "There are outstanding typos after popping the "
+ "last ExpressionEvaluationContextRecord");
+}
+
+void Sema::DiscardCleanupsInEvaluationContext() {
+ ExprCleanupObjects.erase(
+ ExprCleanupObjects.begin() + ExprEvalContexts.back().NumCleanupObjects,
+ ExprCleanupObjects.end());
+ ExprNeedsCleanups = false;
+ MaybeODRUseExprs.clear();
+}
+
+ExprResult Sema::HandleExprEvaluationContextForTypeof(Expr *E) {
+ if (!E->getType()->isVariablyModifiedType())
+ return E;
+ return TransformToPotentiallyEvaluated(E);
+}
+
+static bool IsPotentiallyEvaluatedContext(Sema &SemaRef) {
+ // Do not mark anything as "used" within a dependent context; wait for
+ // an instantiation.
+ if (SemaRef.CurContext->isDependentContext())
+ return false;
+
+ switch (SemaRef.ExprEvalContexts.back().Context) {
+ case Sema::Unevaluated:
+ case Sema::UnevaluatedAbstract:
+ // We are in an expression that is not potentially evaluated; do nothing.
+ // (Depending on how you read the standard, we actually do need to do
+ // something here for null pointer constants, but the standard's
+ // definition of a null pointer constant is completely crazy.)
+ return false;
+
+ case Sema::ConstantEvaluated:
+ case Sema::PotentiallyEvaluated:
+ // We are in a potentially evaluated expression (or a constant-expression
+ // in C++03); we need to do implicit template instantiation, implicitly
+ // define class members, and mark most declarations as used.
+ return true;
+
+ case Sema::PotentiallyEvaluatedIfUsed:
+ // Referenced declarations will only be used if the construct in the
+ // containing expression is used.
+ return false;
+ }
+ llvm_unreachable("Invalid context");
+}
+
+/// \brief Mark a function referenced, and check whether it is odr-used
+/// (C++ [basic.def.odr]p2, C99 6.9p3)
+void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
+ bool OdrUse) {
+ assert(Func && "No function?");
+
+ Func->setReferenced();
+
+ // C++11 [basic.def.odr]p3:
+ // A function whose name appears as a potentially-evaluated expression is
+ // odr-used if it is the unique lookup result or the selected member of a
+ // set of overloaded functions [...].
+ //
+ // We (incorrectly) mark overload resolution as an unevaluated context, so we
+ // can just check that here. Skip the rest of this function if we've already
+ // marked the function as used.
+ if (Func->isUsed(/*CheckUsedAttr=*/false) ||
+ !IsPotentiallyEvaluatedContext(*this)) {
+ // C++11 [temp.inst]p3:
+ // Unless a function template specialization has been explicitly
+ // instantiated or explicitly specialized, the function template
+ // specialization is implicitly instantiated when the specialization is
+ // referenced in a context that requires a function definition to exist.
+ //
+ // We consider constexpr function templates to be referenced in a context
+ // that requires a definition to exist whenever they are referenced.
+ //
+ // FIXME: This instantiates constexpr functions too frequently. If this is
+ // really an unevaluated context (and we're not just in the definition of a
+ // function template or overload resolution or other cases which we
+ // incorrectly consider to be unevaluated contexts), and we're not in a
+ // subexpression which we actually need to evaluate (for instance, a
+ // template argument, array bound or an expression in a braced-init-list),
+ // we are not permitted to instantiate this constexpr function definition.
+ //
+ // FIXME: This also implicitly defines special members too frequently. They
+ // are only supposed to be implicitly defined if they are odr-used, but they
+ // are not odr-used from constant expressions in unevaluated contexts.
+ // However, they cannot be referenced if they are deleted, and they are
+ // deleted whenever the implicit definition of the special member would
+ // fail.
+ if (!Func->isConstexpr() || Func->getBody())
+ return;
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Func);
+ if (!Func->isImplicitlyInstantiable() && (!MD || MD->isUserProvided()))
+ return;
+ }
+
+ // Note that this declaration has been used.
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Func)) {
+ Constructor = cast<CXXConstructorDecl>(Constructor->getFirstDecl());
+ if (Constructor->isDefaulted() && !Constructor->isDeleted()) {
+ if (Constructor->isDefaultConstructor()) {
+ if (Constructor->isTrivial() && !Constructor->hasAttr<DLLExportAttr>())
+ return;
+ DefineImplicitDefaultConstructor(Loc, Constructor);
+ } else if (Constructor->isCopyConstructor()) {
+ DefineImplicitCopyConstructor(Loc, Constructor);
+ } else if (Constructor->isMoveConstructor()) {
+ DefineImplicitMoveConstructor(Loc, Constructor);
+ }
+ } else if (Constructor->getInheritedConstructor()) {
+ DefineInheritingConstructor(Loc, Constructor);
+ }
+ } else if (CXXDestructorDecl *Destructor =
+ dyn_cast<CXXDestructorDecl>(Func)) {
+ Destructor = cast<CXXDestructorDecl>(Destructor->getFirstDecl());
+ if (Destructor->isDefaulted() && !Destructor->isDeleted()) {
+ if (Destructor->isTrivial() && !Destructor->hasAttr<DLLExportAttr>())
+ return;
+ DefineImplicitDestructor(Loc, Destructor);
+ }
+ if (Destructor->isVirtual() && getLangOpts().AppleKext)
+ MarkVTableUsed(Loc, Destructor->getParent());
+ } else if (CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(Func)) {
+ if (MethodDecl->isOverloadedOperator() &&
+ MethodDecl->getOverloadedOperator() == OO_Equal) {
+ MethodDecl = cast<CXXMethodDecl>(MethodDecl->getFirstDecl());
+ if (MethodDecl->isDefaulted() && !MethodDecl->isDeleted()) {
+ if (MethodDecl->isCopyAssignmentOperator())
+ DefineImplicitCopyAssignment(Loc, MethodDecl);
+ else
+ DefineImplicitMoveAssignment(Loc, MethodDecl);
+ }
+ } else if (isa<CXXConversionDecl>(MethodDecl) &&
+ MethodDecl->getParent()->isLambda()) {
+ CXXConversionDecl *Conversion =
+ cast<CXXConversionDecl>(MethodDecl->getFirstDecl());
+ if (Conversion->isLambdaToBlockPointerConversion())
+ DefineImplicitLambdaToBlockPointerConversion(Loc, Conversion);
+ else
+ DefineImplicitLambdaToFunctionPointerConversion(Loc, Conversion);
+ } else if (MethodDecl->isVirtual() && getLangOpts().AppleKext)
+ MarkVTableUsed(Loc, MethodDecl->getParent());
+ }
+
+ // Recursive functions should be marked when used from another function.
+ // FIXME: Is this really right?
+ if (CurContext == Func) return;
+
+ // Resolve the exception specification for any function which is
+ // used: CodeGen will need it.
+ const FunctionProtoType *FPT = Func->getType()->getAs<FunctionProtoType>();
+ if (FPT && isUnresolvedExceptionSpec(FPT->getExceptionSpecType()))
+ ResolveExceptionSpec(Loc, FPT);
+
+ if (!OdrUse) return;
+
+ // Implicit instantiation of function templates and member functions of
+ // class templates.
+ if (Func->isImplicitlyInstantiable()) {
+ bool AlreadyInstantiated = false;
+ SourceLocation PointOfInstantiation = Loc;
+ if (FunctionTemplateSpecializationInfo *SpecInfo
+ = Func->getTemplateSpecializationInfo()) {
+ if (SpecInfo->getPointOfInstantiation().isInvalid())
+ SpecInfo->setPointOfInstantiation(Loc);
+ else if (SpecInfo->getTemplateSpecializationKind()
+ == TSK_ImplicitInstantiation) {
+ AlreadyInstantiated = true;
+ PointOfInstantiation = SpecInfo->getPointOfInstantiation();
+ }
+ } else if (MemberSpecializationInfo *MSInfo
+ = Func->getMemberSpecializationInfo()) {
+ if (MSInfo->getPointOfInstantiation().isInvalid())
+ MSInfo->setPointOfInstantiation(Loc);
+ else if (MSInfo->getTemplateSpecializationKind()
+ == TSK_ImplicitInstantiation) {
+ AlreadyInstantiated = true;
+ PointOfInstantiation = MSInfo->getPointOfInstantiation();
+ }
+ }
+
+ if (!AlreadyInstantiated || Func->isConstexpr()) {
+ if (isa<CXXRecordDecl>(Func->getDeclContext()) &&
+ cast<CXXRecordDecl>(Func->getDeclContext())->isLocalClass() &&
+ ActiveTemplateInstantiations.size())
+ PendingLocalImplicitInstantiations.push_back(
+ std::make_pair(Func, PointOfInstantiation));
+ else if (Func->isConstexpr())
+ // Do not defer instantiations of constexpr functions, to avoid the
+ // expression evaluator needing to call back into Sema if it sees a
+ // call to such a function.
+ InstantiateFunctionDefinition(PointOfInstantiation, Func);
+ else {
+ PendingInstantiations.push_back(std::make_pair(Func,
+ PointOfInstantiation));
+ // Notify the consumer that a function was implicitly instantiated.
+ Consumer.HandleCXXImplicitFunctionInstantiation(Func);
+ }
+ }
+ } else {
+ // Walk redefinitions, as some of them may be instantiable.
+ for (auto i : Func->redecls()) {
+ if (!i->isUsed(false) && i->isImplicitlyInstantiable())
+ MarkFunctionReferenced(Loc, i);
+ }
+ }
+
+ // Keep track of used but undefined functions.
+ if (!Func->isDefined()) {
+ if (mightHaveNonExternalLinkage(Func))
+ UndefinedButUsed.insert(std::make_pair(Func->getCanonicalDecl(), Loc));
+ else if (Func->getMostRecentDecl()->isInlined() &&
+ !LangOpts.GNUInline &&
+ !Func->getMostRecentDecl()->hasAttr<GNUInlineAttr>())
+ UndefinedButUsed.insert(std::make_pair(Func->getCanonicalDecl(), Loc));
+ }
+
+ // Normally the most current decl is marked used while processing the use and
+ // any subsequent decls are marked used by decl merging. This fails with
+ // template instantiation since marking can happen at the end of the file
+  // and, because of the two-phase lookup, this function is called with a
+ // decl in the middle of a decl chain. We loop to maintain the invariant
+ // that once a decl is used, all decls after it are also used.
+ for (FunctionDecl *F = Func->getMostRecentDecl();; F = F->getPreviousDecl()) {
+ F->markUsed(Context);
+ if (F == Func)
+ break;
+ }
+}
+
+static void
+diagnoseUncapturableValueReference(Sema &S, SourceLocation loc,
+ VarDecl *var, DeclContext *DC) {
+ DeclContext *VarDC = var->getDeclContext();
+
+ // If the parameter still belongs to the translation unit, then
+ // we're actually just using one parameter in the declaration of
+ // the next.
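+  // (e.g. 'n' referenced by the VLA parameter in 'void f(int n, int a[n]);'
+  // while both parameters still sit at translation-unit scope).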
+ if (isa<ParmVarDecl>(var) &&
+ isa<TranslationUnitDecl>(VarDC))
+ return;
+
+ // For C code, don't diagnose about capture if we're not actually in code
+ // right now; it's impossible to write a non-constant expression outside of
+ // function context, so we'll get other (more useful) diagnostics later.
+ //
+ // For C++, things get a bit more nasty... it would be nice to suppress this
+ // diagnostic for certain cases like using a local variable in an array bound
+ // for a member of a local class, but the correct predicate is not obvious.
+ if (!S.getLangOpts().CPlusPlus && !S.CurContext->isFunctionOrMethod())
+ return;
+
+ if (isa<CXXMethodDecl>(VarDC) &&
+ cast<CXXRecordDecl>(VarDC->getParent())->isLambda()) {
+ S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_lambda)
+ << var->getIdentifier();
+ } else if (FunctionDecl *fn = dyn_cast<FunctionDecl>(VarDC)) {
+ S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_function)
+ << var->getIdentifier() << fn->getDeclName();
+ } else if (isa<BlockDecl>(VarDC)) {
+ S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_block)
+ << var->getIdentifier();
+ } else {
+ // FIXME: Is there any other context where a local variable can be
+ // declared?
+ S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_context)
+ << var->getIdentifier();
+ }
+
+ S.Diag(var->getLocation(), diag::note_entity_declared_at)
+ << var->getIdentifier();
+
+ // FIXME: Add additional diagnostic info about class etc. which prevents
+ // capture.
+}
+
+static bool isVariableAlreadyCapturedInScopeInfo(CapturingScopeInfo *CSI, VarDecl *Var,
+ bool &SubCapturesAreNested,
+ QualType &CaptureType,
+ QualType &DeclRefType) {
+ // Check whether we've already captured it.
+ if (CSI->CaptureMap.count(Var)) {
+ // If we found a capture, any subcaptures are nested.
+ SubCapturesAreNested = true;
+
+ // Retrieve the capture type for this variable.
+ CaptureType = CSI->getCapture(Var).getCaptureType();
+
+ // Compute the type of an expression that refers to this variable.
+ DeclRefType = CaptureType.getNonReferenceType();
+
+    // Similarly to mutable captures in a lambda, all OpenMP captures by copy
+    // are mutable in the sense that the user can change their value - they
+    // are private instances of the captured declarations.
+ const CapturingScopeInfo::Capture &Cap = CSI->getCapture(Var);
+ if (Cap.isCopyCapture() &&
+ !(isa<LambdaScopeInfo>(CSI) && cast<LambdaScopeInfo>(CSI)->Mutable) &&
+ !(isa<CapturedRegionScopeInfo>(CSI) &&
+ cast<CapturedRegionScopeInfo>(CSI)->CapRegionKind == CR_OpenMP))
+ DeclRefType.addConst();
+ return true;
+ }
+ return false;
+}
+
+// Only block literals, captured statements, and lambda expressions can
+// capture; other scopes don't work.
+static DeclContext *getParentOfCapturingContextOrNull(DeclContext *DC, VarDecl *Var,
+ SourceLocation Loc,
+ const bool Diagnose, Sema &S) {
+ if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC) || isLambdaCallOperator(DC))
+ return getLambdaAwareParentOfDeclContext(DC);
+ else if (Var->hasLocalStorage()) {
+ if (Diagnose)
+ diagnoseUncapturableValueReference(S, Loc, Var, DC);
+ }
+ return nullptr;
+}
+
+// Certain capturing entities (lambdas, blocks etc.) are not allowed to capture
+// certain types of variables (unnamed, variably modified types etc.)
+// so check for eligibility.
+static bool isVariableCapturable(CapturingScopeInfo *CSI, VarDecl *Var,
+ SourceLocation Loc,
+ const bool Diagnose, Sema &S) {
+
+ bool IsBlock = isa<BlockScopeInfo>(CSI);
+ bool IsLambda = isa<LambdaScopeInfo>(CSI);
+
+ // Lambdas are not allowed to capture unnamed variables
+ // (e.g. anonymous unions).
+  // FIXME: The C++11 rules don't actually state this explicitly, but I'm
+  // assuming that's the intent.
+ if (IsLambda && !Var->getDeclName()) {
+ if (Diagnose) {
+ S.Diag(Loc, diag::err_lambda_capture_anonymous_var);
+ S.Diag(Var->getLocation(), diag::note_declared_at);
+ }
+ return false;
+ }
+
+ // Prohibit variably-modified types in blocks; they're difficult to deal with.
+ if (Var->getType()->isVariablyModifiedType() && IsBlock) {
+ if (Diagnose) {
+ S.Diag(Loc, diag::err_ref_vm_type);
+ S.Diag(Var->getLocation(), diag::note_previous_decl)
+ << Var->getDeclName();
+ }
+ return false;
+ }
+ // Prohibit structs with flexible array members too.
+ // We cannot capture what is in the tail end of the struct.
+ if (const RecordType *VTTy = Var->getType()->getAs<RecordType>()) {
+ if (VTTy->getDecl()->hasFlexibleArrayMember()) {
+ if (Diagnose) {
+ if (IsBlock)
+ S.Diag(Loc, diag::err_ref_flexarray_type);
+ else
+ S.Diag(Loc, diag::err_lambda_capture_flexarray_type)
+ << Var->getDeclName();
+ S.Diag(Var->getLocation(), diag::note_previous_decl)
+ << Var->getDeclName();
+ }
+ return false;
+ }
+ }
+ const bool HasBlocksAttr = Var->hasAttr<BlocksAttr>();
+ // Lambdas and captured statements are not allowed to capture __block
+ // variables; they don't support the expected semantics.
+ if (HasBlocksAttr && (IsLambda || isa<CapturedRegionScopeInfo>(CSI))) {
+ if (Diagnose) {
+ S.Diag(Loc, diag::err_capture_block_variable)
+ << Var->getDeclName() << !IsLambda;
+ S.Diag(Var->getLocation(), diag::note_previous_decl)
+ << Var->getDeclName();
+ }
+ return false;
+ }
+
+ return true;
+}
+
+// Returns true if the capture by block was successful.
+static bool captureInBlock(BlockScopeInfo *BSI, VarDecl *Var,
+ SourceLocation Loc,
+ const bool BuildAndDiagnose,
+ QualType &CaptureType,
+ QualType &DeclRefType,
+ const bool Nested,
+ Sema &S) {
+ Expr *CopyExpr = nullptr;
+ bool ByRef = false;
+
+ // Blocks are not allowed to capture arrays.
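+  // For example (illustrative): 'int a[8]; void (^b)(void) = ^{ (void)a[0]; };'
+  // is rejected, since copying the array into the block is unsupported.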
+ if (CaptureType->isArrayType()) {
+ if (BuildAndDiagnose) {
+ S.Diag(Loc, diag::err_ref_array_type);
+ S.Diag(Var->getLocation(), diag::note_previous_decl)
+ << Var->getDeclName();
+ }
+ return false;
+ }
+
+ // Forbid the block-capture of autoreleasing variables.
+ if (CaptureType.getObjCLifetime() == Qualifiers::OCL_Autoreleasing) {
+ if (BuildAndDiagnose) {
+ S.Diag(Loc, diag::err_arc_autoreleasing_capture)
+ << /*block*/ 0;
+ S.Diag(Var->getLocation(), diag::note_previous_decl)
+ << Var->getDeclName();
+ }
+ return false;
+ }
+ const bool HasBlocksAttr = Var->hasAttr<BlocksAttr>();
+ if (HasBlocksAttr || CaptureType->isReferenceType()) {
+ // Block capture by reference does not change the capture or
+ // declaration reference types.
+ ByRef = true;
+ } else {
+ // Block capture by copy introduces 'const'.
+ CaptureType = CaptureType.getNonReferenceType().withConst();
+ DeclRefType = CaptureType;
+
+ if (S.getLangOpts().CPlusPlus && BuildAndDiagnose) {
+ if (const RecordType *Record = DeclRefType->getAs<RecordType>()) {
+ // The capture logic needs the destructor, so make sure we mark it.
+ // Usually this is unnecessary because most local variables have
+ // their destructors marked at declaration time, but parameters are
+ // an exception because it's technically only the call site that
+ // actually requires the destructor.
+ if (isa<ParmVarDecl>(Var))
+ S.FinalizeVarWithDestructor(Var, Record);
+
+ // Enter a new evaluation context to insulate the copy
+ // full-expression.
+ EnterExpressionEvaluationContext scope(S, S.PotentiallyEvaluated);
+
+ // According to the blocks spec, the capture of a variable from
+ // the stack requires a const copy constructor. This is not true
+ // of the copy/move done to move a __block variable to the heap.
+ Expr *DeclRef = new (S.Context) DeclRefExpr(Var, Nested,
+ DeclRefType.withConst(),
+ VK_LValue, Loc);
+
+ ExprResult Result
+ = S.PerformCopyInitialization(
+ InitializedEntity::InitializeBlock(Var->getLocation(),
+ CaptureType, false),
+ Loc, DeclRef);
+
+ // Build a full-expression copy expression if initialization
+ // succeeded and used a non-trivial constructor. Recover from
+ // errors by pretending that the copy isn't necessary.
+ if (!Result.isInvalid() &&
+ !cast<CXXConstructExpr>(Result.get())->getConstructor()
+ ->isTrivial()) {
+ Result = S.MaybeCreateExprWithCleanups(Result);
+ CopyExpr = Result.get();
+ }
+ }
+ }
+ }
+
+ // Actually capture the variable.
+ if (BuildAndDiagnose)
+ BSI->addCapture(Var, HasBlocksAttr, ByRef, Nested, Loc,
+ SourceLocation(), CaptureType, CopyExpr);
+
+ return true;
+}
+
+/// \brief Capture the given variable in the captured region.
+static bool captureInCapturedRegion(CapturedRegionScopeInfo *RSI,
+ VarDecl *Var,
+ SourceLocation Loc,
+ const bool BuildAndDiagnose,
+ QualType &CaptureType,
+ QualType &DeclRefType,
+ const bool RefersToCapturedVariable,
+ Sema &S) {
+
+ // By default, capture variables by reference.
+ bool ByRef = true;
+ // Using an LValue reference type is consistent with Lambdas (see below).
+ if (S.getLangOpts().OpenMP) {
+ ByRef = S.IsOpenMPCapturedByRef(Var, RSI);
+ if (S.IsOpenMPCapturedVar(Var))
+ DeclRefType = DeclRefType.getUnqualifiedType();
+ }
+
+ if (ByRef)
+ CaptureType = S.Context.getLValueReferenceType(DeclRefType);
+ else
+ CaptureType = DeclRefType;
+
+ Expr *CopyExpr = nullptr;
+ if (BuildAndDiagnose) {
+ // The current implementation assumes that all variables are captured
+    // by reference. Since there is no capture by copy, no expression
+ // evaluation will be needed.
+ RecordDecl *RD = RSI->TheRecordDecl;
+
+ FieldDecl *Field
+ = FieldDecl::Create(S.Context, RD, Loc, Loc, nullptr, CaptureType,
+ S.Context.getTrivialTypeSourceInfo(CaptureType, Loc),
+ nullptr, false, ICIS_NoInit);
+ Field->setImplicit(true);
+ Field->setAccess(AS_private);
+ RD->addDecl(Field);
+
+ CopyExpr = new (S.Context) DeclRefExpr(Var, RefersToCapturedVariable,
+ DeclRefType, VK_LValue, Loc);
+ Var->setReferenced(true);
+ Var->markUsed(S.Context);
+ }
+
+ // Actually capture the variable.
+ if (BuildAndDiagnose)
+ RSI->addCapture(Var, /*isBlock*/false, ByRef, RefersToCapturedVariable, Loc,
+ SourceLocation(), CaptureType, CopyExpr);
+
+ return true;
+}
+
+/// \brief Create a field within the lambda class for the variable
+/// being captured.
+static void addAsFieldToClosureType(Sema &S, LambdaScopeInfo *LSI, VarDecl *Var,
+ QualType FieldType, QualType DeclRefType,
+ SourceLocation Loc,
+ bool RefersToCapturedVariable) {
+ CXXRecordDecl *Lambda = LSI->Lambda;
+
+ // Build the non-static data member.
+ FieldDecl *Field
+ = FieldDecl::Create(S.Context, Lambda, Loc, Loc, nullptr, FieldType,
+ S.Context.getTrivialTypeSourceInfo(FieldType, Loc),
+ nullptr, false, ICIS_NoInit);
+ Field->setImplicit(true);
+ Field->setAccess(AS_private);
+ Lambda->addDecl(Field);
+}
+
+/// \brief Capture the given variable in the lambda.
+static bool captureInLambda(LambdaScopeInfo *LSI,
+ VarDecl *Var,
+ SourceLocation Loc,
+ const bool BuildAndDiagnose,
+ QualType &CaptureType,
+ QualType &DeclRefType,
+ const bool RefersToCapturedVariable,
+ const Sema::TryCaptureKind Kind,
+ SourceLocation EllipsisLoc,
+ const bool IsTopScope,
+ Sema &S) {
+
+ // Determine whether we are capturing by reference or by value.
+ bool ByRef = false;
+ if (IsTopScope && Kind != Sema::TryCapture_Implicit) {
+ ByRef = (Kind == Sema::TryCapture_ExplicitByRef);
+ } else {
+ ByRef = (LSI->ImpCaptureStyle == LambdaScopeInfo::ImpCap_LambdaByref);
+ }
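+  // For example (illustrative): in '[&x] { ... }' the explicit capture
+  // forces ByRef at the top scope, while under '[=] { ... }' or
+  // '[&] { ... }' the capture-default chosen above decides.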
+
+ // Compute the type of the field that will capture this variable.
+ if (ByRef) {
+ // C++11 [expr.prim.lambda]p15:
+ // An entity is captured by reference if it is implicitly or
+ // explicitly captured but not captured by copy. It is
+ // unspecified whether additional unnamed non-static data
+ // members are declared in the closure type for entities
+ // captured by reference.
+ //
+ // FIXME: It is not clear whether we want to build an lvalue reference
+ // to the DeclRefType or to CaptureType.getNonReferenceType(). GCC appears
+ // to do the former, while EDG does the latter. Core issue 1249 will
+ // clarify, but for now we follow GCC because it's a more permissive and
+ // easily defensible position.
+ CaptureType = S.Context.getLValueReferenceType(DeclRefType);
+ } else {
+ // C++11 [expr.prim.lambda]p14:
+ // For each entity captured by copy, an unnamed non-static
+ // data member is declared in the closure type. The
+ // declaration order of these members is unspecified. The type
+ // of such a data member is the type of the corresponding
+ // captured entity if the entity is not a reference to an
+ // object, or the referenced type otherwise. [Note: If the
+ // captured entity is a reference to a function, the
+ // corresponding data member is also a reference to a
+ // function. - end note ]
+ if (const ReferenceType *RefType = CaptureType->getAs<ReferenceType>()){
+ if (!RefType->getPointeeType()->isFunctionType())
+ CaptureType = RefType->getPointeeType();
+ }
+
+ // Forbid the lambda copy-capture of autoreleasing variables.
+ if (CaptureType.getObjCLifetime() == Qualifiers::OCL_Autoreleasing) {
+ if (BuildAndDiagnose) {
+ S.Diag(Loc, diag::err_arc_autoreleasing_capture) << /*lambda*/ 1;
+ S.Diag(Var->getLocation(), diag::note_previous_decl)
+ << Var->getDeclName();
+ }
+ return false;
+ }
+
+ // Make sure that by-copy captures are of a complete and non-abstract type.
+ if (BuildAndDiagnose) {
+ if (!CaptureType->isDependentType() &&
+ S.RequireCompleteType(Loc, CaptureType,
+ diag::err_capture_of_incomplete_type,
+ Var->getDeclName()))
+ return false;
+
+ if (S.RequireNonAbstractType(Loc, CaptureType,
+ diag::err_capture_of_abstract_type))
+ return false;
+ }
+ }
+
+ // Capture this variable in the lambda.
+ if (BuildAndDiagnose)
+ addAsFieldToClosureType(S, LSI, Var, CaptureType, DeclRefType, Loc,
+ RefersToCapturedVariable);
+
+ // Compute the type of a reference to this captured variable.
+ if (ByRef)
+ DeclRefType = CaptureType.getNonReferenceType();
+ else {
+ // C++ [expr.prim.lambda]p5:
+ // The closure type for a lambda-expression has a public inline
+ // function call operator [...]. This function call operator is
+ // declared const (9.3.1) if and only if the lambda-expression’s
+ // parameter-declaration-clause is not followed by mutable.
+ DeclRefType = CaptureType.getNonReferenceType();
+ if (!LSI->Mutable && !CaptureType->isReferenceType())
+ DeclRefType.addConst();
+ }
+
+ // Add the capture.
+ if (BuildAndDiagnose)
+ LSI->addCapture(Var, /*IsBlock=*/false, ByRef, RefersToCapturedVariable,
+ Loc, EllipsisLoc, CaptureType, /*CopyExpr=*/nullptr);
+
+ return true;
+}
+
+bool Sema::tryCaptureVariable(
+ VarDecl *Var, SourceLocation ExprLoc, TryCaptureKind Kind,
+ SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType,
+ QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt) {
+ // An init-capture is notionally from the context surrounding its
+ // declaration, but its parent DC is the lambda class.
+ DeclContext *VarDC = Var->getDeclContext();
+ if (Var->isInitCapture())
+ VarDC = VarDC->getParent();
+
+ DeclContext *DC = CurContext;
+ const unsigned MaxFunctionScopesIndex = FunctionScopeIndexToStopAt
+ ? *FunctionScopeIndexToStopAt : FunctionScopes.size() - 1;
+  // We need to sync up the declaration context with the
+  // FunctionScopeIndexToStopAt.
+ if (FunctionScopeIndexToStopAt) {
+ unsigned FSIndex = FunctionScopes.size() - 1;
+ while (FSIndex != MaxFunctionScopesIndex) {
+ DC = getLambdaAwareParentOfDeclContext(DC);
+ --FSIndex;
+ }
+ }
+
+ // If the variable is declared in the current context, there is no need to
+ // capture it.
+ if (VarDC == DC) return true;
+
+ // Capture global variables if it is required to use private copy of this
+ // variable.
+ bool IsGlobal = !Var->hasLocalStorage();
+ if (IsGlobal && !(LangOpts.OpenMP && IsOpenMPCapturedVar(Var)))
+ return true;
+
+ // Walk up the stack to determine whether we can capture the variable,
+ // performing the "simple" checks that don't depend on type. We stop when
+ // we've either hit the declared scope of the variable or find an existing
+ // capture of that variable. We start from the innermost capturing-entity
+ // (the DC) and ensure that all intervening capturing-entities
+  // (blocks/lambdas etc.) between the innermost capturer and the variable's
+ // declcontext can either capture the variable or have already captured
+ // the variable.
+ CaptureType = Var->getType();
+ DeclRefType = CaptureType.getNonReferenceType();
+ bool Nested = false;
+ bool Explicit = (Kind != TryCapture_Implicit);
+ unsigned FunctionScopesIndex = MaxFunctionScopesIndex;
+ unsigned OpenMPLevel = 0;
+ do {
+ // Only block literals, captured statements, and lambda expressions can
+ // capture; other scopes don't work.
+ DeclContext *ParentDC = getParentOfCapturingContextOrNull(DC, Var,
+ ExprLoc,
+ BuildAndDiagnose,
+ *this);
+ // We need to check for the parent *first* because, if we *have*
+ // private-captured a global variable, we need to recursively capture it in
+ // intermediate blocks, lambdas, etc.
+ if (!ParentDC) {
+ if (IsGlobal) {
+ FunctionScopesIndex = MaxFunctionScopesIndex - 1;
+ break;
+ }
+ return true;
+ }
+
+ FunctionScopeInfo *FSI = FunctionScopes[FunctionScopesIndex];
+ CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(FSI);
+
+ // Check whether we've already captured it.
+ if (isVariableAlreadyCapturedInScopeInfo(CSI, Var, Nested, CaptureType,
+ DeclRefType))
+ break;
+ // If we are instantiating a generic lambda call operator body,
+ // we do not want to capture new variables. What was captured
+ // during either a lambdas transformation or initial parsing
+ // should be used.
+ if (isGenericLambdaCallOperatorSpecialization(DC)) {
+ if (BuildAndDiagnose) {
+ LambdaScopeInfo *LSI = cast<LambdaScopeInfo>(CSI);
+ if (LSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_None) {
+ Diag(ExprLoc, diag::err_lambda_impcap) << Var->getDeclName();
+ Diag(Var->getLocation(), diag::note_previous_decl)
+ << Var->getDeclName();
+ Diag(LSI->Lambda->getLocStart(), diag::note_lambda_decl);
+ } else
+ diagnoseUncapturableValueReference(*this, ExprLoc, Var, DC);
+ }
+ return true;
+ }
+ // Certain capturing entities (lambdas, blocks etc.) are not allowed to capture
+ // certain types of variables (unnamed, variably modified types etc.)
+ // so check for eligibility.
+ if (!isVariableCapturable(CSI, Var, ExprLoc, BuildAndDiagnose, *this))
+ return true;
+
+    // Try to capture variable-length array types.
+ if (Var->getType()->isVariablyModifiedType()) {
+ // We're going to walk down into the type and look for VLA
+ // expressions.
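+      // For example (illustrative): in 'void f(int n) { int a[n];
+      // [&] { return sizeof(a); }(); }' the VLA bound 'n' must be captured
+      // for 'sizeof(a)' to be evaluable inside the lambda.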
+ QualType QTy = Var->getType();
+ if (ParmVarDecl *PVD = dyn_cast_or_null<ParmVarDecl>(Var))
+ QTy = PVD->getOriginalType();
+ do {
+ const Type *Ty = QTy.getTypePtr();
+ switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+ QTy = QualType();
+ break;
+ // These types are never variably-modified.
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::Record:
+ case Type::Enum:
+ case Type::Elaborated:
+ case Type::TemplateSpecialization:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ llvm_unreachable("type class is never variably-modified!");
+ case Type::Adjusted:
+ QTy = cast<AdjustedType>(Ty)->getOriginalType();
+ break;
+ case Type::Decayed:
+ QTy = cast<DecayedType>(Ty)->getPointeeType();
+ break;
+ case Type::Pointer:
+ QTy = cast<PointerType>(Ty)->getPointeeType();
+ break;
+ case Type::BlockPointer:
+ QTy = cast<BlockPointerType>(Ty)->getPointeeType();
+ break;
+ case Type::LValueReference:
+ case Type::RValueReference:
+ QTy = cast<ReferenceType>(Ty)->getPointeeType();
+ break;
+ case Type::MemberPointer:
+ QTy = cast<MemberPointerType>(Ty)->getPointeeType();
+ break;
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ // Losing element qualification here is fine.
+ QTy = cast<ArrayType>(Ty)->getElementType();
+ break;
+ case Type::VariableArray: {
+ // Losing element qualification here is fine.
+ const VariableArrayType *VAT = cast<VariableArrayType>(Ty);
+
+ // Unknown size indication requires no size computation.
+ // Otherwise, evaluate and record it.
+ if (auto Size = VAT->getSizeExpr()) {
+ if (!CSI->isVLATypeCaptured(VAT)) {
+ RecordDecl *CapRecord = nullptr;
+ if (auto LSI = dyn_cast<LambdaScopeInfo>(CSI)) {
+ CapRecord = LSI->Lambda;
+ } else if (auto CRSI = dyn_cast<CapturedRegionScopeInfo>(CSI)) {
+ CapRecord = CRSI->TheRecordDecl;
+ }
+ if (CapRecord) {
+ auto ExprLoc = Size->getExprLoc();
+ auto SizeType = Context.getSizeType();
+ // Build the non-static data member.
+ auto Field = FieldDecl::Create(
+ Context, CapRecord, ExprLoc, ExprLoc,
+ /*Id*/ nullptr, SizeType, /*TInfo*/ nullptr,
+ /*BW*/ nullptr, /*Mutable*/ false,
+ /*InitStyle*/ ICIS_NoInit);
+ Field->setImplicit(true);
+ Field->setAccess(AS_private);
+ Field->setCapturedVLAType(VAT);
+ CapRecord->addDecl(Field);
+
+ CSI->addVLATypeCapture(ExprLoc, SizeType);
+ }
+ }
+ }
+ QTy = VAT->getElementType();
+ break;
+ }
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ QTy = cast<FunctionType>(Ty)->getReturnType();
+ break;
+ case Type::Paren:
+ case Type::TypeOf:
+ case Type::UnaryTransform:
+ case Type::Attributed:
+ case Type::SubstTemplateTypeParm:
+ case Type::PackExpansion:
+ // Keep walking after single level desugaring.
+ QTy = QTy.getSingleStepDesugaredType(getASTContext());
+ break;
+ case Type::Typedef:
+ QTy = cast<TypedefType>(Ty)->desugar();
+ break;
+ case Type::Decltype:
+ QTy = cast<DecltypeType>(Ty)->desugar();
+ break;
+ case Type::Auto:
+ QTy = cast<AutoType>(Ty)->getDeducedType();
+ break;
+ case Type::TypeOfExpr:
+ QTy = cast<TypeOfExprType>(Ty)->getUnderlyingExpr()->getType();
+ break;
+ case Type::Atomic:
+ QTy = cast<AtomicType>(Ty)->getValueType();
+ break;
+ }
+ } while (!QTy.isNull() && QTy->isVariablyModifiedType());
+ }
+
+ if (getLangOpts().OpenMP) {
+ if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(CSI)) {
+ // OpenMP private variables should not be captured in outer scope, so
+ // just break here. Similarly, global variables that are captured in a
+ // target region should not be captured outside the scope of the region.
+ if (RSI->CapRegionKind == CR_OpenMP) {
+ auto isTargetCap = isOpenMPTargetCapturedVar(Var, OpenMPLevel);
+          // When we detect target captures we are looking from inside the
+          // target region, so we need to propagate the capture from the
+          // enclosing region; hence the capture is not initially nested.
+ if (isTargetCap)
+ FunctionScopesIndex--;
+
+ if (isTargetCap || isOpenMPPrivateVar(Var, OpenMPLevel)) {
+ Nested = !isTargetCap;
+ DeclRefType = DeclRefType.getUnqualifiedType();
+ CaptureType = Context.getLValueReferenceType(DeclRefType);
+ break;
+ }
+ ++OpenMPLevel;
+ }
+ }
+ }
+ if (CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_None && !Explicit) {
+ // No capture-default, and this is not an explicit capture
+ // so cannot capture this variable.
+ if (BuildAndDiagnose) {
+ Diag(ExprLoc, diag::err_lambda_impcap) << Var->getDeclName();
+ Diag(Var->getLocation(), diag::note_previous_decl)
+ << Var->getDeclName();
+ Diag(cast<LambdaScopeInfo>(CSI)->Lambda->getLocStart(),
+ diag::note_lambda_decl);
+        // FIXME: If we error out because an outer lambda cannot implicitly
+        // capture a variable that an inner lambda explicitly captures, we
+        // should have the inner lambda do the explicit capture - because
+        // it makes for cleaner diagnostics later. This would purely be done
+        // so that the diagnostic does not misleadingly claim that a variable
+        // cannot be captured by a lambda implicitly even though it is captured
+        // explicitly. Suggestion:
+ // - create const bool VariableCaptureWasInitiallyExplicit = Explicit
+ // at the function head
+ // - cache the StartingDeclContext - this must be a lambda
+ // - captureInLambda in the innermost lambda the variable.
+ }
+ return true;
+ }
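+
+  // For illustration (hypothetical user code): with no capture-default and
+  // no explicit capture, implicit capture is rejected above:
+  // \code
+  //   void f() {
+  //     int i = 0;
+  //     auto l = [] { return i; }; // error: 'i' cannot be implicitly captured
+  //   }
+  // \endcode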
+
+ FunctionScopesIndex--;
+ DC = ParentDC;
+ Explicit = false;
+ } while (!VarDC->Equals(DC));
+
+ // Walk back down the scope stack, (e.g. from outer lambda to inner lambda)
+ // computing the type of the capture at each step, checking type-specific
+ // requirements, and adding captures if requested.
+ // If the variable had already been captured previously, we start capturing
+ // at the lambda nested within that one.
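+  // For example (illustrative): in
+  // \code
+  //   int x;
+  //   auto outer = [=] { auto inner = [&] { return x; }; };
+  // \endcode
+  // 'x' is first captured by copy into 'outer', and 'inner' then captures
+  // the enclosing lambda's copy by reference.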
+ for (unsigned I = ++FunctionScopesIndex, N = MaxFunctionScopesIndex + 1; I != N;
+ ++I) {
+ CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(FunctionScopes[I]);
+
+ if (BlockScopeInfo *BSI = dyn_cast<BlockScopeInfo>(CSI)) {
+ if (!captureInBlock(BSI, Var, ExprLoc,
+ BuildAndDiagnose, CaptureType,
+ DeclRefType, Nested, *this))
+ return true;
+ Nested = true;
+ } else if (CapturedRegionScopeInfo *RSI = dyn_cast<CapturedRegionScopeInfo>(CSI)) {
+ if (!captureInCapturedRegion(RSI, Var, ExprLoc,
+ BuildAndDiagnose, CaptureType,
+ DeclRefType, Nested, *this))
+ return true;
+ Nested = true;
+ } else {
+ LambdaScopeInfo *LSI = cast<LambdaScopeInfo>(CSI);
+ if (!captureInLambda(LSI, Var, ExprLoc,
+ BuildAndDiagnose, CaptureType,
+ DeclRefType, Nested, Kind, EllipsisLoc,
+ /*IsTopScope*/I == N - 1, *this))
+ return true;
+ Nested = true;
+ }
+ }
+ return false;
+}
+
+bool Sema::tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
+ TryCaptureKind Kind, SourceLocation EllipsisLoc) {
+ QualType CaptureType;
+ QualType DeclRefType;
+ return tryCaptureVariable(Var, Loc, Kind, EllipsisLoc,
+ /*BuildAndDiagnose=*/true, CaptureType,
+ DeclRefType, nullptr);
+}
+
+bool Sema::NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc) {
+ QualType CaptureType;
+ QualType DeclRefType;
+ return !tryCaptureVariable(Var, Loc, TryCapture_Implicit, SourceLocation(),
+ /*BuildAndDiagnose=*/false, CaptureType,
+ DeclRefType, nullptr);
+}
+
+QualType Sema::getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc) {
+ QualType CaptureType;
+ QualType DeclRefType;
+
+ // Determine whether we can capture this variable.
+ if (tryCaptureVariable(Var, Loc, TryCapture_Implicit, SourceLocation(),
+ /*BuildAndDiagnose=*/false, CaptureType,
+ DeclRefType, nullptr))
+ return QualType();
+
+ return DeclRefType;
+}
+
+
+// If either the type of the variable or the initializer is dependent,
+// return false. Otherwise, determine whether the variable is a constant
+// expression. Use this if you need to know if a variable that might or
+// might not be dependent is truly a constant expression.
+static inline bool IsVariableNonDependentAndAConstantExpression(VarDecl *Var,
+ ASTContext &Context) {
+
+ if (Var->getType()->isDependentType())
+ return false;
+ const VarDecl *DefVD = nullptr;
+ Var->getAnyInitializer(DefVD);
+ if (!DefVD)
+ return false;
+ EvaluatedStmt *Eval = DefVD->ensureEvaluatedStmt();
+ Expr *Init = cast<Expr>(Eval->Value);
+ if (Init->isValueDependent())
+ return false;
+ return IsVariableAConstantExpression(Var, Context);
+}
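+
+// For illustration (hypothetical user code): given
+// \code
+//   template <typename T> void f() {
+//     const T n = 0;   // dependent type: reported as not a constant expression
+//     const int m = 0; // non-dependent, constant-initialized: reported as one
+//   }
+// \endcode
+// this helper answers false for 'n' (until instantiation) and true for 'm'.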
+
+void Sema::UpdateMarkingForLValueToRValue(Expr *E) {
+ // Per C++11 [basic.def.odr], a variable is odr-used "unless it is
+ // an object that satisfies the requirements for appearing in a
+ // constant expression (5.19) and the lvalue-to-rvalue conversion (4.1)
+ // is immediately applied." This function handles the lvalue-to-rvalue
+ // conversion part.
+ MaybeODRUseExprs.erase(E->IgnoreParens());
+
+ // If we are in a lambda, check if this DeclRefExpr or MemberExpr refers
+ // to a variable that is a constant expression, and if so, identify it as
+ // a reference to a variable that does not involve an odr-use of that
+ // variable.
+ if (LambdaScopeInfo *LSI = getCurLambda()) {
+ Expr *SansParensExpr = E->IgnoreParens();
+ VarDecl *Var = nullptr;
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(SansParensExpr))
+ Var = dyn_cast<VarDecl>(DRE->getFoundDecl());
+ else if (MemberExpr *ME = dyn_cast<MemberExpr>(SansParensExpr))
+ Var = dyn_cast<VarDecl>(ME->getMemberDecl());
+
+ if (Var && IsVariableNonDependentAndAConstantExpression(Var, Context))
+ LSI->markVariableExprAsNonODRUsed(SansParensExpr);
+ }
+}
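+
+// For illustration (hypothetical user code): in
+// \code
+//   const int n = 3;
+//   auto l = [] { return n; }; // OK: the lvalue-to-rvalue conversion is
+//                              // immediately applied to a constant, so 'n'
+//                              // is not odr-used and needs no capture
+// \endcode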
+
+ExprResult Sema::ActOnConstantExpression(ExprResult Res) {
+ Res = CorrectDelayedTyposInExpr(Res);
+
+ if (!Res.isUsable())
+ return Res;
+
+ // If a constant-expression is a reference to a variable where we delay
+ // deciding whether it is an odr-use, just assume we will apply the
+ // lvalue-to-rvalue conversion. In the one case where this doesn't happen
+ // (a non-type template argument), we have special handling anyway.
+ UpdateMarkingForLValueToRValue(Res.get());
+ return Res;
+}
+
+void Sema::CleanupVarDeclMarking() {
+ for (Expr *E : MaybeODRUseExprs) {
+ VarDecl *Var;
+ SourceLocation Loc;
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ Var = cast<VarDecl>(DRE->getDecl());
+ Loc = DRE->getLocation();
+ } else if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ Var = cast<VarDecl>(ME->getMemberDecl());
+ Loc = ME->getMemberLoc();
+ } else {
+ llvm_unreachable("Unexpected expression");
+ }
+
+ MarkVarDeclODRUsed(Var, Loc, *this,
+ /*MaxFunctionScopeIndex Pointer*/ nullptr);
+ }
+
+ MaybeODRUseExprs.clear();
+}
+
+static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
+ VarDecl *Var, Expr *E) {
+ assert((!E || isa<DeclRefExpr>(E) || isa<MemberExpr>(E)) &&
+ "Invalid Expr argument to DoMarkVarDeclReferenced");
+ Var->setReferenced();
+
+ TemplateSpecializationKind TSK = Var->getTemplateSpecializationKind();
+ bool MarkODRUsed = true;
+
+ // If the context is not potentially evaluated, this is not an odr-use and
+ // does not trigger instantiation.
+ if (!IsPotentiallyEvaluatedContext(SemaRef)) {
+ if (SemaRef.isUnevaluatedContext())
+ return;
+
+ // If we don't yet know whether this context is going to end up being an
+ // evaluated context, and we're referencing a variable from an enclosing
+ // scope, add a potential capture.
+ //
+ // FIXME: Is this necessary? These contexts are only used for default
+ // arguments, where local variables can't be used.
+ const bool RefersToEnclosingScope =
+ (SemaRef.CurContext != Var->getDeclContext() &&
+ Var->getDeclContext()->isFunctionOrMethod() && Var->hasLocalStorage());
+ if (RefersToEnclosingScope) {
+ if (LambdaScopeInfo *const LSI = SemaRef.getCurLambda()) {
+        // If a variable could potentially be odr-used, defer marking it
+        // until we finish analyzing the full expression for any
+        // lvalue-to-rvalue or discarded-value conversions that would
+        // obviate odr-use. Add it to the list of potential captures that
+        // will be analyzed later (ActOnFinishFullExpr) for eventual capture
+        // and odr-use marking, unless the variable is a reference that was
+        // initialized by a constant expression (such a reference never
+        // needs to be captured or odr-used).
+ assert(E && "Capture variable should be used in an expression.");
+ if (!Var->getType()->isReferenceType() ||
+ !IsVariableNonDependentAndAConstantExpression(Var, SemaRef.Context))
+ LSI->addPotentialCapture(E->IgnoreParens());
+ }
+ }
+
+ if (!isTemplateInstantiation(TSK))
+ return;
+
+ // Instantiate, but do not mark as odr-used, variable templates.
+ MarkODRUsed = false;
+ }
+
+ VarTemplateSpecializationDecl *VarSpec =
+ dyn_cast<VarTemplateSpecializationDecl>(Var);
+ assert(!isa<VarTemplatePartialSpecializationDecl>(Var) &&
+ "Can't instantiate a partial template specialization.");
+
+ // Perform implicit instantiation of static data members, static data member
+ // templates of class templates, and variable template specializations. Delay
+ // instantiations of variable templates, except for those that could be used
+ // in a constant expression.
+ if (isTemplateInstantiation(TSK)) {
+ bool TryInstantiating = TSK == TSK_ImplicitInstantiation;
+
+ if (TryInstantiating && !isa<VarTemplateSpecializationDecl>(Var)) {
+ if (Var->getPointOfInstantiation().isInvalid()) {
+ // This is a modification of an existing AST node. Notify listeners.
+ if (ASTMutationListener *L = SemaRef.getASTMutationListener())
+ L->StaticDataMemberInstantiated(Var);
+ } else if (!Var->isUsableInConstantExpressions(SemaRef.Context))
+ // Don't bother trying to instantiate it again, unless we might need
+ // its initializer before we get to the end of the TU.
+ TryInstantiating = false;
+ }
+
+ if (Var->getPointOfInstantiation().isInvalid())
+ Var->setTemplateSpecializationKind(TSK, Loc);
+
+ if (TryInstantiating) {
+ SourceLocation PointOfInstantiation = Var->getPointOfInstantiation();
+ bool InstantiationDependent = false;
+ bool IsNonDependent =
+ VarSpec ? !TemplateSpecializationType::anyDependentTemplateArguments(
+ VarSpec->getTemplateArgsInfo(), InstantiationDependent)
+ : true;
+
+ // Do not instantiate specializations that are still type-dependent.
+ if (IsNonDependent) {
+ if (Var->isUsableInConstantExpressions(SemaRef.Context)) {
+ // Do not defer instantiations of variables which could be used in a
+ // constant expression.
+ SemaRef.InstantiateVariableDefinition(PointOfInstantiation, Var);
+ } else {
+ SemaRef.PendingInstantiations
+ .push_back(std::make_pair(Var, PointOfInstantiation));
+ }
+ }
+ }
+ }
+
+  if (!MarkODRUsed)
+    return;
+
+ // Per C++11 [basic.def.odr], a variable is odr-used "unless it satisfies
+ // the requirements for appearing in a constant expression (5.19) and, if
+ // it is an object, the lvalue-to-rvalue conversion (4.1)
+ // is immediately applied." We check the first part here, and
+ // Sema::UpdateMarkingForLValueToRValue deals with the second part.
+ // Note that we use the C++11 definition everywhere because nothing in
+ // C++03 depends on whether we get the C++03 version correct. The second
+ // part does not apply to references, since they are not objects.
+ if (E && IsVariableAConstantExpression(Var, SemaRef.Context)) {
+ // A reference initialized by a constant expression can never be
+ // odr-used, so simply ignore it.
+ if (!Var->getType()->isReferenceType())
+ SemaRef.MaybeODRUseExprs.insert(E);
+ } else
+ MarkVarDeclODRUsed(Var, Loc, SemaRef,
+ /*MaxFunctionScopeIndex ptr*/ nullptr);
+}
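+
+// For illustration (hypothetical user code):
+// \code
+//   const int k = 2;
+//   int a = k;         // deferred: lvalue-to-rvalue conversion obviates odr-use
+//   const int *p = &k; // odr-use: no lvalue-to-rvalue conversion is applied
+// \endcode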
+
+/// \brief Mark a variable referenced, and check whether it is odr-used
+/// (C++ [basic.def.odr]p2, C99 6.9p3). Note that this should not be
+/// used directly for normal expressions referring to VarDecl.
+void Sema::MarkVariableReferenced(SourceLocation Loc, VarDecl *Var) {
+ DoMarkVarDeclReferenced(*this, Loc, Var, nullptr);
+}
+
+static void MarkExprReferenced(Sema &SemaRef, SourceLocation Loc,
+ Decl *D, Expr *E, bool OdrUse) {
+ if (VarDecl *Var = dyn_cast<VarDecl>(D)) {
+ DoMarkVarDeclReferenced(SemaRef, Loc, Var, E);
+ return;
+ }
+
+ SemaRef.MarkAnyDeclReferenced(Loc, D, OdrUse);
+
+ // If this is a call to a method via a cast, also mark the method in the
+ // derived class used in case codegen can devirtualize the call.
+ const MemberExpr *ME = dyn_cast<MemberExpr>(E);
+ if (!ME)
+ return;
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ME->getMemberDecl());
+ if (!MD)
+ return;
+ // Only attempt to devirtualize if this is truly a virtual call.
+ bool IsVirtualCall = MD->isVirtual() &&
+ ME->performsVirtualDispatch(SemaRef.getLangOpts());
+ if (!IsVirtualCall)
+ return;
+ const Expr *Base = ME->getBase();
+ const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
+ if (!MostDerivedClassDecl)
+ return;
+ CXXMethodDecl *DM = MD->getCorrespondingMethodInClass(MostDerivedClassDecl);
+ if (!DM || DM->isPure())
+ return;
+ SemaRef.MarkAnyDeclReferenced(Loc, DM, OdrUse);
+}
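+
+// For illustration (hypothetical user code): the devirtualization marking
+// above covers calls such as
+// \code
+//   struct B { virtual void f(); };
+//   struct D final : B { void f() override; };
+//   void g(D &d) { static_cast<B &>(d).f(); } // can only dispatch to D::f
+// \endcode
+// where D::f is also marked used so IR-gen may emit a direct call.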
+
+/// \brief Perform reference-marking and odr-use handling for a DeclRefExpr.
+void Sema::MarkDeclRefReferenced(DeclRefExpr *E) {
+ // TODO: update this with DR# once a defect report is filed.
+ // C++11 defect. The address of a pure member should not be an ODR use, even
+ // if it's a qualified reference.
+ bool OdrUse = true;
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(E->getDecl()))
+ if (Method->isVirtual())
+ OdrUse = false;
+ MarkExprReferenced(*this, E->getLocation(), E->getDecl(), E, OdrUse);
+}
+
+/// \brief Perform reference-marking and odr-use handling for a MemberExpr.
+void Sema::MarkMemberReferenced(MemberExpr *E) {
+ // C++11 [basic.def.odr]p2:
+ // A non-overloaded function whose name appears as a potentially-evaluated
+ // expression or a member of a set of candidate functions, if selected by
+ // overload resolution when referred to from a potentially-evaluated
+ // expression, is odr-used, unless it is a pure virtual function and its
+ // name is not explicitly qualified.
+ bool OdrUse = true;
+ if (E->performsVirtualDispatch(getLangOpts())) {
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(E->getMemberDecl()))
+ if (Method->isPure())
+ OdrUse = false;
+ }
+ SourceLocation Loc = E->getMemberLoc().isValid() ?
+ E->getMemberLoc() : E->getLocStart();
+ MarkExprReferenced(*this, Loc, E->getMemberDecl(), E, OdrUse);
+}
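+
+// For illustration (hypothetical user code):
+// \code
+//   struct A { virtual void f() = 0; };
+//   void g(A &a) { a.f(); }    // virtual dispatch: A::f is not odr-used
+//   void h(A &a) { a.A::f(); } // qualified: odr-used, a definition is required
+// \endcode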
+
+/// \brief Perform marking for a reference to an arbitrary declaration. It
+/// marks the declaration referenced, and performs odr-use checking for
+/// functions and variables. This method should not be used when building a
+/// normal expression which refers to a variable.
+void Sema::MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse) {
+ if (OdrUse) {
+ if (auto *VD = dyn_cast<VarDecl>(D)) {
+ MarkVariableReferenced(Loc, VD);
+ return;
+ }
+ }
+ if (auto *FD = dyn_cast<FunctionDecl>(D)) {
+ MarkFunctionReferenced(Loc, FD, OdrUse);
+ return;
+ }
+ D->setReferenced();
+}
+
+namespace {
+ // Mark all of the declarations referenced
+ // FIXME: Not fully implemented yet! We need to have a better understanding
+ // of when we're entering
+ class MarkReferencedDecls : public RecursiveASTVisitor<MarkReferencedDecls> {
+ Sema &S;
+ SourceLocation Loc;
+
+ public:
+ typedef RecursiveASTVisitor<MarkReferencedDecls> Inherited;
+
+ MarkReferencedDecls(Sema &S, SourceLocation Loc) : S(S), Loc(Loc) { }
+
+ bool TraverseTemplateArgument(const TemplateArgument &Arg);
+ bool TraverseRecordType(RecordType *T);
+ };
+}
+
+bool MarkReferencedDecls::TraverseTemplateArgument(
+ const TemplateArgument &Arg) {
+ if (Arg.getKind() == TemplateArgument::Declaration) {
+ if (Decl *D = Arg.getAsDecl())
+ S.MarkAnyDeclReferenced(Loc, D, true);
+ }
+
+ return Inherited::TraverseTemplateArgument(Arg);
+}
+
+bool MarkReferencedDecls::TraverseRecordType(RecordType *T) {
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(T->getDecl())) {
+ const TemplateArgumentList &Args = Spec->getTemplateArgs();
+ return TraverseTemplateArguments(Args.data(), Args.size());
+ }
+
+ return true;
+}
+
+void Sema::MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T) {
+ MarkReferencedDecls Marker(*this, Loc);
+ Marker.TraverseType(Context.getCanonicalType(T));
+}
+
+namespace {
+ /// \brief Helper class that marks all of the declarations referenced by
+ /// potentially-evaluated subexpressions as "referenced".
+ class EvaluatedExprMarker : public EvaluatedExprVisitor<EvaluatedExprMarker> {
+ Sema &S;
+ bool SkipLocalVariables;
+
+ public:
+ typedef EvaluatedExprVisitor<EvaluatedExprMarker> Inherited;
+
+ EvaluatedExprMarker(Sema &S, bool SkipLocalVariables)
+ : Inherited(S.Context), S(S), SkipLocalVariables(SkipLocalVariables) { }
+
+ void VisitDeclRefExpr(DeclRefExpr *E) {
+ // If we were asked not to visit local variables, don't.
+ if (SkipLocalVariables) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()))
+ if (VD->hasLocalStorage())
+ return;
+ }
+
+ S.MarkDeclRefReferenced(E);
+ }
+
+ void VisitMemberExpr(MemberExpr *E) {
+ S.MarkMemberReferenced(E);
+ Inherited::VisitMemberExpr(E);
+ }
+
+ void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ S.MarkFunctionReferenced(E->getLocStart(),
+ const_cast<CXXDestructorDecl*>(E->getTemporary()->getDestructor()));
+ Visit(E->getSubExpr());
+ }
+
+ void VisitCXXNewExpr(CXXNewExpr *E) {
+ if (E->getOperatorNew())
+ S.MarkFunctionReferenced(E->getLocStart(), E->getOperatorNew());
+ if (E->getOperatorDelete())
+ S.MarkFunctionReferenced(E->getLocStart(), E->getOperatorDelete());
+ Inherited::VisitCXXNewExpr(E);
+ }
+
+ void VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ if (E->getOperatorDelete())
+ S.MarkFunctionReferenced(E->getLocStart(), E->getOperatorDelete());
+ QualType Destroyed = S.Context.getBaseElementType(E->getDestroyedType());
+ if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
+ S.MarkFunctionReferenced(E->getLocStart(),
+ S.LookupDestructor(Record));
+ }
+
+ Inherited::VisitCXXDeleteExpr(E);
+ }
+
+ void VisitCXXConstructExpr(CXXConstructExpr *E) {
+ S.MarkFunctionReferenced(E->getLocStart(), E->getConstructor());
+ Inherited::VisitCXXConstructExpr(E);
+ }
+
+ void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
+ Visit(E->getExpr());
+ }
+
+ void VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ Inherited::VisitImplicitCastExpr(E);
+
+ if (E->getCastKind() == CK_LValueToRValue)
+ S.UpdateMarkingForLValueToRValue(E->getSubExpr());
+ }
+ };
+}
+
+/// \brief Mark any declarations that appear within this expression or any
+/// potentially-evaluated subexpressions as "referenced".
+///
+/// \param SkipLocalVariables If true, don't mark local variables as
+/// 'referenced'.
+void Sema::MarkDeclarationsReferencedInExpr(Expr *E,
+ bool SkipLocalVariables) {
+ EvaluatedExprMarker(*this, SkipLocalVariables).Visit(E);
+}
+
+/// \brief Emit a diagnostic that describes an effect on the run-time behavior
+/// of the program being compiled.
+///
+/// This routine emits the given diagnostic when the code currently being
+/// type-checked is "potentially evaluated", meaning that there is a
+/// possibility that the code will actually be executable. Code in sizeof()
+/// expressions, code used only during overload resolution, etc., are not
+/// potentially evaluated. This routine will suppress such diagnostics or,
+/// in the absolutely nutty case of potentially potentially evaluated
+/// expressions (C++ typeid), queue the diagnostic to potentially emit it
+/// later.
+///
+/// This routine should be used for all diagnostics that describe the run-time
+/// behavior of a program, such as passing a non-POD value through an ellipsis.
+/// Failure to do so will likely result in spurious diagnostics or failures
+/// during overload resolution or within sizeof/alignof/typeof/typeid.
+bool Sema::DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
+ const PartialDiagnostic &PD) {
+ switch (ExprEvalContexts.back().Context) {
+ case Unevaluated:
+ case UnevaluatedAbstract:
+ // The argument will never be evaluated, so don't complain.
+ break;
+
+ case ConstantEvaluated:
+ // Relevant diagnostics should be produced by constant evaluation.
+ break;
+
+ case PotentiallyEvaluated:
+ case PotentiallyEvaluatedIfUsed:
+ if (Statement && getCurFunctionOrMethodDecl()) {
+ FunctionScopes.back()->PossiblyUnreachableDiags.
+ push_back(sema::PossiblyUnreachableDiag(PD, Loc, Statement));
+ }
+ else
+ Diag(Loc, PD);
+
+ return true;
+ }
+
+ return false;
+}
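+
+// For illustration (hypothetical user code): a runtime-behavior diagnostic
+// such as passing a non-POD object through an ellipsis must stay quiet here:
+// \code
+//   struct S { S(); };
+//   int printf(const char *, ...);
+//   int n = sizeof(printf("%d", S())); // unevaluated operand: no warning
+// \endcode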
+
+bool Sema::CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
+ CallExpr *CE, FunctionDecl *FD) {
+ if (ReturnType->isVoidType() || !ReturnType->isIncompleteType())
+ return false;
+
+ // If we're inside a decltype's expression, don't check for a valid return
+ // type or construct temporaries until we know whether this is the last call.
+ if (ExprEvalContexts.back().IsDecltype) {
+ ExprEvalContexts.back().DelayedDecltypeCalls.push_back(CE);
+ return false;
+ }
+
+ class CallReturnIncompleteDiagnoser : public TypeDiagnoser {
+ FunctionDecl *FD;
+ CallExpr *CE;
+
+ public:
+ CallReturnIncompleteDiagnoser(FunctionDecl *FD, CallExpr *CE)
+ : FD(FD), CE(CE) { }
+
+ void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
+ if (!FD) {
+ S.Diag(Loc, diag::err_call_incomplete_return)
+ << T << CE->getSourceRange();
+ return;
+ }
+
+ S.Diag(Loc, diag::err_call_function_incomplete_return)
+ << CE->getSourceRange() << FD->getDeclName() << T;
+ S.Diag(FD->getLocation(), diag::note_entity_declared_at)
+ << FD->getDeclName();
+ }
+ } Diagnoser(FD, CE);
+
+ if (RequireCompleteType(Loc, ReturnType, Diagnoser))
+ return true;
+
+ return false;
+}
+
+// Diagnose the s/=/==/ and s/\|=/!=/ typos. Note that adding parentheses
+// will prevent this condition from triggering, which is what we want.
+void Sema::DiagnoseAssignmentAsCondition(Expr *E) {
+ SourceLocation Loc;
+
+ unsigned diagnostic = diag::warn_condition_is_assignment;
+ bool IsOrAssign = false;
+
+ if (BinaryOperator *Op = dyn_cast<BinaryOperator>(E)) {
+ if (Op->getOpcode() != BO_Assign && Op->getOpcode() != BO_OrAssign)
+ return;
+
+ IsOrAssign = Op->getOpcode() == BO_OrAssign;
+
+ // Greylist some idioms by putting them into a warning subcategory.
+ if (ObjCMessageExpr *ME
+ = dyn_cast<ObjCMessageExpr>(Op->getRHS()->IgnoreParenCasts())) {
+ Selector Sel = ME->getSelector();
+
+ // self = [<foo> init...]
+ if (isSelfExpr(Op->getLHS()) && ME->getMethodFamily() == OMF_init)
+ diagnostic = diag::warn_condition_is_idiomatic_assignment;
+
+ // <foo> = [<bar> nextObject]
+ else if (Sel.isUnarySelector() && Sel.getNameForSlot(0) == "nextObject")
+ diagnostic = diag::warn_condition_is_idiomatic_assignment;
+ }
+
+ Loc = Op->getOperatorLoc();
+ } else if (CXXOperatorCallExpr *Op = dyn_cast<CXXOperatorCallExpr>(E)) {
+ if (Op->getOperator() != OO_Equal && Op->getOperator() != OO_PipeEqual)
+ return;
+
+ IsOrAssign = Op->getOperator() == OO_PipeEqual;
+ Loc = Op->getOperatorLoc();
+ } else if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E))
+ return DiagnoseAssignmentAsCondition(POE->getSyntacticForm());
+ else {
+ // Not an assignment.
+ return;
+ }
+
+ Diag(Loc, diagnostic) << E->getSourceRange();
+
+ SourceLocation Open = E->getLocStart();
+ SourceLocation Close = getLocForEndOfToken(E->getSourceRange().getEnd());
+ Diag(Loc, diag::note_condition_assign_silence)
+ << FixItHint::CreateInsertion(Open, "(")
+ << FixItHint::CreateInsertion(Close, ")");
+
+ if (IsOrAssign)
+ Diag(Loc, diag::note_condition_or_assign_to_comparison)
+ << FixItHint::CreateReplacement(Loc, "!=");
+ else
+ Diag(Loc, diag::note_condition_assign_to_comparison)
+ << FixItHint::CreateReplacement(Loc, "==");
+}
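+
+// For illustration (hypothetical user code):
+// \code
+//   if (x = 0) {} // warning; the fix-its offer '(x = 0)' to silence it
+//                 // or 'x == 0' if a comparison was intended
+// \endcode
+// The Objective-C 'if (self = [super init])' idiom instead warns under the
+// greylisted subcategory checked above.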
+
+/// \brief Redundant parentheses over an equality comparison can indicate
+/// that the user intended an assignment used as condition.
+void Sema::DiagnoseEqualityWithExtraParens(ParenExpr *ParenE) {
+ // Don't warn if the parens came from a macro.
+ SourceLocation parenLoc = ParenE->getLocStart();
+ if (parenLoc.isInvalid() || parenLoc.isMacroID())
+ return;
+ // Don't warn for dependent expressions.
+ if (ParenE->isTypeDependent())
+ return;
+
+ Expr *E = ParenE->IgnoreParens();
+
+ if (BinaryOperator *opE = dyn_cast<BinaryOperator>(E))
+ if (opE->getOpcode() == BO_EQ &&
+ opE->getLHS()->IgnoreParenImpCasts()->isModifiableLvalue(Context)
+ == Expr::MLV_Valid) {
+ SourceLocation Loc = opE->getOperatorLoc();
+
+ Diag(Loc, diag::warn_equality_with_extra_parens) << E->getSourceRange();
+ SourceRange ParenERange = ParenE->getSourceRange();
+ Diag(Loc, diag::note_equality_comparison_silence)
+ << FixItHint::CreateRemoval(ParenERange.getBegin())
+ << FixItHint::CreateRemoval(ParenERange.getEnd());
+ Diag(Loc, diag::note_equality_comparison_to_assign)
+ << FixItHint::CreateReplacement(Loc, "=");
+ }
+}
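+
+// For illustration (hypothetical user code):
+// \code
+//   if ((x == 4)) {} // warning: equality comparison with extraneous
+//                    // parentheses; fix-its offer removing the parentheses
+//                    // or replacing '==' with '=' if assignment was intended
+// \endcode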
+
+ExprResult Sema::CheckBooleanCondition(Expr *E, SourceLocation Loc) {
+ DiagnoseAssignmentAsCondition(E);
+ if (ParenExpr *parenE = dyn_cast<ParenExpr>(E))
+ DiagnoseEqualityWithExtraParens(parenE);
+
+ ExprResult result = CheckPlaceholderExpr(E);
+ if (result.isInvalid()) return ExprError();
+ E = result.get();
+
+ if (!E->isTypeDependent()) {
+ if (getLangOpts().CPlusPlus)
+ return CheckCXXBooleanCondition(E); // C++ 6.4p4
+
+ ExprResult ERes = DefaultFunctionArrayLvalueConversion(E);
+ if (ERes.isInvalid())
+ return ExprError();
+ E = ERes.get();
+
+ QualType T = E->getType();
+ if (!T->isScalarType()) { // C99 6.8.4.1p1
+ Diag(Loc, diag::err_typecheck_statement_requires_scalar)
+ << T << E->getSourceRange();
+ return ExprError();
+ }
+ CheckBoolLikeConversion(E, Loc);
+ }
+
+ return E;
+}
+
+ExprResult Sema::ActOnBooleanCondition(Scope *S, SourceLocation Loc,
+ Expr *SubExpr) {
+ if (!SubExpr)
+ return ExprError();
+
+ return CheckBooleanCondition(SubExpr, Loc);
+}
+
+namespace {
+ /// A visitor for rebuilding a call to an __unknown_any expression
+ /// to have an appropriate type.
+ struct RebuildUnknownAnyFunction
+ : StmtVisitor<RebuildUnknownAnyFunction, ExprResult> {
+
+ Sema &S;
+
+ RebuildUnknownAnyFunction(Sema &S) : S(S) {}
+
+ ExprResult VisitStmt(Stmt *S) {
+ llvm_unreachable("unexpected statement!");
+ }
+
+ ExprResult VisitExpr(Expr *E) {
+ S.Diag(E->getExprLoc(), diag::err_unsupported_unknown_any_call)
+ << E->getSourceRange();
+ return ExprError();
+ }
+
+    /// Rebuild an expression which simply semantically wraps another
+    /// expression whose type and value kind it shares.
+ template <class T> ExprResult rebuildSugarExpr(T *E) {
+ ExprResult SubResult = Visit(E->getSubExpr());
+ if (SubResult.isInvalid()) return ExprError();
+
+ Expr *SubExpr = SubResult.get();
+ E->setSubExpr(SubExpr);
+ E->setType(SubExpr->getType());
+ E->setValueKind(SubExpr->getValueKind());
+ assert(E->getObjectKind() == OK_Ordinary);
+ return E;
+ }
+
+ ExprResult VisitParenExpr(ParenExpr *E) {
+ return rebuildSugarExpr(E);
+ }
+
+ ExprResult VisitUnaryExtension(UnaryOperator *E) {
+ return rebuildSugarExpr(E);
+ }
+
+ ExprResult VisitUnaryAddrOf(UnaryOperator *E) {
+ ExprResult SubResult = Visit(E->getSubExpr());
+ if (SubResult.isInvalid()) return ExprError();
+
+ Expr *SubExpr = SubResult.get();
+ E->setSubExpr(SubExpr);
+ E->setType(S.Context.getPointerType(SubExpr->getType()));
+ assert(E->getValueKind() == VK_RValue);
+ assert(E->getObjectKind() == OK_Ordinary);
+ return E;
+ }
+
+ ExprResult resolveDecl(Expr *E, ValueDecl *VD) {
+ if (!isa<FunctionDecl>(VD)) return VisitExpr(E);
+
+ E->setType(VD->getType());
+
+ assert(E->getValueKind() == VK_RValue);
+ if (S.getLangOpts().CPlusPlus &&
+ !(isa<CXXMethodDecl>(VD) &&
+ cast<CXXMethodDecl>(VD)->isInstance()))
+ E->setValueKind(VK_LValue);
+
+ return E;
+ }
+
+ ExprResult VisitMemberExpr(MemberExpr *E) {
+ return resolveDecl(E, E->getMemberDecl());
+ }
+
+ ExprResult VisitDeclRefExpr(DeclRefExpr *E) {
+ return resolveDecl(E, E->getDecl());
+ }
+ };
+}
+
+/// Given a function expression of unknown-any type, try to rebuild it
+/// to have a function type.
+static ExprResult rebuildUnknownAnyFunction(Sema &S, Expr *FunctionExpr) {
+ ExprResult Result = RebuildUnknownAnyFunction(S).Visit(FunctionExpr);
+ if (Result.isInvalid()) return ExprError();
+ return S.DefaultFunctionArrayConversion(Result.get());
+}
+
+namespace {
+ /// A visitor for rebuilding an expression of type __unknown_anytype
+ /// into one which resolves the type directly on the referring
+ /// expression. Strict preservation of the original source
+ /// structure is not a goal.
+ struct RebuildUnknownAnyExpr
+ : StmtVisitor<RebuildUnknownAnyExpr, ExprResult> {
+
+ Sema &S;
+
+ /// The current destination type.
+ QualType DestType;
+
+ RebuildUnknownAnyExpr(Sema &S, QualType CastType)
+ : S(S), DestType(CastType) {}
+
+ ExprResult VisitStmt(Stmt *S) {
+ llvm_unreachable("unexpected statement!");
+ }
+
+ ExprResult VisitExpr(Expr *E) {
+ S.Diag(E->getExprLoc(), diag::err_unsupported_unknown_any_expr)
+ << E->getSourceRange();
+ return ExprError();
+ }
+
+ ExprResult VisitCallExpr(CallExpr *E);
+ ExprResult VisitObjCMessageExpr(ObjCMessageExpr *E);
+
+    /// Rebuild an expression which simply semantically wraps another
+    /// expression whose type and value kind it shares.
+ template <class T> ExprResult rebuildSugarExpr(T *E) {
+ ExprResult SubResult = Visit(E->getSubExpr());
+ if (SubResult.isInvalid()) return ExprError();
+ Expr *SubExpr = SubResult.get();
+ E->setSubExpr(SubExpr);
+ E->setType(SubExpr->getType());
+ E->setValueKind(SubExpr->getValueKind());
+ assert(E->getObjectKind() == OK_Ordinary);
+ return E;
+ }
+
+ ExprResult VisitParenExpr(ParenExpr *E) {
+ return rebuildSugarExpr(E);
+ }
+
+ ExprResult VisitUnaryExtension(UnaryOperator *E) {
+ return rebuildSugarExpr(E);
+ }
+
+ ExprResult VisitUnaryAddrOf(UnaryOperator *E) {
+ const PointerType *Ptr = DestType->getAs<PointerType>();
+ if (!Ptr) {
+ S.Diag(E->getOperatorLoc(), diag::err_unknown_any_addrof)
+ << E->getSourceRange();
+ return ExprError();
+ }
+ assert(E->getValueKind() == VK_RValue);
+ assert(E->getObjectKind() == OK_Ordinary);
+ E->setType(DestType);
+
+ // Build the sub-expression as if it were an object of the pointee type.
+ DestType = Ptr->getPointeeType();
+ ExprResult SubResult = Visit(E->getSubExpr());
+ if (SubResult.isInvalid()) return ExprError();
+ E->setSubExpr(SubResult.get());
+ return E;
+ }
+
+ ExprResult VisitImplicitCastExpr(ImplicitCastExpr *E);
+
+ ExprResult resolveDecl(Expr *E, ValueDecl *VD);
+
+ ExprResult VisitMemberExpr(MemberExpr *E) {
+ return resolveDecl(E, E->getMemberDecl());
+ }
+
+ ExprResult VisitDeclRefExpr(DeclRefExpr *E) {
+ return resolveDecl(E, E->getDecl());
+ }
+ };
+}
+
+/// Rebuilds a call expression which yielded __unknown_anytype.
+ExprResult RebuildUnknownAnyExpr::VisitCallExpr(CallExpr *E) {
+ Expr *CalleeExpr = E->getCallee();
+
+ enum FnKind {
+ FK_MemberFunction,
+ FK_FunctionPointer,
+ FK_BlockPointer
+ };
+
+ FnKind Kind;
+ QualType CalleeType = CalleeExpr->getType();
+ if (CalleeType == S.Context.BoundMemberTy) {
+ assert(isa<CXXMemberCallExpr>(E) || isa<CXXOperatorCallExpr>(E));
+ Kind = FK_MemberFunction;
+ CalleeType = Expr::findBoundMemberType(CalleeExpr);
+ } else if (const PointerType *Ptr = CalleeType->getAs<PointerType>()) {
+ CalleeType = Ptr->getPointeeType();
+ Kind = FK_FunctionPointer;
+ } else {
+ CalleeType = CalleeType->castAs<BlockPointerType>()->getPointeeType();
+ Kind = FK_BlockPointer;
+ }
+ const FunctionType *FnType = CalleeType->castAs<FunctionType>();
+
+ // Verify that this is a legal result type of a function.
+ if (DestType->isArrayType() || DestType->isFunctionType()) {
+ unsigned diagID = diag::err_func_returning_array_function;
+ if (Kind == FK_BlockPointer)
+ diagID = diag::err_block_returning_array_function;
+
+ S.Diag(E->getExprLoc(), diagID)
+ << DestType->isFunctionType() << DestType;
+ return ExprError();
+ }
+
+ // Otherwise, go ahead and set DestType as the call's result.
+ E->setType(DestType.getNonLValueExprType(S.Context));
+ E->setValueKind(Expr::getValueKindForType(DestType));
+ assert(E->getObjectKind() == OK_Ordinary);
+
+ // Rebuild the function type, replacing the result type with DestType.
+ const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FnType);
+ if (Proto) {
+ // __unknown_anytype(...) is a special case used by the debugger when
+ // it has no idea what a function's signature is.
+ //
+ // We want to build this call essentially under the K&R
+ // unprototyped rules, but making a FunctionNoProtoType in C++
+ // would foul up all sorts of assumptions. However, we cannot
+ // simply pass all arguments as variadic arguments, nor can we
+ // portably just call the function under a non-variadic type; see
+ // the comment on IR-gen's TargetInfo::isNoProtoCallVariadic.
+ // However, it turns out that in practice it is generally safe to
+ // call a function declared as "A foo(B,C,D);" under the prototype
+ // "A foo(B,C,D,...);". The only known exception is with the
+ // Windows ABI, where any variadic function is implicitly cdecl
+ // regardless of its normal CC. Therefore we change the parameter
+ // types to match the types of the arguments.
+ //
+ // This is a hack, but it is far superior to moving the
+ // corresponding target-specific code from IR-gen to Sema/AST.
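+    //
+    // For illustration (hypothetical debugger expression): a call such as
+    //   (float)unknown_fn(1, 2.0)
+    // against a declaration '__unknown_anytype unknown_fn(...)' is rebuilt
+    // here with the function type 'float (int, double, ...)'.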
+
+ ArrayRef<QualType> ParamTypes = Proto->getParamTypes();
+ SmallVector<QualType, 8> ArgTypes;
+ if (ParamTypes.empty() && Proto->isVariadic()) { // the special case
+ ArgTypes.reserve(E->getNumArgs());
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
+ Expr *Arg = E->getArg(i);
+ QualType ArgType = Arg->getType();
+ if (E->isLValue()) {
+ ArgType = S.Context.getLValueReferenceType(ArgType);
+ } else if (E->isXValue()) {
+ ArgType = S.Context.getRValueReferenceType(ArgType);
+ }
+ ArgTypes.push_back(ArgType);
+ }
+ ParamTypes = ArgTypes;
+ }
+ DestType = S.Context.getFunctionType(DestType, ParamTypes,
+ Proto->getExtProtoInfo());
+ } else {
+ DestType = S.Context.getFunctionNoProtoType(DestType,
+ FnType->getExtInfo());
+ }
+
+ // Rebuild the appropriate pointer-to-function type.
+ switch (Kind) {
+ case FK_MemberFunction:
+ // Nothing to do.
+ break;
+
+ case FK_FunctionPointer:
+ DestType = S.Context.getPointerType(DestType);
+ break;
+
+ case FK_BlockPointer:
+ DestType = S.Context.getBlockPointerType(DestType);
+ break;
+ }
+
+ // Finally, we can recurse.
+ ExprResult CalleeResult = Visit(CalleeExpr);
+ if (!CalleeResult.isUsable()) return ExprError();
+ E->setCallee(CalleeResult.get());
+
+ // Bind a temporary if necessary.
+ return S.MaybeBindToTemporary(E);
+}
+
+ExprResult RebuildUnknownAnyExpr::VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ // Verify that this is a legal result type of a call.
+ if (DestType->isArrayType() || DestType->isFunctionType()) {
+ S.Diag(E->getExprLoc(), diag::err_func_returning_array_function)
+ << DestType->isFunctionType() << DestType;
+ return ExprError();
+ }
+
+ // Rewrite the method result type if available.
+ if (ObjCMethodDecl *Method = E->getMethodDecl()) {
+ assert(Method->getReturnType() == S.Context.UnknownAnyTy);
+ Method->setReturnType(DestType);
+ }
+
+ // Change the type of the message.
+ E->setType(DestType.getNonReferenceType());
+ E->setValueKind(Expr::getValueKindForType(DestType));
+
+ return S.MaybeBindToTemporary(E);
+}
+
+ExprResult RebuildUnknownAnyExpr::VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ // The only case we should ever see here is a function-to-pointer decay.
+ if (E->getCastKind() == CK_FunctionToPointerDecay) {
+ assert(E->getValueKind() == VK_RValue);
+ assert(E->getObjectKind() == OK_Ordinary);
+
+ E->setType(DestType);
+
+ // Rebuild the sub-expression as the pointee (function) type.
+ DestType = DestType->castAs<PointerType>()->getPointeeType();
+
+ ExprResult Result = Visit(E->getSubExpr());
+ if (!Result.isUsable()) return ExprError();
+
+ E->setSubExpr(Result.get());
+ return E;
+ } else if (E->getCastKind() == CK_LValueToRValue) {
+ assert(E->getValueKind() == VK_RValue);
+ assert(E->getObjectKind() == OK_Ordinary);
+
+ assert(isa<BlockPointerType>(E->getType()));
+
+ E->setType(DestType);
+
+    // The sub-expression has to be an lvalue reference, so rebuild it as such.
+ DestType = S.Context.getLValueReferenceType(DestType);
+
+ ExprResult Result = Visit(E->getSubExpr());
+ if (!Result.isUsable()) return ExprError();
+
+ E->setSubExpr(Result.get());
+ return E;
+ } else {
+ llvm_unreachable("Unhandled cast type!");
+ }
+}
+
+ExprResult RebuildUnknownAnyExpr::resolveDecl(Expr *E, ValueDecl *VD) {
+ ExprValueKind ValueKind = VK_LValue;
+ QualType Type = DestType;
+
+ // We know how to make this work for certain kinds of decls:
+
+ // - functions
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(VD)) {
+ if (const PointerType *Ptr = Type->getAs<PointerType>()) {
+ DestType = Ptr->getPointeeType();
+ ExprResult Result = resolveDecl(E, VD);
+ if (Result.isInvalid()) return ExprError();
+ return S.ImpCastExprToType(Result.get(), Type,
+ CK_FunctionToPointerDecay, VK_RValue);
+ }
+
+ if (!Type->isFunctionType()) {
+ S.Diag(E->getExprLoc(), diag::err_unknown_any_function)
+ << VD << E->getSourceRange();
+ return ExprError();
+ }
+ if (const FunctionProtoType *FT = Type->getAs<FunctionProtoType>()) {
+ // We must match the FunctionDecl's type to the hack introduced in
+ // RebuildUnknownAnyExpr::VisitCallExpr to vararg functions of unknown
+ // type. See the lengthy commentary in that routine.
+ QualType FDT = FD->getType();
+ const FunctionType *FnType = FDT->castAs<FunctionType>();
+ const FunctionProtoType *Proto = dyn_cast_or_null<FunctionProtoType>(FnType);
+ DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E);
+ if (DRE && Proto && Proto->getParamTypes().empty() && Proto->isVariadic()) {
+ SourceLocation Loc = FD->getLocation();
+ FunctionDecl *NewFD = FunctionDecl::Create(FD->getASTContext(),
+ FD->getDeclContext(),
+ Loc, Loc, FD->getNameInfo().getName(),
+ DestType, FD->getTypeSourceInfo(),
+ SC_None, false/*isInlineSpecified*/,
+ FD->hasPrototype(),
+ false/*isConstexprSpecified*/);
+
+ if (FD->getQualifier())
+ NewFD->setQualifierInfo(FD->getQualifierLoc());
+
+ SmallVector<ParmVarDecl*, 16> Params;
+ for (const auto &AI : FT->param_types()) {
+ ParmVarDecl *Param =
+ S.BuildParmVarDeclForTypedef(FD, Loc, AI);
+ Param->setScopeInfo(0, Params.size());
+ Params.push_back(Param);
+ }
+ NewFD->setParams(Params);
+ DRE->setDecl(NewFD);
+ VD = DRE->getDecl();
+ }
+ }
+
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
+ if (MD->isInstance()) {
+ ValueKind = VK_RValue;
+ Type = S.Context.BoundMemberTy;
+ }
+
+ // Function references aren't l-values in C.
+ if (!S.getLangOpts().CPlusPlus)
+ ValueKind = VK_RValue;
+
+ // - variables
+ } else if (isa<VarDecl>(VD)) {
+ if (const ReferenceType *RefTy = Type->getAs<ReferenceType>()) {
+ Type = RefTy->getPointeeType();
+ } else if (Type->isFunctionType()) {
+ S.Diag(E->getExprLoc(), diag::err_unknown_any_var_function_type)
+ << VD << E->getSourceRange();
+ return ExprError();
+ }
+
+ // - nothing else
+ } else {
+ S.Diag(E->getExprLoc(), diag::err_unsupported_unknown_any_decl)
+ << VD << E->getSourceRange();
+ return ExprError();
+ }
+
+ // Modifying the declaration like this is friendly to IR-gen but
+ // also really dangerous.
+ VD->setType(DestType);
+ E->setType(Type);
+ E->setValueKind(ValueKind);
+ return E;
+}
+
+/// Check a cast of an unknown-any type. We intentionally only
+/// trigger this for C-style casts.
+ExprResult Sema::checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
+ Expr *CastExpr, CastKind &CastKind,
+ ExprValueKind &VK, CXXCastPath &Path) {
+ // Rewrite the casted expression from scratch.
+ ExprResult result = RebuildUnknownAnyExpr(*this, CastType).Visit(CastExpr);
+ if (!result.isUsable()) return ExprError();
+
+ CastExpr = result.get();
+ VK = CastExpr->getValueKind();
+ CastKind = CK_NoOp;
+
+ return CastExpr;
+}
+
+ExprResult Sema::forceUnknownAnyToType(Expr *E, QualType ToType) {
+ return RebuildUnknownAnyExpr(*this, ToType).Visit(E);
+}
+
+ExprResult Sema::checkUnknownAnyArg(SourceLocation callLoc,
+ Expr *arg, QualType &paramType) {
+ // If the syntactic form of the argument is not an explicit cast of
+ // any sort, just do default argument promotion.
+ ExplicitCastExpr *castArg = dyn_cast<ExplicitCastExpr>(arg->IgnoreParens());
+ if (!castArg) {
+ ExprResult result = DefaultArgumentPromotion(arg);
+ if (result.isInvalid()) return ExprError();
+ paramType = result.get()->getType();
+ return result;
+ }
+
+ // Otherwise, use the type that was written in the explicit cast.
+ assert(!arg->hasPlaceholderType());
+ paramType = castArg->getTypeAsWritten();
+
+ // Copy-initialize a parameter of that type.
+ InitializedEntity entity =
+ InitializedEntity::InitializeParameter(Context, paramType,
+ /*consumed*/ false);
+ return PerformCopyInitialization(entity, callLoc, arg);
+}
+
+static ExprResult diagnoseUnknownAnyExpr(Sema &S, Expr *E) {
+ Expr *orig = E;
+ unsigned diagID = diag::err_uncasted_use_of_unknown_any;
+ while (true) {
+ E = E->IgnoreParenImpCasts();
+ if (CallExpr *call = dyn_cast<CallExpr>(E)) {
+ E = call->getCallee();
+ diagID = diag::err_uncasted_call_of_unknown_any;
+ } else {
+ break;
+ }
+ }
+
+ SourceLocation loc;
+ NamedDecl *d;
+ if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(E)) {
+ loc = ref->getLocation();
+ d = ref->getDecl();
+ } else if (MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
+ loc = mem->getMemberLoc();
+ d = mem->getMemberDecl();
+ } else if (ObjCMessageExpr *msg = dyn_cast<ObjCMessageExpr>(E)) {
+ diagID = diag::err_uncasted_call_of_unknown_any;
+ loc = msg->getSelectorStartLoc();
+ d = msg->getMethodDecl();
+ if (!d) {
+ S.Diag(loc, diag::err_uncasted_send_to_unknown_any_method)
+ << static_cast<unsigned>(msg->isClassMessage()) << msg->getSelector()
+ << orig->getSourceRange();
+ return ExprError();
+ }
+ } else {
+ S.Diag(E->getExprLoc(), diag::err_unsupported_unknown_any_expr)
+ << E->getSourceRange();
+ return ExprError();
+ }
+
+ S.Diag(loc, diagID) << d << orig->getSourceRange();
+
+ // Never recoverable.
+ return ExprError();
+}
+
+/// Check for operands with placeholder types and complain if found.
+/// Returns ExprError() if there was an error and no recovery was possible.
+ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
+ if (!getLangOpts().CPlusPlus) {
+ // C cannot handle TypoExpr nodes on either side of a binop because it
+ // doesn't handle dependent types properly, so make sure any TypoExprs have
+ // been dealt with before checking the operands.
+ ExprResult Result = CorrectDelayedTyposInExpr(E);
+ if (!Result.isUsable()) return ExprError();
+ E = Result.get();
+ }
+
+ const BuiltinType *placeholderType = E->getType()->getAsPlaceholderType();
+ if (!placeholderType) return E;
+
+ switch (placeholderType->getKind()) {
+
+ // Overloaded expressions.
+ case BuiltinType::Overload: {
+ // Try to resolve a single function template specialization.
+ // This is obligatory.
+ ExprResult result = E;
+ if (ResolveAndFixSingleFunctionTemplateSpecialization(result, false)) {
+ return result;
+
+ // If that failed, try to recover with a call.
+ } else {
+ tryToRecoverWithCall(result, PDiag(diag::err_ovl_unresolvable),
+ /*complain*/ true);
+ return result;
+ }
+ }
+
+ // Bound member functions.
+ case BuiltinType::BoundMember: {
+ ExprResult result = E;
+ const Expr *BME = E->IgnoreParens();
+ PartialDiagnostic PD = PDiag(diag::err_bound_member_function);
+ // Try to give a nicer diagnostic if it is a bound member that we recognize.
+ if (isa<CXXPseudoDestructorExpr>(BME)) {
+ PD = PDiag(diag::err_dtor_expr_without_call) << /*pseudo-destructor*/ 1;
+ } else if (const auto *ME = dyn_cast<MemberExpr>(BME)) {
+ if (ME->getMemberNameInfo().getName().getNameKind() ==
+ DeclarationName::CXXDestructorName)
+ PD = PDiag(diag::err_dtor_expr_without_call) << /*destructor*/ 0;
+ }
+ tryToRecoverWithCall(result, PD,
+ /*complain*/ true);
+ return result;
+ }
+
+ // ARC unbridged casts.
+ case BuiltinType::ARCUnbridgedCast: {
+ Expr *realCast = stripARCUnbridgedCast(E);
+ diagnoseARCUnbridgedCast(realCast);
+ return realCast;
+ }
+
+ // Expressions of unknown type.
+ case BuiltinType::UnknownAny:
+ return diagnoseUnknownAnyExpr(*this, E);
+
+ // Pseudo-objects.
+ case BuiltinType::PseudoObject:
+ return checkPseudoObjectRValue(E);
+
+ case BuiltinType::BuiltinFn: {
+ // Accept __noop without parens by implicitly converting it to a call expr.
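+    // (For illustration: a bare '__noop;' thus becomes the call '__noop()'
+    // with result type int.)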
+ auto *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
+ if (DRE) {
+ auto *FD = cast<FunctionDecl>(DRE->getDecl());
+ if (FD->getBuiltinID() == Builtin::BI__noop) {
+ E = ImpCastExprToType(E, Context.getPointerType(FD->getType()),
+ CK_BuiltinFnToFnPtr).get();
+ return new (Context) CallExpr(Context, E, None, Context.IntTy,
+ VK_RValue, SourceLocation());
+ }
+ }
+
+ Diag(E->getLocStart(), diag::err_builtin_fn_use);
+ return ExprError();
+ }
+
+ // Expressions of unknown type.
+ case BuiltinType::OMPArraySection:
+ Diag(E->getLocStart(), diag::err_omp_array_section_use);
+ return ExprError();
+
+ // Everything else should be impossible.
+#define BUILTIN_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#define PLACEHOLDER_TYPE(Id, SingletonId)
+#include "clang/AST/BuiltinTypes.def"
+ break;
+ }
+
+ llvm_unreachable("invalid placeholder type!");
+}
+
+bool Sema::CheckCaseExpression(Expr *E) {
+ if (E->isTypeDependent())
+ return true;
+ if (E->isValueDependent() || E->isIntegerConstantExpr(Context))
+ return E->getType()->isIntegralOrEnumerationType();
+ return false;
+}
+
+/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
+ExprResult
+Sema::ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) {
+ assert((Kind == tok::kw___objc_yes || Kind == tok::kw___objc_no) &&
+ "Unknown Objective-C Boolean value!");
+ QualType BoolT = Context.ObjCBuiltinBoolTy;
+ if (!Context.getBOOLDecl()) {
+ LookupResult Result(*this, &Context.Idents.get("BOOL"), OpLoc,
+ Sema::LookupOrdinaryName);
+ if (LookupName(Result, getCurScope()) && Result.isSingleResult()) {
+ NamedDecl *ND = Result.getFoundDecl();
+ if (TypedefDecl *TD = dyn_cast<TypedefDecl>(ND))
+ Context.setBOOLDecl(TD);
+ }
+ }
+ if (Context.getBOOLDecl())
+ BoolT = Context.getBOOLType();
+ return new (Context)
+ ObjCBoolLiteralExpr(Kind == tok::kw___objc_yes, BoolT, OpLoc);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
new file mode 100644
index 0000000..2ad595f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
@@ -0,0 +1,6856 @@
+//===--- SemaExprCXX.cpp - Semantic Analysis for Expressions --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Implements semantic analysis for C++ expressions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "TreeTransform.h"
+#include "TypeLocBuilder.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTLambda.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaLambda.h"
+#include "clang/Sema/TemplateDeduction.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+using namespace sema;
+
+/// \brief Handle the result of the special case name lookup for inheriting
+/// constructor declarations. 'NS::X::X' and 'NS::X<...>::X' are treated as
+/// constructor names in member using declarations, even if 'X' is not the
+/// name of the corresponding type.
+ParsedType Sema::getInheritingConstructorName(CXXScopeSpec &SS,
+ SourceLocation NameLoc,
+ IdentifierInfo &Name) {
+ NestedNameSpecifier *NNS = SS.getScopeRep();
+
+ // Convert the nested-name-specifier into a type.
+ QualType Type;
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ Type = QualType(NNS->getAsType(), 0);
+ break;
+
+ case NestedNameSpecifier::Identifier:
+ // Strip off the last layer of the nested-name-specifier and build a
+ // typename type for it.
+ assert(NNS->getAsIdentifier() == &Name && "not a constructor name");
+ Type = Context.getDependentNameType(ETK_None, NNS->getPrefix(),
+ NNS->getAsIdentifier());
+ break;
+
+ case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Super:
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ llvm_unreachable("Nested name specifier is not a type for inheriting ctor");
+ }
+
+ // This reference to the type is located entirely at the location of the
+ // final identifier in the qualified-id.
+ return CreateParsedType(Type,
+ Context.getTrivialTypeSourceInfo(Type, NameLoc));
+}
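+
+// For illustration (hypothetical user code):
+// \code
+//   namespace N { template <typename T> struct X { X(int); }; }
+//   struct Y : N::X<int> {
+//     using N::X<int>::X; // 'X' here names the inherited constructors
+//   };
+// \endcode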
+
+ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
+ IdentifierInfo &II,
+ SourceLocation NameLoc,
+ Scope *S, CXXScopeSpec &SS,
+ ParsedType ObjectTypePtr,
+ bool EnteringContext) {
+ // Determine where to perform name lookup.
+
+ // FIXME: This area of the standard is very messy, and the current
+ // wording is rather unclear about which scopes we search for the
+ // destructor name; see core issues 399 and 555. Issue 399 in
+ // particular shows where the current description of destructor name
+ // lookup is completely out of line with existing practice, e.g.,
+ // this appears to be ill-formed:
+ //
+ // namespace N {
+ // template <typename T> struct S {
+ // ~S();
+ // };
+ // }
+ //
+ // void f(N::S<int>* s) {
+ // s->N::S<int>::~S();
+ // }
+ //
+ // See also PR6358 and PR6359.
+ // For this reason, we're currently only doing the C++03 version of this
+ // code; the C++0x version has to wait until we get a proper spec.
+ QualType SearchType;
+ DeclContext *LookupCtx = nullptr;
+ bool isDependent = false;
+ bool LookInScope = false;
+
+ if (SS.isInvalid())
+ return ParsedType();
+
+ // If we have an object type, it's because we are in a
+ // pseudo-destructor-expression or a member access expression, and
+ // we know what type we're looking for.
+ if (ObjectTypePtr)
+ SearchType = GetTypeFromParser(ObjectTypePtr);
+
+ if (SS.isSet()) {
+ NestedNameSpecifier *NNS = SS.getScopeRep();
+
+ bool AlreadySearched = false;
+ bool LookAtPrefix = true;
+ // C++11 [basic.lookup.qual]p6:
+ // If a pseudo-destructor-name (5.2.4) contains a nested-name-specifier,
+ // the type-names are looked up as types in the scope designated by the
+ // nested-name-specifier. Similarly, in a qualified-id of the form:
+ //
+ // nested-name-specifier[opt] class-name :: ~ class-name
+ //
+ // the second class-name is looked up in the same scope as the first.
+ //
+ // Here, we determine whether the code below is permitted to look at the
+ // prefix of the nested-name-specifier.
+ DeclContext *DC = computeDeclContext(SS, EnteringContext);
+ if (DC && DC->isFileContext()) {
+ AlreadySearched = true;
+ LookupCtx = DC;
+ isDependent = false;
+ } else if (DC && isa<CXXRecordDecl>(DC)) {
+ LookAtPrefix = false;
+ LookInScope = true;
+ }
+
+ // The second case from the C++03 rules quoted further above.
+ NestedNameSpecifier *Prefix = nullptr;
+ if (AlreadySearched) {
+ // Nothing left to do.
+ } else if (LookAtPrefix && (Prefix = NNS->getPrefix())) {
+ CXXScopeSpec PrefixSS;
+ PrefixSS.Adopt(NestedNameSpecifierLoc(Prefix, SS.location_data()));
+ LookupCtx = computeDeclContext(PrefixSS, EnteringContext);
+ isDependent = isDependentScopeSpecifier(PrefixSS);
+ } else if (ObjectTypePtr) {
+ LookupCtx = computeDeclContext(SearchType);
+ isDependent = SearchType->isDependentType();
+ } else {
+ LookupCtx = computeDeclContext(SS, EnteringContext);
+ isDependent = LookupCtx && LookupCtx->isDependentContext();
+ }
+ } else if (ObjectTypePtr) {
+ // C++ [basic.lookup.classref]p3:
+ // If the unqualified-id is ~type-name, the type-name is looked up
+ // in the context of the entire postfix-expression. If the type T
+ // of the object expression is of a class type C, the type-name is
+ // also looked up in the scope of class C. At least one of the
+ // lookups shall find a name that refers to (possibly
+ // cv-qualified) T.
+ LookupCtx = computeDeclContext(SearchType);
+ isDependent = SearchType->isDependentType();
+ assert((isDependent || !SearchType->isIncompleteType()) &&
+ "Caller should have completed object type");
+
+ LookInScope = true;
+ } else {
+ // Perform lookup into the current scope (only).
+ LookInScope = true;
+ }
+
+ TypeDecl *NonMatchingTypeDecl = nullptr;
+ LookupResult Found(*this, &II, NameLoc, LookupOrdinaryName);
+ for (unsigned Step = 0; Step != 2; ++Step) {
+ // Look for the name first in the computed lookup context (if we
+ // have one) and, if that fails to find a match, in the scope (if
+ // we're allowed to look there).
+ Found.clear();
+ if (Step == 0 && LookupCtx)
+ LookupQualifiedName(Found, LookupCtx);
+ else if (Step == 1 && LookInScope && S)
+ LookupName(Found, S);
+ else
+ continue;
+
+ // FIXME: Should we be suppressing ambiguities here?
+ if (Found.isAmbiguous())
+ return ParsedType();
+
+ if (TypeDecl *Type = Found.getAsSingle<TypeDecl>()) {
+ QualType T = Context.getTypeDeclType(Type);
+ MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
+
+ if (SearchType.isNull() || SearchType->isDependentType() ||
+ Context.hasSameUnqualifiedType(T, SearchType)) {
+ // We found our type!
+
+ return CreateParsedType(T,
+ Context.getTrivialTypeSourceInfo(T, NameLoc));
+ }
+
+ if (!SearchType.isNull())
+ NonMatchingTypeDecl = Type;
+ }
+
+ // If the name that we found is a class template name, and it is
+ // the same name as the template name in the last part of the
+ // nested-name-specifier (if present) or the object type, then
+ // this is the destructor for that class.
+ // FIXME: This is a workaround until we get real drafting for core
+ // issue 399, for which there isn't even an obvious direction.
+ if (ClassTemplateDecl *Template = Found.getAsSingle<ClassTemplateDecl>()) {
+ QualType MemberOfType;
+ if (SS.isSet()) {
+ if (DeclContext *Ctx = computeDeclContext(SS, EnteringContext)) {
+ // Figure out the type of the context, if it has one.
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Ctx))
+ MemberOfType = Context.getTypeDeclType(Record);
+ }
+ }
+ if (MemberOfType.isNull())
+ MemberOfType = SearchType;
+
+ if (MemberOfType.isNull())
+ continue;
+
+ // We're referring into a class template specialization. If the
+ // class template we found is the same as the template being
+ // specialized, we found what we are looking for.
+ if (const RecordType *Record = MemberOfType->getAs<RecordType>()) {
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Record->getDecl())) {
+ if (Spec->getSpecializedTemplate()->getCanonicalDecl() ==
+ Template->getCanonicalDecl())
+ return CreateParsedType(
+ MemberOfType,
+ Context.getTrivialTypeSourceInfo(MemberOfType, NameLoc));
+ }
+
+ continue;
+ }
+
+ // We're referring to an unresolved class template
+      // specialization. Determine whether the class template we found
+ // is the same as the template being specialized or, if we don't
+ // know which template is being specialized, that it at least
+ // has the same name.
+ if (const TemplateSpecializationType *SpecType
+ = MemberOfType->getAs<TemplateSpecializationType>()) {
+ TemplateName SpecName = SpecType->getTemplateName();
+
+ // The class template we found is the same template being
+ // specialized.
+ if (TemplateDecl *SpecTemplate = SpecName.getAsTemplateDecl()) {
+ if (SpecTemplate->getCanonicalDecl() == Template->getCanonicalDecl())
+ return CreateParsedType(
+ MemberOfType,
+ Context.getTrivialTypeSourceInfo(MemberOfType, NameLoc));
+
+ continue;
+ }
+
+ // The class template we found has the same name as the
+ // (dependent) template name being specialized.
+ if (DependentTemplateName *DepTemplate
+ = SpecName.getAsDependentTemplateName()) {
+ if (DepTemplate->isIdentifier() &&
+ DepTemplate->getIdentifier() == Template->getIdentifier())
+ return CreateParsedType(
+ MemberOfType,
+ Context.getTrivialTypeSourceInfo(MemberOfType, NameLoc));
+
+ continue;
+ }
+ }
+ }
+ }
+
+ if (isDependent) {
+ // We didn't find our type, but that's okay: it's dependent
+ // anyway.
+
+ // FIXME: What if we have no nested-name-specifier?
+ QualType T = CheckTypenameType(ETK_None, SourceLocation(),
+ SS.getWithLocInContext(Context),
+ II, NameLoc);
+ return ParsedType::make(T);
+ }
+
+ if (NonMatchingTypeDecl) {
+ QualType T = Context.getTypeDeclType(NonMatchingTypeDecl);
+ Diag(NameLoc, diag::err_destructor_expr_type_mismatch)
+ << T << SearchType;
+ Diag(NonMatchingTypeDecl->getLocation(), diag::note_destructor_type_here)
+ << T;
+ } else if (ObjectTypePtr)
+ Diag(NameLoc, diag::err_ident_in_dtor_not_a_type)
+ << &II;
+ else {
+ SemaDiagnosticBuilder DtorDiag = Diag(NameLoc,
+ diag::err_destructor_class_name);
+ if (S) {
+ const DeclContext *Ctx = S->getEntity();
+ if (const CXXRecordDecl *Class = dyn_cast_or_null<CXXRecordDecl>(Ctx))
+ DtorDiag << FixItHint::CreateReplacement(SourceRange(NameLoc),
+ Class->getNameAsString());
+ }
+ }
+
+ return ParsedType();
+}
+
+ParsedType Sema::getDestructorType(const DeclSpec& DS, ParsedType ObjectType) {
+ if (DS.getTypeSpecType() == DeclSpec::TST_error || !ObjectType)
+ return ParsedType();
+ assert(DS.getTypeSpecType() == DeclSpec::TST_decltype
+ && "only get destructor types from declspecs");
+ QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc());
+ QualType SearchType = GetTypeFromParser(ObjectType);
+ if (SearchType->isDependentType() || Context.hasSameUnqualifiedType(SearchType, T)) {
+ return ParsedType::make(T);
+ }
+
+ Diag(DS.getTypeSpecTypeLoc(), diag::err_destructor_expr_type_mismatch)
+ << T << SearchType;
+ return ParsedType();
+}
+
+bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
+ const UnqualifiedId &Name) {
+ assert(Name.getKind() == UnqualifiedId::IK_LiteralOperatorId);
+
+ if (!SS.isValid())
+ return false;
+
+ switch (SS.getScopeRep()->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ // Per C++11 [over.literal]p2, literal operators can only be declared at
+ // namespace scope. Therefore, this unqualified-id cannot name anything.
+ // Reject it early, because we have no AST representation for this in the
+ // case where the scope is dependent.
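+ // For example, 'X::operator ""_w' where X is a class type cannot be a
+ // valid literal-operator-id.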
+ Diag(Name.getLocStart(), diag::err_literal_operator_id_outside_namespace)
+ << SS.getScopeRep();
+ return true;
+
+ case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Super:
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ return false;
+ }
+
+ llvm_unreachable("unknown nested name specifier kind");
+}
+
+/// \brief Build a C++ typeid expression with a type operand.
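+/// For example, 'typeid(int)' or 'typeid(std::string)'. A class operand
+/// type must be completely defined.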
+ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ TypeSourceInfo *Operand,
+ SourceLocation RParenLoc) {
+ // C++ [expr.typeid]p4:
+ // The top-level cv-qualifiers of the lvalue expression or the type-id
+ // that is the operand of typeid are always ignored.
+ // If the type of the type-id is a class type or a reference to a class
+ // type, the class shall be completely-defined.
+ Qualifiers Quals;
+ QualType T
+ = Context.getUnqualifiedArrayType(Operand->getType().getNonReferenceType(),
+ Quals);
+ if (T->getAs<RecordType>() &&
+ RequireCompleteType(TypeidLoc, T, diag::err_incomplete_typeid))
+ return ExprError();
+
+ if (T->isVariablyModifiedType())
+ return ExprError(Diag(TypeidLoc, diag::err_variably_modified_typeid) << T);
+
+ return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), Operand,
+ SourceRange(TypeidLoc, RParenLoc));
+}
+
+/// \brief Build a C++ typeid expression with an expression operand.
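+/// For example, 'typeid(*p)'. If the operand is a glvalue of polymorphic
+/// class type, it is evaluated at run time; otherwise it is an unevaluated
+/// operand.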
+ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ Expr *E,
+ SourceLocation RParenLoc) {
+ bool WasEvaluated = false;
+ if (E && !E->isTypeDependent()) {
+ if (E->getType()->isPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(E);
+ if (result.isInvalid()) return ExprError();
+ E = result.get();
+ }
+
+ QualType T = E->getType();
+ if (const RecordType *RecordT = T->getAs<RecordType>()) {
+ CXXRecordDecl *RecordD = cast<CXXRecordDecl>(RecordT->getDecl());
+ // C++ [expr.typeid]p3:
+ // [...] If the type of the expression is a class type, the class
+ // shall be completely-defined.
+ if (RequireCompleteType(TypeidLoc, T, diag::err_incomplete_typeid))
+ return ExprError();
+
+ // C++ [expr.typeid]p3:
+ // When typeid is applied to an expression other than a glvalue of a
+ // polymorphic class type [...] [the] expression is an unevaluated
+ // operand. [...]
+ if (RecordD->isPolymorphic() && E->isGLValue()) {
+ // The subexpression is potentially evaluated; switch the context
+ // and recheck the subexpression.
+ ExprResult Result = TransformToPotentiallyEvaluated(E);
+ if (Result.isInvalid()) return ExprError();
+ E = Result.get();
+
+ // We require a vtable to query the type at run time.
+ MarkVTableUsed(TypeidLoc, RecordD);
+ WasEvaluated = true;
+ }
+ }
+
+ // C++ [expr.typeid]p4:
+ // [...] If the type of the type-id is a reference to a possibly
+ // cv-qualified type, the result of the typeid expression refers to a
+ // std::type_info object representing the cv-unqualified referenced
+ // type.
+ Qualifiers Quals;
+ QualType UnqualT = Context.getUnqualifiedArrayType(T, Quals);
+ if (!Context.hasSameType(T, UnqualT)) {
+ T = UnqualT;
+ E = ImpCastExprToType(E, UnqualT, CK_NoOp, E->getValueKind()).get();
+ }
+ }
+
+ if (E->getType()->isVariablyModifiedType())
+ return ExprError(Diag(TypeidLoc, diag::err_variably_modified_typeid)
+ << E->getType());
+ else if (ActiveTemplateInstantiations.empty() &&
+ E->HasSideEffects(Context, WasEvaluated)) {
+ // The expression operand for typeid is in an unevaluated expression
+ // context, so side effects could result in unintended consequences.
+ Diag(E->getExprLoc(), WasEvaluated
+ ? diag::warn_side_effects_typeid
+ : diag::warn_side_effects_unevaluated_context);
+ }
+
+ return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), E,
+ SourceRange(TypeidLoc, RParenLoc));
+}
+
+/// ActOnCXXTypeid - Parse typeid( type-id ) or typeid( expression ).
+ExprResult
+Sema::ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
+ bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
+ // Find the std::type_info type.
+ if (!getStdNamespace())
+ return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid));
+
+ if (!CXXTypeInfoDecl) {
+ IdentifierInfo *TypeInfoII = &PP.getIdentifierTable().get("type_info");
+ LookupResult R(*this, TypeInfoII, SourceLocation(), LookupTagName);
+ LookupQualifiedName(R, getStdNamespace());
+ CXXTypeInfoDecl = R.getAsSingle<RecordDecl>();
+ // Microsoft's <typeinfo> declares type_info in the global namespace
+ // rather than in std when _HAS_EXCEPTIONS is defined to 0. See PR13153.
+ if (!CXXTypeInfoDecl && LangOpts.MSVCCompat) {
+ LookupQualifiedName(R, Context.getTranslationUnitDecl());
+ CXXTypeInfoDecl = R.getAsSingle<RecordDecl>();
+ }
+ if (!CXXTypeInfoDecl)
+ return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid));
+ }
+
+ if (!getLangOpts().RTTI) {
+ return ExprError(Diag(OpLoc, diag::err_no_typeid_with_fno_rtti));
+ }
+
+ QualType TypeInfoType = Context.getTypeDeclType(CXXTypeInfoDecl);
+
+ if (isType) {
+ // The operand is a type; handle it as such.
+ TypeSourceInfo *TInfo = nullptr;
+ QualType T = GetTypeFromParser(ParsedType::getFromOpaquePtr(TyOrExpr),
+ &TInfo);
+ if (T.isNull())
+ return ExprError();
+
+ if (!TInfo)
+ TInfo = Context.getTrivialTypeSourceInfo(T, OpLoc);
+
+ return BuildCXXTypeId(TypeInfoType, OpLoc, TInfo, RParenLoc);
+ }
+
+ // The operand is an expression.
+ return BuildCXXTypeId(TypeInfoType, OpLoc, (Expr*)TyOrExpr, RParenLoc);
+}
+
+/// \brief Build a Microsoft __uuidof expression with a type operand.
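+/// For example, '__uuidof(IUnknown)', where the operand type must have
+/// been declared with a __declspec(uuid("...")) attribute.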
+ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ TypeSourceInfo *Operand,
+ SourceLocation RParenLoc) {
+ if (!Operand->getType()->isDependentType()) {
+ bool HasMultipleGUIDs = false;
+ if (!CXXUuidofExpr::GetUuidAttrOfType(Operand->getType(),
+ &HasMultipleGUIDs)) {
+ if (HasMultipleGUIDs)
+ return ExprError(Diag(TypeidLoc, diag::err_uuidof_with_multiple_guids));
+ else
+ return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid));
+ }
+ }
+
+ return new (Context) CXXUuidofExpr(TypeInfoType.withConst(), Operand,
+ SourceRange(TypeidLoc, RParenLoc));
+}
+
+/// \brief Build a Microsoft __uuidof expression with an expression operand.
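+/// For example, '__uuidof(punk)' where the type of 'punk' was declared
+/// with a __declspec(uuid("...")) attribute; a null pointer constant
+/// operand is also accepted.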
+ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ Expr *E,
+ SourceLocation RParenLoc) {
+ if (!E->getType()->isDependentType()) {
+ bool HasMultipleGUIDs = false;
+ if (!CXXUuidofExpr::GetUuidAttrOfType(E->getType(), &HasMultipleGUIDs) &&
+ !E->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
+ if (HasMultipleGUIDs)
+ return ExprError(Diag(TypeidLoc, diag::err_uuidof_with_multiple_guids));
+ else
+ return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid));
+ }
+ }
+
+ return new (Context) CXXUuidofExpr(TypeInfoType.withConst(), E,
+ SourceRange(TypeidLoc, RParenLoc));
+}
+
+/// ActOnCXXUuidof - Parse __uuidof( type-id ) or __uuidof( expression ).
+ExprResult
+Sema::ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
+ bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
+ // If MSVCGuidDecl has not been cached, do the lookup.
+ if (!MSVCGuidDecl) {
+ IdentifierInfo *GuidII = &PP.getIdentifierTable().get("_GUID");
+ LookupResult R(*this, GuidII, SourceLocation(), LookupTagName);
+ LookupQualifiedName(R, Context.getTranslationUnitDecl());
+ MSVCGuidDecl = R.getAsSingle<RecordDecl>();
+ if (!MSVCGuidDecl)
+ return ExprError(Diag(OpLoc, diag::err_need_header_before_ms_uuidof));
+ }
+
+ QualType GuidType = Context.getTypeDeclType(MSVCGuidDecl);
+
+ if (isType) {
+ // The operand is a type; handle it as such.
+ TypeSourceInfo *TInfo = nullptr;
+ QualType T = GetTypeFromParser(ParsedType::getFromOpaquePtr(TyOrExpr),
+ &TInfo);
+ if (T.isNull())
+ return ExprError();
+
+ if (!TInfo)
+ TInfo = Context.getTrivialTypeSourceInfo(T, OpLoc);
+
+ return BuildCXXUuidof(GuidType, OpLoc, TInfo, RParenLoc);
+ }
+
+ // The operand is an expression.
+ return BuildCXXUuidof(GuidType, OpLoc, (Expr*)TyOrExpr, RParenLoc);
+}
+
+/// ActOnCXXBoolLiteral - Parse {true,false} literals.
+ExprResult
+Sema::ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) {
+ assert((Kind == tok::kw_true || Kind == tok::kw_false) &&
+ "Unknown C++ Boolean value!");
+ return new (Context)
+ CXXBoolLiteralExpr(Kind == tok::kw_true, Context.BoolTy, OpLoc);
+}
+
+/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
+ExprResult
+Sema::ActOnCXXNullPtrLiteral(SourceLocation Loc) {
+ return new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc);
+}
+
+/// ActOnCXXThrow - Parse throw expressions.
+ExprResult
+Sema::ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *Ex) {
+ bool IsThrownVarInScope = false;
+ if (Ex) {
+ // C++0x [class.copymove]p31:
+ // When certain criteria are met, an implementation is allowed to omit the
+ // copy/move construction of a class object [...]
+ //
+ // - in a throw-expression, when the operand is the name of a
+ // non-volatile automatic object (other than a function or catch-
+ // clause parameter) whose scope does not extend beyond the end of the
+ // innermost enclosing try-block (if there is one), the copy/move
+ // operation from the operand to the exception object (15.1) can be
+ // omitted by constructing the automatic object directly into the
+ // exception object
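+ // For example, in 'try { Widget w; throw w; }' the copy of 'w' into
+ // the exception object can be elided because 'w' is local to the
+ // try-block.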
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Ex->IgnoreParens()))
+ if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (Var->hasLocalStorage() && !Var->getType().isVolatileQualified()) {
+ for (; S; S = S->getParent()) {
+ if (S->isDeclScope(Var)) {
+ IsThrownVarInScope = true;
+ break;
+ }
+
+ if (S->getFlags() &
+ (Scope::FnScope | Scope::ClassScope | Scope::BlockScope |
+ Scope::FunctionPrototypeScope | Scope::ObjCMethodScope |
+ Scope::TryScope))
+ break;
+ }
+ }
+ }
+ }
+
+ return BuildCXXThrow(OpLoc, Ex, IsThrownVarInScope);
+}
+
+ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
+ bool IsThrownVarInScope) {
+ // Don't report an error if 'throw' is used in system headers.
+ if (!getLangOpts().CXXExceptions &&
+ !getSourceManager().isInSystemHeader(OpLoc))
+ Diag(OpLoc, diag::err_exceptions_disabled) << "throw";
+
+ if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope())
+ Diag(OpLoc, diag::err_omp_simd_region_cannot_use_stmt) << "throw";
+
+ if (Ex && !Ex->isTypeDependent()) {
+ QualType ExceptionObjectTy = Context.getExceptionObjectType(Ex->getType());
+ if (CheckCXXThrowOperand(OpLoc, ExceptionObjectTy, Ex))
+ return ExprError();
+
+ // Initialize the exception result. This implicitly weeds out
+ // abstract types or types with inaccessible copy constructors.
+
+ // C++0x [class.copymove]p31:
+ // When certain criteria are met, an implementation is allowed to omit the
+ // copy/move construction of a class object [...]
+ //
+ // - in a throw-expression, when the operand is the name of a
+ // non-volatile automatic object (other than a function or catch-
+ // clause parameter) whose scope does not extend beyond the end of
+ // the innermost enclosing try-block (if there is one), the copy/move
+ // operation from the operand to the exception object (15.1) can be
+ // omitted by constructing the automatic object directly into the
+ // exception object
+ const VarDecl *NRVOVariable = nullptr;
+ if (IsThrownVarInScope)
+ NRVOVariable = getCopyElisionCandidate(QualType(), Ex, false);
+
+ InitializedEntity Entity = InitializedEntity::InitializeException(
+ OpLoc, ExceptionObjectTy,
+ /*NRVO=*/NRVOVariable != nullptr);
+ ExprResult Res = PerformMoveOrCopyInitialization(
+ Entity, NRVOVariable, QualType(), Ex, IsThrownVarInScope);
+ if (Res.isInvalid())
+ return ExprError();
+ Ex = Res.get();
+ }
+
+ return new (Context)
+ CXXThrowExpr(Ex, Context.VoidTy, OpLoc, IsThrownVarInScope);
+}
+
+static void
+collectPublicBases(CXXRecordDecl *RD,
+ llvm::DenseMap<CXXRecordDecl *, unsigned> &SubobjectsSeen,
+ llvm::SmallPtrSetImpl<CXXRecordDecl *> &VBases,
+ llvm::SetVector<CXXRecordDecl *> &PublicSubobjectsSeen,
+ bool ParentIsPublic) {
+ for (const CXXBaseSpecifier &BS : RD->bases()) {
+ CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
+ bool NewSubobject;
+ // Virtual bases constitute the same subobject. Non-virtual bases are
+ // always distinct subobjects.
+ if (BS.isVirtual())
+ NewSubobject = VBases.insert(BaseDecl).second;
+ else
+ NewSubobject = true;
+
+ if (NewSubobject)
+ ++SubobjectsSeen[BaseDecl];
+
+ // Only add subobjects which have public access throughout the entire chain.
+ bool PublicPath = ParentIsPublic && BS.getAccessSpecifier() == AS_public;
+ if (PublicPath)
+ PublicSubobjectsSeen.insert(BaseDecl);
+
+ // Recurse on to each base subobject.
+ collectPublicBases(BaseDecl, SubobjectsSeen, VBases, PublicSubobjectsSeen,
+ PublicPath);
+ }
+}
+
+static void getUnambiguousPublicSubobjects(
+ CXXRecordDecl *RD, llvm::SmallVectorImpl<CXXRecordDecl *> &Objects) {
+ llvm::DenseMap<CXXRecordDecl *, unsigned> SubobjectsSeen;
+ llvm::SmallSet<CXXRecordDecl *, 2> VBases;
+ llvm::SetVector<CXXRecordDecl *> PublicSubobjectsSeen;
+ SubobjectsSeen[RD] = 1;
+ PublicSubobjectsSeen.insert(RD);
+ collectPublicBases(RD, SubobjectsSeen, VBases, PublicSubobjectsSeen,
+ /*ParentIsPublic=*/true);
+
+ for (CXXRecordDecl *PublicSubobject : PublicSubobjectsSeen) {
+ // Skip ambiguous objects.
+ if (SubobjectsSeen[PublicSubobject] > 1)
+ continue;
+
+ Objects.push_back(PublicSubobject);
+ }
+}
+
+/// CheckCXXThrowOperand - Validate the operand of a throw.
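+/// For example, throwing an object of incomplete class type, or a pointer
+/// to one (other than cv void*), is ill-formed.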
+bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc,
+ QualType ExceptionObjectTy, Expr *E) {
+ // If the type of the exception would be an incomplete type or a pointer
+ // to an incomplete type other than (cv) void the program is ill-formed.
+ QualType Ty = ExceptionObjectTy;
+ bool isPointer = false;
+ if (const PointerType* Ptr = Ty->getAs<PointerType>()) {
+ Ty = Ptr->getPointeeType();
+ isPointer = true;
+ }
+ if (!isPointer || !Ty->isVoidType()) {
+ if (RequireCompleteType(ThrowLoc, Ty,
+ isPointer ? diag::err_throw_incomplete_ptr
+ : diag::err_throw_incomplete,
+ E->getSourceRange()))
+ return true;
+
+ if (RequireNonAbstractType(ThrowLoc, ExceptionObjectTy,
+ diag::err_throw_abstract_type, E))
+ return true;
+ }
+
+ // If the exception has class type, we need additional handling.
+ CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+ if (!RD)
+ return false;
+
+ // If we are throwing a polymorphic class type or pointer thereof,
+ // exception handling will make use of the vtable.
+ MarkVTableUsed(ThrowLoc, RD);
+
+ // If a pointer is thrown, the referenced object will not be destroyed.
+ if (isPointer)
+ return false;
+
+ // If the class has a destructor, we must be able to call it.
+ if (!RD->hasIrrelevantDestructor()) {
+ if (CXXDestructorDecl *Destructor = LookupDestructor(RD)) {
+ MarkFunctionReferenced(E->getExprLoc(), Destructor);
+ CheckDestructorAccess(E->getExprLoc(), Destructor,
+ PDiag(diag::err_access_dtor_exception) << Ty);
+ if (DiagnoseUseOfDecl(Destructor, E->getExprLoc()))
+ return true;
+ }
+ }
+
+ // The MSVC ABI creates a list of all types which can catch the exception
+ // object. This list also references the appropriate copy constructor to call
+ // if the object is caught by value and has a non-trivial copy constructor.
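+ // For example, when throwing a 'Derived' with a public unambiguous base
+ // 'Base', both classes are recorded as catchable types for this throw.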
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ // We are only interested in the public, unambiguous bases contained within
+ // the exception object. Bases which are ambiguous or otherwise
+ // inaccessible are not catchable types.
+ llvm::SmallVector<CXXRecordDecl *, 2> UnambiguousPublicSubobjects;
+ getUnambiguousPublicSubobjects(RD, UnambiguousPublicSubobjects);
+
+ for (CXXRecordDecl *Subobject : UnambiguousPublicSubobjects) {
+ // Attempt to lookup the copy constructor. Various pieces of machinery
+ // will spring into action, like template instantiation, which means this
+ // cannot be a simple walk of the class's decls. Instead, we must perform
+ // lookup and overload resolution.
+ CXXConstructorDecl *CD = LookupCopyingConstructor(Subobject, 0);
+ if (!CD)
+ continue;
+
+ // Mark the constructor referenced as it is used by this throw expression.
+ MarkFunctionReferenced(E->getExprLoc(), CD);
+
+ // Skip this copy constructor if it is trivial, we don't need to record it
+ // in the catchable type data.
+ if (CD->isTrivial())
+ continue;
+
+ // The copy constructor is non-trivial, create a mapping from this class
+ // type to this constructor.
+ // N.B. The selection of copy constructor is not sensitive to this
+ // particular throw-site. Lookup will be performed at the catch-site to
+ // ensure that the copy constructor is, in fact, accessible (via
+ // friendship or any other means).
+ Context.addCopyConstructorForExceptionObject(Subobject, CD);
+
+ // We don't keep the instantiated default argument expressions around so
+ // we must rebuild them here.
+ for (unsigned I = 1, E = CD->getNumParams(); I != E; ++I) {
+ // Skip any default arguments that we've already instantiated.
+ if (Context.getDefaultArgExprForConstructor(CD, I))
+ continue;
+
+ Expr *DefaultArg =
+ BuildCXXDefaultArgExpr(ThrowLoc, CD, CD->getParamDecl(I)).get();
+ Context.addDefaultArgExprForConstructor(CD, I, DefaultArg);
+ }
+ }
+ }
+
+ return false;
+}
+
+QualType Sema::getCurrentThisType() {
+ DeclContext *DC = getFunctionLevelDeclContext();
+ QualType ThisTy = CXXThisTypeOverride;
+ if (CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(DC)) {
+ if (method->isInstance())
+ ThisTy = method->getThisType(Context);
+ }
+ if (ThisTy.isNull()) {
+ if (isGenericLambdaCallOperatorSpecialization(CurContext) &&
+ CurContext->getParent()->getParent()->isRecord()) {
+ // This is a generic lambda call operator that is being instantiated
+ // within a default initializer - so use the enclosing class as 'this'.
+ // There is no enclosing member function to retrieve the 'this' pointer
+ // from.
+ QualType ClassTy = Context.getTypeDeclType(
+ cast<CXXRecordDecl>(CurContext->getParent()->getParent()));
+ // There are no cv-qualifiers for 'this' within default initializers,
+ // per [expr.prim.general]p4.
+ return Context.getPointerType(ClassTy);
+ }
+ }
+ return ThisTy;
+}
+
+Sema::CXXThisScopeRAII::CXXThisScopeRAII(Sema &S,
+ Decl *ContextDecl,
+ unsigned CXXThisTypeQuals,
+ bool Enabled)
+ : S(S), OldCXXThisTypeOverride(S.CXXThisTypeOverride), Enabled(false)
+{
+ if (!Enabled || !ContextDecl)
+ return;
+
+ CXXRecordDecl *Record = nullptr;
+ if (ClassTemplateDecl *Template = dyn_cast<ClassTemplateDecl>(ContextDecl))
+ Record = Template->getTemplatedDecl();
+ else
+ Record = cast<CXXRecordDecl>(ContextDecl);
+
+ S.CXXThisTypeOverride
+ = S.Context.getPointerType(
+ S.Context.getRecordType(Record).withCVRQualifiers(CXXThisTypeQuals));
+
+ this->Enabled = true;
+}
+
+
+Sema::CXXThisScopeRAII::~CXXThisScopeRAII() {
+ if (Enabled) {
+ S.CXXThisTypeOverride = OldCXXThisTypeOverride;
+ }
+}
+
+static Expr *captureThis(ASTContext &Context, RecordDecl *RD,
+ QualType ThisTy, SourceLocation Loc) {
+ FieldDecl *Field
+ = FieldDecl::Create(Context, RD, Loc, Loc, nullptr, ThisTy,
+ Context.getTrivialTypeSourceInfo(ThisTy, Loc),
+ nullptr, false, ICIS_NoInit);
+ Field->setImplicit(true);
+ Field->setAccess(AS_private);
+ RD->addDecl(Field);
+ return new (Context) CXXThisExpr(Loc, ThisTy, /*isImplicit*/true);
+}
+
+bool Sema::CheckCXXThisCapture(SourceLocation Loc, bool Explicit,
+ bool BuildAndDiagnose, const unsigned *const FunctionScopeIndexToStopAt) {
+ // We don't need to capture this in an unevaluated context.
+ if (isUnevaluatedContext() && !Explicit)
+ return true;
+
+ const unsigned MaxFunctionScopesIndex = FunctionScopeIndexToStopAt ?
+ *FunctionScopeIndexToStopAt : FunctionScopes.size() - 1;
+ // Otherwise, check that we can capture 'this'.
+ unsigned NumClosures = 0;
+ for (unsigned idx = MaxFunctionScopesIndex; idx != 0; idx--) {
+ if (CapturingScopeInfo *CSI =
+ dyn_cast<CapturingScopeInfo>(FunctionScopes[idx])) {
+ if (CSI->CXXThisCaptureIndex != 0) {
+ // 'this' is already being captured; there isn't anything more to do.
+ break;
+ }
+ LambdaScopeInfo *LSI = dyn_cast<LambdaScopeInfo>(CSI);
+ if (LSI && isGenericLambdaCallOperatorSpecialization(LSI->CallOperator)) {
+ // This context can't implicitly capture 'this'; fail out.
+ if (BuildAndDiagnose)
+ Diag(Loc, diag::err_this_capture) << Explicit;
+ return true;
+ }
+ if (CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByref ||
+ CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByval ||
+ CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_Block ||
+ CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_CapturedRegion ||
+ Explicit) {
+ // This closure can capture 'this'; continue looking upwards.
+ NumClosures++;
+ Explicit = false;
+ continue;
+ }
+ // This context can't implicitly capture 'this'; fail out.
+ if (BuildAndDiagnose)
+ Diag(Loc, diag::err_this_capture) << Explicit;
+ return true;
+ }
+ break;
+ }
+ if (!BuildAndDiagnose) return false;
+ // Mark that we're implicitly capturing 'this' in all the scopes we skipped.
+ // FIXME: We need to delay this marking in PotentiallyPotentiallyEvaluated
+ // contexts.
+ for (unsigned idx = MaxFunctionScopesIndex; NumClosures;
+ --idx, --NumClosures) {
+ CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(FunctionScopes[idx]);
+ Expr *ThisExpr = nullptr;
+ QualType ThisTy = getCurrentThisType();
+ if (LambdaScopeInfo *LSI = dyn_cast<LambdaScopeInfo>(CSI))
+ // For lambda expressions, build a field and an initializing expression.
+ ThisExpr = captureThis(Context, LSI->Lambda, ThisTy, Loc);
+ else if (CapturedRegionScopeInfo *RSI
+ = dyn_cast<CapturedRegionScopeInfo>(FunctionScopes[idx]))
+ ThisExpr = captureThis(Context, RSI->TheRecordDecl, ThisTy, Loc);
+
+ bool isNested = NumClosures > 1;
+ CSI->addThisCapture(isNested, Loc, ThisTy, ThisExpr);
+ }
+ return false;
+}
+
+ExprResult Sema::ActOnCXXThis(SourceLocation Loc) {
+ /// C++ 9.3.2: In the body of a non-static member function, the keyword this
+ /// is a non-lvalue expression whose value is the address of the object for
+ /// which the function is called.
+
+ QualType ThisTy = getCurrentThisType();
+ if (ThisTy.isNull()) return Diag(Loc, diag::err_invalid_this_use);
+
+ CheckCXXThisCapture(Loc);
+ return new (Context) CXXThisExpr(Loc, ThisTy, /*isImplicit=*/false);
+}
+
+bool Sema::isThisOutsideMemberFunctionBody(QualType BaseType) {
+ // If we're outside the body of a member function, then we'll have a specified
+ // type for 'this'.
+ if (CXXThisTypeOverride.isNull())
+ return false;
+
+ // Determine whether we're looking into a class that's currently being
+ // defined.
+ CXXRecordDecl *Class = BaseType->getAsCXXRecordDecl();
+ return Class && Class->isBeingDefined();
+}
+
+ExprResult
+Sema::ActOnCXXTypeConstructExpr(ParsedType TypeRep,
+ SourceLocation LParenLoc,
+ MultiExprArg exprs,
+ SourceLocation RParenLoc) {
+ if (!TypeRep)
+ return ExprError();
+
+ TypeSourceInfo *TInfo;
+ QualType Ty = GetTypeFromParser(TypeRep, &TInfo);
+ if (!TInfo)
+ TInfo = Context.getTrivialTypeSourceInfo(Ty, SourceLocation());
+
+ return BuildCXXTypeConstructExpr(TInfo, LParenLoc, exprs, RParenLoc);
+}
+
+/// BuildCXXTypeConstructExpr - Handle construction of a specified type.
+/// Can be interpreted either as function-style casting ("int(x)")
+/// or class type construction ("ClassType(x,y,z)")
+/// or creation of a value-initialized type ("int()").
+ExprResult
+Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
+ SourceLocation LParenLoc,
+ MultiExprArg Exprs,
+ SourceLocation RParenLoc) {
+ QualType Ty = TInfo->getType();
+ SourceLocation TyBeginLoc = TInfo->getTypeLoc().getBeginLoc();
+
+ if (Ty->isDependentType() || CallExpr::hasAnyTypeDependentArguments(Exprs)) {
+ return CXXUnresolvedConstructExpr::Create(Context, TInfo, LParenLoc, Exprs,
+ RParenLoc);
+ }
+
+ bool ListInitialization = LParenLoc.isInvalid();
+ assert((!ListInitialization || (Exprs.size() == 1 && isa<InitListExpr>(Exprs[0])))
+ && "List initialization must have initializer list as expression.");
+ SourceRange FullRange = SourceRange(TyBeginLoc,
+ ListInitialization ? Exprs[0]->getSourceRange().getEnd() : RParenLoc);
+
+ // C++ [expr.type.conv]p1:
+ // If the expression list is a single expression, the type conversion
+ // expression is equivalent (in definedness, and if defined in meaning) to the
+ // corresponding cast expression.
+ if (Exprs.size() == 1 && !ListInitialization) {
+ Expr *Arg = Exprs[0];
+ return BuildCXXFunctionalCastExpr(TInfo, LParenLoc, Arg, RParenLoc);
+ }
+
+ // C++14 [expr.type.conv]p2: The expression T(), where T is a
+ // simple-type-specifier or typename-specifier for a non-array complete
+ // object type or the (possibly cv-qualified) void type, creates a prvalue
+ // of the specified type, whose value is that produced by value-initializing
+ // an object of type T.
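+ // For example, 'int()' is a prvalue with value 0, and 'S()' is a
+ // value-initialized temporary of class type 'S'.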
+ QualType ElemTy = Ty;
+ if (Ty->isArrayType()) {
+ if (!ListInitialization)
+ return ExprError(Diag(TyBeginLoc,
+ diag::err_value_init_for_array_type) << FullRange);
+ ElemTy = Context.getBaseElementType(Ty);
+ }
+
+ if (!ListInitialization && Ty->isFunctionType())
+ return ExprError(Diag(TyBeginLoc, diag::err_value_init_for_function_type)
+ << FullRange);
+
+ if (!Ty->isVoidType() &&
+ RequireCompleteType(TyBeginLoc, ElemTy,
+ diag::err_invalid_incomplete_type_use, FullRange))
+ return ExprError();
+
+ if (RequireNonAbstractType(TyBeginLoc, Ty,
+ diag::err_allocation_of_abstract_type))
+ return ExprError();
+
+ InitializedEntity Entity = InitializedEntity::InitializeTemporary(TInfo);
+ InitializationKind Kind =
+ Exprs.size() ? ListInitialization
+ ? InitializationKind::CreateDirectList(TyBeginLoc)
+ : InitializationKind::CreateDirect(TyBeginLoc, LParenLoc, RParenLoc)
+ : InitializationKind::CreateValue(TyBeginLoc, LParenLoc, RParenLoc);
+ InitializationSequence InitSeq(*this, Entity, Kind, Exprs);
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Exprs);
+
+ if (Result.isInvalid() || !ListInitialization)
+ return Result;
+
+ Expr *Inner = Result.get();
+ if (CXXBindTemporaryExpr *BTE = dyn_cast_or_null<CXXBindTemporaryExpr>(Inner))
+ Inner = BTE->getSubExpr();
+ if (!isa<CXXTemporaryObjectExpr>(Inner)) {
+ // If we created a CXXTemporaryObjectExpr, that node also represents the
+ // functional cast. Otherwise, create an explicit cast to represent
+ // the syntactic form of a functional-style cast that was used here.
+ //
+ // FIXME: Creating a CXXFunctionalCastExpr around a CXXConstructExpr
+ // would give a more consistent AST representation than using a
+ // CXXTemporaryObjectExpr. It's also weird that the functional cast
+ // is sometimes handled by initialization and sometimes not.
+ QualType ResultType = Result.get()->getType();
+ Result = CXXFunctionalCastExpr::Create(
+ Context, ResultType, Expr::getValueKindForType(TInfo->getType()), TInfo,
+ CK_NoOp, Result.get(), /*Path=*/nullptr, LParenLoc, RParenLoc);
+ }
+
+ return Result;
+}
+
+/// doesUsualArrayDeleteWantSize - Answers whether the usual
+/// operator delete[] for the given type has a size_t parameter.
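+/// For example, this returns true when the class declares only
+/// 'void operator delete[](void *, std::size_t);' as its usual array
+/// deallocation function.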
+static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc,
+ QualType allocType) {
+ const RecordType *record =
+ allocType->getBaseElementTypeUnsafe()->getAs<RecordType>();
+ if (!record) return false;
+
+ // Try to find an operator delete[] in class scope.
+
+ DeclarationName deleteName =
+ S.Context.DeclarationNames.getCXXOperatorName(OO_Array_Delete);
+ LookupResult ops(S, deleteName, loc, Sema::LookupOrdinaryName);
+ S.LookupQualifiedName(ops, record->getDecl());
+
+ // We're just doing this for information.
+ ops.suppressDiagnostics();
+
+ // Very likely: there's no operator delete[].
+ if (ops.empty()) return false;
+
+ // If it's ambiguous, it should be illegal to call operator delete[]
+ // on this thing, so it doesn't matter if we allocate extra space or not.
+ if (ops.isAmbiguous()) return false;
+
+ LookupResult::Filter filter = ops.makeFilter();
+ while (filter.hasNext()) {
+ NamedDecl *del = filter.next()->getUnderlyingDecl();
+
+ // C++0x [basic.stc.dynamic.deallocation]p2:
+ // A template instance is never a usual deallocation function,
+ // regardless of its signature.
+ if (isa<FunctionTemplateDecl>(del)) {
+ filter.erase();
+ continue;
+ }
+
+ // C++0x [basic.stc.dynamic.deallocation]p2:
+ // If class T does not declare [an operator delete[] with one
+ // parameter] but does declare a member deallocation function
+ // named operator delete[] with exactly two parameters, the
+ // second of which has type std::size_t, then this function
+ // is a usual deallocation function.
+ if (!cast<CXXMethodDecl>(del)->isUsualDeallocationFunction()) {
+ filter.erase();
+ continue;
+ }
+ }
+ filter.done();
+
+ if (!ops.isSingleResult()) return false;
+
+ const FunctionDecl *del = cast<FunctionDecl>(ops.getFoundDecl());
+ return (del->getNumParams() == 2);
+}
+
+/// \brief Parsed a C++ 'new' expression (C++ 5.3.4).
+///
+/// E.g.:
+/// @code new (memory) int[size][4] @endcode
+/// or
+/// @code ::new Foo(23, "hello") @endcode
+///
+/// \param StartLoc The first location of the expression.
+/// \param UseGlobal True if 'new' was prefixed with '::'.
+/// \param PlacementLParen Opening paren of the placement arguments.
+/// \param PlacementArgs Placement new arguments.
+/// \param PlacementRParen Closing paren of the placement arguments.
+/// \param TypeIdParens If the type is in parens, the source range.
+/// \param D The type to be allocated, as well as array dimensions.
+/// \param Initializer The initializing expression or initializer-list, or null
+/// if there is none.
+ExprResult
+Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
+ SourceLocation PlacementLParen, MultiExprArg PlacementArgs,
+ SourceLocation PlacementRParen, SourceRange TypeIdParens,
+ Declarator &D, Expr *Initializer) {
+ bool TypeContainsAuto = D.getDeclSpec().containsPlaceholderType();
+
+ Expr *ArraySize = nullptr;
+ // If the specified type is an array, unwrap it and save the expression.
+ if (D.getNumTypeObjects() > 0 &&
+ D.getTypeObject(0).Kind == DeclaratorChunk::Array) {
+ DeclaratorChunk &Chunk = D.getTypeObject(0);
+ if (TypeContainsAuto)
+ return ExprError(Diag(Chunk.Loc, diag::err_new_array_of_auto)
+ << D.getSourceRange());
+ if (Chunk.Arr.hasStatic)
+ return ExprError(Diag(Chunk.Loc, diag::err_static_illegal_in_new)
+ << D.getSourceRange());
+ if (!Chunk.Arr.NumElts)
+ return ExprError(Diag(Chunk.Loc, diag::err_array_new_needs_size)
+ << D.getSourceRange());
+
+ ArraySize = static_cast<Expr*>(Chunk.Arr.NumElts);
+ D.DropFirstTypeObject();
+ }
+
+ // Every dimension shall be of constant size.
+ if (ArraySize) {
+ for (unsigned I = 0, N = D.getNumTypeObjects(); I < N; ++I) {
+ if (D.getTypeObject(I).Kind != DeclaratorChunk::Array)
+ break;
+
+ DeclaratorChunk::ArrayTypeInfo &Array = D.getTypeObject(I).Arr;
+ if (Expr *NumElts = (Expr *)Array.NumElts) {
+ if (!NumElts->isTypeDependent() && !NumElts->isValueDependent()) {
+ if (getLangOpts().CPlusPlus14) {
+ // C++1y [expr.new]p6: Every constant-expression in a noptr-new-declarator
+ // shall be a converted constant expression (5.19) of type std::size_t
+ // and shall evaluate to a strictly positive value.
+ unsigned IntWidth = Context.getTargetInfo().getIntWidth();
+ assert(IntWidth && "Builtin type of size 0?");
+ llvm::APSInt Value(IntWidth);
+ Array.NumElts
+ = CheckConvertedConstantExpression(NumElts, Context.getSizeType(), Value,
+ CCEK_NewExpr)
+ .get();
+ } else {
+ Array.NumElts
+ = VerifyIntegerConstantExpression(NumElts, nullptr,
+ diag::err_new_array_nonconst)
+ .get();
+ }
+ if (!Array.NumElts)
+ return ExprError();
+ }
+ }
+ }
+ }
+
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, /*Scope=*/nullptr);
+ QualType AllocType = TInfo->getType();
+ if (D.isInvalidType())
+ return ExprError();
+
+ SourceRange DirectInitRange;
+ if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Initializer))
+ DirectInitRange = List->getSourceRange();
+
+ return BuildCXXNew(SourceRange(StartLoc, D.getLocEnd()), UseGlobal,
+ PlacementLParen,
+ PlacementArgs,
+ PlacementRParen,
+ TypeIdParens,
+ AllocType,
+ TInfo,
+ ArraySize,
+ DirectInitRange,
+ Initializer,
+ TypeContainsAuto);
+}
+
+static bool isLegalArrayNewInitializer(CXXNewExpr::InitializationStyle Style,
+ Expr *Init) {
+ if (!Init)
+ return true;
+ if (ParenListExpr *PLE = dyn_cast<ParenListExpr>(Init))
+ return PLE->getNumExprs() == 0;
+ if (isa<ImplicitValueInitExpr>(Init))
+ return true;
+ else if (CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init))
+ return !CCE->isListInitialization() &&
+ CCE->getConstructor()->isDefaultConstructor();
+ else if (Style == CXXNewExpr::ListInit) {
+ assert(isa<InitListExpr>(Init) &&
+ "Shouldn't create list CXXConstructExprs for arrays.");
+ return true;
+ }
+ return false;
+}
+
+ExprResult
+Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
+ SourceLocation PlacementLParen,
+ MultiExprArg PlacementArgs,
+ SourceLocation PlacementRParen,
+ SourceRange TypeIdParens,
+ QualType AllocType,
+ TypeSourceInfo *AllocTypeInfo,
+ Expr *ArraySize,
+ SourceRange DirectInitRange,
+ Expr *Initializer,
+ bool TypeMayContainAuto) {
+ SourceRange TypeRange = AllocTypeInfo->getTypeLoc().getSourceRange();
+ SourceLocation StartLoc = Range.getBegin();
+
+ CXXNewExpr::InitializationStyle initStyle;
+ if (DirectInitRange.isValid()) {
+ assert(Initializer && "Have parens but no initializer.");
+ initStyle = CXXNewExpr::CallInit;
+ } else if (Initializer && isa<InitListExpr>(Initializer))
+ initStyle = CXXNewExpr::ListInit;
+ else {
+ assert((!Initializer || isa<ImplicitValueInitExpr>(Initializer) ||
+ isa<CXXConstructExpr>(Initializer)) &&
+ "Initializer expression that cannot have been implicitly created.");
+ initStyle = CXXNewExpr::NoInit;
+ }
+
+ Expr **Inits = &Initializer;
+ unsigned NumInits = Initializer ? 1 : 0;
+ if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Initializer)) {
+ assert(initStyle == CXXNewExpr::CallInit && "paren init for non-call init");
+ Inits = List->getExprs();
+ NumInits = List->getNumExprs();
+ }
+
+ // C++11 [dcl.spec.auto]p6. Deduce the type which 'auto' stands in for.
+ if (TypeMayContainAuto && AllocType->isUndeducedType()) {
+ if (initStyle == CXXNewExpr::NoInit || NumInits == 0)
+ return ExprError(Diag(StartLoc, diag::err_auto_new_requires_ctor_arg)
+ << AllocType << TypeRange);
+ if (initStyle == CXXNewExpr::ListInit ||
+ (NumInits == 1 && isa<InitListExpr>(Inits[0])))
+ return ExprError(Diag(Inits[0]->getLocStart(),
+ diag::err_auto_new_list_init)
+ << AllocType << TypeRange);
+ if (NumInits > 1) {
+ Expr *FirstBad = Inits[1];
+ return ExprError(Diag(FirstBad->getLocStart(),
+ diag::err_auto_new_ctor_multiple_expressions)
+ << AllocType << TypeRange);
+ }
+ Expr *Deduce = Inits[0];
+ QualType DeducedType;
+ if (DeduceAutoType(AllocTypeInfo, Deduce, DeducedType) == DAR_Failed)
+ return ExprError(Diag(StartLoc, diag::err_auto_new_deduction_failure)
+ << AllocType << Deduce->getType()
+ << TypeRange << Deduce->getSourceRange());
+ if (DeducedType.isNull())
+ return ExprError();
+ AllocType = DeducedType;
+ }
+
+ // Per C++0x [expr.new]p5, the type being constructed may be a
+ // typedef of an array type.
+ if (!ArraySize) {
+ if (const ConstantArrayType *Array
+ = Context.getAsConstantArrayType(AllocType)) {
+ ArraySize = IntegerLiteral::Create(Context, Array->getSize(),
+ Context.getSizeType(),
+ TypeRange.getEnd());
+ AllocType = Array->getElementType();
+ }
+ }
+
+ if (CheckAllocatedType(AllocType, TypeRange.getBegin(), TypeRange))
+ return ExprError();
+
+ if (initStyle == CXXNewExpr::ListInit &&
+ isStdInitializerList(AllocType, nullptr)) {
+ Diag(AllocTypeInfo->getTypeLoc().getBeginLoc(),
+ diag::warn_dangling_std_initializer_list)
+ << /*at end of FE*/0 << Inits[0]->getSourceRange();
+ }
+
+ // In ARC, infer 'retaining' ownership for the allocated type.
+ if (getLangOpts().ObjCAutoRefCount &&
+ AllocType.getObjCLifetime() == Qualifiers::OCL_None &&
+ AllocType->isObjCLifetimeType()) {
+ AllocType = Context.getLifetimeQualifiedType(AllocType,
+ AllocType->getObjCARCImplicitLifetime());
+ }
+
+ QualType ResultType = Context.getPointerType(AllocType);
+
+ if (ArraySize && ArraySize->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(ArraySize);
+ if (result.isInvalid()) return ExprError();
+ ArraySize = result.get();
+ }
+ // C++98 5.3.4p6: "The expression in a direct-new-declarator shall have
+ // integral or enumeration type with a non-negative value."
+ // C++11 [expr.new]p6: The expression [...] shall be of integral or unscoped
+ // enumeration type, or a class type for which a single non-explicit
+ // conversion function to integral or unscoped enumeration type exists.
+ // C++1y [expr.new]p6: The expression [...] is implicitly converted to
+ // std::size_t.
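+ // For example, 'new int[2.5]' is rejected, while 'new int[n]' where 'n'
+ // has a class type with a single non-explicit conversion to a size type
+ // is accepted.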
+ if (ArraySize && !ArraySize->isTypeDependent()) {
+ ExprResult ConvertedSize;
+ if (getLangOpts().CPlusPlus14) {
+ assert(Context.getTargetInfo().getIntWidth() && "Builtin type of size 0?");
+
+ ConvertedSize = PerformImplicitConversion(ArraySize, Context.getSizeType(),
+ AA_Converting);
+
+ if (!ConvertedSize.isInvalid() &&
+ ArraySize->getType()->getAs<RecordType>())
+ // Diagnose the compatibility of this conversion.
+ Diag(StartLoc, diag::warn_cxx98_compat_array_size_conversion)
+ << ArraySize->getType() << 0 << "'size_t'";
+ } else {
+ class SizeConvertDiagnoser : public ICEConvertDiagnoser {
+ protected:
+ Expr *ArraySize;
+
+ public:
+ SizeConvertDiagnoser(Expr *ArraySize)
+ : ICEConvertDiagnoser(/*AllowScopedEnumerations*/false, false, false),
+ ArraySize(ArraySize) {}
+
+ SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
+ QualType T) override {
+ return S.Diag(Loc, diag::err_array_size_not_integral)
+ << S.getLangOpts().CPlusPlus11 << T;
+ }
+
+ SemaDiagnosticBuilder diagnoseIncomplete(
+ Sema &S, SourceLocation Loc, QualType T) override {
+ return S.Diag(Loc, diag::err_array_size_incomplete_type)
+ << T << ArraySize->getSourceRange();
+ }
+
+ SemaDiagnosticBuilder diagnoseExplicitConv(
+ Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
+ return S.Diag(Loc, diag::err_array_size_explicit_conversion) << T << ConvTy;
+ }
+
+ SemaDiagnosticBuilder noteExplicitConv(
+ Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
+ return S.Diag(Conv->getLocation(), diag::note_array_size_conversion)
+ << ConvTy->isEnumeralType() << ConvTy;
+ }
+
+ SemaDiagnosticBuilder diagnoseAmbiguous(
+ Sema &S, SourceLocation Loc, QualType T) override {
+ return S.Diag(Loc, diag::err_array_size_ambiguous_conversion) << T;
+ }
+
+ SemaDiagnosticBuilder noteAmbiguous(
+ Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
+ return S.Diag(Conv->getLocation(), diag::note_array_size_conversion)
+ << ConvTy->isEnumeralType() << ConvTy;
+ }
+
+ SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
+ QualType T,
+ QualType ConvTy) override {
+ return S.Diag(Loc,
+ S.getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_array_size_conversion
+ : diag::ext_array_size_conversion)
+ << T << ConvTy->isEnumeralType() << ConvTy;
+ }
+ } SizeDiagnoser(ArraySize);
+
+ ConvertedSize = PerformContextualImplicitConversion(StartLoc, ArraySize,
+ SizeDiagnoser);
+ }
+ if (ConvertedSize.isInvalid())
+ return ExprError();
+
+ ArraySize = ConvertedSize.get();
+ QualType SizeType = ArraySize->getType();
+
+ if (!SizeType->isIntegralOrUnscopedEnumerationType())
+ return ExprError();
+
+ // C++98 [expr.new]p7:
+ // The expression in a direct-new-declarator shall have integral type
+ // with a non-negative value.
+ //
+ // Let's see if this is a constant < 0. If so, we reject it out of
+ // hand. Otherwise, if it's not a constant, we must have an unparenthesized
+ // array type.
+ //
+ // Note: such a construct has well-defined semantics in C++11: it throws
+ // std::bad_array_new_length.
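+ // For example, 'new int[-1]' is an error in C++98 but only warns in
+ // C++11, where it throws std::bad_array_new_length at run time.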
+ if (!ArraySize->isValueDependent()) {
+ llvm::APSInt Value;
+ // We've already performed any required implicit conversion to integer or
+ // unscoped enumeration type.
+ if (ArraySize->isIntegerConstantExpr(Value, Context)) {
+ if (Value < llvm::APSInt(
+ llvm::APInt::getNullValue(Value.getBitWidth()),
+ Value.isUnsigned())) {
+ if (getLangOpts().CPlusPlus11)
+ Diag(ArraySize->getLocStart(),
+ diag::warn_typecheck_negative_array_new_size)
+ << ArraySize->getSourceRange();
+ else
+ return ExprError(Diag(ArraySize->getLocStart(),
+ diag::err_typecheck_negative_array_size)
+ << ArraySize->getSourceRange());
+ } else if (!AllocType->isDependentType()) {
+ unsigned ActiveSizeBits =
+ ConstantArrayType::getNumAddressingBits(Context, AllocType, Value);
+ if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) {
+ if (getLangOpts().CPlusPlus11)
+ Diag(ArraySize->getLocStart(),
+ diag::warn_array_new_too_large)
+ << Value.toString(10)
+ << ArraySize->getSourceRange();
+ else
+ return ExprError(Diag(ArraySize->getLocStart(),
+ diag::err_array_too_large)
+ << Value.toString(10)
+ << ArraySize->getSourceRange());
+ }
+ }
+ } else if (TypeIdParens.isValid()) {
+ // Can't have dynamic array size when the type-id is in parentheses.
+ Diag(ArraySize->getLocStart(), diag::ext_new_paren_array_nonconst)
+ << ArraySize->getSourceRange()
+ << FixItHint::CreateRemoval(TypeIdParens.getBegin())
+ << FixItHint::CreateRemoval(TypeIdParens.getEnd());
+
+ TypeIdParens = SourceRange();
+ }
+ }
+
+ // Note that we do *not* convert the argument in any way. It can
+ // be signed, larger than size_t, whatever.
+ }
+
+ FunctionDecl *OperatorNew = nullptr;
+ FunctionDecl *OperatorDelete = nullptr;
+
+ if (!AllocType->isDependentType() &&
+ !Expr::hasAnyTypeDependentArguments(PlacementArgs) &&
+ FindAllocationFunctions(StartLoc,
+ SourceRange(PlacementLParen, PlacementRParen),
+ UseGlobal, AllocType, ArraySize, PlacementArgs,
+ OperatorNew, OperatorDelete))
+ return ExprError();
+
+ // If this is an array allocation, compute whether the usual array
+ // deallocation function for the type has a size_t parameter.
+ bool UsualArrayDeleteWantsSize = false;
+ if (ArraySize && !AllocType->isDependentType())
+ UsualArrayDeleteWantsSize
+ = doesUsualArrayDeleteWantSize(*this, StartLoc, AllocType);
+
+ SmallVector<Expr *, 8> AllPlaceArgs;
+ if (OperatorNew) {
+ const FunctionProtoType *Proto =
+ OperatorNew->getType()->getAs<FunctionProtoType>();
+ VariadicCallType CallType = Proto->isVariadic() ? VariadicFunction
+ : VariadicDoesNotApply;
+
+ // We've already converted the placement args, just fill in any default
+ // arguments. Skip the first parameter because we don't have a corresponding
+ // argument.
+ if (GatherArgumentsForCall(PlacementLParen, OperatorNew, Proto, 1,
+ PlacementArgs, AllPlaceArgs, CallType))
+ return ExprError();
+
+ if (!AllPlaceArgs.empty())
+ PlacementArgs = AllPlaceArgs;
+
+ // FIXME: This is wrong: PlacementArgs misses out the first (size) argument.
+ DiagnoseSentinelCalls(OperatorNew, PlacementLParen, PlacementArgs);
+
+ // FIXME: Missing call to CheckFunctionCall or equivalent
+ }
+
+ // Warn if the type is over-aligned and is being allocated by global operator
+ // new.
+ if (PlacementArgs.empty() && OperatorNew &&
+ (OperatorNew->isImplicit() ||
+ getSourceManager().isInSystemHeader(OperatorNew->getLocStart()))) {
+ if (unsigned Align = Context.getPreferredTypeAlign(AllocType.getTypePtr())){
+ unsigned SuitableAlign = Context.getTargetInfo().getSuitableAlign();
+ if (Align > SuitableAlign)
+ Diag(StartLoc, diag::warn_overaligned_type)
+ << AllocType
+ << unsigned(Align / Context.getCharWidth())
+ << unsigned(SuitableAlign / Context.getCharWidth());
+ }
+ }
+
+ QualType InitType = AllocType;
+ // Array 'new' can't have any initializers except empty parentheses.
+ // Initializer lists are also allowed, in C++11. Rely on the parser for the
+ // dialect distinction.
+ if (ResultType->isArrayType() || ArraySize) {
+ if (!isLegalArrayNewInitializer(initStyle, Initializer)) {
+ SourceRange InitRange(Inits[0]->getLocStart(),
+ Inits[NumInits - 1]->getLocEnd());
+ Diag(StartLoc, diag::err_new_array_init_args) << InitRange;
+ return ExprError();
+ }
+ if (InitListExpr *ILE = dyn_cast_or_null<InitListExpr>(Initializer)) {
+ // We do the initialization typechecking against the array type
+ // corresponding to the number of initializers + 1 (to also check
+ // default-initialization).
+ unsigned NumElements = ILE->getNumInits() + 1;
+ InitType = Context.getConstantArrayType(AllocType,
+ llvm::APInt(Context.getTypeSize(Context.getSizeType()), NumElements),
+ ArrayType::Normal, 0);
+ }
+ }
+
+ // If we can perform the initialization, and we've not already done so,
+ // do it now.
+ if (!AllocType->isDependentType() &&
+ !Expr::hasAnyTypeDependentArguments(
+ llvm::makeArrayRef(Inits, NumInits))) {
+ // C++11 [expr.new]p15:
+ // A new-expression that creates an object of type T initializes that
+ // object as follows:
+ InitializationKind Kind
+ // - If the new-initializer is omitted, the object is default-
+ // initialized (8.5); if no initialization is performed,
+ // the object has indeterminate value
+ = initStyle == CXXNewExpr::NoInit
+ ? InitializationKind::CreateDefault(TypeRange.getBegin())
+ // - Otherwise, the new-initializer is interpreted according to the
+ // initialization rules of 8.5 for direct-initialization.
+ : initStyle == CXXNewExpr::ListInit
+ ? InitializationKind::CreateDirectList(TypeRange.getBegin())
+ : InitializationKind::CreateDirect(TypeRange.getBegin(),
+ DirectInitRange.getBegin(),
+ DirectInitRange.getEnd());
+
+ InitializedEntity Entity
+ = InitializedEntity::InitializeNew(StartLoc, InitType);
+ InitializationSequence InitSeq(*this, Entity, Kind, MultiExprArg(Inits, NumInits));
+ ExprResult FullInit = InitSeq.Perform(*this, Entity, Kind,
+ MultiExprArg(Inits, NumInits));
+ if (FullInit.isInvalid())
+ return ExprError();
+
+ // FullInit is our initializer; strip off CXXBindTemporaryExprs, because
+ // we don't want the initialized object to be destructed.
+ if (CXXBindTemporaryExpr *Binder =
+ dyn_cast_or_null<CXXBindTemporaryExpr>(FullInit.get()))
+ FullInit = Binder->getSubExpr();
+
+ Initializer = FullInit.get();
+ }
+
+ // Mark the new and delete operators as referenced.
+ if (OperatorNew) {
+ if (DiagnoseUseOfDecl(OperatorNew, StartLoc))
+ return ExprError();
+ MarkFunctionReferenced(StartLoc, OperatorNew);
+ }
+ if (OperatorDelete) {
+ if (DiagnoseUseOfDecl(OperatorDelete, StartLoc))
+ return ExprError();
+ MarkFunctionReferenced(StartLoc, OperatorDelete);
+ }
+
+ // C++0x [expr.new]p17:
+ // If the new expression creates an array of objects of class type,
+ // access and ambiguity control are done for the destructor.
+ QualType BaseAllocType = Context.getBaseElementType(AllocType);
+ if (ArraySize && !BaseAllocType->isDependentType()) {
+ if (const RecordType *BaseRecordType = BaseAllocType->getAs<RecordType>()) {
+ if (CXXDestructorDecl *dtor = LookupDestructor(
+ cast<CXXRecordDecl>(BaseRecordType->getDecl()))) {
+ MarkFunctionReferenced(StartLoc, dtor);
+ CheckDestructorAccess(StartLoc, dtor,
+ PDiag(diag::err_access_dtor)
+ << BaseAllocType);
+ if (DiagnoseUseOfDecl(dtor, StartLoc))
+ return ExprError();
+ }
+ }
+ }
+
+ return new (Context)
+ CXXNewExpr(Context, UseGlobal, OperatorNew, OperatorDelete,
+ UsualArrayDeleteWantsSize, PlacementArgs, TypeIdParens,
+ ArraySize, initStyle, Initializer, ResultType, AllocTypeInfo,
+ Range, DirectInitRange);
+}
+
+/// \brief Checks that a type is suitable as the allocated type
+/// in a new-expression.
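+/// For example, 'new int&' (reference type) and 'new' of an abstract
+/// class type are rejected here.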
+bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
+ SourceRange R) {
+ // C++ 5.3.4p1: "[The] type shall be a complete object type, but not an
+ // abstract class type or array thereof.
+ if (AllocType->isFunctionType())
+ return Diag(Loc, diag::err_bad_new_type)
+ << AllocType << 0 << R;
+ else if (AllocType->isReferenceType())
+ return Diag(Loc, diag::err_bad_new_type)
+ << AllocType << 1 << R;
+ else if (!AllocType->isDependentType() &&
+ RequireCompleteType(Loc, AllocType, diag::err_new_incomplete_type,R))
+ return true;
+ else if (RequireNonAbstractType(Loc, AllocType,
+ diag::err_allocation_of_abstract_type))
+ return true;
+ else if (AllocType->isVariablyModifiedType())
+ return Diag(Loc, diag::err_variably_modified_new_type)
+ << AllocType;
+ else if (unsigned AddressSpace = AllocType.getAddressSpace())
+ return Diag(Loc, diag::err_address_space_qualified_new)
+ << AllocType.getUnqualifiedType() << AddressSpace;
+ else if (getLangOpts().ObjCAutoRefCount) {
+ if (const ArrayType *AT = Context.getAsArrayType(AllocType)) {
+ QualType BaseAllocType = Context.getBaseElementType(AT);
+ if (BaseAllocType.getObjCLifetime() == Qualifiers::OCL_None &&
+ BaseAllocType->isObjCLifetimeType())
+ return Diag(Loc, diag::err_arc_new_array_without_ownership)
+ << BaseAllocType;
+ }
+ }
+
+ return false;
+}
+
+/// \brief Determine whether the given function is a non-placement
+/// deallocation function.
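+/// For example, 'void operator delete(void *)' qualifies, as does
+/// 'void operator delete(void *, std::size_t)' under -fsized-deallocation;
+/// the placement form 'void operator delete(void *, void *)' does not.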
+static bool isNonPlacementDeallocationFunction(Sema &S, FunctionDecl *FD) {
+ if (FD->isInvalidDecl())
+ return false;
+
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FD))
+ return Method->isUsualDeallocationFunction();
+
+ if (FD->getOverloadedOperator() != OO_Delete &&
+ FD->getOverloadedOperator() != OO_Array_Delete)
+ return false;
+
+ if (FD->getNumParams() == 1)
+ return true;
+
+ return S.getLangOpts().SizedDeallocation && FD->getNumParams() == 2 &&
+ S.Context.hasSameUnqualifiedType(FD->getParamDecl(1)->getType(),
+ S.Context.getSizeType());
+}
+
+/// FindAllocationFunctions - Finds the overloads of operator new and delete
+/// that are appropriate for the allocation.
+bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
+ bool UseGlobal, QualType AllocType,
+ bool IsArray, MultiExprArg PlaceArgs,
+ FunctionDecl *&OperatorNew,
+ FunctionDecl *&OperatorDelete) {
+ // --- Choosing an allocation function ---
+ // C++ 5.3.4p8 - 14 & 18
+ // 1) If UseGlobal is true, only look in the global scope. Else, also look
+ // in the scope of the allocated class.
+ // 2) If an array size is given, look for operator new[], else look for
+ // operator new.
+ // 3) The first argument is always size_t. Append the arguments from the
+ // placement form.
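+ // For example, 'new (buf) T' with a class-scope declaration
+ // 'void *operator new(std::size_t, void *);' selects that overload with
+ // AllocArgs = { sizeof(T), buf }.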
+
+ SmallVector<Expr*, 8> AllocArgs(1 + PlaceArgs.size());
+ // We don't care about the actual value of this argument.
+ // FIXME: Should the Sema create the expression and embed it in the syntax
+ // tree? Or should the consumer just recalculate the value?
+ IntegerLiteral Size(Context, llvm::APInt::getNullValue(
+ Context.getTargetInfo().getPointerWidth(0)),
+ Context.getSizeType(),
+ SourceLocation());
+ AllocArgs[0] = &Size;
+ std::copy(PlaceArgs.begin(), PlaceArgs.end(), AllocArgs.begin() + 1);
+
+ // C++ [expr.new]p8:
+ // If the allocated type is a non-array type, the allocation
+ // function's name is operator new and the deallocation function's
+ // name is operator delete. If the allocated type is an array
+ // type, the allocation function's name is operator new[] and the
+ // deallocation function's name is operator delete[].
+ DeclarationName NewName = Context.DeclarationNames.getCXXOperatorName(
+ IsArray ? OO_Array_New : OO_New);
+ DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName(
+ IsArray ? OO_Array_Delete : OO_Delete);
+
+ QualType AllocElemType = Context.getBaseElementType(AllocType);
+
+ if (AllocElemType->isRecordType() && !UseGlobal) {
+ CXXRecordDecl *Record
+ = cast<CXXRecordDecl>(AllocElemType->getAs<RecordType>()->getDecl());
+ if (FindAllocationOverload(StartLoc, Range, NewName, AllocArgs, Record,
+ /*AllowMissing=*/true, OperatorNew))
+ return true;
+ }
+
+ if (!OperatorNew) {
+ // Didn't find a member overload. Look for a global one.
+ DeclareGlobalNewDelete();
+ DeclContext *TUDecl = Context.getTranslationUnitDecl();
+ bool FallbackEnabled = IsArray && Context.getLangOpts().MSVCCompat;
+ if (FindAllocationOverload(StartLoc, Range, NewName, AllocArgs, TUDecl,
+ /*AllowMissing=*/FallbackEnabled, OperatorNew,
+ /*Diagnose=*/!FallbackEnabled)) {
+ if (!FallbackEnabled)
+ return true;
+
+ // MSVC will fall back on trying to find a matching global operator new
+ // if operator new[] cannot be found. Also, MSVC will leak by not
+ // generating a call to operator delete or operator delete[], but we
+ // will not replicate that bug.
+ NewName = Context.DeclarationNames.getCXXOperatorName(OO_New);
+ DeleteName = Context.DeclarationNames.getCXXOperatorName(OO_Delete);
+ if (FindAllocationOverload(StartLoc, Range, NewName, AllocArgs, TUDecl,
+ /*AllowMissing=*/false, OperatorNew))
+ return true;
+ }
+ }
+
+ // We don't need an operator delete if we're running under
+ // -fno-exceptions.
+ if (!getLangOpts().Exceptions) {
+ OperatorDelete = nullptr;
+ return false;
+ }
+
+ // C++ [expr.new]p19:
+ //
+ // If the new-expression begins with a unary :: operator, the
+ // deallocation function's name is looked up in the global
+ // scope. Otherwise, if the allocated type is a class type T or an
+ // array thereof, the deallocation function's name is looked up in
+ // the scope of T. If this lookup fails to find the name, or if
+ // the allocated type is not a class type or array thereof, the
+ // deallocation function's name is looked up in the global scope.
+ LookupResult FoundDelete(*this, DeleteName, StartLoc, LookupOrdinaryName);
+ if (AllocElemType->isRecordType() && !UseGlobal) {
+ CXXRecordDecl *RD
+ = cast<CXXRecordDecl>(AllocElemType->getAs<RecordType>()->getDecl());
+ LookupQualifiedName(FoundDelete, RD);
+ }
+ if (FoundDelete.isAmbiguous())
+ return true; // FIXME: clean up expressions?
+
+ if (FoundDelete.empty()) {
+ DeclareGlobalNewDelete();
+ LookupQualifiedName(FoundDelete, Context.getTranslationUnitDecl());
+ }
+
+ FoundDelete.suppressDiagnostics();
+
+ SmallVector<std::pair<DeclAccessPair,FunctionDecl*>, 2> Matches;
+
+ // Whether we're looking for a placement operator delete is dictated
+ // by whether we selected a placement operator new, not by whether
+ // we had explicit placement arguments. This matters for things like
+ // struct A { void *operator new(size_t, int = 0); ... };
+ // A *a = new A();
+ bool isPlacementNew = (!PlaceArgs.empty() || OperatorNew->param_size() != 1);
+
+ if (isPlacementNew) {
+ // C++ [expr.new]p20:
+ // A declaration of a placement deallocation function matches the
+ // declaration of a placement allocation function if it has the
+ // same number of parameters and, after parameter transformations
+ // (8.3.5), all parameter types except the first are
+ // identical. [...]
+ //
+ // To perform this comparison, we compute the function type that
+ // the deallocation function should have, and use that type both
+ // for template argument deduction and for comparison purposes.
+ //
+ // FIXME: this comparison should ignore CC and the like.
+ QualType ExpectedFunctionType;
+ {
+ const FunctionProtoType *Proto
+ = OperatorNew->getType()->getAs<FunctionProtoType>();
+
+ SmallVector<QualType, 4> ArgTypes;
+ ArgTypes.push_back(Context.VoidPtrTy);
+ for (unsigned I = 1, N = Proto->getNumParams(); I < N; ++I)
+ ArgTypes.push_back(Proto->getParamType(I));
+
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.Variadic = Proto->isVariadic();
+
+ ExpectedFunctionType
+ = Context.getFunctionType(Context.VoidTy, ArgTypes, EPI);
+ }
+
+ for (LookupResult::iterator D = FoundDelete.begin(),
+ DEnd = FoundDelete.end();
+ D != DEnd; ++D) {
+ FunctionDecl *Fn = nullptr;
+ if (FunctionTemplateDecl *FnTmpl
+ = dyn_cast<FunctionTemplateDecl>((*D)->getUnderlyingDecl())) {
+ // Perform template argument deduction to try to match the
+ // expected function type.
+ TemplateDeductionInfo Info(StartLoc);
+ if (DeduceTemplateArguments(FnTmpl, nullptr, ExpectedFunctionType, Fn,
+ Info))
+ continue;
+ } else
+ Fn = cast<FunctionDecl>((*D)->getUnderlyingDecl());
+
+ if (Context.hasSameType(Fn->getType(), ExpectedFunctionType))
+ Matches.push_back(std::make_pair(D.getPair(), Fn));
+ }
+ } else {
+ // C++ [expr.new]p20:
+ // [...] Any non-placement deallocation function matches a
+ // non-placement allocation function. [...]
+ for (LookupResult::iterator D = FoundDelete.begin(),
+ DEnd = FoundDelete.end();
+ D != DEnd; ++D) {
+ if (FunctionDecl *Fn = dyn_cast<FunctionDecl>((*D)->getUnderlyingDecl()))
+ if (isNonPlacementDeallocationFunction(*this, Fn))
+ Matches.push_back(std::make_pair(D.getPair(), Fn));
+ }
+
+ // C++1y [expr.new]p22:
+ // For a non-placement allocation function, the normal deallocation
+ // function lookup is used
+ // C++1y [expr.delete]p?:
+ // If [...] deallocation function lookup finds both a usual deallocation
+ // function with only a pointer parameter and a usual deallocation
+ // function with both a pointer parameter and a size parameter, then the
+ // selected deallocation function shall be the one with two parameters.
+ // Otherwise, the selected deallocation function shall be the function
+ // with one parameter.
+ if (getLangOpts().SizedDeallocation && Matches.size() == 2) {
+ if (Matches[0].second->getNumParams() == 1)
+ Matches.erase(Matches.begin());
+ else
+ Matches.erase(Matches.begin() + 1);
+ assert(Matches[0].second->getNumParams() == 2 &&
+ "found an unexpected usual deallocation function");
+ }
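+    // Illustrative sketch (not part of the original source): under
+    // -fsized-deallocation, if this lookup finds both
+    //   void operator delete(void*) noexcept;
+    //   void operator delete(void*, std::size_t) noexcept;
+    // the pruning above keeps only the two-parameter (sized) form.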
+ }
+
+ // C++ [expr.new]p20:
+ // [...] If the lookup finds a single matching deallocation
+ // function, that function will be called; otherwise, no
+ // deallocation function will be called.
+ if (Matches.size() == 1) {
+ OperatorDelete = Matches[0].second;
+
+ // C++0x [expr.new]p20:
+ // If the lookup finds the two-parameter form of a usual
+ // deallocation function (3.7.4.2) and that function, considered
+ // as a placement deallocation function, would have been
+ // selected as a match for the allocation function, the program
+ // is ill-formed.
+ if (!PlaceArgs.empty() && getLangOpts().CPlusPlus11 &&
+ isNonPlacementDeallocationFunction(*this, OperatorDelete)) {
+ Diag(StartLoc, diag::err_placement_new_non_placement_delete)
+ << SourceRange(PlaceArgs.front()->getLocStart(),
+ PlaceArgs.back()->getLocEnd());
+ if (!OperatorDelete->isImplicit())
+ Diag(OperatorDelete->getLocation(), diag::note_previous_decl)
+ << DeleteName;
+ } else {
+ CheckAllocationAccess(StartLoc, Range, FoundDelete.getNamingClass(),
+ Matches[0].first);
+ }
+ }
+
+ return false;
+}
+
+/// \brief Find a fitting overload for the allocation function
+/// in the specified scope.
+///
+/// \param StartLoc The location of the 'new' token.
+/// \param Range The range of the placement arguments.
+/// \param Name The name of the function ('operator new' or 'operator new[]').
+/// \param Args The placement arguments specified.
+/// \param Ctx The scope in which we should search; either a class scope or the
+/// translation unit.
+/// \param AllowMissing If \c true, it is not an error for no matching
+/// allocation function to exist; lookup succeeds without filling in \p
+/// Operator. If \c false, a missing allocation function is diagnosed.
+/// \param Operator Filled in with the found allocation function. Unchanged if
+/// no allocation function was found.
+/// \param Diagnose If \c true, issue errors if the allocation function is not
+/// usable.
+bool Sema::FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
+ DeclarationName Name, MultiExprArg Args,
+ DeclContext *Ctx,
+ bool AllowMissing, FunctionDecl *&Operator,
+ bool Diagnose) {
+ LookupResult R(*this, Name, StartLoc, LookupOrdinaryName);
+ LookupQualifiedName(R, Ctx);
+ if (R.empty()) {
+ if (AllowMissing || !Diagnose)
+ return false;
+ return Diag(StartLoc, diag::err_ovl_no_viable_function_in_call)
+ << Name << Range;
+ }
+
+ if (R.isAmbiguous())
+ return true;
+
+ R.suppressDiagnostics();
+
+ OverloadCandidateSet Candidates(StartLoc, OverloadCandidateSet::CSK_Normal);
+ for (LookupResult::iterator Alloc = R.begin(), AllocEnd = R.end();
+ Alloc != AllocEnd; ++Alloc) {
+ // Even member operator new/delete are implicitly treated as
+ // static, so don't use AddMemberCandidate.
+ NamedDecl *D = (*Alloc)->getUnderlyingDecl();
+
+ if (FunctionTemplateDecl *FnTemplate = dyn_cast<FunctionTemplateDecl>(D)) {
+ AddTemplateOverloadCandidate(FnTemplate, Alloc.getPair(),
+ /*ExplicitTemplateArgs=*/nullptr,
+ Args, Candidates,
+ /*SuppressUserConversions=*/false);
+ continue;
+ }
+
+ FunctionDecl *Fn = cast<FunctionDecl>(D);
+ AddOverloadCandidate(Fn, Alloc.getPair(), Args, Candidates,
+ /*SuppressUserConversions=*/false);
+ }
+
+ // Do the resolution.
+ OverloadCandidateSet::iterator Best;
+ switch (Candidates.BestViableFunction(*this, StartLoc, Best)) {
+ case OR_Success: {
+ // Got one!
+ FunctionDecl *FnDecl = Best->Function;
+ if (CheckAllocationAccess(StartLoc, Range, R.getNamingClass(),
+ Best->FoundDecl, Diagnose) == AR_inaccessible)
+ return true;
+
+ Operator = FnDecl;
+ return false;
+ }
+
+ case OR_No_Viable_Function:
+ if (Diagnose) {
+ Diag(StartLoc, diag::err_ovl_no_viable_function_in_call)
+ << Name << Range;
+ Candidates.NoteCandidates(*this, OCD_AllCandidates, Args);
+ }
+ return true;
+
+ case OR_Ambiguous:
+ if (Diagnose) {
+ Diag(StartLoc, diag::err_ovl_ambiguous_call)
+ << Name << Range;
+ Candidates.NoteCandidates(*this, OCD_ViableCandidates, Args);
+ }
+ return true;
+
+ case OR_Deleted: {
+ if (Diagnose) {
+ Diag(StartLoc, diag::err_ovl_deleted_call)
+ << Best->Function->isDeleted()
+ << Name
+ << getDeletedOrUnavailableSuffix(Best->Function)
+ << Range;
+ Candidates.NoteCandidates(*this, OCD_AllCandidates, Args);
+ }
+ return true;
+ }
+ }
+ llvm_unreachable("Unreachable, bad result from BestViableFunction");
+}
+
+
+/// DeclareGlobalNewDelete - Declare the global forms of operator new and
+/// delete. These are:
+/// @code
+/// // C++03:
+/// void* operator new(std::size_t) throw(std::bad_alloc);
+/// void* operator new[](std::size_t) throw(std::bad_alloc);
+/// void operator delete(void *) throw();
+/// void operator delete[](void *) throw();
+/// // C++11:
+/// void* operator new(std::size_t);
+/// void* operator new[](std::size_t);
+/// void operator delete(void *) noexcept;
+/// void operator delete[](void *) noexcept;
+/// // C++1y:
+/// void* operator new(std::size_t);
+/// void* operator new[](std::size_t);
+/// void operator delete(void *) noexcept;
+/// void operator delete[](void *) noexcept;
+/// void operator delete(void *, std::size_t) noexcept;
+/// void operator delete[](void *, std::size_t) noexcept;
+/// @endcode
+/// Note that the placement and nothrow forms of new are *not* implicitly
+/// declared. Their use requires including \<new\>.
+void Sema::DeclareGlobalNewDelete() {
+ if (GlobalNewDeleteDeclared)
+ return;
+
+ // C++ [basic.std.dynamic]p2:
+ // [...] The following allocation and deallocation functions (18.4) are
+ // implicitly declared in global scope in each translation unit of a
+ // program
+ //
+ // C++03:
+ // void* operator new(std::size_t) throw(std::bad_alloc);
+ // void* operator new[](std::size_t) throw(std::bad_alloc);
+ // void operator delete(void*) throw();
+ // void operator delete[](void*) throw();
+ // C++11:
+ // void* operator new(std::size_t);
+ // void* operator new[](std::size_t);
+ // void operator delete(void*) noexcept;
+ // void operator delete[](void*) noexcept;
+ // C++1y:
+ // void* operator new(std::size_t);
+ // void* operator new[](std::size_t);
+ // void operator delete(void*) noexcept;
+ // void operator delete[](void*) noexcept;
+ // void operator delete(void*, std::size_t) noexcept;
+ // void operator delete[](void*, std::size_t) noexcept;
+ //
+ // These implicit declarations introduce only the function names operator
+ // new, operator new[], operator delete, operator delete[].
+ //
+ // Here, we need to refer to std::bad_alloc, so we will implicitly declare
+ // "std" or "bad_alloc" as necessary to form the exception specification.
+ // However, we do not make these implicit declarations visible to name
+ // lookup.
+ if (!StdBadAlloc && !getLangOpts().CPlusPlus11) {
+ // The "std::bad_alloc" class has not yet been declared, so build it
+ // implicitly.
+ StdBadAlloc = CXXRecordDecl::Create(Context, TTK_Class,
+ getOrCreateStdNamespace(),
+ SourceLocation(), SourceLocation(),
+ &PP.getIdentifierTable().get("bad_alloc"),
+ nullptr);
+ getStdBadAlloc()->setImplicit(true);
+ }
+
+ GlobalNewDeleteDeclared = true;
+
+ QualType VoidPtr = Context.getPointerType(Context.VoidTy);
+ QualType SizeT = Context.getSizeType();
+ bool AssumeSaneOperatorNew = getLangOpts().AssumeSaneOperatorNew;
+
+ DeclareGlobalAllocationFunction(
+ Context.DeclarationNames.getCXXOperatorName(OO_New),
+ VoidPtr, SizeT, QualType(), AssumeSaneOperatorNew);
+ DeclareGlobalAllocationFunction(
+ Context.DeclarationNames.getCXXOperatorName(OO_Array_New),
+ VoidPtr, SizeT, QualType(), AssumeSaneOperatorNew);
+ DeclareGlobalAllocationFunction(
+ Context.DeclarationNames.getCXXOperatorName(OO_Delete),
+ Context.VoidTy, VoidPtr);
+ DeclareGlobalAllocationFunction(
+ Context.DeclarationNames.getCXXOperatorName(OO_Array_Delete),
+ Context.VoidTy, VoidPtr);
+ if (getLangOpts().SizedDeallocation) {
+ DeclareGlobalAllocationFunction(
+ Context.DeclarationNames.getCXXOperatorName(OO_Delete),
+ Context.VoidTy, VoidPtr, Context.getSizeType());
+ DeclareGlobalAllocationFunction(
+ Context.DeclarationNames.getCXXOperatorName(OO_Array_Delete),
+ Context.VoidTy, VoidPtr, Context.getSizeType());
+ }
+}
+
+/// DeclareGlobalAllocationFunction - Declares a single implicit global
+/// allocation function if it doesn't already exist.
+void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
+ QualType Return,
+ QualType Param1, QualType Param2,
+ bool AddRestrictAttr) {
+ DeclContext *GlobalCtx = Context.getTranslationUnitDecl();
+ unsigned NumParams = Param2.isNull() ? 1 : 2;
+
+ // Check if this function is already declared.
+ DeclContext::lookup_result R = GlobalCtx->lookup(Name);
+ for (DeclContext::lookup_iterator Alloc = R.begin(), AllocEnd = R.end();
+ Alloc != AllocEnd; ++Alloc) {
+ // Only look at non-template functions, as it is the predefined,
+ // non-templated allocation function we are trying to declare here.
+ if (FunctionDecl *Func = dyn_cast<FunctionDecl>(*Alloc)) {
+ if (Func->getNumParams() == NumParams) {
+ QualType InitialParam1Type =
+ Context.getCanonicalType(Func->getParamDecl(0)
+ ->getType().getUnqualifiedType());
+ QualType InitialParam2Type =
+ NumParams == 2
+ ? Context.getCanonicalType(Func->getParamDecl(1)
+ ->getType().getUnqualifiedType())
+ : QualType();
+ // FIXME: Do we need to check for default arguments here?
+ if (InitialParam1Type == Param1 &&
+ (NumParams == 1 || InitialParam2Type == Param2)) {
+ if (AddRestrictAttr && !Func->hasAttr<RestrictAttr>())
+ Func->addAttr(RestrictAttr::CreateImplicit(
+ Context, RestrictAttr::GNU_malloc));
+ // Make the function visible to name lookup, even if we found it in
+ // an unimported module. It either is an implicitly-declared global
+ // allocation function, or is suppressing that function.
+ Func->setHidden(false);
+ return;
+ }
+ }
+ }
+ }
+
+ FunctionProtoType::ExtProtoInfo EPI;
+
+ QualType BadAllocType;
+ bool HasBadAllocExceptionSpec
+ = (Name.getCXXOverloadedOperator() == OO_New ||
+ Name.getCXXOverloadedOperator() == OO_Array_New);
+ if (HasBadAllocExceptionSpec) {
+ if (!getLangOpts().CPlusPlus11) {
+      assert(StdBadAlloc && "Must have std::bad_alloc declared");
+      BadAllocType = Context.getTypeDeclType(getStdBadAlloc());
+ EPI.ExceptionSpec.Type = EST_Dynamic;
+ EPI.ExceptionSpec.Exceptions = llvm::makeArrayRef(BadAllocType);
+ }
+ } else {
+ EPI.ExceptionSpec =
+ getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;
+ }
+
+ QualType Params[] = { Param1, Param2 };
+
+ QualType FnType = Context.getFunctionType(
+ Return, llvm::makeArrayRef(Params, NumParams), EPI);
+ FunctionDecl *Alloc =
+ FunctionDecl::Create(Context, GlobalCtx, SourceLocation(),
+ SourceLocation(), Name,
+ FnType, /*TInfo=*/nullptr, SC_None, false, true);
+ Alloc->setImplicit();
+
+ // Implicit sized deallocation functions always have default visibility.
+ Alloc->addAttr(VisibilityAttr::CreateImplicit(Context,
+ VisibilityAttr::Default));
+
+ if (AddRestrictAttr)
+ Alloc->addAttr(
+ RestrictAttr::CreateImplicit(Context, RestrictAttr::GNU_malloc));
+
+ ParmVarDecl *ParamDecls[2];
+ for (unsigned I = 0; I != NumParams; ++I) {
+ ParamDecls[I] = ParmVarDecl::Create(Context, Alloc, SourceLocation(),
+ SourceLocation(), nullptr,
+ Params[I], /*TInfo=*/nullptr,
+ SC_None, nullptr);
+ ParamDecls[I]->setImplicit();
+ }
+ Alloc->setParams(llvm::makeArrayRef(ParamDecls, NumParams));
+
+ Context.getTranslationUnitDecl()->addDecl(Alloc);
+ IdResolver.tryAddTopLevelDecl(Alloc, Name);
+}
+
+FunctionDecl *Sema::FindUsualDeallocationFunction(SourceLocation StartLoc,
+ bool CanProvideSize,
+ DeclarationName Name) {
+ DeclareGlobalNewDelete();
+
+ LookupResult FoundDelete(*this, Name, StartLoc, LookupOrdinaryName);
+ LookupQualifiedName(FoundDelete, Context.getTranslationUnitDecl());
+
+ // C++ [expr.new]p20:
+ // [...] Any non-placement deallocation function matches a
+ // non-placement allocation function. [...]
+ llvm::SmallVector<FunctionDecl*, 2> Matches;
+ for (LookupResult::iterator D = FoundDelete.begin(),
+ DEnd = FoundDelete.end();
+ D != DEnd; ++D) {
+ if (FunctionDecl *Fn = dyn_cast<FunctionDecl>(*D))
+ if (isNonPlacementDeallocationFunction(*this, Fn))
+ Matches.push_back(Fn);
+ }
+
+ // C++1y [expr.delete]p?:
+ // If the type is complete and deallocation function lookup finds both a
+ // usual deallocation function with only a pointer parameter and a usual
+ // deallocation function with both a pointer parameter and a size
+ // parameter, then the selected deallocation function shall be the one
+ // with two parameters. Otherwise, the selected deallocation function
+ // shall be the function with one parameter.
+ if (getLangOpts().SizedDeallocation && Matches.size() == 2) {
+ unsigned NumArgs = CanProvideSize ? 2 : 1;
+ if (Matches[0]->getNumParams() != NumArgs)
+ Matches.erase(Matches.begin());
+ else
+ Matches.erase(Matches.begin() + 1);
+ assert(Matches[0]->getNumParams() == NumArgs &&
+ "found an unexpected usual deallocation function");
+ }
+
+ if (getLangOpts().CUDA && getLangOpts().CUDATargetOverloads)
+ EraseUnwantedCUDAMatches(dyn_cast<FunctionDecl>(CurContext), Matches);
+
+ assert(Matches.size() == 1 &&
+ "unexpectedly have multiple usual deallocation functions");
+ return Matches.front();
+}
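+// Illustrative sketch (not part of the original source): for
+//   struct T { ~T(); };
+//   void f(T *p) { delete p; }
+// the caller passes CanProvideSize == true (the pointee type is complete and
+// this is a scalar delete), so under -fsized-deallocation the global
+// 'operator delete(void*, std::size_t)' is selected over the one-parameter
+// form.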
+
+bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
+ DeclarationName Name,
+ FunctionDecl* &Operator, bool Diagnose) {
+ LookupResult Found(*this, Name, StartLoc, LookupOrdinaryName);
+ // Try to find operator delete/operator delete[] in class scope.
+ LookupQualifiedName(Found, RD);
+
+ if (Found.isAmbiguous())
+ return true;
+
+ Found.suppressDiagnostics();
+
+ SmallVector<DeclAccessPair,4> Matches;
+ for (LookupResult::iterator F = Found.begin(), FEnd = Found.end();
+ F != FEnd; ++F) {
+ NamedDecl *ND = (*F)->getUnderlyingDecl();
+
+ // Ignore template operator delete members from the check for a usual
+ // deallocation function.
+ if (isa<FunctionTemplateDecl>(ND))
+ continue;
+
+ if (cast<CXXMethodDecl>(ND)->isUsualDeallocationFunction())
+ Matches.push_back(F.getPair());
+ }
+
+ if (getLangOpts().CUDA && getLangOpts().CUDATargetOverloads)
+ EraseUnwantedCUDAMatches(dyn_cast<FunctionDecl>(CurContext), Matches);
+
+ // There's exactly one suitable operator; pick it.
+ if (Matches.size() == 1) {
+ Operator = cast<CXXMethodDecl>(Matches[0]->getUnderlyingDecl());
+
+ if (Operator->isDeleted()) {
+ if (Diagnose) {
+ Diag(StartLoc, diag::err_deleted_function_use);
+ NoteDeletedFunction(Operator);
+ }
+ return true;
+ }
+
+ if (CheckAllocationAccess(StartLoc, SourceRange(), Found.getNamingClass(),
+ Matches[0], Diagnose) == AR_inaccessible)
+ return true;
+
+ return false;
+
+ // We found multiple suitable operators; complain about the ambiguity.
+ } else if (!Matches.empty()) {
+ if (Diagnose) {
+ Diag(StartLoc, diag::err_ambiguous_suitable_delete_member_function_found)
+ << Name << RD;
+
+ for (SmallVectorImpl<DeclAccessPair>::iterator
+ F = Matches.begin(), FEnd = Matches.end(); F != FEnd; ++F)
+ Diag((*F)->getUnderlyingDecl()->getLocation(),
+ diag::note_member_declared_here) << Name;
+ }
+ return true;
+ }
+
+ // We did find operator delete/operator delete[] declarations, but
+ // none of them were suitable.
+ if (!Found.empty()) {
+ if (Diagnose) {
+ Diag(StartLoc, diag::err_no_suitable_delete_member_function_found)
+ << Name << RD;
+
+ for (LookupResult::iterator F = Found.begin(), FEnd = Found.end();
+ F != FEnd; ++F)
+ Diag((*F)->getUnderlyingDecl()->getLocation(),
+ diag::note_member_declared_here) << Name;
+ }
+ return true;
+ }
+
+ Operator = nullptr;
+ return false;
+}
+
+namespace {
+/// \brief Checks whether a delete-expression and the new-expression used to
+/// initialize the deletee have the same array form.
+class MismatchingNewDeleteDetector {
+public:
+ enum MismatchResult {
+ /// Indicates that there is no mismatch or a mismatch cannot be proven.
+ NoMismatch,
+    /// Indicates that a variable is initialized with a mismatching form of
+    /// \a new.
+    VarInitMismatches,
+    /// Indicates that a member is initialized with a mismatching form of
+    /// \a new.
+    MemberInitMismatches,
+    /// Indicates that one or more constructors' definitions could not be
+    /// analyzed, and they will be checked again at the end of the
+    /// translation unit.
+    AnalyzeLater
+ };
+
+  /// \param EndOfTU True if this is the final analysis at the end of the
+  /// translation unit, false if this is the initial analysis at the point
+  /// where the delete-expression was encountered.
+ explicit MismatchingNewDeleteDetector(bool EndOfTU)
+ : IsArrayForm(false), Field(nullptr), EndOfTU(EndOfTU),
+ HasUndefinedConstructors(false) {}
+
+ /// \brief Checks whether pointee of a delete-expression is initialized with
+ /// matching form of new-expression.
+ ///
+  /// If the return value is \c VarInitMismatches or \c MemberInitMismatches
+  /// at the point where the delete-expression is encountered, a warning is
+  /// issued immediately. If the return value is \c AnalyzeLater at that
+  /// point, the member is analyzed again at the end of the translation unit.
+  /// \c AnalyzeLater is returned iff at least one constructor could not be
+  /// analyzed. If at least one constructor initializes the member with the
+  /// matching form of \c new, the return value is \c NoMismatch.
+ MismatchResult analyzeDeleteExpr(const CXXDeleteExpr *DE);
+ /// \brief Analyzes a class member.
+ /// \param Field Class member to analyze.
+ /// \param DeleteWasArrayForm Array form-ness of the delete-expression used
+ /// for deleting the \p Field.
+ MismatchResult analyzeField(FieldDecl *Field, bool DeleteWasArrayForm);
+ /// List of mismatching new-expressions used for initialization of the pointee
+ llvm::SmallVector<const CXXNewExpr *, 4> NewExprs;
+ /// Indicates whether delete-expression was in array form.
+ bool IsArrayForm;
+ FieldDecl *Field;
+
+private:
+ const bool EndOfTU;
+ /// \brief Indicates that there is at least one constructor without body.
+ bool HasUndefinedConstructors;
+ /// \brief Returns \c CXXNewExpr from given initialization expression.
+ /// \param E Expression used for initializing pointee in delete-expression.
+ /// E can be a single-element \c InitListExpr consisting of new-expression.
+ const CXXNewExpr *getNewExprFromInitListOrExpr(const Expr *E);
+  /// \brief Returns whether the member is initialized with a mismatching
+  /// form of \c new, either by a member initializer or by in-class
+  /// initialization.
+  ///
+  /// If not all constructor bodies are visible at the end of the translation
+  /// unit, or at least one constructor initializes the member with the
+  /// matching form of \c new, a mismatch cannot be proven and this function
+  /// returns \c NoMismatch.
+ MismatchResult analyzeMemberExpr(const MemberExpr *ME);
+  /// \brief Returns whether the variable is initialized with the matching
+  /// form of \c new.
+  ///
+  /// Returns true if the variable is initialized with the matching form of
+  /// \c new, or is not initialized with a \c new expression at all; returns
+  /// false if it is initialized with a mismatching form of \c new.
+  /// \param D Variable to analyze.
+ bool hasMatchingVarInit(const DeclRefExpr *D);
+  /// \brief Checks whether the constructor initializes the pointee with the
+  /// matching form of \c new.
+  ///
+  /// Returns true if the member is initialized with the matching form of
+  /// \c new in the member initializer list. Returns false if the member is
+  /// initialized with a mismatching form of \c new, if the constructor isn't
+  /// defined at the point where the delete-expression is seen, or if the
+  /// member isn't initialized by the constructor.
+ bool hasMatchingNewInCtor(const CXXConstructorDecl *CD);
+ /// \brief Checks whether member is initialized with matching form of
+ /// \c new in member initializer list.
+ bool hasMatchingNewInCtorInit(const CXXCtorInitializer *CI);
+ /// Checks whether member is initialized with mismatching form of \c new by
+ /// in-class initializer.
+ MismatchResult analyzeInClassInitializer();
+};
+}
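+// Illustrative example (not part of the original source) of the mismatch the
+// detector above diagnoses:
+//   int *p = new int[8];
+//   delete p;   // -Wmismatched-new-delete: allocated with 'new[]',
+//               // should be 'delete[] p;'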
+
+MismatchingNewDeleteDetector::MismatchResult
+MismatchingNewDeleteDetector::analyzeDeleteExpr(const CXXDeleteExpr *DE) {
+ NewExprs.clear();
+ assert(DE && "Expected delete-expression");
+ IsArrayForm = DE->isArrayForm();
+ const Expr *E = DE->getArgument()->IgnoreParenImpCasts();
+ if (const MemberExpr *ME = dyn_cast<const MemberExpr>(E)) {
+ return analyzeMemberExpr(ME);
+ } else if (const DeclRefExpr *D = dyn_cast<const DeclRefExpr>(E)) {
+ if (!hasMatchingVarInit(D))
+ return VarInitMismatches;
+ }
+ return NoMismatch;
+}
+
+const CXXNewExpr *
+MismatchingNewDeleteDetector::getNewExprFromInitListOrExpr(const Expr *E) {
+ assert(E != nullptr && "Expected a valid initializer expression");
+ E = E->IgnoreParenImpCasts();
+ if (const InitListExpr *ILE = dyn_cast<const InitListExpr>(E)) {
+ if (ILE->getNumInits() == 1)
+ E = dyn_cast<const CXXNewExpr>(ILE->getInit(0)->IgnoreParenImpCasts());
+ }
+
+ return dyn_cast_or_null<const CXXNewExpr>(E);
+}
+
+bool MismatchingNewDeleteDetector::hasMatchingNewInCtorInit(
+ const CXXCtorInitializer *CI) {
+ const CXXNewExpr *NE = nullptr;
+ if (Field == CI->getMember() &&
+ (NE = getNewExprFromInitListOrExpr(CI->getInit()))) {
+ if (NE->isArray() == IsArrayForm)
+ return true;
+ else
+ NewExprs.push_back(NE);
+ }
+ return false;
+}
+
+bool MismatchingNewDeleteDetector::hasMatchingNewInCtor(
+ const CXXConstructorDecl *CD) {
+ if (CD->isImplicit())
+ return false;
+ const FunctionDecl *Definition = CD;
+ if (!CD->isThisDeclarationADefinition() && !CD->isDefined(Definition)) {
+ HasUndefinedConstructors = true;
+ return EndOfTU;
+ }
+ for (const auto *CI : cast<const CXXConstructorDecl>(Definition)->inits()) {
+ if (hasMatchingNewInCtorInit(CI))
+ return true;
+ }
+ return false;
+}
+
+MismatchingNewDeleteDetector::MismatchResult
+MismatchingNewDeleteDetector::analyzeInClassInitializer() {
+ assert(Field != nullptr && "This should be called only for members");
+ const Expr *InitExpr = Field->getInClassInitializer();
+ if (!InitExpr)
+ return EndOfTU ? NoMismatch : AnalyzeLater;
+ if (const CXXNewExpr *NE = getNewExprFromInitListOrExpr(InitExpr)) {
+ if (NE->isArray() != IsArrayForm) {
+ NewExprs.push_back(NE);
+ return MemberInitMismatches;
+ }
+ }
+ return NoMismatch;
+}
+
+MismatchingNewDeleteDetector::MismatchResult
+MismatchingNewDeleteDetector::analyzeField(FieldDecl *Field,
+ bool DeleteWasArrayForm) {
+ assert(Field != nullptr && "Analysis requires a valid class member.");
+ this->Field = Field;
+ IsArrayForm = DeleteWasArrayForm;
+ const CXXRecordDecl *RD = cast<const CXXRecordDecl>(Field->getParent());
+ for (const auto *CD : RD->ctors()) {
+ if (hasMatchingNewInCtor(CD))
+ return NoMismatch;
+ }
+ if (HasUndefinedConstructors)
+ return EndOfTU ? NoMismatch : AnalyzeLater;
+ if (!NewExprs.empty())
+ return MemberInitMismatches;
+ return Field->hasInClassInitializer() ? analyzeInClassInitializer()
+ : NoMismatch;
+}
+
+MismatchingNewDeleteDetector::MismatchResult
+MismatchingNewDeleteDetector::analyzeMemberExpr(const MemberExpr *ME) {
+ assert(ME != nullptr && "Expected a member expression");
+ if (FieldDecl *F = dyn_cast<FieldDecl>(ME->getMemberDecl()))
+ return analyzeField(F, IsArrayForm);
+ return NoMismatch;
+}
+
+bool MismatchingNewDeleteDetector::hasMatchingVarInit(const DeclRefExpr *D) {
+ const CXXNewExpr *NE = nullptr;
+ if (const VarDecl *VD = dyn_cast<const VarDecl>(D->getDecl())) {
+ if (VD->hasInit() && (NE = getNewExprFromInitListOrExpr(VD->getInit())) &&
+ NE->isArray() != IsArrayForm) {
+ NewExprs.push_back(NE);
+ }
+ }
+ return NewExprs.empty();
+}
+
+static void
+DiagnoseMismatchedNewDelete(Sema &SemaRef, SourceLocation DeleteLoc,
+ const MismatchingNewDeleteDetector &Detector) {
+ SourceLocation EndOfDelete = SemaRef.getLocForEndOfToken(DeleteLoc);
+ FixItHint H;
+ if (!Detector.IsArrayForm)
+ H = FixItHint::CreateInsertion(EndOfDelete, "[]");
+ else {
+ SourceLocation RSquare = Lexer::findLocationAfterToken(
+ DeleteLoc, tok::l_square, SemaRef.getSourceManager(),
+ SemaRef.getLangOpts(), true);
+ if (RSquare.isValid())
+ H = FixItHint::CreateRemoval(SourceRange(EndOfDelete, RSquare));
+ }
+ SemaRef.Diag(DeleteLoc, diag::warn_mismatched_delete_new)
+ << Detector.IsArrayForm << H;
+
+ for (const auto *NE : Detector.NewExprs)
+ SemaRef.Diag(NE->getExprLoc(), diag::note_allocated_here)
+ << Detector.IsArrayForm;
+}
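+// Illustrative note (not part of the original source): for 'delete p' on a
+// pointer that came from 'new[]', the fix-it above inserts "[]" right after
+// the 'delete' keyword; for 'delete[] p' on a pointer from scalar 'new', it
+// removes the "[]" tokens instead.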
+
+void Sema::AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE) {
+ if (Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation()))
+ return;
+ MismatchingNewDeleteDetector Detector(/*EndOfTU=*/false);
+ switch (Detector.analyzeDeleteExpr(DE)) {
+ case MismatchingNewDeleteDetector::VarInitMismatches:
+ case MismatchingNewDeleteDetector::MemberInitMismatches: {
+ DiagnoseMismatchedNewDelete(*this, DE->getLocStart(), Detector);
+ break;
+ }
+ case MismatchingNewDeleteDetector::AnalyzeLater: {
+ DeleteExprs[Detector.Field].push_back(
+ std::make_pair(DE->getLocStart(), DE->isArrayForm()));
+ break;
+ }
+ case MismatchingNewDeleteDetector::NoMismatch:
+ break;
+ }
+}
+
+void Sema::AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
+ bool DeleteWasArrayForm) {
+ MismatchingNewDeleteDetector Detector(/*EndOfTU=*/true);
+ switch (Detector.analyzeField(Field, DeleteWasArrayForm)) {
+ case MismatchingNewDeleteDetector::VarInitMismatches:
+ llvm_unreachable("This analysis should have been done for class members.");
+ case MismatchingNewDeleteDetector::AnalyzeLater:
+ llvm_unreachable("Analysis cannot be postponed any point beyond end of "
+ "translation unit.");
+ case MismatchingNewDeleteDetector::MemberInitMismatches:
+ DiagnoseMismatchedNewDelete(*this, DeleteLoc, Detector);
+ break;
+ case MismatchingNewDeleteDetector::NoMismatch:
+ break;
+ }
+}
+
+/// ActOnCXXDelete - Parsed a C++ 'delete' expression (C++ 5.3.5), as in:
+/// @code ::delete ptr; @endcode
+/// or
+/// @code delete [] ptr; @endcode
+ExprResult
+Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
+ bool ArrayForm, Expr *ExE) {
+ // C++ [expr.delete]p1:
+ // The operand shall have a pointer type, or a class type having a single
+ // non-explicit conversion function to a pointer type. The result has type
+ // void.
+ //
+ // DR599 amends "pointer type" to "pointer to object type" in both cases.
+
+ ExprResult Ex = ExE;
+ FunctionDecl *OperatorDelete = nullptr;
+ bool ArrayFormAsWritten = ArrayForm;
+ bool UsualArrayDeleteWantsSize = false;
+
+ if (!Ex.get()->isTypeDependent()) {
+ // Perform lvalue-to-rvalue cast, if needed.
+ Ex = DefaultLvalueConversion(Ex.get());
+ if (Ex.isInvalid())
+ return ExprError();
+
+ QualType Type = Ex.get()->getType();
+
+ class DeleteConverter : public ContextualImplicitConverter {
+ public:
+ DeleteConverter() : ContextualImplicitConverter(false, true) {}
+
+ bool match(QualType ConvType) override {
+ // FIXME: If we have an operator T* and an operator void*, we must pick
+ // the operator T*.
+ if (const PointerType *ConvPtrType = ConvType->getAs<PointerType>())
+ if (ConvPtrType->getPointeeType()->isIncompleteOrObjectType())
+ return true;
+ return false;
+ }
+
+ SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
+ QualType T) override {
+ return S.Diag(Loc, diag::err_delete_operand) << T;
+ }
+
+ SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc,
+ QualType T) override {
+ return S.Diag(Loc, diag::err_delete_incomplete_class_type) << T;
+ }
+
+ SemaDiagnosticBuilder diagnoseExplicitConv(Sema &S, SourceLocation Loc,
+ QualType T,
+ QualType ConvTy) override {
+ return S.Diag(Loc, diag::err_delete_explicit_conversion) << T << ConvTy;
+ }
+
+ SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv,
+ QualType ConvTy) override {
+ return S.Diag(Conv->getLocation(), diag::note_delete_conversion)
+ << ConvTy;
+ }
+
+ SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
+ QualType T) override {
+ return S.Diag(Loc, diag::err_ambiguous_delete_operand) << T;
+ }
+
+ SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv,
+ QualType ConvTy) override {
+ return S.Diag(Conv->getLocation(), diag::note_delete_conversion)
+ << ConvTy;
+ }
+
+ SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
+ QualType T,
+ QualType ConvTy) override {
+ llvm_unreachable("conversion functions are permitted");
+ }
+ } Converter;
+
+ Ex = PerformContextualImplicitConversion(StartLoc, Ex.get(), Converter);
+ if (Ex.isInvalid())
+ return ExprError();
+ Type = Ex.get()->getType();
+ if (!Converter.match(Type))
+ // FIXME: PerformContextualImplicitConversion should return ExprError
+ // itself in this case.
+ return ExprError();
+
+ QualType Pointee = Type->getAs<PointerType>()->getPointeeType();
+ QualType PointeeElem = Context.getBaseElementType(Pointee);
+
+ if (unsigned AddressSpace = Pointee.getAddressSpace())
+ return Diag(Ex.get()->getLocStart(),
+ diag::err_address_space_qualified_delete)
+ << Pointee.getUnqualifiedType() << AddressSpace;
+
+ CXXRecordDecl *PointeeRD = nullptr;
+ if (Pointee->isVoidType() && !isSFINAEContext()) {
+ // The C++ standard bans deleting a pointer to a non-object type, which
+ // effectively bans deletion of "void*". However, most compilers support
+ // this, so we treat it as a warning unless we're in a SFINAE context.
+ Diag(StartLoc, diag::ext_delete_void_ptr_operand)
+ << Type << Ex.get()->getSourceRange();
+ } else if (Pointee->isFunctionType() || Pointee->isVoidType()) {
+ return ExprError(Diag(StartLoc, diag::err_delete_operand)
+ << Type << Ex.get()->getSourceRange());
+ } else if (!Pointee->isDependentType()) {
+ // FIXME: This can result in errors if the definition was imported from a
+ // module but is hidden.
+ if (!RequireCompleteType(StartLoc, Pointee,
+ diag::warn_delete_incomplete, Ex.get())) {
+ if (const RecordType *RT = PointeeElem->getAs<RecordType>())
+ PointeeRD = cast<CXXRecordDecl>(RT->getDecl());
+ }
+ }
+
+ if (Pointee->isArrayType() && !ArrayForm) {
+ Diag(StartLoc, diag::warn_delete_array_type)
+ << Type << Ex.get()->getSourceRange()
+ << FixItHint::CreateInsertion(getLocForEndOfToken(StartLoc), "[]");
+ ArrayForm = true;
+ }
+
+ DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName(
+ ArrayForm ? OO_Array_Delete : OO_Delete);
+
+ if (PointeeRD) {
+ if (!UseGlobal &&
+ FindDeallocationFunction(StartLoc, PointeeRD, DeleteName,
+ OperatorDelete))
+ return ExprError();
+
+    // If we're deleting an array of records, check whether the
+    // usual operator delete[] has a size_t parameter.
+ if (ArrayForm) {
+ // If the user specifically asked to use the global allocator,
+ // we'll need to do the lookup into the class.
+ if (UseGlobal)
+ UsualArrayDeleteWantsSize =
+ doesUsualArrayDeleteWantSize(*this, StartLoc, PointeeElem);
+
+ // Otherwise, the usual operator delete[] should be the
+ // function we just found.
+ else if (OperatorDelete && isa<CXXMethodDecl>(OperatorDelete))
+ UsualArrayDeleteWantsSize = (OperatorDelete->getNumParams() == 2);
+ }
+
+ if (!PointeeRD->hasIrrelevantDestructor())
+ if (CXXDestructorDecl *Dtor = LookupDestructor(PointeeRD)) {
+ MarkFunctionReferenced(StartLoc,
+ const_cast<CXXDestructorDecl*>(Dtor));
+ if (DiagnoseUseOfDecl(Dtor, StartLoc))
+ return ExprError();
+ }
+
+ // C++ [expr.delete]p3:
+ // In the first alternative (delete object), if the static type of the
+ // object to be deleted is different from its dynamic type, the static
+ // type shall be a base class of the dynamic type of the object to be
+ // deleted and the static type shall have a virtual destructor or the
+ // behavior is undefined.
+ //
+    // Note: a final class cannot be derived from, so there is no issue there.
+ if (PointeeRD->isPolymorphic() && !PointeeRD->hasAttr<FinalAttr>()) {
+ CXXDestructorDecl *dtor = PointeeRD->getDestructor();
+ if (dtor && !dtor->isVirtual()) {
+ if (PointeeRD->isAbstract()) {
+ // If the class is abstract, we warn by default, because we're
+ // sure the code has undefined behavior.
+ Diag(StartLoc, diag::warn_delete_abstract_non_virtual_dtor)
+ << PointeeElem;
+ } else if (!ArrayForm) {
+ // Otherwise, if this is not an array delete, it's a bit suspect,
+ // but not necessarily wrong.
+ Diag(StartLoc, diag::warn_delete_non_virtual_dtor) << PointeeElem;
+ }
+ }
+ }
+
+ }
+
+ if (!OperatorDelete)
+ // Look for a global declaration.
+ OperatorDelete = FindUsualDeallocationFunction(
+ StartLoc, isCompleteType(StartLoc, Pointee) &&
+ (!ArrayForm || UsualArrayDeleteWantsSize ||
+ Pointee.isDestructedType()),
+ DeleteName);
+
+ MarkFunctionReferenced(StartLoc, OperatorDelete);
+
+ // Check access and ambiguity of operator delete and destructor.
+ if (PointeeRD) {
+ if (CXXDestructorDecl *Dtor = LookupDestructor(PointeeRD)) {
+ CheckDestructorAccess(Ex.get()->getExprLoc(), Dtor,
+ PDiag(diag::err_access_dtor) << PointeeElem);
+ }
+ }
+ }
+
+ CXXDeleteExpr *Result = new (Context) CXXDeleteExpr(
+ Context.VoidTy, UseGlobal, ArrayForm, ArrayFormAsWritten,
+ UsualArrayDeleteWantsSize, OperatorDelete, Ex.get(), StartLoc);
+ AnalyzeDeleteExprMismatch(Result);
+ return Result;
+}
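+// Illustrative examples (not part of the original source) of diagnostics
+// produced by ActOnCXXDelete above:
+//   void *v = ...; delete v;       // extension warning: 'delete' on 'void*'
+//   int (*a)[4] = ...; delete a;   // warning + fix-it: becomes 'delete[] a;'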
+
+/// \brief Check the use of the given variable as a C++ condition in an if,
+/// while, do-while, or switch statement.
+ExprResult Sema::CheckConditionVariable(VarDecl *ConditionVar,
+ SourceLocation StmtLoc,
+ bool ConvertToBoolean) {
+ if (ConditionVar->isInvalidDecl())
+ return ExprError();
+
+ QualType T = ConditionVar->getType();
+
+ // C++ [stmt.select]p2:
+ // The declarator shall not specify a function or an array.
+ if (T->isFunctionType())
+ return ExprError(Diag(ConditionVar->getLocation(),
+ diag::err_invalid_use_of_function_type)
+ << ConditionVar->getSourceRange());
+ else if (T->isArrayType())
+ return ExprError(Diag(ConditionVar->getLocation(),
+ diag::err_invalid_use_of_array_type)
+ << ConditionVar->getSourceRange());
+
+ ExprResult Condition = DeclRefExpr::Create(
+ Context, NestedNameSpecifierLoc(), SourceLocation(), ConditionVar,
+ /*enclosing*/ false, ConditionVar->getLocation(),
+ ConditionVar->getType().getNonReferenceType(), VK_LValue);
+
+ MarkDeclRefReferenced(cast<DeclRefExpr>(Condition.get()));
+
+ if (ConvertToBoolean) {
+ Condition = CheckBooleanCondition(Condition.get(), StmtLoc);
+ if (Condition.isInvalid())
+ return ExprError();
+ }
+
+ return Condition;
+}
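+// Illustrative sketch (not part of the original source): this is the path
+// taken for conditions that declare a variable, e.g.
+//   if (int *p = find(key)) { use(*p); }
+// The declared variable is referenced through a DeclRefExpr and, when
+// ConvertToBoolean is set, contextually converted to bool.
+// ('find' and 'use' are hypothetical functions.)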
+
+/// CheckCXXBooleanCondition - Performs the contextual conversion of the
+/// condition expression to bool (C++ 6.4p4), returning an invalid result if
+/// the conversion is ill-formed.
+ExprResult Sema::CheckCXXBooleanCondition(Expr *CondExpr) {
+ // C++ 6.4p4:
+ // The value of a condition that is an initialized declaration in a statement
+ // other than a switch statement is the value of the declared variable
+ // implicitly converted to type bool. If that conversion is ill-formed, the
+ // program is ill-formed.
+ // The value of a condition that is an expression is the value of the
+ // expression, implicitly converted to bool.
+ //
+ return PerformContextuallyConvertToBool(CondExpr);
+}
+
+/// Helper function to determine whether this is the (deprecated) C++
+/// conversion from a string literal to a pointer to non-const char or
+/// non-const wchar_t (for narrow and wide string literals,
+/// respectively).
+bool
+Sema::IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType) {
+ // Look inside the implicit cast, if it exists.
+ if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(From))
+ From = Cast->getSubExpr();
+
+ // A string literal (2.13.4) that is not a wide string literal can
+ // be converted to an rvalue of type "pointer to char"; a wide
+ // string literal can be converted to an rvalue of type "pointer
+ // to wchar_t" (C++ 4.2p2).
+ if (StringLiteral *StrLit = dyn_cast<StringLiteral>(From->IgnoreParens()))
+ if (const PointerType *ToPtrType = ToType->getAs<PointerType>())
+ if (const BuiltinType *ToPointeeType
+ = ToPtrType->getPointeeType()->getAs<BuiltinType>()) {
+ // This conversion is considered only when there is an
+ // explicit appropriate pointer target type (C++ 4.2p2).
+ if (!ToPtrType->getPointeeType().hasQualifiers()) {
+ switch (StrLit->getKind()) {
+ case StringLiteral::UTF8:
+ case StringLiteral::UTF16:
+ case StringLiteral::UTF32:
+ // We don't allow UTF literals to be implicitly converted
+ break;
+ case StringLiteral::Ascii:
+ return (ToPointeeType->getKind() == BuiltinType::Char_U ||
+ ToPointeeType->getKind() == BuiltinType::Char_S);
+ case StringLiteral::Wide:
+ return ToPointeeType->isWideCharType();
+ }
+ }
+ }
+
+ return false;
+}
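+// Illustrative examples (not part of the original source) of what this
+// predicate recognizes:
+//   char *p = "hello";       // recognized (deprecated narrow conversion)
+//   wchar_t *w = L"hello";   // recognized (deprecated wide conversion)
+//   char *q = u8"hello";     // not recognized: UTF literals never convert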
+
+static ExprResult BuildCXXCastArgument(Sema &S,
+ SourceLocation CastLoc,
+ QualType Ty,
+ CastKind Kind,
+ CXXMethodDecl *Method,
+ DeclAccessPair FoundDecl,
+ bool HadMultipleCandidates,
+ Expr *From) {
+ switch (Kind) {
+ default: llvm_unreachable("Unhandled cast kind!");
+ case CK_ConstructorConversion: {
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Method);
+ SmallVector<Expr*, 8> ConstructorArgs;
+
+ if (S.RequireNonAbstractType(CastLoc, Ty,
+ diag::err_allocation_of_abstract_type))
+ return ExprError();
+
+ if (S.CompleteConstructorCall(Constructor, From, CastLoc, ConstructorArgs))
+ return ExprError();
+
+ S.CheckConstructorAccess(CastLoc, Constructor,
+ InitializedEntity::InitializeTemporary(Ty),
+ Constructor->getAccess());
+ if (S.DiagnoseUseOfDecl(Method, CastLoc))
+ return ExprError();
+
+ ExprResult Result = S.BuildCXXConstructExpr(
+ CastLoc, Ty, cast<CXXConstructorDecl>(Method),
+ ConstructorArgs, HadMultipleCandidates,
+ /*ListInit*/ false, /*StdInitListInit*/ false, /*ZeroInit*/ false,
+ CXXConstructExpr::CK_Complete, SourceRange());
+ if (Result.isInvalid())
+ return ExprError();
+
+ return S.MaybeBindToTemporary(Result.getAs<Expr>());
+ }
+
+ case CK_UserDefinedConversion: {
+ assert(!From->getType()->isPointerType() && "Arg can't have pointer type!");
+
+ S.CheckMemberOperatorAccess(CastLoc, From, /*arg*/ nullptr, FoundDecl);
+ if (S.DiagnoseUseOfDecl(Method, CastLoc))
+ return ExprError();
+
+ // Create an implicit call expr that calls it.
+ CXXConversionDecl *Conv = cast<CXXConversionDecl>(Method);
+ ExprResult Result = S.BuildCXXMemberCallExpr(From, FoundDecl, Conv,
+ HadMultipleCandidates);
+ if (Result.isInvalid())
+ return ExprError();
+ // Record usage of conversion in an implicit cast.
+ Result = ImplicitCastExpr::Create(S.Context, Result.get()->getType(),
+ CK_UserDefinedConversion, Result.get(),
+ nullptr, Result.get()->getValueKind());
+
+ return S.MaybeBindToTemporary(Result.get());
+ }
+ }
+}
+
+/// PerformImplicitConversion - Perform an implicit conversion of the
+/// expression From to the type ToType using the pre-computed implicit
+/// conversion sequence ICS. Returns the converted
+/// expression. Action is the kind of conversion we're performing,
+/// used in the error message.
+ExprResult
+Sema::PerformImplicitConversion(Expr *From, QualType ToType,
+ const ImplicitConversionSequence &ICS,
+ AssignmentAction Action,
+ CheckedConversionKind CCK) {
+ switch (ICS.getKind()) {
+ case ImplicitConversionSequence::StandardConversion: {
+ ExprResult Res = PerformImplicitConversion(From, ToType, ICS.Standard,
+ Action, CCK);
+ if (Res.isInvalid())
+ return ExprError();
+ From = Res.get();
+ break;
+ }
+
+ case ImplicitConversionSequence::UserDefinedConversion: {
+
+ FunctionDecl *FD = ICS.UserDefined.ConversionFunction;
+ CastKind CastKind;
+ QualType BeforeToType;
+ assert(FD && "no conversion function for user-defined conversion seq");
+ if (const CXXConversionDecl *Conv = dyn_cast<CXXConversionDecl>(FD)) {
+ CastKind = CK_UserDefinedConversion;
+
+ // If the user-defined conversion is specified by a conversion function,
+ // the initial standard conversion sequence converts the source type to
+ // the implicit object parameter of the conversion function.
+ BeforeToType = Context.getTagDeclType(Conv->getParent());
+ } else {
+ const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(FD);
+ CastKind = CK_ConstructorConversion;
+      // Do no conversion if dealing with an ellipsis ('...') for the first
+      // conversion.
+ if (!ICS.UserDefined.EllipsisConversion) {
+ // If the user-defined conversion is specified by a constructor, the
+ // initial standard conversion sequence converts the source type to
+ // the type required by the argument of the constructor
+ BeforeToType = Ctor->getParamDecl(0)->getType().getNonReferenceType();
+ }
+ }
+ // Watch out for ellipsis conversion.
+ if (!ICS.UserDefined.EllipsisConversion) {
+ ExprResult Res =
+ PerformImplicitConversion(From, BeforeToType,
+ ICS.UserDefined.Before, AA_Converting,
+ CCK);
+ if (Res.isInvalid())
+ return ExprError();
+ From = Res.get();
+ }
+
+ ExprResult CastArg
+ = BuildCXXCastArgument(*this,
+ From->getLocStart(),
+ ToType.getNonReferenceType(),
+ CastKind, cast<CXXMethodDecl>(FD),
+ ICS.UserDefined.FoundConversionFunction,
+ ICS.UserDefined.HadMultipleCandidates,
+ From);
+
+ if (CastArg.isInvalid())
+ return ExprError();
+
+ From = CastArg.get();
+
+ return PerformImplicitConversion(From, ToType, ICS.UserDefined.After,
+ AA_Converting, CCK);
+ }
+
+ case ImplicitConversionSequence::AmbiguousConversion:
+ ICS.DiagnoseAmbiguousConversion(*this, From->getExprLoc(),
+ PDiag(diag::err_typecheck_ambiguous_condition)
+ << From->getSourceRange());
+ return ExprError();
+
+ case ImplicitConversionSequence::EllipsisConversion:
+ llvm_unreachable("Cannot perform an ellipsis conversion");
+
+ case ImplicitConversionSequence::BadConversion:
+ return ExprError();
+ }
+
+ // Everything went well.
+ return From;
+}
+
+/// PerformImplicitConversion - Perform an implicit conversion of the
+/// expression From to the type ToType by following the standard
+/// conversion sequence SCS. Returns the converted
+/// expression. Flavor is the context in which we're performing this
+/// conversion, for use in error messages.
+ExprResult
+Sema::PerformImplicitConversion(Expr *From, QualType ToType,
+ const StandardConversionSequence& SCS,
+ AssignmentAction Action,
+ CheckedConversionKind CCK) {
+ bool CStyle = (CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast);
+
+ // Overall FIXME: we are recomputing too many types here and doing far too
+ // much extra work. What this means is that we need to keep track of more
+ // information that is computed when we try the implicit conversion initially,
+ // so that we don't need to recompute anything here.
+ QualType FromType = From->getType();
+
+ if (SCS.CopyConstructor) {
+ // FIXME: When can ToType be a reference type?
+ assert(!ToType->isReferenceType());
+ if (SCS.Second == ICK_Derived_To_Base) {
+ SmallVector<Expr*, 8> ConstructorArgs;
+ if (CompleteConstructorCall(cast<CXXConstructorDecl>(SCS.CopyConstructor),
+ From, /*FIXME:ConstructLoc*/SourceLocation(),
+ ConstructorArgs))
+ return ExprError();
+ return BuildCXXConstructExpr(
+ /*FIXME:ConstructLoc*/ SourceLocation(), ToType, SCS.CopyConstructor,
+ ConstructorArgs, /*HadMultipleCandidates*/ false,
+ /*ListInit*/ false, /*StdInitListInit*/ false, /*ZeroInit*/ false,
+ CXXConstructExpr::CK_Complete, SourceRange());
+ }
+ return BuildCXXConstructExpr(
+ /*FIXME:ConstructLoc*/ SourceLocation(), ToType, SCS.CopyConstructor,
+ From, /*HadMultipleCandidates*/ false,
+ /*ListInit*/ false, /*StdInitListInit*/ false, /*ZeroInit*/ false,
+ CXXConstructExpr::CK_Complete, SourceRange());
+ }
+
+ // Resolve overloaded function references.
+ if (Context.hasSameType(FromType, Context.OverloadTy)) {
+ DeclAccessPair Found;
+ FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(From, ToType,
+ true, Found);
+ if (!Fn)
+ return ExprError();
+
+ if (DiagnoseUseOfDecl(Fn, From->getLocStart()))
+ return ExprError();
+
+ From = FixOverloadedFunctionReference(From, Found, Fn);
+ FromType = From->getType();
+ }
+
+ // If we're converting to an atomic type, first convert to the corresponding
+ // non-atomic type.
+ QualType ToAtomicType;
+ if (const AtomicType *ToAtomic = ToType->getAs<AtomicType>()) {
+ ToAtomicType = ToType;
+ ToType = ToAtomic->getValueType();
+ }
+
+ QualType InitialFromType = FromType;
+ // Perform the first implicit conversion.
+ switch (SCS.First) {
+ case ICK_Identity:
+ if (const AtomicType *FromAtomic = FromType->getAs<AtomicType>()) {
+ FromType = FromAtomic->getValueType().getUnqualifiedType();
+ From = ImplicitCastExpr::Create(Context, FromType, CK_AtomicToNonAtomic,
+ From, /*BasePath=*/nullptr, VK_RValue);
+ }
+ break;
+
+ case ICK_Lvalue_To_Rvalue: {
+ assert(From->getObjectKind() != OK_ObjCProperty);
+ ExprResult FromRes = DefaultLvalueConversion(From);
+ assert(!FromRes.isInvalid() && "Can't perform deduced conversion?!");
+ From = FromRes.get();
+ FromType = From->getType();
+ break;
+ }
+
+ case ICK_Array_To_Pointer:
+ FromType = Context.getArrayDecayedType(FromType);
+ From = ImpCastExprToType(From, FromType, CK_ArrayToPointerDecay,
+ VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ break;
+
+ case ICK_Function_To_Pointer:
+ FromType = Context.getPointerType(FromType);
+ From = ImpCastExprToType(From, FromType, CK_FunctionToPointerDecay,
+ VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ break;
+
+ default:
+ llvm_unreachable("Improper first standard conversion");
+ }
+
+ // Perform the second implicit conversion
+ switch (SCS.Second) {
+ case ICK_Identity:
+ // C++ [except.spec]p5:
+ // [For] assignment to and initialization of pointers to functions,
+ // pointers to member functions, and references to functions: the
+ // target entity shall allow at least the exceptions allowed by the
+ // source value in the assignment or initialization.
+ switch (Action) {
+ case AA_Assigning:
+ case AA_Initializing:
+ // Note, function argument passing and returning are initialization.
+ case AA_Passing:
+ case AA_Returning:
+ case AA_Sending:
+ case AA_Passing_CFAudited:
+ if (CheckExceptionSpecCompatibility(From, ToType))
+ return ExprError();
+ break;
+
+ case AA_Casting:
+ case AA_Converting:
+ // Casts and implicit conversions are not initialization, so are not
+ // checked for exception specification mismatches.
+ break;
+ }
+ // Nothing else to do.
+ break;
+
+ case ICK_NoReturn_Adjustment:
+ // If both sides are functions (or pointers/references to them), there could
+ // be incompatible exception declarations.
+ if (CheckExceptionSpecCompatibility(From, ToType))
+ return ExprError();
+
+ From = ImpCastExprToType(From, ToType, CK_NoOp,
+ VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ break;
+
+ case ICK_Integral_Promotion:
+ case ICK_Integral_Conversion:
+ if (ToType->isBooleanType()) {
+ assert(FromType->castAs<EnumType>()->getDecl()->isFixed() &&
+ SCS.Second == ICK_Integral_Promotion &&
+ "only enums with fixed underlying type can promote to bool");
+ From = ImpCastExprToType(From, ToType, CK_IntegralToBoolean,
+ VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ } else {
+ From = ImpCastExprToType(From, ToType, CK_IntegralCast,
+ VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ }
+ break;
+
+ case ICK_Floating_Promotion:
+ case ICK_Floating_Conversion:
+ From = ImpCastExprToType(From, ToType, CK_FloatingCast,
+ VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ break;
+
+ case ICK_Complex_Promotion:
+ case ICK_Complex_Conversion: {
+ QualType FromEl = From->getType()->getAs<ComplexType>()->getElementType();
+ QualType ToEl = ToType->getAs<ComplexType>()->getElementType();
+ CastKind CK;
+ if (FromEl->isRealFloatingType()) {
+ if (ToEl->isRealFloatingType())
+ CK = CK_FloatingComplexCast;
+ else
+ CK = CK_FloatingComplexToIntegralComplex;
+ } else if (ToEl->isRealFloatingType()) {
+ CK = CK_IntegralComplexToFloatingComplex;
+ } else {
+ CK = CK_IntegralComplexCast;
+ }
+ From = ImpCastExprToType(From, ToType, CK,
+ VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ break;
+ }
+
+ case ICK_Floating_Integral:
+ if (ToType->isRealFloatingType())
+ From = ImpCastExprToType(From, ToType, CK_IntegralToFloating,
+ VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ else
+ From = ImpCastExprToType(From, ToType, CK_FloatingToIntegral,
+ VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ break;
+
+ case ICK_Compatible_Conversion:
+ From = ImpCastExprToType(From, ToType, CK_NoOp,
+ VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ break;
+
+ case ICK_Writeback_Conversion:
+ case ICK_Pointer_Conversion: {
+ if (SCS.IncompatibleObjC && Action != AA_Casting) {
+ // Diagnose incompatible Objective-C conversions
+ if (Action == AA_Initializing || Action == AA_Assigning)
+ Diag(From->getLocStart(),
+ diag::ext_typecheck_convert_incompatible_pointer)
+ << ToType << From->getType() << Action
+ << From->getSourceRange() << 0;
+ else
+ Diag(From->getLocStart(),
+ diag::ext_typecheck_convert_incompatible_pointer)
+ << From->getType() << ToType << Action
+ << From->getSourceRange() << 0;
+
+ if (From->getType()->isObjCObjectPointerType() &&
+ ToType->isObjCObjectPointerType())
+ EmitRelatedResultTypeNote(From);
+ }
+ else if (getLangOpts().ObjCAutoRefCount &&
+ !CheckObjCARCUnavailableWeakConversion(ToType,
+ From->getType())) {
+ if (Action == AA_Initializing)
+ Diag(From->getLocStart(),
+ diag::err_arc_weak_unavailable_assign);
+ else
+ Diag(From->getLocStart(),
+ diag::err_arc_convesion_of_weak_unavailable)
+ << (Action == AA_Casting) << From->getType() << ToType
+ << From->getSourceRange();
+ }
+
+ CastKind Kind = CK_Invalid;
+ CXXCastPath BasePath;
+ if (CheckPointerConversion(From, ToType, Kind, BasePath, CStyle))
+ return ExprError();
+
+ // Make sure we extend blocks if necessary.
+ // FIXME: doing this here is really ugly.
+ if (Kind == CK_BlockPointerToObjCPointerCast) {
+ ExprResult E = From;
+ (void) PrepareCastToObjCObjectPointer(E);
+ From = E.get();
+ }
+ if (getLangOpts().ObjCAutoRefCount)
+ CheckObjCARCConversion(SourceRange(), ToType, From, CCK);
+ From = ImpCastExprToType(From, ToType, Kind, VK_RValue, &BasePath, CCK)
+ .get();
+ break;
+ }
+
+ case ICK_Pointer_Member: {
+ CastKind Kind = CK_Invalid;
+ CXXCastPath BasePath;
+ if (CheckMemberPointerConversion(From, ToType, Kind, BasePath, CStyle))
+ return ExprError();
+ if (CheckExceptionSpecCompatibility(From, ToType))
+ return ExprError();
+
+    // We may not have been able to figure out what this member pointer
+    // resolved to until this exact point. Attempt to lock in its
+    // inheritance model.
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ (void)isCompleteType(From->getExprLoc(), From->getType());
+ (void)isCompleteType(From->getExprLoc(), ToType);
+ }
+
+ From = ImpCastExprToType(From, ToType, Kind, VK_RValue, &BasePath, CCK)
+ .get();
+ break;
+ }
+
+ case ICK_Boolean_Conversion:
+ // Perform half-to-boolean conversion via float.
+ if (From->getType()->isHalfType()) {
+ From = ImpCastExprToType(From, Context.FloatTy, CK_FloatingCast).get();
+ FromType = Context.FloatTy;
+ }
+
+ From = ImpCastExprToType(From, Context.BoolTy,
+ ScalarTypeToBooleanCastKind(FromType),
+ VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ break;
+
+ case ICK_Derived_To_Base: {
+ CXXCastPath BasePath;
+ if (CheckDerivedToBaseConversion(From->getType(),
+ ToType.getNonReferenceType(),
+ From->getLocStart(),
+ From->getSourceRange(),
+ &BasePath,
+ CStyle))
+ return ExprError();
+
+ From = ImpCastExprToType(From, ToType.getNonReferenceType(),
+ CK_DerivedToBase, From->getValueKind(),
+ &BasePath, CCK).get();
+ break;
+ }
+
+ case ICK_Vector_Conversion:
+ From = ImpCastExprToType(From, ToType, CK_BitCast,
+ VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ break;
+
+ case ICK_Vector_Splat:
+ // Vector splat from any arithmetic type to a vector.
+ // Cast to the element type.
+ {
+ QualType elType = ToType->getAs<ExtVectorType>()->getElementType();
+ if (elType != From->getType()) {
+ ExprResult E = From;
+ From = ImpCastExprToType(From, elType,
+ PrepareScalarCast(E, elType)).get();
+ }
+ From = ImpCastExprToType(From, ToType, CK_VectorSplat,
+ VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ }
+ break;
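+    // Illustrative sketch (not part of the original source):
+    //   typedef float float4 __attribute__((ext_vector_type(4)));
+    //   float4 v = 1;  // 1 -> 1.0f (element cast), then splat to all lanes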
+
+ case ICK_Complex_Real:
+ // Case 1. x -> _Complex y
+ if (const ComplexType *ToComplex = ToType->getAs<ComplexType>()) {
+ QualType ElType = ToComplex->getElementType();
+ bool isFloatingComplex = ElType->isRealFloatingType();
+
+ // x -> y
+ if (Context.hasSameUnqualifiedType(ElType, From->getType())) {
+ // do nothing
+ } else if (From->getType()->isRealFloatingType()) {
+ From = ImpCastExprToType(From, ElType,
+ isFloatingComplex ? CK_FloatingCast : CK_FloatingToIntegral).get();
+ } else {
+ assert(From->getType()->isIntegerType());
+ From = ImpCastExprToType(From, ElType,
+ isFloatingComplex ? CK_IntegralToFloating : CK_IntegralCast).get();
+ }
+ // y -> _Complex y
+ From = ImpCastExprToType(From, ToType,
+ isFloatingComplex ? CK_FloatingRealToComplex
+ : CK_IntegralRealToComplex).get();
+
+ // Case 2. _Complex x -> y
+ } else {
+ const ComplexType *FromComplex = From->getType()->getAs<ComplexType>();
+ assert(FromComplex);
+
+ QualType ElType = FromComplex->getElementType();
+ bool isFloatingComplex = ElType->isRealFloatingType();
+
+ // _Complex x -> x
+ From = ImpCastExprToType(From, ElType,
+ isFloatingComplex ? CK_FloatingComplexToReal
+ : CK_IntegralComplexToReal,
+ VK_RValue, /*BasePath=*/nullptr, CCK).get();
+
+ // x -> y
+ if (Context.hasSameUnqualifiedType(ElType, ToType)) {
+ // do nothing
+ } else if (ToType->isRealFloatingType()) {
+ From = ImpCastExprToType(From, ToType,
+ isFloatingComplex ? CK_FloatingCast : CK_IntegralToFloating,
+ VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ } else {
+ assert(ToType->isIntegerType());
+ From = ImpCastExprToType(From, ToType,
+ isFloatingComplex ? CK_FloatingToIntegral : CK_IntegralCast,
+ VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ }
+ }
+ break;
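+  // Illustrative sketch (not part of the original source):
+  //   double d = 2.0; _Complex double z = d;  // case 1: real -> _Complex
+  //   _Complex float c = ...; int i = c;      // case 2: _Complex float ->
+  //                                           // float, then float -> int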
+
+ case ICK_Block_Pointer_Conversion: {
+ From = ImpCastExprToType(From, ToType.getUnqualifiedType(), CK_BitCast,
+ VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ break;
+ }
+
+ case ICK_TransparentUnionConversion: {
+ ExprResult FromRes = From;
+ Sema::AssignConvertType ConvTy =
+ CheckTransparentUnionArgumentConstraints(ToType, FromRes);
+ if (FromRes.isInvalid())
+ return ExprError();
+ From = FromRes.get();
+ assert ((ConvTy == Sema::Compatible) &&
+ "Improper transparent union conversion");
+ (void)ConvTy;
+ break;
+ }
+
+ case ICK_Zero_Event_Conversion:
+ From = ImpCastExprToType(From, ToType,
+ CK_ZeroToOCLEvent,
+ From->getValueKind()).get();
+ break;
+
+ case ICK_Lvalue_To_Rvalue:
+ case ICK_Array_To_Pointer:
+ case ICK_Function_To_Pointer:
+ case ICK_Qualification:
+ case ICK_Num_Conversion_Kinds:
+ case ICK_C_Only_Conversion:
+ llvm_unreachable("Improper second standard conversion");
+ }
+
+ switch (SCS.Third) {
+ case ICK_Identity:
+ // Nothing to do.
+ break;
+
+ case ICK_Qualification: {
+    // The qualification conversion preserves the value kind of the inner
+    // expression when the target type is a reference; otherwise the result
+    // is an rvalue.
+ ExprValueKind VK = ToType->isReferenceType() ?
+ From->getValueKind() : VK_RValue;
+ From = ImpCastExprToType(From, ToType.getNonLValueExprType(Context),
+ CK_NoOp, VK, /*BasePath=*/nullptr, CCK).get();
+
+ if (SCS.DeprecatedStringLiteralToCharPtr &&
+ !getLangOpts().WritableStrings) {
+ Diag(From->getLocStart(), getLangOpts().CPlusPlus11
+ ? diag::ext_deprecated_string_literal_conversion
+ : diag::warn_deprecated_string_literal_conversion)
+ << ToType.getNonReferenceType();
+ }
+
+ break;
+ }
+
+ default:
+ llvm_unreachable("Improper third standard conversion");
+ }
+
+ // If this conversion sequence involved a scalar -> atomic conversion, perform
+ // that conversion now.
+ if (!ToAtomicType.isNull()) {
+ assert(Context.hasSameType(
+ ToAtomicType->castAs<AtomicType>()->getValueType(), From->getType()));
+ From = ImpCastExprToType(From, ToAtomicType, CK_NonAtomicToAtomic,
+ VK_RValue, nullptr, CCK).get();
+ }
+
+ // If this conversion sequence succeeded and involved implicitly converting a
+ // _Nullable type to a _Nonnull one, complain.
+ if (CCK == CCK_ImplicitConversion)
+ diagnoseNullableToNonnullConversion(ToType, InitialFromType,
+ From->getLocStart());
+
+ return From;
+}
+
+/// \brief Check the completeness of a type in a unary type trait.
+///
+/// If the particular type trait requires a complete type, tries to complete
+/// it. If completing the type fails, a diagnostic is emitted and false
+/// returned. If completing the type succeeds or no completion was required,
+/// returns true.
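+///
+/// For example, __is_polymorphic requires a complete type when its argument
+/// is a non-union class type, while __is_pointer never requires a complete
+/// type.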
+static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT,
+ SourceLocation Loc,
+ QualType ArgTy) {
+ // C++0x [meta.unary.prop]p3:
+ // For all of the class templates X declared in this Clause, instantiating
+ // that template with a template argument that is a class template
+ // specialization may result in the implicit instantiation of the template
+ // argument if and only if the semantics of X require that the argument
+ // must be a complete type.
+ // We apply this rule to all the type trait expressions used to implement
+ // these class templates. We also try to follow any GCC documented behavior
+ // in these expressions to ensure portability of standard libraries.
+ switch (UTT) {
+ default: llvm_unreachable("not a UTT");
+ // is_complete_type somewhat obviously cannot require a complete type.
+ case UTT_IsCompleteType:
+ // Fall-through
+
+ // These traits are modeled on the type predicates in C++0x
+ // [meta.unary.cat] and [meta.unary.comp]. They are not specified as
+ // requiring a complete type, as whether or not they return true cannot be
+ // impacted by the completeness of the type.
+ case UTT_IsVoid:
+ case UTT_IsIntegral:
+ case UTT_IsFloatingPoint:
+ case UTT_IsArray:
+ case UTT_IsPointer:
+ case UTT_IsLvalueReference:
+ case UTT_IsRvalueReference:
+ case UTT_IsMemberFunctionPointer:
+ case UTT_IsMemberObjectPointer:
+ case UTT_IsEnum:
+ case UTT_IsUnion:
+ case UTT_IsClass:
+ case UTT_IsFunction:
+ case UTT_IsReference:
+ case UTT_IsArithmetic:
+ case UTT_IsFundamental:
+ case UTT_IsObject:
+ case UTT_IsScalar:
+ case UTT_IsCompound:
+ case UTT_IsMemberPointer:
+ // Fall-through
+
+ // These traits are modeled on type predicates in C++0x [meta.unary.prop]
+ // which requires some of its traits to have the complete type. However,
+ // the completeness of the type cannot impact these traits' semantics, and
+ // so they don't require it. This matches the comments on these traits in
+ // Table 49.
+ case UTT_IsConst:
+ case UTT_IsVolatile:
+ case UTT_IsSigned:
+ case UTT_IsUnsigned:
+
+ // This type trait always returns false, checking the type is moot.
+ case UTT_IsInterfaceClass:
+ return true;
+
+ // C++14 [meta.unary.prop]:
+ // If T is a non-union class type, T shall be a complete type.
+ case UTT_IsEmpty:
+ case UTT_IsPolymorphic:
+ case UTT_IsAbstract:
+ if (const auto *RD = ArgTy->getAsCXXRecordDecl())
+ if (!RD->isUnion())
+ return !S.RequireCompleteType(
+ Loc, ArgTy, diag::err_incomplete_type_used_in_type_trait_expr);
+ return true;
+
+ // C++14 [meta.unary.prop]:
+ // If T is a class type, T shall be a complete type.
+ case UTT_IsFinal:
+ case UTT_IsSealed:
+ if (ArgTy->getAsCXXRecordDecl())
+ return !S.RequireCompleteType(
+ Loc, ArgTy, diag::err_incomplete_type_used_in_type_trait_expr);
+ return true;
+
+ // C++0x [meta.unary.prop] Table 49 requires the following traits to be
+ // applied to a complete type.
+ case UTT_IsTrivial:
+ case UTT_IsTriviallyCopyable:
+ case UTT_IsStandardLayout:
+ case UTT_IsPOD:
+ case UTT_IsLiteral:
+
+ case UTT_IsDestructible:
+ case UTT_IsNothrowDestructible:
+ // Fall-through
+
+ // These trait expressions are designed to help implement predicates in
+ // [meta.unary.prop] despite not being named the same. They are specified
+ // by both GCC and the Embarcadero C++ compiler, and require the complete
+ // type due to the overarching C++0x type predicates being implemented
+ // requiring the complete type.
+ case UTT_HasNothrowAssign:
+ case UTT_HasNothrowMoveAssign:
+ case UTT_HasNothrowConstructor:
+ case UTT_HasNothrowCopy:
+ case UTT_HasTrivialAssign:
+ case UTT_HasTrivialMoveAssign:
+ case UTT_HasTrivialDefaultConstructor:
+ case UTT_HasTrivialMoveConstructor:
+ case UTT_HasTrivialCopy:
+ case UTT_HasTrivialDestructor:
+ case UTT_HasVirtualDestructor:
+ // Arrays of unknown bound are expressly allowed.
+ QualType ElTy = ArgTy;
+ if (ArgTy->isIncompleteArrayType())
+ ElTy = S.Context.getAsArrayType(ArgTy)->getElementType();
+
+ // The void type is expressly allowed.
+ if (ElTy->isVoidType())
+ return true;
+
+ return !S.RequireCompleteType(
+ Loc, ElTy, diag::err_incomplete_type_used_in_type_trait_expr);
+ }
+}
+
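+/// \brief Check whether the record type \p RT has an operator \p Op that is
+/// known not to throw: either the corresponding special member is trivial
+/// (\p HasTrivial holds and \p HasNonTrivial does not), or every matching
+/// user-declared operator has a non-throwing exception specification.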
+static bool HasNoThrowOperator(const RecordType *RT, OverloadedOperatorKind Op,
+ Sema &Self, SourceLocation KeyLoc, ASTContext &C,
+ bool (CXXRecordDecl::*HasTrivial)() const,
+ bool (CXXRecordDecl::*HasNonTrivial)() const,
+ bool (CXXMethodDecl::*IsDesiredOp)() const)
+{
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if ((RD->*HasTrivial)() && !(RD->*HasNonTrivial)())
+ return true;
+
+ DeclarationName Name = C.DeclarationNames.getCXXOperatorName(Op);
+ DeclarationNameInfo NameInfo(Name, KeyLoc);
+ LookupResult Res(Self, NameInfo, Sema::LookupOrdinaryName);
+ if (Self.LookupQualifiedName(Res, RD)) {
+ bool FoundOperator = false;
+ Res.suppressDiagnostics();
+ for (LookupResult::iterator Op = Res.begin(), OpEnd = Res.end();
+ Op != OpEnd; ++Op) {
+ if (isa<FunctionTemplateDecl>(*Op))
+ continue;
+
+ CXXMethodDecl *Operator = cast<CXXMethodDecl>(*Op);
+      if ((Operator->*IsDesiredOp)()) {
+ FoundOperator = true;
+ const FunctionProtoType *CPT =
+ Operator->getType()->getAs<FunctionProtoType>();
+ CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
+ if (!CPT || !CPT->isNothrow(C))
+ return false;
+ }
+ }
+ return FoundOperator;
+ }
+ return false;
+}
+
+static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
+ SourceLocation KeyLoc, QualType T) {
+ assert(!T->isDependentType() && "Cannot evaluate traits of dependent type");
+
+ ASTContext &C = Self.Context;
+ switch(UTT) {
+ default: llvm_unreachable("not a UTT");
+ // Type trait expressions corresponding to the primary type category
+ // predicates in C++0x [meta.unary.cat].
+ case UTT_IsVoid:
+ return T->isVoidType();
+ case UTT_IsIntegral:
+ return T->isIntegralType(C);
+ case UTT_IsFloatingPoint:
+ return T->isFloatingType();
+ case UTT_IsArray:
+ return T->isArrayType();
+ case UTT_IsPointer:
+ return T->isPointerType();
+ case UTT_IsLvalueReference:
+ return T->isLValueReferenceType();
+ case UTT_IsRvalueReference:
+ return T->isRValueReferenceType();
+ case UTT_IsMemberFunctionPointer:
+ return T->isMemberFunctionPointerType();
+ case UTT_IsMemberObjectPointer:
+ return T->isMemberDataPointerType();
+ case UTT_IsEnum:
+ return T->isEnumeralType();
+ case UTT_IsUnion:
+ return T->isUnionType();
+ case UTT_IsClass:
+ return T->isClassType() || T->isStructureType() || T->isInterfaceType();
+ case UTT_IsFunction:
+ return T->isFunctionType();
+
+ // Type trait expressions which correspond to the convenient composition
+ // predicates in C++0x [meta.unary.comp].
+ case UTT_IsReference:
+ return T->isReferenceType();
+ case UTT_IsArithmetic:
+ return T->isArithmeticType() && !T->isEnumeralType();
+ case UTT_IsFundamental:
+ return T->isFundamentalType();
+ case UTT_IsObject:
+ return T->isObjectType();
+ case UTT_IsScalar:
+    // Note: semantic analysis depends on Objective-C lifetime types being
+    // considered scalar types. However, such types do not actually behave
+ // like scalar types at run time (since they may require retain/release
+ // operations), so we report them as non-scalar.
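+    // For example, under ARC a __strong object pointer is reported as
+    // non-scalar, while an __unsafe_unretained one is reported as scalar.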
+ if (T->isObjCLifetimeType()) {
+ switch (T.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+ }
+ }
+
+ return T->isScalarType();
+ case UTT_IsCompound:
+ return T->isCompoundType();
+ case UTT_IsMemberPointer:
+ return T->isMemberPointerType();
+
+ // Type trait expressions which correspond to the type property predicates
+ // in C++0x [meta.unary.prop].
+ case UTT_IsConst:
+ return T.isConstQualified();
+ case UTT_IsVolatile:
+ return T.isVolatileQualified();
+ case UTT_IsTrivial:
+ return T.isTrivialType(C);
+ case UTT_IsTriviallyCopyable:
+ return T.isTriviallyCopyableType(C);
+ case UTT_IsStandardLayout:
+ return T->isStandardLayoutType();
+ case UTT_IsPOD:
+ return T.isPODType(C);
+ case UTT_IsLiteral:
+ return T->isLiteralType(C);
+ case UTT_IsEmpty:
+ if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
+ return !RD->isUnion() && RD->isEmpty();
+ return false;
+ case UTT_IsPolymorphic:
+ if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
+ return !RD->isUnion() && RD->isPolymorphic();
+ return false;
+ case UTT_IsAbstract:
+ if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
+ return !RD->isUnion() && RD->isAbstract();
+ return false;
+  // __is_interface_class only returns true when CL is invoked in /CLR mode,
+  // and even then only when it is used with the 'interface struct ...'
+  // syntax. Clang doesn't support /CLR, which makes this type trait moot.
+ case UTT_IsInterfaceClass:
+ return false;
+ case UTT_IsFinal:
+ case UTT_IsSealed:
+ if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
+ return RD->hasAttr<FinalAttr>();
+ return false;
+ case UTT_IsSigned:
+ return T->isSignedIntegerType();
+ case UTT_IsUnsigned:
+ return T->isUnsignedIntegerType();
+
+ // Type trait expressions which query classes regarding their construction,
+ // destruction, and copying. Rather than being based directly on the
+ // related type predicates in the standard, they are specified by both
+ // GCC[1] and the Embarcadero C++ compiler[2], and Clang implements those
+ // specifications.
+ //
+  // 1: http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html
+ // 2: http://docwiki.embarcadero.com/RADStudio/XE/en/Type_Trait_Functions_(C%2B%2B0x)_Index
+ //
+ // Note that these builtins do not behave as documented in g++: if a class
+ // has both a trivial and a non-trivial special member of a particular kind,
+ // they return false! For now, we emulate this behavior.
+ // FIXME: This appears to be a g++ bug: more complex cases reveal that it
+ // does not correctly compute triviality in the presence of multiple special
+ // members of the same kind. Revisit this once the g++ bug is fixed.
+ case UTT_HasTrivialDefaultConstructor:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If __is_pod (type) is true then the trait is true, else if type is
+ // a cv class or union type (or array thereof) with a trivial default
+ // constructor ([class.ctor]) then the trait is true, else it is false.
+ if (T.isPODType(C))
+ return true;
+ if (CXXRecordDecl *RD = C.getBaseElementType(T)->getAsCXXRecordDecl())
+ return RD->hasTrivialDefaultConstructor() &&
+ !RD->hasNonTrivialDefaultConstructor();
+ return false;
+ case UTT_HasTrivialMoveConstructor:
+ // This trait is implemented by MSVC 2012 and needed to parse the
+ // standard library headers. Specifically this is used as the logic
+ // behind std::is_trivially_move_constructible (20.9.4.3).
+ if (T.isPODType(C))
+ return true;
+ if (CXXRecordDecl *RD = C.getBaseElementType(T)->getAsCXXRecordDecl())
+      return RD->hasTrivialMoveConstructor() &&
+             !RD->hasNonTrivialMoveConstructor();
+ return false;
+ case UTT_HasTrivialCopy:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If __is_pod (type) is true or type is a reference type then
+ // the trait is true, else if type is a cv class or union type
+ // with a trivial copy constructor ([class.copy]) then the trait
+ // is true, else it is false.
+ if (T.isPODType(C) || T->isReferenceType())
+ return true;
+ if (CXXRecordDecl *RD = T->getAsCXXRecordDecl())
+ return RD->hasTrivialCopyConstructor() &&
+ !RD->hasNonTrivialCopyConstructor();
+ return false;
+ case UTT_HasTrivialMoveAssign:
+ // This trait is implemented by MSVC 2012 and needed to parse the
+ // standard library headers. Specifically it is used as the logic
+ // behind std::is_trivially_move_assignable (20.9.4.3)
+ if (T.isPODType(C))
+ return true;
+ if (CXXRecordDecl *RD = C.getBaseElementType(T)->getAsCXXRecordDecl())
+      return RD->hasTrivialMoveAssignment() &&
+             !RD->hasNonTrivialMoveAssignment();
+ return false;
+ case UTT_HasTrivialAssign:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If type is const qualified or is a reference type then the
+ // trait is false. Otherwise if __is_pod (type) is true then the
+ // trait is true, else if type is a cv class or union type with
+ // a trivial copy assignment ([class.copy]) then the trait is
+ // true, else it is false.
+ // Note: the const and reference restrictions are interesting,
+ // given that const and reference members don't prevent a class
+ // from having a trivial copy assignment operator (but do cause
+ // errors if the copy assignment operator is actually used, q.v.
+ // [class.copy]p12).
+
+ if (T.isConstQualified())
+ return false;
+ if (T.isPODType(C))
+ return true;
+ if (CXXRecordDecl *RD = T->getAsCXXRecordDecl())
+ return RD->hasTrivialCopyAssignment() &&
+ !RD->hasNonTrivialCopyAssignment();
+ return false;
+ case UTT_IsDestructible:
+ case UTT_IsNothrowDestructible:
+ // C++14 [meta.unary.prop]:
+ // For reference types, is_destructible<T>::value is true.
+ if (T->isReferenceType())
+ return true;
+
+ // Objective-C++ ARC: autorelease types don't require destruction.
+ if (T->isObjCLifetimeType() &&
+ T.getObjCLifetime() == Qualifiers::OCL_Autoreleasing)
+ return true;
+
+ // C++14 [meta.unary.prop]:
+ // For incomplete types and function types, is_destructible<T>::value is
+ // false.
+ if (T->isIncompleteType() || T->isFunctionType())
+ return false;
+
+ // C++14 [meta.unary.prop]:
+ // For object types and given U equal to remove_all_extents_t<T>, if the
+ // expression std::declval<U&>().~U() is well-formed when treated as an
+ // unevaluated operand (Clause 5), then is_destructible<T>::value is true
+ if (auto *RD = C.getBaseElementType(T)->getAsCXXRecordDecl()) {
+ CXXDestructorDecl *Destructor = Self.LookupDestructor(RD);
+ if (!Destructor)
+ return false;
+ // C++14 [dcl.fct.def.delete]p2:
+ // A program that refers to a deleted function implicitly or
+ // explicitly, other than to declare it, is ill-formed.
+ if (Destructor->isDeleted())
+ return false;
+ if (C.getLangOpts().AccessControl && Destructor->getAccess() != AS_public)
+ return false;
+ if (UTT == UTT_IsNothrowDestructible) {
+ const FunctionProtoType *CPT =
+ Destructor->getType()->getAs<FunctionProtoType>();
+ CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
+ if (!CPT || !CPT->isNothrow(C))
+ return false;
+ }
+ }
+ return true;
+
+ case UTT_HasTrivialDestructor:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html
+ // If __is_pod (type) is true or type is a reference type
+ // then the trait is true, else if type is a cv class or union
+ // type (or array thereof) with a trivial destructor
+ // ([class.dtor]) then the trait is true, else it is
+ // false.
+ if (T.isPODType(C) || T->isReferenceType())
+ return true;
+
+ // Objective-C++ ARC: autorelease types don't require destruction.
+ if (T->isObjCLifetimeType() &&
+ T.getObjCLifetime() == Qualifiers::OCL_Autoreleasing)
+ return true;
+
+ if (CXXRecordDecl *RD = C.getBaseElementType(T)->getAsCXXRecordDecl())
+ return RD->hasTrivialDestructor();
+ return false;
+ // TODO: Propagate nothrowness for implicitly declared special members.
+ case UTT_HasNothrowAssign:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If type is const qualified or is a reference type then the
+ // trait is false. Otherwise if __has_trivial_assign (type)
+ // is true then the trait is true, else if type is a cv class
+ // or union type with copy assignment operators that are known
+ // not to throw an exception then the trait is true, else it is
+ // false.
+ if (C.getBaseElementType(T).isConstQualified())
+ return false;
+ if (T->isReferenceType())
+ return false;
+ if (T.isPODType(C) || T->isObjCLifetimeType())
+ return true;
+
+ if (const RecordType *RT = T->getAs<RecordType>())
+ return HasNoThrowOperator(RT, OO_Equal, Self, KeyLoc, C,
+ &CXXRecordDecl::hasTrivialCopyAssignment,
+ &CXXRecordDecl::hasNonTrivialCopyAssignment,
+ &CXXMethodDecl::isCopyAssignmentOperator);
+ return false;
+ case UTT_HasNothrowMoveAssign:
+ // This trait is implemented by MSVC 2012 and needed to parse the
+ // standard library headers. Specifically this is used as the logic
+ // behind std::is_nothrow_move_assignable (20.9.4.3).
+ if (T.isPODType(C))
+ return true;
+
+ if (const RecordType *RT = C.getBaseElementType(T)->getAs<RecordType>())
+ return HasNoThrowOperator(RT, OO_Equal, Self, KeyLoc, C,
+ &CXXRecordDecl::hasTrivialMoveAssignment,
+ &CXXRecordDecl::hasNonTrivialMoveAssignment,
+ &CXXMethodDecl::isMoveAssignmentOperator);
+ return false;
+ case UTT_HasNothrowCopy:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If __has_trivial_copy (type) is true then the trait is true, else
+ // if type is a cv class or union type with copy constructors that are
+ // known not to throw an exception then the trait is true, else it is
+ // false.
+ if (T.isPODType(C) || T->isReferenceType() || T->isObjCLifetimeType())
+ return true;
+ if (CXXRecordDecl *RD = T->getAsCXXRecordDecl()) {
+ if (RD->hasTrivialCopyConstructor() &&
+ !RD->hasNonTrivialCopyConstructor())
+ return true;
+
+ bool FoundConstructor = false;
+ unsigned FoundTQs;
+ for (const auto *ND : Self.LookupConstructors(RD)) {
+ // A template constructor is never a copy constructor.
+        // FIXME: However, it may actually be selected at the overload
+        // resolution point.
+ if (isa<FunctionTemplateDecl>(ND))
+ continue;
+ const CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(ND);
+ if (Constructor->isCopyConstructor(FoundTQs)) {
+ FoundConstructor = true;
+ const FunctionProtoType *CPT
+ = Constructor->getType()->getAs<FunctionProtoType>();
+ CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
+ if (!CPT)
+ return false;
+ // TODO: check whether evaluating default arguments can throw.
+ // For now, we'll be conservative and assume that they can throw.
+ if (!CPT->isNothrow(C) || CPT->getNumParams() > 1)
+ return false;
+ }
+ }
+
+ return FoundConstructor;
+ }
+ return false;
+ case UTT_HasNothrowConstructor:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html
+ // If __has_trivial_constructor (type) is true then the trait is
+ // true, else if type is a cv class or union type (or array
+ // thereof) with a default constructor that is known not to
+ // throw an exception then the trait is true, else it is false.
+ if (T.isPODType(C) || T->isObjCLifetimeType())
+ return true;
+ if (CXXRecordDecl *RD = C.getBaseElementType(T)->getAsCXXRecordDecl()) {
+ if (RD->hasTrivialDefaultConstructor() &&
+ !RD->hasNonTrivialDefaultConstructor())
+ return true;
+
+ bool FoundConstructor = false;
+ for (const auto *ND : Self.LookupConstructors(RD)) {
+ // FIXME: In C++0x, a constructor template can be a default constructor.
+ if (isa<FunctionTemplateDecl>(ND))
+ continue;
+ const CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(ND);
+ if (Constructor->isDefaultConstructor()) {
+ FoundConstructor = true;
+ const FunctionProtoType *CPT
+ = Constructor->getType()->getAs<FunctionProtoType>();
+ CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
+ if (!CPT)
+ return false;
+ // FIXME: check whether evaluating default arguments can throw.
+ // For now, we'll be conservative and assume that they can throw.
+ if (!CPT->isNothrow(C) || CPT->getNumParams() > 0)
+ return false;
+ }
+ }
+ return FoundConstructor;
+ }
+ return false;
+ case UTT_HasVirtualDestructor:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If type is a class type with a virtual destructor ([class.dtor])
+ // then the trait is true, else it is false.
+ if (CXXRecordDecl *RD = T->getAsCXXRecordDecl())
+ if (CXXDestructorDecl *Destructor = Self.LookupDestructor(RD))
+ return Destructor->isVirtual();
+ return false;
+
+ // These type trait expressions are modeled on the specifications for the
+ // Embarcadero C++0x type trait functions:
+ // http://docwiki.embarcadero.com/RADStudio/XE/en/Type_Trait_Functions_(C%2B%2B0x)_Index
+ case UTT_IsCompleteType:
+ // http://docwiki.embarcadero.com/RADStudio/XE/en/Is_complete_type_(typename_T_):
+ // Returns True if and only if T is a complete type at the point of the
+ // function call.
+ return !T->isIncompleteType();
+ }
+}
+
+/// \brief Determine whether T has a non-trivial Objective-C lifetime in
+/// ARC mode.
+static bool hasNontrivialObjCLifetime(QualType T) {
+ switch (T.getObjCLifetime()) {
+ case Qualifiers::OCL_ExplicitNone:
+ return false;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return true;
+
+ case Qualifiers::OCL_None:
+ return T->isObjCLifetimeType();
+ }
+
+ llvm_unreachable("Unknown ObjC lifetime qualifier");
+}
+
+static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
+ QualType RhsT, SourceLocation KeyLoc);
+
+static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc) {
+ if (Kind <= UTT_Last)
+ return EvaluateUnaryTypeTrait(S, Kind, KWLoc, Args[0]->getType());
+
+ if (Kind <= BTT_Last)
+ return EvaluateBinaryTypeTrait(S, Kind, Args[0]->getType(),
+ Args[1]->getType(), RParenLoc);
+
+ switch (Kind) {
+ case clang::TT_IsConstructible:
+ case clang::TT_IsNothrowConstructible:
+ case clang::TT_IsTriviallyConstructible: {
+ // C++11 [meta.unary.prop]:
+ // is_trivially_constructible is defined as:
+ //
+ // is_constructible<T, Args...>::value is true and the variable
+ // definition for is_constructible, as defined below, is known to call
+ // no operation that is not trivial.
+ //
+ // The predicate condition for a template specialization
+ // is_constructible<T, Args...> shall be satisfied if and only if the
+ // following variable definition would be well-formed for some invented
+ // variable t:
+ //
+ // T t(create<Args>()...);
+ assert(!Args.empty());
+
+ // Precondition: T and all types in the parameter pack Args shall be
+ // complete types, (possibly cv-qualified) void, or arrays of
+ // unknown bound.
+ for (const auto *TSI : Args) {
+ QualType ArgTy = TSI->getType();
+ if (ArgTy->isVoidType() || ArgTy->isIncompleteArrayType())
+ continue;
+
+ if (S.RequireCompleteType(KWLoc, ArgTy,
+ diag::err_incomplete_type_used_in_type_trait_expr))
+ return false;
+ }
+
+ // Make sure the first argument is not incomplete nor a function type.
+ QualType T = Args[0]->getType();
+ if (T->isIncompleteType() || T->isFunctionType())
+ return false;
+
+ // Make sure the first argument is not an abstract type.
+ CXXRecordDecl *RD = T->getAsCXXRecordDecl();
+ if (RD && RD->isAbstract())
+ return false;
+
+ SmallVector<OpaqueValueExpr, 2> OpaqueArgExprs;
+ SmallVector<Expr *, 2> ArgExprs;
+ ArgExprs.reserve(Args.size() - 1);
+ for (unsigned I = 1, N = Args.size(); I != N; ++I) {
+ QualType ArgTy = Args[I]->getType();
+ if (ArgTy->isObjectType() || ArgTy->isFunctionType())
+ ArgTy = S.Context.getRValueReferenceType(ArgTy);
+ OpaqueArgExprs.push_back(
+ OpaqueValueExpr(Args[I]->getTypeLoc().getLocStart(),
+ ArgTy.getNonLValueExprType(S.Context),
+ Expr::getValueKindForType(ArgTy)));
+ }
+ for (Expr &E : OpaqueArgExprs)
+ ArgExprs.push_back(&E);
+
+ // Perform the initialization in an unevaluated context within a SFINAE
+ // trap at translation unit scope.
+ EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated);
+ Sema::SFINAETrap SFINAE(S, /*AccessCheckingSFINAE=*/true);
+ Sema::ContextRAII TUContext(S, S.Context.getTranslationUnitDecl());
+ InitializedEntity To(InitializedEntity::InitializeTemporary(Args[0]));
+ InitializationKind InitKind(InitializationKind::CreateDirect(KWLoc, KWLoc,
+ RParenLoc));
+ InitializationSequence Init(S, To, InitKind, ArgExprs);
+ if (Init.Failed())
+ return false;
+
+ ExprResult Result = Init.Perform(S, To, InitKind, ArgExprs);
+ if (Result.isInvalid() || SFINAE.hasErrorOccurred())
+ return false;
+
+ if (Kind == clang::TT_IsConstructible)
+ return true;
+
+ if (Kind == clang::TT_IsNothrowConstructible)
+ return S.canThrow(Result.get()) == CT_Cannot;
+
+ if (Kind == clang::TT_IsTriviallyConstructible) {
+ // Under Objective-C ARC, if the destination has non-trivial Objective-C
+ // lifetime, this is a non-trivial construction.
+ if (S.getLangOpts().ObjCAutoRefCount &&
+ hasNontrivialObjCLifetime(T.getNonReferenceType()))
+ return false;
+
+ // The initialization succeeded; now make sure there are no non-trivial
+ // calls.
+ return !Result.get()->hasNonTrivialCall(S.Context);
+ }
+
+ llvm_unreachable("unhandled type trait");
+ return false;
+ }
+ default: llvm_unreachable("not a TT");
+ }
+
+ return false;
+}
+
+ExprResult Sema::BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc) {
+ QualType ResultType = Context.getLogicalOperationType();
+
+ if (Kind <= UTT_Last && !CheckUnaryTypeTraitTypeCompleteness(
+ *this, Kind, KWLoc, Args[0]->getType()))
+ return ExprError();
+
+ bool Dependent = false;
+ for (unsigned I = 0, N = Args.size(); I != N; ++I) {
+ if (Args[I]->getType()->isDependentType()) {
+ Dependent = true;
+ break;
+ }
+ }
+
+ bool Result = false;
+ if (!Dependent)
+ Result = evaluateTypeTrait(*this, Kind, KWLoc, Args, RParenLoc);
+
+ return TypeTraitExpr::Create(Context, ResultType, KWLoc, Kind, Args,
+ RParenLoc, Result);
+}
+
+ExprResult Sema::ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
+ ArrayRef<ParsedType> Args,
+ SourceLocation RParenLoc) {
+ SmallVector<TypeSourceInfo *, 4> ConvertedArgs;
+ ConvertedArgs.reserve(Args.size());
+
+ for (unsigned I = 0, N = Args.size(); I != N; ++I) {
+ TypeSourceInfo *TInfo;
+ QualType T = GetTypeFromParser(Args[I], &TInfo);
+ if (!TInfo)
+ TInfo = Context.getTrivialTypeSourceInfo(T, KWLoc);
+
+ ConvertedArgs.push_back(TInfo);
+ }
+
+ return BuildTypeTrait(Kind, KWLoc, ConvertedArgs, RParenLoc);
+}
+
+static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
+ QualType RhsT, SourceLocation KeyLoc) {
+ assert(!LhsT->isDependentType() && !RhsT->isDependentType() &&
+ "Cannot evaluate traits of dependent types");
+
+ switch(BTT) {
+ case BTT_IsBaseOf: {
+ // C++0x [meta.rel]p2
+ // Base is a base class of Derived without regard to cv-qualifiers or
+ // Base and Derived are not unions and name the same class type without
+ // regard to cv-qualifiers.
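+    //
+    // For example, __is_base_of(Base, Derived) is true when Derived inherits
+    // from Base (directly or indirectly), and __is_base_of(T, T) is true for
+    // any non-union class type T.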
+
+ const RecordType *lhsRecord = LhsT->getAs<RecordType>();
+ if (!lhsRecord) return false;
+
+ const RecordType *rhsRecord = RhsT->getAs<RecordType>();
+ if (!rhsRecord) return false;
+
+ assert(Self.Context.hasSameUnqualifiedType(LhsT, RhsT)
+ == (lhsRecord == rhsRecord));
+
+ if (lhsRecord == rhsRecord)
+ return !lhsRecord->getDecl()->isUnion();
+
+ // C++0x [meta.rel]p2:
+ // If Base and Derived are class types and are different types
+ // (ignoring possible cv-qualifiers) then Derived shall be a
+ // complete type.
+ if (Self.RequireCompleteType(KeyLoc, RhsT,
+ diag::err_incomplete_type_used_in_type_trait_expr))
+ return false;
+
+ return cast<CXXRecordDecl>(rhsRecord->getDecl())
+ ->isDerivedFrom(cast<CXXRecordDecl>(lhsRecord->getDecl()));
+ }
+ case BTT_IsSame:
+ return Self.Context.hasSameType(LhsT, RhsT);
+ case BTT_TypeCompatible:
+ return Self.Context.typesAreCompatible(LhsT.getUnqualifiedType(),
+ RhsT.getUnqualifiedType());
+ case BTT_IsConvertible:
+ case BTT_IsConvertibleTo: {
+ // C++0x [meta.rel]p4:
+ // Given the following function prototype:
+ //
+ // template <class T>
+ // typename add_rvalue_reference<T>::type create();
+ //
+ // the predicate condition for a template specialization
+ // is_convertible<From, To> shall be satisfied if and only if
+ // the return expression in the following code would be
+ // well-formed, including any implicit conversions to the return
+ // type of the function:
+ //
+ // To test() {
+ // return create<From>();
+ // }
+ //
+ // Access checking is performed as if in a context unrelated to To and
+ // From. Only the validity of the immediate context of the expression
+ // of the return-statement (including conversions to the return type)
+ // is considered.
+ //
+ // We model the initialization as a copy-initialization of a temporary
+ // of the appropriate type, which for this expression is identical to the
+ // return statement (since NRVO doesn't apply).
+
+ // Functions aren't allowed to return function or array types.
+ if (RhsT->isFunctionType() || RhsT->isArrayType())
+ return false;
+
+ // A return statement in a void function must have void type.
+ if (RhsT->isVoidType())
+ return LhsT->isVoidType();
+
+ // A function definition requires a complete, non-abstract return type.
+ if (!Self.isCompleteType(KeyLoc, RhsT) || Self.isAbstractType(KeyLoc, RhsT))
+ return false;
+
+ // Compute the result of add_rvalue_reference.
+ if (LhsT->isObjectType() || LhsT->isFunctionType())
+ LhsT = Self.Context.getRValueReferenceType(LhsT);
+
+ // Build a fake source and destination for initialization.
+ InitializedEntity To(InitializedEntity::InitializeTemporary(RhsT));
+ OpaqueValueExpr From(KeyLoc, LhsT.getNonLValueExprType(Self.Context),
+ Expr::getValueKindForType(LhsT));
+ Expr *FromPtr = &From;
+ InitializationKind Kind(InitializationKind::CreateCopy(KeyLoc,
+ SourceLocation()));
+
+ // Perform the initialization in an unevaluated context within a SFINAE
+ // trap at translation unit scope.
+ EnterExpressionEvaluationContext Unevaluated(Self, Sema::Unevaluated);
+ Sema::SFINAETrap SFINAE(Self, /*AccessCheckingSFINAE=*/true);
+ Sema::ContextRAII TUContext(Self, Self.Context.getTranslationUnitDecl());
+ InitializationSequence Init(Self, To, Kind, FromPtr);
+ if (Init.Failed())
+ return false;
+
+ ExprResult Result = Init.Perform(Self, To, Kind, FromPtr);
+ return !Result.isInvalid() && !SFINAE.hasErrorOccurred();
+ }
+
+ case BTT_IsNothrowAssignable:
+ case BTT_IsTriviallyAssignable: {
+ // C++11 [meta.unary.prop]p3:
+ // is_trivially_assignable is defined as:
+ // is_assignable<T, U>::value is true and the assignment, as defined by
+ // is_assignable, is known to call no operation that is not trivial
+ //
+ // is_assignable is defined as:
+ // The expression declval<T>() = declval<U>() is well-formed when
+ // treated as an unevaluated operand (Clause 5).
+ //
+ // For both, T and U shall be complete types, (possibly cv-qualified)
+ // void, or arrays of unknown bound.
+ if (!LhsT->isVoidType() && !LhsT->isIncompleteArrayType() &&
+ Self.RequireCompleteType(KeyLoc, LhsT,
+ diag::err_incomplete_type_used_in_type_trait_expr))
+ return false;
+ if (!RhsT->isVoidType() && !RhsT->isIncompleteArrayType() &&
+ Self.RequireCompleteType(KeyLoc, RhsT,
+ diag::err_incomplete_type_used_in_type_trait_expr))
+ return false;
+
+ // cv void is never assignable.
+ if (LhsT->isVoidType() || RhsT->isVoidType())
+ return false;
+
+ // Build expressions that emulate the effect of declval<T>() and
+ // declval<U>().
+ if (LhsT->isObjectType() || LhsT->isFunctionType())
+ LhsT = Self.Context.getRValueReferenceType(LhsT);
+ if (RhsT->isObjectType() || RhsT->isFunctionType())
+ RhsT = Self.Context.getRValueReferenceType(RhsT);
+ OpaqueValueExpr Lhs(KeyLoc, LhsT.getNonLValueExprType(Self.Context),
+ Expr::getValueKindForType(LhsT));
+ OpaqueValueExpr Rhs(KeyLoc, RhsT.getNonLValueExprType(Self.Context),
+ Expr::getValueKindForType(RhsT));
+
+ // Attempt the assignment in an unevaluated context within a SFINAE
+ // trap at translation unit scope.
+ EnterExpressionEvaluationContext Unevaluated(Self, Sema::Unevaluated);
+ Sema::SFINAETrap SFINAE(Self, /*AccessCheckingSFINAE=*/true);
+ Sema::ContextRAII TUContext(Self, Self.Context.getTranslationUnitDecl());
+ ExprResult Result = Self.BuildBinOp(/*S=*/nullptr, KeyLoc, BO_Assign, &Lhs,
+ &Rhs);
+ if (Result.isInvalid() || SFINAE.hasErrorOccurred())
+ return false;
+
+ if (BTT == BTT_IsNothrowAssignable)
+ return Self.canThrow(Result.get()) == CT_Cannot;
+
+ if (BTT == BTT_IsTriviallyAssignable) {
+ // Under Objective-C ARC, if the destination has non-trivial Objective-C
+ // lifetime, this is a non-trivial assignment.
+ if (Self.getLangOpts().ObjCAutoRefCount &&
+ hasNontrivialObjCLifetime(LhsT.getNonReferenceType()))
+ return false;
+
+ return !Result.get()->hasNonTrivialCall(Self.Context);
+ }
+
+ llvm_unreachable("unhandled type trait");
+ return false;
+ }
+ default: llvm_unreachable("not a BTT");
+ }
+ llvm_unreachable("Unknown type trait or not implemented");
+}
+
+ExprResult Sema::ActOnArrayTypeTrait(ArrayTypeTrait ATT,
+ SourceLocation KWLoc,
+ ParsedType Ty,
+ Expr* DimExpr,
+ SourceLocation RParen) {
+ TypeSourceInfo *TSInfo;
+ QualType T = GetTypeFromParser(Ty, &TSInfo);
+ if (!TSInfo)
+ TSInfo = Context.getTrivialTypeSourceInfo(T);
+
+ return BuildArrayTypeTrait(ATT, KWLoc, TSInfo, DimExpr, RParen);
+}
+
+static uint64_t EvaluateArrayTypeTrait(Sema &Self, ArrayTypeTrait ATT,
+ QualType T, Expr *DimExpr,
+ SourceLocation KeyLoc) {
+ assert(!T->isDependentType() && "Cannot evaluate traits of dependent type");
+
+ switch(ATT) {
+ case ATT_ArrayRank:
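+    // For example, __array_rank(int[10][20]) evaluates to 2; the rank of a
+    // non-array type is 0.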
+ if (T->isArrayType()) {
+ unsigned Dim = 0;
+ while (const ArrayType *AT = Self.Context.getAsArrayType(T)) {
+ ++Dim;
+ T = AT->getElementType();
+ }
+ return Dim;
+ }
+ return 0;
+
+ case ATT_ArrayExtent: {
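+    // For example, __array_extent(int[10][20], 1) evaluates to 20; an
+    // out-of-range dimension yields 0.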
+ llvm::APSInt Value;
+ uint64_t Dim;
+ if (Self.VerifyIntegerConstantExpression(DimExpr, &Value,
+ diag::err_dimension_expr_not_constant_integer,
+ false).isInvalid())
+ return 0;
+ if (Value.isSigned() && Value.isNegative()) {
+ Self.Diag(KeyLoc, diag::err_dimension_expr_not_constant_integer)
+ << DimExpr->getSourceRange();
+ return 0;
+ }
+ Dim = Value.getLimitedValue();
+
+ if (T->isArrayType()) {
+ unsigned D = 0;
+ bool Matched = false;
+ while (const ArrayType *AT = Self.Context.getAsArrayType(T)) {
+ if (Dim == D) {
+ Matched = true;
+ break;
+ }
+ ++D;
+ T = AT->getElementType();
+ }
+
+ if (Matched && T->isArrayType()) {
+ if (const ConstantArrayType *CAT = Self.Context.getAsConstantArrayType(T))
+ return CAT->getSize().getLimitedValue();
+ }
+ }
+ return 0;
+ }
+ }
+ llvm_unreachable("Unknown type trait or not implemented");
+}
+
+ExprResult Sema::BuildArrayTypeTrait(ArrayTypeTrait ATT,
+ SourceLocation KWLoc,
+ TypeSourceInfo *TSInfo,
+ Expr* DimExpr,
+ SourceLocation RParen) {
+ QualType T = TSInfo->getType();
+
+ // FIXME: This should likely be tracked as an APInt to remove any host
+ // assumptions about the width of size_t on the target.
+ uint64_t Value = 0;
+ if (!T->isDependentType())
+ Value = EvaluateArrayTypeTrait(*this, ATT, T, DimExpr, KWLoc);
+
+ // While the specification for these traits from the Embarcadero C++
+ // compiler's documentation says the return type is 'unsigned int', Clang
+ // returns 'size_t'. On Windows, the primary platform for the Embarcadero
+ // compiler, there is no difference. On several other platforms this is an
+ // important distinction.
+ return new (Context) ArrayTypeTraitExpr(KWLoc, ATT, TSInfo, Value, DimExpr,
+ RParen, Context.getSizeType());
+}
+
+ExprResult Sema::ActOnExpressionTrait(ExpressionTrait ET,
+ SourceLocation KWLoc,
+ Expr *Queried,
+ SourceLocation RParen) {
+  // If there was an error parsing the expression, ignore it.
+ if (!Queried)
+ return ExprError();
+
+ ExprResult Result = BuildExpressionTrait(ET, KWLoc, Queried, RParen);
+
+ return Result;
+}
+
+static bool EvaluateExpressionTrait(ExpressionTrait ET, Expr *E) {
+ switch (ET) {
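+  // For example, __is_lvalue_expr(x) is true for a named variable x, while
+  // __is_rvalue_expr(42) is true for an integer literal.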
+ case ET_IsLValueExpr: return E->isLValue();
+ case ET_IsRValueExpr: return E->isRValue();
+ }
+ llvm_unreachable("Expression trait not covered by switch");
+}
+
+ExprResult Sema::BuildExpressionTrait(ExpressionTrait ET,
+ SourceLocation KWLoc,
+ Expr *Queried,
+ SourceLocation RParen) {
+ if (Queried->isTypeDependent()) {
+ // Delay type-checking for type-dependent expressions.
+ } else if (Queried->getType()->isPlaceholderType()) {
+ ExprResult PE = CheckPlaceholderExpr(Queried);
+ if (PE.isInvalid()) return ExprError();
+ return BuildExpressionTrait(ET, KWLoc, PE.get(), RParen);
+ }
+
+ bool Value = EvaluateExpressionTrait(ET, Queried);
+
+ return new (Context)
+ ExpressionTraitExpr(KWLoc, ET, Queried, Value, RParen, Context.BoolTy);
+}
+
+QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
+ ExprValueKind &VK,
+ SourceLocation Loc,
+ bool isIndirect) {
+ assert(!LHS.get()->getType()->isPlaceholderType() &&
+ !RHS.get()->getType()->isPlaceholderType() &&
+ "placeholders should have been weeded out by now");
+
+ // The LHS undergoes lvalue conversions if this is ->*.
+ if (isIndirect) {
+ LHS = DefaultLvalueConversion(LHS.get());
+ if (LHS.isInvalid()) return QualType();
+ }
+
+ // The RHS always undergoes lvalue conversions.
+ RHS = DefaultLvalueConversion(RHS.get());
+ if (RHS.isInvalid()) return QualType();
+
+ const char *OpSpelling = isIndirect ? "->*" : ".*";
+ // C++ 5.5p2
+ // The binary operator .* [p3: ->*] binds its second operand, which shall
+ // be of type "pointer to member of T" (where T is a completely-defined
+ // class type) [...]
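+  //
+  // For example, in 'obj .* pm', 'pm' must have a type such as 'int C::*'
+  // and 'obj' must be of class C or of a class derived from C.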
+ QualType RHSType = RHS.get()->getType();
+ const MemberPointerType *MemPtr = RHSType->getAs<MemberPointerType>();
+ if (!MemPtr) {
+ Diag(Loc, diag::err_bad_memptr_rhs)
+ << OpSpelling << RHSType << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ QualType Class(MemPtr->getClass(), 0);
+
+ // Note: C++ [expr.mptr.oper]p2-3 says that the class type into which the
+ // member pointer points must be completely-defined. However, there is no
+ // reason for this semantic distinction, and the rule is not enforced by
+ // other compilers. Therefore, we do not check this property, as it is
+ // likely to be considered a defect.
+
+ // C++ 5.5p2
+ // [...] to its first operand, which shall be of class T or of a class of
+ // which T is an unambiguous and accessible base class. [p3: a pointer to
+ // such a class]
+ QualType LHSType = LHS.get()->getType();
+ if (isIndirect) {
+ if (const PointerType *Ptr = LHSType->getAs<PointerType>())
+ LHSType = Ptr->getPointeeType();
+ else {
+ Diag(Loc, diag::err_bad_memptr_lhs)
+ << OpSpelling << 1 << LHSType
+ << FixItHint::CreateReplacement(SourceRange(Loc), ".*");
+ return QualType();
+ }
+ }
+
+ if (!Context.hasSameUnqualifiedType(Class, LHSType)) {
+ // If we want to check the hierarchy, we need a complete type.
+ if (RequireCompleteType(Loc, LHSType, diag::err_bad_memptr_lhs,
+ OpSpelling, (int)isIndirect)) {
+ return QualType();
+ }
+
+ if (!IsDerivedFrom(Loc, LHSType, Class)) {
+ Diag(Loc, diag::err_bad_memptr_lhs) << OpSpelling
+ << (int)isIndirect << LHS.get()->getType();
+ return QualType();
+ }
+
+ CXXCastPath BasePath;
+ if (CheckDerivedToBaseConversion(LHSType, Class, Loc,
+ SourceRange(LHS.get()->getLocStart(),
+ RHS.get()->getLocEnd()),
+ &BasePath))
+ return QualType();
+
+ // Cast LHS to type of use.
+ QualType UseType = isIndirect ? Context.getPointerType(Class) : Class;
+ ExprValueKind VK = isIndirect ? VK_RValue : LHS.get()->getValueKind();
+ LHS = ImpCastExprToType(LHS.get(), UseType, CK_DerivedToBase, VK,
+ &BasePath);
+ }
+
+ if (isa<CXXScalarValueInitExpr>(RHS.get()->IgnoreParens())) {
+    // Diagnose the use of a functional-style cast to a pointer-to-member
+    // type as the right-hand operand of a pointer-to-member expression.
+ Diag(Loc, diag::err_pointer_to_member_type) << isIndirect;
+ return QualType();
+ }
+
+ // C++ 5.5p2
+ // The result is an object or a function of the type specified by the
+ // second operand.
+ // The cv qualifiers are the union of those in the pointer and the left side,
+ // in accordance with 5.5p5 and 5.2.5.
+ QualType Result = MemPtr->getPointeeType();
+ Result = Context.getCVRQualifiedType(Result, LHSType.getCVRQualifiers());
+
+ // C++0x [expr.mptr.oper]p6:
+ // In a .* expression whose object expression is an rvalue, the program is
+ // ill-formed if the second operand is a pointer to member function with
+ // ref-qualifier &. In a ->* expression or in a .* expression whose object
+ // expression is an lvalue, the program is ill-formed if the second operand
+ // is a pointer to member function with ref-qualifier &&.
+ if (const FunctionProtoType *Proto = Result->getAs<FunctionProtoType>()) {
+ switch (Proto->getRefQualifier()) {
+ case RQ_None:
+ // Do nothing
+ break;
+
+ case RQ_LValue:
+ if (!isIndirect && !LHS.get()->Classify(Context).isLValue())
+ Diag(Loc, diag::err_pointer_to_member_oper_value_classify)
+ << RHSType << 1 << LHS.get()->getSourceRange();
+ break;
+
+ case RQ_RValue:
+ if (isIndirect || !LHS.get()->Classify(Context).isRValue())
+ Diag(Loc, diag::err_pointer_to_member_oper_value_classify)
+ << RHSType << 0 << LHS.get()->getSourceRange();
+ break;
+ }
+ }
+
+ // C++ [expr.mptr.oper]p6:
+ // The result of a .* expression whose second operand is a pointer
+ // to a data member is of the same value category as its
+ // first operand. The result of a .* expression whose second
+ // operand is a pointer to a member function is a prvalue. The
+ // result of an ->* expression is an lvalue if its second operand
+ // is a pointer to data member and a prvalue otherwise.
+ if (Result->isFunctionType()) {
+ VK = VK_RValue;
+ return Context.BoundMemberTy;
+ } else if (isIndirect) {
+ VK = VK_LValue;
+ } else {
+ VK = LHS.get()->getValueKind();
+ }
+
+ return Result;
+}
+
+/// \brief Try to convert a type to another according to C++0x 5.16p3.
+///
+/// This is part of the parameter validation for the ? operator. If either
+/// value operand is a class type, the two operands are attempted to be
+/// converted to each other. This function does the conversion in one direction.
+/// It returns true if the program is ill-formed and has already been diagnosed
+/// as such.
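+///
+/// For example, for 'cond ? derived_obj : base_obj', one call attempts to
+/// convert the derived-class operand to the base-class operand's type, and a
+/// second call attempts the reverse direction.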
+static bool TryClassUnification(Sema &Self, Expr *From, Expr *To,
+ SourceLocation QuestionLoc,
+ bool &HaveConversion,
+ QualType &ToType) {
+ HaveConversion = false;
+ ToType = To->getType();
+
+ InitializationKind Kind = InitializationKind::CreateCopy(To->getLocStart(),
+ SourceLocation());
+ // C++0x 5.16p3
+ // The process for determining whether an operand expression E1 of type T1
+ // can be converted to match an operand expression E2 of type T2 is defined
+ // as follows:
+ // -- If E2 is an lvalue:
+ bool ToIsLvalue = To->isLValue();
+ if (ToIsLvalue) {
+ // E1 can be converted to match E2 if E1 can be implicitly converted to
+ // type "lvalue reference to T2", subject to the constraint that in the
+ // conversion the reference must bind directly to E1.
+ QualType T = Self.Context.getLValueReferenceType(ToType);
+ InitializedEntity Entity = InitializedEntity::InitializeTemporary(T);
+
+ InitializationSequence InitSeq(Self, Entity, Kind, From);
+ if (InitSeq.isDirectReferenceBinding()) {
+ ToType = T;
+ HaveConversion = true;
+ return false;
+ }
+
+ if (InitSeq.isAmbiguous())
+ return InitSeq.Diagnose(Self, Entity, Kind, From);
+ }
+
+ // -- If E2 is an rvalue, or if the conversion above cannot be done:
+ // -- if E1 and E2 have class type, and the underlying class types are
+ // the same or one is a base class of the other:
+ QualType FTy = From->getType();
+ QualType TTy = To->getType();
+ const RecordType *FRec = FTy->getAs<RecordType>();
+ const RecordType *TRec = TTy->getAs<RecordType>();
+ bool FDerivedFromT = FRec && TRec && FRec != TRec &&
+ Self.IsDerivedFrom(QuestionLoc, FTy, TTy);
+ if (FRec && TRec && (FRec == TRec || FDerivedFromT ||
+ Self.IsDerivedFrom(QuestionLoc, TTy, FTy))) {
+ // E1 can be converted to match E2 if the class of T2 is the
+ // same type as, or a base class of, the class of T1, and
+ // [cv2 > cv1].
+ if (FRec == TRec || FDerivedFromT) {
+ if (TTy.isAtLeastAsQualifiedAs(FTy)) {
+ InitializedEntity Entity = InitializedEntity::InitializeTemporary(TTy);
+ InitializationSequence InitSeq(Self, Entity, Kind, From);
+ if (InitSeq) {
+ HaveConversion = true;
+ return false;
+ }
+
+ if (InitSeq.isAmbiguous())
+ return InitSeq.Diagnose(Self, Entity, Kind, From);
+ }
+ }
+
+ return false;
+ }
+
+ // -- Otherwise: E1 can be converted to match E2 if E1 can be
+ // implicitly converted to the type that expression E2 would have
+ // if E2 were converted to an rvalue (or the type it has, if E2 is
+ // an rvalue).
+ //
+ // This actually refers very narrowly to the lvalue-to-rvalue conversion, not
+ // to the array-to-pointer or function-to-pointer conversions.
+ if (!TTy->getAs<TagType>())
+ TTy = TTy.getUnqualifiedType();
+
+ InitializedEntity Entity = InitializedEntity::InitializeTemporary(TTy);
+ InitializationSequence InitSeq(Self, Entity, Kind, From);
+ HaveConversion = !InitSeq.Failed();
+ ToType = TTy;
+ if (InitSeq.isAmbiguous())
+ return InitSeq.Diagnose(Self, Entity, Kind, From);
+
+ return false;
+}
+
+/// \brief Try to find a common type for two according to C++0x 5.16p5.
+///
+/// This is part of the parameter validation for the ? operator. If either
+/// value operand is a class type, overload resolution is used to find a
+/// conversion to a common type.
+static bool FindConditionalOverload(Sema &Self, ExprResult &LHS, ExprResult &RHS,
+ SourceLocation QuestionLoc) {
+ Expr *Args[2] = { LHS.get(), RHS.get() };
+ OverloadCandidateSet CandidateSet(QuestionLoc,
+ OverloadCandidateSet::CSK_Operator);
+ Self.AddBuiltinOperatorCandidates(OO_Conditional, QuestionLoc, Args,
+ CandidateSet);
+
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(Self, QuestionLoc, Best)) {
+ case OR_Success: {
+ // We found a match. Perform the conversions on the arguments and move on.
+ ExprResult LHSRes =
+ Self.PerformImplicitConversion(LHS.get(), Best->BuiltinTypes.ParamTypes[0],
+ Best->Conversions[0], Sema::AA_Converting);
+ if (LHSRes.isInvalid())
+ break;
+ LHS = LHSRes;
+
+ ExprResult RHSRes =
+ Self.PerformImplicitConversion(RHS.get(), Best->BuiltinTypes.ParamTypes[1],
+ Best->Conversions[1], Sema::AA_Converting);
+ if (RHSRes.isInvalid())
+ break;
+ RHS = RHSRes;
+ if (Best->Function)
+ Self.MarkFunctionReferenced(QuestionLoc, Best->Function);
+ return false;
+ }
+
+ case OR_No_Viable_Function:
+
+ // Emit a better diagnostic if one of the expressions is a null pointer
+ // constant and the other is a pointer type. In this case, the user most
+ // likely forgot to take the address of the other expression.
+ if (Self.DiagnoseConditionalForNull(LHS.get(), RHS.get(), QuestionLoc))
+ return true;
+
+ Self.Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands)
+ << LHS.get()->getType() << RHS.get()->getType()
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return true;
+
+ case OR_Ambiguous:
+ Self.Diag(QuestionLoc, diag::err_conditional_ambiguous_ovl)
+ << LHS.get()->getType() << RHS.get()->getType()
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ // FIXME: Print the possible common types by printing the return types of
+ // the viable candidates.
+ break;
+
+ case OR_Deleted:
+ llvm_unreachable("Conditional operator has only built-in overloads");
+ }
+ return true;
+}
+
+/// \brief Perform an "extended" implicit conversion as returned by
+/// TryClassUnification.
+static bool ConvertForConditional(Sema &Self, ExprResult &E, QualType T) {
+ InitializedEntity Entity = InitializedEntity::InitializeTemporary(T);
+ InitializationKind Kind = InitializationKind::CreateCopy(E.get()->getLocStart(),
+ SourceLocation());
+ Expr *Arg = E.get();
+ InitializationSequence InitSeq(Self, Entity, Kind, Arg);
+ ExprResult Result = InitSeq.Perform(Self, Entity, Kind, Arg);
+ if (Result.isInvalid())
+ return true;
+
+ E = Result;
+ return false;
+}
+
+/// \brief Check the operands of ?: under C++ semantics.
+///
+/// See C++ [expr.cond]. Note that LHS is never null, even for the GNU x ?: y
+/// extension. In this case, LHS == Cond. (But they're not aliases.)
+QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
+ ExprResult &RHS, ExprValueKind &VK,
+ ExprObjectKind &OK,
+ SourceLocation QuestionLoc) {
+ // FIXME: Handle C99's complex types, vector types, block pointers and Obj-C++
+ // interface pointers.
+
+ // C++11 [expr.cond]p1
+ // The first expression is contextually converted to bool.
+ if (!Cond.get()->isTypeDependent()) {
+ ExprResult CondRes = CheckCXXBooleanCondition(Cond.get());
+ if (CondRes.isInvalid())
+ return QualType();
+ Cond = CondRes;
+ }
+
+ // Assume r-value.
+ VK = VK_RValue;
+ OK = OK_Ordinary;
+
+ // Either of the arguments dependent?
+ if (LHS.get()->isTypeDependent() || RHS.get()->isTypeDependent())
+ return Context.DependentTy;
+
+ // C++11 [expr.cond]p2
+ // If either the second or the third operand has type (cv) void, ...
+ QualType LTy = LHS.get()->getType();
+ QualType RTy = RHS.get()->getType();
+ bool LVoid = LTy->isVoidType();
+ bool RVoid = RTy->isVoidType();
+ if (LVoid || RVoid) {
+ // ... one of the following shall hold:
+ // -- The second or the third operand (but not both) is a (possibly
+ // parenthesized) throw-expression; the result is of the type
+ // and value category of the other.
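+    //
+    //      For example, in 'b ? obj : throw 0' the result has the type and
+    //      value category of 'obj'.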
+ bool LThrow = isa<CXXThrowExpr>(LHS.get()->IgnoreParenImpCasts());
+ bool RThrow = isa<CXXThrowExpr>(RHS.get()->IgnoreParenImpCasts());
+ if (LThrow != RThrow) {
+ Expr *NonThrow = LThrow ? RHS.get() : LHS.get();
+ VK = NonThrow->getValueKind();
+ // DR (no number yet): the result is a bit-field if the
+ // non-throw-expression operand is a bit-field.
+ OK = NonThrow->getObjectKind();
+ return NonThrow->getType();
+ }
+
+ // -- Both the second and third operands have type void; the result is of
+ // type void and is a prvalue.
+ if (LVoid && RVoid)
+ return Context.VoidTy;
+
+    // Neither holds; this is an error.
+ Diag(QuestionLoc, diag::err_conditional_void_nonvoid)
+ << (LVoid ? RTy : LTy) << (LVoid ? 0 : 1)
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ // Neither is void.
+
+ // C++11 [expr.cond]p3
+ // Otherwise, if the second and third operand have different types, and
+ // either has (cv) class type [...] an attempt is made to convert each of
+ // those operands to the type of the other.
+ if (!Context.hasSameType(LTy, RTy) &&
+ (LTy->isRecordType() || RTy->isRecordType())) {
+ // These return true if a single direction is already ambiguous.
+ QualType L2RType, R2LType;
+ bool HaveL2R, HaveR2L;
+    if (TryClassUnification(*this, LHS.get(), RHS.get(), QuestionLoc,
+                            HaveL2R, L2RType))
+      return QualType();
+    if (TryClassUnification(*this, RHS.get(), LHS.get(), QuestionLoc,
+                            HaveR2L, R2LType))
+      return QualType();
+
+ // If both can be converted, [...] the program is ill-formed.
+ if (HaveL2R && HaveR2L) {
+ Diag(QuestionLoc, diag::err_conditional_ambiguous)
+ << LTy << RTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ // If exactly one conversion is possible, that conversion is applied to
+ // the chosen operand and the converted operands are used in place of the
+ // original operands for the remainder of this section.
+ if (HaveL2R) {
+ if (ConvertForConditional(*this, LHS, L2RType) || LHS.isInvalid())
+ return QualType();
+ LTy = LHS.get()->getType();
+ } else if (HaveR2L) {
+ if (ConvertForConditional(*this, RHS, R2LType) || RHS.isInvalid())
+ return QualType();
+ RTy = RHS.get()->getType();
+ }
+ }
+
+ // C++11 [expr.cond]p3
+ // if both are glvalues of the same value category and the same type except
+ // for cv-qualification, an attempt is made to convert each of those
+ // operands to the type of the other.
+ ExprValueKind LVK = LHS.get()->getValueKind();
+ ExprValueKind RVK = RHS.get()->getValueKind();
+ if (!Context.hasSameType(LTy, RTy) &&
+ Context.hasSameUnqualifiedType(LTy, RTy) &&
+ LVK == RVK && LVK != VK_RValue) {
+ // Since the unqualified types are reference-related and we require the
+ // result to be as if a reference bound directly, the only conversion
+ // we can perform is to add cv-qualifiers.
+ Qualifiers LCVR = Qualifiers::fromCVRMask(LTy.getCVRQualifiers());
+ Qualifiers RCVR = Qualifiers::fromCVRMask(RTy.getCVRQualifiers());
+    if (RCVR.isStrictSupersetOf(LCVR)) {
+      LHS = ImpCastExprToType(LHS.get(), RTy, CK_NoOp, LVK);
+      LTy = LHS.get()->getType();
+    } else if (LCVR.isStrictSupersetOf(RCVR)) {
+      RHS = ImpCastExprToType(RHS.get(), LTy, CK_NoOp, RVK);
+      RTy = RHS.get()->getType();
+    }
+ }
+
+ // C++11 [expr.cond]p4
+ // If the second and third operands are glvalues of the same value
+ // category and have the same type, the result is of that type and
+ // value category and it is a bit-field if the second or the third
+ // operand is a bit-field, or if both are bit-fields.
+ // We only extend this to bitfields, not to the crazy other kinds of
+ // l-values.
+ bool Same = Context.hasSameType(LTy, RTy);
+ if (Same && LVK == RVK && LVK != VK_RValue &&
+ LHS.get()->isOrdinaryOrBitFieldObject() &&
+ RHS.get()->isOrdinaryOrBitFieldObject()) {
+ VK = LHS.get()->getValueKind();
+ if (LHS.get()->getObjectKind() == OK_BitField ||
+ RHS.get()->getObjectKind() == OK_BitField)
+ OK = OK_BitField;
+ return LTy;
+ }
+
+ // C++11 [expr.cond]p5
+ // Otherwise, the result is a prvalue. If the second and third operands
+ // do not have the same type, and either has (cv) class type, ...
+ if (!Same && (LTy->isRecordType() || RTy->isRecordType())) {
+ // ... overload resolution is used to determine the conversions (if any)
+ // to be applied to the operands. If the overload resolution fails, the
+ // program is ill-formed.
+ if (FindConditionalOverload(*this, LHS, RHS, QuestionLoc))
+ return QualType();
+ }
+
+ // C++11 [expr.cond]p6
+ // Lvalue-to-rvalue, array-to-pointer, and function-to-pointer standard
+ // conversions are performed on the second and third operands.
+ LHS = DefaultFunctionArrayLvalueConversion(LHS.get());
+ RHS = DefaultFunctionArrayLvalueConversion(RHS.get());
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+ LTy = LHS.get()->getType();
+ RTy = RHS.get()->getType();
+
+ // After those conversions, one of the following shall hold:
+ // -- The second and third operands have the same type; the result
+ // is of that type. If the operands have class type, the result
+ // is a prvalue temporary of the result type, which is
+ // copy-initialized from either the second operand or the third
+ // operand depending on the value of the first operand.
+ if (Context.getCanonicalType(LTy) == Context.getCanonicalType(RTy)) {
+ if (LTy->isRecordType()) {
+ // The operands have class type. Make a temporary copy.
+ if (RequireNonAbstractType(QuestionLoc, LTy,
+ diag::err_allocation_of_abstract_type))
+ return QualType();
+ InitializedEntity Entity = InitializedEntity::InitializeTemporary(LTy);
+
+ ExprResult LHSCopy = PerformCopyInitialization(Entity,
+ SourceLocation(),
+ LHS);
+ if (LHSCopy.isInvalid())
+ return QualType();
+
+ ExprResult RHSCopy = PerformCopyInitialization(Entity,
+ SourceLocation(),
+ RHS);
+ if (RHSCopy.isInvalid())
+ return QualType();
+
+ LHS = LHSCopy;
+ RHS = RHSCopy;
+ }
+
+ return LTy;
+ }
+
+ // Extension: conditional operator involving vector types.
+ if (LTy->isVectorType() || RTy->isVectorType())
+ return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false,
+ /*AllowBothBool*/true,
+ /*AllowBoolConversions*/false);
+
+ // -- The second and third operands have arithmetic or enumeration type;
+ // the usual arithmetic conversions are performed to bring them to a
+ // common type, and the result is of that type.
+ if (LTy->isArithmeticType() && RTy->isArithmeticType()) {
+ QualType ResTy = UsualArithmeticConversions(LHS, RHS);
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+
+ LHS = ImpCastExprToType(LHS.get(), ResTy, PrepareScalarCast(LHS, ResTy));
+ RHS = ImpCastExprToType(RHS.get(), ResTy, PrepareScalarCast(RHS, ResTy));
+
+ return ResTy;
+ }
+
+ // -- The second and third operands have pointer type, or one has pointer
+ // type and the other is a null pointer constant, or both are null
+ // pointer constants, at least one of which is non-integral; pointer
+ // conversions and qualification conversions are performed to bring them
+ // to their composite pointer type. The result is of the composite
+ // pointer type.
+ // -- The second and third operands have pointer to member type, or one has
+ // pointer to member type and the other is a null pointer constant;
+ // pointer to member conversions and qualification conversions are
+ // performed to bring them to a common type, whose cv-qualification
+ // shall match the cv-qualification of either the second or the third
+ // operand. The result is of the common type.
+ bool NonStandardCompositeType = false;
+ QualType Composite = FindCompositePointerType(QuestionLoc, LHS, RHS,
+ isSFINAEContext() ? nullptr
+ : &NonStandardCompositeType);
+ if (!Composite.isNull()) {
+ if (NonStandardCompositeType)
+ Diag(QuestionLoc,
+ diag::ext_typecheck_cond_incompatible_operands_nonstandard)
+ << LTy << RTy << Composite
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+
+ return Composite;
+ }
+
+  // Similarly, attempt to find the composite type of two Objective-C
+  // pointers.
+ Composite = FindCompositeObjCPointerType(LHS, RHS, QuestionLoc);
+ if (!Composite.isNull())
+ return Composite;
+
+ // Check if we are using a null with a non-pointer type.
+ if (DiagnoseConditionalForNull(LHS.get(), RHS.get(), QuestionLoc))
+ return QualType();
+
+ Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands)
+ << LHS.get()->getType() << RHS.get()->getType()
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return QualType();
+}
+
+/// \brief Find a merged pointer type and convert the two expressions to it.
+///
+/// This finds the composite pointer type (or member pointer type) for @p E1
+/// and @p E2 according to C++11 5.9p2. It converts both expressions to this
+/// type and returns it.
+/// It does not emit diagnostics.
+///
+/// \param Loc The location of the operator requiring these two expressions to
+/// be converted to the composite pointer type.
+///
+/// If \p NonStandardCompositeType is non-NULL, then we are permitted to find
+/// a non-standard (but still sane) composite type to which both expressions
+/// can be converted. When such a type is chosen, \c *NonStandardCompositeType
+/// will be set true.
+QualType Sema::FindCompositePointerType(SourceLocation Loc,
+ Expr *&E1, Expr *&E2,
+ bool *NonStandardCompositeType) {
+ if (NonStandardCompositeType)
+ *NonStandardCompositeType = false;
+
+ assert(getLangOpts().CPlusPlus && "This function assumes C++");
+ QualType T1 = E1->getType(), T2 = E2->getType();
+
+ // C++11 5.9p2
+ // Pointer conversions and qualification conversions are performed on
+ // pointer operands to bring them to their composite pointer type. If
+ // one operand is a null pointer constant, the composite pointer type is
+ // std::nullptr_t if the other operand is also a null pointer constant or,
+ // if the other operand is a pointer, the type of the other operand.
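+ // For example, merging 'nullptr' with an 'int *' operand yields 'int *',
+ // while merging 'nullptr' with the literal 0 yields std::nullptr_t.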
+ if (!T1->isAnyPointerType() && !T1->isMemberPointerType() &&
+ !T2->isAnyPointerType() && !T2->isMemberPointerType()) {
+ if (T1->isNullPtrType() &&
+ E2->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
+ E2 = ImpCastExprToType(E2, T1, CK_NullToPointer).get();
+ return T1;
+ }
+ if (T2->isNullPtrType() &&
+ E1->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
+ E1 = ImpCastExprToType(E1, T2, CK_NullToPointer).get();
+ return T2;
+ }
+ return QualType();
+ }
+
+ if (E1->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
+ if (T2->isMemberPointerType())
+ E1 = ImpCastExprToType(E1, T2, CK_NullToMemberPointer).get();
+ else
+ E1 = ImpCastExprToType(E1, T2, CK_NullToPointer).get();
+ return T2;
+ }
+ if (E2->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
+ if (T1->isMemberPointerType())
+ E2 = ImpCastExprToType(E2, T1, CK_NullToMemberPointer).get();
+ else
+ E2 = ImpCastExprToType(E2, T1, CK_NullToPointer).get();
+ return T1;
+ }
+
+ // Now both have to be pointers or member pointers.
+ if ((!T1->isPointerType() && !T1->isMemberPointerType()) ||
+ (!T2->isPointerType() && !T2->isMemberPointerType()))
+ return QualType();
+
+ // Otherwise, if one of the operands has type "pointer to cv1 void," then
+ // the other has type "pointer to cv2 T" and the composite pointer type is
+ // "pointer to cv12 void," where cv12 is the union of cv1 and cv2.
+ // Otherwise, the composite pointer type is a pointer type similar to the
+ // type of one of the operands, with a cv-qualification signature that is
+ // the union of the cv-qualification signatures of the operand types.
+ // In practice, the first part here is redundant; it's subsumed by the second.
+ // What we do here is, we build the two possible composite types, and try the
+ // conversions in both directions. If only one works, or if the two composite
+ // types are the same, we have succeeded.
+ // FIXME: extended qualifiers?
+ typedef SmallVector<unsigned, 4> QualifierVector;
+ QualifierVector QualifierUnion;
+ typedef SmallVector<std::pair<const Type *, const Type *>, 4>
+ ContainingClassVector;
+ ContainingClassVector MemberOfClass;
+ QualType Composite1 = Context.getCanonicalType(T1),
+ Composite2 = Context.getCanonicalType(T2);
+ unsigned NeedConstBefore = 0;
+ do {
+ const PointerType *Ptr1, *Ptr2;
+ if ((Ptr1 = Composite1->getAs<PointerType>()) &&
+ (Ptr2 = Composite2->getAs<PointerType>())) {
+ Composite1 = Ptr1->getPointeeType();
+ Composite2 = Ptr2->getPointeeType();
+
+ // If we're allowed to create a non-standard composite type, keep track
+ // of where we need to fill in additional 'const' qualifiers.
+ if (NonStandardCompositeType &&
+ Composite1.getCVRQualifiers() != Composite2.getCVRQualifiers())
+ NeedConstBefore = QualifierUnion.size();
+
+ QualifierUnion.push_back(
+ Composite1.getCVRQualifiers() | Composite2.getCVRQualifiers());
+ MemberOfClass.push_back(std::make_pair(nullptr, nullptr));
+ continue;
+ }
+
+ const MemberPointerType *MemPtr1, *MemPtr2;
+ if ((MemPtr1 = Composite1->getAs<MemberPointerType>()) &&
+ (MemPtr2 = Composite2->getAs<MemberPointerType>())) {
+ Composite1 = MemPtr1->getPointeeType();
+ Composite2 = MemPtr2->getPointeeType();
+
+ // If we're allowed to create a non-standard composite type, keep track
+ // of where we need to fill in additional 'const' qualifiers.
+ if (NonStandardCompositeType &&
+ Composite1.getCVRQualifiers() != Composite2.getCVRQualifiers())
+ NeedConstBefore = QualifierUnion.size();
+
+ QualifierUnion.push_back(
+ Composite1.getCVRQualifiers() | Composite2.getCVRQualifiers());
+ MemberOfClass.push_back(std::make_pair(MemPtr1->getClass(),
+ MemPtr2->getClass()));
+ continue;
+ }
+
+ // FIXME: block pointer types?
+
+ // Cannot unwrap any more types.
+ break;
+ } while (true);
+
+ if (NeedConstBefore && NonStandardCompositeType) {
+ // Extension: Add 'const' to qualifiers that come before the first qualifier
+ // mismatch, so that our (non-standard!) composite type meets the
+ // requirements of C++ [conv.qual]p4 bullet 3.
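+ // For example, merging 'int **' and 'const int **' would otherwise
+ // produce 'const int **', to which 'int **' cannot be converted;
+ // adding 'const' yields 'const int *const *', which accepts both.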
+ for (unsigned I = 0; I != NeedConstBefore; ++I) {
+ if ((QualifierUnion[I] & Qualifiers::Const) == 0) {
+ QualifierUnion[I] = QualifierUnion[I] | Qualifiers::Const;
+ *NonStandardCompositeType = true;
+ }
+ }
+ }
+
+ // Rewrap the composites as pointers or member pointers with the union CVRs.
+ ContainingClassVector::reverse_iterator MOC
+ = MemberOfClass.rbegin();
+ for (QualifierVector::reverse_iterator
+ I = QualifierUnion.rbegin(),
+ E = QualifierUnion.rend();
+ I != E; (void)++I, ++MOC) {
+ Qualifiers Quals = Qualifiers::fromCVRMask(*I);
+ if (MOC->first && MOC->second) {
+ // Rebuild member pointer type
+ Composite1 = Context.getMemberPointerType(
+ Context.getQualifiedType(Composite1, Quals),
+ MOC->first);
+ Composite2 = Context.getMemberPointerType(
+ Context.getQualifiedType(Composite2, Quals),
+ MOC->second);
+ } else {
+ // Rebuild pointer type
+ Composite1
+ = Context.getPointerType(Context.getQualifiedType(Composite1, Quals));
+ Composite2
+ = Context.getPointerType(Context.getQualifiedType(Composite2, Quals));
+ }
+ }
+
+ // Try to convert to the first composite pointer type.
+ InitializedEntity Entity1
+ = InitializedEntity::InitializeTemporary(Composite1);
+ InitializationKind Kind
+ = InitializationKind::CreateCopy(Loc, SourceLocation());
+ InitializationSequence E1ToC1(*this, Entity1, Kind, E1);
+ InitializationSequence E2ToC1(*this, Entity1, Kind, E2);
+
+ if (E1ToC1 && E2ToC1) {
+ // Conversion to Composite1 is viable.
+ if (!Context.hasSameType(Composite1, Composite2)) {
+ // Composite2 is a different type from Composite1. Check whether
+ // Composite2 is also viable.
+ InitializedEntity Entity2
+ = InitializedEntity::InitializeTemporary(Composite2);
+ InitializationSequence E1ToC2(*this, Entity2, Kind, E1);
+ InitializationSequence E2ToC2(*this, Entity2, Kind, E2);
+ if (E1ToC2 && E2ToC2) {
+ // Both Composite1 and Composite2 are viable and are different;
+ // this is an ambiguity.
+ return QualType();
+ }
+ }
+
+ // Convert E1 to Composite1
+ ExprResult E1Result
+ = E1ToC1.Perform(*this, Entity1, Kind, E1);
+ if (E1Result.isInvalid())
+ return QualType();
+ E1 = E1Result.getAs<Expr>();
+
+ // Convert E2 to Composite1
+ ExprResult E2Result
+ = E2ToC1.Perform(*this, Entity1, Kind, E2);
+ if (E2Result.isInvalid())
+ return QualType();
+ E2 = E2Result.getAs<Expr>();
+
+ return Composite1;
+ }
+
+ // Check whether Composite2 is viable.
+ InitializedEntity Entity2
+ = InitializedEntity::InitializeTemporary(Composite2);
+ InitializationSequence E1ToC2(*this, Entity2, Kind, E1);
+ InitializationSequence E2ToC2(*this, Entity2, Kind, E2);
+ if (!E1ToC2 || !E2ToC2)
+ return QualType();
+
+ // Convert E1 to Composite2
+ ExprResult E1Result
+ = E1ToC2.Perform(*this, Entity2, Kind, E1);
+ if (E1Result.isInvalid())
+ return QualType();
+ E1 = E1Result.getAs<Expr>();
+
+ // Convert E2 to Composite2
+ ExprResult E2Result
+ = E2ToC2.Perform(*this, Entity2, Kind, E2);
+ if (E2Result.isInvalid())
+ return QualType();
+ E2 = E2Result.getAs<Expr>();
+
+ return Composite2;
+}
+
+ExprResult Sema::MaybeBindToTemporary(Expr *E) {
+ if (!E)
+ return ExprError();
+
+ assert(!isa<CXXBindTemporaryExpr>(E) && "Double-bound temporary?");
+
+ // If the result is a glvalue, we shouldn't bind it.
+ if (!E->isRValue())
+ return E;
+
+ // In ARC, calls that return a retainable type can return retained,
+ // in which case we have to insert a consuming cast.
+ if (getLangOpts().ObjCAutoRefCount &&
+ E->getType()->isObjCRetainableType()) {
+
+ bool ReturnsRetained;
+
+ // For actual calls, we compute this by examining the type of the
+ // called value.
+ if (CallExpr *Call = dyn_cast<CallExpr>(E)) {
+ Expr *Callee = Call->getCallee()->IgnoreParens();
+ QualType T = Callee->getType();
+
+ if (T == Context.BoundMemberTy) {
+ // Handle pointer-to-members.
+ if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Callee))
+ T = BinOp->getRHS()->getType();
+ else if (MemberExpr *Mem = dyn_cast<MemberExpr>(Callee))
+ T = Mem->getMemberDecl()->getType();
+ }
+
+ if (const PointerType *Ptr = T->getAs<PointerType>())
+ T = Ptr->getPointeeType();
+ else if (const BlockPointerType *Ptr = T->getAs<BlockPointerType>())
+ T = Ptr->getPointeeType();
+ else if (const MemberPointerType *MemPtr = T->getAs<MemberPointerType>())
+ T = MemPtr->getPointeeType();
+
+ const FunctionType *FTy = T->getAs<FunctionType>();
+ assert(FTy && "call to value not of function type?");
+ ReturnsRetained = FTy->getExtInfo().getProducesResult();
+
+ // ActOnStmtExpr arranges things so that StmtExprs of retainable
+ // type always produce a +1 object.
+ } else if (isa<StmtExpr>(E)) {
+ ReturnsRetained = true;
+
+ // We hit this case with the lambda conversion-to-block optimization;
+ // we don't want any extra casts here.
+ } else if (isa<CastExpr>(E) &&
+ isa<BlockExpr>(cast<CastExpr>(E)->getSubExpr())) {
+ return E;
+
+ // For message sends and property references, we try to find an
+ // actual method. FIXME: we should infer retention by selector in
+ // cases where we don't have an actual method.
+ } else {
+ ObjCMethodDecl *D = nullptr;
+ if (ObjCMessageExpr *Send = dyn_cast<ObjCMessageExpr>(E)) {
+ D = Send->getMethodDecl();
+ } else if (ObjCBoxedExpr *BoxedExpr = dyn_cast<ObjCBoxedExpr>(E)) {
+ D = BoxedExpr->getBoxingMethod();
+ } else if (ObjCArrayLiteral *ArrayLit = dyn_cast<ObjCArrayLiteral>(E)) {
+ D = ArrayLit->getArrayWithObjectsMethod();
+ } else if (ObjCDictionaryLiteral *DictLit
+ = dyn_cast<ObjCDictionaryLiteral>(E)) {
+ D = DictLit->getDictWithObjectsMethod();
+ }
+
+ ReturnsRetained = (D && D->hasAttr<NSReturnsRetainedAttr>());
+
+ // Don't do reclaims on performSelector calls; despite their
+ // return type, the invoked method doesn't necessarily actually
+ // return an object.
+ if (!ReturnsRetained &&
+ D && D->getMethodFamily() == OMF_performSelector)
+ return E;
+ }
+
+ // Don't reclaim an object of Class type.
+ if (!ReturnsRetained && E->getType()->isObjCARCImplicitlyUnretainedType())
+ return E;
+
+ ExprNeedsCleanups = true;
+
+ CastKind ck = (ReturnsRetained ? CK_ARCConsumeObject
+ : CK_ARCReclaimReturnedObject);
+ return ImplicitCastExpr::Create(Context, E->getType(), ck, E, nullptr,
+ VK_RValue);
+ }
+
+ if (!getLangOpts().CPlusPlus)
+ return E;
+
+ // Search for the base element type (cf. ASTContext::getBaseElementType) with
+ // a fast path for the common case that the type is directly a RecordType.
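+ // For example, for 'S arr[2][3]' the base element type is 'S'.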
+ const Type *T = Context.getCanonicalType(E->getType().getTypePtr());
+ const RecordType *RT = nullptr;
+ while (!RT) {
+ switch (T->getTypeClass()) {
+ case Type::Record:
+ RT = cast<RecordType>(T);
+ break;
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::DependentSizedArray:
+ T = cast<ArrayType>(T)->getElementType().getTypePtr();
+ break;
+ default:
+ return E;
+ }
+ }
+
+ // That should be enough to guarantee that this type is complete, if we're
+ // not processing a decltype expression.
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->isInvalidDecl() || RD->isDependentContext())
+ return E;
+
+ bool IsDecltype = ExprEvalContexts.back().IsDecltype;
+ CXXDestructorDecl *Destructor = IsDecltype ? nullptr : LookupDestructor(RD);
+
+ if (Destructor) {
+ MarkFunctionReferenced(E->getExprLoc(), Destructor);
+ CheckDestructorAccess(E->getExprLoc(), Destructor,
+ PDiag(diag::err_access_dtor_temp)
+ << E->getType());
+ if (DiagnoseUseOfDecl(Destructor, E->getExprLoc()))
+ return ExprError();
+
+ // If destructor is trivial, we can avoid the extra copy.
+ if (Destructor->isTrivial())
+ return E;
+
+ // We need a cleanup, but we don't need to remember the temporary.
+ ExprNeedsCleanups = true;
+ }
+
+ CXXTemporary *Temp = CXXTemporary::Create(Context, Destructor);
+ CXXBindTemporaryExpr *Bind = CXXBindTemporaryExpr::Create(Context, Temp, E);
+
+ if (IsDecltype)
+ ExprEvalContexts.back().DelayedDecltypeBinds.push_back(Bind);
+
+ return Bind;
+}
+
+ExprResult
+Sema::MaybeCreateExprWithCleanups(ExprResult SubExpr) {
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ return MaybeCreateExprWithCleanups(SubExpr.get());
+}
+
+Expr *Sema::MaybeCreateExprWithCleanups(Expr *SubExpr) {
+ assert(SubExpr && "subexpression can't be null!");
+
+ CleanupVarDeclMarking();
+
+ unsigned FirstCleanup = ExprEvalContexts.back().NumCleanupObjects;
+ assert(ExprCleanupObjects.size() >= FirstCleanup);
+ assert(ExprNeedsCleanups || ExprCleanupObjects.size() == FirstCleanup);
+ if (!ExprNeedsCleanups)
+ return SubExpr;
+
+ auto Cleanups = llvm::makeArrayRef(ExprCleanupObjects.begin() + FirstCleanup,
+ ExprCleanupObjects.size() - FirstCleanup);
+
+ Expr *E = ExprWithCleanups::Create(Context, SubExpr, Cleanups);
+ DiscardCleanupsInEvaluationContext();
+
+ return E;
+}
+
+Stmt *Sema::MaybeCreateStmtWithCleanups(Stmt *SubStmt) {
+ assert(SubStmt && "sub-statement can't be null!");
+
+ CleanupVarDeclMarking();
+
+ if (!ExprNeedsCleanups)
+ return SubStmt;
+
+ // FIXME: In order to attach the temporaries, wrap the statement into
+ // a StmtExpr; currently this is only used for asm statements.
+ // This is hacky, either create a new CXXStmtWithTemporaries statement or
+ // a new AsmStmtWithTemporaries.
+ CompoundStmt *CompStmt = new (Context) CompoundStmt(Context, SubStmt,
+ SourceLocation(),
+ SourceLocation());
+ Expr *E = new (Context) StmtExpr(CompStmt, Context.VoidTy, SourceLocation(),
+ SourceLocation());
+ return MaybeCreateExprWithCleanups(E);
+}
+
+/// Process the expression contained within a decltype. For such expressions,
+/// certain semantic checks on temporaries are delayed until this point, and
+/// are omitted for the 'topmost' call in the decltype expression. If the
+/// topmost call bound a temporary, strip that temporary off the expression.
+ExprResult Sema::ActOnDecltypeExpression(Expr *E) {
+ assert(ExprEvalContexts.back().IsDecltype && "not in a decltype expression");
+
+ // C++11 [expr.call]p11:
+ // If a function call is a prvalue of object type,
+ // -- if the function call is either
+ // -- the operand of a decltype-specifier, or
+ // -- the right operand of a comma operator that is the operand of a
+ // decltype-specifier,
+ // a temporary object is not introduced for the prvalue.
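+ //
+ // For example, 'decltype(f())' does not require the return type of f to be
+ // complete or to have an accessible destructor.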
+
+ // Recursively rebuild ParenExprs and comma expressions to strip out the
+ // outermost CXXBindTemporaryExpr, if any.
+ if (ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
+ ExprResult SubExpr = ActOnDecltypeExpression(PE->getSubExpr());
+ if (SubExpr.isInvalid())
+ return ExprError();
+ if (SubExpr.get() == PE->getSubExpr())
+ return E;
+ return ActOnParenExpr(PE->getLParen(), PE->getRParen(), SubExpr.get());
+ }
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_Comma) {
+ ExprResult RHS = ActOnDecltypeExpression(BO->getRHS());
+ if (RHS.isInvalid())
+ return ExprError();
+ if (RHS.get() == BO->getRHS())
+ return E;
+ return new (Context) BinaryOperator(
+ BO->getLHS(), RHS.get(), BO_Comma, BO->getType(), BO->getValueKind(),
+ BO->getObjectKind(), BO->getOperatorLoc(), BO->isFPContractable());
+ }
+ }
+
+ CXXBindTemporaryExpr *TopBind = dyn_cast<CXXBindTemporaryExpr>(E);
+ CallExpr *TopCall = TopBind ? dyn_cast<CallExpr>(TopBind->getSubExpr())
+ : nullptr;
+ if (TopCall)
+ E = TopCall;
+ else
+ TopBind = nullptr;
+
+ // Disable the special decltype handling now.
+ ExprEvalContexts.back().IsDecltype = false;
+
+ // In MS mode, don't perform any extra checking of call return types within a
+ // decltype expression.
+ if (getLangOpts().MSVCCompat)
+ return E;
+
+ // Perform the semantic checks we delayed until this point.
+ for (unsigned I = 0, N = ExprEvalContexts.back().DelayedDecltypeCalls.size();
+ I != N; ++I) {
+ CallExpr *Call = ExprEvalContexts.back().DelayedDecltypeCalls[I];
+ if (Call == TopCall)
+ continue;
+
+ if (CheckCallReturnType(Call->getCallReturnType(Context),
+ Call->getLocStart(),
+ Call, Call->getDirectCallee()))
+ return ExprError();
+ }
+
+ // Now all relevant types are complete, check the destructors are accessible
+ // and non-deleted, and annotate them on the temporaries.
+ for (unsigned I = 0, N = ExprEvalContexts.back().DelayedDecltypeBinds.size();
+ I != N; ++I) {
+ CXXBindTemporaryExpr *Bind =
+ ExprEvalContexts.back().DelayedDecltypeBinds[I];
+ if (Bind == TopBind)
+ continue;
+
+ CXXTemporary *Temp = Bind->getTemporary();
+
+ CXXRecordDecl *RD =
+ Bind->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ CXXDestructorDecl *Destructor = LookupDestructor(RD);
+ Temp->setDestructor(Destructor);
+
+ MarkFunctionReferenced(Bind->getExprLoc(), Destructor);
+ CheckDestructorAccess(Bind->getExprLoc(), Destructor,
+ PDiag(diag::err_access_dtor_temp)
+ << Bind->getType());
+ if (DiagnoseUseOfDecl(Destructor, Bind->getExprLoc()))
+ return ExprError();
+
+ // We need a cleanup, but we don't need to remember the temporary.
+ ExprNeedsCleanups = true;
+ }
+
+ // Possibly strip off the top CXXBindTemporaryExpr.
+ return E;
+}
+
+/// Note a set of 'operator->' functions that were used for a member access.
+static void noteOperatorArrows(Sema &S,
+ ArrayRef<FunctionDecl *> OperatorArrows) {
+ unsigned SkipStart = OperatorArrows.size(), SkipCount = 0;
+ // FIXME: Make this configurable?
+ unsigned Limit = 9;
+ if (OperatorArrows.size() > Limit) {
+ // Produce Limit-1 normal notes and one 'skipping' note.
+ SkipStart = (Limit - 1) / 2 + (Limit - 1) % 2;
+ SkipCount = OperatorArrows.size() - (Limit - 1);
+ }
+
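+ // With Limit == 9, this notes the first four and last four 'operator->'s
+ // and emits a single note covering those skipped in between.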
+ for (unsigned I = 0; I < OperatorArrows.size(); /**/) {
+ if (I == SkipStart) {
+ S.Diag(OperatorArrows[I]->getLocation(),
+ diag::note_operator_arrows_suppressed)
+ << SkipCount;
+ I += SkipCount;
+ } else {
+ S.Diag(OperatorArrows[I]->getLocation(), diag::note_operator_arrow_here)
+ << OperatorArrows[I]->getCallResultType();
+ ++I;
+ }
+ }
+}
+
+ExprResult Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ ParsedType &ObjectType,
+ bool &MayBePseudoDestructor) {
+ // Since this might be a postfix expression, get rid of ParenListExprs.
+ ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Base);
+ if (Result.isInvalid()) return ExprError();
+ Base = Result.get();
+
+ Result = CheckPlaceholderExpr(Base);
+ if (Result.isInvalid()) return ExprError();
+ Base = Result.get();
+
+ QualType BaseType = Base->getType();
+ MayBePseudoDestructor = false;
+ if (BaseType->isDependentType()) {
+ // If we have a pointer to a dependent type and are using the -> operator,
+ // the object type is the type that the pointer points to. We might still
+ // have enough information about that type to do something useful.
+ if (OpKind == tok::arrow)
+ if (const PointerType *Ptr = BaseType->getAs<PointerType>())
+ BaseType = Ptr->getPointeeType();
+
+ ObjectType = ParsedType::make(BaseType);
+ MayBePseudoDestructor = true;
+ return Base;
+ }
+
+ // C++ [over.match.oper]p8:
+ // [...] When operator-> returns, the operator-> is applied to the value
+ // returned, with the original second operand.
+ if (OpKind == tok::arrow) {
+ QualType StartingType = BaseType;
+ bool NoArrowOperatorFound = false;
+ bool FirstIteration = true;
+ FunctionDecl *CurFD = dyn_cast<FunctionDecl>(CurContext);
+ // The set of types we've considered so far.
+ llvm::SmallPtrSet<CanQualType,8> CTypes;
+ SmallVector<FunctionDecl*, 8> OperatorArrows;
+ CTypes.insert(Context.getCanonicalType(BaseType));
+
+ while (BaseType->isRecordType()) {
+ if (OperatorArrows.size() >= getLangOpts().ArrowDepth) {
+ Diag(OpLoc, diag::err_operator_arrow_depth_exceeded)
+ << StartingType << getLangOpts().ArrowDepth << Base->getSourceRange();
+ noteOperatorArrows(*this, OperatorArrows);
+ Diag(OpLoc, diag::note_operator_arrow_depth)
+ << getLangOpts().ArrowDepth;
+ return ExprError();
+ }
+
+ Result = BuildOverloadedArrowExpr(
+ S, Base, OpLoc,
+ // When in a template specialization and on the first loop iteration,
+ // potentially give the default diagnostic (with the fixit in a
+ // separate note) instead of having the error reported back to here
+ // and giving a diagnostic with a fixit attached to the error itself.
+ (FirstIteration && CurFD && CurFD->isFunctionTemplateSpecialization())
+ ? nullptr
+ : &NoArrowOperatorFound);
+ if (Result.isInvalid()) {
+ if (NoArrowOperatorFound) {
+ if (FirstIteration) {
+ Diag(OpLoc, diag::err_typecheck_member_reference_suggestion)
+ << BaseType << 1 << Base->getSourceRange()
+ << FixItHint::CreateReplacement(OpLoc, ".");
+ OpKind = tok::period;
+ break;
+ }
+ Diag(OpLoc, diag::err_typecheck_member_reference_arrow)
+ << BaseType << Base->getSourceRange();
+ CallExpr *CE = dyn_cast<CallExpr>(Base);
+ if (Decl *CD = (CE ? CE->getCalleeDecl() : nullptr)) {
+ Diag(CD->getLocStart(),
+ diag::note_member_reference_arrow_from_operator_arrow);
+ }
+ }
+ return ExprError();
+ }
+ Base = Result.get();
+ if (CXXOperatorCallExpr *OpCall = dyn_cast<CXXOperatorCallExpr>(Base))
+ OperatorArrows.push_back(OpCall->getDirectCallee());
+ BaseType = Base->getType();
+ CanQualType CBaseType = Context.getCanonicalType(BaseType);
+ if (!CTypes.insert(CBaseType).second) {
+ Diag(OpLoc, diag::err_operator_arrow_circular) << StartingType;
+ noteOperatorArrows(*this, OperatorArrows);
+ return ExprError();
+ }
+ FirstIteration = false;
+ }
+
+ if (OpKind == tok::arrow &&
+ (BaseType->isPointerType() || BaseType->isObjCObjectPointerType()))
+ BaseType = BaseType->getPointeeType();
+ }
+
+ // Objective-C properties allow "." access on Objective-C pointer types,
+ // so adjust the base type to the object type itself.
+ if (BaseType->isObjCObjectPointerType())
+ BaseType = BaseType->getPointeeType();
+
+ // C++ [basic.lookup.classref]p2:
+ // [...] If the type of the object expression is of pointer to scalar
+ // type, the unqualified-id is looked up in the context of the complete
+ // postfix-expression.
+ //
+ // This also indicates that we could be parsing a pseudo-destructor-name.
+ // Note that Objective-C class and object types can be pseudo-destructor
+ // expressions or normal member (ivar or property) access expressions, and
+ // it's legal for the type to be incomplete if this is a pseudo-destructor
+ // call. We'll do more incomplete-type checks later in the lookup process,
+ // so just skip this check for ObjC types.
+ if (BaseType->isObjCObjectOrInterfaceType()) {
+ ObjectType = ParsedType::make(BaseType);
+ MayBePseudoDestructor = true;
+ return Base;
+ } else if (!BaseType->isRecordType()) {
+ ObjectType = ParsedType();
+ MayBePseudoDestructor = true;
+ return Base;
+ }
+
+ // The object type must be complete (or dependent), or
+ // C++11 [expr.prim.general]p3:
+ // Unlike the object expression in other contexts, *this is not required to
+ // be of complete type for purposes of class member access (5.2.5) outside
+ // the member function body.
+ if (!BaseType->isDependentType() &&
+ !isThisOutsideMemberFunctionBody(BaseType) &&
+ RequireCompleteType(OpLoc, BaseType, diag::err_incomplete_member_access))
+ return ExprError();
+
+ // C++ [basic.lookup.classref]p2:
+ // If the id-expression in a class member access (5.2.5) is an
+ // unqualified-id, and the type of the object expression is of a class
+ // type C (or of pointer to a class type C), the unqualified-id is looked
+ // up in the scope of class C. [...]
+ ObjectType = ParsedType::make(BaseType);
+ return Base;
+}
+
+static bool CheckArrow(Sema& S, QualType& ObjectType, Expr *&Base,
+ tok::TokenKind& OpKind, SourceLocation OpLoc) {
+ if (Base->hasPlaceholderType()) {
+ ExprResult result = S.CheckPlaceholderExpr(Base);
+ if (result.isInvalid()) return true;
+ Base = result.get();
+ }
+ ObjectType = Base->getType();
+
+ // C++ [expr.pseudo]p2:
+ // The left-hand side of the dot operator shall be of scalar type. The
+ // left-hand side of the arrow operator shall be of pointer to scalar type.
+ // This scalar type is the object type.
+ // Note that this is rather different from the normal handling for the
+ // arrow operator.
+ if (OpKind == tok::arrow) {
+ if (const PointerType *Ptr = ObjectType->getAs<PointerType>()) {
+ ObjectType = Ptr->getPointeeType();
+ } else if (!Base->isTypeDependent()) {
+ // The user wrote "p->" when she probably meant "p."; fix it.
+ S.Diag(OpLoc, diag::err_typecheck_member_reference_suggestion)
+ << ObjectType << true
+ << FixItHint::CreateReplacement(OpLoc, ".");
+ if (S.isSFINAEContext())
+ return true;
+
+ OpKind = tok::period;
+ }
+ }
+
+ return false;
+}
+
+ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ const CXXScopeSpec &SS,
+ TypeSourceInfo *ScopeTypeInfo,
+ SourceLocation CCLoc,
+ SourceLocation TildeLoc,
+ PseudoDestructorTypeStorage Destructed) {
+ TypeSourceInfo *DestructedTypeInfo = Destructed.getTypeSourceInfo();
+
+ QualType ObjectType;
+ if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc))
+ return ExprError();
+
+ if (!ObjectType->isDependentType() && !ObjectType->isScalarType() &&
+ !ObjectType->isVectorType()) {
+ if (getLangOpts().MSVCCompat && ObjectType->isVoidType())
+ Diag(OpLoc, diag::ext_pseudo_dtor_on_void) << Base->getSourceRange();
+ else {
+ Diag(OpLoc, diag::err_pseudo_dtor_base_not_scalar)
+ << ObjectType << Base->getSourceRange();
+ return ExprError();
+ }
+ }
+
+ // C++ [expr.pseudo]p2:
+ // [...] The cv-unqualified versions of the object type and of the type
+ // designated by the pseudo-destructor-name shall be the same type.
+ if (DestructedTypeInfo) {
+ QualType DestructedType = DestructedTypeInfo->getType();
+ SourceLocation DestructedTypeStart
+ = DestructedTypeInfo->getTypeLoc().getLocalSourceRange().getBegin();
+ if (!DestructedType->isDependentType() && !ObjectType->isDependentType()) {
+ if (!Context.hasSameUnqualifiedType(DestructedType, ObjectType)) {
+ Diag(DestructedTypeStart, diag::err_pseudo_dtor_type_mismatch)
+ << ObjectType << DestructedType << Base->getSourceRange()
+ << DestructedTypeInfo->getTypeLoc().getLocalSourceRange();
+
+ // Recover by setting the destructed type to the object type.
+ DestructedType = ObjectType;
+ DestructedTypeInfo = Context.getTrivialTypeSourceInfo(ObjectType,
+ DestructedTypeStart);
+ Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
+ } else if (DestructedType.getObjCLifetime() !=
+ ObjectType.getObjCLifetime()) {
+
+ if (DestructedType.getObjCLifetime() == Qualifiers::OCL_None) {
+ // Okay: just pretend that the user provided the correctly-qualified
+ // type.
+ } else {
+ Diag(DestructedTypeStart, diag::err_arc_pseudo_dtor_inconstant_quals)
+ << ObjectType << DestructedType << Base->getSourceRange()
+ << DestructedTypeInfo->getTypeLoc().getLocalSourceRange();
+ }
+
+ // Recover by setting the destructed type to the object type.
+ DestructedType = ObjectType;
+ DestructedTypeInfo = Context.getTrivialTypeSourceInfo(ObjectType,
+ DestructedTypeStart);
+ Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
+ }
+ }
+ }
+
+ // C++ [expr.pseudo]p2:
+ // [...] Furthermore, the two type-names in a pseudo-destructor-name of the
+ // form
+ //
+ // ::[opt] nested-name-specifier[opt] type-name :: ~ type-name
+ //
+ // shall designate the same scalar type.
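+ //
+ // For example, in 'p->T::~T()' both occurrences of T must name the same
+ // scalar type as the type of '*p'.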
+ if (ScopeTypeInfo) {
+ QualType ScopeType = ScopeTypeInfo->getType();
+ if (!ScopeType->isDependentType() && !ObjectType->isDependentType() &&
+ !Context.hasSameUnqualifiedType(ScopeType, ObjectType)) {
+
+ Diag(ScopeTypeInfo->getTypeLoc().getLocalSourceRange().getBegin(),
+ diag::err_pseudo_dtor_type_mismatch)
+ << ObjectType << ScopeType << Base->getSourceRange()
+ << ScopeTypeInfo->getTypeLoc().getLocalSourceRange();
+
+ ScopeType = QualType();
+ ScopeTypeInfo = nullptr;
+ }
+ }
+
+ Expr *Result
+ = new (Context) CXXPseudoDestructorExpr(Context, Base,
+ OpKind == tok::arrow, OpLoc,
+ SS.getWithLocInContext(Context),
+ ScopeTypeInfo,
+ CCLoc,
+ TildeLoc,
+ Destructed);
+
+ return Result;
+}
+
+ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ CXXScopeSpec &SS,
+ UnqualifiedId &FirstTypeName,
+ SourceLocation CCLoc,
+ SourceLocation TildeLoc,
+ UnqualifiedId &SecondTypeName) {
+ assert((FirstTypeName.getKind() == UnqualifiedId::IK_TemplateId ||
+ FirstTypeName.getKind() == UnqualifiedId::IK_Identifier) &&
+ "Invalid first type name in pseudo-destructor");
+ assert((SecondTypeName.getKind() == UnqualifiedId::IK_TemplateId ||
+ SecondTypeName.getKind() == UnqualifiedId::IK_Identifier) &&
+ "Invalid second type name in pseudo-destructor");
+
+ QualType ObjectType;
+ if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc))
+ return ExprError();
+
+ // Compute the object type that we should use for name lookup purposes. Only
+ // record types and dependent types matter.
+ ParsedType ObjectTypePtrForLookup;
+ if (!SS.isSet()) {
+ if (ObjectType->isRecordType())
+ ObjectTypePtrForLookup = ParsedType::make(ObjectType);
+ else if (ObjectType->isDependentType())
+ ObjectTypePtrForLookup = ParsedType::make(Context.DependentTy);
+ }
+
+ // Convert the name of the type being destructed (following the ~) into a
+ // type (with source-location information).
+ QualType DestructedType;
+ TypeSourceInfo *DestructedTypeInfo = nullptr;
+ PseudoDestructorTypeStorage Destructed;
+ if (SecondTypeName.getKind() == UnqualifiedId::IK_Identifier) {
+ ParsedType T = getTypeName(*SecondTypeName.Identifier,
+ SecondTypeName.StartLocation,
+ S, &SS, true, false, ObjectTypePtrForLookup);
+ if (!T &&
+ ((SS.isSet() && !computeDeclContext(SS, false)) ||
+ (!SS.isSet() && ObjectType->isDependentType()))) {
+ // The name of the type being destroyed is a dependent name, and we
+ // couldn't find anything useful in scope. Just store the identifier and
+ // its location, and we'll perform (qualified) name lookup again at
+ // template instantiation time.
+ Destructed = PseudoDestructorTypeStorage(SecondTypeName.Identifier,
+ SecondTypeName.StartLocation);
+ } else if (!T) {
+ Diag(SecondTypeName.StartLocation,
+ diag::err_pseudo_dtor_destructor_non_type)
+ << SecondTypeName.Identifier << ObjectType;
+ if (isSFINAEContext())
+ return ExprError();
+
+ // Recover by assuming we had the right type all along.
+ DestructedType = ObjectType;
+ } else
+ DestructedType = GetTypeFromParser(T, &DestructedTypeInfo);
+ } else {
+ // Resolve the template-id to a type.
+ TemplateIdAnnotation *TemplateId = SecondTypeName.TemplateId;
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+ TypeResult T = ActOnTemplateIdType(TemplateId->SS,
+ TemplateId->TemplateKWLoc,
+ TemplateId->Template,
+ TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc,
+ TemplateArgsPtr,
+ TemplateId->RAngleLoc);
+ if (T.isInvalid() || !T.get()) {
+ // Recover by assuming we had the right type all along.
+ DestructedType = ObjectType;
+ } else
+ DestructedType = GetTypeFromParser(T.get(), &DestructedTypeInfo);
+ }
+
+ // If we've performed some kind of recovery, (re-)build the type source
+ // information.
+ if (!DestructedType.isNull()) {
+ if (!DestructedTypeInfo)
+ DestructedTypeInfo = Context.getTrivialTypeSourceInfo(DestructedType,
+ SecondTypeName.StartLocation);
+ Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
+ }
+
+ // Convert the name of the scope type (the type prior to '::') into a type.
+ TypeSourceInfo *ScopeTypeInfo = nullptr;
+ QualType ScopeType;
+ if (FirstTypeName.getKind() == UnqualifiedId::IK_TemplateId ||
+ FirstTypeName.Identifier) {
+ if (FirstTypeName.getKind() == UnqualifiedId::IK_Identifier) {
+ ParsedType T = getTypeName(*FirstTypeName.Identifier,
+ FirstTypeName.StartLocation,
+ S, &SS, true, false, ObjectTypePtrForLookup);
+ if (!T) {
+ Diag(FirstTypeName.StartLocation,
+ diag::err_pseudo_dtor_destructor_non_type)
+ << FirstTypeName.Identifier << ObjectType;
+
+ if (isSFINAEContext())
+ return ExprError();
+
+ // Just drop this type. It's unnecessary anyway.
+ ScopeType = QualType();
+ } else
+ ScopeType = GetTypeFromParser(T, &ScopeTypeInfo);
+ } else {
+ // Resolve the template-id to a type.
+ TemplateIdAnnotation *TemplateId = FirstTypeName.TemplateId;
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+ TypeResult T = ActOnTemplateIdType(TemplateId->SS,
+ TemplateId->TemplateKWLoc,
+ TemplateId->Template,
+ TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc,
+ TemplateArgsPtr,
+ TemplateId->RAngleLoc);
+ if (T.isInvalid() || !T.get()) {
+ // Recover by dropping this type.
+ ScopeType = QualType();
+ } else
+ ScopeType = GetTypeFromParser(T.get(), &ScopeTypeInfo);
+ }
+ }
+
+ if (!ScopeType.isNull() && !ScopeTypeInfo)
+ ScopeTypeInfo = Context.getTrivialTypeSourceInfo(ScopeType,
+ FirstTypeName.StartLocation);
+
+ return BuildPseudoDestructorExpr(Base, OpLoc, OpKind, SS,
+ ScopeTypeInfo, CCLoc, TildeLoc,
+ Destructed);
+}
+
+ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ SourceLocation TildeLoc,
+ const DeclSpec& DS) {
+ QualType ObjectType;
+ if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc))
+ return ExprError();
+
+ QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc(),
+ false);
+
+ TypeLocBuilder TLB;
+ DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T);
+ DecltypeTL.setNameLoc(DS.getTypeSpecTypeLoc());
+ TypeSourceInfo *DestructedTypeInfo = TLB.getTypeSourceInfo(Context, T);
+ PseudoDestructorTypeStorage Destructed(DestructedTypeInfo);
+
+ return BuildPseudoDestructorExpr(Base, OpLoc, OpKind, CXXScopeSpec(),
+ nullptr, SourceLocation(), TildeLoc,
+ Destructed);
+}
+
+ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
+ CXXConversionDecl *Method,
+ bool HadMultipleCandidates) {
+ if (Method->getParent()->isLambda() &&
+ Method->getConversionType()->isBlockPointerType()) {
+ // This is a lambda conversion to block pointer; check if the argument
+ // is a LambdaExpr.
+ Expr *SubE = E;
+ CastExpr *CE = dyn_cast<CastExpr>(SubE);
+ if (CE && CE->getCastKind() == CK_NoOp)
+ SubE = CE->getSubExpr();
+ SubE = SubE->IgnoreParens();
+ if (CXXBindTemporaryExpr *BE = dyn_cast<CXXBindTemporaryExpr>(SubE))
+ SubE = BE->getSubExpr();
+ if (isa<LambdaExpr>(SubE)) {
+ // For the conversion to block pointer on a lambda expression, we
+ // construct a special BlockLiteral instead; this doesn't really make
+ // a difference in ARC, but outside of ARC the resulting block literal
+ // follows the normal lifetime rules for block literals instead of being
+ // autoreleased.
+ DiagnosticErrorTrap Trap(Diags);
+ ExprResult Exp = BuildBlockForLambdaConversion(E->getExprLoc(),
+ E->getExprLoc(),
+ Method, E);
+ if (Exp.isInvalid())
+ Diag(E->getExprLoc(), diag::note_lambda_to_block_conv);
+ return Exp;
+ }
+ }
+
+ ExprResult Exp = PerformObjectArgumentInitialization(E, /*Qualifier=*/nullptr,
+ FoundDecl, Method);
+ if (Exp.isInvalid())
+ return true;
+
+ MemberExpr *ME = new (Context) MemberExpr(
+ Exp.get(), /*IsArrow=*/false, SourceLocation(), Method, SourceLocation(),
+ Context.BoundMemberTy, VK_RValue, OK_Ordinary);
+ if (HadMultipleCandidates)
+ ME->setHadMultipleCandidates(true);
+ MarkMemberReferenced(ME);
+
+ QualType ResultType = Method->getReturnType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultType);
+ ResultType = ResultType.getNonLValueExprType(Context);
+
+ CXXMemberCallExpr *CE =
+ new (Context) CXXMemberCallExpr(Context, ME, None, ResultType, VK,
+ Exp.get()->getLocEnd());
+ return CE;
+}
+
+ExprResult Sema::BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
+ SourceLocation RParen) {
+ // If the operand is an unresolved lookup expression, the expression is ill-
+ // formed per [over.over]p1, because overloaded function names cannot be used
+ // without arguments except in explicit contexts.
+ ExprResult R = CheckPlaceholderExpr(Operand);
+ if (R.isInvalid())
+ return R;
+
+ // The operand may have been modified when checking the placeholder type.
+ Operand = R.get();
+
+ if (ActiveTemplateInstantiations.empty() &&
+ Operand->HasSideEffects(Context, false)) {
+ // The expression operand for noexcept is in an unevaluated expression
+ // context, so side effects could result in unintended consequences.
+ Diag(Operand->getExprLoc(), diag::warn_side_effects_unevaluated_context);
+ }
+
+ CanThrowResult CanThrow = canThrow(Operand);
+ return new (Context)
+ CXXNoexceptExpr(Context.BoolTy, Operand, CanThrow, KeyLoc, RParen);
+}
+
+ExprResult Sema::ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation,
+ Expr *Operand, SourceLocation RParen) {
+ return BuildCXXNoexceptExpr(KeyLoc, Operand, RParen);
+}
+
+static bool IsSpecialDiscardedValue(Expr *E) {
+ // In C++11, discarded-value expressions of a certain form are special,
+ // according to [expr]p10:
+ // The lvalue-to-rvalue conversion (4.1) is applied only if the
+ // expression is an lvalue of volatile-qualified type and it has
+ // one of the following forms:
+ E = E->IgnoreParens();
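+ // For example, given 'volatile int *p;', the discarded statement '*p;'
+ // must still perform the volatile load, since indirection is one of the
+ // forms listed below.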
+
+ // - id-expression (5.1.1),
+ if (isa<DeclRefExpr>(E))
+ return true;
+
+ // - subscripting (5.2.1),
+ if (isa<ArraySubscriptExpr>(E))
+ return true;
+
+ // - class member access (5.2.5),
+ if (isa<MemberExpr>(E))
+ return true;
+
+ // - indirection (5.3.1),
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E))
+ if (UO->getOpcode() == UO_Deref)
+ return true;
+
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ // - pointer-to-member operation (5.5),
+ if (BO->isPtrMemOp())
+ return true;
+
+ // - comma expression (5.18) where the right operand is one of the above.
+ if (BO->getOpcode() == BO_Comma)
+ return IsSpecialDiscardedValue(BO->getRHS());
+ }
+
+ // - conditional expression (5.16) where both the second and the third
+ // operands are one of the above, or
+ if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E))
+ return IsSpecialDiscardedValue(CO->getTrueExpr()) &&
+ IsSpecialDiscardedValue(CO->getFalseExpr());
+ // The related edge case of "*x ?: *x".
+ if (BinaryConditionalOperator *BCO =
+ dyn_cast<BinaryConditionalOperator>(E)) {
+ if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(BCO->getTrueExpr()))
+ return IsSpecialDiscardedValue(OVE->getSourceExpr()) &&
+ IsSpecialDiscardedValue(BCO->getFalseExpr());
+ }
+
+ // Objective-C++ extensions to the rule.
+ if (isa<PseudoObjectExpr>(E) || isa<ObjCIvarRefExpr>(E))
+ return true;
+
+ return false;
+}
+
+/// Perform the conversions required for an expression used in a
+/// context that ignores the result.
+ExprResult Sema::IgnoredValueConversions(Expr *E) {
+ if (E->hasPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(E);
+ if (result.isInvalid()) return E;
+ E = result.get();
+ }
+
+ // C99 6.3.2.1:
+ // [Except in specific positions,] an lvalue that does not have
+ // array type is converted to the value stored in the
+ // designated object (and is no longer an lvalue).
+ if (E->isRValue()) {
+ // In C, function designators (i.e. expressions of function type)
+ // are r-values, but we still want to do function-to-pointer decay
+ // on them. This is both technically correct and convenient for
+ // some clients.
+ if (!getLangOpts().CPlusPlus && E->getType()->isFunctionType())
+ return DefaultFunctionArrayConversion(E);
+
+ return E;
+ }
+
+ if (getLangOpts().CPlusPlus) {
+ // The C++11 standard defines the notion of a discarded-value expression;
+ // normally, we don't need to do anything to handle it, but if it is a
+ // volatile lvalue with a special form, we perform an lvalue-to-rvalue
+ // conversion.
+ if (getLangOpts().CPlusPlus11 && E->isGLValue() &&
+ E->getType().isVolatileQualified() &&
+ IsSpecialDiscardedValue(E)) {
+ ExprResult Res = DefaultLvalueConversion(E);
+ if (Res.isInvalid())
+ return E;
+ E = Res.get();
+ }
+ return E;
+ }
+
+ // GCC seems to also exclude expressions of incomplete enum type.
+ if (const EnumType *T = E->getType()->getAs<EnumType>()) {
+ if (!T->getDecl()->isComplete()) {
+ // FIXME: stupid workaround for a codegen bug!
+ E = ImpCastExprToType(E, Context.VoidTy, CK_ToVoid).get();
+ return E;
+ }
+ }
+
+ ExprResult Res = DefaultFunctionArrayLvalueConversion(E);
+ if (Res.isInvalid())
+ return E;
+ E = Res.get();
+
+ if (!E->getType()->isVoidType())
+ RequireCompleteType(E->getExprLoc(), E->getType(),
+ diag::err_incomplete_type);
+ return E;
+}
+
+// If we can unambiguously determine whether Var can never be used
+// in a constant expression, return true.
+// - if the variable and its initializer are non-dependent, then
+// we can unambiguously check if the variable is a constant expression.
+ // - if the initializer is not value-dependent, we can determine whether
+ // it can be used to initialize a constant expression. If Init cannot
+ // be used to initialize a constant expression, we conclude that Var can
+ // never be a constant expression.
+ // - FIXME: if the initializer is dependent, we can still do some analysis and
+// identify certain cases unambiguously as non-const by using a Visitor:
+// - such as those that involve odr-use of a ParmVarDecl, involve a new
+// delete, lambda-expr, dynamic-cast, reinterpret-cast etc...
+static inline bool VariableCanNeverBeAConstantExpression(VarDecl *Var,
+ ASTContext &Context) {
+ if (isa<ParmVarDecl>(Var)) return true;
+ const VarDecl *DefVD = nullptr;
+
+ // If there is no initializer, this cannot be a constant expression.
+ if (!Var->getAnyInitializer(DefVD)) return true;
+ assert(DefVD);
+ if (DefVD->isWeak()) return false;
+ EvaluatedStmt *Eval = DefVD->ensureEvaluatedStmt();
+
+ Expr *Init = cast<Expr>(Eval->Value);
+
+ if (Var->getType()->isDependentType() || Init->isValueDependent()) {
+ // FIXME: Teach the constant evaluator to deal with the non-dependent parts
+ // of value-dependent expressions, and use it here to determine whether the
+ // initializer is a potential constant expression.
+ return false;
+ }
+
+ return !IsVariableAConstantExpression(Var, Context);
+}
+
+/// \brief Check if the current lambda has any potential captures
+/// that must be captured by any of its enclosing lambdas that are ready to
+/// capture. If there is a lambda that can capture a nested
+/// potential-capture, go ahead and do so. Also, check to see if any
+/// variables are uncaptureable or do not involve an odr-use so do not
+/// need to be captured.
+
+static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
+ Expr *const FE, LambdaScopeInfo *const CurrentLSI, Sema &S) {
+
+ assert(!S.isUnevaluatedContext());
+ assert(S.CurContext->isDependentContext());
+ assert(CurrentLSI->CallOperator == S.CurContext &&
+ "The current call operator must be synchronized with Sema's CurContext");
+
+ const bool IsFullExprInstantiationDependent = FE->isInstantiationDependent();
+
+ ArrayRef<const FunctionScopeInfo *> FunctionScopesArrayRef(
+ S.FunctionScopes.data(), S.FunctionScopes.size());
+
+ // All the potentially captureable variables in the current nested
+ // lambda (within a generic outer lambda), must be captured by an
+ // outer lambda that is enclosed within a non-dependent context.
+ const unsigned NumPotentialCaptures =
+ CurrentLSI->getNumPotentialVariableCaptures();
+ for (unsigned I = 0; I != NumPotentialCaptures; ++I) {
+ Expr *VarExpr = nullptr;
+ VarDecl *Var = nullptr;
+ CurrentLSI->getPotentialVariableCapture(I, Var, VarExpr);
+ // If the variable is clearly identified as non-odr-used and the full
+ // expression is not instantiation dependent, only then do we not
+ // need to check enclosing lambdas for speculative captures.
+ // For example, even though 'x' below is not odr-used, it should be
+ // captured:
+ // int test() {
+ // const int x = 10;
+ // auto L = [=](auto a) {
+ // (void) +x + a;
+ // };
+ // }
+ if (CurrentLSI->isVariableExprMarkedAsNonODRUsed(VarExpr) &&
+ !IsFullExprInstantiationDependent)
+ continue;
+
+ // If we have a capture-capable lambda for the variable, go ahead and
+ // capture the variable in that lambda (and all its enclosing lambdas).
+ if (const Optional<unsigned> Index =
+ getStackIndexOfNearestEnclosingCaptureCapableLambda(
+ FunctionScopesArrayRef, Var, S)) {
+ const unsigned FunctionScopeIndexOfCapturableLambda = Index.getValue();
+ MarkVarDeclODRUsed(Var, VarExpr->getExprLoc(), S,
+ &FunctionScopeIndexOfCapturableLambda);
+ }
+ const bool IsVarNeverAConstantExpression =
+ VariableCanNeverBeAConstantExpression(Var, S.Context);
+ if (!IsFullExprInstantiationDependent || IsVarNeverAConstantExpression) {
+ // This full expression is not instantiation dependent or the variable
+ // cannot be used in a constant expression, which means this
+ // variable must be odr-used here, so diagnose a capture violation
+ // early if the variable is un-capturable.
+ // This is purely for diagnosing errors early. Otherwise, this
+ // error would get diagnosed when the lambda becomes capture ready.
+ QualType CaptureType, DeclRefType;
+ SourceLocation ExprLoc = VarExpr->getExprLoc();
+ if (S.tryCaptureVariable(Var, ExprLoc, S.TryCapture_Implicit,
+ /*EllipsisLoc*/ SourceLocation(),
+ /*BuildAndDiagnose*/false, CaptureType,
+ DeclRefType, nullptr)) {
+ // We will never be able to capture this variable, and we need
+ // to be able to in any and all instantiations, so diagnose it.
+ S.tryCaptureVariable(Var, ExprLoc, S.TryCapture_Implicit,
+ /*EllipsisLoc*/ SourceLocation(),
+ /*BuildAndDiagnose*/true, CaptureType,
+ DeclRefType, nullptr);
+ }
+ }
+ }
+
+ // Check if 'this' needs to be captured.
+ if (CurrentLSI->hasPotentialThisCapture()) {
+ // If we have a capture-capable lambda for 'this', go ahead and capture
+ // 'this' in that lambda (and all its enclosing lambdas).
+ if (const Optional<unsigned> Index =
+ getStackIndexOfNearestEnclosingCaptureCapableLambda(
+ FunctionScopesArrayRef, /*0 is 'this'*/ nullptr, S)) {
+ const unsigned FunctionScopeIndexOfCapturableLambda = Index.getValue();
+ S.CheckCXXThisCapture(CurrentLSI->PotentialThisCaptureLocation,
+ /*Explicit*/ false, /*BuildAndDiagnose*/ true,
+ &FunctionScopeIndexOfCapturableLambda);
+ }
+ }
+
+ // Reset all the potential captures at the end of each full-expression.
+ CurrentLSI->clearPotentialCaptures();
+}
+
+static ExprResult attemptRecovery(Sema &SemaRef,
+ const TypoCorrectionConsumer &Consumer,
+ TypoCorrection TC) {
+ LookupResult R(SemaRef, Consumer.getLookupResult().getLookupNameInfo(),
+ Consumer.getLookupResult().getLookupKind());
+ const CXXScopeSpec *SS = Consumer.getSS();
+ CXXScopeSpec NewSS;
+
+ // Use an appropriate CXXScopeSpec for building the expr.
+ if (auto *NNS = TC.getCorrectionSpecifier())
+ NewSS.MakeTrivial(SemaRef.Context, NNS, TC.getCorrectionRange());
+ else if (SS && !TC.WillReplaceSpecifier())
+ NewSS = *SS;
+
+ if (auto *ND = TC.getFoundDecl()) {
+ R.setLookupName(ND->getDeclName());
+ R.addDecl(ND);
+ if (ND->isCXXClassMember()) {
+ // Figure out the correct naming class to add to the LookupResult.
+ CXXRecordDecl *Record = nullptr;
+ if (auto *NNS = TC.getCorrectionSpecifier())
+ Record = NNS->getAsType()->getAsCXXRecordDecl();
+ if (!Record)
+ Record =
+ dyn_cast<CXXRecordDecl>(ND->getDeclContext()->getRedeclContext());
+ if (Record)
+ R.setNamingClass(Record);
+
+ // Detect and handle the case where the decl might be an implicit
+ // member.
+ bool MightBeImplicitMember;
+ if (!Consumer.isAddressOfOperand())
+ MightBeImplicitMember = true;
+ else if (!NewSS.isEmpty())
+ MightBeImplicitMember = false;
+ else if (R.isOverloadedResult())
+ MightBeImplicitMember = false;
+ else if (R.isUnresolvableResult())
+ MightBeImplicitMember = true;
+ else
+ MightBeImplicitMember = isa<FieldDecl>(ND) ||
+ isa<IndirectFieldDecl>(ND) ||
+ isa<MSPropertyDecl>(ND);
+
+ if (MightBeImplicitMember)
+ return SemaRef.BuildPossibleImplicitMemberExpr(
+ NewSS, /*TemplateKWLoc*/ SourceLocation(), R,
+ /*TemplateArgs*/ nullptr, /*S*/ nullptr);
+ } else if (auto *Ivar = dyn_cast<ObjCIvarDecl>(ND)) {
+ return SemaRef.LookupInObjCMethod(R, Consumer.getScope(),
+ Ivar->getIdentifier());
+ }
+ }
+
+ return SemaRef.BuildDeclarationNameExpr(NewSS, R, /*NeedsADL*/ false,
+ /*AcceptInvalidDecl*/ true);
+}
+
+namespace {
+class FindTypoExprs : public RecursiveASTVisitor<FindTypoExprs> {
+ llvm::SmallSetVector<TypoExpr *, 2> &TypoExprs;
+
+public:
+ explicit FindTypoExprs(llvm::SmallSetVector<TypoExpr *, 2> &TypoExprs)
+ : TypoExprs(TypoExprs) {}
+ bool VisitTypoExpr(TypoExpr *TE) {
+ TypoExprs.insert(TE);
+ return true;
+ }
+};
+
+class TransformTypos : public TreeTransform<TransformTypos> {
+ typedef TreeTransform<TransformTypos> BaseTransform;
+
+ VarDecl *InitDecl; // A decl to avoid as a correction because it is in the
+ // process of being initialized.
+ llvm::function_ref<ExprResult(Expr *)> ExprFilter;
+ llvm::SmallSetVector<TypoExpr *, 2> TypoExprs, AmbiguousTypoExprs;
+ llvm::SmallDenseMap<TypoExpr *, ExprResult, 2> TransformCache;
+ llvm::SmallDenseMap<OverloadExpr *, Expr *, 4> OverloadResolution;
+
+ /// \brief Emit diagnostics for all of the TypoExprs encountered.
+ /// If the TypoExprs were successfully corrected, then the diagnostics should
+ /// suggest the corrections. Otherwise the diagnostics will not suggest
+ /// anything (having been passed an empty TypoCorrection).
+ void EmitAllDiagnostics() {
+ for (auto E : TypoExprs) {
+ TypoExpr *TE = cast<TypoExpr>(E);
+ auto &State = SemaRef.getTypoExprState(TE);
+ if (State.DiagHandler) {
+ TypoCorrection TC = State.Consumer->getCurrentCorrection();
+ ExprResult Replacement = TransformCache[TE];
+
+ // Extract the NamedDecl from the transformed TypoExpr and add it to the
+ // TypoCorrection, replacing the existing decls. This ensures the right
+ // NamedDecl is used in diagnostics e.g. in the case where overload
+ // resolution was used to select one from several possible decls that
+ // had been stored in the TypoCorrection.
+ if (auto *ND = getDeclFromExpr(
+ Replacement.isInvalid() ? nullptr : Replacement.get()))
+ TC.setCorrectionDecl(ND);
+
+ State.DiagHandler(TC);
+ }
+ SemaRef.clearDelayedTypo(TE);
+ }
+ }
+
+ /// \brief If corrections for the first TypoExpr have been exhausted for a
+ /// given combination of the other TypoExprs, retry those corrections against
+ /// the next combination of substitutions for the other TypoExprs by advancing
+ /// to the next potential correction of the second TypoExpr. For the second
+ /// and subsequent TypoExprs, if its stream of corrections has been exhausted,
+ /// the stream is reset and the next TypoExpr's stream is advanced by one (a
+ /// TypoExpr's correction stream is advanced by removing the TypoExpr from the
+ /// TransformCache). Returns true if there are still any untried
+ /// combinations of corrections.
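+ /// In effect, the correction streams are advanced like an odometer over
+ /// every combination of candidate corrections.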
+ bool CheckAndAdvanceTypoExprCorrectionStreams() {
+ for (auto TE : TypoExprs) {
+ auto &State = SemaRef.getTypoExprState(TE);
+ TransformCache.erase(TE);
+ if (!State.Consumer->finished())
+ return true;
+ State.Consumer->resetCorrectionStream();
+ }
+ return false;
+ }
+
+ NamedDecl *getDeclFromExpr(Expr *E) {
+ if (auto *OE = dyn_cast_or_null<OverloadExpr>(E))
+ E = OverloadResolution[OE];
+
+ if (!E)
+ return nullptr;
+ if (auto *DRE = dyn_cast<DeclRefExpr>(E))
+ return DRE->getFoundDecl();
+ if (auto *ME = dyn_cast<MemberExpr>(E))
+ return ME->getFoundDecl();
+ // FIXME: Add any other expr types that could be seen by the delayed typo
+ // correction TreeTransform for which the corresponding TypoCorrection could
+ // contain multiple decls.
+ return nullptr;
+ }
+
+ ExprResult TryTransform(Expr *E) {
+ Sema::SFINAETrap Trap(SemaRef);
+ ExprResult Res = TransformExpr(E);
+ if (Trap.hasErrorOccurred() || Res.isInvalid())
+ return ExprError();
+
+ return ExprFilter(Res.get());
+ }
+
+public:
+ TransformTypos(Sema &SemaRef, VarDecl *InitDecl, llvm::function_ref<ExprResult(Expr *)> Filter)
+ : BaseTransform(SemaRef), InitDecl(InitDecl), ExprFilter(Filter) {}
+
+ ExprResult RebuildCallExpr(Expr *Callee, SourceLocation LParenLoc,
+ MultiExprArg Args,
+ SourceLocation RParenLoc,
+ Expr *ExecConfig = nullptr) {
+ auto Result = BaseTransform::RebuildCallExpr(Callee, LParenLoc, Args,
+ RParenLoc, ExecConfig);
+ if (auto *OE = dyn_cast<OverloadExpr>(Callee)) {
+ if (Result.isUsable()) {
+ Expr *ResultCall = Result.get();
+ if (auto *BE = dyn_cast<CXXBindTemporaryExpr>(ResultCall))
+ ResultCall = BE->getSubExpr();
+ if (auto *CE = dyn_cast<CallExpr>(ResultCall))
+ OverloadResolution[OE] = CE->getCallee();
+ }
+ }
+ return Result;
+ }
+
+ ExprResult TransformLambdaExpr(LambdaExpr *E) { return Owned(E); }
+
+ ExprResult TransformBlockExpr(BlockExpr *E) { return Owned(E); }
+
+ ExprResult Transform(Expr *E) {
+ ExprResult Res;
+ while (true) {
+ Res = TryTransform(E);
+
+ // Exit if either the transform was valid or if there were no TypoExprs
+ // to transform that still have any untried correction candidates.
+ if (!Res.isInvalid() ||
+ !CheckAndAdvanceTypoExprCorrectionStreams())
+ break;
+ }
+
+ // Ensure none of the TypoExprs have multiple typo correction candidates
+ // with the same edit length that pass all the checks and filters.
+ // TODO: Properly handle various permutations of possible corrections when
+ // there is more than one potentially ambiguous typo correction.
+ // Also, disable typo correction while attempting the transform when
+ // handling potentially ambiguous typo corrections as any new TypoExprs will
+ // have been introduced by the application of one of the correction
+ // candidates and add little to no value if corrected.
+ SemaRef.DisableTypoCorrection = true;
+ while (!AmbiguousTypoExprs.empty()) {
+ auto TE = AmbiguousTypoExprs.back();
+ auto Cached = TransformCache[TE];
+ auto &State = SemaRef.getTypoExprState(TE);
+ State.Consumer->saveCurrentPosition();
+ TransformCache.erase(TE);
+ if (!TryTransform(E).isInvalid()) {
+ State.Consumer->resetCorrectionStream();
+ TransformCache.erase(TE);
+ Res = ExprError();
+ break;
+ }
+ AmbiguousTypoExprs.remove(TE);
+ State.Consumer->restoreSavedPosition();
+ TransformCache[TE] = Cached;
+ }
+ SemaRef.DisableTypoCorrection = false;
+
+ // Ensure that all of the TypoExprs within the current Expr have been found.
+ if (!Res.isUsable())
+ FindTypoExprs(TypoExprs).TraverseStmt(E);
+
+ EmitAllDiagnostics();
+
+ return Res;
+ }
+
+ ExprResult TransformTypoExpr(TypoExpr *E) {
+ // If the TypoExpr hasn't been seen before, record it. Otherwise, return the
+ // cached transformation result if there is one and the TypoExpr isn't the
+ // first one that was encountered.
+ auto &CacheEntry = TransformCache[E];
+ if (!TypoExprs.insert(E) && !CacheEntry.isUnset()) {
+ return CacheEntry;
+ }
+
+ auto &State = SemaRef.getTypoExprState(E);
+ assert(State.Consumer && "Cannot transform a cleared TypoExpr");
+
+ // For the first TypoExpr and an uncached TypoExpr, find the next likely
+ // typo correction and return it.
+ while (TypoCorrection TC = State.Consumer->getNextCorrection()) {
+ if (InitDecl && TC.getFoundDecl() == InitDecl)
+ continue;
+ ExprResult NE = State.RecoveryHandler ?
+ State.RecoveryHandler(SemaRef, E, TC) :
+ attemptRecovery(SemaRef, *State.Consumer, TC);
+ if (!NE.isInvalid()) {
+ // Check whether there may be a second viable correction with the same
+ // edit distance; if so, remember this TypoExpr may have an ambiguous
+ // correction so it can be more thoroughly vetted later.
+ TypoCorrection Next;
+ if ((Next = State.Consumer->peekNextCorrection()) &&
+ Next.getEditDistance(false) == TC.getEditDistance(false)) {
+ AmbiguousTypoExprs.insert(E);
+ } else {
+ AmbiguousTypoExprs.remove(E);
+ }
+ assert(!NE.isUnset() &&
+ "Typo was transformed into a valid-but-null ExprResult");
+ return CacheEntry = NE;
+ }
+ }
+ return CacheEntry = ExprError();
+ }
+};
+}
+
+ExprResult
+Sema::CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl,
+ llvm::function_ref<ExprResult(Expr *)> Filter) {
+ // If the current evaluation context indicates there are uncorrected typos
+ // and the current expression isn't guaranteed to be free of typos, try to
+ // resolve any TypoExpr nodes that might be in the expression.
+ if (E && !ExprEvalContexts.empty() && ExprEvalContexts.back().NumTypos &&
+ (E->isTypeDependent() || E->isValueDependent() ||
+ E->isInstantiationDependent())) {
+ auto TyposInContext = ExprEvalContexts.back().NumTypos;
+ assert(TyposInContext < ~0U && "Recursive call of CorrectDelayedTyposInExpr");
+ ExprEvalContexts.back().NumTypos = ~0U;
+ auto TyposResolved = DelayedTypos.size();
+ auto Result = TransformTypos(*this, InitDecl, Filter).Transform(E);
+ ExprEvalContexts.back().NumTypos = TyposInContext;
+ TyposResolved -= DelayedTypos.size();
+ if (Result.isInvalid() || Result.get() != E) {
+ ExprEvalContexts.back().NumTypos -= TyposResolved;
+ return Result;
+ }
+ assert(TyposResolved == 0 && "Corrected typo but got same Expr back?");
+ }
+ return E;
+}
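+
+// For illustration only (an assumed caller, not part of this change):
+// int flag; if (falg) {}
+// parses 'falg' into a TypoExpr; CorrectDelayedTyposInExpr later replaces
+// it with a DeclRefExpr to 'flag', or returns ExprError() if no correction
+// survives the checks and filters.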
+
+ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC,
+ bool DiscardedValue,
+ bool IsConstexpr,
+ bool IsLambdaInitCaptureInitializer) {
+ ExprResult FullExpr = FE;
+
+ if (!FullExpr.get())
+ return ExprError();
+
+ // If we are an init-expression in a lambda's init-capture, we should not
+ // diagnose an unexpanded pack now (it will be diagnosed once the
+ // lambda-expression containing the full-expression is done).
+ // template<class ... Ts> void test(Ts ... t) {
+ // test([&a(t)]() { <-- (t) is an init-expr that shouldn't be diagnosed now.
+ // return a;
+ // }() ...);
+ // }
+ // FIXME: This is a hack. It would be better if we pushed the lambda scope
+ // when we parse the lambda introducer, and teach capturing (but not
+ // unexpanded pack detection) to walk over LambdaScopeInfos which don't have a
+ // corresponding class yet (that is, have LambdaScopeInfo either represent a
+ // lambda where we've entered the introducer but not the body, or represent a
+ // lambda where we've entered the body, depending on where the
+ // parser/instantiation has got to).
+ if (!IsLambdaInitCaptureInitializer &&
+ DiagnoseUnexpandedParameterPack(FullExpr.get()))
+ return ExprError();
+
+ // Top-level expressions default to 'id' when we're in a debugger.
+ if (DiscardedValue && getLangOpts().DebuggerCastResultToId &&
+ FullExpr.get()->getType() == Context.UnknownAnyTy) {
+ FullExpr = forceUnknownAnyToType(FullExpr.get(), Context.getObjCIdType());
+ if (FullExpr.isInvalid())
+ return ExprError();
+ }
+
+ if (DiscardedValue) {
+ FullExpr = CheckPlaceholderExpr(FullExpr.get());
+ if (FullExpr.isInvalid())
+ return ExprError();
+
+ FullExpr = IgnoredValueConversions(FullExpr.get());
+ if (FullExpr.isInvalid())
+ return ExprError();
+ }
+
+ FullExpr = CorrectDelayedTyposInExpr(FullExpr.get());
+ if (FullExpr.isInvalid())
+ return ExprError();
+
+ CheckCompletedExpr(FullExpr.get(), CC, IsConstexpr);
+
+ // At the end of this full expression (which could be a deeply nested
+ // lambda), if there is a potential capture within the nested lambda,
+ // have the outer capture-able lambda try and capture it.
+ // Consider the following code:
+ // void f(int, int);
+ // void f(const int&, double);
+ // void foo() {
+ // const int x = 10, y = 20;
+ // auto L = [=](auto a) {
+ // auto M = [=](auto b) {
+ // f(x, b); <-- requires x to be captured by L and M
+ // f(y, a); <-- requires y to be captured by L, but not all Ms
+ // };
+ // };
+ // }
+
+ // FIXME: Also consider what happens for something like this that involves
+ // the gnu-extension statement-expressions or even lambda-init-captures:
+ // void f() {
+ // const int n = 0;
+ // auto L = [&](auto a) {
+ // +n + ({ 0; a; });
+ // };
+ // }
+ //
+ // Here, we see +n, and then the full-expression 0; ends, so we don't
+ // capture n (and instead remove it from our list of potential captures),
+ // and then the full-expression +n + ({ 0; }); ends, but it's too late
+ // for us to see that we need to capture n after all.
+
+ LambdaScopeInfo *const CurrentLSI = getCurLambda();
+ // FIXME: PR 17877 showed that getCurLambda() can return a valid pointer
+ // even if CurContext is not a lambda call operator. Refer to that bug
+ // report for an example of the code that might cause this mismatch.
+ // By ensuring we are in the context of a lambda's call operator
+ // we can fix the bug (we only need to check whether we need to capture
+ // if we are within a lambda's body); but per the comments in that
+ // PR, a proper fix would entail:
+ // "Alternative suggestion:
+ // - Add to Sema an integer holding the smallest (outermost) scope
+ // index that we are *lexically* within, and save/restore/set to
+ // FunctionScopes.size() in InstantiatingTemplate's
+ // constructor/destructor.
+ // - Teach the handful of places that iterate over FunctionScopes to
+ // stop at the outermost enclosing lexical scope."
+ const bool IsInLambdaDeclContext = isLambdaCallOperator(CurContext);
+ if (IsInLambdaDeclContext && CurrentLSI &&
+ CurrentLSI->hasPotentialCaptures() && !FullExpr.isInvalid())
+ CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(FE, CurrentLSI,
+ *this);
+ return MaybeCreateExprWithCleanups(FullExpr);
+}
+
+StmtResult Sema::ActOnFinishFullStmt(Stmt *FullStmt) {
+ if (!FullStmt) return StmtError();
+
+ return MaybeCreateStmtWithCleanups(FullStmt);
+}
+
+Sema::IfExistsResult
+Sema::CheckMicrosoftIfExistsSymbol(Scope *S,
+ CXXScopeSpec &SS,
+ const DeclarationNameInfo &TargetNameInfo) {
+ DeclarationName TargetName = TargetNameInfo.getName();
+ if (!TargetName)
+ return IER_DoesNotExist;
+
+ // If the name itself is dependent, then the result is dependent.
+ if (TargetName.isDependentName())
+ return IER_Dependent;
+
+ // Do the redeclaration lookup in the current scope.
+ LookupResult R(*this, TargetNameInfo, Sema::LookupAnyName,
+ Sema::NotForRedeclaration);
+ LookupParsedName(R, S, &SS);
+ R.suppressDiagnostics();
+
+ switch (R.getResultKind()) {
+ case LookupResult::Found:
+ case LookupResult::FoundOverloaded:
+ case LookupResult::FoundUnresolvedValue:
+ case LookupResult::Ambiguous:
+ return IER_Exists;
+
+ case LookupResult::NotFound:
+ return IER_DoesNotExist;
+
+ case LookupResult::NotFoundInCurrentInstantiation:
+ return IER_Dependent;
+ }
+
+ llvm_unreachable("Invalid LookupResult Kind!");
+}
+
+Sema::IfExistsResult
+Sema::CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
+ bool IsIfExists, CXXScopeSpec &SS,
+ UnqualifiedId &Name) {
+ DeclarationNameInfo TargetNameInfo = GetNameFromUnqualifiedId(Name);
+
+ // Check for unexpanded parameter packs.
+ SmallVector<UnexpandedParameterPack, 4> Unexpanded;
+ collectUnexpandedParameterPacks(SS, Unexpanded);
+ collectUnexpandedParameterPacks(TargetNameInfo, Unexpanded);
+ if (!Unexpanded.empty()) {
+ DiagnoseUnexpandedParameterPacks(KeywordLoc,
+ IsIfExists? UPPC_IfExists
+ : UPPC_IfNotExists,
+ Unexpanded);
+ return IER_Error;
+ }
+
+ return CheckMicrosoftIfExistsSymbol(S, SS, TargetNameInfo);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp
new file mode 100644
index 0000000..9c345f8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp
@@ -0,0 +1,1774 @@
+//===--- SemaExprMember.cpp - Semantic Analysis for Expressions -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for member access expressions.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Sema/Overload.h"
+#include "clang/AST/ASTLambda.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaInternal.h"
+
+using namespace clang;
+using namespace sema;
+
+typedef llvm::SmallPtrSet<const CXXRecordDecl*, 4> BaseSet;
+
+/// Determines if the given class is provably not derived from all of
+/// the prospective base classes.
+static bool isProvablyNotDerivedFrom(Sema &SemaRef, CXXRecordDecl *Record,
+ const BaseSet &Bases) {
+ auto BaseIsNotInSet = [&Bases](const CXXRecordDecl *Base) {
+ return !Bases.count(Base->getCanonicalDecl());
+ };
+ return BaseIsNotInSet(Record) && Record->forallBases(BaseIsNotInSet);
+}
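+
+// For illustration (hedged, not part of this change): given
+// struct A {}; struct B : A {}; struct C {};
+// C is provably not derived from the set {A}, while B is not (forallBases
+// finds A among B's bases).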
+
+enum IMAKind {
+ /// The reference is definitely not an instance member access.
+ IMA_Static,
+
+ /// The reference may be an implicit instance member access.
+ IMA_Mixed,
+
+ /// The reference may be to an instance member, but it might be invalid if
+ /// so, because the context is not an instance method.
+ IMA_Mixed_StaticContext,
+
+ /// The reference may be to an instance member, but it is invalid if
+ /// so, because the context is from an unrelated class.
+ IMA_Mixed_Unrelated,
+
+ /// The reference is definitely an implicit instance member access.
+ IMA_Instance,
+
+ /// The reference may be to an unresolved using declaration.
+ IMA_Unresolved,
+
+ /// The reference is a contextually-permitted abstract member reference.
+ IMA_Abstract,
+
+ /// The reference may be to an unresolved using declaration and the
+ /// context is not an instance method.
+ IMA_Unresolved_StaticContext,
+
+ /// The reference refers to a field which is not a member of the containing
+ /// class, which is allowed because we're in C++11 mode and the context is
+ /// unevaluated.
+ IMA_Field_Uneval_Context,
+
+ /// All possible referents are instance members and the current
+ /// context is not an instance method.
+ IMA_Error_StaticContext,
+
+ /// All possible referents are instance members of an unrelated
+ /// class.
+ IMA_Error_Unrelated
+};
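+
+// For illustration (a hedged sketch, not part of this change):
+// struct A {
+// int n;
+// static int s;
+// void f() { n; } // IMA_Instance: implicit this->n
+// static void g() { n; } // IMA_Error_StaticContext
+// static void h() { s; } // IMA_Static
+// };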
+
+/// The given lookup names class member(s) and is not being used for
+/// an address-of-member expression. Classify the type of access
+/// according to whether it's possible that this reference names an
+/// instance member. This is best-effort in dependent contexts; it is okay to
+/// conservatively answer "yes", in which case some errors will simply
+/// not be caught until template-instantiation.
+static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
+ const LookupResult &R) {
+ assert(!R.empty() && (*R.begin())->isCXXClassMember());
+
+ DeclContext *DC = SemaRef.getFunctionLevelDeclContext();
+
+ bool isStaticContext = SemaRef.CXXThisTypeOverride.isNull() &&
+ (!isa<CXXMethodDecl>(DC) || cast<CXXMethodDecl>(DC)->isStatic());
+
+ if (R.isUnresolvableResult())
+ return isStaticContext ? IMA_Unresolved_StaticContext : IMA_Unresolved;
+
+ // Collect all the declaring classes of instance members we find.
+ bool hasNonInstance = false;
+ bool isField = false;
+ BaseSet Classes;
+ for (NamedDecl *D : R) {
+ // Look through any using decls.
+ D = D->getUnderlyingDecl();
+
+ if (D->isCXXInstanceMember()) {
+ isField |= isa<FieldDecl>(D) || isa<MSPropertyDecl>(D) ||
+ isa<IndirectFieldDecl>(D);
+
+ CXXRecordDecl *R = cast<CXXRecordDecl>(D->getDeclContext());
+ Classes.insert(R->getCanonicalDecl());
+ } else
+ hasNonInstance = true;
+ }
+
+ // If we didn't find any instance members, it can't be an implicit
+ // member reference.
+ if (Classes.empty())
+ return IMA_Static;
+
+ // C++11 [expr.prim.general]p12:
+ // An id-expression that denotes a non-static data member or non-static
+ // member function of a class can only be used:
+ // (...)
+ // - if that id-expression denotes a non-static data member and it
+ // appears in an unevaluated operand.
+ //
+ // This rule is specific to C++11. However, we also permit this form
+ // in unevaluated inline assembly operands, like the operand to a SIZE.
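+ // For example (illustrative, not part of the original comment):
+ // struct A { int n; };
+ // static_assert(sizeof(A::n) == sizeof(int), ""); // OK in C++11 only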
+ IMAKind AbstractInstanceResult = IMA_Static; // happens to be 'false'
+ assert(!AbstractInstanceResult);
+ switch (SemaRef.ExprEvalContexts.back().Context) {
+ case Sema::Unevaluated:
+ if (isField && SemaRef.getLangOpts().CPlusPlus11)
+ AbstractInstanceResult = IMA_Field_Uneval_Context;
+ break;
+
+ case Sema::UnevaluatedAbstract:
+ AbstractInstanceResult = IMA_Abstract;
+ break;
+
+ case Sema::ConstantEvaluated:
+ case Sema::PotentiallyEvaluated:
+ case Sema::PotentiallyEvaluatedIfUsed:
+ break;
+ }
+
+ // If the current context is not an instance method, it can't be
+ // an implicit member reference.
+ if (isStaticContext) {
+ if (hasNonInstance)
+ return IMA_Mixed_StaticContext;
+
+ return AbstractInstanceResult ? AbstractInstanceResult
+ : IMA_Error_StaticContext;
+ }
+
+ CXXRecordDecl *contextClass;
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(DC))
+ contextClass = MD->getParent()->getCanonicalDecl();
+ else
+ contextClass = cast<CXXRecordDecl>(DC);
+
+ // [class.mfct.non-static]p3:
+ // ...is used in the body of a non-static member function of class X,
+ // if name lookup (3.4.1) resolves the name in the id-expression to a
+ // non-static non-type member of some class C [...]
+ // ...if C is not X or a base class of X, the class member access expression
+ // is ill-formed.
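+ // For example (illustrative only):
+ // struct A { int n; };
+ // struct B { void f() { A::n; } };
+ // is ill-formed, because B is neither A nor derived from A.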
+ if (R.getNamingClass() &&
+ contextClass->getCanonicalDecl() !=
+ R.getNamingClass()->getCanonicalDecl()) {
+ // If the naming class is not the current context, this was a qualified
+ // member name lookup, and it's sufficient to check that we have the naming
+ // class as a base class.
+ Classes.clear();
+ Classes.insert(R.getNamingClass()->getCanonicalDecl());
+ }
+
+ // If we can prove that the current context is unrelated to all the
+ // declaring classes, it can't be an implicit member reference (in
+ // which case it's an error if any of those members are selected).
+ if (isProvablyNotDerivedFrom(SemaRef, contextClass, Classes))
+ return hasNonInstance ? IMA_Mixed_Unrelated :
+ AbstractInstanceResult ? AbstractInstanceResult :
+ IMA_Error_Unrelated;
+
+ return (hasNonInstance ? IMA_Mixed : IMA_Instance);
+}
+
+/// Diagnose a reference to a field with no object available.
+static void diagnoseInstanceReference(Sema &SemaRef,
+ const CXXScopeSpec &SS,
+ NamedDecl *Rep,
+ const DeclarationNameInfo &nameInfo) {
+ SourceLocation Loc = nameInfo.getLoc();
+ SourceRange Range(Loc);
+ if (SS.isSet()) Range.setBegin(SS.getRange().getBegin());
+
+ // Look through using shadow decls and aliases.
+ Rep = Rep->getUnderlyingDecl();
+
+ DeclContext *FunctionLevelDC = SemaRef.getFunctionLevelDeclContext();
+ CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FunctionLevelDC);
+ CXXRecordDecl *ContextClass = Method ? Method->getParent() : nullptr;
+ CXXRecordDecl *RepClass = dyn_cast<CXXRecordDecl>(Rep->getDeclContext());
+
+ bool InStaticMethod = Method && Method->isStatic();
+ bool IsField = isa<FieldDecl>(Rep) || isa<IndirectFieldDecl>(Rep);
+
+ if (IsField && InStaticMethod)
+ // "invalid use of member 'x' in static member function"
+ SemaRef.Diag(Loc, diag::err_invalid_member_use_in_static_method)
+ << Range << nameInfo.getName();
+ else if (ContextClass && RepClass && SS.isEmpty() && !InStaticMethod &&
+ !RepClass->Equals(ContextClass) && RepClass->Encloses(ContextClass))
+ // Unqualified lookup in a non-static member function found a member of an
+ // enclosing class.
+ SemaRef.Diag(Loc, diag::err_nested_non_static_member_use)
+ << IsField << RepClass << nameInfo.getName() << ContextClass << Range;
+ else if (IsField)
+ SemaRef.Diag(Loc, diag::err_invalid_non_static_member_use)
+ << nameInfo.getName() << Range;
+ else
+ SemaRef.Diag(Loc, diag::err_member_call_without_object)
+ << Range;
+}
+
+/// Builds an expression which might be an implicit member expression.
+ExprResult
+Sema::BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ LookupResult &R,
+ const TemplateArgumentListInfo *TemplateArgs,
+ const Scope *S) {
+ switch (ClassifyImplicitMemberAccess(*this, R)) {
+ case IMA_Instance:
+ return BuildImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs, true, S);
+
+ case IMA_Mixed:
+ case IMA_Mixed_Unrelated:
+ case IMA_Unresolved:
+ return BuildImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs, false,
+ S);
+
+ case IMA_Field_Uneval_Context:
+ Diag(R.getNameLoc(), diag::warn_cxx98_compat_non_static_member_use)
+ << R.getLookupNameInfo().getName();
+ // Fall through.
+ case IMA_Static:
+ case IMA_Abstract:
+ case IMA_Mixed_StaticContext:
+ case IMA_Unresolved_StaticContext:
+ if (TemplateArgs || TemplateKWLoc.isValid())
+ return BuildTemplateIdExpr(SS, TemplateKWLoc, R, false, TemplateArgs);
+ return BuildDeclarationNameExpr(SS, R, false);
+
+ case IMA_Error_StaticContext:
+ case IMA_Error_Unrelated:
+ diagnoseInstanceReference(*this, SS, R.getRepresentativeDecl(),
+ R.getLookupNameInfo());
+ return ExprError();
+ }
+
+ llvm_unreachable("unexpected instance member access kind");
+}
+
+/// Check an ext-vector component access expression.
+///
+/// VK should be set in advance to the value kind of the base
+/// expression.
+static QualType
+CheckExtVectorComponent(Sema &S, QualType baseType, ExprValueKind &VK,
+ SourceLocation OpLoc, const IdentifierInfo *CompName,
+ SourceLocation CompLoc) {
+ // FIXME: Share logic with ExtVectorElementExpr::containsDuplicateElements,
+ // see FIXME there.
+ //
+ // FIXME: This logic can be greatly simplified by splitting it along
+ // halving/not halving and reworking the component checking.
+ const ExtVectorType *vecType = baseType->getAs<ExtVectorType>();
+
+ // The vector accessor can't exceed the number of elements.
+ const char *compStr = CompName->getNameStart();
+
+ // This flag determines whether or not the component is one of the four
+ // special names that indicate a subset of exactly half the elements are
+ // to be selected.
+ bool HalvingSwizzle = false;
+
+ // This flag determines whether or not CompName has an 's' char prefix,
+ // indicating that it is a string of hex values to be used as vector indices.
+ bool HexSwizzle = (*compStr == 's' || *compStr == 'S') && compStr[1];
+
+ bool HasRepeated = false;
+ bool HasIndex[16] = {};
+
+ int Idx;
+
+ // Check that we've found one of the special components, or that all the
+ // component names come from the same set.
+ if (!strcmp(compStr, "hi") || !strcmp(compStr, "lo") ||
+ !strcmp(compStr, "even") || !strcmp(compStr, "odd")) {
+ HalvingSwizzle = true;
+ } else if (!HexSwizzle &&
+ (Idx = vecType->getPointAccessorIdx(*compStr)) != -1) {
+ do {
+ if (HasIndex[Idx]) HasRepeated = true;
+ HasIndex[Idx] = true;
+ compStr++;
+ } while (*compStr && (Idx = vecType->getPointAccessorIdx(*compStr)) != -1);
+ } else {
+ if (HexSwizzle) compStr++;
+ while ((Idx = vecType->getNumericAccessorIdx(*compStr)) != -1) {
+ if (HasIndex[Idx]) HasRepeated = true;
+ HasIndex[Idx] = true;
+ compStr++;
+ }
+ }
+
+ if (!HalvingSwizzle && *compStr) {
+ // We didn't get to the end of the string. This means the component names
+ // didn't come from the same set *or* we encountered an illegal name.
+ S.Diag(OpLoc, diag::err_ext_vector_component_name_illegal)
+ << StringRef(compStr, 1) << SourceRange(CompLoc);
+ return QualType();
+ }
+
+ // Ensure no component accessor exceeds the width of the vector type it
+ // operates on.
+ if (!HalvingSwizzle) {
+ compStr = CompName->getNameStart();
+
+ if (HexSwizzle)
+ compStr++;
+
+ while (*compStr) {
+ if (!vecType->isAccessorWithinNumElements(*compStr++)) {
+ S.Diag(OpLoc, diag::err_ext_vector_component_exceeds_length)
+ << baseType << SourceRange(CompLoc);
+ return QualType();
+ }
+ }
+ }
+
+ // The component accessor looks fine - now we need to compute the actual type.
+ // The vector type is implied by the component accessor. For example,
+ // vec4.b is a float, vec4.xy is a vec2, vec4.rgb is a vec3, etc.
+ // vec4.s0 is a float, vec4.s23 is a vec3, etc.
+ // vec4.hi, vec4.lo, vec4.e, and vec4.o all return vec2.
+ unsigned CompSize = HalvingSwizzle ? (vecType->getNumElements() + 1) / 2
+ : CompName->getLength();
+ if (HexSwizzle)
+ CompSize--;
+
+ if (CompSize == 1)
+ return vecType->getElementType();
+
+ if (HasRepeated) VK = VK_RValue;
+
+ QualType VT = S.Context.getExtVectorType(vecType->getElementType(), CompSize);
+ // Now look up the TypedefDecl from the vector type. Without this,
+ // diagnostics look bad. We want extended vector types to appear built-in.
+ for (Sema::ExtVectorDeclsType::iterator
+ I = S.ExtVectorDecls.begin(S.getExternalSource()),
+ E = S.ExtVectorDecls.end();
+ I != E; ++I) {
+ if ((*I)->getUnderlyingType() == VT)
+ return S.Context.getTypedefType(*I);
+ }
+
+ return VT; // should never get here (a typedef type should always be found).
+}
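+
+// For illustration (a hedged sketch of the accessor forms checked above):
+// typedef float float4 __attribute__((ext_vector_type(4)));
+// float4 v;
+// v.x; // point accessor: a float
+// v.xy; // swizzle: a float2
+// v.s01; // hex-style accessor ('s' prefix): elements 0 and 1
+// v.hi; // halving swizzle: the upper half, a float2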
+
+static Decl *FindGetterSetterNameDeclFromProtocolList(const ObjCProtocolDecl *PDecl,
+ IdentifierInfo *Member,
+ const Selector &Sel,
+ ASTContext &Context) {
+ if (Member)
+ if (ObjCPropertyDecl *PD = PDecl->FindPropertyDeclaration(Member))
+ return PD;
+ if (ObjCMethodDecl *OMD = PDecl->getInstanceMethod(Sel))
+ return OMD;
+
+ for (const auto *I : PDecl->protocols()) {
+ if (Decl *D = FindGetterSetterNameDeclFromProtocolList(I, Member, Sel,
+ Context))
+ return D;
+ }
+ return nullptr;
+}
+
+static Decl *FindGetterSetterNameDecl(const ObjCObjectPointerType *QIdTy,
+ IdentifierInfo *Member,
+ const Selector &Sel,
+ ASTContext &Context) {
+ // Check protocols on qualified interfaces.
+ Decl *GDecl = nullptr;
+ for (const auto *I : QIdTy->quals()) {
+ if (Member)
+ if (ObjCPropertyDecl *PD = I->FindPropertyDeclaration(Member)) {
+ GDecl = PD;
+ break;
+ }
+ // Also must look for a getter or setter name which uses property syntax.
+ if (ObjCMethodDecl *OMD = I->getInstanceMethod(Sel)) {
+ GDecl = OMD;
+ break;
+ }
+ }
+ if (!GDecl) {
+ for (const auto *I : QIdTy->quals()) {
+ // Search in the protocol-qualifier list of current protocol.
+ GDecl = FindGetterSetterNameDeclFromProtocolList(I, Member, Sel, Context);
+ if (GDecl)
+ return GDecl;
+ }
+ }
+ return GDecl;
+}
+
+ExprResult
+Sema::ActOnDependentMemberExpr(Expr *BaseExpr, QualType BaseType,
+ bool IsArrow, SourceLocation OpLoc,
+ const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierInScope,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ // Even in dependent contexts, try to diagnose base expressions with
+ // obviously wrong types, e.g.:
+ //
+ // T* t;
+ // t.f;
+ //
+ // In Obj-C++, however, the above expression is valid, since it could be
+ // accessing the 'f' property if T is an Obj-C interface. The extra check
+ // allows this, while still reporting an error if T is a struct pointer.
+ if (!IsArrow) {
+ const PointerType *PT = BaseType->getAs<PointerType>();
+ if (PT && (!getLangOpts().ObjC1 ||
+ PT->getPointeeType()->isRecordType())) {
+ assert(BaseExpr && "cannot happen with implicit member accesses");
+ Diag(OpLoc, diag::err_typecheck_member_reference_struct_union)
+ << BaseType << BaseExpr->getSourceRange() << NameInfo.getSourceRange();
+ return ExprError();
+ }
+ }
+
+ assert(BaseType->isDependentType() ||
+ NameInfo.getName().isDependentName() ||
+ isDependentScopeSpecifier(SS));
+
+ // Get the type being accessed in BaseType. If this is an arrow, the BaseExpr
+ // must have pointer type, and the accessed type is the pointee.
+ return CXXDependentScopeMemberExpr::Create(
+ Context, BaseExpr, BaseType, IsArrow, OpLoc,
+ SS.getWithLocInContext(Context), TemplateKWLoc, FirstQualifierInScope,
+ NameInfo, TemplateArgs);
+}
+
+/// We know that the given qualified member reference points only to
+/// declarations which do not belong to the static type of the base
+/// expression. Diagnose the problem.
+static void DiagnoseQualifiedMemberReference(Sema &SemaRef,
+ Expr *BaseExpr,
+ QualType BaseType,
+ const CXXScopeSpec &SS,
+ NamedDecl *rep,
+ const DeclarationNameInfo &nameInfo) {
+ // If this is an implicit member access, use a different set of
+ // diagnostics.
+ if (!BaseExpr)
+ return diagnoseInstanceReference(SemaRef, SS, rep, nameInfo);
+
+ SemaRef.Diag(nameInfo.getLoc(), diag::err_qualified_member_of_unrelated)
+ << SS.getRange() << rep << BaseType;
+}
+
+// Check whether the declarations we found through a nested-name
+// specifier in a member expression are actually members of the base
+// type. The restriction here is:
+//
+// C++ [expr.ref]p2:
+// ... In these cases, the id-expression shall name a
+// member of the class or of one of its base classes.
+//
+// So it's perfectly legitimate for the nested-name specifier to name
+// an unrelated class, and for us to find an overload set including
+// decls from classes which are not superclasses, as long as the decl
+// we actually pick through overload resolution is from a superclass.
+bool Sema::CheckQualifiedMemberReference(Expr *BaseExpr,
+ QualType BaseType,
+ const CXXScopeSpec &SS,
+ const LookupResult &R) {
+ CXXRecordDecl *BaseRecord =
+ cast_or_null<CXXRecordDecl>(computeDeclContext(BaseType));
+ if (!BaseRecord) {
+ // We can't check this yet because the base type is still
+ // dependent.
+ assert(BaseType->isDependentType());
+ return false;
+ }
+
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
+ // If this is an implicit member reference and we find a
+ // non-instance member, it's not an error.
+ if (!BaseExpr && !(*I)->isCXXInstanceMember())
+ return false;
+
+ // Note that we use the DC of the decl, not the underlying decl.
+ DeclContext *DC = (*I)->getDeclContext();
+ while (DC->isTransparentContext())
+ DC = DC->getParent();
+
+ if (!DC->isRecord())
+ continue;
+
+ CXXRecordDecl *MemberRecord = cast<CXXRecordDecl>(DC)->getCanonicalDecl();
+ if (BaseRecord->getCanonicalDecl() == MemberRecord ||
+ !BaseRecord->isProvablyNotDerivedFrom(MemberRecord))
+ return false;
+ }
+
+ DiagnoseQualifiedMemberReference(*this, BaseExpr, BaseType, SS,
+ R.getRepresentativeDecl(),
+ R.getLookupNameInfo());
+ return true;
+}
+
+namespace {
+
+// Callback to only accept typo corrections that are either a ValueDecl or a
+// FunctionTemplateDecl and are declared in the current record or, for a C++
+// class, in one of its base classes.
+class RecordMemberExprValidatorCCC : public CorrectionCandidateCallback {
+public:
+ explicit RecordMemberExprValidatorCCC(const RecordType *RTy)
+ : Record(RTy->getDecl()) {
+ // Don't add bare keywords to the consumer since they will always fail
+ // validation by virtue of not being associated with any decls.
+ WantTypeSpecifiers = false;
+ WantExpressionKeywords = false;
+ WantCXXNamedCasts = false;
+ WantFunctionLikeCasts = false;
+ WantRemainingKeywords = false;
+ }
+
+ bool ValidateCandidate(const TypoCorrection &candidate) override {
+ NamedDecl *ND = candidate.getCorrectionDecl();
+ // Don't accept candidates that cannot be member functions, constants,
+ // variables, or templates.
+ if (!ND || !(isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND)))
+ return false;
+
+ // Accept candidates that occur in the current record.
+ if (Record->containsDecl(ND))
+ return true;
+
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Record)) {
+ // Accept candidates that occur in any of the current class' base classes.
+ for (const auto &BS : RD->bases()) {
+ if (const RecordType *BSTy =
+ dyn_cast_or_null<RecordType>(BS.getType().getTypePtrOrNull())) {
+ if (BSTy->getDecl()->containsDecl(ND))
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+private:
+ const RecordDecl *const Record;
+};
+
+}
+
+static bool LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
+ Expr *BaseExpr,
+ const RecordType *RTy,
+ SourceLocation OpLoc, bool IsArrow,
+ CXXScopeSpec &SS, bool HasTemplateArgs,
+ TypoExpr *&TE) {
+ SourceRange BaseRange = BaseExpr ? BaseExpr->getSourceRange() : SourceRange();
+ RecordDecl *RDecl = RTy->getDecl();
+ if (!SemaRef.isThisOutsideMemberFunctionBody(QualType(RTy, 0)) &&
+ SemaRef.RequireCompleteType(OpLoc, QualType(RTy, 0),
+ diag::err_typecheck_incomplete_tag,
+ BaseRange))
+ return true;
+
+ if (HasTemplateArgs) {
+ // LookupTemplateName doesn't expect these both to exist simultaneously.
+ QualType ObjectType = SS.isSet() ? QualType() : QualType(RTy, 0);
+
+ bool MOUS;
+ SemaRef.LookupTemplateName(R, nullptr, SS, ObjectType, false, MOUS);
+ return false;
+ }
+
+ DeclContext *DC = RDecl;
+ if (SS.isSet()) {
+ // If the member name was a qualified-id, look into the
+ // nested-name-specifier.
+ DC = SemaRef.computeDeclContext(SS, false);
+
+ if (SemaRef.RequireCompleteDeclContext(SS, DC)) {
+ SemaRef.Diag(SS.getRange().getEnd(), diag::err_typecheck_incomplete_tag)
+ << SS.getRange() << DC;
+ return true;
+ }
+
+ assert(DC && "Cannot handle non-computable dependent contexts in lookup");
+
+ if (!isa<TypeDecl>(DC)) {
+ SemaRef.Diag(R.getNameLoc(), diag::err_qualified_member_nonclass)
+ << DC << SS.getRange();
+ return true;
+ }
+ }
+
+ // The record definition is complete, now look up the member.
+ SemaRef.LookupQualifiedName(R, DC, SS);
+
+ if (!R.empty())
+ return false;
+
+ DeclarationName Typo = R.getLookupName();
+ SourceLocation TypoLoc = R.getNameLoc();
+
+ struct QueryState {
+ Sema &SemaRef;
+ DeclarationNameInfo NameInfo;
+ Sema::LookupNameKind LookupKind;
+ Sema::RedeclarationKind Redecl;
+ };
+ QueryState Q = {R.getSema(), R.getLookupNameInfo(), R.getLookupKind(),
+ R.isForRedeclaration() ? Sema::ForRedeclaration
+ : Sema::NotForRedeclaration};
+ TE = SemaRef.CorrectTypoDelayed(
+ R.getLookupNameInfo(), R.getLookupKind(), nullptr, &SS,
+ llvm::make_unique<RecordMemberExprValidatorCCC>(RTy),
+ [=, &SemaRef](const TypoCorrection &TC) {
+ if (TC) {
+ assert(!TC.isKeyword() &&
+ "Got a keyword as a correction for a member!");
+ bool DroppedSpecifier =
+ TC.WillReplaceSpecifier() &&
+ Typo.getAsString() == TC.getAsString(SemaRef.getLangOpts());
+ SemaRef.diagnoseTypo(TC, SemaRef.PDiag(diag::err_no_member_suggest)
+ << Typo << DC << DroppedSpecifier
+ << SS.getRange());
+ } else {
+ SemaRef.Diag(TypoLoc, diag::err_no_member) << Typo << DC << BaseRange;
+ }
+ },
+ [=](Sema &SemaRef, TypoExpr *TE, TypoCorrection TC) mutable {
+ LookupResult R(Q.SemaRef, Q.NameInfo, Q.LookupKind, Q.Redecl);
+ R.clear(); // Ensure there are no decls lingering in the shared state.
+ R.suppressDiagnostics();
+ R.setLookupName(TC.getCorrection());
+ for (NamedDecl *ND : TC)
+ R.addDecl(ND);
+ R.resolveKind();
+ return SemaRef.BuildMemberReferenceExpr(
+ BaseExpr, BaseExpr->getType(), OpLoc, IsArrow, SS, SourceLocation(),
+ nullptr, R, nullptr, nullptr);
+ },
+ Sema::CTK_ErrorRecovery, DC);
+
+ return false;
+}
+
+static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
+ ExprResult &BaseExpr, bool &IsArrow,
+ SourceLocation OpLoc, CXXScopeSpec &SS,
+ Decl *ObjCImpDecl, bool HasTemplateArgs);
+
+ExprResult
+Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
+ SourceLocation OpLoc, bool IsArrow,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierInScope,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs,
+ const Scope *S,
+ ActOnMemberAccessExtraArgs *ExtraArgs) {
+ if (BaseType->isDependentType() ||
+ (SS.isSet() && isDependentScopeSpecifier(SS)))
+ return ActOnDependentMemberExpr(Base, BaseType,
+ IsArrow, OpLoc,
+ SS, TemplateKWLoc, FirstQualifierInScope,
+ NameInfo, TemplateArgs);
+
+ LookupResult R(*this, NameInfo, LookupMemberName);
+
+ // Implicit member accesses.
+ if (!Base) {
+ TypoExpr *TE = nullptr;
+ QualType RecordTy = BaseType;
+ if (IsArrow) RecordTy = RecordTy->getAs<PointerType>()->getPointeeType();
+ if (LookupMemberExprInRecord(*this, R, nullptr,
+ RecordTy->getAs<RecordType>(), OpLoc, IsArrow,
+ SS, TemplateArgs != nullptr, TE))
+ return ExprError();
+ if (TE)
+ return TE;
+
+ // Explicit member accesses.
+ } else {
+ ExprResult BaseResult = Base;
+ ExprResult Result = LookupMemberExpr(
+ *this, R, BaseResult, IsArrow, OpLoc, SS,
+ ExtraArgs ? ExtraArgs->ObjCImpDecl : nullptr,
+ TemplateArgs != nullptr);
+
+ if (BaseResult.isInvalid())
+ return ExprError();
+ Base = BaseResult.get();
+
+ if (Result.isInvalid())
+ return ExprError();
+
+ if (Result.get())
+ return Result;
+
+ // LookupMemberExpr can modify Base, and thus change BaseType
+ BaseType = Base->getType();
+ }
+
+ return BuildMemberReferenceExpr(Base, BaseType,
+ OpLoc, IsArrow, SS, TemplateKWLoc,
+ FirstQualifierInScope, R, TemplateArgs, S,
+ false, ExtraArgs);
+}
+
+static ExprResult
+BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
+ SourceLocation OpLoc, const CXXScopeSpec &SS,
+ FieldDecl *Field, DeclAccessPair FoundDecl,
+ const DeclarationNameInfo &MemberNameInfo);
+
+ExprResult
+Sema::BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS,
+ SourceLocation loc,
+ IndirectFieldDecl *indirectField,
+ DeclAccessPair foundDecl,
+ Expr *baseObjectExpr,
+ SourceLocation opLoc) {
+ // First, build the expression that refers to the base object.
+
+ bool baseObjectIsPointer = false;
+ Qualifiers baseQuals;
+
+ // Case 1: the base of the indirect field is not a field.
+ VarDecl *baseVariable = indirectField->getVarDecl();
+ CXXScopeSpec EmptySS;
+ if (baseVariable) {
+ assert(baseVariable->getType()->isRecordType());
+
+ // In principle we could have a member access expression that
+ // accesses an anonymous struct/union that's a static member of
+ // the base object's class. However, under the current standard,
+ // static data members cannot be anonymous structs or unions.
+ // Supporting this is as easy as building a MemberExpr here.
+ assert(!baseObjectExpr && "anonymous struct/union is static data member?");
+
+ DeclarationNameInfo baseNameInfo(DeclarationName(), loc);
+
+ ExprResult result
+ = BuildDeclarationNameExpr(EmptySS, baseNameInfo, baseVariable);
+ if (result.isInvalid()) return ExprError();
+
+ baseObjectExpr = result.get();
+ baseObjectIsPointer = false;
+ baseQuals = baseObjectExpr->getType().getQualifiers();
+
+ // Case 2: the base of the indirect field is a field and the user
+ // wrote a member expression.
+ } else if (baseObjectExpr) {
+ // The caller provided the base object expression. Determine
+ // whether it's a pointer and whether it adds any qualifiers to the
+ // anonymous struct/union fields we're looking into.
+ QualType objectType = baseObjectExpr->getType();
+
+ if (const PointerType *ptr = objectType->getAs<PointerType>()) {
+ baseObjectIsPointer = true;
+ objectType = ptr->getPointeeType();
+ } else {
+ baseObjectIsPointer = false;
+ }
+ baseQuals = objectType.getQualifiers();
+
+ // Case 3: the base of the indirect field is a field and we should
+ // build an implicit member access.
+ } else {
+ // We've found a member of an anonymous struct/union that is
+ // inside a non-anonymous struct/union, so in a well-formed
+ // program our base object expression is "this".
+ QualType ThisTy = getCurrentThisType();
+ if (ThisTy.isNull()) {
+ Diag(loc, diag::err_invalid_member_use_in_static_method)
+ << indirectField->getDeclName();
+ return ExprError();
+ }
+
+ // Our base object expression is "this".
+ CheckCXXThisCapture(loc);
+ baseObjectExpr
+ = new (Context) CXXThisExpr(loc, ThisTy, /*isImplicit=*/ true);
+ baseObjectIsPointer = true;
+ baseQuals = ThisTy->castAs<PointerType>()->getPointeeType().getQualifiers();
+ }
+
+ // Build the implicit member references to the field of the
+ // anonymous struct/union.
+ Expr *result = baseObjectExpr;
+ IndirectFieldDecl::chain_iterator
+ FI = indirectField->chain_begin(), FEnd = indirectField->chain_end();
+
+ // Build the first member access in the chain with full information.
+ if (!baseVariable) {
+ FieldDecl *field = cast<FieldDecl>(*FI);
+
+ // Make a nameInfo that properly uses the anonymous name.
+ DeclarationNameInfo memberNameInfo(field->getDeclName(), loc);
+
+ result = BuildFieldReferenceExpr(*this, result, baseObjectIsPointer,
+ SourceLocation(), EmptySS, field,
+ foundDecl, memberNameInfo).get();
+ if (!result)
+ return ExprError();
+
+ // FIXME: check qualified member access
+ }
+
+ // In all cases, we should now skip the first declaration in the chain.
+ ++FI;
+
+ while (FI != FEnd) {
+ FieldDecl *field = cast<FieldDecl>(*FI++);
+
+ // FIXME: these are somewhat meaningless
+ DeclarationNameInfo memberNameInfo(field->getDeclName(), loc);
+ DeclAccessPair fakeFoundDecl =
+ DeclAccessPair::make(field, field->getAccess());
+
+ result =
+ BuildFieldReferenceExpr(*this, result, /*isarrow*/ false,
+ SourceLocation(), (FI == FEnd ? SS : EmptySS),
+ field, fakeFoundDecl, memberNameInfo).get();
+ }
+
+ return result;
+}
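+
+// For illustration (a hedged sketch, not part of this change): given
+// struct S { union { int a; }; };
+// S s;
+// 's.a' names a member of the anonymous union, and the IndirectFieldDecl
+// chain above builds the implicit 's.<anonymous>.a' member accesses.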
+
+static ExprResult
+BuildMSPropertyRefExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
+ const CXXScopeSpec &SS,
+ MSPropertyDecl *PD,
+ const DeclarationNameInfo &NameInfo) {
+ // Property names are always simple identifiers and therefore never
+ // require any interesting additional storage.
+ return new (S.Context) MSPropertyRefExpr(BaseExpr, PD, IsArrow,
+ S.Context.PseudoObjectTy, VK_LValue,
+ SS.getWithLocInContext(S.Context),
+ NameInfo.getLoc());
+}
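+
+// For illustration (assumed usage of the MS extension, not from this
+// change):
+// struct S {
+// int get_x();
+// void set_x(int);
+// __declspec(property(get = get_x, put = set_x)) int x;
+// };
+// S s;
+// 's.x = 1' is later rewritten to 's.set_x(1)' through the
+// MSPropertyRefExpr built here.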
+
+/// \brief Build a MemberExpr AST node.
+static MemberExpr *BuildMemberExpr(
+ Sema &SemaRef, ASTContext &C, Expr *Base, bool isArrow,
+ SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
+ ValueDecl *Member, DeclAccessPair FoundDecl,
+ const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK,
+ ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr) {
+ assert((!isArrow || Base->isRValue()) && "-> base must be a pointer rvalue");
+ MemberExpr *E = MemberExpr::Create(
+ C, Base, isArrow, OpLoc, SS.getWithLocInContext(C), TemplateKWLoc, Member,
+ FoundDecl, MemberNameInfo, TemplateArgs, Ty, VK, OK);
+ SemaRef.MarkMemberReferenced(E);
+ return E;
+}
+
+/// \brief Determine if the given scope is within a function-try-block handler.
+static bool IsInFnTryBlockHandler(const Scope *S) {
+ // Walk the scope stack until finding a FnTryCatchScope, or leave the
+ // function scope. If a FnTryCatchScope is found, check whether the TryScope
+ // flag is set. If it is not, it's a function-try-block handler.
+ for (; S != S->getFnParent(); S = S->getParent()) {
+ if (S->getFlags() & Scope::FnTryCatchScope)
+ return (S->getFlags() & Scope::TryScope) != Scope::TryScope;
+ }
+ return false;
+}
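+
+// For illustration (hedged example of what this detects):
+// struct S {
+// int m;
+// S() try : m(0) {} catch (...) { m; }
+// };
+// Referring to 'm' in the handler has undefined behavior per
+// [except.handle]p10; the TryScope flag distinguishes the try-block itself
+// from its handlers.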
+
+ExprResult
+Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
+ SourceLocation OpLoc, bool IsArrow,
+ const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierInScope,
+ LookupResult &R,
+ const TemplateArgumentListInfo *TemplateArgs,
+ const Scope *S,
+ bool SuppressQualifierCheck,
+ ActOnMemberAccessExtraArgs *ExtraArgs) {
+ QualType BaseType = BaseExprType;
+ if (IsArrow) {
+ assert(BaseType->isPointerType());
+ BaseType = BaseType->castAs<PointerType>()->getPointeeType();
+ }
+ R.setBaseObjectType(BaseType);
+
+ LambdaScopeInfo *const CurLSI = getCurLambda();
+ // If this is an implicit member reference (i.e. BaseExpr is null) and the
+ // overloaded name refers to both static and non-static member functions,
+ // and we are currently processing a lambda, check whether we should/can
+ // capture 'this'...
+ // Keep this example in mind:
+ // struct X {
+ // void f(int) { }
+ // static void f(double) { }
+ //
+ // int g() {
+ // auto L = [=](auto a) {
+ // return [](int i) {
+ // return [=](auto b) {
+ // f(b);
+ // //f(decltype(a){});
+ // };
+ // };
+ // };
+ // auto M = L(0.0);
+ // auto N = M(3);
+ // N(5.32); // OK, must not error.
+ // return 0;
+ // }
+ // };
+ //
+ if (!BaseExpr && CurLSI) {
+ SourceLocation Loc = R.getNameLoc();
+ if (SS.getRange().isValid())
+ Loc = SS.getRange().getBegin();
+ DeclContext *EnclosingFunctionCtx = CurContext->getParent()->getParent();
+ // If the enclosing function is not dependent, then this lambda is
+ // capture ready, so if we can capture this, do so.
+ if (!EnclosingFunctionCtx->isDependentContext()) {
+ // If the current lambda and all enclosing lambdas can capture 'this' -
+ // then go ahead and capture 'this' (since our unresolved overload set
+ // contains both static and non-static member functions).
+ if (!CheckCXXThisCapture(Loc, /*Explicit*/false, /*Diagnose*/false))
+ CheckCXXThisCapture(Loc);
+ } else if (CurContext->isDependentContext()) {
+ // ... since this is an implicit member reference that might potentially
+ // involve a 'this' capture, mark 'this' for potential capture in
+ // enclosing lambdas.
+ if (CurLSI->ImpCaptureStyle != CurLSI->ImpCap_None)
+ CurLSI->addPotentialThisCapture(Loc);
+ }
+ }
+ const DeclarationNameInfo &MemberNameInfo = R.getLookupNameInfo();
+ DeclarationName MemberName = MemberNameInfo.getName();
+ SourceLocation MemberLoc = MemberNameInfo.getLoc();
+
+ if (R.isAmbiguous())
+ return ExprError();
+
+ // [except.handle]p10: Referring to any non-static member or base class of an
+ // object in the handler for a function-try-block of a constructor or
+ // destructor for that object results in undefined behavior.
+ const auto *FD = getCurFunctionDecl();
+ if (S && BaseExpr && FD &&
+ (isa<CXXDestructorDecl>(FD) || isa<CXXConstructorDecl>(FD)) &&
+ isa<CXXThisExpr>(BaseExpr->IgnoreImpCasts()) &&
+ IsInFnTryBlockHandler(S))
+ Diag(MemberLoc, diag::warn_cdtor_function_try_handler_mem_expr)
+ << isa<CXXDestructorDecl>(FD);
+
+ if (R.empty()) {
+ // Rederive where we looked up.
+ DeclContext *DC = (SS.isSet()
+ ? computeDeclContext(SS, false)
+ : BaseType->getAs<RecordType>()->getDecl());
+
+ if (ExtraArgs) {
+ ExprResult RetryExpr;
+ if (!IsArrow && BaseExpr) {
+ SFINAETrap Trap(*this, true);
+ ParsedType ObjectType;
+ bool MayBePseudoDestructor = false;
+ RetryExpr = ActOnStartCXXMemberReference(getCurScope(), BaseExpr,
+ OpLoc, tok::arrow, ObjectType,
+ MayBePseudoDestructor);
+ if (RetryExpr.isUsable() && !Trap.hasErrorOccurred()) {
+ CXXScopeSpec TempSS(SS);
+ RetryExpr = ActOnMemberAccessExpr(
+ ExtraArgs->S, RetryExpr.get(), OpLoc, tok::arrow, TempSS,
+ TemplateKWLoc, ExtraArgs->Id, ExtraArgs->ObjCImpDecl);
+ }
+ if (Trap.hasErrorOccurred())
+ RetryExpr = ExprError();
+ }
+ if (RetryExpr.isUsable()) {
+ Diag(OpLoc, diag::err_no_member_overloaded_arrow)
+ << MemberName << DC << FixItHint::CreateReplacement(OpLoc, "->");
+ return RetryExpr;
+ }
+ }
+
+ Diag(R.getNameLoc(), diag::err_no_member)
+ << MemberName << DC
+ << (BaseExpr ? BaseExpr->getSourceRange() : SourceRange());
+ return ExprError();
+ }
+
+ // Diagnose lookups that find only declarations from a non-base
+ // type. This is possible for either qualified lookups (which may
+ // have been qualified with an unrelated type) or implicit member
+ // expressions (which were found with unqualified lookup and thus
+ // may have come from an enclosing scope). Note that it's okay for
+ // lookup to find declarations from a non-base type as long as those
+ // aren't the ones picked by overload resolution.
+ if ((SS.isSet() || !BaseExpr ||
+ (isa<CXXThisExpr>(BaseExpr) &&
+ cast<CXXThisExpr>(BaseExpr)->isImplicit())) &&
+ !SuppressQualifierCheck &&
+ CheckQualifiedMemberReference(BaseExpr, BaseType, SS, R))
+ return ExprError();
+
+ // Construct an unresolved result if we in fact got an unresolved
+ // result.
+ if (R.isOverloadedResult() || R.isUnresolvableResult()) {
+ // Suppress any lookup-related diagnostics; we'll do these when we
+ // pick a member.
+ R.suppressDiagnostics();
+
+ UnresolvedMemberExpr *MemExpr
+ = UnresolvedMemberExpr::Create(Context, R.isUnresolvableResult(),
+ BaseExpr, BaseExprType,
+ IsArrow, OpLoc,
+ SS.getWithLocInContext(Context),
+ TemplateKWLoc, MemberNameInfo,
+ TemplateArgs, R.begin(), R.end());
+
+ return MemExpr;
+ }
+
+ assert(R.isSingleResult());
+ DeclAccessPair FoundDecl = R.begin().getPair();
+ NamedDecl *MemberDecl = R.getFoundDecl();
+
+ // FIXME: diagnose the presence of template arguments now.
+
+ // If the decl being referenced had an error, return an error for this
+ // sub-expr without emitting another error, in order to avoid cascading
+ // error cases.
+ if (MemberDecl->isInvalidDecl())
+ return ExprError();
+
+ // Handle the implicit-member-access case.
+ if (!BaseExpr) {
+ // If this is not an instance member, convert to a non-member access.
+ if (!MemberDecl->isCXXInstanceMember())
+ return BuildDeclarationNameExpr(SS, R.getLookupNameInfo(), MemberDecl);
+
+ SourceLocation Loc = R.getNameLoc();
+ if (SS.getRange().isValid())
+ Loc = SS.getRange().getBegin();
+ CheckCXXThisCapture(Loc);
+ BaseExpr = new (Context) CXXThisExpr(Loc, BaseExprType,/*isImplicit=*/true);
+ }
+
+ // Check the use of this member.
+ if (DiagnoseUseOfDecl(MemberDecl, MemberLoc))
+ return ExprError();
+
+ if (FieldDecl *FD = dyn_cast<FieldDecl>(MemberDecl))
+ return BuildFieldReferenceExpr(*this, BaseExpr, IsArrow, OpLoc, SS, FD,
+ FoundDecl, MemberNameInfo);
+
+ if (MSPropertyDecl *PD = dyn_cast<MSPropertyDecl>(MemberDecl))
+ return BuildMSPropertyRefExpr(*this, BaseExpr, IsArrow, SS, PD,
+ MemberNameInfo);
+
+ if (IndirectFieldDecl *FD = dyn_cast<IndirectFieldDecl>(MemberDecl))
+ // We may have found a field within an anonymous union or struct
+ // (C++ [class.union]).
+ return BuildAnonymousStructUnionMemberReference(SS, MemberLoc, FD,
+ FoundDecl, BaseExpr,
+ OpLoc);
+
+ if (VarDecl *Var = dyn_cast<VarDecl>(MemberDecl)) {
+ return BuildMemberExpr(*this, Context, BaseExpr, IsArrow, OpLoc, SS,
+ TemplateKWLoc, Var, FoundDecl, MemberNameInfo,
+ Var->getType().getNonReferenceType(), VK_LValue,
+ OK_Ordinary);
+ }
+
+ if (CXXMethodDecl *MemberFn = dyn_cast<CXXMethodDecl>(MemberDecl)) {
+ ExprValueKind valueKind;
+ QualType type;
+ if (MemberFn->isInstance()) {
+ valueKind = VK_RValue;
+ type = Context.BoundMemberTy;
+ } else {
+ valueKind = VK_LValue;
+ type = MemberFn->getType();
+ }
+
+ return BuildMemberExpr(*this, Context, BaseExpr, IsArrow, OpLoc, SS,
+ TemplateKWLoc, MemberFn, FoundDecl, MemberNameInfo,
+ type, valueKind, OK_Ordinary);
+ }
+ assert(!isa<FunctionDecl>(MemberDecl) && "member function not C++ method?");
+
+ if (EnumConstantDecl *Enum = dyn_cast<EnumConstantDecl>(MemberDecl)) {
+ return BuildMemberExpr(*this, Context, BaseExpr, IsArrow, OpLoc, SS,
+ TemplateKWLoc, Enum, FoundDecl, MemberNameInfo,
+ Enum->getType(), VK_RValue, OK_Ordinary);
+ }
+
+ // We found something that we didn't expect. Complain.
+ if (isa<TypeDecl>(MemberDecl))
+ Diag(MemberLoc, diag::err_typecheck_member_reference_type)
+ << MemberName << BaseType << int(IsArrow);
+ else
+ Diag(MemberLoc, diag::err_typecheck_member_reference_unknown)
+ << MemberName << BaseType << int(IsArrow);
+
+ Diag(MemberDecl->getLocation(), diag::note_member_declared_here)
+ << MemberName;
+ R.suppressDiagnostics();
+ return ExprError();
+}
+
+/// Given that normal member access failed on the given expression,
+/// and given that the expression's type involves builtin-id or
+/// builtin-Class, decide whether substituting in the redefinition
+/// types would be profitable. The redefinition type is whatever
+/// this translation unit tried to typedef to id/Class; we store
+/// it to the side and then re-use it in places like this.
+static bool ShouldTryAgainWithRedefinitionType(Sema &S, ExprResult &base) {
+ const ObjCObjectPointerType *opty
+ = base.get()->getType()->getAs<ObjCObjectPointerType>();
+ if (!opty) return false;
+
+ const ObjCObjectType *ty = opty->getObjectType();
+
+ QualType redef;
+ if (ty->isObjCId()) {
+ redef = S.Context.getObjCIdRedefinitionType();
+ } else if (ty->isObjCClass()) {
+ redef = S.Context.getObjCClassRedefinitionType();
+ } else {
+ return false;
+ }
+
+ // Do the substitution as long as the redefinition type isn't just a
+ // possibly-qualified pointer to builtin-id or builtin-Class again.
+ opty = redef->getAs<ObjCObjectPointerType>();
+ if (opty && !opty->getObjectType()->getInterface())
+ return false;
+
+ base = S.ImpCastExprToType(base.get(), redef, CK_BitCast);
+ return true;
+}
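+
+// For illustration (an assumed setup, not part of this change): a
+// translation unit may redefine 'id', e.g.
+// typedef struct objc_object { Class isa; } *id;
+// in which case a failed member lookup on builtin 'id' is retried with
+// this richer redefinition type.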
+
+static bool isRecordType(QualType T) {
+ return T->isRecordType();
+}
+static bool isPointerToRecordType(QualType T) {
+ if (const PointerType *PT = T->getAs<PointerType>())
+ return PT->getPointeeType()->isRecordType();
+ return false;
+}
+
+/// Perform conversions on the LHS of a member access expression.
+ExprResult
+Sema::PerformMemberExprBaseConversion(Expr *Base, bool IsArrow) {
+ if (IsArrow && !Base->getType()->isFunctionType())
+ return DefaultFunctionArrayLvalueConversion(Base);
+
+ return CheckPlaceholderExpr(Base);
+}
+
+/// Look up the given member of the given non-type-dependent
+/// expression. This can return in one of two ways:
+/// * If it returns a sentinel null-but-valid result, the caller will
+/// assume that lookup was performed and the results written into
+/// the provided structure. It will take over from there.
+/// * Otherwise, the returned expression will be produced in place of
+/// an ordinary member expression.
+///
+/// The ObjCImpDecl bit is a gross hack that will need to be properly
+/// fixed for ObjC++.
+static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
+ ExprResult &BaseExpr, bool &IsArrow,
+ SourceLocation OpLoc, CXXScopeSpec &SS,
+ Decl *ObjCImpDecl, bool HasTemplateArgs) {
+ assert(BaseExpr.get() && "no base expression");
+
+ // Perform default conversions.
+ BaseExpr = S.PerformMemberExprBaseConversion(BaseExpr.get(), IsArrow);
+ if (BaseExpr.isInvalid())
+ return ExprError();
+
+ QualType BaseType = BaseExpr.get()->getType();
+ assert(!BaseType->isDependentType());
+
+ DeclarationName MemberName = R.getLookupName();
+ SourceLocation MemberLoc = R.getNameLoc();
+
+ // For later type-checking purposes, turn arrow accesses into dot
+ // accesses. The only access type we support that doesn't follow
+ // the C equivalence "a->b === (*a).b" is ObjC property accesses,
+ // and those never use arrows, so this is unaffected.
+ if (IsArrow) {
+ if (const PointerType *Ptr = BaseType->getAs<PointerType>())
+ BaseType = Ptr->getPointeeType();
+ else if (const ObjCObjectPointerType *Ptr
+ = BaseType->getAs<ObjCObjectPointerType>())
+ BaseType = Ptr->getPointeeType();
+ else if (BaseType->isRecordType()) {
+ // Recover from arrow accesses to records, e.g.:
+ // struct MyRecord foo;
+ // foo->bar
+ // This is actually well-formed in C++ if MyRecord has an
+ // overloaded operator->, but that should have been dealt with
+ // by now--or a diagnostic message already issued if a problem
+ // was encountered while looking for the overloaded operator->.
+ if (!S.getLangOpts().CPlusPlus) {
+ S.Diag(OpLoc, diag::err_typecheck_member_reference_suggestion)
+ << BaseType << int(IsArrow) << BaseExpr.get()->getSourceRange()
+ << FixItHint::CreateReplacement(OpLoc, ".");
+ }
+ IsArrow = false;
+ } else if (BaseType->isFunctionType()) {
+ goto fail;
+ } else {
+ S.Diag(MemberLoc, diag::err_typecheck_member_reference_arrow)
+ << BaseType << BaseExpr.get()->getSourceRange();
+ return ExprError();
+ }
+ }
+
+ // Handle field access to simple records.
+ if (const RecordType *RTy = BaseType->getAs<RecordType>()) {
+ TypoExpr *TE = nullptr;
+ if (LookupMemberExprInRecord(S, R, BaseExpr.get(), RTy,
+ OpLoc, IsArrow, SS, HasTemplateArgs, TE))
+ return ExprError();
+
+ // Returning valid-but-null is how we indicate to the caller that
+ // the lookup result was filled in. If typo correction was attempted and
+ // failed, the lookup result will have been cleared--that combined with the
+ // valid-but-null ExprResult will trigger the appropriate diagnostics.
+ return ExprResult(TE);
+ }
+
+ // Handle ivar access to Objective-C objects.
+ if (const ObjCObjectType *OTy = BaseType->getAs<ObjCObjectType>()) {
+ if (!SS.isEmpty() && !SS.isInvalid()) {
+ S.Diag(SS.getRange().getBegin(), diag::err_qualified_objc_access)
+ << 1 << SS.getScopeRep()
+ << FixItHint::CreateRemoval(SS.getRange());
+ SS.clear();
+ }
+
+ IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
+
+ // There are three cases for the base type:
+ // - builtin id (qualified or unqualified)
+ // - builtin Class (qualified or unqualified)
+ // - an interface
+ ObjCInterfaceDecl *IDecl = OTy->getInterface();
+ if (!IDecl) {
+ if (S.getLangOpts().ObjCAutoRefCount &&
+ (OTy->isObjCId() || OTy->isObjCClass()))
+ goto fail;
+ // There's an implicit 'isa' ivar on all objects.
+ // But we only actually find it this way on objects of type 'id',
+ // apparently.
+ if (OTy->isObjCId() && Member->isStr("isa"))
+ return new (S.Context) ObjCIsaExpr(BaseExpr.get(), IsArrow, MemberLoc,
+ OpLoc, S.Context.getObjCClassType());
+ if (ShouldTryAgainWithRedefinitionType(S, BaseExpr))
+ return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+ goto fail;
+ }
+
+ if (S.RequireCompleteType(OpLoc, BaseType,
+ diag::err_typecheck_incomplete_tag,
+ BaseExpr.get()))
+ return ExprError();
+
+ ObjCInterfaceDecl *ClassDeclared = nullptr;
+ ObjCIvarDecl *IV = IDecl->lookupInstanceVariable(Member, ClassDeclared);
+
+ if (!IV) {
+ // Attempt to correct for typos in ivar names.
+ auto Validator = llvm::make_unique<DeclFilterCCC<ObjCIvarDecl>>();
+ Validator->IsObjCIvarLookup = IsArrow;
+ if (TypoCorrection Corrected = S.CorrectTypo(
+ R.getLookupNameInfo(), Sema::LookupMemberName, nullptr, nullptr,
+ std::move(Validator), Sema::CTK_ErrorRecovery, IDecl)) {
+ IV = Corrected.getCorrectionDeclAs<ObjCIvarDecl>();
+ S.diagnoseTypo(
+ Corrected,
+ S.PDiag(diag::err_typecheck_member_reference_ivar_suggest)
+ << IDecl->getDeclName() << MemberName);
+
+ // Figure out the class that declares the ivar.
+ assert(!ClassDeclared);
+ Decl *D = cast<Decl>(IV->getDeclContext());
+ if (ObjCCategoryDecl *CAT = dyn_cast<ObjCCategoryDecl>(D))
+ D = CAT->getClassInterface();
+ ClassDeclared = cast<ObjCInterfaceDecl>(D);
+ } else {
+ if (IsArrow && IDecl->FindPropertyDeclaration(Member)) {
+ S.Diag(MemberLoc, diag::err_property_found_suggest)
+ << Member << BaseExpr.get()->getType()
+ << FixItHint::CreateReplacement(OpLoc, ".");
+ return ExprError();
+ }
+
+ S.Diag(MemberLoc, diag::err_typecheck_member_reference_ivar)
+ << IDecl->getDeclName() << MemberName
+ << BaseExpr.get()->getSourceRange();
+ return ExprError();
+ }
+ }
+
+ assert(ClassDeclared);
+
+ // If the decl being referenced had an error, return an error for this
+ // sub-expr without emitting another error, in order to avoid cascading
+ // error cases.
+ if (IV->isInvalidDecl())
+ return ExprError();
+
+ // Check whether we can reference this field.
+ if (S.DiagnoseUseOfDecl(IV, MemberLoc))
+ return ExprError();
+ if (IV->getAccessControl() != ObjCIvarDecl::Public &&
+ IV->getAccessControl() != ObjCIvarDecl::Package) {
+ ObjCInterfaceDecl *ClassOfMethodDecl = nullptr;
+ if (ObjCMethodDecl *MD = S.getCurMethodDecl())
+ ClassOfMethodDecl = MD->getClassInterface();
+ else if (ObjCImpDecl && S.getCurFunctionDecl()) {
+ // Case of a c-function declared inside an objc implementation.
+ // FIXME: For a c-style function nested inside an objc implementation
+ // class, there is no implementation context available, so we pass
+ // down the context as an argument to this routine. Ideally, this context
+ // needs to be passed down in the AST node and somehow computed from the
+ // AST for a function decl.
+ if (ObjCImplementationDecl *IMPD =
+ dyn_cast<ObjCImplementationDecl>(ObjCImpDecl))
+ ClassOfMethodDecl = IMPD->getClassInterface();
+ else if (ObjCCategoryImplDecl* CatImplClass =
+ dyn_cast<ObjCCategoryImplDecl>(ObjCImpDecl))
+ ClassOfMethodDecl = CatImplClass->getClassInterface();
+ }
+ if (!S.getLangOpts().DebuggerSupport) {
+ if (IV->getAccessControl() == ObjCIvarDecl::Private) {
+ if (!declaresSameEntity(ClassDeclared, IDecl) ||
+ !declaresSameEntity(ClassOfMethodDecl, ClassDeclared))
+ S.Diag(MemberLoc, diag::error_private_ivar_access)
+ << IV->getDeclName();
+ } else if (!IDecl->isSuperClassOf(ClassOfMethodDecl))
+ // @protected
+ S.Diag(MemberLoc, diag::error_protected_ivar_access)
+ << IV->getDeclName();
+ }
+ }
+ bool warn = true;
+ if (S.getLangOpts().ObjCAutoRefCount) {
+ Expr *BaseExp = BaseExpr.get()->IgnoreParenImpCasts();
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(BaseExp))
+ if (UO->getOpcode() == UO_Deref)
+ BaseExp = UO->getSubExpr()->IgnoreParenCasts();
+
+ if (DeclRefExpr *DE = dyn_cast<DeclRefExpr>(BaseExp))
+ if (DE->getType().getObjCLifetime() == Qualifiers::OCL_Weak) {
+ S.Diag(DE->getLocation(), diag::error_arc_weak_ivar_access);
+ warn = false;
+ }
+ }
+ if (warn) {
+ if (ObjCMethodDecl *MD = S.getCurMethodDecl()) {
+ ObjCMethodFamily MF = MD->getMethodFamily();
+ warn = (MF != OMF_init && MF != OMF_dealloc &&
+ MF != OMF_finalize &&
+ !S.IvarBacksCurrentMethodAccessor(IDecl, MD, IV));
+ }
+ if (warn)
+ S.Diag(MemberLoc, diag::warn_direct_ivar_access) << IV->getDeclName();
+ }
+
+ ObjCIvarRefExpr *Result = new (S.Context) ObjCIvarRefExpr(
+ IV, IV->getUsageType(BaseType), MemberLoc, OpLoc, BaseExpr.get(),
+ IsArrow);
+
+ if (S.getLangOpts().ObjCAutoRefCount) {
+ if (IV->getType().getObjCLifetime() == Qualifiers::OCL_Weak) {
+ if (!S.Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, MemberLoc))
+ S.recordUseOfEvaluatedWeak(Result);
+ }
+ }
+
+ return Result;
+ }
+
+ // Objective-C property access.
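+ // For illustration, this covers dot accesses such as 'obj.name' on an
+ // Objective-C object pointer; they are modeled as pseudo-object property
+ // references rather than ordinary member accesses.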
+ const ObjCObjectPointerType *OPT;
+ if (!IsArrow && (OPT = BaseType->getAs<ObjCObjectPointerType>())) {
+ if (!SS.isEmpty() && !SS.isInvalid()) {
+ S.Diag(SS.getRange().getBegin(), diag::err_qualified_objc_access)
+ << 0 << SS.getScopeRep() << FixItHint::CreateRemoval(SS.getRange());
+ SS.clear();
+ }
+
+ // This actually uses the base as an r-value.
+ BaseExpr = S.DefaultLvalueConversion(BaseExpr.get());
+ if (BaseExpr.isInvalid())
+ return ExprError();
+
+ assert(S.Context.hasSameUnqualifiedType(BaseType,
+ BaseExpr.get()->getType()));
+
+ IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
+
+ const ObjCObjectType *OT = OPT->getObjectType();
+
+ // id, with and without qualifiers.
+ if (OT->isObjCId()) {
+ // Check protocols on qualified interfaces.
+ Selector Sel = S.PP.getSelectorTable().getNullarySelector(Member);
+ if (Decl *PMDecl =
+ FindGetterSetterNameDecl(OPT, Member, Sel, S.Context)) {
+ if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(PMDecl)) {
+ // Check the use of this declaration
+ if (S.DiagnoseUseOfDecl(PD, MemberLoc))
+ return ExprError();
+
+ return new (S.Context)
+ ObjCPropertyRefExpr(PD, S.Context.PseudoObjectTy, VK_LValue,
+ OK_ObjCProperty, MemberLoc, BaseExpr.get());
+ }
+
+ if (ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(PMDecl)) {
+ // Check the use of this method.
+ if (S.DiagnoseUseOfDecl(OMD, MemberLoc))
+ return ExprError();
+ Selector SetterSel =
+ SelectorTable::constructSetterSelector(S.PP.getIdentifierTable(),
+ S.PP.getSelectorTable(),
+ Member);
+ ObjCMethodDecl *SMD = nullptr;
+ if (Decl *SDecl = FindGetterSetterNameDecl(OPT,
+ /*Property id*/ nullptr,
+ SetterSel, S.Context))
+ SMD = dyn_cast<ObjCMethodDecl>(SDecl);
+
+ return new (S.Context)
+ ObjCPropertyRefExpr(OMD, SMD, S.Context.PseudoObjectTy, VK_LValue,
+ OK_ObjCProperty, MemberLoc, BaseExpr.get());
+ }
+ }
+ // Use of id.member can only be for a property reference. Do not
+ // use the 'id' redefinition in this case.
+ if (IsArrow && ShouldTryAgainWithRedefinitionType(S, BaseExpr))
+ return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+
+ return ExprError(S.Diag(MemberLoc, diag::err_property_not_found)
+ << MemberName << BaseType);
+ }
+
+ // 'Class', unqualified only.
+ if (OT->isObjCClass()) {
+ // Only works in a method declaration (??!).
+ ObjCMethodDecl *MD = S.getCurMethodDecl();
+ if (!MD) {
+ if (ShouldTryAgainWithRedefinitionType(S, BaseExpr))
+ return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+
+ goto fail;
+ }
+
+ // Also must look for a getter name which uses property syntax.
+ Selector Sel = S.PP.getSelectorTable().getNullarySelector(Member);
+ ObjCInterfaceDecl *IFace = MD->getClassInterface();
+ ObjCMethodDecl *Getter;
+ if ((Getter = IFace->lookupClassMethod(Sel))) {
+ // Check the use of this method.
+ if (S.DiagnoseUseOfDecl(Getter, MemberLoc))
+ return ExprError();
+ } else
+ Getter = IFace->lookupPrivateMethod(Sel, false);
+ // If we found a getter then this may be a valid dot-reference, we
+ // will look for the matching setter, in case it is needed.
+ Selector SetterSel =
+ SelectorTable::constructSetterSelector(S.PP.getIdentifierTable(),
+ S.PP.getSelectorTable(),
+ Member);
+ ObjCMethodDecl *Setter = IFace->lookupClassMethod(SetterSel);
+ if (!Setter) {
+ // If this reference is in an @implementation, also check for 'private'
+ // methods.
+ Setter = IFace->lookupPrivateMethod(SetterSel, false);
+ }
+
+ if (Setter && S.DiagnoseUseOfDecl(Setter, MemberLoc))
+ return ExprError();
+
+ if (Getter || Setter) {
+ return new (S.Context) ObjCPropertyRefExpr(
+ Getter, Setter, S.Context.PseudoObjectTy, VK_LValue,
+ OK_ObjCProperty, MemberLoc, BaseExpr.get());
+ }
+
+ if (ShouldTryAgainWithRedefinitionType(S, BaseExpr))
+ return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+
+ return ExprError(S.Diag(MemberLoc, diag::err_property_not_found)
+ << MemberName << BaseType);
+ }
+
+ // Normal property access.
+ return S.HandleExprPropertyRefExpr(OPT, BaseExpr.get(), OpLoc, MemberName,
+ MemberLoc, SourceLocation(), QualType(),
+ false);
+ }
+
+ // Handle 'field access' to vectors, such as 'V.xx'.
+ if (BaseType->isExtVectorType()) {
+ // FIXME: this expr should store IsArrow.
+ IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
+ ExprValueKind VK;
+ if (IsArrow)
+ VK = VK_LValue;
+ else {
+ if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(BaseExpr.get()))
+ VK = POE->getSyntacticForm()->getValueKind();
+ else
+ VK = BaseExpr.get()->getValueKind();
+ }
+ QualType ret = CheckExtVectorComponent(S, BaseType, VK, OpLoc,
+ Member, MemberLoc);
+ if (ret.isNull())
+ return ExprError();
+
+ return new (S.Context)
+ ExtVectorElementExpr(ret, VK, BaseExpr.get(), *Member, MemberLoc);
+ }
+
+ // Adjust builtin-sel to the appropriate redefinition type if that's
+ // not just a pointer to builtin-sel again.
+ if (IsArrow && BaseType->isSpecificBuiltinType(BuiltinType::ObjCSel) &&
+ !S.Context.getObjCSelRedefinitionType()->isObjCSelType()) {
+ BaseExpr = S.ImpCastExprToType(
+ BaseExpr.get(), S.Context.getObjCSelRedefinitionType(), CK_BitCast);
+ return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+ }
+
+ // Failure cases.
+ fail:
+
+ // Recover from dot accesses to pointers, e.g.:
+ // type *foo;
+ // foo.bar
+ // This is actually well-formed in two cases:
+ // - 'type' is an Objective C type
+ // - 'bar' is a pseudo-destructor name which happens to refer to
+ // the appropriate pointer type
+ if (const PointerType *Ptr = BaseType->getAs<PointerType>()) {
+ if (!IsArrow && Ptr->getPointeeType()->isRecordType() &&
+ MemberName.getNameKind() != DeclarationName::CXXDestructorName) {
+ S.Diag(OpLoc, diag::err_typecheck_member_reference_suggestion)
+ << BaseType << int(IsArrow) << BaseExpr.get()->getSourceRange()
+ << FixItHint::CreateReplacement(OpLoc, "->");
+
+ // Recurse as an -> access.
+ IsArrow = true;
+ return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+ }
+ }
+
+ // If the user is trying to apply -> or . to a function name, it's probably
+ // because they forgot parentheses to call that function.
+ if (S.tryToRecoverWithCall(
+ BaseExpr, S.PDiag(diag::err_member_reference_needs_call),
+ /*complain*/ false,
+ IsArrow ? &isPointerToRecordType : &isRecordType)) {
+ if (BaseExpr.isInvalid())
+ return ExprError();
+ BaseExpr = S.DefaultFunctionArrayConversion(BaseExpr.get());
+ return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+ }
+
+ S.Diag(OpLoc, diag::err_typecheck_member_reference_struct_union)
+ << BaseType << BaseExpr.get()->getSourceRange() << MemberLoc;
+
+ return ExprError();
+}
+
+/// The main callback when the parser finds something like
+/// expression . [nested-name-specifier] identifier
+/// expression -> [nested-name-specifier] identifier
+/// where 'identifier' encompasses a fairly broad spectrum of
+/// possibilities, including destructor and operator references.
+///
+/// \param OpKind either tok::arrow or tok::period
+/// \param ObjCImpDecl the current Objective-C \@implementation
+/// decl; this is an ugly hack around the fact that Objective-C
+/// \@implementations aren't properly put in the context chain
+ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ UnqualifiedId &Id,
+ Decl *ObjCImpDecl) {
+ if (SS.isSet() && SS.isInvalid())
+ return ExprError();
+
+ // Warn about the explicit constructor calls Microsoft extension.
+ if (getLangOpts().MicrosoftExt &&
+ Id.getKind() == UnqualifiedId::IK_ConstructorName)
+ Diag(Id.getSourceRange().getBegin(),
+ diag::ext_ms_explicit_constructor_call);
+
+ TemplateArgumentListInfo TemplateArgsBuffer;
+
+ // Decompose the name into its component parts.
+ DeclarationNameInfo NameInfo;
+ const TemplateArgumentListInfo *TemplateArgs;
+ DecomposeUnqualifiedId(Id, TemplateArgsBuffer,
+ NameInfo, TemplateArgs);
+
+ DeclarationName Name = NameInfo.getName();
+ bool IsArrow = (OpKind == tok::arrow);
+
+ NamedDecl *FirstQualifierInScope
+ = (!SS.isSet() ? nullptr : FindFirstQualifierInScope(S, SS.getScopeRep()));
+
+ // This is a postfix expression, so get rid of ParenListExprs.
+ ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Base);
+ if (Result.isInvalid()) return ExprError();
+ Base = Result.get();
+
+ if (Base->getType()->isDependentType() || Name.isDependentName() ||
+ isDependentScopeSpecifier(SS)) {
+ return ActOnDependentMemberExpr(Base, Base->getType(), IsArrow, OpLoc, SS,
+ TemplateKWLoc, FirstQualifierInScope,
+ NameInfo, TemplateArgs);
+ }
+
+ ActOnMemberAccessExtraArgs ExtraArgs = {S, Id, ObjCImpDecl};
+ return BuildMemberReferenceExpr(Base, Base->getType(), OpLoc, IsArrow, SS,
+ TemplateKWLoc, FirstQualifierInScope,
+ NameInfo, TemplateArgs, S, &ExtraArgs);
+}
+
+static ExprResult
+BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
+ SourceLocation OpLoc, const CXXScopeSpec &SS,
+ FieldDecl *Field, DeclAccessPair FoundDecl,
+ const DeclarationNameInfo &MemberNameInfo) {
+ // x.a is an l-value if 'a' has a reference type. Otherwise:
+ // x.a is an l-value/x-value/pr-value if the base is (and note
+ // that *x is always an l-value), except that if the base isn't
+ // an ordinary object then we must have an rvalue.
+ ExprValueKind VK = VK_LValue;
+ ExprObjectKind OK = OK_Ordinary;
+ if (!IsArrow) {
+ if (BaseExpr->getObjectKind() == OK_Ordinary)
+ VK = BaseExpr->getValueKind();
+ else
+ VK = VK_RValue;
+ }
+ if (VK != VK_RValue && Field->isBitField())
+ OK = OK_BitField;
+
+ // Figure out the type of the member; see C99 6.5.2.3p3, C++ [expr.ref]
+ QualType MemberType = Field->getType();
+ if (const ReferenceType *Ref = MemberType->getAs<ReferenceType>()) {
+ MemberType = Ref->getPointeeType();
+ VK = VK_LValue;
+ } else {
+ QualType BaseType = BaseExpr->getType();
+ if (IsArrow) BaseType = BaseType->getAs<PointerType>()->getPointeeType();
+
+ Qualifiers BaseQuals = BaseType.getQualifiers();
+
+ // GC attributes are never picked up by members.
+ BaseQuals.removeObjCGCAttr();
+
+ // CVR attributes from the base are picked up by members,
+ // except that 'mutable' members don't pick up 'const'.
+ if (Field->isMutable()) BaseQuals.removeConst();
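+ // For illustration: given 'const S s;' where S declares 'mutable int m;',
+ // 's.m' is non-const while every other member of 's' is const-qualified.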
+
+ Qualifiers MemberQuals
+ = S.Context.getCanonicalType(MemberType).getQualifiers();
+
+ assert(!MemberQuals.hasAddressSpace());
+
+ Qualifiers Combined = BaseQuals + MemberQuals;
+ if (Combined != MemberQuals)
+ MemberType = S.Context.getQualifiedType(MemberType, Combined);
+ }
+
+ S.UnusedPrivateFields.remove(Field);
+
+ ExprResult Base =
+ S.PerformObjectMemberConversion(BaseExpr, SS.getScopeRep(),
+ FoundDecl, Field);
+ if (Base.isInvalid())
+ return ExprError();
+ return BuildMemberExpr(S, S.Context, Base.get(), IsArrow, OpLoc, SS,
+ /*TemplateKWLoc=*/SourceLocation(), Field, FoundDecl,
+ MemberNameInfo, MemberType, VK, OK);
+}
+
+/// Builds an implicit member access expression. The current context
+/// is known to be an instance method, and the given unqualified lookup
+/// set is known to contain only instance members, at least one of which
+/// is from an appropriate type.
+ExprResult
+Sema::BuildImplicitMemberExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ LookupResult &R,
+ const TemplateArgumentListInfo *TemplateArgs,
+ bool IsKnownInstance, const Scope *S) {
+ assert(!R.empty() && !R.isAmbiguous());
+
+ SourceLocation loc = R.getNameLoc();
+
+ // If this is known to be an instance access, go ahead and build an
+ // implicit 'this' expression now.
+ QualType ThisTy = getCurrentThisType();
+ assert(!ThisTy.isNull() && "didn't correctly pre-flight capture of 'this'");
+
+ Expr *baseExpr = nullptr; // null signifies implicit access
+ if (IsKnownInstance) {
+ SourceLocation Loc = R.getNameLoc();
+ if (SS.getRange().isValid())
+ Loc = SS.getRange().getBegin();
+ CheckCXXThisCapture(Loc);
+ baseExpr = new (Context) CXXThisExpr(loc, ThisTy, /*isImplicit=*/true);
+ }
+
+ return BuildMemberReferenceExpr(baseExpr, ThisTy,
+ /*OpLoc*/ SourceLocation(),
+ /*IsArrow*/ true,
+ SS, TemplateKWLoc,
+ /*FirstQualifierInScope*/ nullptr,
+ R, TemplateArgs, S);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp
new file mode 100644
index 0000000..57a08b9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp
@@ -0,0 +1,4300 @@
+//===--- SemaExprObjC.cpp - Semantic Analysis for ObjC Expressions --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for Objective-C expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/Edit/Commit.h"
+#include "clang/Edit/Rewriters.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace sema;
+using llvm::makeArrayRef;
+
+ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs,
+ ArrayRef<Expr *> Strings) {
+ // Most ObjC strings are formed out of a single piece. However, we *can*
+ // have strings formed out of multiple @ strings with multiple pptokens in
+ // each one, e.g. @"foo" "bar" @"baz" "qux" which need to be turned into one
+ // StringLiteral for ObjCStringLiteral to hold onto.
+ StringLiteral *S = cast<StringLiteral>(Strings[0]);
+
+ // If we have a multi-part string, merge it all together.
+ if (Strings.size() != 1) {
+ // Concatenate objc strings.
+ SmallString<128> StrBuf;
+ SmallVector<SourceLocation, 8> StrLocs;
+
+ for (Expr *E : Strings) {
+ S = cast<StringLiteral>(E);
+
+ // ObjC strings can't be wide or UTF.
+ if (!S->isAscii()) {
+ Diag(S->getLocStart(), diag::err_cfstring_literal_not_string_constant)
+ << S->getSourceRange();
+ return true;
+ }
+
+ // Append the string.
+ StrBuf += S->getString();
+
+ // Get the locations of the string tokens.
+ StrLocs.append(S->tokloc_begin(), S->tokloc_end());
+ }
+
+ // Create the aggregate string with the appropriate content and location
+ // information.
+ const ConstantArrayType *CAT = Context.getAsConstantArrayType(S->getType());
+ assert(CAT && "String literal not of constant array type!");
+ QualType StrTy = Context.getConstantArrayType(
+ CAT->getElementType(), llvm::APInt(32, StrBuf.size() + 1),
+ CAT->getSizeModifier(), CAT->getIndexTypeCVRQualifiers());
+ S = StringLiteral::Create(Context, StrBuf, StringLiteral::Ascii,
+ /*Pascal=*/false, StrTy, &StrLocs[0],
+ StrLocs.size());
+ }
+
+ return BuildObjCStringLiteral(AtLocs[0], S);
+}
+
+ExprResult Sema::BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S){
+ // Verify that this composite string is acceptable for ObjC strings.
+ if (CheckObjCString(S))
+ return true;
+
+ // Initialize the constant string interface lazily. This assumes
+ // the NSString interface is seen in this translation unit. Note: We
+ // don't use NSConstantString, since the runtime team considers this
+ // interface private (even though it appears in the header files).
+ QualType Ty = Context.getObjCConstantStringInterface();
+ if (!Ty.isNull()) {
+ Ty = Context.getObjCObjectPointerType(Ty);
+ } else if (getLangOpts().NoConstantCFStrings) {
+ IdentifierInfo *NSIdent=nullptr;
+ std::string StringClass(getLangOpts().ObjCConstantStringClass);
+
+ if (StringClass.empty())
+ NSIdent = &Context.Idents.get("NSConstantString");
+ else
+ NSIdent = &Context.Idents.get(StringClass);
+
+ NamedDecl *IF = LookupSingleName(TUScope, NSIdent, AtLoc,
+ LookupOrdinaryName);
+ if (ObjCInterfaceDecl *StrIF = dyn_cast_or_null<ObjCInterfaceDecl>(IF)) {
+ Context.setObjCConstantStringInterface(StrIF);
+ Ty = Context.getObjCConstantStringInterface();
+ Ty = Context.getObjCObjectPointerType(Ty);
+ } else {
+ // If there is no NSConstantString interface defined then treat this
+ // as error and recover from it.
+ Diag(S->getLocStart(), diag::err_no_nsconstant_string_class) << NSIdent
+ << S->getSourceRange();
+ Ty = Context.getObjCIdType();
+ }
+ } else {
+ IdentifierInfo *NSIdent = NSAPIObj->getNSClassId(NSAPI::ClassId_NSString);
+ NamedDecl *IF = LookupSingleName(TUScope, NSIdent, AtLoc,
+ LookupOrdinaryName);
+ if (ObjCInterfaceDecl *StrIF = dyn_cast_or_null<ObjCInterfaceDecl>(IF)) {
+ Context.setObjCConstantStringInterface(StrIF);
+ Ty = Context.getObjCConstantStringInterface();
+ Ty = Context.getObjCObjectPointerType(Ty);
+ } else {
+ // If there is no NSString interface defined, implicitly declare
+ // a @class NSString; and use that instead. This is to make sure
+ // type of an NSString literal is represented correctly, instead of
+ // being an 'id' type.
+ Ty = Context.getObjCNSStringType();
+ if (Ty.isNull()) {
+ ObjCInterfaceDecl *NSStringIDecl =
+ ObjCInterfaceDecl::Create (Context,
+ Context.getTranslationUnitDecl(),
+ SourceLocation(), NSIdent,
+ nullptr, nullptr, SourceLocation());
+ Ty = Context.getObjCInterfaceType(NSStringIDecl);
+ Context.setObjCNSStringType(Ty);
+ }
+ Ty = Context.getObjCObjectPointerType(Ty);
+ }
+ }
+
+ return new (Context) ObjCStringLiteral(S, Ty, AtLoc);
+}
+
+/// \brief Emits an error if the given method does not exist, or if the return
+/// type is not an Objective-C object.
+static bool validateBoxingMethod(Sema &S, SourceLocation Loc,
+ const ObjCInterfaceDecl *Class,
+ Selector Sel, const ObjCMethodDecl *Method) {
+ if (!Method) {
+ // FIXME: Is there a better way to avoid quotes than using getName()?
+ S.Diag(Loc, diag::err_undeclared_boxing_method) << Sel << Class->getName();
+ return false;
+ }
+
+ // Make sure the return type is reasonable.
+ QualType ReturnType = Method->getReturnType();
+ if (!ReturnType->isObjCObjectPointerType()) {
+ S.Diag(Loc, diag::err_objc_literal_method_sig)
+ << Sel;
+ S.Diag(Method->getLocation(), diag::note_objc_literal_method_return)
+ << ReturnType;
+ return false;
+ }
+
+ return true;
+}
+
+/// \brief Maps ObjCLiteralKind to NSClassIdKindKind
+static NSAPI::NSClassIdKindKind ClassKindFromLiteralKind(
+ Sema::ObjCLiteralKind LiteralKind) {
+ switch (LiteralKind) {
+ case Sema::LK_Array:
+ return NSAPI::ClassId_NSArray;
+ case Sema::LK_Dictionary:
+ return NSAPI::ClassId_NSDictionary;
+ case Sema::LK_Numeric:
+ return NSAPI::ClassId_NSNumber;
+ case Sema::LK_String:
+ return NSAPI::ClassId_NSString;
+ case Sema::LK_Boxed:
+ return NSAPI::ClassId_NSValue;
+
+ // There is no corresponding mapping between LK_None/LK_Block and
+ // NSClassIdKindKind.
+ case Sema::LK_Block:
+ case Sema::LK_None:
+ break;
+ }
+ llvm_unreachable("LiteralKind can't be converted into a ClassKind");
+}
+
+/// \brief Validates ObjCInterfaceDecl availability.
+/// An ObjCInterfaceDecl used to create ObjC literals must have a definition
+/// unless clang is in debugger mode.
+static bool ValidateObjCLiteralInterfaceDecl(Sema &S, ObjCInterfaceDecl *Decl,
+ SourceLocation Loc,
+ Sema::ObjCLiteralKind LiteralKind) {
+ if (!Decl) {
+ NSAPI::NSClassIdKindKind Kind = ClassKindFromLiteralKind(LiteralKind);
+ IdentifierInfo *II = S.NSAPIObj->getNSClassId(Kind);
+ S.Diag(Loc, diag::err_undeclared_objc_literal_class)
+ << II->getName() << LiteralKind;
+ return false;
+ } else if (!Decl->hasDefinition() && !S.getLangOpts().DebuggerObjCLiteral) {
+ S.Diag(Loc, diag::err_undeclared_objc_literal_class)
+ << Decl->getName() << LiteralKind;
+ S.Diag(Decl->getLocation(), diag::note_forward_class);
+ return false;
+ }
+
+ return true;
+}
+
+/// \brief Looks up ObjCInterfaceDecl of a given NSClassIdKindKind.
+/// Used to create ObjC literals, such as NSDictionary (@{}),
+/// NSArray (@[]) and Boxed Expressions (@())
+static ObjCInterfaceDecl *LookupObjCInterfaceDeclForLiteral(Sema &S,
+ SourceLocation Loc,
+ Sema::ObjCLiteralKind LiteralKind) {
+ NSAPI::NSClassIdKindKind ClassKind = ClassKindFromLiteralKind(LiteralKind);
+ IdentifierInfo *II = S.NSAPIObj->getNSClassId(ClassKind);
+ NamedDecl *IF = S.LookupSingleName(S.TUScope, II, Loc,
+ Sema::LookupOrdinaryName);
+ ObjCInterfaceDecl *ID = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
+ if (!ID && S.getLangOpts().DebuggerObjCLiteral) {
+ ASTContext &Context = S.Context;
+ TranslationUnitDecl *TU = Context.getTranslationUnitDecl();
+ ID = ObjCInterfaceDecl::Create (Context, TU, SourceLocation(), II,
+ nullptr, nullptr, SourceLocation());
+ }
+
+ if (!ValidateObjCLiteralInterfaceDecl(S, ID, Loc, LiteralKind)) {
+ ID = nullptr;
+ }
+
+ return ID;
+}
+
+/// \brief Retrieve the NSNumber factory method that should be used to create
+/// an Objective-C literal for the given type.
+static ObjCMethodDecl *getNSNumberFactoryMethod(Sema &S, SourceLocation Loc,
+ QualType NumberType,
+ bool isLiteral = false,
+ SourceRange R = SourceRange()) {
+ Optional<NSAPI::NSNumberLiteralMethodKind> Kind =
+ S.NSAPIObj->getNSNumberFactoryMethodKind(NumberType);
+
+ if (!Kind) {
+ if (isLiteral) {
+ S.Diag(Loc, diag::err_invalid_nsnumber_type)
+ << NumberType << R;
+ }
+ return nullptr;
+ }
+
+ // If we already looked up this method, we're done.
+ if (S.NSNumberLiteralMethods[*Kind])
+ return S.NSNumberLiteralMethods[*Kind];
+
+ Selector Sel = S.NSAPIObj->getNSNumberLiteralSelector(*Kind,
+ /*Instance=*/false);
+
+ ASTContext &CX = S.Context;
+
+ // Look up the NSNumber class, if we haven't done so already. It's cached
+ // in the Sema instance.
+ if (!S.NSNumberDecl) {
+ S.NSNumberDecl = LookupObjCInterfaceDeclForLiteral(S, Loc,
+ Sema::LK_Numeric);
+ if (!S.NSNumberDecl) {
+ return nullptr;
+ }
+ }
+
+ if (S.NSNumberPointer.isNull()) {
+ // Generate the 'NSNumber *' pointer type.
+ QualType NSNumberObject = CX.getObjCInterfaceType(S.NSNumberDecl);
+ S.NSNumberPointer = CX.getObjCObjectPointerType(NSNumberObject);
+ }
+
+ // Look for the appropriate method within NSNumber.
+ ObjCMethodDecl *Method = S.NSNumberDecl->lookupClassMethod(Sel);
+ if (!Method && S.getLangOpts().DebuggerObjCLiteral) {
+ // Create a stub definition of this NSNumber factory method.
+ TypeSourceInfo *ReturnTInfo = nullptr;
+ Method =
+ ObjCMethodDecl::Create(CX, SourceLocation(), SourceLocation(), Sel,
+ S.NSNumberPointer, ReturnTInfo, S.NSNumberDecl,
+ /*isInstance=*/false, /*isVariadic=*/false,
+ /*isPropertyAccessor=*/false,
+ /*isImplicitlyDeclared=*/true,
+ /*isDefined=*/false, ObjCMethodDecl::Required,
+ /*HasRelatedResultType=*/false);
+ ParmVarDecl *value = ParmVarDecl::Create(S.Context, Method,
+ SourceLocation(), SourceLocation(),
+ &CX.Idents.get("value"),
+ NumberType, /*TInfo=*/nullptr,
+ SC_None, nullptr);
+ Method->setMethodParams(S.Context, value, None);
+ }
+
+ if (!validateBoxingMethod(S, Loc, S.NSNumberDecl, Sel, Method))
+ return nullptr;
+
+ // Note: if the parameter type is out-of-line, we'll catch it later in the
+ // implicit conversion.
+
+ S.NSNumberLiteralMethods[*Kind] = Method;
+ return Method;
+}
+
+/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
+/// numeric literal expression. Type of the expression will be "NSNumber *".
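+/// For illustration: @42, @2.5, and @'a' are all built through this routine.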
+ExprResult Sema::BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number) {
+ // Determine the type of the literal.
+ QualType NumberType = Number->getType();
+ if (CharacterLiteral *Char = dyn_cast<CharacterLiteral>(Number)) {
+ // In C, character literals have type 'int'. That's not the type we want
+ // to use to determine the Objective-C literal kind.
+ switch (Char->getKind()) {
+ case CharacterLiteral::Ascii:
+ NumberType = Context.CharTy;
+ break;
+
+ case CharacterLiteral::Wide:
+ NumberType = Context.getWideCharType();
+ break;
+
+ case CharacterLiteral::UTF16:
+ NumberType = Context.Char16Ty;
+ break;
+
+ case CharacterLiteral::UTF32:
+ NumberType = Context.Char32Ty;
+ break;
+ }
+ }
+
+ // Look for the appropriate method within NSNumber.
+ // Construct the literal.
+ SourceRange NR(Number->getSourceRange());
+ ObjCMethodDecl *Method = getNSNumberFactoryMethod(*this, AtLoc, NumberType,
+ true, NR);
+ if (!Method)
+ return ExprError();
+
+ // Convert the number to the type that the parameter expects.
+ ParmVarDecl *ParamDecl = Method->parameters()[0];
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
+ ParamDecl);
+ ExprResult ConvertedNumber = PerformCopyInitialization(Entity,
+ SourceLocation(),
+ Number);
+ if (ConvertedNumber.isInvalid())
+ return ExprError();
+ Number = ConvertedNumber.get();
+
+ // Use the effective source range of the literal, including the leading '@'.
+ return MaybeBindToTemporary(
+ new (Context) ObjCBoxedExpr(Number, NSNumberPointer, Method,
+ SourceRange(AtLoc, NR.getEnd())));
+}
+
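+/// Handle Objective-C BOOL literals: for illustration, @YES and @NO (and
+/// @true/@false in Objective-C++) are wrapped as NSNumber literals via
+/// BuildObjCNumericLiteral below.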
+ExprResult Sema::ActOnObjCBoolLiteral(SourceLocation AtLoc,
+ SourceLocation ValueLoc,
+ bool Value) {
+ ExprResult Inner;
+ if (getLangOpts().CPlusPlus) {
+ Inner = ActOnCXXBoolLiteral(ValueLoc, Value? tok::kw_true : tok::kw_false);
+ } else {
+ // C doesn't actually have a way to represent literal values of type
+ // _Bool. So, we'll use 0/1 and an implicit cast to _Bool.
+ Inner = ActOnIntegerConstant(ValueLoc, Value? 1 : 0);
+ Inner = ImpCastExprToType(Inner.get(), Context.BoolTy,
+ CK_IntegralToBoolean);
+ }
+
+ return BuildObjCNumericLiteral(AtLoc, Inner.get());
+}
+
+/// \brief Check that the given expression is a valid element of an Objective-C
+/// collection literal.
+static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element,
+ QualType T,
+ bool ArrayLiteral = false) {
+ // If the expression is type-dependent, there's nothing for us to do.
+ if (Element->isTypeDependent())
+ return Element;
+
+ ExprResult Result = S.CheckPlaceholderExpr(Element);
+ if (Result.isInvalid())
+ return ExprError();
+ Element = Result.get();
+
+ // In C++, check for an implicit conversion to an Objective-C object pointer
+ // type.
+ if (S.getLangOpts().CPlusPlus && Element->getType()->isRecordType()) {
+ InitializedEntity Entity
+ = InitializedEntity::InitializeParameter(S.Context, T,
+ /*Consumed=*/false);
+ InitializationKind Kind
+ = InitializationKind::CreateCopy(Element->getLocStart(),
+ SourceLocation());
+ InitializationSequence Seq(S, Entity, Kind, Element);
+ if (!Seq.Failed())
+ return Seq.Perform(S, Entity, Kind, Element);
+ }
+
+ Expr *OrigElement = Element;
+
+ // Perform lvalue-to-rvalue conversion.
+ Result = S.DefaultLvalueConversion(Element);
+ if (Result.isInvalid())
+ return ExprError();
+ Element = Result.get();
+
+ // Make sure that we have an Objective-C pointer type or block.
+ if (!Element->getType()->isObjCObjectPointerType() &&
+ !Element->getType()->isBlockPointerType()) {
+ bool Recovered = false;
+
+ // If this is potentially an Objective-C numeric literal, add the '@'.
+ if (isa<IntegerLiteral>(OrigElement) ||
+ isa<CharacterLiteral>(OrigElement) ||
+ isa<FloatingLiteral>(OrigElement) ||
+ isa<ObjCBoolLiteralExpr>(OrigElement) ||
+ isa<CXXBoolLiteralExpr>(OrigElement)) {
+ if (S.NSAPIObj->getNSNumberFactoryMethodKind(OrigElement->getType())) {
+ int Which = isa<CharacterLiteral>(OrigElement) ? 1
+ : (isa<CXXBoolLiteralExpr>(OrigElement) ||
+ isa<ObjCBoolLiteralExpr>(OrigElement)) ? 2
+ : 3;
+
+ S.Diag(OrigElement->getLocStart(), diag::err_box_literal_collection)
+ << Which << OrigElement->getSourceRange()
+ << FixItHint::CreateInsertion(OrigElement->getLocStart(), "@");
+
+ Result = S.BuildObjCNumericLiteral(OrigElement->getLocStart(),
+ OrigElement);
+ if (Result.isInvalid())
+ return ExprError();
+
+ Element = Result.get();
+ Recovered = true;
+ }
+ }
+ // If this is potentially an Objective-C string literal, add the '@'.
+ else if (StringLiteral *String = dyn_cast<StringLiteral>(OrigElement)) {
+ if (String->isAscii()) {
+ S.Diag(OrigElement->getLocStart(), diag::err_box_literal_collection)
+ << 0 << OrigElement->getSourceRange()
+ << FixItHint::CreateInsertion(OrigElement->getLocStart(), "@");
+
+ Result = S.BuildObjCStringLiteral(OrigElement->getLocStart(), String);
+ if (Result.isInvalid())
+ return ExprError();
+
+ Element = Result.get();
+ Recovered = true;
+ }
+ }
+
+ if (!Recovered) {
+ S.Diag(Element->getLocStart(), diag::err_invalid_collection_element)
+ << Element->getType();
+ return ExprError();
+ }
+ }
+ if (ArrayLiteral)
+ if (ObjCStringLiteral *getString =
+ dyn_cast<ObjCStringLiteral>(OrigElement)) {
+ if (StringLiteral *SL = getString->getString()) {
+ unsigned numConcat = SL->getNumConcatenated();
+ if (numConcat > 1) {
+ // Only warn if the concatenated string doesn't come from a macro.
+ bool hasMacro = false;
+ for (unsigned i = 0; i < numConcat ; ++i)
+ if (SL->getStrTokenLoc(i).isMacroID()) {
+ hasMacro = true;
+ break;
+ }
+ if (!hasMacro)
+ S.Diag(Element->getLocStart(),
+ diag::warn_concatenated_nsarray_literal)
+ << Element->getType();
+ }
+ }
+ }
+
+ // Make sure that the element has the type that the container factory
+ // function expects.
+ return S.PerformCopyInitialization(
+ InitializedEntity::InitializeParameter(S.Context, T,
+ /*Consumed=*/false),
+ Element->getLocStart(), Element);
+}
+
+ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
+ if (ValueExpr->isTypeDependent()) {
+ ObjCBoxedExpr *BoxedExpr =
+ new (Context) ObjCBoxedExpr(ValueExpr, Context.DependentTy, nullptr, SR);
+ return BoxedExpr;
+ }
+ ObjCMethodDecl *BoxingMethod = nullptr;
+ QualType BoxedType;
+ // Convert the expression to an RValue, so we can check for pointer types...
+ ExprResult RValue = DefaultFunctionArrayLvalueConversion(ValueExpr);
+ if (RValue.isInvalid()) {
+ return ExprError();
+ }
+ SourceLocation Loc = SR.getBegin();
+ ValueExpr = RValue.get();
+ QualType ValueType(ValueExpr->getType());
+ if (const PointerType *PT = ValueType->getAs<PointerType>()) {
+ QualType PointeeType = PT->getPointeeType();
+ if (Context.hasSameUnqualifiedType(PointeeType, Context.CharTy)) {
+
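+ // For illustration, this is the boxed C-string case, e.g. @("hello"),
+ // which is turned into an NSString via +stringWithUTF8String: below.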
+ if (!NSStringDecl) {
+ NSStringDecl = LookupObjCInterfaceDeclForLiteral(*this, Loc,
+ Sema::LK_String);
+ if (!NSStringDecl) {
+ return ExprError();
+ }
+ QualType NSStringObject = Context.getObjCInterfaceType(NSStringDecl);
+ NSStringPointer = Context.getObjCObjectPointerType(NSStringObject);
+ }
+
+ if (!StringWithUTF8StringMethod) {
+ IdentifierInfo *II = &Context.Idents.get("stringWithUTF8String");
+ Selector stringWithUTF8String = Context.Selectors.getUnarySelector(II);
+
+ // Look for the appropriate method within NSString.
+ BoxingMethod = NSStringDecl->lookupClassMethod(stringWithUTF8String);
+ if (!BoxingMethod && getLangOpts().DebuggerObjCLiteral) {
+ // Debugger needs to work even if NSString hasn't been defined.
+ TypeSourceInfo *ReturnTInfo = nullptr;
+ ObjCMethodDecl *M = ObjCMethodDecl::Create(
+ Context, SourceLocation(), SourceLocation(), stringWithUTF8String,
+ NSStringPointer, ReturnTInfo, NSStringDecl,
+ /*isInstance=*/false, /*isVariadic=*/false,
+ /*isPropertyAccessor=*/false,
+ /*isImplicitlyDeclared=*/true,
+ /*isDefined=*/false, ObjCMethodDecl::Required,
+ /*HasRelatedResultType=*/false);
+ QualType ConstCharType = Context.CharTy.withConst();
+ ParmVarDecl *value =
+ ParmVarDecl::Create(Context, M,
+ SourceLocation(), SourceLocation(),
+ &Context.Idents.get("value"),
+ Context.getPointerType(ConstCharType),
+ /*TInfo=*/nullptr,
+ SC_None, nullptr);
+ M->setMethodParams(Context, value, None);
+ BoxingMethod = M;
+ }
+
+ if (!validateBoxingMethod(*this, Loc, NSStringDecl,
+ stringWithUTF8String, BoxingMethod))
+ return ExprError();
+
+ StringWithUTF8StringMethod = BoxingMethod;
+ }
+
+ BoxingMethod = StringWithUTF8StringMethod;
+ BoxedType = NSStringPointer;
+ }
+ } else if (ValueType->isBuiltinType()) {
+ // The other types we support are numeric, char and BOOL/bool. We could also
+ // provide limited support for structure types, such as NSRange, NSRect, and
+ // NSSize. See NSValue (NSValueGeometryExtensions) in <Foundation/NSGeometry.h>
+ // for more details.
+
+ // Check for a top-level character literal.
+ if (const CharacterLiteral *Char =
+ dyn_cast<CharacterLiteral>(ValueExpr->IgnoreParens())) {
+ // In C, character literals have type 'int'. That's not the type we want
+ // to use to determine the Objective-C literal kind.
+ switch (Char->getKind()) {
+ case CharacterLiteral::Ascii:
+ ValueType = Context.CharTy;
+ break;
+
+ case CharacterLiteral::Wide:
+ ValueType = Context.getWideCharType();
+ break;
+
+ case CharacterLiteral::UTF16:
+ ValueType = Context.Char16Ty;
+ break;
+
+ case CharacterLiteral::UTF32:
+ ValueType = Context.Char32Ty;
+ break;
+ }
+ }
+ CheckForIntOverflow(ValueExpr);
+ // FIXME: Do I need to do anything special with BoolTy expressions?
+
+ // Look for the appropriate method within NSNumber.
+ BoxingMethod = getNSNumberFactoryMethod(*this, Loc, ValueType);
+ BoxedType = NSNumberPointer;
+ } else if (const EnumType *ET = ValueType->getAs<EnumType>()) {
+ if (!ET->getDecl()->isComplete()) {
+ Diag(Loc, diag::err_objc_incomplete_boxed_expression_type)
+ << ValueType << ValueExpr->getSourceRange();
+ return ExprError();
+ }
+
+ BoxingMethod = getNSNumberFactoryMethod(*this, Loc,
+ ET->getDecl()->getIntegerType());
+ BoxedType = NSNumberPointer;
+ } else if (ValueType->isObjCBoxableRecordType()) {
+ // Support for structure types that are marked as objc_boxable, e.g.
+ // struct __attribute__((objc_boxable)) s { ... };
+
+ // Look up the NSValue class, if we haven't done so already. It's cached
+ // in the Sema instance.
+ if (!NSValueDecl) {
+ NSValueDecl = LookupObjCInterfaceDeclForLiteral(*this, Loc,
+ Sema::LK_Boxed);
+ if (!NSValueDecl) {
+ return ExprError();
+ }
+
+ // Generate the 'NSValue *' pointer type.
+ QualType NSValueObject = Context.getObjCInterfaceType(NSValueDecl);
+ NSValuePointer = Context.getObjCObjectPointerType(NSValueObject);
+ }
+
+ if (!ValueWithBytesObjCTypeMethod) {
+ IdentifierInfo *II[] = {
+ &Context.Idents.get("valueWithBytes"),
+ &Context.Idents.get("objCType")
+ };
+ Selector ValueWithBytesObjCType = Context.Selectors.getSelector(2, II);
+
+ // Look for the appropriate method within NSValue.
+ BoxingMethod = NSValueDecl->lookupClassMethod(ValueWithBytesObjCType);
+ if (!BoxingMethod && getLangOpts().DebuggerObjCLiteral) {
+ // Debugger needs to work even if NSValue hasn't been defined.
+ TypeSourceInfo *ReturnTInfo = nullptr;
+ ObjCMethodDecl *M = ObjCMethodDecl::Create(
+ Context,
+ SourceLocation(),
+ SourceLocation(),
+ ValueWithBytesObjCType,
+ NSValuePointer,
+ ReturnTInfo,
+ NSValueDecl,
+ /*isInstance=*/false,
+ /*isVariadic=*/false,
+ /*isPropertyAccessor=*/false,
+ /*isImplicitlyDeclared=*/true,
+ /*isDefined=*/false,
+ ObjCMethodDecl::Required,
+ /*HasRelatedResultType=*/false);
+
+ SmallVector<ParmVarDecl *, 2> Params;
+
+ ParmVarDecl *bytes =
+ ParmVarDecl::Create(Context, M,
+ SourceLocation(), SourceLocation(),
+ &Context.Idents.get("bytes"),
+ Context.VoidPtrTy.withConst(),
+ /*TInfo=*/nullptr,
+ SC_None, nullptr);
+ Params.push_back(bytes);
+
+ QualType ConstCharType = Context.CharTy.withConst();
+ ParmVarDecl *type =
+ ParmVarDecl::Create(Context, M,
+ SourceLocation(), SourceLocation(),
+ &Context.Idents.get("type"),
+ Context.getPointerType(ConstCharType),
+ /*TInfo=*/nullptr,
+ SC_None, nullptr);
+ Params.push_back(type);
+
+ M->setMethodParams(Context, Params, None);
+ BoxingMethod = M;
+ }
+
+ if (!validateBoxingMethod(*this, Loc, NSValueDecl,
+ ValueWithBytesObjCType, BoxingMethod))
+ return ExprError();
+
+ ValueWithBytesObjCTypeMethod = BoxingMethod;
+ }
+
+ if (!ValueType.isTriviallyCopyableType(Context)) {
+ Diag(Loc, diag::err_objc_non_trivially_copyable_boxed_expression_type)
+ << ValueType << ValueExpr->getSourceRange();
+ return ExprError();
+ }
+
+ BoxingMethod = ValueWithBytesObjCTypeMethod;
+ BoxedType = NSValuePointer;
+ }
+
+ if (!BoxingMethod) {
+ Diag(Loc, diag::err_objc_illegal_boxed_expression_type)
+ << ValueType << ValueExpr->getSourceRange();
+ return ExprError();
+ }
+
+ DiagnoseUseOfDecl(BoxingMethod, Loc);
+
+ ExprResult ConvertedValueExpr;
+ if (ValueType->isObjCBoxableRecordType()) {
+ InitializedEntity IE = InitializedEntity::InitializeTemporary(ValueType);
+ ConvertedValueExpr = PerformCopyInitialization(IE, ValueExpr->getExprLoc(),
+ ValueExpr);
+ } else {
+ // Convert the expression to the type that the parameter requires.
+ ParmVarDecl *ParamDecl = BoxingMethod->parameters()[0];
+ InitializedEntity IE = InitializedEntity::InitializeParameter(Context,
+ ParamDecl);
+ ConvertedValueExpr = PerformCopyInitialization(IE, SourceLocation(),
+ ValueExpr);
+ }
+
+ if (ConvertedValueExpr.isInvalid())
+ return ExprError();
+ ValueExpr = ConvertedValueExpr.get();
+
+ ObjCBoxedExpr *BoxedExpr =
+ new (Context) ObjCBoxedExpr(ValueExpr, BoxedType,
+ BoxingMethod, SR);
+ return MaybeBindToTemporary(BoxedExpr);
+}
+
+/// Build an ObjC subscript pseudo-object expression, given that
+/// that's supported by the runtime.
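+/// For illustration: 'obj[idx]' and 'dict[key]' both reach here, carrying the
+/// getter/setter methods that the later pseudo-object rewrite will use.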
+ExprResult Sema::BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
+ Expr *IndexExpr,
+ ObjCMethodDecl *getterMethod,
+ ObjCMethodDecl *setterMethod) {
+ assert(!LangOpts.isSubscriptPointerArithmetic());
+
+ // We can't get dependent types here; our callers should have
+ // filtered them out.
+ assert((!BaseExpr->isTypeDependent() && !IndexExpr->isTypeDependent()) &&
+ "base or index cannot have dependent type here");
+
+ // Filter out placeholders in the index. In theory, overloads could
+ // be preserved here, although that might not actually work correctly.
+ ExprResult Result = CheckPlaceholderExpr(IndexExpr);
+ if (Result.isInvalid())
+ return ExprError();
+ IndexExpr = Result.get();
+
+ // Perform lvalue-to-rvalue conversion on the base.
+ Result = DefaultLvalueConversion(BaseExpr);
+ if (Result.isInvalid())
+ return ExprError();
+ BaseExpr = Result.get();
+
+ // Build the pseudo-object expression.
+ return new (Context) ObjCSubscriptRefExpr(
+ BaseExpr, IndexExpr, Context.PseudoObjectTy, VK_LValue, OK_ObjCSubscript,
+ getterMethod, setterMethod, RB);
+}
+
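+/// Build an Objective-C array literal, e.g. @[ @1, @2 ], using NSArray's
+/// +arrayWithObjects:count: factory method.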
+ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
+ SourceLocation Loc = SR.getBegin();
+
+ if (!NSArrayDecl) {
+ NSArrayDecl = LookupObjCInterfaceDeclForLiteral(*this, Loc,
+ Sema::LK_Array);
+ if (!NSArrayDecl) {
+ return ExprError();
+ }
+ }
+
+ // Find the arrayWithObjects:count: method, if we haven't done so already.
+ QualType IdT = Context.getObjCIdType();
+ if (!ArrayWithObjectsMethod) {
+ Selector
+ Sel = NSAPIObj->getNSArraySelector(NSAPI::NSArr_arrayWithObjectsCount);
+ ObjCMethodDecl *Method = NSArrayDecl->lookupClassMethod(Sel);
+ if (!Method && getLangOpts().DebuggerObjCLiteral) {
+ TypeSourceInfo *ReturnTInfo = nullptr;
+ Method = ObjCMethodDecl::Create(
+ Context, SourceLocation(), SourceLocation(), Sel, IdT, ReturnTInfo,
+ Context.getTranslationUnitDecl(), false /*Instance*/,
+ false /*isVariadic*/,
+ /*isPropertyAccessor=*/false,
+ /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
+ ObjCMethodDecl::Required, false);
+ SmallVector<ParmVarDecl *, 2> Params;
+ ParmVarDecl *objects = ParmVarDecl::Create(Context, Method,
+ SourceLocation(),
+ SourceLocation(),
+ &Context.Idents.get("objects"),
+ Context.getPointerType(IdT),
+ /*TInfo=*/nullptr,
+ SC_None, nullptr);
+ Params.push_back(objects);
+ ParmVarDecl *cnt = ParmVarDecl::Create(Context, Method,
+ SourceLocation(),
+ SourceLocation(),
+ &Context.Idents.get("cnt"),
+ Context.UnsignedLongTy,
+ /*TInfo=*/nullptr, SC_None,
+ nullptr);
+ Params.push_back(cnt);
+ Method->setMethodParams(Context, Params, None);
+ }
+
+ if (!validateBoxingMethod(*this, Loc, NSArrayDecl, Sel, Method))
+ return ExprError();
+
+ // Dig out the type that all elements should be converted to.
+ QualType T = Method->parameters()[0]->getType();
+ const PointerType *PtrT = T->getAs<PointerType>();
+ if (!PtrT ||
+ !Context.hasSameUnqualifiedType(PtrT->getPointeeType(), IdT)) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << Sel;
+ Diag(Method->parameters()[0]->getLocation(),
+ diag::note_objc_literal_method_param)
+ << 0 << T
+ << Context.getPointerType(IdT.withConst());
+ return ExprError();
+ }
+
+ // Check that the 'count' parameter is integral.
+ if (!Method->parameters()[1]->getType()->isIntegerType()) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << Sel;
+ Diag(Method->parameters()[1]->getLocation(),
+ diag::note_objc_literal_method_param)
+ << 1
+ << Method->parameters()[1]->getType()
+ << "integral";
+ return ExprError();
+ }
+
+ // We've found a good +arrayWithObjects:count: method. Save it!
+ ArrayWithObjectsMethod = Method;
+ }
+
+ QualType ObjectsType = ArrayWithObjectsMethod->parameters()[0]->getType();
+ QualType RequiredType = ObjectsType->castAs<PointerType>()->getPointeeType();
+
+ // Check that each of the elements provided is valid in a collection literal,
+ // performing conversions as necessary.
+ Expr **ElementsBuffer = Elements.data();
+ for (unsigned I = 0, N = Elements.size(); I != N; ++I) {
+ ExprResult Converted = CheckObjCCollectionLiteralElement(*this,
+ ElementsBuffer[I],
+ RequiredType, true);
+ if (Converted.isInvalid())
+ return ExprError();
+
+ ElementsBuffer[I] = Converted.get();
+ }
+
+ QualType Ty
+ = Context.getObjCObjectPointerType(
+ Context.getObjCInterfaceType(NSArrayDecl));
+
+ return MaybeBindToTemporary(
+ ObjCArrayLiteral::Create(Context, Elements, Ty,
+ ArrayWithObjectsMethod, SR));
+}
+
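+/// Build an Objective-C dictionary literal, e.g. @{ @"key" : @1 }, using
+/// NSDictionary's +dictionaryWithObjects:forKeys:count: factory method.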
+ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
+ MutableArrayRef<ObjCDictionaryElement> Elements) {
+ SourceLocation Loc = SR.getBegin();
+
+ if (!NSDictionaryDecl) {
+ NSDictionaryDecl = LookupObjCInterfaceDeclForLiteral(*this, Loc,
+ Sema::LK_Dictionary);
+ if (!NSDictionaryDecl) {
+ return ExprError();
+ }
+ }
+
+ // Find the dictionaryWithObjects:forKeys:count: method, if we haven't done
+ // so already.
+ QualType IdT = Context.getObjCIdType();
+ if (!DictionaryWithObjectsMethod) {
+ Selector Sel = NSAPIObj->getNSDictionarySelector(
+ NSAPI::NSDict_dictionaryWithObjectsForKeysCount);
+ ObjCMethodDecl *Method = NSDictionaryDecl->lookupClassMethod(Sel);
+ if (!Method && getLangOpts().DebuggerObjCLiteral) {
+ Method = ObjCMethodDecl::Create(Context,
+ SourceLocation(), SourceLocation(), Sel,
+ IdT,
+ nullptr /*TypeSourceInfo */,
+ Context.getTranslationUnitDecl(),
+ false /*Instance*/, false/*isVariadic*/,
+ /*isPropertyAccessor=*/false,
+ /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
+ ObjCMethodDecl::Required,
+ false);
+ SmallVector<ParmVarDecl *, 3> Params;
+ ParmVarDecl *objects = ParmVarDecl::Create(Context, Method,
+ SourceLocation(),
+ SourceLocation(),
+ &Context.Idents.get("objects"),
+ Context.getPointerType(IdT),
+ /*TInfo=*/nullptr, SC_None,
+ nullptr);
+ Params.push_back(objects);
+ ParmVarDecl *keys = ParmVarDecl::Create(Context, Method,
+ SourceLocation(),
+ SourceLocation(),
+ &Context.Idents.get("keys"),
+ Context.getPointerType(IdT),
+ /*TInfo=*/nullptr, SC_None,
+ nullptr);
+ Params.push_back(keys);
+ ParmVarDecl *cnt = ParmVarDecl::Create(Context, Method,
+ SourceLocation(),
+ SourceLocation(),
+ &Context.Idents.get("cnt"),
+ Context.UnsignedLongTy,
+ /*TInfo=*/nullptr, SC_None,
+ nullptr);
+ Params.push_back(cnt);
+ Method->setMethodParams(Context, Params, None);
+ }
+
+ if (!validateBoxingMethod(*this, SR.getBegin(), NSDictionaryDecl, Sel,
+ Method))
+ return ExprError();
+
+ // Dig out the type that all values should be converted to.
+ QualType ValueT = Method->parameters()[0]->getType();
+ const PointerType *PtrValue = ValueT->getAs<PointerType>();
+ if (!PtrValue ||
+ !Context.hasSameUnqualifiedType(PtrValue->getPointeeType(), IdT)) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << Sel;
+ Diag(Method->parameters()[0]->getLocation(),
+ diag::note_objc_literal_method_param)
+ << 0 << ValueT
+ << Context.getPointerType(IdT.withConst());
+ return ExprError();
+ }
+
+ // Dig out the type that all keys should be converted to.
+ QualType KeyT = Method->parameters()[1]->getType();
+ const PointerType *PtrKey = KeyT->getAs<PointerType>();
+ if (!PtrKey ||
+ !Context.hasSameUnqualifiedType(PtrKey->getPointeeType(),
+ IdT)) {
+ bool err = true;
+ if (PtrKey) {
+ if (QIDNSCopying.isNull()) {
+ // Check whether the key argument of the selector is id<NSCopying>.
+ if (ObjCProtocolDecl *NSCopyingPDecl =
+ LookupProtocol(&Context.Idents.get("NSCopying"), SR.getBegin())) {
+ ObjCProtocolDecl *PQ[] = {NSCopyingPDecl};
+ QIDNSCopying =
+ Context.getObjCObjectType(Context.ObjCBuiltinIdTy, { },
+ llvm::makeArrayRef(
+ (ObjCProtocolDecl**) PQ,
+ 1),
+ false);
+ QIDNSCopying = Context.getObjCObjectPointerType(QIDNSCopying);
+ }
+ }
+ if (!QIDNSCopying.isNull())
+ err = !Context.hasSameUnqualifiedType(PtrKey->getPointeeType(),
+ QIDNSCopying);
+ }
+
+ if (err) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << Sel;
+ Diag(Method->parameters()[1]->getLocation(),
+ diag::note_objc_literal_method_param)
+ << 1 << KeyT
+ << Context.getPointerType(IdT.withConst());
+ return ExprError();
+ }
+ }
+
+ // Check that the 'count' parameter is integral.
+ QualType CountType = Method->parameters()[2]->getType();
+ if (!CountType->isIntegerType()) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << Sel;
+ Diag(Method->parameters()[2]->getLocation(),
+ diag::note_objc_literal_method_param)
+ << 2 << CountType
+ << "integral";
+ return ExprError();
+ }
+
+ // We've found a good +dictionaryWithObjects:forKeys:count: method; save it!
+ DictionaryWithObjectsMethod = Method;
+ }
+
+ QualType ValuesT = DictionaryWithObjectsMethod->parameters()[0]->getType();
+ QualType ValueT = ValuesT->castAs<PointerType>()->getPointeeType();
+ QualType KeysT = DictionaryWithObjectsMethod->parameters()[1]->getType();
+ QualType KeyT = KeysT->castAs<PointerType>()->getPointeeType();
+
+ // Check that each of the keys and values provided is valid in a collection
+ // literal, performing conversions as necessary.
+ bool HasPackExpansions = false;
+ for (ObjCDictionaryElement &Element : Elements) {
+ // Check the key.
+ ExprResult Key = CheckObjCCollectionLiteralElement(*this, Element.Key,
+ KeyT);
+ if (Key.isInvalid())
+ return ExprError();
+
+ // Check the value.
+ ExprResult Value
+ = CheckObjCCollectionLiteralElement(*this, Element.Value, ValueT);
+ if (Value.isInvalid())
+ return ExprError();
+
+ Element.Key = Key.get();
+ Element.Value = Value.get();
+
+ if (Element.EllipsisLoc.isInvalid())
+ continue;
+
+ if (!Element.Key->containsUnexpandedParameterPack() &&
+ !Element.Value->containsUnexpandedParameterPack()) {
+ Diag(Element.EllipsisLoc,
+ diag::err_pack_expansion_without_parameter_packs)
+ << SourceRange(Element.Key->getLocStart(),
+ Element.Value->getLocEnd());
+ return ExprError();
+ }
+
+ HasPackExpansions = true;
+ }
+
+ QualType Ty
+ = Context.getObjCObjectPointerType(
+ Context.getObjCInterfaceType(NSDictionaryDecl));
+ return MaybeBindToTemporary(ObjCDictionaryLiteral::Create(
+ Context, Elements, HasPackExpansions, Ty,
+ DictionaryWithObjectsMethod, SR));
+}
+
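+/// Build an @encode expression, e.g. @encode(int), whose value is the type's
+/// encoding string (here "i") with constant-array-of-char type.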
+ExprResult Sema::BuildObjCEncodeExpression(SourceLocation AtLoc,
+ TypeSourceInfo *EncodedTypeInfo,
+ SourceLocation RParenLoc) {
+ QualType EncodedType = EncodedTypeInfo->getType();
+ QualType StrTy;
+ if (EncodedType->isDependentType())
+ StrTy = Context.DependentTy;
+ else {
+ if (!EncodedType->getAsArrayTypeUnsafe() && // Incomplete array is handled.
+ !EncodedType->isVoidType()) // void is handled too.
+ if (RequireCompleteType(AtLoc, EncodedType,
+ diag::err_incomplete_type_objc_at_encode,
+ EncodedTypeInfo->getTypeLoc()))
+ return ExprError();
+
+ std::string Str;
+ QualType NotEncodedT;
+ Context.getObjCEncodingForType(EncodedType, Str, nullptr, &NotEncodedT);
+ if (!NotEncodedT.isNull())
+ Diag(AtLoc, diag::warn_incomplete_encoded_type)
+ << EncodedType << NotEncodedT;
+
+ // The type of @encode is the same as the type of the corresponding string,
+ // which is an array type.
+ StrTy = Context.CharTy;
+ // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
+ if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
+ StrTy.addConst();
+ StrTy = Context.getConstantArrayType(StrTy, llvm::APInt(32, Str.size()+1),
+ ArrayType::Normal, 0);
+ }
+
+ return new (Context) ObjCEncodeExpr(StrTy, EncodedTypeInfo, AtLoc, RParenLoc);
+}
+
+ExprResult Sema::ParseObjCEncodeExpression(SourceLocation AtLoc,
+ SourceLocation EncodeLoc,
+ SourceLocation LParenLoc,
+ ParsedType ty,
+ SourceLocation RParenLoc) {
+ // FIXME: Preserve type source info ?
+ TypeSourceInfo *TInfo;
+ QualType EncodedType = GetTypeFromParser(ty, &TInfo);
+ if (!TInfo)
+ TInfo = Context.getTrivialTypeSourceInfo(EncodedType,
+ getLocForEndOfToken(LParenLoc));
+
+ return BuildObjCEncodeExpression(AtLoc, TInfo, RParenLoc);
+}
+
+static bool HelperToDiagnoseMismatchedMethodsInGlobalPool(Sema &S,
+ SourceLocation AtLoc,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc,
+ ObjCMethodDecl *Method,
+ ObjCMethodList &MethList) {
+ ObjCMethodList *M = &MethList;
+ bool Warned = false;
+ for (M = M->getNext(); M; M=M->getNext()) {
+ ObjCMethodDecl *MatchingMethodDecl = M->getMethod();
+ if (MatchingMethodDecl == Method ||
+ isa<ObjCImplDecl>(MatchingMethodDecl->getDeclContext()) ||
+ MatchingMethodDecl->getSelector() != Method->getSelector())
+ continue;
+ if (!S.MatchTwoMethodDeclarations(Method,
+ MatchingMethodDecl, Sema::MMS_loose)) {
+ if (!Warned) {
+ Warned = true;
+ S.Diag(AtLoc, diag::warning_multiple_selectors)
+ << Method->getSelector() << FixItHint::CreateInsertion(LParenLoc, "(")
+ << FixItHint::CreateInsertion(RParenLoc, ")");
+ S.Diag(Method->getLocation(), diag::note_method_declared_at)
+ << Method->getDeclName();
+ }
+ S.Diag(MatchingMethodDecl->getLocation(), diag::note_method_declared_at)
+ << MatchingMethodDecl->getDeclName();
+ }
+ }
+ return Warned;
+}
+
+static void DiagnoseMismatchedSelectors(Sema &S, SourceLocation AtLoc,
+ ObjCMethodDecl *Method,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc,
+ bool WarnMultipleSelectors) {
+ if (!WarnMultipleSelectors ||
+ S.Diags.isIgnored(diag::warning_multiple_selectors, SourceLocation()))
+ return;
+ bool Warned = false;
+ for (Sema::GlobalMethodPool::iterator b = S.MethodPool.begin(),
+ e = S.MethodPool.end(); b != e; b++) {
+ // first, instance methods
+ ObjCMethodList &InstMethList = b->second.first;
+ if (HelperToDiagnoseMismatchedMethodsInGlobalPool(S, AtLoc, LParenLoc, RParenLoc,
+ Method, InstMethList))
+ Warned = true;
+
+ // second, class methods
+ ObjCMethodList &ClsMethList = b->second.second;
+ if (HelperToDiagnoseMismatchedMethodsInGlobalPool(S, AtLoc, LParenLoc, RParenLoc,
+ Method, ClsMethList) || Warned)
+ return;
+ }
+}
+
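+/// Handle an @selector expression, e.g. @selector(compare:), which evaluates
+/// to a value of the builtin 'SEL' type.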
+ExprResult Sema::ParseObjCSelectorExpression(Selector Sel,
+ SourceLocation AtLoc,
+ SourceLocation SelLoc,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc,
+ bool WarnMultipleSelectors) {
+ ObjCMethodDecl *Method = LookupInstanceMethodInGlobalPool(Sel,
+ SourceRange(LParenLoc, RParenLoc));
+ if (!Method)
+ Method = LookupFactoryMethodInGlobalPool(Sel,
+ SourceRange(LParenLoc, RParenLoc));
+ if (!Method) {
+ if (const ObjCMethodDecl *OM = SelectorsForTypoCorrection(Sel)) {
+ Selector MatchedSel = OM->getSelector();
+ SourceRange SelectorRange(LParenLoc.getLocWithOffset(1),
+ RParenLoc.getLocWithOffset(-1));
+ Diag(SelLoc, diag::warn_undeclared_selector_with_typo)
+ << Sel << MatchedSel
+ << FixItHint::CreateReplacement(SelectorRange, MatchedSel.getAsString());
+
+ } else
+ Diag(SelLoc, diag::warn_undeclared_selector) << Sel;
+ } else
+ DiagnoseMismatchedSelectors(*this, AtLoc, Method, LParenLoc, RParenLoc,
+ WarnMultipleSelectors);
+
+ if (Method &&
+ Method->getImplementationControl() != ObjCMethodDecl::Optional &&
+ !getSourceManager().isInSystemHeader(Method->getLocation()))
+ ReferencedSelectors.insert(std::make_pair(Sel, AtLoc));
+
+ // In ARC, forbid the user from using @selector for
+ // retain/release/autorelease/dealloc/retainCount.
+ if (getLangOpts().ObjCAutoRefCount) {
+ switch (Sel.getMethodFamily()) {
+ case OMF_retain:
+ case OMF_release:
+ case OMF_autorelease:
+ case OMF_retainCount:
+ case OMF_dealloc:
+ Diag(AtLoc, diag::err_arc_illegal_selector) <<
+ Sel << SourceRange(LParenLoc, RParenLoc);
+ break;
+
+ case OMF_None:
+ case OMF_alloc:
+ case OMF_copy:
+ case OMF_finalize:
+ case OMF_init:
+ case OMF_mutableCopy:
+ case OMF_new:
+ case OMF_self:
+ case OMF_initialize:
+ case OMF_performSelector:
+ break;
+ }
+ }
+ QualType Ty = Context.getObjCSelType();
+ return new (Context) ObjCSelectorExpr(Ty, Sel, AtLoc, RParenLoc);
+}
+
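+/// Handle a @protocol expression, e.g. @protocol(NSCopying), which evaluates
+/// to a 'Protocol *' pointer value.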
+ExprResult Sema::ParseObjCProtocolExpression(IdentifierInfo *ProtocolId,
+ SourceLocation AtLoc,
+ SourceLocation ProtoLoc,
+ SourceLocation LParenLoc,
+ SourceLocation ProtoIdLoc,
+ SourceLocation RParenLoc) {
+ ObjCProtocolDecl* PDecl = LookupProtocol(ProtocolId, ProtoIdLoc);
+ if (!PDecl) {
+ Diag(ProtoLoc, diag::err_undeclared_protocol) << ProtocolId;
+ return true;
+ }
+ if (PDecl->hasDefinition())
+ PDecl = PDecl->getDefinition();
+
+ QualType Ty = Context.getObjCProtoType();
+ if (Ty.isNull())
+ return true;
+ Ty = Context.getObjCObjectPointerType(Ty);
+ return new (Context) ObjCProtocolExpr(Ty, PDecl, AtLoc, ProtoIdLoc, RParenLoc);
+}
+
+/// Try to capture an implicit reference to 'self'.
+ObjCMethodDecl *Sema::tryCaptureObjCSelf(SourceLocation Loc) {
+ DeclContext *DC = getFunctionLevelDeclContext();
+
+ // If we're not in an ObjC method, error out. Note that, unlike the
+ // C++ case, we don't require an instance method --- class methods
+ // still have a 'self', and we really do still need to capture it!
+ ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(DC);
+ if (!method)
+ return nullptr;
+
+ tryCaptureVariable(method->getSelfDecl(), Loc);
+
+ return method;
+}
+
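+/// Strip an outer 'instancetype' down to 'id', preserving any nullability
+/// attribute; e.g. a nonnull 'instancetype' result becomes a nonnull 'id'.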
+static QualType stripObjCInstanceType(ASTContext &Context, QualType T) {
+ QualType origType = T;
+ if (auto nullability = AttributedType::stripOuterNullability(T)) {
+ if (T == Context.getObjCInstanceType()) {
+ return Context.getAttributedType(
+ AttributedType::getNullabilityAttrKind(*nullability),
+ Context.getObjCIdType(),
+ Context.getObjCIdType());
+ }
+
+ return origType;
+ }
+
+ if (T == Context.getObjCInstanceType())
+ return Context.getObjCIdType();
+
+ return origType;
+}
+
+/// Determine the result type of a message send based on the receiver type,
+/// method, and the kind of message send.
+///
+/// This is the "base" result type, which will still need to be adjusted
+/// to account for nullability.
+static QualType getBaseMessageSendResultType(Sema &S,
+ QualType ReceiverType,
+ ObjCMethodDecl *Method,
+ bool isClassMessage,
+ bool isSuperMessage) {
+ assert(Method && "Must have a method");
+ if (!Method->hasRelatedResultType())
+ return Method->getSendResultType(ReceiverType);
+
+ ASTContext &Context = S.Context;
+
+ // Local function that transfers the nullability of the method's
+ // result type to the returned result.
+ auto transferNullability = [&](QualType type) -> QualType {
+ // If the method's result type has nullability, extract it.
+ if (auto nullability = Method->getSendResultType(ReceiverType)
+ ->getNullability(Context)){
+ // Strip off any outer nullability sugar from the provided type.
+ (void)AttributedType::stripOuterNullability(type);
+
+ // Form a new attributed type using the method result type's nullability.
+ return Context.getAttributedType(
+ AttributedType::getNullabilityAttrKind(*nullability),
+ type,
+ type);
+ }
+
+ return type;
+ };
+
+ // If a method has a related return type:
+ // - if the method found is an instance method, but the message send
+ // was a class message send, T is the declared return type of the method
+ // found
+ if (Method->isInstanceMethod() && isClassMessage)
+ return stripObjCInstanceType(Context,
+ Method->getSendResultType(ReceiverType));
+
+ // - if the receiver is super, T is a pointer to the class of the
+ // enclosing method definition
+ if (isSuperMessage) {
+ if (ObjCMethodDecl *CurMethod = S.getCurMethodDecl())
+ if (ObjCInterfaceDecl *Class = CurMethod->getClassInterface()) {
+ return transferNullability(
+ Context.getObjCObjectPointerType(
+ Context.getObjCInterfaceType(Class)));
+ }
+ }
+
+ // - if the receiver is the name of a class U, T is a pointer to U
+ if (ReceiverType->getAsObjCInterfaceType())
+ return transferNullability(Context.getObjCObjectPointerType(ReceiverType));
+ // - if the receiver is of type Class or qualified Class type,
+ // T is the declared return type of the method.
+ if (ReceiverType->isObjCClassType() ||
+ ReceiverType->isObjCQualifiedClassType())
+ return stripObjCInstanceType(Context,
+ Method->getSendResultType(ReceiverType));
+
+ // - if the receiver is id, qualified id, Class, or qualified Class, T
+ // is the receiver type, otherwise
+ // - T is the type of the receiver expression.
+ return transferNullability(ReceiverType);
+}
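+
+// Putting those rules together for a related-result-type method such as
+// '-init' declared to return 'instancetype' (class names here are
+// hypothetical):
+//   [[MyWidget alloc] init]  ->  MyWidget *
+//   [super init]             ->  pointer to the enclosing method's class
+//   [(id)obj init]           ->  id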
+
+QualType Sema::getMessageSendResultType(QualType ReceiverType,
+ ObjCMethodDecl *Method,
+ bool isClassMessage,
+ bool isSuperMessage) {
+ // Produce the result type.
+ QualType resultType = getBaseMessageSendResultType(*this, ReceiverType,
+ Method,
+ isClassMessage,
+ isSuperMessage);
+
+ // If this is a class message, ignore the nullability of the receiver.
+ if (isClassMessage)
+ return resultType;
+
+ // Map the nullability of the result into a table index.
+ unsigned receiverNullabilityIdx = 0;
+ if (auto nullability = ReceiverType->getNullability(Context))
+ receiverNullabilityIdx = 1 + static_cast<unsigned>(*nullability);
+
+ unsigned resultNullabilityIdx = 0;
+ if (auto nullability = resultType->getNullability(Context))
+ resultNullabilityIdx = 1 + static_cast<unsigned>(*nullability);
+
+ // The table of nullability mappings, indexed by the receiver's nullability
+ // and then the result type's nullability.
+ static const uint8_t None = 0;
+ static const uint8_t NonNull = 1;
+ static const uint8_t Nullable = 2;
+ static const uint8_t Unspecified = 3;
+ static const uint8_t nullabilityMap[4][4] = {
+ // None NonNull Nullable Unspecified
+ /* None */ { None, None, Nullable, None },
+ /* NonNull */ { None, NonNull, Nullable, Unspecified },
+ /* Nullable */ { Nullable, Nullable, Nullable, Nullable },
+ /* Unspecified */ { None, Unspecified, Nullable, Unspecified }
+ };
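+ // Reading the table: a nullable receiver forces a nullable result (third
+ // row), while a nonnull receiver preserves the result type's own
+ // nullability (second row).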
+
+ unsigned newResultNullabilityIdx
+ = nullabilityMap[receiverNullabilityIdx][resultNullabilityIdx];
+ if (newResultNullabilityIdx == resultNullabilityIdx)
+ return resultType;
+
+ // Strip off the existing nullability. This removes as little type sugar as
+ // possible.
+ do {
+ if (auto attributed = dyn_cast<AttributedType>(resultType.getTypePtr())) {
+ resultType = attributed->getModifiedType();
+ } else {
+ resultType = resultType.getDesugaredType(Context);
+ }
+ } while (resultType->getNullability(Context));
+
+ // Add nullability back if needed.
+ if (newResultNullabilityIdx > 0) {
+ auto newNullability
+ = static_cast<NullabilityKind>(newResultNullabilityIdx-1);
+ return Context.getAttributedType(
+ AttributedType::getNullabilityAttrKind(newNullability),
+ resultType, resultType);
+ }
+
+ return resultType;
+}
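+
+// For example (with a hypothetical property): if 'w' has type
+// 'Widget * _Nullable' and '-name' returns 'NSString * _Nonnull', the table
+// above maps the send '[w name]' to 'NSString * _Nullable', since a message
+// to a nil receiver returns a zero value.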
+
+/// Look for an ObjC method whose result type exactly matches the given type.
+static const ObjCMethodDecl *
+findExplicitInstancetypeDeclarer(const ObjCMethodDecl *MD,
+ QualType instancetype) {
+ if (MD->getReturnType() == instancetype)
+ return MD;
+
+ // For these purposes, a method in an @implementation overrides a
+ // declaration in the @interface.
+ if (const ObjCImplDecl *impl =
+ dyn_cast<ObjCImplDecl>(MD->getDeclContext())) {
+ const ObjCContainerDecl *iface;
+ if (const ObjCCategoryImplDecl *catImpl =
+ dyn_cast<ObjCCategoryImplDecl>(impl)) {
+ iface = catImpl->getCategoryDecl();
+ } else {
+ iface = impl->getClassInterface();
+ }
+
+ const ObjCMethodDecl *ifaceMD =
+ iface->getMethod(MD->getSelector(), MD->isInstanceMethod());
+ if (ifaceMD) return findExplicitInstancetypeDeclarer(ifaceMD, instancetype);
+ }
+
+ SmallVector<const ObjCMethodDecl *, 4> overrides;
+ MD->getOverriddenMethods(overrides);
+ for (unsigned i = 0, e = overrides.size(); i != e; ++i) {
+ if (const ObjCMethodDecl *result =
+ findExplicitInstancetypeDeclarer(overrides[i], instancetype))
+ return result;
+ }
+
+ return nullptr;
+}
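+
+// For example, if an override spells its return type as a concrete class
+// pointer, the walk above can still recover an ancestor declaration that
+// wrote 'instancetype' explicitly.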
+
+void Sema::EmitRelatedResultTypeNoteForReturn(QualType destType) {
+ // Only complain if we're in an ObjC method and the required return
+ // type doesn't match the method's declared return type.
+ ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(CurContext);
+ if (!MD || !MD->hasRelatedResultType() ||
+ Context.hasSameUnqualifiedType(destType, MD->getReturnType()))
+ return;
+
+ // Look for a method overridden by this method which explicitly uses
+ // 'instancetype'.
+ if (const ObjCMethodDecl *overridden =
+ findExplicitInstancetypeDeclarer(MD, Context.getObjCInstanceType())) {
+ SourceRange range = overridden->getReturnTypeSourceRange();
+ SourceLocation loc = range.getBegin();
+ if (loc.isInvalid())
+ loc = overridden->getLocation();
+ Diag(loc, diag::note_related_result_type_explicit)
+ << /*current method*/ 1 << range;
+ return;
+ }
+
+ // Otherwise, if we have an interesting method family, note that.
+ // This should always trigger if the above didn't.
+ if (ObjCMethodFamily family = MD->getMethodFamily())
+ Diag(MD->getLocation(), diag::note_related_result_type_family)
+ << /*current method*/ 1
+ << family;
+}
+
+void Sema::EmitRelatedResultTypeNote(const Expr *E) {
+ E = E->IgnoreParenImpCasts();
+ const ObjCMessageExpr *MsgSend = dyn_cast<ObjCMessageExpr>(E);
+ if (!MsgSend)
+ return;
+
+ const ObjCMethodDecl *Method = MsgSend->getMethodDecl();
+ if (!Method)
+ return;
+
+ if (!Method->hasRelatedResultType())
+ return;
+
+ if (Context.hasSameUnqualifiedType(
+ Method->getReturnType().getNonReferenceType(), MsgSend->getType()))
+ return;
+
+ if (!Context.hasSameUnqualifiedType(Method->getReturnType(),
+ Context.getObjCInstanceType()))
+ return;
+
+ Diag(Method->getLocation(), diag::note_related_result_type_inferred)
+ << Method->isInstanceMethod() << Method->getSelector()
+ << MsgSend->getType();
+}
+
+bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
+ MultiExprArg Args,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelectorLocs,
+ ObjCMethodDecl *Method,
+ bool isClassMessage, bool isSuperMessage,
+ SourceLocation lbrac, SourceLocation rbrac,
+ SourceRange RecRange,
+ QualType &ReturnType, ExprValueKind &VK) {
+ SourceLocation SelLoc;
+ if (!SelectorLocs.empty() && SelectorLocs.front().isValid())
+ SelLoc = SelectorLocs.front();
+ else
+ SelLoc = lbrac;
+
+ if (!Method) {
+ // Apply default argument promotion as for (C99 6.5.2.2p6).
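+ // (E.g. 'float' arguments are promoted to 'double' and small integer
+ // types to 'int' when no method prototype is available.)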
+ for (unsigned i = 0, e = Args.size(); i != e; i++) {
+ if (Args[i]->isTypeDependent())
+ continue;
+
+ ExprResult result;
+ if (getLangOpts().DebuggerSupport) {
+ QualType paramTy; // ignored
+ result = checkUnknownAnyArg(SelLoc, Args[i], paramTy);
+ } else {
+ result = DefaultArgumentPromotion(Args[i]);
+ }
+ if (result.isInvalid())
+ return true;
+ Args[i] = result.get();
+ }
+
+ unsigned DiagID;
+ if (getLangOpts().ObjCAutoRefCount)
+ DiagID = diag::err_arc_method_not_found;
+ else
+ DiagID = isClassMessage ? diag::warn_class_method_not_found
+ : diag::warn_inst_method_not_found;
+ if (!getLangOpts().DebuggerSupport) {
+ const ObjCMethodDecl *OMD = SelectorsForTypoCorrection(Sel, ReceiverType);
+ if (OMD && !OMD->isInvalidDecl()) {
+ if (getLangOpts().ObjCAutoRefCount)
+ DiagID = diag::error_method_not_found_with_typo;
+ else
+ DiagID = isClassMessage ? diag::warn_class_method_not_found_with_typo
+ : diag::warn_instance_method_not_found_with_typo;
+ Selector MatchedSel = OMD->getSelector();
+ SourceRange SelectorRange(SelectorLocs.front(), SelectorLocs.back());
+ if (MatchedSel.isUnarySelector())
+ Diag(SelLoc, DiagID)
+ << Sel << isClassMessage << MatchedSel
+ << FixItHint::CreateReplacement(SelectorRange, MatchedSel.getAsString());
+ else
+ Diag(SelLoc, DiagID) << Sel << isClassMessage << MatchedSel;
+ }
+ else
+ Diag(SelLoc, DiagID)
+ << Sel << isClassMessage << SourceRange(SelectorLocs.front(),
+ SelectorLocs.back());
+ // Find the class to which we are sending this message.
+ if (ReceiverType->isObjCObjectPointerType()) {
+ if (ObjCInterfaceDecl *ThisClass =
+ ReceiverType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()) {
+ Diag(ThisClass->getLocation(), diag::note_receiver_class_declared);
+ if (!RecRange.isInvalid())
+ if (ThisClass->lookupClassMethod(Sel))
+ Diag(RecRange.getBegin(),diag::note_receiver_expr_here)
+ << FixItHint::CreateReplacement(RecRange,
+ ThisClass->getNameAsString());
+ }
+ }
+ }
+
+ // In debuggers, we want to use __unknown_anytype for these
+ // results so that clients can cast them.
+ if (getLangOpts().DebuggerSupport) {
+ ReturnType = Context.UnknownAnyTy;
+ } else {
+ ReturnType = Context.getObjCIdType();
+ }
+ VK = VK_RValue;
+ return false;
+ }
+
+ ReturnType = getMessageSendResultType(ReceiverType, Method, isClassMessage,
+ isSuperMessage);
+ VK = Expr::getValueKindForType(Method->getReturnType());
+
+ unsigned NumNamedArgs = Sel.getNumArgs();
+ // The method might have more arguments than the selector indicates. This
+ // happens when C-style arguments are added to the method declaration.
+ if (Method->param_size() > Sel.getNumArgs())
+ NumNamedArgs = Method->param_size();
+ // FIXME: This needs to be cleaned up.
+ if (Args.size() < NumNamedArgs) {
+ Diag(SelLoc, diag::err_typecheck_call_too_few_args)
+ << 2 << NumNamedArgs << static_cast<unsigned>(Args.size());
+ return false;
+ }
+
+ // Compute the set of type arguments to be substituted into each parameter
+ // type.
+ Optional<ArrayRef<QualType>> typeArgs
+ = ReceiverType->getObjCSubstitutions(Method->getDeclContext());
+ bool IsError = false;
+ for (unsigned i = 0; i < NumNamedArgs; i++) {
+ // We can't do any type-checking on a type-dependent argument.
+ if (Args[i]->isTypeDependent())
+ continue;
+
+ Expr *argExpr = Args[i];
+
+ ParmVarDecl *param = Method->parameters()[i];
+ assert(argExpr && "CheckMessageArgumentTypes(): missing expression");
+
+ // Strip the unbridged-cast placeholder expression off unless it's
+ // a consumed argument.
+ if (argExpr->hasPlaceholderType(BuiltinType::ARCUnbridgedCast) &&
+ !param->hasAttr<CFConsumedAttr>())
+ argExpr = stripARCUnbridgedCast(argExpr);
+
+ // If the parameter is __unknown_anytype, infer its type
+ // from the argument.
+ if (param->getType() == Context.UnknownAnyTy) {
+ QualType paramType;
+ ExprResult argE = checkUnknownAnyArg(SelLoc, argExpr, paramType);
+ if (argE.isInvalid()) {
+ IsError = true;
+ } else {
+ Args[i] = argE.get();
+
+ // Update the parameter type in-place.
+ param->setType(paramType);
+ }
+ continue;
+ }
+
+ QualType origParamType = param->getType();
+ QualType paramType = param->getType();
+ if (typeArgs)
+ paramType = paramType.substObjCTypeArgs(
+ Context,
+ *typeArgs,
+ ObjCSubstitutionContext::Parameter);
+
+ if (RequireCompleteType(argExpr->getSourceRange().getBegin(),
+ paramType,
+ diag::err_call_incomplete_argument, argExpr))
+ return true;
+
+ InitializedEntity Entity
+ = InitializedEntity::InitializeParameter(Context, param, paramType);
+ ExprResult ArgE = PerformCopyInitialization(Entity, SourceLocation(), argExpr);
+ if (ArgE.isInvalid())
+ IsError = true;
+ else {
+ Args[i] = ArgE.getAs<Expr>();
+
+ // If we are type-erasing a block to a block-compatible
+ // Objective-C pointer type, we may need to extend the lifetime
+ // of the block object.
+ if (typeArgs && Args[i]->isRValue() && paramType->isBlockPointerType() &&
+ Args[i]->getType()->isBlockPointerType() &&
+ origParamType->isObjCObjectPointerType()) {
+ ExprResult arg = Args[i];
+ maybeExtendBlockObject(arg);
+ Args[i] = arg.get();
+ }
+ }
+ }
+
+ // Promote additional arguments to variadic methods.
+ if (Method->isVariadic()) {
+ for (unsigned i = NumNamedArgs, e = Args.size(); i < e; ++i) {
+ if (Args[i]->isTypeDependent())
+ continue;
+
+ ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], VariadicMethod,
+ nullptr);
+ IsError |= Arg.isInvalid();
+ Args[i] = Arg.get();
+ }
+ } else {
+ // Check for extra arguments to non-variadic methods.
+ if (Args.size() != NumNamedArgs) {
+ Diag(Args[NumNamedArgs]->getLocStart(),
+ diag::err_typecheck_call_too_many_args)
+ << 2 /*method*/ << NumNamedArgs << static_cast<unsigned>(Args.size())
+ << Method->getSourceRange()
+ << SourceRange(Args[NumNamedArgs]->getLocStart(),
+ Args.back()->getLocEnd());
+ }
+ }
+
+ DiagnoseSentinelCalls(Method, SelLoc, Args);
+
+ // Do additional checkings on method.
+ IsError |= CheckObjCMethodCall(
+ Method, SelLoc, makeArrayRef(Args.data(), Args.size()));
+
+ return IsError;
+}
+
+bool Sema::isSelfExpr(Expr *RExpr) {
+ // An expression can be ObjC 'self' only within an ObjC method.
+ ObjCMethodDecl *Method =
+ dyn_cast_or_null<ObjCMethodDecl>(CurContext->getNonClosureAncestor());
+ return isSelfExpr(RExpr, Method);
+}
+
+bool Sema::isSelfExpr(Expr *receiver, const ObjCMethodDecl *method) {
+ if (!method) return false;
+
+ receiver = receiver->IgnoreParenLValueCasts();
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(receiver))
+ if (DRE->getDecl() == method->getSelfDecl())
+ return true;
+ return false;
+}
+
+/// LookupMethodInObjectType - Look up a method in an ObjCObjectType.
+ObjCMethodDecl *Sema::LookupMethodInObjectType(Selector sel, QualType type,
+ bool isInstance) {
+ const ObjCObjectType *objType = type->castAs<ObjCObjectType>();
+ if (ObjCInterfaceDecl *iface = objType->getInterface()) {
+ // Look it up in the main interface (and categories, etc.)
+ if (ObjCMethodDecl *method = iface->lookupMethod(sel, isInstance))
+ return method;
+
+ // Okay, look for "private" methods declared in any
+ // @implementations we've seen.
+ if (ObjCMethodDecl *method = iface->lookupPrivateMethod(sel, isInstance))
+ return method;
+ }
+
+ // Check qualifiers.
+ for (const auto *I : objType->quals())
+ if (ObjCMethodDecl *method = I->lookupMethod(sel, isInstance))
+ return method;
+
+ return nullptr;
+}
+
+/// LookupMethodInQualifiedType - Looks up a method in the protocol qualifier
+/// list of a qualified Objective-C pointer type.
+ObjCMethodDecl *Sema::LookupMethodInQualifiedType(Selector Sel,
+ const ObjCObjectPointerType *OPT,
+ bool Instance)
+{
+ ObjCMethodDecl *MD = nullptr;
+ for (const auto *PROTO : OPT->quals()) {
+ if ((MD = PROTO->lookupMethod(Sel, Instance))) {
+ return MD;
+ }
+ }
+ return nullptr;
+}
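+
+// E.g. for a receiver of static type 'id<NSCopying>', this is the lookup
+// that finds '-copyWithZone:' among the listed protocols.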
+
+/// HandleExprPropertyRefExpr - Handle foo.bar where foo is a pointer to an
+/// Objective-C interface. This is a property reference expression.
+ExprResult Sema::
+HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
+ Expr *BaseExpr, SourceLocation OpLoc,
+ DeclarationName MemberName,
+ SourceLocation MemberLoc,
+ SourceLocation SuperLoc, QualType SuperType,
+ bool Super) {
+ const ObjCInterfaceType *IFaceT = OPT->getInterfaceType();
+ ObjCInterfaceDecl *IFace = IFaceT->getDecl();
+
+ if (!MemberName.isIdentifier()) {
+ Diag(MemberLoc, diag::err_invalid_property_name)
+ << MemberName << QualType(OPT, 0);
+ return ExprError();
+ }
+
+ IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
+
+ SourceRange BaseRange = Super? SourceRange(SuperLoc)
+ : BaseExpr->getSourceRange();
+ if (RequireCompleteType(MemberLoc, OPT->getPointeeType(),
+ diag::err_property_not_found_forward_class,
+ MemberName, BaseRange))
+ return ExprError();
+
+ if (ObjCPropertyDecl *PD = IFace->FindPropertyDeclaration(Member)) {
+ // Check whether we can reference this property.
+ if (DiagnoseUseOfDecl(PD, MemberLoc))
+ return ExprError();
+ if (Super)
+ return new (Context)
+ ObjCPropertyRefExpr(PD, Context.PseudoObjectTy, VK_LValue,
+ OK_ObjCProperty, MemberLoc, SuperLoc, SuperType);
+ else
+ return new (Context)
+ ObjCPropertyRefExpr(PD, Context.PseudoObjectTy, VK_LValue,
+ OK_ObjCProperty, MemberLoc, BaseExpr);
+ }
+ // Check protocols on qualified interfaces.
+ for (const auto *I : OPT->quals())
+ if (ObjCPropertyDecl *PD = I->FindPropertyDeclaration(Member)) {
+ // Check whether we can reference this property.
+ if (DiagnoseUseOfDecl(PD, MemberLoc))
+ return ExprError();
+
+ if (Super)
+ return new (Context) ObjCPropertyRefExpr(
+ PD, Context.PseudoObjectTy, VK_LValue, OK_ObjCProperty, MemberLoc,
+ SuperLoc, SuperType);
+ else
+ return new (Context)
+ ObjCPropertyRefExpr(PD, Context.PseudoObjectTy, VK_LValue,
+ OK_ObjCProperty, MemberLoc, BaseExpr);
+ }
+ // If that failed, look for an "implicit" property by seeing if the nullary
+ // selector is implemented.
+
+ // FIXME: The logic for looking up nullary and unary selectors should be
+ // shared with the code in ActOnInstanceMessage.
+
+ Selector Sel = PP.getSelectorTable().getNullarySelector(Member);
+ ObjCMethodDecl *Getter = IFace->lookupInstanceMethod(Sel);
+
+ // It may be found in the protocol qualifier list.
+ if (!Getter)
+ Getter = LookupMethodInQualifiedType(Sel, OPT, true);
+
+ // If this reference is in an @implementation, check for 'private' methods.
+ if (!Getter)
+ Getter = IFace->lookupPrivateMethod(Sel);
+
+ if (Getter) {
+ // Check if we can reference this property.
+ if (DiagnoseUseOfDecl(Getter, MemberLoc))
+ return ExprError();
+ }
+ // If we found a getter then this may be a valid dot-reference; look
+ // for the matching setter, in case it is needed.
+ Selector SetterSel =
+ SelectorTable::constructSetterSelector(PP.getIdentifierTable(),
+ PP.getSelectorTable(), Member);
+ ObjCMethodDecl *Setter = IFace->lookupInstanceMethod(SetterSel);
+
+ // It may be found in the protocol qualifier list.
+ if (!Setter)
+ Setter = LookupMethodInQualifiedType(SetterSel, OPT, true);
+
+ if (!Setter) {
+ // If this reference is in an @implementation, also check for 'private'
+ // methods.
+ Setter = IFace->lookupPrivateMethod(SetterSel);
+ }
+
+ if (Setter && DiagnoseUseOfDecl(Setter, MemberLoc))
+ return ExprError();
+
+ // Emit a special warning if the member name used in a property-dot setter
+ // access does not match a property of the same name; e.g. obj.X = ... for
+ // a property named 'x'.
+ if (Setter && Setter->isImplicit() && Setter->isPropertyAccessor()
+ && !IFace->FindPropertyDeclaration(Member)) {
+ if (const ObjCPropertyDecl *PDecl = Setter->findPropertyDecl()) {
+ // Do not warn if the user is using property-dot syntax to call a
+ // user-named setter.
+ if (!(PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter))
+ Diag(MemberLoc,
+ diag::warn_property_access_suggest)
+ << MemberName << QualType(OPT, 0) << PDecl->getName()
+ << FixItHint::CreateReplacement(MemberLoc, PDecl->getName());
+ }
+ }
+
+ if (Getter || Setter) {
+ if (Super)
+ return new (Context)
+ ObjCPropertyRefExpr(Getter, Setter, Context.PseudoObjectTy, VK_LValue,
+ OK_ObjCProperty, MemberLoc, SuperLoc, SuperType);
+ else
+ return new (Context)
+ ObjCPropertyRefExpr(Getter, Setter, Context.PseudoObjectTy, VK_LValue,
+ OK_ObjCProperty, MemberLoc, BaseExpr);
+
+ }
+
+ // Attempt to correct for typos in property names.
+ if (TypoCorrection Corrected =
+ CorrectTypo(DeclarationNameInfo(MemberName, MemberLoc),
+ LookupOrdinaryName, nullptr, nullptr,
+ llvm::make_unique<DeclFilterCCC<ObjCPropertyDecl>>(),
+ CTK_ErrorRecovery, IFace, false, OPT)) {
+ diagnoseTypo(Corrected, PDiag(diag::err_property_not_found_suggest)
+ << MemberName << QualType(OPT, 0));
+ DeclarationName TypoResult = Corrected.getCorrection();
+ return HandleExprPropertyRefExpr(OPT, BaseExpr, OpLoc,
+ TypoResult, MemberLoc,
+ SuperLoc, SuperType, Super);
+ }
+ ObjCInterfaceDecl *ClassDeclared;
+ if (ObjCIvarDecl *Ivar =
+ IFace->lookupInstanceVariable(Member, ClassDeclared)) {
+ QualType T = Ivar->getType();
+ if (const ObjCObjectPointerType * OBJPT =
+ T->getAsObjCInterfacePointerType()) {
+ if (RequireCompleteType(MemberLoc, OBJPT->getPointeeType(),
+ diag::err_property_not_as_forward_class,
+ MemberName, BaseExpr))
+ return ExprError();
+ }
+ Diag(MemberLoc,
+ diag::err_ivar_access_using_property_syntax_suggest)
+ << MemberName << QualType(OPT, 0) << Ivar->getDeclName()
+ << FixItHint::CreateReplacement(OpLoc, "->");
+ return ExprError();
+ }
+
+ Diag(MemberLoc, diag::err_property_not_found)
+ << MemberName << QualType(OPT, 0);
+ if (Setter)
+ Diag(Setter->getLocation(), diag::note_getter_unavailable)
+ << MemberName << BaseExpr->getSourceRange();
+ return ExprError();
+}
+
+ExprResult Sema::
+ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
+ IdentifierInfo &propertyName,
+ SourceLocation receiverNameLoc,
+ SourceLocation propertyNameLoc) {
+
+ IdentifierInfo *receiverNamePtr = &receiverName;
+ ObjCInterfaceDecl *IFace = getObjCInterfaceDecl(receiverNamePtr,
+ receiverNameLoc);
+
+ QualType SuperType;
+ if (!IFace) {
+ // If the "receiver" is 'super' in a method, handle it as an expression-like
+ // property reference.
+ if (receiverNamePtr->isStr("super")) {
+ if (ObjCMethodDecl *CurMethod = tryCaptureObjCSelf(receiverNameLoc)) {
+ if (auto classDecl = CurMethod->getClassInterface()) {
+ SuperType = QualType(classDecl->getSuperClassType(), 0);
+ if (CurMethod->isInstanceMethod()) {
+ if (SuperType.isNull()) {
+ // The current class does not have a superclass.
+ Diag(receiverNameLoc, diag::error_root_class_cannot_use_super)
+ << CurMethod->getClassInterface()->getIdentifier();
+ return ExprError();
+ }
+ QualType T = Context.getObjCObjectPointerType(SuperType);
+
+ return HandleExprPropertyRefExpr(T->castAs<ObjCObjectPointerType>(),
+ /*BaseExpr*/nullptr,
+ SourceLocation()/*OpLoc*/,
+ &propertyName,
+ propertyNameLoc,
+ receiverNameLoc, T, true);
+ }
+
+ // Otherwise, if this is a class method, try dispatching to our
+ // superclass.
+ IFace = CurMethod->getClassInterface()->getSuperClass();
+ }
+ }
+ }
+
+ if (!IFace) {
+ Diag(receiverNameLoc, diag::err_expected_either) << tok::identifier
+ << tok::l_paren;
+ return ExprError();
+ }
+ }
+
+ // Search for a declared property first.
+ Selector Sel = PP.getSelectorTable().getNullarySelector(&propertyName);
+ ObjCMethodDecl *Getter = IFace->lookupClassMethod(Sel);
+
+ // If this reference is in an @implementation, check for 'private' methods.
+ if (!Getter)
+ Getter = IFace->lookupPrivateClassMethod(Sel);
+
+ if (Getter) {
+ // FIXME: refactor/share with ActOnMemberReference().
+ // Check if we can reference this property.
+ if (DiagnoseUseOfDecl(Getter, propertyNameLoc))
+ return ExprError();
+ }
+
+ // Look for the matching setter, in case it is needed.
+ Selector SetterSel =
+ SelectorTable::constructSetterSelector(PP.getIdentifierTable(),
+ PP.getSelectorTable(),
+ &propertyName);
+
+ ObjCMethodDecl *Setter = IFace->lookupClassMethod(SetterSel);
+ if (!Setter) {
+ // If this reference is in an @implementation, also check for 'private'
+ // methods.
+ Setter = IFace->lookupPrivateClassMethod(SetterSel);
+ }
+ // Look through local category implementations associated with the class.
+ if (!Setter)
+ Setter = IFace->getCategoryClassMethod(SetterSel);
+
+ if (Setter && DiagnoseUseOfDecl(Setter, propertyNameLoc))
+ return ExprError();
+
+ if (Getter || Setter) {
+ if (!SuperType.isNull())
+ return new (Context)
+ ObjCPropertyRefExpr(Getter, Setter, Context.PseudoObjectTy, VK_LValue,
+ OK_ObjCProperty, propertyNameLoc, receiverNameLoc,
+ SuperType);
+
+ return new (Context) ObjCPropertyRefExpr(
+ Getter, Setter, Context.PseudoObjectTy, VK_LValue, OK_ObjCProperty,
+ propertyNameLoc, receiverNameLoc, IFace);
+ }
+ return ExprError(Diag(propertyNameLoc, diag::err_property_not_found)
+ << &propertyName << Context.getObjCInterfaceType(IFace));
+}
+
+namespace {
+
+class ObjCInterfaceOrSuperCCC : public CorrectionCandidateCallback {
+ public:
+ ObjCInterfaceOrSuperCCC(ObjCMethodDecl *Method) {
+ // Determine whether "super" is acceptable in the current context.
+ if (Method && Method->getClassInterface())
+ WantObjCSuper = Method->getClassInterface()->getSuperClass();
+ }
+
+ bool ValidateCandidate(const TypoCorrection &candidate) override {
+ return candidate.getCorrectionDeclAs<ObjCInterfaceDecl>() ||
+ candidate.isKeyword("super");
+ }
+};
+
+}
+
+Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S,
+ IdentifierInfo *Name,
+ SourceLocation NameLoc,
+ bool IsSuper,
+ bool HasTrailingDot,
+ ParsedType &ReceiverType) {
+ ReceiverType = ParsedType();
+
+ // If the identifier is "super" and there is no trailing dot, we're
+ // messaging super. If the identifier is "super" and there is a
+ // trailing dot, it's an instance message.
+ if (IsSuper && S->isInObjcMethodScope())
+ return HasTrailingDot? ObjCInstanceMessage : ObjCSuperMessage;
+
+ LookupResult Result(*this, Name, NameLoc, LookupOrdinaryName);
+ LookupName(Result, S);
+
+ switch (Result.getResultKind()) {
+ case LookupResult::NotFound:
+ // Normal name lookup didn't find anything. If we're in an
+ // Objective-C method, look for ivars. If we find one, we're done!
+ // FIXME: This is a hack. Ivar lookup should be part of normal
+ // lookup.
+ if (ObjCMethodDecl *Method = getCurMethodDecl()) {
+ if (!Method->getClassInterface()) {
+ // Fall back: let the parser try to parse it as an instance message.
+ return ObjCInstanceMessage;
+ }
+
+ ObjCInterfaceDecl *ClassDeclared;
+ if (Method->getClassInterface()->lookupInstanceVariable(Name,
+ ClassDeclared))
+ return ObjCInstanceMessage;
+ }
+
+ // Break out; we'll perform typo correction below.
+ break;
+
+ case LookupResult::NotFoundInCurrentInstantiation:
+ case LookupResult::FoundOverloaded:
+ case LookupResult::FoundUnresolvedValue:
+ case LookupResult::Ambiguous:
+ Result.suppressDiagnostics();
+ return ObjCInstanceMessage;
+
+ case LookupResult::Found: {
+ // Whether or not the identifier resolves to a class, a trailing dot
+ // means this is an instance message.
+ if (HasTrailingDot)
+ return ObjCInstanceMessage;
+ // We found something. If it's a type, then we have a class
+ // message. Otherwise, it's an instance message.
+ NamedDecl *ND = Result.getFoundDecl();
+ QualType T;
+ if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(ND))
+ T = Context.getObjCInterfaceType(Class);
+ else if (TypeDecl *Type = dyn_cast<TypeDecl>(ND)) {
+ T = Context.getTypeDeclType(Type);
+ DiagnoseUseOfDecl(Type, NameLoc);
+ }
+ else
+ return ObjCInstanceMessage;
+
+ // We have a class message, and T is the type we're
+ // messaging. Build source-location information for it.
+ TypeSourceInfo *TSInfo = Context.getTrivialTypeSourceInfo(T, NameLoc);
+ ReceiverType = CreateParsedType(T, TSInfo);
+ return ObjCClassMessage;
+ }
+ }
+
+ if (TypoCorrection Corrected = CorrectTypo(
+ Result.getLookupNameInfo(), Result.getLookupKind(), S, nullptr,
+ llvm::make_unique<ObjCInterfaceOrSuperCCC>(getCurMethodDecl()),
+ CTK_ErrorRecovery, nullptr, false, nullptr, false)) {
+ if (Corrected.isKeyword()) {
+ // If we've found the keyword "super" (the only keyword that would be
+ // returned by CorrectTypo), this is a send to super.
+ diagnoseTypo(Corrected,
+ PDiag(diag::err_unknown_receiver_suggest) << Name);
+ return ObjCSuperMessage;
+ } else if (ObjCInterfaceDecl *Class =
+ Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>()) {
+ // If we found a declaration, correct when it refers to an Objective-C
+ // class.
+ diagnoseTypo(Corrected,
+ PDiag(diag::err_unknown_receiver_suggest) << Name);
+ QualType T = Context.getObjCInterfaceType(Class);
+ TypeSourceInfo *TSInfo = Context.getTrivialTypeSourceInfo(T, NameLoc);
+ ReceiverType = CreateParsedType(T, TSInfo);
+ return ObjCClassMessage;
+ }
+ }
+
+ // Fall back: let the parser try to parse it as an instance message.
+ return ObjCInstanceMessage;
+}
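+
+// In short (names are illustrative): when the receiver identifier resolves
+// to a type, as 'NSString' does in '[NSString string]', the send is a class
+// message; when it resolves to a variable, it is an instance message; and an
+// unresolved name falls back to an instance message unless typo correction
+// can suggest a class or 'super'.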
+
+ExprResult Sema::ActOnSuperMessage(Scope *S,
+ SourceLocation SuperLoc,
+ Selector Sel,
+ SourceLocation LBracLoc,
+ ArrayRef<SourceLocation> SelectorLocs,
+ SourceLocation RBracLoc,
+ MultiExprArg Args) {
+ // Determine whether we are inside a method or not.
+ ObjCMethodDecl *Method = tryCaptureObjCSelf(SuperLoc);
+ if (!Method) {
+ Diag(SuperLoc, diag::err_invalid_receiver_to_message_super);
+ return ExprError();
+ }
+
+ ObjCInterfaceDecl *Class = Method->getClassInterface();
+ if (!Class) {
+ Diag(SuperLoc, diag::error_no_super_class_message)
+ << Method->getDeclName();
+ return ExprError();
+ }
+
+ QualType SuperTy(Class->getSuperClassType(), 0);
+ if (SuperTy.isNull()) {
+ // The current class does not have a superclass.
+ Diag(SuperLoc, diag::error_root_class_cannot_use_super)
+ << Class->getIdentifier();
+ return ExprError();
+ }
+
+ // We are in a method whose class has a superclass, so 'super'
+ // is acting as a keyword.
+ if (Method->getSelector() == Sel)
+ getCurFunction()->ObjCShouldCallSuper = false;
+
+ if (Method->isInstanceMethod()) {
+ // Since we are in an instance method, this is an instance
+ // message to the superclass instance.
+ SuperTy = Context.getObjCObjectPointerType(SuperTy);
+ return BuildInstanceMessage(nullptr, SuperTy, SuperLoc,
+ Sel, /*Method=*/nullptr,
+ LBracLoc, SelectorLocs, RBracLoc, Args);
+ }
+
+ // Since we are in a class method, this is a class message to
+ // the superclass.
+ return BuildClassMessage(/*ReceiverTypeInfo=*/nullptr,
+ SuperTy,
+ SuperLoc, Sel, /*Method=*/nullptr,
+ LBracLoc, SelectorLocs, RBracLoc, Args);
+}
+
+ExprResult Sema::BuildClassMessageImplicit(QualType ReceiverType,
+ bool isSuperReceiver,
+ SourceLocation Loc,
+ Selector Sel,
+ ObjCMethodDecl *Method,
+ MultiExprArg Args) {
+ TypeSourceInfo *receiverTypeInfo = nullptr;
+ if (!ReceiverType.isNull())
+ receiverTypeInfo = Context.getTrivialTypeSourceInfo(ReceiverType);
+
+ return BuildClassMessage(receiverTypeInfo, ReceiverType,
+ /*SuperLoc=*/isSuperReceiver ? Loc : SourceLocation(),
+ Sel, Method, Loc, Loc, Loc, Args,
+ /*isImplicit=*/true);
+}
+
+static void applyCocoaAPICheck(Sema &S, const ObjCMessageExpr *Msg,
+ unsigned DiagID,
+ bool (*refactor)(const ObjCMessageExpr *,
+ const NSAPI &, edit::Commit &)) {
+ SourceLocation MsgLoc = Msg->getExprLoc();
+ if (S.Diags.isIgnored(DiagID, MsgLoc))
+ return;
+
+ SourceManager &SM = S.SourceMgr;
+ edit::Commit ECommit(SM, S.LangOpts);
+ if (refactor(Msg, *S.NSAPIObj, ECommit)) {
+ DiagnosticBuilder Builder = S.Diag(MsgLoc, DiagID)
+ << Msg->getSelector() << Msg->getSourceRange();
+ // FIXME: Don't emit the diagnostic at all if the fixits are non-committable.
+ if (!ECommit.isCommitable())
+ return;
+ for (edit::Commit::edit_iterator
+ I = ECommit.edit_begin(), E = ECommit.edit_end(); I != E; ++I) {
+ const edit::Commit::Edit &Edit = *I;
+ switch (Edit.Kind) {
+ case edit::Commit::Act_Insert:
+ Builder.AddFixItHint(FixItHint::CreateInsertion(Edit.OrigLoc,
+ Edit.Text,
+ Edit.BeforePrev));
+ break;
+ case edit::Commit::Act_InsertFromRange:
+ Builder.AddFixItHint(
+ FixItHint::CreateInsertionFromRange(Edit.OrigLoc,
+ Edit.getInsertFromRange(SM),
+ Edit.BeforePrev));
+ break;
+ case edit::Commit::Act_Remove:
+ Builder.AddFixItHint(FixItHint::CreateRemoval(Edit.getFileRange(SM)));
+ break;
+ }
+ }
+ }
+}
+
+static void checkCocoaAPI(Sema &S, const ObjCMessageExpr *Msg) {
+ applyCocoaAPICheck(S, Msg, diag::warn_objc_redundant_literal_use,
+ edit::rewriteObjCRedundantCallWithLiteral);
+}
+
+/// \brief Diagnose use of the %s directive in an NSString that is being
+/// passed as a format string to a formatting method.
+static void
+DiagnoseCStringFormatDirectiveInObjCAPI(Sema &S,
+ ObjCMethodDecl *Method,
+ Selector Sel,
+ Expr **Args, unsigned NumArgs) {
+ unsigned Idx = 0;
+ bool Format = false;
+ ObjCStringFormatFamily SFFamily = Sel.getStringFormatFamily();
+ if (SFFamily == ObjCStringFormatFamily::SFF_NSString) {
+ Idx = 0;
+ Format = true;
+ }
+ else if (Method) {
+ for (const auto *I : Method->specific_attrs<FormatAttr>()) {
+ if (S.GetFormatNSStringIdx(I, Idx)) {
+ Format = true;
+ break;
+ }
+ }
+ }
+ if (!Format || NumArgs <= Idx)
+ return;
+
+ Expr *FormatExpr = Args[Idx];
+ if (ObjCStringLiteral *OSL =
+ dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts())) {
+ StringLiteral *FormatString = OSL->getString();
+ if (S.FormatStringHasSArg(FormatString)) {
+ S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
+ << "%s" << 0 << 0;
+ if (Method)
+ S.Diag(Method->getLocation(), diag::note_method_declared_at)
+ << Method->getDeclName();
+ }
+ }
+}
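+
+// E.g. a send like '[NSString stringWithFormat:@"%s", p]' should be caught
+// here: the selector is in the NSString format family, and the literal
+// contains a %s directive.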
+
+/// \brief Build an Objective-C class message expression.
+///
+/// This routine takes care of both normal class messages and
+/// class messages to the superclass.
+///
+/// \param ReceiverTypeInfo Type source information that describes the
+/// receiver of this message. This may be NULL, in which case we are
+/// sending to the superclass and \p SuperLoc must be a valid source
+/// location.
+///
+/// \param ReceiverType The type of the object receiving the
+/// message. When \p ReceiverTypeInfo is non-NULL, this is the same
+/// type as that refers to. For a superclass send, this is the type of
+/// the superclass.
+///
+/// \param SuperLoc The location of the "super" keyword in a
+/// superclass message.
+///
+/// \param Sel The selector to which the message is being sent.
+///
+/// \param Method The method that this class message is invoking, if
+/// already known.
+///
+/// \param LBracLoc The location of the opening square bracket '['.
+///
+/// \param RBracLoc The location of the closing square bracket ']'.
+///
+/// \param ArgsIn The message arguments.
+ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
+ QualType ReceiverType,
+ SourceLocation SuperLoc,
+ Selector Sel,
+ ObjCMethodDecl *Method,
+ SourceLocation LBracLoc,
+ ArrayRef<SourceLocation> SelectorLocs,
+ SourceLocation RBracLoc,
+ MultiExprArg ArgsIn,
+ bool isImplicit) {
+ SourceLocation Loc = SuperLoc.isValid()? SuperLoc
+ : ReceiverTypeInfo->getTypeLoc().getSourceRange().getBegin();
+ if (LBracLoc.isInvalid()) {
+ Diag(Loc, diag::err_missing_open_square_message_send)
+ << FixItHint::CreateInsertion(Loc, "[");
+ LBracLoc = Loc;
+ }
+ SourceLocation SelLoc;
+ if (!SelectorLocs.empty() && SelectorLocs.front().isValid())
+ SelLoc = SelectorLocs.front();
+ else
+ SelLoc = Loc;
+
+ if (ReceiverType->isDependentType()) {
+ // If the receiver type is dependent, we can't type-check anything
+ // at this point. Build a dependent expression.
+ unsigned NumArgs = ArgsIn.size();
+ Expr **Args = ArgsIn.data();
+ assert(SuperLoc.isInvalid() && "Message to super with dependent type");
+ return ObjCMessageExpr::Create(
+ Context, ReceiverType, VK_RValue, LBracLoc, ReceiverTypeInfo, Sel,
+ SelectorLocs, /*Method=*/nullptr, makeArrayRef(Args, NumArgs), RBracLoc,
+ isImplicit);
+ }
+
+ // Find the class to which we are sending this message.
+ ObjCInterfaceDecl *Class = nullptr;
+ const ObjCObjectType *ClassType = ReceiverType->getAs<ObjCObjectType>();
+ if (!ClassType || !(Class = ClassType->getInterface())) {
+ Diag(Loc, diag::err_invalid_receiver_class_message)
+ << ReceiverType;
+ return ExprError();
+ }
+ assert(Class && "We don't know which class we're messaging?");
+ // Objective-C++ diagnoses this during typename annotation.
+ if (!getLangOpts().CPlusPlus)
+ (void)DiagnoseUseOfDecl(Class, SelLoc);
+ // Find the method we are messaging.
+ if (!Method) {
+ SourceRange TypeRange
+ = SuperLoc.isValid()? SourceRange(SuperLoc)
+ : ReceiverTypeInfo->getTypeLoc().getSourceRange();
+ if (RequireCompleteType(Loc, Context.getObjCInterfaceType(Class),
+ (getLangOpts().ObjCAutoRefCount
+ ? diag::err_arc_receiver_forward_class
+ : diag::warn_receiver_forward_class),
+ TypeRange)) {
+ // A forward class used in messaging is treated as a 'Class'
+ Method = LookupFactoryMethodInGlobalPool(Sel,
+ SourceRange(LBracLoc, RBracLoc));
+ if (Method && !getLangOpts().ObjCAutoRefCount)
+ Diag(Method->getLocation(), diag::note_method_sent_forward_class)
+ << Method->getDeclName();
+ }
+ if (!Method)
+ Method = Class->lookupClassMethod(Sel);
+
+ // If we have an implementation in scope, check "private" methods.
+ if (!Method)
+ Method = Class->lookupPrivateClassMethod(Sel);
+
+ if (Method && DiagnoseUseOfDecl(Method, SelLoc))
+ return ExprError();
+ }
+
+ // Check the argument types and determine the result type.
+ QualType ReturnType;
+ ExprValueKind VK = VK_RValue;
+
+ unsigned NumArgs = ArgsIn.size();
+ Expr **Args = ArgsIn.data();
+ if (CheckMessageArgumentTypes(ReceiverType, MultiExprArg(Args, NumArgs),
+ Sel, SelectorLocs,
+ Method, true,
+ SuperLoc.isValid(), LBracLoc, RBracLoc,
+ SourceRange(),
+ ReturnType, VK))
+ return ExprError();
+
+ if (Method && !Method->getReturnType()->isVoidType() &&
+ RequireCompleteType(LBracLoc, Method->getReturnType(),
+ diag::err_illegal_message_expr_incomplete_type))
+ return ExprError();
+
+ // Warn about an explicit call of +initialize on the class itself, but not on 'super'.
+ if (Method && Method->getMethodFamily() == OMF_initialize) {
+ if (!SuperLoc.isValid()) {
+ const ObjCInterfaceDecl *ID =
+ dyn_cast<ObjCInterfaceDecl>(Method->getDeclContext());
+ if (ID == Class) {
+ Diag(Loc, diag::warn_direct_initialize_call);
+ Diag(Method->getLocation(), diag::note_method_declared_at)
+ << Method->getDeclName();
+ }
+ }
+ else if (ObjCMethodDecl *CurMeth = getCurMethodDecl()) {
+ // [super initialize] is allowed only within an +initialize implementation
+ if (CurMeth->getMethodFamily() != OMF_initialize) {
+ Diag(Loc, diag::warn_direct_super_initialize_call);
+ Diag(Method->getLocation(), diag::note_method_declared_at)
+ << Method->getDeclName();
+ Diag(CurMeth->getLocation(), diag::note_method_declared_at)
+ << CurMeth->getDeclName();
+ }
+ }
+ }
+
+ DiagnoseCStringFormatDirectiveInObjCAPI(*this, Method, Sel, Args, NumArgs);
+
+ // Construct the appropriate ObjCMessageExpr.
+ ObjCMessageExpr *Result;
+ if (SuperLoc.isValid())
+ Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
+ SuperLoc, /*IsInstanceSuper=*/false,
+ ReceiverType, Sel, SelectorLocs,
+ Method, makeArrayRef(Args, NumArgs),
+ RBracLoc, isImplicit);
+ else {
+ Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
+ ReceiverTypeInfo, Sel, SelectorLocs,
+ Method, makeArrayRef(Args, NumArgs),
+ RBracLoc, isImplicit);
+ if (!isImplicit)
+ checkCocoaAPI(*this, Result);
+ }
+ return MaybeBindToTemporary(Result);
+}
+
+// ActOnClassMessage - used for both unary and keyword messages.
+// ArgExprs is optional - if it is present, the number of expressions
+// is obtained from Sel.getNumArgs().
+ExprResult Sema::ActOnClassMessage(Scope *S,
+ ParsedType Receiver,
+ Selector Sel,
+ SourceLocation LBracLoc,
+ ArrayRef<SourceLocation> SelectorLocs,
+ SourceLocation RBracLoc,
+ MultiExprArg Args) {
+ TypeSourceInfo *ReceiverTypeInfo;
+ QualType ReceiverType = GetTypeFromParser(Receiver, &ReceiverTypeInfo);
+ if (ReceiverType.isNull())
+ return ExprError();
+
+ if (!ReceiverTypeInfo)
+ ReceiverTypeInfo = Context.getTrivialTypeSourceInfo(ReceiverType, LBracLoc);
+
+ return BuildClassMessage(ReceiverTypeInfo, ReceiverType,
+ /*SuperLoc=*/SourceLocation(), Sel,
+ /*Method=*/nullptr, LBracLoc, SelectorLocs, RBracLoc,
+ Args);
+}
+
+ExprResult Sema::BuildInstanceMessageImplicit(Expr *Receiver,
+ QualType ReceiverType,
+ SourceLocation Loc,
+ Selector Sel,
+ ObjCMethodDecl *Method,
+ MultiExprArg Args) {
+ return BuildInstanceMessage(Receiver, ReceiverType,
+ /*SuperLoc=*/!Receiver ? Loc : SourceLocation(),
+ Sel, Method, Loc, Loc, Loc, Args,
+ /*isImplicit=*/true);
+}
+
+/// \brief Build an Objective-C instance message expression.
+///
+/// This routine takes care of both normal instance messages and
+/// instance messages to the superclass instance.
+///
+/// \param Receiver The expression that computes the object that will
+/// receive this message. This may be empty, in which case we are
+/// sending to the superclass instance and \p SuperLoc must be a valid
+/// source location.
+///
+/// \param ReceiverType The (static) type of the object receiving the
+/// message. When a \p Receiver expression is provided, this is the
+/// same type as that expression. For a superclass instance send, this
+/// is a pointer to the type of the superclass.
+///
+/// \param SuperLoc The location of the "super" keyword in a
+/// superclass instance message.
+///
+/// \param Sel The selector to which the message is being sent.
+///
+/// \param Method The method that this instance message is invoking, if
+/// already known.
+///
+/// \param LBracLoc The location of the opening square bracket '['.
+///
+/// \param RBracLoc The location of the closing square bracket ']'.
+///
+/// \param ArgsIn The message arguments.
+ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
+ QualType ReceiverType,
+ SourceLocation SuperLoc,
+ Selector Sel,
+ ObjCMethodDecl *Method,
+ SourceLocation LBracLoc,
+ ArrayRef<SourceLocation> SelectorLocs,
+ SourceLocation RBracLoc,
+ MultiExprArg ArgsIn,
+ bool isImplicit) {
+ // The location of the receiver.
+ SourceLocation Loc = SuperLoc.isValid()? SuperLoc : Receiver->getLocStart();
+ SourceRange RecRange =
+ SuperLoc.isValid()? SuperLoc : Receiver->getSourceRange();
+ SourceLocation SelLoc;
+ if (!SelectorLocs.empty() && SelectorLocs.front().isValid())
+ SelLoc = SelectorLocs.front();
+ else
+ SelLoc = Loc;
+
+ if (LBracLoc.isInvalid()) {
+ Diag(Loc, diag::err_missing_open_square_message_send)
+ << FixItHint::CreateInsertion(Loc, "[");
+ LBracLoc = Loc;
+ }
+
+ // If we have a receiver expression, perform appropriate promotions
+ // and determine receiver type.
+ if (Receiver) {
+ if (Receiver->hasPlaceholderType()) {
+ ExprResult Result;
+ if (Receiver->getType() == Context.UnknownAnyTy)
+ Result = forceUnknownAnyToType(Receiver, Context.getObjCIdType());
+ else
+ Result = CheckPlaceholderExpr(Receiver);
+ if (Result.isInvalid()) return ExprError();
+ Receiver = Result.get();
+ }
+
+ if (Receiver->isTypeDependent()) {
+ // If the receiver is type-dependent, we can't type-check anything
+ // at this point. Build a dependent expression.
+ unsigned NumArgs = ArgsIn.size();
+ Expr **Args = ArgsIn.data();
+ assert(SuperLoc.isInvalid() && "Message to super with dependent type");
+ return ObjCMessageExpr::Create(
+ Context, Context.DependentTy, VK_RValue, LBracLoc, Receiver, Sel,
+ SelectorLocs, /*Method=*/nullptr, makeArrayRef(Args, NumArgs),
+ RBracLoc, isImplicit);
+ }
+
+ // If necessary, apply function/array conversion to the receiver.
+ // C99 6.7.5.3p[7,8].
+ ExprResult Result = DefaultFunctionArrayLvalueConversion(Receiver);
+ if (Result.isInvalid())
+ return ExprError();
+ Receiver = Result.get();
+ ReceiverType = Receiver->getType();
+
+ // If the receiver is an ObjC pointer, a block pointer, or an
+ // __attribute__((NSObject)) pointer, we don't need to do any
+ // special conversion in order to look up a receiver.
+ if (ReceiverType->isObjCRetainableType()) {
+ // do nothing
+ } else if (!getLangOpts().ObjCAutoRefCount &&
+ !Context.getObjCIdType().isNull() &&
+ (ReceiverType->isPointerType() ||
+ ReceiverType->isIntegerType())) {
+ // Implicitly convert integers and pointers to 'id' but emit a warning.
+ // But not in ARC.
+ Diag(Loc, diag::warn_bad_receiver_type)
+ << ReceiverType
+ << Receiver->getSourceRange();
+ if (ReceiverType->isPointerType()) {
+ Receiver = ImpCastExprToType(Receiver, Context.getObjCIdType(),
+ CK_CPointerToObjCPointerCast).get();
+ } else {
+ // TODO: specialized warning on null receivers?
+ bool IsNull = Receiver->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull);
+ CastKind Kind = IsNull ? CK_NullToPointer : CK_IntegralToPointer;
+ Receiver = ImpCastExprToType(Receiver, Context.getObjCIdType(),
+ Kind).get();
+ }
+ ReceiverType = Receiver->getType();
+ } else if (getLangOpts().CPlusPlus) {
+ // The receiver must be a complete type.
+ if (RequireCompleteType(Loc, Receiver->getType(),
+ diag::err_incomplete_receiver_type))
+ return ExprError();
+
+ ExprResult result = PerformContextuallyConvertToObjCPointer(Receiver);
+ if (result.isUsable()) {
+ Receiver = result.get();
+ ReceiverType = Receiver->getType();
+ }
+ }
+ }
+
+ // There's a somewhat weird interaction here where we assume that we
+ // won't actually have a method unless we also don't need to do some
+ // of the more detailed type-checking on the receiver.
+
+ if (!Method) {
+ // Handle messages to id and __kindof types (where we use the
+ // global method pool).
+ // FIXME: The type bound is currently ignored by lookup in the
+ // global pool.
+ const ObjCObjectType *typeBound = nullptr;
+ bool receiverIsIdLike = ReceiverType->isObjCIdOrObjectKindOfType(Context,
+ typeBound);
+ if (receiverIsIdLike || ReceiverType->isBlockPointerType() ||
+ (Receiver && Context.isObjCNSObjectType(Receiver->getType()))) {
+ Method = LookupInstanceMethodInGlobalPool(Sel,
+ SourceRange(LBracLoc, RBracLoc),
+ receiverIsIdLike);
+ if (!Method)
+ Method = LookupFactoryMethodInGlobalPool(Sel,
+ SourceRange(LBracLoc,RBracLoc),
+ receiverIsIdLike);
+ if (Method) {
+ if (ObjCMethodDecl *BestMethod =
+ SelectBestMethod(Sel, ArgsIn, Method->isInstanceMethod()))
+ Method = BestMethod;
+ if (!AreMultipleMethodsInGlobalPool(Sel, Method,
+ SourceRange(LBracLoc, RBracLoc),
+ receiverIsIdLike)) {
+ DiagnoseUseOfDecl(Method, SelLoc);
+ }
+ }
+ } else if (ReceiverType->isObjCClassOrClassKindOfType() ||
+ ReceiverType->isObjCQualifiedClassType()) {
+ // Handle messages to Class.
+ // We allow sending a message to a qualified Class ("Class<foo>"), which
+ // is ok as long as one of the protocols implements the selector (if not,
+ // warn).
+ if (!ReceiverType->isObjCClassOrClassKindOfType()) {
+ const ObjCObjectPointerType *QClassTy
+ = ReceiverType->getAsObjCQualifiedClassType();
+ // Search protocols for class methods.
+ Method = LookupMethodInQualifiedType(Sel, QClassTy, false);
+ if (!Method) {
+ Method = LookupMethodInQualifiedType(Sel, QClassTy, true);
+ // Warn if an instance method was found for a Class message.
+ if (Method) {
+ Diag(SelLoc, diag::warn_instance_method_on_class_found)
+ << Method->getSelector() << Sel;
+ Diag(Method->getLocation(), diag::note_method_declared_at)
+ << Method->getDeclName();
+ }
+ }
+ } else {
+ if (ObjCMethodDecl *CurMeth = getCurMethodDecl()) {
+ if (ObjCInterfaceDecl *ClassDecl = CurMeth->getClassInterface()) {
+ // First check the public methods in the class interface.
+ Method = ClassDecl->lookupClassMethod(Sel);
+
+ if (!Method)
+ Method = ClassDecl->lookupPrivateClassMethod(Sel);
+ }
+ if (Method && DiagnoseUseOfDecl(Method, SelLoc))
+ return ExprError();
+ }
+ if (!Method) {
+ // If not messaging 'self', look for any factory method named 'Sel'.
+ if (!Receiver || !isSelfExpr(Receiver)) {
+ Method = LookupFactoryMethodInGlobalPool(Sel,
+ SourceRange(LBracLoc, RBracLoc));
+ if (!Method) {
+ // If no class (factory) method was found, check if an _instance_
+ // method of the same name exists in the root class only.
+ Method = LookupInstanceMethodInGlobalPool(Sel,
+ SourceRange(LBracLoc, RBracLoc));
+ if (Method)
+ if (const ObjCInterfaceDecl *ID =
+ dyn_cast<ObjCInterfaceDecl>(Method->getDeclContext())) {
+ if (ID->getSuperClass())
+ Diag(SelLoc, diag::warn_root_inst_method_not_found)
+ << Sel << SourceRange(LBracLoc, RBracLoc);
+ }
+ }
+ if (Method)
+ if (ObjCMethodDecl *BestMethod =
+ SelectBestMethod(Sel, ArgsIn, Method->isInstanceMethod()))
+ Method = BestMethod;
+ }
+ }
+ }
+ } else {
+ ObjCInterfaceDecl *ClassDecl = nullptr;
+
+ // We allow sending a message to a qualified ID ("id<foo>"), which is ok as
+ // long as one of the protocols implements the selector (if not, warn).
+ // And as long as message is not deprecated/unavailable (warn if it is).
+ if (const ObjCObjectPointerType *QIdTy
+ = ReceiverType->getAsObjCQualifiedIdType()) {
+ // Search protocols for instance methods.
+ Method = LookupMethodInQualifiedType(Sel, QIdTy, true);
+ if (!Method)
+ Method = LookupMethodInQualifiedType(Sel, QIdTy, false);
+ if (Method && DiagnoseUseOfDecl(Method, SelLoc))
+ return ExprError();
+ } else if (const ObjCObjectPointerType *OCIType
+ = ReceiverType->getAsObjCInterfacePointerType()) {
+ // We allow sending a message to a pointer to an interface (an object).
+ ClassDecl = OCIType->getInterfaceDecl();
+
+ // Try to complete the type. Under ARC, this is a hard error from which
+ // we don't try to recover.
+ // FIXME: In the non-ARC case, this will still be a hard error if the
+ // definition is found in a module that's not visible.
+ const ObjCInterfaceDecl *forwardClass = nullptr;
+ if (RequireCompleteType(Loc, OCIType->getPointeeType(),
+ getLangOpts().ObjCAutoRefCount
+ ? diag::err_arc_receiver_forward_instance
+ : diag::warn_receiver_forward_instance,
+ Receiver? Receiver->getSourceRange()
+ : SourceRange(SuperLoc))) {
+ if (getLangOpts().ObjCAutoRefCount)
+ return ExprError();
+
+ forwardClass = OCIType->getInterfaceDecl();
+ Diag(Receiver ? Receiver->getLocStart()
+ : SuperLoc, diag::note_receiver_is_id);
+ Method = nullptr;
+ } else {
+ Method = ClassDecl->lookupInstanceMethod(Sel);
+ }
+
+ if (!Method)
+ // Search protocol qualifiers.
+ Method = LookupMethodInQualifiedType(Sel, OCIType, true);
+
+ if (!Method) {
+ // If we have implementations in scope, check "private" methods.
+ Method = ClassDecl->lookupPrivateMethod(Sel);
+
+ if (!Method && getLangOpts().ObjCAutoRefCount) {
+ Diag(SelLoc, diag::err_arc_may_not_respond)
+ << OCIType->getPointeeType() << Sel << RecRange
+ << SourceRange(SelectorLocs.front(), SelectorLocs.back());
+ return ExprError();
+ }
+
+ if (!Method && (!Receiver || !isSelfExpr(Receiver))) {
+ // If we still haven't found a method, look in the global pool. This
+ // behavior isn't very desirable; however, we need it for GCC
+ // compatibility. FIXME: should we deviate?
+ if (OCIType->qual_empty()) {
+ Method = LookupInstanceMethodInGlobalPool(Sel,
+ SourceRange(LBracLoc, RBracLoc));
+ if (Method) {
+ if (auto BestMethod =
+ SelectBestMethod(Sel, ArgsIn, Method->isInstanceMethod()))
+ Method = BestMethod;
+ AreMultipleMethodsInGlobalPool(Sel, Method,
+ SourceRange(LBracLoc, RBracLoc),
+ true);
+ }
+ if (Method && !forwardClass)
+ Diag(SelLoc, diag::warn_maynot_respond)
+ << OCIType->getInterfaceDecl()->getIdentifier()
+ << Sel << RecRange;
+ }
+ }
+ }
+ if (Method && DiagnoseUseOfDecl(Method, SelLoc, forwardClass))
+ return ExprError();
+ } else {
+ // Reject other random receiver types (e.g. structs).
+ Diag(Loc, diag::err_bad_receiver_type)
+ << ReceiverType << Receiver->getSourceRange();
+ return ExprError();
+ }
+ }
+ }
+
+ FunctionScopeInfo *DIFunctionScopeInfo =
+ (Method && Method->getMethodFamily() == OMF_init)
+ ? getEnclosingFunction() : nullptr;
+
+ if (DIFunctionScopeInfo &&
+ DIFunctionScopeInfo->ObjCIsDesignatedInit &&
+ (SuperLoc.isValid() || isSelfExpr(Receiver))) {
+ bool isDesignatedInitChain = false;
+ if (SuperLoc.isValid()) {
+ if (const ObjCObjectPointerType *
+ OCIType = ReceiverType->getAsObjCInterfacePointerType()) {
+ if (const ObjCInterfaceDecl *ID = OCIType->getInterfaceDecl()) {
+ // Either we know this is a designated initializer or we
+ // conservatively assume it because we don't know for sure.
+ if (!ID->declaresOrInheritsDesignatedInitializers() ||
+ ID->isDesignatedInitializer(Sel)) {
+ isDesignatedInitChain = true;
+ DIFunctionScopeInfo->ObjCWarnForNoDesignatedInitChain = false;
+ }
+ }
+ }
+ }
+ if (!isDesignatedInitChain) {
+ const ObjCMethodDecl *InitMethod = nullptr;
+ bool isDesignated =
+ getCurMethodDecl()->isDesignatedInitializerForTheInterface(&InitMethod);
+ assert(isDesignated && InitMethod);
+ (void)isDesignated;
+ Diag(SelLoc, SuperLoc.isValid() ?
+ diag::warn_objc_designated_init_non_designated_init_call :
+ diag::warn_objc_designated_init_non_super_designated_init_call);
+ Diag(InitMethod->getLocation(),
+ diag::note_objc_designated_init_marked_here);
+ }
+ }
+
+ if (DIFunctionScopeInfo &&
+ DIFunctionScopeInfo->ObjCIsSecondaryInit &&
+ (SuperLoc.isValid() || isSelfExpr(Receiver))) {
+ if (SuperLoc.isValid()) {
+ Diag(SelLoc, diag::warn_objc_secondary_init_super_init_call);
+ } else {
+ DIFunctionScopeInfo->ObjCWarnForNoInitDelegation = false;
+ }
+ }
+
+ // Check the message arguments.
+ unsigned NumArgs = ArgsIn.size();
+ Expr **Args = ArgsIn.data();
+ QualType ReturnType;
+ ExprValueKind VK = VK_RValue;
+ bool ClassMessage = (ReceiverType->isObjCClassType() ||
+ ReceiverType->isObjCQualifiedClassType());
+ if (CheckMessageArgumentTypes(ReceiverType, MultiExprArg(Args, NumArgs),
+ Sel, SelectorLocs, Method,
+ ClassMessage, SuperLoc.isValid(),
+ LBracLoc, RBracLoc, RecRange, ReturnType, VK))
+ return ExprError();
+
+ if (Method && !Method->getReturnType()->isVoidType() &&
+ RequireCompleteType(LBracLoc, Method->getReturnType(),
+ diag::err_illegal_message_expr_incomplete_type))
+ return ExprError();
+
+ // In ARC, forbid the user from sending messages to
+ // retain/release/autorelease/dealloc/retainCount explicitly.
+ if (getLangOpts().ObjCAutoRefCount) {
+ ObjCMethodFamily family =
+ (Method ? Method->getMethodFamily() : Sel.getMethodFamily());
+ switch (family) {
+ case OMF_init:
+ if (Method)
+ checkInitMethod(Method, ReceiverType);
+
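+ // fall through: an 'init' send is otherwise treated like the benign
+ // families below.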
+ case OMF_None:
+ case OMF_alloc:
+ case OMF_copy:
+ case OMF_finalize:
+ case OMF_mutableCopy:
+ case OMF_new:
+ case OMF_self:
+ case OMF_initialize:
+ break;
+
+ case OMF_dealloc:
+ case OMF_retain:
+ case OMF_release:
+ case OMF_autorelease:
+ case OMF_retainCount:
+ Diag(SelLoc, diag::err_arc_illegal_explicit_message)
+ << Sel << RecRange;
+ break;
+
+ case OMF_performSelector:
+ if (Method && NumArgs >= 1) {
+ if (ObjCSelectorExpr *SelExp = dyn_cast<ObjCSelectorExpr>(Args[0])) {
+ Selector ArgSel = SelExp->getSelector();
+ ObjCMethodDecl *SelMethod =
+ LookupInstanceMethodInGlobalPool(ArgSel,
+ SelExp->getSourceRange());
+ if (!SelMethod)
+ SelMethod =
+ LookupFactoryMethodInGlobalPool(ArgSel,
+ SelExp->getSourceRange());
+ if (SelMethod) {
+ ObjCMethodFamily SelFamily = SelMethod->getMethodFamily();
+ switch (SelFamily) {
+ case OMF_alloc:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ case OMF_new:
+ case OMF_self:
+ case OMF_init:
+ // Issue error, unless ns_returns_not_retained.
+ if (!SelMethod->hasAttr<NSReturnsNotRetainedAttr>()) {
+ // selector names a +1 method
+ Diag(SelLoc,
+ diag::err_arc_perform_selector_retains);
+ Diag(SelMethod->getLocation(), diag::note_method_declared_at)
+ << SelMethod->getDeclName();
+ }
+ break;
+ default:
+ // A +0 call is OK, unless ns_returns_retained.
+ if (SelMethod->hasAttr<NSReturnsRetainedAttr>()) {
+ // selector names a +1 method
+ Diag(SelLoc,
+ diag::err_arc_perform_selector_retains);
+ Diag(SelMethod->getLocation(), diag::note_method_declared_at)
+ << SelMethod->getDeclName();
+ }
+ break;
+ }
+ }
+ } else {
+ // error (may leak).
+ Diag(SelLoc, diag::warn_arc_perform_selector_leaks);
+ Diag(Args[0]->getExprLoc(), diag::note_used_here);
+ }
+ }
+ break;
+ }
+ }
+
+ DiagnoseCStringFormatDirectiveInObjCAPI(*this, Method, Sel, Args, NumArgs);
+
+ // Construct the appropriate ObjCMessageExpr instance.
+ ObjCMessageExpr *Result;
+ if (SuperLoc.isValid())
+ Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
+ SuperLoc, /*IsInstanceSuper=*/true,
+ ReceiverType, Sel, SelectorLocs, Method,
+ makeArrayRef(Args, NumArgs), RBracLoc,
+ isImplicit);
+ else {
+ Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
+ Receiver, Sel, SelectorLocs, Method,
+ makeArrayRef(Args, NumArgs), RBracLoc,
+ isImplicit);
+ if (!isImplicit)
+ checkCocoaAPI(*this, Result);
+ }
+
+ if (getLangOpts().ObjCAutoRefCount) {
+ // In ARC, annotate delegate init calls.
+ if (Result->getMethodFamily() == OMF_init &&
+ (SuperLoc.isValid() || isSelfExpr(Receiver))) {
+ // Only consider init calls *directly* in init implementations,
+ // not within blocks.
+ ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(CurContext);
+ if (method && method->getMethodFamily() == OMF_init) {
+ // The implicit assignment to self means we also don't want to
+ // consume the result.
+ Result->setDelegateInitCall(true);
+ return Result;
+ }
+ }
+
+ // In ARC, check for message sends which are likely to introduce
+ // retain cycles.
+ checkRetainCycles(Result);
+
+ if (!isImplicit && Method) {
+ if (const ObjCPropertyDecl *Prop = Method->findPropertyDecl()) {
+ bool IsWeak =
+ Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak;
+ if (!IsWeak && Sel.isUnarySelector())
+ IsWeak = ReturnType.getObjCLifetime() & Qualifiers::OCL_Weak;
+ if (IsWeak &&
+ !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, LBracLoc))
+ getCurFunction()->recordUseOfWeak(Result, Prop);
+ }
+ }
+ }
+
+ CheckObjCCircularContainer(Result);
+
+ return MaybeBindToTemporary(Result);
+}
+
+static void RemoveSelectorFromWarningCache(Sema &S, Expr* Arg) {
+ if (ObjCSelectorExpr *OSE =
+ dyn_cast<ObjCSelectorExpr>(Arg->IgnoreParenCasts())) {
+ Selector Sel = OSE->getSelector();
+ SourceLocation Loc = OSE->getAtLoc();
+ auto Pos = S.ReferencedSelectors.find(Sel);
+ if (Pos != S.ReferencedSelectors.end() && Pos->second == Loc)
+ S.ReferencedSelectors.erase(Pos);
+ }
+}
+
+// ActOnInstanceMessage - used for both unary and keyword messages.
+// Args is optional - if it is present, the number of expressions
+// is obtained from Sel.getNumArgs().
+ExprResult Sema::ActOnInstanceMessage(Scope *S,
+ Expr *Receiver,
+ Selector Sel,
+ SourceLocation LBracLoc,
+ ArrayRef<SourceLocation> SelectorLocs,
+ SourceLocation RBracLoc,
+ MultiExprArg Args) {
+ if (!Receiver)
+ return ExprError();
+
+ // A ParenListExpr can show up while doing error recovery with invalid code.
+ if (isa<ParenListExpr>(Receiver)) {
+ ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Receiver);
+ if (Result.isInvalid()) return ExprError();
+ Receiver = Result.get();
+ }
+
+ if (RespondsToSelectorSel.isNull()) {
+ IdentifierInfo *SelectorId = &Context.Idents.get("respondsToSelector");
+ RespondsToSelectorSel = Context.Selectors.getUnarySelector(SelectorId);
+ }
+ if (Sel == RespondsToSelectorSel)
+ RemoveSelectorFromWarningCache(*this, Args[0]);
+
+ return BuildInstanceMessage(Receiver, Receiver->getType(),
+ /*SuperLoc=*/SourceLocation(), Sel,
+ /*Method=*/nullptr, LBracLoc, SelectorLocs,
+ RBracLoc, Args);
+}
+
+enum ARCConversionTypeClass {
+ /// int, void, struct A
+ ACTC_none,
+
+ /// id, void (^)()
+ ACTC_retainable,
+
+ /// id*, id***, void (^*)(),
+ ACTC_indirectRetainable,
+
+  /// void* might be a normal C type, or it might be a CF type.
+ ACTC_voidPtr,
+
+ /// struct A*
+ ACTC_coreFoundation
+};
+static bool isAnyRetainable(ARCConversionTypeClass ACTC) {
+ return (ACTC == ACTC_retainable ||
+ ACTC == ACTC_coreFoundation ||
+ ACTC == ACTC_voidPtr);
+}
+static bool isAnyCLike(ARCConversionTypeClass ACTC) {
+ return ACTC == ACTC_none ||
+ ACTC == ACTC_voidPtr ||
+ ACTC == ACTC_coreFoundation;
+}
+
+static ARCConversionTypeClass classifyTypeForARCConversion(QualType type) {
+ bool isIndirect = false;
+
+ // Ignore an outermost reference type.
+ if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
+ type = ref->getPointeeType();
+ isIndirect = true;
+ }
+
+ // Drill through pointers and arrays recursively.
+ while (true) {
+ if (const PointerType *ptr = type->getAs<PointerType>()) {
+ type = ptr->getPointeeType();
+
+ // The first level of pointer may be the innermost pointer on a CF type.
+ if (!isIndirect) {
+ if (type->isVoidType()) return ACTC_voidPtr;
+ if (type->isRecordType()) return ACTC_coreFoundation;
+ }
+ } else if (const ArrayType *array = type->getAsArrayTypeUnsafe()) {
+ type = QualType(array->getElementType()->getBaseElementTypeUnsafe(), 0);
+ } else {
+ break;
+ }
+ isIndirect = true;
+ }
+
+ if (isIndirect) {
+ if (type->isObjCARCBridgableType())
+ return ACTC_indirectRetainable;
+ return ACTC_none;
+ }
+
+ if (type->isObjCARCBridgableType())
+ return ACTC_retainable;
+
+ return ACTC_none;
+}
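+
+// For example, the classification above yields:
+//   int, struct A                -> ACTC_none
+//   id, NSString *, void (^)()   -> ACTC_retainable
+//   id *, __strong id *          -> ACTC_indirectRetainable
+//   void *                       -> ACTC_voidPtr
+//   CFStringRef (struct pointer) -> ACTC_coreFoundation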
+
+namespace {
+ /// A result from the cast checker.
+ enum ACCResult {
+    /// Cannot be cast.
+ ACC_invalid,
+
+ /// Can be safely retained or not retained.
+ ACC_bottom,
+
+    /// Can be cast at +0.
+ ACC_plusZero,
+
+    /// Can be cast at +1.
+ ACC_plusOne
+ };
+ ACCResult merge(ACCResult left, ACCResult right) {
+ if (left == right) return left;
+ if (left == ACC_bottom) return right;
+ if (right == ACC_bottom) return left;
+ return ACC_invalid;
+ }
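+
+  // merge() forms a small lattice with ACC_bottom as the identity and
+  // ACC_invalid as the sink; for instance:
+  //   merge(ACC_bottom, ACC_plusZero)  == ACC_plusZero
+  //   merge(ACC_plusZero, ACC_plusOne) == ACC_invalid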
+
+ /// A checker which white-lists certain expressions whose conversion
+ /// to or from retainable type would otherwise be forbidden in ARC.
+ class ARCCastChecker : public StmtVisitor<ARCCastChecker, ACCResult> {
+ typedef StmtVisitor<ARCCastChecker, ACCResult> super;
+
+ ASTContext &Context;
+ ARCConversionTypeClass SourceClass;
+ ARCConversionTypeClass TargetClass;
+ bool Diagnose;
+
+ static bool isCFType(QualType type) {
+ // Someday this can use ns_bridged. For now, it has to do this.
+ return type->isCARCBridgableType();
+ }
+
+ public:
+ ARCCastChecker(ASTContext &Context, ARCConversionTypeClass source,
+ ARCConversionTypeClass target, bool diagnose)
+ : Context(Context), SourceClass(source), TargetClass(target),
+ Diagnose(diagnose) {}
+
+ using super::Visit;
+ ACCResult Visit(Expr *e) {
+ return super::Visit(e->IgnoreParens());
+ }
+
+ ACCResult VisitStmt(Stmt *s) {
+ return ACC_invalid;
+ }
+
+    /// Null pointer constants can be cast however you please.
+ ACCResult VisitExpr(Expr *e) {
+ if (e->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNotNull))
+ return ACC_bottom;
+ return ACC_invalid;
+ }
+
+    /// Objective-C string literals can be safely cast.
+ ACCResult VisitObjCStringLiteral(ObjCStringLiteral *e) {
+ // If we're casting to any retainable type, go ahead. Global
+ // strings are immune to retains, so this is bottom.
+ if (isAnyRetainable(TargetClass)) return ACC_bottom;
+
+ return ACC_invalid;
+ }
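+
+    // For instance, (CFStringRef)@"boxcar" is accepted: the literal is
+    // effectively immortal, so no retain-count convention can be violated.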
+
+ /// Look through certain implicit and explicit casts.
+ ACCResult VisitCastExpr(CastExpr *e) {
+ switch (e->getCastKind()) {
+ case CK_NullToPointer:
+ return ACC_bottom;
+
+ case CK_NoOp:
+ case CK_LValueToRValue:
+ case CK_BitCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ return Visit(e->getSubExpr());
+
+ default:
+ return ACC_invalid;
+ }
+ }
+
+ /// Look through unary extension.
+ ACCResult VisitUnaryExtension(UnaryOperator *e) {
+ return Visit(e->getSubExpr());
+ }
+
+ /// Ignore the LHS of a comma operator.
+ ACCResult VisitBinComma(BinaryOperator *e) {
+ return Visit(e->getRHS());
+ }
+
+ /// Conditional operators are okay if both sides are okay.
+ ACCResult VisitConditionalOperator(ConditionalOperator *e) {
+ ACCResult left = Visit(e->getTrueExpr());
+ if (left == ACC_invalid) return ACC_invalid;
+ return merge(left, Visit(e->getFalseExpr()));
+ }
+
+ /// Look through pseudo-objects.
+ ACCResult VisitPseudoObjectExpr(PseudoObjectExpr *e) {
+ // If we're getting here, we should always have a result.
+ return Visit(e->getResultExpr());
+ }
+
+ /// Statement expressions are okay if their result expression is okay.
+ ACCResult VisitStmtExpr(StmtExpr *e) {
+ return Visit(e->getSubStmt()->body_back());
+ }
+
+ /// Some declaration references are okay.
+ ACCResult VisitDeclRefExpr(DeclRefExpr *e) {
+ VarDecl *var = dyn_cast<VarDecl>(e->getDecl());
+ // References to global constants are okay.
+ if (isAnyRetainable(TargetClass) &&
+ isAnyRetainable(SourceClass) &&
+ var &&
+ var->getStorageClass() == SC_Extern &&
+ var->getType().isConstQualified()) {
+
+ // In system headers, they can also be assumed to be immune to retains.
+ // These are things like 'kCFStringTransformToLatin'.
+ if (Context.getSourceManager().isInSystemHeader(var->getLocation()))
+ return ACC_bottom;
+
+ return ACC_plusZero;
+ }
+
+ // Nothing else.
+ return ACC_invalid;
+ }
+
+ /// Some calls are okay.
+ ACCResult VisitCallExpr(CallExpr *e) {
+ if (FunctionDecl *fn = e->getDirectCallee())
+ if (ACCResult result = checkCallToFunction(fn))
+ return result;
+
+ return super::VisitCallExpr(e);
+ }
+
+ ACCResult checkCallToFunction(FunctionDecl *fn) {
+ // Require a CF*Ref return type.
+ if (!isCFType(fn->getReturnType()))
+ return ACC_invalid;
+
+ if (!isAnyRetainable(TargetClass))
+ return ACC_invalid;
+
+ // Honor an explicit 'not retained' attribute.
+ if (fn->hasAttr<CFReturnsNotRetainedAttr>())
+ return ACC_plusZero;
+
+ // Honor an explicit 'retained' attribute, except that for
+ // now we're not going to permit implicit handling of +1 results,
+ // because it's a bit frightening.
+ if (fn->hasAttr<CFReturnsRetainedAttr>())
+ return Diagnose ? ACC_plusOne
+ : ACC_invalid; // ACC_plusOne if we start accepting this
+
+ // Recognize this specific builtin function, which is used by CFSTR.
+ unsigned builtinID = fn->getBuiltinID();
+ if (builtinID == Builtin::BI__builtin___CFStringMakeConstantString)
+ return ACC_bottom;
+
+ // Otherwise, don't do anything implicit with an unaudited function.
+ if (!fn->hasAttr<CFAuditedTransferAttr>())
+ return ACC_invalid;
+
+ // Otherwise, it's +0 unless it follows the create convention.
+ if (ento::coreFoundation::followsCreateRule(fn))
+ return Diagnose ? ACC_plusOne
+ : ACC_invalid; // ACC_plusOne if we start accepting this
+
+ return ACC_plusZero;
+ }
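+
+    // E.g., given a hypothetical audited accessor
+    //   CFStringRef MyGetName(void) __attribute__((cf_audited_transfer));
+    // that does not follow the create rule, (id)MyGetName() is treated as +0.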
+
+ ACCResult VisitObjCMessageExpr(ObjCMessageExpr *e) {
+ return checkCallToMethod(e->getMethodDecl());
+ }
+
+ ACCResult VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *e) {
+ ObjCMethodDecl *method;
+ if (e->isExplicitProperty())
+ method = e->getExplicitProperty()->getGetterMethodDecl();
+ else
+ method = e->getImplicitPropertyGetter();
+ return checkCallToMethod(method);
+ }
+
+ ACCResult checkCallToMethod(ObjCMethodDecl *method) {
+ if (!method) return ACC_invalid;
+
+ // Check for message sends to functions returning CF types. We
+ // just obey the Cocoa conventions with these, even though the
+ // return type is CF.
+ if (!isAnyRetainable(TargetClass) || !isCFType(method->getReturnType()))
+ return ACC_invalid;
+
+ // If the method is explicitly marked not-retained, it's +0.
+ if (method->hasAttr<CFReturnsNotRetainedAttr>())
+ return ACC_plusZero;
+
+ // If the method is explicitly marked as returning retained, or its
+ // selector follows a +1 Cocoa convention, treat it as +1.
+ if (method->hasAttr<CFReturnsRetainedAttr>())
+ return ACC_plusOne;
+
+ switch (method->getSelector().getMethodFamily()) {
+ case OMF_alloc:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ case OMF_new:
+ return ACC_plusOne;
+
+ default:
+ // Otherwise, treat it as +0.
+ return ACC_plusZero;
+ }
+ }
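+
+    // E.g., a hypothetical method '- (CFStringRef)copyName;' falls in the
+    // OMF_copy selector family, so its result is treated as +1.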
+ };
+}
+
+bool Sema::isKnownName(StringRef name) {
+ if (name.empty())
+ return false;
+ LookupResult R(*this, &Context.Idents.get(name), SourceLocation(),
+ Sema::LookupOrdinaryName);
+ return LookupName(R, TUScope, false);
+}
+
+static void addFixitForObjCARCConversion(Sema &S,
+ DiagnosticBuilder &DiagB,
+ Sema::CheckedConversionKind CCK,
+ SourceLocation afterLParen,
+ QualType castType,
+ Expr *castExpr,
+ Expr *realCast,
+ const char *bridgeKeyword,
+ const char *CFBridgeName) {
+ // We handle C-style and implicit casts here.
+ switch (CCK) {
+ case Sema::CCK_ImplicitConversion:
+ case Sema::CCK_CStyleCast:
+ case Sema::CCK_OtherCast:
+ break;
+ case Sema::CCK_FunctionalCast:
+ return;
+ }
+
+ if (CFBridgeName) {
+ if (CCK == Sema::CCK_OtherCast) {
+ if (const CXXNamedCastExpr *NCE = dyn_cast<CXXNamedCastExpr>(realCast)) {
+ SourceRange range(NCE->getOperatorLoc(),
+ NCE->getAngleBrackets().getEnd());
+ SmallString<32> BridgeCall;
+
+ SourceManager &SM = S.getSourceManager();
+ char PrevChar = *SM.getCharacterData(range.getBegin().getLocWithOffset(-1));
+ if (Lexer::isIdentifierBodyChar(PrevChar, S.getLangOpts()))
+ BridgeCall += ' ';
+
+ BridgeCall += CFBridgeName;
+ DiagB.AddFixItHint(FixItHint::CreateReplacement(range, BridgeCall));
+ }
+ return;
+ }
+ Expr *castedE = castExpr;
+ if (CStyleCastExpr *CCE = dyn_cast<CStyleCastExpr>(castedE))
+ castedE = CCE->getSubExpr();
+ castedE = castedE->IgnoreImpCasts();
+ SourceRange range = castedE->getSourceRange();
+
+ SmallString<32> BridgeCall;
+
+ SourceManager &SM = S.getSourceManager();
+ char PrevChar = *SM.getCharacterData(range.getBegin().getLocWithOffset(-1));
+ if (Lexer::isIdentifierBodyChar(PrevChar, S.getLangOpts()))
+ BridgeCall += ' ';
+
+ BridgeCall += CFBridgeName;
+
+ if (isa<ParenExpr>(castedE)) {
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(range.getBegin(),
+ BridgeCall));
+ } else {
+ BridgeCall += '(';
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(range.getBegin(),
+ BridgeCall));
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(
+ S.getLocForEndOfToken(range.getEnd()),
+ ")"));
+ }
+ return;
+ }
+
+ if (CCK == Sema::CCK_CStyleCast) {
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(afterLParen, bridgeKeyword));
+ } else if (CCK == Sema::CCK_OtherCast) {
+ if (const CXXNamedCastExpr *NCE = dyn_cast<CXXNamedCastExpr>(realCast)) {
+ std::string castCode = "(";
+ castCode += bridgeKeyword;
+ castCode += castType.getAsString();
+ castCode += ")";
+ SourceRange Range(NCE->getOperatorLoc(),
+ NCE->getAngleBrackets().getEnd());
+ DiagB.AddFixItHint(FixItHint::CreateReplacement(Range, castCode));
+ }
+ } else {
+ std::string castCode = "(";
+ castCode += bridgeKeyword;
+ castCode += castType.getAsString();
+ castCode += ")";
+ Expr *castedE = castExpr->IgnoreImpCasts();
+ SourceRange range = castedE->getSourceRange();
+ if (isa<ParenExpr>(castedE)) {
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(range.getBegin(),
+ castCode));
+ } else {
+ castCode += "(";
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(range.getBegin(),
+ castCode));
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(
+ S.getLocForEndOfToken(range.getEnd()),
+ ")"));
+ }
+ }
+}
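+
+// For a C-style cast such as (NSString *)str, the fix-its built above insert
+// the bridge keyword after the '(' -- yielding (__bridge NSString *)str --
+// while the CFBridgeName path wraps the operand instead, e.g.
+// CFBridgingRelease(str).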
+
+template <typename T>
+static inline T *getObjCBridgeAttr(const TypedefType *TD) {
+ TypedefNameDecl *TDNDecl = TD->getDecl();
+ QualType QT = TDNDecl->getUnderlyingType();
+ if (QT->isPointerType()) {
+ QT = QT->getPointeeType();
+ if (const RecordType *RT = QT->getAs<RecordType>())
+ if (RecordDecl *RD = RT->getDecl()->getMostRecentDecl())
+ return RD->getAttr<T>();
+ }
+ return nullptr;
+}
+
+static ObjCBridgeRelatedAttr *ObjCBridgeRelatedAttrFromType(QualType T,
+ TypedefNameDecl *&TDNDecl) {
+ while (const TypedefType *TD = dyn_cast<TypedefType>(T.getTypePtr())) {
+ TDNDecl = TD->getDecl();
+ if (ObjCBridgeRelatedAttr *ObjCBAttr =
+ getObjCBridgeAttr<ObjCBridgeRelatedAttr>(TD))
+ return ObjCBAttr;
+ T = TDNDecl->getUnderlyingType();
+ }
+ return nullptr;
+}
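+
+// The attribute located above is spelled, for example (illustrative):
+//   typedef struct CGColor *CGColorRef
+//       __attribute__((objc_bridge_related(NSColor, colorWithCGColor:, CGColor)));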
+
+static void
+diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
+ QualType castType, ARCConversionTypeClass castACTC,
+ Expr *castExpr, Expr *realCast,
+ ARCConversionTypeClass exprACTC,
+ Sema::CheckedConversionKind CCK) {
+ SourceLocation loc =
+ (castRange.isValid() ? castRange.getBegin() : castExpr->getExprLoc());
+
+ if (S.makeUnavailableInSystemHeader(loc,
+ UnavailableAttr::IR_ARCForbiddenConversion))
+ return;
+
+ QualType castExprType = castExpr->getType();
+ TypedefNameDecl *TDNDecl = nullptr;
+ if ((castACTC == ACTC_coreFoundation && exprACTC == ACTC_retainable &&
+ ObjCBridgeRelatedAttrFromType(castType, TDNDecl)) ||
+ (exprACTC == ACTC_coreFoundation && castACTC == ACTC_retainable &&
+ ObjCBridgeRelatedAttrFromType(castExprType, TDNDecl)))
+ return;
+
+ unsigned srcKind = 0;
+ switch (exprACTC) {
+ case ACTC_none:
+ case ACTC_coreFoundation:
+ case ACTC_voidPtr:
+ srcKind = (castExprType->isPointerType() ? 1 : 0);
+ break;
+ case ACTC_retainable:
+ srcKind = (castExprType->isBlockPointerType() ? 2 : 3);
+ break;
+ case ACTC_indirectRetainable:
+ srcKind = 4;
+ break;
+ }
+
+ // Check whether this could be fixed with a bridge cast.
+ SourceLocation afterLParen = S.getLocForEndOfToken(castRange.getBegin());
+ SourceLocation noteLoc = afterLParen.isValid() ? afterLParen : loc;
+
+ // Bridge from an ARC type to a CF type.
+ if (castACTC == ACTC_retainable && isAnyRetainable(exprACTC)) {
+
+ S.Diag(loc, diag::err_arc_cast_requires_bridge)
+ << unsigned(CCK == Sema::CCK_ImplicitConversion) // cast|implicit
+ << 2 // of C pointer type
+ << castExprType
+ << unsigned(castType->isBlockPointerType()) // to ObjC|block type
+ << castType
+ << castRange
+ << castExpr->getSourceRange();
+ bool br = S.isKnownName("CFBridgingRelease");
+ ACCResult CreateRule =
+ ARCCastChecker(S.Context, exprACTC, castACTC, true).Visit(castExpr);
+ assert(CreateRule != ACC_bottom && "This cast should already be accepted.");
+    if (CreateRule != ACC_plusOne) {
+ DiagnosticBuilder DiagB =
+ (CCK != Sema::CCK_OtherCast) ? S.Diag(noteLoc, diag::note_arc_bridge)
+ : S.Diag(noteLoc, diag::note_arc_cstyle_bridge);
+
+ addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
+ castType, castExpr, realCast, "__bridge ",
+ nullptr);
+ }
+    if (CreateRule != ACC_plusZero) {
+ DiagnosticBuilder DiagB =
+ (CCK == Sema::CCK_OtherCast && !br) ?
+ S.Diag(noteLoc, diag::note_arc_cstyle_bridge_transfer) << castExprType :
+ S.Diag(br ? castExpr->getExprLoc() : noteLoc,
+ diag::note_arc_bridge_transfer)
+ << castExprType << br;
+
+ addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
+ castType, castExpr, realCast, "__bridge_transfer ",
+ br ? "CFBridgingRelease" : nullptr);
+ }
+
+ return;
+ }
+
+ // Bridge from a CF type to an ARC type.
+ if (exprACTC == ACTC_retainable && isAnyRetainable(castACTC)) {
+ bool br = S.isKnownName("CFBridgingRetain");
+ S.Diag(loc, diag::err_arc_cast_requires_bridge)
+ << unsigned(CCK == Sema::CCK_ImplicitConversion) // cast|implicit
+ << unsigned(castExprType->isBlockPointerType()) // of ObjC|block type
+ << castExprType
+ << 2 // to C pointer type
+ << castType
+ << castRange
+ << castExpr->getSourceRange();
+ ACCResult CreateRule =
+ ARCCastChecker(S.Context, exprACTC, castACTC, true).Visit(castExpr);
+ assert(CreateRule != ACC_bottom && "This cast should already be accepted.");
+    if (CreateRule != ACC_plusOne) {
+ DiagnosticBuilder DiagB =
+ (CCK != Sema::CCK_OtherCast) ? S.Diag(noteLoc, diag::note_arc_bridge)
+ : S.Diag(noteLoc, diag::note_arc_cstyle_bridge);
+ addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
+ castType, castExpr, realCast, "__bridge ",
+ nullptr);
+ }
+    if (CreateRule != ACC_plusZero) {
+ DiagnosticBuilder DiagB =
+ (CCK == Sema::CCK_OtherCast && !br) ?
+ S.Diag(noteLoc, diag::note_arc_cstyle_bridge_retained) << castType :
+ S.Diag(br ? castExpr->getExprLoc() : noteLoc,
+ diag::note_arc_bridge_retained)
+ << castType << br;
+
+ addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
+ castType, castExpr, realCast, "__bridge_retained ",
+ br ? "CFBridgingRetain" : nullptr);
+ }
+
+ return;
+ }
+
+ S.Diag(loc, diag::err_arc_mismatched_cast)
+ << (CCK != Sema::CCK_ImplicitConversion)
+ << srcKind << castExprType << castType
+ << castRange << castExpr->getSourceRange();
+}
+
+template <typename TB>
+static bool CheckObjCBridgeNSCast(Sema &S, QualType castType, Expr *castExpr,
+ bool &HadTheAttribute, bool warn) {
+ QualType T = castExpr->getType();
+ HadTheAttribute = false;
+ while (const TypedefType *TD = dyn_cast<TypedefType>(T.getTypePtr())) {
+ TypedefNameDecl *TDNDecl = TD->getDecl();
+ if (TB *ObjCBAttr = getObjCBridgeAttr<TB>(TD)) {
+ if (IdentifierInfo *Parm = ObjCBAttr->getBridgedType()) {
+ HadTheAttribute = true;
+ if (Parm->isStr("id"))
+ return true;
+
+ NamedDecl *Target = nullptr;
+ // Check for an existing type with this name.
+ LookupResult R(S, DeclarationName(Parm), SourceLocation(),
+ Sema::LookupOrdinaryName);
+ if (S.LookupName(R, S.TUScope)) {
+ Target = R.getFoundDecl();
+ if (Target && isa<ObjCInterfaceDecl>(Target)) {
+ ObjCInterfaceDecl *ExprClass = cast<ObjCInterfaceDecl>(Target);
+ if (const ObjCObjectPointerType *InterfacePointerType =
+ castType->getAsObjCInterfacePointerType()) {
+ ObjCInterfaceDecl *CastClass
+ = InterfacePointerType->getObjectType()->getInterface();
+ if ((CastClass == ExprClass) ||
+ (CastClass && CastClass->isSuperClassOf(ExprClass)))
+ return true;
+ if (warn)
+ S.Diag(castExpr->getLocStart(), diag::warn_objc_invalid_bridge)
+ << T << Target->getName() << castType->getPointeeType();
+ return false;
+ } else if (castType->isObjCIdType() ||
+ (S.Context.ObjCObjectAdoptsQTypeProtocols(
+ castType, ExprClass)))
+            // OK to cast to 'id'.
+            // Casting to id<p-list> is OK if the bridge type adopts all of
+            // the p-list protocols.
+ return true;
+ else {
+ if (warn) {
+ S.Diag(castExpr->getLocStart(), diag::warn_objc_invalid_bridge)
+ << T << Target->getName() << castType;
+ S.Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+ S.Diag(Target->getLocStart(), diag::note_declared_at);
+ }
+ return false;
+ }
+ }
+ } else if (!castType->isObjCIdType()) {
+ S.Diag(castExpr->getLocStart(), diag::err_objc_cf_bridged_not_interface)
+ << castExpr->getType() << Parm;
+ S.Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+ if (Target)
+ S.Diag(Target->getLocStart(), diag::note_declared_at);
+ }
+ return true;
+ }
+ return false;
+ }
+ T = TDNDecl->getUnderlyingType();
+ }
+ return true;
+}
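+
+// Sketch of what the check above accepts, assuming a typedef such as
+//   typedef struct __CFError *CFErrorRef __attribute__((objc_bridge(NSError)));
+// casting a CFErrorRef to NSError * (or a superclass, or 'id') succeeds,
+// while casting it to an unrelated interface warns.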
+
+template <typename TB>
+static bool CheckObjCBridgeCFCast(Sema &S, QualType castType, Expr *castExpr,
+ bool &HadTheAttribute, bool warn) {
+ QualType T = castType;
+ HadTheAttribute = false;
+ while (const TypedefType *TD = dyn_cast<TypedefType>(T.getTypePtr())) {
+ TypedefNameDecl *TDNDecl = TD->getDecl();
+ if (TB *ObjCBAttr = getObjCBridgeAttr<TB>(TD)) {
+ if (IdentifierInfo *Parm = ObjCBAttr->getBridgedType()) {
+ HadTheAttribute = true;
+ if (Parm->isStr("id"))
+ return true;
+
+ NamedDecl *Target = nullptr;
+ // Check for an existing type with this name.
+ LookupResult R(S, DeclarationName(Parm), SourceLocation(),
+ Sema::LookupOrdinaryName);
+ if (S.LookupName(R, S.TUScope)) {
+ Target = R.getFoundDecl();
+ if (Target && isa<ObjCInterfaceDecl>(Target)) {
+ ObjCInterfaceDecl *CastClass = cast<ObjCInterfaceDecl>(Target);
+ if (const ObjCObjectPointerType *InterfacePointerType =
+ castExpr->getType()->getAsObjCInterfacePointerType()) {
+ ObjCInterfaceDecl *ExprClass
+ = InterfacePointerType->getObjectType()->getInterface();
+ if ((CastClass == ExprClass) ||
+ (ExprClass && CastClass->isSuperClassOf(ExprClass)))
+ return true;
+ if (warn) {
+ S.Diag(castExpr->getLocStart(), diag::warn_objc_invalid_bridge_to_cf)
+ << castExpr->getType()->getPointeeType() << T;
+ S.Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+ }
+ return false;
+ } else if (castExpr->getType()->isObjCIdType() ||
+ (S.Context.QIdProtocolsAdoptObjCObjectProtocols(
+ castExpr->getType(), CastClass)))
+            // OK to cast an 'id' expression to a CF type.
+            // OK to cast an 'id<plist>' expression to a CF type provided
+            // plist adopts all of the CF type's Objective-C class protocols.
+ return true;
+ else {
+ if (warn) {
+ S.Diag(castExpr->getLocStart(), diag::warn_objc_invalid_bridge_to_cf)
+ << castExpr->getType() << castType;
+ S.Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+ S.Diag(Target->getLocStart(), diag::note_declared_at);
+ }
+ return false;
+ }
+ }
+ }
+ S.Diag(castExpr->getLocStart(), diag::err_objc_ns_bridged_invalid_cfobject)
+ << castExpr->getType() << castType;
+ S.Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+ if (Target)
+ S.Diag(Target->getLocStart(), diag::note_declared_at);
+ return true;
+ }
+ return false;
+ }
+ T = TDNDecl->getUnderlyingType();
+ }
+ return true;
+}
+
+void Sema::CheckTollFreeBridgeCast(QualType castType, Expr *castExpr) {
+ if (!getLangOpts().ObjC1)
+ return;
+  // Warn in the presence of __bridge casts to or from a toll-free bridged type.
+ ARCConversionTypeClass exprACTC = classifyTypeForARCConversion(castExpr->getType());
+ ARCConversionTypeClass castACTC = classifyTypeForARCConversion(castType);
+ if (castACTC == ACTC_retainable && exprACTC == ACTC_coreFoundation) {
+ bool HasObjCBridgeAttr;
+ bool ObjCBridgeAttrWillNotWarn =
+ CheckObjCBridgeNSCast<ObjCBridgeAttr>(*this, castType, castExpr, HasObjCBridgeAttr,
+ false);
+ if (ObjCBridgeAttrWillNotWarn && HasObjCBridgeAttr)
+ return;
+ bool HasObjCBridgeMutableAttr;
+ bool ObjCBridgeMutableAttrWillNotWarn =
+ CheckObjCBridgeNSCast<ObjCBridgeMutableAttr>(*this, castType, castExpr,
+ HasObjCBridgeMutableAttr, false);
+ if (ObjCBridgeMutableAttrWillNotWarn && HasObjCBridgeMutableAttr)
+ return;
+
+ if (HasObjCBridgeAttr)
+ CheckObjCBridgeNSCast<ObjCBridgeAttr>(*this, castType, castExpr, HasObjCBridgeAttr,
+ true);
+ else if (HasObjCBridgeMutableAttr)
+ CheckObjCBridgeNSCast<ObjCBridgeMutableAttr>(*this, castType, castExpr,
+ HasObjCBridgeMutableAttr, true);
+ }
+ else if (castACTC == ACTC_coreFoundation && exprACTC == ACTC_retainable) {
+ bool HasObjCBridgeAttr;
+ bool ObjCBridgeAttrWillNotWarn =
+ CheckObjCBridgeCFCast<ObjCBridgeAttr>(*this, castType, castExpr, HasObjCBridgeAttr,
+ false);
+ if (ObjCBridgeAttrWillNotWarn && HasObjCBridgeAttr)
+ return;
+ bool HasObjCBridgeMutableAttr;
+ bool ObjCBridgeMutableAttrWillNotWarn =
+ CheckObjCBridgeCFCast<ObjCBridgeMutableAttr>(*this, castType, castExpr,
+ HasObjCBridgeMutableAttr, false);
+ if (ObjCBridgeMutableAttrWillNotWarn && HasObjCBridgeMutableAttr)
+ return;
+
+ if (HasObjCBridgeAttr)
+ CheckObjCBridgeCFCast<ObjCBridgeAttr>(*this, castType, castExpr, HasObjCBridgeAttr,
+ true);
+ else if (HasObjCBridgeMutableAttr)
+ CheckObjCBridgeCFCast<ObjCBridgeMutableAttr>(*this, castType, castExpr,
+ HasObjCBridgeMutableAttr, true);
+ }
+}
+
+void Sema::CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr) {
+ QualType SrcType = castExpr->getType();
+ if (ObjCPropertyRefExpr *PRE = dyn_cast<ObjCPropertyRefExpr>(castExpr)) {
+ if (PRE->isExplicitProperty()) {
+ if (ObjCPropertyDecl *PDecl = PRE->getExplicitProperty())
+ SrcType = PDecl->getType();
+ }
+ else if (PRE->isImplicitProperty()) {
+ if (ObjCMethodDecl *Getter = PRE->getImplicitPropertyGetter())
+ SrcType = Getter->getReturnType();
+
+ }
+ }
+
+ ARCConversionTypeClass srcExprACTC = classifyTypeForARCConversion(SrcType);
+ ARCConversionTypeClass castExprACTC = classifyTypeForARCConversion(castType);
+ if (srcExprACTC != ACTC_retainable || castExprACTC != ACTC_coreFoundation)
+ return;
+ CheckObjCBridgeRelatedConversions(castExpr->getLocStart(),
+ castType, SrcType, castExpr);
+ return;
+}
+
+bool Sema::CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
+ CastKind &Kind) {
+ if (!getLangOpts().ObjC1)
+ return false;
+ ARCConversionTypeClass exprACTC =
+ classifyTypeForARCConversion(castExpr->getType());
+ ARCConversionTypeClass castACTC = classifyTypeForARCConversion(castType);
+ if ((castACTC == ACTC_retainable && exprACTC == ACTC_coreFoundation) ||
+ (castACTC == ACTC_coreFoundation && exprACTC == ACTC_retainable)) {
+ CheckTollFreeBridgeCast(castType, castExpr);
+ Kind = (castACTC == ACTC_coreFoundation) ? CK_BitCast
+ : CK_CPointerToObjCPointerCast;
+ return true;
+ }
+ return false;
+}
+
+bool Sema::checkObjCBridgeRelatedComponents(SourceLocation Loc,
+ QualType DestType, QualType SrcType,
+ ObjCInterfaceDecl *&RelatedClass,
+ ObjCMethodDecl *&ClassMethod,
+ ObjCMethodDecl *&InstanceMethod,
+ TypedefNameDecl *&TDNDecl,
+ bool CfToNs) {
+ QualType T = CfToNs ? SrcType : DestType;
+ ObjCBridgeRelatedAttr *ObjCBAttr = ObjCBridgeRelatedAttrFromType(T, TDNDecl);
+ if (!ObjCBAttr)
+ return false;
+
+ IdentifierInfo *RCId = ObjCBAttr->getRelatedClass();
+ IdentifierInfo *CMId = ObjCBAttr->getClassMethod();
+ IdentifierInfo *IMId = ObjCBAttr->getInstanceMethod();
+ if (!RCId)
+ return false;
+ NamedDecl *Target = nullptr;
+ // Check for an existing type with this name.
+ LookupResult R(*this, DeclarationName(RCId), SourceLocation(),
+ Sema::LookupOrdinaryName);
+ if (!LookupName(R, TUScope)) {
+ Diag(Loc, diag::err_objc_bridged_related_invalid_class) << RCId
+ << SrcType << DestType;
+ Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+ return false;
+ }
+ Target = R.getFoundDecl();
+ if (Target && isa<ObjCInterfaceDecl>(Target))
+ RelatedClass = cast<ObjCInterfaceDecl>(Target);
+ else {
+ Diag(Loc, diag::err_objc_bridged_related_invalid_class_name) << RCId
+ << SrcType << DestType;
+ Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+ if (Target)
+ Diag(Target->getLocStart(), diag::note_declared_at);
+ return false;
+ }
+
+ // Check for an existing class method with the given selector name.
+ if (CfToNs && CMId) {
+ Selector Sel = Context.Selectors.getUnarySelector(CMId);
+ ClassMethod = RelatedClass->lookupMethod(Sel, false);
+ if (!ClassMethod) {
+ Diag(Loc, diag::err_objc_bridged_related_known_method)
+ << SrcType << DestType << Sel << false;
+ Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+ return false;
+ }
+ }
+
+ // Check for an existing instance method with the given selector name.
+ if (!CfToNs && IMId) {
+ Selector Sel = Context.Selectors.getNullarySelector(IMId);
+ InstanceMethod = RelatedClass->lookupMethod(Sel, true);
+ if (!InstanceMethod) {
+ Diag(Loc, diag::err_objc_bridged_related_known_method)
+ << SrcType << DestType << Sel << true;
+ Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+ return false;
+ }
+ }
+ return true;
+}
+
+bool
+Sema::CheckObjCBridgeRelatedConversions(SourceLocation Loc,
+ QualType DestType, QualType SrcType,
+ Expr *&SrcExpr) {
+ ARCConversionTypeClass rhsExprACTC = classifyTypeForARCConversion(SrcType);
+ ARCConversionTypeClass lhsExprACTC = classifyTypeForARCConversion(DestType);
+ bool CfToNs = (rhsExprACTC == ACTC_coreFoundation && lhsExprACTC == ACTC_retainable);
+ bool NsToCf = (rhsExprACTC == ACTC_retainable && lhsExprACTC == ACTC_coreFoundation);
+ if (!CfToNs && !NsToCf)
+ return false;
+
+ ObjCInterfaceDecl *RelatedClass;
+ ObjCMethodDecl *ClassMethod = nullptr;
+ ObjCMethodDecl *InstanceMethod = nullptr;
+ TypedefNameDecl *TDNDecl = nullptr;
+ if (!checkObjCBridgeRelatedComponents(Loc, DestType, SrcType, RelatedClass,
+ ClassMethod, InstanceMethod, TDNDecl, CfToNs))
+ return false;
+
+ if (CfToNs) {
+ // Implicit conversion from CF to ObjC object is needed.
+ if (ClassMethod) {
+ std::string ExpressionString = "[";
+ ExpressionString += RelatedClass->getNameAsString();
+ ExpressionString += " ";
+ ExpressionString += ClassMethod->getSelector().getAsString();
+ SourceLocation SrcExprEndLoc = getLocForEndOfToken(SrcExpr->getLocEnd());
+ // Provide a fixit: [RelatedClass ClassMethod SrcExpr]
+ Diag(Loc, diag::err_objc_bridged_related_known_method)
+ << SrcType << DestType << ClassMethod->getSelector() << false
+ << FixItHint::CreateInsertion(SrcExpr->getLocStart(), ExpressionString)
+ << FixItHint::CreateInsertion(SrcExprEndLoc, "]");
+ Diag(RelatedClass->getLocStart(), diag::note_declared_at);
+ Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+
+ QualType receiverType =
+ Context.getObjCInterfaceType(RelatedClass);
+ // Argument.
+ Expr *args[] = { SrcExpr };
+ ExprResult msg = BuildClassMessageImplicit(receiverType, false,
+ ClassMethod->getLocation(),
+ ClassMethod->getSelector(), ClassMethod,
+ MultiExprArg(args, 1));
+ SrcExpr = msg.get();
+ return true;
+ }
+ }
+ else {
+ // Implicit conversion from ObjC type to CF object is needed.
+ if (InstanceMethod) {
+ std::string ExpressionString;
+ SourceLocation SrcExprEndLoc = getLocForEndOfToken(SrcExpr->getLocEnd());
+ if (InstanceMethod->isPropertyAccessor())
+ if (const ObjCPropertyDecl *PDecl = InstanceMethod->findPropertyDecl()) {
+        // Fix-it: ObjectExpr.propertyName when it is a property accessor.
+ ExpressionString = ".";
+ ExpressionString += PDecl->getNameAsString();
+ Diag(Loc, diag::err_objc_bridged_related_known_method)
+ << SrcType << DestType << InstanceMethod->getSelector() << true
+ << FixItHint::CreateInsertion(SrcExprEndLoc, ExpressionString);
+ }
+ if (ExpressionString.empty()) {
+ // Provide a fixit: [ObjectExpr InstanceMethod]
+ ExpressionString = " ";
+ ExpressionString += InstanceMethod->getSelector().getAsString();
+ ExpressionString += "]";
+
+ Diag(Loc, diag::err_objc_bridged_related_known_method)
+ << SrcType << DestType << InstanceMethod->getSelector() << true
+ << FixItHint::CreateInsertion(SrcExpr->getLocStart(), "[")
+ << FixItHint::CreateInsertion(SrcExprEndLoc, ExpressionString);
+ }
+ Diag(RelatedClass->getLocStart(), diag::note_declared_at);
+ Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+
+ ExprResult msg =
+ BuildInstanceMessageImplicit(SrcExpr, SrcType,
+ InstanceMethod->getLocation(),
+ InstanceMethod->getSelector(),
+ InstanceMethod, None);
+ SrcExpr = msg.get();
+ return true;
+ }
+ }
+ return false;
+}
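+
+// E.g., with the objc_bridge_related typedef sketched earlier, initializing an
+// NSColor * from a CGColorRef gets a fix-it rewriting the source to
+// [NSColor colorWithCGColor: <expr>], while the reverse direction suggests the
+// instance method or property spelling, e.g. <expr>.CGColor.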
+
+Sema::ARCConversionResult
+Sema::CheckObjCARCConversion(SourceRange castRange, QualType castType,
+ Expr *&castExpr, CheckedConversionKind CCK,
+ bool DiagnoseCFAudited,
+ BinaryOperatorKind Opc) {
+ QualType castExprType = castExpr->getType();
+
+ // For the purposes of the classification, we assume reference types
+ // will bind to temporaries.
+ QualType effCastType = castType;
+ if (const ReferenceType *ref = castType->getAs<ReferenceType>())
+ effCastType = ref->getPointeeType();
+
+ ARCConversionTypeClass exprACTC = classifyTypeForARCConversion(castExprType);
+ ARCConversionTypeClass castACTC = classifyTypeForARCConversion(effCastType);
+ if (exprACTC == castACTC) {
+    // Check for viability and report an error if casting an rvalue to a
+    // lifetime-qualified type.
+ if ((castACTC == ACTC_retainable) &&
+ (CCK == CCK_CStyleCast || CCK == CCK_OtherCast) &&
+ (castType != castExprType)) {
+ const Type *DT = castType.getTypePtr();
+ QualType QDT = castType;
+      // We desugar some types but not others. We ignore those that
+      // cannot appear in a cast (e.g., 'auto') and those which should
+      // not be desugared (e.g., typedefs).
+ if (const ParenType *PT = dyn_cast<ParenType>(DT))
+ QDT = PT->desugar();
+ else if (const TypeOfType *TP = dyn_cast<TypeOfType>(DT))
+ QDT = TP->desugar();
+ else if (const AttributedType *AT = dyn_cast<AttributedType>(DT))
+ QDT = AT->desugar();
+ if (QDT != castType &&
+ QDT.getObjCLifetime() != Qualifiers::OCL_None) {
+ SourceLocation loc =
+ (castRange.isValid() ? castRange.getBegin()
+ : castExpr->getExprLoc());
+ Diag(loc, diag::err_arc_nolifetime_behavior);
+ }
+ }
+ return ACR_okay;
+ }
+
+ if (isAnyCLike(exprACTC) && isAnyCLike(castACTC)) return ACR_okay;
+
+ // Allow all of these types to be cast to integer types (but not
+ // vice-versa).
+ if (castACTC == ACTC_none && castType->isIntegralType(Context))
+ return ACR_okay;
+
+ // Allow casts between pointers to lifetime types (e.g., __strong id*)
+ // and pointers to void (e.g., cv void *). Casting from void* to lifetime*
+ // must be explicit.
+ if (exprACTC == ACTC_indirectRetainable && castACTC == ACTC_voidPtr)
+ return ACR_okay;
+ if (castACTC == ACTC_indirectRetainable && exprACTC == ACTC_voidPtr &&
+ CCK != CCK_ImplicitConversion)
+ return ACR_okay;
+
+ switch (ARCCastChecker(Context, exprACTC, castACTC, false).Visit(castExpr)) {
+ // For invalid casts, fall through.
+ case ACC_invalid:
+ break;
+
+ // Do nothing for both bottom and +0.
+ case ACC_bottom:
+ case ACC_plusZero:
+ return ACR_okay;
+
+ // If the result is +1, consume it here.
+ case ACC_plusOne:
+ castExpr = ImplicitCastExpr::Create(Context, castExpr->getType(),
+ CK_ARCConsumeObject, castExpr,
+ nullptr, VK_RValue);
+ ExprNeedsCleanups = true;
+ return ACR_okay;
+ }
+
+ // If this is a non-implicit cast from id or block type to a
+ // CoreFoundation type, delay complaining in case the cast is used
+ // in an acceptable context.
+ if (exprACTC == ACTC_retainable && isAnyRetainable(castACTC) &&
+ CCK != CCK_ImplicitConversion)
+ return ACR_unbridged;
+
+ // Do not issue bridge cast" diagnostic when implicit casting a cstring
+ // to 'NSString *'. Let caller issue a normal mismatched diagnostic with
+ // suitable fix-it.
+ if (castACTC == ACTC_retainable && exprACTC == ACTC_none &&
+ ConversionToObjCStringLiteralCheck(castType, castExpr))
+ return ACR_okay;
+
+ // Do not issue "bridge cast" diagnostic when implicit casting
+ // a retainable object to a CF type parameter belonging to an audited
+ // CF API function. Let caller issue a normal type mismatched diagnostic
+ // instead.
+ if (!DiagnoseCFAudited || exprACTC != ACTC_retainable ||
+ castACTC != ACTC_coreFoundation)
+ if (!(exprACTC == ACTC_voidPtr && castACTC == ACTC_retainable &&
+ (Opc == BO_NE || Opc == BO_EQ)))
+ diagnoseObjCARCConversion(*this, castRange, castType, castACTC,
+ castExpr, castExpr, exprACTC, CCK);
+ return ACR_okay;
+}
+
+/// Given that we saw an expression with the ARCUnbridgedCastTy
+/// placeholder type, complain bitterly.
+void Sema::diagnoseARCUnbridgedCast(Expr *e) {
+ // We expect the spurious ImplicitCastExpr to already have been stripped.
+ assert(!e->hasPlaceholderType(BuiltinType::ARCUnbridgedCast));
+ CastExpr *realCast = cast<CastExpr>(e->IgnoreParens());
+
+ SourceRange castRange;
+ QualType castType;
+ CheckedConversionKind CCK;
+
+ if (CStyleCastExpr *cast = dyn_cast<CStyleCastExpr>(realCast)) {
+ castRange = SourceRange(cast->getLParenLoc(), cast->getRParenLoc());
+ castType = cast->getTypeAsWritten();
+ CCK = CCK_CStyleCast;
+ } else if (ExplicitCastExpr *cast = dyn_cast<ExplicitCastExpr>(realCast)) {
+ castRange = cast->getTypeInfoAsWritten()->getTypeLoc().getSourceRange();
+ castType = cast->getTypeAsWritten();
+ CCK = CCK_OtherCast;
+ } else {
+    // There is no written type for an implicit conversion, and 'cast' from
+    // the preceding condition is null here; use the cast's result type.
+    castType = realCast->getType();
+ CCK = CCK_ImplicitConversion;
+ }
+
+ ARCConversionTypeClass castACTC =
+ classifyTypeForARCConversion(castType.getNonReferenceType());
+
+ Expr *castExpr = realCast->getSubExpr();
+ assert(classifyTypeForARCConversion(castExpr->getType()) == ACTC_retainable);
+
+ diagnoseObjCARCConversion(*this, castRange, castType, castACTC,
+ castExpr, realCast, ACTC_retainable, CCK);
+}
+
+/// stripARCUnbridgedCast - Given an expression of ARCUnbridgedCast
+/// type, remove the placeholder cast.
+Expr *Sema::stripARCUnbridgedCast(Expr *e) {
+ assert(e->hasPlaceholderType(BuiltinType::ARCUnbridgedCast));
+
+ if (ParenExpr *pe = dyn_cast<ParenExpr>(e)) {
+ Expr *sub = stripARCUnbridgedCast(pe->getSubExpr());
+ return new (Context) ParenExpr(pe->getLParen(), pe->getRParen(), sub);
+ } else if (UnaryOperator *uo = dyn_cast<UnaryOperator>(e)) {
+ assert(uo->getOpcode() == UO_Extension);
+ Expr *sub = stripARCUnbridgedCast(uo->getSubExpr());
+ return new (Context) UnaryOperator(sub, UO_Extension, sub->getType(),
+ sub->getValueKind(), sub->getObjectKind(),
+ uo->getOperatorLoc());
+ } else if (GenericSelectionExpr *gse = dyn_cast<GenericSelectionExpr>(e)) {
+ assert(!gse->isResultDependent());
+
+ unsigned n = gse->getNumAssocs();
+ SmallVector<Expr*, 4> subExprs(n);
+ SmallVector<TypeSourceInfo*, 4> subTypes(n);
+ for (unsigned i = 0; i != n; ++i) {
+ subTypes[i] = gse->getAssocTypeSourceInfo(i);
+ Expr *sub = gse->getAssocExpr(i);
+ if (i == gse->getResultIndex())
+ sub = stripARCUnbridgedCast(sub);
+ subExprs[i] = sub;
+ }
+
+ return new (Context) GenericSelectionExpr(Context, gse->getGenericLoc(),
+ gse->getControllingExpr(),
+ subTypes, subExprs,
+ gse->getDefaultLoc(),
+ gse->getRParenLoc(),
+ gse->containsUnexpandedParameterPack(),
+ gse->getResultIndex());
+ } else {
+ assert(isa<ImplicitCastExpr>(e) && "bad form of unbridged cast!");
+ return cast<ImplicitCastExpr>(e)->getSubExpr();
+ }
+}
+
+bool Sema::CheckObjCARCUnavailableWeakConversion(QualType castType,
+ QualType exprType) {
+ QualType canCastType =
+ Context.getCanonicalType(castType).getUnqualifiedType();
+ QualType canExprType =
+ Context.getCanonicalType(exprType).getUnqualifiedType();
+ if (isa<ObjCObjectPointerType>(canCastType) &&
+ castType.getObjCLifetime() == Qualifiers::OCL_Weak &&
+ canExprType->isObjCObjectPointerType()) {
+ if (const ObjCObjectPointerType *ObjT =
+ canExprType->getAs<ObjCObjectPointerType>())
+ if (const ObjCInterfaceDecl *ObjI = ObjT->getInterfaceDecl())
+ return !ObjI->isArcWeakrefUnavailable();
+ }
+ return true;
+}
+
+/// Look for an ObjCReclaimReturnedObject cast and destroy it.
+static Expr *maybeUndoReclaimObject(Expr *e) {
+ // For now, we just undo operands that are *immediately* reclaim
+ // expressions, which prevents the vast majority of potential
+ // problems here. To catch them all, we'd need to rebuild arbitrary
+ // value-propagating subexpressions --- we can't reliably rebuild
+ // in-place because of expression sharing.
+ if (ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(e))
+ if (ice->getCastKind() == CK_ARCReclaimReturnedObject)
+ return ice->getSubExpr();
+
+ return e;
+}
+
+ExprResult Sema::BuildObjCBridgedCast(SourceLocation LParenLoc,
+ ObjCBridgeCastKind Kind,
+ SourceLocation BridgeKeywordLoc,
+ TypeSourceInfo *TSInfo,
+ Expr *SubExpr) {
+ ExprResult SubResult = UsualUnaryConversions(SubExpr);
+ if (SubResult.isInvalid()) return ExprError();
+ SubExpr = SubResult.get();
+
+ QualType T = TSInfo->getType();
+ QualType FromType = SubExpr->getType();
+
+ CastKind CK;
+
+ bool MustConsume = false;
+ if (T->isDependentType() || SubExpr->isTypeDependent()) {
+ // Okay: we'll build a dependent expression type.
+ CK = CK_Dependent;
+ } else if (T->isObjCARCBridgableType() && FromType->isCARCBridgableType()) {
+ // Casting CF -> id
+ CK = (T->isBlockPointerType() ? CK_AnyPointerToBlockPointerCast
+ : CK_CPointerToObjCPointerCast);
+ switch (Kind) {
+ case OBC_Bridge:
+ break;
+
+ case OBC_BridgeRetained: {
+ bool br = isKnownName("CFBridgingRelease");
+ Diag(BridgeKeywordLoc, diag::err_arc_bridge_cast_wrong_kind)
+ << 2
+ << FromType
+ << (T->isBlockPointerType()? 1 : 0)
+ << T
+ << SubExpr->getSourceRange()
+ << Kind;
+ Diag(BridgeKeywordLoc, diag::note_arc_bridge)
+ << FixItHint::CreateReplacement(BridgeKeywordLoc, "__bridge");
+ Diag(BridgeKeywordLoc, diag::note_arc_bridge_transfer)
+ << FromType << br
+ << FixItHint::CreateReplacement(BridgeKeywordLoc,
+ br ? "CFBridgingRelease "
+ : "__bridge_transfer ");
+
+ Kind = OBC_Bridge;
+ break;
+ }
+
+ case OBC_BridgeTransfer:
+ // We must consume the Objective-C object produced by the cast.
+ MustConsume = true;
+ break;
+ }
+ } else if (T->isCARCBridgableType() && FromType->isObjCARCBridgableType()) {
+ // Okay: id -> CF
+ CK = CK_BitCast;
+ switch (Kind) {
+ case OBC_Bridge:
+ // Reclaiming a value that's going to be __bridge-casted to CF
+ // is very dangerous, so we don't do it.
+ SubExpr = maybeUndoReclaimObject(SubExpr);
+ break;
+
+ case OBC_BridgeRetained:
+ // Produce the object before casting it.
+ SubExpr = ImplicitCastExpr::Create(Context, FromType,
+ CK_ARCProduceObject,
+ SubExpr, nullptr, VK_RValue);
+ break;
+
+ case OBC_BridgeTransfer: {
+ bool br = isKnownName("CFBridgingRetain");
+ Diag(BridgeKeywordLoc, diag::err_arc_bridge_cast_wrong_kind)
+ << (FromType->isBlockPointerType()? 1 : 0)
+ << FromType
+ << 2
+ << T
+ << SubExpr->getSourceRange()
+ << Kind;
+
+ Diag(BridgeKeywordLoc, diag::note_arc_bridge)
+ << FixItHint::CreateReplacement(BridgeKeywordLoc, "__bridge ");
+ Diag(BridgeKeywordLoc, diag::note_arc_bridge_retained)
+ << T << br
+ << FixItHint::CreateReplacement(BridgeKeywordLoc,
+                                        br ? "CFBridgingRetain "
+                                           : "__bridge_retained ");
+
+ Kind = OBC_Bridge;
+ break;
+ }
+ }
+ } else {
+ Diag(LParenLoc, diag::err_arc_bridge_cast_incompatible)
+ << FromType << T << Kind
+ << SubExpr->getSourceRange()
+ << TSInfo->getTypeLoc().getSourceRange();
+ return ExprError();
+ }
+
+ Expr *Result = new (Context) ObjCBridgedCastExpr(LParenLoc, Kind, CK,
+ BridgeKeywordLoc,
+ TSInfo, SubExpr);
+
+ if (MustConsume) {
+ ExprNeedsCleanups = true;
+ Result = ImplicitCastExpr::Create(Context, T, CK_ARCConsumeObject, Result,
+ nullptr, VK_RValue);
+ }
+
+ return Result;
+}
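+
+// Note the ownership effects built above: for instance,
+//   (__bridge_transfer NSString *)CFStringCreateCopy(NULL, s)
+// consumes the +1 CF result (MustConsume), while __bridge_retained in the
+// opposite direction first wraps the operand in CK_ARCProduceObject.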
+
+ExprResult Sema::ActOnObjCBridgedCast(Scope *S,
+ SourceLocation LParenLoc,
+ ObjCBridgeCastKind Kind,
+ SourceLocation BridgeKeywordLoc,
+ ParsedType Type,
+ SourceLocation RParenLoc,
+ Expr *SubExpr) {
+ TypeSourceInfo *TSInfo = nullptr;
+ QualType T = GetTypeFromParser(Type, &TSInfo);
+ if (Kind == OBC_Bridge)
+ CheckTollFreeBridgeCast(T, SubExpr);
+ if (!TSInfo)
+ TSInfo = Context.getTrivialTypeSourceInfo(T, LParenLoc);
+ return BuildObjCBridgedCast(LParenLoc, Kind, BridgeKeywordLoc, TSInfo,
+ SubExpr);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp
new file mode 100644
index 0000000..714fbed
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp
@@ -0,0 +1,222 @@
+//===--- SemaFixItUtils.cpp - Sema FixIts ---------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines helper classes for generation of Sema FixItHints.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaFixItUtils.h"
+
+using namespace clang;
+
+bool ConversionFixItGenerator::compareTypesSimple(CanQualType From,
+ CanQualType To,
+ Sema &S,
+ SourceLocation Loc,
+ ExprValueKind FromVK) {
+ if (!To.isAtLeastAsQualifiedAs(From))
+ return false;
+
+ From = From.getNonReferenceType();
+ To = To.getNonReferenceType();
+
+ // If both are pointer types, work with the pointee types.
+ if (isa<PointerType>(From) && isa<PointerType>(To)) {
+ From = S.Context.getCanonicalType(
+ (cast<PointerType>(From))->getPointeeType());
+ To = S.Context.getCanonicalType(
+ (cast<PointerType>(To))->getPointeeType());
+ }
+
+ const CanQualType FromUnq = From.getUnqualifiedType();
+ const CanQualType ToUnq = To.getUnqualifiedType();
+
+  if ((FromUnq == ToUnq || S.IsDerivedFrom(Loc, FromUnq, ToUnq)) &&
+ To.isAtLeastAsQualifiedAs(From))
+ return true;
+ return false;
+}
+
+bool ConversionFixItGenerator::tryToFixConversion(const Expr *FullExpr,
+ const QualType FromTy,
+ const QualType ToTy,
+ Sema &S) {
+ if (!FullExpr)
+ return false;
+
+ const CanQualType FromQTy = S.Context.getCanonicalType(FromTy);
+ const CanQualType ToQTy = S.Context.getCanonicalType(ToTy);
+ const SourceLocation Begin = FullExpr->getSourceRange().getBegin();
+ const SourceLocation End = S.getLocForEndOfToken(FullExpr->getSourceRange()
+ .getEnd());
+
+ // Strip the implicit casts - those are implied by the compiler, not the
+ // original source code.
+ const Expr* Expr = FullExpr->IgnoreImpCasts();
+
+ bool NeedParen = true;
+ if (isa<ArraySubscriptExpr>(Expr) ||
+ isa<CallExpr>(Expr) ||
+ isa<DeclRefExpr>(Expr) ||
+ isa<CastExpr>(Expr) ||
+ isa<CXXNewExpr>(Expr) ||
+ isa<CXXConstructExpr>(Expr) ||
+ isa<CXXDeleteExpr>(Expr) ||
+ isa<CXXNoexceptExpr>(Expr) ||
+ isa<CXXPseudoDestructorExpr>(Expr) ||
+ isa<CXXScalarValueInitExpr>(Expr) ||
+ isa<CXXThisExpr>(Expr) ||
+ isa<CXXTypeidExpr>(Expr) ||
+ isa<CXXUnresolvedConstructExpr>(Expr) ||
+ isa<ObjCMessageExpr>(Expr) ||
+ isa<ObjCPropertyRefExpr>(Expr) ||
+ isa<ObjCProtocolExpr>(Expr) ||
+ isa<MemberExpr>(Expr) ||
+ isa<ParenExpr>(FullExpr) ||
+ isa<ParenListExpr>(Expr) ||
+ isa<SizeOfPackExpr>(Expr) ||
+ isa<UnaryOperator>(Expr))
+ NeedParen = false;
+
+ // Check if the argument needs to be dereferenced:
+ // (type * -> type) or (type * -> type &).
+ if (const PointerType *FromPtrTy = dyn_cast<PointerType>(FromQTy)) {
+ OverloadFixItKind FixKind = OFIK_Dereference;
+
+ bool CanConvert = CompareTypes(
+ S.Context.getCanonicalType(FromPtrTy->getPointeeType()), ToQTy,
+ S, Begin, VK_LValue);
+ if (CanConvert) {
+      // Do not suggest dereferencing a null pointer.
+ if (Expr->IgnoreParenCasts()->
+ isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull))
+ return false;
+
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Expr)) {
+ if (UO->getOpcode() == UO_AddrOf) {
+ FixKind = OFIK_RemoveTakeAddress;
+ Hints.push_back(FixItHint::CreateRemoval(
+ CharSourceRange::getTokenRange(Begin, Begin)));
+ }
+ } else if (NeedParen) {
+ Hints.push_back(FixItHint::CreateInsertion(Begin, "*("));
+ Hints.push_back(FixItHint::CreateInsertion(End, ")"));
+ } else {
+ Hints.push_back(FixItHint::CreateInsertion(Begin, "*"));
+ }
+
+ NumConversionsFixed++;
+ if (NumConversionsFixed == 1)
+ Kind = FixKind;
+ return true;
+ }
+ }
+
+ // Check if the pointer to the argument needs to be passed:
+ // (type -> type *) or (type & -> type *).
+ if (isa<PointerType>(ToQTy)) {
+ bool CanConvert = false;
+ OverloadFixItKind FixKind = OFIK_TakeAddress;
+
+ // Only suggest taking address of L-values.
+ if (!Expr->isLValue() || Expr->getObjectKind() != OK_Ordinary)
+ return false;
+
+ CanConvert = CompareTypes(S.Context.getPointerType(FromQTy), ToQTy,
+ S, Begin, VK_RValue);
+ if (CanConvert) {
+
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Expr)) {
+ if (UO->getOpcode() == UO_Deref) {
+ FixKind = OFIK_RemoveDereference;
+ Hints.push_back(FixItHint::CreateRemoval(
+ CharSourceRange::getTokenRange(Begin, Begin)));
+ }
+ } else if (NeedParen) {
+ Hints.push_back(FixItHint::CreateInsertion(Begin, "&("));
+ Hints.push_back(FixItHint::CreateInsertion(End, ")"));
+ } else {
+ Hints.push_back(FixItHint::CreateInsertion(Begin, "&"));
+ }
+
+ NumConversionsFixed++;
+ if (NumConversionsFixed == 1)
+ Kind = FixKind;
+ return true;
+ }
+ }
+
+ return false;
+}
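+
+// For instance, passing an int where an int * is expected produces a fix-it
+// inserting '&' (or removing a stray '*' dereference), and passing an int *
+// where an int is expected inserts '*' (or removes a leading '&').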
+
+static bool isMacroDefined(const Sema &S, SourceLocation Loc, StringRef Name) {
+ return (bool)S.PP.getMacroDefinitionAtLoc(&S.getASTContext().Idents.get(Name),
+ Loc);
+}
+
+static std::string getScalarZeroExpressionForType(
+ const Type &T, SourceLocation Loc, const Sema &S) {
+ assert(T.isScalarType() && "use scalar types only");
+ // Suggest "0" for non-enumeration scalar types, unless we can find a
+ // better initializer.
+ if (T.isEnumeralType())
+ return std::string();
+ if ((T.isObjCObjectPointerType() || T.isBlockPointerType()) &&
+ isMacroDefined(S, Loc, "nil"))
+ return "nil";
+ if (T.isRealFloatingType())
+ return "0.0";
+ if (T.isBooleanType() &&
+ (S.LangOpts.CPlusPlus || isMacroDefined(S, Loc, "false")))
+ return "false";
+ if (T.isPointerType() || T.isMemberPointerType()) {
+ if (S.LangOpts.CPlusPlus11)
+ return "nullptr";
+ if (isMacroDefined(S, Loc, "NULL"))
+ return "NULL";
+ }
+ if (T.isCharType())
+ return "'\\0'";
+ if (T.isWideCharType())
+ return "L'\\0'";
+ if (T.isChar16Type())
+ return "u'\\0'";
+ if (T.isChar32Type())
+ return "U'\\0'";
+ return "0";
+}
+
+std::string
+Sema::getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const {
+ if (T->isScalarType()) {
+ std::string s = getScalarZeroExpressionForType(*T, Loc, *this);
+ if (!s.empty())
+ s = " = " + s;
+ return s;
+ }
+
+ const CXXRecordDecl *RD = T->getAsCXXRecordDecl();
+ if (!RD || !RD->hasDefinition())
+ return std::string();
+ if (LangOpts.CPlusPlus11 && !RD->hasUserProvidedDefaultConstructor())
+ return "{}";
+ if (RD->isAggregate())
+ return " = {}";
+ return std::string();
+}
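+
+// Examples of the suggestions above: 'int x' gets " = 0"; a pointer in C++11
+// gets " = nullptr"; a record without a user-provided default constructor in
+// C++11 gets "{}"; otherwise an aggregate gets " = {}".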
+
+std::string
+Sema::getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const {
+ return getScalarZeroExpressionForType(*T, Loc, *this);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp
new file mode 100644
index 0000000..c3a8946
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp
@@ -0,0 +1,7689 @@
+//===--- SemaInit.cpp - Semantic Analysis for Initializers ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for initializers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/Initialization.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/Designator.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/SemaInternal.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <map>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Sema Initialization Checking
+//===----------------------------------------------------------------------===//
+
+/// \brief Check whether T is compatible with a wide character type (wchar_t,
+/// char16_t or char32_t).
+static bool IsWideCharCompatible(QualType T, ASTContext &Context) {
+ if (Context.typesAreCompatible(Context.getWideCharType(), T))
+ return true;
+ if (Context.getLangOpts().CPlusPlus || Context.getLangOpts().C11) {
+ return Context.typesAreCompatible(Context.Char16Ty, T) ||
+ Context.typesAreCompatible(Context.Char32Ty, T);
+ }
+ return false;
+}
+
+enum StringInitFailureKind {
+ SIF_None,
+ SIF_NarrowStringIntoWideChar,
+ SIF_WideStringIntoChar,
+ SIF_IncompatWideStringIntoWideChar,
+ SIF_Other
+};
+
+/// \brief Check whether the array of type AT can be initialized by the Init
+/// expression by means of string initialization. Returns SIF_None if so,
+/// otherwise returns a StringInitFailureKind that describes why the
+/// initialization would not work.
+static StringInitFailureKind IsStringInit(Expr *Init, const ArrayType *AT,
+ ASTContext &Context) {
+ if (!isa<ConstantArrayType>(AT) && !isa<IncompleteArrayType>(AT))
+ return SIF_Other;
+
+ // See if this is a string literal or @encode.
+ Init = Init->IgnoreParens();
+
+ // Handle @encode, which is a narrow string.
+ if (isa<ObjCEncodeExpr>(Init) && AT->getElementType()->isCharType())
+ return SIF_None;
+
+ // Otherwise we can only handle string literals.
+ StringLiteral *SL = dyn_cast<StringLiteral>(Init);
+ if (!SL)
+ return SIF_Other;
+
+ const QualType ElemTy =
+ Context.getCanonicalType(AT->getElementType()).getUnqualifiedType();
+
+ switch (SL->getKind()) {
+ case StringLiteral::Ascii:
+ case StringLiteral::UTF8:
+ // char array can be initialized with a narrow string.
+ // Only allow char x[] = "foo"; not char x[] = L"foo";
+ if (ElemTy->isCharType())
+ return SIF_None;
+ if (IsWideCharCompatible(ElemTy, Context))
+ return SIF_NarrowStringIntoWideChar;
+ return SIF_Other;
+ // C99 6.7.8p15 (with correction from DR343), or C11 6.7.9p15:
+ // "An array with element type compatible with a qualified or unqualified
+ // version of wchar_t, char16_t, or char32_t may be initialized by a wide
+ // string literal with the corresponding encoding prefix (L, u, or U,
+  // respectively), optionally enclosed in braces."
+ case StringLiteral::UTF16:
+ if (Context.typesAreCompatible(Context.Char16Ty, ElemTy))
+ return SIF_None;
+ if (ElemTy->isCharType())
+ return SIF_WideStringIntoChar;
+ if (IsWideCharCompatible(ElemTy, Context))
+ return SIF_IncompatWideStringIntoWideChar;
+ return SIF_Other;
+ case StringLiteral::UTF32:
+ if (Context.typesAreCompatible(Context.Char32Ty, ElemTy))
+ return SIF_None;
+ if (ElemTy->isCharType())
+ return SIF_WideStringIntoChar;
+ if (IsWideCharCompatible(ElemTy, Context))
+ return SIF_IncompatWideStringIntoWideChar;
+ return SIF_Other;
+ case StringLiteral::Wide:
+ if (Context.typesAreCompatible(Context.getWideCharType(), ElemTy))
+ return SIF_None;
+ if (ElemTy->isCharType())
+ return SIF_WideStringIntoChar;
+ if (IsWideCharCompatible(ElemTy, Context))
+ return SIF_IncompatWideStringIntoWideChar;
+ return SIF_Other;
+ }
+
+ llvm_unreachable("missed a StringLiteral kind?");
+}
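+
+// For example: char a[] = "ok" yields SIF_None; char b[] = L"no" yields
+// SIF_WideStringIntoChar; wchar_t c[] = "no" yields
+// SIF_NarrowStringIntoWideChar; char16_t d[] = U"no" yields
+// SIF_IncompatWideStringIntoWideChar.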
+
+static StringInitFailureKind IsStringInit(Expr *init, QualType declType,
+ ASTContext &Context) {
+ const ArrayType *arrayType = Context.getAsArrayType(declType);
+ if (!arrayType)
+ return SIF_Other;
+ return IsStringInit(init, arrayType, Context);
+}
+
+/// Update the type of a string literal, including any surrounding parentheses,
+/// to match the type of the object which it is initializing.
+static void updateStringLiteralType(Expr *E, QualType Ty) {
+ while (true) {
+ E->setType(Ty);
+ if (isa<StringLiteral>(E) || isa<ObjCEncodeExpr>(E))
+ break;
+ else if (ParenExpr *PE = dyn_cast<ParenExpr>(E))
+ E = PE->getSubExpr();
+ else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E))
+ E = UO->getSubExpr();
+ else if (GenericSelectionExpr *GSE = dyn_cast<GenericSelectionExpr>(E))
+ E = GSE->getResultExpr();
+ else
+ llvm_unreachable("unexpected expr in string literal init");
+ }
+}
+
+static void CheckStringInit(Expr *Str, QualType &DeclT, const ArrayType *AT,
+ Sema &S) {
+ // Get the length of the string as parsed.
+ auto *ConstantArrayTy =
+ cast<ConstantArrayType>(Str->getType()->getAsArrayTypeUnsafe());
+ uint64_t StrLength = ConstantArrayTy->getSize().getZExtValue();
+
+ if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(AT)) {
+ // C99 6.7.8p14. We have an array of character type with unknown size
+ // being initialized to a string literal.
+ llvm::APInt ConstVal(32, StrLength);
+ // Return a new array type (C99 6.7.8p22).
+ DeclT = S.Context.getConstantArrayType(IAT->getElementType(),
+ ConstVal,
+ ArrayType::Normal, 0);
+ updateStringLiteralType(Str, DeclT);
+ return;
+ }
+
+ const ConstantArrayType *CAT = cast<ConstantArrayType>(AT);
+
+ // We have an array of character type with known size. However,
+ // the size may be smaller or larger than the string we are initializing.
+ // FIXME: Avoid truncation for 64-bit length strings.
+ if (S.getLangOpts().CPlusPlus) {
+ if (StringLiteral *SL = dyn_cast<StringLiteral>(Str->IgnoreParens())) {
+ // For Pascal strings it's OK to strip off the terminating null character,
+ // so the example below is valid:
+ //
+ // unsigned char a[2] = "\pa";
+ if (SL->isPascal())
+ StrLength--;
+ }
+
+ // [dcl.init.string]p2
+ if (StrLength > CAT->getSize().getZExtValue())
+ S.Diag(Str->getLocStart(),
+ diag::err_initializer_string_for_char_array_too_long)
+ << Str->getSourceRange();
+ } else {
+ // C99 6.7.8p14.
+ if (StrLength-1 > CAT->getSize().getZExtValue())
+ S.Diag(Str->getLocStart(),
+ diag::ext_initializer_string_for_char_array_too_long)
+ << Str->getSourceRange();
+ }
+
+ // Set the type to the actual size that we are initializing. If we have
+ // something like:
+ // char x[1] = "foo";
+ // then this will set the string literal's type to char[1].
+ updateStringLiteralType(Str, DeclT);
+}
+
+//===----------------------------------------------------------------------===//
+// Semantic checking for initializer lists.
+//===----------------------------------------------------------------------===//
+
+/// @brief Semantic checking for initializer lists.
+///
+/// The InitListChecker class contains a set of routines that each
+/// handle the initialization of a certain kind of entity, e.g.,
+/// arrays, vectors, struct/union types, scalars, etc. The
+/// InitListChecker itself performs a recursive walk of the subobject
+/// structure of the type to be initialized, while stepping through
+/// the initializer list one element at a time. The IList and Index
+/// parameters to each of the Check* routines contain the active
+/// (syntactic) initializer list and the index into that initializer
+/// list that represents the current initializer. Each routine is
+/// responsible for moving that Index forward as it consumes elements.
+///
+/// Each Check* routine also takes StructuredList/StructuredIndex
+/// arguments, which contain the current "structured" (semantic)
+/// initializer list and the index into that initializer list where we
+/// are copying initializers as we map them over to the semantic
+/// list. Once we have completed our recursive walk of the subobject
+/// structure, we will have constructed a full semantic initializer
+/// list.
+///
+/// C99 designators cause changes in the initializer list traversal,
+/// because they make the initialization "jump" into a specific
+/// subobject and then continue the initialization from that
+/// point. CheckDesignatedInitializer() recursively steps into the
+/// designated subobject and manages backing out the recursion to
+/// initialize the subobjects after the one designated.
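+///
+/// For example, in 'int a[4] = { 1, [2] = 3, 4 };' the designator jumps to
+/// a[2]; the trailing 4 then initializes a[3], and a[1] is left to be
+/// implicitly zero-initialized.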
+namespace {
+class InitListChecker {
+ Sema &SemaRef;
+ bool hadError;
+ bool VerifyOnly; // no diagnostics, no structure building
+ llvm::DenseMap<InitListExpr *, InitListExpr *> SyntacticToSemantic;
+ InitListExpr *FullyStructuredList;
+
+ void CheckImplicitInitList(const InitializedEntity &Entity,
+ InitListExpr *ParentIList, QualType T,
+ unsigned &Index, InitListExpr *StructuredList,
+ unsigned &StructuredIndex);
+ void CheckExplicitInitList(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType &T,
+ InitListExpr *StructuredList,
+ bool TopLevelObject = false);
+ void CheckListElementTypes(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType &DeclType,
+ bool SubobjectIsDesignatorContext,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool TopLevelObject = false);
+ void CheckSubElementType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType ElemType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex);
+ void CheckComplexType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex);
+ void CheckScalarType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex);
+ void CheckReferenceType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex);
+ void CheckVectorType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType DeclType, unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex);
+ void CheckStructUnionTypes(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType DeclType,
+ RecordDecl::field_iterator Field,
+ bool SubobjectIsDesignatorContext, unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool TopLevelObject = false);
+ void CheckArrayType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType &DeclType,
+ llvm::APSInt elementIndex,
+ bool SubobjectIsDesignatorContext, unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex);
+ bool CheckDesignatedInitializer(const InitializedEntity &Entity,
+ InitListExpr *IList, DesignatedInitExpr *DIE,
+ unsigned DesigIdx,
+ QualType &CurrentObjectType,
+ RecordDecl::field_iterator *NextField,
+ llvm::APSInt *NextElementIndex,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool FinishSubobjectInit,
+ bool TopLevelObject);
+ InitListExpr *getStructuredSubobjectInit(InitListExpr *IList, unsigned Index,
+ QualType CurrentObjectType,
+ InitListExpr *StructuredList,
+ unsigned StructuredIndex,
+ SourceRange InitRange,
+ bool IsFullyOverwritten = false);
+ void UpdateStructuredListElement(InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ Expr *expr);
+ int numArrayElements(QualType DeclType);
+ int numStructUnionElements(QualType DeclType);
+
+ static ExprResult PerformEmptyInit(Sema &SemaRef,
+ SourceLocation Loc,
+ const InitializedEntity &Entity,
+ bool VerifyOnly);
+
+ // Explanation on the "FillWithNoInit" mode:
+ //
+ // Assume we have the following definitions (Case#1):
+ // struct P { char x[6][6]; } xp = { .x[1] = "bar" };
+ // struct PP { struct P lp; } l = { .lp = xp, .lp.x[1][2] = 'f' };
+ //
+ // l.lp.x[1][0..1] should not be filled with implicit initializers because the
+ // "base" initializer "xp" will provide values for them; l.lp.x[1] will be "baf".
+ //
+ // But if we have (Case#2):
+ // struct PP l = { .lp = xp, .lp.x[1] = { [2] = 'f' } };
+ //
+ // l.lp.x[1][0..1] are implicitly initialized and do not use values from the
+ // "base" initializer; l.lp.x[1] will be "\0\0f\0\0\0".
+ //
+  // To distinguish Case#1 from Case#2, and also to avoid leaving many "holes"
+  // in the InitListExpr, the "holes" in Case#1 are filled not with empty
+  // initializers but with special "NoInitExpr" placeholders, which tell
+  // CodeGen not to generate any initializers for these parts.
+ void FillInEmptyInitForField(unsigned Init, FieldDecl *Field,
+ const InitializedEntity &ParentEntity,
+ InitListExpr *ILE, bool &RequiresSecondPass,
+ bool FillWithNoInit = false);
+ void FillInEmptyInitializations(const InitializedEntity &Entity,
+ InitListExpr *ILE, bool &RequiresSecondPass,
+ bool FillWithNoInit = false);
+ bool CheckFlexibleArrayInit(const InitializedEntity &Entity,
+ Expr *InitExpr, FieldDecl *Field,
+ bool TopLevelObject);
+ void CheckEmptyInitializable(const InitializedEntity &Entity,
+ SourceLocation Loc);
+
+public:
+ InitListChecker(Sema &S, const InitializedEntity &Entity,
+ InitListExpr *IL, QualType &T, bool VerifyOnly);
+ bool HadError() { return hadError; }
+
+  /// @brief Retrieves the fully-structured initializer list used for
+  /// semantic analysis and code generation.
+ InitListExpr *getFullyStructuredList() const { return FullyStructuredList; }
+};
+} // end anonymous namespace
+
+ExprResult InitListChecker::PerformEmptyInit(Sema &SemaRef,
+ SourceLocation Loc,
+ const InitializedEntity &Entity,
+ bool VerifyOnly) {
+ InitializationKind Kind = InitializationKind::CreateValue(Loc, Loc, Loc,
+ true);
+ MultiExprArg SubInit;
+ Expr *InitExpr;
+ InitListExpr DummyInitList(SemaRef.Context, Loc, None, Loc);
+
+ // C++ [dcl.init.aggr]p7:
+ // If there are fewer initializer-clauses in the list than there are
+ // members in the aggregate, then each member not explicitly initialized
+ // ...
+ bool EmptyInitList = SemaRef.getLangOpts().CPlusPlus11 &&
+ Entity.getType()->getBaseElementTypeUnsafe()->isRecordType();
+ if (EmptyInitList) {
+ // C++1y / DR1070:
+ // shall be initialized [...] from an empty initializer list.
+ //
+ // We apply the resolution of this DR to C++11 but not C++98, since C++98
+ // does not have useful semantics for initialization from an init list.
+ // We treat this as copy-initialization, because aggregate initialization
+ // always performs copy-initialization on its elements.
+ //
+ // Only do this if we're initializing a class type, to avoid filling in
+ // the initializer list where possible.
+ InitExpr = VerifyOnly ? &DummyInitList : new (SemaRef.Context)
+ InitListExpr(SemaRef.Context, Loc, None, Loc);
+ InitExpr->setType(SemaRef.Context.VoidTy);
+ SubInit = InitExpr;
+ Kind = InitializationKind::CreateCopy(Loc, Loc);
+ } else {
+ // C++03:
+ // shall be value-initialized.
+ }
+
+ InitializationSequence InitSeq(SemaRef, Entity, Kind, SubInit);
+ // libstdc++4.6 marks the vector default constructor as explicit in
+ // _GLIBCXX_DEBUG mode, so recover using the C++03 logic in that case.
+ // stlport does so too. Look for std::__debug for libstdc++, and for
+ // std:: for stlport. This is effectively a compiler-side implementation of
+ // LWG2193.
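+  // For example, 'std::vector<int> v = {};' would otherwise be rejected in
+  // those modes, because copy-list-initialization cannot use an explicit
+  // default constructor.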
+ if (!InitSeq && EmptyInitList && InitSeq.getFailureKind() ==
+ InitializationSequence::FK_ExplicitConstructor) {
+ OverloadCandidateSet::iterator Best;
+ OverloadingResult O =
+ InitSeq.getFailedCandidateSet()
+ .BestViableFunction(SemaRef, Kind.getLocation(), Best);
+ (void)O;
+ assert(O == OR_Success && "Inconsistent overload resolution");
+ CXXConstructorDecl *CtorDecl = cast<CXXConstructorDecl>(Best->Function);
+ CXXRecordDecl *R = CtorDecl->getParent();
+
+ if (CtorDecl->getMinRequiredArguments() == 0 &&
+ CtorDecl->isExplicit() && R->getDeclName() &&
+ SemaRef.SourceMgr.isInSystemHeader(CtorDecl->getLocation())) {
+ bool IsInStd = false;
+ for (NamespaceDecl *ND = dyn_cast<NamespaceDecl>(R->getDeclContext());
+ ND && !IsInStd; ND = dyn_cast<NamespaceDecl>(ND->getParent())) {
+ if (SemaRef.getStdNamespace()->InEnclosingNamespaceSetOf(ND))
+ IsInStd = true;
+ }
+
+ if (IsInStd && llvm::StringSwitch<bool>(R->getName())
+ .Cases("basic_string", "deque", "forward_list", true)
+ .Cases("list", "map", "multimap", "multiset", true)
+ .Cases("priority_queue", "queue", "set", "stack", true)
+ .Cases("unordered_map", "unordered_set", "vector", true)
+ .Default(false)) {
+ InitSeq.InitializeFrom(
+ SemaRef, Entity,
+ InitializationKind::CreateValue(Loc, Loc, Loc, true),
+ MultiExprArg(), /*TopLevelOfInitList=*/false);
+ // Emit a warning for this. System header warnings aren't shown
+ // by default, but people working on system headers should see it.
+ if (!VerifyOnly) {
+ SemaRef.Diag(CtorDecl->getLocation(),
+ diag::warn_invalid_initializer_from_system_header);
+ if (Entity.getKind() == InitializedEntity::EK_Member)
+ SemaRef.Diag(Entity.getDecl()->getLocation(),
+ diag::note_used_in_initialization_here);
+ else if (Entity.getKind() == InitializedEntity::EK_ArrayElement)
+ SemaRef.Diag(Loc, diag::note_used_in_initialization_here);
+ }
+ }
+ }
+ }
+ if (!InitSeq) {
+ if (!VerifyOnly) {
+ InitSeq.Diagnose(SemaRef, Entity, Kind, SubInit);
+ if (Entity.getKind() == InitializedEntity::EK_Member)
+ SemaRef.Diag(Entity.getDecl()->getLocation(),
+ diag::note_in_omitted_aggregate_initializer)
+ << /*field*/1 << Entity.getDecl();
+ else if (Entity.getKind() == InitializedEntity::EK_ArrayElement)
+ SemaRef.Diag(Loc, diag::note_in_omitted_aggregate_initializer)
+ << /*array element*/0 << Entity.getElementIndex();
+ }
+ return ExprError();
+ }
+
+ return VerifyOnly ? ExprResult(static_cast<Expr *>(nullptr))
+ : InitSeq.Perform(SemaRef, Entity, Kind, SubInit);
+}
+
+void InitListChecker::CheckEmptyInitializable(const InitializedEntity &Entity,
+ SourceLocation Loc) {
+ assert(VerifyOnly &&
+ "CheckEmptyInitializable is only inteded for verification mode.");
+ if (PerformEmptyInit(SemaRef, Loc, Entity, /*VerifyOnly*/true).isInvalid())
+ hadError = true;
+}
+
+void InitListChecker::FillInEmptyInitForField(unsigned Init, FieldDecl *Field,
+ const InitializedEntity &ParentEntity,
+ InitListExpr *ILE,
+ bool &RequiresSecondPass,
+ bool FillWithNoInit) {
+ SourceLocation Loc = ILE->getLocEnd();
+ unsigned NumInits = ILE->getNumInits();
+ InitializedEntity MemberEntity
+ = InitializedEntity::InitializeMember(Field, &ParentEntity);
+
+ if (const RecordType *RType = ILE->getType()->getAs<RecordType>())
+ if (!RType->getDecl()->isUnion())
+ assert(Init < NumInits && "This ILE should have been expanded");
+
+ if (Init >= NumInits || !ILE->getInit(Init)) {
+ if (FillWithNoInit) {
+ Expr *Filler = new (SemaRef.Context) NoInitExpr(Field->getType());
+ if (Init < NumInits)
+ ILE->setInit(Init, Filler);
+ else
+ ILE->updateInit(SemaRef.Context, Init, Filler);
+ return;
+ }
+ // C++1y [dcl.init.aggr]p7:
+ // If there are fewer initializer-clauses in the list than there are
+ // members in the aggregate, then each member not explicitly initialized
+ // shall be initialized from its brace-or-equal-initializer [...]
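+    // For example, given 'struct S { int x; int y = 42; };', the 'y' member
+    // of 'S s = { 1 };' is filled in with 42 via a CXXDefaultInitExpr.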
+ if (Field->hasInClassInitializer()) {
+ ExprResult DIE = SemaRef.BuildCXXDefaultInitExpr(Loc, Field);
+ if (DIE.isInvalid()) {
+ hadError = true;
+ return;
+ }
+ if (Init < NumInits)
+ ILE->setInit(Init, DIE.get());
+ else {
+ ILE->updateInit(SemaRef.Context, Init, DIE.get());
+ RequiresSecondPass = true;
+ }
+ return;
+ }
+
+ if (Field->getType()->isReferenceType()) {
+ // C++ [dcl.init.aggr]p9:
+ // If an incomplete or empty initializer-list leaves a
+ // member of reference type uninitialized, the program is
+ // ill-formed.
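+      // For example, 'struct R { int &r; }; R r = {};' is diagnosed here.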
+ SemaRef.Diag(Loc, diag::err_init_reference_member_uninitialized)
+ << Field->getType()
+ << ILE->getSyntacticForm()->getSourceRange();
+ SemaRef.Diag(Field->getLocation(),
+ diag::note_uninit_reference_member);
+ hadError = true;
+ return;
+ }
+
+ ExprResult MemberInit = PerformEmptyInit(SemaRef, Loc, MemberEntity,
+ /*VerifyOnly*/false);
+ if (MemberInit.isInvalid()) {
+ hadError = true;
+ return;
+ }
+
+ if (hadError) {
+ // Do nothing
+ } else if (Init < NumInits) {
+ ILE->setInit(Init, MemberInit.getAs<Expr>());
+ } else if (!isa<ImplicitValueInitExpr>(MemberInit.get())) {
+ // Empty initialization requires a constructor call, so
+ // extend the initializer list to include the constructor
+ // call and make a note that we'll need to take another pass
+ // through the initializer list.
+ ILE->updateInit(SemaRef.Context, Init, MemberInit.getAs<Expr>());
+ RequiresSecondPass = true;
+ }
+ } else if (InitListExpr *InnerILE
+ = dyn_cast<InitListExpr>(ILE->getInit(Init)))
+ FillInEmptyInitializations(MemberEntity, InnerILE,
+ RequiresSecondPass, FillWithNoInit);
+ else if (DesignatedInitUpdateExpr *InnerDIUE
+ = dyn_cast<DesignatedInitUpdateExpr>(ILE->getInit(Init)))
+ FillInEmptyInitializations(MemberEntity, InnerDIUE->getUpdater(),
+ RequiresSecondPass, /*FillWithNoInit =*/ true);
+}
+
+/// Recursively replaces NULL values within the given initializer list
+/// with expressions that perform value-initialization of the
+/// appropriate type.
+void
+InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
+ InitListExpr *ILE,
+ bool &RequiresSecondPass,
+ bool FillWithNoInit) {
+ assert((ILE->getType() != SemaRef.Context.VoidTy) &&
+ "Should not have void type");
+
+ if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
+ const RecordDecl *RDecl = RType->getDecl();
+ if (RDecl->isUnion() && ILE->getInitializedFieldInUnion())
+ FillInEmptyInitForField(0, ILE->getInitializedFieldInUnion(),
+ Entity, ILE, RequiresSecondPass, FillWithNoInit);
+ else if (RDecl->isUnion() && isa<CXXRecordDecl>(RDecl) &&
+ cast<CXXRecordDecl>(RDecl)->hasInClassInitializer()) {
+ for (auto *Field : RDecl->fields()) {
+ if (Field->hasInClassInitializer()) {
+ FillInEmptyInitForField(0, Field, Entity, ILE, RequiresSecondPass,
+ FillWithNoInit);
+ break;
+ }
+ }
+ } else {
+ // The fields beyond ILE->getNumInits() are default initialized, so in
+ // order to leave them uninitialized, the ILE is expanded and the extra
+ // fields are then filled with NoInitExpr.
+ unsigned NumFields = 0;
+ for (auto *Field : RDecl->fields())
+ if (!Field->isUnnamedBitfield())
+ ++NumFields;
+ if (ILE->getNumInits() < NumFields)
+ ILE->resizeInits(SemaRef.Context, NumFields);
+
+ unsigned Init = 0;
+ for (auto *Field : RDecl->fields()) {
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ if (hadError)
+ return;
+
+ FillInEmptyInitForField(Init, Field, Entity, ILE, RequiresSecondPass,
+ FillWithNoInit);
+ if (hadError)
+ return;
+
+ ++Init;
+
+ // Only look at the first initialization of a union.
+ if (RDecl->isUnion())
+ break;
+ }
+ }
+
+ return;
+ }
+
+ QualType ElementType;
+
+ InitializedEntity ElementEntity = Entity;
+ unsigned NumInits = ILE->getNumInits();
+ unsigned NumElements = NumInits;
+ if (const ArrayType *AType = SemaRef.Context.getAsArrayType(ILE->getType())) {
+ ElementType = AType->getElementType();
+ if (const ConstantArrayType *CAType = dyn_cast<ConstantArrayType>(AType))
+ NumElements = CAType->getSize().getZExtValue();
+ ElementEntity = InitializedEntity::InitializeElement(SemaRef.Context,
+ 0, Entity);
+ } else if (const VectorType *VType = ILE->getType()->getAs<VectorType>()) {
+ ElementType = VType->getElementType();
+ NumElements = VType->getNumElements();
+ ElementEntity = InitializedEntity::InitializeElement(SemaRef.Context,
+ 0, Entity);
+ } else
+ ElementType = ILE->getType();
+
+ for (unsigned Init = 0; Init != NumElements; ++Init) {
+ if (hadError)
+ return;
+
+ if (ElementEntity.getKind() == InitializedEntity::EK_ArrayElement ||
+ ElementEntity.getKind() == InitializedEntity::EK_VectorElement)
+ ElementEntity.setElementIndex(Init);
+
+ Expr *InitExpr = (Init < NumInits ? ILE->getInit(Init) : nullptr);
+ if (!InitExpr && Init < NumInits && ILE->hasArrayFiller())
+ ILE->setInit(Init, ILE->getArrayFiller());
+ else if (!InitExpr && !ILE->hasArrayFiller()) {
+ Expr *Filler = nullptr;
+
+ if (FillWithNoInit)
+ Filler = new (SemaRef.Context) NoInitExpr(ElementType);
+ else {
+ ExprResult ElementInit = PerformEmptyInit(SemaRef, ILE->getLocEnd(),
+ ElementEntity,
+ /*VerifyOnly*/false);
+ if (ElementInit.isInvalid()) {
+ hadError = true;
+ return;
+ }
+
+ Filler = ElementInit.getAs<Expr>();
+ }
+
+ if (hadError) {
+ // Do nothing
+ } else if (Init < NumInits) {
+ // For arrays, just set the expression used for value-initialization
+ // of the "holes" in the array.
+ if (ElementEntity.getKind() == InitializedEntity::EK_ArrayElement)
+ ILE->setArrayFiller(Filler);
+ else
+ ILE->setInit(Init, Filler);
+ } else {
+ // For arrays, just set the expression used for value-initialization
+ // of the rest of elements and exit.
+ if (ElementEntity.getKind() == InitializedEntity::EK_ArrayElement) {
+ ILE->setArrayFiller(Filler);
+ return;
+ }
+
+ if (!isa<ImplicitValueInitExpr>(Filler) && !isa<NoInitExpr>(Filler)) {
+ // Empty initialization requires a constructor call, so
+ // extend the initializer list to include the constructor
+ // call and make a note that we'll need to take another pass
+ // through the initializer list.
+ ILE->updateInit(SemaRef.Context, Init, Filler);
+ RequiresSecondPass = true;
+ }
+ }
+ } else if (InitListExpr *InnerILE
+ = dyn_cast_or_null<InitListExpr>(InitExpr))
+ FillInEmptyInitializations(ElementEntity, InnerILE, RequiresSecondPass,
+ FillWithNoInit);
+ else if (DesignatedInitUpdateExpr *InnerDIUE
+ = dyn_cast_or_null<DesignatedInitUpdateExpr>(InitExpr))
+ FillInEmptyInitializations(ElementEntity, InnerDIUE->getUpdater(),
+ RequiresSecondPass, /*FillWithNoInit =*/ true);
+ }
+}
+
+InitListChecker::InitListChecker(Sema &S, const InitializedEntity &Entity,
+ InitListExpr *IL, QualType &T,
+ bool VerifyOnly)
+ : SemaRef(S), VerifyOnly(VerifyOnly) {
+ // FIXME: Check that IL isn't already the semantic form of some other
+ // InitListExpr. If it is, we'd create a broken AST.
+
+ hadError = false;
+
+ FullyStructuredList =
+ getStructuredSubobjectInit(IL, 0, T, nullptr, 0, IL->getSourceRange());
+ CheckExplicitInitList(Entity, IL, T, FullyStructuredList,
+ /*TopLevelObject=*/true);
+
+ if (!hadError && !VerifyOnly) {
+ bool RequiresSecondPass = false;
+ FillInEmptyInitializations(Entity, FullyStructuredList, RequiresSecondPass);
+ if (RequiresSecondPass && !hadError)
+ FillInEmptyInitializations(Entity, FullyStructuredList,
+ RequiresSecondPass);
+ }
+}
+
+int InitListChecker::numArrayElements(QualType DeclType) {
+ // FIXME: use a proper constant
+ int maxElements = 0x7FFFFFFF;
+ if (const ConstantArrayType *CAT =
+ SemaRef.Context.getAsConstantArrayType(DeclType)) {
+ maxElements = static_cast<int>(CAT->getSize().getZExtValue());
+ }
+ return maxElements;
+}
+
+int InitListChecker::numStructUnionElements(QualType DeclType) {
+ RecordDecl *structDecl = DeclType->getAs<RecordType>()->getDecl();
+ int InitializableMembers = 0;
+ for (const auto *Field : structDecl->fields())
+ if (!Field->isUnnamedBitfield())
+ ++InitializableMembers;
+
+ if (structDecl->isUnion())
+ return std::min(InitializableMembers, 1);
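+  // A flexible array member is not an initializable member, so exclude it
+  // from the count.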
+ return InitializableMembers - structDecl->hasFlexibleArrayMember();
+}
+
+/// Check whether the range of the initializer \p ParentIList from element
+/// \p Index onwards can be used to initialize an object of type \p T. Update
+/// \p Index to indicate how many elements of the list were consumed.
+///
+/// This also fills in \p StructuredList, from element \p StructuredIndex
+/// onwards, with the fully-braced, desugared form of the initialization.
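+///
+/// For example, for 'int a[2][2] = { 1, 2, 3, 4 };' this builds the implicit
+/// sublists so that the structured form is '{ { 1, 2 }, { 3, 4 } }'.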
+void InitListChecker::CheckImplicitInitList(const InitializedEntity &Entity,
+ InitListExpr *ParentIList,
+ QualType T, unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ int maxElements = 0;
+
+ if (T->isArrayType())
+ maxElements = numArrayElements(T);
+ else if (T->isRecordType())
+ maxElements = numStructUnionElements(T);
+ else if (T->isVectorType())
+ maxElements = T->getAs<VectorType>()->getNumElements();
+ else
+ llvm_unreachable("CheckImplicitInitList(): Illegal type");
+
+ if (maxElements == 0) {
+ if (!VerifyOnly)
+ SemaRef.Diag(ParentIList->getInit(Index)->getLocStart(),
+ diag::err_implicit_empty_initializer);
+ ++Index;
+ hadError = true;
+ return;
+ }
+
+ // Build a structured initializer list corresponding to this subobject.
+ InitListExpr *StructuredSubobjectInitList
+ = getStructuredSubobjectInit(ParentIList, Index, T, StructuredList,
+ StructuredIndex,
+ SourceRange(ParentIList->getInit(Index)->getLocStart(),
+ ParentIList->getSourceRange().getEnd()));
+ unsigned StructuredSubobjectInitIndex = 0;
+
+ // Check the element types and build the structural subobject.
+ unsigned StartIndex = Index;
+ CheckListElementTypes(Entity, ParentIList, T,
+ /*SubobjectIsDesignatorContext=*/false, Index,
+ StructuredSubobjectInitList,
+ StructuredSubobjectInitIndex);
+
+ if (!VerifyOnly) {
+ StructuredSubobjectInitList->setType(T);
+
+    unsigned EndIndex = (Index == StartIndex ? StartIndex : Index - 1);
+    // Update the structured sub-object initializer so that its ending
+    // range corresponds with the end of the last initializer it used.
+ if (EndIndex < ParentIList->getNumInits() &&
+ ParentIList->getInit(EndIndex)) {
+ SourceLocation EndLoc
+ = ParentIList->getInit(EndIndex)->getSourceRange().getEnd();
+ StructuredSubobjectInitList->setRBraceLoc(EndLoc);
+ }
+
+ // Complain about missing braces.
+ if (T->isArrayType() || T->isRecordType()) {
+ SemaRef.Diag(StructuredSubobjectInitList->getLocStart(),
+ diag::warn_missing_braces)
+ << StructuredSubobjectInitList->getSourceRange()
+ << FixItHint::CreateInsertion(
+ StructuredSubobjectInitList->getLocStart(), "{")
+ << FixItHint::CreateInsertion(
+ SemaRef.getLocForEndOfToken(
+ StructuredSubobjectInitList->getLocEnd()),
+ "}");
+ }
+ }
+}
+
+/// Warn that \p Entity was of scalar type and was initialized by a
+/// single-element braced initializer list.
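+///
+/// For example, 'struct A { int n; }; A a = { {0} };' warns about the
+/// redundant braces around the scalar member initializer.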
+static void warnBracedScalarInit(Sema &S, const InitializedEntity &Entity,
+ SourceRange Braces) {
+ // Don't warn during template instantiation. If the initialization was
+ // non-dependent, we warned during the initial parse; otherwise, the
+ // type might not be scalar in some uses of the template.
+ if (!S.ActiveTemplateInstantiations.empty())
+ return;
+
+ unsigned DiagID = 0;
+
+ switch (Entity.getKind()) {
+ case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_ComplexElement:
+ case InitializedEntity::EK_ArrayElement:
+ case InitializedEntity::EK_Parameter:
+ case InitializedEntity::EK_Parameter_CF_Audited:
+ case InitializedEntity::EK_Result:
+ // Extra braces here are suspicious.
+ DiagID = diag::warn_braces_around_scalar_init;
+ break;
+
+ case InitializedEntity::EK_Member:
+ // Warn on aggregate initialization but not on ctor init list or
+ // default member initializer.
+ if (Entity.getParent())
+ DiagID = diag::warn_braces_around_scalar_init;
+ break;
+
+ case InitializedEntity::EK_Variable:
+ case InitializedEntity::EK_LambdaCapture:
+ // No warning, might be direct-list-initialization.
+ // FIXME: Should we warn for copy-list-initialization in these cases?
+ break;
+
+ case InitializedEntity::EK_New:
+ case InitializedEntity::EK_Temporary:
+ case InitializedEntity::EK_CompoundLiteralInit:
+ // No warning, braces are part of the syntax of the underlying construct.
+ break;
+
+ case InitializedEntity::EK_RelatedResult:
+ // No warning, we already warned when initializing the result.
+ break;
+
+ case InitializedEntity::EK_Exception:
+ case InitializedEntity::EK_Base:
+ case InitializedEntity::EK_Delegating:
+ case InitializedEntity::EK_BlockElement:
+ llvm_unreachable("unexpected braced scalar init");
+ }
+
+ if (DiagID) {
+ S.Diag(Braces.getBegin(), DiagID)
+ << Braces
+ << FixItHint::CreateRemoval(Braces.getBegin())
+ << FixItHint::CreateRemoval(Braces.getEnd());
+ }
+}
+
+/// Check whether the initializer \p IList (that was written with explicit
+/// braces) can be used to initialize an object of type \p T.
+///
+/// This also fills in \p StructuredList with the fully-braced, desugared
+/// form of the initialization.
+void InitListChecker::CheckExplicitInitList(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType &T,
+ InitListExpr *StructuredList,
+ bool TopLevelObject) {
+ if (!VerifyOnly) {
+ SyntacticToSemantic[IList] = StructuredList;
+ StructuredList->setSyntacticForm(IList);
+ }
+
+ unsigned Index = 0, StructuredIndex = 0;
+ CheckListElementTypes(Entity, IList, T, /*SubobjectIsDesignatorContext=*/true,
+ Index, StructuredList, StructuredIndex, TopLevelObject);
+ if (!VerifyOnly) {
+ QualType ExprTy = T;
+ if (!ExprTy->isArrayType())
+ ExprTy = ExprTy.getNonLValueExprType(SemaRef.Context);
+ IList->setType(ExprTy);
+ StructuredList->setType(ExprTy);
+ }
+ if (hadError)
+ return;
+
+ if (Index < IList->getNumInits()) {
+ // We have leftover initializers
+ if (VerifyOnly) {
+ if (SemaRef.getLangOpts().CPlusPlus ||
+ (SemaRef.getLangOpts().OpenCL &&
+ IList->getType()->isVectorType())) {
+ hadError = true;
+ }
+ return;
+ }
+
+ if (StructuredIndex == 1 &&
+ IsStringInit(StructuredList->getInit(0), T, SemaRef.Context) ==
+ SIF_None) {
+ unsigned DK = diag::ext_excess_initializers_in_char_array_initializer;
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ DK = diag::err_excess_initializers_in_char_array_initializer;
+ hadError = true;
+ }
+      // Special-case the diagnostic for excess initializers following a
+      // string literal that initializes the entire character array.
+ SemaRef.Diag(IList->getInit(Index)->getLocStart(), DK)
+ << IList->getInit(Index)->getSourceRange();
+ } else if (!T->isIncompleteType()) {
+ // Don't complain for incomplete types, since we'll get an error
+ // elsewhere
+ QualType CurrentObjectType = StructuredList->getType();
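+      // Classify the current object for the excess-initializers diagnostic:
+      // 0 -> array, 1 -> vector, 2 -> scalar, 3 -> union, 4 -> anything else.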
+ int initKind =
+ CurrentObjectType->isArrayType()? 0 :
+ CurrentObjectType->isVectorType()? 1 :
+ CurrentObjectType->isScalarType()? 2 :
+ CurrentObjectType->isUnionType()? 3 :
+ 4;
+
+ unsigned DK = diag::ext_excess_initializers;
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ DK = diag::err_excess_initializers;
+ hadError = true;
+ }
+ if (SemaRef.getLangOpts().OpenCL && initKind == 1) {
+ DK = diag::err_excess_initializers;
+ hadError = true;
+ }
+
+ SemaRef.Diag(IList->getInit(Index)->getLocStart(), DK)
+ << initKind << IList->getInit(Index)->getSourceRange();
+ }
+ }
+
+ if (!VerifyOnly && T->isScalarType() &&
+ IList->getNumInits() == 1 && !isa<InitListExpr>(IList->getInit(0)))
+ warnBracedScalarInit(SemaRef, Entity, IList->getSourceRange());
+}
+
+void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity,
+ InitListExpr *IList,
+ QualType &DeclType,
+ bool SubobjectIsDesignatorContext,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool TopLevelObject) {
+ if (DeclType->isAnyComplexType() && SubobjectIsDesignatorContext) {
+ // Explicitly braced initializer for complex type can be real+imaginary
+ // parts.
+ CheckComplexType(Entity, IList, DeclType, Index,
+ StructuredList, StructuredIndex);
+ } else if (DeclType->isScalarType()) {
+ CheckScalarType(Entity, IList, DeclType, Index,
+ StructuredList, StructuredIndex);
+ } else if (DeclType->isVectorType()) {
+ CheckVectorType(Entity, IList, DeclType, Index,
+ StructuredList, StructuredIndex);
+ } else if (DeclType->isRecordType()) {
+ assert(DeclType->isAggregateType() &&
+ "non-aggregate records should be handed in CheckSubElementType");
+ RecordDecl *RD = DeclType->getAs<RecordType>()->getDecl();
+ CheckStructUnionTypes(Entity, IList, DeclType, RD->field_begin(),
+ SubobjectIsDesignatorContext, Index,
+ StructuredList, StructuredIndex,
+ TopLevelObject);
+ } else if (DeclType->isArrayType()) {
+ llvm::APSInt Zero(
+ SemaRef.Context.getTypeSize(SemaRef.Context.getSizeType()),
+ false);
+ CheckArrayType(Entity, IList, DeclType, Zero,
+ SubobjectIsDesignatorContext, Index,
+ StructuredList, StructuredIndex);
+ } else if (DeclType->isVoidType() || DeclType->isFunctionType()) {
+ // This type is invalid, issue a diagnostic.
+ ++Index;
+ if (!VerifyOnly)
+ SemaRef.Diag(IList->getLocStart(), diag::err_illegal_initializer_type)
+ << DeclType;
+ hadError = true;
+ } else if (DeclType->isReferenceType()) {
+ CheckReferenceType(Entity, IList, DeclType, Index,
+ StructuredList, StructuredIndex);
+ } else if (DeclType->isObjCObjectType()) {
+ if (!VerifyOnly)
+ SemaRef.Diag(IList->getLocStart(), diag::err_init_objc_class)
+ << DeclType;
+ hadError = true;
+ } else {
+ if (!VerifyOnly)
+ SemaRef.Diag(IList->getLocStart(), diag::err_illegal_initializer_type)
+ << DeclType;
+ hadError = true;
+ }
+}
+
+void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
+ InitListExpr *IList,
+ QualType ElemType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ Expr *expr = IList->getInit(Index);
+
+ if (ElemType->isReferenceType())
+ return CheckReferenceType(Entity, IList, ElemType, Index,
+ StructuredList, StructuredIndex);
+
+ if (InitListExpr *SubInitList = dyn_cast<InitListExpr>(expr)) {
+ if (SubInitList->getNumInits() == 1 &&
+ IsStringInit(SubInitList->getInit(0), ElemType, SemaRef.Context) ==
+ SIF_None) {
+ expr = SubInitList->getInit(0);
+ } else if (!SemaRef.getLangOpts().CPlusPlus) {
+ InitListExpr *InnerStructuredList
+ = getStructuredSubobjectInit(IList, Index, ElemType,
+ StructuredList, StructuredIndex,
+ SubInitList->getSourceRange(), true);
+ CheckExplicitInitList(Entity, SubInitList, ElemType,
+ InnerStructuredList);
+
+ if (!hadError && !VerifyOnly) {
+ bool RequiresSecondPass = false;
+ FillInEmptyInitializations(Entity, InnerStructuredList,
+ RequiresSecondPass);
+ if (RequiresSecondPass && !hadError)
+ FillInEmptyInitializations(Entity, InnerStructuredList,
+ RequiresSecondPass);
+ }
+ ++StructuredIndex;
+ ++Index;
+ return;
+ }
+ // C++ initialization is handled later.
+ } else if (isa<ImplicitValueInitExpr>(expr)) {
+ // This happens during template instantiation when we see an InitListExpr
+ // that we've already checked once.
+ assert(SemaRef.Context.hasSameType(expr->getType(), ElemType) &&
+ "found implicit initialization for the wrong type");
+ if (!VerifyOnly)
+ UpdateStructuredListElement(StructuredList, StructuredIndex, expr);
+ ++Index;
+ return;
+ }
+
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ // C++ [dcl.init.aggr]p2:
+ // Each member is copy-initialized from the corresponding
+ // initializer-clause.
+
+ // FIXME: Better EqualLoc?
+ InitializationKind Kind =
+ InitializationKind::CreateCopy(expr->getLocStart(), SourceLocation());
+ InitializationSequence Seq(SemaRef, Entity, Kind, expr,
+ /*TopLevelOfInitList*/ true);
+
+ // C++14 [dcl.init.aggr]p13:
+ // If the assignment-expression can initialize a member, the member is
+ // initialized. Otherwise [...] brace elision is assumed
+ //
+ // Brace elision is never performed if the element is not an
+ // assignment-expression.
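+    //
+    // For example, in 'struct S { int a[2]; int b; }; S s = { 1, 2, 3 };'
+    // the elided braces around 'a' let it consume the first two
+    // initializer-clauses.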
+ if (Seq || isa<InitListExpr>(expr)) {
+ if (!VerifyOnly) {
+ ExprResult Result =
+ Seq.Perform(SemaRef, Entity, Kind, expr);
+ if (Result.isInvalid())
+ hadError = true;
+
+ UpdateStructuredListElement(StructuredList, StructuredIndex,
+ Result.getAs<Expr>());
+ } else if (!Seq)
+ hadError = true;
+ ++Index;
+ return;
+ }
+
+ // Fall through for subaggregate initialization
+ } else if (ElemType->isScalarType() || ElemType->isAtomicType()) {
+ // FIXME: Need to handle atomic aggregate types with implicit init lists.
+ return CheckScalarType(Entity, IList, ElemType, Index,
+ StructuredList, StructuredIndex);
+ } else if (const ArrayType *arrayType =
+ SemaRef.Context.getAsArrayType(ElemType)) {
+ // arrayType can be incomplete if we're initializing a flexible
+ // array member. There's nothing we can do with the completed
+ // type here, though.
+
+ if (IsStringInit(expr, arrayType, SemaRef.Context) == SIF_None) {
+ if (!VerifyOnly) {
+ CheckStringInit(expr, ElemType, arrayType, SemaRef);
+ UpdateStructuredListElement(StructuredList, StructuredIndex, expr);
+ }
+ ++Index;
+ return;
+ }
+
+ // Fall through for subaggregate initialization.
+
+ } else {
+ assert((ElemType->isRecordType() || ElemType->isVectorType()) &&
+ "Unexpected type");
+
+ // C99 6.7.8p13:
+ //
+ // The initializer for a structure or union object that has
+ // automatic storage duration shall be either an initializer
+ // list as described below, or a single expression that has
+ // compatible structure or union type. In the latter case, the
+ // initial value of the object, including unnamed members, is
+ // that of the expression.
+ ExprResult ExprRes = expr;
+ if (SemaRef.CheckSingleAssignmentConstraints(
+ ElemType, ExprRes, !VerifyOnly) != Sema::Incompatible) {
+ if (ExprRes.isInvalid())
+ hadError = true;
+ else {
+ ExprRes = SemaRef.DefaultFunctionArrayLvalueConversion(ExprRes.get());
+ if (ExprRes.isInvalid())
+ hadError = true;
+ }
+ UpdateStructuredListElement(StructuredList, StructuredIndex,
+ ExprRes.getAs<Expr>());
+ ++Index;
+ return;
+ }
+ ExprRes.get();
+ // Fall through for subaggregate initialization
+ }
+
+ // C++ [dcl.init.aggr]p12:
+ //
+ // [...] Otherwise, if the member is itself a non-empty
+ // subaggregate, brace elision is assumed and the initializer is
+ // considered for the initialization of the first member of
+ // the subaggregate.
+ if (!SemaRef.getLangOpts().OpenCL &&
+ (ElemType->isAggregateType() || ElemType->isVectorType())) {
+ CheckImplicitInitList(Entity, IList, ElemType, Index, StructuredList,
+ StructuredIndex);
+ ++StructuredIndex;
+ } else {
+ if (!VerifyOnly) {
+ // We cannot initialize this element, so let
+ // PerformCopyInitialization produce the appropriate diagnostic.
+ SemaRef.PerformCopyInitialization(Entity, SourceLocation(), expr,
+ /*TopLevelOfInitList=*/true);
+ }
+ hadError = true;
+ ++Index;
+ ++StructuredIndex;
+ }
+}
+
+void InitListChecker::CheckComplexType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ assert(Index == 0 && "Index in explicit init list must be zero");
+
+  // As an extension, clang supports complex initializers, which initialize
+  // a complex number component-wise. When an explicit initializer list for
+  // a complex number contains two initializers, this extension kicks in:
+  // it expects the initializer list to contain two elements convertible to
+  // the element type of the complex type. The first element initializes
+  // the real part, and the second element initializes the imaginary part.
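+  //
+  // For example, '_Complex float z = { 1.0f, 2.0f };' initializes z to
+  // 1.0f + 2.0fi.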
+
+ if (IList->getNumInits() != 2)
+ return CheckScalarType(Entity, IList, DeclType, Index, StructuredList,
+ StructuredIndex);
+
+ // This is an extension in C. (The builtin _Complex type does not exist
+ // in the C++ standard.)
+ if (!SemaRef.getLangOpts().CPlusPlus && !VerifyOnly)
+ SemaRef.Diag(IList->getLocStart(), diag::ext_complex_component_init)
+ << IList->getSourceRange();
+
+ // Initialize the complex number.
+ QualType elementType = DeclType->getAs<ComplexType>()->getElementType();
+ InitializedEntity ElementEntity =
+ InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity);
+
+ for (unsigned i = 0; i < 2; ++i) {
+ ElementEntity.setElementIndex(Index);
+ CheckSubElementType(ElementEntity, IList, elementType, Index,
+ StructuredList, StructuredIndex);
+ }
+}
+
+void InitListChecker::CheckScalarType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ if (Index >= IList->getNumInits()) {
+ if (!VerifyOnly)
+ SemaRef.Diag(IList->getLocStart(),
+ SemaRef.getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_empty_scalar_initializer :
+ diag::err_empty_scalar_initializer)
+ << IList->getSourceRange();
+ hadError = !SemaRef.getLangOpts().CPlusPlus11;
+ ++Index;
+ ++StructuredIndex;
+ return;
+ }
+
+ Expr *expr = IList->getInit(Index);
+ if (InitListExpr *SubIList = dyn_cast<InitListExpr>(expr)) {
+ // FIXME: This is invalid, and accepting it causes overload resolution
+ // to pick the wrong overload in some corner cases.
+ if (!VerifyOnly)
+ SemaRef.Diag(SubIList->getLocStart(),
+ diag::ext_many_braces_around_scalar_init)
+ << SubIList->getSourceRange();
+
+ CheckScalarType(Entity, SubIList, DeclType, Index, StructuredList,
+ StructuredIndex);
+ return;
+ } else if (isa<DesignatedInitExpr>(expr)) {
+ if (!VerifyOnly)
+ SemaRef.Diag(expr->getLocStart(),
+ diag::err_designator_for_scalar_init)
+ << DeclType << expr->getSourceRange();
+ hadError = true;
+ ++Index;
+ ++StructuredIndex;
+ return;
+ }
+
+ if (VerifyOnly) {
+ if (!SemaRef.CanPerformCopyInitialization(Entity,expr))
+ hadError = true;
+ ++Index;
+ return;
+ }
+
+ ExprResult Result =
+ SemaRef.PerformCopyInitialization(Entity, expr->getLocStart(), expr,
+ /*TopLevelOfInitList=*/true);
+
+ Expr *ResultExpr = nullptr;
+
+ if (Result.isInvalid())
+ hadError = true; // types weren't compatible.
+ else {
+ ResultExpr = Result.getAs<Expr>();
+
+ if (ResultExpr != expr) {
+ // The type was promoted, update initializer list.
+ IList->setInit(Index, ResultExpr);
+ }
+ }
+ if (hadError)
+ ++StructuredIndex;
+ else
+ UpdateStructuredListElement(StructuredList, StructuredIndex, ResultExpr);
+ ++Index;
+}
+
+void InitListChecker::CheckReferenceType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ if (Index >= IList->getNumInits()) {
+ // FIXME: It would be wonderful if we could point at the actual member. In
+ // general, it would be useful to pass location information down the stack,
+ // so that we know the location (or decl) of the "current object" being
+ // initialized.
+ if (!VerifyOnly)
+ SemaRef.Diag(IList->getLocStart(),
+ diag::err_init_reference_member_uninitialized)
+ << DeclType
+ << IList->getSourceRange();
+ hadError = true;
+ ++Index;
+ ++StructuredIndex;
+ return;
+ }
+
+ Expr *expr = IList->getInit(Index);
+ if (isa<InitListExpr>(expr) && !SemaRef.getLangOpts().CPlusPlus11) {
+ if (!VerifyOnly)
+ SemaRef.Diag(IList->getLocStart(), diag::err_init_non_aggr_init_list)
+ << DeclType << IList->getSourceRange();
+ hadError = true;
+ ++Index;
+ ++StructuredIndex;
+ return;
+ }
+
+ if (VerifyOnly) {
+ if (!SemaRef.CanPerformCopyInitialization(Entity,expr))
+ hadError = true;
+ ++Index;
+ return;
+ }
+
+ ExprResult Result =
+ SemaRef.PerformCopyInitialization(Entity, expr->getLocStart(), expr,
+ /*TopLevelOfInitList=*/true);
+
+ if (Result.isInvalid())
+ hadError = true;
+
+ expr = Result.getAs<Expr>();
+ IList->setInit(Index, expr);
+
+ if (hadError)
+ ++StructuredIndex;
+ else
+ UpdateStructuredListElement(StructuredList, StructuredIndex, expr);
+ ++Index;
+}
+
+void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ const VectorType *VT = DeclType->getAs<VectorType>();
+ unsigned maxElements = VT->getNumElements();
+ unsigned numEltsInit = 0;
+ QualType elementType = VT->getElementType();
+
+ if (Index >= IList->getNumInits()) {
+ // Make sure the element type can be value-initialized.
+ if (VerifyOnly)
+ CheckEmptyInitializable(
+ InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity),
+ IList->getLocEnd());
+ return;
+ }
+
+ if (!SemaRef.getLangOpts().OpenCL) {
+ // If the initializing element is a vector, try to copy-initialize
+ // instead of breaking it apart (which is doomed to failure anyway).
+ Expr *Init = IList->getInit(Index);
+ if (!isa<InitListExpr>(Init) && Init->getType()->isVectorType()) {
+ if (VerifyOnly) {
+ if (!SemaRef.CanPerformCopyInitialization(Entity, Init))
+ hadError = true;
+ ++Index;
+ return;
+ }
+
+ ExprResult Result =
+ SemaRef.PerformCopyInitialization(Entity, Init->getLocStart(), Init,
+ /*TopLevelOfInitList=*/true);
+
+ Expr *ResultExpr = nullptr;
+ if (Result.isInvalid())
+ hadError = true; // types weren't compatible.
+ else {
+ ResultExpr = Result.getAs<Expr>();
+
+ if (ResultExpr != Init) {
+ // The type was promoted, update initializer list.
+ IList->setInit(Index, ResultExpr);
+ }
+ }
+ if (hadError)
+ ++StructuredIndex;
+ else
+ UpdateStructuredListElement(StructuredList, StructuredIndex,
+ ResultExpr);
+ ++Index;
+ return;
+ }
+
+ InitializedEntity ElementEntity =
+ InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity);
+
+ for (unsigned i = 0; i < maxElements; ++i, ++numEltsInit) {
+ // Don't attempt to go past the end of the init list
+ if (Index >= IList->getNumInits()) {
+ if (VerifyOnly)
+ CheckEmptyInitializable(ElementEntity, IList->getLocEnd());
+ break;
+ }
+
+ ElementEntity.setElementIndex(Index);
+ CheckSubElementType(ElementEntity, IList, elementType, Index,
+ StructuredList, StructuredIndex);
+ }
+
+ if (VerifyOnly)
+ return;
+
+ bool isBigEndian = SemaRef.Context.getTargetInfo().isBigEndian();
+ const VectorType *T = Entity.getType()->getAs<VectorType>();
+ if (isBigEndian && (T->getVectorKind() == VectorType::NeonVector ||
+ T->getVectorKind() == VectorType::NeonPolyVector)) {
+ // The ability to use vector initializer lists is a GNU vector extension
+ // and is unrelated to the NEON intrinsics in arm_neon.h. On little
+ // endian machines it works fine, however on big endian machines it
+ // exhibits surprising behaviour:
+ //
+ // uint32x2_t x = {42, 64};
+ // return vget_lane_u32(x, 0); // Will return 64.
+ //
+ // Because of this, explicitly call out that it is non-portable.
+ //
+ SemaRef.Diag(IList->getLocStart(),
+ diag::warn_neon_vector_initializer_non_portable);
+
+ const char *typeCode;
+ unsigned typeSize = SemaRef.Context.getTypeSize(elementType);
+
+ if (elementType->isFloatingType())
+ typeCode = "f";
+ else if (elementType->isSignedIntegerType())
+ typeCode = "s";
+ else if (elementType->isUnsignedIntegerType())
+ typeCode = "u";
+ else
+ llvm_unreachable("Invalid element type!");
+
+ SemaRef.Diag(IList->getLocStart(),
+ SemaRef.Context.getTypeSize(VT) > 64 ?
+ diag::note_neon_vector_initializer_non_portable_q :
+ diag::note_neon_vector_initializer_non_portable)
+ << typeCode << typeSize;
+ }
+
+ return;
+ }
+
+ InitializedEntity ElementEntity =
+ InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity);
+
+  // OpenCL initializers allow vectors to be constructed from vectors.
+ for (unsigned i = 0; i < maxElements; ++i) {
+ // Don't attempt to go past the end of the init list
+ if (Index >= IList->getNumInits())
+ break;
+
+ ElementEntity.setElementIndex(Index);
+
+ QualType IType = IList->getInit(Index)->getType();
+ if (!IType->isVectorType()) {
+ CheckSubElementType(ElementEntity, IList, elementType, Index,
+ StructuredList, StructuredIndex);
+ ++numEltsInit;
+ } else {
+ QualType VecType;
+ const VectorType *IVT = IType->getAs<VectorType>();
+ unsigned numIElts = IVT->getNumElements();
+
+ if (IType->isExtVectorType())
+ VecType = SemaRef.Context.getExtVectorType(elementType, numIElts);
+ else
+ VecType = SemaRef.Context.getVectorType(elementType, numIElts,
+ IVT->getVectorKind());
+ CheckSubElementType(ElementEntity, IList, VecType, Index,
+ StructuredList, StructuredIndex);
+ numEltsInit += numIElts;
+ }
+ }
+
+ // OpenCL requires all elements to be initialized.
+ if (numEltsInit != maxElements) {
+ if (!VerifyOnly)
+ SemaRef.Diag(IList->getLocStart(),
+ diag::err_vector_incorrect_num_initializers)
+ << (numEltsInit < maxElements) << maxElements << numEltsInit;
+ hadError = true;
+ }
+}
+
+void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType &DeclType,
+ llvm::APSInt elementIndex,
+ bool SubobjectIsDesignatorContext,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ const ArrayType *arrayType = SemaRef.Context.getAsArrayType(DeclType);
+
+ // Check for the special-case of initializing an array with a string.
+ if (Index < IList->getNumInits()) {
+ if (IsStringInit(IList->getInit(Index), arrayType, SemaRef.Context) ==
+ SIF_None) {
+ // We place the string literal directly into the resulting
+ // initializer list. This is the only place where the structure
+ // of the structured initializer list doesn't match exactly,
+ // because doing so would involve allocating one character
+ // constant for each string.
+ if (!VerifyOnly) {
+ CheckStringInit(IList->getInit(Index), DeclType, arrayType, SemaRef);
+ UpdateStructuredListElement(StructuredList, StructuredIndex,
+ IList->getInit(Index));
+ StructuredList->resizeInits(SemaRef.Context, StructuredIndex);
+ }
+ ++Index;
+ return;
+ }
+ }
+ if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(arrayType)) {
+ // Check for VLAs; in standard C it would be possible to check this
+ // earlier, but I don't know where clang accepts VLAs (gcc accepts
+ // them in all sorts of strange places).
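+    // For example, 'void f(int n) { int a[n] = { 0 }; }' is rejected here.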
+ if (!VerifyOnly)
+ SemaRef.Diag(VAT->getSizeExpr()->getLocStart(),
+ diag::err_variable_object_no_init)
+ << VAT->getSizeExpr()->getSourceRange();
+ hadError = true;
+ ++Index;
+ ++StructuredIndex;
+ return;
+ }
+
+ // We might know the maximum number of elements in advance.
+ llvm::APSInt maxElements(elementIndex.getBitWidth(),
+ elementIndex.isUnsigned());
+ bool maxElementsKnown = false;
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(arrayType)) {
+ maxElements = CAT->getSize();
+ elementIndex = elementIndex.extOrTrunc(maxElements.getBitWidth());
+ elementIndex.setIsUnsigned(maxElements.isUnsigned());
+ maxElementsKnown = true;
+ }
+
+ QualType elementType = arrayType->getElementType();
+ while (Index < IList->getNumInits()) {
+ Expr *Init = IList->getInit(Index);
+ if (DesignatedInitExpr *DIE = dyn_cast<DesignatedInitExpr>(Init)) {
+ // If we're not the subobject that matches up with the '{' for
+ // the designator, we shouldn't be handling the
+ // designator. Return immediately.
+ if (!SubobjectIsDesignatorContext)
+ return;
+
+ // Handle this designated initializer. elementIndex will be
+ // updated to be the next array element we'll initialize.
+ if (CheckDesignatedInitializer(Entity, IList, DIE, 0,
+ DeclType, nullptr, &elementIndex, Index,
+ StructuredList, StructuredIndex, true,
+ false)) {
+ hadError = true;
+ continue;
+ }
+
+ if (elementIndex.getBitWidth() > maxElements.getBitWidth())
+ maxElements = maxElements.extend(elementIndex.getBitWidth());
+ else if (elementIndex.getBitWidth() < maxElements.getBitWidth())
+ elementIndex = elementIndex.extend(maxElements.getBitWidth());
+ elementIndex.setIsUnsigned(maxElements.isUnsigned());
+
+ // If the array is of incomplete type, keep track of the number of
+ // elements in the initializer.
+ if (!maxElementsKnown && elementIndex > maxElements)
+ maxElements = elementIndex;
+
+ continue;
+ }
+
+ // If we know the maximum number of elements, and we've already
+ // hit it, stop consuming elements in the initializer list.
+ if (maxElementsKnown && elementIndex == maxElements)
+ break;
+
+ InitializedEntity ElementEntity =
+ InitializedEntity::InitializeElement(SemaRef.Context, StructuredIndex,
+ Entity);
+ // Check this element.
+ CheckSubElementType(ElementEntity, IList, elementType, Index,
+ StructuredList, StructuredIndex);
+ ++elementIndex;
+
+ // If the array is of incomplete type, keep track of the number of
+ // elements in the initializer.
+ if (!maxElementsKnown && elementIndex > maxElements)
+ maxElements = elementIndex;
+ }
+ if (!hadError && DeclType->isIncompleteArrayType() && !VerifyOnly) {
+ // If this is an incomplete array type, the actual type needs to
+ // be calculated here.
+ llvm::APSInt Zero(maxElements.getBitWidth(), maxElements.isUnsigned());
+ if (maxElements == Zero) {
+ // Sizing an array implicitly to zero is not allowed by ISO C,
+ // but is supported by GNU.
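+      // For example, 'int a[] = {};' yields a zero-length array here.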
+ SemaRef.Diag(IList->getLocStart(),
+ diag::ext_typecheck_zero_array_size);
+ }
+
+ DeclType = SemaRef.Context.getConstantArrayType(elementType, maxElements,
+ ArrayType::Normal, 0);
+ }
+ if (!hadError && VerifyOnly) {
+ // Check if there are any members of the array that get value-initialized.
+ // If so, check if doing that is possible.
+ // FIXME: This needs to detect holes left by designated initializers too.
+ if (maxElementsKnown && elementIndex < maxElements)
+ CheckEmptyInitializable(InitializedEntity::InitializeElement(
+ SemaRef.Context, 0, Entity),
+ IList->getLocEnd());
+ }
+}
+
+bool InitListChecker::CheckFlexibleArrayInit(const InitializedEntity &Entity,
+ Expr *InitExpr,
+ FieldDecl *Field,
+ bool TopLevelObject) {
+ // Handle GNU flexible array initializers.
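+  // For example, given 'struct S { int n; int tail[]; };', the initializer
+  // 'struct S s = { 1, { 2, 3 } };' at file scope is accepted as an
+  // extension, but only for a top-level variable without local storage.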
+ unsigned FlexArrayDiag;
+ if (isa<InitListExpr>(InitExpr) &&
+ cast<InitListExpr>(InitExpr)->getNumInits() == 0) {
+ // Empty flexible array init always allowed as an extension
+ FlexArrayDiag = diag::ext_flexible_array_init;
+ } else if (SemaRef.getLangOpts().CPlusPlus) {
+ // Disallow flexible array init in C++; it is not required for gcc
+ // compatibility, and it needs work to IRGen correctly in general.
+ FlexArrayDiag = diag::err_flexible_array_init;
+ } else if (!TopLevelObject) {
+ // Disallow flexible array init on non-top-level object
+ FlexArrayDiag = diag::err_flexible_array_init;
+ } else if (Entity.getKind() != InitializedEntity::EK_Variable) {
+ // Disallow flexible array init on anything which is not a variable.
+ FlexArrayDiag = diag::err_flexible_array_init;
+ } else if (cast<VarDecl>(Entity.getDecl())->hasLocalStorage()) {
+ // Disallow flexible array init on local variables.
+ FlexArrayDiag = diag::err_flexible_array_init;
+ } else {
+ // Allow other cases.
+ FlexArrayDiag = diag::ext_flexible_array_init;
+ }
+
+ if (!VerifyOnly) {
+ SemaRef.Diag(InitExpr->getLocStart(),
+ FlexArrayDiag)
+ << InitExpr->getLocStart();
+ SemaRef.Diag(Field->getLocation(), diag::note_flexible_array_member)
+ << Field;
+ }
+
+ return FlexArrayDiag != diag::ext_flexible_array_init;
+}
+
+void InitListChecker::CheckStructUnionTypes(const InitializedEntity &Entity,
+ InitListExpr *IList,
+ QualType DeclType,
+ RecordDecl::field_iterator Field,
+ bool SubobjectIsDesignatorContext,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool TopLevelObject) {
+ RecordDecl* structDecl = DeclType->getAs<RecordType>()->getDecl();
+
+  // If the record is invalid, some of its members are invalid. To avoid
+  // confusion, we forgo checking the initializer for the entire record.
+ if (structDecl->isInvalidDecl()) {
+ // Assume it was supposed to consume a single initializer.
+ ++Index;
+ hadError = true;
+ return;
+ }
+
+ if (DeclType->isUnionType() && IList->getNumInits() == 0) {
+ RecordDecl *RD = DeclType->getAs<RecordType>()->getDecl();
+
+ // If there's a default initializer, use it.
+ if (isa<CXXRecordDecl>(RD) && cast<CXXRecordDecl>(RD)->hasInClassInitializer()) {
+ if (VerifyOnly)
+ return;
+ for (RecordDecl::field_iterator FieldEnd = RD->field_end();
+ Field != FieldEnd; ++Field) {
+ if (Field->hasInClassInitializer()) {
+ StructuredList->setInitializedFieldInUnion(*Field);
+ // FIXME: Actually build a CXXDefaultInitExpr?
+ return;
+ }
+ }
+ }
+
+ // Value-initialize the first member of the union that isn't an unnamed
+ // bitfield.
+ for (RecordDecl::field_iterator FieldEnd = RD->field_end();
+ Field != FieldEnd; ++Field) {
+ if (!Field->isUnnamedBitfield()) {
+ if (VerifyOnly)
+ CheckEmptyInitializable(
+ InitializedEntity::InitializeMember(*Field, &Entity),
+ IList->getLocEnd());
+ else
+ StructuredList->setInitializedFieldInUnion(*Field);
+ break;
+ }
+ }
+ return;
+ }
+
+ // If structDecl is a forward declaration, this loop won't do
+  // anything except look at designated initializers; that's okay,
+ // because an error should get printed out elsewhere. It might be
+ // worthwhile to skip over the rest of the initializer, though.
+ RecordDecl *RD = DeclType->getAs<RecordType>()->getDecl();
+ RecordDecl::field_iterator FieldEnd = RD->field_end();
+ bool InitializedSomething = false;
+ bool CheckForMissingFields = true;
+ while (Index < IList->getNumInits()) {
+ Expr *Init = IList->getInit(Index);
+
+ if (DesignatedInitExpr *DIE = dyn_cast<DesignatedInitExpr>(Init)) {
+ // If we're not the subobject that matches up with the '{' for
+ // the designator, we shouldn't be handling the
+ // designator. Return immediately.
+ if (!SubobjectIsDesignatorContext)
+ return;
+
+ // Handle this designated initializer. Field will be updated to
+ // the next field that we'll be initializing.
+ if (CheckDesignatedInitializer(Entity, IList, DIE, 0,
+ DeclType, &Field, nullptr, Index,
+ StructuredList, StructuredIndex,
+ true, TopLevelObject))
+ hadError = true;
+
+ InitializedSomething = true;
+
+ // Disable check for missing fields when designators are used.
+ // This matches gcc behaviour.
+ CheckForMissingFields = false;
+ continue;
+ }
+
+ if (Field == FieldEnd) {
+ // We've run out of fields. We're done.
+ break;
+ }
+
+ // We've already initialized a member of a union. We're done.
+ if (InitializedSomething && DeclType->isUnionType())
+ break;
+
+ // If we've hit the flexible array member at the end, we're done.
+ if (Field->getType()->isIncompleteArrayType())
+ break;
+
+ if (Field->isUnnamedBitfield()) {
+ // Don't initialize unnamed bitfields, e.g. "int : 20;"
+ ++Field;
+ continue;
+ }
+
+ // Make sure we can use this declaration.
+ bool InvalidUse;
+ if (VerifyOnly)
+ InvalidUse = !SemaRef.CanUseDecl(*Field);
+ else
+ InvalidUse = SemaRef.DiagnoseUseOfDecl(*Field,
+ IList->getInit(Index)->getLocStart());
+ if (InvalidUse) {
+ ++Index;
+ ++Field;
+ hadError = true;
+ continue;
+ }
+
+ InitializedEntity MemberEntity =
+ InitializedEntity::InitializeMember(*Field, &Entity);
+ CheckSubElementType(MemberEntity, IList, Field->getType(), Index,
+ StructuredList, StructuredIndex);
+ InitializedSomething = true;
+
+ if (DeclType->isUnionType() && !VerifyOnly) {
+ // Initialize the first field within the union.
+ StructuredList->setInitializedFieldInUnion(*Field);
+ }
+
+ ++Field;
+ }
+
+ // Emit warnings for missing struct field initializers.
+ if (!VerifyOnly && InitializedSomething && CheckForMissingFields &&
+ Field != FieldEnd && !Field->getType()->isIncompleteArrayType() &&
+ !DeclType->isUnionType()) {
+ // It is possible we have one or more unnamed bitfields remaining.
+ // Find first (if any) named field and emit warning.
+ for (RecordDecl::field_iterator it = Field, end = RD->field_end();
+ it != end; ++it) {
+ if (!it->isUnnamedBitfield() && !it->hasInClassInitializer()) {
+ SemaRef.Diag(IList->getSourceRange().getEnd(),
+ diag::warn_missing_field_initializers) << *it;
+ break;
+ }
+ }
+ }
+
+ // Check that any remaining fields can be value-initialized.
+ if (VerifyOnly && Field != FieldEnd && !DeclType->isUnionType() &&
+ !Field->getType()->isIncompleteArrayType()) {
+ // FIXME: Should check for holes left by designated initializers too.
+ for (; Field != FieldEnd && !hadError; ++Field) {
+ if (!Field->isUnnamedBitfield() && !Field->hasInClassInitializer())
+ CheckEmptyInitializable(
+ InitializedEntity::InitializeMember(*Field, &Entity),
+ IList->getLocEnd());
+ }
+ }
+
+ if (Field == FieldEnd || !Field->getType()->isIncompleteArrayType() ||
+ Index >= IList->getNumInits())
+ return;
+
+ if (CheckFlexibleArrayInit(Entity, IList->getInit(Index), *Field,
+ TopLevelObject)) {
+ hadError = true;
+ ++Index;
+ return;
+ }
+
+ InitializedEntity MemberEntity =
+ InitializedEntity::InitializeMember(*Field, &Entity);
+
+ if (isa<InitListExpr>(IList->getInit(Index)))
+ CheckSubElementType(MemberEntity, IList, Field->getType(), Index,
+ StructuredList, StructuredIndex);
+ else
+ CheckImplicitInitList(MemberEntity, IList, Field->getType(), Index,
+ StructuredList, StructuredIndex);
+}
+
+/// \brief Expand a field designator that refers to a member of an
+/// anonymous struct or union into a series of field designators that
+/// refer to the field within the appropriate subobject.
+///
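+/// An illustrative (hypothetical) example:
+///
+/// struct S { struct { int a; }; }; // anonymous struct member
+/// struct S s = { .a = 1 }; // ".a" becomes a designator chain
+/// // through the unnamed field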
+static void ExpandAnonymousFieldDesignator(Sema &SemaRef,
+ DesignatedInitExpr *DIE,
+ unsigned DesigIdx,
+ IndirectFieldDecl *IndirectField) {
+ typedef DesignatedInitExpr::Designator Designator;
+
+ // Build the replacement designators.
+ SmallVector<Designator, 4> Replacements;
+ for (IndirectFieldDecl::chain_iterator PI = IndirectField->chain_begin(),
+ PE = IndirectField->chain_end(); PI != PE; ++PI) {
+ if (PI + 1 == PE)
+ Replacements.push_back(Designator((IdentifierInfo *)nullptr,
+ DIE->getDesignator(DesigIdx)->getDotLoc(),
+ DIE->getDesignator(DesigIdx)->getFieldLoc()));
+ else
+ Replacements.push_back(Designator((IdentifierInfo *)nullptr,
+ SourceLocation(), SourceLocation()));
+ assert(isa<FieldDecl>(*PI));
+ Replacements.back().setField(cast<FieldDecl>(*PI));
+ }
+
+ // Expand the current designator into the set of replacement
+ // designators, so we have a full subobject path down to where the
+ // member of the anonymous struct/union is actually stored.
+ DIE->ExpandDesignator(SemaRef.Context, DesigIdx, &Replacements[0],
+ &Replacements[0] + Replacements.size());
+}
+
+static DesignatedInitExpr *CloneDesignatedInitExpr(Sema &SemaRef,
+ DesignatedInitExpr *DIE) {
+ unsigned NumIndexExprs = DIE->getNumSubExprs() - 1;
+ SmallVector<Expr*, 4> IndexExprs(NumIndexExprs);
+ for (unsigned I = 0; I < NumIndexExprs; ++I)
+ IndexExprs[I] = DIE->getSubExpr(I + 1);
+ return DesignatedInitExpr::Create(SemaRef.Context, DIE->designators_begin(),
+ DIE->size(), IndexExprs,
+ DIE->getEqualOrColonLoc(),
+ DIE->usesGNUSyntax(), DIE->getInit());
+}
+
+namespace {
+
+// Callback that accepts only those typo corrections that name field members
+// of the given struct or union.
+class FieldInitializerValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ explicit FieldInitializerValidatorCCC(RecordDecl *RD)
+ : Record(RD) {}
+
+ bool ValidateCandidate(const TypoCorrection &candidate) override {
+ FieldDecl *FD = candidate.getCorrectionDeclAs<FieldDecl>();
+ return FD && FD->getDeclContext()->getRedeclContext()->Equals(Record);
+ }
+
+ private:
+ RecordDecl *Record;
+};
+
+}
+
+/// @brief Check the well-formedness of a C99 designated initializer.
+///
+/// Determines whether the designated initializer @p DIE, which
+/// resides at the given @p Index within the initializer list @p
+/// IList, is well-formed for a current object of type @p DeclType
+/// (C99 6.7.8). The actual subobject that this designator refers to
+/// within the current subobject is returned in either
+/// @p NextField or @p NextElementIndex (whichever is appropriate).
+///
+/// @param IList The initializer list in which this designated
+/// initializer occurs.
+///
+/// @param DIE The designated initializer expression.
+///
+/// @param DesigIdx The index of the current designator.
+///
+/// @param CurrentObjectType The type of the "current object" (C99 6.7.8p17),
+/// into which the designation in @p DIE should refer.
+///
+/// @param NextField If non-NULL and the first designator in @p DIE is
+/// a field, this will be set to the field declaration corresponding
+/// to the field named by the designator.
+///
+/// @param NextElementIndex If non-NULL and the first designator in @p
+/// DIE is an array designator or GNU array-range designator, this
+/// will be set to the last index initialized by this designator.
+///
+/// @param Index Index into @p IList where the designated initializer
+/// @p DIE occurs.
+///
+/// @param StructuredList The initializer list expression that
+/// describes all of the subobject initializers in the order they'll
+/// actually be initialized.
+///
+/// @returns true if there was an error, false otherwise.
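+///
+/// An illustrative (hypothetical) example: given "struct Point { int x, y; };",
+/// the initializer "struct Point p = { .y = 2 };" arrives here with a DIE
+/// whose single designator names the field "y".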
+bool
+InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
+ InitListExpr *IList,
+ DesignatedInitExpr *DIE,
+ unsigned DesigIdx,
+ QualType &CurrentObjectType,
+ RecordDecl::field_iterator *NextField,
+ llvm::APSInt *NextElementIndex,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool FinishSubobjectInit,
+ bool TopLevelObject) {
+ if (DesigIdx == DIE->size()) {
+ // Check the actual initialization for the designated object type.
+ bool prevHadError = hadError;
+
+ // Temporarily remove the designator expression from the
+ // initializer list that the child calls see, so that we don't try
+ // to re-process the designator.
+ unsigned OldIndex = Index;
+ IList->setInit(OldIndex, DIE->getInit());
+
+ CheckSubElementType(Entity, IList, CurrentObjectType, Index,
+ StructuredList, StructuredIndex);
+
+ // Restore the designated initializer expression in the syntactic
+ // form of the initializer list.
+ if (IList->getInit(OldIndex) != DIE->getInit())
+ DIE->setInit(IList->getInit(OldIndex));
+ IList->setInit(OldIndex, DIE);
+
+ return hadError && !prevHadError;
+ }
+
+ DesignatedInitExpr::Designator *D = DIE->getDesignator(DesigIdx);
+ bool IsFirstDesignator = (DesigIdx == 0);
+ if (!VerifyOnly) {
+ assert((IsFirstDesignator || StructuredList) &&
+ "Need a non-designated initializer list to start from");
+
+ // Determine the structural initializer list that corresponds to the
+ // current subobject.
+ if (IsFirstDesignator)
+ StructuredList = SyntacticToSemantic.lookup(IList);
+ else {
+ Expr *ExistingInit = StructuredIndex < StructuredList->getNumInits() ?
+ StructuredList->getInit(StructuredIndex) : nullptr;
+ if (!ExistingInit && StructuredList->hasArrayFiller())
+ ExistingInit = StructuredList->getArrayFiller();
+
+ if (!ExistingInit)
+ StructuredList =
+ getStructuredSubobjectInit(IList, Index, CurrentObjectType,
+ StructuredList, StructuredIndex,
+ SourceRange(D->getLocStart(),
+ DIE->getLocEnd()));
+ else if (InitListExpr *Result = dyn_cast<InitListExpr>(ExistingInit))
+ StructuredList = Result;
+ else {
+ if (DesignatedInitUpdateExpr *E =
+ dyn_cast<DesignatedInitUpdateExpr>(ExistingInit))
+ StructuredList = E->getUpdater();
+ else {
+ DesignatedInitUpdateExpr *DIUE =
+ new (SemaRef.Context) DesignatedInitUpdateExpr(SemaRef.Context,
+ D->getLocStart(), ExistingInit,
+ DIE->getLocEnd());
+ StructuredList->updateInit(SemaRef.Context, StructuredIndex, DIUE);
+ StructuredList = DIUE->getUpdater();
+ }
+
+ // We need to check on source range validity because the previous
+ // initializer does not have to be an explicit initializer, e.g.,
+ //
+ // struct P { int a, b; };
+ // struct PP { struct P p; } l = { { .a = 2 }, .p.b = 3 };
+ //
+ // There is an overwrite taking place because the first braced initializer
+ // list "{ .a = 2 }" already provides a value for .p.b (which is zero).
+ if (ExistingInit->getSourceRange().isValid()) {
+ // We are creating an initializer list that initializes the
+ // subobjects of the current object, but there was already an
+ // initialization that completely initialized the current
+ // subobject, e.g., by a compound literal:
+ //
+ // struct X { int a, b; };
+ // struct X xs[] = { [0] = (struct X) { 1, 2 }, [0].b = 3 };
+ //
+ // Here, xs[0].a == 0 and xs[0].b == 3, since the second,
+ // designated initializer re-initializes the whole
+ // subobject [0], overwriting previous initializers.
+ SemaRef.Diag(D->getLocStart(),
+ diag::warn_subobject_initializer_overrides)
+ << SourceRange(D->getLocStart(), DIE->getLocEnd());
+
+ SemaRef.Diag(ExistingInit->getLocStart(),
+ diag::note_previous_initializer)
+ << /*FIXME:has side effects=*/0
+ << ExistingInit->getSourceRange();
+ }
+ }
+ }
+ assert(StructuredList && "Expected a structured initializer list");
+ }
+
+ if (D->isFieldDesignator()) {
+ // C99 6.7.8p7:
+ //
+ // If a designator has the form
+ //
+ // . identifier
+ //
+ // then the current object (defined below) shall have
+ // structure or union type and the identifier shall be the
+ // name of a member of that type.
+ const RecordType *RT = CurrentObjectType->getAs<RecordType>();
+ if (!RT) {
+ SourceLocation Loc = D->getDotLoc();
+ if (Loc.isInvalid())
+ Loc = D->getFieldLoc();
+ if (!VerifyOnly)
+ SemaRef.Diag(Loc, diag::err_field_designator_non_aggr)
+ << SemaRef.getLangOpts().CPlusPlus << CurrentObjectType;
+ ++Index;
+ return true;
+ }
+
+ FieldDecl *KnownField = D->getField();
+ if (!KnownField) {
+ IdentifierInfo *FieldName = D->getFieldName();
+ DeclContext::lookup_result Lookup = RT->getDecl()->lookup(FieldName);
+ for (NamedDecl *ND : Lookup) {
+ if (auto *FD = dyn_cast<FieldDecl>(ND)) {
+ KnownField = FD;
+ break;
+ }
+ if (auto *IFD = dyn_cast<IndirectFieldDecl>(ND)) {
+ // In verify mode, don't modify the original.
+ if (VerifyOnly)
+ DIE = CloneDesignatedInitExpr(SemaRef, DIE);
+ ExpandAnonymousFieldDesignator(SemaRef, DIE, DesigIdx, IFD);
+ D = DIE->getDesignator(DesigIdx);
+ KnownField = cast<FieldDecl>(*IFD->chain_begin());
+ break;
+ }
+ }
+ if (!KnownField) {
+ if (VerifyOnly) {
+ ++Index;
+ return true; // No typo correction when just trying this out.
+ }
+
+ // Name lookup found something, but it wasn't a field.
+ if (!Lookup.empty()) {
+ SemaRef.Diag(D->getFieldLoc(), diag::err_field_designator_nonfield)
+ << FieldName;
+ SemaRef.Diag(Lookup.front()->getLocation(),
+ diag::note_field_designator_found);
+ ++Index;
+ return true;
+ }
+
+ // Name lookup didn't find anything.
+ // Determine whether this was a typo for another field name.
+ if (TypoCorrection Corrected = SemaRef.CorrectTypo(
+ DeclarationNameInfo(FieldName, D->getFieldLoc()),
+ Sema::LookupMemberName, /*Scope=*/nullptr, /*SS=*/nullptr,
+ llvm::make_unique<FieldInitializerValidatorCCC>(RT->getDecl()),
+ Sema::CTK_ErrorRecovery, RT->getDecl())) {
+ SemaRef.diagnoseTypo(
+ Corrected,
+ SemaRef.PDiag(diag::err_field_designator_unknown_suggest)
+ << FieldName << CurrentObjectType);
+ KnownField = Corrected.getCorrectionDeclAs<FieldDecl>();
+ hadError = true;
+ } else {
+ // Typo correction didn't find anything.
+ SemaRef.Diag(D->getFieldLoc(), diag::err_field_designator_unknown)
+ << FieldName << CurrentObjectType;
+ ++Index;
+ return true;
+ }
+ }
+ }
+
+ unsigned FieldIndex = 0;
+ for (auto *FI : RT->getDecl()->fields()) {
+ if (FI->isUnnamedBitfield())
+ continue;
+ if (KnownField == FI)
+ break;
+ ++FieldIndex;
+ }
+
+ RecordDecl::field_iterator Field =
+ RecordDecl::field_iterator(DeclContext::decl_iterator(KnownField));
+
+ // All of the fields of a union are located at the same place in
+ // the initializer list.
+ if (RT->getDecl()->isUnion()) {
+ FieldIndex = 0;
+ if (!VerifyOnly) {
+ FieldDecl *CurrentField = StructuredList->getInitializedFieldInUnion();
+ if (CurrentField && CurrentField != *Field) {
+ assert(StructuredList->getNumInits() == 1
+ && "A union should never have more than one initializer!");
+
+ // We're about to throw away an initializer; emit a warning.
+ SemaRef.Diag(D->getFieldLoc(),
+ diag::warn_initializer_overrides)
+ << D->getSourceRange();
+ Expr *ExistingInit = StructuredList->getInit(0);
+ SemaRef.Diag(ExistingInit->getLocStart(),
+ diag::note_previous_initializer)
+ << /*FIXME:has side effects=*/0
+ << ExistingInit->getSourceRange();
+
+ // Remove the existing initializer.
+ StructuredList->resizeInits(SemaRef.Context, 0);
+ StructuredList->setInitializedFieldInUnion(nullptr);
+ }
+
+ StructuredList->setInitializedFieldInUnion(*Field);
+ }
+ }
+
+ // Make sure we can use this declaration.
+ bool InvalidUse;
+ if (VerifyOnly)
+ InvalidUse = !SemaRef.CanUseDecl(*Field);
+ else
+ InvalidUse = SemaRef.DiagnoseUseOfDecl(*Field, D->getFieldLoc());
+ if (InvalidUse) {
+ ++Index;
+ return true;
+ }
+
+ if (!VerifyOnly) {
+ // Update the designator with the field declaration.
+ D->setField(*Field);
+
+ // Make sure that our non-designated initializer list has space
+ // for a subobject corresponding to this field.
+ if (FieldIndex >= StructuredList->getNumInits())
+ StructuredList->resizeInits(SemaRef.Context, FieldIndex + 1);
+ }
+
+ // This designator names a flexible array member.
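+ // e.g. (illustrative): ".a" where the current object has type
+ // "struct S { int n; int a[]; }".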
+ if (Field->getType()->isIncompleteArrayType()) {
+ bool Invalid = false;
+ if ((DesigIdx + 1) != DIE->size()) {
+ // We can't designate an object within the flexible array
+ // member (because GCC doesn't allow it).
+ if (!VerifyOnly) {
+ DesignatedInitExpr::Designator *NextD
+ = DIE->getDesignator(DesigIdx + 1);
+ SemaRef.Diag(NextD->getLocStart(),
+ diag::err_designator_into_flexible_array_member)
+ << SourceRange(NextD->getLocStart(),
+ DIE->getLocEnd());
+ SemaRef.Diag(Field->getLocation(), diag::note_flexible_array_member)
+ << *Field;
+ }
+ Invalid = true;
+ }
+
+ if (!hadError && !isa<InitListExpr>(DIE->getInit()) &&
+ !isa<StringLiteral>(DIE->getInit())) {
+ // The initializer is not an initializer list.
+ if (!VerifyOnly) {
+ SemaRef.Diag(DIE->getInit()->getLocStart(),
+ diag::err_flexible_array_init_needs_braces)
+ << DIE->getInit()->getSourceRange();
+ SemaRef.Diag(Field->getLocation(), diag::note_flexible_array_member)
+ << *Field;
+ }
+ Invalid = true;
+ }
+
+ // Check GNU flexible array initializer.
+ if (!Invalid && CheckFlexibleArrayInit(Entity, DIE->getInit(), *Field,
+ TopLevelObject))
+ Invalid = true;
+
+ if (Invalid) {
+ ++Index;
+ return true;
+ }
+
+ // Initialize the array.
+ bool prevHadError = hadError;
+ unsigned newStructuredIndex = FieldIndex;
+ unsigned OldIndex = Index;
+ IList->setInit(Index, DIE->getInit());
+
+ InitializedEntity MemberEntity =
+ InitializedEntity::InitializeMember(*Field, &Entity);
+ CheckSubElementType(MemberEntity, IList, Field->getType(), Index,
+ StructuredList, newStructuredIndex);
+
+ IList->setInit(OldIndex, DIE);
+ if (hadError && !prevHadError) {
+ ++Field;
+ ++FieldIndex;
+ if (NextField)
+ *NextField = Field;
+ StructuredIndex = FieldIndex;
+ return true;
+ }
+ } else {
+ // Recurse to check later designated subobjects.
+ QualType FieldType = Field->getType();
+ unsigned newStructuredIndex = FieldIndex;
+
+ InitializedEntity MemberEntity =
+ InitializedEntity::InitializeMember(*Field, &Entity);
+ if (CheckDesignatedInitializer(MemberEntity, IList, DIE, DesigIdx + 1,
+ FieldType, nullptr, nullptr, Index,
+ StructuredList, newStructuredIndex,
+ true, false))
+ return true;
+ }
+
+ // Find the position of the next field to be initialized in this
+ // subobject.
+ ++Field;
+ ++FieldIndex;
+
+ // If this is the first designator, our caller will continue checking
+ // the rest of this struct/class/union subobject.
+ if (IsFirstDesignator) {
+ if (NextField)
+ *NextField = Field;
+ StructuredIndex = FieldIndex;
+ return false;
+ }
+
+ if (!FinishSubobjectInit)
+ return false;
+
+ // We've already initialized something in the union; we're done.
+ if (RT->getDecl()->isUnion())
+ return hadError;
+
+ // Check the remaining fields within this class/struct/union subobject.
+ bool prevHadError = hadError;
+
+ CheckStructUnionTypes(Entity, IList, CurrentObjectType, Field, false, Index,
+ StructuredList, FieldIndex);
+ return hadError && !prevHadError;
+ }
+
+ // C99 6.7.8p6:
+ //
+ // If a designator has the form
+ //
+ // [ constant-expression ]
+ //
+ // then the current object (defined below) shall have array
+ // type and the expression shall be an integer constant
+ // expression. If the array is of unknown size, any
+ // nonnegative value is valid.
+ //
+ // Additionally, cope with the GNU extension that permits
+ // designators of the form
+ //
+ // [ constant-expression ... constant-expression ]
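+ //
+ // e.g. (illustrative): int a[10] = { [2] = 1, [4 ... 6] = 2 };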
+ const ArrayType *AT = SemaRef.Context.getAsArrayType(CurrentObjectType);
+ if (!AT) {
+ if (!VerifyOnly)
+ SemaRef.Diag(D->getLBracketLoc(), diag::err_array_designator_non_array)
+ << CurrentObjectType;
+ ++Index;
+ return true;
+ }
+
+ Expr *IndexExpr = nullptr;
+ llvm::APSInt DesignatedStartIndex, DesignatedEndIndex;
+ if (D->isArrayDesignator()) {
+ IndexExpr = DIE->getArrayIndex(*D);
+ DesignatedStartIndex = IndexExpr->EvaluateKnownConstInt(SemaRef.Context);
+ DesignatedEndIndex = DesignatedStartIndex;
+ } else {
+ assert(D->isArrayRangeDesignator() && "Need array-range designator");
+
+ DesignatedStartIndex =
+ DIE->getArrayRangeStart(*D)->EvaluateKnownConstInt(SemaRef.Context);
+ DesignatedEndIndex =
+ DIE->getArrayRangeEnd(*D)->EvaluateKnownConstInt(SemaRef.Context);
+ IndexExpr = DIE->getArrayRangeEnd(*D);
+
+ // Codegen can't handle evaluating array range designators that have side
+ // effects, because we replicate the AST value for each initialized element.
+ // As such, set the sawArrayRangeDesignator() bit if we initialize multiple
+ // elements with something that has a side effect, so codegen can emit an
+ // "error unsupported" error instead of miscompiling the app.
+ if (DesignatedStartIndex.getZExtValue() !=
+ DesignatedEndIndex.getZExtValue() &&
+ DIE->getInit()->HasSideEffects(SemaRef.Context) && !VerifyOnly)
+ FullyStructuredList->sawArrayRangeDesignator();
+ }
+
+ if (isa<ConstantArrayType>(AT)) {
+ llvm::APSInt MaxElements(cast<ConstantArrayType>(AT)->getSize(), false);
+ DesignatedStartIndex
+ = DesignatedStartIndex.extOrTrunc(MaxElements.getBitWidth());
+ DesignatedStartIndex.setIsUnsigned(MaxElements.isUnsigned());
+ DesignatedEndIndex
+ = DesignatedEndIndex.extOrTrunc(MaxElements.getBitWidth());
+ DesignatedEndIndex.setIsUnsigned(MaxElements.isUnsigned());
+ if (DesignatedEndIndex >= MaxElements) {
+ if (!VerifyOnly)
+ SemaRef.Diag(IndexExpr->getLocStart(),
+ diag::err_array_designator_too_large)
+ << DesignatedEndIndex.toString(10) << MaxElements.toString(10)
+ << IndexExpr->getSourceRange();
+ ++Index;
+ return true;
+ }
+ } else {
+ unsigned DesignatedIndexBitWidth =
+ ConstantArrayType::getMaxSizeBits(SemaRef.Context);
+ DesignatedStartIndex =
+ DesignatedStartIndex.extOrTrunc(DesignatedIndexBitWidth);
+ DesignatedEndIndex =
+ DesignatedEndIndex.extOrTrunc(DesignatedIndexBitWidth);
+ DesignatedStartIndex.setIsUnsigned(true);
+ DesignatedEndIndex.setIsUnsigned(true);
+ }
+
+ if (!VerifyOnly && StructuredList->isStringLiteralInit()) {
+ // We're modifying a string literal init; we have to decompose the string
+ // so we can modify the individual characters.
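+ // e.g. (illustrative): in "struct T { char s[4]; } t = { .s = "abc",
+ // .s[1] = 'X' };" the string "abc" is decomposed into per-character
+ // IntegerLiterals so that element 1 can be overwritten.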
+ ASTContext &Context = SemaRef.Context;
+ Expr *SubExpr = StructuredList->getInit(0)->IgnoreParens();
+
+ // Compute the character type
+ QualType CharTy = AT->getElementType();
+
+ // Compute the type of the integer literals.
+ QualType PromotedCharTy = CharTy;
+ if (CharTy->isPromotableIntegerType())
+ PromotedCharTy = Context.getPromotedIntegerType(CharTy);
+ unsigned PromotedCharTyWidth = Context.getTypeSize(PromotedCharTy);
+
+ if (StringLiteral *SL = dyn_cast<StringLiteral>(SubExpr)) {
+ // Get the length of the string.
+ uint64_t StrLen = SL->getLength();
+ if (cast<ConstantArrayType>(AT)->getSize().ult(StrLen))
+ StrLen = cast<ConstantArrayType>(AT)->getSize().getZExtValue();
+ StructuredList->resizeInits(Context, StrLen);
+
+ // Build a literal for each character in the string, and put them into
+ // the init list.
+ for (unsigned i = 0, e = StrLen; i != e; ++i) {
+ llvm::APInt CodeUnit(PromotedCharTyWidth, SL->getCodeUnit(i));
+ Expr *Init = new (Context) IntegerLiteral(
+ Context, CodeUnit, PromotedCharTy, SubExpr->getExprLoc());
+ if (CharTy != PromotedCharTy)
+ Init = ImplicitCastExpr::Create(Context, CharTy, CK_IntegralCast,
+ Init, nullptr, VK_RValue);
+ StructuredList->updateInit(Context, i, Init);
+ }
+ } else {
+ ObjCEncodeExpr *E = cast<ObjCEncodeExpr>(SubExpr);
+ std::string Str;
+ Context.getObjCEncodingForType(E->getEncodedType(), Str);
+
+ // Get the length of the string.
+ uint64_t StrLen = Str.size();
+ if (cast<ConstantArrayType>(AT)->getSize().ult(StrLen))
+ StrLen = cast<ConstantArrayType>(AT)->getSize().getZExtValue();
+ StructuredList->resizeInits(Context, StrLen);
+
+ // Build a literal for each character in the string, and put them into
+ // the init list.
+ for (unsigned i = 0, e = StrLen; i != e; ++i) {
+ llvm::APInt CodeUnit(PromotedCharTyWidth, Str[i]);
+ Expr *Init = new (Context) IntegerLiteral(
+ Context, CodeUnit, PromotedCharTy, SubExpr->getExprLoc());
+ if (CharTy != PromotedCharTy)
+ Init = ImplicitCastExpr::Create(Context, CharTy, CK_IntegralCast,
+ Init, nullptr, VK_RValue);
+ StructuredList->updateInit(Context, i, Init);
+ }
+ }
+ }
+
+ // Make sure that our non-designated initializer list has space
+ // for a subobject corresponding to this array element.
+ if (!VerifyOnly &&
+ DesignatedEndIndex.getZExtValue() >= StructuredList->getNumInits())
+ StructuredList->resizeInits(SemaRef.Context,
+ DesignatedEndIndex.getZExtValue() + 1);
+
+ // Repeatedly perform subobject initializations in the range
+ // [DesignatedStartIndex, DesignatedEndIndex].
+
+ // Move to the next designator
+ unsigned ElementIndex = DesignatedStartIndex.getZExtValue();
+ unsigned OldIndex = Index;
+
+ InitializedEntity ElementEntity =
+ InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity);
+
+ while (DesignatedStartIndex <= DesignatedEndIndex) {
+ // Recurse to check later designated subobjects.
+ QualType ElementType = AT->getElementType();
+ Index = OldIndex;
+
+ ElementEntity.setElementIndex(ElementIndex);
+ if (CheckDesignatedInitializer(ElementEntity, IList, DIE, DesigIdx + 1,
+ ElementType, nullptr, nullptr, Index,
+ StructuredList, ElementIndex,
+ (DesignatedStartIndex == DesignatedEndIndex),
+ false))
+ return true;
+
+ // Move to the next index in the array that we'll be initializing.
+ ++DesignatedStartIndex;
+ ElementIndex = DesignatedStartIndex.getZExtValue();
+ }
+
+ // If this is the first designator, our caller will continue checking
+ // the rest of this array subobject.
+ if (IsFirstDesignator) {
+ if (NextElementIndex)
+ *NextElementIndex = DesignatedStartIndex;
+ StructuredIndex = ElementIndex;
+ return false;
+ }
+
+ if (!FinishSubobjectInit)
+ return false;
+
+ // Check the remaining elements within this array subobject.
+ bool prevHadError = hadError;
+ CheckArrayType(Entity, IList, CurrentObjectType, DesignatedStartIndex,
+ /*SubobjectIsDesignatorContext=*/false, Index,
+ StructuredList, ElementIndex);
+ return hadError && !prevHadError;
+}
+
+/// Get the structured initializer list for a subobject of type
+/// @p CurrentObjectType.
+InitListExpr *
+InitListChecker::getStructuredSubobjectInit(InitListExpr *IList, unsigned Index,
+ QualType CurrentObjectType,
+ InitListExpr *StructuredList,
+ unsigned StructuredIndex,
+ SourceRange InitRange,
+ bool IsFullyOverwritten) {
+ if (VerifyOnly)
+ return nullptr; // No structured list in verification-only mode.
+ Expr *ExistingInit = nullptr;
+ if (!StructuredList)
+ ExistingInit = SyntacticToSemantic.lookup(IList);
+ else if (StructuredIndex < StructuredList->getNumInits())
+ ExistingInit = StructuredList->getInit(StructuredIndex);
+
+ if (InitListExpr *Result = dyn_cast_or_null<InitListExpr>(ExistingInit))
+ // There might have already been initializers for subobjects of the current
+ // object, but a subsequent initializer list will overwrite the entirety
+ // of the current object (see DR 253 and C99 6.7.8p21), e.g.,
+ //
+ // struct P { char x[6]; };
+ // struct P l = { .x[2] = 'x', .x = { [0] = 'f' } };
+ //
+ // The first designated initializer is ignored, and l.x is just "f".
+ if (!IsFullyOverwritten)
+ return Result;
+
+ if (ExistingInit) {
+ // We are creating an initializer list that initializes the
+ // subobjects of the current object, but there was already an
+ // initialization that completely initialized the current
+ // subobject, e.g., by a compound literal:
+ //
+ // struct X { int a, b; };
+ // struct X xs[] = { [0] = (struct X) { 1, 2 }, [0].b = 3 };
+ //
+ // Here, xs[0].a == 0 and xs[0].b == 3, since the second,
+ // designated initializer re-initializes the whole
+ // subobject [0], overwriting previous initializers.
+ SemaRef.Diag(InitRange.getBegin(),
+ diag::warn_subobject_initializer_overrides)
+ << InitRange;
+ SemaRef.Diag(ExistingInit->getLocStart(),
+ diag::note_previous_initializer)
+ << /*FIXME:has side effects=*/0
+ << ExistingInit->getSourceRange();
+ }
+
+ InitListExpr *Result
+ = new (SemaRef.Context) InitListExpr(SemaRef.Context,
+ InitRange.getBegin(), None,
+ InitRange.getEnd());
+
+ QualType ResultType = CurrentObjectType;
+ if (!ResultType->isArrayType())
+ ResultType = ResultType.getNonLValueExprType(SemaRef.Context);
+ Result->setType(ResultType);
+
+ // Pre-allocate storage for the structured initializer list.
+ unsigned NumElements = 0;
+ unsigned NumInits = 0;
+ bool GotNumInits = false;
+ if (!StructuredList) {
+ NumInits = IList->getNumInits();
+ GotNumInits = true;
+ } else if (Index < IList->getNumInits()) {
+ if (InitListExpr *SubList = dyn_cast<InitListExpr>(IList->getInit(Index))) {
+ NumInits = SubList->getNumInits();
+ GotNumInits = true;
+ }
+ }
+
+ if (const ArrayType *AType
+ = SemaRef.Context.getAsArrayType(CurrentObjectType)) {
+ if (const ConstantArrayType *CAType = dyn_cast<ConstantArrayType>(AType)) {
+ NumElements = CAType->getSize().getZExtValue();
+ // Simple heuristic so that we don't allocate a very large
+ // initializer with many empty entries at the end.
+ if (GotNumInits && NumElements > NumInits)
+ NumElements = 0;
+ }
+ } else if (const VectorType *VType = CurrentObjectType->getAs<VectorType>())
+ NumElements = VType->getNumElements();
+ else if (const RecordType *RType = CurrentObjectType->getAs<RecordType>()) {
+ RecordDecl *RDecl = RType->getDecl();
+ if (RDecl->isUnion())
+ NumElements = 1;
+ else
+ NumElements = std::distance(RDecl->field_begin(), RDecl->field_end());
+ }
+
+ Result->reserveInits(SemaRef.Context, NumElements);
+
+ // Link this new initializer list into the structured initializer
+ // lists.
+ if (StructuredList)
+ StructuredList->updateInit(SemaRef.Context, StructuredIndex, Result);
+ else {
+ Result->setSyntacticForm(IList);
+ SyntacticToSemantic[IList] = Result;
+ }
+
+ return Result;
+}
+
+/// Update the initializer at index @p StructuredIndex within the
+/// structured initializer list to the value @p expr.
+void InitListChecker::UpdateStructuredListElement(InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ Expr *expr) {
+ // No structured initializer list to update
+ if (!StructuredList)
+ return;
+
+ if (Expr *PrevInit = StructuredList->updateInit(SemaRef.Context,
+ StructuredIndex, expr)) {
+ // This initializer overwrites a previous initializer. Warn.
+ // We need to check on source range validity because the previous
+ // initializer does not have to be an explicit initializer.
+ // struct P { int a, b; };
+ // struct PP { struct P p; } l = { { .a = 2 }, .p.b = 3 };
+ // There is an overwrite taking place because the first braced initializer
+ // list "{ .a = 2 }" already provides a value for .p.b (which is zero).
+ if (PrevInit->getSourceRange().isValid()) {
+ SemaRef.Diag(expr->getLocStart(),
+ diag::warn_initializer_overrides)
+ << expr->getSourceRange();
+
+ SemaRef.Diag(PrevInit->getLocStart(),
+ diag::note_previous_initializer)
+ << /*FIXME:has side effects=*/0
+ << PrevInit->getSourceRange();
+ }
+ }
+
+ ++StructuredIndex;
+}
+
+/// Check that the given Index expression is a valid array designator
+/// value. This is essentially just a wrapper around
+/// VerifyIntegerConstantExpression that also checks for negative values
+/// and produces a reasonable diagnostic if there is a
+/// failure. Returns the index expression, possibly with an implicit cast
+/// added, on success. If everything went okay, Value will receive the
+/// value of the constant expression.
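+///
+/// An illustrative (hypothetical) example: "int a[5] = { [-1] = 0 };" is
+/// rejected here with err_array_designator_negative.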
+static ExprResult
+CheckArrayDesignatorExpr(Sema &S, Expr *Index, llvm::APSInt &Value) {
+ SourceLocation Loc = Index->getLocStart();
+
+ // Make sure this is an integer constant expression.
+ ExprResult Result = S.VerifyIntegerConstantExpression(Index, &Value);
+ if (Result.isInvalid())
+ return Result;
+
+ if (Value.isSigned() && Value.isNegative())
+ return S.Diag(Loc, diag::err_array_designator_negative)
+ << Value.toString(10) << Index->getSourceRange();
+
+ Value.setIsUnsigned(true);
+ return Result;
+}
+
+ExprResult Sema::ActOnDesignatedInitializer(Designation &Desig,
+ SourceLocation Loc,
+ bool GNUSyntax,
+ ExprResult Init) {
+ typedef DesignatedInitExpr::Designator ASTDesignator;
+
+ bool Invalid = false;
+ SmallVector<ASTDesignator, 32> Designators;
+ SmallVector<Expr *, 32> InitExpressions;
+
+ // Build designators and check array designator expressions.
+ for (unsigned Idx = 0; Idx < Desig.getNumDesignators(); ++Idx) {
+ const Designator &D = Desig.getDesignator(Idx);
+ switch (D.getKind()) {
+ case Designator::FieldDesignator:
+ Designators.push_back(ASTDesignator(D.getField(), D.getDotLoc(),
+ D.getFieldLoc()));
+ break;
+
+ case Designator::ArrayDesignator: {
+ Expr *Index = static_cast<Expr *>(D.getArrayIndex());
+ llvm::APSInt IndexValue;
+ if (!Index->isTypeDependent() && !Index->isValueDependent())
+ Index = CheckArrayDesignatorExpr(*this, Index, IndexValue).get();
+ if (!Index)
+ Invalid = true;
+ else {
+ Designators.push_back(ASTDesignator(InitExpressions.size(),
+ D.getLBracketLoc(),
+ D.getRBracketLoc()));
+ InitExpressions.push_back(Index);
+ }
+ break;
+ }
+
+ case Designator::ArrayRangeDesignator: {
+ Expr *StartIndex = static_cast<Expr *>(D.getArrayRangeStart());
+ Expr *EndIndex = static_cast<Expr *>(D.getArrayRangeEnd());
+ llvm::APSInt StartValue;
+ llvm::APSInt EndValue;
+ bool StartDependent = StartIndex->isTypeDependent() ||
+ StartIndex->isValueDependent();
+ bool EndDependent = EndIndex->isTypeDependent() ||
+ EndIndex->isValueDependent();
+ if (!StartDependent)
+ StartIndex =
+ CheckArrayDesignatorExpr(*this, StartIndex, StartValue).get();
+ if (!EndDependent)
+ EndIndex = CheckArrayDesignatorExpr(*this, EndIndex, EndValue).get();
+
+ if (!StartIndex || !EndIndex)
+ Invalid = true;
+ else {
+ // Make sure we're comparing values with the same bit width.
+ if (StartDependent || EndDependent) {
+ // Nothing to compute.
+ } else if (StartValue.getBitWidth() > EndValue.getBitWidth())
+ EndValue = EndValue.extend(StartValue.getBitWidth());
+ else if (StartValue.getBitWidth() < EndValue.getBitWidth())
+ StartValue = StartValue.extend(EndValue.getBitWidth());
+
+ if (!StartDependent && !EndDependent && EndValue < StartValue) {
+ Diag(D.getEllipsisLoc(), diag::err_array_designator_empty_range)
+ << StartValue.toString(10) << EndValue.toString(10)
+ << StartIndex->getSourceRange() << EndIndex->getSourceRange();
+ Invalid = true;
+ } else {
+ Designators.push_back(ASTDesignator(InitExpressions.size(),
+ D.getLBracketLoc(),
+ D.getEllipsisLoc(),
+ D.getRBracketLoc()));
+ InitExpressions.push_back(StartIndex);
+ InitExpressions.push_back(EndIndex);
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ if (Invalid || Init.isInvalid())
+ return ExprError();
+
+ // Clear out the expressions within the designation.
+ Desig.ClearExprs(*this);
+
+ DesignatedInitExpr *DIE
+ = DesignatedInitExpr::Create(Context,
+ Designators.data(), Designators.size(),
+ InitExpressions, Loc, GNUSyntax,
+ Init.getAs<Expr>());
+
+ if (!getLangOpts().C99)
+ Diag(DIE->getLocStart(), diag::ext_designated_init)
+ << DIE->getSourceRange();
+
+ return DIE;
+}
+
+//===----------------------------------------------------------------------===//
+// Initialization entity
+//===----------------------------------------------------------------------===//
+
+InitializedEntity::InitializedEntity(ASTContext &Context, unsigned Index,
+ const InitializedEntity &Parent)
+ : Parent(&Parent), Index(Index)
+{
+ if (const ArrayType *AT = Context.getAsArrayType(Parent.getType())) {
+ Kind = EK_ArrayElement;
+ Type = AT->getElementType();
+ } else if (const VectorType *VT = Parent.getType()->getAs<VectorType>()) {
+ Kind = EK_VectorElement;
+ Type = VT->getElementType();
+ } else {
+ const ComplexType *CT = Parent.getType()->getAs<ComplexType>();
+ assert(CT && "Unexpected type");
+ Kind = EK_ComplexElement;
+ Type = CT->getElementType();
+ }
+}
+
+InitializedEntity
+InitializedEntity::InitializeBase(ASTContext &Context,
+ const CXXBaseSpecifier *Base,
+ bool IsInheritedVirtualBase) {
+ InitializedEntity Result;
+ Result.Kind = EK_Base;
+ Result.Parent = nullptr;
+ Result.Base = reinterpret_cast<uintptr_t>(Base);
+ if (IsInheritedVirtualBase)
+ Result.Base |= 0x01;
+
+ Result.Type = Base->getType();
+ return Result;
+}
+
+DeclarationName InitializedEntity::getName() const {
+ switch (getKind()) {
+ case EK_Parameter:
+ case EK_Parameter_CF_Audited: {
+ ParmVarDecl *D = reinterpret_cast<ParmVarDecl*>(Parameter & ~0x1);
+ return (D ? D->getDeclName() : DeclarationName());
+ }
+
+ case EK_Variable:
+ case EK_Member:
+ return VariableOrMember->getDeclName();
+
+ case EK_LambdaCapture:
+ return DeclarationName(Capture.VarID);
+
+ case EK_Result:
+ case EK_Exception:
+ case EK_New:
+ case EK_Temporary:
+ case EK_Base:
+ case EK_Delegating:
+ case EK_ArrayElement:
+ case EK_VectorElement:
+ case EK_ComplexElement:
+ case EK_BlockElement:
+ case EK_CompoundLiteralInit:
+ case EK_RelatedResult:
+ return DeclarationName();
+ }
+
+ llvm_unreachable("Invalid EntityKind!");
+}
+
+DeclaratorDecl *InitializedEntity::getDecl() const {
+ switch (getKind()) {
+ case EK_Variable:
+ case EK_Member:
+ return VariableOrMember;
+
+ case EK_Parameter:
+ case EK_Parameter_CF_Audited:
+ return reinterpret_cast<ParmVarDecl*>(Parameter & ~0x1);
+
+ case EK_Result:
+ case EK_Exception:
+ case EK_New:
+ case EK_Temporary:
+ case EK_Base:
+ case EK_Delegating:
+ case EK_ArrayElement:
+ case EK_VectorElement:
+ case EK_ComplexElement:
+ case EK_BlockElement:
+ case EK_LambdaCapture:
+ case EK_CompoundLiteralInit:
+ case EK_RelatedResult:
+ return nullptr;
+ }
+
+ llvm_unreachable("Invalid EntityKind!");
+}
+
+bool InitializedEntity::allowsNRVO() const {
+ switch (getKind()) {
+ case EK_Result:
+ case EK_Exception:
+ return LocAndNRVO.NRVO;
+
+ case EK_Variable:
+ case EK_Parameter:
+ case EK_Parameter_CF_Audited:
+ case EK_Member:
+ case EK_New:
+ case EK_Temporary:
+ case EK_CompoundLiteralInit:
+ case EK_Base:
+ case EK_Delegating:
+ case EK_ArrayElement:
+ case EK_VectorElement:
+ case EK_ComplexElement:
+ case EK_BlockElement:
+ case EK_LambdaCapture:
+ case EK_RelatedResult:
+ break;
+ }
+
+ return false;
+}
+
+unsigned InitializedEntity::dumpImpl(raw_ostream &OS) const {
+ assert(getParent() != this);
+ unsigned Depth = getParent() ? getParent()->dumpImpl(OS) : 0;
+ for (unsigned I = 0; I != Depth; ++I)
+ OS << "`-";
+
+ switch (getKind()) {
+ case EK_Variable: OS << "Variable"; break;
+ case EK_Parameter: OS << "Parameter"; break;
+ case EK_Parameter_CF_Audited: OS << "CF audited function Parameter";
+ break;
+ case EK_Result: OS << "Result"; break;
+ case EK_Exception: OS << "Exception"; break;
+ case EK_Member: OS << "Member"; break;
+ case EK_New: OS << "New"; break;
+ case EK_Temporary: OS << "Temporary"; break;
+ case EK_CompoundLiteralInit: OS << "CompoundLiteral"; break;
+ case EK_RelatedResult: OS << "RelatedResult"; break;
+ case EK_Base: OS << "Base"; break;
+ case EK_Delegating: OS << "Delegating"; break;
+ case EK_ArrayElement: OS << "ArrayElement " << Index; break;
+ case EK_VectorElement: OS << "VectorElement " << Index; break;
+ case EK_ComplexElement: OS << "ComplexElement " << Index; break;
+ case EK_BlockElement: OS << "Block"; break;
+ case EK_LambdaCapture:
+ OS << "LambdaCapture ";
+ OS << DeclarationName(Capture.VarID);
+ break;
+ }
+
+ if (Decl *D = getDecl()) {
+ OS << " ";
+ cast<NamedDecl>(D)->printQualifiedName(OS);
+ }
+
+ OS << " '" << getType().getAsString() << "'\n";
+
+ return Depth + 1;
+}
+
+void InitializedEntity::dump() const {
+ dumpImpl(llvm::errs());
+}
+
+//===----------------------------------------------------------------------===//
+// Initialization sequence
+//===----------------------------------------------------------------------===//
+
+void InitializationSequence::Step::Destroy() {
+ switch (Kind) {
+ case SK_ResolveAddressOfOverloadedFunction:
+ case SK_CastDerivedToBaseRValue:
+ case SK_CastDerivedToBaseXValue:
+ case SK_CastDerivedToBaseLValue:
+ case SK_BindReference:
+ case SK_BindReferenceToTemporary:
+ case SK_ExtraneousCopyToTemporary:
+ case SK_UserConversion:
+ case SK_QualificationConversionRValue:
+ case SK_QualificationConversionXValue:
+ case SK_QualificationConversionLValue:
+ case SK_AtomicConversion:
+ case SK_LValueToRValue:
+ case SK_ListInitialization:
+ case SK_UnwrapInitList:
+ case SK_RewrapInitList:
+ case SK_ConstructorInitialization:
+ case SK_ConstructorInitializationFromList:
+ case SK_ZeroInitialization:
+ case SK_CAssignment:
+ case SK_StringInit:
+ case SK_ObjCObjectConversion:
+ case SK_ArrayInit:
+ case SK_ParenthesizedArrayInit:
+ case SK_PassByIndirectCopyRestore:
+ case SK_PassByIndirectRestore:
+ case SK_ProduceObjCObject:
+ case SK_StdInitializerList:
+ case SK_StdInitializerListConstructorCall:
+ case SK_OCLSamplerInit:
+ case SK_OCLZeroEvent:
+ break;
+
+ case SK_ConversionSequence:
+ case SK_ConversionSequenceNoNarrowing:
+ delete ICS;
+ }
+}
+
+bool InitializationSequence::isDirectReferenceBinding() const {
+ return !Steps.empty() && Steps.back().Kind == SK_BindReference;
+}
+
+bool InitializationSequence::isAmbiguous() const {
+ if (!Failed())
+ return false;
+
+ switch (getFailureKind()) {
+ case FK_TooManyInitsForReference:
+ case FK_ArrayNeedsInitList:
+ case FK_ArrayNeedsInitListOrStringLiteral:
+ case FK_ArrayNeedsInitListOrWideStringLiteral:
+ case FK_NarrowStringIntoWideCharArray:
+ case FK_WideStringIntoCharArray:
+ case FK_IncompatWideStringIntoWideChar:
+ case FK_AddressOfOverloadFailed: // FIXME: Could do better
+ case FK_NonConstLValueReferenceBindingToTemporary:
+ case FK_NonConstLValueReferenceBindingToUnrelated:
+ case FK_RValueReferenceBindingToLValue:
+ case FK_ReferenceInitDropsQualifiers:
+ case FK_ReferenceInitFailed:
+ case FK_ConversionFailed:
+ case FK_ConversionFromPropertyFailed:
+ case FK_TooManyInitsForScalar:
+ case FK_ReferenceBindingToInitList:
+ case FK_InitListBadDestinationType:
+ case FK_DefaultInitOfConst:
+ case FK_Incomplete:
+ case FK_ArrayTypeMismatch:
+ case FK_NonConstantArrayInit:
+ case FK_ListInitializationFailed:
+ case FK_VariableLengthArrayHasInitializer:
+ case FK_PlaceholderType:
+ case FK_ExplicitConstructor:
+ case FK_AddressOfUnaddressableFunction:
+ return false;
+
+ case FK_ReferenceInitOverloadFailed:
+ case FK_UserConversionOverloadFailed:
+ case FK_ConstructorOverloadFailed:
+ case FK_ListConstructorOverloadFailed:
+ return FailedOverloadResult == OR_Ambiguous;
+ }
+
+ llvm_unreachable("Invalid EntityKind!");
+}
+
+bool InitializationSequence::isConstructorInitialization() const {
+ return !Steps.empty() && Steps.back().Kind == SK_ConstructorInitialization;
+}
+
+void
+InitializationSequence
+::AddAddressOverloadResolutionStep(FunctionDecl *Function,
+ DeclAccessPair Found,
+ bool HadMultipleCandidates) {
+ Step S;
+ S.Kind = SK_ResolveAddressOfOverloadedFunction;
+ S.Type = Function->getType();
+ S.Function.HadMultipleCandidates = HadMultipleCandidates;
+ S.Function.Function = Function;
+ S.Function.FoundDecl = Found;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddDerivedToBaseCastStep(QualType BaseType,
+ ExprValueKind VK) {
+ Step S;
+ switch (VK) {
+ case VK_RValue: S.Kind = SK_CastDerivedToBaseRValue; break;
+ case VK_XValue: S.Kind = SK_CastDerivedToBaseXValue; break;
+ case VK_LValue: S.Kind = SK_CastDerivedToBaseLValue; break;
+ }
+ S.Type = BaseType;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddReferenceBindingStep(QualType T,
+ bool BindingTemporary) {
+ Step S;
+ S.Kind = BindingTemporary? SK_BindReferenceToTemporary : SK_BindReference;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddExtraneousCopyToTemporary(QualType T) {
+ Step S;
+ S.Kind = SK_ExtraneousCopyToTemporary;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void
+InitializationSequence::AddUserConversionStep(FunctionDecl *Function,
+ DeclAccessPair FoundDecl,
+ QualType T,
+ bool HadMultipleCandidates) {
+ Step S;
+ S.Kind = SK_UserConversion;
+ S.Type = T;
+ S.Function.HadMultipleCandidates = HadMultipleCandidates;
+ S.Function.Function = Function;
+ S.Function.FoundDecl = FoundDecl;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddQualificationConversionStep(QualType Ty,
+ ExprValueKind VK) {
+ Step S;
+ S.Kind = SK_QualificationConversionRValue; // work around a gcc warning
+ switch (VK) {
+ case VK_RValue:
+ S.Kind = SK_QualificationConversionRValue;
+ break;
+ case VK_XValue:
+ S.Kind = SK_QualificationConversionXValue;
+ break;
+ case VK_LValue:
+ S.Kind = SK_QualificationConversionLValue;
+ break;
+ }
+ S.Type = Ty;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddAtomicConversionStep(QualType Ty) {
+ Step S;
+ S.Kind = SK_AtomicConversion;
+ S.Type = Ty;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddLValueToRValueStep(QualType Ty) {
+ assert(!Ty.hasQualifiers() && "rvalues may not have qualifiers");
+
+ Step S;
+ S.Kind = SK_LValueToRValue;
+ S.Type = Ty;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddConversionSequenceStep(
+ const ImplicitConversionSequence &ICS, QualType T,
+ bool TopLevelOfInitList) {
+ Step S;
+ S.Kind = TopLevelOfInitList ? SK_ConversionSequenceNoNarrowing
+ : SK_ConversionSequence;
+ S.Type = T;
+ S.ICS = new ImplicitConversionSequence(ICS);
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddListInitializationStep(QualType T) {
+ Step S;
+ S.Kind = SK_ListInitialization;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void
+InitializationSequence
+::AddConstructorInitializationStep(CXXConstructorDecl *Constructor,
+ AccessSpecifier Access,
+ QualType T,
+ bool HadMultipleCandidates,
+ bool FromInitList, bool AsInitList) {
+ Step S;
+ S.Kind = FromInitList ? AsInitList ? SK_StdInitializerListConstructorCall
+ : SK_ConstructorInitializationFromList
+ : SK_ConstructorInitialization;
+ S.Type = T;
+ S.Function.HadMultipleCandidates = HadMultipleCandidates;
+ S.Function.Function = Constructor;
+ S.Function.FoundDecl = DeclAccessPair::make(Constructor, Access);
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddZeroInitializationStep(QualType T) {
+ Step S;
+ S.Kind = SK_ZeroInitialization;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddCAssignmentStep(QualType T) {
+ Step S;
+ S.Kind = SK_CAssignment;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddStringInitStep(QualType T) {
+ Step S;
+ S.Kind = SK_StringInit;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddObjCObjectConversionStep(QualType T) {
+ Step S;
+ S.Kind = SK_ObjCObjectConversion;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddArrayInitStep(QualType T) {
+ Step S;
+ S.Kind = SK_ArrayInit;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddParenthesizedArrayInitStep(QualType T) {
+ Step S;
+ S.Kind = SK_ParenthesizedArrayInit;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddPassByIndirectCopyRestoreStep(QualType type,
+ bool shouldCopy) {
+ Step s;
+ s.Kind = (shouldCopy ? SK_PassByIndirectCopyRestore
+ : SK_PassByIndirectRestore);
+ s.Type = type;
+ Steps.push_back(s);
+}
+
+void InitializationSequence::AddProduceObjCObjectStep(QualType T) {
+ Step S;
+ S.Kind = SK_ProduceObjCObject;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddStdInitializerListConstructionStep(QualType T) {
+ Step S;
+ S.Kind = SK_StdInitializerList;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddOCLSamplerInitStep(QualType T) {
+ Step S;
+ S.Kind = SK_OCLSamplerInit;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddOCLZeroEventStep(QualType T) {
+ Step S;
+ S.Kind = SK_OCLZeroEvent;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::RewrapReferenceInitList(QualType T,
+ InitListExpr *Syntactic) {
+ assert(Syntactic->getNumInits() == 1 &&
+ "Can only rewrap trivial init lists.");
+ Step S;
+ S.Kind = SK_UnwrapInitList;
+ S.Type = Syntactic->getInit(0)->getType();
+ Steps.insert(Steps.begin(), S);
+
+ S.Kind = SK_RewrapInitList;
+ S.Type = T;
+ S.WrappingSyntacticList = Syntactic;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::SetOverloadFailure(FailureKind Failure,
+ OverloadingResult Result) {
+ setSequenceKind(FailedSequence);
+ this->Failure = Failure;
+ this->FailedOverloadResult = Result;
+}
+
+//===----------------------------------------------------------------------===//
+// Attempt initialization
+//===----------------------------------------------------------------------===//
+
+/// Tries to add a zero initializer. Returns true if that worked.
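+/// e.g. (illustrative): for "const int n;" this can suggest the fixit
+/// "const int n = 0;".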
+static bool
+maybeRecoverWithZeroInitialization(Sema &S, InitializationSequence &Sequence,
+ const InitializedEntity &Entity) {
+ if (Entity.getKind() != InitializedEntity::EK_Variable)
+ return false;
+
+ VarDecl *VD = cast<VarDecl>(Entity.getDecl());
+ if (VD->getInit() || VD->getLocEnd().isMacroID())
+ return false;
+
+ QualType VariableTy = VD->getType().getCanonicalType();
+ SourceLocation Loc = S.getLocForEndOfToken(VD->getLocEnd());
+ std::string Init = S.getFixItZeroInitializerForType(VariableTy, Loc);
+ if (!Init.empty()) {
+ Sequence.AddZeroInitializationStep(Entity.getType());
+ Sequence.SetZeroInitializationFixit(Init, Loc);
+ return true;
+ }
+ return false;
+}
+
+static void MaybeProduceObjCObject(Sema &S,
+ InitializationSequence &Sequence,
+ const InitializedEntity &Entity) {
+ if (!S.getLangOpts().ObjCAutoRefCount) return;
+
+ // When initializing a parameter, produce the value if it's marked
+ // __attribute__((ns_consumed)).
+ if (Entity.isParameterKind()) {
+ if (!Entity.isParameterConsumed())
+ return;
+
+ assert(Entity.getType()->isObjCRetainableType() &&
+ "consuming an object of unretainable type?");
+ Sequence.AddProduceObjCObjectStep(Entity.getType());
+
+ // When initializing a return value, if the return type is a
+ // retainable type, then returns need to immediately retain the
+ // object. If an autorelease is required, it will be done at the
+ // last instant.
+ } else if (Entity.getKind() == InitializedEntity::EK_Result) {
+ if (!Entity.getType()->isObjCRetainableType())
+ return;
+
+ Sequence.AddProduceObjCObjectStep(Entity.getType());
+ }
+}
+
+static void TryListInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ InitListExpr *InitList,
+ InitializationSequence &Sequence);
+
+/// \brief When initializing from init list via constructor, handle
+/// initialization of an object of type std::initializer_list<T>.
+///
+/// \return true if we have handled initialization of an object of type
+/// std::initializer_list<T>, false otherwise.
+static bool TryInitializerListConstruction(Sema &S,
+ InitListExpr *List,
+ QualType DestType,
+ InitializationSequence &Sequence) {
+ QualType E;
+ if (!S.isStdInitializerList(DestType, &E))
+ return false;
+
+ if (!S.isCompleteType(List->getExprLoc(), E)) {
+ Sequence.setIncompleteTypeFailure(E);
+ return true;
+ }
+
+ // Try initializing a temporary array from the init list.
+ QualType ArrayType = S.Context.getConstantArrayType(
+ E.withConst(), llvm::APInt(S.Context.getTypeSize(S.Context.getSizeType()),
+ List->getNumInits()),
+ clang::ArrayType::Normal, 0);
+ InitializedEntity HiddenArray =
+ InitializedEntity::InitializeTemporary(ArrayType);
+ InitializationKind Kind =
+ InitializationKind::CreateDirectList(List->getExprLoc());
+ TryListInitialization(S, HiddenArray, Kind, List, Sequence);
+ if (Sequence)
+ Sequence.AddStdInitializerListConstructionStep(DestType);
+ return true;
+}
+
+static OverloadingResult
+ResolveConstructorOverload(Sema &S, SourceLocation DeclLoc,
+ MultiExprArg Args,
+ OverloadCandidateSet &CandidateSet,
+ DeclContext::lookup_result Ctors,
+ OverloadCandidateSet::iterator &Best,
+ bool CopyInitializing, bool AllowExplicit,
+ bool OnlyListConstructors, bool IsListInit) {
+ CandidateSet.clear();
+
+ for (NamedDecl *D : Ctors) {
+ DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
+ bool SuppressUserConversions = false;
+
+ // Find the constructor (which may be a template).
+ CXXConstructorDecl *Constructor = nullptr;
+ FunctionTemplateDecl *ConstructorTmpl = dyn_cast<FunctionTemplateDecl>(D);
+ if (ConstructorTmpl)
+ Constructor = cast<CXXConstructorDecl>(
+ ConstructorTmpl->getTemplatedDecl());
+ else {
+ Constructor = cast<CXXConstructorDecl>(D);
+
+ // C++11 [over.best.ics]p4:
+ // ... and the constructor or user-defined conversion function is a
+ // candidate by
+ // - 13.3.1.3, when the argument is the temporary in the second step
+ // of a class copy-initialization, or
+ // - 13.3.1.4, 13.3.1.5, or 13.3.1.6 (in all cases),
+ // user-defined conversion sequences are not considered.
+ // FIXME: This breaks backward compatibility, e.g. PR12117. As a
+ // temporary fix, let's re-instate the third bullet above until
+ // there is a resolution in the standard, i.e.,
+ // - 13.3.1.7 when the initializer list has exactly one element that is
+ // itself an initializer list and a conversion to some class X or
+ // reference to (possibly cv-qualified) X is considered for the first
+ // parameter of a constructor of X.
+ if ((CopyInitializing ||
+ (IsListInit && Args.size() == 1 && isa<InitListExpr>(Args[0]))) &&
+ Constructor->isCopyOrMoveConstructor())
+ SuppressUserConversions = true;
+ }
+
+ if (!Constructor->isInvalidDecl() &&
+ (AllowExplicit || !Constructor->isExplicit()) &&
+ (!OnlyListConstructors || S.isInitListConstructor(Constructor))) {
+ if (ConstructorTmpl)
+ S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
+ /*ExplicitArgs*/ nullptr, Args,
+ CandidateSet, SuppressUserConversions);
+ else {
+ // C++ [over.match.copy]p1:
+ // - When initializing a temporary to be bound to the first parameter
+ // of a constructor that takes a reference to possibly cv-qualified
+ // T as its first argument, called with a single argument in the
+ // context of direct-initialization, explicit conversion functions
+ // are also considered.
+ bool AllowExplicitConv = AllowExplicit && !CopyInitializing &&
+ Args.size() == 1 &&
+ Constructor->isCopyOrMoveConstructor();
+ S.AddOverloadCandidate(Constructor, FoundDecl, Args, CandidateSet,
+ SuppressUserConversions,
+ /*PartialOverloading=*/false,
+ /*AllowExplicit=*/AllowExplicitConv);
+ }
+ }
+ }
+
+ // Perform overload resolution and return the result.
+ return CandidateSet.BestViableFunction(S, DeclLoc, Best);
+}
+
+/// \brief Attempt initialization by constructor (C++ [dcl.init]), which
+/// enumerates the constructors of the initialized entity and performs overload
+/// resolution to select the best.
+/// \param IsListInit Is this list-initialization?
+/// \param IsInitListCopy Is this non-list-initialization resulting from a
+/// list-initialization from {x} where x is the same
+/// type as the entity?
+static void TryConstructorInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ MultiExprArg Args, QualType DestType,
+ InitializationSequence &Sequence,
+ bool IsListInit = false,
+ bool IsInitListCopy = false) {
+ assert((!IsListInit || (Args.size() == 1 && isa<InitListExpr>(Args[0]))) &&
+ "IsListInit must come with a single initializer list argument.");
+
+ // The type we're constructing needs to be complete.
+ if (!S.isCompleteType(Kind.getLocation(), DestType)) {
+ Sequence.setIncompleteTypeFailure(DestType);
+ return;
+ }
+
+ const RecordType *DestRecordType = DestType->getAs<RecordType>();
+ assert(DestRecordType && "Constructor initialization requires record type");
+ CXXRecordDecl *DestRecordDecl
+ = cast<CXXRecordDecl>(DestRecordType->getDecl());
+
+ // Build the candidate set directly in the initialization sequence
+ // structure, so that it will persist if we fail.
+ OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();
+
+ // Determine whether we are allowed to call explicit constructors or
+ // explicit conversion operators.
+ bool AllowExplicit = Kind.AllowExplicit() || IsListInit;
+ bool CopyInitialization = Kind.getKind() == InitializationKind::IK_Copy;
+
+ // - Otherwise, if T is a class type, constructors are considered. The
+ // applicable constructors are enumerated, and the best one is chosen
+ // through overload resolution.
+ DeclContext::lookup_result Ctors = S.LookupConstructors(DestRecordDecl);
+
+ OverloadingResult Result = OR_No_Viable_Function;
+ OverloadCandidateSet::iterator Best;
+ bool AsInitializerList = false;
+
+ // C++11 [over.match.list]p1, per DR1467:
+ // When objects of non-aggregate type T are list-initialized, such that
+ // 8.5.4 [dcl.init.list] specifies that overload resolution is performed
+ // according to the rules in this section, overload resolution selects
+ // the constructor in two phases:
+ //
+ // - Initially, the candidate functions are the initializer-list
+ // constructors of the class T and the argument list consists of the
+ // initializer list as a single argument.
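+ //
+ // e.g. (illustrative): for "std::vector<int> v{1, 2};" this first phase
+ // considers only vector's initializer-list constructors, with {1, 2}
+ // passed as a single argument.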
+ if (IsListInit) {
+ InitListExpr *ILE = cast<InitListExpr>(Args[0]);
+ AsInitializerList = true;
+
+ // If the initializer list has no elements and T has a default constructor,
+ // the first phase is omitted.
+ if (ILE->getNumInits() != 0 || !DestRecordDecl->hasDefaultConstructor())
+ Result = ResolveConstructorOverload(S, Kind.getLocation(), Args,
+ CandidateSet, Ctors, Best,
+ CopyInitialization, AllowExplicit,
+ /*OnlyListConstructors=*/true,
+ IsListInit);
+
+ // Time to unwrap the init list.
+ Args = MultiExprArg(ILE->getInits(), ILE->getNumInits());
+ }
+
+ // C++11 [over.match.list]p1:
+ // - If no viable initializer-list constructor is found, overload resolution
+ // is performed again, where the candidate functions are all the
+ // constructors of the class T and the argument list consists of the
+ // elements of the initializer list.
+ if (Result == OR_No_Viable_Function) {
+ AsInitializerList = false;
+ Result = ResolveConstructorOverload(S, Kind.getLocation(), Args,
+ CandidateSet, Ctors, Best,
+ CopyInitialization, AllowExplicit,
+ /*OnlyListConstructors=*/false,
+ IsListInit);
+ }
+ if (Result) {
+ Sequence.SetOverloadFailure(IsListInit ?
+ InitializationSequence::FK_ListConstructorOverloadFailed :
+ InitializationSequence::FK_ConstructorOverloadFailed,
+ Result);
+ return;
+ }
+
+ // C++11 [dcl.init]p6:
+ // If a program calls for the default initialization of an object
+ // of a const-qualified type T, T shall be a class type with a
+ // user-provided default constructor.
+ if (Kind.getKind() == InitializationKind::IK_Default &&
+ Entity.getType().isConstQualified() &&
+ !cast<CXXConstructorDecl>(Best->Function)->isUserProvided()) {
+ if (!maybeRecoverWithZeroInitialization(S, Sequence, Entity))
+ Sequence.SetFailed(InitializationSequence::FK_DefaultInitOfConst);
+ return;
+ }
+
+ // C++11 [over.match.list]p1:
+ // In copy-list-initialization, if an explicit constructor is chosen, the
+ // initializer is ill-formed.
+ CXXConstructorDecl *CtorDecl = cast<CXXConstructorDecl>(Best->Function);
+ if (IsListInit && !Kind.AllowExplicit() && CtorDecl->isExplicit()) {
+ Sequence.SetFailed(InitializationSequence::FK_ExplicitConstructor);
+ return;
+ }
+
+ // Add the constructor initialization step. Any cv-qualification conversion is
+ // subsumed by the initialization.
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+ Sequence.AddConstructorInitializationStep(
+ CtorDecl, Best->FoundDecl.getAccess(), DestType, HadMultipleCandidates,
+ IsListInit | IsInitListCopy, AsInitializerList);
+}
+
+static bool
+ResolveOverloadedFunctionForReferenceBinding(Sema &S,
+ Expr *Initializer,
+ QualType &SourceType,
+ QualType &UnqualifiedSourceType,
+ QualType UnqualifiedTargetType,
+ InitializationSequence &Sequence) {
+ if (S.Context.getCanonicalType(UnqualifiedSourceType) ==
+ S.Context.OverloadTy) {
+ DeclAccessPair Found;
+ bool HadMultipleCandidates = false;
+ if (FunctionDecl *Fn
+ = S.ResolveAddressOfOverloadedFunction(Initializer,
+ UnqualifiedTargetType,
+ false, Found,
+ &HadMultipleCandidates)) {
+ Sequence.AddAddressOverloadResolutionStep(Fn, Found,
+ HadMultipleCandidates);
+ SourceType = Fn->getType();
+ UnqualifiedSourceType = SourceType.getUnqualifiedType();
+ } else if (!UnqualifiedTargetType->isRecordType()) {
+ Sequence.SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
+ return true;
+ }
+ }
+ return false;
+}
+
+static void TryReferenceInitializationCore(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ Expr *Initializer,
+ QualType cv1T1, QualType T1,
+ Qualifiers T1Quals,
+ QualType cv2T2, QualType T2,
+ Qualifiers T2Quals,
+ InitializationSequence &Sequence);
+
+static void TryValueInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ InitializationSequence &Sequence,
+ InitListExpr *InitList = nullptr);
+
+/// \brief Attempt list initialization of a reference.
+static void TryReferenceListInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ InitListExpr *InitList,
+ InitializationSequence &Sequence) {
+ // First, catch C++03 where this isn't possible.
+ if (!S.getLangOpts().CPlusPlus11) {
+ Sequence.SetFailed(InitializationSequence::FK_ReferenceBindingToInitList);
+ return;
+ }
+ // Can't reference initialize a compound literal.
+ if (Entity.getKind() == InitializedEntity::EK_CompoundLiteralInit) {
+ Sequence.SetFailed(InitializationSequence::FK_ReferenceBindingToInitList);
+ return;
+ }
+
+ QualType DestType = Entity.getType();
+ QualType cv1T1 = DestType->getAs<ReferenceType>()->getPointeeType();
+ Qualifiers T1Quals;
+ QualType T1 = S.Context.getUnqualifiedArrayType(cv1T1, T1Quals);
+
+ // Reference initialization via an initializer list works thus:
+ // If the initializer list consists of a single element that is
+ // reference-related to the referenced type, bind directly to that element
+ // (possibly creating temporaries).
+ // Otherwise, initialize a temporary with the initializer list and
+ // bind to that.
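+  // For example (illustrative):
+  //   int x;
+  //   int &r = {x};         // one reference-related element: bind directly
+  //   const int &c = {'a'}; // otherwise: bind to a temporary initialized
+  //                         // from the list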
+ if (InitList->getNumInits() == 1) {
+ Expr *Initializer = InitList->getInit(0);
+ QualType cv2T2 = Initializer->getType();
+ Qualifiers T2Quals;
+ QualType T2 = S.Context.getUnqualifiedArrayType(cv2T2, T2Quals);
+
+ // If this fails, creating a temporary wouldn't work either.
+ if (ResolveOverloadedFunctionForReferenceBinding(S, Initializer, cv2T2, T2,
+ T1, Sequence))
+ return;
+
+ SourceLocation DeclLoc = Initializer->getLocStart();
+ bool dummy1, dummy2, dummy3;
+ Sema::ReferenceCompareResult RefRelationship
+ = S.CompareReferenceRelationship(DeclLoc, cv1T1, cv2T2, dummy1,
+ dummy2, dummy3);
+ if (RefRelationship >= Sema::Ref_Related) {
+ // Try to bind the reference here.
+ TryReferenceInitializationCore(S, Entity, Kind, Initializer, cv1T1, T1,
+ T1Quals, cv2T2, T2, T2Quals, Sequence);
+ if (Sequence)
+ Sequence.RewrapReferenceInitList(cv1T1, InitList);
+ return;
+ }
+
+ // Update the initializer if we've resolved an overloaded function.
+ if (Sequence.step_begin() != Sequence.step_end())
+ Sequence.RewrapReferenceInitList(cv1T1, InitList);
+ }
+
+ // Not reference-related. Create a temporary and bind to that.
+ InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(cv1T1);
+
+ TryListInitialization(S, TempEntity, Kind, InitList, Sequence);
+ if (Sequence) {
+ if (DestType->isRValueReferenceType() ||
+ (T1Quals.hasConst() && !T1Quals.hasVolatile()))
+ Sequence.AddReferenceBindingStep(cv1T1, /*bindingTemporary=*/true);
+ else
+ Sequence.SetFailed(
+ InitializationSequence::FK_NonConstLValueReferenceBindingToTemporary);
+ }
+}
+
+/// \brief Attempt list initialization (C++0x [dcl.init.list])
+static void TryListInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ InitListExpr *InitList,
+ InitializationSequence &Sequence) {
+ QualType DestType = Entity.getType();
+
+  // C++ doesn't allow scalar initialization with more than one argument.
+  // But C99 complex numbers are scalars, and multiple initializers make
+  // sense for them.
+ if (S.getLangOpts().CPlusPlus && DestType->isScalarType() &&
+ !DestType->isAnyComplexType() && InitList->getNumInits() > 1) {
+ Sequence.SetFailed(InitializationSequence::FK_TooManyInitsForScalar);
+ return;
+ }
+ if (DestType->isReferenceType()) {
+ TryReferenceListInitialization(S, Entity, Kind, InitList, Sequence);
+ return;
+ }
+
+ if (DestType->isRecordType() &&
+ !S.isCompleteType(InitList->getLocStart(), DestType)) {
+ Sequence.setIncompleteTypeFailure(DestType);
+ return;
+ }
+
+ // C++11 [dcl.init.list]p3, per DR1467:
+ // - If T is a class type and the initializer list has a single element of
+ // type cv U, where U is T or a class derived from T, the object is
+ // initialized from that element (by copy-initialization for
+ // copy-list-initialization, or by direct-initialization for
+ // direct-list-initialization).
+ // - Otherwise, if T is a character array and the initializer list has a
+ // single element that is an appropriately-typed string literal
+ // (8.5.2 [dcl.init.string]), initialization is performed as described
+ // in that section.
+ // - Otherwise, if T is an aggregate, [...] (continue below).
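+  // For example (illustrative): given 'struct S {};' and 'S s0;',
+  // 'S s{s0}' initializes s from s0 itself (per DR1467) rather than
+  // treating s0 as the first aggregate element, and 'char a[4]{"abc"}'
+  // is handled as string-literal initialization.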
+ if (S.getLangOpts().CPlusPlus11 && InitList->getNumInits() == 1) {
+ if (DestType->isRecordType()) {
+ QualType InitType = InitList->getInit(0)->getType();
+ if (S.Context.hasSameUnqualifiedType(InitType, DestType) ||
+ S.IsDerivedFrom(InitList->getLocStart(), InitType, DestType)) {
+ Expr *InitAsExpr = InitList->getInit(0);
+ TryConstructorInitialization(S, Entity, Kind, InitAsExpr, DestType,
+ Sequence, /*InitListSyntax*/ false,
+ /*IsInitListCopy*/ true);
+ return;
+ }
+ }
+ if (const ArrayType *DestAT = S.Context.getAsArrayType(DestType)) {
+ Expr *SubInit[1] = {InitList->getInit(0)};
+ if (!isa<VariableArrayType>(DestAT) &&
+ IsStringInit(SubInit[0], DestAT, S.Context) == SIF_None) {
+ InitializationKind SubKind =
+ Kind.getKind() == InitializationKind::IK_DirectList
+ ? InitializationKind::CreateDirect(Kind.getLocation(),
+ InitList->getLBraceLoc(),
+ InitList->getRBraceLoc())
+ : Kind;
+ Sequence.InitializeFrom(S, Entity, SubKind, SubInit,
+ /*TopLevelOfInitList*/ true);
+
+ // TryStringLiteralInitialization() (in InitializeFrom()) will fail if
+ // the element is not an appropriately-typed string literal, in which
+ // case we should proceed as in C++11 (below).
+ if (Sequence) {
+ Sequence.RewrapReferenceInitList(Entity.getType(), InitList);
+ return;
+ }
+ }
+ }
+ }
+
+ // C++11 [dcl.init.list]p3:
+ // - If T is an aggregate, aggregate initialization is performed.
+ if ((DestType->isRecordType() && !DestType->isAggregateType()) ||
+ (S.getLangOpts().CPlusPlus11 &&
+ S.isStdInitializerList(DestType, nullptr))) {
+ if (S.getLangOpts().CPlusPlus11) {
+ // - Otherwise, if the initializer list has no elements and T is a
+ // class type with a default constructor, the object is
+ // value-initialized.
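+      // For example (illustrative): given 'struct S { S(); };',
+      // 'S s{};' value-initializes s by calling the default constructor.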
+ if (InitList->getNumInits() == 0) {
+ CXXRecordDecl *RD = DestType->getAsCXXRecordDecl();
+ if (RD->hasDefaultConstructor()) {
+ TryValueInitialization(S, Entity, Kind, Sequence, InitList);
+ return;
+ }
+ }
+
+ // - Otherwise, if T is a specialization of std::initializer_list<E>,
+ // an initializer_list object constructed [...]
+ if (TryInitializerListConstruction(S, InitList, DestType, Sequence))
+ return;
+
+ // - Otherwise, if T is a class type, constructors are considered.
+ Expr *InitListAsExpr = InitList;
+ TryConstructorInitialization(S, Entity, Kind, InitListAsExpr, DestType,
+ Sequence, /*InitListSyntax*/ true);
+ } else
+ Sequence.SetFailed(InitializationSequence::FK_InitListBadDestinationType);
+ return;
+ }
+
+ if (S.getLangOpts().CPlusPlus && !DestType->isAggregateType() &&
+ InitList->getNumInits() == 1 &&
+ InitList->getInit(0)->getType()->isRecordType()) {
+ // - Otherwise, if the initializer list has a single element of type E
+ // [...references are handled above...], the object or reference is
+ // initialized from that element (by copy-initialization for
+ // copy-list-initialization, or by direct-initialization for
+ // direct-list-initialization); if a narrowing conversion is required
+ // to convert the element to T, the program is ill-formed.
+ //
+ // Per core-24034, this is direct-initialization if we were performing
+ // direct-list-initialization and copy-initialization otherwise.
+ // We can't use InitListChecker for this, because it always performs
+ // copy-initialization. This only matters if we might use an 'explicit'
+ // conversion operator, so we only need to handle the cases where the source
+ // is of record type.
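+    // For example (illustrative): given
+    //   struct To { explicit operator int() const; };
+    // 'int i{To{}}' (direct-list-initialization) may use the explicit
+    // conversion operator, while 'int i = {To{}}' may not.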
+ InitializationKind SubKind =
+ Kind.getKind() == InitializationKind::IK_DirectList
+ ? InitializationKind::CreateDirect(Kind.getLocation(),
+ InitList->getLBraceLoc(),
+ InitList->getRBraceLoc())
+ : Kind;
+ Expr *SubInit[1] = { InitList->getInit(0) };
+ Sequence.InitializeFrom(S, Entity, SubKind, SubInit,
+ /*TopLevelOfInitList*/true);
+ if (Sequence)
+ Sequence.RewrapReferenceInitList(Entity.getType(), InitList);
+ return;
+ }
+
+ InitListChecker CheckInitList(S, Entity, InitList,
+ DestType, /*VerifyOnly=*/true);
+ if (CheckInitList.HadError()) {
+ Sequence.SetFailed(InitializationSequence::FK_ListInitializationFailed);
+ return;
+ }
+
+ // Add the list initialization step with the built init list.
+ Sequence.AddListInitializationStep(DestType);
+}
+
+/// \brief Try a reference initialization that involves calling a conversion
+/// function.
+static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ Expr *Initializer,
+ bool AllowRValues,
+ InitializationSequence &Sequence) {
+ QualType DestType = Entity.getType();
+ QualType cv1T1 = DestType->getAs<ReferenceType>()->getPointeeType();
+ QualType T1 = cv1T1.getUnqualifiedType();
+ QualType cv2T2 = Initializer->getType();
+ QualType T2 = cv2T2.getUnqualifiedType();
+
+ bool DerivedToBase;
+ bool ObjCConversion;
+ bool ObjCLifetimeConversion;
+ assert(!S.CompareReferenceRelationship(Initializer->getLocStart(),
+ T1, T2, DerivedToBase,
+ ObjCConversion,
+ ObjCLifetimeConversion) &&
+ "Must have incompatible references when binding via conversion");
+ (void)DerivedToBase;
+ (void)ObjCConversion;
+ (void)ObjCLifetimeConversion;
+
+ // Build the candidate set directly in the initialization sequence
+ // structure, so that it will persist if we fail.
+ OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();
+ CandidateSet.clear();
+
+ // Determine whether we are allowed to call explicit constructors or
+ // explicit conversion operators.
+ bool AllowExplicit = Kind.AllowExplicit();
+ bool AllowExplicitConvs = Kind.allowExplicitConversionFunctionsInRefBinding();
+
+ const RecordType *T1RecordType = nullptr;
+ if (AllowRValues && (T1RecordType = T1->getAs<RecordType>()) &&
+ S.isCompleteType(Kind.getLocation(), T1)) {
+ // The type we're converting to is a class type. Enumerate its constructors
+ // to see if there is a suitable conversion.
+ CXXRecordDecl *T1RecordDecl = cast<CXXRecordDecl>(T1RecordType->getDecl());
+
+ for (NamedDecl *D : S.LookupConstructors(T1RecordDecl)) {
+ DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
+
+ // Find the constructor (which may be a template).
+ CXXConstructorDecl *Constructor = nullptr;
+ FunctionTemplateDecl *ConstructorTmpl = dyn_cast<FunctionTemplateDecl>(D);
+ if (ConstructorTmpl)
+ Constructor = cast<CXXConstructorDecl>(
+ ConstructorTmpl->getTemplatedDecl());
+ else
+ Constructor = cast<CXXConstructorDecl>(D);
+
+ if (!Constructor->isInvalidDecl() &&
+ Constructor->isConvertingConstructor(AllowExplicit)) {
+ if (ConstructorTmpl)
+ S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
+ /*ExplicitArgs*/ nullptr,
+ Initializer, CandidateSet,
+ /*SuppressUserConversions=*/true);
+ else
+ S.AddOverloadCandidate(Constructor, FoundDecl,
+ Initializer, CandidateSet,
+ /*SuppressUserConversions=*/true);
+ }
+ }
+ }
+ if (T1RecordType && T1RecordType->getDecl()->isInvalidDecl())
+ return OR_No_Viable_Function;
+
+ const RecordType *T2RecordType = nullptr;
+ if ((T2RecordType = T2->getAs<RecordType>()) &&
+ S.isCompleteType(Kind.getLocation(), T2)) {
+ // The type we're converting from is a class type, enumerate its conversion
+ // functions.
+ CXXRecordDecl *T2RecordDecl = cast<CXXRecordDecl>(T2RecordType->getDecl());
+
+ const auto &Conversions = T2RecordDecl->getVisibleConversionFunctions();
+ for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
+ NamedDecl *D = *I;
+ CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(D->getDeclContext());
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+
+ FunctionTemplateDecl *ConvTemplate = dyn_cast<FunctionTemplateDecl>(D);
+ CXXConversionDecl *Conv;
+ if (ConvTemplate)
+ Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
+ else
+ Conv = cast<CXXConversionDecl>(D);
+
+ // If the conversion function doesn't return a reference type,
+ // it can't be considered for this conversion unless we're allowed to
+ // consider rvalues.
+ // FIXME: Do we need to make sure that we only consider conversion
+ // candidates with reference-compatible results? That might be needed to
+ // break recursion.
+ if ((AllowExplicitConvs || !Conv->isExplicit()) &&
+ (AllowRValues || Conv->getConversionType()->isLValueReferenceType())){
+ if (ConvTemplate)
+ S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(),
+ ActingDC, Initializer,
+ DestType, CandidateSet,
+ /*AllowObjCConversionOnExplicit=*/
+ false);
+ else
+ S.AddConversionCandidate(Conv, I.getPair(), ActingDC,
+ Initializer, DestType, CandidateSet,
+ /*AllowObjCConversionOnExplicit=*/false);
+ }
+ }
+ }
+ if (T2RecordType && T2RecordType->getDecl()->isInvalidDecl())
+ return OR_No_Viable_Function;
+
+ SourceLocation DeclLoc = Initializer->getLocStart();
+
+ // Perform overload resolution. If it fails, return the failed result.
+ OverloadCandidateSet::iterator Best;
+ if (OverloadingResult Result
+ = CandidateSet.BestViableFunction(S, DeclLoc, Best, true))
+ return Result;
+
+ FunctionDecl *Function = Best->Function;
+ // This is the overload that will be used for this initialization step if we
+ // use this initialization. Mark it as referenced.
+ Function->setReferenced();
+
+ // Compute the returned type of the conversion.
+ if (isa<CXXConversionDecl>(Function))
+ T2 = Function->getReturnType();
+ else
+ T2 = cv1T1;
+
+ // Add the user-defined conversion step.
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+ Sequence.AddUserConversionStep(Function, Best->FoundDecl,
+ T2.getNonLValueExprType(S.Context),
+ HadMultipleCandidates);
+
+ // Determine whether we need to perform derived-to-base or
+ // cv-qualification adjustments.
+ ExprValueKind VK = VK_RValue;
+ if (T2->isLValueReferenceType())
+ VK = VK_LValue;
+ else if (const RValueReferenceType *RRef = T2->getAs<RValueReferenceType>())
+ VK = RRef->getPointeeType()->isFunctionType() ? VK_LValue : VK_XValue;
+
+ bool NewDerivedToBase = false;
+ bool NewObjCConversion = false;
+ bool NewObjCLifetimeConversion = false;
+ Sema::ReferenceCompareResult NewRefRelationship
+ = S.CompareReferenceRelationship(DeclLoc, T1,
+ T2.getNonLValueExprType(S.Context),
+ NewDerivedToBase, NewObjCConversion,
+ NewObjCLifetimeConversion);
+ if (NewRefRelationship == Sema::Ref_Incompatible) {
+ // If the type we've converted to is not reference-related to the
+ // type we're looking for, then there is another conversion step
+ // we need to perform to produce a temporary of the right type
+ // that we'll be binding to.
+ ImplicitConversionSequence ICS;
+ ICS.setStandard();
+ ICS.Standard = Best->FinalConversion;
+ T2 = ICS.Standard.getToType(2);
+ Sequence.AddConversionSequenceStep(ICS, T2);
+ } else if (NewDerivedToBase)
+ Sequence.AddDerivedToBaseCastStep(
+ S.Context.getQualifiedType(T1,
+ T2.getNonReferenceType().getQualifiers()),
+ VK);
+ else if (NewObjCConversion)
+ Sequence.AddObjCObjectConversionStep(
+ S.Context.getQualifiedType(T1,
+ T2.getNonReferenceType().getQualifiers()));
+
+ if (cv1T1.getQualifiers() != T2.getNonReferenceType().getQualifiers())
+ Sequence.AddQualificationConversionStep(cv1T1, VK);
+
+ Sequence.AddReferenceBindingStep(cv1T1, !T2->isReferenceType());
+ return OR_Success;
+}
+
+static void CheckCXX98CompatAccessibleCopy(Sema &S,
+ const InitializedEntity &Entity,
+ Expr *CurInitExpr);
+
+/// \brief Attempt reference initialization (C++0x [dcl.init.ref])
+static void TryReferenceInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ Expr *Initializer,
+ InitializationSequence &Sequence) {
+ QualType DestType = Entity.getType();
+ QualType cv1T1 = DestType->getAs<ReferenceType>()->getPointeeType();
+ Qualifiers T1Quals;
+ QualType T1 = S.Context.getUnqualifiedArrayType(cv1T1, T1Quals);
+ QualType cv2T2 = Initializer->getType();
+ Qualifiers T2Quals;
+ QualType T2 = S.Context.getUnqualifiedArrayType(cv2T2, T2Quals);
+
+ // If the initializer is the address of an overloaded function, try
+ // to resolve the overloaded function. If all goes well, T2 is the
+ // type of the resulting function.
+ if (ResolveOverloadedFunctionForReferenceBinding(S, Initializer, cv2T2, T2,
+ T1, Sequence))
+ return;
+
+ // Delegate everything else to a subfunction.
+ TryReferenceInitializationCore(S, Entity, Kind, Initializer, cv1T1, T1,
+ T1Quals, cv2T2, T2, T2Quals, Sequence);
+}
+
+/// Converts the initializer a reference is binding to so that it has the
+/// appropriate qualifiers and value kind.
+///
+/// In this case, 'x' is an 'int' lvalue, but it needs to be 'const int'.
+/// \code
+/// int x;
+/// const int &r = x;
+/// \endcode
+///
+/// In this case the reference is binding to a bitfield lvalue, which isn't
+/// valid. Perform a load to create a lifetime-extended temporary instead.
+/// \code
+/// const int &r = someStruct.bitfield;
+/// \endcode
+static ExprValueKind
+convertQualifiersAndValueKindIfNecessary(Sema &S,
+ InitializationSequence &Sequence,
+ Expr *Initializer,
+ QualType cv1T1,
+ Qualifiers T1Quals,
+ Qualifiers T2Quals,
+ bool IsLValueRef) {
+ bool IsNonAddressableType = Initializer->refersToBitField() ||
+ Initializer->refersToVectorElement();
+
+ if (IsNonAddressableType) {
+ // C++11 [dcl.init.ref]p5: [...] Otherwise, the reference shall be an
+ // lvalue reference to a non-volatile const type, or the reference shall be
+ // an rvalue reference.
+ //
+ // If not, we can't make a temporary and bind to that. Give up and allow the
+ // error to be diagnosed later.
+ if (IsLValueRef && (!T1Quals.hasConst() || T1Quals.hasVolatile())) {
+ assert(Initializer->isGLValue());
+ return Initializer->getValueKind();
+ }
+
+ // Force a load so we can materialize a temporary.
+ Sequence.AddLValueToRValueStep(cv1T1.getUnqualifiedType());
+ return VK_RValue;
+ }
+
+ if (T1Quals != T2Quals) {
+ Sequence.AddQualificationConversionStep(cv1T1,
+ Initializer->getValueKind());
+ }
+
+ return Initializer->getValueKind();
+}
+
+
+/// \brief Reference initialization without resolving overloaded functions.
+static void TryReferenceInitializationCore(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ Expr *Initializer,
+ QualType cv1T1, QualType T1,
+ Qualifiers T1Quals,
+ QualType cv2T2, QualType T2,
+ Qualifiers T2Quals,
+ InitializationSequence &Sequence) {
+ QualType DestType = Entity.getType();
+ SourceLocation DeclLoc = Initializer->getLocStart();
+ // Compute some basic properties of the types and the initializer.
+ bool isLValueRef = DestType->isLValueReferenceType();
+ bool isRValueRef = !isLValueRef;
+ bool DerivedToBase = false;
+ bool ObjCConversion = false;
+ bool ObjCLifetimeConversion = false;
+ Expr::Classification InitCategory = Initializer->Classify(S.Context);
+ Sema::ReferenceCompareResult RefRelationship
+ = S.CompareReferenceRelationship(DeclLoc, cv1T1, cv2T2, DerivedToBase,
+ ObjCConversion, ObjCLifetimeConversion);
+
+ // C++0x [dcl.init.ref]p5:
+ // A reference to type "cv1 T1" is initialized by an expression of type
+ // "cv2 T2" as follows:
+ //
+ // - If the reference is an lvalue reference and the initializer
+ // expression
+ // Note the analogous bullet points for rvalue refs to functions. Because
+ // there are no function rvalues in C++, rvalue refs to functions are treated
+ // like lvalue refs.
+ OverloadingResult ConvOvlResult = OR_Success;
+ bool T1Function = T1->isFunctionType();
+ if (isLValueRef || T1Function) {
+ if (InitCategory.isLValue() &&
+ (RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification ||
+ (Kind.isCStyleOrFunctionalCast() &&
+ RefRelationship == Sema::Ref_Related))) {
+ // - is an lvalue (but is not a bit-field), and "cv1 T1" is
+ // reference-compatible with "cv2 T2," or
+ //
+ // Per C++ [over.best.ics]p2, we don't diagnose whether the lvalue is a
+ // bit-field when we're determining whether the reference initialization
+ // can occur. However, we do pay attention to whether it is a bit-field
+ // to decide whether we're actually binding to a temporary created from
+ // the bit-field.
+ if (DerivedToBase)
+ Sequence.AddDerivedToBaseCastStep(
+ S.Context.getQualifiedType(T1, T2Quals),
+ VK_LValue);
+ else if (ObjCConversion)
+ Sequence.AddObjCObjectConversionStep(
+ S.Context.getQualifiedType(T1, T2Quals));
+
+ ExprValueKind ValueKind =
+ convertQualifiersAndValueKindIfNecessary(S, Sequence, Initializer,
+ cv1T1, T1Quals, T2Quals,
+ isLValueRef);
+ Sequence.AddReferenceBindingStep(cv1T1, ValueKind == VK_RValue);
+ return;
+ }
+
+ // - has a class type (i.e., T2 is a class type), where T1 is not
+ // reference-related to T2, and can be implicitly converted to an
+ // lvalue of type "cv3 T3," where "cv1 T1" is reference-compatible
+ // with "cv3 T3" (this conversion is selected by enumerating the
+ // applicable conversion functions (13.3.1.6) and choosing the best
+ // one through overload resolution (13.3)),
+ // If we have an rvalue ref to function type here, the rhs must be
+ // an rvalue. DR1287 removed the "implicitly" here.
+ if (RefRelationship == Sema::Ref_Incompatible && T2->isRecordType() &&
+ (isLValueRef || InitCategory.isRValue())) {
+ ConvOvlResult = TryRefInitWithConversionFunction(
+ S, Entity, Kind, Initializer, /*AllowRValues*/isRValueRef, Sequence);
+ if (ConvOvlResult == OR_Success)
+ return;
+ if (ConvOvlResult != OR_No_Viable_Function)
+ Sequence.SetOverloadFailure(
+ InitializationSequence::FK_ReferenceInitOverloadFailed,
+ ConvOvlResult);
+ }
+ }
+
+ // - Otherwise, the reference shall be an lvalue reference to a
+ // non-volatile const type (i.e., cv1 shall be const), or the reference
+ // shall be an rvalue reference.
+ if (isLValueRef && !(T1Quals.hasConst() && !T1Quals.hasVolatile())) {
+ if (S.Context.getCanonicalType(T2) == S.Context.OverloadTy)
+ Sequence.SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
+ else if (ConvOvlResult && !Sequence.getFailedCandidateSet().empty())
+ Sequence.SetOverloadFailure(
+ InitializationSequence::FK_ReferenceInitOverloadFailed,
+ ConvOvlResult);
+ else
+ Sequence.SetFailed(InitCategory.isLValue()
+ ? (RefRelationship == Sema::Ref_Related
+ ? InitializationSequence::FK_ReferenceInitDropsQualifiers
+ : InitializationSequence::FK_NonConstLValueReferenceBindingToUnrelated)
+ : InitializationSequence::FK_NonConstLValueReferenceBindingToTemporary);
+
+ return;
+ }
+
+ // - If the initializer expression
+ // - is an xvalue, class prvalue, array prvalue, or function lvalue and
+ // "cv1 T1" is reference-compatible with "cv2 T2"
+ // Note: functions are handled below.
+ if (!T1Function &&
+ (RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification ||
+ (Kind.isCStyleOrFunctionalCast() &&
+ RefRelationship == Sema::Ref_Related)) &&
+ (InitCategory.isXValue() ||
+ (InitCategory.isPRValue() && T2->isRecordType()) ||
+ (InitCategory.isPRValue() && T2->isArrayType()))) {
+ ExprValueKind ValueKind = InitCategory.isXValue()? VK_XValue : VK_RValue;
+ if (InitCategory.isPRValue() && T2->isRecordType()) {
+ // The corresponding bullet in C++03 [dcl.init.ref]p5 gives the
+ // compiler the freedom to perform a copy here or bind to the
+ // object, while C++0x requires that we bind directly to the
+ // object. Hence, we always bind to the object without making an
+      // extra copy. However, C++03 requires that we check for the
+ // presence of a suitable copy constructor:
+ //
+ // The constructor that would be used to make the copy shall
+ // be callable whether or not the copy is actually done.
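+      // For example (illustrative), with an inaccessible copy constructor:
+      //   struct S { S(); private: S(const S&); };
+      //   const S &r = S(); // ill-formed in C++03, OK in C++11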
+ if (!S.getLangOpts().CPlusPlus11 && !S.getLangOpts().MicrosoftExt)
+ Sequence.AddExtraneousCopyToTemporary(cv2T2);
+ else if (S.getLangOpts().CPlusPlus11)
+ CheckCXX98CompatAccessibleCopy(S, Entity, Initializer);
+ }
+
+ if (DerivedToBase)
+ Sequence.AddDerivedToBaseCastStep(S.Context.getQualifiedType(T1, T2Quals),
+ ValueKind);
+ else if (ObjCConversion)
+ Sequence.AddObjCObjectConversionStep(
+ S.Context.getQualifiedType(T1, T2Quals));
+
+ ValueKind = convertQualifiersAndValueKindIfNecessary(S, Sequence,
+ Initializer, cv1T1,
+ T1Quals, T2Quals,
+ isLValueRef);
+
+ Sequence.AddReferenceBindingStep(cv1T1, ValueKind == VK_RValue);
+ return;
+ }
+
+ // - has a class type (i.e., T2 is a class type), where T1 is not
+ // reference-related to T2, and can be implicitly converted to an
+ // xvalue, class prvalue, or function lvalue of type "cv3 T3",
+ // where "cv1 T1" is reference-compatible with "cv3 T3",
+ //
+ // DR1287 removes the "implicitly" here.
+ if (T2->isRecordType()) {
+ if (RefRelationship == Sema::Ref_Incompatible) {
+ ConvOvlResult = TryRefInitWithConversionFunction(
+ S, Entity, Kind, Initializer, /*AllowRValues*/true, Sequence);
+ if (ConvOvlResult)
+ Sequence.SetOverloadFailure(
+ InitializationSequence::FK_ReferenceInitOverloadFailed,
+ ConvOvlResult);
+
+ return;
+ }
+
+ if ((RefRelationship == Sema::Ref_Compatible ||
+ RefRelationship == Sema::Ref_Compatible_With_Added_Qualification) &&
+ isRValueRef && InitCategory.isLValue()) {
+ Sequence.SetFailed(
+ InitializationSequence::FK_RValueReferenceBindingToLValue);
+ return;
+ }
+
+ Sequence.SetFailed(InitializationSequence::FK_ReferenceInitDropsQualifiers);
+ return;
+ }
+
+ // - Otherwise, a temporary of type "cv1 T1" is created and initialized
+ // from the initializer expression using the rules for a non-reference
+ // copy-initialization (8.5). The reference is then bound to the
+ // temporary. [...]
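+  // For example (illustrative): 'const long &r = 1;' converts the int 1
+  // to long and binds r to the resulting temporary.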
+
+ InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(cv1T1);
+
+ // FIXME: Why do we use an implicit conversion here rather than trying
+ // copy-initialization?
+ ImplicitConversionSequence ICS
+ = S.TryImplicitConversion(Initializer, TempEntity.getType(),
+ /*SuppressUserConversions=*/false,
+ /*AllowExplicit=*/false,
+ /*FIXME:InOverloadResolution=*/false,
+ /*CStyle=*/Kind.isCStyleOrFunctionalCast(),
+ /*AllowObjCWritebackConversion=*/false);
+
+ if (ICS.isBad()) {
+ // FIXME: Use the conversion function set stored in ICS to turn
+ // this into an overloading ambiguity diagnostic. However, we need
+ // to keep that set as an OverloadCandidateSet rather than as some
+ // other kind of set.
+ if (ConvOvlResult && !Sequence.getFailedCandidateSet().empty())
+ Sequence.SetOverloadFailure(
+ InitializationSequence::FK_ReferenceInitOverloadFailed,
+ ConvOvlResult);
+ else if (S.Context.getCanonicalType(T2) == S.Context.OverloadTy)
+ Sequence.SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
+ else
+ Sequence.SetFailed(InitializationSequence::FK_ReferenceInitFailed);
+ return;
+ } else {
+ Sequence.AddConversionSequenceStep(ICS, TempEntity.getType());
+ }
+
+ // [...] If T1 is reference-related to T2, cv1 must be the
+ // same cv-qualification as, or greater cv-qualification
+ // than, cv2; otherwise, the program is ill-formed.
+ unsigned T1CVRQuals = T1Quals.getCVRQualifiers();
+ unsigned T2CVRQuals = T2Quals.getCVRQualifiers();
+ if (RefRelationship == Sema::Ref_Related &&
+ (T1CVRQuals | T2CVRQuals) != T1CVRQuals) {
+ Sequence.SetFailed(InitializationSequence::FK_ReferenceInitDropsQualifiers);
+ return;
+ }
+
+ // [...] If T1 is reference-related to T2 and the reference is an rvalue
+ // reference, the initializer expression shall not be an lvalue.
+ if (RefRelationship >= Sema::Ref_Related && !isLValueRef &&
+ InitCategory.isLValue()) {
+ Sequence.SetFailed(
+ InitializationSequence::FK_RValueReferenceBindingToLValue);
+ return;
+ }
+
+ Sequence.AddReferenceBindingStep(cv1T1, /*bindingTemporary=*/true);
+ return;
+}
+
+/// \brief Attempt character array initialization from a string literal
+/// (C++ [dcl.init.string], C99 6.7.8).
+static void TryStringLiteralInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ Expr *Initializer,
+ InitializationSequence &Sequence) {
+ Sequence.AddStringInitStep(Entity.getType());
+}
+
+/// \brief Attempt value initialization (C++ [dcl.init]p7).
+static void TryValueInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ InitializationSequence &Sequence,
+ InitListExpr *InitList) {
+ assert((!InitList || InitList->getNumInits() == 0) &&
+ "Shouldn't use value-init for non-empty init lists");
+
+ // C++98 [dcl.init]p5, C++11 [dcl.init]p7:
+ //
+ // To value-initialize an object of type T means:
+ QualType T = Entity.getType();
+
+ // -- if T is an array type, then each element is value-initialized;
+ T = S.Context.getBaseElementType(T);
+
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ bool NeedZeroInitialization = true;
+ if (!S.getLangOpts().CPlusPlus11) {
+ // C++98:
+ // -- if T is a class type (clause 9) with a user-declared constructor
+ // (12.1), then the default constructor for T is called (and the
+ // initialization is ill-formed if T has no accessible default
+ // constructor);
+ if (ClassDecl->hasUserDeclaredConstructor())
+ NeedZeroInitialization = false;
+ } else {
+ // C++11:
+ // -- if T is a class type (clause 9) with either no default constructor
+ // (12.1 [class.ctor]) or a default constructor that is user-provided
+ // or deleted, then the object is default-initialized;
+ CXXConstructorDecl *CD = S.LookupDefaultConstructor(ClassDecl);
+ if (!CD || !CD->getCanonicalDecl()->isDefaulted() || CD->isDeleted())
+ NeedZeroInitialization = false;
+ }
+
+ // -- if T is a (possibly cv-qualified) non-union class type without a
+ // user-provided or deleted default constructor, then the object is
+ // zero-initialized and, if T has a non-trivial default constructor,
+ // default-initialized;
+ // The 'non-union' here was removed by DR1502. The 'non-trivial default
+ // constructor' part was removed by DR1507.
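+      // For example (illustrative):
+      //   struct S { int i; S() {} }; // user-provided ctor: no zero-init
+      //   struct T { int i; };        // implicit default ctor: zero-init
+      // Value-initializing an S leaves i indeterminate; value-initializing
+      // a T sets i to 0.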
+ if (NeedZeroInitialization)
+ Sequence.AddZeroInitializationStep(Entity.getType());
+
+ // C++03:
+ // -- if T is a non-union class type without a user-declared constructor,
+ // then every non-static data member and base class component of T is
+ // value-initialized;
+ // [...] A program that calls for [...] value-initialization of an
+ // entity of reference type is ill-formed.
+ //
+ // C++11 doesn't need this handling, because value-initialization does not
+ // occur recursively there, and the implicit default constructor is
+ // defined as deleted in the problematic cases.
+ if (!S.getLangOpts().CPlusPlus11 &&
+ ClassDecl->hasUninitializedReferenceMember()) {
+ Sequence.SetFailed(InitializationSequence::FK_TooManyInitsForReference);
+ return;
+ }
+
+ // If this is list-value-initialization, pass the empty init list on when
+ // building the constructor call. This affects the semantics of a few
+ // things (such as whether an explicit default constructor can be called).
+ Expr *InitListAsExpr = InitList;
+ MultiExprArg Args(&InitListAsExpr, InitList ? 1 : 0);
+ bool InitListSyntax = InitList;
+
+ return TryConstructorInitialization(S, Entity, Kind, Args, T, Sequence,
+ InitListSyntax);
+ }
+ }
+
+ Sequence.AddZeroInitializationStep(Entity.getType());
+}
+
+/// \brief Attempt default initialization (C++ [dcl.init]p6).
+static void TryDefaultInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ InitializationSequence &Sequence) {
+ assert(Kind.getKind() == InitializationKind::IK_Default);
+
+ // C++ [dcl.init]p6:
+ // To default-initialize an object of type T means:
+ // - if T is an array type, each element is default-initialized;
+ QualType DestType = S.Context.getBaseElementType(Entity.getType());
+
+ // - if T is a (possibly cv-qualified) class type (Clause 9), the default
+ // constructor for T is called (and the initialization is ill-formed if
+ // T has no accessible default constructor);
+ if (DestType->isRecordType() && S.getLangOpts().CPlusPlus) {
+ TryConstructorInitialization(S, Entity, Kind, None, DestType, Sequence);
+ return;
+ }
+
+ // - otherwise, no initialization is performed.
+
+ // If a program calls for the default initialization of an object of
+ // a const-qualified type T, T shall be a class type with a user-provided
+ // default constructor.
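+  // For example (illustrative): 'const int c;' is ill-formed, while
+  // 'const std::string s;' is fine because std::string has a
+  // user-provided default constructor.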
+ if (DestType.isConstQualified() && S.getLangOpts().CPlusPlus) {
+ if (!maybeRecoverWithZeroInitialization(S, Sequence, Entity))
+ Sequence.SetFailed(InitializationSequence::FK_DefaultInitOfConst);
+ return;
+ }
+
+ // If the destination type has a lifetime property, zero-initialize it.
+ if (DestType.getQualifiers().hasObjCLifetime()) {
+ Sequence.AddZeroInitializationStep(Entity.getType());
+ return;
+ }
+}
+
+/// \brief Attempt a user-defined conversion between two types (C++ [dcl.init]),
+/// which enumerates all conversion functions and performs overload resolution
+/// to select the best.
+static void TryUserDefinedConversion(Sema &S,
+ QualType DestType,
+ const InitializationKind &Kind,
+ Expr *Initializer,
+ InitializationSequence &Sequence,
+ bool TopLevelOfInitList) {
+ assert(!DestType->isReferenceType() && "References are handled elsewhere");
+ QualType SourceType = Initializer->getType();
+ assert((DestType->isRecordType() || SourceType->isRecordType()) &&
+ "Must have a class type to perform a user-defined conversion");
+
+ // Build the candidate set directly in the initialization sequence
+ // structure, so that it will persist if we fail.
+ OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();
+ CandidateSet.clear();
+
+ // Determine whether we are allowed to call explicit constructors or
+ // explicit conversion operators.
+ bool AllowExplicit = Kind.AllowExplicit();
+
+ if (const RecordType *DestRecordType = DestType->getAs<RecordType>()) {
+ // The type we're converting to is a class type. Enumerate its constructors
+ // to see if there is a suitable conversion.
+ CXXRecordDecl *DestRecordDecl
+ = cast<CXXRecordDecl>(DestRecordType->getDecl());
+
+ // Try to complete the type we're converting to.
+ if (S.isCompleteType(Kind.getLocation(), DestType)) {
+ DeclContext::lookup_result R = S.LookupConstructors(DestRecordDecl);
+      // The container holding the constructors can, under certain
+      // conditions, be changed while iterating. To be safe, we copy the
+      // lookup results into a new container first.
+ SmallVector<NamedDecl*, 8> CopyOfCon(R.begin(), R.end());
+ for (SmallVectorImpl<NamedDecl *>::iterator
+ Con = CopyOfCon.begin(), ConEnd = CopyOfCon.end();
+ Con != ConEnd; ++Con) {
+ NamedDecl *D = *Con;
+ DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
+
+ // Find the constructor (which may be a template).
+ CXXConstructorDecl *Constructor = nullptr;
+ FunctionTemplateDecl *ConstructorTmpl
+ = dyn_cast<FunctionTemplateDecl>(D);
+ if (ConstructorTmpl)
+ Constructor = cast<CXXConstructorDecl>(
+ ConstructorTmpl->getTemplatedDecl());
+ else
+ Constructor = cast<CXXConstructorDecl>(D);
+
+ if (!Constructor->isInvalidDecl() &&
+ Constructor->isConvertingConstructor(AllowExplicit)) {
+ if (ConstructorTmpl)
+ S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
+ /*ExplicitArgs*/ nullptr,
+ Initializer, CandidateSet,
+ /*SuppressUserConversions=*/true);
+ else
+ S.AddOverloadCandidate(Constructor, FoundDecl,
+ Initializer, CandidateSet,
+ /*SuppressUserConversions=*/true);
+ }
+ }
+ }
+ }
+
+ SourceLocation DeclLoc = Initializer->getLocStart();
+
+ if (const RecordType *SourceRecordType = SourceType->getAs<RecordType>()) {
+ // The type we're converting from is a class type, enumerate its conversion
+ // functions.
+
+ // We can only enumerate the conversion functions for a complete type; if
+ // the type isn't complete, simply skip this step.
+ if (S.isCompleteType(DeclLoc, SourceType)) {
+ CXXRecordDecl *SourceRecordDecl
+ = cast<CXXRecordDecl>(SourceRecordType->getDecl());
+
+ const auto &Conversions =
+ SourceRecordDecl->getVisibleConversionFunctions();
+ for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
+ NamedDecl *D = *I;
+ CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(D->getDeclContext());
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+
+ FunctionTemplateDecl *ConvTemplate = dyn_cast<FunctionTemplateDecl>(D);
+ CXXConversionDecl *Conv;
+ if (ConvTemplate)
+ Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
+ else
+ Conv = cast<CXXConversionDecl>(D);
+
+ if (AllowExplicit || !Conv->isExplicit()) {
+ if (ConvTemplate)
+ S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(),
+ ActingDC, Initializer, DestType,
+ CandidateSet, AllowExplicit);
+ else
+ S.AddConversionCandidate(Conv, I.getPair(), ActingDC,
+ Initializer, DestType, CandidateSet,
+ AllowExplicit);
+ }
+ }
+ }
+ }
+
+ // Perform overload resolution. If it fails, return the failed result.
+ OverloadCandidateSet::iterator Best;
+ if (OverloadingResult Result
+ = CandidateSet.BestViableFunction(S, DeclLoc, Best, true)) {
+ Sequence.SetOverloadFailure(
+ InitializationSequence::FK_UserConversionOverloadFailed,
+ Result);
+ return;
+ }
+
+ FunctionDecl *Function = Best->Function;
+ Function->setReferenced();
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ if (isa<CXXConstructorDecl>(Function)) {
+ // Add the user-defined conversion step. Any cv-qualification conversion is
+ // subsumed by the initialization. Per DR5, the created temporary is of the
+ // cv-unqualified type of the destination.
+ Sequence.AddUserConversionStep(Function, Best->FoundDecl,
+ DestType.getUnqualifiedType(),
+ HadMultipleCandidates);
+ return;
+ }
+
+ // Add the user-defined conversion step that calls the conversion function.
+ QualType ConvType = Function->getCallResultType();
+ if (ConvType->getAs<RecordType>()) {
+    // If we're converting to a class type, there may be a copy of
+    // the resulting temporary object (possibly to create an object of
+ // a base class type). That copy is not a separate conversion, so
+ // we just make a note of the actual destination type (possibly a
+ // base class of the type returned by the conversion function) and
+ // let the user-defined conversion step handle the conversion.
+ Sequence.AddUserConversionStep(Function, Best->FoundDecl, DestType,
+ HadMultipleCandidates);
+ return;
+ }
+
+ Sequence.AddUserConversionStep(Function, Best->FoundDecl, ConvType,
+ HadMultipleCandidates);
+
+ // If the conversion following the call to the conversion function
+ // is interesting, add it as a separate step.
+ if (Best->FinalConversion.First || Best->FinalConversion.Second ||
+ Best->FinalConversion.Third) {
+ ImplicitConversionSequence ICS;
+ ICS.setStandard();
+ ICS.Standard = Best->FinalConversion;
+ Sequence.AddConversionSequenceStep(ICS, DestType, TopLevelOfInitList);
+ }
+}
+
+/// An egregious hack for compatibility with libstdc++-4.2: in <tr1/hashtable>,
+/// a function with a pointer return type contains a 'return false;' statement.
+/// In C++11, 'false' is not a null pointer, so this breaks the build of any
+/// code using that header.
+///
+/// Work around this by treating 'return false;' as zero-initializing the result
+/// if it's used in a pointer-returning function in a system header.
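+///
+/// Illustrative example of the pattern being worked around:
+/// \code
+///   int *f() { return false; } // in a system header: treated as returning
+///                              // a null pointer rather than rejected
+/// \endcode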
+static bool isLibstdcxxPointerReturnFalseHack(Sema &S,
+ const InitializedEntity &Entity,
+ const Expr *Init) {
+ return S.getLangOpts().CPlusPlus11 &&
+ Entity.getKind() == InitializedEntity::EK_Result &&
+ Entity.getType()->isPointerType() &&
+ isa<CXXBoolLiteralExpr>(Init) &&
+ !cast<CXXBoolLiteralExpr>(Init)->getValue() &&
+ S.getSourceManager().isInSystemHeader(Init->getExprLoc());
+}
+
+/// The non-zero enum values here are indexes into diagnostic alternatives.
+enum InvalidICRKind { IIK_okay, IIK_nonlocal, IIK_nonscalar };
+
+/// Determines whether this expression is an acceptable ICR source.
+static InvalidICRKind isInvalidICRSource(ASTContext &C, Expr *e,
+ bool isAddressOf, bool &isWeakAccess) {
+ // Skip parens.
+ e = e->IgnoreParens();
+
+ // Skip address-of nodes.
+ if (UnaryOperator *op = dyn_cast<UnaryOperator>(e)) {
+ if (op->getOpcode() == UO_AddrOf)
+ return isInvalidICRSource(C, op->getSubExpr(), /*addressof*/ true,
+ isWeakAccess);
+
+ // Skip certain casts.
+ } else if (CastExpr *ce = dyn_cast<CastExpr>(e)) {
+ switch (ce->getCastKind()) {
+ case CK_Dependent:
+ case CK_BitCast:
+ case CK_LValueBitCast:
+ case CK_NoOp:
+ return isInvalidICRSource(C, ce->getSubExpr(), isAddressOf, isWeakAccess);
+
+ case CK_ArrayToPointerDecay:
+ return IIK_nonscalar;
+
+ case CK_NullToPointer:
+ return IIK_okay;
+
+ default:
+ break;
+ }
+
+ // If we have a declaration reference, it had better be a local variable.
+ } else if (isa<DeclRefExpr>(e)) {
+    // Set isWeakAccess to true to indicate that there will be an implicit
+    // load, which requires a cleanup.
+ if (e->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
+ isWeakAccess = true;
+
+ if (!isAddressOf) return IIK_nonlocal;
+
+ VarDecl *var = dyn_cast<VarDecl>(cast<DeclRefExpr>(e)->getDecl());
+ if (!var) return IIK_nonlocal;
+
+ return (var->hasLocalStorage() ? IIK_okay : IIK_nonlocal);
+
+ // If we have a conditional operator, check both sides.
+ } else if (ConditionalOperator *cond = dyn_cast<ConditionalOperator>(e)) {
+ if (InvalidICRKind iik = isInvalidICRSource(C, cond->getLHS(), isAddressOf,
+ isWeakAccess))
+ return iik;
+
+ return isInvalidICRSource(C, cond->getRHS(), isAddressOf, isWeakAccess);
+
+ // These are never scalar.
+ } else if (isa<ArraySubscriptExpr>(e)) {
+ return IIK_nonscalar;
+
+ // Otherwise, it needs to be a null pointer constant.
+ } else {
+ return (e->isNullPointerConstant(C, Expr::NPC_ValueDependentIsNull)
+ ? IIK_okay : IIK_nonlocal);
+ }
+
+ return IIK_nonlocal;
+}
+
+/// Check whether the given expression is a valid operand for an
+/// indirect copy/restore.
+static void checkIndirectCopyRestoreSource(Sema &S, Expr *src) {
+ assert(src->isRValue());
+ bool isWeakAccess = false;
+ InvalidICRKind iik = isInvalidICRSource(S.Context, src, false, isWeakAccess);
+  // If isWeakAccess is true, there will be an implicit
+  // load, which requires a cleanup.
+ if (S.getLangOpts().ObjCAutoRefCount && isWeakAccess)
+ S.ExprNeedsCleanups = true;
+
+ if (iik == IIK_okay) return;
+
+ S.Diag(src->getExprLoc(), diag::err_arc_nonlocal_writeback)
+ << ((unsigned) iik - 1) // shift index into diagnostic explanations
+ << src->getSourceRange();
+}
+
+/// \brief Determine whether we have compatible array types for the
+/// purposes of GNU by-copy array initialization.
+static bool hasCompatibleArrayTypes(ASTContext &Context, const ArrayType *Dest,
+ const ArrayType *Source) {
+ // If the source and destination array types are equivalent, we're
+ // done.
+ if (Context.hasSameType(QualType(Dest, 0), QualType(Source, 0)))
+ return true;
+
+ // Make sure that the element types are the same.
+ if (!Context.hasSameType(Dest->getElementType(), Source->getElementType()))
+ return false;
+
+ // The only mismatch we allow is when the destination is an
+ // incomplete array type and the source is a constant array type.
+ return Source->isConstantArrayType() && Dest->isIncompleteArrayType();
+}
+
+static bool tryObjCWritebackConversion(Sema &S,
+ InitializationSequence &Sequence,
+ const InitializedEntity &Entity,
+ Expr *Initializer) {
+ bool ArrayDecay = false;
+ QualType ArgType = Initializer->getType();
+ QualType ArgPointee;
+ if (const ArrayType *ArgArrayType = S.Context.getAsArrayType(ArgType)) {
+ ArrayDecay = true;
+ ArgPointee = ArgArrayType->getElementType();
+ ArgType = S.Context.getPointerType(ArgPointee);
+ }
+
+ // Handle write-back conversion.
+ QualType ConvertedArgType;
+ if (!S.isObjCWritebackConversion(ArgType, Entity.getType(),
+ ConvertedArgType))
+ return false;
+
+ // We should copy unless we're passing to an argument explicitly
+ // marked 'out'.
+ bool ShouldCopy = true;
+ if (ParmVarDecl *param = cast_or_null<ParmVarDecl>(Entity.getDecl()))
+ ShouldCopy = (param->getObjCDeclQualifier() != ParmVarDecl::OBJC_TQ_Out);
+
+ // Do we need an lvalue conversion?
+ if (ArrayDecay || Initializer->isGLValue()) {
+ ImplicitConversionSequence ICS;
+ ICS.setStandard();
+ ICS.Standard.setAsIdentityConversion();
+
+ QualType ResultType;
+ if (ArrayDecay) {
+ ICS.Standard.First = ICK_Array_To_Pointer;
+ ResultType = S.Context.getPointerType(ArgPointee);
+ } else {
+ ICS.Standard.First = ICK_Lvalue_To_Rvalue;
+ ResultType = Initializer->getType().getNonLValueExprType(S.Context);
+ }
+
+ Sequence.AddConversionSequenceStep(ICS, ResultType);
+ }
+
+ Sequence.AddPassByIndirectCopyRestoreStep(Entity.getType(), ShouldCopy);
+ return true;
+}
+
+static bool TryOCLSamplerInitialization(Sema &S,
+ InitializationSequence &Sequence,
+ QualType DestType,
+ Expr *Initializer) {
+ if (!S.getLangOpts().OpenCL || !DestType->isSamplerT() ||
+ !Initializer->isIntegerConstantExpr(S.getASTContext()))
+ return false;
+
+ Sequence.AddOCLSamplerInitStep(DestType);
+ return true;
+}
+
+//
+// OpenCL 1.2 spec, s6.12.10
+//
+// The event argument can also be used to associate the
+// async_work_group_copy with a previous async copy allowing
+// an event to be shared by multiple async copies; otherwise
+// event should be zero.
+//
+static bool TryOCLZeroEventInitialization(Sema &S,
+ InitializationSequence &Sequence,
+ QualType DestType,
+ Expr *Initializer) {
+ if (!S.getLangOpts().OpenCL || !DestType->isEventT() ||
+ !Initializer->isIntegerConstantExpr(S.getASTContext()) ||
+ (Initializer->EvaluateKnownConstInt(S.getASTContext()) != 0))
+ return false;
+
+ Sequence.AddOCLZeroEventStep(DestType);
+ return true;
+}
+
+InitializationSequence::InitializationSequence(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ MultiExprArg Args,
+ bool TopLevelOfInitList)
+ : FailedCandidateSet(Kind.getLocation(), OverloadCandidateSet::CSK_Normal) {
+ InitializeFrom(S, Entity, Kind, Args, TopLevelOfInitList);
+}
+
+/// Tries to get a FunctionDecl out of `E`. Returns true if `E` refers to a
+/// function whose address cannot be taken; otherwise, returns false.
+static bool isExprAnUnaddressableFunction(Sema &S, const Expr *E) {
+ auto *DRE = dyn_cast<DeclRefExpr>(E);
+ if (!DRE || !isa<FunctionDecl>(DRE->getDecl()))
+ return false;
+
+ return !S.checkAddressOfFunctionIsAvailable(
+ cast<FunctionDecl>(DRE->getDecl()));
+}
+
+void InitializationSequence::InitializeFrom(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ MultiExprArg Args,
+ bool TopLevelOfInitList) {
+ ASTContext &Context = S.Context;
+
+ // Eliminate non-overload placeholder types in the arguments. We
+ // need to do this before checking whether types are dependent
+ // because lowering a pseudo-object expression might well give us
+ // something of dependent type.
+ for (unsigned I = 0, E = Args.size(); I != E; ++I)
+ if (Args[I]->getType()->isNonOverloadPlaceholderType()) {
+ // FIXME: should we be doing this here?
+ ExprResult result = S.CheckPlaceholderExpr(Args[I]);
+ if (result.isInvalid()) {
+ SetFailed(FK_PlaceholderType);
+ return;
+ }
+ Args[I] = result.get();
+ }
+
+ // C++0x [dcl.init]p16:
+ // The semantics of initializers are as follows. The destination type is
+ // the type of the object or reference being initialized and the source
+ // type is the type of the initializer expression. The source type is not
+ // defined when the initializer is a braced-init-list or when it is a
+ // parenthesized list of expressions.
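+  // For example (illustrative): in 'int x = {1};' the destination type is
+  // 'int', and the source type is undefined because the initializer is a
+  // braced-init-list.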
+ QualType DestType = Entity.getType();
+
+ if (DestType->isDependentType() ||
+ Expr::hasAnyTypeDependentArguments(Args)) {
+ SequenceKind = DependentSequence;
+ return;
+ }
+
+ // Almost everything is a normal sequence.
+ setSequenceKind(NormalSequence);
+
+ QualType SourceType;
+ Expr *Initializer = nullptr;
+ if (Args.size() == 1) {
+ Initializer = Args[0];
+ if (S.getLangOpts().ObjC1) {
+ if (S.CheckObjCBridgeRelatedConversions(Initializer->getLocStart(),
+ DestType, Initializer->getType(),
+ Initializer) ||
+ S.ConversionToObjCStringLiteralCheck(DestType, Initializer))
+ Args[0] = Initializer;
+ }
+ if (!isa<InitListExpr>(Initializer))
+ SourceType = Initializer->getType();
+ }
+
+ // - If the initializer is a (non-parenthesized) braced-init-list, the
+ // object is list-initialized (8.5.4).
+ if (Kind.getKind() != InitializationKind::IK_Direct) {
+ if (InitListExpr *InitList = dyn_cast_or_null<InitListExpr>(Initializer)) {
+ TryListInitialization(S, Entity, Kind, InitList, *this);
+ return;
+ }
+ }
+
+ // - If the destination type is a reference type, see 8.5.3.
+ if (DestType->isReferenceType()) {
+ // C++0x [dcl.init.ref]p1:
+ // A variable declared to be a T& or T&&, that is, "reference to type T"
+ // (8.3.2), shall be initialized by an object, or function, of type T or
+ // by an object that can be converted into a T.
+ // (Therefore, multiple arguments are not permitted.)
+ if (Args.size() != 1)
+ SetFailed(FK_TooManyInitsForReference);
+ else
+ TryReferenceInitialization(S, Entity, Kind, Args[0], *this);
+ return;
+ }
+
+ // - If the initializer is (), the object is value-initialized.
+ if (Kind.getKind() == InitializationKind::IK_Value ||
+ (Kind.getKind() == InitializationKind::IK_Direct && Args.empty())) {
+ TryValueInitialization(S, Entity, Kind, *this);
+ return;
+ }
+
+ // Handle default initialization.
+ if (Kind.getKind() == InitializationKind::IK_Default) {
+ TryDefaultInitialization(S, Entity, Kind, *this);
+ return;
+ }
+
+ // - If the destination type is an array of characters, an array of
+ // char16_t, an array of char32_t, or an array of wchar_t, and the
+ // initializer is a string literal, see 8.5.2.
+ // - Otherwise, if the destination type is an array, the program is
+ // ill-formed.
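+  // For example (illustrative): 'char s[] = "hi";' is string-literal
+  // initialization, while 'int a[2] = b;' (with 'int b[2];') is
+  // ill-formed.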
+ if (const ArrayType *DestAT = Context.getAsArrayType(DestType)) {
+ if (Initializer && isa<VariableArrayType>(DestAT)) {
+ SetFailed(FK_VariableLengthArrayHasInitializer);
+ return;
+ }
+
+ if (Initializer) {
+ switch (IsStringInit(Initializer, DestAT, Context)) {
+ case SIF_None:
+ TryStringLiteralInitialization(S, Entity, Kind, Initializer, *this);
+ return;
+ case SIF_NarrowStringIntoWideChar:
+ SetFailed(FK_NarrowStringIntoWideCharArray);
+ return;
+ case SIF_WideStringIntoChar:
+ SetFailed(FK_WideStringIntoCharArray);
+ return;
+ case SIF_IncompatWideStringIntoWideChar:
+ SetFailed(FK_IncompatWideStringIntoWideChar);
+ return;
+ case SIF_Other:
+ break;
+ }
+ }
+
+    // Note: as a GNU C extension, we allow initialization of an
+ // array from a compound literal that creates an array of the same
+ // type, so long as the initializer has no side effects.
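+    // For example (illustrative, GNU C):
+    //   int a[] = (int[]){1, 2, 3}; // compound literal of the same
+    //                               // element type, no side effects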
+ if (!S.getLangOpts().CPlusPlus && Initializer &&
+ isa<CompoundLiteralExpr>(Initializer->IgnoreParens()) &&
+ Initializer->getType()->isArrayType()) {
+ const ArrayType *SourceAT
+ = Context.getAsArrayType(Initializer->getType());
+ if (!hasCompatibleArrayTypes(S.Context, DestAT, SourceAT))
+ SetFailed(FK_ArrayTypeMismatch);
+ else if (Initializer->HasSideEffects(S.Context))
+ SetFailed(FK_NonConstantArrayInit);
+ else {
+ AddArrayInitStep(DestType);
+ }
+ }
+ // Note: as a GNU C++ extension, we allow list-initialization of a
+ // class member of array type from a parenthesized initializer list.
+ else if (S.getLangOpts().CPlusPlus &&
+ Entity.getKind() == InitializedEntity::EK_Member &&
+ Initializer && isa<InitListExpr>(Initializer)) {
+ TryListInitialization(S, Entity, Kind, cast<InitListExpr>(Initializer),
+ *this);
+ AddParenthesizedArrayInitStep(DestType);
+ } else if (DestAT->getElementType()->isCharType())
+ SetFailed(FK_ArrayNeedsInitListOrStringLiteral);
+ else if (IsWideCharCompatible(DestAT->getElementType(), Context))
+ SetFailed(FK_ArrayNeedsInitListOrWideStringLiteral);
+ else
+ SetFailed(FK_ArrayNeedsInitList);
+
+ return;
+ }
+
+ // Determine whether we should consider writeback conversions for
+ // Objective-C ARC.
+ bool allowObjCWritebackConversion = S.getLangOpts().ObjCAutoRefCount &&
+ Entity.isParameterKind();
+
+ // We're at the end of the line for C: it's either a write-back conversion
+ // or it's a C assignment. There's no need to check anything else.
+ if (!S.getLangOpts().CPlusPlus) {
+ // If allowed, check whether this is an Objective-C writeback conversion.
+ if (allowObjCWritebackConversion &&
+ tryObjCWritebackConversion(S, *this, Entity, Initializer)) {
+ return;
+ }
+
+ if (TryOCLSamplerInitialization(S, *this, DestType, Initializer))
+ return;
+
+ if (TryOCLZeroEventInitialization(S, *this, DestType, Initializer))
+ return;
+
+ // Handle initialization in C
+ AddCAssignmentStep(DestType);
+ MaybeProduceObjCObject(S, *this, Entity);
+ return;
+ }
+
+ assert(S.getLangOpts().CPlusPlus);
+
+ // - If the destination type is a (possibly cv-qualified) class type:
+ if (DestType->isRecordType()) {
+ // - If the initialization is direct-initialization, or if it is
+ // copy-initialization where the cv-unqualified version of the
+ // source type is the same class as, or a derived class of, the
+ // class of the destination, constructors are considered. [...]
+ if (Kind.getKind() == InitializationKind::IK_Direct ||
+ (Kind.getKind() == InitializationKind::IK_Copy &&
+ (Context.hasSameUnqualifiedType(SourceType, DestType) ||
+ S.IsDerivedFrom(Initializer->getLocStart(), SourceType, DestType))))
+ TryConstructorInitialization(S, Entity, Kind, Args,
+ DestType, *this);
+ // - Otherwise (i.e., for the remaining copy-initialization cases),
+ // user-defined conversion sequences that can convert from the source
+ // type to the destination type or (when a conversion function is
+ // used) to a derived class thereof are enumerated as described in
+ // 13.3.1.4, and the best one is chosen through overload resolution
+ // (13.3).
+ else
+ TryUserDefinedConversion(S, DestType, Kind, Initializer, *this,
+ TopLevelOfInitList);
+ return;
+ }
+
+ if (Args.size() > 1) {
+ SetFailed(FK_TooManyInitsForScalar);
+ return;
+ }
+ assert(Args.size() == 1 && "Zero-argument case handled above");
+
+ // - Otherwise, if the source type is a (possibly cv-qualified) class
+ // type, conversion functions are considered.
+ if (!SourceType.isNull() && SourceType->isRecordType()) {
+ // For a conversion to _Atomic(T) from either T or a class type derived
+ // from T, initialize the T object then convert to _Atomic type.
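+    // For example (illustrative, using Clang's C++ _Atomic extension):
+    // given 'struct D : B {};', initializing '_Atomic(B)' from a D first
+    // initializes a B from the D object, then converts to the atomic type.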
+ bool NeedAtomicConversion = false;
+ if (const AtomicType *Atomic = DestType->getAs<AtomicType>()) {
+ if (Context.hasSameUnqualifiedType(SourceType, Atomic->getValueType()) ||
+ S.IsDerivedFrom(Initializer->getLocStart(), SourceType,
+ Atomic->getValueType())) {
+ DestType = Atomic->getValueType();
+ NeedAtomicConversion = true;
+ }
+ }
+
+ TryUserDefinedConversion(S, DestType, Kind, Initializer, *this,
+ TopLevelOfInitList);
+ MaybeProduceObjCObject(S, *this, Entity);
+ if (!Failed() && NeedAtomicConversion)
+ AddAtomicConversionStep(Entity.getType());
+ return;
+ }
+
+ // - Otherwise, the initial value of the object being initialized is the
+ // (possibly converted) value of the initializer expression. Standard
+ // conversions (Clause 4) will be used, if necessary, to convert the
+ // initializer expression to the cv-unqualified version of the
+ // destination type; no user-defined conversions are considered.
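+  // For example (illustrative): 'double d = 1;' uses only the standard
+  // int-to-double conversion.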
+
+ ImplicitConversionSequence ICS
+ = S.TryImplicitConversion(Initializer, DestType,
+ /*SuppressUserConversions*/true,
+ /*AllowExplicitConversions*/ false,
+ /*InOverloadResolution*/ false,
+ /*CStyle=*/Kind.isCStyleOrFunctionalCast(),
+ allowObjCWritebackConversion);
+
+ if (ICS.isStandard() &&
+ ICS.Standard.Second == ICK_Writeback_Conversion) {
+ // Objective-C ARC writeback conversion.
+
+ // We should copy unless we're passing to an argument explicitly
+ // marked 'out'.
+ bool ShouldCopy = true;
+ if (ParmVarDecl *Param = cast_or_null<ParmVarDecl>(Entity.getDecl()))
+ ShouldCopy = (Param->getObjCDeclQualifier() != ParmVarDecl::OBJC_TQ_Out);
+
+ // If there was an lvalue adjustment, add it as a separate conversion.
+ if (ICS.Standard.First == ICK_Array_To_Pointer ||
+ ICS.Standard.First == ICK_Lvalue_To_Rvalue) {
+ ImplicitConversionSequence LvalueICS;
+ LvalueICS.setStandard();
+ LvalueICS.Standard.setAsIdentityConversion();
+ LvalueICS.Standard.setAllToTypes(ICS.Standard.getToType(0));
+ LvalueICS.Standard.First = ICS.Standard.First;
+ AddConversionSequenceStep(LvalueICS, ICS.Standard.getToType(0));
+ }
+
+ AddPassByIndirectCopyRestoreStep(DestType, ShouldCopy);
+ } else if (ICS.isBad()) {
+ DeclAccessPair dap;
+ if (isLibstdcxxPointerReturnFalseHack(S, Entity, Initializer)) {
+ AddZeroInitializationStep(Entity.getType());
+ } else if (Initializer->getType() == Context.OverloadTy &&
+ !S.ResolveAddressOfOverloadedFunction(Initializer, DestType,
+ false, dap))
+ SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
+ else if (Initializer->getType()->isFunctionType() &&
+ isExprAnUnaddressableFunction(S, Initializer))
+ SetFailed(InitializationSequence::FK_AddressOfUnaddressableFunction);
+ else
+ SetFailed(InitializationSequence::FK_ConversionFailed);
+ } else {
+ AddConversionSequenceStep(ICS, DestType, TopLevelOfInitList);
+
+ MaybeProduceObjCObject(S, *this, Entity);
+ }
+}
+
+InitializationSequence::~InitializationSequence() {
+ for (auto &S : Steps)
+ S.Destroy();
+}
+
+//===----------------------------------------------------------------------===//
+// Perform initialization
+//===----------------------------------------------------------------------===//
+static Sema::AssignmentAction
+getAssignmentAction(const InitializedEntity &Entity, bool Diagnose = false) {
+ switch(Entity.getKind()) {
+ case InitializedEntity::EK_Variable:
+ case InitializedEntity::EK_New:
+ case InitializedEntity::EK_Exception:
+ case InitializedEntity::EK_Base:
+ case InitializedEntity::EK_Delegating:
+ return Sema::AA_Initializing;
+
+ case InitializedEntity::EK_Parameter:
+ if (Entity.getDecl() &&
+ isa<ObjCMethodDecl>(Entity.getDecl()->getDeclContext()))
+ return Sema::AA_Sending;
+
+ return Sema::AA_Passing;
+
+ case InitializedEntity::EK_Parameter_CF_Audited:
+ if (Entity.getDecl() &&
+ isa<ObjCMethodDecl>(Entity.getDecl()->getDeclContext()))
+ return Sema::AA_Sending;
+
+ return !Diagnose ? Sema::AA_Passing : Sema::AA_Passing_CFAudited;
+
+ case InitializedEntity::EK_Result:
+ return Sema::AA_Returning;
+
+ case InitializedEntity::EK_Temporary:
+ case InitializedEntity::EK_RelatedResult:
+ // FIXME: Can we tell apart casting vs. converting?
+ return Sema::AA_Casting;
+
+ case InitializedEntity::EK_Member:
+ case InitializedEntity::EK_ArrayElement:
+ case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_ComplexElement:
+ case InitializedEntity::EK_BlockElement:
+ case InitializedEntity::EK_LambdaCapture:
+ case InitializedEntity::EK_CompoundLiteralInit:
+ return Sema::AA_Initializing;
+ }
+
+ llvm_unreachable("Invalid EntityKind!");
+}
+
+/// \brief Whether we should bind a created object as a temporary when
+/// initializing the given entity.
+static bool shouldBindAsTemporary(const InitializedEntity &Entity) {
+ switch (Entity.getKind()) {
+ case InitializedEntity::EK_ArrayElement:
+ case InitializedEntity::EK_Member:
+ case InitializedEntity::EK_Result:
+ case InitializedEntity::EK_New:
+ case InitializedEntity::EK_Variable:
+ case InitializedEntity::EK_Base:
+ case InitializedEntity::EK_Delegating:
+ case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_ComplexElement:
+ case InitializedEntity::EK_Exception:
+ case InitializedEntity::EK_BlockElement:
+ case InitializedEntity::EK_LambdaCapture:
+ case InitializedEntity::EK_CompoundLiteralInit:
+ return false;
+
+ case InitializedEntity::EK_Parameter:
+ case InitializedEntity::EK_Parameter_CF_Audited:
+ case InitializedEntity::EK_Temporary:
+ case InitializedEntity::EK_RelatedResult:
+ return true;
+ }
+
+ llvm_unreachable("missed an InitializedEntity kind?");
+}
+
+/// \brief Whether the given entity, when initialized with an object
+/// created for that initialization, requires destruction.
+static bool shouldDestroyTemporary(const InitializedEntity &Entity) {
+ switch (Entity.getKind()) {
+ case InitializedEntity::EK_Result:
+ case InitializedEntity::EK_New:
+ case InitializedEntity::EK_Base:
+ case InitializedEntity::EK_Delegating:
+ case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_ComplexElement:
+ case InitializedEntity::EK_BlockElement:
+ case InitializedEntity::EK_LambdaCapture:
+ return false;
+
+ case InitializedEntity::EK_Member:
+ case InitializedEntity::EK_Variable:
+ case InitializedEntity::EK_Parameter:
+ case InitializedEntity::EK_Parameter_CF_Audited:
+ case InitializedEntity::EK_Temporary:
+ case InitializedEntity::EK_ArrayElement:
+ case InitializedEntity::EK_Exception:
+ case InitializedEntity::EK_CompoundLiteralInit:
+ case InitializedEntity::EK_RelatedResult:
+ return true;
+ }
+
+ llvm_unreachable("missed an InitializedEntity kind?");
+}
+
+/// \brief Look for copy and move constructors and constructor templates, for
+/// copying an object via direct-initialization (per C++11 [dcl.init]p16).
+static void LookupCopyAndMoveConstructors(Sema &S,
+ OverloadCandidateSet &CandidateSet,
+ CXXRecordDecl *Class,
+ Expr *CurInitExpr) {
+ DeclContext::lookup_result R = S.LookupConstructors(Class);
+ // The container holding the constructors can under certain conditions
+ // be changed while iterating (e.g. because of deserialization).
+ // To be safe we copy the lookup results to a new container.
+ SmallVector<NamedDecl*, 16> Ctors(R.begin(), R.end());
+ for (SmallVectorImpl<NamedDecl *>::iterator
+ CI = Ctors.begin(), CE = Ctors.end(); CI != CE; ++CI) {
+ NamedDecl *D = *CI;
+ CXXConstructorDecl *Constructor = nullptr;
+
+ if ((Constructor = dyn_cast<CXXConstructorDecl>(D))) {
+ // Handle copy/move constructors only.
+ if (!Constructor || Constructor->isInvalidDecl() ||
+ !Constructor->isCopyOrMoveConstructor() ||
+ !Constructor->isConvertingConstructor(/*AllowExplicit=*/true))
+ continue;
+
+ DeclAccessPair FoundDecl
+ = DeclAccessPair::make(Constructor, Constructor->getAccess());
+ S.AddOverloadCandidate(Constructor, FoundDecl,
+ CurInitExpr, CandidateSet);
+ continue;
+ }
+
+ // Handle constructor templates.
+ FunctionTemplateDecl *ConstructorTmpl = cast<FunctionTemplateDecl>(D);
+ if (ConstructorTmpl->isInvalidDecl())
+ continue;
+
+ Constructor = cast<CXXConstructorDecl>(
+ ConstructorTmpl->getTemplatedDecl());
+ if (!Constructor->isConvertingConstructor(/*AllowExplicit=*/true))
+ continue;
+
+ // FIXME: Do we need to limit this to copy-constructor-like
+ // candidates?
+ DeclAccessPair FoundDecl
+ = DeclAccessPair::make(ConstructorTmpl, ConstructorTmpl->getAccess());
+ S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl, nullptr,
+ CurInitExpr, CandidateSet, true);
+ }
+}
+
+/// \brief Get the location at which initialization diagnostics should appear.
+static SourceLocation getInitializationLoc(const InitializedEntity &Entity,
+ Expr *Initializer) {
+ switch (Entity.getKind()) {
+ case InitializedEntity::EK_Result:
+ return Entity.getReturnLoc();
+
+ case InitializedEntity::EK_Exception:
+ return Entity.getThrowLoc();
+
+ case InitializedEntity::EK_Variable:
+ return Entity.getDecl()->getLocation();
+
+ case InitializedEntity::EK_LambdaCapture:
+ return Entity.getCaptureLoc();
+
+ case InitializedEntity::EK_ArrayElement:
+ case InitializedEntity::EK_Member:
+ case InitializedEntity::EK_Parameter:
+ case InitializedEntity::EK_Parameter_CF_Audited:
+ case InitializedEntity::EK_Temporary:
+ case InitializedEntity::EK_New:
+ case InitializedEntity::EK_Base:
+ case InitializedEntity::EK_Delegating:
+ case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_ComplexElement:
+ case InitializedEntity::EK_BlockElement:
+ case InitializedEntity::EK_CompoundLiteralInit:
+ case InitializedEntity::EK_RelatedResult:
+ return Initializer->getLocStart();
+ }
+ llvm_unreachable("missed an InitializedEntity kind?");
+}
+
+/// \brief Make a (potentially elidable) temporary copy of the object
+/// provided by the given initializer by calling the appropriate copy
+/// constructor.
+///
+/// \param S The Sema object used for type-checking.
+///
+/// \param T The type of the temporary object, which must either be
+/// the type of the initializer expression or a superclass thereof.
+///
+/// \param Entity The entity being initialized.
+///
+/// \param CurInit The initializer expression.
+///
+/// \param IsExtraneousCopy Whether this is an "extraneous" copy that
+/// is permitted in C++03 (but not C++0x) when binding a reference to
+/// an rvalue.
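+/// (For example, C++03 'const X &r = X();' formally requires an accessible
+/// copy constructor even though the copy itself is elided.)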
+///
+/// \returns An expression that copies the initializer expression into
+/// a temporary object, or an error expression if a copy could not be
+/// created.
+static ExprResult CopyObject(Sema &S,
+ QualType T,
+ const InitializedEntity &Entity,
+ ExprResult CurInit,
+ bool IsExtraneousCopy) {
+ if (CurInit.isInvalid())
+ return CurInit;
+ // Determine which class type we're copying to.
+ Expr *CurInitExpr = (Expr *)CurInit.get();
+ CXXRecordDecl *Class = nullptr;
+ if (const RecordType *Record = T->getAs<RecordType>())
+ Class = cast<CXXRecordDecl>(Record->getDecl());
+ if (!Class)
+ return CurInit;
+
+ // C++0x [class.copy]p32:
+ // When certain criteria are met, an implementation is allowed to
+ // omit the copy/move construction of a class object, even if the
+ // copy/move constructor and/or destructor for the object have
+ // side effects. [...]
+ // - when a temporary class object that has not been bound to a
+ // reference (12.2) would be copied/moved to a class object
+ // with the same cv-unqualified type, the copy/move operation
+ // can be omitted by constructing the temporary object
+ // directly into the target of the omitted copy/move
+ //
+ // Note that the other three bullets are handled elsewhere. Copy
+ // elision for return statements and throw expressions is handled as part
+ // of constructor initialization, while copy elision for exception
+ // handlers is handled by the run-time.
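+ // (e.g., in 'X x = X();' the temporary may be constructed directly into
+ // 'x', omitting the copy/move entirely.)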
+ bool Elidable = CurInitExpr->isTemporaryObject(S.Context, Class);
+ SourceLocation Loc = getInitializationLoc(Entity, CurInit.get());
+
+ // Make sure that the type we are copying is complete.
+ if (S.RequireCompleteType(Loc, T, diag::err_temp_copy_incomplete))
+ return CurInit;
+
+ // Perform overload resolution using the class's copy/move constructors.
+ // Only consider constructors and constructor templates. Per
+ // C++0x [dcl.init]p16, second bullet for class types, this initialization
+ // is direct-initialization.
+ OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
+ LookupCopyAndMoveConstructors(S, CandidateSet, Class, CurInitExpr);
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(S, Loc, Best)) {
+ case OR_Success:
+ break;
+
+ case OR_No_Viable_Function:
+ S.Diag(Loc, IsExtraneousCopy && !S.isSFINAEContext()
+ ? diag::ext_rvalue_to_reference_temp_copy_no_viable
+ : diag::err_temp_copy_no_viable)
+ << (int)Entity.getKind() << CurInitExpr->getType()
+ << CurInitExpr->getSourceRange();
+ CandidateSet.NoteCandidates(S, OCD_AllCandidates, CurInitExpr);
+ if (!IsExtraneousCopy || S.isSFINAEContext())
+ return ExprError();
+ return CurInit;
+
+ case OR_Ambiguous:
+ S.Diag(Loc, diag::err_temp_copy_ambiguous)
+ << (int)Entity.getKind() << CurInitExpr->getType()
+ << CurInitExpr->getSourceRange();
+ CandidateSet.NoteCandidates(S, OCD_ViableCandidates, CurInitExpr);
+ return ExprError();
+
+ case OR_Deleted:
+ S.Diag(Loc, diag::err_temp_copy_deleted)
+ << (int)Entity.getKind() << CurInitExpr->getType()
+ << CurInitExpr->getSourceRange();
+ S.NoteDeletedFunction(Best->Function);
+ return ExprError();
+ }
+
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Best->Function);
+ SmallVector<Expr*, 8> ConstructorArgs;
+ CurInit.get(); // Ownership transferred into MultiExprArg, below.
+
+ S.CheckConstructorAccess(Loc, Constructor, Entity,
+ Best->FoundDecl.getAccess(), IsExtraneousCopy);
+
+ if (IsExtraneousCopy) {
+ // If this is a totally extraneous copy for C++03 reference
+ // binding purposes, just return the original initialization
+ // expression. We don't generate an (elided) copy operation here
+ // because doing so would require us to pass down a flag to avoid
+ // infinite recursion, where each step adds another extraneous,
+ // elidable copy.
+
+ // Instantiate the default arguments of any extra parameters in
+ // the selected copy constructor, as if we were going to create a
+ // proper call to the copy constructor.
+ for (unsigned I = 1, N = Constructor->getNumParams(); I != N; ++I) {
+ ParmVarDecl *Parm = Constructor->getParamDecl(I);
+ if (S.RequireCompleteType(Loc, Parm->getType(),
+ diag::err_call_incomplete_argument))
+ break;
+
+ // Build the default argument expression; we don't actually care
+ // if this succeeds or not, because this routine will complain
+ // if there was a problem.
+ S.BuildCXXDefaultArgExpr(Loc, Constructor, Parm);
+ }
+
+ return CurInitExpr;
+ }
+
+ // Determine the arguments required to actually perform the
+ // constructor call (we might have derived-to-base conversions, or
+ // the copy constructor may have default arguments).
+ if (S.CompleteConstructorCall(Constructor, CurInitExpr, Loc, ConstructorArgs))
+ return ExprError();
+
+ // Actually perform the constructor call.
+ CurInit = S.BuildCXXConstructExpr(Loc, T, Constructor, Elidable,
+ ConstructorArgs,
+ HadMultipleCandidates,
+ /*ListInit*/ false,
+ /*StdInitListInit*/ false,
+ /*ZeroInit*/ false,
+ CXXConstructExpr::CK_Complete,
+ SourceRange());
+
+ // If we're supposed to bind temporaries, do so.
+ if (!CurInit.isInvalid() && shouldBindAsTemporary(Entity))
+ CurInit = S.MaybeBindToTemporary(CurInit.getAs<Expr>());
+ return CurInit;
+}
+
+/// \brief Check whether elidable copy construction for binding a reference to
+/// a temporary would have succeeded if we were building in C++98 mode, for
+/// -Wc++98-compat.
+static void CheckCXX98CompatAccessibleCopy(Sema &S,
+ const InitializedEntity &Entity,
+ Expr *CurInitExpr) {
+ assert(S.getLangOpts().CPlusPlus11);
+
+ const RecordType *Record = CurInitExpr->getType()->getAs<RecordType>();
+ if (!Record)
+ return;
+
+ SourceLocation Loc = getInitializationLoc(Entity, CurInitExpr);
+ if (S.Diags.isIgnored(diag::warn_cxx98_compat_temp_copy, Loc))
+ return;
+
+ // Find constructors which would have been considered.
+ OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
+ LookupCopyAndMoveConstructors(
+ S, CandidateSet, cast<CXXRecordDecl>(Record->getDecl()), CurInitExpr);
+
+ // Perform overload resolution.
+ OverloadCandidateSet::iterator Best;
+ OverloadingResult OR = CandidateSet.BestViableFunction(S, Loc, Best);
+
+ PartialDiagnostic Diag = S.PDiag(diag::warn_cxx98_compat_temp_copy)
+ << OR << (int)Entity.getKind() << CurInitExpr->getType()
+ << CurInitExpr->getSourceRange();
+
+ switch (OR) {
+ case OR_Success:
+ S.CheckConstructorAccess(Loc, cast<CXXConstructorDecl>(Best->Function),
+ Entity, Best->FoundDecl.getAccess(), Diag);
+ // FIXME: Check default arguments as far as that's possible.
+ break;
+
+ case OR_No_Viable_Function:
+ S.Diag(Loc, Diag);
+ CandidateSet.NoteCandidates(S, OCD_AllCandidates, CurInitExpr);
+ break;
+
+ case OR_Ambiguous:
+ S.Diag(Loc, Diag);
+ CandidateSet.NoteCandidates(S, OCD_ViableCandidates, CurInitExpr);
+ break;
+
+ case OR_Deleted:
+ S.Diag(Loc, Diag);
+ S.NoteDeletedFunction(Best->Function);
+ break;
+ }
+}
+
+void InitializationSequence::PrintInitLocationNote(Sema &S,
+ const InitializedEntity &Entity) {
+ if (Entity.isParameterKind() && Entity.getDecl()) {
+ if (Entity.getDecl()->getLocation().isInvalid())
+ return;
+
+ if (Entity.getDecl()->getDeclName())
+ S.Diag(Entity.getDecl()->getLocation(), diag::note_parameter_named_here)
+ << Entity.getDecl()->getDeclName();
+ else
+ S.Diag(Entity.getDecl()->getLocation(), diag::note_parameter_here);
+ }
+ else if (Entity.getKind() == InitializedEntity::EK_RelatedResult &&
+ Entity.getMethodDecl())
+ S.Diag(Entity.getMethodDecl()->getLocation(),
+ diag::note_method_return_type_change)
+ << Entity.getMethodDecl()->getDeclName();
+}
+
+static bool isReferenceBinding(const InitializationSequence::Step &s) {
+ return s.Kind == InitializationSequence::SK_BindReference ||
+ s.Kind == InitializationSequence::SK_BindReferenceToTemporary;
+}
+
+/// Returns true if the parameters describe a constructor initialization of
+/// an explicit temporary object, e.g. "Point(x, y)".
+static bool isExplicitTemporary(const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ unsigned NumArgs) {
+ switch (Entity.getKind()) {
+ case InitializedEntity::EK_Temporary:
+ case InitializedEntity::EK_CompoundLiteralInit:
+ case InitializedEntity::EK_RelatedResult:
+ break;
+ default:
+ return false;
+ }
+
+ switch (Kind.getKind()) {
+ case InitializationKind::IK_DirectList:
+ return true;
+ // FIXME: Hack to work around cast weirdness.
+ case InitializationKind::IK_Direct:
+ case InitializationKind::IK_Value:
+ return NumArgs != 1;
+ default:
+ return false;
+ }
+}
+
+static ExprResult
+PerformConstructorInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ MultiExprArg Args,
+ const InitializationSequence::Step& Step,
+ bool &ConstructorInitRequiresZeroInit,
+ bool IsListInitialization,
+ bool IsStdInitListInitialization,
+ SourceLocation LBraceLoc,
+ SourceLocation RBraceLoc) {
+ unsigned NumArgs = Args.size();
+ CXXConstructorDecl *Constructor
+ = cast<CXXConstructorDecl>(Step.Function.Function);
+ bool HadMultipleCandidates = Step.Function.HadMultipleCandidates;
+
+ // Build a call to the selected constructor.
+ SmallVector<Expr*, 8> ConstructorArgs;
+ SourceLocation Loc = (Kind.isCopyInit() && Kind.getEqualLoc().isValid())
+ ? Kind.getEqualLoc()
+ : Kind.getLocation();
+
+ if (Kind.getKind() == InitializationKind::IK_Default) {
+ // Force even a trivial, implicit default constructor to be
+ // semantically checked. We do this explicitly because we don't build
+ // the definition for completely trivial constructors.
+ assert(Constructor->getParent() && "No parent class for constructor.");
+ if (Constructor->isDefaulted() && Constructor->isDefaultConstructor() &&
+ Constructor->isTrivial() && !Constructor->isUsed(false))
+ S.DefineImplicitDefaultConstructor(Loc, Constructor);
+ }
+
+ ExprResult CurInit((Expr *)nullptr);
+
+ // C++ [over.match.copy]p1:
+ // - When initializing a temporary to be bound to the first parameter
+ // of a constructor that takes a reference to possibly cv-qualified
+ // T as its first argument, called with a single argument in the
+ // context of direct-initialization, explicit conversion functions
+ // are also considered.
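+ // (Illustrative sketch: given 'struct X { X(const X&); };' and
+ // 'struct Y { explicit operator X(); };', the direct-initialization
+ // 'X x(Y());' may use the explicit conversion function here.)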
+ bool AllowExplicitConv = Kind.AllowExplicit() && !Kind.isCopyInit() &&
+ Args.size() == 1 &&
+ Constructor->isCopyOrMoveConstructor();
+
+ // Determine the arguments required to actually perform the constructor
+ // call.
+ if (S.CompleteConstructorCall(Constructor, Args,
+ Loc, ConstructorArgs,
+ AllowExplicitConv,
+ IsListInitialization))
+ return ExprError();
+
+ if (isExplicitTemporary(Entity, Kind, NumArgs)) {
+ // An explicitly-constructed temporary, e.g., X(1, 2).
+ S.MarkFunctionReferenced(Loc, Constructor);
+ if (S.DiagnoseUseOfDecl(Constructor, Loc))
+ return ExprError();
+
+ TypeSourceInfo *TSInfo = Entity.getTypeSourceInfo();
+ if (!TSInfo)
+ TSInfo = S.Context.getTrivialTypeSourceInfo(Entity.getType(), Loc);
+ SourceRange ParenOrBraceRange =
+ (Kind.getKind() == InitializationKind::IK_DirectList)
+ ? SourceRange(LBraceLoc, RBraceLoc)
+ : Kind.getParenRange();
+
+ CurInit = new (S.Context) CXXTemporaryObjectExpr(
+ S.Context, Constructor, TSInfo, ConstructorArgs, ParenOrBraceRange,
+ HadMultipleCandidates, IsListInitialization,
+ IsStdInitListInitialization, ConstructorInitRequiresZeroInit);
+ } else {
+ CXXConstructExpr::ConstructionKind ConstructKind =
+ CXXConstructExpr::CK_Complete;
+
+ if (Entity.getKind() == InitializedEntity::EK_Base) {
+ ConstructKind = Entity.getBaseSpecifier()->isVirtual() ?
+ CXXConstructExpr::CK_VirtualBase :
+ CXXConstructExpr::CK_NonVirtualBase;
+ } else if (Entity.getKind() == InitializedEntity::EK_Delegating) {
+ ConstructKind = CXXConstructExpr::CK_Delegating;
+ }
+
+ // Only get the parenthesis or brace range if it is a list initialization or
+ // direct construction.
+ SourceRange ParenOrBraceRange;
+ if (IsListInitialization)
+ ParenOrBraceRange = SourceRange(LBraceLoc, RBraceLoc);
+ else if (Kind.getKind() == InitializationKind::IK_Direct)
+ ParenOrBraceRange = Kind.getParenRange();
+
+ // If the entity allows NRVO, mark the construction as elidable
+ // unconditionally.
+ if (Entity.allowsNRVO())
+ CurInit = S.BuildCXXConstructExpr(Loc, Entity.getType(),
+ Constructor, /*Elidable=*/true,
+ ConstructorArgs,
+ HadMultipleCandidates,
+ IsListInitialization,
+ IsStdInitListInitialization,
+ ConstructorInitRequiresZeroInit,
+ ConstructKind,
+ ParenOrBraceRange);
+ else
+ CurInit = S.BuildCXXConstructExpr(Loc, Entity.getType(),
+ Constructor,
+ ConstructorArgs,
+ HadMultipleCandidates,
+ IsListInitialization,
+ IsStdInitListInitialization,
+ ConstructorInitRequiresZeroInit,
+ ConstructKind,
+ ParenOrBraceRange);
+ }
+ if (CurInit.isInvalid())
+ return ExprError();
+
+ // Only check access if all of that succeeded.
+ S.CheckConstructorAccess(Loc, Constructor, Entity,
+ Step.Function.FoundDecl.getAccess());
+ if (S.DiagnoseUseOfDecl(Step.Function.FoundDecl, Loc))
+ return ExprError();
+
+ if (shouldBindAsTemporary(Entity))
+ CurInit = S.MaybeBindToTemporary(CurInit.get());
+
+ return CurInit;
+}
+
+/// Determine whether the specified InitializedEntity definitely has a lifetime
+/// longer than the current full-expression. Conservatively returns false if
+/// it's unclear.
+static bool
+InitializedEntityOutlivesFullExpression(const InitializedEntity &Entity) {
+ const InitializedEntity *Top = &Entity;
+ while (Top->getParent())
+ Top = Top->getParent();
+
+ switch (Top->getKind()) {
+ case InitializedEntity::EK_Variable:
+ case InitializedEntity::EK_Result:
+ case InitializedEntity::EK_Exception:
+ case InitializedEntity::EK_Member:
+ case InitializedEntity::EK_New:
+ case InitializedEntity::EK_Base:
+ case InitializedEntity::EK_Delegating:
+ return true;
+
+ case InitializedEntity::EK_ArrayElement:
+ case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_BlockElement:
+ case InitializedEntity::EK_ComplexElement:
+ // Could not determine what the full initialization is. Assume it might not
+ // outlive the full-expression.
+ return false;
+
+ case InitializedEntity::EK_Parameter:
+ case InitializedEntity::EK_Parameter_CF_Audited:
+ case InitializedEntity::EK_Temporary:
+ case InitializedEntity::EK_LambdaCapture:
+ case InitializedEntity::EK_CompoundLiteralInit:
+ case InitializedEntity::EK_RelatedResult:
+ // The entity being initialized might not outlive the full-expression.
+ return false;
+ }
+
+ llvm_unreachable("unknown entity kind");
+}
+
+/// Determine the declaration which an initialized entity ultimately refers to,
+/// for the purpose of lifetime-extending a temporary bound to a reference in
+/// the initialization of \p Entity.
+static const InitializedEntity *getEntityForTemporaryLifetimeExtension(
+ const InitializedEntity *Entity,
+ const InitializedEntity *FallbackDecl = nullptr) {
+ // C++11 [class.temporary]p5:
+ switch (Entity->getKind()) {
+ case InitializedEntity::EK_Variable:
+ // The temporary [...] persists for the lifetime of the reference
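+ // (e.g., 'const int &r = 1 + 2;' keeps the temporary alive for the
+ // lifetime of 'r'.)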
+ return Entity;
+
+ case InitializedEntity::EK_Member:
+ // For subobjects, we look at the complete object.
+ if (Entity->getParent())
+ return getEntityForTemporaryLifetimeExtension(Entity->getParent(),
+ Entity);
+
+ // except:
+ // -- A temporary bound to a reference member in a constructor's
+ // ctor-initializer persists until the constructor exits.
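+ // (e.g., in 'struct S { const int &r; S() : r(0) {} };' the temporary
+ // bound to 'r' survives only until the constructor returns.)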
+ return Entity;
+
+ case InitializedEntity::EK_Parameter:
+ case InitializedEntity::EK_Parameter_CF_Audited:
+ // -- A temporary bound to a reference parameter in a function call
+ // persists until the completion of the full-expression containing
+ // the call.
+ case InitializedEntity::EK_Result:
+ // -- The lifetime of a temporary bound to the returned value in a
+ // function return statement is not extended; the temporary is
+ // destroyed at the end of the full-expression in the return statement.
+ case InitializedEntity::EK_New:
+ // -- A temporary bound to a reference in a new-initializer persists
+ // until the completion of the full-expression containing the
+ // new-initializer.
+ return nullptr;
+
+ case InitializedEntity::EK_Temporary:
+ case InitializedEntity::EK_CompoundLiteralInit:
+ case InitializedEntity::EK_RelatedResult:
+ // We don't yet know the storage duration of the surrounding temporary.
+ // Assume it has full-expression duration for now; it will patch up our
+ // storage duration if that's not correct.
+ return nullptr;
+
+ case InitializedEntity::EK_ArrayElement:
+ // For subobjects, we look at the complete object.
+ return getEntityForTemporaryLifetimeExtension(Entity->getParent(),
+ FallbackDecl);
+
+ case InitializedEntity::EK_Base:
+ case InitializedEntity::EK_Delegating:
+ // We can reach this case for aggregate initialization in a constructor:
+ // struct A { int &&r; };
+ // struct B : A { B() : A{0} {} };
+ // In this case, use the innermost field decl as the context.
+ return FallbackDecl;
+
+ case InitializedEntity::EK_BlockElement:
+ case InitializedEntity::EK_LambdaCapture:
+ case InitializedEntity::EK_Exception:
+ case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_ComplexElement:
+ return nullptr;
+ }
+ llvm_unreachable("unknown entity kind");
+}
+
+static void performLifetimeExtension(Expr *Init,
+ const InitializedEntity *ExtendingEntity);
+
+/// Update a glvalue expression that is used as the initializer of a reference
+/// to note that its lifetime is extended.
+/// \return \c true if any temporary had its lifetime extended.
+static bool
+performReferenceExtension(Expr *Init,
+ const InitializedEntity *ExtendingEntity) {
+ // Walk past any constructs which we can lifetime-extend across.
+ Expr *Old;
+ do {
+ Old = Init;
+
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
+ if (ILE->getNumInits() == 1 && ILE->isGLValue()) {
+ // This is just redundant braces around an initializer. Step over it.
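+ // (Roughly, cases like 'int a; const int &r = {a};', where the braces
+ // are transparent for reference binding.)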
+ Init = ILE->getInit(0);
+ }
+ }
+
+ // Step over any subobject adjustments; we may have a materialized
+ // temporary inside them.
+ SmallVector<const Expr *, 2> CommaLHSs;
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
+ Init = const_cast<Expr *>(
+ Init->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments));
+
+ // Per current approach for DR1376, look through casts to reference type
+ // when performing lifetime extension.
+ if (CastExpr *CE = dyn_cast<CastExpr>(Init))
+ if (CE->getSubExpr()->isGLValue())
+ Init = CE->getSubExpr();
+
+ // FIXME: Per DR1213, subscripting on an array temporary produces an xvalue.
+ // It's unclear if binding a reference to that xvalue extends the array
+ // temporary.
+ } while (Init != Old);
+
+ if (MaterializeTemporaryExpr *ME = dyn_cast<MaterializeTemporaryExpr>(Init)) {
+ // Update the storage duration of the materialized temporary.
+ // FIXME: Rebuild the expression instead of mutating it.
+ ME->setExtendingDecl(ExtendingEntity->getDecl(),
+ ExtendingEntity->allocateManglingNumber());
+ performLifetimeExtension(ME->GetTemporaryExpr(), ExtendingEntity);
+ return true;
+ }
+
+ return false;
+}
+
+/// Update a prvalue expression that is going to be materialized as a
+/// lifetime-extended temporary.
+static void performLifetimeExtension(Expr *Init,
+ const InitializedEntity *ExtendingEntity) {
+ // Dig out the expression which constructs the extended temporary.
+ SmallVector<const Expr *, 2> CommaLHSs;
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
+ Init = const_cast<Expr *>(
+ Init->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments));
+
+ if (CXXBindTemporaryExpr *BTE = dyn_cast<CXXBindTemporaryExpr>(Init))
+ Init = BTE->getSubExpr();
+
+ if (CXXStdInitializerListExpr *ILE =
+ dyn_cast<CXXStdInitializerListExpr>(Init)) {
+ performReferenceExtension(ILE->getSubExpr(), ExtendingEntity);
+ return;
+ }
+
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
+ if (ILE->getType()->isArrayType()) {
+ for (unsigned I = 0, N = ILE->getNumInits(); I != N; ++I)
+ performLifetimeExtension(ILE->getInit(I), ExtendingEntity);
+ return;
+ }
+
+ if (CXXRecordDecl *RD = ILE->getType()->getAsCXXRecordDecl()) {
+ assert(RD->isAggregate() && "aggregate init on non-aggregate");
+
+ // If we lifetime-extend a braced initializer which is initializing an
+ // aggregate, and that aggregate contains reference members which are
+ // bound to temporaries, those temporaries are also lifetime-extended.
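+ // (e.g., 'struct A { const int &r; }; A a = {0};' extends the temporary
+ // bound to 'a.r' to the lifetime of 'a'.)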
+ if (RD->isUnion() && ILE->getInitializedFieldInUnion() &&
+ ILE->getInitializedFieldInUnion()->getType()->isReferenceType())
+ performReferenceExtension(ILE->getInit(0), ExtendingEntity);
+ else {
+ unsigned Index = 0;
+ for (const auto *I : RD->fields()) {
+ if (Index >= ILE->getNumInits())
+ break;
+ if (I->isUnnamedBitfield())
+ continue;
+ Expr *SubInit = ILE->getInit(Index);
+ if (I->getType()->isReferenceType())
+ performReferenceExtension(SubInit, ExtendingEntity);
+ else if (isa<InitListExpr>(SubInit) ||
+ isa<CXXStdInitializerListExpr>(SubInit))
+ // This may be either aggregate-initialization of a member or
+ // initialization of a std::initializer_list object. Either way,
+ // we should recursively lifetime-extend that initializer.
+ performLifetimeExtension(SubInit, ExtendingEntity);
+ ++Index;
+ }
+ }
+ }
+ }
+}
+
+static void warnOnLifetimeExtension(Sema &S, const InitializedEntity &Entity,
+ const Expr *Init, bool IsInitializerList,
+ const ValueDecl *ExtendingDecl) {
+ // Warn if a field lifetime-extends a temporary.
+ if (isa<FieldDecl>(ExtendingDecl)) {
+ if (IsInitializerList) {
+ S.Diag(Init->getExprLoc(), diag::warn_dangling_std_initializer_list)
+ << /*at end of constructor*/true;
+ return;
+ }
+
+ bool IsSubobjectMember = false;
+ for (const InitializedEntity *Ent = Entity.getParent(); Ent;
+ Ent = Ent->getParent()) {
+ if (Ent->getKind() != InitializedEntity::EK_Base) {
+ IsSubobjectMember = true;
+ break;
+ }
+ }
+ S.Diag(Init->getExprLoc(),
+ diag::warn_bind_ref_member_to_temporary)
+ << ExtendingDecl << Init->getSourceRange()
+ << IsSubobjectMember << IsInitializerList;
+ if (IsSubobjectMember)
+ S.Diag(ExtendingDecl->getLocation(),
+ diag::note_ref_subobject_of_member_declared_here);
+ else
+ S.Diag(ExtendingDecl->getLocation(),
+ diag::note_ref_or_ptr_member_declared_here)
+ << /*is pointer*/false;
+ }
+}
+
+static void DiagnoseNarrowingInInitList(Sema &S,
+ const ImplicitConversionSequence &ICS,
+ QualType PreNarrowingType,
+ QualType EntityType,
+ const Expr *PostInit);
+
+/// Provide warnings when std::move is used on construction.
+static void CheckMoveOnConstruction(Sema &S, const Expr *InitExpr,
+ bool IsReturnStmt) {
+ if (!InitExpr)
+ return;
+
+ if (!S.ActiveTemplateInstantiations.empty())
+ return;
+
+ QualType DestType = InitExpr->getType();
+ if (!DestType->isRecordType())
+ return;
+
+ unsigned DiagID = 0;
+ if (IsReturnStmt) {
+ const CXXConstructExpr *CCE =
+ dyn_cast<CXXConstructExpr>(InitExpr->IgnoreParens());
+ if (!CCE || CCE->getNumArgs() != 1)
+ return;
+
+ if (!CCE->getConstructor()->isCopyOrMoveConstructor())
+ return;
+
+ InitExpr = CCE->getArg(0)->IgnoreImpCasts();
+ }
+
+ // Find the std::move call and get the argument.
+ const CallExpr *CE = dyn_cast<CallExpr>(InitExpr->IgnoreParens());
+ if (!CE || CE->getNumArgs() != 1)
+ return;
+
+ const FunctionDecl *MoveFunction = CE->getDirectCallee();
+ if (!MoveFunction || !MoveFunction->isInStdNamespace() ||
+ !MoveFunction->getIdentifier() ||
+ !MoveFunction->getIdentifier()->isStr("move"))
+ return;
+
+ const Expr *Arg = CE->getArg(0)->IgnoreImplicit();
+
+ if (IsReturnStmt) {
+ const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Arg->IgnoreParenImpCasts());
+ if (!DRE || DRE->refersToEnclosingVariableOrCapture())
+ return;
+
+ const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl());
+ if (!VD || !VD->hasLocalStorage())
+ return;
+
+ QualType SourceType = VD->getType();
+ if (!SourceType->isRecordType())
+ return;
+
+ if (!S.Context.hasSameUnqualifiedType(DestType, SourceType)) {
+ return;
+ }
+
+ // If we're returning a function parameter, copy elision
+ // is not possible.
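+ // (e.g., in 'T f(T t) { return std::move(t); }' the std::move is
+ // redundant: the returned parameter is already eligible for implicit
+ // move even though elision cannot apply.)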
+ if (isa<ParmVarDecl>(VD))
+ DiagID = diag::warn_redundant_move_on_return;
+ else
+ DiagID = diag::warn_pessimizing_move_on_return;
+ } else {
+ DiagID = diag::warn_pessimizing_move_on_initialization;
+ const Expr *ArgStripped = Arg->IgnoreImplicit()->IgnoreParens();
+ if (!ArgStripped->isRValue() || !ArgStripped->getType()->isRecordType())
+ return;
+ }
+
+ S.Diag(CE->getLocStart(), DiagID);
+
+ // Get all the locations for a fix-it. Don't emit the fix-it if any location
+ // is within a macro.
+ SourceLocation CallBegin = CE->getCallee()->getLocStart();
+ if (CallBegin.isMacroID())
+ return;
+ SourceLocation RParen = CE->getRParenLoc();
+ if (RParen.isMacroID())
+ return;
+ SourceLocation LParen;
+ SourceLocation ArgLoc = Arg->getLocStart();
+
+ // Special testing for the argument location. Since the fix-it needs the
+ // location right before the argument, the argument location can be in a
+ // macro only if it is at the beginning of the macro.
+ while (ArgLoc.isMacroID() &&
+ S.getSourceManager().isAtStartOfImmediateMacroExpansion(ArgLoc)) {
+ ArgLoc = S.getSourceManager().getImmediateExpansionRange(ArgLoc).first;
+ }
+
+ if (ArgLoc.isMacroID())
+ return;
+
+ LParen = ArgLoc.getLocWithOffset(-1);
+
+ S.Diag(CE->getLocStart(), diag::note_remove_move)
+ << FixItHint::CreateRemoval(SourceRange(CallBegin, LParen))
+ << FixItHint::CreateRemoval(SourceRange(RParen, RParen));
+}
+
+ExprResult
+InitializationSequence::Perform(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ MultiExprArg Args,
+ QualType *ResultType) {
+ if (Failed()) {
+ Diagnose(S, Entity, Kind, Args);
+ return ExprError();
+ }
+ if (!ZeroInitializationFixit.empty()) {
+ unsigned DiagID = diag::err_default_init_const;
+ if (Decl *D = Entity.getDecl())
+ if (S.getLangOpts().MSVCCompat && D->hasAttr<SelectAnyAttr>())
+ DiagID = diag::ext_default_init_const;
+
+ // The initialization would have succeeded with this fixit. Since the fixit
+ // is on the error, we need to build a valid AST in this case, so this isn't
+ // handled in the Failed() branch above.
+ QualType DestType = Entity.getType();
+ S.Diag(Kind.getLocation(), DiagID)
+ << DestType << (bool)DestType->getAs<RecordType>()
+ << FixItHint::CreateInsertion(ZeroInitializationFixitLoc,
+ ZeroInitializationFixit);
+ }
+
+ if (getKind() == DependentSequence) {
+ // If the declaration is a non-dependent, incomplete array type
+ // that has an initializer, then its type will be completed once
+ // the initializer is instantiated.
+ if (ResultType && !Entity.getType()->isDependentType() &&
+ Args.size() == 1) {
+ QualType DeclType = Entity.getType();
+ if (const IncompleteArrayType *ArrayT
+ = S.Context.getAsIncompleteArrayType(DeclType)) {
+ // FIXME: We don't currently have the ability to accurately
+ // compute the length of an initializer list without
+ // performing full type-checking of the initializer list
+ // (since we have to determine where braces are implicitly
+ // introduced and such). So, we fall back to making the array
+ // type a dependently-sized array type with no specified
+ // bound.
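+ // (e.g., inside a template, 'int xs[] = { f<T>() };' keeps a
+ // dependently-sized array type until instantiation.)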
+ if (isa<InitListExpr>((Expr *)Args[0])) {
+ SourceRange Brackets;
+
+ // Scavenge the location of the brackets from the entity, if we can.
+ if (DeclaratorDecl *DD = Entity.getDecl()) {
+ if (TypeSourceInfo *TInfo = DD->getTypeSourceInfo()) {
+ TypeLoc TL = TInfo->getTypeLoc();
+ if (IncompleteArrayTypeLoc ArrayLoc =
+ TL.getAs<IncompleteArrayTypeLoc>())
+ Brackets = ArrayLoc.getBracketsRange();
+ }
+ }
+
+ *ResultType
+ = S.Context.getDependentSizedArrayType(ArrayT->getElementType(),
+ /*NumElts=*/nullptr,
+ ArrayT->getSizeModifier(),
+ ArrayT->getIndexTypeCVRQualifiers(),
+ Brackets);
+ }
+
+ }
+ }
+ if (Kind.getKind() == InitializationKind::IK_Direct &&
+ !Kind.isExplicitCast()) {
+ // Rebuild the ParenListExpr.
+ SourceRange ParenRange = Kind.getParenRange();
+ return S.ActOnParenListExpr(ParenRange.getBegin(), ParenRange.getEnd(),
+ Args);
+ }
+ assert(Kind.getKind() == InitializationKind::IK_Copy ||
+ Kind.isExplicitCast() ||
+ Kind.getKind() == InitializationKind::IK_DirectList);
+ return ExprResult(Args[0]);
+ }
+
+ // No steps means no initialization.
+ if (Steps.empty())
+ return ExprResult((Expr *)nullptr);
+
+ if (S.getLangOpts().CPlusPlus11 && Entity.getType()->isReferenceType() &&
+ Args.size() == 1 && isa<InitListExpr>(Args[0]) &&
+ !Entity.isParameterKind()) {
+ // Produce a C++98 compatibility warning if we are initializing a reference
+ // from an initializer list. For parameters, we produce a better warning
+ // elsewhere.
+ Expr *Init = Args[0];
+ S.Diag(Init->getLocStart(), diag::warn_cxx98_compat_reference_list_init)
+ << Init->getSourceRange();
+ }
+
+ // Diagnose cases where we initialize a pointer to an array temporary, and the
+ // pointer obviously outlives the temporary.
+ if (Args.size() == 1 && Args[0]->getType()->isArrayType() &&
+ Entity.getType()->isPointerType() &&
+ InitializedEntityOutlivesFullExpression(Entity)) {
+ Expr *Init = Args[0];
+ Expr::LValueClassification Kind = Init->ClassifyLValue(S.Context);
+ if (Kind == Expr::LV_ClassTemporary || Kind == Expr::LV_ArrayTemporary)
+ S.Diag(Init->getLocStart(), diag::warn_temporary_array_to_pointer_decay)
+ << Init->getSourceRange();
+ }
+
+ QualType DestType = Entity.getType().getNonReferenceType();
+ // FIXME: Ugly hack around the fact that Entity.getType() is not
+ // the same as Entity.getDecl()->getType() in cases involving type merging,
+ // and we want the latter when it makes sense.
+ if (ResultType)
+ *ResultType = Entity.getDecl() ? Entity.getDecl()->getType() :
+ Entity.getType();
+
+ ExprResult CurInit((Expr *)nullptr);
+
+ // For initialization steps that start with a single initializer,
+ // grab the only argument out of Args and place it into the "current"
+ // initializer.
+ switch (Steps.front().Kind) {
+ case SK_ResolveAddressOfOverloadedFunction:
+ case SK_CastDerivedToBaseRValue:
+ case SK_CastDerivedToBaseXValue:
+ case SK_CastDerivedToBaseLValue:
+ case SK_BindReference:
+ case SK_BindReferenceToTemporary:
+ case SK_ExtraneousCopyToTemporary:
+ case SK_UserConversion:
+ case SK_QualificationConversionLValue:
+ case SK_QualificationConversionXValue:
+ case SK_QualificationConversionRValue:
+ case SK_AtomicConversion:
+ case SK_LValueToRValue:
+ case SK_ConversionSequence:
+ case SK_ConversionSequenceNoNarrowing:
+ case SK_ListInitialization:
+ case SK_UnwrapInitList:
+ case SK_RewrapInitList:
+ case SK_CAssignment:
+ case SK_StringInit:
+ case SK_ObjCObjectConversion:
+ case SK_ArrayInit:
+ case SK_ParenthesizedArrayInit:
+ case SK_PassByIndirectCopyRestore:
+ case SK_PassByIndirectRestore:
+ case SK_ProduceObjCObject:
+ case SK_StdInitializerList:
+ case SK_OCLSamplerInit:
+ case SK_OCLZeroEvent: {
+ assert(Args.size() == 1);
+ CurInit = Args[0];
+ if (!CurInit.get()) return ExprError();
+ break;
+ }
+
+ case SK_ConstructorInitialization:
+ case SK_ConstructorInitializationFromList:
+ case SK_StdInitializerListConstructorCall:
+ case SK_ZeroInitialization:
+ break;
+ }
+
+ // Walk through the computed steps for the initialization sequence,
+ // performing the specified conversions along the way.
+ bool ConstructorInitRequiresZeroInit = false;
+ for (step_iterator Step = step_begin(), StepEnd = step_end();
+ Step != StepEnd; ++Step) {
+ if (CurInit.isInvalid())
+ return ExprError();
+
+ QualType SourceType = CurInit.get() ? CurInit.get()->getType() : QualType();
+
+ switch (Step->Kind) {
+ case SK_ResolveAddressOfOverloadedFunction:
+ // Overload resolution determined which function to invoke; update the
+ // initializer to reflect that choice.
+ S.CheckAddressOfMemberAccess(CurInit.get(), Step->Function.FoundDecl);
+ if (S.DiagnoseUseOfDecl(Step->Function.FoundDecl, Kind.getLocation()))
+ return ExprError();
+ CurInit = S.FixOverloadedFunctionReference(CurInit,
+ Step->Function.FoundDecl,
+ Step->Function.Function);
+ break;
+
+ case SK_CastDerivedToBaseRValue:
+ case SK_CastDerivedToBaseXValue:
+ case SK_CastDerivedToBaseLValue: {
+ // We have a derived-to-base cast that produces either an rvalue or an
+ // lvalue. Perform that cast.
+
+ CXXCastPath BasePath;
+
+ // Casts to inaccessible base classes are allowed with C-style casts.
+ bool IgnoreBaseAccess = Kind.isCStyleOrFunctionalCast();
+ if (S.CheckDerivedToBaseConversion(SourceType, Step->Type,
+ CurInit.get()->getLocStart(),
+ CurInit.get()->getSourceRange(),
+ &BasePath, IgnoreBaseAccess))
+ return ExprError();
+
+ ExprValueKind VK =
+ Step->Kind == SK_CastDerivedToBaseLValue ?
+ VK_LValue :
+ (Step->Kind == SK_CastDerivedToBaseXValue ?
+ VK_XValue :
+ VK_RValue);
+ CurInit =
+ ImplicitCastExpr::Create(S.Context, Step->Type, CK_DerivedToBase,
+ CurInit.get(), &BasePath, VK);
+ break;
+ }
+
+ case SK_BindReference:
+ // References cannot bind to bit-fields (C++ [dcl.init.ref]p5).
+ if (CurInit.get()->refersToBitField()) {
+ // We don't necessarily have an unambiguous source bit-field.
+ FieldDecl *BitField = CurInit.get()->getSourceBitField();
+ S.Diag(Kind.getLocation(), diag::err_reference_bind_to_bitfield)
+ << Entity.getType().isVolatileQualified()
+ << (BitField ? BitField->getDeclName() : DeclarationName())
+ << (BitField != nullptr)
+ << CurInit.get()->getSourceRange();
+ if (BitField)
+ S.Diag(BitField->getLocation(), diag::note_bitfield_decl);
+
+ return ExprError();
+ }
+
+ if (CurInit.get()->refersToVectorElement()) {
+ // References cannot bind to vector elements.
+ S.Diag(Kind.getLocation(), diag::err_reference_bind_to_vector_element)
+ << Entity.getType().isVolatileQualified()
+ << CurInit.get()->getSourceRange();
+ PrintInitLocationNote(S, Entity);
+ return ExprError();
+ }
+
+ // Reference binding does not have any corresponding ASTs.
+
+ // Check exception specifications
+ if (S.CheckExceptionSpecCompatibility(CurInit.get(), DestType))
+ return ExprError();
+
+ // Even though we didn't materialize a temporary, the binding may still
+ // extend the lifetime of a temporary. This happens if we bind a reference
+ // to the result of a cast to reference type.
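+ // (e.g., 'const int &r = static_cast<const int&>(0);' still extends the
+ // lifetime of the temporary, following the DR1376 approach used in
+ // performReferenceExtension above.)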
+ if (const InitializedEntity *ExtendingEntity =
+ getEntityForTemporaryLifetimeExtension(&Entity))
+ if (performReferenceExtension(CurInit.get(), ExtendingEntity))
+ warnOnLifetimeExtension(S, Entity, CurInit.get(),
+ /*IsInitializerList=*/false,
+ ExtendingEntity->getDecl());
+
+ break;
+
+ case SK_BindReferenceToTemporary: {
+ // Make sure the "temporary" is actually an rvalue.
+ assert(CurInit.get()->isRValue() && "not a temporary");
+
+ // Check exception specifications
+ if (S.CheckExceptionSpecCompatibility(CurInit.get(), DestType))
+ return ExprError();
+
+ // Materialize the temporary into memory.
+ MaterializeTemporaryExpr *MTE = new (S.Context) MaterializeTemporaryExpr(
+ Entity.getType().getNonReferenceType(), CurInit.get(),
+ Entity.getType()->isLValueReferenceType());
+
+ // Maybe lifetime-extend the temporary's subobjects to match the
+ // entity's lifetime.
+ if (const InitializedEntity *ExtendingEntity =
+ getEntityForTemporaryLifetimeExtension(&Entity))
+ if (performReferenceExtension(MTE, ExtendingEntity))
+ warnOnLifetimeExtension(S, Entity, CurInit.get(),
+ /*IsInitializerList=*/false,
+ ExtendingEntity->getDecl());
+
+ // If we're binding to an Objective-C object that has lifetime, we
+ // need cleanups. Likewise if we're extending this temporary to automatic
+ // storage duration -- we need to register its cleanup during the
+ // full-expression's cleanups.
+ if ((S.getLangOpts().ObjCAutoRefCount &&
+ MTE->getType()->isObjCLifetimeType()) ||
+ (MTE->getStorageDuration() == SD_Automatic &&
+ MTE->getType().isDestructedType()))
+ S.ExprNeedsCleanups = true;
+
+ CurInit = MTE;
+ break;
+ }
+
+ case SK_ExtraneousCopyToTemporary:
+ CurInit = CopyObject(S, Step->Type, Entity, CurInit,
+ /*IsExtraneousCopy=*/true);
+ break;
+
+ case SK_UserConversion: {
+ // We have a user-defined conversion that invokes either a constructor
+ // or a conversion function.
+ CastKind CastKind;
+ bool IsCopy = false;
+ FunctionDecl *Fn = Step->Function.Function;
+ DeclAccessPair FoundFn = Step->Function.FoundDecl;
+ bool HadMultipleCandidates = Step->Function.HadMultipleCandidates;
+ bool CreatedObject = false;
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Fn)) {
+ // Build a call to the selected constructor.
+ SmallVector<Expr*, 8> ConstructorArgs;
+ SourceLocation Loc = CurInit.get()->getLocStart();
+ CurInit.get(); // Ownership transferred into MultiExprArg, below.
+
+ // Determine the arguments required to actually perform the constructor
+ // call.
+ Expr *Arg = CurInit.get();
+ if (S.CompleteConstructorCall(Constructor,
+ MultiExprArg(&Arg, 1),
+ Loc, ConstructorArgs))
+ return ExprError();
+
+ // Build an expression that constructs a temporary.
+ CurInit = S.BuildCXXConstructExpr(Loc, Step->Type, Constructor,
+ ConstructorArgs,
+ HadMultipleCandidates,
+ /*ListInit*/ false,
+ /*StdInitListInit*/ false,
+ /*ZeroInit*/ false,
+ CXXConstructExpr::CK_Complete,
+ SourceRange());
+ if (CurInit.isInvalid())
+ return ExprError();
+
+ S.CheckConstructorAccess(Kind.getLocation(), Constructor, Entity,
+ FoundFn.getAccess());
+ if (S.DiagnoseUseOfDecl(FoundFn, Kind.getLocation()))
+ return ExprError();
+
+ CastKind = CK_ConstructorConversion;
+ QualType Class = S.Context.getTypeDeclType(Constructor->getParent());
+ if (S.Context.hasSameUnqualifiedType(SourceType, Class) ||
+ S.IsDerivedFrom(Loc, SourceType, Class))
+ IsCopy = true;
+
+ CreatedObject = true;
+ } else {
+ // Build a call to the conversion function.
+ CXXConversionDecl *Conversion = cast<CXXConversionDecl>(Fn);
+ S.CheckMemberOperatorAccess(Kind.getLocation(), CurInit.get(), nullptr,
+ FoundFn);
+ if (S.DiagnoseUseOfDecl(FoundFn, Kind.getLocation()))
+ return ExprError();
+
+ // FIXME: Should we move this initialization into a separate
+ // derived-to-base conversion? I believe the answer is "no", because
+ // we don't want to turn off access control here for C-style casts.
+ ExprResult CurInitExprRes =
+ S.PerformObjectArgumentInitialization(CurInit.get(),
+ /*Qualifier=*/nullptr,
+ FoundFn, Conversion);
+ if (CurInitExprRes.isInvalid())
+ return ExprError();
+ CurInit = CurInitExprRes;
+
+ // Build the actual call to the conversion function.
+ CurInit = S.BuildCXXMemberCallExpr(CurInit.get(), FoundFn, Conversion,
+ HadMultipleCandidates);
+ if (CurInit.isInvalid() || !CurInit.get())
+ return ExprError();
+
+ CastKind = CK_UserDefinedConversion;
+
+ CreatedObject = Conversion->getReturnType()->isRecordType();
+ }
+
+ bool RequiresCopy = !IsCopy && !isReferenceBinding(Steps.back());
+ bool MaybeBindToTemp = RequiresCopy || shouldBindAsTemporary(Entity);
+
+ if (!MaybeBindToTemp && CreatedObject && shouldDestroyTemporary(Entity)) {
+ QualType T = CurInit.get()->getType();
+ if (const RecordType *Record = T->getAs<RecordType>()) {
+ CXXDestructorDecl *Destructor
+ = S.LookupDestructor(cast<CXXRecordDecl>(Record->getDecl()));
+ S.CheckDestructorAccess(CurInit.get()->getLocStart(), Destructor,
+ S.PDiag(diag::err_access_dtor_temp) << T);
+ S.MarkFunctionReferenced(CurInit.get()->getLocStart(), Destructor);
+ if (S.DiagnoseUseOfDecl(Destructor, CurInit.get()->getLocStart()))
+ return ExprError();
+ }
+ }
+
+ CurInit = ImplicitCastExpr::Create(S.Context, CurInit.get()->getType(),
+ CastKind, CurInit.get(), nullptr,
+ CurInit.get()->getValueKind());
+ if (MaybeBindToTemp)
+ CurInit = S.MaybeBindToTemporary(CurInit.getAs<Expr>());
+ if (RequiresCopy)
+ CurInit = CopyObject(S, Entity.getType().getNonReferenceType(), Entity,
+ CurInit, /*IsExtraneousCopy=*/false);
+ break;
+ }
+
+ case SK_QualificationConversionLValue:
+ case SK_QualificationConversionXValue:
+ case SK_QualificationConversionRValue: {
+ // Perform a qualification conversion; these can never go wrong.
+ ExprValueKind VK =
+ Step->Kind == SK_QualificationConversionLValue ?
+ VK_LValue :
+ (Step->Kind == SK_QualificationConversionXValue ?
+ VK_XValue :
+ VK_RValue);
+ CurInit = S.ImpCastExprToType(CurInit.get(), Step->Type, CK_NoOp, VK);
+ break;
+ }
+
+ case SK_AtomicConversion: {
+ assert(CurInit.get()->isRValue() && "cannot convert glvalue to atomic");
+ CurInit = S.ImpCastExprToType(CurInit.get(), Step->Type,
+ CK_NonAtomicToAtomic, VK_RValue);
+ break;
+ }
+
+ case SK_LValueToRValue: {
+ assert(CurInit.get()->isGLValue() && "cannot load from a prvalue");
+ CurInit = ImplicitCastExpr::Create(S.Context, Step->Type,
+ CK_LValueToRValue, CurInit.get(),
+ /*BasePath=*/nullptr, VK_RValue);
+ break;
+ }
+
+ case SK_ConversionSequence:
+ case SK_ConversionSequenceNoNarrowing: {
+ Sema::CheckedConversionKind CCK
+ = Kind.isCStyleCast()? Sema::CCK_CStyleCast
+ : Kind.isFunctionalCast()? Sema::CCK_FunctionalCast
+ : Kind.isExplicitCast()? Sema::CCK_OtherCast
+ : Sema::CCK_ImplicitConversion;
+ ExprResult CurInitExprRes =
+ S.PerformImplicitConversion(CurInit.get(), Step->Type, *Step->ICS,
+ getAssignmentAction(Entity), CCK);
+ if (CurInitExprRes.isInvalid())
+ return ExprError();
+ CurInit = CurInitExprRes;
+
+ if (Step->Kind == SK_ConversionSequenceNoNarrowing &&
+ S.getLangOpts().CPlusPlus && !CurInit.get()->isValueDependent())
+ DiagnoseNarrowingInInitList(S, *Step->ICS, SourceType, Entity.getType(),
+ CurInit.get());
+ break;
+ }
+
+ case SK_ListInitialization: {
+ InitListExpr *InitList = cast<InitListExpr>(CurInit.get());
+ // If we're not initializing the top-level entity, we need to create an
+ // InitializeTemporary entity for our target type.
+ QualType Ty = Step->Type;
+ bool IsTemporary = !S.Context.hasSameType(Entity.getType(), Ty);
+ InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(Ty);
+ InitializedEntity InitEntity = IsTemporary ? TempEntity : Entity;
+ InitListChecker PerformInitList(S, InitEntity,
+ InitList, Ty, /*VerifyOnly=*/false);
+ if (PerformInitList.HadError())
+ return ExprError();
+
+ // Hack: We must update *ResultType if available in order to set the
+ // bounds of arrays, e.g. in 'int ar[] = {1, 2, 3};'.
+ // Worst case: 'const int (&arref)[] = {1, 2, 3};'.
+ if (ResultType &&
+ ResultType->getNonReferenceType()->isIncompleteArrayType()) {
+ if ((*ResultType)->isRValueReferenceType())
+ Ty = S.Context.getRValueReferenceType(Ty);
+ else if ((*ResultType)->isLValueReferenceType())
+ Ty = S.Context.getLValueReferenceType(Ty,
+ (*ResultType)->getAs<LValueReferenceType>()->isSpelledAsLValue());
+ *ResultType = Ty;
+ }
+
+ InitListExpr *StructuredInitList =
+ PerformInitList.getFullyStructuredList();
+ CurInit.get();
+ CurInit = shouldBindAsTemporary(InitEntity)
+ ? S.MaybeBindToTemporary(StructuredInitList)
+ : StructuredInitList;
+ break;
+ }
+
+ case SK_ConstructorInitializationFromList: {
+ // When an initializer list is passed for a parameter of type "reference
+ // to object", we don't get an EK_Temporary entity, but instead an
+ // EK_Parameter entity with reference type.
+ // FIXME: This is a hack. What we really should do is create a user
+ // conversion step for this case, but this makes it considerably more
+ // complicated. For now, this will do.
+ InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(
+ Entity.getType().getNonReferenceType());
+ bool UseTemporary = Entity.getType()->isReferenceType();
+ assert(Args.size() == 1 && "expected a single argument for list init");
+ InitListExpr *InitList = cast<InitListExpr>(Args[0]);
+ S.Diag(InitList->getExprLoc(), diag::warn_cxx98_compat_ctor_list_init)
+ << InitList->getSourceRange();
+ MultiExprArg Arg(InitList->getInits(), InitList->getNumInits());
+ CurInit = PerformConstructorInitialization(S, UseTemporary ? TempEntity :
+ Entity,
+ Kind, Arg, *Step,
+ ConstructorInitRequiresZeroInit,
+ /*IsListInitialization*/true,
+ /*IsStdInitListInit*/false,
+ InitList->getLBraceLoc(),
+ InitList->getRBraceLoc());
+ break;
+ }
+
+ case SK_UnwrapInitList:
+ CurInit = cast<InitListExpr>(CurInit.get())->getInit(0);
+ break;
+
+ case SK_RewrapInitList: {
+ Expr *E = CurInit.get();
+ InitListExpr *Syntactic = Step->WrappingSyntacticList;
+ InitListExpr *ILE = new (S.Context) InitListExpr(S.Context,
+ Syntactic->getLBraceLoc(), E, Syntactic->getRBraceLoc());
+ ILE->setSyntacticForm(Syntactic);
+ ILE->setType(E->getType());
+ ILE->setValueKind(E->getValueKind());
+ CurInit = ILE;
+ break;
+ }
+
+ case SK_ConstructorInitialization:
+ case SK_StdInitializerListConstructorCall: {
+ // When an initializer list is passed for a parameter of type "reference
+ // to object", we don't get an EK_Temporary entity, but instead an
+ // EK_Parameter entity with reference type.
+ // FIXME: This is a hack. What we really should do is create a user
+ // conversion step for this case, but this makes it considerably more
+ // complicated. For now, this will do.
+ InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(
+ Entity.getType().getNonReferenceType());
+ bool UseTemporary = Entity.getType()->isReferenceType();
+ bool IsStdInitListInit =
+ Step->Kind == SK_StdInitializerListConstructorCall;
+ CurInit = PerformConstructorInitialization(
+ S, UseTemporary ? TempEntity : Entity, Kind, Args, *Step,
+ ConstructorInitRequiresZeroInit,
+ /*IsListInitialization*/IsStdInitListInit,
+ /*IsStdInitListInitialization*/IsStdInitListInit,
+ /*LBraceLoc*/SourceLocation(),
+ /*RBraceLoc*/SourceLocation());
+ break;
+ }
+
+ case SK_ZeroInitialization: {
+ step_iterator NextStep = Step;
+ ++NextStep;
+ if (NextStep != StepEnd &&
+ (NextStep->Kind == SK_ConstructorInitialization ||
+ NextStep->Kind == SK_ConstructorInitializationFromList)) {
+ // The need for zero-initialization is recorded directly into
+ // the call to the object's constructor within the next step.
+ ConstructorInitRequiresZeroInit = true;
+ } else if (Kind.getKind() == InitializationKind::IK_Value &&
+ S.getLangOpts().CPlusPlus &&
+ !Kind.isImplicitValueInit()) {
+ TypeSourceInfo *TSInfo = Entity.getTypeSourceInfo();
+ if (!TSInfo)
+ TSInfo = S.Context.getTrivialTypeSourceInfo(Step->Type,
+ Kind.getRange().getBegin());
+
+ CurInit = new (S.Context) CXXScalarValueInitExpr(
+ TSInfo->getType().getNonLValueExprType(S.Context), TSInfo,
+ Kind.getRange().getEnd());
+ } else {
+ CurInit = new (S.Context) ImplicitValueInitExpr(Step->Type);
+ }
+ break;
+ }
+
+ case SK_CAssignment: {
+ QualType SourceType = CurInit.get()->getType();
+ // Save off the initial CurInit in case we need to emit a diagnostic
+ ExprResult InitialCurInit = CurInit;
+ ExprResult Result = CurInit;
+ Sema::AssignConvertType ConvTy =
+ S.CheckSingleAssignmentConstraints(Step->Type, Result, true,
+ Entity.getKind() == InitializedEntity::EK_Parameter_CF_Audited);
+ if (Result.isInvalid())
+ return ExprError();
+ CurInit = Result;
+
+ // If this is a call, allow conversion to a transparent union.
+ ExprResult CurInitExprRes = CurInit;
+ if (ConvTy != Sema::Compatible &&
+ Entity.isParameterKind() &&
+ S.CheckTransparentUnionArgumentConstraints(Step->Type, CurInitExprRes)
+ == Sema::Compatible)
+ ConvTy = Sema::Compatible;
+ if (CurInitExprRes.isInvalid())
+ return ExprError();
+ CurInit = CurInitExprRes;
+
+ bool Complained;
+ if (S.DiagnoseAssignmentResult(ConvTy, Kind.getLocation(),
+ Step->Type, SourceType,
+ InitialCurInit.get(),
+ getAssignmentAction(Entity, true),
+ &Complained)) {
+ PrintInitLocationNote(S, Entity);
+ return ExprError();
+ } else if (Complained)
+ PrintInitLocationNote(S, Entity);
+ break;
+ }
+
+ case SK_StringInit: {
+ QualType Ty = Step->Type;
+ CheckStringInit(CurInit.get(), ResultType ? *ResultType : Ty,
+ S.Context.getAsArrayType(Ty), S);
+ break;
+ }
+
+ case SK_ObjCObjectConversion:
+ CurInit = S.ImpCastExprToType(CurInit.get(), Step->Type,
+ CK_ObjCObjectLValueCast,
+ CurInit.get()->getValueKind());
+ break;
+
+ case SK_ArrayInit:
+ // Okay: we checked everything before creating this step. Note that
+ // this is a GNU extension.
+ S.Diag(Kind.getLocation(), diag::ext_array_init_copy)
+ << Step->Type << CurInit.get()->getType()
+ << CurInit.get()->getSourceRange();
+
+ // If the destination type is an incomplete array type, update the
+ // type accordingly.
+ if (ResultType) {
+ if (const IncompleteArrayType *IncompleteDest
+ = S.Context.getAsIncompleteArrayType(Step->Type)) {
+ if (const ConstantArrayType *ConstantSource
+ = S.Context.getAsConstantArrayType(CurInit.get()->getType())) {
+ *ResultType = S.Context.getConstantArrayType(
+ IncompleteDest->getElementType(),
+ ConstantSource->getSize(),
+ ArrayType::Normal, 0);
+ }
+ }
+ }
+ break;
+
+ case SK_ParenthesizedArrayInit:
+ // Okay: we checked everything before creating this step. Note that
+ // this is a GNU extension.
+ S.Diag(Kind.getLocation(), diag::ext_array_init_parens)
+ << CurInit.get()->getSourceRange();
+ break;
+
+ case SK_PassByIndirectCopyRestore:
+ case SK_PassByIndirectRestore:
+ checkIndirectCopyRestoreSource(S, CurInit.get());
+ CurInit = new (S.Context) ObjCIndirectCopyRestoreExpr(
+ CurInit.get(), Step->Type,
+ Step->Kind == SK_PassByIndirectCopyRestore);
+ break;
+
+ case SK_ProduceObjCObject:
+ CurInit =
+ ImplicitCastExpr::Create(S.Context, Step->Type, CK_ARCProduceObject,
+ CurInit.get(), nullptr, VK_RValue);
+ break;
+
+ case SK_StdInitializerList: {
+ S.Diag(CurInit.get()->getExprLoc(),
+ diag::warn_cxx98_compat_initializer_list_init)
+ << CurInit.get()->getSourceRange();
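+
+      // For example (an illustrative sketch):
+      //   std::initializer_list<int> il = {1, 2, 3};
+      // The backing array temporary for {1, 2, 3} is materialized below and
+      // may have its lifetime extended to match the lifetime of 'il'.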
+
+ // Materialize the temporary into memory.
+ MaterializeTemporaryExpr *MTE = new (S.Context)
+ MaterializeTemporaryExpr(CurInit.get()->getType(), CurInit.get(),
+ /*BoundToLvalueReference=*/false);
+
+ // Maybe lifetime-extend the array temporary's subobjects to match the
+ // entity's lifetime.
+ if (const InitializedEntity *ExtendingEntity =
+ getEntityForTemporaryLifetimeExtension(&Entity))
+ if (performReferenceExtension(MTE, ExtendingEntity))
+ warnOnLifetimeExtension(S, Entity, CurInit.get(),
+ /*IsInitializerList=*/true,
+ ExtendingEntity->getDecl());
+
+ // Wrap it in a construction of a std::initializer_list<T>.
+ CurInit = new (S.Context) CXXStdInitializerListExpr(Step->Type, MTE);
+
+ // Bind the result, in case the library has given initializer_list a
+ // non-trivial destructor.
+ if (shouldBindAsTemporary(Entity))
+ CurInit = S.MaybeBindToTemporary(CurInit.get());
+ break;
+ }
+
+ case SK_OCLSamplerInit: {
+ assert(Step->Type->isSamplerT() &&
+ "Sampler initialization on non-sampler type.");
+
+ QualType SourceType = CurInit.get()->getType();
+
+ if (Entity.isParameterKind()) {
+ if (!SourceType->isSamplerT())
+ S.Diag(Kind.getLocation(), diag::err_sampler_argument_required)
+ << SourceType;
+ } else if (Entity.getKind() != InitializedEntity::EK_Variable) {
+ llvm_unreachable("Invalid EntityKind!");
+ }
+
+ break;
+ }
+ case SK_OCLZeroEvent: {
+ assert(Step->Type->isEventT() &&
+ "Event initialization on non-event type.");
+
+ CurInit = S.ImpCastExprToType(CurInit.get(), Step->Type,
+ CK_ZeroToOCLEvent,
+ CurInit.get()->getValueKind());
+ break;
+ }
+ }
+ }
+
+ // Diagnose non-fatal problems with the completed initialization.
+ if (Entity.getKind() == InitializedEntity::EK_Member &&
+ cast<FieldDecl>(Entity.getDecl())->isBitField())
+ S.CheckBitFieldInitialization(Kind.getLocation(),
+ cast<FieldDecl>(Entity.getDecl()),
+ CurInit.get());
+
+ // Check for std::move on construction.
+ if (const Expr *E = CurInit.get()) {
+ CheckMoveOnConstruction(S, E,
+ Entity.getKind() == InitializedEntity::EK_Result);
+ }
+
+ return CurInit;
+}
+
+/// Somewhere within T there is an uninitialized reference subobject.
+/// Dig it out and diagnose it.
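+///
+/// For example (an illustrative sketch):
+/// \code
+///   struct A { int &r; };  // 'r' is the uninitialized reference subobject
+///   struct B { A a; };
+///   B b = B();             // value-initialization reaches 'a.r'
+/// \endcode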
+static bool DiagnoseUninitializedReference(Sema &S, SourceLocation Loc,
+ QualType T) {
+ if (T->isReferenceType()) {
+ S.Diag(Loc, diag::err_reference_without_init)
+ << T.getNonReferenceType();
+ return true;
+ }
+
+ CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ if (!RD || !RD->hasUninitializedReferenceMember())
+ return false;
+
+ for (const auto *FI : RD->fields()) {
+ if (FI->isUnnamedBitfield())
+ continue;
+
+ if (DiagnoseUninitializedReference(S, FI->getLocation(), FI->getType())) {
+ S.Diag(Loc, diag::note_value_initialization_here) << RD;
+ return true;
+ }
+ }
+
+ for (const auto &BI : RD->bases()) {
+ if (DiagnoseUninitializedReference(S, BI.getLocStart(), BI.getType())) {
+ S.Diag(Loc, diag::note_value_initialization_here) << RD;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Diagnose initialization failures
+//===----------------------------------------------------------------------===//
+
+/// Emit notes associated with an initialization that failed due to a
+/// "simple" conversion failure.
+static void emitBadConversionNotes(Sema &S, const InitializedEntity &entity,
+ Expr *op) {
+ QualType destType = entity.getType();
+ if (destType.getNonReferenceType()->isObjCObjectPointerType() &&
+ op->getType()->isObjCObjectPointerType()) {
+
+ // Emit a possible note about the conversion failing because the
+ // operand is a message send with a related result type.
+ S.EmitRelatedResultTypeNote(op);
+
+ // Emit a possible note about a return failing because we're
+ // expecting a related result type.
+ if (entity.getKind() == InitializedEntity::EK_Result)
+ S.EmitRelatedResultTypeNoteForReturn(destType);
+ }
+}
+
+static void diagnoseListInit(Sema &S, const InitializedEntity &Entity,
+ InitListExpr *InitList) {
+ QualType DestType = Entity.getType();
+
+ QualType E;
+ if (S.getLangOpts().CPlusPlus11 && S.isStdInitializerList(DestType, &E)) {
+ QualType ArrayType = S.Context.getConstantArrayType(
+ E.withConst(),
+ llvm::APInt(S.Context.getTypeSize(S.Context.getSizeType()),
+ InitList->getNumInits()),
+ clang::ArrayType::Normal, 0);
+ InitializedEntity HiddenArray =
+ InitializedEntity::InitializeTemporary(ArrayType);
+ return diagnoseListInit(S, HiddenArray, InitList);
+ }
+
+ if (DestType->isReferenceType()) {
+ // A list-initialization failure for a reference means that we tried to
+ // create a temporary of the inner type (per [dcl.init.list]p3.6) and the
+ // inner initialization failed.
+ QualType T = DestType->getAs<ReferenceType>()->getPointeeType();
+ diagnoseListInit(S, InitializedEntity::InitializeTemporary(T), InitList);
+ SourceLocation Loc = InitList->getLocStart();
+ if (auto *D = Entity.getDecl())
+ Loc = D->getLocation();
+ S.Diag(Loc, diag::note_in_reference_temporary_list_initializer) << T;
+ return;
+ }
+
+ InitListChecker DiagnoseInitList(S, Entity, InitList, DestType,
+ /*VerifyOnly=*/false);
+ assert(DiagnoseInitList.HadError() &&
+ "Inconsistent init list check result.");
+}
+
+bool InitializationSequence::Diagnose(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ ArrayRef<Expr *> Args) {
+ if (!Failed())
+ return false;
+
+ QualType DestType = Entity.getType();
+ switch (Failure) {
+ case FK_TooManyInitsForReference:
+ // FIXME: Customize for the initialized entity?
+ if (Args.empty()) {
+ // Dig out the reference subobject which is uninitialized and diagnose it.
+ // If this is value-initialization, this could be nested some way within
+ // the target type.
+ assert(Kind.getKind() == InitializationKind::IK_Value ||
+ DestType->isReferenceType());
+ bool Diagnosed =
+ DiagnoseUninitializedReference(S, Kind.getLocation(), DestType);
+ assert(Diagnosed && "couldn't find uninitialized reference to diagnose");
+ (void)Diagnosed;
+ } else // FIXME: diagnostic below could be better!
+ S.Diag(Kind.getLocation(), diag::err_reference_has_multiple_inits)
+ << SourceRange(Args.front()->getLocStart(), Args.back()->getLocEnd());
+ break;
+
+ case FK_ArrayNeedsInitList:
+ S.Diag(Kind.getLocation(), diag::err_array_init_not_init_list) << 0;
+ break;
+ case FK_ArrayNeedsInitListOrStringLiteral:
+ S.Diag(Kind.getLocation(), diag::err_array_init_not_init_list) << 1;
+ break;
+ case FK_ArrayNeedsInitListOrWideStringLiteral:
+ S.Diag(Kind.getLocation(), diag::err_array_init_not_init_list) << 2;
+ break;
+ case FK_NarrowStringIntoWideCharArray:
+ S.Diag(Kind.getLocation(), diag::err_array_init_narrow_string_into_wchar);
+ break;
+ case FK_WideStringIntoCharArray:
+ S.Diag(Kind.getLocation(), diag::err_array_init_wide_string_into_char);
+ break;
+ case FK_IncompatWideStringIntoWideChar:
+ S.Diag(Kind.getLocation(),
+ diag::err_array_init_incompat_wide_string_into_wchar);
+ break;
+ case FK_ArrayTypeMismatch:
+ case FK_NonConstantArrayInit:
+ S.Diag(Kind.getLocation(),
+ (Failure == FK_ArrayTypeMismatch
+ ? diag::err_array_init_different_type
+ : diag::err_array_init_non_constant_array))
+ << DestType.getNonReferenceType()
+ << Args[0]->getType()
+ << Args[0]->getSourceRange();
+ break;
+
+ case FK_VariableLengthArrayHasInitializer:
+ S.Diag(Kind.getLocation(), diag::err_variable_object_no_init)
+ << Args[0]->getSourceRange();
+ break;
+
+ case FK_AddressOfOverloadFailed: {
+ DeclAccessPair Found;
+ S.ResolveAddressOfOverloadedFunction(Args[0],
+ DestType.getNonReferenceType(),
+ true,
+ Found);
+ break;
+ }
+
+ case FK_AddressOfUnaddressableFunction: {
+ auto *FD = cast<FunctionDecl>(cast<DeclRefExpr>(Args[0])->getDecl());
+ S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
+ Args[0]->getLocStart());
+ break;
+ }
+
+ case FK_ReferenceInitOverloadFailed:
+ case FK_UserConversionOverloadFailed:
+ switch (FailedOverloadResult) {
+ case OR_Ambiguous:
+ if (Failure == FK_UserConversionOverloadFailed)
+ S.Diag(Kind.getLocation(), diag::err_typecheck_ambiguous_condition)
+ << Args[0]->getType() << DestType
+ << Args[0]->getSourceRange();
+ else
+ S.Diag(Kind.getLocation(), diag::err_ref_init_ambiguous)
+ << DestType << Args[0]->getType()
+ << Args[0]->getSourceRange();
+
+ FailedCandidateSet.NoteCandidates(S, OCD_ViableCandidates, Args);
+ break;
+
+ case OR_No_Viable_Function:
+ if (!S.RequireCompleteType(Kind.getLocation(),
+ DestType.getNonReferenceType(),
+ diag::err_typecheck_nonviable_condition_incomplete,
+ Args[0]->getType(), Args[0]->getSourceRange()))
+ S.Diag(Kind.getLocation(), diag::err_typecheck_nonviable_condition)
+ << (Entity.getKind() == InitializedEntity::EK_Result)
+ << Args[0]->getType() << Args[0]->getSourceRange()
+ << DestType.getNonReferenceType();
+
+ FailedCandidateSet.NoteCandidates(S, OCD_AllCandidates, Args);
+ break;
+
+ case OR_Deleted: {
+ S.Diag(Kind.getLocation(), diag::err_typecheck_deleted_function)
+ << Args[0]->getType() << DestType.getNonReferenceType()
+ << Args[0]->getSourceRange();
+ OverloadCandidateSet::iterator Best;
+ OverloadingResult Ovl
+ = FailedCandidateSet.BestViableFunction(S, Kind.getLocation(), Best,
+ true);
+ if (Ovl == OR_Deleted) {
+ S.NoteDeletedFunction(Best->Function);
+ } else {
+ llvm_unreachable("Inconsistent overload resolution?");
+ }
+ break;
+ }
+
+ case OR_Success:
+ llvm_unreachable("Conversion did not fail!");
+ }
+ break;
+
+ case FK_NonConstLValueReferenceBindingToTemporary:
+ if (isa<InitListExpr>(Args[0])) {
+ S.Diag(Kind.getLocation(),
+ diag::err_lvalue_reference_bind_to_initlist)
+ << DestType.getNonReferenceType().isVolatileQualified()
+ << DestType.getNonReferenceType()
+ << Args[0]->getSourceRange();
+ break;
+ }
+ // Intentional fallthrough
+
+ case FK_NonConstLValueReferenceBindingToUnrelated:
+ S.Diag(Kind.getLocation(),
+ Failure == FK_NonConstLValueReferenceBindingToTemporary
+ ? diag::err_lvalue_reference_bind_to_temporary
+ : diag::err_lvalue_reference_bind_to_unrelated)
+ << DestType.getNonReferenceType().isVolatileQualified()
+ << DestType.getNonReferenceType()
+ << Args[0]->getType()
+ << Args[0]->getSourceRange();
+ break;
+
+ case FK_RValueReferenceBindingToLValue:
+ S.Diag(Kind.getLocation(), diag::err_lvalue_to_rvalue_ref)
+ << DestType.getNonReferenceType() << Args[0]->getType()
+ << Args[0]->getSourceRange();
+ break;
+
+ case FK_ReferenceInitDropsQualifiers: {
+ QualType SourceType = Args[0]->getType();
+ QualType NonRefType = DestType.getNonReferenceType();
+ Qualifiers DroppedQualifiers =
+ SourceType.getQualifiers() - NonRefType.getQualifiers();
+
+ S.Diag(Kind.getLocation(), diag::err_reference_bind_drops_quals)
+ << SourceType
+ << NonRefType
+ << DroppedQualifiers.getCVRQualifiers()
+ << Args[0]->getSourceRange();
+ break;
+ }
+
+ case FK_ReferenceInitFailed:
+ S.Diag(Kind.getLocation(), diag::err_reference_bind_failed)
+ << DestType.getNonReferenceType()
+ << Args[0]->isLValue()
+ << Args[0]->getType()
+ << Args[0]->getSourceRange();
+ emitBadConversionNotes(S, Entity, Args[0]);
+ break;
+
+ case FK_ConversionFailed: {
+ QualType FromType = Args[0]->getType();
+ PartialDiagnostic PDiag = S.PDiag(diag::err_init_conversion_failed)
+ << (int)Entity.getKind()
+ << DestType
+ << Args[0]->isLValue()
+ << FromType
+ << Args[0]->getSourceRange();
+ S.HandleFunctionTypeMismatch(PDiag, FromType, DestType);
+ S.Diag(Kind.getLocation(), PDiag);
+ emitBadConversionNotes(S, Entity, Args[0]);
+ break;
+ }
+
+ case FK_ConversionFromPropertyFailed:
+ // No-op. This error has already been reported.
+ break;
+
+ case FK_TooManyInitsForScalar: {
+ SourceRange R;
+
+ auto *InitList = dyn_cast<InitListExpr>(Args[0]);
+ if (InitList && InitList->getNumInits() >= 1) {
+ R = SourceRange(InitList->getInit(0)->getLocEnd(), InitList->getLocEnd());
+ } else {
+ assert(Args.size() > 1 && "Expected multiple initializers!");
+ R = SourceRange(Args.front()->getLocEnd(), Args.back()->getLocEnd());
+ }
+
+ R.setBegin(S.getLocForEndOfToken(R.getBegin()));
+ if (Kind.isCStyleOrFunctionalCast())
+ S.Diag(Kind.getLocation(), diag::err_builtin_func_cast_more_than_one_arg)
+ << R;
+ else
+ S.Diag(Kind.getLocation(), diag::err_excess_initializers)
+ << /*scalar=*/2 << R;
+ break;
+ }
+
+ case FK_ReferenceBindingToInitList:
+ S.Diag(Kind.getLocation(), diag::err_reference_bind_init_list)
+ << DestType.getNonReferenceType() << Args[0]->getSourceRange();
+ break;
+
+ case FK_InitListBadDestinationType:
+ S.Diag(Kind.getLocation(), diag::err_init_list_bad_dest_type)
+ << (DestType->isRecordType()) << DestType << Args[0]->getSourceRange();
+ break;
+
+ case FK_ListConstructorOverloadFailed:
+ case FK_ConstructorOverloadFailed: {
+ SourceRange ArgsRange;
+ if (Args.size())
+ ArgsRange = SourceRange(Args.front()->getLocStart(),
+ Args.back()->getLocEnd());
+
+ if (Failure == FK_ListConstructorOverloadFailed) {
+ assert(Args.size() == 1 &&
+ "List construction from other than 1 argument.");
+ InitListExpr *InitList = cast<InitListExpr>(Args[0]);
+ Args = MultiExprArg(InitList->getInits(), InitList->getNumInits());
+ }
+
+ // FIXME: Using "DestType" for the entity we're printing is probably
+ // bad.
+ switch (FailedOverloadResult) {
+ case OR_Ambiguous:
+ S.Diag(Kind.getLocation(), diag::err_ovl_ambiguous_init)
+ << DestType << ArgsRange;
+ FailedCandidateSet.NoteCandidates(S, OCD_ViableCandidates, Args);
+ break;
+
+ case OR_No_Viable_Function:
+ if (Kind.getKind() == InitializationKind::IK_Default &&
+ (Entity.getKind() == InitializedEntity::EK_Base ||
+ Entity.getKind() == InitializedEntity::EK_Member) &&
+ isa<CXXConstructorDecl>(S.CurContext)) {
+ // This is implicit default initialization of a member or
+ // base within a constructor. If no viable function was
+ // found, notify the user that she needs to explicitly
+ // initialize this base/member.
+ CXXConstructorDecl *Constructor
+ = cast<CXXConstructorDecl>(S.CurContext);
+ if (Entity.getKind() == InitializedEntity::EK_Base) {
+ S.Diag(Kind.getLocation(), diag::err_missing_default_ctor)
+ << (Constructor->getInheritedConstructor() ? 2 :
+ Constructor->isImplicit() ? 1 : 0)
+ << S.Context.getTypeDeclType(Constructor->getParent())
+ << /*base=*/0
+ << Entity.getType();
+
+ RecordDecl *BaseDecl
+ = Entity.getBaseSpecifier()->getType()->getAs<RecordType>()
+ ->getDecl();
+ S.Diag(BaseDecl->getLocation(), diag::note_previous_decl)
+ << S.Context.getTagDeclType(BaseDecl);
+ } else {
+ S.Diag(Kind.getLocation(), diag::err_missing_default_ctor)
+ << (Constructor->getInheritedConstructor() ? 2 :
+ Constructor->isImplicit() ? 1 : 0)
+ << S.Context.getTypeDeclType(Constructor->getParent())
+ << /*member=*/1
+ << Entity.getName();
+ S.Diag(Entity.getDecl()->getLocation(),
+ diag::note_member_declared_at);
+
+ if (const RecordType *Record
+ = Entity.getType()->getAs<RecordType>())
+ S.Diag(Record->getDecl()->getLocation(),
+ diag::note_previous_decl)
+ << S.Context.getTagDeclType(Record->getDecl());
+ }
+ break;
+ }
+
+ S.Diag(Kind.getLocation(), diag::err_ovl_no_viable_function_in_init)
+ << DestType << ArgsRange;
+ FailedCandidateSet.NoteCandidates(S, OCD_AllCandidates, Args);
+ break;
+
+ case OR_Deleted: {
+ OverloadCandidateSet::iterator Best;
+ OverloadingResult Ovl
+ = FailedCandidateSet.BestViableFunction(S, Kind.getLocation(), Best);
+ if (Ovl != OR_Deleted) {
+ S.Diag(Kind.getLocation(), diag::err_ovl_deleted_init)
+ << true << DestType << ArgsRange;
+ llvm_unreachable("Inconsistent overload resolution?");
+ break;
+ }
+
+ // If this is a defaulted or implicitly-declared function, then
+ // it was implicitly deleted. Make it clear that the deletion was
+ // implicit.
+ if (S.isImplicitlyDeleted(Best->Function))
+ S.Diag(Kind.getLocation(), diag::err_ovl_deleted_special_init)
+ << S.getSpecialMember(cast<CXXMethodDecl>(Best->Function))
+ << DestType << ArgsRange;
+ else
+ S.Diag(Kind.getLocation(), diag::err_ovl_deleted_init)
+ << true << DestType << ArgsRange;
+
+ S.NoteDeletedFunction(Best->Function);
+ break;
+ }
+
+ case OR_Success:
+ llvm_unreachable("Conversion did not fail!");
+ }
+ }
+ break;
+
+ case FK_DefaultInitOfConst:
+ if (Entity.getKind() == InitializedEntity::EK_Member &&
+ isa<CXXConstructorDecl>(S.CurContext)) {
+ // This is implicit default-initialization of a const member in
+ // a constructor. Complain that it needs to be explicitly
+ // initialized.
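+      // For example (an illustrative sketch):
+      //   struct S { const int c; S() {} };  // 'c' needs a mem-initializer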
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(S.CurContext);
+ S.Diag(Kind.getLocation(), diag::err_uninitialized_member_in_ctor)
+ << (Constructor->getInheritedConstructor() ? 2 :
+ Constructor->isImplicit() ? 1 : 0)
+ << S.Context.getTypeDeclType(Constructor->getParent())
+ << /*const=*/1
+ << Entity.getName();
+ S.Diag(Entity.getDecl()->getLocation(), diag::note_previous_decl)
+ << Entity.getName();
+ } else {
+ S.Diag(Kind.getLocation(), diag::err_default_init_const)
+ << DestType << (bool)DestType->getAs<RecordType>();
+ }
+ break;
+
+ case FK_Incomplete:
+ S.RequireCompleteType(Kind.getLocation(), FailedIncompleteType,
+ diag::err_init_incomplete_type);
+ break;
+
+ case FK_ListInitializationFailed: {
+ // Run the init list checker again to emit diagnostics.
+ InitListExpr *InitList = cast<InitListExpr>(Args[0]);
+ diagnoseListInit(S, Entity, InitList);
+ break;
+ }
+
+ case FK_PlaceholderType: {
+ // FIXME: Already diagnosed!
+ break;
+ }
+
+ case FK_ExplicitConstructor: {
+ S.Diag(Kind.getLocation(), diag::err_selected_explicit_constructor)
+ << Args[0]->getSourceRange();
+ OverloadCandidateSet::iterator Best;
+ OverloadingResult Ovl
+ = FailedCandidateSet.BestViableFunction(S, Kind.getLocation(), Best);
+ (void)Ovl;
+ assert(Ovl == OR_Success && "Inconsistent overload resolution");
+ CXXConstructorDecl *CtorDecl = cast<CXXConstructorDecl>(Best->Function);
+ S.Diag(CtorDecl->getLocation(), diag::note_constructor_declared_here);
+ break;
+ }
+ }
+
+ PrintInitLocationNote(S, Entity);
+ return true;
+}
+
+void InitializationSequence::dump(raw_ostream &OS) const {
+ switch (SequenceKind) {
+ case FailedSequence: {
+ OS << "Failed sequence: ";
+ switch (Failure) {
+ case FK_TooManyInitsForReference:
+ OS << "too many initializers for reference";
+ break;
+
+ case FK_ArrayNeedsInitList:
+ OS << "array requires initializer list";
+ break;
+
+ case FK_AddressOfUnaddressableFunction:
+ OS << "address of unaddressable function was taken";
+ break;
+
+ case FK_ArrayNeedsInitListOrStringLiteral:
+ OS << "array requires initializer list or string literal";
+ break;
+
+ case FK_ArrayNeedsInitListOrWideStringLiteral:
+ OS << "array requires initializer list or wide string literal";
+ break;
+
+ case FK_NarrowStringIntoWideCharArray:
+ OS << "narrow string into wide char array";
+ break;
+
+ case FK_WideStringIntoCharArray:
+ OS << "wide string into char array";
+ break;
+
+ case FK_IncompatWideStringIntoWideChar:
+ OS << "incompatible wide string into wide char array";
+ break;
+
+ case FK_ArrayTypeMismatch:
+ OS << "array type mismatch";
+ break;
+
+ case FK_NonConstantArrayInit:
+ OS << "non-constant array initializer";
+ break;
+
+ case FK_AddressOfOverloadFailed:
+ OS << "address of overloaded function failed";
+ break;
+
+ case FK_ReferenceInitOverloadFailed:
+ OS << "overload resolution for reference initialization failed";
+ break;
+
+ case FK_NonConstLValueReferenceBindingToTemporary:
+ OS << "non-const lvalue reference bound to temporary";
+ break;
+
+ case FK_NonConstLValueReferenceBindingToUnrelated:
+ OS << "non-const lvalue reference bound to unrelated type";
+ break;
+
+ case FK_RValueReferenceBindingToLValue:
+ OS << "rvalue reference bound to an lvalue";
+ break;
+
+ case FK_ReferenceInitDropsQualifiers:
+ OS << "reference initialization drops qualifiers";
+ break;
+
+ case FK_ReferenceInitFailed:
+ OS << "reference initialization failed";
+ break;
+
+ case FK_ConversionFailed:
+ OS << "conversion failed";
+ break;
+
+ case FK_ConversionFromPropertyFailed:
+ OS << "conversion from property failed";
+ break;
+
+ case FK_TooManyInitsForScalar:
+ OS << "too many initializers for scalar";
+ break;
+
+ case FK_ReferenceBindingToInitList:
+ OS << "referencing binding to initializer list";
+ break;
+
+ case FK_InitListBadDestinationType:
+ OS << "initializer list for non-aggregate, non-scalar type";
+ break;
+
+ case FK_UserConversionOverloadFailed:
+ OS << "overloading failed for user-defined conversion";
+ break;
+
+ case FK_ConstructorOverloadFailed:
+ OS << "constructor overloading failed";
+ break;
+
+ case FK_DefaultInitOfConst:
+ OS << "default initialization of a const variable";
+ break;
+
+ case FK_Incomplete:
+ OS << "initialization of incomplete type";
+ break;
+
+ case FK_ListInitializationFailed:
+ OS << "list initialization checker failure";
+ break;
+
+ case FK_VariableLengthArrayHasInitializer:
+ OS << "variable length array has an initializer";
+ break;
+
+ case FK_PlaceholderType:
+ OS << "initializer expression isn't contextually valid";
+ break;
+
+ case FK_ListConstructorOverloadFailed:
+ OS << "list constructor overloading failed";
+ break;
+
+ case FK_ExplicitConstructor:
+ OS << "list copy initialization chose explicit constructor";
+ break;
+ }
+ OS << '\n';
+ return;
+ }
+
+ case DependentSequence:
+ OS << "Dependent sequence\n";
+ return;
+
+ case NormalSequence:
+ OS << "Normal sequence: ";
+ break;
+ }
+
+ for (step_iterator S = step_begin(), SEnd = step_end(); S != SEnd; ++S) {
+ if (S != step_begin()) {
+ OS << " -> ";
+ }
+
+ switch (S->Kind) {
+ case SK_ResolveAddressOfOverloadedFunction:
+ OS << "resolve address of overloaded function";
+ break;
+
+    case SK_CastDerivedToBaseRValue:
+      OS << "derived-to-base cast (rvalue, " << S->Type.getAsString() << ")";
+      break;
+
+    case SK_CastDerivedToBaseXValue:
+      OS << "derived-to-base cast (xvalue, " << S->Type.getAsString() << ")";
+      break;
+
+    case SK_CastDerivedToBaseLValue:
+      OS << "derived-to-base cast (lvalue, " << S->Type.getAsString() << ")";
+      break;
+
+ case SK_BindReference:
+ OS << "bind reference to lvalue";
+ break;
+
+ case SK_BindReferenceToTemporary:
+ OS << "bind reference to a temporary";
+ break;
+
+ case SK_ExtraneousCopyToTemporary:
+ OS << "extraneous C++03 copy to temporary";
+ break;
+
+ case SK_UserConversion:
+ OS << "user-defined conversion via " << *S->Function.Function;
+ break;
+
+ case SK_QualificationConversionRValue:
+ OS << "qualification conversion (rvalue)";
+ break;
+
+ case SK_QualificationConversionXValue:
+ OS << "qualification conversion (xvalue)";
+ break;
+
+ case SK_QualificationConversionLValue:
+ OS << "qualification conversion (lvalue)";
+ break;
+
+ case SK_AtomicConversion:
+ OS << "non-atomic-to-atomic conversion";
+ break;
+
+ case SK_LValueToRValue:
+ OS << "load (lvalue to rvalue)";
+ break;
+
+ case SK_ConversionSequence:
+ OS << "implicit conversion sequence (";
+ S->ICS->dump(); // FIXME: use OS
+ OS << ")";
+ break;
+
+ case SK_ConversionSequenceNoNarrowing:
+ OS << "implicit conversion sequence with narrowing prohibited (";
+ S->ICS->dump(); // FIXME: use OS
+ OS << ")";
+ break;
+
+ case SK_ListInitialization:
+ OS << "list aggregate initialization";
+ break;
+
+ case SK_UnwrapInitList:
+ OS << "unwrap reference initializer list";
+ break;
+
+ case SK_RewrapInitList:
+ OS << "rewrap reference initializer list";
+ break;
+
+ case SK_ConstructorInitialization:
+ OS << "constructor initialization";
+ break;
+
+ case SK_ConstructorInitializationFromList:
+ OS << "list initialization via constructor";
+ break;
+
+ case SK_ZeroInitialization:
+ OS << "zero initialization";
+ break;
+
+ case SK_CAssignment:
+ OS << "C assignment";
+ break;
+
+ case SK_StringInit:
+ OS << "string initialization";
+ break;
+
+ case SK_ObjCObjectConversion:
+ OS << "Objective-C object conversion";
+ break;
+
+ case SK_ArrayInit:
+ OS << "array initialization";
+ break;
+
+ case SK_ParenthesizedArrayInit:
+ OS << "parenthesized array initialization";
+ break;
+
+ case SK_PassByIndirectCopyRestore:
+ OS << "pass by indirect copy and restore";
+ break;
+
+ case SK_PassByIndirectRestore:
+ OS << "pass by indirect restore";
+ break;
+
+ case SK_ProduceObjCObject:
+ OS << "Objective-C object retension";
+ break;
+
+ case SK_StdInitializerList:
+ OS << "std::initializer_list from initializer list";
+ break;
+
+ case SK_StdInitializerListConstructorCall:
+ OS << "list initialization from std::initializer_list";
+ break;
+
+ case SK_OCLSamplerInit:
+ OS << "OpenCL sampler_t from integer constant";
+ break;
+
+ case SK_OCLZeroEvent:
+ OS << "OpenCL event_t from zero";
+ break;
+ }
+
+ OS << " [" << S->Type.getAsString() << ']';
+ }
+
+ OS << '\n';
+}
+
+void InitializationSequence::dump() const {
+ dump(llvm::errs());
+}
+
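+/// Diagnose a narrowing conversion within a braced initializer and suggest a
+/// static_cast that would silence it. For example (an illustrative sketch):
+/// \code
+///   int x{1.5};                    // narrowing: 'double' to 'int'
+///   int y{static_cast<int>(1.5)};  // the suggested fix-it form
+/// \endcode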
+static void DiagnoseNarrowingInInitList(Sema &S,
+ const ImplicitConversionSequence &ICS,
+ QualType PreNarrowingType,
+ QualType EntityType,
+ const Expr *PostInit) {
+ const StandardConversionSequence *SCS = nullptr;
+ switch (ICS.getKind()) {
+ case ImplicitConversionSequence::StandardConversion:
+ SCS = &ICS.Standard;
+ break;
+ case ImplicitConversionSequence::UserDefinedConversion:
+ SCS = &ICS.UserDefined.After;
+ break;
+ case ImplicitConversionSequence::AmbiguousConversion:
+ case ImplicitConversionSequence::EllipsisConversion:
+ case ImplicitConversionSequence::BadConversion:
+ return;
+ }
+
+ // C++11 [dcl.init.list]p7: Check whether this is a narrowing conversion.
+ APValue ConstantValue;
+ QualType ConstantType;
+ switch (SCS->getNarrowingKind(S.Context, PostInit, ConstantValue,
+ ConstantType)) {
+ case NK_Not_Narrowing:
+ // No narrowing occurred.
+ return;
+
+ case NK_Type_Narrowing:
+ // This was a floating-to-integer conversion, which is always considered a
+ // narrowing conversion even if the value is a constant and can be
+ // represented exactly as an integer.
+ S.Diag(PostInit->getLocStart(),
+ (S.getLangOpts().MicrosoftExt || !S.getLangOpts().CPlusPlus11)
+ ? diag::warn_init_list_type_narrowing
+ : diag::ext_init_list_type_narrowing)
+ << PostInit->getSourceRange()
+ << PreNarrowingType.getLocalUnqualifiedType()
+ << EntityType.getLocalUnqualifiedType();
+ break;
+
+ case NK_Constant_Narrowing:
+ // A constant value was narrowed.
+ S.Diag(PostInit->getLocStart(),
+ (S.getLangOpts().MicrosoftExt || !S.getLangOpts().CPlusPlus11)
+ ? diag::warn_init_list_constant_narrowing
+ : diag::ext_init_list_constant_narrowing)
+ << PostInit->getSourceRange()
+ << ConstantValue.getAsString(S.getASTContext(), ConstantType)
+ << EntityType.getLocalUnqualifiedType();
+ break;
+
+ case NK_Variable_Narrowing:
+ // A variable's value may have been narrowed.
+ S.Diag(PostInit->getLocStart(),
+ (S.getLangOpts().MicrosoftExt || !S.getLangOpts().CPlusPlus11)
+ ? diag::warn_init_list_variable_narrowing
+ : diag::ext_init_list_variable_narrowing)
+ << PostInit->getSourceRange()
+ << PreNarrowingType.getLocalUnqualifiedType()
+ << EntityType.getLocalUnqualifiedType();
+ break;
+ }
+
+ SmallString<128> StaticCast;
+ llvm::raw_svector_ostream OS(StaticCast);
+ OS << "static_cast<";
+ if (const TypedefType *TT = EntityType->getAs<TypedefType>()) {
+ // It's important to use the typedef's name if there is one so that the
+ // fixit doesn't break code using types like int64_t.
+ //
+ // FIXME: This will break if the typedef requires qualification. But
+ // getQualifiedNameAsString() includes non-machine-parsable components.
+ OS << *TT->getDecl();
+ } else if (const BuiltinType *BT = EntityType->getAs<BuiltinType>())
+ OS << BT->getName(S.getLangOpts());
+ else {
+ // Oops, we didn't find the actual type of the variable. Don't emit a fixit
+ // with a broken cast.
+ return;
+ }
+ OS << ">(";
+ S.Diag(PostInit->getLocStart(), diag::note_init_list_narrowing_silence)
+ << PostInit->getSourceRange()
+ << FixItHint::CreateInsertion(PostInit->getLocStart(), OS.str())
+ << FixItHint::CreateInsertion(
+ S.getLocForEndOfToken(PostInit->getLocEnd()), ")");
+}
+
+//===----------------------------------------------------------------------===//
+// Initialization helper functions
+//===----------------------------------------------------------------------===//
+bool
+Sema::CanPerformCopyInitialization(const InitializedEntity &Entity,
+ ExprResult Init) {
+ if (Init.isInvalid())
+ return false;
+
+ Expr *InitE = Init.get();
+ assert(InitE && "No initialization expression");
+
+ InitializationKind Kind
+ = InitializationKind::CreateCopy(InitE->getLocStart(), SourceLocation());
+ InitializationSequence Seq(*this, Entity, Kind, InitE);
+ return !Seq.Failed();
+}
+
+ExprResult
+Sema::PerformCopyInitialization(const InitializedEntity &Entity,
+ SourceLocation EqualLoc,
+ ExprResult Init,
+ bool TopLevelOfInitList,
+ bool AllowExplicit) {
+ if (Init.isInvalid())
+ return ExprError();
+
+ Expr *InitE = Init.get();
+ assert(InitE && "No initialization expression?");
+
+ if (EqualLoc.isInvalid())
+ EqualLoc = InitE->getLocStart();
+
+ InitializationKind Kind = InitializationKind::CreateCopy(InitE->getLocStart(),
+ EqualLoc,
+ AllowExplicit);
+ InitializationSequence Seq(*this, Entity, Kind, InitE, TopLevelOfInitList);
+
+ ExprResult Result = Seq.Perform(*this, Entity, Kind, InitE);
+
+ return Result;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp
new file mode 100644
index 0000000..884add2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp
@@ -0,0 +1,1703 @@
+//===--- SemaLambda.cpp - Semantic Analysis for C++11 Lambdas -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for C++ lambda expressions.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Sema/DeclSpec.h"
+#include "TypeLocBuilder.h"
+#include "clang/AST/ASTLambda.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaLambda.h"
+using namespace clang;
+using namespace sema;
+
+/// \brief Examines the FunctionScopeInfo stack to determine the nearest
+/// enclosing lambda (to the current lambda) that is 'capture-ready' for
+/// the variable referenced in the current lambda (i.e. \p VarToCapture).
+/// If successful, returns the index into Sema's FunctionScopeInfo stack
+/// of the capture-ready lambda's LambdaScopeInfo.
+///
+/// Climbs down the stack of lambdas (deepest nested lambda - i.e. current
+/// lambda - is on top) to determine the index of the nearest enclosing/outer
+/// lambda that is ready to capture the \p VarToCapture being referenced in
+/// the current lambda.
+/// As we climb down the stack, we want the index of the first such lambda -
+/// that is, the lambda with the highest index that is 'capture-ready'.
+///
+/// A lambda 'L' is capture-ready for 'V' (var or this) if:
+/// - its enclosing context is non-dependent
+/// - and if the chain of lambdas between L and the lambda in which
+/// V is potentially used (i.e. the lambda at the top of the scope info
+/// stack) can all capture or have already captured V.
+/// If \p VarToCapture is 'null' then we are trying to capture 'this'.
+///
+/// Note that a lambda that is deemed 'capture-ready' still needs to be checked
+/// for whether it is 'capture-capable' (see
+/// getStackIndexOfNearestEnclosingCaptureCapableLambda), before it can truly
+/// capture.
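+///
+/// For example (an illustrative sketch):
+/// \code
+///   int x = 10;
+///   auto L = [=](auto a) {   // capture-ready for 'x': its enclosing
+///     return [=](auto b) {   //   context is non-dependent, and every
+///       return x + b;        //   lambda in the chain can capture 'x'
+///     };
+///   };
+/// \endcode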
+///
+/// \param FunctionScopes - Sema's stack of nested FunctionScopeInfo's (which a
+/// LambdaScopeInfo inherits from). The current/deepest/innermost lambda
+/// is at the top of the stack and has the highest index.
+/// \param VarToCapture - the variable to capture. If NULL, capture 'this'.
+///
+/// \returns An Optional<unsigned> that, if it evaluates to 'true', contains
+/// the index (into Sema's FunctionScopeInfo stack) of the innermost lambda
+/// which is capture-ready. If the return value evaluates to 'false', then
+/// no lambda is capture-ready for \p VarToCapture.
+static inline Optional<unsigned>
+getStackIndexOfNearestEnclosingCaptureReadyLambda(
+ ArrayRef<const clang::sema::FunctionScopeInfo *> FunctionScopes,
+ VarDecl *VarToCapture) {
+ // Label failure to capture.
+ const Optional<unsigned> NoLambdaIsCaptureReady;
+
+ assert(
+ isa<clang::sema::LambdaScopeInfo>(
+ FunctionScopes[FunctionScopes.size() - 1]) &&
+ "The function on the top of sema's function-info stack must be a lambda");
+
+ // If VarToCapture is null, we are attempting to capture 'this'.
+ const bool IsCapturingThis = !VarToCapture;
+ const bool IsCapturingVariable = !IsCapturingThis;
+
+ // Start with the current lambda at the top of the stack (highest index).
+ unsigned CurScopeIndex = FunctionScopes.size() - 1;
+ DeclContext *EnclosingDC =
+ cast<sema::LambdaScopeInfo>(FunctionScopes[CurScopeIndex])->CallOperator;
+
+ do {
+ const clang::sema::LambdaScopeInfo *LSI =
+ cast<sema::LambdaScopeInfo>(FunctionScopes[CurScopeIndex]);
+    // If we have climbed down to an intervening enclosing lambda that
+    // contains the variable declaration, it obviously cannot (and must not)
+    // capture the variable.
+ // Since its enclosing DC is dependent, all the lambdas between it and the
+ // innermost nested lambda are dependent (otherwise we wouldn't have
+ // arrived here) - so we don't yet have a lambda that can capture the
+ // variable.
+ if (IsCapturingVariable &&
+ VarToCapture->getDeclContext()->Equals(EnclosingDC))
+ return NoLambdaIsCaptureReady;
+
+    // For an enclosing lambda to be capture-ready for an entity, all
+    // intervening lambdas have to be able to capture that entity. If even
+    // one of the intervening lambdas is not capable of capturing the entity
+    // then no enclosing lambda can ever capture that entity.
+    // For example:
+    //   const int x = 10;
+    //   [=](auto a) {    #1
+    //     [](auto b) {   #2 <-- an intervening lambda that can never capture 'x'
+    //       [=](auto c) {  #3
+    //         f(x, c);  <-- cannot lead to x's speculative capture by #1 or #2
+    //       }; }; };
+ // If they do not have a default implicit capture, check to see
+ // if the entity has already been explicitly captured.
+ // If even a single dependent enclosing lambda lacks the capability
+ // to ever capture this variable, there is no further enclosing
+ // non-dependent lambda that can capture this variable.
+ if (LSI->ImpCaptureStyle == sema::LambdaScopeInfo::ImpCap_None) {
+ if (IsCapturingVariable && !LSI->isCaptured(VarToCapture))
+ return NoLambdaIsCaptureReady;
+ if (IsCapturingThis && !LSI->isCXXThisCaptured())
+ return NoLambdaIsCaptureReady;
+ }
+ EnclosingDC = getLambdaAwareParentOfDeclContext(EnclosingDC);
+
+ assert(CurScopeIndex);
+ --CurScopeIndex;
+ } while (!EnclosingDC->isTranslationUnit() &&
+ EnclosingDC->isDependentContext() &&
+ isLambdaCallOperator(EnclosingDC));
+
+ assert(CurScopeIndex < (FunctionScopes.size() - 1));
+  // If EnclosingDC is not dependent, then the immediately nested lambda
+ // (one index above) is capture-ready.
+ if (!EnclosingDC->isDependentContext())
+ return CurScopeIndex + 1;
+ return NoLambdaIsCaptureReady;
+}
+
+/// \brief Examines the FunctionScopeInfo stack to determine the nearest
+/// enclosing lambda (to the current lambda) that is 'capture-capable' for
+/// the variable referenced in the current lambda (i.e. \p VarToCapture).
+/// If successful, returns the index into Sema's FunctionScopeInfo stack
+/// of the capture-capable lambda's LambdaScopeInfo.
+///
+/// Given the current stack of lambdas being processed by Sema and
+/// the variable of interest, to identify the nearest enclosing lambda (to the
+/// current lambda at the top of the stack) that can truly capture
+/// a variable, it has to have the following two properties:
+/// a) 'capture-ready' - be the innermost lambda that is 'capture-ready':
+/// - climb down the stack (i.e. starting from the innermost and examining
+/// each outer lambda step by step) checking if each enclosing
+/// lambda can either implicitly or explicitly capture the variable.
+/// Record the first such lambda that is enclosed in a non-dependent
+/// context. If no such lambda currently exists return failure.
+/// b) 'capture-capable' - make sure the 'capture-ready' lambda can truly
+/// capture the variable by checking all its enclosing lambdas:
+/// - check if all outer lambdas enclosing the 'capture-ready' lambda
+/// identified above in 'a' can also capture the variable (this is done
+/// via tryCaptureVariable for variables and CheckCXXThisCapture for
+/// 'this' by passing in the index of the Lambda identified in step 'a')
+///
+/// \param FunctionScopes - Sema's stack of nested FunctionScopeInfo's (which a
+/// LambdaScopeInfo inherits from). The current/deepest/innermost lambda
+/// is at the top of the stack.
+///
+/// \param VarToCapture - the variable to capture. If NULL, capture 'this'.
+///
+/// \returns An Optional<unsigned> that, if it evaluates to 'true', contains
+/// the index (into Sema's FunctionScopeInfo stack) of the innermost lambda
+/// which is capture-capable. If the return value evaluates to 'false', then
+/// no lambda is capture-capable for \p VarToCapture.
+Optional<unsigned> clang::getStackIndexOfNearestEnclosingCaptureCapableLambda(
+ ArrayRef<const sema::FunctionScopeInfo *> FunctionScopes,
+ VarDecl *VarToCapture, Sema &S) {
+
+ const Optional<unsigned> NoLambdaIsCaptureCapable;
+
+ const Optional<unsigned> OptionalStackIndex =
+ getStackIndexOfNearestEnclosingCaptureReadyLambda(FunctionScopes,
+ VarToCapture);
+ if (!OptionalStackIndex)
+ return NoLambdaIsCaptureCapable;
+
+ const unsigned IndexOfCaptureReadyLambda = OptionalStackIndex.getValue();
+ assert(((IndexOfCaptureReadyLambda != (FunctionScopes.size() - 1)) ||
+ S.getCurGenericLambda()) &&
+ "The capture ready lambda for a potential capture can only be the "
+ "current lambda if it is a generic lambda");
+
+ const sema::LambdaScopeInfo *const CaptureReadyLambdaLSI =
+ cast<sema::LambdaScopeInfo>(FunctionScopes[IndexOfCaptureReadyLambda]);
+
+ // If VarToCapture is null, we are attempting to capture 'this'
+ const bool IsCapturingThis = !VarToCapture;
+ const bool IsCapturingVariable = !IsCapturingThis;
+
+ if (IsCapturingVariable) {
+ // Check if the capture-ready lambda can truly capture the variable, by
+ // checking whether all enclosing lambdas of the capture-ready lambda allow
+ // the capture - i.e. make sure it is capture-capable.
+ QualType CaptureType, DeclRefType;
+ const bool CanCaptureVariable =
+ !S.tryCaptureVariable(VarToCapture,
+ /*ExprVarIsUsedInLoc*/ SourceLocation(),
+ clang::Sema::TryCapture_Implicit,
+ /*EllipsisLoc*/ SourceLocation(),
+ /*BuildAndDiagnose*/ false, CaptureType,
+ DeclRefType, &IndexOfCaptureReadyLambda);
+ if (!CanCaptureVariable)
+ return NoLambdaIsCaptureCapable;
+ } else {
+ // Check if the capture-ready lambda can truly capture 'this' by checking
+ // whether all enclosing lambdas of the capture-ready lambda can capture
+ // 'this'.
+ const bool CanCaptureThis =
+ !S.CheckCXXThisCapture(
+ CaptureReadyLambdaLSI->PotentialThisCaptureLocation,
+ /*Explicit*/ false, /*BuildAndDiagnose*/ false,
+ &IndexOfCaptureReadyLambda);
+ if (!CanCaptureThis)
+ return NoLambdaIsCaptureCapable;
+ }
+ return IndexOfCaptureReadyLambda;
+}
+
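+/// Get (or lazily build) the invented template parameter list of a generic
+/// lambda. For a generic lambda such as (an illustrative sketch)
+/// \code
+///   auto Add = [](auto a, auto b) { return a + b; };
+/// \endcode
+/// each 'auto' parameter corresponds to an invented template parameter of
+/// the closure type's call operator.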
+static inline TemplateParameterList *
+getGenericLambdaTemplateParameterList(LambdaScopeInfo *LSI, Sema &SemaRef) {
+ if (LSI->GLTemplateParameterList)
+ return LSI->GLTemplateParameterList;
+
+ if (!LSI->AutoTemplateParams.empty()) {
+ SourceRange IntroRange = LSI->IntroducerRange;
+ SourceLocation LAngleLoc = IntroRange.getBegin();
+ SourceLocation RAngleLoc = IntroRange.getEnd();
+ LSI->GLTemplateParameterList = TemplateParameterList::Create(
+ SemaRef.Context,
+ /*Template kw loc*/ SourceLocation(), LAngleLoc,
+ llvm::makeArrayRef((NamedDecl *const *)LSI->AutoTemplateParams.data(),
+ LSI->AutoTemplateParams.size()),
+ RAngleLoc);
+ }
+ return LSI->GLTemplateParameterList;
+}
+
+CXXRecordDecl *Sema::createLambdaClosureType(SourceRange IntroducerRange,
+ TypeSourceInfo *Info,
+ bool KnownDependent,
+ LambdaCaptureDefault CaptureDefault) {
+ DeclContext *DC = CurContext;
+ while (!(DC->isFunctionOrMethod() || DC->isRecord() || DC->isFileContext()))
+ DC = DC->getParent();
+ bool IsGenericLambda = getGenericLambdaTemplateParameterList(getCurLambda(),
+ *this);
+ // Start constructing the lambda class.
+ CXXRecordDecl *Class = CXXRecordDecl::CreateLambda(Context, DC, Info,
+ IntroducerRange.getBegin(),
+ KnownDependent,
+ IsGenericLambda,
+ CaptureDefault);
+ DC->addDecl(Class);
+
+ return Class;
+}
+
+/// \brief Determine whether the given context is or is enclosed in an inline
+/// function.
+static bool isInInlineFunction(const DeclContext *DC) {
+ while (!DC->isFileContext()) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(DC))
+ if (FD->isInlined())
+ return true;
+
+ DC = DC->getLexicalParent();
+ }
+
+ return false;
+}
+
+MangleNumberingContext *
+Sema::getCurrentMangleNumberContext(const DeclContext *DC,
+ Decl *&ManglingContextDecl) {
+ // Compute the context for allocating mangling numbers in the current
+ // expression, if the ABI requires them.
+ ManglingContextDecl = ExprEvalContexts.back().ManglingContextDecl;
+
+ enum ContextKind {
+ Normal,
+ DefaultArgument,
+ DataMember,
+ StaticDataMember
+ } Kind = Normal;
+
+ // Default arguments of member function parameters that appear in a class
+ // definition, as well as the initializers of data members, receive special
+ // treatment. Identify them.
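+  // For example (an illustrative sketch), the closure types below must
+  // mangle consistently across translation units:
+  //   struct S {
+  //     int a = []{ return 1; }();          // DataMember
+  //     void f(int x = []{ return 2; }());  // DefaultArgument
+  //   };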
+ if (ManglingContextDecl) {
+ if (ParmVarDecl *Param = dyn_cast<ParmVarDecl>(ManglingContextDecl)) {
+ if (const DeclContext *LexicalDC
+ = Param->getDeclContext()->getLexicalParent())
+ if (LexicalDC->isRecord())
+ Kind = DefaultArgument;
+ } else if (VarDecl *Var = dyn_cast<VarDecl>(ManglingContextDecl)) {
+ if (Var->getDeclContext()->isRecord())
+ Kind = StaticDataMember;
+ } else if (isa<FieldDecl>(ManglingContextDecl)) {
+ Kind = DataMember;
+ }
+ }
+
+ // Itanium ABI [5.1.7]:
+ // In the following contexts [...] the one-definition rule requires closure
+ // types in different translation units to "correspond":
+ bool IsInNonspecializedTemplate =
+ !ActiveTemplateInstantiations.empty() || CurContext->isDependentContext();
+ switch (Kind) {
+ case Normal:
+ // -- the bodies of non-exported nonspecialized template functions
+ // -- the bodies of inline functions
+ if ((IsInNonspecializedTemplate &&
+ !(ManglingContextDecl && isa<ParmVarDecl>(ManglingContextDecl))) ||
+ isInInlineFunction(CurContext)) {
+ ManglingContextDecl = nullptr;
+ return &Context.getManglingNumberContext(DC);
+ }
+
+ ManglingContextDecl = nullptr;
+ return nullptr;
+
+ case StaticDataMember:
+ // -- the initializers of nonspecialized static members of template classes
+ if (!IsInNonspecializedTemplate) {
+ ManglingContextDecl = nullptr;
+ return nullptr;
+ }
+ // Fall through to get the current context.
+
+ case DataMember:
+ // -- the in-class initializers of class members
+ case DefaultArgument:
+ // -- default arguments appearing in class definitions
+ return &ExprEvalContexts.back().getMangleNumberingContext(Context);
+ }
+
+ llvm_unreachable("unexpected context");
+}
+
+MangleNumberingContext &
+Sema::ExpressionEvaluationContextRecord::getMangleNumberingContext(
+ ASTContext &Ctx) {
+ assert(ManglingContextDecl && "Need to have a context declaration");
+ if (!MangleNumbering)
+ MangleNumbering = Ctx.createMangleNumberingContext();
+ return *MangleNumbering;
+}
+
+CXXMethodDecl *Sema::startLambdaDefinition(CXXRecordDecl *Class,
+ SourceRange IntroducerRange,
+ TypeSourceInfo *MethodTypeInfo,
+ SourceLocation EndLoc,
+ ArrayRef<ParmVarDecl *> Params) {
+ QualType MethodType = MethodTypeInfo->getType();
+ TemplateParameterList *TemplateParams =
+ getGenericLambdaTemplateParameterList(getCurLambda(), *this);
+ // If a lambda appears in a dependent context or is a generic lambda (has
+ // template parameters) and has an 'auto' return type, deduce it to a
+ // dependent type.
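+  // For example (an illustrative sketch), given
+  //   auto L = [](auto x) { return x; };
+  // the call operator's 'auto' return type stays dependent until the call
+  // operator template is instantiated for a concrete 'x'.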
+ if (Class->isDependentContext() || TemplateParams) {
+ const FunctionProtoType *FPT = MethodType->castAs<FunctionProtoType>();
+ QualType Result = FPT->getReturnType();
+ if (Result->isUndeducedType()) {
+ Result = SubstAutoType(Result, Context.DependentTy);
+ MethodType = Context.getFunctionType(Result, FPT->getParamTypes(),
+ FPT->getExtProtoInfo());
+ }
+ }
+
+ // C++11 [expr.prim.lambda]p5:
+ // The closure type for a lambda-expression has a public inline function
+ // call operator (13.5.4) whose parameters and return type are described by
+ // the lambda-expression's parameter-declaration-clause and
+ // trailing-return-type respectively.
+ DeclarationName MethodName
+ = Context.DeclarationNames.getCXXOperatorName(OO_Call);
+ DeclarationNameLoc MethodNameLoc;
+ MethodNameLoc.CXXOperatorName.BeginOpNameLoc
+ = IntroducerRange.getBegin().getRawEncoding();
+ MethodNameLoc.CXXOperatorName.EndOpNameLoc
+ = IntroducerRange.getEnd().getRawEncoding();
+ CXXMethodDecl *Method
+ = CXXMethodDecl::Create(Context, Class, EndLoc,
+ DeclarationNameInfo(MethodName,
+ IntroducerRange.getBegin(),
+ MethodNameLoc),
+ MethodType, MethodTypeInfo,
+ SC_None,
+ /*isInline=*/true,
+ /*isConstExpr=*/false,
+ EndLoc);
+ Method->setAccess(AS_public);
+
+ // Temporarily set the lexical declaration context to the current
+ // context, so that the Scope stack matches the lexical nesting.
+ Method->setLexicalDeclContext(CurContext);
+ // Create a function template if we have a template parameter list
+ FunctionTemplateDecl *const TemplateMethod = TemplateParams ?
+ FunctionTemplateDecl::Create(Context, Class,
+ Method->getLocation(), MethodName,
+ TemplateParams,
+ Method) : nullptr;
+ if (TemplateMethod) {
+ TemplateMethod->setLexicalDeclContext(CurContext);
+ TemplateMethod->setAccess(AS_public);
+ Method->setDescribedFunctionTemplate(TemplateMethod);
+ }
+
+ // Add parameters.
+ if (!Params.empty()) {
+ Method->setParams(Params);
+ CheckParmsForFunctionDef(const_cast<ParmVarDecl **>(Params.begin()),
+ const_cast<ParmVarDecl **>(Params.end()),
+ /*CheckParameterNames=*/false);
+
+ for (auto P : Method->params())
+ P->setOwningFunction(Method);
+ }
+
+ Decl *ManglingContextDecl;
+ if (MangleNumberingContext *MCtx =
+ getCurrentMangleNumberContext(Class->getDeclContext(),
+ ManglingContextDecl)) {
+ unsigned ManglingNumber = MCtx->getManglingNumber(Method);
+ Class->setLambdaMangling(ManglingNumber, ManglingContextDecl);
+ }
+
+ return Method;
+}
+
+void Sema::buildLambdaScope(LambdaScopeInfo *LSI,
+ CXXMethodDecl *CallOperator,
+ SourceRange IntroducerRange,
+ LambdaCaptureDefault CaptureDefault,
+ SourceLocation CaptureDefaultLoc,
+ bool ExplicitParams,
+ bool ExplicitResultType,
+ bool Mutable) {
+ LSI->CallOperator = CallOperator;
+ CXXRecordDecl *LambdaClass = CallOperator->getParent();
+ LSI->Lambda = LambdaClass;
+ if (CaptureDefault == LCD_ByCopy)
+ LSI->ImpCaptureStyle = LambdaScopeInfo::ImpCap_LambdaByval;
+ else if (CaptureDefault == LCD_ByRef)
+ LSI->ImpCaptureStyle = LambdaScopeInfo::ImpCap_LambdaByref;
+ LSI->CaptureDefaultLoc = CaptureDefaultLoc;
+ LSI->IntroducerRange = IntroducerRange;
+ LSI->ExplicitParams = ExplicitParams;
+ LSI->Mutable = Mutable;
+
+ if (ExplicitResultType) {
+ LSI->ReturnType = CallOperator->getReturnType();
+
+ if (!LSI->ReturnType->isDependentType() &&
+ !LSI->ReturnType->isVoidType()) {
+ if (RequireCompleteType(CallOperator->getLocStart(), LSI->ReturnType,
+ diag::err_lambda_incomplete_result)) {
+ // Do nothing.
+ }
+ }
+ } else {
+ LSI->HasImplicitReturnType = true;
+ }
+}
+
+void Sema::finishLambdaExplicitCaptures(LambdaScopeInfo *LSI) {
+ LSI->finishedExplicitCaptures();
+}
+
+void Sema::addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope) {
+ // Introduce our parameters into the function scope
+ for (unsigned p = 0, NumParams = CallOperator->getNumParams();
+ p < NumParams; ++p) {
+ ParmVarDecl *Param = CallOperator->getParamDecl(p);
+
+ // If this has an identifier, add it to the scope stack.
+ if (CurScope && Param->getIdentifier()) {
+ CheckShadow(CurScope, Param);
+
+ PushOnScopeChains(Param, CurScope);
+ }
+ }
+}
+
+/// If this expression is an enumerator-like expression of some type
+/// T, return the type T; otherwise, return null.
+///
+/// Pointer comparisons on the result here should always work because
+/// it's derived from either the parent of an EnumConstantDecl
+/// (i.e. the definition) or the declaration returned by
+/// EnumType::getDecl() (i.e. the definition).
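+///
+/// For example (an illustrative sketch, using the blocks extension):
+/// \code
+///   enum Color { Red, Green };
+///   // Both returns are enumerator-like expressions of type Color, so the
+///   // block's return type can be inferred as Color.
+///   ^{ if (flag) return Red; return Green; }
+/// \endcode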
+static EnumDecl *findEnumForBlockReturn(Expr *E) {
+ // An expression is an enumerator-like expression of type T if,
+ // ignoring parens and parens-like expressions:
+ E = E->IgnoreParens();
+
+ // - it is an enumerator whose enum type is T or
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (EnumConstantDecl *D
+ = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
+ return cast<EnumDecl>(D->getDeclContext());
+ }
+ return nullptr;
+ }
+
+ // - it is a comma expression whose RHS is an enumerator-like
+ // expression of type T or
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_Comma)
+ return findEnumForBlockReturn(BO->getRHS());
+ return nullptr;
+ }
+
+ // - it is a statement-expression whose value expression is an
+ // enumerator-like expression of type T or
+ if (StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
+ if (Expr *last = dyn_cast_or_null<Expr>(SE->getSubStmt()->body_back()))
+ return findEnumForBlockReturn(last);
+ return nullptr;
+ }
+
+ // - it is a ternary conditional operator (not the GNU ?:
+ // extension) whose second and third operands are
+ // enumerator-like expressions of type T or
+ if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
+ if (EnumDecl *ED = findEnumForBlockReturn(CO->getTrueExpr()))
+ if (ED == findEnumForBlockReturn(CO->getFalseExpr()))
+ return ED;
+ return nullptr;
+ }
+
+ // (implicitly:)
+ // - it is an implicit integral conversion applied to an
+ // enumerator-like expression of type T or
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ // We can sometimes see integral conversions in valid
+ // enumerator-like expressions.
+ if (ICE->getCastKind() == CK_IntegralCast)
+ return findEnumForBlockReturn(ICE->getSubExpr());
+
+ // Otherwise, just rely on the type.
+ }
+
+ // - it is an expression of that formal enum type.
+ if (const EnumType *ET = E->getType()->getAs<EnumType>()) {
+ return ET->getDecl();
+ }
+
+ // Otherwise, nope.
+ return nullptr;
+}
+
+/// Attempt to find a type T for which the returned expression of the
+/// given statement is an enumerator-like expression of that type.
+static EnumDecl *findEnumForBlockReturn(ReturnStmt *ret) {
+ if (Expr *retValue = ret->getRetValue())
+ return findEnumForBlockReturn(retValue);
+ return nullptr;
+}
+
+/// Attempt to find a common type T for which all of the returned
+/// expressions in a block are enumerator-like expressions of that
+/// type.
+static EnumDecl *findCommonEnumForBlockReturns(ArrayRef<ReturnStmt*> returns) {
+ ArrayRef<ReturnStmt*>::iterator i = returns.begin(), e = returns.end();
+
+ // Try to find one for the first return.
+ EnumDecl *ED = findEnumForBlockReturn(*i);
+ if (!ED) return nullptr;
+
+ // Check that the rest of the returns have the same enum.
+ for (++i; i != e; ++i) {
+ if (findEnumForBlockReturn(*i) != ED)
+ return nullptr;
+ }
+
+ // Never infer an anonymous enum type.
+ if (!ED->hasNameForLinkage()) return nullptr;
+
+ return ED;
+}
+
+/// Adjust the given return statements so that they formally return
+/// the given type. It should require, at most, an IntegralCast.
+static void adjustBlockReturnsToEnum(Sema &S, ArrayRef<ReturnStmt*> returns,
+ QualType returnType) {
+ for (ArrayRef<ReturnStmt*>::iterator
+ i = returns.begin(), e = returns.end(); i != e; ++i) {
+ ReturnStmt *ret = *i;
+ Expr *retValue = ret->getRetValue();
+ if (S.Context.hasSameType(retValue->getType(), returnType))
+ continue;
+
+ // Right now we only support integral fixup casts.
+ assert(returnType->isIntegralOrUnscopedEnumerationType());
+ assert(retValue->getType()->isIntegralOrUnscopedEnumerationType());
+
+ ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(retValue);
+
+ Expr *E = (cleanups ? cleanups->getSubExpr() : retValue);
+ E = ImplicitCastExpr::Create(S.Context, returnType, CK_IntegralCast,
+ E, /*base path*/ nullptr, VK_RValue);
+ if (cleanups) {
+ cleanups->setSubExpr(E);
+ } else {
+ ret->setRetValue(E);
+ }
+ }
+}
+
+void Sema::deduceClosureReturnType(CapturingScopeInfo &CSI) {
+ assert(CSI.HasImplicitReturnType);
+  // If it was ever a placeholder, it had to have been deduced to
+  // DependentTy.
+ assert(CSI.ReturnType.isNull() || !CSI.ReturnType->isUndeducedType());
+
+ // C++ core issue 975:
+ // If a lambda-expression does not include a trailing-return-type,
+ // it is as if the trailing-return-type denotes the following type:
+ // - if there are no return statements in the compound-statement,
+ // or all return statements return either an expression of type
+ // void or no expression or braced-init-list, the type void;
+ // - otherwise, if all return statements return an expression
+ // and the types of the returned expressions after
+ // lvalue-to-rvalue conversion (4.1 [conv.lval]),
+ // array-to-pointer conversion (4.2 [conv.array]), and
+ // function-to-pointer conversion (4.3 [conv.func]) are the
+ // same, that common type;
+ // - otherwise, the program is ill-formed.
+ //
+ // C++ core issue 1048 additionally removes top-level cv-qualifiers
+ // from the types of returned expressions to match the C++14 auto
+ // deduction rules.
+ //
+ // In addition, in blocks in non-C++ modes, if all of the return
+ // statements are enumerator-like expressions of some type T, where
+ // T has a name for linkage, then we infer the return type of the
+ // block to be that type.
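+  //
+  // For example (an illustrative sketch):
+  //   [](bool b) { if (b) return 1; return 2; };   // OK, deduced as 'int'
+  //   [](bool b) { if (b) return 1; return 2.0; }; // ill-formed: 'int' and
+  //                                                // 'double' disagree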
+
+ // First case: no return statements, implicit void return type.
+ ASTContext &Ctx = getASTContext();
+ if (CSI.Returns.empty()) {
+ // It's possible there were simply no /valid/ return statements.
+ // In this case, the first one we found may have at least given us a type.
+ if (CSI.ReturnType.isNull())
+ CSI.ReturnType = Ctx.VoidTy;
+ return;
+ }
+
+ // Second case: at least one return statement has dependent type.
+ // Delay type checking until instantiation.
+ assert(!CSI.ReturnType.isNull() && "We should have a tentative return type.");
+ if (CSI.ReturnType->isDependentType())
+ return;
+
+ // Try to apply the enum-fuzz rule.
+ if (!getLangOpts().CPlusPlus) {
+ assert(isa<BlockScopeInfo>(CSI));
+ const EnumDecl *ED = findCommonEnumForBlockReturns(CSI.Returns);
+ if (ED) {
+ CSI.ReturnType = Context.getTypeDeclType(ED);
+ adjustBlockReturnsToEnum(*this, CSI.Returns, CSI.ReturnType);
+ return;
+ }
+ }
+
+ // Third case: only one return statement. Don't bother doing extra work!
+ SmallVectorImpl<ReturnStmt*>::iterator I = CSI.Returns.begin(),
+ E = CSI.Returns.end();
+ if (I+1 == E)
+ return;
+
+ // General case: many return statements.
+ // Check that they all have compatible return types.
+
+ // We require the return types to strictly match here.
+ // Note that we've already done the required promotions as part of
+ // processing the return statement.
+ for (; I != E; ++I) {
+ const ReturnStmt *RS = *I;
+ const Expr *RetE = RS->getRetValue();
+
+ QualType ReturnType =
+ (RetE ? RetE->getType() : Context.VoidTy).getUnqualifiedType();
+ if (Context.getCanonicalFunctionResultType(ReturnType) ==
+ Context.getCanonicalFunctionResultType(CSI.ReturnType))
+ continue;
+
+ // FIXME: This is a poor diagnostic for ReturnStmts without expressions.
+ // TODO: It's possible that the *first* return is the divergent one.
+ Diag(RS->getLocStart(),
+ diag::err_typecheck_missing_return_type_incompatible)
+ << ReturnType << CSI.ReturnType
+ << isa<LambdaScopeInfo>(CSI);
+ // Continue iterating so that we keep emitting diagnostics.
+ }
+}
+
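+/// Build and deduce the type of an init-capture's initializer. For example
+/// (an illustrative sketch), in
+/// \code
+///   int i = 0;
+///   auto L = [n = 42, &r = i]() { return n + r; };
+/// \endcode
+/// 'n' is deduced as if by 'auto n = 42' and 'r' as if by 'auto &r = i'.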
+QualType Sema::buildLambdaInitCaptureInitialization(SourceLocation Loc,
+ bool ByRef,
+ IdentifierInfo *Id,
+ bool IsDirectInit,
+ Expr *&Init) {
+ // Create an 'auto' or 'auto&' TypeSourceInfo that we can use to
+ // deduce against.
+ QualType DeductType = Context.getAutoDeductType();
+ TypeLocBuilder TLB;
+ TLB.pushTypeSpec(DeductType).setNameLoc(Loc);
+ if (ByRef) {
+ DeductType = BuildReferenceType(DeductType, true, Loc, Id);
+ assert(!DeductType.isNull() && "can't build reference to auto");
+ TLB.push<ReferenceTypeLoc>(DeductType).setSigilLoc(Loc);
+ }
+ TypeSourceInfo *TSI = TLB.getTypeSourceInfo(Context, DeductType);
+
+ // Deduce the type of the init capture.
+ QualType DeducedType = deduceVarTypeFromInitializer(
+ /*VarDecl*/nullptr, DeclarationName(Id), DeductType, TSI,
+ SourceRange(Loc, Loc), IsDirectInit, Init);
+ if (DeducedType.isNull())
+ return QualType();
+
+ // Are we a non-list direct initialization?
+ ParenListExpr *CXXDirectInit = dyn_cast<ParenListExpr>(Init);
+
+ // Perform initialization analysis and ensure any implicit conversions
+ // (such as lvalue-to-rvalue) are enforced.
+ InitializedEntity Entity =
+ InitializedEntity::InitializeLambdaCapture(Id, DeducedType, Loc);
+ InitializationKind Kind =
+ IsDirectInit
+ ? (CXXDirectInit ? InitializationKind::CreateDirect(
+ Loc, Init->getLocStart(), Init->getLocEnd())
+ : InitializationKind::CreateDirectList(Loc))
+ : InitializationKind::CreateCopy(Loc, Init->getLocStart());
+
+ MultiExprArg Args = Init;
+ if (CXXDirectInit)
+ Args =
+ MultiExprArg(CXXDirectInit->getExprs(), CXXDirectInit->getNumExprs());
+ QualType DclT;
+ InitializationSequence InitSeq(*this, Entity, Kind, Args);
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Args, &DclT);
+
+ if (Result.isInvalid())
+ return QualType();
+ Init = Result.getAs<Expr>();
+
+ // The init-capture initialization is a full-expression that must be
+ // processed as one before we enter the declcontext of the lambda's
+ // call-operator.
+ Result = ActOnFinishFullExpr(Init, Loc, /*DiscardedValue*/ false,
+ /*IsConstexpr*/ false,
+ /*IsLambdaInitCaptureInitalizer*/ true);
+ if (Result.isInvalid())
+ return QualType();
+
+ Init = Result.getAs<Expr>();
+ return DeducedType;
+}
+
+VarDecl *Sema::createLambdaInitCaptureVarDecl(SourceLocation Loc,
+ QualType InitCaptureType,
+ IdentifierInfo *Id,
+ unsigned InitStyle, Expr *Init) {
+ TypeSourceInfo *TSI = Context.getTrivialTypeSourceInfo(InitCaptureType,
+ Loc);
+ // Create a dummy variable representing the init-capture. This is not actually
+ // used as a variable, and only exists as a way to name and refer to the
+ // init-capture.
+ // FIXME: Pass in separate source locations for '&' and identifier.
+ VarDecl *NewVD = VarDecl::Create(Context, CurContext, Loc,
+ Loc, Id, InitCaptureType, TSI, SC_Auto);
+ NewVD->setInitCapture(true);
+ NewVD->setReferenced(true);
+ // FIXME: Pass in a VarDecl::InitializationStyle.
+ NewVD->setInitStyle(static_cast<VarDecl::InitializationStyle>(InitStyle));
+ NewVD->markUsed(Context);
+ NewVD->setInit(Init);
+ return NewVD;
+}
+
+FieldDecl *Sema::buildInitCaptureField(LambdaScopeInfo *LSI, VarDecl *Var) {
+ FieldDecl *Field = FieldDecl::Create(
+ Context, LSI->Lambda, Var->getLocation(), Var->getLocation(),
+ nullptr, Var->getType(), Var->getTypeSourceInfo(), nullptr, false,
+ ICIS_NoInit);
+ Field->setImplicit(true);
+ Field->setAccess(AS_private);
+ LSI->Lambda->addDecl(Field);
+
+ LSI->addCapture(Var, /*isBlock*/false, Var->getType()->isReferenceType(),
+ /*isNested*/false, Var->getLocation(), SourceLocation(),
+ Var->getType(), Var->getInit());
+ return Field;
+}
+
+void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
+ Declarator &ParamInfo,
+ Scope *CurScope) {
+ // Determine if we're within a context where we know that the lambda will
+ // be dependent, because there are template parameters in scope.
+ bool KnownDependent = false;
+ LambdaScopeInfo *const LSI = getCurLambda();
+ assert(LSI && "LambdaScopeInfo should be on stack!");
+ TemplateParameterList *TemplateParams =
+ getGenericLambdaTemplateParameterList(LSI, *this);
+
+ if (Scope *TmplScope = CurScope->getTemplateParamParent()) {
+    // Since we have our own TemplateParams, check whether an outer scope
+    // has template params; only then are we in a dependent scope.
+ if (TemplateParams) {
+ TmplScope = TmplScope->getParent();
+ TmplScope = TmplScope ? TmplScope->getTemplateParamParent() : nullptr;
+ }
+ if (TmplScope && !TmplScope->decl_empty())
+ KnownDependent = true;
+ }
+ // Determine the signature of the call operator.
+ TypeSourceInfo *MethodTyInfo;
+ bool ExplicitParams = true;
+ bool ExplicitResultType = true;
+ bool ContainsUnexpandedParameterPack = false;
+ SourceLocation EndLoc;
+ SmallVector<ParmVarDecl *, 8> Params;
+ if (ParamInfo.getNumTypeObjects() == 0) {
+ // C++11 [expr.prim.lambda]p4:
+ // If a lambda-expression does not include a lambda-declarator, it is as
+ // if the lambda-declarator were ().
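+    // E.g. '[]{ return 0; }' is treated as if written '[](){ return 0; }'.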
+ FunctionProtoType::ExtProtoInfo EPI(Context.getDefaultCallingConvention(
+ /*IsVariadic=*/false, /*IsCXXMethod=*/true));
+ EPI.HasTrailingReturn = true;
+ EPI.TypeQuals |= DeclSpec::TQ_const;
+ // C++1y [expr.prim.lambda]:
+ // The lambda return type is 'auto', which is replaced by the
+ // trailing-return type if provided and/or deduced from 'return'
+    //   statements.
+ // We don't do this before C++1y, because we don't support deduced return
+ // types there.
+ QualType DefaultTypeForNoTrailingReturn =
+ getLangOpts().CPlusPlus14 ? Context.getAutoDeductType()
+ : Context.DependentTy;
+ QualType MethodTy =
+ Context.getFunctionType(DefaultTypeForNoTrailingReturn, None, EPI);
+ MethodTyInfo = Context.getTrivialTypeSourceInfo(MethodTy);
+ ExplicitParams = false;
+ ExplicitResultType = false;
+ EndLoc = Intro.Range.getEnd();
+ } else {
+ assert(ParamInfo.isFunctionDeclarator() &&
+ "lambda-declarator is a function");
+ DeclaratorChunk::FunctionTypeInfo &FTI = ParamInfo.getFunctionTypeInfo();
+
+ // C++11 [expr.prim.lambda]p5:
+ // This function call operator is declared const (9.3.1) if and only if
+ // the lambda-expression's parameter-declaration-clause is not followed
+ // by mutable. It is neither virtual nor declared volatile. [...]
+ if (!FTI.hasMutableQualifier())
+ FTI.TypeQuals |= DeclSpec::TQ_const;
+
+ MethodTyInfo = GetTypeForDeclarator(ParamInfo, CurScope);
+ assert(MethodTyInfo && "no type from lambda-declarator");
+ EndLoc = ParamInfo.getSourceRange().getEnd();
+
+ ExplicitResultType = FTI.hasTrailingReturnType();
+
+ if (FTIHasNonVoidParameters(FTI)) {
+ Params.reserve(FTI.NumParams);
+ for (unsigned i = 0, e = FTI.NumParams; i != e; ++i)
+ Params.push_back(cast<ParmVarDecl>(FTI.Params[i].Param));
+ }
+
+ // Check for unexpanded parameter packs in the method type.
+ if (MethodTyInfo->getType()->containsUnexpandedParameterPack())
+ ContainsUnexpandedParameterPack = true;
+ }
+
+ CXXRecordDecl *Class = createLambdaClosureType(Intro.Range, MethodTyInfo,
+ KnownDependent, Intro.Default);
+
+ CXXMethodDecl *Method = startLambdaDefinition(Class, Intro.Range,
+ MethodTyInfo, EndLoc, Params);
+ if (ExplicitParams)
+ CheckCXXDefaultArguments(Method);
+
+ // Attributes on the lambda apply to the method.
+ ProcessDeclAttributes(CurScope, Method, ParamInfo);
+
+ // Introduce the function call operator as the current declaration context.
+ PushDeclContext(CurScope, Method);
+
+ // Build the lambda scope.
+ buildLambdaScope(LSI, Method, Intro.Range, Intro.Default, Intro.DefaultLoc,
+ ExplicitParams, ExplicitResultType, !Method->isConst());
+
+ // C++11 [expr.prim.lambda]p9:
+ // A lambda-expression whose smallest enclosing scope is a block scope is a
+ // local lambda expression; any other lambda expression shall not have a
+ // capture-default or simple-capture in its lambda-introducer.
+ //
+ // For simple-captures, this is covered by the check below that any named
+ // entity is a variable that can be captured.
+ //
+ // For DR1632, we also allow a capture-default in any context where we can
+ // odr-use 'this' (in particular, in a default initializer for a non-static
+ // data member).
+ if (Intro.Default != LCD_None && !Class->getParent()->isFunctionOrMethod() &&
+ (getCurrentThisType().isNull() ||
+ CheckCXXThisCapture(SourceLocation(), /*Explicit*/true,
+ /*BuildAndDiagnose*/false)))
+ Diag(Intro.DefaultLoc, diag::err_capture_default_non_local);
+
+ // Distinct capture names, for diagnostics.
+ llvm::SmallSet<IdentifierInfo*, 8> CaptureNames;
+
+ // Handle explicit captures.
+ SourceLocation PrevCaptureLoc
+ = Intro.Default == LCD_None? Intro.Range.getBegin() : Intro.DefaultLoc;
+ for (auto C = Intro.Captures.begin(), E = Intro.Captures.end(); C != E;
+ PrevCaptureLoc = C->Loc, ++C) {
+ if (C->Kind == LCK_This) {
+ // C++11 [expr.prim.lambda]p8:
+ // An identifier or this shall not appear more than once in a
+ // lambda-capture.
+ if (LSI->isCXXThisCaptured()) {
+ Diag(C->Loc, diag::err_capture_more_than_once)
+ << "'this'" << SourceRange(LSI->getCXXThisCapture().getLocation())
+ << FixItHint::CreateRemoval(
+ SourceRange(getLocForEndOfToken(PrevCaptureLoc), C->Loc));
+ continue;
+ }
+
+ // C++11 [expr.prim.lambda]p8:
+ // If a lambda-capture includes a capture-default that is =, the
+ // lambda-capture shall not contain this [...].
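+      // E.g. (illustrative): '[=, this]{}' is rejected by the check below.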
+ if (Intro.Default == LCD_ByCopy) {
+ Diag(C->Loc, diag::err_this_capture_with_copy_default)
+ << FixItHint::CreateRemoval(
+ SourceRange(getLocForEndOfToken(PrevCaptureLoc), C->Loc));
+ continue;
+ }
+
+ // C++11 [expr.prim.lambda]p12:
+ // If this is captured by a local lambda expression, its nearest
+ // enclosing function shall be a non-static member function.
+ QualType ThisCaptureType = getCurrentThisType();
+ if (ThisCaptureType.isNull()) {
+ Diag(C->Loc, diag::err_this_capture) << true;
+ continue;
+ }
+
+ CheckCXXThisCapture(C->Loc, /*Explicit=*/true);
+ continue;
+ }
+
+ assert(C->Id && "missing identifier for capture");
+
+ if (C->Init.isInvalid())
+ continue;
+
+ VarDecl *Var = nullptr;
+ if (C->Init.isUsable()) {
+ Diag(C->Loc, getLangOpts().CPlusPlus14
+ ? diag::warn_cxx11_compat_init_capture
+ : diag::ext_init_capture);
+
+ if (C->Init.get()->containsUnexpandedParameterPack())
+ ContainsUnexpandedParameterPack = true;
+ // If the initializer expression is usable, but the InitCaptureType
+ // is not, then an error has occurred - so ignore the capture for now.
+      // E.g., [n{0}] { }; <-- if no <initializer_list> is included.
+ // FIXME: we should create the init capture variable and mark it invalid
+ // in this case.
+ if (C->InitCaptureType.get().isNull())
+ continue;
+
+ unsigned InitStyle;
+ switch (C->InitKind) {
+ case LambdaCaptureInitKind::NoInit:
+ llvm_unreachable("not an init-capture?");
+ case LambdaCaptureInitKind::CopyInit:
+ InitStyle = VarDecl::CInit;
+ break;
+ case LambdaCaptureInitKind::DirectInit:
+ InitStyle = VarDecl::CallInit;
+ break;
+ case LambdaCaptureInitKind::ListInit:
+ InitStyle = VarDecl::ListInit;
+ break;
+ }
+ Var = createLambdaInitCaptureVarDecl(C->Loc, C->InitCaptureType.get(),
+ C->Id, InitStyle, C->Init.get());
+ // C++1y [expr.prim.lambda]p11:
+ // An init-capture behaves as if it declares and explicitly
+ // captures a variable [...] whose declarative region is the
+ // lambda-expression's compound-statement
+ if (Var)
+ PushOnScopeChains(Var, CurScope, false);
+ } else {
+ assert(C->InitKind == LambdaCaptureInitKind::NoInit &&
+ "init capture has valid but null init?");
+
+ // C++11 [expr.prim.lambda]p8:
+ // If a lambda-capture includes a capture-default that is &, the
+ // identifiers in the lambda-capture shall not be preceded by &.
+ // If a lambda-capture includes a capture-default that is =, [...]
+ // each identifier it contains shall be preceded by &.
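+      // E.g. (illustrative): '[&, &x]{}' and '[=, x]{}' are both rejected
+      // by the checks below.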
+ if (C->Kind == LCK_ByRef && Intro.Default == LCD_ByRef) {
+ Diag(C->Loc, diag::err_reference_capture_with_reference_default)
+ << FixItHint::CreateRemoval(
+ SourceRange(getLocForEndOfToken(PrevCaptureLoc), C->Loc));
+ continue;
+ } else if (C->Kind == LCK_ByCopy && Intro.Default == LCD_ByCopy) {
+ Diag(C->Loc, diag::err_copy_capture_with_copy_default)
+ << FixItHint::CreateRemoval(
+ SourceRange(getLocForEndOfToken(PrevCaptureLoc), C->Loc));
+ continue;
+ }
+
+ // C++11 [expr.prim.lambda]p10:
+ // The identifiers in a capture-list are looked up using the usual
+ // rules for unqualified name lookup (3.4.1)
+ DeclarationNameInfo Name(C->Id, C->Loc);
+ LookupResult R(*this, Name, LookupOrdinaryName);
+ LookupName(R, CurScope);
+ if (R.isAmbiguous())
+ continue;
+ if (R.empty()) {
+ // FIXME: Disable corrections that would add qualification?
+ CXXScopeSpec ScopeSpec;
+ if (DiagnoseEmptyLookup(CurScope, ScopeSpec, R,
+ llvm::make_unique<DeclFilterCCC<VarDecl>>()))
+ continue;
+ }
+
+ Var = R.getAsSingle<VarDecl>();
+ if (Var && DiagnoseUseOfDecl(Var, C->Loc))
+ continue;
+ }
+
+ // C++11 [expr.prim.lambda]p8:
+ // An identifier or this shall not appear more than once in a
+ // lambda-capture.
+ if (!CaptureNames.insert(C->Id).second) {
+ if (Var && LSI->isCaptured(Var)) {
+ Diag(C->Loc, diag::err_capture_more_than_once)
+ << C->Id << SourceRange(LSI->getCapture(Var).getLocation())
+ << FixItHint::CreateRemoval(
+ SourceRange(getLocForEndOfToken(PrevCaptureLoc), C->Loc));
+ } else
+ // Previous capture captured something different (one or both was
+        // an init-capture): no fixit.
+ Diag(C->Loc, diag::err_capture_more_than_once) << C->Id;
+ continue;
+ }
+
+ // C++11 [expr.prim.lambda]p10:
+ // [...] each such lookup shall find a variable with automatic storage
+ // duration declared in the reaching scope of the local lambda expression.
+ // Note that the 'reaching scope' check happens in tryCaptureVariable().
+ if (!Var) {
+ Diag(C->Loc, diag::err_capture_does_not_name_variable) << C->Id;
+ continue;
+ }
+
+ // Ignore invalid decls; they'll just confuse the code later.
+ if (Var->isInvalidDecl())
+ continue;
+
+ if (!Var->hasLocalStorage()) {
+ Diag(C->Loc, diag::err_capture_non_automatic_variable) << C->Id;
+ Diag(Var->getLocation(), diag::note_previous_decl) << C->Id;
+ continue;
+ }
+
+ // C++11 [expr.prim.lambda]p23:
+ // A capture followed by an ellipsis is a pack expansion (14.5.3).
+ SourceLocation EllipsisLoc;
+ if (C->EllipsisLoc.isValid()) {
+ if (Var->isParameterPack()) {
+ EllipsisLoc = C->EllipsisLoc;
+ } else {
+ Diag(C->EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << SourceRange(C->Loc);
+
+ // Just ignore the ellipsis.
+ }
+ } else if (Var->isParameterPack()) {
+ ContainsUnexpandedParameterPack = true;
+ }
+
+ if (C->Init.isUsable()) {
+ buildInitCaptureField(LSI, Var);
+ } else {
+ TryCaptureKind Kind = C->Kind == LCK_ByRef ? TryCapture_ExplicitByRef :
+ TryCapture_ExplicitByVal;
+ tryCaptureVariable(Var, C->Loc, Kind, EllipsisLoc);
+ }
+ }
+ finishLambdaExplicitCaptures(LSI);
+
+ LSI->ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
+
+ // Add lambda parameters into scope.
+ addLambdaParameters(Method, CurScope);
+
+ // Enter a new evaluation context to insulate the lambda from any
+ // cleanups from the enclosing full-expression.
+ PushExpressionEvaluationContext(PotentiallyEvaluated);
+}
+
+void Sema::ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
+ bool IsInstantiation) {
+ LambdaScopeInfo *LSI = cast<LambdaScopeInfo>(FunctionScopes.back());
+
+ // Leave the expression-evaluation context.
+ DiscardCleanupsInEvaluationContext();
+ PopExpressionEvaluationContext();
+
+ // Leave the context of the lambda.
+ if (!IsInstantiation)
+ PopDeclContext();
+
+ // Finalize the lambda.
+ CXXRecordDecl *Class = LSI->Lambda;
+ Class->setInvalidDecl();
+ SmallVector<Decl*, 4> Fields(Class->fields());
+ ActOnFields(nullptr, Class->getLocation(), Class, Fields, SourceLocation(),
+ SourceLocation(), nullptr);
+ CheckCompletedCXXClass(Class);
+
+ PopFunctionScopeInfo();
+}
+
+/// \brief Add a lambda's conversion to function pointer, as described in
+/// C++11 [expr.prim.lambda]p6.
+static void addFunctionPointerConversion(Sema &S,
+ SourceRange IntroducerRange,
+ CXXRecordDecl *Class,
+ CXXMethodDecl *CallOperator) {
+ // This conversion is explicitly disabled if the lambda's function has
+ // pass_object_size attributes on any of its parameters.
+ if (std::any_of(CallOperator->param_begin(), CallOperator->param_end(),
+ std::mem_fn(&ParmVarDecl::hasAttr<PassObjectSizeAttr>)))
+ return;
+
+ // Add the conversion to function pointer.
+ const FunctionProtoType *CallOpProto =
+ CallOperator->getType()->getAs<FunctionProtoType>();
+ const FunctionProtoType::ExtProtoInfo CallOpExtInfo =
+ CallOpProto->getExtProtoInfo();
+ QualType PtrToFunctionTy;
+ QualType InvokerFunctionTy;
+ {
+ FunctionProtoType::ExtProtoInfo InvokerExtInfo = CallOpExtInfo;
+ CallingConv CC = S.Context.getDefaultCallingConvention(
+ CallOpProto->isVariadic(), /*IsCXXMethod=*/false);
+ InvokerExtInfo.ExtInfo = InvokerExtInfo.ExtInfo.withCallingConv(CC);
+ InvokerExtInfo.TypeQuals = 0;
+ assert(InvokerExtInfo.RefQualifier == RQ_None &&
+ "Lambda's call operator should not have a reference qualifier");
+ InvokerFunctionTy =
+ S.Context.getFunctionType(CallOpProto->getReturnType(),
+ CallOpProto->getParamTypes(), InvokerExtInfo);
+ PtrToFunctionTy = S.Context.getPointerType(InvokerFunctionTy);
+ }
+
+ // Create the type of the conversion function.
+ FunctionProtoType::ExtProtoInfo ConvExtInfo(
+ S.Context.getDefaultCallingConvention(
+ /*IsVariadic=*/false, /*IsCXXMethod=*/true));
+ // The conversion function is always const.
+ ConvExtInfo.TypeQuals = Qualifiers::Const;
+ QualType ConvTy =
+ S.Context.getFunctionType(PtrToFunctionTy, None, ConvExtInfo);
+
+ SourceLocation Loc = IntroducerRange.getBegin();
+ DeclarationName ConversionName
+ = S.Context.DeclarationNames.getCXXConversionFunctionName(
+ S.Context.getCanonicalType(PtrToFunctionTy));
+ DeclarationNameLoc ConvNameLoc;
+ // Construct a TypeSourceInfo for the conversion function, and wire
+ // all the parameters appropriately for the FunctionProtoTypeLoc
+ // so that everything works during transformation/instantiation of
+ // generic lambdas.
+ // The main reason for wiring up the parameters of the conversion
+ // function with that of the call operator is so that constructs
+ // like the following work:
+ // auto L = [](auto b) { <-- 1
+ // return [](auto a) -> decltype(a) { <-- 2
+ // return a;
+ // };
+ // };
+ // int (*fp)(int) = L(5);
+ // Because the trailing return type can contain DeclRefExprs that refer
+ // to the original call operator's variables, we hijack the call
+  // operator's ParmVarDecls below.
+ TypeSourceInfo *ConvNamePtrToFunctionTSI =
+ S.Context.getTrivialTypeSourceInfo(PtrToFunctionTy, Loc);
+ ConvNameLoc.NamedType.TInfo = ConvNamePtrToFunctionTSI;
+
+ // The conversion function is a conversion to a pointer-to-function.
+ TypeSourceInfo *ConvTSI = S.Context.getTrivialTypeSourceInfo(ConvTy, Loc);
+ FunctionProtoTypeLoc ConvTL =
+ ConvTSI->getTypeLoc().getAs<FunctionProtoTypeLoc>();
+ // Get the result of the conversion function which is a pointer-to-function.
+ PointerTypeLoc PtrToFunctionTL =
+ ConvTL.getReturnLoc().getAs<PointerTypeLoc>();
+ // Do the same for the TypeSourceInfo that is used to name the conversion
+ // operator.
+ PointerTypeLoc ConvNamePtrToFunctionTL =
+ ConvNamePtrToFunctionTSI->getTypeLoc().getAs<PointerTypeLoc>();
+
+ // Get the underlying function types that the conversion function will
+ // be converting to (should match the type of the call operator).
+ FunctionProtoTypeLoc CallOpConvTL =
+ PtrToFunctionTL.getPointeeLoc().getAs<FunctionProtoTypeLoc>();
+ FunctionProtoTypeLoc CallOpConvNameTL =
+ ConvNamePtrToFunctionTL.getPointeeLoc().getAs<FunctionProtoTypeLoc>();
+
+ // Wire up the FunctionProtoTypeLocs with the call operator's parameters.
+  // These parameters are essentially used to transform the name and
+  // the type of the conversion operator. By using the same parameters
+  // as the call operator's, we don't have to fix any back references that
+  // the call operator's trailing return type uses (such as
+  // decltype(some_type<decltype(a)>::type{} + decltype(a){}) etc.)
+ // - we can simply use the return type of the call operator, and
+ // everything should work.
+ SmallVector<ParmVarDecl *, 4> InvokerParams;
+ for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I) {
+ ParmVarDecl *From = CallOperator->getParamDecl(I);
+
+ InvokerParams.push_back(ParmVarDecl::Create(S.Context,
+ // Temporarily add to the TU. This is set to the invoker below.
+ S.Context.getTranslationUnitDecl(),
+ From->getLocStart(),
+ From->getLocation(),
+ From->getIdentifier(),
+ From->getType(),
+ From->getTypeSourceInfo(),
+ From->getStorageClass(),
+ /*DefaultArg=*/nullptr));
+ CallOpConvTL.setParam(I, From);
+ CallOpConvNameTL.setParam(I, From);
+ }
+
+ CXXConversionDecl *Conversion
+ = CXXConversionDecl::Create(S.Context, Class, Loc,
+ DeclarationNameInfo(ConversionName,
+ Loc, ConvNameLoc),
+ ConvTy,
+ ConvTSI,
+ /*isInline=*/true, /*isExplicit=*/false,
+ /*isConstexpr=*/false,
+ CallOperator->getBody()->getLocEnd());
+ Conversion->setAccess(AS_public);
+ Conversion->setImplicit(true);
+
+ if (Class->isGenericLambda()) {
+ // Create a template version of the conversion operator, using the template
+ // parameter list of the function call operator.
+ FunctionTemplateDecl *TemplateCallOperator =
+ CallOperator->getDescribedFunctionTemplate();
+ FunctionTemplateDecl *ConversionTemplate =
+ FunctionTemplateDecl::Create(S.Context, Class,
+ Loc, ConversionName,
+ TemplateCallOperator->getTemplateParameters(),
+ Conversion);
+ ConversionTemplate->setAccess(AS_public);
+ ConversionTemplate->setImplicit(true);
+ Conversion->setDescribedFunctionTemplate(ConversionTemplate);
+ Class->addDecl(ConversionTemplate);
+ } else
+ Class->addDecl(Conversion);
+ // Add a non-static member function that will be the result of
+ // the conversion with a certain unique ID.
+ DeclarationName InvokerName = &S.Context.Idents.get(
+ getLambdaStaticInvokerName());
+ // FIXME: Instead of passing in the CallOperator->getTypeSourceInfo()
+ // we should get a prebuilt TrivialTypeSourceInfo from Context
+ // using FunctionTy & Loc and get its TypeLoc as a FunctionProtoTypeLoc
+ // then rewire the parameters accordingly, by hoisting up the InvokeParams
+ // loop below and then use its Params to set Invoke->setParams(...) below.
+ // This would avoid the 'const' qualifier of the calloperator from
+ // contaminating the type of the invoker, which is currently adjusted
+ // in SemaTemplateDeduction.cpp:DeduceTemplateArguments. Fixing the
+ // trailing return type of the invoker would require a visitor to rebuild
+ // the trailing return type and adjusting all back DeclRefExpr's to refer
+ // to the new static invoker parameters - not the call operator's.
+ CXXMethodDecl *Invoke
+ = CXXMethodDecl::Create(S.Context, Class, Loc,
+ DeclarationNameInfo(InvokerName, Loc),
+ InvokerFunctionTy,
+ CallOperator->getTypeSourceInfo(),
+ SC_Static, /*IsInline=*/true,
+ /*IsConstexpr=*/false,
+ CallOperator->getBody()->getLocEnd());
+ for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I)
+ InvokerParams[I]->setOwningFunction(Invoke);
+ Invoke->setParams(InvokerParams);
+ Invoke->setAccess(AS_private);
+ Invoke->setImplicit(true);
+ if (Class->isGenericLambda()) {
+ FunctionTemplateDecl *TemplateCallOperator =
+ CallOperator->getDescribedFunctionTemplate();
+ FunctionTemplateDecl *StaticInvokerTemplate = FunctionTemplateDecl::Create(
+ S.Context, Class, Loc, InvokerName,
+ TemplateCallOperator->getTemplateParameters(),
+ Invoke);
+ StaticInvokerTemplate->setAccess(AS_private);
+ StaticInvokerTemplate->setImplicit(true);
+ Invoke->setDescribedFunctionTemplate(StaticInvokerTemplate);
+ Class->addDecl(StaticInvokerTemplate);
+ } else
+ Class->addDecl(Invoke);
+}
+
+/// \brief Add a lambda's conversion to block pointer.
+static void addBlockPointerConversion(Sema &S,
+ SourceRange IntroducerRange,
+ CXXRecordDecl *Class,
+ CXXMethodDecl *CallOperator) {
+ const FunctionProtoType *Proto =
+ CallOperator->getType()->getAs<FunctionProtoType>();
+
+ // The function type inside the block pointer type is the same as the call
+ // operator with some tweaks. The calling convention is the default free
+ // function convention, and the type qualifications are lost.
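+  //
+  // Sketch (illustrative): for a call operator 'int operator()(int) const',
+  // the conversion built below targets the block pointer type 'int (^)(int)'.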
+ FunctionProtoType::ExtProtoInfo BlockEPI = Proto->getExtProtoInfo();
+ BlockEPI.ExtInfo =
+ BlockEPI.ExtInfo.withCallingConv(S.Context.getDefaultCallingConvention(
+ Proto->isVariadic(), /*IsCXXMethod=*/false));
+ BlockEPI.TypeQuals = 0;
+ QualType FunctionTy = S.Context.getFunctionType(
+ Proto->getReturnType(), Proto->getParamTypes(), BlockEPI);
+ QualType BlockPtrTy = S.Context.getBlockPointerType(FunctionTy);
+
+ FunctionProtoType::ExtProtoInfo ConversionEPI(
+ S.Context.getDefaultCallingConvention(
+ /*IsVariadic=*/false, /*IsCXXMethod=*/true));
+ ConversionEPI.TypeQuals = Qualifiers::Const;
+ QualType ConvTy = S.Context.getFunctionType(BlockPtrTy, None, ConversionEPI);
+
+ SourceLocation Loc = IntroducerRange.getBegin();
+ DeclarationName Name
+ = S.Context.DeclarationNames.getCXXConversionFunctionName(
+ S.Context.getCanonicalType(BlockPtrTy));
+ DeclarationNameLoc NameLoc;
+ NameLoc.NamedType.TInfo = S.Context.getTrivialTypeSourceInfo(BlockPtrTy, Loc);
+ CXXConversionDecl *Conversion
+ = CXXConversionDecl::Create(S.Context, Class, Loc,
+ DeclarationNameInfo(Name, Loc, NameLoc),
+ ConvTy,
+ S.Context.getTrivialTypeSourceInfo(ConvTy, Loc),
+ /*isInline=*/true, /*isExplicit=*/false,
+ /*isConstexpr=*/false,
+ CallOperator->getBody()->getLocEnd());
+ Conversion->setAccess(AS_public);
+ Conversion->setImplicit(true);
+ Class->addDecl(Conversion);
+}
+
+static ExprResult performLambdaVarCaptureInitialization(
+ Sema &S, LambdaScopeInfo::Capture &Capture,
+ FieldDecl *Field,
+ SmallVectorImpl<VarDecl *> &ArrayIndexVars,
+ SmallVectorImpl<unsigned> &ArrayIndexStarts) {
+ assert(Capture.isVariableCapture() && "not a variable capture");
+
+ auto *Var = Capture.getVariable();
+ SourceLocation Loc = Capture.getLocation();
+
+ // C++11 [expr.prim.lambda]p21:
+ // When the lambda-expression is evaluated, the entities that
+ // are captured by copy are used to direct-initialize each
+ // corresponding non-static data member of the resulting closure
+ // object. (For array members, the array elements are
+ // direct-initialized in increasing subscript order.) These
+ // initializations are performed in the (unspecified) order in
+ // which the non-static data members are declared.
+
+ // C++ [expr.prim.lambda]p12:
+ // An entity captured by a lambda-expression is odr-used (3.2) in
+ // the scope containing the lambda-expression.
+ ExprResult RefResult = S.BuildDeclarationNameExpr(
+ CXXScopeSpec(), DeclarationNameInfo(Var->getDeclName(), Loc), Var);
+ if (RefResult.isInvalid())
+ return ExprError();
+ Expr *Ref = RefResult.get();
+
+ QualType FieldType = Field->getType();
+
+ // When the variable has array type, create index variables for each
+ // dimension of the array. We use these index variables to subscript
+ // the source array, and other clients (e.g., CodeGen) will perform
+ // the necessary iteration with these index variables.
+ //
+ // FIXME: This is dumb. Add a proper AST representation for array
+ // copy-construction and use it here.
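+  //
+  // Sketch of the shape produced here (illustrative): copying a capture of
+  // type 'int a[2][3]' invents index variables __i0 and __i1 and initializes
+  // the member from 'a[__i0][__i1]'; CodeGen supplies the actual loops.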
+ SmallVector<VarDecl *, 4> IndexVariables;
+ QualType BaseType = FieldType;
+ QualType SizeType = S.Context.getSizeType();
+ ArrayIndexStarts.push_back(ArrayIndexVars.size());
+ while (const ConstantArrayType *Array
+ = S.Context.getAsConstantArrayType(BaseType)) {
+ // Create the iteration variable for this array index.
+ IdentifierInfo *IterationVarName = nullptr;
+ {
+ SmallString<8> Str;
+ llvm::raw_svector_ostream OS(Str);
+ OS << "__i" << IndexVariables.size();
+ IterationVarName = &S.Context.Idents.get(OS.str());
+ }
+ VarDecl *IterationVar = VarDecl::Create(
+ S.Context, S.CurContext, Loc, Loc, IterationVarName, SizeType,
+ S.Context.getTrivialTypeSourceInfo(SizeType, Loc), SC_None);
+ IterationVar->setImplicit();
+ IndexVariables.push_back(IterationVar);
+ ArrayIndexVars.push_back(IterationVar);
+
+ // Create a reference to the iteration variable.
+ ExprResult IterationVarRef =
+ S.BuildDeclRefExpr(IterationVar, SizeType, VK_LValue, Loc);
+ assert(!IterationVarRef.isInvalid() &&
+ "Reference to invented variable cannot fail!");
+ IterationVarRef = S.DefaultLvalueConversion(IterationVarRef.get());
+ assert(!IterationVarRef.isInvalid() &&
+ "Conversion of invented variable cannot fail!");
+
+ // Subscript the array with this iteration variable.
+ ExprResult Subscript =
+ S.CreateBuiltinArraySubscriptExpr(Ref, Loc, IterationVarRef.get(), Loc);
+ if (Subscript.isInvalid())
+ return ExprError();
+
+ Ref = Subscript.get();
+ BaseType = Array->getElementType();
+ }
+
+ // Construct the entity that we will be initializing. For an array, this
+ // will be first element in the array, which may require several levels
+ // of array-subscript entities.
+ SmallVector<InitializedEntity, 4> Entities;
+ Entities.reserve(1 + IndexVariables.size());
+ Entities.push_back(InitializedEntity::InitializeLambdaCapture(
+ Var->getIdentifier(), FieldType, Loc));
+ for (unsigned I = 0, N = IndexVariables.size(); I != N; ++I)
+ Entities.push_back(
+ InitializedEntity::InitializeElement(S.Context, 0, Entities.back()));
+
+ InitializationKind InitKind = InitializationKind::CreateDirect(Loc, Loc, Loc);
+ InitializationSequence Init(S, Entities.back(), InitKind, Ref);
+ return Init.Perform(S, Entities.back(), InitKind, Ref);
+}
+
+ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
+ Scope *CurScope) {
+ LambdaScopeInfo LSI = *cast<LambdaScopeInfo>(FunctionScopes.back());
+ ActOnFinishFunctionBody(LSI.CallOperator, Body);
+ return BuildLambdaExpr(StartLoc, Body->getLocEnd(), &LSI);
+}
+
+static LambdaCaptureDefault
+mapImplicitCaptureStyle(CapturingScopeInfo::ImplicitCaptureStyle ICS) {
+ switch (ICS) {
+ case CapturingScopeInfo::ImpCap_None:
+ return LCD_None;
+ case CapturingScopeInfo::ImpCap_LambdaByval:
+ return LCD_ByCopy;
+ case CapturingScopeInfo::ImpCap_CapturedRegion:
+ case CapturingScopeInfo::ImpCap_LambdaByref:
+ return LCD_ByRef;
+ case CapturingScopeInfo::ImpCap_Block:
+ llvm_unreachable("block capture in lambda");
+ }
+ llvm_unreachable("Unknown implicit capture style");
+}
+
+ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
+ LambdaScopeInfo *LSI) {
+ // Collect information from the lambda scope.
+ SmallVector<LambdaCapture, 4> Captures;
+ SmallVector<Expr *, 4> CaptureInits;
+ SourceLocation CaptureDefaultLoc = LSI->CaptureDefaultLoc;
+ LambdaCaptureDefault CaptureDefault =
+ mapImplicitCaptureStyle(LSI->ImpCaptureStyle);
+ CXXRecordDecl *Class;
+ CXXMethodDecl *CallOperator;
+ SourceRange IntroducerRange;
+ bool ExplicitParams;
+ bool ExplicitResultType;
+ bool LambdaExprNeedsCleanups;
+ bool ContainsUnexpandedParameterPack;
+ SmallVector<VarDecl *, 4> ArrayIndexVars;
+ SmallVector<unsigned, 4> ArrayIndexStarts;
+ {
+ CallOperator = LSI->CallOperator;
+ Class = LSI->Lambda;
+ IntroducerRange = LSI->IntroducerRange;
+ ExplicitParams = LSI->ExplicitParams;
+ ExplicitResultType = !LSI->HasImplicitReturnType;
+ LambdaExprNeedsCleanups = LSI->ExprNeedsCleanups;
+ ContainsUnexpandedParameterPack = LSI->ContainsUnexpandedParameterPack;
+
+ CallOperator->setLexicalDeclContext(Class);
+ Decl *TemplateOrNonTemplateCallOperatorDecl =
+ CallOperator->getDescribedFunctionTemplate()
+ ? CallOperator->getDescribedFunctionTemplate()
+ : cast<Decl>(CallOperator);
+
+ TemplateOrNonTemplateCallOperatorDecl->setLexicalDeclContext(Class);
+ Class->addDecl(TemplateOrNonTemplateCallOperatorDecl);
+
+ PopExpressionEvaluationContext();
+
+ // Translate captures.
+ auto CurField = Class->field_begin();
+ for (unsigned I = 0, N = LSI->Captures.size(); I != N; ++I, ++CurField) {
+ LambdaScopeInfo::Capture From = LSI->Captures[I];
+ assert(!From.isBlockCapture() && "Cannot capture __block variables");
+ bool IsImplicit = I >= LSI->NumExplicitCaptures;
+
+ // Handle 'this' capture.
+ if (From.isThisCapture()) {
+ Captures.push_back(
+ LambdaCapture(From.getLocation(), IsImplicit, LCK_This));
+ CaptureInits.push_back(new (Context) CXXThisExpr(From.getLocation(),
+ getCurrentThisType(),
+ /*isImplicit=*/true));
+ ArrayIndexStarts.push_back(ArrayIndexVars.size());
+ continue;
+ }
+ if (From.isVLATypeCapture()) {
+ Captures.push_back(
+ LambdaCapture(From.getLocation(), IsImplicit, LCK_VLAType));
+ CaptureInits.push_back(nullptr);
+ ArrayIndexStarts.push_back(ArrayIndexVars.size());
+ continue;
+ }
+
+ VarDecl *Var = From.getVariable();
+ LambdaCaptureKind Kind = From.isCopyCapture() ? LCK_ByCopy : LCK_ByRef;
+ Captures.push_back(LambdaCapture(From.getLocation(), IsImplicit, Kind,
+ Var, From.getEllipsisLoc()));
+ Expr *Init = From.getInitExpr();
+ if (!Init) {
+ auto InitResult = performLambdaVarCaptureInitialization(
+ *this, From, *CurField, ArrayIndexVars, ArrayIndexStarts);
+ if (InitResult.isInvalid())
+ return ExprError();
+ Init = InitResult.get();
+ } else {
+ ArrayIndexStarts.push_back(ArrayIndexVars.size());
+ }
+ CaptureInits.push_back(Init);
+ }
+
+ // C++11 [expr.prim.lambda]p6:
+ // The closure type for a lambda-expression with no lambda-capture
+ // has a public non-virtual non-explicit const conversion function
+ // to pointer to function having the same parameter and return
+ // types as the closure type's function call operator.
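+    // E.g. (illustrative):
+    //   auto L = [](int i) { return i; };
+    //   int (*fp)(int) = L;   // OK, L is capture-less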
+ if (Captures.empty() && CaptureDefault == LCD_None)
+ addFunctionPointerConversion(*this, IntroducerRange, Class,
+ CallOperator);
+
+ // Objective-C++:
+ // The closure type for a lambda-expression has a public non-virtual
+ // non-explicit const conversion function to a block pointer having the
+ // same parameter and return types as the closure type's function call
+ // operator.
+ // FIXME: Fix generic lambda to block conversions.
+ if (getLangOpts().Blocks && getLangOpts().ObjC1 &&
+ !Class->isGenericLambda())
+ addBlockPointerConversion(*this, IntroducerRange, Class, CallOperator);
+
+ // Finalize the lambda class.
+ SmallVector<Decl*, 4> Fields(Class->fields());
+ ActOnFields(nullptr, Class->getLocation(), Class, Fields, SourceLocation(),
+ SourceLocation(), nullptr);
+ CheckCompletedCXXClass(Class);
+ }
+
+ if (LambdaExprNeedsCleanups)
+ ExprNeedsCleanups = true;
+
+ LambdaExpr *Lambda = LambdaExpr::Create(Context, Class, IntroducerRange,
+ CaptureDefault, CaptureDefaultLoc,
+ Captures,
+ ExplicitParams, ExplicitResultType,
+ CaptureInits, ArrayIndexVars,
+ ArrayIndexStarts, EndLoc,
+ ContainsUnexpandedParameterPack);
+
+ if (!CurContext->isDependentContext()) {
+ switch (ExprEvalContexts.back().Context) {
+ // C++11 [expr.prim.lambda]p2:
+ // A lambda-expression shall not appear in an unevaluated operand
+ // (Clause 5).
+ case Unevaluated:
+ case UnevaluatedAbstract:
+ // C++1y [expr.const]p2:
+ // A conditional-expression e is a core constant expression unless the
+ // evaluation of e, following the rules of the abstract machine, would
+ // evaluate [...] a lambda-expression.
+ //
+ // This is technically incorrect, there are some constant evaluated contexts
+ // where this should be allowed. We should probably fix this when DR1607 is
+ // ratified, it lays out the exact set of conditions where we shouldn't
+ // allow a lambda-expression.
+ case ConstantEvaluated:
+ // We don't actually diagnose this case immediately, because we
+ // could be within a context where we might find out later that
+ // the expression is potentially evaluated (e.g., for typeid).
+ ExprEvalContexts.back().Lambdas.push_back(Lambda);
+ break;
+
+ case PotentiallyEvaluated:
+ case PotentiallyEvaluatedIfUsed:
+ break;
+ }
+ }
+
+ return MaybeBindToTemporary(Lambda);
+}
+
+ExprResult Sema::BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
+ SourceLocation ConvLocation,
+ CXXConversionDecl *Conv,
+ Expr *Src) {
+ // Make sure that the lambda call operator is marked used.
+ CXXRecordDecl *Lambda = Conv->getParent();
+ CXXMethodDecl *CallOperator
+ = cast<CXXMethodDecl>(
+ Lambda->lookup(
+ Context.DeclarationNames.getCXXOperatorName(OO_Call)).front());
+ CallOperator->setReferenced();
+ CallOperator->markUsed(Context);
+
+ ExprResult Init = PerformCopyInitialization(
+ InitializedEntity::InitializeBlock(ConvLocation,
+ Src->getType(),
+ /*NRVO=*/false),
+ CurrentLocation, Src);
+ if (!Init.isInvalid())
+ Init = ActOnFinishFullExpr(Init.get());
+
+ if (Init.isInvalid())
+ return ExprError();
+
+ // Create the new block to be returned.
+ BlockDecl *Block = BlockDecl::Create(Context, CurContext, ConvLocation);
+
+ // Set the type information.
+ Block->setSignatureAsWritten(CallOperator->getTypeSourceInfo());
+ Block->setIsVariadic(CallOperator->isVariadic());
+ Block->setBlockMissingReturnType(false);
+
+ // Add parameters.
+ SmallVector<ParmVarDecl *, 4> BlockParams;
+ for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I) {
+ ParmVarDecl *From = CallOperator->getParamDecl(I);
+ BlockParams.push_back(ParmVarDecl::Create(Context, Block,
+ From->getLocStart(),
+ From->getLocation(),
+ From->getIdentifier(),
+ From->getType(),
+ From->getTypeSourceInfo(),
+ From->getStorageClass(),
+ /*DefaultArg=*/nullptr));
+ }
+ Block->setParams(BlockParams);
+
+ Block->setIsConversionFromLambda(true);
+
+ // Add capture. The capture uses a fake variable, which doesn't correspond
+ // to any actual memory location. However, the initializer copy-initializes
+ // the lambda object.
+ TypeSourceInfo *CapVarTSI =
+ Context.getTrivialTypeSourceInfo(Src->getType());
+ VarDecl *CapVar = VarDecl::Create(Context, Block, ConvLocation,
+ ConvLocation, nullptr,
+ Src->getType(), CapVarTSI,
+ SC_None);
+ BlockDecl::Capture Capture(/*Variable=*/CapVar, /*ByRef=*/false,
+ /*Nested=*/false, /*Copy=*/Init.get());
+ Block->setCaptures(Context, Capture, /*CapturesCXXThis=*/false);
+
+ // Add a fake function body to the block. IR generation is responsible
+ // for filling in the actual body, which cannot be expressed as an AST.
+ Block->setBody(new (Context) CompoundStmt(ConvLocation));
+
+ // Create the block literal expression.
+ Expr *BuildBlock = new (Context) BlockExpr(Block, Conv->getConversionType());
+ ExprCleanupObjects.push_back(Block);
+ ExprNeedsCleanups = true;
+
+ return BuildBlock;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp
new file mode 100644
index 0000000..481ae6c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp
@@ -0,0 +1,4990 @@
+//===--------------------- SemaLookup.cpp - Name Lookup ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements name lookup for C, C++, Objective-C, and
+// Objective-C++.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/Lookup.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclLookups.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/ModuleLoader.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/ExternalSemaSource.h"
+#include "clang/Sema/Overload.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/TemplateDeduction.h"
+#include "clang/Sema/TypoCorrection.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/TinyPtrVector.h"
+#include "llvm/ADT/edit_distance.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <iterator>
+#include <limits>
+#include <list>
+#include <map>
+#include <set>
+#include <utility>
+#include <vector>
+
+using namespace clang;
+using namespace sema;
+
+namespace {
+ class UnqualUsingEntry {
+ const DeclContext *Nominated;
+ const DeclContext *CommonAncestor;
+
+ public:
+ UnqualUsingEntry(const DeclContext *Nominated,
+ const DeclContext *CommonAncestor)
+ : Nominated(Nominated), CommonAncestor(CommonAncestor) {
+ }
+
+ const DeclContext *getCommonAncestor() const {
+ return CommonAncestor;
+ }
+
+ const DeclContext *getNominatedNamespace() const {
+ return Nominated;
+ }
+
+ // Sort by the pointer value of the common ancestor.
+ struct Comparator {
+ bool operator()(const UnqualUsingEntry &L, const UnqualUsingEntry &R) {
+ return L.getCommonAncestor() < R.getCommonAncestor();
+ }
+
+ bool operator()(const UnqualUsingEntry &E, const DeclContext *DC) {
+ return E.getCommonAncestor() < DC;
+ }
+
+ bool operator()(const DeclContext *DC, const UnqualUsingEntry &E) {
+ return DC < E.getCommonAncestor();
+ }
+ };
+ };
+
+ /// A collection of using directives, as used by C++ unqualified
+ /// lookup.
+ class UnqualUsingDirectiveSet {
+ typedef SmallVector<UnqualUsingEntry, 8> ListTy;
+
+ ListTy list;
+ llvm::SmallPtrSet<DeclContext*, 8> visited;
+
+ public:
+ UnqualUsingDirectiveSet() {}
+
+ void visitScopeChain(Scope *S, Scope *InnermostFileScope) {
+ // C++ [namespace.udir]p1:
+ // During unqualified name lookup, the names appear as if they
+ // were declared in the nearest enclosing namespace which contains
+ // both the using-directive and the nominated namespace.
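+      //
+      // Illustrative example (ours):
+      //   namespace A { namespace B { int x; } }
+      //   void f() { using namespace A::B; x = 1; }
+      // Here B's names behave, for this lookup, as if declared in the
+      // global namespace: the nearest scope enclosing both f and A::B.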
+ DeclContext *InnermostFileDC = InnermostFileScope->getEntity();
+ assert(InnermostFileDC && InnermostFileDC->isFileContext());
+
+ for (; S; S = S->getParent()) {
+ // C++ [namespace.udir]p1:
+ // A using-directive shall not appear in class scope, but may
+ // appear in namespace scope or in block scope.
+ DeclContext *Ctx = S->getEntity();
+ if (Ctx && Ctx->isFileContext()) {
+ visit(Ctx, Ctx);
+ } else if (!Ctx || Ctx->isFunctionOrMethod()) {
+ for (auto *I : S->using_directives())
+ visit(I, InnermostFileDC);
+ }
+ }
+ }
+
+    // Visits a context and collects all of its using directives
+ // recursively. Treats all using directives as if they were
+ // declared in the context.
+ //
+    // A given context is only ever visited once, so it is important
+ // that contexts be visited from the inside out in order to get
+ // the effective DCs right.
+ void visit(DeclContext *DC, DeclContext *EffectiveDC) {
+ if (!visited.insert(DC).second)
+ return;
+
+ addUsingDirectives(DC, EffectiveDC);
+ }
+
+ // Visits a using directive and collects all of its using
+ // directives recursively. Treats all using directives as if they
+ // were declared in the effective DC.
+ void visit(UsingDirectiveDecl *UD, DeclContext *EffectiveDC) {
+ DeclContext *NS = UD->getNominatedNamespace();
+ if (!visited.insert(NS).second)
+ return;
+
+ addUsingDirective(UD, EffectiveDC);
+ addUsingDirectives(NS, EffectiveDC);
+ }
+
+ // Adds all the using directives in a context (and those nominated
+ // by its using directives, transitively) as if they appeared in
+ // the given effective context.
+ void addUsingDirectives(DeclContext *DC, DeclContext *EffectiveDC) {
+ SmallVector<DeclContext*, 4> queue;
+ while (true) {
+ for (auto UD : DC->using_directives()) {
+ DeclContext *NS = UD->getNominatedNamespace();
+ if (visited.insert(NS).second) {
+ addUsingDirective(UD, EffectiveDC);
+ queue.push_back(NS);
+ }
+ }
+
+ if (queue.empty())
+ return;
+
+ DC = queue.pop_back_val();
+ }
+ }
+
+ // Add a using directive as if it had been declared in the given
+ // context. This helps implement C++ [namespace.udir]p3:
+ // The using-directive is transitive: if a scope contains a
+ // using-directive that nominates a second namespace that itself
+ // contains using-directives, the effect is as if the
+ // using-directives from the second namespace also appeared in
+ // the first.
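+    //
+    // E.g. (illustrative):
+    //   namespace C { int z; }
+    //   namespace B { using namespace C; }
+    //   using namespace B;   // C::z becomes visible here as well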
+ void addUsingDirective(UsingDirectiveDecl *UD, DeclContext *EffectiveDC) {
+ // Find the common ancestor between the effective context and
+ // the nominated namespace.
+ DeclContext *Common = UD->getNominatedNamespace();
+ while (!Common->Encloses(EffectiveDC))
+ Common = Common->getParent();
+ Common = Common->getPrimaryContext();
+
+ list.push_back(UnqualUsingEntry(UD->getNominatedNamespace(), Common));
+ }
+
+ void done() {
+ std::sort(list.begin(), list.end(), UnqualUsingEntry::Comparator());
+ }
+
+ typedef ListTy::const_iterator const_iterator;
+
+ const_iterator begin() const { return list.begin(); }
+ const_iterator end() const { return list.end(); }
+
+ llvm::iterator_range<const_iterator>
+ getNamespacesFor(DeclContext *DC) const {
+ return llvm::make_range(std::equal_range(begin(), end(),
+ DC->getPrimaryContext(),
+ UnqualUsingEntry::Comparator()));
+ }
+ };
+} // end anonymous namespace
+
+// Retrieve the set of identifier namespaces that correspond to a
+// specific kind of name lookup.
+static inline unsigned getIDNS(Sema::LookupNameKind NameKind,
+ bool CPlusPlus,
+ bool Redeclaration) {
+ unsigned IDNS = 0;
+ switch (NameKind) {
+ case Sema::LookupObjCImplicitSelfParam:
+ case Sema::LookupOrdinaryName:
+ case Sema::LookupRedeclarationWithLinkage:
+ case Sema::LookupLocalFriendName:
+ IDNS = Decl::IDNS_Ordinary;
+ if (CPlusPlus) {
+ IDNS |= Decl::IDNS_Tag | Decl::IDNS_Member | Decl::IDNS_Namespace;
+ if (Redeclaration)
+ IDNS |= Decl::IDNS_TagFriend | Decl::IDNS_OrdinaryFriend;
+ }
+ if (Redeclaration)
+ IDNS |= Decl::IDNS_LocalExtern;
+ break;
+
+ case Sema::LookupOperatorName:
+ // Operator lookup is its own crazy thing; it is not the same
+ // as (e.g.) looking up an operator name for redeclaration.
+ assert(!Redeclaration && "cannot do redeclaration operator lookup");
+ IDNS = Decl::IDNS_NonMemberOperator;
+ break;
+
+ case Sema::LookupTagName:
+ if (CPlusPlus) {
+ IDNS = Decl::IDNS_Type;
+
+ // When looking for a redeclaration of a tag name, we add:
+ // 1) TagFriend to find undeclared friend decls
+ // 2) Namespace because they can't "overload" with tag decls.
+ // 3) Tag because it includes class templates, which can't
+ // "overload" with tag decls.
+ if (Redeclaration)
+ IDNS |= Decl::IDNS_Tag | Decl::IDNS_TagFriend | Decl::IDNS_Namespace;
+ } else {
+ IDNS = Decl::IDNS_Tag;
+ }
+ break;
+
+ case Sema::LookupLabel:
+ IDNS = Decl::IDNS_Label;
+ break;
+
+ case Sema::LookupMemberName:
+ IDNS = Decl::IDNS_Member;
+ if (CPlusPlus)
+ IDNS |= Decl::IDNS_Tag | Decl::IDNS_Ordinary;
+ break;
+
+ case Sema::LookupNestedNameSpecifierName:
+ IDNS = Decl::IDNS_Type | Decl::IDNS_Namespace;
+ break;
+
+ case Sema::LookupNamespaceName:
+ IDNS = Decl::IDNS_Namespace;
+ break;
+
+ case Sema::LookupUsingDeclName:
+ assert(Redeclaration && "should only be used for redecl lookup");
+ IDNS = Decl::IDNS_Ordinary | Decl::IDNS_Tag | Decl::IDNS_Member |
+ Decl::IDNS_Using | Decl::IDNS_TagFriend | Decl::IDNS_OrdinaryFriend |
+ Decl::IDNS_LocalExtern;
+ break;
+
+ case Sema::LookupObjCProtocolName:
+ IDNS = Decl::IDNS_ObjCProtocol;
+ break;
+
+ case Sema::LookupAnyName:
+ IDNS = Decl::IDNS_Ordinary | Decl::IDNS_Tag | Decl::IDNS_Member
+ | Decl::IDNS_Using | Decl::IDNS_Namespace | Decl::IDNS_ObjCProtocol
+ | Decl::IDNS_Type;
+ break;
+ }
+ return IDNS;
+}
+
+void LookupResult::configure() {
+ IDNS = getIDNS(LookupKind, getSema().getLangOpts().CPlusPlus,
+ isForRedeclaration());
+
+ // If we're looking for one of the allocation or deallocation
+ // operators, make sure that the implicitly-declared new and delete
+ // operators can be found.
+ switch (NameInfo.getName().getCXXOverloadedOperator()) {
+ case OO_New:
+ case OO_Delete:
+ case OO_Array_New:
+ case OO_Array_Delete:
+ getSema().DeclareGlobalNewDelete();
+ break;
+
+ default:
+ break;
+ }
+
+ // Compiler builtins are always visible, regardless of where they end
+ // up being declared.
+ if (IdentifierInfo *Id = NameInfo.getName().getAsIdentifierInfo()) {
+ if (unsigned BuiltinID = Id->getBuiltinID()) {
+ if (!getSema().Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+ AllowHidden = true;
+ }
+ }
+}
+
+bool LookupResult::sanity() const {
+  // This function is never called in NDEBUG builds.
+ assert(ResultKind != NotFound || Decls.size() == 0);
+ assert(ResultKind != Found || Decls.size() == 1);
+ assert(ResultKind != FoundOverloaded || Decls.size() > 1 ||
+ (Decls.size() == 1 &&
+ isa<FunctionTemplateDecl>((*begin())->getUnderlyingDecl())));
+ assert(ResultKind != FoundUnresolvedValue || sanityCheckUnresolved());
+ assert(ResultKind != Ambiguous || Decls.size() > 1 ||
+ (Decls.size() == 1 && (Ambiguity == AmbiguousBaseSubobjects ||
+ Ambiguity == AmbiguousBaseSubobjectTypes)));
+ assert((Paths != nullptr) == (ResultKind == Ambiguous &&
+ (Ambiguity == AmbiguousBaseSubobjectTypes ||
+ Ambiguity == AmbiguousBaseSubobjects)));
+ return true;
+}
+
+// Necessary because CXXBasePaths is not complete in Sema.h
+void LookupResult::deletePaths(CXXBasePaths *Paths) {
+ delete Paths;
+}
+
+/// Get a representative context for a declaration such that two declarations
+/// will have the same context if they were found within the same scope.
+static DeclContext *getContextForScopeMatching(Decl *D) {
+ // For function-local declarations, use that function as the context. This
+ // doesn't account for scopes within the function; the caller must deal with
+ // those.
+ DeclContext *DC = D->getLexicalDeclContext();
+ if (DC->isFunctionOrMethod())
+ return DC;
+
+ // Otherwise, look at the semantic context of the declaration. The
+ // declaration must have been found there.
+ return D->getDeclContext()->getRedeclContext();
+}
+
+/// \brief Determine whether \p D is a better lookup result than \p Existing,
+/// given that they declare the same entity.
+static bool isPreferredLookupResult(Sema &S, Sema::LookupNameKind Kind,
+ NamedDecl *D, NamedDecl *Existing) {
+ // When looking up redeclarations of a using declaration, prefer a using
+ // shadow declaration over any other declaration of the same entity.
+ if (Kind == Sema::LookupUsingDeclName && isa<UsingShadowDecl>(D) &&
+ !isa<UsingShadowDecl>(Existing))
+ return true;
+
+ auto *DUnderlying = D->getUnderlyingDecl();
+ auto *EUnderlying = Existing->getUnderlyingDecl();
+
+ // If they have different underlying declarations, prefer a typedef over the
+ // original type (this happens when two type declarations denote the same
+ // type), per a generous reading of C++ [dcl.typedef]p3 and p4. The typedef
+ // might carry additional semantic information, such as an alignment override.
+ // However, per C++ [dcl.typedef]p5, when looking up a tag name, prefer a tag
+ // declaration over a typedef.
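+  // E.g. (illustrative): given 'struct S {}; typedef struct S S;', ordinary
+  // lookup prefers the typedef and tag lookup prefers the tag.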
+ if (DUnderlying->getCanonicalDecl() != EUnderlying->getCanonicalDecl()) {
+ assert(isa<TypeDecl>(DUnderlying) && isa<TypeDecl>(EUnderlying));
+ bool HaveTag = isa<TagDecl>(EUnderlying);
+ bool WantTag = Kind == Sema::LookupTagName;
+ return HaveTag != WantTag;
+ }
+
+ // Pick the function with more default arguments.
+ // FIXME: In the presence of ambiguous default arguments, we should keep both,
+ // so we can diagnose the ambiguity if the default argument is needed.
+ // See C++ [over.match.best]p3.
+ if (auto *DFD = dyn_cast<FunctionDecl>(DUnderlying)) {
+ auto *EFD = cast<FunctionDecl>(EUnderlying);
+ unsigned DMin = DFD->getMinRequiredArguments();
+ unsigned EMin = EFD->getMinRequiredArguments();
+ // If D has more default arguments, it is preferred.
+ if (DMin != EMin)
+ return DMin < EMin;
+ // FIXME: When we track visibility for default function arguments, check
+ // that we pick the declaration with more visible default arguments.
+ }
+
+ // Pick the template with more default template arguments.
+ if (auto *DTD = dyn_cast<TemplateDecl>(DUnderlying)) {
+ auto *ETD = cast<TemplateDecl>(EUnderlying);
+ unsigned DMin = DTD->getTemplateParameters()->getMinRequiredArguments();
+ unsigned EMin = ETD->getTemplateParameters()->getMinRequiredArguments();
+ // If D has more default arguments, it is preferred. Note that default
+    // arguments (and their visibility) are monotonically increasing across the
+ // redeclaration chain, so this is a quick proxy for "is more recent".
+ if (DMin != EMin)
+ return DMin < EMin;
+ // If D has more *visible* default arguments, it is preferred. Note, an
+ // earlier default argument being visible does not imply that a later
+ // default argument is visible, so we can't just check the first one.
+ for (unsigned I = DMin, N = DTD->getTemplateParameters()->size();
+ I != N; ++I) {
+ if (!S.hasVisibleDefaultArgument(
+ ETD->getTemplateParameters()->getParam(I)) &&
+ S.hasVisibleDefaultArgument(
+ DTD->getTemplateParameters()->getParam(I)))
+ return true;
+ }
+ }
+
+ // For most kinds of declaration, it doesn't really matter which one we pick.
+ if (!isa<FunctionDecl>(DUnderlying) && !isa<VarDecl>(DUnderlying)) {
+ // If the existing declaration is hidden, prefer the new one. Otherwise,
+ // keep what we've got.
+ return !S.isVisible(Existing);
+ }
+
+ // Pick the newer declaration; it might have a more precise type.
+ for (Decl *Prev = DUnderlying->getPreviousDecl(); Prev;
+ Prev = Prev->getPreviousDecl())
+ if (Prev == EUnderlying)
+ return true;
+ return false;
+}
+
+/// Determine whether \p D can hide a tag declaration.
+static bool canHideTag(NamedDecl *D) {
+ // C++ [basic.scope.declarative]p4:
+ // Given a set of declarations in a single declarative region [...]
+ // exactly one declaration shall declare a class name or enumeration name
+ // that is not a typedef name and the other declarations shall all refer to
+ // the same variable or enumerator, or all refer to functions and function
+ // templates; in this case the class name or enumeration name is hidden.
+ // C++ [basic.scope.hiding]p2:
+ // A class name or enumeration name can be hidden by the name of a
+ // variable, data member, function, or enumerator declared in the same
+ // scope.
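+  //
+  // E.g. (illustrative): given 'struct stat {}; int stat(const char *);',
+  // plain lookup of 'stat' finds the function; the tag remains reachable
+  // through the elaborated form 'struct stat'.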
+ D = D->getUnderlyingDecl();
+ return isa<VarDecl>(D) || isa<EnumConstantDecl>(D) || isa<FunctionDecl>(D) ||
+ isa<FunctionTemplateDecl>(D) || isa<FieldDecl>(D);
+}
+
+/// Resolves the result kind of this lookup.
+void LookupResult::resolveKind() {
+ unsigned N = Decls.size();
+
+ // Fast case: no possible ambiguity.
+ if (N == 0) {
+ assert(ResultKind == NotFound ||
+ ResultKind == NotFoundInCurrentInstantiation);
+ return;
+ }
+
+ // If there's a single decl, we need to examine it to decide what
+ // kind of lookup this is.
+ if (N == 1) {
+ NamedDecl *D = (*Decls.begin())->getUnderlyingDecl();
+ if (isa<FunctionTemplateDecl>(D))
+ ResultKind = FoundOverloaded;
+ else if (isa<UnresolvedUsingValueDecl>(D))
+ ResultKind = FoundUnresolvedValue;
+ return;
+ }
+
+ // Don't do any extra resolution if we've already resolved as ambiguous.
+ if (ResultKind == Ambiguous) return;
+
+ llvm::SmallDenseMap<NamedDecl*, unsigned, 16> Unique;
+ llvm::SmallDenseMap<QualType, unsigned, 16> UniqueTypes;
+
+ bool Ambiguous = false;
+ bool HasTag = false, HasFunction = false;
+ bool HasFunctionTemplate = false, HasUnresolved = false;
+ NamedDecl *HasNonFunction = nullptr;
+
+ llvm::SmallVector<NamedDecl*, 4> EquivalentNonFunctions;
+
+ unsigned UniqueTagIndex = 0;
+
+ unsigned I = 0;
+ while (I < N) {
+ NamedDecl *D = Decls[I]->getUnderlyingDecl();
+ D = cast<NamedDecl>(D->getCanonicalDecl());
+
+ // Ignore an invalid declaration unless it's the only one left.
+ if (D->isInvalidDecl() && !(I == 0 && N == 1)) {
+ Decls[I] = Decls[--N];
+ continue;
+ }
+
+ llvm::Optional<unsigned> ExistingI;
+
+ // Redeclarations of types via typedef can occur both within a scope
+ // and, through using declarations and directives, across scopes. There is
+ // no ambiguity if they all refer to the same type, so unique based on the
+ // canonical type.
+ if (TypeDecl *TD = dyn_cast<TypeDecl>(D)) {
+ QualType T = getSema().Context.getTypeDeclType(TD);
+ auto UniqueResult = UniqueTypes.insert(
+ std::make_pair(getSema().Context.getCanonicalType(T), I));
+ if (!UniqueResult.second) {
+ // The type is not unique.
+ ExistingI = UniqueResult.first->second;
+ }
+ }
+
+ // For non-type declarations, check for a prior lookup result naming this
+ // canonical declaration.
+ if (!ExistingI) {
+ auto UniqueResult = Unique.insert(std::make_pair(D, I));
+ if (!UniqueResult.second) {
+ // We've seen this entity before.
+ ExistingI = UniqueResult.first->second;
+ }
+ }
+
+ if (ExistingI) {
+ // This is not a unique lookup result. Pick one of the results and
+ // discard the other.
+ if (isPreferredLookupResult(getSema(), getLookupKind(), Decls[I],
+ Decls[*ExistingI]))
+ Decls[*ExistingI] = Decls[I];
+ Decls[I] = Decls[--N];
+ continue;
+ }
+
+ // Otherwise, do some decl type analysis and then continue.
+
+ if (isa<UnresolvedUsingValueDecl>(D)) {
+ HasUnresolved = true;
+ } else if (isa<TagDecl>(D)) {
+ if (HasTag)
+ Ambiguous = true;
+ UniqueTagIndex = I;
+ HasTag = true;
+ } else if (isa<FunctionTemplateDecl>(D)) {
+ HasFunction = true;
+ HasFunctionTemplate = true;
+ } else if (isa<FunctionDecl>(D)) {
+ HasFunction = true;
+ } else {
+ if (HasNonFunction) {
+ // If we're about to create an ambiguity between two declarations that
+ // are equivalent, but one is an internal linkage declaration from one
+ // module and the other is an internal linkage declaration from another
+ // module, just skip it.
+ if (getSema().isEquivalentInternalLinkageDeclaration(HasNonFunction,
+ D)) {
+ EquivalentNonFunctions.push_back(D);
+ Decls[I] = Decls[--N];
+ continue;
+ }
+
+ Ambiguous = true;
+ }
+ HasNonFunction = D;
+ }
+ I++;
+ }
+
+ // C++ [basic.scope.hiding]p2:
+ // A class name or enumeration name can be hidden by the name of
+ // an object, function, or enumerator declared in the same
+ // scope. If a class or enumeration name and an object, function,
+ // or enumerator are declared in the same scope (in any order)
+ // with the same name, the class or enumeration name is hidden
+ // wherever the object, function, or enumerator name is visible.
+ // But it's still an error if there are distinct tag types found,
+ // even if they're not visible. (ref?)
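+  //
+  // E.g. (sketch): if one nominated namespace provides 'struct S {};',
+  // another provides a different 'struct S {};', and a third provides
+  // 'int S;', the two distinct tags were already flagged as ambiguous in
+  // the loop above, so no hiding is attempted here.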
+ if (N > 1 && HideTags && HasTag && !Ambiguous &&
+ (HasFunction || HasNonFunction || HasUnresolved)) {
+ NamedDecl *OtherDecl = Decls[UniqueTagIndex ? 0 : N - 1];
+ if (isa<TagDecl>(Decls[UniqueTagIndex]->getUnderlyingDecl()) &&
+ getContextForScopeMatching(Decls[UniqueTagIndex])->Equals(
+ getContextForScopeMatching(OtherDecl)) &&
+ canHideTag(OtherDecl))
+ Decls[UniqueTagIndex] = Decls[--N];
+ else
+ Ambiguous = true;
+ }
+
+ // FIXME: This diagnostic should really be delayed until we're done with
+ // the lookup result, in case the ambiguity is resolved by the caller.
+ if (!EquivalentNonFunctions.empty() && !Ambiguous)
+ getSema().diagnoseEquivalentInternalLinkageDeclarations(
+ getNameLoc(), HasNonFunction, EquivalentNonFunctions);
+
+ Decls.set_size(N);
+
+ if (HasNonFunction && (HasFunction || HasUnresolved))
+ Ambiguous = true;
+
+ if (Ambiguous)
+ setAmbiguous(LookupResult::AmbiguousReference);
+ else if (HasUnresolved)
+ ResultKind = LookupResult::FoundUnresolvedValue;
+ else if (N > 1 || HasFunctionTemplate)
+ ResultKind = LookupResult::FoundOverloaded;
+ else
+ ResultKind = LookupResult::Found;
+}
+
+void LookupResult::addDeclsFromBasePaths(const CXXBasePaths &P) {
+ CXXBasePaths::const_paths_iterator I, E;
+ for (I = P.begin(), E = P.end(); I != E; ++I)
+ for (DeclContext::lookup_iterator DI = I->Decls.begin(),
+ DE = I->Decls.end(); DI != DE; ++DI)
+ addDecl(*DI);
+}
+
+void LookupResult::setAmbiguousBaseSubobjects(CXXBasePaths &P) {
+ Paths = new CXXBasePaths;
+ Paths->swap(P);
+ addDeclsFromBasePaths(*Paths);
+ resolveKind();
+ setAmbiguous(AmbiguousBaseSubobjects);
+}
+
+void LookupResult::setAmbiguousBaseSubobjectTypes(CXXBasePaths &P) {
+ Paths = new CXXBasePaths;
+ Paths->swap(P);
+ addDeclsFromBasePaths(*Paths);
+ resolveKind();
+ setAmbiguous(AmbiguousBaseSubobjectTypes);
+}
+
+void LookupResult::print(raw_ostream &Out) {
+ Out << Decls.size() << " result(s)";
+ if (isAmbiguous()) Out << ", ambiguous";
+ if (Paths) Out << ", base paths present";
+
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ Out << "\n";
+ (*I)->print(Out, 2);
+ }
+}
+
+/// \brief Lookup a builtin function, when name lookup would otherwise
+/// fail.
+static bool LookupBuiltin(Sema &S, LookupResult &R) {
+ Sema::LookupNameKind NameKind = R.getLookupKind();
+
+ // If we didn't find a use of this identifier, and if the identifier
+ // corresponds to a compiler builtin, create the decl object for the builtin
+ // now, injecting it into translation unit scope, and return it.
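+  //
+  // For example (a sketch of the effect in C): a call to 'printf' with no
+  // declaration in scope still gets a declaration created here, because
+  // 'printf' has a builtin ID; the lazily created declaration is what later
+  // type checking and format-string diagnostics run against.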
+ if (NameKind == Sema::LookupOrdinaryName ||
+ NameKind == Sema::LookupRedeclarationWithLinkage) {
+ IdentifierInfo *II = R.getLookupName().getAsIdentifierInfo();
+ if (II) {
+ if (S.getLangOpts().CPlusPlus11 && S.getLangOpts().GNUMode &&
+ II == S.getFloat128Identifier()) {
+ // libstdc++4.7's type_traits expects type __float128 to exist, so
+ // insert a dummy type to make that header build in gnu++11 mode.
+ R.addDecl(S.getASTContext().getFloat128StubType());
+ return true;
+ }
+ if (S.getLangOpts().CPlusPlus && NameKind == Sema::LookupOrdinaryName &&
+ II == S.getASTContext().getMakeIntegerSeqName()) {
+ R.addDecl(S.getASTContext().getMakeIntegerSeqDecl());
+ return true;
+ }
+
+ // If this is a builtin on this (or all) targets, create the decl.
+ if (unsigned BuiltinID = II->getBuiltinID()) {
+ // In C++, we don't have any predefined library functions like
+ // 'malloc'. Instead, we'll just error.
+ if (S.getLangOpts().CPlusPlus &&
+ S.Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+ return false;
+
+        if (NamedDecl *D = S.LazilyCreateBuiltin(II, BuiltinID, S.TUScope,
+                                                 R.isForRedeclaration(),
+                                                 R.getNameLoc())) {
+ R.addDecl(D);
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
+/// \brief Determine whether we can declare a special member function within
+/// the class at this point.
+static bool CanDeclareSpecialMemberFunction(const CXXRecordDecl *Class) {
+ // We need to have a definition for the class.
+ if (!Class->getDefinition() || Class->isDependentContext())
+ return false;
+
+ // We can't be in the middle of defining the class.
+ return !Class->isBeingDefined();
+}
+
+void Sema::ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class) {
+ if (!CanDeclareSpecialMemberFunction(Class))
+ return;
+
+ // If the default constructor has not yet been declared, do so now.
+ if (Class->needsImplicitDefaultConstructor())
+ DeclareImplicitDefaultConstructor(Class);
+
+ // If the copy constructor has not yet been declared, do so now.
+ if (Class->needsImplicitCopyConstructor())
+ DeclareImplicitCopyConstructor(Class);
+
+ // If the copy assignment operator has not yet been declared, do so now.
+ if (Class->needsImplicitCopyAssignment())
+ DeclareImplicitCopyAssignment(Class);
+
+ if (getLangOpts().CPlusPlus11) {
+ // If the move constructor has not yet been declared, do so now.
+ if (Class->needsImplicitMoveConstructor())
+ DeclareImplicitMoveConstructor(Class); // might not actually do it
+
+ // If the move assignment operator has not yet been declared, do so now.
+ if (Class->needsImplicitMoveAssignment())
+ DeclareImplicitMoveAssignment(Class); // might not actually do it
+ }
+
+ // If the destructor has not yet been declared, do so now.
+ if (Class->needsImplicitDestructor())
+ DeclareImplicitDestructor(Class);
+}
+
+/// \brief Determine whether this is the name of an implicitly-declared
+/// special member function.
+static bool isImplicitlyDeclaredMemberFunctionName(DeclarationName Name) {
+ switch (Name.getNameKind()) {
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ return true;
+
+ case DeclarationName::CXXOperatorName:
+ return Name.getCXXOverloadedOperator() == OO_Equal;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/// \brief If there are any implicit member functions with the given name
+/// that need to be declared in the given declaration context, do so.
+static void DeclareImplicitMemberFunctionsWithName(Sema &S,
+ DeclarationName Name,
+ const DeclContext *DC) {
+ if (!DC)
+ return;
+
+ switch (Name.getNameKind()) {
+ case DeclarationName::CXXConstructorName:
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC))
+ if (Record->getDefinition() && CanDeclareSpecialMemberFunction(Record)) {
+ CXXRecordDecl *Class = const_cast<CXXRecordDecl *>(Record);
+ if (Record->needsImplicitDefaultConstructor())
+ S.DeclareImplicitDefaultConstructor(Class);
+ if (Record->needsImplicitCopyConstructor())
+ S.DeclareImplicitCopyConstructor(Class);
+ if (S.getLangOpts().CPlusPlus11 &&
+ Record->needsImplicitMoveConstructor())
+ S.DeclareImplicitMoveConstructor(Class);
+ }
+ break;
+
+ case DeclarationName::CXXDestructorName:
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC))
+ if (Record->getDefinition() && Record->needsImplicitDestructor() &&
+ CanDeclareSpecialMemberFunction(Record))
+ S.DeclareImplicitDestructor(const_cast<CXXRecordDecl *>(Record));
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ if (Name.getCXXOverloadedOperator() != OO_Equal)
+ break;
+
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC)) {
+ if (Record->getDefinition() && CanDeclareSpecialMemberFunction(Record)) {
+ CXXRecordDecl *Class = const_cast<CXXRecordDecl *>(Record);
+ if (Record->needsImplicitCopyAssignment())
+ S.DeclareImplicitCopyAssignment(Class);
+ if (S.getLangOpts().CPlusPlus11 &&
+ Record->needsImplicitMoveAssignment())
+ S.DeclareImplicitMoveAssignment(Class);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+// Adds all qualifying matches for a name within a decl context to the
+// given lookup result. Returns true if any matches were found.
+static bool LookupDirect(Sema &S, LookupResult &R, const DeclContext *DC) {
+ bool Found = false;
+
+ // Lazily declare C++ special member functions.
+ if (S.getLangOpts().CPlusPlus)
+ DeclareImplicitMemberFunctionsWithName(S, R.getLookupName(), DC);
+
+ // Perform lookup into this declaration context.
+ DeclContext::lookup_result DR = DC->lookup(R.getLookupName());
+ for (DeclContext::lookup_iterator I = DR.begin(), E = DR.end(); I != E;
+ ++I) {
+ NamedDecl *D = *I;
+ if ((D = R.getAcceptableDecl(D))) {
+ R.addDecl(D);
+ Found = true;
+ }
+ }
+
+ if (!Found && DC->isTranslationUnit() && LookupBuiltin(S, R))
+ return true;
+
+ if (R.getLookupName().getNameKind()
+ != DeclarationName::CXXConversionFunctionName ||
+ R.getLookupName().getCXXNameType()->isDependentType() ||
+ !isa<CXXRecordDecl>(DC))
+ return Found;
+
+ // C++ [temp.mem]p6:
+ // A specialization of a conversion function template is not found by
+ // name lookup. Instead, any conversion function templates visible in the
+ // context of the use are considered. [...]
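+  //
+  // For example (sketch):
+  //
+  //   struct A {
+  //     template<typename T> operator T*();
+  //   };
+  //   int *p = A().operator int*();  // not found by ordinary lookup;
+  //                                  // deduction below yields T = int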
+ const CXXRecordDecl *Record = cast<CXXRecordDecl>(DC);
+ if (!Record->isCompleteDefinition())
+ return Found;
+
+ for (CXXRecordDecl::conversion_iterator U = Record->conversion_begin(),
+ UEnd = Record->conversion_end(); U != UEnd; ++U) {
+ FunctionTemplateDecl *ConvTemplate = dyn_cast<FunctionTemplateDecl>(*U);
+ if (!ConvTemplate)
+ continue;
+
+ // When we're performing lookup for the purposes of redeclaration, just
+ // add the conversion function template. When we deduce template
+ // arguments for specializations, we'll end up unifying the return
+ // type of the new declaration with the type of the function template.
+ if (R.isForRedeclaration()) {
+ R.addDecl(ConvTemplate);
+ Found = true;
+ continue;
+ }
+
+ // C++ [temp.mem]p6:
+ // [...] For each such operator, if argument deduction succeeds
+ // (14.9.2.3), the resulting specialization is used as if found by
+ // name lookup.
+ //
+ // When referencing a conversion function for any purpose other than
+ // a redeclaration (such that we'll be building an expression with the
+ // result), perform template argument deduction and place the
+ // specialization into the result set. We do this to avoid forcing all
+ // callers to perform special deduction for conversion functions.
+ TemplateDeductionInfo Info(R.getNameLoc());
+ FunctionDecl *Specialization = nullptr;
+
+ const FunctionProtoType *ConvProto
+ = ConvTemplate->getTemplatedDecl()->getType()->getAs<FunctionProtoType>();
+ assert(ConvProto && "Nonsensical conversion function template type");
+
+ // Compute the type of the function that we would expect the conversion
+ // function to have, if it were to match the name given.
+ // FIXME: Calling convention!
+ FunctionProtoType::ExtProtoInfo EPI = ConvProto->getExtProtoInfo();
+ EPI.ExtInfo = EPI.ExtInfo.withCallingConv(CC_C);
+ EPI.ExceptionSpec = EST_None;
+ QualType ExpectedType
+ = R.getSema().Context.getFunctionType(R.getLookupName().getCXXNameType(),
+ None, EPI);
+
+ // Perform template argument deduction against the type that we would
+ // expect the function to have.
+ if (R.getSema().DeduceTemplateArguments(ConvTemplate, nullptr, ExpectedType,
+ Specialization, Info)
+ == Sema::TDK_Success) {
+ R.addDecl(Specialization);
+ Found = true;
+ }
+ }
+
+ return Found;
+}
+
+// Performs C++ unqualified lookup into the given file context.
+static bool
+CppNamespaceLookup(Sema &S, LookupResult &R, ASTContext &Context,
+ DeclContext *NS, UnqualUsingDirectiveSet &UDirs) {
+
+  assert(NS && NS->isFileContext() &&
+         "CppNamespaceLookup() requires namespace!");
+
+ // Perform direct name lookup into the LookupCtx.
+ bool Found = LookupDirect(S, R, NS);
+
+ // Perform direct name lookup into the namespaces nominated by the
+ // using directives whose common ancestor is this namespace.
+ for (const UnqualUsingEntry &UUE : UDirs.getNamespacesFor(NS))
+ if (LookupDirect(S, R, UUE.getNominatedNamespace()))
+ Found = true;
+
+ R.resolveKind();
+
+ return Found;
+}
+
+static bool isNamespaceOrTranslationUnitScope(Scope *S) {
+ if (DeclContext *Ctx = S->getEntity())
+ return Ctx->isFileContext();
+ return false;
+}
+
+// Find the next outer declaration context from this scope. This
+// routine actually returns the semantic outer context, which may
+// differ from the lexical context (encoded directly in the Scope
+// stack) when we are parsing a member of a class template. In this
+// case, the second element of the pair will be true, to indicate that
+// name lookup should continue searching in this semantic context when
+// it leaves the current template parameter scope.
+static std::pair<DeclContext *, bool> findOuterContext(Scope *S) {
+ DeclContext *DC = S->getEntity();
+ DeclContext *Lexical = nullptr;
+ for (Scope *OuterS = S->getParent(); OuterS;
+ OuterS = OuterS->getParent()) {
+ if (OuterS->getEntity()) {
+ Lexical = OuterS->getEntity();
+ break;
+ }
+ }
+
+ // C++ [temp.local]p8:
+ // In the definition of a member of a class template that appears
+ // outside of the namespace containing the class template
+ // definition, the name of a template-parameter hides the name of
+ // a member of this namespace.
+ //
+ // Example:
+ //
+ // namespace N {
+ // class C { };
+ //
+ // template<class T> class B {
+ // void f(T);
+ // };
+ // }
+ //
+ // template<class C> void N::B<C>::f(C) {
+ // C b; // C is the template parameter, not N::C
+ // }
+ //
+ // In this example, the lexical context we return is the
+ // TranslationUnit, while the semantic context is the namespace N.
+ if (!Lexical || !DC || !S->getParent() ||
+ !S->getParent()->isTemplateParamScope())
+ return std::make_pair(Lexical, false);
+
+ // Find the outermost template parameter scope.
+ // For the example, this is the scope for the template parameters of
+ // template<class C>.
+ Scope *OutermostTemplateScope = S->getParent();
+ while (OutermostTemplateScope->getParent() &&
+ OutermostTemplateScope->getParent()->isTemplateParamScope())
+ OutermostTemplateScope = OutermostTemplateScope->getParent();
+
+ // Find the namespace context in which the original scope occurs. In
+ // the example, this is namespace N.
+ DeclContext *Semantic = DC;
+ while (!Semantic->isFileContext())
+ Semantic = Semantic->getParent();
+
+ // Find the declaration context just outside of the template
+ // parameter scope. This is the context in which the template is
+  // being lexically declared (a namespace context). In the
+ // example, this is the global scope.
+ if (Lexical->isFileContext() && !Lexical->Equals(Semantic) &&
+ Lexical->Encloses(Semantic))
+ return std::make_pair(Semantic, true);
+
+ return std::make_pair(Lexical, false);
+}
+
+namespace {
+/// An RAII object to specify that we want to find block scope extern
+/// declarations.
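+///
+/// For example (sketch):
+///
+///   void f() {
+///     extern int n;  // block scope declaration of '::n'
+///   }
+///
+/// Ordinary unqualified lookup outside f() must not find this declaration,
+/// but redeclaration lookup must.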
+struct FindLocalExternScope {
+ FindLocalExternScope(LookupResult &R)
+ : R(R), OldFindLocalExtern(R.getIdentifierNamespace() &
+ Decl::IDNS_LocalExtern) {
+ R.setFindLocalExtern(R.getIdentifierNamespace() & Decl::IDNS_Ordinary);
+ }
+ void restore() {
+ R.setFindLocalExtern(OldFindLocalExtern);
+ }
+ ~FindLocalExternScope() {
+ restore();
+ }
+ LookupResult &R;
+ bool OldFindLocalExtern;
+};
+} // end anonymous namespace
+
+bool Sema::CppLookupName(LookupResult &R, Scope *S) {
+ assert(getLangOpts().CPlusPlus && "Can perform only C++ lookup");
+
+ DeclarationName Name = R.getLookupName();
+ Sema::LookupNameKind NameKind = R.getLookupKind();
+
+ // If this is the name of an implicitly-declared special member function,
+  // go through the scope stack to implicitly declare it.
+ if (isImplicitlyDeclaredMemberFunctionName(Name)) {
+ for (Scope *PreS = S; PreS; PreS = PreS->getParent())
+ if (DeclContext *DC = PreS->getEntity())
+ DeclareImplicitMemberFunctionsWithName(*this, Name, DC);
+ }
+
+  // At this point, any implicit member functions with the name we're looking
+  // for have been declared in every scope where it matters.
+
+ Scope *Initial = S;
+ IdentifierResolver::iterator
+ I = IdResolver.begin(Name),
+ IEnd = IdResolver.end();
+
+ // First we lookup local scope.
+ // We don't consider using-directives, as per 7.3.4.p1 [namespace.udir]
+ // ...During unqualified name lookup (3.4.1), the names appear as if
+ // they were declared in the nearest enclosing namespace which contains
+ // both the using-directive and the nominated namespace.
+ // [Note: in this context, "contains" means "contains directly or
+  // indirectly". ]
+ //
+ // For example:
+ // namespace A { int i; }
+ // void foo() {
+ // int i;
+ // {
+ // using namespace A;
+ // ++i; // finds local 'i', A::i appears at global scope
+ // }
+ // }
+ //
+ UnqualUsingDirectiveSet UDirs;
+ bool VisitedUsingDirectives = false;
+ bool LeftStartingScope = false;
+ DeclContext *OutsideOfTemplateParamDC = nullptr;
+
+ // When performing a scope lookup, we want to find local extern decls.
+ FindLocalExternScope FindLocals(R);
+
+ for (; S && !isNamespaceOrTranslationUnitScope(S); S = S->getParent()) {
+ DeclContext *Ctx = S->getEntity();
+
+ // Check whether the IdResolver has anything in this scope.
+ bool Found = false;
+ for (; I != IEnd && S->isDeclScope(*I); ++I) {
+ if (NamedDecl *ND = R.getAcceptableDecl(*I)) {
+ if (NameKind == LookupRedeclarationWithLinkage) {
+ // Determine whether this (or a previous) declaration is
+ // out-of-scope.
+ if (!LeftStartingScope && !Initial->isDeclScope(*I))
+ LeftStartingScope = true;
+
+ // If we found something outside of our starting scope that
+ // does not have linkage, skip it. If it's a template parameter,
+ // we still find it, so we can diagnose the invalid redeclaration.
+ if (LeftStartingScope && !((*I)->hasLinkage()) &&
+ !(*I)->isTemplateParameter()) {
+ R.setShadowed();
+ continue;
+ }
+ }
+
+ Found = true;
+ R.addDecl(ND);
+ }
+ }
+ if (Found) {
+ R.resolveKind();
+ if (S->isClassScope())
+ if (CXXRecordDecl *Record = dyn_cast_or_null<CXXRecordDecl>(Ctx))
+ R.setNamingClass(Record);
+ return true;
+ }
+
+ if (NameKind == LookupLocalFriendName && !S->isClassScope()) {
+ // C++11 [class.friend]p11:
+ // If a friend declaration appears in a local class and the name
+ // specified is an unqualified name, a prior declaration is
+ // looked up without considering scopes that are outside the
+ // innermost enclosing non-class scope.
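+      //
+      // E.g. (sketch):
+      //
+      //   void g();
+      //   void h() {
+      //     struct Local {
+      //       friend void g();  // prior-declaration lookup stops at h()'s
+      //     };                  // scope; it never considers ::g
+      //   }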
+ return false;
+ }
+
+ if (!Ctx && S->isTemplateParamScope() && OutsideOfTemplateParamDC &&
+ S->getParent() && !S->getParent()->isTemplateParamScope()) {
+ // We've just searched the last template parameter scope and
+ // found nothing, so look into the contexts between the
+ // lexical and semantic declaration contexts returned by
+ // findOuterContext(). This implements the name lookup behavior
+ // of C++ [temp.local]p8.
+ Ctx = OutsideOfTemplateParamDC;
+ OutsideOfTemplateParamDC = nullptr;
+ }
+
+ if (Ctx) {
+ DeclContext *OuterCtx;
+ bool SearchAfterTemplateScope;
+ std::tie(OuterCtx, SearchAfterTemplateScope) = findOuterContext(S);
+ if (SearchAfterTemplateScope)
+ OutsideOfTemplateParamDC = OuterCtx;
+
+ for (; Ctx && !Ctx->Equals(OuterCtx); Ctx = Ctx->getLookupParent()) {
+ // We do not directly look into transparent contexts, since
+ // those entities will be found in the nearest enclosing
+ // non-transparent context.
+ if (Ctx->isTransparentContext())
+ continue;
+
+ // We do not look directly into function or method contexts,
+ // since all of the local variables and parameters of the
+ // function/method are present within the Scope.
+ if (Ctx->isFunctionOrMethod()) {
+ // If we have an Objective-C instance method, look for ivars
+ // in the corresponding interface.
+ if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(Ctx)) {
+ if (Method->isInstanceMethod() && Name.getAsIdentifierInfo())
+ if (ObjCInterfaceDecl *Class = Method->getClassInterface()) {
+ ObjCInterfaceDecl *ClassDeclared;
+ if (ObjCIvarDecl *Ivar = Class->lookupInstanceVariable(
+ Name.getAsIdentifierInfo(),
+ ClassDeclared)) {
+ if (NamedDecl *ND = R.getAcceptableDecl(Ivar)) {
+ R.addDecl(ND);
+ R.resolveKind();
+ return true;
+ }
+ }
+ }
+ }
+
+ continue;
+ }
+
+ // If this is a file context, we need to perform unqualified name
+ // lookup considering using directives.
+ if (Ctx->isFileContext()) {
+ // If we haven't handled using directives yet, do so now.
+ if (!VisitedUsingDirectives) {
+ // Add using directives from this context up to the top level.
+ for (DeclContext *UCtx = Ctx; UCtx; UCtx = UCtx->getParent()) {
+ if (UCtx->isTransparentContext())
+ continue;
+
+ UDirs.visit(UCtx, UCtx);
+ }
+
+ // Find the innermost file scope, so we can add using directives
+ // from local scopes.
+ Scope *InnermostFileScope = S;
+ while (InnermostFileScope &&
+ !isNamespaceOrTranslationUnitScope(InnermostFileScope))
+ InnermostFileScope = InnermostFileScope->getParent();
+ UDirs.visitScopeChain(Initial, InnermostFileScope);
+
+ UDirs.done();
+
+ VisitedUsingDirectives = true;
+ }
+
+ if (CppNamespaceLookup(*this, R, Context, Ctx, UDirs)) {
+ R.resolveKind();
+ return true;
+ }
+
+ continue;
+ }
+
+ // Perform qualified name lookup into this context.
+ // FIXME: In some cases, we know that every name that could be found by
+ // this qualified name lookup will also be on the identifier chain. For
+ // example, inside a class without any base classes, we never need to
+ // perform qualified lookup because all of the members are on top of the
+ // identifier chain.
+ if (LookupQualifiedName(R, Ctx, /*InUnqualifiedLookup=*/true))
+ return true;
+ }
+ }
+ }
+
+ // Stop if we ran out of scopes.
+ // FIXME: This really, really shouldn't be happening.
+ if (!S) return false;
+
+ // If we are looking for members, no need to look into global/namespace scope.
+ if (NameKind == LookupMemberName)
+ return false;
+
+ // Collect UsingDirectiveDecls in all scopes, and recursively all
+ // nominated namespaces by those using-directives.
+ //
+ // FIXME: Cache this sorted list in Scope structure, and DeclContext, so we
+ // don't build it for each lookup!
+ if (!VisitedUsingDirectives) {
+ UDirs.visitScopeChain(Initial, S);
+ UDirs.done();
+ }
+
+ // If we're not performing redeclaration lookup, do not look for local
+ // extern declarations outside of a function scope.
+ if (!R.isForRedeclaration())
+ FindLocals.restore();
+
+ // Lookup namespace scope, and global scope.
+ // Unqualified name lookup in C++ requires looking into scopes
+ // that aren't strictly lexical, and therefore we walk through the
+ // context as well as walking through the scopes.
+ for (; S; S = S->getParent()) {
+ // Check whether the IdResolver has anything in this scope.
+ bool Found = false;
+ for (; I != IEnd && S->isDeclScope(*I); ++I) {
+ if (NamedDecl *ND = R.getAcceptableDecl(*I)) {
+ // We found something. Look for anything else in our scope
+ // with this same name and in an acceptable identifier
+ // namespace, so that we can construct an overload set if we
+ // need to.
+ Found = true;
+ R.addDecl(ND);
+ }
+ }
+
+ if (Found && S->isTemplateParamScope()) {
+ R.resolveKind();
+ return true;
+ }
+
+ DeclContext *Ctx = S->getEntity();
+ if (!Ctx && S->isTemplateParamScope() && OutsideOfTemplateParamDC &&
+ S->getParent() && !S->getParent()->isTemplateParamScope()) {
+ // We've just searched the last template parameter scope and
+ // found nothing, so look into the contexts between the
+ // lexical and semantic declaration contexts returned by
+ // findOuterContext(). This implements the name lookup behavior
+ // of C++ [temp.local]p8.
+ Ctx = OutsideOfTemplateParamDC;
+ OutsideOfTemplateParamDC = nullptr;
+ }
+
+ if (Ctx) {
+ DeclContext *OuterCtx;
+ bool SearchAfterTemplateScope;
+ std::tie(OuterCtx, SearchAfterTemplateScope) = findOuterContext(S);
+ if (SearchAfterTemplateScope)
+ OutsideOfTemplateParamDC = OuterCtx;
+
+ for (; Ctx && !Ctx->Equals(OuterCtx); Ctx = Ctx->getLookupParent()) {
+ // We do not directly look into transparent contexts, since
+ // those entities will be found in the nearest enclosing
+ // non-transparent context.
+ if (Ctx->isTransparentContext())
+ continue;
+
+ // If we have a context, and it's not a context stashed in the
+ // template parameter scope for an out-of-line definition, also
+ // look into that context.
+ if (!(Found && S && S->isTemplateParamScope())) {
+ assert(Ctx->isFileContext() &&
+ "We should have been looking only at file context here already.");
+
+ // Look into context considering using-directives.
+ if (CppNamespaceLookup(*this, R, Context, Ctx, UDirs))
+ Found = true;
+ }
+
+ if (Found) {
+ R.resolveKind();
+ return true;
+ }
+
+ if (R.isForRedeclaration() && !Ctx->isTransparentContext())
+ return false;
+ }
+ }
+
+ if (R.isForRedeclaration() && Ctx && !Ctx->isTransparentContext())
+ return false;
+ }
+
+ return !R.empty();
+}
+
+/// \brief Find the declaration that a class temploid member specialization was
+/// instantiated from, or the member itself if it is an explicit specialization.
+static Decl *getInstantiatedFrom(Decl *D, MemberSpecializationInfo *MSInfo) {
+ return MSInfo->isExplicitSpecialization() ? D : MSInfo->getInstantiatedFrom();
+}
+
+Module *Sema::getOwningModule(Decl *Entity) {
+ // If it's imported, grab its owning module.
+ Module *M = Entity->getImportedOwningModule();
+ if (M || !isa<NamedDecl>(Entity) || !cast<NamedDecl>(Entity)->isHidden())
+ return M;
+ assert(!Entity->isFromASTFile() &&
+ "hidden entity from AST file has no owning module");
+
+ if (!getLangOpts().ModulesLocalVisibility) {
+ // If we're not tracking visibility locally, the only way a declaration
+    // can be hidden and local is if it's hidden because its parent is (for
+ // instance, maybe this is a lazily-declared special member of an imported
+ // class).
+ auto *Parent = cast<NamedDecl>(Entity->getDeclContext());
+ assert(Parent->isHidden() && "unexpectedly hidden decl");
+ return getOwningModule(Parent);
+ }
+
+ // It's local and hidden; grab or compute its owning module.
+ M = Entity->getLocalOwningModule();
+ if (M)
+ return M;
+
+ if (auto *Containing =
+ PP.getModuleContainingLocation(Entity->getLocation())) {
+ M = Containing;
+ } else if (Entity->isInvalidDecl() || Entity->getLocation().isInvalid()) {
+ // Don't bother tracking visibility for invalid declarations with broken
+ // locations.
+ cast<NamedDecl>(Entity)->setHidden(false);
+ } else {
+ // We need to assign a module to an entity that exists outside of any
+ // module, so that we can hide it from modules that we textually enter.
+ // Invent a fake module for all such entities.
+ if (!CachedFakeTopLevelModule) {
+ CachedFakeTopLevelModule =
+ PP.getHeaderSearchInfo().getModuleMap().findOrCreateModule(
+ "<top-level>", nullptr, false, false).first;
+
+ auto &SrcMgr = PP.getSourceManager();
+ SourceLocation StartLoc =
+ SrcMgr.getLocForStartOfFile(SrcMgr.getMainFileID());
+ auto &TopLevel =
+ VisibleModulesStack.empty() ? VisibleModules : VisibleModulesStack[0];
+ TopLevel.setVisible(CachedFakeTopLevelModule, StartLoc);
+ }
+
+ M = CachedFakeTopLevelModule;
+ }
+
+ if (M)
+ Entity->setLocalOwningModule(M);
+ return M;
+}
+
+void Sema::makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc) {
+ if (auto *M = PP.getModuleContainingLocation(Loc))
+ Context.mergeDefinitionIntoModule(ND, M);
+ else
+ // We're not building a module; just make the definition visible.
+ ND->setHidden(false);
+
+ // If ND is a template declaration, make the template parameters
+ // visible too. They're not (necessarily) within a mergeable DeclContext.
+ if (auto *TD = dyn_cast<TemplateDecl>(ND))
+ for (auto *Param : *TD->getTemplateParameters())
+ makeMergedDefinitionVisible(Param, Loc);
+}
+
+/// \brief Find the module in which the given declaration was defined.
+static Module *getDefiningModule(Sema &S, Decl *Entity) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Entity)) {
+ // If this function was instantiated from a template, the defining module is
+ // the module containing the pattern.
+ if (FunctionDecl *Pattern = FD->getTemplateInstantiationPattern())
+ Entity = Pattern;
+ } else if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Entity)) {
+ if (CXXRecordDecl *Pattern = RD->getTemplateInstantiationPattern())
+ Entity = Pattern;
+ } else if (EnumDecl *ED = dyn_cast<EnumDecl>(Entity)) {
+ if (MemberSpecializationInfo *MSInfo = ED->getMemberSpecializationInfo())
+ Entity = getInstantiatedFrom(ED, MSInfo);
+ } else if (VarDecl *VD = dyn_cast<VarDecl>(Entity)) {
+ // FIXME: Map from variable template specializations back to the template.
+ if (MemberSpecializationInfo *MSInfo = VD->getMemberSpecializationInfo())
+ Entity = getInstantiatedFrom(VD, MSInfo);
+ }
+
+ // Walk up to the containing context. That might also have been instantiated
+ // from a template.
+ DeclContext *Context = Entity->getDeclContext();
+ if (Context->isFileContext())
+ return S.getOwningModule(Entity);
+ return getDefiningModule(S, cast<Decl>(Context));
+}
+
+llvm::DenseSet<Module*> &Sema::getLookupModules() {
+ unsigned N = ActiveTemplateInstantiations.size();
+ for (unsigned I = ActiveTemplateInstantiationLookupModules.size();
+ I != N; ++I) {
+ Module *M =
+ getDefiningModule(*this, ActiveTemplateInstantiations[I].Entity);
+ if (M && !LookupModulesCache.insert(M).second)
+ M = nullptr;
+ ActiveTemplateInstantiationLookupModules.push_back(M);
+ }
+ return LookupModulesCache;
+}
+
+bool Sema::hasVisibleMergedDefinition(NamedDecl *Def) {
+ for (Module *Merged : Context.getModulesWithMergedDefinition(Def))
+ if (isModuleVisible(Merged))
+ return true;
+ return false;
+}
+
+template<typename ParmDecl>
+static bool
+hasVisibleDefaultArgument(Sema &S, const ParmDecl *D,
+ llvm::SmallVectorImpl<Module *> *Modules) {
+ if (!D->hasDefaultArgument())
+ return false;
+
+ while (D) {
+ auto &DefaultArg = D->getDefaultArgStorage();
+ if (!DefaultArg.isInherited() && S.isVisible(D))
+ return true;
+
+ if (!DefaultArg.isInherited() && Modules) {
+ auto *NonConstD = const_cast<ParmDecl*>(D);
+ Modules->push_back(S.getOwningModule(NonConstD));
+ const auto &Merged = S.Context.getModulesWithMergedDefinition(NonConstD);
+ Modules->insert(Modules->end(), Merged.begin(), Merged.end());
+ }
+
+ // If there was a previous default argument, maybe its parameter is visible.
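+    //
+    // E.g. (sketch): after 'template<typename T = int> struct X;' a later
+    // redeclaration 'template<typename T> struct X;' inherits the default;
+    // we walk back to the parameter that actually wrote '= int' and check
+    // its visibility there.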
+ D = DefaultArg.getInheritedFrom();
+ }
+ return false;
+}
+
+bool Sema::hasVisibleDefaultArgument(const NamedDecl *D,
+ llvm::SmallVectorImpl<Module *> *Modules) {
+ if (auto *P = dyn_cast<TemplateTypeParmDecl>(D))
+ return ::hasVisibleDefaultArgument(*this, P, Modules);
+ if (auto *P = dyn_cast<NonTypeTemplateParmDecl>(D))
+ return ::hasVisibleDefaultArgument(*this, P, Modules);
+ return ::hasVisibleDefaultArgument(*this, cast<TemplateTemplateParmDecl>(D),
+ Modules);
+}
+
+/// \brief Determine whether a declaration is visible to name lookup.
+///
+/// This routine determines whether the declaration D is visible in the current
+/// lookup context, taking into account the current template instantiation
+/// stack. During template instantiation, a declaration is visible if it is
+/// visible from a module containing any entity on the template instantiation
+/// path (by instantiating a template, you allow it to see the declarations that
+/// your module can see, including those later on in your module).
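+///
+/// For example (sketch): if module M1 defines 'template<typename T> int f(T);'
+/// and an instantiation of 'f' is triggered from code in module M2, names
+/// visible in either M1 or M2 may be used during that instantiation.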
+bool LookupResult::isVisibleSlow(Sema &SemaRef, NamedDecl *D) {
+ assert(D->isHidden() && "should not call this: not in slow case");
+ Module *DeclModule = nullptr;
+
+ if (SemaRef.getLangOpts().ModulesLocalVisibility) {
+ DeclModule = SemaRef.getOwningModule(D);
+ if (!DeclModule) {
+ // getOwningModule() may have decided the declaration should not be hidden.
+ assert(!D->isHidden() && "hidden decl not from a module");
+ return true;
+ }
+
+ // If the owning module is visible, and the decl is not module private,
+ // then the decl is visible too. (Module private is ignored within the same
+ // top-level module.)
+ if ((!D->isFromASTFile() || !D->isModulePrivate()) &&
+ (SemaRef.isModuleVisible(DeclModule) ||
+ SemaRef.hasVisibleMergedDefinition(D)))
+ return true;
+ }
+
+ // If this declaration is not at namespace scope nor module-private,
+ // then it is visible if its lexical parent has a visible definition.
+ DeclContext *DC = D->getLexicalDeclContext();
+ if (!D->isModulePrivate() &&
+ DC && !DC->isFileContext() && !isa<LinkageSpecDecl>(DC)) {
+ // For a parameter, check whether our current template declaration's
+ // lexical context is visible, not whether there's some other visible
+ // definition of it, because parameters aren't "within" the definition.
+ if ((D->isTemplateParameter() || isa<ParmVarDecl>(D))
+ ? isVisible(SemaRef, cast<NamedDecl>(DC))
+ : SemaRef.hasVisibleDefinition(cast<NamedDecl>(DC))) {
+ if (SemaRef.ActiveTemplateInstantiations.empty() &&
+ // FIXME: Do something better in this case.
+ !SemaRef.getLangOpts().ModulesLocalVisibility) {
+ // Cache the fact that this declaration is implicitly visible because
+ // its parent has a visible definition.
+ D->setHidden(false);
+ }
+ return true;
+ }
+ return false;
+ }
+
+ // Find the extra places where we need to look.
+ llvm::DenseSet<Module*> &LookupModules = SemaRef.getLookupModules();
+ if (LookupModules.empty())
+ return false;
+
+ if (!DeclModule) {
+ DeclModule = SemaRef.getOwningModule(D);
+ assert(DeclModule && "hidden decl not from a module");
+ }
+
+ // If our lookup set contains the decl's module, it's visible.
+ if (LookupModules.count(DeclModule))
+ return true;
+
+ // If the declaration isn't exported, it's not visible in any other module.
+ if (D->isModulePrivate())
+ return false;
+
+ // Check whether DeclModule is transitively exported to an import of
+ // the lookup set.
+ return std::any_of(LookupModules.begin(), LookupModules.end(),
+ [&](Module *M) { return M->isModuleVisible(DeclModule); });
+}
+
+bool Sema::isVisibleSlow(const NamedDecl *D) {
+ return LookupResult::isVisible(*this, const_cast<NamedDecl*>(D));
+}
+
+bool Sema::shouldLinkPossiblyHiddenDecl(LookupResult &R, const NamedDecl *New) {
+ for (auto *D : R) {
+ if (isVisible(D))
+ return true;
+ }
+ return New->isExternallyVisible();
+}
+
+/// \brief Retrieve the visible declaration corresponding to D, if any.
+///
+/// This routine determines whether the declaration D is visible in the current
+/// module, with the current imports. If not, it checks whether any
+/// redeclaration of D is visible, and if so, returns that declaration.
+///
+/// \returns D, or a visible previous declaration of D, whichever is more recent
+/// and visible. If no declaration of D is visible, returns null.
+static NamedDecl *findAcceptableDecl(Sema &SemaRef, NamedDecl *D) {
+ assert(!LookupResult::isVisible(SemaRef, D) && "not in slow case");
+
+ for (auto RD : D->redecls()) {
+ if (auto ND = dyn_cast<NamedDecl>(RD)) {
+ // FIXME: This is wrong in the case where the previous declaration is not
+ // visible in the same scope as D. This needs to be done much more
+ // carefully.
+ if (LookupResult::isVisible(SemaRef, ND))
+ return ND;
+ }
+ }
+
+ return nullptr;
+}
+
+NamedDecl *LookupResult::getAcceptableDeclSlow(NamedDecl *D) const {
+ return findAcceptableDecl(getSema(), D);
+}
+
+/// @brief Perform unqualified name lookup starting from a given
+/// scope.
+///
+/// Unqualified name lookup (C++ [basic.lookup.unqual], C99 6.2.1) is
+/// used to find names within the current scope. For example, 'x' in
+/// @code
+/// int x;
+/// int f() {
+///   return x; // unqualified name lookup finds 'x' in the global scope
+/// }
+/// @endcode
+///
+/// Different lookup criteria can find different names. For example, a
+/// particular scope can have both a struct and a function of the same
+/// name, and each can be found by certain lookup criteria. For more
+/// information about lookup criteria, see the documentation for the
+/// class LookupCriteria.
+///
+/// @param S The scope from which unqualified name lookup will
+/// begin. If the lookup criteria permits, name lookup may also search
+/// in the parent scopes.
+///
+/// @param [in,out] R Specifies the lookup to perform (e.g., the name to
+/// look up and the lookup kind), and is updated with the results of lookup
+/// including zero or more declarations and possibly additional information
+/// used to diagnose ambiguities.
+///
+/// @returns \c true if lookup succeeded and false otherwise.
+bool Sema::LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation) {
+ DeclarationName Name = R.getLookupName();
+ if (!Name) return false;
+
+ LookupNameKind NameKind = R.getLookupKind();
+
+ if (!getLangOpts().CPlusPlus) {
+ // Unqualified name lookup in C/Objective-C is purely lexical, so
+ // search in the declarations attached to the name.
+ if (NameKind == Sema::LookupRedeclarationWithLinkage) {
+ // Find the nearest non-transparent declaration scope.
+ while (!(S->getFlags() & Scope::DeclScope) ||
+ (S->getEntity() && S->getEntity()->isTransparentContext()))
+ S = S->getParent();
+ }
+
+ // When performing a scope lookup, we want to find local extern decls.
+ FindLocalExternScope FindLocals(R);
+
+ // Scan up the scope chain looking for a decl that matches this
+ // identifier that is in the appropriate namespace. This search
+ // should not take long, as shadowing of names is uncommon, and
+ // deep shadowing is extremely uncommon.
+ bool LeftStartingScope = false;
+
+ for (IdentifierResolver::iterator I = IdResolver.begin(Name),
+ IEnd = IdResolver.end();
+ I != IEnd; ++I)
+ if (NamedDecl *D = R.getAcceptableDecl(*I)) {
+ if (NameKind == LookupRedeclarationWithLinkage) {
+ // Determine whether this (or a previous) declaration is
+ // out-of-scope.
+ if (!LeftStartingScope && !S->isDeclScope(*I))
+ LeftStartingScope = true;
+
+ // If we found something outside of our starting scope that
+ // does not have linkage, skip it.
+ if (LeftStartingScope && !((*I)->hasLinkage())) {
+ R.setShadowed();
+ continue;
+ }
+ }
+ else if (NameKind == LookupObjCImplicitSelfParam &&
+ !isa<ImplicitParamDecl>(*I))
+ continue;
+
+ R.addDecl(D);
+
+ // Check whether there are any other declarations with the same name
+ // and in the same scope.
+ if (I != IEnd) {
+ // Find the scope in which this declaration was declared (if it
+ // actually exists in a Scope).
+ while (S && !S->isDeclScope(D))
+ S = S->getParent();
+
+ // If the scope containing the declaration is the translation unit,
+ // then we'll need to perform our checks based on the matching
+ // DeclContexts rather than matching scopes.
+ if (S && isNamespaceOrTranslationUnitScope(S))
+ S = nullptr;
+
+ // Compute the DeclContext, if we need it.
+ DeclContext *DC = nullptr;
+ if (!S)
+ DC = (*I)->getDeclContext()->getRedeclContext();
+
+ IdentifierResolver::iterator LastI = I;
+ for (++LastI; LastI != IEnd; ++LastI) {
+ if (S) {
+ // Match based on scope.
+ if (!S->isDeclScope(*LastI))
+ break;
+ } else {
+ // Match based on DeclContext.
+ DeclContext *LastDC
+ = (*LastI)->getDeclContext()->getRedeclContext();
+ if (!LastDC->Equals(DC))
+ break;
+ }
+
+ // If the declaration is in the right namespace and visible, add it.
+ if (NamedDecl *LastD = R.getAcceptableDecl(*LastI))
+ R.addDecl(LastD);
+ }
+
+ R.resolveKind();
+ }
+
+ return true;
+ }
+ } else {
+ // Perform C++ unqualified name lookup.
+ if (CppLookupName(R, S))
+ return true;
+ }
+
+ // If we didn't find a use of this identifier, and if the identifier
+ // corresponds to a compiler builtin, create the decl object for the builtin
+ // now, injecting it into translation unit scope, and return it.
+ if (AllowBuiltinCreation && LookupBuiltin(*this, R))
+ return true;
+
+ // If we didn't find a use of this identifier, the ExternalSource
+ // may be able to handle the situation.
+ // Note: some lookup failures are expected!
+ // See e.g. R.isForRedeclaration().
+ return (ExternalSource && ExternalSource->LookupUnqualified(R, S));
+}
+
+/// @brief Perform qualified name lookup in the namespaces nominated by
+/// the using-directives in the given context.
+///
+/// C++98 [namespace.qual]p2:
+/// Given X::m (where X is a user-declared namespace), or given \::m
+/// (where X is the global namespace), let S be the set of all
+/// declarations of m in X and in the transitive closure of all
+/// namespaces nominated by using-directives in X and its used
+/// namespaces, except that using-directives are ignored in any
+/// namespace, including X, directly containing one or more
+/// declarations of m. No namespace is searched more than once in
+/// the lookup of a name. If S is the empty set, the program is
+/// ill-formed. Otherwise, if S has exactly one member, or if the
+/// context of the reference is a using-declaration
+/// (namespace.udecl), S is the required set of declarations of
+/// m. Otherwise if the use of m is not one that allows a unique
+/// declaration to be chosen from S, the program is ill-formed.
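+///
+/// For example (sketch):
+/// @code
+///   namespace A { int m; }
+///   namespace B { using namespace A; }
+///   int x = B::m;  // lookup follows B's using-directive into A
+/// @endcode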
+///
+/// C++98 [namespace.qual]p5:
+/// During the lookup of a qualified namespace member name, if the
+/// lookup finds more than one declaration of the member, and if one
+/// declaration introduces a class name or enumeration name and the
+/// other declarations either introduce the same object, the same
+/// enumerator or a set of functions, the non-type name hides the
+/// class or enumeration name if and only if the declarations are
+/// from the same namespace; otherwise (the declarations are from
+/// different namespaces), the program is ill-formed.
+static bool LookupQualifiedNameInUsingDirectives(Sema &S, LookupResult &R,
+ DeclContext *StartDC) {
+ assert(StartDC->isFileContext() && "start context is not a file context");
+
+ DeclContext::udir_range UsingDirectives = StartDC->using_directives();
+ if (UsingDirectives.begin() == UsingDirectives.end()) return false;
+
+ // We have at least added all these contexts to the queue.
+ llvm::SmallPtrSet<DeclContext*, 8> Visited;
+ Visited.insert(StartDC);
+
+ // We have not yet looked into these namespaces, much less added
+ // their "using-children" to the queue.
+ SmallVector<NamespaceDecl*, 8> Queue;
+
+ // We have already looked into the initial namespace; seed the queue
+ // with its using-children.
+ for (auto *I : UsingDirectives) {
+ NamespaceDecl *ND = I->getNominatedNamespace()->getOriginalNamespace();
+ if (Visited.insert(ND).second)
+ Queue.push_back(ND);
+ }
+
+ // The easiest way to implement the restriction in [namespace.qual]p5
+ // is to check whether any of the individual results found a tag
+ // and, if so, to declare an ambiguity if the final result is not
+ // a tag.
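+  //
+  // E.g. (sketch): for X::m, 'struct m {};' in one nominated namespace
+  // together with 'int m;' in a different one is ill-formed, whereas both
+  // in the same nominated namespace would let the variable hide the tag.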
+ bool FoundTag = false;
+ bool FoundNonTag = false;
+
+ LookupResult LocalR(LookupResult::Temporary, R);
+
+ bool Found = false;
+ while (!Queue.empty()) {
+ NamespaceDecl *ND = Queue.pop_back_val();
+
+ // We go through some convolutions here to avoid copying results
+ // between LookupResults.
+ bool UseLocal = !R.empty();
+ LookupResult &DirectR = UseLocal ? LocalR : R;
+ bool FoundDirect = LookupDirect(S, DirectR, ND);
+
+ if (FoundDirect) {
+ // First do any local hiding.
+ DirectR.resolveKind();
+
+ // If the local result is a tag, remember that.
+ if (DirectR.isSingleTagDecl())
+ FoundTag = true;
+ else
+ FoundNonTag = true;
+
+ // Append the local results to the total results if necessary.
+ if (UseLocal) {
+ R.addAllDecls(LocalR);
+ LocalR.clear();
+ }
+ }
+
+ // If we find names in this namespace, ignore its using directives.
+ if (FoundDirect) {
+ Found = true;
+ continue;
+ }
+
+ for (auto I : ND->using_directives()) {
+ NamespaceDecl *Nom = I->getNominatedNamespace();
+ if (Visited.insert(Nom).second)
+ Queue.push_back(Nom);
+ }
+ }
+
+ if (Found) {
+ if (FoundTag && FoundNonTag)
+ R.setAmbiguousQualifiedTagHiding();
+ else
+ R.resolveKind();
+ }
+
+ return Found;
+}
+
+/// \brief Callback that looks for any member of a class with the given name.
+static bool LookupAnyMember(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path, DeclarationName Name) {
+ RecordDecl *BaseRecord = Specifier->getType()->getAs<RecordType>()->getDecl();
+
+ Path.Decls = BaseRecord->lookup(Name);
+ return !Path.Decls.empty();
+}
+
+/// \brief Determine whether the given set of member declarations contains only
+/// static members, nested types, and enumerators.
+template<typename InputIterator>
+static bool HasOnlyStaticMembers(InputIterator First, InputIterator Last) {
+ Decl *D = (*First)->getUnderlyingDecl();
+ if (isa<VarDecl>(D) || isa<TypeDecl>(D) || isa<EnumConstantDecl>(D))
+ return true;
+
+ if (isa<CXXMethodDecl>(D)) {
+ // Determine whether all of the methods are static.
+ bool AllMethodsAreStatic = true;
+    for (; First != Last; ++First) {
+ D = (*First)->getUnderlyingDecl();
+
+ if (!isa<CXXMethodDecl>(D)) {
+ assert(isa<TagDecl>(D) && "Non-function must be a tag decl");
+ break;
+ }
+
+ if (!cast<CXXMethodDecl>(D)->isStatic()) {
+ AllMethodsAreStatic = false;
+ break;
+ }
+ }
+
+ if (AllMethodsAreStatic)
+ return true;
+ }
+
+ return false;
+}
+
+/// \brief Perform qualified name lookup into a given context.
+///
+/// Qualified name lookup (C++ [basic.lookup.qual]) is used to find
+/// names when the context of those names is explicitly specified, e.g.,
+/// "std::vector" or "x->member", or as part of unqualified name lookup.
+///
+/// Different lookup criteria can find different names. For example, a
+/// particular scope can have both a struct and a function of the same
+/// name, and each can be found by certain lookup criteria. For more
+/// information about lookup criteria, see the documentation for the
+/// class LookupCriteria.
+///
+/// \param R captures both the lookup criteria and any lookup results found.
+///
+/// \param LookupCtx The context in which qualified name lookup will
+/// search. If the lookup criteria permits, name lookup may also search
+/// in the parent contexts or (for C++ classes) base classes.
+///
+/// \param InUnqualifiedLookup true if this is qualified name lookup that
+/// occurs as part of unqualified name lookup.
+///
+/// \returns true if lookup succeeded, false if it failed.
+bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
+ bool InUnqualifiedLookup) {
+ assert(LookupCtx && "Sema::LookupQualifiedName requires a lookup context");
+
+ if (!R.getLookupName())
+ return false;
+
+ // Make sure that the declaration context is complete.
+ assert((!isa<TagDecl>(LookupCtx) ||
+ LookupCtx->isDependentContext() ||
+ cast<TagDecl>(LookupCtx)->isCompleteDefinition() ||
+ cast<TagDecl>(LookupCtx)->isBeingDefined()) &&
+ "Declaration context must already be complete!");
+
+ struct QualifiedLookupInScope {
+ bool oldVal;
+ DeclContext *Context;
+    // Set a flag in the DeclContext to inform the debugger that we're
+    // performing a qualified name lookup.
+ QualifiedLookupInScope(DeclContext *ctx) : Context(ctx) {
+ oldVal = ctx->setUseQualifiedLookup();
+ }
+ ~QualifiedLookupInScope() {
+ Context->setUseQualifiedLookup(oldVal);
+ }
+ } QL(LookupCtx);
+
+ if (LookupDirect(*this, R, LookupCtx)) {
+ R.resolveKind();
+ if (isa<CXXRecordDecl>(LookupCtx))
+ R.setNamingClass(cast<CXXRecordDecl>(LookupCtx));
+ return true;
+ }
+
+ // Don't descend into implied contexts for redeclarations.
+ // C++98 [namespace.qual]p6:
+ // In a declaration for a namespace member in which the
+ // declarator-id is a qualified-id, given that the qualified-id
+ // for the namespace member has the form
+ // nested-name-specifier unqualified-id
+ // the unqualified-id shall name a member of the namespace
+ // designated by the nested-name-specifier.
+ // See also [class.mfct]p5 and [class.static.data]p2.
+ if (R.isForRedeclaration())
+ return false;
+
+ // If this is a namespace, look it up in the implied namespaces.
+ if (LookupCtx->isFileContext())
+ return LookupQualifiedNameInUsingDirectives(*this, R, LookupCtx);
+
+  // If this isn't a C++ class, we aren't allowed to look into base
+  // classes, so we're done.
+ CXXRecordDecl *LookupRec = dyn_cast<CXXRecordDecl>(LookupCtx);
+ if (!LookupRec || !LookupRec->getDefinition())
+ return false;
+
+ // If we're performing qualified name lookup into a dependent class,
+ // then we are actually looking into a current instantiation. If we have any
+ // dependent base classes, then we either have to delay lookup until
+ // template instantiation time (at which point all bases will be available)
+ // or we have to fail.
+ if (!InUnqualifiedLookup && LookupRec->isDependentContext() &&
+ LookupRec->hasAnyDependentBases()) {
+ R.setNotFoundInCurrentInstantiation();
+ return false;
+ }
+
+ // Perform lookup into our base classes.
+ CXXBasePaths Paths;
+ Paths.setOrigin(LookupRec);
+
+ // Look for this member in our base classes
+ bool (*BaseCallback)(const CXXBaseSpecifier *Specifier, CXXBasePath &Path,
+ DeclarationName Name) = nullptr;
+ switch (R.getLookupKind()) {
+ case LookupObjCImplicitSelfParam:
+ case LookupOrdinaryName:
+ case LookupMemberName:
+ case LookupRedeclarationWithLinkage:
+ case LookupLocalFriendName:
+ BaseCallback = &CXXRecordDecl::FindOrdinaryMember;
+ break;
+
+ case LookupTagName:
+ BaseCallback = &CXXRecordDecl::FindTagMember;
+ break;
+
+ case LookupAnyName:
+ BaseCallback = &LookupAnyMember;
+ break;
+
+ case LookupUsingDeclName:
+    // This lookup is for redeclarations only; fall through.
+ case LookupOperatorName:
+ case LookupNamespaceName:
+ case LookupObjCProtocolName:
+ case LookupLabel:
+ // These lookups will never find a member in a C++ class (or base class).
+ return false;
+
+ case LookupNestedNameSpecifierName:
+ BaseCallback = &CXXRecordDecl::FindNestedNameSpecifierMember;
+ break;
+ }
+
+ DeclarationName Name = R.getLookupName();
+ if (!LookupRec->lookupInBases(
+ [=](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
+ return BaseCallback(Specifier, Path, Name);
+ },
+ Paths))
+ return false;
+
+ R.setNamingClass(LookupRec);
+
+ // C++ [class.member.lookup]p2:
+ // [...] If the resulting set of declarations are not all from
+ // sub-objects of the same type, or the set has a nonstatic member
+ // and includes members from distinct sub-objects, there is an
+ // ambiguity and the program is ill-formed. Otherwise that set is
+ // the result of the lookup.
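+  //
+  // For example (sketch):
+  //
+  //   struct A { int x; };  struct B { int x; };
+  //   struct C : A, B {};   // 'x' in C: sub-objects of different types,
+  //                         // so AmbiguousBaseSubobjectTypes
+  //
+  //   struct T { int y; };
+  //   struct L : T {};  struct R : T {};
+  //   struct D : L, R {};   // 'y' in D: two T sub-objects and a nonstatic
+  //                         // member, so AmbiguousBaseSubobjects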
+ QualType SubobjectType;
+ int SubobjectNumber = 0;
+ AccessSpecifier SubobjectAccess = AS_none;
+
+ for (CXXBasePaths::paths_iterator Path = Paths.begin(), PathEnd = Paths.end();
+ Path != PathEnd; ++Path) {
+ const CXXBasePathElement &PathElement = Path->back();
+
+ // Pick the best (i.e. most permissive i.e. numerically lowest) access
+ // across all paths.
+ SubobjectAccess = std::min(SubobjectAccess, Path->Access);
+
+ // Determine whether we're looking at a distinct sub-object or not.
+ if (SubobjectType.isNull()) {
+ // This is the first subobject we've looked at. Record its type.
+ SubobjectType = Context.getCanonicalType(PathElement.Base->getType());
+ SubobjectNumber = PathElement.SubobjectNumber;
+ continue;
+ }
+
+ if (SubobjectType
+ != Context.getCanonicalType(PathElement.Base->getType())) {
+ // We found members of the given name in two subobjects of
+ // different types. If the declaration sets aren't the same, this
+ // lookup is ambiguous.
+ if (HasOnlyStaticMembers(Path->Decls.begin(), Path->Decls.end())) {
+ CXXBasePaths::paths_iterator FirstPath = Paths.begin();
+ DeclContext::lookup_iterator FirstD = FirstPath->Decls.begin();
+ DeclContext::lookup_iterator CurrentD = Path->Decls.begin();
+
+ while (FirstD != FirstPath->Decls.end() &&
+ CurrentD != Path->Decls.end()) {
+ if ((*FirstD)->getUnderlyingDecl()->getCanonicalDecl() !=
+ (*CurrentD)->getUnderlyingDecl()->getCanonicalDecl())
+ break;
+
+ ++FirstD;
+ ++CurrentD;
+ }
+
+ if (FirstD == FirstPath->Decls.end() &&
+ CurrentD == Path->Decls.end())
+ continue;
+ }
+
+ R.setAmbiguousBaseSubobjectTypes(Paths);
+ return true;
+ }
+
+ if (SubobjectNumber != PathElement.SubobjectNumber) {
+ // We have a different subobject of the same type.
+
+ // C++ [class.member.lookup]p5:
+ // A static member, a nested type or an enumerator defined in
+ // a base class T can unambiguously be found even if an object
+ // has more than one base class subobject of type T.
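+      //
+      // E.g. (sketch):
+      //
+      //   struct B { static int n; };
+      //   struct L : B {};  struct R : B {};
+      //   struct D : L, R {};
+      //   int k = D::n;  // OK: 'n' is static, found through both B
+      //                  // sub-objects without ambiguity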
+ if (HasOnlyStaticMembers(Path->Decls.begin(), Path->Decls.end()))
+ continue;
+
+ // We have found a nonstatic member name in multiple, distinct
+ // subobjects. Name lookup is ambiguous.
+ R.setAmbiguousBaseSubobjects(Paths);
+ return true;
+ }
+ }
+
+ // Lookup in a base class succeeded; return these results.
+
+ for (auto *D : Paths.front().Decls) {
+ AccessSpecifier AS = CXXRecordDecl::MergeAccess(SubobjectAccess,
+ D->getAccess());
+ R.addDecl(D, AS);
+ }
+ R.resolveKind();
+ return true;
+}
+
+/// \brief Performs qualified name lookup, or a special type of lookup for
+/// the "__super::" scope specifier.
+///
+/// This routine is a convenience overload meant to be called from contexts
+/// that need to perform a qualified name lookup with an optional C++ scope
+/// specifier that might require a special kind of lookup.
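+///
+/// For example (sketch; '__super' is a Microsoft extension):
+/// \code
+///   struct B { void f(int); };
+///   struct D : B {
+///     void f(int i) { __super::f(i); }  // looks up 'f' in D's bases
+///   };
+/// \endcode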
+///
+/// \param R captures both the lookup criteria and any lookup results found.
+///
+/// \param LookupCtx The context in which qualified name lookup will
+/// search.
+///
+/// \param SS An optional C++ scope-specifier.
+///
+/// \returns true if lookup succeeded, false if it failed.
+bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
+ CXXScopeSpec &SS) {
+ auto *NNS = SS.getScopeRep();
+ if (NNS && NNS->getKind() == NestedNameSpecifier::Super)
+ return LookupInSuper(R, NNS->getAsRecordDecl());
+  else
+    return LookupQualifiedName(R, LookupCtx);
+}
+
+/// @brief Performs name lookup for a name that was parsed in the
+/// source code, and may contain a C++ scope specifier.
+///
+/// This routine is a convenience routine meant to be called from
+/// contexts that receive a name and an optional C++ scope specifier
+/// (e.g., "N::M::x"). It will then perform either qualified or
+/// unqualified name lookup (with LookupQualifiedName or LookupName,
+/// respectively) on the given name and return those results. It will
+/// perform a special type of lookup for the "__super::" scope specifier.
+///
+/// @param S The scope from which unqualified name lookup will
+/// begin.
+///
+/// @param SS An optional C++ scope-specifier, e.g., "::N::M".
+///
+/// @param EnteringContext Indicates whether we are going to enter the
+/// context of the scope-specifier SS (if present).
+///
+/// @returns True if any decls were found (but possibly ambiguous)
+bool Sema::LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
+ bool AllowBuiltinCreation, bool EnteringContext) {
+ if (SS && SS->isInvalid()) {
+ // When the scope specifier is invalid, don't even look for
+ // anything.
+ return false;
+ }
+
+ if (SS && SS->isSet()) {
+ NestedNameSpecifier *NNS = SS->getScopeRep();
+ if (NNS->getKind() == NestedNameSpecifier::Super)
+ return LookupInSuper(R, NNS->getAsRecordDecl());
+
+ if (DeclContext *DC = computeDeclContext(*SS, EnteringContext)) {
+ // We have resolved the scope specifier to a particular declaration
+      // context, and will perform name lookup in that context.
+ if (!DC->isDependentContext() && RequireCompleteDeclContext(*SS, DC))
+ return false;
+
+ R.setContextRange(SS->getRange());
+ return LookupQualifiedName(R, DC);
+ }
+
+    // We could not resolve the scope specifier to a specific declaration
+ // context, which means that SS refers to an unknown specialization.
+ // Name lookup can't find anything in this case.
+ R.setNotFoundInCurrentInstantiation();
+ R.setContextRange(SS->getRange());
+ return false;
+ }
+
+ // Perform unqualified name lookup starting in the given scope.
+ return LookupName(R, S, AllowBuiltinCreation);
+}
+
+/// \brief Perform qualified name lookup into all base classes of the given
+/// class.
+///
+/// \param R captures both the lookup criteria and any lookup results found.
+///
+/// \param Class The context in which qualified name lookup will
+/// search. Name lookup will search in all base classes merging the results.
+///
+/// @returns True if any decls were found (but possibly ambiguous)
+bool Sema::LookupInSuper(LookupResult &R, CXXRecordDecl *Class) {
+ // The access-control rules we use here are essentially the rules for
+ // doing a lookup in Class that just magically skipped the direct
+ // members of Class itself. That is, the naming class is Class, and the
+ // access includes the access of the base.
+ for (const auto &BaseSpec : Class->bases()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(
+ BaseSpec.getType()->castAs<RecordType>()->getDecl());
+ LookupResult Result(*this, R.getLookupNameInfo(), R.getLookupKind());
+ Result.setBaseObjectType(Context.getRecordType(Class));
+ LookupQualifiedName(Result, RD);
+
+ // Copy the lookup results into the target, merging the base's access into
+ // the path access.
+ for (auto I = Result.begin(), E = Result.end(); I != E; ++I) {
+ R.addDecl(I.getDecl(),
+ CXXRecordDecl::MergeAccess(BaseSpec.getAccessSpecifier(),
+ I.getAccess()));
+ }
+
+ Result.suppressDiagnostics();
+ }
+
+ R.resolveKind();
+ R.setNamingClass(Class);
+
+ return !R.empty();
+}
+
+/// \brief Produce a diagnostic describing the ambiguity that resulted
+/// from name lookup.
+///
+/// \param Result The result of the ambiguous lookup to be diagnosed.
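+///
+/// A sketch of the classic ambiguous-subobject case this reports:
+/// \code
+/// struct A { int m; };
+/// struct B : A { };
+/// struct C : A { };
+/// struct D : B, C {
+/// int f() { return m; } // ambiguous: B's A::m or C's A::m?
+/// };
+/// \endcode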
+void Sema::DiagnoseAmbiguousLookup(LookupResult &Result) {
+ assert(Result.isAmbiguous() && "Lookup result must be ambiguous");
+
+ DeclarationName Name = Result.getLookupName();
+ SourceLocation NameLoc = Result.getNameLoc();
+ SourceRange LookupRange = Result.getContextRange();
+
+ switch (Result.getAmbiguityKind()) {
+ case LookupResult::AmbiguousBaseSubobjects: {
+ CXXBasePaths *Paths = Result.getBasePaths();
+ QualType SubobjectType = Paths->front().back().Base->getType();
+ Diag(NameLoc, diag::err_ambiguous_member_multiple_subobjects)
+ << Name << SubobjectType << getAmbiguousPathsDisplayString(*Paths)
+ << LookupRange;
+
+ DeclContext::lookup_iterator Found = Paths->front().Decls.begin();
+ while (isa<CXXMethodDecl>(*Found) &&
+ cast<CXXMethodDecl>(*Found)->isStatic())
+ ++Found;
+
+ Diag((*Found)->getLocation(), diag::note_ambiguous_member_found);
+ break;
+ }
+
+ case LookupResult::AmbiguousBaseSubobjectTypes: {
+ Diag(NameLoc, diag::err_ambiguous_member_multiple_subobject_types)
+ << Name << LookupRange;
+
+ CXXBasePaths *Paths = Result.getBasePaths();
+ std::set<Decl *> DeclsPrinted;
+ for (CXXBasePaths::paths_iterator Path = Paths->begin(),
+ PathEnd = Paths->end();
+ Path != PathEnd; ++Path) {
+ Decl *D = Path->Decls.front();
+ if (DeclsPrinted.insert(D).second)
+ Diag(D->getLocation(), diag::note_ambiguous_member_found);
+ }
+ break;
+ }
+
+ case LookupResult::AmbiguousTagHiding: {
+ Diag(NameLoc, diag::err_ambiguous_tag_hiding) << Name << LookupRange;
+
+ llvm::SmallPtrSet<NamedDecl*, 8> TagDecls;
+
+ for (auto *D : Result)
+ if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ TagDecls.insert(TD);
+ Diag(TD->getLocation(), diag::note_hidden_tag);
+ }
+
+ for (auto *D : Result)
+ if (!isa<TagDecl>(D))
+ Diag(D->getLocation(), diag::note_hiding_object);
+
+ // For recovery purposes, go ahead and implement the hiding.
+ LookupResult::Filter F = Result.makeFilter();
+ while (F.hasNext()) {
+ if (TagDecls.count(F.next()))
+ F.erase();
+ }
+ F.done();
+ break;
+ }
+
+ case LookupResult::AmbiguousReference: {
+ Diag(NameLoc, diag::err_ambiguous_reference) << Name << LookupRange;
+
+ for (auto *D : Result)
+ Diag(D->getLocation(), diag::note_ambiguous_candidate) << D;
+ break;
+ }
+ }
+}
+
+namespace {
+ struct AssociatedLookup {
+ AssociatedLookup(Sema &S, SourceLocation InstantiationLoc,
+ Sema::AssociatedNamespaceSet &Namespaces,
+ Sema::AssociatedClassSet &Classes)
+ : S(S), Namespaces(Namespaces), Classes(Classes),
+ InstantiationLoc(InstantiationLoc) {
+ }
+
+ Sema &S;
+ Sema::AssociatedNamespaceSet &Namespaces;
+ Sema::AssociatedClassSet &Classes;
+ SourceLocation InstantiationLoc;
+ };
+} // end anonymous namespace
+
+static void
+addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType T);
+
+static void CollectEnclosingNamespace(Sema::AssociatedNamespaceSet &Namespaces,
+ DeclContext *Ctx) {
+ // Add the associated namespace for this class.
+
+ // We don't use DeclContext::getEnclosingNamespaceContext() as this may
+ // be a locally scoped record.
+
+ // We skip out of inline namespaces. The innermost non-inline namespace
+ // contains all names of all its nested inline namespaces anyway, so we can
+ // replace the entire inline namespace tree with its root.
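+ //
+ // Illustrative only:
+ // namespace A { inline namespace B { struct S {}; } }
+ // records A (not A::B) as the associated namespace for S.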
+ while (Ctx->isRecord() || Ctx->isTransparentContext() ||
+ Ctx->isInlineNamespace())
+ Ctx = Ctx->getParent();
+
+ if (Ctx->isFileContext())
+ Namespaces.insert(Ctx->getPrimaryContext());
+}
+
+// \brief Add the associated classes and namespaces for argument-dependent
+// lookup that involves a template argument (C++ [basic.lookup.koenig]p2).
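+//
+// Illustrative only: given
+// namespace N { struct X {}; void f(std::vector<X>); }
+// the call f(std::vector<N::X>()) associates namespace N through the
+// template argument N::X, so unqualified lookup of 'f' succeeds via ADL.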
+static void
+addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
+ const TemplateArgument &Arg) {
+ // C++ [basic.lookup.koenig]p2, last bullet:
+ // -- [...] ;
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ break;
+
+ case TemplateArgument::Type:
+ // [...] the namespaces and classes associated with the types of the
+ // template arguments provided for template type parameters (excluding
+ // template template parameters)
+ addAssociatedClassesAndNamespaces(Result, Arg.getAsType());
+ break;
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion: {
+ // [...] the namespaces in which any template template arguments are
+ // defined; and the classes in which any member templates used as
+ // template template arguments are defined.
+ TemplateName Template = Arg.getAsTemplateOrTemplatePattern();
+ if (ClassTemplateDecl *ClassTemplate
+ = dyn_cast<ClassTemplateDecl>(Template.getAsTemplateDecl())) {
+ DeclContext *Ctx = ClassTemplate->getDeclContext();
+ if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
+ Result.Classes.insert(EnclosingClass);
+ // Add the associated namespace for this class.
+ CollectEnclosingNamespace(Result.Namespaces, Ctx);
+ }
+ break;
+ }
+
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Expression:
+ case TemplateArgument::NullPtr:
+ // [Note: non-type template arguments do not contribute to the set of
+ // associated namespaces. ]
+ break;
+
+ case TemplateArgument::Pack:
+ for (const auto &P : Arg.pack_elements())
+ addAssociatedClassesAndNamespaces(Result, P);
+ break;
+ }
+}
+
+// \brief Add the associated classes and namespaces for
+// argument-dependent lookup with an argument of class type
+// (C++ [basic.lookup.koenig]p2).
+static void
+addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
+ CXXRecordDecl *Class) {
+
+ // Just silently ignore anything whose name is __va_list_tag.
+ if (Class->getDeclName() == Result.S.VAListTagName)
+ return;
+
+ // C++ [basic.lookup.koenig]p2:
+ // [...]
+ // -- If T is a class type (including unions), its associated
+ // classes are: the class itself; the class of which it is a
+ // member, if any; and its direct and indirect base
+ // classes. Its associated namespaces are the namespaces in
+ // which its associated classes are defined.
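+ //
+ // Illustrative only: with
+ // namespace N { struct Outer { struct Inner {}; }; void f(Outer::Inner); }
+ // an argument of type N::Outer::Inner associates Inner, Outer, and
+ // namespace N, so the unqualified call f(N::Outer::Inner()) finds N::f.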
+
+ // Add the class of which it is a member, if any.
+ DeclContext *Ctx = Class->getDeclContext();
+ if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
+ Result.Classes.insert(EnclosingClass);
+ // Add the associated namespace for this class.
+ CollectEnclosingNamespace(Result.Namespaces, Ctx);
+
+ // Add the class itself. If we've already seen this class, we don't
+ // need to visit base classes.
+ //
+ // FIXME: That's not correct, we may have added this class only because it
+ // was the enclosing class of another class, and in that case we won't have
+ // added its base classes yet.
+ if (!Result.Classes.insert(Class).second)
+ return;
+
+ // -- If T is a template-id, its associated namespaces and classes are
+ // the namespace in which the template is defined; for member
+ // templates, the member template's class; the namespaces and classes
+ // associated with the types of the template arguments provided for
+ // template type parameters (excluding template template parameters); the
+ // namespaces in which any template template arguments are defined; and
+ // the classes in which any member templates used as template template
+ // arguments are defined. [Note: non-type template arguments do not
+ // contribute to the set of associated namespaces. ]
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Class)) {
+ DeclContext *Ctx = Spec->getSpecializedTemplate()->getDeclContext();
+ if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
+ Result.Classes.insert(EnclosingClass);
+ // Add the associated namespace for this class.
+ CollectEnclosingNamespace(Result.Namespaces, Ctx);
+
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
+ addAssociatedClassesAndNamespaces(Result, TemplateArgs[I]);
+ }
+
+ // Only recurse into base classes for complete types.
+ if (!Result.S.isCompleteType(Result.InstantiationLoc,
+ Result.S.Context.getRecordType(Class)))
+ return;
+
+ // Add direct and indirect base classes along with their associated
+ // namespaces.
+ SmallVector<CXXRecordDecl *, 32> Bases;
+ Bases.push_back(Class);
+ while (!Bases.empty()) {
+ // Pop this class off the stack.
+ Class = Bases.pop_back_val();
+
+ // Visit the base classes.
+ for (const auto &Base : Class->bases()) {
+ const RecordType *BaseType = Base.getType()->getAs<RecordType>();
+ // In dependent contexts, we do ADL twice, and the first time around,
+ // the base type might be a dependent TemplateSpecializationType, or a
+ // TemplateTypeParmType. If that happens, simply ignore it.
+ // FIXME: If we want to support export, we probably need to add the
+ // namespace of the template in a TemplateSpecializationType, or even
+ // the classes and namespaces of known non-dependent arguments.
+ if (!BaseType)
+ continue;
+ CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ if (Result.Classes.insert(BaseDecl).second) {
+ // Find the associated namespace for this base class.
+ DeclContext *BaseCtx = BaseDecl->getDeclContext();
+ CollectEnclosingNamespace(Result.Namespaces, BaseCtx);
+
+ // Make sure we visit the bases of this base class.
+ if (BaseDecl->bases_begin() != BaseDecl->bases_end())
+ Bases.push_back(BaseDecl);
+ }
+ }
+ }
+}
+
+// \brief Add the associated classes and namespaces for
+// argument-dependent lookup with an argument of type T
+// (C++ [basic.lookup.koenig]p2).
+static void
+addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
+ // C++ [basic.lookup.koenig]p2:
+ //
+ // For each argument type T in the function call, there is a set
+ // of zero or more associated namespaces and a set of zero or more
+ // associated classes to be considered. The sets of namespaces and
+ // classes is determined entirely by the types of the function
+ // arguments (and the namespace of any template template
+ // argument). Typedef names and using-declarations used to specify
+ // the types do not contribute to this set. The sets of namespaces
+ // and classes are determined in the following way:
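+ //
+ // Illustrative only: an argument of type N::X* carries the associations
+ // of N::X, so with
+ // namespace N { struct X {}; void f(X *); }
+ // the unqualified call f(p) on an N::X* still finds N::f through ADL.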
+
+ SmallVector<const Type *, 16> Queue;
+ const Type *T = Ty->getCanonicalTypeInternal().getTypePtr();
+
+ while (true) {
+ switch (T->getTypeClass()) {
+
+#define TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define ABSTRACT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+ // T is canonical. We can also ignore dependent types because
+ // we don't need to do ADL at the definition point, but if we
+ // wanted to implement template export (or if we find some other
+ // use for associated classes and namespaces...) this would be
+ // wrong.
+ break;
+
+ // -- If T is a pointer to U or an array of U, its associated
+ // namespaces and classes are those associated with U.
+ case Type::Pointer:
+ T = cast<PointerType>(T)->getPointeeType().getTypePtr();
+ continue;
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ T = cast<ArrayType>(T)->getElementType().getTypePtr();
+ continue;
+
+ // -- If T is a fundamental type, its associated sets of
+ // namespaces and classes are both empty.
+ case Type::Builtin:
+ break;
+
+ // -- If T is a class type (including unions), its associated
+ // classes are: the class itself; the class of which it is a
+ // member, if any; and its direct and indirect base
+ // classes. Its associated namespaces are the namespaces in
+ // which its associated classes are defined.
+ case Type::Record: {
+ CXXRecordDecl *Class =
+ cast<CXXRecordDecl>(cast<RecordType>(T)->getDecl());
+ addAssociatedClassesAndNamespaces(Result, Class);
+ break;
+ }
+
+ // -- If T is an enumeration type, its associated namespace is
+ // the namespace in which it is defined. If it is a class
+ // member, its associated class is the member's class; else
+ // it has no associated class.
+ case Type::Enum: {
+ EnumDecl *Enum = cast<EnumType>(T)->getDecl();
+
+ DeclContext *Ctx = Enum->getDeclContext();
+ if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
+ Result.Classes.insert(EnclosingClass);
+
+ // Add the associated namespace for this class.
+ CollectEnclosingNamespace(Result.Namespaces, Ctx);
+
+ break;
+ }
+
+ // -- If T is a function type, its associated namespaces and
+ // classes are those associated with the function parameter
+ // types and those associated with the return type.
+ case Type::FunctionProto: {
+ const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
+ for (const auto &Arg : Proto->param_types())
+ Queue.push_back(Arg.getTypePtr());
+ // fallthrough
+ }
+ case Type::FunctionNoProto: {
+ const FunctionType *FnType = cast<FunctionType>(T);
+ T = FnType->getReturnType().getTypePtr();
+ continue;
+ }
+
+ // -- If T is a pointer to a member function of a class X, its
+ // associated namespaces and classes are those associated
+ // with the function parameter types and return type,
+ // together with those associated with X.
+ //
+ // -- If T is a pointer to a data member of class X, its
+ // associated namespaces and classes are those associated
+ // with the member type together with those associated with
+ // X.
+ case Type::MemberPointer: {
+ const MemberPointerType *MemberPtr = cast<MemberPointerType>(T);
+
+ // Queue up the class type into which this points.
+ Queue.push_back(MemberPtr->getClass());
+
+ // And directly continue with the pointee type.
+ T = MemberPtr->getPointeeType().getTypePtr();
+ continue;
+ }
+
+ // As an extension, treat this like a normal pointer.
+ case Type::BlockPointer:
+ T = cast<BlockPointerType>(T)->getPointeeType().getTypePtr();
+ continue;
+
+ // References aren't covered by the standard, but that's such an
+ // obvious defect that we cover them anyway.
+ case Type::LValueReference:
+ case Type::RValueReference:
+ T = cast<ReferenceType>(T)->getPointeeType().getTypePtr();
+ continue;
+
+ // These are fundamental types.
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::Complex:
+ break;
+
+ // Non-deduced auto types only get here for error cases.
+ case Type::Auto:
+ break;
+
+ // If T is an Objective-C object or interface type, or a pointer to an
+ // object or interface type, the associated namespace is the global
+ // namespace.
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ Result.Namespaces.insert(Result.S.Context.getTranslationUnitDecl());
+ break;
+
+ // Atomic types are just wrappers; use the associations of the
+ // contained type.
+ case Type::Atomic:
+ T = cast<AtomicType>(T)->getValueType().getTypePtr();
+ continue;
+ }
+
+ if (Queue.empty())
+ break;
+ T = Queue.pop_back_val();
+ }
+}
+
+/// \brief Find the associated classes and namespaces for
+/// argument-dependent lookup for a call with the given set of
+/// arguments.
+///
+/// This routine computes the sets of associated classes and associated
+/// namespaces searched by argument-dependent lookup
+/// (C++ [basic.lookup.argdep]) for a given set of arguments.
+void Sema::FindAssociatedClassesAndNamespaces(
+ SourceLocation InstantiationLoc, ArrayRef<Expr *> Args,
+ AssociatedNamespaceSet &AssociatedNamespaces,
+ AssociatedClassSet &AssociatedClasses) {
+ AssociatedNamespaces.clear();
+ AssociatedClasses.clear();
+
+ AssociatedLookup Result(*this, InstantiationLoc,
+ AssociatedNamespaces, AssociatedClasses);
+
+ // C++ [basic.lookup.koenig]p2:
+ // For each argument type T in the function call, there is a set
+ // of zero or more associated namespaces and a set of zero or more
+ // associated classes to be considered. The sets of namespaces and
+ // classes is determined entirely by the types of the function
+ // arguments (and the namespace of any template template
+ // argument).
+ for (unsigned ArgIdx = 0; ArgIdx != Args.size(); ++ArgIdx) {
+ Expr *Arg = Args[ArgIdx];
+
+ if (Arg->getType() != Context.OverloadTy) {
+ addAssociatedClassesAndNamespaces(Result, Arg->getType());
+ continue;
+ }
+
+ // [...] In addition, if the argument is the name or address of a
+ // set of overloaded functions and/or function templates, its
+ // associated classes and namespaces are the union of those
+ // associated with each of the members of the set: the namespace
+ // in which the function or function template is defined and the
+ // classes and namespaces associated with its (non-dependent)
+ // parameter types and return type.
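+ //
+ // Illustrative only: if the argument names an overload set whose
+ // members take an N::X parameter, namespace N becomes associated even
+ // though overload resolution has not yet selected a member.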
+ Arg = Arg->IgnoreParens();
+ if (UnaryOperator *unaryOp = dyn_cast<UnaryOperator>(Arg))
+ if (unaryOp->getOpcode() == UO_AddrOf)
+ Arg = unaryOp->getSubExpr();
+
+ UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(Arg);
+ if (!ULE) continue;
+
+ for (const auto *D : ULE->decls()) {
+ // Look through any using declarations to find the underlying function.
+ const FunctionDecl *FDecl = D->getUnderlyingDecl()->getAsFunction();
+
+ // Add the classes and namespaces associated with the parameter
+ // types and return type of this function.
+ addAssociatedClassesAndNamespaces(Result, FDecl->getType());
+ }
+ }
+}
+
+NamedDecl *Sema::LookupSingleName(Scope *S, DeclarationName Name,
+ SourceLocation Loc,
+ LookupNameKind NameKind,
+ RedeclarationKind Redecl) {
+ LookupResult R(*this, Name, Loc, NameKind, Redecl);
+ LookupName(R, S);
+ return R.getAsSingle<NamedDecl>();
+}
+
+/// \brief Find the protocol with the given name, if any.
+ObjCProtocolDecl *Sema::LookupProtocol(IdentifierInfo *II,
+ SourceLocation IdLoc,
+ RedeclarationKind Redecl) {
+ Decl *D = LookupSingleName(TUScope, II, IdLoc,
+ LookupObjCProtocolName, Redecl);
+ return cast_or_null<ObjCProtocolDecl>(D);
+}
+
+void Sema::LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
+ QualType T1, QualType T2,
+ UnresolvedSetImpl &Functions) {
+ // C++ [over.match.oper]p3:
+ // -- The set of non-member candidates is the result of the
+ // unqualified lookup of operator@ in the context of the
+ // expression according to the usual rules for name lookup in
+ // unqualified function calls (3.4.2) except that all member
+ // functions are ignored.
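+ //
+ // Illustrative only: for "a + b" this collects namespace-scope
+ // declarations such as "T operator+(const T &, const T &)" that are
+ // visible from the expression; member operator candidates are gathered
+ // separately during overload resolution.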
+ DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op);
+ LookupResult Operators(*this, OpName, SourceLocation(), LookupOperatorName);
+ LookupName(Operators, S);
+
+ assert(!Operators.isAmbiguous() && "Operator lookup cannot be ambiguous");
+ Functions.append(Operators.begin(), Operators.end());
+}
+
+Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *RD,
+ CXXSpecialMember SM,
+ bool ConstArg,
+ bool VolatileArg,
+ bool RValueThis,
+ bool ConstThis,
+ bool VolatileThis) {
+ assert(CanDeclareSpecialMemberFunction(RD) &&
+ "doing special member lookup into record that isn't fully complete");
+ RD = RD->getDefinition();
+ if (RValueThis || ConstThis || VolatileThis)
+ assert((SM == CXXCopyAssignment || SM == CXXMoveAssignment) &&
+ "constructors and destructors always have unqualified lvalue this");
+ if (ConstArg || VolatileArg)
+ assert((SM != CXXDefaultConstructor && SM != CXXDestructor) &&
+ "parameter-less special members can't have qualified arguments");
+
+ llvm::FoldingSetNodeID ID;
+ ID.AddPointer(RD);
+ ID.AddInteger(SM);
+ ID.AddInteger(ConstArg);
+ ID.AddInteger(VolatileArg);
+ ID.AddInteger(RValueThis);
+ ID.AddInteger(ConstThis);
+ ID.AddInteger(VolatileThis);
+
+ void *InsertPoint;
+ SpecialMemberOverloadResult *Result =
+ SpecialMemberCache.FindNodeOrInsertPos(ID, InsertPoint);
+
+ // This was already cached
+ if (Result)
+ return Result;
+
+ Result = BumpAlloc.Allocate<SpecialMemberOverloadResult>();
+ Result = new (Result) SpecialMemberOverloadResult(ID);
+ SpecialMemberCache.InsertNode(Result, InsertPoint);
+
+ if (SM == CXXDestructor) {
+ if (RD->needsImplicitDestructor())
+ DeclareImplicitDestructor(RD);
+ CXXDestructorDecl *DD = RD->getDestructor();
+ assert(DD && "record without a destructor");
+ Result->setMethod(DD);
+ Result->setKind(DD->isDeleted() ?
+ SpecialMemberOverloadResult::NoMemberOrDeleted :
+ SpecialMemberOverloadResult::Success);
+ return Result;
+ }
+
+ // Prepare for overload resolution. Here we construct a synthetic argument
+ // if necessary and make sure that implicit functions are declared.
+ CanQualType CanTy = Context.getCanonicalType(Context.getTagDeclType(RD));
+ DeclarationName Name;
+ Expr *Arg = nullptr;
+ unsigned NumArgs;
+
+ QualType ArgType = CanTy;
+ ExprValueKind VK = VK_LValue;
+
+ if (SM == CXXDefaultConstructor) {
+ Name = Context.DeclarationNames.getCXXConstructorName(CanTy);
+ NumArgs = 0;
+ if (RD->needsImplicitDefaultConstructor())
+ DeclareImplicitDefaultConstructor(RD);
+ } else {
+ if (SM == CXXCopyConstructor || SM == CXXMoveConstructor) {
+ Name = Context.DeclarationNames.getCXXConstructorName(CanTy);
+ if (RD->needsImplicitCopyConstructor())
+ DeclareImplicitCopyConstructor(RD);
+ if (getLangOpts().CPlusPlus11 && RD->needsImplicitMoveConstructor())
+ DeclareImplicitMoveConstructor(RD);
+ } else {
+ Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
+ if (RD->needsImplicitCopyAssignment())
+ DeclareImplicitCopyAssignment(RD);
+ if (getLangOpts().CPlusPlus11 && RD->needsImplicitMoveAssignment())
+ DeclareImplicitMoveAssignment(RD);
+ }
+
+ if (ConstArg)
+ ArgType.addConst();
+ if (VolatileArg)
+ ArgType.addVolatile();
+
+ // This isn't /really/ specified by the standard, but it's implied
+ // we should be working from an RValue in the case of move to ensure
+ // that we prefer to bind to rvalue references, and an LValue in the
+ // case of copy to ensure we don't bind to rvalue references.
+ // Possibly an XValue is actually correct in the case of move, but
+ // there is no semantic difference for class types in this restricted
+ // case.
+ if (SM == CXXCopyConstructor || SM == CXXCopyAssignment)
+ VK = VK_LValue;
+ else
+ VK = VK_RValue;
+ }
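+
+ // e.g. for CXXMoveConstructor the synthetic argument FakeArg below is
+ // an rvalue of type T, so overload resolution prefers T(T &&) over
+ // T(const T &).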
+
+ OpaqueValueExpr FakeArg(SourceLocation(), ArgType, VK);
+
+ if (SM != CXXDefaultConstructor) {
+ NumArgs = 1;
+ Arg = &FakeArg;
+ }
+
+ // Create the object argument
+ QualType ThisTy = CanTy;
+ if (ConstThis)
+ ThisTy.addConst();
+ if (VolatileThis)
+ ThisTy.addVolatile();
+ Expr::Classification Classification =
+ OpaqueValueExpr(SourceLocation(), ThisTy,
+ RValueThis ? VK_RValue : VK_LValue).Classify(Context);
+
+ // Now we perform lookup on the name we computed earlier and do overload
+ // resolution. Lookup is only performed directly into the class since there
+ // will always be a (possibly implicit) declaration to shadow any others.
+ OverloadCandidateSet OCS(RD->getLocation(), OverloadCandidateSet::CSK_Normal);
+ DeclContext::lookup_result R = RD->lookup(Name);
+
+ if (R.empty()) {
+ // We might have no default constructor because we have a lambda's closure
+ // type, rather than because there's some other declared constructor.
+ // Every class has a copy/move constructor, copy/move assignment, and
+ // destructor.
+ assert(SM == CXXDefaultConstructor &&
+ "lookup for a constructor or assignment operator was empty");
+ Result->setMethod(nullptr);
+ Result->setKind(SpecialMemberOverloadResult::NoMemberOrDeleted);
+ return Result;
+ }
+
+ // Copy the candidates as our processing of them may load new declarations
+ // from an external source and invalidate lookup_result.
+ SmallVector<NamedDecl *, 8> Candidates(R.begin(), R.end());
+
+ for (auto *Cand : Candidates) {
+ if (Cand->isInvalidDecl())
+ continue;
+
+ if (UsingShadowDecl *U = dyn_cast<UsingShadowDecl>(Cand)) {
+ // FIXME: [namespace.udecl]p15 says that we should only consider a
+ // using declaration here if it does not match a declaration in the
+ // derived class. We do not implement this correctly in other cases
+ // either.
+ Cand = U->getTargetDecl();
+
+ if (Cand->isInvalidDecl())
+ continue;
+ }
+
+ if (CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(Cand)) {
+ if (SM == CXXCopyAssignment || SM == CXXMoveAssignment)
+ AddMethodCandidate(M, DeclAccessPair::make(M, AS_public), RD, ThisTy,
+ Classification, llvm::makeArrayRef(&Arg, NumArgs),
+ OCS, true);
+ else
+ AddOverloadCandidate(M, DeclAccessPair::make(M, AS_public),
+ llvm::makeArrayRef(&Arg, NumArgs), OCS, true);
+ } else if (FunctionTemplateDecl *Tmpl =
+ dyn_cast<FunctionTemplateDecl>(Cand)) {
+ if (SM == CXXCopyAssignment || SM == CXXMoveAssignment)
+ AddMethodTemplateCandidate(Tmpl, DeclAccessPair::make(Tmpl, AS_public),
+ RD, nullptr, ThisTy, Classification,
+ llvm::makeArrayRef(&Arg, NumArgs),
+ OCS, true);
+ else
+ AddTemplateOverloadCandidate(Tmpl, DeclAccessPair::make(Tmpl, AS_public),
+ nullptr, llvm::makeArrayRef(&Arg, NumArgs),
+ OCS, true);
+ } else {
+ assert(isa<UsingDecl>(Cand) && "illegal Kind of operator = Decl");
+ }
+ }
+
+ OverloadCandidateSet::iterator Best;
+ switch (OCS.BestViableFunction(*this, SourceLocation(), Best)) {
+ case OR_Success:
+ Result->setMethod(cast<CXXMethodDecl>(Best->Function));
+ Result->setKind(SpecialMemberOverloadResult::Success);
+ break;
+
+ case OR_Deleted:
+ Result->setMethod(cast<CXXMethodDecl>(Best->Function));
+ Result->setKind(SpecialMemberOverloadResult::NoMemberOrDeleted);
+ break;
+
+ case OR_Ambiguous:
+ Result->setMethod(nullptr);
+ Result->setKind(SpecialMemberOverloadResult::Ambiguous);
+ break;
+
+ case OR_No_Viable_Function:
+ Result->setMethod(nullptr);
+ Result->setKind(SpecialMemberOverloadResult::NoMemberOrDeleted);
+ break;
+ }
+
+ return Result;
+}
+
+/// \brief Look up the default constructor for the given class.
+CXXConstructorDecl *Sema::LookupDefaultConstructor(CXXRecordDecl *Class) {
+ SpecialMemberOverloadResult *Result =
+ LookupSpecialMember(Class, CXXDefaultConstructor, false, false, false,
+ false, false);
+
+ return cast_or_null<CXXConstructorDecl>(Result->getMethod());
+}
+
+/// \brief Look up the copying constructor for the given class.
+CXXConstructorDecl *Sema::LookupCopyingConstructor(CXXRecordDecl *Class,
+ unsigned Quals) {
+ assert(!(Quals & ~(Qualifiers::Const | Qualifiers::Volatile)) &&
+ "non-const, non-volatile qualifiers for copy ctor arg");
+ SpecialMemberOverloadResult *Result =
+ LookupSpecialMember(Class, CXXCopyConstructor, Quals & Qualifiers::Const,
+ Quals & Qualifiers::Volatile, false, false, false);
+
+ return cast_or_null<CXXConstructorDecl>(Result->getMethod());
+}
+
+/// \brief Look up the moving constructor for the given class.
+CXXConstructorDecl *Sema::LookupMovingConstructor(CXXRecordDecl *Class,
+ unsigned Quals) {
+ SpecialMemberOverloadResult *Result =
+ LookupSpecialMember(Class, CXXMoveConstructor, Quals & Qualifiers::Const,
+ Quals & Qualifiers::Volatile, false, false, false);
+
+ return cast_or_null<CXXConstructorDecl>(Result->getMethod());
+}
+
+/// \brief Look up the constructors for the given class.
+DeclContext::lookup_result Sema::LookupConstructors(CXXRecordDecl *Class) {
+ // If the implicit constructors have not yet been declared, do so now.
+ if (CanDeclareSpecialMemberFunction(Class)) {
+ if (Class->needsImplicitDefaultConstructor())
+ DeclareImplicitDefaultConstructor(Class);
+ if (Class->needsImplicitCopyConstructor())
+ DeclareImplicitCopyConstructor(Class);
+ if (getLangOpts().CPlusPlus11 && Class->needsImplicitMoveConstructor())
+ DeclareImplicitMoveConstructor(Class);
+ }
+
+ CanQualType T = Context.getCanonicalType(Context.getTypeDeclType(Class));
+ DeclarationName Name = Context.DeclarationNames.getCXXConstructorName(T);
+ return Class->lookup(Name);
+}
+
+/// \brief Look up the copying assignment operator for the given class.
+CXXMethodDecl *Sema::LookupCopyingAssignment(CXXRecordDecl *Class,
+ unsigned Quals, bool RValueThis,
+ unsigned ThisQuals) {
+ assert(!(Quals & ~(Qualifiers::Const | Qualifiers::Volatile)) &&
+ "non-const, non-volatile qualifiers for copy assignment arg");
+ assert(!(ThisQuals & ~(Qualifiers::Const | Qualifiers::Volatile)) &&
+ "non-const, non-volatile qualifiers for copy assignment this");
+ SpecialMemberOverloadResult *Result =
+ LookupSpecialMember(Class, CXXCopyAssignment, Quals & Qualifiers::Const,
+ Quals & Qualifiers::Volatile, RValueThis,
+ ThisQuals & Qualifiers::Const,
+ ThisQuals & Qualifiers::Volatile);
+
+ return Result->getMethod();
+}
+
+/// \brief Look up the moving assignment operator for the given class.
+CXXMethodDecl *Sema::LookupMovingAssignment(CXXRecordDecl *Class,
+ unsigned Quals,
+ bool RValueThis,
+ unsigned ThisQuals) {
+ assert(!(ThisQuals & ~(Qualifiers::Const | Qualifiers::Volatile)) &&
+ "non-const, non-volatile qualifiers for copy assignment this");
+ SpecialMemberOverloadResult *Result =
+ LookupSpecialMember(Class, CXXMoveAssignment, Quals & Qualifiers::Const,
+ Quals & Qualifiers::Volatile, RValueThis,
+ ThisQuals & Qualifiers::Const,
+ ThisQuals & Qualifiers::Volatile);
+
+ return Result->getMethod();
+}
+
+/// \brief Look for the destructor of the given class.
+///
+/// During semantic analysis, this routine should be used in lieu of
+/// CXXRecordDecl::getDestructor().
+///
+/// \returns The destructor for this class.
+CXXDestructorDecl *Sema::LookupDestructor(CXXRecordDecl *Class) {
+ return cast<CXXDestructorDecl>(LookupSpecialMember(Class, CXXDestructor,
+ false, false, false,
+ false, false)->getMethod());
+}
+
+/// LookupLiteralOperator - Determine which literal operator should be used for
+/// a user-defined literal, per C++11 [lex.ext].
+///
+/// Normal overload resolution is not used to select which literal operator to
+/// call for a user-defined literal. Look up the provided literal operator name,
+/// and filter the results to the appropriate set for the given argument types.
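+///
+/// Illustrative sketch (not part of this change): for "123_x", a cooked
+/// operator taking unsigned long long wins; a raw operator is only a
+/// fallback:
+/// \code
+/// int operator"" _x(unsigned long long); // cooked form, preferred
+/// int operator"" _x(const char *); // raw form, fallback
+/// \endcode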
+Sema::LiteralOperatorLookupResult
+Sema::LookupLiteralOperator(Scope *S, LookupResult &R,
+ ArrayRef<QualType> ArgTys,
+ bool AllowRaw, bool AllowTemplate,
+ bool AllowStringTemplate) {
+ LookupName(R, S);
+ assert(R.getResultKind() != LookupResult::Ambiguous &&
+ "literal operator lookup can't be ambiguous");
+
+ // Filter the lookup results appropriately.
+ LookupResult::Filter F = R.makeFilter();
+
+ bool FoundRaw = false;
+ bool FoundTemplate = false;
+ bool FoundStringTemplate = false;
+ bool FoundExactMatch = false;
+
+ while (F.hasNext()) {
+ Decl *D = F.next();
+ if (UsingShadowDecl *USD = dyn_cast<UsingShadowDecl>(D))
+ D = USD->getTargetDecl();
+
+ // If the declaration we found is invalid, skip it.
+ if (D->isInvalidDecl()) {
+ F.erase();
+ continue;
+ }
+
+ bool IsRaw = false;
+ bool IsTemplate = false;
+ bool IsStringTemplate = false;
+ bool IsExactMatch = false;
+
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->getNumParams() == 1 &&
+ FD->getParamDecl(0)->getType()->getAs<PointerType>())
+ IsRaw = true;
+ else if (FD->getNumParams() == ArgTys.size()) {
+ IsExactMatch = true;
+ for (unsigned ArgIdx = 0; ArgIdx != ArgTys.size(); ++ArgIdx) {
+ QualType ParamTy = FD->getParamDecl(ArgIdx)->getType();
+ if (!Context.hasSameUnqualifiedType(ArgTys[ArgIdx], ParamTy)) {
+ IsExactMatch = false;
+ break;
+ }
+ }
+ }
+ }
+ if (FunctionTemplateDecl *FD = dyn_cast<FunctionTemplateDecl>(D)) {
+ TemplateParameterList *Params = FD->getTemplateParameters();
+ if (Params->size() == 1)
+ IsTemplate = true;
+ else
+ IsStringTemplate = true;
+ }
+
+ if (IsExactMatch) {
+ FoundExactMatch = true;
+ AllowRaw = false;
+ AllowTemplate = false;
+ AllowStringTemplate = false;
+ if (FoundRaw || FoundTemplate || FoundStringTemplate) {
+ // Go through again and remove the raw and template decls we've
+ // already found.
+ F.restart();
+ FoundRaw = FoundTemplate = FoundStringTemplate = false;
+ }
+ } else if (AllowRaw && IsRaw) {
+ FoundRaw = true;
+ } else if (AllowTemplate && IsTemplate) {
+ FoundTemplate = true;
+ } else if (AllowStringTemplate && IsStringTemplate) {
+ FoundStringTemplate = true;
+ } else {
+ F.erase();
+ }
+ }
+
+ F.done();
+
+ // C++11 [lex.ext]p3, p4: If S contains a literal operator with a matching
+ // parameter type, that is used in preference to a raw literal operator
+ // or literal operator template.
+ if (FoundExactMatch)
+ return LOLR_Cooked;
+
+ // C++11 [lex.ext]p3, p4: S shall contain a raw literal operator or a literal
+ // operator template, but not both.
+ if (FoundRaw && FoundTemplate) {
+ Diag(R.getNameLoc(), diag::err_ovl_ambiguous_call) << R.getLookupName();
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I)
+ NoteOverloadCandidate((*I)->getUnderlyingDecl()->getAsFunction());
+ return LOLR_Error;
+ }
+
+ if (FoundRaw)
+ return LOLR_Raw;
+
+ if (FoundTemplate)
+ return LOLR_Template;
+
+ if (FoundStringTemplate)
+ return LOLR_StringTemplate;
+
+ // Didn't find anything we could use.
+ Diag(R.getNameLoc(), diag::err_ovl_no_viable_literal_operator)
+ << R.getLookupName() << (int)ArgTys.size() << ArgTys[0]
+ << (ArgTys.size() == 2 ? ArgTys[1] : QualType()) << AllowRaw
+ << (AllowTemplate || AllowStringTemplate);
+ return LOLR_Error;
+}
+
+void ADLResult::insert(NamedDecl *New) {
+ NamedDecl *&Old = Decls[cast<NamedDecl>(New->getCanonicalDecl())];
+
+ // If we haven't yet seen a decl for this key, or the last decl
+ // was exactly this one, we're done.
+ if (Old == nullptr || Old == New) {
+ Old = New;
+ return;
+ }
+
+ // Otherwise, decide which is a more recent redeclaration.
+ FunctionDecl *OldFD = Old->getAsFunction();
+ FunctionDecl *NewFD = New->getAsFunction();
+
+ FunctionDecl *Cursor = NewFD;
+ while (true) {
+ Cursor = Cursor->getPreviousDecl();
+
+ // If we got to the end without finding OldFD, OldFD is the newer
+ // declaration; leave things as they are.
+ if (!Cursor) return;
+
+ // If we do find OldFD, then NewFD is newer.
+ if (Cursor == OldFD) break;
+
+ // Otherwise, keep looking.
+ }
+
+ Old = New;
+}
+
+void Sema::ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
+ ArrayRef<Expr *> Args, ADLResult &Result) {
+ // Find all of the associated namespaces and classes based on the
+ // arguments we have.
+ AssociatedNamespaceSet AssociatedNamespaces;
+ AssociatedClassSet AssociatedClasses;
+ FindAssociatedClassesAndNamespaces(Loc, Args,
+ AssociatedNamespaces,
+ AssociatedClasses);
+
+ // C++ [basic.lookup.argdep]p3:
+ // Let X be the lookup set produced by unqualified lookup (3.4.1)
+ // and let Y be the lookup set produced by argument dependent
+ // lookup (defined as follows). If X contains [...] then Y is
+ // empty. Otherwise Y is the set of declarations found in the
+ // namespaces associated with the argument types as described
+ // below. The set of declarations found by the lookup of the name
+ // is the union of X and Y.
+ //
+ // Here, we compute Y and add its members to the overloaded
+ // candidate set.
+ for (auto *NS : AssociatedNamespaces) {
+ // When considering an associated namespace, the lookup is the
+ // same as the lookup performed when the associated namespace is
+ // used as a qualifier (3.4.3.2) except that:
+ //
+ // -- Any using-directives in the associated namespace are
+ // ignored.
+ //
+ // -- Any namespace-scope friend functions declared in
+ // associated classes are visible within their respective
+ // namespaces even if they are not visible during an ordinary
+ // lookup (11.4).
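+ //
+ // Illustrative only: the hidden-friend pattern depends on this rule:
+ // namespace N { struct X { friend void f(X); }; }
+ // void g(N::X x) { f(x); } // 'f' is reachable only through ADL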
+ DeclContext::lookup_result R = NS->lookup(Name);
+ for (auto *D : R) {
+ // If the only declaration here is an ordinary friend, consider
+ // it only if it was declared in an associated class.
+ if ((D->getIdentifierNamespace() & Decl::IDNS_Ordinary) == 0) {
+ // If it's neither ordinarily visible nor a friend, we can't find it.
+ if ((D->getIdentifierNamespace() & Decl::IDNS_OrdinaryFriend) == 0)
+ continue;
+
+ bool DeclaredInAssociatedClass = false;
+ for (Decl *DI = D; DI; DI = DI->getPreviousDecl()) {
+ DeclContext *LexDC = DI->getLexicalDeclContext();
+ if (isa<CXXRecordDecl>(LexDC) &&
+ AssociatedClasses.count(cast<CXXRecordDecl>(LexDC)) &&
+ isVisible(cast<NamedDecl>(DI))) {
+ DeclaredInAssociatedClass = true;
+ break;
+ }
+ }
+ if (!DeclaredInAssociatedClass)
+ continue;
+ }
+
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+
+ if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D))
+ continue;
+
+ if (!isVisible(D) && !(D = findAcceptableDecl(*this, D)))
+ continue;
+
+ Result.insert(D);
+ }
+ }
+}
+
+//----------------------------------------------------------------------------
+// Search for all visible declarations.
+//----------------------------------------------------------------------------
+VisibleDeclConsumer::~VisibleDeclConsumer() { }
+
+bool VisibleDeclConsumer::includeHiddenDecls() const { return false; }
+
+namespace {
+
+class ShadowContextRAII;
+
+class VisibleDeclsRecord {
+public:
+ /// \brief An entry in the shadow map, which is optimized to store a
+ /// single declaration (the common case) but can also store a list
+ /// of declarations.
+ typedef llvm::TinyPtrVector<NamedDecl*> ShadowMapEntry;
+
+private:
+ /// \brief A mapping from declaration names to the declarations that have
+ /// this name within a particular scope.
+ typedef llvm::DenseMap<DeclarationName, ShadowMapEntry> ShadowMap;
+
+ /// \brief A list of shadow maps, which is used to model name hiding.
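+ ///
+ /// Illustrative only: in
+ /// \code
+ /// int x;
+ /// void f() { double x; /* the inner 'x' shadows the outer one */ }
+ /// \endcode
+ /// each scope contributes one shadow map, and checkHidden() consults
+ /// them from innermost to outermost.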
+ std::list<ShadowMap> ShadowMaps;
+
+ /// \brief The declaration contexts we have already visited.
+ llvm::SmallPtrSet<DeclContext *, 8> VisitedContexts;
+
+ friend class ShadowContextRAII;
+
+public:
+ /// \brief Determine whether we have already visited this context
+ /// (and, if not, note that we are going to visit that context now).
+ bool visitedContext(DeclContext *Ctx) {
+ return !VisitedContexts.insert(Ctx).second;
+ }
+
+ bool alreadyVisitedContext(DeclContext *Ctx) {
+ return VisitedContexts.count(Ctx);
+ }
+
+ /// \brief Determine whether the given declaration is hidden in the
+ /// current scope.
+ ///
+ /// \returns the declaration that hides the given declaration, or
+ /// NULL if no such declaration exists.
+ NamedDecl *checkHidden(NamedDecl *ND);
+
+ /// \brief Add a declaration to the current shadow map.
+ void add(NamedDecl *ND) {
+ ShadowMaps.back()[ND->getDeclName()].push_back(ND);
+ }
+};
+
+/// \brief RAII object that records when we've entered a shadow context.
+class ShadowContextRAII {
+ VisibleDeclsRecord &Visible;
+
+ typedef VisibleDeclsRecord::ShadowMap ShadowMap;
+
+public:
+ ShadowContextRAII(VisibleDeclsRecord &Visible) : Visible(Visible) {
+ Visible.ShadowMaps.emplace_back();
+ }
+
+ ~ShadowContextRAII() {
+ Visible.ShadowMaps.pop_back();
+ }
+};
+
+} // end anonymous namespace
+
+NamedDecl *VisibleDeclsRecord::checkHidden(NamedDecl *ND) {
+ unsigned IDNS = ND->getIdentifierNamespace();
+ std::list<ShadowMap>::reverse_iterator SM = ShadowMaps.rbegin();
+ for (std::list<ShadowMap>::reverse_iterator SMEnd = ShadowMaps.rend();
+ SM != SMEnd; ++SM) {
+ ShadowMap::iterator Pos = SM->find(ND->getDeclName());
+ if (Pos == SM->end())
+ continue;
+
+ for (auto *D : Pos->second) {
+ // A tag declaration does not hide a non-tag declaration.
+ if (D->hasTagIdentifierNamespace() &&
+ (IDNS & (Decl::IDNS_Member | Decl::IDNS_Ordinary |
+ Decl::IDNS_ObjCProtocol)))
+ continue;
+
+ // Protocols are in distinct namespaces from everything else.
+ if (((D->getIdentifierNamespace() & Decl::IDNS_ObjCProtocol)
+ || (IDNS & Decl::IDNS_ObjCProtocol)) &&
+ D->getIdentifierNamespace() != IDNS)
+ continue;
+
+ // Functions and function templates in the same scope overload
+ // rather than hide. FIXME: Look for hiding based on function
+ // signatures!
+ if (D->getUnderlyingDecl()->isFunctionOrFunctionTemplate() &&
+ ND->getUnderlyingDecl()->isFunctionOrFunctionTemplate() &&
+ SM == ShadowMaps.rbegin())
+ continue;
+
+ // We've found a declaration that hides this one.
+ return D;
+ }
+ }
+
+ return nullptr;
+}
+
+static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
+ bool QualifiedNameLookup,
+ bool InBaseClass,
+ VisibleDeclConsumer &Consumer,
+ VisibleDeclsRecord &Visited) {
+ if (!Ctx)
+ return;
+
+ // Make sure we don't visit the same context twice.
+ if (Visited.visitedContext(Ctx->getPrimaryContext()))
+ return;
+
+ // Outside C++, lookup results for the TU live on identifiers.
+ if (isa<TranslationUnitDecl>(Ctx) &&
+ !Result.getSema().getLangOpts().CPlusPlus) {
+ auto &S = Result.getSema();
+ auto &Idents = S.Context.Idents;
+
+ // Ensure all external identifiers are in the identifier table.
+ if (IdentifierInfoLookup *External = Idents.getExternalIdentifierLookup()) {
+ std::unique_ptr<IdentifierIterator> Iter(External->getIdentifiers());
+ for (StringRef Name = Iter->Next(); !Name.empty(); Name = Iter->Next())
+ Idents.get(Name);
+ }
+
+ // Walk all lookup results in the TU for each identifier.
+ for (const auto &Ident : Idents) {
+ for (auto I = S.IdResolver.begin(Ident.getValue()),
+ E = S.IdResolver.end();
+ I != E; ++I) {
+ if (S.IdResolver.isDeclInScope(*I, Ctx)) {
+ if (NamedDecl *ND = Result.getAcceptableDecl(*I)) {
+ Consumer.FoundDecl(ND, Visited.checkHidden(ND), Ctx, InBaseClass);
+ Visited.add(ND);
+ }
+ }
+ }
+ }
+
+ return;
+ }
+
+ if (CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(Ctx))
+ Result.getSema().ForceDeclarationOfImplicitMembers(Class);
+
+ // Enumerate all of the results in this context.
+ for (DeclContextLookupResult R : Ctx->lookups()) {
+ for (auto *D : R) {
+ if (auto *ND = Result.getAcceptableDecl(D)) {
+ Consumer.FoundDecl(ND, Visited.checkHidden(ND), Ctx, InBaseClass);
+ Visited.add(ND);
+ }
+ }
+ }
+
+ // Traverse using directives for qualified name lookup.
+ if (QualifiedNameLookup) {
+ ShadowContextRAII Shadow(Visited);
+ for (auto I : Ctx->using_directives()) {
+ LookupVisibleDecls(I->getNominatedNamespace(), Result,
+ QualifiedNameLookup, InBaseClass, Consumer, Visited);
+ }
+ }
+
+ // Traverse the contexts of inherited C++ classes.
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Ctx)) {
+ if (!Record->hasDefinition())
+ return;
+
+ for (const auto &B : Record->bases()) {
+ QualType BaseType = B.getType();
+
+ // Don't look into dependent bases, because name lookup can't look
+ // there anyway.
+ if (BaseType->isDependentType())
+ continue;
+
+ const RecordType *Record = BaseType->getAs<RecordType>();
+ if (!Record)
+ continue;
+
+ // FIXME: It would be nice to be able to determine whether referencing
+ // a particular member would be ambiguous. For example, given
+ //
+ // struct A { int member; };
+ // struct B { int member; };
+ // struct C : A, B { };
+ //
+ // void f(C *c) { c->### }
+ //
+ // accessing 'member' would result in an ambiguity. However, we
+ // could be smart enough to qualify the member with the base
+ // class, e.g.,
+ //
+ // c->B::member
+ //
+ // or
+ //
+ // c->A::member
+
+ // Find results in this base class (and its bases).
+ ShadowContextRAII Shadow(Visited);
+ LookupVisibleDecls(Record->getDecl(), Result, QualifiedNameLookup,
+ true, Consumer, Visited);
+ }
+ }
+
+ // Traverse the contexts of Objective-C classes.
+ if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Ctx)) {
+ // Traverse categories.
+ for (auto *Cat : IFace->visible_categories()) {
+ ShadowContextRAII Shadow(Visited);
+ LookupVisibleDecls(Cat, Result, QualifiedNameLookup, false,
+ Consumer, Visited);
+ }
+
+ // Traverse protocols.
+ for (auto *I : IFace->all_referenced_protocols()) {
+ ShadowContextRAII Shadow(Visited);
+ LookupVisibleDecls(I, Result, QualifiedNameLookup, false, Consumer,
+ Visited);
+ }
+
+ // Traverse the superclass.
+ if (IFace->getSuperClass()) {
+ ShadowContextRAII Shadow(Visited);
+ LookupVisibleDecls(IFace->getSuperClass(), Result, QualifiedNameLookup,
+ true, Consumer, Visited);
+ }
+
+ // If there is an implementation, traverse it. We do this to find
+ // synthesized ivars.
+ if (IFace->getImplementation()) {
+ ShadowContextRAII Shadow(Visited);
+ LookupVisibleDecls(IFace->getImplementation(), Result,
+ QualifiedNameLookup, InBaseClass, Consumer, Visited);
+ }
+ } else if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Ctx)) {
+ for (auto *I : Protocol->protocols()) {
+ ShadowContextRAII Shadow(Visited);
+ LookupVisibleDecls(I, Result, QualifiedNameLookup, false, Consumer,
+ Visited);
+ }
+ } else if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(Ctx)) {
+ for (auto *I : Category->protocols()) {
+ ShadowContextRAII Shadow(Visited);
+ LookupVisibleDecls(I, Result, QualifiedNameLookup, false, Consumer,
+ Visited);
+ }
+
+ // If there is an implementation, traverse it.
+ if (Category->getImplementation()) {
+ ShadowContextRAII Shadow(Visited);
+ LookupVisibleDecls(Category->getImplementation(), Result,
+ QualifiedNameLookup, true, Consumer, Visited);
+ }
+ }
+}
+
+static void LookupVisibleDecls(Scope *S, LookupResult &Result,
+ UnqualUsingDirectiveSet &UDirs,
+ VisibleDeclConsumer &Consumer,
+ VisibleDeclsRecord &Visited) {
+ if (!S)
+ return;
+
+ if (!S->getEntity() ||
+ (!S->getParent() &&
+ !Visited.alreadyVisitedContext(S->getEntity())) ||
+ (S->getEntity())->isFunctionOrMethod()) {
+ FindLocalExternScope FindLocals(Result);
+ // Walk through the declarations in this Scope.
+ for (auto *D : S->decls()) {
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ if ((ND = Result.getAcceptableDecl(ND))) {
+ Consumer.FoundDecl(ND, Visited.checkHidden(ND), nullptr, false);
+ Visited.add(ND);
+ }
+ }
+ }
+
+ // FIXME: C++ [temp.local]p8
+ DeclContext *Entity = nullptr;
+ if (S->getEntity()) {
+ // Look into this scope's declaration context, along with any of its
+ // parent lookup contexts (e.g., enclosing classes), up to the point
+ // where we hit the context stored in the next outer scope.
+ Entity = S->getEntity();
+ DeclContext *OuterCtx = findOuterContext(S).first; // FIXME
+
+ for (DeclContext *Ctx = Entity; Ctx && !Ctx->Equals(OuterCtx);
+ Ctx = Ctx->getLookupParent()) {
+ if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(Ctx)) {
+ if (Method->isInstanceMethod()) {
+ // For instance methods, look for ivars in the method's interface.
+ LookupResult IvarResult(Result.getSema(), Result.getLookupName(),
+ Result.getNameLoc(), Sema::LookupMemberName);
+ if (ObjCInterfaceDecl *IFace = Method->getClassInterface()) {
+ LookupVisibleDecls(IFace, IvarResult, /*QualifiedNameLookup=*/false,
+ /*InBaseClass=*/false, Consumer, Visited);
+ }
+ }
+
+ // We've already performed all of the name lookup that we need
+ // to for Objective-C methods; the next context will be the
+ // outer scope.
+ break;
+ }
+
+ if (Ctx->isFunctionOrMethod())
+ continue;
+
+ LookupVisibleDecls(Ctx, Result, /*QualifiedNameLookup=*/false,
+ /*InBaseClass=*/false, Consumer, Visited);
+ }
+ } else if (!S->getParent()) {
+ // Look into the translation unit scope. We walk through the translation
+ // unit's declaration context, because the Scope itself won't have all of
+ // the declarations if we loaded a precompiled header.
+ // FIXME: We would like the translation unit's Scope object to point to the
+ // translation unit, so we don't need this special "if" branch. However,
+ // doing so would force the normal C++ name-lookup code to look into the
+ // translation unit decl when the IdentifierInfo chains would suffice.
+ // Once we fix that problem (which is part of a more general "don't look
+ // in DeclContexts unless we have to" optimization), we can eliminate this.
+ Entity = Result.getSema().Context.getTranslationUnitDecl();
+ LookupVisibleDecls(Entity, Result, /*QualifiedNameLookup=*/false,
+ /*InBaseClass=*/false, Consumer, Visited);
+ }
+
+ if (Entity) {
+ // Lookup visible declarations in any namespaces found by using
+ // directives.
+ for (const UnqualUsingEntry &UUE : UDirs.getNamespacesFor(Entity))
+ LookupVisibleDecls(const_cast<DeclContext *>(UUE.getNominatedNamespace()),
+ Result, /*QualifiedNameLookup=*/false,
+ /*InBaseClass=*/false, Consumer, Visited);
+ }
+
+ // Lookup names in the parent scope.
+ ShadowContextRAII Shadow(Visited);
+ LookupVisibleDecls(S->getParent(), Result, UDirs, Consumer, Visited);
+}
+
+void Sema::LookupVisibleDecls(Scope *S, LookupNameKind Kind,
+ VisibleDeclConsumer &Consumer,
+ bool IncludeGlobalScope) {
+ // Determine the set of using directives available during
+ // unqualified name lookup.
+ Scope *Initial = S;
+ UnqualUsingDirectiveSet UDirs;
+ if (getLangOpts().CPlusPlus) {
+ // Find the first namespace or translation-unit scope.
+ while (S && !isNamespaceOrTranslationUnitScope(S))
+ S = S->getParent();
+
+ UDirs.visitScopeChain(Initial, S);
+ }
+ UDirs.done();
+
+ // Look for visible declarations.
+ LookupResult Result(*this, DeclarationName(), SourceLocation(), Kind);
+ Result.setAllowHidden(Consumer.includeHiddenDecls());
+ VisibleDeclsRecord Visited;
+ if (!IncludeGlobalScope)
+ Visited.visitedContext(Context.getTranslationUnitDecl());
+ ShadowContextRAII Shadow(Visited);
+ ::LookupVisibleDecls(Initial, Result, UDirs, Consumer, Visited);
+}
+
+void Sema::LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
+ VisibleDeclConsumer &Consumer,
+ bool IncludeGlobalScope) {
+ LookupResult Result(*this, DeclarationName(), SourceLocation(), Kind);
+ Result.setAllowHidden(Consumer.includeHiddenDecls());
+ VisibleDeclsRecord Visited;
+ if (!IncludeGlobalScope)
+ Visited.visitedContext(Context.getTranslationUnitDecl());
+ ShadowContextRAII Shadow(Visited);
+ ::LookupVisibleDecls(Ctx, Result, /*QualifiedNameLookup=*/true,
+ /*InBaseClass=*/false, Consumer, Visited);
+}
+
+/// LookupOrCreateLabel - Do a name lookup of a label with the specified name.
+/// If GnuLabelLoc is a valid source location, then this is a definition
+/// of an __label__ label name, otherwise it is a normal label definition
+/// or use.
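+///
+/// Illustrative only:
+/// \code
+/// void f() {
+/// __label__ done; // GNU local label declaration
+/// goto done;
+/// done: ;
+/// }
+/// \endcode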
+LabelDecl *Sema::LookupOrCreateLabel(IdentifierInfo *II, SourceLocation Loc,
+ SourceLocation GnuLabelLoc) {
+ // Do a lookup to see if we have a label with this name already.
+ NamedDecl *Res = nullptr;
+
+ if (GnuLabelLoc.isValid()) {
+ // Local label definitions always shadow existing labels.
+ Res = LabelDecl::Create(Context, CurContext, Loc, II, GnuLabelLoc);
+ Scope *S = CurScope;
+ PushOnScopeChains(Res, S, true);
+ return cast<LabelDecl>(Res);
+ }
+
+ // Not a GNU local label.
+ Res = LookupSingleName(CurScope, II, Loc, LookupLabel, NotForRedeclaration);
+ // If we found a label, check to see if it is in the same context as us.
+ // When in a Block, we don't want to reuse a label in an enclosing function.
+ if (Res && Res->getDeclContext() != CurContext)
+ Res = nullptr;
+ if (!Res) {
+ // If not forward referenced or defined already, create the backing decl.
+ Res = LabelDecl::Create(Context, CurContext, Loc, II);
+ Scope *S = CurScope->getFnParent();
+ assert(S && "Not in a function?");
+ PushOnScopeChains(Res, S, true);
+ }
+ return cast<LabelDecl>(Res);
+}
+
+//===----------------------------------------------------------------------===//
+// Typo correction
+//===----------------------------------------------------------------------===//
+
+static bool isCandidateViable(CorrectionCandidateCallback &CCC,
+ TypoCorrection &Candidate) {
+ Candidate.setCallbackDistance(CCC.RankCandidate(Candidate));
+ return Candidate.getEditDistance(false) != TypoCorrection::InvalidDistance;
+}
+
+static void LookupPotentialTypoResult(Sema &SemaRef,
+ LookupResult &Res,
+ IdentifierInfo *Name,
+ Scope *S, CXXScopeSpec *SS,
+ DeclContext *MemberContext,
+ bool EnteringContext,
+ bool isObjCIvarLookup,
+ bool FindHidden);
+
+/// \brief Check whether the declarations found for a typo correction are
+/// visible, and if none of them are, convert the correction to an 'import
+/// a module' correction.
+static void checkCorrectionVisibility(Sema &SemaRef, TypoCorrection &TC) {
+ if (TC.begin() == TC.end())
+ return;
+
+ TypoCorrection::decl_iterator DI = TC.begin(), DE = TC.end();
+
+ for (/**/; DI != DE; ++DI)
+ if (!LookupResult::isVisible(SemaRef, *DI))
+ break;
+ // Nothing to do if all decls are visible.
+ if (DI == DE)
+ return;
+
+ llvm::SmallVector<NamedDecl*, 4> NewDecls(TC.begin(), DI);
+ bool AnyVisibleDecls = !NewDecls.empty();
+
+ for (/**/; DI != DE; ++DI) {
+ NamedDecl *VisibleDecl = *DI;
+ if (!LookupResult::isVisible(SemaRef, *DI))
+ VisibleDecl = findAcceptableDecl(SemaRef, *DI);
+
+ if (VisibleDecl) {
+ if (!AnyVisibleDecls) {
+ // Found a visible decl, discard all hidden ones.
+ AnyVisibleDecls = true;
+ NewDecls.clear();
+ }
+ NewDecls.push_back(VisibleDecl);
+ } else if (!AnyVisibleDecls && !(*DI)->isModulePrivate())
+ NewDecls.push_back(*DI);
+ }
+
+ if (NewDecls.empty())
+ TC = TypoCorrection();
+ else {
+ TC.setCorrectionDecls(NewDecls);
+ TC.setRequiresImport(!AnyVisibleDecls);
+ }
+}
+
+// Fill the supplied vector with the IdentifierInfo pointers for each piece of
+// the given NestedNameSpecifier (i.e. given a NestedNameSpecifier "foo::bar::",
+// fill the vector with the IdentifierInfo pointers for "foo" and "bar").
+static void getNestedNameSpecifierIdentifiers(
+ NestedNameSpecifier *NNS,
+ SmallVectorImpl<const IdentifierInfo*> &Identifiers) {
+ if (NestedNameSpecifier *Prefix = NNS->getPrefix())
+ getNestedNameSpecifierIdentifiers(Prefix, Identifiers);
+ else
+ Identifiers.clear();
+
+ const IdentifierInfo *II = nullptr;
+
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ II = NNS->getAsIdentifier();
+ break;
+
+ case NestedNameSpecifier::Namespace:
+ if (NNS->getAsNamespace()->isAnonymousNamespace())
+ return;
+ II = NNS->getAsNamespace()->getIdentifier();
+ break;
+
+ case NestedNameSpecifier::NamespaceAlias:
+ II = NNS->getAsNamespaceAlias()->getIdentifier();
+ break;
+
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ case NestedNameSpecifier::TypeSpec:
+ II = QualType(NNS->getAsType(), 0).getBaseTypeIdentifier();
+ break;
+
+ case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Super:
+ return;
+ }
+
+ if (II)
+ Identifiers.push_back(II);
+}
+
+void TypoCorrectionConsumer::FoundDecl(NamedDecl *ND, NamedDecl *Hiding,
+ DeclContext *Ctx, bool InBaseClass) {
+ // Don't consider hidden names for typo correction.
+ if (Hiding)
+ return;
+
+ // Only consider entities with identifiers for names, ignoring
+ // special names (constructors, overloaded operators, selectors,
+ // etc.).
+ IdentifierInfo *Name = ND->getIdentifier();
+ if (!Name)
+ return;
+
+ // Only consider visible declarations and declarations from modules with
+ // names that exactly match.
+ if (!LookupResult::isVisible(SemaRef, ND) && Name != Typo &&
+ !findAcceptableDecl(SemaRef, ND))
+ return;
+
+ FoundName(Name->getName());
+}
+
+void TypoCorrectionConsumer::FoundName(StringRef Name) {
+ // Compute the edit distance between the typo and the name of this
+ // entity, and add the identifier to the list of results.
+ addName(Name, nullptr);
+}
+
+void TypoCorrectionConsumer::addKeywordResult(StringRef Keyword) {
+ // Compute the edit distance between the typo and this keyword,
+ // and add the keyword to the list of results.
+ addName(Keyword, nullptr, nullptr, true);
+}
+
+void TypoCorrectionConsumer::addName(StringRef Name, NamedDecl *ND,
+ NestedNameSpecifier *NNS, bool isKeyword) {
+ // Use a simple length-based heuristic to determine the minimum possible
+ // edit distance. If the minimum isn't good enough, bail out early.
+ StringRef TypoStr = Typo->getName();
+ unsigned MinED = abs((int)Name.size() - (int)TypoStr.size());
+ if (MinED && TypoStr.size() / MinED < 3)
+ return;
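+ // For example, a 4-character typo against an 11-character name gives
+ // MinED == 7, and 4 / 7 == 0, which is < 3, so the candidate is
+ // rejected before the full edit distance is ever computed.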
+
+ // Compute an upper bound on the allowable edit distance, so that the
+ // edit-distance algorithm can short-circuit.
+ unsigned UpperBound = (TypoStr.size() + 2) / 3 + 1;
+ unsigned ED = TypoStr.edit_distance(Name, true, UpperBound);
+ if (ED >= UpperBound) return;
+
+ TypoCorrection TC(&SemaRef.Context.Idents.get(Name), ND, NNS, ED);
+ if (isKeyword) TC.makeKeyword();
+ TC.setCorrectionRange(nullptr, Result.getLookupNameInfo());
+ addCorrection(TC);
+}
+
+static const unsigned MaxTypoDistanceResultSets = 5;
+
+void TypoCorrectionConsumer::addCorrection(TypoCorrection Correction) {
+ StringRef TypoStr = Typo->getName();
+ StringRef Name = Correction.getCorrectionAsIdentifierInfo()->getName();
+
+ // For very short typos, ignore potential corrections that have a different
+ // base identifier from the typo or which have a normalized edit distance
+ // longer than the typo itself.
+ if (TypoStr.size() < 3 &&
+ (Name != TypoStr || Correction.getEditDistance(true) > TypoStr.size()))
+ return;
+
+ // If the correction is resolved but is not viable, ignore it.
+ if (Correction.isResolved()) {
+ checkCorrectionVisibility(SemaRef, Correction);
+ if (!Correction || !isCandidateViable(*CorrectionValidator, Correction))
+ return;
+ }
+
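+ // Corrections are bucketed first by raw edit distance and then by name; if
+ // the most recent candidate for this name is still unresolved, the incoming
+ // candidate displaces it.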
+ TypoResultList &CList =
+ CorrectionResults[Correction.getEditDistance(false)][Name];
+
+ if (!CList.empty() && !CList.back().isResolved())
+ CList.pop_back();
+ if (NamedDecl *NewND = Correction.getCorrectionDecl()) {
+ std::string CorrectionStr = Correction.getAsString(SemaRef.getLangOpts());
+ for (TypoResultList::iterator RI = CList.begin(), RIEnd = CList.end();
+ RI != RIEnd; ++RI) {
+ // If the Correction refers to a decl already in the result list, replace
+ // the existing result if the string representation of Correction comes
+ // before the current result alphabetically; either way, stop, as there is
+ // nothing more to be done to add Correction to the candidate set.
+ if (RI->getCorrectionDecl() == NewND) {
+ if (CorrectionStr < RI->getAsString(SemaRef.getLangOpts()))
+ *RI = Correction;
+ return;
+ }
+ }
+ }
+ if (CList.empty() || Correction.isResolved())
+ CList.push_back(Correction);
+
+ while (CorrectionResults.size() > MaxTypoDistanceResultSets)
+ CorrectionResults.erase(std::prev(CorrectionResults.end()));
+}
+
+void TypoCorrectionConsumer::addNamespaces(
+ const llvm::MapVector<NamespaceDecl *, bool> &KnownNamespaces) {
+ SearchNamespaces = true;
+
+ for (auto KNPair : KnownNamespaces)
+ Namespaces.addNameSpecifier(KNPair.first);
+
+ bool SSIsTemplate = false;
+ if (NestedNameSpecifier *NNS =
+ (SS && SS->isValid()) ? SS->getScopeRep() : nullptr) {
+ if (const Type *T = NNS->getAsType())
+ SSIsTemplate = T->getTypeClass() == Type::TemplateSpecialization;
+ }
+ // Do not transform this into an iterator-based loop. The loop body can
+ // trigger the creation of further types (through lazy deserialization) and
+ // invalidate iterators into this list.
+ auto &Types = SemaRef.getASTContext().getTypes();
+ for (unsigned I = 0; I != Types.size(); ++I) {
+ const auto *TI = Types[I];
+ if (CXXRecordDecl *CD = TI->getAsCXXRecordDecl()) {
+ CD = CD->getCanonicalDecl();
+ if (!CD->isDependentType() && !CD->isAnonymousStructOrUnion() &&
+ !CD->isUnion() && CD->getIdentifier() &&
+ (SSIsTemplate || !isa<ClassTemplateSpecializationDecl>(CD)) &&
+ (CD->isBeingDefined() || CD->isCompleteDefinition()))
+ Namespaces.addNameSpecifier(CD);
+ }
+ }
+}
+
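+/// Return the next viable correction, walking the edit-distance-ordered
+/// buckets and validating candidates lazily; corrections that survive
+/// validation are cached so earlier results can be revisited by index.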
+const TypoCorrection &TypoCorrectionConsumer::getNextCorrection() {
+ if (++CurrentTCIndex < ValidatedCorrections.size())
+ return ValidatedCorrections[CurrentTCIndex];
+
+ CurrentTCIndex = ValidatedCorrections.size();
+ while (!CorrectionResults.empty()) {
+ auto DI = CorrectionResults.begin();
+ if (DI->second.empty()) {
+ CorrectionResults.erase(DI);
+ continue;
+ }
+
+ auto RI = DI->second.begin();
+ if (RI->second.empty()) {
+ DI->second.erase(RI);
+ performQualifiedLookups();
+ continue;
+ }
+
+ TypoCorrection TC = RI->second.pop_back_val();
+ if (TC.isResolved() || TC.requiresImport() || resolveCorrection(TC)) {
+ ValidatedCorrections.push_back(TC);
+ return ValidatedCorrections[CurrentTCIndex];
+ }
+ }
+ return ValidatedCorrections[0]; // The empty correction.
+}
+
+bool TypoCorrectionConsumer::resolveCorrection(TypoCorrection &Candidate) {
+ IdentifierInfo *Name = Candidate.getCorrectionAsIdentifierInfo();
+ DeclContext *TempMemberContext = MemberContext;
+ CXXScopeSpec *TempSS = SS.get();
+retry_lookup:
+ LookupPotentialTypoResult(SemaRef, Result, Name, S, TempSS, TempMemberContext,
+ EnteringContext,
+ CorrectionValidator->IsObjCIvarLookup,
+ Name == Typo && !Candidate.WillReplaceSpecifier());
+ switch (Result.getResultKind()) {
+ case LookupResult::NotFound:
+ case LookupResult::NotFoundInCurrentInstantiation:
+ case LookupResult::FoundUnresolvedValue:
+ if (TempSS) {
+ // Immediately retry the lookup without the given CXXScopeSpec
+ TempSS = nullptr;
+ Candidate.WillReplaceSpecifier(true);
+ goto retry_lookup;
+ }
+ if (TempMemberContext) {
+ if (SS && !TempSS)
+ TempSS = SS.get();
+ TempMemberContext = nullptr;
+ goto retry_lookup;
+ }
+ if (SearchNamespaces)
+ QualifiedResults.push_back(Candidate);
+ break;
+
+ case LookupResult::Ambiguous:
+ // We don't deal with ambiguities.
+ break;
+
+ case LookupResult::Found:
+ case LookupResult::FoundOverloaded:
+ // Store all of the Decls for overloaded symbols
+ for (auto *TRD : Result)
+ Candidate.addCorrectionDecl(TRD);
+ checkCorrectionVisibility(SemaRef, Candidate);
+ if (!isCandidateViable(*CorrectionValidator, Candidate)) {
+ if (SearchNamespaces)
+ QualifiedResults.push_back(Candidate);
+ break;
+ }
+ Candidate.setCorrectionRange(SS.get(), Result.getLookupNameInfo());
+ return true;
+ }
+ return false;
+}
+
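+/// For each correction that failed unqualified lookup, retry the lookup
+/// qualified by each known namespace, keeping the combinations that are
+/// found and still close enough to the typo once the qualifier's edit
+/// distance is included.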
+void TypoCorrectionConsumer::performQualifiedLookups() {
+ unsigned TypoLen = Typo->getName().size();
+ for (auto QR : QualifiedResults) {
+ for (auto NSI : Namespaces) {
+ DeclContext *Ctx = NSI.DeclCtx;
+ const Type *NSType = NSI.NameSpecifier->getAsType();
+
+ // If the current NestedNameSpecifier refers to a class and the
+ // current correction candidate is the name of that class, then skip
+ // it as it is unlikely a qualified version of the class' constructor
+ // is an appropriate correction.
+ if (CXXRecordDecl *NSDecl = NSType ? NSType->getAsCXXRecordDecl() :
+ nullptr) {
+ if (NSDecl->getIdentifier() == QR.getCorrectionAsIdentifierInfo())
+ continue;
+ }
+
+ TypoCorrection TC(QR);
+ TC.ClearCorrectionDecls();
+ TC.setCorrectionSpecifier(NSI.NameSpecifier);
+ TC.setQualifierDistance(NSI.EditDistance);
+ TC.setCallbackDistance(0); // Reset the callback distance
+
+ // If the current correction candidate and namespace combination are
+ // too far away from the original typo based on the normalized edit
+ // distance, then skip performing a qualified name lookup.
+ unsigned TmpED = TC.getEditDistance(true);
+ if (QR.getCorrectionAsIdentifierInfo() != Typo && TmpED &&
+ TypoLen / TmpED < 3)
+ continue;
+
+ Result.clear();
+ Result.setLookupName(QR.getCorrectionAsIdentifierInfo());
+ if (!SemaRef.LookupQualifiedName(Result, Ctx))
+ continue;
+
+ // Any corrections added below will be validated in subsequent
+ // iterations of the main while() loop over the Consumer's contents.
+ switch (Result.getResultKind()) {
+ case LookupResult::Found:
+ case LookupResult::FoundOverloaded: {
+ if (SS && SS->isValid()) {
+ std::string NewQualified = TC.getAsString(SemaRef.getLangOpts());
+ std::string OldQualified;
+ llvm::raw_string_ostream OldOStream(OldQualified);
+ SS->getScopeRep()->print(OldOStream, SemaRef.getPrintingPolicy());
+ OldOStream << Typo->getName();
+ // If the correction candidate would be an identically-written qualified
+ // identifier, then the existing CXXScopeSpec probably included a
+ // typedef that didn't get accounted for properly.
+ if (OldOStream.str() == NewQualified)
+ break;
+ }
+ for (LookupResult::iterator TRD = Result.begin(), TRDEnd = Result.end();
+ TRD != TRDEnd; ++TRD) {
+ if (SemaRef.CheckMemberAccess(TC.getCorrectionRange().getBegin(),
+ NSType ? NSType->getAsCXXRecordDecl()
+ : nullptr,
+ TRD.getPair()) == Sema::AR_accessible)
+ TC.addCorrectionDecl(*TRD);
+ }
+ if (TC.isResolved()) {
+ TC.setCorrectionRange(SS.get(), Result.getLookupNameInfo());
+ addCorrection(TC);
+ }
+ break;
+ }
+ case LookupResult::NotFound:
+ case LookupResult::NotFoundInCurrentInstantiation:
+ case LookupResult::Ambiguous:
+ case LookupResult::FoundUnresolvedValue:
+ break;
+ }
+ }
+ }
+ QualifiedResults.clear();
+}
+
+TypoCorrectionConsumer::NamespaceSpecifierSet::NamespaceSpecifierSet(
+ ASTContext &Context, DeclContext *CurContext, CXXScopeSpec *CurScopeSpec)
+ : Context(Context), CurContextChain(buildContextChain(CurContext)) {
+ if (NestedNameSpecifier *NNS =
+ CurScopeSpec ? CurScopeSpec->getScopeRep() : nullptr) {
+ llvm::raw_string_ostream SpecifierOStream(CurNameSpecifier);
+ NNS->print(SpecifierOStream, Context.getPrintingPolicy());
+
+ getNestedNameSpecifierIdentifiers(NNS, CurNameSpecifierIdentifiers);
+ }
+ // Build the list of identifiers that would be used for an absolute
+ // (from the global context) NestedNameSpecifier referring to the current
+ // context.
+ for (DeclContextList::reverse_iterator C = CurContextChain.rbegin(),
+ CEnd = CurContextChain.rend();
+ C != CEnd; ++C) {
+ if (NamespaceDecl *ND = dyn_cast_or_null<NamespaceDecl>(*C))
+ CurContextIdentifiers.push_back(ND->getIdentifier());
+ }
+
+ // Add the global context as a NestedNameSpecifier
+ SpecifierInfo SI = {cast<DeclContext>(Context.getTranslationUnitDecl()),
+ NestedNameSpecifier::GlobalSpecifier(Context), 1};
+ DistanceMap[1].push_back(SI);
+}
+
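+/// Walk from Start out to the translation unit, keeping every context that
+/// can appear in a nested-name-specifier (inline namespaces, transparent
+/// contexts, and anonymous namespaces are skipped).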
+auto TypoCorrectionConsumer::NamespaceSpecifierSet::buildContextChain(
+ DeclContext *Start) -> DeclContextList {
+ assert(Start && "Building a context chain from a null context");
+ DeclContextList Chain;
+ for (DeclContext *DC = Start->getPrimaryContext(); DC != nullptr;
+ DC = DC->getLookupParent()) {
+ NamespaceDecl *ND = dyn_cast_or_null<NamespaceDecl>(DC);
+ if (!DC->isInlineNamespace() && !DC->isTransparentContext() &&
+ !(ND && ND->isAnonymousNamespace()))
+ Chain.push_back(DC->getPrimaryContext());
+ }
+ return Chain;
+}
+
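+/// Build a NestedNameSpecifier from the given context chain, outermost
+/// context first, returning the number of specifier components created.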
+unsigned
+TypoCorrectionConsumer::NamespaceSpecifierSet::buildNestedNameSpecifier(
+ DeclContextList &DeclChain, NestedNameSpecifier *&NNS) {
+ unsigned NumSpecifiers = 0;
+ for (DeclContextList::reverse_iterator C = DeclChain.rbegin(),
+ CEnd = DeclChain.rend();
+ C != CEnd; ++C) {
+ if (NamespaceDecl *ND = dyn_cast_or_null<NamespaceDecl>(*C)) {
+ NNS = NestedNameSpecifier::Create(Context, NNS, ND);
+ ++NumSpecifiers;
+ } else if (RecordDecl *RD = dyn_cast_or_null<RecordDecl>(*C)) {
+ NNS = NestedNameSpecifier::Create(Context, NNS, RD->isTemplateDecl(),
+ RD->getTypeForDecl());
+ ++NumSpecifiers;
+ }
+ }
+ return NumSpecifiers;
+}
+
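+/// Record a specifier that would qualify Ctx relative to the current
+/// context, dropping components the two contexts share and falling back to
+/// a fully '::'-qualified form when the shortened form would collide with
+/// a name in the current specifier or context.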
+void TypoCorrectionConsumer::NamespaceSpecifierSet::addNameSpecifier(
+ DeclContext *Ctx) {
+ NestedNameSpecifier *NNS = nullptr;
+ unsigned NumSpecifiers = 0;
+ DeclContextList NamespaceDeclChain(buildContextChain(Ctx));
+ DeclContextList FullNamespaceDeclChain(NamespaceDeclChain);
+
+ // Eliminate common elements from the two DeclContext chains.
+ for (DeclContextList::reverse_iterator C = CurContextChain.rbegin(),
+ CEnd = CurContextChain.rend();
+ C != CEnd && !NamespaceDeclChain.empty() &&
+ NamespaceDeclChain.back() == *C; ++C) {
+ NamespaceDeclChain.pop_back();
+ }
+
+ // Build the NestedNameSpecifier from what is left of the NamespaceDeclChain
+ NumSpecifiers = buildNestedNameSpecifier(NamespaceDeclChain, NNS);
+
+ // Add an explicit leading '::' specifier if needed.
+ if (NamespaceDeclChain.empty()) {
+ // Rebuild the NestedNameSpecifier as a globally-qualified specifier.
+ NNS = NestedNameSpecifier::GlobalSpecifier(Context);
+ NumSpecifiers =
+ buildNestedNameSpecifier(FullNamespaceDeclChain, NNS);
+ } else if (NamedDecl *ND =
+ dyn_cast_or_null<NamedDecl>(NamespaceDeclChain.back())) {
+ IdentifierInfo *Name = ND->getIdentifier();
+ bool SameNameSpecifier = false;
+ if (std::find(CurNameSpecifierIdentifiers.begin(),
+ CurNameSpecifierIdentifiers.end(),
+ Name) != CurNameSpecifierIdentifiers.end()) {
+ std::string NewNameSpecifier;
+ llvm::raw_string_ostream SpecifierOStream(NewNameSpecifier);
+ SmallVector<const IdentifierInfo *, 4> NewNameSpecifierIdentifiers;
+ getNestedNameSpecifierIdentifiers(NNS, NewNameSpecifierIdentifiers);
+ NNS->print(SpecifierOStream, Context.getPrintingPolicy());
+ SpecifierOStream.flush();
+ SameNameSpecifier = NewNameSpecifier == CurNameSpecifier;
+ }
+ if (SameNameSpecifier ||
+ std::find(CurContextIdentifiers.begin(), CurContextIdentifiers.end(),
+ Name) != CurContextIdentifiers.end()) {
+ // Rebuild the NestedNameSpecifier as a globally-qualified specifier.
+ NNS = NestedNameSpecifier::GlobalSpecifier(Context);
+ NumSpecifiers =
+ buildNestedNameSpecifier(FullNamespaceDeclChain, NNS);
+ }
+ }
+
+ // If the built NestedNameSpecifier would be replacing an existing
+ // NestedNameSpecifier, use the number of component identifiers that
+ // would need to be changed as the edit distance instead of the number
+ // of components in the built NestedNameSpecifier.
+ if (NNS && !CurNameSpecifierIdentifiers.empty()) {
+ SmallVector<const IdentifierInfo*, 4> NewNameSpecifierIdentifiers;
+ getNestedNameSpecifierIdentifiers(NNS, NewNameSpecifierIdentifiers);
+ NumSpecifiers = llvm::ComputeEditDistance(
+ llvm::makeArrayRef(CurNameSpecifierIdentifiers),
+ llvm::makeArrayRef(NewNameSpecifierIdentifiers));
+ }
+
+ SpecifierInfo SI = {Ctx, NNS, NumSpecifiers};
+ DistanceMap[NumSpecifiers].push_back(SI);
+}
+
+/// \brief Perform name lookup for a possible result for typo correction.
+static void LookupPotentialTypoResult(Sema &SemaRef,
+ LookupResult &Res,
+ IdentifierInfo *Name,
+ Scope *S, CXXScopeSpec *SS,
+ DeclContext *MemberContext,
+ bool EnteringContext,
+ bool isObjCIvarLookup,
+ bool FindHidden) {
+ Res.suppressDiagnostics();
+ Res.clear();
+ Res.setLookupName(Name);
+ Res.setAllowHidden(FindHidden);
+ if (MemberContext) {
+ if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(MemberContext)) {
+ if (isObjCIvarLookup) {
+ if (ObjCIvarDecl *Ivar = Class->lookupInstanceVariable(Name)) {
+ Res.addDecl(Ivar);
+ Res.resolveKind();
+ return;
+ }
+ }
+
+ if (ObjCPropertyDecl *Prop = Class->FindPropertyDeclaration(Name)) {
+ Res.addDecl(Prop);
+ Res.resolveKind();
+ return;
+ }
+ }
+
+ SemaRef.LookupQualifiedName(Res, MemberContext);
+ return;
+ }
+
+ SemaRef.LookupParsedName(Res, S, SS, /*AllowBuiltinCreation=*/false,
+ EnteringContext);
+
+ // Fake ivar lookup; this should really be part of
+ // LookupParsedName.
+ if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl()) {
+ if (Method->isInstanceMethod() && Method->getClassInterface() &&
+ (Res.empty() ||
+ (Res.isSingleResult() &&
+ Res.getFoundDecl()->isDefinedOutsideFunctionOrMethod()))) {
+ if (ObjCIvarDecl *IV
+ = Method->getClassInterface()->lookupInstanceVariable(Name)) {
+ Res.addDecl(IV);
+ Res.resolveKind();
+ }
+ }
+ }
+}
+
+/// \brief Add keywords to the consumer as possible typo corrections.
+static void AddKeywordsToConsumer(Sema &SemaRef,
+ TypoCorrectionConsumer &Consumer,
+ Scope *S, CorrectionCandidateCallback &CCC,
+ bool AfterNestedNameSpecifier) {
+ if (AfterNestedNameSpecifier) {
+ // For 'X::', we know exactly which keywords can appear next.
+ Consumer.addKeywordResult("template");
+ if (CCC.WantExpressionKeywords)
+ Consumer.addKeywordResult("operator");
+ return;
+ }
+
+ if (CCC.WantObjCSuper)
+ Consumer.addKeywordResult("super");
+
+ if (CCC.WantTypeSpecifiers) {
+ // Add type-specifier keywords to the set of results.
+ static const char *const CTypeSpecs[] = {
+ "char", "const", "double", "enum", "float", "int", "long", "short",
+ "signed", "struct", "union", "unsigned", "void", "volatile",
+ "_Complex", "_Imaginary",
+ // storage-specifiers as well
+ "extern", "inline", "static", "typedef"
+ };
+
+ const unsigned NumCTypeSpecs = llvm::array_lengthof(CTypeSpecs);
+ for (unsigned I = 0; I != NumCTypeSpecs; ++I)
+ Consumer.addKeywordResult(CTypeSpecs[I]);
+
+ if (SemaRef.getLangOpts().C99)
+ Consumer.addKeywordResult("restrict");
+ if (SemaRef.getLangOpts().Bool || SemaRef.getLangOpts().CPlusPlus)
+ Consumer.addKeywordResult("bool");
+ else if (SemaRef.getLangOpts().C99)
+ Consumer.addKeywordResult("_Bool");
+
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ Consumer.addKeywordResult("class");
+ Consumer.addKeywordResult("typename");
+ Consumer.addKeywordResult("wchar_t");
+
+ if (SemaRef.getLangOpts().CPlusPlus11) {
+ Consumer.addKeywordResult("char16_t");
+ Consumer.addKeywordResult("char32_t");
+ Consumer.addKeywordResult("constexpr");
+ Consumer.addKeywordResult("decltype");
+ Consumer.addKeywordResult("thread_local");
+ }
+ }
+
+ if (SemaRef.getLangOpts().GNUMode)
+ Consumer.addKeywordResult("typeof");
+ } else if (CCC.WantFunctionLikeCasts) {
+ static const char *const CastableTypeSpecs[] = {
+ "char", "double", "float", "int", "long", "short",
+ "signed", "unsigned", "void"
+ };
+ for (auto *kw : CastableTypeSpecs)
+ Consumer.addKeywordResult(kw);
+ }
+
+ if (CCC.WantCXXNamedCasts && SemaRef.getLangOpts().CPlusPlus) {
+ Consumer.addKeywordResult("const_cast");
+ Consumer.addKeywordResult("dynamic_cast");
+ Consumer.addKeywordResult("reinterpret_cast");
+ Consumer.addKeywordResult("static_cast");
+ }
+
+ if (CCC.WantExpressionKeywords) {
+ Consumer.addKeywordResult("sizeof");
+ if (SemaRef.getLangOpts().Bool || SemaRef.getLangOpts().CPlusPlus) {
+ Consumer.addKeywordResult("false");
+ Consumer.addKeywordResult("true");
+ }
+
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ static const char *const CXXExprs[] = {
+ "delete", "new", "operator", "throw", "typeid"
+ };
+ const unsigned NumCXXExprs = llvm::array_lengthof(CXXExprs);
+ for (unsigned I = 0; I != NumCXXExprs; ++I)
+ Consumer.addKeywordResult(CXXExprs[I]);
+
+ if (isa<CXXMethodDecl>(SemaRef.CurContext) &&
+ cast<CXXMethodDecl>(SemaRef.CurContext)->isInstance())
+ Consumer.addKeywordResult("this");
+
+ if (SemaRef.getLangOpts().CPlusPlus11) {
+ Consumer.addKeywordResult("alignof");
+ Consumer.addKeywordResult("nullptr");
+ }
+ }
+
+ if (SemaRef.getLangOpts().C11) {
+ // FIXME: We should not suggest _Alignof if the alignof macro
+ // is present.
+ Consumer.addKeywordResult("_Alignof");
+ }
+ }
+
+ if (CCC.WantRemainingKeywords) {
+ if (SemaRef.getCurFunctionOrMethodDecl() || SemaRef.getCurBlock()) {
+ // Statements.
+ static const char *const CStmts[] = {
+ "do", "else", "for", "goto", "if", "return", "switch", "while" };
+ const unsigned NumCStmts = llvm::array_lengthof(CStmts);
+ for (unsigned I = 0; I != NumCStmts; ++I)
+ Consumer.addKeywordResult(CStmts[I]);
+
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ Consumer.addKeywordResult("catch");
+ Consumer.addKeywordResult("try");
+ }
+
+ if (S && S->getBreakParent())
+ Consumer.addKeywordResult("break");
+
+ if (S && S->getContinueParent())
+ Consumer.addKeywordResult("continue");
+
+ if (!SemaRef.getCurFunction()->SwitchStack.empty()) {
+ Consumer.addKeywordResult("case");
+ Consumer.addKeywordResult("default");
+ }
+ } else {
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ Consumer.addKeywordResult("namespace");
+ Consumer.addKeywordResult("template");
+ }
+
+ if (S && S->isClassScope()) {
+ Consumer.addKeywordResult("explicit");
+ Consumer.addKeywordResult("friend");
+ Consumer.addKeywordResult("mutable");
+ Consumer.addKeywordResult("private");
+ Consumer.addKeywordResult("protected");
+ Consumer.addKeywordResult("public");
+ Consumer.addKeywordResult("virtual");
+ }
+ }
+
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ Consumer.addKeywordResult("using");
+
+ if (SemaRef.getLangOpts().CPlusPlus11)
+ Consumer.addKeywordResult("static_assert");
+ }
+ }
+}
+
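+/// Create and seed a TypoCorrectionConsumer for the given typo: every
+/// visible declaration, known identifier, and permissible keyword is offered
+/// to the consumer as a potential correction. Returns null when typo
+/// correction is disabled or cannot be performed at this point.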
+std::unique_ptr<TypoCorrectionConsumer> Sema::makeTypoCorrectionConsumer(
+ const DeclarationNameInfo &TypoName, Sema::LookupNameKind LookupKind,
+ Scope *S, CXXScopeSpec *SS,
+ std::unique_ptr<CorrectionCandidateCallback> CCC,
+ DeclContext *MemberContext, bool EnteringContext,
+ const ObjCObjectPointerType *OPT, bool ErrorRecovery) {
+
+ if (Diags.hasFatalErrorOccurred() || !getLangOpts().SpellChecking ||
+ DisableTypoCorrection)
+ return nullptr;
+
+ // In Microsoft mode, don't perform typo correction in a template member
+ // function dependent context because it interferes with the "lookup into
+ // dependent bases of class templates" feature.
+ if (getLangOpts().MSVCCompat && CurContext->isDependentContext() &&
+ isa<CXXMethodDecl>(CurContext))
+ return nullptr;
+
+ // We only attempt to correct typos for identifiers.
+ IdentifierInfo *Typo = TypoName.getName().getAsIdentifierInfo();
+ if (!Typo)
+ return nullptr;
+
+ // If the scope specifier itself was invalid, don't try to correct
+ // typos.
+ if (SS && SS->isInvalid())
+ return nullptr;
+
+ // Never try to correct typos during template deduction or
+ // instantiation.
+ if (!ActiveTemplateInstantiations.empty())
+ return nullptr;
+
+ // Don't try to correct 'super'.
+ if (S && S->isInObjcMethodScope() && Typo == getSuperIdentifier())
+ return nullptr;
+
+ // Abort if typo correction already failed for this specific typo.
+ IdentifierSourceLocations::iterator locs = TypoCorrectionFailures.find(Typo);
+ if (locs != TypoCorrectionFailures.end() &&
+ locs->second.count(TypoName.getLoc()))
+ return nullptr;
+
+ // Don't try to correct the identifier "vector" when in AltiVec mode.
+ // TODO: Figure out why typo correction misbehaves in this case, fix it, and
+ // remove this workaround.
+ if ((getLangOpts().AltiVec || getLangOpts().ZVector) && Typo->isStr("vector"))
+ return nullptr;
+
+ // Provide a stop gap for files that are just seriously broken. Trying
+ // to correct all typos can turn into a HUGE performance penalty, causing
+ // some files to take minutes to get rejected by the parser.
+ unsigned Limit = getDiagnostics().getDiagnosticOptions().SpellCheckingLimit;
+ if (Limit && TyposCorrected >= Limit)
+ return nullptr;
+ ++TyposCorrected;
+
+ // If we're handling a missing symbol error, using modules, and the
+ // special search all modules option is used, look for a missing import.
+ if (ErrorRecovery && getLangOpts().Modules &&
+ getLangOpts().ModulesSearchAll) {
+ // The following has the side effect of loading the missing module.
+ getModuleLoader().lookupMissingImports(Typo->getName(),
+ TypoName.getLocStart());
+ }
+
+ CorrectionCandidateCallback &CCCRef = *CCC;
+ auto Consumer = llvm::make_unique<TypoCorrectionConsumer>(
+ *this, TypoName, LookupKind, S, SS, std::move(CCC), MemberContext,
+ EnteringContext);
+
+ // Perform name lookup to find visible, similarly-named entities.
+ bool IsUnqualifiedLookup = false;
+ DeclContext *QualifiedDC = MemberContext;
+ if (MemberContext) {
+ LookupVisibleDecls(MemberContext, LookupKind, *Consumer);
+
+ // Look in qualified interfaces.
+ if (OPT) {
+ for (auto *I : OPT->quals())
+ LookupVisibleDecls(I, LookupKind, *Consumer);
+ }
+ } else if (SS && SS->isSet()) {
+ QualifiedDC = computeDeclContext(*SS, EnteringContext);
+ if (!QualifiedDC)
+ return nullptr;
+
+ LookupVisibleDecls(QualifiedDC, LookupKind, *Consumer);
+ } else {
+ IsUnqualifiedLookup = true;
+ }
+
+ // Determine whether we are going to search in the various namespaces for
+ // corrections.
+ bool SearchNamespaces
+ = getLangOpts().CPlusPlus &&
+ (IsUnqualifiedLookup || (SS && SS->isSet()));
+
+ if (IsUnqualifiedLookup || SearchNamespaces) {
+ // For unqualified lookup, look through all of the names that we have
+ // seen in this translation unit.
+ // FIXME: Re-add the ability to skip very unlikely potential corrections.
+ for (const auto &I : Context.Idents)
+ Consumer->FoundName(I.getKey());
+
+ // Walk through identifiers in external identifier sources.
+ // FIXME: Re-add the ability to skip very unlikely potential corrections.
+ if (IdentifierInfoLookup *External
+ = Context.Idents.getExternalIdentifierLookup()) {
+ std::unique_ptr<IdentifierIterator> Iter(External->getIdentifiers());
+ do {
+ StringRef Name = Iter->Next();
+ if (Name.empty())
+ break;
+
+ Consumer->FoundName(Name);
+ } while (true);
+ }
+ }
+
+ AddKeywordsToConsumer(*this, *Consumer, S, CCCRef, SS && SS->isNotEmpty());
+
+ // Build the NestedNameSpecifiers for the KnownNamespaces, if we're going
+ // to search those namespaces.
+ if (SearchNamespaces) {
+ // Load any externally-known namespaces.
+ if (ExternalSource && !LoadedExternalKnownNamespaces) {
+ SmallVector<NamespaceDecl *, 4> ExternalKnownNamespaces;
+ LoadedExternalKnownNamespaces = true;
+ ExternalSource->ReadKnownNamespaces(ExternalKnownNamespaces);
+ for (auto *N : ExternalKnownNamespaces)
+ KnownNamespaces[N] = true;
+ }
+
+ Consumer->addNamespaces(KnownNamespaces);
+ }
+
+ return Consumer;
+}
+
+/// \brief Try to "correct" a typo in the source code by finding
+/// visible declarations whose names are similar to the name that was
+/// present in the source code.
+///
+/// \param TypoName the \c DeclarationNameInfo structure that contains
+/// the name that was present in the source code along with its location.
+///
+/// \param LookupKind the name-lookup criteria used to search for the name.
+///
+/// \param S the scope in which name lookup occurs.
+///
+/// \param SS the nested-name-specifier that precedes the name we're
+/// looking for, if present.
+///
+/// \param CCC A CorrectionCandidateCallback object that provides further
+/// validation of typo correction candidates. It also provides flags for
+/// determining the set of keywords permitted.
+///
+/// \param MemberContext if non-NULL, the context in which to look for
+/// a member access expression.
+///
+/// \param EnteringContext whether we're entering the context described by
+/// the nested-name-specifier SS.
+///
+/// \param OPT when non-NULL, the search for visible declarations will
+/// also walk the protocols in the qualified interfaces of \p OPT.
+///
+/// \returns a \c TypoCorrection containing the corrected name if the typo
+/// was corrected, along with information such as the \c NamedDecl where the
+/// corrected name was declared, and any additional \c NestedNameSpecifier
+/// needed to access it (C++ only). The \c TypoCorrection is empty if there
+/// is no correction.
+TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
+ Sema::LookupNameKind LookupKind,
+ Scope *S, CXXScopeSpec *SS,
+ std::unique_ptr<CorrectionCandidateCallback> CCC,
+ CorrectTypoKind Mode,
+ DeclContext *MemberContext,
+ bool EnteringContext,
+ const ObjCObjectPointerType *OPT,
+ bool RecordFailure) {
+ assert(CCC && "CorrectTypo requires a CorrectionCandidateCallback");
+
+ // Always let the ExternalSource have the first chance at correction, even
+ // if we would otherwise have given up.
+ if (ExternalSource) {
+ if (TypoCorrection Correction = ExternalSource->CorrectTypo(
+ TypoName, LookupKind, S, SS, *CCC, MemberContext, EnteringContext, OPT))
+ return Correction;
+ }
+
+ // Ugly hack equivalent to CTC == CTC_ObjCMessageReceiver;
+ // WantObjCSuper is only true for CTC_ObjCMessageReceiver and for
+ // some instances of CTC_Unknown, while WantRemainingKeywords is true
+ // for CTC_Unknown but not for CTC_ObjCMessageReceiver.
+ bool ObjCMessageReceiver = CCC->WantObjCSuper && !CCC->WantRemainingKeywords;
+
+ IdentifierInfo *Typo = TypoName.getName().getAsIdentifierInfo();
+ auto Consumer = makeTypoCorrectionConsumer(
+ TypoName, LookupKind, S, SS, std::move(CCC), MemberContext,
+ EnteringContext, OPT, Mode == CTK_ErrorRecovery);
+
+ if (!Consumer)
+ return TypoCorrection();
+
+ // If we haven't found anything, we're done.
+ if (Consumer->empty())
+ return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure);
+
+ // Make sure the best edit distance (prior to adding any namespace qualifiers)
+ // is not more than about a third of the length of the typo's identifier.
+ unsigned ED = Consumer->getBestEditDistance(true);
+ unsigned TypoLen = Typo->getName().size();
+ if (ED > 0 && TypoLen / ED < 3)
+ return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure);
+
+ TypoCorrection BestTC = Consumer->getNextCorrection();
+ TypoCorrection SecondBestTC = Consumer->getNextCorrection();
+ if (!BestTC)
+ return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure);
+
+ ED = BestTC.getEditDistance();
+
+ if (TypoLen >= 3 && ED > 0 && TypoLen / ED < 3) {
+ // If this was an unqualified lookup and we believe the callback
+ // object wouldn't have filtered out possible corrections, note
+ // that no correction was found.
+ return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure);
+ }
+
+ // If only a single name remains, return that result.
+ if (!SecondBestTC ||
+ SecondBestTC.getEditDistance(false) > BestTC.getEditDistance(false)) {
+ const TypoCorrection &Result = BestTC;
+
+ // Don't correct to a keyword that's the same as the typo; the keyword
+ // wasn't actually in scope.
+ if (ED == 0 && Result.isKeyword())
+ return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure);
+
+ TypoCorrection TC = Result;
+ TC.setCorrectionRange(SS, TypoName);
+ checkCorrectionVisibility(*this, TC);
+ return TC;
+ } else if (SecondBestTC && ObjCMessageReceiver) {
+ // Prefer 'super' when we're completing in a message-receiver
+ // context.
+
+ if (BestTC.getCorrection().getAsString() != "super") {
+ if (SecondBestTC.getCorrection().getAsString() == "super")
+ BestTC = SecondBestTC;
+ else if ((*Consumer)["super"].front().isKeyword())
+ BestTC = (*Consumer)["super"].front();
+ }
+ // Don't correct to a keyword that's the same as the typo; the keyword
+ // wasn't actually in scope.
+ if (BestTC.getEditDistance() == 0 ||
+ BestTC.getCorrection().getAsString() != "super")
+ return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure);
+
+ BestTC.setCorrectionRange(SS, TypoName);
+ return BestTC;
+ }
+
+ // Record the failure's location if needed and return an empty correction. If
+ // this was an unqualified lookup and we believe the callback object did not
+ // filter out possible corrections, also cache the failure for the typo.
+ return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure && !SecondBestTC);
+}
+
+/// \brief Try to "correct" a typo in the source code by finding
+/// visible declarations whose names are similar to the name that was
+/// present in the source code.
+///
+/// \param TypoName the \c DeclarationNameInfo structure that contains
+/// the name that was present in the source code along with its location.
+///
+/// \param LookupKind the name-lookup criteria used to search for the name.
+///
+/// \param S the scope in which name lookup occurs.
+///
+/// \param SS the nested-name-specifier that precedes the name we're
+/// looking for, if present.
+///
+/// \param CCC A CorrectionCandidateCallback object that provides further
+/// validation of typo correction candidates. It also provides flags for
+/// determining the set of keywords permitted.
+///
+/// \param TDG A TypoDiagnosticGenerator functor that will be used to print
+/// diagnostics when the actual typo correction is attempted.
+///
+/// \param TRC A TypoRecoveryCallback functor that will be used to build an
+/// Expr from a typo correction candidate.
+///
+/// \param MemberContext if non-NULL, the context in which to look for
+/// a member access expression.
+///
+/// \param EnteringContext whether we're entering the context described by
+/// the nested-name-specifier SS.
+///
+/// \param OPT when non-NULL, the search for visible declarations will
+/// also walk the protocols in the qualified interfaces of \p OPT.
+///
+/// \returns a new \c TypoExpr that will later be replaced in the AST with an
+/// Expr representing the result of performing typo correction, or nullptr if
+/// typo correction is not possible. If nullptr is returned, no diagnostics will
+/// be emitted and it is the responsibility of the caller to emit any that are
+/// needed.
+TypoExpr *Sema::CorrectTypoDelayed(
+ const DeclarationNameInfo &TypoName, Sema::LookupNameKind LookupKind,
+ Scope *S, CXXScopeSpec *SS,
+ std::unique_ptr<CorrectionCandidateCallback> CCC,
+ TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode,
+ DeclContext *MemberContext, bool EnteringContext,
+ const ObjCObjectPointerType *OPT) {
+ assert(CCC && "CorrectTypoDelayed requires a CorrectionCandidateCallback");
+
+ TypoCorrection Empty;
+ auto Consumer = makeTypoCorrectionConsumer(
+ TypoName, LookupKind, S, SS, std::move(CCC), MemberContext,
+ EnteringContext, OPT, Mode == CTK_ErrorRecovery);
+
+ if (!Consumer || Consumer->empty())
+ return nullptr;
+
+ // Make sure the best edit distance (prior to adding any namespace qualifiers)
+ // is not more than about a third of the length of the typo's identifier.
+ unsigned ED = Consumer->getBestEditDistance(true);
+ IdentifierInfo *Typo = TypoName.getName().getAsIdentifierInfo();
+ if (ED > 0 && Typo->getName().size() / ED < 3)
+ return nullptr;
+
+ ExprEvalContexts.back().NumTypos++;
+ return createDelayedTypo(std::move(Consumer), std::move(TDG), std::move(TRC));
+}
+
+void TypoCorrection::addCorrectionDecl(NamedDecl *CDecl) {
+ if (!CDecl) return;
+
+ if (isKeyword())
+ CorrectionDecls.clear();
+
+ CorrectionDecls.push_back(CDecl);
+
+ if (!CorrectionName)
+ CorrectionName = CDecl->getDeclName();
+}
+
+std::string TypoCorrection::getAsString(const LangOptions &LO) const {
+ if (CorrectionNameSpec) {
+ std::string tmpBuffer;
+ llvm::raw_string_ostream PrefixOStream(tmpBuffer);
+ CorrectionNameSpec->print(PrefixOStream, PrintingPolicy(LO));
+ PrefixOStream << CorrectionName;
+ return PrefixOStream.str();
+ }
+
+ return CorrectionName.getAsString();
+}
+
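+/// Default candidate validation: keywords are accepted whenever any keyword
+/// category is wanted; declaration candidates must include a non-type result
+/// unless type specifiers are acceptable, and taking the address of an
+/// unqualified non-static method is rejected.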
+bool CorrectionCandidateCallback::ValidateCandidate(
+ const TypoCorrection &candidate) {
+ if (!candidate.isResolved())
+ return true;
+
+ if (candidate.isKeyword())
+ return WantTypeSpecifiers || WantExpressionKeywords || WantCXXNamedCasts ||
+ WantRemainingKeywords || WantObjCSuper;
+
+ bool HasNonType = false;
+ bool HasStaticMethod = false;
+ bool HasNonStaticMethod = false;
+ for (Decl *D : candidate) {
+ if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(D))
+ D = FTD->getTemplatedDecl();
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (Method->isStatic())
+ HasStaticMethod = true;
+ else
+ HasNonStaticMethod = true;
+ }
+ if (!isa<TypeDecl>(D))
+ HasNonType = true;
+ }
+
+ if (IsAddressOfOperand && HasNonStaticMethod && !HasStaticMethod &&
+ !candidate.getCorrectionSpecifier())
+ return false;
+
+ return WantTypeSpecifiers || HasNonType;
+}
+
+FunctionCallFilterCCC::FunctionCallFilterCCC(Sema &SemaRef, unsigned NumArgs,
+ bool HasExplicitTemplateArgs,
+ MemberExpr *ME)
+ : NumArgs(NumArgs), HasExplicitTemplateArgs(HasExplicitTemplateArgs),
+ CurContext(SemaRef.CurContext), MemberFn(ME) {
+ WantTypeSpecifiers = false;
+ WantFunctionLikeCasts = SemaRef.getLangOpts().CPlusPlus && NumArgs == 1;
+ WantRemainingKeywords = false;
+}
+
+bool FunctionCallFilterCCC::ValidateCandidate(const TypoCorrection &candidate) {
+ if (!candidate.getCorrectionDecl())
+ return candidate.isKeyword();
+
+ for (auto *C : candidate) {
+ FunctionDecl *FD = nullptr;
+ NamedDecl *ND = C->getUnderlyingDecl();
+ if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(ND))
+ FD = FTD->getTemplatedDecl();
+ if (!HasExplicitTemplateArgs && !FD) {
+ if (!(FD = dyn_cast<FunctionDecl>(ND)) && isa<ValueDecl>(ND)) {
+ // If the Decl is neither a function nor a template function,
+ // determine if it is a pointer or reference to a function. If so,
+ // check against the number of arguments expected for the pointee.
+ QualType ValType = cast<ValueDecl>(ND)->getType();
+ if (ValType->isAnyPointerType() || ValType->isReferenceType())
+ ValType = ValType->getPointeeType();
+ if (const FunctionProtoType *FPT = ValType->getAs<FunctionProtoType>())
+ if (FPT->getNumParams() == NumArgs)
+ return true;
+ }
+ }
+
+ // Skip the current candidate if it is not a FunctionDecl or does not accept
+ // the current number of arguments.
+ if (!FD || !(FD->getNumParams() >= NumArgs &&
+ FD->getMinRequiredArguments() <= NumArgs))
+ continue;
+
+ // If the current candidate is a non-static C++ method, skip the candidate
+ // unless the method being corrected--or the current DeclContext, if the
+ // function being corrected is not a method--is a method in the same class
+ // or a descendant class of the candidate's parent class.
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ if (MemberFn || !MD->isStatic()) {
+ CXXMethodDecl *CurMD =
+ MemberFn
+ ? dyn_cast_or_null<CXXMethodDecl>(MemberFn->getMemberDecl())
+ : dyn_cast_or_null<CXXMethodDecl>(CurContext);
+ CXXRecordDecl *CurRD =
+ CurMD ? CurMD->getParent()->getCanonicalDecl() : nullptr;
+ CXXRecordDecl *RD = MD->getParent()->getCanonicalDecl();
+ if (!CurRD || (CurRD != RD && !CurRD->isDerivedFrom(RD)))
+ continue;
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
+void Sema::diagnoseTypo(const TypoCorrection &Correction,
+ const PartialDiagnostic &TypoDiag,
+ bool ErrorRecovery) {
+ diagnoseTypo(Correction, TypoDiag, PDiag(diag::note_previous_decl),
+ ErrorRecovery);
+}
+
+/// Find which declaration we should import to provide the definition of
+/// the given declaration.
+static NamedDecl *getDefinitionToImport(NamedDecl *D) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(D))
+ return VD->getDefinition();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ return FD->isDefined(FD) ? const_cast<FunctionDecl*>(FD) : nullptr;
+ if (TagDecl *TD = dyn_cast<TagDecl>(D))
+ return TD->getDefinition();
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D))
+ return ID->getDefinition();
+ if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D))
+ return PD->getDefinition();
+ if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D))
+ return getDefinitionToImport(TD->getTemplatedDecl());
+ return nullptr;
+}
+
+void Sema::diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
+ bool NeedDefinition, bool Recover) {
+ assert(!isVisible(Decl) && "missing import for non-hidden decl?");
+
+ // Suggest importing a module providing the definition of this entity, if
+ // possible.
+ NamedDecl *Def = getDefinitionToImport(Decl);
+ if (!Def)
+ Def = Decl;
+
+ // FIXME: Add a Fix-It that imports the corresponding module or includes
+ // the header.
+ Module *Owner = getOwningModule(Decl);
+ assert(Owner && "definition of hidden declaration is not in a module");
+
+ llvm::SmallVector<Module*, 8> OwningModules;
+ OwningModules.push_back(Owner);
+ auto Merged = Context.getModulesWithMergedDefinition(Decl);
+ OwningModules.insert(OwningModules.end(), Merged.begin(), Merged.end());
+
+ diagnoseMissingImport(Loc, Decl, Decl->getLocation(), OwningModules,
+ NeedDefinition ? MissingImportKind::Definition
+ : MissingImportKind::Declaration,
+ Recover);
+}
+
+void Sema::diagnoseMissingImport(SourceLocation UseLoc, NamedDecl *Decl,
+ SourceLocation DeclLoc,
+ ArrayRef<Module *> Modules,
+ MissingImportKind MIK, bool Recover) {
+ assert(!Modules.empty());
+
+ if (Modules.size() > 1) {
+ std::string ModuleList;
+ unsigned N = 0;
+ for (Module *M : Modules) {
+ ModuleList += "\n ";
+ if (++N == 5 && N != Modules.size()) {
+ ModuleList += "[...]";
+ break;
+ }
+ ModuleList += M->getFullModuleName();
+ }
+
+ Diag(UseLoc, diag::err_module_unimported_use_multiple)
+ << (int)MIK << Decl << ModuleList;
+ } else {
+ Diag(UseLoc, diag::err_module_unimported_use)
+ << (int)MIK << Decl << Modules[0]->getFullModuleName();
+ }
+
+ unsigned DiagID;
+ switch (MIK) {
+ case MissingImportKind::Declaration:
+ DiagID = diag::note_previous_declaration;
+ break;
+ case MissingImportKind::Definition:
+ DiagID = diag::note_previous_definition;
+ break;
+ case MissingImportKind::DefaultArgument:
+ DiagID = diag::note_default_argument_declared_here;
+ break;
+ }
+ Diag(DeclLoc, DiagID);
+
+ // Try to recover by implicitly importing this module.
+ if (Recover)
+ createImplicitModuleImportForErrorRecovery(UseLoc, Modules[0]);
+}
+
+/// \brief Diagnose a successfully-corrected typo. Separated from the correction
+/// itself to allow external validation of the result, etc.
+///
+/// \param Correction The result of performing typo correction.
+/// \param TypoDiag The diagnostic to produce. This will have the corrected
+/// string added to it (and usually also a fixit).
+/// \param PrevNote A note to use when indicating the location of the entity to
+/// which we are correcting. Will have the correction string added to it.
+/// \param ErrorRecovery If \c true (the default), the caller is going to
+/// recover from the typo as if the corrected string had been typed.
+/// In this case, \c TypoDiag must be an error, and we will attach a fixit
+/// to it.
+void Sema::diagnoseTypo(const TypoCorrection &Correction,
+ const PartialDiagnostic &TypoDiag,
+ const PartialDiagnostic &PrevNote,
+ bool ErrorRecovery) {
+ std::string CorrectedStr = Correction.getAsString(getLangOpts());
+ std::string CorrectedQuotedStr = Correction.getQuoted(getLangOpts());
+ FixItHint FixTypo = FixItHint::CreateReplacement(
+ Correction.getCorrectionRange(), CorrectedStr);
+
+ // Maybe we're just missing a module import.
+ if (Correction.requiresImport()) {
+ NamedDecl *Decl = Correction.getFoundDecl();
+ assert(Decl && "import required but no declaration to import");
+
+ diagnoseMissingImport(Correction.getCorrectionRange().getBegin(), Decl,
+ /*NeedDefinition*/ false, ErrorRecovery);
+ return;
+ }
+
+ Diag(Correction.getCorrectionRange().getBegin(), TypoDiag)
+ << CorrectedQuotedStr << (ErrorRecovery ? FixTypo : FixItHint());
+
+ NamedDecl *ChosenDecl =
+ Correction.isKeyword() ? nullptr : Correction.getFoundDecl();
+ if (PrevNote.getDiagID() && ChosenDecl)
+ Diag(ChosenDecl->getLocation(), PrevNote)
+ << CorrectedQuotedStr << (ErrorRecovery ? FixItHint() : FixTypo);
+}
+
+TypoExpr *Sema::createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
+ TypoDiagnosticGenerator TDG,
+ TypoRecoveryCallback TRC) {
+ assert(TCC && "createDelayedTypo requires a valid TypoCorrectionConsumer");
+ auto TE = new (Context) TypoExpr(Context.DependentTy);
+ auto &State = DelayedTypos[TE];
+ State.Consumer = std::move(TCC);
+ State.DiagHandler = std::move(TDG);
+ State.RecoveryHandler = std::move(TRC);
+ return TE;
+}
+
+const Sema::TypoExprState &Sema::getTypoExprState(TypoExpr *TE) const {
+ auto Entry = DelayedTypos.find(TE);
+ assert(Entry != DelayedTypos.end() &&
+ "Failed to get the state for a TypoExpr!");
+ return Entry->second;
+}
+
+void Sema::clearDelayedTypo(TypoExpr *TE) {
+ DelayedTypos.erase(TE);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp
new file mode 100644
index 0000000..1cb84e4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp
@@ -0,0 +1,2451 @@
+//===--- SemaObjCProperty.cpp - Semantic Analysis for ObjC @property ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for Objective C @property and
+// @synthesize declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Initialization.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Grammar actions.
+//===----------------------------------------------------------------------===//
+
+/// getImpliedARCOwnership - Given a set of property attributes and a
+/// type, infer an expected lifetime. The type's ownership qualification
+/// is not considered.
+///
+/// Returns OCL_None if the attributes as stated do not imply an ownership.
+/// Never returns OCL_Autoreleasing.
+static Qualifiers::ObjCLifetime getImpliedARCOwnership(
+ ObjCPropertyDecl::PropertyAttributeKind attrs,
+ QualType type) {
+ // retain, strong, copy, weak, and unsafe_unretained are only legal
+ // on properties of retainable pointer type.
+ if (attrs & (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_strong |
+ ObjCPropertyDecl::OBJC_PR_copy)) {
+ return Qualifiers::OCL_Strong;
+ } else if (attrs & ObjCPropertyDecl::OBJC_PR_weak) {
+ return Qualifiers::OCL_Weak;
+ } else if (attrs & ObjCPropertyDecl::OBJC_PR_unsafe_unretained) {
+ return Qualifiers::OCL_ExplicitNone;
+ }
+
+ // assign can appear on other types, so we have to check the
+ // property type.
+ if (attrs & ObjCPropertyDecl::OBJC_PR_assign &&
+ type->isObjCRetainableType()) {
+ return Qualifiers::OCL_ExplicitNone;
+ }
+
+ return Qualifiers::OCL_None;
+}
+
+/// Check the internal consistency of a property declaration with
+/// an explicit ownership qualifier.
+static void checkPropertyDeclWithOwnership(Sema &S,
+ ObjCPropertyDecl *property) {
+ if (property->isInvalidDecl()) return;
+
+ ObjCPropertyDecl::PropertyAttributeKind propertyKind
+ = property->getPropertyAttributes();
+ Qualifiers::ObjCLifetime propertyLifetime
+ = property->getType().getObjCLifetime();
+
+ assert(propertyLifetime != Qualifiers::OCL_None);
+
+ Qualifiers::ObjCLifetime expectedLifetime
+ = getImpliedARCOwnership(propertyKind, property->getType());
+ if (!expectedLifetime) {
+ // We have a lifetime qualifier but no dominating property
+ // attribute. That's okay, but restore reasonable invariants by
+ // setting the property attribute according to the lifetime
+ // qualifier.
+ ObjCPropertyDecl::PropertyAttributeKind attr;
+ if (propertyLifetime == Qualifiers::OCL_Strong) {
+ attr = ObjCPropertyDecl::OBJC_PR_strong;
+ } else if (propertyLifetime == Qualifiers::OCL_Weak) {
+ attr = ObjCPropertyDecl::OBJC_PR_weak;
+ } else {
+ assert(propertyLifetime == Qualifiers::OCL_ExplicitNone);
+ attr = ObjCPropertyDecl::OBJC_PR_unsafe_unretained;
+ }
+ property->setPropertyAttributes(attr);
+ return;
+ }
+
+ if (propertyLifetime == expectedLifetime) return;
+
+ property->setInvalidDecl();
+ S.Diag(property->getLocation(),
+ diag::err_arc_inconsistent_property_ownership)
+ << property->getDeclName()
+ << expectedLifetime
+ << propertyLifetime;
+}
+
+/// \brief Check this Objective-C property against a property declared in the
+/// given protocol.
+static void
+CheckPropertyAgainstProtocol(Sema &S, ObjCPropertyDecl *Prop,
+ ObjCProtocolDecl *Proto,
+ llvm::SmallPtrSetImpl<ObjCProtocolDecl *> &Known) {
+ // Have we seen this protocol before?
+ if (!Known.insert(Proto).second)
+ return;
+
+ // Look for a property with the same name.
+ DeclContext::lookup_result R = Proto->lookup(Prop->getDeclName());
+ for (unsigned I = 0, N = R.size(); I != N; ++I) {
+ if (ObjCPropertyDecl *ProtoProp = dyn_cast<ObjCPropertyDecl>(R[I])) {
+ S.DiagnosePropertyMismatch(Prop, ProtoProp, Proto->getIdentifier(), true);
+ return;
+ }
+ }
+
+ // Check this property against any protocols we inherit.
+ for (auto *P : Proto->protocols())
+ CheckPropertyAgainstProtocol(S, Prop, P, Known);
+}
+
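+/// Deduce the ownership attribute implied by a property type's qualifiers,
+/// e.g. a __weak-qualified type implies the 'weak' attribute.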
+static unsigned deducePropertyOwnershipFromType(Sema &S, QualType T) {
+ // In GC mode, just look for the __weak qualifier.
+ if (S.getLangOpts().getGC() != LangOptions::NonGC) {
+ if (T.isObjCGCWeak()) return ObjCDeclSpec::DQ_PR_weak;
+
+ // In ARC/MRC, look for an explicit ownership qualifier.
+ // For some reason, this only applies to __weak.
+ } else if (auto ownership = T.getObjCLifetime()) {
+ switch (ownership) {
+ case Qualifiers::OCL_Weak:
+ return ObjCDeclSpec::DQ_PR_weak;
+ case Qualifiers::OCL_Strong:
+ return ObjCDeclSpec::DQ_PR_strong;
+ case Qualifiers::OCL_ExplicitNone:
+ return ObjCDeclSpec::DQ_PR_unsafe_unretained;
+ case Qualifiers::OCL_Autoreleasing:
+ case Qualifiers::OCL_None:
+ return 0;
+ }
+ llvm_unreachable("bad qualifier");
+ }
+
+ return 0;
+}
+
+static const unsigned OwnershipMask =
+ (ObjCPropertyDecl::OBJC_PR_assign |
+ ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_copy |
+ ObjCPropertyDecl::OBJC_PR_weak |
+ ObjCPropertyDecl::OBJC_PR_strong |
+ ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
+
+static unsigned getOwnershipRule(unsigned attr) {
+ unsigned result = attr & OwnershipMask;
+
+ // From an ownership perspective, assign and unsafe_unretained are
+ // identical; make sure one also implies the other.
+ if (result & (ObjCPropertyDecl::OBJC_PR_assign |
+ ObjCPropertyDecl::OBJC_PR_unsafe_unretained)) {
+ result |= ObjCPropertyDecl::OBJC_PR_assign |
+ ObjCPropertyDecl::OBJC_PR_unsafe_unretained;
+ }
+
+ return result;
+}
+
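+/// Parser callback for an @property declaration. Deduces any ownership
+/// attributes implied by the property's type, creates the ObjCPropertyDecl
+/// (properties in class extensions take a separate path), and checks the
+/// result against superclass and protocol properties.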
+Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
+ SourceLocation LParenLoc,
+ FieldDeclarator &FD,
+ ObjCDeclSpec &ODS,
+ Selector GetterSel,
+ Selector SetterSel,
+ tok::ObjCKeywordKind MethodImplKind,
+ DeclContext *lexicalDC) {
+ unsigned Attributes = ODS.getPropertyAttributes();
+ FD.D.setObjCWeakProperty((Attributes & ObjCDeclSpec::DQ_PR_weak) != 0);
+ TypeSourceInfo *TSI = GetTypeForDeclarator(FD.D, S);
+ QualType T = TSI->getType();
+ if (!getOwnershipRule(Attributes)) {
+ Attributes |= deducePropertyOwnershipFromType(*this, T);
+ }
+ bool isReadWrite = ((Attributes & ObjCDeclSpec::DQ_PR_readwrite) ||
+ // default is readwrite!
+ !(Attributes & ObjCDeclSpec::DQ_PR_readonly));
+
+ // Proceed with constructing the ObjCPropertyDecls.
+ ObjCContainerDecl *ClassDecl = cast<ObjCContainerDecl>(CurContext);
+ ObjCPropertyDecl *Res = nullptr;
+ if (ObjCCategoryDecl *CDecl = dyn_cast<ObjCCategoryDecl>(ClassDecl)) {
+ if (CDecl->IsClassExtension()) {
+ Res = HandlePropertyInClassExtension(S, AtLoc, LParenLoc,
+ FD, GetterSel, SetterSel,
+ isReadWrite,
+ Attributes,
+ ODS.getPropertyAttributes(),
+ T, TSI, MethodImplKind);
+ if (!Res)
+ return nullptr;
+ }
+ }
+
+ if (!Res) {
+ Res = CreatePropertyDecl(S, ClassDecl, AtLoc, LParenLoc, FD,
+ GetterSel, SetterSel, isReadWrite,
+ Attributes, ODS.getPropertyAttributes(),
+ T, TSI, MethodImplKind);
+ if (lexicalDC)
+ Res->setLexicalDeclContext(lexicalDC);
+ }
+
+ // Validate the attributes on the @property.
+ CheckObjCPropertyAttributes(Res, AtLoc, Attributes,
+ (isa<ObjCInterfaceDecl>(ClassDecl) ||
+ isa<ObjCProtocolDecl>(ClassDecl)));
+
+ // Check consistency if the type has explicit ownership qualification.
+ if (Res->getType().getObjCLifetime())
+ checkPropertyDeclWithOwnership(*this, Res);
+
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 16> KnownProtos;
+ if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(ClassDecl)) {
+ // For a class, compare the property against a property in our superclass.
+ bool FoundInSuper = false;
+ ObjCInterfaceDecl *CurrentInterfaceDecl = IFace;
+ while (ObjCInterfaceDecl *Super = CurrentInterfaceDecl->getSuperClass()) {
+ DeclContext::lookup_result R = Super->lookup(Res->getDeclName());
+ for (unsigned I = 0, N = R.size(); I != N; ++I) {
+ if (ObjCPropertyDecl *SuperProp = dyn_cast<ObjCPropertyDecl>(R[I])) {
+ DiagnosePropertyMismatch(Res, SuperProp, Super->getIdentifier(), false);
+ FoundInSuper = true;
+ break;
+ }
+ }
+ if (FoundInSuper)
+ break;
+ else
+ CurrentInterfaceDecl = Super;
+ }
+
+ if (FoundInSuper) {
+ // Also compare the property against a property in our protocols.
+ for (auto *P : CurrentInterfaceDecl->protocols()) {
+ CheckPropertyAgainstProtocol(*this, Res, P, KnownProtos);
+ }
+ } else {
+ // Slower path: look in all protocols we referenced.
+ for (auto *P : IFace->all_referenced_protocols()) {
+ CheckPropertyAgainstProtocol(*this, Res, P, KnownProtos);
+ }
+ }
+ } else if (ObjCCategoryDecl *Cat = dyn_cast<ObjCCategoryDecl>(ClassDecl)) {
+ // We don't check class extensions here, because properties in a class
+ // extension are meant to override some of the attributes, and that
+ // checking has already been done when the property in the class extension
+ // was constructed.
+ if (!Cat->IsClassExtension())
+ for (auto *P : Cat->protocols())
+ CheckPropertyAgainstProtocol(*this, Res, P, KnownProtos);
+ } else {
+ ObjCProtocolDecl *Proto = cast<ObjCProtocolDecl>(ClassDecl);
+ for (auto *P : Proto->protocols())
+ CheckPropertyAgainstProtocol(*this, Res, P, KnownProtos);
+ }
+
+ ActOnDocumentableDecl(Res);
+ return Res;
+}
+
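+/// Translate the parser-level ObjCDeclSpec attribute bits into the
+/// corresponding AST-level PropertyAttributeKind bits, preserving exactly
+/// what was written in the source.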
+static ObjCPropertyDecl::PropertyAttributeKind
+makePropertyAttributesAsWritten(unsigned Attributes) {
+ unsigned attributesAsWritten = 0;
+ if (Attributes & ObjCDeclSpec::DQ_PR_readonly)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_readonly;
+ if (Attributes & ObjCDeclSpec::DQ_PR_readwrite)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_readwrite;
+ if (Attributes & ObjCDeclSpec::DQ_PR_getter)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_getter;
+ if (Attributes & ObjCDeclSpec::DQ_PR_setter)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_setter;
+ if (Attributes & ObjCDeclSpec::DQ_PR_assign)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_assign;
+ if (Attributes & ObjCDeclSpec::DQ_PR_retain)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_retain;
+ if (Attributes & ObjCDeclSpec::DQ_PR_strong)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_strong;
+ if (Attributes & ObjCDeclSpec::DQ_PR_weak)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_weak;
+ if (Attributes & ObjCDeclSpec::DQ_PR_copy)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_copy;
+ if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_unsafe_unretained;
+ if (Attributes & ObjCDeclSpec::DQ_PR_nonatomic)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_nonatomic;
+ if (Attributes & ObjCDeclSpec::DQ_PR_atomic)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_atomic;
+
+ return (ObjCPropertyDecl::PropertyAttributeKind)attributesAsWritten;
+}
+
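+/// Re-lex the property attribute list to find the source location of the
+/// given attribute name; fails if the list comes from a macro expansion or
+/// the name is not present.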
+static bool LocPropertyAttribute(ASTContext &Context, const char *attrName,
+ SourceLocation LParenLoc, SourceLocation &Loc) {
+ if (LParenLoc.isMacroID())
+ return false;
+
+ SourceManager &SM = Context.getSourceManager();
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(LParenLoc);
+ // Try to load the file buffer.
+ bool invalidTemp = false;
+ StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
+ if (invalidTemp)
+ return false;
+ const char *tokenBegin = file.data() + locInfo.second;
+
+ // Lex from the start of the given location.
+ Lexer lexer(SM.getLocForStartOfFile(locInfo.first),
+ Context.getLangOpts(),
+ file.begin(), tokenBegin, file.end());
+ Token Tok;
+ do {
+ lexer.LexFromRawLexer(Tok);
+ if (Tok.is(tok::raw_identifier) && Tok.getRawIdentifier() == attrName) {
+ Loc = Tok.getLocation();
+ return true;
+ }
+ } while (Tok.isNot(tok::r_paren));
+ return false;
+}
+
+/// Check for a mismatch in the atomicity of the given properties.
+static void checkAtomicPropertyMismatch(Sema &S,
+ ObjCPropertyDecl *OldProperty,
+ ObjCPropertyDecl *NewProperty,
+ bool PropagateAtomicity) {
+ // If the atomicity of both matches, we're done.
+ bool OldIsAtomic =
+ (OldProperty->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ == 0;
+ bool NewIsAtomic =
+ (NewProperty->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ == 0;
+ if (OldIsAtomic == NewIsAtomic) return;
+
+ // Determine whether the given property is readonly and implicitly
+ // atomic.
+ auto isImplicitlyReadonlyAtomic = [](ObjCPropertyDecl *Property) -> bool {
+ // Is it readonly?
+ auto Attrs = Property->getPropertyAttributes();
+ if ((Attrs & ObjCPropertyDecl::OBJC_PR_readonly) == 0) return false;
+
+ // Is it nonatomic?
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_nonatomic) return false;
+
+ // Was 'atomic' specified directly?
+ if (Property->getPropertyAttributesAsWritten() &
+ ObjCPropertyDecl::OBJC_PR_atomic)
+ return false;
+
+ return true;
+ };
+
+ // If we're allowed to propagate atomicity, and the new property did
+ // not specify atomicity at all, propagate.
+ const unsigned AtomicityMask =
+ (ObjCPropertyDecl::OBJC_PR_atomic | ObjCPropertyDecl::OBJC_PR_nonatomic);
+ if (PropagateAtomicity &&
+ ((NewProperty->getPropertyAttributesAsWritten() & AtomicityMask) == 0)) {
+ unsigned Attrs = NewProperty->getPropertyAttributes();
+ Attrs = Attrs & ~AtomicityMask;
+ if (OldIsAtomic)
+ Attrs |= ObjCPropertyDecl::OBJC_PR_atomic;
+ else
+ Attrs |= ObjCPropertyDecl::OBJC_PR_nonatomic;
+
+ NewProperty->overwritePropertyAttributes(Attrs);
+ return;
+ }
+
+ // One of the properties is atomic; if it's a readonly property, and
+ // 'atomic' wasn't explicitly specified, we're okay.
+ if ((OldIsAtomic && isImplicitlyReadonlyAtomic(OldProperty)) ||
+ (NewIsAtomic && isImplicitlyReadonlyAtomic(NewProperty)))
+ return;
+
+ // Diagnose the conflict.
+ const IdentifierInfo *OldContextName;
+ auto *OldDC = OldProperty->getDeclContext();
+ if (auto Category = dyn_cast<ObjCCategoryDecl>(OldDC))
+ OldContextName = Category->getClassInterface()->getIdentifier();
+ else
+ OldContextName = cast<ObjCContainerDecl>(OldDC)->getIdentifier();
+
+ S.Diag(NewProperty->getLocation(), diag::warn_property_attribute)
+ << NewProperty->getDeclName() << "atomic"
+ << OldContextName;
+ S.Diag(OldProperty->getLocation(), diag::note_property_declare);
+}
+
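+/// Handle an @property declared inside a class extension. Such a property
+/// may only redeclare a readonly primary-class property as readwrite, and
+/// it adopts the getter and ownership of the original declaration.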
+ObjCPropertyDecl *
+Sema::HandlePropertyInClassExtension(Scope *S,
+ SourceLocation AtLoc,
+ SourceLocation LParenLoc,
+ FieldDeclarator &FD,
+ Selector GetterSel, Selector SetterSel,
+ const bool isReadWrite,
+ unsigned &Attributes,
+ const unsigned AttributesAsWritten,
+ QualType T,
+ TypeSourceInfo *TSI,
+ tok::ObjCKeywordKind MethodImplKind) {
+ ObjCCategoryDecl *CDecl = cast<ObjCCategoryDecl>(CurContext);
+ // Diagnose if this property is already in continuation class.
+ DeclContext *DC = CurContext;
+ IdentifierInfo *PropertyId = FD.D.getIdentifier();
+ ObjCInterfaceDecl *CCPrimary = CDecl->getClassInterface();
+
+ // We need to look in the @interface to see if the @property was
+ // already declared.
+ if (!CCPrimary) {
+ Diag(CDecl->getLocation(), diag::err_continuation_class);
+ return nullptr;
+ }
+
+ // Find the property in the extended class's primary class or
+ // extensions.
+ ObjCPropertyDecl *PIDecl =
+ CCPrimary->FindPropertyVisibleInPrimaryClass(PropertyId);
+
+ // If we found a property in an extension, complain.
+ if (PIDecl && isa<ObjCCategoryDecl>(PIDecl->getDeclContext())) {
+ Diag(AtLoc, diag::err_duplicate_property);
+ Diag(PIDecl->getLocation(), diag::note_property_declare);
+ return nullptr;
+ }
+
+ // Check for consistency with the previous declaration, if there is one.
+ if (PIDecl) {
+ // A readonly property declared in the primary class can be refined
+ // by adding a readwrite property within an extension.
+ // Anything else is an error.
+ if (!(PIDecl->isReadOnly() && isReadWrite)) {
+ // Tailor the diagnostics for the common case where a readwrite
+ // property is declared both in the @interface and the continuation.
+ // This is a common error where the user often intended the original
+ // declaration to be readonly.
+ unsigned diag =
+ (Attributes & ObjCDeclSpec::DQ_PR_readwrite) &&
+ (PIDecl->getPropertyAttributesAsWritten() &
+ ObjCPropertyDecl::OBJC_PR_readwrite)
+ ? diag::err_use_continuation_class_redeclaration_readwrite
+ : diag::err_use_continuation_class;
+ Diag(AtLoc, diag)
+ << CCPrimary->getDeclName();
+ Diag(PIDecl->getLocation(), diag::note_property_declare);
+ return nullptr;
+ }
+
+ // Check for consistency of getters.
+ if (PIDecl->getGetterName() != GetterSel) {
+ // If the getter was written explicitly, complain.
+ if (AttributesAsWritten & ObjCDeclSpec::DQ_PR_getter) {
+ Diag(AtLoc, diag::warn_property_redecl_getter_mismatch)
+ << PIDecl->getGetterName() << GetterSel;
+ Diag(PIDecl->getLocation(), diag::note_property_declare);
+ }
+
+ // Always adopt the getter from the original declaration.
+ GetterSel = PIDecl->getGetterName();
+ Attributes |= ObjCDeclSpec::DQ_PR_getter;
+ }
+
+ // Check consistency of ownership.
+ unsigned ExistingOwnership
+ = getOwnershipRule(PIDecl->getPropertyAttributes());
+ unsigned NewOwnership = getOwnershipRule(Attributes);
+ if (ExistingOwnership && NewOwnership != ExistingOwnership) {
+ // If the ownership was written explicitly, complain.
+ if (getOwnershipRule(AttributesAsWritten)) {
+ Diag(AtLoc, diag::warn_property_attr_mismatch);
+ Diag(PIDecl->getLocation(), diag::note_property_declare);
+ }
+
+ // Take the ownership from the original property.
+ Attributes = (Attributes & ~OwnershipMask) | ExistingOwnership;
+ }
+
+    // If the redeclaration is 'weak' but the original property is not, warn
+    // about the implicitly mismatched ownership.
+ if ((Attributes & ObjCPropertyDecl::OBJC_PR_weak) &&
+ !(PIDecl->getPropertyAttributesAsWritten()
+ & ObjCPropertyDecl::OBJC_PR_weak) &&
+ PIDecl->getType()->getAs<ObjCObjectPointerType>() &&
+ PIDecl->getType().getObjCLifetime() == Qualifiers::OCL_None) {
+ Diag(AtLoc, diag::warn_property_implicitly_mismatched);
+ Diag(PIDecl->getLocation(), diag::note_property_declare);
+ }
+ }
+
+ // Create a new ObjCPropertyDecl with the DeclContext being
+ // the class extension.
+ ObjCPropertyDecl *PDecl = CreatePropertyDecl(S, CDecl, AtLoc, LParenLoc,
+ FD, GetterSel, SetterSel,
+ isReadWrite,
+ Attributes, AttributesAsWritten,
+ T, TSI, MethodImplKind, DC);
+
+ // If there was no declaration of a property with the same name in
+ // the primary class, we're done.
+ if (!PIDecl) {
+ ProcessPropertyDecl(PDecl);
+ return PDecl;
+ }
+
+ if (!Context.hasSameType(PIDecl->getType(), PDecl->getType())) {
+ bool IncompatibleObjC = false;
+ QualType ConvertedType;
+    // Relax the strict type matching for the property type in a continuation
+    // class: allow the property object type in the continuation class to
+    // differ as long as it narrows the object type of the property in the
+    // primary class. Note that this conversion is safe only because the wider
+    // type is for a 'readonly' property in the primary class and the
+    // 'narrowed' type is for a 'readwrite' property in the continuation class.
+ QualType PrimaryClassPropertyT = Context.getCanonicalType(PIDecl->getType());
+ QualType ClassExtPropertyT = Context.getCanonicalType(PDecl->getType());
+ if (!isa<ObjCObjectPointerType>(PrimaryClassPropertyT) ||
+ !isa<ObjCObjectPointerType>(ClassExtPropertyT) ||
+ (!isObjCPointerConversion(ClassExtPropertyT, PrimaryClassPropertyT,
+ ConvertedType, IncompatibleObjC))
+ || IncompatibleObjC) {
+ Diag(AtLoc,
+ diag::err_type_mismatch_continuation_class) << PDecl->getType();
+ Diag(PIDecl->getLocation(), diag::note_property_declare);
+ return nullptr;
+ }
+ }
+
+ // Check that atomicity of property in class extension matches the previous
+ // declaration.
+ checkAtomicPropertyMismatch(*this, PIDecl, PDecl, true);
+
+ // Make sure getter/setter are appropriately synthesized.
+ ProcessPropertyDecl(PDecl);
+ return PDecl;
+}
+
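+/// CreatePropertyDecl - Build the ObjCPropertyDecl and map the written
+/// attributes onto the semantic ones. Note, as the code below spells out,
+/// that a readwrite property with no ownership attribute defaults to
+/// 'assign' outside of ARC; e.g. (illustrative):
+/// \code
+///   @property id delegate; // implicitly (atomic, assign) under MRR
+/// \endcode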
+ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
+ ObjCContainerDecl *CDecl,
+ SourceLocation AtLoc,
+ SourceLocation LParenLoc,
+ FieldDeclarator &FD,
+ Selector GetterSel,
+ Selector SetterSel,
+ const bool isReadWrite,
+ const unsigned Attributes,
+ const unsigned AttributesAsWritten,
+ QualType T,
+ TypeSourceInfo *TInfo,
+ tok::ObjCKeywordKind MethodImplKind,
+ DeclContext *lexicalDC){
+ IdentifierInfo *PropertyId = FD.D.getIdentifier();
+
+ // Property defaults to 'assign' if it is readwrite, unless this is ARC
+ // and the type is retainable.
+ bool isAssign;
+ if (Attributes & (ObjCDeclSpec::DQ_PR_assign |
+ ObjCDeclSpec::DQ_PR_unsafe_unretained)) {
+ isAssign = true;
+ } else if (getOwnershipRule(Attributes) || !isReadWrite) {
+ isAssign = false;
+ } else {
+ isAssign = (!getLangOpts().ObjCAutoRefCount ||
+ !T->isObjCRetainableType());
+ }
+
+  // Issue a warning if the property defaulted to 'assign' and its object
+  // type, which is GC'able, conforms to the NSCopying protocol.
+ if (getLangOpts().getGC() != LangOptions::NonGC &&
+ isAssign && !(Attributes & ObjCDeclSpec::DQ_PR_assign)) {
+ if (const ObjCObjectPointerType *ObjPtrTy =
+ T->getAs<ObjCObjectPointerType>()) {
+ ObjCInterfaceDecl *IDecl = ObjPtrTy->getObjectType()->getInterface();
+ if (IDecl)
+ if (ObjCProtocolDecl* PNSCopying =
+ LookupProtocol(&Context.Idents.get("NSCopying"), AtLoc))
+ if (IDecl->ClassImplementsProtocol(PNSCopying, true))
+ Diag(AtLoc, diag::warn_implements_nscopying) << PropertyId;
+ }
+ }
+
+ if (T->isObjCObjectType()) {
+ SourceLocation StarLoc = TInfo->getTypeLoc().getLocEnd();
+ StarLoc = getLocForEndOfToken(StarLoc);
+ Diag(FD.D.getIdentifierLoc(), diag::err_statically_allocated_object)
+ << FixItHint::CreateInsertion(StarLoc, "*");
+ T = Context.getObjCObjectPointerType(T);
+ SourceLocation TLoc = TInfo->getTypeLoc().getLocStart();
+ TInfo = Context.getTrivialTypeSourceInfo(T, TLoc);
+ }
+
+ DeclContext *DC = cast<DeclContext>(CDecl);
+ ObjCPropertyDecl *PDecl = ObjCPropertyDecl::Create(Context, DC,
+ FD.D.getIdentifierLoc(),
+ PropertyId, AtLoc,
+ LParenLoc, T, TInfo);
+
+ if (ObjCPropertyDecl *prevDecl =
+ ObjCPropertyDecl::findPropertyDecl(DC, PropertyId)) {
+ Diag(PDecl->getLocation(), diag::err_duplicate_property);
+ Diag(prevDecl->getLocation(), diag::note_property_declare);
+ PDecl->setInvalidDecl();
+ }
+ else {
+ DC->addDecl(PDecl);
+ if (lexicalDC)
+ PDecl->setLexicalDeclContext(lexicalDC);
+ }
+
+ if (T->isArrayType() || T->isFunctionType()) {
+ Diag(AtLoc, diag::err_property_type) << T;
+ PDecl->setInvalidDecl();
+ }
+
+ ProcessDeclAttributes(S, PDecl, FD.D);
+
+ // Regardless of setter/getter attribute, we save the default getter/setter
+ // selector names in anticipation of declaration of setter/getter methods.
+ PDecl->setGetterName(GetterSel);
+ PDecl->setSetterName(SetterSel);
+ PDecl->setPropertyAttributesAsWritten(
+ makePropertyAttributesAsWritten(AttributesAsWritten));
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_readonly)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readonly);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_getter)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_getter);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_setter)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_setter);
+
+ if (isReadWrite)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readwrite);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_retain)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_retain);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_strong)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_weak)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_weak);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_copy)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_copy);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
+
+ if (isAssign)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_assign);
+
+ // In the semantic attributes, one of nonatomic or atomic is always set.
+ if (Attributes & ObjCDeclSpec::DQ_PR_nonatomic)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_nonatomic);
+ else
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_atomic);
+
+  // 'unsafe_unretained' is an alias for 'assign'.
+ if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_assign);
+ if (isAssign)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
+
+ if (MethodImplKind == tok::objc_required)
+ PDecl->setPropertyImplementation(ObjCPropertyDecl::Required);
+ else if (MethodImplKind == tok::objc_optional)
+ PDecl->setPropertyImplementation(ObjCPropertyDecl::Optional);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_nullability)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_nullability);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_null_resettable)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_null_resettable);
+
+ return PDecl;
+}
+
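+/// checkARCPropertyImpl - Check that the ownership implied by a property's
+/// attributes agrees with the ARC lifetime of its backing ivar. A typical
+/// mismatch, sketched with hypothetical names:
+/// \code
+///   @interface Foo { __weak id _name; }
+///   @property (strong) id name;
+///   @end
+///   @implementation Foo
+///   @synthesize name = _name; // error: strong property, __weak ivar
+///   @end
+/// \endcode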
+static void checkARCPropertyImpl(Sema &S, SourceLocation propertyImplLoc,
+ ObjCPropertyDecl *property,
+ ObjCIvarDecl *ivar) {
+ if (property->isInvalidDecl() || ivar->isInvalidDecl()) return;
+
+ QualType ivarType = ivar->getType();
+ Qualifiers::ObjCLifetime ivarLifetime = ivarType.getObjCLifetime();
+
+ // The lifetime implied by the property's attributes.
+ Qualifiers::ObjCLifetime propertyLifetime =
+ getImpliedARCOwnership(property->getPropertyAttributes(),
+ property->getType());
+
+ // We're fine if they match.
+ if (propertyLifetime == ivarLifetime) return;
+
+ // None isn't a valid lifetime for an object ivar in ARC, and
+ // __autoreleasing is never valid; don't diagnose twice.
+ if ((ivarLifetime == Qualifiers::OCL_None &&
+ S.getLangOpts().ObjCAutoRefCount) ||
+ ivarLifetime == Qualifiers::OCL_Autoreleasing)
+ return;
+
+ // If the ivar is private, and it's implicitly __unsafe_unretained
+  // because of its type, then pretend it was actually implicitly
+ // __strong. This is only sound because we're processing the
+ // property implementation before parsing any method bodies.
+ if (ivarLifetime == Qualifiers::OCL_ExplicitNone &&
+ propertyLifetime == Qualifiers::OCL_Strong &&
+ ivar->getAccessControl() == ObjCIvarDecl::Private) {
+ SplitQualType split = ivarType.split();
+ if (split.Quals.hasObjCLifetime()) {
+ assert(ivarType->isObjCARCImplicitlyUnretainedType());
+ split.Quals.setObjCLifetime(Qualifiers::OCL_Strong);
+ ivarType = S.Context.getQualifiedType(split);
+ ivar->setType(ivarType);
+ return;
+ }
+ }
+
+ switch (propertyLifetime) {
+ case Qualifiers::OCL_Strong:
+ S.Diag(ivar->getLocation(), diag::err_arc_strong_property_ownership)
+ << property->getDeclName()
+ << ivar->getDeclName()
+ << ivarLifetime;
+ break;
+
+ case Qualifiers::OCL_Weak:
+ S.Diag(ivar->getLocation(), diag::error_weak_property)
+ << property->getDeclName()
+ << ivar->getDeclName();
+ break;
+
+ case Qualifiers::OCL_ExplicitNone:
+ S.Diag(ivar->getLocation(), diag::err_arc_assign_property_ownership)
+ << property->getDeclName()
+ << ivar->getDeclName()
+ << ((property->getPropertyAttributesAsWritten()
+ & ObjCPropertyDecl::OBJC_PR_assign) != 0);
+ break;
+
+ case Qualifiers::OCL_Autoreleasing:
+ llvm_unreachable("properties cannot be autoreleasing");
+
+ case Qualifiers::OCL_None:
+ // Any other property should be ignored.
+ return;
+ }
+
+ S.Diag(property->getLocation(), diag::note_property_declare);
+ if (propertyImplLoc.isValid())
+ S.Diag(propertyImplLoc, diag::note_property_synthesize);
+}
+
+/// setImpliedPropertyAttributeForReadOnlyProperty -
+/// This routine evaluates the lifetime attributes for a 'readonly'
+/// property with no known lifetime of its own, using its backing
+/// ivar's attributes, if any. If there is no backing ivar, the
+/// property's lifetime is assumed to be 'strong'.
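+/// For example, under ARC a 'readonly' property with no written ownership,
+/// synthesized against a __weak ivar, is treated as weak (illustrative):
+/// \code
+///   @interface Foo { __weak id _delegate; }
+///   @property (readonly) id delegate; // picks up 'weak' from the ivar
+///   @end
+/// \endcode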
+static void setImpliedPropertyAttributeForReadOnlyProperty(
+ ObjCPropertyDecl *property, ObjCIvarDecl *ivar) {
+ Qualifiers::ObjCLifetime propertyLifetime =
+ getImpliedARCOwnership(property->getPropertyAttributes(),
+ property->getType());
+ if (propertyLifetime != Qualifiers::OCL_None)
+ return;
+
+ if (!ivar) {
+    // If there is no backing ivar, make the property 'strong'.
+ property->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+ return;
+ }
+  // The property assumes the ownership of its backing ivar.
+ QualType ivarType = ivar->getType();
+ Qualifiers::ObjCLifetime ivarLifetime = ivarType.getObjCLifetime();
+ if (ivarLifetime == Qualifiers::OCL_Strong)
+ property->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+ else if (ivarLifetime == Qualifiers::OCL_Weak)
+ property->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_weak);
+ return;
+}
+
+/// DiagnosePropertyMismatchDeclInProtocols - Diagnose properties declared
+/// in inherited protocols with mismatched types, since any of them can
+/// be a candidate for synthesis.
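+/// For instance (an illustrative sketch):
+/// \code
+///   @protocol P1
+///   @property (nonatomic) NSString *title;
+///   @end
+///   @protocol P2
+///   @property (nonatomic) NSNumber *title; // mismatched type
+///   @end
+///   @interface Foo : NSObject <P1, P2>     // synthesizing 'title' warns
+///   @end
+/// \endcode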
+static void
+DiagnosePropertyMismatchDeclInProtocols(Sema &S, SourceLocation AtLoc,
+ ObjCInterfaceDecl *ClassDecl,
+ ObjCPropertyDecl *Property) {
+ ObjCInterfaceDecl::ProtocolPropertyMap PropMap;
+ for (const auto *PI : ClassDecl->all_referenced_protocols()) {
+ if (const ObjCProtocolDecl *PDecl = PI->getDefinition())
+ PDecl->collectInheritedProtocolProperties(Property, PropMap);
+ }
+ if (ObjCInterfaceDecl *SDecl = ClassDecl->getSuperClass())
+ while (SDecl) {
+ for (const auto *PI : SDecl->all_referenced_protocols()) {
+ if (const ObjCProtocolDecl *PDecl = PI->getDefinition())
+ PDecl->collectInheritedProtocolProperties(Property, PropMap);
+ }
+ SDecl = SDecl->getSuperClass();
+ }
+
+ if (PropMap.empty())
+ return;
+
+ QualType RHSType = S.Context.getCanonicalType(Property->getType());
+  bool FirstTime = true;
+ for (ObjCInterfaceDecl::ProtocolPropertyMap::iterator
+ I = PropMap.begin(), E = PropMap.end(); I != E; I++) {
+ ObjCPropertyDecl *Prop = I->second;
+ QualType LHSType = S.Context.getCanonicalType(Prop->getType());
+ if (!S.Context.propertyTypesAreCompatible(LHSType, RHSType)) {
+ bool IncompatibleObjC = false;
+ QualType ConvertedType;
+ if (!S.isObjCPointerConversion(RHSType, LHSType, ConvertedType, IncompatibleObjC)
+ || IncompatibleObjC) {
+        if (FirstTime) {
+          S.Diag(Property->getLocation(), diag::warn_protocol_property_mismatch)
+            << Property->getType();
+          FirstTime = false;
+ }
+ S.Diag(Prop->getLocation(), diag::note_protocol_property_declare)
+ << Prop->getType();
+ }
+ }
+ }
+  if (!FirstTime && AtLoc.isValid())
+ S.Diag(AtLoc, diag::note_property_synthesize);
+}
+
+/// Determine whether any storage attributes were written on the property.
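+/// For example, in the sketch below the 'strong' written on the readonly
+/// declaration in the primary class also counts for the readwrite
+/// redeclaration in the extension:
+/// \code
+///   @interface Foo
+///   @property (strong, readonly) id name;
+///   @end
+///   @interface Foo ()
+///   @property (readwrite) id name; // has a written storage attribute
+///   @end
+/// \endcode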
+static bool hasWrittenStorageAttribute(ObjCPropertyDecl *Prop) {
+ if (Prop->getPropertyAttributesAsWritten() & OwnershipMask) return true;
+
+ // If this is a readwrite property in a class extension that refines
+ // a readonly property in the original class definition, check it as
+ // well.
+
+ // If it's a readonly property, we're not interested.
+ if (Prop->isReadOnly()) return false;
+
+ // Is it declared in an extension?
+ auto Category = dyn_cast<ObjCCategoryDecl>(Prop->getDeclContext());
+ if (!Category || !Category->IsClassExtension()) return false;
+
+ // Find the corresponding property in the primary class definition.
+ auto OrigClass = Category->getClassInterface();
+ for (auto Found : OrigClass->lookup(Prop->getDeclName())) {
+ if (ObjCPropertyDecl *OrigProp = dyn_cast<ObjCPropertyDecl>(Found))
+ return OrigProp->getPropertyAttributesAsWritten() & OwnershipMask;
+ }
+
+ // Look through all of the protocols.
+ for (const auto *Proto : OrigClass->all_referenced_protocols()) {
+ if (ObjCPropertyDecl *OrigProp =
+ Proto->FindPropertyDeclaration(Prop->getIdentifier()))
+ return OrigProp->getPropertyAttributesAsWritten() & OwnershipMask;
+ }
+
+ return false;
+}
+
+/// ActOnPropertyImplDecl - This routine performs semantic checks and
+/// builds the AST node for a property implementation declaration; declared
+/// as \@synthesize or \@dynamic.
+///
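+/// Both forms below reach this routine (illustrative):
+/// \code
+///   @implementation Foo
+///   @synthesize name = _name; // Synthesize, PropertyIvar = '_name'
+///   @dynamic title;           // !Synthesize, no ivar
+///   @end
+/// \endcode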
+Decl *Sema::ActOnPropertyImplDecl(Scope *S,
+ SourceLocation AtLoc,
+ SourceLocation PropertyLoc,
+ bool Synthesize,
+ IdentifierInfo *PropertyId,
+ IdentifierInfo *PropertyIvar,
+ SourceLocation PropertyIvarLoc) {
+ ObjCContainerDecl *ClassImpDecl =
+ dyn_cast<ObjCContainerDecl>(CurContext);
+ // Make sure we have a context for the property implementation declaration.
+ if (!ClassImpDecl) {
+ Diag(AtLoc, diag::error_missing_property_context);
+ return nullptr;
+ }
+ if (PropertyIvarLoc.isInvalid())
+ PropertyIvarLoc = PropertyLoc;
+ SourceLocation PropertyDiagLoc = PropertyLoc;
+ if (PropertyDiagLoc.isInvalid())
+ PropertyDiagLoc = ClassImpDecl->getLocStart();
+ ObjCPropertyDecl *property = nullptr;
+ ObjCInterfaceDecl *IDecl = nullptr;
+ // Find the class or category class where this property must have
+ // a declaration.
+ ObjCImplementationDecl *IC = nullptr;
+ ObjCCategoryImplDecl *CatImplClass = nullptr;
+ if ((IC = dyn_cast<ObjCImplementationDecl>(ClassImpDecl))) {
+ IDecl = IC->getClassInterface();
+    // We always synthesize an interface for an implementation
+    // without an interface decl, so IDecl is always non-null.
+ assert(IDecl &&
+ "ActOnPropertyImplDecl - @implementation without @interface");
+
+ // Look for this property declaration in the @implementation's @interface
+ property = IDecl->FindPropertyDeclaration(PropertyId);
+ if (!property) {
+ Diag(PropertyLoc, diag::error_bad_property_decl) << IDecl->getDeclName();
+ return nullptr;
+ }
+ unsigned PIkind = property->getPropertyAttributesAsWritten();
+ if ((PIkind & (ObjCPropertyDecl::OBJC_PR_atomic |
+ ObjCPropertyDecl::OBJC_PR_nonatomic) ) == 0) {
+ if (AtLoc.isValid())
+ Diag(AtLoc, diag::warn_implicit_atomic_property);
+ else
+ Diag(IC->getLocation(), diag::warn_auto_implicit_atomic_property);
+ Diag(property->getLocation(), diag::note_property_declare);
+ }
+
+ if (const ObjCCategoryDecl *CD =
+ dyn_cast<ObjCCategoryDecl>(property->getDeclContext())) {
+ if (!CD->IsClassExtension()) {
+ Diag(PropertyLoc, diag::error_category_property) << CD->getDeclName();
+ Diag(property->getLocation(), diag::note_property_declare);
+ return nullptr;
+ }
+ }
+    if (Synthesize &&
+ (PIkind & ObjCPropertyDecl::OBJC_PR_readonly) &&
+ property->hasAttr<IBOutletAttr>() &&
+ !AtLoc.isValid()) {
+ bool ReadWriteProperty = false;
+      // Search the class extensions and see if this 'readonly' property is
+      // redeclared 'readwrite'; in that case, no warning is to be issued.
+ for (auto *Ext : IDecl->known_extensions()) {
+ DeclContext::lookup_result R = Ext->lookup(property->getDeclName());
+ if (!R.empty())
+ if (ObjCPropertyDecl *ExtProp = dyn_cast<ObjCPropertyDecl>(R[0])) {
+ PIkind = ExtProp->getPropertyAttributesAsWritten();
+ if (PIkind & ObjCPropertyDecl::OBJC_PR_readwrite) {
+ ReadWriteProperty = true;
+ break;
+ }
+ }
+ }
+
+ if (!ReadWriteProperty) {
+ Diag(property->getLocation(), diag::warn_auto_readonly_iboutlet_property)
+ << property;
+ SourceLocation readonlyLoc;
+ if (LocPropertyAttribute(Context, "readonly",
+ property->getLParenLoc(), readonlyLoc)) {
+ SourceLocation endLoc =
+ readonlyLoc.getLocWithOffset(strlen("readonly")-1);
+ SourceRange ReadonlySourceRange(readonlyLoc, endLoc);
+ Diag(property->getLocation(),
+ diag::note_auto_readonly_iboutlet_fixup_suggest) <<
+ FixItHint::CreateReplacement(ReadonlySourceRange, "readwrite");
+ }
+ }
+ }
+ if (Synthesize && isa<ObjCProtocolDecl>(property->getDeclContext()))
+ DiagnosePropertyMismatchDeclInProtocols(*this, AtLoc, IDecl, property);
+
+ } else if ((CatImplClass = dyn_cast<ObjCCategoryImplDecl>(ClassImpDecl))) {
+ if (Synthesize) {
+ Diag(AtLoc, diag::error_synthesize_category_decl);
+ return nullptr;
+ }
+ IDecl = CatImplClass->getClassInterface();
+ if (!IDecl) {
+ Diag(AtLoc, diag::error_missing_property_interface);
+ return nullptr;
+ }
+ ObjCCategoryDecl *Category =
+ IDecl->FindCategoryDeclaration(CatImplClass->getIdentifier());
+
+    // If the category for this implementation is not found, it is an error
+    // which has already been reported earlier.
+ if (!Category)
+ return nullptr;
+ // Look for this property declaration in @implementation's category
+ property = Category->FindPropertyDeclaration(PropertyId);
+ if (!property) {
+ Diag(PropertyLoc, diag::error_bad_category_property_decl)
+ << Category->getDeclName();
+ return nullptr;
+ }
+ } else {
+ Diag(AtLoc, diag::error_bad_property_context);
+ return nullptr;
+ }
+ ObjCIvarDecl *Ivar = nullptr;
+ bool CompleteTypeErr = false;
+ bool compat = true;
+ // Check that we have a valid, previously declared ivar for @synthesize
+ if (Synthesize) {
+ // @synthesize
+ if (!PropertyIvar)
+ PropertyIvar = PropertyId;
+ // Check that this is a previously declared 'ivar' in 'IDecl' interface
+ ObjCInterfaceDecl *ClassDeclared;
+ Ivar = IDecl->lookupInstanceVariable(PropertyIvar, ClassDeclared);
+ QualType PropType = property->getType();
+ QualType PropertyIvarType = PropType.getNonReferenceType();
+
+ if (RequireCompleteType(PropertyDiagLoc, PropertyIvarType,
+ diag::err_incomplete_synthesized_property,
+ property->getDeclName())) {
+ Diag(property->getLocation(), diag::note_property_declare);
+ CompleteTypeErr = true;
+ }
+
+ if (getLangOpts().ObjCAutoRefCount &&
+ (property->getPropertyAttributesAsWritten() &
+ ObjCPropertyDecl::OBJC_PR_readonly) &&
+ PropertyIvarType->isObjCRetainableType()) {
+ setImpliedPropertyAttributeForReadOnlyProperty(property, Ivar);
+ }
+
+ ObjCPropertyDecl::PropertyAttributeKind kind
+ = property->getPropertyAttributes();
+
+ bool isARCWeak = false;
+ if (kind & ObjCPropertyDecl::OBJC_PR_weak) {
+ // Add GC __weak to the ivar type if the property is weak.
+ if (getLangOpts().getGC() != LangOptions::NonGC) {
+ assert(!getLangOpts().ObjCAutoRefCount);
+ if (PropertyIvarType.isObjCGCStrong()) {
+ Diag(PropertyDiagLoc, diag::err_gc_weak_property_strong_type);
+ Diag(property->getLocation(), diag::note_property_declare);
+ } else {
+ PropertyIvarType =
+ Context.getObjCGCQualType(PropertyIvarType, Qualifiers::Weak);
+ }
+
+ // Otherwise, check whether ARC __weak is enabled and works with
+ // the property type.
+ } else {
+ if (!getLangOpts().ObjCWeak) {
+ // Only complain here when synthesizing an ivar.
+ if (!Ivar) {
+ Diag(PropertyDiagLoc,
+ getLangOpts().ObjCWeakRuntime
+ ? diag::err_synthesizing_arc_weak_property_disabled
+ : diag::err_synthesizing_arc_weak_property_no_runtime);
+ Diag(property->getLocation(), diag::note_property_declare);
+ }
+ CompleteTypeErr = true; // suppress later diagnostics about the ivar
+ } else {
+ isARCWeak = true;
+ if (const ObjCObjectPointerType *ObjT =
+ PropertyIvarType->getAs<ObjCObjectPointerType>()) {
+ const ObjCInterfaceDecl *ObjI = ObjT->getInterfaceDecl();
+ if (ObjI && ObjI->isArcWeakrefUnavailable()) {
+ Diag(property->getLocation(),
+ diag::err_arc_weak_unavailable_property)
+ << PropertyIvarType;
+ Diag(ClassImpDecl->getLocation(), diag::note_implemented_by_class)
+ << ClassImpDecl->getName();
+ }
+ }
+ }
+ }
+ }
+
+ if (AtLoc.isInvalid()) {
+      // When default-synthesizing a property, check whether there is an
+      // ivar matching the property name and issue a warning if so, since
+      // failing to use an existing ivar to back the property is the most
+      // common mistake relative to the non-default synthesis case.
+      ObjCInterfaceDecl *ClassDeclared = nullptr;
+ ObjCIvarDecl *originalIvar =
+ IDecl->lookupInstanceVariable(property->getIdentifier(),
+ ClassDeclared);
+ if (originalIvar) {
+ Diag(PropertyDiagLoc,
+ diag::warn_autosynthesis_property_ivar_match)
+ << PropertyId << (Ivar == nullptr) << PropertyIvar
+ << originalIvar->getIdentifier();
+ Diag(property->getLocation(), diag::note_property_declare);
+ Diag(originalIvar->getLocation(), diag::note_ivar_decl);
+ }
+ }
+
+ if (!Ivar) {
+ // In ARC, give the ivar a lifetime qualifier based on the
+ // property attributes.
+ if ((getLangOpts().ObjCAutoRefCount || isARCWeak) &&
+ !PropertyIvarType.getObjCLifetime() &&
+ PropertyIvarType->isObjCRetainableType()) {
+
+ // It's an error if we have to do this and the user didn't
+ // explicitly write an ownership attribute on the property.
+ if (!hasWrittenStorageAttribute(property) &&
+ !(kind & ObjCPropertyDecl::OBJC_PR_strong)) {
+ Diag(PropertyDiagLoc,
+ diag::err_arc_objc_property_default_assign_on_object);
+ Diag(property->getLocation(), diag::note_property_declare);
+ } else {
+ Qualifiers::ObjCLifetime lifetime =
+ getImpliedARCOwnership(kind, PropertyIvarType);
+ assert(lifetime && "no lifetime for property?");
+
+ Qualifiers qs;
+ qs.addObjCLifetime(lifetime);
+ PropertyIvarType = Context.getQualifiedType(PropertyIvarType, qs);
+ }
+ }
+
+ Ivar = ObjCIvarDecl::Create(Context, ClassImpDecl,
+ PropertyIvarLoc,PropertyIvarLoc, PropertyIvar,
+ PropertyIvarType, /*Dinfo=*/nullptr,
+ ObjCIvarDecl::Private,
+ (Expr *)nullptr, true);
+ if (RequireNonAbstractType(PropertyIvarLoc,
+ PropertyIvarType,
+ diag::err_abstract_type_in_decl,
+ AbstractSynthesizedIvarType)) {
+ Diag(property->getLocation(), diag::note_property_declare);
+ Ivar->setInvalidDecl();
+ } else if (CompleteTypeErr)
+ Ivar->setInvalidDecl();
+ ClassImpDecl->addDecl(Ivar);
+ IDecl->makeDeclVisibleInContext(Ivar);
+
+ if (getLangOpts().ObjCRuntime.isFragile())
+ Diag(PropertyDiagLoc, diag::error_missing_property_ivar_decl)
+ << PropertyId;
+      // Note! I deliberately want it to fall through so that we have a
+      // property implementation and avoid future warnings.
+ } else if (getLangOpts().ObjCRuntime.isNonFragile() &&
+ !declaresSameEntity(ClassDeclared, IDecl)) {
+ Diag(PropertyDiagLoc, diag::error_ivar_in_superclass_use)
+ << property->getDeclName() << Ivar->getDeclName()
+ << ClassDeclared->getDeclName();
+ Diag(Ivar->getLocation(), diag::note_previous_access_declaration)
+ << Ivar << Ivar->getName();
+      // Note! I deliberately want it to fall through so more errors are caught.
+ }
+ property->setPropertyIvarDecl(Ivar);
+
+ QualType IvarType = Context.getCanonicalType(Ivar->getType());
+
+ // Check that type of property and its ivar are type compatible.
+ if (!Context.hasSameType(PropertyIvarType, IvarType)) {
+ if (isa<ObjCObjectPointerType>(PropertyIvarType)
+ && isa<ObjCObjectPointerType>(IvarType))
+ compat =
+ Context.canAssignObjCInterfaces(
+ PropertyIvarType->getAs<ObjCObjectPointerType>(),
+ IvarType->getAs<ObjCObjectPointerType>());
+ else {
+ compat = (CheckAssignmentConstraints(PropertyIvarLoc, PropertyIvarType,
+ IvarType)
+ == Compatible);
+ }
+ if (!compat) {
+ Diag(PropertyDiagLoc, diag::error_property_ivar_type)
+ << property->getDeclName() << PropType
+ << Ivar->getDeclName() << IvarType;
+ Diag(Ivar->getLocation(), diag::note_ivar_decl);
+        // Note! I deliberately want it to fall through so that we have a
+        // property implementation and avoid future warnings.
+ }
+ else {
+        // FIXME! Rules for properties are somewhat different from those
+        // for assignments. Use a new routine to consolidate all cases;
+        // specifically for property redeclarations as well as for ivars.
+        QualType lhsType = Context.getCanonicalType(PropertyIvarType).getUnqualifiedType();
+        QualType rhsType = Context.getCanonicalType(IvarType).getUnqualifiedType();
+ if (lhsType != rhsType &&
+ lhsType->isArithmeticType()) {
+ Diag(PropertyDiagLoc, diag::error_property_ivar_type)
+ << property->getDeclName() << PropType
+ << Ivar->getDeclName() << IvarType;
+ Diag(Ivar->getLocation(), diag::note_ivar_decl);
+ // Fall thru - see previous comment
+ }
+ }
+      // __weak is explicit, so it is visible on the canonical type.
+ if ((PropType.isObjCGCWeak() && !IvarType.isObjCGCWeak() &&
+ getLangOpts().getGC() != LangOptions::NonGC)) {
+ Diag(PropertyDiagLoc, diag::error_weak_property)
+ << property->getDeclName() << Ivar->getDeclName();
+ Diag(Ivar->getLocation(), diag::note_ivar_decl);
+ // Fall thru - see previous comment
+ }
+ if ((property->getType()->isObjCObjectPointerType() ||
+ PropType.isObjCGCStrong()) && IvarType.isObjCGCWeak() &&
+ getLangOpts().getGC() != LangOptions::NonGC) {
+ Diag(PropertyDiagLoc, diag::error_strong_property)
+ << property->getDeclName() << Ivar->getDeclName();
+ // Fall thru - see previous comment
+ }
+ }
+ if (getLangOpts().ObjCAutoRefCount || isARCWeak ||
+ Ivar->getType().getObjCLifetime())
+ checkARCPropertyImpl(*this, PropertyLoc, property, Ivar);
+ } else if (PropertyIvar)
+ // @dynamic
+ Diag(PropertyDiagLoc, diag::error_dynamic_property_ivar_decl);
+
+  assert(property && "ActOnPropertyImplDecl - property declaration missing");
+ ObjCPropertyImplDecl *PIDecl =
+ ObjCPropertyImplDecl::Create(Context, CurContext, AtLoc, PropertyLoc,
+ property,
+ (Synthesize ?
+ ObjCPropertyImplDecl::Synthesize
+ : ObjCPropertyImplDecl::Dynamic),
+ Ivar, PropertyIvarLoc);
+
+ if (CompleteTypeErr || !compat)
+ PIDecl->setInvalidDecl();
+
+ if (ObjCMethodDecl *getterMethod = property->getGetterMethodDecl()) {
+ getterMethod->createImplicitParams(Context, IDecl);
+ if (getLangOpts().CPlusPlus && Synthesize && !CompleteTypeErr &&
+ Ivar->getType()->isRecordType()) {
+      // For Objective-C++, we need to synthesize the AST for the ivar object
+      // returned by the getter, as it must conform to C++'s copy-return rules.
+ // FIXME. Eventually we want to do this for Objective-C as well.
+ SynthesizedFunctionScope Scope(*this, getterMethod);
+ ImplicitParamDecl *SelfDecl = getterMethod->getSelfDecl();
+ DeclRefExpr *SelfExpr =
+ new (Context) DeclRefExpr(SelfDecl, false, SelfDecl->getType(),
+ VK_LValue, PropertyDiagLoc);
+ MarkDeclRefReferenced(SelfExpr);
+ Expr *LoadSelfExpr =
+ ImplicitCastExpr::Create(Context, SelfDecl->getType(),
+ CK_LValueToRValue, SelfExpr, nullptr,
+ VK_RValue);
+ Expr *IvarRefExpr =
+ new (Context) ObjCIvarRefExpr(Ivar,
+ Ivar->getUsageType(SelfDecl->getType()),
+ PropertyDiagLoc,
+ Ivar->getLocation(),
+ LoadSelfExpr, true, true);
+ ExprResult Res = PerformCopyInitialization(
+ InitializedEntity::InitializeResult(PropertyDiagLoc,
+ getterMethod->getReturnType(),
+ /*NRVO=*/false),
+ PropertyDiagLoc, IvarRefExpr);
+ if (!Res.isInvalid()) {
+ Expr *ResExpr = Res.getAs<Expr>();
+ if (ResExpr)
+ ResExpr = MaybeCreateExprWithCleanups(ResExpr);
+ PIDecl->setGetterCXXConstructor(ResExpr);
+ }
+ }
+ if (property->hasAttr<NSReturnsNotRetainedAttr>() &&
+ !getterMethod->hasAttr<NSReturnsNotRetainedAttr>()) {
+ Diag(getterMethod->getLocation(),
+ diag::warn_property_getter_owning_mismatch);
+ Diag(property->getLocation(), diag::note_property_declare);
+ }
+ if (getLangOpts().ObjCAutoRefCount && Synthesize)
+ switch (getterMethod->getMethodFamily()) {
+ case OMF_retain:
+ case OMF_retainCount:
+ case OMF_release:
+ case OMF_autorelease:
+ Diag(getterMethod->getLocation(), diag::err_arc_illegal_method_def)
+ << 1 << getterMethod->getSelector();
+ break;
+ default:
+ break;
+ }
+ }
+ if (ObjCMethodDecl *setterMethod = property->getSetterMethodDecl()) {
+ setterMethod->createImplicitParams(Context, IDecl);
+ if (getLangOpts().CPlusPlus && Synthesize && !CompleteTypeErr &&
+ Ivar->getType()->isRecordType()) {
+ // FIXME. Eventually we want to do this for Objective-C as well.
+ SynthesizedFunctionScope Scope(*this, setterMethod);
+ ImplicitParamDecl *SelfDecl = setterMethod->getSelfDecl();
+ DeclRefExpr *SelfExpr =
+ new (Context) DeclRefExpr(SelfDecl, false, SelfDecl->getType(),
+ VK_LValue, PropertyDiagLoc);
+ MarkDeclRefReferenced(SelfExpr);
+ Expr *LoadSelfExpr =
+ ImplicitCastExpr::Create(Context, SelfDecl->getType(),
+ CK_LValueToRValue, SelfExpr, nullptr,
+ VK_RValue);
+ Expr *lhs =
+ new (Context) ObjCIvarRefExpr(Ivar,
+ Ivar->getUsageType(SelfDecl->getType()),
+ PropertyDiagLoc,
+ Ivar->getLocation(),
+ LoadSelfExpr, true, true);
+ ObjCMethodDecl::param_iterator P = setterMethod->param_begin();
+ ParmVarDecl *Param = (*P);
+ QualType T = Param->getType().getNonReferenceType();
+ DeclRefExpr *rhs = new (Context) DeclRefExpr(Param, false, T,
+ VK_LValue, PropertyDiagLoc);
+ MarkDeclRefReferenced(rhs);
+ ExprResult Res = BuildBinOp(S, PropertyDiagLoc,
+ BO_Assign, lhs, rhs);
+ if (property->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_atomic) {
+ Expr *callExpr = Res.getAs<Expr>();
+ if (const CXXOperatorCallExpr *CXXCE =
+ dyn_cast_or_null<CXXOperatorCallExpr>(callExpr))
+ if (const FunctionDecl *FuncDecl = CXXCE->getDirectCallee())
+ if (!FuncDecl->isTrivial())
+ if (property->getType()->isReferenceType()) {
+ Diag(PropertyDiagLoc,
+ diag::err_atomic_property_nontrivial_assign_op)
+ << property->getType();
+ Diag(FuncDecl->getLocStart(),
+ diag::note_callee_decl) << FuncDecl;
+ }
+ }
+ PIDecl->setSetterCXXAssignment(Res.getAs<Expr>());
+ }
+ }
+
+ if (IC) {
+ if (Synthesize)
+ if (ObjCPropertyImplDecl *PPIDecl =
+ IC->FindPropertyImplIvarDecl(PropertyIvar)) {
+ Diag(PropertyLoc, diag::error_duplicate_ivar_use)
+ << PropertyId << PPIDecl->getPropertyDecl()->getIdentifier()
+ << PropertyIvar;
+ Diag(PPIDecl->getLocation(), diag::note_previous_use);
+ }
+
+ if (ObjCPropertyImplDecl *PPIDecl
+ = IC->FindPropertyImplDecl(PropertyId)) {
+ Diag(PropertyLoc, diag::error_property_implemented) << PropertyId;
+ Diag(PPIDecl->getLocation(), diag::note_previous_declaration);
+ return nullptr;
+ }
+ IC->addPropertyImplementation(PIDecl);
+ if (getLangOpts().ObjCDefaultSynthProperties &&
+ getLangOpts().ObjCRuntime.isNonFragile() &&
+ !IDecl->isObjCRequiresPropertyDefs()) {
+      // Diagnose if an ivar was lazily synthesized due to a previous
+      // use and if 1) the property is @dynamic or 2) the property is
+      // synthesized but requires an ivar of a different name.
+      ObjCInterfaceDecl *ClassDeclared = nullptr;
+ ObjCIvarDecl *Ivar = nullptr;
+ if (!Synthesize)
+ Ivar = IDecl->lookupInstanceVariable(PropertyId, ClassDeclared);
+ else {
+ if (PropertyIvar && PropertyIvar != PropertyId)
+ Ivar = IDecl->lookupInstanceVariable(PropertyId, ClassDeclared);
+ }
+ // Issue diagnostics only if Ivar belongs to current class.
+ if (Ivar && Ivar->getSynthesize() &&
+ declaresSameEntity(IC->getClassInterface(), ClassDeclared)) {
+ Diag(Ivar->getLocation(), diag::err_undeclared_var_use)
+ << PropertyId;
+ Ivar->setInvalidDecl();
+ }
+ }
+ } else {
+ if (Synthesize)
+ if (ObjCPropertyImplDecl *PPIDecl =
+ CatImplClass->FindPropertyImplIvarDecl(PropertyIvar)) {
+ Diag(PropertyDiagLoc, diag::error_duplicate_ivar_use)
+ << PropertyId << PPIDecl->getPropertyDecl()->getIdentifier()
+ << PropertyIvar;
+ Diag(PPIDecl->getLocation(), diag::note_previous_use);
+ }
+
+ if (ObjCPropertyImplDecl *PPIDecl =
+ CatImplClass->FindPropertyImplDecl(PropertyId)) {
+ Diag(PropertyDiagLoc, diag::error_property_implemented) << PropertyId;
+ Diag(PPIDecl->getLocation(), diag::note_previous_declaration);
+ return nullptr;
+ }
+ CatImplClass->addPropertyImplementation(PIDecl);
+ }
+
+ return PIDecl;
+}
+
+//===----------------------------------------------------------------------===//
+// Helper methods.
+//===----------------------------------------------------------------------===//
+
+/// DiagnosePropertyMismatch - Compares two properties for their
+/// attributes and types and warns on a variety of inconsistencies.
+///
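+/// For example, overriding an inherited 'copy' property as 'retain' is
+/// diagnosed (hypothetical names):
+/// \code
+///   @interface Base : NSObject
+///   @property (nonatomic, copy) NSString *name;
+///   @end
+///   @interface Sub : Base
+///   @property (nonatomic, retain) NSString *name; // 'copy' attribute warning
+///   @end
+/// \endcode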
+void
+Sema::DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
+ ObjCPropertyDecl *SuperProperty,
+ const IdentifierInfo *inheritedName,
+ bool OverridingProtocolProperty) {
+ ObjCPropertyDecl::PropertyAttributeKind CAttr =
+ Property->getPropertyAttributes();
+ ObjCPropertyDecl::PropertyAttributeKind SAttr =
+ SuperProperty->getPropertyAttributes();
+
+  // We allow readonly properties without an explicit ownership
+  // (assign/unsafe_unretained/weak/retain/strong/copy) in the superclass
+  // to be overridden by a property with any explicit ownership in the subclass.
+ if (!OverridingProtocolProperty &&
+ !getOwnershipRule(SAttr) && getOwnershipRule(CAttr))
+ ;
+ else {
+ if ((CAttr & ObjCPropertyDecl::OBJC_PR_readonly)
+ && (SAttr & ObjCPropertyDecl::OBJC_PR_readwrite))
+ Diag(Property->getLocation(), diag::warn_readonly_property)
+ << Property->getDeclName() << inheritedName;
+ if ((CAttr & ObjCPropertyDecl::OBJC_PR_copy)
+ != (SAttr & ObjCPropertyDecl::OBJC_PR_copy))
+ Diag(Property->getLocation(), diag::warn_property_attribute)
+ << Property->getDeclName() << "copy" << inheritedName;
+ else if (!(SAttr & ObjCPropertyDecl::OBJC_PR_readonly)){
+ unsigned CAttrRetain =
+ (CAttr &
+ (ObjCPropertyDecl::OBJC_PR_retain | ObjCPropertyDecl::OBJC_PR_strong));
+ unsigned SAttrRetain =
+ (SAttr &
+ (ObjCPropertyDecl::OBJC_PR_retain | ObjCPropertyDecl::OBJC_PR_strong));
+ bool CStrong = (CAttrRetain != 0);
+ bool SStrong = (SAttrRetain != 0);
+ if (CStrong != SStrong)
+ Diag(Property->getLocation(), diag::warn_property_attribute)
+ << Property->getDeclName() << "retain (or strong)" << inheritedName;
+ }
+ }
+
+ // Check for nonatomic; note that nonatomic is effectively
+ // meaningless for readonly properties, so don't diagnose if the
+ // atomic property is 'readonly'.
+ checkAtomicPropertyMismatch(*this, SuperProperty, Property, false);
+ if (Property->getSetterName() != SuperProperty->getSetterName()) {
+ Diag(Property->getLocation(), diag::warn_property_attribute)
+ << Property->getDeclName() << "setter" << inheritedName;
+ Diag(SuperProperty->getLocation(), diag::note_property_declare);
+ }
+ if (Property->getGetterName() != SuperProperty->getGetterName()) {
+ Diag(Property->getLocation(), diag::warn_property_attribute)
+ << Property->getDeclName() << "getter" << inheritedName;
+ Diag(SuperProperty->getLocation(), diag::note_property_declare);
+ }
+
+ QualType LHSType =
+ Context.getCanonicalType(SuperProperty->getType());
+ QualType RHSType =
+ Context.getCanonicalType(Property->getType());
+
+ if (!Context.propertyTypesAreCompatible(LHSType, RHSType)) {
+    // Handle the cases not covered above.
+ // FIXME. For future support of covariant property types, revisit this.
+ bool IncompatibleObjC = false;
+ QualType ConvertedType;
+ if (!isObjCPointerConversion(RHSType, LHSType,
+ ConvertedType, IncompatibleObjC) ||
+ IncompatibleObjC) {
+ Diag(Property->getLocation(), diag::warn_property_types_are_incompatible)
+ << Property->getType() << SuperProperty->getType() << inheritedName;
+ Diag(SuperProperty->getLocation(), diag::note_property_declare);
+ }
+ }
+}
+
+bool Sema::DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *property,
+ ObjCMethodDecl *GetterMethod,
+ SourceLocation Loc) {
+ if (!GetterMethod)
+ return false;
+ QualType GetterType = GetterMethod->getReturnType().getNonReferenceType();
+ QualType PropertyIvarType = property->getType().getNonReferenceType();
+ bool compat = Context.hasSameType(PropertyIvarType, GetterType);
+ if (!compat) {
+ const ObjCObjectPointerType *propertyObjCPtr = nullptr;
+ const ObjCObjectPointerType *getterObjCPtr = nullptr;
+ if ((propertyObjCPtr = PropertyIvarType->getAs<ObjCObjectPointerType>()) &&
+ (getterObjCPtr = GetterType->getAs<ObjCObjectPointerType>()))
+ compat = Context.canAssignObjCInterfaces(getterObjCPtr, propertyObjCPtr);
+ else if (CheckAssignmentConstraints(Loc, GetterType, PropertyIvarType)
+ != Compatible) {
+ Diag(Loc, diag::error_property_accessor_type)
+ << property->getDeclName() << PropertyIvarType
+ << GetterMethod->getSelector() << GetterType;
+ Diag(GetterMethod->getLocation(), diag::note_declared_at);
+ return true;
+ } else {
+ compat = true;
+      QualType lhsType = Context.getCanonicalType(PropertyIvarType).getUnqualifiedType();
+      QualType rhsType = Context.getCanonicalType(GetterType).getUnqualifiedType();
+ if (lhsType != rhsType && lhsType->isArithmeticType())
+ compat = false;
+ }
+ }
+
+ if (!compat) {
+ Diag(Loc, diag::warn_accessor_property_type_mismatch)
+ << property->getDeclName()
+ << GetterMethod->getSelector();
+ Diag(GetterMethod->getLocation(), diag::note_declared_at);
+ return true;
+ }
+
+ return false;
+}
+
+/// CollectImmediateProperties - This routine collects all properties in
+/// the class and its conforming protocols, but not those in its superclass.
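+/// For a hypothetical class such as:
+/// \code
+///   @protocol P
+///   @property id c;
+///   @end
+///   @interface Foo : Super <P>
+///   @property id a;
+///   @end
+///   @interface Foo ()
+///   @property id b;
+///   @end
+/// \endcode
+/// the map receives 'a' (from the class), 'b' (from the visible extension),
+/// and 'c' (from protocol P), but nothing declared only in 'Super'.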
+static void CollectImmediateProperties(ObjCContainerDecl *CDecl,
+ ObjCContainerDecl::PropertyMap &PropMap,
+ ObjCContainerDecl::PropertyMap &SuperPropMap,
+ bool IncludeProtocols = true) {
+
+ if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
+ for (auto *Prop : IDecl->properties())
+ PropMap[Prop->getIdentifier()] = Prop;
+
+ // Collect the properties from visible extensions.
+ for (auto *Ext : IDecl->visible_extensions())
+ CollectImmediateProperties(Ext, PropMap, SuperPropMap, IncludeProtocols);
+
+ if (IncludeProtocols) {
+ // Scan through class's protocols.
+ for (auto *PI : IDecl->all_referenced_protocols())
+ CollectImmediateProperties(PI, PropMap, SuperPropMap);
+ }
+ }
+ if (ObjCCategoryDecl *CATDecl = dyn_cast<ObjCCategoryDecl>(CDecl)) {
+ for (auto *Prop : CATDecl->properties())
+ PropMap[Prop->getIdentifier()] = Prop;
+ if (IncludeProtocols) {
+ // Scan through class's protocols.
+ for (auto *PI : CATDecl->protocols())
+ CollectImmediateProperties(PI, PropMap, SuperPropMap);
+ }
+ }
+ else if (ObjCProtocolDecl *PDecl = dyn_cast<ObjCProtocolDecl>(CDecl)) {
+ for (auto *Prop : PDecl->properties()) {
+ ObjCPropertyDecl *PropertyFromSuper = SuperPropMap[Prop->getIdentifier()];
+      // Exclude properties from protocols to which the class's superclass
+      // conforms, as the superclass has to implement the property.
+ if (!PropertyFromSuper ||
+ PropertyFromSuper->getIdentifier() != Prop->getIdentifier()) {
+ ObjCPropertyDecl *&PropEntry = PropMap[Prop->getIdentifier()];
+ if (!PropEntry)
+ PropEntry = Prop;
+ }
+ }
+    // Scan through the protocol's protocols.
+ for (auto *PI : PDecl->protocols())
+ CollectImmediateProperties(PI, PropMap, SuperPropMap);
+ }
+}
+
+/// CollectSuperClassPropertyImplementations - This routine collects the list
+/// of properties to be implemented in the superclass(es), including those
+/// coming from their conforming protocols.
+static void CollectSuperClassPropertyImplementations(ObjCInterfaceDecl *CDecl,
+ ObjCInterfaceDecl::PropertyMap &PropMap) {
+ if (ObjCInterfaceDecl *SDecl = CDecl->getSuperClass()) {
+ ObjCInterfaceDecl::PropertyDeclOrder PO;
+ while (SDecl) {
+ SDecl->collectPropertiesToImplement(PropMap, PO);
+ SDecl = SDecl->getSuperClass();
+ }
+ }
+}
+
+/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
+/// an ivar synthesized for 'Method' and 'Method' is a property accessor
+/// declared in class 'IFace'.
+bool
+Sema::IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
+ ObjCMethodDecl *Method, ObjCIvarDecl *IV) {
+ if (!IV->getSynthesize())
+ return false;
+ ObjCMethodDecl *IMD = IFace->lookupMethod(Method->getSelector(),
+ Method->isInstanceMethod());
+ if (!IMD || !IMD->isPropertyAccessor())
+ return false;
+
+  // Look up a property declaration one of whose accessors is implemented
+  // by this method.
+ for (const auto *Property : IFace->properties()) {
+ if ((Property->getGetterName() == IMD->getSelector() ||
+ Property->getSetterName() == IMD->getSelector()) &&
+ (Property->getPropertyIvarDecl() == IV))
+ return true;
+ }
+  // Also look up property declarations in class extensions one of whose
+  // accessors is implemented by this method.
+ for (const auto *Ext : IFace->known_extensions())
+ for (const auto *Property : Ext->properties())
+ if ((Property->getGetterName() == IMD->getSelector() ||
+ Property->getSetterName() == IMD->getSelector()) &&
+ (Property->getPropertyIvarDecl() == IV))
+ return true;
+ return false;
+}
+
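+/// SuperClassImplementsProperty - Returns true if the superclass chain of
+/// \p IDecl provides instance methods for both of \p Prop's accessors
+/// (only the getter is required when the property is readonly).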
+static bool SuperClassImplementsProperty(ObjCInterfaceDecl *IDecl,
+ ObjCPropertyDecl *Prop) {
+ bool SuperClassImplementsGetter = false;
+ bool SuperClassImplementsSetter = false;
+ if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readonly)
+ SuperClassImplementsSetter = true;
+
+ while (IDecl->getSuperClass()) {
+ ObjCInterfaceDecl *SDecl = IDecl->getSuperClass();
+ if (!SuperClassImplementsGetter && SDecl->getInstanceMethod(Prop->getGetterName()))
+ SuperClassImplementsGetter = true;
+
+ if (!SuperClassImplementsSetter && SDecl->getInstanceMethod(Prop->getSetterName()))
+ SuperClassImplementsSetter = true;
+ if (SuperClassImplementsGetter && SuperClassImplementsSetter)
+ return true;
+ IDecl = IDecl->getSuperClass();
+ }
+ return false;
+}
+
+/// \brief Default-synthesizes all properties which must be synthesized
+/// in the class's \@implementation.
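+/// For instance, with default synthesis the following implicitly behaves
+/// as if '\@synthesize name = _name;' had been written (illustrative):
+/// \code
+///   @interface Foo : NSObject
+///   @property id name;
+///   @end
+///   @implementation Foo
+///   @end
+/// \endcode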
+void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl* IMPDecl,
+ ObjCInterfaceDecl *IDecl) {
+
+ ObjCInterfaceDecl::PropertyMap PropMap;
+ ObjCInterfaceDecl::PropertyDeclOrder PropertyOrder;
+ IDecl->collectPropertiesToImplement(PropMap, PropertyOrder);
+ if (PropMap.empty())
+ return;
+ ObjCInterfaceDecl::PropertyMap SuperPropMap;
+ CollectSuperClassPropertyImplementations(IDecl, SuperPropMap);
+
+ for (unsigned i = 0, e = PropertyOrder.size(); i != e; i++) {
+ ObjCPropertyDecl *Prop = PropertyOrder[i];
+ // Is there a matching property synthesize/dynamic?
+ if (Prop->isInvalidDecl() ||
+ Prop->getPropertyImplementation() == ObjCPropertyDecl::Optional)
+ continue;
+ // Property may have been synthesized by user.
+ if (IMPDecl->FindPropertyImplDecl(Prop->getIdentifier()))
+ continue;
+ if (IMPDecl->getInstanceMethod(Prop->getGetterName())) {
+ if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readonly)
+ continue;
+ if (IMPDecl->getInstanceMethod(Prop->getSetterName()))
+ continue;
+ }
+ if (ObjCPropertyImplDecl *PID =
+ IMPDecl->FindPropertyImplIvarDecl(Prop->getIdentifier())) {
+ Diag(Prop->getLocation(), diag::warn_no_autosynthesis_shared_ivar_property)
+ << Prop->getIdentifier();
+ if (PID->getLocation().isValid())
+ Diag(PID->getLocation(), diag::note_property_synthesize);
+ continue;
+ }
+ ObjCPropertyDecl *PropInSuperClass = SuperPropMap[Prop->getIdentifier()];
+ if (ObjCProtocolDecl *Proto =
+ dyn_cast<ObjCProtocolDecl>(Prop->getDeclContext())) {
+      // We won't auto-synthesize properties declared in protocols.
+      // Suppress the warning if the class's superclass implements the
+      // property's getter and setter (if it is a readwrite property), or
+      // if the property is going to be implemented in its superclass.
+ if (!SuperClassImplementsProperty(IDecl, Prop) && !PropInSuperClass) {
+ Diag(IMPDecl->getLocation(),
+ diag::warn_auto_synthesizing_protocol_property)
+ << Prop << Proto;
+ Diag(Prop->getLocation(), diag::note_property_declare);
+ }
+ continue;
+ }
+    // If the property is to be implemented in the superclass, ignore it.
+ if (PropInSuperClass) {
+ if ((Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readwrite) &&
+ (PropInSuperClass->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_readonly) &&
+ !IMPDecl->getInstanceMethod(Prop->getSetterName()) &&
+ !IDecl->HasUserDeclaredSetterMethod(Prop)) {
+ Diag(Prop->getLocation(), diag::warn_no_autosynthesis_property)
+ << Prop->getIdentifier();
+ Diag(PropInSuperClass->getLocation(), diag::note_property_declare);
+ }
+ else {
+ Diag(Prop->getLocation(), diag::warn_autosynthesis_property_in_superclass)
+ << Prop->getIdentifier();
+ Diag(PropInSuperClass->getLocation(), diag::note_property_declare);
+ Diag(IMPDecl->getLocation(), diag::note_while_in_implementation);
+ }
+ continue;
+ }
+ // We use invalid SourceLocations for the synthesized ivars since they
+ // aren't really synthesized at a particular location; they just exist.
+ // Saying that they are located at the @implementation isn't really going
+ // to help users.
+ ObjCPropertyImplDecl *PIDecl = dyn_cast_or_null<ObjCPropertyImplDecl>(
+ ActOnPropertyImplDecl(S, SourceLocation(), SourceLocation(),
+ true,
+ /* property = */ Prop->getIdentifier(),
+ /* ivar = */ Prop->getDefaultSynthIvarName(Context),
+ Prop->getLocation()));
+ if (PIDecl) {
+ Diag(Prop->getLocation(), diag::warn_missing_explicit_synthesis);
+ Diag(IMPDecl->getLocation(), diag::note_while_in_implementation);
+ }
+ }
+}
+
+void Sema::DefaultSynthesizeProperties(Scope *S, Decl *D) {
+ if (!LangOpts.ObjCDefaultSynthProperties || LangOpts.ObjCRuntime.isFragile())
+ return;
+  ObjCImplementationDecl *IC = dyn_cast_or_null<ObjCImplementationDecl>(D);
+ if (!IC)
+ return;
+ if (ObjCInterfaceDecl* IDecl = IC->getClassInterface())
+ if (!IDecl->isObjCRequiresPropertyDefs())
+ DefaultSynthesizeProperties(S, IC, IDecl);
+}
+
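+/// Diagnose an accessor that is declared but neither implemented nor
+/// provided by synthesis; a sketch of the category case:
+/// \code
+///   @interface Foo (Extra)
+///   @property (nonatomic, strong) id extra;
+///   @end
+///   @implementation Foo (Extra) // warns: getter/setter not implemented
+///   @end
+/// \endcode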
+static void DiagnoseUnimplementedAccessor(Sema &S,
+ ObjCInterfaceDecl *PrimaryClass,
+ Selector Method,
+ ObjCImplDecl* IMPDecl,
+ ObjCContainerDecl *CDecl,
+ ObjCCategoryDecl *C,
+ ObjCPropertyDecl *Prop,
+ Sema::SelectorSet &SMap) {
+  // When reporting a missing property setter/getter implementation in a
+  // category, do not report it when the accessor is declared in the primary
+  // class, the class's protocol, or one of its superclasses, because the
+  // class is going to implement it.
+ if (!SMap.count(Method) &&
+ (PrimaryClass == nullptr ||
+ !PrimaryClass->lookupPropertyAccessor(Method, C))) {
+ S.Diag(IMPDecl->getLocation(),
+ isa<ObjCCategoryDecl>(CDecl) ?
+ diag::warn_setter_getter_impl_required_in_category :
+ diag::warn_setter_getter_impl_required)
+ << Prop->getDeclName() << Method;
+ S.Diag(Prop->getLocation(),
+ diag::note_property_declare);
+ if (S.LangOpts.ObjCDefaultSynthProperties &&
+ S.LangOpts.ObjCRuntime.isNonFragile())
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(CDecl))
+ if (const ObjCInterfaceDecl *RID = ID->isObjCRequiresPropertyDefs())
+ S.Diag(RID->getLocation(), diag::note_suppressed_class_declare);
+ }
+}
+
+void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
+ ObjCContainerDecl *CDecl,
+ bool SynthesizeProperties) {
+ ObjCContainerDecl::PropertyMap PropMap;
+ ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl);
+
+ if (!SynthesizeProperties) {
+ ObjCContainerDecl::PropertyMap NoNeedToImplPropMap;
+ // Gather properties which need not be implemented in this class
+ // or category.
+ if (!IDecl)
+ if (ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl)) {
+        // For a category, there is no need to implement properties declared
+        // in its primary class (or its superclasses) when the property is
+        // declared in one of those containers.
+ if ((IDecl = C->getClassInterface())) {
+ ObjCInterfaceDecl::PropertyDeclOrder PO;
+ IDecl->collectPropertiesToImplement(NoNeedToImplPropMap, PO);
+ }
+ }
+ if (IDecl)
+ CollectSuperClassPropertyImplementations(IDecl, NoNeedToImplPropMap);
+
+ CollectImmediateProperties(CDecl, PropMap, NoNeedToImplPropMap);
+ }
+
+ // Scan the @interface to see if any of the protocols it adopts
+ // require an explicit implementation, via attribute
+ // 'objc_protocol_requires_explicit_implementation'.
+ if (IDecl) {
+ std::unique_ptr<ObjCContainerDecl::PropertyMap> LazyMap;
+
+ for (auto *PDecl : IDecl->all_referenced_protocols()) {
+ if (!PDecl->hasAttr<ObjCExplicitProtocolImplAttr>())
+ continue;
+ // Lazily construct a set of all the properties in the @interface
+ // of the class, without looking at the superclass. We cannot
+ // use the call to CollectImmediateProperties() above as that
+ // utilizes information from the super class's properties as well
+ // as scans the adopted protocols. This work only triggers for protocols
+ // with the attribute, which is very rare, and only occurs when
+ // analyzing the @implementation.
+ if (!LazyMap) {
+ ObjCContainerDecl::PropertyMap NoNeedToImplPropMap;
+ LazyMap.reset(new ObjCContainerDecl::PropertyMap());
+ CollectImmediateProperties(CDecl, *LazyMap, NoNeedToImplPropMap,
+ /* IncludeProtocols */ false);
+ }
+ // Add the properties of 'PDecl' to the list of properties that
+ // need to be implemented.
+ for (auto *PropDecl : PDecl->properties()) {
+ if ((*LazyMap)[PropDecl->getIdentifier()])
+ continue;
+ PropMap[PropDecl->getIdentifier()] = PropDecl;
+ }
+ }
+ }
+
+ if (PropMap.empty())
+ return;
+
+ llvm::DenseSet<ObjCPropertyDecl *> PropImplMap;
+ for (const auto *I : IMPDecl->property_impls())
+ PropImplMap.insert(I->getPropertyDecl());
+
+ SelectorSet InsMap;
+ // Collect property accessors implemented in current implementation.
+ for (const auto *I : IMPDecl->instance_methods())
+ InsMap.insert(I->getSelector());
+
+ ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl);
+ ObjCInterfaceDecl *PrimaryClass = nullptr;
+ if (C && !C->IsClassExtension())
+ if ((PrimaryClass = C->getClassInterface()))
+ // Report unimplemented properties in the category as well.
+ if (ObjCImplDecl *IMP = PrimaryClass->getImplementation()) {
+ // When reporting on missing setter/getters, do not report when
+ // setter/getter is implemented in category's primary class
+ // implementation.
+ for (const auto *I : IMP->instance_methods())
+ InsMap.insert(I->getSelector());
+ }
+
+ for (ObjCContainerDecl::PropertyMap::iterator
+ P = PropMap.begin(), E = PropMap.end(); P != E; ++P) {
+ ObjCPropertyDecl *Prop = P->second;
+    // Is there a matching property synthesize/dynamic?
+ if (Prop->isInvalidDecl() ||
+ Prop->getPropertyImplementation() == ObjCPropertyDecl::Optional ||
+ PropImplMap.count(Prop) ||
+ Prop->getAvailability() == AR_Unavailable)
+ continue;
+
+ // Diagnose unimplemented getters and setters.
+ DiagnoseUnimplementedAccessor(*this,
+ PrimaryClass, Prop->getGetterName(), IMPDecl, CDecl, C, Prop, InsMap);
+ if (!Prop->isReadOnly())
+ DiagnoseUnimplementedAccessor(*this,
+ PrimaryClass, Prop->getSetterName(),
+ IMPDecl, CDecl, C, Prop, InsMap);
+ }
+}
+
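+/// Warn about null_resettable properties whose setter is synthesized,
+/// since the synthesized setter simply stores nil rather than resetting
+/// the property to its default value; e.g. (illustrative):
+/// \code
+///   @property (null_resettable, copy) NSString *name;
+///   // with a synthesized setter, 'self.name = nil' leaves nil behind
+/// \endcode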
+void Sema::diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl) {
+ for (const auto *propertyImpl : impDecl->property_impls()) {
+ const auto *property = propertyImpl->getPropertyDecl();
+
+ // Warn about null_resettable properties with synthesized setters,
+ // because the setter won't properly handle nil.
+ if (propertyImpl->getPropertyImplementation()
+ == ObjCPropertyImplDecl::Synthesize &&
+ (property->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_null_resettable) &&
+ property->getGetterMethodDecl() &&
+ property->getSetterMethodDecl()) {
+ auto *getterMethod = property->getGetterMethodDecl();
+ auto *setterMethod = property->getSetterMethodDecl();
+ if (!impDecl->getInstanceMethod(setterMethod->getSelector()) &&
+ !impDecl->getInstanceMethod(getterMethod->getSelector())) {
+ SourceLocation loc = propertyImpl->getLocation();
+ if (loc.isInvalid())
+ loc = impDecl->getLocStart();
+
+ Diag(loc, diag::warn_null_resettable_setter)
+ << setterMethod->getSelector() << property->getDeclName();
+ }
+ }
+ }
+}
+
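+/// AtomicPropertySetterGetterRules - Diagnose a readwrite atomic property
+/// for which only one accessor is user-implemented, since atomicity cannot
+/// be guaranteed when a hand-written getter is paired with a synthesized
+/// setter (or vice versa); e.g. (illustrative):
+/// \code
+///   @property int count; // atomic by default
+///   // implementing -count but not -setCount: triggers the warning
+/// \endcode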
+void
+Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
+ ObjCInterfaceDecl* IDecl) {
+ // Rules apply in non-GC mode only
+ if (getLangOpts().getGC() != LangOptions::NonGC)
+ return;
+ ObjCContainerDecl::PropertyMap PM;
+ for (auto *Prop : IDecl->properties())
+ PM[Prop->getIdentifier()] = Prop;
+ for (const auto *Ext : IDecl->known_extensions())
+ for (auto *Prop : Ext->properties())
+ PM[Prop->getIdentifier()] = Prop;
+
+ for (ObjCContainerDecl::PropertyMap::iterator I = PM.begin(), E = PM.end();
+ I != E; ++I) {
+ const ObjCPropertyDecl *Property = I->second;
+ ObjCMethodDecl *GetterMethod = nullptr;
+ ObjCMethodDecl *SetterMethod = nullptr;
+ bool LookedUpGetterSetter = false;
+
+ unsigned Attributes = Property->getPropertyAttributes();
+ unsigned AttributesAsWritten = Property->getPropertyAttributesAsWritten();
+
+ if (!(AttributesAsWritten & ObjCPropertyDecl::OBJC_PR_atomic) &&
+ !(AttributesAsWritten & ObjCPropertyDecl::OBJC_PR_nonatomic)) {
+ GetterMethod = IMPDecl->getInstanceMethod(Property->getGetterName());
+ SetterMethod = IMPDecl->getInstanceMethod(Property->getSetterName());
+ LookedUpGetterSetter = true;
+ if (GetterMethod) {
+ Diag(GetterMethod->getLocation(),
+ diag::warn_default_atomic_custom_getter_setter)
+ << Property->getIdentifier() << 0;
+ Diag(Property->getLocation(), diag::note_property_declare);
+ }
+ if (SetterMethod) {
+ Diag(SetterMethod->getLocation(),
+ diag::warn_default_atomic_custom_getter_setter)
+ << Property->getIdentifier() << 1;
+ Diag(Property->getLocation(), diag::note_property_declare);
+ }
+ }
+
+    // We only care about readwrite atomic properties.
+ if ((Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) ||
+ !(Attributes & ObjCPropertyDecl::OBJC_PR_readwrite))
+ continue;
+ if (const ObjCPropertyImplDecl *PIDecl
+ = IMPDecl->FindPropertyImplDecl(Property->getIdentifier())) {
+ if (PIDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ continue;
+ if (!LookedUpGetterSetter) {
+ GetterMethod = IMPDecl->getInstanceMethod(Property->getGetterName());
+ SetterMethod = IMPDecl->getInstanceMethod(Property->getSetterName());
+ }
+ if ((GetterMethod && !SetterMethod) || (!GetterMethod && SetterMethod)) {
+ SourceLocation MethodLoc =
+ (GetterMethod ? GetterMethod->getLocation()
+ : SetterMethod->getLocation());
+ Diag(MethodLoc, diag::warn_atomic_property_rule)
+ << Property->getIdentifier() << (GetterMethod != nullptr)
+ << (SetterMethod != nullptr);
+        // Emit fix-it hints where we can.
+ if (Property->getLParenLoc().isValid() &&
+ !(AttributesAsWritten & ObjCPropertyDecl::OBJC_PR_atomic)) {
+ // @property () ... case.
+ SourceLocation AfterLParen =
+ getLocForEndOfToken(Property->getLParenLoc());
+          StringRef NonatomicStr = AttributesAsWritten ? "nonatomic, "
+                                                       : "nonatomic";
+ Diag(Property->getLocation(),
+ diag::note_atomic_property_fixup_suggest)
+ << FixItHint::CreateInsertion(AfterLParen, NonatomicStr);
+ } else if (Property->getLParenLoc().isInvalid()) {
+          // @property id ... case (no parenthesized attribute list).
+ SourceLocation startLoc =
+ Property->getTypeSourceInfo()->getTypeLoc().getBeginLoc();
+ Diag(Property->getLocation(),
+ diag::note_atomic_property_fixup_suggest)
+ << FixItHint::CreateInsertion(startLoc, "(nonatomic) ");
+          } else
+            Diag(MethodLoc, diag::note_atomic_property_fixup_suggest);
+ Diag(Property->getLocation(), diag::note_property_declare);
+ }
+ }
+ }
+}
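+
+// For illustration, a hedged sketch of the pattern diagnosed above: a
+// readwrite property that is atomic by default but pairs a user-defined
+// getter with a synthesized setter (hypothetical names):
+//
+//   @interface Account : NSObject
+//   @property (retain) NSString *owner; // implicitly atomic
+//   @end
+//
+//   @implementation Account
+//   - (NSString *)owner { return _owner; } // custom getter only: warns, and
+//   @end                                   // the fix-it suggests (nonatomic)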
+
+void Sema::DiagnoseOwningPropertyGetterSynthesis(
+    const ObjCImplementationDecl *D) {
+ if (getLangOpts().getGC() == LangOptions::GCOnly)
+ return;
+
+ for (const auto *PID : D->property_impls()) {
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ if (PD && !PD->hasAttr<NSReturnsNotRetainedAttr>() &&
+ !D->getInstanceMethod(PD->getGetterName())) {
+ ObjCMethodDecl *method = PD->getGetterMethodDecl();
+ if (!method)
+ continue;
+ ObjCMethodFamily family = method->getMethodFamily();
+ if (family == OMF_alloc || family == OMF_copy ||
+ family == OMF_mutableCopy || family == OMF_new) {
+ if (getLangOpts().ObjCAutoRefCount)
+ Diag(PD->getLocation(), diag::err_cocoa_naming_owned_rule);
+ else
+ Diag(PD->getLocation(), diag::warn_cocoa_naming_owned_rule);
+
+ // Look for a getter explicitly declared alongside the property.
+ // If we find one, use its location for the note.
+ SourceLocation noteLoc = PD->getLocation();
+ SourceLocation fixItLoc;
+ for (auto *getterRedecl : method->redecls()) {
+ if (getterRedecl->isImplicit())
+ continue;
+ if (getterRedecl->getDeclContext() != PD->getDeclContext())
+ continue;
+ noteLoc = getterRedecl->getLocation();
+ fixItLoc = getterRedecl->getLocEnd();
+ }
+
+ Preprocessor &PP = getPreprocessor();
+ TokenValue tokens[] = {
+ tok::kw___attribute, tok::l_paren, tok::l_paren,
+ PP.getIdentifierInfo("objc_method_family"), tok::l_paren,
+ PP.getIdentifierInfo("none"), tok::r_paren,
+ tok::r_paren, tok::r_paren
+ };
+ StringRef spelling = "__attribute__((objc_method_family(none)))";
+ StringRef macroName = PP.getLastMacroWithSpelling(noteLoc, tokens);
+ if (!macroName.empty())
+ spelling = macroName;
+
+ auto noteDiag = Diag(noteLoc, diag::note_cocoa_naming_declare_family)
+ << method->getDeclName() << spelling;
+ if (fixItLoc.isValid()) {
+ SmallString<64> fixItText(" ");
+ fixItText += spelling;
+ noteDiag << FixItHint::CreateInsertion(fixItLoc, fixItText);
+ }
+ }
+ }
+ }
+}
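+
+// A minimal sketch of the naming-rule violation diagnosed above (hypothetical
+// names): the getter for 'newDocument' falls into the OMF_new method family,
+// so it would be assumed to return an owned (+1) object:
+//
+//   @interface Library : NSObject
+//   @property (strong) id newDocument; // error under ARC, warning otherwise;
+//   @end                               // the note suggests marking the getter
+//                                      // __attribute__((objc_method_family(none)))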
+
+void Sema::DiagnoseMissingDesignatedInitOverrides(
+ const ObjCImplementationDecl *ImplD,
+ const ObjCInterfaceDecl *IFD) {
+ assert(IFD->hasDesignatedInitializers());
+ const ObjCInterfaceDecl *SuperD = IFD->getSuperClass();
+ if (!SuperD)
+ return;
+
+ SelectorSet InitSelSet;
+ for (const auto *I : ImplD->instance_methods())
+ if (I->getMethodFamily() == OMF_init)
+ InitSelSet.insert(I->getSelector());
+
+ SmallVector<const ObjCMethodDecl *, 8> DesignatedInits;
+ SuperD->getDesignatedInitializers(DesignatedInits);
+ for (SmallVector<const ObjCMethodDecl *, 8>::iterator
+ I = DesignatedInits.begin(), E = DesignatedInits.end(); I != E; ++I) {
+ const ObjCMethodDecl *MD = *I;
+ if (!InitSelSet.count(MD->getSelector())) {
+ bool Ignore = false;
+ if (auto *IMD = IFD->getInstanceMethod(MD->getSelector())) {
+ Ignore = IMD->isUnavailable();
+ }
+ if (!Ignore) {
+ Diag(ImplD->getLocation(),
+ diag::warn_objc_implementation_missing_designated_init_override)
+ << MD->getSelector();
+ Diag(MD->getLocation(), diag::note_objc_designated_init_marked_here);
+ }
+ }
+ }
+}
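+
+// For illustration, a hedged sketch of the missing-override warning above
+// (hypothetical names):
+//
+//   @interface Base : NSObject
+//   - (instancetype)initWithName:(NSString *)n NS_DESIGNATED_INITIALIZER;
+//   @end
+//
+//   @interface Derived : Base
+//   - (instancetype)initWithTag:(int)t NS_DESIGNATED_INITIALIZER;
+//   @end
+//
+//   @implementation Derived              // warns: superclass designated
+//   - (instancetype)initWithTag:(int)t { // initializer -initWithName: is
+//     return [super initWithName:@""];   // not overridden in this
+//   }                                    // implementation
+//   @end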
+
+/// AddPropertyAttrs - Propagates attributes from a property to the
+/// implicitly-declared getter or setter for that property.
+static void AddPropertyAttrs(Sema &S, ObjCMethodDecl *PropertyMethod,
+ ObjCPropertyDecl *Property) {
+ // Should we just clone all attributes over?
+ for (const auto *A : Property->attrs()) {
+ if (isa<DeprecatedAttr>(A) ||
+ isa<UnavailableAttr>(A) ||
+ isa<AvailabilityAttr>(A))
+ PropertyMethod->addAttr(A->clone(S.Context));
+ }
+}
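+
+// A small illustrative sketch (hypothetical property): a 'deprecated'
+// attribute on the declaration below is cloned onto the implicitly declared
+// -legacyMode and -setLegacyMode: accessors, so uses of either accessor are
+// also diagnosed as deprecated:
+//
+//   @property (assign) BOOL legacyMode __attribute__((deprecated));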
+
+/// ProcessPropertyDecl - Make sure that any user-defined setter/getter methods
+/// have the property type and issue diagnostics if they don't.
+/// Also synthesize a getter/setter method if none exist (and update the
+/// appropriate lookup tables).
+void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
+ ObjCMethodDecl *GetterMethod, *SetterMethod;
+ ObjCContainerDecl *CD = cast<ObjCContainerDecl>(property->getDeclContext());
+ if (CD->isInvalidDecl())
+ return;
+
+ GetterMethod = CD->getInstanceMethod(property->getGetterName());
+  // If the setter or getter is not found in a class extension, it might be
+  // in the primary class.
+ if (!GetterMethod)
+ if (const ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CD))
+ if (CatDecl->IsClassExtension())
+ GetterMethod = CatDecl->getClassInterface()->
+ getInstanceMethod(property->getGetterName());
+
+ SetterMethod = CD->getInstanceMethod(property->getSetterName());
+ if (!SetterMethod)
+ if (const ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CD))
+ if (CatDecl->IsClassExtension())
+ SetterMethod = CatDecl->getClassInterface()->
+ getInstanceMethod(property->getSetterName());
+ DiagnosePropertyAccessorMismatch(property, GetterMethod,
+ property->getLocation());
+
+ if (SetterMethod) {
+ ObjCPropertyDecl::PropertyAttributeKind CAttr =
+ property->getPropertyAttributes();
+ if ((!(CAttr & ObjCPropertyDecl::OBJC_PR_readonly)) &&
+ Context.getCanonicalType(SetterMethod->getReturnType()) !=
+ Context.VoidTy)
+ Diag(SetterMethod->getLocation(), diag::err_setter_type_void);
+ if (SetterMethod->param_size() != 1 ||
+ !Context.hasSameUnqualifiedType(
+ (*SetterMethod->param_begin())->getType().getNonReferenceType(),
+ property->getType().getNonReferenceType())) {
+ Diag(property->getLocation(),
+ diag::warn_accessor_property_type_mismatch)
+ << property->getDeclName()
+ << SetterMethod->getSelector();
+ Diag(SetterMethod->getLocation(), diag::note_declared_at);
+ }
+ }
+
+ // Synthesize getter/setter methods if none exist.
+  // Find the default getter and, if one is not found, add one.
+ // FIXME: The synthesized property we set here is misleading. We almost always
+ // synthesize these methods unless the user explicitly provided prototypes
+  // (which is odd, but allowed). Sema should be typechecking that the
+  // declarations jibe in that situation (which it currently does not).
+ if (!GetterMethod) {
+ // No instance method of same name as property getter name was found.
+ // Declare a getter method and add it to the list of methods
+ // for this class.
+ SourceLocation Loc = property->getLocation();
+
+ // If the property is null_resettable, the getter returns nonnull.
+ QualType resultTy = property->getType();
+ if (property->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_null_resettable) {
+ QualType modifiedTy = resultTy;
+ if (auto nullability = AttributedType::stripOuterNullability(modifiedTy)) {
+ if (*nullability == NullabilityKind::Unspecified)
+ resultTy = Context.getAttributedType(AttributedType::attr_nonnull,
+ modifiedTy, modifiedTy);
+ }
+ }
+
+ GetterMethod = ObjCMethodDecl::Create(Context, Loc, Loc,
+ property->getGetterName(),
+ resultTy, nullptr, CD,
+ /*isInstance=*/true, /*isVariadic=*/false,
+ /*isPropertyAccessor=*/true,
+ /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
+ (property->getPropertyImplementation() ==
+ ObjCPropertyDecl::Optional) ?
+ ObjCMethodDecl::Optional :
+ ObjCMethodDecl::Required);
+ CD->addDecl(GetterMethod);
+
+ AddPropertyAttrs(*this, GetterMethod, property);
+
+ if (property->hasAttr<NSReturnsNotRetainedAttr>())
+ GetterMethod->addAttr(NSReturnsNotRetainedAttr::CreateImplicit(Context,
+ Loc));
+
+ if (property->hasAttr<ObjCReturnsInnerPointerAttr>())
+ GetterMethod->addAttr(
+ ObjCReturnsInnerPointerAttr::CreateImplicit(Context, Loc));
+
+ if (const SectionAttr *SA = property->getAttr<SectionAttr>())
+ GetterMethod->addAttr(
+ SectionAttr::CreateImplicit(Context, SectionAttr::GNU_section,
+ SA->getName(), Loc));
+
+ if (getLangOpts().ObjCAutoRefCount)
+ CheckARCMethodDecl(GetterMethod);
+ } else
+    // A user-declared getter will be synthesized when @synthesize of
+    // the property with the same name is seen in the @implementation.
+ GetterMethod->setPropertyAccessor(true);
+ property->setGetterMethodDecl(GetterMethod);
+
+ // Skip setter if property is read-only.
+ if (!property->isReadOnly()) {
+    // Find the default setter and, if one is not found, add one.
+ if (!SetterMethod) {
+ // No instance method of same name as property setter name was found.
+ // Declare a setter method and add it to the list of methods
+ // for this class.
+ SourceLocation Loc = property->getLocation();
+
+ SetterMethod =
+ ObjCMethodDecl::Create(Context, Loc, Loc,
+ property->getSetterName(), Context.VoidTy,
+ nullptr, CD, /*isInstance=*/true,
+ /*isVariadic=*/false,
+ /*isPropertyAccessor=*/true,
+ /*isImplicitlyDeclared=*/true,
+ /*isDefined=*/false,
+ (property->getPropertyImplementation() ==
+ ObjCPropertyDecl::Optional) ?
+ ObjCMethodDecl::Optional :
+ ObjCMethodDecl::Required);
+
+ // If the property is null_resettable, the setter accepts a
+ // nullable value.
+ QualType paramTy = property->getType().getUnqualifiedType();
+ if (property->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_null_resettable) {
+ QualType modifiedTy = paramTy;
+      if (auto nullability =
+              AttributedType::stripOuterNullability(modifiedTy)) {
+ if (*nullability == NullabilityKind::Unspecified)
+ paramTy = Context.getAttributedType(AttributedType::attr_nullable,
+ modifiedTy, modifiedTy);
+ }
+ }
+
+ // Invent the arguments for the setter. We don't bother making a
+ // nice name for the argument.
+ ParmVarDecl *Argument = ParmVarDecl::Create(Context, SetterMethod,
+ Loc, Loc,
+ property->getIdentifier(),
+ paramTy,
+ /*TInfo=*/nullptr,
+ SC_None,
+ nullptr);
+ SetterMethod->setMethodParams(Context, Argument, None);
+
+ AddPropertyAttrs(*this, SetterMethod, property);
+
+ CD->addDecl(SetterMethod);
+ if (const SectionAttr *SA = property->getAttr<SectionAttr>())
+ SetterMethod->addAttr(
+ SectionAttr::CreateImplicit(Context, SectionAttr::GNU_section,
+ SA->getName(), Loc));
+ // It's possible for the user to have set a very odd custom
+ // setter selector that causes it to have a method family.
+ if (getLangOpts().ObjCAutoRefCount)
+ CheckARCMethodDecl(SetterMethod);
+ } else
+      // A user-declared setter will be synthesized when @synthesize of
+      // the property with the same name is seen in the @implementation.
+ SetterMethod->setPropertyAccessor(true);
+ property->setSetterMethodDecl(SetterMethod);
+ }
+ // Add any synthesized methods to the global pool. This allows us to
+ // handle the following, which is supported by GCC (and part of the design).
+ //
+ // @interface Foo
+ // @property double bar;
+ // @end
+ //
+ // void thisIsUnfortunate() {
+ // id foo;
+ // double bar = [foo bar];
+ // }
+ //
+ if (GetterMethod)
+ AddInstanceMethodToGlobalPool(GetterMethod);
+ if (SetterMethod)
+ AddInstanceMethodToGlobalPool(SetterMethod);
+
+ ObjCInterfaceDecl *CurrentClass = dyn_cast<ObjCInterfaceDecl>(CD);
+ if (!CurrentClass) {
+ if (ObjCCategoryDecl *Cat = dyn_cast<ObjCCategoryDecl>(CD))
+ CurrentClass = Cat->getClassInterface();
+ else if (ObjCImplDecl *Impl = dyn_cast<ObjCImplDecl>(CD))
+ CurrentClass = Impl->getClassInterface();
+ }
+ if (GetterMethod)
+ CheckObjCMethodOverrides(GetterMethod, CurrentClass, Sema::RTC_Unknown);
+ if (SetterMethod)
+ CheckObjCMethodOverrides(SetterMethod, CurrentClass, Sema::RTC_Unknown);
+}
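+
+// For a property with no user-declared accessors, the methods synthesized
+// above have the usual shapes; e.g. for the illustrative declaration
+//
+//   @property (copy) NSString *name;
+//
+// ProcessPropertyDecl effectively declares:
+//
+//   - (NSString *)name;                 // implicit, isPropertyAccessor = true
+//   - (void)setName:(NSString *)name;   // returns void, takes one parameter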
+
+void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
+ SourceLocation Loc,
+ unsigned &Attributes,
+ bool propertyInPrimaryClass) {
+ // FIXME: Improve the reported location.
+ if (!PDecl || PDecl->isInvalidDecl())
+ return;
+
+ if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
+ (Attributes & ObjCDeclSpec::DQ_PR_readwrite))
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "readonly" << "readwrite";
+
+ ObjCPropertyDecl *PropertyDecl = cast<ObjCPropertyDecl>(PDecl);
+ QualType PropertyTy = PropertyDecl->getType();
+
+ // Check for copy or retain on non-object types.
+ if ((Attributes & (ObjCDeclSpec::DQ_PR_weak | ObjCDeclSpec::DQ_PR_copy |
+ ObjCDeclSpec::DQ_PR_retain | ObjCDeclSpec::DQ_PR_strong)) &&
+ !PropertyTy->isObjCRetainableType() &&
+ !PropertyDecl->hasAttr<ObjCNSObjectAttr>()) {
+ Diag(Loc, diag::err_objc_property_requires_object)
+ << (Attributes & ObjCDeclSpec::DQ_PR_weak ? "weak" :
+ Attributes & ObjCDeclSpec::DQ_PR_copy ? "copy" : "retain (or strong)");
+ Attributes &= ~(ObjCDeclSpec::DQ_PR_weak | ObjCDeclSpec::DQ_PR_copy |
+ ObjCDeclSpec::DQ_PR_retain | ObjCDeclSpec::DQ_PR_strong);
+ PropertyDecl->setInvalidDecl();
+ }
+
+ // Check for more than one of { assign, copy, retain }.
+ if (Attributes & ObjCDeclSpec::DQ_PR_assign) {
+ if (Attributes & ObjCDeclSpec::DQ_PR_copy) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "assign" << "copy";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_copy;
+ }
+ if (Attributes & ObjCDeclSpec::DQ_PR_retain) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "assign" << "retain";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
+ }
+ if (Attributes & ObjCDeclSpec::DQ_PR_strong) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "assign" << "strong";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_strong;
+ }
+ if (getLangOpts().ObjCAutoRefCount &&
+ (Attributes & ObjCDeclSpec::DQ_PR_weak)) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "assign" << "weak";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
+ }
+ if (PropertyDecl->hasAttr<IBOutletCollectionAttr>())
+ Diag(Loc, diag::warn_iboutletcollection_property_assign);
+ } else if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained) {
+ if (Attributes & ObjCDeclSpec::DQ_PR_copy) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "unsafe_unretained" << "copy";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_copy;
+ }
+ if (Attributes & ObjCDeclSpec::DQ_PR_retain) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "unsafe_unretained" << "retain";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
+ }
+ if (Attributes & ObjCDeclSpec::DQ_PR_strong) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "unsafe_unretained" << "strong";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_strong;
+ }
+ if (getLangOpts().ObjCAutoRefCount &&
+ (Attributes & ObjCDeclSpec::DQ_PR_weak)) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "unsafe_unretained" << "weak";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
+ }
+ } else if (Attributes & ObjCDeclSpec::DQ_PR_copy) {
+ if (Attributes & ObjCDeclSpec::DQ_PR_retain) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "copy" << "retain";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
+ }
+ if (Attributes & ObjCDeclSpec::DQ_PR_strong) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "copy" << "strong";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_strong;
+ }
+ if (Attributes & ObjCDeclSpec::DQ_PR_weak) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "copy" << "weak";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
+ }
+ }
+ else if ((Attributes & ObjCDeclSpec::DQ_PR_retain) &&
+ (Attributes & ObjCDeclSpec::DQ_PR_weak)) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "retain" << "weak";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
+ }
+ else if ((Attributes & ObjCDeclSpec::DQ_PR_strong) &&
+ (Attributes & ObjCDeclSpec::DQ_PR_weak)) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "strong" << "weak";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
+ }
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_weak) {
+ // 'weak' and 'nonnull' are mutually exclusive.
+ if (auto nullability = PropertyTy->getNullability(Context)) {
+ if (*nullability == NullabilityKind::NonNull)
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "nonnull" << "weak";
+ }
+ }
+
+ if ((Attributes & ObjCDeclSpec::DQ_PR_atomic) &&
+ (Attributes & ObjCDeclSpec::DQ_PR_nonatomic)) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "atomic" << "nonatomic";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_atomic;
+ }
+
+ // Warn if user supplied no assignment attribute, property is
+ // readwrite, and this is an object type.
+ if (!getOwnershipRule(Attributes) && PropertyTy->isObjCRetainableType()) {
+ if (Attributes & ObjCDeclSpec::DQ_PR_readonly) {
+ // do nothing
+ } else if (getLangOpts().ObjCAutoRefCount) {
+ // With arc, @property definitions should default to strong when
+ // not specified.
+ PropertyDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+ } else if (PropertyTy->isObjCObjectPointerType()) {
+ bool isAnyClassTy =
+ (PropertyTy->isObjCClassType() ||
+ PropertyTy->isObjCQualifiedClassType());
+        // In non-GC, non-ARC mode, 'Class' is treated as a 'void *'; no need
+        // to issue any warning.
+ if (isAnyClassTy && getLangOpts().getGC() == LangOptions::NonGC)
+ ;
+ else if (propertyInPrimaryClass) {
+        // Don't warn on a property with no lifetime in a class extension, as
+        // it is inherited from the property in the primary class.
+        // Skip this warning in GC-only mode.
+ if (getLangOpts().getGC() != LangOptions::GCOnly)
+ Diag(Loc, diag::warn_objc_property_no_assignment_attribute);
+
+        // In non-GC code, warn that this is likely inappropriate.
+ if (getLangOpts().getGC() == LangOptions::NonGC)
+ Diag(Loc, diag::warn_objc_property_default_assign_on_object);
+ }
+ }
+
+ // FIXME: Implement warning dependent on NSCopying being
+ // implemented. See also:
+ // <rdar://5168496&4855821&5607453&5096644&4947311&5698469&4947014&5168496>
+ // (please trim this list while you are at it).
+ }
+
+ if (!(Attributes & ObjCDeclSpec::DQ_PR_copy)
+      && !(Attributes & ObjCDeclSpec::DQ_PR_readonly)
+ && getLangOpts().getGC() == LangOptions::GCOnly
+ && PropertyTy->isBlockPointerType())
+ Diag(Loc, diag::warn_objc_property_copy_missing_on_block);
+ else if ((Attributes & ObjCDeclSpec::DQ_PR_retain) &&
+ !(Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
+ !(Attributes & ObjCDeclSpec::DQ_PR_strong) &&
+ PropertyTy->isBlockPointerType())
+ Diag(Loc, diag::warn_objc_property_retain_of_block);
+
+ if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
+ (Attributes & ObjCDeclSpec::DQ_PR_setter))
+ Diag(Loc, diag::warn_objc_readonly_property_has_setter);
+
+}
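+
+// A minimal sketch of the mutual-exclusion checks above (hypothetical
+// declarations); each line below is rejected with
+// err_objc_property_attr_mutually_exclusive:
+//
+//   @property (readonly, readwrite) id a; // "readonly" vs "readwrite"
+//   @property (assign, copy) id b;        // "assign" vs "copy"
+//   @property (strong, weak) id c;        // "strong" vs "weak"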
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaOpenMP.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaOpenMP.cpp
new file mode 100644
index 0000000..f66e218
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaOpenMP.cpp
@@ -0,0 +1,8544 @@
+//===--- SemaOpenMP.cpp - Semantic Analysis for OpenMP constructs ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// \brief This file implements semantic analysis for OpenMP directives and
+/// clauses.
+///
+//===----------------------------------------------------------------------===//
+
+#include "TreeTransform.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclOpenMP.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtOpenMP.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/OpenMPKinds.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaInternal.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Stack of data-sharing attributes for variables
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// \brief Default data sharing attributes, which can be applied to a directive.
+enum DefaultDataSharingAttributes {
+ DSA_unspecified = 0, /// \brief Data sharing attribute not specified.
+ DSA_none = 1 << 0, /// \brief Default data sharing attribute 'none'.
+ DSA_shared = 1 << 1 /// \brief Default data sharing attribute 'shared'.
+};
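+
+// These values correspond to the OpenMP 'default' clause; an illustrative
+// fragment (hypothetical variables):
+//
+//   #pragma omp parallel default(none) shared(a) // DSA_none: every variable
+//   { a += b; }                                  // referenced in the region
+//                                                // needs explicit DSA, so the
+//                                                // use of 'b' is diagnosed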
+
+template <class T> struct MatchesAny {
+ explicit MatchesAny(ArrayRef<T> Arr) : Arr(std::move(Arr)) {}
+ bool operator()(T Kind) {
+ for (auto KindEl : Arr)
+ if (KindEl == Kind)
+ return true;
+ return false;
+ }
+
+private:
+ ArrayRef<T> Arr;
+};
+struct MatchesAlways {
+ MatchesAlways() {}
+ template <class T> bool operator()(T) { return true; }
+};
+
+typedef MatchesAny<OpenMPClauseKind> MatchesAnyClause;
+typedef MatchesAny<OpenMPDirectiveKind> MatchesAnyDirective;
+
+/// \brief Stack for tracking declarations used in OpenMP directives and
+/// clauses and their data-sharing attributes.
+class DSAStackTy {
+public:
+ struct DSAVarData {
+ OpenMPDirectiveKind DKind;
+ OpenMPClauseKind CKind;
+ DeclRefExpr *RefExpr;
+ SourceLocation ImplicitDSALoc;
+ DSAVarData()
+ : DKind(OMPD_unknown), CKind(OMPC_unknown), RefExpr(nullptr),
+ ImplicitDSALoc() {}
+ };
+
+public:
+ struct MapInfo {
+ Expr *RefExpr;
+ };
+
+private:
+ struct DSAInfo {
+ OpenMPClauseKind Attributes;
+ DeclRefExpr *RefExpr;
+ };
+ typedef llvm::SmallDenseMap<VarDecl *, DSAInfo, 64> DeclSAMapTy;
+ typedef llvm::SmallDenseMap<VarDecl *, DeclRefExpr *, 64> AlignedMapTy;
+ typedef llvm::DenseMap<VarDecl *, unsigned> LoopControlVariablesMapTy;
+ typedef llvm::SmallDenseMap<VarDecl *, MapInfo, 64> MappedDeclsTy;
+ typedef llvm::StringMap<std::pair<OMPCriticalDirective *, llvm::APSInt>>
+ CriticalsWithHintsTy;
+
+ struct SharingMapTy {
+ DeclSAMapTy SharingMap;
+ AlignedMapTy AlignedMap;
+ MappedDeclsTy MappedDecls;
+ LoopControlVariablesMapTy LCVMap;
+ DefaultDataSharingAttributes DefaultAttr;
+ SourceLocation DefaultAttrLoc;
+ OpenMPDirectiveKind Directive;
+ DeclarationNameInfo DirectiveName;
+ Scope *CurScope;
+ SourceLocation ConstructLoc;
+    /// \brief The first argument (Expr *) contains the optional argument of
+    /// the 'ordered' clause; the second one is true if the region has an
+    /// 'ordered' clause, false otherwise.
+ llvm::PointerIntPair<Expr *, 1, bool> OrderedRegion;
+ bool NowaitRegion;
+ bool CancelRegion;
+ unsigned AssociatedLoops;
+ SourceLocation InnerTeamsRegionLoc;
+ SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
+ Scope *CurScope, SourceLocation Loc)
+ : SharingMap(), AlignedMap(), LCVMap(), DefaultAttr(DSA_unspecified),
+ Directive(DKind), DirectiveName(std::move(Name)), CurScope(CurScope),
+ ConstructLoc(Loc), OrderedRegion(), NowaitRegion(false),
+ CancelRegion(false), AssociatedLoops(1), InnerTeamsRegionLoc() {}
+ SharingMapTy()
+ : SharingMap(), AlignedMap(), LCVMap(), DefaultAttr(DSA_unspecified),
+ Directive(OMPD_unknown), DirectiveName(), CurScope(nullptr),
+ ConstructLoc(), OrderedRegion(), NowaitRegion(false),
+ CancelRegion(false), AssociatedLoops(1), InnerTeamsRegionLoc() {}
+ };
+
+ typedef SmallVector<SharingMapTy, 64> StackTy;
+
+  /// \brief Stack of used declarations and their data-sharing attributes.
+ StackTy Stack;
+  /// \brief True if the check for DSA must be performed on the parent
+  /// directive, false if on the current directive.
+ OpenMPClauseKind ClauseKindMode;
+ Sema &SemaRef;
+ bool ForceCapturing;
+ CriticalsWithHintsTy Criticals;
+
+ typedef SmallVector<SharingMapTy, 8>::reverse_iterator reverse_iterator;
+
+ DSAVarData getDSA(StackTy::reverse_iterator Iter, VarDecl *D);
+
+ /// \brief Checks if the variable is a local for OpenMP region.
+ bool isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter);
+
+public:
+ explicit DSAStackTy(Sema &S)
+ : Stack(1), ClauseKindMode(OMPC_unknown), SemaRef(S),
+ ForceCapturing(false) {}
+
+ bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
+ void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; }
+
+ bool isForceVarCapturing() const { return ForceCapturing; }
+ void setForceVarCapturing(bool V) { ForceCapturing = V; }
+
+ void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
+ Scope *CurScope, SourceLocation Loc) {
+ Stack.push_back(SharingMapTy(DKind, DirName, CurScope, Loc));
+ Stack.back().DefaultAttrLoc = Loc;
+ }
+
+ void pop() {
+ assert(Stack.size() > 1 && "Data-sharing attributes stack is empty!");
+ Stack.pop_back();
+ }
+
+ void addCriticalWithHint(OMPCriticalDirective *D, llvm::APSInt Hint) {
+ Criticals[D->getDirectiveName().getAsString()] = std::make_pair(D, Hint);
+ }
+ const std::pair<OMPCriticalDirective *, llvm::APSInt>
+ getCriticalWithHint(const DeclarationNameInfo &Name) const {
+ auto I = Criticals.find(Name.getAsString());
+ if (I != Criticals.end())
+ return I->second;
+ return std::make_pair(nullptr, llvm::APSInt());
+ }
+  /// \brief If an 'aligned' declaration for the given variable \a D has not
+  /// been seen yet, add it and return nullptr; otherwise return the previous
+  /// occurrence's expression for diagnostics.
+ DeclRefExpr *addUniqueAligned(VarDecl *D, DeclRefExpr *NewDE);
+
+ /// \brief Register specified variable as loop control variable.
+ void addLoopControlVariable(VarDecl *D);
+  /// \brief Check if the specified variable is a loop control variable for
+  /// the current region.
+  /// \return The index of the loop control variable in the list of associated
+  /// for-loops (from outer to inner).
+  unsigned isLoopControlVariable(VarDecl *D);
+  /// \brief Check if the specified variable is a loop control variable for
+  /// the parent region.
+  /// \return The index of the loop control variable in the list of associated
+  /// for-loops (from outer to inner).
+  unsigned isParentLoopControlVariable(VarDecl *D);
+  /// \brief Get the loop control variable for the I-th loop (or nullptr) in
+  /// the parent directive.
+ VarDecl *getParentLoopControlVariable(unsigned I);
+
+ /// \brief Adds explicit data sharing attribute to the specified declaration.
+ void addDSA(VarDecl *D, DeclRefExpr *E, OpenMPClauseKind A);
+
+ /// \brief Returns data sharing attributes from top of the stack for the
+ /// specified declaration.
+ DSAVarData getTopDSA(VarDecl *D, bool FromParent);
+ /// \brief Returns data-sharing attributes for the specified declaration.
+ DSAVarData getImplicitDSA(VarDecl *D, bool FromParent);
+  /// \brief Checks if the specified variable has data-sharing attributes which
+  /// match the specified \a CPred predicate in any directive which matches the
+  /// \a DPred predicate.
+ template <class ClausesPredicate, class DirectivesPredicate>
+ DSAVarData hasDSA(VarDecl *D, ClausesPredicate CPred,
+ DirectivesPredicate DPred, bool FromParent);
+  /// \brief Checks if the specified variable has data-sharing attributes which
+  /// match the specified \a CPred predicate in any innermost directive which
+  /// matches the \a DPred predicate.
+ template <class ClausesPredicate, class DirectivesPredicate>
+ DSAVarData hasInnermostDSA(VarDecl *D, ClausesPredicate CPred,
+ DirectivesPredicate DPred,
+ bool FromParent);
+  /// \brief Checks if the specified variable has explicit data-sharing
+  /// attributes which match the specified \a CPred predicate at the specified
+  /// OpenMP region.
+ bool hasExplicitDSA(VarDecl *D,
+ const llvm::function_ref<bool(OpenMPClauseKind)> &CPred,
+ unsigned Level);
+
+  /// \brief Returns true if the directive at level \a Level matches the
+  /// specified \a DPred predicate.
+ bool hasExplicitDirective(
+ const llvm::function_ref<bool(OpenMPDirectiveKind)> &DPred,
+ unsigned Level);
+
+  /// \brief Finds a directive which matches the specified \a DPred predicate.
+ template <class NamedDirectivesPredicate>
+ bool hasDirective(NamedDirectivesPredicate DPred, bool FromParent);
+
+ /// \brief Returns currently analyzed directive.
+ OpenMPDirectiveKind getCurrentDirective() const {
+ return Stack.back().Directive;
+ }
+ /// \brief Returns parent directive.
+ OpenMPDirectiveKind getParentDirective() const {
+ if (Stack.size() > 2)
+ return Stack[Stack.size() - 2].Directive;
+ return OMPD_unknown;
+ }
+ /// \brief Return the directive associated with the provided scope.
+ OpenMPDirectiveKind getDirectiveForScope(const Scope *S) const;
+
+ /// \brief Set default data sharing attribute to none.
+ void setDefaultDSANone(SourceLocation Loc) {
+ Stack.back().DefaultAttr = DSA_none;
+ Stack.back().DefaultAttrLoc = Loc;
+ }
+ /// \brief Set default data sharing attribute to shared.
+ void setDefaultDSAShared(SourceLocation Loc) {
+ Stack.back().DefaultAttr = DSA_shared;
+ Stack.back().DefaultAttrLoc = Loc;
+ }
+
+ DefaultDataSharingAttributes getDefaultDSA() const {
+ return Stack.back().DefaultAttr;
+ }
+ SourceLocation getDefaultDSALocation() const {
+ return Stack.back().DefaultAttrLoc;
+ }
+
+  /// \brief Checks if the specified variable is threadprivate.
+ bool isThreadPrivate(VarDecl *D) {
+ DSAVarData DVar = getTopDSA(D, false);
+ return isOpenMPThreadPrivate(DVar.CKind);
+ }
+
+ /// \brief Marks current region as ordered (it has an 'ordered' clause).
+ void setOrderedRegion(bool IsOrdered, Expr *Param) {
+ Stack.back().OrderedRegion.setInt(IsOrdered);
+ Stack.back().OrderedRegion.setPointer(Param);
+ }
+  /// \brief Returns true if the parent region is ordered (has an associated
+  /// 'ordered' clause), false otherwise.
+ bool isParentOrderedRegion() const {
+ if (Stack.size() > 2)
+ return Stack[Stack.size() - 2].OrderedRegion.getInt();
+ return false;
+ }
+ /// \brief Returns optional parameter for the ordered region.
+ Expr *getParentOrderedRegionParam() const {
+ if (Stack.size() > 2)
+ return Stack[Stack.size() - 2].OrderedRegion.getPointer();
+ return nullptr;
+ }
+ /// \brief Marks current region as nowait (it has a 'nowait' clause).
+ void setNowaitRegion(bool IsNowait = true) {
+ Stack.back().NowaitRegion = IsNowait;
+ }
+  /// \brief Returns true if the parent region is nowait (has an associated
+  /// 'nowait' clause), false otherwise.
+ bool isParentNowaitRegion() const {
+ if (Stack.size() > 2)
+ return Stack[Stack.size() - 2].NowaitRegion;
+ return false;
+ }
+ /// \brief Marks parent region as cancel region.
+ void setParentCancelRegion(bool Cancel = true) {
+ if (Stack.size() > 2)
+ Stack[Stack.size() - 2].CancelRegion =
+ Stack[Stack.size() - 2].CancelRegion || Cancel;
+ }
+  /// \brief Return true if the current region has an inner cancel construct.
+ bool isCancelRegion() const {
+ return Stack.back().CancelRegion;
+ }
+
+ /// \brief Set collapse value for the region.
+ void setAssociatedLoops(unsigned Val) { Stack.back().AssociatedLoops = Val; }
+ /// \brief Return collapse value for region.
+ unsigned getAssociatedLoops() const { return Stack.back().AssociatedLoops; }
+
+ /// \brief Marks current target region as one with closely nested teams
+ /// region.
+ void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
+ if (Stack.size() > 2)
+ Stack[Stack.size() - 2].InnerTeamsRegionLoc = TeamsRegionLoc;
+ }
+  /// \brief Returns true if the current region has a closely nested teams
+  /// region.
+ bool hasInnerTeamsRegion() const {
+ return getInnerTeamsRegionLoc().isValid();
+ }
+ /// \brief Returns location of the nested teams region (if any).
+ SourceLocation getInnerTeamsRegionLoc() const {
+ if (Stack.size() > 1)
+ return Stack.back().InnerTeamsRegionLoc;
+ return SourceLocation();
+ }
+
+ Scope *getCurScope() const { return Stack.back().CurScope; }
+ Scope *getCurScope() { return Stack.back().CurScope; }
+ SourceLocation getConstructLoc() { return Stack.back().ConstructLoc; }
+
+ MapInfo getMapInfoForVar(VarDecl *VD) {
+    MapInfo VarMI = {nullptr};
+ for (auto Cnt = Stack.size() - 1; Cnt > 0; --Cnt) {
+ if (Stack[Cnt].MappedDecls.count(VD)) {
+ VarMI = Stack[Cnt].MappedDecls[VD];
+ break;
+ }
+ }
+ return VarMI;
+ }
+
+ void addMapInfoForVar(VarDecl *VD, MapInfo MI) {
+ if (Stack.size() > 1) {
+ Stack.back().MappedDecls[VD] = MI;
+ }
+ }
+
+ MapInfo IsMappedInCurrentRegion(VarDecl *VD) {
+ assert(Stack.size() > 1 && "Target level is 0");
+    MapInfo VarMI = {nullptr};
+ if (Stack.size() > 1 && Stack.back().MappedDecls.count(VD)) {
+ VarMI = Stack.back().MappedDecls[VD];
+ }
+ return VarMI;
+ }
+};
+bool isParallelOrTaskRegion(OpenMPDirectiveKind DKind) {
+ return isOpenMPParallelDirective(DKind) || DKind == OMPD_task ||
+ isOpenMPTeamsDirective(DKind) || DKind == OMPD_unknown ||
+ isOpenMPTaskLoopDirective(DKind);
+}
+} // namespace
+
+DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator Iter,
+ VarDecl *D) {
+ D = D->getCanonicalDecl();
+ DSAVarData DVar;
+ if (Iter == std::prev(Stack.rend())) {
+ // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
+ // in a region but not in construct]
+ // File-scope or namespace-scope variables referenced in called routines
+ // in the region are shared unless they appear in a threadprivate
+ // directive.
+ if (!D->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(D))
+ DVar.CKind = OMPC_shared;
+
+ // OpenMP [2.9.1.2, Data-sharing Attribute Rules for Variables Referenced
+ // in a region but not in construct]
+ // Variables with static storage duration that are declared in called
+ // routines in the region are shared.
+ if (D->hasGlobalStorage())
+ DVar.CKind = OMPC_shared;
+
+ return DVar;
+ }
+
+ DVar.DKind = Iter->Directive;
+ // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
+ // in a Construct, C/C++, predetermined, p.1]
+ // Variables with automatic storage duration that are declared in a scope
+ // inside the construct are private.
+ if (isOpenMPLocal(D, Iter) && D->isLocalVarDecl() &&
+ (D->getStorageClass() == SC_Auto || D->getStorageClass() == SC_None)) {
+ DVar.CKind = OMPC_private;
+ return DVar;
+ }
+
+ // Explicitly specified attributes and local variables with predetermined
+ // attributes.
+ if (Iter->SharingMap.count(D)) {
+ DVar.RefExpr = Iter->SharingMap[D].RefExpr;
+ DVar.CKind = Iter->SharingMap[D].Attributes;
+ DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
+ return DVar;
+ }
+
+ // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
+ // in a Construct, C/C++, implicitly determined, p.1]
+ // In a parallel or task construct, the data-sharing attributes of these
+ // variables are determined by the default clause, if present.
+ switch (Iter->DefaultAttr) {
+ case DSA_shared:
+ DVar.CKind = OMPC_shared;
+ DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
+ return DVar;
+ case DSA_none:
+ return DVar;
+ case DSA_unspecified:
+ // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
+ // in a Construct, implicitly determined, p.2]
+ // In a parallel construct, if no default clause is present, these
+ // variables are shared.
+ DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
+ if (isOpenMPParallelDirective(DVar.DKind) ||
+ isOpenMPTeamsDirective(DVar.DKind)) {
+ DVar.CKind = OMPC_shared;
+ return DVar;
+ }
+
+ // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
+ // in a Construct, implicitly determined, p.4]
+ // In a task construct, if no default clause is present, a variable that in
+ // the enclosing context is determined to be shared by all implicit tasks
+ // bound to the current team is shared.
+ if (DVar.DKind == OMPD_task) {
+ DSAVarData DVarTemp;
+ for (StackTy::reverse_iterator I = std::next(Iter), EE = Stack.rend();
+ I != EE; ++I) {
+ // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
+ // Referenced
+ // in a Construct, implicitly determined, p.6]
+ // In a task construct, if no default clause is present, a variable
+ // whose data-sharing attribute is not determined by the rules above is
+ // firstprivate.
+ DVarTemp = getDSA(I, D);
+ if (DVarTemp.CKind != OMPC_shared) {
+ DVar.RefExpr = nullptr;
+ DVar.DKind = OMPD_task;
+ DVar.CKind = OMPC_firstprivate;
+ return DVar;
+ }
+ if (isParallelOrTaskRegion(I->Directive))
+ break;
+ }
+ DVar.DKind = OMPD_task;
+ DVar.CKind =
+ (DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared;
+ return DVar;
+ }
+ }
+ // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
+ // in a Construct, implicitly determined, p.3]
+ // For constructs other than task, if no default clause is present, these
+ // variables inherit their data-sharing attributes from the enclosing
+ // context.
+ return getDSA(std::next(Iter), D);
+}
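+
+// A hedged illustration of the implicit rules applied above (hypothetical
+// code):
+//
+//   int x;                 // declared outside the constructs
+//   #pragma omp parallel   // no default clause: 'x' is implicitly shared
+//   {
+//   #pragma omp task       // 'x' is shared by all implicit tasks bound to the
+//     x++;                 // current team, so it stays shared; otherwise it
+//   }                      // would be implicitly firstprivate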
+
+DeclRefExpr *DSAStackTy::addUniqueAligned(VarDecl *D, DeclRefExpr *NewDE) {
+ assert(Stack.size() > 1 && "Data sharing attributes stack is empty");
+ D = D->getCanonicalDecl();
+ auto It = Stack.back().AlignedMap.find(D);
+ if (It == Stack.back().AlignedMap.end()) {
+ assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
+ Stack.back().AlignedMap[D] = NewDE;
+ return nullptr;
+  } else {
+    assert(It->second && "Unexpected nullptr expr in the aligned map");
+    return It->second;
+  }
+}
+
+void DSAStackTy::addLoopControlVariable(VarDecl *D) {
+ assert(Stack.size() > 1 && "Data-sharing attributes stack is empty");
+ D = D->getCanonicalDecl();
+ Stack.back().LCVMap.insert(std::make_pair(D, Stack.back().LCVMap.size() + 1));
+}
+
+unsigned DSAStackTy::isLoopControlVariable(VarDecl *D) {
+ assert(Stack.size() > 1 && "Data-sharing attributes stack is empty");
+ D = D->getCanonicalDecl();
+ return Stack.back().LCVMap.count(D) > 0 ? Stack.back().LCVMap[D] : 0;
+}
+
+unsigned DSAStackTy::isParentLoopControlVariable(VarDecl *D) {
+ assert(Stack.size() > 2 && "Data-sharing attributes stack is empty");
+ D = D->getCanonicalDecl();
+ return Stack[Stack.size() - 2].LCVMap.count(D) > 0
+ ? Stack[Stack.size() - 2].LCVMap[D]
+ : 0;
+}
+
+VarDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) {
+ assert(Stack.size() > 2 && "Data-sharing attributes stack is empty");
+ if (Stack[Stack.size() - 2].LCVMap.size() < I)
+ return nullptr;
+ for (auto &Pair : Stack[Stack.size() - 2].LCVMap) {
+ if (Pair.second == I)
+ return Pair.first;
+ }
+ return nullptr;
+}
+
+void DSAStackTy::addDSA(VarDecl *D, DeclRefExpr *E, OpenMPClauseKind A) {
+ D = D->getCanonicalDecl();
+ if (A == OMPC_threadprivate) {
+ Stack[0].SharingMap[D].Attributes = A;
+ Stack[0].SharingMap[D].RefExpr = E;
+ } else {
+ assert(Stack.size() > 1 && "Data-sharing attributes stack is empty");
+ Stack.back().SharingMap[D].Attributes = A;
+ Stack.back().SharingMap[D].RefExpr = E;
+ }
+}
+
+bool DSAStackTy::isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter) {
+ D = D->getCanonicalDecl();
+ if (Stack.size() > 2) {
+ reverse_iterator I = Iter, E = std::prev(Stack.rend());
+ Scope *TopScope = nullptr;
+ while (I != E && !isParallelOrTaskRegion(I->Directive)) {
+ ++I;
+ }
+ if (I == E)
+ return false;
+ TopScope = I->CurScope ? I->CurScope->getParent() : nullptr;
+ Scope *CurScope = getCurScope();
+ while (CurScope != TopScope && !CurScope->isDeclScope(D)) {
+ CurScope = CurScope->getParent();
+ }
+ return CurScope != TopScope;
+ }
+ return false;
+}
+
+/// \brief Build a variable declaration for an OpenMP loop iteration variable.
+static VarDecl *buildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type,
+ StringRef Name, const AttrVec *Attrs = nullptr) {
+ DeclContext *DC = SemaRef.CurContext;
+ IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
+ TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
+ VarDecl *Decl =
+ VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None);
+ if (Attrs) {
+ for (specific_attr_iterator<AlignedAttr> I(Attrs->begin()), E(Attrs->end());
+ I != E; ++I)
+ Decl->addAttr(*I);
+ }
+ Decl->setImplicit();
+ return Decl;
+}
+
+static DeclRefExpr *buildDeclRefExpr(Sema &S, VarDecl *D, QualType Ty,
+ SourceLocation Loc,
+ bool RefersToCapture = false) {
+ D->setReferenced();
+ D->markUsed(S.Context);
+ return DeclRefExpr::Create(S.getASTContext(), NestedNameSpecifierLoc(),
+ SourceLocation(), D, RefersToCapture, Loc, Ty,
+ VK_LValue);
+}
+
+DSAStackTy::DSAVarData DSAStackTy::getTopDSA(VarDecl *D, bool FromParent) {
+ D = D->getCanonicalDecl();
+ DSAVarData DVar;
+
+ // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
+ // in a Construct, C/C++, predetermined, p.1]
+ // Variables appearing in threadprivate directives are threadprivate.
+ if ((D->getTLSKind() != VarDecl::TLS_None &&
+ !(D->hasAttr<OMPThreadPrivateDeclAttr>() &&
+ SemaRef.getLangOpts().OpenMPUseTLS &&
+ SemaRef.getASTContext().getTargetInfo().isTLSSupported())) ||
+ (D->getStorageClass() == SC_Register && D->hasAttr<AsmLabelAttr>() &&
+ !D->isLocalVarDecl())) {
+ addDSA(D, buildDeclRefExpr(SemaRef, D, D->getType().getNonReferenceType(),
+ D->getLocation()),
+ OMPC_threadprivate);
+ }
+ if (Stack[0].SharingMap.count(D)) {
+ DVar.RefExpr = Stack[0].SharingMap[D].RefExpr;
+ DVar.CKind = OMPC_threadprivate;
+ return DVar;
+ }
+
+ // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
+ // in a Construct, C/C++, predetermined, p.4]
+ // Static data members are shared.
+ // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
+ // in a Construct, C/C++, predetermined, p.7]
+ // Variables with static storage duration that are declared in a scope
+ // inside the construct are shared.
+ if (D->isStaticDataMember()) {
+ DSAVarData DVarTemp =
+ hasDSA(D, isOpenMPPrivate, MatchesAlways(), FromParent);
+ if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
+ return DVar;
+
+ DVar.CKind = OMPC_shared;
+ return DVar;
+ }
+
+ QualType Type = D->getType().getNonReferenceType().getCanonicalType();
+ bool IsConstant = Type.isConstant(SemaRef.getASTContext());
+ Type = SemaRef.getASTContext().getBaseElementType(Type);
+ // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
+ // in a Construct, C/C++, predetermined, p.6]
+ // Variables with const qualified type having no mutable member are
+ // shared.
+ CXXRecordDecl *RD =
+ SemaRef.getLangOpts().CPlusPlus ? Type->getAsCXXRecordDecl() : nullptr;
+ if (auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
+ if (auto *CTD = CTSD->getSpecializedTemplate())
+ RD = CTD->getTemplatedDecl();
+ if (IsConstant &&
+ !(SemaRef.getLangOpts().CPlusPlus && RD && RD->hasMutableFields())) {
+ // Variables with const-qualified type having no mutable member may be
+ // listed in a firstprivate clause, even if they are static data members.
+ DSAVarData DVarTemp = hasDSA(D, MatchesAnyClause(OMPC_firstprivate),
+ MatchesAlways(), FromParent);
+ if (DVarTemp.CKind == OMPC_firstprivate && DVarTemp.RefExpr)
+ return DVar;
+
+ DVar.CKind = OMPC_shared;
+ return DVar;
+ }
+
+ // Explicitly specified attributes and local variables with predetermined
+ // attributes.
+ auto StartI = std::next(Stack.rbegin());
+ auto EndI = std::prev(Stack.rend());
+ if (FromParent && StartI != EndI) {
+ StartI = std::next(StartI);
+ }
+ auto I = std::prev(StartI);
+ if (I->SharingMap.count(D)) {
+ DVar.RefExpr = I->SharingMap[D].RefExpr;
+ DVar.CKind = I->SharingMap[D].Attributes;
+ DVar.ImplicitDSALoc = I->DefaultAttrLoc;
+ }
+
+ return DVar;
+}
+
+DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(VarDecl *D, bool FromParent) {
+ D = D->getCanonicalDecl();
+ auto StartI = Stack.rbegin();
+ auto EndI = std::prev(Stack.rend());
+ if (FromParent && StartI != EndI) {
+ StartI = std::next(StartI);
+ }
+ return getDSA(StartI, D);
+}
+
+template <class ClausesPredicate, class DirectivesPredicate>
+DSAStackTy::DSAVarData DSAStackTy::hasDSA(VarDecl *D, ClausesPredicate CPred,
+ DirectivesPredicate DPred,
+ bool FromParent) {
+ D = D->getCanonicalDecl();
+ auto StartI = std::next(Stack.rbegin());
+ auto EndI = std::prev(Stack.rend());
+ if (FromParent && StartI != EndI) {
+ StartI = std::next(StartI);
+ }
+ for (auto I = StartI, EE = EndI; I != EE; ++I) {
+ if (!DPred(I->Directive) && !isParallelOrTaskRegion(I->Directive))
+ continue;
+ DSAVarData DVar = getDSA(I, D);
+ if (CPred(DVar.CKind))
+ return DVar;
+ }
+ return DSAVarData();
+}
+
+template <class ClausesPredicate, class DirectivesPredicate>
+DSAStackTy::DSAVarData
+DSAStackTy::hasInnermostDSA(VarDecl *D, ClausesPredicate CPred,
+ DirectivesPredicate DPred, bool FromParent) {
+ D = D->getCanonicalDecl();
+ auto StartI = std::next(Stack.rbegin());
+ auto EndI = std::prev(Stack.rend());
+ if (FromParent && StartI != EndI) {
+ StartI = std::next(StartI);
+ }
+ for (auto I = StartI, EE = EndI; I != EE; ++I) {
+ if (!DPred(I->Directive))
+ break;
+ DSAVarData DVar = getDSA(I, D);
+ if (CPred(DVar.CKind))
+ return DVar;
+ return DSAVarData();
+ }
+ return DSAVarData();
+}
+
+bool DSAStackTy::hasExplicitDSA(
+ VarDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> &CPred,
+ unsigned Level) {
+ if (CPred(ClauseKindMode))
+ return true;
+ if (isClauseParsingMode())
+ ++Level;
+ D = D->getCanonicalDecl();
+ auto StartI = Stack.rbegin();
+ auto EndI = std::prev(Stack.rend());
+ if (std::distance(StartI, EndI) <= (int)Level)
+ return false;
+ std::advance(StartI, Level);
+ return (StartI->SharingMap.count(D) > 0) && StartI->SharingMap[D].RefExpr &&
+ CPred(StartI->SharingMap[D].Attributes);
+}
+
+bool DSAStackTy::hasExplicitDirective(
+ const llvm::function_ref<bool(OpenMPDirectiveKind)> &DPred,
+ unsigned Level) {
+ if (isClauseParsingMode())
+ ++Level;
+ auto StartI = Stack.rbegin();
+ auto EndI = std::prev(Stack.rend());
+ if (std::distance(StartI, EndI) <= (int)Level)
+ return false;
+ std::advance(StartI, Level);
+ return DPred(StartI->Directive);
+}
+
+template <class NamedDirectivesPredicate>
+bool DSAStackTy::hasDirective(NamedDirectivesPredicate DPred, bool FromParent) {
+ auto StartI = std::next(Stack.rbegin());
+ auto EndI = std::prev(Stack.rend());
+ if (FromParent && StartI != EndI) {
+ StartI = std::next(StartI);
+ }
+ for (auto I = StartI, EE = EndI; I != EE; ++I) {
+ if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc))
+ return true;
+ }
+ return false;
+}
+
+OpenMPDirectiveKind DSAStackTy::getDirectiveForScope(const Scope *S) const {
+ for (auto I = Stack.rbegin(), EE = Stack.rend(); I != EE; ++I)
+ if (I->CurScope == S)
+ return I->Directive;
+ return OMPD_unknown;
+}
+
+void Sema::InitDataSharingAttributesStack() {
+ VarDataSharingAttributesStack = new DSAStackTy(*this);
+}
+
+#define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
+
+bool Sema::IsOpenMPCapturedByRef(VarDecl *VD,
+ const CapturedRegionScopeInfo *RSI) {
+ assert(LangOpts.OpenMP && "OpenMP is not allowed");
+
+ auto &Ctx = getASTContext();
+ bool IsByRef = true;
+
+ // Find the directive that is associated with the provided scope.
+ auto DKind = DSAStack->getDirectiveForScope(RSI->TheScope);
+ auto Ty = VD->getType();
+
+ if (isOpenMPTargetDirective(DKind)) {
+ // This table summarizes how a given variable should be passed to the device
+ // given its type and the clauses where it appears. This table is based on
+ // the description in OpenMP 4.5 [2.10.4, target Construct] and
+ // OpenMP 4.5 [2.15.5, Data-mapping Attribute Rules and Clauses].
+ //
+ // =========================================================================
+ // | type | defaultmap | pvt | first | is_device_ptr | map | res. |
+ // | |(tofrom:scalar)| | pvt | | | |
+ // =========================================================================
+ // | scl | | | | - | | bycopy|
+ // | scl | | - | x | - | - | bycopy|
+ // | scl | | x | - | - | - | null |
+ // | scl | x | | | - | | byref |
+ // | scl | x | - | x | - | - | bycopy|
+ // | scl | x | x | - | - | - | null |
+ // | scl | | - | - | - | x | byref |
+ // | scl | x | - | - | - | x | byref |
+ //
+ // | agg | n.a. | | | - | | byref |
+ // | agg | n.a. | - | x | - | - | byref |
+ // | agg | n.a. | x | - | - | - | null |
+ // | agg | n.a. | - | - | - | x | byref |
+ // | agg | n.a. | - | - | - | x[] | byref |
+ //
+ // | ptr | n.a. | | | - | | bycopy|
+ // | ptr | n.a. | - | x | - | - | bycopy|
+ // | ptr | n.a. | x | - | - | - | null |
+ // | ptr | n.a. | - | - | - | x | byref |
+ // | ptr | n.a. | - | - | - | x[] | bycopy|
+ // | ptr | n.a. | - | - | x | | bycopy|
+ // | ptr | n.a. | - | - | x | x | bycopy|
+ // | ptr | n.a. | - | - | x | x[] | bycopy|
+ // =========================================================================
+ // Legend:
+ // scl - scalar
+ // ptr - pointer
+ // agg - aggregate
+ // x - applies
+ // - - invalid in this combination
+ // [] - mapped with an array section
+ // byref - should be mapped by reference
+    //  bycopy - should be mapped by copy (i.e. passed by value)
+ // null - initialize a local variable to null on the device
+ //
+ // Observations:
+ // - All scalar declarations that show up in a map clause have to be passed
+ // by reference, because they may have been mapped in the enclosing data
+ // environment.
+ // - If the scalar value does not fit the size of uintptr, it has to be
+ // passed by reference, regardless the result in the table above.
+ // - For pointers mapped by value that have either an implicit map or an
+ // array section, the runtime library may pass the NULL value to the
+ // device instead of the value passed to it by the compiler.
+
+ // FIXME: Right now, only implicit maps are implemented. Properly mapping
+    // values requires having the map, private, and firstprivate clauses' Sema
+    // and parsing in place, which we don't have yet.
+
+ if (Ty->isReferenceType())
+ Ty = Ty->castAs<ReferenceType>()->getPointeeType();
+ IsByRef = !Ty->isScalarType();
+ }
+
+ // When passing data by value, we need to make sure it fits the uintptr size
+ // and alignment, because the runtime library only deals with uintptr types.
+ // If it does not fit the uintptr size, we need to pass the data by reference
+ // instead.
+ if (!IsByRef &&
+ (Ctx.getTypeSizeInChars(Ty) >
+ Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
+ Ctx.getDeclAlign(VD) > Ctx.getTypeAlignInChars(Ctx.getUIntPtrType())))
+ IsByRef = true;
+
+ return IsByRef;
+}
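+
+// For illustration, how the rules above play out for implicit maps (the only
+// case currently handled, per the FIXME); 'use' is a hypothetical helper:
+//
+//   int scalar; struct S aggregate;
+//   #pragma omp target        // 'scalar' fits in a uintptr and is passed by
+//   { scalar++;               // copy; 'aggregate' is not a scalar type and
+//     use(&aggregate); }      // is therefore passed by reference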
+
+bool Sema::IsOpenMPCapturedVar(VarDecl *VD) {
+ assert(LangOpts.OpenMP && "OpenMP is not allowed");
+ VD = VD->getCanonicalDecl();
+
+  // If we are attempting to capture a global variable in a directive with
+  // 'target', we return true so that this global is also mapped to the device.
+ //
+ // FIXME: If the declaration is enclosed in a 'declare target' directive,
+ // then it should not be captured. Therefore, an extra check has to be
+ // inserted here once support for 'declare target' is added.
+ //
+ if (!VD->hasLocalStorage()) {
+ if (DSAStack->getCurrentDirective() == OMPD_target &&
+ !DSAStack->isClauseParsingMode()) {
+ return true;
+ }
+ if (DSAStack->getCurScope() &&
+ DSAStack->hasDirective(
+ [](OpenMPDirectiveKind K, const DeclarationNameInfo &DNI,
+ SourceLocation Loc) -> bool {
+ return isOpenMPTargetDirective(K);
+ },
+ false)) {
+ return true;
+ }
+ }
+
+ if (DSAStack->getCurrentDirective() != OMPD_unknown &&
+ (!DSAStack->isClauseParsingMode() ||
+ DSAStack->getParentDirective() != OMPD_unknown)) {
+ if (DSAStack->isLoopControlVariable(VD) ||
+ (VD->hasLocalStorage() &&
+ isParallelOrTaskRegion(DSAStack->getCurrentDirective())) ||
+ DSAStack->isForceVarCapturing())
+ return true;
+ auto DVarPrivate = DSAStack->getTopDSA(VD, DSAStack->isClauseParsingMode());
+ if (DVarPrivate.CKind != OMPC_unknown && isOpenMPPrivate(DVarPrivate.CKind))
+ return true;
+ DVarPrivate = DSAStack->hasDSA(VD, isOpenMPPrivate, MatchesAlways(),
+ DSAStack->isClauseParsingMode());
+ return DVarPrivate.CKind != OMPC_unknown;
+ }
+ return false;
+}
+
+bool Sema::isOpenMPPrivateVar(VarDecl *VD, unsigned Level) {
+ assert(LangOpts.OpenMP && "OpenMP is not allowed");
+ return DSAStack->hasExplicitDSA(
+ VD, [](OpenMPClauseKind K) -> bool { return K == OMPC_private; }, Level);
+}
+
+bool Sema::isOpenMPTargetCapturedVar(VarDecl *VD, unsigned Level) {
+ assert(LangOpts.OpenMP && "OpenMP is not allowed");
+ // Return true if the current level is no longer enclosed in a target region.
+
+ return !VD->hasLocalStorage() &&
+ DSAStack->hasExplicitDirective(isOpenMPTargetDirective, Level);
+}
+
+void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
+
+void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
+ const DeclarationNameInfo &DirName,
+ Scope *CurScope, SourceLocation Loc) {
+ DSAStack->push(DKind, DirName, CurScope, Loc);
+ PushExpressionEvaluationContext(PotentiallyEvaluated);
+}
+
+void Sema::StartOpenMPClause(OpenMPClauseKind K) {
+ DSAStack->setClauseParsingMode(K);
+}
+
+void Sema::EndOpenMPClause() {
+ DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
+}
+
+void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
+ // OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
+ // A variable of class type (or array thereof) that appears in a lastprivate
+ // clause requires an accessible, unambiguous default constructor for the
+ // class type, unless the list item is also specified in a firstprivate
+ // clause.
+ if (auto D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
+ for (auto *C : D->clauses()) {
+ if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) {
+ SmallVector<Expr *, 8> PrivateCopies;
+ for (auto *DE : Clause->varlists()) {
+ if (DE->isValueDependent() || DE->isTypeDependent()) {
+ PrivateCopies.push_back(nullptr);
+ continue;
+ }
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(DE)->getDecl());
+ QualType Type = VD->getType().getNonReferenceType();
+ auto DVar = DSAStack->getTopDSA(VD, false);
+ if (DVar.CKind == OMPC_lastprivate) {
+ // Generate helper private variable and initialize it with the
+ // default value. The address of the original variable is replaced
+ // by the address of the new private variable in CodeGen. This new
+ // variable is not added to IdResolver, so the code in the OpenMP
+          // region uses the original variable for proper diagnostics.
+ auto *VDPrivate = buildVarDecl(
+ *this, DE->getExprLoc(), Type.getUnqualifiedType(),
+ VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr);
+ ActOnUninitializedDecl(VDPrivate, /*TypeMayContainAuto=*/false);
+ if (VDPrivate->isInvalidDecl())
+ continue;
+ PrivateCopies.push_back(buildDeclRefExpr(
+ *this, VDPrivate, DE->getType(), DE->getExprLoc()));
+ } else {
+            // The variable is also firstprivate, so the initialization
+            // sequence for its private copy has already been generated.
+ PrivateCopies.push_back(nullptr);
+ }
+ }
+ // Set initializers to private copies if no errors were found.
+ if (PrivateCopies.size() == Clause->varlist_size()) {
+ Clause->setPrivateCopies(PrivateCopies);
+ }
+ }
+ }
+ }
+
+ DSAStack->pop();
+ DiscardCleanupsInEvaluationContext();
+ PopExpressionEvaluationContext();
+}
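+
+// A minimal sketch of the lastprivate handling above ('n' and 'compute' are
+// hypothetical):
+//
+//   std::string s;  // class type: needs an accessible default constructor
+//   #pragma omp parallel for lastprivate(s)
+//   for (int i = 0; i < n; ++i)
+//     s = compute(i);  // a private copy of 's' is default-constructed for the
+//                      // region unless 's' is also listed in firstprivate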
+
+static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
+ Expr *NumIterations, Sema &SemaRef,
+ Scope *S);
+
+namespace {
+
+class VarDeclFilterCCC : public CorrectionCandidateCallback {
+private:
+ Sema &SemaRef;
+
+public:
+ explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {}
+ bool ValidateCandidate(const TypoCorrection &Candidate) override {
+ NamedDecl *ND = Candidate.getCorrectionDecl();
+ if (VarDecl *VD = dyn_cast_or_null<VarDecl>(ND)) {
+ return VD->hasGlobalStorage() &&
+ SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
+ SemaRef.getCurScope());
+ }
+ return false;
+ }
+};
+} // namespace
+
+ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
+ CXXScopeSpec &ScopeSpec,
+ const DeclarationNameInfo &Id) {
+ LookupResult Lookup(*this, Id, LookupOrdinaryName);
+ LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
+
+ if (Lookup.isAmbiguous())
+ return ExprError();
+
+ VarDecl *VD;
+ if (!Lookup.isSingleResult()) {
+ if (TypoCorrection Corrected = CorrectTypo(
+ Id, LookupOrdinaryName, CurScope, nullptr,
+ llvm::make_unique<VarDeclFilterCCC>(*this), CTK_ErrorRecovery)) {
+ diagnoseTypo(Corrected,
+ PDiag(Lookup.empty()
+ ? diag::err_undeclared_var_use_suggest
+ : diag::err_omp_expected_var_arg_suggest)
+ << Id.getName());
+ VD = Corrected.getCorrectionDeclAs<VarDecl>();
+ } else {
+ Diag(Id.getLoc(), Lookup.empty() ? diag::err_undeclared_var_use
+ : diag::err_omp_expected_var_arg)
+ << Id.getName();
+ return ExprError();
+ }
+ } else {
+ if (!(VD = Lookup.getAsSingle<VarDecl>())) {
+ Diag(Id.getLoc(), diag::err_omp_expected_var_arg) << Id.getName();
+ Diag(Lookup.getFoundDecl()->getLocation(), diag::note_declared_at);
+ return ExprError();
+ }
+ }
+ Lookup.suppressDiagnostics();
+
+ // OpenMP [2.9.2, Syntax, C/C++]
+ // Variables must be file-scope, namespace-scope, or static block-scope.
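+ // Illustrative example of the rejected case (hypothetical names):
+ //   void f() {
+ //     int x;                        // automatic storage
+ //     #pragma omp threadprivate(x)  // error: 'x' is not static
+ //   }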
+ if (!VD->hasGlobalStorage()) {
+ Diag(Id.getLoc(), diag::err_omp_global_var_arg)
+ << getOpenMPDirectiveName(OMPD_threadprivate) << !VD->isStaticLocal();
+ bool IsDecl =
+ VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ return ExprError();
+ }
+
+ VarDecl *CanonicalVD = VD->getCanonicalDecl();
+ NamedDecl *ND = cast<NamedDecl>(CanonicalVD);
+ // OpenMP [2.9.2, Restrictions, C/C++, p.2]
+ // A threadprivate directive for file-scope variables must appear outside
+ // any definition or declaration.
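+ // Illustrative example (hypothetical names):
+ //   int x;
+ //   void f() {
+ //     #pragma omp threadprivate(x)  // error: must appear at file scope
+ //   }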
+ if (CanonicalVD->getDeclContext()->isTranslationUnit() &&
+ !getCurLexicalContext()->isTranslationUnit()) {
+ Diag(Id.getLoc(), diag::err_omp_var_scope)
+ << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
+ bool IsDecl =
+ VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ return ExprError();
+ }
+ // OpenMP [2.9.2, Restrictions, C/C++, p.3]
+ // A threadprivate directive for static class member variables must appear
+ // in the class definition, in the same scope in which the member
+ // variables are declared.
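+ // Illustrative example (hypothetical names):
+ //   struct S { static int x; };
+ //   #pragma omp threadprivate(S::x)  // error: must appear inside S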
+ if (CanonicalVD->isStaticDataMember() &&
+ !CanonicalVD->getDeclContext()->Equals(getCurLexicalContext())) {
+ Diag(Id.getLoc(), diag::err_omp_var_scope)
+ << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
+ bool IsDecl =
+ VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ return ExprError();
+ }
+ // OpenMP [2.9.2, Restrictions, C/C++, p.4]
+ // A threadprivate directive for namespace-scope variables must appear
+ // outside any definition or declaration other than the namespace
+ // definition itself.
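+ // Illustrative example (hypothetical names):
+ //   namespace N { int x; }
+ //   void f() {
+ //     #pragma omp threadprivate(N::x)  // error: not a file-scope context
+ //   }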
+ if (CanonicalVD->getDeclContext()->isNamespace() &&
+ (!getCurLexicalContext()->isFileContext() ||
+ !getCurLexicalContext()->Encloses(CanonicalVD->getDeclContext()))) {
+ Diag(Id.getLoc(), diag::err_omp_var_scope)
+ << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
+ bool IsDecl =
+ VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ return ExprError();
+ }
+ // OpenMP [2.9.2, Restrictions, C/C++, p.6]
+ // A threadprivate directive for static block-scope variables must appear
+ // in the scope of the variable and not in a nested scope.
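+ // Illustrative example (hypothetical names):
+ //   void f() {
+ //     static int x;
+ //     {
+ //       #pragma omp threadprivate(x)  // error: nested scope
+ //     }
+ //   }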
+ if (CanonicalVD->isStaticLocal() && CurScope &&
+ !isDeclInScope(ND, getCurLexicalContext(), CurScope)) {
+ Diag(Id.getLoc(), diag::err_omp_var_scope)
+ << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
+ bool IsDecl =
+ VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ return ExprError();
+ }
+
+ // OpenMP [2.9.2, Restrictions, C/C++, p.2-6]
+ // A threadprivate directive must lexically precede all references to any
+ // of the variables in its list.
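+ // Illustrative example (hypothetical names):
+ //   static int x;
+ //   void f() { x = 0; }
+ //   #pragma omp threadprivate(x)  // error: 'x' was already referenced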
+ if (VD->isUsed() && !DSAStack->isThreadPrivate(VD)) {
+ Diag(Id.getLoc(), diag::err_omp_var_used)
+ << getOpenMPDirectiveName(OMPD_threadprivate) << VD;
+ return ExprError();
+ }
+
+ QualType ExprType = VD->getType().getNonReferenceType();
+ ExprResult DE = buildDeclRefExpr(*this, VD, ExprType, Id.getLoc());
+ return DE;
+}
+
+Sema::DeclGroupPtrTy
+Sema::ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
+ ArrayRef<Expr *> VarList) {
+ if (OMPThreadPrivateDecl *D = CheckOMPThreadPrivateDecl(Loc, VarList)) {
+ CurContext->addDecl(D);
+ return DeclGroupPtrTy::make(DeclGroupRef(D));
+ }
+ return DeclGroupPtrTy();
+}
+
+namespace {
+class LocalVarRefChecker : public ConstStmtVisitor<LocalVarRefChecker, bool> {
+ Sema &SemaRef;
+
+public:
+ bool VisitDeclRefExpr(const DeclRefExpr *E) {
+ if (auto VD = dyn_cast<VarDecl>(E->getDecl())) {
+ if (VD->hasLocalStorage()) {
+ SemaRef.Diag(E->getLocStart(),
+ diag::err_omp_local_var_in_threadprivate_init)
+ << E->getSourceRange();
+ SemaRef.Diag(VD->getLocation(), diag::note_defined_here)
+ << VD << VD->getSourceRange();
+ return true;
+ }
+ }
+ return false;
+ }
+ bool VisitStmt(const Stmt *S) {
+ for (auto Child : S->children()) {
+ if (Child && Visit(Child))
+ return true;
+ }
+ return false;
+ }
+ explicit LocalVarRefChecker(Sema &SemaRef) : SemaRef(SemaRef) {}
+};
+} // namespace
+
+OMPThreadPrivateDecl *
+Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
+ SmallVector<Expr *, 8> Vars;
+ for (auto &RefExpr : VarList) {
+ DeclRefExpr *DE = cast<DeclRefExpr>(RefExpr);
+ VarDecl *VD = cast<VarDecl>(DE->getDecl());
+ SourceLocation ILoc = DE->getExprLoc();
+
+ QualType QType = VD->getType();
+ if (QType->isDependentType() || QType->isInstantiationDependentType()) {
+ // It will be analyzed later.
+ Vars.push_back(DE);
+ continue;
+ }
+
+ // OpenMP [2.9.2, Restrictions, C/C++, p.10]
+ // A threadprivate variable must not have an incomplete type.
+ if (RequireCompleteType(ILoc, VD->getType(),
+ diag::err_omp_threadprivate_incomplete_type)) {
+ continue;
+ }
+
+ // OpenMP [2.9.2, Restrictions, C/C++, p.10]
+ // A threadprivate variable must not have a reference type.
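+ // Illustrative example (hypothetical names):
+ //   static int x;
+ //   static int &r = x;
+ //   #pragma omp threadprivate(r)  // error: 'r' has reference type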
+ if (VD->getType()->isReferenceType()) {
+ Diag(ILoc, diag::err_omp_ref_type_arg)
+ << getOpenMPDirectiveName(OMPD_threadprivate) << VD->getType();
+ bool IsDecl =
+ VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ continue;
+ }
+
+ // Check if this is a TLS variable. If TLS is not supported, produce the
+ // corresponding diagnostic.
+ if ((VD->getTLSKind() != VarDecl::TLS_None &&
+ !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
+ getLangOpts().OpenMPUseTLS &&
+ getASTContext().getTargetInfo().isTLSSupported())) ||
+ (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
+ !VD->isLocalVarDecl())) {
+ Diag(ILoc, diag::err_omp_var_thread_local)
+ << VD << ((VD->getTLSKind() != VarDecl::TLS_None) ? 0 : 1);
+ bool IsDecl =
+ VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ continue;
+ }
+
+ // Check if the initial value of the threadprivate variable references a
+ // variable with local storage (this is not supported by the runtime).
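+ // Illustrative example (hypothetical names):
+ //   void f() {
+ //     int local = 0;
+ //     static int x = local;         // initializer uses local storage
+ //     #pragma omp threadprivate(x)  // error
+ //   }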
+ if (auto Init = VD->getAnyInitializer()) {
+ LocalVarRefChecker Checker(*this);
+ if (Checker.Visit(Init))
+ continue;
+ }
+
+ Vars.push_back(RefExpr);
+ DSAStack->addDSA(VD, DE, OMPC_threadprivate);
+ VD->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
+ Context, SourceRange(Loc, Loc)));
+ if (auto *ML = Context.getASTMutationListener())
+ ML->DeclarationMarkedOpenMPThreadPrivate(VD);
+ }
+ OMPThreadPrivateDecl *D = nullptr;
+ if (!Vars.empty()) {
+ D = OMPThreadPrivateDecl::Create(Context, getCurLexicalContext(), Loc,
+ Vars);
+ D->setAccess(AS_public);
+ }
+ return D;
+}
+
+static void ReportOriginalDSA(Sema &SemaRef, DSAStackTy *Stack,
+ const VarDecl *VD, DSAStackTy::DSAVarData DVar,
+ bool IsLoopIterVar = false) {
+ if (DVar.RefExpr) {
+ SemaRef.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_explicit_dsa)
+ << getOpenMPClauseName(DVar.CKind);
+ return;
+ }
+ enum {
+ PDSA_StaticMemberShared,
+ PDSA_StaticLocalVarShared,
+ PDSA_LoopIterVarPrivate,
+ PDSA_LoopIterVarLinear,
+ PDSA_LoopIterVarLastprivate,
+ PDSA_ConstVarShared,
+ PDSA_GlobalVarShared,
+ PDSA_TaskVarFirstprivate,
+ PDSA_LocalVarPrivate,
+ PDSA_Implicit
+ } Reason = PDSA_Implicit;
+ bool ReportHint = false;
+ auto ReportLoc = VD->getLocation();
+ if (IsLoopIterVar) {
+ if (DVar.CKind == OMPC_private)
+ Reason = PDSA_LoopIterVarPrivate;
+ else if (DVar.CKind == OMPC_lastprivate)
+ Reason = PDSA_LoopIterVarLastprivate;
+ else
+ Reason = PDSA_LoopIterVarLinear;
+ } else if (DVar.DKind == OMPD_task && DVar.CKind == OMPC_firstprivate) {
+ Reason = PDSA_TaskVarFirstprivate;
+ ReportLoc = DVar.ImplicitDSALoc;
+ } else if (VD->isStaticLocal())
+ Reason = PDSA_StaticLocalVarShared;
+ else if (VD->isStaticDataMember())
+ Reason = PDSA_StaticMemberShared;
+ else if (VD->isFileVarDecl())
+ Reason = PDSA_GlobalVarShared;
+ else if (VD->getType().isConstant(SemaRef.getASTContext()))
+ Reason = PDSA_ConstVarShared;
+ else if (VD->isLocalVarDecl() && DVar.CKind == OMPC_private) {
+ ReportHint = true;
+ Reason = PDSA_LocalVarPrivate;
+ }
+ if (Reason != PDSA_Implicit) {
+ SemaRef.Diag(ReportLoc, diag::note_omp_predetermined_dsa)
+ << Reason << ReportHint
+ << getOpenMPDirectiveName(Stack->getCurrentDirective());
+ } else if (DVar.ImplicitDSALoc.isValid()) {
+ SemaRef.Diag(DVar.ImplicitDSALoc, diag::note_omp_implicit_dsa)
+ << getOpenMPClauseName(DVar.CKind);
+ }
+}
+
+namespace {
+class DSAAttrChecker : public StmtVisitor<DSAAttrChecker, void> {
+ DSAStackTy *Stack;
+ Sema &SemaRef;
+ bool ErrorFound;
+ CapturedStmt *CS;
+ llvm::SmallVector<Expr *, 8> ImplicitFirstprivate;
+ llvm::DenseMap<VarDecl *, Expr *> VarsWithInheritedDSA;
+
+public:
+ void VisitDeclRefExpr(DeclRefExpr *E) {
+ if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
+ // Skip internally declared variables.
+ if (VD->isLocalVarDecl() && !CS->capturesVariable(VD))
+ return;
+
+ auto DVar = Stack->getTopDSA(VD, false);
+ // Check if the variable has an explicit DSA set, and stop the analysis
+ // if it does.
+ if (DVar.RefExpr) return;
+
+ auto ELoc = E->getExprLoc();
+ auto DKind = Stack->getCurrentDirective();
+ // The default(none) clause requires that each variable that is referenced
+ // in the construct, and does not have a predetermined data-sharing
+ // attribute, must have its data-sharing attribute explicitly determined
+ // by being listed in a data-sharing attribute clause.
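+ // Illustrative example (hypothetical names):
+ //   void f() {
+ //     int x = 0;
+ //     #pragma omp parallel default(none)
+ //     x = 1;  // error: 'x' must be listed in a data-sharing clause
+ //   }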
+ if (DVar.CKind == OMPC_unknown && Stack->getDefaultDSA() == DSA_none &&
+ isParallelOrTaskRegion(DKind) &&
+ VarsWithInheritedDSA.count(VD) == 0) {
+ VarsWithInheritedDSA[VD] = E;
+ return;
+ }
+
+ // OpenMP [2.9.3.6, Restrictions, p.2]
+ // A list item that appears in a reduction clause of the innermost
+ // enclosing worksharing or parallel construct may not be accessed in an
+ // explicit task.
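+ // Illustrative example (hypothetical variable 's'):
+ //   #pragma omp parallel reduction(+ : s)
+ //   {
+ //     #pragma omp task
+ //     s += 1;  // error: reduction list item accessed in an explicit task
+ //   }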
+ DVar = Stack->hasInnermostDSA(VD, MatchesAnyClause(OMPC_reduction),
+ [](OpenMPDirectiveKind K) -> bool {
+ return isOpenMPParallelDirective(K) ||
+ isOpenMPWorksharingDirective(K) ||
+ isOpenMPTeamsDirective(K);
+ },
+ false);
+ if (DKind == OMPD_task && DVar.CKind == OMPC_reduction) {
+ ErrorFound = true;
+ SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
+ ReportOriginalDSA(SemaRef, Stack, VD, DVar);
+ return;
+ }
+
+ // Define implicit data-sharing attributes for task.
+ DVar = Stack->getImplicitDSA(VD, false);
+ if (DKind == OMPD_task && DVar.CKind != OMPC_shared)
+ ImplicitFirstprivate.push_back(E);
+ }
+ }
+ void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
+ for (auto *C : S->clauses()) {
+ // Skip the analysis of the arguments of an implicitly defined
+ // firstprivate clause for task directives.
+ if (C && (!isa<OMPFirstprivateClause>(C) || C->getLocStart().isValid()))
+ for (auto *CC : C->children()) {
+ if (CC)
+ Visit(CC);
+ }
+ }
+ }
+ void VisitStmt(Stmt *S) {
+ for (auto *C : S->children()) {
+ if (C && !isa<OMPExecutableDirective>(C))
+ Visit(C);
+ }
+ }
+
+ bool isErrorFound() { return ErrorFound; }
+ ArrayRef<Expr *> getImplicitFirstprivate() { return ImplicitFirstprivate; }
+ llvm::DenseMap<VarDecl *, Expr *> &getVarsWithInheritedDSA() {
+ return VarsWithInheritedDSA;
+ }
+
+ DSAAttrChecker(DSAStackTy *S, Sema &SemaRef, CapturedStmt *CS)
+ : Stack(S), SemaRef(SemaRef), ErrorFound(false), CS(CS) {}
+};
+} // namespace
+
+void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
+ switch (DKind) {
+ case OMPD_parallel: {
+ QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(".global_tid.", KmpInt32PtrTy),
+ std::make_pair(".bound_tid.", KmpInt32PtrTy),
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_simd: {
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_for: {
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_for_simd: {
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_sections: {
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_section: {
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_single: {
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_master: {
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_critical: {
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_parallel_for: {
+ QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(".global_tid.", KmpInt32PtrTy),
+ std::make_pair(".bound_tid.", KmpInt32PtrTy),
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_parallel_for_simd: {
+ QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(".global_tid.", KmpInt32PtrTy),
+ std::make_pair(".bound_tid.", KmpInt32PtrTy),
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_parallel_sections: {
+ QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(".global_tid.", KmpInt32PtrTy),
+ std::make_pair(".bound_tid.", KmpInt32PtrTy),
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_task: {
+ QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
+ QualType Args[] = {Context.VoidPtrTy.withConst().withRestrict()};
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.Variadic = true;
+ QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(".global_tid.", KmpInt32Ty),
+ std::make_pair(".part_id.", KmpInt32Ty),
+ std::make_pair(".privates.",
+ Context.VoidPtrTy.withConst().withRestrict()),
+ std::make_pair(
+ ".copy_fn.",
+ Context.getPointerType(CopyFnType).withConst().withRestrict()),
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ // Mark this captured region as inlined, because we don't use the
+ // outlined function directly.
+ getCurCapturedRegion()->TheCapturedDecl->addAttr(
+ AlwaysInlineAttr::CreateImplicit(
+ Context, AlwaysInlineAttr::Keyword_forceinline, SourceRange()));
+ break;
+ }
+ case OMPD_ordered: {
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_atomic: {
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_target_data:
+ case OMPD_target: {
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_teams: {
+ QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(".global_tid.", KmpInt32PtrTy),
+ std::make_pair(".bound_tid.", KmpInt32PtrTy),
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_taskgroup: {
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_taskloop: {
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_taskloop_simd: {
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_distribute: {
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_threadprivate:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_cancellation_point:
+ case OMPD_cancel:
+ case OMPD_flush:
+ llvm_unreachable("OpenMP Directive is not allowed");
+ case OMPD_unknown:
+ llvm_unreachable("Unknown OpenMP directive");
+ }
+}
+
+StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
+ ArrayRef<OMPClause *> Clauses) {
+ if (!S.isUsable()) {
+ ActOnCapturedRegionError();
+ return StmtError();
+ }
+
+ OMPOrderedClause *OC = nullptr;
+ OMPScheduleClause *SC = nullptr;
+ SmallVector<OMPLinearClause *, 4> LCs;
+ // This is required for proper codegen.
+ for (auto *Clause : Clauses) {
+ if (isOpenMPPrivate(Clause->getClauseKind()) ||
+ Clause->getClauseKind() == OMPC_copyprivate ||
+ (getLangOpts().OpenMPUseTLS &&
+ getASTContext().getTargetInfo().isTLSSupported() &&
+ Clause->getClauseKind() == OMPC_copyin)) {
+ DSAStack->setForceVarCapturing(Clause->getClauseKind() == OMPC_copyin);
+ // Mark all variables in private list clauses as used in the inner region.
+ for (auto *VarRef : Clause->children()) {
+ if (auto *E = cast_or_null<Expr>(VarRef)) {
+ MarkDeclarationsReferencedInExpr(E);
+ }
+ }
+ DSAStack->setForceVarCapturing(/*V=*/false);
+ } else if (isParallelOrTaskRegion(DSAStack->getCurrentDirective()) &&
+ Clause->getClauseKind() == OMPC_schedule) {
+ // Mark all variables in private list clauses as used in the inner
+ // region. This is required for proper codegen of combined directives.
+ // TODO: add processing for other clauses.
+ if (auto *E = cast_or_null<Expr>(
+ cast<OMPScheduleClause>(Clause)->getHelperChunkSize()))
+ MarkDeclarationsReferencedInExpr(E);
+ }
+ if (Clause->getClauseKind() == OMPC_schedule)
+ SC = cast<OMPScheduleClause>(Clause);
+ else if (Clause->getClauseKind() == OMPC_ordered)
+ OC = cast<OMPOrderedClause>(Clause);
+ else if (Clause->getClauseKind() == OMPC_linear)
+ LCs.push_back(cast<OMPLinearClause>(Clause));
+ }
+ bool ErrorFound = false;
+ // OpenMP, 2.7.1 Loop Construct, Restrictions
+ // The nonmonotonic modifier cannot be specified if an ordered clause is
+ // specified.
+ if (SC &&
+ (SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
+ SC->getSecondScheduleModifier() ==
+ OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
+ OC) {
+ Diag(SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic
+ ? SC->getFirstScheduleModifierLoc()
+ : SC->getSecondScheduleModifierLoc(),
+ diag::err_omp_schedule_nonmonotonic_ordered)
+ << SourceRange(OC->getLocStart(), OC->getLocEnd());
+ ErrorFound = true;
+ }
+ if (!LCs.empty() && OC && OC->getNumForLoops()) {
+ for (auto *C : LCs) {
+ Diag(C->getLocStart(), diag::err_omp_linear_ordered)
+ << SourceRange(OC->getLocStart(), OC->getLocEnd());
+ }
+ ErrorFound = true;
+ }
+ if (isOpenMPWorksharingDirective(DSAStack->getCurrentDirective()) &&
+ isOpenMPSimdDirective(DSAStack->getCurrentDirective()) && OC &&
+ OC->getNumForLoops()) {
+ Diag(OC->getLocStart(), diag::err_omp_ordered_simd)
+ << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
+ ErrorFound = true;
+ }
+ if (ErrorFound) {
+ ActOnCapturedRegionError();
+ return StmtError();
+ }
+ return ActOnCapturedRegionEnd(S.get());
+}
+
+static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
+ OpenMPDirectiveKind CurrentRegion,
+ const DeclarationNameInfo &CurrentName,
+ OpenMPDirectiveKind CancelRegion,
+ SourceLocation StartLoc) {
+ // Allowed nesting of constructs
+ // +------------------+-----------------+------------------------------------+
+ // | Parent directive | Child directive | Closely (!), No-Closely(+), Both(*)|
+ // +------------------+-----------------+------------------------------------+
+ // | parallel | parallel | * |
+ // | parallel | for | * |
+ // | parallel | for simd | * |
+ // | parallel | master | * |
+ // | parallel | critical | * |
+ // | parallel | simd | * |
+ // | parallel | sections | * |
+ // | parallel | section | + |
+ // | parallel | single | * |
+ // | parallel | parallel for | * |
+ // | parallel |parallel for simd| * |
+ // | parallel |parallel sections| * |
+ // | parallel | task | * |
+ // | parallel | taskyield | * |
+ // | parallel | barrier | * |
+ // | parallel | taskwait | * |
+ // | parallel | taskgroup | * |
+ // | parallel | flush | * |
+ // | parallel | ordered | + |
+ // | parallel | atomic | * |
+ // | parallel | target | * |
+ // | parallel | teams | + |
+ // | parallel | cancellation | |
+ // | | point | ! |
+ // | parallel | cancel | ! |
+ // | parallel | taskloop | * |
+ // | parallel | taskloop simd | * |
+ // | parallel | distribute | |
+ // +------------------+-----------------+------------------------------------+
+ // | for | parallel | * |
+ // | for | for | + |
+ // | for | for simd | + |
+ // | for | master | + |
+ // | for | critical | * |
+ // | for | simd | * |
+ // | for | sections | + |
+ // | for | section | + |
+ // | for | single | + |
+ // | for | parallel for | * |
+ // | for |parallel for simd| * |
+ // | for |parallel sections| * |
+ // | for | task | * |
+ // | for | taskyield | * |
+ // | for | barrier | + |
+ // | for | taskwait | * |
+ // | for | taskgroup | * |
+ // | for | flush | * |
+ // | for | ordered | * (if construct is ordered) |
+ // | for | atomic | * |
+ // | for | target | * |
+ // | for | teams | + |
+ // | for | cancellation | |
+ // | | point | ! |
+ // | for | cancel | ! |
+ // | for | taskloop | * |
+ // | for | taskloop simd | * |
+ // | for | distribute | |
+ // +------------------+-----------------+------------------------------------+
+ // | master | parallel | * |
+ // | master | for | + |
+ // | master | for simd | + |
+ // | master | master | * |
+ // | master | critical | * |
+ // | master | simd | * |
+ // | master | sections | + |
+ // | master | section | + |
+ // | master | single | + |
+ // | master | parallel for | * |
+ // | master |parallel for simd| * |
+ // | master |parallel sections| * |
+ // | master | task | * |
+ // | master | taskyield | * |
+ // | master | barrier | + |
+ // | master | taskwait | * |
+ // | master | taskgroup | * |
+ // | master | flush | * |
+ // | master | ordered | + |
+ // | master | atomic | * |
+ // | master | target | * |
+ // | master | teams | + |
+ // | master | cancellation | |
+ // | | point | |
+ // | master | cancel | |
+ // | master | taskloop | * |
+ // | master | taskloop simd | * |
+ // | master | distribute | |
+ // +------------------+-----------------+------------------------------------+
+ // | critical | parallel | * |
+ // | critical | for | + |
+ // | critical | for simd | + |
+ // | critical | master | * |
+ // | critical | critical | * (should have different names) |
+ // | critical | simd | * |
+ // | critical | sections | + |
+ // | critical | section | + |
+ // | critical | single | + |
+ // | critical | parallel for | * |
+ // | critical |parallel for simd| * |
+ // | critical |parallel sections| * |
+ // | critical | task | * |
+ // | critical | taskyield | * |
+ // | critical | barrier | + |
+ // | critical | taskwait | * |
+ // | critical | taskgroup | * |
+ // | critical | ordered | + |
+ // | critical | atomic | * |
+ // | critical | target | * |
+ // | critical | teams | + |
+ // | critical | cancellation | |
+ // | | point | |
+ // | critical | cancel | |
+ // | critical | taskloop | * |
+ // | critical | taskloop simd | * |
+ // | critical | distribute | |
+ // +------------------+-----------------+------------------------------------+
+ // | simd | parallel | |
+ // | simd | for | |
+ // | simd | for simd | |
+ // | simd | master | |
+ // | simd | critical | |
+ // | simd | simd | |
+ // | simd | sections | |
+ // | simd | section | |
+ // | simd | single | |
+ // | simd | parallel for | |
+ // | simd |parallel for simd| |
+ // | simd |parallel sections| |
+ // | simd | task | |
+ // | simd | taskyield | |
+ // | simd | barrier | |
+ // | simd | taskwait | |
+ // | simd | taskgroup | |
+ // | simd | flush | |
+ // | simd | ordered | + (with simd clause) |
+ // | simd | atomic | |
+ // | simd | target | |
+ // | simd | teams | |
+ // | simd | cancellation | |
+ // | | point | |
+ // | simd | cancel | |
+ // | simd | taskloop | |
+ // | simd | taskloop simd | |
+ // | simd | distribute | |
+ // +------------------+-----------------+------------------------------------+
+ // | for simd | parallel | |
+ // | for simd | for | |
+ // | for simd | for simd | |
+ // | for simd | master | |
+ // | for simd | critical | |
+ // | for simd | simd | |
+ // | for simd | sections | |
+ // | for simd | section | |
+ // | for simd | single | |
+ // | for simd | parallel for | |
+ // | for simd |parallel for simd| |
+ // | for simd |parallel sections| |
+ // | for simd | task | |
+ // | for simd | taskyield | |
+ // | for simd | barrier | |
+ // | for simd | taskwait | |
+ // | for simd | taskgroup | |
+ // | for simd | flush | |
+ // | for simd | ordered | + (with simd clause) |
+ // | for simd | atomic | |
+ // | for simd | target | |
+ // | for simd | teams | |
+ // | for simd | cancellation | |
+ // | | point | |
+ // | for simd | cancel | |
+ // | for simd | taskloop | |
+ // | for simd | taskloop simd | |
+ // | for simd | distribute | |
+ // +------------------+-----------------+------------------------------------+
+ // | parallel for simd| parallel | |
+ // | parallel for simd| for | |
+ // | parallel for simd| for simd | |
+ // | parallel for simd| master | |
+ // | parallel for simd| critical | |
+ // | parallel for simd| simd | |
+ // | parallel for simd| sections | |
+ // | parallel for simd| section | |
+ // | parallel for simd| single | |
+ // | parallel for simd| parallel for | |
+ // | parallel for simd|parallel for simd| |
+ // | parallel for simd|parallel sections| |
+ // | parallel for simd| task | |
+ // | parallel for simd| taskyield | |
+ // | parallel for simd| barrier | |
+ // | parallel for simd| taskwait | |
+ // | parallel for simd| taskgroup | |
+ // | parallel for simd| flush | |
+ // | parallel for simd| ordered | + (with simd clause) |
+ // | parallel for simd| atomic | |
+ // | parallel for simd| target | |
+ // | parallel for simd| teams | |
+ // | parallel for simd| cancellation | |
+ // | | point | |
+ // | parallel for simd| cancel | |
+ // | parallel for simd| taskloop | |
+ // | parallel for simd| taskloop simd | |
+ // | parallel for simd| distribute | |
+ // +------------------+-----------------+------------------------------------+
+ // | sections | parallel | * |
+ // | sections | for | + |
+ // | sections | for simd | + |
+ // | sections | master | + |
+ // | sections | critical | * |
+ // | sections | simd | * |
+ // | sections | sections | + |
+ // | sections | section | * |
+ // | sections | single | + |
+ // | sections | parallel for | * |
+ // | sections |parallel for simd| * |
+ // | sections |parallel sections| * |
+ // | sections | task | * |
+ // | sections | taskyield | * |
+ // | sections | barrier | + |
+ // | sections | taskwait | * |
+ // | sections | taskgroup | * |
+ // | sections | flush | * |
+ // | sections | ordered | + |
+ // | sections | atomic | * |
+ // | sections | target | * |
+ // | sections | teams | + |
+ // | sections | cancellation | |
+ // | | point | ! |
+ // | sections | cancel | ! |
+ // | sections | taskloop | * |
+ // | sections | taskloop simd | * |
+ // | sections | distribute | |
+ // +------------------+-----------------+------------------------------------+
+ // | section | parallel | * |
+ // | section | for | + |
+ // | section | for simd | + |
+ // | section | master | + |
+ // | section | critical | * |
+ // | section | simd | * |
+ // | section | sections | + |
+ // | section | section | + |
+ // | section | single | + |
+ // | section | parallel for | * |
+ // | section |parallel for simd| * |
+ // | section |parallel sections| * |
+ // | section | task | * |
+ // | section | taskyield | * |
+ // | section | barrier | + |
+ // | section | taskwait | * |
+ // | section | taskgroup | * |
+ // | section | flush | * |
+ // | section | ordered | + |
+ // | section | atomic | * |
+ // | section | target | * |
+ // | section | teams | + |
+ // | section | cancellation | |
+ // | | point | ! |
+ // | section | cancel | ! |
+ // | section | taskloop | * |
+ // | section | taskloop simd | * |
+ // | section | distribute | |
+ // +------------------+-----------------+------------------------------------+
+ // | single | parallel | * |
+ // | single | for | + |
+ // | single | for simd | + |
+ // | single | master | + |
+ // | single | critical | * |
+ // | single | simd | * |
+ // | single | sections | + |
+ // | single | section | + |
+ // | single | single | + |
+ // | single | parallel for | * |
+ // | single |parallel for simd| * |
+ // | single |parallel sections| * |
+ // | single | task | * |
+ // | single | taskyield | * |
+ // | single | barrier | + |
+ // | single | taskwait | * |
+ // | single | taskgroup | * |
+ // | single | flush | * |
+ // | single | ordered | + |
+ // | single | atomic | * |
+ // | single | target | * |
+ // | single | teams | + |
+ // | single | cancellation | |
+ // | | point | |
+ // | single | cancel | |
+ // | single | taskloop | * |
+ // | single | taskloop simd | * |
+ // | single | distribute | |
+ // +------------------+-----------------+------------------------------------+
+ // | parallel for | parallel | * |
+ // | parallel for | for | + |
+ // | parallel for | for simd | + |
+ // | parallel for | master | + |
+ // | parallel for | critical | * |
+ // | parallel for | simd | * |
+ // | parallel for | sections | + |
+ // | parallel for | section | + |
+ // | parallel for | single | + |
+ // | parallel for | parallel for | * |
+ // | parallel for |parallel for simd| * |
+ // | parallel for |parallel sections| * |
+ // | parallel for | task | * |
+ // | parallel for | taskyield | * |
+ // | parallel for | barrier | + |
+ // | parallel for | taskwait | * |
+ // | parallel for | taskgroup | * |
+ // | parallel for | flush | * |
+ // | parallel for | ordered | * (if construct is ordered) |
+ // | parallel for | atomic | * |
+ // | parallel for | target | * |
+ // | parallel for | teams | + |
+ // | parallel for | cancellation | |
+ // | | point | ! |
+ // | parallel for | cancel | ! |
+ // | parallel for | taskloop | * |
+ // | parallel for | taskloop simd | * |
+ // | parallel for | distribute | |
+ // +------------------+-----------------+------------------------------------+
+ // | parallel sections| parallel | * |
+ // | parallel sections| for | + |
+ // | parallel sections| for simd | + |
+ // | parallel sections| master | + |
+ // | parallel sections| critical | + |
+ // | parallel sections| simd | * |
+ // | parallel sections| sections | + |
+ // | parallel sections| section | * |
+ // | parallel sections| single | + |
+ // | parallel sections| parallel for | * |
+ // | parallel sections|parallel for simd| * |
+ // | parallel sections|parallel sections| * |
+ // | parallel sections| task | * |
+ // | parallel sections| taskyield | * |
+ // | parallel sections| barrier | + |
+ // | parallel sections| taskwait | * |
+ // | parallel sections| taskgroup | * |
+ // | parallel sections| flush | * |
+ // | parallel sections| ordered | + |
+ // | parallel sections| atomic | * |
+ // | parallel sections| target | * |
+ // | parallel sections| teams | + |
+ // | parallel sections| cancellation | |
+ // | | point | ! |
+ // | parallel sections| cancel | ! |
+ // | parallel sections| taskloop | * |
+ // | parallel sections| taskloop simd | * |
+ // | parallel sections| distribute | |
+ // +------------------+-----------------+------------------------------------+
+ // | task | parallel | * |
+ // | task | for | + |
+ // | task | for simd | + |
+ // | task | master | + |
+ // | task | critical | * |
+ // | task | simd | * |
+ // | task | sections | + |
+ // | task | section | + |
+ // | task | single | + |
+ // | task | parallel for | * |
+ // | task |parallel for simd| * |
+ // | task |parallel sections| * |
+ // | task | task | * |
+ // | task | taskyield | * |
+ // | task | barrier | + |
+ // | task | taskwait | * |
+ // | task | taskgroup | * |
+ // | task | flush | * |
+ // | task | ordered | + |
+ // | task | atomic | * |
+ // | task | target | * |
+ // | task | teams | + |
+ // | task | cancellation | |
+ // | | point | ! |
+ // | task | cancel | ! |
+ // | task | taskloop | * |
+ // | task | taskloop simd | * |
+ // | task | distribute | |
+ // +------------------+-----------------+------------------------------------+
+ // | ordered | parallel | * |
+ // | ordered | for | + |
+ // | ordered | for simd | + |
+ // | ordered | master | * |
+ // | ordered | critical | * |
+ // | ordered | simd | * |
+ // | ordered | sections | + |
+ // | ordered | section | + |
+ // | ordered | single | + |
+ // | ordered | parallel for | * |
+ // | ordered |parallel for simd| * |
+ // | ordered |parallel sections| * |
+ // | ordered | task | * |
+ // | ordered | taskyield | * |
+ // | ordered | barrier | + |
+ // | ordered | taskwait | * |
+ // | ordered | taskgroup | * |
+ // | ordered | flush | * |
+ // | ordered | ordered | + |
+ // | ordered | atomic | * |
+ // | ordered | target | * |
+ // | ordered | teams | + |
+ // | ordered | cancellation | |
+ // | | point | |
+ // | ordered | cancel | |
+ // | ordered | taskloop | * |
+ // | ordered | taskloop simd | * |
+ // | ordered | distribute | |
+ // +------------------+-----------------+------------------------------------+
+ // | atomic | parallel | |
+ // | atomic | for | |
+ // | atomic | for simd | |
+ // | atomic | master | |
+ // | atomic | critical | |
+ // | atomic | simd | |
+ // | atomic | sections | |
+ // | atomic | section | |
+ // | atomic | single | |
+ // | atomic | parallel for | |
+ // | atomic |parallel for simd| |
+ // | atomic |parallel sections| |
+ // | atomic | task | |
+ // | atomic | taskyield | |
+ // | atomic | barrier | |
+ // | atomic | taskwait | |
+ // | atomic | taskgroup | |
+ // | atomic | flush | |
+ // | atomic | ordered | |
+ // | atomic | atomic | |
+ // | atomic | target | |
+ // | atomic | teams | |
+ // | atomic | cancellation | |
+ // | | point | |
+ // | atomic | cancel | |
+ // | atomic | taskloop | |
+ // | atomic | taskloop simd | |
+ // | atomic | distribute | |
+ // +------------------+-----------------+------------------------------------+
+ // | target | parallel | * |
+ // | target | for | * |
+ // | target | for simd | * |
+ // | target | master | * |
+ // | target | critical | * |
+ // | target | simd | * |
+ // | target | sections | * |
+ // | target | section | * |
+ // | target | single | * |
+ // | target | parallel for | * |
+ // | target |parallel for simd| * |
+ // | target |parallel sections| * |
+ // | target | task | * |
+ // | target | taskyield | * |
+ // | target | barrier | * |
+ // | target | taskwait | * |
+ // | target | taskgroup | * |
+ // | target | flush | * |
+ // | target | ordered | * |
+ // | target | atomic | * |
+ // | target | target | * |
+ // | target | teams | * |
+ // | target | cancellation | |
+ // | | point | |
+ // | target | cancel | |
+ // | target | taskloop | * |
+ // | target | taskloop simd | * |
+ // | target | distribute | |
+ // +------------------+-----------------+------------------------------------+
+ // | teams | parallel | * |
+ // | teams | for | + |
+ // | teams | for simd | + |
+ // | teams | master | + |
+ // | teams | critical | + |
+ // | teams | simd | + |
+ // | teams | sections | + |
+ // | teams | section | + |
+ // | teams | single | + |
+ // | teams | parallel for | * |
+ // | teams |parallel for simd| * |
+ // | teams |parallel sections| * |
+ // | teams | task | + |
+ // | teams | taskyield | + |
+ // | teams | barrier | + |
+ // | teams | taskwait | + |
+ // | teams | taskgroup | + |
+ // | teams | flush | + |
+ // | teams | ordered | + |
+ // | teams | atomic | + |
+ // | teams | target | + |
+ // | teams | teams | + |
+ // | teams | cancellation | |
+ // | | point | |
+ // | teams | cancel | |
+ // | teams | taskloop | + |
+ // | teams | taskloop simd | + |
+ // | teams | distribute | ! |
+ // +------------------+-----------------+------------------------------------+
+ // | taskloop | parallel | * |
+ // | taskloop | for | + |
+ // | taskloop | for simd | + |
+ // | taskloop | master | + |
+ // | taskloop | critical | * |
+ // | taskloop | simd | * |
+ // | taskloop | sections | + |
+ // | taskloop | section | + |
+ // | taskloop | single | + |
+ // | taskloop | parallel for | * |
+ // | taskloop |parallel for simd| * |
+ // | taskloop |parallel sections| * |
+ // | taskloop | task | * |
+ // | taskloop | taskyield | * |
+ // | taskloop | barrier | + |
+ // | taskloop | taskwait | * |
+ // | taskloop | taskgroup | * |
+ // | taskloop | flush | * |
+ // | taskloop | ordered | + |
+ // | taskloop | atomic | * |
+ // | taskloop | target | * |
+ // | taskloop | teams | + |
+ // | taskloop | cancellation | |
+ // | | point | |
+ // | taskloop | cancel | |
+ // | taskloop | taskloop | * |
+ // | taskloop | distribute | |
+ // +------------------+-----------------+------------------------------------+
+ // | taskloop simd | parallel | |
+ // | taskloop simd | for | |
+ // | taskloop simd | for simd | |
+ // | taskloop simd | master | |
+ // | taskloop simd | critical | |
+ // | taskloop simd | simd | |
+ // | taskloop simd | sections | |
+ // | taskloop simd | section | |
+ // | taskloop simd | single | |
+ // | taskloop simd | parallel for | |
+ // | taskloop simd |parallel for simd| |
+ // | taskloop simd |parallel sections| |
+ // | taskloop simd | task | |
+ // | taskloop simd | taskyield | |
+ // | taskloop simd | barrier | |
+ // | taskloop simd | taskwait | |
+ // | taskloop simd | taskgroup | |
+ // | taskloop simd | flush | |
+ // | taskloop simd | ordered | + (with simd clause) |
+ // | taskloop simd | atomic | |
+ // | taskloop simd | target | |
+ // | taskloop simd | teams | |
+ // | taskloop simd | cancellation | |
+ // | | point | |
+ // | taskloop simd | cancel | |
+ // | taskloop simd | taskloop | |
+ // | taskloop simd | taskloop simd | |
+ // | taskloop simd | distribute | |
+ // +------------------+-----------------+------------------------------------+
+ // | distribute | parallel | * |
+ // | distribute | for | * |
+ // | distribute | for simd | * |
+ // | distribute | master | * |
+ // | distribute | critical | * |
+ // | distribute | simd | * |
+ // | distribute | sections | * |
+ // | distribute | section | * |
+ // | distribute | single | * |
+ // | distribute | parallel for | * |
+ // | distribute |parallel for simd| * |
+ // | distribute |parallel sections| * |
+ // | distribute | task | * |
+ // | distribute | taskyield | * |
+ // | distribute | barrier | * |
+ // | distribute | taskwait | * |
+ // | distribute | taskgroup | * |
+ // | distribute | flush | * |
+ // | distribute | ordered | + |
+ // | distribute | atomic | * |
+ // | distribute | target | |
+ // | distribute | teams | |
+ // | distribute | cancellation | + |
+ // | | point | |
+ // | distribute | cancel | + |
+ // | distribute | taskloop | * |
+ // | distribute | taskloop simd | * |
+ // | distribute | distribute | |
+ // +------------------+-----------------+------------------------------------+
+ if (Stack->getCurScope()) {
+ auto ParentRegion = Stack->getParentDirective();
+ bool NestingProhibited = false;
+ bool CloseNesting = true;
+ enum {
+ NoRecommend,
+ ShouldBeInParallelRegion,
+ ShouldBeInOrderedRegion,
+ ShouldBeInTargetRegion,
+ ShouldBeInTeamsRegion
+ } Recommend = NoRecommend;
+ if (isOpenMPSimdDirective(ParentRegion) && CurrentRegion != OMPD_ordered) {
+ // OpenMP [2.16, Nesting of Regions]
+ // OpenMP constructs may not be nested inside a simd region.
+ // OpenMP [2.8.1,simd Construct, Restrictions]
+ // An ordered construct with the simd clause is the only OpenMP construct
+ // that can appear in the simd region.
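+ // Illustrative example (hypothetical names):
+ //   #pragma omp simd
+ //   for (int i = 0; i < n; ++i) {
+ //     #pragma omp parallel  // error: only 'ordered simd' may appear here
+ //     ;
+ //   }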
+ SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_simd);
+ return true;
+ }
+ if (ParentRegion == OMPD_atomic) {
+ // OpenMP [2.16, Nesting of Regions]
+ // OpenMP constructs may not be nested inside an atomic region.
+ SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_atomic);
+ return true;
+ }
+ if (CurrentRegion == OMPD_section) {
+ // OpenMP [2.7.2, sections Construct, Restrictions]
+ // Orphaned section directives are prohibited. That is, the section
+ // directives must appear within the sections construct and must not be
+ // encountered elsewhere in the sections region.
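+ // Illustrative example (hypothetical):
+ //   #pragma omp parallel
+ //   {
+ //     #pragma omp section  // error: not closely nested in 'sections'
+ //     ;
+ //   }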
+ if (ParentRegion != OMPD_sections &&
+ ParentRegion != OMPD_parallel_sections) {
+ SemaRef.Diag(StartLoc, diag::err_omp_orphaned_section_directive)
+ << (ParentRegion != OMPD_unknown)
+ << getOpenMPDirectiveName(ParentRegion);
+ return true;
+ }
+ return false;
+ }
+ // Allow some constructs to be orphaned (they could be used in functions
+ // that are called from OpenMP regions with the required preconditions).
+ if (ParentRegion == OMPD_unknown)
+ return false;
+ if (CurrentRegion == OMPD_cancellation_point ||
+ CurrentRegion == OMPD_cancel) {
+ // OpenMP [2.16, Nesting of Regions]
+ // A cancellation point construct for which construct-type-clause is
+ // taskgroup must be nested inside a task construct. A cancellation
+ // point construct for which construct-type-clause is not taskgroup must
+ // be closely nested inside an OpenMP construct that matches the type
+ // specified in construct-type-clause.
+ // A cancel construct for which construct-type-clause is taskgroup must be
+ // nested inside a task construct. A cancel construct for which
+ // construct-type-clause is not taskgroup must be closely nested inside an
+ // OpenMP construct that matches the type specified in
+ // construct-type-clause.
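+ // Illustrative example (hypothetical):
+ //   #pragma omp parallel
+ //   {
+ //     #pragma omp cancel for  // error: type does not match 'parallel'
+ //   }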
+ NestingProhibited =
+ !((CancelRegion == OMPD_parallel && ParentRegion == OMPD_parallel) ||
+ (CancelRegion == OMPD_for &&
+ (ParentRegion == OMPD_for || ParentRegion == OMPD_parallel_for)) ||
+ (CancelRegion == OMPD_taskgroup && ParentRegion == OMPD_task) ||
+ (CancelRegion == OMPD_sections &&
+ (ParentRegion == OMPD_section || ParentRegion == OMPD_sections ||
+ ParentRegion == OMPD_parallel_sections)));
+ } else if (CurrentRegion == OMPD_master) {
+ // OpenMP [2.16, Nesting of Regions]
+ // A master region may not be closely nested inside a worksharing,
+ // atomic, or explicit task region.
+ NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
+ ParentRegion == OMPD_task ||
+ isOpenMPTaskLoopDirective(ParentRegion);
+ } else if (CurrentRegion == OMPD_critical && CurrentName.getName()) {
+ // OpenMP [2.16, Nesting of Regions]
+ // A critical region may not be nested (closely or otherwise) inside a
+ // critical region with the same name. Note that this restriction is not
+ // sufficient to prevent deadlock.
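+ // Illustrative example (hypothetical name 'lock'):
+ //   #pragma omp critical(lock)
+ //   {
+ //     #pragma omp critical(lock)  // error: same name, would self-deadlock
+ //     ;
+ //   }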
+ SourceLocation PreviousCriticalLoc;
+ bool DeadLock =
+ Stack->hasDirective([CurrentName, &PreviousCriticalLoc](
+ OpenMPDirectiveKind K,
+ const DeclarationNameInfo &DNI,
+ SourceLocation Loc)
+ ->bool {
+ if (K == OMPD_critical &&
+ DNI.getName() == CurrentName.getName()) {
+ PreviousCriticalLoc = Loc;
+ return true;
+ } else
+ return false;
+ },
+ false /* skip top directive */);
+ if (DeadLock) {
+ SemaRef.Diag(StartLoc,
+ diag::err_omp_prohibited_region_critical_same_name)
+ << CurrentName.getName();
+ if (PreviousCriticalLoc.isValid())
+ SemaRef.Diag(PreviousCriticalLoc,
+ diag::note_omp_previous_critical_region);
+ return true;
+ }
+ } else if (CurrentRegion == OMPD_barrier) {
+ // OpenMP [2.16, Nesting of Regions]
+ // A barrier region may not be closely nested inside a worksharing,
+ // explicit task, critical, ordered, atomic, or master region.
+ NestingProhibited =
+ isOpenMPWorksharingDirective(ParentRegion) ||
+ ParentRegion == OMPD_task || ParentRegion == OMPD_master ||
+ ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered ||
+ isOpenMPTaskLoopDirective(ParentRegion);
+ } else if (isOpenMPWorksharingDirective(CurrentRegion) &&
+ !isOpenMPParallelDirective(CurrentRegion)) {
+ // OpenMP [2.16, Nesting of Regions]
+ // A worksharing region may not be closely nested inside a worksharing,
+ // explicit task, critical, ordered, atomic, or master region.
+ NestingProhibited =
+ isOpenMPWorksharingDirective(ParentRegion) ||
+ ParentRegion == OMPD_task || ParentRegion == OMPD_master ||
+ ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered ||
+ isOpenMPTaskLoopDirective(ParentRegion);
+ Recommend = ShouldBeInParallelRegion;
+ } else if (CurrentRegion == OMPD_ordered) {
+ // OpenMP [2.16, Nesting of Regions]
+ // An ordered region may not be closely nested inside a critical,
+ // atomic, or explicit task region.
+ // An ordered region must be closely nested inside a loop region (or
+ // parallel loop region) with an ordered clause.
+ // OpenMP [2.8.1,simd Construct, Restrictions]
+ // An ordered construct with the simd clause is the only OpenMP construct
+ // that can appear in the simd region.
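+ // Illustrative example (hypothetical):
+ //   #pragma omp for  // note: no 'ordered' clause on the loop
+ //   for (int i = 0; i < n; ++i) {
+ //     #pragma omp ordered  // error: enclosing loop lacks 'ordered'
+ //     ;
+ //   }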
+ NestingProhibited = ParentRegion == OMPD_critical ||
+ ParentRegion == OMPD_task ||
+ isOpenMPTaskLoopDirective(ParentRegion) ||
+ !(isOpenMPSimdDirective(ParentRegion) ||
+ Stack->isParentOrderedRegion());
+ Recommend = ShouldBeInOrderedRegion;
+ } else if (isOpenMPTeamsDirective(CurrentRegion)) {
+ // OpenMP [2.16, Nesting of Regions]
+ // If specified, a teams construct must be contained within a target
+ // construct.
+ NestingProhibited = ParentRegion != OMPD_target;
+ Recommend = ShouldBeInTargetRegion;
+ Stack->setParentTeamsRegionLoc(Stack->getConstructLoc());
+ }
+ if (!NestingProhibited && isOpenMPTeamsDirective(ParentRegion)) {
+ // OpenMP [2.16, Nesting of Regions]
+ // distribute, parallel, parallel sections, parallel workshare, and the
+ // parallel loop and parallel loop SIMD constructs are the only OpenMP
+ // constructs that can be closely nested in the teams region.
+ NestingProhibited = !isOpenMPParallelDirective(CurrentRegion) &&
+ !isOpenMPDistributeDirective(CurrentRegion);
+ Recommend = ShouldBeInParallelRegion;
+ }
+ if (!NestingProhibited && isOpenMPDistributeDirective(CurrentRegion)) {
+ // OpenMP 4.5 [2.17 Nesting of Regions]
+ // The region associated with the distribute construct must be strictly
+ // nested inside a teams region.
+ NestingProhibited = !isOpenMPTeamsDirective(ParentRegion);
+ Recommend = ShouldBeInTeamsRegion;
+ }
+ if (NestingProhibited) {
+ SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
+ << CloseNesting << getOpenMPDirectiveName(ParentRegion) << Recommend
+ << getOpenMPDirectiveName(CurrentRegion);
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
+ ArrayRef<OMPClause *> Clauses,
+ ArrayRef<OpenMPDirectiveKind> AllowedNameModifiers) {
+ bool ErrorFound = false;
+ unsigned NamedModifiersNumber = 0;
+ SmallVector<const OMPIfClause *, OMPC_unknown + 1> FoundNameModifiers(
+ OMPD_unknown + 1);
+ SmallVector<SourceLocation, 4> NameModifierLoc;
+ for (const auto *C : Clauses) {
+ if (const auto *IC = dyn_cast_or_null<OMPIfClause>(C)) {
+ // At most one if clause without a directive-name-modifier can appear on
+ // the directive.
+ OpenMPDirectiveKind CurNM = IC->getNameModifier();
+ if (FoundNameModifiers[CurNM]) {
+ S.Diag(C->getLocStart(), diag::err_omp_more_one_clause)
+ << getOpenMPDirectiveName(Kind) << getOpenMPClauseName(OMPC_if)
+ << (CurNM != OMPD_unknown) << getOpenMPDirectiveName(CurNM);
+ ErrorFound = true;
+ } else if (CurNM != OMPD_unknown) {
+ NameModifierLoc.push_back(IC->getNameModifierLoc());
+ ++NamedModifiersNumber;
+ }
+ FoundNameModifiers[CurNM] = IC;
+ if (CurNM == OMPD_unknown)
+ continue;
+ // Check if the specified name modifier is allowed for the current
+ // directive.
+ // At most one if clause with the particular directive-name-modifier can
+ // appear on the directive.
+ bool MatchFound = false;
+ for (auto NM : AllowedNameModifiers) {
+ if (CurNM == NM) {
+ MatchFound = true;
+ break;
+ }
+ }
+ if (!MatchFound) {
+ S.Diag(IC->getNameModifierLoc(),
+ diag::err_omp_wrong_if_directive_name_modifier)
+ << getOpenMPDirectiveName(CurNM) << getOpenMPDirectiveName(Kind);
+ ErrorFound = true;
+ }
+ }
+ }
+ // If any if clause on the directive includes a directive-name-modifier,
+ // then all if clauses on the directive must include a
+ // directive-name-modifier.
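+ // Illustrative example (hypothetical conditions c1, c2):
+ //   #pragma omp parallel for if(parallel : c1) if(c2)
+ //   // error: the second 'if' must also carry a directive-name-modifier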
+ if (FoundNameModifiers[OMPD_unknown] && NamedModifiersNumber > 0) {
+ if (NamedModifiersNumber == AllowedNameModifiers.size()) {
+ S.Diag(FoundNameModifiers[OMPD_unknown]->getLocStart(),
+ diag::err_omp_no_more_if_clause);
+ } else {
+ std::string Values;
+ std::string Sep(", ");
+ unsigned AllowedCnt = 0;
+ unsigned TotalAllowedNum =
+ AllowedNameModifiers.size() - NamedModifiersNumber;
+ for (unsigned Cnt = 0, End = AllowedNameModifiers.size(); Cnt < End;
+ ++Cnt) {
+ OpenMPDirectiveKind NM = AllowedNameModifiers[Cnt];
+ if (!FoundNameModifiers[NM]) {
+ Values += "'";
+ Values += getOpenMPDirectiveName(NM);
+ Values += "'";
+ if (AllowedCnt + 2 == TotalAllowedNum)
+ Values += " or ";
+ else if (AllowedCnt + 1 != TotalAllowedNum)
+ Values += Sep;
+ ++AllowedCnt;
+ }
+ }
+ S.Diag(FoundNameModifiers[OMPD_unknown]->getCondition()->getLocStart(),
+ diag::err_omp_unnamed_if_clause)
+ << (TotalAllowedNum > 1) << Values;
+ }
+ for (auto Loc : NameModifierLoc) {
+ S.Diag(Loc, diag::note_omp_previous_named_if_clause);
+ }
+ ErrorFound = true;
+ }
+ return ErrorFound;
+}
+
+StmtResult Sema::ActOnOpenMPExecutableDirective(
+ OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
+ OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) {
+ StmtResult Res = StmtError();
+ if (CheckNestingOfRegions(*this, DSAStack, Kind, DirName, CancelRegion,
+ StartLoc))
+ return StmtError();
+
+ llvm::SmallVector<OMPClause *, 8> ClausesWithImplicit;
+ llvm::DenseMap<VarDecl *, Expr *> VarsWithInheritedDSA;
+ bool ErrorFound = false;
+ ClausesWithImplicit.append(Clauses.begin(), Clauses.end());
+ if (AStmt) {
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+
+ // Check default data sharing attributes for referenced variables.
+ DSAAttrChecker DSAChecker(DSAStack, *this, cast<CapturedStmt>(AStmt));
+ DSAChecker.Visit(cast<CapturedStmt>(AStmt)->getCapturedStmt());
+ if (DSAChecker.isErrorFound())
+ return StmtError();
+ // Generate list of implicitly defined firstprivate variables.
+ VarsWithInheritedDSA = DSAChecker.getVarsWithInheritedDSA();
+
+ if (!DSAChecker.getImplicitFirstprivate().empty()) {
+ if (OMPClause *Implicit = ActOnOpenMPFirstprivateClause(
+ DSAChecker.getImplicitFirstprivate(), SourceLocation(),
+ SourceLocation(), SourceLocation())) {
+ ClausesWithImplicit.push_back(Implicit);
+ ErrorFound = cast<OMPFirstprivateClause>(Implicit)->varlist_size() !=
+ DSAChecker.getImplicitFirstprivate().size();
+ } else
+ ErrorFound = true;
+ }
+ }
+
+ llvm::SmallVector<OpenMPDirectiveKind, 4> AllowedNameModifiers;
+ switch (Kind) {
+ case OMPD_parallel:
+ Res = ActOnOpenMPParallelDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc);
+ AllowedNameModifiers.push_back(OMPD_parallel);
+ break;
+ case OMPD_simd:
+ Res = ActOnOpenMPSimdDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
+ VarsWithInheritedDSA);
+ break;
+ case OMPD_for:
+ Res = ActOnOpenMPForDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
+ VarsWithInheritedDSA);
+ break;
+ case OMPD_for_simd:
+ Res = ActOnOpenMPForSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc, VarsWithInheritedDSA);
+ break;
+ case OMPD_sections:
+ Res = ActOnOpenMPSectionsDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc);
+ break;
+ case OMPD_section:
+ assert(ClausesWithImplicit.empty() &&
+ "No clauses are allowed for 'omp section' directive");
+ Res = ActOnOpenMPSectionDirective(AStmt, StartLoc, EndLoc);
+ break;
+ case OMPD_single:
+ Res = ActOnOpenMPSingleDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc);
+ break;
+ case OMPD_master:
+ assert(ClausesWithImplicit.empty() &&
+ "No clauses are allowed for 'omp master' directive");
+ Res = ActOnOpenMPMasterDirective(AStmt, StartLoc, EndLoc);
+ break;
+ case OMPD_critical:
+ Res = ActOnOpenMPCriticalDirective(DirName, ClausesWithImplicit, AStmt,
+ StartLoc, EndLoc);
+ break;
+ case OMPD_parallel_for:
+ Res = ActOnOpenMPParallelForDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_parallel);
+ break;
+ case OMPD_parallel_for_simd:
+ Res = ActOnOpenMPParallelForSimdDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_parallel);
+ break;
+ case OMPD_parallel_sections:
+ Res = ActOnOpenMPParallelSectionsDirective(ClausesWithImplicit, AStmt,
+ StartLoc, EndLoc);
+ AllowedNameModifiers.push_back(OMPD_parallel);
+ break;
+ case OMPD_task:
+ Res =
+ ActOnOpenMPTaskDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
+ AllowedNameModifiers.push_back(OMPD_task);
+ break;
+ case OMPD_taskyield:
+ assert(ClausesWithImplicit.empty() &&
+ "No clauses are allowed for 'omp taskyield' directive");
+ assert(AStmt == nullptr &&
+ "No associated statement allowed for 'omp taskyield' directive");
+ Res = ActOnOpenMPTaskyieldDirective(StartLoc, EndLoc);
+ break;
+ case OMPD_barrier:
+ assert(ClausesWithImplicit.empty() &&
+ "No clauses are allowed for 'omp barrier' directive");
+ assert(AStmt == nullptr &&
+ "No associated statement allowed for 'omp barrier' directive");
+ Res = ActOnOpenMPBarrierDirective(StartLoc, EndLoc);
+ break;
+ case OMPD_taskwait:
+ assert(ClausesWithImplicit.empty() &&
+ "No clauses are allowed for 'omp taskwait' directive");
+ assert(AStmt == nullptr &&
+ "No associated statement allowed for 'omp taskwait' directive");
+ Res = ActOnOpenMPTaskwaitDirective(StartLoc, EndLoc);
+ break;
+ case OMPD_taskgroup:
+ assert(ClausesWithImplicit.empty() &&
+ "No clauses are allowed for 'omp taskgroup' directive");
+ Res = ActOnOpenMPTaskgroupDirective(AStmt, StartLoc, EndLoc);
+ break;
+ case OMPD_flush:
+ assert(AStmt == nullptr &&
+ "No associated statement allowed for 'omp flush' directive");
+ Res = ActOnOpenMPFlushDirective(ClausesWithImplicit, StartLoc, EndLoc);
+ break;
+ case OMPD_ordered:
+ Res = ActOnOpenMPOrderedDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc);
+ break;
+ case OMPD_atomic:
+ Res = ActOnOpenMPAtomicDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc);
+ break;
+ case OMPD_teams:
+ Res =
+ ActOnOpenMPTeamsDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
+ break;
+ case OMPD_target:
+ Res = ActOnOpenMPTargetDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc);
+ AllowedNameModifiers.push_back(OMPD_target);
+ break;
+ case OMPD_cancellation_point:
+ assert(ClausesWithImplicit.empty() &&
+ "No clauses are allowed for 'omp cancellation point' directive");
+ assert(AStmt == nullptr && "No associated statement allowed for 'omp "
+ "cancellation point' directive");
+ Res = ActOnOpenMPCancellationPointDirective(StartLoc, EndLoc, CancelRegion);
+ break;
+ case OMPD_cancel:
+ assert(AStmt == nullptr &&
+ "No associated statement allowed for 'omp cancel' directive");
+ Res = ActOnOpenMPCancelDirective(ClausesWithImplicit, StartLoc, EndLoc,
+ CancelRegion);
+ AllowedNameModifiers.push_back(OMPD_cancel);
+ break;
+ case OMPD_target_data:
+ Res = ActOnOpenMPTargetDataDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc);
+ AllowedNameModifiers.push_back(OMPD_target_data);
+ break;
+ case OMPD_taskloop:
+ Res = ActOnOpenMPTaskLoopDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_taskloop);
+ break;
+ case OMPD_taskloop_simd:
+ Res = ActOnOpenMPTaskLoopSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_taskloop);
+ break;
+ case OMPD_distribute:
+ Res = ActOnOpenMPDistributeDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc, VarsWithInheritedDSA);
+ break;
+ case OMPD_threadprivate:
+ llvm_unreachable("OpenMP Directive is not allowed");
+ case OMPD_unknown:
+ llvm_unreachable("Unknown OpenMP directive");
+ }
+
+ for (auto P : VarsWithInheritedDSA) {
+ Diag(P.second->getExprLoc(), diag::err_omp_no_dsa_for_variable)
+ << P.first << P.second->getSourceRange();
+ }
+ ErrorFound = !VarsWithInheritedDSA.empty() || ErrorFound;
+
+ if (!AllowedNameModifiers.empty())
+ ErrorFound = checkIfClauses(*this, Kind, Clauses, AllowedNameModifiers) ||
+ ErrorFound;
+
+ if (ErrorFound)
+ return StmtError();
+ return Res;
+}
+
+StmtResult Sema::ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ return OMPParallelDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
+ DSAStack->isCancelRegion());
+}
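+
+// An illustrative, non-normative example of the structured-block rule quoted
+// above: the first region is fine, the second violates the single-exit rule.
+//
+//   #pragma omp parallel
+//   { work(); }                    // OK: single entry, single exit
+//
+//   #pragma omp parallel
+//   { if (cond) goto out; }        // invalid: branch out of the block
+//   out:;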
+
+namespace {
+/// \brief Helper class for checking the canonical form of OpenMP loops and
+/// extracting the iteration space of each loop in the loop nest, which will be
+/// used for IR generation.
+class OpenMPIterationSpaceChecker {
+ /// \brief Reference to Sema.
+ Sema &SemaRef;
+ /// \brief A location for diagnostics (when there is no better location).
+ SourceLocation DefaultLoc;
+ /// \brief A location for diagnostics (when increment is not compatible).
+ SourceLocation ConditionLoc;
+ /// \brief A source range for referring to the loop init later.
+ SourceRange InitSrcRange;
+ /// \brief A source range for referring to the condition later.
+ SourceRange ConditionSrcRange;
+ /// \brief A source range for referring to the increment later.
+ SourceRange IncrementSrcRange;
+ /// \brief Loop variable.
+ VarDecl *Var;
+ /// \brief Reference to loop variable.
+ DeclRefExpr *VarRef;
+ /// \brief Lower bound (initializer for the var).
+ Expr *LB;
+ /// \brief Upper bound.
+ Expr *UB;
+ /// \brief Loop step (increment).
+ Expr *Step;
+ /// \brief This flag is true when condition is one of:
+ /// Var < UB
+ /// Var <= UB
+ /// UB > Var
+ /// UB >= Var
+ bool TestIsLessOp;
+ /// \brief This flag is true when condition is strict ( < or > ).
+ bool TestIsStrictOp;
+ /// \brief This flag is true when step is subtracted on each iteration.
+ bool SubtractStep;
+
+public:
+ OpenMPIterationSpaceChecker(Sema &SemaRef, SourceLocation DefaultLoc)
+ : SemaRef(SemaRef), DefaultLoc(DefaultLoc), ConditionLoc(DefaultLoc),
+ InitSrcRange(SourceRange()), ConditionSrcRange(SourceRange()),
+ IncrementSrcRange(SourceRange()), Var(nullptr), VarRef(nullptr),
+ LB(nullptr), UB(nullptr), Step(nullptr), TestIsLessOp(false),
+ TestIsStrictOp(false), SubtractStep(false) {}
+ /// \brief Check init-expr for canonical loop form and save loop counter
+ /// variable - #Var and its initialization value - #LB.
+ bool CheckInit(Stmt *S, bool EmitDiags = true);
+ /// \brief Check test-expr for canonical form, save upper-bound (#UB), flags
+ /// for less/greater and for strict/non-strict comparison.
+ bool CheckCond(Expr *S);
+ /// \brief Check incr-expr for canonical loop form and return true if it
+ /// does not conform, otherwise save loop step (#Step).
+ bool CheckInc(Expr *S);
+ /// \brief Return the loop counter variable.
+ VarDecl *GetLoopVar() const { return Var; }
+ /// \brief Return the reference expression to loop counter variable.
+ DeclRefExpr *GetLoopVarRefExpr() const { return VarRef; }
+ /// \brief Source range of the loop init.
+ SourceRange GetInitSrcRange() const { return InitSrcRange; }
+ /// \brief Source range of the loop condition.
+ SourceRange GetConditionSrcRange() const { return ConditionSrcRange; }
+ /// \brief Source range of the loop increment.
+ SourceRange GetIncrementSrcRange() const { return IncrementSrcRange; }
+ /// \brief True if the step should be subtracted.
+ bool ShouldSubtractStep() const { return SubtractStep; }
+ /// \brief Build the expression to calculate the number of iterations.
+ Expr *BuildNumIterations(Scope *S, const bool LimitedType) const;
+ /// \brief Build the precondition expression for the loops.
+ Expr *BuildPreCond(Scope *S, Expr *Cond) const;
+ /// \brief Build a reference expression to the counter, to be used for
+ /// codegen.
+ Expr *BuildCounterVar() const;
+ /// \brief Build a reference expression to the private counter, to be used
+ /// for codegen.
+ Expr *BuildPrivateCounterVar() const;
+ /// \brief Build the initialization of the counter, to be used for codegen.
+ Expr *BuildCounterInit() const;
+ /// \brief Build the step of the counter, to be used for codegen.
+ Expr *BuildCounterStep() const;
+ /// \brief Return true if any expression is dependent.
+ bool Dependent() const;
+
+private:
+ /// \brief Check the right-hand side of an assignment in the increment
+ /// expression.
+ bool CheckIncRHS(Expr *RHS);
+ /// \brief Helper to set loop counter variable and its initializer.
+ bool SetVarAndLB(VarDecl *NewVar, DeclRefExpr *NewVarRefExpr, Expr *NewLB);
+ /// \brief Helper to set upper bound.
+ bool SetUB(Expr *NewUB, bool LessOp, bool StrictOp, SourceRange SR,
+ SourceLocation SL);
+ /// \brief Helper to set loop increment.
+ bool SetStep(Expr *NewStep, bool Subtract);
+};
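+
+// A condensed sketch of how the checker above is driven (the actual call
+// sequence lives in CheckOpenMPIterationSpace below):
+//
+//   OpenMPIterationSpaceChecker ISC(SemaRef, For->getForLoc());
+//   if (ISC.CheckInit(For->getInit()) || ISC.CheckCond(For->getCond()) ||
+//       ISC.CheckInc(For->getInc()))
+//     return true; // not a canonical loop
+//   Expr *NumIters = ISC.BuildNumIterations(S, /*LimitedType=*/true);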
+
+bool OpenMPIterationSpaceChecker::Dependent() const {
+ if (!Var) {
+ assert(!LB && !UB && !Step);
+ return false;
+ }
+ return Var->getType()->isDependentType() || (LB && LB->isValueDependent()) ||
+ (UB && UB->isValueDependent()) || (Step && Step->isValueDependent());
+}
+
+template <typename T>
+static T *getExprAsWritten(T *E) {
+ if (auto *ExprTemp = dyn_cast<ExprWithCleanups>(E))
+ E = ExprTemp->getSubExpr();
+
+ if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
+ E = MTE->GetTemporaryExpr();
+
+ while (auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
+ E = Binder->getSubExpr();
+
+ if (auto *ICE = dyn_cast<ImplicitCastExpr>(E))
+ E = ICE->getSubExprAsWritten();
+ return E->IgnoreParens();
+}
+
+bool OpenMPIterationSpaceChecker::SetVarAndLB(VarDecl *NewVar,
+ DeclRefExpr *NewVarRefExpr,
+ Expr *NewLB) {
+ // State consistency checking to ensure correct usage.
+ assert(Var == nullptr && LB == nullptr && VarRef == nullptr &&
+ UB == nullptr && Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
+ if (!NewVar || !NewLB)
+ return true;
+ Var = NewVar;
+ VarRef = NewVarRefExpr;
+ if (auto *CE = dyn_cast_or_null<CXXConstructExpr>(NewLB))
+ if (const CXXConstructorDecl *Ctor = CE->getConstructor())
+ if ((Ctor->isCopyOrMoveConstructor() ||
+ Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) &&
+ CE->getNumArgs() > 0 && CE->getArg(0) != nullptr)
+ NewLB = CE->getArg(0)->IgnoreParenImpCasts();
+ LB = NewLB;
+ return false;
+}
+
+bool OpenMPIterationSpaceChecker::SetUB(Expr *NewUB, bool LessOp, bool StrictOp,
+ SourceRange SR, SourceLocation SL) {
+ // State consistency checking to ensure correct usage.
+ assert(Var != nullptr && LB != nullptr && UB == nullptr && Step == nullptr &&
+ !TestIsLessOp && !TestIsStrictOp);
+ if (!NewUB)
+ return true;
+ UB = NewUB;
+ TestIsLessOp = LessOp;
+ TestIsStrictOp = StrictOp;
+ ConditionSrcRange = SR;
+ ConditionLoc = SL;
+ return false;
+}
+
+bool OpenMPIterationSpaceChecker::SetStep(Expr *NewStep, bool Subtract) {
+ // State consistency checking to ensure correct usage.
+ assert(Var != nullptr && LB != nullptr && Step == nullptr);
+ if (!NewStep)
+ return true;
+ if (!NewStep->isValueDependent()) {
+ // Check that the step is integer expression.
+ SourceLocation StepLoc = NewStep->getLocStart();
+ ExprResult Val =
+ SemaRef.PerformOpenMPImplicitIntegerConversion(StepLoc, NewStep);
+ if (Val.isInvalid())
+ return true;
+ NewStep = Val.get();
+
+ // OpenMP [2.6, Canonical Loop Form, Restrictions]
+ // If test-expr is of form var relational-op b and relational-op is < or
+ // <= then incr-expr must cause var to increase on each iteration of the
+ // loop. If test-expr is of form var relational-op b and relational-op is
+ // > or >= then incr-expr must cause var to decrease on each iteration of
+ // the loop.
+ // If test-expr is of form b relational-op var and relational-op is < or
+ // <= then incr-expr must cause var to decrease on each iteration of the
+ // loop. If test-expr is of form b relational-op var and relational-op is
+ // > or >= then incr-expr must cause var to increase on each iteration of
+ // the loop.
+ llvm::APSInt Result;
+ bool IsConstant = NewStep->isIntegerConstantExpr(Result, SemaRef.Context);
+ bool IsUnsigned = !NewStep->getType()->hasSignedIntegerRepresentation();
+ bool IsConstNeg =
+ IsConstant && Result.isSigned() && (Subtract != Result.isNegative());
+ bool IsConstPos =
+ IsConstant && Result.isSigned() && (Subtract == Result.isNegative());
+ bool IsConstZero = IsConstant && !Result.getBoolValue();
+ if (UB && (IsConstZero ||
+ (TestIsLessOp ? (IsConstNeg || (IsUnsigned && Subtract))
+ : (IsConstPos || (IsUnsigned && !Subtract))))) {
+ SemaRef.Diag(NewStep->getExprLoc(),
+ diag::err_omp_loop_incr_not_compatible)
+ << Var << TestIsLessOp << NewStep->getSourceRange();
+ SemaRef.Diag(ConditionLoc,
+ diag::note_omp_loop_cond_requres_compatible_incr)
+ << TestIsLessOp << ConditionSrcRange;
+ return true;
+ }
+ if (TestIsLessOp == Subtract) {
+ NewStep = SemaRef.CreateBuiltinUnaryOp(NewStep->getExprLoc(), UO_Minus,
+ NewStep).get();
+ Subtract = !Subtract;
+ }
+ }
+
+ Step = NewStep;
+ SubtractStep = Subtract;
+ return false;
+}
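+
+// Example of the incompatibility diagnosed above (illustration only): with the
+// test 'i < N' the step must increase 'i', so a constant negative step (or a
+// subtracted positive one) is rejected:
+//
+//   #pragma omp for
+//   for (int i = 0; i < N; i -= 1)   // err_omp_loop_incr_not_compatible
+//     body(i);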
+
+bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S, bool EmitDiags) {
+ // Check init-expr for canonical loop form and save loop counter
+ // variable - #Var and its initialization value - #LB.
+ // OpenMP [2.6] Canonical loop form. init-expr may be one of the following:
+ // var = lb
+ // integer-type var = lb
+ // random-access-iterator-type var = lb
+ // pointer-type var = lb
+ //
+ if (!S) {
+ if (EmitDiags) {
+ SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_init);
+ }
+ return true;
+ }
+ InitSrcRange = S->getSourceRange();
+ if (Expr *E = dyn_cast<Expr>(S))
+ S = E->IgnoreParens();
+ if (auto BO = dyn_cast<BinaryOperator>(S)) {
+ if (BO->getOpcode() == BO_Assign)
+ if (auto DRE = dyn_cast<DeclRefExpr>(BO->getLHS()->IgnoreParens()))
+ return SetVarAndLB(dyn_cast<VarDecl>(DRE->getDecl()), DRE,
+ BO->getRHS());
+ } else if (auto DS = dyn_cast<DeclStmt>(S)) {
+ if (DS->isSingleDecl()) {
+ if (auto Var = dyn_cast_or_null<VarDecl>(DS->getSingleDecl())) {
+ if (Var->hasInit() && !Var->getType()->isReferenceType()) {
+ // Accept non-canonical init form here but emit ext. warning.
+ if (Var->getInitStyle() != VarDecl::CInit && EmitDiags)
+ SemaRef.Diag(S->getLocStart(),
+ diag::ext_omp_loop_not_canonical_init)
+ << S->getSourceRange();
+ return SetVarAndLB(Var, nullptr, Var->getInit());
+ }
+ }
+ }
+ } else if (auto CE = dyn_cast<CXXOperatorCallExpr>(S))
+ if (CE->getOperator() == OO_Equal)
+ if (auto DRE = dyn_cast<DeclRefExpr>(CE->getArg(0)))
+ return SetVarAndLB(dyn_cast<VarDecl>(DRE->getDecl()), DRE,
+ CE->getArg(1));
+
+ if (EmitDiags) {
+ SemaRef.Diag(S->getLocStart(), diag::err_omp_loop_not_canonical_init)
+ << S->getSourceRange();
+ }
+ return true;
+}
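+
+// Examples of init forms handled above (illustration only):
+//
+//   i = 0;           // BinaryOperator with BO_Assign
+//   int i = 0;       // DeclStmt with a C-style init
+//   int i(0);        // DeclStmt, non-C init: accepted with an ext. warning
+//   it = v.begin();  // CXXOperatorCallExpr with OO_Equal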
+
+/// \brief Ignore parentheses, implicit casts, and copy constructors, and
+/// return the variable (which may be the loop variable) if possible.
+static const VarDecl *GetInitVarDecl(const Expr *E) {
+ if (!E)
+ return nullptr;
+ E = getExprAsWritten(E);
+ if (auto *CE = dyn_cast_or_null<CXXConstructExpr>(E))
+ if (const CXXConstructorDecl *Ctor = CE->getConstructor())
+ if ((Ctor->isCopyOrMoveConstructor() ||
+ Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) &&
+ CE->getNumArgs() > 0 && CE->getArg(0) != nullptr)
+ E = CE->getArg(0)->IgnoreParenImpCasts();
+ auto DRE = dyn_cast_or_null<DeclRefExpr>(E);
+ if (!DRE)
+ return nullptr;
+ return dyn_cast<VarDecl>(DRE->getDecl());
+}
+
+bool OpenMPIterationSpaceChecker::CheckCond(Expr *S) {
+ // Check test-expr for canonical form, save upper-bound UB, flags for
+ // less/greater and for strict/non-strict comparison.
+ // OpenMP [2.6] Canonical loop form. Test-expr may be one of the following:
+ // var relational-op b
+ // b relational-op var
+ //
+ if (!S) {
+ SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_cond) << Var;
+ return true;
+ }
+ S = getExprAsWritten(S);
+ SourceLocation CondLoc = S->getLocStart();
+ if (auto BO = dyn_cast<BinaryOperator>(S)) {
+ if (BO->isRelationalOp()) {
+ if (GetInitVarDecl(BO->getLHS()) == Var)
+ return SetUB(BO->getRHS(),
+ (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_LE),
+ (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
+ BO->getSourceRange(), BO->getOperatorLoc());
+ if (GetInitVarDecl(BO->getRHS()) == Var)
+ return SetUB(BO->getLHS(),
+ (BO->getOpcode() == BO_GT || BO->getOpcode() == BO_GE),
+ (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
+ BO->getSourceRange(), BO->getOperatorLoc());
+ }
+ } else if (auto CE = dyn_cast<CXXOperatorCallExpr>(S)) {
+ if (CE->getNumArgs() == 2) {
+ auto Op = CE->getOperator();
+ switch (Op) {
+ case OO_Greater:
+ case OO_GreaterEqual:
+ case OO_Less:
+ case OO_LessEqual:
+ if (GetInitVarDecl(CE->getArg(0)) == Var)
+ return SetUB(CE->getArg(1), Op == OO_Less || Op == OO_LessEqual,
+ Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
+ CE->getOperatorLoc());
+ if (GetInitVarDecl(CE->getArg(1)) == Var)
+ return SetUB(CE->getArg(0), Op == OO_Greater || Op == OO_GreaterEqual,
+ Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
+ CE->getOperatorLoc());
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ SemaRef.Diag(CondLoc, diag::err_omp_loop_not_canonical_cond)
+ << S->getSourceRange() << Var;
+ return true;
+}
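+
+// Examples of test forms handled above (illustration only):
+//
+//   i < N      // TestIsLessOp = true,  TestIsStrictOp = true
+//   i <= N     // TestIsLessOp = true,  TestIsStrictOp = false
+//   N > i      // var on the RHS is also accepted (LessOp stays true)
+//   i != N     // rejected: not a relational operator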
+
+bool OpenMPIterationSpaceChecker::CheckIncRHS(Expr *RHS) {
+ // RHS of canonical loop form increment can be:
+ // var + incr
+ // incr + var
+ // var - incr
+ //
+ RHS = RHS->IgnoreParenImpCasts();
+ if (auto BO = dyn_cast<BinaryOperator>(RHS)) {
+ if (BO->isAdditiveOp()) {
+ bool IsAdd = BO->getOpcode() == BO_Add;
+ if (GetInitVarDecl(BO->getLHS()) == Var)
+ return SetStep(BO->getRHS(), !IsAdd);
+ if (IsAdd && GetInitVarDecl(BO->getRHS()) == Var)
+ return SetStep(BO->getLHS(), false);
+ }
+ } else if (auto CE = dyn_cast<CXXOperatorCallExpr>(RHS)) {
+ bool IsAdd = CE->getOperator() == OO_Plus;
+ if ((IsAdd || CE->getOperator() == OO_Minus) && CE->getNumArgs() == 2) {
+ if (GetInitVarDecl(CE->getArg(0)) == Var)
+ return SetStep(CE->getArg(1), !IsAdd);
+ if (IsAdd && GetInitVarDecl(CE->getArg(1)) == Var)
+ return SetStep(CE->getArg(0), false);
+ }
+ }
+ SemaRef.Diag(RHS->getLocStart(), diag::err_omp_loop_not_canonical_incr)
+ << RHS->getSourceRange() << Var;
+ return true;
+}
+
+bool OpenMPIterationSpaceChecker::CheckInc(Expr *S) {
+ // Check incr-expr for canonical loop form and return true if it
+ // does not conform.
+ // OpenMP [2.6] Canonical loop form. Incr-expr may be one of the following:
+ // ++var
+ // var++
+ // --var
+ // var--
+ // var += incr
+ // var -= incr
+ // var = var + incr
+ // var = incr + var
+ // var = var - incr
+ //
+ if (!S) {
+ SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_incr) << Var;
+ return true;
+ }
+ IncrementSrcRange = S->getSourceRange();
+ S = S->IgnoreParens();
+ if (auto UO = dyn_cast<UnaryOperator>(S)) {
+ if (UO->isIncrementDecrementOp() && GetInitVarDecl(UO->getSubExpr()) == Var)
+ return SetStep(
+ SemaRef.ActOnIntegerConstant(UO->getLocStart(),
+ (UO->isDecrementOp() ? -1 : 1)).get(),
+ false);
+ } else if (auto BO = dyn_cast<BinaryOperator>(S)) {
+ switch (BO->getOpcode()) {
+ case BO_AddAssign:
+ case BO_SubAssign:
+ if (GetInitVarDecl(BO->getLHS()) == Var)
+ return SetStep(BO->getRHS(), BO->getOpcode() == BO_SubAssign);
+ break;
+ case BO_Assign:
+ if (GetInitVarDecl(BO->getLHS()) == Var)
+ return CheckIncRHS(BO->getRHS());
+ break;
+ default:
+ break;
+ }
+ } else if (auto CE = dyn_cast<CXXOperatorCallExpr>(S)) {
+ switch (CE->getOperator()) {
+ case OO_PlusPlus:
+ case OO_MinusMinus:
+ if (GetInitVarDecl(CE->getArg(0)) == Var)
+ return SetStep(
+ SemaRef.ActOnIntegerConstant(
+ CE->getLocStart(),
+ ((CE->getOperator() == OO_MinusMinus) ? -1 : 1)).get(),
+ false);
+ break;
+ case OO_PlusEqual:
+ case OO_MinusEqual:
+ if (GetInitVarDecl(CE->getArg(0)) == Var)
+ return SetStep(CE->getArg(1), CE->getOperator() == OO_MinusEqual);
+ break;
+ case OO_Equal:
+ if (GetInitVarDecl(CE->getArg(0)) == Var)
+ return CheckIncRHS(CE->getArg(1));
+ break;
+ default:
+ break;
+ }
+ }
+ SemaRef.Diag(S->getLocStart(), diag::err_omp_loop_not_canonical_incr)
+ << S->getSourceRange() << Var;
+ return true;
+}
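+
+// Examples of increment forms handled above (illustration only):
+//
+//   ++i;        // step 1
+//   i--;        // step -1
+//   i += 2;     // step 2
+//   i = i - 3;  // handled via CheckIncRHS: step 3, subtracted
+//   i *= 2;     // rejected: err_omp_loop_not_canonical_incr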
+
+namespace {
+// Transform variables declared in GNU statement expressions into new ones to
+// avoid a crash in codegen.
+class TransformToNewDefs : public TreeTransform<TransformToNewDefs> {
+ typedef TreeTransform<TransformToNewDefs> BaseTransform;
+
+public:
+ TransformToNewDefs(Sema &SemaRef) : BaseTransform(SemaRef) {}
+
+ Decl *TransformDefinition(SourceLocation Loc, Decl *D) {
+ if (auto *VD = cast<VarDecl>(D))
+ if (!isa<ParmVarDecl>(D) && !isa<VarTemplateSpecializationDecl>(D) &&
+ !isa<ImplicitParamDecl>(D)) {
+ auto *NewVD = VarDecl::Create(
+ SemaRef.Context, VD->getDeclContext(), VD->getLocStart(),
+ VD->getLocation(), VD->getIdentifier(), VD->getType(),
+ VD->getTypeSourceInfo(), VD->getStorageClass());
+ NewVD->setTSCSpec(VD->getTSCSpec());
+ NewVD->setInit(VD->getInit());
+ NewVD->setInitStyle(VD->getInitStyle());
+ NewVD->setExceptionVariable(VD->isExceptionVariable());
+ NewVD->setNRVOVariable(VD->isNRVOVariable());
+ NewVD->setCXXForRangeDecl(VD->isInExternCXXContext());
+ NewVD->setConstexpr(VD->isConstexpr());
+ NewVD->setInitCapture(VD->isInitCapture());
+ NewVD->setPreviousDeclInSameBlockScope(
+ VD->isPreviousDeclInSameBlockScope());
+ VD->getDeclContext()->addHiddenDecl(NewVD);
+ if (VD->hasAttrs())
+ NewVD->setAttrs(VD->getAttrs());
+ transformedLocalDecl(VD, NewVD);
+ return NewVD;
+ }
+ return BaseTransform::TransformDefinition(Loc, D);
+ }
+
+ ExprResult TransformDeclRefExpr(DeclRefExpr *E) {
+ if (auto *NewD = TransformDecl(E->getExprLoc(), E->getDecl()))
+ if (E->getDecl() != NewD) {
+ NewD->setReferenced();
+ NewD->markUsed(SemaRef.Context);
+ return DeclRefExpr::Create(
+ SemaRef.Context, E->getQualifierLoc(), E->getTemplateKeywordLoc(),
+ cast<ValueDecl>(NewD), E->refersToEnclosingVariableOrCapture(),
+ E->getNameInfo(), E->getType(), E->getValueKind());
+ }
+ return BaseTransform::TransformDeclRefExpr(E);
+ }
+};
+}
+
+/// \brief Build the expression to calculate the number of iterations.
+Expr *
+OpenMPIterationSpaceChecker::BuildNumIterations(Scope *S,
+ const bool LimitedType) const {
+ TransformToNewDefs Transform(SemaRef);
+ ExprResult Diff;
+ auto VarType = Var->getType().getNonReferenceType();
+ if (VarType->isIntegerType() || VarType->isPointerType() ||
+ SemaRef.getLangOpts().CPlusPlus) {
+ // Upper - Lower
+ auto *UBExpr = TestIsLessOp ? UB : LB;
+ auto *LBExpr = TestIsLessOp ? LB : UB;
+ Expr *Upper = Transform.TransformExpr(UBExpr).get();
+ Expr *Lower = Transform.TransformExpr(LBExpr).get();
+ if (!Upper || !Lower)
+ return nullptr;
+ Upper = SemaRef.PerformImplicitConversion(Upper, UBExpr->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true)
+ .get();
+ Lower = SemaRef.PerformImplicitConversion(Lower, LBExpr->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true)
+ .get();
+ if (!Upper || !Lower)
+ return nullptr;
+
+ Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);
+
+ if (!Diff.isUsable() && VarType->getAsCXXRecordDecl()) {
+ // BuildBinOp already emitted an error; this one points the user to the
+ // upper and lower bounds and tells what is passed to 'operator-'.
+ SemaRef.Diag(Upper->getLocStart(), diag::err_omp_loop_diff_cxx)
+ << Upper->getSourceRange() << Lower->getSourceRange();
+ return nullptr;
+ }
+ }
+
+ if (!Diff.isUsable())
+ return nullptr;
+
+ // Upper - Lower [- 1]
+ if (TestIsStrictOp)
+ Diff = SemaRef.BuildBinOp(
+ S, DefaultLoc, BO_Sub, Diff.get(),
+ SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
+ if (!Diff.isUsable())
+ return nullptr;
+
+ // Upper - Lower [- 1] + Step
+ auto NewStep = Transform.TransformExpr(Step->IgnoreImplicit());
+ if (NewStep.isInvalid())
+ return nullptr;
+ NewStep = SemaRef.PerformImplicitConversion(
+ NewStep.get(), Step->IgnoreImplicit()->getType(), Sema::AA_Converting,
+ /*AllowExplicit=*/true);
+ if (NewStep.isInvalid())
+ return nullptr;
+ Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Add, Diff.get(), NewStep.get());
+ if (!Diff.isUsable())
+ return nullptr;
+
+ // Parentheses (for dumping/debugging purposes only).
+ Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
+ if (!Diff.isUsable())
+ return nullptr;
+
+ // (Upper - Lower [- 1] + Step) / Step
+ NewStep = Transform.TransformExpr(Step->IgnoreImplicit());
+ if (NewStep.isInvalid())
+ return nullptr;
+ NewStep = SemaRef.PerformImplicitConversion(
+ NewStep.get(), Step->IgnoreImplicit()->getType(), Sema::AA_Converting,
+ /*AllowExplicit=*/true);
+ if (NewStep.isInvalid())
+ return nullptr;
+ Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
+ if (!Diff.isUsable())
+ return nullptr;
+
+ // OpenMP runtime requires 32-bit or 64-bit loop variables.
+ QualType Type = Diff.get()->getType();
+ auto &C = SemaRef.Context;
+ bool UseVarType = VarType->hasIntegerRepresentation() &&
+ C.getTypeSize(Type) > C.getTypeSize(VarType);
+ if (!Type->isIntegerType() || UseVarType) {
+ unsigned NewSize =
+ UseVarType ? C.getTypeSize(VarType) : C.getTypeSize(Type);
+ bool IsSigned = UseVarType ? VarType->hasSignedIntegerRepresentation()
+ : Type->hasSignedIntegerRepresentation();
+ Type = C.getIntTypeForBitwidth(NewSize, IsSigned);
+ Diff = SemaRef.PerformImplicitConversion(
+ Diff.get(), Type, Sema::AA_Converting, /*AllowExplicit=*/true);
+ if (!Diff.isUsable())
+ return nullptr;
+ }
+ if (LimitedType) {
+ unsigned NewSize = (C.getTypeSize(Type) > 32) ? 64 : 32;
+ if (NewSize != C.getTypeSize(Type)) {
+ if (NewSize < C.getTypeSize(Type)) {
+ assert(NewSize == 64 && "incorrect loop var size");
+ SemaRef.Diag(DefaultLoc, diag::warn_omp_loop_64_bit_var)
+ << InitSrcRange << ConditionSrcRange;
+ }
+ QualType NewType = C.getIntTypeForBitwidth(
+ NewSize, Type->hasSignedIntegerRepresentation() ||
+ C.getTypeSize(Type) < NewSize);
+ Diff = SemaRef.PerformImplicitConversion(Diff.get(), NewType,
+ Sema::AA_Converting, true);
+ if (!Diff.isUsable())
+ return nullptr;
+ }
+ }
+
+ return Diff.get();
+}
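+
+// Worked example of the computation above (illustration only): for
+// 'for (i = 0; i < 10; i += 3)' the strict test subtracts 1, giving
+// (10 - 0 - 1 + 3) / 3 == 4 iterations (i = 0, 3, 6, 9).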
+
+Expr *OpenMPIterationSpaceChecker::BuildPreCond(Scope *S, Expr *Cond) const {
+ // Try to build LB <op> UB, where <op> is <, >, <=, or >=.
+ bool Suppress = SemaRef.getDiagnostics().getSuppressAllDiagnostics();
+ SemaRef.getDiagnostics().setSuppressAllDiagnostics(/*Val=*/true);
+ TransformToNewDefs Transform(SemaRef);
+
+ auto NewLB = Transform.TransformExpr(LB);
+ auto NewUB = Transform.TransformExpr(UB);
+ if (NewLB.isInvalid() || NewUB.isInvalid())
+ return Cond;
+ NewLB = SemaRef.PerformImplicitConversion(NewLB.get(), LB->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true);
+ NewUB = SemaRef.PerformImplicitConversion(NewUB.get(), UB->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true);
+ if (NewLB.isInvalid() || NewUB.isInvalid())
+ return Cond;
+ auto CondExpr = SemaRef.BuildBinOp(
+ S, DefaultLoc, TestIsLessOp ? (TestIsStrictOp ? BO_LT : BO_LE)
+ : (TestIsStrictOp ? BO_GT : BO_GE),
+ NewLB.get(), NewUB.get());
+ if (CondExpr.isUsable()) {
+ CondExpr = SemaRef.PerformImplicitConversion(
+ CondExpr.get(), SemaRef.Context.BoolTy, /*Action=*/Sema::AA_Casting,
+ /*AllowExplicit=*/true);
+ }
+ SemaRef.getDiagnostics().setSuppressAllDiagnostics(Suppress);
+ // Otherwise use the original loop condition and evaluate it at runtime.
+ return CondExpr.isUsable() ? CondExpr.get() : Cond;
+}
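+
+// E.g. for 'for (i = 0; i < N; ++i)' the precondition built above is '0 < N',
+// converted to bool; if that cannot be built, the original condition is kept
+// and evaluated at runtime (illustration only).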
+
+/// \brief Build a reference expression to the counter, to be used for codegen.
+Expr *OpenMPIterationSpaceChecker::BuildCounterVar() const {
+ return buildDeclRefExpr(SemaRef, Var, Var->getType().getNonReferenceType(),
+ DefaultLoc);
+}
+
+Expr *OpenMPIterationSpaceChecker::BuildPrivateCounterVar() const {
+ if (Var && !Var->isInvalidDecl()) {
+ auto Type = Var->getType().getNonReferenceType();
+ auto *PrivateVar =
+ buildVarDecl(SemaRef, DefaultLoc, Type, Var->getName(),
+ Var->hasAttrs() ? &Var->getAttrs() : nullptr);
+ if (PrivateVar->isInvalidDecl())
+ return nullptr;
+ return buildDeclRefExpr(SemaRef, PrivateVar, Type, DefaultLoc);
+ }
+ return nullptr;
+}
+
+/// \brief Build the initialization of the counter, to be used for codegen.
+Expr *OpenMPIterationSpaceChecker::BuildCounterInit() const { return LB; }
+
+/// \brief Build the step of the counter, to be used for codegen.
+Expr *OpenMPIterationSpaceChecker::BuildCounterStep() const { return Step; }
+
+/// \brief Iteration space of a single for loop.
+struct LoopIterationSpace {
+ /// \brief Condition of the loop.
+ Expr *PreCond;
+ /// \brief This expression calculates the number of iterations in the loop.
+ /// It is always possible to calculate it before starting the loop.
+ Expr *NumIterations;
+ /// \brief The loop counter variable.
+ Expr *CounterVar;
+ /// \brief Private loop counter variable.
+ Expr *PrivateCounterVar;
+ /// \brief This is initializer for the initial value of #CounterVar.
+ Expr *CounterInit;
+ /// \brief This is step for the #CounterVar used to generate its update:
+ /// #CounterVar = #CounterInit + #CounterStep * CurrentIteration.
+ Expr *CounterStep;
+ /// \brief Should step be subtracted?
+ bool Subtract;
+ /// \brief Source range of the loop init.
+ SourceRange InitSrcRange;
+ /// \brief Source range of the loop condition.
+ SourceRange CondSrcRange;
+ /// \brief Source range of the loop increment.
+ SourceRange IncSrcRange;
+};
+
+} // namespace
+
+void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
+ assert(getLangOpts().OpenMP && "OpenMP is not active.");
+ assert(Init && "Expected loop in canonical form.");
+ unsigned AssociatedLoops = DSAStack->getAssociatedLoops();
+ if (AssociatedLoops > 0 &&
+ isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
+ OpenMPIterationSpaceChecker ISC(*this, ForLoc);
+ if (!ISC.CheckInit(Init, /*EmitDiags=*/false))
+ DSAStack->addLoopControlVariable(ISC.GetLoopVar());
+ DSAStack->setAssociatedLoops(AssociatedLoops - 1);
+ }
+}
+
+/// \brief Called on a for stmt to check and extract its iteration space
+/// for further processing (such as collapsing).
+static bool CheckOpenMPIterationSpace(
+ OpenMPDirectiveKind DKind, Stmt *S, Sema &SemaRef, DSAStackTy &DSA,
+ unsigned CurrentNestedLoopCount, unsigned NestedLoopCount,
+ Expr *CollapseLoopCountExpr, Expr *OrderedLoopCountExpr,
+ llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA,
+ LoopIterationSpace &ResultIterSpace) {
+ // OpenMP [2.6, Canonical Loop Form]
+ // for (init-expr; test-expr; incr-expr) structured-block
+ auto For = dyn_cast_or_null<ForStmt>(S);
+ if (!For) {
+ SemaRef.Diag(S->getLocStart(), diag::err_omp_not_for)
+ << (CollapseLoopCountExpr != nullptr || OrderedLoopCountExpr != nullptr)
+ << getOpenMPDirectiveName(DKind) << NestedLoopCount
+ << (CurrentNestedLoopCount > 0) << CurrentNestedLoopCount;
+ if (NestedLoopCount > 1) {
+ if (CollapseLoopCountExpr && OrderedLoopCountExpr)
+ SemaRef.Diag(DSA.getConstructLoc(),
+ diag::note_omp_collapse_ordered_expr)
+ << 2 << CollapseLoopCountExpr->getSourceRange()
+ << OrderedLoopCountExpr->getSourceRange();
+ else if (CollapseLoopCountExpr)
+ SemaRef.Diag(CollapseLoopCountExpr->getExprLoc(),
+ diag::note_omp_collapse_ordered_expr)
+ << 0 << CollapseLoopCountExpr->getSourceRange();
+ else
+ SemaRef.Diag(OrderedLoopCountExpr->getExprLoc(),
+ diag::note_omp_collapse_ordered_expr)
+ << 1 << OrderedLoopCountExpr->getSourceRange();
+ }
+ return true;
+ }
+ assert(For->getBody());
+
+ OpenMPIterationSpaceChecker ISC(SemaRef, For->getForLoc());
+
+ // Check init.
+ auto Init = For->getInit();
+ if (ISC.CheckInit(Init)) {
+ return true;
+ }
+
+ bool HasErrors = false;
+
+ // Check loop variable's type.
+ auto Var = ISC.GetLoopVar();
+
+ // OpenMP [2.6, Canonical Loop Form]
+ // Var is one of the following:
+ // A variable of signed or unsigned integer type.
+ // For C++, a variable of a random access iterator type.
+ // For C, a variable of a pointer type.
+ auto VarType = Var->getType().getNonReferenceType();
+ if (!VarType->isDependentType() && !VarType->isIntegerType() &&
+ !VarType->isPointerType() &&
+ !(SemaRef.getLangOpts().CPlusPlus && VarType->isOverloadableType())) {
+ SemaRef.Diag(Init->getLocStart(), diag::err_omp_loop_variable_type)
+ << SemaRef.getLangOpts().CPlusPlus;
+ HasErrors = true;
+ }
+
+ // OpenMP, 2.14.1.1 Data-sharing Attribute Rules for Variables Referenced in a
+ // Construct
+ // The loop iteration variable(s) in the associated for-loop(s) of a for or
+ // parallel for construct is (are) private.
+ // The loop iteration variable in the associated for-loop of a simd construct
+ // with just one associated for-loop is linear with a constant-linear-step
+ // that is the increment of the associated for-loop.
+ // Exclude loop var from the list of variables with implicitly defined data
+ // sharing attributes.
+ VarsWithImplicitDSA.erase(Var);
+
+ // OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables Referenced in
+ // a Construct, C/C++].
+ // The loop iteration variable in the associated for-loop of a simd construct
+ // with just one associated for-loop may be listed in a linear clause with a
+ // constant-linear-step that is the increment of the associated for-loop.
+ // The loop iteration variable(s) in the associated for-loop(s) of a for or
+ // parallel for construct may be listed in a private or lastprivate clause.
+ DSAStackTy::DSAVarData DVar = DSA.getTopDSA(Var, false);
+ auto LoopVarRefExpr = ISC.GetLoopVarRefExpr();
+ // If LoopVarRefExpr is nullptr, it means the corresponding loop variable
+ // is declared in the loop and is predetermined as private.
+ auto PredeterminedCKind =
+ isOpenMPSimdDirective(DKind)
+ ? ((NestedLoopCount == 1) ? OMPC_linear : OMPC_lastprivate)
+ : OMPC_private;
+ if (((isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
+ DVar.CKind != PredeterminedCKind) ||
+ ((isOpenMPWorksharingDirective(DKind) || DKind == OMPD_taskloop ||
+ isOpenMPDistributeDirective(DKind)) &&
+ !isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
+ DVar.CKind != OMPC_private && DVar.CKind != OMPC_lastprivate)) &&
+ (DVar.CKind != OMPC_private || DVar.RefExpr != nullptr)) {
+ SemaRef.Diag(Init->getLocStart(), diag::err_omp_loop_var_dsa)
+ << getOpenMPClauseName(DVar.CKind) << getOpenMPDirectiveName(DKind)
+ << getOpenMPClauseName(PredeterminedCKind);
+ if (DVar.RefExpr == nullptr)
+ DVar.CKind = PredeterminedCKind;
+ ReportOriginalDSA(SemaRef, &DSA, Var, DVar, /*IsLoopIterVar=*/true);
+ HasErrors = true;
+ } else if (LoopVarRefExpr != nullptr) {
+ // Make the loop iteration variable private (for worksharing constructs),
+ // linear (for simd directives with only one associated loop) or
+ // lastprivate (for simd directives with several collapsed or ordered
+ // loops).
+ if (DVar.CKind == OMPC_unknown)
+ DVar = DSA.hasDSA(Var, isOpenMPPrivate, MatchesAlways(),
+ /*FromParent=*/false);
+ DSA.addDSA(Var, LoopVarRefExpr, PredeterminedCKind);
+ }
+
+ assert(isOpenMPLoopDirective(DKind) && "DSA for non-loop vars");
+
+ // Check test-expr.
+ HasErrors |= ISC.CheckCond(For->getCond());
+
+ // Check incr-expr.
+ HasErrors |= ISC.CheckInc(For->getInc());
+
+ if (ISC.Dependent() || SemaRef.CurContext->isDependentContext() || HasErrors)
+ return HasErrors;
+
+ // Build the loop's iteration space representation.
+ ResultIterSpace.PreCond = ISC.BuildPreCond(DSA.getCurScope(), For->getCond());
+ ResultIterSpace.NumIterations = ISC.BuildNumIterations(
+ DSA.getCurScope(), (isOpenMPWorksharingDirective(DKind) ||
+ isOpenMPTaskLoopDirective(DKind) ||
+ isOpenMPDistributeDirective(DKind)));
+ ResultIterSpace.CounterVar = ISC.BuildCounterVar();
+ ResultIterSpace.PrivateCounterVar = ISC.BuildPrivateCounterVar();
+ ResultIterSpace.CounterInit = ISC.BuildCounterInit();
+ ResultIterSpace.CounterStep = ISC.BuildCounterStep();
+ ResultIterSpace.InitSrcRange = ISC.GetInitSrcRange();
+ ResultIterSpace.CondSrcRange = ISC.GetConditionSrcRange();
+ ResultIterSpace.IncSrcRange = ISC.GetIncrementSrcRange();
+ ResultIterSpace.Subtract = ISC.ShouldSubtractStep();
+
+ HasErrors |= (ResultIterSpace.PreCond == nullptr ||
+ ResultIterSpace.NumIterations == nullptr ||
+ ResultIterSpace.CounterVar == nullptr ||
+ ResultIterSpace.PrivateCounterVar == nullptr ||
+ ResultIterSpace.CounterInit == nullptr ||
+ ResultIterSpace.CounterStep == nullptr);
+
+ return HasErrors;
+}
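+
+// Example of the data-sharing rule enforced above (illustration only):
+//
+//   #pragma omp for shared(i)       // error: i is predetermined private here
+//   for (i = 0; i < N; ++i) ;
+//
+//   #pragma omp for lastprivate(i)  // OK: private/lastprivate are allowed
+//   for (i = 0; i < N; ++i) ;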
+
+/// \brief Build 'VarRef = Start'.
+static ExprResult BuildCounterInit(Sema &SemaRef, Scope *S, SourceLocation Loc,
+ ExprResult VarRef, ExprResult Start) {
+ TransformToNewDefs Transform(SemaRef);
+ // Build 'VarRef = Start'.
+ auto NewStart = Transform.TransformExpr(Start.get()->IgnoreImplicit());
+ if (NewStart.isInvalid())
+ return ExprError();
+ NewStart = SemaRef.PerformImplicitConversion(
+ NewStart.get(), Start.get()->IgnoreImplicit()->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true);
+ if (NewStart.isInvalid())
+ return ExprError();
+ NewStart = SemaRef.PerformImplicitConversion(
+ NewStart.get(), VarRef.get()->getType(), Sema::AA_Converting,
+ /*AllowExplicit=*/true);
+ if (!NewStart.isUsable())
+ return ExprError();
+
+ auto Init =
+ SemaRef.BuildBinOp(S, Loc, BO_Assign, VarRef.get(), NewStart.get());
+ return Init;
+}
+
+/// \brief Build 'VarRef = Start + Iter * Step'.
+static ExprResult BuildCounterUpdate(Sema &SemaRef, Scope *S,
+ SourceLocation Loc, ExprResult VarRef,
+ ExprResult Start, ExprResult Iter,
+ ExprResult Step, bool Subtract) {
+ // Add parentheses (for debugging purposes only).
+ Iter = SemaRef.ActOnParenExpr(Loc, Loc, Iter.get());
+ if (!VarRef.isUsable() || !Start.isUsable() || !Iter.isUsable() ||
+ !Step.isUsable())
+ return ExprError();
+
+ TransformToNewDefs Transform(SemaRef);
+ auto NewStep = Transform.TransformExpr(Step.get()->IgnoreImplicit());
+ if (NewStep.isInvalid())
+ return ExprError();
+ NewStep = SemaRef.PerformImplicitConversion(
+ NewStep.get(), Step.get()->IgnoreImplicit()->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true);
+ if (NewStep.isInvalid())
+ return ExprError();
+ ExprResult Update =
+ SemaRef.BuildBinOp(S, Loc, BO_Mul, Iter.get(), NewStep.get());
+ if (!Update.isUsable())
+ return ExprError();
+
+ // Build 'VarRef = Start + Iter * Step'.
+ auto NewStart = Transform.TransformExpr(Start.get()->IgnoreImplicit());
+ if (NewStart.isInvalid())
+ return ExprError();
+ NewStart = SemaRef.PerformImplicitConversion(
+ NewStart.get(), Start.get()->IgnoreImplicit()->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true);
+ if (NewStart.isInvalid())
+ return ExprError();
+ Update = SemaRef.BuildBinOp(S, Loc, (Subtract ? BO_Sub : BO_Add),
+ NewStart.get(), Update.get());
+ if (!Update.isUsable())
+ return ExprError();
+
+ Update = SemaRef.PerformImplicitConversion(
+ Update.get(), VarRef.get()->getType(), Sema::AA_Converting, true);
+ if (!Update.isUsable())
+ return ExprError();
+
+ Update = SemaRef.BuildBinOp(S, Loc, BO_Assign, VarRef.get(), Update.get());
+ return Update;
+}
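+
+// Worked example (illustration only): for the inner loop
+// 'for (j = J0; j < NJ; j += 2)' the update built above is
+// 'j = J0 + Iter * 2'; with Subtract set it would be 'j = J0 - Iter * 2'.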
+
+/// \brief Convert integer expression \a E to make it have at least \a Bits
+/// bits.
+static ExprResult WidenIterationCount(unsigned Bits, Expr *E,
+ Sema &SemaRef) {
+ if (E == nullptr)
+ return ExprError();
+ auto &C = SemaRef.Context;
+ QualType OldType = E->getType();
+ unsigned HasBits = C.getTypeSize(OldType);
+ if (HasBits >= Bits)
+ return ExprResult(E);
+ // OK to convert to signed, because new type has more bits than old.
+ QualType NewType = C.getIntTypeForBitwidth(Bits, /* Signed */ true);
+ return SemaRef.PerformImplicitConversion(E, NewType, Sema::AA_Converting,
+ true);
+}
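+
+// E.g. an iteration count of type 'short' (16 bits) widened to 32 bits becomes
+// a signed 32-bit integer; a count that already has >= Bits bits is returned
+// unchanged (illustration only).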
+
+/// \brief Check if the given expression \a E is a constant integer that fits
+/// into \a Bits bits.
+static bool FitsInto(unsigned Bits, bool Signed, Expr *E, Sema &SemaRef) {
+ if (E == nullptr)
+ return false;
+ llvm::APSInt Result;
+ if (E->isIntegerConstantExpr(Result, SemaRef.Context))
+ return Signed ? Result.isSignedIntN(Bits) : Result.isIntN(Bits);
+ return false;
+}
+
+/// \brief Called on a for stmt to check itself and nested loops (if any).
+/// \return Returns 0 if one of the collapsed stmts is not a canonical for
+/// loop; otherwise returns the number of collapsed loops.
+static unsigned
+CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
+ Expr *OrderedLoopCountExpr, Stmt *AStmt, Sema &SemaRef,
+ DSAStackTy &DSA,
+ llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA,
+ OMPLoopDirective::HelperExprs &Built) {
+ unsigned NestedLoopCount = 1;
+ if (CollapseLoopCountExpr) {
+ // Found 'collapse' clause - calculate collapse number.
+ llvm::APSInt Result;
+ if (CollapseLoopCountExpr->EvaluateAsInt(Result, SemaRef.getASTContext()))
+ NestedLoopCount = Result.getLimitedValue();
+ }
+ if (OrderedLoopCountExpr) {
+ // Found 'ordered' clause - calculate the loop count from its argument.
+ llvm::APSInt Result;
+ if (OrderedLoopCountExpr->EvaluateAsInt(Result, SemaRef.getASTContext())) {
+ if (Result.getLimitedValue() < NestedLoopCount) {
+ SemaRef.Diag(OrderedLoopCountExpr->getExprLoc(),
+ diag::err_omp_wrong_ordered_loop_count)
+ << OrderedLoopCountExpr->getSourceRange();
+ SemaRef.Diag(CollapseLoopCountExpr->getExprLoc(),
+ diag::note_collapse_loop_count)
+ << CollapseLoopCountExpr->getSourceRange();
+ }
+ NestedLoopCount = Result.getLimitedValue();
+ }
+ }
+ // This is a helper routine for loop directives (e.g., 'for', 'simd',
+ // 'for simd', etc.).
+ SmallVector<LoopIterationSpace, 4> IterSpaces;
+ IterSpaces.resize(NestedLoopCount);
+ Stmt *CurStmt = AStmt->IgnoreContainers(/* IgnoreCaptured */ true);
+ for (unsigned Cnt = 0; Cnt < NestedLoopCount; ++Cnt) {
+ if (CheckOpenMPIterationSpace(DKind, CurStmt, SemaRef, DSA, Cnt,
+ NestedLoopCount, CollapseLoopCountExpr,
+ OrderedLoopCountExpr, VarsWithImplicitDSA,
+ IterSpaces[Cnt]))
+ return 0;
+ // Move on to the next nested for loop, or to the loop body.
+ // OpenMP [2.8.1, simd construct, Restrictions]
+ // All loops associated with the construct must be perfectly nested; that
+ // is, there must be no intervening code nor any OpenMP directive between
+ // any two loops.
+ CurStmt = cast<ForStmt>(CurStmt)->getBody()->IgnoreContainers();
+ }
+
+ Built.clear(/* size */ NestedLoopCount);
+
+ if (SemaRef.CurContext->isDependentContext())
+ return NestedLoopCount;
+
+ // An example of what is generated for the following code:
+ //
+ // #pragma omp simd collapse(2) ordered(2)
+ // for (i = 0; i < NI; ++i)
+ // for (k = 0; k < NK; ++k)
+ // for (j = J0; j < NJ; j+=2) {
+ // <loop body>
+ // }
+ //
+ // We generate the code below.
+ // Note: the loop body may be outlined in CodeGen.
+ // Note: some counters may be C++ classes, operator- is used to find number of
+ // iterations and operator+= to calculate counter value.
+ // Note: decltype(NumIterations) must be integer type (in 'omp for', only i32
+ // or i64 is currently supported).
+ //
+ // #define NumIterations (NI * ((NJ - J0 - 1 + 2) / 2))
+ // for (int[32|64]_t IV = 0; IV < NumIterations; ++IV ) {
+ // .local.i = IV / ((NJ - J0 - 1 + 2) / 2);
+ // .local.j = J0 + (IV % ((NJ - J0 - 1 + 2) / 2)) * 2;
+ // // similar updates for vars in clauses (e.g. 'linear')
+ // <loop body (using local i and j)>
+ // }
+ // i = NI; // assign final values of counters
+ // j = NJ;
+ //
+
+ // Last iteration number is (I1 * I2 * ... In) - 1, where I1, I2 ... In are
+ // the iteration counts of the collapsed for loops.
+ // Precondition tests if there is at least one iteration (all conditions are
+ // true).
+ auto PreCond = ExprResult(IterSpaces[0].PreCond);
+ auto N0 = IterSpaces[0].NumIterations;
+ ExprResult LastIteration32 = WidenIterationCount(
+ 32 /* Bits */, SemaRef.PerformImplicitConversion(
+ N0->IgnoreImpCasts(), N0->getType(),
+ Sema::AA_Converting, /*AllowExplicit=*/true)
+ .get(),
+ SemaRef);
+ ExprResult LastIteration64 = WidenIterationCount(
+ 64 /* Bits */, SemaRef.PerformImplicitConversion(
+ N0->IgnoreImpCasts(), N0->getType(),
+ Sema::AA_Converting, /*AllowExplicit=*/true)
+ .get(),
+ SemaRef);
+
+ if (!LastIteration32.isUsable() || !LastIteration64.isUsable())
+ return NestedLoopCount;
+
+ auto &C = SemaRef.Context;
+ bool AllCountsNeedLessThan32Bits = C.getTypeSize(N0->getType()) < 32;
+
+ Scope *CurScope = DSA.getCurScope();
+ for (unsigned Cnt = 1; Cnt < NestedLoopCount; ++Cnt) {
+ if (PreCond.isUsable()) {
+ PreCond = SemaRef.BuildBinOp(CurScope, SourceLocation(), BO_LAnd,
+ PreCond.get(), IterSpaces[Cnt].PreCond);
+ }
+ auto N = IterSpaces[Cnt].NumIterations;
+ AllCountsNeedLessThan32Bits &= C.getTypeSize(N->getType()) < 32;
+ if (LastIteration32.isUsable())
+ LastIteration32 = SemaRef.BuildBinOp(
+ CurScope, SourceLocation(), BO_Mul, LastIteration32.get(),
+ SemaRef.PerformImplicitConversion(N->IgnoreImpCasts(), N->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true)
+ .get());
+ if (LastIteration64.isUsable())
+ LastIteration64 = SemaRef.BuildBinOp(
+ CurScope, SourceLocation(), BO_Mul, LastIteration64.get(),
+ SemaRef.PerformImplicitConversion(N->IgnoreImpCasts(), N->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true)
+ .get());
+ }
+
+ // Choose either the 32-bit or 64-bit version.
+ ExprResult LastIteration = LastIteration64;
+ if (LastIteration32.isUsable() &&
+ C.getTypeSize(LastIteration32.get()->getType()) == 32 &&
+ (AllCountsNeedLessThan32Bits || NestedLoopCount == 1 ||
+ FitsInto(
+ 32 /* Bits */,
+ LastIteration32.get()->getType()->hasSignedIntegerRepresentation(),
+ LastIteration64.get(), SemaRef)))
+ LastIteration = LastIteration32;
+
+ if (!LastIteration.isUsable())
+ return 0;
+
+ // Save the number of iterations.
+ ExprResult NumIterations = LastIteration;
+ {
+ LastIteration = SemaRef.BuildBinOp(
+ CurScope, SourceLocation(), BO_Sub, LastIteration.get(),
+ SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
+ if (!LastIteration.isUsable())
+ return 0;
+ }
+
+ // Calculate the last iteration number beforehand instead of doing this on
+ // each iteration. Do not do this if the number of iterations may be
+ // constant-folded.
+ llvm::APSInt Result;
+ bool IsConstant =
+ LastIteration.get()->isIntegerConstantExpr(Result, SemaRef.Context);
+ ExprResult CalcLastIteration;
+ if (!IsConstant) {
+ SourceLocation SaveLoc;
+ VarDecl *SaveVar =
+ buildVarDecl(SemaRef, SaveLoc, LastIteration.get()->getType(),
+ ".omp.last.iteration");
+ ExprResult SaveRef = buildDeclRefExpr(
+ SemaRef, SaveVar, LastIteration.get()->getType(), SaveLoc);
+ CalcLastIteration = SemaRef.BuildBinOp(CurScope, SaveLoc, BO_Assign,
+ SaveRef.get(), LastIteration.get());
+ LastIteration = SaveRef;
+
+ // Prepare SaveRef + 1.
+ NumIterations = SemaRef.BuildBinOp(
+ CurScope, SaveLoc, BO_Add, SaveRef.get(),
+ SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
+ if (!NumIterations.isUsable())
+ return 0;
+ }
+
+ SourceLocation InitLoc = IterSpaces[0].InitSrcRange.getBegin();
+
+ QualType VType = LastIteration.get()->getType();
+ // Build variables passed into the runtime, necessary for worksharing directives.
+ ExprResult LB, UB, IL, ST, EUB;
+ if (isOpenMPWorksharingDirective(DKind) || isOpenMPTaskLoopDirective(DKind) ||
+ isOpenMPDistributeDirective(DKind)) {
+ // Lower bound variable, initialized with zero.
+ VarDecl *LBDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.lb");
+ LB = buildDeclRefExpr(SemaRef, LBDecl, VType, InitLoc);
+ SemaRef.AddInitializerToDecl(
+ LBDecl, SemaRef.ActOnIntegerConstant(InitLoc, 0).get(),
+ /*DirectInit*/ false, /*TypeMayContainAuto*/ false);
+
+ // Upper bound variable, initialized with last iteration number.
+ VarDecl *UBDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.ub");
+ UB = buildDeclRefExpr(SemaRef, UBDecl, VType, InitLoc);
+ SemaRef.AddInitializerToDecl(UBDecl, LastIteration.get(),
+ /*DirectInit*/ false,
+ /*TypeMayContainAuto*/ false);
+
+ // A 32-bit variable-flag where runtime returns 1 for the last iteration.
+ // This will be used to implement clause 'lastprivate'.
+ QualType Int32Ty = SemaRef.Context.getIntTypeForBitwidth(32, true);
+ VarDecl *ILDecl = buildVarDecl(SemaRef, InitLoc, Int32Ty, ".omp.is_last");
+ IL = buildDeclRefExpr(SemaRef, ILDecl, Int32Ty, InitLoc);
+ SemaRef.AddInitializerToDecl(
+ ILDecl, SemaRef.ActOnIntegerConstant(InitLoc, 0).get(),
+ /*DirectInit*/ false, /*TypeMayContainAuto*/ false);
+
+ // Stride variable returned by runtime (we initialize it to 1 by default).
+ VarDecl *STDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.stride");
+ ST = buildDeclRefExpr(SemaRef, STDecl, VType, InitLoc);
+ SemaRef.AddInitializerToDecl(
+ STDecl, SemaRef.ActOnIntegerConstant(InitLoc, 1).get(),
+ /*DirectInit*/ false, /*TypeMayContainAuto*/ false);
+
+ // Build expression: UB = min(UB, LastIteration)
+ // It is necessary for CodeGen of directives with static scheduling.
+ ExprResult IsUBGreater = SemaRef.BuildBinOp(CurScope, InitLoc, BO_GT,
+ UB.get(), LastIteration.get());
+ ExprResult CondOp = SemaRef.ActOnConditionalOp(
+ InitLoc, InitLoc, IsUBGreater.get(), LastIteration.get(), UB.get());
+ EUB = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, UB.get(),
+ CondOp.get());
+ EUB = SemaRef.ActOnFinishFullExpr(EUB.get());
+ }
+
+ // Build the iteration variable and its initialization before loop.
+ ExprResult IV;
+ ExprResult Init;
+ {
+ VarDecl *IVDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.iv");
+ IV = buildDeclRefExpr(SemaRef, IVDecl, VType, InitLoc);
+ Expr *RHS = (isOpenMPWorksharingDirective(DKind) ||
+ isOpenMPTaskLoopDirective(DKind) ||
+ isOpenMPDistributeDirective(DKind))
+ ? LB.get()
+ : SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
+ Init = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, IV.get(), RHS);
+ Init = SemaRef.ActOnFinishFullExpr(Init.get());
+ }
+
+ // Loop condition (IV < NumIterations) or (IV <= UB) for worksharing loops.
+ SourceLocation CondLoc;
+ ExprResult Cond =
+ (isOpenMPWorksharingDirective(DKind) ||
+ isOpenMPTaskLoopDirective(DKind) || isOpenMPDistributeDirective(DKind))
+ ? SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), UB.get())
+ : SemaRef.BuildBinOp(CurScope, CondLoc, BO_LT, IV.get(),
+ NumIterations.get());
+
+ // Loop increment (IV = IV + 1)
+ SourceLocation IncLoc;
+ ExprResult Inc =
+ SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, IV.get(),
+ SemaRef.ActOnIntegerConstant(IncLoc, 1).get());
+ if (!Inc.isUsable())
+ return 0;
+ Inc = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, IV.get(), Inc.get());
+ Inc = SemaRef.ActOnFinishFullExpr(Inc.get());
+ if (!Inc.isUsable())
+ return 0;
+
+ // Increments for worksharing loops (LB = LB + ST; UB = UB + ST).
+ // Used for directives with static scheduling.
+ ExprResult NextLB, NextUB;
+ if (isOpenMPWorksharingDirective(DKind) || isOpenMPTaskLoopDirective(DKind) ||
+ isOpenMPDistributeDirective(DKind)) {
+ // LB + ST
+ NextLB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, LB.get(), ST.get());
+ if (!NextLB.isUsable())
+ return 0;
+ // LB = LB + ST
+ NextLB =
+ SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, LB.get(), NextLB.get());
+ NextLB = SemaRef.ActOnFinishFullExpr(NextLB.get());
+ if (!NextLB.isUsable())
+ return 0;
+ // UB + ST
+ NextUB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, UB.get(), ST.get());
+ if (!NextUB.isUsable())
+ return 0;
+ // UB = UB + ST
+ NextUB =
+ SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, UB.get(), NextUB.get());
+ NextUB = SemaRef.ActOnFinishFullExpr(NextUB.get());
+ if (!NextUB.isUsable())
+ return 0;
+ }
+
+ // Build updates and final values of the loop counters.
+ bool HasErrors = false;
+ Built.Counters.resize(NestedLoopCount);
+ Built.Inits.resize(NestedLoopCount);
+ Built.Updates.resize(NestedLoopCount);
+ Built.Finals.resize(NestedLoopCount);
+ {
+ ExprResult Div;
+ // Go from inner nested loop to outer.
+ for (int Cnt = NestedLoopCount - 1; Cnt >= 0; --Cnt) {
+ LoopIterationSpace &IS = IterSpaces[Cnt];
+ SourceLocation UpdLoc = IS.IncSrcRange.getBegin();
+ // Build: Iter = (IV / Div) % IS.NumIters
+ // where Div is product of previous iterations' IS.NumIters.
+ ExprResult Iter;
+ if (Div.isUsable()) {
+ Iter =
+ SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Div, IV.get(), Div.get());
+ } else {
+ Iter = IV;
+ assert((Cnt == (int)NestedLoopCount - 1) &&
+ "unusable div expected on first iteration only");
+ }
+
+ if (Cnt != 0 && Iter.isUsable())
+ Iter = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Rem, Iter.get(),
+ IS.NumIterations);
+ if (!Iter.isUsable()) {
+ HasErrors = true;
+ break;
+ }
+
+ // Build update: IS.CounterVar(Private) = IS.Start + Iter * IS.Step
+ auto *CounterVar = buildDeclRefExpr(
+ SemaRef, cast<VarDecl>(cast<DeclRefExpr>(IS.CounterVar)->getDecl()),
+ IS.CounterVar->getType(), IS.CounterVar->getExprLoc(),
+ /*RefersToCapture=*/true);
+ ExprResult Init = BuildCounterInit(SemaRef, CurScope, UpdLoc, CounterVar,
+ IS.CounterInit);
+ if (!Init.isUsable()) {
+ HasErrors = true;
+ break;
+ }
+ ExprResult Update =
+ BuildCounterUpdate(SemaRef, CurScope, UpdLoc, CounterVar,
+ IS.CounterInit, Iter, IS.CounterStep, IS.Subtract);
+ if (!Update.isUsable()) {
+ HasErrors = true;
+ break;
+ }
+
+ // Build final: IS.CounterVar = IS.Start + IS.NumIters * IS.Step
+ ExprResult Final = BuildCounterUpdate(
+ SemaRef, CurScope, UpdLoc, CounterVar, IS.CounterInit,
+ IS.NumIterations, IS.CounterStep, IS.Subtract);
+ if (!Final.isUsable()) {
+ HasErrors = true;
+ break;
+ }
+
+ // Build Div for the next iteration: Div <- Div * IS.NumIters
+ if (Cnt != 0) {
+ if (Div.isUnset())
+ Div = IS.NumIterations;
+ else
+ Div = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Mul, Div.get(),
+ IS.NumIterations);
+
+ // Add parentheses (for debugging purposes only).
+ if (Div.isUsable())
+ Div = SemaRef.ActOnParenExpr(UpdLoc, UpdLoc, Div.get());
+ if (!Div.isUsable()) {
+ HasErrors = true;
+ break;
+ }
+ }
+ if (!Update.isUsable() || !Final.isUsable()) {
+ HasErrors = true;
+ break;
+ }
+ // Save results
+ Built.Counters[Cnt] = IS.CounterVar;
+ Built.PrivateCounters[Cnt] = IS.PrivateCounterVar;
+ Built.Inits[Cnt] = Init.get();
+ Built.Updates[Cnt] = Update.get();
+ Built.Finals[Cnt] = Final.get();
+ }
+ }
+
+ if (HasErrors)
+ return 0;
+
+ // Save results
+ Built.IterationVarRef = IV.get();
+ Built.LastIteration = LastIteration.get();
+ Built.NumIterations = NumIterations.get();
+ Built.CalcLastIteration =
+ SemaRef.ActOnFinishFullExpr(CalcLastIteration.get()).get();
+ Built.PreCond = PreCond.get();
+ Built.Cond = Cond.get();
+ Built.Init = Init.get();
+ Built.Inc = Inc.get();
+ Built.LB = LB.get();
+ Built.UB = UB.get();
+ Built.IL = IL.get();
+ Built.ST = ST.get();
+ Built.EUB = EUB.get();
+ Built.NLB = NextLB.get();
+ Built.NUB = NextUB.get();
+
+ return NestedLoopCount;
+}
+
+static Expr *getCollapseNumberExpr(ArrayRef<OMPClause *> Clauses) {
+ auto CollapseClauses =
+ OMPExecutableDirective::getClausesOfKind<OMPCollapseClause>(Clauses);
+ if (CollapseClauses.begin() != CollapseClauses.end())
+ return (*CollapseClauses.begin())->getNumForLoops();
+ return nullptr;
+}
+
+static Expr *getOrderedNumberExpr(ArrayRef<OMPClause *> Clauses) {
+ auto OrderedClauses =
+ OMPExecutableDirective::getClausesOfKind<OMPOrderedClause>(Clauses);
+ if (OrderedClauses.begin() != OrderedClauses.end())
+ return (*OrderedClauses.begin())->getNumForLoops();
+ return nullptr;
+}
+
+static bool checkSimdlenSafelenValues(Sema &S, const Expr *Simdlen,
+ const Expr *Safelen) {
+ llvm::APSInt SimdlenRes, SafelenRes;
+ if (Simdlen->isValueDependent() || Simdlen->isTypeDependent() ||
+ Simdlen->isInstantiationDependent() ||
+ Simdlen->containsUnexpandedParameterPack())
+ return false;
+ if (Safelen->isValueDependent() || Safelen->isTypeDependent() ||
+ Safelen->isInstantiationDependent() ||
+ Safelen->containsUnexpandedParameterPack())
+ return false;
+ Simdlen->EvaluateAsInt(SimdlenRes, S.Context);
+ Safelen->EvaluateAsInt(SafelenRes, S.Context);
+ // OpenMP 4.1 [2.8.1, simd Construct, Restrictions]
+ // If both simdlen and safelen clauses are specified, the value of the simdlen
+ // parameter must be less than or equal to the value of the safelen parameter.
+ if (SimdlenRes > SafelenRes) {
+ S.Diag(Simdlen->getExprLoc(), diag::err_omp_wrong_simdlen_safelen_values)
+ << Simdlen->getSourceRange() << Safelen->getSourceRange();
+ return true;
+ }
+ return false;
+}
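+
+// Example of the restriction checked above (illustration only):
+//
+//   #pragma omp simd simdlen(8) safelen(4)   // error: 8 > 4
+//   #pragma omp simd simdlen(4) safelen(8)   // OK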
+
+StmtResult Sema::ActOnOpenMPSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ OMPLoopDirective::HelperExprs B;
+ // If a 'collapse' or 'ordered' clause with a number of loops is present, it
+ // defines how many nested loops are associated with the directive.
+ unsigned NestedLoopCount = CheckOpenMPLoop(
+ OMPD_simd, getCollapseNumberExpr(Clauses), getOrderedNumberExpr(Clauses),
+ AStmt, *this, *DSAStack, VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp simd loop exprs were not built");
+
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (auto C : Clauses) {
+ if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope))
+ return StmtError();
+ }
+ }
+
+ // OpenMP 4.1 [2.8.1, simd Construct, Restrictions]
+ // If both simdlen and safelen clauses are specified, the value of the simdlen
+ // parameter must be less than or equal to the value of the safelen parameter.
+ OMPSafelenClause *Safelen = nullptr;
+ OMPSimdlenClause *Simdlen = nullptr;
+ for (auto *Clause : Clauses) {
+ if (Clause->getClauseKind() == OMPC_safelen)
+ Safelen = cast<OMPSafelenClause>(Clause);
+ else if (Clause->getClauseKind() == OMPC_simdlen)
+ Simdlen = cast<OMPSimdlenClause>(Clause);
+ if (Safelen && Simdlen)
+ break;
+ }
+ if (Simdlen && Safelen &&
+ checkSimdlenSafelenValues(*this, Simdlen->getSimdlen(),
+ Safelen->getSafelen()))
+ return StmtError();
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPSimdDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
+ Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPForDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ OMPLoopDirective::HelperExprs B;
+  // If a 'collapse' or 'ordered' clause is present with a number of loops,
+  // it defines the number of associated nested loops.
+ unsigned NestedLoopCount = CheckOpenMPLoop(
+ OMPD_for, getCollapseNumberExpr(Clauses), getOrderedNumberExpr(Clauses),
+ AStmt, *this, *DSAStack, VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for loop exprs were not built");
+
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (auto C : Clauses) {
+ if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope))
+ return StmtError();
+ }
+ }
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPForDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
+ Clauses, AStmt, B, DSAStack->isCancelRegion());
+}
+
+StmtResult Sema::ActOnOpenMPForSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ OMPLoopDirective::HelperExprs B;
+  // If a 'collapse' or 'ordered' clause is present with a number of loops,
+  // it defines the number of associated nested loops.
+ unsigned NestedLoopCount =
+ CheckOpenMPLoop(OMPD_for_simd, getCollapseNumberExpr(Clauses),
+ getOrderedNumberExpr(Clauses), AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for simd loop exprs were not built");
+
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (auto C : Clauses) {
+ if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope))
+ return StmtError();
+ }
+ }
+
+ // OpenMP 4.1 [2.8.1, simd Construct, Restrictions]
+ // If both simdlen and safelen clauses are specified, the value of the simdlen
+ // parameter must be less than or equal to the value of the safelen parameter.
+ OMPSafelenClause *Safelen = nullptr;
+ OMPSimdlenClause *Simdlen = nullptr;
+ for (auto *Clause : Clauses) {
+ if (Clause->getClauseKind() == OMPC_safelen)
+ Safelen = cast<OMPSafelenClause>(Clause);
+ else if (Clause->getClauseKind() == OMPC_simdlen)
+ Simdlen = cast<OMPSimdlenClause>(Clause);
+ if (Safelen && Simdlen)
+ break;
+ }
+ if (Simdlen && Safelen &&
+ checkSimdlenSafelenValues(*this, Simdlen->getSimdlen(),
+ Safelen->getSafelen()))
+ return StmtError();
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPForSimdDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
+ Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ auto BaseStmt = AStmt;
+ while (CapturedStmt *CS = dyn_cast_or_null<CapturedStmt>(BaseStmt))
+ BaseStmt = CS->getCapturedStmt();
+ if (auto C = dyn_cast_or_null<CompoundStmt>(BaseStmt)) {
+ auto S = C->children();
+ if (S.begin() == S.end())
+ return StmtError();
+ // All associated statements must be '#pragma omp section' except for
+ // the first one.
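+    // For example, a body '{ foo(); bar(); }' is rejected here because
+    // 'bar();' is not preceded by '#pragma omp section'; 'foo();' alone
+    // implicitly forms the first section.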
+ for (Stmt *SectionStmt : llvm::make_range(std::next(S.begin()), S.end())) {
+ if (!SectionStmt || !isa<OMPSectionDirective>(SectionStmt)) {
+ if (SectionStmt)
+ Diag(SectionStmt->getLocStart(),
+ diag::err_omp_sections_substmt_not_section);
+ return StmtError();
+ }
+ cast<OMPSectionDirective>(SectionStmt)
+ ->setHasCancel(DSAStack->isCancelRegion());
+ }
+ } else {
+ Diag(AStmt->getLocStart(), diag::err_omp_sections_not_compound_stmt);
+ return StmtError();
+ }
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ return OMPSectionsDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
+ DSAStack->isCancelRegion());
+}
+
+StmtResult Sema::ActOnOpenMPSectionDirective(Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+
+ getCurFunction()->setHasBranchProtectedScope();
+ DSAStack->setParentCancelRegion(DSAStack->isCancelRegion());
+
+ return OMPSectionDirective::Create(Context, StartLoc, EndLoc, AStmt,
+ DSAStack->isCancelRegion());
+}
+
+StmtResult Sema::ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ // OpenMP [2.7.3, single Construct, Restrictions]
+ // The copyprivate clause must not be used with the nowait clause.
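+  // For example, '#pragma omp single copyprivate(a) nowait' is diagnosed
+  // below.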
+ OMPClause *Nowait = nullptr;
+ OMPClause *Copyprivate = nullptr;
+ for (auto *Clause : Clauses) {
+ if (Clause->getClauseKind() == OMPC_nowait)
+ Nowait = Clause;
+ else if (Clause->getClauseKind() == OMPC_copyprivate)
+ Copyprivate = Clause;
+ if (Copyprivate && Nowait) {
+ Diag(Copyprivate->getLocStart(),
+ diag::err_omp_single_copyprivate_with_nowait);
+ Diag(Nowait->getLocStart(), diag::note_omp_nowait_clause_here);
+ return StmtError();
+ }
+ }
+
+ return OMPSingleDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
+}
+
+StmtResult Sema::ActOnOpenMPMasterDirective(Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ return OMPMasterDirective::Create(Context, StartLoc, EndLoc, AStmt);
+}
+
+StmtResult Sema::ActOnOpenMPCriticalDirective(
+ const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+
+ bool ErrorFound = false;
+ llvm::APSInt Hint;
+ SourceLocation HintLoc;
+ bool DependentHint = false;
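+  // OpenMP [2.13.2, critical construct, Restrictions]
+  // A hint clause is only allowed on a named critical directive, and all
+  // critical directives with the same name must specify the same hint value;
+  // both restrictions are checked below.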
+ for (auto *C : Clauses) {
+ if (C->getClauseKind() == OMPC_hint) {
+ if (!DirName.getName()) {
+ Diag(C->getLocStart(), diag::err_omp_hint_clause_no_name);
+ ErrorFound = true;
+ }
+ Expr *E = cast<OMPHintClause>(C)->getHint();
+ if (E->isTypeDependent() || E->isValueDependent() ||
+ E->isInstantiationDependent())
+ DependentHint = true;
+ else {
+ Hint = E->EvaluateKnownConstInt(Context);
+ HintLoc = C->getLocStart();
+ }
+ }
+ }
+ if (ErrorFound)
+ return StmtError();
+ auto Pair = DSAStack->getCriticalWithHint(DirName);
+ if (Pair.first && DirName.getName() && !DependentHint) {
+ if (llvm::APSInt::compareValues(Hint, Pair.second) != 0) {
+ Diag(StartLoc, diag::err_omp_critical_with_hint);
+ if (HintLoc.isValid()) {
+ Diag(HintLoc, diag::note_omp_critical_hint_here)
+ << 0 << Hint.toString(/*Radix=*/10, /*Signed=*/false);
+ } else
+ Diag(StartLoc, diag::note_omp_critical_no_hint) << 0;
+ if (auto *C = Pair.first->getSingleClause<OMPHintClause>()) {
+ Diag(C->getLocStart(), diag::note_omp_critical_hint_here)
+ << 1
+ << C->getHint()->EvaluateKnownConstInt(Context).toString(
+ /*Radix=*/10, /*Signed=*/false);
+ } else
+ Diag(Pair.first->getLocStart(), diag::note_omp_critical_no_hint) << 1;
+ }
+ }
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ auto *Dir = OMPCriticalDirective::Create(Context, DirName, StartLoc, EndLoc,
+ Clauses, AStmt);
+ if (!Pair.first && DirName.getName() && !DependentHint)
+ DSAStack->addCriticalWithHint(Dir, Hint);
+ return Dir;
+}
+
+StmtResult Sema::ActOnOpenMPParallelForDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
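+  // Since no exception may escape a structured block, the outlined region
+  // can safely be marked nothrow.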
+ CS->getCapturedDecl()->setNothrow();
+
+ OMPLoopDirective::HelperExprs B;
+  // If a 'collapse' or 'ordered' clause is present with a number of loops,
+  // it defines the number of associated nested loops.
+ unsigned NestedLoopCount =
+ CheckOpenMPLoop(OMPD_parallel_for, getCollapseNumberExpr(Clauses),
+ getOrderedNumberExpr(Clauses), AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp parallel for loop exprs were not built");
+
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (auto C : Clauses) {
+ if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope))
+ return StmtError();
+ }
+ }
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPParallelForDirective::Create(Context, StartLoc, EndLoc,
+ NestedLoopCount, Clauses, AStmt, B,
+ DSAStack->isCancelRegion());
+}
+
+StmtResult Sema::ActOnOpenMPParallelForSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ OMPLoopDirective::HelperExprs B;
+  // If a 'collapse' or 'ordered' clause is present with a number of loops,
+  // it defines the number of associated nested loops.
+ unsigned NestedLoopCount =
+ CheckOpenMPLoop(OMPD_parallel_for_simd, getCollapseNumberExpr(Clauses),
+ getOrderedNumberExpr(Clauses), AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (auto C : Clauses) {
+ if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope))
+ return StmtError();
+ }
+ }
+
+ // OpenMP 4.1 [2.8.1, simd Construct, Restrictions]
+ // If both simdlen and safelen clauses are specified, the value of the simdlen
+ // parameter must be less than or equal to the value of the safelen parameter.
+ OMPSafelenClause *Safelen = nullptr;
+ OMPSimdlenClause *Simdlen = nullptr;
+ for (auto *Clause : Clauses) {
+ if (Clause->getClauseKind() == OMPC_safelen)
+ Safelen = cast<OMPSafelenClause>(Clause);
+ else if (Clause->getClauseKind() == OMPC_simdlen)
+ Simdlen = cast<OMPSimdlenClause>(Clause);
+ if (Safelen && Simdlen)
+ break;
+ }
+ if (Simdlen && Safelen &&
+ checkSimdlenSafelenValues(*this, Simdlen->getSimdlen(),
+ Safelen->getSafelen()))
+ return StmtError();
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPParallelForSimdDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult
+Sema::ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ auto BaseStmt = AStmt;
+ while (CapturedStmt *CS = dyn_cast_or_null<CapturedStmt>(BaseStmt))
+ BaseStmt = CS->getCapturedStmt();
+ if (auto C = dyn_cast_or_null<CompoundStmt>(BaseStmt)) {
+ auto S = C->children();
+ if (S.begin() == S.end())
+ return StmtError();
+ // All associated statements must be '#pragma omp section' except for
+ // the first one.
+ for (Stmt *SectionStmt : llvm::make_range(std::next(S.begin()), S.end())) {
+ if (!SectionStmt || !isa<OMPSectionDirective>(SectionStmt)) {
+ if (SectionStmt)
+ Diag(SectionStmt->getLocStart(),
+ diag::err_omp_parallel_sections_substmt_not_section);
+ return StmtError();
+ }
+ cast<OMPSectionDirective>(SectionStmt)
+ ->setHasCancel(DSAStack->isCancelRegion());
+ }
+ } else {
+ Diag(AStmt->getLocStart(),
+ diag::err_omp_parallel_sections_not_compound_stmt);
+ return StmtError();
+ }
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ return OMPParallelSectionsDirective::Create(
+ Context, StartLoc, EndLoc, Clauses, AStmt, DSAStack->isCancelRegion());
+}
+
+StmtResult Sema::ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ return OMPTaskDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
+ DSAStack->isCancelRegion());
+}
+
+StmtResult Sema::ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return OMPTaskyieldDirective::Create(Context, StartLoc, EndLoc);
+}
+
+StmtResult Sema::ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return OMPBarrierDirective::Create(Context, StartLoc, EndLoc);
+}
+
+StmtResult Sema::ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return OMPTaskwaitDirective::Create(Context, StartLoc, EndLoc);
+}
+
+StmtResult Sema::ActOnOpenMPTaskgroupDirective(Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ return OMPTaskgroupDirective::Create(Context, StartLoc, EndLoc, AStmt);
+}
+
+StmtResult Sema::ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ assert(Clauses.size() <= 1 && "Extra clauses in flush directive");
+ return OMPFlushDirective::Create(Context, StartLoc, EndLoc, Clauses);
+}
+
+StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ OMPClause *DependFound = nullptr;
+ OMPClause *DependSourceClause = nullptr;
+ OMPClause *DependSinkClause = nullptr;
+ bool ErrorFound = false;
+ OMPThreadsClause *TC = nullptr;
+ OMPSIMDClause *SC = nullptr;
+ for (auto *C : Clauses) {
+ if (auto *DC = dyn_cast<OMPDependClause>(C)) {
+ DependFound = C;
+ if (DC->getDependencyKind() == OMPC_DEPEND_source) {
+ if (DependSourceClause) {
+ Diag(C->getLocStart(), diag::err_omp_more_one_clause)
+ << getOpenMPDirectiveName(OMPD_ordered)
+ << getOpenMPClauseName(OMPC_depend) << 2;
+ ErrorFound = true;
+ } else
+ DependSourceClause = C;
+ if (DependSinkClause) {
+ Diag(C->getLocStart(), diag::err_omp_depend_sink_source_not_allowed)
+ << 0;
+ ErrorFound = true;
+ }
+ } else if (DC->getDependencyKind() == OMPC_DEPEND_sink) {
+ if (DependSourceClause) {
+ Diag(C->getLocStart(), diag::err_omp_depend_sink_source_not_allowed)
+ << 1;
+ ErrorFound = true;
+ }
+ DependSinkClause = C;
+ }
+ } else if (C->getClauseKind() == OMPC_threads)
+ TC = cast<OMPThreadsClause>(C);
+ else if (C->getClauseKind() == OMPC_simd)
+ SC = cast<OMPSIMDClause>(C);
+ }
+ if (!ErrorFound && !SC &&
+ isOpenMPSimdDirective(DSAStack->getParentDirective())) {
+    // OpenMP [2.8.1, simd Construct, Restrictions]
+ // An ordered construct with the simd clause is the only OpenMP construct
+ // that can appear in the simd region.
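+    // For example, a plain '#pragma omp ordered' inside a simd region is
+    // rejected here, while '#pragma omp ordered simd' is allowed.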
+ Diag(StartLoc, diag::err_omp_prohibited_region_simd);
+ ErrorFound = true;
+ } else if (DependFound && (TC || SC)) {
+ Diag(DependFound->getLocStart(), diag::err_omp_depend_clause_thread_simd)
+ << getOpenMPClauseName(TC ? TC->getClauseKind() : SC->getClauseKind());
+ ErrorFound = true;
+ } else if (DependFound && !DSAStack->getParentOrderedRegionParam()) {
+ Diag(DependFound->getLocStart(),
+ diag::err_omp_ordered_directive_without_param);
+ ErrorFound = true;
+ } else if (TC || Clauses.empty()) {
+ if (auto *Param = DSAStack->getParentOrderedRegionParam()) {
+ SourceLocation ErrLoc = TC ? TC->getLocStart() : StartLoc;
+ Diag(ErrLoc, diag::err_omp_ordered_directive_with_param)
+ << (TC != nullptr);
+ Diag(Param->getLocStart(), diag::note_omp_ordered_param);
+ ErrorFound = true;
+ }
+ }
+ if ((!AStmt && !DependFound) || ErrorFound)
+ return StmtError();
+
+ if (AStmt) {
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+
+ getCurFunction()->setHasBranchProtectedScope();
+ }
+
+ return OMPOrderedDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
+}
+
+namespace {
+/// \brief Helper class for checking expressions in 'omp atomic [update]'
+/// constructs.
+class OpenMPAtomicUpdateChecker {
+ /// \brief Error results for atomic update expressions.
+ enum ExprAnalysisErrorCode {
+ /// \brief A statement is not an expression statement.
+ NotAnExpression,
+ /// \brief Expression is not builtin binary or unary operation.
+ NotABinaryOrUnaryExpression,
+    /// \brief Unary operation is not a post-/pre-increment/decrement
+    /// operation.
+ NotAnUnaryIncDecExpression,
+ /// \brief An expression is not of scalar type.
+ NotAScalarType,
+ /// \brief A binary operation is not an assignment operation.
+ NotAnAssignmentOp,
+ /// \brief RHS part of the binary operation is not a binary expression.
+ NotABinaryExpression,
+    /// \brief RHS part is not an additive/multiplicative/shift/bitwise binary
+    /// expression.
+ NotABinaryOperator,
+    /// \brief RHS binary operation does not reference the updated LHS part.
+ NotAnUpdateExpression,
+    /// \brief No error found.
+ NoError
+ };
+ /// \brief Reference to Sema.
+ Sema &SemaRef;
+ /// \brief A location for note diagnostics (when error is found).
+ SourceLocation NoteLoc;
+ /// \brief 'x' lvalue part of the source atomic expression.
+ Expr *X;
+ /// \brief 'expr' rvalue part of the source atomic expression.
+ Expr *E;
+ /// \brief Helper expression of the form
+ /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
+ /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
+ Expr *UpdateExpr;
+  /// \brief True if 'x' is the LHS in the RHS part of the full update
+  /// expression; this is important for non-associative operations.
+ bool IsXLHSInRHSPart;
+ BinaryOperatorKind Op;
+ SourceLocation OpLoc;
+ /// \brief true if the source expression is a postfix unary operation, false
+ /// if it is a prefix unary operation.
+ bool IsPostfixUpdate;
+
+public:
+ OpenMPAtomicUpdateChecker(Sema &SemaRef)
+ : SemaRef(SemaRef), X(nullptr), E(nullptr), UpdateExpr(nullptr),
+ IsXLHSInRHSPart(false), Op(BO_PtrMemD), IsPostfixUpdate(false) {}
+  /// \brief Check that the specified statement is suitable for 'atomic
+  /// update' constructs and extract 'x', 'expr' and the operation from the
+  /// original expression. If DiagId and NoteId are 0, only the check is
+  /// performed, without emitting diagnostics.
+  /// \param DiagId Diagnostic to be emitted if an error is found.
+  /// \param NoteId Diagnostic note for the main error message.
+  /// \return true if the statement is not an update expression, false
+  /// otherwise.
+ bool checkStatement(Stmt *S, unsigned DiagId = 0, unsigned NoteId = 0);
+ /// \brief Return the 'x' lvalue part of the source atomic expression.
+ Expr *getX() const { return X; }
+ /// \brief Return the 'expr' rvalue part of the source atomic expression.
+ Expr *getExpr() const { return E; }
+ /// \brief Return the update expression used in calculation of the updated
+ /// value. Always has form 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
+ /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
+ Expr *getUpdateExpr() const { return UpdateExpr; }
+ /// \brief Return true if 'x' is LHS in RHS part of full update expression,
+ /// false otherwise.
+ bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
+
+ /// \brief true if the source expression is a postfix unary operation, false
+ /// if it is a prefix unary operation.
+ bool isPostfixUpdate() const { return IsPostfixUpdate; }
+
+private:
+ bool checkBinaryOperation(BinaryOperator *AtomicBinOp, unsigned DiagId = 0,
+ unsigned NoteId = 0);
+};
+} // namespace
+
+bool OpenMPAtomicUpdateChecker::checkBinaryOperation(
+ BinaryOperator *AtomicBinOp, unsigned DiagId, unsigned NoteId) {
+ ExprAnalysisErrorCode ErrorFound = NoError;
+ SourceLocation ErrorLoc, NoteLoc;
+ SourceRange ErrorRange, NoteRange;
+ // Allowed constructs are:
+ // x = x binop expr;
+ // x = expr binop x;
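+  // The two forms must be distinguished because binop need not be
+  // commutative, e.g. 'x = x - expr;' and 'x = expr - x;' differ.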
+ if (AtomicBinOp->getOpcode() == BO_Assign) {
+ X = AtomicBinOp->getLHS();
+ if (auto *AtomicInnerBinOp = dyn_cast<BinaryOperator>(
+ AtomicBinOp->getRHS()->IgnoreParenImpCasts())) {
+ if (AtomicInnerBinOp->isMultiplicativeOp() ||
+ AtomicInnerBinOp->isAdditiveOp() || AtomicInnerBinOp->isShiftOp() ||
+ AtomicInnerBinOp->isBitwiseOp()) {
+ Op = AtomicInnerBinOp->getOpcode();
+ OpLoc = AtomicInnerBinOp->getOperatorLoc();
+ auto *LHS = AtomicInnerBinOp->getLHS();
+ auto *RHS = AtomicInnerBinOp->getRHS();
+ llvm::FoldingSetNodeID XId, LHSId, RHSId;
+ X->IgnoreParenImpCasts()->Profile(XId, SemaRef.getASTContext(),
+ /*Canonical=*/true);
+ LHS->IgnoreParenImpCasts()->Profile(LHSId, SemaRef.getASTContext(),
+ /*Canonical=*/true);
+ RHS->IgnoreParenImpCasts()->Profile(RHSId, SemaRef.getASTContext(),
+ /*Canonical=*/true);
+ if (XId == LHSId) {
+ E = RHS;
+ IsXLHSInRHSPart = true;
+ } else if (XId == RHSId) {
+ E = LHS;
+ IsXLHSInRHSPart = false;
+ } else {
+ ErrorLoc = AtomicInnerBinOp->getExprLoc();
+ ErrorRange = AtomicInnerBinOp->getSourceRange();
+ NoteLoc = X->getExprLoc();
+ NoteRange = X->getSourceRange();
+ ErrorFound = NotAnUpdateExpression;
+ }
+ } else {
+ ErrorLoc = AtomicInnerBinOp->getExprLoc();
+ ErrorRange = AtomicInnerBinOp->getSourceRange();
+ NoteLoc = AtomicInnerBinOp->getOperatorLoc();
+ NoteRange = SourceRange(NoteLoc, NoteLoc);
+ ErrorFound = NotABinaryOperator;
+ }
+ } else {
+ NoteLoc = ErrorLoc = AtomicBinOp->getRHS()->getExprLoc();
+ NoteRange = ErrorRange = AtomicBinOp->getRHS()->getSourceRange();
+ ErrorFound = NotABinaryExpression;
+ }
+ } else {
+ ErrorLoc = AtomicBinOp->getExprLoc();
+ ErrorRange = AtomicBinOp->getSourceRange();
+ NoteLoc = AtomicBinOp->getOperatorLoc();
+ NoteRange = SourceRange(NoteLoc, NoteLoc);
+ ErrorFound = NotAnAssignmentOp;
+ }
+ if (ErrorFound != NoError && DiagId != 0 && NoteId != 0) {
+ SemaRef.Diag(ErrorLoc, DiagId) << ErrorRange;
+ SemaRef.Diag(NoteLoc, NoteId) << ErrorFound << NoteRange;
+ return true;
+ } else if (SemaRef.CurContext->isDependentContext())
+ E = X = UpdateExpr = nullptr;
+ return ErrorFound != NoError;
+}
+
+bool OpenMPAtomicUpdateChecker::checkStatement(Stmt *S, unsigned DiagId,
+ unsigned NoteId) {
+ ExprAnalysisErrorCode ErrorFound = NoError;
+ SourceLocation ErrorLoc, NoteLoc;
+ SourceRange ErrorRange, NoteRange;
+ // Allowed constructs are:
+ // x++;
+ // x--;
+ // ++x;
+ // --x;
+ // x binop= expr;
+ // x = x binop expr;
+ // x = expr binop x;
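+  // An assignment whose RHS does not reference 'x', e.g. 'x = y + 1;', is
+  // rejected (NotAnUpdateExpression).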
+ if (auto *AtomicBody = dyn_cast<Expr>(S)) {
+ AtomicBody = AtomicBody->IgnoreParenImpCasts();
+ if (AtomicBody->getType()->isScalarType() ||
+ AtomicBody->isInstantiationDependent()) {
+ if (auto *AtomicCompAssignOp = dyn_cast<CompoundAssignOperator>(
+ AtomicBody->IgnoreParenImpCasts())) {
+ // Check for Compound Assignment Operation
+ Op = BinaryOperator::getOpForCompoundAssignment(
+ AtomicCompAssignOp->getOpcode());
+ OpLoc = AtomicCompAssignOp->getOperatorLoc();
+ E = AtomicCompAssignOp->getRHS();
+ X = AtomicCompAssignOp->getLHS();
+ IsXLHSInRHSPart = true;
+ } else if (auto *AtomicBinOp = dyn_cast<BinaryOperator>(
+ AtomicBody->IgnoreParenImpCasts())) {
+ // Check for Binary Operation
+        if (checkBinaryOperation(AtomicBinOp, DiagId, NoteId))
+ return true;
+ } else if (auto *AtomicUnaryOp =
+ dyn_cast<UnaryOperator>(AtomicBody->IgnoreParenImpCasts())) {
+ // Check for Unary Operation
+ if (AtomicUnaryOp->isIncrementDecrementOp()) {
+ IsPostfixUpdate = AtomicUnaryOp->isPostfix();
+ Op = AtomicUnaryOp->isIncrementOp() ? BO_Add : BO_Sub;
+ OpLoc = AtomicUnaryOp->getOperatorLoc();
+ X = AtomicUnaryOp->getSubExpr();
+ E = SemaRef.ActOnIntegerConstant(OpLoc, /*uint64_t Val=*/1).get();
+ IsXLHSInRHSPart = true;
+ } else {
+ ErrorFound = NotAnUnaryIncDecExpression;
+ ErrorLoc = AtomicUnaryOp->getExprLoc();
+ ErrorRange = AtomicUnaryOp->getSourceRange();
+ NoteLoc = AtomicUnaryOp->getOperatorLoc();
+ NoteRange = SourceRange(NoteLoc, NoteLoc);
+ }
+ } else if (!AtomicBody->isInstantiationDependent()) {
+ ErrorFound = NotABinaryOrUnaryExpression;
+ NoteLoc = ErrorLoc = AtomicBody->getExprLoc();
+ NoteRange = ErrorRange = AtomicBody->getSourceRange();
+ }
+ } else {
+ ErrorFound = NotAScalarType;
+ NoteLoc = ErrorLoc = AtomicBody->getLocStart();
+ NoteRange = ErrorRange = SourceRange(NoteLoc, NoteLoc);
+ }
+ } else {
+ ErrorFound = NotAnExpression;
+ NoteLoc = ErrorLoc = S->getLocStart();
+ NoteRange = ErrorRange = SourceRange(NoteLoc, NoteLoc);
+ }
+ if (ErrorFound != NoError && DiagId != 0 && NoteId != 0) {
+ SemaRef.Diag(ErrorLoc, DiagId) << ErrorRange;
+ SemaRef.Diag(NoteLoc, NoteId) << ErrorFound << NoteRange;
+ return true;
+ } else if (SemaRef.CurContext->isDependentContext())
+ E = X = UpdateExpr = nullptr;
+ if (ErrorFound == NoError && E && X) {
+ // Build an update expression of form 'OpaqueValueExpr(x) binop
+ // OpaqueValueExpr(expr)' or 'OpaqueValueExpr(expr) binop
+ // OpaqueValueExpr(x)' and then cast it to the type of the 'x' expression.
+ auto *OVEX = new (SemaRef.getASTContext())
+ OpaqueValueExpr(X->getExprLoc(), X->getType(), VK_RValue);
+ auto *OVEExpr = new (SemaRef.getASTContext())
+ OpaqueValueExpr(E->getExprLoc(), E->getType(), VK_RValue);
+ auto Update =
+ SemaRef.CreateBuiltinBinOp(OpLoc, Op, IsXLHSInRHSPart ? OVEX : OVEExpr,
+ IsXLHSInRHSPart ? OVEExpr : OVEX);
+ if (Update.isInvalid())
+ return true;
+ Update = SemaRef.PerformImplicitConversion(Update.get(), X->getType(),
+ Sema::AA_Casting);
+ if (Update.isInvalid())
+ return true;
+ UpdateExpr = Update.get();
+ }
+ return ErrorFound != NoError;
+}
+
+StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ auto CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ OpenMPClauseKind AtomicKind = OMPC_unknown;
+ SourceLocation AtomicKindLoc;
+ for (auto *C : Clauses) {
+ if (C->getClauseKind() == OMPC_read || C->getClauseKind() == OMPC_write ||
+ C->getClauseKind() == OMPC_update ||
+ C->getClauseKind() == OMPC_capture) {
+ if (AtomicKind != OMPC_unknown) {
+ Diag(C->getLocStart(), diag::err_omp_atomic_several_clauses)
+ << SourceRange(C->getLocStart(), C->getLocEnd());
+ Diag(AtomicKindLoc, diag::note_omp_atomic_previous_clause)
+ << getOpenMPClauseName(AtomicKind);
+ } else {
+ AtomicKind = C->getClauseKind();
+ AtomicKindLoc = C->getLocStart();
+ }
+ }
+ }
+
+ auto Body = CS->getCapturedStmt();
+ if (auto *EWC = dyn_cast<ExprWithCleanups>(Body))
+ Body = EWC->getSubExpr();
+
+ Expr *X = nullptr;
+ Expr *V = nullptr;
+ Expr *E = nullptr;
+ Expr *UE = nullptr;
+ bool IsXLHSInRHSPart = false;
+ bool IsPostfixUpdate = false;
+ // OpenMP [2.12.6, atomic Construct]
+  // In the following expressions:
+ // * x and v (as applicable) are both l-value expressions with scalar type.
+ // * During the execution of an atomic region, multiple syntactic
+ // occurrences of x must designate the same storage location.
+ // * Neither of v and expr (as applicable) may access the storage location
+ // designated by x.
+ // * Neither of x and expr (as applicable) may access the storage location
+ // designated by v.
+ // * expr is an expression with scalar type.
+ // * binop is one of +, *, -, /, &, ^, |, <<, or >>.
+ // * binop, binop=, ++, and -- are not overloaded operators.
+ // * The expression x binop expr must be numerically equivalent to x binop
+ // (expr). This requirement is satisfied if the operators in expr have
+ // precedence greater than binop, or by using parentheses around expr or
+ // subexpressions of expr.
+ // * The expression expr binop x must be numerically equivalent to (expr)
+ // binop x. This requirement is satisfied if the operators in expr have
+ // precedence equal to or greater than binop, or by using parentheses around
+ // expr or subexpressions of expr.
+ // * For forms that allow multiple occurrences of x, the number of times
+ // that x is evaluated is unspecified.
+ if (AtomicKind == OMPC_read) {
+ enum {
+ NotAnExpression,
+ NotAnAssignmentOp,
+ NotAScalarType,
+ NotAnLValue,
+ NoError
+ } ErrorFound = NoError;
+ SourceLocation ErrorLoc, NoteLoc;
+ SourceRange ErrorRange, NoteRange;
+ // If clause is read:
+ // v = x;
+ if (auto AtomicBody = dyn_cast<Expr>(Body)) {
+ auto AtomicBinOp =
+ dyn_cast<BinaryOperator>(AtomicBody->IgnoreParenImpCasts());
+ if (AtomicBinOp && AtomicBinOp->getOpcode() == BO_Assign) {
+ X = AtomicBinOp->getRHS()->IgnoreParenImpCasts();
+ V = AtomicBinOp->getLHS()->IgnoreParenImpCasts();
+ if ((X->isInstantiationDependent() || X->getType()->isScalarType()) &&
+ (V->isInstantiationDependent() || V->getType()->isScalarType())) {
+ if (!X->isLValue() || !V->isLValue()) {
+ auto NotLValueExpr = X->isLValue() ? V : X;
+ ErrorFound = NotAnLValue;
+ ErrorLoc = AtomicBinOp->getExprLoc();
+ ErrorRange = AtomicBinOp->getSourceRange();
+ NoteLoc = NotLValueExpr->getExprLoc();
+ NoteRange = NotLValueExpr->getSourceRange();
+ }
+ } else if (!X->isInstantiationDependent() ||
+ !V->isInstantiationDependent()) {
+ auto NotScalarExpr =
+ (X->isInstantiationDependent() || X->getType()->isScalarType())
+ ? V
+ : X;
+ ErrorFound = NotAScalarType;
+ ErrorLoc = AtomicBinOp->getExprLoc();
+ ErrorRange = AtomicBinOp->getSourceRange();
+ NoteLoc = NotScalarExpr->getExprLoc();
+ NoteRange = NotScalarExpr->getSourceRange();
+ }
+ } else if (!AtomicBody->isInstantiationDependent()) {
+ ErrorFound = NotAnAssignmentOp;
+ ErrorLoc = AtomicBody->getExprLoc();
+ ErrorRange = AtomicBody->getSourceRange();
+ NoteLoc = AtomicBinOp ? AtomicBinOp->getOperatorLoc()
+ : AtomicBody->getExprLoc();
+ NoteRange = AtomicBinOp ? AtomicBinOp->getSourceRange()
+ : AtomicBody->getSourceRange();
+ }
+ } else {
+ ErrorFound = NotAnExpression;
+ NoteLoc = ErrorLoc = Body->getLocStart();
+ NoteRange = ErrorRange = SourceRange(NoteLoc, NoteLoc);
+ }
+ if (ErrorFound != NoError) {
+ Diag(ErrorLoc, diag::err_omp_atomic_read_not_expression_statement)
+ << ErrorRange;
+ Diag(NoteLoc, diag::note_omp_atomic_read_write) << ErrorFound
+ << NoteRange;
+ return StmtError();
+ } else if (CurContext->isDependentContext())
+ V = X = nullptr;
+ } else if (AtomicKind == OMPC_write) {
+ enum {
+ NotAnExpression,
+ NotAnAssignmentOp,
+ NotAScalarType,
+ NotAnLValue,
+ NoError
+ } ErrorFound = NoError;
+ SourceLocation ErrorLoc, NoteLoc;
+ SourceRange ErrorRange, NoteRange;
+ // If clause is write:
+ // x = expr;
+ if (auto AtomicBody = dyn_cast<Expr>(Body)) {
+ auto AtomicBinOp =
+ dyn_cast<BinaryOperator>(AtomicBody->IgnoreParenImpCasts());
+ if (AtomicBinOp && AtomicBinOp->getOpcode() == BO_Assign) {
+ X = AtomicBinOp->getLHS();
+ E = AtomicBinOp->getRHS();
+ if ((X->isInstantiationDependent() || X->getType()->isScalarType()) &&
+ (E->isInstantiationDependent() || E->getType()->isScalarType())) {
+ if (!X->isLValue()) {
+ ErrorFound = NotAnLValue;
+ ErrorLoc = AtomicBinOp->getExprLoc();
+ ErrorRange = AtomicBinOp->getSourceRange();
+ NoteLoc = X->getExprLoc();
+ NoteRange = X->getSourceRange();
+ }
+ } else if (!X->isInstantiationDependent() ||
+ !E->isInstantiationDependent()) {
+ auto NotScalarExpr =
+ (X->isInstantiationDependent() || X->getType()->isScalarType())
+ ? E
+ : X;
+ ErrorFound = NotAScalarType;
+ ErrorLoc = AtomicBinOp->getExprLoc();
+ ErrorRange = AtomicBinOp->getSourceRange();
+ NoteLoc = NotScalarExpr->getExprLoc();
+ NoteRange = NotScalarExpr->getSourceRange();
+ }
+ } else if (!AtomicBody->isInstantiationDependent()) {
+ ErrorFound = NotAnAssignmentOp;
+ ErrorLoc = AtomicBody->getExprLoc();
+ ErrorRange = AtomicBody->getSourceRange();
+ NoteLoc = AtomicBinOp ? AtomicBinOp->getOperatorLoc()
+ : AtomicBody->getExprLoc();
+ NoteRange = AtomicBinOp ? AtomicBinOp->getSourceRange()
+ : AtomicBody->getSourceRange();
+ }
+ } else {
+ ErrorFound = NotAnExpression;
+ NoteLoc = ErrorLoc = Body->getLocStart();
+ NoteRange = ErrorRange = SourceRange(NoteLoc, NoteLoc);
+ }
+ if (ErrorFound != NoError) {
+ Diag(ErrorLoc, diag::err_omp_atomic_write_not_expression_statement)
+ << ErrorRange;
+ Diag(NoteLoc, diag::note_omp_atomic_read_write) << ErrorFound
+ << NoteRange;
+ return StmtError();
+ } else if (CurContext->isDependentContext())
+ E = X = nullptr;
+ } else if (AtomicKind == OMPC_update || AtomicKind == OMPC_unknown) {
+ // If clause is update:
+ // x++;
+ // x--;
+ // ++x;
+ // --x;
+ // x binop= expr;
+ // x = x binop expr;
+ // x = expr binop x;
+ OpenMPAtomicUpdateChecker Checker(*this);
+ if (Checker.checkStatement(
+ Body, (AtomicKind == OMPC_update)
+ ? diag::err_omp_atomic_update_not_expression_statement
+ : diag::err_omp_atomic_not_expression_statement,
+ diag::note_omp_atomic_update))
+ return StmtError();
+ if (!CurContext->isDependentContext()) {
+ E = Checker.getExpr();
+ X = Checker.getX();
+ UE = Checker.getUpdateExpr();
+ IsXLHSInRHSPart = Checker.isXLHSInRHSPart();
+ }
+ } else if (AtomicKind == OMPC_capture) {
+ enum {
+ NotAnAssignmentOp,
+ NotACompoundStatement,
+ NotTwoSubstatements,
+ NotASpecificExpression,
+ NoError
+ } ErrorFound = NoError;
+ SourceLocation ErrorLoc, NoteLoc;
+ SourceRange ErrorRange, NoteRange;
+ if (auto *AtomicBody = dyn_cast<Expr>(Body)) {
+ // If clause is a capture:
+ // v = x++;
+ // v = x--;
+ // v = ++x;
+ // v = --x;
+ // v = x binop= expr;
+ // v = x = x binop expr;
+ // v = x = expr binop x;
+ auto *AtomicBinOp =
+ dyn_cast<BinaryOperator>(AtomicBody->IgnoreParenImpCasts());
+ if (AtomicBinOp && AtomicBinOp->getOpcode() == BO_Assign) {
+ V = AtomicBinOp->getLHS();
+ Body = AtomicBinOp->getRHS()->IgnoreParenImpCasts();
+ OpenMPAtomicUpdateChecker Checker(*this);
+ if (Checker.checkStatement(
+ Body, diag::err_omp_atomic_capture_not_expression_statement,
+ diag::note_omp_atomic_update))
+ return StmtError();
+ E = Checker.getExpr();
+ X = Checker.getX();
+ UE = Checker.getUpdateExpr();
+ IsXLHSInRHSPart = Checker.isXLHSInRHSPart();
+ IsPostfixUpdate = Checker.isPostfixUpdate();
+ } else if (!AtomicBody->isInstantiationDependent()) {
+ ErrorLoc = AtomicBody->getExprLoc();
+ ErrorRange = AtomicBody->getSourceRange();
+ NoteLoc = AtomicBinOp ? AtomicBinOp->getOperatorLoc()
+ : AtomicBody->getExprLoc();
+ NoteRange = AtomicBinOp ? AtomicBinOp->getSourceRange()
+ : AtomicBody->getSourceRange();
+ ErrorFound = NotAnAssignmentOp;
+ }
+ if (ErrorFound != NoError) {
+ Diag(ErrorLoc, diag::err_omp_atomic_capture_not_expression_statement)
+ << ErrorRange;
+ Diag(NoteLoc, diag::note_omp_atomic_capture) << ErrorFound << NoteRange;
+ return StmtError();
+ } else if (CurContext->isDependentContext()) {
+ UE = V = E = X = nullptr;
+ }
+ } else {
+ // If clause is a capture:
+ // { v = x; x = expr; }
+ // { v = x; x++; }
+ // { v = x; x--; }
+ // { v = x; ++x; }
+ // { v = x; --x; }
+ // { v = x; x binop= expr; }
+ // { v = x; x = x binop expr; }
+ // { v = x; x = expr binop x; }
+ // { x++; v = x; }
+ // { x--; v = x; }
+ // { ++x; v = x; }
+ // { --x; v = x; }
+ // { x binop= expr; v = x; }
+ // { x = x binop expr; v = x; }
+ // { x = expr binop x; v = x; }
+ if (auto *CS = dyn_cast<CompoundStmt>(Body)) {
+ // Check that this is { expr1; expr2; }
+ if (CS->size() == 2) {
+ auto *First = CS->body_front();
+ auto *Second = CS->body_back();
+ if (auto *EWC = dyn_cast<ExprWithCleanups>(First))
+ First = EWC->getSubExpr()->IgnoreParenImpCasts();
+ if (auto *EWC = dyn_cast<ExprWithCleanups>(Second))
+ Second = EWC->getSubExpr()->IgnoreParenImpCasts();
+ // Need to find what subexpression is 'v' and what is 'x'.
+ OpenMPAtomicUpdateChecker Checker(*this);
+ bool IsUpdateExprFound = !Checker.checkStatement(Second);
+ BinaryOperator *BinOp = nullptr;
+ if (IsUpdateExprFound) {
+ BinOp = dyn_cast<BinaryOperator>(First);
+ IsUpdateExprFound = BinOp && BinOp->getOpcode() == BO_Assign;
+ }
+ if (IsUpdateExprFound && !CurContext->isDependentContext()) {
+ // { v = x; x++; }
+ // { v = x; x--; }
+ // { v = x; ++x; }
+ // { v = x; --x; }
+ // { v = x; x binop= expr; }
+ // { v = x; x = x binop expr; }
+ // { v = x; x = expr binop x; }
+ // Check that the first expression has form v = x.
+ auto *PossibleX = BinOp->getRHS()->IgnoreParenImpCasts();
+ llvm::FoldingSetNodeID XId, PossibleXId;
+ Checker.getX()->Profile(XId, Context, /*Canonical=*/true);
+ PossibleX->Profile(PossibleXId, Context, /*Canonical=*/true);
+ IsUpdateExprFound = XId == PossibleXId;
+ if (IsUpdateExprFound) {
+ V = BinOp->getLHS();
+ X = Checker.getX();
+ E = Checker.getExpr();
+ UE = Checker.getUpdateExpr();
+ IsXLHSInRHSPart = Checker.isXLHSInRHSPart();
+ IsPostfixUpdate = true;
+ }
+ }
+ if (!IsUpdateExprFound) {
+ IsUpdateExprFound = !Checker.checkStatement(First);
+ BinOp = nullptr;
+ if (IsUpdateExprFound) {
+ BinOp = dyn_cast<BinaryOperator>(Second);
+ IsUpdateExprFound = BinOp && BinOp->getOpcode() == BO_Assign;
+ }
+ if (IsUpdateExprFound && !CurContext->isDependentContext()) {
+ // { x++; v = x; }
+ // { x--; v = x; }
+ // { ++x; v = x; }
+ // { --x; v = x; }
+ // { x binop= expr; v = x; }
+ // { x = x binop expr; v = x; }
+ // { x = expr binop x; v = x; }
+ // Check that the second expression has form v = x.
+ auto *PossibleX = BinOp->getRHS()->IgnoreParenImpCasts();
+ llvm::FoldingSetNodeID XId, PossibleXId;
+ Checker.getX()->Profile(XId, Context, /*Canonical=*/true);
+ PossibleX->Profile(PossibleXId, Context, /*Canonical=*/true);
+ IsUpdateExprFound = XId == PossibleXId;
+ if (IsUpdateExprFound) {
+ V = BinOp->getLHS();
+ X = Checker.getX();
+ E = Checker.getExpr();
+ UE = Checker.getUpdateExpr();
+ IsXLHSInRHSPart = Checker.isXLHSInRHSPart();
+ IsPostfixUpdate = false;
+ }
+ }
+ }
+ if (!IsUpdateExprFound) {
+ // { v = x; x = expr; }
+ auto *FirstExpr = dyn_cast<Expr>(First);
+ auto *SecondExpr = dyn_cast<Expr>(Second);
+ if (!FirstExpr || !SecondExpr ||
+ !(FirstExpr->isInstantiationDependent() ||
+ SecondExpr->isInstantiationDependent())) {
+ auto *FirstBinOp = dyn_cast<BinaryOperator>(First);
+ if (!FirstBinOp || FirstBinOp->getOpcode() != BO_Assign) {
+ ErrorFound = NotAnAssignmentOp;
+ NoteLoc = ErrorLoc = FirstBinOp ? FirstBinOp->getOperatorLoc()
+ : First->getLocStart();
+ NoteRange = ErrorRange = FirstBinOp
+ ? FirstBinOp->getSourceRange()
+ : SourceRange(ErrorLoc, ErrorLoc);
+ } else {
+ auto *SecondBinOp = dyn_cast<BinaryOperator>(Second);
+ if (!SecondBinOp || SecondBinOp->getOpcode() != BO_Assign) {
+ ErrorFound = NotAnAssignmentOp;
+ NoteLoc = ErrorLoc = SecondBinOp
+ ? SecondBinOp->getOperatorLoc()
+ : Second->getLocStart();
+ NoteRange = ErrorRange =
+ SecondBinOp ? SecondBinOp->getSourceRange()
+ : SourceRange(ErrorLoc, ErrorLoc);
+ } else {
+ auto *PossibleXRHSInFirst =
+ FirstBinOp->getRHS()->IgnoreParenImpCasts();
+ auto *PossibleXLHSInSecond =
+ SecondBinOp->getLHS()->IgnoreParenImpCasts();
+ llvm::FoldingSetNodeID X1Id, X2Id;
+ PossibleXRHSInFirst->Profile(X1Id, Context,
+ /*Canonical=*/true);
+ PossibleXLHSInSecond->Profile(X2Id, Context,
+ /*Canonical=*/true);
+ IsUpdateExprFound = X1Id == X2Id;
+ if (IsUpdateExprFound) {
+ V = FirstBinOp->getLHS();
+ X = SecondBinOp->getLHS();
+ E = SecondBinOp->getRHS();
+ UE = nullptr;
+ IsXLHSInRHSPart = false;
+ IsPostfixUpdate = true;
+ } else {
+ ErrorFound = NotASpecificExpression;
+ ErrorLoc = FirstBinOp->getExprLoc();
+ ErrorRange = FirstBinOp->getSourceRange();
+ NoteLoc = SecondBinOp->getLHS()->getExprLoc();
+ NoteRange = SecondBinOp->getRHS()->getSourceRange();
+ }
+ }
+ }
+ }
+ }
+ } else {
+ NoteLoc = ErrorLoc = Body->getLocStart();
+ NoteRange = ErrorRange =
+ SourceRange(Body->getLocStart(), Body->getLocStart());
+ ErrorFound = NotTwoSubstatements;
+ }
+ } else {
+ NoteLoc = ErrorLoc = Body->getLocStart();
+ NoteRange = ErrorRange =
+ SourceRange(Body->getLocStart(), Body->getLocStart());
+ ErrorFound = NotACompoundStatement;
+ }
+ if (ErrorFound != NoError) {
+ Diag(ErrorLoc, diag::err_omp_atomic_capture_not_compound_statement)
+ << ErrorRange;
+ Diag(NoteLoc, diag::note_omp_atomic_capture) << ErrorFound << NoteRange;
+ return StmtError();
+ } else if (CurContext->isDependentContext()) {
+ UE = V = E = X = nullptr;
+ }
+ }
+ }
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ return OMPAtomicDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
+ X, V, E, UE, IsXLHSInRHSPart,
+ IsPostfixUpdate);
+}
+
+StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ // OpenMP [2.16, Nesting of Regions]
+ // If specified, a teams construct must be contained within a target
+ // construct. That target construct must contain no statements or directives
+ // outside of the teams construct.
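+  // For example, a 'target' region whose body contains any statement besides
+  // the nested 'teams' directive is diagnosed below.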
+ if (DSAStack->hasInnerTeamsRegion()) {
+ auto S = AStmt->IgnoreContainers(/*IgnoreCaptured*/ true);
+ bool OMPTeamsFound = true;
+ if (auto *CS = dyn_cast<CompoundStmt>(S)) {
+ auto I = CS->body_begin();
+ while (I != CS->body_end()) {
+ auto OED = dyn_cast<OMPExecutableDirective>(*I);
+ if (!OED || !isOpenMPTeamsDirective(OED->getDirectiveKind())) {
+ OMPTeamsFound = false;
+ break;
+ }
+ ++I;
+ }
+      assert(I != CS->body_end() && "Statement not found");
+ S = *I;
+ }
+ if (!OMPTeamsFound) {
+ Diag(StartLoc, diag::err_omp_target_contains_not_only_teams);
+ Diag(DSAStack->getInnerTeamsRegionLoc(),
+ diag::note_omp_nested_teams_construct_here);
+ Diag(S->getLocStart(), diag::note_omp_nested_statement_here)
+ << isa<OMPExecutableDirective>(S);
+ return StmtError();
+ }
+ }
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ return OMPTargetDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
+}
+
+StmtResult Sema::ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ return OMPTargetDataDirective::Create(Context, StartLoc, EndLoc, Clauses,
+ AStmt);
+}
+
+StmtResult Sema::ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ return OMPTeamsDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
+}
+
+StmtResult
+Sema::ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ OpenMPDirectiveKind CancelRegion) {
+ if (CancelRegion != OMPD_parallel && CancelRegion != OMPD_for &&
+ CancelRegion != OMPD_sections && CancelRegion != OMPD_taskgroup) {
+ Diag(StartLoc, diag::err_omp_wrong_cancel_region)
+ << getOpenMPDirectiveName(CancelRegion);
+ return StmtError();
+ }
+ if (DSAStack->isParentNowaitRegion()) {
+ Diag(StartLoc, diag::err_omp_parent_cancel_region_nowait) << 0;
+ return StmtError();
+ }
+ if (DSAStack->isParentOrderedRegion()) {
+ Diag(StartLoc, diag::err_omp_parent_cancel_region_ordered) << 0;
+ return StmtError();
+ }
+ return OMPCancellationPointDirective::Create(Context, StartLoc, EndLoc,
+ CancelRegion);
+}
+
+StmtResult Sema::ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ OpenMPDirectiveKind CancelRegion) {
+ if (CancelRegion != OMPD_parallel && CancelRegion != OMPD_for &&
+ CancelRegion != OMPD_sections && CancelRegion != OMPD_taskgroup) {
+ Diag(StartLoc, diag::err_omp_wrong_cancel_region)
+ << getOpenMPDirectiveName(CancelRegion);
+ return StmtError();
+ }
+ if (DSAStack->isParentNowaitRegion()) {
+ Diag(StartLoc, diag::err_omp_parent_cancel_region_nowait) << 1;
+ return StmtError();
+ }
+ if (DSAStack->isParentOrderedRegion()) {
+ Diag(StartLoc, diag::err_omp_parent_cancel_region_ordered) << 1;
+ return StmtError();
+ }
+ DSAStack->setParentCancelRegion(/*Cancel=*/true);
+ return OMPCancelDirective::Create(Context, StartLoc, EndLoc, Clauses,
+ CancelRegion);
+}
+
+static bool checkGrainsizeNumTasksClauses(Sema &S,
+ ArrayRef<OMPClause *> Clauses) {
+ OMPClause *PrevClause = nullptr;
+ bool ErrorFound = false;
+ for (auto *C : Clauses) {
+ if (C->getClauseKind() == OMPC_grainsize ||
+ C->getClauseKind() == OMPC_num_tasks) {
+ if (!PrevClause)
+ PrevClause = C;
+ else if (PrevClause->getClauseKind() != C->getClauseKind()) {
+ S.Diag(C->getLocStart(),
+ diag::err_omp_grainsize_num_tasks_mutually_exclusive)
+ << getOpenMPClauseName(C->getClauseKind())
+ << getOpenMPClauseName(PrevClause->getClauseKind());
+ S.Diag(PrevClause->getLocStart(),
+ diag::note_omp_previous_grainsize_num_tasks)
+ << getOpenMPClauseName(PrevClause->getClauseKind());
+ ErrorFound = true;
+ }
+ }
+ }
+ return ErrorFound;
+}
+
+StmtResult Sema::ActOnOpenMPTaskLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ OMPLoopDirective::HelperExprs B;
+  // If a 'collapse' or 'ordered' clause is present with a number of loops,
+  // it defines the number of associated nested loops.
+ unsigned NestedLoopCount =
+ CheckOpenMPLoop(OMPD_taskloop, getCollapseNumberExpr(Clauses),
+ /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for loop exprs were not built");
+
+ // OpenMP, [2.9.2 taskloop Construct, Restrictions]
+ // The grainsize clause and num_tasks clause are mutually exclusive and may
+ // not appear on the same taskloop directive.
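+  // For example, '#pragma omp taskloop grainsize(4) num_tasks(2)' is
+  // rejected.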
+ if (checkGrainsizeNumTasksClauses(*this, Clauses))
+ return StmtError();
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPTaskLoopDirective::Create(Context, StartLoc, EndLoc,
+ NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPTaskLoopSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ OMPLoopDirective::HelperExprs B;
+  // If a 'collapse' or 'ordered' clause is present with a number of loops,
+  // it defines the number of associated nested loops.
+ unsigned NestedLoopCount =
+ CheckOpenMPLoop(OMPD_taskloop_simd, getCollapseNumberExpr(Clauses),
+ /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for loop exprs were not built");
+
+ // OpenMP, [2.9.2 taskloop Construct, Restrictions]
+ // The grainsize clause and num_tasks clause are mutually exclusive and may
+ // not appear on the same taskloop directive.
+ if (checkGrainsizeNumTasksClauses(*this, Clauses))
+ return StmtError();
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPTaskLoopSimdDirective::Create(Context, StartLoc, EndLoc,
+ NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPDistributeDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ OMPLoopDirective::HelperExprs B;
+  // If a 'collapse' clause is present with a number of loops, it defines
+  // the number of associated nested loops.
+ unsigned NestedLoopCount =
+ CheckOpenMPLoop(OMPD_distribute, getCollapseNumberExpr(Clauses),
+ nullptr /*ordered not a clause on distribute*/, AStmt,
+ *this, *DSAStack, VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for loop exprs were not built");
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPDistributeDirective::Create(Context, StartLoc, EndLoc,
+ NestedLoopCount, Clauses, AStmt, B);
+}
+
+OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ OMPClause *Res = nullptr;
+ switch (Kind) {
+ case OMPC_final:
+ Res = ActOnOpenMPFinalClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_num_threads:
+ Res = ActOnOpenMPNumThreadsClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_safelen:
+ Res = ActOnOpenMPSafelenClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_simdlen:
+ Res = ActOnOpenMPSimdlenClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_collapse:
+ Res = ActOnOpenMPCollapseClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_ordered:
+ Res = ActOnOpenMPOrderedClause(StartLoc, EndLoc, LParenLoc, Expr);
+ break;
+ case OMPC_device:
+ Res = ActOnOpenMPDeviceClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_num_teams:
+ Res = ActOnOpenMPNumTeamsClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_thread_limit:
+ Res = ActOnOpenMPThreadLimitClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_priority:
+ Res = ActOnOpenMPPriorityClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_grainsize:
+ Res = ActOnOpenMPGrainsizeClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_num_tasks:
+ Res = ActOnOpenMPNumTasksClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_hint:
+ Res = ActOnOpenMPHintClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_if:
+ case OMPC_default:
+ case OMPC_proc_bind:
+ case OMPC_schedule:
+ case OMPC_private:
+ case OMPC_firstprivate:
+ case OMPC_lastprivate:
+ case OMPC_shared:
+ case OMPC_reduction:
+ case OMPC_linear:
+ case OMPC_aligned:
+ case OMPC_copyin:
+ case OMPC_copyprivate:
+ case OMPC_nowait:
+ case OMPC_untied:
+ case OMPC_mergeable:
+ case OMPC_threadprivate:
+ case OMPC_flush:
+ case OMPC_read:
+ case OMPC_write:
+ case OMPC_update:
+ case OMPC_capture:
+ case OMPC_seq_cst:
+ case OMPC_depend:
+ case OMPC_threads:
+ case OMPC_simd:
+ case OMPC_map:
+ case OMPC_nogroup:
+ case OMPC_unknown:
+ llvm_unreachable("Clause is not allowed.");
+ }
+ return Res;
+}
+
+OMPClause *Sema::ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
+ Expr *Condition, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation NameModifierLoc,
+ SourceLocation ColonLoc,
+ SourceLocation EndLoc) {
+ Expr *ValExpr = Condition;
+ if (!Condition->isValueDependent() && !Condition->isTypeDependent() &&
+ !Condition->isInstantiationDependent() &&
+ !Condition->containsUnexpandedParameterPack()) {
+ ExprResult Val = ActOnBooleanCondition(DSAStack->getCurScope(),
+ Condition->getExprLoc(), Condition);
+ if (Val.isInvalid())
+ return nullptr;
+
+ ValExpr = Val.get();
+ }
+
+ return new (Context) OMPIfClause(NameModifier, ValExpr, StartLoc, LParenLoc,
+ NameModifierLoc, ColonLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPFinalClause(Expr *Condition,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ Expr *ValExpr = Condition;
+ if (!Condition->isValueDependent() && !Condition->isTypeDependent() &&
+ !Condition->isInstantiationDependent() &&
+ !Condition->containsUnexpandedParameterPack()) {
+ ExprResult Val = ActOnBooleanCondition(DSAStack->getCurScope(),
+ Condition->getExprLoc(), Condition);
+ if (Val.isInvalid())
+ return nullptr;
+
+ ValExpr = Val.get();
+ }
+
+ return new (Context) OMPFinalClause(ValExpr, StartLoc, LParenLoc, EndLoc);
+}
+ExprResult Sema::PerformOpenMPImplicitIntegerConversion(SourceLocation Loc,
+ Expr *Op) {
+ if (!Op)
+ return ExprError();
+
+ class IntConvertDiagnoser : public ICEConvertDiagnoser {
+ public:
+ IntConvertDiagnoser()
+ : ICEConvertDiagnoser(/*AllowScopedEnumerations*/ false, false, true) {}
+ SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
+ QualType T) override {
+ return S.Diag(Loc, diag::err_omp_not_integral) << T;
+ }
+ SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc,
+ QualType T) override {
+ return S.Diag(Loc, diag::err_omp_incomplete_type) << T;
+ }
+ SemaDiagnosticBuilder diagnoseExplicitConv(Sema &S, SourceLocation Loc,
+ QualType T,
+ QualType ConvTy) override {
+ return S.Diag(Loc, diag::err_omp_explicit_conversion) << T << ConvTy;
+ }
+ SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv,
+ QualType ConvTy) override {
+ return S.Diag(Conv->getLocation(), diag::note_omp_conversion_here)
+ << ConvTy->isEnumeralType() << ConvTy;
+ }
+ SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
+ QualType T) override {
+ return S.Diag(Loc, diag::err_omp_ambiguous_conversion) << T;
+ }
+ SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv,
+ QualType ConvTy) override {
+ return S.Diag(Conv->getLocation(), diag::note_omp_conversion_here)
+ << ConvTy->isEnumeralType() << ConvTy;
+ }
+ SemaDiagnosticBuilder diagnoseConversion(Sema &, SourceLocation, QualType,
+ QualType) override {
+ llvm_unreachable("conversion functions are permitted");
+ }
+ } ConvertDiagnoser;
+ return PerformContextualImplicitConversion(Loc, Op, ConvertDiagnoser);
+}
+
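+/// \brief Apply the OpenMP implicit integer conversion to ValExpr and, if the
+/// result is an integer constant, check that it is non-negative (or strictly
+/// positive when StrictlyPositive is set); returns false and emits a
+/// diagnostic on violation. Dependent and non-constant values are accepted.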
+static bool IsNonNegativeIntegerValue(Expr *&ValExpr, Sema &SemaRef,
+ OpenMPClauseKind CKind,
+ bool StrictlyPositive) {
+ if (!ValExpr->isTypeDependent() && !ValExpr->isValueDependent() &&
+ !ValExpr->isInstantiationDependent()) {
+ SourceLocation Loc = ValExpr->getExprLoc();
+ ExprResult Value =
+ SemaRef.PerformOpenMPImplicitIntegerConversion(Loc, ValExpr);
+ if (Value.isInvalid())
+ return false;
+
+ ValExpr = Value.get();
+    // The expression must evaluate to a non-negative integer value; when
+    // StrictlyPositive is set, it must be strictly positive.
+ llvm::APSInt Result;
+ if (ValExpr->isIntegerConstantExpr(Result, SemaRef.Context) &&
+ Result.isSigned() &&
+ !((!StrictlyPositive && Result.isNonNegative()) ||
+ (StrictlyPositive && Result.isStrictlyPositive()))) {
+ SemaRef.Diag(Loc, diag::err_omp_negative_expression_in_clause)
+ << getOpenMPClauseName(CKind) << (StrictlyPositive ? 1 : 0)
+ << ValExpr->getSourceRange();
+ return false;
+ }
+ }
+ return true;
+}
+
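+// Builds the 'num_threads' clause, e.g. '#pragma omp parallel num_threads(4)'.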
+OMPClause *Sema::ActOnOpenMPNumThreadsClause(Expr *NumThreads,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ Expr *ValExpr = NumThreads;
+
+ // OpenMP [2.5, Restrictions]
+ // The num_threads expression must evaluate to a positive integer value.
+ if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_num_threads,
+ /*StrictlyPositive=*/true))
+ return nullptr;
+
+ return new (Context)
+ OMPNumThreadsClause(ValExpr, StartLoc, LParenLoc, EndLoc);
+}
+
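+// Checks that a clause argument is an integer constant expression that is
+// positive (or merely non-negative when StrictlyPositive is false). Also
+// diagnoses a non-power-of-two 'aligned' argument and records the number of
+// associated loops for 'collapse' and 'ordered'.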
+ExprResult Sema::VerifyPositiveIntegerConstantInClause(Expr *E,
+ OpenMPClauseKind CKind,
+ bool StrictlyPositive) {
+ if (!E)
+ return ExprError();
+ if (E->isValueDependent() || E->isTypeDependent() ||
+ E->isInstantiationDependent() || E->containsUnexpandedParameterPack())
+ return E;
+ llvm::APSInt Result;
+ ExprResult ICE = VerifyIntegerConstantExpression(E, &Result);
+ if (ICE.isInvalid())
+ return ExprError();
+ if ((StrictlyPositive && !Result.isStrictlyPositive()) ||
+ (!StrictlyPositive && !Result.isNonNegative())) {
+ Diag(E->getExprLoc(), diag::err_omp_negative_expression_in_clause)
+ << getOpenMPClauseName(CKind) << (StrictlyPositive ? 1 : 0)
+ << E->getSourceRange();
+ return ExprError();
+ }
+ if (CKind == OMPC_aligned && !Result.isPowerOf2()) {
+ Diag(E->getExprLoc(), diag::warn_omp_alignment_not_power_of_two)
+ << E->getSourceRange();
+ return ExprError();
+ }
+ if (CKind == OMPC_collapse && DSAStack->getAssociatedLoops() == 1)
+ DSAStack->setAssociatedLoops(Result.getExtValue());
+ else if (CKind == OMPC_ordered)
+ DSAStack->setAssociatedLoops(Result.getExtValue());
+ return ICE;
+}
+
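+// Builds the 'safelen' clause, e.g. '#pragma omp simd safelen(8)'.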
+OMPClause *Sema::ActOnOpenMPSafelenClause(Expr *Len, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ // OpenMP [2.8.1, simd construct, Description]
+ // The parameter of the safelen clause must be a constant
+ // positive integer expression.
+ ExprResult Safelen = VerifyPositiveIntegerConstantInClause(Len, OMPC_safelen);
+ if (Safelen.isInvalid())
+ return nullptr;
+ return new (Context)
+ OMPSafelenClause(Safelen.get(), StartLoc, LParenLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPSimdlenClause(Expr *Len, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ // OpenMP [2.8.1, simd construct, Description]
+ // The parameter of the simdlen clause must be a constant
+ // positive integer expression.
+ ExprResult Simdlen = VerifyPositiveIntegerConstantInClause(Len, OMPC_simdlen);
+ if (Simdlen.isInvalid())
+ return nullptr;
+ return new (Context)
+ OMPSimdlenClause(Simdlen.get(), StartLoc, LParenLoc, EndLoc);
+}
+
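+// Builds the 'collapse' clause, e.g. '#pragma omp for collapse(2)'; the
+// argument determines how many nested loops are associated with the
+// construct.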
+OMPClause *Sema::ActOnOpenMPCollapseClause(Expr *NumForLoops,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ // OpenMP [2.7.1, loop construct, Description]
+ // OpenMP [2.8.1, simd construct, Description]
+ // OpenMP [2.9.6, distribute construct, Description]
+ // The parameter of the collapse clause must be a constant
+ // positive integer expression.
+ ExprResult NumForLoopsResult =
+ VerifyPositiveIntegerConstantInClause(NumForLoops, OMPC_collapse);
+ if (NumForLoopsResult.isInvalid())
+ return nullptr;
+ return new (Context)
+ OMPCollapseClause(NumForLoopsResult.get(), StartLoc, LParenLoc, EndLoc);
+}
+
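+// Builds the 'ordered' clause, with or without a parameter, e.g.
+// '#pragma omp for ordered(2)', and marks the current region as ordered on
+// the DSA stack.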
+OMPClause *Sema::ActOnOpenMPOrderedClause(SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ SourceLocation LParenLoc,
+ Expr *NumForLoops) {
+ // OpenMP [2.7.1, loop construct, Description]
+ // OpenMP [2.8.1, simd construct, Description]
+ // OpenMP [2.9.6, distribute construct, Description]
+ // The parameter of the ordered clause must be a constant
+ // positive integer expression if any.
+ if (NumForLoops && LParenLoc.isValid()) {
+ ExprResult NumForLoopsResult =
+ VerifyPositiveIntegerConstantInClause(NumForLoops, OMPC_ordered);
+ if (NumForLoopsResult.isInvalid())
+ return nullptr;
+ NumForLoops = NumForLoopsResult.get();
+ } else
+ NumForLoops = nullptr;
+ DSAStack->setOrderedRegion(/*IsOrdered=*/true, NumForLoops);
+ return new (Context)
+ OMPOrderedClause(NumForLoops, StartLoc, LParenLoc, EndLoc);
+}
+
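+// Dispatcher for clauses whose argument is a single enumeration value,
+// currently 'default' and 'proc_bind'; all other clause kinds must not reach
+// this entry point.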
+OMPClause *Sema::ActOnOpenMPSimpleClause(
+ OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc,
+ SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) {
+ OMPClause *Res = nullptr;
+ switch (Kind) {
+ case OMPC_default:
+ Res =
+ ActOnOpenMPDefaultClause(static_cast<OpenMPDefaultClauseKind>(Argument),
+ ArgumentLoc, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_proc_bind:
+ Res = ActOnOpenMPProcBindClause(
+ static_cast<OpenMPProcBindClauseKind>(Argument), ArgumentLoc, StartLoc,
+ LParenLoc, EndLoc);
+ break;
+ case OMPC_if:
+ case OMPC_final:
+ case OMPC_num_threads:
+ case OMPC_safelen:
+ case OMPC_simdlen:
+ case OMPC_collapse:
+ case OMPC_schedule:
+ case OMPC_private:
+ case OMPC_firstprivate:
+ case OMPC_lastprivate:
+ case OMPC_shared:
+ case OMPC_reduction:
+ case OMPC_linear:
+ case OMPC_aligned:
+ case OMPC_copyin:
+ case OMPC_copyprivate:
+ case OMPC_ordered:
+ case OMPC_nowait:
+ case OMPC_untied:
+ case OMPC_mergeable:
+ case OMPC_threadprivate:
+ case OMPC_flush:
+ case OMPC_read:
+ case OMPC_write:
+ case OMPC_update:
+ case OMPC_capture:
+ case OMPC_seq_cst:
+ case OMPC_depend:
+ case OMPC_device:
+ case OMPC_threads:
+ case OMPC_simd:
+ case OMPC_map:
+ case OMPC_num_teams:
+ case OMPC_thread_limit:
+ case OMPC_priority:
+ case OMPC_grainsize:
+ case OMPC_nogroup:
+ case OMPC_num_tasks:
+ case OMPC_hint:
+ case OMPC_unknown:
+ llvm_unreachable("Clause is not allowed.");
+ }
+ return Res;
+}
+
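+// Builds a diagnostic string of the form "'a', 'b' or 'c'" listing the
+// values accepted by a simple clause, optionally skipping excluded values.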
+static std::string
+getListOfPossibleValues(OpenMPClauseKind K, unsigned First, unsigned Last,
+ ArrayRef<unsigned> Exclude = llvm::None) {
+ std::string Values;
+ unsigned Bound = Last >= 2 ? Last - 2 : 0;
+ unsigned Skipped = Exclude.size();
+ auto S = Exclude.begin(), E = Exclude.end();
+ for (unsigned i = First; i < Last; ++i) {
+ if (std::find(S, E, i) != E) {
+ --Skipped;
+ continue;
+ }
+ Values += "'";
+ Values += getOpenMPSimpleClauseTypeName(K, i);
+ Values += "'";
+ if (i == Bound - Skipped)
+ Values += " or ";
+ else if (i != Bound + 1 - Skipped)
+ Values += ", ";
+ }
+ return Values;
+}
+
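+// Builds the 'default' clause, e.g. '#pragma omp parallel default(none)',
+// and records the selected data-sharing default on the DSA stack.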
+OMPClause *Sema::ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
+ SourceLocation KindKwLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ if (Kind == OMPC_DEFAULT_unknown) {
+ static_assert(OMPC_DEFAULT_unknown > 0,
+ "OMPC_DEFAULT_unknown not greater than 0");
+ Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
+ << getListOfPossibleValues(OMPC_default, /*First=*/0,
+ /*Last=*/OMPC_DEFAULT_unknown)
+ << getOpenMPClauseName(OMPC_default);
+ return nullptr;
+ }
+ switch (Kind) {
+ case OMPC_DEFAULT_none:
+ DSAStack->setDefaultDSANone(KindKwLoc);
+ break;
+ case OMPC_DEFAULT_shared:
+ DSAStack->setDefaultDSAShared(KindKwLoc);
+ break;
+ case OMPC_DEFAULT_unknown:
+ llvm_unreachable("Clause kind is not allowed.");
+ break;
+ }
+ return new (Context)
+ OMPDefaultClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
+ SourceLocation KindKwLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ if (Kind == OMPC_PROC_BIND_unknown) {
+ Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
+ << getListOfPossibleValues(OMPC_proc_bind, /*First=*/0,
+ /*Last=*/OMPC_PROC_BIND_unknown)
+ << getOpenMPClauseName(OMPC_proc_bind);
+ return nullptr;
+ }
+ return new (Context)
+ OMPProcBindClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
+ OpenMPClauseKind Kind, ArrayRef<unsigned> Argument, Expr *Expr,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ ArrayRef<SourceLocation> ArgumentLoc, SourceLocation DelimLoc,
+ SourceLocation EndLoc) {
+ OMPClause *Res = nullptr;
+ switch (Kind) {
+ case OMPC_schedule:
+ enum { Modifier1, Modifier2, ScheduleKind, NumberOfElements };
+ assert(Argument.size() == NumberOfElements &&
+ ArgumentLoc.size() == NumberOfElements);
+ Res = ActOnOpenMPScheduleClause(
+ static_cast<OpenMPScheduleClauseModifier>(Argument[Modifier1]),
+ static_cast<OpenMPScheduleClauseModifier>(Argument[Modifier2]),
+ static_cast<OpenMPScheduleClauseKind>(Argument[ScheduleKind]), Expr,
+ StartLoc, LParenLoc, ArgumentLoc[Modifier1], ArgumentLoc[Modifier2],
+ ArgumentLoc[ScheduleKind], DelimLoc, EndLoc);
+ break;
+ case OMPC_if:
+ assert(Argument.size() == 1 && ArgumentLoc.size() == 1);
+ Res = ActOnOpenMPIfClause(static_cast<OpenMPDirectiveKind>(Argument.back()),
+ Expr, StartLoc, LParenLoc, ArgumentLoc.back(),
+ DelimLoc, EndLoc);
+ break;
+ case OMPC_final:
+ case OMPC_num_threads:
+ case OMPC_safelen:
+ case OMPC_simdlen:
+ case OMPC_collapse:
+ case OMPC_default:
+ case OMPC_proc_bind:
+ case OMPC_private:
+ case OMPC_firstprivate:
+ case OMPC_lastprivate:
+ case OMPC_shared:
+ case OMPC_reduction:
+ case OMPC_linear:
+ case OMPC_aligned:
+ case OMPC_copyin:
+ case OMPC_copyprivate:
+ case OMPC_ordered:
+ case OMPC_nowait:
+ case OMPC_untied:
+ case OMPC_mergeable:
+ case OMPC_threadprivate:
+ case OMPC_flush:
+ case OMPC_read:
+ case OMPC_write:
+ case OMPC_update:
+ case OMPC_capture:
+ case OMPC_seq_cst:
+ case OMPC_depend:
+ case OMPC_device:
+ case OMPC_threads:
+ case OMPC_simd:
+ case OMPC_map:
+ case OMPC_num_teams:
+ case OMPC_thread_limit:
+ case OMPC_priority:
+ case OMPC_grainsize:
+ case OMPC_nogroup:
+ case OMPC_num_tasks:
+ case OMPC_hint:
+ case OMPC_unknown:
+ llvm_unreachable("Clause is not allowed.");
+ }
+ return Res;
+}
+
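+// Diagnoses an unknown schedule modifier, excluding from the suggested
+// values any modifier that conflicts with the one already specified.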
+static bool checkScheduleModifiers(Sema &S, OpenMPScheduleClauseModifier M1,
+ OpenMPScheduleClauseModifier M2,
+ SourceLocation M1Loc, SourceLocation M2Loc) {
+ if (M1 == OMPC_SCHEDULE_MODIFIER_unknown && M1Loc.isValid()) {
+ SmallVector<unsigned, 2> Excluded;
+ if (M2 != OMPC_SCHEDULE_MODIFIER_unknown)
+ Excluded.push_back(M2);
+ if (M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic)
+ Excluded.push_back(OMPC_SCHEDULE_MODIFIER_monotonic);
+ if (M2 == OMPC_SCHEDULE_MODIFIER_monotonic)
+ Excluded.push_back(OMPC_SCHEDULE_MODIFIER_nonmonotonic);
+ S.Diag(M1Loc, diag::err_omp_unexpected_clause_value)
+ << getListOfPossibleValues(OMPC_schedule,
+ /*First=*/OMPC_SCHEDULE_MODIFIER_unknown + 1,
+ /*Last=*/OMPC_SCHEDULE_MODIFIER_last,
+ Excluded)
+ << getOpenMPClauseName(OMPC_schedule);
+ return true;
+ }
+ return false;
+}
+
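+// Builds the 'schedule' clause, including the OpenMP 4.5 schedule modifiers,
+// e.g. '#pragma omp for schedule(nonmonotonic: dynamic, 4)'. A non-constant
+// chunk size in a parallel or task region gets a captured helper variable.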
+OMPClause *Sema::ActOnOpenMPScheduleClause(
+ OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
+ OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
+ SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc) {
+ if (checkScheduleModifiers(*this, M1, M2, M1Loc, M2Loc) ||
+ checkScheduleModifiers(*this, M2, M1, M2Loc, M1Loc))
+ return nullptr;
+ // OpenMP, 2.7.1, Loop Construct, Restrictions
+ // Either the monotonic modifier or the nonmonotonic modifier can be specified
+ // but not both.
+ if ((M1 == M2 && M1 != OMPC_SCHEDULE_MODIFIER_unknown) ||
+ (M1 == OMPC_SCHEDULE_MODIFIER_monotonic &&
+ M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic) ||
+ (M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic &&
+ M2 == OMPC_SCHEDULE_MODIFIER_monotonic)) {
+ Diag(M2Loc, diag::err_omp_unexpected_schedule_modifier)
+ << getOpenMPSimpleClauseTypeName(OMPC_schedule, M2)
+ << getOpenMPSimpleClauseTypeName(OMPC_schedule, M1);
+ return nullptr;
+ }
+ if (Kind == OMPC_SCHEDULE_unknown) {
+ std::string Values;
+ if (M1Loc.isInvalid() && M2Loc.isInvalid()) {
+ unsigned Exclude[] = {OMPC_SCHEDULE_unknown};
+ Values = getListOfPossibleValues(OMPC_schedule, /*First=*/0,
+ /*Last=*/OMPC_SCHEDULE_MODIFIER_last,
+ Exclude);
+ } else {
+ Values = getListOfPossibleValues(OMPC_schedule, /*First=*/0,
+ /*Last=*/OMPC_SCHEDULE_unknown);
+ }
+ Diag(KindLoc, diag::err_omp_unexpected_clause_value)
+ << Values << getOpenMPClauseName(OMPC_schedule);
+ return nullptr;
+ }
+ // OpenMP, 2.7.1, Loop Construct, Restrictions
+ // The nonmonotonic modifier can only be specified with schedule(dynamic) or
+ // schedule(guided).
+ if ((M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
+ M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
+ Kind != OMPC_SCHEDULE_dynamic && Kind != OMPC_SCHEDULE_guided) {
+ Diag(M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ? M1Loc : M2Loc,
+ diag::err_omp_schedule_nonmonotonic_static);
+ return nullptr;
+ }
+ Expr *ValExpr = ChunkSize;
+ Expr *HelperValExpr = nullptr;
+ if (ChunkSize) {
+ if (!ChunkSize->isValueDependent() && !ChunkSize->isTypeDependent() &&
+ !ChunkSize->isInstantiationDependent() &&
+ !ChunkSize->containsUnexpandedParameterPack()) {
+ SourceLocation ChunkSizeLoc = ChunkSize->getLocStart();
+ ExprResult Val =
+ PerformOpenMPImplicitIntegerConversion(ChunkSizeLoc, ChunkSize);
+ if (Val.isInvalid())
+ return nullptr;
+
+ ValExpr = Val.get();
+
+ // OpenMP [2.7.1, Restrictions]
+ // chunk_size must be a loop invariant integer expression with a positive
+ // value.
+ llvm::APSInt Result;
+ if (ValExpr->isIntegerConstantExpr(Result, Context)) {
+ if (Result.isSigned() && !Result.isStrictlyPositive()) {
+ Diag(ChunkSizeLoc, diag::err_omp_negative_expression_in_clause)
+ << "schedule" << 1 << ChunkSize->getSourceRange();
+ return nullptr;
+ }
+ } else if (isParallelOrTaskRegion(DSAStack->getCurrentDirective())) {
+ auto *ImpVar = buildVarDecl(*this, ChunkSize->getExprLoc(),
+ ChunkSize->getType(), ".chunk.");
+ auto *ImpVarRef = buildDeclRefExpr(*this, ImpVar, ChunkSize->getType(),
+ ChunkSize->getExprLoc(),
+ /*RefersToCapture=*/true);
+ HelperValExpr = ImpVarRef;
+ }
+ }
+ }
+
+ return new (Context)
+ OMPScheduleClause(StartLoc, LParenLoc, KindLoc, CommaLoc, EndLoc, Kind,
+ ValExpr, HelperValExpr, M1, M1Loc, M2, M2Loc);
+}
+
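+// Dispatcher for clauses that take no argument ('nowait', 'untied',
+// 'mergeable', the atomic clauses, and so on).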
+OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ OMPClause *Res = nullptr;
+ switch (Kind) {
+ case OMPC_ordered:
+ Res = ActOnOpenMPOrderedClause(StartLoc, EndLoc);
+ break;
+ case OMPC_nowait:
+ Res = ActOnOpenMPNowaitClause(StartLoc, EndLoc);
+ break;
+ case OMPC_untied:
+ Res = ActOnOpenMPUntiedClause(StartLoc, EndLoc);
+ break;
+ case OMPC_mergeable:
+ Res = ActOnOpenMPMergeableClause(StartLoc, EndLoc);
+ break;
+ case OMPC_read:
+ Res = ActOnOpenMPReadClause(StartLoc, EndLoc);
+ break;
+ case OMPC_write:
+ Res = ActOnOpenMPWriteClause(StartLoc, EndLoc);
+ break;
+ case OMPC_update:
+ Res = ActOnOpenMPUpdateClause(StartLoc, EndLoc);
+ break;
+ case OMPC_capture:
+ Res = ActOnOpenMPCaptureClause(StartLoc, EndLoc);
+ break;
+ case OMPC_seq_cst:
+ Res = ActOnOpenMPSeqCstClause(StartLoc, EndLoc);
+ break;
+ case OMPC_threads:
+ Res = ActOnOpenMPThreadsClause(StartLoc, EndLoc);
+ break;
+ case OMPC_simd:
+ Res = ActOnOpenMPSIMDClause(StartLoc, EndLoc);
+ break;
+ case OMPC_nogroup:
+ Res = ActOnOpenMPNogroupClause(StartLoc, EndLoc);
+ break;
+ case OMPC_if:
+ case OMPC_final:
+ case OMPC_num_threads:
+ case OMPC_safelen:
+ case OMPC_simdlen:
+ case OMPC_collapse:
+ case OMPC_schedule:
+ case OMPC_private:
+ case OMPC_firstprivate:
+ case OMPC_lastprivate:
+ case OMPC_shared:
+ case OMPC_reduction:
+ case OMPC_linear:
+ case OMPC_aligned:
+ case OMPC_copyin:
+ case OMPC_copyprivate:
+ case OMPC_default:
+ case OMPC_proc_bind:
+ case OMPC_threadprivate:
+ case OMPC_flush:
+ case OMPC_depend:
+ case OMPC_device:
+ case OMPC_map:
+ case OMPC_num_teams:
+ case OMPC_thread_limit:
+ case OMPC_priority:
+ case OMPC_grainsize:
+ case OMPC_num_tasks:
+ case OMPC_hint:
+ case OMPC_unknown:
+ llvm_unreachable("Clause is not allowed.");
+ }
+ return Res;
+}
+
+OMPClause *Sema::ActOnOpenMPNowaitClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ DSAStack->setNowaitRegion();
+ return new (Context) OMPNowaitClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPUntiedClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPUntiedClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPMergeableClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPMergeableClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPReadClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPReadClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPWriteClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPWriteClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPUpdateClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPUpdateClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPCaptureClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPCaptureClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPSeqCstClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPThreadsClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPThreadsClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPSIMDClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPSIMDClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPNogroupClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPNogroupClause(StartLoc, EndLoc);
+}
+
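+// Dispatcher for clauses that take a variable list ('private', 'shared',
+// 'reduction', 'map', etc.); the remaining parameters carry the extra data
+// some of these clauses need (reduction identifier, dependency kind, map
+// type and so on).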
+OMPClause *Sema::ActOnOpenMPVarListClause(
+ OpenMPClauseKind Kind, ArrayRef<Expr *> VarList, Expr *TailExpr,
+ SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc,
+ SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
+ const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
+ OpenMPLinearClauseKind LinKind, OpenMPMapClauseKind MapTypeModifier,
+ OpenMPMapClauseKind MapType, SourceLocation DepLinMapLoc) {
+ OMPClause *Res = nullptr;
+ switch (Kind) {
+ case OMPC_private:
+ Res = ActOnOpenMPPrivateClause(VarList, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_firstprivate:
+ Res = ActOnOpenMPFirstprivateClause(VarList, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_lastprivate:
+ Res = ActOnOpenMPLastprivateClause(VarList, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_shared:
+ Res = ActOnOpenMPSharedClause(VarList, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_reduction:
+ Res = ActOnOpenMPReductionClause(VarList, StartLoc, LParenLoc, ColonLoc,
+ EndLoc, ReductionIdScopeSpec, ReductionId);
+ break;
+ case OMPC_linear:
+ Res = ActOnOpenMPLinearClause(VarList, TailExpr, StartLoc, LParenLoc,
+ LinKind, DepLinMapLoc, ColonLoc, EndLoc);
+ break;
+ case OMPC_aligned:
+ Res = ActOnOpenMPAlignedClause(VarList, TailExpr, StartLoc, LParenLoc,
+ ColonLoc, EndLoc);
+ break;
+ case OMPC_copyin:
+ Res = ActOnOpenMPCopyinClause(VarList, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_copyprivate:
+ Res = ActOnOpenMPCopyprivateClause(VarList, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_flush:
+ Res = ActOnOpenMPFlushClause(VarList, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_depend:
+ Res = ActOnOpenMPDependClause(DepKind, DepLinMapLoc, ColonLoc, VarList,
+ StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_map:
+ Res = ActOnOpenMPMapClause(MapTypeModifier, MapType, DepLinMapLoc, ColonLoc,
+ VarList, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_if:
+ case OMPC_final:
+ case OMPC_num_threads:
+ case OMPC_safelen:
+ case OMPC_simdlen:
+ case OMPC_collapse:
+ case OMPC_default:
+ case OMPC_proc_bind:
+ case OMPC_schedule:
+ case OMPC_ordered:
+ case OMPC_nowait:
+ case OMPC_untied:
+ case OMPC_mergeable:
+ case OMPC_threadprivate:
+ case OMPC_read:
+ case OMPC_write:
+ case OMPC_update:
+ case OMPC_capture:
+ case OMPC_seq_cst:
+ case OMPC_device:
+ case OMPC_threads:
+ case OMPC_simd:
+ case OMPC_num_teams:
+ case OMPC_thread_limit:
+ case OMPC_priority:
+ case OMPC_grainsize:
+ case OMPC_nogroup:
+ case OMPC_num_tasks:
+ case OMPC_hint:
+ case OMPC_unknown:
+ llvm_unreachable("Clause is not allowed.");
+ }
+ return Res;
+}
+
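+// Builds the 'private' clause, e.g. '#pragma omp parallel private(a, b)'.
+// For each valid list item, a default-initialized helper variable is built
+// to act as the private copy in CodeGen.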
+OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ SmallVector<Expr *, 8> Vars;
+ SmallVector<Expr *, 8> PrivateCopies;
+ for (auto &RefExpr : VarList) {
+ assert(RefExpr && "NULL expr in OpenMP private clause.");
+ if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
+ // It will be analyzed later.
+ Vars.push_back(RefExpr);
+ PrivateCopies.push_back(nullptr);
+ continue;
+ }
+
+ SourceLocation ELoc = RefExpr->getExprLoc();
+ // OpenMP [2.1, C/C++]
+ // A list item is a variable name.
+ // OpenMP [2.9.3.3, Restrictions, p.1]
+ // A variable that is part of another variable (as an array or
+ // structure element) cannot appear in a private clause.
+ DeclRefExpr *DE = dyn_cast_or_null<DeclRefExpr>(RefExpr);
+ if (!DE || !isa<VarDecl>(DE->getDecl())) {
+ Diag(ELoc, diag::err_omp_expected_var_name) << RefExpr->getSourceRange();
+ continue;
+ }
+ Decl *D = DE->getDecl();
+ VarDecl *VD = cast<VarDecl>(D);
+
+ QualType Type = VD->getType();
+ if (Type->isDependentType() || Type->isInstantiationDependentType()) {
+ // It will be analyzed later.
+ Vars.push_back(DE);
+ PrivateCopies.push_back(nullptr);
+ continue;
+ }
+
+ // OpenMP [2.9.3.3, Restrictions, C/C++, p.3]
+ // A variable that appears in a private clause must not have an incomplete
+ // type or a reference type.
+ if (RequireCompleteType(ELoc, Type,
+ diag::err_omp_private_incomplete_type)) {
+ continue;
+ }
+ Type = Type.getNonReferenceType();
+
+ // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
+ // in a Construct]
+ // Variables with the predetermined data-sharing attributes may not be
+ // listed in data-sharing attributes clauses, except for the cases
+ // listed below. For these exceptions only, listing a predetermined
+ // variable in a data-sharing attribute clause is allowed and overrides
+ // the variable's predetermined data-sharing attributes.
+ DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(VD, false);
+ if (DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_private) {
+ Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind)
+ << getOpenMPClauseName(OMPC_private);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+
+ // Variably modified types are not supported for tasks.
+ if (!Type->isAnyPointerType() && Type->isVariablyModifiedType() &&
+ DSAStack->getCurrentDirective() == OMPD_task) {
+ Diag(ELoc, diag::err_omp_variably_modified_type_not_supported)
+ << getOpenMPClauseName(OMPC_private) << Type
+ << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
+ bool IsDecl =
+ VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ continue;
+ }
+
+ // OpenMP [2.9.3.3, Restrictions, C/C++, p.1]
+ // A variable of class type (or array thereof) that appears in a private
+ // clause requires an accessible, unambiguous default constructor for the
+ // class type.
+ // Generate a helper private variable and initialize it with the default
+ // value. The address of the original variable is replaced by the address of
+ // the new private variable in CodeGen. This new variable is not added to
+ // IdResolver, so the code in the OpenMP region uses the original variable
+ // for proper diagnostics.
+ Type = Type.getUnqualifiedType();
+ auto VDPrivate = buildVarDecl(*this, DE->getExprLoc(), Type, VD->getName(),
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
+ ActOnUninitializedDecl(VDPrivate, /*TypeMayContainAuto=*/false);
+ if (VDPrivate->isInvalidDecl())
+ continue;
+ auto VDPrivateRefExpr = buildDeclRefExpr(
+ *this, VDPrivate, DE->getType().getUnqualifiedType(), DE->getExprLoc());
+
+ DSAStack->addDSA(VD, DE, OMPC_private);
+ Vars.push_back(DE);
+ PrivateCopies.push_back(VDPrivateRefExpr);
+ }
+
+ if (Vars.empty())
+ return nullptr;
+
+ return OMPPrivateClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars,
+ PrivateCopies);
+}
+
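+// RAII object that maps the uninitialized-self-reference warning to Ignored
+// at the given location and restores the previous mapping on destruction.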
+namespace {
+class DiagsUninitializedSeveretyRAII {
+private:
+ DiagnosticsEngine &Diags;
+ SourceLocation SavedLoc;
+ bool IsIgnored;
+
+public:
+ DiagsUninitializedSeveretyRAII(DiagnosticsEngine &Diags, SourceLocation Loc,
+ bool IsIgnored)
+ : Diags(Diags), SavedLoc(Loc), IsIgnored(IsIgnored) {
+ if (!IsIgnored) {
+ Diags.setSeverity(/*Diag*/ diag::warn_uninit_self_reference_in_init,
+ /*Map*/ diag::Severity::Ignored, Loc);
+ }
+ }
+ ~DiagsUninitializedSeveretyRAII() {
+ if (!IsIgnored)
+ Diags.popMappings(SavedLoc);
+ }
+};
+}
+
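+// Builds the 'firstprivate' clause, e.g.
+// '#pragma omp parallel firstprivate(a)'. An invalid StartLoc marks the
+// implicit form generated for predetermined firstprivate variables; each
+// private copy is copy-initialized from the original variable.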
+OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ SmallVector<Expr *, 8> Vars;
+ SmallVector<Expr *, 8> PrivateCopies;
+ SmallVector<Expr *, 8> Inits;
+ bool IsImplicitClause =
+ StartLoc.isInvalid() && LParenLoc.isInvalid() && EndLoc.isInvalid();
+ auto ImplicitClauseLoc = DSAStack->getConstructLoc();
+
+ for (auto &RefExpr : VarList) {
+ assert(RefExpr && "NULL expr in OpenMP firstprivate clause.");
+ if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
+ // It will be analyzed later.
+ Vars.push_back(RefExpr);
+ PrivateCopies.push_back(nullptr);
+ Inits.push_back(nullptr);
+ continue;
+ }
+
+ SourceLocation ELoc =
+ IsImplicitClause ? ImplicitClauseLoc : RefExpr->getExprLoc();
+ // OpenMP [2.1, C/C++]
+ // A list item is a variable name.
+ // OpenMP [2.9.3.3, Restrictions, p.1]
+ // A variable that is part of another variable (as an array or
+ // structure element) cannot appear in a private clause.
+ DeclRefExpr *DE = dyn_cast_or_null<DeclRefExpr>(RefExpr);
+ if (!DE || !isa<VarDecl>(DE->getDecl())) {
+ Diag(ELoc, diag::err_omp_expected_var_name) << RefExpr->getSourceRange();
+ continue;
+ }
+ Decl *D = DE->getDecl();
+ VarDecl *VD = cast<VarDecl>(D);
+
+ QualType Type = VD->getType();
+ if (Type->isDependentType() || Type->isInstantiationDependentType()) {
+ // It will be analyzed later.
+ Vars.push_back(DE);
+ PrivateCopies.push_back(nullptr);
+ Inits.push_back(nullptr);
+ continue;
+ }
+
+ // OpenMP [2.9.3.3, Restrictions, C/C++, p.3]
+ // A variable that appears in a private clause must not have an incomplete
+ // type or a reference type.
+ if (RequireCompleteType(ELoc, Type,
+ diag::err_omp_firstprivate_incomplete_type)) {
+ continue;
+ }
+ Type = Type.getNonReferenceType();
+
+ // OpenMP [2.9.3.4, Restrictions, C/C++, p.1]
+ // A variable of class type (or array thereof) that appears in a private
+ // clause requires an accessible, unambiguous copy constructor for the
+ // class type.
+ auto ElemType = Context.getBaseElementType(Type).getNonReferenceType();
+
+ // If an implicit firstprivate variable was found, it was already checked.
+ if (!IsImplicitClause) {
+ DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(VD, false);
+ bool IsConstant = ElemType.isConstant(Context);
+ // OpenMP [2.4.13, Data-sharing Attribute Clauses]
+ // A list item that specifies a given variable may not appear in more
+ // than one clause on the same directive, except that a variable may be
+ // specified in both firstprivate and lastprivate clauses.
+ if (DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_firstprivate &&
+ DVar.CKind != OMPC_lastprivate && DVar.RefExpr) {
+ Diag(ELoc, diag::err_omp_wrong_dsa)
+ << getOpenMPClauseName(DVar.CKind)
+ << getOpenMPClauseName(OMPC_firstprivate);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+
+ // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
+ // in a Construct]
+ // Variables with the predetermined data-sharing attributes may not be
+ // listed in data-sharing attributes clauses, except for the cases
+ // listed below. For these exceptions only, listing a predetermined
+ // variable in a data-sharing attribute clause is allowed and overrides
+ // the variable's predetermined data-sharing attributes.
+ // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
+ // in a Construct, C/C++, p.2]
+ // Variables with const-qualified type having no mutable member may be
+ // listed in a firstprivate clause, even if they are static data members.
+ if (!(IsConstant || VD->isStaticDataMember()) && !DVar.RefExpr &&
+ DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_shared) {
+ Diag(ELoc, diag::err_omp_wrong_dsa)
+ << getOpenMPClauseName(DVar.CKind)
+ << getOpenMPClauseName(OMPC_firstprivate);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+
+ OpenMPDirectiveKind CurrDir = DSAStack->getCurrentDirective();
+ // OpenMP [2.9.3.4, Restrictions, p.2]
+ // A list item that is private within a parallel region must not appear
+ // in a firstprivate clause on a worksharing construct if any of the
+ // worksharing regions arising from the worksharing construct ever bind
+ // to any of the parallel regions arising from the parallel construct.
+ if (isOpenMPWorksharingDirective(CurrDir) &&
+ !isOpenMPParallelDirective(CurrDir)) {
+ DVar = DSAStack->getImplicitDSA(VD, true);
+ if (DVar.CKind != OMPC_shared &&
+ (isOpenMPParallelDirective(DVar.DKind) ||
+ DVar.DKind == OMPD_unknown)) {
+ Diag(ELoc, diag::err_omp_required_access)
+ << getOpenMPClauseName(OMPC_firstprivate)
+ << getOpenMPClauseName(OMPC_shared);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+ }
+ // OpenMP [2.9.3.4, Restrictions, p.3]
+ // A list item that appears in a reduction clause of a parallel construct
+ // must not appear in a firstprivate clause on a worksharing or task
+ // construct if any of the worksharing or task regions arising from the
+ // worksharing or task construct ever bind to any of the parallel regions
+ // arising from the parallel construct.
+ // OpenMP [2.9.3.4, Restrictions, p.4]
+ // A list item that appears in a reduction clause in worksharing
+ // construct must not appear in a firstprivate clause in a task construct
+ // encountered during execution of any of the worksharing regions arising
+ // from the worksharing construct.
+ if (CurrDir == OMPD_task) {
+ DVar =
+ DSAStack->hasInnermostDSA(VD, MatchesAnyClause(OMPC_reduction),
+ [](OpenMPDirectiveKind K) -> bool {
+ return isOpenMPParallelDirective(K) ||
+ isOpenMPWorksharingDirective(K);
+ },
+ false);
+ if (DVar.CKind == OMPC_reduction &&
+ (isOpenMPParallelDirective(DVar.DKind) ||
+ isOpenMPWorksharingDirective(DVar.DKind))) {
+ Diag(ELoc, diag::err_omp_parallel_reduction_in_task_firstprivate)
+ << getOpenMPDirectiveName(DVar.DKind);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+ }
+
+ // OpenMP 4.5 [2.15.3.4, Restrictions, p.3]
+ // A list item that is private within a teams region must not appear in a
+ // firstprivate clause on a distribute construct if any of the distribute
+ // regions arising from the distribute construct ever bind to any of the
+ // teams regions arising from the teams construct.
+ // OpenMP 4.5 [2.15.3.4, Restrictions, p.3]
+ // A list item that appears in a reduction clause of a teams construct
+ // must not appear in a firstprivate clause on a distribute construct if
+ // any of the distribute regions arising from the distribute construct
+ // ever bind to any of the teams regions arising from the teams construct.
+ // OpenMP 4.5 [2.10.8, Distribute Construct, p.3]
+ // A list item may appear in a firstprivate or lastprivate clause but not
+ // both.
+ if (CurrDir == OMPD_distribute) {
+ DVar = DSAStack->hasInnermostDSA(VD, MatchesAnyClause(OMPC_private),
+ [](OpenMPDirectiveKind K) -> bool {
+ return isOpenMPTeamsDirective(K);
+ },
+ false);
+ if (DVar.CKind == OMPC_private && isOpenMPTeamsDirective(DVar.DKind)) {
+ Diag(ELoc, diag::err_omp_firstprivate_distribute_private_teams);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+ DVar = DSAStack->hasInnermostDSA(VD, MatchesAnyClause(OMPC_reduction),
+ [](OpenMPDirectiveKind K) -> bool {
+ return isOpenMPTeamsDirective(K);
+ },
+ false);
+ if (DVar.CKind == OMPC_reduction &&
+ isOpenMPTeamsDirective(DVar.DKind)) {
+ Diag(ELoc, diag::err_omp_firstprivate_distribute_in_teams_reduction);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+ DVar = DSAStack->getTopDSA(VD, false);
+ if (DVar.CKind == OMPC_lastprivate) {
+ Diag(ELoc, diag::err_omp_firstprivate_and_lastprivate_in_distribute);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+ }
+ }
+
+ // Variably modified types are not supported for tasks.
+ if (!Type->isAnyPointerType() && Type->isVariablyModifiedType() &&
+ DSAStack->getCurrentDirective() == OMPD_task) {
+ Diag(ELoc, diag::err_omp_variably_modified_type_not_supported)
+ << getOpenMPClauseName(OMPC_firstprivate) << Type
+ << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
+ bool IsDecl =
+ VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ continue;
+ }
+
+ Type = Type.getUnqualifiedType();
+ auto VDPrivate = buildVarDecl(*this, ELoc, Type, VD->getName(),
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
+ // Generate a helper private variable and initialize it with the value of the
+ // original variable. The address of the original variable is replaced by
+ // the address of the new private variable in the CodeGen. This new variable
+ // is not added to IdResolver, so the code in the OpenMP region uses the
+ // original variable for proper diagnostics and variable capturing.
+ Expr *VDInitRefExpr = nullptr;
+ // For arrays, generate an initializer for a single element and replace it
+ // with the original array element in CodeGen.
+ if (Type->isArrayType()) {
+ auto VDInit =
+ buildVarDecl(*this, DE->getExprLoc(), ElemType, VD->getName());
+ VDInitRefExpr = buildDeclRefExpr(*this, VDInit, ElemType, ELoc);
+ auto Init = DefaultLvalueConversion(VDInitRefExpr).get();
+ ElemType = ElemType.getUnqualifiedType();
+ auto *VDInitTemp = buildVarDecl(*this, DE->getLocStart(), ElemType,
+ ".firstprivate.temp");
+ InitializedEntity Entity =
+ InitializedEntity::InitializeVariable(VDInitTemp);
+ InitializationKind Kind = InitializationKind::CreateCopy(ELoc, ELoc);
+
+ InitializationSequence InitSeq(*this, Entity, Kind, Init);
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Init);
+ if (Result.isInvalid())
+ VDPrivate->setInvalidDecl();
+ else
+ VDPrivate->setInit(Result.getAs<Expr>());
+ // Remove temp variable declaration.
+ Context.Deallocate(VDInitTemp);
+ } else {
+ auto *VDInit =
+ buildVarDecl(*this, DE->getLocStart(), Type, ".firstprivate.temp");
+ VDInitRefExpr =
+ buildDeclRefExpr(*this, VDInit, DE->getType(), DE->getExprLoc());
+ AddInitializerToDecl(VDPrivate,
+ DefaultLvalueConversion(VDInitRefExpr).get(),
+ /*DirectInit=*/false, /*TypeMayContainAuto=*/false);
+ }
+ if (VDPrivate->isInvalidDecl()) {
+ if (IsImplicitClause) {
+ Diag(DE->getExprLoc(),
+ diag::note_omp_task_predetermined_firstprivate_here);
+ }
+ continue;
+ }
+ CurContext->addDecl(VDPrivate);
+ auto VDPrivateRefExpr = buildDeclRefExpr(
+ *this, VDPrivate, DE->getType().getUnqualifiedType(), DE->getExprLoc());
+ DSAStack->addDSA(VD, DE, OMPC_firstprivate);
+ Vars.push_back(DE);
+ PrivateCopies.push_back(VDPrivateRefExpr);
+ Inits.push_back(VDInitRefExpr);
+ }
+
+ if (Vars.empty())
+ return nullptr;
+
+ return OMPFirstprivateClause::Create(Context, StartLoc, LParenLoc, EndLoc,
+ Vars, PrivateCopies, Inits);
+}
+
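+// Builds the 'lastprivate' clause, e.g. '#pragma omp for lastprivate(a)'.
+// Pseudo source/destination variables and an assignment expression are
+// built per list item so CodeGen can copy the value from the last iteration
+// back to the original variable.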
+OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ SmallVector<Expr *, 8> Vars;
+ SmallVector<Expr *, 8> SrcExprs;
+ SmallVector<Expr *, 8> DstExprs;
+ SmallVector<Expr *, 8> AssignmentOps;
+ for (auto &RefExpr : VarList) {
+ assert(RefExpr && "NULL expr in OpenMP lastprivate clause.");
+ if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
+ // It will be analyzed later.
+ Vars.push_back(RefExpr);
+ SrcExprs.push_back(nullptr);
+ DstExprs.push_back(nullptr);
+ AssignmentOps.push_back(nullptr);
+ continue;
+ }
+
+ SourceLocation ELoc = RefExpr->getExprLoc();
+ // OpenMP [2.1, C/C++]
+ // A list item is a variable name.
+ // OpenMP [2.14.3.5, Restrictions, p.1]
+ // A variable that is part of another variable (as an array or structure
+ // element) cannot appear in a lastprivate clause.
+ DeclRefExpr *DE = dyn_cast_or_null<DeclRefExpr>(RefExpr);
+ if (!DE || !isa<VarDecl>(DE->getDecl())) {
+ Diag(ELoc, diag::err_omp_expected_var_name) << RefExpr->getSourceRange();
+ continue;
+ }
+ Decl *D = DE->getDecl();
+ VarDecl *VD = cast<VarDecl>(D);
+
+ QualType Type = VD->getType();
+ if (Type->isDependentType() || Type->isInstantiationDependentType()) {
+ // It will be analyzed later.
+ Vars.push_back(DE);
+ SrcExprs.push_back(nullptr);
+ DstExprs.push_back(nullptr);
+ AssignmentOps.push_back(nullptr);
+ continue;
+ }
+
+ // OpenMP [2.14.3.5, Restrictions, C/C++, p.2]
+ // A variable that appears in a lastprivate clause must not have an
+ // incomplete type or a reference type.
+ if (RequireCompleteType(ELoc, Type,
+ diag::err_omp_lastprivate_incomplete_type)) {
+ continue;
+ }
+ Type = Type.getNonReferenceType();
+
+ // OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables Referenced
+ // in a Construct]
+ // Variables with the predetermined data-sharing attributes may not be
+ // listed in data-sharing attributes clauses, except for the cases
+ // listed below.
+ DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(VD, false);
+ if (DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_lastprivate &&
+ DVar.CKind != OMPC_firstprivate &&
+ (DVar.CKind != OMPC_private || DVar.RefExpr != nullptr)) {
+ Diag(ELoc, diag::err_omp_wrong_dsa)
+ << getOpenMPClauseName(DVar.CKind)
+ << getOpenMPClauseName(OMPC_lastprivate);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+
+ OpenMPDirectiveKind CurrDir = DSAStack->getCurrentDirective();
+ // OpenMP [2.14.3.5, Restrictions, p.2]
+ // A list item that is private within a parallel region, or that appears in
+ // the reduction clause of a parallel construct, must not appear in a
+ // lastprivate clause on a worksharing construct if any of the corresponding
+ // worksharing regions ever binds to any of the corresponding parallel
+ // regions.
+ DSAStackTy::DSAVarData TopDVar = DVar;
+ if (isOpenMPWorksharingDirective(CurrDir) &&
+ !isOpenMPParallelDirective(CurrDir)) {
+ DVar = DSAStack->getImplicitDSA(VD, true);
+ if (DVar.CKind != OMPC_shared) {
+ Diag(ELoc, diag::err_omp_required_access)
+ << getOpenMPClauseName(OMPC_lastprivate)
+ << getOpenMPClauseName(OMPC_shared);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+ }
+ // OpenMP [2.14.3.5, Restrictions, C++, p.1,2]
+ // A variable of class type (or array thereof) that appears in a
+ // lastprivate clause requires an accessible, unambiguous default
+ // constructor for the class type, unless the list item is also specified
+ // in a firstprivate clause.
+ // A variable of class type (or array thereof) that appears in a
+ // lastprivate clause requires an accessible, unambiguous copy assignment
+ // operator for the class type.
+ Type = Context.getBaseElementType(Type).getNonReferenceType();
+ auto *SrcVD = buildVarDecl(*this, DE->getLocStart(),
+ Type.getUnqualifiedType(), ".lastprivate.src",
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
+ auto *PseudoSrcExpr = buildDeclRefExpr(
+ *this, SrcVD, Type.getUnqualifiedType(), DE->getExprLoc());
+ auto *DstVD =
+ buildVarDecl(*this, DE->getLocStart(), Type, ".lastprivate.dst",
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
+ auto *PseudoDstExpr =
+ buildDeclRefExpr(*this, DstVD, Type, DE->getExprLoc());
+ // For arrays, generate the assignment operation for a single element and
+ // replace it with the original array element in CodeGen.
+ auto AssignmentOp = BuildBinOp(/*S=*/nullptr, DE->getExprLoc(), BO_Assign,
+ PseudoDstExpr, PseudoSrcExpr);
+ if (AssignmentOp.isInvalid())
+ continue;
+ AssignmentOp = ActOnFinishFullExpr(AssignmentOp.get(), DE->getExprLoc(),
+ /*DiscardedValue=*/true);
+ if (AssignmentOp.isInvalid())
+ continue;
+
+ // OpenMP 4.5 [2.10.8, Distribute Construct, p.3]
+ // A list item may appear in a firstprivate or lastprivate clause but not
+ // both.
+ if (CurrDir == OMPD_distribute) {
+ DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(VD, false);
+ if (DVar.CKind == OMPC_firstprivate) {
+ Diag(ELoc, diag::err_omp_firstprivate_and_lastprivate_in_distribute);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+ }
+
+ if (TopDVar.CKind != OMPC_firstprivate)
+ DSAStack->addDSA(VD, DE, OMPC_lastprivate);
+ Vars.push_back(DE);
+ SrcExprs.push_back(PseudoSrcExpr);
+ DstExprs.push_back(PseudoDstExpr);
+ AssignmentOps.push_back(AssignmentOp.get());
+ }
+
+ if (Vars.empty())
+ return nullptr;
+
+ return OMPLastprivateClause::Create(Context, StartLoc, LParenLoc, EndLoc,
+ Vars, SrcExprs, DstExprs, AssignmentOps);
+}
+
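+// Builds the 'shared' clause, e.g. '#pragma omp parallel shared(a)'.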
+OMPClause *Sema::ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ SmallVector<Expr *, 8> Vars;
+ for (auto &RefExpr : VarList) {
+ assert(RefExpr && "NULL expr in OpenMP shared clause.");
+ if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
+ // It will be analyzed later.
+ Vars.push_back(RefExpr);
+ continue;
+ }
+
+ SourceLocation ELoc = RefExpr->getExprLoc();
+ // OpenMP [2.1, C/C++]
+ // A list item is a variable name.
+ // OpenMP [2.14.3.2, Restrictions, p.1]
+ // A variable that is part of another variable (as an array or structure
+ // element) cannot appear in a shared clause unless it is a static data
+ // member of a C++ class.
+ DeclRefExpr *DE = dyn_cast<DeclRefExpr>(RefExpr);
+ if (!DE || !isa<VarDecl>(DE->getDecl())) {
+ Diag(ELoc, diag::err_omp_expected_var_name) << RefExpr->getSourceRange();
+ continue;
+ }
+ Decl *D = DE->getDecl();
+ VarDecl *VD = cast<VarDecl>(D);
+
+ QualType Type = VD->getType();
+ if (Type->isDependentType() || Type->isInstantiationDependentType()) {
+ // It will be analyzed later.
+ Vars.push_back(DE);
+ continue;
+ }
+
+ // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
+ // in a Construct]
+ // Variables with the predetermined data-sharing attributes may not be
+ // listed in data-sharing attributes clauses, except for the cases
+ // listed below. For these exceptions only, listing a predetermined
+ // variable in a data-sharing attribute clause is allowed and overrides
+ // the variable's predetermined data-sharing attributes.
+ DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(VD, false);
+ if (DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_shared &&
+ DVar.RefExpr) {
+ Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind)
+ << getOpenMPClauseName(OMPC_shared);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+
+ DSAStack->addDSA(VD, DE, OMPC_shared);
+ Vars.push_back(DE);
+ }
+
+ if (Vars.empty())
+ return nullptr;
+
+ return OMPSharedClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars);
+}
+
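+// Checks whether an expression (the initializer of a reference-type
+// reduction list item) refers to a variable with a private data-sharing
+// attribute in the current region.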
+namespace {
+class DSARefChecker : public StmtVisitor<DSARefChecker, bool> {
+ DSAStackTy *Stack;
+
+public:
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl())) {
+ DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, false);
+ if (DVar.CKind == OMPC_shared && !DVar.RefExpr)
+ return false;
+ if (DVar.CKind != OMPC_unknown)
+ return true;
+ DSAStackTy::DSAVarData DVarPrivate =
+ Stack->hasDSA(VD, isOpenMPPrivate, MatchesAlways(), false);
+ if (DVarPrivate.CKind != OMPC_unknown)
+ return true;
+ return false;
+ }
+ return false;
+ }
+ bool VisitStmt(Stmt *S) {
+ for (auto Child : S->children()) {
+ if (Child && Visit(Child))
+ return true;
+ }
+ return false;
+ }
+ explicit DSARefChecker(DSAStackTy *S) : Stack(S) {}
+};
+} // namespace
+
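+// Builds the 'reduction' clause, e.g. '#pragma omp parallel reduction(+: s)'.
+// Only the builtin operators and the 'min'/'max' identifiers are accepted so
+// far; for each list item, LHS/RHS helpers, a private copy initialized with
+// the operator's identity value, and the combiner expression are created.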
+OMPClause *Sema::ActOnOpenMPReductionClause(
+ ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation ColonLoc, SourceLocation EndLoc,
+ CXXScopeSpec &ReductionIdScopeSpec,
+ const DeclarationNameInfo &ReductionId) {
+ // TODO: Allow scope specification search when 'declare reduction' is
+ // supported.
+ assert(ReductionIdScopeSpec.isEmpty() &&
+ "No support for scoped reduction identifiers yet.");
+
+ auto DN = ReductionId.getName();
+ auto OOK = DN.getCXXOverloadedOperator();
+ BinaryOperatorKind BOK = BO_Comma;
+
+ // OpenMP [2.14.3.6, reduction clause]
+ // C
+ // reduction-identifier is either an identifier or one of the following
+ // operators: +, -, *, &, |, ^, && and ||
+ // C++
+ // reduction-identifier is either an id-expression or one of the following
+ // operators: +, -, *, &, |, ^, && and ||
+ // FIXME: Only 'min' and 'max' identifiers are supported for now.
+ switch (OOK) {
+ case OO_Plus:
+ case OO_Minus:
+ BOK = BO_Add;
+ break;
+ case OO_Star:
+ BOK = BO_Mul;
+ break;
+ case OO_Amp:
+ BOK = BO_And;
+ break;
+ case OO_Pipe:
+ BOK = BO_Or;
+ break;
+ case OO_Caret:
+ BOK = BO_Xor;
+ break;
+ case OO_AmpAmp:
+ BOK = BO_LAnd;
+ break;
+ case OO_PipePipe:
+ BOK = BO_LOr;
+ break;
+ case OO_New:
+ case OO_Delete:
+ case OO_Array_New:
+ case OO_Array_Delete:
+ case OO_Slash:
+ case OO_Percent:
+ case OO_Tilde:
+ case OO_Exclaim:
+ case OO_Equal:
+ case OO_Less:
+ case OO_Greater:
+ case OO_LessEqual:
+ case OO_GreaterEqual:
+ case OO_PlusEqual:
+ case OO_MinusEqual:
+ case OO_StarEqual:
+ case OO_SlashEqual:
+ case OO_PercentEqual:
+ case OO_CaretEqual:
+ case OO_AmpEqual:
+ case OO_PipeEqual:
+ case OO_LessLess:
+ case OO_GreaterGreater:
+ case OO_LessLessEqual:
+ case OO_GreaterGreaterEqual:
+ case OO_EqualEqual:
+ case OO_ExclaimEqual:
+ case OO_PlusPlus:
+ case OO_MinusMinus:
+ case OO_Comma:
+ case OO_ArrowStar:
+ case OO_Arrow:
+ case OO_Call:
+ case OO_Subscript:
+ case OO_Conditional:
+ case OO_Coawait:
+ case NUM_OVERLOADED_OPERATORS:
+ llvm_unreachable("Unexpected reduction identifier");
+ case OO_None:
+ if (auto II = DN.getAsIdentifierInfo()) {
+ if (II->isStr("max"))
+ BOK = BO_GT;
+ else if (II->isStr("min"))
+ BOK = BO_LT;
+ }
+ break;
+ }
+ SourceRange ReductionIdRange;
+ if (ReductionIdScopeSpec.isValid()) {
+ ReductionIdRange.setBegin(ReductionIdScopeSpec.getBeginLoc());
+ }
+ ReductionIdRange.setEnd(ReductionId.getEndLoc());
+ if (BOK == BO_Comma) {
+ // A reduction identifier that is not allowed was found.
+ Diag(ReductionId.getLocStart(), diag::err_omp_unknown_reduction_identifier)
+ << ReductionIdRange;
+ return nullptr;
+ }
+
+ SmallVector<Expr *, 8> Vars;
+ SmallVector<Expr *, 8> Privates;
+ SmallVector<Expr *, 8> LHSs;
+ SmallVector<Expr *, 8> RHSs;
+ SmallVector<Expr *, 8> ReductionOps;
+ for (auto RefExpr : VarList) {
+ assert(RefExpr && "nullptr expr in OpenMP reduction clause.");
+ if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
+ // It will be analyzed later.
+ Vars.push_back(RefExpr);
+ Privates.push_back(nullptr);
+ LHSs.push_back(nullptr);
+ RHSs.push_back(nullptr);
+ ReductionOps.push_back(nullptr);
+ continue;
+ }
+
+ if (RefExpr->isTypeDependent() || RefExpr->isValueDependent() ||
+ RefExpr->isInstantiationDependent() ||
+ RefExpr->containsUnexpandedParameterPack()) {
+ // It will be analyzed later.
+ Vars.push_back(RefExpr);
+ Privates.push_back(nullptr);
+ LHSs.push_back(nullptr);
+ RHSs.push_back(nullptr);
+ ReductionOps.push_back(nullptr);
+ continue;
+ }
+
+ auto ELoc = RefExpr->getExprLoc();
+ auto ERange = RefExpr->getSourceRange();
+ // OpenMP [2.1, C/C++]
+ // A list item is a variable or array section, subject to the restrictions
+ // specified in Section 2.4 on page 42 and in each of the sections
+ // describing clauses and directives for which a list appears.
+ // OpenMP [2.14.3.3, Restrictions, p.1]
+ // A variable that is part of another variable (as an array or
+ // structure element) cannot appear in a private clause.
+ auto *DE = dyn_cast<DeclRefExpr>(RefExpr);
+ auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr);
+ auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr);
+ if (!ASE && !OASE && (!DE || !isa<VarDecl>(DE->getDecl()))) {
+ Diag(ELoc, diag::err_omp_expected_var_name_or_array_item) << ERange;
+ continue;
+ }
+ QualType Type;
+ VarDecl *VD = nullptr;
+ if (DE) {
+ auto D = DE->getDecl();
+ VD = cast<VarDecl>(D);
+ Type = VD->getType();
+ } else if (ASE) {
+ Type = ASE->getType();
+ auto *Base = ASE->getBase()->IgnoreParenImpCasts();
+ while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
+ Base = TempASE->getBase()->IgnoreParenImpCasts();
+ DE = dyn_cast<DeclRefExpr>(Base);
+ if (DE)
+ VD = dyn_cast<VarDecl>(DE->getDecl());
+ if (!VD) {
+ Diag(Base->getExprLoc(), diag::err_omp_expected_base_var_name)
+ << 0 << Base->getSourceRange();
+ continue;
+ }
+ } else if (OASE) {
+ auto BaseType = OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
+ if (auto *ATy = BaseType->getAsArrayTypeUnsafe())
+ Type = ATy->getElementType();
+ else
+ Type = BaseType->getPointeeType();
+ auto *Base = OASE->getBase()->IgnoreParenImpCasts();
+ while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
+ Base = TempOASE->getBase()->IgnoreParenImpCasts();
+ while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
+ Base = TempASE->getBase()->IgnoreParenImpCasts();
+ DE = dyn_cast<DeclRefExpr>(Base);
+ if (DE)
+ VD = dyn_cast<VarDecl>(DE->getDecl());
+ if (!VD) {
+ Diag(Base->getExprLoc(), diag::err_omp_expected_base_var_name)
+ << 1 << Base->getSourceRange();
+ continue;
+ }
+ }
+
+ // OpenMP [2.9.3.3, Restrictions, C/C++, p.3]
+ // A variable that appears in a private clause must not have an incomplete
+ // type or a reference type.
+ if (RequireCompleteType(ELoc, Type,
+ diag::err_omp_reduction_incomplete_type))
+ continue;
+ // OpenMP [2.14.3.6, reduction clause, Restrictions]
+ // Arrays may not appear in a reduction clause.
+ if (Type.getNonReferenceType()->isArrayType()) {
+ Diag(ELoc, diag::err_omp_reduction_type_array) << Type << ERange;
+ if (!ASE && !OASE) {
+ bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ }
+ continue;
+ }
+ // OpenMP [2.14.3.6, reduction clause, Restrictions]
+ // A list item that appears in a reduction clause must not be
+ // const-qualified.
+ if (Type.getNonReferenceType().isConstant(Context)) {
+ Diag(ELoc, diag::err_omp_const_reduction_list_item)
+ << getOpenMPClauseName(OMPC_reduction) << Type << ERange;
+ if (!ASE && !OASE) {
+ bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ }
+ continue;
+ }
+ // OpenMP [2.9.3.6, Restrictions, C/C++, p.4]
+ // If a list-item is a reference type then it must bind to the same object
+ // for all threads of the team.
+ if (!ASE && !OASE) {
+ VarDecl *VDDef = VD->getDefinition();
+ if (Type->isReferenceType() && VDDef) {
+ DSARefChecker Check(DSAStack);
+ if (Check.Visit(VDDef->getInit())) {
+ Diag(ELoc, diag::err_omp_reduction_ref_type_arg) << ERange;
+ Diag(VDDef->getLocation(), diag::note_defined_here) << VDDef;
+ continue;
+ }
+ }
+ }
+ // OpenMP [2.14.3.6, reduction clause, Restrictions]
+ // The type of a list item that appears in a reduction clause must be valid
+ // for the reduction-identifier. For a max or min reduction in C, the type
+ // of the list item must be an allowed arithmetic data type: char, int,
+ // float, double, or _Bool, possibly modified with long, short, signed, or
+ // unsigned. For a max or min reduction in C++, the type of the list item
+ // must be an allowed arithmetic data type: char, wchar_t, int, float,
+ // double, or bool, possibly modified with long, short, signed, or unsigned.
+ if ((BOK == BO_GT || BOK == BO_LT) &&
+ !(Type->isScalarType() ||
+ (getLangOpts().CPlusPlus && Type->isArithmeticType()))) {
+ Diag(ELoc, diag::err_omp_clause_not_arithmetic_type_arg)
+ << getLangOpts().CPlusPlus;
+ if (!ASE && !OASE) {
+ bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ }
+ continue;
+ }
+ if ((BOK == BO_OrAssign || BOK == BO_AndAssign || BOK == BO_XorAssign) &&
+ !getLangOpts().CPlusPlus && Type->isFloatingType()) {
+ Diag(ELoc, diag::err_omp_clause_floating_type_arg);
+ if (!ASE && !OASE) {
+ bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ }
+ continue;
+ }
+ // OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables Referenced
+ // in a Construct]
+ // Variables with the predetermined data-sharing attributes may not be
+ // listed in data-sharing attributes clauses, except for the cases
+ // listed below. For these exceptions only, listing a predetermined
+ // variable in a data-sharing attribute clause is allowed and overrides
+ // the variable's predetermined data-sharing attributes.
+ // OpenMP [2.14.3.6, Restrictions, p.3]
+ // Any number of reduction clauses can be specified on the directive,
+ // but a list item can appear only once in the reduction clauses for that
+ // directive.
+ DSAStackTy::DSAVarData DVar;
+ DVar = DSAStack->getTopDSA(VD, false);
+ if (DVar.CKind == OMPC_reduction) {
+ Diag(ELoc, diag::err_omp_once_referenced)
+ << getOpenMPClauseName(OMPC_reduction);
+ if (DVar.RefExpr) {
+ Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_referenced);
+ }
+ } else if (DVar.CKind != OMPC_unknown) {
+ Diag(ELoc, diag::err_omp_wrong_dsa)
+ << getOpenMPClauseName(DVar.CKind)
+ << getOpenMPClauseName(OMPC_reduction);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+
+ // OpenMP [2.14.3.6, Restrictions, p.1]
+ // A list item that appears in a reduction clause of a worksharing
+ // construct must be shared in the parallel regions to which any of the
+ // worksharing regions arising from the worksharing construct bind.
+ OpenMPDirectiveKind CurrDir = DSAStack->getCurrentDirective();
+ if (isOpenMPWorksharingDirective(CurrDir) &&
+ !isOpenMPParallelDirective(CurrDir)) {
+ DVar = DSAStack->getImplicitDSA(VD, true);
+ if (DVar.CKind != OMPC_shared) {
+ Diag(ELoc, diag::err_omp_required_access)
+ << getOpenMPClauseName(OMPC_reduction)
+ << getOpenMPClauseName(OMPC_shared);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+ }
+
+ Type = Type.getNonLValueExprType(Context).getUnqualifiedType();
+ auto *LHSVD = buildVarDecl(*this, ELoc, Type, ".reduction.lhs",
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
+ auto *RHSVD = buildVarDecl(*this, ELoc, Type, VD->getName(),
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
+ auto PrivateTy = Type;
+ if (OASE) {
+ // For array sections only:
+ // Create a pseudo array type for the private copy. The size of this array
+ // will be generated during codegen.
+ // For array subscripts or single variables, PrivateTy is the same as Type
+ // (the type of the variable or of a single array element).
+ PrivateTy = Context.getVariableArrayType(
+ Type, new (Context) OpaqueValueExpr(SourceLocation(),
+ Context.getSizeType(), VK_RValue),
+ ArrayType::Normal, /*IndexTypeQuals=*/0, SourceRange());
+ }
+ // Private copy.
+ auto *PrivateVD = buildVarDecl(*this, ELoc, PrivateTy, VD->getName(),
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
+ // Add initializer for private variable.
+ Expr *Init = nullptr;
+ switch (BOK) {
+ case BO_Add:
+ case BO_Xor:
+ case BO_Or:
+ case BO_LOr:
+ // '+', '-', '^', '|', '||' reduction ops - initializer is '0'.
+ if (Type->isScalarType() || Type->isAnyComplexType()) {
+ Init = ActOnIntegerConstant(ELoc, /*Val=*/0).get();
+ }
+ break;
+ case BO_Mul:
+ case BO_LAnd:
+ if (Type->isScalarType() || Type->isAnyComplexType()) {
+ // '*' and '&&' reduction ops - initializer is '1'.
+ Init = ActOnIntegerConstant(ELoc, /*Val=*/1).get();
+ }
+ break;
+ case BO_And: {
+ // '&' reduction op - initializer is '~0'.
+ QualType OrigType = Type;
+ if (auto *ComplexTy = OrigType->getAs<ComplexType>()) {
+ Type = ComplexTy->getElementType();
+ }
+ if (Type->isRealFloatingType()) {
+ llvm::APFloat InitValue =
+ llvm::APFloat::getAllOnesValue(Context.getTypeSize(Type),
+ /*isIEEE=*/true);
+ Init = FloatingLiteral::Create(Context, InitValue, /*isexact=*/true,
+ Type, ELoc);
+ } else if (Type->isScalarType()) {
+ auto Size = Context.getTypeSize(Type);
+ QualType IntTy = Context.getIntTypeForBitwidth(Size, /*Signed=*/0);
+ llvm::APInt InitValue = llvm::APInt::getAllOnesValue(Size);
+ Init = IntegerLiteral::Create(Context, InitValue, IntTy, ELoc);
+ }
+ if (Init && OrigType->isAnyComplexType()) {
+ // Init = 0xFFFF + 0xFFFFi;
+ auto *Im = new (Context) ImaginaryLiteral(Init, OrigType);
+ Init = CreateBuiltinBinOp(ELoc, BO_Add, Init, Im).get();
+ }
+ Type = OrigType;
+ break;
+ }
+ case BO_LT:
+ case BO_GT: {
+ // 'min' reduction op - initializer is 'Largest representable number in
+ // the reduction list item type'.
+ // 'max' reduction op - initializer is 'Least representable number in
+ // the reduction list item type'.
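+ // For example, for 'reduction(min : x)' with 'int x', the private copy is
+ // initialized to INT_MAX; for 'reduction(max : x)', to INT_MIN.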
+ if (Type->isIntegerType() || Type->isPointerType()) {
+ bool IsSigned = Type->hasSignedIntegerRepresentation();
+ auto Size = Context.getTypeSize(Type);
+ QualType IntTy =
+ Context.getIntTypeForBitwidth(Size, /*Signed=*/IsSigned);
+ llvm::APInt InitValue =
+ (BOK != BO_LT)
+ ? IsSigned ? llvm::APInt::getSignedMinValue(Size)
+ : llvm::APInt::getMinValue(Size)
+ : IsSigned ? llvm::APInt::getSignedMaxValue(Size)
+ : llvm::APInt::getMaxValue(Size);
+ Init = IntegerLiteral::Create(Context, InitValue, IntTy, ELoc);
+ if (Type->isPointerType()) {
+ // Cast to pointer type.
+ auto CastExpr = BuildCStyleCastExpr(
+ SourceLocation(), Context.getTrivialTypeSourceInfo(Type, ELoc),
+ SourceLocation(), Init);
+ if (CastExpr.isInvalid())
+ continue;
+ Init = CastExpr.get();
+ }
+ } else if (Type->isRealFloatingType()) {
+ llvm::APFloat InitValue = llvm::APFloat::getLargest(
+ Context.getFloatTypeSemantics(Type), BOK != BO_LT);
+ Init = FloatingLiteral::Create(Context, InitValue, /*isexact=*/true,
+ Type, ELoc);
+ }
+ break;
+ }
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ case BO_MulAssign:
+ case BO_Div:
+ case BO_Rem:
+ case BO_Sub:
+ case BO_Shl:
+ case BO_Shr:
+ case BO_LE:
+ case BO_GE:
+ case BO_EQ:
+ case BO_NE:
+ case BO_AndAssign:
+ case BO_XorAssign:
+ case BO_OrAssign:
+ case BO_Assign:
+ case BO_AddAssign:
+ case BO_SubAssign:
+ case BO_DivAssign:
+ case BO_RemAssign:
+ case BO_ShlAssign:
+ case BO_ShrAssign:
+ case BO_Comma:
+ llvm_unreachable("Unexpected reduction operation");
+ }
+ if (Init) {
+ AddInitializerToDecl(RHSVD, Init, /*DirectInit=*/false,
+ /*TypeMayContainAuto=*/false);
+ } else
+ ActOnUninitializedDecl(RHSVD, /*TypeMayContainAuto=*/false);
+ if (!RHSVD->hasInit()) {
+ Diag(ELoc, diag::err_omp_reduction_id_not_compatible) << Type
+ << ReductionIdRange;
+ if (VD) {
+ bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ }
+ continue;
+ }
+ // Store initializer for single element in private copy. Will be used during
+ // codegen.
+ PrivateVD->setInit(RHSVD->getInit());
+ PrivateVD->setInitStyle(RHSVD->getInitStyle());
+ auto *LHSDRE = buildDeclRefExpr(*this, LHSVD, Type, ELoc);
+ auto *RHSDRE = buildDeclRefExpr(*this, RHSVD, Type, ELoc);
+ auto *PrivateDRE = buildDeclRefExpr(*this, PrivateVD, PrivateTy, ELoc);
+ ExprResult ReductionOp =
+ BuildBinOp(DSAStack->getCurScope(), ReductionId.getLocStart(), BOK,
+ LHSDRE, RHSDRE);
+ if (ReductionOp.isUsable()) {
+ if (BOK != BO_LT && BOK != BO_GT) {
+ ReductionOp =
+ BuildBinOp(DSAStack->getCurScope(), ReductionId.getLocStart(),
+ BO_Assign, LHSDRE, ReductionOp.get());
+ } else {
+ auto *ConditionalOp = new (Context) ConditionalOperator(
+ ReductionOp.get(), SourceLocation(), LHSDRE, SourceLocation(),
+ RHSDRE, Type, VK_LValue, OK_Ordinary);
+ ReductionOp =
+ BuildBinOp(DSAStack->getCurScope(), ReductionId.getLocStart(),
+ BO_Assign, LHSDRE, ConditionalOp);
+ }
+ ReductionOp = ActOnFinishFullExpr(ReductionOp.get());
+ }
+ if (ReductionOp.isInvalid())
+ continue;
+
+ DSAStack->addDSA(VD, DE, OMPC_reduction);
+ Vars.push_back(RefExpr);
+ Privates.push_back(PrivateDRE);
+ LHSs.push_back(LHSDRE);
+ RHSs.push_back(RHSDRE);
+ ReductionOps.push_back(ReductionOp.get());
+ }
+
+ if (Vars.empty())
+ return nullptr;
+
+ return OMPReductionClause::Create(
+ Context, StartLoc, LParenLoc, ColonLoc, EndLoc, Vars,
+ ReductionIdScopeSpec.getWithLocInContext(Context), ReductionId, Privates,
+ LHSs, RHSs, ReductionOps);
+}
+
+OMPClause *Sema::ActOnOpenMPLinearClause(
+ ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc,
+ SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind,
+ SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc) {
+ SmallVector<Expr *, 8> Vars;
+ SmallVector<Expr *, 8> Privates;
+ SmallVector<Expr *, 8> Inits;
+ if ((!LangOpts.CPlusPlus && LinKind != OMPC_LINEAR_val) ||
+ LinKind == OMPC_LINEAR_unknown) {
+ Diag(LinLoc, diag::err_omp_wrong_linear_modifier) << LangOpts.CPlusPlus;
+ LinKind = OMPC_LINEAR_val;
+ }
+ for (auto &RefExpr : VarList) {
+ assert(RefExpr && "NULL expr in OpenMP linear clause.");
+ if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
+ // It will be analyzed later.
+ Vars.push_back(RefExpr);
+ Privates.push_back(nullptr);
+ Inits.push_back(nullptr);
+ continue;
+ }
+
+ // OpenMP [2.14.3.7, linear clause]
+ // A list item that appears in a linear clause is subject to the private
+ // clause semantics described in Section 2.14.3.3 on page 159 except as
+ // noted. In addition, the value of the new list item on each iteration
+ // of the associated loop(s) corresponds to the value of the original
+ // list item before entering the construct plus the logical number of
+ // the iteration times linear-step.
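+ // For example, with 'linear(x : 2)', the value of 'x' in logical iteration
+ // 'i' is the value of 'x' before the construct plus 'i * 2'.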
+
+ SourceLocation ELoc = RefExpr->getExprLoc();
+ // OpenMP [2.1, C/C++]
+ // A list item is a variable name.
+ // OpenMP [2.14.3.3, Restrictions, p.1]
+ // A variable that is part of another variable (as an array or
+ // structure element) cannot appear in a private clause.
+ DeclRefExpr *DE = dyn_cast<DeclRefExpr>(RefExpr);
+ if (!DE || !isa<VarDecl>(DE->getDecl())) {
+ Diag(ELoc, diag::err_omp_expected_var_name) << RefExpr->getSourceRange();
+ continue;
+ }
+
+ VarDecl *VD = cast<VarDecl>(DE->getDecl());
+
+ // OpenMP [2.14.3.7, linear clause]
+ // A list-item cannot appear in more than one linear clause.
+ // A list-item that appears in a linear clause cannot appear in any
+ // other data-sharing attribute clause.
+ DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(VD, false);
+ if (DVar.RefExpr) {
+ Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind)
+ << getOpenMPClauseName(OMPC_linear);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+
+ QualType QType = VD->getType();
+ if (QType->isDependentType() || QType->isInstantiationDependentType()) {
+ // It will be analyzed later.
+ Vars.push_back(DE);
+ Privates.push_back(nullptr);
+ Inits.push_back(nullptr);
+ continue;
+ }
+
+ // A variable must not have an incomplete type or a reference type.
+ if (RequireCompleteType(ELoc, QType,
+ diag::err_omp_linear_incomplete_type)) {
+ continue;
+ }
+ if ((LinKind == OMPC_LINEAR_uval || LinKind == OMPC_LINEAR_ref) &&
+ !QType->isReferenceType()) {
+ Diag(ELoc, diag::err_omp_wrong_linear_modifier_non_reference)
+ << QType << getOpenMPSimpleClauseTypeName(OMPC_linear, LinKind);
+ continue;
+ }
+ QType = QType.getNonReferenceType();
+
+ // A list item must not be const-qualified.
+ if (QType.isConstant(Context)) {
+ Diag(ELoc, diag::err_omp_const_variable)
+ << getOpenMPClauseName(OMPC_linear);
+ bool IsDecl =
+ VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ continue;
+ }
+
+ // A list item must be of integral or pointer type.
+ QType = QType.getUnqualifiedType().getCanonicalType();
+ const Type *Ty = QType.getTypePtrOrNull();
+ if (!Ty || (!Ty->isDependentType() && !Ty->isIntegralType(Context) &&
+ !Ty->isPointerType())) {
+ Diag(ELoc, diag::err_omp_linear_expected_int_or_ptr) << QType;
+ bool IsDecl =
+ VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ continue;
+ }
+
+ // Build private copy of original var.
+ auto *Private = buildVarDecl(*this, ELoc, QType, VD->getName(),
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
+ auto *PrivateRef = buildDeclRefExpr(
+ *this, Private, DE->getType().getUnqualifiedType(), DE->getExprLoc());
+ // Build var to save initial value.
+ VarDecl *Init = buildVarDecl(*this, ELoc, QType, ".linear.start");
+ Expr *InitExpr;
+ if (LinKind == OMPC_LINEAR_uval)
+ InitExpr = VD->getInit();
+ else
+ InitExpr = DE;
+ AddInitializerToDecl(Init, DefaultLvalueConversion(InitExpr).get(),
+ /*DirectInit*/ false, /*TypeMayContainAuto*/ false);
+ auto InitRef = buildDeclRefExpr(
+ *this, Init, DE->getType().getUnqualifiedType(), DE->getExprLoc());
+ DSAStack->addDSA(VD, DE, OMPC_linear);
+ Vars.push_back(DE);
+ Privates.push_back(PrivateRef);
+ Inits.push_back(InitRef);
+ }
+
+ if (Vars.empty())
+ return nullptr;
+
+ Expr *StepExpr = Step;
+ Expr *CalcStepExpr = nullptr;
+ if (Step && !Step->isValueDependent() && !Step->isTypeDependent() &&
+ !Step->isInstantiationDependent() &&
+ !Step->containsUnexpandedParameterPack()) {
+ SourceLocation StepLoc = Step->getLocStart();
+ ExprResult Val = PerformOpenMPImplicitIntegerConversion(StepLoc, Step);
+ if (Val.isInvalid())
+ return nullptr;
+ StepExpr = Val.get();
+
+ // Build var to save the step value.
+ VarDecl *SaveVar =
+ buildVarDecl(*this, StepLoc, StepExpr->getType(), ".linear.step");
+ ExprResult SaveRef =
+ buildDeclRefExpr(*this, SaveVar, StepExpr->getType(), StepLoc);
+ ExprResult CalcStep =
+ BuildBinOp(CurScope, StepLoc, BO_Assign, SaveRef.get(), StepExpr);
+ CalcStep = ActOnFinishFullExpr(CalcStep.get());
+
+ // Warn about a zero linear step (it would probably be better specified as
+ // making the corresponding variables 'const').
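+ // For example, 'linear(x : 0)' leaves 'x' unchanged on every iteration,
+ // which triggers this warning.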
+ llvm::APSInt Result;
+ bool IsConstant = StepExpr->isIntegerConstantExpr(Result, Context);
+ if (IsConstant && !Result.isNegative() && !Result.isStrictlyPositive())
+ Diag(StepLoc, diag::warn_omp_linear_step_zero) << Vars[0]
+ << (Vars.size() > 1);
+ if (!IsConstant && CalcStep.isUsable()) {
+ // Calculate the step beforehand instead of doing this on each iteration.
+ // (This is not needed if the step is a constant that can be folded.)
+ CalcStepExpr = CalcStep.get();
+ }
+ }
+
+ return OMPLinearClause::Create(Context, StartLoc, LParenLoc, LinKind, LinLoc,
+ ColonLoc, EndLoc, Vars, Privates, Inits,
+ StepExpr, CalcStepExpr);
+}
+
+static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
+ Expr *NumIterations, Sema &SemaRef,
+ Scope *S) {
+ // Walk the vars and build update/final expressions for the CodeGen.
+ SmallVector<Expr *, 8> Updates;
+ SmallVector<Expr *, 8> Finals;
+ Expr *Step = Clause.getStep();
+ Expr *CalcStep = Clause.getCalcStep();
+ // OpenMP [2.14.3.7, linear clause]
+ // If linear-step is not specified it is assumed to be 1.
+ if (Step == nullptr)
+ Step = SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get();
+ else if (CalcStep)
+ Step = cast<BinaryOperator>(CalcStep)->getLHS();
+ bool HasErrors = false;
+ auto CurInit = Clause.inits().begin();
+ auto CurPrivate = Clause.privates().begin();
+ auto LinKind = Clause.getModifier();
+ for (auto &RefExpr : Clause.varlists()) {
+ Expr *InitExpr = *CurInit;
+
+ // Build privatized reference to the current linear var.
+ auto DE = cast<DeclRefExpr>(RefExpr);
+ Expr *CapturedRef;
+ if (LinKind == OMPC_LINEAR_uval)
+ CapturedRef = cast<VarDecl>(DE->getDecl())->getInit();
+ else
+ CapturedRef =
+ buildDeclRefExpr(SemaRef, cast<VarDecl>(DE->getDecl()),
+ DE->getType().getUnqualifiedType(), DE->getExprLoc(),
+ /*RefersToCapture=*/true);
+
+ // Build update: Var = InitExpr + IV * Step
+ ExprResult Update =
+ BuildCounterUpdate(SemaRef, S, RefExpr->getExprLoc(), *CurPrivate,
+ InitExpr, IV, Step, /* Subtract */ false);
+ Update = SemaRef.ActOnFinishFullExpr(Update.get(), DE->getLocStart(),
+ /*DiscardedValue=*/true);
+
+ // Build final: Var = InitExpr + NumIterations * Step
+ ExprResult Final =
+ BuildCounterUpdate(SemaRef, S, RefExpr->getExprLoc(), CapturedRef,
+ InitExpr, NumIterations, Step, /* Subtract */ false);
+ Final = SemaRef.ActOnFinishFullExpr(Final.get(), DE->getLocStart(),
+ /*DiscardedValue=*/true);
+ if (!Update.isUsable() || !Final.isUsable()) {
+ Updates.push_back(nullptr);
+ Finals.push_back(nullptr);
+ HasErrors = true;
+ } else {
+ Updates.push_back(Update.get());
+ Finals.push_back(Final.get());
+ }
+ ++CurInit, ++CurPrivate;
+ }
+ Clause.setUpdates(Updates);
+ Clause.setFinals(Finals);
+ return HasErrors;
+}
+
+OMPClause *Sema::ActOnOpenMPAlignedClause(
+ ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc) {
+
+ SmallVector<Expr *, 8> Vars;
+ for (auto &RefExpr : VarList) {
+ assert(RefExpr && "NULL expr in OpenMP aligned clause.");
+ if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
+ // It will be analyzed later.
+ Vars.push_back(RefExpr);
+ continue;
+ }
+
+ SourceLocation ELoc = RefExpr->getExprLoc();
+ // OpenMP [2.1, C/C++]
+ // A list item is a variable name.
+ DeclRefExpr *DE = dyn_cast<DeclRefExpr>(RefExpr);
+ if (!DE || !isa<VarDecl>(DE->getDecl())) {
+ Diag(ELoc, diag::err_omp_expected_var_name) << RefExpr->getSourceRange();
+ continue;
+ }
+
+ VarDecl *VD = cast<VarDecl>(DE->getDecl());
+
+ // OpenMP [2.8.1, simd construct, Restrictions]
+ // The type of list items appearing in the aligned clause must be
+ // array, pointer, reference to array, or reference to pointer.
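+ // For example, 'aligned(p : 64)' is valid for 'int *p' or 'int p[8]', but
+ // not for a scalar 'int p'.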
+ QualType QType = VD->getType();
+ QType = QType.getNonReferenceType().getUnqualifiedType().getCanonicalType();
+ const Type *Ty = QType.getTypePtrOrNull();
+ if (!Ty || (!Ty->isDependentType() && !Ty->isArrayType() &&
+ !Ty->isPointerType())) {
+ Diag(ELoc, diag::err_omp_aligned_expected_array_or_ptr)
+ << QType << getLangOpts().CPlusPlus << RefExpr->getSourceRange();
+ bool IsDecl =
+ VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ continue;
+ }
+
+ // OpenMP [2.8.1, simd construct, Restrictions]
+ // A list-item cannot appear in more than one aligned clause.
+ if (DeclRefExpr *PrevRef = DSAStack->addUniqueAligned(VD, DE)) {
+ Diag(ELoc, diag::err_omp_aligned_twice) << RefExpr->getSourceRange();
+ Diag(PrevRef->getExprLoc(), diag::note_omp_explicit_dsa)
+ << getOpenMPClauseName(OMPC_aligned);
+ continue;
+ }
+
+ Vars.push_back(DE);
+ }
+
+ // OpenMP [2.8.1, simd construct, Description]
+ // The parameter of the aligned clause, alignment, must be a constant
+ // positive integer expression.
+ // If no optional parameter is specified, implementation-defined default
+ // alignments for SIMD instructions on the target platforms are assumed.
+ if (Alignment != nullptr) {
+ ExprResult AlignResult =
+ VerifyPositiveIntegerConstantInClause(Alignment, OMPC_aligned);
+ if (AlignResult.isInvalid())
+ return nullptr;
+ Alignment = AlignResult.get();
+ }
+ if (Vars.empty())
+ return nullptr;
+
+ return OMPAlignedClause::Create(Context, StartLoc, LParenLoc, ColonLoc,
+ EndLoc, Vars, Alignment);
+}
+
+OMPClause *Sema::ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ SmallVector<Expr *, 8> Vars;
+ SmallVector<Expr *, 8> SrcExprs;
+ SmallVector<Expr *, 8> DstExprs;
+ SmallVector<Expr *, 8> AssignmentOps;
+ for (auto &RefExpr : VarList) {
+ assert(RefExpr && "NULL expr in OpenMP copyin clause.");
+ if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
+ // It will be analyzed later.
+ Vars.push_back(RefExpr);
+ SrcExprs.push_back(nullptr);
+ DstExprs.push_back(nullptr);
+ AssignmentOps.push_back(nullptr);
+ continue;
+ }
+
+ SourceLocation ELoc = RefExpr->getExprLoc();
+ // OpenMP [2.1, C/C++]
+ // A list item is a variable name.
+ // OpenMP [2.14.4.1, Restrictions, p.1]
+ // A list item that appears in a copyin clause must be threadprivate.
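+ // For example:
+ //   static int x;
+ //   #pragma omp threadprivate(x)
+ //   #pragma omp parallel copyin(x) // OK: 'x' is threadprivate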
+ DeclRefExpr *DE = dyn_cast<DeclRefExpr>(RefExpr);
+ if (!DE || !isa<VarDecl>(DE->getDecl())) {
+ Diag(ELoc, diag::err_omp_expected_var_name) << RefExpr->getSourceRange();
+ continue;
+ }
+
+ Decl *D = DE->getDecl();
+ VarDecl *VD = cast<VarDecl>(D);
+
+ QualType Type = VD->getType();
+ if (Type->isDependentType() || Type->isInstantiationDependentType()) {
+ // It will be analyzed later.
+ Vars.push_back(DE);
+ SrcExprs.push_back(nullptr);
+ DstExprs.push_back(nullptr);
+ AssignmentOps.push_back(nullptr);
+ continue;
+ }
+
+ // OpenMP [2.14.4.1, Restrictions, C/C++, p.1]
+ // A list item that appears in a copyin clause must be threadprivate.
+ if (!DSAStack->isThreadPrivate(VD)) {
+ Diag(ELoc, diag::err_omp_required_access)
+ << getOpenMPClauseName(OMPC_copyin)
+ << getOpenMPDirectiveName(OMPD_threadprivate);
+ continue;
+ }
+
+ // OpenMP [2.14.4.1, Restrictions, C/C++, p.2]
+ // A variable of class type (or array thereof) that appears in a
+ // copyin clause requires an accessible, unambiguous copy assignment
+ // operator for the class type.
+ auto ElemType = Context.getBaseElementType(Type).getNonReferenceType();
+ auto *SrcVD =
+ buildVarDecl(*this, DE->getLocStart(), ElemType.getUnqualifiedType(),
+ ".copyin.src", VD->hasAttrs() ? &VD->getAttrs() : nullptr);
+ auto *PseudoSrcExpr = buildDeclRefExpr(
+ *this, SrcVD, ElemType.getUnqualifiedType(), DE->getExprLoc());
+ auto *DstVD =
+ buildVarDecl(*this, DE->getLocStart(), ElemType, ".copyin.dst",
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
+ auto *PseudoDstExpr =
+ buildDeclRefExpr(*this, DstVD, ElemType, DE->getExprLoc());
+ // For arrays, generate an assignment operation for a single element and
+ // replace it with the original array element in CodeGen.
+ auto AssignmentOp = BuildBinOp(/*S=*/nullptr, DE->getExprLoc(), BO_Assign,
+ PseudoDstExpr, PseudoSrcExpr);
+ if (AssignmentOp.isInvalid())
+ continue;
+ AssignmentOp = ActOnFinishFullExpr(AssignmentOp.get(), DE->getExprLoc(),
+ /*DiscardedValue=*/true);
+ if (AssignmentOp.isInvalid())
+ continue;
+
+ DSAStack->addDSA(VD, DE, OMPC_copyin);
+ Vars.push_back(DE);
+ SrcExprs.push_back(PseudoSrcExpr);
+ DstExprs.push_back(PseudoDstExpr);
+ AssignmentOps.push_back(AssignmentOp.get());
+ }
+
+ if (Vars.empty())
+ return nullptr;
+
+ return OMPCopyinClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars,
+ SrcExprs, DstExprs, AssignmentOps);
+}
+
+OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ SmallVector<Expr *, 8> Vars;
+ SmallVector<Expr *, 8> SrcExprs;
+ SmallVector<Expr *, 8> DstExprs;
+ SmallVector<Expr *, 8> AssignmentOps;
+ for (auto &RefExpr : VarList) {
+ assert(RefExpr && "NULL expr in OpenMP copyprivate clause.");
+ if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
+ // It will be analyzed later.
+ Vars.push_back(RefExpr);
+ SrcExprs.push_back(nullptr);
+ DstExprs.push_back(nullptr);
+ AssignmentOps.push_back(nullptr);
+ continue;
+ }
+
+ SourceLocation ELoc = RefExpr->getExprLoc();
+ // OpenMP [2.1, C/C++]
+ // A list item is a variable name.
+ // OpenMP [2.14.4.2, Restrictions, p.1]
+ // All list items that appear in a copyprivate clause must be either
+ // threadprivate or private in the enclosing context.
+ DeclRefExpr *DE = dyn_cast<DeclRefExpr>(RefExpr);
+ if (!DE || !isa<VarDecl>(DE->getDecl())) {
+ Diag(ELoc, diag::err_omp_expected_var_name) << RefExpr->getSourceRange();
+ continue;
+ }
+
+ Decl *D = DE->getDecl();
+ VarDecl *VD = cast<VarDecl>(D);
+
+ QualType Type = VD->getType();
+ if (Type->isDependentType() || Type->isInstantiationDependentType()) {
+ // It will be analyzed later.
+ Vars.push_back(DE);
+ SrcExprs.push_back(nullptr);
+ DstExprs.push_back(nullptr);
+ AssignmentOps.push_back(nullptr);
+ continue;
+ }
+
+ // OpenMP [2.14.4.2, Restrictions, p.2]
+ // A list item that appears in a copyprivate clause may not appear in a
+ // private or firstprivate clause on the single construct.
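+ // For example, '#pragma omp single private(x) copyprivate(x)' is
+ // diagnosed.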
+ if (!DSAStack->isThreadPrivate(VD)) {
+ auto DVar = DSAStack->getTopDSA(VD, false);
+ if (DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_copyprivate &&
+ DVar.RefExpr) {
+ Diag(ELoc, diag::err_omp_wrong_dsa)
+ << getOpenMPClauseName(DVar.CKind)
+ << getOpenMPClauseName(OMPC_copyprivate);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+
+ // OpenMP [2.14.4.2, Restrictions, p.1]
+ // All list items that appear in a copyprivate clause must be either
+ // threadprivate or private in the enclosing context.
+ if (DVar.CKind == OMPC_unknown) {
+ DVar = DSAStack->getImplicitDSA(VD, false);
+ if (DVar.CKind == OMPC_shared) {
+ Diag(ELoc, diag::err_omp_required_access)
+ << getOpenMPClauseName(OMPC_copyprivate)
+ << "threadprivate or private in the enclosing context";
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+ }
+ }
+
+ // Variably modified types are not supported.
+ if (!Type->isAnyPointerType() && Type->isVariablyModifiedType()) {
+ Diag(ELoc, diag::err_omp_variably_modified_type_not_supported)
+ << getOpenMPClauseName(OMPC_copyprivate) << Type
+ << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
+ bool IsDecl =
+ VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ continue;
+ }
+
+ // OpenMP [2.14.4.2, Restrictions, C/C++, p.1]
+ // A variable of class type (or array thereof) that appears in a
+ // copyprivate clause requires an accessible, unambiguous copy assignment
+ // operator for the class type.
+ Type = Context.getBaseElementType(Type.getNonReferenceType())
+ .getUnqualifiedType();
+ auto *SrcVD =
+ buildVarDecl(*this, DE->getLocStart(), Type, ".copyprivate.src",
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
+ auto *PseudoSrcExpr =
+ buildDeclRefExpr(*this, SrcVD, Type, DE->getExprLoc());
+ auto *DstVD =
+ buildVarDecl(*this, DE->getLocStart(), Type, ".copyprivate.dst",
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
+ auto *PseudoDstExpr =
+ buildDeclRefExpr(*this, DstVD, Type, DE->getExprLoc());
+ auto AssignmentOp = BuildBinOp(/*S=*/nullptr, DE->getExprLoc(), BO_Assign,
+ PseudoDstExpr, PseudoSrcExpr);
+ if (AssignmentOp.isInvalid())
+ continue;
+ AssignmentOp = ActOnFinishFullExpr(AssignmentOp.get(), DE->getExprLoc(),
+ /*DiscardedValue=*/true);
+ if (AssignmentOp.isInvalid())
+ continue;
+
+ // No need to mark vars as copyprivate; they are already threadprivate or
+ // implicitly private.
+ Vars.push_back(DE);
+ SrcExprs.push_back(PseudoSrcExpr);
+ DstExprs.push_back(PseudoDstExpr);
+ AssignmentOps.push_back(AssignmentOp.get());
+ }
+
+ if (Vars.empty())
+ return nullptr;
+
+ return OMPCopyprivateClause::Create(Context, StartLoc, LParenLoc, EndLoc,
+ Vars, SrcExprs, DstExprs, AssignmentOps);
+}
+
+OMPClause *Sema::ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ if (VarList.empty())
+ return nullptr;
+
+ return OMPFlushClause::Create(Context, StartLoc, LParenLoc, EndLoc, VarList);
+}
+
+OMPClause *
+Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
+ SourceLocation DepLoc, SourceLocation ColonLoc,
+ ArrayRef<Expr *> VarList, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc) {
+ if (DSAStack->getCurrentDirective() == OMPD_ordered &&
+ DepKind != OMPC_DEPEND_source && DepKind != OMPC_DEPEND_sink) {
+ Diag(DepLoc, diag::err_omp_unexpected_clause_value)
+ << "'source' or 'sink'" << getOpenMPClauseName(OMPC_depend);
+ return nullptr;
+ }
+ if (DSAStack->getCurrentDirective() != OMPD_ordered &&
+ (DepKind == OMPC_DEPEND_unknown || DepKind == OMPC_DEPEND_source ||
+ DepKind == OMPC_DEPEND_sink)) {
+ unsigned Except[] = {OMPC_DEPEND_source, OMPC_DEPEND_sink};
+ Diag(DepLoc, diag::err_omp_unexpected_clause_value)
+ << getListOfPossibleValues(OMPC_depend, /*First=*/0,
+ /*Last=*/OMPC_DEPEND_unknown, Except)
+ << getOpenMPClauseName(OMPC_depend);
+ return nullptr;
+ }
+ SmallVector<Expr *, 8> Vars;
+ llvm::APSInt DepCounter(/*BitWidth=*/32);
+ llvm::APSInt TotalDepCount(/*BitWidth=*/32);
+ if (DepKind == OMPC_DEPEND_sink) {
+ if (auto *OrderedCountExpr = DSAStack->getParentOrderedRegionParam()) {
+ TotalDepCount = OrderedCountExpr->EvaluateKnownConstInt(Context);
+ TotalDepCount.setIsUnsigned(/*Val=*/true);
+ }
+ }
+ if ((DepKind != OMPC_DEPEND_sink && DepKind != OMPC_DEPEND_source) ||
+ DSAStack->getParentOrderedRegionParam()) {
+ for (auto &RefExpr : VarList) {
+ assert(RefExpr && "NULL expr in OpenMP shared clause.");
+ if (isa<DependentScopeDeclRefExpr>(RefExpr) ||
+ (DepKind == OMPC_DEPEND_sink && CurContext->isDependentContext())) {
+ // It will be analyzed later.
+ Vars.push_back(RefExpr);
+ continue;
+ }
+
+ SourceLocation ELoc = RefExpr->getExprLoc();
+ auto *SimpleExpr = RefExpr->IgnoreParenCasts();
+ if (DepKind == OMPC_DEPEND_sink) {
+ if (DepCounter >= TotalDepCount) {
+ Diag(ELoc, diag::err_omp_depend_sink_unexpected_expr);
+ continue;
+ }
+ ++DepCounter;
+ // OpenMP [2.13.9, Summary]
+ // depend(dependence-type : vec), where dependence-type is:
+ // 'sink' and where vec is the iteration vector, which has the form:
+ // x1 [+- d1], x2 [+- d2], ..., xn [+- dn]
+ // where n is the value specified by the ordered clause in the loop
+ // directive, xi denotes the loop iteration variable of the i-th nested
+ // loop associated with the loop directive, and di is a constant
+ // non-negative integer.
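+ // For example, inside a loop nest with 'ordered(2)' this is valid:
+ //   #pragma omp ordered depend(sink : i - 1, j + 1)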
+ SimpleExpr = SimpleExpr->IgnoreImplicit();
+ auto *DE = dyn_cast<DeclRefExpr>(SimpleExpr);
+ if (!DE) {
+ OverloadedOperatorKind OOK = OO_None;
+ SourceLocation OOLoc;
+ Expr *LHS, *RHS;
+ if (auto *BO = dyn_cast<BinaryOperator>(SimpleExpr)) {
+ OOK = BinaryOperator::getOverloadedOperator(BO->getOpcode());
+ OOLoc = BO->getOperatorLoc();
+ LHS = BO->getLHS()->IgnoreParenImpCasts();
+ RHS = BO->getRHS()->IgnoreParenImpCasts();
+ } else if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(SimpleExpr)) {
+ OOK = OCE->getOperator();
+ OOLoc = OCE->getOperatorLoc();
+ LHS = OCE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
+ RHS = OCE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
+ } else if (auto *MCE = dyn_cast<CXXMemberCallExpr>(SimpleExpr)) {
+ OOK = MCE->getMethodDecl()
+ ->getNameInfo()
+ .getName()
+ .getCXXOverloadedOperator();
+ OOLoc = MCE->getCallee()->getExprLoc();
+ LHS = MCE->getImplicitObjectArgument()->IgnoreParenImpCasts();
+ RHS = MCE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
+ } else {
+ Diag(ELoc, diag::err_omp_depend_sink_wrong_expr);
+ continue;
+ }
+ DE = dyn_cast<DeclRefExpr>(LHS);
+ if (!DE) {
+ Diag(LHS->getExprLoc(),
+ diag::err_omp_depend_sink_expected_loop_iteration)
+ << DSAStack->getParentLoopControlVariable(
+ DepCounter.getZExtValue());
+ continue;
+ }
+ if (OOK != OO_Plus && OOK != OO_Minus) {
+ Diag(OOLoc, diag::err_omp_depend_sink_expected_plus_minus);
+ continue;
+ }
+ ExprResult Res = VerifyPositiveIntegerConstantInClause(
+ RHS, OMPC_depend, /*StrictlyPositive=*/false);
+ if (Res.isInvalid())
+ continue;
+ }
+ auto *VD = dyn_cast<VarDecl>(DE->getDecl());
+ if (!CurContext->isDependentContext() &&
+ DSAStack->getParentOrderedRegionParam() &&
+ (!VD || DepCounter != DSAStack->isParentLoopControlVariable(VD))) {
+ Diag(DE->getExprLoc(),
+ diag::err_omp_depend_sink_expected_loop_iteration)
+ << DSAStack->getParentLoopControlVariable(
+ DepCounter.getZExtValue());
+ continue;
+ }
+ } else {
+ // OpenMP [2.11.1.1, Restrictions, p.3]
+ // A variable that is part of another variable (such as a field of a
+ // structure) but is not an array element or an array section cannot
+ // appear in a depend clause.
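+ // For example, 'depend(in : s.field)' is rejected, while 'depend(in : a[i])'
+ // and 'depend(in : a[0:n])' are accepted.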
+ auto *DE = dyn_cast<DeclRefExpr>(SimpleExpr);
+ auto *ASE = dyn_cast<ArraySubscriptExpr>(SimpleExpr);
+ auto *OASE = dyn_cast<OMPArraySectionExpr>(SimpleExpr);
+ if (!RefExpr->IgnoreParenImpCasts()->isLValue() ||
+ (!ASE && !DE && !OASE) || (DE && !isa<VarDecl>(DE->getDecl())) ||
+ (ASE && !ASE->getBase()->getType()->isAnyPointerType() &&
+ !ASE->getBase()->getType()->isArrayType())) {
+ Diag(ELoc, diag::err_omp_expected_var_name_or_array_item)
+ << RefExpr->getSourceRange();
+ continue;
+ }
+ }
+
+ Vars.push_back(RefExpr->IgnoreParenImpCasts());
+ }
+
+ if (!CurContext->isDependentContext() && DepKind == OMPC_DEPEND_sink &&
+ TotalDepCount > VarList.size() &&
+ DSAStack->getParentOrderedRegionParam()) {
+ Diag(EndLoc, diag::err_omp_depend_sink_expected_loop_iteration)
+ << DSAStack->getParentLoopControlVariable(VarList.size() + 1);
+ }
+ if (DepKind != OMPC_DEPEND_source && DepKind != OMPC_DEPEND_sink &&
+ Vars.empty())
+ return nullptr;
+ }
+
+ return OMPDependClause::Create(Context, StartLoc, LParenLoc, EndLoc, DepKind,
+ DepLoc, ColonLoc, Vars);
+}
+
+OMPClause *Sema::ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ Expr *ValExpr = Device;
+
+ // OpenMP [2.9.1, Restrictions]
+ // The device expression must evaluate to a non-negative integer value.
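+ // For example, 'device(0)' is accepted; a provably negative value such as
+ // 'device(-1)' is diagnosed.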
+ if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_device,
+ /*StrictlyPositive=*/false))
+ return nullptr;
+
+ return new (Context) OMPDeviceClause(ValExpr, StartLoc, LParenLoc, EndLoc);
+}
+
+static bool IsCXXRecordForMappable(Sema &SemaRef, SourceLocation Loc,
+ DSAStackTy *Stack, CXXRecordDecl *RD) {
+ if (!RD || RD->isInvalidDecl())
+ return true;
+
+ if (auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(RD))
+ if (auto *CTD = CTSD->getSpecializedTemplate())
+ RD = CTD->getTemplatedDecl();
+ auto QTy = SemaRef.Context.getRecordType(RD);
+ if (RD->isDynamicClass()) {
+ SemaRef.Diag(Loc, diag::err_omp_not_mappable_type) << QTy;
+ SemaRef.Diag(RD->getLocation(), diag::note_omp_polymorphic_in_target);
+ return false;
+ }
+ auto *DC = RD;
+ bool IsCorrect = true;
+ for (auto *I : DC->decls()) {
+ if (I) {
+ if (auto *MD = dyn_cast<CXXMethodDecl>(I)) {
+ if (MD->isStatic()) {
+ SemaRef.Diag(Loc, diag::err_omp_not_mappable_type) << QTy;
+ SemaRef.Diag(MD->getLocation(),
+ diag::note_omp_static_member_in_target);
+ IsCorrect = false;
+ }
+ } else if (auto *VD = dyn_cast<VarDecl>(I)) {
+ if (VD->isStaticDataMember()) {
+ SemaRef.Diag(Loc, diag::err_omp_not_mappable_type) << QTy;
+ SemaRef.Diag(VD->getLocation(),
+ diag::note_omp_static_member_in_target);
+ IsCorrect = false;
+ }
+ }
+ }
+ }
+
+ for (auto &I : RD->bases()) {
+ if (!IsCXXRecordForMappable(SemaRef, I.getLocStart(), Stack,
+ I.getType()->getAsCXXRecordDecl()))
+ IsCorrect = false;
+ }
+ return IsCorrect;
+}
+
+static bool CheckTypeMappable(SourceLocation SL, SourceRange SR, Sema &SemaRef,
+ DSAStackTy *Stack, QualType QTy) {
+ NamedDecl *ND;
+ if (QTy->isIncompleteType(&ND)) {
+ SemaRef.Diag(SL, diag::err_incomplete_type) << QTy << SR;
+ return false;
+ } else if (CXXRecordDecl *RD = dyn_cast_or_null<CXXRecordDecl>(ND)) {
+ if (!RD->isInvalidDecl() &&
+ !IsCXXRecordForMappable(SemaRef, SL, Stack, RD))
+ return false;
+ }
+ return true;
+}
+
+OMPClause *Sema::ActOnOpenMPMapClause(
+ OpenMPMapClauseKind MapTypeModifier, OpenMPMapClauseKind MapType,
+ SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) {
+ SmallVector<Expr *, 4> Vars;
+
+ for (auto &RE : VarList) {
+ assert(RE && "Null expr in omp map");
+ if (isa<DependentScopeDeclRefExpr>(RE)) {
+ // It will be analyzed later.
+ Vars.push_back(RE);
+ continue;
+ }
+ SourceLocation ELoc = RE->getExprLoc();
+
+ // OpenMP [2.14.5, Restrictions]
+ // A variable that is part of another variable (such as a field of a
+ // structure) but is not an array element or an array section cannot appear
+ // in a map clause.
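+ // For example, 'map(to : s.field)' is rejected, while 'map(to : s)',
+ // 'map(to : a[i])' and 'map(to : a[0:n])' are accepted.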
+ auto *VE = RE->IgnoreParenLValueCasts();
+
+ if (VE->isValueDependent() || VE->isTypeDependent() ||
+ VE->isInstantiationDependent() ||
+ VE->containsUnexpandedParameterPack()) {
+ // It will be analyzed later.
+ Vars.push_back(RE);
+ continue;
+ }
+
+ auto *SimpleExpr = RE->IgnoreParenCasts();
+ auto *DE = dyn_cast<DeclRefExpr>(SimpleExpr);
+ auto *ASE = dyn_cast<ArraySubscriptExpr>(SimpleExpr);
+ auto *OASE = dyn_cast<OMPArraySectionExpr>(SimpleExpr);
+
+ if (!RE->IgnoreParenImpCasts()->isLValue() ||
+ (!OASE && !ASE && !DE) ||
+ (DE && !isa<VarDecl>(DE->getDecl())) ||
+ (ASE && !ASE->getBase()->getType()->isAnyPointerType() &&
+ !ASE->getBase()->getType()->isArrayType())) {
+ Diag(ELoc, diag::err_omp_expected_var_name_or_array_item)
+ << RE->getSourceRange();
+ continue;
+ }
+
+ Decl *D = nullptr;
+ if (DE) {
+ D = DE->getDecl();
+ } else if (ASE) {
+ auto *B = ASE->getBase()->IgnoreParenCasts();
+ D = dyn_cast<DeclRefExpr>(B)->getDecl();
+ } else if (OASE) {
+ auto *B = OASE->getBase();
+ D = dyn_cast<DeclRefExpr>(B)->getDecl();
+ }
+ assert(D && "Null decl on map clause.");
+ auto *VD = cast<VarDecl>(D);
+
+ // OpenMP [2.14.5, Restrictions, p.8]
+ // threadprivate variables cannot appear in a map clause.
+ if (DSAStack->isThreadPrivate(VD)) {
+ auto DVar = DSAStack->getTopDSA(VD, false);
+ Diag(ELoc, diag::err_omp_threadprivate_in_map);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+
+ // OpenMP [2.14.5, Restrictions, p.2]
+ // At most one list item can be an array item derived from a given variable
+ // in map clauses of the same construct.
+ // OpenMP [2.14.5, Restrictions, p.3]
+ // List items of map clauses in the same construct must not share original
+ // storage.
+ // OpenMP [2.14.5, Restrictions, C/C++, p.2]
+ // A variable for which the type is pointer, reference to array, or
+ // reference to pointer and an array section derived from that variable
+ // must not appear as list items of map clauses of the same construct.
+ DSAStackTy::MapInfo MI = DSAStack->IsMappedInCurrentRegion(VD);
+ if (MI.RefExpr) {
+ Diag(ELoc, diag::err_omp_map_shared_storage) << ELoc;
+ Diag(MI.RefExpr->getExprLoc(), diag::note_used_here)
+ << MI.RefExpr->getSourceRange();
+ continue;
+ }
+
+ // OpenMP [2.14.5, Restrictions, C/C++, p.3,4]
+ // A variable for which the type is pointer, reference to array, or
+ // reference to pointer must not appear as a list item if the enclosing
+ // device data environment already contains an array section derived from
+ // that variable.
+ // An array section derived from a variable for which the type is pointer,
+ // reference to array, or reference to pointer must not appear as a list
+ // item if the enclosing device data environment already contains that
+ // variable.
+ QualType Type = VD->getType();
+ MI = DSAStack->getMapInfoForVar(VD);
+ if (MI.RefExpr && (isa<DeclRefExpr>(MI.RefExpr->IgnoreParenLValueCasts()) !=
+ isa<DeclRefExpr>(VE)) &&
+ (Type->isPointerType() || Type->isReferenceType())) {
+ Diag(ELoc, diag::err_omp_map_shared_storage) << ELoc;
+ Diag(MI.RefExpr->getExprLoc(), diag::note_used_here)
+ << MI.RefExpr->getSourceRange();
+ continue;
+ }
+
+ // OpenMP [2.14.5, Restrictions, C/C++, p.7]
+ // A list item must have a mappable type.
+ if (!CheckTypeMappable(VE->getExprLoc(), VE->getSourceRange(), *this,
+ DSAStack, Type))
+ continue;
+
+ Vars.push_back(RE);
+ MI.RefExpr = RE;
+ DSAStack->addMapInfoForVar(VD, MI);
+ }
+ if (Vars.empty())
+ return nullptr;
+
+ return OMPMapClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars,
+ MapTypeModifier, MapType, MapLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPNumTeamsClause(Expr *NumTeams,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ Expr *ValExpr = NumTeams;
+
+ // OpenMP [teams Construct, Restrictions]
+ // The num_teams expression must evaluate to a positive integer value.
+ if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_num_teams,
+ /*StrictlyPositive=*/true))
+ return nullptr;
+
+ return new (Context) OMPNumTeamsClause(ValExpr, StartLoc, LParenLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ Expr *ValExpr = ThreadLimit;
+
+ // OpenMP [teams Construct, Restrictions]
+ // The thread_limit expression must evaluate to a positive integer value.
+ if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_thread_limit,
+ /*StrictlyPositive=*/true))
+ return nullptr;
+
+ return new (Context) OMPThreadLimitClause(ValExpr, StartLoc, LParenLoc,
+ EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPPriorityClause(Expr *Priority,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ Expr *ValExpr = Priority;
+
+ // OpenMP [2.9.1, task Construct]
+ // The priority-value is a non-negative numerical scalar expression.
+ if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_priority,
+ /*StrictlyPositive=*/false))
+ return nullptr;
+
+ return new (Context) OMPPriorityClause(ValExpr, StartLoc, LParenLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPGrainsizeClause(Expr *Grainsize,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ Expr *ValExpr = Grainsize;
+
+ // OpenMP [2.9.2, taskloop Construct]
+ // The parameter of the grainsize clause must be a positive integer
+ // expression.
+ if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_grainsize,
+ /*StrictlyPositive=*/true))
+ return nullptr;
+
+ return new (Context) OMPGrainsizeClause(ValExpr, StartLoc, LParenLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPNumTasksClause(Expr *NumTasks,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ Expr *ValExpr = NumTasks;
+
+ // OpenMP [2.9.2, taskloop Construct]
+ // The parameter of the num_tasks clause must be a positive integer
+ // expression.
+ if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_num_tasks,
+ /*StrictlyPositive=*/true))
+ return nullptr;
+
+ return new (Context) OMPNumTasksClause(ValExpr, StartLoc, LParenLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ // OpenMP [2.13.2, critical construct, Description]
+ // ... where hint-expression is an integer constant expression that evaluates
+ // to a valid lock hint.
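+ // For example: '#pragma omp critical (name) hint(omp_lock_hint_contended)'.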
+ ExprResult HintExpr = VerifyPositiveIntegerConstantInClause(Hint, OMPC_hint);
+ if (HintExpr.isInvalid())
+ return nullptr;
+ return new (Context)
+ OMPHintClause(HintExpr.get(), StartLoc, LParenLoc, EndLoc);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp
new file mode 100644
index 0000000..e0c10e4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp
@@ -0,0 +1,12920 @@
+//===--- SemaOverload.cpp - C++ Overloading -------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Sema routines for C++ overloading.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/Overload.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/TypeOrdering.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Template.h"
+#include "clang/Sema/TemplateDeduction.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+#include <algorithm>
+#include <cstdlib>
+
+using namespace clang;
+using namespace sema;
+
+static bool functionHasPassObjectSizeParams(const FunctionDecl *FD) {
+ return std::any_of(FD->param_begin(), FD->param_end(),
+ std::mem_fn(&ParmVarDecl::hasAttr<PassObjectSizeAttr>));
+}
+
+/// A convenience routine for creating a decayed reference to a function.
+static ExprResult
+CreateFunctionRefExpr(Sema &S, FunctionDecl *Fn, NamedDecl *FoundDecl,
+ bool HadMultipleCandidates,
+ SourceLocation Loc = SourceLocation(),
+ const DeclarationNameLoc &LocInfo = DeclarationNameLoc()){
+ if (S.DiagnoseUseOfDecl(FoundDecl, Loc))
+ return ExprError();
+ // If FoundDecl is different from Fn (such as if one is a template
+ // and the other a specialization), make sure DiagnoseUseOfDecl is
+ // called on both.
+ // FIXME: This would be more comprehensively addressed by modifying
+ // DiagnoseUseOfDecl to accept both the FoundDecl and the decl
+ // being used.
+ if (FoundDecl != Fn && S.DiagnoseUseOfDecl(Fn, Loc))
+ return ExprError();
+ DeclRefExpr *DRE = new (S.Context) DeclRefExpr(Fn, false, Fn->getType(),
+ VK_LValue, Loc, LocInfo);
+ if (HadMultipleCandidates)
+ DRE->setHadMultipleCandidates(true);
+
+ S.MarkDeclRefReferenced(DRE);
+ return S.ImpCastExprToType(DRE, S.Context.getPointerType(DRE->getType()),
+ CK_FunctionToPointerDecay);
+}
+
+static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
+ bool InOverloadResolution,
+ StandardConversionSequence &SCS,
+ bool CStyle,
+ bool AllowObjCWritebackConversion);
+
+static bool IsTransparentUnionStandardConversion(Sema &S, Expr* From,
+ QualType &ToType,
+ bool InOverloadResolution,
+ StandardConversionSequence &SCS,
+ bool CStyle);
+static OverloadingResult
+IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
+ UserDefinedConversionSequence& User,
+ OverloadCandidateSet& Conversions,
+ bool AllowExplicit,
+ bool AllowObjCConversionOnExplicit);
+
+
+static ImplicitConversionSequence::CompareKind
+CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
+ const StandardConversionSequence& SCS1,
+ const StandardConversionSequence& SCS2);
+
+static ImplicitConversionSequence::CompareKind
+CompareQualificationConversions(Sema &S,
+ const StandardConversionSequence& SCS1,
+ const StandardConversionSequence& SCS2);
+
+static ImplicitConversionSequence::CompareKind
+CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
+ const StandardConversionSequence& SCS1,
+ const StandardConversionSequence& SCS2);
+
+/// GetConversionRank - Retrieve the implicit conversion rank
+/// corresponding to the given implicit conversion kind.
+ImplicitConversionRank clang::GetConversionRank(ImplicitConversionKind Kind) {
+ static const ImplicitConversionRank
+ Rank[(int)ICK_Num_Conversion_Kinds] = {
+ ICR_Exact_Match,
+ ICR_Exact_Match,
+ ICR_Exact_Match,
+ ICR_Exact_Match,
+ ICR_Exact_Match,
+ ICR_Exact_Match,
+ ICR_Promotion,
+ ICR_Promotion,
+ ICR_Promotion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Complex_Real_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Writeback_Conversion,
+ ICR_Exact_Match, // NOTE(gbiv): This may not be completely right --
+ // it was omitted by the patch that added
+ // ICK_Zero_Event_Conversion
+ ICR_C_Conversion
+ };
+ return Rank[(int)Kind];
+}
+
+/// GetImplicitConversionName - Return the name of this kind of
+/// implicit conversion.
+static const char* GetImplicitConversionName(ImplicitConversionKind Kind) {
+ static const char* const Name[(int)ICK_Num_Conversion_Kinds] = {
+ "No conversion",
+ "Lvalue-to-rvalue",
+ "Array-to-pointer",
+ "Function-to-pointer",
+ "Noreturn adjustment",
+ "Qualification",
+ "Integral promotion",
+ "Floating point promotion",
+ "Complex promotion",
+ "Integral conversion",
+ "Floating conversion",
+ "Complex conversion",
+ "Floating-integral conversion",
+ "Pointer conversion",
+ "Pointer-to-member conversion",
+ "Boolean conversion",
+ "Compatible-types conversion",
+ "Derived-to-base conversion",
+ "Vector conversion",
+ "Vector splat",
+ "Complex-real conversion",
+ "Block Pointer conversion",
+ "Transparent Union Conversion",
+ "Writeback conversion",
+ "OpenCL Zero Event Conversion",
+ "C specific type conversion"
+ };
+ return Name[Kind];
+}
+
+/// StandardConversionSequence - Set the standard conversion
+/// sequence to the identity conversion.
+void StandardConversionSequence::setAsIdentityConversion() {
+ First = ICK_Identity;
+ Second = ICK_Identity;
+ Third = ICK_Identity;
+ DeprecatedStringLiteralToCharPtr = false;
+ QualificationIncludesObjCLifetime = false;
+ ReferenceBinding = false;
+ DirectBinding = false;
+ IsLvalueReference = true;
+ BindsToFunctionLvalue = false;
+ BindsToRvalue = false;
+ BindsImplicitObjectArgumentWithoutRefQualifier = false;
+ ObjCLifetimeConversionBinding = false;
+ CopyConstructor = nullptr;
+}
+
+/// getRank - Retrieve the rank of this standard conversion sequence
+/// (C++ 13.3.3.1.1p3). The rank is the largest rank of each of the
+/// implicit conversions.
+ImplicitConversionRank StandardConversionSequence::getRank() const {
+ ImplicitConversionRank Rank = ICR_Exact_Match;
+ if (GetConversionRank(First) > Rank)
+ Rank = GetConversionRank(First);
+ if (GetConversionRank(Second) > Rank)
+ Rank = GetConversionRank(Second);
+ if (GetConversionRank(Third) > Rank)
+ Rank = GetConversionRank(Third);
+ return Rank;
+}
+
+/// isPointerConversionToBool - Determines whether this conversion is
+/// a conversion of a pointer or pointer-to-member to bool. This is
+/// used as part of the ranking of standard conversion sequences
+/// (C++ 13.3.3.2p4).
+bool StandardConversionSequence::isPointerConversionToBool() const {
+ // Note that FromType has not necessarily been transformed by the
+ // array-to-pointer or function-to-pointer implicit conversions, so
+ // check for their presence as well as checking whether FromType is
+ // a pointer.
+ if (getToType(1)->isBooleanType() &&
+ (getFromType()->isPointerType() ||
+ getFromType()->isObjCObjectPointerType() ||
+ getFromType()->isBlockPointerType() ||
+ getFromType()->isNullPtrType() ||
+ First == ICK_Array_To_Pointer || First == ICK_Function_To_Pointer))
+ return true;
+
+ return false;
+}
+
+/// isPointerConversionToVoidPointer - Determines whether this
+/// conversion is a conversion of a pointer to a void pointer. This is
+/// used as part of the ranking of standard conversion sequences (C++
+/// 13.3.3.2p4).
+bool
+StandardConversionSequence::
+isPointerConversionToVoidPointer(ASTContext& Context) const {
+ QualType FromType = getFromType();
+ QualType ToType = getToType(1);
+
+ // Note that FromType has not necessarily been transformed by the
+ // array-to-pointer implicit conversion, so check for its presence
+ // and redo the conversion to get a pointer.
+ if (First == ICK_Array_To_Pointer)
+ FromType = Context.getArrayDecayedType(FromType);
+
+ if (Second == ICK_Pointer_Conversion && FromType->isAnyPointerType())
+ if (const PointerType* ToPtrType = ToType->getAs<PointerType>())
+ return ToPtrType->getPointeeType()->isVoidType();
+
+ return false;
+}
+
+/// Skip any implicit casts which could be either part of a narrowing conversion
+/// or after one in an implicit conversion.
+static const Expr *IgnoreNarrowingConversion(const Expr *Converted) {
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Converted)) {
+ switch (ICE->getCastKind()) {
+ case CK_NoOp:
+ case CK_IntegralCast:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ Converted = ICE->getSubExpr();
+ continue;
+
+ default:
+ return Converted;
+ }
+ }
+
+ return Converted;
+}
+
+/// Check if this standard conversion sequence represents a narrowing
+/// conversion, according to C++11 [dcl.init.list]p7.
+///
+/// \param Ctx The AST context.
+/// \param Converted The result of applying this standard conversion sequence.
+/// \param ConstantValue If this is an NK_Constant_Narrowing conversion, the
+/// value of the expression prior to the narrowing conversion.
+/// \param ConstantType If this is an NK_Constant_Narrowing conversion, the
+/// type of the expression prior to the narrowing conversion.
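+///
+/// For example, 'int i{1.0};' and 'char c{300};' involve narrowing
+/// conversions, while 'double d{1};' does not.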
+NarrowingKind
+StandardConversionSequence::getNarrowingKind(ASTContext &Ctx,
+ const Expr *Converted,
+ APValue &ConstantValue,
+ QualType &ConstantType) const {
+ assert(Ctx.getLangOpts().CPlusPlus && "narrowing check outside C++");
+
+ // C++11 [dcl.init.list]p7:
+ // A narrowing conversion is an implicit conversion ...
+ QualType FromType = getToType(0);
+ QualType ToType = getToType(1);
+ switch (Second) {
+ // 'bool' is an integral type; dispatch to the right place to handle it.
+ case ICK_Boolean_Conversion:
+ if (FromType->isRealFloatingType())
+ goto FloatingIntegralConversion;
+ if (FromType->isIntegralOrUnscopedEnumerationType())
+ goto IntegralConversion;
+ // Boolean conversions can be from pointers and pointers to members
+ // [conv.bool], and those aren't considered narrowing conversions.
+ return NK_Not_Narrowing;
+
+ // -- from a floating-point type to an integer type, or
+ //
+ // -- from an integer type or unscoped enumeration type to a floating-point
+ // type, except where the source is a constant expression and the actual
+ // value after conversion will fit into the target type and will produce
+ // the original value when converted back to the original type, or
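+ // For example, 'float f{16777217};' is a constant narrowing, since
+ // 2^24 + 1 cannot be represented exactly in 'float', while
+ // 'float g{16777216};' is not.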
+ case ICK_Floating_Integral:
+ FloatingIntegralConversion:
+ if (FromType->isRealFloatingType() && ToType->isIntegralType(Ctx)) {
+ return NK_Type_Narrowing;
+ } else if (FromType->isIntegralType(Ctx) && ToType->isRealFloatingType()) {
+ llvm::APSInt IntConstantValue;
+ const Expr *Initializer = IgnoreNarrowingConversion(Converted);
+ if (Initializer &&
+ Initializer->isIntegerConstantExpr(IntConstantValue, Ctx)) {
+ // Convert the integer to the floating type.
+ llvm::APFloat Result(Ctx.getFloatTypeSemantics(ToType));
+ Result.convertFromAPInt(IntConstantValue, IntConstantValue.isSigned(),
+ llvm::APFloat::rmNearestTiesToEven);
+ // And back.
+ llvm::APSInt ConvertedValue = IntConstantValue;
+ bool ignored;
+ Result.convertToInteger(ConvertedValue,
+ llvm::APFloat::rmTowardZero, &ignored);
+ // If the resulting value is different, this was a narrowing conversion.
+ if (IntConstantValue != ConvertedValue) {
+ ConstantValue = APValue(IntConstantValue);
+ ConstantType = Initializer->getType();
+ return NK_Constant_Narrowing;
+ }
+ } else {
+ // Such conversions on variables are always narrowing.
+ return NK_Variable_Narrowing;
+ }
+ }
+ return NK_Not_Narrowing;
+
+ // -- from long double to double or float, or from double to float, except
+ // where the source is a constant expression and the actual value after
+ // conversion is within the range of values that can be represented (even
+ // if it cannot be represented exactly), or
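+ // For example, 'float f{1e300};' is a constant narrowing because the value
+ // is out of 'float' range; 'float g{0.1};' is not, even though the value is
+ // not exactly representable.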
+ case ICK_Floating_Conversion:
+ if (FromType->isRealFloatingType() && ToType->isRealFloatingType() &&
+ Ctx.getFloatingTypeOrder(FromType, ToType) == 1) {
+ // FromType is larger than ToType.
+ const Expr *Initializer = IgnoreNarrowingConversion(Converted);
+ if (Initializer->isCXX11ConstantExpr(Ctx, &ConstantValue)) {
+ // Constant!
+ assert(ConstantValue.isFloat());
+ llvm::APFloat FloatVal = ConstantValue.getFloat();
+ // Convert the source value into the target type.
+ bool ignored;
+ llvm::APFloat::opStatus ConvertStatus = FloatVal.convert(
+ Ctx.getFloatTypeSemantics(ToType),
+ llvm::APFloat::rmNearestTiesToEven, &ignored);
+ // If there was no overflow, the source value is within the range of
+ // values that can be represented.
+ if (ConvertStatus & llvm::APFloat::opOverflow) {
+ ConstantType = Initializer->getType();
+ return NK_Constant_Narrowing;
+ }
+ } else {
+ return NK_Variable_Narrowing;
+ }
+ }
+ return NK_Not_Narrowing;
+
+ // -- from an integer type or unscoped enumeration type to an integer type
+ // that cannot represent all the values of the original type, except where
+ // the source is a constant expression and the actual value after
+ // conversion will fit into the target type and will produce the original
+ // value when converted back to the original type.
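+ // For example, 'unsigned u{-1};' and 'short s{100000};' are constant
+ // narrowings, while 'short t{100};' is not.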
+ case ICK_Integral_Conversion:
+ IntegralConversion: {
+ assert(FromType->isIntegralOrUnscopedEnumerationType());
+ assert(ToType->isIntegralOrUnscopedEnumerationType());
+ const bool FromSigned = FromType->isSignedIntegerOrEnumerationType();
+ const unsigned FromWidth = Ctx.getIntWidth(FromType);
+ const bool ToSigned = ToType->isSignedIntegerOrEnumerationType();
+ const unsigned ToWidth = Ctx.getIntWidth(ToType);
+
+ if (FromWidth > ToWidth ||
+ (FromWidth == ToWidth && FromSigned != ToSigned) ||
+ (FromSigned && !ToSigned)) {
+ // Not all values of FromType can be represented in ToType.
+ llvm::APSInt InitializerValue;
+ const Expr *Initializer = IgnoreNarrowingConversion(Converted);
+ if (!Initializer->isIntegerConstantExpr(InitializerValue, Ctx)) {
+ // Such conversions on variables are always narrowing.
+ return NK_Variable_Narrowing;
+ }
+ bool Narrowing = false;
+ if (FromWidth < ToWidth) {
+ // Negative -> unsigned is narrowing. Otherwise, more bits is never
+ // narrowing.
+ if (InitializerValue.isSigned() && InitializerValue.isNegative())
+ Narrowing = true;
+ } else {
+ // Add a bit to the InitializerValue so we don't have to worry about
+ // signed vs. unsigned comparisons.
+ InitializerValue = InitializerValue.extend(
+ InitializerValue.getBitWidth() + 1);
+ // Convert the initializer to and from the target width and signed-ness.
+ llvm::APSInt ConvertedValue = InitializerValue;
+ ConvertedValue = ConvertedValue.trunc(ToWidth);
+ ConvertedValue.setIsSigned(ToSigned);
+ ConvertedValue = ConvertedValue.extend(InitializerValue.getBitWidth());
+ ConvertedValue.setIsSigned(InitializerValue.isSigned());
+ // If the result is different, this was a narrowing conversion.
+ if (ConvertedValue != InitializerValue)
+ Narrowing = true;
+ }
+ if (Narrowing) {
+ ConstantType = Initializer->getType();
+ ConstantValue = APValue(InitializerValue);
+ return NK_Constant_Narrowing;
+ }
+ }
+ return NK_Not_Narrowing;
+ }
+
+ default:
+ // Other kinds of conversions are not narrowings.
+ return NK_Not_Narrowing;
+ }
+}
+
+/// dump - Print this standard conversion sequence to standard
+/// error. Useful for debugging overloading issues.
+void StandardConversionSequence::dump() const {
+ raw_ostream &OS = llvm::errs();
+ bool PrintedSomething = false;
+ if (First != ICK_Identity) {
+ OS << GetImplicitConversionName(First);
+ PrintedSomething = true;
+ }
+
+ if (Second != ICK_Identity) {
+ if (PrintedSomething) {
+ OS << " -> ";
+ }
+ OS << GetImplicitConversionName(Second);
+
+ if (CopyConstructor) {
+ OS << " (by copy constructor)";
+ } else if (DirectBinding) {
+ OS << " (direct reference binding)";
+ } else if (ReferenceBinding) {
+ OS << " (reference binding)";
+ }
+ PrintedSomething = true;
+ }
+
+ if (Third != ICK_Identity) {
+ if (PrintedSomething) {
+ OS << " -> ";
+ }
+ OS << GetImplicitConversionName(Third);
+ PrintedSomething = true;
+ }
+
+ if (!PrintedSomething) {
+ OS << "No conversions required";
+ }
+}
+
+/// dump - Print this user-defined conversion sequence to standard
+/// error. Useful for debugging overloading issues.
+void UserDefinedConversionSequence::dump() const {
+ raw_ostream &OS = llvm::errs();
+ if (Before.First || Before.Second || Before.Third) {
+ Before.dump();
+ OS << " -> ";
+ }
+ if (ConversionFunction)
+ OS << '\'' << *ConversionFunction << '\'';
+ else
+ OS << "aggregate initialization";
+ if (After.First || After.Second || After.Third) {
+ OS << " -> ";
+ After.dump();
+ }
+}
+
+/// dump - Print this implicit conversion sequence to standard
+/// error. Useful for debugging overloading issues.
+void ImplicitConversionSequence::dump() const {
+ raw_ostream &OS = llvm::errs();
+ if (isStdInitializerListElement())
+ OS << "Worst std::initializer_list element conversion: ";
+ switch (ConversionKind) {
+ case StandardConversion:
+ OS << "Standard conversion: ";
+ Standard.dump();
+ break;
+ case UserDefinedConversion:
+ OS << "User-defined conversion: ";
+ UserDefined.dump();
+ break;
+ case EllipsisConversion:
+ OS << "Ellipsis conversion";
+ break;
+ case AmbiguousConversion:
+ OS << "Ambiguous conversion";
+ break;
+ case BadConversion:
+ OS << "Bad conversion";
+ break;
+ }
+
+ OS << "\n";
+}
+
+void AmbiguousConversionSequence::construct() {
+ new (&conversions()) ConversionSet();
+}
+
+void AmbiguousConversionSequence::destruct() {
+ conversions().~ConversionSet();
+}
+
+void
+AmbiguousConversionSequence::copyFrom(const AmbiguousConversionSequence &O) {
+ FromTypePtr = O.FromTypePtr;
+ ToTypePtr = O.ToTypePtr;
+ new (&conversions()) ConversionSet(O.conversions());
+}
+
+namespace {
+ // Structure used by DeductionFailureInfo to store
+ // template argument information.
+ struct DFIArguments {
+ TemplateArgument FirstArg;
+ TemplateArgument SecondArg;
+ };
+ // Structure used by DeductionFailureInfo to store
+ // template parameter and template argument information.
+ struct DFIParamWithArguments : DFIArguments {
+ TemplateParameter Param;
+ };
+ // Structure used by DeductionFailureInfo to store template argument
+ // information and the index of the problematic call argument.
+ struct DFIDeducedMismatchArgs : DFIArguments {
+ TemplateArgumentList *TemplateArgs;
+ unsigned CallArgIndex;
+ };
+}
+
+/// \brief Convert from Sema's representation of template deduction information
+/// to the form used in overload-candidate information.
+DeductionFailureInfo
+clang::MakeDeductionFailureInfo(ASTContext &Context,
+ Sema::TemplateDeductionResult TDK,
+ TemplateDeductionInfo &Info) {
+ DeductionFailureInfo Result;
+ Result.Result = static_cast<unsigned>(TDK);
+ Result.HasDiagnostic = false;
+ switch (TDK) {
+ case Sema::TDK_Success:
+ case Sema::TDK_Invalid:
+ case Sema::TDK_InstantiationDepth:
+ case Sema::TDK_TooManyArguments:
+ case Sema::TDK_TooFewArguments:
+ case Sema::TDK_MiscellaneousDeductionFailure:
+ Result.Data = nullptr;
+ break;
+
+ case Sema::TDK_Incomplete:
+ case Sema::TDK_InvalidExplicitArguments:
+ Result.Data = Info.Param.getOpaqueValue();
+ break;
+
+ case Sema::TDK_DeducedMismatch: {
+ // FIXME: Should allocate from normal heap so that we can free this later.
+ auto *Saved = new (Context) DFIDeducedMismatchArgs;
+ Saved->FirstArg = Info.FirstArg;
+ Saved->SecondArg = Info.SecondArg;
+ Saved->TemplateArgs = Info.take();
+ Saved->CallArgIndex = Info.CallArgIndex;
+ Result.Data = Saved;
+ break;
+ }
+
+ case Sema::TDK_NonDeducedMismatch: {
+ // FIXME: Should allocate from normal heap so that we can free this later.
+ DFIArguments *Saved = new (Context) DFIArguments;
+ Saved->FirstArg = Info.FirstArg;
+ Saved->SecondArg = Info.SecondArg;
+ Result.Data = Saved;
+ break;
+ }
+
+ case Sema::TDK_Inconsistent:
+ case Sema::TDK_Underqualified: {
+ // FIXME: Should allocate from normal heap so that we can free this later.
+ DFIParamWithArguments *Saved = new (Context) DFIParamWithArguments;
+ Saved->Param = Info.Param;
+ Saved->FirstArg = Info.FirstArg;
+ Saved->SecondArg = Info.SecondArg;
+ Result.Data = Saved;
+ break;
+ }
+
+ case Sema::TDK_SubstitutionFailure:
+ Result.Data = Info.take();
+ if (Info.hasSFINAEDiagnostic()) {
+ PartialDiagnosticAt *Diag = new (Result.Diagnostic) PartialDiagnosticAt(
+ SourceLocation(), PartialDiagnostic::NullDiagnostic());
+ Info.takeSFINAEDiagnostic(*Diag);
+ Result.HasDiagnostic = true;
+ }
+ break;
+
+ case Sema::TDK_FailedOverloadResolution:
+ Result.Data = Info.Expression;
+ break;
+ }
+
+ return Result;
+}
+
+void DeductionFailureInfo::Destroy() {
+ switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
+ case Sema::TDK_Success:
+ case Sema::TDK_Invalid:
+ case Sema::TDK_InstantiationDepth:
+ case Sema::TDK_Incomplete:
+ case Sema::TDK_TooManyArguments:
+ case Sema::TDK_TooFewArguments:
+ case Sema::TDK_InvalidExplicitArguments:
+ case Sema::TDK_FailedOverloadResolution:
+ break;
+
+ case Sema::TDK_Inconsistent:
+ case Sema::TDK_Underqualified:
+ case Sema::TDK_DeducedMismatch:
+ case Sema::TDK_NonDeducedMismatch:
+ // FIXME: Destroy the data?
+ Data = nullptr;
+ break;
+
+ case Sema::TDK_SubstitutionFailure:
+ // FIXME: Destroy the template argument list?
+ Data = nullptr;
+ if (PartialDiagnosticAt *Diag = getSFINAEDiagnostic()) {
+ Diag->~PartialDiagnosticAt();
+ HasDiagnostic = false;
+ }
+ break;
+
+ // Unhandled
+ case Sema::TDK_MiscellaneousDeductionFailure:
+ break;
+ }
+}
+
+PartialDiagnosticAt *DeductionFailureInfo::getSFINAEDiagnostic() {
+ if (HasDiagnostic)
+ return static_cast<PartialDiagnosticAt*>(static_cast<void*>(Diagnostic));
+ return nullptr;
+}
+
+TemplateParameter DeductionFailureInfo::getTemplateParameter() {
+ switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
+ case Sema::TDK_Success:
+ case Sema::TDK_Invalid:
+ case Sema::TDK_InstantiationDepth:
+ case Sema::TDK_TooManyArguments:
+ case Sema::TDK_TooFewArguments:
+ case Sema::TDK_SubstitutionFailure:
+ case Sema::TDK_DeducedMismatch:
+ case Sema::TDK_NonDeducedMismatch:
+ case Sema::TDK_FailedOverloadResolution:
+ return TemplateParameter();
+
+ case Sema::TDK_Incomplete:
+ case Sema::TDK_InvalidExplicitArguments:
+ return TemplateParameter::getFromOpaqueValue(Data);
+
+ case Sema::TDK_Inconsistent:
+ case Sema::TDK_Underqualified:
+ return static_cast<DFIParamWithArguments*>(Data)->Param;
+
+ // Unhandled
+ case Sema::TDK_MiscellaneousDeductionFailure:
+ break;
+ }
+
+ return TemplateParameter();
+}
+
+TemplateArgumentList *DeductionFailureInfo::getTemplateArgumentList() {
+ switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
+ case Sema::TDK_Success:
+ case Sema::TDK_Invalid:
+ case Sema::TDK_InstantiationDepth:
+ case Sema::TDK_TooManyArguments:
+ case Sema::TDK_TooFewArguments:
+ case Sema::TDK_Incomplete:
+ case Sema::TDK_InvalidExplicitArguments:
+ case Sema::TDK_Inconsistent:
+ case Sema::TDK_Underqualified:
+ case Sema::TDK_NonDeducedMismatch:
+ case Sema::TDK_FailedOverloadResolution:
+ return nullptr;
+
+ case Sema::TDK_DeducedMismatch:
+ return static_cast<DFIDeducedMismatchArgs*>(Data)->TemplateArgs;
+
+ case Sema::TDK_SubstitutionFailure:
+ return static_cast<TemplateArgumentList*>(Data);
+
+ // Unhandled
+ case Sema::TDK_MiscellaneousDeductionFailure:
+ break;
+ }
+
+ return nullptr;
+}
+
+const TemplateArgument *DeductionFailureInfo::getFirstArg() {
+ switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
+ case Sema::TDK_Success:
+ case Sema::TDK_Invalid:
+ case Sema::TDK_InstantiationDepth:
+ case Sema::TDK_Incomplete:
+ case Sema::TDK_TooManyArguments:
+ case Sema::TDK_TooFewArguments:
+ case Sema::TDK_InvalidExplicitArguments:
+ case Sema::TDK_SubstitutionFailure:
+ case Sema::TDK_FailedOverloadResolution:
+ return nullptr;
+
+ case Sema::TDK_Inconsistent:
+ case Sema::TDK_Underqualified:
+ case Sema::TDK_DeducedMismatch:
+ case Sema::TDK_NonDeducedMismatch:
+ return &static_cast<DFIArguments*>(Data)->FirstArg;
+
+ // Unhandled
+ case Sema::TDK_MiscellaneousDeductionFailure:
+ break;
+ }
+
+ return nullptr;
+}
+
+const TemplateArgument *DeductionFailureInfo::getSecondArg() {
+ switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
+ case Sema::TDK_Success:
+ case Sema::TDK_Invalid:
+ case Sema::TDK_InstantiationDepth:
+ case Sema::TDK_Incomplete:
+ case Sema::TDK_TooManyArguments:
+ case Sema::TDK_TooFewArguments:
+ case Sema::TDK_InvalidExplicitArguments:
+ case Sema::TDK_SubstitutionFailure:
+ case Sema::TDK_FailedOverloadResolution:
+ return nullptr;
+
+ case Sema::TDK_Inconsistent:
+ case Sema::TDK_Underqualified:
+ case Sema::TDK_DeducedMismatch:
+ case Sema::TDK_NonDeducedMismatch:
+ return &static_cast<DFIArguments*>(Data)->SecondArg;
+
+ // Unhandled
+ case Sema::TDK_MiscellaneousDeductionFailure:
+ break;
+ }
+
+ return nullptr;
+}
+
+Expr *DeductionFailureInfo::getExpr() {
+ if (static_cast<Sema::TemplateDeductionResult>(Result) ==
+ Sema::TDK_FailedOverloadResolution)
+ return static_cast<Expr*>(Data);
+
+ return nullptr;
+}
+
+llvm::Optional<unsigned> DeductionFailureInfo::getCallArgIndex() {
+ if (static_cast<Sema::TemplateDeductionResult>(Result) ==
+ Sema::TDK_DeducedMismatch)
+ return static_cast<DFIDeducedMismatchArgs*>(Data)->CallArgIndex;
+
+ return llvm::None;
+}
+
+void OverloadCandidateSet::destroyCandidates() {
+ for (iterator i = begin(), e = end(); i != e; ++i) {
+ for (unsigned ii = 0, ie = i->NumConversions; ii != ie; ++ii)
+ i->Conversions[ii].~ImplicitConversionSequence();
+ if (!i->Viable && i->FailureKind == ovl_fail_bad_deduction)
+ i->DeductionFailure.Destroy();
+ }
+}
+
+void OverloadCandidateSet::clear() {
+ destroyCandidates();
+ NumInlineSequences = 0;
+ Candidates.clear();
+ Functions.clear();
+}
+
+namespace {
+ class UnbridgedCastsSet {
+ struct Entry {
+ Expr **Addr;
+ Expr *Saved;
+ };
+ SmallVector<Entry, 2> Entries;
+
+ public:
+ void save(Sema &S, Expr *&E) {
+ assert(E->hasPlaceholderType(BuiltinType::ARCUnbridgedCast));
+ Entry entry = { &E, E };
+ Entries.push_back(entry);
+ E = S.stripARCUnbridgedCast(E);
+ }
+
+ void restore() {
+ for (SmallVectorImpl<Entry>::iterator
+ i = Entries.begin(), e = Entries.end(); i != e; ++i)
+ *i->Addr = i->Saved;
+ }
+ };
+}
+
+/// checkPlaceholderForOverload - Do any interesting placeholder-like
+/// preprocessing on the given expression.
+///
+/// \param unbridgedCasts a collection to which to add unbridged casts;
+/// without this, they will be immediately diagnosed as errors.
+///
+/// Return true on unrecoverable error.
+static bool
+checkPlaceholderForOverload(Sema &S, Expr *&E,
+ UnbridgedCastsSet *unbridgedCasts = nullptr) {
+ if (const BuiltinType *placeholder = E->getType()->getAsPlaceholderType()) {
+ // We can't handle overloaded expressions here because overload
+ // resolution might reasonably tweak them.
+ if (placeholder->getKind() == BuiltinType::Overload) return false;
+
+ // If the context potentially accepts unbridged ARC casts, strip
+ // the unbridged cast and add it to the collection for later restoration.
+ if (placeholder->getKind() == BuiltinType::ARCUnbridgedCast &&
+ unbridgedCasts) {
+ unbridgedCasts->save(S, E);
+ return false;
+ }
+
+ // Go ahead and check everything else.
+ ExprResult result = S.CheckPlaceholderExpr(E);
+ if (result.isInvalid())
+ return true;
+
+ E = result.get();
+ return false;
+ }
+
+ // Nothing to do.
+ return false;
+}
+
+/// checkArgPlaceholdersForOverload - Check a set of call operands for
+/// placeholders.
+static bool checkArgPlaceholdersForOverload(Sema &S,
+ MultiExprArg Args,
+ UnbridgedCastsSet &unbridged) {
+ for (unsigned i = 0, e = Args.size(); i != e; ++i)
+ if (checkPlaceholderForOverload(S, Args[i], &unbridged))
+ return true;
+
+ return false;
+}
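+
+// Illustrative usage sketch for the helpers above (the surrounding caller is
+// hypothetical): unbridged ARC casts are saved before overload resolution and
+// restored once a candidate has been chosen:
+//
+//   UnbridgedCastsSet unbridged;
+//   if (checkArgPlaceholdersForOverload(S, Args, unbridged))
+//     return ExprError();
+//   // ... build the candidate set, run overload resolution ...
+//   unbridged.restore();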
+
+// IsOverload - Determine whether the given New declaration is an
+// overload of the declarations in Old. This routine returns false if
+// New and Old cannot be overloaded, e.g., if New has the same
+// signature as some function in Old (C++ 1.3.10) or if the Old
+// declarations aren't functions (or function templates) at all. When
+// it does return false, MatchedDecl will point to the decl that New
+// cannot be overloaded with. This decl may be a UsingShadowDecl on
+// top of the underlying declaration.
+//
+// Example: Given the following input:
+//
+// void f(int, float); // #1
+// void f(int, int); // #2
+// int f(int, int); // #3
+//
+// When we process #1, there is no previous declaration of "f",
+// so IsOverload will not be used.
+//
+// When we process #2, Old contains only the FunctionDecl for #1. By
+// comparing the parameter types, we see that #1 and #2 are overloaded
+// (since they have different signatures), so this routine returns
+// false; MatchedDecl is unchanged.
+//
+// When we process #3, Old is an overload set containing #1 and #2. We
+// compare the signatures of #3 to #1 (they're overloaded, so we do
+// nothing) and then #3 to #2. Since the signatures of #3 and #2 are
+// identical (return types of functions are not part of the
+// signature), IsOverload returns false and MatchedDecl will be set to
+// point to the FunctionDecl for #2.
+//
+// 'NewIsUsingDecl' indicates that 'New' is being introduced
+// into a class by a using declaration. The rules for whether to hide
+// shadow declarations ignore some properties which otherwise figure
+// into a function template's signature.
+Sema::OverloadKind
+Sema::CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &Old,
+ NamedDecl *&Match, bool NewIsUsingDecl) {
+ for (LookupResult::iterator I = Old.begin(), E = Old.end();
+ I != E; ++I) {
+ NamedDecl *OldD = *I;
+
+ bool OldIsUsingDecl = false;
+ if (isa<UsingShadowDecl>(OldD)) {
+ OldIsUsingDecl = true;
+
+ // We can always introduce two using declarations into the same
+ // context, even if they have identical signatures.
+ if (NewIsUsingDecl) continue;
+
+ OldD = cast<UsingShadowDecl>(OldD)->getTargetDecl();
+ }
+
+ // A using-declaration does not conflict with another declaration
+ // if one of them is hidden.
+ if ((OldIsUsingDecl || NewIsUsingDecl) && !isVisible(*I))
+ continue;
+
+ // If either declaration was introduced by a using declaration,
+ // we'll need to use slightly different rules for matching.
+ // Essentially, these rules are the normal rules, except that
+ // function templates hide function templates with different
+ // return types or template parameter lists.
+ bool UseMemberUsingDeclRules =
+ (OldIsUsingDecl || NewIsUsingDecl) && CurContext->isRecord() &&
+ !New->getFriendObjectKind();
+
+ if (FunctionDecl *OldF = OldD->getAsFunction()) {
+ if (!IsOverload(New, OldF, UseMemberUsingDeclRules)) {
+ if (UseMemberUsingDeclRules && OldIsUsingDecl) {
+ HideUsingShadowDecl(S, cast<UsingShadowDecl>(*I));
+ continue;
+ }
+
+ if (!isa<FunctionTemplateDecl>(OldD) &&
+ !shouldLinkPossiblyHiddenDecl(*I, New))
+ continue;
+
+ Match = *I;
+ return Ovl_Match;
+ }
+ } else if (isa<UsingDecl>(OldD)) {
+ // We can overload with these, which can show up when doing
+ // redeclaration checks for UsingDecls.
+ assert(Old.getLookupKind() == LookupUsingDeclName);
+ } else if (isa<TagDecl>(OldD)) {
+ // We can always overload with tags by hiding them.
+ } else if (isa<UnresolvedUsingValueDecl>(OldD)) {
+ // Optimistically assume that an unresolved using decl will
+ // overload; if it doesn't, we'll have to diagnose during
+ // template instantiation.
+ } else {
+ // (C++ 13p1):
+ // Only function declarations can be overloaded; object and type
+ // declarations cannot be overloaded.
+ Match = *I;
+ return Ovl_NonFunction;
+ }
+ }
+
+ return Ovl_Overload;
+}
+
+bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
+ bool UseUsingDeclRules) {
+ // C++ [basic.start.main]p2: This function shall not be overloaded.
+ if (New->isMain())
+ return false;
+
+ // MSVCRT user defined entry points cannot be overloaded.
+ if (New->isMSVCRTEntryPoint())
+ return false;
+
+ FunctionTemplateDecl *OldTemplate = Old->getDescribedFunctionTemplate();
+ FunctionTemplateDecl *NewTemplate = New->getDescribedFunctionTemplate();
+
+ // C++ [temp.fct]p2:
+ // A function template can be overloaded with other function templates
+ // and with normal (non-template) functions.
+ if ((OldTemplate == nullptr) != (NewTemplate == nullptr))
+ return true;
+
+ // Is the function New an overload of the function Old?
+ QualType OldQType = Context.getCanonicalType(Old->getType());
+ QualType NewQType = Context.getCanonicalType(New->getType());
+
+ // Compare the signatures (C++ 1.3.10) of the two functions to
+ // determine whether they are overloads. If we find any mismatch
+ // in the signature, they are overloads.
+
+ // If either of these functions is a K&R-style function (no
+ // prototype), then we consider them to have matching signatures.
+ if (isa<FunctionNoProtoType>(OldQType.getTypePtr()) ||
+ isa<FunctionNoProtoType>(NewQType.getTypePtr()))
+ return false;
+
+ const FunctionProtoType *OldType = cast<FunctionProtoType>(OldQType);
+ const FunctionProtoType *NewType = cast<FunctionProtoType>(NewQType);
+
+ // The signature of a function includes the types of its
+ // parameters (C++ 1.3.10), which includes the presence or absence
+ // of the ellipsis (see C++ DR 357).
+ if (OldQType != NewQType &&
+ (OldType->getNumParams() != NewType->getNumParams() ||
+ OldType->isVariadic() != NewType->isVariadic() ||
+ !FunctionParamTypesAreEqual(OldType, NewType)))
+ return true;
+
+ // C++ [temp.over.link]p4:
+ // The signature of a function template consists of its function
+ // signature, its return type and its template parameter list. The names
+ // of the template parameters are significant only for establishing the
+ // relationship between the template parameters and the rest of the
+ // signature.
+ //
+ // We check the return type and template parameter lists for function
+ // templates first; the remaining checks follow.
+ //
+ // However, we don't consider either of these when deciding whether
+ // a member introduced by a shadow declaration is hidden.
+ if (!UseUsingDeclRules && NewTemplate &&
+ (!TemplateParameterListsAreEqual(NewTemplate->getTemplateParameters(),
+ OldTemplate->getTemplateParameters(),
+ false, TPL_TemplateMatch) ||
+ OldType->getReturnType() != NewType->getReturnType()))
+ return true;
+
+ // If the function is a class member, its signature includes the
+ // cv-qualifiers (if any) and ref-qualifier (if any) on the function itself.
+ //
+ // As part of this, also check whether one of the member functions
+ // is static, in which case they are not overloads (C++
+ // 13.1p2). While not part of the definition of the signature,
+ // this check is important to determine whether these functions
+ // can be overloaded.
+ CXXMethodDecl *OldMethod = dyn_cast<CXXMethodDecl>(Old);
+ CXXMethodDecl *NewMethod = dyn_cast<CXXMethodDecl>(New);
+ if (OldMethod && NewMethod &&
+ !OldMethod->isStatic() && !NewMethod->isStatic()) {
+ if (OldMethod->getRefQualifier() != NewMethod->getRefQualifier()) {
+ if (!UseUsingDeclRules &&
+ (OldMethod->getRefQualifier() == RQ_None ||
+ NewMethod->getRefQualifier() == RQ_None)) {
+ // C++0x [over.load]p2:
+ // - Member function declarations with the same name and the same
+ // parameter-type-list as well as member function template
+ // declarations with the same name, the same parameter-type-list, and
+ // the same template parameter lists cannot be overloaded if any of
+ // them, but not all, have a ref-qualifier (8.3.5).
+ Diag(NewMethod->getLocation(), diag::err_ref_qualifier_overload)
+ << NewMethod->getRefQualifier() << OldMethod->getRefQualifier();
+ Diag(OldMethod->getLocation(), diag::note_previous_declaration);
+ }
+ return true;
+ }
+
+ // We may not have applied the implicit const for a constexpr member
+ // function yet (because we haven't yet resolved whether this is a static
+ // or non-static member function). Add it now, on the assumption that this
+ // is a redeclaration of OldMethod.
+ unsigned OldQuals = OldMethod->getTypeQualifiers();
+ unsigned NewQuals = NewMethod->getTypeQualifiers();
+ if (!getLangOpts().CPlusPlus14 && NewMethod->isConstexpr() &&
+ !isa<CXXConstructorDecl>(NewMethod))
+ NewQuals |= Qualifiers::Const;
+
+ // We do not allow overloading based on '__restrict'.
+ OldQuals &= ~Qualifiers::Restrict;
+ NewQuals &= ~Qualifiers::Restrict;
+ if (OldQuals != NewQuals)
+ return true;
+ }
+
+ // Though pass_object_size is placed on parameters and takes an argument, we
+ // consider it to be a function-level modifier for the sake of function
+ // identity. Either the function has one or more parameters with
+ // pass_object_size or it doesn't.
+ if (functionHasPassObjectSizeParams(New) !=
+ functionHasPassObjectSizeParams(Old))
+ return true;
+
+ // enable_if attributes are an order-sensitive part of the signature.
+ for (specific_attr_iterator<EnableIfAttr>
+ NewI = New->specific_attr_begin<EnableIfAttr>(),
+ NewE = New->specific_attr_end<EnableIfAttr>(),
+ OldI = Old->specific_attr_begin<EnableIfAttr>(),
+ OldE = Old->specific_attr_end<EnableIfAttr>();
+ NewI != NewE || OldI != OldE; ++NewI, ++OldI) {
+ if (NewI == NewE || OldI == OldE)
+ return true;
+ llvm::FoldingSetNodeID NewID, OldID;
+ NewI->getCond()->Profile(NewID, Context, true);
+ OldI->getCond()->Profile(OldID, Context, true);
+ if (NewID != OldID)
+ return true;
+ }
+
+ if (getLangOpts().CUDA && getLangOpts().CUDATargetOverloads) {
+ CUDAFunctionTarget NewTarget = IdentifyCUDATarget(New),
+ OldTarget = IdentifyCUDATarget(Old);
+ if (NewTarget == CFT_InvalidTarget || NewTarget == CFT_Global)
+ return false;
+
+ assert((OldTarget != CFT_InvalidTarget) && "Unexpected invalid target.");
+
+ // Don't allow mixing of HD with other kinds. This guarantees that
+ // we have only one viable function with this signature on any
+ // side of CUDA compilation.
+ if ((NewTarget == CFT_HostDevice) || (OldTarget == CFT_HostDevice))
+ return false;
+
+ // Allow overloading of functions with same signature, but
+ // different CUDA target attributes.
+ return NewTarget != OldTarget;
+ }
+
+ // The signatures match; this is not an overload.
+ return false;
+}
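+
+// Illustrative consequence of the ref-qualifier rule above: overloads with
+// the same parameter-type-list must either all have ref-qualifiers or none:
+//
+//   struct X {
+//     void f() &;
+//     void f() &&;  // OK: every 'f' has a ref-qualifier
+//     void g();
+//     void g() &&;  // error: only some 'g' declarations are ref-qualified
+//   };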
+
+/// \brief Checks availability of the function depending on the current
+/// function context. Inside an unavailable function, unavailability is ignored.
+///
+/// \returns true if \arg FD is unavailable and the current context is inside
+/// an available function, false otherwise.
+bool Sema::isFunctionConsideredUnavailable(FunctionDecl *FD) {
+ return FD->isUnavailable() && !cast<Decl>(CurContext)->isUnavailable();
+}
+
+/// \brief Tries a user-defined conversion from From to ToType.
+///
+/// Produces an implicit conversion sequence for when a standard conversion
+/// is not an option. See TryImplicitConversion for more information.
+static ImplicitConversionSequence
+TryUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
+ bool SuppressUserConversions,
+ bool AllowExplicit,
+ bool InOverloadResolution,
+ bool CStyle,
+ bool AllowObjCWritebackConversion,
+ bool AllowObjCConversionOnExplicit) {
+ ImplicitConversionSequence ICS;
+
+ if (SuppressUserConversions) {
+ // User-defined conversions are suppressed, so there is no conversion that
+ // we can perform.
+ ICS.setBad(BadConversionSequence::no_conversion, From, ToType);
+ return ICS;
+ }
+
+ // Attempt user-defined conversion.
+ OverloadCandidateSet Conversions(From->getExprLoc(),
+ OverloadCandidateSet::CSK_Normal);
+ switch (IsUserDefinedConversion(S, From, ToType, ICS.UserDefined,
+ Conversions, AllowExplicit,
+ AllowObjCConversionOnExplicit)) {
+ case OR_Success:
+ case OR_Deleted:
+ ICS.setUserDefined();
+ ICS.UserDefined.Before.setAsIdentityConversion();
+ // C++ [over.ics.user]p4:
+ // A conversion of an expression of class type to the same class
+ // type is given Exact Match rank, and a conversion of an
+ // expression of class type to a base class of that type is
+ // given Conversion rank, in spite of the fact that a copy
+ // constructor (i.e., a user-defined conversion function) is
+ // called for those cases.
+ if (CXXConstructorDecl *Constructor
+ = dyn_cast<CXXConstructorDecl>(ICS.UserDefined.ConversionFunction)) {
+ QualType FromCanon
+ = S.Context.getCanonicalType(From->getType().getUnqualifiedType());
+ QualType ToCanon
+ = S.Context.getCanonicalType(ToType).getUnqualifiedType();
+ if (Constructor->isCopyConstructor() &&
+ (FromCanon == ToCanon ||
+ S.IsDerivedFrom(From->getLocStart(), FromCanon, ToCanon))) {
+ // Turn this into a "standard" conversion sequence, so that it
+ // gets ranked with standard conversion sequences.
+ ICS.setStandard();
+ ICS.Standard.setAsIdentityConversion();
+ ICS.Standard.setFromType(From->getType());
+ ICS.Standard.setAllToTypes(ToType);
+ ICS.Standard.CopyConstructor = Constructor;
+ if (ToCanon != FromCanon)
+ ICS.Standard.Second = ICK_Derived_To_Base;
+ }
+ }
+ break;
+
+ case OR_Ambiguous:
+ ICS.setAmbiguous();
+ ICS.Ambiguous.setFromType(From->getType());
+ ICS.Ambiguous.setToType(ToType);
+ for (OverloadCandidateSet::iterator Cand = Conversions.begin();
+ Cand != Conversions.end(); ++Cand)
+ if (Cand->Viable)
+ ICS.Ambiguous.addConversion(Cand->Function);
+ break;
+
+ case OR_No_Viable_Function:
+ ICS.setBad(BadConversionSequence::no_conversion, From, ToType);
+ break;
+ }
+
+ return ICS;
+}
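+
+// Illustrative sketch of the [over.ics.user]p4 special case handled above:
+//
+//   struct B {};
+//   struct D : B {};
+//   void f(B);
+//   D d;
+//   // f(d) invokes B's copy constructor, yet the sequence is recorded as a
+//   // standard conversion with Second == ICK_Derived_To_Base, so it is
+//   // ranked against other standard conversion sequences.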
+
+/// TryImplicitConversion - Attempt to perform an implicit conversion
+/// from the given expression (Expr) to the given type (ToType). This
+/// function returns an implicit conversion sequence that can be used
+/// to perform the initialization. Given
+///
+/// void f(float f);
+/// void g(int i) { f(i); }
+///
+/// this routine would produce an implicit conversion sequence to
+/// describe the initialization of f from i, which will be a standard
+/// conversion sequence containing an lvalue-to-rvalue conversion (C++
+/// 4.1) followed by a floating-integral conversion (C++ 4.9).
+///
+/// Note that this routine only determines how the conversion can be
+/// performed; it does not actually perform the conversion. As such,
+/// it will not produce any diagnostics if no conversion is available,
+/// but will instead return an implicit conversion sequence of kind
+/// "BadConversion".
+///
+/// If @p SuppressUserConversions, then user-defined conversions are
+/// not permitted.
+/// If @p AllowExplicit, then explicit user-defined conversions are
+/// permitted.
+///
+/// \param AllowObjCWritebackConversion Whether we allow the Objective-C
+/// writeback conversion, which allows __autoreleasing id* parameters to
+/// be initialized with __strong id* or __weak id* arguments.
+static ImplicitConversionSequence
+TryImplicitConversion(Sema &S, Expr *From, QualType ToType,
+ bool SuppressUserConversions,
+ bool AllowExplicit,
+ bool InOverloadResolution,
+ bool CStyle,
+ bool AllowObjCWritebackConversion,
+ bool AllowObjCConversionOnExplicit) {
+ ImplicitConversionSequence ICS;
+ if (IsStandardConversion(S, From, ToType, InOverloadResolution,
+ ICS.Standard, CStyle, AllowObjCWritebackConversion)){
+ ICS.setStandard();
+ return ICS;
+ }
+
+ if (!S.getLangOpts().CPlusPlus) {
+ ICS.setBad(BadConversionSequence::no_conversion, From, ToType);
+ return ICS;
+ }
+
+ // C++ [over.ics.user]p4:
+ // A conversion of an expression of class type to the same class
+ // type is given Exact Match rank, and a conversion of an
+ // expression of class type to a base class of that type is
+ // given Conversion rank, in spite of the fact that a copy/move
+ // constructor (i.e., a user-defined conversion function) is
+ // called for those cases.
+ QualType FromType = From->getType();
+ if (ToType->getAs<RecordType>() && FromType->getAs<RecordType>() &&
+ (S.Context.hasSameUnqualifiedType(FromType, ToType) ||
+ S.IsDerivedFrom(From->getLocStart(), FromType, ToType))) {
+ ICS.setStandard();
+ ICS.Standard.setAsIdentityConversion();
+ ICS.Standard.setFromType(FromType);
+ ICS.Standard.setAllToTypes(ToType);
+
+ // We don't actually check at this point whether there is a valid
+ // copy/move constructor, since overloading just assumes that it
+ // exists. When we actually perform initialization, we'll find the
+ // appropriate constructor to copy the returned object, if needed.
+ ICS.Standard.CopyConstructor = nullptr;
+
+ // Determine whether this is considered a derived-to-base conversion.
+ if (!S.Context.hasSameUnqualifiedType(FromType, ToType))
+ ICS.Standard.Second = ICK_Derived_To_Base;
+
+ return ICS;
+ }
+
+ return TryUserDefinedConversion(S, From, ToType, SuppressUserConversions,
+ AllowExplicit, InOverloadResolution, CStyle,
+ AllowObjCWritebackConversion,
+ AllowObjCConversionOnExplicit);
+}
+
+ImplicitConversionSequence
+Sema::TryImplicitConversion(Expr *From, QualType ToType,
+ bool SuppressUserConversions,
+ bool AllowExplicit,
+ bool InOverloadResolution,
+ bool CStyle,
+ bool AllowObjCWritebackConversion) {
+ return ::TryImplicitConversion(*this, From, ToType,
+ SuppressUserConversions, AllowExplicit,
+ InOverloadResolution, CStyle,
+ AllowObjCWritebackConversion,
+ /*AllowObjCConversionOnExplicit=*/false);
+}
+
+/// PerformImplicitConversion - Perform an implicit conversion of the
+/// expression From to the type ToType. Returns the
+/// converted expression. Flavor is the kind of conversion we're
+/// performing, used in the error message. If @p AllowExplicit,
+/// explicit user-defined conversions are permitted.
+ExprResult
+Sema::PerformImplicitConversion(Expr *From, QualType ToType,
+ AssignmentAction Action, bool AllowExplicit) {
+ ImplicitConversionSequence ICS;
+ return PerformImplicitConversion(From, ToType, Action, AllowExplicit, ICS);
+}
+
+ExprResult
+Sema::PerformImplicitConversion(Expr *From, QualType ToType,
+ AssignmentAction Action, bool AllowExplicit,
+ ImplicitConversionSequence& ICS) {
+ if (checkPlaceholderForOverload(*this, From))
+ return ExprError();
+
+ // Objective-C ARC: Determine whether we will allow the writeback conversion.
+ bool AllowObjCWritebackConversion
+ = getLangOpts().ObjCAutoRefCount &&
+ (Action == AA_Passing || Action == AA_Sending);
+ if (getLangOpts().ObjC1)
+ CheckObjCBridgeRelatedConversions(From->getLocStart(),
+ ToType, From->getType(), From);
+ ICS = ::TryImplicitConversion(*this, From, ToType,
+ /*SuppressUserConversions=*/false,
+ AllowExplicit,
+ /*InOverloadResolution=*/false,
+ /*CStyle=*/false,
+ AllowObjCWritebackConversion,
+ /*AllowObjCConversionOnExplicit=*/false);
+ return PerformImplicitConversion(From, ToType, ICS, Action);
+}
+
+/// \brief Determine whether the conversion from FromType to ToType is a valid
+/// conversion that strips "noreturn" off the nested function type.
+bool Sema::IsNoReturnConversion(QualType FromType, QualType ToType,
+ QualType &ResultTy) {
+ if (Context.hasSameUnqualifiedType(FromType, ToType))
+ return false;
+
+ // Permit the conversion F(t __attribute__((noreturn))) -> F(t)
+ // where F adds one of the following at most once:
+ // - a pointer
+ // - a member pointer
+ // - a block pointer
+ CanQualType CanTo = Context.getCanonicalType(ToType);
+ CanQualType CanFrom = Context.getCanonicalType(FromType);
+ Type::TypeClass TyClass = CanTo->getTypeClass();
+ if (TyClass != CanFrom->getTypeClass()) return false;
+ if (TyClass != Type::FunctionProto && TyClass != Type::FunctionNoProto) {
+ if (TyClass == Type::Pointer) {
+ CanTo = CanTo.getAs<PointerType>()->getPointeeType();
+ CanFrom = CanFrom.getAs<PointerType>()->getPointeeType();
+ } else if (TyClass == Type::BlockPointer) {
+ CanTo = CanTo.getAs<BlockPointerType>()->getPointeeType();
+ CanFrom = CanFrom.getAs<BlockPointerType>()->getPointeeType();
+ } else if (TyClass == Type::MemberPointer) {
+ CanTo = CanTo.getAs<MemberPointerType>()->getPointeeType();
+ CanFrom = CanFrom.getAs<MemberPointerType>()->getPointeeType();
+ } else {
+ return false;
+ }
+
+ TyClass = CanTo->getTypeClass();
+ if (TyClass != CanFrom->getTypeClass()) return false;
+ if (TyClass != Type::FunctionProto && TyClass != Type::FunctionNoProto)
+ return false;
+ }
+
+ const FunctionType *FromFn = cast<FunctionType>(CanFrom);
+ FunctionType::ExtInfo EInfo = FromFn->getExtInfo();
+ if (!EInfo.getNoReturn()) return false;
+
+ FromFn = Context.adjustFunctionType(FromFn, EInfo.withNoReturn(false));
+ assert(QualType(FromFn, 0).isCanonical());
+ if (QualType(FromFn, 0) != CanTo) return false;
+
+ ResultTy = ToType;
+ return true;
+}
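+
+// A sketch of the conversion this permits (the declarations are illustrative):
+//
+//   typedef void (*Fn)();
+//   __attribute__((noreturn)) void fatal();
+//   Fn p = fatal;  // OK: F(t __attribute__((noreturn))) -> F(t)
+//
+// The reverse direction, adding 'noreturn', is not a valid conversion.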
+
+/// \brief Determine whether the conversion from FromType to ToType is a valid
+/// vector conversion.
+///
+/// \param ICK Will be set to the vector conversion kind, if this is a vector
+/// conversion.
+static bool IsVectorConversion(Sema &S, QualType FromType,
+ QualType ToType, ImplicitConversionKind &ICK) {
+ // We need at least one of these types to be a vector type to have a vector
+ // conversion.
+ if (!ToType->isVectorType() && !FromType->isVectorType())
+ return false;
+
+ // Identical types require no conversions.
+ if (S.Context.hasSameUnqualifiedType(FromType, ToType))
+ return false;
+
+ // There are no conversions between extended vector types, only identity.
+ if (ToType->isExtVectorType()) {
+ // There are no conversions between extended vector types other than the
+ // identity conversion.
+ if (FromType->isExtVectorType())
+ return false;
+
+ // Vector splat from any arithmetic type to a vector.
+ if (FromType->isArithmeticType()) {
+ ICK = ICK_Vector_Splat;
+ return true;
+ }
+ }
+
+ // We can perform the conversion between vector types in the following cases:
+ // 1) the vector types are equivalent AltiVec and GCC vector types
+ // 2) lax vector conversions are permitted and the vector types are of the
+ //    same size
+ if (ToType->isVectorType() && FromType->isVectorType()) {
+ if (S.Context.areCompatibleVectorTypes(FromType, ToType) ||
+ S.isLaxVectorConversion(FromType, ToType)) {
+ ICK = ICK_Vector_Conversion;
+ return true;
+ }
+ }
+
+ return false;
+}
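+
+// Illustrative examples of the two vector conversion kinds above:
+//
+//   typedef float float4 __attribute__((ext_vector_type(4)));
+//   float4 v = 1.0f;  // ICK_Vector_Splat: the scalar is splatted to all lanes
+//
+//   typedef int i4 __attribute__((vector_size(16)));
+//   typedef float f4 __attribute__((vector_size(16)));
+//   // Where lax vector conversions are permitted, i4 -> f4 is
+//   // ICK_Vector_Conversion because the two types have the same size.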
+
+static bool tryAtomicConversion(Sema &S, Expr *From, QualType ToType,
+ bool InOverloadResolution,
+ StandardConversionSequence &SCS,
+ bool CStyle);
+
+/// IsStandardConversion - Determines whether there is a standard
+/// conversion sequence (C++ [conv], C++ [over.ics.scs]) from the
+/// expression From to the type ToType. Standard conversion sequences
+/// only consider non-class types; for conversions that involve class
+/// types, use TryImplicitConversion. If a conversion exists, SCS will
+/// contain the standard conversion sequence required to perform this
+/// conversion and this routine will return true. Otherwise, this
+/// routine will return false and the value of SCS is unspecified.
+static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
+ bool InOverloadResolution,
+ StandardConversionSequence &SCS,
+ bool CStyle,
+ bool AllowObjCWritebackConversion) {
+ QualType FromType = From->getType();
+
+ // Standard conversions (C++ [conv])
+ SCS.setAsIdentityConversion();
+ SCS.IncompatibleObjC = false;
+ SCS.setFromType(FromType);
+ SCS.CopyConstructor = nullptr;
+
+ // There are no standard conversions for class types in C++, so
+ // abort early. When overloading in C, however, we do permit them.
+ if (S.getLangOpts().CPlusPlus &&
+ (FromType->isRecordType() || ToType->isRecordType()))
+ return false;
+
+ // The first conversion can be an lvalue-to-rvalue conversion,
+ // array-to-pointer conversion, or function-to-pointer conversion
+ // (C++ 4p1).
+
+ if (FromType == S.Context.OverloadTy) {
+ DeclAccessPair AccessPair;
+ if (FunctionDecl *Fn
+ = S.ResolveAddressOfOverloadedFunction(From, ToType, false,
+ AccessPair)) {
+ // We were able to resolve the address of the overloaded function,
+ // so we can convert to the type of that function.
+ FromType = Fn->getType();
+ SCS.setFromType(FromType);
+
+ // We can sometimes resolve &foo<int> regardless of ToType, so check
+ // whether the type matches (identity) or we are converting to bool.
+ if (!S.Context.hasSameUnqualifiedType(
+ S.ExtractUnqualifiedFunctionType(ToType), FromType)) {
+ QualType resultTy;
+ // if the function type matches except for [[noreturn]], it's ok
+ if (!S.IsNoReturnConversion(FromType,
+ S.ExtractUnqualifiedFunctionType(ToType), resultTy))
+ // otherwise, only a boolean conversion is standard
+ if (!ToType->isBooleanType())
+ return false;
+ }
+
+ // Check if the "from" expression is taking the address of an overloaded
+ // function and recompute the FromType accordingly. Take advantage of the
+ // fact that non-static member functions *must* have such an address-of
+ // expression.
+ CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn);
+ if (Method && !Method->isStatic()) {
+ assert(isa<UnaryOperator>(From->IgnoreParens()) &&
+ "Non-unary operator on non-static member address");
+ assert(cast<UnaryOperator>(From->IgnoreParens())->getOpcode()
+ == UO_AddrOf &&
+ "Non-address-of operator on non-static member address");
+ const Type *ClassType
+ = S.Context.getTypeDeclType(Method->getParent()).getTypePtr();
+ FromType = S.Context.getMemberPointerType(FromType, ClassType);
+ } else if (isa<UnaryOperator>(From->IgnoreParens())) {
+ assert(cast<UnaryOperator>(From->IgnoreParens())->getOpcode() ==
+ UO_AddrOf &&
+ "Non-address-of operator for overloaded function expression");
+ FromType = S.Context.getPointerType(FromType);
+ }
+
+ // Check that we've computed the proper type after overload resolution.
+ assert(S.Context.hasSameType(
+ FromType,
+ S.FixOverloadedFunctionReference(From, AccessPair, Fn)->getType()));
+ } else {
+ return false;
+ }
+ }
+ // Lvalue-to-rvalue conversion (C++11 4.1):
+ // A glvalue (3.10) of a non-function, non-array type T can
+ // be converted to a prvalue.
+ bool argIsLValue = From->isGLValue();
+ if (argIsLValue &&
+ !FromType->isFunctionType() && !FromType->isArrayType() &&
+ S.Context.getCanonicalType(FromType) != S.Context.OverloadTy) {
+ SCS.First = ICK_Lvalue_To_Rvalue;
+
+ // C11 6.3.2.1p2:
+ // ... if the lvalue has atomic type, the value has the non-atomic version
+ // of the type of the lvalue ...
+ if (const AtomicType *Atomic = FromType->getAs<AtomicType>())
+ FromType = Atomic->getValueType();
+
+ // If T is a non-class type, the type of the rvalue is the
+ // cv-unqualified version of T. Otherwise, the type of the rvalue
+ // is T (C++ 4.1p1). C++ can't get here with class types; in C, we
+ // just strip the qualifiers because they don't matter.
+ FromType = FromType.getUnqualifiedType();
+ } else if (FromType->isArrayType()) {
+ // Array-to-pointer conversion (C++ 4.2)
+ SCS.First = ICK_Array_To_Pointer;
+
+ // An lvalue or rvalue of type "array of N T" or "array of unknown
+ // bound of T" can be converted to an rvalue of type "pointer to
+ // T" (C++ 4.2p1).
+ FromType = S.Context.getArrayDecayedType(FromType);
+
+ if (S.IsStringLiteralToNonConstPointerConversion(From, ToType)) {
+ // This conversion is deprecated in C++03 (D.4)
+ SCS.DeprecatedStringLiteralToCharPtr = true;
+
+ // For the purpose of ranking in overload resolution
+ // (13.3.3.1.1), this conversion is considered an
+ // array-to-pointer conversion followed by a qualification
+ // conversion (4.4). (C++ 4.2p2)
+ SCS.Second = ICK_Identity;
+ SCS.Third = ICK_Qualification;
+ SCS.QualificationIncludesObjCLifetime = false;
+ SCS.setAllToTypes(FromType);
+ return true;
+ }
+ } else if (FromType->isFunctionType() && argIsLValue) {
+ // Function-to-pointer conversion (C++ 4.3).
+ SCS.First = ICK_Function_To_Pointer;
+
+ if (auto *DRE = dyn_cast<DeclRefExpr>(From->IgnoreParenCasts()))
+ if (auto *FD = dyn_cast<FunctionDecl>(DRE->getDecl()))
+ if (!S.checkAddressOfFunctionIsAvailable(FD))
+ return false;
+
+ // An lvalue of function type T can be converted to an rvalue of
+ // type "pointer to T." The result is a pointer to the
+ // function. (C++ 4.3p1).
+ FromType = S.Context.getPointerType(FromType);
+ } else {
+ // We don't require any conversions for the first step.
+ SCS.First = ICK_Identity;
+ }
+ SCS.setToType(0, FromType);
+
+ // The second conversion can be an integral promotion, floating
+ // point promotion, integral conversion, floating point conversion,
+ // floating-integral conversion, pointer conversion,
+ // pointer-to-member conversion, or boolean conversion (C++ 4p1).
+ // For overloading in C, this can also be a "compatible-type"
+ // conversion.
+ bool IncompatibleObjC = false;
+ ImplicitConversionKind SecondICK = ICK_Identity;
+ if (S.Context.hasSameUnqualifiedType(FromType, ToType)) {
+ // The unqualified versions of the types are the same: there's no
+ // conversion to do.
+ SCS.Second = ICK_Identity;
+ } else if (S.IsIntegralPromotion(From, FromType, ToType)) {
+ // Integral promotion (C++ 4.5).
+ SCS.Second = ICK_Integral_Promotion;
+ FromType = ToType.getUnqualifiedType();
+ } else if (S.IsFloatingPointPromotion(FromType, ToType)) {
+ // Floating point promotion (C++ 4.6).
+ SCS.Second = ICK_Floating_Promotion;
+ FromType = ToType.getUnqualifiedType();
+ } else if (S.IsComplexPromotion(FromType, ToType)) {
+ // Complex promotion (Clang extension)
+ SCS.Second = ICK_Complex_Promotion;
+ FromType = ToType.getUnqualifiedType();
+ } else if (ToType->isBooleanType() &&
+ (FromType->isArithmeticType() ||
+ FromType->isAnyPointerType() ||
+ FromType->isBlockPointerType() ||
+ FromType->isMemberPointerType() ||
+ FromType->isNullPtrType())) {
+ // Boolean conversions (C++ 4.12).
+ SCS.Second = ICK_Boolean_Conversion;
+ FromType = S.Context.BoolTy;
+ } else if (FromType->isIntegralOrUnscopedEnumerationType() &&
+ ToType->isIntegralType(S.Context)) {
+ // Integral conversions (C++ 4.7).
+ SCS.Second = ICK_Integral_Conversion;
+ FromType = ToType.getUnqualifiedType();
+ } else if (FromType->isAnyComplexType() && ToType->isAnyComplexType()) {
+ // Complex conversions (C99 6.3.1.6)
+ SCS.Second = ICK_Complex_Conversion;
+ FromType = ToType.getUnqualifiedType();
+ } else if ((FromType->isAnyComplexType() && ToType->isArithmeticType()) ||
+ (ToType->isAnyComplexType() && FromType->isArithmeticType())) {
+ // Complex-real conversions (C99 6.3.1.7)
+ SCS.Second = ICK_Complex_Real;
+ FromType = ToType.getUnqualifiedType();
+ } else if (FromType->isRealFloatingType() && ToType->isRealFloatingType()) {
+ // Floating point conversions (C++ 4.8).
+ SCS.Second = ICK_Floating_Conversion;
+ FromType = ToType.getUnqualifiedType();
+ } else if ((FromType->isRealFloatingType() &&
+ ToType->isIntegralType(S.Context)) ||
+ (FromType->isIntegralOrUnscopedEnumerationType() &&
+ ToType->isRealFloatingType())) {
+ // Floating-integral conversions (C++ 4.9).
+ SCS.Second = ICK_Floating_Integral;
+ FromType = ToType.getUnqualifiedType();
+ } else if (S.IsBlockPointerConversion(FromType, ToType, FromType)) {
+ SCS.Second = ICK_Block_Pointer_Conversion;
+ } else if (AllowObjCWritebackConversion &&
+ S.isObjCWritebackConversion(FromType, ToType, FromType)) {
+ SCS.Second = ICK_Writeback_Conversion;
+ } else if (S.IsPointerConversion(From, FromType, ToType, InOverloadResolution,
+ FromType, IncompatibleObjC)) {
+ // Pointer conversions (C++ 4.10).
+ SCS.Second = ICK_Pointer_Conversion;
+ SCS.IncompatibleObjC = IncompatibleObjC;
+ FromType = FromType.getUnqualifiedType();
+ } else if (S.IsMemberPointerConversion(From, FromType, ToType,
+ InOverloadResolution, FromType)) {
+ // Pointer to member conversions (4.11).
+ SCS.Second = ICK_Pointer_Member;
+ } else if (IsVectorConversion(S, FromType, ToType, SecondICK)) {
+ SCS.Second = SecondICK;
+ FromType = ToType.getUnqualifiedType();
+ } else if (!S.getLangOpts().CPlusPlus &&
+ S.Context.typesAreCompatible(ToType, FromType)) {
+ // Compatible conversions (Clang extension for C function overloading)
+ SCS.Second = ICK_Compatible_Conversion;
+ FromType = ToType.getUnqualifiedType();
+ } else if (S.IsNoReturnConversion(FromType, ToType, FromType)) {
+ // Treat a conversion that strips "noreturn" as an identity conversion.
+ SCS.Second = ICK_NoReturn_Adjustment;
+ } else if (IsTransparentUnionStandardConversion(S, From, ToType,
+ InOverloadResolution,
+ SCS, CStyle)) {
+ SCS.Second = ICK_TransparentUnionConversion;
+ FromType = ToType;
+ } else if (tryAtomicConversion(S, From, ToType, InOverloadResolution, SCS,
+ CStyle)) {
+ // tryAtomicConversion has updated the standard conversion sequence
+ // appropriately.
+ return true;
+ } else if (ToType->isEventT() &&
+ From->isIntegerConstantExpr(S.getASTContext()) &&
+ From->EvaluateKnownConstInt(S.getASTContext()) == 0) {
+ SCS.Second = ICK_Zero_Event_Conversion;
+ FromType = ToType;
+ } else {
+ // No second conversion required.
+ SCS.Second = ICK_Identity;
+ }
+ SCS.setToType(1, FromType);
+
+ QualType CanonFrom;
+ QualType CanonTo;
+ // The third conversion can be a qualification conversion (C++ 4p1).
+ bool ObjCLifetimeConversion;
+ if (S.IsQualificationConversion(FromType, ToType, CStyle,
+ ObjCLifetimeConversion)) {
+ SCS.Third = ICK_Qualification;
+ SCS.QualificationIncludesObjCLifetime = ObjCLifetimeConversion;
+ FromType = ToType;
+ CanonFrom = S.Context.getCanonicalType(FromType);
+ CanonTo = S.Context.getCanonicalType(ToType);
+ } else {
+ // No conversion required
+ SCS.Third = ICK_Identity;
+
+ // C++ [over.best.ics]p6:
+ // [...] Any difference in top-level cv-qualification is
+ // subsumed by the initialization itself and does not constitute
+ // a conversion. [...]
+ CanonFrom = S.Context.getCanonicalType(FromType);
+ CanonTo = S.Context.getCanonicalType(ToType);
+ if (CanonFrom.getLocalUnqualifiedType()
+ == CanonTo.getLocalUnqualifiedType() &&
+ CanonFrom.getLocalQualifiers() != CanonTo.getLocalQualifiers()) {
+ FromType = ToType;
+ CanonFrom = CanonTo;
+ }
+ }
+ SCS.setToType(2, FromType);
+
+ if (CanonFrom == CanonTo)
+ return true;
+
+ // If we have not converted the argument type to the parameter type,
+ // this is a bad conversion sequence, unless we're resolving an overload in C.
+ if (S.getLangOpts().CPlusPlus || !InOverloadResolution)
+ return false;
+
+ ExprResult ER = ExprResult{From};
+ auto Conv = S.CheckSingleAssignmentConstraints(ToType, ER,
+ /*Diagnose=*/false,
+ /*DiagnoseCFAudited=*/false,
+ /*ConvertRHS=*/false);
+ if (Conv != Sema::Compatible)
+ return false;
+
+ SCS.setAllToTypes(ToType);
+ // We need to set all three because we want this conversion to rank terribly,
+ // and we don't know what conversions it may overlap with.
+ SCS.First = ICK_C_Only_Conversion;
+ SCS.Second = ICK_C_Only_Conversion;
+ SCS.Third = ICK_C_Only_Conversion;
+ return true;
+}
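+
+// Two illustrative standard conversion sequences as decomposed above:
+//
+//   void f(const unsigned char *);
+//   unsigned char *p;
+//   // f(p): First = lvalue-to-rvalue, Second = identity,
+//   //       Third = qualification (adding 'const' on the pointee)
+//
+//   void g(double);
+//   int i;
+//   // g(i): First = lvalue-to-rvalue, Second = floating-integral conversion,
+//   //       Third = identity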
+
+static bool
+IsTransparentUnionStandardConversion(Sema &S, Expr* From,
+ QualType &ToType,
+ bool InOverloadResolution,
+ StandardConversionSequence &SCS,
+ bool CStyle) {
+
+ const RecordType *UT = ToType->getAsUnionType();
+ if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ return false;
+ // The field to initialize within the transparent union.
+ RecordDecl *UD = UT->getDecl();
+ // It's compatible if the expression matches any of the fields.
+ for (const auto *it : UD->fields()) {
+ if (IsStandardConversion(S, From, it->getType(), InOverloadResolution, SCS,
+ CStyle, /*ObjCWritebackConversion=*/false)) {
+ ToType = it->getType();
+ return true;
+ }
+ }
+ return false;
+}
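+
+// Illustrative C example of the transparent-union rule above:
+//
+//   typedef union {
+//     int *ip;
+//     float *fp;
+//   } U __attribute__((transparent_union));
+//
+//   void f(U);
+//   int *p;
+//   // f(p) is accepted: 'p' converts as if to the member type 'int *',
+//   // and ToType is rewritten to that member's type.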
+
+/// IsIntegralPromotion - Determines whether the conversion from the
+/// expression From (whose potentially-adjusted type is FromType) to
+/// ToType is an integral promotion (C++ 4.5). If so, returns true and
+/// sets PromotedType to the promoted type.
+bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
+ const BuiltinType *To = ToType->getAs<BuiltinType>();
+ // All integers are built-in.
+ if (!To) {
+ return false;
+ }
+
+ // An rvalue of type char, signed char, unsigned char, short int, or
+ // unsigned short int can be converted to an rvalue of type int if
+ // int can represent all the values of the source type; otherwise,
+ // the source rvalue can be converted to an rvalue of type unsigned
+ // int (C++ 4.5p1).
+ if (FromType->isPromotableIntegerType() && !FromType->isBooleanType() &&
+ !FromType->isEnumeralType()) {
+ if (// We can promote any signed, promotable integer type to an int
+ (FromType->isSignedIntegerType() ||
+ // We can promote any unsigned integer type whose size is
+ // less than int to an int.
+ (!FromType->isSignedIntegerType() &&
+ Context.getTypeSize(FromType) < Context.getTypeSize(ToType)))) {
+ return To->getKind() == BuiltinType::Int;
+ }
+
+ return To->getKind() == BuiltinType::UInt;
+ }
+
+ // C++11 [conv.prom]p3:
+ // A prvalue of an unscoped enumeration type whose underlying type is not
+ // fixed (7.2) can be converted to a prvalue of the first of the
+ // following types that can represent all the values of the enumeration
+ // (i.e., the values in the range bmin to bmax as described in 7.2): int,
+ // unsigned int, long int, unsigned long int, long long int, or unsigned
+ // long long int. If none of the types in that list can represent all the
+ // values of the enumeration, a prvalue of an unscoped enumeration
+ // type can be converted to a prvalue of the extended integer type
+ // with lowest integer conversion rank (4.13) greater than the rank of long
+ // long in which all the values of the enumeration can be represented. If
+ // there are two such extended types, the signed one is chosen.
+ // C++11 [conv.prom]p4:
+ // A prvalue of an unscoped enumeration type whose underlying type is fixed
+ // can be converted to a prvalue of its underlying type. Moreover, if
+ // integral promotion can be applied to its underlying type, a prvalue of an
+ // unscoped enumeration type whose underlying type is fixed can also be
+ // converted to a prvalue of the promoted underlying type.
+ if (const EnumType *FromEnumType = FromType->getAs<EnumType>()) {
+ // C++0x 7.2p9: Note that this implicit enum to int conversion is not
+ // provided for a scoped enumeration.
+ if (FromEnumType->getDecl()->isScoped())
+ return false;
+
+ // We can perform an integral promotion to the underlying type of the enum,
+ // even if that's not the promoted type. Note that the check for promoting
+ // the underlying type is based on the type alone, and does not consider
+ // the bitfield-ness of the actual source expression.
+ if (FromEnumType->getDecl()->isFixed()) {
+ QualType Underlying = FromEnumType->getDecl()->getIntegerType();
+ return Context.hasSameUnqualifiedType(Underlying, ToType) ||
+ IsIntegralPromotion(nullptr, Underlying, ToType);
+ }
+
+ // We have already pre-calculated the promotion type, so this is trivial.
+ if (ToType->isIntegerType() &&
+ isCompleteType(From->getLocStart(), FromType))
+ return Context.hasSameUnqualifiedType(
+ ToType, FromEnumType->getDecl()->getPromotionType());
+ }
+
+ // C++0x [conv.prom]p2:
+ // A prvalue of type char16_t, char32_t, or wchar_t (3.9.1) can be converted
+ // to a prvalue of the first of the following types that can
+ // represent all the values of its underlying type: int, unsigned int,
+ // long int, unsigned long int, long long int, or unsigned long long int.
+ // If none of the types in that list can represent all the values of its
+ // underlying type, a prvalue of type char16_t, char32_t,
+ // or wchar_t can be converted to a prvalue of its underlying
+ // type.
+ if (FromType->isAnyCharacterType() && !FromType->isCharType() &&
+ ToType->isIntegerType()) {
+ // Determine whether the type we're converting from is signed or
+ // unsigned.
+ bool FromIsSigned = FromType->isSignedIntegerType();
+ uint64_t FromSize = Context.getTypeSize(FromType);
+
+ // The types we'll try to promote to, in the appropriate
+ // order. Try each of these types.
+ QualType PromoteTypes[6] = {
+ Context.IntTy, Context.UnsignedIntTy,
+ Context.LongTy, Context.UnsignedLongTy ,
+ Context.LongLongTy, Context.UnsignedLongLongTy
+ };
+ for (int Idx = 0; Idx < 6; ++Idx) {
+ uint64_t ToSize = Context.getTypeSize(PromoteTypes[Idx]);
+ if (FromSize < ToSize ||
+ (FromSize == ToSize &&
+ FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType())) {
+ // We found the type that we can promote to. If this is the
+ // type we wanted, we have a promotion. Otherwise, no
+ // promotion.
+ return Context.hasSameUnqualifiedType(ToType, PromoteTypes[Idx]);
+ }
+ }
+ }
+
+ // An rvalue for an integral bit-field (9.6) can be converted to an
+ // rvalue of type int if int can represent all the values of the
+ // bit-field; otherwise, it can be converted to unsigned int if
+ // unsigned int can represent all the values of the bit-field. If
+ // the bit-field is larger yet, no integral promotion applies to
+ // it. If the bit-field has an enumerated type, it is treated as any
+ // other value of that type for promotion purposes (C++ 4.5p3).
+ // FIXME: We should delay checking of bit-fields until we actually perform the
+ // conversion.
+ if (From) {
+ if (FieldDecl *MemberDecl = From->getSourceBitField()) {
+ llvm::APSInt BitWidth;
+ if (FromType->isIntegralType(Context) &&
+ MemberDecl->getBitWidth()->isIntegerConstantExpr(BitWidth, Context)) {
+ llvm::APSInt ToSize(BitWidth.getBitWidth(), BitWidth.isUnsigned());
+ ToSize = Context.getTypeSize(ToType);
+
+ // Are we promoting to an int from a bitfield that fits in an int?
+ if (BitWidth < ToSize ||
+ (FromType->isSignedIntegerType() && BitWidth <= ToSize)) {
+ return To->getKind() == BuiltinType::Int;
+ }
+
+ // Are we promoting to an unsigned int from an unsigned bitfield
+ // that fits into an unsigned int?
+ if (FromType->isUnsignedIntegerType() && BitWidth <= ToSize) {
+ return To->getKind() == BuiltinType::UInt;
+ }
+
+ return false;
+ }
+ }
+ }
+
+ // An rvalue of type bool can be converted to an rvalue of type int,
+ // with false becoming zero and true becoming one (C++ 4.5p4).
+ if (FromType->isBooleanType() && To->getKind() == BuiltinType::Int) {
+ return true;
+ }
+
+ return false;
+}
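+
+// A few illustrative consequences of the promotion rules above:
+//
+//   void f(int);
+//   short s = 1;   f(s);  // integral promotion: short -> int
+//   bool b = true; f(b);  // bool -> int, false becomes 0 and true becomes 1
+//
+//   enum class E { a };
+//   // f(E::a) is ill-formed: scoped enumerations do not promote.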
+
+/// IsFloatingPointPromotion - Determines whether the conversion from
+/// FromType to ToType is a floating point promotion (C++ 4.6). If so,
+/// returns true and sets PromotedType to the promoted type.
+bool Sema::IsFloatingPointPromotion(QualType FromType, QualType ToType) {
+ if (const BuiltinType *FromBuiltin = FromType->getAs<BuiltinType>())
+ if (const BuiltinType *ToBuiltin = ToType->getAs<BuiltinType>()) {
+ // An rvalue of type float can be converted to an rvalue of type
+ // double (C++ 4.6p1).
+ if (FromBuiltin->getKind() == BuiltinType::Float &&
+ ToBuiltin->getKind() == BuiltinType::Double)
+ return true;
+
+ // C99 6.3.1.5p1:
+ // When a float is promoted to double or long double, or a
+ // double is promoted to long double [...].
+ if (!getLangOpts().CPlusPlus &&
+ (FromBuiltin->getKind() == BuiltinType::Float ||
+ FromBuiltin->getKind() == BuiltinType::Double) &&
+ (ToBuiltin->getKind() == BuiltinType::LongDouble))
+ return true;
+
+ // Half can be promoted to float.
+ if (!getLangOpts().NativeHalfType &&
+ FromBuiltin->getKind() == BuiltinType::Half &&
+ ToBuiltin->getKind() == BuiltinType::Float)
+ return true;
+ }
+
+ return false;
+}
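+
+// For example (illustrative): float -> double is a promotion in C++; in C,
+// float or double -> long double is also a promotion:
+//
+//   void g(double);
+//   float f = 1.0f;
+//   g(f);  // floating point promotion, not a floating point conversion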
+
+/// \brief Determine if a conversion is a complex promotion.
+///
+/// A complex promotion is defined as a complex -> complex conversion
+/// where the conversion between the underlying real types is a
+/// floating-point or integral promotion.
+bool Sema::IsComplexPromotion(QualType FromType, QualType ToType) {
+ const ComplexType *FromComplex = FromType->getAs<ComplexType>();
+ if (!FromComplex)
+ return false;
+
+ const ComplexType *ToComplex = ToType->getAs<ComplexType>();
+ if (!ToComplex)
+ return false;
+
+ return IsFloatingPointPromotion(FromComplex->getElementType(),
+ ToComplex->getElementType()) ||
+ IsIntegralPromotion(nullptr, FromComplex->getElementType(),
+ ToComplex->getElementType());
+}
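+
+// Illustrative example: _Complex float -> _Complex double is a complex
+// promotion, because float -> double is a promotion of the element type:
+//
+//   _Complex float cf;
+//   _Complex double cd = cf;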
+
+/// BuildSimilarlyQualifiedPointerType - Build the type for a pointer
+/// conversion from the pointer type FromPtr to a pointer to ToPointee, with
+/// the same type qualifiers as FromPtr has on its pointee type. ToType,
+/// if non-null, is a pointer to ToPointee that may or may not have
+/// the right set of qualifiers on its pointee.
+///
+static QualType
+BuildSimilarlyQualifiedPointerType(const Type *FromPtr,
+ QualType ToPointee, QualType ToType,
+ ASTContext &Context,
+ bool StripObjCLifetime = false) {
+ assert((FromPtr->getTypeClass() == Type::Pointer ||
+ FromPtr->getTypeClass() == Type::ObjCObjectPointer) &&
+ "Invalid similarly-qualified pointer type");
+
+ // Conversions to 'id' subsume cv-qualifier conversions.
+ if (ToType->isObjCIdType() || ToType->isObjCQualifiedIdType())
+ return ToType.getUnqualifiedType();
+
+ QualType CanonFromPointee
+ = Context.getCanonicalType(FromPtr->getPointeeType());
+ QualType CanonToPointee = Context.getCanonicalType(ToPointee);
+ Qualifiers Quals = CanonFromPointee.getQualifiers();
+
+ if (StripObjCLifetime)
+ Quals.removeObjCLifetime();
+
+ // Exact qualifier match -> return the pointer type we're converting to.
+ if (CanonToPointee.getLocalQualifiers() == Quals) {
+ // ToType is exactly what we need. Return it.
+ if (!ToType.isNull())
+ return ToType.getUnqualifiedType();
+
+ // Build a pointer to ToPointee. It has the right qualifiers
+ // already.
+ if (isa<ObjCObjectPointerType>(ToType))
+ return Context.getObjCObjectPointerType(ToPointee);
+ return Context.getPointerType(ToPointee);
+ }
+
+ // Just build a canonical type that has the right qualifiers.
+ QualType QualifiedCanonToPointee
+ = Context.getQualifiedType(CanonToPointee.getLocalUnqualifiedType(), Quals);
+
+ if (isa<ObjCObjectPointerType>(ToType))
+ return Context.getObjCObjectPointerType(QualifiedCanonToPointee);
+ return Context.getPointerType(QualifiedCanonToPointee);
+}
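+
+// For example (illustrative): converting 'const int *' toward 'void *' builds
+// 'const void *' here, preserving the source pointee qualifiers so that any
+// remaining qualifier mismatch can be handled by a separate qualification
+// conversion:
+//
+//   const int *cip;
+//   // pointer conversion target 'void *' => similarly-qualified result
+//   // 'const void *'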
+
+static bool isNullPointerConstantForConversion(Expr *Expr,
+ bool InOverloadResolution,
+ ASTContext &Context) {
+ // Handle value-dependent integral null pointer constants correctly.
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#903
+ if (Expr->isValueDependent() && !Expr->isTypeDependent() &&
+ Expr->getType()->isIntegerType() && !Expr->getType()->isEnumeralType())
+ return !InOverloadResolution;
+
+ return Expr->isNullPointerConstant(Context,
+ InOverloadResolution? Expr::NPC_ValueDependentIsNotNull
+ : Expr::NPC_ValueDependentIsNull);
+}
+
+/// IsPointerConversion - Determines whether the conversion of the
+/// expression From, which has the (possibly adjusted) type FromType,
+/// can be converted to the type ToType via a pointer conversion (C++
+/// 4.10). If so, returns true and places the converted type (that
+/// might differ from ToType in its cv-qualifiers at some level) into
+/// ConvertedType.
+///
+/// This routine also supports conversions to and from block pointers
+/// and conversions with Objective-C's 'id', 'id<protocols...>', and
+/// pointers to interfaces. FIXME: Once we've determined the
+/// appropriate overloading rules for Objective-C, we may want to
+/// split the Objective-C checks into a different routine; however,
+/// GCC seems to consider all of these conversions to be pointer
+/// conversions, so for now they live here. IncompatibleObjC will be
+/// set if the conversion is an allowed Objective-C conversion that
+/// should result in a warning.
+bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
+ bool InOverloadResolution,
+ QualType& ConvertedType,
+ bool &IncompatibleObjC) {
+ IncompatibleObjC = false;
+ if (isObjCPointerConversion(FromType, ToType, ConvertedType,
+ IncompatibleObjC))
+ return true;
+
+ // Conversion from a null pointer constant to any Objective-C pointer type.
+ if (ToType->isObjCObjectPointerType() &&
+ isNullPointerConstantForConversion(From, InOverloadResolution, Context)) {
+ ConvertedType = ToType;
+ return true;
+ }
+
+ // Blocks: Block pointers can be converted to void*.
+ if (FromType->isBlockPointerType() && ToType->isPointerType() &&
+ ToType->getAs<PointerType>()->getPointeeType()->isVoidType()) {
+ ConvertedType = ToType;
+ return true;
+ }
+ // Blocks: A null pointer constant can be converted to a block
+ // pointer type.
+ if (ToType->isBlockPointerType() &&
+ isNullPointerConstantForConversion(From, InOverloadResolution, Context)) {
+ ConvertedType = ToType;
+ return true;
+ }
+
+ // If the left-hand side is nullptr_t, the right side can be a null
+ // pointer constant.
+ if (ToType->isNullPtrType() &&
+ isNullPointerConstantForConversion(From, InOverloadResolution, Context)) {
+ ConvertedType = ToType;
+ return true;
+ }
+
+ const PointerType* ToTypePtr = ToType->getAs<PointerType>();
+ if (!ToTypePtr)
+ return false;
+
+ // A null pointer constant can be converted to a pointer type (C++ 4.10p1).
+ if (isNullPointerConstantForConversion(From, InOverloadResolution, Context)) {
+ ConvertedType = ToType;
+ return true;
+ }
+
+ // Beyond this point, both types need to be pointers,
+ // including Objective-C pointers.
+ QualType ToPointeeType = ToTypePtr->getPointeeType();
+ if (FromType->isObjCObjectPointerType() && ToPointeeType->isVoidType() &&
+ !getLangOpts().ObjCAutoRefCount) {
+ ConvertedType = BuildSimilarlyQualifiedPointerType(
+ FromType->getAs<ObjCObjectPointerType>(),
+ ToPointeeType,
+ ToType, Context);
+ return true;
+ }
+ const PointerType *FromTypePtr = FromType->getAs<PointerType>();
+ if (!FromTypePtr)
+ return false;
+
+ QualType FromPointeeType = FromTypePtr->getPointeeType();
+
+ // If the unqualified pointee types are the same, this can't be a
+ // pointer conversion, so don't do all of the work below.
+ if (Context.hasSameUnqualifiedType(FromPointeeType, ToPointeeType))
+ return false;
+
+ // An rvalue of type "pointer to cv T," where T is an object type,
+ // can be converted to an rvalue of type "pointer to cv void" (C++
+ // 4.10p2).
+ if (FromPointeeType->isIncompleteOrObjectType() &&
+ ToPointeeType->isVoidType()) {
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
+ ToPointeeType,
+ ToType, Context,
+ /*StripObjCLifetime=*/true);
+ return true;
+ }
+
+ // MSVC allows the implicit conversion of a function pointer to void*.
+ if (getLangOpts().MSVCCompat && FromPointeeType->isFunctionType() &&
+ ToPointeeType->isVoidType()) {
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
+ ToPointeeType,
+ ToType, Context);
+ return true;
+ }
+
+ // When we're overloading in C, we allow a special kind of pointer
+ // conversion for compatible-but-not-identical pointee types.
+ if (!getLangOpts().CPlusPlus &&
+ Context.typesAreCompatible(FromPointeeType, ToPointeeType)) {
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
+ ToPointeeType,
+ ToType, Context);
+ return true;
+ }
+
+ // C++ [conv.ptr]p3:
+ //
+ // An rvalue of type "pointer to cv D," where D is a class type,
+ // can be converted to an rvalue of type "pointer to cv B," where
+ // B is a base class (clause 10) of D. If B is an inaccessible
+ // (clause 11) or ambiguous (10.2) base class of D, a program that
+ // necessitates this conversion is ill-formed. The result of the
+ // conversion is a pointer to the base class sub-object of the
+ // derived class object. The null pointer value is converted to
+ // the null pointer value of the destination type.
+ //
+ // Note that we do not check for ambiguity or inaccessibility
+ // here. That is handled by CheckPointerConversion.
+ if (getLangOpts().CPlusPlus &&
+ FromPointeeType->isRecordType() && ToPointeeType->isRecordType() &&
+ !Context.hasSameUnqualifiedType(FromPointeeType, ToPointeeType) &&
+ IsDerivedFrom(From->getLocStart(), FromPointeeType, ToPointeeType)) {
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
+ ToPointeeType,
+ ToType, Context);
+ return true;
+ }
+
+ if (FromPointeeType->isVectorType() && ToPointeeType->isVectorType() &&
+ Context.areCompatibleVectorTypes(FromPointeeType, ToPointeeType)) {
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
+ ToPointeeType,
+ ToType, Context);
+ return true;
+ }
+
+ return false;
+}
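+
+// Illustrative conversions accepted by the routine above (hypothetical user
+// code, not part of this file):
+//
+//   struct B {};
+//   struct D : B {};
+//
+//   void take(const void *);
+//   void g(D *d, int *ip) {
+//     B *b = d;    // derived-to-base pointer conversion (C++ 4.10p3)
+//     take(ip);    // object pointer to "pointer to cv void" (C++ 4.10p2)
+//     int *n = 0;  // null pointer constant to pointer (C++ 4.10p1)
+//   }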
+
+/// \brief Adopt the given qualifiers for the given type.
+static QualType AdoptQualifiers(ASTContext &Context, QualType T, Qualifiers Qs){
+ Qualifiers TQs = T.getQualifiers();
+
+ // Check whether qualifiers already match.
+ if (TQs == Qs)
+ return T;
+
+ if (Qs.compatiblyIncludes(TQs))
+ return Context.getQualifiedType(T, Qs);
+
+ return Context.getQualifiedType(T.getUnqualifiedType(), Qs);
+}
+
+/// isObjCPointerConversion - Determines whether this is an
+/// Objective-C pointer conversion. Subroutine of IsPointerConversion,
+/// with the same arguments and return values.
+bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
+ QualType& ConvertedType,
+ bool &IncompatibleObjC) {
+ if (!getLangOpts().ObjC1)
+ return false;
+
+ // The set of qualifiers on the type we're converting from.
+ Qualifiers FromQualifiers = FromType.getQualifiers();
+
+ // First, we handle all conversions on ObjC object pointer types.
+ const ObjCObjectPointerType* ToObjCPtr =
+ ToType->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *FromObjCPtr =
+ FromType->getAs<ObjCObjectPointerType>();
+
+ if (ToObjCPtr && FromObjCPtr) {
+ // If the pointee types are the same (ignoring qualifications),
+ // then this is not a pointer conversion.
+ if (Context.hasSameUnqualifiedType(ToObjCPtr->getPointeeType(),
+ FromObjCPtr->getPointeeType()))
+ return false;
+
+ // Conversion between Objective-C pointers.
+ if (Context.canAssignObjCInterfaces(ToObjCPtr, FromObjCPtr)) {
+ const ObjCInterfaceType* LHS = ToObjCPtr->getInterfaceType();
+ const ObjCInterfaceType* RHS = FromObjCPtr->getInterfaceType();
+ if (getLangOpts().CPlusPlus && LHS && RHS &&
+ !ToObjCPtr->getPointeeType().isAtLeastAsQualifiedAs(
+ FromObjCPtr->getPointeeType()))
+ return false;
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromObjCPtr,
+ ToObjCPtr->getPointeeType(),
+ ToType, Context);
+ ConvertedType = AdoptQualifiers(Context, ConvertedType, FromQualifiers);
+ return true;
+ }
+
+ if (Context.canAssignObjCInterfaces(FromObjCPtr, ToObjCPtr)) {
+ // Okay: this is some kind of implicit downcast of Objective-C
+ // interfaces, which is permitted. However, we're going to
+ // complain about it.
+ IncompatibleObjC = true;
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromObjCPtr,
+ ToObjCPtr->getPointeeType(),
+ ToType, Context);
+ ConvertedType = AdoptQualifiers(Context, ConvertedType, FromQualifiers);
+ return true;
+ }
+ }
+ // Beyond this point, both types need to be C pointers or block pointers.
+ QualType ToPointeeType;
+ if (const PointerType *ToCPtr = ToType->getAs<PointerType>())
+ ToPointeeType = ToCPtr->getPointeeType();
+ else if (const BlockPointerType *ToBlockPtr =
+ ToType->getAs<BlockPointerType>()) {
+ // Objective-C++: We're able to convert from a pointer to any object
+ // to a block pointer type.
+ if (FromObjCPtr && FromObjCPtr->isObjCBuiltinType()) {
+ ConvertedType = AdoptQualifiers(Context, ToType, FromQualifiers);
+ return true;
+ }
+ ToPointeeType = ToBlockPtr->getPointeeType();
+ }
+ else if (FromType->getAs<BlockPointerType>() &&
+ ToObjCPtr && ToObjCPtr->isObjCBuiltinType()) {
+ // Objective-C++: We're able to convert from a block pointer type to a
+ // pointer to any object.
+ ConvertedType = AdoptQualifiers(Context, ToType, FromQualifiers);
+ return true;
+ }
+ else
+ return false;
+
+ QualType FromPointeeType;
+ if (const PointerType *FromCPtr = FromType->getAs<PointerType>())
+ FromPointeeType = FromCPtr->getPointeeType();
+ else if (const BlockPointerType *FromBlockPtr =
+ FromType->getAs<BlockPointerType>())
+ FromPointeeType = FromBlockPtr->getPointeeType();
+ else
+ return false;
+
+ // If we have pointers to pointers, recursively check whether this
+ // is an Objective-C conversion.
+ if (FromPointeeType->isPointerType() && ToPointeeType->isPointerType() &&
+ isObjCPointerConversion(FromPointeeType, ToPointeeType, ConvertedType,
+ IncompatibleObjC)) {
+ // We always complain about this conversion.
+ IncompatibleObjC = true;
+ ConvertedType = Context.getPointerType(ConvertedType);
+ ConvertedType = AdoptQualifiers(Context, ConvertedType, FromQualifiers);
+ return true;
+ }
+ // Allow conversion when the pointee is itself an Objective-C pointer
+ // converting to another Objective-C pointer, as in I* to id.
+ if (FromPointeeType->getAs<ObjCObjectPointerType>() &&
+ ToPointeeType->getAs<ObjCObjectPointerType>() &&
+ isObjCPointerConversion(FromPointeeType, ToPointeeType, ConvertedType,
+ IncompatibleObjC)) {
+
+ ConvertedType = Context.getPointerType(ConvertedType);
+ ConvertedType = AdoptQualifiers(Context, ConvertedType, FromQualifiers);
+ return true;
+ }
+
+ // If we have pointers to functions or blocks, check whether the only
+ // differences in the argument and result types are in Objective-C
+ // pointer conversions. If so, we permit the conversion (but
+ // complain about it).
+ const FunctionProtoType *FromFunctionType
+ = FromPointeeType->getAs<FunctionProtoType>();
+ const FunctionProtoType *ToFunctionType
+ = ToPointeeType->getAs<FunctionProtoType>();
+ if (FromFunctionType && ToFunctionType) {
+ // If the function types are exactly the same, this isn't an
+ // Objective-C pointer conversion.
+ if (Context.getCanonicalType(FromPointeeType)
+ == Context.getCanonicalType(ToPointeeType))
+ return false;
+
+ // Perform the quick checks that will tell us whether these
+ // function types are obviously different.
+ if (FromFunctionType->getNumParams() != ToFunctionType->getNumParams() ||
+ FromFunctionType->isVariadic() != ToFunctionType->isVariadic() ||
+ FromFunctionType->getTypeQuals() != ToFunctionType->getTypeQuals())
+ return false;
+
+ bool HasObjCConversion = false;
+ if (Context.getCanonicalType(FromFunctionType->getReturnType()) ==
+ Context.getCanonicalType(ToFunctionType->getReturnType())) {
+ // Okay, the types match exactly. Nothing to do.
+ } else if (isObjCPointerConversion(FromFunctionType->getReturnType(),
+ ToFunctionType->getReturnType(),
+ ConvertedType, IncompatibleObjC)) {
+ // Okay, we have an Objective-C pointer conversion.
+ HasObjCConversion = true;
+ } else {
+ // Function types are too different. Abort.
+ return false;
+ }
+
+ // Check argument types.
+ for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumParams();
+ ArgIdx != NumArgs; ++ArgIdx) {
+ QualType FromArgType = FromFunctionType->getParamType(ArgIdx);
+ QualType ToArgType = ToFunctionType->getParamType(ArgIdx);
+ if (Context.getCanonicalType(FromArgType)
+ == Context.getCanonicalType(ToArgType)) {
+ // Okay, the types match exactly. Nothing to do.
+ } else if (isObjCPointerConversion(FromArgType, ToArgType,
+ ConvertedType, IncompatibleObjC)) {
+ // Okay, we have an Objective-C pointer conversion.
+ HasObjCConversion = true;
+ } else {
+ // Argument types are too different. Abort.
+ return false;
+ }
+ }
+
+ if (HasObjCConversion) {
+ // We had an Objective-C conversion. Allow this pointer
+ // conversion, but complain about it.
+ ConvertedType = AdoptQualifiers(Context, ToType, FromQualifiers);
+ IncompatibleObjC = true;
+ return true;
+ }
+ }
+
+ return false;
+}
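+
+// Illustrative Objective-C++ cases for the routine above (hypothetical user
+// code; assumes Foundation's NSObject):
+//
+//   @interface I : NSObject
+//   @end
+//
+//   I *derived;
+//   id anything = derived;  // conversion to 'id': always accepted
+//   NSObject *base;
+//   I *down = base;         // implicit downcast: accepted, but
+//                           // IncompatibleObjC triggers a warning
+//   I **pi;
+//   id *po = pi;            // pointee-level conversion, the I* to id case
+//                           // handled above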
+
+/// \brief Determine whether this is an Objective-C writeback conversion,
+/// used for parameter passing when performing automatic reference counting.
+///
+/// \param FromType The type we're converting from.
+///
+/// \param ToType The type we're converting to.
+///
+/// \param ConvertedType The type that will be produced after applying
+/// this conversion.
+bool Sema::isObjCWritebackConversion(QualType FromType, QualType ToType,
+ QualType &ConvertedType) {
+ if (!getLangOpts().ObjCAutoRefCount ||
+ Context.hasSameUnqualifiedType(FromType, ToType))
+ return false;
+
+ // Parameter must be a pointer to __autoreleasing (with no other qualifiers).
+ QualType ToPointee;
+ if (const PointerType *ToPointer = ToType->getAs<PointerType>())
+ ToPointee = ToPointer->getPointeeType();
+ else
+ return false;
+
+ Qualifiers ToQuals = ToPointee.getQualifiers();
+ if (!ToPointee->isObjCLifetimeType() ||
+ ToQuals.getObjCLifetime() != Qualifiers::OCL_Autoreleasing ||
+ !ToQuals.withoutObjCLifetime().empty())
+ return false;
+
+ // Argument must be a pointer to __strong or __weak.
+ QualType FromPointee;
+ if (const PointerType *FromPointer = FromType->getAs<PointerType>())
+ FromPointee = FromPointer->getPointeeType();
+ else
+ return false;
+
+ Qualifiers FromQuals = FromPointee.getQualifiers();
+ if (!FromPointee->isObjCLifetimeType() ||
+ (FromQuals.getObjCLifetime() != Qualifiers::OCL_Strong &&
+ FromQuals.getObjCLifetime() != Qualifiers::OCL_Weak))
+ return false;
+
+ // Make sure that we have compatible qualifiers.
+ FromQuals.setObjCLifetime(Qualifiers::OCL_Autoreleasing);
+ if (!ToQuals.compatiblyIncludes(FromQuals))
+ return false;
+
+ // Remove qualifiers from the pointee type we're converting from; they
+ // aren't used in the compatibility check below, and we'll be adding back
+ // qualifiers (with __autoreleasing) if the compatibility check succeeds.
+ FromPointee = FromPointee.getUnqualifiedType();
+
+ // The unqualified form of the pointee types must be compatible.
+ ToPointee = ToPointee.getUnqualifiedType();
+ bool IncompatibleObjC;
+ if (Context.typesAreCompatible(FromPointee, ToPointee))
+ FromPointee = ToPointee;
+ else if (!isObjCPointerConversion(FromPointee, ToPointee, FromPointee,
+ IncompatibleObjC))
+ return false;
+
+ // Construct the type we're converting to, which is a pointer to an
+ // __autoreleasing pointee.
+ FromPointee = Context.getQualifiedType(FromPointee, FromQuals);
+ ConvertedType = Context.getPointerType(FromPointee);
+ return true;
+}
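+
+// A minimal ARC sketch of the writeback conversion recognized above
+// (hypothetical user code): passing the address of a __strong local to an
+// __autoreleasing out-parameter.
+//
+//   void fill(NSError * __autoreleasing *outError);
+//
+//   void caller() {
+//     NSError *err = nil;  // __strong by default under ARC
+//     fill(&err);          // 'NSError * __strong *' converts to
+//                          // 'NSError * __autoreleasing *'
+//   }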
+
+bool Sema::IsBlockPointerConversion(QualType FromType, QualType ToType,
+ QualType& ConvertedType) {
+ QualType ToPointeeType;
+ if (const BlockPointerType *ToBlockPtr =
+ ToType->getAs<BlockPointerType>())
+ ToPointeeType = ToBlockPtr->getPointeeType();
+ else
+ return false;
+
+ QualType FromPointeeType;
+ if (const BlockPointerType *FromBlockPtr =
+ FromType->getAs<BlockPointerType>())
+ FromPointeeType = FromBlockPtr->getPointeeType();
+ else
+ return false;
+ // We have pointers to blocks; check whether the only
+ // differences in the argument and result types are in Objective-C
+ // pointer conversions. If so, we permit the conversion.
+
+ const FunctionProtoType *FromFunctionType
+ = FromPointeeType->getAs<FunctionProtoType>();
+ const FunctionProtoType *ToFunctionType
+ = ToPointeeType->getAs<FunctionProtoType>();
+
+ if (!FromFunctionType || !ToFunctionType)
+ return false;
+
+ if (Context.hasSameType(FromPointeeType, ToPointeeType))
+ return true;
+
+ // Perform the quick checks that will tell us whether these
+ // function types are obviously different.
+ if (FromFunctionType->getNumParams() != ToFunctionType->getNumParams() ||
+ FromFunctionType->isVariadic() != ToFunctionType->isVariadic())
+ return false;
+
+ FunctionType::ExtInfo FromEInfo = FromFunctionType->getExtInfo();
+ FunctionType::ExtInfo ToEInfo = ToFunctionType->getExtInfo();
+ if (FromEInfo != ToEInfo)
+ return false;
+
+ bool IncompatibleObjC = false;
+ if (Context.hasSameType(FromFunctionType->getReturnType(),
+ ToFunctionType->getReturnType())) {
+ // Okay, the types match exactly. Nothing to do.
+ } else {
+ QualType RHS = FromFunctionType->getReturnType();
+ QualType LHS = ToFunctionType->getReturnType();
+ if ((!getLangOpts().CPlusPlus || !RHS->isRecordType()) &&
+ !RHS.hasQualifiers() && LHS.hasQualifiers())
+ LHS = LHS.getUnqualifiedType();
+
+ if (Context.hasSameType(RHS,LHS)) {
+ // OK exact match.
+ } else if (isObjCPointerConversion(RHS, LHS,
+ ConvertedType, IncompatibleObjC)) {
+ if (IncompatibleObjC)
+ return false;
+ // Okay, we have an Objective-C pointer conversion.
+ }
+ else
+ return false;
+ }
+
+ // Check argument types.
+ for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumParams();
+ ArgIdx != NumArgs; ++ArgIdx) {
+ IncompatibleObjC = false;
+ QualType FromArgType = FromFunctionType->getParamType(ArgIdx);
+ QualType ToArgType = ToFunctionType->getParamType(ArgIdx);
+ if (Context.hasSameType(FromArgType, ToArgType)) {
+ // Okay, the types match exactly. Nothing to do.
+ } else if (isObjCPointerConversion(ToArgType, FromArgType,
+ ConvertedType, IncompatibleObjC)) {
+ if (IncompatibleObjC)
+ return false;
+ // Okay, we have an Objective-C pointer conversion.
+ } else
+ // Argument types are too different. Abort.
+ return false;
+ }
+ if (LangOpts.ObjCAutoRefCount &&
+ !Context.FunctionTypesMatchOnNSConsumedAttrs(FromFunctionType,
+ ToFunctionType))
+ return false;
+
+ ConvertedType = ToType;
+ return true;
+}
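+
+// An illustrative block-pointer case for the routine above (hypothetical
+// Objective-C++ user code): the two block types differ only by an
+// Objective-C pointer conversion in a parameter type, so the assignment is
+// permitted.
+//
+//   @class I;
+//   void (^takesId)(id);
+//   void (^takesI)(I *) = takesId;  // parameter differs only by I* vs. id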
+
+enum {
+ ft_default,
+ ft_different_class,
+ ft_parameter_arity,
+ ft_parameter_mismatch,
+ ft_return_type,
+ ft_qualifier_mismatch
+};
+
+/// Attempts to get the FunctionProtoType from a Type. Handles
+/// MemberFunctionPointers properly.
+static const FunctionProtoType *tryGetFunctionProtoType(QualType FromType) {
+ if (auto *FPT = FromType->getAs<FunctionProtoType>())
+ return FPT;
+
+ if (auto *MPT = FromType->getAs<MemberPointerType>())
+ return MPT->getPointeeType()->getAs<FunctionProtoType>();
+
+ return nullptr;
+}
+
+/// HandleFunctionTypeMismatch - Gives diagnostic information for differing
+/// function types. Catches a different number of parameters, mismatches in
+/// parameter types, and different return types.
+void Sema::HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
+ QualType FromType, QualType ToType) {
+ // If either type is not valid, include no extra info.
+ if (FromType.isNull() || ToType.isNull()) {
+ PDiag << ft_default;
+ return;
+ }
+
+ // Get the function type from the pointers.
+ if (FromType->isMemberPointerType() && ToType->isMemberPointerType()) {
+ const MemberPointerType *FromMember = FromType->getAs<MemberPointerType>(),
+ *ToMember = ToType->getAs<MemberPointerType>();
+ if (!Context.hasSameType(FromMember->getClass(), ToMember->getClass())) {
+ PDiag << ft_different_class << QualType(ToMember->getClass(), 0)
+ << QualType(FromMember->getClass(), 0);
+ return;
+ }
+ FromType = FromMember->getPointeeType();
+ ToType = ToMember->getPointeeType();
+ }
+
+ if (FromType->isPointerType())
+ FromType = FromType->getPointeeType();
+ if (ToType->isPointerType())
+ ToType = ToType->getPointeeType();
+
+ // Remove references.
+ FromType = FromType.getNonReferenceType();
+ ToType = ToType.getNonReferenceType();
+
+ // Don't print extra info for non-specialized template functions.
+ if (FromType->isInstantiationDependentType() &&
+ !FromType->getAs<TemplateSpecializationType>()) {
+ PDiag << ft_default;
+ return;
+ }
+
+ // No extra info for same types.
+ if (Context.hasSameType(FromType, ToType)) {
+ PDiag << ft_default;
+ return;
+ }
+
+ const FunctionProtoType *FromFunction = tryGetFunctionProtoType(FromType),
+ *ToFunction = tryGetFunctionProtoType(ToType);
+
+ // Both types need to be function types.
+ if (!FromFunction || !ToFunction) {
+ PDiag << ft_default;
+ return;
+ }
+
+ if (FromFunction->getNumParams() != ToFunction->getNumParams()) {
+ PDiag << ft_parameter_arity << ToFunction->getNumParams()
+ << FromFunction->getNumParams();
+ return;
+ }
+
+ // Handle different parameter types.
+ unsigned ArgPos;
+ if (!FunctionParamTypesAreEqual(FromFunction, ToFunction, &ArgPos)) {
+ PDiag << ft_parameter_mismatch << ArgPos + 1
+ << ToFunction->getParamType(ArgPos)
+ << FromFunction->getParamType(ArgPos);
+ return;
+ }
+
+ // Handle different return type.
+ if (!Context.hasSameType(FromFunction->getReturnType(),
+ ToFunction->getReturnType())) {
+ PDiag << ft_return_type << ToFunction->getReturnType()
+ << FromFunction->getReturnType();
+ return;
+ }
+
+ unsigned FromQuals = FromFunction->getTypeQuals(),
+ ToQuals = ToFunction->getTypeQuals();
+ if (FromQuals != ToQuals) {
+ PDiag << ft_qualifier_mismatch << ToQuals << FromQuals;
+ return;
+ }
+
+ // Unable to find a difference, so add no extra info.
+ PDiag << ft_default;
+}
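+
+// Illustrative diagnostics driven by the selection above (hypothetical user
+// code):
+//
+//   void f(int);
+//   void (*p)(long) = f;  // ft_parameter_mismatch: parameter 1 differs,
+//                         // 'long' vs. 'int'
+//   int (*q)(int) = f;    // ft_return_type: 'int' vs. 'void'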
+
+/// FunctionParamTypesAreEqual - This routine checks two function proto types
+/// for equality of their parameter types. The caller has already checked that
+/// they have the same number of parameters. If the parameters differ, ArgPos
+/// will hold the index of the first differing parameter.
+bool Sema::FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
+ const FunctionProtoType *NewType,
+ unsigned *ArgPos) {
+ for (FunctionProtoType::param_type_iterator O = OldType->param_type_begin(),
+ N = NewType->param_type_begin(),
+ E = OldType->param_type_end();
+ O && (O != E); ++O, ++N) {
+ if (!Context.hasSameType(O->getUnqualifiedType(),
+ N->getUnqualifiedType())) {
+ if (ArgPos)
+ *ArgPos = O - OldType->param_type_begin();
+ return false;
+ }
+ }
+ return true;
+}
+
+/// CheckPointerConversion - Check the pointer conversion from the
+/// expression From to the type ToType. This routine checks for
+/// ambiguous or inaccessible derived-to-base pointer
+/// conversions for which IsPointerConversion has already returned
+/// true. It returns true and produces a diagnostic if there was an
+/// error, or returns false otherwise.
+bool Sema::CheckPointerConversion(Expr *From, QualType ToType,
+ CastKind &Kind,
+ CXXCastPath& BasePath,
+ bool IgnoreBaseAccess) {
+ QualType FromType = From->getType();
+ bool IsCStyleOrFunctionalCast = IgnoreBaseAccess;
+
+ Kind = CK_BitCast;
+
+ if (!IsCStyleOrFunctionalCast && !FromType->isAnyPointerType() &&
+ From->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNotNull) ==
+ Expr::NPCK_ZeroExpression) {
+ if (Context.hasSameUnqualifiedType(From->getType(), Context.BoolTy))
+ DiagRuntimeBehavior(From->getExprLoc(), From,
+ PDiag(diag::warn_impcast_bool_to_null_pointer)
+ << ToType << From->getSourceRange());
+ else if (!isUnevaluatedContext())
+ Diag(From->getExprLoc(), diag::warn_non_literal_null_pointer)
+ << ToType << From->getSourceRange();
+ }
+ if (const PointerType *ToPtrType = ToType->getAs<PointerType>()) {
+ if (const PointerType *FromPtrType = FromType->getAs<PointerType>()) {
+ QualType FromPointeeType = FromPtrType->getPointeeType(),
+ ToPointeeType = ToPtrType->getPointeeType();
+
+ if (FromPointeeType->isRecordType() && ToPointeeType->isRecordType() &&
+ !Context.hasSameUnqualifiedType(FromPointeeType, ToPointeeType)) {
+ // We must have a derived-to-base conversion. Check for an
+ // ambiguous or inaccessible conversion.
+ if (CheckDerivedToBaseConversion(FromPointeeType, ToPointeeType,
+ From->getExprLoc(),
+ From->getSourceRange(), &BasePath,
+ IgnoreBaseAccess))
+ return true;
+
+ // The conversion was successful.
+ Kind = CK_DerivedToBase;
+ }
+
+ if (!IsCStyleOrFunctionalCast && FromPointeeType->isFunctionType() &&
+ ToPointeeType->isVoidType()) {
+ assert(getLangOpts().MSVCCompat &&
+ "this should only be possible with MSVCCompat!");
+ Diag(From->getExprLoc(), diag::ext_ms_impcast_fn_obj)
+ << From->getSourceRange();
+ }
+ }
+ } else if (const ObjCObjectPointerType *ToPtrType =
+ ToType->getAs<ObjCObjectPointerType>()) {
+ if (const ObjCObjectPointerType *FromPtrType =
+ FromType->getAs<ObjCObjectPointerType>()) {
+ // Objective-C++ conversions are always okay.
+ // FIXME: We should have a different class of conversions for the
+ // Objective-C++ implicit conversions.
+ if (FromPtrType->isObjCBuiltinType() || ToPtrType->isObjCBuiltinType())
+ return false;
+ } else if (FromType->isBlockPointerType()) {
+ Kind = CK_BlockPointerToObjCPointerCast;
+ } else {
+ Kind = CK_CPointerToObjCPointerCast;
+ }
+ } else if (ToType->isBlockPointerType()) {
+ if (!FromType->isBlockPointerType())
+ Kind = CK_AnyPointerToBlockPointerCast;
+ }
+
+ // We shouldn't fall into this case unless it's valid for other
+ // reasons.
+ if (From->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull))
+ Kind = CK_NullToPointer;
+
+ return false;
+}
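+
+// Illustrative triggers for the two warnings above (hypothetical user code,
+// relying on the GNU extension that treats constant zero expressions as
+// null pointer constants):
+//
+//   int *p = false;  // warn_impcast_bool_to_null_pointer
+//   int *q = 1 - 1;  // warn_non_literal_null_pointer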
+
+/// IsMemberPointerConversion - Determines whether the expression From, which
+/// has the (possibly adjusted) type FromType, can be converted to the type
+/// ToType via a member pointer conversion (C++ 4.11). If so, returns true and
+/// places the converted type (which might differ from ToType in its
+/// cv-qualifiers at some level) into ConvertedType.
+bool Sema::IsMemberPointerConversion(Expr *From, QualType FromType,
+ QualType ToType,
+ bool InOverloadResolution,
+ QualType &ConvertedType) {
+ const MemberPointerType *ToTypePtr = ToType->getAs<MemberPointerType>();
+ if (!ToTypePtr)
+ return false;
+
+ // A null pointer constant can be converted to a member pointer (C++ 4.11p1)
+ if (From->isNullPointerConstant(Context,
+ InOverloadResolution? Expr::NPC_ValueDependentIsNotNull
+ : Expr::NPC_ValueDependentIsNull)) {
+ ConvertedType = ToType;
+ return true;
+ }
+
+ // Otherwise, both types have to be member pointers.
+ const MemberPointerType *FromTypePtr = FromType->getAs<MemberPointerType>();
+ if (!FromTypePtr)
+ return false;
+
+ // A pointer to member of B can be converted to a pointer to member of D,
+ // where D is derived from B (C++ 4.11p2).
+ QualType FromClass(FromTypePtr->getClass(), 0);
+ QualType ToClass(ToTypePtr->getClass(), 0);
+
+ if (!Context.hasSameUnqualifiedType(FromClass, ToClass) &&
+ IsDerivedFrom(From->getLocStart(), ToClass, FromClass)) {
+ ConvertedType = Context.getMemberPointerType(FromTypePtr->getPointeeType(),
+ ToClass.getTypePtr());
+ return true;
+ }
+
+ return false;
+}
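+
+// An illustrative member pointer conversion for the routine above
+// (hypothetical user code); note the direction is the reverse of the
+// object-pointer case:
+//
+//   struct B { int m; };
+//   struct D : B {};
+//
+//   int B::*pb = &B::m;
+//   int D::*pd = pb;  // pointer to member of base converts to pointer to
+//                     // member of derived (C++ 4.11p2)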
+
+/// CheckMemberPointerConversion - Check the member pointer conversion from the
+/// expression From to the type ToType. This routine checks for ambiguous,
+/// virtual, or inaccessible base-to-derived member pointer conversions
+/// for which IsMemberPointerConversion has already returned true. It returns
+/// true and produces a diagnostic if there was an error, or returns false
+/// otherwise.
+bool Sema::CheckMemberPointerConversion(Expr *From, QualType ToType,
+ CastKind &Kind,
+ CXXCastPath &BasePath,
+ bool IgnoreBaseAccess) {
+ QualType FromType = From->getType();
+ const MemberPointerType *FromPtrType = FromType->getAs<MemberPointerType>();
+ if (!FromPtrType) {
+ // This must be a null pointer to member pointer conversion
+ assert(From->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull) &&
+ "Expr must be null pointer constant!");
+ Kind = CK_NullToMemberPointer;
+ return false;
+ }
+
+ const MemberPointerType *ToPtrType = ToType->getAs<MemberPointerType>();
+ assert(ToPtrType && "No member pointer cast has a target type "
+ "that is not a member pointer.");
+
+ QualType FromClass = QualType(FromPtrType->getClass(), 0);
+ QualType ToClass = QualType(ToPtrType->getClass(), 0);
+
+ // FIXME: What about dependent types?
+ assert(FromClass->isRecordType() && "Pointer into non-class.");
+ assert(ToClass->isRecordType() && "Pointer into non-class.");
+
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/true);
+ bool DerivationOkay =
+ IsDerivedFrom(From->getLocStart(), ToClass, FromClass, Paths);
+ assert(DerivationOkay &&
+ "Should not have been called if derivation isn't OK.");
+ (void)DerivationOkay;
+
+ if (Paths.isAmbiguous(Context.getCanonicalType(FromClass).
+ getUnqualifiedType())) {
+ std::string PathDisplayStr = getAmbiguousPathsDisplayString(Paths);
+ Diag(From->getExprLoc(), diag::err_ambiguous_memptr_conv)
+ << 0 << FromClass << ToClass << PathDisplayStr << From->getSourceRange();
+ return true;
+ }
+
+ if (const RecordType *VBase = Paths.getDetectedVirtual()) {
+ Diag(From->getExprLoc(), diag::err_memptr_conv_via_virtual)
+ << FromClass << ToClass << QualType(VBase, 0)
+ << From->getSourceRange();
+ return true;
+ }
+
+ if (!IgnoreBaseAccess)
+ CheckBaseClassAccess(From->getExprLoc(), FromClass, ToClass,
+ Paths.front(),
+ diag::err_downcast_from_inaccessible_base);
+
+ // Must be a base to derived member conversion.
+ BuildBasePathArray(Paths, BasePath);
+ Kind = CK_BaseToDerivedMemberPointer;
+ return false;
+}
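+
+// An illustrative ill-formed case rejected above (hypothetical user code):
+// a member pointer conversion may not pass through a virtual base.
+//
+//   struct B { int m; };
+//   struct D : virtual B {};
+//
+//   int B::*pb = &B::m;
+//   int D::*pd = pb;  // error: err_memptr_conv_via_virtual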
+
+/// Determine whether the lifetime conversion between the two given
+/// qualifier sets is nontrivial.
+static bool isNonTrivialObjCLifetimeConversion(Qualifiers FromQuals,
+ Qualifiers ToQuals) {
+ // Converting anything to const __unsafe_unretained is trivial.
+ if (ToQuals.hasConst() &&
+ ToQuals.getObjCLifetime() == Qualifiers::OCL_ExplicitNone)
+ return false;
+
+ return true;
+}
+
+/// IsQualificationConversion - Determines whether the conversion from
+/// an rvalue of type FromType to ToType is a qualification conversion
+/// (C++ 4.4).
+///
+/// \param ObjCLifetimeConversion Output parameter that will be set to indicate
+/// when the qualification conversion involves a change in the Objective-C
+/// object lifetime.
+bool
+Sema::IsQualificationConversion(QualType FromType, QualType ToType,
+ bool CStyle, bool &ObjCLifetimeConversion) {
+ FromType = Context.getCanonicalType(FromType);
+ ToType = Context.getCanonicalType(ToType);
+ ObjCLifetimeConversion = false;
+
+ // If FromType and ToType are the same type, this is not a
+ // qualification conversion.
+ if (FromType.getUnqualifiedType() == ToType.getUnqualifiedType())
+ return false;
+
+ // (C++ 4.4p4):
+ // A conversion can add cv-qualifiers at levels other than the first
+ // in multi-level pointers, subject to the following rules: [...]
+ bool PreviousToQualsIncludeConst = true;
+ bool UnwrappedAnyPointer = false;
+ while (Context.UnwrapSimilarPointerTypes(FromType, ToType)) {
+ // Within each iteration of the loop, we check the qualifiers to
+ // determine if this still looks like a qualification
+ // conversion. Then, if all is well, we unwrap one more level of
+ // pointers or pointers-to-members and do it all again
+ // until there are no more pointers or pointers-to-members left to
+ // unwrap.
+ UnwrappedAnyPointer = true;
+
+ Qualifiers FromQuals = FromType.getQualifiers();
+ Qualifiers ToQuals = ToType.getQualifiers();
+
+ // Objective-C ARC:
+ // Check Objective-C lifetime conversions.
+ if (FromQuals.getObjCLifetime() != ToQuals.getObjCLifetime() &&
+ UnwrappedAnyPointer) {
+ if (ToQuals.compatiblyIncludesObjCLifetime(FromQuals)) {
+ if (isNonTrivialObjCLifetimeConversion(FromQuals, ToQuals))
+ ObjCLifetimeConversion = true;
+ FromQuals.removeObjCLifetime();
+ ToQuals.removeObjCLifetime();
+ } else {
+ // Qualification conversions cannot cast between different
+ // Objective-C lifetime qualifiers.
+ return false;
+ }
+ }
+
+ // Allow addition/removal of GC attributes but not changing GC attributes.
+ if (FromQuals.getObjCGCAttr() != ToQuals.getObjCGCAttr() &&
+ (!FromQuals.hasObjCGCAttr() || !ToQuals.hasObjCGCAttr())) {
+ FromQuals.removeObjCGCAttr();
+ ToQuals.removeObjCGCAttr();
+ }
+
+ // -- for every j > 0, if const is in cv 1,j then const is in cv
+ // 2,j, and similarly for volatile.
+ if (!CStyle && !ToQuals.compatiblyIncludes(FromQuals))
+ return false;
+
+ // -- if the cv 1,j and cv 2,j are different, then const is in
+ // every cv 1,k for 0 < k < j.
+ if (!CStyle && FromQuals.getCVRQualifiers() != ToQuals.getCVRQualifiers()
+ && !PreviousToQualsIncludeConst)
+ return false;
+
+ // Keep track of whether all prior cv-qualifiers in the "to" type
+ // include const.
+ PreviousToQualsIncludeConst
+ = PreviousToQualsIncludeConst && ToQuals.hasConst();
+ }
+
+ // We are left with FromType and ToType being the pointee types
+ // after unwrapping the original FromType and ToType the same number
+ // of times. If we unwrapped any pointers, and if FromType and
+ // ToType have the same unqualified type (since we checked
+ // qualifiers above), then this is a qualification conversion.
+ return UnwrappedAnyPointer && Context.hasSameUnqualifiedType(FromType,ToType);
+}
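+
+// Illustrative qualification conversions (hypothetical user code):
+//
+//   char **cpp;
+//   const char * const *ok = cpp;  // OK: 'const' added at the inner level,
+//                                  // and every outer level is const
+//   const char **bad = cpp;        // ill-formed: 'const' added at the inner
+//                                  // level without const at the outer level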
+
+/// \brief Determine whether this is a conversion from a scalar type to an
+/// atomic type.
+///
+/// If successful, updates \c SCS's second and third steps in the conversion
+/// sequence to finish the conversion.
+static bool tryAtomicConversion(Sema &S, Expr *From, QualType ToType,
+ bool InOverloadResolution,
+ StandardConversionSequence &SCS,
+ bool CStyle) {
+ const AtomicType *ToAtomic = ToType->getAs<AtomicType>();
+ if (!ToAtomic)
+ return false;
+
+ StandardConversionSequence InnerSCS;
+ if (!IsStandardConversion(S, From, ToAtomic->getValueType(),
+ InOverloadResolution, InnerSCS,
+ CStyle, /*AllowObjCWritebackConversion=*/false))
+ return false;
+
+ SCS.Second = InnerSCS.Second;
+ SCS.setToType(1, InnerSCS.getToType(1));
+ SCS.Third = InnerSCS.Third;
+ SCS.QualificationIncludesObjCLifetime
+ = InnerSCS.QualificationIncludesObjCLifetime;
+ SCS.setToType(2, InnerSCS.getToType(2));
+ return true;
+}
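+
+// A minimal sketch of the atomic case handled above (hypothetical C11-style
+// code, which clang also accepts in C++ as an extension):
+//
+//   _Atomic(long) a = 42;  // inner sequence: 'int' -> 'long' integral
+//                          // conversion, then the atomic initialization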
+
+static bool isFirstArgumentCompatibleWithType(ASTContext &Context,
+ CXXConstructorDecl *Constructor,
+ QualType Type) {
+ const FunctionProtoType *CtorType =
+ Constructor->getType()->getAs<FunctionProtoType>();
+ if (CtorType->getNumParams() > 0) {
+ QualType FirstArg = CtorType->getParamType(0);
+ if (Context.hasSameUnqualifiedType(Type, FirstArg.getNonReferenceType()))
+ return true;
+ }
+ return false;
+}
+
+static OverloadingResult
+IsInitializerListConstructorConversion(Sema &S, Expr *From, QualType ToType,
+ CXXRecordDecl *To,
+ UserDefinedConversionSequence &User,
+ OverloadCandidateSet &CandidateSet,
+ bool AllowExplicit) {
+ DeclContext::lookup_result R = S.LookupConstructors(To);
+ for (DeclContext::lookup_iterator Con = R.begin(), ConEnd = R.end();
+ Con != ConEnd; ++Con) {
+ NamedDecl *D = *Con;
+ DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
+
+ // Find the constructor (which may be a template).
+ CXXConstructorDecl *Constructor = nullptr;
+ FunctionTemplateDecl *ConstructorTmpl
+ = dyn_cast<FunctionTemplateDecl>(D);
+ if (ConstructorTmpl)
+ Constructor
+ = cast<CXXConstructorDecl>(ConstructorTmpl->getTemplatedDecl());
+ else
+ Constructor = cast<CXXConstructorDecl>(D);
+
+ bool Usable = !Constructor->isInvalidDecl() &&
+ S.isInitListConstructor(Constructor) &&
+ (AllowExplicit || !Constructor->isExplicit());
+ if (Usable) {
+ // If the first argument is (a reference to) the target type,
+ // suppress conversions.
+ bool SuppressUserConversions =
+ isFirstArgumentCompatibleWithType(S.Context, Constructor, ToType);
+ if (ConstructorTmpl)
+ S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
+ /*ExplicitArgs*/ nullptr,
+ From, CandidateSet,
+ SuppressUserConversions);
+ else
+ S.AddOverloadCandidate(Constructor, FoundDecl,
+ From, CandidateSet,
+ SuppressUserConversions);
+ }
+ }
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ OverloadCandidateSet::iterator Best;
+ switch (auto Result =
+ CandidateSet.BestViableFunction(S, From->getLocStart(),
+ Best, true)) {
+ case OR_Deleted:
+ case OR_Success: {
+ // Record the standard conversion we used and the conversion function.
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Best->Function);
+ QualType ThisType = Constructor->getThisType(S.Context);
+ // Initializer lists don't have conversions as such.
+ User.Before.setAsIdentityConversion();
+ User.HadMultipleCandidates = HadMultipleCandidates;
+ User.ConversionFunction = Constructor;
+ User.FoundConversionFunction = Best->FoundDecl;
+ User.After.setAsIdentityConversion();
+ User.After.setFromType(ThisType->getAs<PointerType>()->getPointeeType());
+ User.After.setAllToTypes(ToType);
+ return Result;
+ }
+
+ case OR_No_Viable_Function:
+ return OR_No_Viable_Function;
+ case OR_Ambiguous:
+ return OR_Ambiguous;
+ }
+
+ llvm_unreachable("Invalid OverloadResult!");
+}
+
+/// Determines whether there is a user-defined conversion sequence
+/// (C++ [over.ics.user]) that converts expression From to the type
+/// ToType. If such a conversion exists, User will contain the
+/// user-defined conversion sequence that performs such a conversion
+/// and this routine will return true. Otherwise, this routine returns
+/// false and User is unspecified.
+///
+/// \param AllowExplicit true if the conversion should consider C++0x
+/// "explicit" conversion functions as well as non-explicit conversion
+/// functions (C++0x [class.conv.fct]p2).
+///
+/// \param AllowObjCConversionOnExplicit true if the conversion should
+/// allow an extra Objective-C pointer conversion on uses of explicit
+/// constructors. Requires \c AllowExplicit to also be set.
+static OverloadingResult
+IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
+ UserDefinedConversionSequence &User,
+ OverloadCandidateSet &CandidateSet,
+ bool AllowExplicit,
+ bool AllowObjCConversionOnExplicit) {
+ assert(AllowExplicit || !AllowObjCConversionOnExplicit);
+
+ // Whether we will only visit constructors.
+ bool ConstructorsOnly = false;
+
+ // If the type we are converting to is a class type, enumerate its
+ // constructors.
+ if (const RecordType *ToRecordType = ToType->getAs<RecordType>()) {
+ // C++ [over.match.ctor]p1:
+ // When objects of class type are direct-initialized (8.5), or
+ // copy-initialized from an expression of the same or a
+ // derived class type (8.5), overload resolution selects the
+ // constructor. [...] For copy-initialization, the candidate
+ // functions are all the converting constructors (12.3.1) of
+ // that class. The argument list is the expression-list within
+ // the parentheses of the initializer.
+ if (S.Context.hasSameUnqualifiedType(ToType, From->getType()) ||
+ (From->getType()->getAs<RecordType>() &&
+ S.IsDerivedFrom(From->getLocStart(), From->getType(), ToType)))
+ ConstructorsOnly = true;
+
+ if (!S.isCompleteType(From->getExprLoc(), ToType)) {
+ // We're not going to find any constructors.
+ } else if (CXXRecordDecl *ToRecordDecl
+ = dyn_cast<CXXRecordDecl>(ToRecordType->getDecl())) {
+
+ Expr **Args = &From;
+ unsigned NumArgs = 1;
+ bool ListInitializing = false;
+ if (InitListExpr *InitList = dyn_cast<InitListExpr>(From)) {
+ // But first, see if there is an init-list-constructor that will work.
+ OverloadingResult Result = IsInitializerListConstructorConversion(
+ S, From, ToType, ToRecordDecl, User, CandidateSet, AllowExplicit);
+ if (Result != OR_No_Viable_Function)
+ return Result;
+ // Never mind.
+ CandidateSet.clear();
+
+ // If we're list-initializing, we pass the individual elements as
+ // arguments, not the entire list.
+ Args = InitList->getInits();
+ NumArgs = InitList->getNumInits();
+ ListInitializing = true;
+ }
+
+ DeclContext::lookup_result R = S.LookupConstructors(ToRecordDecl);
+ for (DeclContext::lookup_iterator Con = R.begin(), ConEnd = R.end();
+ Con != ConEnd; ++Con) {
+ NamedDecl *D = *Con;
+ DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
+
+ // Find the constructor (which may be a template).
+ CXXConstructorDecl *Constructor = nullptr;
+ FunctionTemplateDecl *ConstructorTmpl
+ = dyn_cast<FunctionTemplateDecl>(D);
+ if (ConstructorTmpl)
+ Constructor
+ = cast<CXXConstructorDecl>(ConstructorTmpl->getTemplatedDecl());
+ else
+ Constructor = cast<CXXConstructorDecl>(D);
+
+ bool Usable = !Constructor->isInvalidDecl();
+ if (ListInitializing)
+ Usable = Usable && (AllowExplicit || !Constructor->isExplicit());
+ else
+ Usable = Usable && Constructor->isConvertingConstructor(AllowExplicit);
+ if (Usable) {
+ bool SuppressUserConversions = !ConstructorsOnly;
+ if (SuppressUserConversions && ListInitializing) {
+ SuppressUserConversions = false;
+ if (NumArgs == 1) {
+ // If the first argument is (a reference to) the target type,
+ // suppress conversions.
+ SuppressUserConversions = isFirstArgumentCompatibleWithType(
+ S.Context, Constructor, ToType);
+ }
+ }
+ if (ConstructorTmpl)
+ S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
+ /*ExplicitArgs*/ nullptr,
+ llvm::makeArrayRef(Args, NumArgs),
+ CandidateSet, SuppressUserConversions);
+ else
+ // Allow one user-defined conversion when the user specifies a
+ // From->ToType conversion via a static cast (C-style, etc.).
+ S.AddOverloadCandidate(Constructor, FoundDecl,
+ llvm::makeArrayRef(Args, NumArgs),
+ CandidateSet, SuppressUserConversions);
+ }
+ }
+ }
+ }
+
+ // Enumerate conversion functions, if we're allowed to.
+ if (ConstructorsOnly || isa<InitListExpr>(From)) {
+ } else if (!S.isCompleteType(From->getLocStart(), From->getType())) {
+ // No conversion functions from incomplete types.
+ } else if (const RecordType *FromRecordType
+ = From->getType()->getAs<RecordType>()) {
+ if (CXXRecordDecl *FromRecordDecl
+ = dyn_cast<CXXRecordDecl>(FromRecordType->getDecl())) {
+ // Add all of the conversion functions as candidates.
+ const auto &Conversions = FromRecordDecl->getVisibleConversionFunctions();
+ for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
+ DeclAccessPair FoundDecl = I.getPair();
+ NamedDecl *D = FoundDecl.getDecl();
+ CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(D->getDeclContext());
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+
+ CXXConversionDecl *Conv;
+ FunctionTemplateDecl *ConvTemplate;
+ if ((ConvTemplate = dyn_cast<FunctionTemplateDecl>(D)))
+ Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
+ else
+ Conv = cast<CXXConversionDecl>(D);
+
+ if (AllowExplicit || !Conv->isExplicit()) {
+ if (ConvTemplate)
+ S.AddTemplateConversionCandidate(ConvTemplate, FoundDecl,
+ ActingContext, From, ToType,
+ CandidateSet,
+ AllowObjCConversionOnExplicit);
+ else
+ S.AddConversionCandidate(Conv, FoundDecl, ActingContext,
+ From, ToType, CandidateSet,
+ AllowObjCConversionOnExplicit);
+ }
+ }
+ }
+ }
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ OverloadCandidateSet::iterator Best;
+ switch (auto Result = CandidateSet.BestViableFunction(S, From->getLocStart(),
+ Best, true)) {
+ case OR_Success:
+ case OR_Deleted:
+ // Record the standard conversion we used and the conversion function.
+ if (CXXConstructorDecl *Constructor
+ = dyn_cast<CXXConstructorDecl>(Best->Function)) {
+ // C++ [over.ics.user]p1:
+ // If the user-defined conversion is specified by a
+ // constructor (12.3.1), the initial standard conversion
+ // sequence converts the source type to the type required by
+ // the argument of the constructor.
+ //
+ QualType ThisType = Constructor->getThisType(S.Context);
+ if (isa<InitListExpr>(From)) {
+ // Initializer lists don't have conversions as such.
+ User.Before.setAsIdentityConversion();
+ } else {
+ if (Best->Conversions[0].isEllipsis())
+ User.EllipsisConversion = true;
+ else {
+ User.Before = Best->Conversions[0].Standard;
+ User.EllipsisConversion = false;
+ }
+ }
+ User.HadMultipleCandidates = HadMultipleCandidates;
+ User.ConversionFunction = Constructor;
+ User.FoundConversionFunction = Best->FoundDecl;
+ User.After.setAsIdentityConversion();
+ User.After.setFromType(ThisType->getAs<PointerType>()->getPointeeType());
+ User.After.setAllToTypes(ToType);
+ return Result;
+ }
+ if (CXXConversionDecl *Conversion
+ = dyn_cast<CXXConversionDecl>(Best->Function)) {
+ // C++ [over.ics.user]p1:
+ //
+ // [...] If the user-defined conversion is specified by a
+ // conversion function (12.3.2), the initial standard
+ // conversion sequence converts the source type to the
+ // implicit object parameter of the conversion function.
+ User.Before = Best->Conversions[0].Standard;
+ User.HadMultipleCandidates = HadMultipleCandidates;
+ User.ConversionFunction = Conversion;
+ User.FoundConversionFunction = Best->FoundDecl;
+ User.EllipsisConversion = false;
+
+ // C++ [over.ics.user]p2:
+ // The second standard conversion sequence converts the
+ // result of the user-defined conversion to the target type
+ // for the sequence. Since an implicit conversion sequence
+ // is an initialization, the special rules for
+ // initialization by user-defined conversion apply when
+ // selecting the best user-defined conversion for a
+ // user-defined conversion sequence (see 13.3.3 and
+ // 13.3.3.1).
+ User.After = Best->FinalConversion;
+ return Result;
+ }
+ llvm_unreachable("Not a constructor or conversion function?");
+
+ case OR_No_Viable_Function:
+ return OR_No_Viable_Function;
+
+ case OR_Ambiguous:
+ return OR_Ambiguous;
+ }
+
+ llvm_unreachable("Invalid OverloadResult!");
+}
+
+bool
+Sema::DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType) {
+ ImplicitConversionSequence ICS;
+ OverloadCandidateSet CandidateSet(From->getExprLoc(),
+ OverloadCandidateSet::CSK_Normal);
+ OverloadingResult OvResult =
+ IsUserDefinedConversion(*this, From, ToType, ICS.UserDefined,
+ CandidateSet, false, false);
+ if (OvResult == OR_Ambiguous)
+ Diag(From->getLocStart(), diag::err_typecheck_ambiguous_condition)
+ << From->getType() << ToType << From->getSourceRange();
+ else if (OvResult == OR_No_Viable_Function && !CandidateSet.empty()) {
+ if (!RequireCompleteType(From->getLocStart(), ToType,
+ diag::err_typecheck_nonviable_condition_incomplete,
+ From->getType(), From->getSourceRange()))
+ Diag(From->getLocStart(), diag::err_typecheck_nonviable_condition)
+ << false << From->getType() << From->getSourceRange() << ToType;
+ } else
+ return false;
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, From);
+ return true;
+}
+
+/// \brief Compare the user-defined conversion functions or constructors
+/// of two user-defined conversion sequences to determine whether any ordering
+/// is possible.
+static ImplicitConversionSequence::CompareKind
+compareConversionFunctions(Sema &S, FunctionDecl *Function1,
+ FunctionDecl *Function2) {
+ if (!S.getLangOpts().ObjC1 || !S.getLangOpts().CPlusPlus11)
+ return ImplicitConversionSequence::Indistinguishable;
+
+ // Objective-C++:
+ // If both conversion functions are implicitly-declared conversions from
+ // a lambda closure type to a function pointer and a block pointer,
+ // respectively, always prefer the conversion to a function pointer,
+ // because the function pointer is more lightweight and is more likely
+ // to keep code working.
+ CXXConversionDecl *Conv1 = dyn_cast_or_null<CXXConversionDecl>(Function1);
+ if (!Conv1)
+ return ImplicitConversionSequence::Indistinguishable;
+
+ CXXConversionDecl *Conv2 = dyn_cast<CXXConversionDecl>(Function2);
+ if (!Conv2)
+ return ImplicitConversionSequence::Indistinguishable;
+
+ if (Conv1->getParent()->isLambda() && Conv2->getParent()->isLambda()) {
+ bool Block1 = Conv1->getConversionType()->isBlockPointerType();
+ bool Block2 = Conv2->getConversionType()->isBlockPointerType();
+ if (Block1 != Block2)
+ return Block1 ? ImplicitConversionSequence::Worse
+ : ImplicitConversionSequence::Better;
+ }
+
+ return ImplicitConversionSequence::Indistinguishable;
+}
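+
+// An illustrative Objective-C++11 overload set for the rule above
+// (hypothetical user code): a captureless lambda converts both to a function
+// pointer and to a block pointer, and the function pointer wins.
+//
+//   void take(void (*)());    // #1: preferred
+//   void take(void (^)());    // #2
+//   void h() { take([]{}); }  // calls #1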
+
+static bool hasDeprecatedStringLiteralToCharPtrConversion(
+ const ImplicitConversionSequence &ICS) {
+ return (ICS.isStandard() && ICS.Standard.DeprecatedStringLiteralToCharPtr) ||
+ (ICS.isUserDefined() &&
+ ICS.UserDefined.Before.DeprecatedStringLiteralToCharPtr);
+}
+
+/// CompareImplicitConversionSequences - Compare two implicit
+/// conversion sequences to determine whether one is better than the
+/// other or if they are indistinguishable (C++ 13.3.3.2).
+static ImplicitConversionSequence::CompareKind
+CompareImplicitConversionSequences(Sema &S, SourceLocation Loc,
+ const ImplicitConversionSequence& ICS1,
+ const ImplicitConversionSequence& ICS2)
+{
+ // (C++ 13.3.3.2p2): When comparing the basic forms of implicit
+ // conversion sequences (as defined in 13.3.3.1)
+ // -- a standard conversion sequence (13.3.3.1.1) is a better
+ // conversion sequence than a user-defined conversion sequence or
+ // an ellipsis conversion sequence, and
+ // -- a user-defined conversion sequence (13.3.3.1.2) is a better
+ // conversion sequence than an ellipsis conversion sequence
+ // (13.3.3.1.3).
+ //
+ // C++0x [over.best.ics]p10:
+ // For the purpose of ranking implicit conversion sequences as
+ // described in 13.3.3.2, the ambiguous conversion sequence is
+ // treated as a user-defined sequence that is indistinguishable
+ // from any other user-defined conversion sequence.
+
+ // String literal to 'char *' conversion has been deprecated in C++03 and
+ // removed in C++11. We still accept this conversion if it occurs in the
+ // best viable function. Otherwise, this conversion is considered worse
+ // than ellipsis conversion. Consider this as an extension; this is not in the
+ // standard. For example:
+ //
+ // int &f(...); // #1
+ // void f(char*); // #2
+ // void g() { int &r = f("foo"); }
+ //
+ // In C++03, we pick #2 as the best viable function.
+ // In C++11, we pick #1 as the best viable function, because ellipsis
+ // conversion is better than string-literal to char* conversion (since there
+ // is no such conversion in C++11). If there was no #1 at all or #1 couldn't
+ // convert arguments, #2 would be the best viable function in C++11.
+ // If the best viable function has this conversion, a warning will be issued
+ // in C++03, or an ExtWarn (+SFINAE failure) will be issued in C++11.
+
+ if (S.getLangOpts().CPlusPlus11 && !S.getLangOpts().WritableStrings &&
+ hasDeprecatedStringLiteralToCharPtrConversion(ICS1) !=
+ hasDeprecatedStringLiteralToCharPtrConversion(ICS2))
+ return hasDeprecatedStringLiteralToCharPtrConversion(ICS1)
+ ? ImplicitConversionSequence::Worse
+ : ImplicitConversionSequence::Better;
+
+ if (ICS1.getKindRank() < ICS2.getKindRank())
+ return ImplicitConversionSequence::Better;
+ if (ICS2.getKindRank() < ICS1.getKindRank())
+ return ImplicitConversionSequence::Worse;
+
+ // The following checks require both conversion sequences to be of
+ // the same kind.
+ if (ICS1.getKind() != ICS2.getKind())
+ return ImplicitConversionSequence::Indistinguishable;
+
+ ImplicitConversionSequence::CompareKind Result =
+ ImplicitConversionSequence::Indistinguishable;
+
+ // Two implicit conversion sequences of the same form are
+ // indistinguishable conversion sequences unless one of the
+ // following rules applies (C++ 13.3.3.2p3):
+
+ // List-initialization sequence L1 is a better conversion sequence than
+ // list-initialization sequence L2 if:
+ // - L1 converts to std::initializer_list<X> for some X and L2 does not, or,
+ // if not that,
+ // - L1 converts to type "array of N1 T", L2 converts to type "array of N2 T",
+ // and N1 is smaller than N2,
+ // even if one of the other rules in this paragraph would otherwise apply.
+ if (!ICS1.isBad()) {
+ if (ICS1.isStdInitializerListElement() &&
+ !ICS2.isStdInitializerListElement())
+ return ImplicitConversionSequence::Better;
+ if (!ICS1.isStdInitializerListElement() &&
+ ICS2.isStdInitializerListElement())
+ return ImplicitConversionSequence::Worse;
+ }
+
+ if (ICS1.isStandard())
+ // Standard conversion sequence S1 is a better conversion sequence than
+ // standard conversion sequence S2 if [...]
+ Result = CompareStandardConversionSequences(S, Loc,
+ ICS1.Standard, ICS2.Standard);
+ else if (ICS1.isUserDefined()) {
+ // User-defined conversion sequence U1 is a better conversion
+ // sequence than another user-defined conversion sequence U2 if
+ // they contain the same user-defined conversion function or
+ // constructor and if the second standard conversion sequence of
+ // U1 is better than the second standard conversion sequence of
+ // U2 (C++ 13.3.3.2p3).
+ if (ICS1.UserDefined.ConversionFunction ==
+ ICS2.UserDefined.ConversionFunction)
+ Result = CompareStandardConversionSequences(S, Loc,
+ ICS1.UserDefined.After,
+ ICS2.UserDefined.After);
+ else
+ Result = compareConversionFunctions(S,
+ ICS1.UserDefined.ConversionFunction,
+ ICS2.UserDefined.ConversionFunction);
+ }
+
+ return Result;
+}
+
+static bool hasSimilarType(ASTContext &Context, QualType T1, QualType T2) {
+ while (Context.UnwrapSimilarPointerTypes(T1, T2)) {
+ Qualifiers Quals;
+ T1 = Context.getUnqualifiedArrayType(T1, Quals);
+ T2 = Context.getUnqualifiedArrayType(T2, Quals);
+ }
+
+ return Context.hasSameUnqualifiedType(T1, T2);
+}
+
+// Per 13.3.3.2p3, compare the given standard conversion sequences to
+// determine if one is a proper subset of the other.
+static ImplicitConversionSequence::CompareKind
+compareStandardConversionSubsets(ASTContext &Context,
+ const StandardConversionSequence& SCS1,
+ const StandardConversionSequence& SCS2) {
+ ImplicitConversionSequence::CompareKind Result
+ = ImplicitConversionSequence::Indistinguishable;
+
+ // the identity conversion sequence is considered to be a subsequence of
+ // any non-identity conversion sequence
+ if (SCS1.isIdentityConversion() && !SCS2.isIdentityConversion())
+ return ImplicitConversionSequence::Better;
+ else if (!SCS1.isIdentityConversion() && SCS2.isIdentityConversion())
+ return ImplicitConversionSequence::Worse;
+
+ if (SCS1.Second != SCS2.Second) {
+ if (SCS1.Second == ICK_Identity)
+ Result = ImplicitConversionSequence::Better;
+ else if (SCS2.Second == ICK_Identity)
+ Result = ImplicitConversionSequence::Worse;
+ else
+ return ImplicitConversionSequence::Indistinguishable;
+ } else if (!hasSimilarType(Context, SCS1.getToType(1), SCS2.getToType(1)))
+ return ImplicitConversionSequence::Indistinguishable;
+
+ if (SCS1.Third == SCS2.Third) {
+ return Context.hasSameType(SCS1.getToType(2), SCS2.getToType(2))? Result
+ : ImplicitConversionSequence::Indistinguishable;
+ }
+
+ if (SCS1.Third == ICK_Identity)
+ return Result == ImplicitConversionSequence::Worse
+ ? ImplicitConversionSequence::Indistinguishable
+ : ImplicitConversionSequence::Better;
+
+ if (SCS2.Third == ICK_Identity)
+ return Result == ImplicitConversionSequence::Better
+ ? ImplicitConversionSequence::Indistinguishable
+ : ImplicitConversionSequence::Worse;
+
+ return ImplicitConversionSequence::Indistinguishable;
+}
+
+/// \brief Determine whether one of the given reference bindings is better
+/// than the other based on what kind of bindings they are.
+static bool
+isBetterReferenceBindingKind(const StandardConversionSequence &SCS1,
+ const StandardConversionSequence &SCS2) {
+ // C++0x [over.ics.rank]p3b4:
+ // -- S1 and S2 are reference bindings (8.5.3) and neither refers to an
+ // implicit object parameter of a non-static member function declared
+ // without a ref-qualifier, and *either* S1 binds an rvalue reference
+ // to an rvalue and S2 binds an lvalue reference *or S1 binds an
+ // lvalue reference to a function lvalue and S2 binds an rvalue
+ // reference*.
+ //
+ // FIXME: Rvalue references. We're going rogue with the above edits,
+ // because the semantics in the current C++0x working paper (N3225 at the
+ // time of this writing) break the standard definition of std::forward
+ // and std::reference_wrapper when dealing with references to functions.
+ // Proposed wording changes submitted to CWG for consideration.
+ if (SCS1.BindsImplicitObjectArgumentWithoutRefQualifier ||
+ SCS2.BindsImplicitObjectArgumentWithoutRefQualifier)
+ return false;
+
+ return (!SCS1.IsLvalueReference && SCS1.BindsToRvalue &&
+ SCS2.IsLvalueReference) ||
+ (SCS1.IsLvalueReference && SCS1.BindsToFunctionLvalue &&
+ !SCS2.IsLvalueReference && SCS2.BindsToFunctionLvalue);
+}
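+
+// An illustrative overload set for the rule above (hypothetical user code):
+//
+//   void f(int &&);       // #1: binds an rvalue reference to an rvalue
+//   void f(const int &);  // #2
+//   void g() { f(42); }   // #1 provides the better reference binding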
+
+/// CompareStandardConversionSequences - Compare two standard
+/// conversion sequences to determine whether one is better than the
+/// other or if they are indistinguishable (C++ 13.3.3.2p3).
+static ImplicitConversionSequence::CompareKind
+CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
+ const StandardConversionSequence& SCS1,
+ const StandardConversionSequence& SCS2)
+{
+ // Standard conversion sequence S1 is a better conversion sequence
+ // than standard conversion sequence S2 if (C++ 13.3.3.2p3):
+
+ // -- S1 is a proper subsequence of S2 (comparing the conversion
+ // sequences in the canonical form defined by 13.3.3.1.1,
+ // excluding any Lvalue Transformation; the identity conversion
+ // sequence is considered to be a subsequence of any
+ // non-identity conversion sequence) or, if not that,
+ if (ImplicitConversionSequence::CompareKind CK
+ = compareStandardConversionSubsets(S.Context, SCS1, SCS2))
+ return CK;
+
+ // -- the rank of S1 is better than the rank of S2 (by the rules
+ // defined below), or, if not that,
+ ImplicitConversionRank Rank1 = SCS1.getRank();
+ ImplicitConversionRank Rank2 = SCS2.getRank();
+ if (Rank1 < Rank2)
+ return ImplicitConversionSequence::Better;
+ else if (Rank2 < Rank1)
+ return ImplicitConversionSequence::Worse;
+
+ // (C++ 13.3.3.2p4): Two conversion sequences with the same rank
+ // are indistinguishable unless one of the following rules
+ // applies:
+
+ // A conversion that is not a conversion of a pointer, or
+ // pointer to member, to bool is better than another conversion
+ // that is such a conversion.
+ if (SCS1.isPointerConversionToBool() != SCS2.isPointerConversionToBool())
+ return SCS2.isPointerConversionToBool()
+ ? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
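+
+ // For example (an illustrative sketch with hypothetical overloads):
+ //   void f(void *);
+ //   void f(bool);
+ //   char *p; f(p);
+ // Both conversions have Conversion rank, but char* -> bool is a pointer
+ // conversion to bool, so f(void *) is chosen.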
+
+ // C++ [over.ics.rank]p4b2:
+ //
+ // If class B is derived directly or indirectly from class A,
+ // conversion of B* to A* is better than conversion of B* to
+ // void*, and conversion of A* to void* is better than conversion
+ // of B* to void*.
+ bool SCS1ConvertsToVoid
+ = SCS1.isPointerConversionToVoidPointer(S.Context);
+ bool SCS2ConvertsToVoid
+ = SCS2.isPointerConversionToVoidPointer(S.Context);
+ if (SCS1ConvertsToVoid != SCS2ConvertsToVoid) {
+ // Exactly one of the conversion sequences is a conversion to
+ // a void pointer; it's the worse conversion.
+ return SCS2ConvertsToVoid ? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
+ } else if (!SCS1ConvertsToVoid && !SCS2ConvertsToVoid) {
+ // Neither conversion sequence converts to a void pointer; compare
+ // their derived-to-base conversions.
+ if (ImplicitConversionSequence::CompareKind DerivedCK
+ = CompareDerivedToBaseConversions(S, Loc, SCS1, SCS2))
+ return DerivedCK;
+ } else if (SCS1ConvertsToVoid && SCS2ConvertsToVoid &&
+ !S.Context.hasSameType(SCS1.getFromType(), SCS2.getFromType())) {
+ // Both conversion sequences are conversions to void
+ // pointers. Compare the source types to determine if there's an
+ // inheritance relationship in their sources.
+ QualType FromType1 = SCS1.getFromType();
+ QualType FromType2 = SCS2.getFromType();
+
+ // Adjust the types we're converting from via the array-to-pointer
+ // conversion, if we need to.
+ if (SCS1.First == ICK_Array_To_Pointer)
+ FromType1 = S.Context.getArrayDecayedType(FromType1);
+ if (SCS2.First == ICK_Array_To_Pointer)
+ FromType2 = S.Context.getArrayDecayedType(FromType2);
+
+ QualType FromPointee1 = FromType1->getPointeeType().getUnqualifiedType();
+ QualType FromPointee2 = FromType2->getPointeeType().getUnqualifiedType();
+
+ if (S.IsDerivedFrom(Loc, FromPointee2, FromPointee1))
+ return ImplicitConversionSequence::Better;
+ else if (S.IsDerivedFrom(Loc, FromPointee1, FromPointee2))
+ return ImplicitConversionSequence::Worse;
+
+ // Objective-C++: If one interface is more specific than the
+ // other, it is the better one.
+ const ObjCObjectPointerType* FromObjCPtr1
+ = FromType1->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType* FromObjCPtr2
+ = FromType2->getAs<ObjCObjectPointerType>();
+ if (FromObjCPtr1 && FromObjCPtr2) {
+ bool AssignLeft = S.Context.canAssignObjCInterfaces(FromObjCPtr1,
+ FromObjCPtr2);
+ bool AssignRight = S.Context.canAssignObjCInterfaces(FromObjCPtr2,
+ FromObjCPtr1);
+ if (AssignLeft != AssignRight) {
+ return AssignLeft? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
+ }
+ }
+ }
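+
+ // For example (an illustrative sketch), assuming
+ // 'struct A {}; struct B : A {};':
+ //   void f(A *);
+ //   void f(void *);
+ //   B *bp; f(bp);   // B* -> A* beats B* -> void*, so f(A *) is chosen.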
+
+ // Compare based on qualification conversions (C++ 13.3.3.2p3,
+ // bullet 3).
+ if (ImplicitConversionSequence::CompareKind QualCK
+ = CompareQualificationConversions(S, SCS1, SCS2))
+ return QualCK;
+
+ if (SCS1.ReferenceBinding && SCS2.ReferenceBinding) {
+ // Check for a better reference binding based on the kind of bindings.
+ if (isBetterReferenceBindingKind(SCS1, SCS2))
+ return ImplicitConversionSequence::Better;
+ else if (isBetterReferenceBindingKind(SCS2, SCS1))
+ return ImplicitConversionSequence::Worse;
+
+ // C++ [over.ics.rank]p3b4:
+ // -- S1 and S2 are reference bindings (8.5.3), and the types to
+ // which the references refer are the same type except for
+ // top-level cv-qualifiers, and the type to which the reference
+ // initialized by S2 refers is more cv-qualified than the type
+ // to which the reference initialized by S1 refers.
+ QualType T1 = SCS1.getToType(2);
+ QualType T2 = SCS2.getToType(2);
+ T1 = S.Context.getCanonicalType(T1);
+ T2 = S.Context.getCanonicalType(T2);
+ Qualifiers T1Quals, T2Quals;
+ QualType UnqualT1 = S.Context.getUnqualifiedArrayType(T1, T1Quals);
+ QualType UnqualT2 = S.Context.getUnqualifiedArrayType(T2, T2Quals);
+ if (UnqualT1 == UnqualT2) {
+ // Objective-C++ ARC: If the references refer to objects with different
+ // lifetimes, prefer bindings that don't change lifetime.
+ if (SCS1.ObjCLifetimeConversionBinding !=
+ SCS2.ObjCLifetimeConversionBinding) {
+ return SCS1.ObjCLifetimeConversionBinding
+ ? ImplicitConversionSequence::Worse
+ : ImplicitConversionSequence::Better;
+ }
+
+ // If the type is an array type, promote the element qualifiers to the
+ // type for comparison.
+ if (isa<ArrayType>(T1) && T1Quals)
+ T1 = S.Context.getQualifiedType(UnqualT1, T1Quals);
+ if (isa<ArrayType>(T2) && T2Quals)
+ T2 = S.Context.getQualifiedType(UnqualT2, T2Quals);
+ if (T2.isMoreQualifiedThan(T1))
+ return ImplicitConversionSequence::Better;
+ else if (T1.isMoreQualifiedThan(T2))
+ return ImplicitConversionSequence::Worse;
+ }
+ }
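+
+ // For example (an illustrative sketch with hypothetical overloads):
+ //   void f(int &);
+ //   void f(const int &);
+ //   int x; f(x);
+ // Both references bind directly, but const int is more cv-qualified
+ // than int, so the binding to int & is better and f(int &) is chosen.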
+
+ // In Microsoft mode, prefer an integral conversion to a
+ // floating-to-integral conversion if the integral conversion
+ // is between types of the same size.
+ // For example:
+ // void f(float);
+ // void f(int);
+ // int main() {
+ // long a;
+ // f(a);
+ // }
+ // Here, MSVC will call f(int) instead of generating a compile error
+ // as clang will do in standard mode.
+ if (S.getLangOpts().MSVCCompat && SCS1.Second == ICK_Integral_Conversion &&
+ SCS2.Second == ICK_Floating_Integral &&
+ S.Context.getTypeSize(SCS1.getFromType()) ==
+ S.Context.getTypeSize(SCS1.getToType(2)))
+ return ImplicitConversionSequence::Better;
+
+ return ImplicitConversionSequence::Indistinguishable;
+}
+
+/// CompareQualificationConversions - Compares two standard conversion
+/// sequences to determine whether they can be ranked based on their
+/// qualification conversions (C++ 13.3.3.2p3 bullet 3).
+static ImplicitConversionSequence::CompareKind
+CompareQualificationConversions(Sema &S,
+ const StandardConversionSequence& SCS1,
+ const StandardConversionSequence& SCS2) {
+ // C++ 13.3.3.2p3:
+ // -- S1 and S2 differ only in their qualification conversion and
+ // yield similar types T1 and T2 (C++ 4.4), respectively, and the
+ // cv-qualification signature of type T1 is a proper subset of
+ // the cv-qualification signature of type T2, and S1 is not the
+ // deprecated string literal array-to-pointer conversion (4.2).
+ if (SCS1.First != SCS2.First || SCS1.Second != SCS2.Second ||
+ SCS1.Third != SCS2.Third || SCS1.Third != ICK_Qualification)
+ return ImplicitConversionSequence::Indistinguishable;
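+
+ // For example (an illustrative sketch with hypothetical overloads):
+ //   void f(const int *);
+ //   void f(const volatile int *);
+ //   int *p; f(p);
+ // Both sequences are qualification conversions, and the cv-signature
+ // of const int* is a proper subset of that of const volatile int*, so
+ // f(const int *) is chosen.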
+
+ // FIXME: the example in the standard doesn't use a qualification
+ // conversion (!)
+ QualType T1 = SCS1.getToType(2);
+ QualType T2 = SCS2.getToType(2);
+ T1 = S.Context.getCanonicalType(T1);
+ T2 = S.Context.getCanonicalType(T2);
+ Qualifiers T1Quals, T2Quals;
+ QualType UnqualT1 = S.Context.getUnqualifiedArrayType(T1, T1Quals);
+ QualType UnqualT2 = S.Context.getUnqualifiedArrayType(T2, T2Quals);
+
+ // If the types are the same, we won't learn anything by unwrapping
+ // them.
+ if (UnqualT1 == UnqualT2)
+ return ImplicitConversionSequence::Indistinguishable;
+
+ // If the type is an array type, promote the element qualifiers to the type
+ // for comparison.
+ if (isa<ArrayType>(T1) && T1Quals)
+ T1 = S.Context.getQualifiedType(UnqualT1, T1Quals);
+ if (isa<ArrayType>(T2) && T2Quals)
+ T2 = S.Context.getQualifiedType(UnqualT2, T2Quals);
+
+ ImplicitConversionSequence::CompareKind Result
+ = ImplicitConversionSequence::Indistinguishable;
+
+ // Objective-C++ ARC:
+ // Prefer qualification conversions not involving a change in lifetime
+ // to qualification conversions that do change lifetime.
+ if (SCS1.QualificationIncludesObjCLifetime !=
+ SCS2.QualificationIncludesObjCLifetime) {
+ Result = SCS1.QualificationIncludesObjCLifetime
+ ? ImplicitConversionSequence::Worse
+ : ImplicitConversionSequence::Better;
+ }
+
+ while (S.Context.UnwrapSimilarPointerTypes(T1, T2)) {
+ // Within each iteration of the loop, we check the qualifiers to
+ // determine if this still looks like a qualification
+ // conversion. Then, if all is well, we unwrap one more level of
+ // pointers or pointers-to-members and do it all again
+ // until there are no more pointers or pointers-to-members left
+ // to unwrap. This essentially mimics what
+ // IsQualificationConversion does, but here we're checking for a
+ // strict subset of qualifiers.
+ if (T1.getCVRQualifiers() == T2.getCVRQualifiers())
+ // The qualifiers are the same, so this doesn't tell us anything
+ // about how the sequences rank.
+ ;
+ else if (T2.isMoreQualifiedThan(T1)) {
+ // T1 has fewer qualifiers, so it could be the better sequence.
+ if (Result == ImplicitConversionSequence::Worse)
+ // Neither has qualifiers that are a subset of the other's
+ // qualifiers.
+ return ImplicitConversionSequence::Indistinguishable;
+
+ Result = ImplicitConversionSequence::Better;
+ } else if (T1.isMoreQualifiedThan(T2)) {
+ // T2 has fewer qualifiers, so it could be the better sequence.
+ if (Result == ImplicitConversionSequence::Better)
+ // Neither has qualifiers that are a subset of the other's
+ // qualifiers.
+ return ImplicitConversionSequence::Indistinguishable;
+
+ Result = ImplicitConversionSequence::Worse;
+ } else {
+ // Qualifiers are disjoint.
+ return ImplicitConversionSequence::Indistinguishable;
+ }
+
+ // If the types after this point are equivalent, we're done.
+ if (S.Context.hasSameUnqualifiedType(T1, T2))
+ break;
+ }
+
+ // Check that the winning standard conversion sequence isn't using
+ // the deprecated string literal array to pointer conversion.
+ switch (Result) {
+ case ImplicitConversionSequence::Better:
+ if (SCS1.DeprecatedStringLiteralToCharPtr)
+ Result = ImplicitConversionSequence::Indistinguishable;
+ break;
+
+ case ImplicitConversionSequence::Indistinguishable:
+ break;
+
+ case ImplicitConversionSequence::Worse:
+ if (SCS2.DeprecatedStringLiteralToCharPtr)
+ Result = ImplicitConversionSequence::Indistinguishable;
+ break;
+ }
+
+ return Result;
+}
+
+/// CompareDerivedToBaseConversions - Compares two standard conversion
+/// sequences to determine whether they can be ranked based on their
+/// various kinds of derived-to-base conversions (C++
+/// [over.ics.rank]p4b3). As part of these checks, we also look at
+/// conversions between Objective-C interface types.
+static ImplicitConversionSequence::CompareKind
+CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
+ const StandardConversionSequence& SCS1,
+ const StandardConversionSequence& SCS2) {
+ QualType FromType1 = SCS1.getFromType();
+ QualType ToType1 = SCS1.getToType(1);
+ QualType FromType2 = SCS2.getFromType();
+ QualType ToType2 = SCS2.getToType(1);
+
+ // Adjust the types we're converting from via the array-to-pointer
+ // conversion, if we need to.
+ if (SCS1.First == ICK_Array_To_Pointer)
+ FromType1 = S.Context.getArrayDecayedType(FromType1);
+ if (SCS2.First == ICK_Array_To_Pointer)
+ FromType2 = S.Context.getArrayDecayedType(FromType2);
+
+ // Canonicalize all of the types.
+ FromType1 = S.Context.getCanonicalType(FromType1);
+ ToType1 = S.Context.getCanonicalType(ToType1);
+ FromType2 = S.Context.getCanonicalType(FromType2);
+ ToType2 = S.Context.getCanonicalType(ToType2);
+
+ // C++ [over.ics.rank]p4b3:
+ //
+ // If class B is derived directly or indirectly from class A and
+ // class C is derived directly or indirectly from B,
+ //
+ // Compare based on pointer conversions.
+ if (SCS1.Second == ICK_Pointer_Conversion &&
+ SCS2.Second == ICK_Pointer_Conversion &&
+ /*FIXME: Remove if Objective-C id conversions get their own rank*/
+ FromType1->isPointerType() && FromType2->isPointerType() &&
+ ToType1->isPointerType() && ToType2->isPointerType()) {
+ QualType FromPointee1
+ = FromType1->getAs<PointerType>()->getPointeeType().getUnqualifiedType();
+ QualType ToPointee1
+ = ToType1->getAs<PointerType>()->getPointeeType().getUnqualifiedType();
+ QualType FromPointee2
+ = FromType2->getAs<PointerType>()->getPointeeType().getUnqualifiedType();
+ QualType ToPointee2
+ = ToType2->getAs<PointerType>()->getPointeeType().getUnqualifiedType();
+
+ // -- conversion of C* to B* is better than conversion of C* to A*,
+ if (FromPointee1 == FromPointee2 && ToPointee1 != ToPointee2) {
+ if (S.IsDerivedFrom(Loc, ToPointee1, ToPointee2))
+ return ImplicitConversionSequence::Better;
+ else if (S.IsDerivedFrom(Loc, ToPointee2, ToPointee1))
+ return ImplicitConversionSequence::Worse;
+ }
+
+ // -- conversion of B* to A* is better than conversion of C* to A*,
+ if (FromPointee1 != FromPointee2 && ToPointee1 == ToPointee2) {
+ if (S.IsDerivedFrom(Loc, FromPointee2, FromPointee1))
+ return ImplicitConversionSequence::Better;
+ else if (S.IsDerivedFrom(Loc, FromPointee1, FromPointee2))
+ return ImplicitConversionSequence::Worse;
+ }
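+
+ // For example (an illustrative sketch of the two rules above), assuming
+ // 'struct A {}; struct B : A {}; struct C : B {};':
+ //   void f(A *); void f(B *);
+ //   C *cp; f(cp);   // C* -> B* beats C* -> A*, so f(B *) is chosen.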
+ } else if (SCS1.Second == ICK_Pointer_Conversion &&
+ SCS2.Second == ICK_Pointer_Conversion) {
+ const ObjCObjectPointerType *FromPtr1
+ = FromType1->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *FromPtr2
+ = FromType2->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *ToPtr1
+ = ToType1->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *ToPtr2
+ = ToType2->getAs<ObjCObjectPointerType>();
+
+ if (FromPtr1 && FromPtr2 && ToPtr1 && ToPtr2) {
+ // Apply the same conversion ranking rules for Objective-C pointer types
+ // that we do for C++ pointers to class types. However, we employ the
+ // Objective-C pseudo-subtyping relationship used for assignment of
+ // Objective-C pointer types.
+ bool FromAssignLeft
+ = S.Context.canAssignObjCInterfaces(FromPtr1, FromPtr2);
+ bool FromAssignRight
+ = S.Context.canAssignObjCInterfaces(FromPtr2, FromPtr1);
+ bool ToAssignLeft
+ = S.Context.canAssignObjCInterfaces(ToPtr1, ToPtr2);
+ bool ToAssignRight
+ = S.Context.canAssignObjCInterfaces(ToPtr2, ToPtr1);
+
+ // A conversion to a non-id object pointer type or qualified 'id'
+ // type is better than a conversion to 'id'.
+ if (ToPtr1->isObjCIdType() &&
+ (ToPtr2->isObjCQualifiedIdType() || ToPtr2->getInterfaceDecl()))
+ return ImplicitConversionSequence::Worse;
+ if (ToPtr2->isObjCIdType() &&
+ (ToPtr1->isObjCQualifiedIdType() || ToPtr1->getInterfaceDecl()))
+ return ImplicitConversionSequence::Better;
+
+ // A conversion to a non-id object pointer type is better than a
+ // conversion to a qualified 'id' type
+ if (ToPtr1->isObjCQualifiedIdType() && ToPtr2->getInterfaceDecl())
+ return ImplicitConversionSequence::Worse;
+ if (ToPtr2->isObjCQualifiedIdType() && ToPtr1->getInterfaceDecl())
+ return ImplicitConversionSequence::Better;
+
+ // A conversion to a non-Class object pointer type or qualified 'Class'
+ // type is better than a conversion to 'Class'.
+ if (ToPtr1->isObjCClassType() &&
+ (ToPtr2->isObjCQualifiedClassType() || ToPtr2->getInterfaceDecl()))
+ return ImplicitConversionSequence::Worse;
+ if (ToPtr2->isObjCClassType() &&
+ (ToPtr1->isObjCQualifiedClassType() || ToPtr1->getInterfaceDecl()))
+ return ImplicitConversionSequence::Better;
+
+ // A conversion to a non-Class object pointer type is better than a
+ // conversion to a qualified 'Class' type.
+ if (ToPtr1->isObjCQualifiedClassType() && ToPtr2->getInterfaceDecl())
+ return ImplicitConversionSequence::Worse;
+ if (ToPtr2->isObjCQualifiedClassType() && ToPtr1->getInterfaceDecl())
+ return ImplicitConversionSequence::Better;
+
+ // -- "conversion of C* to B* is better than conversion of C* to A*,"
+ if (S.Context.hasSameType(FromType1, FromType2) &&
+ !FromPtr1->isObjCIdType() && !FromPtr1->isObjCClassType() &&
+ (ToAssignLeft != ToAssignRight))
+ return ToAssignLeft? ImplicitConversionSequence::Worse
+ : ImplicitConversionSequence::Better;
+
+ // -- "conversion of B* to A* is better than conversion of C* to A*,"
+ if (S.Context.hasSameUnqualifiedType(ToType1, ToType2) &&
+ (FromAssignLeft != FromAssignRight))
+ return FromAssignLeft? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
+ }
+ }
+
+ // Ranking of member-pointer types.
+ if (SCS1.Second == ICK_Pointer_Member && SCS2.Second == ICK_Pointer_Member &&
+ FromType1->isMemberPointerType() && FromType2->isMemberPointerType() &&
+ ToType1->isMemberPointerType() && ToType2->isMemberPointerType()) {
+ const MemberPointerType * FromMemPointer1 =
+ FromType1->getAs<MemberPointerType>();
+ const MemberPointerType * ToMemPointer1 =
+ ToType1->getAs<MemberPointerType>();
+ const MemberPointerType * FromMemPointer2 =
+ FromType2->getAs<MemberPointerType>();
+ const MemberPointerType * ToMemPointer2 =
+ ToType2->getAs<MemberPointerType>();
+ const Type *FromPointeeType1 = FromMemPointer1->getClass();
+ const Type *ToPointeeType1 = ToMemPointer1->getClass();
+ const Type *FromPointeeType2 = FromMemPointer2->getClass();
+ const Type *ToPointeeType2 = ToMemPointer2->getClass();
+ QualType FromPointee1 = QualType(FromPointeeType1, 0).getUnqualifiedType();
+ QualType ToPointee1 = QualType(ToPointeeType1, 0).getUnqualifiedType();
+ QualType FromPointee2 = QualType(FromPointeeType2, 0).getUnqualifiedType();
+ QualType ToPointee2 = QualType(ToPointeeType2, 0).getUnqualifiedType();
+ // conversion of A::* to B::* is better than conversion of A::* to C::*,
+ if (FromPointee1 == FromPointee2 && ToPointee1 != ToPointee2) {
+ if (S.IsDerivedFrom(Loc, ToPointee1, ToPointee2))
+ return ImplicitConversionSequence::Worse;
+ else if (S.IsDerivedFrom(Loc, ToPointee2, ToPointee1))
+ return ImplicitConversionSequence::Better;
+ }
+ // conversion of B::* to C::* is better than conversion of A::* to C::*
+ if (ToPointee1 == ToPointee2 && FromPointee1 != FromPointee2) {
+ if (S.IsDerivedFrom(Loc, FromPointee1, FromPointee2))
+ return ImplicitConversionSequence::Better;
+ else if (S.IsDerivedFrom(Loc, FromPointee2, FromPointee1))
+ return ImplicitConversionSequence::Worse;
+ }
+ }
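+
+ // For example (an illustrative sketch; note the direction is inverted
+ // relative to object pointers), assuming
+ // 'struct A { int m; }; struct B : A {}; struct C : B {};':
+ //   void f(int B::*); void f(int C::*);
+ //   f(&A::m);   // A::* -> B::* beats A::* -> C::*, so f(int B::*) wins.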
+
+ if (SCS1.Second == ICK_Derived_To_Base) {
+ // -- conversion of C to B is better than conversion of C to A,
+ // -- binding of an expression of type C to a reference of type
+ // B& is better than binding an expression of type C to a
+ // reference of type A&,
+ if (S.Context.hasSameUnqualifiedType(FromType1, FromType2) &&
+ !S.Context.hasSameUnqualifiedType(ToType1, ToType2)) {
+ if (S.IsDerivedFrom(Loc, ToType1, ToType2))
+ return ImplicitConversionSequence::Better;
+ else if (S.IsDerivedFrom(Loc, ToType2, ToType1))
+ return ImplicitConversionSequence::Worse;
+ }
+
+ // -- conversion of B to A is better than conversion of C to A.
+ // -- binding of an expression of type B to a reference of type
+ // A& is better than binding an expression of type C to a
+ // reference of type A&,
+ if (!S.Context.hasSameUnqualifiedType(FromType1, FromType2) &&
+ S.Context.hasSameUnqualifiedType(ToType1, ToType2)) {
+ if (S.IsDerivedFrom(Loc, FromType2, FromType1))
+ return ImplicitConversionSequence::Better;
+ else if (S.IsDerivedFrom(Loc, FromType1, FromType2))
+ return ImplicitConversionSequence::Worse;
+ }
+ }
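+
+ // For example (an illustrative sketch), assuming
+ // 'struct A {}; struct B : A {}; struct C : B {};':
+ //   void f(A &); void f(B &);
+ //   C c; f(c);   // binding C to B& beats binding C to A&: f(B &) wins.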
+
+ return ImplicitConversionSequence::Indistinguishable;
+}
+
+/// \brief Determine whether the given type is valid, e.g., it is not an invalid
+/// C++ class.
+static bool isTypeValid(QualType T) {
+ if (CXXRecordDecl *Record = T->getAsCXXRecordDecl())
+ return !Record->isInvalidDecl();
+
+ return true;
+}
+
+/// CompareReferenceRelationship - Compare the two types T1 and T2 to
+/// determine whether they are reference-related,
+/// reference-compatible, reference-compatible with added
+/// qualification, or incompatible, for use in C++ initialization by
+/// reference (C++ [dcl.ref.init]p4). Neither type can be a reference
+/// type, and the first type (T1) is the pointee type of the reference
+/// type being initialized.
+Sema::ReferenceCompareResult
+Sema::CompareReferenceRelationship(SourceLocation Loc,
+ QualType OrigT1, QualType OrigT2,
+ bool &DerivedToBase,
+ bool &ObjCConversion,
+ bool &ObjCLifetimeConversion) {
+ assert(!OrigT1->isReferenceType() &&
+ "T1 must be the pointee type of the reference type");
+ assert(!OrigT2->isReferenceType() && "T2 cannot be a reference type");
+
+ QualType T1 = Context.getCanonicalType(OrigT1);
+ QualType T2 = Context.getCanonicalType(OrigT2);
+ Qualifiers T1Quals, T2Quals;
+ QualType UnqualT1 = Context.getUnqualifiedArrayType(T1, T1Quals);
+ QualType UnqualT2 = Context.getUnqualifiedArrayType(T2, T2Quals);
+
+ // C++ [dcl.init.ref]p4:
+ // Given types "cv1 T1" and "cv2 T2," "cv1 T1" is
+ // reference-related to "cv2 T2" if T1 is the same type as T2, or
+ // T1 is a base class of T2.
+ DerivedToBase = false;
+ ObjCConversion = false;
+ ObjCLifetimeConversion = false;
+ if (UnqualT1 == UnqualT2) {
+ // Nothing to do.
+ } else if (isCompleteType(Loc, OrigT2) &&
+ isTypeValid(UnqualT1) && isTypeValid(UnqualT2) &&
+ IsDerivedFrom(Loc, UnqualT2, UnqualT1))
+ DerivedToBase = true;
+ else if (UnqualT1->isObjCObjectOrInterfaceType() &&
+ UnqualT2->isObjCObjectOrInterfaceType() &&
+ Context.canBindObjCObjectType(UnqualT1, UnqualT2))
+ ObjCConversion = true;
+ else
+ return Ref_Incompatible;
+
+ // At this point, we know that T1 and T2 are reference-related (at
+ // least).
+
+ // If the type is an array type, promote the element qualifiers to the type
+ // for comparison.
+ if (isa<ArrayType>(T1) && T1Quals)
+ T1 = Context.getQualifiedType(UnqualT1, T1Quals);
+ if (isa<ArrayType>(T2) && T2Quals)
+ T2 = Context.getQualifiedType(UnqualT2, T2Quals);
+
+ // C++ [dcl.init.ref]p4:
+ // "cv1 T1" is reference-compatible with "cv2 T2" if T1 is
+ // reference-related to T2 and cv1 is the same cv-qualification
+ // as, or greater cv-qualification than, cv2. For purposes of
+ // overload resolution, cases for which cv1 is greater
+ // cv-qualification than cv2 are identified as
+ // reference-compatible with added qualification (see 13.3.3.2).
+ //
+ // Note that we also require equivalence of Objective-C GC and address-space
+ // qualifiers when performing these computations, so that e.g., an int in
+ // address space 1 is not reference-compatible with an int in address
+ // space 2.
+ if (T1Quals.getObjCLifetime() != T2Quals.getObjCLifetime() &&
+ T1Quals.compatiblyIncludesObjCLifetime(T2Quals)) {
+ if (isNonTrivialObjCLifetimeConversion(T2Quals, T1Quals))
+ ObjCLifetimeConversion = true;
+
+ T1Quals.removeObjCLifetime();
+ T2Quals.removeObjCLifetime();
+ }
+
+ if (T1Quals == T2Quals)
+ return Ref_Compatible;
+ else if (T1Quals.compatiblyIncludes(T2Quals))
+ return Ref_Compatible_With_Added_Qualification;
+ else
+ return Ref_Related;
+}
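+
+// A rough illustration of the outcomes above (hypothetical types):
+//   T1 = int,       T2 = int       -> reference-compatible
+//   T1 = const int, T2 = int       -> compatible with added qualification
+//   T1 = int,       T2 = const int -> merely reference-related
+// and, assuming 'struct B : A {}', T1 = A is reference-related to T2 = B
+// through the derived-to-base relationship.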
+
+/// \brief Look for a user-defined conversion to a value reference-compatible
+/// with DeclType. Return true if something definite is found.
+static bool
+FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
+ QualType DeclType, SourceLocation DeclLoc,
+ Expr *Init, QualType T2, bool AllowRvalues,
+ bool AllowExplicit) {
+ assert(T2->isRecordType() && "Can only find conversions of record types.");
+ CXXRecordDecl *T2RecordDecl
+ = dyn_cast<CXXRecordDecl>(T2->getAs<RecordType>()->getDecl());
+
+ OverloadCandidateSet CandidateSet(DeclLoc, OverloadCandidateSet::CSK_Normal);
+ const auto &Conversions = T2RecordDecl->getVisibleConversionFunctions();
+ for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
+ NamedDecl *D = *I;
+ CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(D->getDeclContext());
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+
+ FunctionTemplateDecl *ConvTemplate
+ = dyn_cast<FunctionTemplateDecl>(D);
+ CXXConversionDecl *Conv;
+ if (ConvTemplate)
+ Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
+ else
+ Conv = cast<CXXConversionDecl>(D);
+
+ // If this is an explicit conversion, and we're not allowed to consider
+ // explicit conversions, skip it.
+ if (!AllowExplicit && Conv->isExplicit())
+ continue;
+
+ if (AllowRvalues) {
+ bool DerivedToBase = false;
+ bool ObjCConversion = false;
+ bool ObjCLifetimeConversion = false;
+
+ // If we are initializing an rvalue reference, don't permit conversion
+ // functions that return lvalues.
+ if (!ConvTemplate && DeclType->isRValueReferenceType()) {
+ const ReferenceType *RefType
+ = Conv->getConversionType()->getAs<LValueReferenceType>();
+ if (RefType && !RefType->getPointeeType()->isFunctionType())
+ continue;
+ }
+
+ if (!ConvTemplate &&
+ S.CompareReferenceRelationship(
+ DeclLoc,
+ Conv->getConversionType().getNonReferenceType()
+ .getUnqualifiedType(),
+ DeclType.getNonReferenceType().getUnqualifiedType(),
+ DerivedToBase, ObjCConversion, ObjCLifetimeConversion) ==
+ Sema::Ref_Incompatible)
+ continue;
+ } else {
+ // If the conversion function doesn't return a reference type,
+ // it can't be considered for this conversion. An rvalue reference
+ // is only acceptable if it refers to a function type.
+
+ const ReferenceType *RefType =
+ Conv->getConversionType()->getAs<ReferenceType>();
+ if (!RefType ||
+ (!RefType->isLValueReferenceType() &&
+ !RefType->getPointeeType()->isFunctionType()))
+ continue;
+ }
+
+ if (ConvTemplate)
+ S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(), ActingDC,
+ Init, DeclType, CandidateSet,
+ /*AllowObjCConversionOnExplicit=*/false);
+ else
+ S.AddConversionCandidate(Conv, I.getPair(), ActingDC, Init,
+ DeclType, CandidateSet,
+ /*AllowObjCConversionOnExplicit=*/false);
+ }
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(S, DeclLoc, Best, true)) {
+ case OR_Success:
+ // C++ [over.ics.ref]p1:
+ //
+ // [...] If the parameter binds directly to the result of
+ // applying a conversion function to the argument
+ // expression, the implicit conversion sequence is a
+ // user-defined conversion sequence (13.3.3.1.2), with the
+ // second standard conversion sequence either an identity
+ // conversion or, if the conversion function returns an
+ // entity of a type that is a derived class of the parameter
+ // type, a derived-to-base Conversion.
+ if (!Best->FinalConversion.DirectBinding)
+ return false;
+
+ ICS.setUserDefined();
+ ICS.UserDefined.Before = Best->Conversions[0].Standard;
+ ICS.UserDefined.After = Best->FinalConversion;
+ ICS.UserDefined.HadMultipleCandidates = HadMultipleCandidates;
+ ICS.UserDefined.ConversionFunction = Best->Function;
+ ICS.UserDefined.FoundConversionFunction = Best->FoundDecl;
+ ICS.UserDefined.EllipsisConversion = false;
+ assert(ICS.UserDefined.After.ReferenceBinding &&
+ ICS.UserDefined.After.DirectBinding &&
+ "Expected a direct reference binding!");
+ return true;
+
+ case OR_Ambiguous:
+ ICS.setAmbiguous();
+ for (OverloadCandidateSet::iterator Cand = CandidateSet.begin();
+ Cand != CandidateSet.end(); ++Cand)
+ if (Cand->Viable)
+ ICS.Ambiguous.addConversion(Cand->Function);
+ return true;
+
+ case OR_No_Viable_Function:
+ case OR_Deleted:
+ // There was no suitable conversion, or we found a deleted
+ // conversion; continue with other checks.
+ return false;
+ }
+
+ llvm_unreachable("Invalid OverloadResult!");
+}
+
+/// \brief Compute an implicit conversion sequence for reference
+/// initialization.
+static ImplicitConversionSequence
+TryReferenceInit(Sema &S, Expr *Init, QualType DeclType,
+ SourceLocation DeclLoc,
+ bool SuppressUserConversions,
+ bool AllowExplicit) {
+ assert(DeclType->isReferenceType() && "Reference init needs a reference");
+
+ // Most paths end in a failed conversion.
+ ImplicitConversionSequence ICS;
+ ICS.setBad(BadConversionSequence::no_conversion, Init, DeclType);
+
+ QualType T1 = DeclType->getAs<ReferenceType>()->getPointeeType();
+ QualType T2 = Init->getType();
+
+ // If the initializer is the address of an overloaded function, try
+ // to resolve the overloaded function. If all goes well, T2 is the
+ // type of the resulting function.
+ if (S.Context.getCanonicalType(T2) == S.Context.OverloadTy) {
+ DeclAccessPair Found;
+ if (FunctionDecl *Fn = S.ResolveAddressOfOverloadedFunction(
+ Init, DeclType, /*Complain=*/false, Found))
+ T2 = Fn->getType();
+ }
+
+ // Compute some basic properties of the types and the initializer.
+ bool isRValRef = DeclType->isRValueReferenceType();
+ bool DerivedToBase = false;
+ bool ObjCConversion = false;
+ bool ObjCLifetimeConversion = false;
+ Expr::Classification InitCategory = Init->Classify(S.Context);
+ Sema::ReferenceCompareResult RefRelationship
+ = S.CompareReferenceRelationship(DeclLoc, T1, T2, DerivedToBase,
+ ObjCConversion, ObjCLifetimeConversion);
+
+ // C++0x [dcl.init.ref]p5:
+ // A reference to type "cv1 T1" is initialized by an expression
+ // of type "cv2 T2" as follows:
+
+ // -- If the reference is an lvalue reference and the initializer expression
+ if (!isRValRef) {
+ // -- is an lvalue (but is not a bit-field), and "cv1 T1" is
+ // reference-compatible with "cv2 T2," or
+ //
+ // Per C++ [over.ics.ref]p4, we don't check the bit-field property here.
+ if (InitCategory.isLValue() &&
+ RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification) {
+ // C++ [over.ics.ref]p1:
+ // When a parameter of reference type binds directly (8.5.3)
+ // to an argument expression, the implicit conversion sequence
+ // is the identity conversion, unless the argument expression
+ // has a type that is a derived class of the parameter type,
+ // in which case the implicit conversion sequence is a
+ // derived-to-base Conversion (13.3.3.1).
+ ICS.setStandard();
+ ICS.Standard.First = ICK_Identity;
+ ICS.Standard.Second = DerivedToBase? ICK_Derived_To_Base
+ : ObjCConversion? ICK_Compatible_Conversion
+ : ICK_Identity;
+ ICS.Standard.Third = ICK_Identity;
+ ICS.Standard.FromTypePtr = T2.getAsOpaquePtr();
+ ICS.Standard.setToType(0, T2);
+ ICS.Standard.setToType(1, T1);
+ ICS.Standard.setToType(2, T1);
+ ICS.Standard.ReferenceBinding = true;
+ ICS.Standard.DirectBinding = true;
+ ICS.Standard.IsLvalueReference = !isRValRef;
+ ICS.Standard.BindsToFunctionLvalue = T2->isFunctionType();
+ ICS.Standard.BindsToRvalue = false;
+ ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false;
+ ICS.Standard.ObjCLifetimeConversionBinding = ObjCLifetimeConversion;
+ ICS.Standard.CopyConstructor = nullptr;
+ ICS.Standard.DeprecatedStringLiteralToCharPtr = false;
+
+ // Nothing more to do: the inaccessibility/ambiguity check for
+ // derived-to-base conversions is suppressed when we're
+ // computing the implicit conversion sequence (C++
+ // [over.best.ics]p2).
+ return ICS;
+ }
+
+ // -- has a class type (i.e., T2 is a class type), where T1 is
+ // not reference-related to T2, and can be implicitly
+ // converted to an lvalue of type "cv3 T3," where "cv1 T1"
+ // is reference-compatible with "cv3 T3" 92) (this
+ // conversion is selected by enumerating the applicable
+ // conversion functions (13.3.1.6) and choosing the best
+ // one through overload resolution (13.3)),
+ if (!SuppressUserConversions && T2->isRecordType() &&
+ S.isCompleteType(DeclLoc, T2) &&
+ RefRelationship == Sema::Ref_Incompatible) {
+ if (FindConversionForRefInit(S, ICS, DeclType, DeclLoc,
+ Init, T2, /*AllowRvalues=*/false,
+ AllowExplicit))
+ return ICS;
+ }
+ }
+
+ // -- Otherwise, the reference shall be an lvalue reference to a
+ // non-volatile const type (i.e., cv1 shall be const), or the reference
+ // shall be an rvalue reference.
+ if (!isRValRef && (!T1.isConstQualified() || T1.isVolatileQualified()))
+ return ICS;
+
+ // -- If the initializer expression
+ //
+ // -- is an xvalue, class prvalue, array prvalue or function
+ // lvalue and "cv1 T1" is reference-compatible with "cv2 T2", or
+ if (RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification &&
+ (InitCategory.isXValue() ||
+ (InitCategory.isPRValue() && (T2->isRecordType() || T2->isArrayType())) ||
+ (InitCategory.isLValue() && T2->isFunctionType()))) {
+ ICS.setStandard();
+ ICS.Standard.First = ICK_Identity;
+ ICS.Standard.Second = DerivedToBase? ICK_Derived_To_Base
+ : ObjCConversion? ICK_Compatible_Conversion
+ : ICK_Identity;
+ ICS.Standard.Third = ICK_Identity;
+ ICS.Standard.FromTypePtr = T2.getAsOpaquePtr();
+ ICS.Standard.setToType(0, T2);
+ ICS.Standard.setToType(1, T1);
+ ICS.Standard.setToType(2, T1);
+ ICS.Standard.ReferenceBinding = true;
+ // In C++0x, this is always a direct binding. In C++98/03, it's a direct
+ // binding unless we're binding to a class prvalue.
+ // Note: Although xvalues wouldn't normally show up in C++98/03 code, we
+ // allow the use of rvalue references in C++98/03 for the benefit of
+ // standard library implementors; therefore, we need the xvalue check here.
+ ICS.Standard.DirectBinding =
+ S.getLangOpts().CPlusPlus11 ||
+ !(InitCategory.isPRValue() || T2->isRecordType());
+ ICS.Standard.IsLvalueReference = !isRValRef;
+ ICS.Standard.BindsToFunctionLvalue = T2->isFunctionType();
+ ICS.Standard.BindsToRvalue = InitCategory.isRValue();
+ ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false;
+ ICS.Standard.ObjCLifetimeConversionBinding = ObjCLifetimeConversion;
+ ICS.Standard.CopyConstructor = nullptr;
+ ICS.Standard.DeprecatedStringLiteralToCharPtr = false;
+ return ICS;
+ }
+
+ // -- has a class type (i.e., T2 is a class type), where T1 is not
+ // reference-related to T2, and can be implicitly converted to
+ // an xvalue, class prvalue, or function lvalue of type
+ // "cv3 T3", where "cv1 T1" is reference-compatible with
+ // "cv3 T3",
+ //
+ // then the reference is bound to the value of the initializer
+ // expression in the first case and to the result of the conversion
+ // in the second case (or, in either case, to an appropriate base
+ // class subobject).
+ if (!SuppressUserConversions && RefRelationship == Sema::Ref_Incompatible &&
+ T2->isRecordType() && S.isCompleteType(DeclLoc, T2) &&
+ FindConversionForRefInit(S, ICS, DeclType, DeclLoc,
+ Init, T2, /*AllowRvalues=*/true,
+ AllowExplicit)) {
+ // In the second case, if the reference is an rvalue reference
+ // and the second standard conversion sequence of the
+ // user-defined conversion sequence includes an lvalue-to-rvalue
+ // conversion, the program is ill-formed.
+ if (ICS.isUserDefined() && isRValRef &&
+ ICS.UserDefined.After.First == ICK_Lvalue_To_Rvalue)
+ ICS.setBad(BadConversionSequence::no_conversion, Init, DeclType);
+
+ return ICS;
+ }
+
+ // A temporary of function type cannot be created; don't even try.
+ if (T1->isFunctionType())
+ return ICS;
+
+ // -- Otherwise, a temporary of type "cv1 T1" is created and
+ // initialized from the initializer expression using the
+ // rules for a non-reference copy initialization (8.5). The
+ // reference is then bound to the temporary. If T1 is
+ // reference-related to T2, cv1 must be the same
+ // cv-qualification as, or greater cv-qualification than,
+ // cv2; otherwise, the program is ill-formed.
+ if (RefRelationship == Sema::Ref_Related) {
+ // If cv1 == cv2 or cv1 is more cv-qualified than cv2, then
+ // we would be reference-compatible or reference-compatible with
+ // added qualification. But that wasn't the case, so the reference
+ // initialization fails.
+ //
+ // Note that we only want to check address spaces and cvr-qualifiers here.
+ // ObjC GC and lifetime qualifiers aren't important.
+ Qualifiers T1Quals = T1.getQualifiers();
+ Qualifiers T2Quals = T2.getQualifiers();
+ T1Quals.removeObjCGCAttr();
+ T1Quals.removeObjCLifetime();
+ T2Quals.removeObjCGCAttr();
+ T2Quals.removeObjCLifetime();
+ if (!T1Quals.compatiblyIncludes(T2Quals))
+ return ICS;
+ }
+
+ // If at least one of the types is a class type, the types are not
+ // related, and we aren't allowed any user conversions, the
+ // reference binding fails. This case is important for breaking
+ // recursion, since TryImplicitConversion below will attempt to
+ // create a temporary through the use of a copy constructor.
+ if (SuppressUserConversions && RefRelationship == Sema::Ref_Incompatible &&
+ (T1->isRecordType() || T2->isRecordType()))
+ return ICS;
+
+ // If T1 is reference-related to T2 and the reference is an rvalue
+ // reference, the initializer expression shall not be an lvalue.
+ if (RefRelationship >= Sema::Ref_Related &&
+ isRValRef && Init->Classify(S.Context).isLValue())
+ return ICS;
+
+ // C++ [over.ics.ref]p2:
+ // When a parameter of reference type is not bound directly to
+ // an argument expression, the conversion sequence is the one
+ // required to convert the argument expression to the
+ // underlying type of the reference according to
+ // 13.3.3.1. Conceptually, this conversion sequence corresponds
+ // to copy-initializing a temporary of the underlying type with
+ // the argument expression. Any difference in top-level
+ // cv-qualification is subsumed by the initialization itself
+ // and does not constitute a conversion.
+ ICS = TryImplicitConversion(S, Init, T1, SuppressUserConversions,
+ /*AllowExplicit=*/false,
+ /*InOverloadResolution=*/false,
+ /*CStyle=*/false,
+ /*AllowObjCWritebackConversion=*/false,
+ /*AllowObjCConversionOnExplicit=*/false);
+
+ // Of course, that's still a reference binding.
+ if (ICS.isStandard()) {
+ ICS.Standard.ReferenceBinding = true;
+ ICS.Standard.IsLvalueReference = !isRValRef;
+ ICS.Standard.BindsToFunctionLvalue = false;
+ ICS.Standard.BindsToRvalue = true;
+ ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false;
+ ICS.Standard.ObjCLifetimeConversionBinding = false;
+ } else if (ICS.isUserDefined()) {
+ const ReferenceType *LValRefType =
+ ICS.UserDefined.ConversionFunction->getReturnType()
+ ->getAs<LValueReferenceType>();
+
+ // C++ [over.ics.ref]p3:
+ // Except for an implicit object parameter, for which see 13.3.1, a
+ // standard conversion sequence cannot be formed if it requires [...]
+ // binding an rvalue reference to an lvalue other than a function
+ // lvalue.
+ // Note that the function case is not possible here.
+ if (DeclType->isRValueReferenceType() && LValRefType) {
+ // FIXME: This is the wrong BadConversionSequence. The problem is binding
+ // an rvalue reference to a (non-function) lvalue, not binding an lvalue
+ // reference to an rvalue!
+ ICS.setBad(BadConversionSequence::lvalue_ref_to_rvalue, Init, DeclType);
+ return ICS;
+ }
+
+ ICS.UserDefined.Before.setAsIdentityConversion();
+ ICS.UserDefined.After.ReferenceBinding = true;
+ ICS.UserDefined.After.IsLvalueReference = !isRValRef;
+ ICS.UserDefined.After.BindsToFunctionLvalue = false;
+ ICS.UserDefined.After.BindsToRvalue = !LValRefType;
+ ICS.UserDefined.After.BindsImplicitObjectArgumentWithoutRefQualifier = false;
+ ICS.UserDefined.After.ObjCLifetimeConversionBinding = false;
+ }
+
+ return ICS;
+}
+
+static ImplicitConversionSequence
+TryCopyInitialization(Sema &S, Expr *From, QualType ToType,
+ bool SuppressUserConversions,
+ bool InOverloadResolution,
+ bool AllowObjCWritebackConversion,
+ bool AllowExplicit = false);
+
+/// TryListConversion - Try to copy-initialize a value of type ToType from the
+/// initializer list From.
+static ImplicitConversionSequence
+TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
+ bool SuppressUserConversions,
+ bool InOverloadResolution,
+ bool AllowObjCWritebackConversion) {
+ // C++11 [over.ics.list]p1:
+ // When an argument is an initializer list, it is not an expression and
+ // special rules apply for converting it to a parameter type.
+
+ ImplicitConversionSequence Result;
+ Result.setBad(BadConversionSequence::no_conversion, From, ToType);
+
+ // We need a complete type for what follows. Incomplete types can never be
+ // initialized from init lists.
+ if (!S.isCompleteType(From->getLocStart(), ToType))
+ return Result;
+
+ // Per DR1467:
+ // If the parameter type is a class X and the initializer list has a single
+ // element of type cv U, where U is X or a class derived from X, the
+ // implicit conversion sequence is the one required to convert the element
+ // to the parameter type.
+ //
+ // Otherwise, if the parameter type is a character array [... ]
+ // and the initializer list has a single element that is an
+ // appropriately-typed string literal (8.5.2 [dcl.init.string]), the
+ // implicit conversion sequence is the identity conversion.
+ if (From->getNumInits() == 1) {
+ if (ToType->isRecordType()) {
+ QualType InitType = From->getInit(0)->getType();
+ if (S.Context.hasSameUnqualifiedType(InitType, ToType) ||
+ S.IsDerivedFrom(From->getLocStart(), InitType, ToType))
+ return TryCopyInitialization(S, From->getInit(0), ToType,
+ SuppressUserConversions,
+ InOverloadResolution,
+ AllowObjCWritebackConversion);
+ }
+ // FIXME: Check the other conditions here: array of character type,
+ // initializer is a string literal.
+ if (ToType->isArrayType()) {
+ InitializedEntity Entity =
+ InitializedEntity::InitializeParameter(S.Context, ToType,
+ /*Consumed=*/false);
+ if (S.CanPerformCopyInitialization(Entity, From)) {
+ Result.setStandard();
+ Result.Standard.setAsIdentityConversion();
+ Result.Standard.setFromType(ToType);
+ Result.Standard.setAllToTypes(ToType);
+ return Result;
+ }
+ }
+ }
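+
+ // For example (an illustrative sketch of the DR1467 case above):
+ //   struct X {};
+ //   void f(X);
+ //   X x; f({x});   // the single element x already has type X, so the
+ //                  // sequence is the one converting x to the parameter.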
+
+ // C++14 [over.ics.list]p2: Otherwise, if the parameter type [...] (below).
+ // C++11 [over.ics.list]p2:
+ // If the parameter type is std::initializer_list<X> or "array of X" and
+ // all the elements can be implicitly converted to X, the implicit
+ // conversion sequence is the worst conversion necessary to convert an
+ // element of the list to X.
+ //
+ // C++14 [over.ics.list]p3:
+ // Otherwise, if the parameter type is "array of N X", if the initializer
+ // list has exactly N elements or if it has fewer than N elements and X is
+ // default-constructible, and if all the elements of the initializer list
+ // can be implicitly converted to X, the implicit conversion sequence is
+ // the worst conversion necessary to convert an element of the list to X.
+ //
+ // FIXME: We're missing a lot of these checks.
+ bool toStdInitializerList = false;
+ QualType X;
+ if (ToType->isArrayType())
+ X = S.Context.getAsArrayType(ToType)->getElementType();
+ else
+ toStdInitializerList = S.isStdInitializerList(ToType, &X);
+ if (!X.isNull()) {
+ for (unsigned i = 0, e = From->getNumInits(); i < e; ++i) {
+ Expr *Init = From->getInit(i);
+ ImplicitConversionSequence ICS =
+ TryCopyInitialization(S, Init, X, SuppressUserConversions,
+ InOverloadResolution,
+ AllowObjCWritebackConversion);
+ // If a single element isn't convertible, fail.
+ if (ICS.isBad()) {
+ Result = ICS;
+ break;
+ }
+ // Otherwise, look for the worst conversion.
+ if (Result.isBad() ||
+ CompareImplicitConversionSequences(S, From->getLocStart(), ICS,
+ Result) ==
+ ImplicitConversionSequence::Worse)
+ Result = ICS;
+ }
+
+ // For an empty list, we won't have computed any conversion sequence.
+ // Introduce the identity conversion sequence.
+ if (From->getNumInits() == 0) {
+ Result.setStandard();
+ Result.Standard.setAsIdentityConversion();
+ Result.Standard.setFromType(ToType);
+ Result.Standard.setAllToTypes(ToType);
+ }
+
+ Result.setStdInitializerListElement(toStdInitializerList);
+ return Result;
+ }
+
+ // C++14 [over.ics.list]p4:
+ // C++11 [over.ics.list]p3:
+ // Otherwise, if the parameter is a non-aggregate class X and overload
+ // resolution chooses a single best constructor [...] the implicit
+ // conversion sequence is a user-defined conversion sequence. If multiple
+ // constructors are viable but none is better than the others, the
+ // implicit conversion sequence is a user-defined conversion sequence.
+ if (ToType->isRecordType() && !ToType->isAggregateType()) {
+ // This function can deal with initializer lists.
+ return TryUserDefinedConversion(S, From, ToType, SuppressUserConversions,
+ /*AllowExplicit=*/false,
+ InOverloadResolution, /*CStyle=*/false,
+ AllowObjCWritebackConversion,
+ /*AllowObjCConversionOnExplicit=*/false);
+ }
+
+ // C++14 [over.ics.list]p5:
+ // C++11 [over.ics.list]p4:
+ // Otherwise, if the parameter has an aggregate type which can be
+ // initialized from the initializer list [...] the implicit conversion
+ // sequence is a user-defined conversion sequence.
+ if (ToType->isAggregateType()) {
+ // Type is an aggregate, argument is an init list. At this point it comes
+ // down to checking whether the initialization works.
+ // FIXME: Find out whether this parameter is consumed or not.
+ InitializedEntity Entity =
+ InitializedEntity::InitializeParameter(S.Context, ToType,
+ /*Consumed=*/false);
+ if (S.CanPerformCopyInitialization(Entity, From)) {
+ Result.setUserDefined();
+ Result.UserDefined.Before.setAsIdentityConversion();
+ // Initializer lists don't have a type.
+ Result.UserDefined.Before.setFromType(QualType());
+ Result.UserDefined.Before.setAllToTypes(QualType());
+
+ Result.UserDefined.After.setAsIdentityConversion();
+ Result.UserDefined.After.setFromType(ToType);
+ Result.UserDefined.After.setAllToTypes(ToType);
+ Result.UserDefined.ConversionFunction = nullptr;
+ }
+ return Result;
+ }
+
+ // C++14 [over.ics.list]p6:
+ // C++11 [over.ics.list]p5:
+ // Otherwise, if the parameter is a reference, see 13.3.3.1.4.
+ if (ToType->isReferenceType()) {
+ // The standard is notoriously unclear here, since 13.3.3.1.4 doesn't
+ // mention initializer lists in any way. So we go by what list-
+ // initialization would do and try to extrapolate from that.
+
+ QualType T1 = ToType->getAs<ReferenceType>()->getPointeeType();
+
+ // If the initializer list has a single element that is reference-related
+ // to the parameter type, we initialize the reference from that.
+ if (From->getNumInits() == 1) {
+ Expr *Init = From->getInit(0);
+
+ QualType T2 = Init->getType();
+
+ // If the initializer is the address of an overloaded function, try
+ // to resolve the overloaded function. If all goes well, T2 is the
+ // type of the resulting function.
+ if (S.Context.getCanonicalType(T2) == S.Context.OverloadTy) {
+ DeclAccessPair Found;
+ if (FunctionDecl *Fn = S.ResolveAddressOfOverloadedFunction(
+ Init, ToType, /*Complain=*/false, Found))
+ T2 = Fn->getType();
+ }
+
+ // Compute some basic properties of the types and the initializer.
+ bool dummy1 = false;
+ bool dummy2 = false;
+ bool dummy3 = false;
+ Sema::ReferenceCompareResult RefRelationship
+ = S.CompareReferenceRelationship(From->getLocStart(), T1, T2, dummy1,
+ dummy2, dummy3);
+
+ if (RefRelationship >= Sema::Ref_Related) {
+ return TryReferenceInit(S, Init, ToType, /*FIXME*/From->getLocStart(),
+ SuppressUserConversions,
+ /*AllowExplicit=*/false);
+ }
+ }
+
+ // Otherwise, we bind the reference to a temporary created from the
+ // initializer list.
+ Result = TryListConversion(S, From, T1, SuppressUserConversions,
+ InOverloadResolution,
+ AllowObjCWritebackConversion);
+ if (Result.isFailure())
+ return Result;
+ assert(!Result.isEllipsis() &&
+ "Sub-initialization cannot result in ellipsis conversion.");
+
+ // Can we even bind to a temporary?
+ if (ToType->isRValueReferenceType() ||
+ (T1.isConstQualified() && !T1.isVolatileQualified())) {
+ StandardConversionSequence &SCS = Result.isStandard() ? Result.Standard :
+ Result.UserDefined.After;
+ SCS.ReferenceBinding = true;
+ SCS.IsLvalueReference = ToType->isLValueReferenceType();
+ SCS.BindsToRvalue = true;
+ SCS.BindsToFunctionLvalue = false;
+ SCS.BindsImplicitObjectArgumentWithoutRefQualifier = false;
+ SCS.ObjCLifetimeConversionBinding = false;
+ } else
+ Result.setBad(BadConversionSequence::lvalue_ref_to_rvalue,
+ From, ToType);
+ return Result;
+ }
+
+ // C++14 [over.ics.list]p7:
+ // C++11 [over.ics.list]p6:
+ // Otherwise, if the parameter type is not a class:
+ if (!ToType->isRecordType()) {
+ // - if the initializer list has one element that is not itself an
+ // initializer list, the implicit conversion sequence is the one
+ // required to convert the element to the parameter type.
+ unsigned NumInits = From->getNumInits();
+ if (NumInits == 1 && !isa<InitListExpr>(From->getInit(0)))
+ Result = TryCopyInitialization(S, From->getInit(0), ToType,
+ SuppressUserConversions,
+ InOverloadResolution,
+ AllowObjCWritebackConversion);
+ // - if the initializer list has no elements, the implicit conversion
+ // sequence is the identity conversion.
+ else if (NumInits == 0) {
+ Result.setStandard();
+ Result.Standard.setAsIdentityConversion();
+ Result.Standard.setFromType(ToType);
+ Result.Standard.setAllToTypes(ToType);
+ }
+ return Result;
+ }
+
+ // C++14 [over.ics.list]p8:
+ // C++11 [over.ics.list]p7:
+ // In all cases other than those enumerated above, no conversion is possible
+ return Result;
+}
+
+/// TryCopyInitialization - Try to copy-initialize a value of type
+/// ToType from the expression From. Return the implicit conversion
+/// sequence required to pass this argument, which may be a bad
+/// conversion sequence (meaning that the argument cannot be passed to
+/// a parameter of this type). If @p SuppressUserConversions, then we
+/// do not permit any user-defined conversion sequences.
+static ImplicitConversionSequence
+TryCopyInitialization(Sema &S, Expr *From, QualType ToType,
+ bool SuppressUserConversions,
+ bool InOverloadResolution,
+ bool AllowObjCWritebackConversion,
+ bool AllowExplicit) {
+ if (InitListExpr *FromInitList = dyn_cast<InitListExpr>(From))
+ return TryListConversion(S, FromInitList, ToType, SuppressUserConversions,
+ InOverloadResolution, AllowObjCWritebackConversion);
+
+ if (ToType->isReferenceType())
+ return TryReferenceInit(S, From, ToType,
+ /*FIXME:*/From->getLocStart(),
+ SuppressUserConversions,
+ AllowExplicit);
+
+ return TryImplicitConversion(S, From, ToType,
+ SuppressUserConversions,
+ /*AllowExplicit=*/false,
+ InOverloadResolution,
+ /*CStyle=*/false,
+ AllowObjCWritebackConversion,
+ /*AllowObjCConversionOnExplicit=*/false);
+}
+
+static bool TryCopyInitialization(const CanQualType FromQTy,
+ const CanQualType ToQTy,
+ Sema &S,
+ SourceLocation Loc,
+ ExprValueKind FromVK) {
+ OpaqueValueExpr TmpExpr(Loc, FromQTy, FromVK);
+ ImplicitConversionSequence ICS =
+ TryCopyInitialization(S, &TmpExpr, ToQTy,
+ /*SuppressUserConversions=*/true,
+ /*InOverloadResolution=*/true,
+ /*AllowObjCWritebackConversion=*/false);
+
+ return !ICS.isBad();
+}
+
+/// TryObjectArgumentInitialization - Try to initialize the object
+/// parameter of the given member function (@c Method) from the
+/// expression @p From.
+static ImplicitConversionSequence
+TryObjectArgumentInitialization(Sema &S, SourceLocation Loc, QualType FromType,
+ Expr::Classification FromClassification,
+ CXXMethodDecl *Method,
+ CXXRecordDecl *ActingContext) {
+ QualType ClassType = S.Context.getTypeDeclType(ActingContext);
+ // [class.dtor]p2: A destructor can be invoked for a const, volatile or
+ // const volatile object.
+ unsigned Quals = isa<CXXDestructorDecl>(Method) ?
+ Qualifiers::Const | Qualifiers::Volatile : Method->getTypeQualifiers();
+ QualType ImplicitParamType = S.Context.getCVRQualifiedType(ClassType, Quals);
+
+ // Set up the conversion sequence as a "bad" conversion, to allow us
+ // to exit early.
+ ImplicitConversionSequence ICS;
+
+ // We need to have an object of class type.
+ if (const PointerType *PT = FromType->getAs<PointerType>()) {
+ FromType = PT->getPointeeType();
+
+ // When we had a pointer, it's implicitly dereferenced, so we
+ // better have an lvalue.
+ assert(FromClassification.isLValue());
+ }
+
+ assert(FromType->isRecordType());
+
+ // C++0x [over.match.funcs]p4:
+ // For non-static member functions, the type of the implicit object
+ // parameter is
+ //
+ // - "lvalue reference to cv X" for functions declared without a
+ // ref-qualifier or with the & ref-qualifier
+ // - "rvalue reference to cv X" for functions declared with the &&
+ // ref-qualifier
+ //
+ // where X is the class of which the function is a member and cv is the
+ // cv-qualification on the member function declaration.
+ //
+ // However, when finding an implicit conversion sequence for the argument, we
+ // are not allowed to create temporaries or perform user-defined conversions
+ // (C++ [over.match.funcs]p5). We perform a simplified version of
+ // reference binding here, that allows class rvalues to bind to
+ // non-constant references.
+
+ // First check the qualifiers.
+ QualType FromTypeCanon = S.Context.getCanonicalType(FromType);
+ if (ImplicitParamType.getCVRQualifiers()
+ != FromTypeCanon.getLocalCVRQualifiers() &&
+ !ImplicitParamType.isAtLeastAsQualifiedAs(FromTypeCanon)) {
+ ICS.setBad(BadConversionSequence::bad_qualifiers,
+ FromType, ImplicitParamType);
+ return ICS;
+ }
+
+ // Check that we have either the same type or a derived type. It
+ // affects the conversion rank.
+ QualType ClassTypeCanon = S.Context.getCanonicalType(ClassType);
+ ImplicitConversionKind SecondKind;
+ if (ClassTypeCanon == FromTypeCanon.getLocalUnqualifiedType()) {
+ SecondKind = ICK_Identity;
+ } else if (S.IsDerivedFrom(Loc, FromType, ClassType))
+ SecondKind = ICK_Derived_To_Base;
+ else {
+ ICS.setBad(BadConversionSequence::unrelated_class,
+ FromType, ImplicitParamType);
+ return ICS;
+ }
+
+ // Check the ref-qualifier.
+ switch (Method->getRefQualifier()) {
+ case RQ_None:
+ // Do nothing; we don't care about lvalueness or rvalueness.
+ break;
+
+ case RQ_LValue:
+ if (!FromClassification.isLValue() && Quals != Qualifiers::Const) {
+ // non-const lvalue reference cannot bind to an rvalue
+ ICS.setBad(BadConversionSequence::lvalue_ref_to_rvalue, FromType,
+ ImplicitParamType);
+ return ICS;
+ }
+ break;
+
+ case RQ_RValue:
+ if (!FromClassification.isRValue()) {
+ // rvalue reference cannot bind to an lvalue
+ ICS.setBad(BadConversionSequence::rvalue_ref_to_lvalue, FromType,
+ ImplicitParamType);
+ return ICS;
+ }
+ break;
+ }
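+
+ // For example (an illustrative sketch):
+ //   struct X { void f() &; void g() &&; };
+ //   X x;
+ //   x.f();     // OK: lvalue object, & ref-qualifier
+ //   X().g();   // OK: rvalue object, && ref-qualifier
+ //   X().f();   // ill-formed: f() requires an lvalue object argument.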
+
+ // Success. Mark this as a reference binding.
+ ICS.setStandard();
+ ICS.Standard.setAsIdentityConversion();
+ ICS.Standard.Second = SecondKind;
+ ICS.Standard.setFromType(FromType);
+ ICS.Standard.setAllToTypes(ImplicitParamType);
+ ICS.Standard.ReferenceBinding = true;
+ ICS.Standard.DirectBinding = true;
+ ICS.Standard.IsLvalueReference = Method->getRefQualifier() != RQ_RValue;
+ ICS.Standard.BindsToFunctionLvalue = false;
+ ICS.Standard.BindsToRvalue = FromClassification.isRValue();
+ ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier
+ = (Method->getRefQualifier() == RQ_None);
+ return ICS;
+}
+
+/// PerformObjectArgumentInitialization - Perform initialization of
+/// the implicit object parameter for the given Method with the given
+/// expression.
+ExprResult
+Sema::PerformObjectArgumentInitialization(Expr *From,
+ NestedNameSpecifier *Qualifier,
+ NamedDecl *FoundDecl,
+ CXXMethodDecl *Method) {
+ QualType FromRecordType, DestType;
+ QualType ImplicitParamRecordType =
+ Method->getThisType(Context)->getAs<PointerType>()->getPointeeType();
+
+ Expr::Classification FromClassification;
+ if (const PointerType *PT = From->getType()->getAs<PointerType>()) {
+ FromRecordType = PT->getPointeeType();
+ DestType = Method->getThisType(Context);
+ FromClassification = Expr::Classification::makeSimpleLValue();
+ } else {
+ FromRecordType = From->getType();
+ DestType = ImplicitParamRecordType;
+ FromClassification = From->Classify(Context);
+ }
+
+ // Note that we always use the true parent context when performing
+ // the actual argument initialization.
+ ImplicitConversionSequence ICS = TryObjectArgumentInitialization(
+ *this, From->getLocStart(), From->getType(), FromClassification, Method,
+ Method->getParent());
+ if (ICS.isBad()) {
+ if (ICS.Bad.Kind == BadConversionSequence::bad_qualifiers) {
+ Qualifiers FromQs = FromRecordType.getQualifiers();
+ Qualifiers ToQs = DestType.getQualifiers();
+ unsigned CVR = FromQs.getCVRQualifiers() & ~ToQs.getCVRQualifiers();
+ if (CVR) {
+ Diag(From->getLocStart(),
+ diag::err_member_function_call_bad_cvr)
+ << Method->getDeclName() << FromRecordType << (CVR - 1)
+ << From->getSourceRange();
+ Diag(Method->getLocation(), diag::note_previous_decl)
+ << Method->getDeclName();
+ return ExprError();
+ }
+ }
+
+ return Diag(From->getLocStart(),
+ diag::err_implicit_object_parameter_init)
+ << ImplicitParamRecordType << FromRecordType << From->getSourceRange();
+ }
+
+ if (ICS.Standard.Second == ICK_Derived_To_Base) {
+ ExprResult FromRes =
+ PerformObjectMemberConversion(From, Qualifier, FoundDecl, Method);
+ if (FromRes.isInvalid())
+ return ExprError();
+ From = FromRes.get();
+ }
+
+ if (!Context.hasSameType(From->getType(), DestType))
+ From = ImpCastExprToType(From, DestType, CK_NoOp,
+ From->getValueKind()).get();
+ return From;
+}
+
+/// TryContextuallyConvertToBool - Attempt to contextually convert the
+/// expression From to bool (C++0x [conv]p3).
+static ImplicitConversionSequence
+TryContextuallyConvertToBool(Sema &S, Expr *From) {
+ return TryImplicitConversion(S, From, S.Context.BoolTy,
+ /*SuppressUserConversions=*/false,
+ /*AllowExplicit=*/true,
+ /*InOverloadResolution=*/false,
+ /*CStyle=*/false,
+ /*AllowObjCWritebackConversion=*/false,
+ /*AllowObjCConversionOnExplicit=*/false);
+}
+
+/// PerformContextuallyConvertToBool - Perform a contextual conversion
+/// of the expression From to bool (C++0x [conv]p3).
+ExprResult Sema::PerformContextuallyConvertToBool(Expr *From) {
+ if (checkPlaceholderForOverload(*this, From))
+ return ExprError();
+
+ ImplicitConversionSequence ICS = TryContextuallyConvertToBool(*this, From);
+ if (!ICS.isBad())
+ return PerformImplicitConversion(From, Context.BoolTy, ICS, AA_Converting);
+
+ if (!DiagnoseMultipleUserDefinedConversion(From, Context.BoolTy))
+ return Diag(From->getLocStart(),
+ diag::err_typecheck_bool_condition)
+ << From->getType() << From->getSourceRange();
+ return ExprError();
+}
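+
+// For illustration only: contextual conversion to bool considers explicit
+// conversion functions (AllowExplicit=true above), so this hypothetical
+// snippet is well-formed even though the operator is explicit:
+//
+//   struct Handle { explicit operator bool() const; };
+//   void use(Handle h) {
+//     if (h) { }      // OK: contextually converted to bool
+//     // bool b = h;  // ill-formed: copy-initialization is not contextual
+//   }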
+
+/// Check that the specified conversion is permitted in a converted constant
+/// expression, according to C++11 [expr.const]p3. Return true if the conversion
+/// is acceptable.
+static bool CheckConvertedConstantConversions(Sema &S,
+ StandardConversionSequence &SCS) {
+ // Since we know that the target type is an integral or unscoped enumeration
+ // type, most conversion kinds are impossible. All possible First and Third
+ // conversions are fine.
+ switch (SCS.Second) {
+ case ICK_Identity:
+ case ICK_NoReturn_Adjustment:
+ case ICK_Integral_Promotion:
+ case ICK_Integral_Conversion: // Narrowing conversions are checked elsewhere.
+ return true;
+
+ case ICK_Boolean_Conversion:
+ // Conversion from an integral or unscoped enumeration type to bool is
+ // classified as ICK_Boolean_Conversion, but it's also arguably an integral
+ // conversion, so we allow it in a converted constant expression.
+ //
+ // FIXME: Per core issue 1407, we should not allow this, but that breaks
+ // a lot of popular code. We should at least add a warning for this
+ // (non-conforming) extension.
+ return SCS.getFromType()->isIntegralOrUnscopedEnumerationType() &&
+ SCS.getToType(2)->isBooleanType();
+
+ case ICK_Pointer_Conversion:
+ case ICK_Pointer_Member:
+ // C++1z: null pointer conversions and null member pointer conversions are
+ // only permitted if the source type is std::nullptr_t.
+ return SCS.getFromType()->isNullPtrType();
+
+ case ICK_Floating_Promotion:
+ case ICK_Complex_Promotion:
+ case ICK_Floating_Conversion:
+ case ICK_Complex_Conversion:
+ case ICK_Floating_Integral:
+ case ICK_Compatible_Conversion:
+ case ICK_Derived_To_Base:
+ case ICK_Vector_Conversion:
+ case ICK_Vector_Splat:
+ case ICK_Complex_Real:
+ case ICK_Block_Pointer_Conversion:
+ case ICK_TransparentUnionConversion:
+ case ICK_Writeback_Conversion:
+ case ICK_Zero_Event_Conversion:
+ case ICK_C_Only_Conversion:
+ return false;
+
+ case ICK_Lvalue_To_Rvalue:
+ case ICK_Array_To_Pointer:
+ case ICK_Function_To_Pointer:
+ llvm_unreachable("found a first conversion kind in Second");
+
+ case ICK_Qualification:
+ llvm_unreachable("found a third conversion kind in Second");
+
+ case ICK_Num_Conversion_Kinds:
+ break;
+ }
+
+ llvm_unreachable("unknown conversion kind");
+}
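+
+// For illustration only: in a converted constant expression such as a
+// non-type template argument, integral promotions are permitted while,
+// e.g., floating-integral conversions are not (names hypothetical):
+//
+//   template <int N> struct A {};
+//   A<'x'> a1;  // OK: integral promotion (char -> int)
+//   A<1.0> a2;  // error: floating-integral conversion is not permitted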
+
+/// CheckConvertedConstantExpression - Check that the expression From is a
+/// converted constant expression of type T, perform the conversion and produce
+/// the converted expression, per C++11 [expr.const]p3.
+static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
+ QualType T, APValue &Value,
+ Sema::CCEKind CCE,
+ bool RequireInt) {
+ assert(S.getLangOpts().CPlusPlus11 &&
+ "converted constant expression outside C++11");
+
+ if (checkPlaceholderForOverload(S, From))
+ return ExprError();
+
+ // C++1z [expr.const]p3:
+ // A converted constant expression of type T is an expression,
+ // implicitly converted to type T, where the converted
+ // expression is a constant expression and the implicit conversion
+ // sequence contains only [... list of conversions ...].
+ ImplicitConversionSequence ICS =
+ TryCopyInitialization(S, From, T,
+ /*SuppressUserConversions=*/false,
+ /*InOverloadResolution=*/false,
+                          /*AllowObjCWritebackConversion=*/false,
+ /*AllowExplicit=*/false);
+ StandardConversionSequence *SCS = nullptr;
+ switch (ICS.getKind()) {
+ case ImplicitConversionSequence::StandardConversion:
+ SCS = &ICS.Standard;
+ break;
+ case ImplicitConversionSequence::UserDefinedConversion:
+ // We are converting to a non-class type, so the Before sequence
+ // must be trivial.
+ SCS = &ICS.UserDefined.After;
+ break;
+ case ImplicitConversionSequence::AmbiguousConversion:
+ case ImplicitConversionSequence::BadConversion:
+ if (!S.DiagnoseMultipleUserDefinedConversion(From, T))
+ return S.Diag(From->getLocStart(),
+ diag::err_typecheck_converted_constant_expression)
+ << From->getType() << From->getSourceRange() << T;
+ return ExprError();
+
+ case ImplicitConversionSequence::EllipsisConversion:
+ llvm_unreachable("ellipsis conversion in converted constant expression");
+ }
+
+ // Check that we would only use permitted conversions.
+ if (!CheckConvertedConstantConversions(S, *SCS)) {
+ return S.Diag(From->getLocStart(),
+ diag::err_typecheck_converted_constant_expression_disallowed)
+ << From->getType() << From->getSourceRange() << T;
+ }
+ // [...] and where the reference binding (if any) binds directly.
+ if (SCS->ReferenceBinding && !SCS->DirectBinding) {
+ return S.Diag(From->getLocStart(),
+ diag::err_typecheck_converted_constant_expression_indirect)
+ << From->getType() << From->getSourceRange() << T;
+ }
+
+ ExprResult Result =
+ S.PerformImplicitConversion(From, T, ICS, Sema::AA_Converting);
+ if (Result.isInvalid())
+ return Result;
+
+ // Check for a narrowing implicit conversion.
+ APValue PreNarrowingValue;
+ QualType PreNarrowingType;
+ switch (SCS->getNarrowingKind(S.Context, Result.get(), PreNarrowingValue,
+ PreNarrowingType)) {
+ case NK_Variable_Narrowing:
+ // Implicit conversion to a narrower type, and the value is not a constant
+ // expression. We'll diagnose this in a moment.
+ case NK_Not_Narrowing:
+ break;
+
+ case NK_Constant_Narrowing:
+ S.Diag(From->getLocStart(), diag::ext_cce_narrowing)
+ << CCE << /*Constant*/1
+ << PreNarrowingValue.getAsString(S.Context, PreNarrowingType) << T;
+ break;
+
+ case NK_Type_Narrowing:
+ S.Diag(From->getLocStart(), diag::ext_cce_narrowing)
+ << CCE << /*Constant*/0 << From->getType() << T;
+ break;
+ }
+
+ // Check the expression is a constant expression.
+ SmallVector<PartialDiagnosticAt, 8> Notes;
+ Expr::EvalResult Eval;
+ Eval.Diag = &Notes;
+
+ if ((T->isReferenceType()
+ ? !Result.get()->EvaluateAsLValue(Eval, S.Context)
+ : !Result.get()->EvaluateAsRValue(Eval, S.Context)) ||
+ (RequireInt && !Eval.Val.isInt())) {
+ // The expression can't be folded, so we can't keep it at this position in
+ // the AST.
+ Result = ExprError();
+ } else {
+ Value = Eval.Val;
+
+ if (Notes.empty()) {
+ // It's a constant expression.
+ return Result;
+ }
+ }
+
+ // It's not a constant expression. Produce an appropriate diagnostic.
+ if (Notes.size() == 1 &&
+ Notes[0].second.getDiagID() == diag::note_invalid_subexpr_in_const_expr)
+ S.Diag(Notes[0].first, diag::err_expr_not_cce) << CCE;
+ else {
+ S.Diag(From->getLocStart(), diag::err_expr_not_cce)
+ << CCE << From->getSourceRange();
+ for (unsigned I = 0; I < Notes.size(); ++I)
+ S.Diag(Notes[I].first, Notes[I].second);
+ }
+ return ExprError();
+}
+
+ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
+ APValue &Value, CCEKind CCE) {
+ return ::CheckConvertedConstantExpression(*this, From, T, Value, CCE, false);
+}
+
+ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
+ llvm::APSInt &Value,
+ CCEKind CCE) {
+ assert(T->isIntegralOrEnumerationType() && "unexpected converted const type");
+
+ APValue V;
+ auto R = ::CheckConvertedConstantExpression(*this, From, T, V, CCE, true);
+ if (!R.isInvalid())
+ Value = V.getInt();
+ return R;
+}
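+
+// For illustration only: the narrowing checks above diagnose cases like the
+// following hypothetical snippet, where the constant value cannot be
+// represented in the converted-to type:
+//
+//   template <char C> struct B {};
+//   B<'a'> b1;  // OK
+//   B<300> b2;  // narrowing: 300 cannot be represented in 'char'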
+
+/// dropPointerConversion - If the given standard conversion sequence
+/// involves any pointer conversions, remove them. This may change
+/// the result type of the conversion sequence.
+static void dropPointerConversion(StandardConversionSequence &SCS) {
+ if (SCS.Second == ICK_Pointer_Conversion) {
+ SCS.Second = ICK_Identity;
+ SCS.Third = ICK_Identity;
+ SCS.ToTypePtrs[2] = SCS.ToTypePtrs[1] = SCS.ToTypePtrs[0];
+ }
+}
+
+/// TryContextuallyConvertToObjCPointer - Attempt to contextually
+/// convert the expression From to an Objective-C pointer type.
+static ImplicitConversionSequence
+TryContextuallyConvertToObjCPointer(Sema &S, Expr *From) {
+ // Do an implicit conversion to 'id'.
+ QualType Ty = S.Context.getObjCIdType();
+ ImplicitConversionSequence ICS
+ = TryImplicitConversion(S, From, Ty,
+ // FIXME: Are these flags correct?
+ /*SuppressUserConversions=*/false,
+ /*AllowExplicit=*/true,
+ /*InOverloadResolution=*/false,
+ /*CStyle=*/false,
+ /*AllowObjCWritebackConversion=*/false,
+ /*AllowObjCConversionOnExplicit=*/true);
+
+ // Strip off any final conversions to 'id'.
+ switch (ICS.getKind()) {
+ case ImplicitConversionSequence::BadConversion:
+ case ImplicitConversionSequence::AmbiguousConversion:
+ case ImplicitConversionSequence::EllipsisConversion:
+ break;
+
+ case ImplicitConversionSequence::UserDefinedConversion:
+ dropPointerConversion(ICS.UserDefined.After);
+ break;
+
+ case ImplicitConversionSequence::StandardConversion:
+ dropPointerConversion(ICS.Standard);
+ break;
+ }
+
+ return ICS;
+}
+
+/// PerformContextuallyConvertToObjCPointer - Perform a contextual
+/// conversion of the expression From to an Objective-C pointer type.
+ExprResult Sema::PerformContextuallyConvertToObjCPointer(Expr *From) {
+ if (checkPlaceholderForOverload(*this, From))
+ return ExprError();
+
+ QualType Ty = Context.getObjCIdType();
+ ImplicitConversionSequence ICS =
+ TryContextuallyConvertToObjCPointer(*this, From);
+ if (!ICS.isBad())
+ return PerformImplicitConversion(From, Ty, ICS, AA_Converting);
+ return ExprError();
+}
+
+/// Determine whether the provided type is an integral type, or an enumeration
+/// type of a permitted flavor.
+bool Sema::ICEConvertDiagnoser::match(QualType T) {
+ return AllowScopedEnumerations ? T->isIntegralOrEnumerationType()
+ : T->isIntegralOrUnscopedEnumerationType();
+}
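+
+// For illustration only: AllowScopedEnumerations distinguishes contexts
+// like a switch condition, where a scoped enumeration is acceptable as-is
+// (hypothetical sketch):
+//
+//   enum class Color { Red, Green };
+//   int classify(Color c) {
+//     switch (c) {          // matches with AllowScopedEnumerations
+//     case Color::Red:   return 0;
+//     default:           return 1;
+//     }
+//   }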
+
+static ExprResult
+diagnoseAmbiguousConversion(Sema &SemaRef, SourceLocation Loc, Expr *From,
+ Sema::ContextualImplicitConverter &Converter,
+ QualType T, UnresolvedSetImpl &ViableConversions) {
+
+ if (Converter.Suppress)
+ return ExprError();
+
+ Converter.diagnoseAmbiguous(SemaRef, Loc, T) << From->getSourceRange();
+ for (unsigned I = 0, N = ViableConversions.size(); I != N; ++I) {
+ CXXConversionDecl *Conv =
+ cast<CXXConversionDecl>(ViableConversions[I]->getUnderlyingDecl());
+ QualType ConvTy = Conv->getConversionType().getNonReferenceType();
+ Converter.noteAmbiguous(SemaRef, Conv, ConvTy);
+ }
+ return From;
+}
+
+static bool
+diagnoseNoViableConversion(Sema &SemaRef, SourceLocation Loc, Expr *&From,
+ Sema::ContextualImplicitConverter &Converter,
+ QualType T, bool HadMultipleCandidates,
+ UnresolvedSetImpl &ExplicitConversions) {
+ if (ExplicitConversions.size() == 1 && !Converter.Suppress) {
+ DeclAccessPair Found = ExplicitConversions[0];
+ CXXConversionDecl *Conversion =
+ cast<CXXConversionDecl>(Found->getUnderlyingDecl());
+
+ // The user probably meant to invoke the given explicit
+ // conversion; use it.
+ QualType ConvTy = Conversion->getConversionType().getNonReferenceType();
+ std::string TypeStr;
+ ConvTy.getAsStringInternal(TypeStr, SemaRef.getPrintingPolicy());
+
+ Converter.diagnoseExplicitConv(SemaRef, Loc, T, ConvTy)
+ << FixItHint::CreateInsertion(From->getLocStart(),
+ "static_cast<" + TypeStr + ">(")
+ << FixItHint::CreateInsertion(
+ SemaRef.getLocForEndOfToken(From->getLocEnd()), ")");
+ Converter.noteExplicitConv(SemaRef, Conversion, ConvTy);
+
+ // If we aren't in a SFINAE context, build a call to the
+ // explicit conversion function.
+ if (SemaRef.isSFINAEContext())
+ return true;
+
+ SemaRef.CheckMemberOperatorAccess(From->getExprLoc(), From, nullptr, Found);
+ ExprResult Result = SemaRef.BuildCXXMemberCallExpr(From, Found, Conversion,
+ HadMultipleCandidates);
+ if (Result.isInvalid())
+ return true;
+ // Record usage of conversion in an implicit cast.
+ From = ImplicitCastExpr::Create(SemaRef.Context, Result.get()->getType(),
+ CK_UserDefinedConversion, Result.get(),
+ nullptr, Result.get()->getValueKind());
+ }
+ return false;
+}
+
+static bool recordConversion(Sema &SemaRef, SourceLocation Loc, Expr *&From,
+ Sema::ContextualImplicitConverter &Converter,
+ QualType T, bool HadMultipleCandidates,
+ DeclAccessPair &Found) {
+ CXXConversionDecl *Conversion =
+ cast<CXXConversionDecl>(Found->getUnderlyingDecl());
+ SemaRef.CheckMemberOperatorAccess(From->getExprLoc(), From, nullptr, Found);
+
+ QualType ToType = Conversion->getConversionType().getNonReferenceType();
+ if (!Converter.SuppressConversion) {
+ if (SemaRef.isSFINAEContext())
+ return true;
+
+ Converter.diagnoseConversion(SemaRef, Loc, T, ToType)
+ << From->getSourceRange();
+ }
+
+ ExprResult Result = SemaRef.BuildCXXMemberCallExpr(From, Found, Conversion,
+ HadMultipleCandidates);
+ if (Result.isInvalid())
+ return true;
+ // Record usage of conversion in an implicit cast.
+ From = ImplicitCastExpr::Create(SemaRef.Context, Result.get()->getType(),
+ CK_UserDefinedConversion, Result.get(),
+ nullptr, Result.get()->getValueKind());
+ return false;
+}
+
+static ExprResult finishContextualImplicitConversion(
+ Sema &SemaRef, SourceLocation Loc, Expr *From,
+ Sema::ContextualImplicitConverter &Converter) {
+ if (!Converter.match(From->getType()) && !Converter.Suppress)
+ Converter.diagnoseNoMatch(SemaRef, Loc, From->getType())
+ << From->getSourceRange();
+
+ return SemaRef.DefaultLvalueConversion(From);
+}
+
+static void
+collectViableConversionCandidates(Sema &SemaRef, Expr *From, QualType ToType,
+ UnresolvedSetImpl &ViableConversions,
+ OverloadCandidateSet &CandidateSet) {
+ for (unsigned I = 0, N = ViableConversions.size(); I != N; ++I) {
+ DeclAccessPair FoundDecl = ViableConversions[I];
+ NamedDecl *D = FoundDecl.getDecl();
+ CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(D->getDeclContext());
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+
+ CXXConversionDecl *Conv;
+ FunctionTemplateDecl *ConvTemplate;
+ if ((ConvTemplate = dyn_cast<FunctionTemplateDecl>(D)))
+ Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
+ else
+ Conv = cast<CXXConversionDecl>(D);
+
+ if (ConvTemplate)
+ SemaRef.AddTemplateConversionCandidate(
+ ConvTemplate, FoundDecl, ActingContext, From, ToType, CandidateSet,
+ /*AllowObjCConversionOnExplicit=*/false);
+ else
+ SemaRef.AddConversionCandidate(Conv, FoundDecl, ActingContext, From,
+ ToType, CandidateSet,
+ /*AllowObjCConversionOnExplicit=*/false);
+ }
+}
+
+/// \brief Attempt to convert the given expression to a type which is accepted
+/// by the given converter.
+///
+/// This routine will attempt to convert an expression of class type to a
+/// type accepted by the specified converter. In C++11 and before, the class
+/// must have a single non-explicit conversion function converting to a matching
+/// type. In C++1y, there can be multiple such conversion functions, but only
+/// one target type.
+///
+/// \param Loc The source location of the construct that requires the
+/// conversion.
+///
+/// \param From The expression we're converting from.
+///
+/// \param Converter Used to control and diagnose the conversion process.
+///
+/// \returns The expression, converted to an integral or enumeration type if
+/// successful.
+ExprResult Sema::PerformContextualImplicitConversion(
+ SourceLocation Loc, Expr *From, ContextualImplicitConverter &Converter) {
+ // We can't perform any more checking for type-dependent expressions.
+ if (From->isTypeDependent())
+ return From;
+
+ // Process placeholders immediately.
+ if (From->hasPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(From);
+ if (result.isInvalid())
+ return result;
+ From = result.get();
+ }
+
+ // If the expression already has a matching type, we're golden.
+ QualType T = From->getType();
+ if (Converter.match(T))
+ return DefaultLvalueConversion(From);
+
+ // FIXME: Check for missing '()' if T is a function type?
+
+ // We can only perform contextual implicit conversions on objects of class
+ // type.
+ const RecordType *RecordTy = T->getAs<RecordType>();
+ if (!RecordTy || !getLangOpts().CPlusPlus) {
+ if (!Converter.Suppress)
+ Converter.diagnoseNoMatch(*this, Loc, T) << From->getSourceRange();
+ return From;
+ }
+
+ // We must have a complete class type.
+ struct TypeDiagnoserPartialDiag : TypeDiagnoser {
+ ContextualImplicitConverter &Converter;
+ Expr *From;
+
+ TypeDiagnoserPartialDiag(ContextualImplicitConverter &Converter, Expr *From)
+ : Converter(Converter), From(From) {}
+
+ void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
+ Converter.diagnoseIncomplete(S, Loc, T) << From->getSourceRange();
+ }
+ } IncompleteDiagnoser(Converter, From);
+
+ if (Converter.Suppress ? !isCompleteType(Loc, T)
+ : RequireCompleteType(Loc, T, IncompleteDiagnoser))
+ return From;
+
+ // Look for a conversion to an integral or enumeration type.
+ UnresolvedSet<4>
+ ViableConversions; // These are *potentially* viable in C++1y.
+ UnresolvedSet<4> ExplicitConversions;
+ const auto &Conversions =
+ cast<CXXRecordDecl>(RecordTy->getDecl())->getVisibleConversionFunctions();
+
+ bool HadMultipleCandidates =
+ (std::distance(Conversions.begin(), Conversions.end()) > 1);
+
+ // To check that there is only one target type, in C++1y:
+ QualType ToType;
+ bool HasUniqueTargetType = true;
+
+ // Collect explicit or viable (potentially in C++1y) conversions.
+ for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
+ NamedDecl *D = (*I)->getUnderlyingDecl();
+ CXXConversionDecl *Conversion;
+ FunctionTemplateDecl *ConvTemplate = dyn_cast<FunctionTemplateDecl>(D);
+ if (ConvTemplate) {
+ if (getLangOpts().CPlusPlus14)
+ Conversion = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
+ else
+ continue; // C++11 does not consider conversion operator templates(?).
+ } else
+ Conversion = cast<CXXConversionDecl>(D);
+
+ assert((!ConvTemplate || getLangOpts().CPlusPlus14) &&
+ "Conversion operator templates are considered potentially "
+ "viable in C++1y");
+
+ QualType CurToType = Conversion->getConversionType().getNonReferenceType();
+ if (Converter.match(CurToType) || ConvTemplate) {
+
+ if (Conversion->isExplicit()) {
+ // FIXME: For C++1y, do we need this restriction?
+ // cf. diagnoseNoViableConversion()
+ if (!ConvTemplate)
+ ExplicitConversions.addDecl(I.getDecl(), I.getAccess());
+ } else {
+ if (!ConvTemplate && getLangOpts().CPlusPlus14) {
+ if (ToType.isNull())
+ ToType = CurToType.getUnqualifiedType();
+ else if (HasUniqueTargetType &&
+ (CurToType.getUnqualifiedType() != ToType))
+ HasUniqueTargetType = false;
+ }
+ ViableConversions.addDecl(I.getDecl(), I.getAccess());
+ }
+ }
+ }
+
+ if (getLangOpts().CPlusPlus14) {
+ // C++1y [conv]p6:
+ // ... An expression e of class type E appearing in such a context
+ // is said to be contextually implicitly converted to a specified
+ // type T and is well-formed if and only if e can be implicitly
+ // converted to a type T that is determined as follows: E is searched
+ // for conversion functions whose return type is cv T or reference to
+ // cv T such that T is allowed by the context. There shall be
+ // exactly one such T.
+
+ // If no unique T is found:
+ if (ToType.isNull()) {
+ if (diagnoseNoViableConversion(*this, Loc, From, Converter, T,
+ HadMultipleCandidates,
+ ExplicitConversions))
+ return ExprError();
+ return finishContextualImplicitConversion(*this, Loc, From, Converter);
+ }
+
+    // If more than one unique T is found:
+ if (!HasUniqueTargetType)
+ return diagnoseAmbiguousConversion(*this, Loc, From, Converter, T,
+ ViableConversions);
+
+ // If one unique T is found:
+ // First, build a candidate set from the previously recorded
+ // potentially viable conversions.
+ OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
+ collectViableConversionCandidates(*this, From, ToType, ViableConversions,
+ CandidateSet);
+
+ // Then, perform overload resolution over the candidate set.
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(*this, Loc, Best)) {
+ case OR_Success: {
+ // Apply this conversion.
+ DeclAccessPair Found =
+ DeclAccessPair::make(Best->Function, Best->FoundDecl.getAccess());
+ if (recordConversion(*this, Loc, From, Converter, T,
+ HadMultipleCandidates, Found))
+ return ExprError();
+ break;
+ }
+ case OR_Ambiguous:
+ return diagnoseAmbiguousConversion(*this, Loc, From, Converter, T,
+ ViableConversions);
+ case OR_No_Viable_Function:
+ if (diagnoseNoViableConversion(*this, Loc, From, Converter, T,
+ HadMultipleCandidates,
+ ExplicitConversions))
+ return ExprError();
+      // fall through to the 'OR_Deleted' case.
+ case OR_Deleted:
+ // We'll complain below about a non-integral condition type.
+ break;
+ }
+ } else {
+ switch (ViableConversions.size()) {
+ case 0: {
+ if (diagnoseNoViableConversion(*this, Loc, From, Converter, T,
+ HadMultipleCandidates,
+ ExplicitConversions))
+ return ExprError();
+
+ // We'll complain below about a non-integral condition type.
+ break;
+ }
+ case 1: {
+ // Apply this conversion.
+ DeclAccessPair Found = ViableConversions[0];
+ if (recordConversion(*this, Loc, From, Converter, T,
+ HadMultipleCandidates, Found))
+ return ExprError();
+ break;
+ }
+ default:
+ return diagnoseAmbiguousConversion(*this, Loc, From, Converter, T,
+ ViableConversions);
+ }
+ }
+
+ return finishContextualImplicitConversion(*this, Loc, From, Converter);
+}
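+
+// For illustration only: the C++1y rules above require exactly one target
+// type T across the viable conversion functions (hypothetical sketch):
+//
+//   struct N {
+//     operator int() const;
+//     operator long() const;
+//   };
+//   void f(N n) {
+//     switch (n) { default: break; } // error: ambiguous, no unique T
+//   }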
+
+/// IsAcceptableNonMemberOperatorCandidate - Determine whether Fn is
+/// an acceptable non-member overloaded operator for a call whose
+/// arguments have types T1 (and, if non-empty, T2). This routine
+/// implements the check in C++ [over.match.oper]p3b2 concerning
+/// enumeration types.
+static bool IsAcceptableNonMemberOperatorCandidate(ASTContext &Context,
+ FunctionDecl *Fn,
+ ArrayRef<Expr *> Args) {
+ QualType T1 = Args[0]->getType();
+ QualType T2 = Args.size() > 1 ? Args[1]->getType() : QualType();
+
+ if (T1->isDependentType() || (!T2.isNull() && T2->isDependentType()))
+ return true;
+
+ if (T1->isRecordType() || (!T2.isNull() && T2->isRecordType()))
+ return true;
+
+ const FunctionProtoType *Proto = Fn->getType()->getAs<FunctionProtoType>();
+ if (Proto->getNumParams() < 1)
+ return false;
+
+ if (T1->isEnumeralType()) {
+ QualType ArgType = Proto->getParamType(0).getNonReferenceType();
+ if (Context.hasSameUnqualifiedType(T1, ArgType))
+ return true;
+ }
+
+ if (Proto->getNumParams() < 2)
+ return false;
+
+ if (!T2.isNull() && T2->isEnumeralType()) {
+ QualType ArgType = Proto->getParamType(1).getNonReferenceType();
+ if (Context.hasSameUnqualifiedType(T2, ArgType))
+ return true;
+ }
+
+ return false;
+}
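+
+// For illustration only: per [over.match.oper]p3b2, with no class-type
+// operand, a non-member operator is a candidate only if it takes the
+// operand's enumeration type (hypothetical sketch):
+//
+//   enum E { e1, e2 };
+//   enum F { f1 };
+//   bool operator<(E, E); // candidate for 'e1 < e2' (parameter of type E)
+//   bool operator<(F, F); // filtered out by this rule for 'e1 < e2'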
+
+/// AddOverloadCandidate - Adds the given function to the set of
+/// candidate functions, using the given function call arguments. If
+/// @p SuppressUserConversions, then don't allow user-defined
+/// conversions via constructors or conversion operators.
+///
+/// \param PartialOverloading true if we are performing "partial" overloading
+/// based on an incomplete set of function arguments. This feature is used by
+/// code completion.
+void
+Sema::AddOverloadCandidate(FunctionDecl *Function,
+ DeclAccessPair FoundDecl,
+ ArrayRef<Expr *> Args,
+ OverloadCandidateSet &CandidateSet,
+ bool SuppressUserConversions,
+ bool PartialOverloading,
+ bool AllowExplicit) {
+ const FunctionProtoType *Proto
+ = dyn_cast<FunctionProtoType>(Function->getType()->getAs<FunctionType>());
+ assert(Proto && "Functions without a prototype cannot be overloaded");
+ assert(!Function->getDescribedFunctionTemplate() &&
+ "Use AddTemplateOverloadCandidate for function templates");
+
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Function)) {
+ if (!isa<CXXConstructorDecl>(Method)) {
+ // If we get here, it's because we're calling a member function
+ // that is named without a member access expression (e.g.,
+ // "this->f") that was either written explicitly or created
+ // implicitly. This can happen with a qualified call to a member
+ // function, e.g., X::f(). We use an empty type for the implied
+ // object argument (C++ [over.call.func]p3), and the acting context
+ // is irrelevant.
+ AddMethodCandidate(Method, FoundDecl, Method->getParent(),
+ QualType(), Expr::Classification::makeSimpleLValue(),
+ Args, CandidateSet, SuppressUserConversions,
+ PartialOverloading);
+ return;
+ }
+ // We treat a constructor like a non-member function, since its object
+ // argument doesn't participate in overload resolution.
+ }
+
+ if (!CandidateSet.isNewCandidate(Function))
+ return;
+
+ // C++ [over.match.oper]p3:
+ // if no operand has a class type, only those non-member functions in the
+ // lookup set that have a first parameter of type T1 or "reference to
+ // (possibly cv-qualified) T1", when T1 is an enumeration type, or (if there
+ // is a right operand) a second parameter of type T2 or "reference to
+ // (possibly cv-qualified) T2", when T2 is an enumeration type, are
+ // candidate functions.
+ if (CandidateSet.getKind() == OverloadCandidateSet::CSK_Operator &&
+ !IsAcceptableNonMemberOperatorCandidate(Context, Function, Args))
+ return;
+
+ // C++11 [class.copy]p11: [DR1402]
+ // A defaulted move constructor that is defined as deleted is ignored by
+ // overload resolution.
+ CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Function);
+ if (Constructor && Constructor->isDefaulted() && Constructor->isDeleted() &&
+ Constructor->isMoveConstructor())
+ return;
+
+ // Overload resolution is always an unevaluated context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+
+ // Add this candidate
+ OverloadCandidate &Candidate = CandidateSet.addCandidate(Args.size());
+ Candidate.FoundDecl = FoundDecl;
+ Candidate.Function = Function;
+ Candidate.Viable = true;
+ Candidate.IsSurrogate = false;
+ Candidate.IgnoreObjectArgument = false;
+ Candidate.ExplicitCallArguments = Args.size();
+
+ if (Constructor) {
+ // C++ [class.copy]p3:
+ // A member function template is never instantiated to perform the copy
+ // of a class object to an object of its class type.
+ QualType ClassType = Context.getTypeDeclType(Constructor->getParent());
+ if (Args.size() == 1 && Constructor->isSpecializationCopyingObject() &&
+ (Context.hasSameUnqualifiedType(ClassType, Args[0]->getType()) ||
+ IsDerivedFrom(Args[0]->getLocStart(), Args[0]->getType(),
+ ClassType))) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_illegal_constructor;
+ return;
+ }
+ }
+
+ unsigned NumParams = Proto->getNumParams();
+
+ // (C++ 13.3.2p2): A candidate function having fewer than m
+ // parameters is viable only if it has an ellipsis in its parameter
+ // list (8.3.5).
+ if (TooManyArguments(NumParams, Args.size(), PartialOverloading) &&
+ !Proto->isVariadic()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_too_many_arguments;
+ return;
+ }
+
+ // (C++ 13.3.2p2): A candidate function having more than m parameters
+ // is viable only if the (m+1)st parameter has a default argument
+ // (8.3.6). For the purposes of overload resolution, the
+ // parameter list is truncated on the right, so that there are
+ // exactly m parameters.
+ unsigned MinRequiredArgs = Function->getMinRequiredArguments();
+ if (Args.size() < MinRequiredArgs && !PartialOverloading) {
+ // Not enough arguments.
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_too_few_arguments;
+ return;
+ }
+
+ // (CUDA B.1): Check for invalid calls between targets.
+ if (getLangOpts().CUDA)
+ if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext))
+ // Skip the check for callers that are implicit members, because in this
+ // case we may not yet know what the member's target is; the target is
+ // inferred for the member automatically, based on the bases and fields of
+ // the class.
+ if (!Caller->isImplicit() && CheckCUDATarget(Caller, Function)) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_target;
+ return;
+ }
+
+ // Determine the implicit conversion sequences for each of the
+ // arguments.
+ for (unsigned ArgIdx = 0; ArgIdx < Args.size(); ++ArgIdx) {
+ if (ArgIdx < NumParams) {
+ // (C++ 13.3.2p3): for F to be a viable function, there shall
+ // exist for each argument an implicit conversion sequence
+ // (13.3.3.1) that converts that argument to the corresponding
+ // parameter of F.
+ QualType ParamType = Proto->getParamType(ArgIdx);
+ Candidate.Conversions[ArgIdx]
+ = TryCopyInitialization(*this, Args[ArgIdx], ParamType,
+ SuppressUserConversions,
+ /*InOverloadResolution=*/true,
+ /*AllowObjCWritebackConversion=*/
+ getLangOpts().ObjCAutoRefCount,
+ AllowExplicit);
+ if (Candidate.Conversions[ArgIdx].isBad()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_conversion;
+ return;
+ }
+ } else {
+ // (C++ 13.3.2p2): For the purposes of overload resolution, any
+ // argument for which there is no corresponding parameter is
+ // considered to ""match the ellipsis" (C+ 13.3.3.1.3).
+ Candidate.Conversions[ArgIdx].setEllipsis();
+ }
+ }
+
+ if (EnableIfAttr *FailedAttr = CheckEnableIf(Function, Args)) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_enable_if;
+ Candidate.DeductionFailure.Data = FailedAttr;
+ return;
+ }
+}
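+
+// For illustration only: the arity checks above mark candidates non-viable
+// before any argument conversions are formed (hypothetical sketch):
+//
+//   void g(int, int = 0); // viable for g(1) and g(1, 2)
+//   void g(int, ...);     // ellipsis keeps it viable for extra arguments
+//   // g(1, 2, 3): only the variadic overload survives the arity checks.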
+
+ObjCMethodDecl *Sema::SelectBestMethod(Selector Sel, MultiExprArg Args,
+ bool IsInstance) {
+ SmallVector<ObjCMethodDecl*, 4> Methods;
+ if (!CollectMultipleMethodsInGlobalPool(Sel, Methods, IsInstance))
+ return nullptr;
+
+ for (unsigned b = 0, e = Methods.size(); b < e; b++) {
+ bool Match = true;
+ ObjCMethodDecl *Method = Methods[b];
+ unsigned NumNamedArgs = Sel.getNumArgs();
+    // The method might have more parameters than the selector indicates.
+    // This is due to the addition of C-style arguments to the method.
+ if (Method->param_size() > NumNamedArgs)
+ NumNamedArgs = Method->param_size();
+ if (Args.size() < NumNamedArgs)
+ continue;
+
+ for (unsigned i = 0; i < NumNamedArgs; i++) {
+ // We can't do any type-checking on a type-dependent argument.
+ if (Args[i]->isTypeDependent()) {
+ Match = false;
+ break;
+ }
+
+ ParmVarDecl *param = Method->parameters()[i];
+ Expr *argExpr = Args[i];
+ assert(argExpr && "SelectBestMethod(): missing expression");
+
+ // Strip the unbridged-cast placeholder expression off unless it's
+ // a consumed argument.
+ if (argExpr->hasPlaceholderType(BuiltinType::ARCUnbridgedCast) &&
+ !param->hasAttr<CFConsumedAttr>())
+ argExpr = stripARCUnbridgedCast(argExpr);
+
+ // If the parameter is __unknown_anytype, move on to the next method.
+ if (param->getType() == Context.UnknownAnyTy) {
+ Match = false;
+ break;
+ }
+
+ ImplicitConversionSequence ConversionState
+ = TryCopyInitialization(*this, argExpr, param->getType(),
+ /*SuppressUserConversions*/false,
+ /*InOverloadResolution=*/true,
+ /*AllowObjCWritebackConversion=*/
+ getLangOpts().ObjCAutoRefCount,
+ /*AllowExplicit*/false);
+ if (ConversionState.isBad()) {
+ Match = false;
+ break;
+ }
+ }
+ // Promote additional arguments to variadic methods.
+ if (Match && Method->isVariadic()) {
+ for (unsigned i = NumNamedArgs, e = Args.size(); i < e; ++i) {
+ if (Args[i]->isTypeDependent()) {
+ Match = false;
+ break;
+ }
+ ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], VariadicMethod,
+ nullptr);
+ if (Arg.isInvalid()) {
+ Match = false;
+ break;
+ }
+ }
+ } else {
+ // Check for extra arguments to non-variadic methods.
+ if (Args.size() != NumNamedArgs)
+ Match = false;
+ else if (Match && NumNamedArgs == 0 && Methods.size() > 1) {
+ // Special case when selectors have no argument. In this case, select
+ // one with the most general result type of 'id'.
+ for (unsigned b = 0, e = Methods.size(); b < e; b++) {
+ QualType ReturnT = Methods[b]->getReturnType();
+ if (ReturnT->isObjCIdType())
+ return Methods[b];
+ }
+ }
+ }
+
+ if (Match)
+ return Method;
+ }
+ return nullptr;
+}
+
+// specific_attr_iterator iterates over enable_if attributes in reverse, and
+// enable_if is order-sensitive, so we need to reverse them back here. The
+// inline size of 4 elements is arbitrary.
+static SmallVector<EnableIfAttr *, 4>
+getOrderedEnableIfAttrs(const FunctionDecl *Function) {
+ SmallVector<EnableIfAttr *, 4> Result;
+ if (!Function->hasAttrs())
+ return Result;
+
+ const auto &FuncAttrs = Function->getAttrs();
+ for (Attr *Attr : FuncAttrs)
+ if (auto *EnableIf = dyn_cast<EnableIfAttr>(Attr))
+ Result.push_back(EnableIf);
+
+ std::reverse(Result.begin(), Result.end());
+ return Result;
+}
+
+EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
+ bool MissingImplicitThis) {
+ auto EnableIfAttrs = getOrderedEnableIfAttrs(Function);
+ if (EnableIfAttrs.empty())
+ return nullptr;
+
+ SFINAETrap Trap(*this);
+ SmallVector<Expr *, 16> ConvertedArgs;
+ bool InitializationFailed = false;
+ bool ContainsValueDependentExpr = false;
+
+ // Convert the arguments.
+ for (unsigned i = 0, e = Args.size(); i != e; ++i) {
+ if (i == 0 && !MissingImplicitThis && isa<CXXMethodDecl>(Function) &&
+ !cast<CXXMethodDecl>(Function)->isStatic() &&
+ !isa<CXXConstructorDecl>(Function)) {
+ CXXMethodDecl *Method = cast<CXXMethodDecl>(Function);
+ ExprResult R =
+ PerformObjectArgumentInitialization(Args[0], /*Qualifier=*/nullptr,
+ Method, Method);
+ if (R.isInvalid()) {
+ InitializationFailed = true;
+ break;
+ }
+ ContainsValueDependentExpr |= R.get()->isValueDependent();
+ ConvertedArgs.push_back(R.get());
+ } else {
+ ExprResult R =
+ PerformCopyInitialization(InitializedEntity::InitializeParameter(
+ Context,
+ Function->getParamDecl(i)),
+ SourceLocation(),
+ Args[i]);
+ if (R.isInvalid()) {
+ InitializationFailed = true;
+ break;
+ }
+ ContainsValueDependentExpr |= R.get()->isValueDependent();
+ ConvertedArgs.push_back(R.get());
+ }
+ }
+
+ if (InitializationFailed || Trap.hasErrorOccurred())
+ return EnableIfAttrs[0];
+
+ // Push default arguments if needed.
+ if (!Function->isVariadic() && Args.size() < Function->getNumParams()) {
+ for (unsigned i = Args.size(), e = Function->getNumParams(); i != e; ++i) {
+ ParmVarDecl *P = Function->getParamDecl(i);
+ ExprResult R = PerformCopyInitialization(
+ InitializedEntity::InitializeParameter(Context,
+ Function->getParamDecl(i)),
+ SourceLocation(),
+ P->hasUninstantiatedDefaultArg() ? P->getUninstantiatedDefaultArg()
+ : P->getDefaultArg());
+ if (R.isInvalid()) {
+ InitializationFailed = true;
+ break;
+ }
+ ContainsValueDependentExpr |= R.get()->isValueDependent();
+ ConvertedArgs.push_back(R.get());
+ }
+
+ if (InitializationFailed || Trap.hasErrorOccurred())
+ return EnableIfAttrs[0];
+ }
+
+ for (auto *EIA : EnableIfAttrs) {
+ APValue Result;
+ if (EIA->getCond()->isValueDependent()) {
+ // Don't even try now, we'll examine it after instantiation.
+ continue;
+ }
+
+ if (!EIA->getCond()->EvaluateWithSubstitution(
+ Result, Context, Function, llvm::makeArrayRef(ConvertedArgs))) {
+ if (!ContainsValueDependentExpr)
+ return EIA;
+ } else if (!Result.isInt() || !Result.getInt().getBoolValue()) {
+ return EIA;
+ }
+ }
+ return nullptr;
+}
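+
+// For illustration only: the evaluation above implements Clang's enable_if
+// extension, e.g. (hypothetical sketch):
+//
+//   int half(int n)
+//       __attribute__((enable_if(n % 2 == 0, "n must be even")));
+//   // half(4) is OK; half(3) makes the candidate non-viable, and the
+//   // first failing attribute in source order is reported.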
+
+/// \brief Add all of the function declarations in the given function set to
+/// the overload candidate set.
+void Sema::AddFunctionCandidates(const UnresolvedSetImpl &Fns,
+ ArrayRef<Expr *> Args,
+ OverloadCandidateSet& CandidateSet,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ bool SuppressUserConversions,
+ bool PartialOverloading) {
+ for (UnresolvedSetIterator F = Fns.begin(), E = Fns.end(); F != E; ++F) {
+ NamedDecl *D = F.getDecl()->getUnderlyingDecl();
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic())
+ AddMethodCandidate(cast<CXXMethodDecl>(FD), F.getPair(),
+ cast<CXXMethodDecl>(FD)->getParent(),
+ Args[0]->getType(), Args[0]->Classify(Context),
+ Args.slice(1), CandidateSet,
+ SuppressUserConversions, PartialOverloading);
+ else
+ AddOverloadCandidate(FD, F.getPair(), Args, CandidateSet,
+ SuppressUserConversions, PartialOverloading);
+ } else {
+ FunctionTemplateDecl *FunTmpl = cast<FunctionTemplateDecl>(D);
+ if (isa<CXXMethodDecl>(FunTmpl->getTemplatedDecl()) &&
+ !cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl())->isStatic())
+ AddMethodTemplateCandidate(FunTmpl, F.getPair(),
+ cast<CXXRecordDecl>(FunTmpl->getDeclContext()),
+ ExplicitTemplateArgs,
+ Args[0]->getType(),
+ Args[0]->Classify(Context), Args.slice(1),
+ CandidateSet, SuppressUserConversions,
+ PartialOverloading);
+ else
+ AddTemplateOverloadCandidate(FunTmpl, F.getPair(),
+ ExplicitTemplateArgs, Args,
+ CandidateSet, SuppressUserConversions,
+ PartialOverloading);
+ }
+ }
+}
+
+/// AddMethodCandidate - Adds a named decl (which is some kind of
+/// method) as a method candidate to the given overload set.
+void Sema::AddMethodCandidate(DeclAccessPair FoundDecl,
+ QualType ObjectType,
+ Expr::Classification ObjectClassification,
+ ArrayRef<Expr *> Args,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversions) {
+ NamedDecl *Decl = FoundDecl.getDecl();
+ CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(Decl->getDeclContext());
+
+ if (isa<UsingShadowDecl>(Decl))
+ Decl = cast<UsingShadowDecl>(Decl)->getTargetDecl();
+
+ if (FunctionTemplateDecl *TD = dyn_cast<FunctionTemplateDecl>(Decl)) {
+ assert(isa<CXXMethodDecl>(TD->getTemplatedDecl()) &&
+ "Expected a member function template");
+ AddMethodTemplateCandidate(TD, FoundDecl, ActingContext,
+ /*ExplicitArgs*/ nullptr,
+ ObjectType, ObjectClassification,
+ Args, CandidateSet,
+ SuppressUserConversions);
+ } else {
+ AddMethodCandidate(cast<CXXMethodDecl>(Decl), FoundDecl, ActingContext,
+ ObjectType, ObjectClassification,
+ Args,
+ CandidateSet, SuppressUserConversions);
+ }
+}
+
+/// AddMethodCandidate - Adds the given C++ member function to the set
+/// of candidate functions, using the given function call arguments
+/// and the object argument (@c Object). For example, in a call
+/// @c o.f(a1,a2), @c Object will contain @c o and @c Args will contain
+/// both @c a1 and @c a2. If @p SuppressUserConversions, then don't
+/// allow user-defined conversions via constructors or conversion
+/// operators.
+void
+Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
+ CXXRecordDecl *ActingContext, QualType ObjectType,
+ Expr::Classification ObjectClassification,
+ ArrayRef<Expr *> Args,
+ OverloadCandidateSet &CandidateSet,
+ bool SuppressUserConversions,
+ bool PartialOverloading) {
+ const FunctionProtoType *Proto
+ = dyn_cast<FunctionProtoType>(Method->getType()->getAs<FunctionType>());
+ assert(Proto && "Methods without a prototype cannot be overloaded");
+ assert(!isa<CXXConstructorDecl>(Method) &&
+ "Use AddOverloadCandidate for constructors");
+
+ if (!CandidateSet.isNewCandidate(Method))
+ return;
+
+ // C++11 [class.copy]p23: [DR1402]
+ // A defaulted move assignment operator that is defined as deleted is
+ // ignored by overload resolution.
+ if (Method->isDefaulted() && Method->isDeleted() &&
+ Method->isMoveAssignmentOperator())
+ return;
+
+ // Overload resolution is always an unevaluated context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+
+ // Add this candidate
+ OverloadCandidate &Candidate = CandidateSet.addCandidate(Args.size() + 1);
+ Candidate.FoundDecl = FoundDecl;
+ Candidate.Function = Method;
+ Candidate.IsSurrogate = false;
+ Candidate.IgnoreObjectArgument = false;
+ Candidate.ExplicitCallArguments = Args.size();
+
+ unsigned NumParams = Proto->getNumParams();
+
+ // (C++ 13.3.2p2): A candidate function having fewer than m
+ // parameters is viable only if it has an ellipsis in its parameter
+ // list (8.3.5).
+ if (TooManyArguments(NumParams, Args.size(), PartialOverloading) &&
+ !Proto->isVariadic()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_too_many_arguments;
+ return;
+ }
+
+ // (C++ 13.3.2p2): A candidate function having more than m parameters
+ // is viable only if the (m+1)st parameter has a default argument
+ // (8.3.6). For the purposes of overload resolution, the
+ // parameter list is truncated on the right, so that there are
+ // exactly m parameters.
+ unsigned MinRequiredArgs = Method->getMinRequiredArguments();
+ if (Args.size() < MinRequiredArgs && !PartialOverloading) {
+ // Not enough arguments.
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_too_few_arguments;
+ return;
+ }
+
+ Candidate.Viable = true;
+
+ if (Method->isStatic() || ObjectType.isNull())
+ // The implicit object argument is ignored.
+ Candidate.IgnoreObjectArgument = true;
+ else {
+ // Determine the implicit conversion sequence for the object
+ // parameter.
+ Candidate.Conversions[0] = TryObjectArgumentInitialization(
+ *this, CandidateSet.getLocation(), ObjectType, ObjectClassification,
+ Method, ActingContext);
+ if (Candidate.Conversions[0].isBad()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_conversion;
+ return;
+ }
+ }
+
+ // (CUDA B.1): Check for invalid calls between targets.
+ if (getLangOpts().CUDA)
+ if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext))
+ if (CheckCUDATarget(Caller, Method)) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_target;
+ return;
+ }
+
+ // Determine the implicit conversion sequences for each of the
+ // arguments.
+ for (unsigned ArgIdx = 0; ArgIdx < Args.size(); ++ArgIdx) {
+ if (ArgIdx < NumParams) {
+ // (C++ 13.3.2p3): for F to be a viable function, there shall
+ // exist for each argument an implicit conversion sequence
+ // (13.3.3.1) that converts that argument to the corresponding
+ // parameter of F.
+ QualType ParamType = Proto->getParamType(ArgIdx);
+ Candidate.Conversions[ArgIdx + 1]
+ = TryCopyInitialization(*this, Args[ArgIdx], ParamType,
+ SuppressUserConversions,
+ /*InOverloadResolution=*/true,
+ /*AllowObjCWritebackConversion=*/
+ getLangOpts().ObjCAutoRefCount);
+ if (Candidate.Conversions[ArgIdx + 1].isBad()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_conversion;
+ return;
+ }
+ } else {
+ // (C++ 13.3.2p2): For the purposes of overload resolution, any
+ // argument for which there is no corresponding parameter is
+ // considered to "match the ellipsis" (C+ 13.3.3.1.3).
+ Candidate.Conversions[ArgIdx + 1].setEllipsis();
+ }
+ }
+
+  if (EnableIfAttr *FailedAttr =
+          CheckEnableIf(Method, Args, /*MissingImplicitThis=*/true)) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_enable_if;
+ Candidate.DeductionFailure.Data = FailedAttr;
+ return;
+ }
+}
+
+/// \brief Add a C++ member function template as a candidate to the candidate
+/// set, using template argument deduction to produce an appropriate member
+/// function template specialization.
+void
+Sema::AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
+ DeclAccessPair FoundDecl,
+ CXXRecordDecl *ActingContext,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ QualType ObjectType,
+ Expr::Classification ObjectClassification,
+ ArrayRef<Expr *> Args,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversions,
+ bool PartialOverloading) {
+ if (!CandidateSet.isNewCandidate(MethodTmpl))
+ return;
+
+ // C++ [over.match.funcs]p7:
+ // In each case where a candidate is a function template, candidate
+ // function template specializations are generated using template argument
+ // deduction (14.8.3, 14.8.2). Those candidates are then handled as
+ // candidate functions in the usual way.113) A given name can refer to one
+ // or more function templates and also to a set of overloaded non-template
+ // functions. In such a case, the candidate functions generated from each
+ // function template are combined with the set of non-template candidate
+ // functions.
+ TemplateDeductionInfo Info(CandidateSet.getLocation());
+ FunctionDecl *Specialization = nullptr;
+ if (TemplateDeductionResult Result
+ = DeduceTemplateArguments(MethodTmpl, ExplicitTemplateArgs, Args,
+ Specialization, Info, PartialOverloading)) {
+ OverloadCandidate &Candidate = CandidateSet.addCandidate();
+ Candidate.FoundDecl = FoundDecl;
+ Candidate.Function = MethodTmpl->getTemplatedDecl();
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_deduction;
+ Candidate.IsSurrogate = false;
+ Candidate.IgnoreObjectArgument = false;
+ Candidate.ExplicitCallArguments = Args.size();
+ Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result,
+ Info);
+ return;
+ }
+
+ // Add the function template specialization produced by template argument
+ // deduction as a candidate.
+ assert(Specialization && "Missing member function template specialization?");
+ assert(isa<CXXMethodDecl>(Specialization) &&
+ "Specialization is not a member function?");
+ AddMethodCandidate(cast<CXXMethodDecl>(Specialization), FoundDecl,
+ ActingContext, ObjectType, ObjectClassification, Args,
+ CandidateSet, SuppressUserConversions, PartialOverloading);
+}
+
+/// \brief Add a C++ function template specialization as a candidate
+/// in the candidate set, using template argument deduction to produce
+/// an appropriate function template specialization.
+void
+Sema::AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
+ DeclAccessPair FoundDecl,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ ArrayRef<Expr *> Args,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversions,
+ bool PartialOverloading) {
+ if (!CandidateSet.isNewCandidate(FunctionTemplate))
+ return;
+
+ // C++ [over.match.funcs]p7:
+ // In each case where a candidate is a function template, candidate
+ // function template specializations are generated using template argument
+ // deduction (14.8.3, 14.8.2). Those candidates are then handled as
+ // candidate functions in the usual way.113) A given name can refer to one
+ // or more function templates and also to a set of overloaded non-template
+ // functions. In such a case, the candidate functions generated from each
+ // function template are combined with the set of non-template candidate
+ // functions.
+ TemplateDeductionInfo Info(CandidateSet.getLocation());
+ FunctionDecl *Specialization = nullptr;
+ if (TemplateDeductionResult Result
+ = DeduceTemplateArguments(FunctionTemplate, ExplicitTemplateArgs, Args,
+ Specialization, Info, PartialOverloading)) {
+ OverloadCandidate &Candidate = CandidateSet.addCandidate();
+ Candidate.FoundDecl = FoundDecl;
+ Candidate.Function = FunctionTemplate->getTemplatedDecl();
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_deduction;
+ Candidate.IsSurrogate = false;
+ Candidate.IgnoreObjectArgument = false;
+ Candidate.ExplicitCallArguments = Args.size();
+ Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result,
+ Info);
+ return;
+ }
+
+ // Add the function template specialization produced by template argument
+ // deduction as a candidate.
+ assert(Specialization && "Missing function template specialization?");
+ AddOverloadCandidate(Specialization, FoundDecl, Args, CandidateSet,
+ SuppressUserConversions, PartialOverloading);
+}
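+
+// For illustration only: when deduction fails, the template still enters
+// the candidate set as non-viable so diagnostics can explain why
+// (hypothetical sketch):
+//
+//   template <typename T> void h(T, T);
+//   // h(1, 2.0): deduction infers T=int and T=double, which conflict;
+//   // the candidate is recorded with ovl_fail_bad_deduction.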
+
+/// Determine whether this is an allowable conversion from the result
+/// of an explicit conversion operator to the expected type, per C++
+/// [over.match.conv]p1 and [over.match.ref]p1.
+///
+/// \param ConvType The return type of the conversion function.
+///
+/// \param ToType The type we are converting to.
+///
+/// \param AllowObjCPointerConversion Allow a conversion from one
+/// Objective-C pointer to another.
+///
+/// \returns true if the conversion is allowable, false otherwise.
+static bool isAllowableExplicitConversion(Sema &S,
+ QualType ConvType, QualType ToType,
+ bool AllowObjCPointerConversion) {
+ QualType ToNonRefType = ToType.getNonReferenceType();
+
+ // Easy case: the types are the same.
+ if (S.Context.hasSameUnqualifiedType(ConvType, ToNonRefType))
+ return true;
+
+ // Allow qualification conversions.
+ bool ObjCLifetimeConversion;
+ if (S.IsQualificationConversion(ConvType, ToNonRefType, /*CStyle*/false,
+ ObjCLifetimeConversion))
+ return true;
+
+ // If we're not allowed to consider Objective-C pointer conversions,
+ // we're done.
+ if (!AllowObjCPointerConversion)
+ return false;
+
+ // Is this an Objective-C pointer conversion?
+ bool IncompatibleObjC = false;
+ QualType ConvertedType;
+ return S.isObjCPointerConversion(ConvType, ToNonRefType, ConvertedType,
+ IncompatibleObjC);
+}
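+
+// For illustration only: an explicit conversion function remains a
+// candidate when only a qualification conversion separates its return type
+// from the target (hypothetical sketch):
+//
+//   struct P { explicit operator int *() const; };
+//   const int *q(P p) {
+//     return static_cast<const int *>(p); // int* -> const int* qualifies
+//   }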
+
+/// AddConversionCandidate - Add a C++ conversion function as a
+/// candidate in the candidate set (C++ [over.match.conv],
+/// C++ [over.match.copy]). From is the expression we're converting from,
+/// and ToType is the type that we're eventually trying to convert to
+/// (which may or may not be the same type as the type that the
+/// conversion function produces).
+void
+Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
+ DeclAccessPair FoundDecl,
+ CXXRecordDecl *ActingContext,
+ Expr *From, QualType ToType,
+ OverloadCandidateSet& CandidateSet,
+ bool AllowObjCConversionOnExplicit) {
+ assert(!Conversion->getDescribedFunctionTemplate() &&
+ "Conversion function templates use AddTemplateConversionCandidate");
+ QualType ConvType = Conversion->getConversionType().getNonReferenceType();
+ if (!CandidateSet.isNewCandidate(Conversion))
+ return;
+
+ // If the conversion function has an undeduced return type, trigger its
+ // deduction now.
+ if (getLangOpts().CPlusPlus14 && ConvType->isUndeducedType()) {
+ if (DeduceReturnType(Conversion, From->getExprLoc()))
+ return;
+ ConvType = Conversion->getConversionType().getNonReferenceType();
+ }
+
+ // Per C++ [over.match.conv]p1, [over.match.ref]p1, an explicit conversion
+ // operator is only a candidate if its return type is the target type or
+ // can be converted to the target type with a qualification conversion.
+ if (Conversion->isExplicit() &&
+ !isAllowableExplicitConversion(*this, ConvType, ToType,
+ AllowObjCConversionOnExplicit))
+ return;
+
+ // Overload resolution is always an unevaluated context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+
+ // Add this candidate
+ OverloadCandidate &Candidate = CandidateSet.addCandidate(1);
+ Candidate.FoundDecl = FoundDecl;
+ Candidate.Function = Conversion;
+ Candidate.IsSurrogate = false;
+ Candidate.IgnoreObjectArgument = false;
+ Candidate.FinalConversion.setAsIdentityConversion();
+ Candidate.FinalConversion.setFromType(ConvType);
+ Candidate.FinalConversion.setAllToTypes(ToType);
+ Candidate.Viable = true;
+ Candidate.ExplicitCallArguments = 1;
+
+ // C++ [over.match.funcs]p4:
+ // For conversion functions, the function is considered to be a member of
+  //   the class of the implied object argument for the purpose of
+ // defining the type of the implicit object parameter.
+ //
+ // Determine the implicit conversion sequence for the implicit
+ // object parameter.
+ QualType ImplicitParamType = From->getType();
+ if (const PointerType *FromPtrType = ImplicitParamType->getAs<PointerType>())
+ ImplicitParamType = FromPtrType->getPointeeType();
+ CXXRecordDecl *ConversionContext
+ = cast<CXXRecordDecl>(ImplicitParamType->getAs<RecordType>()->getDecl());
+
+ Candidate.Conversions[0] = TryObjectArgumentInitialization(
+ *this, CandidateSet.getLocation(), From->getType(),
+ From->Classify(Context), Conversion, ConversionContext);
+
+ if (Candidate.Conversions[0].isBad()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_conversion;
+ return;
+ }
+
+  // We won't go through a user-defined type conversion function to convert a
+  // derived class to a base class, since such conversions are given
+  // Conversion rank; they go through a copy constructor instead.
+  // C++ 13.3.3.1.2p4 [over.ics.user]
+ QualType FromCanon
+ = Context.getCanonicalType(From->getType().getUnqualifiedType());
+ QualType ToCanon = Context.getCanonicalType(ToType).getUnqualifiedType();
+ if (FromCanon == ToCanon ||
+ IsDerivedFrom(CandidateSet.getLocation(), FromCanon, ToCanon)) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_trivial_conversion;
+ return;
+ }
+
+ // To determine what the conversion from the result of calling the
+ // conversion function to the type we're eventually trying to
+ // convert to (ToType), we need to synthesize a call to the
+ // conversion function and attempt copy initialization from it. This
+ // makes sure that we get the right semantics with respect to
+ // lvalues/rvalues and the type. Fortunately, we can allocate this
+ // call on the stack and we don't need its arguments to be
+ // well-formed.
+ DeclRefExpr ConversionRef(Conversion, false, Conversion->getType(),
+ VK_LValue, From->getLocStart());
+ ImplicitCastExpr ConversionFn(ImplicitCastExpr::OnStack,
+ Context.getPointerType(Conversion->getType()),
+ CK_FunctionToPointerDecay,
+ &ConversionRef, VK_RValue);
+
+ QualType ConversionType = Conversion->getConversionType();
+ if (!isCompleteType(From->getLocStart(), ConversionType)) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_final_conversion;
+ return;
+ }
+
+ ExprValueKind VK = Expr::getValueKindForType(ConversionType);
+
+ // Note that it is safe to allocate CallExpr on the stack here because
+ // there are 0 arguments (i.e., nothing is allocated using ASTContext's
+ // allocator).
+ QualType CallResultType = ConversionType.getNonLValueExprType(Context);
+ CallExpr Call(Context, &ConversionFn, None, CallResultType, VK,
+ From->getLocStart());
+ ImplicitConversionSequence ICS =
+ TryCopyInitialization(*this, &Call, ToType,
+ /*SuppressUserConversions=*/true,
+ /*InOverloadResolution=*/false,
+ /*AllowObjCWritebackConversion=*/false);
+
+ switch (ICS.getKind()) {
+ case ImplicitConversionSequence::StandardConversion:
+ Candidate.FinalConversion = ICS.Standard;
+
+ // C++ [over.ics.user]p3:
+ // If the user-defined conversion is specified by a specialization of a
+ // conversion function template, the second standard conversion sequence
+ // shall have exact match rank.
+ if (Conversion->getPrimaryTemplate() &&
+ GetConversionRank(ICS.Standard.Second) != ICR_Exact_Match) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_final_conversion_not_exact;
+ return;
+ }
+
+ // C++0x [dcl.init.ref]p5:
+ // In the second case, if the reference is an rvalue reference and
+ // the second standard conversion sequence of the user-defined
+ // conversion sequence includes an lvalue-to-rvalue conversion, the
+ // program is ill-formed.
+ if (ToType->isRValueReferenceType() &&
+ ICS.Standard.First == ICK_Lvalue_To_Rvalue) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_final_conversion;
+ return;
+ }
+ break;
+
+ case ImplicitConversionSequence::BadConversion:
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_final_conversion;
+ return;
+
+ default:
+ llvm_unreachable(
+ "Can only end up with a standard conversion sequence or failure");
+ }
+
+ if (EnableIfAttr *FailedAttr = CheckEnableIf(Conversion, None)) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_enable_if;
+ Candidate.DeductionFailure.Data = FailedAttr;
+ return;
+ }
+}
+
+/// \brief Adds a conversion function template specialization
+/// candidate to the overload set, using template argument deduction
+/// to deduce the template arguments of the conversion function
+/// template from the type that we are converting to (C++
+/// [temp.deduct.conv]).
+void
+Sema::AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
+ DeclAccessPair FoundDecl,
+ CXXRecordDecl *ActingDC,
+ Expr *From, QualType ToType,
+ OverloadCandidateSet &CandidateSet,
+ bool AllowObjCConversionOnExplicit) {
+ assert(isa<CXXConversionDecl>(FunctionTemplate->getTemplatedDecl()) &&
+ "Only conversion function templates permitted here");
+
+ if (!CandidateSet.isNewCandidate(FunctionTemplate))
+ return;
+
+ TemplateDeductionInfo Info(CandidateSet.getLocation());
+ CXXConversionDecl *Specialization = nullptr;
+ if (TemplateDeductionResult Result
+ = DeduceTemplateArguments(FunctionTemplate, ToType,
+ Specialization, Info)) {
+ OverloadCandidate &Candidate = CandidateSet.addCandidate();
+ Candidate.FoundDecl = FoundDecl;
+ Candidate.Function = FunctionTemplate->getTemplatedDecl();
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_deduction;
+ Candidate.IsSurrogate = false;
+ Candidate.IgnoreObjectArgument = false;
+ Candidate.ExplicitCallArguments = 1;
+ Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result,
+ Info);
+ return;
+ }
+
+ // Add the conversion function template specialization produced by
+ // template argument deduction as a candidate.
+ assert(Specialization && "Missing function template specialization?");
+ AddConversionCandidate(Specialization, FoundDecl, ActingDC, From, ToType,
+ CandidateSet, AllowObjCConversionOnExplicit);
+}
+
+/// AddSurrogateCandidate - Adds a "surrogate" candidate function that
+/// converts the given @c Object to a function pointer via the
+/// conversion function @c Conversion, and then attempts to call it
+/// with the given arguments (C++ [over.call.object]p2-4). Proto is
+/// the type of function that we'll eventually be calling.
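+///
+/// For example (an illustrative sketch only):
+/// @code
+///   struct S { typedef int (*Fn)(double); operator Fn(); };
+///   S s;
+///   s(1.0); // surrogate candidate: effectively s.operator Fn()(1.0)
+/// @endcode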
+void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
+ DeclAccessPair FoundDecl,
+ CXXRecordDecl *ActingContext,
+ const FunctionProtoType *Proto,
+ Expr *Object,
+ ArrayRef<Expr *> Args,
+ OverloadCandidateSet& CandidateSet) {
+ if (!CandidateSet.isNewCandidate(Conversion))
+ return;
+
+ // Overload resolution is always an unevaluated context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+
+ OverloadCandidate &Candidate = CandidateSet.addCandidate(Args.size() + 1);
+ Candidate.FoundDecl = FoundDecl;
+ Candidate.Function = nullptr;
+ Candidate.Surrogate = Conversion;
+ Candidate.Viable = true;
+ Candidate.IsSurrogate = true;
+ Candidate.IgnoreObjectArgument = false;
+ Candidate.ExplicitCallArguments = Args.size();
+
+ // Determine the implicit conversion sequence for the implicit
+ // object parameter.
+ ImplicitConversionSequence ObjectInit = TryObjectArgumentInitialization(
+ *this, CandidateSet.getLocation(), Object->getType(),
+ Object->Classify(Context), Conversion, ActingContext);
+ if (ObjectInit.isBad()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_conversion;
+ Candidate.Conversions[0] = ObjectInit;
+ return;
+ }
+
+ // The first conversion is actually a user-defined conversion whose
+ // first conversion is ObjectInit's standard conversion (which is
+ // effectively a reference binding). Record it as such.
+ Candidate.Conversions[0].setUserDefined();
+ Candidate.Conversions[0].UserDefined.Before = ObjectInit.Standard;
+ Candidate.Conversions[0].UserDefined.EllipsisConversion = false;
+ Candidate.Conversions[0].UserDefined.HadMultipleCandidates = false;
+ Candidate.Conversions[0].UserDefined.ConversionFunction = Conversion;
+ Candidate.Conversions[0].UserDefined.FoundConversionFunction = FoundDecl;
+ Candidate.Conversions[0].UserDefined.After
+ = Candidate.Conversions[0].UserDefined.Before;
+ Candidate.Conversions[0].UserDefined.After.setAsIdentityConversion();
+
+  // Find the number of parameters in the target function prototype.
+ unsigned NumParams = Proto->getNumParams();
+
+ // (C++ 13.3.2p2): A candidate function having fewer than m
+ // parameters is viable only if it has an ellipsis in its parameter
+ // list (8.3.5).
+ if (Args.size() > NumParams && !Proto->isVariadic()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_too_many_arguments;
+ return;
+ }
+
+ // Function types don't have any default arguments, so just check if
+ // we have enough arguments.
+ if (Args.size() < NumParams) {
+ // Not enough arguments.
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_too_few_arguments;
+ return;
+ }
+
+ // Determine the implicit conversion sequences for each of the
+ // arguments.
+ for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
+ if (ArgIdx < NumParams) {
+ // (C++ 13.3.2p3): for F to be a viable function, there shall
+ // exist for each argument an implicit conversion sequence
+ // (13.3.3.1) that converts that argument to the corresponding
+ // parameter of F.
+ QualType ParamType = Proto->getParamType(ArgIdx);
+ Candidate.Conversions[ArgIdx + 1]
+ = TryCopyInitialization(*this, Args[ArgIdx], ParamType,
+ /*SuppressUserConversions=*/false,
+ /*InOverloadResolution=*/false,
+ /*AllowObjCWritebackConversion=*/
+ getLangOpts().ObjCAutoRefCount);
+ if (Candidate.Conversions[ArgIdx + 1].isBad()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_conversion;
+ return;
+ }
+ } else {
+ // (C++ 13.3.2p2): For the purposes of overload resolution, any
+ // argument for which there is no corresponding parameter is
+      // considered to "match the ellipsis" (C++ 13.3.3.1.3).
+ Candidate.Conversions[ArgIdx + 1].setEllipsis();
+ }
+ }
+
+ if (EnableIfAttr *FailedAttr = CheckEnableIf(Conversion, None)) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_enable_if;
+ Candidate.DeductionFailure.Data = FailedAttr;
+ return;
+ }
+}
+
+/// \brief Add overload candidates for overloaded operators that are
+/// member functions.
+///
+/// Add the overloaded operator candidates that are member functions
+/// for the operator Op that was used in an operator expression such
+/// as "x Op y". , Args/NumArgs provides the operator arguments, and
+/// CandidateSet will store the added overload candidates. (C++
+/// [over.match.oper]).
+void Sema::AddMemberOperatorCandidates(OverloadedOperatorKind Op,
+ SourceLocation OpLoc,
+ ArrayRef<Expr *> Args,
+ OverloadCandidateSet& CandidateSet,
+ SourceRange OpRange) {
+ DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op);
+
+ // C++ [over.match.oper]p3:
+ // For a unary operator @ with an operand of a type whose
+ // cv-unqualified version is T1, and for a binary operator @ with
+ // a left operand of a type whose cv-unqualified version is T1 and
+ // a right operand of a type whose cv-unqualified version is T2,
+ // three sets of candidate functions, designated member
+ // candidates, non-member candidates and built-in candidates, are
+ // constructed as follows:
+ QualType T1 = Args[0]->getType();
+
+ // -- If T1 is a complete class type or a class currently being
+ // defined, the set of member candidates is the result of the
+ // qualified lookup of T1::operator@ (13.3.1.1.1); otherwise,
+ // the set of member candidates is empty.
+ if (const RecordType *T1Rec = T1->getAs<RecordType>()) {
+ // Complete the type if it can be completed.
+ if (!isCompleteType(OpLoc, T1) && !T1Rec->isBeingDefined())
+ return;
+ // If the type is neither complete nor being defined, bail out now.
+ if (!T1Rec->getDecl()->getDefinition())
+ return;
+
+ LookupResult Operators(*this, OpName, OpLoc, LookupOrdinaryName);
+ LookupQualifiedName(Operators, T1Rec->getDecl());
+ Operators.suppressDiagnostics();
+
+ for (LookupResult::iterator Oper = Operators.begin(),
+ OperEnd = Operators.end();
+ Oper != OperEnd;
+ ++Oper)
+ AddMethodCandidate(Oper.getPair(), Args[0]->getType(),
+ Args[0]->Classify(Context),
+ Args.slice(1),
+ CandidateSet,
+ /* SuppressUserConversions = */ false);
+ }
+}
+
+/// AddBuiltinCandidate - Add a candidate for a built-in
+/// operator. ResultTy and ParamTys are the result and parameter types
+/// of the built-in candidate, respectively. Args and NumArgs are the
+/// arguments being passed to the candidate. IsAssignmentOperator
+/// should be true when this built-in candidate is an assignment
+/// operator. NumContextualBoolArguments is the number of arguments
+/// (at the beginning of the argument list) that will be contextually
+/// converted to bool.
+void Sema::AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
+ ArrayRef<Expr *> Args,
+ OverloadCandidateSet& CandidateSet,
+ bool IsAssignmentOperator,
+ unsigned NumContextualBoolArguments) {
+ // Overload resolution is always an unevaluated context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+
+ // Add this candidate
+ OverloadCandidate &Candidate = CandidateSet.addCandidate(Args.size());
+ Candidate.FoundDecl = DeclAccessPair::make(nullptr, AS_none);
+ Candidate.Function = nullptr;
+ Candidate.IsSurrogate = false;
+ Candidate.IgnoreObjectArgument = false;
+ Candidate.BuiltinTypes.ResultTy = ResultTy;
+ for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx)
+ Candidate.BuiltinTypes.ParamTypes[ArgIdx] = ParamTys[ArgIdx];
+
+ // Determine the implicit conversion sequences for each of the
+ // arguments.
+ Candidate.Viable = true;
+ Candidate.ExplicitCallArguments = Args.size();
+ for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
+ // C++ [over.match.oper]p4:
+ // For the built-in assignment operators, conversions of the
+ // left operand are restricted as follows:
+ // -- no temporaries are introduced to hold the left operand, and
+ // -- no user-defined conversions are applied to the left
+ // operand to achieve a type match with the left-most
+ // parameter of a built-in candidate.
+ //
+ // We block these conversions by turning off user-defined
+ // conversions, since that is the only way that initialization of
+ // a reference to a non-class type can occur from something that
+ // is not of the same type.
+ if (ArgIdx < NumContextualBoolArguments) {
+ assert(ParamTys[ArgIdx] == Context.BoolTy &&
+ "Contextual conversion to bool requires bool type");
+ Candidate.Conversions[ArgIdx]
+ = TryContextuallyConvertToBool(*this, Args[ArgIdx]);
+ } else {
+ Candidate.Conversions[ArgIdx]
+ = TryCopyInitialization(*this, Args[ArgIdx], ParamTys[ArgIdx],
+ ArgIdx == 0 && IsAssignmentOperator,
+ /*InOverloadResolution=*/false,
+ /*AllowObjCWritebackConversion=*/
+ getLangOpts().ObjCAutoRefCount);
+ }
+ if (Candidate.Conversions[ArgIdx].isBad()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_conversion;
+ break;
+ }
+ }
+}
+
+namespace {
+
+/// BuiltinCandidateTypeSet - A set of types that will be used for the
+/// candidate operator functions for built-in operators (C++
+/// [over.built]). The types are separated into pointer types and
+/// enumeration types.
+class BuiltinCandidateTypeSet {
+ /// TypeSet - A set of types.
+ typedef llvm::SmallPtrSet<QualType, 8> TypeSet;
+
+ /// PointerTypes - The set of pointer types that will be used in the
+ /// built-in candidates.
+ TypeSet PointerTypes;
+
+ /// MemberPointerTypes - The set of member pointer types that will be
+ /// used in the built-in candidates.
+ TypeSet MemberPointerTypes;
+
+ /// EnumerationTypes - The set of enumeration types that will be
+ /// used in the built-in candidates.
+ TypeSet EnumerationTypes;
+
+ /// \brief The set of vector types that will be used in the built-in
+ /// candidates.
+ TypeSet VectorTypes;
+
+  /// \brief A flag indicating whether non-record types are viable candidates.
+ bool HasNonRecordTypes;
+
+ /// \brief A flag indicating whether either arithmetic or enumeration types
+ /// were present in the candidate set.
+ bool HasArithmeticOrEnumeralTypes;
+
+ /// \brief A flag indicating whether the nullptr type was present in the
+ /// candidate set.
+ bool HasNullPtrType;
+
+ /// Sema - The semantic analysis instance where we are building the
+ /// candidate type set.
+ Sema &SemaRef;
+
+ /// Context - The AST context in which we will build the type sets.
+ ASTContext &Context;
+
+ bool AddPointerWithMoreQualifiedTypeVariants(QualType Ty,
+ const Qualifiers &VisibleQuals);
+ bool AddMemberPointerWithMoreQualifiedTypeVariants(QualType Ty);
+
+public:
+ /// iterator - Iterates through the types that are part of the set.
+ typedef TypeSet::iterator iterator;
+
+ BuiltinCandidateTypeSet(Sema &SemaRef)
+ : HasNonRecordTypes(false),
+ HasArithmeticOrEnumeralTypes(false),
+ HasNullPtrType(false),
+ SemaRef(SemaRef),
+ Context(SemaRef.Context) { }
+
+ void AddTypesConvertedFrom(QualType Ty,
+ SourceLocation Loc,
+ bool AllowUserConversions,
+ bool AllowExplicitConversions,
+ const Qualifiers &VisibleTypeConversionsQuals);
+
+  /// pointer_begin - First pointer type found.
+  iterator pointer_begin() { return PointerTypes.begin(); }
+
+  /// pointer_end - Past the last pointer type found.
+  iterator pointer_end() { return PointerTypes.end(); }
+
+  /// member_pointer_begin - First member pointer type found.
+  iterator member_pointer_begin() { return MemberPointerTypes.begin(); }
+
+  /// member_pointer_end - Past the last member pointer type found.
+  iterator member_pointer_end() { return MemberPointerTypes.end(); }
+
+  /// enumeration_begin - First enumeration type found.
+  iterator enumeration_begin() { return EnumerationTypes.begin(); }
+
+  /// enumeration_end - Past the last enumeration type found.
+  iterator enumeration_end() { return EnumerationTypes.end(); }
+
+ iterator vector_begin() { return VectorTypes.begin(); }
+ iterator vector_end() { return VectorTypes.end(); }
+
+ bool hasNonRecordTypes() { return HasNonRecordTypes; }
+ bool hasArithmeticOrEnumeralTypes() { return HasArithmeticOrEnumeralTypes; }
+ bool hasNullPtrType() const { return HasNullPtrType; }
+};
+
+} // end anonymous namespace
+
+/// AddPointerWithMoreQualifiedTypeVariants - Add the pointer type @p Ty to
+/// the set of pointer types along with any more-qualified variants of
+/// that type. For example, if @p Ty is "int const *", this routine
+/// will add "int const *", "int const volatile *", "int const
+/// restrict *", and "int const volatile restrict *" to the set of
+/// pointer types. Returns true if the add of @p Ty itself succeeded,
+/// false otherwise.
+///
+/// FIXME: what to do about extended qualifiers?
+bool
+BuiltinCandidateTypeSet::AddPointerWithMoreQualifiedTypeVariants(QualType Ty,
+ const Qualifiers &VisibleQuals) {
+
+ // Insert this type.
+ if (!PointerTypes.insert(Ty).second)
+ return false;
+
+ QualType PointeeTy;
+ const PointerType *PointerTy = Ty->getAs<PointerType>();
+ bool buildObjCPtr = false;
+ if (!PointerTy) {
+ const ObjCObjectPointerType *PTy = Ty->castAs<ObjCObjectPointerType>();
+ PointeeTy = PTy->getPointeeType();
+ buildObjCPtr = true;
+ } else {
+ PointeeTy = PointerTy->getPointeeType();
+ }
+
+ // Don't add qualified variants of arrays. For one, they're not allowed
+ // (the qualifier would sink to the element type), and for another, the
+ // only overload situation where it matters is subscript or pointer +- int,
+ // and those shouldn't have qualifier variants anyway.
+ if (PointeeTy->isArrayType())
+ return true;
+
+ unsigned BaseCVR = PointeeTy.getCVRQualifiers();
+ bool hasVolatile = VisibleQuals.hasVolatile();
+ bool hasRestrict = VisibleQuals.hasRestrict();
+
+ // Iterate through all strict supersets of BaseCVR.
+ for (unsigned CVR = BaseCVR+1; CVR <= Qualifiers::CVRMask; ++CVR) {
+ if ((CVR | BaseCVR) != CVR) continue;
+ // Skip over volatile if no volatile found anywhere in the types.
+ if ((CVR & Qualifiers::Volatile) && !hasVolatile) continue;
+
+ // Skip over restrict if no restrict found anywhere in the types, or if
+ // the type cannot be restrict-qualified.
+ if ((CVR & Qualifiers::Restrict) &&
+ (!hasRestrict ||
+ (!(PointeeTy->isAnyPointerType() || PointeeTy->isReferenceType()))))
+ continue;
+
+ // Build qualified pointee type.
+ QualType QPointeeTy = Context.getCVRQualifiedType(PointeeTy, CVR);
+
+ // Build qualified pointer type.
+ QualType QPointerTy;
+ if (!buildObjCPtr)
+ QPointerTy = Context.getPointerType(QPointeeTy);
+ else
+ QPointerTy = Context.getObjCObjectPointerType(QPointeeTy);
+
+ // Insert qualified pointer type.
+ PointerTypes.insert(QPointerTy);
+ }
+
+ return true;
+}
+
+/// AddMemberPointerWithMoreQualifiedTypeVariants - Add the member pointer
+/// type @p Ty to the set of member pointer types along with any
+/// more-qualified variants of that type. For example, if @p Ty is
+/// "int const A::*", this routine will add "int const A::*",
+/// "int const volatile A::*", and so on for each more-qualified variant,
+/// to the set of member pointer types. Returns true if the add of @p Ty
+/// itself succeeded, false otherwise.
+///
+/// FIXME: what to do about extended qualifiers?
+bool
+BuiltinCandidateTypeSet::AddMemberPointerWithMoreQualifiedTypeVariants(
+ QualType Ty) {
+ // Insert this type.
+ if (!MemberPointerTypes.insert(Ty).second)
+ return false;
+
+ const MemberPointerType *PointerTy = Ty->getAs<MemberPointerType>();
+ assert(PointerTy && "type was not a member pointer type!");
+
+ QualType PointeeTy = PointerTy->getPointeeType();
+ // Don't add qualified variants of arrays. For one, they're not allowed
+ // (the qualifier would sink to the element type), and for another, the
+ // only overload situation where it matters is subscript or pointer +- int,
+ // and those shouldn't have qualifier variants anyway.
+ if (PointeeTy->isArrayType())
+ return true;
+ const Type *ClassTy = PointerTy->getClass();
+
+ // Iterate through all strict supersets of the pointee type's CVR
+ // qualifiers.
+ unsigned BaseCVR = PointeeTy.getCVRQualifiers();
+ for (unsigned CVR = BaseCVR+1; CVR <= Qualifiers::CVRMask; ++CVR) {
+ if ((CVR | BaseCVR) != CVR) continue;
+
+ QualType QPointeeTy = Context.getCVRQualifiedType(PointeeTy, CVR);
+ MemberPointerTypes.insert(
+ Context.getMemberPointerType(QPointeeTy, ClassTy));
+ }
+
+ return true;
+}
+
+/// AddTypesConvertedFrom - Add each of the types to which the type @p
+/// Ty can be implicitly converted to the given set of @p Types. We're
+/// primarily interested in pointer types and enumeration types. We also
+/// take member pointer types, for the conditional operator.
+/// AllowUserConversions is true if we should look at the conversion
+/// functions of a class type, and AllowExplicitConversions if we
+/// should also include the explicit conversion functions of a class
+/// type.
+void
+BuiltinCandidateTypeSet::AddTypesConvertedFrom(QualType Ty,
+ SourceLocation Loc,
+ bool AllowUserConversions,
+ bool AllowExplicitConversions,
+ const Qualifiers &VisibleQuals) {
+ // Only deal with canonical types.
+ Ty = Context.getCanonicalType(Ty);
+
+ // Look through reference types; they aren't part of the type of an
+ // expression for the purposes of conversions.
+ if (const ReferenceType *RefTy = Ty->getAs<ReferenceType>())
+ Ty = RefTy->getPointeeType();
+
+ // If we're dealing with an array type, decay to the pointer.
+ if (Ty->isArrayType())
+ Ty = SemaRef.Context.getArrayDecayedType(Ty);
+
+ // Otherwise, we don't care about qualifiers on the type.
+ Ty = Ty.getLocalUnqualifiedType();
+
+ // Flag if we ever add a non-record type.
+ const RecordType *TyRec = Ty->getAs<RecordType>();
+ HasNonRecordTypes = HasNonRecordTypes || !TyRec;
+
+ // Flag if we encounter an arithmetic type.
+ HasArithmeticOrEnumeralTypes =
+ HasArithmeticOrEnumeralTypes || Ty->isArithmeticType();
+
+ if (Ty->isObjCIdType() || Ty->isObjCClassType())
+ PointerTypes.insert(Ty);
+ else if (Ty->getAs<PointerType>() || Ty->getAs<ObjCObjectPointerType>()) {
+ // Insert our type, and its more-qualified variants, into the set
+ // of types.
+ if (!AddPointerWithMoreQualifiedTypeVariants(Ty, VisibleQuals))
+ return;
+ } else if (Ty->isMemberPointerType()) {
+ // Member pointers are far easier, since the pointee can't be converted.
+ if (!AddMemberPointerWithMoreQualifiedTypeVariants(Ty))
+ return;
+ } else if (Ty->isEnumeralType()) {
+ HasArithmeticOrEnumeralTypes = true;
+ EnumerationTypes.insert(Ty);
+ } else if (Ty->isVectorType()) {
+ // We treat vector types as arithmetic types in many contexts as an
+ // extension.
+ HasArithmeticOrEnumeralTypes = true;
+ VectorTypes.insert(Ty);
+ } else if (Ty->isNullPtrType()) {
+ HasNullPtrType = true;
+ } else if (AllowUserConversions && TyRec) {
+ // No conversion functions in incomplete types.
+ if (!SemaRef.isCompleteType(Loc, Ty))
+ return;
+
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(TyRec->getDecl());
+ for (NamedDecl *D : ClassDecl->getVisibleConversionFunctions()) {
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+
+ // Skip conversion function templates; they don't tell us anything
+ // about which builtin types we can convert to.
+ if (isa<FunctionTemplateDecl>(D))
+ continue;
+
+ CXXConversionDecl *Conv = cast<CXXConversionDecl>(D);
+ if (AllowExplicitConversions || !Conv->isExplicit()) {
+ AddTypesConvertedFrom(Conv->getConversionType(), Loc, false, false,
+ VisibleQuals);
+ }
+ }
+ }
+}
+
+/// \brief Helper function for AddBuiltinOperatorCandidates() that adds
+/// the volatile- and non-volatile-qualified assignment operators for the
+/// given type to the candidate set.
+static void AddBuiltinAssignmentOperatorCandidates(Sema &S,
+ QualType T,
+ ArrayRef<Expr *> Args,
+ OverloadCandidateSet &CandidateSet) {
+ QualType ParamTypes[2];
+
+ // T& operator=(T&, T)
+ ParamTypes[0] = S.Context.getLValueReferenceType(T);
+ ParamTypes[1] = T;
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
+ /*IsAssignmentOperator=*/true);
+
+ if (!S.Context.getCanonicalType(T).isVolatileQualified()) {
+ // volatile T& operator=(volatile T&, T)
+ ParamTypes[0]
+ = S.Context.getLValueReferenceType(S.Context.getVolatileType(T));
+ ParamTypes[1] = T;
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
+ /*IsAssignmentOperator=*/true);
+ }
+}
+
+/// CollectVRQualifiers - This routine returns Volatile/Restrict qualifiers,
+/// if any, found in visible type conversion functions found in ArgExpr's type.
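+/// For example (illustrative): if ArgExpr's class declares
+/// 'operator volatile int *()', the returned qualifiers include
+/// 'volatile'.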
+static Qualifiers CollectVRQualifiers(ASTContext &Context, Expr* ArgExpr) {
+ Qualifiers VRQuals;
+ const RecordType *TyRec;
+ if (const MemberPointerType *RHSMPType =
+ ArgExpr->getType()->getAs<MemberPointerType>())
+ TyRec = RHSMPType->getClass()->getAs<RecordType>();
+ else
+ TyRec = ArgExpr->getType()->getAs<RecordType>();
+ if (!TyRec) {
+ // Just to be safe, assume the worst case.
+ VRQuals.addVolatile();
+ VRQuals.addRestrict();
+ return VRQuals;
+ }
+
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(TyRec->getDecl());
+ if (!ClassDecl->hasDefinition())
+ return VRQuals;
+
+ for (NamedDecl *D : ClassDecl->getVisibleConversionFunctions()) {
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+ if (CXXConversionDecl *Conv = dyn_cast<CXXConversionDecl>(D)) {
+ QualType CanTy = Context.getCanonicalType(Conv->getConversionType());
+ if (const ReferenceType *ResTypeRef = CanTy->getAs<ReferenceType>())
+ CanTy = ResTypeRef->getPointeeType();
+ // Need to go down the pointer/mempointer chain and add qualifiers
+      // as we see them.
+ bool done = false;
+ while (!done) {
+ if (CanTy.isRestrictQualified())
+ VRQuals.addRestrict();
+ if (const PointerType *ResTypePtr = CanTy->getAs<PointerType>())
+ CanTy = ResTypePtr->getPointeeType();
+ else if (const MemberPointerType *ResTypeMPtr =
+ CanTy->getAs<MemberPointerType>())
+ CanTy = ResTypeMPtr->getPointeeType();
+ else
+ done = true;
+ if (CanTy.isVolatileQualified())
+ VRQuals.addVolatile();
+ if (VRQuals.hasRestrict() && VRQuals.hasVolatile())
+ return VRQuals;
+ }
+ }
+ }
+ return VRQuals;
+}
+
+namespace {
+
+/// \brief Helper class to manage the addition of builtin operator overload
+/// candidates. It provides shared state and utility methods used throughout
+/// the process, as well as a helper method to add each group of builtin
+/// operator overloads from the standard to a candidate set.
+class BuiltinOperatorOverloadBuilder {
+ // Common instance state available to all overload candidate addition methods.
+ Sema &S;
+ ArrayRef<Expr *> Args;
+ Qualifiers VisibleTypeConversionsQuals;
+ bool HasArithmeticOrEnumeralCandidateType;
+ SmallVectorImpl<BuiltinCandidateTypeSet> &CandidateTypes;
+ OverloadCandidateSet &CandidateSet;
+
+  // Define some constants used to index and iterate over the arithmetic types
+  // provided via the getArithmeticType() method below.
+  // The "promoted arithmetic types" are the arithmetic
+  // types that are preserved by promotion (C++ [over.built]p2).
+ static const unsigned FirstIntegralType = 3;
+ static const unsigned LastIntegralType = 20;
+ static const unsigned FirstPromotedIntegralType = 3,
+ LastPromotedIntegralType = 11;
+ static const unsigned FirstPromotedArithmeticType = 0,
+ LastPromotedArithmeticType = 11;
+ static const unsigned NumArithmeticTypes = 20;
+
+ /// \brief Get the canonical type for a given arithmetic type index.
+ CanQualType getArithmeticType(unsigned index) {
+ assert(index < NumArithmeticTypes);
+ static CanQualType ASTContext::* const
+ ArithmeticTypes[NumArithmeticTypes] = {
+ // Start of promoted types.
+ &ASTContext::FloatTy,
+ &ASTContext::DoubleTy,
+ &ASTContext::LongDoubleTy,
+
+ // Start of integral types.
+ &ASTContext::IntTy,
+ &ASTContext::LongTy,
+ &ASTContext::LongLongTy,
+ &ASTContext::Int128Ty,
+ &ASTContext::UnsignedIntTy,
+ &ASTContext::UnsignedLongTy,
+ &ASTContext::UnsignedLongLongTy,
+ &ASTContext::UnsignedInt128Ty,
+ // End of promoted types.
+
+ &ASTContext::BoolTy,
+ &ASTContext::CharTy,
+ &ASTContext::WCharTy,
+ &ASTContext::Char16Ty,
+ &ASTContext::Char32Ty,
+ &ASTContext::SignedCharTy,
+ &ASTContext::ShortTy,
+ &ASTContext::UnsignedCharTy,
+ &ASTContext::UnsignedShortTy,
+ // End of integral types.
+ // FIXME: What about complex? What about half?
+ };
+ return S.Context.*ArithmeticTypes[index];
+ }
+
+  /// \brief Gets the canonical type resulting from the usual arithmetic
+  /// conversions for the given arithmetic types.
+ CanQualType getUsualArithmeticConversions(unsigned L, unsigned R) {
+ // Accelerator table for performing the usual arithmetic conversions.
+ // The rules are basically:
+ // - if either is floating-point, use the wider floating-point
+ // - if same signedness, use the higher rank
+ // - if same size, use unsigned of the higher rank
+ // - use the larger type
+ // These rules, together with the axiom that higher ranks are
+ // never smaller, are sufficient to precompute all of these results
+ // *except* when dealing with signed types of higher rank.
+ // (we could precompute SLL x UI for all known platforms, but it's
+ // better not to make any assumptions).
+ // We assume that int128 has a higher rank than long long on all platforms.
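+    //
+    // Two illustrative cases, assuming an LP64 target:
+    //  - SL x UI hits 'Dep'; long (64 bits) is wider than unsigned int
+    //    (32 bits), so the signed type, long, is the result.
+    //  - SLL x UL hits 'Dep'; both are 64 bits wide, so the result is
+    //    the unsigned type of the signed type's rank: unsigned long long.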
+ enum PromotedType {
+ Dep=-1,
+ Flt, Dbl, LDbl, SI, SL, SLL, S128, UI, UL, ULL, U128
+ };
+ static const PromotedType ConversionsTable[LastPromotedArithmeticType]
+ [LastPromotedArithmeticType] = {
+/* Flt*/ { Flt, Dbl, LDbl, Flt, Flt, Flt, Flt, Flt, Flt, Flt, Flt },
+/* Dbl*/ { Dbl, Dbl, LDbl, Dbl, Dbl, Dbl, Dbl, Dbl, Dbl, Dbl, Dbl },
+/*LDbl*/ { LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl },
+/* SI*/ { Flt, Dbl, LDbl, SI, SL, SLL, S128, UI, UL, ULL, U128 },
+/* SL*/ { Flt, Dbl, LDbl, SL, SL, SLL, S128, Dep, UL, ULL, U128 },
+/* SLL*/ { Flt, Dbl, LDbl, SLL, SLL, SLL, S128, Dep, Dep, ULL, U128 },
+/*S128*/ { Flt, Dbl, LDbl, S128, S128, S128, S128, S128, S128, S128, U128 },
+/* UI*/ { Flt, Dbl, LDbl, UI, Dep, Dep, S128, UI, UL, ULL, U128 },
+/* UL*/ { Flt, Dbl, LDbl, UL, UL, Dep, S128, UL, UL, ULL, U128 },
+/* ULL*/ { Flt, Dbl, LDbl, ULL, ULL, ULL, S128, ULL, ULL, ULL, U128 },
+/*U128*/ { Flt, Dbl, LDbl, U128, U128, U128, U128, U128, U128, U128, U128 },
+ };
+
+ assert(L < LastPromotedArithmeticType);
+ assert(R < LastPromotedArithmeticType);
+ int Idx = ConversionsTable[L][R];
+
+ // Fast path: the table gives us a concrete answer.
+ if (Idx != Dep) return getArithmeticType(Idx);
+
+ // Slow path: we need to compare widths.
+ // An invariant is that the signed type has higher rank.
+ CanQualType LT = getArithmeticType(L),
+ RT = getArithmeticType(R);
+ unsigned LW = S.Context.getIntWidth(LT),
+ RW = S.Context.getIntWidth(RT);
+
+ // If they're different widths, use the signed type.
+ if (LW > RW) return LT;
+ else if (LW < RW) return RT;
+
+ // Otherwise, use the unsigned type of the signed type's rank.
+ if (L == SL || R == SL) return S.Context.UnsignedLongTy;
+ assert(L == SLL || R == SLL);
+ return S.Context.UnsignedLongLongTy;
+ }
+
+ /// \brief Helper method to factor out the common pattern of adding overloads
+ /// for '++' and '--' builtin operators.
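+  /// For example, for CandidateTy = int this adds, illustratively:
+  ///   int &operator++(int &);     // prefix form (Args.size() == 1)
+  ///   int operator++(int &, int); // postfix form (Args.size() == 2)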
+ void addPlusPlusMinusMinusStyleOverloads(QualType CandidateTy,
+ bool HasVolatile,
+ bool HasRestrict) {
+ QualType ParamTypes[2] = {
+ S.Context.getLValueReferenceType(CandidateTy),
+ S.Context.IntTy
+ };
+
+ // Non-volatile version.
+ if (Args.size() == 1)
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet);
+ else
+ S.AddBuiltinCandidate(CandidateTy, ParamTypes, Args, CandidateSet);
+
+    // Use a heuristic to reduce the number of builtin candidates in the set:
+    // add the volatile version only if there are conversions to a volatile
+    // type.
+ if (HasVolatile) {
+ ParamTypes[0] =
+ S.Context.getLValueReferenceType(
+ S.Context.getVolatileType(CandidateTy));
+ if (Args.size() == 1)
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet);
+ else
+ S.AddBuiltinCandidate(CandidateTy, ParamTypes, Args, CandidateSet);
+ }
+
+ // Add restrict version only if there are conversions to a restrict type
+ // and our candidate type is a non-restrict-qualified pointer.
+ if (HasRestrict && CandidateTy->isAnyPointerType() &&
+ !CandidateTy.isRestrictQualified()) {
+ ParamTypes[0]
+ = S.Context.getLValueReferenceType(
+ S.Context.getCVRQualifiedType(CandidateTy, Qualifiers::Restrict));
+ if (Args.size() == 1)
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet);
+ else
+ S.AddBuiltinCandidate(CandidateTy, ParamTypes, Args, CandidateSet);
+
+ if (HasVolatile) {
+ ParamTypes[0]
+ = S.Context.getLValueReferenceType(
+ S.Context.getCVRQualifiedType(CandidateTy,
+ (Qualifiers::Volatile |
+ Qualifiers::Restrict)));
+ if (Args.size() == 1)
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet);
+ else
+ S.AddBuiltinCandidate(CandidateTy, ParamTypes, Args, CandidateSet);
+ }
+ }
+
+ }
+
+public:
+ BuiltinOperatorOverloadBuilder(
+ Sema &S, ArrayRef<Expr *> Args,
+ Qualifiers VisibleTypeConversionsQuals,
+ bool HasArithmeticOrEnumeralCandidateType,
+ SmallVectorImpl<BuiltinCandidateTypeSet> &CandidateTypes,
+ OverloadCandidateSet &CandidateSet)
+ : S(S), Args(Args),
+ VisibleTypeConversionsQuals(VisibleTypeConversionsQuals),
+ HasArithmeticOrEnumeralCandidateType(
+ HasArithmeticOrEnumeralCandidateType),
+ CandidateTypes(CandidateTypes),
+ CandidateSet(CandidateSet) {
+ // Validate some of our static helper constants in debug builds.
+ assert(getArithmeticType(FirstPromotedIntegralType) == S.Context.IntTy &&
+ "Invalid first promoted integral type");
+ assert(getArithmeticType(LastPromotedIntegralType - 1)
+ == S.Context.UnsignedInt128Ty &&
+ "Invalid last promoted integral type");
+ assert(getArithmeticType(FirstPromotedArithmeticType)
+ == S.Context.FloatTy &&
+ "Invalid first promoted arithmetic type");
+ assert(getArithmeticType(LastPromotedArithmeticType - 1)
+ == S.Context.UnsignedInt128Ty &&
+ "Invalid last promoted arithmetic type");
+ }
+
+ // C++ [over.built]p3:
+ //
+ // For every pair (T, VQ), where T is an arithmetic type, and VQ
+ // is either volatile or empty, there exist candidate operator
+ // functions of the form
+ //
+ // VQ T& operator++(VQ T&);
+ // T operator++(VQ T&, int);
+ //
+ // C++ [over.built]p4:
+ //
+ // For every pair (T, VQ), where T is an arithmetic type other
+ // than bool, and VQ is either volatile or empty, there exist
+ // candidate operator functions of the form
+ //
+ // VQ T& operator--(VQ T&);
+ // T operator--(VQ T&, int);
+ void addPlusPlusMinusMinusArithmeticOverloads(OverloadedOperatorKind Op) {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
+
+    for (unsigned Arith = 0; Arith < NumArithmeticTypes; ++Arith) {
+      // C++ [over.built]p4 excludes bool for operator--. Note that index 0
+      // of the arithmetic types is float, not bool, so starting the loop at
+      // 1 for OO_MinusMinus would skip float rather than bool.
+      if (Op == OO_MinusMinus &&
+          getArithmeticType(Arith) == S.Context.BoolTy)
+        continue;
+      addPlusPlusMinusMinusStyleOverloads(
+        getArithmeticType(Arith),
+        VisibleTypeConversionsQuals.hasVolatile(),
+        VisibleTypeConversionsQuals.hasRestrict());
+    }
+ }
+
+ // C++ [over.built]p5:
+ //
+ // For every pair (T, VQ), where T is a cv-qualified or
+ // cv-unqualified object type, and VQ is either volatile or
+ // empty, there exist candidate operator functions of the form
+ //
+ // T*VQ& operator++(T*VQ&);
+ // T*VQ& operator--(T*VQ&);
+ // T* operator++(T*VQ&, int);
+ // T* operator--(T*VQ&, int);
+ void addPlusPlusMinusMinusPointerOverloads() {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[0].pointer_begin(),
+ PtrEnd = CandidateTypes[0].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ // Skip pointer types that aren't pointers to object types.
+ if (!(*Ptr)->getPointeeType()->isObjectType())
+ continue;
+
+ addPlusPlusMinusMinusStyleOverloads(*Ptr,
+ (!(*Ptr).isVolatileQualified() &&
+ VisibleTypeConversionsQuals.hasVolatile()),
+ (!(*Ptr).isRestrictQualified() &&
+ VisibleTypeConversionsQuals.hasRestrict()));
+ }
+ }
+
+ // C++ [over.built]p6:
+ // For every cv-qualified or cv-unqualified object type T, there
+ // exist candidate operator functions of the form
+ //
+ // T& operator*(T*);
+ //
+ // C++ [over.built]p7:
+ // For every function type T that does not have cv-qualifiers or a
+ // ref-qualifier, there exist candidate operator functions of the form
+ // T& operator*(T*);
+ void addUnaryStarPointerOverloads() {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[0].pointer_begin(),
+ PtrEnd = CandidateTypes[0].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ QualType ParamTy = *Ptr;
+ QualType PointeeTy = ParamTy->getPointeeType();
+ if (!PointeeTy->isObjectType() && !PointeeTy->isFunctionType())
+ continue;
+
+      if (const FunctionProtoType *Proto =
+              PointeeTy->getAs<FunctionProtoType>())
+ if (Proto->getTypeQuals() || Proto->getRefQualifier())
+ continue;
+
+ S.AddBuiltinCandidate(S.Context.getLValueReferenceType(PointeeTy),
+ &ParamTy, Args, CandidateSet);
+ }
+ }
+
+ // C++ [over.built]p9:
+ // For every promoted arithmetic type T, there exist candidate
+ // operator functions of the form
+ //
+ // T operator+(T);
+ // T operator-(T);
+ void addUnaryPlusOrMinusArithmeticOverloads() {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
+
+ for (unsigned Arith = FirstPromotedArithmeticType;
+ Arith < LastPromotedArithmeticType; ++Arith) {
+ QualType ArithTy = getArithmeticType(Arith);
+ S.AddBuiltinCandidate(ArithTy, &ArithTy, Args, CandidateSet);
+ }
+
+ // Extension: We also add these operators for vector types.
+ for (BuiltinCandidateTypeSet::iterator
+ Vec = CandidateTypes[0].vector_begin(),
+ VecEnd = CandidateTypes[0].vector_end();
+ Vec != VecEnd; ++Vec) {
+ QualType VecTy = *Vec;
+ S.AddBuiltinCandidate(VecTy, &VecTy, Args, CandidateSet);
+ }
+ }
+
+ // C++ [over.built]p8:
+ // For every type T, there exist candidate operator functions of
+ // the form
+ //
+ // T* operator+(T*);
+ void addUnaryPlusPointerOverloads() {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[0].pointer_begin(),
+ PtrEnd = CandidateTypes[0].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ QualType ParamTy = *Ptr;
+ S.AddBuiltinCandidate(ParamTy, &ParamTy, Args, CandidateSet);
+ }
+ }
+
+ // C++ [over.built]p10:
+ // For every promoted integral type T, there exist candidate
+ // operator functions of the form
+ //
+ // T operator~(T);
+ void addUnaryTildePromotedIntegralOverloads() {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
+
+ for (unsigned Int = FirstPromotedIntegralType;
+ Int < LastPromotedIntegralType; ++Int) {
+ QualType IntTy = getArithmeticType(Int);
+ S.AddBuiltinCandidate(IntTy, &IntTy, Args, CandidateSet);
+ }
+
+ // Extension: We also add this operator for vector types.
+ for (BuiltinCandidateTypeSet::iterator
+ Vec = CandidateTypes[0].vector_begin(),
+ VecEnd = CandidateTypes[0].vector_end();
+ Vec != VecEnd; ++Vec) {
+ QualType VecTy = *Vec;
+ S.AddBuiltinCandidate(VecTy, &VecTy, Args, CandidateSet);
+ }
+ }
+
+ // C++ [over.match.oper]p16:
+ // For every pointer to member type T, there exist candidate operator
+ // functions of the form
+ //
+ // bool operator==(T,T);
+ // bool operator!=(T,T);
+ void addEqualEqualOrNotEqualMemberPointerOverloads() {
+ /// Set of (canonical) types that we've already handled.
+ llvm::SmallPtrSet<QualType, 8> AddedTypes;
+
+ for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
+ for (BuiltinCandidateTypeSet::iterator
+ MemPtr = CandidateTypes[ArgIdx].member_pointer_begin(),
+ MemPtrEnd = CandidateTypes[ArgIdx].member_pointer_end();
+ MemPtr != MemPtrEnd;
+ ++MemPtr) {
+ // Don't add the same builtin candidate twice.
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)).second)
+ continue;
+
+ QualType ParamTypes[2] = { *MemPtr, *MemPtr };
+ S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args, CandidateSet);
+ }
+ }
+ }
+
+ // C++ [over.built]p15:
+ //
+ // For every T, where T is an enumeration type, a pointer type, or
+ // std::nullptr_t, there exist candidate operator functions of the form
+ //
+ // bool operator<(T, T);
+ // bool operator>(T, T);
+ // bool operator<=(T, T);
+ // bool operator>=(T, T);
+ // bool operator==(T, T);
+ // bool operator!=(T, T);
+ void addRelationalPointerOrEnumeralOverloads() {
+ // C++ [over.match.oper]p3:
+    // [...] the built-in candidates include all of the candidate operator
+ // functions defined in 13.6 that, compared to the given operator, [...]
+ // do not have the same parameter-type-list as any non-template non-member
+ // candidate.
+ //
+ // Note that in practice, this only affects enumeration types because there
+ // aren't any built-in candidates of record type, and a user-defined operator
+ // must have an operand of record or enumeration type. Also, the only other
+ // overloaded operator with enumeration arguments, operator=,
+ // cannot be overloaded for enumeration types, so this is the only place
+ // where we must suppress candidates like this.
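+    //
+    // For example (illustrative): given 'enum E {};' and a user-declared
+    // non-member 'bool operator<(E, E)', the builtin candidate
+    // 'bool operator<(E, E)' is suppressed below.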
+ llvm::DenseSet<std::pair<CanQualType, CanQualType> >
+ UserDefinedBinaryOperators;
+
+ for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
+ if (CandidateTypes[ArgIdx].enumeration_begin() !=
+ CandidateTypes[ArgIdx].enumeration_end()) {
+ for (OverloadCandidateSet::iterator C = CandidateSet.begin(),
+ CEnd = CandidateSet.end();
+ C != CEnd; ++C) {
+ if (!C->Viable || !C->Function || C->Function->getNumParams() != 2)
+ continue;
+
+ if (C->Function->isFunctionTemplateSpecialization())
+ continue;
+
+ QualType FirstParamType =
+ C->Function->getParamDecl(0)->getType().getUnqualifiedType();
+ QualType SecondParamType =
+ C->Function->getParamDecl(1)->getType().getUnqualifiedType();
+
+ // Skip if either parameter isn't of enumeral type.
+ if (!FirstParamType->isEnumeralType() ||
+ !SecondParamType->isEnumeralType())
+ continue;
+
+ // Add this operator to the set of known user-defined operators.
+ UserDefinedBinaryOperators.insert(
+ std::make_pair(S.Context.getCanonicalType(FirstParamType),
+ S.Context.getCanonicalType(SecondParamType)));
+ }
+ }
+ }
+
+ /// Set of (canonical) types that we've already handled.
+ llvm::SmallPtrSet<QualType, 8> AddedTypes;
+
+ for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[ArgIdx].pointer_begin(),
+ PtrEnd = CandidateTypes[ArgIdx].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ // Don't add the same builtin candidate twice.
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)).second)
+ continue;
+
+ QualType ParamTypes[2] = { *Ptr, *Ptr };
+ S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args, CandidateSet);
+ }
+ for (BuiltinCandidateTypeSet::iterator
+ Enum = CandidateTypes[ArgIdx].enumeration_begin(),
+ EnumEnd = CandidateTypes[ArgIdx].enumeration_end();
+ Enum != EnumEnd; ++Enum) {
+ CanQualType CanonType = S.Context.getCanonicalType(*Enum);
+
+ // Don't add the same builtin candidate twice, or if a user defined
+ // candidate exists.
+ if (!AddedTypes.insert(CanonType).second ||
+ UserDefinedBinaryOperators.count(std::make_pair(CanonType,
+ CanonType)))
+ continue;
+
+ QualType ParamTypes[2] = { *Enum, *Enum };
+ S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args, CandidateSet);
+ }
+
+ if (CandidateTypes[ArgIdx].hasNullPtrType()) {
+ CanQualType NullPtrTy = S.Context.getCanonicalType(S.Context.NullPtrTy);
+ if (AddedTypes.insert(NullPtrTy).second &&
+ !UserDefinedBinaryOperators.count(std::make_pair(NullPtrTy,
+ NullPtrTy))) {
+ QualType ParamTypes[2] = { NullPtrTy, NullPtrTy };
+ S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args,
+ CandidateSet);
+ }
+ }
+ }
+ }
+
+ // C++ [over.built]p13:
+ //
+ // For every cv-qualified or cv-unqualified object type T
+ // there exist candidate operator functions of the form
+ //
+ // T* operator+(T*, ptrdiff_t);
+ // T& operator[](T*, ptrdiff_t); [BELOW]
+ // T* operator-(T*, ptrdiff_t);
+ // T* operator+(ptrdiff_t, T*);
+ // T& operator[](ptrdiff_t, T*); [BELOW]
+ //
+ // C++ [over.built]p14:
+ //
+ // For every T, where T is a pointer to object type, there
+ // exist candidate operator functions of the form
+ //
+ // ptrdiff_t operator-(T, T);
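+  //
+  // For example (illustrative), for 'p - q' with int *p, *q this adds
+  //   ptrdiff_t operator-(int*, int*);
+  // alongside int* operator-(int*, ptrdiff_t) from p13.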
+ void addBinaryPlusOrMinusPointerOverloads(OverloadedOperatorKind Op) {
+ /// Set of (canonical) types that we've already handled.
+ llvm::SmallPtrSet<QualType, 8> AddedTypes;
+
+ for (int Arg = 0; Arg < 2; ++Arg) {
+ QualType AsymmetricParamTypes[2] = {
+ S.Context.getPointerDiffType(),
+ S.Context.getPointerDiffType(),
+ };
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[Arg].pointer_begin(),
+ PtrEnd = CandidateTypes[Arg].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ QualType PointeeTy = (*Ptr)->getPointeeType();
+ if (!PointeeTy->isObjectType())
+ continue;
+
+ AsymmetricParamTypes[Arg] = *Ptr;
+ if (Arg == 0 || Op == OO_Plus) {
+ // operator+(T*, ptrdiff_t) or operator-(T*, ptrdiff_t)
+ // T* operator+(ptrdiff_t, T*);
+ S.AddBuiltinCandidate(*Ptr, AsymmetricParamTypes, Args, CandidateSet);
+ }
+ if (Op == OO_Minus) {
+ // ptrdiff_t operator-(T, T);
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)).second)
+ continue;
+
+ QualType ParamTypes[2] = { *Ptr, *Ptr };
+ S.AddBuiltinCandidate(S.Context.getPointerDiffType(), ParamTypes,
+ Args, CandidateSet);
+ }
+ }
+ }
+ }
+
+ // C++ [over.built]p12:
+ //
+ // For every pair of promoted arithmetic types L and R, there
+ // exist candidate operator functions of the form
+ //
+ // LR operator*(L, R);
+ // LR operator/(L, R);
+ // LR operator+(L, R);
+ // LR operator-(L, R);
+ // bool operator<(L, R);
+ // bool operator>(L, R);
+ // bool operator<=(L, R);
+ // bool operator>=(L, R);
+ // bool operator==(L, R);
+ // bool operator!=(L, R);
+ //
+ // where LR is the result of the usual arithmetic conversions
+ // between types L and R.
+ //
+ // C++ [over.built]p24:
+ //
+ // For every pair of promoted arithmetic types L and R, there exist
+ // candidate operator functions of the form
+ //
+ // LR operator?(bool, L, R);
+ //
+ // where LR is the result of the usual arithmetic conversions
+ // between types L and R.
+ // Our candidates ignore the first parameter.
+ void addGenericBinaryArithmeticOverloads(bool isComparison) {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
+
+ for (unsigned Left = FirstPromotedArithmeticType;
+ Left < LastPromotedArithmeticType; ++Left) {
+ for (unsigned Right = FirstPromotedArithmeticType;
+ Right < LastPromotedArithmeticType; ++Right) {
+ QualType LandR[2] = { getArithmeticType(Left),
+ getArithmeticType(Right) };
+ QualType Result =
+ isComparison ? S.Context.BoolTy
+ : getUsualArithmeticConversions(Left, Right);
+ S.AddBuiltinCandidate(Result, LandR, Args, CandidateSet);
+ }
+ }
+
+ // Extension: Add the binary operators ==, !=, <, <=, >=, >, *, /, and the
+ // conditional operator for vector types.
+ for (BuiltinCandidateTypeSet::iterator
+ Vec1 = CandidateTypes[0].vector_begin(),
+ Vec1End = CandidateTypes[0].vector_end();
+ Vec1 != Vec1End; ++Vec1) {
+ for (BuiltinCandidateTypeSet::iterator
+ Vec2 = CandidateTypes[1].vector_begin(),
+ Vec2End = CandidateTypes[1].vector_end();
+ Vec2 != Vec2End; ++Vec2) {
+ QualType LandR[2] = { *Vec1, *Vec2 };
+ QualType Result = S.Context.BoolTy;
+ if (!isComparison) {
+ if ((*Vec1)->isExtVectorType() || !(*Vec2)->isExtVectorType())
+ Result = *Vec1;
+ else
+ Result = *Vec2;
+ }
+
+ S.AddBuiltinCandidate(Result, LandR, Args, CandidateSet);
+ }
+ }
+ }
+
+ // C++ [over.built]p17:
+ //
+ // For every pair of promoted integral types L and R, there
+ // exist candidate operator functions of the form
+ //
+ // LR operator%(L, R);
+ // LR operator&(L, R);
+ // LR operator^(L, R);
+ // LR operator|(L, R);
+ // L operator<<(L, R);
+ // L operator>>(L, R);
+ //
+ // where LR is the result of the usual arithmetic conversions
+ // between types L and R.
+ void addBinaryBitwiseArithmeticOverloads(OverloadedOperatorKind Op) {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
+
+ for (unsigned Left = FirstPromotedIntegralType;
+ Left < LastPromotedIntegralType; ++Left) {
+ for (unsigned Right = FirstPromotedIntegralType;
+ Right < LastPromotedIntegralType; ++Right) {
+ QualType LandR[2] = { getArithmeticType(Left),
+ getArithmeticType(Right) };
+ QualType Result = (Op == OO_LessLess || Op == OO_GreaterGreater)
+ ? LandR[0]
+ : getUsualArithmeticConversions(Left, Right);
+ S.AddBuiltinCandidate(Result, LandR, Args, CandidateSet);
+ }
+ }
+ }
+
+ // C++ [over.built]p20:
+ //
+ // For every pair (T, VQ), where T is an enumeration or
+ // pointer to member type and VQ is either volatile or
+ // empty, there exist candidate operator functions of the form
+ //
+ // VQ T& operator=(VQ T&, T);
+ void addAssignmentMemberPointerOrEnumeralOverloads() {
+ /// Set of (canonical) types that we've already handled.
+ llvm::SmallPtrSet<QualType, 8> AddedTypes;
+
+ for (unsigned ArgIdx = 0; ArgIdx < 2; ++ArgIdx) {
+ for (BuiltinCandidateTypeSet::iterator
+ Enum = CandidateTypes[ArgIdx].enumeration_begin(),
+ EnumEnd = CandidateTypes[ArgIdx].enumeration_end();
+ Enum != EnumEnd; ++Enum) {
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Enum)).second)
+ continue;
+
+ AddBuiltinAssignmentOperatorCandidates(S, *Enum, Args, CandidateSet);
+ }
+
+ for (BuiltinCandidateTypeSet::iterator
+ MemPtr = CandidateTypes[ArgIdx].member_pointer_begin(),
+ MemPtrEnd = CandidateTypes[ArgIdx].member_pointer_end();
+ MemPtr != MemPtrEnd; ++MemPtr) {
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)).second)
+ continue;
+
+ AddBuiltinAssignmentOperatorCandidates(S, *MemPtr, Args, CandidateSet);
+ }
+ }
+ }
+
+ // C++ [over.built]p19:
+ //
+ // For every pair (T, VQ), where T is any type and VQ is either
+ // volatile or empty, there exist candidate operator functions
+ // of the form
+ //
+ // T*VQ& operator=(T*VQ&, T*);
+ //
+ // C++ [over.built]p21:
+ //
+ // For every pair (T, VQ), where T is a cv-qualified or
+ // cv-unqualified object type and VQ is either volatile or
+ // empty, there exist candidate operator functions of the form
+ //
+ // T*VQ& operator+=(T*VQ&, ptrdiff_t);
+ // T*VQ& operator-=(T*VQ&, ptrdiff_t);
+ void addAssignmentPointerOverloads(bool isEqualOp) {
+ /// Set of (canonical) types that we've already handled.
+ llvm::SmallPtrSet<QualType, 8> AddedTypes;
+
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[0].pointer_begin(),
+ PtrEnd = CandidateTypes[0].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ // If this is operator=, keep track of the builtin candidates we added.
+ if (isEqualOp)
+ AddedTypes.insert(S.Context.getCanonicalType(*Ptr));
+ else if (!(*Ptr)->getPointeeType()->isObjectType())
+ continue;
+
+ // non-volatile version
+ QualType ParamTypes[2] = {
+ S.Context.getLValueReferenceType(*Ptr),
+ isEqualOp ? *Ptr : S.Context.getPointerDiffType(),
+ };
+      S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
+                            /*IsAssignmentOperator=*/isEqualOp);
+
+ bool NeedVolatile = !(*Ptr).isVolatileQualified() &&
+ VisibleTypeConversionsQuals.hasVolatile();
+ if (NeedVolatile) {
+ // volatile version
+ ParamTypes[0] =
+ S.Context.getLValueReferenceType(S.Context.getVolatileType(*Ptr));
+        S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
+                              /*IsAssignmentOperator=*/isEqualOp);
+ }
+
+ if (!(*Ptr).isRestrictQualified() &&
+ VisibleTypeConversionsQuals.hasRestrict()) {
+ // restrict version
+ ParamTypes[0]
+ = S.Context.getLValueReferenceType(S.Context.getRestrictType(*Ptr));
+        S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
+                              /*IsAssignmentOperator=*/isEqualOp);
+
+ if (NeedVolatile) {
+ // volatile restrict version
+ ParamTypes[0]
+ = S.Context.getLValueReferenceType(
+ S.Context.getCVRQualifiedType(*Ptr,
+ (Qualifiers::Volatile |
+ Qualifiers::Restrict)));
+          S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
+                                /*IsAssignmentOperator=*/isEqualOp);
+ }
+ }
+ }
+
+ if (isEqualOp) {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[1].pointer_begin(),
+ PtrEnd = CandidateTypes[1].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ // Make sure we don't add the same candidate twice.
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)).second)
+ continue;
+
+ QualType ParamTypes[2] = {
+ S.Context.getLValueReferenceType(*Ptr),
+ *Ptr,
+ };
+
+ // non-volatile version
+        S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
+                              /*IsAssignmentOperator=*/true);
+
+ bool NeedVolatile = !(*Ptr).isVolatileQualified() &&
+ VisibleTypeConversionsQuals.hasVolatile();
+ if (NeedVolatile) {
+ // volatile version
+ ParamTypes[0] =
+ S.Context.getLValueReferenceType(S.Context.getVolatileType(*Ptr));
+          S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
+                                /*IsAssignmentOperator=*/true);
+ }
+
+ if (!(*Ptr).isRestrictQualified() &&
+ VisibleTypeConversionsQuals.hasRestrict()) {
+ // restrict version
+ ParamTypes[0]
+ = S.Context.getLValueReferenceType(S.Context.getRestrictType(*Ptr));
+          S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
+                                /*IsAssignmentOperator=*/true);
+
+ if (NeedVolatile) {
+ // volatile restrict version
+ ParamTypes[0]
+ = S.Context.getLValueReferenceType(
+ S.Context.getCVRQualifiedType(*Ptr,
+ (Qualifiers::Volatile |
+ Qualifiers::Restrict)));
+            S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
+                                  /*IsAssignmentOperator=*/true);
+ }
+ }
+ }
+ }
+ }
+
+ // C++ [over.built]p18:
+ //
+ // For every triple (L, VQ, R), where L is an arithmetic type,
+ // VQ is either volatile or empty, and R is a promoted
+ // arithmetic type, there exist candidate operator functions of
+ // the form
+ //
+ // VQ L& operator=(VQ L&, R);
+ // VQ L& operator*=(VQ L&, R);
+ // VQ L& operator/=(VQ L&, R);
+ // VQ L& operator+=(VQ L&, R);
+ // VQ L& operator-=(VQ L&, R);
+ void addAssignmentArithmeticOverloads(bool isEqualOp) {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
+
+ for (unsigned Left = 0; Left < NumArithmeticTypes; ++Left) {
+ for (unsigned Right = FirstPromotedArithmeticType;
+ Right < LastPromotedArithmeticType; ++Right) {
+ QualType ParamTypes[2];
+ ParamTypes[1] = getArithmeticType(Right);
+
+ // Add this built-in operator as a candidate (VQ is empty).
+ ParamTypes[0] =
+ S.Context.getLValueReferenceType(getArithmeticType(Left));
+        S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
+                              /*IsAssignmentOperator=*/isEqualOp);
+
+ // Add this built-in operator as a candidate (VQ is 'volatile').
+ if (VisibleTypeConversionsQuals.hasVolatile()) {
+ ParamTypes[0] =
+ S.Context.getVolatileType(getArithmeticType(Left));
+ ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
+          S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
+                                /*IsAssignmentOperator=*/isEqualOp);
+ }
+ }
+ }
+
+ // Extension: Add the binary operators =, +=, -=, *=, /= for vector types.
+ for (BuiltinCandidateTypeSet::iterator
+ Vec1 = CandidateTypes[0].vector_begin(),
+ Vec1End = CandidateTypes[0].vector_end();
+ Vec1 != Vec1End; ++Vec1) {
+ for (BuiltinCandidateTypeSet::iterator
+ Vec2 = CandidateTypes[1].vector_begin(),
+ Vec2End = CandidateTypes[1].vector_end();
+ Vec2 != Vec2End; ++Vec2) {
+ QualType ParamTypes[2];
+ ParamTypes[1] = *Vec2;
+ // Add this built-in operator as a candidate (VQ is empty).
+ ParamTypes[0] = S.Context.getLValueReferenceType(*Vec1);
+        S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
+                              /*IsAssignmentOperator=*/isEqualOp);
+
+ // Add this built-in operator as a candidate (VQ is 'volatile').
+ if (VisibleTypeConversionsQuals.hasVolatile()) {
+ ParamTypes[0] = S.Context.getVolatileType(*Vec1);
+ ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
+          S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
+                                /*IsAssignmentOperator=*/isEqualOp);
+ }
+ }
+ }
+ }
+
+ // C++ [over.built]p22:
+ //
+ // For every triple (L, VQ, R), where L is an integral type, VQ
+ // is either volatile or empty, and R is a promoted integral
+ // type, there exist candidate operator functions of the form
+ //
+ // VQ L& operator%=(VQ L&, R);
+ // VQ L& operator<<=(VQ L&, R);
+ // VQ L& operator>>=(VQ L&, R);
+ // VQ L& operator&=(VQ L&, R);
+ // VQ L& operator^=(VQ L&, R);
+ // VQ L& operator|=(VQ L&, R);
+ void addAssignmentIntegralOverloads() {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
+
+ for (unsigned Left = FirstIntegralType; Left < LastIntegralType; ++Left) {
+ for (unsigned Right = FirstPromotedIntegralType;
+ Right < LastPromotedIntegralType; ++Right) {
+ QualType ParamTypes[2];
+ ParamTypes[1] = getArithmeticType(Right);
+
+ // Add this built-in operator as a candidate (VQ is empty).
+ ParamTypes[0] =
+ S.Context.getLValueReferenceType(getArithmeticType(Left));
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet);
+ if (VisibleTypeConversionsQuals.hasVolatile()) {
+ // Add this built-in operator as a candidate (VQ is 'volatile').
+ ParamTypes[0] = getArithmeticType(Left);
+ ParamTypes[0] = S.Context.getVolatileType(ParamTypes[0]);
+ ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet);
+ }
+ }
+ }
+ }
+
+  // C++ [over.built]p23:
+ //
+ // There also exist candidate operator functions of the form
+ //
+ // bool operator!(bool);
+ // bool operator&&(bool, bool);
+ // bool operator||(bool, bool);
+ void addExclaimOverload() {
+ QualType ParamTy = S.Context.BoolTy;
+ S.AddBuiltinCandidate(ParamTy, &ParamTy, Args, CandidateSet,
+ /*IsAssignmentOperator=*/false,
+ /*NumContextualBoolArguments=*/1);
+ }
+ void addAmpAmpOrPipePipeOverload() {
+ QualType ParamTypes[2] = { S.Context.BoolTy, S.Context.BoolTy };
+ S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args, CandidateSet,
+ /*IsAssignmentOperator=*/false,
+ /*NumContextualBoolArguments=*/2);
+ }
+
+ // C++ [over.built]p13:
+ //
+ // For every cv-qualified or cv-unqualified object type T there
+ // exist candidate operator functions of the form
+ //
+ // T* operator+(T*, ptrdiff_t); [ABOVE]
+ // T& operator[](T*, ptrdiff_t);
+ // T* operator-(T*, ptrdiff_t); [ABOVE]
+ // T* operator+(ptrdiff_t, T*); [ABOVE]
+ // T& operator[](ptrdiff_t, T*);
+ void addSubscriptOverloads() {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[0].pointer_begin(),
+ PtrEnd = CandidateTypes[0].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ QualType ParamTypes[2] = { *Ptr, S.Context.getPointerDiffType() };
+ QualType PointeeType = (*Ptr)->getPointeeType();
+ if (!PointeeType->isObjectType())
+ continue;
+
+ QualType ResultTy = S.Context.getLValueReferenceType(PointeeType);
+
+ // T& operator[](T*, ptrdiff_t)
+ S.AddBuiltinCandidate(ResultTy, ParamTypes, Args, CandidateSet);
+ }
+
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[1].pointer_begin(),
+ PtrEnd = CandidateTypes[1].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ QualType ParamTypes[2] = { S.Context.getPointerDiffType(), *Ptr };
+ QualType PointeeType = (*Ptr)->getPointeeType();
+ if (!PointeeType->isObjectType())
+ continue;
+
+ QualType ResultTy = S.Context.getLValueReferenceType(PointeeType);
+
+ // T& operator[](ptrdiff_t, T*)
+ S.AddBuiltinCandidate(ResultTy, ParamTypes, Args, CandidateSet);
+ }
+ }
+
+ // C++ [over.built]p11:
+ // For every quintuple (C1, C2, T, CV1, CV2), where C2 is a class type,
+ // C1 is the same type as C2 or is a derived class of C2, T is an object
+ // type or a function type, and CV1 and CV2 are cv-qualifier-seqs,
+ // there exist candidate operator functions of the form
+ //
+ // CV12 T& operator->*(CV1 C1*, CV2 T C2::*);
+ //
+ // where CV12 is the union of CV1 and CV2.
+ void addArrowStarOverloads() {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[0].pointer_begin(),
+ PtrEnd = CandidateTypes[0].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ QualType C1Ty = (*Ptr);
+ QualType C1;
+ QualifierCollector Q1;
+ C1 = QualType(Q1.strip(C1Ty->getPointeeType()), 0);
+ if (!isa<RecordType>(C1))
+ continue;
+      // A heuristic to reduce the number of builtin candidates in the set:
+ // Add volatile/restrict version only if there are conversions to a
+ // volatile/restrict type.
+ if (!VisibleTypeConversionsQuals.hasVolatile() && Q1.hasVolatile())
+ continue;
+ if (!VisibleTypeConversionsQuals.hasRestrict() && Q1.hasRestrict())
+ continue;
+ for (BuiltinCandidateTypeSet::iterator
+ MemPtr = CandidateTypes[1].member_pointer_begin(),
+ MemPtrEnd = CandidateTypes[1].member_pointer_end();
+ MemPtr != MemPtrEnd; ++MemPtr) {
+ const MemberPointerType *mptr = cast<MemberPointerType>(*MemPtr);
+ QualType C2 = QualType(mptr->getClass(), 0);
+ C2 = C2.getUnqualifiedType();
+ if (C1 != C2 && !S.IsDerivedFrom(CandidateSet.getLocation(), C1, C2))
+ break;
+ QualType ParamTypes[2] = { *Ptr, *MemPtr };
+ // build CV12 T&
+ QualType T = mptr->getPointeeType();
+ if (!VisibleTypeConversionsQuals.hasVolatile() &&
+ T.isVolatileQualified())
+ continue;
+ if (!VisibleTypeConversionsQuals.hasRestrict() &&
+ T.isRestrictQualified())
+ continue;
+ T = Q1.apply(S.Context, T);
+ QualType ResultTy = S.Context.getLValueReferenceType(T);
+ S.AddBuiltinCandidate(ResultTy, ParamTypes, Args, CandidateSet);
+ }
+ }
+ }
+
+  // Note that we don't consider the first argument, since it has already
+  // been contextually converted to bool. The candidates below are
+ // therefore added as binary.
+ //
+ // C++ [over.built]p25:
+ // For every type T, where T is a pointer, pointer-to-member, or scoped
+ // enumeration type, there exist candidate operator functions of the form
+ //
+ // T operator?(bool, T, T);
+ //
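+  // For example, with second and third operands convertible to 'int*',
+  // 'cond ? x : y' considers the built-in candidate
+  // 'int* operator?(bool, int*, int*)'.
+  //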
+ void addConditionalOperatorOverloads() {
+ /// Set of (canonical) types that we've already handled.
+ llvm::SmallPtrSet<QualType, 8> AddedTypes;
+
+ for (unsigned ArgIdx = 0; ArgIdx < 2; ++ArgIdx) {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[ArgIdx].pointer_begin(),
+ PtrEnd = CandidateTypes[ArgIdx].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)).second)
+ continue;
+
+ QualType ParamTypes[2] = { *Ptr, *Ptr };
+ S.AddBuiltinCandidate(*Ptr, ParamTypes, Args, CandidateSet);
+ }
+
+ for (BuiltinCandidateTypeSet::iterator
+ MemPtr = CandidateTypes[ArgIdx].member_pointer_begin(),
+ MemPtrEnd = CandidateTypes[ArgIdx].member_pointer_end();
+ MemPtr != MemPtrEnd; ++MemPtr) {
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)).second)
+ continue;
+
+ QualType ParamTypes[2] = { *MemPtr, *MemPtr };
+ S.AddBuiltinCandidate(*MemPtr, ParamTypes, Args, CandidateSet);
+ }
+
+ if (S.getLangOpts().CPlusPlus11) {
+ for (BuiltinCandidateTypeSet::iterator
+ Enum = CandidateTypes[ArgIdx].enumeration_begin(),
+ EnumEnd = CandidateTypes[ArgIdx].enumeration_end();
+ Enum != EnumEnd; ++Enum) {
+ if (!(*Enum)->getAs<EnumType>()->getDecl()->isScoped())
+ continue;
+
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Enum)).second)
+ continue;
+
+ QualType ParamTypes[2] = { *Enum, *Enum };
+ S.AddBuiltinCandidate(*Enum, ParamTypes, Args, CandidateSet);
+ }
+ }
+ }
+ }
+};
+
+} // end anonymous namespace
+
+/// AddBuiltinOperatorCandidates - Add the appropriate built-in
+/// operator overloads to the candidate set (C++ [over.built]), based
+/// on the operator @p Op and the arguments given. For example, if the
+/// operator is a binary '+', this routine might add "int
+/// operator+(int, int)" to cover integer addition.
+void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
+ SourceLocation OpLoc,
+ ArrayRef<Expr *> Args,
+ OverloadCandidateSet &CandidateSet) {
+ // Find all of the types that the arguments can convert to, but only
+ // if the operator we're looking at has built-in operator candidates
+ // that make use of these types. Also record whether we encounter non-record
+ // candidate types or either arithmetic or enumeral candidate types.
+ Qualifiers VisibleTypeConversionsQuals;
+ VisibleTypeConversionsQuals.addConst();
+ for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx)
+ VisibleTypeConversionsQuals += CollectVRQualifiers(Context, Args[ArgIdx]);
+
+ bool HasNonRecordCandidateType = false;
+ bool HasArithmeticOrEnumeralCandidateType = false;
+ SmallVector<BuiltinCandidateTypeSet, 2> CandidateTypes;
+ for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
+ CandidateTypes.emplace_back(*this);
+ CandidateTypes[ArgIdx].AddTypesConvertedFrom(Args[ArgIdx]->getType(),
+ OpLoc,
+ true,
+ (Op == OO_Exclaim ||
+ Op == OO_AmpAmp ||
+ Op == OO_PipePipe),
+ VisibleTypeConversionsQuals);
+ HasNonRecordCandidateType = HasNonRecordCandidateType ||
+ CandidateTypes[ArgIdx].hasNonRecordTypes();
+ HasArithmeticOrEnumeralCandidateType =
+ HasArithmeticOrEnumeralCandidateType ||
+ CandidateTypes[ArgIdx].hasArithmeticOrEnumeralTypes();
+ }
+
+ // Exit early when no non-record types have been added to the candidate set
+ // for any of the arguments to the operator.
+ //
+  // We can't exit early for !, ||, or &&, since there we always have
+ // 'bool' overloads.
+ if (!HasNonRecordCandidateType &&
+ !(Op == OO_Exclaim || Op == OO_AmpAmp || Op == OO_PipePipe))
+ return;
+
+  // Set up an object to manage the common state for building overloads.
+ BuiltinOperatorOverloadBuilder OpBuilder(*this, Args,
+ VisibleTypeConversionsQuals,
+ HasArithmeticOrEnumeralCandidateType,
+ CandidateTypes, CandidateSet);
+
+ // Dispatch over the operation to add in only those overloads which apply.
+ switch (Op) {
+ case OO_None:
+ case NUM_OVERLOADED_OPERATORS:
+ llvm_unreachable("Expected an overloaded operator");
+
+ case OO_New:
+ case OO_Delete:
+ case OO_Array_New:
+ case OO_Array_Delete:
+ case OO_Call:
+ llvm_unreachable(
+ "Special operators don't use AddBuiltinOperatorCandidates");
+
+ case OO_Comma:
+ case OO_Arrow:
+ case OO_Coawait:
+ // C++ [over.match.oper]p3:
+ // -- For the operator ',', the unary operator '&', the
+ // operator '->', or the operator 'co_await', the
+ // built-in candidates set is empty.
+ break;
+
+ case OO_Plus: // '+' is either unary or binary
+ if (Args.size() == 1)
+ OpBuilder.addUnaryPlusPointerOverloads();
+ // Fall through.
+
+ case OO_Minus: // '-' is either unary or binary
+ if (Args.size() == 1) {
+ OpBuilder.addUnaryPlusOrMinusArithmeticOverloads();
+ } else {
+ OpBuilder.addBinaryPlusOrMinusPointerOverloads(Op);
+ OpBuilder.addGenericBinaryArithmeticOverloads(/*isComparison=*/false);
+ }
+ break;
+
+ case OO_Star: // '*' is either unary or binary
+ if (Args.size() == 1)
+ OpBuilder.addUnaryStarPointerOverloads();
+ else
+ OpBuilder.addGenericBinaryArithmeticOverloads(/*isComparison=*/false);
+ break;
+
+ case OO_Slash:
+ OpBuilder.addGenericBinaryArithmeticOverloads(/*isComparison=*/false);
+ break;
+
+ case OO_PlusPlus:
+ case OO_MinusMinus:
+ OpBuilder.addPlusPlusMinusMinusArithmeticOverloads(Op);
+ OpBuilder.addPlusPlusMinusMinusPointerOverloads();
+ break;
+
+ case OO_EqualEqual:
+ case OO_ExclaimEqual:
+ OpBuilder.addEqualEqualOrNotEqualMemberPointerOverloads();
+ // Fall through.
+
+ case OO_Less:
+ case OO_Greater:
+ case OO_LessEqual:
+ case OO_GreaterEqual:
+ OpBuilder.addRelationalPointerOrEnumeralOverloads();
+ OpBuilder.addGenericBinaryArithmeticOverloads(/*isComparison=*/true);
+ break;
+
+ case OO_Percent:
+ case OO_Caret:
+ case OO_Pipe:
+ case OO_LessLess:
+ case OO_GreaterGreater:
+ OpBuilder.addBinaryBitwiseArithmeticOverloads(Op);
+ break;
+
+ case OO_Amp: // '&' is either unary or binary
+ if (Args.size() == 1)
+ // C++ [over.match.oper]p3:
+ // -- For the operator ',', the unary operator '&', or the
+ // operator '->', the built-in candidates set is empty.
+ break;
+
+ OpBuilder.addBinaryBitwiseArithmeticOverloads(Op);
+ break;
+
+ case OO_Tilde:
+ OpBuilder.addUnaryTildePromotedIntegralOverloads();
+ break;
+
+ case OO_Equal:
+ OpBuilder.addAssignmentMemberPointerOrEnumeralOverloads();
+ // Fall through.
+
+ case OO_PlusEqual:
+ case OO_MinusEqual:
+ OpBuilder.addAssignmentPointerOverloads(Op == OO_Equal);
+ // Fall through.
+
+ case OO_StarEqual:
+ case OO_SlashEqual:
+ OpBuilder.addAssignmentArithmeticOverloads(Op == OO_Equal);
+ break;
+
+ case OO_PercentEqual:
+ case OO_LessLessEqual:
+ case OO_GreaterGreaterEqual:
+ case OO_AmpEqual:
+ case OO_CaretEqual:
+ case OO_PipeEqual:
+ OpBuilder.addAssignmentIntegralOverloads();
+ break;
+
+ case OO_Exclaim:
+ OpBuilder.addExclaimOverload();
+ break;
+
+ case OO_AmpAmp:
+ case OO_PipePipe:
+ OpBuilder.addAmpAmpOrPipePipeOverload();
+ break;
+
+ case OO_Subscript:
+ OpBuilder.addSubscriptOverloads();
+ break;
+
+ case OO_ArrowStar:
+ OpBuilder.addArrowStarOverloads();
+ break;
+
+ case OO_Conditional:
+ OpBuilder.addConditionalOperatorOverloads();
+ OpBuilder.addGenericBinaryArithmeticOverloads(/*isComparison=*/false);
+ break;
+ }
+}
+
+/// \brief Add function candidates found via argument-dependent lookup
+/// to the set of overloading candidates.
+///
+/// This routine performs argument-dependent name lookup based on the
+/// given function name (which may also be an operator name) and adds
+/// all of the overload candidates found by ADL to the overload
+/// candidate set (C++ [basic.lookup.argdep]).
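+///
+/// For example, given 'namespace N { struct X {}; void f(X); }', a call
+/// 'f(N::X())' written outside N still finds 'N::f', because N is an
+/// associated namespace of the argument's type.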
+void
+Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
+ SourceLocation Loc,
+ ArrayRef<Expr *> Args,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ OverloadCandidateSet& CandidateSet,
+ bool PartialOverloading) {
+ ADLResult Fns;
+
+ // FIXME: This approach for uniquing ADL results (and removing
+ // redundant candidates from the set) relies on pointer-equality,
+ // which means we need to key off the canonical decl. However,
+ // always going back to the canonical decl might not get us the
+ // right set of default arguments. What default arguments are
+ // we supposed to consider on ADL candidates, anyway?
+
+ // FIXME: Pass in the explicit template arguments?
+ ArgumentDependentLookup(Name, Loc, Args, Fns);
+
+ // Erase all of the candidates we already knew about.
+ for (OverloadCandidateSet::iterator Cand = CandidateSet.begin(),
+ CandEnd = CandidateSet.end();
+ Cand != CandEnd; ++Cand)
+ if (Cand->Function) {
+ Fns.erase(Cand->Function);
+ if (FunctionTemplateDecl *FunTmpl = Cand->Function->getPrimaryTemplate())
+ Fns.erase(FunTmpl);
+ }
+
+ // For each of the ADL candidates we found, add it to the overload
+ // set.
+ for (ADLResult::iterator I = Fns.begin(), E = Fns.end(); I != E; ++I) {
+ DeclAccessPair FoundDecl = DeclAccessPair::make(*I, AS_none);
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
+ if (ExplicitTemplateArgs)
+ continue;
+
+ AddOverloadCandidate(FD, FoundDecl, Args, CandidateSet, false,
+ PartialOverloading);
+ } else
+ AddTemplateOverloadCandidate(cast<FunctionTemplateDecl>(*I),
+ FoundDecl, ExplicitTemplateArgs,
+ Args, CandidateSet, PartialOverloading);
+ }
+}
+
+// Determines whether Cand1 is "better" in terms of its enable_if attrs than
+// Cand2 for overloading. This function assumes that all of the enable_if attrs
+// on Cand1 and Cand2 have conditions that evaluate to true.
+//
+// Cand1's set of enable_if attributes is said to be "better" than Cand2's iff
+// Cand1's first N enable_if attributes have precisely the same conditions as
+// Cand2's first N enable_if attributes (where N = the number of enable_if
+// attributes on Cand2), and Cand1 has more than N enable_if attributes.
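+//
+// For example (with hypothetical conditions A and B), a candidate carrying
+// 'enable_if(A), enable_if(B)' is better than one carrying just
+// 'enable_if(A)'; if the common prefix differs, neither is better.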
+static bool hasBetterEnableIfAttrs(Sema &S, const FunctionDecl *Cand1,
+ const FunctionDecl *Cand2) {
+
+ // FIXME: The next several lines are just
+ // specific_attr_iterator<EnableIfAttr> but going in declaration order,
+// instead of reverse order, which is how they're stored in the AST.
+ auto Cand1Attrs = getOrderedEnableIfAttrs(Cand1);
+ auto Cand2Attrs = getOrderedEnableIfAttrs(Cand2);
+
+ // Candidate 1 is better if it has strictly more attributes and
+ // the common sequence is identical.
+ if (Cand1Attrs.size() <= Cand2Attrs.size())
+ return false;
+
+ auto Cand1I = Cand1Attrs.begin();
+ llvm::FoldingSetNodeID Cand1ID, Cand2ID;
+ for (auto &Cand2A : Cand2Attrs) {
+ Cand1ID.clear();
+ Cand2ID.clear();
+
+ auto &Cand1A = *Cand1I++;
+ Cand1A->getCond()->Profile(Cand1ID, S.getASTContext(), true);
+ Cand2A->getCond()->Profile(Cand2ID, S.getASTContext(), true);
+ if (Cand1ID != Cand2ID)
+ return false;
+ }
+
+ return true;
+}
+
+/// isBetterOverloadCandidate - Determines whether the first overload
+/// candidate is a better candidate than the second (C++ 13.3.3p1).
+bool clang::isBetterOverloadCandidate(Sema &S, const OverloadCandidate &Cand1,
+ const OverloadCandidate &Cand2,
+ SourceLocation Loc,
+ bool UserDefinedConversion) {
+ // Define viable functions to be better candidates than non-viable
+ // functions.
+ if (!Cand2.Viable)
+ return Cand1.Viable;
+ else if (!Cand1.Viable)
+ return false;
+
+ // C++ [over.match.best]p1:
+ //
+ // -- if F is a static member function, ICS1(F) is defined such
+ // that ICS1(F) is neither better nor worse than ICS1(G) for
+ // any function G, and, symmetrically, ICS1(G) is neither
+ // better nor worse than ICS1(F).
+ unsigned StartArg = 0;
+ if (Cand1.IgnoreObjectArgument || Cand2.IgnoreObjectArgument)
+ StartArg = 1;
+
+ // C++ [over.match.best]p1:
+ // A viable function F1 is defined to be a better function than another
+ // viable function F2 if for all arguments i, ICSi(F1) is not a worse
+ // conversion sequence than ICSi(F2), and then...
+ unsigned NumArgs = Cand1.NumConversions;
+ assert(Cand2.NumConversions == NumArgs && "Overload candidate mismatch");
+ bool HasBetterConversion = false;
+ for (unsigned ArgIdx = StartArg; ArgIdx < NumArgs; ++ArgIdx) {
+ switch (CompareImplicitConversionSequences(S, Loc,
+ Cand1.Conversions[ArgIdx],
+ Cand2.Conversions[ArgIdx])) {
+ case ImplicitConversionSequence::Better:
+ // Cand1 has a better conversion sequence.
+ HasBetterConversion = true;
+ break;
+
+ case ImplicitConversionSequence::Worse:
+ // Cand1 can't be better than Cand2.
+ return false;
+
+ case ImplicitConversionSequence::Indistinguishable:
+ // Do nothing.
+ break;
+ }
+ }
+
+ // -- for some argument j, ICSj(F1) is a better conversion sequence than
+ // ICSj(F2), or, if not that,
+ if (HasBetterConversion)
+ return true;
+
+ // -- the context is an initialization by user-defined conversion
+ // (see 8.5, 13.3.1.5) and the standard conversion sequence
+ // from the return type of F1 to the destination type (i.e.,
+ // the type of the entity being initialized) is a better
+ // conversion sequence than the standard conversion sequence
+ // from the return type of F2 to the destination type.
+ if (UserDefinedConversion && Cand1.Function && Cand2.Function &&
+ isa<CXXConversionDecl>(Cand1.Function) &&
+ isa<CXXConversionDecl>(Cand2.Function)) {
+ // First check whether we prefer one of the conversion functions over the
+ // other. This only distinguishes the results in non-standard, extension
+ // cases such as the conversion from a lambda closure type to a function
+ // pointer or block.
+ ImplicitConversionSequence::CompareKind Result =
+ compareConversionFunctions(S, Cand1.Function, Cand2.Function);
+ if (Result == ImplicitConversionSequence::Indistinguishable)
+ Result = CompareStandardConversionSequences(S, Loc,
+ Cand1.FinalConversion,
+ Cand2.FinalConversion);
+
+ if (Result != ImplicitConversionSequence::Indistinguishable)
+ return Result == ImplicitConversionSequence::Better;
+
+ // FIXME: Compare kind of reference binding if conversion functions
+ // convert to a reference type used in direct reference binding, per
+ // C++14 [over.match.best]p1 section 2 bullet 3.
+ }
+
+ // -- F1 is a non-template function and F2 is a function template
+ // specialization, or, if not that,
+ bool Cand1IsSpecialization = Cand1.Function &&
+ Cand1.Function->getPrimaryTemplate();
+ bool Cand2IsSpecialization = Cand2.Function &&
+ Cand2.Function->getPrimaryTemplate();
+ if (Cand1IsSpecialization != Cand2IsSpecialization)
+ return Cand2IsSpecialization;
+
+ // -- F1 and F2 are function template specializations, and the function
+ // template for F1 is more specialized than the template for F2
+ // according to the partial ordering rules described in 14.5.5.2, or,
+ // if not that,
+ if (Cand1IsSpecialization && Cand2IsSpecialization) {
+ if (FunctionTemplateDecl *BetterTemplate
+ = S.getMoreSpecializedTemplate(Cand1.Function->getPrimaryTemplate(),
+ Cand2.Function->getPrimaryTemplate(),
+ Loc,
+ isa<CXXConversionDecl>(Cand1.Function)? TPOC_Conversion
+ : TPOC_Call,
+ Cand1.ExplicitCallArguments,
+ Cand2.ExplicitCallArguments))
+ return BetterTemplate == Cand1.Function->getPrimaryTemplate();
+ }
+
+ // Check for enable_if value-based overload resolution.
+ if (Cand1.Function && Cand2.Function &&
+ (Cand1.Function->hasAttr<EnableIfAttr>() ||
+ Cand2.Function->hasAttr<EnableIfAttr>()))
+ return hasBetterEnableIfAttrs(S, Cand1.Function, Cand2.Function);
+
+ if (S.getLangOpts().CUDA && S.getLangOpts().CUDATargetOverloads &&
+ Cand1.Function && Cand2.Function) {
+ FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext);
+ return S.IdentifyCUDAPreference(Caller, Cand1.Function) >
+ S.IdentifyCUDAPreference(Caller, Cand2.Function);
+ }
+
+ bool HasPS1 = Cand1.Function != nullptr &&
+ functionHasPassObjectSizeParams(Cand1.Function);
+ bool HasPS2 = Cand2.Function != nullptr &&
+ functionHasPassObjectSizeParams(Cand2.Function);
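+  // Otherwise, prefer the candidate that has pass_object_size parameters,
+  // when exactly one of the two has them.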
+ return HasPS1 != HasPS2 && HasPS1;
+}
+
+/// Determine whether two declarations are "equivalent" for the purposes of
+/// name lookup and overload resolution. This applies when the same internal/no
+/// linkage entity is defined by two modules (probably by textually including
+/// the same header). In such a case, we don't consider the declarations to
+/// declare the same entity, but we also don't want lookups with both
+/// declarations visible to be ambiguous in some cases (this happens when using
+/// a modularized libstdc++).
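+///
+/// For example, if two modules each textually include a header that defines,
+/// say, 'static int getTableSize() { return 64; }', both declarations may be
+/// visible at a use site; treating them as equivalent avoids a spurious
+/// ambiguity.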
+bool Sema::isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
+ const NamedDecl *B) {
+ auto *VA = dyn_cast_or_null<ValueDecl>(A);
+ auto *VB = dyn_cast_or_null<ValueDecl>(B);
+ if (!VA || !VB)
+ return false;
+
+ // The declarations must be declaring the same name as an internal linkage
+ // entity in different modules.
+ if (!VA->getDeclContext()->getRedeclContext()->Equals(
+ VB->getDeclContext()->getRedeclContext()) ||
+ getOwningModule(const_cast<ValueDecl *>(VA)) ==
+ getOwningModule(const_cast<ValueDecl *>(VB)) ||
+ VA->isExternallyVisible() || VB->isExternallyVisible())
+ return false;
+
+ // Check that the declarations appear to be equivalent.
+ //
+ // FIXME: Checking the type isn't really enough to resolve the ambiguity.
+ // For constants and functions, we should check the initializer or body is
+ // the same. For non-constant variables, we shouldn't allow it at all.
+ if (Context.hasSameType(VA->getType(), VB->getType()))
+ return true;
+
+ // Enum constants within unnamed enumerations will have different types, but
+ // may still be similar enough to be interchangeable for our purposes.
+ if (auto *EA = dyn_cast<EnumConstantDecl>(VA)) {
+ if (auto *EB = dyn_cast<EnumConstantDecl>(VB)) {
+ // Only handle anonymous enums. If the enumerations were named and
+ // equivalent, they would have been merged to the same type.
+ auto *EnumA = cast<EnumDecl>(EA->getDeclContext());
+ auto *EnumB = cast<EnumDecl>(EB->getDeclContext());
+ if (EnumA->hasNameForLinkage() || EnumB->hasNameForLinkage() ||
+ !Context.hasSameType(EnumA->getIntegerType(),
+ EnumB->getIntegerType()))
+ return false;
+ // Allow this only if the value is the same for both enumerators.
+ return llvm::APSInt::isSameValue(EA->getInitVal(), EB->getInitVal());
+ }
+ }
+
+ // Nothing else is sufficiently similar.
+ return false;
+}
+
+void Sema::diagnoseEquivalentInternalLinkageDeclarations(
+ SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv) {
+ Diag(Loc, diag::ext_equivalent_internal_linkage_decl_in_modules) << D;
+
+ Module *M = getOwningModule(const_cast<NamedDecl*>(D));
+ Diag(D->getLocation(), diag::note_equivalent_internal_linkage_decl)
+ << !M << (M ? M->getFullModuleName() : "");
+
+ for (auto *E : Equiv) {
+ Module *M = getOwningModule(const_cast<NamedDecl*>(E));
+ Diag(E->getLocation(), diag::note_equivalent_internal_linkage_decl)
+ << !M << (M ? M->getFullModuleName() : "");
+ }
+}
+
+/// \brief Computes the best viable function (C++ 13.3.3)
+/// within an overload candidate set.
+///
+/// \param Loc The location of the function name (or operator symbol) for
+/// which overload resolution occurs.
+///
+/// \param Best If overload resolution was successful or found a deleted
+/// function, \p Best points to the candidate function found.
+///
+/// \returns The result of overload resolution.
+OverloadingResult
+OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc,
+ iterator &Best,
+ bool UserDefinedConversion) {
+ // Find the best viable function.
+ Best = end();
+ for (iterator Cand = begin(); Cand != end(); ++Cand) {
+ if (Cand->Viable)
+ if (Best == end() || isBetterOverloadCandidate(S, *Cand, *Best, Loc,
+ UserDefinedConversion))
+ Best = Cand;
+ }
+
+ // If we didn't find any viable functions, abort.
+ if (Best == end())
+ return OR_No_Viable_Function;
+
+ llvm::SmallVector<const NamedDecl *, 4> EquivalentCands;
+
+ // Make sure that this function is better than every other viable
+ // function. If not, we have an ambiguity.
+ for (iterator Cand = begin(); Cand != end(); ++Cand) {
+ if (Cand->Viable &&
+ Cand != Best &&
+ !isBetterOverloadCandidate(S, *Best, *Cand, Loc,
+ UserDefinedConversion)) {
+ if (S.isEquivalentInternalLinkageDeclaration(Best->Function,
+ Cand->Function)) {
+ EquivalentCands.push_back(Cand->Function);
+ continue;
+ }
+
+ Best = end();
+ return OR_Ambiguous;
+ }
+ }
+
+ // Best is the best viable function.
+ if (Best->Function &&
+ (Best->Function->isDeleted() ||
+ S.isFunctionConsideredUnavailable(Best->Function)))
+ return OR_Deleted;
+
+ if (!EquivalentCands.empty())
+ S.diagnoseEquivalentInternalLinkageDeclarations(Loc, Best->Function,
+ EquivalentCands);
+
+ return OR_Success;
+}
+
+namespace {
+
+enum OverloadCandidateKind {
+ oc_function,
+ oc_method,
+ oc_constructor,
+ oc_function_template,
+ oc_method_template,
+ oc_constructor_template,
+ oc_implicit_default_constructor,
+ oc_implicit_copy_constructor,
+ oc_implicit_move_constructor,
+ oc_implicit_copy_assignment,
+ oc_implicit_move_assignment,
+ oc_implicit_inherited_constructor
+};
+
+OverloadCandidateKind ClassifyOverloadCandidate(Sema &S,
+ FunctionDecl *Fn,
+ std::string &Description) {
+ bool isTemplate = false;
+
+ if (FunctionTemplateDecl *FunTmpl = Fn->getPrimaryTemplate()) {
+ isTemplate = true;
+ Description = S.getTemplateArgumentBindingsText(
+ FunTmpl->getTemplateParameters(), *Fn->getTemplateSpecializationArgs());
+ }
+
+ if (CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(Fn)) {
+ if (!Ctor->isImplicit())
+ return isTemplate ? oc_constructor_template : oc_constructor;
+
+ if (Ctor->getInheritedConstructor())
+ return oc_implicit_inherited_constructor;
+
+ if (Ctor->isDefaultConstructor())
+ return oc_implicit_default_constructor;
+
+ if (Ctor->isMoveConstructor())
+ return oc_implicit_move_constructor;
+
+ assert(Ctor->isCopyConstructor() &&
+ "unexpected sort of implicit constructor");
+ return oc_implicit_copy_constructor;
+ }
+
+ if (CXXMethodDecl *Meth = dyn_cast<CXXMethodDecl>(Fn)) {
+ // This actually gets spelled 'candidate function' for now, but
+ // it doesn't hurt to split it out.
+ if (!Meth->isImplicit())
+ return isTemplate ? oc_method_template : oc_method;
+
+ if (Meth->isMoveAssignmentOperator())
+ return oc_implicit_move_assignment;
+
+ if (Meth->isCopyAssignmentOperator())
+ return oc_implicit_copy_assignment;
+
+ assert(isa<CXXConversionDecl>(Meth) && "expected conversion");
+ return oc_method;
+ }
+
+ return isTemplate ? oc_function_template : oc_function;
+}
+
+void MaybeEmitInheritedConstructorNote(Sema &S, Decl *Fn) {
+ const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(Fn);
+ if (!Ctor) return;
+
+ Ctor = Ctor->getInheritedConstructor();
+ if (!Ctor) return;
+
+ S.Diag(Ctor->getLocation(), diag::note_ovl_candidate_inherited_constructor);
+}
+
+} // end anonymous namespace
+
+static bool isFunctionAlwaysEnabled(const ASTContext &Ctx,
+ const FunctionDecl *FD) {
+ for (auto *EnableIf : FD->specific_attrs<EnableIfAttr>()) {
+ bool AlwaysTrue;
+ if (!EnableIf->getCond()->EvaluateAsBooleanCondition(AlwaysTrue, Ctx))
+ return false;
+ if (!AlwaysTrue)
+ return false;
+ }
+ return true;
+}
+
+/// \brief Returns true if we can take the address of the function.
+///
+/// \param Complain - If true, we'll emit a diagnostic
+/// \param InOverloadResolution - For the purposes of emitting a diagnostic, are
+/// we in overload resolution?
+/// \param Loc - The location of the statement we're complaining about. Ignored
+/// if we're not complaining, or if we're in overload resolution.
+static bool checkAddressOfFunctionIsAvailable(Sema &S, const FunctionDecl *FD,
+ bool Complain,
+ bool InOverloadResolution,
+ SourceLocation Loc) {
+ if (!isFunctionAlwaysEnabled(S.Context, FD)) {
+ if (Complain) {
+ if (InOverloadResolution)
+ S.Diag(FD->getLocStart(),
+ diag::note_addrof_ovl_candidate_disabled_by_enable_if_attr);
+ else
+ S.Diag(Loc, diag::err_addrof_function_disabled_by_enable_if_attr) << FD;
+ }
+ return false;
+ }
+
+ auto I = std::find_if(FD->param_begin(), FD->param_end(),
+ std::mem_fn(&ParmVarDecl::hasAttr<PassObjectSizeAttr>));
+ if (I == FD->param_end())
+ return true;
+
+ if (Complain) {
+ // Add one to ParamNo because it's user-facing
+ unsigned ParamNo = std::distance(FD->param_begin(), I) + 1;
+ if (InOverloadResolution)
+ S.Diag(FD->getLocation(),
+ diag::note_ovl_candidate_has_pass_object_size_params)
+ << ParamNo;
+ else
+ S.Diag(Loc, diag::err_address_of_function_with_pass_object_size_params)
+ << FD << ParamNo;
+ }
+ return false;
+}
+
+static bool checkAddressOfCandidateIsAvailable(Sema &S,
+ const FunctionDecl *FD) {
+ return checkAddressOfFunctionIsAvailable(S, FD, /*Complain=*/true,
+ /*InOverloadResolution=*/true,
+ /*Loc=*/SourceLocation());
+}
+
+bool Sema::checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
+ bool Complain,
+ SourceLocation Loc) {
+ return ::checkAddressOfFunctionIsAvailable(*this, Function, Complain,
+ /*InOverloadResolution=*/false,
+ Loc);
+}
+
+// Notes the location of an overload candidate.
+void Sema::NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType,
+ bool TakingAddress) {
+ if (TakingAddress && !checkAddressOfCandidateIsAvailable(*this, Fn))
+ return;
+
+ std::string FnDesc;
+ OverloadCandidateKind K = ClassifyOverloadCandidate(*this, Fn, FnDesc);
+ PartialDiagnostic PD = PDiag(diag::note_ovl_candidate)
+ << (unsigned) K << FnDesc;
+
+ HandleFunctionTypeMismatch(PD, Fn->getType(), DestType);
+ Diag(Fn->getLocation(), PD);
+ MaybeEmitInheritedConstructorNote(*this, Fn);
+}
+
+// Notes the location of all overload candidates designated through
+// OverloadedExpr
+void Sema::NoteAllOverloadCandidates(Expr *OverloadedExpr, QualType DestType,
+ bool TakingAddress) {
+ assert(OverloadedExpr->getType() == Context.OverloadTy);
+
+ OverloadExpr::FindResult Ovl = OverloadExpr::find(OverloadedExpr);
+ OverloadExpr *OvlExpr = Ovl.Expression;
+
+ for (UnresolvedSetIterator I = OvlExpr->decls_begin(),
+ IEnd = OvlExpr->decls_end();
+ I != IEnd; ++I) {
+ if (FunctionTemplateDecl *FunTmpl =
+ dyn_cast<FunctionTemplateDecl>((*I)->getUnderlyingDecl()) ) {
+ NoteOverloadCandidate(FunTmpl->getTemplatedDecl(), DestType,
+ TakingAddress);
+ } else if (FunctionDecl *Fun
+ = dyn_cast<FunctionDecl>((*I)->getUnderlyingDecl()) ) {
+ NoteOverloadCandidate(Fun, DestType, TakingAddress);
+ }
+ }
+}
+
+/// Diagnoses an ambiguous conversion. The partial diagnostic is the
+/// "lead" diagnostic; it will be given two arguments, the source and
+/// target types of the conversion.
+void ImplicitConversionSequence::DiagnoseAmbiguousConversion(
+ Sema &S,
+ SourceLocation CaretLoc,
+ const PartialDiagnostic &PDiag) const {
+ S.Diag(CaretLoc, PDiag)
+ << Ambiguous.getFromType() << Ambiguous.getToType();
+ // FIXME: The note limiting machinery is borrowed from
+ // OverloadCandidateSet::NoteCandidates; there's an opportunity for
+ // refactoring here.
+ const OverloadsShown ShowOverloads = S.Diags.getShowOverloads();
+ unsigned CandsShown = 0;
+ AmbiguousConversionSequence::const_iterator I, E;
+ for (I = Ambiguous.begin(), E = Ambiguous.end(); I != E; ++I) {
+ if (CandsShown >= 4 && ShowOverloads == Ovl_Best)
+ break;
+ ++CandsShown;
+ S.NoteOverloadCandidate(*I);
+ }
+ if (I != E)
+ S.Diag(SourceLocation(), diag::note_ovl_too_many_candidates) << int(E - I);
+}
+
+static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
+ unsigned I, bool TakingCandidateAddress) {
+ const ImplicitConversionSequence &Conv = Cand->Conversions[I];
+ assert(Conv.isBad());
+ assert(Cand->Function && "for now, candidate must be a function");
+ FunctionDecl *Fn = Cand->Function;
+
+ // There's a conversion slot for the object argument if this is a
+  // non-constructor method. Note that 'I' corresponds to the
+ // conversion-slot index.
+ bool isObjectArgument = false;
+ if (isa<CXXMethodDecl>(Fn) && !isa<CXXConstructorDecl>(Fn)) {
+ if (I == 0)
+ isObjectArgument = true;
+ else
+ I--;
+ }
+
+ std::string FnDesc;
+ OverloadCandidateKind FnKind = ClassifyOverloadCandidate(S, Fn, FnDesc);
+
+ Expr *FromExpr = Conv.Bad.FromExpr;
+ QualType FromTy = Conv.Bad.getFromType();
+ QualType ToTy = Conv.Bad.getToType();
+
+ if (FromTy == S.Context.OverloadTy) {
+ assert(FromExpr && "overload set argument came from implicit argument?");
+ Expr *E = FromExpr->IgnoreParens();
+ if (isa<UnaryOperator>(E))
+ E = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
+ DeclarationName Name = cast<OverloadExpr>(E)->getName();
+
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_overload)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << ToTy << Name << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ // Do some hand-waving analysis to see if the non-viability is due
+ // to a qualifier mismatch.
+ CanQualType CFromTy = S.Context.getCanonicalType(FromTy);
+ CanQualType CToTy = S.Context.getCanonicalType(ToTy);
+ if (CanQual<ReferenceType> RT = CToTy->getAs<ReferenceType>())
+ CToTy = RT->getPointeeType();
+ else {
+ // TODO: detect and diagnose the full richness of const mismatches.
+    if (CanQual<PointerType> FromPT = CFromTy->getAs<PointerType>())
+      if (CanQual<PointerType> ToPT = CToTy->getAs<PointerType>()) {
+        CFromTy = FromPT->getPointeeType();
+        CToTy = ToPT->getPointeeType();
+      }
+ }
+
+ if (CToTy.getUnqualifiedType() == CFromTy.getUnqualifiedType() &&
+ !CToTy.isAtLeastAsQualifiedAs(CFromTy)) {
+ Qualifiers FromQs = CFromTy.getQualifiers();
+ Qualifiers ToQs = CToTy.getQualifiers();
+
+ if (FromQs.getAddressSpace() != ToQs.getAddressSpace()) {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_addrspace)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << FromTy
+ << FromQs.getAddressSpace() << ToQs.getAddressSpace()
+ << (unsigned) isObjectArgument << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ if (FromQs.getObjCLifetime() != ToQs.getObjCLifetime()) {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_ownership)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << FromTy
+ << FromQs.getObjCLifetime() << ToQs.getObjCLifetime()
+ << (unsigned) isObjectArgument << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ if (FromQs.getObjCGCAttr() != ToQs.getObjCGCAttr()) {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_gc)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << FromTy
+ << FromQs.getObjCGCAttr() << ToQs.getObjCGCAttr()
+ << (unsigned) isObjectArgument << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ unsigned CVR = FromQs.getCVRQualifiers() & ~ToQs.getCVRQualifiers();
+ assert(CVR && "unexpected qualifiers mismatch");
+
+ if (isObjectArgument) {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_cvr_this)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << FromTy << (CVR - 1);
+ } else {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_cvr)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << FromTy << (CVR - 1) << I+1;
+ }
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ // Special diagnostic for failure to convert an initializer list, since
+ // telling the user that it has type void is not useful.
+ if (FromExpr && isa<InitListExpr>(FromExpr)) {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_list_argument)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << FromTy << ToTy << (unsigned) isObjectArgument << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ // Diagnose references or pointers to incomplete types differently,
+ // since it's far from impossible that the incompleteness triggered
+ // the failure.
+ QualType TempFromTy = FromTy.getNonReferenceType();
+ if (const PointerType *PTy = TempFromTy->getAs<PointerType>())
+ TempFromTy = PTy->getPointeeType();
+ if (TempFromTy->isIncompleteType()) {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_conv_incomplete)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << FromTy << ToTy << (unsigned) isObjectArgument << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ // Diagnose base -> derived pointer conversions.
+ unsigned BaseToDerivedConversion = 0;
+ if (const PointerType *FromPtrTy = FromTy->getAs<PointerType>()) {
+ if (const PointerType *ToPtrTy = ToTy->getAs<PointerType>()) {
+ if (ToPtrTy->getPointeeType().isAtLeastAsQualifiedAs(
+ FromPtrTy->getPointeeType()) &&
+ !FromPtrTy->getPointeeType()->isIncompleteType() &&
+ !ToPtrTy->getPointeeType()->isIncompleteType() &&
+ S.IsDerivedFrom(SourceLocation(), ToPtrTy->getPointeeType(),
+ FromPtrTy->getPointeeType()))
+ BaseToDerivedConversion = 1;
+ }
+ } else if (const ObjCObjectPointerType *FromPtrTy
+ = FromTy->getAs<ObjCObjectPointerType>()) {
+ if (const ObjCObjectPointerType *ToPtrTy
+ = ToTy->getAs<ObjCObjectPointerType>())
+ if (const ObjCInterfaceDecl *FromIface = FromPtrTy->getInterfaceDecl())
+ if (const ObjCInterfaceDecl *ToIface = ToPtrTy->getInterfaceDecl())
+ if (ToPtrTy->getPointeeType().isAtLeastAsQualifiedAs(
+ FromPtrTy->getPointeeType()) &&
+ FromIface->isSuperClassOf(ToIface))
+ BaseToDerivedConversion = 2;
+ } else if (const ReferenceType *ToRefTy = ToTy->getAs<ReferenceType>()) {
+ if (ToRefTy->getPointeeType().isAtLeastAsQualifiedAs(FromTy) &&
+ !FromTy->isIncompleteType() &&
+ !ToRefTy->getPointeeType()->isIncompleteType() &&
+ S.IsDerivedFrom(SourceLocation(), ToRefTy->getPointeeType(), FromTy)) {
+ BaseToDerivedConversion = 3;
+ } else if (ToTy->isLValueReferenceType() && !FromExpr->isLValue() &&
+ ToTy.getNonReferenceType().getCanonicalType() ==
+ FromTy.getNonReferenceType().getCanonicalType()) {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_lvalue)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << (unsigned) isObjectArgument << I + 1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+ }
+
+ if (BaseToDerivedConversion) {
+ S.Diag(Fn->getLocation(),
+ diag::note_ovl_candidate_bad_base_to_derived_conv)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << (BaseToDerivedConversion - 1)
+ << FromTy << ToTy << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ if (isa<ObjCObjectPointerType>(CFromTy) &&
+ isa<PointerType>(CToTy)) {
+ Qualifiers FromQs = CFromTy.getQualifiers();
+ Qualifiers ToQs = CToTy.getQualifiers();
+ if (FromQs.getObjCLifetime() != ToQs.getObjCLifetime()) {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_arc_conv)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << FromTy << ToTy << (unsigned) isObjectArgument << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+ }
+
+ if (TakingCandidateAddress &&
+ !checkAddressOfCandidateIsAvailable(S, Cand->Function))
+ return;
+
+ // Emit the generic diagnostic and, optionally, add the hints to it.
+ PartialDiagnostic FDiag = S.PDiag(diag::note_ovl_candidate_bad_conv);
+ FDiag << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << FromTy << ToTy << (unsigned) isObjectArgument << I + 1
+ << (unsigned) (Cand->Fix.Kind);
+
+ // If we can fix the conversion, suggest the FixIts.
+ for (std::vector<FixItHint>::iterator HI = Cand->Fix.Hints.begin(),
+ HE = Cand->Fix.Hints.end(); HI != HE; ++HI)
+ FDiag << *HI;
+ S.Diag(Fn->getLocation(), FDiag);
+
+ MaybeEmitInheritedConstructorNote(S, Fn);
+}
+
+/// Additional arity mismatch diagnosis specific to function overload
+/// candidates. This is not covered by the more general DiagnoseArityMismatch()
+/// over a candidate in any candidate set.
+static bool CheckArityMismatch(Sema &S, OverloadCandidate *Cand,
+ unsigned NumArgs) {
+ FunctionDecl *Fn = Cand->Function;
+ unsigned MinParams = Fn->getMinRequiredArguments();
+
+ // With invalid overloaded operators, it's possible that we think we
+ // have an arity mismatch when in fact it looks like we have the
+ // right number of arguments, because only overloaded operators have
+ // the weird behavior of overloading member and non-member functions.
+ // Just don't report anything.
+ if (Fn->isInvalidDecl() &&
+ Fn->getDeclName().getNameKind() == DeclarationName::CXXOperatorName)
+ return true;
+
+ if (NumArgs < MinParams) {
+ assert((Cand->FailureKind == ovl_fail_too_few_arguments) ||
+ (Cand->FailureKind == ovl_fail_bad_deduction &&
+ Cand->DeductionFailure.Result == Sema::TDK_TooFewArguments));
+ } else {
+ assert((Cand->FailureKind == ovl_fail_too_many_arguments) ||
+ (Cand->FailureKind == ovl_fail_bad_deduction &&
+ Cand->DeductionFailure.Result == Sema::TDK_TooManyArguments));
+ }
+
+ return false;
+}
+
+/// General arity mismatch diagnosis over a candidate in a candidate set.
+static void DiagnoseArityMismatch(Sema &S, Decl *D, unsigned NumFormalArgs) {
+ assert(isa<FunctionDecl>(D) &&
+ "The templated declaration should at least be a function"
+ " when diagnosing bad template argument deduction due to too many"
+ " or too few arguments");
+
+ FunctionDecl *Fn = cast<FunctionDecl>(D);
+
+ // TODO: treat calls to a missing default constructor as a special case
+ const FunctionProtoType *FnTy = Fn->getType()->getAs<FunctionProtoType>();
+ unsigned MinParams = Fn->getMinRequiredArguments();
+
+ // at least / at most / exactly
+ unsigned mode, modeCount;
+ if (NumFormalArgs < MinParams) {
+ if (MinParams != FnTy->getNumParams() || FnTy->isVariadic() ||
+ FnTy->isTemplateVariadic())
+ mode = 0; // "at least"
+ else
+ mode = 2; // "exactly"
+ modeCount = MinParams;
+ } else {
+ if (MinParams != FnTy->getNumParams())
+ mode = 1; // "at most"
+ else
+ mode = 2; // "exactly"
+ modeCount = FnTy->getNumParams();
+ }
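+
+  // For example, 'void f(int, int = 0)' called with no arguments is
+  // diagnosed as requiring "at least" 1 argument; called with three
+  // arguments, as taking "at most" 2.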
+
+ std::string Description;
+ OverloadCandidateKind FnKind = ClassifyOverloadCandidate(S, Fn, Description);
+
+ if (modeCount == 1 && Fn->getParamDecl(0)->getDeclName())
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_arity_one)
+ << (unsigned) FnKind << (Fn->getDescribedFunctionTemplate() != nullptr)
+ << mode << Fn->getParamDecl(0) << NumFormalArgs;
+ else
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_arity)
+ << (unsigned) FnKind << (Fn->getDescribedFunctionTemplate() != nullptr)
+ << mode << modeCount << NumFormalArgs;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+}
+
+/// Arity mismatch diagnosis specific to a function overload candidate.
+static void DiagnoseArityMismatch(Sema &S, OverloadCandidate *Cand,
+ unsigned NumFormalArgs) {
+ if (!CheckArityMismatch(S, Cand, NumFormalArgs))
+ DiagnoseArityMismatch(S, Cand->Function, NumFormalArgs);
+}
+
+static TemplateDecl *getDescribedTemplate(Decl *Templated) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Templated))
+ return FD->getDescribedFunctionTemplate();
+ else if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Templated))
+ return RD->getDescribedClassTemplate();
+
+ llvm_unreachable("Unsupported: Getting the described template declaration"
+ " for bad deduction diagnosis");
+}
+
+/// Diagnose a failed template-argument deduction.
+static void DiagnoseBadDeduction(Sema &S, Decl *Templated,
+ DeductionFailureInfo &DeductionFailure,
+ unsigned NumArgs,
+ bool TakingCandidateAddress) {
+ TemplateParameter Param = DeductionFailure.getTemplateParameter();
+ NamedDecl *ParamD;
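+  // The '||' chain stops at the first dyn_cast that succeeds, leaving ParamD
+  // pointing at whichever kind of template parameter was recorded (if any).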
+ (ParamD = Param.dyn_cast<TemplateTypeParmDecl*>()) ||
+ (ParamD = Param.dyn_cast<NonTypeTemplateParmDecl*>()) ||
+ (ParamD = Param.dyn_cast<TemplateTemplateParmDecl*>());
+ switch (DeductionFailure.Result) {
+ case Sema::TDK_Success:
+ llvm_unreachable("TDK_success while diagnosing bad deduction");
+
+ case Sema::TDK_Incomplete: {
+ assert(ParamD && "no parameter found for incomplete deduction result");
+ S.Diag(Templated->getLocation(),
+ diag::note_ovl_candidate_incomplete_deduction)
+ << ParamD->getDeclName();
+ MaybeEmitInheritedConstructorNote(S, Templated);
+ return;
+ }
+
+ case Sema::TDK_Underqualified: {
+ assert(ParamD && "no parameter found for bad qualifiers deduction result");
+ TemplateTypeParmDecl *TParam = cast<TemplateTypeParmDecl>(ParamD);
+
+ QualType Param = DeductionFailure.getFirstArg()->getAsType();
+
+ // Param will have been canonicalized, but it should just be a
+ // qualified version of ParamD, so move the qualifiers to that.
+ QualifierCollector Qs;
+ Qs.strip(Param);
+ QualType NonCanonParam = Qs.apply(S.Context, TParam->getTypeForDecl());
+ assert(S.Context.hasSameType(Param, NonCanonParam));
+
+ // Arg has also been canonicalized, but there's nothing we can do
+ // about that. It also doesn't matter as much, because it won't
+ // have any template parameters in it (because deduction isn't
+ // done on dependent types).
+ QualType Arg = DeductionFailure.getSecondArg()->getAsType();
+
+ S.Diag(Templated->getLocation(), diag::note_ovl_candidate_underqualified)
+ << ParamD->getDeclName() << Arg << NonCanonParam;
+ MaybeEmitInheritedConstructorNote(S, Templated);
+ return;
+ }
+
+ case Sema::TDK_Inconsistent: {
+ assert(ParamD && "no parameter found for inconsistent deduction result");
+ int which = 0;
+ if (isa<TemplateTypeParmDecl>(ParamD))
+ which = 0;
+ else if (isa<NonTypeTemplateParmDecl>(ParamD))
+ which = 1;
+    else
+      which = 2;
+
+ S.Diag(Templated->getLocation(),
+ diag::note_ovl_candidate_inconsistent_deduction)
+ << which << ParamD->getDeclName() << *DeductionFailure.getFirstArg()
+ << *DeductionFailure.getSecondArg();
+ MaybeEmitInheritedConstructorNote(S, Templated);
+ return;
+ }
+
+ case Sema::TDK_InvalidExplicitArguments:
+ assert(ParamD && "no parameter found for invalid explicit arguments");
+ if (ParamD->getDeclName())
+ S.Diag(Templated->getLocation(),
+ diag::note_ovl_candidate_explicit_arg_mismatch_named)
+ << ParamD->getDeclName();
+ else {
+ int index = 0;
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(ParamD))
+ index = TTP->getIndex();
+ else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(ParamD))
+ index = NTTP->getIndex();
+ else
+ index = cast<TemplateTemplateParmDecl>(ParamD)->getIndex();
+ S.Diag(Templated->getLocation(),
+ diag::note_ovl_candidate_explicit_arg_mismatch_unnamed)
+ << (index + 1);
+ }
+ MaybeEmitInheritedConstructorNote(S, Templated);
+ return;
+
+ case Sema::TDK_TooManyArguments:
+ case Sema::TDK_TooFewArguments:
+ DiagnoseArityMismatch(S, Templated, NumArgs);
+ return;
+
+ case Sema::TDK_InstantiationDepth:
+ S.Diag(Templated->getLocation(),
+ diag::note_ovl_candidate_instantiation_depth);
+ MaybeEmitInheritedConstructorNote(S, Templated);
+ return;
+
+ case Sema::TDK_SubstitutionFailure: {
+ // Format the template argument list into the argument string.
+ SmallString<128> TemplateArgString;
+ if (TemplateArgumentList *Args =
+ DeductionFailure.getTemplateArgumentList()) {
+ TemplateArgString = " ";
+ TemplateArgString += S.getTemplateArgumentBindingsText(
+ getDescribedTemplate(Templated)->getTemplateParameters(), *Args);
+ }
+
+ // If this candidate was disabled by enable_if, say so.
+ PartialDiagnosticAt *PDiag = DeductionFailure.getSFINAEDiagnostic();
+ if (PDiag && PDiag->second.getDiagID() ==
+ diag::err_typename_nested_not_found_enable_if) {
+ // FIXME: Use the source range of the condition, and the fully-qualified
+ // name of the enable_if template. These are both present in PDiag.
+ S.Diag(PDiag->first, diag::note_ovl_candidate_disabled_by_enable_if)
+ << "'enable_if'" << TemplateArgString;
+ return;
+ }
+
+ // Format the SFINAE diagnostic into the argument string.
+ // FIXME: Add a general mechanism to include a PartialDiagnostic *'s
+ // formatted message in another diagnostic.
+ SmallString<128> SFINAEArgString;
+ SourceRange R;
+ if (PDiag) {
+ SFINAEArgString = ": ";
+ R = SourceRange(PDiag->first, PDiag->first);
+ PDiag->second.EmitToString(S.getDiagnostics(), SFINAEArgString);
+ }
+
+ S.Diag(Templated->getLocation(),
+ diag::note_ovl_candidate_substitution_failure)
+ << TemplateArgString << SFINAEArgString << R;
+ MaybeEmitInheritedConstructorNote(S, Templated);
+ return;
+ }
+
+ case Sema::TDK_FailedOverloadResolution: {
+ OverloadExpr::FindResult R = OverloadExpr::find(DeductionFailure.getExpr());
+ S.Diag(Templated->getLocation(),
+ diag::note_ovl_candidate_failed_overload_resolution)
+ << R.Expression->getName();
+ return;
+ }
+
+ case Sema::TDK_DeducedMismatch: {
+ // Format the template argument list into the argument string.
+ SmallString<128> TemplateArgString;
+ if (TemplateArgumentList *Args =
+ DeductionFailure.getTemplateArgumentList()) {
+ TemplateArgString = " ";
+ TemplateArgString += S.getTemplateArgumentBindingsText(
+ getDescribedTemplate(Templated)->getTemplateParameters(), *Args);
+ }
+
+ S.Diag(Templated->getLocation(), diag::note_ovl_candidate_deduced_mismatch)
+ << (*DeductionFailure.getCallArgIndex() + 1)
+ << *DeductionFailure.getFirstArg() << *DeductionFailure.getSecondArg()
+ << TemplateArgString;
+ break;
+ }
+
+ case Sema::TDK_NonDeducedMismatch: {
+ // FIXME: Provide a source location to indicate what we couldn't match.
+ TemplateArgument FirstTA = *DeductionFailure.getFirstArg();
+ TemplateArgument SecondTA = *DeductionFailure.getSecondArg();
+ if (FirstTA.getKind() == TemplateArgument::Template &&
+ SecondTA.getKind() == TemplateArgument::Template) {
+ TemplateName FirstTN = FirstTA.getAsTemplate();
+ TemplateName SecondTN = SecondTA.getAsTemplate();
+ if (FirstTN.getKind() == TemplateName::Template &&
+ SecondTN.getKind() == TemplateName::Template) {
+ if (FirstTN.getAsTemplateDecl()->getName() ==
+ SecondTN.getAsTemplateDecl()->getName()) {
+ // FIXME: This fixes a bad diagnostic where both templates are named
+ // the same. This particular case is a bit difficult since:
+ // 1) It is passed as a string to the diagnostic printer.
+ // 2) The diagnostic printer only attempts to find a better
+ // name for types, not decls.
+          //        Ideally, this should be folded into the diagnostic printer.
+ S.Diag(Templated->getLocation(),
+ diag::note_ovl_candidate_non_deduced_mismatch_qualified)
+ << FirstTN.getAsTemplateDecl() << SecondTN.getAsTemplateDecl();
+ return;
+ }
+ }
+ }
+
+ if (TakingCandidateAddress && isa<FunctionDecl>(Templated) &&
+ !checkAddressOfCandidateIsAvailable(S, cast<FunctionDecl>(Templated)))
+ return;
+
+ // FIXME: For generic lambda parameters, check if the function is a lambda
+ // call operator, and if so, emit a prettier and more informative
+ // diagnostic that mentions 'auto' and lambda in addition to
+ // (or instead of?) the canonical template type parameters.
+ S.Diag(Templated->getLocation(),
+ diag::note_ovl_candidate_non_deduced_mismatch)
+ << FirstTA << SecondTA;
+ return;
+ }
+ // TODO: diagnose these individually, then kill off
+ // note_ovl_candidate_bad_deduction, which is uselessly vague.
+ case Sema::TDK_MiscellaneousDeductionFailure:
+ S.Diag(Templated->getLocation(), diag::note_ovl_candidate_bad_deduction);
+ MaybeEmitInheritedConstructorNote(S, Templated);
+ return;
+ }
+}
+
+/// Diagnose a failed template-argument deduction, for function calls.
+static void DiagnoseBadDeduction(Sema &S, OverloadCandidate *Cand,
+ unsigned NumArgs,
+ bool TakingCandidateAddress) {
+ unsigned TDK = Cand->DeductionFailure.Result;
+ if (TDK == Sema::TDK_TooFewArguments || TDK == Sema::TDK_TooManyArguments) {
+ if (CheckArityMismatch(S, Cand, NumArgs))
+ return;
+ }
+ DiagnoseBadDeduction(S, Cand->Function, // pattern
+ Cand->DeductionFailure, NumArgs, TakingCandidateAddress);
+}
+
+/// CUDA: diagnose an invalid call across targets.
+static void DiagnoseBadTarget(Sema &S, OverloadCandidate *Cand) {
+ FunctionDecl *Caller = cast<FunctionDecl>(S.CurContext);
+ FunctionDecl *Callee = Cand->Function;
+
+ Sema::CUDAFunctionTarget CallerTarget = S.IdentifyCUDATarget(Caller),
+ CalleeTarget = S.IdentifyCUDATarget(Callee);
+
+ std::string FnDesc;
+ OverloadCandidateKind FnKind = ClassifyOverloadCandidate(S, Callee, FnDesc);
+
+ S.Diag(Callee->getLocation(), diag::note_ovl_candidate_bad_target)
+ << (unsigned)FnKind << CalleeTarget << CallerTarget;
+
+ // This could be an implicit constructor for which we could not infer the
+  // target due to a collision. Diagnose that case.
+ CXXMethodDecl *Meth = dyn_cast<CXXMethodDecl>(Callee);
+ if (Meth != nullptr && Meth->isImplicit()) {
+ CXXRecordDecl *ParentClass = Meth->getParent();
+ Sema::CXXSpecialMember CSM;
+
+ switch (FnKind) {
+ default:
+ return;
+ case oc_implicit_default_constructor:
+ CSM = Sema::CXXDefaultConstructor;
+ break;
+ case oc_implicit_copy_constructor:
+ CSM = Sema::CXXCopyConstructor;
+ break;
+ case oc_implicit_move_constructor:
+ CSM = Sema::CXXMoveConstructor;
+ break;
+ case oc_implicit_copy_assignment:
+ CSM = Sema::CXXCopyAssignment;
+ break;
+ case oc_implicit_move_assignment:
+ CSM = Sema::CXXMoveAssignment;
+ break;
+    }
+
+ bool ConstRHS = false;
+ if (Meth->getNumParams()) {
+ if (const ReferenceType *RT =
+ Meth->getParamDecl(0)->getType()->getAs<ReferenceType>()) {
+ ConstRHS = RT->getPointeeType().isConstQualified();
+ }
+ }
+
+ S.inferCUDATargetForImplicitSpecialMember(ParentClass, CSM, Meth,
+ /* ConstRHS */ ConstRHS,
+ /* Diagnose */ true);
+ }
+}
+
+static void DiagnoseFailedEnableIfAttr(Sema &S, OverloadCandidate *Cand) {
+ FunctionDecl *Callee = Cand->Function;
+ EnableIfAttr *Attr = static_cast<EnableIfAttr*>(Cand->DeductionFailure.Data);
+
+ S.Diag(Callee->getLocation(),
+ diag::note_ovl_candidate_disabled_by_enable_if_attr)
+ << Attr->getCond()->getSourceRange() << Attr->getMessage();
+}
+
+/// Generates a 'note' diagnostic for an overload candidate. We've
+/// already generated a primary error at the call site.
+///
+/// It really does need to be a single diagnostic with its caret
+/// pointed at the candidate declaration. Yes, this creates some
+/// major challenges of technical writing. Yes, this makes pointing
+/// out problems with specific arguments quite awkward. It's still
+/// better than generating twenty screens of text for every failed
+/// overload.
+///
+/// It would be great to be able to express per-candidate problems
+/// more richly for those diagnostic clients that cared, but we'd
+/// still have to be just as careful with the default diagnostics.
+static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
+ unsigned NumArgs,
+ bool TakingCandidateAddress) {
+ FunctionDecl *Fn = Cand->Function;
+
+ // Note deleted candidates, but only if they're viable.
+ if (Cand->Viable && (Fn->isDeleted() ||
+ S.isFunctionConsideredUnavailable(Fn))) {
+ std::string FnDesc;
+ OverloadCandidateKind FnKind = ClassifyOverloadCandidate(S, Fn, FnDesc);
+
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_deleted)
+ << FnKind << FnDesc
+ << (Fn->isDeleted() ? (Fn->isDeletedAsWritten() ? 1 : 2) : 0);
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ // We don't really have anything else to say about viable candidates.
+ if (Cand->Viable) {
+ S.NoteOverloadCandidate(Fn);
+ return;
+ }
+
+ switch (Cand->FailureKind) {
+ case ovl_fail_too_many_arguments:
+ case ovl_fail_too_few_arguments:
+ return DiagnoseArityMismatch(S, Cand, NumArgs);
+
+ case ovl_fail_bad_deduction:
+ return DiagnoseBadDeduction(S, Cand, NumArgs, TakingCandidateAddress);
+
+ case ovl_fail_illegal_constructor: {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_illegal_constructor)
+ << (Fn->getPrimaryTemplate() ? 1 : 0);
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ case ovl_fail_trivial_conversion:
+ case ovl_fail_bad_final_conversion:
+ case ovl_fail_final_conversion_not_exact:
+ return S.NoteOverloadCandidate(Fn);
+
+ case ovl_fail_bad_conversion: {
+ unsigned I = (Cand->IgnoreObjectArgument ? 1 : 0);
+ for (unsigned N = Cand->NumConversions; I != N; ++I)
+ if (Cand->Conversions[I].isBad())
+ return DiagnoseBadConversion(S, Cand, I, TakingCandidateAddress);
+
+ // FIXME: this currently happens when we're called from SemaInit
+ // when user-conversion overload fails. Figure out how to handle
+ // those conditions and diagnose them well.
+ return S.NoteOverloadCandidate(Fn);
+ }
+
+ case ovl_fail_bad_target:
+ return DiagnoseBadTarget(S, Cand);
+
+ case ovl_fail_enable_if:
+ return DiagnoseFailedEnableIfAttr(S, Cand);
+ }
+}
+
+static void NoteSurrogateCandidate(Sema &S, OverloadCandidate *Cand) {
+ // Desugar the type of the surrogate down to a function type,
+ // retaining as many typedefs as possible while still showing
+ // the function type (and, therefore, its parameter types).
+ QualType FnType = Cand->Surrogate->getConversionType();
+ bool isLValueReference = false;
+ bool isRValueReference = false;
+ bool isPointer = false;
+ if (const LValueReferenceType *FnTypeRef =
+ FnType->getAs<LValueReferenceType>()) {
+ FnType = FnTypeRef->getPointeeType();
+ isLValueReference = true;
+ } else if (const RValueReferenceType *FnTypeRef =
+ FnType->getAs<RValueReferenceType>()) {
+ FnType = FnTypeRef->getPointeeType();
+ isRValueReference = true;
+ }
+ if (const PointerType *FnTypePtr = FnType->getAs<PointerType>()) {
+ FnType = FnTypePtr->getPointeeType();
+ isPointer = true;
+ }
+ // Desugar down to a function type.
+ FnType = QualType(FnType->getAs<FunctionType>(), 0);
+ // Reconstruct the pointer/reference as appropriate.
+ if (isPointer) FnType = S.Context.getPointerType(FnType);
+ if (isRValueReference) FnType = S.Context.getRValueReferenceType(FnType);
+ if (isLValueReference) FnType = S.Context.getLValueReferenceType(FnType);
+
+ S.Diag(Cand->Surrogate->getLocation(), diag::note_ovl_surrogate_cand)
+ << FnType;
+ MaybeEmitInheritedConstructorNote(S, Cand->Surrogate);
+}
+
+static void NoteBuiltinOperatorCandidate(Sema &S, StringRef Opc,
+ SourceLocation OpLoc,
+ OverloadCandidate *Cand) {
+ assert(Cand->NumConversions <= 2 && "builtin operator is not binary");
+ std::string TypeStr("operator");
+ TypeStr += Opc;
+ TypeStr += "(";
+ TypeStr += Cand->BuiltinTypes.ParamTypes[0].getAsString();
+ if (Cand->NumConversions == 1) {
+ TypeStr += ")";
+ S.Diag(OpLoc, diag::note_ovl_builtin_unary_candidate) << TypeStr;
+ } else {
+ TypeStr += ", ";
+ TypeStr += Cand->BuiltinTypes.ParamTypes[1].getAsString();
+ TypeStr += ")";
+ S.Diag(OpLoc, diag::note_ovl_builtin_binary_candidate) << TypeStr;
+ }
+}
+
+static void NoteAmbiguousUserConversions(Sema &S, SourceLocation OpLoc,
+ OverloadCandidate *Cand) {
+ unsigned NoOperands = Cand->NumConversions;
+ for (unsigned ArgIdx = 0; ArgIdx < NoOperands; ++ArgIdx) {
+ const ImplicitConversionSequence &ICS = Cand->Conversions[ArgIdx];
+ if (ICS.isBad()) break; // all meaningless after first invalid
+ if (!ICS.isAmbiguous()) continue;
+
+ ICS.DiagnoseAmbiguousConversion(S, OpLoc,
+ S.PDiag(diag::note_ambiguous_type_conversion));
+ }
+}
+
+static SourceLocation GetLocationForCandidate(const OverloadCandidate *Cand) {
+ if (Cand->Function)
+ return Cand->Function->getLocation();
+ if (Cand->IsSurrogate)
+ return Cand->Surrogate->getLocation();
+ return SourceLocation();
+}
+
+static unsigned RankDeductionFailure(const DeductionFailureInfo &DFI) {
+ switch ((Sema::TemplateDeductionResult)DFI.Result) {
+ case Sema::TDK_Success:
+ llvm_unreachable("TDK_success while diagnosing bad deduction");
+
+ case Sema::TDK_Invalid:
+ case Sema::TDK_Incomplete:
+ return 1;
+
+ case Sema::TDK_Underqualified:
+ case Sema::TDK_Inconsistent:
+ return 2;
+
+ case Sema::TDK_SubstitutionFailure:
+ case Sema::TDK_DeducedMismatch:
+ case Sema::TDK_NonDeducedMismatch:
+ case Sema::TDK_MiscellaneousDeductionFailure:
+ return 3;
+
+ case Sema::TDK_InstantiationDepth:
+ case Sema::TDK_FailedOverloadResolution:
+ return 4;
+
+ case Sema::TDK_InvalidExplicitArguments:
+ return 5;
+
+ case Sema::TDK_TooManyArguments:
+ case Sema::TDK_TooFewArguments:
+ return 6;
+ }
+ llvm_unreachable("Unhandled deduction result");
+}
+
+namespace {
+struct CompareOverloadCandidatesForDisplay {
+ Sema &S;
+ SourceLocation Loc;
+ size_t NumArgs;
+
+ CompareOverloadCandidatesForDisplay(Sema &S, SourceLocation Loc, size_t nArgs)
+      : S(S), Loc(Loc), NumArgs(nArgs) {}
+
+ bool operator()(const OverloadCandidate *L,
+ const OverloadCandidate *R) {
+ // Fast-path this check.
+ if (L == R) return false;
+
+ // Order first by viability.
+ if (L->Viable) {
+ if (!R->Viable) return true;
+
+ // TODO: introduce a tri-valued comparison for overload
+ // candidates. Would be more worthwhile if we had a sort
+ // that could exploit it.
+ if (isBetterOverloadCandidate(S, *L, *R, SourceLocation())) return true;
+ if (isBetterOverloadCandidate(S, *R, *L, SourceLocation())) return false;
+ } else if (R->Viable)
+ return false;
+
+ assert(L->Viable == R->Viable);
+
+ // Criteria by which we can sort non-viable candidates:
+ if (!L->Viable) {
+ // 1. Arity mismatches come after other candidates.
+ if (L->FailureKind == ovl_fail_too_many_arguments ||
+ L->FailureKind == ovl_fail_too_few_arguments) {
+ if (R->FailureKind == ovl_fail_too_many_arguments ||
+ R->FailureKind == ovl_fail_too_few_arguments) {
+ int LDist = std::abs((int)L->getNumParams() - (int)NumArgs);
+ int RDist = std::abs((int)R->getNumParams() - (int)NumArgs);
+ if (LDist == RDist) {
+ if (L->FailureKind == R->FailureKind)
+ // Sort non-surrogates before surrogates.
+ return !L->IsSurrogate && R->IsSurrogate;
+ // Sort candidates requiring fewer parameters than there were
+ // arguments given before candidates requiring more parameters
+ // than there were arguments given.
+ return L->FailureKind == ovl_fail_too_many_arguments;
+ }
+ return LDist < RDist;
+ }
+ return false;
+ }
+ if (R->FailureKind == ovl_fail_too_many_arguments ||
+ R->FailureKind == ovl_fail_too_few_arguments)
+ return true;
+
+ // 2. Bad conversions come first and are ordered by the number
+ // of bad conversions and quality of good conversions.
+ if (L->FailureKind == ovl_fail_bad_conversion) {
+ if (R->FailureKind != ovl_fail_bad_conversion)
+ return true;
+
+ // The conversion that can be fixed with a smaller number of changes
+ // comes first.
+ unsigned numLFixes = L->Fix.NumConversionsFixed;
+ unsigned numRFixes = R->Fix.NumConversionsFixed;
+ numLFixes = (numLFixes == 0) ? UINT_MAX : numLFixes;
+ numRFixes = (numRFixes == 0) ? UINT_MAX : numRFixes;
+ if (numLFixes != numRFixes) {
+ return numLFixes < numRFixes;
+ }
+
+ // If there's any ordering between the defined conversions...
+ // FIXME: this might not be transitive.
+ assert(L->NumConversions == R->NumConversions);
+
+ int leftBetter = 0;
+ unsigned I = (L->IgnoreObjectArgument || R->IgnoreObjectArgument);
+ for (unsigned E = L->NumConversions; I != E; ++I) {
+ switch (CompareImplicitConversionSequences(S, Loc,
+ L->Conversions[I],
+ R->Conversions[I])) {
+ case ImplicitConversionSequence::Better:
+ leftBetter++;
+ break;
+
+ case ImplicitConversionSequence::Worse:
+ leftBetter--;
+ break;
+
+ case ImplicitConversionSequence::Indistinguishable:
+ break;
+ }
+ }
+ if (leftBetter > 0) return true;
+ if (leftBetter < 0) return false;
+
+ } else if (R->FailureKind == ovl_fail_bad_conversion)
+ return false;
+
+ if (L->FailureKind == ovl_fail_bad_deduction) {
+ if (R->FailureKind != ovl_fail_bad_deduction)
+ return true;
+
+ if (L->DeductionFailure.Result != R->DeductionFailure.Result)
+ return RankDeductionFailure(L->DeductionFailure)
+ < RankDeductionFailure(R->DeductionFailure);
+ } else if (R->FailureKind == ovl_fail_bad_deduction)
+ return false;
+
+ // TODO: others?
+ }
+
+ // Sort everything else by location.
+ SourceLocation LLoc = GetLocationForCandidate(L);
+ SourceLocation RLoc = GetLocationForCandidate(R);
+
+ // Put candidates without locations (e.g. builtins) at the end.
+ if (LLoc.isInvalid()) return false;
+ if (RLoc.isInvalid()) return true;
+
+ return S.SourceMgr.isBeforeInTranslationUnit(LLoc, RLoc);
+ }
+};
+}
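+// In summary, the display order produced by this comparator is: viable
+// candidates (best first), then bad-conversion failures, bad deductions,
+// other failures, and finally arity mismatches; remaining ties are broken
+// by source location, with location-less candidates (e.g. builtins) last.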
+
+/// CompleteNonViableCandidate - Normally, overload resolution only
+/// computes up to the first bad conversion. This produces the FixIt
+/// set if possible.
+static void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
+ ArrayRef<Expr *> Args) {
+ assert(!Cand->Viable);
+
+ // Don't do anything on failures other than bad conversion.
+ if (Cand->FailureKind != ovl_fail_bad_conversion) return;
+
+ // We only want the FixIts if all the arguments can be corrected.
+ bool Unfixable = false;
+ // Use an implicit copy initialization to check conversion fixes.
+ Cand->Fix.setConversionChecker(TryCopyInitialization);
+
+ // Skip forward to the first bad conversion.
+ unsigned ConvIdx = (Cand->IgnoreObjectArgument ? 1 : 0);
+ unsigned ConvCount = Cand->NumConversions;
+ while (true) {
+ assert(ConvIdx != ConvCount && "no bad conversion in candidate");
+ ConvIdx++;
+ if (Cand->Conversions[ConvIdx - 1].isBad()) {
+ Unfixable = !Cand->TryToFixBadConversion(ConvIdx - 1, S);
+ break;
+ }
+ }
+
+ if (ConvIdx == ConvCount)
+ return;
+
+ assert(!Cand->Conversions[ConvIdx].isInitialized() &&
+ "remaining conversion is initialized?");
+
+ // FIXME: this should probably be preserved from the overload
+ // operation somehow.
+ bool SuppressUserConversions = false;
+
+ const FunctionProtoType* Proto;
+ unsigned ArgIdx = ConvIdx;
+
+ if (Cand->IsSurrogate) {
+ QualType ConvType
+ = Cand->Surrogate->getConversionType().getNonReferenceType();
+ if (const PointerType *ConvPtrType = ConvType->getAs<PointerType>())
+ ConvType = ConvPtrType->getPointeeType();
+ Proto = ConvType->getAs<FunctionProtoType>();
+ ArgIdx--;
+ } else if (Cand->Function) {
+ Proto = Cand->Function->getType()->getAs<FunctionProtoType>();
+ if (isa<CXXMethodDecl>(Cand->Function) &&
+ !isa<CXXConstructorDecl>(Cand->Function))
+ ArgIdx--;
+ } else {
+ // Builtin binary operator with a bad first conversion.
+ assert(ConvCount <= 3);
+ for (; ConvIdx != ConvCount; ++ConvIdx)
+ Cand->Conversions[ConvIdx]
+ = TryCopyInitialization(S, Args[ConvIdx],
+ Cand->BuiltinTypes.ParamTypes[ConvIdx],
+ SuppressUserConversions,
+ /*InOverloadResolution*/ true,
+ /*AllowObjCWritebackConversion=*/
+ S.getLangOpts().ObjCAutoRefCount);
+ return;
+ }
+
+ // Fill in the rest of the conversions.
+ unsigned NumParams = Proto->getNumParams();
+ for (; ConvIdx != ConvCount; ++ConvIdx, ++ArgIdx) {
+ if (ArgIdx < NumParams) {
+ Cand->Conversions[ConvIdx] = TryCopyInitialization(
+ S, Args[ArgIdx], Proto->getParamType(ArgIdx), SuppressUserConversions,
+ /*InOverloadResolution=*/true,
+ /*AllowObjCWritebackConversion=*/
+ S.getLangOpts().ObjCAutoRefCount);
+ // Store the FixIt in the candidate if it exists.
+ if (!Unfixable && Cand->Conversions[ConvIdx].isBad())
+ Unfixable = !Cand->TryToFixBadConversion(ConvIdx, S);
+ }
+ else
+ Cand->Conversions[ConvIdx].setEllipsis();
+ }
+}
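+// For bad-conversion failures, the loop above leaves every conversion slot
+// initialized, so that NoteFunctionCandidate can describe each argument;
+// arguments beyond the candidate's parameter list are marked as matching
+// the ellipsis.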
+
+/// NoteCandidates - When overload resolution fails, prints diagnostic
+/// messages containing the candidates in the candidate set.
+void OverloadCandidateSet::NoteCandidates(Sema &S,
+ OverloadCandidateDisplayKind OCD,
+ ArrayRef<Expr *> Args,
+ StringRef Opc,
+ SourceLocation OpLoc) {
+ // Sort the candidates by viability and position. Sorting directly would
+ // be prohibitive, so we make a set of pointers and sort those.
+ SmallVector<OverloadCandidate*, 32> Cands;
+ if (OCD == OCD_AllCandidates) Cands.reserve(size());
+ for (iterator Cand = begin(), LastCand = end(); Cand != LastCand; ++Cand) {
+ if (Cand->Viable)
+ Cands.push_back(Cand);
+ else if (OCD == OCD_AllCandidates) {
+ CompleteNonViableCandidate(S, Cand, Args);
+ if (Cand->Function || Cand->IsSurrogate)
+ Cands.push_back(Cand);
+ // Otherwise, this is a non-viable builtin candidate. We do not, in general,
+ // want to list every possible builtin candidate.
+ }
+ }
+
+ std::sort(Cands.begin(), Cands.end(),
+ CompareOverloadCandidatesForDisplay(S, OpLoc, Args.size()));
+
+ bool ReportedAmbiguousConversions = false;
+
+ SmallVectorImpl<OverloadCandidate*>::iterator I, E;
+ const OverloadsShown ShowOverloads = S.Diags.getShowOverloads();
+ unsigned CandsShown = 0;
+ for (I = Cands.begin(), E = Cands.end(); I != E; ++I) {
+ OverloadCandidate *Cand = *I;
+
+ // Set an arbitrary limit on the number of candidate functions we'll spam
+ // the user with. FIXME: This limit should depend on details of the
+ // candidate list.
+ if (CandsShown >= 4 && ShowOverloads == Ovl_Best) {
+ break;
+ }
+ ++CandsShown;
+
+ if (Cand->Function)
+ NoteFunctionCandidate(S, Cand, Args.size(),
+ /*TakingCandidateAddress=*/false);
+ else if (Cand->IsSurrogate)
+ NoteSurrogateCandidate(S, Cand);
+ else {
+ assert(Cand->Viable &&
+ "Non-viable built-in candidates are not added to Cands.");
+ // Generally we only see ambiguities including viable builtin
+ // operators if overload resolution got screwed up by an
+ // ambiguous user-defined conversion.
+ //
+ // FIXME: It's quite possible for different conversions to see
+ // different ambiguities, though.
+ if (!ReportedAmbiguousConversions) {
+ NoteAmbiguousUserConversions(S, OpLoc, Cand);
+ ReportedAmbiguousConversions = true;
+ }
+
+ // If this is a viable builtin, print it.
+ NoteBuiltinOperatorCandidate(S, Opc, OpLoc, Cand);
+ }
+ }
+
+ if (I != E)
+ S.Diag(OpLoc, diag::note_ovl_too_many_candidates) << int(E - I);
+}
+
+static SourceLocation
+GetLocationForCandidate(const TemplateSpecCandidate *Cand) {
+ return Cand->Specialization ? Cand->Specialization->getLocation()
+ : SourceLocation();
+}
+
+namespace {
+struct CompareTemplateSpecCandidatesForDisplay {
+ Sema &S;
+ CompareTemplateSpecCandidatesForDisplay(Sema &S) : S(S) {}
+
+ bool operator()(const TemplateSpecCandidate *L,
+ const TemplateSpecCandidate *R) {
+ // Fast-path this check.
+ if (L == R)
+ return false;
+
+ // Assuming that both candidates are not matches...
+
+ // Sort by the ranking of deduction failures.
+ if (L->DeductionFailure.Result != R->DeductionFailure.Result)
+ return RankDeductionFailure(L->DeductionFailure) <
+ RankDeductionFailure(R->DeductionFailure);
+
+ // Sort everything else by location.
+ SourceLocation LLoc = GetLocationForCandidate(L);
+ SourceLocation RLoc = GetLocationForCandidate(R);
+
+ // Put candidates without locations (e.g. builtins) at the end.
+ if (LLoc.isInvalid())
+ return false;
+ if (RLoc.isInvalid())
+ return true;
+
+ return S.SourceMgr.isBeforeInTranslationUnit(LLoc, RLoc);
+ }
+};
+}
+
+/// Diagnose a template argument deduction failure.
+/// We are treating these failures as overload failures due to bad
+/// deductions.
+void TemplateSpecCandidate::NoteDeductionFailure(Sema &S,
+ bool ForTakingAddress) {
+ DiagnoseBadDeduction(S, Specialization, // pattern
+ DeductionFailure, /*NumArgs=*/0, ForTakingAddress);
+}
+
+void TemplateSpecCandidateSet::destroyCandidates() {
+ for (iterator i = begin(), e = end(); i != e; ++i) {
+ i->DeductionFailure.Destroy();
+ }
+}
+
+void TemplateSpecCandidateSet::clear() {
+ destroyCandidates();
+ Candidates.clear();
+}
+
+/// NoteCandidates - When no template specialization match is found, prints
+/// diagnostic messages containing the non-matching specializations that form
+/// the candidate set.
+/// This is analogous to OverloadCandidateSet::NoteCandidates() with
+/// OCD == OCD_AllCandidates and Cand->Viable == false.
+void TemplateSpecCandidateSet::NoteCandidates(Sema &S, SourceLocation Loc) {
+ // Sort the candidates by position (assuming no candidate is a match).
+ // Sorting directly would be prohibitive, so we make a set of pointers
+ // and sort those.
+ SmallVector<TemplateSpecCandidate *, 32> Cands;
+ Cands.reserve(size());
+ for (iterator Cand = begin(), LastCand = end(); Cand != LastCand; ++Cand) {
+ if (Cand->Specialization)
+ Cands.push_back(Cand);
+ // Otherwise, this is a non-matching builtin candidate. We do not,
+ // in general, want to list every possible builtin candidate.
+ }
+
+ std::sort(Cands.begin(), Cands.end(),
+ CompareTemplateSpecCandidatesForDisplay(S));
+
+ // FIXME: Perhaps rename OverloadsShown and getShowOverloads()
+ // for generalization purposes (?).
+ const OverloadsShown ShowOverloads = S.Diags.getShowOverloads();
+
+ SmallVectorImpl<TemplateSpecCandidate *>::iterator I, E;
+ unsigned CandsShown = 0;
+ for (I = Cands.begin(), E = Cands.end(); I != E; ++I) {
+ TemplateSpecCandidate *Cand = *I;
+
+ // Set an arbitrary limit on the number of candidates we'll spam
+ // the user with. FIXME: This limit should depend on details of the
+ // candidate list.
+ if (CandsShown >= 4 && ShowOverloads == Ovl_Best)
+ break;
+ ++CandsShown;
+
+ assert(Cand->Specialization &&
+ "Non-matching built-in candidates are not added to Cands.");
+ Cand->NoteDeductionFailure(S, ForTakingAddress);
+ }
+
+ if (I != E)
+ S.Diag(Loc, diag::note_ovl_too_many_candidates) << int(E - I);
+}
+
+// [PossiblyAFunctionType] --> [Return]
+// NonFunctionType --> NonFunctionType
+// R (A) --> R(A)
+// R (*)(A) --> R (A)
+// R (&)(A) --> R (A)
+// R (S::*)(A) --> R (A)
+QualType Sema::ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType) {
+ QualType Ret = PossiblyAFunctionType;
+ if (const PointerType *ToTypePtr =
+ PossiblyAFunctionType->getAs<PointerType>())
+ Ret = ToTypePtr->getPointeeType();
+ else if (const ReferenceType *ToTypeRef =
+ PossiblyAFunctionType->getAs<ReferenceType>())
+ Ret = ToTypeRef->getPointeeType();
+ else if (const MemberPointerType *MemTypePtr =
+ PossiblyAFunctionType->getAs<MemberPointerType>())
+ Ret = MemTypePtr->getPointeeType();
+ Ret =
+ Context.getCanonicalType(Ret).getUnqualifiedType();
+ return Ret;
+}
+
+namespace {
+// A helper class to help with address of function resolution
+// - allows us to avoid passing around all those ugly parameters
+class AddressOfFunctionResolver {
+ Sema& S;
+ Expr* SourceExpr;
+ const QualType& TargetType;
+ QualType TargetFunctionType; // Extracted function type from target type
+
+ bool Complain;
+ ASTContext& Context;
+
+ bool TargetTypeIsNonStaticMemberFunction;
+ bool FoundNonTemplateFunction;
+ bool StaticMemberFunctionFromBoundPointer;
+ bool HasComplained;
+
+ OverloadExpr::FindResult OvlExprInfo;
+ OverloadExpr *OvlExpr;
+ TemplateArgumentListInfo OvlExplicitTemplateArgs;
+ SmallVector<std::pair<DeclAccessPair, FunctionDecl*>, 4> Matches;
+ TemplateSpecCandidateSet FailedCandidates;
+
+public:
+ AddressOfFunctionResolver(Sema &S, Expr *SourceExpr,
+ const QualType &TargetType, bool Complain)
+ : S(S), SourceExpr(SourceExpr), TargetType(TargetType),
+ Complain(Complain), Context(S.getASTContext()),
+ TargetTypeIsNonStaticMemberFunction(
+ !!TargetType->getAs<MemberPointerType>()),
+ FoundNonTemplateFunction(false),
+ StaticMemberFunctionFromBoundPointer(false),
+ HasComplained(false),
+ OvlExprInfo(OverloadExpr::find(SourceExpr)),
+ OvlExpr(OvlExprInfo.Expression),
+ FailedCandidates(OvlExpr->getNameLoc(), /*ForTakingAddress=*/true) {
+ ExtractUnqualifiedFunctionTypeFromTargetType();
+
+ if (TargetFunctionType->isFunctionType()) {
+ if (UnresolvedMemberExpr *UME = dyn_cast<UnresolvedMemberExpr>(OvlExpr))
+ if (!UME->isImplicitAccess() &&
+ !S.ResolveSingleFunctionTemplateSpecialization(UME))
+ StaticMemberFunctionFromBoundPointer = true;
+ } else if (OvlExpr->hasExplicitTemplateArgs()) {
+ DeclAccessPair dap;
+ if (FunctionDecl *Fn = S.ResolveSingleFunctionTemplateSpecialization(
+ OvlExpr, false, &dap)) {
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn))
+ if (!Method->isStatic()) {
+ // If the target type is a non-function type and the function found
+ // is a non-static member function, pretend as if that was the
+ // target, it's the only possible type to end up with.
+ TargetTypeIsNonStaticMemberFunction = true;
+
+ // And skip adding the function if it's not in the proper form.
+ // We'll diagnose this due to an empty set of functions.
+ if (!OvlExprInfo.HasFormOfMemberPointer)
+ return;
+ }
+
+ Matches.push_back(std::make_pair(dap, Fn));
+ }
+ return;
+ }
+
+ if (OvlExpr->hasExplicitTemplateArgs())
+ OvlExpr->copyTemplateArgumentsInto(OvlExplicitTemplateArgs);
+
+ if (FindAllFunctionsThatMatchTargetTypeExactly()) {
+ // C++ [over.over]p4:
+ // If more than one function is selected, [...]
+ if (Matches.size() > 1 && !eliminateSuboptimalOverloadCandidates()) {
+ if (FoundNonTemplateFunction)
+ EliminateAllTemplateMatches();
+ else
+ EliminateAllExceptMostSpecializedTemplate();
+ }
+ }
+
+ if (S.getLangOpts().CUDA && S.getLangOpts().CUDATargetOverloads &&
+ Matches.size() > 1)
+ EliminateSuboptimalCudaMatches();
+ }
+
+ bool hasComplained() const { return HasComplained; }
+
+private:
+ // Is A considered a better overload candidate for the desired type than B?
+ bool isBetterCandidate(const FunctionDecl *A, const FunctionDecl *B) {
+ return hasBetterEnableIfAttrs(S, A, B);
+ }
+
+ // Returns true if we've eliminated any (read: all but one) candidates, false
+ // otherwise.
+ bool eliminateSuboptimalOverloadCandidates() {
+ // Same algorithm as overload resolution -- one pass to pick the "best",
+ // another pass to be sure that nothing is better than the best.
+ auto Best = Matches.begin();
+ for (auto I = Matches.begin()+1, E = Matches.end(); I != E; ++I)
+ if (isBetterCandidate(I->second, Best->second))
+ Best = I;
+
+ const FunctionDecl *BestFn = Best->second;
+ auto IsBestOrInferiorToBest = [this, BestFn](
+ const std::pair<DeclAccessPair, FunctionDecl *> &Pair) {
+ return BestFn == Pair.second || isBetterCandidate(BestFn, Pair.second);
+ };
+
+ // Note: We explicitly leave Matches unmodified if there isn't a clear best
+ // option, so we can potentially give the user a better error
+ if (!std::all_of(Matches.begin(), Matches.end(), IsBestOrInferiorToBest))
+ return false;
+ Matches[0] = *Best;
+ Matches.resize(1);
+ return true;
+ }
+
+ bool isTargetTypeAFunction() const {
+ return TargetFunctionType->isFunctionType();
+ }
+
+ // [ToType] [Return]
+ // R (*)(A) --> R (A), IsNonStaticMemberFunction = false
+ // R (&)(A) --> R (A), IsNonStaticMemberFunction = false
+ // R (S::*)(A) --> R (A), IsNonStaticMemberFunction = true
+ inline void ExtractUnqualifiedFunctionTypeFromTargetType() {
+ TargetFunctionType = S.ExtractUnqualifiedFunctionType(TargetType);
+ }
+
+ // Returns true if any matching specializations were found.
+ bool AddMatchingTemplateFunction(FunctionTemplateDecl* FunctionTemplate,
+ const DeclAccessPair& CurAccessFunPair) {
+ if (CXXMethodDecl *Method
+ = dyn_cast<CXXMethodDecl>(FunctionTemplate->getTemplatedDecl())) {
+ // Skip non-static function templates when converting to pointer, and
+ // static when converting to member pointer.
+ if (Method->isStatic() == TargetTypeIsNonStaticMemberFunction)
+ return false;
+ }
+ else if (TargetTypeIsNonStaticMemberFunction)
+ return false;
+
+ // C++ [over.over]p2:
+ // If the name is a function template, template argument deduction is
+ // done (14.8.2.2), and if the argument deduction succeeds, the
+ // resulting template argument list is used to generate a single
+ // function template specialization, which is added to the set of
+ // overloaded functions considered.
+ FunctionDecl *Specialization = nullptr;
+ TemplateDeductionInfo Info(FailedCandidates.getLocation());
+ if (Sema::TemplateDeductionResult Result
+ = S.DeduceTemplateArguments(FunctionTemplate,
+ &OvlExplicitTemplateArgs,
+ TargetFunctionType, Specialization,
+ Info, /*InOverloadResolution=*/true)) {
+ // Make a note of the failed deduction for diagnostics.
+ FailedCandidates.addCandidate()
+ .set(FunctionTemplate->getTemplatedDecl(),
+ MakeDeductionFailureInfo(Context, Result, Info));
+ return false;
+ }
+
+ // Template argument deduction ensures that we have an exact match or
+ // compatible pointer-to-function arguments that would be adjusted by ICS.
+ // This function template specialization works.
+ Specialization = cast<FunctionDecl>(Specialization->getCanonicalDecl());
+ assert(S.isSameOrCompatibleFunctionType(
+ Context.getCanonicalType(Specialization->getType()),
+ Context.getCanonicalType(TargetFunctionType)));
+
+ if (!S.checkAddressOfFunctionIsAvailable(Specialization))
+ return false;
+
+ Matches.push_back(std::make_pair(CurAccessFunPair, Specialization));
+ return true;
+ }
+
+ bool AddMatchingNonTemplateFunction(NamedDecl* Fn,
+ const DeclAccessPair& CurAccessFunPair) {
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn)) {
+ // Skip non-static functions when converting to pointer, and static
+ // when converting to member pointer.
+ if (Method->isStatic() == TargetTypeIsNonStaticMemberFunction)
+ return false;
+ }
+ else if (TargetTypeIsNonStaticMemberFunction)
+ return false;
+
+ if (FunctionDecl *FunDecl = dyn_cast<FunctionDecl>(Fn)) {
+ if (S.getLangOpts().CUDA)
+ if (FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext))
+ if (!Caller->isImplicit() && S.CheckCUDATarget(Caller, FunDecl))
+ return false;
+
+ // If any candidate has a placeholder return type, trigger its deduction
+ // now.
+ if (S.getLangOpts().CPlusPlus14 &&
+ FunDecl->getReturnType()->isUndeducedType() &&
+ S.DeduceReturnType(FunDecl, SourceExpr->getLocStart(), Complain)) {
+ HasComplained |= Complain;
+ return false;
+ }
+
+ if (!S.checkAddressOfFunctionIsAvailable(FunDecl))
+ return false;
+
+ QualType ResultTy;
+ if (Context.hasSameUnqualifiedType(TargetFunctionType,
+ FunDecl->getType()) ||
+ S.IsNoReturnConversion(FunDecl->getType(), TargetFunctionType,
+ ResultTy) ||
+ (!S.getLangOpts().CPlusPlus && TargetType->isVoidPointerType())) {
+ Matches.push_back(std::make_pair(
+ CurAccessFunPair, cast<FunctionDecl>(FunDecl->getCanonicalDecl())));
+ FoundNonTemplateFunction = true;
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ bool FindAllFunctionsThatMatchTargetTypeExactly() {
+ bool Ret = false;
+
+ // If the overload expression doesn't have the form of a pointer to
+ // member, don't try to convert it to a pointer-to-member type.
+ if (IsInvalidFormOfPointerToMemberFunction())
+ return false;
+
+ for (UnresolvedSetIterator I = OvlExpr->decls_begin(),
+ E = OvlExpr->decls_end();
+ I != E; ++I) {
+ // Look through any using declarations to find the underlying function.
+ NamedDecl *Fn = (*I)->getUnderlyingDecl();
+
+ // C++ [over.over]p3:
+ // Non-member functions and static member functions match
+ // targets of type "pointer-to-function" or "reference-to-function."
+ // Nonstatic member functions match targets of
+ // type "pointer-to-member-function."
+ // Note that according to DR 247, the containing class does not matter.
+ if (FunctionTemplateDecl *FunctionTemplate
+ = dyn_cast<FunctionTemplateDecl>(Fn)) {
+ if (AddMatchingTemplateFunction(FunctionTemplate, I.getPair()))
+ Ret = true;
+ }
+ // If we have explicit template arguments supplied, skip non-templates.
+ else if (!OvlExpr->hasExplicitTemplateArgs() &&
+ AddMatchingNonTemplateFunction(Fn, I.getPair()))
+ Ret = true;
+ }
+ assert(Ret || Matches.empty());
+ return Ret;
+ }
+
+ void EliminateAllExceptMostSpecializedTemplate() {
+ // [...] and any given function template specialization F1 is
+ // eliminated if the set contains a second function template
+ // specialization whose function template is more specialized
+ // than the function template of F1 according to the partial
+ // ordering rules of 14.5.5.2.
+
+ // The algorithm specified above is quadratic. We instead use a
+ // two-pass algorithm (similar to the one used to identify the
+ // best viable function in an overload set) that identifies the
+ // best function template (if it exists).
+
+ UnresolvedSet<4> MatchesCopy; // TODO: avoid!
+ for (unsigned I = 0, E = Matches.size(); I != E; ++I)
+ MatchesCopy.addDecl(Matches[I].second, Matches[I].first.getAccess());
+
+ // TODO: It looks like FailedCandidates does not serve much purpose
+ // here, since the no_viable diagnostic has index 0.
+ UnresolvedSetIterator Result = S.getMostSpecialized(
+ MatchesCopy.begin(), MatchesCopy.end(), FailedCandidates,
+ SourceExpr->getLocStart(), S.PDiag(),
+ S.PDiag(diag::err_addr_ovl_ambiguous) << Matches[0]
+ .second->getDeclName(),
+ S.PDiag(diag::note_ovl_candidate) << (unsigned)oc_function_template,
+ Complain, TargetFunctionType);
+
+ if (Result != MatchesCopy.end()) {
+ // Make it the first and only element
+ Matches[0].first = Matches[Result - MatchesCopy.begin()].first;
+ Matches[0].second = cast<FunctionDecl>(*Result);
+ Matches.resize(1);
+ } else
+ HasComplained |= Complain;
+ }
+
+ void EliminateAllTemplateMatches() {
+ // [...] any function template specializations in the set are
+ // eliminated if the set also contains a non-template function, [...]
+ for (unsigned I = 0, N = Matches.size(); I != N; ) {
+ if (Matches[I].second->getPrimaryTemplate() == nullptr)
+ ++I;
+ else {
+ Matches[I] = Matches[--N];
+ Matches.resize(N);
+ }
+ }
+ }
+
+ void EliminateSuboptimalCudaMatches() {
+ S.EraseUnwantedCUDAMatches(dyn_cast<FunctionDecl>(S.CurContext), Matches);
+ }
+
+public:
+ void ComplainNoMatchesFound() const {
+ assert(Matches.empty());
+ S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_no_viable)
+ << OvlExpr->getName() << TargetFunctionType
+ << OvlExpr->getSourceRange();
+ if (FailedCandidates.empty())
+ S.NoteAllOverloadCandidates(OvlExpr, TargetFunctionType,
+ /*TakingAddress=*/true);
+ else {
+ // We have some deduction failure messages. Use them to diagnose
+ // the function templates, and diagnose the non-template candidates
+ // normally.
+ for (UnresolvedSetIterator I = OvlExpr->decls_begin(),
+ IEnd = OvlExpr->decls_end();
+ I != IEnd; ++I)
+ if (FunctionDecl *Fun =
+ dyn_cast<FunctionDecl>((*I)->getUnderlyingDecl()))
+ if (!functionHasPassObjectSizeParams(Fun))
+ S.NoteOverloadCandidate(Fun, TargetFunctionType,
+ /*TakingAddress=*/true);
+ FailedCandidates.NoteCandidates(S, OvlExpr->getLocStart());
+ }
+ }
+
+ bool IsInvalidFormOfPointerToMemberFunction() const {
+ return TargetTypeIsNonStaticMemberFunction &&
+ !OvlExprInfo.HasFormOfMemberPointer;
+ }
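+ // For example, '&S::f' has the member-pointer form, while a plain 'S::f'
+ // or a parenthesized '&(S::f)' does not (C++ [expr.unary.op]p4).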
+
+ void ComplainIsInvalidFormOfPointerToMemberFunction() const {
+ // TODO: Should we condition this on whether any functions might
+ // have matched, or is it more appropriate to do that in callers?
+ // TODO: a fixit wouldn't hurt.
+ S.Diag(OvlExpr->getNameLoc(), diag::err_addr_ovl_no_qualifier)
+ << TargetType << OvlExpr->getSourceRange();
+ }
+
+ bool IsStaticMemberFunctionFromBoundPointer() const {
+ return StaticMemberFunctionFromBoundPointer;
+ }
+
+ void ComplainIsStaticMemberFunctionFromBoundPointer() const {
+ S.Diag(OvlExpr->getLocStart(),
+ diag::err_invalid_form_pointer_member_function)
+ << OvlExpr->getSourceRange();
+ }
+
+ void ComplainOfInvalidConversion() const {
+ S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_not_func_ptrref)
+ << OvlExpr->getName() << TargetType;
+ }
+
+ void ComplainMultipleMatchesFound() const {
+ assert(Matches.size() > 1);
+ S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_ambiguous)
+ << OvlExpr->getName()
+ << OvlExpr->getSourceRange();
+ S.NoteAllOverloadCandidates(OvlExpr, TargetFunctionType,
+ /*TakingAddress=*/true);
+ }
+
+ bool hadMultipleCandidates() const { return (OvlExpr->getNumDecls() > 1); }
+
+ int getNumMatches() const { return Matches.size(); }
+
+ FunctionDecl* getMatchingFunctionDecl() const {
+ if (Matches.size() != 1) return nullptr;
+ return Matches[0].second;
+ }
+
+ const DeclAccessPair* getMatchingFunctionAccessPair() const {
+ if (Matches.size() != 1) return nullptr;
+ return &Matches[0].first;
+ }
+};
+}
+
+/// ResolveAddressOfOverloadedFunction - Try to resolve the address of
+/// an overloaded function (C++ [over.over]), where @p AddressOfExpr is an
+/// expression with overloaded function type and @p TargetType is the type
+/// we're trying to resolve to. For example:
+///
+/// @code
+/// int f(double);
+/// int f(int);
+///
+/// int (*pfd)(double) = f; // selects f(double)
+/// @endcode
+///
+/// This routine returns the resulting FunctionDecl if it could be
+/// resolved, and NULL otherwise. When @p Complain is true, this
+/// routine will emit diagnostics if there is an error.
+FunctionDecl *
+Sema::ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
+ QualType TargetType,
+ bool Complain,
+ DeclAccessPair &FoundResult,
+ bool *pHadMultipleCandidates) {
+ assert(AddressOfExpr->getType() == Context.OverloadTy);
+
+ AddressOfFunctionResolver Resolver(*this, AddressOfExpr, TargetType,
+ Complain);
+ int NumMatches = Resolver.getNumMatches();
+ FunctionDecl *Fn = nullptr;
+ bool ShouldComplain = Complain && !Resolver.hasComplained();
+ if (NumMatches == 0 && ShouldComplain) {
+ if (Resolver.IsInvalidFormOfPointerToMemberFunction())
+ Resolver.ComplainIsInvalidFormOfPointerToMemberFunction();
+ else
+ Resolver.ComplainNoMatchesFound();
+ }
+ else if (NumMatches > 1 && ShouldComplain)
+ Resolver.ComplainMultipleMatchesFound();
+ else if (NumMatches == 1) {
+ Fn = Resolver.getMatchingFunctionDecl();
+ assert(Fn);
+ FoundResult = *Resolver.getMatchingFunctionAccessPair();
+ if (Complain) {
+ if (Resolver.IsStaticMemberFunctionFromBoundPointer())
+ Resolver.ComplainIsStaticMemberFunctionFromBoundPointer();
+ else
+ CheckAddressOfMemberAccess(AddressOfExpr, FoundResult);
+ }
+ }
+
+ if (pHadMultipleCandidates)
+ *pHadMultipleCandidates = Resolver.hadMultipleCandidates();
+ return Fn;
+}
+
+/// \brief Given an expression that refers to an overloaded function, try to
+/// resolve that overloaded function expression down to a single function.
+///
+/// This routine can only resolve template-ids that refer to a single function
+/// template, where that template-id refers to a single template whose template
+/// arguments are either provided by the template-id or have defaults,
+/// as described in C++0x [temp.arg.explicit]p3.
+///
+/// If no template-ids are found, no diagnostics are emitted and NULL is
+/// returned.
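+///
+/// For example (illustrative): given 'template<typename T> void f(T*);',
+/// the template-id 'f<int>' identifies the single specialization
+/// 'void f(int*)' even with no target type available.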
+FunctionDecl *
+Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
+ bool Complain,
+ DeclAccessPair *FoundResult) {
+ // C++ [over.over]p1:
+ // [...] [Note: any redundant set of parentheses surrounding the
+ // overloaded function name is ignored (5.1). ]
+ // C++ [over.over]p1:
+ // [...] The overloaded function name can be preceded by the &
+ // operator.
+
+ // If we didn't actually find any template-ids, we're done.
+ if (!ovl->hasExplicitTemplateArgs())
+ return nullptr;
+
+ TemplateArgumentListInfo ExplicitTemplateArgs;
+ ovl->copyTemplateArgumentsInto(ExplicitTemplateArgs);
+ TemplateSpecCandidateSet FailedCandidates(ovl->getNameLoc());
+
+ // Look through all of the overloaded functions, searching for one
+ // whose type matches exactly.
+ FunctionDecl *Matched = nullptr;
+ for (UnresolvedSetIterator I = ovl->decls_begin(),
+ E = ovl->decls_end(); I != E; ++I) {
+ // C++0x [temp.arg.explicit]p3:
+ // [...] In contexts where deduction is done and fails, or in contexts
+ // where deduction is not done, if a template argument list is
+ // specified and it, along with any default template arguments,
+ // identifies a single function template specialization, then the
+ // template-id is an lvalue for the function template specialization.
+ FunctionTemplateDecl *FunctionTemplate
+ = cast<FunctionTemplateDecl>((*I)->getUnderlyingDecl());
+
+ // C++ [over.over]p2:
+ // If the name is a function template, template argument deduction is
+ // done (14.8.2.2), and if the argument deduction succeeds, the
+ // resulting template argument list is used to generate a single
+ // function template specialization, which is added to the set of
+ // overloaded functions considered.
+ FunctionDecl *Specialization = nullptr;
+ TemplateDeductionInfo Info(FailedCandidates.getLocation());
+ if (TemplateDeductionResult Result
+ = DeduceTemplateArguments(FunctionTemplate, &ExplicitTemplateArgs,
+ Specialization, Info,
+ /*InOverloadResolution=*/true)) {
+ // Make a note of the failed deduction for diagnostics.
+ // TODO: Actually use the failed-deduction info?
+ FailedCandidates.addCandidate()
+ .set(FunctionTemplate->getTemplatedDecl(),
+ MakeDeductionFailureInfo(Context, Result, Info));
+ continue;
+ }
+
+ assert(Specialization && "no specialization and no error?");
+
+ // Multiple matches; we can't resolve to a single declaration.
+ if (Matched) {
+ if (Complain) {
+ Diag(ovl->getExprLoc(), diag::err_addr_ovl_ambiguous)
+ << ovl->getName();
+ NoteAllOverloadCandidates(ovl);
+ }
+ return nullptr;
+ }
+
+ Matched = Specialization;
+ if (FoundResult) *FoundResult = I.getPair();
+ }
+
+ if (Matched && getLangOpts().CPlusPlus14 &&
+ Matched->getReturnType()->isUndeducedType() &&
+ DeduceReturnType(Matched, ovl->getExprLoc(), Complain))
+ return nullptr;
+
+ return Matched;
+}
+
+// Resolve and fix an overloaded expression that can be resolved
+// because it identifies a single function template specialization.
+//
+// The last three arguments should only be supplied if Complain = true.
+//
+// Returns true if it was logically possible to resolve the expression
+// this way, regardless of whether or not it succeeded. Always returns
+// true if 'complain' is set.
+bool Sema::ResolveAndFixSingleFunctionTemplateSpecialization(
+ ExprResult &SrcExpr, bool doFunctionPointerConversion,
+ bool complain, SourceRange OpRangeForComplaining,
+ QualType DestTypeForComplaining,
+ unsigned DiagIDForComplaining) {
+ assert(SrcExpr.get()->getType() == Context.OverloadTy);
+
+ OverloadExpr::FindResult ovl = OverloadExpr::find(SrcExpr.get());
+
+ DeclAccessPair found;
+ ExprResult SingleFunctionExpression;
+ if (FunctionDecl *fn = ResolveSingleFunctionTemplateSpecialization(
+ ovl.Expression, /*complain*/ false, &found)) {
+ if (DiagnoseUseOfDecl(fn, SrcExpr.get()->getLocStart())) {
+ SrcExpr = ExprError();
+ return true;
+ }
+
+ // It is only correct to resolve to an instance method if we're
+ // resolving a form that's permitted to be a pointer to member.
+ // Otherwise we'll end up making a bound member expression, which
+ // is illegal in all the contexts we resolve like this.
+ if (!ovl.HasFormOfMemberPointer &&
+ isa<CXXMethodDecl>(fn) &&
+ cast<CXXMethodDecl>(fn)->isInstance()) {
+ if (!complain) return false;
+
+ Diag(ovl.Expression->getExprLoc(),
+ diag::err_bound_member_function)
+ << 0 << ovl.Expression->getSourceRange();
+
+ // TODO: I believe we only end up here if there's a mix of
+ // static and non-static candidates (otherwise the expression
+ // would have 'bound member' type, not 'overload' type).
+ // Ideally we would note which candidate was chosen and why
+ // the static candidates were rejected.
+ SrcExpr = ExprError();
+ return true;
+ }
+
+ // Fix the expression to refer to 'fn'.
+ SingleFunctionExpression =
+ FixOverloadedFunctionReference(SrcExpr.get(), found, fn);
+
+ // If desired, do function-to-pointer decay.
+ if (doFunctionPointerConversion) {
+ SingleFunctionExpression =
+ DefaultFunctionArrayLvalueConversion(SingleFunctionExpression.get());
+ if (SingleFunctionExpression.isInvalid()) {
+ SrcExpr = ExprError();
+ return true;
+ }
+ }
+ }
+
+ if (!SingleFunctionExpression.isUsable()) {
+ if (complain) {
+ Diag(OpRangeForComplaining.getBegin(), DiagIDForComplaining)
+ << ovl.Expression->getName()
+ << DestTypeForComplaining
+ << OpRangeForComplaining
+ << ovl.Expression->getQualifierLoc().getSourceRange();
+ NoteAllOverloadCandidates(SrcExpr.get());
+
+ SrcExpr = ExprError();
+ return true;
+ }
+
+ return false;
+ }
+
+ SrcExpr = SingleFunctionExpression;
+ return true;
+}
+
+/// \brief Add a single candidate to the overload set.
+static void AddOverloadedCallCandidate(Sema &S,
+ DeclAccessPair FoundDecl,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ ArrayRef<Expr *> Args,
+ OverloadCandidateSet &CandidateSet,
+ bool PartialOverloading,
+ bool KnownValid) {
+ NamedDecl *Callee = FoundDecl.getDecl();
+ if (isa<UsingShadowDecl>(Callee))
+ Callee = cast<UsingShadowDecl>(Callee)->getTargetDecl();
+
+ if (FunctionDecl *Func = dyn_cast<FunctionDecl>(Callee)) {
+ if (ExplicitTemplateArgs) {
+ assert(!KnownValid && "Explicit template arguments?");
+ return;
+ }
+ S.AddOverloadCandidate(Func, FoundDecl, Args, CandidateSet,
+ /*SuppressUserConversions=*/false,
+ PartialOverloading);
+ return;
+ }
+
+ if (FunctionTemplateDecl *FuncTemplate
+ = dyn_cast<FunctionTemplateDecl>(Callee)) {
+ S.AddTemplateOverloadCandidate(FuncTemplate, FoundDecl,
+ ExplicitTemplateArgs, Args, CandidateSet,
+ /*SuppressUserConversions=*/false,
+ PartialOverloading);
+ return;
+ }
+
+ assert(!KnownValid && "unhandled case in overloaded call candidate");
+}
+
+/// \brief Add the overload candidates named by callee and/or found by argument
+/// dependent lookup to the given overload set.
+void Sema::AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
+ ArrayRef<Expr *> Args,
+ OverloadCandidateSet &CandidateSet,
+ bool PartialOverloading) {
+
+#ifndef NDEBUG
+ // Verify that ArgumentDependentLookup is consistent with the rules
+ // in C++0x [basic.lookup.argdep]p3:
+ //
+ // Let X be the lookup set produced by unqualified lookup (3.4.1)
+ // and let Y be the lookup set produced by argument dependent
+ // lookup (defined as follows). If X contains
+ //
+ // -- a declaration of a class member, or
+ //
+ // -- a block-scope function declaration that is not a
+ // using-declaration, or
+ //
+ // -- a declaration that is neither a function or a function
+ // template
+ //
+ // then Y is empty.
+
+ if (ULE->requiresADL()) {
+ for (UnresolvedLookupExpr::decls_iterator I = ULE->decls_begin(),
+ E = ULE->decls_end(); I != E; ++I) {
+ assert(!(*I)->getDeclContext()->isRecord());
+ assert(isa<UsingShadowDecl>(*I) ||
+ !(*I)->getDeclContext()->isFunctionOrMethod());
+ assert((*I)->getUnderlyingDecl()->isFunctionOrFunctionTemplate());
+ }
+ }
+#endif
+
+ // It would be nice to avoid this copy.
+ TemplateArgumentListInfo TABuffer;
+ TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr;
+ if (ULE->hasExplicitTemplateArgs()) {
+ ULE->copyTemplateArgumentsInto(TABuffer);
+ ExplicitTemplateArgs = &TABuffer;
+ }
+
+ for (UnresolvedLookupExpr::decls_iterator I = ULE->decls_begin(),
+ E = ULE->decls_end(); I != E; ++I)
+ AddOverloadedCallCandidate(*this, I.getPair(), ExplicitTemplateArgs, Args,
+ CandidateSet, PartialOverloading,
+ /*KnownValid*/ true);
+
+ if (ULE->requiresADL())
+ AddArgumentDependentLookupCandidates(ULE->getName(), ULE->getExprLoc(),
+ Args, ExplicitTemplateArgs,
+ CandidateSet, PartialOverloading);
+}
+
+/// Determine whether a declaration with the specified name could be moved into
+/// a different namespace.
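+/// The allocation and deallocation functions (operator new/delete and their
+/// array forms) may only be declared at global or class scope, so they can
+/// never be moved into a namespace.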
+static bool canBeDeclaredInNamespace(const DeclarationName &Name) {
+ switch (Name.getCXXOverloadedOperator()) {
+ case OO_New: case OO_Array_New:
+ case OO_Delete: case OO_Array_Delete:
+ return false;
+
+ default:
+ return true;
+ }
+}
+
+/// Attempt to recover from an ill-formed use of a non-dependent name in a
+/// template, where the non-dependent name was declared after the template
+/// was defined. This is common in code written for compilers which do not
+/// correctly implement two-stage name lookup.
+///
+/// Returns true if a viable candidate was found and a diagnostic was issued.
+static bool
+DiagnoseTwoPhaseLookup(Sema &SemaRef, SourceLocation FnLoc,
+ const CXXScopeSpec &SS, LookupResult &R,
+ OverloadCandidateSet::CandidateSetKind CSK,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ ArrayRef<Expr *> Args,
+ bool *DoDiagnoseEmptyLookup = nullptr) {
+ if (SemaRef.ActiveTemplateInstantiations.empty() || !SS.isEmpty())
+ return false;
+
+ for (DeclContext *DC = SemaRef.CurContext; DC; DC = DC->getParent()) {
+ if (DC->isTransparentContext())
+ continue;
+
+ SemaRef.LookupQualifiedName(R, DC);
+
+ if (!R.empty()) {
+ R.suppressDiagnostics();
+
+ if (isa<CXXRecordDecl>(DC)) {
+ // Don't diagnose names we find in classes; we get much better
+ // diagnostics for these from DiagnoseEmptyLookup.
+ R.clear();
+ if (DoDiagnoseEmptyLookup)
+ *DoDiagnoseEmptyLookup = true;
+ return false;
+ }
+
+ OverloadCandidateSet Candidates(FnLoc, CSK);
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I)
+ AddOverloadedCallCandidate(SemaRef, I.getPair(),
+ ExplicitTemplateArgs, Args,
+ Candidates, false, /*KnownValid*/ false);
+
+ OverloadCandidateSet::iterator Best;
+ if (Candidates.BestViableFunction(SemaRef, FnLoc, Best) != OR_Success) {
+ // No viable functions. Don't bother the user with notes for functions
+ // which don't work and shouldn't be found anyway.
+ R.clear();
+ return false;
+ }
+
+ // Find the namespaces where ADL would have looked, and suggest
+ // declaring the function there instead.
+ Sema::AssociatedNamespaceSet AssociatedNamespaces;
+ Sema::AssociatedClassSet AssociatedClasses;
+ SemaRef.FindAssociatedClassesAndNamespaces(FnLoc, Args,
+ AssociatedNamespaces,
+ AssociatedClasses);
+ Sema::AssociatedNamespaceSet SuggestedNamespaces;
+ if (canBeDeclaredInNamespace(R.getLookupName())) {
+ DeclContext *Std = SemaRef.getStdNamespace();
+ for (Sema::AssociatedNamespaceSet::iterator
+ it = AssociatedNamespaces.begin(),
+ end = AssociatedNamespaces.end(); it != end; ++it) {
+ // Never suggest declaring a function within namespace 'std'.
+ if (Std && Std->Encloses(*it))
+ continue;
+
+ // Never suggest declaring a function within a namespace with a
+ // reserved name, like __gnu_cxx.
+ NamespaceDecl *NS = dyn_cast<NamespaceDecl>(*it);
+ if (NS &&
+ NS->getQualifiedNameAsString().find("__") != std::string::npos)
+ continue;
+
+ SuggestedNamespaces.insert(*it);
+ }
+ }
+
+ SemaRef.Diag(R.getNameLoc(), diag::err_not_found_by_two_phase_lookup)
+ << R.getLookupName();
+ if (SuggestedNamespaces.empty()) {
+ SemaRef.Diag(Best->Function->getLocation(),
+ diag::note_not_found_by_two_phase_lookup)
+ << R.getLookupName() << 0;
+ } else if (SuggestedNamespaces.size() == 1) {
+ SemaRef.Diag(Best->Function->getLocation(),
+ diag::note_not_found_by_two_phase_lookup)
+ << R.getLookupName() << 1 << *SuggestedNamespaces.begin();
+ } else {
+ // FIXME: It would be useful to list the associated namespaces here,
+ // but the diagnostics infrastructure doesn't provide a way to produce
+ // a localized representation of a list of items.
+ SemaRef.Diag(Best->Function->getLocation(),
+ diag::note_not_found_by_two_phase_lookup)
+ << R.getLookupName() << 2;
+ }
+
+ // Try to recover by calling this function.
+ return true;
+ }
+
+ R.clear();
+ }
+
+ return false;
+}
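+// Illustrative example of the pattern diagnosed above:
+//
+//   template <typename T> void call(T t) { g(t); }
+//   void g(int);             // declared after the template was defined
+//   void test() { call(0); } // 'int' has no associated namespaces, so ADL
+//                            // finds nothing; the lookup above then finds
+//                            // 'g' in an enclosing scope and notes it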
+
+/// Attempt to recover from ill-formed use of a non-dependent operator in a
+/// template, where the non-dependent operator was declared after the template
+/// was defined.
+///
+/// Returns true if a viable candidate was found and a diagnostic was issued.
+static bool
+DiagnoseTwoPhaseOperatorLookup(Sema &SemaRef, OverloadedOperatorKind Op,
+ SourceLocation OpLoc,
+ ArrayRef<Expr *> Args) {
+ DeclarationName OpName =
+ SemaRef.Context.DeclarationNames.getCXXOperatorName(Op);
+ LookupResult R(SemaRef, OpName, OpLoc, Sema::LookupOperatorName);
+ return DiagnoseTwoPhaseLookup(SemaRef, OpLoc, CXXScopeSpec(), R,
+ OverloadCandidateSet::CSK_Operator,
+ /*ExplicitTemplateArgs=*/nullptr, Args);
+}
+
+namespace {
+class BuildRecoveryCallExprRAII {
+ Sema &SemaRef;
+public:
+ BuildRecoveryCallExprRAII(Sema &S) : SemaRef(S) {
+ assert(SemaRef.IsBuildingRecoveryCallExpr == false);
+ SemaRef.IsBuildingRecoveryCallExpr = true;
+ }
+
+ ~BuildRecoveryCallExprRAII() {
+ SemaRef.IsBuildingRecoveryCallExpr = false;
+ }
+};
+
+}
+
+static std::unique_ptr<CorrectionCandidateCallback>
+MakeValidator(Sema &SemaRef, MemberExpr *ME, size_t NumArgs,
+ bool HasTemplateArgs, bool AllowTypoCorrection) {
+ if (!AllowTypoCorrection)
+ return llvm::make_unique<NoTypoCorrectionCCC>();
+ return llvm::make_unique<FunctionCallFilterCCC>(SemaRef, NumArgs,
+ HasTemplateArgs, ME);
+}
+
+/// Attempts to recover from a call where no functions were found.
+///
+/// Returns the recovered call expression, or ExprError() if recovery failed.
+static ExprResult
+BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
+ UnresolvedLookupExpr *ULE,
+ SourceLocation LParenLoc,
+ MutableArrayRef<Expr *> Args,
+ SourceLocation RParenLoc,
+ bool EmptyLookup, bool AllowTypoCorrection) {
+ // Do not try to recover if we are already building a recovery call.
+ // This stops infinite loops for template instantiations like
+ //
+ // template <typename T> auto foo(T t) -> decltype(foo(t)) {}
+ // template <typename T> auto foo(T t) -> decltype(foo(&t)) {}
+ //
+ if (SemaRef.IsBuildingRecoveryCallExpr)
+ return ExprError();
+ BuildRecoveryCallExprRAII RCE(SemaRef);
+
+ CXXScopeSpec SS;
+ SS.Adopt(ULE->getQualifierLoc());
+ SourceLocation TemplateKWLoc = ULE->getTemplateKeywordLoc();
+
+ TemplateArgumentListInfo TABuffer;
+ TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr;
+ if (ULE->hasExplicitTemplateArgs()) {
+ ULE->copyTemplateArgumentsInto(TABuffer);
+ ExplicitTemplateArgs = &TABuffer;
+ }
+
+ LookupResult R(SemaRef, ULE->getName(), ULE->getNameLoc(),
+ Sema::LookupOrdinaryName);
+ bool DoDiagnoseEmptyLookup = EmptyLookup;
+ if (!DiagnoseTwoPhaseLookup(SemaRef, Fn->getExprLoc(), SS, R,
+ OverloadCandidateSet::CSK_Normal,
+ ExplicitTemplateArgs, Args,
+ &DoDiagnoseEmptyLookup) &&
+ (!DoDiagnoseEmptyLookup || SemaRef.DiagnoseEmptyLookup(
+ S, SS, R,
+ MakeValidator(SemaRef, dyn_cast<MemberExpr>(Fn), Args.size(),
+ ExplicitTemplateArgs != nullptr, AllowTypoCorrection),
+ ExplicitTemplateArgs, Args)))
+ return ExprError();
+
+ assert(!R.empty() && "lookup results empty despite recovery");
+
+ // Build an implicit member call if appropriate. Just drop the
+ // casts and such from the call, we don't really care.
+ ExprResult NewFn = ExprError();
+ if ((*R.begin())->isCXXClassMember())
+ NewFn = SemaRef.BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc, R,
+ ExplicitTemplateArgs, S);
+ else if (ExplicitTemplateArgs || TemplateKWLoc.isValid())
+ NewFn = SemaRef.BuildTemplateIdExpr(SS, TemplateKWLoc, R, false,
+ ExplicitTemplateArgs);
+ else
+ NewFn = SemaRef.BuildDeclarationNameExpr(SS, R, false);
+
+ if (NewFn.isInvalid())
+ return ExprError();
+
+ // This shouldn't cause an infinite loop because we're giving it
+ // an expression with viable lookup results, which should never
+ // end up here.
+ return SemaRef.ActOnCallExpr(/*Scope*/ nullptr, NewFn.get(), LParenLoc,
+ MultiExprArg(Args.data(), Args.size()),
+ RParenLoc);
+}
+
+/// \brief Constructs and populates an OverloadCandidateSet from
+/// the given function.
+/// \returns true when the ExprResult output parameter has been set.
+bool Sema::buildOverloadedCallSet(Scope *S, Expr *Fn,
+ UnresolvedLookupExpr *ULE,
+ MultiExprArg Args,
+ SourceLocation RParenLoc,
+ OverloadCandidateSet *CandidateSet,
+ ExprResult *Result) {
+#ifndef NDEBUG
+ if (ULE->requiresADL()) {
+ // To do ADL, we must have found an unqualified name.
+ assert(!ULE->getQualifier() && "qualified name with ADL");
+
+ // We don't perform ADL for implicit declarations of builtins.
+ // Verify that this was correctly set up.
+ FunctionDecl *F;
+ if (ULE->decls_begin() + 1 == ULE->decls_end() &&
+ (F = dyn_cast<FunctionDecl>(*ULE->decls_begin())) &&
+ F->getBuiltinID() && F->isImplicit())
+ llvm_unreachable("performing ADL for builtin");
+
+ // We don't perform ADL in C.
+ assert(getLangOpts().CPlusPlus && "ADL enabled in C");
+ }
+#endif
+
+ UnbridgedCastsSet UnbridgedCasts;
+ if (checkArgPlaceholdersForOverload(*this, Args, UnbridgedCasts)) {
+ *Result = ExprError();
+ return true;
+ }
+
+ // Add the functions denoted by the callee to the set of candidate
+ // functions, including those from argument-dependent lookup.
+ AddOverloadedCallCandidates(ULE, Args, *CandidateSet);
+
+ if (getLangOpts().MSVCCompat &&
+ CurContext->isDependentContext() && !isSFINAEContext() &&
+ (isa<FunctionDecl>(CurContext) || isa<CXXRecordDecl>(CurContext))) {
+
+ OverloadCandidateSet::iterator Best;
+ if (CandidateSet->empty() ||
+ CandidateSet->BestViableFunction(*this, Fn->getLocStart(), Best) ==
+ OR_No_Viable_Function) {
+ // In Microsoft mode, if we are inside a template class member function
+ // then create a type-dependent CallExpr. The goal is to postpone name
+ // lookup to instantiation time, so that we can search type-dependent base
+ // classes as well.
+ CallExpr *CE = new (Context) CallExpr(
+ Context, Fn, Args, Context.DependentTy, VK_RValue, RParenLoc);
+ CE->setTypeDependent(true);
+ CE->setValueDependent(true);
+ CE->setInstantiationDependent(true);
+ *Result = CE;
+ return true;
+ }
+ }
+
+ if (CandidateSet->empty())
+ return false;
+
+ UnbridgedCasts.restore();
+ return false;
+}
+
+/// FinishOverloadedCallExpr - Given an OverloadCandidateSet, builds and
+/// returns the completed call expression. If overload resolution fails,
+/// emits diagnostics and returns ExprError().
+static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
+ UnresolvedLookupExpr *ULE,
+ SourceLocation LParenLoc,
+ MultiExprArg Args,
+ SourceLocation RParenLoc,
+ Expr *ExecConfig,
+ OverloadCandidateSet *CandidateSet,
+ OverloadCandidateSet::iterator *Best,
+ OverloadingResult OverloadResult,
+ bool AllowTypoCorrection) {
+ if (CandidateSet->empty())
+ return BuildRecoveryCallExpr(SemaRef, S, Fn, ULE, LParenLoc, Args,
+ RParenLoc, /*EmptyLookup=*/true,
+ AllowTypoCorrection);
+
+ switch (OverloadResult) {
+ case OR_Success: {
+ FunctionDecl *FDecl = (*Best)->Function;
+ SemaRef.CheckUnresolvedLookupAccess(ULE, (*Best)->FoundDecl);
+ if (SemaRef.DiagnoseUseOfDecl(FDecl, ULE->getNameLoc()))
+ return ExprError();
+ Fn = SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl);
+ return SemaRef.BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, RParenLoc,
+ ExecConfig);
+ }
+
+ case OR_No_Viable_Function: {
+ // Try to recover by looking for viable functions which the user might
+ // have meant to call.
+ ExprResult Recovery = BuildRecoveryCallExpr(SemaRef, S, Fn, ULE, LParenLoc,
+ Args, RParenLoc,
+ /*EmptyLookup=*/false,
+ AllowTypoCorrection);
+ if (!Recovery.isInvalid())
+ return Recovery;
+
+ // If the user passes in a function that we can't take the address of, we
+ // generally end up emitting really bad error messages. Here, we attempt to
+ // emit better ones.
+ for (const Expr *Arg : Args) {
+ if (!Arg->getType()->isFunctionType())
+ continue;
+ if (auto *DRE = dyn_cast<DeclRefExpr>(Arg->IgnoreParenImpCasts())) {
+ auto *FD = dyn_cast<FunctionDecl>(DRE->getDecl());
+ if (FD &&
+ !SemaRef.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
+ Arg->getExprLoc()))
+ return ExprError();
+ }
+ }
+
+ SemaRef.Diag(Fn->getLocStart(), diag::err_ovl_no_viable_function_in_call)
+ << ULE->getName() << Fn->getSourceRange();
+ CandidateSet->NoteCandidates(SemaRef, OCD_AllCandidates, Args);
+ break;
+ }
+
+ case OR_Ambiguous:
+ SemaRef.Diag(Fn->getLocStart(), diag::err_ovl_ambiguous_call)
+ << ULE->getName() << Fn->getSourceRange();
+ CandidateSet->NoteCandidates(SemaRef, OCD_ViableCandidates, Args);
+ break;
+
+ case OR_Deleted: {
+ SemaRef.Diag(Fn->getLocStart(), diag::err_ovl_deleted_call)
+ << (*Best)->Function->isDeleted()
+ << ULE->getName()
+ << SemaRef.getDeletedOrUnavailableSuffix((*Best)->Function)
+ << Fn->getSourceRange();
+ CandidateSet->NoteCandidates(SemaRef, OCD_AllCandidates, Args);
+
+ // We emitted an error for the unavailable/deleted function call but keep
+ // the call in the AST.
+ FunctionDecl *FDecl = (*Best)->Function;
+ Fn = SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl);
+ return SemaRef.BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, RParenLoc,
+ ExecConfig);
+ }
+ }
+
+ // Overload resolution failed.
+ return ExprError();
+}
+
+/// BuildOverloadedCallExpr - Given the call expression that calls Fn
+/// (which eventually refers to the declaration Func) and the call
+/// arguments Args/NumArgs, attempt to resolve the function call down
+/// to a specific function. If overload resolution succeeds, returns
+/// the call expression produced by overload resolution.
+/// Otherwise, emits diagnostics and returns ExprError.
+ExprResult Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn,
+ UnresolvedLookupExpr *ULE,
+ SourceLocation LParenLoc,
+ MultiExprArg Args,
+ SourceLocation RParenLoc,
+ Expr *ExecConfig,
+ bool AllowTypoCorrection) {
+ OverloadCandidateSet CandidateSet(Fn->getExprLoc(),
+ OverloadCandidateSet::CSK_Normal);
+ ExprResult result;
+
+ if (buildOverloadedCallSet(S, Fn, ULE, Args, LParenLoc, &CandidateSet,
+ &result))
+ return result;
+
+ OverloadCandidateSet::iterator Best;
+ OverloadingResult OverloadResult =
+ CandidateSet.BestViableFunction(*this, Fn->getLocStart(), Best);
+
+ return FinishOverloadedCallExpr(*this, S, Fn, ULE, LParenLoc, Args,
+ RParenLoc, ExecConfig, &CandidateSet,
+ &Best, OverloadResult,
+ AllowTypoCorrection);
+}
+
+static bool IsOverloaded(const UnresolvedSetImpl &Functions) {
+ return Functions.size() > 1 ||
+ (Functions.size() == 1 && isa<FunctionTemplateDecl>(*Functions.begin()));
+}
+
+/// \brief Create a unary operation that may resolve to an overloaded
+/// operator.
+///
+/// \param OpLoc The location of the operator itself (e.g., '*').
+///
+/// \param Opc The UnaryOperatorKind that describes this operator.
+///
+/// \param Fns The set of non-member functions that will be
+/// considered by overload resolution. The caller needs to build this
+/// set based on the context using, e.g.,
+/// LookupOverloadedOperatorName() and ArgumentDependentLookup(). This
+/// set should not contain any member functions; those will be added
+/// by CreateOverloadedUnaryOp().
+///
+/// \param Input The input argument.
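+///
+/// For example (illustrative):
+/// @code
+/// struct S { S operator-() const; };
+/// S s;
+/// -s; // resolves to S::operator-()
+/// @endcode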
+ExprResult
+Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
+ const UnresolvedSetImpl &Fns,
+ Expr *Input) {
+ OverloadedOperatorKind Op = UnaryOperator::getOverloadedOperator(Opc);
+ assert(Op != OO_None && "Invalid opcode for overloaded unary operator");
+ DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op);
+ // TODO: provide better source location info.
+ DeclarationNameInfo OpNameInfo(OpName, OpLoc);
+
+ if (checkPlaceholderForOverload(*this, Input))
+ return ExprError();
+
+ Expr *Args[2] = { Input, nullptr };
+ unsigned NumArgs = 1;
+
+ // For post-increment and post-decrement, add the implicit '0' as
+ // the second argument, so that we know this is a post-increment or
+ // post-decrement.
+ if (Opc == UO_PostInc || Opc == UO_PostDec) {
+ llvm::APSInt Zero(Context.getTypeSize(Context.IntTy), false);
+ Args[1] = IntegerLiteral::Create(Context, Zero, Context.IntTy,
+ SourceLocation());
+ NumArgs = 2;
+ }
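+ // Thus 'x++' is resolved as if written 'operator++(x, 0)'
+ // (C++ [over.inc]).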
+
+ ArrayRef<Expr *> ArgsArray(Args, NumArgs);
+
+ if (Input->isTypeDependent()) {
+ if (Fns.empty())
+ return new (Context) UnaryOperator(Input, Opc, Context.DependentTy,
+ VK_RValue, OK_Ordinary, OpLoc);
+
+ CXXRecordDecl *NamingClass = nullptr; // lookup ignores member operators
+ UnresolvedLookupExpr *Fn
+ = UnresolvedLookupExpr::Create(Context, NamingClass,
+ NestedNameSpecifierLoc(), OpNameInfo,
+ /*ADL*/ true, IsOverloaded(Fns),
+ Fns.begin(), Fns.end());
+ return new (Context)
+ CXXOperatorCallExpr(Context, Op, Fn, ArgsArray, Context.DependentTy,
+ VK_RValue, OpLoc, false);
+ }
+
+ // Build an empty overload set.
+ OverloadCandidateSet CandidateSet(OpLoc, OverloadCandidateSet::CSK_Operator);
+
+ // Add the candidates from the given function set.
+ AddFunctionCandidates(Fns, ArgsArray, CandidateSet);
+
+ // Add operator candidates that are member functions.
+ AddMemberOperatorCandidates(Op, OpLoc, ArgsArray, CandidateSet);
+
+ // Add candidates from ADL.
+ AddArgumentDependentLookupCandidates(OpName, OpLoc, ArgsArray,
+ /*ExplicitTemplateArgs*/nullptr,
+ CandidateSet);
+
+ // Add builtin operator candidates.
+ AddBuiltinOperatorCandidates(Op, OpLoc, ArgsArray, CandidateSet);
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ // Perform overload resolution.
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(*this, OpLoc, Best)) {
+ case OR_Success: {
+ // We found a built-in operator or an overloaded operator.
+ FunctionDecl *FnDecl = Best->Function;
+
+ if (FnDecl) {
+ // We matched an overloaded operator. Build a call to that
+ // operator.
+
+ // Convert the arguments.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FnDecl)) {
+ CheckMemberOperatorAccess(OpLoc, Args[0], nullptr, Best->FoundDecl);
+
+ ExprResult InputRes =
+ PerformObjectArgumentInitialization(Input, /*Qualifier=*/nullptr,
+ Best->FoundDecl, Method);
+ if (InputRes.isInvalid())
+ return ExprError();
+ Input = InputRes.get();
+ } else {
+ // Convert the arguments.
+ ExprResult InputInit
+ = PerformCopyInitialization(InitializedEntity::InitializeParameter(
+ Context,
+ FnDecl->getParamDecl(0)),
+ SourceLocation(),
+ Input);
+ if (InputInit.isInvalid())
+ return ExprError();
+ Input = InputInit.get();
+ }
+
+ // Build the actual expression node.
+ ExprResult FnExpr = CreateFunctionRefExpr(*this, FnDecl, Best->FoundDecl,
+ HadMultipleCandidates, OpLoc);
+ if (FnExpr.isInvalid())
+ return ExprError();
+
+ // Determine the result type.
+ QualType ResultTy = FnDecl->getReturnType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultTy);
+ ResultTy = ResultTy.getNonLValueExprType(Context);
+
+ Args[0] = Input;
+ CallExpr *TheCall =
+ new (Context) CXXOperatorCallExpr(Context, Op, FnExpr.get(), ArgsArray,
+ ResultTy, VK, OpLoc, false);
+
+ if (CheckCallReturnType(FnDecl->getReturnType(), OpLoc, TheCall, FnDecl))
+ return ExprError();
+
+ return MaybeBindToTemporary(TheCall);
+ } else {
+ // We matched a built-in operator. Convert the arguments, then
+ // break out so that we will build the appropriate built-in
+ // operator node.
+ ExprResult InputRes =
+ PerformImplicitConversion(Input, Best->BuiltinTypes.ParamTypes[0],
+ Best->Conversions[0], AA_Passing);
+ if (InputRes.isInvalid())
+ return ExprError();
+ Input = InputRes.get();
+ break;
+ }
+ }
+
+ case OR_No_Viable_Function:
+ // This is an erroneous use of an operator which can be overloaded by
+ // a non-member function. Check for non-member operators which were
+ // defined too late to be candidates.
+ if (DiagnoseTwoPhaseOperatorLookup(*this, Op, OpLoc, ArgsArray))
+ // FIXME: Recover by calling the found function.
+ return ExprError();
+
+ // No viable function; fall through to handling this as a
+ // built-in operator, which will produce an error message for us.
+ break;
+
+ case OR_Ambiguous:
+ Diag(OpLoc, diag::err_ovl_ambiguous_oper_unary)
+ << UnaryOperator::getOpcodeStr(Opc)
+ << Input->getType()
+ << Input->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, ArgsArray,
+ UnaryOperator::getOpcodeStr(Opc), OpLoc);
+ return ExprError();
+
+ case OR_Deleted:
+ Diag(OpLoc, diag::err_ovl_deleted_oper)
+ << Best->Function->isDeleted()
+ << UnaryOperator::getOpcodeStr(Opc)
+ << getDeletedOrUnavailableSuffix(Best->Function)
+ << Input->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, ArgsArray,
+ UnaryOperator::getOpcodeStr(Opc), OpLoc);
+ return ExprError();
+ }
+
+ // Either we found no viable overloaded operator or we matched a
+ // built-in operator. In either case, fall through to trying to
+ // build a built-in operation.
+ return CreateBuiltinUnaryOp(OpLoc, Opc, Input);
+}
+
+/// \brief Create a binary operation that may resolve to an overloaded
+/// operator.
+///
+/// \param OpLoc The location of the operator itself (e.g., '+').
+///
+/// \param Opc The BinaryOperatorKind that describes this operator.
+///
+/// \param Fns The set of non-member functions that will be
+/// considered by overload resolution. The caller needs to build this
+/// set based on the context using, e.g.,
+/// LookupOverloadedOperatorName() and ArgumentDependentLookup(). This
+/// set should not contain any member functions; those will be added
+/// by CreateOverloadedBinOp().
+///
+/// \param LHS Left-hand argument.
+/// \param RHS Right-hand argument.
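+///
+/// For illustration only, a hypothetical snippet that exercises this path
+/// (the names below are invented, not part of Clang):
+/// \code
+///   struct S { S operator+(const S &) const; };
+///   S a, b;
+///   S c = a + b; // overload resolution selects the member operator+
+/// \endcode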
+ExprResult
+Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
+ BinaryOperatorKind Opc,
+ const UnresolvedSetImpl &Fns,
+ Expr *LHS, Expr *RHS) {
+ Expr *Args[2] = { LHS, RHS };
+ LHS = RHS = nullptr; // From here on, use only Args; LHS and RHS are stale.
+
+ OverloadedOperatorKind Op = BinaryOperator::getOverloadedOperator(Opc);
+ DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op);
+
+ // If either side is type-dependent, create an appropriate dependent
+ // expression.
+ if (Args[0]->isTypeDependent() || Args[1]->isTypeDependent()) {
+ if (Fns.empty()) {
+ // If there are no functions to store, just build a dependent
+ // BinaryOperator or CompoundAssignment.
+ if (Opc <= BO_Assign || Opc > BO_OrAssign)
+ return new (Context) BinaryOperator(
+ Args[0], Args[1], Opc, Context.DependentTy, VK_RValue, OK_Ordinary,
+ OpLoc, FPFeatures.fp_contract);
+
+ return new (Context) CompoundAssignOperator(
+ Args[0], Args[1], Opc, Context.DependentTy, VK_LValue, OK_Ordinary,
+ Context.DependentTy, Context.DependentTy, OpLoc,
+ FPFeatures.fp_contract);
+ }
+
+ // FIXME: save results of ADL from here?
+ CXXRecordDecl *NamingClass = nullptr; // lookup ignores member operators
+ // TODO: provide better source location info in DNLoc component.
+ DeclarationNameInfo OpNameInfo(OpName, OpLoc);
+ UnresolvedLookupExpr *Fn
+ = UnresolvedLookupExpr::Create(Context, NamingClass,
+ NestedNameSpecifierLoc(), OpNameInfo,
+ /*ADL*/ true, IsOverloaded(Fns),
+ Fns.begin(), Fns.end());
+ return new (Context)
+ CXXOperatorCallExpr(Context, Op, Fn, Args, Context.DependentTy,
+ VK_RValue, OpLoc, FPFeatures.fp_contract);
+ }
+
+ // Always do placeholder-like conversions on the RHS.
+ if (checkPlaceholderForOverload(*this, Args[1]))
+ return ExprError();
+
+ // Do placeholder-like conversion on the LHS; note that we should
+ // not get here with a PseudoObject LHS.
+ assert(Args[0]->getObjectKind() != OK_ObjCProperty);
+ if (checkPlaceholderForOverload(*this, Args[0]))
+ return ExprError();
+
+ // If this is the assignment operator, we only perform overload resolution
+ // if the left-hand side is a class or enumeration type. This is actually
+ // a hack. The standard requires that we do overload resolution between the
+ // various built-in candidates, but as DR507 points out, this can lead to
+ // problems. So we do it this way, which pretty much follows what GCC does.
+ // Note that we go the traditional code path for compound assignment forms.
+ if (Opc == BO_Assign && !Args[0]->getType()->isOverloadableType())
+ return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
+
+ // If this is the .* operator, which is not overloadable, just
+ // create a built-in binary operator.
+ if (Opc == BO_PtrMemD)
+ return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
+
+ // Build an empty overload set.
+ OverloadCandidateSet CandidateSet(OpLoc, OverloadCandidateSet::CSK_Operator);
+
+ // Add the candidates from the given function set.
+ AddFunctionCandidates(Fns, Args, CandidateSet);
+
+ // Add operator candidates that are member functions.
+ AddMemberOperatorCandidates(Op, OpLoc, Args, CandidateSet);
+
+ // Add candidates from ADL. Per [over.match.oper]p2, this lookup is not
+ // performed for an assignment operator (nor for operator[] nor operator->,
+ // which don't get here).
+ if (Opc != BO_Assign)
+ AddArgumentDependentLookupCandidates(OpName, OpLoc, Args,
+ /*ExplicitTemplateArgs*/ nullptr,
+ CandidateSet);
+
+ // Add builtin operator candidates.
+ AddBuiltinOperatorCandidates(Op, OpLoc, Args, CandidateSet);
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ // Perform overload resolution.
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(*this, OpLoc, Best)) {
+ case OR_Success: {
+ // We found a built-in operator or an overloaded operator.
+ FunctionDecl *FnDecl = Best->Function;
+
+ if (FnDecl) {
+ // We matched an overloaded operator. Build a call to that
+ // operator.
+
+ // Convert the arguments.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FnDecl)) {
+ // Best->Access is only meaningful for class members.
+ CheckMemberOperatorAccess(OpLoc, Args[0], Args[1], Best->FoundDecl);
+
+ ExprResult Arg1 =
+ PerformCopyInitialization(
+ InitializedEntity::InitializeParameter(Context,
+ FnDecl->getParamDecl(0)),
+ SourceLocation(), Args[1]);
+ if (Arg1.isInvalid())
+ return ExprError();
+
+ ExprResult Arg0 =
+ PerformObjectArgumentInitialization(Args[0], /*Qualifier=*/nullptr,
+ Best->FoundDecl, Method);
+ if (Arg0.isInvalid())
+ return ExprError();
+ Args[0] = Arg0.getAs<Expr>();
+ Args[1] = RHS = Arg1.getAs<Expr>();
+ } else {
+ // Convert the arguments.
+ ExprResult Arg0 = PerformCopyInitialization(
+ InitializedEntity::InitializeParameter(Context,
+ FnDecl->getParamDecl(0)),
+ SourceLocation(), Args[0]);
+ if (Arg0.isInvalid())
+ return ExprError();
+
+ ExprResult Arg1 =
+ PerformCopyInitialization(
+ InitializedEntity::InitializeParameter(Context,
+ FnDecl->getParamDecl(1)),
+ SourceLocation(), Args[1]);
+ if (Arg1.isInvalid())
+ return ExprError();
+ Args[0] = LHS = Arg0.getAs<Expr>();
+ Args[1] = RHS = Arg1.getAs<Expr>();
+ }
+
+ // Build the actual expression node.
+ ExprResult FnExpr = CreateFunctionRefExpr(*this, FnDecl,
+ Best->FoundDecl,
+ HadMultipleCandidates, OpLoc);
+ if (FnExpr.isInvalid())
+ return ExprError();
+
+ // Determine the result type.
+ QualType ResultTy = FnDecl->getReturnType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultTy);
+ ResultTy = ResultTy.getNonLValueExprType(Context);
+
+ CXXOperatorCallExpr *TheCall =
+ new (Context) CXXOperatorCallExpr(Context, Op, FnExpr.get(),
+ Args, ResultTy, VK, OpLoc,
+ FPFeatures.fp_contract);
+
+ if (CheckCallReturnType(FnDecl->getReturnType(), OpLoc, TheCall,
+ FnDecl))
+ return ExprError();
+
+ ArrayRef<const Expr *> ArgsArray(Args, 2);
+ // Cut off the implicit 'this'.
+ if (isa<CXXMethodDecl>(FnDecl))
+ ArgsArray = ArgsArray.slice(1);
+
+ // Check for a self move.
+ if (Op == OO_Equal)
+ DiagnoseSelfMove(Args[0], Args[1], OpLoc);
+
+ checkCall(FnDecl, nullptr, ArgsArray, isa<CXXMethodDecl>(FnDecl), OpLoc,
+ TheCall->getSourceRange(), VariadicDoesNotApply);
+
+ return MaybeBindToTemporary(TheCall);
+ } else {
+ // We matched a built-in operator. Convert the arguments, then
+ // break out so that we will build the appropriate built-in
+ // operator node.
+ ExprResult ArgsRes0 =
+ PerformImplicitConversion(Args[0], Best->BuiltinTypes.ParamTypes[0],
+ Best->Conversions[0], AA_Passing);
+ if (ArgsRes0.isInvalid())
+ return ExprError();
+ Args[0] = ArgsRes0.get();
+
+ ExprResult ArgsRes1 =
+ PerformImplicitConversion(Args[1], Best->BuiltinTypes.ParamTypes[1],
+ Best->Conversions[1], AA_Passing);
+ if (ArgsRes1.isInvalid())
+ return ExprError();
+ Args[1] = ArgsRes1.get();
+ break;
+ }
+ }
+
+ case OR_No_Viable_Function: {
+ // C++ [over.match.oper]p9:
+ // If the operator is the operator , [...] and there are no
+ // viable functions, then the operator is assumed to be the
+ // built-in operator and interpreted according to clause 5.
+ if (Opc == BO_Comma)
+ break;
+
+ // If the left operand has class type and this is an assignment or
+ // compound assignment operator, do not fall through to the built-in
+ // handling; instead, report that no overloaded assignment operator
+ // was found.
+ ExprResult Result = ExprError();
+ if (Args[0]->getType()->isRecordType() &&
+ Opc >= BO_Assign && Opc <= BO_OrAssign) {
+ Diag(OpLoc, diag::err_ovl_no_viable_oper)
+ << BinaryOperator::getOpcodeStr(Opc)
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange();
+ if (Args[0]->getType()->isIncompleteType()) {
+ Diag(OpLoc, diag::note_assign_lhs_incomplete)
+ << Args[0]->getType()
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange();
+ }
+ } else {
+ // This is an erroneous use of an operator which can be overloaded by
+ // a non-member function. Check for non-member operators which were
+ // defined too late to be candidates.
+ if (DiagnoseTwoPhaseOperatorLookup(*this, Op, OpLoc, Args))
+ // FIXME: Recover by calling the found function.
+ return ExprError();
+
+ // No viable function; try to create a built-in operation, which will
+ // produce an error. Then, show the non-viable candidates.
+ Result = CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
+ }
+ assert(Result.isInvalid() &&
+ "C++ binary operator overloading is missing candidates!");
+ if (Result.isInvalid())
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
+ BinaryOperator::getOpcodeStr(Opc), OpLoc);
+ return Result;
+ }
+
+ case OR_Ambiguous:
+ Diag(OpLoc, diag::err_ovl_ambiguous_oper_binary)
+ << BinaryOperator::getOpcodeStr(Opc)
+ << Args[0]->getType() << Args[1]->getType()
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args,
+ BinaryOperator::getOpcodeStr(Opc), OpLoc);
+ return ExprError();
+
+ case OR_Deleted:
+ if (isImplicitlyDeleted(Best->Function)) {
+ CXXMethodDecl *Method = cast<CXXMethodDecl>(Best->Function);
+ Diag(OpLoc, diag::err_ovl_deleted_special_oper)
+ << Context.getRecordType(Method->getParent())
+ << getSpecialMember(Method);
+
+ // The user probably meant to call this special member. Just
+ // explain why it's deleted.
+ NoteDeletedFunction(Method);
+ return ExprError();
+ } else {
+ Diag(OpLoc, diag::err_ovl_deleted_oper)
+ << Best->Function->isDeleted()
+ << BinaryOperator::getOpcodeStr(Opc)
+ << getDeletedOrUnavailableSuffix(Best->Function)
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange();
+ }
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
+ BinaryOperator::getOpcodeStr(Opc), OpLoc);
+ return ExprError();
+ }
+
+ // We matched a built-in operator; build it.
+ return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
+}
+
+ExprResult
+Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
+ SourceLocation RLoc,
+ Expr *Base, Expr *Idx) {
+ Expr *Args[2] = { Base, Idx };
+ DeclarationName OpName =
+ Context.DeclarationNames.getCXXOperatorName(OO_Subscript);
+
+ // If either side is type-dependent, create an appropriate dependent
+ // expression.
+ if (Args[0]->isTypeDependent() || Args[1]->isTypeDependent()) {
+
+ CXXRecordDecl *NamingClass = nullptr; // lookup ignores member operators
+ // CHECKME: no 'operator' keyword?
+ DeclarationNameInfo OpNameInfo(OpName, LLoc);
+ OpNameInfo.setCXXOperatorNameRange(SourceRange(LLoc, RLoc));
+ UnresolvedLookupExpr *Fn
+ = UnresolvedLookupExpr::Create(Context, NamingClass,
+ NestedNameSpecifierLoc(), OpNameInfo,
+ /*ADL*/ true, /*Overloaded*/ false,
+ UnresolvedSetIterator(),
+ UnresolvedSetIterator());
+ // Can't add any actual overloads yet
+
+ return new (Context)
+ CXXOperatorCallExpr(Context, OO_Subscript, Fn, Args,
+ Context.DependentTy, VK_RValue, RLoc, false);
+ }
+
+ // Handle placeholders on both operands.
+ if (checkPlaceholderForOverload(*this, Args[0]))
+ return ExprError();
+ if (checkPlaceholderForOverload(*this, Args[1]))
+ return ExprError();
+
+ // Build an empty overload set.
+ OverloadCandidateSet CandidateSet(LLoc, OverloadCandidateSet::CSK_Operator);
+
+ // Subscript can only be overloaded as a member function.
+
+ // Add operator candidates that are member functions.
+ AddMemberOperatorCandidates(OO_Subscript, LLoc, Args, CandidateSet);
+
+ // Add builtin operator candidates.
+ AddBuiltinOperatorCandidates(OO_Subscript, LLoc, Args, CandidateSet);
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ // Perform overload resolution.
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(*this, LLoc, Best)) {
+ case OR_Success: {
+ // We found a built-in operator or an overloaded operator.
+ FunctionDecl *FnDecl = Best->Function;
+
+ if (FnDecl) {
+ // We matched an overloaded operator. Build a call to that
+ // operator.
+
+ CheckMemberOperatorAccess(LLoc, Args[0], Args[1], Best->FoundDecl);
+
+ // Convert the arguments.
+ CXXMethodDecl *Method = cast<CXXMethodDecl>(FnDecl);
+ ExprResult Arg0 =
+ PerformObjectArgumentInitialization(Args[0], /*Qualifier=*/nullptr,
+ Best->FoundDecl, Method);
+ if (Arg0.isInvalid())
+ return ExprError();
+ Args[0] = Arg0.get();
+
+ // Convert the arguments.
+ ExprResult InputInit
+ = PerformCopyInitialization(InitializedEntity::InitializeParameter(
+ Context,
+ FnDecl->getParamDecl(0)),
+ SourceLocation(),
+ Args[1]);
+ if (InputInit.isInvalid())
+ return ExprError();
+
+ Args[1] = InputInit.getAs<Expr>();
+
+ // Build the actual expression node.
+ DeclarationNameInfo OpLocInfo(OpName, LLoc);
+ OpLocInfo.setCXXOperatorNameRange(SourceRange(LLoc, RLoc));
+ ExprResult FnExpr = CreateFunctionRefExpr(*this, FnDecl,
+ Best->FoundDecl,
+ HadMultipleCandidates,
+ OpLocInfo.getLoc(),
+ OpLocInfo.getInfo());
+ if (FnExpr.isInvalid())
+ return ExprError();
+
+ // Determine the result type
+ QualType ResultTy = FnDecl->getReturnType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultTy);
+ ResultTy = ResultTy.getNonLValueExprType(Context);
+
+ CXXOperatorCallExpr *TheCall =
+ new (Context) CXXOperatorCallExpr(Context, OO_Subscript,
+ FnExpr.get(), Args,
+ ResultTy, VK, RLoc,
+ false);
+
+ if (CheckCallReturnType(FnDecl->getReturnType(), LLoc, TheCall, FnDecl))
+ return ExprError();
+
+ return MaybeBindToTemporary(TheCall);
+ } else {
+ // We matched a built-in operator. Convert the arguments, then
+ // break out so that we will build the appropriate built-in
+ // operator node.
+ ExprResult ArgsRes0 =
+ PerformImplicitConversion(Args[0], Best->BuiltinTypes.ParamTypes[0],
+ Best->Conversions[0], AA_Passing);
+ if (ArgsRes0.isInvalid())
+ return ExprError();
+ Args[0] = ArgsRes0.get();
+
+ ExprResult ArgsRes1 =
+ PerformImplicitConversion(Args[1], Best->BuiltinTypes.ParamTypes[1],
+ Best->Conversions[1], AA_Passing);
+ if (ArgsRes1.isInvalid())
+ return ExprError();
+ Args[1] = ArgsRes1.get();
+
+ break;
+ }
+ }
+
+ case OR_No_Viable_Function: {
+ if (CandidateSet.empty())
+ Diag(LLoc, diag::err_ovl_no_oper)
+ << Args[0]->getType() << /*subscript*/ 0
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange();
+ else
+ Diag(LLoc, diag::err_ovl_no_viable_subscript)
+ << Args[0]->getType()
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
+ "[]", LLoc);
+ return ExprError();
+ }
+
+ case OR_Ambiguous:
+ Diag(LLoc, diag::err_ovl_ambiguous_oper_binary)
+ << "[]"
+ << Args[0]->getType() << Args[1]->getType()
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args,
+ "[]", LLoc);
+ return ExprError();
+
+ case OR_Deleted:
+ Diag(LLoc, diag::err_ovl_deleted_oper)
+ << Best->Function->isDeleted() << "[]"
+ << getDeletedOrUnavailableSuffix(Best->Function)
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
+ "[]", LLoc);
+ return ExprError();
+ }
+
+ // We matched a built-in operator; build it.
+ return CreateBuiltinArraySubscriptExpr(Args[0], LLoc, Args[1], RLoc);
+}
+
+/// BuildCallToMemberFunction - Build a call to a member
+/// function. MemExpr is the expression that refers to the member
+/// function (and includes the object parameter), and Args are the
+/// arguments to the function call (not including the object
+/// parameter). The caller needs to validate that the member
+/// expression refers to a non-static member function or an overloaded
+/// member function.
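+///
+/// For illustration only (hypothetical user code, names invented):
+/// \code
+///   struct T { int f(int); int f(double); };
+///   T t;
+///   t.f(3); // overload resolution among the T::f candidates happens here
+/// \endcode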
+ExprResult
+Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
+ SourceLocation LParenLoc,
+ MultiExprArg Args,
+ SourceLocation RParenLoc) {
+ assert(MemExprE->getType() == Context.BoundMemberTy ||
+ MemExprE->getType() == Context.OverloadTy);
+
+ // Dig out the member expression. This holds both the object
+ // argument and the member function we're referring to.
+ Expr *NakedMemExpr = MemExprE->IgnoreParens();
+
+ // Determine whether this is a call to a pointer-to-member function.
+ if (BinaryOperator *op = dyn_cast<BinaryOperator>(NakedMemExpr)) {
+ assert(op->getType() == Context.BoundMemberTy);
+ assert(op->getOpcode() == BO_PtrMemD || op->getOpcode() == BO_PtrMemI);
+
+ QualType fnType =
+ op->getRHS()->getType()->castAs<MemberPointerType>()->getPointeeType();
+
+ const FunctionProtoType *proto = fnType->castAs<FunctionProtoType>();
+ QualType resultType = proto->getCallResultType(Context);
+ ExprValueKind valueKind = Expr::getValueKindForType(proto->getReturnType());
+
+ // Check that the object type isn't more qualified than the
+ // member function we're calling.
+ Qualifiers funcQuals = Qualifiers::fromCVRMask(proto->getTypeQuals());
+
+ QualType objectType = op->getLHS()->getType();
+ if (op->getOpcode() == BO_PtrMemI)
+ objectType = objectType->castAs<PointerType>()->getPointeeType();
+ Qualifiers objectQuals = objectType.getQualifiers();
+
+ Qualifiers difference = objectQuals - funcQuals;
+ difference.removeObjCGCAttr();
+ difference.removeAddressSpace();
+ if (difference) {
+ std::string qualsString = difference.getAsString();
+ Diag(LParenLoc, diag::err_pointer_to_member_call_drops_quals)
+ << fnType.getUnqualifiedType()
+ << qualsString
+ << (qualsString.find(' ') == std::string::npos ? 1 : 2);
+ }
+
+ CXXMemberCallExpr *call
+ = new (Context) CXXMemberCallExpr(Context, MemExprE, Args,
+ resultType, valueKind, RParenLoc);
+
+ if (CheckCallReturnType(proto->getReturnType(), op->getRHS()->getLocStart(),
+ call, nullptr))
+ return ExprError();
+
+ if (ConvertArgumentsForCall(call, op, nullptr, proto, Args, RParenLoc))
+ return ExprError();
+
+ if (CheckOtherCall(call, proto))
+ return ExprError();
+
+ return MaybeBindToTemporary(call);
+ }
+
+ if (isa<CXXPseudoDestructorExpr>(NakedMemExpr))
+ return new (Context)
+ CallExpr(Context, MemExprE, Args, Context.VoidTy, VK_RValue, RParenLoc);
+
+ UnbridgedCastsSet UnbridgedCasts;
+ if (checkArgPlaceholdersForOverload(*this, Args, UnbridgedCasts))
+ return ExprError();
+
+ MemberExpr *MemExpr;
+ CXXMethodDecl *Method = nullptr;
+ DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_public);
+ NestedNameSpecifier *Qualifier = nullptr;
+ if (isa<MemberExpr>(NakedMemExpr)) {
+ MemExpr = cast<MemberExpr>(NakedMemExpr);
+ Method = cast<CXXMethodDecl>(MemExpr->getMemberDecl());
+ FoundDecl = MemExpr->getFoundDecl();
+ Qualifier = MemExpr->getQualifier();
+ UnbridgedCasts.restore();
+ } else {
+ UnresolvedMemberExpr *UnresExpr = cast<UnresolvedMemberExpr>(NakedMemExpr);
+ Qualifier = UnresExpr->getQualifier();
+
+ QualType ObjectType = UnresExpr->getBaseType();
+ Expr::Classification ObjectClassification
+ = UnresExpr->isArrow()? Expr::Classification::makeSimpleLValue()
+ : UnresExpr->getBase()->Classify(Context);
+
+ // Add overload candidates
+ OverloadCandidateSet CandidateSet(UnresExpr->getMemberLoc(),
+ OverloadCandidateSet::CSK_Normal);
+
+ // FIXME: avoid copy.
+ TemplateArgumentListInfo TemplateArgsBuffer, *TemplateArgs = nullptr;
+ if (UnresExpr->hasExplicitTemplateArgs()) {
+ UnresExpr->copyTemplateArgumentsInto(TemplateArgsBuffer);
+ TemplateArgs = &TemplateArgsBuffer;
+ }
+
+ for (UnresolvedMemberExpr::decls_iterator I = UnresExpr->decls_begin(),
+ E = UnresExpr->decls_end(); I != E; ++I) {
+
+ NamedDecl *Func = *I;
+ CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(Func->getDeclContext());
+ if (isa<UsingShadowDecl>(Func))
+ Func = cast<UsingShadowDecl>(Func)->getTargetDecl();
+
+ // Microsoft supports direct constructor calls.
+ if (getLangOpts().MicrosoftExt && isa<CXXConstructorDecl>(Func)) {
+ AddOverloadCandidate(cast<CXXConstructorDecl>(Func), I.getPair(),
+ Args, CandidateSet);
+ } else if ((Method = dyn_cast<CXXMethodDecl>(Func))) {
+ // If explicit template arguments were provided, we can't call a
+ // non-template member function.
+ if (TemplateArgs)
+ continue;
+
+ AddMethodCandidate(Method, I.getPair(), ActingDC, ObjectType,
+ ObjectClassification, Args, CandidateSet,
+ /*SuppressUserConversions=*/false);
+ } else {
+ AddMethodTemplateCandidate(cast<FunctionTemplateDecl>(Func),
+ I.getPair(), ActingDC, TemplateArgs,
+ ObjectType, ObjectClassification,
+ Args, CandidateSet,
+ /*SuppressUserConversions=*/false);
+ }
+ }
+
+ DeclarationName DeclName = UnresExpr->getMemberName();
+
+ UnbridgedCasts.restore();
+
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(*this, UnresExpr->getLocStart(),
+ Best)) {
+ case OR_Success:
+ Method = cast<CXXMethodDecl>(Best->Function);
+ FoundDecl = Best->FoundDecl;
+ CheckUnresolvedMemberAccess(UnresExpr, Best->FoundDecl);
+ if (DiagnoseUseOfDecl(Best->FoundDecl, UnresExpr->getNameLoc()))
+ return ExprError();
+ // If FoundDecl is different from Method (such as if one is a template
+ // and the other a specialization), make sure DiagnoseUseOfDecl is
+ // called on both.
+ // FIXME: This would be more comprehensively addressed by modifying
+ // DiagnoseUseOfDecl to accept both the FoundDecl and the decl
+ // being used.
+ if (Method != FoundDecl.getDecl() &&
+ DiagnoseUseOfDecl(Method, UnresExpr->getNameLoc()))
+ return ExprError();
+ break;
+
+ case OR_No_Viable_Function:
+ Diag(UnresExpr->getMemberLoc(),
+ diag::err_ovl_no_viable_member_function_in_call)
+ << DeclName << MemExprE->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
+ // FIXME: Leaking incoming expressions!
+ return ExprError();
+
+ case OR_Ambiguous:
+ Diag(UnresExpr->getMemberLoc(), diag::err_ovl_ambiguous_member_call)
+ << DeclName << MemExprE->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
+ // FIXME: Leaking incoming expressions!
+ return ExprError();
+
+ case OR_Deleted:
+ Diag(UnresExpr->getMemberLoc(), diag::err_ovl_deleted_member_call)
+ << Best->Function->isDeleted()
+ << DeclName
+ << getDeletedOrUnavailableSuffix(Best->Function)
+ << MemExprE->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
+ // FIXME: Leaking incoming expressions!
+ return ExprError();
+ }
+
+ MemExprE = FixOverloadedFunctionReference(MemExprE, FoundDecl, Method);
+
+ // If overload resolution picked a static member, build a
+ // non-member call based on that function.
+ if (Method->isStatic()) {
+ return BuildResolvedCallExpr(MemExprE, Method, LParenLoc, Args,
+ RParenLoc);
+ }
+
+ MemExpr = cast<MemberExpr>(MemExprE->IgnoreParens());
+ }
+
+ QualType ResultType = Method->getReturnType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultType);
+ ResultType = ResultType.getNonLValueExprType(Context);
+
+ assert(Method && "Member call to something that isn't a method?");
+ CXXMemberCallExpr *TheCall =
+ new (Context) CXXMemberCallExpr(Context, MemExprE, Args,
+ ResultType, VK, RParenLoc);
+
+ // (CUDA B.1): Check for invalid calls between targets.
+ if (getLangOpts().CUDA) {
+ if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext)) {
+ if (CheckCUDATarget(Caller, Method)) {
+ Diag(MemExpr->getMemberLoc(), diag::err_ref_bad_target)
+ << IdentifyCUDATarget(Method) << Method->getIdentifier()
+ << IdentifyCUDATarget(Caller);
+ return ExprError();
+ }
+ }
+ }
+
+ // Check for a valid return type.
+ if (CheckCallReturnType(Method->getReturnType(), MemExpr->getMemberLoc(),
+ TheCall, Method))
+ return ExprError();
+
+ // Convert the object argument (for a non-static member function call).
+ // We only need to do this if there was actually an overload; otherwise
+ // it was done at lookup.
+ if (!Method->isStatic()) {
+ ExprResult ObjectArg =
+ PerformObjectArgumentInitialization(MemExpr->getBase(), Qualifier,
+ FoundDecl, Method);
+ if (ObjectArg.isInvalid())
+ return ExprError();
+ MemExpr->setBase(ObjectArg.get());
+ }
+
+ // Convert the rest of the arguments
+ const FunctionProtoType *Proto =
+ Method->getType()->getAs<FunctionProtoType>();
+ if (ConvertArgumentsForCall(TheCall, MemExpr, Method, Proto, Args,
+ RParenLoc))
+ return ExprError();
+
+ DiagnoseSentinelCalls(Method, LParenLoc, Args);
+
+ if (CheckFunctionCall(Method, TheCall, Proto))
+ return ExprError();
+
+ // If the method to call was not selected by the overload resolution
+ // process, we still need to handle the enable_if attribute. Do that here,
+ // so it will not hide previous -- and more relevant -- errors.
+ if (isa<MemberExpr>(NakedMemExpr)) {
+ if (const EnableIfAttr *Attr = CheckEnableIf(Method, Args, true)) {
+ Diag(MemExprE->getLocStart(),
+ diag::err_ovl_no_viable_member_function_in_call)
+ << Method << Method->getSourceRange();
+ Diag(Method->getLocation(),
+ diag::note_ovl_candidate_disabled_by_enable_if_attr)
+ << Attr->getCond()->getSourceRange() << Attr->getMessage();
+ return ExprError();
+ }
+ }
+
+ if ((isa<CXXConstructorDecl>(CurContext) ||
+ isa<CXXDestructorDecl>(CurContext)) &&
+ TheCall->getMethodDecl()->isPure()) {
+ const CXXMethodDecl *MD = TheCall->getMethodDecl();
+
+ if (isa<CXXThisExpr>(MemExpr->getBase()->IgnoreParenCasts()) &&
+ MemExpr->performsVirtualDispatch(getLangOpts())) {
+ Diag(MemExpr->getLocStart(),
+ diag::warn_call_to_pure_virtual_member_function_from_ctor_dtor)
+ << MD->getDeclName() << isa<CXXDestructorDecl>(CurContext)
+ << MD->getParent()->getDeclName();
+
+ Diag(MD->getLocStart(), diag::note_previous_decl) << MD->getDeclName();
+ if (getLangOpts().AppleKext)
+ Diag(MemExpr->getLocStart(),
+ diag::note_pure_qualified_call_kext)
+ << MD->getParent()->getDeclName()
+ << MD->getDeclName();
+ }
+ }
+ return MaybeBindToTemporary(TheCall);
+}
+
+/// BuildCallToObjectOfClassType - Build a call to an object of class
+/// type (C++ [over.call.object]), which can end up invoking an
+/// overloaded function call operator (@c operator()) or performing a
+/// user-defined conversion on the object argument.
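+///
+/// For illustration only, hypothetical user code showing both cases
+/// (names invented):
+/// \code
+///   struct F { int operator()(int); };  // overloaded call operator
+///   using FnPtr = int (*)(int);
+///   struct G { operator FnPtr(); };     // yields a surrogate call function
+///   F f; f(1);  // calls F::operator()
+///   G g; g(2);  // converts g to FnPtr, then calls the result
+/// \endcode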
+ExprResult
+Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
+ SourceLocation LParenLoc,
+ MultiExprArg Args,
+ SourceLocation RParenLoc) {
+ if (checkPlaceholderForOverload(*this, Obj))
+ return ExprError();
+ ExprResult Object = Obj;
+
+ UnbridgedCastsSet UnbridgedCasts;
+ if (checkArgPlaceholdersForOverload(*this, Args, UnbridgedCasts))
+ return ExprError();
+
+ assert(Object.get()->getType()->isRecordType() &&
+ "Requires object type argument");
+ const RecordType *Record = Object.get()->getType()->getAs<RecordType>();
+
+ // C++ [over.call.object]p1:
+ // If the primary-expression E in the function call syntax
+ // evaluates to a class object of type "cv T", then the set of
+ // candidate functions includes at least the function call
+ // operators of T. The function call operators of T are obtained by
+ // ordinary lookup of the name operator() in the context of
+ // (E).operator().
+ OverloadCandidateSet CandidateSet(LParenLoc,
+ OverloadCandidateSet::CSK_Operator);
+ DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(OO_Call);
+
+ if (RequireCompleteType(LParenLoc, Object.get()->getType(),
+ diag::err_incomplete_object_call, Object.get()))
+ return true;
+
+ LookupResult R(*this, OpName, LParenLoc, LookupOrdinaryName);
+ LookupQualifiedName(R, Record->getDecl());
+ R.suppressDiagnostics();
+
+ for (LookupResult::iterator Oper = R.begin(), OperEnd = R.end();
+ Oper != OperEnd; ++Oper) {
+ AddMethodCandidate(Oper.getPair(), Object.get()->getType(),
+ Object.get()->Classify(Context),
+ Args, CandidateSet,
+ /*SuppressUserConversions=*/ false);
+ }
+
+ // C++ [over.call.object]p2:
+ // In addition, for each (non-explicit in C++0x) conversion function
+ // declared in T of the form
+ //
+ // operator conversion-type-id () cv-qualifier;
+ //
+ // where cv-qualifier is the same cv-qualification as, or a
+ // greater cv-qualification than, cv, and where conversion-type-id
+ // denotes the type "pointer to function of (P1,...,Pn) returning
+ // R", or the type "reference to pointer to function of
+ // (P1,...,Pn) returning R", or the type "reference to function
+ // of (P1,...,Pn) returning R", a surrogate call function [...]
+ // is also considered as a candidate function. Similarly,
+ // surrogate call functions are added to the set of candidate
+ // functions for each conversion function declared in an
+ // accessible base class provided the function is not hidden
+ // within T by another intervening declaration.
+ const auto &Conversions =
+ cast<CXXRecordDecl>(Record->getDecl())->getVisibleConversionFunctions();
+ for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
+ NamedDecl *D = *I;
+ CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(D->getDeclContext());
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+
+ // Skip over templated conversion functions; they aren't
+ // surrogates.
+ if (isa<FunctionTemplateDecl>(D))
+ continue;
+
+ CXXConversionDecl *Conv = cast<CXXConversionDecl>(D);
+ if (!Conv->isExplicit()) {
+ // Strip the reference type (if any) and then the pointer type (if
+ // any) to get down to what might be a function type.
+ QualType ConvType = Conv->getConversionType().getNonReferenceType();
+ if (const PointerType *ConvPtrType = ConvType->getAs<PointerType>())
+ ConvType = ConvPtrType->getPointeeType();
+
+ if (const FunctionProtoType *Proto = ConvType->getAs<FunctionProtoType>())
+ {
+ AddSurrogateCandidate(Conv, I.getPair(), ActingContext, Proto,
+ Object.get(), Args, CandidateSet);
+ }
+ }
+ }
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ // Perform overload resolution.
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(*this, Object.get()->getLocStart(),
+ Best)) {
+ case OR_Success:
+ // Overload resolution succeeded; we'll build the appropriate call
+ // below.
+ break;
+
+ case OR_No_Viable_Function:
+ if (CandidateSet.empty())
+ Diag(Object.get()->getLocStart(), diag::err_ovl_no_oper)
+ << Object.get()->getType() << /*call*/ 1
+ << Object.get()->getSourceRange();
+ else
+ Diag(Object.get()->getLocStart(),
+ diag::err_ovl_no_viable_object_call)
+ << Object.get()->getType() << Object.get()->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
+ break;
+
+ case OR_Ambiguous:
+ Diag(Object.get()->getLocStart(),
+ diag::err_ovl_ambiguous_object_call)
+ << Object.get()->getType() << Object.get()->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args);
+ break;
+
+ case OR_Deleted:
+ Diag(Object.get()->getLocStart(),
+ diag::err_ovl_deleted_object_call)
+ << Best->Function->isDeleted()
+ << Object.get()->getType()
+ << getDeletedOrUnavailableSuffix(Best->Function)
+ << Object.get()->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
+ break;
+ }
+
+ if (Best == CandidateSet.end())
+ return true;
+
+ UnbridgedCasts.restore();
+
+ if (Best->Function == nullptr) {
+ // Since there is no function declaration, this is one of the
+ // surrogate candidates. Dig out the conversion function.
+ CXXConversionDecl *Conv
+ = cast<CXXConversionDecl>(
+ Best->Conversions[0].UserDefined.ConversionFunction);
+
+ CheckMemberOperatorAccess(LParenLoc, Object.get(), nullptr,
+ Best->FoundDecl);
+ if (DiagnoseUseOfDecl(Best->FoundDecl, LParenLoc))
+ return ExprError();
+ assert(Conv == Best->FoundDecl.getDecl() &&
+ "Found Decl & conversion-to-functionptr should be same, right?!");
+ // We selected one of the surrogate functions that converts the
+ // object parameter to a function pointer. Perform the conversion
+ // on the object argument, then let ActOnCallExpr finish the job.
+
+ // Create an implicit member expr to refer to the conversion operator,
+ // and then call it.
+ ExprResult Call = BuildCXXMemberCallExpr(Object.get(), Best->FoundDecl,
+ Conv, HadMultipleCandidates);
+ if (Call.isInvalid())
+ return ExprError();
+ // Record usage of conversion in an implicit cast.
+ Call = ImplicitCastExpr::Create(Context, Call.get()->getType(),
+ CK_UserDefinedConversion, Call.get(),
+ nullptr, VK_RValue);
+
+ return ActOnCallExpr(S, Call.get(), LParenLoc, Args, RParenLoc);
+ }
+
+ CheckMemberOperatorAccess(LParenLoc, Object.get(), nullptr, Best->FoundDecl);
+
+ // We found an overloaded operator(). Build a CXXOperatorCallExpr
+ // that calls this method, using Object for the implicit object
+ // parameter and passing along the remaining arguments.
+ CXXMethodDecl *Method = cast<CXXMethodDecl>(Best->Function);
+
+ // An error diagnostic has already been printed when parsing the declaration.
+ if (Method->isInvalidDecl())
+ return ExprError();
+
+ const FunctionProtoType *Proto =
+ Method->getType()->getAs<FunctionProtoType>();
+
+ unsigned NumParams = Proto->getNumParams();
+
+ DeclarationNameInfo OpLocInfo(
+ Context.DeclarationNames.getCXXOperatorName(OO_Call), LParenLoc);
+ OpLocInfo.setCXXOperatorNameRange(SourceRange(LParenLoc, RParenLoc));
+ ExprResult NewFn = CreateFunctionRefExpr(*this, Method, Best->FoundDecl,
+ HadMultipleCandidates,
+ OpLocInfo.getLoc(),
+ OpLocInfo.getInfo());
+ if (NewFn.isInvalid())
+ return true;
+
+ // Build the full argument list for the method call (the implicit object
+ // parameter is placed at the beginning of the list).
+ std::unique_ptr<Expr * []> MethodArgs(new Expr *[Args.size() + 1]);
+ MethodArgs[0] = Object.get();
+ std::copy(Args.begin(), Args.end(), &MethodArgs[1]);
+
+ // Once we've built TheCall, all of the expressions are properly
+ // owned.
+ QualType ResultTy = Method->getReturnType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultTy);
+ ResultTy = ResultTy.getNonLValueExprType(Context);
+
+ CXXOperatorCallExpr *TheCall = new (Context)
+ CXXOperatorCallExpr(Context, OO_Call, NewFn.get(),
+ llvm::makeArrayRef(MethodArgs.get(), Args.size() + 1),
+ ResultTy, VK, RParenLoc, false);
+ MethodArgs.reset();
+
+ if (CheckCallReturnType(Method->getReturnType(), LParenLoc, TheCall, Method))
+ return true;
+
+ // We may have default arguments. If so, we need to allocate more
+ // slots in the call for them.
+ if (Args.size() < NumParams)
+ TheCall->setNumArgs(Context, NumParams + 1);
+
+ bool IsError = false;
+
+ // Initialize the implicit object parameter.
+ ExprResult ObjRes =
+ PerformObjectArgumentInitialization(Object.get(), /*Qualifier=*/nullptr,
+ Best->FoundDecl, Method);
+ if (ObjRes.isInvalid())
+ IsError = true;
+ else
+ Object = ObjRes;
+ TheCall->setArg(0, Object.get());
+
+ // Check the argument types.
+ for (unsigned i = 0; i != NumParams; i++) {
+ Expr *Arg;
+ if (i < Args.size()) {
+ Arg = Args[i];
+
+ // Pass the argument.
+
+ ExprResult InputInit
+ = PerformCopyInitialization(InitializedEntity::InitializeParameter(
+ Context,
+ Method->getParamDecl(i)),
+ SourceLocation(), Arg);
+
+ IsError |= InputInit.isInvalid();
+ Arg = InputInit.getAs<Expr>();
+ } else {
+ ExprResult DefArg
+ = BuildCXXDefaultArgExpr(LParenLoc, Method, Method->getParamDecl(i));
+ if (DefArg.isInvalid()) {
+ IsError = true;
+ break;
+ }
+
+ Arg = DefArg.getAs<Expr>();
+ }
+
+ TheCall->setArg(i + 1, Arg);
+ }
+
+ // If this is a variadic call, handle args passed through "...".
+ if (Proto->isVariadic()) {
+ // Promote the arguments (C99 6.5.2.2p7).
+ for (unsigned i = NumParams, e = Args.size(); i < e; i++) {
+ ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], VariadicMethod,
+ nullptr);
+ IsError |= Arg.isInvalid();
+ TheCall->setArg(i + 1, Arg.get());
+ }
+ }
+
+ if (IsError) return true;
+
+ DiagnoseSentinelCalls(Method, LParenLoc, Args);
+
+ if (CheckFunctionCall(Method, TheCall, Proto))
+ return true;
+
+ return MaybeBindToTemporary(TheCall);
+}
+
+/// BuildOverloadedArrowExpr - Build a call to an overloaded @c operator->
+/// (if one exists), where @c Base is an expression of class type. The
+/// resulting expression is what the caller then applies the member access
+/// to; the member name itself is resolved by the caller.
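+///
+/// For illustration only (hypothetical user code, names invented):
+/// \code
+///   struct Box { int x; };
+///   struct Ptr { Box *operator->(); };
+///   Ptr p;
+///   int i = p->x; // interpreted as (p.operator->())->x
+/// \endcode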
+ExprResult
+Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
+ bool *NoArrowOperatorFound) {
+ assert(Base->getType()->isRecordType() &&
+ "left-hand side must have class type");
+
+ if (checkPlaceholderForOverload(*this, Base))
+ return ExprError();
+
+ SourceLocation Loc = Base->getExprLoc();
+
+ // C++ [over.ref]p1:
+ //
+ // [...] An expression x->m is interpreted as (x.operator->())->m
+ // for a class object x of type T if T::operator->() exists and if
+ // the operator is selected as the best match function by the
+ // overload resolution mechanism (13.3).
+ DeclarationName OpName =
+ Context.DeclarationNames.getCXXOperatorName(OO_Arrow);
+ OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Operator);
+ const RecordType *BaseRecord = Base->getType()->getAs<RecordType>();
+
+ if (RequireCompleteType(Loc, Base->getType(),
+ diag::err_typecheck_incomplete_tag, Base))
+ return ExprError();
+
+ LookupResult R(*this, OpName, OpLoc, LookupOrdinaryName);
+ LookupQualifiedName(R, BaseRecord->getDecl());
+ R.suppressDiagnostics();
+
+ for (LookupResult::iterator Oper = R.begin(), OperEnd = R.end();
+ Oper != OperEnd; ++Oper) {
+ AddMethodCandidate(Oper.getPair(), Base->getType(), Base->Classify(Context),
+ None, CandidateSet, /*SuppressUserConversions=*/false);
+ }
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ // Perform overload resolution.
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(*this, OpLoc, Best)) {
+ case OR_Success:
+ // Overload resolution succeeded; we'll build the call below.
+ break;
+
+ case OR_No_Viable_Function:
+ if (CandidateSet.empty()) {
+ QualType BaseType = Base->getType();
+ if (NoArrowOperatorFound) {
+ // Report this specific error to the caller instead of emitting a
+ // diagnostic, as requested.
+ *NoArrowOperatorFound = true;
+ return ExprError();
+ }
+ Diag(OpLoc, diag::err_typecheck_member_reference_arrow)
+ << BaseType << Base->getSourceRange();
+ if (BaseType->isRecordType() && !BaseType->isPointerType()) {
+ Diag(OpLoc, diag::note_typecheck_member_reference_suggestion)
+ << FixItHint::CreateReplacement(OpLoc, ".");
+ }
+ } else
+ Diag(OpLoc, diag::err_ovl_no_viable_oper)
+ << "operator->" << Base->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Base);
+ return ExprError();
+
+ case OR_Ambiguous:
+ Diag(OpLoc, diag::err_ovl_ambiguous_oper_unary)
+ << "->" << Base->getType() << Base->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Base);
+ return ExprError();
+
+ case OR_Deleted:
+ Diag(OpLoc, diag::err_ovl_deleted_oper)
+ << Best->Function->isDeleted()
+ << "->"
+ << getDeletedOrUnavailableSuffix(Best->Function)
+ << Base->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Base);
+ return ExprError();
+ }
+
+ CheckMemberOperatorAccess(OpLoc, Base, nullptr, Best->FoundDecl);
+
+ // Convert the object parameter.
+ CXXMethodDecl *Method = cast<CXXMethodDecl>(Best->Function);
+ ExprResult BaseResult =
+ PerformObjectArgumentInitialization(Base, /*Qualifier=*/nullptr,
+ Best->FoundDecl, Method);
+ if (BaseResult.isInvalid())
+ return ExprError();
+ Base = BaseResult.get();
+
+ // Build the operator call.
+ ExprResult FnExpr = CreateFunctionRefExpr(*this, Method, Best->FoundDecl,
+ HadMultipleCandidates, OpLoc);
+ if (FnExpr.isInvalid())
+ return ExprError();
+
+ QualType ResultTy = Method->getReturnType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultTy);
+ ResultTy = ResultTy.getNonLValueExprType(Context);
+ CXXOperatorCallExpr *TheCall =
+ new (Context) CXXOperatorCallExpr(Context, OO_Arrow, FnExpr.get(),
+ Base, ResultTy, VK, OpLoc, false);
+
+ if (CheckCallReturnType(Method->getReturnType(), OpLoc, TheCall, Method))
+ return ExprError();
+
+ return MaybeBindToTemporary(TheCall);
+}
+
+/// BuildLiteralOperatorCall - Build a UserDefinedLiteral by creating a call to
+/// a literal operator described by the provided lookup results.
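+///
+/// For illustration only (hypothetical user code; the suffix is invented):
+/// \code
+///   long double operator""_km(long double v) { return v * 1000.0L; }
+///   auto d = 3.5_km; // builds a call to operator""_km(3.5L)
+/// \endcode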
+ExprResult Sema::BuildLiteralOperatorCall(LookupResult &R,
+ DeclarationNameInfo &SuffixInfo,
+ ArrayRef<Expr*> Args,
+ SourceLocation LitEndLoc,
+ TemplateArgumentListInfo *TemplateArgs) {
+ SourceLocation UDSuffixLoc = SuffixInfo.getCXXLiteralOperatorNameLoc();
+
+ OverloadCandidateSet CandidateSet(UDSuffixLoc,
+ OverloadCandidateSet::CSK_Normal);
+ AddFunctionCandidates(R.asUnresolvedSet(), Args, CandidateSet, TemplateArgs,
+ /*SuppressUserConversions=*/true);
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ // Perform overload resolution. This will usually be trivial, but might need
+ // to perform substitutions for a literal operator template.
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(*this, UDSuffixLoc, Best)) {
+ case OR_Success:
+ case OR_Deleted:
+ break;
+
+ case OR_No_Viable_Function:
+ Diag(UDSuffixLoc, diag::err_ovl_no_viable_function_in_call)
+ << R.getLookupName();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
+ return ExprError();
+
+ case OR_Ambiguous:
+ Diag(R.getNameLoc(), diag::err_ovl_ambiguous_call) << R.getLookupName();
+ CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args);
+ return ExprError();
+ }
+
+ FunctionDecl *FD = Best->Function;
+ ExprResult Fn = CreateFunctionRefExpr(*this, FD, Best->FoundDecl,
+ HadMultipleCandidates,
+ SuffixInfo.getLoc(),
+ SuffixInfo.getInfo());
+ if (Fn.isInvalid())
+ return true;
+
+ // Check the argument types. This should almost always be a no-op, except
+ // that array-to-pointer decay is applied to string literals.
+ Expr *ConvArgs[2];
+ for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
+ ExprResult InputInit = PerformCopyInitialization(
+ InitializedEntity::InitializeParameter(Context, FD->getParamDecl(ArgIdx)),
+ SourceLocation(), Args[ArgIdx]);
+ if (InputInit.isInvalid())
+ return true;
+ ConvArgs[ArgIdx] = InputInit.get();
+ }
+
+ QualType ResultTy = FD->getReturnType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultTy);
+ ResultTy = ResultTy.getNonLValueExprType(Context);
+
+ UserDefinedLiteral *UDL =
+ new (Context) UserDefinedLiteral(Context, Fn.get(),
+ llvm::makeArrayRef(ConvArgs, Args.size()),
+ ResultTy, VK, LitEndLoc, UDSuffixLoc);
+
+ if (CheckCallReturnType(FD->getReturnType(), UDSuffixLoc, UDL, FD))
+ return ExprError();
+
+ if (CheckFunctionCall(FD, UDL, nullptr))
+ return ExprError();
+
+ return MaybeBindToTemporary(UDL);
+}
+
+/// Build a call to 'begin' or 'end' for a C++11 for-range statement. If the
+/// given LookupResult is non-empty, it is assumed to describe a member which
+/// will be invoked. Otherwise, the function will be found via argument
+/// dependent lookup.
+/// On success, CallExpr is set to a valid expression and FRS_Success is
+/// returned; otherwise, CallExpr is set to ExprError() and a non-success
+/// status is returned.
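+///
+/// For illustration only: for a hypothetical loop
+/// \code
+///   for (auto x : range) { /* ... */ }
+/// \endcode
+/// this builds the 'range.begin()'/'range.end()' calls (or 'begin(range)'/
+/// 'end(range)' found via argument-dependent lookup) that the rewritten
+/// loop uses.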
+Sema::ForRangeStatus
+Sema::BuildForRangeBeginEndCall(SourceLocation Loc,
+ SourceLocation RangeLoc,
+ const DeclarationNameInfo &NameInfo,
+ LookupResult &MemberLookup,
+ OverloadCandidateSet *CandidateSet,
+ Expr *Range, ExprResult *CallExpr) {
+ Scope *S = nullptr;
+
+ CandidateSet->clear();
+ if (!MemberLookup.empty()) {
+ ExprResult MemberRef =
+ BuildMemberReferenceExpr(Range, Range->getType(), Loc,
+ /*IsPtr=*/false, CXXScopeSpec(),
+ /*TemplateKWLoc=*/SourceLocation(),
+ /*FirstQualifierInScope=*/nullptr,
+ MemberLookup,
+ /*TemplateArgs=*/nullptr, S);
+ if (MemberRef.isInvalid()) {
+ *CallExpr = ExprError();
+ return FRS_DiagnosticIssued;
+ }
+ *CallExpr = ActOnCallExpr(S, MemberRef.get(), Loc, None, Loc, nullptr);
+ if (CallExpr->isInvalid()) {
+ *CallExpr = ExprError();
+ return FRS_DiagnosticIssued;
+ }
+ } else {
+ UnresolvedSet<0> FoundNames;
+ UnresolvedLookupExpr *Fn =
+ UnresolvedLookupExpr::Create(Context, /*NamingClass=*/nullptr,
+ NestedNameSpecifierLoc(), NameInfo,
+ /*NeedsADL=*/true, /*Overloaded=*/false,
+ FoundNames.begin(), FoundNames.end());
+
+ bool CandidateSetError = buildOverloadedCallSet(S, Fn, Fn, Range, Loc,
+ CandidateSet, CallExpr);
+ if (CandidateSet->empty() || CandidateSetError) {
+ *CallExpr = ExprError();
+ return FRS_NoViableFunction;
+ }
+ OverloadCandidateSet::iterator Best;
+ OverloadingResult OverloadResult =
+ CandidateSet->BestViableFunction(*this, Fn->getLocStart(), Best);
+
+ if (OverloadResult == OR_No_Viable_Function) {
+ *CallExpr = ExprError();
+ return FRS_NoViableFunction;
+ }
+ *CallExpr = FinishOverloadedCallExpr(*this, S, Fn, Fn, Loc, Range,
+ Loc, nullptr, CandidateSet, &Best,
+ OverloadResult,
+ /*AllowTypoCorrection=*/false);
+ if (CallExpr->isInvalid() || OverloadResult != OR_Success) {
+ *CallExpr = ExprError();
+ return FRS_DiagnosticIssued;
+ }
+ }
+ return FRS_Success;
+}
+
+/// FixOverloadedFunctionReference - E is an expression that refers to
+/// a C++ overloaded function (possibly with some parentheses and
+/// perhaps a '&' around it). We have resolved the overloaded function
+/// to the function declaration Fn, so patch up the expression E to
+/// refer (possibly indirectly) to Fn. Returns the new expr.
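+///
+/// For illustration only (hypothetical user code, names invented):
+/// \code
+///   void f(int);
+///   void f(double);
+///   void g(void (*)(int));
+///   g(&f); // '&f' is patched to refer to f(int) once it is selected
+/// \endcode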
+Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
+ FunctionDecl *Fn) {
+ if (ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
+ Expr *SubExpr = FixOverloadedFunctionReference(PE->getSubExpr(),
+ Found, Fn);
+ if (SubExpr == PE->getSubExpr())
+ return PE;
+
+ return new (Context) ParenExpr(PE->getLParen(), PE->getRParen(), SubExpr);
+ }
+
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ Expr *SubExpr = FixOverloadedFunctionReference(ICE->getSubExpr(),
+ Found, Fn);
+ assert(Context.hasSameType(ICE->getSubExpr()->getType(),
+ SubExpr->getType()) &&
+ "Implicit cast type cannot be determined from overload");
+ assert(ICE->path_empty() && "fixing up hierarchy conversion?");
+ if (SubExpr == ICE->getSubExpr())
+ return ICE;
+
+ return ImplicitCastExpr::Create(Context, ICE->getType(),
+ ICE->getCastKind(),
+ SubExpr, nullptr,
+ ICE->getValueKind());
+ }
+
+ if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(E)) {
+ assert(UnOp->getOpcode() == UO_AddrOf &&
+ "Can only take the address of an overloaded function");
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn)) {
+ if (Method->isStatic()) {
+ // Do nothing: static member functions aren't any different
+ // from non-member functions.
+ } else {
+ // Fix the subexpression, which really has to be an
+ // UnresolvedLookupExpr holding an overloaded member function
+ // or template.
+ Expr *SubExpr = FixOverloadedFunctionReference(UnOp->getSubExpr(),
+ Found, Fn);
+ if (SubExpr == UnOp->getSubExpr())
+ return UnOp;
+
+ assert(isa<DeclRefExpr>(SubExpr)
+ && "fixed to something other than a decl ref");
+ assert(cast<DeclRefExpr>(SubExpr)->getQualifier()
+ && "fixed to a member ref with no nested name qualifier");
+
+ // We have taken the address of a pointer to member
+ // function. Perform the computation here so that we get the
+ // appropriate pointer to member type.
+ QualType ClassType
+ = Context.getTypeDeclType(cast<RecordDecl>(Method->getDeclContext()));
+ QualType MemPtrType
+ = Context.getMemberPointerType(Fn->getType(), ClassType.getTypePtr());
+
+ return new (Context) UnaryOperator(SubExpr, UO_AddrOf, MemPtrType,
+ VK_RValue, OK_Ordinary,
+ UnOp->getOperatorLoc());
+ }
+ }
+ Expr *SubExpr = FixOverloadedFunctionReference(UnOp->getSubExpr(),
+ Found, Fn);
+ if (SubExpr == UnOp->getSubExpr())
+ return UnOp;
+
+ return new (Context) UnaryOperator(SubExpr, UO_AddrOf,
+ Context.getPointerType(SubExpr->getType()),
+ VK_RValue, OK_Ordinary,
+ UnOp->getOperatorLoc());
+ }
+
+ if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
+ // FIXME: avoid copy.
+ TemplateArgumentListInfo TemplateArgsBuffer, *TemplateArgs = nullptr;
+ if (ULE->hasExplicitTemplateArgs()) {
+ ULE->copyTemplateArgumentsInto(TemplateArgsBuffer);
+ TemplateArgs = &TemplateArgsBuffer;
+ }
+
+ DeclRefExpr *DRE = DeclRefExpr::Create(Context,
+ ULE->getQualifierLoc(),
+ ULE->getTemplateKeywordLoc(),
+ Fn,
+ /*enclosing*/ false, // FIXME?
+ ULE->getNameLoc(),
+ Fn->getType(),
+ VK_LValue,
+ Found.getDecl(),
+ TemplateArgs);
+ MarkDeclRefReferenced(DRE);
+ DRE->setHadMultipleCandidates(ULE->getNumDecls() > 1);
+ return DRE;
+ }
+
+ if (UnresolvedMemberExpr *MemExpr = dyn_cast<UnresolvedMemberExpr>(E)) {
+ // FIXME: avoid copy.
+ TemplateArgumentListInfo TemplateArgsBuffer, *TemplateArgs = nullptr;
+ if (MemExpr->hasExplicitTemplateArgs()) {
+ MemExpr->copyTemplateArgumentsInto(TemplateArgsBuffer);
+ TemplateArgs = &TemplateArgsBuffer;
+ }
+
+ Expr *Base;
+
+ // If we're filling in a static method where we used to have an
+ // implicit member access, rewrite to a simple decl ref.
+ if (MemExpr->isImplicitAccess()) {
+ if (cast<CXXMethodDecl>(Fn)->isStatic()) {
+ DeclRefExpr *DRE = DeclRefExpr::Create(Context,
+ MemExpr->getQualifierLoc(),
+ MemExpr->getTemplateKeywordLoc(),
+ Fn,
+ /*enclosing*/ false,
+ MemExpr->getMemberLoc(),
+ Fn->getType(),
+ VK_LValue,
+ Found.getDecl(),
+ TemplateArgs);
+ MarkDeclRefReferenced(DRE);
+ DRE->setHadMultipleCandidates(MemExpr->getNumDecls() > 1);
+ return DRE;
+ } else {
+ SourceLocation Loc = MemExpr->getMemberLoc();
+ if (MemExpr->getQualifier())
+ Loc = MemExpr->getQualifierLoc().getBeginLoc();
+ CheckCXXThisCapture(Loc);
+ Base = new (Context) CXXThisExpr(Loc,
+ MemExpr->getBaseType(),
+ /*isImplicit=*/true);
+ }
+ } else
+ Base = MemExpr->getBase();
+
+ ExprValueKind valueKind;
+ QualType type;
+ if (cast<CXXMethodDecl>(Fn)->isStatic()) {
+ valueKind = VK_LValue;
+ type = Fn->getType();
+ } else {
+ valueKind = VK_RValue;
+ type = Context.BoundMemberTy;
+ }
+
+ MemberExpr *ME = MemberExpr::Create(
+ Context, Base, MemExpr->isArrow(), MemExpr->getOperatorLoc(),
+ MemExpr->getQualifierLoc(), MemExpr->getTemplateKeywordLoc(), Fn, Found,
+ MemExpr->getMemberNameInfo(), TemplateArgs, type, valueKind,
+ OK_Ordinary);
+ ME->setHadMultipleCandidates(true);
+ MarkMemberReferenced(ME);
+ return ME;
+ }
+
+ llvm_unreachable("Invalid reference to overloaded function");
+}
+
+ExprResult Sema::FixOverloadedFunctionReference(ExprResult E,
+ DeclAccessPair Found,
+ FunctionDecl *Fn) {
+ return FixOverloadedFunctionReference(E.get(), Found, Fn);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp
new file mode 100644
index 0000000..e5d51f1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp
@@ -0,0 +1,1664 @@
+//===--- SemaPseudoObject.cpp - Semantic Analysis for Pseudo-Objects ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for expressions involving
+// pseudo-object references. Pseudo-objects are conceptual objects
+// whose storage is entirely abstract and all accesses to which are
+// translated through some sort of abstraction barrier.
+//
+// For example, Objective-C objects can have "properties", either
+// declared or undeclared. A property may be accessed by writing
+// expr.prop
+// where 'expr' is an r-value of Objective-C pointer type and 'prop'
+// is the name of the property. If this expression is used in a context
+// needing an r-value, it is treated as if it were a message-send
+// of the associated 'getter' selector, typically:
+// [expr prop]
+// If it is used as the LHS of a simple assignment, it is treated
+// as a message-send of the associated 'setter' selector, typically:
+// [expr setProp: RHS]
+// If it is used as the LHS of a compound assignment, or the operand
+// of a unary increment or decrement, both are required; for example,
+// 'expr.prop *= 100' would be translated to:
+// [expr setProp: [expr prop] * 100]
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace sema;
+
+namespace {
+ // Basically just a very focused copy of TreeTransform.
+ struct Rebuilder {
+ Sema &S;
+ unsigned MSPropertySubscriptCount;
+ typedef llvm::function_ref<Expr *(Expr *, unsigned)> SpecificRebuilderRefTy;
+ const SpecificRebuilderRefTy &SpecificCallback;
+ Rebuilder(Sema &S, const SpecificRebuilderRefTy &SpecificCallback)
+ : S(S), MSPropertySubscriptCount(0),
+ SpecificCallback(SpecificCallback) {}
+
+ Expr *rebuildObjCPropertyRefExpr(ObjCPropertyRefExpr *refExpr) {
+ // Fortunately, the constraint that we're rebuilding something
+ // with a base limits the number of cases here.
+ if (refExpr->isClassReceiver() || refExpr->isSuperReceiver())
+ return refExpr;
+
+ if (refExpr->isExplicitProperty()) {
+ return new (S.Context) ObjCPropertyRefExpr(
+ refExpr->getExplicitProperty(), refExpr->getType(),
+ refExpr->getValueKind(), refExpr->getObjectKind(),
+ refExpr->getLocation(), SpecificCallback(refExpr->getBase(), 0));
+ }
+ return new (S.Context) ObjCPropertyRefExpr(
+ refExpr->getImplicitPropertyGetter(),
+ refExpr->getImplicitPropertySetter(), refExpr->getType(),
+ refExpr->getValueKind(), refExpr->getObjectKind(),
+ refExpr->getLocation(), SpecificCallback(refExpr->getBase(), 0));
+ }
+ Expr *rebuildObjCSubscriptRefExpr(ObjCSubscriptRefExpr *refExpr) {
+ assert(refExpr->getBaseExpr());
+ assert(refExpr->getKeyExpr());
+
+ return new (S.Context) ObjCSubscriptRefExpr(
+ SpecificCallback(refExpr->getBaseExpr(), 0),
+ SpecificCallback(refExpr->getKeyExpr(), 1), refExpr->getType(),
+ refExpr->getValueKind(), refExpr->getObjectKind(),
+ refExpr->getAtIndexMethodDecl(), refExpr->setAtIndexMethodDecl(),
+ refExpr->getRBracket());
+ }
+ Expr *rebuildMSPropertyRefExpr(MSPropertyRefExpr *refExpr) {
+ assert(refExpr->getBaseExpr());
+
+ return new (S.Context) MSPropertyRefExpr(
+ SpecificCallback(refExpr->getBaseExpr(), 0),
+ refExpr->getPropertyDecl(), refExpr->isArrow(), refExpr->getType(),
+ refExpr->getValueKind(), refExpr->getQualifierLoc(),
+ refExpr->getMemberLoc());
+ }
+ Expr *rebuildMSPropertySubscriptExpr(MSPropertySubscriptExpr *refExpr) {
+ assert(refExpr->getBase());
+ assert(refExpr->getIdx());
+
+ auto *NewBase = rebuild(refExpr->getBase());
+ ++MSPropertySubscriptCount;
+ return new (S.Context) MSPropertySubscriptExpr(
+ NewBase,
+ SpecificCallback(refExpr->getIdx(), MSPropertySubscriptCount),
+ refExpr->getType(), refExpr->getValueKind(), refExpr->getObjectKind(),
+ refExpr->getRBracketLoc());
+ }
+
+ Expr *rebuild(Expr *e) {
+ // Fast path: nothing to look through.
+ if (auto *PRE = dyn_cast<ObjCPropertyRefExpr>(e))
+ return rebuildObjCPropertyRefExpr(PRE);
+ if (auto *SRE = dyn_cast<ObjCSubscriptRefExpr>(e))
+ return rebuildObjCSubscriptRefExpr(SRE);
+ if (auto *MSPRE = dyn_cast<MSPropertyRefExpr>(e))
+ return rebuildMSPropertyRefExpr(MSPRE);
+ if (auto *MSPSE = dyn_cast<MSPropertySubscriptExpr>(e))
+ return rebuildMSPropertySubscriptExpr(MSPSE);
+
+ // Otherwise, we should look through and rebuild anything that
+ // IgnoreParens would.
+
+ if (ParenExpr *parens = dyn_cast<ParenExpr>(e)) {
+ e = rebuild(parens->getSubExpr());
+ return new (S.Context) ParenExpr(parens->getLParen(),
+ parens->getRParen(),
+ e);
+ }
+
+ if (UnaryOperator *uop = dyn_cast<UnaryOperator>(e)) {
+ assert(uop->getOpcode() == UO_Extension);
+ e = rebuild(uop->getSubExpr());
+ return new (S.Context) UnaryOperator(e, uop->getOpcode(),
+ uop->getType(),
+ uop->getValueKind(),
+ uop->getObjectKind(),
+ uop->getOperatorLoc());
+ }
+
+ if (GenericSelectionExpr *gse = dyn_cast<GenericSelectionExpr>(e)) {
+ assert(!gse->isResultDependent());
+ unsigned resultIndex = gse->getResultIndex();
+ unsigned numAssocs = gse->getNumAssocs();
+
+ SmallVector<Expr*, 8> assocs(numAssocs);
+ SmallVector<TypeSourceInfo*, 8> assocTypes(numAssocs);
+
+ for (unsigned i = 0; i != numAssocs; ++i) {
+ Expr *assoc = gse->getAssocExpr(i);
+ if (i == resultIndex) assoc = rebuild(assoc);
+ assocs[i] = assoc;
+ assocTypes[i] = gse->getAssocTypeSourceInfo(i);
+ }
+
+ return new (S.Context) GenericSelectionExpr(S.Context,
+ gse->getGenericLoc(),
+ gse->getControllingExpr(),
+ assocTypes,
+ assocs,
+ gse->getDefaultLoc(),
+ gse->getRParenLoc(),
+ gse->containsUnexpandedParameterPack(),
+ resultIndex);
+ }
+
+ if (ChooseExpr *ce = dyn_cast<ChooseExpr>(e)) {
+ assert(!ce->isConditionDependent());
+
+ Expr *LHS = ce->getLHS(), *RHS = ce->getRHS();
+ Expr *&rebuiltExpr = ce->isConditionTrue() ? LHS : RHS;
+ rebuiltExpr = rebuild(rebuiltExpr);
+
+ return new (S.Context) ChooseExpr(ce->getBuiltinLoc(),
+ ce->getCond(),
+ LHS, RHS,
+ rebuiltExpr->getType(),
+ rebuiltExpr->getValueKind(),
+ rebuiltExpr->getObjectKind(),
+ ce->getRParenLoc(),
+ ce->isConditionTrue(),
+ rebuiltExpr->isTypeDependent(),
+ rebuiltExpr->isValueDependent());
+ }
+
+ llvm_unreachable("bad expression to rebuild!");
+ }
+ };
+
+ class PseudoOpBuilder {
+ public:
+ Sema &S;
+ unsigned ResultIndex;
+ SourceLocation GenericLoc;
+ SmallVector<Expr *, 4> Semantics;
+
+ PseudoOpBuilder(Sema &S, SourceLocation genericLoc)
+ : S(S), ResultIndex(PseudoObjectExpr::NoResult),
+ GenericLoc(genericLoc) {}
+
+ virtual ~PseudoOpBuilder() {}
+
+ /// Add a normal semantic expression.
+ void addSemanticExpr(Expr *semantic) {
+ Semantics.push_back(semantic);
+ }
+
+ /// Add the 'result' semantic expression.
+ void addResultSemanticExpr(Expr *resultExpr) {
+ assert(ResultIndex == PseudoObjectExpr::NoResult);
+ ResultIndex = Semantics.size();
+ Semantics.push_back(resultExpr);
+ }
+
+ ExprResult buildRValueOperation(Expr *op);
+ ExprResult buildAssignmentOperation(Scope *Sc,
+ SourceLocation opLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS);
+ ExprResult buildIncDecOperation(Scope *Sc, SourceLocation opLoc,
+ UnaryOperatorKind opcode,
+ Expr *op);
+
+ virtual ExprResult complete(Expr *syntacticForm);
+
+ OpaqueValueExpr *capture(Expr *op);
+ OpaqueValueExpr *captureValueAsResult(Expr *op);
+
+ void setResultToLastSemantic() {
+ assert(ResultIndex == PseudoObjectExpr::NoResult);
+ ResultIndex = Semantics.size() - 1;
+ }
+
+ /// Return true if the expression's value can be captured as the result
+ /// of the operation, i.e. whether assignments have a non-void result.
+ static bool CanCaptureValue(Expr *exp) {
+ if (exp->isGLValue())
+ return true;
+ QualType ty = exp->getType();
+ assert(!ty->isIncompleteType());
+ assert(!ty->isDependentType());
+
+ if (const CXXRecordDecl *ClassDecl = ty->getAsCXXRecordDecl())
+ return ClassDecl->isTriviallyCopyable();
+ return true;
+ }
+
+ virtual Expr *rebuildAndCaptureObject(Expr *) = 0;
+ virtual ExprResult buildGet() = 0;
+ virtual ExprResult buildSet(Expr *, SourceLocation,
+ bool captureSetValueAsResult) = 0;
+ /// \brief Should the result of an assignment be the formal result of the
+ /// setter call or the value that was passed to the setter?
+ ///
+ /// Different pseudo-object language features use different language rules
+ /// for this.
+ /// The default is to use the set value. Currently, this affects the
+ /// behavior of simple assignments, compound assignments, and prefix
+ /// increment and decrement.
+ /// Postfix increment and decrement always use the getter result as the
+ /// expression result.
+ ///
+ /// If this method returns true, and the set value isn't capturable for
+ /// some reason, the result of the expression will be void.
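+ ///
+ /// For example (a sketch): with the default of true, 'x = (obj.prop =
+ /// f())' gives 'x' the value that was passed to the setter; a builder
+ /// returning false (as the MSVC property builder below does) exposes
+ /// the setter call's own result instead.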
+ virtual bool captureSetValueAsResult() const { return true; }
+ };
+
+ /// A PseudoOpBuilder for Objective-C \@properties.
+ class ObjCPropertyOpBuilder : public PseudoOpBuilder {
+ ObjCPropertyRefExpr *RefExpr;
+ ObjCPropertyRefExpr *SyntacticRefExpr;
+ OpaqueValueExpr *InstanceReceiver;
+ ObjCMethodDecl *Getter;
+
+ ObjCMethodDecl *Setter;
+ Selector SetterSelector;
+ Selector GetterSelector;
+
+ public:
+ ObjCPropertyOpBuilder(Sema &S, ObjCPropertyRefExpr *refExpr) :
+ PseudoOpBuilder(S, refExpr->getLocation()), RefExpr(refExpr),
+ SyntacticRefExpr(nullptr), InstanceReceiver(nullptr), Getter(nullptr),
+ Setter(nullptr) {
+ }
+
+ ExprResult buildRValueOperation(Expr *op);
+ ExprResult buildAssignmentOperation(Scope *Sc,
+ SourceLocation opLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS);
+ ExprResult buildIncDecOperation(Scope *Sc, SourceLocation opLoc,
+ UnaryOperatorKind opcode,
+ Expr *op);
+
+ bool tryBuildGetOfReference(Expr *op, ExprResult &result);
+ bool findSetter(bool warn=true);
+ bool findGetter();
+ void DiagnoseUnsupportedPropertyUse();
+
+ Expr *rebuildAndCaptureObject(Expr *syntacticBase) override;
+ ExprResult buildGet() override;
+ ExprResult buildSet(Expr *op, SourceLocation, bool) override;
+ ExprResult complete(Expr *SyntacticForm) override;
+
+ bool isWeakProperty() const;
+ };
+
+ /// A PseudoOpBuilder for Objective-C array/dictionary indexing.
+ class ObjCSubscriptOpBuilder : public PseudoOpBuilder {
+ ObjCSubscriptRefExpr *RefExpr;
+ OpaqueValueExpr *InstanceBase;
+ OpaqueValueExpr *InstanceKey;
+ ObjCMethodDecl *AtIndexGetter;
+ Selector AtIndexGetterSelector;
+
+ ObjCMethodDecl *AtIndexSetter;
+ Selector AtIndexSetterSelector;
+
+ public:
+ ObjCSubscriptOpBuilder(Sema &S, ObjCSubscriptRefExpr *refExpr) :
+ PseudoOpBuilder(S, refExpr->getSourceRange().getBegin()),
+ RefExpr(refExpr),
+ InstanceBase(nullptr), InstanceKey(nullptr),
+ AtIndexGetter(nullptr), AtIndexSetter(nullptr) {}
+
+ ExprResult buildRValueOperation(Expr *op);
+ ExprResult buildAssignmentOperation(Scope *Sc,
+ SourceLocation opLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS);
+ Expr *rebuildAndCaptureObject(Expr *syntacticBase) override;
+
+ bool findAtIndexGetter();
+ bool findAtIndexSetter();
+
+ ExprResult buildGet() override;
+ ExprResult buildSet(Expr *op, SourceLocation, bool) override;
+ };
+
+ class MSPropertyOpBuilder : public PseudoOpBuilder {
+ MSPropertyRefExpr *RefExpr;
+ OpaqueValueExpr *InstanceBase;
+ SmallVector<Expr *, 4> CallArgs;
+
+ MSPropertyRefExpr *getBaseMSProperty(MSPropertySubscriptExpr *E);
+
+ public:
+ MSPropertyOpBuilder(Sema &S, MSPropertyRefExpr *refExpr) :
+ PseudoOpBuilder(S, refExpr->getSourceRange().getBegin()),
+ RefExpr(refExpr), InstanceBase(nullptr) {}
+ MSPropertyOpBuilder(Sema &S, MSPropertySubscriptExpr *refExpr)
+ : PseudoOpBuilder(S, refExpr->getSourceRange().getBegin()),
+ InstanceBase(nullptr) {
+ RefExpr = getBaseMSProperty(refExpr);
+ }
+
+ Expr *rebuildAndCaptureObject(Expr *) override;
+ ExprResult buildGet() override;
+ ExprResult buildSet(Expr *op, SourceLocation, bool) override;
+ bool captureSetValueAsResult() const override { return false; }
+ };
+}
+
+/// Capture the given expression in an OpaqueValueExpr.
+OpaqueValueExpr *PseudoOpBuilder::capture(Expr *e) {
+ // Make a new OVE whose source is the given expression.
+ OpaqueValueExpr *captured =
+ new (S.Context) OpaqueValueExpr(GenericLoc, e->getType(),
+ e->getValueKind(), e->getObjectKind(),
+ e);
+
+ // Make sure we bind that in the semantics.
+ addSemanticExpr(captured);
+ return captured;
+}
+
+/// Capture the given expression as the result of this pseudo-object
+/// operation. This routine is safe against expressions which may
+/// already be captured.
+///
+/// \returns the captured expression, which will be the
+/// same as the input if the input was already captured
+OpaqueValueExpr *PseudoOpBuilder::captureValueAsResult(Expr *e) {
+ assert(ResultIndex == PseudoObjectExpr::NoResult);
+
+ // If the expression hasn't already been captured, just capture it
+ // and make that capture the result semantic.
+ if (!isa<OpaqueValueExpr>(e)) {
+ OpaqueValueExpr *cap = capture(e);
+ setResultToLastSemantic();
+ return cap;
+ }
+
+ // Otherwise, it must already be one of our semantic expressions;
+ // set ResultIndex to its index.
+ unsigned index = 0;
+ for (;; ++index) {
+ assert(index < Semantics.size() &&
+ "captured expression not found in semantics!");
+ if (e == Semantics[index]) break;
+ }
+ ResultIndex = index;
+ return cast<OpaqueValueExpr>(e);
+}
+
+/// The routine which creates the final PseudoObjectExpr.
+ExprResult PseudoOpBuilder::complete(Expr *syntactic) {
+ return PseudoObjectExpr::Create(S.Context, syntactic,
+ Semantics, ResultIndex);
+}
+
+/// The main skeleton for building an r-value operation.
+ExprResult PseudoOpBuilder::buildRValueOperation(Expr *op) {
+ Expr *syntacticBase = rebuildAndCaptureObject(op);
+
+ ExprResult getExpr = buildGet();
+ if (getExpr.isInvalid()) return ExprError();
+ addResultSemanticExpr(getExpr.get());
+
+ return complete(syntacticBase);
+}
+
+/// The basic skeleton for building a simple or compound
+/// assignment operation.
+ExprResult
+PseudoOpBuilder::buildAssignmentOperation(Scope *Sc, SourceLocation opcLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS) {
+ assert(BinaryOperator::isAssignmentOp(opcode));
+
+ Expr *syntacticLHS = rebuildAndCaptureObject(LHS);
+ OpaqueValueExpr *capturedRHS = capture(RHS);
+
+ // In some very specific cases, semantic analysis of the RHS as an
+ // expression may require it to be rewritten. In these cases, we
+ // cannot safely keep the OVE around. Fortunately, we don't really
+ // need to: we don't use this particular OVE in multiple places, and
+ // no clients rely that closely on matching up expressions in the
+ // semantic expression with expressions from the syntactic form.
+ Expr *semanticRHS = capturedRHS;
+ if (RHS->hasPlaceholderType() || isa<InitListExpr>(RHS)) {
+ semanticRHS = RHS;
+ Semantics.pop_back();
+ }
+
+ Expr *syntactic;
+
+ ExprResult result;
+ if (opcode == BO_Assign) {
+ result = semanticRHS;
+ syntactic = new (S.Context) BinaryOperator(syntacticLHS, capturedRHS,
+ opcode, capturedRHS->getType(),
+ capturedRHS->getValueKind(),
+ OK_Ordinary, opcLoc, false);
+ } else {
+ ExprResult opLHS = buildGet();
+ if (opLHS.isInvalid()) return ExprError();
+
+ // Build an ordinary, non-compound operation.
+ BinaryOperatorKind nonCompound =
+ BinaryOperator::getOpForCompoundAssignment(opcode);
+ result = S.BuildBinOp(Sc, opcLoc, nonCompound, opLHS.get(), semanticRHS);
+ if (result.isInvalid()) return ExprError();
+
+ syntactic =
+ new (S.Context) CompoundAssignOperator(syntacticLHS, capturedRHS, opcode,
+ result.get()->getType(),
+ result.get()->getValueKind(),
+ OK_Ordinary,
+ opLHS.get()->getType(),
+ result.get()->getType(),
+ opcLoc, false);
+ }
+
+ // The result of the assignment, if not void, is the value set into
+ // the l-value.
+ result = buildSet(result.get(), opcLoc, captureSetValueAsResult());
+ if (result.isInvalid()) return ExprError();
+ addSemanticExpr(result.get());
+ if (!captureSetValueAsResult() && !result.get()->getType()->isVoidType() &&
+ (result.get()->isTypeDependent() || CanCaptureValue(result.get())))
+ setResultToLastSemantic();
+
+ return complete(syntactic);
+}
+
+/// The basic skeleton for building an increment or decrement
+/// operation.
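+///
+/// As a sketch: for postfix 'obj.idx++', the getter result is captured
+/// as the expression's value and 'value + 1' is stored back through the
+/// setter; prefix forms instead yield the stored value (subject to
+/// captureSetValueAsResult()).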
+ExprResult
+PseudoOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc,
+ UnaryOperatorKind opcode,
+ Expr *op) {
+ assert(UnaryOperator::isIncrementDecrementOp(opcode));
+
+ Expr *syntacticOp = rebuildAndCaptureObject(op);
+
+ // Load the value.
+ ExprResult result = buildGet();
+ if (result.isInvalid()) return ExprError();
+
+ QualType resultType = result.get()->getType();
+
+ // That's the postfix result.
+ if (UnaryOperator::isPostfix(opcode) &&
+ (result.get()->isTypeDependent() || CanCaptureValue(result.get()))) {
+ result = capture(result.get());
+ setResultToLastSemantic();
+ }
+
+ // Add or subtract a literal 1.
+ llvm::APInt oneV(S.Context.getTypeSize(S.Context.IntTy), 1);
+ Expr *one = IntegerLiteral::Create(S.Context, oneV, S.Context.IntTy,
+ GenericLoc);
+
+ if (UnaryOperator::isIncrementOp(opcode)) {
+ result = S.BuildBinOp(Sc, opcLoc, BO_Add, result.get(), one);
+ } else {
+ result = S.BuildBinOp(Sc, opcLoc, BO_Sub, result.get(), one);
+ }
+ if (result.isInvalid()) return ExprError();
+
+ // Store that back into the result. The value stored is the result
+ // of a prefix operation.
+ result = buildSet(result.get(), opcLoc, UnaryOperator::isPrefix(opcode) &&
+ captureSetValueAsResult());
+ if (result.isInvalid()) return ExprError();
+ addSemanticExpr(result.get());
+ if (UnaryOperator::isPrefix(opcode) && !captureSetValueAsResult() &&
+ !result.get()->getType()->isVoidType() &&
+ (result.get()->isTypeDependent() || CanCaptureValue(result.get())))
+ setResultToLastSemantic();
+
+ UnaryOperator *syntactic =
+ new (S.Context) UnaryOperator(syntacticOp, opcode, resultType,
+ VK_LValue, OK_Ordinary, opcLoc);
+ return complete(syntactic);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Objective-C @property and implicit property references
+//===----------------------------------------------------------------------===//
+
+/// Look up a method in the receiver type of an Objective-C property
+/// reference.
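+///
+/// For example, inside a class method the receiver of 'self.foo' has
+/// type 'Class'; the special case below resolves the accessor against
+/// the class methods of the enclosing interface instead.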
+static ObjCMethodDecl *LookupMethodInReceiverType(Sema &S, Selector sel,
+ const ObjCPropertyRefExpr *PRE) {
+ if (PRE->isObjectReceiver()) {
+ const ObjCObjectPointerType *PT =
+ PRE->getBase()->getType()->castAs<ObjCObjectPointerType>();
+
+ // Special case for 'self' in class method implementations.
+ if (PT->isObjCClassType() &&
+ S.isSelfExpr(const_cast<Expr*>(PRE->getBase()))) {
+ // This cast is safe because isSelfExpr is only true within
+ // methods.
+ ObjCMethodDecl *method =
+ cast<ObjCMethodDecl>(S.CurContext->getNonClosureAncestor());
+ return S.LookupMethodInObjectType(sel,
+ S.Context.getObjCInterfaceType(method->getClassInterface()),
+ /*instance*/ false);
+ }
+
+ return S.LookupMethodInObjectType(sel, PT->getPointeeType(), true);
+ }
+
+ if (PRE->isSuperReceiver()) {
+ if (const ObjCObjectPointerType *PT =
+ PRE->getSuperReceiverType()->getAs<ObjCObjectPointerType>())
+ return S.LookupMethodInObjectType(sel, PT->getPointeeType(), true);
+
+ return S.LookupMethodInObjectType(sel, PRE->getSuperReceiverType(), false);
+ }
+
+ assert(PRE->isClassReceiver() && "Invalid expression");
+ QualType IT = S.Context.getObjCInterfaceType(PRE->getClassReceiver());
+ return S.LookupMethodInObjectType(sel, IT, false);
+}
+
+bool ObjCPropertyOpBuilder::isWeakProperty() const {
+ QualType T;
+ if (RefExpr->isExplicitProperty()) {
+ const ObjCPropertyDecl *Prop = RefExpr->getExplicitProperty();
+ if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)
+ return !Prop->hasAttr<IBOutletAttr>();
+
+ T = Prop->getType();
+ } else if (Getter) {
+ T = Getter->getReturnType();
+ } else {
+ return false;
+ }
+
+ return T.getObjCLifetime() == Qualifiers::OCL_Weak;
+}
+
+bool ObjCPropertyOpBuilder::findGetter() {
+ if (Getter) return true;
+
+ // For implicit properties, just trust the lookup we already did.
+ if (RefExpr->isImplicitProperty()) {
+ if ((Getter = RefExpr->getImplicitPropertyGetter())) {
+ GetterSelector = Getter->getSelector();
+ return true;
+ } else {
+ // Must build the getter selector the hard way.
+ ObjCMethodDecl *setter = RefExpr->getImplicitPropertySetter();
+ assert(setter && "both setter and getter are null - cannot happen");
+ IdentifierInfo *setterName =
+ setter->getSelector().getIdentifierInfoForSlot(0);
+ IdentifierInfo *getterName =
+ &S.Context.Idents.get(setterName->getName().substr(3));
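+ // e.g. an implicit setter 'setFoo:' yields the nullary getter
+ // selector 'Foo' here: substr(3) drops only the 'set' prefix and
+ // keeps the capitalized first letter.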
+ GetterSelector =
+ S.PP.getSelectorTable().getNullarySelector(getterName);
+ return false;
+ }
+ }
+
+ ObjCPropertyDecl *prop = RefExpr->getExplicitProperty();
+ Getter = LookupMethodInReceiverType(S, prop->getGetterName(), RefExpr);
+ return (Getter != nullptr);
+}
+
+/// Try to find the most accurate setter declaration for the property
+/// reference.
+///
+/// \return true if a setter was found, in which case Setter is set to it.
+bool ObjCPropertyOpBuilder::findSetter(bool warn) {
+ // For implicit properties, just trust the lookup we already did.
+ if (RefExpr->isImplicitProperty()) {
+ if (ObjCMethodDecl *setter = RefExpr->getImplicitPropertySetter()) {
+ Setter = setter;
+ SetterSelector = setter->getSelector();
+ return true;
+ } else {
+ IdentifierInfo *getterName =
+ RefExpr->getImplicitPropertyGetter()->getSelector()
+ .getIdentifierInfoForSlot(0);
+ SetterSelector =
+ SelectorTable::constructSetterSelector(S.PP.getIdentifierTable(),
+ S.PP.getSelectorTable(),
+ getterName);
+ return false;
+ }
+ }
+
+ // For explicit properties, this is more involved.
+ ObjCPropertyDecl *prop = RefExpr->getExplicitProperty();
+ SetterSelector = prop->getSetterName();
+
+ // Do a normal method lookup first.
+ if (ObjCMethodDecl *setter =
+ LookupMethodInReceiverType(S, SetterSelector, RefExpr)) {
+ if (setter->isPropertyAccessor() && warn)
+ if (const ObjCInterfaceDecl *IFace =
+ dyn_cast<ObjCInterfaceDecl>(setter->getDeclContext())) {
+ StringRef thisPropertyName = prop->getName();
+ // Try flipping the case of the first character.
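+ // (e.g. sibling properties 'foo' and 'Foo', which both default to
+ // the setter 'setFoo:', are diagnosed as ambiguous below.)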
+ char front = thisPropertyName.front();
+ front = isLowercase(front) ? toUppercase(front) : toLowercase(front);
+ SmallString<100> PropertyName = thisPropertyName;
+ PropertyName[0] = front;
+ IdentifierInfo *AltMember = &S.PP.getIdentifierTable().get(PropertyName);
+ if (ObjCPropertyDecl *prop1 = IFace->FindPropertyDeclaration(AltMember))
+ if (prop != prop1 && (prop1->getSetterMethodDecl() == setter)) {
+ S.Diag(RefExpr->getExprLoc(), diag::error_property_setter_ambiguous_use)
+ << prop << prop1 << setter->getSelector();
+ S.Diag(prop->getLocation(), diag::note_property_declare);
+ S.Diag(prop1->getLocation(), diag::note_property_declare);
+ }
+ }
+ Setter = setter;
+ return true;
+ }
+
+ // That can fail in the somewhat crazy situation that we're
+ // type-checking a message send within the @interface declaration
+ // that declared the @property. But it's not clear that that's
+ // valuable to support.
+
+ return false;
+}
+
+void ObjCPropertyOpBuilder::DiagnoseUnsupportedPropertyUse() {
+ if (S.getCurLexicalContext()->isObjCContainer() &&
+ S.getCurLexicalContext()->getDeclKind() != Decl::ObjCCategoryImpl &&
+ S.getCurLexicalContext()->getDeclKind() != Decl::ObjCImplementation) {
+ if (ObjCPropertyDecl *prop = RefExpr->getExplicitProperty()) {
+ S.Diag(RefExpr->getLocation(),
+ diag::err_property_function_in_objc_container);
+ S.Diag(prop->getLocation(), diag::note_property_declare);
+ }
+ }
+}
+
+/// Capture the base object of an Objective-C property expression.
+Expr *ObjCPropertyOpBuilder::rebuildAndCaptureObject(Expr *syntacticBase) {
+ assert(InstanceReceiver == nullptr);
+
+ // If we have a base, capture it in an OVE and rebuild the syntactic
+ // form to use the OVE as its base.
+ if (RefExpr->isObjectReceiver()) {
+ InstanceReceiver = capture(RefExpr->getBase());
+ syntacticBase = Rebuilder(S, [=](Expr *, unsigned) -> Expr * {
+ return InstanceReceiver;
+ }).rebuild(syntacticBase);
+ }
+
+ if (ObjCPropertyRefExpr *
+ refE = dyn_cast<ObjCPropertyRefExpr>(syntacticBase->IgnoreParens()))
+ SyntacticRefExpr = refE;
+
+ return syntacticBase;
+}
+
+/// Load from an Objective-C property reference.
+ExprResult ObjCPropertyOpBuilder::buildGet() {
+ findGetter();
+ if (!Getter) {
+ DiagnoseUnsupportedPropertyUse();
+ return ExprError();
+ }
+
+ if (SyntacticRefExpr)
+ SyntacticRefExpr->setIsMessagingGetter();
+
+ QualType receiverType = RefExpr->getReceiverType(S.Context);
+ if (!Getter->isImplicit())
+ S.DiagnoseUseOfDecl(Getter, GenericLoc, nullptr, true);
+ // Build a message-send.
+ ExprResult msg;
+ if ((Getter->isInstanceMethod() && !RefExpr->isClassReceiver()) ||
+ RefExpr->isObjectReceiver()) {
+ assert(InstanceReceiver || RefExpr->isSuperReceiver());
+ msg = S.BuildInstanceMessageImplicit(InstanceReceiver, receiverType,
+ GenericLoc, Getter->getSelector(),
+ Getter, None);
+ } else {
+ msg = S.BuildClassMessageImplicit(receiverType, RefExpr->isSuperReceiver(),
+ GenericLoc, Getter->getSelector(),
+ Getter, None);
+ }
+ return msg;
+}
+
+/// Store to an Objective-C property reference.
+///
+/// \param captureSetValueAsResult If true, capture the actual
+/// value being set as the value of the property operation.
+ExprResult ObjCPropertyOpBuilder::buildSet(Expr *op, SourceLocation opcLoc,
+ bool captureSetValueAsResult) {
+ if (!findSetter(false)) {
+ DiagnoseUnsupportedPropertyUse();
+ return ExprError();
+ }
+
+ if (SyntacticRefExpr)
+ SyntacticRefExpr->setIsMessagingSetter();
+
+ QualType receiverType = RefExpr->getReceiverType(S.Context);
+
+ // Use assignment constraints when possible; they give us better
+ // diagnostics. "When possible" basically means anything except a
+ // C++ class type.
+ if (!S.getLangOpts().CPlusPlus || !op->getType()->isRecordType()) {
+ QualType paramType = (*Setter->param_begin())->getType()
+ .substObjCMemberType(
+ receiverType,
+ Setter->getDeclContext(),
+ ObjCSubstitutionContext::Parameter);
+ if (!S.getLangOpts().CPlusPlus || !paramType->isRecordType()) {
+ ExprResult opResult = op;
+ Sema::AssignConvertType assignResult
+ = S.CheckSingleAssignmentConstraints(paramType, opResult);
+ if (S.DiagnoseAssignmentResult(assignResult, opcLoc, paramType,
+ op->getType(), opResult.get(),
+ Sema::AA_Assigning))
+ return ExprError();
+
+ op = opResult.get();
+ assert(op && "successful assignment left argument invalid?");
+ }
+ }
+
+ // Arguments.
+ Expr *args[] = { op };
+
+ // Build a message-send.
+ ExprResult msg;
+ if (!Setter->isImplicit())
+ S.DiagnoseUseOfDecl(Setter, GenericLoc, nullptr, true);
+ if ((Setter->isInstanceMethod() && !RefExpr->isClassReceiver()) ||
+ RefExpr->isObjectReceiver()) {
+ msg = S.BuildInstanceMessageImplicit(InstanceReceiver, receiverType,
+ GenericLoc, SetterSelector, Setter,
+ MultiExprArg(args, 1));
+ } else {
+ msg = S.BuildClassMessageImplicit(receiverType, RefExpr->isSuperReceiver(),
+ GenericLoc,
+ SetterSelector, Setter,
+ MultiExprArg(args, 1));
+ }
+
+ if (!msg.isInvalid() && captureSetValueAsResult) {
+ ObjCMessageExpr *msgExpr =
+ cast<ObjCMessageExpr>(msg.get()->IgnoreImplicit());
+ Expr *arg = msgExpr->getArg(0);
+ if (CanCaptureValue(arg))
+ msgExpr->setArg(0, captureValueAsResult(arg));
+ }
+
+ return msg;
+}
+
+/// @property-specific behavior for doing lvalue-to-rvalue conversion.
+ExprResult ObjCPropertyOpBuilder::buildRValueOperation(Expr *op) {
+ // Explicit properties always have getters, but implicit ones need not.
+ // Check for one before proceeding.
+ if (RefExpr->isImplicitProperty() && !RefExpr->getImplicitPropertyGetter()) {
+ S.Diag(RefExpr->getLocation(), diag::err_getter_not_found)
+ << RefExpr->getSourceRange();
+ return ExprError();
+ }
+
+ ExprResult result = PseudoOpBuilder::buildRValueOperation(op);
+ if (result.isInvalid()) return ExprError();
+
+ if (RefExpr->isExplicitProperty() && !Getter->hasRelatedResultType())
+ S.DiagnosePropertyAccessorMismatch(RefExpr->getExplicitProperty(),
+ Getter, RefExpr->getLocation());
+
+ // As a special case, if the method returns 'id', try to get
+ // a better type from the property.
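+ // (For instance, a getter typed as returning 'id' for a property
+ // declared 'NSString *' gets a bit-cast down to 'NSString *' below.)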
+ if (RefExpr->isExplicitProperty() && result.get()->isRValue()) {
+ QualType receiverType = RefExpr->getReceiverType(S.Context);
+ QualType propType = RefExpr->getExplicitProperty()
+ ->getUsageType(receiverType);
+ if (result.get()->getType()->isObjCIdType()) {
+ if (const ObjCObjectPointerType *ptr
+ = propType->getAs<ObjCObjectPointerType>()) {
+ if (!ptr->isObjCIdType())
+ result = S.ImpCastExprToType(result.get(), propType, CK_BitCast);
+ }
+ }
+ if (S.getLangOpts().ObjCAutoRefCount) {
+ Qualifiers::ObjCLifetime LT = propType.getObjCLifetime();
+ if (LT == Qualifiers::OCL_Weak)
+ if (!S.Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, RefExpr->getLocation()))
+ S.getCurFunction()->markSafeWeakUse(RefExpr);
+ }
+ }
+
+ return result;
+}
+
+/// Try to build this as a call to a getter that returns a reference.
+///
+/// \return true if it was possible, whether or not it actually
+/// succeeded
+bool ObjCPropertyOpBuilder::tryBuildGetOfReference(Expr *op,
+ ExprResult &result) {
+ if (!S.getLangOpts().CPlusPlus) return false;
+
+ findGetter();
+ if (!Getter) {
+ // The property has no setter and no getter! This can happen if the type
+ // is invalid. Errors have already been reported.
+ result = ExprError();
+ return true;
+ }
+
+ // Only do this if the getter returns an l-value reference type.
+ QualType resultType = Getter->getReturnType();
+ if (!resultType->isLValueReferenceType()) return false;
+
+ result = buildRValueOperation(op);
+ return true;
+}
+
+/// @property-specific behavior for doing assignments.
+ExprResult
+ObjCPropertyOpBuilder::buildAssignmentOperation(Scope *Sc,
+ SourceLocation opcLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS) {
+ assert(BinaryOperator::isAssignmentOp(opcode));
+
+ // If there's no setter, we have no choice but to try to assign to
+ // the result of the getter.
+ if (!findSetter()) {
+ ExprResult result;
+ if (tryBuildGetOfReference(LHS, result)) {
+ if (result.isInvalid()) return ExprError();
+ return S.BuildBinOp(Sc, opcLoc, opcode, result.get(), RHS);
+ }
+
+ // Otherwise, it's an error.
+ S.Diag(opcLoc, diag::err_nosetter_property_assignment)
+ << unsigned(RefExpr->isImplicitProperty())
+ << SetterSelector
+ << LHS->getSourceRange() << RHS->getSourceRange();
+ return ExprError();
+ }
+
+ // If there is a setter, we definitely want to use it.
+
+ // Verify that we can do a compound assignment.
+ if (opcode != BO_Assign && !findGetter()) {
+ S.Diag(opcLoc, diag::err_nogetter_property_compound_assignment)
+ << LHS->getSourceRange() << RHS->getSourceRange();
+ return ExprError();
+ }
+
+ ExprResult result =
+ PseudoOpBuilder::buildAssignmentOperation(Sc, opcLoc, opcode, LHS, RHS);
+ if (result.isInvalid()) return ExprError();
+
+ // Various warnings about property assignments in ARC.
+ if (S.getLangOpts().ObjCAutoRefCount && InstanceReceiver) {
+ S.checkRetainCycles(InstanceReceiver->getSourceExpr(), RHS);
+ S.checkUnsafeExprAssigns(opcLoc, LHS, RHS);
+ }
+
+ return result;
+}
+
+/// @property-specific behavior for doing increments and decrements.
+ExprResult
+ObjCPropertyOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc,
+ UnaryOperatorKind opcode,
+ Expr *op) {
+ // If there's no setter, we have no choice but to try to assign to
+ // the result of the getter.
+ if (!findSetter()) {
+ ExprResult result;
+ if (tryBuildGetOfReference(op, result)) {
+ if (result.isInvalid()) return ExprError();
+ return S.BuildUnaryOp(Sc, opcLoc, opcode, result.get());
+ }
+
+ // Otherwise, it's an error.
+ S.Diag(opcLoc, diag::err_nosetter_property_incdec)
+ << unsigned(RefExpr->isImplicitProperty())
+ << unsigned(UnaryOperator::isDecrementOp(opcode))
+ << SetterSelector
+ << op->getSourceRange();
+ return ExprError();
+ }
+
+ // If there is a setter, we definitely want to use it.
+
+ // We also need a getter.
+ if (!findGetter()) {
+ assert(RefExpr->isImplicitProperty());
+ S.Diag(opcLoc, diag::err_nogetter_property_incdec)
+ << unsigned(UnaryOperator::isDecrementOp(opcode))
+ << GetterSelector
+ << op->getSourceRange();
+ return ExprError();
+ }
+
+ return PseudoOpBuilder::buildIncDecOperation(Sc, opcLoc, opcode, op);
+}
+
+ExprResult ObjCPropertyOpBuilder::complete(Expr *SyntacticForm) {
+ if (S.getLangOpts().ObjCAutoRefCount && isWeakProperty() &&
+ !S.Diags.isIgnored(diag::warn_arc_repeated_use_of_weak,
+ SyntacticForm->getLocStart()))
+ S.recordUseOfEvaluatedWeak(SyntacticRefExpr,
+ SyntacticRefExpr->isMessagingGetter());
+
+ return PseudoOpBuilder::complete(SyntacticForm);
+}
+
+// ObjCSubscript build stuff.
+//
+
+/// Objective-C subscripting-specific behavior for doing lvalue-to-rvalue
+/// conversion.
+/// FIXME: Remove this routine if it is proven that no additional
+/// specificity is needed.
+ExprResult ObjCSubscriptOpBuilder::buildRValueOperation(Expr *op) {
+ ExprResult result = PseudoOpBuilder::buildRValueOperation(op);
+ if (result.isInvalid()) return ExprError();
+ return result;
+}
+
+/// Objective-C subscripting-specific behavior for doing assignments.
+ExprResult
+ObjCSubscriptOpBuilder::buildAssignmentOperation(Scope *Sc,
+ SourceLocation opcLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS) {
+ assert(BinaryOperator::isAssignmentOp(opcode));
+ // There must be a method to do the indexed assignment.
+ if (!findAtIndexSetter())
+ return ExprError();
+
+ // Verify that we can do a compound assignment.
+ if (opcode != BO_Assign && !findAtIndexGetter())
+ return ExprError();
+
+ ExprResult result =
+ PseudoOpBuilder::buildAssignmentOperation(Sc, opcLoc, opcode, LHS, RHS);
+ if (result.isInvalid()) return ExprError();
+
+ // Various warnings about Objective-C indexed assignments in ARC.
+ if (S.getLangOpts().ObjCAutoRefCount && InstanceBase) {
+ S.checkRetainCycles(InstanceBase->getSourceExpr(), RHS);
+ S.checkUnsafeExprAssigns(opcLoc, LHS, RHS);
+ }
+
+ return result;
+}
+
+/// Capture the base object of an Objective-C indexed expression.
+Expr *ObjCSubscriptOpBuilder::rebuildAndCaptureObject(Expr *syntacticBase) {
+ assert(InstanceBase == nullptr);
+
+ // Capture base expression in an OVE and rebuild the syntactic
+ // form to use the OVE as its base expression.
+ InstanceBase = capture(RefExpr->getBaseExpr());
+ InstanceKey = capture(RefExpr->getKeyExpr());
+
+ syntacticBase =
+ Rebuilder(S, [=](Expr *, unsigned Idx) -> Expr * {
+ switch (Idx) {
+ case 0:
+ return InstanceBase;
+ case 1:
+ return InstanceKey;
+ default:
+ llvm_unreachable("Unexpected index for ObjCSubscriptExpr");
+ }
+ }).rebuild(syntacticBase);
+
+ return syntacticBase;
+}
+
+/// CheckSubscriptingKind - This routine decides what kind of indexing,
+/// represented by "FromE", is being done.
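+///
+/// For example (a sketch): an integral or enumeration key, as in
+/// 'array[i]', classifies as OS_Array; an Objective-C object pointer
+/// key, as in 'dict[@"name"]', classifies as OS_Dictionary; a C++
+/// class-typed key is classified through its visible conversion
+/// functions below.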
+Sema::ObjCSubscriptKind
+ Sema::CheckSubscriptingKind(Expr *FromE) {
+ // If the expression already has integral or enumeration type, we're golden.
+ QualType T = FromE->getType();
+ if (T->isIntegralOrEnumerationType())
+ return OS_Array;
+
+ // If we don't have a class type in C++, there's no way we can get an
+ // expression of integral or enumeration type.
+ const RecordType *RecordTy = T->getAs<RecordType>();
+ if (!RecordTy &&
+ (T->isObjCObjectPointerType() || T->isVoidPointerType()))
+ // All other scalar cases are assumed to be dictionary indexing, which
+ // the caller handles, with diagnostics if needed.
+ return OS_Dictionary;
+ if (!getLangOpts().CPlusPlus ||
+ !RecordTy || RecordTy->isIncompleteType()) {
+ // No indexing can be done. Issue diagnostics and quit.
+ const Expr *IndexExpr = FromE->IgnoreParenImpCasts();
+ if (isa<StringLiteral>(IndexExpr))
+ Diag(FromE->getExprLoc(), diag::err_objc_subscript_pointer)
+ << T << FixItHint::CreateInsertion(FromE->getExprLoc(), "@");
+ else
+ Diag(FromE->getExprLoc(), diag::err_objc_subscript_type_conversion)
+ << T;
+ return OS_Error;
+ }
+
+ // We must have a complete class type.
+ if (RequireCompleteType(FromE->getExprLoc(), T,
+ diag::err_objc_index_incomplete_class_type, FromE))
+ return OS_Error;
+
+ // Look for a conversion to an integral or enumeration type, or to an
+ // Objective-C pointer type.
+ int NoIntegrals = 0, NoObjCIdPointers = 0;
+ SmallVector<CXXConversionDecl *, 4> ConversionDecls;
+
+ for (NamedDecl *D : cast<CXXRecordDecl>(RecordTy->getDecl())
+ ->getVisibleConversionFunctions()) {
+ if (CXXConversionDecl *Conversion =
+ dyn_cast<CXXConversionDecl>(D->getUnderlyingDecl())) {
+ QualType CT = Conversion->getConversionType().getNonReferenceType();
+ if (CT->isIntegralOrEnumerationType()) {
+ ++NoIntegrals;
+ ConversionDecls.push_back(Conversion);
+ }
+ else if (CT->isObjCIdType() || CT->isBlockPointerType()) {
+ ++NoObjCIdPointers;
+ ConversionDecls.push_back(Conversion);
+ }
+ }
+ }
+ if (NoIntegrals == 1 && NoObjCIdPointers == 0)
+ return OS_Array;
+ if (NoIntegrals == 0 && NoObjCIdPointers == 1)
+ return OS_Dictionary;
+ if (NoIntegrals == 0 && NoObjCIdPointers == 0) {
+ // No conversion function was found. Issue diagnostic and return.
+ Diag(FromE->getExprLoc(), diag::err_objc_subscript_type_conversion)
+ << FromE->getType();
+ return OS_Error;
+ }
+ Diag(FromE->getExprLoc(), diag::err_objc_multiple_subscript_type_conversion)
+ << FromE->getType();
+ for (unsigned int i = 0; i < ConversionDecls.size(); i++)
+ Diag(ConversionDecls[i]->getLocation(), diag::not_conv_function_declared_at);
+
+ return OS_Error;
+}
+
+/// CheckKeyForObjCARCConversion - This routine suggests bridge casting of CF
+/// objects used as dictionary subscript key objects.
+static void CheckKeyForObjCARCConversion(Sema &S, QualType ContainerT,
+ Expr *Key) {
+ if (ContainerT.isNull())
+ return;
+ // dictionary subscripting.
+ // - (id)objectForKeyedSubscript:(id)key;
+ IdentifierInfo *KeyIdents[] = {
+ &S.Context.Idents.get("objectForKeyedSubscript")
+ };
+ Selector GetterSelector = S.Context.Selectors.getSelector(1, KeyIdents);
+ ObjCMethodDecl *Getter = S.LookupMethodInObjectType(GetterSelector, ContainerT,
+ true /*instance*/);
+ if (!Getter)
+ return;
+ QualType T = Getter->parameters()[0]->getType();
+ S.CheckObjCARCConversion(Key->getSourceRange(),
+ T, Key, Sema::CCK_ImplicitConversion);
+}
+
+bool ObjCSubscriptOpBuilder::findAtIndexGetter() {
+ if (AtIndexGetter)
+ return true;
+
+ Expr *BaseExpr = RefExpr->getBaseExpr();
+ QualType BaseT = BaseExpr->getType();
+
+ QualType ResultType;
+ if (const ObjCObjectPointerType *PTy =
+ BaseT->getAs<ObjCObjectPointerType>()) {
+ ResultType = PTy->getPointeeType();
+ }
+ Sema::ObjCSubscriptKind Res =
+ S.CheckSubscriptingKind(RefExpr->getKeyExpr());
+ if (Res == Sema::OS_Error) {
+ if (S.getLangOpts().ObjCAutoRefCount)
+ CheckKeyForObjCARCConversion(S, ResultType,
+ RefExpr->getKeyExpr());
+ return false;
+ }
+ bool arrayRef = (Res == Sema::OS_Array);
+
+ if (ResultType.isNull()) {
+ S.Diag(BaseExpr->getExprLoc(), diag::err_objc_subscript_base_type)
+ << BaseExpr->getType() << arrayRef;
+ return false;
+ }
+ if (!arrayRef) {
+ // dictionary subscripting.
+ // - (id)objectForKeyedSubscript:(id)key;
+ IdentifierInfo *KeyIdents[] = {
+ &S.Context.Idents.get("objectForKeyedSubscript")
+ };
+ AtIndexGetterSelector = S.Context.Selectors.getSelector(1, KeyIdents);
+ } else {
+ // - (id)objectAtIndexedSubscript:(size_t)index;
+ IdentifierInfo *KeyIdents[] = {
+ &S.Context.Idents.get("objectAtIndexedSubscript")
+ };
+
+ AtIndexGetterSelector = S.Context.Selectors.getSelector(1, KeyIdents);
+ }
+
+ AtIndexGetter = S.LookupMethodInObjectType(AtIndexGetterSelector, ResultType,
+ true /*instance*/);
+ bool receiverIdType = (BaseT->isObjCIdType() ||
+ BaseT->isObjCQualifiedIdType());
+
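+ // When evaluating expressions for a debugger (DebuggerObjCLiteral, as
+ // set by tools such as LLDB), synthesize a plausible getter
+ // declaration below rather than failing the lookup.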
+ if (!AtIndexGetter && S.getLangOpts().DebuggerObjCLiteral) {
+ AtIndexGetter = ObjCMethodDecl::Create(S.Context, SourceLocation(),
+ SourceLocation(), AtIndexGetterSelector,
+ S.Context.getObjCIdType() /*ReturnType*/,
+ nullptr /*TypeSourceInfo */,
+ S.Context.getTranslationUnitDecl(),
+ true /*Instance*/, false/*isVariadic*/,
+ /*isPropertyAccessor=*/false,
+ /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
+ ObjCMethodDecl::Required,
+ false);
+ ParmVarDecl *Argument = ParmVarDecl::Create(S.Context, AtIndexGetter,
+ SourceLocation(), SourceLocation(),
+ arrayRef ? &S.Context.Idents.get("index")
+ : &S.Context.Idents.get("key"),
+ arrayRef ? S.Context.UnsignedLongTy
+ : S.Context.getObjCIdType(),
+ /*TInfo=*/nullptr,
+ SC_None,
+ nullptr);
+ AtIndexGetter->setMethodParams(S.Context, Argument, None);
+ }
+
+ if (!AtIndexGetter) {
+ if (!receiverIdType) {
+ S.Diag(BaseExpr->getExprLoc(), diag::err_objc_subscript_method_not_found)
+ << BaseExpr->getType() << 0 << arrayRef;
+ return false;
+ }
+ AtIndexGetter =
+ S.LookupInstanceMethodInGlobalPool(AtIndexGetterSelector,
+ RefExpr->getSourceRange(),
+ true);
+ }
+
+ if (AtIndexGetter) {
+ QualType T = AtIndexGetter->parameters()[0]->getType();
+ if ((arrayRef && !T->isIntegralOrEnumerationType()) ||
+ (!arrayRef && !T->isObjCObjectPointerType())) {
+ S.Diag(RefExpr->getKeyExpr()->getExprLoc(),
+ arrayRef ? diag::err_objc_subscript_index_type
+ : diag::err_objc_subscript_key_type) << T;
+ S.Diag(AtIndexGetter->parameters()[0]->getLocation(),
+ diag::note_parameter_type) << T;
+ return false;
+ }
+ QualType R = AtIndexGetter->getReturnType();
+ if (!R->isObjCObjectPointerType()) {
+ S.Diag(RefExpr->getKeyExpr()->getExprLoc(),
+ diag::err_objc_indexing_method_result_type) << R << arrayRef;
+ S.Diag(AtIndexGetter->getLocation(), diag::note_method_declared_at) <<
+ AtIndexGetter->getDeclName();
+ }
+ }
+ return true;
+}
+
+bool ObjCSubscriptOpBuilder::findAtIndexSetter() {
+ if (AtIndexSetter)
+ return true;
+
+ Expr *BaseExpr = RefExpr->getBaseExpr();
+ QualType BaseT = BaseExpr->getType();
+
+ QualType ResultType;
+ if (const ObjCObjectPointerType *PTy =
+ BaseT->getAs<ObjCObjectPointerType>()) {
+ ResultType = PTy->getPointeeType();
+ }
+
+ Sema::ObjCSubscriptKind Res =
+ S.CheckSubscriptingKind(RefExpr->getKeyExpr());
+ if (Res == Sema::OS_Error) {
+ if (S.getLangOpts().ObjCAutoRefCount)
+ CheckKeyForObjCARCConversion(S, ResultType,
+ RefExpr->getKeyExpr());
+ return false;
+ }
+ bool arrayRef = (Res == Sema::OS_Array);
+
+ if (ResultType.isNull()) {
+ S.Diag(BaseExpr->getExprLoc(), diag::err_objc_subscript_base_type)
+ << BaseExpr->getType() << arrayRef;
+ return false;
+ }
+
+ if (!arrayRef) {
+ // dictionary subscripting.
+ // - (void)setObject:(id)object forKeyedSubscript:(id)key;
+ IdentifierInfo *KeyIdents[] = {
+ &S.Context.Idents.get("setObject"),
+ &S.Context.Idents.get("forKeyedSubscript")
+ };
+ AtIndexSetterSelector = S.Context.Selectors.getSelector(2, KeyIdents);
+ } else {
+ // - (void)setObject:(id)object atIndexedSubscript:(NSInteger)index;
+ IdentifierInfo *KeyIdents[] = {
+ &S.Context.Idents.get("setObject"),
+ &S.Context.Idents.get("atIndexedSubscript")
+ };
+ AtIndexSetterSelector = S.Context.Selectors.getSelector(2, KeyIdents);
+ }
+ AtIndexSetter = S.LookupMethodInObjectType(AtIndexSetterSelector, ResultType,
+ true /*instance*/);
+
+ bool receiverIdType = (BaseT->isObjCIdType() ||
+ BaseT->isObjCQualifiedIdType());
+
+ if (!AtIndexSetter && S.getLangOpts().DebuggerObjCLiteral) {
+ TypeSourceInfo *ReturnTInfo = nullptr;
+ QualType ReturnType = S.Context.VoidTy;
+ AtIndexSetter = ObjCMethodDecl::Create(
+ S.Context, SourceLocation(), SourceLocation(), AtIndexSetterSelector,
+ ReturnType, ReturnTInfo, S.Context.getTranslationUnitDecl(),
+ true /*Instance*/, false /*isVariadic*/,
+ /*isPropertyAccessor=*/false,
+ /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
+ ObjCMethodDecl::Required, false);
+ SmallVector<ParmVarDecl *, 2> Params;
+ ParmVarDecl *object = ParmVarDecl::Create(S.Context, AtIndexSetter,
+ SourceLocation(), SourceLocation(),
+ &S.Context.Idents.get("object"),
+ S.Context.getObjCIdType(),
+ /*TInfo=*/nullptr,
+ SC_None,
+ nullptr);
+ Params.push_back(object);
+ ParmVarDecl *key = ParmVarDecl::Create(S.Context, AtIndexSetter,
+ SourceLocation(), SourceLocation(),
+ arrayRef ? &S.Context.Idents.get("index")
+ : &S.Context.Idents.get("key"),
+ arrayRef ? S.Context.UnsignedLongTy
+ : S.Context.getObjCIdType(),
+ /*TInfo=*/nullptr,
+ SC_None,
+ nullptr);
+ Params.push_back(key);
+ AtIndexSetter->setMethodParams(S.Context, Params, None);
+ }
+
+ if (!AtIndexSetter) {
+ if (!receiverIdType) {
+ S.Diag(BaseExpr->getExprLoc(),
+ diag::err_objc_subscript_method_not_found)
+ << BaseExpr->getType() << 1 << arrayRef;
+ return false;
+ }
+ AtIndexSetter =
+ S.LookupInstanceMethodInGlobalPool(AtIndexSetterSelector,
+ RefExpr->getSourceRange(),
+ true);
+ }
+
+ bool err = false;
+ if (AtIndexSetter && arrayRef) {
+ QualType T = AtIndexSetter->parameters()[1]->getType();
+ if (!T->isIntegralOrEnumerationType()) {
+ S.Diag(RefExpr->getKeyExpr()->getExprLoc(),
+ diag::err_objc_subscript_index_type) << T;
+ S.Diag(AtIndexSetter->parameters()[1]->getLocation(),
+ diag::note_parameter_type) << T;
+ err = true;
+ }
+ T = AtIndexSetter->parameters()[0]->getType();
+ if (!T->isObjCObjectPointerType()) {
+ S.Diag(RefExpr->getBaseExpr()->getExprLoc(),
+ diag::err_objc_subscript_object_type) << T << arrayRef;
+ S.Diag(AtIndexSetter->parameters()[0]->getLocation(),
+ diag::note_parameter_type) << T;
+ err = true;
+ }
+ } else if (AtIndexSetter && !arrayRef)
+ for (unsigned i = 0; i < 2; i++) {
+ QualType T = AtIndexSetter->parameters()[i]->getType();
+ if (!T->isObjCObjectPointerType()) {
+ if (i == 1)
+ S.Diag(RefExpr->getKeyExpr()->getExprLoc(),
+ diag::err_objc_subscript_key_type) << T;
+ else
+ S.Diag(RefExpr->getBaseExpr()->getExprLoc(),
+ diag::err_objc_subscript_dic_object_type) << T;
+ S.Diag(AtIndexSetter->parameters()[i]->getLocation(),
+ diag::note_parameter_type) << T;
+ err = true;
+ }
+ }
+
+ return !err;
+}
+
+// Get the object at the "Index" position in the container by building
+// the message send:
+// [BaseExpr objectAtIndexedSubscript:IndexExpr];
+ExprResult ObjCSubscriptOpBuilder::buildGet() {
+ if (!findAtIndexGetter())
+ return ExprError();
+
+ QualType receiverType = InstanceBase->getType();
+
+ // Build a message-send.
+ ExprResult msg;
+ Expr *Index = InstanceKey;
+
+ // Arguments.
+ Expr *args[] = { Index };
+ assert(InstanceBase);
+ if (AtIndexGetter)
+ S.DiagnoseUseOfDecl(AtIndexGetter, GenericLoc);
+ msg = S.BuildInstanceMessageImplicit(InstanceBase, receiverType,
+ GenericLoc,
+ AtIndexGetterSelector, AtIndexGetter,
+ MultiExprArg(args, 1));
+ return msg;
+}
+
+/// Store the "op" object into the container at the "Index" location
+/// by building this messaging expression:
+/// - (void)setObject:(id)object atIndexedSubscript:(NSInteger)index;
+/// \param captureSetValueAsResult If true, capture the actual
+/// value being set as the value of the property operation.
+ExprResult ObjCSubscriptOpBuilder::buildSet(Expr *op, SourceLocation opcLoc,
+ bool captureSetValueAsResult) {
+ if (!findAtIndexSetter())
+ return ExprError();
+ if (AtIndexSetter)
+ S.DiagnoseUseOfDecl(AtIndexSetter, GenericLoc);
+ QualType receiverType = InstanceBase->getType();
+ Expr *Index = InstanceKey;
+
+ // Arguments.
+ Expr *args[] = { op, Index };
+
+ // Build a message-send.
+ ExprResult msg = S.BuildInstanceMessageImplicit(InstanceBase, receiverType,
+ GenericLoc,
+ AtIndexSetterSelector,
+ AtIndexSetter,
+ MultiExprArg(args, 2));
+
+ if (!msg.isInvalid() && captureSetValueAsResult) {
+ ObjCMessageExpr *msgExpr =
+ cast<ObjCMessageExpr>(msg.get()->IgnoreImplicit());
+ Expr *arg = msgExpr->getArg(0);
+ if (CanCaptureValue(arg))
+ msgExpr->setArg(0, captureValueAsResult(arg));
+ }
+
+ return msg;
+}
+
+//===----------------------------------------------------------------------===//
+// MSVC __declspec(property) references
+//===----------------------------------------------------------------------===//
+
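+// Given a chained MSVC property subscript such as 'obj.prop[a][b]',
+// peel the subscripts down to the underlying MSPropertyRefExpr while
+// collecting the index expressions in source order; they later become
+// the arguments of the synthesized accessor call (conceptually
+// 'obj.get_prop(a, b)' -- the accessor name is illustrative and comes
+// from the property declaration's get= attribute).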
+MSPropertyRefExpr *
+MSPropertyOpBuilder::getBaseMSProperty(MSPropertySubscriptExpr *E) {
+ CallArgs.insert(CallArgs.begin(), E->getIdx());
+ Expr *Base = E->getBase()->IgnoreParens();
+ while (auto *MSPropSubscript = dyn_cast<MSPropertySubscriptExpr>(Base)) {
+ CallArgs.insert(CallArgs.begin(), MSPropSubscript->getIdx());
+ Base = MSPropSubscript->getBase()->IgnoreParens();
+ }
+ return cast<MSPropertyRefExpr>(Base);
+}
+
+Expr *MSPropertyOpBuilder::rebuildAndCaptureObject(Expr *syntacticBase) {
+ InstanceBase = capture(RefExpr->getBaseExpr());
+ std::for_each(CallArgs.begin(), CallArgs.end(),
+ [this](Expr *&Arg) { Arg = capture(Arg); });
+ syntacticBase = Rebuilder(S, [=](Expr *, unsigned Idx) -> Expr * {
+ switch (Idx) {
+ case 0:
+ return InstanceBase;
+ default:
+ assert(Idx <= CallArgs.size());
+ return CallArgs[Idx - 1];
+ }
+ }).rebuild(syntacticBase);
+
+ return syntacticBase;
+}
+
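+// Build the getter call for an MSVC property read: conceptually,
+// 'obj.Prop' (or 'obj.Prop[i]...') becomes 'obj.Getter(i...)'. This is
+// a sketch of the intent; the actual lowering goes through
+// ActOnMemberAccessExpr and ActOnCallExpr below.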
+ExprResult MSPropertyOpBuilder::buildGet() {
+ if (!RefExpr->getPropertyDecl()->hasGetter()) {
+ S.Diag(RefExpr->getMemberLoc(), diag::err_no_accessor_for_property)
+ << 0 /* getter */ << RefExpr->getPropertyDecl();
+ return ExprError();
+ }
+
+ UnqualifiedId GetterName;
+ IdentifierInfo *II = RefExpr->getPropertyDecl()->getGetterId();
+ GetterName.setIdentifier(II, RefExpr->getMemberLoc());
+ CXXScopeSpec SS;
+ SS.Adopt(RefExpr->getQualifierLoc());
+ ExprResult GetterExpr =
+ S.ActOnMemberAccessExpr(S.getCurScope(), InstanceBase, SourceLocation(),
+ RefExpr->isArrow() ? tok::arrow : tok::period, SS,
+ SourceLocation(), GetterName, nullptr);
+ if (GetterExpr.isInvalid()) {
+ S.Diag(RefExpr->getMemberLoc(),
+ diag::error_cannot_find_suitable_accessor) << 0 /* getter */
+ << RefExpr->getPropertyDecl();
+ return ExprError();
+ }
+
+ return S.ActOnCallExpr(S.getCurScope(), GetterExpr.get(),
+ RefExpr->getSourceRange().getBegin(), CallArgs,
+ RefExpr->getSourceRange().getEnd());
+}
+
+ExprResult MSPropertyOpBuilder::buildSet(Expr *op, SourceLocation sl,
+ bool captureSetValueAsResult) {
+ if (!RefExpr->getPropertyDecl()->hasSetter()) {
+ S.Diag(RefExpr->getMemberLoc(), diag::err_no_accessor_for_property)
+ << 1 /* setter */ << RefExpr->getPropertyDecl();
+ return ExprError();
+ }
+
+ UnqualifiedId SetterName;
+ IdentifierInfo *II = RefExpr->getPropertyDecl()->getSetterId();
+ SetterName.setIdentifier(II, RefExpr->getMemberLoc());
+ CXXScopeSpec SS;
+ SS.Adopt(RefExpr->getQualifierLoc());
+ ExprResult SetterExpr =
+ S.ActOnMemberAccessExpr(S.getCurScope(), InstanceBase, SourceLocation(),
+ RefExpr->isArrow() ? tok::arrow : tok::period, SS,
+ SourceLocation(), SetterName, nullptr);
+ if (SetterExpr.isInvalid()) {
+ S.Diag(RefExpr->getMemberLoc(),
+ diag::error_cannot_find_suitable_accessor) << 1 /* setter */
+ << RefExpr->getPropertyDecl();
+ return ExprError();
+ }
+
+ SmallVector<Expr*, 4> ArgExprs;
+ ArgExprs.append(CallArgs.begin(), CallArgs.end());
+ ArgExprs.push_back(op);
+ return S.ActOnCallExpr(S.getCurScope(), SetterExpr.get(),
+ RefExpr->getSourceRange().getBegin(), ArgExprs,
+ op->getSourceRange().getEnd());
+}
+
+//===----------------------------------------------------------------------===//
+// General Sema routines.
+//===----------------------------------------------------------------------===//
+
+ExprResult Sema::checkPseudoObjectRValue(Expr *E) {
+ Expr *opaqueRef = E->IgnoreParens();
+ if (ObjCPropertyRefExpr *refExpr
+ = dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
+ ObjCPropertyOpBuilder builder(*this, refExpr);
+ return builder.buildRValueOperation(E);
+ } else if (ObjCSubscriptRefExpr *refExpr
+ = dyn_cast<ObjCSubscriptRefExpr>(opaqueRef)) {
+ ObjCSubscriptOpBuilder builder(*this, refExpr);
+ return builder.buildRValueOperation(E);
+ } else if (MSPropertyRefExpr *refExpr
+ = dyn_cast<MSPropertyRefExpr>(opaqueRef)) {
+ MSPropertyOpBuilder builder(*this, refExpr);
+ return builder.buildRValueOperation(E);
+ } else if (MSPropertySubscriptExpr *RefExpr =
+ dyn_cast<MSPropertySubscriptExpr>(opaqueRef)) {
+ MSPropertyOpBuilder Builder(*this, RefExpr);
+ return Builder.buildRValueOperation(E);
+ } else {
+ llvm_unreachable("unknown pseudo-object kind!");
+ }
+}
+
+/// Check an increment or decrement of a pseudo-object expression.
+ExprResult Sema::checkPseudoObjectIncDec(Scope *Sc, SourceLocation opcLoc,
+ UnaryOperatorKind opcode, Expr *op) {
+ // Do nothing if the operand is dependent.
+ if (op->isTypeDependent())
+ return new (Context) UnaryOperator(op, opcode, Context.DependentTy,
+ VK_RValue, OK_Ordinary, opcLoc);
+
+ assert(UnaryOperator::isIncrementDecrementOp(opcode));
+ Expr *opaqueRef = op->IgnoreParens();
+ if (ObjCPropertyRefExpr *refExpr
+ = dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
+ ObjCPropertyOpBuilder builder(*this, refExpr);
+ return builder.buildIncDecOperation(Sc, opcLoc, opcode, op);
+ } else if (isa<ObjCSubscriptRefExpr>(opaqueRef)) {
+ Diag(opcLoc, diag::err_illegal_container_subscripting_op);
+ return ExprError();
+ } else if (MSPropertyRefExpr *refExpr
+ = dyn_cast<MSPropertyRefExpr>(opaqueRef)) {
+ MSPropertyOpBuilder builder(*this, refExpr);
+ return builder.buildIncDecOperation(Sc, opcLoc, opcode, op);
+ } else if (MSPropertySubscriptExpr *RefExpr
+ = dyn_cast<MSPropertySubscriptExpr>(opaqueRef)) {
+ MSPropertyOpBuilder Builder(*this, RefExpr);
+ return Builder.buildIncDecOperation(Sc, opcLoc, opcode, op);
+ } else {
+ llvm_unreachable("unknown pseudo-object kind!");
+ }
+}
+
+ExprResult Sema::checkPseudoObjectAssignment(Scope *S, SourceLocation opcLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS) {
+ // Do nothing if either argument is dependent.
+ if (LHS->isTypeDependent() || RHS->isTypeDependent())
+ return new (Context) BinaryOperator(LHS, RHS, opcode, Context.DependentTy,
+ VK_RValue, OK_Ordinary, opcLoc, false);
+
+ // Filter out non-overload placeholder types in the RHS.
+ if (RHS->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(RHS);
+ if (result.isInvalid()) return ExprError();
+ RHS = result.get();
+ }
+
+ Expr *opaqueRef = LHS->IgnoreParens();
+ if (ObjCPropertyRefExpr *refExpr
+ = dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
+ ObjCPropertyOpBuilder builder(*this, refExpr);
+ return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
+ } else if (ObjCSubscriptRefExpr *refExpr
+ = dyn_cast<ObjCSubscriptRefExpr>(opaqueRef)) {
+ ObjCSubscriptOpBuilder builder(*this, refExpr);
+ return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
+ } else if (MSPropertyRefExpr *refExpr
+ = dyn_cast<MSPropertyRefExpr>(opaqueRef)) {
+ MSPropertyOpBuilder builder(*this, refExpr);
+ return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
+ } else if (MSPropertySubscriptExpr *RefExpr
+ = dyn_cast<MSPropertySubscriptExpr>(opaqueRef)) {
+ MSPropertyOpBuilder Builder(*this, RefExpr);
+ return Builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
+ } else {
+ llvm_unreachable("unknown pseudo-object kind!");
+ }
+}
+
+/// Given a pseudo-object reference, rebuild it without the opaque
+/// values. Basically, undo the behavior of rebuildAndCaptureObject.
+/// This should never operate in-place.
+static Expr *stripOpaqueValuesFromPseudoObjectRef(Sema &S, Expr *E) {
+ return Rebuilder(S,
+ [=](Expr *E, unsigned) -> Expr * {
+ return cast<OpaqueValueExpr>(E)->getSourceExpr();
+ })
+ .rebuild(E);
+}
+
+/// Given a pseudo-object expression, recreate what it looks like
+/// syntactically without the attendant OpaqueValueExprs.
+///
+/// This is a hack which should be removed when TreeTransform is
+/// capable of rebuilding a tree without stripping implicit
+/// operations.
+Expr *Sema::recreateSyntacticForm(PseudoObjectExpr *E) {
+ Expr *syntax = E->getSyntacticForm();
+ if (UnaryOperator *uop = dyn_cast<UnaryOperator>(syntax)) {
+ Expr *op = stripOpaqueValuesFromPseudoObjectRef(*this, uop->getSubExpr());
+ return new (Context) UnaryOperator(op, uop->getOpcode(), uop->getType(),
+ uop->getValueKind(), uop->getObjectKind(),
+ uop->getOperatorLoc());
+ } else if (CompoundAssignOperator *cop
+ = dyn_cast<CompoundAssignOperator>(syntax)) {
+ Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, cop->getLHS());
+ Expr *rhs = cast<OpaqueValueExpr>(cop->getRHS())->getSourceExpr();
+ return new (Context) CompoundAssignOperator(lhs, rhs, cop->getOpcode(),
+ cop->getType(),
+ cop->getValueKind(),
+ cop->getObjectKind(),
+ cop->getComputationLHSType(),
+ cop->getComputationResultType(),
+ cop->getOperatorLoc(), false);
+ } else if (BinaryOperator *bop = dyn_cast<BinaryOperator>(syntax)) {
+ Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, bop->getLHS());
+ Expr *rhs = cast<OpaqueValueExpr>(bop->getRHS())->getSourceExpr();
+ return new (Context) BinaryOperator(lhs, rhs, bop->getOpcode(),
+ bop->getType(), bop->getValueKind(),
+ bop->getObjectKind(),
+ bop->getOperatorLoc(), false);
+ } else {
+ assert(syntax->hasPlaceholderType(BuiltinType::PseudoObject));
+ return stripOpaqueValuesFromPseudoObjectRef(*this, syntax);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
new file mode 100644
index 0000000..e1b1a47
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
@@ -0,0 +1,3933 @@
+//===--- SemaStmt.cpp - Semantic Analysis for Statements ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for statements.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/TypeOrdering.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+using namespace clang;
+using namespace sema;
+
+StmtResult Sema::ActOnExprStmt(ExprResult FE) {
+ if (FE.isInvalid())
+ return StmtError();
+
+ FE = ActOnFinishFullExpr(FE.get(), FE.get()->getExprLoc(),
+ /*DiscardedValue*/ true);
+ if (FE.isInvalid())
+ return StmtError();
+
+ // C99 6.8.3p2: The expression in an expression statement is evaluated as a
+ // void expression for its side effects. Conversion to void allows any
+ // operand, even incomplete types.
+
+ // Same thing in for stmt first clause (when expr) and third clause.
+ return StmtResult(FE.getAs<Stmt>());
+}
+
+
+StmtResult Sema::ActOnExprStmtError() {
+ DiscardCleanupsInEvaluationContext();
+ return StmtError();
+}
+
+StmtResult Sema::ActOnNullStmt(SourceLocation SemiLoc,
+ bool HasLeadingEmptyMacro) {
+ return new (Context) NullStmt(SemiLoc, HasLeadingEmptyMacro);
+}
+
+StmtResult Sema::ActOnDeclStmt(DeclGroupPtrTy dg, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ DeclGroupRef DG = dg.get();
+
+ // If we have an invalid decl, just return an error.
+ if (DG.isNull()) return StmtError();
+
+ return new (Context) DeclStmt(DG, StartLoc, EndLoc);
+}
+
+void Sema::ActOnForEachDeclStmt(DeclGroupPtrTy dg) {
+ DeclGroupRef DG = dg.get();
+
+ // If we don't have a declaration, or we have an invalid declaration,
+ // just return.
+ if (DG.isNull() || !DG.isSingleDecl())
+ return;
+
+ Decl *decl = DG.getSingleDecl();
+ if (!decl || decl->isInvalidDecl())
+ return;
+
+ // Only variable declarations are permitted.
+ VarDecl *var = dyn_cast<VarDecl>(decl);
+ if (!var) {
+ Diag(decl->getLocation(), diag::err_non_variable_decl_in_for);
+ decl->setInvalidDecl();
+ return;
+ }
+
+ // foreach variables are never actually initialized in the way that
+ // the parser came up with.
+ var->setInit(nullptr);
+
+ // In ARC, we don't need to retain the iteration variable of a fast
+ // enumeration loop. Rather than actually trying to catch that
+ // during declaration processing, we remove the consequences here.
+ if (getLangOpts().ObjCAutoRefCount) {
+ QualType type = var->getType();
+
+ // Only do this if we inferred the lifetime. Inferred lifetime
+ // will show up as a local qualifier because explicit lifetime
+ // should have shown up as an AttributedType instead.
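+ // (For instance, 'for (id x in coll)' infers __strong for 'x';
+ // marking it const and pseudo-strong below suppresses redundant
+ // retain/release of the loop variable on each iteration.)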
+ if (type.getLocalQualifiers().getObjCLifetime() == Qualifiers::OCL_Strong) {
+ // Add 'const' and mark the variable as pseudo-strong.
+ var->setType(type.withConst());
+ var->setARCPseudoStrong(true);
+ }
+ }
+}
+
+/// \brief Diagnose unused comparisons, both builtin and overloaded operators.
+/// For '==' and '!=', suggest fixits for '=' or '|='.
+///
+/// Adding a cast to void (or other expression wrappers) will prevent the
+/// warning from firing.
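+///
+/// For example, the statement 'x == 5;' warns and offers the fixit
+/// 'x = 5;', while 'x != mask;' offers 'x |= mask;'.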
+static bool DiagnoseUnusedComparison(Sema &S, const Expr *E) {
+ SourceLocation Loc;
+ bool IsNotEqual, CanAssign, IsRelational;
+
+ if (const BinaryOperator *Op = dyn_cast<BinaryOperator>(E)) {
+ if (!Op->isComparisonOp())
+ return false;
+
+ IsRelational = Op->isRelationalOp();
+ Loc = Op->getOperatorLoc();
+ IsNotEqual = Op->getOpcode() == BO_NE;
+ CanAssign = Op->getLHS()->IgnoreParenImpCasts()->isLValue();
+ } else if (const CXXOperatorCallExpr *Op = dyn_cast<CXXOperatorCallExpr>(E)) {
+ switch (Op->getOperator()) {
+ default:
+ return false;
+ case OO_EqualEqual:
+ case OO_ExclaimEqual:
+ IsRelational = false;
+ break;
+ case OO_Less:
+ case OO_Greater:
+ case OO_GreaterEqual:
+ case OO_LessEqual:
+ IsRelational = true;
+ break;
+ }
+
+ Loc = Op->getOperatorLoc();
+ IsNotEqual = Op->getOperator() == OO_ExclaimEqual;
+ CanAssign = Op->getArg(0)->IgnoreParenImpCasts()->isLValue();
+ } else {
+ // Not a typo-prone comparison.
+ return false;
+ }
+
+ // Suppress warnings when the operator, suspicious as it may be, comes from
+ // a macro expansion.
+ if (S.SourceMgr.isMacroBodyExpansion(Loc))
+ return false;
+
+ S.Diag(Loc, diag::warn_unused_comparison)
+ << (unsigned)IsRelational << (unsigned)IsNotEqual << E->getSourceRange();
+
+ // If the LHS is a plausible entity to assign to, provide a fixit hint to
+ // correct common typos.
+ if (!IsRelational && CanAssign) {
+ if (IsNotEqual)
+ S.Diag(Loc, diag::note_inequality_comparison_to_or_assign)
+ << FixItHint::CreateReplacement(Loc, "|=");
+ else
+ S.Diag(Loc, diag::note_equality_comparison_to_assign)
+ << FixItHint::CreateReplacement(Loc, "=");
+ }
+
+ return true;
+}
+
+void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
+ if (const LabelStmt *Label = dyn_cast_or_null<LabelStmt>(S))
+ return DiagnoseUnusedExprResult(Label->getSubStmt());
+
+ const Expr *E = dyn_cast_or_null<Expr>(S);
+ if (!E)
+ return;
+
+ // If we are in an unevaluated expression context, then there can be no unused
+ // results because the results aren't expected to be used in the first place.
+ if (isUnevaluatedContext())
+ return;
+
+ SourceLocation ExprLoc = E->IgnoreParenImpCasts()->getExprLoc();
+ // In most cases, we don't want to warn if the expression is written in a
+ // macro body, or if the macro comes from a system header. If the offending
+ // expression is a call to a function with the warn_unused_result attribute,
+ // we warn no matter the location. Because of the order in which the various
+ // checks need to happen, we factor out the macro-related test here.
+ bool ShouldSuppress =
+ SourceMgr.isMacroBodyExpansion(ExprLoc) ||
+ SourceMgr.isInSystemMacro(ExprLoc);
+
+ const Expr *WarnExpr;
+ SourceLocation Loc;
+ SourceRange R1, R2;
+ if (!E->isUnusedResultAWarning(WarnExpr, Loc, R1, R2, Context))
+ return;
+
+ // If this is a GNU statement expression expanded from a macro, it is probably
+ // unused because it is a function-like macro that can be used as either an
+ // expression or statement. Don't warn, because it is almost certainly a
+ // false positive.
+ if (isa<StmtExpr>(E) && Loc.isMacroID())
+ return;
+
+ // Check if this is the UNREFERENCED_PARAMETER from the Microsoft headers.
+ // That macro is frequently used to suppress "unused parameter" warnings,
+ // but its implementation makes clang's -Wunused-value fire. Prevent this.
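+ // (A typical definition is '#define UNREFERENCED_PARAMETER(P) (P)', whose
+ // expansion is a bare parenthesized expression.)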
+ if (isa<ParenExpr>(E->IgnoreImpCasts()) && Loc.isMacroID()) {
+ SourceLocation SpellLoc = Loc;
+ if (findMacroSpelling(SpellLoc, "UNREFERENCED_PARAMETER"))
+ return;
+ }
+
+ // Okay, we have an unused result. Depending on what the base expression is,
+ // we might want to make a more specific diagnostic. Check for one of these
+ // cases now.
+ unsigned DiagID = diag::warn_unused_expr;
+ if (const ExprWithCleanups *Temps = dyn_cast<ExprWithCleanups>(E))
+ E = Temps->getSubExpr();
+ if (const CXXBindTemporaryExpr *TempExpr = dyn_cast<CXXBindTemporaryExpr>(E))
+ E = TempExpr->getSubExpr();
+
+ if (DiagnoseUnusedComparison(*this, E))
+ return;
+
+ E = WarnExpr;
+ if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
+ if (E->getType()->isVoidType())
+ return;
+
+ // If the callee has attribute pure, const, or warn_unused_result, warn with
+ // a more specific message to make it clear what is happening. If the call
+ // is written in a macro body, only warn if it has the warn_unused_result
+ // attribute.
+ if (const Decl *FD = CE->getCalleeDecl()) {
+ const FunctionDecl *Func = dyn_cast<FunctionDecl>(FD);
+ if (Func ? Func->hasUnusedResultAttr()
+ : FD->hasAttr<WarnUnusedResultAttr>()) {
+ Diag(Loc, diag::warn_unused_result) << R1 << R2;
+ return;
+ }
+ if (ShouldSuppress)
+ return;
+ if (FD->hasAttr<PureAttr>()) {
+ Diag(Loc, diag::warn_unused_call) << R1 << R2 << "pure";
+ return;
+ }
+ if (FD->hasAttr<ConstAttr>()) {
+ Diag(Loc, diag::warn_unused_call) << R1 << R2 << "const";
+ return;
+ }
+ }
+ } else if (ShouldSuppress)
+ return;
+
+ if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E)) {
+ if (getLangOpts().ObjCAutoRefCount && ME->isDelegateInitCall()) {
+ Diag(Loc, diag::err_arc_unused_init_message) << R1;
+ return;
+ }
+ const ObjCMethodDecl *MD = ME->getMethodDecl();
+ if (MD) {
+ if (MD->hasAttr<WarnUnusedResultAttr>()) {
+ Diag(Loc, diag::warn_unused_result) << R1 << R2;
+ return;
+ }
+ }
+ } else if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
+ const Expr *Source = POE->getSyntacticForm();
+ if (isa<ObjCSubscriptRefExpr>(Source))
+ DiagID = diag::warn_unused_container_subscript_expr;
+ else
+ DiagID = diag::warn_unused_property_expr;
+ } else if (const CXXFunctionalCastExpr *FC
+ = dyn_cast<CXXFunctionalCastExpr>(E)) {
+ if (isa<CXXConstructExpr>(FC->getSubExpr()) ||
+ isa<CXXTemporaryObjectExpr>(FC->getSubExpr()))
+ return;
+ }
+ // Diagnose "(void*) blah" as a typo for "(void) blah".
+ else if (const CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(E)) {
+ TypeSourceInfo *TI = CE->getTypeInfoAsWritten();
+ QualType T = TI->getType();
+
+ // We really do want to use the non-canonical type here.
+ if (T == Context.VoidPtrTy) {
+ PointerTypeLoc TL = TI->getTypeLoc().castAs<PointerTypeLoc>();
+
+ Diag(Loc, diag::warn_unused_voidptr)
+ << FixItHint::CreateRemoval(TL.getStarLoc());
+ return;
+ }
+ }
+
+ if (E->isGLValue() && E->getType().isVolatileQualified()) {
+ Diag(Loc, diag::warn_unused_volatile) << R1 << R2;
+ return;
+ }
+
+ DiagRuntimeBehavior(Loc, nullptr, PDiag(DiagID) << R1 << R2);
+}
+
+void Sema::ActOnStartOfCompoundStmt() {
+ PushCompoundScope();
+}
+
+void Sema::ActOnFinishOfCompoundStmt() {
+ PopCompoundScope();
+}
+
+sema::CompoundScopeInfo &Sema::getCurCompoundScope() const {
+ return getCurFunction()->CompoundScopes.back();
+}
+
+StmtResult Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
+ ArrayRef<Stmt *> Elts, bool isStmtExpr) {
+ const unsigned NumElts = Elts.size();
+
+ // If we're in C89 mode, check that we don't have any decls after stmts. If
+ // so, emit an extension diagnostic.
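+ // For example (illustrative, C89): 'int a; a = 1; int b;' inside a compound
+ // statement is diagnosed at the declaration of 'b'.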
+ if (!getLangOpts().C99 && !getLangOpts().CPlusPlus) {
+ // Note that __extension__ can be around a decl.
+ unsigned i = 0;
+ // Skip over all declarations.
+ for (; i != NumElts && isa<DeclStmt>(Elts[i]); ++i)
+ /*empty*/;
+
+ // We found the end of the list or a statement. Scan for another declstmt.
+ for (; i != NumElts && !isa<DeclStmt>(Elts[i]); ++i)
+ /*empty*/;
+
+ if (i != NumElts) {
+ Decl *D = *cast<DeclStmt>(Elts[i])->decl_begin();
+ Diag(D->getLocation(), diag::ext_mixed_decls_code);
+ }
+ }
+ // Warn about unused expressions in statements.
+ for (unsigned i = 0; i != NumElts; ++i) {
+ // Ignore statements that are last in a statement expression.
+ if (isStmtExpr && i == NumElts - 1)
+ continue;
+
+ DiagnoseUnusedExprResult(Elts[i]);
+ }
+
+ // Check for suspicious empty body (null statement) in `for' and `while'
+ // statements. Don't do anything for template instantiations, this just adds
+ // noise.
+ if (NumElts != 0 && !CurrentInstantiationScope &&
+ getCurCompoundScope().HasEmptyLoopBodies) {
+ for (unsigned i = 0; i != NumElts - 1; ++i)
+ DiagnoseEmptyLoopBody(Elts[i], Elts[i + 1]);
+ }
+
+ return new (Context) CompoundStmt(Context, Elts, L, R);
+}
+
+StmtResult
+Sema::ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
+ SourceLocation DotDotDotLoc, Expr *RHSVal,
+ SourceLocation ColonLoc) {
+ assert(LHSVal && "missing expression in case statement");
+
+ if (getCurFunction()->SwitchStack.empty()) {
+ Diag(CaseLoc, diag::err_case_not_in_switch);
+ return StmtError();
+ }
+
+ ExprResult LHS =
+ CorrectDelayedTyposInExpr(LHSVal, [this](class Expr *E) {
+ if (!getLangOpts().CPlusPlus11)
+ return VerifyIntegerConstantExpression(E);
+ if (Expr *CondExpr =
+ getCurFunction()->SwitchStack.back()->getCond()) {
+ QualType CondType = CondExpr->getType();
+ llvm::APSInt TempVal;
+ return CheckConvertedConstantExpression(E, CondType, TempVal,
+ CCEK_CaseValue);
+ }
+ return ExprError();
+ });
+ if (LHS.isInvalid())
+ return StmtError();
+ LHSVal = LHS.get();
+
+ if (!getLangOpts().CPlusPlus11) {
+ // C99 6.8.4.2p3: The expression shall be an integer constant.
+ // However, GCC allows any evaluatable integer expression.
+ if (!LHSVal->isTypeDependent() && !LHSVal->isValueDependent()) {
+ LHSVal = VerifyIntegerConstantExpression(LHSVal).get();
+ if (!LHSVal)
+ return StmtError();
+ }
+
+ // GCC case-range extension: the upper bound shall also be an integer
+ // constant.
+
+ if (RHSVal && !RHSVal->isTypeDependent() && !RHSVal->isValueDependent()) {
+ RHSVal = VerifyIntegerConstantExpression(RHSVal).get();
+ // Recover from an error by just forgetting about it.
+ }
+ }
+
+ LHS = ActOnFinishFullExpr(LHSVal, LHSVal->getExprLoc(), false,
+ getLangOpts().CPlusPlus11);
+ if (LHS.isInvalid())
+ return StmtError();
+
+ auto RHS = RHSVal ? ActOnFinishFullExpr(RHSVal, RHSVal->getExprLoc(), false,
+ getLangOpts().CPlusPlus11)
+ : ExprResult();
+ if (RHS.isInvalid())
+ return StmtError();
+
+ CaseStmt *CS = new (Context)
+ CaseStmt(LHS.get(), RHS.get(), CaseLoc, DotDotDotLoc, ColonLoc);
+ getCurFunction()->SwitchStack.back()->addSwitchCase(CS);
+ return CS;
+}
+
+/// ActOnCaseStmtBody - This installs a statement as the body of a case.
+void Sema::ActOnCaseStmtBody(Stmt *caseStmt, Stmt *SubStmt) {
+ DiagnoseUnusedExprResult(SubStmt);
+
+ CaseStmt *CS = static_cast<CaseStmt*>(caseStmt);
+ CS->setSubStmt(SubStmt);
+}
+
+StmtResult
+Sema::ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc,
+ Stmt *SubStmt, Scope *CurScope) {
+ DiagnoseUnusedExprResult(SubStmt);
+
+ if (getCurFunction()->SwitchStack.empty()) {
+ Diag(DefaultLoc, diag::err_default_not_in_switch);
+ return SubStmt;
+ }
+
+ DefaultStmt *DS = new (Context) DefaultStmt(DefaultLoc, ColonLoc, SubStmt);
+ getCurFunction()->SwitchStack.back()->addSwitchCase(DS);
+ return DS;
+}
+
+StmtResult
+Sema::ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
+ SourceLocation ColonLoc, Stmt *SubStmt) {
+ // If the label was multiply defined, reject it now.
+ if (TheDecl->getStmt()) {
+ Diag(IdentLoc, diag::err_redefinition_of_label) << TheDecl->getDeclName();
+ Diag(TheDecl->getLocation(), diag::note_previous_definition);
+ return SubStmt;
+ }
+
+ // Otherwise, things are good. Fill in the declaration and return it.
+ LabelStmt *LS = new (Context) LabelStmt(IdentLoc, TheDecl, SubStmt);
+ TheDecl->setStmt(LS);
+ if (!TheDecl->isGnuLocal()) {
+ TheDecl->setLocStart(IdentLoc);
+ if (!TheDecl->isMSAsmLabel()) {
+ // Don't update the location of MS ASM labels. These will result in
+ // a diagnostic, and changing the location here will mess that up.
+ TheDecl->setLocation(IdentLoc);
+ }
+ }
+ return LS;
+}
+
+StmtResult Sema::ActOnAttributedStmt(SourceLocation AttrLoc,
+ ArrayRef<const Attr*> Attrs,
+ Stmt *SubStmt) {
+ // Fill in the declaration and return it.
+ AttributedStmt *LS = AttributedStmt::Create(Context, AttrLoc, Attrs, SubStmt);
+ return LS;
+}
+
+StmtResult
+Sema::ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, Decl *CondVar,
+ Stmt *thenStmt, SourceLocation ElseLoc,
+ Stmt *elseStmt) {
+ ExprResult CondResult(CondVal.release());
+
+ VarDecl *ConditionVar = nullptr;
+ if (CondVar) {
+ ConditionVar = cast<VarDecl>(CondVar);
+ CondResult = CheckConditionVariable(ConditionVar, IfLoc, true);
+ CondResult = ActOnFinishFullExpr(CondResult.get(), IfLoc);
+ }
+ Expr *ConditionExpr = CondResult.getAs<Expr>();
+ if (ConditionExpr) {
+ DiagnoseUnusedExprResult(thenStmt);
+
+ if (!elseStmt) {
+ DiagnoseEmptyStmtBody(ConditionExpr->getLocEnd(), thenStmt,
+ diag::warn_empty_if_body);
+ }
+
+ DiagnoseUnusedExprResult(elseStmt);
+ } else {
+ // Create a dummy Expr for the condition for error recovery
+ ConditionExpr = new (Context) OpaqueValueExpr(SourceLocation(),
+ Context.BoolTy, VK_RValue);
+ }
+
+ return new (Context) IfStmt(Context, IfLoc, ConditionVar, ConditionExpr,
+ thenStmt, ElseLoc, elseStmt);
+}
+
+namespace {
+ struct CaseCompareFunctor {
+ bool operator()(const std::pair<llvm::APSInt, CaseStmt*> &LHS,
+ const llvm::APSInt &RHS) {
+ return LHS.first < RHS;
+ }
+ bool operator()(const std::pair<llvm::APSInt, CaseStmt*> &LHS,
+ const std::pair<llvm::APSInt, CaseStmt*> &RHS) {
+ return LHS.first < RHS.first;
+ }
+ bool operator()(const llvm::APSInt &LHS,
+ const std::pair<llvm::APSInt, CaseStmt*> &RHS) {
+ return LHS < RHS.first;
+ }
+ };
+}
+
+/// CmpCaseVals - Comparison predicate for sorting case values.
+///
+static bool CmpCaseVals(const std::pair<llvm::APSInt, CaseStmt*>& lhs,
+ const std::pair<llvm::APSInt, CaseStmt*>& rhs) {
+ if (lhs.first < rhs.first)
+ return true;
+
+ if (lhs.first == rhs.first &&
+ lhs.second->getCaseLoc().getRawEncoding()
+ < rhs.second->getCaseLoc().getRawEncoding())
+ return true;
+ return false;
+}
+
+/// CmpEnumVals - Comparison predicate for sorting enumeration values.
+///
+static bool CmpEnumVals(const std::pair<llvm::APSInt, EnumConstantDecl*>& lhs,
+ const std::pair<llvm::APSInt, EnumConstantDecl*>& rhs)
+{
+ return lhs.first < rhs.first;
+}
+
+/// EqEnumVals - Comparison predicate for uniquing enumeration values.
+///
+static bool EqEnumVals(const std::pair<llvm::APSInt, EnumConstantDecl*>& lhs,
+ const std::pair<llvm::APSInt, EnumConstantDecl*>& rhs)
+{
+ return lhs.first == rhs.first;
+}
+
+/// GetTypeBeforeIntegralPromotion - Returns the pre-promotion type of
+/// potentially integral-promoted expression @p expr.
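+///
+/// For example, given 'char c', the controlling expression of 'switch (c)'
+/// has been promoted to 'int'; this function looks through the implicit
+/// integral cast and returns 'char'.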
+static QualType GetTypeBeforeIntegralPromotion(Expr *&expr) {
+ if (ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(expr))
+ expr = cleanups->getSubExpr();
+ while (ImplicitCastExpr *impcast = dyn_cast<ImplicitCastExpr>(expr)) {
+ if (impcast->getCastKind() != CK_IntegralCast) break;
+ expr = impcast->getSubExpr();
+ }
+ return expr->getType();
+}
+
+StmtResult
+Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond,
+ Decl *CondVar) {
+ ExprResult CondResult;
+
+ VarDecl *ConditionVar = nullptr;
+ if (CondVar) {
+ ConditionVar = cast<VarDecl>(CondVar);
+ CondResult = CheckConditionVariable(ConditionVar, SourceLocation(), false);
+ if (CondResult.isInvalid())
+ return StmtError();
+
+ Cond = CondResult.get();
+ }
+
+ if (!Cond)
+ return StmtError();
+
+ class SwitchConvertDiagnoser : public ICEConvertDiagnoser {
+ Expr *Cond;
+
+ public:
+ SwitchConvertDiagnoser(Expr *Cond)
+ : ICEConvertDiagnoser(/*AllowScopedEnumerations*/true, false, true),
+ Cond(Cond) {}
+
+ SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
+ QualType T) override {
+ return S.Diag(Loc, diag::err_typecheck_statement_requires_integer) << T;
+ }
+
+ SemaDiagnosticBuilder diagnoseIncomplete(
+ Sema &S, SourceLocation Loc, QualType T) override {
+ return S.Diag(Loc, diag::err_switch_incomplete_class_type)
+ << T << Cond->getSourceRange();
+ }
+
+ SemaDiagnosticBuilder diagnoseExplicitConv(
+ Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
+ return S.Diag(Loc, diag::err_switch_explicit_conversion) << T << ConvTy;
+ }
+
+ SemaDiagnosticBuilder noteExplicitConv(
+ Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
+ return S.Diag(Conv->getLocation(), diag::note_switch_conversion)
+ << ConvTy->isEnumeralType() << ConvTy;
+ }
+
+ SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
+ QualType T) override {
+ return S.Diag(Loc, diag::err_switch_multiple_conversions) << T;
+ }
+
+ SemaDiagnosticBuilder noteAmbiguous(
+ Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
+ return S.Diag(Conv->getLocation(), diag::note_switch_conversion)
+ << ConvTy->isEnumeralType() << ConvTy;
+ }
+
+ SemaDiagnosticBuilder diagnoseConversion(
+ Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
+ llvm_unreachable("conversion functions are permitted");
+ }
+ } SwitchDiagnoser(Cond);
+
+ CondResult =
+ PerformContextualImplicitConversion(SwitchLoc, Cond, SwitchDiagnoser);
+ if (CondResult.isInvalid()) return StmtError();
+ Cond = CondResult.get();
+
+ // C99 6.8.4.2p5 - Integer promotions are performed on the controlling expr.
+ CondResult = UsualUnaryConversions(Cond);
+ if (CondResult.isInvalid()) return StmtError();
+ Cond = CondResult.get();
+
+ CondResult = ActOnFinishFullExpr(Cond, SwitchLoc);
+ if (CondResult.isInvalid())
+ return StmtError();
+ Cond = CondResult.get();
+
+ getCurFunction()->setHasBranchIntoScope();
+
+ SwitchStmt *SS = new (Context) SwitchStmt(Context, ConditionVar, Cond);
+ getCurFunction()->SwitchStack.push_back(SS);
+ return SS;
+}
+
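+/// Truncate or extend \p Val to \p BitWidth, extending according to the
+/// value's current signedness, then mark it as signed or unsigned per
+/// \p IsSigned.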
+static void AdjustAPSInt(llvm::APSInt &Val, unsigned BitWidth, bool IsSigned) {
+ Val = Val.extOrTrunc(BitWidth);
+ Val.setIsSigned(IsSigned);
+}
+
+/// Check that the specified case value is in range for the given unpromoted
+/// switch type.
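+///
+/// For example (illustrative): 'char c; switch (c) { case 300: ... }' warns,
+/// since 300 is not representable in 'char' even though it is representable
+/// in the promoted type 'int'.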
+static void checkCaseValue(Sema &S, SourceLocation Loc, const llvm::APSInt &Val,
+ unsigned UnpromotedWidth, bool UnpromotedSign) {
+ // If the case value was signed and negative and the switch expression is
+ // unsigned, don't bother to warn: this is implementation-defined behavior.
+ // FIXME: Introduce a second, default-ignored warning for this case?
+ if (UnpromotedWidth < Val.getBitWidth()) {
+ llvm::APSInt ConvVal(Val);
+ AdjustAPSInt(ConvVal, UnpromotedWidth, UnpromotedSign);
+ AdjustAPSInt(ConvVal, Val.getBitWidth(), Val.isSigned());
+ // FIXME: Use different diagnostics for overflow in conversion to promoted
+ // type versus "switch expression cannot have this value". Use proper
+ // IntRange checking rather than just looking at the unpromoted type here.
+ if (ConvVal != Val)
+ S.Diag(Loc, diag::warn_case_value_overflow) << Val.toString(10)
+ << ConvVal.toString(10);
+ }
+}
+
+typedef SmallVector<std::pair<llvm::APSInt, EnumConstantDecl*>, 64> EnumValsTy;
+
+/// Returns true if we should emit a diagnostic about this case expression not
+/// being a part of the enum used in the switch controlling expression.
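+///
+/// For example (illustrative): with 'enum E { A, B };', 'case 7:' in a switch
+/// over an 'E' value is flagged, while 'case A:' is not; a case expression
+/// naming a 'const E' global of the same enum type is also exempted.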
+static bool ShouldDiagnoseSwitchCaseNotInEnum(const Sema &S,
+ const EnumDecl *ED,
+ const Expr *CaseExpr,
+ EnumValsTy::iterator &EI,
+ EnumValsTy::iterator &EIEnd,
+ const llvm::APSInt &Val) {
+ if (const DeclRefExpr *DRE =
+ dyn_cast<DeclRefExpr>(CaseExpr->IgnoreParenImpCasts())) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ QualType VarType = VD->getType();
+ QualType EnumType = S.Context.getTypeDeclType(ED);
+ if (VD->hasGlobalStorage() && VarType.isConstQualified() &&
+ S.Context.hasSameUnqualifiedType(EnumType, VarType))
+ return false;
+ }
+ }
+
+ if (ED->hasAttr<FlagEnumAttr>()) {
+ return !S.IsValueInFlagEnum(ED, Val, false);
+ } else {
+ while (EI != EIEnd && EI->first < Val)
+ EI++;
+
+ if (EI != EIEnd && EI->first == Val)
+ return false;
+ }
+
+ return true;
+}
+
+StmtResult
+Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
+ Stmt *BodyStmt) {
+ SwitchStmt *SS = cast<SwitchStmt>(Switch);
+ assert(SS == getCurFunction()->SwitchStack.back() &&
+ "switch stack missing push/pop!");
+
+ getCurFunction()->SwitchStack.pop_back();
+
+ if (!BodyStmt) return StmtError();
+ SS->setBody(BodyStmt, SwitchLoc);
+
+ Expr *CondExpr = SS->getCond();
+ if (!CondExpr) return StmtError();
+
+ QualType CondType = CondExpr->getType();
+
+ Expr *CondExprBeforePromotion = CondExpr;
+ QualType CondTypeBeforePromotion =
+ GetTypeBeforeIntegralPromotion(CondExprBeforePromotion);
+
+ // C++ 6.4.2.p2:
+ // Integral promotions are performed (on the switch condition).
+ //
+ // A case value unrepresentable by the original switch condition
+ // type (before the promotion) doesn't make sense, even when it can
+ // be represented by the promoted type. Therefore we need to find
+ // the pre-promotion type of the switch condition.
+ if (!CondExpr->isTypeDependent()) {
+ // We have already converted the expression to an integral or enumeration
+ // type, when we started the switch statement. If we don't have an
+ // appropriate type now, just return an error.
+ if (!CondType->isIntegralOrEnumerationType())
+ return StmtError();
+
+ if (CondExpr->isKnownToHaveBooleanValue()) {
+ // switch(bool_expr) {...} is often a programmer error, e.g.
+ // switch(n && mask) { ... } // Doh - should be "n & mask".
+ // One can always use an if statement instead of switch(bool_expr).
+ Diag(SwitchLoc, diag::warn_bool_switch_condition)
+ << CondExpr->getSourceRange();
+ }
+ }
+
+ // Get the bitwidth of the switched-on value after promotions. We must
+ // convert the integer case values to this width before comparison.
+ bool HasDependentValue
+ = CondExpr->isTypeDependent() || CondExpr->isValueDependent();
+ unsigned CondWidth = HasDependentValue ? 0 : Context.getIntWidth(CondType);
+ bool CondIsSigned = CondType->isSignedIntegerOrEnumerationType();
+
+ // Get the width and signedness that the condition might actually have, for
+ // warning purposes.
+ // FIXME: Grab an IntRange for the condition rather than using the unpromoted
+ // type.
+ unsigned CondWidthBeforePromotion
+ = HasDependentValue ? 0 : Context.getIntWidth(CondTypeBeforePromotion);
+ bool CondIsSignedBeforePromotion
+ = CondTypeBeforePromotion->isSignedIntegerOrEnumerationType();
+
+ // Accumulate all of the case values in a vector so that we can sort them
+ // and detect duplicates. This vector contains the APInt for the case after
+ // it has been converted to the condition type.
+ typedef SmallVector<std::pair<llvm::APSInt, CaseStmt*>, 64> CaseValsTy;
+ CaseValsTy CaseVals;
+
+ // Keep track of any GNU case ranges we see. The APSInt is the low value.
+ typedef std::vector<std::pair<llvm::APSInt, CaseStmt*> > CaseRangesTy;
+ CaseRangesTy CaseRanges;
+
+ DefaultStmt *TheDefaultStmt = nullptr;
+
+ bool CaseListIsErroneous = false;
+
+ for (SwitchCase *SC = SS->getSwitchCaseList(); SC && !HasDependentValue;
+ SC = SC->getNextSwitchCase()) {
+
+ if (DefaultStmt *DS = dyn_cast<DefaultStmt>(SC)) {
+ if (TheDefaultStmt) {
+ Diag(DS->getDefaultLoc(), diag::err_multiple_default_labels_defined);
+ Diag(TheDefaultStmt->getDefaultLoc(), diag::note_duplicate_case_prev);
+
+ // FIXME: Remove the default statement from the switch block so that
+ // we'll return a valid AST. This requires recursing down the AST and
+ // finding it, not something we are set up to do right now. For now,
+ // just lop the entire switch stmt out of the AST.
+ CaseListIsErroneous = true;
+ }
+ TheDefaultStmt = DS;
+
+ } else {
+ CaseStmt *CS = cast<CaseStmt>(SC);
+
+ Expr *Lo = CS->getLHS();
+
+ if (Lo->isTypeDependent() || Lo->isValueDependent()) {
+ HasDependentValue = true;
+ break;
+ }
+
+ llvm::APSInt LoVal;
+
+ if (getLangOpts().CPlusPlus11) {
+ // C++11 [stmt.switch]p2: the constant-expression shall be a converted
+ // constant expression of the promoted type of the switch condition.
+ ExprResult ConvLo =
+ CheckConvertedConstantExpression(Lo, CondType, LoVal, CCEK_CaseValue);
+ if (ConvLo.isInvalid()) {
+ CaseListIsErroneous = true;
+ continue;
+ }
+ Lo = ConvLo.get();
+ } else {
+ // We already verified that the expression is an integer constant expression
+ // (C99 6.8.4.2p3) - get that value now.
+ LoVal = Lo->EvaluateKnownConstInt(Context);
+
+ // If the LHS is not the same type as the condition, insert an implicit
+ // cast.
+ Lo = DefaultLvalueConversion(Lo).get();
+ Lo = ImpCastExprToType(Lo, CondType, CK_IntegralCast).get();
+ }
+
+ // Check the unconverted value is within the range of possible values of
+ // the switch expression.
+ checkCaseValue(*this, Lo->getLocStart(), LoVal,
+ CondWidthBeforePromotion, CondIsSignedBeforePromotion);
+
+ // Convert the value to the same width/sign as the condition.
+ AdjustAPSInt(LoVal, CondWidth, CondIsSigned);
+
+ CS->setLHS(Lo);
+
+ // If this is a case range, remember it in CaseRanges, otherwise CaseVals.
+ if (CS->getRHS()) {
+ if (CS->getRHS()->isTypeDependent() ||
+ CS->getRHS()->isValueDependent()) {
+ HasDependentValue = true;
+ break;
+ }
+ CaseRanges.push_back(std::make_pair(LoVal, CS));
+ } else
+ CaseVals.push_back(std::make_pair(LoVal, CS));
+ }
+ }
+
+ if (!HasDependentValue) {
+ // If we don't have a default statement, check whether the
+ // condition is constant.
+ llvm::APSInt ConstantCondValue;
+ bool HasConstantCond = false;
+ if (!HasDependentValue && !TheDefaultStmt) {
+ HasConstantCond = CondExpr->EvaluateAsInt(ConstantCondValue, Context,
+ Expr::SE_AllowSideEffects);
+ assert(!HasConstantCond ||
+ (ConstantCondValue.getBitWidth() == CondWidth &&
+ ConstantCondValue.isSigned() == CondIsSigned));
+ }
+ bool ShouldCheckConstantCond = HasConstantCond;
+
+ // Sort all the scalar case values so we can easily detect duplicates.
+ std::stable_sort(CaseVals.begin(), CaseVals.end(), CmpCaseVals);
+
+ if (!CaseVals.empty()) {
+ for (unsigned i = 0, e = CaseVals.size(); i != e; ++i) {
+ if (ShouldCheckConstantCond &&
+ CaseVals[i].first == ConstantCondValue)
+ ShouldCheckConstantCond = false;
+
+ if (i != 0 && CaseVals[i].first == CaseVals[i-1].first) {
+ // If we have a duplicate, report it.
+ // First, determine if either case value has a name
+ StringRef PrevString, CurrString;
+ Expr *PrevCase = CaseVals[i-1].second->getLHS()->IgnoreParenCasts();
+ Expr *CurrCase = CaseVals[i].second->getLHS()->IgnoreParenCasts();
+ if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(PrevCase)) {
+ PrevString = DeclRef->getDecl()->getName();
+ }
+ if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(CurrCase)) {
+ CurrString = DeclRef->getDecl()->getName();
+ }
+ SmallString<16> CaseValStr;
+ CaseVals[i-1].first.toString(CaseValStr);
+
+ if (PrevString == CurrString)
+ Diag(CaseVals[i].second->getLHS()->getLocStart(),
+ diag::err_duplicate_case) <<
+ (PrevString.empty() ? StringRef(CaseValStr) : PrevString);
+ else
+ Diag(CaseVals[i].second->getLHS()->getLocStart(),
+ diag::err_duplicate_case_differing_expr) <<
+ (PrevString.empty() ? StringRef(CaseValStr) : PrevString) <<
+ (CurrString.empty() ? StringRef(CaseValStr) : CurrString) <<
+ CaseValStr;
+
+ Diag(CaseVals[i-1].second->getLHS()->getLocStart(),
+ diag::note_duplicate_case_prev);
+ // FIXME: We really want to remove the bogus case stmt from the
+ // substmt, but we have no way to do this right now.
+ CaseListIsErroneous = true;
+ }
+ }
+ }
+
+ // Detect duplicate case ranges, which usually don't exist at all in
+ // the first place.
+ if (!CaseRanges.empty()) {
+ // Sort all the case ranges by their low value so we can easily detect
+ // overlaps between ranges.
+ std::stable_sort(CaseRanges.begin(), CaseRanges.end());
+
+ // Scan the ranges, computing the high values and removing empty ranges.
+ std::vector<llvm::APSInt> HiVals;
+ for (unsigned i = 0, e = CaseRanges.size(); i != e; ++i) {
+ llvm::APSInt &LoVal = CaseRanges[i].first;
+ CaseStmt *CR = CaseRanges[i].second;
+ Expr *Hi = CR->getRHS();
+ llvm::APSInt HiVal;
+
+ if (getLangOpts().CPlusPlus11) {
+ // C++11 [stmt.switch]p2: the constant-expression shall be a converted
+ // constant expression of the promoted type of the switch condition.
+ ExprResult ConvHi =
+ CheckConvertedConstantExpression(Hi, CondType, HiVal,
+ CCEK_CaseValue);
+ if (ConvHi.isInvalid()) {
+ CaseListIsErroneous = true;
+ continue;
+ }
+ Hi = ConvHi.get();
+ } else {
+ HiVal = Hi->EvaluateKnownConstInt(Context);
+
+ // If the RHS is not the same type as the condition, insert an
+ // implicit cast.
+ Hi = DefaultLvalueConversion(Hi).get();
+ Hi = ImpCastExprToType(Hi, CondType, CK_IntegralCast).get();
+ }
+
+ // Check the unconverted value is within the range of possible values of
+ // the switch expression.
+ checkCaseValue(*this, Hi->getLocStart(), HiVal,
+ CondWidthBeforePromotion, CondIsSignedBeforePromotion);
+
+ // Convert the value to the same width/sign as the condition.
+ AdjustAPSInt(HiVal, CondWidth, CondIsSigned);
+
+ CR->setRHS(Hi);
+
+ // If the low value is bigger than the high value, the case is empty.
+ if (LoVal > HiVal) {
+ Diag(CR->getLHS()->getLocStart(), diag::warn_case_empty_range)
+ << SourceRange(CR->getLHS()->getLocStart(),
+ Hi->getLocEnd());
+ CaseRanges.erase(CaseRanges.begin()+i);
+ --i, --e;
+ continue;
+ }
+
+ if (ShouldCheckConstantCond &&
+ LoVal <= ConstantCondValue &&
+ ConstantCondValue <= HiVal)
+ ShouldCheckConstantCond = false;
+
+ HiVals.push_back(HiVal);
+ }
+
+ // Rescan the ranges, looking for overlap with singleton values and other
+ // ranges. Since the range list is sorted, we only need to compare case
+ // ranges with their neighbors.
+ for (unsigned i = 0, e = CaseRanges.size(); i != e; ++i) {
+ llvm::APSInt &CRLo = CaseRanges[i].first;
+ llvm::APSInt &CRHi = HiVals[i];
+ CaseStmt *CR = CaseRanges[i].second;
+
+ // Check to see whether the case range overlaps with any
+ // singleton cases.
+ CaseStmt *OverlapStmt = nullptr;
+ llvm::APSInt OverlapVal(32);
+
+ // Find the smallest value >= the lower bound. If I is in the
+ // case range, then we have overlap.
+ CaseValsTy::iterator I = std::lower_bound(CaseVals.begin(),
+ CaseVals.end(), CRLo,
+ CaseCompareFunctor());
+ if (I != CaseVals.end() && I->first < CRHi) {
+ OverlapVal = I->first; // Found overlap with scalar.
+ OverlapStmt = I->second;
+ }
+
+ // Find the smallest value bigger than the upper bound.
+ I = std::upper_bound(I, CaseVals.end(), CRHi, CaseCompareFunctor());
+ if (I != CaseVals.begin() && (I-1)->first >= CRLo) {
+ OverlapVal = (I-1)->first; // Found overlap with scalar.
+ OverlapStmt = (I-1)->second;
+ }
+
+ // Check to see if this case range overlaps with the previous
+ // case range.
+ if (i && CRLo <= HiVals[i-1]) {
+ OverlapVal = HiVals[i-1]; // Found overlap with range.
+ OverlapStmt = CaseRanges[i-1].second;
+ }
+
+ if (OverlapStmt) {
+ // If we have a duplicate, report it.
+ Diag(CR->getLHS()->getLocStart(), diag::err_duplicate_case)
+ << OverlapVal.toString(10);
+ Diag(OverlapStmt->getLHS()->getLocStart(),
+ diag::note_duplicate_case_prev);
+ // FIXME: We really want to remove the bogus case stmt from the
+ // substmt, but we have no way to do this right now.
+ CaseListIsErroneous = true;
+ }
+ }
+ }
+
+ // Complain if we have a constant condition and we didn't find a match.
+ if (!CaseListIsErroneous && ShouldCheckConstantCond) {
+ // TODO: it would be nice if we printed enums as enums, chars as
+ // chars, etc.
+ Diag(CondExpr->getExprLoc(), diag::warn_missing_case_for_condition)
+ << ConstantCondValue.toString(10)
+ << CondExpr->getSourceRange();
+ }
+
+ // Check to see if the switch is over an enum and handles all of its
+ // values. We only issue a warning if there is no 'default:', but
+ // we still do the analysis to preserve this information in the AST
+ // (which can be used by flow-based analyses).
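+ // For example (illustrative): with 'enum E { A, B, C };', a switch over an
+ // 'E' value whose cases cover only A and B is diagnosed as not handling C.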
+ //
+ const EnumType *ET = CondTypeBeforePromotion->getAs<EnumType>();
+
+ // Don't bother when the case list was erroneous or the condition is a
+ // known constant.
+ if (!CaseListIsErroneous && !HasConstantCond && ET) {
+ const EnumDecl *ED = ET->getDecl();
+ EnumValsTy EnumVals;
+
+ // Gather all enum values, set their type and sort them,
+ // allowing easier comparison with CaseVals.
+ for (auto *EDI : ED->enumerators()) {
+ llvm::APSInt Val = EDI->getInitVal();
+ AdjustAPSInt(Val, CondWidth, CondIsSigned);
+ EnumVals.push_back(std::make_pair(Val, EDI));
+ }
+ std::stable_sort(EnumVals.begin(), EnumVals.end(), CmpEnumVals);
+ auto EI = EnumVals.begin(), EIEnd =
+ std::unique(EnumVals.begin(), EnumVals.end(), EqEnumVals);
+
+ // See which case values aren't in enum.
+ for (CaseValsTy::const_iterator CI = CaseVals.begin();
+ CI != CaseVals.end(); CI++) {
+ Expr *CaseExpr = CI->second->getLHS();
+ if (ShouldDiagnoseSwitchCaseNotInEnum(*this, ED, CaseExpr, EI, EIEnd,
+ CI->first))
+ Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum)
+ << CondTypeBeforePromotion;
+ }
+
+ // See which of case ranges aren't in enum
+ EI = EnumVals.begin();
+ for (CaseRangesTy::const_iterator RI = CaseRanges.begin();
+ RI != CaseRanges.end(); RI++) {
+ Expr *CaseExpr = RI->second->getLHS();
+ if (ShouldDiagnoseSwitchCaseNotInEnum(*this, ED, CaseExpr, EI, EIEnd,
+ RI->first))
+ Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum)
+ << CondTypeBeforePromotion;
+
+ llvm::APSInt Hi =
+ RI->second->getRHS()->EvaluateKnownConstInt(Context);
+ AdjustAPSInt(Hi, CondWidth, CondIsSigned);
+
+ CaseExpr = RI->second->getRHS();
+ if (ShouldDiagnoseSwitchCaseNotInEnum(*this, ED, CaseExpr, EI, EIEnd,
+ Hi))
+ Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum)
+ << CondTypeBeforePromotion;
+ }
+
+ // Check which enum vals aren't in switch
+ auto CI = CaseVals.begin();
+ auto RI = CaseRanges.begin();
+ bool hasCasesNotInSwitch = false;
+
+ SmallVector<DeclarationName,8> UnhandledNames;
+
+ for (EI = EnumVals.begin(); EI != EIEnd; EI++){
+ // Drop unneeded case values
+ while (CI != CaseVals.end() && CI->first < EI->first)
+ CI++;
+
+ if (CI != CaseVals.end() && CI->first == EI->first)
+ continue;
+
+ // Drop unneeded case ranges
+ for (; RI != CaseRanges.end(); RI++) {
+ llvm::APSInt Hi =
+ RI->second->getRHS()->EvaluateKnownConstInt(Context);
+ AdjustAPSInt(Hi, CondWidth, CondIsSigned);
+ if (EI->first <= Hi)
+ break;
+ }
+
+ if (RI == CaseRanges.end() || EI->first < RI->first) {
+ hasCasesNotInSwitch = true;
+ UnhandledNames.push_back(EI->second->getDeclName());
+ }
+ }
+
+ if (TheDefaultStmt && UnhandledNames.empty())
+ Diag(TheDefaultStmt->getDefaultLoc(), diag::warn_unreachable_default);
+
+ // Produce a nice diagnostic if multiple values aren't handled.
+ if (!UnhandledNames.empty()) {
+ DiagnosticBuilder DB = Diag(CondExpr->getExprLoc(),
+ TheDefaultStmt ? diag::warn_def_missing_case
+ : diag::warn_missing_case)
+ << (int)UnhandledNames.size();
+
+ for (size_t I = 0, E = std::min(UnhandledNames.size(), (size_t)3);
+ I != E; ++I)
+ DB << UnhandledNames[I];
+ }
+
+ if (!hasCasesNotInSwitch)
+ SS->setAllEnumCasesCovered();
+ }
+ }
+
+ if (BodyStmt)
+ DiagnoseEmptyStmtBody(CondExpr->getLocEnd(), BodyStmt,
+ diag::warn_empty_switch_body);
+
+ // FIXME: If the case list was broken in some way, we don't have a good system
+ // to patch it up. Instead, just return the whole substmt as broken.
+ if (CaseListIsErroneous)
+ return StmtError();
+
+ return SS;
+}
+
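+/// Warn when an integer constant is assigned to an lvalue of enum type but
+/// matches none of its enumerators (or, for flag enums, no valid combination
+/// of them), e.g. (illustrative, C): 'enum E { A = 1, B = 2 }; enum E e;
+/// e = 7;' under -Wassign-enum.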
+void
+Sema::DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
+ Expr *SrcExpr) {
+ if (Diags.isIgnored(diag::warn_not_in_enum_assignment, SrcExpr->getExprLoc()))
+ return;
+
+ if (const EnumType *ET = DstType->getAs<EnumType>())
+ if (!Context.hasSameUnqualifiedType(SrcType, DstType) &&
+ SrcType->isIntegerType()) {
+ if (!SrcExpr->isTypeDependent() && !SrcExpr->isValueDependent() &&
+ SrcExpr->isIntegerConstantExpr(Context)) {
+ // Get the bitwidth of the enum value before promotions.
+ unsigned DstWidth = Context.getIntWidth(DstType);
+ bool DstIsSigned = DstType->isSignedIntegerOrEnumerationType();
+
+ llvm::APSInt RhsVal = SrcExpr->EvaluateKnownConstInt(Context);
+ AdjustAPSInt(RhsVal, DstWidth, DstIsSigned);
+ const EnumDecl *ED = ET->getDecl();
+
+ if (ED->hasAttr<FlagEnumAttr>()) {
+ if (!IsValueInFlagEnum(ED, RhsVal, true))
+ Diag(SrcExpr->getExprLoc(), diag::warn_not_in_enum_assignment)
+ << DstType.getUnqualifiedType();
+ } else {
+ typedef SmallVector<std::pair<llvm::APSInt, EnumConstantDecl *>, 64>
+ EnumValsTy;
+ EnumValsTy EnumVals;
+
+ // Gather all enum values, set their type and sort them,
+ // allowing easier comparison with rhs constant.
+ for (auto *EDI : ED->enumerators()) {
+ llvm::APSInt Val = EDI->getInitVal();
+ AdjustAPSInt(Val, DstWidth, DstIsSigned);
+ EnumVals.push_back(std::make_pair(Val, EDI));
+ }
+ if (EnumVals.empty())
+ return;
+ std::stable_sort(EnumVals.begin(), EnumVals.end(), CmpEnumVals);
+ EnumValsTy::iterator EIend =
+ std::unique(EnumVals.begin(), EnumVals.end(), EqEnumVals);
+
+ // See which values aren't in the enum.
+ EnumValsTy::const_iterator EI = EnumVals.begin();
+ while (EI != EIend && EI->first < RhsVal)
+ EI++;
+ if (EI == EIend || EI->first != RhsVal) {
+ Diag(SrcExpr->getExprLoc(), diag::warn_not_in_enum_assignment)
+ << DstType.getUnqualifiedType();
+ }
+ }
+ }
+ }
+}
+
+StmtResult
+Sema::ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond,
+ Decl *CondVar, Stmt *Body) {
+ ExprResult CondResult(Cond.release());
+
+ VarDecl *ConditionVar = nullptr;
+ if (CondVar) {
+ ConditionVar = cast<VarDecl>(CondVar);
+ CondResult = CheckConditionVariable(ConditionVar, WhileLoc, true);
+ CondResult = ActOnFinishFullExpr(CondResult.get(), WhileLoc);
+ if (CondResult.isInvalid())
+ return StmtError();
+ }
+ Expr *ConditionExpr = CondResult.get();
+ if (!ConditionExpr)
+ return StmtError();
+ CheckBreakContinueBinding(ConditionExpr);
+
+ DiagnoseUnusedExprResult(Body);
+
+ if (isa<NullStmt>(Body))
+ getCurCompoundScope().setHasEmptyLoopBodies();
+
+ return new (Context)
+ WhileStmt(Context, ConditionVar, ConditionExpr, Body, WhileLoc);
+}
+
+StmtResult
+Sema::ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
+ SourceLocation WhileLoc, SourceLocation CondLParen,
+ Expr *Cond, SourceLocation CondRParen) {
+ assert(Cond && "ActOnDoStmt(): missing expression");
+
+ CheckBreakContinueBinding(Cond);
+ ExprResult CondResult = CheckBooleanCondition(Cond, DoLoc);
+ if (CondResult.isInvalid())
+ return StmtError();
+ Cond = CondResult.get();
+
+ CondResult = ActOnFinishFullExpr(Cond, DoLoc);
+ if (CondResult.isInvalid())
+ return StmtError();
+ Cond = CondResult.get();
+
+ DiagnoseUnusedExprResult(Body);
+
+ return new (Context) DoStmt(Body, Cond, DoLoc, WhileLoc, CondRParen);
+}
+
+namespace {
+ // This visitor traverses a condition expression and collects all the
+ // variable decls it references. Simple is set to true if none of the
+ // excluded constructs are used.
+ class DeclExtractor : public EvaluatedExprVisitor<DeclExtractor> {
+ llvm::SmallPtrSetImpl<VarDecl*> &Decls;
+ SmallVectorImpl<SourceRange> &Ranges;
+ bool Simple;
+ public:
+ typedef EvaluatedExprVisitor<DeclExtractor> Inherited;
+
+ DeclExtractor(Sema &S, llvm::SmallPtrSetImpl<VarDecl*> &Decls,
+ SmallVectorImpl<SourceRange> &Ranges) :
+ Inherited(S.Context),
+ Decls(Decls),
+ Ranges(Ranges),
+ Simple(true) {}
+
+ bool isSimple() { return Simple; }
+
+ // Replaces the method in EvaluatedExprVisitor.
+ void VisitMemberExpr(MemberExpr* E) {
+ Simple = false;
+ }
+
+ // Any Stmt not whitelisted will cause the condition to be marked complex.
+ void VisitStmt(Stmt *S) {
+ Simple = false;
+ }
+
+ void VisitBinaryOperator(BinaryOperator *E) {
+ Visit(E->getLHS());
+ Visit(E->getRHS());
+ }
+
+ void VisitCastExpr(CastExpr *E) {
+ Visit(E->getSubExpr());
+ }
+
+ void VisitUnaryOperator(UnaryOperator *E) {
+ // Skip checking conditionals with dereferences.
+ if (E->getOpcode() == UO_Deref)
+ Simple = false;
+ else
+ Visit(E->getSubExpr());
+ }
+
+ void VisitConditionalOperator(ConditionalOperator *E) {
+ Visit(E->getCond());
+ Visit(E->getTrueExpr());
+ Visit(E->getFalseExpr());
+ }
+
+ void VisitParenExpr(ParenExpr *E) {
+ Visit(E->getSubExpr());
+ }
+
+ void VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
+ Visit(E->getOpaqueValue()->getSourceExpr());
+ Visit(E->getFalseExpr());
+ }
+
+ void VisitIntegerLiteral(IntegerLiteral *E) { }
+ void VisitFloatingLiteral(FloatingLiteral *E) { }
+ void VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) { }
+ void VisitCharacterLiteral(CharacterLiteral *E) { }
+ void VisitGNUNullExpr(GNUNullExpr *E) { }
+ void VisitImaginaryLiteral(ImaginaryLiteral *E) { }
+
+ void VisitDeclRefExpr(DeclRefExpr *E) {
+ VarDecl *VD = dyn_cast<VarDecl>(E->getDecl());
+ if (!VD) return;
+
+ Ranges.push_back(E->getSourceRange());
+
+ Decls.insert(VD);
+ }
+
+ }; // end class DeclExtractor
+
+ // DeclMatcher checks to see if any of the given decls is used in the given
+ // statement other than as a plain rvalue read.
+ class DeclMatcher : public EvaluatedExprVisitor<DeclMatcher> {
+ llvm::SmallPtrSetImpl<VarDecl*> &Decls;
+ bool FoundDecl;
+
+ public:
+ typedef EvaluatedExprVisitor<DeclMatcher> Inherited;
+
+ DeclMatcher(Sema &S, llvm::SmallPtrSetImpl<VarDecl*> &Decls,
+ Stmt *Statement) :
+ Inherited(S.Context), Decls(Decls), FoundDecl(false) {
+ if (!Statement) return;
+
+ Visit(Statement);
+ }
+
+ void VisitReturnStmt(ReturnStmt *S) {
+ FoundDecl = true;
+ }
+
+ void VisitBreakStmt(BreakStmt *S) {
+ FoundDecl = true;
+ }
+
+ void VisitGotoStmt(GotoStmt *S) {
+ FoundDecl = true;
+ }
+
+ void VisitCastExpr(CastExpr *E) {
+ if (E->getCastKind() == CK_LValueToRValue)
+ CheckLValueToRValueCast(E->getSubExpr());
+ else
+ Visit(E->getSubExpr());
+ }
+
+ void CheckLValueToRValueCast(Expr *E) {
+ E = E->IgnoreParenImpCasts();
+
+ if (isa<DeclRefExpr>(E)) {
+ return;
+ }
+
+ if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
+ Visit(CO->getCond());
+ CheckLValueToRValueCast(CO->getTrueExpr());
+ CheckLValueToRValueCast(CO->getFalseExpr());
+ return;
+ }
+
+ if (BinaryConditionalOperator *BCO =
+ dyn_cast<BinaryConditionalOperator>(E)) {
+ CheckLValueToRValueCast(BCO->getOpaqueValue()->getSourceExpr());
+ CheckLValueToRValueCast(BCO->getFalseExpr());
+ return;
+ }
+
+ Visit(E);
+ }
+
+ void VisitDeclRefExpr(DeclRefExpr *E) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()))
+ if (Decls.count(VD))
+ FoundDecl = true;
+ }
+
+ bool FoundDeclInUse() { return FoundDecl; }
+
+ }; // end class DeclMatcher
+
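+ // Warn when none of the variables appearing in a for loop's condition are
+ // modified in the loop's body or increment, e.g. (illustrative):
+ // 'for (int i = 0; i < n; ++j) { ... }' never updates 'i'.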
+ void CheckForLoopConditionalStatement(Sema &S, Expr *Second,
+ Expr *Third, Stmt *Body) {
+ // Condition is empty
+ if (!Second) return;
+
+ if (S.Diags.isIgnored(diag::warn_variables_not_in_loop_body,
+ Second->getLocStart()))
+ return;
+
+ PartialDiagnostic PDiag = S.PDiag(diag::warn_variables_not_in_loop_body);
+ llvm::SmallPtrSet<VarDecl*, 8> Decls;
+ SmallVector<SourceRange, 10> Ranges;
+ DeclExtractor DE(S, Decls, Ranges);
+ DE.Visit(Second);
+
+ // Don't analyze complex conditionals.
+ if (!DE.isSimple()) return;
+
+ // No decls found.
+ if (Decls.size() == 0) return;
+
+ // Don't warn on volatile, static, or global variables.
+ for (llvm::SmallPtrSetImpl<VarDecl*>::iterator I = Decls.begin(),
+ E = Decls.end();
+ I != E; ++I)
+ if ((*I)->getType().isVolatileQualified() ||
+ (*I)->hasGlobalStorage()) return;
+
+ if (DeclMatcher(S, Decls, Second).FoundDeclInUse() ||
+ DeclMatcher(S, Decls, Third).FoundDeclInUse() ||
+ DeclMatcher(S, Decls, Body).FoundDeclInUse())
+ return;
+
+ // Load decl names into diagnostic.
+ if (Decls.size() > 4)
+ PDiag << 0;
+ else {
+ PDiag << Decls.size();
+ for (llvm::SmallPtrSetImpl<VarDecl*>::iterator I = Decls.begin(),
+ E = Decls.end();
+ I != E; ++I)
+ PDiag << (*I)->getDeclName();
+ }
+
+ // Load SourceRanges into diagnostic if there is room.
+ // Otherwise, load the SourceRange of the conditional expression.
+ if (Ranges.size() <= PartialDiagnostic::MaxArguments)
+ for (SmallVectorImpl<SourceRange>::iterator I = Ranges.begin(),
+ E = Ranges.end();
+ I != E; ++I)
+ PDiag << *I;
+ else
+ PDiag << Second->getSourceRange();
+
+ S.Diag(Ranges.begin()->getBegin(), PDiag);
+ }
+
+ // If Statement is an increment or decrement, return true and set the
+ // variables Increment and DRE.
+ bool ProcessIterationStmt(Sema &S, Stmt* Statement, bool &Increment,
+ DeclRefExpr *&DRE) {
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(Statement)) {
+ switch (UO->getOpcode()) {
+ default: return false;
+ case UO_PostInc:
+ case UO_PreInc:
+ Increment = true;
+ break;
+ case UO_PostDec:
+ case UO_PreDec:
+ Increment = false;
+ break;
+ }
+ DRE = dyn_cast<DeclRefExpr>(UO->getSubExpr());
+ return DRE;
+ }
+
+ if (CXXOperatorCallExpr *Call = dyn_cast<CXXOperatorCallExpr>(Statement)) {
+ FunctionDecl *FD = Call->getDirectCallee();
+ if (!FD || !FD->isOverloadedOperator()) return false;
+ switch (FD->getOverloadedOperator()) {
+ default: return false;
+ case OO_PlusPlus:
+ Increment = true;
+ break;
+ case OO_MinusMinus:
+ Increment = false;
+ break;
+ }
+ DRE = dyn_cast<DeclRefExpr>(Call->getArg(0));
+ return DRE;
+ }
+
+ return false;
+ }
+
+ // A visitor to determine if a continue or break statement appears anywhere
+ // within a statement or expression.
+ class BreakContinueFinder : public EvaluatedExprVisitor<BreakContinueFinder> {
+ SourceLocation BreakLoc;
+ SourceLocation ContinueLoc;
+ public:
+ BreakContinueFinder(Sema &S, Stmt* Body) :
+ Inherited(S.Context) {
+ Visit(Body);
+ }
+
+ typedef EvaluatedExprVisitor<BreakContinueFinder> Inherited;
+
+ void VisitContinueStmt(ContinueStmt* E) {
+ ContinueLoc = E->getContinueLoc();
+ }
+
+ void VisitBreakStmt(BreakStmt* E) {
+ BreakLoc = E->getBreakLoc();
+ }
+
+ bool ContinueFound() { return ContinueLoc.isValid(); }
+ bool BreakFound() { return BreakLoc.isValid(); }
+ SourceLocation GetContinueLoc() { return ContinueLoc; }
+ SourceLocation GetBreakLoc() { return BreakLoc; }
+
+ }; // end class BreakContinueFinder
+
+ // Emit a warning when a loop increment/decrement appears twice per loop
+ // iteration. The conditions which trigger this warning are:
+ // 1) The last statement in the loop body and the third expression in the
+ // for loop are both increment or both decrement of the same variable
+ // 2) No continue statements in the loop body.
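+ // For example (illustrative): 'for (int i = 0; i < n; ++i) { ...; ++i; }'
+ // increments 'i' twice per iteration.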
+ void CheckForRedundantIteration(Sema &S, Expr *Third, Stmt *Body) {
+ // Return when there is nothing to check.
+ if (!Body || !Third) return;
+
+ if (S.Diags.isIgnored(diag::warn_redundant_loop_iteration,
+ Third->getLocStart()))
+ return;
+
+ // Get the last statement from the loop body.
+ CompoundStmt *CS = dyn_cast<CompoundStmt>(Body);
+ if (!CS || CS->body_empty()) return;
+ Stmt *LastStmt = CS->body_back();
+ if (!LastStmt) return;
+
+ bool LoopIncrement, LastIncrement;
+ DeclRefExpr *LoopDRE, *LastDRE;
+
+ if (!ProcessIterationStmt(S, Third, LoopIncrement, LoopDRE)) return;
+ if (!ProcessIterationStmt(S, LastStmt, LastIncrement, LastDRE)) return;
+
+ // Check that the two statements are both increments or both decrements
+ // on the same variable.
+ if (LoopIncrement != LastIncrement ||
+ LoopDRE->getDecl() != LastDRE->getDecl()) return;
+
+ if (BreakContinueFinder(S, Body).ContinueFound()) return;
+
+ S.Diag(LastDRE->getLocation(), diag::warn_redundant_loop_iteration)
+ << LastDRE->getDecl() << LastIncrement;
+ S.Diag(LoopDRE->getLocation(), diag::note_loop_iteration_here)
+ << LoopIncrement;
+ }
+
+} // end namespace
+
+
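+/// Diagnose a 'break' or 'continue' appearing inside a loop or switch
+/// condition expression (possible in C via GNU statement expressions), e.g.
+/// (illustrative): 'while (({ if (done) break; n; })) { ... }', since it may
+/// not bind to the construct the programmer intended.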
+void Sema::CheckBreakContinueBinding(Expr *E) {
+ if (!E || getLangOpts().CPlusPlus)
+ return;
+ BreakContinueFinder BCFinder(*this, E);
+ Scope *BreakParent = CurScope->getBreakParent();
+ if (BCFinder.BreakFound() && BreakParent) {
+ if (BreakParent->getFlags() & Scope::SwitchScope) {
+ Diag(BCFinder.GetBreakLoc(), diag::warn_break_binds_to_switch);
+ } else {
+ Diag(BCFinder.GetBreakLoc(), diag::warn_loop_ctrl_binds_to_inner)
+ << "break";
+ }
+ } else if (BCFinder.ContinueFound() && CurScope->getContinueParent()) {
+ Diag(BCFinder.GetContinueLoc(), diag::warn_loop_ctrl_binds_to_inner)
+ << "continue";
+ }
+}
+
+StmtResult
+Sema::ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
+ Stmt *First, FullExprArg second, Decl *secondVar,
+ FullExprArg third,
+ SourceLocation RParenLoc, Stmt *Body) {
+ if (!getLangOpts().CPlusPlus) {
+ if (DeclStmt *DS = dyn_cast_or_null<DeclStmt>(First)) {
+ // C99 6.8.5p3: The declaration part of a 'for' statement shall only
+ // declare identifiers for objects having storage class 'auto' or
+ // 'register'.
+ for (auto *DI : DS->decls()) {
+ VarDecl *VD = dyn_cast<VarDecl>(DI);
+ if (VD && VD->isLocalVarDecl() && !VD->hasLocalStorage())
+ VD = nullptr;
+ if (!VD) {
+ Diag(DI->getLocation(), diag::err_non_local_variable_decl_in_for);
+ DI->setInvalidDecl();
+ }
+ }
+ }
+ }
+
+ CheckBreakContinueBinding(second.get());
+ CheckBreakContinueBinding(third.get());
+
+ CheckForLoopConditionalStatement(*this, second.get(), third.get(), Body);
+ CheckForRedundantIteration(*this, third.get(), Body);
+
+ ExprResult SecondResult(second.release());
+ VarDecl *ConditionVar = nullptr;
+ if (secondVar) {
+ ConditionVar = cast<VarDecl>(secondVar);
+ SecondResult = CheckConditionVariable(ConditionVar, ForLoc, true);
+ SecondResult = ActOnFinishFullExpr(SecondResult.get(), ForLoc);
+ if (SecondResult.isInvalid())
+ return StmtError();
+ }
+
+ Expr *Third = third.release().getAs<Expr>();
+
+ DiagnoseUnusedExprResult(First);
+ DiagnoseUnusedExprResult(Third);
+ DiagnoseUnusedExprResult(Body);
+
+ if (isa<NullStmt>(Body))
+ getCurCompoundScope().setHasEmptyLoopBodies();
+
+ return new (Context) ForStmt(Context, First, SecondResult.get(), ConditionVar,
+ Third, Body, ForLoc, LParenLoc, RParenLoc);
+}
+
+/// In an Objective C collection iteration statement:
+/// for (x in y)
+/// x can be an arbitrary l-value expression. Bind it up as a
+/// full-expression.
+StmtResult Sema::ActOnForEachLValueExpr(Expr *E) {
+ // Reduce placeholder expressions here. Note that this rejects the
+ // use of pseudo-object l-values in this position.
+ ExprResult result = CheckPlaceholderExpr(E);
+ if (result.isInvalid()) return StmtError();
+ E = result.get();
+
+ ExprResult FullExpr = ActOnFinishFullExpr(E);
+ if (FullExpr.isInvalid())
+ return StmtError();
+ return StmtResult(static_cast<Stmt*>(FullExpr.get()));
+}
+
+ExprResult
+Sema::CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection) {
+ if (!collection)
+ return ExprError();
+
+ ExprResult result = CorrectDelayedTyposInExpr(collection);
+ if (!result.isUsable())
+ return ExprError();
+ collection = result.get();
+
+ // Bail out early if we've got a type-dependent expression.
+ if (collection->isTypeDependent()) return collection;
+
+ // Perform normal l-value conversion.
+ result = DefaultFunctionArrayLvalueConversion(collection);
+ if (result.isInvalid())
+ return ExprError();
+ collection = result.get();
+
+ // The operand needs to have object-pointer type.
+ // TODO: should we do a contextual conversion?
+ const ObjCObjectPointerType *pointerType =
+ collection->getType()->getAs<ObjCObjectPointerType>();
+ if (!pointerType)
+ return Diag(forLoc, diag::err_collection_expr_type)
+ << collection->getType() << collection->getSourceRange();
+
+ // Check that the operand provides
+ // - countByEnumeratingWithState:objects:count:
+ const ObjCObjectType *objectType = pointerType->getObjectType();
+ ObjCInterfaceDecl *iface = objectType->getInterface();
+
+ // If the class is only forward-declared, we can't do this check.
+ // Under ARC, using such an incomplete class here is an error.
+ if (iface &&
+ (getLangOpts().ObjCAutoRefCount
+ ? RequireCompleteType(forLoc, QualType(objectType, 0),
+ diag::err_arc_collection_forward, collection)
+ : !isCompleteType(forLoc, QualType(objectType, 0)))) {
+ // Otherwise, if we have any useful type information, check that
+ // the type declares the appropriate method.
+ } else if (iface || !objectType->qual_empty()) {
+ IdentifierInfo *selectorIdents[] = {
+ &Context.Idents.get("countByEnumeratingWithState"),
+ &Context.Idents.get("objects"),
+ &Context.Idents.get("count")
+ };
+ Selector selector = Context.Selectors.getSelector(3, &selectorIdents[0]);
+
+ ObjCMethodDecl *method = nullptr;
+
+ // If there's an interface, look in both the public and private APIs.
+ if (iface) {
+ method = iface->lookupInstanceMethod(selector);
+ if (!method) method = iface->lookupPrivateMethod(selector);
+ }
+
+ // Also check protocol qualifiers.
+ if (!method)
+ method = LookupMethodInQualifiedType(selector, pointerType,
+ /*instance*/ true);
+
+ // If we didn't find it anywhere, give up.
+ if (!method) {
+ Diag(forLoc, diag::warn_collection_expr_type)
+ << collection->getType() << selector << collection->getSourceRange();
+ }
+
+ // TODO: check for an incompatible signature?
+ }
+
+ // Wrap up any cleanups in the expression.
+ return collection;
+}
+
+StmtResult
+Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
+ Stmt *First, Expr *collection,
+ SourceLocation RParenLoc) {
+
+ ExprResult CollectionExprResult =
+ CheckObjCForCollectionOperand(ForLoc, collection);
+
+ if (First) {
+ QualType FirstType;
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(First)) {
+ if (!DS->isSingleDecl())
+ return StmtError(Diag((*DS->decl_begin())->getLocation(),
+ diag::err_toomany_element_decls));
+
+ VarDecl *D = dyn_cast<VarDecl>(DS->getSingleDecl());
+ if (!D || D->isInvalidDecl())
+ return StmtError();
+
+ FirstType = D->getType();
+ // C99 6.8.5p3: The declaration part of a 'for' statement shall only
+ // declare identifiers for objects having storage class 'auto' or
+ // 'register'.
+ if (!D->hasLocalStorage())
+ return StmtError(Diag(D->getLocation(),
+ diag::err_non_local_variable_decl_in_for));
+
+ // If the type contained 'auto', deduce the 'auto' to 'id'.
+ if (FirstType->getContainedAutoType()) {
+ OpaqueValueExpr OpaqueId(D->getLocation(), Context.getObjCIdType(),
+ VK_RValue);
+ Expr *DeducedInit = &OpaqueId;
+ if (DeduceAutoType(D->getTypeSourceInfo(), DeducedInit, FirstType) ==
+ DAR_Failed)
+ DiagnoseAutoDeductionFailure(D, DeducedInit);
+ if (FirstType.isNull()) {
+ D->setInvalidDecl();
+ return StmtError();
+ }
+
+ D->setType(FirstType);
+
+ if (ActiveTemplateInstantiations.empty()) {
+ SourceLocation Loc =
+ D->getTypeSourceInfo()->getTypeLoc().getBeginLoc();
+ Diag(Loc, diag::warn_auto_var_is_id)
+ << D->getDeclName();
+ }
+ }
+
+ } else {
+ Expr *FirstE = cast<Expr>(First);
+ if (!FirstE->isTypeDependent() && !FirstE->isLValue())
+ return StmtError(Diag(First->getLocStart(),
+ diag::err_selector_element_not_lvalue)
+ << First->getSourceRange());
+
+ FirstType = static_cast<Expr*>(First)->getType();
+ if (FirstType.isConstQualified())
+ Diag(ForLoc, diag::err_selector_element_const_type)
+ << FirstType << First->getSourceRange();
+ }
+ if (!FirstType->isDependentType() &&
+ !FirstType->isObjCObjectPointerType() &&
+ !FirstType->isBlockPointerType())
+ return StmtError(Diag(ForLoc, diag::err_selector_element_type)
+ << FirstType << First->getSourceRange());
+ }
+
+ if (CollectionExprResult.isInvalid())
+ return StmtError();
+
+ CollectionExprResult = ActOnFinishFullExpr(CollectionExprResult.get());
+ if (CollectionExprResult.isInvalid())
+ return StmtError();
+
+ return new (Context) ObjCForCollectionStmt(First, CollectionExprResult.get(),
+ nullptr, ForLoc, RParenLoc);
+}
+
+/// Finish building a variable declaration for a for-range statement.
+/// \return true if an error occurs.
+static bool FinishForRangeVarDecl(Sema &SemaRef, VarDecl *Decl, Expr *Init,
+ SourceLocation Loc, int DiagID) {
+ if (Decl->getType()->isUndeducedType()) {
+ ExprResult Res = SemaRef.CorrectDelayedTyposInExpr(Init);
+ if (!Res.isUsable()) {
+ Decl->setInvalidDecl();
+ return true;
+ }
+ Init = Res.get();
+ }
+
+ // Deduce the type for the iterator variable now rather than leaving it to
+ // AddInitializerToDecl, so we can produce a more suitable diagnostic.
+ QualType InitType;
+ if ((!isa<InitListExpr>(Init) && Init->getType()->isVoidType()) ||
+ SemaRef.DeduceAutoType(Decl->getTypeSourceInfo(), Init, InitType) ==
+ Sema::DAR_Failed)
+ SemaRef.Diag(Loc, DiagID) << Init->getType();
+ if (InitType.isNull()) {
+ Decl->setInvalidDecl();
+ return true;
+ }
+ Decl->setType(InitType);
+
+ // In ARC, infer lifetime.
+ // FIXME: ARC may want to turn this into 'const __unsafe_unretained' if
+ // we're doing the equivalent of fast iteration.
+ if (SemaRef.getLangOpts().ObjCAutoRefCount &&
+ SemaRef.inferObjCARCLifetime(Decl))
+ Decl->setInvalidDecl();
+
+ SemaRef.AddInitializerToDecl(Decl, Init, /*DirectInit=*/false,
+ /*TypeMayContainAuto=*/false);
+ SemaRef.FinalizeDeclaration(Decl);
+ SemaRef.CurContext->addHiddenDecl(Decl);
+ return false;
+}
+
+namespace {
+// An enum to represent whether something is dealing with a call to begin()
+// or a call to end() in a range-based for loop.
+enum BeginEndFunction {
+ BEF_begin,
+ BEF_end
+};
+
+/// Produce a note indicating which begin/end function was implicitly called
+/// by a C++11 for-range statement. This is often not obvious from the code,
+/// nor from the diagnostics produced when analysing the implicit expressions
+/// required in a for-range statement.
+void NoteForRangeBeginEndFunction(Sema &SemaRef, Expr *E,
+ BeginEndFunction BEF) {
+ CallExpr *CE = dyn_cast<CallExpr>(E);
+ if (!CE)
+ return;
+ FunctionDecl *D = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
+ if (!D)
+ return;
+ SourceLocation Loc = D->getLocation();
+
+ std::string Description;
+ bool IsTemplate = false;
+ if (FunctionTemplateDecl *FunTmpl = D->getPrimaryTemplate()) {
+ Description = SemaRef.getTemplateArgumentBindingsText(
+ FunTmpl->getTemplateParameters(), *D->getTemplateSpecializationArgs());
+ IsTemplate = true;
+ }
+
+ SemaRef.Diag(Loc, diag::note_for_range_begin_end)
+ << BEF << IsTemplate << Description << E->getType();
+}
+
+/// Build a variable declaration for a for-range statement.
+VarDecl *BuildForRangeVarDecl(Sema &SemaRef, SourceLocation Loc,
+ QualType Type, const char *Name) {
+ DeclContext *DC = SemaRef.CurContext;
+ IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
+ TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
+ VarDecl *Decl = VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type,
+ TInfo, SC_None);
+ Decl->setImplicit();
+ return Decl;
+}
+
+}
+
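+// Whether the collection of a for statement selects Objective-C fast
+// enumeration rather than a C++ range-based for. For example, in
+// Objective-C++, 'for (id x : collection)' enumerates 'collection' when it
+// has an Objective-C object pointer type.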
+static bool ObjCEnumerationCollection(Expr *Collection) {
+ return !Collection->isTypeDependent()
+ && Collection->getType()->getAs<ObjCObjectPointerType>() != nullptr;
+}
+
+/// ActOnCXXForRangeStmt - Check and build a C++11 for-range statement.
+///
+/// C++11 [stmt.ranged]:
+/// A range-based for statement is equivalent to
+///
+/// {
+/// auto && __range = range-init;
+/// for ( auto __begin = begin-expr,
+/// __end = end-expr;
+/// __begin != __end;
+/// ++__begin ) {
+/// for-range-declaration = *__begin;
+/// statement
+/// }
+/// }
+///
+/// The body of the loop is not available yet, since it cannot be analysed until
+/// we have determined the type of the for-range-declaration.
+StmtResult Sema::ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
+ SourceLocation CoawaitLoc, Stmt *First,
+ SourceLocation ColonLoc, Expr *Range,
+ SourceLocation RParenLoc,
+ BuildForRangeKind Kind) {
+ if (!First)
+ return StmtError();
+
+ if (Range && ObjCEnumerationCollection(Range))
+ return ActOnObjCForCollectionStmt(ForLoc, First, Range, RParenLoc);
+
+ DeclStmt *DS = dyn_cast<DeclStmt>(First);
+ assert(DS && "first part of for range not a decl stmt");
+
+ if (!DS->isSingleDecl()) {
+ Diag(DS->getStartLoc(), diag::err_type_defined_in_for_range);
+ return StmtError();
+ }
+
+ Decl *LoopVar = DS->getSingleDecl();
+ if (LoopVar->isInvalidDecl() || !Range ||
+ DiagnoseUnexpandedParameterPack(Range, UPPC_Expression)) {
+ LoopVar->setInvalidDecl();
+ return StmtError();
+ }
+
+ // Coroutines: 'for co_await' implicitly co_awaits its range.
+ if (CoawaitLoc.isValid()) {
+ ExprResult Coawait = ActOnCoawaitExpr(S, CoawaitLoc, Range);
+ if (Coawait.isInvalid()) return StmtError();
+ Range = Coawait.get();
+ }
+
+ // Build auto && __range = range-init
+ SourceLocation RangeLoc = Range->getLocStart();
+ VarDecl *RangeVar = BuildForRangeVarDecl(*this, RangeLoc,
+ Context.getAutoRRefDeductType(),
+ "__range");
+ if (FinishForRangeVarDecl(*this, RangeVar, Range, RangeLoc,
+ diag::err_for_range_deduction_failure)) {
+ LoopVar->setInvalidDecl();
+ return StmtError();
+ }
+
+ // Claim the type doesn't contain auto: we've already done the checking.
+ DeclGroupPtrTy RangeGroup =
+ BuildDeclaratorGroup(MutableArrayRef<Decl *>((Decl **)&RangeVar, 1),
+ /*TypeMayContainAuto=*/ false);
+ StmtResult RangeDecl = ActOnDeclStmt(RangeGroup, RangeLoc, RangeLoc);
+ if (RangeDecl.isInvalid()) {
+ LoopVar->setInvalidDecl();
+ return StmtError();
+ }
+
+ return BuildCXXForRangeStmt(ForLoc, CoawaitLoc, ColonLoc, RangeDecl.get(),
+ /*BeginEndDecl=*/nullptr, /*Cond=*/nullptr,
+ /*Inc=*/nullptr, DS, RParenLoc, Kind);
+}
+
+/// \brief Create the initialization, compare, and increment steps for
+/// the range-based for loop expression.
+/// This function does not handle array-based for loops,
+/// which are created in Sema::BuildCXXForRangeStmt.
+///
+/// \returns a ForRangeStatus indicating success or what kind of error occurred.
+/// BeginExpr and EndExpr are set and FRS_Success is returned on success;
+/// CandidateSet and BEF are set and some non-success value is returned on
+/// failure.
+static Sema::ForRangeStatus BuildNonArrayForRange(Sema &SemaRef,
+ Expr *BeginRange, Expr *EndRange,
+ QualType RangeType,
+ VarDecl *BeginVar,
+ VarDecl *EndVar,
+ SourceLocation ColonLoc,
+ OverloadCandidateSet *CandidateSet,
+ ExprResult *BeginExpr,
+ ExprResult *EndExpr,
+ BeginEndFunction *BEF) {
+ DeclarationNameInfo BeginNameInfo(
+ &SemaRef.PP.getIdentifierTable().get("begin"), ColonLoc);
+ DeclarationNameInfo EndNameInfo(&SemaRef.PP.getIdentifierTable().get("end"),
+ ColonLoc);
+
+ LookupResult BeginMemberLookup(SemaRef, BeginNameInfo,
+ Sema::LookupMemberName);
+ LookupResult EndMemberLookup(SemaRef, EndNameInfo, Sema::LookupMemberName);
+
+ if (CXXRecordDecl *D = RangeType->getAsCXXRecordDecl()) {
+ // - if _RangeT is a class type, the unqualified-ids begin and end are
+ // looked up in the scope of class _RangeT as if by class member access
+ // lookup (3.4.5), and if either (or both) finds at least one
+ // declaration, begin-expr and end-expr are __range.begin() and
+ // __range.end(), respectively;
+ SemaRef.LookupQualifiedName(BeginMemberLookup, D);
+ SemaRef.LookupQualifiedName(EndMemberLookup, D);
+
+ if (BeginMemberLookup.empty() != EndMemberLookup.empty()) {
+ SourceLocation RangeLoc = BeginVar->getLocation();
+ *BEF = BeginMemberLookup.empty() ? BEF_end : BEF_begin;
+
+ SemaRef.Diag(RangeLoc, diag::err_for_range_member_begin_end_mismatch)
+ << RangeLoc << BeginRange->getType() << *BEF;
+ return Sema::FRS_DiagnosticIssued;
+ }
+ } else {
+ // - otherwise, begin-expr and end-expr are begin(__range) and
+ // end(__range), respectively, where begin and end are looked up with
+ // argument-dependent lookup (3.4.2). For the purposes of this name
+ // lookup, namespace std is an associated namespace.
+
+ }
+
+ *BEF = BEF_begin;
+ Sema::ForRangeStatus RangeStatus =
+ SemaRef.BuildForRangeBeginEndCall(ColonLoc, ColonLoc, BeginNameInfo,
+ BeginMemberLookup, CandidateSet,
+ BeginRange, BeginExpr);
+
+ if (RangeStatus != Sema::FRS_Success) {
+ if (RangeStatus == Sema::FRS_DiagnosticIssued)
+ SemaRef.Diag(BeginRange->getLocStart(), diag::note_in_for_range)
+ << ColonLoc << BEF_begin << BeginRange->getType();
+ return RangeStatus;
+ }
+ if (FinishForRangeVarDecl(SemaRef, BeginVar, BeginExpr->get(), ColonLoc,
+ diag::err_for_range_iter_deduction_failure)) {
+ NoteForRangeBeginEndFunction(SemaRef, BeginExpr->get(), *BEF);
+ return Sema::FRS_DiagnosticIssued;
+ }
+
+ *BEF = BEF_end;
+ RangeStatus =
+ SemaRef.BuildForRangeBeginEndCall(ColonLoc, ColonLoc, EndNameInfo,
+ EndMemberLookup, CandidateSet,
+ EndRange, EndExpr);
+ if (RangeStatus != Sema::FRS_Success) {
+ if (RangeStatus == Sema::FRS_DiagnosticIssued)
+ SemaRef.Diag(EndRange->getLocStart(), diag::note_in_for_range)
+ << ColonLoc << BEF_end << EndRange->getType();
+ return RangeStatus;
+ }
+ if (FinishForRangeVarDecl(SemaRef, EndVar, EndExpr->get(), ColonLoc,
+ diag::err_for_range_iter_deduction_failure)) {
+ NoteForRangeBeginEndFunction(SemaRef, EndExpr->get(), *BEF);
+ return Sema::FRS_DiagnosticIssued;
+ }
+ return Sema::FRS_Success;
+}
+
+/// Speculatively attempt to dereference an invalid range expression.
+/// If the attempt fails, this function will return a valid, null StmtResult
+/// and emit no diagnostics.
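+///
+/// For example, given a 'std::vector<int> *V', 'for (int x : V)' is rebuilt
+/// as if written 'for (int x : *V)', and a fix-it inserting the '*' is
+/// emitted.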
+static StmtResult RebuildForRangeWithDereference(Sema &SemaRef, Scope *S,
+ SourceLocation ForLoc,
+ SourceLocation CoawaitLoc,
+ Stmt *LoopVarDecl,
+ SourceLocation ColonLoc,
+ Expr *Range,
+ SourceLocation RangeLoc,
+ SourceLocation RParenLoc) {
+ // Determine whether we can rebuild the for-range statement with a
+ // dereferenced range expression.
+ ExprResult AdjustedRange;
+ {
+ Sema::SFINAETrap Trap(SemaRef);
+
+ AdjustedRange = SemaRef.BuildUnaryOp(S, RangeLoc, UO_Deref, Range);
+ if (AdjustedRange.isInvalid())
+ return StmtResult();
+
+ StmtResult SR = SemaRef.ActOnCXXForRangeStmt(
+ S, ForLoc, CoawaitLoc, LoopVarDecl, ColonLoc, AdjustedRange.get(),
+ RParenLoc, Sema::BFRK_Check);
+ if (SR.isInvalid())
+ return StmtResult();
+ }
+
+ // The attempt to dereference worked well enough that it could produce a valid
+ // loop. Produce a fixit, and rebuild the loop with diagnostics enabled, in
+ // case there are any other (non-fatal) problems with it.
+ SemaRef.Diag(RangeLoc, diag::err_for_range_dereference)
+ << Range->getType() << FixItHint::CreateInsertion(RangeLoc, "*");
+ return SemaRef.ActOnCXXForRangeStmt(S, ForLoc, CoawaitLoc, LoopVarDecl,
+ ColonLoc, AdjustedRange.get(), RParenLoc,
+ Sema::BFRK_Rebuild);
+}
+
+namespace {
+/// RAII object to automatically invalidate a declaration if an error occurs.
+struct InvalidateOnErrorScope {
+ InvalidateOnErrorScope(Sema &SemaRef, Decl *D, bool Enabled)
+ : Trap(SemaRef.Diags), D(D), Enabled(Enabled) {}
+ ~InvalidateOnErrorScope() {
+ if (Enabled && Trap.hasErrorOccurred())
+ D->setInvalidDecl();
+ }
+
+ DiagnosticErrorTrap Trap;
+ Decl *D;
+ bool Enabled;
+};
+}
+
+/// BuildCXXForRangeStmt - Build or instantiate a C++11 for-range statement.
+StmtResult
+Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc,
+ SourceLocation ColonLoc,
+ Stmt *RangeDecl, Stmt *BeginEnd, Expr *Cond,
+ Expr *Inc, Stmt *LoopVarDecl,
+ SourceLocation RParenLoc, BuildForRangeKind Kind) {
+ // FIXME: This should not be used during template instantiation. We should
+ // pick up the set of unqualified lookup results for the != and + operators
+ // in the initial parse.
+ //
+ // Testcase (accepts-invalid):
+ // template<typename T> void f() { for (auto x : T()) {} }
+ // namespace N { struct X { X begin(); X end(); int operator*(); }; }
+ // bool operator!=(N::X, N::X); void operator++(N::X);
+ // void g() { f<N::X>(); }
+ Scope *S = getCurScope();
+
+ DeclStmt *RangeDS = cast<DeclStmt>(RangeDecl);
+ VarDecl *RangeVar = cast<VarDecl>(RangeDS->getSingleDecl());
+ QualType RangeVarType = RangeVar->getType();
+
+ DeclStmt *LoopVarDS = cast<DeclStmt>(LoopVarDecl);
+ VarDecl *LoopVar = cast<VarDecl>(LoopVarDS->getSingleDecl());
+
+ // If we hit any errors, mark the loop variable as invalid if its type
+ // contains 'auto'.
+ InvalidateOnErrorScope Invalidate(*this, LoopVar,
+ LoopVar->getType()->isUndeducedType());
+
+ StmtResult BeginEndDecl = BeginEnd;
+ ExprResult NotEqExpr = Cond, IncrExpr = Inc;
+
+ if (RangeVarType->isDependentType()) {
+ // The range is implicitly used as a placeholder when it is dependent.
+ RangeVar->markUsed(Context);
+
+ // Deduce any 'auto's in the loop variable as 'DependentTy'. We'll fill
+ // them in properly when we instantiate the loop.
+ if (!LoopVar->isInvalidDecl() && Kind != BFRK_Check)
+ LoopVar->setType(SubstAutoType(LoopVar->getType(), Context.DependentTy));
+ } else if (!BeginEndDecl.get()) {
+ SourceLocation RangeLoc = RangeVar->getLocation();
+
+ const QualType RangeVarNonRefType = RangeVarType.getNonReferenceType();
+
+ ExprResult BeginRangeRef = BuildDeclRefExpr(RangeVar, RangeVarNonRefType,
+ VK_LValue, ColonLoc);
+ if (BeginRangeRef.isInvalid())
+ return StmtError();
+
+ ExprResult EndRangeRef = BuildDeclRefExpr(RangeVar, RangeVarNonRefType,
+ VK_LValue, ColonLoc);
+ if (EndRangeRef.isInvalid())
+ return StmtError();
+
+ QualType AutoType = Context.getAutoDeductType();
+ Expr *Range = RangeVar->getInit();
+ if (!Range)
+ return StmtError();
+ QualType RangeType = Range->getType();
+
+ if (RequireCompleteType(RangeLoc, RangeType,
+ diag::err_for_range_incomplete_type))
+ return StmtError();
+
+ // Build auto __begin = begin-expr, __end = end-expr.
+ VarDecl *BeginVar = BuildForRangeVarDecl(*this, ColonLoc, AutoType,
+ "__begin");
+ VarDecl *EndVar = BuildForRangeVarDecl(*this, ColonLoc, AutoType,
+ "__end");
+
+ // Build begin-expr and end-expr and attach to __begin and __end variables.
+ ExprResult BeginExpr, EndExpr;
+ if (const ArrayType *UnqAT = RangeType->getAsArrayTypeUnsafe()) {
+ // - if _RangeT is an array type, begin-expr and end-expr are __range and
+ // __range + __bound, respectively, where __bound is the array bound. If
+ // _RangeT is an array of unknown size or an array of incomplete type,
+ // the program is ill-formed;
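+ // For example, for 'int arr[4]', begin-expr is 'arr' and end-expr is
+ // 'arr + 4'.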
+
+ // begin-expr is __range.
+ BeginExpr = BeginRangeRef;
+ if (FinishForRangeVarDecl(*this, BeginVar, BeginRangeRef.get(), ColonLoc,
+ diag::err_for_range_iter_deduction_failure)) {
+ NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
+ return StmtError();
+ }
+
+ // Find the array bound.
+ ExprResult BoundExpr;
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(UnqAT))
+ BoundExpr = IntegerLiteral::Create(
+ Context, CAT->getSize(), Context.getPointerDiffType(), RangeLoc);
+ else if (const VariableArrayType *VAT =
+ dyn_cast<VariableArrayType>(UnqAT))
+ BoundExpr = VAT->getSizeExpr();
+ else {
+ // Can't be a DependentSizedArrayType or an IncompleteArrayType since
+ // UnqAT is not incomplete and Range is not type-dependent.
+ llvm_unreachable("Unexpected array type in for-range");
+ }
+
+ // end-expr is __range + __bound.
+ EndExpr = ActOnBinOp(S, ColonLoc, tok::plus, EndRangeRef.get(),
+ BoundExpr.get());
+ if (EndExpr.isInvalid())
+ return StmtError();
+ if (FinishForRangeVarDecl(*this, EndVar, EndExpr.get(), ColonLoc,
+ diag::err_for_range_iter_deduction_failure)) {
+ NoteForRangeBeginEndFunction(*this, EndExpr.get(), BEF_end);
+ return StmtError();
+ }
+ } else {
+ OverloadCandidateSet CandidateSet(RangeLoc,
+ OverloadCandidateSet::CSK_Normal);
+ BeginEndFunction BEFFailure;
+ ForRangeStatus RangeStatus =
+ BuildNonArrayForRange(*this, BeginRangeRef.get(),
+ EndRangeRef.get(), RangeType,
+ BeginVar, EndVar, ColonLoc, &CandidateSet,
+ &BeginExpr, &EndExpr, &BEFFailure);
+
+ if (Kind == BFRK_Build && RangeStatus == FRS_NoViableFunction &&
+ BEFFailure == BEF_begin) {
+ // If the range is being built from an array parameter, emit a
+ // diagnostic that it is being treated as a pointer.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Range)) {
+ if (ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
+ QualType ArrayTy = PVD->getOriginalType();
+ QualType PointerTy = PVD->getType();
+ if (PointerTy->isPointerType() && ArrayTy->isArrayType()) {
+ Diag(Range->getLocStart(), diag::err_range_on_array_parameter)
+ << RangeLoc << PVD << ArrayTy << PointerTy;
+ Diag(PVD->getLocation(), diag::note_declared_at);
+ return StmtError();
+ }
+ }
+ }
+
+ // If building the range failed, try dereferencing the range expression
+ // unless a diagnostic was issued or the end function is problematic.
+ StmtResult SR = RebuildForRangeWithDereference(*this, S, ForLoc,
+ CoawaitLoc,
+ LoopVarDecl, ColonLoc,
+ Range, RangeLoc,
+ RParenLoc);
+ if (SR.isInvalid() || SR.isUsable())
+ return SR;
+ }
+
+ // Otherwise, emit diagnostics if we haven't already.
+ if (RangeStatus == FRS_NoViableFunction) {
+ Expr *Range = BEFFailure ? EndRangeRef.get() : BeginRangeRef.get();
+ Diag(Range->getLocStart(), diag::err_for_range_invalid)
+ << RangeLoc << Range->getType() << BEFFailure;
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Range);
+ }
+ // Return an error if no fix was discovered.
+ if (RangeStatus != FRS_Success)
+ return StmtError();
+ }
+
+ assert(!BeginExpr.isInvalid() && !EndExpr.isInvalid() &&
+ "invalid range expression in for loop");
+
+ // C++11 [dcl.spec.auto]p7: BeginType and EndType must be the same.
+ QualType BeginType = BeginVar->getType(), EndType = EndVar->getType();
+ if (!Context.hasSameType(BeginType, EndType)) {
+ Diag(RangeLoc, diag::err_for_range_begin_end_types_differ)
+ << BeginType << EndType;
+ NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
+ NoteForRangeBeginEndFunction(*this, EndExpr.get(), BEF_end);
+ }
+
+ Decl *BeginEndDecls[] = { BeginVar, EndVar };
+ // Claim the type doesn't contain auto: we've already done the checking.
+ DeclGroupPtrTy BeginEndGroup =
+ BuildDeclaratorGroup(MutableArrayRef<Decl *>(BeginEndDecls, 2),
+ /*TypeMayContainAuto=*/ false);
+ BeginEndDecl = ActOnDeclStmt(BeginEndGroup, ColonLoc, ColonLoc);
+
+ const QualType BeginRefNonRefType = BeginType.getNonReferenceType();
+ ExprResult BeginRef = BuildDeclRefExpr(BeginVar, BeginRefNonRefType,
+ VK_LValue, ColonLoc);
+ if (BeginRef.isInvalid())
+ return StmtError();
+
+ ExprResult EndRef = BuildDeclRefExpr(EndVar, EndType.getNonReferenceType(),
+ VK_LValue, ColonLoc);
+ if (EndRef.isInvalid())
+ return StmtError();
+
+ // Build and check __begin != __end expression.
+ NotEqExpr = ActOnBinOp(S, ColonLoc, tok::exclaimequal,
+ BeginRef.get(), EndRef.get());
+ NotEqExpr = ActOnBooleanCondition(S, ColonLoc, NotEqExpr.get());
+ NotEqExpr = ActOnFinishFullExpr(NotEqExpr.get());
+ if (NotEqExpr.isInvalid()) {
+ Diag(RangeLoc, diag::note_for_range_invalid_iterator)
+ << RangeLoc << 0 << BeginRangeRef.get()->getType();
+ NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
+ if (!Context.hasSameType(BeginType, EndType))
+ NoteForRangeBeginEndFunction(*this, EndExpr.get(), BEF_end);
+ return StmtError();
+ }
+
+ // Build and check ++__begin expression.
+ BeginRef = BuildDeclRefExpr(BeginVar, BeginRefNonRefType,
+ VK_LValue, ColonLoc);
+ if (BeginRef.isInvalid())
+ return StmtError();
+
+ IncrExpr = ActOnUnaryOp(S, ColonLoc, tok::plusplus, BeginRef.get());
+ if (!IncrExpr.isInvalid() && CoawaitLoc.isValid())
+ IncrExpr = ActOnCoawaitExpr(S, CoawaitLoc, IncrExpr.get());
+ if (!IncrExpr.isInvalid())
+ IncrExpr = ActOnFinishFullExpr(IncrExpr.get());
+ if (IncrExpr.isInvalid()) {
+ Diag(RangeLoc, diag::note_for_range_invalid_iterator)
+ << RangeLoc << 2 << BeginRangeRef.get()->getType();
+ NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
+ return StmtError();
+ }
+
+ // Build and check *__begin expression.
+ BeginRef = BuildDeclRefExpr(BeginVar, BeginRefNonRefType,
+ VK_LValue, ColonLoc);
+ if (BeginRef.isInvalid())
+ return StmtError();
+
+ ExprResult DerefExpr = ActOnUnaryOp(S, ColonLoc, tok::star, BeginRef.get());
+ if (DerefExpr.isInvalid()) {
+ Diag(RangeLoc, diag::note_for_range_invalid_iterator)
+ << RangeLoc << 1 << BeginRangeRef.get()->getType();
+ NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
+ return StmtError();
+ }
+
+ // Attach *__begin as initializer for VD. Don't touch it if we're just
+ // trying to determine whether this would be a valid range.
+ if (!LoopVar->isInvalidDecl() && Kind != BFRK_Check) {
+ AddInitializerToDecl(LoopVar, DerefExpr.get(), /*DirectInit=*/false,
+ /*TypeMayContainAuto=*/true);
+ if (LoopVar->isInvalidDecl())
+ NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
+ }
+ }
+
+ // Don't bother to actually allocate the result if we're just trying to
+ // determine whether it would be valid.
+ if (Kind == BFRK_Check)
+ return StmtResult();
+
+ return new (Context) CXXForRangeStmt(
+ RangeDS, cast_or_null<DeclStmt>(BeginEndDecl.get()), NotEqExpr.get(),
+ IncrExpr.get(), LoopVarDS, /*Body=*/nullptr, ForLoc, CoawaitLoc,
+ ColonLoc, RParenLoc);
+}
+
+/// FinishObjCForCollectionStmt - Attach the body to an Objective-C foreach
+/// statement.
+StmtResult Sema::FinishObjCForCollectionStmt(Stmt *S, Stmt *B) {
+ if (!S || !B)
+ return StmtError();
+ ObjCForCollectionStmt * ForStmt = cast<ObjCForCollectionStmt>(S);
+
+ ForStmt->setBody(B);
+ return S;
+}
+
+// Warn when the loop variable is a const reference that creates a copy.
+// Suggest using the non-reference type for copies. If a copy can be prevented
+// suggest the const reference type that would do so.
+// For instance, given "for (const Foo &x : Range)", suggest
+// "for (const Foo x : Range)" to denote a copy is made for the loop. If
+// possible, also suggest "for (const Bar &x : Range)" if this type prevents
+// the copy altogether.
+static void DiagnoseForRangeReferenceVariableCopies(Sema &SemaRef,
+ const VarDecl *VD,
+ QualType RangeInitType) {
+ const Expr *InitExpr = VD->getInit();
+ if (!InitExpr)
+ return;
+
+ QualType VariableType = VD->getType();
+
+ const MaterializeTemporaryExpr *MTE =
+ dyn_cast<MaterializeTemporaryExpr>(InitExpr);
+
+ // No copy made.
+ if (!MTE)
+ return;
+
+ const Expr *E = MTE->GetTemporaryExpr()->IgnoreImpCasts();
+
+ // Searching for either UnaryOperator for dereference of a pointer or
+ // CXXOperatorCallExpr for handling iterators.
+ while (!isa<CXXOperatorCallExpr>(E) && !isa<UnaryOperator>(E)) {
+ if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(E)) {
+ E = CCE->getArg(0);
+ } else if (const CXXMemberCallExpr *Call = dyn_cast<CXXMemberCallExpr>(E)) {
+ const MemberExpr *ME = cast<MemberExpr>(Call->getCallee());
+ E = ME->getBase();
+ } else {
+ const MaterializeTemporaryExpr *MTE = cast<MaterializeTemporaryExpr>(E);
+ E = MTE->GetTemporaryExpr();
+ }
+ E = E->IgnoreImpCasts();
+ }
+
+ bool ReturnsReference = false;
+ if (isa<UnaryOperator>(E)) {
+ ReturnsReference = true;
+ } else {
+ const CXXOperatorCallExpr *Call = cast<CXXOperatorCallExpr>(E);
+ const FunctionDecl *FD = Call->getDirectCallee();
+ QualType ReturnType = FD->getReturnType();
+ ReturnsReference = ReturnType->isReferenceType();
+ }
+
+ if (ReturnsReference) {
+ // Loop variable creates a temporary. Suggest either to go with
+ // non-reference loop variable to indicate a copy is made, or
+ // the correct type to bind a const reference.
+ SemaRef.Diag(VD->getLocation(), diag::warn_for_range_const_reference_copy)
+ << VD << VariableType << E->getType();
+ QualType NonReferenceType = VariableType.getNonReferenceType();
+ NonReferenceType.removeLocalConst();
+ QualType NewReferenceType =
+ SemaRef.Context.getLValueReferenceType(E->getType().withConst());
+ SemaRef.Diag(VD->getLocStart(), diag::note_use_type_or_non_reference)
+ << NonReferenceType << NewReferenceType << VD->getSourceRange();
+ } else {
+ // The range always returns a copy, so a temporary is always created.
+ // Suggest removing the reference from the loop variable.
+ SemaRef.Diag(VD->getLocation(), diag::warn_for_range_variable_always_copy)
+ << VD << RangeInitType;
+ QualType NonReferenceType = VariableType.getNonReferenceType();
+ NonReferenceType.removeLocalConst();
+ SemaRef.Diag(VD->getLocStart(), diag::note_use_non_reference_type)
+ << NonReferenceType << VD->getSourceRange();
+ }
+}
+
+// Warns when the loop variable can be changed to a reference type to
+// prevent a copy. For instance, if given "for (const Foo x : Range)" suggest
+// "for (const Foo &x : Range)" if this form does not make a copy.
+static void DiagnoseForRangeConstVariableCopies(Sema &SemaRef,
+ const VarDecl *VD) {
+ const Expr *InitExpr = VD->getInit();
+ if (!InitExpr)
+ return;
+
+ QualType VariableType = VD->getType();
+
+ if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(InitExpr)) {
+ if (!CE->getConstructor()->isCopyConstructor())
+ return;
+ } else if (const CastExpr *CE = dyn_cast<CastExpr>(InitExpr)) {
+ if (CE->getCastKind() != CK_LValueToRValue)
+ return;
+ } else {
+ return;
+ }
+
+ // TODO: Determine a maximum size that a POD type can be before a diagnostic
+ // should be emitted. Also, only ignore POD types with trivial copy
+ // constructors.
+ if (VariableType.isPODType(SemaRef.Context))
+ return;
+
+ // Suggest changing from a const variable to a const reference variable
+ // if doing so will prevent a copy.
+ SemaRef.Diag(VD->getLocation(), diag::warn_for_range_copy)
+ << VD << VariableType << InitExpr->getType();
+ SemaRef.Diag(VD->getLocStart(), diag::note_use_reference_type)
+ << SemaRef.Context.getLValueReferenceType(VariableType)
+ << VD->getSourceRange();
+}
+
+/// DiagnoseForRangeVariableCopies - Diagnose three cases and fixes for them.
+/// 1) for (const foo &x : foos) where foos only returns a copy. Suggest
+/// using "const foo x" to show that a copy is made
+/// 2) for (const bar &x : foos) where bar is a temporary initialized by bar.
+/// Suggest either "const bar x" to keep the copying or "const foo& x" to
+/// prevent the copy.
+/// 3) for (const foo x : foos) where x is constructed from a reference foo.
+/// Suggest "const foo &x" to prevent the copy.
+static void DiagnoseForRangeVariableCopies(Sema &SemaRef,
+ const CXXForRangeStmt *ForStmt) {
+ if (SemaRef.Diags.isIgnored(diag::warn_for_range_const_reference_copy,
+ ForStmt->getLocStart()) &&
+ SemaRef.Diags.isIgnored(diag::warn_for_range_variable_always_copy,
+ ForStmt->getLocStart()) &&
+ SemaRef.Diags.isIgnored(diag::warn_for_range_copy,
+ ForStmt->getLocStart())) {
+ return;
+ }
+
+ const VarDecl *VD = ForStmt->getLoopVariable();
+ if (!VD)
+ return;
+
+ QualType VariableType = VD->getType();
+
+ if (VariableType->isIncompleteType())
+ return;
+
+ const Expr *InitExpr = VD->getInit();
+ if (!InitExpr)
+ return;
+
+ if (VariableType->isReferenceType()) {
+ DiagnoseForRangeReferenceVariableCopies(SemaRef, VD,
+ ForStmt->getRangeInit()->getType());
+ } else if (VariableType.isConstQualified()) {
+ DiagnoseForRangeConstVariableCopies(SemaRef, VD);
+ }
+}
+
+/// FinishCXXForRangeStmt - Attach the body to a C++11 for-range statement.
+/// This is a separate step from ActOnCXXForRangeStmt because analysis of the
+/// body cannot be performed until after the type of the range variable is
+/// determined.
+StmtResult Sema::FinishCXXForRangeStmt(Stmt *S, Stmt *B) {
+ if (!S || !B)
+ return StmtError();
+
+ if (isa<ObjCForCollectionStmt>(S))
+ return FinishObjCForCollectionStmt(S, B);
+
+ CXXForRangeStmt *ForStmt = cast<CXXForRangeStmt>(S);
+ ForStmt->setBody(B);
+
+ DiagnoseEmptyStmtBody(ForStmt->getRParenLoc(), B,
+ diag::warn_empty_range_based_for_body);
+
+ DiagnoseForRangeVariableCopies(*this, ForStmt);
+
+ return S;
+}
+
+StmtResult Sema::ActOnGotoStmt(SourceLocation GotoLoc,
+ SourceLocation LabelLoc,
+ LabelDecl *TheDecl) {
+ getCurFunction()->setHasBranchIntoScope();
+ TheDecl->markUsed(Context);
+ return new (Context) GotoStmt(TheDecl, GotoLoc, LabelLoc);
+}
+
+StmtResult
+Sema::ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc,
+ Expr *E) {
+ // Convert operand to void*
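+ // (With GCC's labels-as-values extension, e.g. 'void *P = &&L; goto *P;',
+ // the operand is already of type 'void *'.)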
+ if (!E->isTypeDependent()) {
+ QualType ETy = E->getType();
+ QualType DestTy = Context.getPointerType(Context.VoidTy.withConst());
+ ExprResult ExprRes = E;
+ AssignConvertType ConvTy =
+ CheckSingleAssignmentConstraints(DestTy, ExprRes);
+ if (ExprRes.isInvalid())
+ return StmtError();
+ E = ExprRes.get();
+ if (DiagnoseAssignmentResult(ConvTy, StarLoc, DestTy, ETy, E, AA_Passing))
+ return StmtError();
+ }
+
+ ExprResult ExprRes = ActOnFinishFullExpr(E);
+ if (ExprRes.isInvalid())
+ return StmtError();
+ E = ExprRes.get();
+
+ getCurFunction()->setHasIndirectGoto();
+
+ return new (Context) IndirectGotoStmt(GotoLoc, StarLoc, E);
+}
+
+static void CheckJumpOutOfSEHFinally(Sema &S, SourceLocation Loc,
+ const Scope &DestScope) {
+ if (!S.CurrentSEHFinally.empty() &&
+ DestScope.Contains(*S.CurrentSEHFinally.back())) {
+ S.Diag(Loc, diag::warn_jump_out_of_seh_finally);
+ }
+}
+
+StmtResult
+Sema::ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope) {
+ Scope *S = CurScope->getContinueParent();
+ if (!S) {
+ // C99 6.8.6.2p1: A continue statement shall appear only in or as a loop
+ // body.
+ return StmtError(Diag(ContinueLoc, diag::err_continue_not_in_loop));
+ }
+ CheckJumpOutOfSEHFinally(*this, ContinueLoc, *S);
+
+ return new (Context) ContinueStmt(ContinueLoc);
+}
+
+StmtResult
+Sema::ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope) {
+ Scope *S = CurScope->getBreakParent();
+ if (!S) {
+ // C99 6.8.6.3p1: A break shall appear only in or as a switch/loop body.
+ return StmtError(Diag(BreakLoc, diag::err_break_not_in_loop_or_switch));
+ }
+ if (S->isOpenMPLoopScope())
+ return StmtError(Diag(BreakLoc, diag::err_omp_loop_cannot_use_stmt)
+ << "break");
+ CheckJumpOutOfSEHFinally(*this, BreakLoc, *S);
+
+ return new (Context) BreakStmt(BreakLoc);
+}
+
+/// \brief Determine whether the given expression is a candidate for
+/// copy elision in either a return statement or a throw expression.
+///
+/// \param ReturnType If we're determining the copy elision candidate for
+/// a return statement, this is the return type of the function. If we're
+/// determining the copy elision candidate for a throw expression, this will
+/// be a NULL type.
+///
+/// \param E The expression being returned from the function or block, or
+/// being thrown.
+///
+/// \param AllowFunctionParameter Whether we allow function parameters to
+/// be considered NRVO candidates. C++ prohibits this for NRVO itself, but
+/// we re-use this logic to determine whether we should try to move as part of
+/// a return or throw (which does allow function parameters).
+///
+/// \returns The NRVO candidate variable, if the return statement may use the
+/// NRVO, or NULL if there is no such candidate.
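+///
+/// For example, in 'X f() { X x; return x; }' the candidate is 'x', while
+/// 'return global_x;' has no candidate because the variable is not automatic.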
+VarDecl *Sema::getCopyElisionCandidate(QualType ReturnType,
+ Expr *E,
+ bool AllowFunctionParameter) {
+ if (!getLangOpts().CPlusPlus)
+ return nullptr;
+
+ // - in a return statement in a function [where] ...
+ // ... the expression is the name of a non-volatile automatic object ...
+ DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E->IgnoreParens());
+ if (!DR || DR->refersToEnclosingVariableOrCapture())
+ return nullptr;
+ VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
+ if (!VD)
+ return nullptr;
+
+ if (isCopyElisionCandidate(ReturnType, VD, AllowFunctionParameter))
+ return VD;
+ return nullptr;
+}
+
+bool Sema::isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
+ bool AllowFunctionParameter) {
+ QualType VDType = VD->getType();
+ // - in a return statement in a function with ...
+ // ... a class return type ...
+ if (!ReturnType.isNull() && !ReturnType->isDependentType()) {
+ if (!ReturnType->isRecordType())
+ return false;
+ // ... the same cv-unqualified type as the function return type ...
+ if (!VDType->isDependentType() &&
+ !Context.hasSameUnqualifiedType(ReturnType, VDType))
+ return false;
+ }
+
+ // ...object (other than a function or catch-clause parameter)...
+ if (VD->getKind() != Decl::Var &&
+ !(AllowFunctionParameter && VD->getKind() == Decl::ParmVar))
+ return false;
+ if (VD->isExceptionVariable()) return false;
+
+ // ...automatic...
+ if (!VD->hasLocalStorage()) return false;
+
+ // ...non-volatile...
+ if (VD->getType().isVolatileQualified()) return false;
+
+ // __block variables can't be allocated in a way that permits NRVO.
+ if (VD->hasAttr<BlocksAttr>()) return false;
+
+ // Variables with higher required alignment than their type's ABI
+ // alignment cannot use NRVO.
+ if (!VD->getType()->isDependentType() && VD->hasAttr<AlignedAttr>() &&
+ Context.getDeclAlign(VD) > Context.getTypeAlignInChars(VD->getType()))
+ return false;
+
+ return true;
+}
+
+/// \brief Perform the initialization of a potentially-movable value, which
+/// is the operand of a return statement.
+///
+/// This routine implements C++0x [class.copy]p33, which attempts to treat
+/// returned lvalues as rvalues in certain cases (to prefer move construction),
+/// then falls back to treating them as lvalues if that failed.
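+///
+/// For example, for 'X f() { X x; return x; }', overload resolution for
+/// initializing the result is first performed as if 'x' were an rvalue; if
+/// that fails, or selects a constructor whose first parameter is not an
+/// rvalue reference to X, the copy as written is used instead.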
+ExprResult
+Sema::PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
+ const VarDecl *NRVOCandidate,
+ QualType ResultType,
+ Expr *Value,
+ bool AllowNRVO) {
+ // C++0x [class.copy]p33:
+ // When the criteria for elision of a copy operation are met or would
+ // be met save for the fact that the source object is a function
+ // parameter, and the object to be copied is designated by an lvalue,
+ // overload resolution to select the constructor for the copy is first
+ // performed as if the object were designated by an rvalue.
+ ExprResult Res = ExprError();
+ if (AllowNRVO &&
+ (NRVOCandidate || getCopyElisionCandidate(ResultType, Value, true))) {
+ ImplicitCastExpr AsRvalue(ImplicitCastExpr::OnStack,
+ Value->getType(), CK_NoOp, Value, VK_XValue);
+
+ Expr *InitExpr = &AsRvalue;
+ InitializationKind Kind
+ = InitializationKind::CreateCopy(Value->getLocStart(),
+ Value->getLocStart());
+ InitializationSequence Seq(*this, Entity, Kind, InitExpr);
+
+ // [...] If overload resolution fails, or if the type of the first
+ // parameter of the selected constructor is not an rvalue reference
+ // to the object's type (possibly cv-qualified), overload resolution
+ // is performed again, considering the object as an lvalue.
+ if (Seq) {
+ for (InitializationSequence::step_iterator Step = Seq.step_begin(),
+ StepEnd = Seq.step_end();
+ Step != StepEnd; ++Step) {
+ if (Step->Kind != InitializationSequence::SK_ConstructorInitialization)
+ continue;
+
+ CXXConstructorDecl *Constructor
+ = cast<CXXConstructorDecl>(Step->Function.Function);
+
+ const RValueReferenceType *RRefType
+ = Constructor->getParamDecl(0)->getType()
+ ->getAs<RValueReferenceType>();
+
+ // If we don't meet the criteria, break out now.
+ if (!RRefType ||
+ !Context.hasSameUnqualifiedType(RRefType->getPointeeType(),
+ Context.getTypeDeclType(Constructor->getParent())))
+ break;
+
+ // Promote "AsRvalue" to the heap, since we now need this
+ // expression node to persist.
+ Value = ImplicitCastExpr::Create(Context, Value->getType(),
+ CK_NoOp, Value, nullptr, VK_XValue);
+
+ // Complete type-checking the initialization of the return type
+ // using the constructor we found.
+ Res = Seq.Perform(*this, Entity, Kind, Value);
+ }
+ }
+ }
+
+ // Either we didn't meet the criteria for treating an lvalue as an rvalue,
+ // above, or overload resolution failed. Either way, we need to try
+ // (again) now with the return value expression as written.
+ if (Res.isInvalid())
+ Res = PerformCopyInitialization(Entity, SourceLocation(), Value);
+
+ return Res;
+}
+
+/// \brief Determine whether the declared return type of the specified function
+/// contains 'auto'.
+static bool hasDeducedReturnType(FunctionDecl *FD) {
+ const FunctionProtoType *FPT =
+ FD->getTypeSourceInfo()->getType()->castAs<FunctionProtoType>();
+ return FPT->getReturnType()->isUndeducedType();
+}
+
+/// ActOnCapScopeReturnStmt - Utility routine to type-check return statements
+/// for capturing scopes.
+///
+StmtResult
+Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
+ // If this is the first return we've seen, infer the return type.
+ // [expr.prim.lambda]p4 in C++11; block literals follow the same rules.
+ CapturingScopeInfo *CurCap = cast<CapturingScopeInfo>(getCurFunction());
+ QualType FnRetType = CurCap->ReturnType;
+ LambdaScopeInfo *CurLambda = dyn_cast<LambdaScopeInfo>(CurCap);
+
+ if (CurLambda && hasDeducedReturnType(CurLambda->CallOperator)) {
+ // In C++1y, the return type may involve 'auto'.
+ // FIXME: Blocks might have a return type of 'auto' explicitly specified.
+ FunctionDecl *FD = CurLambda->CallOperator;
+ if (CurCap->ReturnType.isNull())
+ CurCap->ReturnType = FD->getReturnType();
+
+ AutoType *AT = CurCap->ReturnType->getContainedAutoType();
+ assert(AT && "lost auto type from lambda return type");
+ if (DeduceFunctionTypeFromReturnExpr(FD, ReturnLoc, RetValExp, AT)) {
+ FD->setInvalidDecl();
+ return StmtError();
+ }
+ CurCap->ReturnType = FnRetType = FD->getReturnType();
+ } else if (CurCap->HasImplicitReturnType) {
+ // For blocks/lambdas with implicit return types, we check each return
+ // statement individually, and deduce the common return type when the block
+ // or lambda is completed.
+ // FIXME: Fold this into the 'auto' codepath above.
+ if (RetValExp && !isa<InitListExpr>(RetValExp)) {
+ ExprResult Result = DefaultFunctionArrayLvalueConversion(RetValExp);
+ if (Result.isInvalid())
+ return StmtError();
+ RetValExp = Result.get();
+
+ // DR1048: even prior to C++14, we should use the 'auto' deduction rules
+ // when deducing a return type for a lambda-expression (or by extension
+ // for a block). These rules differ from the stated C++11 rules only in
+ // that they remove top-level cv-qualifiers.
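+ // For example, a lambda whose return expression is a 'const int' lvalue
+ // deduces a return type of 'int', not 'const int'.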
+ if (!CurContext->isDependentContext())
+ FnRetType = RetValExp->getType().getUnqualifiedType();
+ else
+ FnRetType = CurCap->ReturnType = Context.DependentTy;
+ } else {
+ if (RetValExp) {
+ // C++11 [expr.lambda.prim]p4 bans inferring the result from an
+ // initializer list, because it is not an expression (even
+ // though we represent it as one). We still deduce 'void'.
+ Diag(ReturnLoc, diag::err_lambda_return_init_list)
+ << RetValExp->getSourceRange();
+ }
+
+ FnRetType = Context.VoidTy;
+ }
+
+ // Although we'll properly infer the type of the block once it's completed,
+ // make sure we provide a return type now for better error recovery.
+ if (CurCap->ReturnType.isNull())
+ CurCap->ReturnType = FnRetType;
+ }
+ assert(!FnRetType.isNull());
+
+ if (BlockScopeInfo *CurBlock = dyn_cast<BlockScopeInfo>(CurCap)) {
+ if (CurBlock->FunctionType->getAs<FunctionType>()->getNoReturnAttr()) {
+ Diag(ReturnLoc, diag::err_noreturn_block_has_return_expr);
+ return StmtError();
+ }
+ } else if (CapturedRegionScopeInfo *CurRegion =
+ dyn_cast<CapturedRegionScopeInfo>(CurCap)) {
+ Diag(ReturnLoc, diag::err_return_in_captured_stmt)
+ << CurRegion->getRegionName();
+ return StmtError();
+ } else {
+ assert(CurLambda && "unknown kind of captured scope");
+ if (CurLambda->CallOperator->getType()->getAs<FunctionType>()
+ ->getNoReturnAttr()) {
+ Diag(ReturnLoc, diag::err_noreturn_lambda_has_return_expr);
+ return StmtError();
+ }
+ }
+
+ // Otherwise, verify that this result type matches the previous one. We are
+ // pickier with blocks than for normal functions because we don't have GCC
+ // compatibility to worry about here.
+ const VarDecl *NRVOCandidate = nullptr;
+ if (FnRetType->isDependentType()) {
+ // Delay processing for now. TODO: there are lots of dependent
+ // types we can conclusively prove aren't void.
+ } else if (FnRetType->isVoidType()) {
+ if (RetValExp && !isa<InitListExpr>(RetValExp) &&
+ !(getLangOpts().CPlusPlus &&
+ (RetValExp->isTypeDependent() ||
+ RetValExp->getType()->isVoidType()))) {
+ if (!getLangOpts().CPlusPlus &&
+ RetValExp->getType()->isVoidType())
+ Diag(ReturnLoc, diag::ext_return_has_void_expr) << "literal" << 2;
+ else {
+ Diag(ReturnLoc, diag::err_return_block_has_expr);
+ RetValExp = nullptr;
+ }
+ }
+ } else if (!RetValExp) {
+ return StmtError(Diag(ReturnLoc, diag::err_block_return_missing_expr));
+ } else if (!RetValExp->isTypeDependent()) {
+ // we have a non-void block with an expression, continue checking
+
+ // C99 6.8.6.4p3(136): The return statement is not an assignment. The
+ // overlap restriction of subclause 6.5.16.1 does not apply to the case of
+ // function return.
+
+ // In C++ the return statement is handled via a copy initialization,
+ // the C version of which boils down to CheckSingleAssignmentConstraints.
+ NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, false);
+ InitializedEntity Entity = InitializedEntity::InitializeResult(ReturnLoc,
+ FnRetType,
+ NRVOCandidate != nullptr);
+ ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRVOCandidate,
+ FnRetType, RetValExp);
+ if (Res.isInvalid()) {
+ // FIXME: Cleanup temporaries here, anyway?
+ return StmtError();
+ }
+ RetValExp = Res.get();
+ CheckReturnValExpr(RetValExp, FnRetType, ReturnLoc);
+ } else {
+ NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, false);
+ }
+
+ if (RetValExp) {
+ ExprResult ER = ActOnFinishFullExpr(RetValExp, ReturnLoc);
+ if (ER.isInvalid())
+ return StmtError();
+ RetValExp = ER.get();
+ }
+ ReturnStmt *Result = new (Context) ReturnStmt(ReturnLoc, RetValExp,
+ NRVOCandidate);
+
+ // If we need to check for the named return value optimization,
+ // or if we need to infer the return type,
+ // save the return statement in our scope for later processing.
+ if (CurCap->HasImplicitReturnType || NRVOCandidate)
+ FunctionScopes.back()->Returns.push_back(Result);
+
+ if (FunctionScopes.back()->FirstReturnLoc.isInvalid())
+ FunctionScopes.back()->FirstReturnLoc = ReturnLoc;
+
+ return Result;
+}
+
+namespace {
+/// \brief Marks all typedefs in all local classes in a type referenced.
+///
+/// In a function like
+/// auto f() {
+/// struct S { typedef int a; };
+/// return S();
+/// }
+///
+/// the local type escapes and could be referenced in some TUs but not in
+/// others. Pretend that all local typedefs are always referenced, to not warn
+/// on this. This isn't necessary if f has internal linkage, or the typedef
+/// is private.
+class LocalTypedefNameReferencer
+ : public RecursiveASTVisitor<LocalTypedefNameReferencer> {
+public:
+ LocalTypedefNameReferencer(Sema &S) : S(S) {}
+ bool VisitRecordType(const RecordType *RT);
+private:
+ Sema &S;
+};
+bool LocalTypedefNameReferencer::VisitRecordType(const RecordType *RT) {
+ auto *R = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (!R || !R->isLocalClass() || !R->isLocalClass()->isExternallyVisible() ||
+ R->isDependentType())
+ return true;
+ for (auto *TmpD : R->decls())
+ if (auto *T = dyn_cast<TypedefNameDecl>(TmpD))
+ if (T->getAccess() != AS_private || R->hasFriends())
+ S.MarkAnyDeclReferenced(T->getLocation(), T, /*OdrUse=*/false);
+ return true;
+}
+}
+
+TypeLoc Sema::getReturnTypeLoc(FunctionDecl *FD) const {
+ TypeLoc TL = FD->getTypeSourceInfo()->getTypeLoc().IgnoreParens();
+ while (auto ATL = TL.getAs<AttributedTypeLoc>())
+ TL = ATL.getModifiedLoc().IgnoreParens();
+ return TL.castAs<FunctionProtoTypeLoc>().getReturnLoc();
+}
+
+/// Deduce the return type for a function from a returned expression, per
+/// C++1y [dcl.spec.auto]p6.
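+///
+/// For example, 'auto f() { return 42; }' deduces the return type 'int';
+/// a later 'return 3.0;' in the same function is then diagnosed because the
+/// two deductions differ.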
+bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
+ SourceLocation ReturnLoc,
+ Expr *&RetExpr,
+ AutoType *AT) {
+ TypeLoc OrigResultType = getReturnTypeLoc(FD);
+ QualType Deduced;
+
+ if (RetExpr && isa<InitListExpr>(RetExpr)) {
+ // If the deduction is for a return statement and the initializer is
+ // a braced-init-list, the program is ill-formed.
+ Diag(RetExpr->getExprLoc(),
+ getCurLambda() ? diag::err_lambda_return_init_list
+ : diag::err_auto_fn_return_init_list)
+ << RetExpr->getSourceRange();
+ return true;
+ }
+
+ if (FD->isDependentContext()) {
+ // C++1y [dcl.spec.auto]p12:
+ // Return type deduction [...] occurs when the definition is
+ // instantiated even if the function body contains a return
+ // statement with a non-type-dependent operand.
+ assert(AT->isDeduced() && "should have deduced to dependent type");
+ return false;
+ }
+
+ if (RetExpr) {
+ // Otherwise, [...] deduce a value for U using the rules of template
+ // argument deduction.
+ DeduceAutoResult DAR = DeduceAutoType(OrigResultType, RetExpr, Deduced);
+
+ if (DAR == DAR_Failed && !FD->isInvalidDecl())
+ Diag(RetExpr->getExprLoc(), diag::err_auto_fn_deduction_failure)
+ << OrigResultType.getType() << RetExpr->getType();
+
+ if (DAR != DAR_Succeeded)
+ return true;
+
+ // If a local type is part of the returned type, mark its fields as
+ // referenced.
+ LocalTypedefNameReferencer Referencer(*this);
+ Referencer.TraverseType(RetExpr->getType());
+ } else {
+ // In the case of a return with no operand, the initializer is considered
+ // to be void().
+ //
+ // Deduction here can only succeed if the return type is exactly 'cv auto'
+ // or 'decltype(auto)', so just check for that case directly.
+ if (!OrigResultType.getType()->getAs<AutoType>()) {
+ Diag(ReturnLoc, diag::err_auto_fn_return_void_but_not_auto)
+ << OrigResultType.getType();
+ return true;
+ }
+ // We always deduce U = void in this case.
+ Deduced = SubstAutoType(OrigResultType.getType(), Context.VoidTy);
+ if (Deduced.isNull())
+ return true;
+ }
+
+ // If a function with a declared return type that contains a placeholder type
+ // has multiple return statements, the return type is deduced for each return
+ // statement. [...] if the type deduced is not the same in each deduction,
+ // the program is ill-formed.
+ if (AT->isDeduced() && !FD->isInvalidDecl()) {
+ AutoType *NewAT = Deduced->getContainedAutoType();
+ CanQualType OldDeducedType = Context.getCanonicalFunctionResultType(
+ AT->getDeducedType());
+ CanQualType NewDeducedType = Context.getCanonicalFunctionResultType(
+ NewAT->getDeducedType());
+ if (!FD->isDependentContext() && OldDeducedType != NewDeducedType) {
+ const LambdaScopeInfo *LambdaSI = getCurLambda();
+ if (LambdaSI && LambdaSI->HasImplicitReturnType) {
+ Diag(ReturnLoc, diag::err_typecheck_missing_return_type_incompatible)
+ << NewAT->getDeducedType() << AT->getDeducedType()
+ << true /*IsLambda*/;
+ } else {
+ Diag(ReturnLoc, diag::err_auto_fn_different_deductions)
+ << (AT->isDecltypeAuto() ? 1 : 0)
+ << NewAT->getDeducedType() << AT->getDeducedType();
+ }
+ return true;
+ }
+ } else if (!FD->isInvalidDecl()) {
+ // Update all declarations of the function to have the deduced return type.
+ Context.adjustDeducedFunctionResultType(FD, Deduced);
+ }
+
+ return false;
+}
+
+StmtResult
+Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
+ Scope *CurScope) {
+ StmtResult R = BuildReturnStmt(ReturnLoc, RetValExp);
+ if (R.isInvalid()) {
+ return R;
+ }
+
+ if (VarDecl *VD =
+ const_cast<VarDecl*>(cast<ReturnStmt>(R.get())->getNRVOCandidate())) {
+ CurScope->addNRVOCandidate(VD);
+ } else {
+ CurScope->setNoNRVO();
+ }
+
+ CheckJumpOutOfSEHFinally(*this, ReturnLoc, *CurScope->getFnParent());
+
+ return R;
+}
+
+StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
+ // Check for unexpanded parameter packs.
+ if (RetValExp && DiagnoseUnexpandedParameterPack(RetValExp))
+ return StmtError();
+
+ if (isa<CapturingScopeInfo>(getCurFunction()))
+ return ActOnCapScopeReturnStmt(ReturnLoc, RetValExp);
+
+ QualType FnRetType;
+ QualType RelatedRetType;
+ const AttrVec *Attrs = nullptr;
+ bool isObjCMethod = false;
+
+ if (const FunctionDecl *FD = getCurFunctionDecl()) {
+ FnRetType = FD->getReturnType();
+ if (FD->hasAttrs())
+ Attrs = &FD->getAttrs();
+ if (FD->isNoReturn())
+ Diag(ReturnLoc, diag::warn_noreturn_function_has_return_expr)
+ << FD->getDeclName();
+ } else if (ObjCMethodDecl *MD = getCurMethodDecl()) {
+ FnRetType = MD->getReturnType();
+ isObjCMethod = true;
+ if (MD->hasAttrs())
+ Attrs = &MD->getAttrs();
+ if (MD->hasRelatedResultType() && MD->getClassInterface()) {
+ // In the implementation of a method with a related return type, the
+ // type used to type-check the validity of return statements within the
+ // method body is a pointer to the type of the class being implemented.
+ RelatedRetType = Context.getObjCInterfaceType(MD->getClassInterface());
+ RelatedRetType = Context.getObjCObjectPointerType(RelatedRetType);
+ }
+ } else // If we don't have a function/method context, bail.
+ return StmtError();
+
+ // FIXME: Add a flag to the ScopeInfo to indicate whether we're performing
+ // deduction.
+ if (getLangOpts().CPlusPlus14) {
+ if (AutoType *AT = FnRetType->getContainedAutoType()) {
+ FunctionDecl *FD = cast<FunctionDecl>(CurContext);
+ if (DeduceFunctionTypeFromReturnExpr(FD, ReturnLoc, RetValExp, AT)) {
+ FD->setInvalidDecl();
+ return StmtError();
+ } else {
+ FnRetType = FD->getReturnType();
+ }
+ }
+ }
+
+ bool HasDependentReturnType = FnRetType->isDependentType();
+
+ ReturnStmt *Result = nullptr;
+ if (FnRetType->isVoidType()) {
+ if (RetValExp) {
+ if (isa<InitListExpr>(RetValExp)) {
+ // We simply never allow init lists as the return value of void
+ // functions. This is compatible because this was never allowed before,
+ // so there's no legacy code to deal with.
+ NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
+ int FunctionKind = 0;
+ if (isa<ObjCMethodDecl>(CurDecl))
+ FunctionKind = 1;
+ else if (isa<CXXConstructorDecl>(CurDecl))
+ FunctionKind = 2;
+ else if (isa<CXXDestructorDecl>(CurDecl))
+ FunctionKind = 3;
+
+ Diag(ReturnLoc, diag::err_return_init_list)
+ << CurDecl->getDeclName() << FunctionKind
+ << RetValExp->getSourceRange();
+
+ // Drop the expression.
+ RetValExp = nullptr;
+ } else if (!RetValExp->isTypeDependent()) {
+ // C99 6.8.6.4p1 (ext_ since GCC warns)
+ unsigned D = diag::ext_return_has_expr;
+ if (RetValExp->getType()->isVoidType()) {
+ NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
+ if (isa<CXXConstructorDecl>(CurDecl) ||
+ isa<CXXDestructorDecl>(CurDecl))
+ D = diag::err_ctor_dtor_returns_void;
+ else
+ D = diag::ext_return_has_void_expr;
+ }
+ else {
+ ExprResult Result = RetValExp;
+ Result = IgnoredValueConversions(Result.get());
+ if (Result.isInvalid())
+ return StmtError();
+ RetValExp = Result.get();
+ RetValExp = ImpCastExprToType(RetValExp,
+ Context.VoidTy, CK_ToVoid).get();
+ }
+ // return of void in constructor/destructor is illegal in C++.
+ if (D == diag::err_ctor_dtor_returns_void) {
+ NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
+ Diag(ReturnLoc, D)
+ << CurDecl->getDeclName() << isa<CXXDestructorDecl>(CurDecl)
+ << RetValExp->getSourceRange();
+ }
+ // return (some void expression); is legal in C++.
+ else if (D != diag::ext_return_has_void_expr ||
+ !getLangOpts().CPlusPlus) {
+ NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
+
+ int FunctionKind = 0;
+ if (isa<ObjCMethodDecl>(CurDecl))
+ FunctionKind = 1;
+ else if (isa<CXXConstructorDecl>(CurDecl))
+ FunctionKind = 2;
+ else if (isa<CXXDestructorDecl>(CurDecl))
+ FunctionKind = 3;
+
+ Diag(ReturnLoc, D)
+ << CurDecl->getDeclName() << FunctionKind
+ << RetValExp->getSourceRange();
+ }
+ }
+
+ if (RetValExp) {
+ ExprResult ER = ActOnFinishFullExpr(RetValExp, ReturnLoc);
+ if (ER.isInvalid())
+ return StmtError();
+ RetValExp = ER.get();
+ }
+ }
+
+ Result = new (Context) ReturnStmt(ReturnLoc, RetValExp, nullptr);
+ } else if (!RetValExp && !HasDependentReturnType) {
+ FunctionDecl *FD = getCurFunctionDecl();
+
+ unsigned DiagID;
+ if (getLangOpts().CPlusPlus11 && FD && FD->isConstexpr()) {
+ // C++11 [stmt.return]p2
+ DiagID = diag::err_constexpr_return_missing_expr;
+ FD->setInvalidDecl();
+ } else if (getLangOpts().C99) {
+ // C99 6.8.6.4p1 (ext_ since GCC warns)
+ DiagID = diag::ext_return_missing_expr;
+ } else {
+ // C90 6.6.6.4p4
+ DiagID = diag::warn_return_missing_expr;
+ }
+
+ if (FD)
+ Diag(ReturnLoc, DiagID) << FD->getIdentifier() << 0/*fn*/;
+ else
+ Diag(ReturnLoc, DiagID) << getCurMethodDecl()->getDeclName() << 1/*meth*/;
+
+ Result = new (Context) ReturnStmt(ReturnLoc);
+ } else {
+ assert(RetValExp || HasDependentReturnType);
+ const VarDecl *NRVOCandidate = nullptr;
+
+ QualType RetType = RelatedRetType.isNull() ? FnRetType : RelatedRetType;
+
+ // C99 6.8.6.4p3(136): The return statement is not an assignment. The
+ // overlap restriction of subclause 6.5.16.1 does not apply to the case of
+ // function return.
+
+ // In C++ the return statement is handled via a copy initialization,
+ // the C version of which boils down to CheckSingleAssignmentConstraints.
+ if (RetValExp)
+ NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, false);
+ if (!HasDependentReturnType && !RetValExp->isTypeDependent()) {
+ // we have a non-void function with an expression, continue checking
+ InitializedEntity Entity = InitializedEntity::InitializeResult(ReturnLoc,
+ RetType,
+ NRVOCandidate != nullptr);
+ ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRVOCandidate,
+ RetType, RetValExp);
+ if (Res.isInvalid()) {
+ // FIXME: Clean up temporaries here anyway?
+ return StmtError();
+ }
+ RetValExp = Res.getAs<Expr>();
+
+ // If we have a related result type, we need to implicitly
+ // convert back to the formal result type. We can't pretend to
+ // initialize the result again --- we might end up double-retaining
+ // --- so instead we initialize a notional temporary.
+ if (!RelatedRetType.isNull()) {
+ Entity = InitializedEntity::InitializeRelatedResult(getCurMethodDecl(),
+ FnRetType);
+ Res = PerformCopyInitialization(Entity, ReturnLoc, RetValExp);
+ if (Res.isInvalid()) {
+ // FIXME: Clean up temporaries here anyway?
+ return StmtError();
+ }
+ RetValExp = Res.getAs<Expr>();
+ }
+
+ CheckReturnValExpr(RetValExp, FnRetType, ReturnLoc, isObjCMethod, Attrs,
+ getCurFunctionDecl());
+ }
+
+ if (RetValExp) {
+ ExprResult ER = ActOnFinishFullExpr(RetValExp, ReturnLoc);
+ if (ER.isInvalid())
+ return StmtError();
+ RetValExp = ER.get();
+ }
+ Result = new (Context) ReturnStmt(ReturnLoc, RetValExp, NRVOCandidate);
+ }
+
+ // If we need to check for the named return value optimization, save the
+ // return statement in our scope for later processing.
+ if (Result->getNRVOCandidate())
+ FunctionScopes.back()->Returns.push_back(Result);
+
+ if (FunctionScopes.back()->FirstReturnLoc.isInvalid())
+ FunctionScopes.back()->FirstReturnLoc = ReturnLoc;
+
+ return Result;
+}
+
+StmtResult
+Sema::ActOnObjCAtCatchStmt(SourceLocation AtLoc,
+ SourceLocation RParen, Decl *Parm,
+ Stmt *Body) {
+ VarDecl *Var = cast_or_null<VarDecl>(Parm);
+ if (Var && Var->isInvalidDecl())
+ return StmtError();
+
+ return new (Context) ObjCAtCatchStmt(AtLoc, RParen, Var, Body);
+}
+
+StmtResult
+Sema::ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body) {
+ return new (Context) ObjCAtFinallyStmt(AtLoc, Body);
+}
+
+StmtResult
+Sema::ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
+ MultiStmtArg CatchStmts, Stmt *Finally) {
+ if (!getLangOpts().ObjCExceptions)
+ Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@try";
+
+ getCurFunction()->setHasBranchProtectedScope();
+ unsigned NumCatchStmts = CatchStmts.size();
+ return ObjCAtTryStmt::Create(Context, AtLoc, Try, CatchStmts.data(),
+ NumCatchStmts, Finally);
+}
+
+StmtResult Sema::BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw) {
+ if (Throw) {
+ ExprResult Result = DefaultLvalueConversion(Throw);
+ if (Result.isInvalid())
+ return StmtError();
+
+ Result = ActOnFinishFullExpr(Result.get());
+ if (Result.isInvalid())
+ return StmtError();
+ Throw = Result.get();
+
+ QualType ThrowType = Throw->getType();
+ // Make sure the expression type is an ObjC pointer or "void *".
+ if (!ThrowType->isDependentType() &&
+ !ThrowType->isObjCObjectPointerType()) {
+ const PointerType *PT = ThrowType->getAs<PointerType>();
+ if (!PT || !PT->getPointeeType()->isVoidType())
+ return StmtError(Diag(AtLoc, diag::error_objc_throw_expects_object)
+ << Throw->getType() << Throw->getSourceRange());
+ }
+ }
+
+ return new (Context) ObjCAtThrowStmt(AtLoc, Throw);
+}
+
+StmtResult
+Sema::ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
+ Scope *CurScope) {
+ if (!getLangOpts().ObjCExceptions)
+ Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@throw";
+
+ if (!Throw) {
+ // @throw without an expression designates a rethrow (which must occur
+ // in the context of an @catch clause).
+ Scope *AtCatchParent = CurScope;
+ while (AtCatchParent && !AtCatchParent->isAtCatchScope())
+ AtCatchParent = AtCatchParent->getParent();
+ if (!AtCatchParent)
+ return StmtError(Diag(AtLoc, diag::error_rethrow_used_outside_catch));
+ }
+ return BuildObjCAtThrowStmt(AtLoc, Throw);
+}
+
+ExprResult
+Sema::ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand) {
+ ExprResult result = DefaultLvalueConversion(operand);
+ if (result.isInvalid())
+ return ExprError();
+ operand = result.get();
+
+ // Make sure the expression type is an ObjC pointer or "void *".
+ QualType type = operand->getType();
+ if (!type->isDependentType() &&
+ !type->isObjCObjectPointerType()) {
+ const PointerType *pointerType = type->getAs<PointerType>();
+ if (!pointerType || !pointerType->getPointeeType()->isVoidType()) {
+ if (getLangOpts().CPlusPlus) {
+ if (RequireCompleteType(atLoc, type,
+ diag::err_incomplete_receiver_type))
+ return Diag(atLoc, diag::error_objc_synchronized_expects_object)
+ << type << operand->getSourceRange();
+
+ ExprResult result = PerformContextuallyConvertToObjCPointer(operand);
+ if (!result.isUsable())
+ return Diag(atLoc, diag::error_objc_synchronized_expects_object)
+ << type << operand->getSourceRange();
+
+ operand = result.get();
+ } else {
+ return Diag(atLoc, diag::error_objc_synchronized_expects_object)
+ << type << operand->getSourceRange();
+ }
+ }
+ }
+
+ // The operand to @synchronized is a full-expression.
+ return ActOnFinishFullExpr(operand);
+}
+
+StmtResult
+Sema::ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SyncExpr,
+ Stmt *SyncBody) {
+ // We can't jump into or indirect-jump out of a @synchronized block.
+ getCurFunction()->setHasBranchProtectedScope();
+ return new (Context) ObjCAtSynchronizedStmt(AtLoc, SyncExpr, SyncBody);
+}
+
+/// ActOnCXXCatchBlock - Takes an exception declaration and a handler block
+/// and creates a proper catch handler from them.
+StmtResult
+Sema::ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl,
+ Stmt *HandlerBlock) {
+ // There's nothing to test that ActOnExceptionDecl didn't already test.
+ return new (Context)
+ CXXCatchStmt(CatchLoc, cast_or_null<VarDecl>(ExDecl), HandlerBlock);
+}
+
+StmtResult
+Sema::ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body) {
+ getCurFunction()->setHasBranchProtectedScope();
+ return new (Context) ObjCAutoreleasePoolStmt(AtLoc, Body);
+}
+
+namespace {
+class CatchHandlerType {
+ QualType QT;
+ unsigned IsPointer : 1;
+
+ // This is a special constructor to be used only with DenseMapInfo's
+ // getEmptyKey() and getTombstoneKey() functions.
+ friend struct llvm::DenseMapInfo<CatchHandlerType>;
+ enum Unique { ForDenseMap };
+ CatchHandlerType(QualType QT, Unique) : QT(QT), IsPointer(false) {}
+
+public:
+ /// Used when creating a CatchHandlerType from a handler type; will determine
+ /// whether the type is a pointer or reference and will strip off the top
+ /// level pointer and cv-qualifiers.
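+ ///
+ /// For example, 'const Foo *' and 'Foo *' compare equal, as do 'Foo &' and
+ /// 'Foo', but 'Foo *' and 'Foo' do not, since the pointer flag differs.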
+ CatchHandlerType(QualType Q) : QT(Q), IsPointer(false) {
+ if (QT->isPointerType())
+ IsPointer = true;
+
+ if (IsPointer || QT->isReferenceType())
+ QT = QT->getPointeeType();
+ QT = QT.getUnqualifiedType();
+ }
+
+ /// Used when creating a CatchHandlerType from a base class type; pretends the
+ /// type passed in had the pointer qualifier, does not need to get an
+ /// unqualified type.
+ CatchHandlerType(QualType QT, bool IsPointer)
+ : QT(QT), IsPointer(IsPointer) {}
+
+ QualType underlying() const { return QT; }
+ bool isPointer() const { return IsPointer; }
+
+ friend bool operator==(const CatchHandlerType &LHS,
+ const CatchHandlerType &RHS) {
+ // If the pointer qualification does not match, we can return early.
+ if (LHS.IsPointer != RHS.IsPointer)
+ return false;
+ // Otherwise, check the underlying type without cv-qualifiers.
+ return LHS.QT == RHS.QT;
+ }
+};
+} // namespace
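+
+// Illustrative example (not from the original source): CatchHandlerType
+// strips one level of pointer/reference plus cv-qualifiers, so these handler
+// types normalize as follows, with IsPointer distinguishing T* from T/T&:
+//
+//   catch (const Foo &)  -> { QT = Foo, IsPointer = false }
+//   catch (Foo)          -> { QT = Foo, IsPointer = false }
+//   catch (const Foo *)  -> { QT = Foo, IsPointer = true  }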
+
+namespace llvm {
+template <> struct DenseMapInfo<CatchHandlerType> {
+ static CatchHandlerType getEmptyKey() {
+ return CatchHandlerType(DenseMapInfo<QualType>::getEmptyKey(),
+ CatchHandlerType::ForDenseMap);
+ }
+
+ static CatchHandlerType getTombstoneKey() {
+ return CatchHandlerType(DenseMapInfo<QualType>::getTombstoneKey(),
+ CatchHandlerType::ForDenseMap);
+ }
+
+ static unsigned getHashValue(const CatchHandlerType &Base) {
+ return DenseMapInfo<QualType>::getHashValue(Base.underlying());
+ }
+
+ static bool isEqual(const CatchHandlerType &LHS,
+ const CatchHandlerType &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// It's OK to treat CatchHandlerType as a POD type.
+template <> struct isPodLike<CatchHandlerType> {
+ static const bool value = true;
+};
+}
+
+namespace {
+class CatchTypePublicBases {
+ ASTContext &Ctx;
+ const llvm::DenseMap<CatchHandlerType, CXXCatchStmt *> &TypesToCheck;
+ const bool CheckAgainstPointer;
+
+ CXXCatchStmt *FoundHandler;
+ CanQualType FoundHandlerType;
+
+public:
+ CatchTypePublicBases(
+ ASTContext &Ctx,
+ const llvm::DenseMap<CatchHandlerType, CXXCatchStmt *> &T, bool C)
+ : Ctx(Ctx), TypesToCheck(T), CheckAgainstPointer(C),
+ FoundHandler(nullptr) {}
+
+ CXXCatchStmt *getFoundHandler() const { return FoundHandler; }
+ CanQualType getFoundHandlerType() const { return FoundHandlerType; }
+
+ bool operator()(const CXXBaseSpecifier *S, CXXBasePath &) {
+ if (S->getAccessSpecifier() == AccessSpecifier::AS_public) {
+ CatchHandlerType Check(S->getType(), CheckAgainstPointer);
+      const auto &M = TypesToCheck;
+      auto I = M.find(Check);
+ if (I != M.end()) {
+ FoundHandler = I->second;
+ FoundHandlerType = Ctx.getCanonicalType(S->getType());
+ return true;
+ }
+ }
+ return false;
+ }
+};
+}
+
+/// ActOnCXXTryBlock - Takes a try compound-statement and a number of
+/// handlers and creates a try statement from them.
+StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
+ ArrayRef<Stmt *> Handlers) {
+ // Don't report an error if 'try' is used in system headers.
+ if (!getLangOpts().CXXExceptions &&
+ !getSourceManager().isInSystemHeader(TryLoc))
+ Diag(TryLoc, diag::err_exceptions_disabled) << "try";
+
+ if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope())
+ Diag(TryLoc, diag::err_omp_simd_region_cannot_use_stmt) << "try";
+
+ sema::FunctionScopeInfo *FSI = getCurFunction();
+
+ // C++ try is incompatible with SEH __try.
+ if (!getLangOpts().Borland && FSI->FirstSEHTryLoc.isValid()) {
+ Diag(TryLoc, diag::err_mixing_cxx_try_seh_try);
+ Diag(FSI->FirstSEHTryLoc, diag::note_conflicting_try_here) << "'__try'";
+ }
+
+ const unsigned NumHandlers = Handlers.size();
+ assert(!Handlers.empty() &&
+ "The parser shouldn't call this if there are no handlers.");
+
+ llvm::DenseMap<CatchHandlerType, CXXCatchStmt *> HandledTypes;
+ for (unsigned i = 0; i < NumHandlers; ++i) {
+ CXXCatchStmt *H = cast<CXXCatchStmt>(Handlers[i]);
+
+ // Diagnose when the handler is a catch-all handler, but it isn't the last
+ // handler for the try block. [except.handle]p5. Also, skip exception
+ // declarations that are invalid, since we can't usefully report on them.
+ if (!H->getExceptionDecl()) {
+ if (i < NumHandlers - 1)
+ return StmtError(Diag(H->getLocStart(), diag::err_early_catch_all));
+ continue;
+ } else if (H->getExceptionDecl()->isInvalidDecl())
+ continue;
+
+ // Walk the type hierarchy to diagnose when this type has already been
+ // handled (duplication), or cannot be handled (derivation inversion). We
+    // ignore top-level cv-qualifiers, per [except.handle]p3.
+ CatchHandlerType HandlerCHT =
+ (QualType)Context.getCanonicalType(H->getCaughtType());
+
+ // We can ignore whether the type is a reference or a pointer; we need the
+ // underlying declaration type in order to get at the underlying record
+ // decl, if there is one.
+ QualType Underlying = HandlerCHT.underlying();
+ if (auto *RD = Underlying->getAsCXXRecordDecl()) {
+ if (!RD->hasDefinition())
+ continue;
+ // Check that none of the public, unambiguous base classes are in the
+ // map ([except.handle]p1). Give the base classes the same pointer
+ // qualification as the original type we are basing off of. This allows
+ // comparison against the handler type using the same top-level pointer
+ // as the original type.
+ CXXBasePaths Paths;
+ Paths.setOrigin(RD);
+ CatchTypePublicBases CTPB(Context, HandledTypes, HandlerCHT.isPointer());
+ if (RD->lookupInBases(CTPB, Paths)) {
+ const CXXCatchStmt *Problem = CTPB.getFoundHandler();
+ if (!Paths.isAmbiguous(CTPB.getFoundHandlerType())) {
+ Diag(H->getExceptionDecl()->getTypeSpecStartLoc(),
+ diag::warn_exception_caught_by_earlier_handler)
+ << H->getCaughtType();
+ Diag(Problem->getExceptionDecl()->getTypeSpecStartLoc(),
+ diag::note_previous_exception_handler)
+ << Problem->getCaughtType();
+ }
+ }
+ }
+
+  // Add the type to the list of ones we have handled; diagnose if we've
+  // already handled it.
+ auto R = HandledTypes.insert(std::make_pair(H->getCaughtType(), H));
+ if (!R.second) {
+ const CXXCatchStmt *Problem = R.first->second;
+ Diag(H->getExceptionDecl()->getTypeSpecStartLoc(),
+ diag::warn_exception_caught_by_earlier_handler)
+ << H->getCaughtType();
+ Diag(Problem->getExceptionDecl()->getTypeSpecStartLoc(),
+ diag::note_previous_exception_handler)
+ << Problem->getCaughtType();
+ }
+ }
+
+ FSI->setHasCXXTry(TryLoc);
+
+ return CXXTryStmt::Create(Context, TryLoc, TryBlock, Handlers);
+}
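+
+// Illustrative examples (not from the original source) of the handler-order
+// diagnostics above:
+//
+//   try { f(); } catch (...) {} catch (int) {}  // error: catch-all handler
+//                                               //        must come last
+//   try { f(); } catch (Base &) {} catch (Derived &) {}  // warning: already
+//                                     // caught by the earlier Base handler
+//   try { f(); } catch (int) {} catch (const int) {}   // warning: duplicate
+//                                     // (top-level cv-qualifiers ignored)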
+
+StmtResult Sema::ActOnSEHTryBlock(bool IsCXXTry, SourceLocation TryLoc,
+ Stmt *TryBlock, Stmt *Handler) {
+ assert(TryBlock && Handler);
+
+ sema::FunctionScopeInfo *FSI = getCurFunction();
+
+ // SEH __try is incompatible with C++ try. Borland appears to support this,
+ // however.
+ if (!getLangOpts().Borland) {
+ if (FSI->FirstCXXTryLoc.isValid()) {
+ Diag(TryLoc, diag::err_mixing_cxx_try_seh_try);
+ Diag(FSI->FirstCXXTryLoc, diag::note_conflicting_try_here) << "'try'";
+ }
+ }
+
+ FSI->setHasSEHTry(TryLoc);
+
+ // Reject __try in Obj-C methods, blocks, and captured decls, since we don't
+ // track if they use SEH.
+ DeclContext *DC = CurContext;
+ while (DC && !DC->isFunctionOrMethod())
+ DC = DC->getParent();
+ FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(DC);
+ if (FD)
+ FD->setUsesSEHTry(true);
+ else
+ Diag(TryLoc, diag::err_seh_try_outside_functions);
+
+ // Reject __try on unsupported targets.
+ if (!Context.getTargetInfo().isSEHTrySupported())
+ Diag(TryLoc, diag::err_seh_try_unsupported);
+
+ return SEHTryStmt::Create(Context, IsCXXTry, TryLoc, TryBlock, Handler);
+}
+
+StmtResult
+Sema::ActOnSEHExceptBlock(SourceLocation Loc,
+ Expr *FilterExpr,
+ Stmt *Block) {
+ assert(FilterExpr && Block);
+
+ if(!FilterExpr->getType()->isIntegerType()) {
+ return StmtError(Diag(FilterExpr->getExprLoc(),
+ diag::err_filter_expression_integral)
+ << FilterExpr->getType());
+ }
+
+ return SEHExceptStmt::Create(Context,Loc,FilterExpr,Block);
+}
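+
+// Illustrative example (not from the original source): the __except filter
+// expression must have integral type. EXCEPTION_EXECUTE_HANDLER is the usual
+// Windows macro expanding to the integer 1.
+//
+//   __try { g(); } __except (EXCEPTION_EXECUTE_HANDLER) { h(); }  // OK
+//   __try { g(); } __except ("oops") { h(); }  // error: filter expression
+//                                              //        must be integral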
+
+void Sema::ActOnStartSEHFinallyBlock() {
+ CurrentSEHFinally.push_back(CurScope);
+}
+
+void Sema::ActOnAbortSEHFinallyBlock() {
+ CurrentSEHFinally.pop_back();
+}
+
+StmtResult Sema::ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block) {
+ assert(Block);
+ CurrentSEHFinally.pop_back();
+ return SEHFinallyStmt::Create(Context, Loc, Block);
+}
+
+StmtResult
+Sema::ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope) {
+ Scope *SEHTryParent = CurScope;
+ while (SEHTryParent && !SEHTryParent->isSEHTryScope())
+ SEHTryParent = SEHTryParent->getParent();
+ if (!SEHTryParent)
+ return StmtError(Diag(Loc, diag::err_ms___leave_not_in___try));
+ CheckJumpOutOfSEHFinally(*this, Loc, *SEHTryParent);
+
+ return new (Context) SEHLeaveStmt(Loc);
+}
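+
+// Illustrative example (not from the original source): the scope walk above
+// requires '__leave' to appear lexically inside a '__try' block.
+//
+//   __try { if (failed) __leave; /* ... */ } __finally { cleanup(); }  // OK
+//   void f(void) { __leave; }  // error: '__leave' not in a '__try' block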
+
+StmtResult Sema::BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
+ bool IsIfExists,
+ NestedNameSpecifierLoc QualifierLoc,
+ DeclarationNameInfo NameInfo,
+ Stmt *Nested)
+{
+ return new (Context) MSDependentExistsStmt(KeywordLoc, IsIfExists,
+ QualifierLoc, NameInfo,
+ cast<CompoundStmt>(Nested));
+}
+
+
+StmtResult Sema::ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
+ bool IsIfExists,
+ CXXScopeSpec &SS,
+ UnqualifiedId &Name,
+ Stmt *Nested) {
+ return BuildMSDependentExistsStmt(KeywordLoc, IsIfExists,
+ SS.getWithLocInContext(Context),
+ GetNameFromUnqualifiedId(Name),
+ Nested);
+}
+
+RecordDecl*
+Sema::CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc,
+ unsigned NumParams) {
+ DeclContext *DC = CurContext;
+ while (!(DC->isFunctionOrMethod() || DC->isRecord() || DC->isFileContext()))
+ DC = DC->getParent();
+
+ RecordDecl *RD = nullptr;
+ if (getLangOpts().CPlusPlus)
+ RD = CXXRecordDecl::Create(Context, TTK_Struct, DC, Loc, Loc,
+ /*Id=*/nullptr);
+ else
+ RD = RecordDecl::Create(Context, TTK_Struct, DC, Loc, Loc, /*Id=*/nullptr);
+
+ RD->setCapturedRecord();
+ DC->addDecl(RD);
+ RD->setImplicit();
+ RD->startDefinition();
+
+ assert(NumParams > 0 && "CapturedStmt requires context parameter");
+ CD = CapturedDecl::Create(Context, CurContext, NumParams);
+ DC->addDecl(CD);
+ return RD;
+}
+
+static void buildCapturedStmtCaptureList(
+ SmallVectorImpl<CapturedStmt::Capture> &Captures,
+ SmallVectorImpl<Expr *> &CaptureInits,
+ ArrayRef<CapturingScopeInfo::Capture> Candidates) {
+
+ typedef ArrayRef<CapturingScopeInfo::Capture>::const_iterator CaptureIter;
+ for (CaptureIter Cap = Candidates.begin(); Cap != Candidates.end(); ++Cap) {
+
+ if (Cap->isThisCapture()) {
+ Captures.push_back(CapturedStmt::Capture(Cap->getLocation(),
+ CapturedStmt::VCK_This));
+ CaptureInits.push_back(Cap->getInitExpr());
+ continue;
+ } else if (Cap->isVLATypeCapture()) {
+ Captures.push_back(
+ CapturedStmt::Capture(Cap->getLocation(), CapturedStmt::VCK_VLAType));
+ CaptureInits.push_back(nullptr);
+ continue;
+ }
+
+ Captures.push_back(CapturedStmt::Capture(Cap->getLocation(),
+ Cap->isReferenceCapture()
+ ? CapturedStmt::VCK_ByRef
+ : CapturedStmt::VCK_ByCopy,
+ Cap->getVariable()));
+ CaptureInits.push_back(Cap->getInitExpr());
+ }
+}
+
+void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
+ CapturedRegionKind Kind,
+ unsigned NumParams) {
+ CapturedDecl *CD = nullptr;
+ RecordDecl *RD = CreateCapturedStmtRecordDecl(CD, Loc, NumParams);
+
+ // Build the context parameter
+ DeclContext *DC = CapturedDecl::castToDeclContext(CD);
+ IdentifierInfo *ParamName = &Context.Idents.get("__context");
+ QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD));
+ ImplicitParamDecl *Param
+ = ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType);
+ DC->addDecl(Param);
+
+ CD->setContextParam(0, Param);
+
+ // Enter the capturing scope for this captured region.
+ PushCapturedRegionScope(CurScope, CD, RD, Kind);
+
+ if (CurScope)
+ PushDeclContext(CurScope, CD);
+ else
+ CurContext = CD;
+
+ PushExpressionEvaluationContext(PotentiallyEvaluated);
+}
+
+void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
+ CapturedRegionKind Kind,
+ ArrayRef<CapturedParamNameType> Params) {
+ CapturedDecl *CD = nullptr;
+ RecordDecl *RD = CreateCapturedStmtRecordDecl(CD, Loc, Params.size());
+
+ // Build the context parameter
+ DeclContext *DC = CapturedDecl::castToDeclContext(CD);
+ bool ContextIsFound = false;
+ unsigned ParamNum = 0;
+ for (ArrayRef<CapturedParamNameType>::iterator I = Params.begin(),
+ E = Params.end();
+ I != E; ++I, ++ParamNum) {
+ if (I->second.isNull()) {
+ assert(!ContextIsFound &&
+ "null type has been found already for '__context' parameter");
+ IdentifierInfo *ParamName = &Context.Idents.get("__context");
+ QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD));
+ ImplicitParamDecl *Param
+ = ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType);
+ DC->addDecl(Param);
+ CD->setContextParam(ParamNum, Param);
+ ContextIsFound = true;
+ } else {
+ IdentifierInfo *ParamName = &Context.Idents.get(I->first);
+ ImplicitParamDecl *Param
+ = ImplicitParamDecl::Create(Context, DC, Loc, ParamName, I->second);
+ DC->addDecl(Param);
+ CD->setParam(ParamNum, Param);
+ }
+ }
+ assert(ContextIsFound && "no null type for '__context' parameter");
+ if (!ContextIsFound) {
+ // Add __context implicitly if it is not specified.
+ IdentifierInfo *ParamName = &Context.Idents.get("__context");
+ QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD));
+ ImplicitParamDecl *Param =
+ ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType);
+ DC->addDecl(Param);
+ CD->setContextParam(ParamNum, Param);
+ }
+ // Enter the capturing scope for this captured region.
+ PushCapturedRegionScope(CurScope, CD, RD, Kind);
+
+ if (CurScope)
+ PushDeclContext(CurScope, CD);
+ else
+ CurContext = CD;
+
+ PushExpressionEvaluationContext(PotentiallyEvaluated);
+}
+
+void Sema::ActOnCapturedRegionError() {
+ DiscardCleanupsInEvaluationContext();
+ PopExpressionEvaluationContext();
+
+ CapturedRegionScopeInfo *RSI = getCurCapturedRegion();
+ RecordDecl *Record = RSI->TheRecordDecl;
+ Record->setInvalidDecl();
+
+ SmallVector<Decl*, 4> Fields(Record->fields());
+ ActOnFields(/*Scope=*/nullptr, Record->getLocation(), Record, Fields,
+ SourceLocation(), SourceLocation(), /*AttributeList=*/nullptr);
+
+ PopDeclContext();
+ PopFunctionScopeInfo();
+}
+
+StmtResult Sema::ActOnCapturedRegionEnd(Stmt *S) {
+ CapturedRegionScopeInfo *RSI = getCurCapturedRegion();
+
+ SmallVector<CapturedStmt::Capture, 4> Captures;
+ SmallVector<Expr *, 4> CaptureInits;
+ buildCapturedStmtCaptureList(Captures, CaptureInits, RSI->Captures);
+
+ CapturedDecl *CD = RSI->TheCapturedDecl;
+ RecordDecl *RD = RSI->TheRecordDecl;
+
+ CapturedStmt *Res = CapturedStmt::Create(getASTContext(), S,
+ RSI->CapRegionKind, Captures,
+ CaptureInits, CD, RD);
+
+ CD->setBody(Res->getCapturedStmt());
+ RD->completeDefinition();
+
+ DiscardCleanupsInEvaluationContext();
+ PopExpressionEvaluationContext();
+
+ PopDeclContext();
+ PopFunctionScopeInfo();
+
+ return Res;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaStmtAsm.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaStmtAsm.cpp
new file mode 100644
index 0000000..11a4f8b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaStmtAsm.cpp
@@ -0,0 +1,780 @@
+//===--- SemaStmtAsm.cpp - Semantic Analysis for Asm Statements -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for inline asm statements.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/MC/MCParser/MCAsmParser.h"
+using namespace clang;
+using namespace sema;
+
+/// CheckAsmLValue - GNU C has an extremely ugly extension whereby it silently
+/// ignores "noop" casts in places where an lvalue is required by an inline asm.
+/// We emulate this behavior when -fheinous-gnu-extensions is specified, but
+/// provide strong guidance not to use it.
+///
+/// This method checks to see if the argument is an acceptable l-value and
+/// returns false if it is a case we can handle.
+static bool CheckAsmLValue(const Expr *E, Sema &S) {
+ // Type dependent expressions will be checked during instantiation.
+ if (E->isTypeDependent())
+ return false;
+
+ if (E->isLValue())
+ return false; // Cool, this is an lvalue.
+
+ // Okay, this is not an lvalue, but perhaps it is the result of a cast that we
+ // are supposed to allow.
+ const Expr *E2 = E->IgnoreParenNoopCasts(S.Context);
+ if (E != E2 && E2->isLValue()) {
+ if (!S.getLangOpts().HeinousExtensions)
+ S.Diag(E2->getLocStart(), diag::err_invalid_asm_cast_lvalue)
+ << E->getSourceRange();
+ else
+ S.Diag(E2->getLocStart(), diag::warn_invalid_asm_cast_lvalue)
+ << E->getSourceRange();
+ // Accept, even if we emitted an error diagnostic.
+ return false;
+ }
+
+ // None of the above, just randomly invalid non-lvalue.
+ return true;
+}
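+
+// Illustrative example (not from the original source) of the "heinous" GNU
+// extension handled above: a no-op lvalue cast used as an asm operand.
+//
+//   int x;
+//   asm ("" : "=r" ((int)x));  // error by default; downgraded to a warning
+//                              // under -fheinous-gnu-extensions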
+
+/// isOperandMentioned - Return true if the specified operand # is mentioned
+/// anywhere in the decomposed asm string.
+static bool isOperandMentioned(unsigned OpNo,
+ ArrayRef<GCCAsmStmt::AsmStringPiece> AsmStrPieces) {
+ for (unsigned p = 0, e = AsmStrPieces.size(); p != e; ++p) {
+ const GCCAsmStmt::AsmStringPiece &Piece = AsmStrPieces[p];
+ if (!Piece.isOperand()) continue;
+
+    // If this piece references the operand we are looking for, it is
+    // mentioned in the asm string.
+ if (Piece.getOperandNo() == OpNo)
+ return true;
+ }
+ return false;
+}
+
+static bool CheckNakedParmReference(Expr *E, Sema &S) {
+ FunctionDecl *Func = dyn_cast<FunctionDecl>(S.CurContext);
+ if (!Func)
+ return false;
+ if (!Func->hasAttr<NakedAttr>())
+ return false;
+
+ SmallVector<Expr*, 4> WorkList;
+ WorkList.push_back(E);
+ while (WorkList.size()) {
+ Expr *E = WorkList.pop_back_val();
+ if (isa<CXXThisExpr>(E)) {
+ S.Diag(E->getLocStart(), diag::err_asm_naked_this_ref);
+ S.Diag(Func->getAttr<NakedAttr>()->getLocation(), diag::note_attribute);
+ return true;
+ }
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (isa<ParmVarDecl>(DRE->getDecl())) {
+ S.Diag(DRE->getLocStart(), diag::err_asm_naked_parm_ref);
+ S.Diag(Func->getAttr<NakedAttr>()->getLocation(), diag::note_attribute);
+ return true;
+ }
+ }
+ for (Stmt *Child : E->children()) {
+ if (Expr *E = dyn_cast_or_null<Expr>(Child))
+ WorkList.push_back(E);
+ }
+ }
+ return false;
+}
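+
+// Illustrative example (not from the original source): naked functions have
+// no prologue, so referencing a parameter from their inline asm is rejected.
+//
+//   __attribute__((naked)) void f(int n) {
+//     asm ("" :: "r"(n));  // error: parameter references are not allowed
+//   }                      //        in naked functions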
+
+/// \brief Returns true if given expression is not compatible with inline
+/// assembly's memory constraint; false otherwise.
+static bool checkExprMemoryConstraintCompat(Sema &S, Expr *E,
+ TargetInfo::ConstraintInfo &Info,
+ bool is_input_expr) {
+ enum {
+ ExprBitfield = 0,
+ ExprVectorElt,
+ ExprGlobalRegVar,
+ ExprSafeType
+ } EType = ExprSafeType;
+
+ // Bitfields, vector elements and global register variables are not
+ // compatible.
+ if (E->refersToBitField())
+ EType = ExprBitfield;
+ else if (E->refersToVectorElement())
+ EType = ExprVectorElt;
+ else if (E->refersToGlobalRegisterVar())
+ EType = ExprGlobalRegVar;
+
+ if (EType != ExprSafeType) {
+ S.Diag(E->getLocStart(), diag::err_asm_non_addr_value_in_memory_constraint)
+ << EType << is_input_expr << Info.getConstraintStr()
+ << E->getSourceRange();
+ return true;
+ }
+
+ return false;
+}
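+
+// Illustrative example (not from the original source): operands that have no
+// addressable storage cannot satisfy a memory ("m") constraint.
+//
+//   struct S { int bf : 3; } s;
+//   asm ("" : "=m" (s.bf));  // error: reference to a bit-field in an
+//                            //        output with a memory constraint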
+
+StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
+ bool IsVolatile, unsigned NumOutputs,
+ unsigned NumInputs, IdentifierInfo **Names,
+ MultiExprArg constraints, MultiExprArg Exprs,
+ Expr *asmString, MultiExprArg clobbers,
+ SourceLocation RParenLoc) {
+ unsigned NumClobbers = clobbers.size();
+ StringLiteral **Constraints =
+ reinterpret_cast<StringLiteral**>(constraints.data());
+ StringLiteral *AsmString = cast<StringLiteral>(asmString);
+ StringLiteral **Clobbers = reinterpret_cast<StringLiteral**>(clobbers.data());
+
+ SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
+
+ // The parser verifies that there is a string literal here.
+ assert(AsmString->isAscii());
+
+  // If we're compiling a CUDA file and the function's attributes indicate that
+  // it's not for this side of the compilation, skip all the checks.
+ if (!DeclAttrsMatchCUDAMode(getLangOpts(), getCurFunctionDecl())) {
+ GCCAsmStmt *NS = new (Context) GCCAsmStmt(
+ Context, AsmLoc, IsSimple, IsVolatile, NumOutputs, NumInputs, Names,
+ Constraints, Exprs.data(), AsmString, NumClobbers, Clobbers, RParenLoc);
+ return NS;
+ }
+
+ for (unsigned i = 0; i != NumOutputs; i++) {
+ StringLiteral *Literal = Constraints[i];
+ assert(Literal->isAscii());
+
+ StringRef OutputName;
+ if (Names[i])
+ OutputName = Names[i]->getName();
+
+ TargetInfo::ConstraintInfo Info(Literal->getString(), OutputName);
+ if (!Context.getTargetInfo().validateOutputConstraint(Info))
+ return StmtError(Diag(Literal->getLocStart(),
+ diag::err_asm_invalid_output_constraint)
+ << Info.getConstraintStr());
+
+ ExprResult ER = CheckPlaceholderExpr(Exprs[i]);
+ if (ER.isInvalid())
+ return StmtError();
+ Exprs[i] = ER.get();
+
+ // Check that the output exprs are valid lvalues.
+ Expr *OutputExpr = Exprs[i];
+
+ // Referring to parameters is not allowed in naked functions.
+ if (CheckNakedParmReference(OutputExpr, *this))
+ return StmtError();
+
+ // Check that the output expression is compatible with memory constraint.
+ if (Info.allowsMemory() &&
+ checkExprMemoryConstraintCompat(*this, OutputExpr, Info, false))
+ return StmtError();
+
+ OutputConstraintInfos.push_back(Info);
+
+ // If this is dependent, just continue.
+ if (OutputExpr->isTypeDependent())
+ continue;
+
+ Expr::isModifiableLvalueResult IsLV =
+ OutputExpr->isModifiableLvalue(Context, /*Loc=*/nullptr);
+ switch (IsLV) {
+ case Expr::MLV_Valid:
+ // Cool, this is an lvalue.
+ break;
+ case Expr::MLV_ArrayType:
+ // This is OK too.
+ break;
+ case Expr::MLV_LValueCast: {
+ const Expr *LVal = OutputExpr->IgnoreParenNoopCasts(Context);
+ if (!getLangOpts().HeinousExtensions) {
+ Diag(LVal->getLocStart(), diag::err_invalid_asm_cast_lvalue)
+ << OutputExpr->getSourceRange();
+ } else {
+ Diag(LVal->getLocStart(), diag::warn_invalid_asm_cast_lvalue)
+ << OutputExpr->getSourceRange();
+ }
+ // Accept, even if we emitted an error diagnostic.
+ break;
+ }
+ case Expr::MLV_IncompleteType:
+ case Expr::MLV_IncompleteVoidType:
+ if (RequireCompleteType(OutputExpr->getLocStart(), Exprs[i]->getType(),
+ diag::err_dereference_incomplete_type))
+ return StmtError();
+ default:
+ return StmtError(Diag(OutputExpr->getLocStart(),
+ diag::err_asm_invalid_lvalue_in_output)
+ << OutputExpr->getSourceRange());
+ }
+
+ unsigned Size = Context.getTypeSize(OutputExpr->getType());
+ if (!Context.getTargetInfo().validateOutputSize(Literal->getString(),
+ Size))
+ return StmtError(Diag(OutputExpr->getLocStart(),
+ diag::err_asm_invalid_output_size)
+ << Info.getConstraintStr());
+ }
+
+ SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
+
+ for (unsigned i = NumOutputs, e = NumOutputs + NumInputs; i != e; i++) {
+ StringLiteral *Literal = Constraints[i];
+ assert(Literal->isAscii());
+
+ StringRef InputName;
+ if (Names[i])
+ InputName = Names[i]->getName();
+
+ TargetInfo::ConstraintInfo Info(Literal->getString(), InputName);
+ if (!Context.getTargetInfo().validateInputConstraint(OutputConstraintInfos,
+ Info)) {
+ return StmtError(Diag(Literal->getLocStart(),
+ diag::err_asm_invalid_input_constraint)
+ << Info.getConstraintStr());
+ }
+
+ ExprResult ER = CheckPlaceholderExpr(Exprs[i]);
+ if (ER.isInvalid())
+ return StmtError();
+ Exprs[i] = ER.get();
+
+ Expr *InputExpr = Exprs[i];
+
+ // Referring to parameters is not allowed in naked functions.
+ if (CheckNakedParmReference(InputExpr, *this))
+ return StmtError();
+
+ // Check that the input expression is compatible with memory constraint.
+ if (Info.allowsMemory() &&
+ checkExprMemoryConstraintCompat(*this, InputExpr, Info, true))
+ return StmtError();
+
+ // Only allow void types for memory constraints.
+ if (Info.allowsMemory() && !Info.allowsRegister()) {
+ if (CheckAsmLValue(InputExpr, *this))
+ return StmtError(Diag(InputExpr->getLocStart(),
+ diag::err_asm_invalid_lvalue_in_input)
+ << Info.getConstraintStr()
+ << InputExpr->getSourceRange());
+ } else if (Info.requiresImmediateConstant() && !Info.allowsRegister()) {
+ if (!InputExpr->isValueDependent()) {
+ llvm::APSInt Result;
+ if (!InputExpr->EvaluateAsInt(Result, Context))
+ return StmtError(
+ Diag(InputExpr->getLocStart(), diag::err_asm_immediate_expected)
+ << Info.getConstraintStr() << InputExpr->getSourceRange());
+ if (!Info.isValidAsmImmediate(Result))
+ return StmtError(Diag(InputExpr->getLocStart(),
+ diag::err_invalid_asm_value_for_constraint)
+ << Result.toString(10) << Info.getConstraintStr()
+ << InputExpr->getSourceRange());
+ }
+
+ } else {
+ ExprResult Result = DefaultFunctionArrayLvalueConversion(Exprs[i]);
+ if (Result.isInvalid())
+ return StmtError();
+
+ Exprs[i] = Result.get();
+ }
+
+ if (Info.allowsRegister()) {
+ if (InputExpr->getType()->isVoidType()) {
+ return StmtError(Diag(InputExpr->getLocStart(),
+ diag::err_asm_invalid_type_in_input)
+ << InputExpr->getType() << Info.getConstraintStr()
+ << InputExpr->getSourceRange());
+ }
+ }
+
+ InputConstraintInfos.push_back(Info);
+
+ const Type *Ty = Exprs[i]->getType().getTypePtr();
+ if (Ty->isDependentType())
+ continue;
+
+ if (!Ty->isVoidType() || !Info.allowsMemory())
+ if (RequireCompleteType(InputExpr->getLocStart(), Exprs[i]->getType(),
+ diag::err_dereference_incomplete_type))
+ return StmtError();
+
+ unsigned Size = Context.getTypeSize(Ty);
+ if (!Context.getTargetInfo().validateInputSize(Literal->getString(),
+ Size))
+ return StmtError(Diag(InputExpr->getLocStart(),
+ diag::err_asm_invalid_input_size)
+ << Info.getConstraintStr());
+ }
+
+ // Check that the clobbers are valid.
+ for (unsigned i = 0; i != NumClobbers; i++) {
+ StringLiteral *Literal = Clobbers[i];
+ assert(Literal->isAscii());
+
+ StringRef Clobber = Literal->getString();
+
+ if (!Context.getTargetInfo().isValidClobber(Clobber))
+ return StmtError(Diag(Literal->getLocStart(),
+ diag::err_asm_unknown_register_name) << Clobber);
+ }
+
+ GCCAsmStmt *NS =
+ new (Context) GCCAsmStmt(Context, AsmLoc, IsSimple, IsVolatile, NumOutputs,
+ NumInputs, Names, Constraints, Exprs.data(),
+ AsmString, NumClobbers, Clobbers, RParenLoc);
+ // Validate the asm string, ensuring it makes sense given the operands we
+ // have.
+ SmallVector<GCCAsmStmt::AsmStringPiece, 8> Pieces;
+ unsigned DiagOffs;
+ if (unsigned DiagID = NS->AnalyzeAsmString(Pieces, Context, DiagOffs)) {
+ Diag(getLocationOfStringLiteralByte(AsmString, DiagOffs), DiagID)
+ << AsmString->getSourceRange();
+ return StmtError();
+ }
+
+ // Validate constraints and modifiers.
+ for (unsigned i = 0, e = Pieces.size(); i != e; ++i) {
+ GCCAsmStmt::AsmStringPiece &Piece = Pieces[i];
+ if (!Piece.isOperand()) continue;
+
+ // Look for the correct constraint index.
+ unsigned ConstraintIdx = Piece.getOperandNo();
+ unsigned NumOperands = NS->getNumOutputs() + NS->getNumInputs();
+
+ // Look for the (ConstraintIdx - NumOperands + 1)th constraint with
+ // modifier '+'.
+ if (ConstraintIdx >= NumOperands) {
+ unsigned I = 0, E = NS->getNumOutputs();
+
+ for (unsigned Cnt = ConstraintIdx - NumOperands; I != E; ++I)
+ if (OutputConstraintInfos[I].isReadWrite() && Cnt-- == 0) {
+ ConstraintIdx = I;
+ break;
+ }
+
+      assert(I != E && "Invalid operand number should have been caught in "
+                       "AnalyzeAsmString");
+ }
+
+ // Now that we have the right indexes go ahead and check.
+ StringLiteral *Literal = Constraints[ConstraintIdx];
+ const Type *Ty = Exprs[ConstraintIdx]->getType().getTypePtr();
+ if (Ty->isDependentType() || Ty->isIncompleteType())
+ continue;
+
+ unsigned Size = Context.getTypeSize(Ty);
+ std::string SuggestedModifier;
+ if (!Context.getTargetInfo().validateConstraintModifier(
+ Literal->getString(), Piece.getModifier(), Size,
+ SuggestedModifier)) {
+ Diag(Exprs[ConstraintIdx]->getLocStart(),
+ diag::warn_asm_mismatched_size_modifier);
+
+ if (!SuggestedModifier.empty()) {
+ auto B = Diag(Piece.getRange().getBegin(),
+ diag::note_asm_missing_constraint_modifier)
+ << SuggestedModifier;
+ SuggestedModifier = "%" + SuggestedModifier + Piece.getString();
+ B.AddFixItHint(FixItHint::CreateReplacement(Piece.getRange(),
+ SuggestedModifier));
+ }
+ }
+ }
+
+ // Validate tied input operands for type mismatches.
+ unsigned NumAlternatives = ~0U;
+ for (unsigned i = 0, e = OutputConstraintInfos.size(); i != e; ++i) {
+ TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
+ StringRef ConstraintStr = Info.getConstraintStr();
+ unsigned AltCount = ConstraintStr.count(',') + 1;
+ if (NumAlternatives == ~0U)
+ NumAlternatives = AltCount;
+ else if (NumAlternatives != AltCount)
+ return StmtError(Diag(NS->getOutputExpr(i)->getLocStart(),
+ diag::err_asm_unexpected_constraint_alternatives)
+ << NumAlternatives << AltCount);
+ }
+ SmallVector<size_t, 4> InputMatchedToOutput(OutputConstraintInfos.size(),
+ ~0U);
+ for (unsigned i = 0, e = InputConstraintInfos.size(); i != e; ++i) {
+ TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
+ StringRef ConstraintStr = Info.getConstraintStr();
+ unsigned AltCount = ConstraintStr.count(',') + 1;
+ if (NumAlternatives == ~0U)
+ NumAlternatives = AltCount;
+ else if (NumAlternatives != AltCount)
+ return StmtError(Diag(NS->getInputExpr(i)->getLocStart(),
+ diag::err_asm_unexpected_constraint_alternatives)
+ << NumAlternatives << AltCount);
+
+ // If this is a tied constraint, verify that the output and input have
+ // either exactly the same type, or that they are int/ptr operands with the
+ // same size (int/long, int*/long, are ok etc).
+ if (!Info.hasTiedOperand()) continue;
+
+ unsigned TiedTo = Info.getTiedOperand();
+ unsigned InputOpNo = i+NumOutputs;
+ Expr *OutputExpr = Exprs[TiedTo];
+ Expr *InputExpr = Exprs[InputOpNo];
+
+ // Make sure no more than one input constraint matches each output.
+ assert(TiedTo < InputMatchedToOutput.size() && "TiedTo value out of range");
+ if (InputMatchedToOutput[TiedTo] != ~0U) {
+ Diag(NS->getInputExpr(i)->getLocStart(),
+ diag::err_asm_input_duplicate_match)
+ << TiedTo;
+ Diag(NS->getInputExpr(InputMatchedToOutput[TiedTo])->getLocStart(),
+ diag::note_asm_input_duplicate_first)
+ << TiedTo;
+ return StmtError();
+ }
+ InputMatchedToOutput[TiedTo] = i;
+
+ if (OutputExpr->isTypeDependent() || InputExpr->isTypeDependent())
+ continue;
+
+ QualType InTy = InputExpr->getType();
+ QualType OutTy = OutputExpr->getType();
+ if (Context.hasSameType(InTy, OutTy))
+ continue; // All types can be tied to themselves.
+
+    // Decide if the input and output are in the same domain (integer/ptr or
+    // floating point).
+ enum AsmDomain {
+ AD_Int, AD_FP, AD_Other
+ } InputDomain, OutputDomain;
+
+ if (InTy->isIntegerType() || InTy->isPointerType())
+ InputDomain = AD_Int;
+ else if (InTy->isRealFloatingType())
+ InputDomain = AD_FP;
+ else
+ InputDomain = AD_Other;
+
+ if (OutTy->isIntegerType() || OutTy->isPointerType())
+ OutputDomain = AD_Int;
+ else if (OutTy->isRealFloatingType())
+ OutputDomain = AD_FP;
+ else
+ OutputDomain = AD_Other;
+
+ // They are ok if they are the same size and in the same domain. This
+ // allows tying things like:
+ // void* to int*
+ // void* to int if they are the same size.
+ // double to long double if they are the same size.
+ //
+ uint64_t OutSize = Context.getTypeSize(OutTy);
+ uint64_t InSize = Context.getTypeSize(InTy);
+ if (OutSize == InSize && InputDomain == OutputDomain &&
+ InputDomain != AD_Other)
+ continue;
+
+ // If the smaller input/output operand is not mentioned in the asm string,
+ // then we can promote the smaller one to a larger input and the asm string
+ // won't notice.
+ bool SmallerValueMentioned = false;
+
+ // If this is a reference to the input and if the input was the smaller
+ // one, then we have to reject this asm.
+ if (isOperandMentioned(InputOpNo, Pieces)) {
+ // This is a use in the asm string of the smaller operand. Since we
+ // codegen this by promoting to a wider value, the asm will get printed
+ // "wrong".
+ SmallerValueMentioned |= InSize < OutSize;
+ }
+ if (isOperandMentioned(TiedTo, Pieces)) {
+ // If this is a reference to the output, and if the output is the larger
+ // value, then it's ok because we'll promote the input to the larger type.
+ SmallerValueMentioned |= OutSize < InSize;
+ }
+
+ // If the smaller value wasn't mentioned in the asm string, and if the
+ // output was a register, just extend the shorter one to the size of the
+ // larger one.
+ if (!SmallerValueMentioned && InputDomain != AD_Other &&
+ OutputConstraintInfos[TiedTo].allowsRegister())
+ continue;
+
+ // Either both of the operands were mentioned or the smaller one was
+ // mentioned. One more special case that we'll allow: if the tied input is
+ // integer, unmentioned, and is a constant, then we'll allow truncating it
+ // down to the size of the destination.
+ if (InputDomain == AD_Int && OutputDomain == AD_Int &&
+ !isOperandMentioned(InputOpNo, Pieces) &&
+ InputExpr->isEvaluatable(Context)) {
+ CastKind castKind =
+ (OutTy->isBooleanType() ? CK_IntegralToBoolean : CK_IntegralCast);
+ InputExpr = ImpCastExprToType(InputExpr, OutTy, castKind).get();
+ Exprs[InputOpNo] = InputExpr;
+ NS->setInputExpr(i, InputExpr);
+ continue;
+ }
+
+ Diag(InputExpr->getLocStart(),
+ diag::err_asm_tying_incompatible_types)
+ << InTy << OutTy << OutputExpr->getSourceRange()
+ << InputExpr->getSourceRange();
+ return StmtError();
+ }
+
+ return NS;
+}
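+
+// Illustrative examples (not from the original source) of the tied-operand
+// checks above ("0" ties the input to output operand 0); assumes 32-bit int
+// and 64-bit long:
+//
+//   int i; long l;
+//   asm ("" : "=r"(l) : "0"(i));  // OK: the unmentioned smaller operand is
+//                                 //     promoted to the larger register size
+//   asm ("mov %1, %0" : "=r"(l) : "0"(i));  // error: tying 'int' to 'long'
+//                                           // when the smaller operand is
+//                                           // mentioned in the asm string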
+
+static void fillInlineAsmTypeInfo(const ASTContext &Context, QualType T,
+ llvm::InlineAsmIdentifierInfo &Info) {
+  // Compute the type size (and the array length, if applicable).
+ Info.Type = Info.Size = Context.getTypeSizeInChars(T).getQuantity();
+ if (T->isArrayType()) {
+ const ArrayType *ATy = Context.getAsArrayType(T);
+ Info.Type = Context.getTypeSizeInChars(ATy->getElementType()).getQuantity();
+ Info.Length = Info.Size / Info.Type;
+ }
+}
+
+ExprResult Sema::LookupInlineAsmIdentifier(CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ UnqualifiedId &Id,
+ llvm::InlineAsmIdentifierInfo &Info,
+ bool IsUnevaluatedContext) {
+ Info.clear();
+
+ if (IsUnevaluatedContext)
+ PushExpressionEvaluationContext(UnevaluatedAbstract,
+ ReuseLambdaContextDecl);
+
+ ExprResult Result = ActOnIdExpression(getCurScope(), SS, TemplateKWLoc, Id,
+ /*trailing lparen*/ false,
+ /*is & operand*/ false,
+ /*CorrectionCandidateCallback=*/nullptr,
+ /*IsInlineAsmIdentifier=*/ true);
+
+ if (IsUnevaluatedContext)
+ PopExpressionEvaluationContext();
+
+ if (!Result.isUsable()) return Result;
+
+ Result = CheckPlaceholderExpr(Result.get());
+ if (!Result.isUsable()) return Result;
+
+ // Referring to parameters is not allowed in naked functions.
+ if (CheckNakedParmReference(Result.get(), *this))
+ return ExprError();
+
+ QualType T = Result.get()->getType();
+
+ if (T->isDependentType()) {
+ return Result;
+ }
+
+ // Any sort of function type is fine.
+ if (T->isFunctionType()) {
+ return Result;
+ }
+
+ // Otherwise, it needs to be a complete type.
+ if (RequireCompleteExprType(Result.get(), diag::err_asm_incomplete_type)) {
+ return ExprError();
+ }
+
+ fillInlineAsmTypeInfo(Context, T, Info);
+
+ // We can work with the expression as long as it's not an r-value.
+ if (!Result.get()->isRValue())
+ Info.IsVarDecl = true;
+
+ return Result;
+}
+
+bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member,
+ unsigned &Offset, SourceLocation AsmLoc) {
+ Offset = 0;
+ SmallVector<StringRef, 2> Members;
+ Member.split(Members, ".");
+
+ LookupResult BaseResult(*this, &Context.Idents.get(Base), SourceLocation(),
+ LookupOrdinaryName);
+
+ if (!LookupName(BaseResult, getCurScope()))
+ return true;
+
+ LookupResult CurrBaseResult(BaseResult);
+
+ for (StringRef NextMember : Members) {
+
+ if (!CurrBaseResult.isSingleResult())
+ return true;
+
+ const RecordType *RT = nullptr;
+ NamedDecl *FoundDecl = CurrBaseResult.getFoundDecl();
+ if (VarDecl *VD = dyn_cast<VarDecl>(FoundDecl))
+ RT = VD->getType()->getAs<RecordType>();
+ else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(FoundDecl)) {
+ MarkAnyDeclReferenced(TD->getLocation(), TD, /*OdrUse=*/false);
+ RT = TD->getUnderlyingType()->getAs<RecordType>();
+ } else if (TypeDecl *TD = dyn_cast<TypeDecl>(FoundDecl))
+ RT = TD->getTypeForDecl()->getAs<RecordType>();
+ else if (FieldDecl *TD = dyn_cast<FieldDecl>(FoundDecl))
+ RT = TD->getType()->getAs<RecordType>();
+ if (!RT)
+ return true;
+
+ if (RequireCompleteType(AsmLoc, QualType(RT, 0),
+ diag::err_asm_incomplete_type))
+ return true;
+
+ LookupResult FieldResult(*this, &Context.Idents.get(NextMember),
+ SourceLocation(), LookupMemberName);
+
+ if (!LookupQualifiedName(FieldResult, RT->getDecl()))
+ return true;
+
+ // FIXME: Handle IndirectFieldDecl?
+ FieldDecl *FD = dyn_cast<FieldDecl>(FieldResult.getFoundDecl());
+ if (!FD)
+ return true;
+
+ CurrBaseResult = FieldResult;
+
+ const ASTRecordLayout &RL = Context.getASTRecordLayout(RT->getDecl());
+ unsigned i = FD->getFieldIndex();
+ CharUnits Result = Context.toCharUnitsFromBits(RL.getFieldOffset(i));
+ Offset += (unsigned)Result.getQuantity();
+ }
+
+ return false;
+}
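+
+// Illustrative example (not from the original source): this resolves the
+// 'base.member' spellings MS inline asm allows, accumulating the byte offset
+// of each field along the path (assuming 4-byte int below):
+//
+//   struct S { int a; int b; } s;
+//   __asm mov eax, s.b  // LookupInlineAsmField("s", "b", ...) sets Offset = 4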
+
+ExprResult
+Sema::LookupInlineAsmVarDeclField(Expr *E, StringRef Member,
+ llvm::InlineAsmIdentifierInfo &Info,
+ SourceLocation AsmLoc) {
+ Info.clear();
+
+ QualType T = E->getType();
+ if (T->isDependentType()) {
+ DeclarationNameInfo NameInfo;
+ NameInfo.setLoc(AsmLoc);
+ NameInfo.setName(&Context.Idents.get(Member));
+ return CXXDependentScopeMemberExpr::Create(
+ Context, E, T, /*IsArrow=*/false, AsmLoc, NestedNameSpecifierLoc(),
+ SourceLocation(),
+ /*FirstQualifierInScope=*/nullptr, NameInfo, /*TemplateArgs=*/nullptr);
+ }
+
+ const RecordType *RT = T->getAs<RecordType>();
+ // FIXME: Diagnose this as field access into a scalar type.
+ if (!RT)
+ return ExprResult();
+
+ LookupResult FieldResult(*this, &Context.Idents.get(Member), AsmLoc,
+ LookupMemberName);
+
+ if (!LookupQualifiedName(FieldResult, RT->getDecl()))
+ return ExprResult();
+
+ // Only normal and indirect field results will work.
+ ValueDecl *FD = dyn_cast<FieldDecl>(FieldResult.getFoundDecl());
+ if (!FD)
+ FD = dyn_cast<IndirectFieldDecl>(FieldResult.getFoundDecl());
+ if (!FD)
+ return ExprResult();
+
+ // Make an Expr to thread through OpDecl.
+ ExprResult Result = BuildMemberReferenceExpr(
+ E, E->getType(), AsmLoc, /*IsArrow=*/false, CXXScopeSpec(),
+ SourceLocation(), nullptr, FieldResult, nullptr, nullptr);
+ if (Result.isInvalid())
+ return Result;
+ Info.OpDecl = Result.get();
+
+ fillInlineAsmTypeInfo(Context, Result.get()->getType(), Info);
+
+ // Fields are "variables" as far as inline assembly is concerned.
+ Info.IsVarDecl = true;
+
+ return Result;
+}
+
+StmtResult Sema::ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
+ ArrayRef<Token> AsmToks,
+ StringRef AsmString,
+ unsigned NumOutputs, unsigned NumInputs,
+ ArrayRef<StringRef> Constraints,
+ ArrayRef<StringRef> Clobbers,
+ ArrayRef<Expr*> Exprs,
+ SourceLocation EndLoc) {
+ bool IsSimple = (NumOutputs != 0 || NumInputs != 0);
+ getCurFunction()->setHasBranchProtectedScope();
+ MSAsmStmt *NS =
+ new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, IsSimple,
+ /*IsVolatile*/ true, AsmToks, NumOutputs, NumInputs,
+ Constraints, Exprs, AsmString,
+ Clobbers, EndLoc);
+ return NS;
+}
+
+LabelDecl *Sema::GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
+ SourceLocation Location,
+ bool AlwaysCreate) {
+ LabelDecl* Label = LookupOrCreateLabel(PP.getIdentifierInfo(ExternalLabelName),
+ Location);
+
+ if (Label->isMSAsmLabel()) {
+ // If we have previously created this label implicitly, mark it as used.
+ Label->markUsed(Context);
+ } else {
+ // Otherwise, insert it, but only resolve it if we have seen the label itself.
+ std::string InternalName;
+ llvm::raw_string_ostream OS(InternalName);
+ // Create an internal name for the label. The name should not be a valid mangled
+ // name, and should be unique. We use a dot to make the name an invalid mangled
+ // name.
+ OS << "__MSASMLABEL_." << MSAsmLabelNameCounter++ << "__";
+ for (auto it = ExternalLabelName.begin(); it != ExternalLabelName.end();
+ ++it) {
+ OS << *it;
+ if (*it == '$') {
+ // We escape '$' in asm strings by replacing it with "$$"
+ OS << '$';
+ }
+ }
+ Label->setMSAsmLabel(OS.str());
+ }
+ if (AlwaysCreate) {
+ // The label might have been created implicitly from a previously encountered
+ // goto statement. So, for both newly created and looked up labels, we mark
+ // them as resolved.
+ Label->setMSAsmLabelResolved();
+ }
+  // Adjust the label's location so we can generate accurate diagnostics.
+ Label->setLocation(Location);
+
+ return Label;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaStmtAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaStmtAttr.cpp
new file mode 100644
index 0000000..984bd07
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaStmtAttr.cpp
@@ -0,0 +1,241 @@
+//===--- SemaStmtAttr.cpp - Statement Attribute Handling ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements stmt-related attribute processing.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Sema/DelayedDiagnostic.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/LoopHint.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "llvm/ADT/StringExtras.h"
+
+using namespace clang;
+using namespace sema;
+
+static Attr *handleFallThroughAttr(Sema &S, Stmt *St, const AttributeList &A,
+ SourceRange Range) {
+ if (!isa<NullStmt>(St)) {
+ S.Diag(A.getRange().getBegin(), diag::err_fallthrough_attr_wrong_target)
+ << St->getLocStart();
+ if (isa<SwitchCase>(St)) {
+ SourceLocation L = S.getLocForEndOfToken(Range.getEnd());
+ S.Diag(L, diag::note_fallthrough_insert_semi_fixit)
+ << FixItHint::CreateInsertion(L, ";");
+ }
+ return nullptr;
+ }
+ if (S.getCurFunction()->SwitchStack.empty()) {
+ S.Diag(A.getRange().getBegin(), diag::err_fallthrough_attr_outside_switch);
+ return nullptr;
+ }
+ return ::new (S.Context) FallThroughAttr(A.getRange(), S.Context,
+ A.getAttributeSpellingListIndex());
+}
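+
+// Illustrative example (not from the original source): the attribute must
+// annotate a null statement inside a switch, directly before the next label.
+//
+//   switch (n) {
+//   case 0:
+//     f();
+//     [[clang::fallthrough]];      // OK: attached to an empty statement
+//   case 1:
+//     [[clang::fallthrough]] g();  // error: wrong target for the attribute
+//   }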
+
+static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const AttributeList &A,
+ SourceRange) {
+ IdentifierLoc *PragmaNameLoc = A.getArgAsIdent(0);
+ IdentifierLoc *OptionLoc = A.getArgAsIdent(1);
+ IdentifierLoc *StateLoc = A.getArgAsIdent(2);
+ Expr *ValueExpr = A.getArgAsExpr(3);
+
+ bool PragmaUnroll = PragmaNameLoc->Ident->getName() == "unroll";
+ bool PragmaNoUnroll = PragmaNameLoc->Ident->getName() == "nounroll";
+ if (St->getStmtClass() != Stmt::DoStmtClass &&
+ St->getStmtClass() != Stmt::ForStmtClass &&
+ St->getStmtClass() != Stmt::CXXForRangeStmtClass &&
+ St->getStmtClass() != Stmt::WhileStmtClass) {
+ const char *Pragma =
+ llvm::StringSwitch<const char *>(PragmaNameLoc->Ident->getName())
+ .Case("unroll", "#pragma unroll")
+ .Case("nounroll", "#pragma nounroll")
+ .Default("#pragma clang loop");
+ S.Diag(St->getLocStart(), diag::err_pragma_loop_precedes_nonloop) << Pragma;
+ return nullptr;
+ }
+
+ LoopHintAttr::Spelling Spelling;
+ LoopHintAttr::OptionType Option;
+ LoopHintAttr::LoopHintState State;
+ if (PragmaNoUnroll) {
+ // #pragma nounroll
+ Spelling = LoopHintAttr::Pragma_nounroll;
+ Option = LoopHintAttr::Unroll;
+ State = LoopHintAttr::Disable;
+ } else if (PragmaUnroll) {
+ Spelling = LoopHintAttr::Pragma_unroll;
+ if (ValueExpr) {
+ // #pragma unroll N
+ Option = LoopHintAttr::UnrollCount;
+ State = LoopHintAttr::Numeric;
+ } else {
+ // #pragma unroll
+ Option = LoopHintAttr::Unroll;
+ State = LoopHintAttr::Enable;
+ }
+ } else {
+ // #pragma clang loop ...
+ Spelling = LoopHintAttr::Pragma_clang_loop;
+ assert(OptionLoc && OptionLoc->Ident &&
+ "Attribute must have valid option info.");
+ Option = llvm::StringSwitch<LoopHintAttr::OptionType>(
+ OptionLoc->Ident->getName())
+ .Case("vectorize", LoopHintAttr::Vectorize)
+ .Case("vectorize_width", LoopHintAttr::VectorizeWidth)
+ .Case("interleave", LoopHintAttr::Interleave)
+ .Case("interleave_count", LoopHintAttr::InterleaveCount)
+ .Case("unroll", LoopHintAttr::Unroll)
+ .Case("unroll_count", LoopHintAttr::UnrollCount)
+ .Default(LoopHintAttr::Vectorize);
+ if (Option == LoopHintAttr::VectorizeWidth ||
+ Option == LoopHintAttr::InterleaveCount ||
+ Option == LoopHintAttr::UnrollCount) {
+ assert(ValueExpr && "Attribute must have a valid value expression.");
+ if (S.CheckLoopHintExpr(ValueExpr, St->getLocStart()))
+ return nullptr;
+ State = LoopHintAttr::Numeric;
+ } else if (Option == LoopHintAttr::Vectorize ||
+ Option == LoopHintAttr::Interleave ||
+ Option == LoopHintAttr::Unroll) {
+ assert(StateLoc && StateLoc->Ident && "Loop hint must have an argument");
+ if (StateLoc->Ident->isStr("disable"))
+ State = LoopHintAttr::Disable;
+ else if (StateLoc->Ident->isStr("assume_safety"))
+ State = LoopHintAttr::AssumeSafety;
+ else if (StateLoc->Ident->isStr("full"))
+ State = LoopHintAttr::Full;
+ else if (StateLoc->Ident->isStr("enable"))
+ State = LoopHintAttr::Enable;
+ else
+ llvm_unreachable("bad loop hint argument");
+ } else
+ llvm_unreachable("bad loop hint");
+ }
+
+ return LoopHintAttr::CreateImplicit(S.Context, Spelling, Option, State,
+ ValueExpr, A.getRange());
+}
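+
+// Illustrative examples (not from the original source) of the three pragma
+// spellings mapped above; each one annotates the loop that follows it:
+//
+//   #pragma clang loop vectorize(enable) unroll_count(4)
+//   #pragma unroll 8  // -> UnrollCount, Numeric
+//   #pragma nounroll  // -> Unroll, Disable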
+
+static void
+CheckForIncompatibleAttributes(Sema &S,
+ const SmallVectorImpl<const Attr *> &Attrs) {
+  // There are 3 categories of loop hint attributes: vectorize, interleave,
+  // and unroll. Each comes in two variants: a state form and a numeric form.
+  // The state form selectively defaults/enables/disables the transformation
+  // for the loop (for unroll, default indicates full unrolling rather than
+  // enabling the transformation). The numeric form provides an integer hint
+  // (for example, unroll count) to the transformer. The following array
+  // accumulates the hints encountered while iterating through the attributes
+  // to check for compatibility.
+ struct {
+ const LoopHintAttr *StateAttr;
+ const LoopHintAttr *NumericAttr;
+ } HintAttrs[] = {{nullptr, nullptr}, {nullptr, nullptr}, {nullptr, nullptr}};
+
+ for (const auto *I : Attrs) {
+ const LoopHintAttr *LH = dyn_cast<LoopHintAttr>(I);
+
+    // Skip non-loop-hint attributes.
+ if (!LH)
+ continue;
+
+ LoopHintAttr::OptionType Option = LH->getOption();
+ enum { Vectorize, Interleave, Unroll } Category;
+ switch (Option) {
+ case LoopHintAttr::Vectorize:
+ case LoopHintAttr::VectorizeWidth:
+ Category = Vectorize;
+ break;
+ case LoopHintAttr::Interleave:
+ case LoopHintAttr::InterleaveCount:
+ Category = Interleave;
+ break;
+ case LoopHintAttr::Unroll:
+ case LoopHintAttr::UnrollCount:
+ Category = Unroll;
+ break;
+    }
+
+ auto &CategoryState = HintAttrs[Category];
+ const LoopHintAttr *PrevAttr;
+ if (Option == LoopHintAttr::Vectorize ||
+ Option == LoopHintAttr::Interleave || Option == LoopHintAttr::Unroll) {
+ // Enable|Disable|AssumeSafety hint. For example, vectorize(enable).
+ PrevAttr = CategoryState.StateAttr;
+ CategoryState.StateAttr = LH;
+ } else {
+ // Numeric hint. For example, vectorize_width(8).
+ PrevAttr = CategoryState.NumericAttr;
+ CategoryState.NumericAttr = LH;
+ }
+
+ PrintingPolicy Policy(S.Context.getLangOpts());
+ SourceLocation OptionLoc = LH->getRange().getBegin();
+ if (PrevAttr)
+ // Cannot specify same type of attribute twice.
+ S.Diag(OptionLoc, diag::err_pragma_loop_compatibility)
+ << /*Duplicate=*/true << PrevAttr->getDiagnosticName(Policy)
+ << LH->getDiagnosticName(Policy);
+
+ if (CategoryState.StateAttr && CategoryState.NumericAttr &&
+ (Category == Unroll ||
+ CategoryState.StateAttr->getState() == LoopHintAttr::Disable)) {
+ // Disable hints are not compatible with numeric hints of the same
+ // category. As a special case, numeric unroll hints are also not
+ // compatible with enable or full form of the unroll pragma because these
+ // directives indicate full unrolling.
+ S.Diag(OptionLoc, diag::err_pragma_loop_compatibility)
+ << /*Duplicate=*/false
+ << CategoryState.StateAttr->getDiagnosticName(Policy)
+ << CategoryState.NumericAttr->getDiagnosticName(Policy);
+ }
+ }
+}
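+
+// Illustrative examples (not from the original source): on a single loop, a
+// disable-state hint conflicts with a numeric hint of the same category, and
+// any unroll state hint conflicts with unroll_count:
+//
+//   #pragma clang loop vectorize(disable) vectorize_width(4)  // error
+//   #pragma clang loop unroll(full)
+//   #pragma clang loop unroll_count(4)  // error: incompatible with the above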
+
+static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const AttributeList &A,
+ SourceRange Range) {
+ switch (A.getKind()) {
+ case AttributeList::UnknownAttribute:
+ S.Diag(A.getLoc(), A.isDeclspecAttribute() ?
+ diag::warn_unhandled_ms_attribute_ignored :
+ diag::warn_unknown_attribute_ignored) << A.getName();
+ return nullptr;
+ case AttributeList::AT_FallThrough:
+ return handleFallThroughAttr(S, St, A, Range);
+ case AttributeList::AT_LoopHint:
+ return handleLoopHintAttr(S, St, A, Range);
+ default:
+    // If we're here, we parsed a known attribute, but didn't recognize it as
+    // a statement attribute => it is a declaration attribute.
+ S.Diag(A.getRange().getBegin(), diag::err_attribute_invalid_on_stmt)
+ << A.getName() << St->getLocStart();
+ return nullptr;
+ }
+}
+
+StmtResult Sema::ProcessStmtAttributes(Stmt *S, AttributeList *AttrList,
+ SourceRange Range) {
+ SmallVector<const Attr*, 8> Attrs;
+ for (const AttributeList* l = AttrList; l; l = l->getNext()) {
+ if (Attr *a = ProcessStmtAttribute(*this, S, *l, Range))
+ Attrs.push_back(a);
+ }
+
+ CheckForIncompatibleAttributes(*this, Attrs);
+
+ if (Attrs.empty())
+ return S;
+
+ return ActOnAttributedStmt(Range.getBegin(), Attrs, S);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
new file mode 100644
index 0000000..6cc8588
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
@@ -0,0 +1,8466 @@
+//===------- SemaTemplate.cpp - Semantic Analysis for C++ Templates -------===/
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===/
+//
+// This file implements semantic analysis for C++ templates.
+//===----------------------------------------------------------------------===/
+
+#include "TreeTransform.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/TypeVisitor.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Template.h"
+#include "clang/Sema/TemplateDeduction.h"
+#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+using namespace clang;
+using namespace sema;
+
+// Exported for use by Parser.
+SourceRange
+clang::getTemplateParamsRange(TemplateParameterList const * const *Ps,
+ unsigned N) {
+ if (!N) return SourceRange();
+ return SourceRange(Ps[0]->getTemplateLoc(), Ps[N-1]->getRAngleLoc());
+}
+
+/// \brief Determine whether the declaration found is acceptable as the name
+/// of a template and, if so, return that template declaration. Otherwise,
+/// returns NULL.
+static NamedDecl *isAcceptableTemplateName(ASTContext &Context,
+ NamedDecl *Orig,
+ bool AllowFunctionTemplates) {
+ NamedDecl *D = Orig->getUnderlyingDecl();
+
+ if (isa<TemplateDecl>(D)) {
+ if (!AllowFunctionTemplates && isa<FunctionTemplateDecl>(D))
+ return nullptr;
+
+ return Orig;
+ }
+
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) {
+ // C++ [temp.local]p1:
+ // Like normal (non-template) classes, class templates have an
+ // injected-class-name (Clause 9). The injected-class-name
+ // can be used with or without a template-argument-list. When
+ // it is used without a template-argument-list, it is
+ // equivalent to the injected-class-name followed by the
+ // template-parameters of the class template enclosed in
+ // <>. When it is used with a template-argument-list, it
+ // refers to the specified class template specialization,
+ // which could be the current specialization or another
+ // specialization.
+ if (Record->isInjectedClassName()) {
+ Record = cast<CXXRecordDecl>(Record->getDeclContext());
+ if (Record->getDescribedClassTemplate())
+ return Record->getDescribedClassTemplate();
+
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Record))
+ return Spec->getSpecializedTemplate();
+ }
+
+ return nullptr;
+ }
+
+ return nullptr;
+}
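+
+// Illustrative example (not from the original source) of the injected-class-
+// name case handled above: inside a class template, the class's own name used
+// as a template-name resolves back to the template itself.
+//
+//   template <typename T> struct A {
+//     A<int> *p;  // 'A' is the injected-class-name, mapped back to the
+//   };            // class template 'A'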
+
+void Sema::FilterAcceptableTemplateNames(LookupResult &R,
+ bool AllowFunctionTemplates) {
+ // The set of class templates we've already seen.
+ llvm::SmallPtrSet<ClassTemplateDecl *, 8> ClassTemplates;
+ LookupResult::Filter filter = R.makeFilter();
+ while (filter.hasNext()) {
+ NamedDecl *Orig = filter.next();
+ NamedDecl *Repl = isAcceptableTemplateName(Context, Orig,
+ AllowFunctionTemplates);
+ if (!Repl)
+ filter.erase();
+ else if (Repl != Orig) {
+
+ // C++ [temp.local]p3:
+ // A lookup that finds an injected-class-name (10.2) can result in an
+ // ambiguity in certain cases (for example, if it is found in more than
+ // one base class). If all of the injected-class-names that are found
+ // refer to specializations of the same class template, and if the name
+ // is used as a template-name, the reference refers to the class
+ // template itself and not a specialization thereof, and is not
+ // ambiguous.
+ if (ClassTemplateDecl *ClassTmpl = dyn_cast<ClassTemplateDecl>(Repl))
+ if (!ClassTemplates.insert(ClassTmpl).second) {
+ filter.erase();
+ continue;
+ }
+
+ // FIXME: we promote access to public here as a workaround to
+ // the fact that LookupResult doesn't let us remember that we
+ // found this template through a particular injected class name,
+ // which means we end up doing nasty things to the invariants.
+ // Pretending that access is public is *much* safer.
+ filter.replace(Repl, AS_public);
+ }
+ }
+ filter.done();
+}
+
+bool Sema::hasAnyAcceptableTemplateNames(LookupResult &R,
+ bool AllowFunctionTemplates) {
+ for (LookupResult::iterator I = R.begin(), IEnd = R.end(); I != IEnd; ++I)
+ if (isAcceptableTemplateName(Context, *I, AllowFunctionTemplates))
+ return true;
+
+ return false;
+}
+
+TemplateNameKind Sema::isTemplateName(Scope *S,
+ CXXScopeSpec &SS,
+ bool hasTemplateKeyword,
+ UnqualifiedId &Name,
+ ParsedType ObjectTypePtr,
+ bool EnteringContext,
+ TemplateTy &TemplateResult,
+ bool &MemberOfUnknownSpecialization) {
+ assert(getLangOpts().CPlusPlus && "No template names in C!");
+
+ DeclarationName TName;
+ MemberOfUnknownSpecialization = false;
+
+ switch (Name.getKind()) {
+ case UnqualifiedId::IK_Identifier:
+ TName = DeclarationName(Name.Identifier);
+ break;
+
+ case UnqualifiedId::IK_OperatorFunctionId:
+ TName = Context.DeclarationNames.getCXXOperatorName(
+ Name.OperatorFunctionId.Operator);
+ break;
+
+ case UnqualifiedId::IK_LiteralOperatorId:
+ TName = Context.DeclarationNames.getCXXLiteralOperatorName(Name.Identifier);
+ break;
+
+ default:
+ return TNK_Non_template;
+ }
+
+ QualType ObjectType = ObjectTypePtr.get();
+
+ LookupResult R(*this, TName, Name.getLocStart(), LookupOrdinaryName);
+ LookupTemplateName(R, S, SS, ObjectType, EnteringContext,
+ MemberOfUnknownSpecialization);
+ if (R.empty()) return TNK_Non_template;
+ if (R.isAmbiguous()) {
+ // Suppress diagnostics; we'll redo this lookup later.
+ R.suppressDiagnostics();
+
+ // FIXME: we might have ambiguous templates, in which case we
+ // should at least parse them properly!
+ return TNK_Non_template;
+ }
+
+ TemplateName Template;
+ TemplateNameKind TemplateKind;
+
+ unsigned ResultCount = R.end() - R.begin();
+ if (ResultCount > 1) {
+ // We assume that we'll preserve the qualifier from a function
+ // template name in other ways.
+ Template = Context.getOverloadedTemplateName(R.begin(), R.end());
+ TemplateKind = TNK_Function_template;
+
+ // We'll do this lookup again later.
+ R.suppressDiagnostics();
+ } else {
+ TemplateDecl *TD = cast<TemplateDecl>((*R.begin())->getUnderlyingDecl());
+
+ if (SS.isSet() && !SS.isInvalid()) {
+ NestedNameSpecifier *Qualifier = SS.getScopeRep();
+ Template = Context.getQualifiedTemplateName(Qualifier,
+ hasTemplateKeyword, TD);
+ } else {
+ Template = TemplateName(TD);
+ }
+
+ if (isa<FunctionTemplateDecl>(TD)) {
+ TemplateKind = TNK_Function_template;
+
+ // We'll do this lookup again later.
+ R.suppressDiagnostics();
+ } else {
+ assert(isa<ClassTemplateDecl>(TD) || isa<TemplateTemplateParmDecl>(TD) ||
+ isa<TypeAliasTemplateDecl>(TD) || isa<VarTemplateDecl>(TD) ||
+ isa<BuiltinTemplateDecl>(TD));
+ TemplateKind =
+ isa<VarTemplateDecl>(TD) ? TNK_Var_template : TNK_Type_template;
+ }
+ }
+
+ TemplateResult = TemplateTy::make(Template);
+ return TemplateKind;
+}
+
+bool Sema::DiagnoseUnknownTemplateName(const IdentifierInfo &II,
+ SourceLocation IILoc,
+ Scope *S,
+ const CXXScopeSpec *SS,
+ TemplateTy &SuggestedTemplate,
+ TemplateNameKind &SuggestedKind) {
+ // We can't recover unless there's a dependent scope specifier preceding the
+ // template name.
+ // FIXME: Typo correction?
+ if (!SS || !SS->isSet() || !isDependentScopeSpecifier(*SS) ||
+ computeDeclContext(*SS))
+ return false;
+
+ // The code is missing a 'template' keyword prior to the dependent template
+ // name.
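+ // For example, 'T::Rebind<U>' must be written 'T::template Rebind<U>'
+ // when T is a template parameter.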
+ NestedNameSpecifier *Qualifier = (NestedNameSpecifier*)SS->getScopeRep();
+ Diag(IILoc, diag::err_template_kw_missing)
+ << Qualifier << II.getName()
+ << FixItHint::CreateInsertion(IILoc, "template ");
+ SuggestedTemplate
+ = TemplateTy::make(Context.getDependentTemplateName(Qualifier, &II));
+ SuggestedKind = TNK_Dependent_template_name;
+ return true;
+}
+
+void Sema::LookupTemplateName(LookupResult &Found,
+ Scope *S, CXXScopeSpec &SS,
+ QualType ObjectType,
+ bool EnteringContext,
+ bool &MemberOfUnknownSpecialization) {
+ // Determine where to perform name lookup
+ MemberOfUnknownSpecialization = false;
+ DeclContext *LookupCtx = nullptr;
+ bool isDependent = false;
+ if (!ObjectType.isNull()) {
+ // This nested-name-specifier occurs in a member access expression, e.g.,
+ // x->B::f, and we are looking into the type of the object.
+ assert(!SS.isSet() && "ObjectType and scope specifier cannot coexist");
+ LookupCtx = computeDeclContext(ObjectType);
+ isDependent = ObjectType->isDependentType();
+ assert((isDependent || !ObjectType->isIncompleteType() ||
+ ObjectType->castAs<TagType>()->isBeingDefined()) &&
+ "Caller should have completed object type");
+
+ // Template names cannot appear inside an Objective-C class or object type.
+ if (ObjectType->isObjCObjectOrInterfaceType()) {
+ Found.clear();
+ return;
+ }
+ } else if (SS.isSet()) {
+ // This nested-name-specifier occurs after another nested-name-specifier,
+ // so look into the context associated with the prior nested-name-specifier.
+ LookupCtx = computeDeclContext(SS, EnteringContext);
+ isDependent = isDependentScopeSpecifier(SS);
+
+ // The declaration context must be complete.
+ if (LookupCtx && RequireCompleteDeclContext(SS, LookupCtx))
+ return;
+ }
+
+ bool ObjectTypeSearchedInScope = false;
+ bool AllowFunctionTemplatesInLookup = true;
+ if (LookupCtx) {
+ // Perform "qualified" name lookup into the declaration context we
+ // computed, which is either the type of the base of a member access
+ // expression or the declaration context associated with a prior
+ // nested-name-specifier.
+ LookupQualifiedName(Found, LookupCtx);
+ if (!ObjectType.isNull() && Found.empty()) {
+ // C++ [basic.lookup.classref]p1:
+ // In a class member access expression (5.2.5), if the . or -> token is
+ // immediately followed by an identifier followed by a <, the
+ // identifier must be looked up to determine whether the < is the
+ // beginning of a template argument list (14.2) or a less-than operator.
+ // The identifier is first looked up in the class of the object
+ // expression. If the identifier is not found, it is then looked up in
+ // the context of the entire postfix-expression and shall name a class
+ // or function template.
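+ // For example, in 'p->f<int>()', 'f' is looked up in the class of '*p'
+ // first, and only if that fails in the enclosing scope, to decide
+ // whether '<' begins a template argument list.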
+ if (S) LookupName(Found, S);
+ ObjectTypeSearchedInScope = true;
+ AllowFunctionTemplatesInLookup = false;
+ }
+ } else if (isDependent && (!S || ObjectType.isNull())) {
+ // We cannot look into a dependent object type or nested name
+ // specifier.
+ MemberOfUnknownSpecialization = true;
+ return;
+ } else {
+ // Perform unqualified name lookup in the current scope.
+ LookupName(Found, S);
+
+ if (!ObjectType.isNull())
+ AllowFunctionTemplatesInLookup = false;
+ }
+
+ if (Found.empty() && !isDependent) {
+ // If we did not find any names, attempt to correct any typos.
+ DeclarationName Name = Found.getLookupName();
+ Found.clear();
+ // Simple filter callback that, for keywords, only accepts the C++ *_cast names.
+ auto FilterCCC = llvm::make_unique<CorrectionCandidateCallback>();
+ FilterCCC->WantTypeSpecifiers = false;
+ FilterCCC->WantExpressionKeywords = false;
+ FilterCCC->WantRemainingKeywords = false;
+ FilterCCC->WantCXXNamedCasts = true;
+ if (TypoCorrection Corrected = CorrectTypo(
+ Found.getLookupNameInfo(), Found.getLookupKind(), S, &SS,
+ std::move(FilterCCC), CTK_ErrorRecovery, LookupCtx)) {
+ Found.setLookupName(Corrected.getCorrection());
+ if (auto *ND = Corrected.getFoundDecl())
+ Found.addDecl(ND);
+ FilterAcceptableTemplateNames(Found);
+ if (!Found.empty()) {
+ if (LookupCtx) {
+ std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
+ bool DroppedSpecifier = Corrected.WillReplaceSpecifier() &&
+ Name.getAsString() == CorrectedStr;
+ diagnoseTypo(Corrected, PDiag(diag::err_no_member_template_suggest)
+ << Name << LookupCtx << DroppedSpecifier
+ << SS.getRange());
+ } else {
+ diagnoseTypo(Corrected, PDiag(diag::err_no_template_suggest) << Name);
+ }
+ }
+ } else {
+ Found.setLookupName(Name);
+ }
+ }
+
+ FilterAcceptableTemplateNames(Found, AllowFunctionTemplatesInLookup);
+ if (Found.empty()) {
+ if (isDependent)
+ MemberOfUnknownSpecialization = true;
+ return;
+ }
+
+ if (S && !ObjectType.isNull() && !ObjectTypeSearchedInScope &&
+ !getLangOpts().CPlusPlus11) {
+ // C++03 [basic.lookup.classref]p1:
+ // [...] If the lookup in the class of the object expression finds a
+ // template, the name is also looked up in the context of the entire
+ // postfix-expression and [...]
+ //
+ // Note: C++11 does not perform this second lookup.
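+ // For example, in 'p->X<int>::f()', if the class of '*p' declares a
+ // member template 'X' and a different class template 'X' is visible in
+ // the enclosing scope, the two lookups disagree; we diagnose this and
+ // recover by using the member template.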
+ LookupResult FoundOuter(*this, Found.getLookupName(), Found.getNameLoc(),
+ LookupOrdinaryName);
+ LookupName(FoundOuter, S);
+ FilterAcceptableTemplateNames(FoundOuter, /*AllowFunctionTemplates=*/false);
+
+ if (FoundOuter.empty()) {
+ // - if the name is not found, the name found in the class of the
+ // object expression is used, otherwise
+ } else if (!FoundOuter.getAsSingle<ClassTemplateDecl>() ||
+ FoundOuter.isAmbiguous()) {
+ // - if the name is found in the context of the entire
+ // postfix-expression and does not name a class template, the name
+ // found in the class of the object expression is used, otherwise
+ FoundOuter.clear();
+ } else if (!Found.isSuppressingDiagnostics()) {
+ // - if the name found is a class template, it must refer to the same
+ // entity as the one found in the class of the object expression,
+ // otherwise the program is ill-formed.
+ if (!Found.isSingleResult() ||
+ Found.getFoundDecl()->getCanonicalDecl()
+ != FoundOuter.getFoundDecl()->getCanonicalDecl()) {
+ Diag(Found.getNameLoc(),
+ diag::ext_nested_name_member_ref_lookup_ambiguous)
+ << Found.getLookupName()
+ << ObjectType;
+ Diag(Found.getRepresentativeDecl()->getLocation(),
+ diag::note_ambig_member_ref_object_type)
+ << ObjectType;
+ Diag(FoundOuter.getFoundDecl()->getLocation(),
+ diag::note_ambig_member_ref_scope);
+
+ // Recover by taking the template that we found in the object
+ // expression's type.
+ }
+ }
+ }
+}
+
+/// ActOnDependentIdExpression - Handle a dependent id-expression that
+/// was just parsed. This is only possible with an explicit scope
+/// specifier naming a dependent type.
+ExprResult
+Sema::ActOnDependentIdExpression(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ bool isAddressOfOperand,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ DeclContext *DC = getFunctionLevelDeclContext();
+
+ if (!isAddressOfOperand &&
+ isa<CXXMethodDecl>(DC) &&
+ cast<CXXMethodDecl>(DC)->isInstance()) {
+ QualType ThisType = cast<CXXMethodDecl>(DC)->getThisType(Context);
+
+ // Since the 'this' expression is synthesized, we don't need to
+ // perform the double-lookup check.
+ NamedDecl *FirstQualifierInScope = nullptr;
+
+ return CXXDependentScopeMemberExpr::Create(
+ Context, /*This*/ nullptr, ThisType, /*IsArrow*/ true,
+ /*Op*/ SourceLocation(), SS.getWithLocInContext(Context), TemplateKWLoc,
+ FirstQualifierInScope, NameInfo, TemplateArgs);
+ }
+
+ return BuildDependentDeclRefExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs);
+}
+
+ExprResult
+Sema::BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ return DependentScopeDeclRefExpr::Create(
+ Context, SS.getWithLocInContext(Context), TemplateKWLoc, NameInfo,
+ TemplateArgs);
+}
+
+/// DiagnoseTemplateParameterShadow - Produce a diagnostic complaining
+/// that the template parameter 'PrevDecl' is being shadowed by a new
+/// declaration at location Loc.
+void Sema::DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl) {
+ assert(PrevDecl->isTemplateParameter() && "Not a template parameter");
+
+ // Microsoft Visual C++ permits template parameters to be shadowed.
+ if (getLangOpts().MicrosoftExt)
+ return;
+
+ // C++ [temp.local]p4:
+ // A template-parameter shall not be redeclared within its
+ // scope (including nested scopes).
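+ // For example, 'template<typename T> struct X { int T; };' is
+ // ill-formed: the declaration of the member 'T' shadows the template
+ // parameter.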
+ Diag(Loc, diag::err_template_param_shadow)
+ << cast<NamedDecl>(PrevDecl)->getDeclName();
+ Diag(PrevDecl->getLocation(), diag::note_template_param_here);
+ return;
+}
+
+/// AdjustDeclIfTemplate - If the given decl happens to be a template, reset
+/// the parameter D to reference the templated declaration and return a pointer
+/// to the template declaration. Otherwise, do nothing to D and return null.
+TemplateDecl *Sema::AdjustDeclIfTemplate(Decl *&D) {
+ if (TemplateDecl *Temp = dyn_cast_or_null<TemplateDecl>(D)) {
+ D = Temp->getTemplatedDecl();
+ return Temp;
+ }
+ return nullptr;
+}
+
+ParsedTemplateArgument ParsedTemplateArgument::getTemplatePackExpansion(
+ SourceLocation EllipsisLoc) const {
+ assert(Kind == Template &&
+ "Only template template arguments can be pack expansions here");
+ assert(getAsTemplate().get().containsUnexpandedParameterPack() &&
+ "Template template argument pack expansion without packs");
+ ParsedTemplateArgument Result(*this);
+ Result.EllipsisLoc = EllipsisLoc;
+ return Result;
+}
+
+static TemplateArgumentLoc translateTemplateArgument(Sema &SemaRef,
+ const ParsedTemplateArgument &Arg) {
+
+ switch (Arg.getKind()) {
+ case ParsedTemplateArgument::Type: {
+ TypeSourceInfo *DI;
+ QualType T = SemaRef.GetTypeFromParser(Arg.getAsType(), &DI);
+ if (!DI)
+ DI = SemaRef.Context.getTrivialTypeSourceInfo(T, Arg.getLocation());
+ return TemplateArgumentLoc(TemplateArgument(T), DI);
+ }
+
+ case ParsedTemplateArgument::NonType: {
+ Expr *E = static_cast<Expr *>(Arg.getAsExpr());
+ return TemplateArgumentLoc(TemplateArgument(E), E);
+ }
+
+ case ParsedTemplateArgument::Template: {
+ TemplateName Template = Arg.getAsTemplate().get();
+ TemplateArgument TArg;
+ if (Arg.getEllipsisLoc().isValid())
+ TArg = TemplateArgument(Template, Optional<unsigned int>());
+ else
+ TArg = Template;
+ return TemplateArgumentLoc(TArg,
+ Arg.getScopeSpec().getWithLocInContext(
+ SemaRef.Context),
+ Arg.getLocation(),
+ Arg.getEllipsisLoc());
+ }
+ }
+
+ llvm_unreachable("Unhandled parsed template argument");
+}
+
+/// \brief Translates template arguments as provided by the parser
+/// into template arguments used by semantic analysis.
+void Sema::translateTemplateArguments(const ASTTemplateArgsPtr &TemplateArgsIn,
+ TemplateArgumentListInfo &TemplateArgs) {
+ for (unsigned I = 0, Last = TemplateArgsIn.size(); I != Last; ++I)
+ TemplateArgs.addArgument(translateTemplateArgument(*this,
+ TemplateArgsIn[I]));
+}
+
+static void maybeDiagnoseTemplateParameterShadow(Sema &SemaRef, Scope *S,
+ SourceLocation Loc,
+ IdentifierInfo *Name) {
+ NamedDecl *PrevDecl = SemaRef.LookupSingleName(
+ S, Name, Loc, Sema::LookupOrdinaryName, Sema::ForRedeclaration);
+ if (PrevDecl && PrevDecl->isTemplateParameter())
+ SemaRef.DiagnoseTemplateParameterShadow(Loc, PrevDecl);
+}
+
+/// ActOnTypeParameter - Called when a C++ template type parameter
+/// (e.g., "typename T") has been parsed. Typename specifies whether
+/// the keyword "typename" was used to declare the type parameter
+/// (otherwise, "class" was used), and KeyLoc is the location of the
+/// "class" or "typename" keyword. ParamName is the name of the
+/// parameter (NULL indicates an unnamed template parameter) and
+/// ParamNameLoc is the location of the parameter name (if any).
+/// If the type parameter has a default argument, it is checked and
+/// attached to the parameter here.
+Decl *Sema::ActOnTypeParameter(Scope *S, bool Typename,
+ SourceLocation EllipsisLoc,
+ SourceLocation KeyLoc,
+ IdentifierInfo *ParamName,
+ SourceLocation ParamNameLoc,
+ unsigned Depth, unsigned Position,
+ SourceLocation EqualLoc,
+ ParsedType DefaultArg) {
+ assert(S->isTemplateParamScope() &&
+ "Template type parameter not in template parameter scope!");
+ bool Invalid = false;
+
+ SourceLocation Loc = ParamNameLoc;
+ if (!ParamName)
+ Loc = KeyLoc;
+
+ bool IsParameterPack = EllipsisLoc.isValid();
+ TemplateTypeParmDecl *Param
+ = TemplateTypeParmDecl::Create(Context, Context.getTranslationUnitDecl(),
+ KeyLoc, Loc, Depth, Position, ParamName,
+ Typename, IsParameterPack);
+ Param->setAccess(AS_public);
+ if (Invalid)
+ Param->setInvalidDecl();
+
+ if (ParamName) {
+ maybeDiagnoseTemplateParameterShadow(*this, S, ParamNameLoc, ParamName);
+
+ // Add the template parameter into the current scope.
+ S->AddDecl(Param);
+ IdResolver.AddDecl(Param);
+ }
+
+ // C++0x [temp.param]p9:
+ // A default template-argument may be specified for any kind of
+ // template-parameter that is not a template parameter pack.
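+ // For example, 'template<typename ...Ts = int> struct X;' is ill-formed.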
+ if (DefaultArg && IsParameterPack) {
+ Diag(EqualLoc, diag::err_template_param_pack_default_arg);
+ DefaultArg = ParsedType();
+ }
+
+ // Handle the default argument, if provided.
+ if (DefaultArg) {
+ TypeSourceInfo *DefaultTInfo;
+ GetTypeFromParser(DefaultArg, &DefaultTInfo);
+
+ assert(DefaultTInfo && "expected source information for type");
+
+ // Check for unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(Loc, DefaultTInfo,
+ UPPC_DefaultArgument))
+ return Param;
+
+ // Check the template argument itself.
+ if (CheckTemplateArgument(Param, DefaultTInfo)) {
+ Param->setInvalidDecl();
+ return Param;
+ }
+
+ Param->setDefaultArgument(DefaultTInfo);
+ }
+
+ return Param;
+}
+
+/// \brief Check that the type of a non-type template parameter is
+/// well-formed.
+///
+/// \returns the (possibly-promoted) parameter type if valid;
+/// otherwise, produces a diagnostic and returns a NULL type.
+QualType
+Sema::CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc) {
+ // We don't allow variably-modified types as the type of non-type template
+ // parameters.
+ if (T->isVariablyModifiedType()) {
+ Diag(Loc, diag::err_variably_modified_nontype_template_param)
+ << T;
+ return QualType();
+ }
+
+ // C++ [temp.param]p4:
+ //
+ // A non-type template-parameter shall have one of the following
+ // (optionally cv-qualified) types:
+ //
+ // -- integral or enumeration type,
+ if (T->isIntegralOrEnumerationType() ||
+ // -- pointer to object or pointer to function,
+ T->isPointerType() ||
+ // -- reference to object or reference to function,
+ T->isReferenceType() ||
+ // -- pointer to member,
+ T->isMemberPointerType() ||
+ // -- std::nullptr_t.
+ T->isNullPtrType() ||
+ // If T is a dependent type, we can't do the check now, so we
+ // assume that it is well-formed.
+ T->isDependentType()) {
+ // C++ [temp.param]p5: The top-level cv-qualifiers on the template-parameter
+ // are ignored when determining its type.
+ return T.getUnqualifiedType();
+ }
+
+ // C++ [temp.param]p8:
+ //
+ // A non-type template-parameter of type "array of T" or
+ // "function returning T" is adjusted to be of type "pointer to
+ // T" or "pointer to function returning T", respectively.
+ else if (T->isArrayType() || T->isFunctionType())
+ return Context.getDecayedType(T);
+
+ Diag(Loc, diag::err_template_nontype_parm_bad_type)
+ << T;
+
+ return QualType();
+}
+
+Decl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
+ unsigned Depth,
+ unsigned Position,
+ SourceLocation EqualLoc,
+ Expr *Default) {
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType T = TInfo->getType();
+
+ assert(S->isTemplateParamScope() &&
+ "Non-type template parameter not in template parameter scope!");
+ bool Invalid = false;
+
+ T = CheckNonTypeTemplateParameterType(T, D.getIdentifierLoc());
+ if (T.isNull()) {
+ T = Context.IntTy; // Recover with an 'int' type.
+ Invalid = true;
+ }
+
+ IdentifierInfo *ParamName = D.getIdentifier();
+ bool IsParameterPack = D.hasEllipsis();
+ NonTypeTemplateParmDecl *Param
+ = NonTypeTemplateParmDecl::Create(Context, Context.getTranslationUnitDecl(),
+ D.getLocStart(),
+ D.getIdentifierLoc(),
+ Depth, Position, ParamName, T,
+ IsParameterPack, TInfo);
+ Param->setAccess(AS_public);
+
+ if (Invalid)
+ Param->setInvalidDecl();
+
+ if (ParamName) {
+ maybeDiagnoseTemplateParameterShadow(*this, S, D.getIdentifierLoc(),
+ ParamName);
+
+ // Add the template parameter into the current scope.
+ S->AddDecl(Param);
+ IdResolver.AddDecl(Param);
+ }
+
+ // C++0x [temp.param]p9:
+ // A default template-argument may be specified for any kind of
+ // template-parameter that is not a template parameter pack.
+ if (Default && IsParameterPack) {
+ Diag(EqualLoc, diag::err_template_param_pack_default_arg);
+ Default = nullptr;
+ }
+
+ // Check the well-formedness of the default template argument, if provided.
+ if (Default) {
+ // Check for unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(Default, UPPC_DefaultArgument))
+ return Param;
+
+ TemplateArgument Converted;
+ ExprResult DefaultRes =
+ CheckTemplateArgument(Param, Param->getType(), Default, Converted);
+ if (DefaultRes.isInvalid()) {
+ Param->setInvalidDecl();
+ return Param;
+ }
+ Default = DefaultRes.get();
+
+ Param->setDefaultArgument(Default);
+ }
+
+ return Param;
+}
+
+/// ActOnTemplateTemplateParameter - Called when a C++ template template
+/// parameter (e.g. T in template <template \<typename> class T> class array)
+/// has been parsed. S is the current scope.
+Decl *Sema::ActOnTemplateTemplateParameter(Scope* S,
+ SourceLocation TmpLoc,
+ TemplateParameterList *Params,
+ SourceLocation EllipsisLoc,
+ IdentifierInfo *Name,
+ SourceLocation NameLoc,
+ unsigned Depth,
+ unsigned Position,
+ SourceLocation EqualLoc,
+ ParsedTemplateArgument Default) {
+ assert(S->isTemplateParamScope() &&
+ "Template template parameter not in template parameter scope!");
+
+ // Construct the parameter object.
+ bool IsParameterPack = EllipsisLoc.isValid();
+ TemplateTemplateParmDecl *Param =
+ TemplateTemplateParmDecl::Create(Context, Context.getTranslationUnitDecl(),
+ NameLoc.isInvalid()? TmpLoc : NameLoc,
+ Depth, Position, IsParameterPack,
+ Name, Params);
+ Param->setAccess(AS_public);
+
+ // If the template template parameter has a name, then link the identifier
+ // into the scope and lookup mechanisms.
+ if (Name) {
+ maybeDiagnoseTemplateParameterShadow(*this, S, NameLoc, Name);
+
+ S->AddDecl(Param);
+ IdResolver.AddDecl(Param);
+ }
+
+ if (Params->size() == 0) {
+ Diag(Param->getLocation(), diag::err_template_template_parm_no_parms)
+ << SourceRange(Params->getLAngleLoc(), Params->getRAngleLoc());
+ Param->setInvalidDecl();
+ }
+
+ // C++0x [temp.param]p9:
+ // A default template-argument may be specified for any kind of
+ // template-parameter that is not a template parameter pack.
+ if (IsParameterPack && !Default.isInvalid()) {
+ Diag(EqualLoc, diag::err_template_param_pack_default_arg);
+ Default = ParsedTemplateArgument();
+ }
+
+ if (!Default.isInvalid()) {
+ // Check only that we have a template template argument. We don't want to
+ // try to check well-formedness now, because our template template parameter
+ // might have dependent types in its template parameters, which we wouldn't
+ // be able to match now.
+ //
+ // If none of the template template parameter's template arguments mention
+ // other template parameters, we could actually perform more checking here.
+ // However, it isn't worth doing.
+ TemplateArgumentLoc DefaultArg = translateTemplateArgument(*this, Default);
+ if (DefaultArg.getArgument().getAsTemplate().isNull()) {
+ Diag(DefaultArg.getLocation(), diag::err_template_arg_not_class_template)
+ << DefaultArg.getSourceRange();
+ return Param;
+ }
+
+ // Check for unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(DefaultArg.getLocation(),
+ DefaultArg.getArgument().getAsTemplate(),
+ UPPC_DefaultArgument))
+ return Param;
+
+ Param->setDefaultArgument(Context, DefaultArg);
+ }
+
+ return Param;
+}
+
+/// ActOnTemplateParameterList - Builds a TemplateParameterList that
+/// contains the template parameters in Params.
+TemplateParameterList *
+Sema::ActOnTemplateParameterList(unsigned Depth,
+ SourceLocation ExportLoc,
+ SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ ArrayRef<Decl *> Params,
+ SourceLocation RAngleLoc) {
+ if (ExportLoc.isValid())
+ Diag(ExportLoc, diag::warn_template_export_unsupported);
+
+ return TemplateParameterList::Create(
+ Context, TemplateLoc, LAngleLoc,
+ llvm::makeArrayRef((NamedDecl *const *)Params.data(), Params.size()),
+ RAngleLoc);
+}
+
+static void SetNestedNameSpecifier(TagDecl *T, const CXXScopeSpec &SS) {
+ if (SS.isSet())
+ T->setQualifierInfo(SS.getWithLocInContext(T->getASTContext()));
+}
+
+DeclResult
+Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
+ SourceLocation KWLoc, CXXScopeSpec &SS,
+ IdentifierInfo *Name, SourceLocation NameLoc,
+ AttributeList *Attr,
+ TemplateParameterList *TemplateParams,
+ AccessSpecifier AS, SourceLocation ModulePrivateLoc,
+ SourceLocation FriendLoc,
+ unsigned NumOuterTemplateParamLists,
+ TemplateParameterList** OuterTemplateParamLists,
+ SkipBodyInfo *SkipBody) {
+ assert(TemplateParams && TemplateParams->size() > 0 &&
+ "No template parameters");
+ assert(TUK != TUK_Reference && "Can only declare or define class templates");
+ bool Invalid = false;
+
+ // Check that we can declare a template here.
+ if (CheckTemplateDeclScope(S, TemplateParams))
+ return true;
+
+ TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
+ assert(Kind != TTK_Enum && "can't build template of enumerated type");
+
+ // There is no such thing as an unnamed class template.
+ if (!Name) {
+ Diag(KWLoc, diag::err_template_unnamed_class);
+ return true;
+ }
+
+ // Find any previous declaration with this name. For a friend with no
+ // scope explicitly specified, we only look for tag declarations (per
+ // C++11 [basic.lookup.elab]p2).
+ DeclContext *SemanticContext;
+ LookupResult Previous(*this, Name, NameLoc,
+ (SS.isEmpty() && TUK == TUK_Friend)
+ ? LookupTagName : LookupOrdinaryName,
+ ForRedeclaration);
+ if (SS.isNotEmpty() && !SS.isInvalid()) {
+ SemanticContext = computeDeclContext(SS, true);
+ if (!SemanticContext) {
+ // FIXME: Horrible, horrible hack! We can't currently represent this
+ // in the AST, and historically we have just ignored such friend
+ // class templates, so don't complain here.
+ Diag(NameLoc, TUK == TUK_Friend
+ ? diag::warn_template_qualified_friend_ignored
+ : diag::err_template_qualified_declarator_no_match)
+ << SS.getScopeRep() << SS.getRange();
+ return TUK != TUK_Friend;
+ }
+
+ if (RequireCompleteDeclContext(SS, SemanticContext))
+ return true;
+
+ // If we're adding a template to a dependent context, we may need to
+ // rebuild some of the types used within the template parameter list,
+ // now that we know what the current instantiation is.
+ if (SemanticContext->isDependentContext()) {
+ ContextRAII SavedContext(*this, SemanticContext);
+ if (RebuildTemplateParamsInCurrentInstantiation(TemplateParams))
+ Invalid = true;
+ } else if (TUK != TUK_Friend && TUK != TUK_Reference)
+ diagnoseQualifiedDeclaration(SS, SemanticContext, Name, NameLoc);
+
+ LookupQualifiedName(Previous, SemanticContext);
+ } else {
+ SemanticContext = CurContext;
+
+ // C++14 [class.mem]p14:
+ // If T is the name of a class, then each of the following shall have a
+ // name different from T:
+ // -- every member template of class T
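+ // For example, 'struct S { template<typename T> struct S; };' is
+ // ill-formed.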
+ if (TUK != TUK_Friend &&
+ DiagnoseClassNameShadow(SemanticContext,
+ DeclarationNameInfo(Name, NameLoc)))
+ return true;
+
+ LookupName(Previous, S);
+ }
+
+ if (Previous.isAmbiguous())
+ return true;
+
+ NamedDecl *PrevDecl = nullptr;
+ if (Previous.begin() != Previous.end())
+ PrevDecl = (*Previous.begin())->getUnderlyingDecl();
+
+ // If there is a previous declaration with the same name, check
+ // whether this is a valid redeclaration.
+ ClassTemplateDecl *PrevClassTemplate
+ = dyn_cast_or_null<ClassTemplateDecl>(PrevDecl);
+
+ // We may have found the injected-class-name of a class template,
+ // class template partial specialization, or class template specialization.
+ // In these cases, grab the template that is being defined or specialized.
+ if (!PrevClassTemplate && PrevDecl && isa<CXXRecordDecl>(PrevDecl) &&
+ cast<CXXRecordDecl>(PrevDecl)->isInjectedClassName()) {
+ PrevDecl = cast<CXXRecordDecl>(PrevDecl->getDeclContext());
+ PrevClassTemplate
+ = cast<CXXRecordDecl>(PrevDecl)->getDescribedClassTemplate();
+ if (!PrevClassTemplate && isa<ClassTemplateSpecializationDecl>(PrevDecl)) {
+ PrevClassTemplate
+ = cast<ClassTemplateSpecializationDecl>(PrevDecl)
+ ->getSpecializedTemplate();
+ }
+ }
+
+ if (TUK == TUK_Friend) {
+ // C++ [namespace.memdef]p3:
+ // [...] When looking for a prior declaration of a class or a function
+ // declared as a friend, and when the name of the friend class or
+ // function is neither a qualified name nor a template-id, scopes outside
+ // the innermost enclosing namespace scope are not considered.
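+ // For example:
+ //   template<typename T> struct F;                         // not found
+ //   namespace N {
+ //     struct X { template<typename T> friend struct F; };  // declares N::F
+ //   }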
+ if (!SS.isSet()) {
+ DeclContext *OutermostContext = CurContext;
+ while (!OutermostContext->isFileContext())
+ OutermostContext = OutermostContext->getLookupParent();
+
+ if (PrevDecl &&
+ (OutermostContext->Equals(PrevDecl->getDeclContext()) ||
+ OutermostContext->Encloses(PrevDecl->getDeclContext()))) {
+ SemanticContext = PrevDecl->getDeclContext();
+ } else {
+ // Declarations in outer scopes don't matter. However, the outermost
+ // context we computed is the semantic context for our new
+ // declaration.
+ PrevDecl = PrevClassTemplate = nullptr;
+ SemanticContext = OutermostContext;
+
+ // Check that the chosen semantic context doesn't already contain a
+ // declaration of this name as a non-tag type.
+ Previous.clear(LookupOrdinaryName);
+ DeclContext *LookupContext = SemanticContext;
+ while (LookupContext->isTransparentContext())
+ LookupContext = LookupContext->getLookupParent();
+ LookupQualifiedName(Previous, LookupContext);
+
+ if (Previous.isAmbiguous())
+ return true;
+
+ if (Previous.begin() != Previous.end())
+ PrevDecl = (*Previous.begin())->getUnderlyingDecl();
+ }
+ }
+ } else if (PrevDecl &&
+ !isDeclInScope(Previous.getRepresentativeDecl(), SemanticContext,
+ S, SS.isValid()))
+ PrevDecl = PrevClassTemplate = nullptr;
+
+ if (auto *Shadow = dyn_cast_or_null<UsingShadowDecl>(
+ PrevDecl ? Previous.getRepresentativeDecl() : nullptr)) {
+ if (SS.isEmpty() &&
+ !(PrevClassTemplate &&
+ PrevClassTemplate->getDeclContext()->getRedeclContext()->Equals(
+ SemanticContext->getRedeclContext()))) {
+ Diag(KWLoc, diag::err_using_decl_conflict_reverse);
+ Diag(Shadow->getTargetDecl()->getLocation(),
+ diag::note_using_decl_target);
+ Diag(Shadow->getUsingDecl()->getLocation(), diag::note_using_decl) << 0;
+ // Recover by ignoring the old declaration.
+ PrevDecl = PrevClassTemplate = nullptr;
+ }
+ }
+
+ if (PrevClassTemplate) {
+ // Ensure that the template parameter lists are compatible. Skip this check
+ // for a friend in a dependent context: the template parameter list itself
+ // could be dependent.
+ if (!(TUK == TUK_Friend && CurContext->isDependentContext()) &&
+ !TemplateParameterListsAreEqual(TemplateParams,
+ PrevClassTemplate->getTemplateParameters(),
+ /*Complain=*/true,
+ TPL_TemplateMatch))
+ return true;
+
+ // C++ [temp.class]p4:
+ // In a redeclaration, partial specialization, explicit
+ // specialization or explicit instantiation of a class template,
+ // the class-key shall agree in kind with the original class
+ // template declaration (7.1.5.3).
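+ // For example:
+ //   template<typename T> struct X { };
+ //   template<typename T> union X;  // error: wrong class-key 'union'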
+ RecordDecl *PrevRecordDecl = PrevClassTemplate->getTemplatedDecl();
+ if (!isAcceptableTagRedeclaration(PrevRecordDecl, Kind,
+ TUK == TUK_Definition, KWLoc, Name)) {
+ Diag(KWLoc, diag::err_use_with_wrong_tag)
+ << Name
+ << FixItHint::CreateReplacement(KWLoc, PrevRecordDecl->getKindName());
+ Diag(PrevRecordDecl->getLocation(), diag::note_previous_use);
+ Kind = PrevRecordDecl->getTagKind();
+ }
+
+ // Check for redefinition of this class template.
+ if (TUK == TUK_Definition) {
+ if (TagDecl *Def = PrevRecordDecl->getDefinition()) {
+ // If we have a prior definition that is not visible, treat this as
+ // simply making that previous definition visible.
+ NamedDecl *Hidden = nullptr;
+ if (SkipBody && !hasVisibleDefinition(Def, &Hidden)) {
+ SkipBody->ShouldSkip = true;
+ auto *Tmpl = cast<CXXRecordDecl>(Hidden)->getDescribedClassTemplate();
+ assert(Tmpl && "original definition of a class template is not a "
+ "class template?");
+ makeMergedDefinitionVisible(Hidden, KWLoc);
+ makeMergedDefinitionVisible(Tmpl, KWLoc);
+ return Def;
+ }
+
+ Diag(NameLoc, diag::err_redefinition) << Name;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ // FIXME: Would it make sense to try to "forget" the previous
+ // definition, as part of error recovery?
+ return true;
+ }
+ }
+ } else if (PrevDecl && PrevDecl->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(NameLoc, PrevDecl);
+ // Just pretend that we didn't see the previous declaration.
+ PrevDecl = nullptr;
+ } else if (PrevDecl) {
+ // C++ [temp]p5:
+ // A class template shall not have the same name as any other
+ // template, class, function, object, enumeration, enumerator,
+ // namespace, or type in the same scope (3.3), except as specified
+ // in (14.5.4).
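+ // For example:
+ //   int X;
+ //   template<typename T> struct X { };  // error: redefinition as a
+ //                                       // different kind of symbol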
+ Diag(NameLoc, diag::err_redefinition_different_kind) << Name;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ return true;
+ }
+
+ // Check the template parameter list of this declaration, possibly
+ // merging in the template parameter list from the previous class
+ // template declaration. Skip this check for a friend in a dependent
+ // context, because the template parameter list might be dependent.
+ if (!(TUK == TUK_Friend && CurContext->isDependentContext()) &&
+ CheckTemplateParameterList(
+ TemplateParams,
+ PrevClassTemplate ? PrevClassTemplate->getTemplateParameters()
+ : nullptr,
+ (SS.isSet() && SemanticContext && SemanticContext->isRecord() &&
+ SemanticContext->isDependentContext())
+ ? TPC_ClassTemplateMember
+ : TUK == TUK_Friend ? TPC_FriendClassTemplate
+ : TPC_ClassTemplate))
+ Invalid = true;
+
+ if (SS.isSet()) {
+ // If the name of the template was qualified, we must be defining the
+ // template out-of-line.
+ if (!SS.isInvalid() && !Invalid && !PrevClassTemplate) {
+ Diag(NameLoc, TUK == TUK_Friend ? diag::err_friend_decl_does_not_match
+ : diag::err_member_decl_does_not_match)
+ << Name << SemanticContext << /*IsDefinition*/true << SS.getRange();
+ Invalid = true;
+ }
+ }
+
+ CXXRecordDecl *NewClass =
+ CXXRecordDecl::Create(Context, Kind, SemanticContext, KWLoc, NameLoc, Name,
+ PrevClassTemplate?
+ PrevClassTemplate->getTemplatedDecl() : nullptr,
+ /*DelayTypeCreation=*/true);
+ SetNestedNameSpecifier(NewClass, SS);
+ if (NumOuterTemplateParamLists > 0)
+ NewClass->setTemplateParameterListsInfo(
+ Context, llvm::makeArrayRef(OuterTemplateParamLists,
+ NumOuterTemplateParamLists));
+
+ // Add alignment attributes if necessary; these attributes are checked when
+ // the ASTContext lays out the structure.
+ if (TUK == TUK_Definition) {
+ AddAlignmentAttributesForRecord(NewClass);
+ AddMsStructLayoutForRecord(NewClass);
+ }
+
+ ClassTemplateDecl *NewTemplate
+ = ClassTemplateDecl::Create(Context, SemanticContext, NameLoc,
+ DeclarationName(Name), TemplateParams,
+ NewClass, PrevClassTemplate);
+ NewClass->setDescribedClassTemplate(NewTemplate);
+
+ if (ModulePrivateLoc.isValid())
+ NewTemplate->setModulePrivate();
+
+ // Build the type for the class template declaration now.
+ QualType T = NewTemplate->getInjectedClassNameSpecialization();
+ T = Context.getInjectedClassNameType(NewClass, T);
+ assert(T->isDependentType() && "Class template type is not dependent?");
+ (void)T;
+
+ // If we are providing an explicit specialization of a member that is a
+ // class template, make a note of that.
+ if (PrevClassTemplate &&
+ PrevClassTemplate->getInstantiatedFromMemberTemplate())
+ PrevClassTemplate->setMemberSpecialization();
+
+ // Set the access specifier.
+ if (!Invalid && TUK != TUK_Friend && NewTemplate->getDeclContext()->isRecord())
+ SetMemberAccessSpecifier(NewTemplate, PrevClassTemplate, AS);
+
+ // Set the lexical context of these templates
+ NewClass->setLexicalDeclContext(CurContext);
+ NewTemplate->setLexicalDeclContext(CurContext);
+
+ if (TUK == TUK_Definition)
+ NewClass->startDefinition();
+
+ if (Attr)
+ ProcessDeclAttributeList(S, NewClass, Attr);
+
+ if (PrevClassTemplate)
+ mergeDeclAttributes(NewClass, PrevClassTemplate->getTemplatedDecl());
+
+ AddPushedVisibilityAttribute(NewClass);
+
+ if (TUK != TUK_Friend) {
+ // Per C++ [basic.scope.temp]p2, skip the template parameter scopes.
+ Scope *Outer = S;
+ while ((Outer->getFlags() & Scope::TemplateParamScope) != 0)
+ Outer = Outer->getParent();
+ PushOnScopeChains(NewTemplate, Outer);
+ } else {
+ if (PrevClassTemplate && PrevClassTemplate->getAccess() != AS_none) {
+ NewTemplate->setAccess(PrevClassTemplate->getAccess());
+ NewClass->setAccess(PrevClassTemplate->getAccess());
+ }
+
+ NewTemplate->setObjectOfFriendDecl();
+
+ // Friend templates are visible in fairly strange ways.
+ if (!CurContext->isDependentContext()) {
+ DeclContext *DC = SemanticContext->getRedeclContext();
+ DC->makeDeclVisibleInContext(NewTemplate);
+ if (Scope *EnclosingScope = getScopeForDeclContext(S, DC))
+ PushOnScopeChains(NewTemplate, EnclosingScope,
+ /* AddToContext = */ false);
+ }
+
+ FriendDecl *Friend = FriendDecl::Create(
+ Context, CurContext, NewClass->getLocation(), NewTemplate, FriendLoc);
+ Friend->setAccess(AS_public);
+ CurContext->addDecl(Friend);
+ }
+
+ if (Invalid) {
+ NewTemplate->setInvalidDecl();
+ NewClass->setInvalidDecl();
+ }
+
+ ActOnDocumentableDecl(NewTemplate);
+
+ return NewTemplate;
+}
+
+/// \brief Diagnose the presence of a default template argument on a
+/// template parameter, which is ill-formed in certain contexts.
+///
+/// \returns true if the default template argument should be dropped.
+static bool DiagnoseDefaultTemplateArgument(Sema &S,
+ Sema::TemplateParamListContext TPC,
+ SourceLocation ParamLoc,
+ SourceRange DefArgRange) {
+ switch (TPC) {
+ case Sema::TPC_ClassTemplate:
+ case Sema::TPC_VarTemplate:
+ case Sema::TPC_TypeAliasTemplate:
+ return false;
+
+ case Sema::TPC_FunctionTemplate:
+ case Sema::TPC_FriendFunctionTemplateDefinition:
+ // C++ [temp.param]p9:
+ // A default template-argument shall not be specified in a
+ // function template declaration or a function template
+ // definition [...]
+ // If a friend function template declaration specifies a default
+ // template-argument, that declaration shall be a definition and shall be
+ // the only declaration of the function template in the translation unit.
+ // (C++98/03 doesn't have this wording; see DR226).
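+ // For example, 'template<typename T = int> void f();' is an extension
+ // in C++98/03 and valid in C++11.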
+ S.Diag(ParamLoc, S.getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_template_parameter_default_in_function_template
+ : diag::ext_template_parameter_default_in_function_template)
+ << DefArgRange;
+ return false;
+
+ case Sema::TPC_ClassTemplateMember:
+ // C++0x [temp.param]p9:
+ // A default template-argument shall not be specified in the
+ // template-parameter-lists of the definition of a member of a
+ // class template that appears outside of the member's class.
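+ // For example:
+ //   template<typename T> struct X { template<typename U> void f(); };
+ //   template<typename T> template<typename U = int>  // error
+ //   void X<T>::f() { }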
+ S.Diag(ParamLoc, diag::err_template_parameter_default_template_member)
+ << DefArgRange;
+ return true;
+
+ case Sema::TPC_FriendClassTemplate:
+ case Sema::TPC_FriendFunctionTemplate:
+ // C++ [temp.param]p9:
+ // A default template-argument shall not be specified in a
+ // friend template declaration.
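+ // For example, 'template<typename T = int> friend struct F;' is
+ // ill-formed inside a class definition.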
+ S.Diag(ParamLoc, diag::err_template_parameter_default_friend_template)
+ << DefArgRange;
+ return true;
+
+ // FIXME: C++0x [temp.param]p9 allows default template-arguments
+ // for friend function templates if there is only a single
+ // declaration (and it is a definition). Strange!
+ }
+
+ llvm_unreachable("Invalid TemplateParamListContext!");
+}
+
+/// \brief Check for unexpanded parameter packs within the template parameters
+/// of a template template parameter, recursively.
+static bool DiagnoseUnexpandedParameterPacks(Sema &S,
+ TemplateTemplateParmDecl *TTP) {
+ // A template template parameter which is a parameter pack is also a pack
+ // expansion.
+ if (TTP->isParameterPack())
+ return false;
+
+ TemplateParameterList *Params = TTP->getTemplateParameters();
+ for (unsigned I = 0, N = Params->size(); I != N; ++I) {
+ NamedDecl *P = Params->getParam(I);
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(P)) {
+ if (!NTTP->isParameterPack() &&
+ S.DiagnoseUnexpandedParameterPack(NTTP->getLocation(),
+ NTTP->getTypeSourceInfo(),
+ Sema::UPPC_NonTypeTemplateParameterType))
+ return true;
+
+ continue;
+ }
+
+ if (TemplateTemplateParmDecl *InnerTTP
+ = dyn_cast<TemplateTemplateParmDecl>(P))
+ if (DiagnoseUnexpandedParameterPacks(S, InnerTTP))
+ return true;
+ }
+
+ return false;
+}
+
+/// \brief Checks the validity of a template parameter list, possibly
+/// considering the template parameter list from a previous
+/// declaration.
+///
+/// If an "old" template parameter list is provided, it must be
+/// equivalent (per TemplateParameterListsAreEqual) to the "new"
+/// template parameter list.
+///
+/// \param NewParams Template parameter list for a new template
+/// declaration. This template parameter list will be updated with any
+/// default arguments that are carried through from the previous
+/// template parameter list.
+///
+/// \param OldParams If provided, template parameter list from a
+/// previous declaration of the same template. Default template
+/// arguments will be merged from the old template parameter list to
+/// the new template parameter list.
+///
+/// \param TPC Describes the context in which we are checking the given
+/// template parameter list.
+///
+/// \returns true if an error occurred, false otherwise.
+bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
+ TemplateParameterList *OldParams,
+ TemplateParamListContext TPC) {
+ bool Invalid = false;
+
+ // C++ [temp.param]p10:
+ // The set of default template-arguments available for use with a
+ // template declaration or definition is obtained by merging the
+ // default arguments from the definition (if in scope) and all
+ // declarations in scope in the same way default function
+ // arguments are (8.3.6).
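+ // For example:
+ //   template<typename T, typename U = int> struct X;
+ //   template<typename T = char, typename U> struct X;
+ //   X<> x;  // OK: X<char, int>, merging defaults from both declarations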
+ bool SawDefaultArgument = false;
+ SourceLocation PreviousDefaultArgLoc;
+
+ // Dummy initialization to avoid warnings.
+ TemplateParameterList::iterator OldParam = NewParams->end();
+ if (OldParams)
+ OldParam = OldParams->begin();
+
+ bool RemoveDefaultArguments = false;
+ for (TemplateParameterList::iterator NewParam = NewParams->begin(),
+ NewParamEnd = NewParams->end();
+ NewParam != NewParamEnd; ++NewParam) {
+ // Variables used to diagnose redundant default arguments
+ bool RedundantDefaultArg = false;
+ SourceLocation OldDefaultLoc;
+ SourceLocation NewDefaultLoc;
+
+ // Variable used to diagnose missing default arguments
+ bool MissingDefaultArg = false;
+
+ // Variable used to diagnose non-final parameter packs
+ bool SawParameterPack = false;
+
+ if (TemplateTypeParmDecl *NewTypeParm
+ = dyn_cast<TemplateTypeParmDecl>(*NewParam)) {
+ // Check the presence of a default argument here.
+ if (NewTypeParm->hasDefaultArgument() &&
+ DiagnoseDefaultTemplateArgument(*this, TPC,
+ NewTypeParm->getLocation(),
+ NewTypeParm->getDefaultArgumentInfo()->getTypeLoc()
+ .getSourceRange()))
+ NewTypeParm->removeDefaultArgument();
+
+ // Merge default arguments for template type parameters.
+ TemplateTypeParmDecl *OldTypeParm
+ = OldParams? cast<TemplateTypeParmDecl>(*OldParam) : nullptr;
+ if (NewTypeParm->isParameterPack()) {
+ assert(!NewTypeParm->hasDefaultArgument() &&
+ "Parameter packs can't have a default argument!");
+ SawParameterPack = true;
+ } else if (OldTypeParm && hasVisibleDefaultArgument(OldTypeParm) &&
+ NewTypeParm->hasDefaultArgument()) {
+ OldDefaultLoc = OldTypeParm->getDefaultArgumentLoc();
+ NewDefaultLoc = NewTypeParm->getDefaultArgumentLoc();
+ SawDefaultArgument = true;
+ RedundantDefaultArg = true;
+ PreviousDefaultArgLoc = NewDefaultLoc;
+ } else if (OldTypeParm && OldTypeParm->hasDefaultArgument()) {
+ // Merge the default argument from the old declaration to the
+ // new declaration.
+ NewTypeParm->setInheritedDefaultArgument(Context, OldTypeParm);
+ PreviousDefaultArgLoc = OldTypeParm->getDefaultArgumentLoc();
+ } else if (NewTypeParm->hasDefaultArgument()) {
+ SawDefaultArgument = true;
+ PreviousDefaultArgLoc = NewTypeParm->getDefaultArgumentLoc();
+ } else if (SawDefaultArgument)
+ MissingDefaultArg = true;
+ } else if (NonTypeTemplateParmDecl *NewNonTypeParm
+ = dyn_cast<NonTypeTemplateParmDecl>(*NewParam)) {
+ // Check for unexpanded parameter packs.
+ if (!NewNonTypeParm->isParameterPack() &&
+ DiagnoseUnexpandedParameterPack(NewNonTypeParm->getLocation(),
+ NewNonTypeParm->getTypeSourceInfo(),
+ UPPC_NonTypeTemplateParameterType)) {
+ Invalid = true;
+ continue;
+ }
+
+ // Check the presence of a default argument here.
+ if (NewNonTypeParm->hasDefaultArgument() &&
+ DiagnoseDefaultTemplateArgument(*this, TPC,
+ NewNonTypeParm->getLocation(),
+ NewNonTypeParm->getDefaultArgument()->getSourceRange())) {
+ NewNonTypeParm->removeDefaultArgument();
+ }
+
+ // Merge default arguments for non-type template parameters
+ NonTypeTemplateParmDecl *OldNonTypeParm
+ = OldParams? cast<NonTypeTemplateParmDecl>(*OldParam) : nullptr;
+ if (NewNonTypeParm->isParameterPack()) {
+ assert(!NewNonTypeParm->hasDefaultArgument() &&
+ "Parameter packs can't have a default argument!");
+ if (!NewNonTypeParm->isPackExpansion())
+ SawParameterPack = true;
+ } else if (OldNonTypeParm && hasVisibleDefaultArgument(OldNonTypeParm) &&
+ NewNonTypeParm->hasDefaultArgument()) {
+ OldDefaultLoc = OldNonTypeParm->getDefaultArgumentLoc();
+ NewDefaultLoc = NewNonTypeParm->getDefaultArgumentLoc();
+ SawDefaultArgument = true;
+ RedundantDefaultArg = true;
+ PreviousDefaultArgLoc = NewDefaultLoc;
+ } else if (OldNonTypeParm && OldNonTypeParm->hasDefaultArgument()) {
+ // Merge the default argument from the old declaration to the
+ // new declaration.
+ NewNonTypeParm->setInheritedDefaultArgument(Context, OldNonTypeParm);
+ PreviousDefaultArgLoc = OldNonTypeParm->getDefaultArgumentLoc();
+ } else if (NewNonTypeParm->hasDefaultArgument()) {
+ SawDefaultArgument = true;
+ PreviousDefaultArgLoc = NewNonTypeParm->getDefaultArgumentLoc();
+ } else if (SawDefaultArgument)
+ MissingDefaultArg = true;
+ } else {
+ TemplateTemplateParmDecl *NewTemplateParm
+ = cast<TemplateTemplateParmDecl>(*NewParam);
+
+ // Check for unexpanded parameter packs, recursively.
+ if (::DiagnoseUnexpandedParameterPacks(*this, NewTemplateParm)) {
+ Invalid = true;
+ continue;
+ }
+
+ // Check the presence of a default argument here.
+ if (NewTemplateParm->hasDefaultArgument() &&
+ DiagnoseDefaultTemplateArgument(*this, TPC,
+ NewTemplateParm->getLocation(),
+ NewTemplateParm->getDefaultArgument().getSourceRange()))
+ NewTemplateParm->removeDefaultArgument();
+
+ // Merge default arguments for template template parameters
+ TemplateTemplateParmDecl *OldTemplateParm
+ = OldParams? cast<TemplateTemplateParmDecl>(*OldParam) : nullptr;
+ if (NewTemplateParm->isParameterPack()) {
+ assert(!NewTemplateParm->hasDefaultArgument() &&
+ "Parameter packs can't have a default argument!");
+ if (!NewTemplateParm->isPackExpansion())
+ SawParameterPack = true;
+ } else if (OldTemplateParm &&
+ hasVisibleDefaultArgument(OldTemplateParm) &&
+ NewTemplateParm->hasDefaultArgument()) {
+ OldDefaultLoc = OldTemplateParm->getDefaultArgument().getLocation();
+ NewDefaultLoc = NewTemplateParm->getDefaultArgument().getLocation();
+ SawDefaultArgument = true;
+ RedundantDefaultArg = true;
+ PreviousDefaultArgLoc = NewDefaultLoc;
+ } else if (OldTemplateParm && OldTemplateParm->hasDefaultArgument()) {
+ // Merge the default argument from the old declaration to the
+ // new declaration.
+ NewTemplateParm->setInheritedDefaultArgument(Context, OldTemplateParm);
+ PreviousDefaultArgLoc
+ = OldTemplateParm->getDefaultArgument().getLocation();
+ } else if (NewTemplateParm->hasDefaultArgument()) {
+ SawDefaultArgument = true;
+ PreviousDefaultArgLoc
+ = NewTemplateParm->getDefaultArgument().getLocation();
+ } else if (SawDefaultArgument)
+ MissingDefaultArg = true;
+ }
+
+ // C++11 [temp.param]p11:
+ // If a template parameter of a primary class template or alias template
+ // is a template parameter pack, it shall be the last template parameter.
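+ // For example, 'template<typename ...Ts, typename U> struct X;' is
+ // ill-formed (the corresponding function template would be OK, since
+ // Ts can be deduced there).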
+ if (SawParameterPack && (NewParam + 1) != NewParamEnd &&
+ (TPC == TPC_ClassTemplate || TPC == TPC_VarTemplate ||
+ TPC == TPC_TypeAliasTemplate)) {
+ Diag((*NewParam)->getLocation(),
+ diag::err_template_param_pack_must_be_last_template_parameter);
+ Invalid = true;
+ }
+
+ if (RedundantDefaultArg) {
+ // C++ [temp.param]p12:
+ // A template-parameter shall not be given default arguments
+ // by two different declarations in the same scope.
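+ // For example:
+ //   template<typename T = int> struct X;
+ //   template<typename T = int> struct X { };  // error: default argument
+ //                                             // given twice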
+ Diag(NewDefaultLoc, diag::err_template_param_default_arg_redefinition);
+ Diag(OldDefaultLoc, diag::note_template_param_prev_default_arg);
+ Invalid = true;
+ } else if (MissingDefaultArg && TPC != TPC_FunctionTemplate) {
+ // C++ [temp.param]p11:
+ // If a template-parameter of a class template has a default
+ // template-argument, each subsequent template-parameter shall either
+ // have a default template-argument supplied or be a template parameter
+ // pack.
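+ // For example, 'template<typename T = int, typename U> struct X;' is
+ // ill-formed if no earlier declaration supplies a default for U.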
+ Diag((*NewParam)->getLocation(),
+ diag::err_template_param_default_arg_missing);
+ Diag(PreviousDefaultArgLoc, diag::note_template_param_prev_default_arg);
+ Invalid = true;
+ RemoveDefaultArguments = true;
+ }
+
+ // If we have an old template parameter list that we're merging
+ // in, move on to the next parameter.
+ if (OldParams)
+ ++OldParam;
+ }
+
+ // We were missing some default arguments at the end of the list, so remove
+ // all of the default arguments.
+ if (RemoveDefaultArguments) {
+ for (TemplateParameterList::iterator NewParam = NewParams->begin(),
+ NewParamEnd = NewParams->end();
+ NewParam != NewParamEnd; ++NewParam) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*NewParam))
+ TTP->removeDefaultArgument();
+ else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*NewParam))
+ NTTP->removeDefaultArgument();
+ else
+ cast<TemplateTemplateParmDecl>(*NewParam)->removeDefaultArgument();
+ }
+ }
+
+ return Invalid;
+}
+
+namespace {
+
+/// A class which looks for a use of a certain level of template
+/// parameter.
+struct DependencyChecker : RecursiveASTVisitor<DependencyChecker> {
+ typedef RecursiveASTVisitor<DependencyChecker> super;
+
+ unsigned Depth;
+ bool Match;
+ SourceLocation MatchLoc;
+
+ DependencyChecker(unsigned Depth) : Depth(Depth), Match(false) {}
+
+ DependencyChecker(TemplateParameterList *Params) : Match(false) {
+ NamedDecl *ND = Params->getParam(0);
+ if (TemplateTypeParmDecl *PD = dyn_cast<TemplateTypeParmDecl>(ND)) {
+ Depth = PD->getDepth();
+ } else if (NonTypeTemplateParmDecl *PD =
+ dyn_cast<NonTypeTemplateParmDecl>(ND)) {
+ Depth = PD->getDepth();
+ } else {
+ Depth = cast<TemplateTemplateParmDecl>(ND)->getDepth();
+ }
+ }
+
+ bool Matches(unsigned ParmDepth, SourceLocation Loc = SourceLocation()) {
+ if (ParmDepth >= Depth) {
+ Match = true;
+ MatchLoc = Loc;
+ return true;
+ }
+ return false;
+ }
+
+ bool VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
+ return !Matches(TL.getTypePtr()->getDepth(), TL.getNameLoc());
+ }
+
+ bool VisitTemplateTypeParmType(const TemplateTypeParmType *T) {
+ return !Matches(T->getDepth());
+ }
+
+ bool TraverseTemplateName(TemplateName N) {
+ if (TemplateTemplateParmDecl *PD =
+ dyn_cast_or_null<TemplateTemplateParmDecl>(N.getAsTemplateDecl()))
+ if (Matches(PD->getDepth()))
+ return false;
+ return super::TraverseTemplateName(N);
+ }
+
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ if (NonTypeTemplateParmDecl *PD =
+ dyn_cast<NonTypeTemplateParmDecl>(E->getDecl()))
+ if (Matches(PD->getDepth(), E->getExprLoc()))
+ return false;
+ return super::VisitDeclRefExpr(E);
+ }
+
+ bool VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) {
+ return TraverseType(T->getReplacementType());
+ }
+
+ bool
+ VisitSubstTemplateTypeParmPackType(const SubstTemplateTypeParmPackType *T) {
+ return TraverseTemplateArgument(T->getArgumentPack());
+ }
+
+ bool TraverseInjectedClassNameType(const InjectedClassNameType *T) {
+ return TraverseType(T->getInjectedSpecializationType());
+ }
+};
+}
+
+/// Determines whether a given type depends on the given parameter
+/// list.
+static bool
+DependsOnTemplateParameters(QualType T, TemplateParameterList *Params) {
+ DependencyChecker Checker(Params);
+ Checker.TraverseType(T);
+ return Checker.Match;
+}
+
+// Find the source range corresponding to the named type in the given
+// nested-name-specifier, if any.
+static SourceRange getRangeOfTypeInNestedNameSpecifier(ASTContext &Context,
+ QualType T,
+ const CXXScopeSpec &SS) {
+ NestedNameSpecifierLoc NNSLoc(SS.getScopeRep(), SS.location_data());
+ while (NestedNameSpecifier *NNS = NNSLoc.getNestedNameSpecifier()) {
+ if (const Type *CurType = NNS->getAsType()) {
+ if (Context.hasSameUnqualifiedType(T, QualType(CurType, 0)))
+ return NNSLoc.getTypeLoc().getSourceRange();
+ } else
+ break;
+
+ NNSLoc = NNSLoc.getPrefix();
+ }
+
+ return SourceRange();
+}
+
+/// \brief Match the given template parameter lists to the given scope
+/// specifier, returning the template parameter list that applies to the
+/// name.
+///
+/// \param DeclStartLoc the start of the declaration that has a scope
+/// specifier or a template parameter list.
+///
+/// \param DeclLoc The location of the declaration itself.
+///
+/// \param SS the scope specifier that will be matched to the given template
+/// parameter lists. This scope specifier precedes a qualified name that is
+/// being declared.
+///
+/// \param TemplateId The template-id following the scope specifier, if there
+/// is one. Used to check for a missing 'template<>'.
+///
+/// \param ParamLists the template parameter lists, from the outermost to the
+/// innermost template parameter lists.
+///
+/// \param IsFriend Whether to apply the slightly different rules for
+/// matching template parameters to scope specifiers in friend
+/// declarations.
+///
+/// \param IsExplicitSpecialization will be set true if the entity being
+/// declared is an explicit specialization, false otherwise.
+///
+/// \returns the template parameter list, if any, that corresponds to the
+/// name that is preceded by the scope specifier @p SS. This template
+/// parameter list may have template parameters (if we're declaring a
+/// template) or may have no template parameters (if we're declaring a
+/// template specialization), or may be NULL (if what we're declaring isn't
+/// itself a template).
+TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
+ SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS,
+ TemplateIdAnnotation *TemplateId,
+ ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend,
+ bool &IsExplicitSpecialization, bool &Invalid) {
+ IsExplicitSpecialization = false;
+ Invalid = false;
+
+ // The sequence of nested types to which we will match up the template
+ // parameter lists. We first build this list by starting with the type named
+ // by the nested-name-specifier and walking out until we run out of types.
+ SmallVector<QualType, 4> NestedTypes;
+ QualType T;
+ if (SS.getScopeRep()) {
+ if (CXXRecordDecl *Record
+ = dyn_cast_or_null<CXXRecordDecl>(computeDeclContext(SS, true)))
+ T = Context.getTypeDeclType(Record);
+ else
+ T = QualType(SS.getScopeRep()->getAsType(), 0);
+ }
+
+ // If we found an explicit specialization that prevents us from needing
+ // 'template<>' headers, this will be set to the location of that
+ // explicit specialization.
+ SourceLocation ExplicitSpecLoc;
+
+ while (!T.isNull()) {
+ NestedTypes.push_back(T);
+
+ // Retrieve the parent of a record type.
+ if (CXXRecordDecl *Record = T->getAsCXXRecordDecl()) {
+ // If this type is an explicit specialization, we're done.
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Record)) {
+ if (!isa<ClassTemplatePartialSpecializationDecl>(Spec) &&
+ Spec->getSpecializationKind() == TSK_ExplicitSpecialization) {
+ ExplicitSpecLoc = Spec->getLocation();
+ break;
+ }
+ } else if (Record->getTemplateSpecializationKind()
+ == TSK_ExplicitSpecialization) {
+ ExplicitSpecLoc = Record->getLocation();
+ break;
+ }
+
+ if (TypeDecl *Parent = dyn_cast<TypeDecl>(Record->getParent()))
+ T = Context.getTypeDeclType(Parent);
+ else
+ T = QualType();
+ continue;
+ }
+
+ if (const TemplateSpecializationType *TST
+ = T->getAs<TemplateSpecializationType>()) {
+ if (TemplateDecl *Template = TST->getTemplateName().getAsTemplateDecl()) {
+ if (TypeDecl *Parent = dyn_cast<TypeDecl>(Template->getDeclContext()))
+ T = Context.getTypeDeclType(Parent);
+ else
+ T = QualType();
+ continue;
+ }
+ }
+
+ // Look one step prior in a dependent template specialization type.
+ if (const DependentTemplateSpecializationType *DependentTST
+ = T->getAs<DependentTemplateSpecializationType>()) {
+ if (NestedNameSpecifier *NNS = DependentTST->getQualifier())
+ T = QualType(NNS->getAsType(), 0);
+ else
+ T = QualType();
+ continue;
+ }
+
+ // Look one step prior in a dependent name type.
+ if (const DependentNameType *DependentName = T->getAs<DependentNameType>()){
+ if (NestedNameSpecifier *NNS = DependentName->getQualifier())
+ T = QualType(NNS->getAsType(), 0);
+ else
+ T = QualType();
+ continue;
+ }
+
+ // Retrieve the parent of an enumeration type.
+ if (const EnumType *EnumT = T->getAs<EnumType>()) {
+ // FIXME: Forward-declared enums require a TSK_ExplicitSpecialization
+ // check here.
+ EnumDecl *Enum = EnumT->getDecl();
+
+ // Get to the parent type.
+ if (TypeDecl *Parent = dyn_cast<TypeDecl>(Enum->getParent()))
+ T = Context.getTypeDeclType(Parent);
+ else
+ T = QualType();
+ continue;
+ }
+
+ T = QualType();
+ }
+ // Reverse the nested types list, since we want to traverse from the outermost
+ // to the innermost while checking template-parameter-lists.
+ std::reverse(NestedTypes.begin(), NestedTypes.end());
+
+ // C++0x [temp.expl.spec]p17:
+ // A member or a member template may be nested within many
+ // enclosing class templates. In an explicit specialization for
+ // such a member, the member declaration shall be preceded by a
+ // template<> for each enclosing class template that is
+ // explicitly specialized.
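+ // For example:
+ //   template<typename T> struct Outer {
+ //     template<typename U> struct Inner { };
+ //   };
+ //   template<> template<> struct Outer<int>::Inner<int> { };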
+ bool SawNonEmptyTemplateParameterList = false;
+
+ auto CheckExplicitSpecialization = [&](SourceRange Range, bool Recovery) {
+ if (SawNonEmptyTemplateParameterList) {
+ Diag(DeclLoc, diag::err_specialize_member_of_template)
+ << !Recovery << Range;
+ Invalid = true;
+ IsExplicitSpecialization = false;
+ return true;
+ }
+
+ return false;
+ };
+
+ auto DiagnoseMissingExplicitSpecialization = [&] (SourceRange Range) {
+ // Check that we can have an explicit specialization here.
+ if (CheckExplicitSpecialization(Range, true))
+ return true;
+
+ // We don't have a template header, but we should.
+ SourceLocation ExpectedTemplateLoc;
+ if (!ParamLists.empty())
+ ExpectedTemplateLoc = ParamLists[0]->getTemplateLoc();
+ else
+ ExpectedTemplateLoc = DeclStartLoc;
+
+ Diag(DeclLoc, diag::err_template_spec_needs_header)
+ << Range
+ << FixItHint::CreateInsertion(ExpectedTemplateLoc, "template<> ");
+ return false;
+ };
+
+ unsigned ParamIdx = 0;
+ for (unsigned TypeIdx = 0, NumTypes = NestedTypes.size(); TypeIdx != NumTypes;
+ ++TypeIdx) {
+ T = NestedTypes[TypeIdx];
+
+ // Whether we expect a 'template<>' header.
+ bool NeedEmptyTemplateHeader = false;
+
+ // Whether we expect a template header with parameters.
+ bool NeedNonemptyTemplateHeader = false;
+
+ // For a dependent type, the set of template parameters that we
+ // expect to see.
+ TemplateParameterList *ExpectedTemplateParams = nullptr;
+
+ // C++0x [temp.expl.spec]p15:
+ // A member or a member template may be nested within many enclosing
+ // class templates. In an explicit specialization for such a member, the
+ // member declaration shall be preceded by a template<> for each
+ // enclosing class template that is explicitly specialized.
+ if (CXXRecordDecl *Record = T->getAsCXXRecordDecl()) {
+ if (ClassTemplatePartialSpecializationDecl *Partial
+ = dyn_cast<ClassTemplatePartialSpecializationDecl>(Record)) {
+ ExpectedTemplateParams = Partial->getTemplateParameters();
+ NeedNonemptyTemplateHeader = true;
+ } else if (Record->isDependentType()) {
+ if (Record->getDescribedClassTemplate()) {
+ ExpectedTemplateParams = Record->getDescribedClassTemplate()
+ ->getTemplateParameters();
+ NeedNonemptyTemplateHeader = true;
+ }
+ } else if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Record)) {
+ // C++0x [temp.expl.spec]p4:
+ // Members of an explicitly specialized class template are defined
+ // in the same manner as members of normal classes, and not using
+ // the template<> syntax.
+ if (Spec->getSpecializationKind() != TSK_ExplicitSpecialization)
+ NeedEmptyTemplateHeader = true;
+ else
+ continue;
+ } else if (Record->getTemplateSpecializationKind()) {
+ if (Record->getTemplateSpecializationKind()
+ != TSK_ExplicitSpecialization &&
+ TypeIdx == NumTypes - 1)
+ IsExplicitSpecialization = true;
+
+ continue;
+ }
+ } else if (const TemplateSpecializationType *TST
+ = T->getAs<TemplateSpecializationType>()) {
+ if (TemplateDecl *Template = TST->getTemplateName().getAsTemplateDecl()) {
+ ExpectedTemplateParams = Template->getTemplateParameters();
+ NeedNonemptyTemplateHeader = true;
+ }
+ } else if (T->getAs<DependentTemplateSpecializationType>()) {
+ // FIXME: We actually could/should check the template arguments here
+ // against the corresponding template parameter list.
+ NeedNonemptyTemplateHeader = false;
+ }
+
+ // C++ [temp.expl.spec]p16:
+ // In an explicit specialization declaration for a member of a class
+ // template or a member template that appears in namespace scope, the
+ // member template and some of its enclosing class templates may remain
+ // unspecialized, except that the declaration shall not explicitly
+ // specialize a class member template if its enclosing class templates
+ // are not explicitly specialized as well.
+ if (ParamIdx < ParamLists.size()) {
+ if (ParamLists[ParamIdx]->size() == 0) {
+ if (CheckExplicitSpecialization(ParamLists[ParamIdx]->getSourceRange(),
+ false))
+ return nullptr;
+ } else
+ SawNonEmptyTemplateParameterList = true;
+ }
+
+ if (NeedEmptyTemplateHeader) {
+ // If we're on the last of the types, and we need a 'template<>' header
+ // here, then it's an explicit specialization.
+ if (TypeIdx == NumTypes - 1)
+ IsExplicitSpecialization = true;
+
+ if (ParamIdx < ParamLists.size()) {
+ if (ParamLists[ParamIdx]->size() > 0) {
+ // The header has template parameters when it shouldn't. Complain.
+ Diag(ParamLists[ParamIdx]->getTemplateLoc(),
+ diag::err_template_param_list_matches_nontemplate)
+ << T
+ << SourceRange(ParamLists[ParamIdx]->getLAngleLoc(),
+ ParamLists[ParamIdx]->getRAngleLoc())
+ << getRangeOfTypeInNestedNameSpecifier(Context, T, SS);
+ Invalid = true;
+ return nullptr;
+ }
+
+ // Consume this template header.
+ ++ParamIdx;
+ continue;
+ }
+
+ if (!IsFriend)
+ if (DiagnoseMissingExplicitSpecialization(
+ getRangeOfTypeInNestedNameSpecifier(Context, T, SS)))
+ return nullptr;
+
+ continue;
+ }
+
+ if (NeedNonemptyTemplateHeader) {
+ // In friend declarations we can have template-ids which don't
+ // depend on the corresponding template parameter lists. But
+ // assume that empty parameter lists are supposed to match this
+ // template-id.
+ if (IsFriend && T->isDependentType()) {
+ if (ParamIdx < ParamLists.size() &&
+ DependsOnTemplateParameters(T, ParamLists[ParamIdx]))
+ ExpectedTemplateParams = nullptr;
+ else
+ continue;
+ }
+
+ if (ParamIdx < ParamLists.size()) {
+ // Check the template parameter list, if we can.
+ if (ExpectedTemplateParams &&
+ !TemplateParameterListsAreEqual(ParamLists[ParamIdx],
+ ExpectedTemplateParams,
+ true, TPL_TemplateMatch))
+ Invalid = true;
+
+ if (!Invalid &&
+ CheckTemplateParameterList(ParamLists[ParamIdx], nullptr,
+ TPC_ClassTemplateMember))
+ Invalid = true;
+
+ ++ParamIdx;
+ continue;
+ }
+
+ Diag(DeclLoc, diag::err_template_spec_needs_template_parameters)
+ << T
+ << getRangeOfTypeInNestedNameSpecifier(Context, T, SS);
+ Invalid = true;
+ continue;
+ }
+ }
+
+ // If there were at least as many template-ids as there were template
+ // parameter lists, then there are no template parameter lists remaining for
+ // the declaration itself.
+ if (ParamIdx >= ParamLists.size()) {
+ if (TemplateId && !IsFriend) {
+ // We don't have a template header for the declaration itself, but we
+ // should.
+ IsExplicitSpecialization = true;
+ DiagnoseMissingExplicitSpecialization(SourceRange(TemplateId->LAngleLoc,
+ TemplateId->RAngleLoc));
+
+ // Fabricate an empty template parameter list for the invented header.
+ return TemplateParameterList::Create(Context, SourceLocation(),
+ SourceLocation(), None,
+ SourceLocation());
+ }
+
+ return nullptr;
+ }
+
+ // If there were too many template parameter lists, complain about that now.
+ if (ParamIdx < ParamLists.size() - 1) {
+ bool HasAnyExplicitSpecHeader = false;
+ bool AllExplicitSpecHeaders = true;
+ for (unsigned I = ParamIdx, E = ParamLists.size() - 1; I != E; ++I) {
+ if (ParamLists[I]->size() == 0)
+ HasAnyExplicitSpecHeader = true;
+ else
+ AllExplicitSpecHeaders = false;
+ }
+
+ Diag(ParamLists[ParamIdx]->getTemplateLoc(),
+ AllExplicitSpecHeaders ? diag::warn_template_spec_extra_headers
+ : diag::err_template_spec_extra_headers)
+ << SourceRange(ParamLists[ParamIdx]->getTemplateLoc(),
+ ParamLists[ParamLists.size() - 2]->getRAngleLoc());
+
+ // If there was a specialization somewhere, such that 'template<>' is
+ // not required, and there were any 'template<>' headers, note where the
+ // specialization occurred.
+ if (ExplicitSpecLoc.isValid() && HasAnyExplicitSpecHeader)
+ Diag(ExplicitSpecLoc,
+ diag::note_explicit_template_spec_does_not_need_header)
+ << NestedTypes.back();
+
+ // We have a template parameter list with no corresponding scope, which
+ // means that the resulting template declaration can't be instantiated
+ // properly (we'll end up with dependent nodes when we shouldn't).
+ if (!AllExplicitSpecHeaders)
+ Invalid = true;
+ }
+
+ // C++ [temp.expl.spec]p16:
+ // In an explicit specialization declaration for a member of a class
+ // template or a member template that appears in namespace scope, the
+ // member template and some of its enclosing class templates may remain
+ // unspecialized, except that the declaration shall not explicitly
+ // specialize a class member template if its enclosing class templates
+ // are not explicitly specialized as well.
+ if (ParamLists.back()->size() == 0 &&
+ CheckExplicitSpecialization(ParamLists[ParamIdx]->getSourceRange(),
+ false))
+ return nullptr;
+
+ // Return the last template parameter list, which corresponds to the
+ // entity being declared.
+ return ParamLists.back();
+}
+
+void Sema::NoteAllFoundTemplates(TemplateName Name) {
+ if (TemplateDecl *Template = Name.getAsTemplateDecl()) {
+ Diag(Template->getLocation(), diag::note_template_declared_here)
+ << (isa<FunctionTemplateDecl>(Template)
+ ? 0
+ : isa<ClassTemplateDecl>(Template)
+ ? 1
+ : isa<VarTemplateDecl>(Template)
+ ? 2
+ : isa<TypeAliasTemplateDecl>(Template) ? 3 : 4)
+ << Template->getDeclName();
+ return;
+ }
+
+ if (OverloadedTemplateStorage *OST = Name.getAsOverloadedTemplate()) {
+ for (OverloadedTemplateStorage::iterator I = OST->begin(),
+ IEnd = OST->end();
+ I != IEnd; ++I)
+ Diag((*I)->getLocation(), diag::note_template_declared_here)
+ << 0 << (*I)->getDeclName();
+
+ return;
+ }
+}
+
+static QualType
+checkBuiltinTemplateIdType(Sema &SemaRef, BuiltinTemplateDecl *BTD,
+ const SmallVectorImpl<TemplateArgument> &Converted,
+ SourceLocation TemplateLoc,
+ TemplateArgumentListInfo &TemplateArgs) {
+ ASTContext &Context = SemaRef.getASTContext();
+ switch (BTD->getBuiltinTemplateKind()) {
+ case BTK__make_integer_seq:
+ // Specializations of __make_integer_seq<S, T, N> are treated like
+ // S<T, 0, ..., N-1>.
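+ // For example, __make_integer_seq<integer_sequence, int, 3> is built as
+ // integer_sequence<int, 0, 1, 2>.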
+
+ // C++14 [intseq.intseq]p1:
+ // T shall be an integer type.
+ if (!Converted[1].getAsType()->isIntegralType(Context)) {
+ SemaRef.Diag(TemplateArgs[1].getLocation(),
+ diag::err_integer_sequence_integral_element_type);
+ return QualType();
+ }
+
+ // C++14 [intseq.make]p1:
+ // If N is negative the program is ill-formed.
+ TemplateArgument NumArgsArg = Converted[2];
+ llvm::APSInt NumArgs = NumArgsArg.getAsIntegral();
+ if (NumArgs < 0) {
+ SemaRef.Diag(TemplateArgs[2].getLocation(),
+ diag::err_integer_sequence_negative_length);
+ return QualType();
+ }
+
+ QualType ArgTy = NumArgsArg.getIntegralType();
+ TemplateArgumentListInfo SyntheticTemplateArgs;
+ // The type argument gets reused as the first template argument in the
+ // synthetic template argument list.
+ SyntheticTemplateArgs.addArgument(TemplateArgs[1]);
+ // Expand N into 0 ... N-1.
+ for (llvm::APSInt I(NumArgs.getBitWidth(), NumArgs.isUnsigned());
+ I < NumArgs; ++I) {
+ TemplateArgument TA(Context, I, ArgTy);
+ Expr *E = SemaRef.BuildExpressionFromIntegralTemplateArgument(
+ TA, TemplateArgs[2].getLocation())
+ .getAs<Expr>();
+ SyntheticTemplateArgs.addArgument(
+ TemplateArgumentLoc(TemplateArgument(E), E));
+ }
+ // The first template argument will be reused as the template decl that
+ // our synthetic template arguments will be applied to.
+ return SemaRef.CheckTemplateIdType(Converted[0].getAsTemplate(),
+ TemplateLoc, SyntheticTemplateArgs);
+ }
+ llvm_unreachable("unexpected BuiltinTemplateDecl!");
+}
+
+QualType Sema::CheckTemplateIdType(TemplateName Name,
+ SourceLocation TemplateLoc,
+ TemplateArgumentListInfo &TemplateArgs) {
+ DependentTemplateName *DTN
+ = Name.getUnderlying().getAsDependentTemplateName();
+ if (DTN && DTN->isIdentifier())
+ // When building a template-id where the template-name is dependent,
+ // assume the template is a type template. Either our assumption is
+ // correct, or the code is ill-formed and will be diagnosed when the
+ // dependent name is substituted.
+ return Context.getDependentTemplateSpecializationType(ETK_None,
+ DTN->getQualifier(),
+ DTN->getIdentifier(),
+ TemplateArgs);
+
+ TemplateDecl *Template = Name.getAsTemplateDecl();
+ if (!Template || isa<FunctionTemplateDecl>(Template) ||
+ isa<VarTemplateDecl>(Template)) {
+ // We might have a substituted template template parameter pack. If so,
+ // build a template specialization type for it.
+ if (Name.getAsSubstTemplateTemplateParmPack())
+ return Context.getTemplateSpecializationType(Name, TemplateArgs);
+
+ Diag(TemplateLoc, diag::err_template_id_not_a_type)
+ << Name;
+ NoteAllFoundTemplates(Name);
+ return QualType();
+ }
+
+ // Check that the template argument list is well-formed for this
+ // template.
+ SmallVector<TemplateArgument, 4> Converted;
+ if (CheckTemplateArgumentList(Template, TemplateLoc, TemplateArgs,
+ false, Converted))
+ return QualType();
+
+ QualType CanonType;
+
+ bool InstantiationDependent = false;
+ if (TypeAliasTemplateDecl *AliasTemplate =
+ dyn_cast<TypeAliasTemplateDecl>(Template)) {
+ // Find the canonical type for this type alias template specialization.
+ TypeAliasDecl *Pattern = AliasTemplate->getTemplatedDecl();
+ if (Pattern->isInvalidDecl())
+ return QualType();
+
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Converted.data(), Converted.size());
+
+ // Only substitute for the innermost template argument list.
+ MultiLevelTemplateArgumentList TemplateArgLists;
+ TemplateArgLists.addOuterTemplateArguments(&TemplateArgs);
+ unsigned Depth = AliasTemplate->getTemplateParameters()->getDepth();
+ for (unsigned I = 0; I < Depth; ++I)
+ TemplateArgLists.addOuterTemplateArguments(None);
+
+ LocalInstantiationScope Scope(*this);
+ InstantiatingTemplate Inst(*this, TemplateLoc, Template);
+ if (Inst.isInvalid())
+ return QualType();
+
+ CanonType = SubstType(Pattern->getUnderlyingType(),
+ TemplateArgLists, AliasTemplate->getLocation(),
+ AliasTemplate->getDeclName());
+ if (CanonType.isNull())
+ return QualType();
+ } else if (Name.isDependent() ||
+ TemplateSpecializationType::anyDependentTemplateArguments(
+ TemplateArgs, InstantiationDependent)) {
+ // This class template specialization is a dependent
+ // type. Therefore, its canonical type is another class template
+ // specialization type that contains all of the converted
+ // arguments in canonical form. This ensures that, e.g., A<T> and
+ // A<T, T> have identical types when A is declared as:
+ //
+ // template<typename T, typename U = T> struct A;
+ TemplateName CanonName = Context.getCanonicalTemplateName(Name);
+ CanonType = Context.getTemplateSpecializationType(CanonName,
+ Converted.data(),
+ Converted.size());
+
+ // FIXME: CanonType is not actually the canonical type, and unfortunately
+ // it is a TemplateSpecializationType that we will never use again.
+ // In the future, we need to teach getTemplateSpecializationType to only
+ // build the canonical type and return that to us.
+ CanonType = Context.getCanonicalType(CanonType);
+
+ // This might work out to be a current instantiation, in which
+ // case the canonical type needs to be the InjectedClassNameType.
+ //
+ // TODO: in theory this could be a simple hashtable lookup; most
+ // changes to CurContext don't change the set of current
+ // instantiations.
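+ //
+ // e.g., within 'template<typename T> struct X { X<T> *Next; };', the
+ // written type X<T> denotes the current instantiation, so its canonical
+ // type is X's injected-class-name type rather than a fresh
+ // TemplateSpecializationType.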
+ if (isa<ClassTemplateDecl>(Template)) {
+ for (DeclContext *Ctx = CurContext; Ctx; Ctx = Ctx->getLookupParent()) {
+ // If we get out to a namespace, we're done.
+ if (Ctx->isFileContext()) break;
+
+ // If this isn't a record, keep looking.
+ CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Ctx);
+ if (!Record) continue;
+
+ // Look for one of the two cases with InjectedClassNameTypes
+ // and check whether it's the same template.
+ if (!isa<ClassTemplatePartialSpecializationDecl>(Record) &&
+ !Record->getDescribedClassTemplate())
+ continue;
+
+ // Fetch the injected class name type and check whether its
+ // injected type is equal to the type we just built.
+ QualType ICNT = Context.getTypeDeclType(Record);
+ QualType Injected = cast<InjectedClassNameType>(ICNT)
+ ->getInjectedSpecializationType();
+
+ if (CanonType != Injected->getCanonicalTypeInternal())
+ continue;
+
+ // If so, the canonical type of this TST is the injected
+ // class name type of the record we just found.
+ assert(ICNT.isCanonical());
+ CanonType = ICNT;
+ break;
+ }
+ }
+ } else if (ClassTemplateDecl *ClassTemplate
+ = dyn_cast<ClassTemplateDecl>(Template)) {
+ // Find the class template specialization declaration that
+ // corresponds to these arguments.
+ void *InsertPos = nullptr;
+ ClassTemplateSpecializationDecl *Decl
+ = ClassTemplate->findSpecialization(Converted, InsertPos);
+ if (!Decl) {
+ // This is the first time we have referenced this class template
+ // specialization. Create the canonical declaration and add it to
+ // the set of specializations.
+ Decl = ClassTemplateSpecializationDecl::Create(Context,
+ ClassTemplate->getTemplatedDecl()->getTagKind(),
+ ClassTemplate->getDeclContext(),
+ ClassTemplate->getTemplatedDecl()->getLocStart(),
+ ClassTemplate->getLocation(),
+ ClassTemplate,
+ Converted.data(),
+ Converted.size(), nullptr);
+ ClassTemplate->AddSpecialization(Decl, InsertPos);
+ if (ClassTemplate->isOutOfLine())
+ Decl->setLexicalDeclContext(ClassTemplate->getLexicalDeclContext());
+ }
+
+ // Diagnose uses of this specialization.
+ (void)DiagnoseUseOfDecl(Decl, TemplateLoc);
+
+ CanonType = Context.getTypeDeclType(Decl);
+ assert(isa<RecordType>(CanonType) &&
+ "type of non-dependent specialization is not a RecordType");
+ } else if (auto *BTD = dyn_cast<BuiltinTemplateDecl>(Template)) {
+ CanonType = checkBuiltinTemplateIdType(*this, BTD, Converted, TemplateLoc,
+ TemplateArgs);
+ }
+
+ // Build the fully-sugared type for this class template
+ // specialization, which refers back to the class template
+ // specialization we created or found.
+ return Context.getTemplateSpecializationType(Name, TemplateArgs, CanonType);
+}
+
+TypeResult
+Sema::ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
+ TemplateTy TemplateD, SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgsIn,
+ SourceLocation RAngleLoc,
+ bool IsCtorOrDtorName) {
+ if (SS.isInvalid())
+ return true;
+
+ TemplateName Template = TemplateD.get();
+
+ // Translate the parser's template argument list into our AST format.
+ TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
+ translateTemplateArguments(TemplateArgsIn, TemplateArgs);
+
+ if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
+ QualType T
+ = Context.getDependentTemplateSpecializationType(ETK_None,
+ DTN->getQualifier(),
+ DTN->getIdentifier(),
+ TemplateArgs);
+ // Build type-source information.
+ TypeLocBuilder TLB;
+ DependentTemplateSpecializationTypeLoc SpecTL
+ = TLB.push<DependentTemplateSpecializationTypeLoc>(T);
+ SpecTL.setElaboratedKeywordLoc(SourceLocation());
+ SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateLoc);
+ SpecTL.setLAngleLoc(LAngleLoc);
+ SpecTL.setRAngleLoc(RAngleLoc);
+ for (unsigned I = 0, N = SpecTL.getNumArgs(); I != N; ++I)
+ SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
+ return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
+ }
+
+ QualType Result = CheckTemplateIdType(Template, TemplateLoc, TemplateArgs);
+
+ if (Result.isNull())
+ return true;
+
+ // Build type-source information.
+ TypeLocBuilder TLB;
+ TemplateSpecializationTypeLoc SpecTL
+ = TLB.push<TemplateSpecializationTypeLoc>(Result);
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateLoc);
+ SpecTL.setLAngleLoc(LAngleLoc);
+ SpecTL.setRAngleLoc(RAngleLoc);
+ for (unsigned i = 0, e = SpecTL.getNumArgs(); i != e; ++i)
+ SpecTL.setArgLocInfo(i, TemplateArgs[i].getLocInfo());
+
+ // NOTE: avoid constructing an ElaboratedTypeLoc if this is a
+ // constructor or destructor name (in such a case, the scope specifier
+ // will be attached to the enclosing Decl or Expr node).
+ if (SS.isNotEmpty() && !IsCtorOrDtorName) {
+ // Create an elaborated-type-specifier containing the nested-name-specifier.
+ Result = Context.getElaboratedType(ETK_None, SS.getScopeRep(), Result);
+ ElaboratedTypeLoc ElabTL = TLB.push<ElaboratedTypeLoc>(Result);
+ ElabTL.setElaboratedKeywordLoc(SourceLocation());
+ ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ }
+
+ return CreateParsedType(Result, TLB.getTypeSourceInfo(Context, Result));
+}
+
+TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
+ TypeSpecifierType TagSpec,
+ SourceLocation TagLoc,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ TemplateTy TemplateD,
+ SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgsIn,
+ SourceLocation RAngleLoc) {
+ TemplateName Template = TemplateD.get();
+
+ // Translate the parser's template argument list into our AST format.
+ TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
+ translateTemplateArguments(TemplateArgsIn, TemplateArgs);
+
+ // Determine the tag kind
+ TagTypeKind TagKind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
+ ElaboratedTypeKeyword Keyword
+ = TypeWithKeyword::getKeywordForTagTypeKind(TagKind);
+
+ if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
+ QualType T = Context.getDependentTemplateSpecializationType(Keyword,
+ DTN->getQualifier(),
+ DTN->getIdentifier(),
+ TemplateArgs);
+
+ // Build type-source information.
+ TypeLocBuilder TLB;
+ DependentTemplateSpecializationTypeLoc SpecTL
+ = TLB.push<DependentTemplateSpecializationTypeLoc>(T);
+ SpecTL.setElaboratedKeywordLoc(TagLoc);
+ SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateLoc);
+ SpecTL.setLAngleLoc(LAngleLoc);
+ SpecTL.setRAngleLoc(RAngleLoc);
+ for (unsigned I = 0, N = SpecTL.getNumArgs(); I != N; ++I)
+ SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
+ return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
+ }
+
+ if (TypeAliasTemplateDecl *TAT =
+ dyn_cast_or_null<TypeAliasTemplateDecl>(Template.getAsTemplateDecl())) {
+ // C++0x [dcl.type.elab]p2:
+ // If the identifier resolves to a typedef-name or the simple-template-id
+ // resolves to an alias template specialization, the
+ // elaborated-type-specifier is ill-formed.
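+ //
+ // e.g., given 'template<typename T> using A = S<T>;' for a class template
+ // S, the use 'class A<int> a;' is ill-formed even though A<int> denotes
+ // a class type.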
+ Diag(TemplateLoc, diag::err_tag_reference_non_tag) << 4;
+ Diag(TAT->getLocation(), diag::note_declared_at);
+ }
+
+ QualType Result = CheckTemplateIdType(Template, TemplateLoc, TemplateArgs);
+ if (Result.isNull())
+ return TypeResult(true);
+
+ // Check the tag kind
+ if (const RecordType *RT = Result->getAs<RecordType>()) {
+ RecordDecl *D = RT->getDecl();
+
+ IdentifierInfo *Id = D->getIdentifier();
+ assert(Id && "templated class must have an identifier");
+
+ if (!isAcceptableTagRedeclaration(D, TagKind, TUK == TUK_Definition,
+ TagLoc, Id)) {
+ Diag(TagLoc, diag::err_use_with_wrong_tag)
+ << Result
+ << FixItHint::CreateReplacement(SourceRange(TagLoc), D->getKindName());
+ Diag(D->getLocation(), diag::note_previous_use);
+ }
+ }
+
+ // Provide source-location information for the template specialization.
+ TypeLocBuilder TLB;
+ TemplateSpecializationTypeLoc SpecTL
+ = TLB.push<TemplateSpecializationTypeLoc>(Result);
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateLoc);
+ SpecTL.setLAngleLoc(LAngleLoc);
+ SpecTL.setRAngleLoc(RAngleLoc);
+ for (unsigned i = 0, e = SpecTL.getNumArgs(); i != e; ++i)
+ SpecTL.setArgLocInfo(i, TemplateArgs[i].getLocInfo());
+
+ // Construct an elaborated type containing the nested-name-specifier (if any)
+ // and tag keyword.
+ Result = Context.getElaboratedType(Keyword, SS.getScopeRep(), Result);
+ ElaboratedTypeLoc ElabTL = TLB.push<ElaboratedTypeLoc>(Result);
+ ElabTL.setElaboratedKeywordLoc(TagLoc);
+ ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ return CreateParsedType(Result, TLB.getTypeSourceInfo(Context, Result));
+}
+
+static bool CheckTemplatePartialSpecializationArgs(
+ Sema &S, SourceLocation NameLoc, TemplateParameterList *TemplateParams,
+ unsigned ExplicitArgs, SmallVectorImpl<TemplateArgument> &TemplateArgs);
+
+static bool CheckTemplateSpecializationScope(Sema &S, NamedDecl *Specialized,
+ NamedDecl *PrevDecl,
+ SourceLocation Loc,
+ bool IsPartialSpecialization);
+
+static TemplateSpecializationKind getTemplateSpecializationKind(Decl *D);
+
+static bool isTemplateArgumentTemplateParameter(
+ const TemplateArgument &Arg, unsigned Depth, unsigned Index) {
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ case TemplateArgument::NullPtr:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Pack:
+ case TemplateArgument::TemplateExpansion:
+ return false;
+
+ case TemplateArgument::Type: {
+ QualType Type = Arg.getAsType();
+ const TemplateTypeParmType *TPT =
+ Type->getAs<TemplateTypeParmType>();
+ return TPT && !Type.hasQualifiers() &&
+ TPT->getDepth() == Depth && TPT->getIndex() == Index;
+ }
+
+ case TemplateArgument::Expression: {
+ DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Arg.getAsExpr());
+ if (!DRE || !DRE->getDecl())
+ return false;
+ const NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(DRE->getDecl());
+ return NTTP && NTTP->getDepth() == Depth && NTTP->getIndex() == Index;
+ }
+
+ case TemplateArgument::Template:
+ const TemplateTemplateParmDecl *TTP =
+ dyn_cast_or_null<TemplateTemplateParmDecl>(
+ Arg.getAsTemplateOrTemplatePattern().getAsTemplateDecl());
+ return TTP && TTP->getDepth() == Depth && TTP->getIndex() == Index;
+ }
+ llvm_unreachable("unexpected kind of template argument");
+}
+
+static bool isSameAsPrimaryTemplate(TemplateParameterList *Params,
+ ArrayRef<TemplateArgument> Args) {
+ if (Params->size() != Args.size())
+ return false;
+
+ unsigned Depth = Params->getDepth();
+
+ for (unsigned I = 0, N = Args.size(); I != N; ++I) {
+ TemplateArgument Arg = Args[I];
+
+ // If the parameter is a pack expansion, the argument must be a pack
+ // whose only element is a pack expansion.
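+ // e.g., for a parameter pack Ts, the matching argument is a pack whose
+ // single element is the pack expansion Ts... .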
+ if (Params->getParam(I)->isParameterPack()) {
+ if (Arg.getKind() != TemplateArgument::Pack || Arg.pack_size() != 1 ||
+ !Arg.pack_begin()->isPackExpansion())
+ return false;
+ Arg = Arg.pack_begin()->getPackExpansionPattern();
+ }
+
+ if (!isTemplateArgumentTemplateParameter(Arg, Depth, I))
+ return false;
+ }
+
+ return true;
+}
+
+/// Convert the parser's template argument list representation into our form.
+static TemplateArgumentListInfo
+makeTemplateArgumentListInfo(Sema &S, TemplateIdAnnotation &TemplateId) {
+ TemplateArgumentListInfo TemplateArgs(TemplateId.LAngleLoc,
+ TemplateId.RAngleLoc);
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId.getTemplateArgs(),
+ TemplateId.NumArgs);
+ S.translateTemplateArguments(TemplateArgsPtr, TemplateArgs);
+ return TemplateArgs;
+}
+
+DeclResult Sema::ActOnVarTemplateSpecialization(
+ Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc,
+ TemplateParameterList *TemplateParams, StorageClass SC,
+ bool IsPartialSpecialization) {
+ // D must be a variable template-id.
+ assert(D.getName().getKind() == UnqualifiedId::IK_TemplateId &&
+ "Variable template specialization is declared with a template it.");
+
+ TemplateIdAnnotation *TemplateId = D.getName().TemplateId;
+ TemplateArgumentListInfo TemplateArgs =
+ makeTemplateArgumentListInfo(*this, *TemplateId);
+ SourceLocation TemplateNameLoc = D.getIdentifierLoc();
+ SourceLocation LAngleLoc = TemplateId->LAngleLoc;
+ SourceLocation RAngleLoc = TemplateId->RAngleLoc;
+
+ TemplateName Name = TemplateId->Template.get();
+
+ // The template-id must name a variable template.
+ VarTemplateDecl *VarTemplate =
+ dyn_cast_or_null<VarTemplateDecl>(Name.getAsTemplateDecl());
+ if (!VarTemplate) {
+ NamedDecl *FnTemplate;
+ if (auto *OTS = Name.getAsOverloadedTemplate())
+ FnTemplate = *OTS->begin();
+ else
+ FnTemplate = dyn_cast_or_null<FunctionTemplateDecl>(Name.getAsTemplateDecl());
+ if (FnTemplate)
+ return Diag(D.getIdentifierLoc(), diag::err_var_spec_no_template_but_method)
+ << FnTemplate->getDeclName();
+ return Diag(D.getIdentifierLoc(), diag::err_var_spec_no_template)
+ << IsPartialSpecialization;
+ }
+
+ // Check for unexpanded parameter packs in any of the template arguments.
+ for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
+ if (DiagnoseUnexpandedParameterPack(TemplateArgs[I],
+ UPPC_PartialSpecialization))
+ return true;
+
+ // Check that the template argument list is well-formed for this
+ // template.
+ SmallVector<TemplateArgument, 4> Converted;
+ if (CheckTemplateArgumentList(VarTemplate, TemplateNameLoc, TemplateArgs,
+ false, Converted))
+ return true;
+
+ // Find the variable template (partial) specialization declaration that
+ // corresponds to these arguments.
+ if (IsPartialSpecialization) {
+ if (CheckTemplatePartialSpecializationArgs(
+ *this, TemplateNameLoc, VarTemplate->getTemplateParameters(),
+ TemplateArgs.size(), Converted))
+ return true;
+
+ bool InstantiationDependent;
+ if (!Name.isDependent() &&
+ !TemplateSpecializationType::anyDependentTemplateArguments(
+ TemplateArgs.getArgumentArray(), TemplateArgs.size(),
+ InstantiationDependent)) {
+ Diag(TemplateNameLoc, diag::err_partial_spec_fully_specialized)
+ << VarTemplate->getDeclName();
+ IsPartialSpecialization = false;
+ }
+
+ if (isSameAsPrimaryTemplate(VarTemplate->getTemplateParameters(),
+ Converted)) {
+ // C++ [temp.class.spec]p9b3:
+ //
+ // -- The argument list of the specialization shall not be identical
+ // to the implicit argument list of the primary template.
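+ //
+ // e.g.:
+ // template<typename T> T v;
+ // template<typename T> T v<T>; // error: same as the primary template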
+ Diag(TemplateNameLoc, diag::err_partial_spec_args_match_primary_template)
+ << /*variable template*/ 1
+ << /*is definition*/(SC != SC_Extern && !CurContext->isRecord())
+ << FixItHint::CreateRemoval(SourceRange(LAngleLoc, RAngleLoc));
+ // FIXME: Recover from this by treating the declaration as a redeclaration
+ // of the primary template.
+ return true;
+ }
+ }
+
+ void *InsertPos = nullptr;
+ VarTemplateSpecializationDecl *PrevDecl = nullptr;
+
+ if (IsPartialSpecialization)
+ // FIXME: Template parameter list matters too
+ PrevDecl = VarTemplate->findPartialSpecialization(Converted, InsertPos);
+ else
+ PrevDecl = VarTemplate->findSpecialization(Converted, InsertPos);
+
+ VarTemplateSpecializationDecl *Specialization = nullptr;
+
+ // Check whether we can declare a variable template specialization in
+ // the current scope.
+ if (CheckTemplateSpecializationScope(*this, VarTemplate, PrevDecl,
+ TemplateNameLoc,
+ IsPartialSpecialization))
+ return true;
+
+ if (PrevDecl && PrevDecl->getSpecializationKind() == TSK_Undeclared) {
+ // Since the only prior variable template specialization with these
+ // arguments was referenced but not declared, reuse that
+ // declaration node as our own, updating its source location and
+ // the list of outer template parameters to reflect our new declaration.
+ Specialization = PrevDecl;
+ Specialization->setLocation(TemplateNameLoc);
+ PrevDecl = nullptr;
+ } else if (IsPartialSpecialization) {
+ // Create a new variable template partial specialization declaration node.
+ VarTemplatePartialSpecializationDecl *PrevPartial =
+ cast_or_null<VarTemplatePartialSpecializationDecl>(PrevDecl);
+ VarTemplatePartialSpecializationDecl *Partial =
+ VarTemplatePartialSpecializationDecl::Create(
+ Context, VarTemplate->getDeclContext(), TemplateKWLoc,
+ TemplateNameLoc, TemplateParams, VarTemplate, DI->getType(), DI, SC,
+ Converted.data(), Converted.size(), TemplateArgs);
+
+ if (!PrevPartial)
+ VarTemplate->AddPartialSpecialization(Partial, InsertPos);
+ Specialization = Partial;
+
+ // If we are providing an explicit specialization of a member variable
+ // template specialization, make a note of that.
+ if (PrevPartial && PrevPartial->getInstantiatedFromMember())
+ PrevPartial->setMemberSpecialization();
+
+ // Check that all of the template parameters of the variable template
+ // partial specialization are deducible from the template
+ // arguments. If not, this variable template partial specialization
+ // will never be used.
+ llvm::SmallBitVector DeducibleParams(TemplateParams->size());
+ MarkUsedTemplateParameters(Partial->getTemplateArgs(), true,
+ TemplateParams->getDepth(), DeducibleParams);
+
+ if (!DeducibleParams.all()) {
+ unsigned NumNonDeducible =
+ DeducibleParams.size() - DeducibleParams.count();
+ Diag(TemplateNameLoc, diag::warn_partial_specs_not_deducible)
+ << /*variable template*/ 1 << (NumNonDeducible > 1)
+ << SourceRange(TemplateNameLoc, RAngleLoc);
+ for (unsigned I = 0, N = DeducibleParams.size(); I != N; ++I) {
+ if (!DeducibleParams[I]) {
+ NamedDecl *Param = cast<NamedDecl>(TemplateParams->getParam(I));
+ if (Param->getDeclName())
+ Diag(Param->getLocation(), diag::note_partial_spec_unused_parameter)
+ << Param->getDeclName();
+ else
+ Diag(Param->getLocation(), diag::note_partial_spec_unused_parameter)
+ << "(anonymous)";
+ }
+ }
+ }
+ } else {
+ // Create a new variable template specialization declaration node for
+ // this explicit specialization or friend declaration.
+ Specialization = VarTemplateSpecializationDecl::Create(
+ Context, VarTemplate->getDeclContext(), TemplateKWLoc, TemplateNameLoc,
+ VarTemplate, DI->getType(), DI, SC, Converted.data(), Converted.size());
+ Specialization->setTemplateArgsInfo(TemplateArgs);
+
+ if (!PrevDecl)
+ VarTemplate->AddSpecialization(Specialization, InsertPos);
+ }
+
+ // C++ [temp.expl.spec]p6:
+ // If a template, a member template or the member of a class template is
+ // explicitly specialized then that specialization shall be declared
+ // before the first use of that specialization that would cause an implicit
+ // instantiation to take place, in every translation unit in which such a
+ // use occurs; no diagnostic is required.
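+ //
+ // e.g.:
+ // template<typename T> T v = T();
+ // int i = v<int>; // point of instantiation of v<int>
+ // template<> int v<int>; // error: specialization after instantiation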
+ if (PrevDecl && PrevDecl->getPointOfInstantiation().isValid()) {
+ bool Okay = false;
+ for (Decl *Prev = PrevDecl; Prev; Prev = Prev->getPreviousDecl()) {
+ // Is there any previous explicit specialization declaration?
+ if (getTemplateSpecializationKind(Prev) == TSK_ExplicitSpecialization) {
+ Okay = true;
+ break;
+ }
+ }
+
+ if (!Okay) {
+ SourceRange Range(TemplateNameLoc, RAngleLoc);
+ Diag(TemplateNameLoc, diag::err_specialization_after_instantiation)
+ << Name << Range;
+
+ Diag(PrevDecl->getPointOfInstantiation(),
+ diag::note_instantiation_required_here)
+ << (PrevDecl->getTemplateSpecializationKind() !=
+ TSK_ImplicitInstantiation);
+ return true;
+ }
+ }
+
+ Specialization->setTemplateKeywordLoc(TemplateKWLoc);
+ Specialization->setLexicalDeclContext(CurContext);
+
+ // Add the specialization into its lexical context, so that it can
+ // be seen when iterating through the list of declarations in that
+ // context. However, specializations are not found by name lookup.
+ CurContext->addDecl(Specialization);
+
+ // Note that this is an explicit specialization.
+ Specialization->setSpecializationKind(TSK_ExplicitSpecialization);
+
+ if (PrevDecl) {
+ // Check that this isn't a redefinition of this specialization,
+ // merging with previous declarations.
+ LookupResult PrevSpec(*this, GetNameForDeclarator(D), LookupOrdinaryName,
+ ForRedeclaration);
+ PrevSpec.addDecl(PrevDecl);
+ D.setRedeclaration(CheckVariableDeclaration(Specialization, PrevSpec));
+ } else if (Specialization->isStaticDataMember() &&
+ Specialization->isOutOfLine()) {
+ Specialization->setAccess(VarTemplate->getAccess());
+ }
+
+ // Link instantiations of static data members back to the template from
+ // which they were instantiated.
+ if (Specialization->isStaticDataMember())
+ Specialization->setInstantiationOfStaticDataMember(
+ VarTemplate->getTemplatedDecl(),
+ Specialization->getSpecializationKind());
+
+ return Specialization;
+}
+
+namespace {
+/// \brief A partial specialization whose template arguments have matched
+/// a given template-id.
+struct PartialSpecMatchResult {
+ VarTemplatePartialSpecializationDecl *Partial;
+ TemplateArgumentList *Args;
+};
+}
+
+DeclResult
+Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc,
+ SourceLocation TemplateNameLoc,
+ const TemplateArgumentListInfo &TemplateArgs) {
+ assert(Template && "A variable template id without template?");
+
+ // Check that the template argument list is well-formed for this template.
+ SmallVector<TemplateArgument, 4> Converted;
+ if (CheckTemplateArgumentList(
+ Template, TemplateNameLoc,
+ const_cast<TemplateArgumentListInfo &>(TemplateArgs), false,
+ Converted))
+ return true;
+
+ // Find the variable template specialization declaration that
+ // corresponds to these arguments.
+ void *InsertPos = nullptr;
+ if (VarTemplateSpecializationDecl *Spec = Template->findSpecialization(
+ Converted, InsertPos))
+ // If we already have a variable template specialization, return it.
+ return Spec;
+
+ // This is the first time we have referenced this variable template
+ // specialization. Create the canonical declaration and add it to
+ // the set of specializations, based on the closest partial specialization
+ // that it represents, if any.
+ VarDecl *InstantiationPattern = Template->getTemplatedDecl();
+ TemplateArgumentList TemplateArgList(TemplateArgumentList::OnStack,
+ Converted.data(), Converted.size());
+ TemplateArgumentList *InstantiationArgs = &TemplateArgList;
+ bool AmbiguousPartialSpec = false;
+ typedef PartialSpecMatchResult MatchResult;
+ SmallVector<MatchResult, 4> Matched;
+ SourceLocation PointOfInstantiation = TemplateNameLoc;
+ TemplateSpecCandidateSet FailedCandidates(PointOfInstantiation,
+ /*ForTakingAddress=*/false);
+
+ // 1. Attempt to find the closest partial specialization that this
+ // specializes, if any.
+ // If any of the template arguments is dependent, then this is probably
+ // a placeholder for an incomplete declarative context, which must be
+ // complete by instantiation time. Thus, do not search through the partial
+ // specializations yet.
+ // TODO: Unify with InstantiateClassTemplateSpecialization()?
+ // Perhaps better after unification of DeduceTemplateArguments() and
+ // getMoreSpecializedPartialSpecialization().
+ bool InstantiationDependent = false;
+ if (!TemplateSpecializationType::anyDependentTemplateArguments(
+ TemplateArgs, InstantiationDependent)) {
+
+ SmallVector<VarTemplatePartialSpecializationDecl *, 4> PartialSpecs;
+ Template->getPartialSpecializations(PartialSpecs);
+
+ for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I) {
+ VarTemplatePartialSpecializationDecl *Partial = PartialSpecs[I];
+ TemplateDeductionInfo Info(FailedCandidates.getLocation());
+
+ if (TemplateDeductionResult Result =
+ DeduceTemplateArguments(Partial, TemplateArgList, Info)) {
+ // Store the failed-deduction information for use in diagnostics, later.
+ // TODO: Actually use the failed-deduction info?
+ FailedCandidates.addCandidate()
+ .set(Partial, MakeDeductionFailureInfo(Context, Result, Info));
+ (void)Result;
+ } else {
+ Matched.push_back(PartialSpecMatchResult());
+ Matched.back().Partial = Partial;
+ Matched.back().Args = Info.take();
+ }
+ }
+
+ if (!Matched.empty()) {
+ SmallVector<MatchResult, 4>::iterator Best = Matched.begin();
+ if (Matched.size() == 1) {
+ // -- If exactly one matching specialization is found, the
+ // instantiation is generated from that specialization.
+ // We don't need to do anything for this.
+ } else {
+ // -- If more than one matching specialization is found, the
+ // partial order rules (14.5.4.2) are used to determine
+ // whether one of the specializations is more specialized
+ // than the others. If none of the specializations is more
+ // specialized than all of the other matching
+ // specializations, then the use of the variable template is
+ // ambiguous and the program is ill-formed.
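+ //
+ // e.g., with partial specializations v<T, int> and v<int, U>, the use
+ // v<int, int> matches both, and neither is more specialized than the
+ // other, so the use is ambiguous.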
+ for (SmallVector<MatchResult, 4>::iterator P = Best + 1,
+ PEnd = Matched.end();
+ P != PEnd; ++P) {
+ if (getMoreSpecializedPartialSpecialization(P->Partial, Best->Partial,
+ PointOfInstantiation) ==
+ P->Partial)
+ Best = P;
+ }
+
+ // Determine if the best partial specialization is more specialized than
+ // the others.
+ for (SmallVector<MatchResult, 4>::iterator P = Matched.begin(),
+ PEnd = Matched.end();
+ P != PEnd; ++P) {
+ if (P != Best && getMoreSpecializedPartialSpecialization(
+ P->Partial, Best->Partial,
+ PointOfInstantiation) != Best->Partial) {
+ AmbiguousPartialSpec = true;
+ break;
+ }
+ }
+ }
+
+ // Instantiate using the best variable template partial specialization.
+ InstantiationPattern = Best->Partial;
+ InstantiationArgs = Best->Args;
+ } else {
+ // -- If no match is found, the instantiation is generated
+ // from the primary template.
+ // InstantiationPattern = Template->getTemplatedDecl();
+ }
+ }
+
+ // 2. Create the canonical declaration.
+ // Note that we do not instantiate the variable just yet, since
+ // instantiation is handled in DoMarkVarDeclReferenced().
+ // FIXME: LateAttrs et al.?
+ VarTemplateSpecializationDecl *Decl = BuildVarTemplateInstantiation(
+ Template, InstantiationPattern, *InstantiationArgs, TemplateArgs,
+ Converted, TemplateNameLoc, InsertPos /*, LateAttrs, StartingScope*/);
+ if (!Decl)
+ return true;
+
+ if (AmbiguousPartialSpec) {
+ // Partial ordering did not produce a clear winner. Complain.
+ Decl->setInvalidDecl();
+ Diag(PointOfInstantiation, diag::err_partial_spec_ordering_ambiguous)
+ << Decl;
+
+ // Print the matching partial specializations.
+ for (SmallVector<MatchResult, 4>::iterator P = Matched.begin(),
+ PEnd = Matched.end();
+ P != PEnd; ++P)
+ Diag(P->Partial->getLocation(), diag::note_partial_spec_match)
+ << getTemplateArgumentBindingsText(
+ P->Partial->getTemplateParameters(), *P->Args);
+ return true;
+ }
+
+ if (VarTemplatePartialSpecializationDecl *D =
+ dyn_cast<VarTemplatePartialSpecializationDecl>(InstantiationPattern))
+ Decl->setInstantiationOf(D, InstantiationArgs);
+
+ assert(Decl && "No variable template specialization?");
+ return Decl;
+}
+
+ExprResult
+Sema::CheckVarTemplateId(const CXXScopeSpec &SS,
+ const DeclarationNameInfo &NameInfo,
+ VarTemplateDecl *Template, SourceLocation TemplateLoc,
+ const TemplateArgumentListInfo *TemplateArgs) {
+
+ DeclResult Decl = CheckVarTemplateId(Template, TemplateLoc, NameInfo.getLoc(),
+ *TemplateArgs);
+ if (Decl.isInvalid())
+ return ExprError();
+
+ VarDecl *Var = cast<VarDecl>(Decl.get());
+ if (!Var->getTemplateSpecializationKind())
+ Var->setTemplateSpecializationKind(TSK_ImplicitInstantiation,
+ NameInfo.getLoc());
+
+ // Build an ordinary singleton decl ref.
+ return BuildDeclarationNameExpr(SS, NameInfo, Var,
+ /*FoundD=*/nullptr, TemplateArgs);
+}
+
+ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ LookupResult &R,
+ bool RequiresADL,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ // FIXME: Can we do any checking at this point? I guess we could check the
+ // template arguments that we have against the template name, if the template
+ // name refers to a single template. That's not a terribly common case,
+ // though.
+ // foo<int> could identify a single function unambiguously.
+ // That approach does NOT work, however, since a call such as f<int>(1)
+ // is resolved before we resort to overload resolution, e.g. given
+ // template<class T> void f(double);
+ // vs. template<class T, class U> void f(U);
+
+ // These should be filtered out by our callers.
+ assert(!R.empty() && "empty lookup results when building templateid");
+ assert(!R.isAmbiguous() && "ambiguous lookup when building templateid");
+
+ // In C++1y, check variable template ids.
+ bool InstantiationDependent;
+ if (R.getAsSingle<VarTemplateDecl>() &&
+ !TemplateSpecializationType::anyDependentTemplateArguments(
+ *TemplateArgs, InstantiationDependent)) {
+ return CheckVarTemplateId(SS, R.getLookupNameInfo(),
+ R.getAsSingle<VarTemplateDecl>(),
+ TemplateKWLoc, TemplateArgs);
+ }
+
+ // We don't want lookup warnings at this point.
+ R.suppressDiagnostics();
+
+ UnresolvedLookupExpr *ULE
+ = UnresolvedLookupExpr::Create(Context, R.getNamingClass(),
+ SS.getWithLocInContext(Context),
+ TemplateKWLoc,
+ R.getLookupNameInfo(),
+ RequiresADL, TemplateArgs,
+ R.begin(), R.end());
+
+ return ULE;
+}
+
+// We actually only call this from template instantiation.
+ExprResult
+Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs) {
+
+ assert(TemplateArgs || TemplateKWLoc.isValid());
+ DeclContext *DC;
+ if (!(DC = computeDeclContext(SS, false)) ||
+ DC->isDependentContext() ||
+ RequireCompleteDeclContext(SS, DC))
+ return BuildDependentDeclRefExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs);
+
+ bool MemberOfUnknownSpecialization;
+ LookupResult R(*this, NameInfo, LookupOrdinaryName);
+ LookupTemplateName(R, (Scope*)nullptr, SS, QualType(), /*Entering*/ false,
+ MemberOfUnknownSpecialization);
+
+ if (R.isAmbiguous())
+ return ExprError();
+
+ if (R.empty()) {
+ Diag(NameInfo.getLoc(), diag::err_template_kw_refers_to_non_template)
+ << NameInfo.getName() << SS.getRange();
+ return ExprError();
+ }
+
+ if (ClassTemplateDecl *Temp = R.getAsSingle<ClassTemplateDecl>()) {
+ Diag(NameInfo.getLoc(), diag::err_template_kw_refers_to_class_template)
+ << SS.getScopeRep()
+ << NameInfo.getName().getAsString() << SS.getRange();
+ Diag(Temp->getLocation(), diag::note_referenced_class_template);
+ return ExprError();
+ }
+
+ return BuildTemplateIdExpr(SS, TemplateKWLoc, R, /*ADL*/ false, TemplateArgs);
+}
+
+/// \brief Form a dependent template name.
+///
+/// This action forms a dependent template name given the template
+/// name and its (presumably dependent) scope specifier. For
+/// example, given "MetaFun::template apply", the scope specifier \p
+/// SS will be "MetaFun::", \p TemplateKWLoc contains the location
+/// of the "template" keyword, and "apply" is the \p Name.
+TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ UnqualifiedId &Name,
+ ParsedType ObjectType,
+ bool EnteringContext,
+ TemplateTy &Result) {
+ if (TemplateKWLoc.isValid() && S && !S->getTemplateParamParent())
+ Diag(TemplateKWLoc,
+ getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_template_outside_of_template :
+ diag::ext_template_outside_of_template)
+ << FixItHint::CreateRemoval(TemplateKWLoc);
+
+ DeclContext *LookupCtx = nullptr;
+ if (SS.isSet())
+ LookupCtx = computeDeclContext(SS, EnteringContext);
+ if (!LookupCtx && ObjectType)
+ LookupCtx = computeDeclContext(ObjectType.get());
+ if (LookupCtx) {
+ // C++0x [temp.names]p5:
+ // If a name prefixed by the keyword template is not the name of
+ // a template, the program is ill-formed. [Note: the keyword
+ // template may not be applied to non-template members of class
+ // templates. -end note ] [ Note: as is the case with the
+ // typename prefix, the template prefix is allowed in cases
+ // where it is not strictly necessary; i.e., when the
+ // nested-name-specifier or the expression on the left of the ->
+ // or . is not dependent on a template-parameter, or the use
+ // does not appear in the scope of a template. -end note]
+ //
+ // Note: C++03 was more strict here, because it banned the use of
+ // the "template" keyword prior to a template-name that was not a
+ // dependent name. C++ DR468 relaxed this requirement (the
+ // "template" keyword is now permitted). We follow the C++0x
+ // rules, even in C++03 mode with a warning, retroactively applying the DR.
+ bool MemberOfUnknownSpecialization;
+ TemplateNameKind TNK = isTemplateName(S, SS, TemplateKWLoc.isValid(), Name,
+ ObjectType, EnteringContext, Result,
+ MemberOfUnknownSpecialization);
+ if (TNK == TNK_Non_template && LookupCtx->isDependentContext() &&
+ isa<CXXRecordDecl>(LookupCtx) &&
+ (!cast<CXXRecordDecl>(LookupCtx)->hasDefinition() ||
+ cast<CXXRecordDecl>(LookupCtx)->hasAnyDependentBases())) {
+ // This is a dependent template. Handle it below.
+ } else if (TNK == TNK_Non_template) {
+ Diag(Name.getLocStart(),
+ diag::err_template_kw_refers_to_non_template)
+ << GetNameFromUnqualifiedId(Name).getName()
+ << Name.getSourceRange()
+ << TemplateKWLoc;
+ return TNK_Non_template;
+ } else {
+ // We found something; return it.
+ return TNK;
+ }
+ }
+
+ NestedNameSpecifier *Qualifier = SS.getScopeRep();
+
+ switch (Name.getKind()) {
+ case UnqualifiedId::IK_Identifier:
+ Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier,
+ Name.Identifier));
+ return TNK_Dependent_template_name;
+
+ case UnqualifiedId::IK_OperatorFunctionId:
+ Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier,
+ Name.OperatorFunctionId.Operator));
+ return TNK_Function_template;
+
+ case UnqualifiedId::IK_LiteralOperatorId:
+ llvm_unreachable("literal operator id cannot have a dependent scope");
+
+ default:
+ break;
+ }
+
+ Diag(Name.getLocStart(),
+ diag::err_template_kw_refers_to_non_template)
+ << GetNameFromUnqualifiedId(Name).getName()
+ << Name.getSourceRange()
+ << TemplateKWLoc;
+ return TNK_Non_template;
+}
+
+bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
+ TemplateArgumentLoc &AL,
+ SmallVectorImpl<TemplateArgument> &Converted) {
+ const TemplateArgument &Arg = AL.getArgument();
+ QualType ArgType;
+ TypeSourceInfo *TSI = nullptr;
+
+ // Check template type parameter.
+ switch(Arg.getKind()) {
+ case TemplateArgument::Type:
+ // C++ [temp.arg.type]p1:
+ // A template-argument for a template-parameter which is a
+ // type shall be a type-id.
+ ArgType = Arg.getAsType();
+ TSI = AL.getTypeSourceInfo();
+ break;
+ case TemplateArgument::Template: {
+ // We have a template type parameter but the template argument
+ // is a template without any arguments.
+ SourceRange SR = AL.getSourceRange();
+ TemplateName Name = Arg.getAsTemplate();
+ Diag(SR.getBegin(), diag::err_template_missing_args)
+ << Name << SR;
+ if (TemplateDecl *Decl = Name.getAsTemplateDecl())
+ Diag(Decl->getLocation(), diag::note_template_decl_here);
+
+ return true;
+ }
+ case TemplateArgument::Expression: {
+ // We have a template type parameter but the template argument is an
+ // expression; see if maybe it is missing the "typename" keyword.
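+ // e.g., 'X<T::type>' where the user meant 'X<typename T::type>'.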
+ CXXScopeSpec SS;
+ DeclarationNameInfo NameInfo;
+
+ if (DeclRefExpr *ArgExpr = dyn_cast<DeclRefExpr>(Arg.getAsExpr())) {
+ SS.Adopt(ArgExpr->getQualifierLoc());
+ NameInfo = ArgExpr->getNameInfo();
+ } else if (DependentScopeDeclRefExpr *ArgExpr =
+ dyn_cast<DependentScopeDeclRefExpr>(Arg.getAsExpr())) {
+ SS.Adopt(ArgExpr->getQualifierLoc());
+ NameInfo = ArgExpr->getNameInfo();
+ } else if (CXXDependentScopeMemberExpr *ArgExpr =
+ dyn_cast<CXXDependentScopeMemberExpr>(Arg.getAsExpr())) {
+ if (ArgExpr->isImplicitAccess()) {
+ SS.Adopt(ArgExpr->getQualifierLoc());
+ NameInfo = ArgExpr->getMemberNameInfo();
+ }
+ }
+
+ if (auto *II = NameInfo.getName().getAsIdentifierInfo()) {
+ LookupResult Result(*this, NameInfo, LookupOrdinaryName);
+ LookupParsedName(Result, CurScope, &SS);
+
+ if (Result.getAsSingle<TypeDecl>() ||
+ Result.getResultKind() ==
+ LookupResult::NotFoundInCurrentInstantiation) {
+ // Suggest that the user add 'typename' before the NNS.
+ SourceLocation Loc = AL.getSourceRange().getBegin();
+ Diag(Loc, getLangOpts().MSVCCompat
+ ? diag::ext_ms_template_type_arg_missing_typename
+ : diag::err_template_arg_must_be_type_suggest)
+ << FixItHint::CreateInsertion(Loc, "typename ");
+ Diag(Param->getLocation(), diag::note_template_param_here);
+
+ // Recover by synthesizing a type using the location information that we
+ // already have.
+ ArgType =
+ Context.getDependentNameType(ETK_Typename, SS.getScopeRep(), II);
+ TypeLocBuilder TLB;
+ DependentNameTypeLoc TL = TLB.push<DependentNameTypeLoc>(ArgType);
+ TL.setElaboratedKeywordLoc(SourceLocation(/*synthesized*/));
+ TL.setQualifierLoc(SS.getWithLocInContext(Context));
+ TL.setNameLoc(NameInfo.getLoc());
+ TSI = TLB.getTypeSourceInfo(Context, ArgType);
+
+ // Overwrite our input TemplateArgumentLoc so that we can recover
+ // properly.
+ AL = TemplateArgumentLoc(TemplateArgument(ArgType),
+ TemplateArgumentLocInfo(TSI));
+
+ break;
+ }
+ }
+ // fallthrough
+ }
+ default: {
+ // We have a template type parameter but the template argument
+ // is not a type.
+ SourceRange SR = AL.getSourceRange();
+ Diag(SR.getBegin(), diag::err_template_arg_must_be_type) << SR;
+ Diag(Param->getLocation(), diag::note_template_param_here);
+
+ return true;
+ }
+ }
+
+ if (CheckTemplateArgument(Param, TSI))
+ return true;
+
+ // Add the converted template type argument.
+ ArgType = Context.getCanonicalType(ArgType);
+
+ // Objective-C ARC:
+ // If an explicitly-specified template argument type is a lifetime type
+ // with no lifetime qualifier, the __strong lifetime qualifier is inferred.
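+ // e.g., an explicit 'Vec<id>' is treated as 'Vec<__strong id>'.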
+ if (getLangOpts().ObjCAutoRefCount &&
+ ArgType->isObjCLifetimeType() &&
+ !ArgType.getObjCLifetime()) {
+ Qualifiers Qs;
+ Qs.setObjCLifetime(Qualifiers::OCL_Strong);
+ ArgType = Context.getQualifiedType(ArgType, Qs);
+ }
+
+ Converted.push_back(TemplateArgument(ArgType));
+ return false;
+}
+
+/// \brief Substitute template arguments into the default template argument for
+/// the given template type parameter.
+///
+/// \param SemaRef the semantic analysis object for which we are performing
+/// the substitution.
+///
+/// \param Template the template that we are synthesizing template arguments
+/// for.
+///
+/// \param TemplateLoc the location of the template name that started the
+/// template-id we are checking.
+///
+/// \param RAngleLoc the location of the right angle bracket ('>') that
+/// terminates the template-id.
+///
+ /// \param Param the template type parameter whose default we are
+/// substituting into.
+///
+/// \param Converted the list of template arguments provided for template
+ /// parameters that precede \p Param in the template parameter list.
+ ///
+ /// \returns the substituted template argument, or NULL if an error occurred.
+static TypeSourceInfo *
+SubstDefaultTemplateArgument(Sema &SemaRef,
+ TemplateDecl *Template,
+ SourceLocation TemplateLoc,
+ SourceLocation RAngleLoc,
+ TemplateTypeParmDecl *Param,
+ SmallVectorImpl<TemplateArgument> &Converted) {
+ TypeSourceInfo *ArgType = Param->getDefaultArgumentInfo();
+
+ // If the argument type is dependent, instantiate it now based
+ // on the previously-computed template arguments.
+ if (ArgType->getType()->isDependentType()) {
+ Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc,
+ Template, Converted,
+ SourceRange(TemplateLoc, RAngleLoc));
+ if (Inst.isInvalid())
+ return nullptr;
+
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Converted.data(), Converted.size());
+
+ // Only substitute for the innermost template argument list.
+ MultiLevelTemplateArgumentList TemplateArgLists;
+ TemplateArgLists.addOuterTemplateArguments(&TemplateArgs);
+ for (unsigned i = 0, e = Param->getDepth(); i != e; ++i)
+ TemplateArgLists.addOuterTemplateArguments(None);
+
+ Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext());
+ ArgType =
+ SemaRef.SubstType(ArgType, TemplateArgLists,
+ Param->getDefaultArgumentLoc(), Param->getDeclName());
+ }
+
+ return ArgType;
+}
+
+/// \brief Substitute template arguments into the default template argument for
+/// the given non-type template parameter.
+///
+/// \param SemaRef the semantic analysis object for which we are performing
+/// the substitution.
+///
+/// \param Template the template that we are synthesizing template arguments
+/// for.
+///
+/// \param TemplateLoc the location of the template name that started the
+/// template-id we are checking.
+///
+/// \param RAngleLoc the location of the right angle bracket ('>') that
+/// terminates the template-id.
+///
+/// \param Param the non-type template parameter whose default we are
+/// substituting into.
+///
+/// \param Converted the list of template arguments provided for template
+/// parameters that precede \p Param in the template parameter list.
+///
+/// \returns the substituted template argument, or NULL if an error occurred.
+static ExprResult
+SubstDefaultTemplateArgument(Sema &SemaRef,
+ TemplateDecl *Template,
+ SourceLocation TemplateLoc,
+ SourceLocation RAngleLoc,
+ NonTypeTemplateParmDecl *Param,
+ SmallVectorImpl<TemplateArgument> &Converted) {
+ Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc,
+ Template, Converted,
+ SourceRange(TemplateLoc, RAngleLoc));
+ if (Inst.isInvalid())
+ return ExprError();
+
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Converted.data(), Converted.size());
+
+ // Only substitute for the innermost template argument list.
+ MultiLevelTemplateArgumentList TemplateArgLists;
+ TemplateArgLists.addOuterTemplateArguments(&TemplateArgs);
+ for (unsigned i = 0, e = Param->getDepth(); i != e; ++i)
+ TemplateArgLists.addOuterTemplateArguments(None);
+
+ Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext());
+ EnterExpressionEvaluationContext ConstantEvaluated(SemaRef,
+ Sema::ConstantEvaluated);
+ return SemaRef.SubstExpr(Param->getDefaultArgument(), TemplateArgLists);
+}
+
+/// \brief Substitute template arguments into the default template argument for
+/// the given template template parameter.
+///
+/// \param SemaRef the semantic analysis object for which we are performing
+/// the substitution.
+///
+/// \param Template the template that we are synthesizing template arguments
+/// for.
+///
+/// \param TemplateLoc the location of the template name that started the
+/// template-id we are checking.
+///
+/// \param RAngleLoc the location of the right angle bracket ('>') that
+/// terminates the template-id.
+///
+/// \param Param the template template parameter whose default we are
+/// substituting into.
+///
+/// \param Converted the list of template arguments provided for template
+/// parameters that precede \p Param in the template parameter list.
+///
+/// \param QualifierLoc Will be set to the nested-name-specifier (with
+/// source-location information) that precedes the template name.
+///
+/// \returns the substituted template argument, or NULL if an error occurred.
+static TemplateName
+SubstDefaultTemplateArgument(Sema &SemaRef,
+ TemplateDecl *Template,
+ SourceLocation TemplateLoc,
+ SourceLocation RAngleLoc,
+ TemplateTemplateParmDecl *Param,
+ SmallVectorImpl<TemplateArgument> &Converted,
+ NestedNameSpecifierLoc &QualifierLoc) {
+ Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc, Template, Converted,
+ SourceRange(TemplateLoc, RAngleLoc));
+ if (Inst.isInvalid())
+ return TemplateName();
+
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Converted.data(), Converted.size());
+
+ // Only substitute for the innermost template argument list.
+ MultiLevelTemplateArgumentList TemplateArgLists;
+ TemplateArgLists.addOuterTemplateArguments(&TemplateArgs);
+ for (unsigned i = 0, e = Param->getDepth(); i != e; ++i)
+ TemplateArgLists.addOuterTemplateArguments(None);
+
+ Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext());
+ // Substitute into the nested-name-specifier first.
+ QualifierLoc = Param->getDefaultArgument().getTemplateQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc =
+ SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc, TemplateArgLists);
+ if (!QualifierLoc)
+ return TemplateName();
+ }
+
+ return SemaRef.SubstTemplateName(
+ QualifierLoc,
+ Param->getDefaultArgument().getArgument().getAsTemplate(),
+ Param->getDefaultArgument().getTemplateNameLoc(),
+ TemplateArgLists);
+}
+
+/// \brief If the given template parameter has a default template
+/// argument, substitute into that default template argument and
+/// return the corresponding template argument.
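+///
+/// For instance, a minimal sketch (hypothetical names):
+/// \code
+///   template<typename T = int> struct X;
+///   X<> x; // the defaulted parameter yields the template argument 'int'
+/// \endcode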
+TemplateArgumentLoc
+Sema::SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
+ SourceLocation TemplateLoc,
+ SourceLocation RAngleLoc,
+ Decl *Param,
+ SmallVectorImpl<TemplateArgument>
+ &Converted,
+ bool &HasDefaultArg) {
+ HasDefaultArg = false;
+
+ if (TemplateTypeParmDecl *TypeParm = dyn_cast<TemplateTypeParmDecl>(Param)) {
+ if (!hasVisibleDefaultArgument(TypeParm))
+ return TemplateArgumentLoc();
+
+ HasDefaultArg = true;
+ TypeSourceInfo *DI = SubstDefaultTemplateArgument(*this, Template,
+ TemplateLoc,
+ RAngleLoc,
+ TypeParm,
+ Converted);
+ if (DI)
+ return TemplateArgumentLoc(TemplateArgument(DI->getType()), DI);
+
+ return TemplateArgumentLoc();
+ }
+
+ if (NonTypeTemplateParmDecl *NonTypeParm
+ = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ if (!hasVisibleDefaultArgument(NonTypeParm))
+ return TemplateArgumentLoc();
+
+ HasDefaultArg = true;
+ ExprResult Arg = SubstDefaultTemplateArgument(*this, Template,
+ TemplateLoc,
+ RAngleLoc,
+ NonTypeParm,
+ Converted);
+ if (Arg.isInvalid())
+ return TemplateArgumentLoc();
+
+ Expr *ArgE = Arg.getAs<Expr>();
+ return TemplateArgumentLoc(TemplateArgument(ArgE), ArgE);
+ }
+
+ TemplateTemplateParmDecl *TempTempParm
+ = cast<TemplateTemplateParmDecl>(Param);
+ if (!hasVisibleDefaultArgument(TempTempParm))
+ return TemplateArgumentLoc();
+
+ HasDefaultArg = true;
+ NestedNameSpecifierLoc QualifierLoc;
+ TemplateName TName = SubstDefaultTemplateArgument(*this, Template,
+ TemplateLoc,
+ RAngleLoc,
+ TempTempParm,
+ Converted,
+ QualifierLoc);
+ if (TName.isNull())
+ return TemplateArgumentLoc();
+
+ return TemplateArgumentLoc(TemplateArgument(TName),
+ TempTempParm->getDefaultArgument().getTemplateQualifierLoc(),
+ TempTempParm->getDefaultArgument().getTemplateNameLoc());
+}
+
+/// \brief Check that the given template argument corresponds to the given
+/// template parameter.
+///
+/// \param Param The template parameter against which the argument will be
+/// checked.
+///
+/// \param Arg The template argument, which may be updated due to conversions.
+///
+/// \param Template The template in which the template argument resides.
+///
+/// \param TemplateLoc The location of the template name for the template
+/// whose argument list we're matching.
+///
+/// \param RAngleLoc The location of the right angle bracket ('>') that closes
+/// the template argument list.
+///
+/// \param ArgumentPackIndex The index into the argument pack where this
+/// argument will be placed. Only valid if the parameter is a parameter pack.
+///
+/// \param Converted The checked, converted argument will be added to the
+/// end of this small vector.
+///
+/// \param CTAK Describes how we arrived at this particular template argument:
+/// explicitly written, deduced, etc.
+///
+/// \returns true on error, false otherwise.
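+///
+/// For instance, a minimal sketch (hypothetical names):
+/// \code
+///   template<typename T, int N> struct X;
+///   X<int, 42> x; // 'int' is checked as a type argument, '42' as non-type
+/// \endcode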
+bool Sema::CheckTemplateArgument(NamedDecl *Param,
+ TemplateArgumentLoc &Arg,
+ NamedDecl *Template,
+ SourceLocation TemplateLoc,
+ SourceLocation RAngleLoc,
+ unsigned ArgumentPackIndex,
+ SmallVectorImpl<TemplateArgument> &Converted,
+ CheckTemplateArgumentKind CTAK) {
+ // Check template type parameters.
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Param))
+ return CheckTemplateTypeArgument(TTP, Arg, Converted);
+
+ // Check non-type template parameters.
+ if (NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ // Do substitution on the type of the non-type template parameter
+ // with the template arguments we've seen thus far. But if the
+ // template has a dependent context then we cannot substitute yet.
+ QualType NTTPType = NTTP->getType();
+ if (NTTP->isParameterPack() && NTTP->isExpandedParameterPack())
+ NTTPType = NTTP->getExpansionType(ArgumentPackIndex);
+
+ if (NTTPType->isDependentType() &&
+ !isa<TemplateTemplateParmDecl>(Template) &&
+ !Template->getDeclContext()->isDependentContext()) {
+ // Do substitution on the type of the non-type template parameter.
+ InstantiatingTemplate Inst(*this, TemplateLoc, Template,
+ NTTP, Converted,
+ SourceRange(TemplateLoc, RAngleLoc));
+ if (Inst.isInvalid())
+ return true;
+
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Converted.data(), Converted.size());
+ NTTPType = SubstType(NTTPType,
+ MultiLevelTemplateArgumentList(TemplateArgs),
+ NTTP->getLocation(),
+ NTTP->getDeclName());
+ // If that worked, check the non-type template parameter type
+ // for validity.
+ if (!NTTPType.isNull())
+ NTTPType = CheckNonTypeTemplateParameterType(NTTPType,
+ NTTP->getLocation());
+ if (NTTPType.isNull())
+ return true;
+ }
+
+ switch (Arg.getArgument().getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("Should never see a NULL template argument here");
+
+ case TemplateArgument::Expression: {
+ TemplateArgument Result;
+ ExprResult Res =
+ CheckTemplateArgument(NTTP, NTTPType, Arg.getArgument().getAsExpr(),
+ Result, CTAK);
+ if (Res.isInvalid())
+ return true;
+
+ // If the resulting expression is new, then use it in place of the
+ // old expression in the template argument.
+ if (Res.get() != Arg.getArgument().getAsExpr()) {
+ TemplateArgument TA(Res.get());
+ Arg = TemplateArgumentLoc(TA, Res.get());
+ }
+
+ Converted.push_back(Result);
+ break;
+ }
+
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Integral:
+ case TemplateArgument::NullPtr:
+ // We've already checked this template argument, so just copy
+ // it to the list of converted arguments.
+ Converted.push_back(Arg.getArgument());
+ break;
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ // We were given a template template argument. This is not necessarily
+ // ill-formed; see below.
+ if (DependentTemplateName *DTN
+ = Arg.getArgument().getAsTemplateOrTemplatePattern()
+ .getAsDependentTemplateName()) {
+ // We have a template argument such as \c T::template X, which we
+ // parsed as a template template argument. However, since we now
+ // know that we need a non-type template argument, convert this
+ // template name into an expression.
+
+ DeclarationNameInfo NameInfo(DTN->getIdentifier(),
+ Arg.getTemplateNameLoc());
+
+ CXXScopeSpec SS;
+ SS.Adopt(Arg.getTemplateQualifierLoc());
+ // FIXME: the template-template arg was a DependentTemplateName,
+ // so it was provided with a template keyword. However, its source
+ // location is not stored in the template argument structure.
+ SourceLocation TemplateKWLoc;
+ ExprResult E = DependentScopeDeclRefExpr::Create(
+ Context, SS.getWithLocInContext(Context), TemplateKWLoc, NameInfo,
+ nullptr);
+
+ // If we parsed the template argument as a pack expansion, create a
+ // pack expansion expression.
+ if (Arg.getArgument().getKind() == TemplateArgument::TemplateExpansion){
+ E = ActOnPackExpansion(E.get(), Arg.getTemplateEllipsisLoc());
+ if (E.isInvalid())
+ return true;
+ }
+
+ TemplateArgument Result;
+ E = CheckTemplateArgument(NTTP, NTTPType, E.get(), Result);
+ if (E.isInvalid())
+ return true;
+
+ Converted.push_back(Result);
+ break;
+ }
+
+ // We have a template argument that actually does refer to a class
+ // template, alias template, or template template parameter, and
+ // therefore cannot be a non-type template argument.
+ Diag(Arg.getLocation(), diag::err_template_arg_must_be_expr)
+ << Arg.getSourceRange();
+
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+
+ case TemplateArgument::Type: {
+ // We have a non-type template parameter but the template
+ // argument is a type.
+
+ // C++ [temp.arg]p2:
+ // In a template-argument, an ambiguity between a type-id and
+ // an expression is resolved to a type-id, regardless of the
+ // form of the corresponding template-parameter.
+ //
+ // We diagnose this case specifically, since it can be rather
+ // confusing for users.
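+ // For instance (illustrative): given 'template<int N> struct X;', the
+ // argument in 'X<int()>' is parsed as the function type 'int()' rather
+ // than as a value-initialization expression, and is diagnosed below.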
+ QualType T = Arg.getArgument().getAsType();
+ SourceRange SR = Arg.getSourceRange();
+ if (T->isFunctionType())
+ Diag(SR.getBegin(), diag::err_template_arg_nontype_ambig) << SR << T;
+ else
+ Diag(SR.getBegin(), diag::err_template_arg_must_be_expr) << SR;
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ case TemplateArgument::Pack:
+ llvm_unreachable("Caller must expand template argument packs");
+ }
+
+ return false;
+ }
+
+ // Check template template parameters.
+ TemplateTemplateParmDecl *TempParm = cast<TemplateTemplateParmDecl>(Param);
+
+ // Substitute into the template parameter list of the template
+ // template parameter, since previously-supplied template arguments
+ // may appear within the template template parameter.
+ {
+ // Set up a template instantiation context.
+ LocalInstantiationScope Scope(*this);
+ InstantiatingTemplate Inst(*this, TemplateLoc, Template,
+ TempParm, Converted,
+ SourceRange(TemplateLoc, RAngleLoc));
+ if (Inst.isInvalid())
+ return true;
+
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Converted.data(), Converted.size());
+ TempParm = cast_or_null<TemplateTemplateParmDecl>(
+ SubstDecl(TempParm, CurContext,
+ MultiLevelTemplateArgumentList(TemplateArgs)));
+ if (!TempParm)
+ return true;
+ }
+
+ switch (Arg.getArgument().getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("Should never see a NULL template argument here");
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ if (CheckTemplateArgument(TempParm, Arg, ArgumentPackIndex))
+ return true;
+
+ Converted.push_back(Arg.getArgument());
+ break;
+
+ case TemplateArgument::Expression:
+ case TemplateArgument::Type:
+ // We have a template template parameter but the template
+ // argument does not refer to a template.
+ Diag(Arg.getLocation(), diag::err_template_arg_must_be_template)
+ << getLangOpts().CPlusPlus11;
+ return true;
+
+ case TemplateArgument::Declaration:
+ llvm_unreachable("Declaration argument with template template parameter");
+ case TemplateArgument::Integral:
+ llvm_unreachable("Integral argument with template template parameter");
+ case TemplateArgument::NullPtr:
+ llvm_unreachable("Null pointer argument with template template parameter");
+
+ case TemplateArgument::Pack:
+ llvm_unreachable("Caller must expand template argument packs");
+ }
+
+ return false;
+}
+
+/// \brief Diagnose an arity mismatch: the wrong number of template arguments
+/// was provided for the given template.
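+///
+/// For instance (illustrative):
+/// \code
+///   template<typename T> struct X;
+///   X<int, int> xs; // error: too many template arguments
+/// \endcode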
+static bool diagnoseArityMismatch(Sema &S, TemplateDecl *Template,
+ SourceLocation TemplateLoc,
+ TemplateArgumentListInfo &TemplateArgs) {
+ TemplateParameterList *Params = Template->getTemplateParameters();
+ unsigned NumParams = Params->size();
+ unsigned NumArgs = TemplateArgs.size();
+
+ SourceRange Range;
+ if (NumArgs > NumParams)
+ Range = SourceRange(TemplateArgs[NumParams].getLocation(),
+ TemplateArgs.getRAngleLoc());
+ S.Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
+ << (NumArgs > NumParams)
+ << (isa<ClassTemplateDecl>(Template)? 0 :
+ isa<FunctionTemplateDecl>(Template)? 1 :
+ isa<TemplateTemplateParmDecl>(Template)? 2 : 3)
+ << Template << Range;
+ S.Diag(Template->getLocation(), diag::note_template_decl_here)
+ << Params->getSourceRange();
+ return true;
+}
+
+/// \brief Check whether the template parameter is a pack expansion, and if so,
+/// determine the number of parameters produced by that expansion. For instance:
+///
+/// \code
+/// template<typename ...Ts> struct A {
+/// template<Ts ...NTs, template<Ts> class ...TTs, typename ...Us> struct B;
+/// };
+/// \endcode
+///
+/// In \c A<int,int>::B, \c NTs and \c TTs have expanded pack size 2, and \c Us
+/// is not a pack expansion, so this function returns an empty Optional for it.
+static Optional<unsigned> getExpandedPackSize(NamedDecl *Param) {
+ if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ if (NTTP->isExpandedParameterPack())
+ return NTTP->getNumExpansionTypes();
+ }
+
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(Param)) {
+ if (TTP->isExpandedParameterPack())
+ return TTP->getNumExpansionTemplateParameters();
+ }
+
+ return None;
+}
+
+/// Diagnose a missing template argument.
+template<typename TemplateParmDecl>
+static bool diagnoseMissingArgument(Sema &S, SourceLocation Loc,
+ TemplateDecl *TD,
+ const TemplateParmDecl *D,
+ TemplateArgumentListInfo &Args) {
+ // Dig out the most recent declaration of the template parameter; there may be
+ // declarations of the template that are more recent than TD.
+ D = cast<TemplateParmDecl>(cast<TemplateDecl>(TD->getMostRecentDecl())
+ ->getTemplateParameters()
+ ->getParam(D->getIndex()));
+
+ // If there's a default argument that's not visible, diagnose that we're
+ // missing a module import.
+ llvm::SmallVector<Module*, 8> Modules;
+ if (D->hasDefaultArgument() && !S.hasVisibleDefaultArgument(D, &Modules)) {
+ S.diagnoseMissingImport(Loc, cast<NamedDecl>(TD),
+ D->getDefaultArgumentLoc(), Modules,
+ Sema::MissingImportKind::DefaultArgument,
+ /*Recover*/ true);
+ return true;
+ }
+
+ // FIXME: If there's a more recent default argument that *is* visible,
+ // diagnose that it was declared too late.
+
+ return diagnoseArityMismatch(S, TD, Loc, Args);
+}
+
+/// \brief Check that the given template argument list is well-formed
+/// for specializing the given template.
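+///
+/// For instance (illustrative):
+/// \code
+///   template<typename T, typename U = T*> struct X;
+///   X<int> x; // the argument list is checked and completed to <int, int*>
+/// \endcode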
+bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
+ SourceLocation TemplateLoc,
+ TemplateArgumentListInfo &TemplateArgs,
+ bool PartialTemplateArgs,
+ SmallVectorImpl<TemplateArgument> &Converted) {
+ // Make a copy of the template arguments for processing. Only make the
+ // changes at the end when successful in matching the arguments to the
+ // template.
+ TemplateArgumentListInfo NewArgs = TemplateArgs;
+
+ TemplateParameterList *Params = Template->getTemplateParameters();
+
+ SourceLocation RAngleLoc = NewArgs.getRAngleLoc();
+
+ // C++ [temp.arg]p1:
+ // [...] The type and form of each template-argument specified in
+ // a template-id shall match the type and form specified for the
+ // corresponding parameter declared by the template in its
+ // template-parameter-list.
+ bool isTemplateTemplateParameter = isa<TemplateTemplateParmDecl>(Template);
+ SmallVector<TemplateArgument, 2> ArgumentPack;
+ unsigned ArgIdx = 0, NumArgs = NewArgs.size();
+ LocalInstantiationScope InstScope(*this, true);
+ for (TemplateParameterList::iterator Param = Params->begin(),
+ ParamEnd = Params->end();
+ Param != ParamEnd; /* increment in loop */) {
+ // If we have an expanded parameter pack, make sure we don't have too
+ // many arguments.
+ if (Optional<unsigned> Expansions = getExpandedPackSize(*Param)) {
+ if (*Expansions == ArgumentPack.size()) {
+ // We're done with this parameter pack. Pack up its arguments and add
+ // them to the list.
+ Converted.push_back(
+ TemplateArgument::CreatePackCopy(Context, ArgumentPack));
+ ArgumentPack.clear();
+
+ // This argument is assigned to the next parameter.
+ ++Param;
+ continue;
+ } else if (ArgIdx == NumArgs && !PartialTemplateArgs) {
+ // Not enough arguments for this parameter pack.
+ Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
+ << false
+ << (isa<ClassTemplateDecl>(Template)? 0 :
+ isa<FunctionTemplateDecl>(Template)? 1 :
+ isa<TemplateTemplateParmDecl>(Template)? 2 : 3)
+ << Template;
+ Diag(Template->getLocation(), diag::note_template_decl_here)
+ << Params->getSourceRange();
+ return true;
+ }
+ }
+
+ if (ArgIdx < NumArgs) {
+ // Check the template argument we were given.
+ if (CheckTemplateArgument(*Param, NewArgs[ArgIdx], Template,
+ TemplateLoc, RAngleLoc,
+ ArgumentPack.size(), Converted))
+ return true;
+
+ bool PackExpansionIntoNonPack =
+ NewArgs[ArgIdx].getArgument().isPackExpansion() &&
+ (!(*Param)->isTemplateParameterPack() || getExpandedPackSize(*Param));
+ if (PackExpansionIntoNonPack && isa<TypeAliasTemplateDecl>(Template)) {
+ // Core issue 1430: we have a pack expansion as an argument to an
+ // alias template, and it's not part of a parameter pack. This
+ // can't be canonicalized, so reject it now.
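+ // For instance (illustrative):
+ // template<typename T> using Id = T;
+ // template<typename ...Ts> struct S { Id<Ts...> x; }; // rejected here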
+ Diag(NewArgs[ArgIdx].getLocation(),
+ diag::err_alias_template_expansion_into_fixed_list)
+ << NewArgs[ArgIdx].getSourceRange();
+ Diag((*Param)->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ // We're now done with this argument.
+ ++ArgIdx;
+
+ if ((*Param)->isTemplateParameterPack()) {
+ // The template parameter was a template parameter pack, so take the
+ // deduced argument and place it on the argument pack. Note that we
+ // stay on the same template parameter so that we can deduce more
+ // arguments.
+ ArgumentPack.push_back(Converted.pop_back_val());
+ } else {
+ // Move to the next template parameter.
+ ++Param;
+ }
+
+ // If we just saw a pack expansion into a non-pack, then directly convert
+ // the remaining arguments, because we don't know what parameters they'll
+ // match up with.
+ if (PackExpansionIntoNonPack) {
+ if (!ArgumentPack.empty()) {
+ // If we were part way through filling in an expanded parameter pack,
+ // fall back to just producing individual arguments.
+ Converted.insert(Converted.end(),
+ ArgumentPack.begin(), ArgumentPack.end());
+ ArgumentPack.clear();
+ }
+
+ while (ArgIdx < NumArgs) {
+ Converted.push_back(NewArgs[ArgIdx].getArgument());
+ ++ArgIdx;
+ }
+
+ return false;
+ }
+
+ continue;
+ }
+
+ // If we're checking a partial template argument list, we're done.
+ if (PartialTemplateArgs) {
+ if ((*Param)->isTemplateParameterPack() && !ArgumentPack.empty())
+ Converted.push_back(
+ TemplateArgument::CreatePackCopy(Context, ArgumentPack));
+
+ return false;
+ }
+
+ // If we have a template parameter pack with no more corresponding
+ // arguments, just break out now and we'll fill in the argument pack below.
+ if ((*Param)->isTemplateParameterPack()) {
+ assert(!getExpandedPackSize(*Param) &&
+ "Should have dealt with this already");
+
+ // A non-expanded parameter pack before the end of the parameter list
+ // only occurs for an ill-formed template parameter list, unless we've
+ // got a partial argument list for a function template, so just bail out.
+ if (Param + 1 != ParamEnd)
+ return true;
+
+ Converted.push_back(
+ TemplateArgument::CreatePackCopy(Context, ArgumentPack));
+ ArgumentPack.clear();
+
+ ++Param;
+ continue;
+ }
+
+ // Check whether we have a default argument.
+ TemplateArgumentLoc Arg;
+
+ // Retrieve the default template argument from the template
+ // parameter. For each kind of template parameter, we substitute the
+ // template arguments provided thus far and any "outer" template arguments
+ // (when the template parameter was part of a nested template) into
+ // the default argument.
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*Param)) {
+ if (!hasVisibleDefaultArgument(TTP))
+ return diagnoseMissingArgument(*this, TemplateLoc, Template, TTP,
+ NewArgs);
+
+ TypeSourceInfo *ArgType = SubstDefaultTemplateArgument(*this,
+ Template,
+ TemplateLoc,
+ RAngleLoc,
+ TTP,
+ Converted);
+ if (!ArgType)
+ return true;
+
+ Arg = TemplateArgumentLoc(TemplateArgument(ArgType->getType()),
+ ArgType);
+ } else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
+ if (!hasVisibleDefaultArgument(NTTP))
+ return diagnoseMissingArgument(*this, TemplateLoc, Template, NTTP,
+ NewArgs);
+
+ ExprResult E = SubstDefaultTemplateArgument(*this, Template,
+ TemplateLoc,
+ RAngleLoc,
+ NTTP,
+ Converted);
+ if (E.isInvalid())
+ return true;
+
+ Expr *Ex = E.getAs<Expr>();
+ Arg = TemplateArgumentLoc(TemplateArgument(Ex), Ex);
+ } else {
+ TemplateTemplateParmDecl *TempParm
+ = cast<TemplateTemplateParmDecl>(*Param);
+
+ if (!hasVisibleDefaultArgument(TempParm))
+ return diagnoseMissingArgument(*this, TemplateLoc, Template, TempParm,
+ NewArgs);
+
+ NestedNameSpecifierLoc QualifierLoc;
+ TemplateName Name = SubstDefaultTemplateArgument(*this, Template,
+ TemplateLoc,
+ RAngleLoc,
+ TempParm,
+ Converted,
+ QualifierLoc);
+ if (Name.isNull())
+ return true;
+
+ Arg = TemplateArgumentLoc(TemplateArgument(Name), QualifierLoc,
+ TempParm->getDefaultArgument().getTemplateNameLoc());
+ }
+
+ // Introduce an instantiation record that describes where we are using
+ // the default template argument.
+ InstantiatingTemplate Inst(*this, RAngleLoc, Template, *Param, Converted,
+ SourceRange(TemplateLoc, RAngleLoc));
+ if (Inst.isInvalid())
+ return true;
+
+ // Check the default template argument.
+ if (CheckTemplateArgument(*Param, Arg, Template, TemplateLoc,
+ RAngleLoc, 0, Converted))
+ return true;
+
+ // Core issue 150 (assumed resolution): if this is a template template
+ // parameter, keep track of the default template arguments from the
+ // template definition.
+ if (isTemplateTemplateParameter)
+ NewArgs.addArgument(Arg);
+
+ // Move to the next template parameter and argument.
+ ++Param;
+ ++ArgIdx;
+ }
+
+ // If we're performing a partial argument substitution, allow any trailing
+ // pack expansions; they might be empty. This can happen even if
+ // PartialTemplateArgs is false (the list of arguments is complete but
+ // still dependent).
+ if (ArgIdx < NumArgs && CurrentInstantiationScope &&
+ CurrentInstantiationScope->getPartiallySubstitutedPack()) {
+ while (ArgIdx < NumArgs && NewArgs[ArgIdx].getArgument().isPackExpansion())
+ Converted.push_back(NewArgs[ArgIdx++].getArgument());
+ }
+
+ // If we have any leftover arguments, then there were too many arguments.
+ // Complain and fail.
+ if (ArgIdx < NumArgs)
+ return diagnoseArityMismatch(*this, Template, TemplateLoc, NewArgs);
+
+ // No problems were found with the new argument list; propagate the
+ // changes back to the caller.
+ TemplateArgs = std::move(NewArgs);
+
+ return false;
+}
+
+namespace {
+ class UnnamedLocalNoLinkageFinder
+ : public TypeVisitor<UnnamedLocalNoLinkageFinder, bool>
+ {
+ Sema &S;
+ SourceRange SR;
+
+ typedef TypeVisitor<UnnamedLocalNoLinkageFinder, bool> inherited;
+
+ public:
+ UnnamedLocalNoLinkageFinder(Sema &S, SourceRange SR) : S(S), SR(SR) { }
+
+ bool Visit(QualType T) {
+ return inherited::Visit(T.getTypePtr());
+ }
+
+#define TYPE(Class, Parent) \
+ bool Visit##Class##Type(const Class##Type *);
+#define ABSTRACT_TYPE(Class, Parent) \
+ bool Visit##Class##Type(const Class##Type *) { return false; }
+#define NON_CANONICAL_TYPE(Class, Parent) \
+ bool Visit##Class##Type(const Class##Type *) { return false; }
+#include "clang/AST/TypeNodes.def"
+
+ bool VisitTagDecl(const TagDecl *Tag);
+ bool VisitNestedNameSpecifier(NestedNameSpecifier *NNS);
+ };
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitBuiltinType(const BuiltinType*) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitComplexType(const ComplexType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitPointerType(const PointerType* T) {
+ return Visit(T->getPointeeType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitBlockPointerType(
+ const BlockPointerType* T) {
+ return Visit(T->getPointeeType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitLValueReferenceType(
+ const LValueReferenceType* T) {
+ return Visit(T->getPointeeType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitRValueReferenceType(
+ const RValueReferenceType* T) {
+ return Visit(T->getPointeeType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitMemberPointerType(
+ const MemberPointerType* T) {
+ return Visit(T->getPointeeType()) || Visit(QualType(T->getClass(), 0));
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitConstantArrayType(
+ const ConstantArrayType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitIncompleteArrayType(
+ const IncompleteArrayType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitVariableArrayType(
+ const VariableArrayType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitDependentSizedArrayType(
+ const DependentSizedArrayType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitDependentSizedExtVectorType(
+ const DependentSizedExtVectorType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitVectorType(const VectorType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitExtVectorType(const ExtVectorType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitFunctionProtoType(
+ const FunctionProtoType* T) {
+ for (const auto &A : T->param_types()) {
+ if (Visit(A))
+ return true;
+ }
+
+ return Visit(T->getReturnType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitFunctionNoProtoType(
+ const FunctionNoProtoType* T) {
+ return Visit(T->getReturnType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitUnresolvedUsingType(
+ const UnresolvedUsingType*) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitTypeOfExprType(const TypeOfExprType*) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitTypeOfType(const TypeOfType* T) {
+ return Visit(T->getUnderlyingType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitDecltypeType(const DecltypeType*) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitUnaryTransformType(
+ const UnaryTransformType*) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitAutoType(const AutoType *T) {
+ return Visit(T->getDeducedType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitRecordType(const RecordType* T) {
+ return VisitTagDecl(T->getDecl());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitEnumType(const EnumType* T) {
+ return VisitTagDecl(T->getDecl());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitTemplateTypeParmType(
+ const TemplateTypeParmType*) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitSubstTemplateTypeParmPackType(
+ const SubstTemplateTypeParmPackType *) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitTemplateSpecializationType(
+ const TemplateSpecializationType*) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitInjectedClassNameType(
+ const InjectedClassNameType* T) {
+ return VisitTagDecl(T->getDecl());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitDependentNameType(
+ const DependentNameType* T) {
+ return VisitNestedNameSpecifier(T->getQualifier());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitDependentTemplateSpecializationType(
+ const DependentTemplateSpecializationType* T) {
+ return VisitNestedNameSpecifier(T->getQualifier());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitPackExpansionType(
+ const PackExpansionType* T) {
+ return Visit(T->getPattern());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitObjCObjectType(const ObjCObjectType *) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitObjCInterfaceType(
+ const ObjCInterfaceType *) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitObjCObjectPointerType(
+ const ObjCObjectPointerType *) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitAtomicType(const AtomicType* T) {
+ return Visit(T->getValueType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitTagDecl(const TagDecl *Tag) {
+ if (Tag->getDeclContext()->isFunctionOrMethod()) {
+ S.Diag(SR.getBegin(),
+ S.getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_template_arg_local_type :
+ diag::ext_template_arg_local_type)
+ << S.Context.getTypeDeclType(Tag) << SR;
+ return true;
+ }
+
+ if (!Tag->hasNameForLinkage()) {
+ S.Diag(SR.getBegin(),
+ S.getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_template_arg_unnamed_type :
+ diag::ext_template_arg_unnamed_type) << SR;
+ S.Diag(Tag->getLocation(), diag::note_template_unnamed_type_here);
+ return true;
+ }
+
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitNestedNameSpecifier(
+ NestedNameSpecifier *NNS) {
+ if (NNS->getPrefix() && VisitNestedNameSpecifier(NNS->getPrefix()))
+ return true;
+
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Super:
+ return false;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ return Visit(QualType(NNS->getAsType(), 0));
+ }
+ llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
+}
+
+/// \brief Check a template argument against its corresponding
+/// template type parameter.
+///
+/// This routine implements the semantics of C++ [temp.arg.type]. It
+/// returns true if an error occurred, and false otherwise.
+bool Sema::CheckTemplateArgument(TemplateTypeParmDecl *Param,
+ TypeSourceInfo *ArgInfo) {
+ assert(ArgInfo && "invalid TypeSourceInfo");
+ QualType Arg = ArgInfo->getType();
+ SourceRange SR = ArgInfo->getTypeLoc().getSourceRange();
+
+ if (Arg->isVariablyModifiedType()) {
+ return Diag(SR.getBegin(), diag::err_variably_modified_template_arg) << Arg;
+ } else if (Context.hasSameUnqualifiedType(Arg, Context.OverloadTy)) {
+ return Diag(SR.getBegin(), diag::err_template_arg_overload_type) << SR;
+ }
+
+ // C++03 [temp.arg.type]p2:
+ // A local type, a type with no linkage, an unnamed type or a type
+ // compounded from any of these types shall not be used as a
+ // template-argument for a template type-parameter.
+ //
+ // C++11 allows these, and even in C++03 we allow them as an extension with
+ // a warning.
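+ // For instance (illustrative):
+ // template<typename T> struct X;
+ // void f() { struct Local {}; X<Local> x; } // local type as argument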
+ bool NeedsCheck;
+ if (LangOpts.CPlusPlus11)
+ NeedsCheck =
+ !Diags.isIgnored(diag::warn_cxx98_compat_template_arg_unnamed_type,
+ SR.getBegin()) ||
+ !Diags.isIgnored(diag::warn_cxx98_compat_template_arg_local_type,
+ SR.getBegin());
+ else
+ NeedsCheck = Arg->hasUnnamedOrLocalType();
+
+ if (NeedsCheck) {
+ UnnamedLocalNoLinkageFinder Finder(*this, SR);
+ (void)Finder.Visit(Context.getCanonicalType(Arg));
+ }
+
+ return false;
+}
+
+enum NullPointerValueKind {
+ NPV_NotNullPointer,
+ NPV_NullPointer,
+ NPV_Error
+};
+
+/// \brief Determine whether the given template argument is a null pointer
+/// value of the appropriate type.
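+///
+/// For instance (illustrative):
+/// \code
+///   template<int *P> struct X;
+///   X<nullptr> x; // C++11: 'nullptr' is a null pointer value for 'int *P'
+/// \endcode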
+static NullPointerValueKind
+isNullPointerValueTemplateArgument(Sema &S, NonTypeTemplateParmDecl *Param,
+ QualType ParamType, Expr *Arg) {
+ if (Arg->isValueDependent() || Arg->isTypeDependent())
+ return NPV_NotNullPointer;
+
+ if (!S.isCompleteType(Arg->getExprLoc(), ParamType))
+ llvm_unreachable(
+ "Incomplete parameter type in isNullPointerValueTemplateArgument!");
+
+ if (!S.getLangOpts().CPlusPlus11)
+ return NPV_NotNullPointer;
+
+ // Determine whether we have a constant expression.
+ ExprResult ArgRV = S.DefaultFunctionArrayConversion(Arg);
+ if (ArgRV.isInvalid())
+ return NPV_Error;
+ Arg = ArgRV.get();
+
+ Expr::EvalResult EvalResult;
+ SmallVector<PartialDiagnosticAt, 8> Notes;
+ EvalResult.Diag = &Notes;
+ if (!Arg->EvaluateAsRValue(EvalResult, S.Context) ||
+ EvalResult.HasSideEffects) {
+ SourceLocation DiagLoc = Arg->getExprLoc();
+
+ // If our only note is the usual "invalid subexpression" note, just point
+ // the caret at its location rather than producing an essentially
+ // redundant note.
+ if (Notes.size() == 1 && Notes[0].second.getDiagID() ==
+ diag::note_invalid_subexpr_in_const_expr) {
+ DiagLoc = Notes[0].first;
+ Notes.clear();
+ }
+
+ S.Diag(DiagLoc, diag::err_template_arg_not_address_constant)
+ << Arg->getType() << Arg->getSourceRange();
+ for (unsigned I = 0, N = Notes.size(); I != N; ++I)
+ S.Diag(Notes[I].first, Notes[I].second);
+
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return NPV_Error;
+ }
+
+ // C++11 [temp.arg.nontype]p1:
+ // - an address constant expression of type std::nullptr_t
+ if (Arg->getType()->isNullPtrType())
+ return NPV_NullPointer;
+
+ // - a constant expression that evaluates to a null pointer value (4.10); or
+ // - a constant expression that evaluates to a null member pointer value
+ // (4.11); or
+ if ((EvalResult.Val.isLValue() && !EvalResult.Val.getLValueBase()) ||
+ (EvalResult.Val.isMemberPointer() &&
+ !EvalResult.Val.getMemberPointerDecl())) {
+ // If our expression has an appropriate type, we've succeeded.
+ bool ObjCLifetimeConversion;
+ if (S.Context.hasSameUnqualifiedType(Arg->getType(), ParamType) ||
+ S.IsQualificationConversion(Arg->getType(), ParamType, false,
+ ObjCLifetimeConversion))
+ return NPV_NullPointer;
+
+ // The types didn't match, but we know we got a null pointer; complain,
+ // then recover as if the types were correct.
+ S.Diag(Arg->getExprLoc(), diag::err_template_arg_wrongtype_null_constant)
+ << Arg->getType() << ParamType << Arg->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return NPV_NullPointer;
+ }
+
+ // If we don't have a null pointer value, but we do have a NULL pointer
+ // constant, suggest a cast to the appropriate type.
+ if (Arg->isNullPointerConstant(S.Context, Expr::NPC_NeverValueDependent)) {
+ std::string Code = "static_cast<" + ParamType.getAsString() + ">(";
+ S.Diag(Arg->getExprLoc(), diag::err_template_arg_untyped_null_constant)
+ << ParamType << FixItHint::CreateInsertion(Arg->getLocStart(), Code)
+ << FixItHint::CreateInsertion(S.getLocForEndOfToken(Arg->getLocEnd()),
+ ")");
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return NPV_NullPointer;
+ }
+
+ // FIXME: If we ever want to support general, address-constant expressions
+ // as non-type template arguments, we should return the ExprResult here to
+ // be interpreted by the caller.
+ return NPV_NotNullPointer;
+}
+
+/// \brief Checks whether the given template argument is compatible with its
+/// template parameter.
+static bool CheckTemplateArgumentIsCompatibleWithParameter(
+ Sema &S, NonTypeTemplateParmDecl *Param, QualType ParamType, Expr *ArgIn,
+ Expr *Arg, QualType ArgType) {
+ bool ObjCLifetimeConversion;
+ if (ParamType->isPointerType() &&
+ !ParamType->getAs<PointerType>()->getPointeeType()->isFunctionType() &&
+ S.IsQualificationConversion(ArgType, ParamType, false,
+ ObjCLifetimeConversion)) {
+ // For pointer-to-object types, qualification conversions are
+ // permitted.
+ } else {
+ if (const ReferenceType *ParamRef = ParamType->getAs<ReferenceType>()) {
+ if (!ParamRef->getPointeeType()->isFunctionType()) {
+ // C++ [temp.arg.nontype]p5b3:
+ // For a non-type template-parameter of type reference to
+ // object, no conversions apply. The type referred to by the
+ // reference may be more cv-qualified than the (otherwise
+ // identical) type of the template-argument. The
+ // template-parameter is bound directly to the
+ // template-argument, which shall be an lvalue.
+
+ // FIXME: Other qualifiers?
+ unsigned ParamQuals = ParamRef->getPointeeType().getCVRQualifiers();
+ unsigned ArgQuals = ArgType.getCVRQualifiers();
+
+ if ((ParamQuals | ArgQuals) != ParamQuals) {
+ S.Diag(Arg->getLocStart(),
+ diag::err_template_arg_ref_bind_ignores_quals)
+ << ParamType << Arg->getType() << Arg->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+ }
+ }
+
+ // At this point, the template argument refers to an object or
+ // function with external linkage. We now need to check whether the
+ // argument and parameter types are compatible.
+ if (!S.Context.hasSameUnqualifiedType(ArgType,
+ ParamType.getNonReferenceType())) {
+ // We can't perform this conversion or binding.
+ if (ParamType->isReferenceType())
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_no_ref_bind)
+ << ParamType << ArgIn->getType() << Arg->getSourceRange();
+ else
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_not_convertible)
+ << ArgIn->getType() << ParamType << Arg->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/// \brief Checks whether the given template argument is the address
+/// of an object or function according to C++ [temp.arg.nontype]p1.
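+///
+/// For instance (illustrative):
+/// \code
+///   template<int *P> struct X;
+///   int n;
+///   X<&n> x; // '&n' is the address of an object with external linkage
+/// \endcode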
+static bool
+CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
+ NonTypeTemplateParmDecl *Param,
+ QualType ParamType,
+ Expr *ArgIn,
+ TemplateArgument &Converted) {
+ bool Invalid = false;
+ Expr *Arg = ArgIn;
+ QualType ArgType = Arg->getType();
+
+ bool AddressTaken = false;
+ SourceLocation AddrOpLoc;
+ if (S.getLangOpts().MicrosoftExt) {
+ // Microsoft Visual C++ strips all casts and allows an arbitrary number
+ // of dereference and address-of operators.
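+ // For instance (illustrative): given 'template<int &R> struct Y;' and
+ // 'int n;', this extension accepts 'Y<*&n>' (with a warning).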
+ Arg = Arg->IgnoreParenCasts();
+
+ bool ExtWarnMSTemplateArg = false;
+ UnaryOperatorKind FirstOpKind;
+ SourceLocation FirstOpLoc;
+ while (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(Arg)) {
+ UnaryOperatorKind UnOpKind = UnOp->getOpcode();
+ if (UnOpKind == UO_Deref)
+ ExtWarnMSTemplateArg = true;
+ if (UnOpKind == UO_AddrOf || UnOpKind == UO_Deref) {
+ Arg = UnOp->getSubExpr()->IgnoreParenCasts();
+ if (!AddrOpLoc.isValid()) {
+ FirstOpKind = UnOpKind;
+ FirstOpLoc = UnOp->getOperatorLoc();
+ }
+ } else
+ break;
+ }
+ if (FirstOpLoc.isValid()) {
+ if (ExtWarnMSTemplateArg)
+ S.Diag(ArgIn->getLocStart(), diag::ext_ms_deref_template_argument)
+ << ArgIn->getSourceRange();
+
+ if (FirstOpKind == UO_AddrOf)
+ AddressTaken = true;
+ else if (Arg->getType()->isPointerType()) {
+ // We cannot let pointers get dereferenced here; that is obviously not a
+ // constant expression.
+ assert(FirstOpKind == UO_Deref);
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_not_decl_ref)
+ << Arg->getSourceRange();
+ }
+ }
+ } else {
+ // See through any implicit casts we added to fix the type.
+ Arg = Arg->IgnoreImpCasts();
+
+ // C++ [temp.arg.nontype]p1:
+ //
+ // A template-argument for a non-type, non-template
+ // template-parameter shall be one of: [...]
+ //
+ // -- the address of an object or function with external
+ // linkage, including function templates and function
+ // template-ids but excluding non-static class members,
+ // expressed as & id-expression where the & is optional if
+ // the name refers to a function or array, or if the
+ // corresponding template-parameter is a reference; or
+
+ // In C++98/03 mode, give an extension warning on any extra parentheses.
+ // See http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#773
+ bool ExtraParens = false;
+ while (ParenExpr *Parens = dyn_cast<ParenExpr>(Arg)) {
+ if (!Invalid && !ExtraParens) {
+ S.Diag(Arg->getLocStart(),
+ S.getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_template_arg_extra_parens
+ : diag::ext_template_arg_extra_parens)
+ << Arg->getSourceRange();
+ ExtraParens = true;
+ }
+
+ Arg = Parens->getSubExpr();
+ }
+
+ while (SubstNonTypeTemplateParmExpr *subst =
+ dyn_cast<SubstNonTypeTemplateParmExpr>(Arg))
+ Arg = subst->getReplacement()->IgnoreImpCasts();
+
+ if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(Arg)) {
+ if (UnOp->getOpcode() == UO_AddrOf) {
+ Arg = UnOp->getSubExpr();
+ AddressTaken = true;
+ AddrOpLoc = UnOp->getOperatorLoc();
+ }
+ }
+
+ while (SubstNonTypeTemplateParmExpr *subst =
+ dyn_cast<SubstNonTypeTemplateParmExpr>(Arg))
+ Arg = subst->getReplacement()->IgnoreImpCasts();
+ }
+
+ DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Arg);
+ ValueDecl *Entity = DRE ? DRE->getDecl() : nullptr;
+
+ // If our parameter has pointer type, check for a null template value.
+ if (ParamType->isPointerType() || ParamType->isNullPtrType()) {
+ NullPointerValueKind NPV;
+ // dllimport'd entities aren't constant but are available inside
+ // template arguments.
+ if (Entity && Entity->hasAttr<DLLImportAttr>())
+ NPV = NPV_NotNullPointer;
+ else
+ NPV = isNullPointerValueTemplateArgument(S, Param, ParamType, ArgIn);
+ switch (NPV) {
+ case NPV_NullPointer:
+ S.Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
+ Converted = TemplateArgument(S.Context.getCanonicalType(ParamType),
+ /*isNullPtr=*/true);
+ return false;
+
+ case NPV_Error:
+ return true;
+
+ case NPV_NotNullPointer:
+ break;
+ }
+ }
+
+ // Stop checking the precise nature of the argument if it is value
+ // dependent; it will be checked when instantiated.
+ if (Arg->isValueDependent()) {
+ Converted = TemplateArgument(ArgIn);
+ return false;
+ }
+
+ if (isa<CXXUuidofExpr>(Arg)) {
+ if (CheckTemplateArgumentIsCompatibleWithParameter(S, Param, ParamType,
+ ArgIn, Arg, ArgType))
+ return true;
+
+ Converted = TemplateArgument(ArgIn);
+ return false;
+ }
+
+ if (!DRE) {
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_not_decl_ref)
+ << Arg->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ // Cannot refer to non-static data members
+ if (isa<FieldDecl>(Entity) || isa<IndirectFieldDecl>(Entity)) {
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_field)
+ << Entity << Arg->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ // Cannot refer to non-static member functions
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Entity)) {
+ if (!Method->isStatic()) {
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_method)
+ << Method << Arg->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+ }
+
+ FunctionDecl *Func = dyn_cast<FunctionDecl>(Entity);
+ VarDecl *Var = dyn_cast<VarDecl>(Entity);
+
+ // A non-type template argument must refer to an object or function.
+ if (!Func && !Var) {
+ // We found something, but we don't know specifically what it is.
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_not_object_or_func)
+ << Arg->getSourceRange();
+ S.Diag(DRE->getDecl()->getLocation(), diag::note_template_arg_refers_here);
+ return true;
+ }
+
+ // Address / reference template args must have external linkage in C++98.
+ if (Entity->getFormalLinkage() == InternalLinkage) {
+ S.Diag(Arg->getLocStart(), S.getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_template_arg_object_internal :
+ diag::ext_template_arg_object_internal)
+ << !Func << Entity << Arg->getSourceRange();
+ S.Diag(Entity->getLocation(), diag::note_template_arg_internal_object)
+ << !Func;
+ } else if (!Entity->hasLinkage()) {
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_object_no_linkage)
+ << !Func << Entity << Arg->getSourceRange();
+ S.Diag(Entity->getLocation(), diag::note_template_arg_internal_object)
+ << !Func;
+ return true;
+ }
+
+ if (Func) {
+ // If the template parameter has pointer type, the function decays.
+ if (ParamType->isPointerType() && !AddressTaken)
+ ArgType = S.Context.getPointerType(Func->getType());
+ else if (AddressTaken && ParamType->isReferenceType()) {
+ // If we originally had an address-of operator, but the
+ // parameter has reference type, complain and (if things look
+ // like they will work) drop the address-of operator.
+ if (!S.Context.hasSameUnqualifiedType(Func->getType(),
+ ParamType.getNonReferenceType())) {
+ S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
+ << ParamType;
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
+ << ParamType
+ << FixItHint::CreateRemoval(AddrOpLoc);
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+
+ ArgType = Func->getType();
+ }
+ } else {
+ // A value of reference type is not an object.
+ if (Var->getType()->isReferenceType()) {
+ S.Diag(Arg->getLocStart(),
+ diag::err_template_arg_reference_var)
+ << Var->getType() << Arg->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ // A template argument must have static storage duration.
+ if (Var->getTLSKind()) {
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_thread_local)
+ << Arg->getSourceRange();
+ S.Diag(Var->getLocation(), diag::note_template_arg_refers_here);
+ return true;
+ }
+
+ // If the template parameter has pointer type, we must have taken
+ // the address of this object.
+ if (ParamType->isReferenceType()) {
+ if (AddressTaken) {
+ // If we originally had an address-of operator, but the
+ // parameter has reference type, complain and (if things look
+ // like they will work) drop the address-of operator.
+ if (!S.Context.hasSameUnqualifiedType(Var->getType(),
+ ParamType.getNonReferenceType())) {
+ S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
+ << ParamType;
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
+ << ParamType
+ << FixItHint::CreateRemoval(AddrOpLoc);
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+
+ ArgType = Var->getType();
+ }
+ } else if (!AddressTaken && ParamType->isPointerType()) {
+ if (Var->getType()->isArrayType()) {
+ // Array-to-pointer decay.
+ ArgType = S.Context.getArrayDecayedType(Var->getType());
+ } else {
+ // If the template parameter has pointer type but the address of
+ // this object was not taken, complain and (possibly) recover by
+ // taking the address of the entity.
+ ArgType = S.Context.getPointerType(Var->getType());
+ if (!S.Context.hasSameUnqualifiedType(ArgType, ParamType)) {
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_not_address_of)
+ << ParamType;
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_not_address_of)
+ << ParamType
+ << FixItHint::CreateInsertion(Arg->getLocStart(), "&");
+
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ }
+ }
+ }
+
+ if (CheckTemplateArgumentIsCompatibleWithParameter(S, Param, ParamType, ArgIn,
+ Arg, ArgType))
+ return true;
+
+ // Create the template argument.
+ Converted =
+ TemplateArgument(cast<ValueDecl>(Entity->getCanonicalDecl()), ParamType);
+ S.MarkAnyDeclReferenced(Arg->getLocStart(), Entity, false);
+ return false;
+}
+
+/// \brief Checks whether the given template argument is a pointer to
+/// member constant according to C++ [temp.arg.nontype]p1.
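+///
+/// For instance (illustrative):
+/// \code
+///   struct S { int m; };
+///   template<int S::*P> struct X;
+///   X<&S::m> x; // '&S::m' is a pointer-to-member constant
+/// \endcode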
+static bool CheckTemplateArgumentPointerToMember(Sema &S,
+ NonTypeTemplateParmDecl *Param,
+ QualType ParamType,
+ Expr *&ResultArg,
+ TemplateArgument &Converted) {
+ bool Invalid = false;
+
+ // Check for a null pointer value.
+ Expr *Arg = ResultArg;
+ switch (isNullPointerValueTemplateArgument(S, Param, ParamType, Arg)) {
+ case NPV_Error:
+ return true;
+ case NPV_NullPointer:
+ S.Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
+ Converted = TemplateArgument(S.Context.getCanonicalType(ParamType),
+ /*isNullPtr*/true);
+ return false;
+ case NPV_NotNullPointer:
+ break;
+ }
+
+ bool ObjCLifetimeConversion;
+ if (S.IsQualificationConversion(Arg->getType(),
+ ParamType.getNonReferenceType(),
+ false, ObjCLifetimeConversion)) {
+ Arg = S.ImpCastExprToType(Arg, ParamType, CK_NoOp,
+ Arg->getValueKind()).get();
+ ResultArg = Arg;
+ } else if (!S.Context.hasSameUnqualifiedType(Arg->getType(),
+ ParamType.getNonReferenceType())) {
+ // We can't perform this conversion.
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_not_convertible)
+ << Arg->getType() << ParamType << Arg->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ // See through any implicit casts we added to fix the type.
+ while (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(Arg))
+ Arg = Cast->getSubExpr();
+
+ // C++ [temp.arg.nontype]p1:
+ //
+ // A template-argument for a non-type, non-template
+ // template-parameter shall be one of: [...]
+ //
+ // -- a pointer to member expressed as described in 5.3.1.
+ DeclRefExpr *DRE = nullptr;
+
+ // In C++98/03 mode, give an extension warning on any extra parentheses.
+ // See http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#773
+ bool ExtraParens = false;
+ while (ParenExpr *Parens = dyn_cast<ParenExpr>(Arg)) {
+ if (!Invalid && !ExtraParens) {
+ S.Diag(Arg->getLocStart(),
+ S.getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_template_arg_extra_parens :
+ diag::ext_template_arg_extra_parens)
+ << Arg->getSourceRange();
+ ExtraParens = true;
+ }
+
+ Arg = Parens->getSubExpr();
+ }
+
+ while (SubstNonTypeTemplateParmExpr *subst =
+ dyn_cast<SubstNonTypeTemplateParmExpr>(Arg))
+ Arg = subst->getReplacement()->IgnoreImpCasts();
+
+ // A pointer-to-member constant written &Class::member.
+ if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(Arg)) {
+ if (UnOp->getOpcode() == UO_AddrOf) {
+ DRE = dyn_cast<DeclRefExpr>(UnOp->getSubExpr());
+ if (DRE && !DRE->getQualifier())
+ DRE = nullptr;
+ }
+ }
+ // A constant of pointer-to-member type.
+ else if ((DRE = dyn_cast<DeclRefExpr>(Arg))) {
+ if (ValueDecl *VD = dyn_cast<ValueDecl>(DRE->getDecl())) {
+ if (VD->getType()->isMemberPointerType()) {
+ if (isa<NonTypeTemplateParmDecl>(VD)) {
+ if (Arg->isTypeDependent() || Arg->isValueDependent()) {
+ Converted = TemplateArgument(Arg);
+ } else {
+ VD = cast<ValueDecl>(VD->getCanonicalDecl());
+ Converted = TemplateArgument(VD, ParamType);
+ }
+ return Invalid;
+ }
+ }
+ }
+
+ DRE = nullptr;
+ }
+
+ if (!DRE)
+ return S.Diag(Arg->getLocStart(),
+ diag::err_template_arg_not_pointer_to_member_form)
+ << Arg->getSourceRange();
+
+ if (isa<FieldDecl>(DRE->getDecl()) ||
+ isa<IndirectFieldDecl>(DRE->getDecl()) ||
+ isa<CXXMethodDecl>(DRE->getDecl())) {
+ assert((isa<FieldDecl>(DRE->getDecl()) ||
+ isa<IndirectFieldDecl>(DRE->getDecl()) ||
+ !cast<CXXMethodDecl>(DRE->getDecl())->isStatic()) &&
+ "Only non-static member pointers can make it here");
+
+ // Okay: this is the address of a non-static member, and therefore
+ // a member pointer constant.
+ if (Arg->isTypeDependent() || Arg->isValueDependent()) {
+ Converted = TemplateArgument(Arg);
+ } else {
+ ValueDecl *D = cast<ValueDecl>(DRE->getDecl()->getCanonicalDecl());
+ Converted = TemplateArgument(D, ParamType);
+ }
+ return Invalid;
+ }
+
+ // We found something else, but we don't know specifically what it is.
+ S.Diag(Arg->getLocStart(),
+ diag::err_template_arg_not_pointer_to_member_form)
+ << Arg->getSourceRange();
+ S.Diag(DRE->getDecl()->getLocation(), diag::note_template_arg_refers_here);
+ return true;
+}
+
+/// \brief Check a template argument against its corresponding
+/// non-type template parameter.
+///
+/// This routine implements the semantics of C++ [temp.arg.nontype].
+/// If an error occurred, it returns ExprError(); otherwise, it
+/// returns the converted template argument. \p ParamType is the
+/// type of the non-type template parameter after it has been instantiated.
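+///
+/// For instance (illustrative):
+/// \code
+///   template<long N> struct X;
+///   X<1> x; // the 'int' argument is converted to 'long' here
+/// \endcode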
+ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
+ QualType ParamType, Expr *Arg,
+ TemplateArgument &Converted,
+ CheckTemplateArgumentKind CTAK) {
+ SourceLocation StartLoc = Arg->getLocStart();
+
+ // If either the parameter has a dependent type or the argument is
+ // type-dependent, there's nothing we can check now.
+ if (ParamType->isDependentType() || Arg->isTypeDependent()) {
+ // FIXME: Produce a cloned, canonical expression?
+ Converted = TemplateArgument(Arg);
+ return Arg;
+ }
+
+ // We should have already dropped all cv-qualifiers by now.
+ assert(!ParamType.hasQualifiers() &&
+ "non-type template parameter type cannot be qualified");
+
+ if (CTAK == CTAK_Deduced &&
+ !Context.hasSameUnqualifiedType(ParamType, Arg->getType())) {
+ // C++ [temp.deduct.type]p17:
+ // If, in the declaration of a function template with a non-type
+ // template-parameter, the non-type template-parameter is used
+ // in an expression in the function parameter-list and, if the
+ // corresponding template-argument is deduced, the
+ // template-argument type shall match the type of the
+ // template-parameter exactly, except that a template-argument
+ // deduced from an array bound may be of any integral type.
+ Diag(StartLoc, diag::err_deduced_non_type_template_arg_type_mismatch)
+ << Arg->getType().getUnqualifiedType()
+ << ParamType.getUnqualifiedType();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return ExprError();
+ }
+
+ if (getLangOpts().CPlusPlus1z) {
+ // FIXME: We can do some limited checking for a value-dependent but not
+ // type-dependent argument.
+ if (Arg->isValueDependent()) {
+ Converted = TemplateArgument(Arg);
+ return Arg;
+ }
+
+ // C++1z [temp.arg.nontype]p1:
+ // A template-argument for a non-type template parameter shall be
+ // a converted constant expression of the type of the template-parameter.
+ APValue Value;
+ ExprResult ArgResult = CheckConvertedConstantExpression(
+ Arg, ParamType, Value, CCEK_TemplateArg);
+ if (ArgResult.isInvalid())
+ return ExprError();
+
+ QualType CanonParamType = Context.getCanonicalType(ParamType);
+
+ // Convert the APValue to a TemplateArgument.
+ switch (Value.getKind()) {
+ case APValue::Uninitialized:
+ assert(ParamType->isNullPtrType());
+ Converted = TemplateArgument(CanonParamType, /*isNullPtr*/true);
+ break;
+ case APValue::Int:
+ assert(ParamType->isIntegralOrEnumerationType());
+ Converted = TemplateArgument(Context, Value.getInt(), CanonParamType);
+ break;
+ case APValue::MemberPointer: {
+ assert(ParamType->isMemberPointerType());
+
+ // FIXME: We need TemplateArgument representation and mangling for these.
+ if (!Value.getMemberPointerPath().empty()) {
+ Diag(Arg->getLocStart(),
+ diag::err_template_arg_member_ptr_base_derived_not_supported)
+ << Value.getMemberPointerDecl() << ParamType
+ << Arg->getSourceRange();
+ return ExprError();
+ }
+
+ auto *VD = const_cast<ValueDecl*>(Value.getMemberPointerDecl());
+ Converted = VD ? TemplateArgument(VD, CanonParamType)
+ : TemplateArgument(CanonParamType, /*isNullPtr*/true);
+ break;
+ }
+ case APValue::LValue: {
+ // For a non-type template-parameter of pointer or reference type,
+ // the value of the constant expression shall not refer to
+ assert(ParamType->isPointerType() || ParamType->isReferenceType() ||
+ ParamType->isNullPtrType());
+ // -- a temporary object
+ // -- a string literal
+ // -- the result of a typeid expression, or
+ // -- a predefined __func__ variable
+ if (auto *E = Value.getLValueBase().dyn_cast<const Expr*>()) {
+ if (isa<CXXUuidofExpr>(E)) {
+ Converted = TemplateArgument(const_cast<Expr*>(E));
+ break;
+ }
+ Diag(Arg->getLocStart(), diag::err_template_arg_not_decl_ref)
+ << Arg->getSourceRange();
+ return ExprError();
+ }
+ auto *VD = const_cast<ValueDecl *>(
+ Value.getLValueBase().dyn_cast<const ValueDecl *>());
+ // -- a subobject
+ if (Value.hasLValuePath() && Value.getLValuePath().size() == 1 &&
+ VD && VD->getType()->isArrayType() &&
+ Value.getLValuePath()[0].ArrayIndex == 0 &&
+ !Value.isLValueOnePastTheEnd() && ParamType->isPointerType()) {
+ // Per defect report (no number yet):
+ // ... other than a pointer to the first element of a complete array
+ // object.
+ } else if (!Value.hasLValuePath() || Value.getLValuePath().size() ||
+ Value.isLValueOnePastTheEnd()) {
+ Diag(StartLoc, diag::err_non_type_template_arg_subobject)
+ << Value.getAsString(Context, ParamType);
+ return ExprError();
+ }
+ assert((VD || !ParamType->isReferenceType()) &&
+ "null reference should not be a constant expression");
+ assert((!VD || !ParamType->isNullPtrType()) &&
+ "non-null value of type nullptr_t?");
+ Converted = VD ? TemplateArgument(VD, CanonParamType)
+ : TemplateArgument(CanonParamType, /*isNullPtr*/true);
+ break;
+ }
+ case APValue::AddrLabelDiff:
+ return Diag(StartLoc, diag::err_non_type_template_arg_addr_label_diff);
+ case APValue::Float:
+ case APValue::ComplexInt:
+ case APValue::ComplexFloat:
+ case APValue::Vector:
+ case APValue::Array:
+ case APValue::Struct:
+ case APValue::Union:
+ llvm_unreachable("invalid kind for template argument");
+ }
+
+ return ArgResult.get();
+ }
+
+ // C++ [temp.arg.nontype]p5:
+ // The following conversions are performed on each expression used
+ // as a non-type template-argument. If a non-type
+ // template-argument cannot be converted to the type of the
+ // corresponding template-parameter then the program is
+ // ill-formed.
+ if (ParamType->isIntegralOrEnumerationType()) {
+ // C++11:
+ // -- for a non-type template-parameter of integral or
+ // enumeration type, conversions permitted in a converted
+ // constant expression are applied.
+ //
+ // C++98:
+ // -- for a non-type template-parameter of integral or
+ // enumeration type, integral promotions (4.5) and integral
+ // conversions (4.7) are applied.
+
+ if (getLangOpts().CPlusPlus11) {
+ // We can't check arbitrary value-dependent arguments.
+ // FIXME: If there's no viable conversion to the template parameter type,
+ // we should be able to diagnose that prior to instantiation.
+ if (Arg->isValueDependent()) {
+ Converted = TemplateArgument(Arg);
+ return Arg;
+ }
+
+ // C++ [temp.arg.nontype]p1:
+ // A template-argument for a non-type, non-template template-parameter
+ // shall be one of:
+ //
+ // -- for a non-type template-parameter of integral or enumeration
+ // type, a converted constant expression of the type of the
+ // template-parameter; or
+ llvm::APSInt Value;
+ ExprResult ArgResult =
+ CheckConvertedConstantExpression(Arg, ParamType, Value,
+ CCEK_TemplateArg);
+ if (ArgResult.isInvalid())
+ return ExprError();
+
+ // Widen the argument value to sizeof(parameter type). This is almost
+ // always a no-op, except when the parameter type is bool. In
+ // that case, this may extend the argument from 1 bit to 8 bits.
+ QualType IntegerType = ParamType;
+ if (const EnumType *Enum = IntegerType->getAs<EnumType>())
+ IntegerType = Enum->getDecl()->getIntegerType();
+ Value = Value.extOrTrunc(Context.getTypeSize(IntegerType));
+
+ Converted = TemplateArgument(Context, Value,
+ Context.getCanonicalType(ParamType));
+ return ArgResult;
+ }
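+
+ // For instance (illustrative names), in C++11 mode:
+ //
+ //   template<bool B> struct Flag {};
+ //   Flag<1> f;   // OK: the converted constant expression 1 becomes
+ //                // 'true', then the value is widened to bool's width
+ //   Flag<2> g;   // ill-formed: 2 -> bool is a narrowing conversion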
+
+ ExprResult ArgResult = DefaultLvalueConversion(Arg);
+ if (ArgResult.isInvalid())
+ return ExprError();
+ Arg = ArgResult.get();
+
+ QualType ArgType = Arg->getType();
+
+ // C++ [temp.arg.nontype]p1:
+ // A template-argument for a non-type, non-template
+ // template-parameter shall be one of:
+ //
+ // -- an integral constant-expression of integral or enumeration
+ // type; or
+ // -- the name of a non-type template-parameter; or
+ SourceLocation NonConstantLoc;
+ llvm::APSInt Value;
+ if (!ArgType->isIntegralOrEnumerationType()) {
+ Diag(Arg->getLocStart(),
+ diag::err_template_arg_not_integral_or_enumeral)
+ << ArgType << Arg->getSourceRange();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return ExprError();
+ } else if (!Arg->isValueDependent()) {
+ class TmplArgICEDiagnoser : public VerifyICEDiagnoser {
+ QualType T;
+
+ public:
+ TmplArgICEDiagnoser(QualType T) : T(T) { }
+
+ void diagnoseNotICE(Sema &S, SourceLocation Loc,
+ SourceRange SR) override {
+ S.Diag(Loc, diag::err_template_arg_not_ice) << T << SR;
+ }
+ } Diagnoser(ArgType);
+
+ Arg = VerifyIntegerConstantExpression(Arg, &Value, Diagnoser,
+ false).get();
+ if (!Arg)
+ return ExprError();
+ }
+
+ // From here on out, all we care about is the unqualified form
+ // of the argument type.
+ ArgType = ArgType.getUnqualifiedType();
+
+ // Try to convert the argument to the parameter's type.
+ if (Context.hasSameType(ParamType, ArgType)) {
+ // Okay: no conversion necessary
+ } else if (ParamType->isBooleanType()) {
+ // This is an integral-to-boolean conversion.
+ Arg = ImpCastExprToType(Arg, ParamType, CK_IntegralToBoolean).get();
+ } else if (IsIntegralPromotion(Arg, ArgType, ParamType) ||
+ !ParamType->isEnumeralType()) {
+ // This is an integral promotion or conversion.
+ Arg = ImpCastExprToType(Arg, ParamType, CK_IntegralCast).get();
+ } else {
+ // We can't perform this conversion.
+ Diag(Arg->getLocStart(),
+ diag::err_template_arg_not_convertible)
+ << Arg->getType() << ParamType << Arg->getSourceRange();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return ExprError();
+ }
+
+ // Add the value of this argument to the list of converted
+ // arguments. We use the bitwidth and signedness of the template
+ // parameter.
+ if (Arg->isValueDependent()) {
+ // The argument is value-dependent. Create a new
+ // TemplateArgument with the converted expression.
+ Converted = TemplateArgument(Arg);
+ return Arg;
+ }
+
+ QualType IntegerType = Context.getCanonicalType(ParamType);
+ if (const EnumType *Enum = IntegerType->getAs<EnumType>())
+ IntegerType = Context.getCanonicalType(Enum->getDecl()->getIntegerType());
+
+ if (ParamType->isBooleanType()) {
+ // Value must be zero or one.
+ Value = Value != 0;
+ unsigned AllowedBits = Context.getTypeSize(IntegerType);
+ if (Value.getBitWidth() != AllowedBits)
+ Value = Value.extOrTrunc(AllowedBits);
+ Value.setIsSigned(IntegerType->isSignedIntegerOrEnumerationType());
+ } else {
+ llvm::APSInt OldValue = Value;
+
+ // Coerce the template argument's value to the value it will have
+ // based on the template parameter's type.
+ unsigned AllowedBits = Context.getTypeSize(IntegerType);
+ if (Value.getBitWidth() != AllowedBits)
+ Value = Value.extOrTrunc(AllowedBits);
+ Value.setIsSigned(IntegerType->isSignedIntegerOrEnumerationType());
+
+ // Complain if an unsigned parameter received a negative value.
+ if (IntegerType->isUnsignedIntegerOrEnumerationType()
+ && (OldValue.isSigned() && OldValue.isNegative())) {
+ Diag(Arg->getLocStart(), diag::warn_template_arg_negative)
+ << OldValue.toString(10) << Value.toString(10) << Param->getType()
+ << Arg->getSourceRange();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ }
+
+ // Complain if we overflowed the template parameter's type.
+ unsigned RequiredBits;
+ if (IntegerType->isUnsignedIntegerOrEnumerationType())
+ RequiredBits = OldValue.getActiveBits();
+ else if (OldValue.isUnsigned())
+ RequiredBits = OldValue.getActiveBits() + 1;
+ else
+ RequiredBits = OldValue.getMinSignedBits();
+ if (RequiredBits > AllowedBits) {
+ Diag(Arg->getLocStart(),
+ diag::warn_template_arg_too_large)
+ << OldValue.toString(10) << Value.toString(10) << Param->getType()
+ << Arg->getSourceRange();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ }
+ }
+
+ Converted = TemplateArgument(Context, Value,
+ ParamType->isEnumeralType()
+ ? Context.getCanonicalType(ParamType)
+ : IntegerType);
+ return Arg;
+ }
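+
+ // For instance (illustrative names), in C++98 mode both warnings above
+ // fire:
+ //
+ //   template<unsigned char C> struct Small {};
+ //   Small<-1> s;    // warn_template_arg_negative: -1 becomes 255
+ //   Small<300> t;   // warn_template_arg_too_large: 300 becomes 44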
+
+ QualType ArgType = Arg->getType();
+ DeclAccessPair FoundResult; // temporary for ResolveOverloadedFunction
+
+ // Handle pointer-to-function, reference-to-function, and
+ // pointer-to-member-function all in (roughly) the same way.
+ if (// -- For a non-type template-parameter of type pointer to
+ // function, only the function-to-pointer conversion (4.3) is
+ // applied. If the template-argument represents a set of
+ // overloaded functions (or a pointer to such), the matching
+ // function is selected from the set (13.4).
+ (ParamType->isPointerType() &&
+ ParamType->getAs<PointerType>()->getPointeeType()->isFunctionType()) ||
+ // -- For a non-type template-parameter of type reference to
+ // function, no conversions apply. If the template-argument
+ // represents a set of overloaded functions, the matching
+ // function is selected from the set (13.4).
+ (ParamType->isReferenceType() &&
+ ParamType->getAs<ReferenceType>()->getPointeeType()->isFunctionType()) ||
+ // -- For a non-type template-parameter of type pointer to
+ // member function, no conversions apply. If the
+ // template-argument represents a set of overloaded member
+ // functions, the matching member function is selected from
+ // the set (13.4).
+ (ParamType->isMemberPointerType() &&
+ ParamType->getAs<MemberPointerType>()->getPointeeType()
+ ->isFunctionType())) {
+
+ if (Arg->getType() == Context.OverloadTy) {
+ if (FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(Arg, ParamType,
+ true,
+ FoundResult)) {
+ if (DiagnoseUseOfDecl(Fn, Arg->getLocStart()))
+ return ExprError();
+
+ Arg = FixOverloadedFunctionReference(Arg, FoundResult, Fn);
+ ArgType = Arg->getType();
+ } else
+ return ExprError();
+ }
+
+ if (!ParamType->isMemberPointerType()) {
+ if (CheckTemplateArgumentAddressOfObjectOrFunction(*this, Param,
+ ParamType,
+ Arg, Converted))
+ return ExprError();
+ return Arg;
+ }
+
+ if (CheckTemplateArgumentPointerToMember(*this, Param, ParamType, Arg,
+ Converted))
+ return ExprError();
+ return Arg;
+ }
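+
+ // For instance (illustrative names), overload resolution selects the
+ // matching function from the set here:
+ //
+ //   void f(int);
+ //   void f(double);
+ //   template<void (*FP)(int)> struct Call {};
+ //   Call<&f> c;   // resolves to f(int) per 13.4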
+
+ if (ParamType->isPointerType()) {
+ // -- for a non-type template-parameter of type pointer to
+ // object, qualification conversions (4.4) and the
+ // array-to-pointer conversion (4.2) are applied.
+ // C++0x also allows a value of std::nullptr_t.
+ assert(ParamType->getPointeeType()->isIncompleteOrObjectType() &&
+ "Only object pointers allowed here");
+
+ if (CheckTemplateArgumentAddressOfObjectOrFunction(*this, Param,
+ ParamType,
+ Arg, Converted))
+ return ExprError();
+ return Arg;
+ }
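+
+ // For instance (illustrative names), the array-to-pointer conversion
+ // applies here:
+ //
+ //   extern int arr[3];
+ //   template<int *P> struct Ptr {};
+ //   Ptr<arr> p;   // 'arr' decays to a pointer to its first element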
+
+ if (const ReferenceType *ParamRefType = ParamType->getAs<ReferenceType>()) {
+ // -- For a non-type template-parameter of type reference to
+ // object, no conversions apply. The type referred to by the
+ // reference may be more cv-qualified than the (otherwise
+ // identical) type of the template-argument. The
+ // template-parameter is bound directly to the
+ // template-argument, which must be an lvalue.
+ assert(ParamRefType->getPointeeType()->isIncompleteOrObjectType() &&
+ "Only object references allowed here");
+
+ if (Arg->getType() == Context.OverloadTy) {
+ if (FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(Arg,
+ ParamRefType->getPointeeType(),
+ true,
+ FoundResult)) {
+ if (DiagnoseUseOfDecl(Fn, Arg->getLocStart()))
+ return ExprError();
+
+ Arg = FixOverloadedFunctionReference(Arg, FoundResult, Fn);
+ ArgType = Arg->getType();
+ } else
+ return ExprError();
+ }
+
+ if (CheckTemplateArgumentAddressOfObjectOrFunction(*this, Param,
+ ParamType,
+ Arg, Converted))
+ return ExprError();
+ return Arg;
+ }
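+
+ // For instance (illustrative names), the parameter binds directly to an
+ // lvalue:
+ //
+ //   extern int g;
+ //   template<int &R> struct Ref {};
+ //   Ref<g> r;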
+
+ // Deal with parameters of type std::nullptr_t.
+ if (ParamType->isNullPtrType()) {
+ if (Arg->isTypeDependent() || Arg->isValueDependent()) {
+ Converted = TemplateArgument(Arg);
+ return Arg;
+ }
+
+ switch (isNullPointerValueTemplateArgument(*this, Param, ParamType, Arg)) {
+ case NPV_NotNullPointer:
+ Diag(Arg->getExprLoc(), diag::err_template_arg_not_convertible)
+ << Arg->getType() << ParamType;
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return ExprError();
+
+ case NPV_Error:
+ return ExprError();
+
+ case NPV_NullPointer:
+ Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
+ Converted = TemplateArgument(Context.getCanonicalType(ParamType),
+ /*isNullPtr*/true);
+ return Arg;
+ }
+ }
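+
+ // For instance (illustrative names):
+ //
+ //   template<decltype(nullptr) NP> struct Null {};
+ //   Null<nullptr> n;   // NPV_NullPointer; warns under -Wc++98-compat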
+
+ // -- For a non-type template-parameter of type pointer to data
+ // member, qualification conversions (4.4) are applied.
+ assert(ParamType->isMemberPointerType() && "Only pointers to members remain");
+
+ if (CheckTemplateArgumentPointerToMember(*this, Param, ParamType, Arg,
+ Converted))
+ return ExprError();
+ return Arg;
+}
+
+/// \brief Check a template argument against its corresponding
+/// template template parameter.
+///
+/// This routine implements the semantics of C++ [temp.arg.template].
+/// It returns true if an error occurred, and false otherwise.
+bool Sema::CheckTemplateArgument(TemplateTemplateParmDecl *Param,
+ TemplateArgumentLoc &Arg,
+ unsigned ArgumentPackIndex) {
+ TemplateName Name = Arg.getArgument().getAsTemplateOrTemplatePattern();
+ TemplateDecl *Template = Name.getAsTemplateDecl();
+ if (!Template) {
+ // Any dependent template name is fine.
+ assert(Name.isDependent() && "Non-dependent template isn't a declaration?");
+ return false;
+ }
+
+ // C++0x [temp.arg.template]p1:
+ // A template-argument for a template template-parameter shall be
+ // the name of a class template or an alias template, expressed as an
+ // id-expression. When the template-argument names a class template, only
+ // primary class templates are considered when matching the
+ // template template argument with the corresponding parameter;
+ // partial specializations are not considered even if their
+ // parameter lists match that of the template template parameter.
+ //
+ // Note that we also allow template template parameters here, which
+ // will happen when we are dealing with, e.g., class template
+ // partial specializations.
+ if (!isa<ClassTemplateDecl>(Template) &&
+ !isa<TemplateTemplateParmDecl>(Template) &&
+ !isa<TypeAliasTemplateDecl>(Template)) {
+ assert(isa<FunctionTemplateDecl>(Template) &&
+ "Only function templates are possible here");
+ Diag(Arg.getLocation(), diag::err_template_arg_not_class_template);
+ Diag(Template->getLocation(), diag::note_template_arg_refers_here_func)
+ << Template;
+ }
+
+ TemplateParameterList *Params = Param->getTemplateParameters();
+ if (Param->isExpandedParameterPack())
+ Params = Param->getExpansionTemplateParameters(ArgumentPackIndex);
+
+ return !TemplateParameterListsAreEqual(Template->getTemplateParameters(),
+ Params,
+ true,
+ TPL_TemplateTemplateArgumentMatch,
+ Arg.getLocation());
+}
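+
+// For instance (illustrative names), the parameter-list comparison above
+// accepts:
+//
+//   template<typename T> struct Vec {};
+//   template<template<typename> class TT> struct User {};
+//   User<Vec> u;   // Vec's template parameter list matches TT's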
+
+/// \brief Given a non-type template argument that refers to a
+/// declaration and the type of its corresponding non-type template
+/// parameter, produce an expression that properly refers to that
+/// declaration.
+ExprResult
+Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
+ QualType ParamType,
+ SourceLocation Loc) {
+ // C++ [temp.param]p8:
+ //
+ // A non-type template-parameter of type "array of T" or
+ // "function returning T" is adjusted to be of type "pointer to
+ // T" or "pointer to function returning T", respectively.
+ if (ParamType->isArrayType())
+ ParamType = Context.getArrayDecayedType(ParamType);
+ else if (ParamType->isFunctionType())
+ ParamType = Context.getPointerType(ParamType);
+
+ // For a NULL non-type template argument, return nullptr cast to the
+ // parameter's type.
+ if (Arg.getKind() == TemplateArgument::NullPtr) {
+ return ImpCastExprToType(
+ new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc),
+ ParamType,
+ ParamType->getAs<MemberPointerType>()
+ ? CK_NullToMemberPointer
+ : CK_NullToPointer);
+ }
+ assert(Arg.getKind() == TemplateArgument::Declaration &&
+ "Only declaration template arguments permitted here");
+
+ ValueDecl *VD = cast<ValueDecl>(Arg.getAsDecl());
+
+ if (VD->getDeclContext()->isRecord() &&
+ (isa<CXXMethodDecl>(VD) || isa<FieldDecl>(VD) ||
+ isa<IndirectFieldDecl>(VD))) {
+ // If the value is a class member, we might have a pointer-to-member.
+ // Determine whether the non-type template template parameter is of
+ // pointer-to-member type. If so, we need to build an appropriate
+ // expression for a pointer-to-member, since a "normal" DeclRefExpr
+ // would refer to the member itself.
+ if (ParamType->isMemberPointerType()) {
+ QualType ClassType
+ = Context.getTypeDeclType(cast<RecordDecl>(VD->getDeclContext()));
+ NestedNameSpecifier *Qualifier
+ = NestedNameSpecifier::Create(Context, nullptr, false,
+ ClassType.getTypePtr());
+ CXXScopeSpec SS;
+ SS.MakeTrivial(Context, Qualifier, Loc);
+
+ // The actual value-ness of this is unimportant, but for
+ // internal consistency's sake, references to instance methods
+ // are r-values.
+ ExprValueKind VK = VK_LValue;
+ if (isa<CXXMethodDecl>(VD) && cast<CXXMethodDecl>(VD)->isInstance())
+ VK = VK_RValue;
+
+ ExprResult RefExpr = BuildDeclRefExpr(VD,
+ VD->getType().getNonReferenceType(),
+ VK,
+ Loc,
+ &SS);
+ if (RefExpr.isInvalid())
+ return ExprError();
+
+ RefExpr = CreateBuiltinUnaryOp(Loc, UO_AddrOf, RefExpr.get());
+
+ // We might need to perform a trailing qualification conversion, since
+ // the element type on the parameter could be more qualified than the
+ // element type in the expression we constructed.
+ bool ObjCLifetimeConversion;
+ if (IsQualificationConversion(((Expr*) RefExpr.get())->getType(),
+ ParamType.getUnqualifiedType(), false,
+ ObjCLifetimeConversion))
+ RefExpr = ImpCastExprToType(RefExpr.get(), ParamType.getUnqualifiedType(), CK_NoOp);
+
+ assert(!RefExpr.isInvalid() &&
+ Context.hasSameType(((Expr*) RefExpr.get())->getType(),
+ ParamType.getUnqualifiedType()));
+ return RefExpr;
+ }
+ }
+
+ QualType T = VD->getType().getNonReferenceType();
+
+ if (ParamType->isPointerType()) {
+ // When the non-type template parameter is a pointer, take the
+ // address of the declaration.
+ ExprResult RefExpr = BuildDeclRefExpr(VD, T, VK_LValue, Loc);
+ if (RefExpr.isInvalid())
+ return ExprError();
+
+ if (T->isFunctionType() || T->isArrayType()) {
+ // Decay functions and arrays.
+ RefExpr = DefaultFunctionArrayConversion(RefExpr.get());
+ if (RefExpr.isInvalid())
+ return ExprError();
+
+ return RefExpr;
+ }
+
+ // Take the address of everything else
+ return CreateBuiltinUnaryOp(Loc, UO_AddrOf, RefExpr.get());
+ }
+
+ ExprValueKind VK = VK_RValue;
+
+ // If the non-type template parameter has reference type, qualify the
+ // resulting declaration reference with the extra qualifiers on the
+ // type that the reference refers to.
+ if (const ReferenceType *TargetRef = ParamType->getAs<ReferenceType>()) {
+ VK = VK_LValue;
+ T = Context.getQualifiedType(T,
+ TargetRef->getPointeeType().getQualifiers());
+ } else if (isa<FunctionDecl>(VD)) {
+ // References to functions are always lvalues.
+ VK = VK_LValue;
+ }
+
+ return BuildDeclRefExpr(VD, T, VK, Loc);
+}
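+
+// For instance (illustrative names), when instantiating 'Use<&S::f>' below,
+// the argument must be rebuilt as the pointer-to-member expression '&S::f'
+// rather than as a plain reference to the member:
+//
+//   struct S { int f; };
+//   template<int S::*M> struct Use {};
+//   Use<&S::f> u;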
+
+/// \brief Construct a new expression that refers to the given
+/// integral template argument with the given source-location
+/// information.
+///
+/// This routine takes care of the mapping from an integral template
+/// argument (which may have any integral type) to the appropriate
+/// literal value.
+ExprResult
+Sema::BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
+ SourceLocation Loc) {
+ assert(Arg.getKind() == TemplateArgument::Integral &&
+ "Operation is only valid for integral template arguments");
+ QualType OrigT = Arg.getIntegralType();
+
+ // If this is an enum type that we're instantiating, we need to use an integer
+ // type the same size as the enumerator. We don't want to build an
+ // IntegerLiteral with enum type. Since C++11, the underlying type of an
+ // enum can be any integral type, so make sure we create the right kind
+ // of literal for it.
+ QualType T = OrigT;
+ if (const EnumType *ET = OrigT->getAs<EnumType>())
+ T = ET->getDecl()->getIntegerType();
+
+ Expr *E;
+ if (T->isAnyCharacterType()) {
+ CharacterLiteral::CharacterKind Kind;
+ if (T->isWideCharType())
+ Kind = CharacterLiteral::Wide;
+ else if (T->isChar16Type())
+ Kind = CharacterLiteral::UTF16;
+ else if (T->isChar32Type())
+ Kind = CharacterLiteral::UTF32;
+ else
+ Kind = CharacterLiteral::Ascii;
+
+ E = new (Context) CharacterLiteral(Arg.getAsIntegral().getZExtValue(),
+ Kind, T, Loc);
+ } else if (T->isBooleanType()) {
+ E = new (Context) CXXBoolLiteralExpr(Arg.getAsIntegral().getBoolValue(),
+ T, Loc);
+ } else if (T->isNullPtrType()) {
+ E = new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc);
+ } else {
+ E = IntegerLiteral::Create(Context, Arg.getAsIntegral(), T, Loc);
+ }
+
+ if (OrigT->isEnumeralType()) {
+ // FIXME: This is a hack. We need a better way to handle substituted
+ // non-type template parameters.
+ E = CStyleCastExpr::Create(Context, OrigT, VK_RValue, CK_IntegralCast, E,
+ nullptr,
+ Context.getTrivialTypeSourceInfo(OrigT, Loc),
+ Loc, Loc);
+ }
+
+ return E;
+}
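+
+// For instance (illustrative names), substituting into 'Arr<A>' below builds
+// an IntegerLiteral of the underlying type and then casts it back to 'E' via
+// the CStyleCastExpr hack above:
+//
+//   enum E { A = 1 };
+//   template<E V> struct Arr { int a[V]; };
+//   Arr<A> x;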
+
+/// \brief Match two template parameters within template parameter lists.
+static bool MatchTemplateParameterKind(Sema &S, NamedDecl *New, NamedDecl *Old,
+ bool Complain,
+ Sema::TemplateParameterListEqualKind Kind,
+ SourceLocation TemplateArgLoc) {
+ // Check the actual kind (type, non-type, template).
+ if (Old->getKind() != New->getKind()) {
+ if (Complain) {
+ unsigned NextDiag = diag::err_template_param_different_kind;
+ if (TemplateArgLoc.isValid()) {
+ S.Diag(TemplateArgLoc, diag::err_template_arg_template_params_mismatch);
+ NextDiag = diag::note_template_param_different_kind;
+ }
+ S.Diag(New->getLocation(), NextDiag)
+ << (Kind != Sema::TPL_TemplateMatch);
+ S.Diag(Old->getLocation(), diag::note_template_prev_declaration)
+ << (Kind != Sema::TPL_TemplateMatch);
+ }
+
+ return false;
+ }
+
+ // Check that both are parameter packs or neither are parameter packs.
+ // However, if we are matching a template template argument to a
+ // template template parameter, the template template parameter can have
+ // a parameter pack where the template template argument does not.
+ if (Old->isTemplateParameterPack() != New->isTemplateParameterPack() &&
+ !(Kind == Sema::TPL_TemplateTemplateArgumentMatch &&
+ Old->isTemplateParameterPack())) {
+ if (Complain) {
+ unsigned NextDiag = diag::err_template_parameter_pack_non_pack;
+ if (TemplateArgLoc.isValid()) {
+ S.Diag(TemplateArgLoc,
+ diag::err_template_arg_template_params_mismatch);
+ NextDiag = diag::note_template_parameter_pack_non_pack;
+ }
+
+ unsigned ParamKind = isa<TemplateTypeParmDecl>(New)? 0
+ : isa<NonTypeTemplateParmDecl>(New)? 1
+ : 2;
+ S.Diag(New->getLocation(), NextDiag)
+ << ParamKind << New->isParameterPack();
+ S.Diag(Old->getLocation(), diag::note_template_parameter_pack_here)
+ << ParamKind << Old->isParameterPack();
+ }
+
+ return false;
+ }
+
+ // For non-type template parameters, check the type of the parameter.
+ if (NonTypeTemplateParmDecl *OldNTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(Old)) {
+ NonTypeTemplateParmDecl *NewNTTP = cast<NonTypeTemplateParmDecl>(New);
+
+ // If we are matching a template template argument to a template
+ // template parameter and one of the non-type template parameter types
+ // is dependent, then we must wait until template instantiation time
+ // to actually compare the arguments.
+ if (Kind == Sema::TPL_TemplateTemplateArgumentMatch &&
+ (OldNTTP->getType()->isDependentType() ||
+ NewNTTP->getType()->isDependentType()))
+ return true;
+
+ if (!S.Context.hasSameType(OldNTTP->getType(), NewNTTP->getType())) {
+ if (Complain) {
+ unsigned NextDiag = diag::err_template_nontype_parm_different_type;
+ if (TemplateArgLoc.isValid()) {
+ S.Diag(TemplateArgLoc,
+ diag::err_template_arg_template_params_mismatch);
+ NextDiag = diag::note_template_nontype_parm_different_type;
+ }
+ S.Diag(NewNTTP->getLocation(), NextDiag)
+ << NewNTTP->getType()
+ << (Kind != Sema::TPL_TemplateMatch);
+ S.Diag(OldNTTP->getLocation(),
+ diag::note_template_nontype_parm_prev_declaration)
+ << OldNTTP->getType();
+ }
+
+ return false;
+ }
+
+ return true;
+ }
+
+ // For template template parameters, check the template parameter types.
+ // The template parameter lists of template template
+ // parameters must agree.
+ if (TemplateTemplateParmDecl *OldTTP
+ = dyn_cast<TemplateTemplateParmDecl>(Old)) {
+ TemplateTemplateParmDecl *NewTTP = cast<TemplateTemplateParmDecl>(New);
+ return S.TemplateParameterListsAreEqual(NewTTP->getTemplateParameters(),
+ OldTTP->getTemplateParameters(),
+ Complain,
+ (Kind == Sema::TPL_TemplateMatch
+ ? Sema::TPL_TemplateTemplateParmMatch
+ : Kind),
+ TemplateArgLoc);
+ }
+
+ return true;
+}
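+
+// For instance (illustrative names), the kind check above rejects:
+//
+//   template<template<typename> class TT> struct P {};
+//   template<int N> struct Q {};
+//   P<Q> p;   // error: Q's parameter is a non-type parameter, but TT's
+//             // corresponding parameter is a type parameter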
+
+/// \brief Diagnose a known arity mismatch when comparing template argument
+/// lists.
+static
+void DiagnoseTemplateParameterListArityMismatch(Sema &S,
+ TemplateParameterList *New,
+ TemplateParameterList *Old,
+ Sema::TemplateParameterListEqualKind Kind,
+ SourceLocation TemplateArgLoc) {
+ unsigned NextDiag = diag::err_template_param_list_different_arity;
+ if (TemplateArgLoc.isValid()) {
+ S.Diag(TemplateArgLoc, diag::err_template_arg_template_params_mismatch);
+ NextDiag = diag::note_template_param_list_different_arity;
+ }
+ S.Diag(New->getTemplateLoc(), NextDiag)
+ << (New->size() > Old->size())
+ << (Kind != Sema::TPL_TemplateMatch)
+ << SourceRange(New->getTemplateLoc(), New->getRAngleLoc());
+ S.Diag(Old->getTemplateLoc(), diag::note_template_prev_declaration)
+ << (Kind != Sema::TPL_TemplateMatch)
+ << SourceRange(Old->getTemplateLoc(), Old->getRAngleLoc());
+}
+
+/// \brief Determine whether the given template parameter lists are
+/// equivalent.
+///
+/// \param New The new template parameter list, typically written in the
+/// source code as part of a new template declaration.
+///
+/// \param Old The old template parameter list, typically found via
+/// name lookup of the template declared with this template parameter
+/// list.
+///
+/// \param Complain If true, this routine will produce a diagnostic if
+/// the template parameter lists are not equivalent.
+///
+/// \param Kind describes how we are to match the template parameter lists.
+///
+/// \param TemplateArgLoc If this source location is valid, then we
+/// are actually checking the template parameter list of a template
+/// argument (New) against the template parameter list of its
+/// corresponding template template parameter (Old). We produce
+/// slightly different diagnostics in this scenario.
+///
+/// \returns True if the template parameter lists are equal, false
+/// otherwise.
+bool
+Sema::TemplateParameterListsAreEqual(TemplateParameterList *New,
+ TemplateParameterList *Old,
+ bool Complain,
+ TemplateParameterListEqualKind Kind,
+ SourceLocation TemplateArgLoc) {
+ if (Old->size() != New->size() && Kind != TPL_TemplateTemplateArgumentMatch) {
+ if (Complain)
+ DiagnoseTemplateParameterListArityMismatch(*this, New, Old, Kind,
+ TemplateArgLoc);
+
+ return false;
+ }
+
+ // C++0x [temp.arg.template]p3:
+ // A template-argument matches a template template-parameter (call it P)
+ // when each of the template parameters in the template-parameter-list of
+ // the template-argument's corresponding class template or alias template
+ // (call it A) matches the corresponding template parameter in the
+ // template-parameter-list of P. [...]
+ TemplateParameterList::iterator NewParm = New->begin();
+ TemplateParameterList::iterator NewParmEnd = New->end();
+ for (TemplateParameterList::iterator OldParm = Old->begin(),
+ OldParmEnd = Old->end();
+ OldParm != OldParmEnd; ++OldParm) {
+ if (Kind != TPL_TemplateTemplateArgumentMatch ||
+ !(*OldParm)->isTemplateParameterPack()) {
+ if (NewParm == NewParmEnd) {
+ if (Complain)
+ DiagnoseTemplateParameterListArityMismatch(*this, New, Old, Kind,
+ TemplateArgLoc);
+
+ return false;
+ }
+
+ if (!MatchTemplateParameterKind(*this, *NewParm, *OldParm, Complain,
+ Kind, TemplateArgLoc))
+ return false;
+
+ ++NewParm;
+ continue;
+ }
+
+ // C++0x [temp.arg.template]p3:
+ // [...] When P's template-parameter-list contains a template parameter
+ // pack (14.5.3), the template parameter pack will match zero or more
+ // template parameters or template parameter packs in the
+ // template-parameter-list of A with the same type and form as the
+ // template parameter pack in P (ignoring whether those template
+ // parameters are template parameter packs).
+ for (; NewParm != NewParmEnd; ++NewParm) {
+ if (!MatchTemplateParameterKind(*this, *NewParm, *OldParm, Complain,
+ Kind, TemplateArgLoc))
+ return false;
+ }
+ }
+
+ // Make sure we exhausted all of the arguments.
+ if (NewParm != NewParmEnd) {
+ if (Complain)
+ DiagnoseTemplateParameterListArityMismatch(*this, New, Old, Kind,
+ TemplateArgLoc);
+
+ return false;
+ }
+
+ return true;
+}
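+
+// For instance (illustrative names), the pack-matching rule above lets a
+// pack in the parameter match several parameters in the argument:
+//
+//   template<typename T, typename U> struct Pair {};
+//   template<template<typename...> class TT> struct Apply {};
+//   Apply<Pair> a;   // TT's pack matches Pair's two type parameters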
+
+/// \brief Check whether a template can be declared within this scope.
+///
+/// If the template declaration is valid in this scope, returns
+/// false. Otherwise, issues a diagnostic and returns true.
+bool
+Sema::CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams) {
+ if (!S)
+ return false;
+
+ // Find the nearest enclosing declaration scope.
+ while ((S->getFlags() & Scope::DeclScope) == 0 ||
+ (S->getFlags() & Scope::TemplateParamScope) != 0)
+ S = S->getParent();
+
+ // C++ [temp]p4:
+ // A template [...] shall not have C linkage.
+ DeclContext *Ctx = S->getEntity();
+ if (Ctx && Ctx->isExternCContext())
+ return Diag(TemplateParams->getTemplateLoc(), diag::err_template_linkage)
+ << TemplateParams->getSourceRange();
+
+ while (Ctx && isa<LinkageSpecDecl>(Ctx))
+ Ctx = Ctx->getParent();
+
+ // C++ [temp]p2:
+ // A template-declaration can appear only as a namespace scope or
+ // class scope declaration.
+ if (Ctx) {
+ if (Ctx->isFileContext())
+ return false;
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Ctx)) {
+ // C++ [temp.mem]p2:
+ // A local class shall not have member templates.
+ if (RD->isLocalClass())
+ return Diag(TemplateParams->getTemplateLoc(),
+ diag::err_template_inside_local_class)
+ << TemplateParams->getSourceRange();
+ else
+ return false;
+ }
+ }
+
+ return Diag(TemplateParams->getTemplateLoc(),
+ diag::err_template_outside_namespace_or_class_scope)
+ << TemplateParams->getSourceRange();
+}
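+
+// For instance (illustrative names), the scope check above rejects:
+//
+//   void f() {
+//     template<typename T> struct Local {};   // error: template declared
+//   }                                         // at function scope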
+
+/// \brief Determine what kind of template specialization the given declaration
+/// is.
+static TemplateSpecializationKind getTemplateSpecializationKind(Decl *D) {
+ if (!D)
+ return TSK_Undeclared;
+
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D))
+ return Record->getTemplateSpecializationKind();
+ if (FunctionDecl *Function = dyn_cast<FunctionDecl>(D))
+ return Function->getTemplateSpecializationKind();
+ if (VarDecl *Var = dyn_cast<VarDecl>(D))
+ return Var->getTemplateSpecializationKind();
+
+ return TSK_Undeclared;
+}
+
+/// \brief Check whether a specialization is well-formed in the current
+/// context.
+///
+/// This routine determines whether a template specialization can be declared
+/// in the current context (C++ [temp.expl.spec]p2).
+///
+/// \param S the semantic analysis object for which this check is being
+/// performed.
+///
+/// \param Specialized the entity being specialized or instantiated, which
+/// may be a kind of template (class template, function template, etc.) or
+/// a member of a class template (member function, static data member,
+/// member class).
+///
+/// \param PrevDecl the previous declaration of this entity, if any.
+///
+/// \param Loc the location of the explicit specialization or instantiation of
+/// this entity.
+///
+/// \param IsPartialSpecialization whether this is a partial specialization of
+/// a class template.
+///
+/// \returns true if there was an error that we cannot recover from, false
+/// otherwise.
+static bool CheckTemplateSpecializationScope(Sema &S,
+ NamedDecl *Specialized,
+ NamedDecl *PrevDecl,
+ SourceLocation Loc,
+ bool IsPartialSpecialization) {
+ // Keep these "kind" numbers in sync with the %select statements in the
+ // various diagnostics emitted by this routine.
+ int EntityKind = 0;
+ if (isa<ClassTemplateDecl>(Specialized))
+ EntityKind = IsPartialSpecialization? 1 : 0;
+ else if (isa<VarTemplateDecl>(Specialized))
+ EntityKind = IsPartialSpecialization ? 3 : 2;
+ else if (isa<FunctionTemplateDecl>(Specialized))
+ EntityKind = 4;
+ else if (isa<CXXMethodDecl>(Specialized))
+ EntityKind = 5;
+ else if (isa<VarDecl>(Specialized))
+ EntityKind = 6;
+ else if (isa<RecordDecl>(Specialized))
+ EntityKind = 7;
+ else if (isa<EnumDecl>(Specialized) && S.getLangOpts().CPlusPlus11)
+ EntityKind = 8;
+ else {
+ S.Diag(Loc, diag::err_template_spec_unknown_kind)
+ << S.getLangOpts().CPlusPlus11;
+ S.Diag(Specialized->getLocation(), diag::note_specialized_entity);
+ return true;
+ }
+
+ // C++ [temp.expl.spec]p2:
+ // An explicit specialization shall be declared in the namespace
+ // of which the template is a member, or, for member templates, in
+ // the namespace of which the enclosing class or enclosing class
+ // template is a member. An explicit specialization of a member
+ // function, member class or static data member of a class
+ // template shall be declared in the namespace of which the class
+ // template is a member. Such a declaration may also be a
+ // definition. If the declaration is not a definition, the
+ // specialization may be defined later in the namespace in which
+ // the explicit specialization was declared, or in a namespace
+ // that encloses the one in which the explicit specialization was
+ // declared.
+ if (S.CurContext->getRedeclContext()->isFunctionOrMethod()) {
+ S.Diag(Loc, diag::err_template_spec_decl_function_scope)
+ << Specialized;
+ return true;
+ }
+
+ if (S.CurContext->isRecord() && !IsPartialSpecialization) {
+ if (S.getLangOpts().MicrosoftExt) {
+ // Do not warn for class scope explicit specialization during
+ // instantiation, warning was already emitted during pattern
+ // semantic analysis.
+ if (!S.ActiveTemplateInstantiations.size())
+ S.Diag(Loc, diag::ext_function_specialization_in_class)
+ << Specialized;
+ } else {
+ S.Diag(Loc, diag::err_template_spec_decl_class_scope)
+ << Specialized;
+ return true;
+ }
+ }
+
+ if (S.CurContext->isRecord() &&
+ !S.CurContext->Equals(Specialized->getDeclContext())) {
+ // Make sure that we're specializing in the right record context.
+ // Otherwise, things can go horribly wrong.
+ S.Diag(Loc, diag::err_template_spec_decl_class_scope)
+ << Specialized;
+ return true;
+ }
+
+ // C++ [temp.class.spec]p6:
+ // A class template partial specialization may be declared or redeclared
+ // in any namespace scope in which its definition may be defined (14.5.1
+ // and 14.5.2).
+ DeclContext *SpecializedContext
+ = Specialized->getDeclContext()->getEnclosingNamespaceContext();
+ DeclContext *DC = S.CurContext->getEnclosingNamespaceContext();
+
+ // Make sure that this redeclaration (or definition) occurs in an enclosing
+ // namespace.
+ // Note that HandleDeclarator() performs this check for explicit
+ // specializations of function templates, static data members, and member
+ // functions, so we skip the check here for those kinds of entities.
+ // FIXME: HandleDeclarator's diagnostics aren't quite as good, though.
+ // Should we refactor that check, so that it occurs later?
+ if (!DC->Encloses(SpecializedContext) &&
+ !(isa<FunctionTemplateDecl>(Specialized) ||
+ isa<FunctionDecl>(Specialized) ||
+ isa<VarTemplateDecl>(Specialized) ||
+ isa<VarDecl>(Specialized))) {
+ if (isa<TranslationUnitDecl>(SpecializedContext))
+ S.Diag(Loc, diag::err_template_spec_redecl_global_scope)
+ << EntityKind << Specialized;
+ else if (isa<NamespaceDecl>(SpecializedContext)) {
+ int Diag = diag::err_template_spec_redecl_out_of_scope;
+ if (S.getLangOpts().MicrosoftExt)
+ Diag = diag::ext_ms_template_spec_redecl_out_of_scope;
+ S.Diag(Loc, Diag) << EntityKind << Specialized
+ << cast<NamedDecl>(SpecializedContext);
+ } else
+ llvm_unreachable("unexpected namespace context for specialization");
+
+ S.Diag(Specialized->getLocation(), diag::note_specialized_entity);
+ } else if ((!PrevDecl ||
+ getTemplateSpecializationKind(PrevDecl) == TSK_Undeclared ||
+ getTemplateSpecializationKind(PrevDecl) ==
+ TSK_ImplicitInstantiation)) {
+ // C++ [temp.expl.spec]p2:
+ // An explicit specialization shall be declared in the namespace of which
+ // the template is a member, or, for member templates, in the namespace
+ // of which the enclosing class or enclosing class template is a member.
+ // An explicit specialization of a member function, member class or
+ // static data member of a class template shall be declared in the
+ // namespace of which the class template is a member.
+ //
+ // C++11 [temp.expl.spec]p2:
+ // An explicit specialization shall be declared in a namespace enclosing
+ // the specialized template.
+ // C++11 [temp.explicit]p3:
+ // An explicit instantiation shall appear in an enclosing namespace of its
+ // template.
+ if (!DC->InEnclosingNamespaceSetOf(SpecializedContext)) {
+ bool IsCPlusPlus11Extension = DC->Encloses(SpecializedContext);
+ if (isa<TranslationUnitDecl>(SpecializedContext)) {
+ assert(!IsCPlusPlus11Extension &&
+ "DC encloses TU but isn't in enclosing namespace set");
+ S.Diag(Loc, diag::err_template_spec_decl_out_of_scope_global)
+ << EntityKind << Specialized;
+ } else if (isa<NamespaceDecl>(SpecializedContext)) {
+ int Diag;
+ if (!IsCPlusPlus11Extension)
+ Diag = diag::err_template_spec_decl_out_of_scope;
+ else if (!S.getLangOpts().CPlusPlus11)
+ Diag = diag::ext_template_spec_decl_out_of_scope;
+ else
+ Diag = diag::warn_cxx98_compat_template_spec_decl_out_of_scope;
+ S.Diag(Loc, Diag)
+ << EntityKind << Specialized << cast<NamedDecl>(SpecializedContext);
+ }
+
+ S.Diag(Specialized->getLocation(), diag::note_specialized_entity);
+ }
+ }
+
+ return false;
+}
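+
+// For instance (illustrative names), the specialization below is accepted
+// in C++11 but diagnosed as an extension in C++98, because it appears in an
+// enclosing namespace rather than in N itself:
+//
+//   namespace N { template<typename T> struct X {}; }
+//   template<> struct N::X<int> {};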
+
+static SourceRange findTemplateParameter(unsigned Depth, Expr *E) {
+ if (!E->isInstantiationDependent())
+ return SourceLocation();
+ DependencyChecker Checker(Depth);
+ Checker.TraverseStmt(E);
+ if (Checker.Match && Checker.MatchLoc.isInvalid())
+ return E->getSourceRange();
+ return Checker.MatchLoc;
+}
+
+static SourceRange findTemplateParameter(unsigned Depth, TypeLoc TL) {
+ if (!TL.getType()->isDependentType())
+ return SourceLocation();
+ DependencyChecker Checker(Depth);
+ Checker.TraverseTypeLoc(TL);
+ if (Checker.Match && Checker.MatchLoc.isInvalid())
+ return TL.getSourceRange();
+ return Checker.MatchLoc;
+}
+
+/// \brief Subroutine of Sema::CheckTemplatePartialSpecializationArgs
+/// that checks non-type template partial specialization arguments.
+static bool CheckNonTypeTemplatePartialSpecializationArgs(
+ Sema &S, SourceLocation TemplateNameLoc, NonTypeTemplateParmDecl *Param,
+ const TemplateArgument *Args, unsigned NumArgs, bool IsDefaultArgument) {
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ if (Args[I].getKind() == TemplateArgument::Pack) {
+ if (CheckNonTypeTemplatePartialSpecializationArgs(
+ S, TemplateNameLoc, Param, Args[I].pack_begin(),
+ Args[I].pack_size(), IsDefaultArgument))
+ return true;
+
+ continue;
+ }
+
+ if (Args[I].getKind() != TemplateArgument::Expression)
+ continue;
+
+ Expr *ArgExpr = Args[I].getAsExpr();
+
+ // We can have a pack expansion of any of the bullets below.
+ if (PackExpansionExpr *Expansion = dyn_cast<PackExpansionExpr>(ArgExpr))
+ ArgExpr = Expansion->getPattern();
+
+ // Strip off any implicit casts we added as part of type checking.
+ while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgExpr))
+ ArgExpr = ICE->getSubExpr();
+
+ // C++ [temp.class.spec]p8:
+ // A non-type argument is non-specialized if it is the name of a
+ // non-type parameter. All other non-type arguments are
+ // specialized.
+ //
+ // Below, we check the two conditions that only apply to
+ // specialized non-type arguments, so skip any non-specialized
+ // arguments.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ArgExpr))
+ if (isa<NonTypeTemplateParmDecl>(DRE->getDecl()))
+ continue;
+
+ // C++ [temp.class.spec]p9:
+ // Within the argument list of a class template partial
+ // specialization, the following restrictions apply:
+ // -- A partially specialized non-type argument expression
+ // shall not involve a template parameter of the partial
+ // specialization except when the argument expression is a
+ // simple identifier.
+ SourceRange ParamUseRange =
+ findTemplateParameter(Param->getDepth(), ArgExpr);
+ if (ParamUseRange.isValid()) {
+ if (IsDefaultArgument) {
+ S.Diag(TemplateNameLoc,
+ diag::err_dependent_non_type_arg_in_partial_spec);
+ S.Diag(ParamUseRange.getBegin(),
+ diag::note_dependent_non_type_default_arg_in_partial_spec)
+ << ParamUseRange;
+ } else {
+ S.Diag(ParamUseRange.getBegin(),
+ diag::err_dependent_non_type_arg_in_partial_spec)
+ << ParamUseRange;
+ }
+ return true;
+ }
+
+ // -- The type of a template parameter corresponding to a
+ // specialized non-type argument shall not be dependent on a
+ // parameter of the specialization.
+ //
+ // FIXME: We need to delay this check until instantiation in some cases:
+ //
+ // template<template<typename> class X> struct A {
+ // template<typename T, X<T> N> struct B;
+ // template<typename T> struct B<T, 0>;
+ // };
+ // template<typename> using X = int;
+ // A<X>::B<int, 0> b;
+ ParamUseRange = findTemplateParameter(
+ Param->getDepth(), Param->getTypeSourceInfo()->getTypeLoc());
+ if (ParamUseRange.isValid()) {
+ S.Diag(IsDefaultArgument ? TemplateNameLoc : ArgExpr->getLocStart(),
+ diag::err_dependent_typed_non_type_arg_in_partial_spec)
+ << Param->getType() << ParamUseRange;
+ S.Diag(Param->getLocation(), diag::note_template_param_here)
+ << (IsDefaultArgument ? ParamUseRange : SourceRange());
+ return true;
+ }
+ }
+
+ return false;
+}
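+
+// For instance (illustrative names), the first restriction above rejects:
+//
+//   template<int I, int J> struct A {};
+//   template<int I> struct A<I, I * 2> {};   // error: the argument
+//                                            // expression 'I * 2' involves
+//                                            // a parameter of the partial
+//                                            // specialization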
+
+/// \brief Check the non-type template arguments of a class template
+/// partial specialization according to C++ [temp.class.spec]p9.
+///
+/// \param TemplateNameLoc the location of the template name.
+/// \param TemplateParams the template parameters of the primary class
+/// template.
+/// \param NumExplicit the number of explicitly-specified template arguments.
+/// \param TemplateArgs the template arguments of the class template
+/// partial specialization.
+///
+/// \returns \c true if there was an error, \c false otherwise.
+static bool CheckTemplatePartialSpecializationArgs(
+ Sema &S, SourceLocation TemplateNameLoc,
+ TemplateParameterList *TemplateParams, unsigned NumExplicit,
+ SmallVectorImpl<TemplateArgument> &TemplateArgs) {
+ const TemplateArgument *ArgList = TemplateArgs.data();
+
+ for (unsigned I = 0, N = TemplateParams->size(); I != N; ++I) {
+ NonTypeTemplateParmDecl *Param
+ = dyn_cast<NonTypeTemplateParmDecl>(TemplateParams->getParam(I));
+ if (!Param)
+ continue;
+
+ if (CheckNonTypeTemplatePartialSpecializationArgs(
+ S, TemplateNameLoc, Param, &ArgList[I], 1, I >= NumExplicit))
+ return true;
+ }
+
+ return false;
+}
+
+DeclResult
+Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
+ TagUseKind TUK,
+ SourceLocation KWLoc,
+ SourceLocation ModulePrivateLoc,
+ TemplateIdAnnotation &TemplateId,
+ AttributeList *Attr,
+ MultiTemplateParamsArg
+ TemplateParameterLists,
+ SkipBodyInfo *SkipBody) {
+ assert(TUK != TUK_Reference && "References are not specializations");
+
+ CXXScopeSpec &SS = TemplateId.SS;
+
+ // NOTE: KWLoc is the location of the tag keyword; TemplateKWLoc below
+ // instead stores the location of the outermost template keyword in the
+ // declaration.
+ SourceLocation TemplateKWLoc = TemplateParameterLists.size() > 0
+ ? TemplateParameterLists[0]->getTemplateLoc() : KWLoc;
+ SourceLocation TemplateNameLoc = TemplateId.TemplateNameLoc;
+ SourceLocation LAngleLoc = TemplateId.LAngleLoc;
+ SourceLocation RAngleLoc = TemplateId.RAngleLoc;
+
+ // Find the class template we're specializing
+ TemplateName Name = TemplateId.Template.get();
+ ClassTemplateDecl *ClassTemplate
+ = dyn_cast_or_null<ClassTemplateDecl>(Name.getAsTemplateDecl());
+
+ if (!ClassTemplate) {
+ Diag(TemplateNameLoc, diag::err_not_class_template_specialization)
+ << (Name.getAsTemplateDecl() &&
+ isa<TemplateTemplateParmDecl>(Name.getAsTemplateDecl()));
+ return true;
+ }
+
+ bool isExplicitSpecialization = false;
+ bool isPartialSpecialization = false;
+
+ // Check the validity of the template headers that introduce this
+ // template.
+ // FIXME: We probably shouldn't complain about these headers for
+ // friend declarations.
+ bool Invalid = false;
+ TemplateParameterList *TemplateParams =
+ MatchTemplateParametersToScopeSpecifier(
+ KWLoc, TemplateNameLoc, SS, &TemplateId,
+ TemplateParameterLists, TUK == TUK_Friend, isExplicitSpecialization,
+ Invalid);
+ if (Invalid)
+ return true;
+
+ if (TemplateParams && TemplateParams->size() > 0) {
+ isPartialSpecialization = true;
+
+ if (TUK == TUK_Friend) {
+ Diag(KWLoc, diag::err_partial_specialization_friend)
+ << SourceRange(LAngleLoc, RAngleLoc);
+ return true;
+ }
+
+ // C++ [temp.class.spec]p10:
+ // The template parameter list of a specialization shall not
+ // contain default template argument values.
+ for (unsigned I = 0, N = TemplateParams->size(); I != N; ++I) {
+ Decl *Param = TemplateParams->getParam(I);
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
+ if (TTP->hasDefaultArgument()) {
+ Diag(TTP->getDefaultArgumentLoc(),
+ diag::err_default_arg_in_partial_spec);
+ TTP->removeDefaultArgument();
+ }
+ } else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ if (Expr *DefArg = NTTP->getDefaultArgument()) {
+ Diag(NTTP->getDefaultArgumentLoc(),
+ diag::err_default_arg_in_partial_spec)
+ << DefArg->getSourceRange();
+ NTTP->removeDefaultArgument();
+ }
+ } else {
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(Param);
+ if (TTP->hasDefaultArgument()) {
+ Diag(TTP->getDefaultArgument().getLocation(),
+ diag::err_default_arg_in_partial_spec)
+ << TTP->getDefaultArgument().getSourceRange();
+ TTP->removeDefaultArgument();
+ }
+ }
+ }
+ } else if (TemplateParams) {
+ if (TUK == TUK_Friend)
+ Diag(KWLoc, diag::err_template_spec_friend)
+ << FixItHint::CreateRemoval(
+ SourceRange(TemplateParams->getTemplateLoc(),
+ TemplateParams->getRAngleLoc()))
+ << SourceRange(LAngleLoc, RAngleLoc);
+ else
+ isExplicitSpecialization = true;
+ } else {
+ assert(TUK == TUK_Friend && "should have a 'template<>' for this decl");
+ }
+
+ // Check that the specialization uses the same tag kind as the
+ // original template.
+ TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
+ assert(Kind != TTK_Enum && "Invalid enum tag in class template spec!");
+ if (!isAcceptableTagRedeclaration(ClassTemplate->getTemplatedDecl(),
+ Kind, TUK == TUK_Definition, KWLoc,
+ ClassTemplate->getIdentifier())) {
+ Diag(KWLoc, diag::err_use_with_wrong_tag)
+ << ClassTemplate
+ << FixItHint::CreateReplacement(KWLoc,
+ ClassTemplate->getTemplatedDecl()->getKindName());
+ Diag(ClassTemplate->getTemplatedDecl()->getLocation(),
+ diag::note_previous_use);
+ Kind = ClassTemplate->getTemplatedDecl()->getTagKind();
+ }
+
+ // Translate the parser's template argument list in our AST format.
+ TemplateArgumentListInfo TemplateArgs =
+ makeTemplateArgumentListInfo(*this, TemplateId);
+
+ // Check for unexpanded parameter packs in any of the template arguments.
+ for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
+ if (DiagnoseUnexpandedParameterPack(TemplateArgs[I],
+ UPPC_PartialSpecialization))
+ return true;
+
+ // Check that the template argument list is well-formed for this
+ // template.
+ SmallVector<TemplateArgument, 4> Converted;
+ if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc,
+ TemplateArgs, false, Converted))
+ return true;
+
+ // Find the class template (partial) specialization declaration that
+ // corresponds to these arguments.
+ if (isPartialSpecialization) {
+ if (CheckTemplatePartialSpecializationArgs(
+ *this, TemplateNameLoc, ClassTemplate->getTemplateParameters(),
+ TemplateArgs.size(), Converted))
+ return true;
+
+ bool InstantiationDependent;
+ if (!Name.isDependent() &&
+ !TemplateSpecializationType::anyDependentTemplateArguments(
+ TemplateArgs.getArgumentArray(),
+ TemplateArgs.size(),
+ InstantiationDependent)) {
+ Diag(TemplateNameLoc, diag::err_partial_spec_fully_specialized)
+ << ClassTemplate->getDeclName();
+ isPartialSpecialization = false;
+ }
+ }
+
+ void *InsertPos = nullptr;
+ ClassTemplateSpecializationDecl *PrevDecl = nullptr;
+
+ if (isPartialSpecialization)
+ // FIXME: Template parameter list matters, too
+ PrevDecl = ClassTemplate->findPartialSpecialization(Converted, InsertPos);
+ else
+ PrevDecl = ClassTemplate->findSpecialization(Converted, InsertPos);
+
+ ClassTemplateSpecializationDecl *Specialization = nullptr;
+
+ // Check whether we can declare a class template specialization in
+ // the current scope.
+ if (TUK != TUK_Friend &&
+ CheckTemplateSpecializationScope(*this, ClassTemplate, PrevDecl,
+ TemplateNameLoc,
+ isPartialSpecialization))
+ return true;
+
+ // The canonical type
+ QualType CanonType;
+ if (isPartialSpecialization) {
+ // Build the canonical type that describes the converted template
+ // arguments of the class template partial specialization.
+ TemplateName CanonTemplate = Context.getCanonicalTemplateName(Name);
+ CanonType = Context.getTemplateSpecializationType(CanonTemplate,
+ Converted.data(),
+ Converted.size());
+
+ if (Context.hasSameType(CanonType,
+ ClassTemplate->getInjectedClassNameSpecialization())) {
+ // C++ [temp.class.spec]p9b3:
+ //
+ // -- The argument list of the specialization shall not be identical
+ // to the implicit argument list of the primary template.
+ Diag(TemplateNameLoc, diag::err_partial_spec_args_match_primary_template)
+ << /*class template*/0 << (TUK == TUK_Definition)
+ << FixItHint::CreateRemoval(SourceRange(LAngleLoc, RAngleLoc));
+ return CheckClassTemplate(S, TagSpec, TUK, KWLoc, SS,
+ ClassTemplate->getIdentifier(),
+ TemplateNameLoc,
+ Attr,
+ TemplateParams,
+ AS_none, /*ModulePrivateLoc=*/SourceLocation(),
+ /*FriendLoc*/SourceLocation(),
+ TemplateParameterLists.size() - 1,
+ TemplateParameterLists.data());
+ }
+
+ // Create a new class template partial specialization declaration node.
+ ClassTemplatePartialSpecializationDecl *PrevPartial
+ = cast_or_null<ClassTemplatePartialSpecializationDecl>(PrevDecl);
+ ClassTemplatePartialSpecializationDecl *Partial
+ = ClassTemplatePartialSpecializationDecl::Create(Context, Kind,
+ ClassTemplate->getDeclContext(),
+ KWLoc, TemplateNameLoc,
+ TemplateParams,
+ ClassTemplate,
+ Converted.data(),
+ Converted.size(),
+ TemplateArgs,
+ CanonType,
+ PrevPartial);
+ SetNestedNameSpecifier(Partial, SS);
+ if (TemplateParameterLists.size() > 1 && SS.isSet()) {
+ Partial->setTemplateParameterListsInfo(
+ Context, TemplateParameterLists.drop_back(1));
+ }
+
+ if (!PrevPartial)
+ ClassTemplate->AddPartialSpecialization(Partial, InsertPos);
+ Specialization = Partial;
+
+ // If we are providing an explicit specialization of a member class
+ // template specialization, make a note of that.
+ if (PrevPartial && PrevPartial->getInstantiatedFromMember())
+ PrevPartial->setMemberSpecialization();
+
+ // Check that all of the template parameters of the class template
+ // partial specialization are deducible from the template
+ // arguments. If not, this class template partial specialization
+ // will never be used.
+ llvm::SmallBitVector DeducibleParams(TemplateParams->size());
+ MarkUsedTemplateParameters(Partial->getTemplateArgs(), true,
+ TemplateParams->getDepth(),
+ DeducibleParams);
+
+ if (!DeducibleParams.all()) {
+ unsigned NumNonDeducible = DeducibleParams.size()-DeducibleParams.count();
+ Diag(TemplateNameLoc, diag::warn_partial_specs_not_deducible)
+ << /*class template*/0 << (NumNonDeducible > 1)
+ << SourceRange(TemplateNameLoc, RAngleLoc);
+ for (unsigned I = 0, N = DeducibleParams.size(); I != N; ++I) {
+ if (!DeducibleParams[I]) {
+ NamedDecl *Param = cast<NamedDecl>(TemplateParams->getParam(I));
+ if (Param->getDeclName())
+ Diag(Param->getLocation(),
+ diag::note_partial_spec_unused_parameter)
+ << Param->getDeclName();
+ else
+ Diag(Param->getLocation(),
+ diag::note_partial_spec_unused_parameter)
+ << "(anonymous)";
+ }
+ }
+ }
+ } else {
+ // Create a new class template specialization declaration node for
+ // this explicit specialization or friend declaration.
+ Specialization
+ = ClassTemplateSpecializationDecl::Create(Context, Kind,
+ ClassTemplate->getDeclContext(),
+ KWLoc, TemplateNameLoc,
+ ClassTemplate,
+ Converted.data(),
+ Converted.size(),
+ PrevDecl);
+ SetNestedNameSpecifier(Specialization, SS);
+ if (TemplateParameterLists.size() > 0) {
+ Specialization->setTemplateParameterListsInfo(Context,
+ TemplateParameterLists);
+ }
+
+ if (!PrevDecl)
+ ClassTemplate->AddSpecialization(Specialization, InsertPos);
+
+ if (CurContext->isDependentContext()) {
+ // -fms-extensions permits specialization of nested classes without
+ // fully specializing the outer class(es).
+ assert(getLangOpts().MicrosoftExt &&
+ "Only possible with -fms-extensions!");
+ TemplateName CanonTemplate = Context.getCanonicalTemplateName(Name);
+ CanonType = Context.getTemplateSpecializationType(
+ CanonTemplate, Converted.data(), Converted.size());
+ } else {
+ CanonType = Context.getTypeDeclType(Specialization);
+ }
+ }
+
+ // C++ [temp.expl.spec]p6:
+ // If a template, a member template or the member of a class template is
+ // explicitly specialized then that specialization shall be declared
+ // before the first use of that specialization that would cause an implicit
+ // instantiation to take place, in every translation unit in which such a
+ // use occurs; no diagnostic is required.
+ if (PrevDecl && PrevDecl->getPointOfInstantiation().isValid()) {
+ bool Okay = false;
+ for (Decl *Prev = PrevDecl; Prev; Prev = Prev->getPreviousDecl()) {
+ // Is there any previous explicit specialization declaration?
+ if (getTemplateSpecializationKind(Prev) == TSK_ExplicitSpecialization) {
+ Okay = true;
+ break;
+ }
+ }
+
+ if (!Okay) {
+ SourceRange Range(TemplateNameLoc, RAngleLoc);
+ Diag(TemplateNameLoc, diag::err_specialization_after_instantiation)
+ << Context.getTypeDeclType(Specialization) << Range;
+
+ Diag(PrevDecl->getPointOfInstantiation(),
+ diag::note_instantiation_required_here)
+ << (PrevDecl->getTemplateSpecializationKind()
+ != TSK_ImplicitInstantiation);
+ return true;
+ }
+ }
+
+ // If this is not a friend, note that this is an explicit specialization.
+ if (TUK != TUK_Friend)
+ Specialization->setSpecializationKind(TSK_ExplicitSpecialization);
+
+ // Check that this isn't a redefinition of this specialization.
+ if (TUK == TUK_Definition) {
+ RecordDecl *Def = Specialization->getDefinition();
+ NamedDecl *Hidden = nullptr;
+ if (Def && SkipBody && !hasVisibleDefinition(Def, &Hidden)) {
+ SkipBody->ShouldSkip = true;
+ makeMergedDefinitionVisible(Hidden, KWLoc);
+ // From here on out, treat this as just a redeclaration.
+ TUK = TUK_Declaration;
+ } else if (Def) {
+ SourceRange Range(TemplateNameLoc, RAngleLoc);
+ Diag(TemplateNameLoc, diag::err_redefinition)
+ << Context.getTypeDeclType(Specialization) << Range;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ Specialization->setInvalidDecl();
+ return true;
+ }
+ }
+
+ if (Attr)
+ ProcessDeclAttributeList(S, Specialization, Attr);
+
+ // Add alignment attributes if necessary; these attributes are checked when
+ // the ASTContext lays out the structure.
+ if (TUK == TUK_Definition) {
+ AddAlignmentAttributesForRecord(Specialization);
+ AddMsStructLayoutForRecord(Specialization);
+ }
+
+ if (ModulePrivateLoc.isValid())
+ Diag(Specialization->getLocation(), diag::err_module_private_specialization)
+ << (isPartialSpecialization? 1 : 0)
+ << FixItHint::CreateRemoval(ModulePrivateLoc);
+
+ // Build the fully-sugared type for this class template
+ // specialization as the user wrote in the specialization
+ // itself. This means that we'll pretty-print the type retrieved
+ // from the specialization's declaration the way that the user
+ // actually wrote the specialization, rather than formatting the
+ // name based on the "canonical" representation used to store the
+ // template arguments in the specialization.
+ TypeSourceInfo *WrittenTy
+ = Context.getTemplateSpecializationTypeInfo(Name, TemplateNameLoc,
+ TemplateArgs, CanonType);
+ if (TUK != TUK_Friend) {
+ Specialization->setTypeAsWritten(WrittenTy);
+ Specialization->setTemplateKeywordLoc(TemplateKWLoc);
+ }
+
+ // C++ [temp.expl.spec]p9:
+ // A template explicit specialization is in the scope of the
+ // namespace in which the template was defined.
+ //
+ // We actually implement this paragraph where we set the semantic
+ // context (in the creation of the ClassTemplateSpecializationDecl),
+ // but we also maintain the lexical context where the actual
+ // definition occurs.
+ Specialization->setLexicalDeclContext(CurContext);
+
+ // We may be starting the definition of this specialization.
+ if (TUK == TUK_Definition)
+ Specialization->startDefinition();
+
+ if (TUK == TUK_Friend) {
+ FriendDecl *Friend = FriendDecl::Create(Context, CurContext,
+ TemplateNameLoc,
+ WrittenTy,
+ /*FIXME:*/KWLoc);
+ Friend->setAccess(AS_public);
+ CurContext->addDecl(Friend);
+ } else {
+ // Add the specialization into its lexical context, so that it can
+ // be seen when iterating through the list of declarations in that
+ // context. However, specializations are not found by name lookup.
+ CurContext->addDecl(Specialization);
+ }
+ return Specialization;
+}
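+
+// For instance (illustrative names), the check against the primary
+// template's injected-class-name specialization above rejects:
+//
+//   template<typename T> struct S {};
+//   template<typename T> struct S<T> {};   // error: argument list is
+//                                          // identical to the primary
+//                                          // template's implicit list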
+
+Decl *Sema::ActOnTemplateDeclarator(Scope *S,
+ MultiTemplateParamsArg TemplateParameterLists,
+ Declarator &D) {
+ Decl *NewDecl = HandleDeclarator(S, D, TemplateParameterLists);
+ ActOnDocumentableDecl(NewDecl);
+ return NewDecl;
+}
+
+/// \brief Strips various properties off an implicit instantiation
+/// that has just been explicitly specialized.
+static void StripImplicitInstantiation(NamedDecl *D) {
+ D->dropAttr<DLLImportAttr>();
+ D->dropAttr<DLLExportAttr>();
+
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ FD->setInlineSpecified(false);
+}
+
+/// \brief Compute the diagnostic location for an explicit instantiation
+/// declaration or definition.
+static SourceLocation DiagLocForExplicitInstantiation(
+ NamedDecl* D, SourceLocation PointOfInstantiation) {
+ // Explicit instantiations following a specialization have no effect and
+// hence no PointOfInstantiation. In that case, walk the declaration chain
+// backwards until a valid source location is found.
+ SourceLocation PrevDiagLoc = PointOfInstantiation;
+ for (Decl *Prev = D; Prev && !PrevDiagLoc.isValid();
+ Prev = Prev->getPreviousDecl()) {
+ PrevDiagLoc = Prev->getLocation();
+ }
+ assert(PrevDiagLoc.isValid() &&
+ "Explicit instantiation without point of instantiation?");
+ return PrevDiagLoc;
+}
+
+/// \brief Diagnose cases where we have an explicit template specialization
+/// before/after an explicit template instantiation, producing diagnostics
+/// for those cases where they are required and determining whether the
+/// new specialization/instantiation will have any effect.
+///
+/// \param NewLoc the location of the new explicit specialization or
+/// instantiation.
+///
+/// \param NewTSK the kind of the new explicit specialization or instantiation.
+///
+/// \param PrevDecl the previous declaration of the entity.
+///
+/// \param PrevTSK the kind of the old explicit specialization or instantiation.
+///
+/// \param PrevPointOfInstantiation if valid, indicates where the previous
+/// declaration was instantiated (either implicitly or explicitly).
+///
+/// \param HasNoEffect will be set to true to indicate that the new
+/// specialization or instantiation has no effect and should be ignored.
+///
+/// \returns true if there was an error that should prevent the introduction of
+/// the new declaration into the AST, false otherwise.
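+///
+/// For illustration, the ordering rule this enforces (names are arbitrary):
+///
+/// \code
+/// template \<class T> void f(T);
+/// template void f<int>(int);   // explicit instantiation definition
+/// template<> void f<int>(int); // error: specialization after instantiation
+/// \endcode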
+bool
+Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
+ TemplateSpecializationKind NewTSK,
+ NamedDecl *PrevDecl,
+ TemplateSpecializationKind PrevTSK,
+ SourceLocation PrevPointOfInstantiation,
+ bool &HasNoEffect) {
+ HasNoEffect = false;
+
+ switch (NewTSK) {
+ case TSK_Undeclared:
+ case TSK_ImplicitInstantiation:
+ assert(
+ (PrevTSK == TSK_Undeclared || PrevTSK == TSK_ImplicitInstantiation) &&
+ "previous declaration must be implicit!");
+ return false;
+
+ case TSK_ExplicitSpecialization:
+ switch (PrevTSK) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ // Okay, we're just specializing something that is either already
+ // explicitly specialized or has merely been mentioned without any
+ // instantiation.
+ return false;
+
+ case TSK_ImplicitInstantiation:
+ if (PrevPointOfInstantiation.isInvalid()) {
+ // The declaration itself has not actually been instantiated, so it is
+ // still okay to specialize it.
+ StripImplicitInstantiation(PrevDecl);
+ return false;
+ }
+ // Fall through
+
+ case TSK_ExplicitInstantiationDeclaration:
+ case TSK_ExplicitInstantiationDefinition:
+ assert((PrevTSK == TSK_ImplicitInstantiation ||
+ PrevPointOfInstantiation.isValid()) &&
+ "Explicit instantiation without point of instantiation?");
+
+ // C++ [temp.expl.spec]p6:
+ // If a template, a member template or the member of a class template
+ // is explicitly specialized then that specialization shall be declared
+ // before the first use of that specialization that would cause an
+ // implicit instantiation to take place, in every translation unit in
+ // which such a use occurs; no diagnostic is required.
+ for (Decl *Prev = PrevDecl; Prev; Prev = Prev->getPreviousDecl()) {
+ // Is there any previous explicit specialization declaration?
+ if (getTemplateSpecializationKind(Prev) == TSK_ExplicitSpecialization)
+ return false;
+ }
+
+ Diag(NewLoc, diag::err_specialization_after_instantiation)
+ << PrevDecl;
+ Diag(PrevPointOfInstantiation, diag::note_instantiation_required_here)
+ << (PrevTSK != TSK_ImplicitInstantiation);
+
+ return true;
+ }
+
+ case TSK_ExplicitInstantiationDeclaration:
+ switch (PrevTSK) {
+ case TSK_ExplicitInstantiationDeclaration:
+ // This explicit instantiation declaration is redundant (that's okay).
+ HasNoEffect = true;
+ return false;
+
+ case TSK_Undeclared:
+ case TSK_ImplicitInstantiation:
+ // We're explicitly instantiating something that may have already been
+ // implicitly instantiated; that's fine.
+ return false;
+
+ case TSK_ExplicitSpecialization:
+ // C++0x [temp.explicit]p4:
+ // For a given set of template parameters, if an explicit instantiation
+ // of a template appears after a declaration of an explicit
+ // specialization for that template, the explicit instantiation has no
+ // effect.
+ HasNoEffect = true;
+ return false;
+
+ case TSK_ExplicitInstantiationDefinition:
+ // C++0x [temp.explicit]p10:
+ // If an entity is the subject of both an explicit instantiation
+ // declaration and an explicit instantiation definition in the same
+ // translation unit, the definition shall follow the declaration.
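+      //
+      // For illustration (arbitrary names):
+      //   template struct X<int>;        // explicit instantiation definition
+      //   extern template struct X<int>; // error: declaration after definition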
+ Diag(NewLoc,
+ diag::err_explicit_instantiation_declaration_after_definition);
+
+ // Explicit instantiations following a specialization have no effect and
+ // hence no PrevPointOfInstantiation. In that case, walk decl backwards
+ // until a valid name loc is found.
+ Diag(DiagLocForExplicitInstantiation(PrevDecl, PrevPointOfInstantiation),
+ diag::note_explicit_instantiation_definition_here);
+ HasNoEffect = true;
+ return false;
+ }
+
+ case TSK_ExplicitInstantiationDefinition:
+ switch (PrevTSK) {
+ case TSK_Undeclared:
+ case TSK_ImplicitInstantiation:
+ // We're explicitly instantiating something that may have already been
+ // implicitly instantiated; that's fine.
+ return false;
+
+ case TSK_ExplicitSpecialization:
+ // C++ DR 259, C++0x [temp.explicit]p4:
+ // For a given set of template parameters, if an explicit
+ // instantiation of a template appears after a declaration of
+ // an explicit specialization for that template, the explicit
+ // instantiation has no effect.
+ //
+ // In C++98/03 mode, we only give an extension warning here, because it
+ // is not harmful to try to explicitly instantiate something that
+ // has been explicitly specialized.
+ Diag(NewLoc, getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_explicit_instantiation_after_specialization :
+ diag::ext_explicit_instantiation_after_specialization)
+ << PrevDecl;
+ Diag(PrevDecl->getLocation(),
+ diag::note_previous_template_specialization);
+ HasNoEffect = true;
+ return false;
+
+ case TSK_ExplicitInstantiationDeclaration:
+    // We're explicitly instantiating a definition for something for which we
+ // were previously asked to suppress instantiations. That's fine.
+
+ // C++0x [temp.explicit]p4:
+ // For a given set of template parameters, if an explicit instantiation
+ // of a template appears after a declaration of an explicit
+ // specialization for that template, the explicit instantiation has no
+ // effect.
+ for (Decl *Prev = PrevDecl; Prev; Prev = Prev->getPreviousDecl()) {
+ // Is there any previous explicit specialization declaration?
+ if (getTemplateSpecializationKind(Prev) == TSK_ExplicitSpecialization) {
+ HasNoEffect = true;
+ break;
+ }
+ }
+
+ return false;
+
+ case TSK_ExplicitInstantiationDefinition:
+ // C++0x [temp.spec]p5:
+ // For a given template and a given set of template-arguments,
+ // - an explicit instantiation definition shall appear at most once
+ // in a program,
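+    //
+    // For illustration (arbitrary names):
+    //   template struct X<int>;
+    //   template struct X<int>; // error (extension under -fms-compatibility)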
+
+ // MSVCCompat: MSVC silently ignores duplicate explicit instantiations.
+ Diag(NewLoc, (getLangOpts().MSVCCompat)
+ ? diag::ext_explicit_instantiation_duplicate
+ : diag::err_explicit_instantiation_duplicate)
+ << PrevDecl;
+ Diag(DiagLocForExplicitInstantiation(PrevDecl, PrevPointOfInstantiation),
+ diag::note_previous_explicit_instantiation);
+ HasNoEffect = true;
+ return false;
+ }
+ }
+
+ llvm_unreachable("Missing specialization/instantiation case?");
+}
+
+/// \brief Perform semantic analysis for the given dependent function
+/// template specialization.
+///
+/// The only possible way to get a dependent function template specialization
+/// is with a friend declaration, like so:
+///
+/// \code
+/// template \<class T> void foo(T);
+/// template \<class T> class A {
+/// friend void foo<>(T);
+/// };
+/// \endcode
+///
+/// There really isn't any useful analysis we can do here, so we
+/// just store the information.
+bool
+Sema::CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
+ const TemplateArgumentListInfo &ExplicitTemplateArgs,
+ LookupResult &Previous) {
+ // Remove anything from Previous that isn't a function template in
+ // the correct context.
+ DeclContext *FDLookupContext = FD->getDeclContext()->getRedeclContext();
+ LookupResult::Filter F = Previous.makeFilter();
+ while (F.hasNext()) {
+ NamedDecl *D = F.next()->getUnderlyingDecl();
+ if (!isa<FunctionTemplateDecl>(D) ||
+ !FDLookupContext->InEnclosingNamespaceSetOf(
+ D->getDeclContext()->getRedeclContext()))
+ F.erase();
+ }
+ F.done();
+
+ // Should this be diagnosed here?
+ if (Previous.empty()) return true;
+
+ FD->setDependentTemplateSpecialization(Context, Previous.asUnresolvedSet(),
+ ExplicitTemplateArgs);
+ return false;
+}
+
+/// \brief Perform semantic analysis for the given function template
+/// specialization.
+///
+/// This routine performs all of the semantic analysis required for an
+/// explicit function template specialization. On successful completion,
+/// the function declaration \p FD will become a function template
+/// specialization.
+///
+/// \param FD the function declaration, which will be updated to become a
+/// function template specialization.
+///
+/// \param ExplicitTemplateArgs the explicitly-provided template arguments,
+/// if any. Note that this may be non-null even when zero arguments are
+/// explicitly provided, as in, e.g., \c void sort<>(char*, char*);
+/// it still carries the locations of the angle brackets.
+///
+/// \param Previous the set of declarations that may be specialized by
+/// this function specialization.
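+///
+/// For illustration, a sketch of such a specialization (names are arbitrary):
+///
+/// \code
+/// template \<class T> void sort(T*, T*);
+/// template<> void sort<>(char*, char*); // T deduced as char
+/// \endcode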
+bool Sema::CheckFunctionTemplateSpecialization(
+ FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
+ LookupResult &Previous) {
+ // The set of function template specializations that could match this
+ // explicit function template specialization.
+ UnresolvedSet<8> Candidates;
+ TemplateSpecCandidateSet FailedCandidates(FD->getLocation(),
+ /*ForTakingAddress=*/false);
+
+ llvm::SmallDenseMap<FunctionDecl *, TemplateArgumentListInfo, 8>
+ ConvertedTemplateArgs;
+
+ DeclContext *FDLookupContext = FD->getDeclContext()->getRedeclContext();
+ for (LookupResult::iterator I = Previous.begin(), E = Previous.end();
+ I != E; ++I) {
+ NamedDecl *Ovl = (*I)->getUnderlyingDecl();
+ if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(Ovl)) {
+ // Only consider templates found within the same semantic lookup scope as
+ // FD.
+ if (!FDLookupContext->InEnclosingNamespaceSetOf(
+ Ovl->getDeclContext()->getRedeclContext()))
+ continue;
+
+ // When matching a constexpr member function template specialization
+ // against the primary template, we don't yet know whether the
+ // specialization has an implicit 'const' (because we don't know whether
+ // it will be a static member function until we know which template it
+ // specializes), so adjust it now assuming it specializes this template.
+ QualType FT = FD->getType();
+ if (FD->isConstexpr()) {
+ CXXMethodDecl *OldMD =
+ dyn_cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl());
+ if (OldMD && OldMD->isConst()) {
+ const FunctionProtoType *FPT = FT->castAs<FunctionProtoType>();
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ EPI.TypeQuals |= Qualifiers::Const;
+ FT = Context.getFunctionType(FPT->getReturnType(),
+ FPT->getParamTypes(), EPI);
+ }
+ }
+
+ TemplateArgumentListInfo Args;
+ if (ExplicitTemplateArgs)
+ Args = *ExplicitTemplateArgs;
+
+ // C++ [temp.expl.spec]p11:
+ // A trailing template-argument can be left unspecified in the
+ // template-id naming an explicit function template specialization
+ // provided it can be deduced from the function argument type.
+ // Perform template argument deduction to determine whether we may be
+ // specializing this template.
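+      // For example, given 'template<> void sort<>(char*, char*);', the
+      // argument for T is deduced as char from the parameter types.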
+      // FIXME: It is somewhat wasteful to build the deduction info for every
+      // candidate, since most will be discarded.
+ TemplateDeductionInfo Info(FailedCandidates.getLocation());
+ FunctionDecl *Specialization = nullptr;
+ if (TemplateDeductionResult TDK = DeduceTemplateArguments(
+ cast<FunctionTemplateDecl>(FunTmpl->getFirstDecl()),
+ ExplicitTemplateArgs ? &Args : nullptr, FT, Specialization, Info)) {
+ // Template argument deduction failed; record why it failed, so
+ // that we can provide nifty diagnostics.
+ FailedCandidates.addCandidate()
+ .set(FunTmpl->getTemplatedDecl(),
+ MakeDeductionFailureInfo(Context, TDK, Info));
+ (void)TDK;
+ continue;
+ }
+
+ // Record this candidate.
+ if (ExplicitTemplateArgs)
+ ConvertedTemplateArgs[Specialization] = std::move(Args);
+ Candidates.addDecl(Specialization, I.getAccess());
+ }
+ }
+
+ // Find the most specialized function template.
+ UnresolvedSetIterator Result = getMostSpecialized(
+ Candidates.begin(), Candidates.end(), FailedCandidates,
+ FD->getLocation(),
+ PDiag(diag::err_function_template_spec_no_match) << FD->getDeclName(),
+ PDiag(diag::err_function_template_spec_ambiguous)
+ << FD->getDeclName() << (ExplicitTemplateArgs != nullptr),
+ PDiag(diag::note_function_template_spec_matched));
+
+ if (Result == Candidates.end())
+ return true;
+
+ // Ignore access information; it doesn't figure into redeclaration checking.
+ FunctionDecl *Specialization = cast<FunctionDecl>(*Result);
+
+ FunctionTemplateSpecializationInfo *SpecInfo
+ = Specialization->getTemplateSpecializationInfo();
+ assert(SpecInfo && "Function template specialization info missing?");
+
+ // Note: do not overwrite location info if previous template
+ // specialization kind was explicit.
+ TemplateSpecializationKind TSK = SpecInfo->getTemplateSpecializationKind();
+ if (TSK == TSK_Undeclared || TSK == TSK_ImplicitInstantiation) {
+ Specialization->setLocation(FD->getLocation());
+ // C++11 [dcl.constexpr]p1: An explicit specialization of a constexpr
+ // function can differ from the template declaration with respect to
+ // the constexpr specifier.
+ Specialization->setConstexpr(FD->isConstexpr());
+ }
+
+ // FIXME: Check if the prior specialization has a point of instantiation.
+  // If so, we have run afoul of [temp.expl.spec]p6.
+
+ // If this is a friend declaration, then we're not really declaring
+ // an explicit specialization.
+ bool isFriend = (FD->getFriendObjectKind() != Decl::FOK_None);
+
+ // Check the scope of this explicit specialization.
+ if (!isFriend &&
+ CheckTemplateSpecializationScope(*this,
+ Specialization->getPrimaryTemplate(),
+ Specialization, FD->getLocation(),
+ false))
+ return true;
+
+ // C++ [temp.expl.spec]p6:
+ // If a template, a member template or the member of a class template is
+ // explicitly specialized then that specialization shall be declared
+ // before the first use of that specialization that would cause an implicit
+ // instantiation to take place, in every translation unit in which such a
+ // use occurs; no diagnostic is required.
+ bool HasNoEffect = false;
+ if (!isFriend &&
+ CheckSpecializationInstantiationRedecl(FD->getLocation(),
+ TSK_ExplicitSpecialization,
+ Specialization,
+ SpecInfo->getTemplateSpecializationKind(),
+ SpecInfo->getPointOfInstantiation(),
+ HasNoEffect))
+ return true;
+
+ // Mark the prior declaration as an explicit specialization, so that later
+ // clients know that this is an explicit specialization.
+ if (!isFriend) {
+ SpecInfo->setTemplateSpecializationKind(TSK_ExplicitSpecialization);
+ MarkUnusedFileScopedDecl(Specialization);
+ }
+
+ // Turn the given function declaration into a function template
+ // specialization, with the template arguments from the previous
+ // specialization.
+ // Take copies of (semantic and syntactic) template argument lists.
+ const TemplateArgumentList* TemplArgs = new (Context)
+ TemplateArgumentList(Specialization->getTemplateSpecializationArgs());
+ FD->setFunctionTemplateSpecialization(
+ Specialization->getPrimaryTemplate(), TemplArgs, /*InsertPos=*/nullptr,
+ SpecInfo->getTemplateSpecializationKind(),
+ ExplicitTemplateArgs ? &ConvertedTemplateArgs[Specialization] : nullptr);
+
+ // The "previous declaration" for this function template specialization is
+ // the prior function template specialization.
+ Previous.clear();
+ Previous.addDecl(Specialization);
+ return false;
+}
+
+/// \brief Perform semantic analysis for the given non-template member
+/// specialization.
+///
+/// This routine performs all of the semantic analysis required for an
+/// explicit member specialization. On successful completion, the
+/// declaration \p Member will become a member specialization.
+///
+/// \param Member the member declaration, which will be updated to become a
+/// specialization.
+///
+/// \param Previous the set of declarations, one of which may be specialized
+/// by this member specialization; the set will be modified to contain the
+/// redeclared member.
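+///
+/// For illustration, the kind of declaration handled here (arbitrary names):
+///
+/// \code
+/// template \<class T> struct A { void f(); };
+/// template<> void A<int>::f() { } // member specialization
+/// \endcode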
+bool
+Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
+ assert(!isa<TemplateDecl>(Member) && "Only for non-template members");
+
+ // Try to find the member we are instantiating.
+ NamedDecl *Instantiation = nullptr;
+ NamedDecl *InstantiatedFrom = nullptr;
+ MemberSpecializationInfo *MSInfo = nullptr;
+
+ if (Previous.empty()) {
+ // Nowhere to look anyway.
+ } else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Member)) {
+ for (LookupResult::iterator I = Previous.begin(), E = Previous.end();
+ I != E; ++I) {
+ NamedDecl *D = (*I)->getUnderlyingDecl();
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ QualType Adjusted = Function->getType();
+ if (!hasExplicitCallingConv(Adjusted))
+ Adjusted = adjustCCAndNoReturn(Adjusted, Method->getType());
+ if (Context.hasSameType(Adjusted, Method->getType())) {
+ Instantiation = Method;
+ InstantiatedFrom = Method->getInstantiatedFromMemberFunction();
+ MSInfo = Method->getMemberSpecializationInfo();
+ break;
+ }
+ }
+ }
+ } else if (isa<VarDecl>(Member)) {
+ VarDecl *PrevVar;
+ if (Previous.isSingleResult() &&
+ (PrevVar = dyn_cast<VarDecl>(Previous.getFoundDecl())))
+ if (PrevVar->isStaticDataMember()) {
+ Instantiation = PrevVar;
+ InstantiatedFrom = PrevVar->getInstantiatedFromStaticDataMember();
+ MSInfo = PrevVar->getMemberSpecializationInfo();
+ }
+ } else if (isa<RecordDecl>(Member)) {
+ CXXRecordDecl *PrevRecord;
+ if (Previous.isSingleResult() &&
+ (PrevRecord = dyn_cast<CXXRecordDecl>(Previous.getFoundDecl()))) {
+ Instantiation = PrevRecord;
+ InstantiatedFrom = PrevRecord->getInstantiatedFromMemberClass();
+ MSInfo = PrevRecord->getMemberSpecializationInfo();
+ }
+ } else if (isa<EnumDecl>(Member)) {
+ EnumDecl *PrevEnum;
+ if (Previous.isSingleResult() &&
+ (PrevEnum = dyn_cast<EnumDecl>(Previous.getFoundDecl()))) {
+ Instantiation = PrevEnum;
+ InstantiatedFrom = PrevEnum->getInstantiatedFromMemberEnum();
+ MSInfo = PrevEnum->getMemberSpecializationInfo();
+ }
+ }
+
+ if (!Instantiation) {
+ // There is no previous declaration that matches. Since member
+ // specializations are always out-of-line, the caller will complain about
+ // this mismatch later.
+ return false;
+ }
+
+ // If this is a friend, just bail out here before we start turning
+ // things into explicit specializations.
+ if (Member->getFriendObjectKind() != Decl::FOK_None) {
+ // Preserve instantiation information.
+ if (InstantiatedFrom && isa<CXXMethodDecl>(Member)) {
+ cast<CXXMethodDecl>(Member)->setInstantiationOfMemberFunction(
+ cast<CXXMethodDecl>(InstantiatedFrom),
+ cast<CXXMethodDecl>(Instantiation)->getTemplateSpecializationKind());
+ } else if (InstantiatedFrom && isa<CXXRecordDecl>(Member)) {
+ cast<CXXRecordDecl>(Member)->setInstantiationOfMemberClass(
+ cast<CXXRecordDecl>(InstantiatedFrom),
+ cast<CXXRecordDecl>(Instantiation)->getTemplateSpecializationKind());
+ }
+
+ Previous.clear();
+ Previous.addDecl(Instantiation);
+ return false;
+ }
+
+ // Make sure that this is a specialization of a member.
+ if (!InstantiatedFrom) {
+ Diag(Member->getLocation(), diag::err_spec_member_not_instantiated)
+ << Member;
+ Diag(Instantiation->getLocation(), diag::note_specialized_decl);
+ return true;
+ }
+
+ // C++ [temp.expl.spec]p6:
+ // If a template, a member template or the member of a class template is
+ // explicitly specialized then that specialization shall be declared
+ // before the first use of that specialization that would cause an implicit
+ // instantiation to take place, in every translation unit in which such a
+ // use occurs; no diagnostic is required.
+ assert(MSInfo && "Member specialization info missing?");
+
+ bool HasNoEffect = false;
+ if (CheckSpecializationInstantiationRedecl(Member->getLocation(),
+ TSK_ExplicitSpecialization,
+ Instantiation,
+ MSInfo->getTemplateSpecializationKind(),
+ MSInfo->getPointOfInstantiation(),
+ HasNoEffect))
+ return true;
+
+ // Check the scope of this explicit specialization.
+ if (CheckTemplateSpecializationScope(*this,
+ InstantiatedFrom,
+ Instantiation, Member->getLocation(),
+ false))
+ return true;
+
+  // Note that this is an explicit specialization of a member, and update
+  // the original declaration to note that it is an explicit specialization
+ // (if it was previously an implicit instantiation). This latter step
+ // makes bookkeeping easier.
+ if (isa<FunctionDecl>(Member)) {
+ FunctionDecl *InstantiationFunction = cast<FunctionDecl>(Instantiation);
+ if (InstantiationFunction->getTemplateSpecializationKind() ==
+ TSK_ImplicitInstantiation) {
+ InstantiationFunction->setTemplateSpecializationKind(
+ TSK_ExplicitSpecialization);
+ InstantiationFunction->setLocation(Member->getLocation());
+ }
+
+ cast<FunctionDecl>(Member)->setInstantiationOfMemberFunction(
+ cast<CXXMethodDecl>(InstantiatedFrom),
+ TSK_ExplicitSpecialization);
+ MarkUnusedFileScopedDecl(InstantiationFunction);
+ } else if (isa<VarDecl>(Member)) {
+ VarDecl *InstantiationVar = cast<VarDecl>(Instantiation);
+ if (InstantiationVar->getTemplateSpecializationKind() ==
+ TSK_ImplicitInstantiation) {
+ InstantiationVar->setTemplateSpecializationKind(
+ TSK_ExplicitSpecialization);
+ InstantiationVar->setLocation(Member->getLocation());
+ }
+
+ cast<VarDecl>(Member)->setInstantiationOfStaticDataMember(
+ cast<VarDecl>(InstantiatedFrom), TSK_ExplicitSpecialization);
+ MarkUnusedFileScopedDecl(InstantiationVar);
+ } else if (isa<CXXRecordDecl>(Member)) {
+ CXXRecordDecl *InstantiationClass = cast<CXXRecordDecl>(Instantiation);
+ if (InstantiationClass->getTemplateSpecializationKind() ==
+ TSK_ImplicitInstantiation) {
+ InstantiationClass->setTemplateSpecializationKind(
+ TSK_ExplicitSpecialization);
+ InstantiationClass->setLocation(Member->getLocation());
+ }
+
+ cast<CXXRecordDecl>(Member)->setInstantiationOfMemberClass(
+ cast<CXXRecordDecl>(InstantiatedFrom),
+ TSK_ExplicitSpecialization);
+ } else {
+ assert(isa<EnumDecl>(Member) && "Only member enums remain");
+ EnumDecl *InstantiationEnum = cast<EnumDecl>(Instantiation);
+ if (InstantiationEnum->getTemplateSpecializationKind() ==
+ TSK_ImplicitInstantiation) {
+ InstantiationEnum->setTemplateSpecializationKind(
+ TSK_ExplicitSpecialization);
+ InstantiationEnum->setLocation(Member->getLocation());
+ }
+
+ cast<EnumDecl>(Member)->setInstantiationOfMemberEnum(
+ cast<EnumDecl>(InstantiatedFrom), TSK_ExplicitSpecialization);
+ }
+
+ // Save the caller the trouble of having to figure out which declaration
+ // this specialization matches.
+ Previous.clear();
+ Previous.addDecl(Instantiation);
+ return false;
+}
+
+/// \brief Check the scope of an explicit instantiation.
+///
+/// \returns true if a serious error occurs, false otherwise.
+static bool CheckExplicitInstantiationScope(Sema &S, NamedDecl *D,
+ SourceLocation InstLoc,
+ bool WasQualifiedName) {
+  DeclContext *OrigContext =
+      D->getDeclContext()->getEnclosingNamespaceContext();
+ DeclContext *CurContext = S.CurContext->getRedeclContext();
+
+ if (CurContext->isRecord()) {
+ S.Diag(InstLoc, diag::err_explicit_instantiation_in_class)
+ << D;
+ return true;
+ }
+
+ // C++11 [temp.explicit]p3:
+ // An explicit instantiation shall appear in an enclosing namespace of its
+ // template. If the name declared in the explicit instantiation is an
+ // unqualified name, the explicit instantiation shall appear in the
+ // namespace where its template is declared or, if that namespace is inline
+ // (7.3.1), any namespace from its enclosing namespace set.
+ //
+ // This is DR275, which we do not retroactively apply to C++98/03.
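+  //
+  // For illustration (arbitrary names):
+  //   namespace N { template <class T> struct X {}; }
+  //   template struct N::X<int>;  // OK: qualified name, enclosing namespace
+  //   namespace M { template struct N::X<float>; } // diagnosed here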
+ if (WasQualifiedName) {
+ if (CurContext->Encloses(OrigContext))
+ return false;
+ } else {
+ if (CurContext->InEnclosingNamespaceSetOf(OrigContext))
+ return false;
+ }
+
+ if (NamespaceDecl *NS = dyn_cast<NamespaceDecl>(OrigContext)) {
+ if (WasQualifiedName)
+ S.Diag(InstLoc,
+ S.getLangOpts().CPlusPlus11?
+ diag::err_explicit_instantiation_out_of_scope :
+ diag::warn_explicit_instantiation_out_of_scope_0x)
+ << D << NS;
+ else
+ S.Diag(InstLoc,
+ S.getLangOpts().CPlusPlus11?
+ diag::err_explicit_instantiation_unqualified_wrong_namespace :
+ diag::warn_explicit_instantiation_unqualified_wrong_namespace_0x)
+ << D << NS;
+ } else
+ S.Diag(InstLoc,
+ S.getLangOpts().CPlusPlus11?
+ diag::err_explicit_instantiation_must_be_global :
+ diag::warn_explicit_instantiation_must_be_global_0x)
+ << D;
+ S.Diag(D->getLocation(), diag::note_explicit_instantiation_here);
+ return false;
+}
+
+/// \brief Determine whether the given scope specifier has a template-id in it.
+static bool ScopeSpecifierHasTemplateId(const CXXScopeSpec &SS) {
+ if (!SS.isSet())
+ return false;
+
+ // C++11 [temp.explicit]p3:
+ // If the explicit instantiation is for a member function, a member class
+ // or a static data member of a class template specialization, the name of
+ // the class template specialization in the qualified-id for the member
+ // name shall be a simple-template-id.
+ //
+ // C++98 has the same restriction, just worded differently.
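+  //
+  // For illustration: given 'template <class T> struct A { struct B {}; };',
+  //   template struct A<int>::B; // OK: A<int> is a simple-template-id
+  //   typedef A<int> AI;
+  //   template struct AI::B;     // extension: scope lacks a template-id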
+ for (NestedNameSpecifier *NNS = SS.getScopeRep(); NNS;
+ NNS = NNS->getPrefix())
+ if (const Type *T = NNS->getAsType())
+ if (isa<TemplateSpecializationType>(T))
+ return true;
+
+ return false;
+}
+
+// Explicit instantiation of a class template specialization
+DeclResult
+Sema::ActOnExplicitInstantiation(Scope *S,
+ SourceLocation ExternLoc,
+ SourceLocation TemplateLoc,
+ unsigned TagSpec,
+ SourceLocation KWLoc,
+ const CXXScopeSpec &SS,
+ TemplateTy TemplateD,
+ SourceLocation TemplateNameLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgsIn,
+ SourceLocation RAngleLoc,
+ AttributeList *Attr) {
+ // Find the class template we're specializing
+ TemplateName Name = TemplateD.get();
+ TemplateDecl *TD = Name.getAsTemplateDecl();
+ // Check that the specialization uses the same tag kind as the
+ // original template.
+ TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
+ assert(Kind != TTK_Enum &&
+ "Invalid enum tag in class template explicit instantiation!");
+
+ if (isa<TypeAliasTemplateDecl>(TD)) {
+ Diag(KWLoc, diag::err_tag_reference_non_tag) << Kind;
+ Diag(TD->getTemplatedDecl()->getLocation(),
+ diag::note_previous_use);
+ return true;
+ }
+
+ ClassTemplateDecl *ClassTemplate = cast<ClassTemplateDecl>(TD);
+
+ if (!isAcceptableTagRedeclaration(ClassTemplate->getTemplatedDecl(),
+ Kind, /*isDefinition*/false, KWLoc,
+ ClassTemplate->getIdentifier())) {
+ Diag(KWLoc, diag::err_use_with_wrong_tag)
+ << ClassTemplate
+ << FixItHint::CreateReplacement(KWLoc,
+ ClassTemplate->getTemplatedDecl()->getKindName());
+ Diag(ClassTemplate->getTemplatedDecl()->getLocation(),
+ diag::note_previous_use);
+ Kind = ClassTemplate->getTemplatedDecl()->getTagKind();
+ }
+
+ // C++0x [temp.explicit]p2:
+ // There are two forms of explicit instantiation: an explicit instantiation
+ // definition and an explicit instantiation declaration. An explicit
+ // instantiation declaration begins with the extern keyword. [...]
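+  //
+  // For illustration (arbitrary names):
+  //   template class vector<int>;        // explicit instantiation definition
+  //   extern template class vector<int>; // explicit instantiation declaration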
+ TemplateSpecializationKind TSK = ExternLoc.isInvalid()
+ ? TSK_ExplicitInstantiationDefinition
+ : TSK_ExplicitInstantiationDeclaration;
+
+ if (TSK == TSK_ExplicitInstantiationDeclaration) {
+ // Check for dllexport class template instantiation declarations.
+ for (AttributeList *A = Attr; A; A = A->getNext()) {
+ if (A->getKind() == AttributeList::AT_DLLExport) {
+ Diag(ExternLoc,
+ diag::warn_attribute_dllexport_explicit_instantiation_decl);
+ Diag(A->getLoc(), diag::note_attribute);
+ break;
+ }
+ }
+
+ if (auto *A = ClassTemplate->getTemplatedDecl()->getAttr<DLLExportAttr>()) {
+ Diag(ExternLoc,
+ diag::warn_attribute_dllexport_explicit_instantiation_decl);
+ Diag(A->getLocation(), diag::note_attribute);
+ }
+ }
+
+  // Translate the parser's template argument list into our AST format.
+ TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
+ translateTemplateArguments(TemplateArgsIn, TemplateArgs);
+
+ // Check that the template argument list is well-formed for this
+ // template.
+ SmallVector<TemplateArgument, 4> Converted;
+ if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc,
+ TemplateArgs, false, Converted))
+ return true;
+
+ // Find the class template specialization declaration that
+ // corresponds to these arguments.
+ void *InsertPos = nullptr;
+ ClassTemplateSpecializationDecl *PrevDecl
+ = ClassTemplate->findSpecialization(Converted, InsertPos);
+
+ TemplateSpecializationKind PrevDecl_TSK
+ = PrevDecl ? PrevDecl->getTemplateSpecializationKind() : TSK_Undeclared;
+
+ // C++0x [temp.explicit]p2:
+ // [...] An explicit instantiation shall appear in an enclosing
+ // namespace of its template. [...]
+ //
+ // This is C++ DR 275.
+ if (CheckExplicitInstantiationScope(*this, ClassTemplate, TemplateNameLoc,
+ SS.isSet()))
+ return true;
+
+ ClassTemplateSpecializationDecl *Specialization = nullptr;
+
+ bool HasNoEffect = false;
+ if (PrevDecl) {
+ if (CheckSpecializationInstantiationRedecl(TemplateNameLoc, TSK,
+ PrevDecl, PrevDecl_TSK,
+ PrevDecl->getPointOfInstantiation(),
+ HasNoEffect))
+ return PrevDecl;
+
+ // Even though HasNoEffect == true means that this explicit instantiation
+ // has no effect on semantics, we go on to put its syntax in the AST.
+
+ if (PrevDecl_TSK == TSK_ImplicitInstantiation ||
+ PrevDecl_TSK == TSK_Undeclared) {
+ // Since the only prior class template specialization with these
+ // arguments was referenced but not declared, reuse that
+ // declaration node as our own, updating the source location
+ // for the template name to reflect our new declaration.
+ // (Other source locations will be updated later.)
+ Specialization = PrevDecl;
+ Specialization->setLocation(TemplateNameLoc);
+ PrevDecl = nullptr;
+ }
+ }
+
+ if (!Specialization) {
+    // Create a new class template specialization declaration node for
+    // this explicit instantiation.
+ Specialization
+ = ClassTemplateSpecializationDecl::Create(Context, Kind,
+ ClassTemplate->getDeclContext(),
+ KWLoc, TemplateNameLoc,
+ ClassTemplate,
+ Converted.data(),
+ Converted.size(),
+ PrevDecl);
+ SetNestedNameSpecifier(Specialization, SS);
+
+ if (!HasNoEffect && !PrevDecl) {
+ // Insert the new specialization.
+ ClassTemplate->AddSpecialization(Specialization, InsertPos);
+ }
+ }
+
+ // Build the fully-sugared type for this explicit instantiation as
+ // the user wrote in the explicit instantiation itself. This means
+ // that we'll pretty-print the type retrieved from the
+ // specialization's declaration the way that the user actually wrote
+ // the explicit instantiation, rather than formatting the name based
+ // on the "canonical" representation used to store the template
+ // arguments in the specialization.
+ TypeSourceInfo *WrittenTy
+ = Context.getTemplateSpecializationTypeInfo(Name, TemplateNameLoc,
+ TemplateArgs,
+ Context.getTypeDeclType(Specialization));
+ Specialization->setTypeAsWritten(WrittenTy);
+
+ // Set source locations for keywords.
+ Specialization->setExternLoc(ExternLoc);
+ Specialization->setTemplateKeywordLoc(TemplateLoc);
+ Specialization->setRBraceLoc(SourceLocation());
+
+ if (Attr)
+ ProcessDeclAttributeList(S, Specialization, Attr);
+
+ // Add the explicit instantiation into its lexical context. However,
+ // since explicit instantiations are never found by name lookup, we
+ // just put it into the declaration context directly.
+ Specialization->setLexicalDeclContext(CurContext);
+ CurContext->addDecl(Specialization);
+
+ // Syntax is now OK, so return if it has no other effect on semantics.
+ if (HasNoEffect) {
+ // Set the template specialization kind.
+ Specialization->setTemplateSpecializationKind(TSK);
+ return Specialization;
+ }
+
+ // C++ [temp.explicit]p3:
+ // A definition of a class template or class member template
+ // shall be in scope at the point of the explicit instantiation of
+ // the class template or class member template.
+ //
+ // This check comes when we actually try to perform the
+ // instantiation.
+ ClassTemplateSpecializationDecl *Def
+ = cast_or_null<ClassTemplateSpecializationDecl>(
+ Specialization->getDefinition());
+ if (!Def)
+ InstantiateClassTemplateSpecialization(TemplateNameLoc, Specialization, TSK);
+ else if (TSK == TSK_ExplicitInstantiationDefinition) {
+ MarkVTableUsed(TemplateNameLoc, Specialization, true);
+ Specialization->setPointOfInstantiation(Def->getPointOfInstantiation());
+ }
+
+ // Instantiate the members of this class template specialization.
+ Def = cast_or_null<ClassTemplateSpecializationDecl>(
+ Specialization->getDefinition());
+ if (Def) {
+ TemplateSpecializationKind Old_TSK = Def->getTemplateSpecializationKind();
+
+ // Fix a TSK_ExplicitInstantiationDeclaration followed by a
+ // TSK_ExplicitInstantiationDefinition
+ if (Old_TSK == TSK_ExplicitInstantiationDeclaration &&
+ TSK == TSK_ExplicitInstantiationDefinition) {
+ // FIXME: Need to notify the ASTMutationListener that we did this.
+ Def->setTemplateSpecializationKind(TSK);
+
+ if (!getDLLAttr(Def) && getDLLAttr(Specialization) &&
+ Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ // In the MS ABI, an explicit instantiation definition can add a dll
+ // attribute to a template with a previous instantiation declaration.
+ // MinGW doesn't allow this.
+ auto *A = cast<InheritableAttr>(
+ getDLLAttr(Specialization)->clone(getASTContext()));
+ A->setInherited(true);
+ Def->addAttr(A);
+ checkClassLevelDLLAttribute(Def);
+
+ // Propagate attribute to base class templates.
+ for (auto &B : Def->bases()) {
+ if (auto *BT = dyn_cast_or_null<ClassTemplateSpecializationDecl>(
+ B.getType()->getAsCXXRecordDecl()))
+ propagateDLLAttrToBaseClassTemplate(Def, A, BT, B.getLocStart());
+ }
+ }
+ }
+
+ // Set the template specialization kind. Make sure it is set before
+ // instantiating the members which will trigger ASTConsumer callbacks.
+ Specialization->setTemplateSpecializationKind(TSK);
+ InstantiateClassTemplateSpecializationMembers(TemplateNameLoc, Def, TSK);
+  } else {
+    // Set the template specialization kind.
+ Specialization->setTemplateSpecializationKind(TSK);
+ }
+
+ return Specialization;
+}
+
+// Explicit instantiation of a member class of a class template.
+DeclResult
+Sema::ActOnExplicitInstantiation(Scope *S,
+ SourceLocation ExternLoc,
+ SourceLocation TemplateLoc,
+ unsigned TagSpec,
+ SourceLocation KWLoc,
+ CXXScopeSpec &SS,
+ IdentifierInfo *Name,
+ SourceLocation NameLoc,
+ AttributeList *Attr) {
+
+ bool Owned = false;
+ bool IsDependent = false;
+ Decl *TagD = ActOnTag(S, TagSpec, Sema::TUK_Reference,
+ KWLoc, SS, Name, NameLoc, Attr, AS_none,
+ /*ModulePrivateLoc=*/SourceLocation(),
+ MultiTemplateParamsArg(), Owned, IsDependent,
+ SourceLocation(), false, TypeResult(),
+ /*IsTypeSpecifier*/false);
+  assert(!IsDependent &&
+         "explicit instantiation of dependent name not yet handled");
+
+ if (!TagD)
+ return true;
+
+ TagDecl *Tag = cast<TagDecl>(TagD);
+ assert(!Tag->isEnum() && "shouldn't see enumerations here");
+
+ if (Tag->isInvalidDecl())
+ return true;
+
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(Tag);
+ CXXRecordDecl *Pattern = Record->getInstantiatedFromMemberClass();
+ if (!Pattern) {
+ Diag(TemplateLoc, diag::err_explicit_instantiation_nontemplate_type)
+ << Context.getTypeDeclType(Record);
+ Diag(Record->getLocation(), diag::note_nontemplate_decl_here);
+ return true;
+ }
+
+ // C++0x [temp.explicit]p2:
+ // If the explicit instantiation is for a class or member class, the
+ // elaborated-type-specifier in the declaration shall include a
+ // simple-template-id.
+ //
+ // C++98 has the same restriction, just worded differently.
+ if (!ScopeSpecifierHasTemplateId(SS))
+ Diag(TemplateLoc, diag::ext_explicit_instantiation_without_qualified_id)
+ << Record << SS.getRange();
+
+ // C++0x [temp.explicit]p2:
+ // There are two forms of explicit instantiation: an explicit instantiation
+ // definition and an explicit instantiation declaration. An explicit
+ // instantiation declaration begins with the extern keyword. [...]
+ TemplateSpecializationKind TSK
+ = ExternLoc.isInvalid()? TSK_ExplicitInstantiationDefinition
+ : TSK_ExplicitInstantiationDeclaration;
+
+ // C++0x [temp.explicit]p2:
+ // [...] An explicit instantiation shall appear in an enclosing
+ // namespace of its template. [...]
+ //
+ // This is C++ DR 275.
+ CheckExplicitInstantiationScope(*this, Record, NameLoc, true);
+
+ // Verify that it is okay to explicitly instantiate here.
+ CXXRecordDecl *PrevDecl
+ = cast_or_null<CXXRecordDecl>(Record->getPreviousDecl());
+ if (!PrevDecl && Record->getDefinition())
+ PrevDecl = Record;
+ if (PrevDecl) {
+ MemberSpecializationInfo *MSInfo = PrevDecl->getMemberSpecializationInfo();
+ bool HasNoEffect = false;
+ assert(MSInfo && "No member specialization information?");
+ if (CheckSpecializationInstantiationRedecl(TemplateLoc, TSK,
+ PrevDecl,
+ MSInfo->getTemplateSpecializationKind(),
+ MSInfo->getPointOfInstantiation(),
+ HasNoEffect))
+ return true;
+ if (HasNoEffect)
+ return TagD;
+ }
+
+ CXXRecordDecl *RecordDef
+ = cast_or_null<CXXRecordDecl>(Record->getDefinition());
+ if (!RecordDef) {
+ // C++ [temp.explicit]p3:
+ // A definition of a member class of a class template shall be in scope
+ // at the point of an explicit instantiation of the member class.
+ CXXRecordDecl *Def
+ = cast_or_null<CXXRecordDecl>(Pattern->getDefinition());
+ if (!Def) {
+ Diag(TemplateLoc, diag::err_explicit_instantiation_undefined_member)
+ << 0 << Record->getDeclName() << Record->getDeclContext();
+ Diag(Pattern->getLocation(), diag::note_forward_declaration)
+ << Pattern;
+ return true;
+ } else {
+ if (InstantiateClass(NameLoc, Record, Def,
+ getTemplateInstantiationArgs(Record),
+ TSK))
+ return true;
+
+ RecordDef = cast_or_null<CXXRecordDecl>(Record->getDefinition());
+ if (!RecordDef)
+ return true;
+ }
+ }
+
+ // Instantiate all of the members of the class.
+ InstantiateClassMembers(NameLoc, RecordDef,
+ getTemplateInstantiationArgs(Record), TSK);
+
+ if (TSK == TSK_ExplicitInstantiationDefinition)
+ MarkVTableUsed(NameLoc, RecordDef, true);
+
+ // FIXME: We don't have any representation for explicit instantiations of
+ // member classes. Such a representation is not needed for compilation, but it
+ // should be available for clients that want to see all of the declarations in
+ // the source code.
+ return TagD;
+}
+
+DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
+ SourceLocation ExternLoc,
+ SourceLocation TemplateLoc,
+ Declarator &D) {
+ // Explicit instantiations always require a name.
+ // TODO: check if/when DNInfo should replace Name.
+ DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
+ DeclarationName Name = NameInfo.getName();
+ if (!Name) {
+ if (!D.isInvalidType())
+ Diag(D.getDeclSpec().getLocStart(),
+ diag::err_explicit_instantiation_requires_name)
+ << D.getDeclSpec().getSourceRange()
+ << D.getSourceRange();
+
+ return true;
+ }
+
+ // The scope passed in may not be a decl scope. Zip up the scope tree until
+ // we find one that is.
+ while ((S->getFlags() & Scope::DeclScope) == 0 ||
+ (S->getFlags() & Scope::TemplateParamScope) != 0)
+ S = S->getParent();
+
+ // Determine the type of the declaration.
+ TypeSourceInfo *T = GetTypeForDeclarator(D, S);
+ QualType R = T->getType();
+ if (R.isNull())
+ return true;
+
+ // C++ [dcl.stc]p1:
+ // A storage-class-specifier shall not be specified in [...] an explicit
+ // instantiation (14.7.2) directive.
+ if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef) {
+ Diag(D.getIdentifierLoc(), diag::err_explicit_instantiation_of_typedef)
+ << Name;
+ return true;
+ } else if (D.getDeclSpec().getStorageClassSpec()
+ != DeclSpec::SCS_unspecified) {
+    // Complain about, then remove, the storage class specifier.
+ Diag(D.getIdentifierLoc(), diag::err_explicit_instantiation_storage_class)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
+
+ D.getMutableDeclSpec().ClearStorageClassSpecs();
+ }
+
+ // C++0x [temp.explicit]p1:
+ // [...] An explicit instantiation of a function template shall not use the
+ // inline or constexpr specifiers.
+  // Presumably, this also applies to member functions of class templates.
+ if (D.getDeclSpec().isInlineSpecified())
+ Diag(D.getDeclSpec().getInlineSpecLoc(),
+ getLangOpts().CPlusPlus11 ?
+ diag::err_explicit_instantiation_inline :
+ diag::warn_explicit_instantiation_inline_0x)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getInlineSpecLoc());
+ if (D.getDeclSpec().isConstexprSpecified() && R->isFunctionType())
+ // FIXME: Add a fix-it to remove the 'constexpr' and add a 'const' if one is
+ // not already specified.
+ Diag(D.getDeclSpec().getConstexprSpecLoc(),
+ diag::err_explicit_instantiation_constexpr);
+
+ // C++0x [temp.explicit]p2:
+ // There are two forms of explicit instantiation: an explicit instantiation
+ // definition and an explicit instantiation declaration. An explicit
+ // instantiation declaration begins with the extern keyword. [...]
+ TemplateSpecializationKind TSK
+ = ExternLoc.isInvalid()? TSK_ExplicitInstantiationDefinition
+ : TSK_ExplicitInstantiationDeclaration;
+
+ LookupResult Previous(*this, NameInfo, LookupOrdinaryName);
+ LookupParsedName(Previous, S, &D.getCXXScopeSpec());
+
+ if (!R->isFunctionType()) {
+ // C++ [temp.explicit]p1:
+ // A [...] static data member of a class template can be explicitly
+ // instantiated from the member definition associated with its class
+ // template.
+ // C++1y [temp.explicit]p1:
+ // A [...] variable [...] template specialization can be explicitly
+ // instantiated from its template.
+ if (Previous.isAmbiguous())
+ return true;
+
+ VarDecl *Prev = Previous.getAsSingle<VarDecl>();
+ VarTemplateDecl *PrevTemplate = Previous.getAsSingle<VarTemplateDecl>();
+
+ if (!PrevTemplate) {
+ if (!Prev || !Prev->isStaticDataMember()) {
+        // We expect to see a static data member here.
+ Diag(D.getIdentifierLoc(), diag::err_explicit_instantiation_not_known)
+ << Name;
+ for (LookupResult::iterator P = Previous.begin(), PEnd = Previous.end();
+ P != PEnd; ++P)
+ Diag((*P)->getLocation(), diag::note_explicit_instantiation_here);
+ return true;
+ }
+
+ if (!Prev->getInstantiatedFromStaticDataMember()) {
+ // FIXME: Check for explicit specialization?
+ Diag(D.getIdentifierLoc(),
+ diag::err_explicit_instantiation_data_member_not_instantiated)
+ << Prev;
+ Diag(Prev->getLocation(), diag::note_explicit_instantiation_here);
+ // FIXME: Can we provide a note showing where this was declared?
+ return true;
+ }
+ } else {
+ // Explicitly instantiate a variable template.
+
+ // C++1y [dcl.spec.auto]p6:
+ // ... A program that uses auto or decltype(auto) in a context not
+ // explicitly allowed in this section is ill-formed.
+ //
+ // This includes auto-typed variable template instantiations.
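+      //
+      // For illustration (arbitrary names):
+      //   template <class T> auto v = T();
+      //   template auto v<int>; // error: 'auto' not allowed here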
+ if (R->isUndeducedType()) {
+ Diag(T->getTypeLoc().getLocStart(),
+ diag::err_auto_not_allowed_var_inst);
+ return true;
+ }
+
+ if (D.getName().getKind() != UnqualifiedId::IK_TemplateId) {
+ // C++1y [temp.explicit]p3:
+ // If the explicit instantiation is for a variable, the unqualified-id
+ // in the declaration shall be a template-id.
+ Diag(D.getIdentifierLoc(),
+ diag::err_explicit_instantiation_without_template_id)
+ << PrevTemplate;
+ Diag(PrevTemplate->getLocation(),
+ diag::note_explicit_instantiation_here);
+ return true;
+ }
+
+ // Translate the parser's template argument list into our AST format.
+ TemplateArgumentListInfo TemplateArgs =
+ makeTemplateArgumentListInfo(*this, *D.getName().TemplateId);
+
+ DeclResult Res = CheckVarTemplateId(PrevTemplate, TemplateLoc,
+ D.getIdentifierLoc(), TemplateArgs);
+ if (Res.isInvalid())
+ return true;
+
+ // Ignore access control bits, we don't need them for redeclaration
+ // checking.
+ Prev = cast<VarDecl>(Res.get());
+ }
+
+ // C++0x [temp.explicit]p2:
+ // If the explicit instantiation is for a member function, a member class
+ // or a static data member of a class template specialization, the name of
+ // the class template specialization in the qualified-id for the member
+ // name shall be a simple-template-id.
+ //
+ // C++98 has the same restriction, just worded differently.
+ //
+ // This does not apply to variable template specializations, where the
+ // template-id is in the unqualified-id instead.
+ if (!ScopeSpecifierHasTemplateId(D.getCXXScopeSpec()) && !PrevTemplate)
+ Diag(D.getIdentifierLoc(),
+ diag::ext_explicit_instantiation_without_qualified_id)
+ << Prev << D.getCXXScopeSpec().getRange();
+
+ // Check the scope of this explicit instantiation.
+ CheckExplicitInstantiationScope(*this, Prev, D.getIdentifierLoc(), true);
+
+ // Verify that it is okay to explicitly instantiate here.
+ TemplateSpecializationKind PrevTSK = Prev->getTemplateSpecializationKind();
+ SourceLocation POI = Prev->getPointOfInstantiation();
+ bool HasNoEffect = false;
+ if (CheckSpecializationInstantiationRedecl(D.getIdentifierLoc(), TSK, Prev,
+ PrevTSK, POI, HasNoEffect))
+ return true;
+
+ if (!HasNoEffect) {
+ // Instantiate static data member or variable template.
+
+ Prev->setTemplateSpecializationKind(TSK, D.getIdentifierLoc());
+ if (PrevTemplate) {
+ // Merge attributes.
+ if (AttributeList *Attr = D.getDeclSpec().getAttributes().getList())
+ ProcessDeclAttributeList(S, Prev, Attr);
+ }
+ if (TSK == TSK_ExplicitInstantiationDefinition)
+ InstantiateVariableDefinition(D.getIdentifierLoc(), Prev);
+ }
+
+ // Check the new variable specialization against the parsed input.
+ if (PrevTemplate && Prev && !Context.hasSameType(Prev->getType(), R)) {
+ Diag(T->getTypeLoc().getLocStart(),
+ diag::err_invalid_var_template_spec_type)
+ << 0 << PrevTemplate << R << Prev->getType();
+ Diag(PrevTemplate->getLocation(), diag::note_template_declared_here)
+ << 2 << PrevTemplate->getDeclName();
+ return true;
+ }
+
+ // FIXME: Create an ExplicitInstantiation node?
+ return (Decl*) nullptr;
+ }
+
+ // If the declarator is a template-id, translate the parser's template
+ // argument list into our AST format.
+ bool HasExplicitTemplateArgs = false;
+ TemplateArgumentListInfo TemplateArgs;
+ if (D.getName().getKind() == UnqualifiedId::IK_TemplateId) {
+ TemplateArgs = makeTemplateArgumentListInfo(*this, *D.getName().TemplateId);
+ HasExplicitTemplateArgs = true;
+ }
+
+ // C++ [temp.explicit]p1:
+ // A [...] function [...] can be explicitly instantiated from its template.
+ // A member function [...] of a class template can be explicitly
+ // instantiated from the member definition associated with its class
+ // template.
+ UnresolvedSet<8> Matches;
+ TemplateSpecCandidateSet FailedCandidates(D.getIdentifierLoc());
+ for (LookupResult::iterator P = Previous.begin(), PEnd = Previous.end();
+ P != PEnd; ++P) {
+ NamedDecl *Prev = *P;
+ if (!HasExplicitTemplateArgs) {
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Prev)) {
+ QualType Adjusted = adjustCCAndNoReturn(R, Method->getType());
+ if (Context.hasSameUnqualifiedType(Method->getType(), Adjusted)) {
+ Matches.clear();
+
+ Matches.addDecl(Method, P.getAccess());
+ if (Method->getTemplateSpecializationKind() == TSK_Undeclared)
+ break;
+ }
+ }
+ }
+
+ FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(Prev);
+ if (!FunTmpl)
+ continue;
+
+ TemplateDeductionInfo Info(FailedCandidates.getLocation());
+ FunctionDecl *Specialization = nullptr;
+ if (TemplateDeductionResult TDK
+ = DeduceTemplateArguments(FunTmpl,
+ (HasExplicitTemplateArgs ? &TemplateArgs
+ : nullptr),
+ R, Specialization, Info)) {
+ // Keep track of almost-matches.
+ FailedCandidates.addCandidate()
+ .set(FunTmpl->getTemplatedDecl(),
+ MakeDeductionFailureInfo(Context, TDK, Info));
+ (void)TDK;
+ continue;
+ }
+
+ Matches.addDecl(Specialization, P.getAccess());
+ }
+
+ // Find the most specialized function template specialization.
+ UnresolvedSetIterator Result = getMostSpecialized(
+ Matches.begin(), Matches.end(), FailedCandidates,
+ D.getIdentifierLoc(),
+ PDiag(diag::err_explicit_instantiation_not_known) << Name,
+ PDiag(diag::err_explicit_instantiation_ambiguous) << Name,
+ PDiag(diag::note_explicit_instantiation_candidate));
+
+ if (Result == Matches.end())
+ return true;
+
+ // Ignore access control bits, we don't need them for redeclaration checking.
+ FunctionDecl *Specialization = cast<FunctionDecl>(*Result);
+
+  // C++11 [except.spec]p4:
+ // In an explicit instantiation an exception-specification may be specified,
+ // but is not required.
+ // If an exception-specification is specified in an explicit instantiation
+ // directive, it shall be compatible with the exception-specifications of
+ // other declarations of that function.
+ if (auto *FPT = R->getAs<FunctionProtoType>())
+ if (FPT->hasExceptionSpec()) {
+ unsigned DiagID =
+ diag::err_mismatched_exception_spec_explicit_instantiation;
+ if (getLangOpts().MicrosoftExt)
+ DiagID = diag::ext_mismatched_exception_spec_explicit_instantiation;
+ bool Result = CheckEquivalentExceptionSpec(
+ PDiag(DiagID) << Specialization->getType(),
+ PDiag(diag::note_explicit_instantiation_here),
+ Specialization->getType()->getAs<FunctionProtoType>(),
+ Specialization->getLocation(), FPT, D.getLocStart());
+ // In Microsoft mode, mismatching exception specifications just cause a
+ // warning.
+ if (!getLangOpts().MicrosoftExt && Result)
+ return true;
+ }
+
+ if (Specialization->getTemplateSpecializationKind() == TSK_Undeclared) {
+ Diag(D.getIdentifierLoc(),
+ diag::err_explicit_instantiation_member_function_not_instantiated)
+ << Specialization
+ << (Specialization->getTemplateSpecializationKind() ==
+ TSK_ExplicitSpecialization);
+ Diag(Specialization->getLocation(), diag::note_explicit_instantiation_here);
+ return true;
+ }
+
+ FunctionDecl *PrevDecl = Specialization->getPreviousDecl();
+ if (!PrevDecl && Specialization->isThisDeclarationADefinition())
+ PrevDecl = Specialization;
+
+ if (PrevDecl) {
+ bool HasNoEffect = false;
+ if (CheckSpecializationInstantiationRedecl(D.getIdentifierLoc(), TSK,
+ PrevDecl,
+ PrevDecl->getTemplateSpecializationKind(),
+ PrevDecl->getPointOfInstantiation(),
+ HasNoEffect))
+ return true;
+
+ // FIXME: We may still want to build some representation of this
+ // explicit specialization.
+ if (HasNoEffect)
+ return (Decl*) nullptr;
+ }
+
+ Specialization->setTemplateSpecializationKind(TSK, D.getIdentifierLoc());
+ AttributeList *Attr = D.getDeclSpec().getAttributes().getList();
+ if (Attr)
+ ProcessDeclAttributeList(S, Specialization, Attr);
+
+ if (Specialization->isDefined()) {
+ // Let the ASTConsumer know that this function has been explicitly
+ // instantiated now, and its linkage might have changed.
+ Consumer.HandleTopLevelDecl(DeclGroupRef(Specialization));
+ } else if (TSK == TSK_ExplicitInstantiationDefinition)
+ InstantiateFunctionDefinition(D.getIdentifierLoc(), Specialization);
+
+ // C++0x [temp.explicit]p2:
+ // If the explicit instantiation is for a member function, a member class
+ // or a static data member of a class template specialization, the name of
+ // the class template specialization in the qualified-id for the member
+ // name shall be a simple-template-id.
+ //
+ // C++98 has the same restriction, just worded differently.
+ FunctionTemplateDecl *FunTmpl = Specialization->getPrimaryTemplate();
+ if (D.getName().getKind() != UnqualifiedId::IK_TemplateId && !FunTmpl &&
+ D.getCXXScopeSpec().isSet() &&
+ !ScopeSpecifierHasTemplateId(D.getCXXScopeSpec()))
+ Diag(D.getIdentifierLoc(),
+ diag::ext_explicit_instantiation_without_qualified_id)
+ << Specialization << D.getCXXScopeSpec().getRange();
+
+ CheckExplicitInstantiationScope(*this,
+ FunTmpl? (NamedDecl *)FunTmpl
+ : Specialization->getInstantiatedFromMemberFunction(),
+ D.getIdentifierLoc(),
+ D.getCXXScopeSpec().isSet());
+
+ // FIXME: Create some kind of ExplicitInstantiationDecl here.
+ return (Decl*) nullptr;
+}
+
+TypeResult
+Sema::ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
+ const CXXScopeSpec &SS, IdentifierInfo *Name,
+ SourceLocation TagLoc, SourceLocation NameLoc) {
+ // This has to hold, because SS is expected to be defined.
+ assert(Name && "Expected a name in a dependent tag");
+
+ NestedNameSpecifier *NNS = SS.getScopeRep();
+ if (!NNS)
+ return true;
+
+ TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
+
+ if (TUK == TUK_Declaration || TUK == TUK_Definition) {
+ Diag(NameLoc, diag::err_dependent_tag_decl)
+ << (TUK == TUK_Definition) << Kind << SS.getRange();
+ return true;
+ }
+
+ // Create the resulting type.
+ ElaboratedTypeKeyword Kwd = TypeWithKeyword::getKeywordForTagTypeKind(Kind);
+ QualType Result = Context.getDependentNameType(Kwd, NNS, Name);
+
+ // Create type-source location information for this type.
+ TypeLocBuilder TLB;
+ DependentNameTypeLoc TL = TLB.push<DependentNameTypeLoc>(Result);
+ TL.setElaboratedKeywordLoc(TagLoc);
+ TL.setQualifierLoc(SS.getWithLocInContext(Context));
+ TL.setNameLoc(NameLoc);
+ return CreateParsedType(Result, TLB.getTypeSourceInfo(Context, Result));
+}
+
+TypeResult
+Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
+ const CXXScopeSpec &SS, const IdentifierInfo &II,
+ SourceLocation IdLoc) {
+ if (SS.isInvalid())
+ return true;
+
+ if (TypenameLoc.isValid() && S && !S->getTemplateParamParent())
+ Diag(TypenameLoc,
+ getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_typename_outside_of_template :
+ diag::ext_typename_outside_of_template)
+ << FixItHint::CreateRemoval(TypenameLoc);
+
+ NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
+ QualType T = CheckTypenameType(TypenameLoc.isValid()? ETK_Typename : ETK_None,
+ TypenameLoc, QualifierLoc, II, IdLoc);
+ if (T.isNull())
+ return true;
+
+ TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T);
+ if (isa<DependentNameType>(T)) {
+ DependentNameTypeLoc TL = TSI->getTypeLoc().castAs<DependentNameTypeLoc>();
+ TL.setElaboratedKeywordLoc(TypenameLoc);
+ TL.setQualifierLoc(QualifierLoc);
+ TL.setNameLoc(IdLoc);
+ } else {
+ ElaboratedTypeLoc TL = TSI->getTypeLoc().castAs<ElaboratedTypeLoc>();
+ TL.setElaboratedKeywordLoc(TypenameLoc);
+ TL.setQualifierLoc(QualifierLoc);
+ TL.getNamedTypeLoc().castAs<TypeSpecTypeLoc>().setNameLoc(IdLoc);
+ }
+
+ return CreateParsedType(T, TSI);
+}
+
+TypeResult
+Sema::ActOnTypenameType(Scope *S,
+ SourceLocation TypenameLoc,
+ const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ TemplateTy TemplateIn,
+ SourceLocation TemplateNameLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgsIn,
+ SourceLocation RAngleLoc) {
+ if (TypenameLoc.isValid() && S && !S->getTemplateParamParent())
+ Diag(TypenameLoc,
+ getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_typename_outside_of_template :
+ diag::ext_typename_outside_of_template)
+ << FixItHint::CreateRemoval(TypenameLoc);
+
+  // Translate the parser's template argument list into our AST format.
+ TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
+ translateTemplateArguments(TemplateArgsIn, TemplateArgs);
+
+ TemplateName Template = TemplateIn.get();
+ if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
+ // Construct a dependent template specialization type.
+ assert(DTN && "dependent template has non-dependent name?");
+ assert(DTN->getQualifier() == SS.getScopeRep());
+ QualType T = Context.getDependentTemplateSpecializationType(ETK_Typename,
+ DTN->getQualifier(),
+ DTN->getIdentifier(),
+ TemplateArgs);
+
+ // Create source-location information for this type.
+ TypeLocBuilder Builder;
+ DependentTemplateSpecializationTypeLoc SpecTL
+ = Builder.push<DependentTemplateSpecializationTypeLoc>(T);
+ SpecTL.setElaboratedKeywordLoc(TypenameLoc);
+ SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateNameLoc);
+ SpecTL.setLAngleLoc(LAngleLoc);
+ SpecTL.setRAngleLoc(RAngleLoc);
+ for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
+ SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
+ return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
+ }
+
+ QualType T = CheckTemplateIdType(Template, TemplateNameLoc, TemplateArgs);
+ if (T.isNull())
+ return true;
+
+ // Provide source-location information for the template specialization type.
+ TypeLocBuilder Builder;
+ TemplateSpecializationTypeLoc SpecTL
+ = Builder.push<TemplateSpecializationTypeLoc>(T);
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateNameLoc);
+ SpecTL.setLAngleLoc(LAngleLoc);
+ SpecTL.setRAngleLoc(RAngleLoc);
+ for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
+ SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
+
+ T = Context.getElaboratedType(ETK_Typename, SS.getScopeRep(), T);
+ ElaboratedTypeLoc TL = Builder.push<ElaboratedTypeLoc>(T);
+ TL.setElaboratedKeywordLoc(TypenameLoc);
+ TL.setQualifierLoc(SS.getWithLocInContext(Context));
+
+ TypeSourceInfo *TSI = Builder.getTypeSourceInfo(Context, T);
+ return CreateParsedType(T, TSI);
+}
+
+/// \brief Determine whether this failed name lookup should be treated as
+/// being disabled by a usage of std::enable_if.
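+///
+/// For example (illustrative), substituting into
+/// \code
+///   typename std::enable_if<Cond, int>::type
+/// \endcode
+/// with a false condition yields a specialization with no member named
+/// 'type'; the resulting lookup failure gets a more specific diagnostic.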
+static bool isEnableIf(NestedNameSpecifierLoc NNS, const IdentifierInfo &II,
+ SourceRange &CondRange) {
+ // We must be looking for a ::type...
+ if (!II.isStr("type"))
+ return false;
+
+ // ... within an explicitly-written template specialization...
+ if (!NNS || !NNS.getNestedNameSpecifier()->getAsType())
+ return false;
+ TypeLoc EnableIfTy = NNS.getTypeLoc();
+ TemplateSpecializationTypeLoc EnableIfTSTLoc =
+ EnableIfTy.getAs<TemplateSpecializationTypeLoc>();
+ if (!EnableIfTSTLoc || EnableIfTSTLoc.getNumArgs() == 0)
+ return false;
+ const TemplateSpecializationType *EnableIfTST =
+ cast<TemplateSpecializationType>(EnableIfTSTLoc.getTypePtr());
+
+ // ... which names a complete class template declaration...
+ const TemplateDecl *EnableIfDecl =
+ EnableIfTST->getTemplateName().getAsTemplateDecl();
+ if (!EnableIfDecl || EnableIfTST->isIncompleteType())
+ return false;
+
+ // ... called "enable_if".
+ const IdentifierInfo *EnableIfII =
+ EnableIfDecl->getDeclName().getAsIdentifierInfo();
+ if (!EnableIfII || !EnableIfII->isStr("enable_if"))
+ return false;
+
+ // Assume the first template argument is the condition.
+ CondRange = EnableIfTSTLoc.getArgLoc(0).getSourceRange();
+ return true;
+}
+
+/// \brief Build the type that describes a C++ typename specifier,
+/// e.g., "typename T::type".
+QualType
+Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
+ SourceLocation KeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ const IdentifierInfo &II,
+ SourceLocation IILoc) {
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+
+ DeclContext *Ctx = computeDeclContext(SS);
+ if (!Ctx) {
+ // If the nested-name-specifier is dependent and couldn't be
+ // resolved to a type, build a typename type.
+ assert(QualifierLoc.getNestedNameSpecifier()->isDependent());
+ return Context.getDependentNameType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
+ &II);
+ }
+
+ // If the nested-name-specifier refers to the current instantiation,
+ // the "typename" keyword itself is superfluous. In C++03, the
+ // program is actually ill-formed. However, DR 382 (in C++0x CD1)
+ // allows such extraneous "typename" keywords, and we retroactively
+  // apply this DR to C++03 code with only a warning. In either case, we
+  // continue with the name lookup below.
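+  //
+  // For example (illustrative):
+  //   template<typename T> struct A {
+  //     typedef int type;
+  //     typename A::type x; // 'typename' is redundant: A is the
+  //                         // current instantiation (DR 382)
+  //   };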
+
+ if (RequireCompleteDeclContext(SS, Ctx))
+ return QualType();
+
+ DeclarationName Name(&II);
+ LookupResult Result(*this, Name, IILoc, LookupOrdinaryName);
+ LookupQualifiedName(Result, Ctx, SS);
+ unsigned DiagID = 0;
+ Decl *Referenced = nullptr;
+ switch (Result.getResultKind()) {
+ case LookupResult::NotFound: {
+ // If we're looking up 'type' within a template named 'enable_if', produce
+ // a more specific diagnostic.
+ SourceRange CondRange;
+ if (isEnableIf(QualifierLoc, II, CondRange)) {
+ Diag(CondRange.getBegin(), diag::err_typename_nested_not_found_enable_if)
+ << Ctx << CondRange;
+ return QualType();
+ }
+
+ DiagID = diag::err_typename_nested_not_found;
+ break;
+ }
+
+ case LookupResult::FoundUnresolvedValue: {
+ // We found a using declaration that is a value. Most likely, the using
+ // declaration itself is meant to have the 'typename' keyword.
+ SourceRange FullRange(KeywordLoc.isValid() ? KeywordLoc : SS.getBeginLoc(),
+ IILoc);
+ Diag(IILoc, diag::err_typename_refers_to_using_value_decl)
+ << Name << Ctx << FullRange;
+ if (UnresolvedUsingValueDecl *Using
+ = dyn_cast<UnresolvedUsingValueDecl>(Result.getRepresentativeDecl())){
+ SourceLocation Loc = Using->getQualifierLoc().getBeginLoc();
+ Diag(Loc, diag::note_using_value_decl_missing_typename)
+ << FixItHint::CreateInsertion(Loc, "typename ");
+ }
+ }
+ // Fall through to create a dependent typename type, from which we can recover
+ // better.
+
+ case LookupResult::NotFoundInCurrentInstantiation:
+ // Okay, it's a member of an unknown instantiation.
+ return Context.getDependentNameType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
+ &II);
+
+ case LookupResult::Found:
+ if (TypeDecl *Type = dyn_cast<TypeDecl>(Result.getFoundDecl())) {
+ // We found a type. Build an ElaboratedType, since the
+ // typename-specifier was just sugar.
+ MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
+ return Context.getElaboratedType(ETK_Typename,
+ QualifierLoc.getNestedNameSpecifier(),
+ Context.getTypeDeclType(Type));
+ }
+
+ DiagID = diag::err_typename_nested_not_type;
+ Referenced = Result.getFoundDecl();
+ break;
+
+ case LookupResult::FoundOverloaded:
+ DiagID = diag::err_typename_nested_not_type;
+ Referenced = *Result.begin();
+ break;
+
+ case LookupResult::Ambiguous:
+ return QualType();
+ }
+
+ // If we get here, it's because name lookup did not find a
+ // type. Emit an appropriate diagnostic and return an error.
+ SourceRange FullRange(KeywordLoc.isValid() ? KeywordLoc : SS.getBeginLoc(),
+ IILoc);
+ Diag(IILoc, DiagID) << FullRange << Name << Ctx;
+ if (Referenced)
+ Diag(Referenced->getLocation(), diag::note_typename_refers_here)
+ << Name;
+ return QualType();
+}
+
+namespace {
+ // See Sema::RebuildTypeInCurrentInstantiation
+ class CurrentInstantiationRebuilder
+ : public TreeTransform<CurrentInstantiationRebuilder> {
+ SourceLocation Loc;
+ DeclarationName Entity;
+
+ public:
+ typedef TreeTransform<CurrentInstantiationRebuilder> inherited;
+
+ CurrentInstantiationRebuilder(Sema &SemaRef,
+ SourceLocation Loc,
+ DeclarationName Entity)
+ : TreeTransform<CurrentInstantiationRebuilder>(SemaRef),
+ Loc(Loc), Entity(Entity) { }
+
+ /// \brief Determine whether the given type \p T has already been
+ /// transformed.
+ ///
+ /// For the purposes of type reconstruction, a type has already been
+ /// transformed if it is NULL or if it is not dependent.
+ bool AlreadyTransformed(QualType T) {
+ return T.isNull() || !T->isDependentType();
+ }
+
+ /// \brief Returns the location of the entity whose type is being
+ /// rebuilt.
+ SourceLocation getBaseLocation() { return Loc; }
+
+ /// \brief Returns the name of the entity whose type is being rebuilt.
+ DeclarationName getBaseEntity() { return Entity; }
+
+ /// \brief Sets the "base" location and entity when that
+ /// information is known based on another transformation.
+ void setBase(SourceLocation Loc, DeclarationName Entity) {
+ this->Loc = Loc;
+ this->Entity = Entity;
+ }
+
+ ExprResult TransformLambdaExpr(LambdaExpr *E) {
+ // Lambdas never need to be transformed.
+ return E;
+ }
+ };
+}
+
+/// \brief Rebuilds a type within the context of the current instantiation.
+///
+/// The type \p T is part of the type of an out-of-line member definition of
+/// a class template (or class template partial specialization) that was parsed
+/// and constructed before we entered the scope of the class template (or
+/// partial specialization thereof). This routine will rebuild that type now
+/// that we have entered the declarator's scope, which may produce different
+/// canonical types, e.g.,
+///
+/// \code
+/// template<typename T>
+/// struct X {
+/// typedef T* pointer;
+/// pointer data();
+/// };
+///
+/// template<typename T>
+/// typename X<T>::pointer X<T>::data() { ... }
+/// \endcode
+///
+/// Here, the type "typename X<T>::pointer" will be created as a
+/// DependentNameType, since we do not know that we can look into X<T> when
+/// we parsed the type.
+/// This function will rebuild the type, performing the lookup of "pointer"
+/// in X<T> and returning an ElaboratedType whose canonical type is the same
+/// as the canonical type of T*, allowing the return types of the out-of-line
+/// definition and the declaration to match.
+TypeSourceInfo *Sema::RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
+ SourceLocation Loc,
+ DeclarationName Name) {
+ if (!T || !T->getType()->isDependentType())
+ return T;
+
+ CurrentInstantiationRebuilder Rebuilder(*this, Loc, Name);
+ return Rebuilder.TransformType(T);
+}
+
+ExprResult Sema::RebuildExprInCurrentInstantiation(Expr *E) {
+ CurrentInstantiationRebuilder Rebuilder(*this, E->getExprLoc(),
+ DeclarationName());
+ return Rebuilder.TransformExpr(E);
+}
+
+bool Sema::RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS) {
+ if (SS.isInvalid())
+ return true;
+
+ NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
+ CurrentInstantiationRebuilder Rebuilder(*this, SS.getRange().getBegin(),
+ DeclarationName());
+ NestedNameSpecifierLoc Rebuilt
+ = Rebuilder.TransformNestedNameSpecifierLoc(QualifierLoc);
+ if (!Rebuilt)
+ return true;
+
+ SS.Adopt(Rebuilt);
+ return false;
+}
+
+/// \brief Rebuild the template parameters now that we know we're in a current
+/// instantiation.
+bool Sema::RebuildTemplateParamsInCurrentInstantiation(
+ TemplateParameterList *Params) {
+ for (unsigned I = 0, N = Params->size(); I != N; ++I) {
+ Decl *Param = Params->getParam(I);
+
+ // There is nothing to rebuild in a type parameter.
+ if (isa<TemplateTypeParmDecl>(Param))
+ continue;
+
+ // Rebuild the template parameter list of a template template parameter.
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(Param)) {
+ if (RebuildTemplateParamsInCurrentInstantiation(
+ TTP->getTemplateParameters()))
+ return true;
+
+ continue;
+ }
+
+ // Rebuild the type of a non-type template parameter.
+ NonTypeTemplateParmDecl *NTTP = cast<NonTypeTemplateParmDecl>(Param);
+ TypeSourceInfo *NewTSI
+ = RebuildTypeInCurrentInstantiation(NTTP->getTypeSourceInfo(),
+ NTTP->getLocation(),
+ NTTP->getDeclName());
+ if (!NewTSI)
+ return true;
+
+ if (NewTSI != NTTP->getTypeSourceInfo()) {
+ NTTP->setTypeSourceInfo(NewTSI);
+ NTTP->setType(NewTSI->getType());
+ }
+ }
+
+ return false;
+}
+
+/// \brief Produces a formatted string that describes the binding of
+/// template parameters to template arguments.
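+///
+/// The result has the form "[with T = int, U = float]", or is empty when
+/// there are no parameters or no arguments.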
+std::string
+Sema::getTemplateArgumentBindingsText(const TemplateParameterList *Params,
+ const TemplateArgumentList &Args) {
+ return getTemplateArgumentBindingsText(Params, Args.data(), Args.size());
+}
+
+std::string
+Sema::getTemplateArgumentBindingsText(const TemplateParameterList *Params,
+ const TemplateArgument *Args,
+ unsigned NumArgs) {
+  if (!Params || Params->size() == 0 || NumArgs == 0)
+    return std::string();
+
+  SmallString<128> Str;
+  llvm::raw_svector_ostream Out(Str);
+
+ for (unsigned I = 0, N = Params->size(); I != N; ++I) {
+ if (I >= NumArgs)
+ break;
+
+ if (I == 0)
+ Out << "[with ";
+ else
+ Out << ", ";
+
+ if (const IdentifierInfo *Id = Params->getParam(I)->getIdentifier()) {
+ Out << Id->getName();
+ } else {
+ Out << '$' << I;
+ }
+
+ Out << " = ";
+ Args[I].print(getPrintingPolicy(), Out);
+ }
+
+ Out << ']';
+ return Out.str();
+}
+
+void Sema::MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
+ CachedTokens &Toks) {
+ if (!FD)
+ return;
+
+ LateParsedTemplate *LPT = new LateParsedTemplate;
+
+  // Take the tokens (by swapping) to avoid extra allocations.
+ LPT->Toks.swap(Toks);
+ LPT->D = FnD;
+ LateParsedTemplateMap.insert(std::make_pair(FD, LPT));
+
+ FD->setLateTemplateParsed(true);
+}
+
+void Sema::UnmarkAsLateParsedTemplate(FunctionDecl *FD) {
+ if (!FD)
+ return;
+ FD->setLateTemplateParsed(false);
+}
+
+bool Sema::IsInsideALocalClassWithinATemplateFunction() {
+ DeclContext *DC = CurContext;
+
+ while (DC) {
+    if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) {
+ const FunctionDecl *FD = RD->isLocalClass();
+ return (FD && FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate);
+ } else if (DC->isTranslationUnit() || DC->isNamespace())
+ return false;
+
+ DC = DC->getParent();
+ }
+ return false;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp
new file mode 100644
index 0000000..cd54920
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -0,0 +1,5074 @@
+//===------- SemaTemplateDeduction.cpp - Template Argument Deduction -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+//
+// This file implements C++ template argument deduction.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/TemplateDeduction.h"
+#include "TreeTransform.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTLambda.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/Template.h"
+#include "llvm/ADT/SmallBitVector.h"
+#include <algorithm>
+
+namespace clang {
+ using namespace sema;
+ /// \brief Various flags that control template argument deduction.
+ ///
+ /// These flags can be bitwise-OR'd together.
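+  ///
+  /// For example, the pointer case of DeduceTemplateArgumentsByTypeMatch
+  /// below propagates TDF_IgnoreQualifiers | TDF_DerivedClass to the
+  /// pointee comparison.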
+ enum TemplateDeductionFlags {
+ /// \brief No template argument deduction flags, which indicates the
+ /// strictest results for template argument deduction (as used for, e.g.,
+ /// matching class template partial specializations).
+ TDF_None = 0,
+ /// \brief Within template argument deduction from a function call, we are
+ /// matching with a parameter type for which the original parameter was
+ /// a reference.
+ TDF_ParamWithReferenceType = 0x1,
+ /// \brief Within template argument deduction from a function call, we
+ /// are matching in a case where we ignore cv-qualifiers.
+ TDF_IgnoreQualifiers = 0x02,
+ /// \brief Within template argument deduction from a function call,
+ /// we are matching in a case where we can perform template argument
+ /// deduction from a template-id of a derived class of the argument type.
+ TDF_DerivedClass = 0x04,
+ /// \brief Allow non-dependent types to differ, e.g., when performing
+ /// template argument deduction from a function call where conversions
+ /// may apply.
+ TDF_SkipNonDependent = 0x08,
+ /// \brief Whether we are performing template argument deduction for
+    /// parameters and arguments in a top-level parameter-type-list.
+ TDF_TopLevelParameterTypeList = 0x10,
+ /// \brief Within template argument deduction from overload resolution per
+ /// C++ [over.over] allow matching function types that are compatible in
+ /// terms of noreturn and default calling convention adjustments.
+ TDF_InOverloadResolution = 0x20
+ };
+}
+
+using namespace clang;
+
+/// \brief Compare two APSInts, extending and switching the sign as
+/// necessary to compare their values regardless of underlying type.
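+///
+/// For example, a 16-bit signed 3 and a 32-bit unsigned 3 have the same
+/// extended value, while a 32-bit signed -1 and a 32-bit unsigned
+/// 0xFFFFFFFF do not.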
+static bool hasSameExtendedValue(llvm::APSInt X, llvm::APSInt Y) {
+ if (Y.getBitWidth() > X.getBitWidth())
+ X = X.extend(Y.getBitWidth());
+ else if (Y.getBitWidth() < X.getBitWidth())
+ Y = Y.extend(X.getBitWidth());
+
+ // If there is a signedness mismatch, correct it.
+ if (X.isSigned() != Y.isSigned()) {
+ // If the signed value is negative, then the values cannot be the same.
+ if ((Y.isSigned() && Y.isNegative()) || (X.isSigned() && X.isNegative()))
+ return false;
+
+ Y.setIsSigned(true);
+ X.setIsSigned(true);
+ }
+
+ return X == Y;
+}
+
+static Sema::TemplateDeductionResult
+DeduceTemplateArguments(Sema &S,
+ TemplateParameterList *TemplateParams,
+ const TemplateArgument &Param,
+ TemplateArgument Arg,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced);
+
+static Sema::TemplateDeductionResult
+DeduceTemplateArgumentsByTypeMatch(Sema &S,
+ TemplateParameterList *TemplateParams,
+ QualType Param,
+ QualType Arg,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &
+ Deduced,
+ unsigned TDF,
+ bool PartialOrdering = false);
+
+static Sema::TemplateDeductionResult
+DeduceTemplateArguments(Sema &S,
+ TemplateParameterList *TemplateParams,
+ const TemplateArgument *Params, unsigned NumParams,
+ const TemplateArgument *Args, unsigned NumArgs,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced);
+
+/// \brief If the given expression is of a form that permits the deduction
+/// of a non-type template parameter, return the declaration of that
+/// non-type template parameter.
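+///
+/// For example (illustrative), given
+/// \code
+///   template<int N> struct A { int data[N]; };
+/// \endcode
+/// the array's size expression is a reference to N (possibly wrapped in
+/// implicit casts or prior substitutions), and this function returns the
+/// declaration of N.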
+static NonTypeTemplateParmDecl *getDeducedParameterFromExpr(Expr *E) {
+ // If we are within an alias template, the expression may have undergone
+ // any number of parameter substitutions already.
+  while (true) {
+ if (ImplicitCastExpr *IC = dyn_cast<ImplicitCastExpr>(E))
+ E = IC->getSubExpr();
+ else if (SubstNonTypeTemplateParmExpr *Subst =
+ dyn_cast<SubstNonTypeTemplateParmExpr>(E))
+ E = Subst->getReplacement();
+ else
+ break;
+ }
+
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ return dyn_cast<NonTypeTemplateParmDecl>(DRE->getDecl());
+
+ return nullptr;
+}
+
+/// \brief Determine whether two declaration pointers refer to the same
+/// declaration.
+static bool isSameDeclaration(Decl *X, Decl *Y) {
+ if (NamedDecl *NX = dyn_cast<NamedDecl>(X))
+ X = NX->getUnderlyingDecl();
+ if (NamedDecl *NY = dyn_cast<NamedDecl>(Y))
+ Y = NY->getUnderlyingDecl();
+
+ return X->getCanonicalDecl() == Y->getCanonicalDecl();
+}
+
+/// \brief Verify that the given, deduced template arguments are compatible.
+///
+/// \returns The deduced template argument, or a NULL template argument if
+/// the deduced template arguments were incompatible.
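+///
+/// For example, deducing T = int from one argument and T = int from another
+/// is compatible and yields T = int, whereas deducing T = int and T = long
+/// for the same parameter is incompatible and yields a null argument.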
+static DeducedTemplateArgument
+checkDeducedTemplateArguments(ASTContext &Context,
+ const DeducedTemplateArgument &X,
+ const DeducedTemplateArgument &Y) {
+ // We have no deduction for one or both of the arguments; they're compatible.
+ if (X.isNull())
+ return Y;
+ if (Y.isNull())
+ return X;
+
+ switch (X.getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("Non-deduced template arguments handled above");
+
+ case TemplateArgument::Type:
+ // If two template type arguments have the same type, they're compatible.
+ if (Y.getKind() == TemplateArgument::Type &&
+ Context.hasSameType(X.getAsType(), Y.getAsType()))
+ return X;
+
+ return DeducedTemplateArgument();
+
+ case TemplateArgument::Integral:
+ // If we deduced a constant in one case and either a dependent expression or
+ // declaration in another case, keep the integral constant.
+ // If both are integral constants with the same value, keep that value.
+ if (Y.getKind() == TemplateArgument::Expression ||
+ Y.getKind() == TemplateArgument::Declaration ||
+ (Y.getKind() == TemplateArgument::Integral &&
+ hasSameExtendedValue(X.getAsIntegral(), Y.getAsIntegral())))
+ return DeducedTemplateArgument(X,
+ X.wasDeducedFromArrayBound() &&
+ Y.wasDeducedFromArrayBound());
+
+ // All other combinations are incompatible.
+ return DeducedTemplateArgument();
+
+ case TemplateArgument::Template:
+ if (Y.getKind() == TemplateArgument::Template &&
+ Context.hasSameTemplateName(X.getAsTemplate(), Y.getAsTemplate()))
+ return X;
+
+ // All other combinations are incompatible.
+ return DeducedTemplateArgument();
+
+ case TemplateArgument::TemplateExpansion:
+ if (Y.getKind() == TemplateArgument::TemplateExpansion &&
+ Context.hasSameTemplateName(X.getAsTemplateOrTemplatePattern(),
+ Y.getAsTemplateOrTemplatePattern()))
+ return X;
+
+ // All other combinations are incompatible.
+ return DeducedTemplateArgument();
+
+ case TemplateArgument::Expression:
+ // If we deduced a dependent expression in one case and either an integral
+ // constant or a declaration in another case, keep the integral constant
+ // or declaration.
+ if (Y.getKind() == TemplateArgument::Integral ||
+ Y.getKind() == TemplateArgument::Declaration)
+ return DeducedTemplateArgument(Y, X.wasDeducedFromArrayBound() &&
+ Y.wasDeducedFromArrayBound());
+
+ if (Y.getKind() == TemplateArgument::Expression) {
+ // Compare the expressions for equality
+ llvm::FoldingSetNodeID ID1, ID2;
+ X.getAsExpr()->Profile(ID1, Context, true);
+ Y.getAsExpr()->Profile(ID2, Context, true);
+ if (ID1 == ID2)
+ return X;
+ }
+
+ // All other combinations are incompatible.
+ return DeducedTemplateArgument();
+
+ case TemplateArgument::Declaration:
+ // If we deduced a declaration and a dependent expression, keep the
+ // declaration.
+ if (Y.getKind() == TemplateArgument::Expression)
+ return X;
+
+ // If we deduced a declaration and an integral constant, keep the
+ // integral constant.
+ if (Y.getKind() == TemplateArgument::Integral)
+ return Y;
+
+    // If we deduced two declarations, make sure they refer to the
+ // same declaration.
+ if (Y.getKind() == TemplateArgument::Declaration &&
+ isSameDeclaration(X.getAsDecl(), Y.getAsDecl()))
+ return X;
+
+ // All other combinations are incompatible.
+ return DeducedTemplateArgument();
+
+ case TemplateArgument::NullPtr:
+ // If we deduced a null pointer and a dependent expression, keep the
+ // null pointer.
+ if (Y.getKind() == TemplateArgument::Expression)
+ return X;
+
+ // If we deduced a null pointer and an integral constant, keep the
+ // integral constant.
+ if (Y.getKind() == TemplateArgument::Integral)
+ return Y;
+
+ // If we deduced two null pointers, make sure they have the same type.
+ if (Y.getKind() == TemplateArgument::NullPtr &&
+ Context.hasSameType(X.getNullPtrType(), Y.getNullPtrType()))
+ return X;
+
+ // All other combinations are incompatible.
+ return DeducedTemplateArgument();
+
+ case TemplateArgument::Pack:
+ if (Y.getKind() != TemplateArgument::Pack ||
+ X.pack_size() != Y.pack_size())
+ return DeducedTemplateArgument();
+
+ for (TemplateArgument::pack_iterator XA = X.pack_begin(),
+ XAEnd = X.pack_end(),
+ YA = Y.pack_begin();
+ XA != XAEnd; ++XA, ++YA) {
+ // FIXME: Do we need to merge the results together here?
+ if (checkDeducedTemplateArguments(Context,
+ DeducedTemplateArgument(*XA, X.wasDeducedFromArrayBound()),
+ DeducedTemplateArgument(*YA, Y.wasDeducedFromArrayBound()))
+ .isNull())
+ return DeducedTemplateArgument();
+ }
+
+ return X;
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+/// \brief Deduce the value of the given non-type template parameter
+/// from the given constant.
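+///
+/// For example, matching the parameter type T[N] against the argument type
+/// int[10] deduces N from the constant array bound 10.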
+static Sema::TemplateDeductionResult
+DeduceNonTypeTemplateArgument(Sema &S,
+ NonTypeTemplateParmDecl *NTTP,
+ llvm::APSInt Value, QualType ValueType,
+ bool DeducedFromArrayBound,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ assert(NTTP->getDepth() == 0 &&
+ "Cannot deduce non-type template argument with depth > 0");
+
+ DeducedTemplateArgument NewDeduced(S.Context, Value, ValueType,
+ DeducedFromArrayBound);
+ DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
+ Deduced[NTTP->getIndex()],
+ NewDeduced);
+ if (Result.isNull()) {
+ Info.Param = NTTP;
+ Info.FirstArg = Deduced[NTTP->getIndex()];
+ Info.SecondArg = NewDeduced;
+ return Sema::TDK_Inconsistent;
+ }
+
+ Deduced[NTTP->getIndex()] = Result;
+ return Sema::TDK_Success;
+}
+
+/// \brief Deduce the value of the given non-type template parameter
+/// from the given type- or value-dependent expression.
+///
+/// \returns TDK_Success if deduction succeeded, or TDK_Inconsistent if the
+/// new deduction conflicts with an earlier one.
+static Sema::TemplateDeductionResult
+DeduceNonTypeTemplateArgument(Sema &S,
+ NonTypeTemplateParmDecl *NTTP,
+ Expr *Value,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ assert(NTTP->getDepth() == 0 &&
+ "Cannot deduce non-type template argument with depth > 0");
+ assert((Value->isTypeDependent() || Value->isValueDependent()) &&
+ "Expression template argument must be type- or value-dependent.");
+
+ DeducedTemplateArgument NewDeduced(Value);
+ DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
+ Deduced[NTTP->getIndex()],
+ NewDeduced);
+
+ if (Result.isNull()) {
+ Info.Param = NTTP;
+ Info.FirstArg = Deduced[NTTP->getIndex()];
+ Info.SecondArg = NewDeduced;
+ return Sema::TDK_Inconsistent;
+ }
+
+ Deduced[NTTP->getIndex()] = Result;
+ return Sema::TDK_Success;
+}
+
+/// \brief Deduce the value of the given non-type template parameter
+/// from the given declaration.
+///
+/// \returns TDK_Success if deduction succeeded, or TDK_Inconsistent if the
+/// new deduction conflicts with an earlier one.
+static Sema::TemplateDeductionResult
+DeduceNonTypeTemplateArgument(Sema &S,
+ NonTypeTemplateParmDecl *NTTP,
+ ValueDecl *D,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ assert(NTTP->getDepth() == 0 &&
+ "Cannot deduce non-type template argument with depth > 0");
+
+ D = D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
+ TemplateArgument New(D, NTTP->getType());
+ DeducedTemplateArgument NewDeduced(New);
+ DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
+ Deduced[NTTP->getIndex()],
+ NewDeduced);
+ if (Result.isNull()) {
+ Info.Param = NTTP;
+ Info.FirstArg = Deduced[NTTP->getIndex()];
+ Info.SecondArg = NewDeduced;
+ return Sema::TDK_Inconsistent;
+ }
+
+ Deduced[NTTP->getIndex()] = Result;
+ return Sema::TDK_Success;
+}
+
+static Sema::TemplateDeductionResult
+DeduceTemplateArguments(Sema &S,
+ TemplateParameterList *TemplateParams,
+ TemplateName Param,
+ TemplateName Arg,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ TemplateDecl *ParamDecl = Param.getAsTemplateDecl();
+ if (!ParamDecl) {
+ // The parameter type is dependent and is not a template template parameter,
+ // so there is nothing that we can deduce.
+ return Sema::TDK_Success;
+ }
+
+ if (TemplateTemplateParmDecl *TempParam
+ = dyn_cast<TemplateTemplateParmDecl>(ParamDecl)) {
+ DeducedTemplateArgument NewDeduced(S.Context.getCanonicalTemplateName(Arg));
+ DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
+ Deduced[TempParam->getIndex()],
+ NewDeduced);
+ if (Result.isNull()) {
+ Info.Param = TempParam;
+ Info.FirstArg = Deduced[TempParam->getIndex()];
+ Info.SecondArg = NewDeduced;
+ return Sema::TDK_Inconsistent;
+ }
+
+ Deduced[TempParam->getIndex()] = Result;
+ return Sema::TDK_Success;
+ }
+
+ // Verify that the two template names are equivalent.
+ if (S.Context.hasSameTemplateName(Param, Arg))
+ return Sema::TDK_Success;
+
+ // Mismatch of non-dependent template parameter to argument.
+ Info.FirstArg = TemplateArgument(Param);
+ Info.SecondArg = TemplateArgument(Arg);
+ return Sema::TDK_NonDeducedMismatch;
+}
+
+/// \brief Deduce the template arguments by comparing the template parameter
+/// type (which is a template-id) with the template argument type.
+///
+/// \param S the Sema
+///
+/// \param TemplateParams the template parameters that we are deducing
+///
+/// \param Param the parameter type
+///
+/// \param Arg the argument type
+///
+/// \param Info information about the template argument deduction itself
+///
+/// \param Deduced the deduced template arguments
+///
+/// \returns the result of template argument deduction so far. Note that a
+/// "success" result means that template argument deduction has not yet failed,
+/// but it may still fail, later, for other reasons.
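+///
+/// For example, matching the parameter type vector<T> against an argument
+/// of type vector<int> deduces T = int, either via a template-id in the
+/// argument type or via the argument's ClassTemplateSpecializationDecl.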
+static Sema::TemplateDeductionResult
+DeduceTemplateArguments(Sema &S,
+ TemplateParameterList *TemplateParams,
+ const TemplateSpecializationType *Param,
+ QualType Arg,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ assert(Arg.isCanonical() && "Argument type must be canonical");
+
+ // Check whether the template argument is a dependent template-id.
+ if (const TemplateSpecializationType *SpecArg
+ = dyn_cast<TemplateSpecializationType>(Arg)) {
+ // Perform template argument deduction for the template name.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArguments(S, TemplateParams,
+ Param->getTemplateName(),
+ SpecArg->getTemplateName(),
+ Info, Deduced))
+ return Result;
+
+ // Perform template argument deduction on each template
+ // argument. Ignore any missing/extra arguments, since they could be
+ // filled in by default arguments.
+ return DeduceTemplateArguments(S, TemplateParams,
+ Param->getArgs(), Param->getNumArgs(),
+ SpecArg->getArgs(), SpecArg->getNumArgs(),
+ Info, Deduced);
+ }
+
+ // If the argument type is a class template specialization, we
+ // perform template argument deduction using its template
+ // arguments.
+ const RecordType *RecordArg = dyn_cast<RecordType>(Arg);
+ if (!RecordArg) {
+ Info.FirstArg = TemplateArgument(QualType(Param, 0));
+ Info.SecondArg = TemplateArgument(Arg);
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ ClassTemplateSpecializationDecl *SpecArg
+ = dyn_cast<ClassTemplateSpecializationDecl>(RecordArg->getDecl());
+ if (!SpecArg) {
+ Info.FirstArg = TemplateArgument(QualType(Param, 0));
+ Info.SecondArg = TemplateArgument(Arg);
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ // Perform template argument deduction for the template name.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArguments(S,
+ TemplateParams,
+ Param->getTemplateName(),
+ TemplateName(SpecArg->getSpecializedTemplate()),
+ Info, Deduced))
+ return Result;
+
+ // Perform template argument deduction for the template arguments.
+ return DeduceTemplateArguments(S, TemplateParams,
+ Param->getArgs(), Param->getNumArgs(),
+ SpecArg->getTemplateArgs().data(),
+ SpecArg->getTemplateArgs().size(),
+ Info, Deduced);
+}
+
+/// \brief Determines whether the given type is an opaque type that
+/// might be more qualified when instantiated.
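+///
+/// For example, a dependent name such as 'typename T::type' might
+/// instantiate to 'const int', so deduction must tolerate extra qualifiers
+/// on parameter types of these forms.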
+static bool IsPossiblyOpaquelyQualifiedType(QualType T) {
+ switch (T->getTypeClass()) {
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::DependentName:
+ case Type::Decltype:
+ case Type::UnresolvedUsing:
+ case Type::TemplateTypeParm:
+ return true;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::DependentSizedArray:
+ return IsPossiblyOpaquelyQualifiedType(
+ cast<ArrayType>(T)->getElementType());
+
+ default:
+ return false;
+ }
+}
+
+/// \brief Retrieve the depth and index of a template parameter.
+static std::pair<unsigned, unsigned>
+getDepthAndIndex(NamedDecl *ND) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(ND))
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
+
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(ND))
+ return std::make_pair(NTTP->getDepth(), NTTP->getIndex());
+
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(ND);
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
+}
+
+/// \brief Retrieve the depth and index of an unexpanded parameter pack.
+static std::pair<unsigned, unsigned>
+getDepthAndIndex(UnexpandedParameterPack UPP) {
+ if (const TemplateTypeParmType *TTP
+ = UPP.first.dyn_cast<const TemplateTypeParmType *>())
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
+
+ return getDepthAndIndex(UPP.first.get<NamedDecl *>());
+}
+
+/// \brief Helper function to build a TemplateParameter when we don't
+/// know its type statically.
+static TemplateParameter makeTemplateParameter(Decl *D) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(D))
+ return TemplateParameter(TTP);
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D))
+ return TemplateParameter(NTTP);
+
+ return TemplateParameter(cast<TemplateTemplateParmDecl>(D));
+}
+
+/// A pack that we're currently deducing.
+struct clang::DeducedPack {
+ DeducedPack(unsigned Index) : Index(Index), Outer(nullptr) {}
+
+ // The index of the pack.
+ unsigned Index;
+
+ // The old value of the pack before we started deducing it.
+ DeducedTemplateArgument Saved;
+
+  // A deferred value of this pack from an inner deduction, which couldn't
+  // be checked against this pack because this deduction hadn't happened yet.
+ DeducedTemplateArgument DeferredDeduction;
+
+ // The new value of the pack.
+ SmallVector<DeducedTemplateArgument, 4> New;
+
+ // The outer deduction for this pack, if any.
+ DeducedPack *Outer;
+};
+
+namespace {
+/// A scope in which we're performing pack deduction.
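+///
+/// Typical usage: construct the scope over the expansion's pattern, call
+/// nextPackElement() after deducing from each argument, and finally call
+/// finish() to build the argument packs and check them for consistency
+/// with any prior deductions.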
+class PackDeductionScope {
+public:
+ PackDeductionScope(Sema &S, TemplateParameterList *TemplateParams,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ TemplateDeductionInfo &Info, TemplateArgument Pattern)
+ : S(S), TemplateParams(TemplateParams), Deduced(Deduced), Info(Info) {
+ // Compute the set of template parameter indices that correspond to
+ // parameter packs expanded by the pack expansion.
+ {
+ llvm::SmallBitVector SawIndices(TemplateParams->size());
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ S.collectUnexpandedParameterPacks(Pattern, Unexpanded);
+ for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
+ unsigned Depth, Index;
+ std::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]);
+ if (Depth == 0 && !SawIndices[Index]) {
+ SawIndices[Index] = true;
+
+ // Save the deduced template argument for the parameter pack expanded
+ // by this pack expansion, then clear out the deduction.
+ DeducedPack Pack(Index);
+ Pack.Saved = Deduced[Index];
+ Deduced[Index] = TemplateArgument();
+
+ Packs.push_back(Pack);
+ }
+ }
+ }
+ assert(!Packs.empty() && "Pack expansion without unexpanded packs?");
+
+ for (auto &Pack : Packs) {
+ if (Info.PendingDeducedPacks.size() > Pack.Index)
+ Pack.Outer = Info.PendingDeducedPacks[Pack.Index];
+ else
+ Info.PendingDeducedPacks.resize(Pack.Index + 1);
+ Info.PendingDeducedPacks[Pack.Index] = &Pack;
+
+ if (S.CurrentInstantiationScope) {
+ // If the template argument pack was explicitly specified, add that to
+ // the set of deduced arguments.
+ const TemplateArgument *ExplicitArgs;
+ unsigned NumExplicitArgs;
+ NamedDecl *PartiallySubstitutedPack =
+ S.CurrentInstantiationScope->getPartiallySubstitutedPack(
+ &ExplicitArgs, &NumExplicitArgs);
+ if (PartiallySubstitutedPack &&
+ getDepthAndIndex(PartiallySubstitutedPack).second == Pack.Index)
+ Pack.New.append(ExplicitArgs, ExplicitArgs + NumExplicitArgs);
+ }
+ }
+ }
+
+ ~PackDeductionScope() {
+ for (auto &Pack : Packs)
+ Info.PendingDeducedPacks[Pack.Index] = Pack.Outer;
+ }
+
+ /// Move to deducing the next element in each pack that is being deduced.
+ void nextPackElement() {
+ // Capture the deduced template arguments for each parameter pack expanded
+ // by this pack expansion, add them to the list of arguments we've deduced
+ // for that pack, then clear out the deduced argument.
+ for (auto &Pack : Packs) {
+ DeducedTemplateArgument &DeducedArg = Deduced[Pack.Index];
+ if (!DeducedArg.isNull()) {
+ Pack.New.push_back(DeducedArg);
+ DeducedArg = DeducedTemplateArgument();
+ }
+ }
+ }
+
+ /// \brief Finish template argument deduction for a set of argument packs,
+ /// producing the argument packs and checking for consistency with prior
+ /// deductions.
+ Sema::TemplateDeductionResult finish(bool HasAnyArguments) {
+ // Build argument packs for each of the parameter packs expanded by this
+ // pack expansion.
+ for (auto &Pack : Packs) {
+ // Put back the old value for this pack.
+ Deduced[Pack.Index] = Pack.Saved;
+
+ // Build or find a new value for this pack.
+ DeducedTemplateArgument NewPack;
+ if (HasAnyArguments && Pack.New.empty()) {
+ if (Pack.DeferredDeduction.isNull()) {
+ // We were not able to deduce anything for this parameter pack
+ // (because it only appeared in non-deduced contexts), so just
+ // restore the saved argument pack.
+ continue;
+ }
+
+ NewPack = Pack.DeferredDeduction;
+ Pack.DeferredDeduction = TemplateArgument();
+ } else if (Pack.New.empty()) {
+ // If we deduced an empty argument pack, create it now.
+ NewPack = DeducedTemplateArgument(TemplateArgument::getEmptyPack());
+ } else {
+ TemplateArgument *ArgumentPack =
+ new (S.Context) TemplateArgument[Pack.New.size()];
+ std::copy(Pack.New.begin(), Pack.New.end(), ArgumentPack);
+ NewPack = DeducedTemplateArgument(
+ TemplateArgument(llvm::makeArrayRef(ArgumentPack, Pack.New.size())),
+ Pack.New[0].wasDeducedFromArrayBound());
+ }
+
+ // Pick where we're going to put the merged pack.
+ DeducedTemplateArgument *Loc;
+ if (Pack.Outer) {
+ if (Pack.Outer->DeferredDeduction.isNull()) {
+ // Defer checking this pack until we have a complete pack to compare
+ // it against.
+ Pack.Outer->DeferredDeduction = NewPack;
+ continue;
+ }
+ Loc = &Pack.Outer->DeferredDeduction;
+ } else {
+ Loc = &Deduced[Pack.Index];
+ }
+
+      // Check that the new pack matches any previous value.
+ DeducedTemplateArgument OldPack = *Loc;
+ DeducedTemplateArgument Result =
+ checkDeducedTemplateArguments(S.Context, OldPack, NewPack);
+
+ // If we deferred a deduction of this pack, check that one now too.
+ if (!Result.isNull() && !Pack.DeferredDeduction.isNull()) {
+ OldPack = Result;
+ NewPack = Pack.DeferredDeduction;
+ Result = checkDeducedTemplateArguments(S.Context, OldPack, NewPack);
+ }
+
+ if (Result.isNull()) {
+ Info.Param =
+ makeTemplateParameter(TemplateParams->getParam(Pack.Index));
+ Info.FirstArg = OldPack;
+ Info.SecondArg = NewPack;
+ return Sema::TDK_Inconsistent;
+ }
+
+ *Loc = Result;
+ }
+
+ return Sema::TDK_Success;
+ }
+
+private:
+ Sema &S;
+ TemplateParameterList *TemplateParams;
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced;
+ TemplateDeductionInfo &Info;
+
+ SmallVector<DeducedPack, 2> Packs;
+};
+} // namespace
+
+/// \brief Deduce the template arguments by comparing the list of parameter
+/// types to the list of argument types, as in the parameter-type-lists of
+/// function types (C++ [temp.deduct.type]p10).
+///
+/// \param S The semantic analysis object within which we are deducing
+///
+/// \param TemplateParams The template parameters that we are deducing
+///
+/// \param Params The list of parameter types
+///
+/// \param NumParams The number of types in \c Params
+///
+/// \param Args The list of argument types
+///
+/// \param NumArgs The number of types in \c Args
+///
+/// \param Info information about the template argument deduction itself
+///
+/// \param Deduced the deduced template arguments
+///
+/// \param TDF bitwise OR of the TemplateDeductionFlags bits that describe
+/// how template argument deduction is performed.
+///
+/// \param PartialOrdering If true, we are performing template argument
+/// deduction during partial ordering for a call
+/// (C++0x [temp.deduct.partial]).
+///
+/// \returns the result of template argument deduction so far. Note that a
+/// "success" result means that template argument deduction has not yet failed,
+/// but it may still fail, later, for other reasons.
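+///
+/// For example, matching the parameter-type-list (T, Us...) against
+/// (int, float, double) deduces T = int and Us = <float, double>.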
+static Sema::TemplateDeductionResult
+DeduceTemplateArguments(Sema &S,
+ TemplateParameterList *TemplateParams,
+ const QualType *Params, unsigned NumParams,
+ const QualType *Args, unsigned NumArgs,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ unsigned TDF,
+ bool PartialOrdering = false) {
+ // Fast-path check to see if we have too many/too few arguments.
+ if (NumParams != NumArgs &&
+ !(NumParams && isa<PackExpansionType>(Params[NumParams - 1])) &&
+ !(NumArgs && isa<PackExpansionType>(Args[NumArgs - 1])))
+ return Sema::TDK_MiscellaneousDeductionFailure;
+
+ // C++0x [temp.deduct.type]p10:
+ // Similarly, if P has a form that contains (T), then each parameter type
+  //   Pi of the respective parameter-type-list of P is compared with the
+ // corresponding parameter type Ai of the corresponding parameter-type-list
+ // of A. [...]
+ unsigned ArgIdx = 0, ParamIdx = 0;
+ for (; ParamIdx != NumParams; ++ParamIdx) {
+ // Check argument types.
+ const PackExpansionType *Expansion
+ = dyn_cast<PackExpansionType>(Params[ParamIdx]);
+ if (!Expansion) {
+ // Simple case: compare the parameter and argument types at this point.
+
+ // Make sure we have an argument.
+ if (ArgIdx >= NumArgs)
+ return Sema::TDK_MiscellaneousDeductionFailure;
+
+ if (isa<PackExpansionType>(Args[ArgIdx])) {
+ // C++0x [temp.deduct.type]p22:
+ // If the original function parameter associated with A is a function
+ // parameter pack and the function parameter associated with P is not
+ // a function parameter pack, then template argument deduction fails.
+ return Sema::TDK_MiscellaneousDeductionFailure;
+ }
+
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ Params[ParamIdx], Args[ArgIdx],
+ Info, Deduced, TDF,
+ PartialOrdering))
+ return Result;
+
+ ++ArgIdx;
+ continue;
+ }
+
+ // C++0x [temp.deduct.type]p5:
+ // The non-deduced contexts are:
+ // - A function parameter pack that does not occur at the end of the
+ // parameter-declaration-clause.
+ if (ParamIdx + 1 < NumParams)
+ return Sema::TDK_Success;
+
+ // C++0x [temp.deduct.type]p10:
+ // If the parameter-declaration corresponding to Pi is a function
+  //   parameter pack, then the type of its declarator-id is compared with
+ // each remaining parameter type in the parameter-type-list of A. Each
+ // comparison deduces template arguments for subsequent positions in the
+ // template parameter packs expanded by the function parameter pack.
+
+ QualType Pattern = Expansion->getPattern();
+ PackDeductionScope PackScope(S, TemplateParams, Deduced, Info, Pattern);
+
+ bool HasAnyArguments = false;
+ for (; ArgIdx < NumArgs; ++ArgIdx) {
+ HasAnyArguments = true;
+
+ // Deduce template arguments from the pattern.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, Pattern,
+ Args[ArgIdx], Info, Deduced,
+ TDF, PartialOrdering))
+ return Result;
+
+ PackScope.nextPackElement();
+ }
+
+ // Build argument packs for each of the parameter packs expanded by this
+ // pack expansion.
+ if (auto Result = PackScope.finish(HasAnyArguments))
+ return Result;
+ }
+
+ // Make sure we don't have any extra arguments.
+ if (ArgIdx < NumArgs)
+ return Sema::TDK_MiscellaneousDeductionFailure;
+
+ return Sema::TDK_Success;
+}
+
+/// \brief Determine whether the parameter has qualifiers that are either
+/// inconsistent with or a superset of the argument's qualifiers.
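+///
+/// For example, a 'const volatile'-qualified parameter type carries a
+/// superset of the qualifiers of a 'const'-qualified argument type, so this
+/// returns true for that pair.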
+static bool hasInconsistentOrSupersetQualifiersOf(QualType ParamType,
+ QualType ArgType) {
+ Qualifiers ParamQs = ParamType.getQualifiers();
+ Qualifiers ArgQs = ArgType.getQualifiers();
+
+ if (ParamQs == ArgQs)
+ return false;
+
+ // Mismatched (but not missing) Objective-C GC attributes.
+ if (ParamQs.getObjCGCAttr() != ArgQs.getObjCGCAttr() &&
+ ParamQs.hasObjCGCAttr())
+ return true;
+
+ // Mismatched (but not missing) address spaces.
+ if (ParamQs.getAddressSpace() != ArgQs.getAddressSpace() &&
+ ParamQs.hasAddressSpace())
+ return true;
+
+ // Mismatched (but not missing) Objective-C lifetime qualifiers.
+ if (ParamQs.getObjCLifetime() != ArgQs.getObjCLifetime() &&
+ ParamQs.hasObjCLifetime())
+ return true;
+
+ // CVR qualifier superset.
+ return (ParamQs.getCVRQualifiers() != ArgQs.getCVRQualifiers()) &&
+ ((ParamQs.getCVRQualifiers() | ArgQs.getCVRQualifiers())
+ == ParamQs.getCVRQualifiers());
+}
+
+/// \brief Compare types for equality with respect to possibly compatible
+/// function types (noreturn adjustment, implicit calling conventions). If
+/// either the parameter or the argument is not a function type, just perform
+/// ordinary type comparison.
+///
+/// \param Param the template parameter type.
+///
+/// \param Arg the argument type.
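+///
+/// For example, a parameter of type 'void () __attribute__((noreturn))' is
+/// considered compatible with an argument of type 'void ()' under the
+/// noreturn adjustment.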
+bool Sema::isSameOrCompatibleFunctionType(CanQualType Param,
+ CanQualType Arg) {
+ const FunctionType *ParamFunction = Param->getAs<FunctionType>(),
+ *ArgFunction = Arg->getAs<FunctionType>();
+
+ // Just compare if not functions.
+ if (!ParamFunction || !ArgFunction)
+ return Param == Arg;
+
+ // Noreturn adjustment.
+ QualType AdjustedParam;
+ if (IsNoReturnConversion(Param, Arg, AdjustedParam))
+ return Arg == Context.getCanonicalType(AdjustedParam);
+
+ // FIXME: Compatible calling conventions.
+
+ return Param == Arg;
+}
+
+/// \brief Deduce the template arguments by comparing the parameter type and
+/// the argument type (C++ [temp.deduct.type]).
+///
+/// \param S the semantic analysis object within which we are deducing
+///
+/// \param TemplateParams the template parameters that we are deducing
+///
+/// \param ParamIn the parameter type
+///
+/// \param ArgIn the argument type
+///
+/// \param Info information about the template argument deduction itself
+///
+/// \param Deduced the deduced template arguments
+///
+/// \param TDF bitwise OR of the TemplateDeductionFlags bits that describe
+/// how template argument deduction is performed.
+///
+/// \param PartialOrdering Whether we're performing template argument deduction
+/// in the context of partial ordering (C++0x [temp.deduct.partial]).
+///
+/// \returns the result of template argument deduction so far. Note that a
+/// "success" result means that template argument deduction has not yet failed,
+/// but it may still fail, later, for other reasons.
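+///
+/// For example, matching the parameter type 'const T *' against the
+/// argument type 'const int *' recurses through the pointer case and
+/// deduces T = int.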
+static Sema::TemplateDeductionResult
+DeduceTemplateArgumentsByTypeMatch(Sema &S,
+ TemplateParameterList *TemplateParams,
+ QualType ParamIn, QualType ArgIn,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ unsigned TDF,
+ bool PartialOrdering) {
+ // We only want to look at the canonical types, since typedefs and
+ // sugar are not part of template argument deduction.
+ QualType Param = S.Context.getCanonicalType(ParamIn);
+ QualType Arg = S.Context.getCanonicalType(ArgIn);
+
+ // If the argument type is a pack expansion, look at its pattern.
+  // This isn't explicitly called out by the standard, but deducing against
+  // the expansion's pattern is the only sensible behavior here.
+ if (const PackExpansionType *ArgExpansion
+ = dyn_cast<PackExpansionType>(Arg))
+ Arg = ArgExpansion->getPattern();
+
+ if (PartialOrdering) {
+ // C++11 [temp.deduct.partial]p5:
+ // Before the partial ordering is done, certain transformations are
+ // performed on the types used for partial ordering:
+ // - If P is a reference type, P is replaced by the type referred to.
+ const ReferenceType *ParamRef = Param->getAs<ReferenceType>();
+ if (ParamRef)
+ Param = ParamRef->getPointeeType();
+
+ // - If A is a reference type, A is replaced by the type referred to.
+ const ReferenceType *ArgRef = Arg->getAs<ReferenceType>();
+ if (ArgRef)
+ Arg = ArgRef->getPointeeType();
+
+ if (ParamRef && ArgRef && S.Context.hasSameUnqualifiedType(Param, Arg)) {
+ // C++11 [temp.deduct.partial]p9:
+ // If, for a given type, deduction succeeds in both directions (i.e.,
+ // the types are identical after the transformations above) and both
+ // P and A were reference types [...]:
+ // - if [one type] was an lvalue reference and [the other type] was
+ // not, [the other type] is not considered to be at least as
+ // specialized as [the first type]
+ // - if [one type] is more cv-qualified than [the other type],
+ // [the other type] is not considered to be at least as specialized
+ // as [the first type]
+ // Objective-C ARC adds:
+ // - [one type] has non-trivial lifetime, [the other type] has
+ // __unsafe_unretained lifetime, and the types are otherwise
+ // identical
+ //
+ // A is "considered to be at least as specialized" as P iff deduction
+ // succeeds, so we model this as a deduction failure. Note that
+ // [the first type] is P and [the other type] is A here; the standard
+ // gets this backwards.
+ Qualifiers ParamQuals = Param.getQualifiers();
+ Qualifiers ArgQuals = Arg.getQualifiers();
+ if ((ParamRef->isLValueReferenceType() &&
+ !ArgRef->isLValueReferenceType()) ||
+ ParamQuals.isStrictSupersetOf(ArgQuals) ||
+ (ParamQuals.hasNonTrivialObjCLifetime() &&
+ ArgQuals.getObjCLifetime() == Qualifiers::OCL_ExplicitNone &&
+ ParamQuals.withoutObjCLifetime() ==
+ ArgQuals.withoutObjCLifetime())) {
+ Info.FirstArg = TemplateArgument(ParamIn);
+ Info.SecondArg = TemplateArgument(ArgIn);
+ return Sema::TDK_NonDeducedMismatch;
+ }
+ }
+
+ // C++11 [temp.deduct.partial]p7:
+ // Remove any top-level cv-qualifiers:
+ // - If P is a cv-qualified type, P is replaced by the cv-unqualified
+ // version of P.
+ Param = Param.getUnqualifiedType();
+ // - If A is a cv-qualified type, A is replaced by the cv-unqualified
+ // version of A.
+ Arg = Arg.getUnqualifiedType();
+ } else {
+ // C++0x [temp.deduct.call]p4 bullet 1:
+ // - If the original P is a reference type, the deduced A (i.e., the type
+ // referred to by the reference) can be more cv-qualified than the
+ // transformed A.
+ if (TDF & TDF_ParamWithReferenceType) {
+ Qualifiers Quals;
+ QualType UnqualParam = S.Context.getUnqualifiedArrayType(Param, Quals);
+ Quals.setCVRQualifiers(Quals.getCVRQualifiers() &
+ Arg.getCVRQualifiers());
+ Param = S.Context.getQualifiedType(UnqualParam, Quals);
+ }
+
+ if ((TDF & TDF_TopLevelParameterTypeList) && !Param->isFunctionType()) {
+ // C++0x [temp.deduct.type]p10:
+ // If P and A are function types that originated from deduction when
+ // taking the address of a function template (14.8.2.2) or when deducing
+ // template arguments from a function declaration (14.8.2.6) and Pi and
+ // Ai are parameters of the top-level parameter-type-list of P and A,
+ // respectively, Pi is adjusted if it is an rvalue reference to a
+ // cv-unqualified template parameter and Ai is an lvalue reference, in
+ // which case the type of Pi is changed to be the template parameter
+ // type (i.e., T&& is changed to simply T). [ Note: As a result, when
+ // Pi is T&& and Ai is X&, the adjusted Pi will be T, causing T to be
+ // deduced as X&. - end note ]
+ TDF &= ~TDF_TopLevelParameterTypeList;
+
+ if (const RValueReferenceType *ParamRef
+ = Param->getAs<RValueReferenceType>()) {
+ if (isa<TemplateTypeParmType>(ParamRef->getPointeeType()) &&
+ !ParamRef->getPointeeType().getQualifiers())
+ if (Arg->isLValueReferenceType())
+ Param = ParamRef->getPointeeType();
+ }
+ }
+ }
+
+ // C++ [temp.deduct.type]p9:
+ // A template type argument T, a template template argument TT or a
+ // template non-type argument i can be deduced if P and A have one of
+ // the following forms:
+ //
+ // T
+ // cv-list T
+ if (const TemplateTypeParmType *TemplateTypeParm
+ = Param->getAs<TemplateTypeParmType>()) {
+ // Just skip any attempts to deduce from a placeholder type.
+ if (Arg->isPlaceholderType())
+ return Sema::TDK_Success;
+
+ unsigned Index = TemplateTypeParm->getIndex();
+ bool RecanonicalizeArg = false;
+
+ // If the argument type is an array type, move the qualifiers up to the
+ // top level, so they can be matched with the qualifiers on the parameter.
+ if (isa<ArrayType>(Arg)) {
+ Qualifiers Quals;
+ Arg = S.Context.getUnqualifiedArrayType(Arg, Quals);
+ if (Quals) {
+ Arg = S.Context.getQualifiedType(Arg, Quals);
+ RecanonicalizeArg = true;
+ }
+ }
+
+    // The argument type cannot be less qualified than the parameter
+ // type.
+ if (!(TDF & TDF_IgnoreQualifiers) &&
+ hasInconsistentOrSupersetQualifiersOf(Param, Arg)) {
+ Info.Param = cast<TemplateTypeParmDecl>(TemplateParams->getParam(Index));
+ Info.FirstArg = TemplateArgument(Param);
+ Info.SecondArg = TemplateArgument(Arg);
+ return Sema::TDK_Underqualified;
+ }
+
+ assert(TemplateTypeParm->getDepth() == 0 && "Can't deduce with depth > 0");
+ assert(Arg != S.Context.OverloadTy && "Unresolved overloaded function");
+ QualType DeducedType = Arg;
+
+ // Remove any qualifiers on the parameter from the deduced type.
+ // We checked the qualifiers for consistency above.
+ Qualifiers DeducedQs = DeducedType.getQualifiers();
+ Qualifiers ParamQs = Param.getQualifiers();
+ DeducedQs.removeCVRQualifiers(ParamQs.getCVRQualifiers());
+ if (ParamQs.hasObjCGCAttr())
+ DeducedQs.removeObjCGCAttr();
+ if (ParamQs.hasAddressSpace())
+ DeducedQs.removeAddressSpace();
+ if (ParamQs.hasObjCLifetime())
+ DeducedQs.removeObjCLifetime();
+
+ // Objective-C ARC:
+ // If template deduction would produce a lifetime qualifier on a type
+ // that is not a lifetime type, template argument deduction fails.
+ if (ParamQs.hasObjCLifetime() && !DeducedType->isObjCLifetimeType() &&
+ !DeducedType->isDependentType()) {
+ Info.Param = cast<TemplateTypeParmDecl>(TemplateParams->getParam(Index));
+ Info.FirstArg = TemplateArgument(Param);
+ Info.SecondArg = TemplateArgument(Arg);
+ return Sema::TDK_Underqualified;
+ }
+
+ // Objective-C ARC:
+ // If template deduction would produce an argument type with lifetime type
+ // but no lifetime qualifier, the __strong lifetime qualifier is inferred.
+ if (S.getLangOpts().ObjCAutoRefCount &&
+ DeducedType->isObjCLifetimeType() &&
+ !DeducedQs.hasObjCLifetime())
+ DeducedQs.setObjCLifetime(Qualifiers::OCL_Strong);
+
+ DeducedType = S.Context.getQualifiedType(DeducedType.getUnqualifiedType(),
+ DeducedQs);
+
+ if (RecanonicalizeArg)
+ DeducedType = S.Context.getCanonicalType(DeducedType);
+
+ DeducedTemplateArgument NewDeduced(DeducedType);
+ DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
+ Deduced[Index],
+ NewDeduced);
+ if (Result.isNull()) {
+ Info.Param = cast<TemplateTypeParmDecl>(TemplateParams->getParam(Index));
+ Info.FirstArg = Deduced[Index];
+ Info.SecondArg = NewDeduced;
+ return Sema::TDK_Inconsistent;
+ }
+
+ Deduced[Index] = Result;
+ return Sema::TDK_Success;
+ }
+
+ // Set up the template argument deduction information for a failure.
+ Info.FirstArg = TemplateArgument(ParamIn);
+ Info.SecondArg = TemplateArgument(ArgIn);
+
+ // If the parameter is an already-substituted template parameter
+ // pack, do nothing: we don't know which of its arguments to look
+ // at, so we have to wait until all of the parameter packs in this
+ // expansion have arguments.
+ if (isa<SubstTemplateTypeParmPackType>(Param))
+ return Sema::TDK_Success;
+
+ // Check the cv-qualifiers on the parameter and argument types.
+ CanQualType CanParam = S.Context.getCanonicalType(Param);
+ CanQualType CanArg = S.Context.getCanonicalType(Arg);
+ if (!(TDF & TDF_IgnoreQualifiers)) {
+ if (TDF & TDF_ParamWithReferenceType) {
+ if (hasInconsistentOrSupersetQualifiersOf(Param, Arg))
+ return Sema::TDK_NonDeducedMismatch;
+ } else if (!IsPossiblyOpaquelyQualifiedType(Param)) {
+ if (Param.getCVRQualifiers() != Arg.getCVRQualifiers())
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ // If the parameter type is not dependent, there is nothing to deduce.
+ if (!Param->isDependentType()) {
+ if (!(TDF & TDF_SkipNonDependent)) {
+ bool NonDeduced = (TDF & TDF_InOverloadResolution)?
+ !S.isSameOrCompatibleFunctionType(CanParam, CanArg) :
+ Param != Arg;
+ if (NonDeduced) {
+ return Sema::TDK_NonDeducedMismatch;
+ }
+ }
+ return Sema::TDK_Success;
+ }
+ } else if (!Param->isDependentType()) {
+ CanQualType ParamUnqualType = CanParam.getUnqualifiedType(),
+ ArgUnqualType = CanArg.getUnqualifiedType();
+ bool Success = (TDF & TDF_InOverloadResolution)?
+ S.isSameOrCompatibleFunctionType(ParamUnqualType,
+ ArgUnqualType) :
+ ParamUnqualType == ArgUnqualType;
+ if (Success)
+ return Sema::TDK_Success;
+ }
+
+ switch (Param->getTypeClass()) {
+ // Non-canonical types cannot appear here.
+#define NON_CANONICAL_TYPE(Class, Base) \
+ case Type::Class: llvm_unreachable("deducing non-canonical type: " #Class);
+#define TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+
+ case Type::TemplateTypeParm:
+ case Type::SubstTemplateTypeParmPack:
+ llvm_unreachable("Type nodes handled above");
+
+ // These types cannot be dependent, so simply check whether the types are
+ // the same.
+ case Type::Builtin:
+ case Type::VariableArray:
+ case Type::Vector:
+ case Type::FunctionNoProto:
+ case Type::Record:
+ case Type::Enum:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer: {
+ if (TDF & TDF_SkipNonDependent)
+ return Sema::TDK_Success;
+
+ if (TDF & TDF_IgnoreQualifiers) {
+ Param = Param.getUnqualifiedType();
+ Arg = Arg.getUnqualifiedType();
+ }
+
+ return Param == Arg? Sema::TDK_Success : Sema::TDK_NonDeducedMismatch;
+ }
+
+ // _Complex T [placeholder extension]
+ case Type::Complex:
+ if (const ComplexType *ComplexArg = Arg->getAs<ComplexType>())
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ cast<ComplexType>(Param)->getElementType(),
+ ComplexArg->getElementType(),
+ Info, Deduced, TDF);
+
+ return Sema::TDK_NonDeducedMismatch;
+
+ // _Atomic T [extension]
+ case Type::Atomic:
+ if (const AtomicType *AtomicArg = Arg->getAs<AtomicType>())
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ cast<AtomicType>(Param)->getValueType(),
+ AtomicArg->getValueType(),
+ Info, Deduced, TDF);
+
+ return Sema::TDK_NonDeducedMismatch;
+
+ // T *
+ case Type::Pointer: {
+ QualType PointeeType;
+ if (const PointerType *PointerArg = Arg->getAs<PointerType>()) {
+ PointeeType = PointerArg->getPointeeType();
+ } else if (const ObjCObjectPointerType *PointerArg
+ = Arg->getAs<ObjCObjectPointerType>()) {
+ PointeeType = PointerArg->getPointeeType();
+ } else {
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ unsigned SubTDF = TDF & (TDF_IgnoreQualifiers | TDF_DerivedClass);
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ cast<PointerType>(Param)->getPointeeType(),
+ PointeeType,
+ Info, Deduced, SubTDF);
+ }
+
+ // T &
+ case Type::LValueReference: {
+ const LValueReferenceType *ReferenceArg =
+ Arg->getAs<LValueReferenceType>();
+ if (!ReferenceArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ cast<LValueReferenceType>(Param)->getPointeeType(),
+ ReferenceArg->getPointeeType(), Info, Deduced, 0);
+ }
+
+ // T && [C++0x]
+ case Type::RValueReference: {
+ const RValueReferenceType *ReferenceArg =
+ Arg->getAs<RValueReferenceType>();
+ if (!ReferenceArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ cast<RValueReferenceType>(Param)->getPointeeType(),
+ ReferenceArg->getPointeeType(),
+ Info, Deduced, 0);
+ }
+
+ // T [] (implied, but not stated explicitly)
+ case Type::IncompleteArray: {
+ const IncompleteArrayType *IncompleteArrayArg =
+ S.Context.getAsIncompleteArrayType(Arg);
+ if (!IncompleteArrayArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ unsigned SubTDF = TDF & TDF_IgnoreQualifiers;
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ S.Context.getAsIncompleteArrayType(Param)->getElementType(),
+ IncompleteArrayArg->getElementType(),
+ Info, Deduced, SubTDF);
+ }
+
+ // T [integer-constant]
+ case Type::ConstantArray: {
+ const ConstantArrayType *ConstantArrayArg =
+ S.Context.getAsConstantArrayType(Arg);
+ if (!ConstantArrayArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ const ConstantArrayType *ConstantArrayParm =
+ S.Context.getAsConstantArrayType(Param);
+ if (ConstantArrayArg->getSize() != ConstantArrayParm->getSize())
+ return Sema::TDK_NonDeducedMismatch;
+
+ unsigned SubTDF = TDF & TDF_IgnoreQualifiers;
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ ConstantArrayParm->getElementType(),
+ ConstantArrayArg->getElementType(),
+ Info, Deduced, SubTDF);
+ }
+
+ // type [i]
+ case Type::DependentSizedArray: {
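+    // e.g., with template<typename T, int N> void f(T (&)[N]), a call with
+    // an int[10] argument deduces T = int from the element type below and
+    // then N = 10 from the array bound.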
+ const ArrayType *ArrayArg = S.Context.getAsArrayType(Arg);
+ if (!ArrayArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ unsigned SubTDF = TDF & TDF_IgnoreQualifiers;
+
+ // Check the element type of the arrays
+ const DependentSizedArrayType *DependentArrayParm
+ = S.Context.getAsDependentSizedArrayType(Param);
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ DependentArrayParm->getElementType(),
+ ArrayArg->getElementType(),
+ Info, Deduced, SubTDF))
+ return Result;
+
+    // Determine whether the array bound is something we can deduce.
+ NonTypeTemplateParmDecl *NTTP
+ = getDeducedParameterFromExpr(DependentArrayParm->getSizeExpr());
+ if (!NTTP)
+ return Sema::TDK_Success;
+
+ // We can perform template argument deduction for the given non-type
+ // template parameter.
+ assert(NTTP->getDepth() == 0 &&
+ "Cannot deduce non-type template argument at depth > 0");
+ if (const ConstantArrayType *ConstantArrayArg
+ = dyn_cast<ConstantArrayType>(ArrayArg)) {
+ llvm::APSInt Size(ConstantArrayArg->getSize());
+ return DeduceNonTypeTemplateArgument(S, NTTP, Size,
+ S.Context.getSizeType(),
+ /*ArrayBound=*/true,
+ Info, Deduced);
+ }
+ if (const DependentSizedArrayType *DependentArrayArg
+ = dyn_cast<DependentSizedArrayType>(ArrayArg))
+ if (DependentArrayArg->getSizeExpr())
+ return DeduceNonTypeTemplateArgument(S, NTTP,
+ DependentArrayArg->getSizeExpr(),
+ Info, Deduced);
+
+    // An array type without a deducible bound does not match a
+    // dependently-sized array type.
+    return Sema::TDK_NonDeducedMismatch;
+ }
+
+ // type(*)(T)
+ // T(*)()
+ // T(*)(T)
+ case Type::FunctionProto: {
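+    // e.g., deducing T(T) against int(int): the qualifiers, ref-qualifier,
+    // and variadic-ness must agree exactly; the return type and each
+    // parameter type are then deduced in turn, yielding T = int.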
+ unsigned SubTDF = TDF & TDF_TopLevelParameterTypeList;
+ const FunctionProtoType *FunctionProtoArg =
+ dyn_cast<FunctionProtoType>(Arg);
+ if (!FunctionProtoArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ const FunctionProtoType *FunctionProtoParam =
+ cast<FunctionProtoType>(Param);
+
+ if (FunctionProtoParam->getTypeQuals()
+ != FunctionProtoArg->getTypeQuals() ||
+ FunctionProtoParam->getRefQualifier()
+ != FunctionProtoArg->getRefQualifier() ||
+ FunctionProtoParam->isVariadic() != FunctionProtoArg->isVariadic())
+ return Sema::TDK_NonDeducedMismatch;
+
+ // Check return types.
+ if (Sema::TemplateDeductionResult Result =
+ DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, FunctionProtoParam->getReturnType(),
+ FunctionProtoArg->getReturnType(), Info, Deduced, 0))
+ return Result;
+
+ return DeduceTemplateArguments(
+ S, TemplateParams, FunctionProtoParam->param_type_begin(),
+ FunctionProtoParam->getNumParams(),
+ FunctionProtoArg->param_type_begin(),
+ FunctionProtoArg->getNumParams(), Info, Deduced, SubTDF);
+ }
+
+ case Type::InjectedClassName: {
+ // Treat a template's injected-class-name as if the template
+ // specialization type had been used.
+ Param = cast<InjectedClassNameType>(Param)
+ ->getInjectedSpecializationType();
+ assert(isa<TemplateSpecializationType>(Param) &&
+ "injected class name is not a template specialization type");
+ // fall through
+ }
+
+ // template-name<T> (where template-name refers to a class template)
+ // template-name<i>
+ // TT<T>
+ // TT<i>
+ // TT<>
+ case Type::TemplateSpecialization: {
+ const TemplateSpecializationType *SpecParam
+ = cast<TemplateSpecializationType>(Param);
+
+ // Try to deduce template arguments from the template-id.
+ Sema::TemplateDeductionResult Result
+ = DeduceTemplateArguments(S, TemplateParams, SpecParam, Arg,
+ Info, Deduced);
+
+ if (Result && (TDF & TDF_DerivedClass)) {
+ // C++ [temp.deduct.call]p3b3:
+ // If P is a class, and P has the form template-id, then A can be a
+ // derived class of the deduced A. Likewise, if P is a pointer to a
+ // class of the form template-id, A can be a pointer to a derived
+ // class pointed to by the deduced A.
+ //
+ // More importantly:
+ // These alternatives are considered only if type deduction would
+ // otherwise fail.
+ if (const RecordType *RecordT = Arg->getAs<RecordType>()) {
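+        // e.g., deducing X<T> against a class derived from X<int> succeeds
+        // with T = int, but only because deduction against the derived
+        // class itself has already failed.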
+ // We cannot inspect base classes as part of deduction when the type
+ // is incomplete, so either instantiate any templates necessary to
+ // complete the type, or skip over it if it cannot be completed.
+ if (!S.isCompleteType(Info.getLocation(), Arg))
+ return Result;
+
+ // Use data recursion to crawl through the list of base classes.
+ // Visited contains the set of nodes we have already visited, while
+ // ToVisit is our stack of records that we still need to visit.
+ llvm::SmallPtrSet<const RecordType *, 8> Visited;
+ SmallVector<const RecordType *, 8> ToVisit;
+ ToVisit.push_back(RecordT);
+ bool Successful = false;
+ SmallVector<DeducedTemplateArgument, 8> DeducedOrig(Deduced.begin(),
+ Deduced.end());
+ while (!ToVisit.empty()) {
+ // Retrieve the next class in the inheritance hierarchy.
+ const RecordType *NextT = ToVisit.pop_back_val();
+
+ // If we have already seen this type, skip it.
+ if (!Visited.insert(NextT).second)
+ continue;
+
+ // If this is a base class, try to perform template argument
+ // deduction from it.
+ if (NextT != RecordT) {
+ TemplateDeductionInfo BaseInfo(Info.getLocation());
+ Sema::TemplateDeductionResult BaseResult
+ = DeduceTemplateArguments(S, TemplateParams, SpecParam,
+ QualType(NextT, 0), BaseInfo,
+ Deduced);
+
+ // If template argument deduction for this base was successful,
+ // note that we had some success. Otherwise, ignore any deductions
+ // from this base class.
+ if (BaseResult == Sema::TDK_Success) {
+ Successful = true;
+ DeducedOrig.clear();
+ DeducedOrig.append(Deduced.begin(), Deduced.end());
+ Info.Param = BaseInfo.Param;
+ Info.FirstArg = BaseInfo.FirstArg;
+ Info.SecondArg = BaseInfo.SecondArg;
+          } else
+            Deduced = DeducedOrig;
+ }
+
+ // Visit base classes
+ CXXRecordDecl *Next = cast<CXXRecordDecl>(NextT->getDecl());
+ for (const auto &Base : Next->bases()) {
+ assert(Base.getType()->isRecordType() &&
+ "Base class that isn't a record?");
+ ToVisit.push_back(Base.getType()->getAs<RecordType>());
+ }
+ }
+
+ if (Successful)
+ return Sema::TDK_Success;
+ }
+
+ }
+
+ return Result;
+ }
+
+ // T type::*
+ // T T::*
+ // T (type::*)()
+ // type (T::*)()
+ // type (type::*)(T)
+ // type (T::*)(T)
+ // T (type::*)(T)
+ // T (T::*)()
+ // T (T::*)(T)
+ case Type::MemberPointer: {
+ const MemberPointerType *MemPtrParam = cast<MemberPointerType>(Param);
+ const MemberPointerType *MemPtrArg = dyn_cast<MemberPointerType>(Arg);
+ if (!MemPtrArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ QualType ParamPointeeType = MemPtrParam->getPointeeType();
+ if (ParamPointeeType->isFunctionType())
+ S.adjustMemberFunctionCC(ParamPointeeType, /*IsStatic=*/true,
+ /*IsCtorOrDtor=*/false, Info.getLocation());
+ QualType ArgPointeeType = MemPtrArg->getPointeeType();
+ if (ArgPointeeType->isFunctionType())
+ S.adjustMemberFunctionCC(ArgPointeeType, /*IsStatic=*/true,
+ /*IsCtorOrDtor=*/false, Info.getLocation());
+
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ ParamPointeeType,
+ ArgPointeeType,
+ Info, Deduced,
+ TDF & TDF_IgnoreQualifiers))
+ return Result;
+
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ QualType(MemPtrParam->getClass(), 0),
+ QualType(MemPtrArg->getClass(), 0),
+ Info, Deduced,
+ TDF & TDF_IgnoreQualifiers);
+ }
+
+ // (clang extension)
+ //
+ // type(^)(T)
+ // T(^)()
+ // T(^)(T)
+ case Type::BlockPointer: {
+ const BlockPointerType *BlockPtrParam = cast<BlockPointerType>(Param);
+ const BlockPointerType *BlockPtrArg = dyn_cast<BlockPointerType>(Arg);
+
+ if (!BlockPtrArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ BlockPtrParam->getPointeeType(),
+ BlockPtrArg->getPointeeType(),
+ Info, Deduced, 0);
+ }
+
+ // (clang extension)
+ //
+  // T __attribute__((ext_vector_type(<integral constant>)))
+ case Type::ExtVector: {
+ const ExtVectorType *VectorParam = cast<ExtVectorType>(Param);
+ if (const ExtVectorType *VectorArg = dyn_cast<ExtVectorType>(Arg)) {
+ // Make sure that the vectors have the same number of elements.
+ if (VectorParam->getNumElements() != VectorArg->getNumElements())
+ return Sema::TDK_NonDeducedMismatch;
+
+ // Perform deduction on the element types.
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ VectorParam->getElementType(),
+ VectorArg->getElementType(),
+ Info, Deduced, TDF);
+ }
+
+ if (const DependentSizedExtVectorType *VectorArg
+ = dyn_cast<DependentSizedExtVectorType>(Arg)) {
+ // We can't check the number of elements, since the argument has a
+ // dependent number of elements. This can only occur during partial
+ // ordering.
+
+ // Perform deduction on the element types.
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ VectorParam->getElementType(),
+ VectorArg->getElementType(),
+ Info, Deduced, TDF);
+ }
+
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ // (clang extension)
+ //
+  // T __attribute__((ext_vector_type(N)))
+ case Type::DependentSizedExtVector: {
+ const DependentSizedExtVectorType *VectorParam
+ = cast<DependentSizedExtVectorType>(Param);
+
+ if (const ExtVectorType *VectorArg = dyn_cast<ExtVectorType>(Arg)) {
+ // Perform deduction on the element types.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ VectorParam->getElementType(),
+ VectorArg->getElementType(),
+ Info, Deduced, TDF))
+ return Result;
+
+ // Perform deduction on the vector size, if we can.
+ NonTypeTemplateParmDecl *NTTP
+ = getDeducedParameterFromExpr(VectorParam->getSizeExpr());
+ if (!NTTP)
+ return Sema::TDK_Success;
+
+ llvm::APSInt ArgSize(S.Context.getTypeSize(S.Context.IntTy), false);
+ ArgSize = VectorArg->getNumElements();
+ return DeduceNonTypeTemplateArgument(S, NTTP, ArgSize, S.Context.IntTy,
+ false, Info, Deduced);
+ }
+
+ if (const DependentSizedExtVectorType *VectorArg
+ = dyn_cast<DependentSizedExtVectorType>(Arg)) {
+ // Perform deduction on the element types.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ VectorParam->getElementType(),
+ VectorArg->getElementType(),
+ Info, Deduced, TDF))
+ return Result;
+
+ // Perform deduction on the vector size, if we can.
+ NonTypeTemplateParmDecl *NTTP
+ = getDeducedParameterFromExpr(VectorParam->getSizeExpr());
+ if (!NTTP)
+ return Sema::TDK_Success;
+
+ return DeduceNonTypeTemplateArgument(S, NTTP, VectorArg->getSizeExpr(),
+ Info, Deduced);
+ }
+
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::DependentName:
+ case Type::UnresolvedUsing:
+ case Type::Decltype:
+ case Type::UnaryTransform:
+ case Type::Auto:
+ case Type::DependentTemplateSpecialization:
+ case Type::PackExpansion:
+ // No template argument deduction for these types
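+    // (e.g., in template<typename T> void f(typename X<T>::type), the
+    // parameter type is a non-deduced context; T must be deduced from
+    // another parameter or specified explicitly.)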
+ return Sema::TDK_Success;
+ }
+
+ llvm_unreachable("Invalid Type Class!");
+}
+
+static Sema::TemplateDeductionResult
+DeduceTemplateArguments(Sema &S,
+ TemplateParameterList *TemplateParams,
+ const TemplateArgument &Param,
+ TemplateArgument Arg,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ // If the template argument is a pack expansion, perform template argument
+ // deduction against the pattern of that expansion. This only occurs during
+ // partial ordering.
+ if (Arg.isPackExpansion())
+ Arg = Arg.getPackExpansionPattern();
+
+ switch (Param.getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("Null template argument in parameter list");
+
+ case TemplateArgument::Type:
+ if (Arg.getKind() == TemplateArgument::Type)
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ Param.getAsType(),
+ Arg.getAsType(),
+ Info, Deduced, 0);
+ Info.FirstArg = Param;
+ Info.SecondArg = Arg;
+ return Sema::TDK_NonDeducedMismatch;
+
+ case TemplateArgument::Template:
+ if (Arg.getKind() == TemplateArgument::Template)
+ return DeduceTemplateArguments(S, TemplateParams,
+ Param.getAsTemplate(),
+ Arg.getAsTemplate(), Info, Deduced);
+ Info.FirstArg = Param;
+ Info.SecondArg = Arg;
+ return Sema::TDK_NonDeducedMismatch;
+
+ case TemplateArgument::TemplateExpansion:
+ llvm_unreachable("caller should handle pack expansions");
+
+ case TemplateArgument::Declaration:
+ if (Arg.getKind() == TemplateArgument::Declaration &&
+ isSameDeclaration(Param.getAsDecl(), Arg.getAsDecl()))
+ return Sema::TDK_Success;
+
+ Info.FirstArg = Param;
+ Info.SecondArg = Arg;
+ return Sema::TDK_NonDeducedMismatch;
+
+ case TemplateArgument::NullPtr:
+ if (Arg.getKind() == TemplateArgument::NullPtr &&
+ S.Context.hasSameType(Param.getNullPtrType(), Arg.getNullPtrType()))
+ return Sema::TDK_Success;
+
+ Info.FirstArg = Param;
+ Info.SecondArg = Arg;
+ return Sema::TDK_NonDeducedMismatch;
+
+  case TemplateArgument::Integral:
+    if (Arg.getKind() == TemplateArgument::Integral &&
+        hasSameExtendedValue(Param.getAsIntegral(), Arg.getAsIntegral()))
+      return Sema::TDK_Success;
+
+    // A mismatched integral value, an expression, or any other argument
+    // kind is a non-deduced mismatch.
+    Info.FirstArg = Param;
+    Info.SecondArg = Arg;
+    return Sema::TDK_NonDeducedMismatch;
+
+ case TemplateArgument::Expression: {
+ if (NonTypeTemplateParmDecl *NTTP
+ = getDeducedParameterFromExpr(Param.getAsExpr())) {
+ if (Arg.getKind() == TemplateArgument::Integral)
+ return DeduceNonTypeTemplateArgument(S, NTTP,
+ Arg.getAsIntegral(),
+ Arg.getIntegralType(),
+ /*ArrayBound=*/false,
+ Info, Deduced);
+ if (Arg.getKind() == TemplateArgument::Expression)
+ return DeduceNonTypeTemplateArgument(S, NTTP, Arg.getAsExpr(),
+ Info, Deduced);
+ if (Arg.getKind() == TemplateArgument::Declaration)
+ return DeduceNonTypeTemplateArgument(S, NTTP, Arg.getAsDecl(),
+ Info, Deduced);
+
+ Info.FirstArg = Param;
+ Info.SecondArg = Arg;
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ // Can't deduce anything, but that's okay.
+ return Sema::TDK_Success;
+ }
+ case TemplateArgument::Pack:
+ llvm_unreachable("Argument packs should be expanded by the caller!");
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+/// \brief Determine whether there is a template argument to be used for
+/// deduction.
+///
+/// This routine "expands" argument packs in-place, overriding its input
+/// parameters so that \c Args[ArgIdx] will be the available template argument.
+///
+/// \returns true if there is another template argument (which will be at
+/// \c Args[ArgIdx]), false otherwise.
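+///
+/// For example, if Args[ArgIdx] refers to a trailing argument pack
+/// {float, char}, Args and NumArgs are redirected into the pack and ArgIdx
+/// is reset to 0, so Args[0] is float.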
+static bool hasTemplateArgumentForDeduction(const TemplateArgument *&Args,
+ unsigned &ArgIdx,
+ unsigned &NumArgs) {
+ if (ArgIdx == NumArgs)
+ return false;
+
+ const TemplateArgument &Arg = Args[ArgIdx];
+ if (Arg.getKind() != TemplateArgument::Pack)
+ return true;
+
+ assert(ArgIdx == NumArgs - 1 && "Pack not at the end of argument list?");
+ Args = Arg.pack_begin();
+ NumArgs = Arg.pack_size();
+ ArgIdx = 0;
+ return ArgIdx < NumArgs;
+}
+
+/// \brief Determine whether the given set of template arguments has a pack
+/// expansion that is not the last template argument.
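+///
+/// For example, in X<Ts..., int> the expansion Ts... is followed by another
+/// argument; callers treat such a list as a non-deduced context per
+/// C++11 [temp.deduct.type]p9.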
+static bool hasPackExpansionBeforeEnd(const TemplateArgument *Args,
+ unsigned NumArgs) {
+ unsigned ArgIdx = 0;
+ while (ArgIdx < NumArgs) {
+ const TemplateArgument &Arg = Args[ArgIdx];
+
+ // Unwrap argument packs.
+ if (Args[ArgIdx].getKind() == TemplateArgument::Pack) {
+ Args = Arg.pack_begin();
+ NumArgs = Arg.pack_size();
+ ArgIdx = 0;
+ continue;
+ }
+
+ ++ArgIdx;
+ if (ArgIdx == NumArgs)
+ return false;
+
+ if (Arg.isPackExpansion())
+ return true;
+ }
+
+ return false;
+}
+
+static Sema::TemplateDeductionResult
+DeduceTemplateArguments(Sema &S,
+ TemplateParameterList *TemplateParams,
+ const TemplateArgument *Params, unsigned NumParams,
+ const TemplateArgument *Args, unsigned NumArgs,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ // C++0x [temp.deduct.type]p9:
+ // If the template argument list of P contains a pack expansion that is not
+ // the last template argument, the entire template argument list is a
+ // non-deduced context.
+ if (hasPackExpansionBeforeEnd(Params, NumParams))
+ return Sema::TDK_Success;
+
+ // C++0x [temp.deduct.type]p9:
+ // If P has a form that contains <T> or <i>, then each argument Pi of the
+ // respective template argument list P is compared with the corresponding
+ // argument Ai of the corresponding template argument list of A.
+ unsigned ArgIdx = 0, ParamIdx = 0;
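+  // e.g., matching P = <T, U*> against A = <int, float*> deduces T = int
+  // from the first pair and U = float from the second.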
+ for (; hasTemplateArgumentForDeduction(Params, ParamIdx, NumParams);
+ ++ParamIdx) {
+ if (!Params[ParamIdx].isPackExpansion()) {
+ // The simple case: deduce template arguments by matching Pi and Ai.
+
+ // Check whether we have enough arguments.
+ if (!hasTemplateArgumentForDeduction(Args, ArgIdx, NumArgs))
+ return Sema::TDK_Success;
+
+ if (Args[ArgIdx].isPackExpansion()) {
+ // FIXME: We follow the logic of C++0x [temp.deduct.type]p22 here,
+ // but applied to pack expansions that are template arguments.
+ return Sema::TDK_MiscellaneousDeductionFailure;
+ }
+
+ // Perform deduction for this Pi/Ai pair.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArguments(S, TemplateParams,
+ Params[ParamIdx], Args[ArgIdx],
+ Info, Deduced))
+ return Result;
+
+ // Move to the next argument.
+ ++ArgIdx;
+ continue;
+ }
+
+ // The parameter is a pack expansion.
+
+ // C++0x [temp.deduct.type]p9:
+ // If Pi is a pack expansion, then the pattern of Pi is compared with
+ // each remaining argument in the template argument list of A. Each
+ // comparison deduces template arguments for subsequent positions in the
+ // template parameter packs expanded by Pi.
+ TemplateArgument Pattern = Params[ParamIdx].getPackExpansionPattern();
+
+ // FIXME: If there are no remaining arguments, we can bail out early
+ // and set any deduced parameter packs to an empty argument pack.
+ // The latter part of this is a (minor) correctness issue.
+
+ // Prepare to deduce the packs within the pattern.
+ PackDeductionScope PackScope(S, TemplateParams, Deduced, Info, Pattern);
+
+ // Keep track of the deduced template arguments for each parameter pack
+ // expanded by this pack expansion (the outer index) and for each
+ // template argument (the inner SmallVectors).
+ bool HasAnyArguments = false;
+ for (; hasTemplateArgumentForDeduction(Args, ArgIdx, NumArgs); ++ArgIdx) {
+ HasAnyArguments = true;
+
+ // Deduce template arguments from the pattern.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArguments(S, TemplateParams, Pattern, Args[ArgIdx],
+ Info, Deduced))
+ return Result;
+
+ PackScope.nextPackElement();
+ }
+
+ // Build argument packs for each of the parameter packs expanded by this
+ // pack expansion.
+ if (auto Result = PackScope.finish(HasAnyArguments))
+ return Result;
+ }
+
+ return Sema::TDK_Success;
+}
+
+static Sema::TemplateDeductionResult
+DeduceTemplateArguments(Sema &S,
+ TemplateParameterList *TemplateParams,
+ const TemplateArgumentList &ParamList,
+ const TemplateArgumentList &ArgList,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ return DeduceTemplateArguments(S, TemplateParams,
+ ParamList.data(), ParamList.size(),
+ ArgList.data(), ArgList.size(),
+ Info, Deduced);
+}
+
+/// \brief Determine whether two template arguments are the same.
+static bool isSameTemplateArg(ASTContext &Context,
+ const TemplateArgument &X,
+ const TemplateArgument &Y) {
+ if (X.getKind() != Y.getKind())
+ return false;
+
+ switch (X.getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("Comparing NULL template argument");
+
+ case TemplateArgument::Type:
+ return Context.getCanonicalType(X.getAsType()) ==
+ Context.getCanonicalType(Y.getAsType());
+
+ case TemplateArgument::Declaration:
+ return isSameDeclaration(X.getAsDecl(), Y.getAsDecl());
+
+ case TemplateArgument::NullPtr:
+ return Context.hasSameType(X.getNullPtrType(), Y.getNullPtrType());
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ return Context.getCanonicalTemplateName(
+ X.getAsTemplateOrTemplatePattern()).getAsVoidPointer() ==
+ Context.getCanonicalTemplateName(
+ Y.getAsTemplateOrTemplatePattern()).getAsVoidPointer();
+
+ case TemplateArgument::Integral:
+ return X.getAsIntegral() == Y.getAsIntegral();
+
+ case TemplateArgument::Expression: {
+ llvm::FoldingSetNodeID XID, YID;
+ X.getAsExpr()->Profile(XID, Context, true);
+ Y.getAsExpr()->Profile(YID, Context, true);
+ return XID == YID;
+ }
+
+ case TemplateArgument::Pack:
+ if (X.pack_size() != Y.pack_size())
+ return false;
+
+ for (TemplateArgument::pack_iterator XP = X.pack_begin(),
+ XPEnd = X.pack_end(),
+ YP = Y.pack_begin();
+ XP != XPEnd; ++XP, ++YP)
+ if (!isSameTemplateArg(Context, *XP, *YP))
+ return false;
+
+ return true;
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+/// \brief Allocate a TemplateArgumentLoc where all locations have
+/// been initialized to the given location.
+///
+/// \param S The semantic analysis object.
+///
+/// \param Arg The template argument we are producing template argument
+/// location information for.
+///
+/// \param NTTPType For a declaration template argument, the type of
+/// the non-type template parameter that corresponds to this template
+/// argument.
+///
+/// \param Loc The source location to use for the resulting template
+/// argument.
+static TemplateArgumentLoc
+getTrivialTemplateArgumentLoc(Sema &S,
+ const TemplateArgument &Arg,
+ QualType NTTPType,
+ SourceLocation Loc) {
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("Can't get a NULL template argument here");
+
+ case TemplateArgument::Type:
+ return TemplateArgumentLoc(Arg,
+ S.Context.getTrivialTypeSourceInfo(Arg.getAsType(), Loc));
+
+ case TemplateArgument::Declaration: {
+ Expr *E
+ = S.BuildExpressionFromDeclTemplateArgument(Arg, NTTPType, Loc)
+ .getAs<Expr>();
+ return TemplateArgumentLoc(TemplateArgument(E), E);
+ }
+
+ case TemplateArgument::NullPtr: {
+ Expr *E
+ = S.BuildExpressionFromDeclTemplateArgument(Arg, NTTPType, Loc)
+ .getAs<Expr>();
+ return TemplateArgumentLoc(TemplateArgument(NTTPType, /*isNullPtr*/true),
+ E);
+ }
+
+ case TemplateArgument::Integral: {
+ Expr *E
+ = S.BuildExpressionFromIntegralTemplateArgument(Arg, Loc).getAs<Expr>();
+ return TemplateArgumentLoc(TemplateArgument(E), E);
+ }
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion: {
+ NestedNameSpecifierLocBuilder Builder;
+ TemplateName Template = Arg.getAsTemplate();
+ if (DependentTemplateName *DTN = Template.getAsDependentTemplateName())
+ Builder.MakeTrivial(S.Context, DTN->getQualifier(), Loc);
+ else if (QualifiedTemplateName *QTN =
+ Template.getAsQualifiedTemplateName())
+ Builder.MakeTrivial(S.Context, QTN->getQualifier(), Loc);
+
+ if (Arg.getKind() == TemplateArgument::Template)
+ return TemplateArgumentLoc(Arg,
+ Builder.getWithLocInContext(S.Context),
+ Loc);
+
+ return TemplateArgumentLoc(Arg, Builder.getWithLocInContext(S.Context),
+ Loc, Loc);
+ }
+
+ case TemplateArgument::Expression:
+ return TemplateArgumentLoc(Arg, Arg.getAsExpr());
+
+ case TemplateArgument::Pack:
+ return TemplateArgumentLoc(Arg, TemplateArgumentLocInfo());
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+/// \brief Convert the given deduced template argument and add it to the set of
+/// fully-converted template arguments.
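+///
+/// For a pack argument this recurses over the pack's elements, so a deduced
+/// pack such as {int, long} is checked element by element against the
+/// parameter before being rebuilt into a single pack argument.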
+static bool
+ConvertDeducedTemplateArgument(Sema &S, NamedDecl *Param,
+ DeducedTemplateArgument Arg,
+ NamedDecl *Template,
+ QualType NTTPType,
+ unsigned ArgumentPackIndex,
+ TemplateDeductionInfo &Info,
+ bool InFunctionTemplate,
+ SmallVectorImpl<TemplateArgument> &Output) {
+ if (Arg.getKind() == TemplateArgument::Pack) {
+ // This is a template argument pack, so check each of its arguments against
+ // the template parameter.
+ SmallVector<TemplateArgument, 2> PackedArgsBuilder;
+ for (const auto &P : Arg.pack_elements()) {
+ // When converting the deduced template argument, append it to the
+ // general output list. We need to do this so that the template argument
+ // checking logic has all of the prior template arguments available.
+ DeducedTemplateArgument InnerArg(P);
+ InnerArg.setDeducedFromArrayBound(Arg.wasDeducedFromArrayBound());
+ if (ConvertDeducedTemplateArgument(S, Param, InnerArg, Template,
+ NTTPType, PackedArgsBuilder.size(),
+ Info, InFunctionTemplate, Output))
+ return true;
+
+ // Move the converted template argument into our argument pack.
+ PackedArgsBuilder.push_back(Output.pop_back_val());
+ }
+
+ // Create the resulting argument pack.
+ Output.push_back(
+ TemplateArgument::CreatePackCopy(S.Context, PackedArgsBuilder));
+ return false;
+ }
+
+ // Convert the deduced template argument into a template
+ // argument that we can check, almost as if the user had written
+ // the template argument explicitly.
+ TemplateArgumentLoc ArgLoc = getTrivialTemplateArgumentLoc(S, Arg, NTTPType,
+ Info.getLocation());
+
+ // Check the template argument, converting it as necessary.
+ return S.CheckTemplateArgument(Param, ArgLoc,
+ Template,
+ Template->getLocation(),
+ Template->getSourceRange().getEnd(),
+ ArgumentPackIndex,
+ Output,
+ InFunctionTemplate
+ ? (Arg.wasDeducedFromArrayBound()
+ ? Sema::CTAK_DeducedFromArrayBound
+ : Sema::CTAK_Deduced)
+ : Sema::CTAK_Specified);
+}
+
+/// Complete template argument deduction for a class template partial
+/// specialization.
+static Sema::TemplateDeductionResult
+FinishTemplateArgumentDeduction(Sema &S,
+ ClassTemplatePartialSpecializationDecl *Partial,
+ const TemplateArgumentList &TemplateArgs,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ TemplateDeductionInfo &Info) {
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated);
+ Sema::SFINAETrap Trap(S);
+
+ Sema::ContextRAII SavedContext(S, Partial);
+
+ // C++ [temp.deduct.type]p2:
+ // [...] or if any template argument remains neither deduced nor
+ // explicitly specified, template argument deduction fails.
+ SmallVector<TemplateArgument, 4> Builder;
+ TemplateParameterList *PartialParams = Partial->getTemplateParameters();
+ for (unsigned I = 0, N = PartialParams->size(); I != N; ++I) {
+ NamedDecl *Param = PartialParams->getParam(I);
+ if (Deduced[I].isNull()) {
+ Info.Param = makeTemplateParameter(Param);
+ return Sema::TDK_Incomplete;
+ }
+
+ // We have deduced this argument, so it still needs to be
+ // checked and converted.
+
+ // First, for a non-type template parameter type that is
+ // initialized by a declaration, we need the type of the
+ // corresponding non-type template parameter.
+ QualType NTTPType;
+ if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ NTTPType = NTTP->getType();
+ if (NTTPType->isDependentType()) {
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Builder.data(), Builder.size());
+ NTTPType = S.SubstType(NTTPType,
+ MultiLevelTemplateArgumentList(TemplateArgs),
+ NTTP->getLocation(),
+ NTTP->getDeclName());
+ if (NTTPType.isNull()) {
+ Info.Param = makeTemplateParameter(Param);
+ // FIXME: These template arguments are temporary. Free them!
+ Info.reset(TemplateArgumentList::CreateCopy(S.Context,
+ Builder.data(),
+ Builder.size()));
+ return Sema::TDK_SubstitutionFailure;
+ }
+ }
+ }
+
+ if (ConvertDeducedTemplateArgument(S, Param, Deduced[I],
+ Partial, NTTPType, 0, Info, false,
+ Builder)) {
+ Info.Param = makeTemplateParameter(Param);
+ // FIXME: These template arguments are temporary. Free them!
+ Info.reset(TemplateArgumentList::CreateCopy(S.Context, Builder.data(),
+ Builder.size()));
+ return Sema::TDK_SubstitutionFailure;
+ }
+ }
+
+ // Form the template argument list from the deduced template arguments.
+ TemplateArgumentList *DeducedArgumentList
+ = TemplateArgumentList::CreateCopy(S.Context, Builder.data(),
+ Builder.size());
+
+ Info.reset(DeducedArgumentList);
+
+ // Substitute the deduced template arguments into the template
+ // arguments of the class template partial specialization, and
+ // verify that the instantiated template arguments are both valid
+ // and are equivalent to the template arguments originally provided
+ // to the class template.
+ LocalInstantiationScope InstScope(S);
+ ClassTemplateDecl *ClassTemplate = Partial->getSpecializedTemplate();
+ const ASTTemplateArgumentListInfo *PartialTemplArgInfo
+ = Partial->getTemplateArgsAsWritten();
+ const TemplateArgumentLoc *PartialTemplateArgs
+ = PartialTemplArgInfo->getTemplateArgs();
+
+ TemplateArgumentListInfo InstArgs(PartialTemplArgInfo->LAngleLoc,
+ PartialTemplArgInfo->RAngleLoc);
+
+ if (S.Subst(PartialTemplateArgs, PartialTemplArgInfo->NumTemplateArgs,
+ InstArgs, MultiLevelTemplateArgumentList(*DeducedArgumentList))) {
+ unsigned ArgIdx = InstArgs.size(), ParamIdx = ArgIdx;
+ if (ParamIdx >= Partial->getTemplateParameters()->size())
+ ParamIdx = Partial->getTemplateParameters()->size() - 1;
+
+ Decl *Param
+ = const_cast<NamedDecl *>(
+ Partial->getTemplateParameters()->getParam(ParamIdx));
+ Info.Param = makeTemplateParameter(Param);
+ Info.FirstArg = PartialTemplateArgs[ArgIdx].getArgument();
+ return Sema::TDK_SubstitutionFailure;
+ }
+
+ SmallVector<TemplateArgument, 4> ConvertedInstArgs;
+ if (S.CheckTemplateArgumentList(ClassTemplate, Partial->getLocation(),
+ InstArgs, false, ConvertedInstArgs))
+ return Sema::TDK_SubstitutionFailure;
+
+ TemplateParameterList *TemplateParams
+ = ClassTemplate->getTemplateParameters();
+ for (unsigned I = 0, E = TemplateParams->size(); I != E; ++I) {
+ TemplateArgument InstArg = ConvertedInstArgs.data()[I];
+ if (!isSameTemplateArg(S.Context, TemplateArgs[I], InstArg)) {
+ Info.Param = makeTemplateParameter(TemplateParams->getParam(I));
+ Info.FirstArg = TemplateArgs[I];
+ Info.SecondArg = InstArg;
+ return Sema::TDK_NonDeducedMismatch;
+ }
+ }
+
+ if (Trap.hasErrorOccurred())
+ return Sema::TDK_SubstitutionFailure;
+
+ return Sema::TDK_Success;
+}
+
+/// \brief Perform template argument deduction to determine whether
+/// the given template arguments match the given class template
+/// partial specialization per C++ [temp.class.spec.match].
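+///
+/// For example, the argument list <int*> matches the partial specialization
+/// template<typename T> struct X<T*> by deducing T = int.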
+Sema::TemplateDeductionResult
+Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
+ const TemplateArgumentList &TemplateArgs,
+ TemplateDeductionInfo &Info) {
+ if (Partial->isInvalidDecl())
+ return TDK_Invalid;
+
+ // C++ [temp.class.spec.match]p2:
+ // A partial specialization matches a given actual template
+ // argument list if the template arguments of the partial
+ // specialization can be deduced from the actual template argument
+ // list (14.8.2).
+
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+ SFINAETrap Trap(*this);
+
+ SmallVector<DeducedTemplateArgument, 4> Deduced;
+ Deduced.resize(Partial->getTemplateParameters()->size());
+ if (TemplateDeductionResult Result
+ = ::DeduceTemplateArguments(*this,
+ Partial->getTemplateParameters(),
+ Partial->getTemplateArgs(),
+ TemplateArgs, Info, Deduced))
+ return Result;
+
+ SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end());
+ InstantiatingTemplate Inst(*this, Info.getLocation(), Partial, DeducedArgs,
+ Info);
+ if (Inst.isInvalid())
+ return TDK_InstantiationDepth;
+
+ if (Trap.hasErrorOccurred())
+ return Sema::TDK_SubstitutionFailure;
+
+ return ::FinishTemplateArgumentDeduction(*this, Partial, TemplateArgs,
+ Deduced, Info);
+}
+
+/// Complete template argument deduction for a variable template partial
+/// specialization.
+/// TODO: Unify with ClassTemplatePartialSpecializationDecl version?
+/// May require unifying ClassTemplate(Partial)SpecializationDecl and
+/// VarTemplate(Partial)SpecializationDecl with a new data
+/// structure Template(Partial)SpecializationDecl, and
+/// using Template(Partial)SpecializationDecl as input type.
+static Sema::TemplateDeductionResult FinishTemplateArgumentDeduction(
+ Sema &S, VarTemplatePartialSpecializationDecl *Partial,
+ const TemplateArgumentList &TemplateArgs,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ TemplateDeductionInfo &Info) {
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated);
+ Sema::SFINAETrap Trap(S);
+
+ // C++ [temp.deduct.type]p2:
+ // [...] or if any template argument remains neither deduced nor
+ // explicitly specified, template argument deduction fails.
+ SmallVector<TemplateArgument, 4> Builder;
+ TemplateParameterList *PartialParams = Partial->getTemplateParameters();
+ for (unsigned I = 0, N = PartialParams->size(); I != N; ++I) {
+ NamedDecl *Param = PartialParams->getParam(I);
+ if (Deduced[I].isNull()) {
+ Info.Param = makeTemplateParameter(Param);
+ return Sema::TDK_Incomplete;
+ }
+
+ // We have deduced this argument, so it still needs to be
+ // checked and converted.
+
+ // First, for a non-type template parameter type that is
+ // initialized by a declaration, we need the type of the
+ // corresponding non-type template parameter.
+ QualType NTTPType;
+ if (NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ NTTPType = NTTP->getType();
+ if (NTTPType->isDependentType()) {
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Builder.data(), Builder.size());
+ NTTPType =
+ S.SubstType(NTTPType, MultiLevelTemplateArgumentList(TemplateArgs),
+ NTTP->getLocation(), NTTP->getDeclName());
+ if (NTTPType.isNull()) {
+ Info.Param = makeTemplateParameter(Param);
+ // FIXME: These template arguments are temporary. Free them!
+ Info.reset(TemplateArgumentList::CreateCopy(S.Context, Builder.data(),
+ Builder.size()));
+ return Sema::TDK_SubstitutionFailure;
+ }
+ }
+ }
+
+ if (ConvertDeducedTemplateArgument(S, Param, Deduced[I], Partial, NTTPType,
+ 0, Info, false, Builder)) {
+ Info.Param = makeTemplateParameter(Param);
+ // FIXME: These template arguments are temporary. Free them!
+ Info.reset(TemplateArgumentList::CreateCopy(S.Context, Builder.data(),
+ Builder.size()));
+ return Sema::TDK_SubstitutionFailure;
+ }
+ }
+
+ // Form the template argument list from the deduced template arguments.
+ TemplateArgumentList *DeducedArgumentList = TemplateArgumentList::CreateCopy(
+ S.Context, Builder.data(), Builder.size());
+
+ Info.reset(DeducedArgumentList);
+
+  // Substitute the deduced template arguments into the template
+  // arguments of the variable template partial specialization, and
+  // verify that the instantiated template arguments are both valid
+  // and are equivalent to the template arguments originally provided
+  // to the variable template.
+ LocalInstantiationScope InstScope(S);
+ VarTemplateDecl *VarTemplate = Partial->getSpecializedTemplate();
+ const ASTTemplateArgumentListInfo *PartialTemplArgInfo
+ = Partial->getTemplateArgsAsWritten();
+ const TemplateArgumentLoc *PartialTemplateArgs
+ = PartialTemplArgInfo->getTemplateArgs();
+
+ TemplateArgumentListInfo InstArgs(PartialTemplArgInfo->LAngleLoc,
+ PartialTemplArgInfo->RAngleLoc);
+
+ if (S.Subst(PartialTemplateArgs, PartialTemplArgInfo->NumTemplateArgs,
+ InstArgs, MultiLevelTemplateArgumentList(*DeducedArgumentList))) {
+ unsigned ArgIdx = InstArgs.size(), ParamIdx = ArgIdx;
+ if (ParamIdx >= Partial->getTemplateParameters()->size())
+ ParamIdx = Partial->getTemplateParameters()->size() - 1;
+
+ Decl *Param = const_cast<NamedDecl *>(
+ Partial->getTemplateParameters()->getParam(ParamIdx));
+ Info.Param = makeTemplateParameter(Param);
+ Info.FirstArg = PartialTemplateArgs[ArgIdx].getArgument();
+ return Sema::TDK_SubstitutionFailure;
+ }
+ SmallVector<TemplateArgument, 4> ConvertedInstArgs;
+ if (S.CheckTemplateArgumentList(VarTemplate, Partial->getLocation(), InstArgs,
+ false, ConvertedInstArgs))
+ return Sema::TDK_SubstitutionFailure;
+
+ TemplateParameterList *TemplateParams = VarTemplate->getTemplateParameters();
+ for (unsigned I = 0, E = TemplateParams->size(); I != E; ++I) {
+ TemplateArgument InstArg = ConvertedInstArgs.data()[I];
+ if (!isSameTemplateArg(S.Context, TemplateArgs[I], InstArg)) {
+ Info.Param = makeTemplateParameter(TemplateParams->getParam(I));
+ Info.FirstArg = TemplateArgs[I];
+ Info.SecondArg = InstArg;
+ return Sema::TDK_NonDeducedMismatch;
+ }
+ }
+
+ if (Trap.hasErrorOccurred())
+ return Sema::TDK_SubstitutionFailure;
+
+ return Sema::TDK_Success;
+}
+
+/// \brief Perform template argument deduction to determine whether
+/// the given template arguments match the given variable template
+/// partial specialization per C++ [temp.class.spec.match].
+/// TODO: Unify with ClassTemplatePartialSpecializationDecl version?
+/// May require unifying ClassTemplate(Partial)SpecializationDecl and
+/// VarTemplate(Partial)SpecializationDecl with a new data
+/// structure Template(Partial)SpecializationDecl, and
+/// using Template(Partial)SpecializationDecl as input type.
+Sema::TemplateDeductionResult
+Sema::DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
+ const TemplateArgumentList &TemplateArgs,
+ TemplateDeductionInfo &Info) {
+ if (Partial->isInvalidDecl())
+ return TDK_Invalid;
+
+ // C++ [temp.class.spec.match]p2:
+ // A partial specialization matches a given actual template
+ // argument list if the template arguments of the partial
+ // specialization can be deduced from the actual template argument
+ // list (14.8.2).
+
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+ SFINAETrap Trap(*this);
+
+ SmallVector<DeducedTemplateArgument, 4> Deduced;
+ Deduced.resize(Partial->getTemplateParameters()->size());
+ if (TemplateDeductionResult Result = ::DeduceTemplateArguments(
+ *this, Partial->getTemplateParameters(), Partial->getTemplateArgs(),
+ TemplateArgs, Info, Deduced))
+ return Result;
+
+ SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end());
+ InstantiatingTemplate Inst(*this, Info.getLocation(), Partial, DeducedArgs,
+ Info);
+ if (Inst.isInvalid())
+ return TDK_InstantiationDepth;
+
+ if (Trap.hasErrorOccurred())
+ return Sema::TDK_SubstitutionFailure;
+
+ return ::FinishTemplateArgumentDeduction(*this, Partial, TemplateArgs,
+ Deduced, Info);
+}
+
+/// \brief Determine whether the given type T is a simple-template-id type.
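+///
+/// e.g., a type written as the template-id vector<int> is a
+/// simple-template-id type; int or vector<int>* is not.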
+static bool isSimpleTemplateIdType(QualType T) {
+ if (const TemplateSpecializationType *Spec
+ = T->getAs<TemplateSpecializationType>())
+ return Spec->getTemplateName().getAsTemplateDecl() != nullptr;
+
+ return false;
+}
+
+/// \brief Substitute the explicitly-provided template arguments into the
+/// given function template according to C++ [temp.arg.explicit].
+///
+/// \param FunctionTemplate the function template into which the explicit
+/// template arguments will be substituted.
+///
+/// \param ExplicitTemplateArgs the explicitly-specified template
+/// arguments.
+///
+/// \param Deduced the deduced template arguments, which will be populated
+/// with the converted and checked explicit template arguments.
+///
+/// \param ParamTypes will be populated with the instantiated function
+/// parameters.
+///
+/// \param FunctionType if non-NULL, the result type of the function template
+/// will also be instantiated and the pointed-to value will be updated with
+/// the instantiated function type.
+///
+/// \param Info if substitution fails for any reason, this object will be
+/// populated with more information about the failure.
+///
+/// \returns TDK_Success if substitution was successful, or some failure
+/// condition.
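+///
+/// For example, for template<typename T, typename U> void f(T, U) called as
+/// f<int>(0, 0.0), this substitutes T = int into the parameter types and
+/// leaves U to be deduced from the call arguments.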
+Sema::TemplateDeductionResult
+Sema::SubstituteExplicitTemplateArguments(
+ FunctionTemplateDecl *FunctionTemplate,
+ TemplateArgumentListInfo &ExplicitTemplateArgs,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ SmallVectorImpl<QualType> &ParamTypes,
+ QualType *FunctionType,
+ TemplateDeductionInfo &Info) {
+ FunctionDecl *Function = FunctionTemplate->getTemplatedDecl();
+ TemplateParameterList *TemplateParams
+ = FunctionTemplate->getTemplateParameters();
+
+ if (ExplicitTemplateArgs.size() == 0) {
+ // No arguments to substitute; just copy over the parameter types and
+ // fill in the function type.
+ for (auto P : Function->params())
+ ParamTypes.push_back(P->getType());
+
+ if (FunctionType)
+ *FunctionType = Function->getType();
+ return TDK_Success;
+ }
+
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+ SFINAETrap Trap(*this);
+
+ // C++ [temp.arg.explicit]p3:
+ // Template arguments that are present shall be specified in the
+ // declaration order of their corresponding template-parameters. The
+ // template argument list shall not specify more template-arguments than
+ // there are corresponding template-parameters.
+ SmallVector<TemplateArgument, 4> Builder;
+
+ // Enter a new template instantiation context where we check the
+ // explicitly-specified template arguments against this function template,
+ // and then substitute them into the function parameter types.
+ SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end());
+ InstantiatingTemplate Inst(*this, Info.getLocation(), FunctionTemplate,
+ DeducedArgs,
+ ActiveTemplateInstantiation::ExplicitTemplateArgumentSubstitution,
+ Info);
+ if (Inst.isInvalid())
+ return TDK_InstantiationDepth;
+
+ if (CheckTemplateArgumentList(FunctionTemplate,
+ SourceLocation(),
+ ExplicitTemplateArgs,
+ true,
+ Builder) || Trap.hasErrorOccurred()) {
+ unsigned Index = Builder.size();
+ if (Index >= TemplateParams->size())
+ Index = TemplateParams->size() - 1;
+ Info.Param = makeTemplateParameter(TemplateParams->getParam(Index));
+ return TDK_InvalidExplicitArguments;
+ }
+
+ // Form the template argument list from the explicitly-specified
+ // template arguments.
+ TemplateArgumentList *ExplicitArgumentList
+ = TemplateArgumentList::CreateCopy(Context, Builder.data(), Builder.size());
+ Info.reset(ExplicitArgumentList);
+
+ // Template argument deduction and the final substitution should be
+ // done in the context of the templated declaration. Explicit
+ // argument substitution, on the other hand, needs to happen in the
+ // calling context.
+ ContextRAII SavedContext(*this, FunctionTemplate->getTemplatedDecl());
+
+ // If we deduced template arguments for a template parameter pack,
+ // note that the template argument pack is partially substituted and record
+ // the explicit template arguments. They'll be used as part of deduction
+ // for this template parameter pack.
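+  // e.g., for template<typename... Ts> void f(Ts...) called as
+  // f<int>(0, 1.5), Ts starts as the partial pack {int} and is later
+  // extended to {int, double} by deduction from the call arguments.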
+ for (unsigned I = 0, N = Builder.size(); I != N; ++I) {
+ const TemplateArgument &Arg = Builder[I];
+ if (Arg.getKind() == TemplateArgument::Pack) {
+ CurrentInstantiationScope->SetPartiallySubstitutedPack(
+ TemplateParams->getParam(I),
+ Arg.pack_begin(),
+ Arg.pack_size());
+ break;
+ }
+ }
+
+ const FunctionProtoType *Proto
+ = Function->getType()->getAs<FunctionProtoType>();
+ assert(Proto && "Function template does not have a prototype?");
+
+ // Isolate our substituted parameters from our caller.
+ LocalInstantiationScope InstScope(*this, /*MergeWithOuterScope*/true);
+
+ // Instantiate the types of each of the function parameters given the
+ // explicitly-specified template arguments. If the function has a trailing
+ // return type, substitute it after the arguments to ensure we substitute
+ // in lexical order.
+ if (Proto->hasTrailingReturn()) {
+ if (SubstParmTypes(Function->getLocation(),
+ Function->param_begin(), Function->getNumParams(),
+ MultiLevelTemplateArgumentList(*ExplicitArgumentList),
+ ParamTypes))
+ return TDK_SubstitutionFailure;
+ }
+
+ // Instantiate the return type.
+ QualType ResultType;
+ {
+ // C++11 [expr.prim.general]p3:
+ // If a declaration declares a member function or member function
+ // template of a class X, the expression this is a prvalue of type
+    //   "pointer to cv-qualifier-seq X" between the optional cv-qualifier-seq
+ // and the end of the function-definition, member-declarator, or
+ // declarator.
+ unsigned ThisTypeQuals = 0;
+ CXXRecordDecl *ThisContext = nullptr;
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Function)) {
+ ThisContext = Method->getParent();
+ ThisTypeQuals = Method->getTypeQualifiers();
+ }
+
+ CXXThisScopeRAII ThisScope(*this, ThisContext, ThisTypeQuals,
+ getLangOpts().CPlusPlus11);
+
+ ResultType =
+ SubstType(Proto->getReturnType(),
+ MultiLevelTemplateArgumentList(*ExplicitArgumentList),
+ Function->getTypeSpecStartLoc(), Function->getDeclName());
+ if (ResultType.isNull() || Trap.hasErrorOccurred())
+ return TDK_SubstitutionFailure;
+ }
+
+ // Instantiate the types of each of the function parameters given the
+ // explicitly-specified template arguments if we didn't do so earlier.
+ if (!Proto->hasTrailingReturn() &&
+ SubstParmTypes(Function->getLocation(),
+ Function->param_begin(), Function->getNumParams(),
+ MultiLevelTemplateArgumentList(*ExplicitArgumentList),
+ ParamTypes))
+ return TDK_SubstitutionFailure;
+
+ if (FunctionType) {
+ *FunctionType = BuildFunctionType(ResultType, ParamTypes,
+ Function->getLocation(),
+ Function->getDeclName(),
+ Proto->getExtProtoInfo());
+ if (FunctionType->isNull() || Trap.hasErrorOccurred())
+ return TDK_SubstitutionFailure;
+ }
+
+ // C++ [temp.arg.explicit]p2:
+ // Trailing template arguments that can be deduced (14.8.2) may be
+ // omitted from the list of explicit template-arguments. If all of the
+ // template arguments can be deduced, they may all be omitted; in this
+ // case, the empty template argument list <> itself may also be omitted.
+ //
+ // Take all of the explicitly-specified arguments and put them into
+ // the set of deduced template arguments. Explicitly-specified
+ // parameter packs, however, will be set to NULL since the deduction
+ // mechanisms handle explicitly-specified argument packs directly.
+ Deduced.reserve(TemplateParams->size());
+ for (unsigned I = 0, N = ExplicitArgumentList->size(); I != N; ++I) {
+ const TemplateArgument &Arg = ExplicitArgumentList->get(I);
+ if (Arg.getKind() == TemplateArgument::Pack)
+ Deduced.push_back(DeducedTemplateArgument());
+ else
+ Deduced.push_back(Arg);
+ }
+
+ return TDK_Success;
+}
+
+/// \brief Check whether the deduced argument type for a call to a function
+/// template matches the actual argument type per C++ [temp.deduct.call]p4.
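+///
+/// For example, given template<typename T> void f(const T&) called with an
+/// int lvalue, the deduced A (const int) is more cv-qualified than the
+/// transformed A (int), a difference [temp.deduct.call]p4 permits.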
+static bool
+CheckOriginalCallArgDeduction(Sema &S, Sema::OriginalCallArg OriginalArg,
+ QualType DeducedA) {
+ ASTContext &Context = S.Context;
+
+ QualType A = OriginalArg.OriginalArgType;
+ QualType OriginalParamType = OriginalArg.OriginalParamType;
+
+ // Check for type equality (top-level cv-qualifiers are ignored).
+ if (Context.hasSameUnqualifiedType(A, DeducedA))
+ return false;
+
+ // Strip off references on the argument types; they aren't needed for
+ // the following checks.
+ if (const ReferenceType *DeducedARef = DeducedA->getAs<ReferenceType>())
+ DeducedA = DeducedARef->getPointeeType();
+ if (const ReferenceType *ARef = A->getAs<ReferenceType>())
+ A = ARef->getPointeeType();
+
+ // C++ [temp.deduct.call]p4:
+ // [...] However, there are three cases that allow a difference:
+ // - If the original P is a reference type, the deduced A (i.e., the
+ // type referred to by the reference) can be more cv-qualified than
+ // the transformed A.
+ if (const ReferenceType *OriginalParamRef
+ = OriginalParamType->getAs<ReferenceType>()) {
+ // We don't want to keep the reference around any more.
+ OriginalParamType = OriginalParamRef->getPointeeType();
+
+ Qualifiers AQuals = A.getQualifiers();
+ Qualifiers DeducedAQuals = DeducedA.getQualifiers();
+
+ // Under Objective-C++ ARC, the deduced type may have implicitly
+ // been given strong or (when dealing with a const reference)
+ // unsafe_unretained lifetime. If so, update the original
+ // qualifiers to include this lifetime.
+ if (S.getLangOpts().ObjCAutoRefCount &&
+ ((DeducedAQuals.getObjCLifetime() == Qualifiers::OCL_Strong &&
+ AQuals.getObjCLifetime() == Qualifiers::OCL_None) ||
+ (DeducedAQuals.hasConst() &&
+ DeducedAQuals.getObjCLifetime() == Qualifiers::OCL_ExplicitNone))) {
+ AQuals.setObjCLifetime(DeducedAQuals.getObjCLifetime());
+ }
+
+ if (AQuals == DeducedAQuals) {
+ // Qualifiers match; there's nothing to do.
+ } else if (!DeducedAQuals.compatiblyIncludes(AQuals)) {
+ return true;
+ } else {
+ // Qualifiers are compatible, so have the argument type adopt the
+ // deduced argument type's qualifiers as if we had performed the
+ // qualification conversion.
+ A = Context.getQualifiedType(A.getUnqualifiedType(), DeducedAQuals);
+ }
+ }
+
+ // - The transformed A can be another pointer or pointer to member
+ // type that can be converted to the deduced A via a qualification
+ // conversion.
+ //
+ // Also allow conversions which merely strip [[noreturn]] from function types
+ // (recursively) as an extension.
+ // FIXME: Currently, this doesn't play nicely with qualification conversions.
+ bool ObjCLifetimeConversion = false;
+ QualType ResultTy;
+ if ((A->isAnyPointerType() || A->isMemberPointerType()) &&
+ (S.IsQualificationConversion(A, DeducedA, false,
+ ObjCLifetimeConversion) ||
+ S.IsNoReturnConversion(A, DeducedA, ResultTy)))
+ return false;
+
+ // - If P is a class and P has the form simple-template-id, then the
+ // transformed A can be a derived class of the deduced A. [...]
+ // [...] Likewise, if P is a pointer to a class of the form
+ // simple-template-id, the transformed A can be a pointer to a
+ // derived class pointed to by the deduced A.
+ if (const PointerType *OriginalParamPtr
+ = OriginalParamType->getAs<PointerType>()) {
+ if (const PointerType *DeducedAPtr = DeducedA->getAs<PointerType>()) {
+ if (const PointerType *APtr = A->getAs<PointerType>()) {
+ if (A->getPointeeType()->isRecordType()) {
+ OriginalParamType = OriginalParamPtr->getPointeeType();
+ DeducedA = DeducedAPtr->getPointeeType();
+ A = APtr->getPointeeType();
+ }
+ }
+ }
+ }
+
+ if (Context.hasSameUnqualifiedType(A, DeducedA))
+ return false;
+
+ if (A->isRecordType() && isSimpleTemplateIdType(OriginalParamType) &&
+ S.IsDerivedFrom(SourceLocation(), A, DeducedA))
+ return false;
+
+ return true;
+}
+
+/// \brief Finish template argument deduction for a function template,
+/// checking the deduced template arguments for completeness and forming
+/// the function template specialization.
+///
+/// \param OriginalCallArgs If non-NULL, the original call arguments against
+/// which the deduced argument types should be compared.
+Sema::TemplateDeductionResult
+Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ unsigned NumExplicitlySpecified,
+ FunctionDecl *&Specialization,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs,
+ bool PartialOverloading) {
+ TemplateParameterList *TemplateParams
+ = FunctionTemplate->getTemplateParameters();
+
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+ SFINAETrap Trap(*this);
+
+ // Enter a new template instantiation context while we instantiate the
+ // actual function declaration.
+ SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end());
+ InstantiatingTemplate Inst(*this, Info.getLocation(), FunctionTemplate,
+ DeducedArgs,
+ ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution,
+ Info);
+ if (Inst.isInvalid())
+ return TDK_InstantiationDepth;
+
+ ContextRAII SavedContext(*this, FunctionTemplate->getTemplatedDecl());
+
+ // C++ [temp.deduct.type]p2:
+ // [...] or if any template argument remains neither deduced nor
+ // explicitly specified, template argument deduction fails.
+ SmallVector<TemplateArgument, 4> Builder;
+ for (unsigned I = 0, N = TemplateParams->size(); I != N; ++I) {
+ NamedDecl *Param = TemplateParams->getParam(I);
+
+ if (!Deduced[I].isNull()) {
+ if (I < NumExplicitlySpecified) {
+ // We have already fully type-checked and converted this
+ // argument, because it was explicitly-specified. Just record the
+ // presence of this argument.
+ Builder.push_back(Deduced[I]);
+ // We may have had explicitly-specified template arguments for a
+ // template parameter pack (that may or may not have been extended
+ // via additional deduced arguments).
+ if (Param->isParameterPack() && CurrentInstantiationScope) {
+ if (CurrentInstantiationScope->getPartiallySubstitutedPack() ==
+ Param) {
+ // Forget the partially-substituted pack; its substitution is now
+ // complete.
+ CurrentInstantiationScope->ResetPartiallySubstitutedPack();
+ }
+ }
+ continue;
+ }
+ // We have deduced this argument, so it still needs to be
+ // checked and converted.
+
+ // First, for a non-type template parameter type that is
+ // initialized by a declaration, we need the type of the
+ // corresponding non-type template parameter.
+ QualType NTTPType;
+ if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ NTTPType = NTTP->getType();
+ if (NTTPType->isDependentType()) {
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Builder.data(), Builder.size());
+ NTTPType = SubstType(NTTPType,
+ MultiLevelTemplateArgumentList(TemplateArgs),
+ NTTP->getLocation(),
+ NTTP->getDeclName());
+ if (NTTPType.isNull()) {
+ Info.Param = makeTemplateParameter(Param);
+ // FIXME: These template arguments are temporary. Free them!
+ Info.reset(TemplateArgumentList::CreateCopy(Context,
+ Builder.data(),
+ Builder.size()));
+ return TDK_SubstitutionFailure;
+ }
+ }
+ }
+
+ if (ConvertDeducedTemplateArgument(*this, Param, Deduced[I],
+ FunctionTemplate, NTTPType, 0, Info,
+ true, Builder)) {
+ Info.Param = makeTemplateParameter(Param);
+ // FIXME: These template arguments are temporary. Free them!
+ Info.reset(TemplateArgumentList::CreateCopy(Context, Builder.data(),
+ Builder.size()));
+ return TDK_SubstitutionFailure;
+ }
+
+ continue;
+ }
+
+ // C++0x [temp.arg.explicit]p3:
+ // A trailing template parameter pack (14.5.3) not otherwise deduced will
+ // be deduced to an empty sequence of template arguments.
+ // FIXME: Where did the word "trailing" come from?
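+    // e.g., for template<typename... Ts> void f(), a bare call f() deduces
+    // Ts to an empty pack.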
+ if (Param->isTemplateParameterPack()) {
+ // We may have had explicitly-specified template arguments for this
+ // template parameter pack. If so, our empty deduction extends the
+ // explicitly-specified set (C++0x [temp.arg.explicit]p9).
+ const TemplateArgument *ExplicitArgs;
+ unsigned NumExplicitArgs;
+ if (CurrentInstantiationScope &&
+ CurrentInstantiationScope->getPartiallySubstitutedPack(&ExplicitArgs,
+ &NumExplicitArgs)
+ == Param) {
+ Builder.push_back(TemplateArgument(
+ llvm::makeArrayRef(ExplicitArgs, NumExplicitArgs)));
+
+        // Forget the partially-substituted pack; its substitution is now
+ // complete.
+ CurrentInstantiationScope->ResetPartiallySubstitutedPack();
+ } else {
+ Builder.push_back(TemplateArgument::getEmptyPack());
+ }
+ continue;
+ }
+
+ // Substitute into the default template argument, if available.
+ bool HasDefaultArg = false;
+ TemplateArgumentLoc DefArg
+ = SubstDefaultTemplateArgumentIfAvailable(FunctionTemplate,
+ FunctionTemplate->getLocation(),
+ FunctionTemplate->getSourceRange().getEnd(),
+ Param,
+ Builder, HasDefaultArg);
+
+ // If there was no default argument, deduction is incomplete.
+ if (DefArg.getArgument().isNull()) {
+ Info.Param = makeTemplateParameter(
+ const_cast<NamedDecl *>(TemplateParams->getParam(I)));
+ Info.reset(TemplateArgumentList::CreateCopy(Context, Builder.data(),
+ Builder.size()));
+ if (PartialOverloading) break;
+
+ return HasDefaultArg ? TDK_SubstitutionFailure : TDK_Incomplete;
+ }
+
+ // Check whether we can actually use the default argument.
+ if (CheckTemplateArgument(Param, DefArg,
+ FunctionTemplate,
+ FunctionTemplate->getLocation(),
+ FunctionTemplate->getSourceRange().getEnd(),
+ 0, Builder,
+ CTAK_Specified)) {
+ Info.Param = makeTemplateParameter(
+ const_cast<NamedDecl *>(TemplateParams->getParam(I)));
+ // FIXME: These template arguments are temporary. Free them!
+ Info.reset(TemplateArgumentList::CreateCopy(Context, Builder.data(),
+ Builder.size()));
+ return TDK_SubstitutionFailure;
+ }
+
+ // If we get here, we successfully used the default template argument.
+ }
+
+ // Form the template argument list from the deduced template arguments.
+ TemplateArgumentList *DeducedArgumentList
+ = TemplateArgumentList::CreateCopy(Context, Builder.data(), Builder.size());
+ Info.reset(DeducedArgumentList);
+
+ // Substitute the deduced template arguments into the function template
+ // declaration to produce the function template specialization.
+ DeclContext *Owner = FunctionTemplate->getDeclContext();
+ if (FunctionTemplate->getFriendObjectKind())
+ Owner = FunctionTemplate->getLexicalDeclContext();
+ Specialization = cast_or_null<FunctionDecl>(
+ SubstDecl(FunctionTemplate->getTemplatedDecl(), Owner,
+ MultiLevelTemplateArgumentList(*DeducedArgumentList)));
+ if (!Specialization || Specialization->isInvalidDecl())
+ return TDK_SubstitutionFailure;
+
+ assert(Specialization->getPrimaryTemplate()->getCanonicalDecl() ==
+ FunctionTemplate->getCanonicalDecl());
+
+ // If the template argument list is owned by the function template
+ // specialization, release it.
+ if (Specialization->getTemplateSpecializationArgs() == DeducedArgumentList &&
+ !Trap.hasErrorOccurred())
+ Info.take();
+
+ // There may have been an error that did not prevent us from constructing a
+ // declaration. Mark the declaration invalid and return with a substitution
+ // failure.
+ if (Trap.hasErrorOccurred()) {
+ Specialization->setInvalidDecl(true);
+ return TDK_SubstitutionFailure;
+ }
+
+ if (OriginalCallArgs) {
+ // C++ [temp.deduct.call]p4:
+ // In general, the deduction process attempts to find template argument
+ // values that will make the deduced A identical to A (after the type A
+ // is transformed as described above). [...]
+ for (unsigned I = 0, N = OriginalCallArgs->size(); I != N; ++I) {
+ OriginalCallArg OriginalArg = (*OriginalCallArgs)[I];
+ unsigned ParamIdx = OriginalArg.ArgIdx;
+
+ if (ParamIdx >= Specialization->getNumParams())
+ continue;
+
+ QualType DeducedA = Specialization->getParamDecl(ParamIdx)->getType();
+ if (CheckOriginalCallArgDeduction(*this, OriginalArg, DeducedA)) {
+ Info.FirstArg = TemplateArgument(DeducedA);
+ Info.SecondArg = TemplateArgument(OriginalArg.OriginalArgType);
+ Info.CallArgIndex = OriginalArg.ArgIdx;
+ return TDK_DeducedMismatch;
+ }
+ }
+ }
+
+ // If we suppressed any diagnostics while performing template argument
+ // deduction, and if we haven't already instantiated this declaration,
+ // keep track of these diagnostics. They'll be emitted if this specialization
+ // is actually used.
+ if (Info.diag_begin() != Info.diag_end()) {
+ SuppressedDiagnosticsMap::iterator
+ Pos = SuppressedDiagnostics.find(Specialization->getCanonicalDecl());
+ if (Pos == SuppressedDiagnostics.end())
+ SuppressedDiagnostics[Specialization->getCanonicalDecl()]
+ .append(Info.diag_begin(), Info.diag_end());
+ }
+
+ return TDK_Success;
+}
+
+/// Gets the type of a function for template-argument-deduction
+/// purposes when it's considered as part of an overload set.
+static QualType GetTypeOfFunction(Sema &S, const OverloadExpr::FindResult &R,
+ FunctionDecl *Fn) {
+ // We may need to deduce the return type of the function now.
+ if (S.getLangOpts().CPlusPlus14 && Fn->getReturnType()->isUndeducedType() &&
+ S.DeduceReturnType(Fn, R.Expression->getExprLoc(), /*Diagnose*/ false))
+ return QualType();
+
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn))
+ if (Method->isInstance()) {
+ // An instance method that's referenced in a form that doesn't
+ // look like a member pointer is just invalid.
+ if (!R.HasFormOfMemberPointer) return QualType();
+
+ return S.Context.getMemberPointerType(Fn->getType(),
+ S.Context.getTypeDeclType(Method->getParent()).getTypePtr());
+ }
+
+ if (!R.IsAddressOfOperand) return Fn->getType();
+ return S.Context.getPointerType(Fn->getType());
+}
+
+/// Apply the deduction rules for overload sets.
+///
+/// \return the null type if this argument should be treated as an
+/// undeduced context
+static QualType
+ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
+ Expr *Arg, QualType ParamType,
+ bool ParamWasReference) {
+
+ OverloadExpr::FindResult R = OverloadExpr::find(Arg);
+
+ OverloadExpr *Ovl = R.Expression;
+
+ // C++0x [temp.deduct.call]p4
+ unsigned TDF = 0;
+ if (ParamWasReference)
+ TDF |= TDF_ParamWithReferenceType;
+ if (R.IsAddressOfOperand)
+ TDF |= TDF_IgnoreQualifiers;
+
+ // C++0x [temp.deduct.call]p6:
+ // When P is a function type, pointer to function type, or pointer
+ // to member function type:
+
+ if (!ParamType->isFunctionType() &&
+ !ParamType->isFunctionPointerType() &&
+ !ParamType->isMemberFunctionPointerType()) {
+ if (Ovl->hasExplicitTemplateArgs()) {
+ // But we can still look for an explicit specialization.
+ if (FunctionDecl *ExplicitSpec
+ = S.ResolveSingleFunctionTemplateSpecialization(Ovl))
+ return GetTypeOfFunction(S, R, ExplicitSpec);
+ }
+
+ return QualType();
+ }
+
+ // Gather the explicit template arguments, if any.
+ TemplateArgumentListInfo ExplicitTemplateArgs;
+ if (Ovl->hasExplicitTemplateArgs())
+ Ovl->copyTemplateArgumentsInto(ExplicitTemplateArgs);
+ QualType Match;
+ for (UnresolvedSetIterator I = Ovl->decls_begin(),
+ E = Ovl->decls_end(); I != E; ++I) {
+ NamedDecl *D = (*I)->getUnderlyingDecl();
+
+ if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D)) {
+ // - If the argument is an overload set containing one or more
+ // function templates, the parameter is treated as a
+ // non-deduced context.
+ if (!Ovl->hasExplicitTemplateArgs())
+ return QualType();
+
+ // Otherwise, see if we can resolve a function type
+ FunctionDecl *Specialization = nullptr;
+ TemplateDeductionInfo Info(Ovl->getNameLoc());
+ if (S.DeduceTemplateArguments(FunTmpl, &ExplicitTemplateArgs,
+ Specialization, Info))
+ continue;
+
+ D = Specialization;
+ }
+
+ FunctionDecl *Fn = cast<FunctionDecl>(D);
+ QualType ArgType = GetTypeOfFunction(S, R, Fn);
+ if (ArgType.isNull()) continue;
+
+ // Function-to-pointer conversion.
+ if (!ParamWasReference && ParamType->isPointerType() &&
+ ArgType->isFunctionType())
+ ArgType = S.Context.getPointerType(ArgType);
+
+ // - If the argument is an overload set (not containing function
+ // templates), trial argument deduction is attempted using each
+ // of the members of the set. If deduction succeeds for only one
+ // of the overload set members, that member is used as the
+ // argument value for the deduction. If deduction succeeds for
+ // more than one member of the overload set the parameter is
+ // treated as a non-deduced context.
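+    // For example, given void h(int); void h(int, int); and
+    //   template<class T> void g(void (*)(T));
+    // the call g(h) attempts deduction against each h; only h(int)
+    // matches P = void(*)(T), so T is deduced as int.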
+
+ // We do all of this in a fresh context per C++0x [temp.deduct.type]p2:
+ // Type deduction is done independently for each P/A pair, and
+ // the deduced template argument values are then combined.
+ // So we do not reject deductions which were made elsewhere.
+ SmallVector<DeducedTemplateArgument, 8>
+ Deduced(TemplateParams->size());
+ TemplateDeductionInfo Info(Ovl->getNameLoc());
+ Sema::TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, ParamType,
+ ArgType, Info, Deduced, TDF);
+ if (Result) continue;
+ if (!Match.isNull()) return QualType();
+ Match = ArgType;
+ }
+
+ return Match;
+}
+
+/// \brief Perform the adjustments to the parameter and argument types
+/// described in C++ [temp.deduct.call].
+///
+/// \returns true if the caller should not attempt to perform any template
+/// argument deduction based on this P/A pair because the argument is an
+/// overloaded function set that could not be resolved.
+static bool AdjustFunctionParmAndArgTypesForDeduction(Sema &S,
+ TemplateParameterList *TemplateParams,
+ QualType &ParamType,
+ QualType &ArgType,
+ Expr *Arg,
+ unsigned &TDF) {
+ // C++0x [temp.deduct.call]p3:
+ // If P is a cv-qualified type, the top level cv-qualifiers of P's type
+ // are ignored for type deduction.
+ if (ParamType.hasQualifiers())
+ ParamType = ParamType.getUnqualifiedType();
+
+ // [...] If P is a reference type, the type referred to by P is
+ // used for type deduction.
+ const ReferenceType *ParamRefType = ParamType->getAs<ReferenceType>();
+ if (ParamRefType)
+ ParamType = ParamRefType->getPointeeType();
+
+  // Overload sets usually make this parameter an undeduced context,
+  // but there are sometimes special circumstances, typically
+  // involving a template-id-expr.
+ if (ArgType == S.Context.OverloadTy) {
+ ArgType = ResolveOverloadForDeduction(S, TemplateParams,
+ Arg, ParamType,
+ ParamRefType != nullptr);
+ if (ArgType.isNull())
+ return true;
+ }
+
+ if (ParamRefType) {
+ // If the argument has incomplete array type, try to complete its type.
+ if (ArgType->isIncompleteArrayType()) {
+ S.completeExprArrayBound(Arg);
+ ArgType = Arg->getType();
+ }
+
+ // C++0x [temp.deduct.call]p3:
+ // If P is an rvalue reference to a cv-unqualified template
+ // parameter and the argument is an lvalue, the type "lvalue
+ // reference to A" is used in place of A for type deduction.
+ if (ParamRefType->isRValueReferenceType() &&
+ !ParamType.getQualifiers() &&
+ isa<TemplateTypeParmType>(ParamType) &&
+ Arg->isLValue())
+ ArgType = S.Context.getLValueReferenceType(ArgType);
+ } else {
+ // C++ [temp.deduct.call]p2:
+ // If P is not a reference type:
+ // - If A is an array type, the pointer type produced by the
+ // array-to-pointer standard conversion (4.2) is used in place of
+ // A for type deduction; otherwise,
+ if (ArgType->isArrayType())
+ ArgType = S.Context.getArrayDecayedType(ArgType);
+ // - If A is a function type, the pointer type produced by the
+ // function-to-pointer standard conversion (4.3) is used in place
+ // of A for type deduction; otherwise,
+ else if (ArgType->isFunctionType())
+ ArgType = S.Context.getPointerType(ArgType);
+ else {
+ // - If A is a cv-qualified type, the top level cv-qualifiers of A's
+ // type are ignored for type deduction.
+ ArgType = ArgType.getUnqualifiedType();
+ }
+ }
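+  // For example, for template<class T> void f(T):
+  //   int arr[3];      f(arr); // A decays to int*
+  //   void g();        f(g);   // A decays to void (*)()
+  //   const int c = 0; f(c);   // A is int; top-level const is dropped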
+
+ // C++0x [temp.deduct.call]p4:
+ // In general, the deduction process attempts to find template argument
+ // values that will make the deduced A identical to A (after the type A
+ // is transformed as described above). [...]
+ TDF = TDF_SkipNonDependent;
+
+ // - If the original P is a reference type, the deduced A (i.e., the
+ // type referred to by the reference) can be more cv-qualified than
+ // the transformed A.
+ if (ParamRefType)
+ TDF |= TDF_ParamWithReferenceType;
+ // - The transformed A can be another pointer or pointer to member
+ // type that can be converted to the deduced A via a qualification
+ // conversion (4.4).
+ if (ArgType->isPointerType() || ArgType->isMemberPointerType() ||
+ ArgType->isObjCObjectPointerType())
+ TDF |= TDF_IgnoreQualifiers;
+ // - If P is a class and P has the form simple-template-id, then the
+ // transformed A can be a derived class of the deduced A. Likewise,
+ // if P is a pointer to a class of the form simple-template-id, the
+ // transformed A can be a pointer to a derived class pointed to by
+ // the deduced A.
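+  //
+  //     For example, given template<class T> struct B {}; and
+  //     struct D : B<int> {}; calling template<class T> void g(B<T>);
+  //     as g(D()) deduces T = int even though A is the derived class D.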
+ if (isSimpleTemplateIdType(ParamType) ||
+ (isa<PointerType>(ParamType) &&
+ isSimpleTemplateIdType(
+ ParamType->getAs<PointerType>()->getPointeeType())))
+ TDF |= TDF_DerivedClass;
+
+ return false;
+}
+
+static bool
+hasDeducibleTemplateParameters(Sema &S, FunctionTemplateDecl *FunctionTemplate,
+ QualType T);
+
+static Sema::TemplateDeductionResult DeduceTemplateArgumentByListElement(
+ Sema &S, TemplateParameterList *TemplateParams, QualType ParamType,
+ Expr *Arg, TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned TDF);
+
+/// \brief Attempt template argument deduction from an initializer list
+/// deemed to be an argument in a function call.
+static bool
+DeduceFromInitializerList(Sema &S, TemplateParameterList *TemplateParams,
+ QualType AdjustedParamType, InitListExpr *ILE,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ unsigned TDF, Sema::TemplateDeductionResult &Result) {
+
+ // [temp.deduct.call] p1 (post CWG-1591)
+ // If removing references and cv-qualifiers from P gives
+ // std::initializer_list<P0> or P0[N] for some P0 and N and the argument is a
+ // non-empty initializer list (8.5.4), then deduction is performed instead for
+ // each element of the initializer list, taking P0 as a function template
+ // parameter type and the initializer element as its argument, and in the
+ // P0[N] case, if N is a non-type template parameter, N is deduced from the
+ // length of the initializer list. Otherwise, an initializer list argument
+  // causes the parameter to be considered a non-deduced context.
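+  //
+  // For example, with template<class T> void f(std::initializer_list<T>);
+  // the call f({1, 2, 3}) deduces T = int, and with
+  // template<class T, int N> void g(T (&&)[N]); the call g({1, 2, 3})
+  // deduces T = int and N = 3.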
+
+ const bool IsConstSizedArray = AdjustedParamType->isConstantArrayType();
+
+ const bool IsDependentSizedArray =
+ !IsConstSizedArray && AdjustedParamType->isDependentSizedArrayType();
+
+ QualType ElTy; // The element type of the std::initializer_list or the array.
+
+ const bool IsSTDList = !IsConstSizedArray && !IsDependentSizedArray &&
+ S.isStdInitializerList(AdjustedParamType, &ElTy);
+
+ if (!IsConstSizedArray && !IsDependentSizedArray && !IsSTDList)
+ return false;
+
+ Result = Sema::TDK_Success;
+  // If we are not deducing against the 'T' in a std::initializer_list<T>,
+  // then deduce against the 'T' in T[N].
+ if (ElTy.isNull()) {
+ assert(!IsSTDList);
+ ElTy = S.Context.getAsArrayType(AdjustedParamType)->getElementType();
+ }
+ // Deduction only needs to be done for dependent types.
+ if (ElTy->isDependentType()) {
+ for (Expr *E : ILE->inits()) {
+ if ((Result = DeduceTemplateArgumentByListElement(S, TemplateParams, ElTy,
+ E, Info, Deduced, TDF)))
+ return true;
+ }
+ }
+ if (IsDependentSizedArray) {
+ const DependentSizedArrayType *ArrTy =
+ S.Context.getAsDependentSizedArrayType(AdjustedParamType);
+    // Determine whether the array bound is something we can deduce.
+ if (NonTypeTemplateParmDecl *NTTP =
+ getDeducedParameterFromExpr(ArrTy->getSizeExpr())) {
+ // We can perform template argument deduction for the given non-type
+ // template parameter.
+ assert(NTTP->getDepth() == 0 &&
+ "Cannot deduce non-type template argument at depth > 0");
+ llvm::APInt Size(S.Context.getIntWidth(NTTP->getType()),
+ ILE->getNumInits());
+
+ Result = DeduceNonTypeTemplateArgument(
+ S, NTTP, llvm::APSInt(Size), NTTP->getType(),
+ /*ArrayBound=*/true, Info, Deduced);
+ }
+ }
+ return true;
+}
+
+/// \brief Perform template argument deduction by matching a parameter type
+/// against a single expression, where the expression is an element of
+/// an initializer list that was originally matched against a parameter
+/// of type \c initializer_list\<ParamType\>.
+static Sema::TemplateDeductionResult
+DeduceTemplateArgumentByListElement(Sema &S,
+ TemplateParameterList *TemplateParams,
+ QualType ParamType, Expr *Arg,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ unsigned TDF) {
+ // Handle the case where an init list contains another init list as the
+ // element.
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(Arg)) {
+ Sema::TemplateDeductionResult Result;
+ if (!DeduceFromInitializerList(S, TemplateParams,
+ ParamType.getNonReferenceType(), ILE, Info,
+ Deduced, TDF, Result))
+ return Sema::TDK_Success; // Just ignore this expression.
+
+ return Result;
+ }
+
+ // For all other cases, just match by type.
+ QualType ArgType = Arg->getType();
+ if (AdjustFunctionParmAndArgTypesForDeduction(S, TemplateParams, ParamType,
+ ArgType, Arg, TDF)) {
+ Info.Expression = Arg;
+ return Sema::TDK_FailedOverloadResolution;
+ }
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, ParamType,
+ ArgType, Info, Deduced, TDF);
+}
+
+/// \brief Perform template argument deduction from a function call
+/// (C++ [temp.deduct.call]).
+///
+/// \param FunctionTemplate the function template for which we are performing
+/// template argument deduction.
+///
+/// \param ExplicitTemplateArgs the explicit template arguments provided
+/// for this call.
+///
+/// \param Args the function call arguments
+///
+/// \param Specialization if template argument deduction was successful,
+/// this will be set to the function template specialization produced by
+/// template argument deduction.
+///
+/// \param Info the argument will be updated to provide additional information
+/// about template argument deduction.
+///
+/// \returns the result of template argument deduction.
+Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
+ FunctionTemplateDecl *FunctionTemplate,
+ TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
+ FunctionDecl *&Specialization, TemplateDeductionInfo &Info,
+ bool PartialOverloading) {
+ if (FunctionTemplate->isInvalidDecl())
+ return TDK_Invalid;
+
+ FunctionDecl *Function = FunctionTemplate->getTemplatedDecl();
+ unsigned NumParams = Function->getNumParams();
+
+ // C++ [temp.deduct.call]p1:
+ // Template argument deduction is done by comparing each function template
+ // parameter type (call it P) with the type of the corresponding argument
+ // of the call (call it A) as described below.
+ unsigned CheckArgs = Args.size();
+ if (Args.size() < Function->getMinRequiredArguments() && !PartialOverloading)
+ return TDK_TooFewArguments;
+ else if (TooManyArguments(NumParams, Args.size(), PartialOverloading)) {
+ const FunctionProtoType *Proto
+ = Function->getType()->getAs<FunctionProtoType>();
+ if (Proto->isTemplateVariadic())
+ /* Do nothing */;
+ else if (Proto->isVariadic())
+ CheckArgs = NumParams;
+ else
+ return TDK_TooManyArguments;
+ }
+
+ // The types of the parameters from which we will perform template argument
+ // deduction.
+ LocalInstantiationScope InstScope(*this);
+ TemplateParameterList *TemplateParams
+ = FunctionTemplate->getTemplateParameters();
+ SmallVector<DeducedTemplateArgument, 4> Deduced;
+ SmallVector<QualType, 4> ParamTypes;
+ unsigned NumExplicitlySpecified = 0;
+ if (ExplicitTemplateArgs) {
+ TemplateDeductionResult Result =
+ SubstituteExplicitTemplateArguments(FunctionTemplate,
+ *ExplicitTemplateArgs,
+ Deduced,
+ ParamTypes,
+ nullptr,
+ Info);
+ if (Result)
+ return Result;
+
+ NumExplicitlySpecified = Deduced.size();
+ } else {
+ // Just fill in the parameter types from the function declaration.
+ for (unsigned I = 0; I != NumParams; ++I)
+ ParamTypes.push_back(Function->getParamDecl(I)->getType());
+ }
+
+ // Deduce template arguments from the function parameters.
+ Deduced.resize(TemplateParams->size());
+ unsigned ArgIdx = 0;
+ SmallVector<OriginalCallArg, 4> OriginalCallArgs;
+ for (unsigned ParamIdx = 0, NumParamTypes = ParamTypes.size();
+ ParamIdx != NumParamTypes; ++ParamIdx) {
+ QualType OrigParamType = ParamTypes[ParamIdx];
+ QualType ParamType = OrigParamType;
+
+ const PackExpansionType *ParamExpansion
+ = dyn_cast<PackExpansionType>(ParamType);
+ if (!ParamExpansion) {
+ // Simple case: matching a function parameter to a function argument.
+ if (ArgIdx >= CheckArgs)
+ break;
+
+ Expr *Arg = Args[ArgIdx++];
+ QualType ArgType = Arg->getType();
+
+ unsigned TDF = 0;
+ if (AdjustFunctionParmAndArgTypesForDeduction(*this, TemplateParams,
+ ParamType, ArgType, Arg,
+ TDF))
+ continue;
+
+ // If we have nothing to deduce, we're done.
+ if (!hasDeducibleTemplateParameters(*this, FunctionTemplate, ParamType))
+ continue;
+
+ // If the argument is an initializer list ...
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(Arg)) {
+ TemplateDeductionResult Result;
+        // References were already removed from the parameter type above.
+ if (!DeduceFromInitializerList(*this, TemplateParams, ParamType, ILE,
+ Info, Deduced, TDF, Result))
+ continue;
+
+ if (Result)
+ return Result;
+ // Don't track the argument type, since an initializer list has none.
+ continue;
+ }
+
+ // Keep track of the argument type and corresponding parameter index,
+ // so we can check for compatibility between the deduced A and A.
+ OriginalCallArgs.push_back(OriginalCallArg(OrigParamType, ArgIdx-1,
+ ArgType));
+
+ if (TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams,
+ ParamType, ArgType,
+ Info, Deduced, TDF))
+ return Result;
+
+ continue;
+ }
+
+ // C++0x [temp.deduct.call]p1:
+ // For a function parameter pack that occurs at the end of the
+ // parameter-declaration-list, the type A of each remaining argument of
+ // the call is compared with the type P of the declarator-id of the
+ // function parameter pack. Each comparison deduces template arguments
+ // for subsequent positions in the template parameter packs expanded by
+ // the function parameter pack. For a function parameter pack that does
+ // not occur at the end of the parameter-declaration-list, the type of
+ // the parameter pack is a non-deduced context.
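+    // For example, given template<class... Ts> void f(Ts...); the call
+    // f(1, 'a', 2.5) deduces Ts = <int, char, double>, one element per
+    // remaining argument.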
+ if (ParamIdx + 1 < NumParamTypes)
+ break;
+
+ QualType ParamPattern = ParamExpansion->getPattern();
+ PackDeductionScope PackScope(*this, TemplateParams, Deduced, Info,
+ ParamPattern);
+
+ bool HasAnyArguments = false;
+ for (; ArgIdx < Args.size(); ++ArgIdx) {
+ HasAnyArguments = true;
+
+ QualType OrigParamType = ParamPattern;
+ ParamType = OrigParamType;
+ Expr *Arg = Args[ArgIdx];
+ QualType ArgType = Arg->getType();
+
+ unsigned TDF = 0;
+ if (AdjustFunctionParmAndArgTypesForDeduction(*this, TemplateParams,
+ ParamType, ArgType, Arg,
+ TDF)) {
+ // We can't actually perform any deduction for this argument, so stop
+ // deduction at this point.
+ ++ArgIdx;
+ break;
+ }
+
+ // As above, initializer lists need special handling.
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(Arg)) {
+ TemplateDeductionResult Result;
+ if (!DeduceFromInitializerList(*this, TemplateParams, ParamType, ILE,
+ Info, Deduced, TDF, Result)) {
+ ++ArgIdx;
+ break;
+ }
+
+ if (Result)
+ return Result;
+ } else {
+
+ // Keep track of the argument type and corresponding argument index,
+ // so we can check for compatibility between the deduced A and A.
+ if (hasDeducibleTemplateParameters(*this, FunctionTemplate, ParamType))
+ OriginalCallArgs.push_back(OriginalCallArg(OrigParamType, ArgIdx,
+ ArgType));
+
+ if (TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams,
+ ParamType, ArgType, Info,
+ Deduced, TDF))
+ return Result;
+ }
+
+ PackScope.nextPackElement();
+ }
+
+ // Build argument packs for each of the parameter packs expanded by this
+ // pack expansion.
+ if (auto Result = PackScope.finish(HasAnyArguments))
+ return Result;
+
+    // After we've matched against a parameter pack, we're done.
+ break;
+ }
+
+ return FinishTemplateArgumentDeduction(FunctionTemplate, Deduced,
+ NumExplicitlySpecified, Specialization,
+ Info, &OriginalCallArgs,
+ PartialOverloading);
+}
+
+QualType Sema::adjustCCAndNoReturn(QualType ArgFunctionType,
+ QualType FunctionType) {
+ if (ArgFunctionType.isNull())
+ return ArgFunctionType;
+
+ const FunctionProtoType *FunctionTypeP =
+ FunctionType->castAs<FunctionProtoType>();
+ CallingConv CC = FunctionTypeP->getCallConv();
+ bool NoReturn = FunctionTypeP->getNoReturnAttr();
+ const FunctionProtoType *ArgFunctionTypeP =
+ ArgFunctionType->getAs<FunctionProtoType>();
+ if (ArgFunctionTypeP->getCallConv() == CC &&
+ ArgFunctionTypeP->getNoReturnAttr() == NoReturn)
+ return ArgFunctionType;
+
+ FunctionType::ExtInfo EI = ArgFunctionTypeP->getExtInfo().withCallingConv(CC);
+ EI = EI.withNoReturn(NoReturn);
+ ArgFunctionTypeP =
+ cast<FunctionProtoType>(Context.adjustFunctionType(ArgFunctionTypeP, EI));
+ return QualType(ArgFunctionTypeP, 0);
+}
+
+/// \brief Deduce template arguments when taking the address of a function
+/// template (C++ [temp.deduct.funcaddr]) or matching a specialization to
+/// a template.
+///
+/// \param FunctionTemplate the function template for which we are performing
+/// template argument deduction.
+///
+/// \param ExplicitTemplateArgs the explicitly-specified template
+/// arguments.
+///
+/// \param ArgFunctionType the function type that will be used as the
+/// "argument" type (A) when performing template argument deduction from the
+/// function template's function type. This type may be NULL, if there is no
+/// argument type to compare against, in C++0x [temp.arg.explicit]p3.
+///
+/// \param Specialization if template argument deduction was successful,
+/// this will be set to the function template specialization produced by
+/// template argument deduction.
+///
+/// \param Info the argument will be updated to provide additional information
+/// about template argument deduction.
+///
+/// \returns the result of template argument deduction.
+Sema::TemplateDeductionResult
+Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ QualType ArgFunctionType,
+ FunctionDecl *&Specialization,
+ TemplateDeductionInfo &Info,
+ bool InOverloadResolution) {
+ if (FunctionTemplate->isInvalidDecl())
+ return TDK_Invalid;
+
+ FunctionDecl *Function = FunctionTemplate->getTemplatedDecl();
+ TemplateParameterList *TemplateParams
+ = FunctionTemplate->getTemplateParameters();
+ QualType FunctionType = Function->getType();
+ if (!InOverloadResolution)
+ ArgFunctionType = adjustCCAndNoReturn(ArgFunctionType, FunctionType);
+
+ // Substitute any explicit template arguments.
+ LocalInstantiationScope InstScope(*this);
+ SmallVector<DeducedTemplateArgument, 4> Deduced;
+ unsigned NumExplicitlySpecified = 0;
+ SmallVector<QualType, 4> ParamTypes;
+ if (ExplicitTemplateArgs) {
+ if (TemplateDeductionResult Result
+ = SubstituteExplicitTemplateArguments(FunctionTemplate,
+ *ExplicitTemplateArgs,
+ Deduced, ParamTypes,
+ &FunctionType, Info))
+ return Result;
+
+ NumExplicitlySpecified = Deduced.size();
+ }
+
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+ SFINAETrap Trap(*this);
+
+ Deduced.resize(TemplateParams->size());
+
+  // If the function has a deduced return type, replace it with a dependent
+  // type so that we treat it as a non-deduced context in what follows.
+ bool HasDeducedReturnType = false;
+ if (getLangOpts().CPlusPlus14 && InOverloadResolution &&
+ Function->getReturnType()->getContainedAutoType()) {
+ FunctionType = SubstAutoType(FunctionType, Context.DependentTy);
+ HasDeducedReturnType = true;
+ }
+
+ if (!ArgFunctionType.isNull()) {
+ unsigned TDF = TDF_TopLevelParameterTypeList;
+ if (InOverloadResolution) TDF |= TDF_InOverloadResolution;
+ // Deduce template arguments from the function type.
+ if (TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams,
+ FunctionType, ArgFunctionType,
+ Info, Deduced, TDF))
+ return Result;
+ }
+
+ if (TemplateDeductionResult Result
+ = FinishTemplateArgumentDeduction(FunctionTemplate, Deduced,
+ NumExplicitlySpecified,
+ Specialization, Info))
+ return Result;
+
+ // If the function has a deduced return type, deduce it now, so we can check
+ // that the deduced function type matches the requested type.
+ if (HasDeducedReturnType &&
+ Specialization->getReturnType()->isUndeducedType() &&
+ DeduceReturnType(Specialization, Info.getLocation(), false))
+ return TDK_MiscellaneousDeductionFailure;
+
+ // If the requested function type does not match the actual type of the
+ // specialization with respect to arguments of compatible pointer to function
+ // types, template argument deduction fails.
+ if (!ArgFunctionType.isNull()) {
+ if (InOverloadResolution && !isSameOrCompatibleFunctionType(
+ Context.getCanonicalType(Specialization->getType()),
+ Context.getCanonicalType(ArgFunctionType)))
+ return TDK_MiscellaneousDeductionFailure;
+    else if (!InOverloadResolution &&
+ !Context.hasSameType(Specialization->getType(), ArgFunctionType))
+ return TDK_MiscellaneousDeductionFailure;
+ }
+
+ return TDK_Success;
+}
+
+/// \brief Given a function declaration (e.g. a generic lambda conversion
+/// function) that contains an 'auto' in its result type, substitute it
+/// with TypeToReplaceAutoWith. Be careful to pass in the type you want
+/// to replace 'auto' with and not the actual result type you want
+/// to set the function to.
+static inline void
+SubstAutoWithinFunctionReturnType(FunctionDecl *F,
+ QualType TypeToReplaceAutoWith, Sema &S) {
+ assert(!TypeToReplaceAutoWith->getContainedAutoType());
+ QualType AutoResultType = F->getReturnType();
+ assert(AutoResultType->getContainedAutoType());
+ QualType DeducedResultType = S.SubstAutoType(AutoResultType,
+ TypeToReplaceAutoWith);
+ S.Context.adjustDeducedFunctionResultType(F, DeducedResultType);
+}
+
+/// \brief Given a specialized conversion operator of a generic lambda
+/// create the corresponding specializations of the call operator and
+/// the static-invoker. If the return type of the call operator is auto,
+/// deduce its return type and check if that matches the
+/// return type of the destination function ptr.
+static inline Sema::TemplateDeductionResult
+SpecializeCorrespondingLambdaCallOperatorAndInvoker(
+ CXXConversionDecl *ConversionSpecialized,
+ SmallVectorImpl<DeducedTemplateArgument> &DeducedArguments,
+ QualType ReturnTypeOfDestFunctionPtr,
+ TemplateDeductionInfo &TDInfo,
+ Sema &S) {
+
+ CXXRecordDecl *LambdaClass = ConversionSpecialized->getParent();
+ assert(LambdaClass && LambdaClass->isGenericLambda());
+
+ CXXMethodDecl *CallOpGeneric = LambdaClass->getLambdaCallOperator();
+ QualType CallOpResultType = CallOpGeneric->getReturnType();
+ const bool GenericLambdaCallOperatorHasDeducedReturnType =
+ CallOpResultType->getContainedAutoType();
+
+ FunctionTemplateDecl *CallOpTemplate =
+ CallOpGeneric->getDescribedFunctionTemplate();
+
+ FunctionDecl *CallOpSpecialized = nullptr;
+ // Use the deduced arguments of the conversion function, to specialize our
+ // generic lambda's call operator.
+ if (Sema::TemplateDeductionResult Result
+ = S.FinishTemplateArgumentDeduction(CallOpTemplate,
+ DeducedArguments,
+ 0, CallOpSpecialized, TDInfo))
+ return Result;
+
+ // If we need to deduce the return type, do so (instantiates the callop).
+ if (GenericLambdaCallOperatorHasDeducedReturnType &&
+ CallOpSpecialized->getReturnType()->isUndeducedType())
+ S.DeduceReturnType(CallOpSpecialized,
+ CallOpSpecialized->getPointOfInstantiation(),
+ /*Diagnose*/ true);
+
+ // Check to see if the return type of the destination ptr-to-function
+ // matches the return type of the call operator.
+ if (!S.Context.hasSameType(CallOpSpecialized->getReturnType(),
+ ReturnTypeOfDestFunctionPtr))
+ return Sema::TDK_NonDeducedMismatch;
+ // Since we have succeeded in matching the source and destination
+ // ptr-to-functions (now including return type), and have successfully
+ // specialized our corresponding call operator, we are ready to
+ // specialize the static invoker with the deduced arguments of our
+ // ptr-to-function.
+ FunctionDecl *InvokerSpecialized = nullptr;
+ FunctionTemplateDecl *InvokerTemplate = LambdaClass->
+ getLambdaStaticInvoker()->getDescribedFunctionTemplate();
+
+#ifndef NDEBUG
+ Sema::TemplateDeductionResult LLVM_ATTRIBUTE_UNUSED Result =
+#endif
+ S.FinishTemplateArgumentDeduction(InvokerTemplate, DeducedArguments, 0,
+ InvokerSpecialized, TDInfo);
+ assert(Result == Sema::TDK_Success &&
+ "If the call operator succeeded so should the invoker!");
+ // Set the result type to match the corresponding call operator
+ // specialization's result type.
+ if (GenericLambdaCallOperatorHasDeducedReturnType &&
+ InvokerSpecialized->getReturnType()->isUndeducedType()) {
+ // Be sure to get the type to replace 'auto' with and not
+ // the full result type of the call op specialization
+ // to substitute into the 'auto' of the invoker and conversion
+ // function.
+ // For e.g.
+ // int* (*fp)(int*) = [](auto* a) -> auto* { return a; };
+ // We don't want to subst 'int*' into 'auto' to get int**.
+
+ QualType TypeToReplaceAutoWith = CallOpSpecialized->getReturnType()
+ ->getContainedAutoType()
+ ->getDeducedType();
+ SubstAutoWithinFunctionReturnType(InvokerSpecialized,
+ TypeToReplaceAutoWith, S);
+ SubstAutoWithinFunctionReturnType(ConversionSpecialized,
+ TypeToReplaceAutoWith, S);
+ }
+
+ // Ensure that static invoker doesn't have a const qualifier.
+ // FIXME: When creating the InvokerTemplate in SemaLambda.cpp
+ // do not use the CallOperator's TypeSourceInfo which allows
+ // the const qualifier to leak through.
+ const FunctionProtoType *InvokerFPT = InvokerSpecialized->
+ getType().getTypePtr()->castAs<FunctionProtoType>();
+ FunctionProtoType::ExtProtoInfo EPI = InvokerFPT->getExtProtoInfo();
+ EPI.TypeQuals = 0;
+ InvokerSpecialized->setType(S.Context.getFunctionType(
+ InvokerFPT->getReturnType(), InvokerFPT->getParamTypes(), EPI));
+ return Sema::TDK_Success;
+}
+
+/// \brief Deduce template arguments for a templated conversion
+/// function (C++ [temp.deduct.conv]) and, if successful, produce a
+/// conversion function template specialization.
+Sema::TemplateDeductionResult
+Sema::DeduceTemplateArguments(FunctionTemplateDecl *ConversionTemplate,
+ QualType ToType,
+ CXXConversionDecl *&Specialization,
+ TemplateDeductionInfo &Info) {
+ if (ConversionTemplate->isInvalidDecl())
+ return TDK_Invalid;
+
+ CXXConversionDecl *ConversionGeneric
+ = cast<CXXConversionDecl>(ConversionTemplate->getTemplatedDecl());
+
+ QualType FromType = ConversionGeneric->getConversionType();
+
+ // Canonicalize the types for deduction.
+ QualType P = Context.getCanonicalType(FromType);
+ QualType A = Context.getCanonicalType(ToType);
+
+ // C++0x [temp.deduct.conv]p2:
+ // If P is a reference type, the type referred to by P is used for
+ // type deduction.
+ if (const ReferenceType *PRef = P->getAs<ReferenceType>())
+ P = PRef->getPointeeType();
+
+ // C++0x [temp.deduct.conv]p4:
+ // [...] If A is a reference type, the type referred to by A is used
+ // for type deduction.
+ if (const ReferenceType *ARef = A->getAs<ReferenceType>())
+ A = ARef->getPointeeType().getUnqualifiedType();
+ // C++ [temp.deduct.conv]p3:
+ //
+ // If A is not a reference type:
+ else {
+ assert(!A->isReferenceType() && "Reference types were handled above");
+
+ // - If P is an array type, the pointer type produced by the
+ // array-to-pointer standard conversion (4.2) is used in place
+ // of P for type deduction; otherwise,
+ if (P->isArrayType())
+ P = Context.getArrayDecayedType(P);
+ // - If P is a function type, the pointer type produced by the
+ // function-to-pointer standard conversion (4.3) is used in
+ // place of P for type deduction; otherwise,
+ else if (P->isFunctionType())
+ P = Context.getPointerType(P);
+ // - If P is a cv-qualified type, the top level cv-qualifiers of
+ // P's type are ignored for type deduction.
+ else
+ P = P.getUnqualifiedType();
+
+ // C++0x [temp.deduct.conv]p4:
+ // If A is a cv-qualified type, the top level cv-qualifiers of A's
+ // type are ignored for type deduction. If A is a reference type, the type
+ // referred to by A is used for type deduction.
+ A = A.getUnqualifiedType();
+ }
+
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+ SFINAETrap Trap(*this);
+
+ // C++ [temp.deduct.conv]p1:
+ // Template argument deduction is done by comparing the return
+ // type of the template conversion function (call it P) with the
+ // type that is required as the result of the conversion (call it
+ // A) as described in 14.8.2.4.
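+  // For example, given struct S { template<class T> operator T*(); };
+  // the initialization int *p = S(); compares P = T* against A = int*,
+  // deducing T = int.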
+ TemplateParameterList *TemplateParams
+ = ConversionTemplate->getTemplateParameters();
+ SmallVector<DeducedTemplateArgument, 4> Deduced;
+ Deduced.resize(TemplateParams->size());
+
+ // C++0x [temp.deduct.conv]p4:
+ // In general, the deduction process attempts to find template
+ // argument values that will make the deduced A identical to
+ // A. However, there are two cases that allow a difference:
+ unsigned TDF = 0;
+ // - If the original A is a reference type, A can be more
+ // cv-qualified than the deduced A (i.e., the type referred to
+ // by the reference)
+ if (ToType->isReferenceType())
+ TDF |= TDF_ParamWithReferenceType;
+ // - The deduced A can be another pointer or pointer to member
+ // type that can be converted to A via a qualification
+ // conversion.
+ //
+ // (C++0x [temp.deduct.conv]p6 clarifies that this only happens when
+ // both P and A are pointers or member pointers. In this case, we
+ // just ignore cv-qualifiers completely).
+ if ((P->isPointerType() && A->isPointerType()) ||
+ (P->isMemberPointerType() && A->isMemberPointerType()))
+ TDF |= TDF_IgnoreQualifiers;
+ if (TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams,
+ P, A, Info, Deduced, TDF))
+ return Result;
+
+  // Create an instantiation scope for finalizing the operator.
+ LocalInstantiationScope InstScope(*this);
+ // Finish template argument deduction.
+ FunctionDecl *ConversionSpecialized = nullptr;
+ TemplateDeductionResult Result
+ = FinishTemplateArgumentDeduction(ConversionTemplate, Deduced, 0,
+ ConversionSpecialized, Info);
+ Specialization = cast_or_null<CXXConversionDecl>(ConversionSpecialized);
+
+ // If the conversion operator is being invoked on a lambda closure to convert
+ // to a ptr-to-function, use the deduced arguments from the conversion
+ // function to specialize the corresponding call operator.
+ // e.g., int (*fp)(int) = [](auto a) { return a; };
+ if (Result == TDK_Success && isLambdaConversionOperator(ConversionGeneric)) {
+
+ // Get the return type of the destination ptr-to-function we are converting
+ // to. This is necessary for matching the lambda call operator's return
+ // type to that of the destination ptr-to-function's return type.
+ assert(A->isPointerType() &&
+ "Can only convert from lambda to ptr-to-function");
+ const FunctionType *ToFunType =
+ A->getPointeeType().getTypePtr()->getAs<FunctionType>();
+ const QualType DestFunctionPtrReturnType = ToFunType->getReturnType();
+
+ // Create the corresponding specializations of the call operator and
+ // the static-invoker; and if the return type is auto,
+ // deduce the return type and check if it matches the
+ // DestFunctionPtrReturnType.
+ // For instance:
+ // auto L = [](auto a) { return f(a); };
+ // int (*fp)(int) = L;
+ // char (*fp2)(int) = L; <-- Not OK.
+
+ Result = SpecializeCorrespondingLambdaCallOperatorAndInvoker(
+ Specialization, Deduced, DestFunctionPtrReturnType,
+ Info, *this);
+ }
+ return Result;
+}
+
+/// \brief Deduce template arguments for a function template when there is
+/// nothing to deduce against (C++0x [temp.arg.explicit]p3).
+///
+/// \param FunctionTemplate the function template for which we are performing
+/// template argument deduction.
+///
+/// \param ExplicitTemplateArgs the explicitly-specified template
+/// arguments.
+///
+/// \param Specialization if template argument deduction was successful,
+/// this will be set to the function template specialization produced by
+/// template argument deduction.
+///
+/// \param Info the argument will be updated to provide additional information
+/// about template argument deduction.
+///
+/// \returns the result of template argument deduction.
+Sema::TemplateDeductionResult
+Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ FunctionDecl *&Specialization,
+ TemplateDeductionInfo &Info,
+ bool InOverloadResolution) {
+ return DeduceTemplateArguments(FunctionTemplate, ExplicitTemplateArgs,
+ QualType(), Specialization, Info,
+ InOverloadResolution);
+}
+
+namespace {
+ /// Substitute the 'auto' type specifier within a type for a given replacement
+ /// type.
+ class SubstituteAutoTransform :
+ public TreeTransform<SubstituteAutoTransform> {
+ QualType Replacement;
+ public:
+ SubstituteAutoTransform(Sema &SemaRef, QualType Replacement)
+ : TreeTransform<SubstituteAutoTransform>(SemaRef),
+ Replacement(Replacement) {}
+
+ QualType TransformAutoType(TypeLocBuilder &TLB, AutoTypeLoc TL) {
+ // If we're building the type pattern to deduce against, don't wrap the
+ // substituted type in an AutoType. Certain template deduction rules
+ // apply only when a template type parameter appears directly (and not if
+ // the parameter is found through desugaring). For instance:
+ // auto &&lref = lvalue;
+ // must transform into "rvalue reference to T" not "rvalue reference to
+ // auto type deduced as T" in order for [temp.deduct.call]p3 to apply.
+ if (!Replacement.isNull() && isa<TemplateTypeParmType>(Replacement)) {
+ QualType Result = Replacement;
+ TemplateTypeParmTypeLoc NewTL =
+ TLB.push<TemplateTypeParmTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+ } else {
+ bool Dependent =
+ !Replacement.isNull() && Replacement->isDependentType();
+ QualType Result =
+ SemaRef.Context.getAutoType(Dependent ? QualType() : Replacement,
+ TL.getTypePtr()->getKeyword(),
+ Dependent);
+ AutoTypeLoc NewTL = TLB.push<AutoTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+ }
+ }
+
+ ExprResult TransformLambdaExpr(LambdaExpr *E) {
+ // Lambdas never need to be transformed.
+ return E;
+ }
+
+ QualType Apply(TypeLoc TL) {
+ // Create some scratch storage for the transformed type locations.
+ // FIXME: We're just going to throw this information away. Don't build it.
+ TypeLocBuilder TLB;
+ TLB.reserve(TL.getFullDataSize());
+ return TransformType(TLB, TL);
+ }
+ };
+}
+
+Sema::DeduceAutoResult
+Sema::DeduceAutoType(TypeSourceInfo *Type, Expr *&Init, QualType &Result) {
+ return DeduceAutoType(Type->getTypeLoc(), Init, Result);
+}
+
+/// \brief Deduce the type for an auto type-specifier (C++11 [dcl.spec.auto]p6)
+///
+/// \param Type the type pattern using the auto type-specifier.
+/// \param Init the initializer for the variable whose type is to be deduced.
+/// \param Result if type deduction was successful, this will be set to the
+/// deduced type.
+Sema::DeduceAutoResult
+Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result) {
+ if (Init->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult NonPlaceholder = CheckPlaceholderExpr(Init);
+ if (NonPlaceholder.isInvalid())
+ return DAR_FailedAlreadyDiagnosed;
+ Init = NonPlaceholder.get();
+ }
+
+ if (Init->isTypeDependent() || Type.getType()->isDependentType()) {
+ Result = SubstituteAutoTransform(*this, Context.DependentTy).Apply(Type);
+ assert(!Result.isNull() && "substituting DependentTy can't fail");
+ return DAR_Succeeded;
+ }
+
+ // If this is a 'decltype(auto)' specifier, do the decltype dance.
+ // Since 'decltype(auto)' can only occur at the top of the type, we
+ // don't need to go digging for it.
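+  // For example, given 'int n;', 'decltype(auto) x = (n);' gives x the
+  // type decltype((n)), i.e. int&, whereas plain 'auto' would deduce int.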
+ if (const AutoType *AT = Type.getType()->getAs<AutoType>()) {
+ if (AT->isDecltypeAuto()) {
+ if (isa<InitListExpr>(Init)) {
+ Diag(Init->getLocStart(), diag::err_decltype_auto_initializer_list);
+ return DAR_FailedAlreadyDiagnosed;
+ }
+
+ QualType Deduced = BuildDecltypeType(Init, Init->getLocStart(), false);
+ if (Deduced.isNull())
+ return DAR_FailedAlreadyDiagnosed;
+ // FIXME: Support a non-canonical deduced type for 'auto'.
+ Deduced = Context.getCanonicalType(Deduced);
+ Result = SubstituteAutoTransform(*this, Deduced).Apply(Type);
+ if (Result.isNull())
+ return DAR_FailedAlreadyDiagnosed;
+ return DAR_Succeeded;
+ } else if (!getLangOpts().CPlusPlus) {
+ if (isa<InitListExpr>(Init)) {
+ Diag(Init->getLocStart(), diag::err_auto_init_list_from_c);
+ return DAR_FailedAlreadyDiagnosed;
+ }
+ }
+ }
+
+ SourceLocation Loc = Init->getExprLoc();
+
+ LocalInstantiationScope InstScope(*this);
+
+ // Build template<class TemplParam> void Func(FuncParam);
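+  // For example, 'auto x = 42;' is deduced as if by calling
+  //   template<class TemplParam> void Func(TemplParam);
+  // with Func(42), which deduces TemplParam = int.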
+ TemplateTypeParmDecl *TemplParam =
+ TemplateTypeParmDecl::Create(Context, nullptr, SourceLocation(), Loc, 0, 0,
+ nullptr, false, false);
+ QualType TemplArg = QualType(TemplParam->getTypeForDecl(), 0);
+ NamedDecl *TemplParamPtr = TemplParam;
+ FixedSizeTemplateParameterListStorage<1> TemplateParamsSt(
+ Loc, Loc, TemplParamPtr, Loc);
+
+ QualType FuncParam = SubstituteAutoTransform(*this, TemplArg).Apply(Type);
+ assert(!FuncParam.isNull() &&
+ "substituting template parameter for 'auto' failed");
+
+ // Deduce type of TemplParam in Func(Init)
+ SmallVector<DeducedTemplateArgument, 1> Deduced;
+ Deduced.resize(1);
+ QualType InitType = Init->getType();
+ unsigned TDF = 0;
+
+ TemplateDeductionInfo Info(Loc);
+
+ InitListExpr *InitList = dyn_cast<InitListExpr>(Init);
+ if (InitList) {
+ for (unsigned i = 0, e = InitList->getNumInits(); i < e; ++i) {
+ if (DeduceTemplateArgumentByListElement(*this, TemplateParamsSt.get(),
+ TemplArg, InitList->getInit(i),
+ Info, Deduced, TDF))
+ return DAR_Failed;
+ }
+ } else {
+ if (!getLangOpts().CPlusPlus && Init->refersToBitField()) {
+ Diag(Loc, diag::err_auto_bitfield);
+ return DAR_FailedAlreadyDiagnosed;
+ }
+
+ if (AdjustFunctionParmAndArgTypesForDeduction(
+ *this, TemplateParamsSt.get(), FuncParam, InitType, Init, TDF))
+ return DAR_Failed;
+
+ if (DeduceTemplateArgumentsByTypeMatch(*this, TemplateParamsSt.get(),
+ FuncParam, InitType, Info, Deduced,
+ TDF))
+ return DAR_Failed;
+ }
+
+ if (Deduced[0].getKind() != TemplateArgument::Type)
+ return DAR_Failed;
+
+ QualType DeducedType = Deduced[0].getAsType();
+
+ if (InitList) {
+ DeducedType = BuildStdInitializerList(DeducedType, Loc);
+ if (DeducedType.isNull())
+ return DAR_FailedAlreadyDiagnosed;
+ }
+
+ Result = SubstituteAutoTransform(*this, DeducedType).Apply(Type);
+ if (Result.isNull())
+ return DAR_FailedAlreadyDiagnosed;
+
+ // Check that the deduced argument type is compatible with the original
+ // argument type per C++ [temp.deduct.call]p4.
+ if (!InitList && !Result.isNull() &&
+ CheckOriginalCallArgDeduction(*this,
+ Sema::OriginalCallArg(FuncParam,0,InitType),
+ Result)) {
+ Result = QualType();
+ return DAR_Failed;
+ }
+
+ return DAR_Succeeded;
+}
+
+QualType Sema::SubstAutoType(QualType TypeWithAuto,
+ QualType TypeToReplaceAuto) {
+ return SubstituteAutoTransform(*this, TypeToReplaceAuto).
+ TransformType(TypeWithAuto);
+}
+
+TypeSourceInfo* Sema::SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
+ QualType TypeToReplaceAuto) {
+ return SubstituteAutoTransform(*this, TypeToReplaceAuto).
+ TransformType(TypeWithAuto);
+}
+
+void Sema::DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init) {
+ if (isa<InitListExpr>(Init))
+ Diag(VDecl->getLocation(),
+ VDecl->isInitCapture()
+ ? diag::err_init_capture_deduction_failure_from_init_list
+ : diag::err_auto_var_deduction_failure_from_init_list)
+ << VDecl->getDeclName() << VDecl->getType() << Init->getSourceRange();
+ else
+ Diag(VDecl->getLocation(),
+ VDecl->isInitCapture() ? diag::err_init_capture_deduction_failure
+ : diag::err_auto_var_deduction_failure)
+ << VDecl->getDeclName() << VDecl->getType() << Init->getType()
+ << Init->getSourceRange();
+}
+
+bool Sema::DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
+ bool Diagnose) {
+ assert(FD->getReturnType()->isUndeducedType());
+
+ if (FD->getTemplateInstantiationPattern())
+ InstantiateFunctionDefinition(Loc, FD);
+
+ bool StillUndeduced = FD->getReturnType()->isUndeducedType();
+ if (StillUndeduced && Diagnose && !FD->isInvalidDecl()) {
+ Diag(Loc, diag::err_auto_fn_used_before_defined) << FD;
+ Diag(FD->getLocation(), diag::note_callee_decl) << FD;
+ }
+
+ return StillUndeduced;
+}
+
+static void
+MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
+ bool OnlyDeduced,
+ unsigned Level,
+ llvm::SmallBitVector &Deduced);
+
+/// \brief If this is a non-static member function, add the implicit object
+/// parameter type (a reference to the cv-qualified class type) to ArgTypes.
+static void
+AddImplicitObjectParameterType(ASTContext &Context,
+ CXXMethodDecl *Method,
+ SmallVectorImpl<QualType> &ArgTypes) {
+ // C++11 [temp.func.order]p3:
+ // [...] The new parameter is of type "reference to cv A," where cv are
+ // the cv-qualifiers of the function template (if any) and A is
+ // the class of which the function template is a member.
+ //
+ // The standard doesn't say explicitly, but we pick the appropriate kind of
+ // reference type based on [over.match.funcs]p4.
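+  // For example, for struct X { template<class T> void f(T) const; };
+  // the synthesized first parameter has type 'const X &'; with an '&&'
+  // ref-qualifier it would instead be an rvalue reference.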
+ QualType ArgTy = Context.getTypeDeclType(Method->getParent());
+ ArgTy = Context.getQualifiedType(ArgTy,
+ Qualifiers::fromCVRMask(Method->getTypeQualifiers()));
+ if (Method->getRefQualifier() == RQ_RValue)
+ ArgTy = Context.getRValueReferenceType(ArgTy);
+ else
+ ArgTy = Context.getLValueReferenceType(ArgTy);
+ ArgTypes.push_back(ArgTy);
+}
+
+/// \brief Determine whether the function template \p FT1 is at least as
+/// specialized as \p FT2.
+static bool isAtLeastAsSpecializedAs(Sema &S,
+ SourceLocation Loc,
+ FunctionTemplateDecl *FT1,
+ FunctionTemplateDecl *FT2,
+ TemplatePartialOrderingContext TPOC,
+ unsigned NumCallArguments1) {
+ FunctionDecl *FD1 = FT1->getTemplatedDecl();
+ FunctionDecl *FD2 = FT2->getTemplatedDecl();
+ const FunctionProtoType *Proto1 = FD1->getType()->getAs<FunctionProtoType>();
+ const FunctionProtoType *Proto2 = FD2->getType()->getAs<FunctionProtoType>();
+
+ assert(Proto1 && Proto2 && "Function templates must have prototypes");
+ TemplateParameterList *TemplateParams = FT2->getTemplateParameters();
+ SmallVector<DeducedTemplateArgument, 4> Deduced;
+ Deduced.resize(TemplateParams->size());
+
+ // C++0x [temp.deduct.partial]p3:
+ // The types used to determine the ordering depend on the context in which
+ // the partial ordering is done:
+ TemplateDeductionInfo Info(Loc);
+ SmallVector<QualType, 4> Args2;
+ switch (TPOC) {
+ case TPOC_Call: {
+ // - In the context of a function call, the function parameter types are
+ // used.
+ CXXMethodDecl *Method1 = dyn_cast<CXXMethodDecl>(FD1);
+ CXXMethodDecl *Method2 = dyn_cast<CXXMethodDecl>(FD2);
+
+ // C++11 [temp.func.order]p3:
+ // [...] If only one of the function templates is a non-static
+ // member, that function template is considered to have a new
+ // first parameter inserted in its function parameter list. The
+ // new parameter is of type "reference to cv A," where cv are
+ // the cv-qualifiers of the function template (if any) and A is
+ // the class of which the function template is a member.
+ //
+ // Note that we interpret this to mean "if one of the function
+ // templates is a non-static member and the other is a non-member";
+ // otherwise, the ordering rules for static functions against non-static
+ // functions don't make any sense.
+ //
+ // C++98/03 doesn't have this provision but we've extended DR532 to cover
+ // it as wording was broken prior to it.
+ SmallVector<QualType, 4> Args1;
+
+ unsigned NumComparedArguments = NumCallArguments1;
+
+ if (!Method2 && Method1 && !Method1->isStatic()) {
+ // Compare 'this' from Method1 against first parameter from Method2.
+ AddImplicitObjectParameterType(S.Context, Method1, Args1);
+ ++NumComparedArguments;
+ } else if (!Method1 && Method2 && !Method2->isStatic()) {
+ // Compare 'this' from Method2 against first parameter from Method1.
+ AddImplicitObjectParameterType(S.Context, Method2, Args2);
+ }
+
+ Args1.insert(Args1.end(), Proto1->param_type_begin(),
+ Proto1->param_type_end());
+ Args2.insert(Args2.end(), Proto2->param_type_begin(),
+ Proto2->param_type_end());
+
+ // C++ [temp.func.order]p5:
+ // The presence of unused ellipsis and default arguments has no effect on
+ // the partial ordering of function templates.
+ if (Args1.size() > NumComparedArguments)
+ Args1.resize(NumComparedArguments);
+ if (Args2.size() > NumComparedArguments)
+ Args2.resize(NumComparedArguments);
+ if (DeduceTemplateArguments(S, TemplateParams, Args2.data(), Args2.size(),
+ Args1.data(), Args1.size(), Info, Deduced,
+ TDF_None, /*PartialOrdering=*/true))
+ return false;
+
+ break;
+ }
+
+ case TPOC_Conversion:
+ // - In the context of a call to a conversion operator, the return types
+ // of the conversion function templates are used.
+ if (DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, Proto2->getReturnType(), Proto1->getReturnType(),
+ Info, Deduced, TDF_None,
+ /*PartialOrdering=*/true))
+ return false;
+ break;
+
+ case TPOC_Other:
+ // - In other contexts (14.6.6.2) the function template's function type
+ // is used.
+ if (DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ FD2->getType(), FD1->getType(),
+ Info, Deduced, TDF_None,
+ /*PartialOrdering=*/true))
+ return false;
+ break;
+ }
+
+ // C++0x [temp.deduct.partial]p11:
+ // In most cases, all template parameters must have values in order for
+ // deduction to succeed, but for partial ordering purposes a template
+ // parameter may remain without a value provided it is not used in the
+ // types being used for partial ordering. [ Note: a template parameter used
+ // in a non-deduced context is considered used. -end note]
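+  // For example, in template<class T, class U> T f(T); partial ordering
+  // may leave U without a value; that is acceptable because U does not
+  // appear in the parameter types being compared.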
+ unsigned ArgIdx = 0, NumArgs = Deduced.size();
+ for (; ArgIdx != NumArgs; ++ArgIdx)
+ if (Deduced[ArgIdx].isNull())
+ break;
+
+ if (ArgIdx == NumArgs) {
+ // All template arguments were deduced. FT1 is at least as specialized
+ // as FT2.
+ return true;
+ }
+
+ // Figure out which template parameters were used.
+ llvm::SmallBitVector UsedParameters(TemplateParams->size());
+ switch (TPOC) {
+ case TPOC_Call:
+ for (unsigned I = 0, N = Args2.size(); I != N; ++I)
+ ::MarkUsedTemplateParameters(S.Context, Args2[I], false,
+ TemplateParams->getDepth(),
+ UsedParameters);
+ break;
+
+ case TPOC_Conversion:
+ ::MarkUsedTemplateParameters(S.Context, Proto2->getReturnType(), false,
+ TemplateParams->getDepth(), UsedParameters);
+ break;
+
+ case TPOC_Other:
+ ::MarkUsedTemplateParameters(S.Context, FD2->getType(), false,
+ TemplateParams->getDepth(),
+ UsedParameters);
+ break;
+ }
+
+ for (; ArgIdx != NumArgs; ++ArgIdx)
+ // If this argument had no value deduced but was used in one of the types
+ // used for partial ordering, then deduction fails.
+ if (Deduced[ArgIdx].isNull() && UsedParameters[ArgIdx])
+ return false;
+
+ return true;
+}
+
+/// \brief Determine whether this is a function template whose parameter-type-list
+/// ends with a function parameter pack.
+static bool isVariadicFunctionTemplate(FunctionTemplateDecl *FunTmpl) {
+ FunctionDecl *Function = FunTmpl->getTemplatedDecl();
+ unsigned NumParams = Function->getNumParams();
+ if (NumParams == 0)
+ return false;
+
+ ParmVarDecl *Last = Function->getParamDecl(NumParams - 1);
+ if (!Last->isParameterPack())
+ return false;
+
+ // Make sure that no previous parameter is a parameter pack.
+ while (--NumParams > 0) {
+ if (Function->getParamDecl(NumParams - 1)->isParameterPack())
+ return false;
+ }
+
+ return true;
+}
+
+/// \brief Returns the more specialized function template according
+/// to the rules of function template partial ordering (C++ [temp.func.order]).
+///
+/// \param FT1 the first function template
+///
+/// \param FT2 the second function template
+///
+/// \param TPOC the context in which we are performing partial ordering of
+/// function templates.
+///
+/// \param NumCallArguments1 The number of arguments in the call to FT1, used
+/// only when \c TPOC is \c TPOC_Call.
+///
+/// \param NumCallArguments2 The number of arguments in the call to FT2, used
+/// only when \c TPOC is \c TPOC_Call.
+///
+/// \returns the more specialized function template. If neither
+/// template is more specialized, returns NULL.
+FunctionTemplateDecl *
+Sema::getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
+ FunctionTemplateDecl *FT2,
+ SourceLocation Loc,
+ TemplatePartialOrderingContext TPOC,
+ unsigned NumCallArguments1,
+ unsigned NumCallArguments2) {
+ bool Better1 = isAtLeastAsSpecializedAs(*this, Loc, FT1, FT2, TPOC,
+ NumCallArguments1);
+ bool Better2 = isAtLeastAsSpecializedAs(*this, Loc, FT2, FT1, TPOC,
+ NumCallArguments2);
+
+ if (Better1 != Better2) // We have a clear winner
+ return Better1 ? FT1 : FT2;
+
+ if (!Better1 && !Better2) // Neither is better than the other
+ return nullptr;
+
+ // FIXME: This mimics what GCC implements, but doesn't match up with the
+ // proposed resolution for core issue 692. This area needs to be sorted out,
+ // but for now we attempt to maintain compatibility.
+ bool Variadic1 = isVariadicFunctionTemplate(FT1);
+ bool Variadic2 = isVariadicFunctionTemplate(FT2);
+ if (Variadic1 != Variadic2)
+    return Variadic1 ? FT2 : FT1;
+
+ return nullptr;
+}
+
+/// \brief Determine if the two templates are equivalent.
+static bool isSameTemplate(TemplateDecl *T1, TemplateDecl *T2) {
+ if (T1 == T2)
+ return true;
+
+ if (!T1 || !T2)
+ return false;
+
+ return T1->getCanonicalDecl() == T2->getCanonicalDecl();
+}
+
+/// \brief Retrieve the most specialized of the given function template
+/// specializations.
+///
+/// \param SpecBegin the start iterator of the function template
+/// specializations that we will be comparing.
+///
+/// \param SpecEnd the end iterator of the function template
+/// specializations, paired with \p SpecBegin.
+///
+/// \param Loc the location where the ambiguity or no-specializations
+/// diagnostic should occur.
+///
+/// \param NoneDiag partial diagnostic used to diagnose cases where there are
+/// no matching candidates.
+///
+/// \param AmbigDiag partial diagnostic used to diagnose an ambiguity, if one
+/// occurs.
+///
+/// \param CandidateDiag partial diagnostic used for each function template
+/// specialization that is a candidate in the ambiguous ordering. One parameter
+/// in this diagnostic should be unbound, which will correspond to the string
+/// describing the template arguments for the function template specialization.
+///
+/// \returns the most specialized function template specialization, if
+/// found. Otherwise, returns SpecEnd.
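+///
+/// A sketch of a typical client (hypothetical declarations), taking the
+/// address of an overloaded function template set:
+/// \code
+///   template<class T> int f(T);
+///   template<class T> int f(T*);
+///   int (*p)(int*) = &f; // both specializations match the target type;
+///                        // partial ordering selects f(T*).
+/// \endcode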
+UnresolvedSetIterator Sema::getMostSpecialized(
+ UnresolvedSetIterator SpecBegin, UnresolvedSetIterator SpecEnd,
+ TemplateSpecCandidateSet &FailedCandidates,
+ SourceLocation Loc, const PartialDiagnostic &NoneDiag,
+ const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag,
+ bool Complain, QualType TargetType) {
+ if (SpecBegin == SpecEnd) {
+ if (Complain) {
+ Diag(Loc, NoneDiag);
+ FailedCandidates.NoteCandidates(*this, Loc);
+ }
+ return SpecEnd;
+ }
+
+ if (SpecBegin + 1 == SpecEnd)
+ return SpecBegin;
+
+ // Find the function template that is better than all of the templates it
+ // has been compared to.
+ UnresolvedSetIterator Best = SpecBegin;
+ FunctionTemplateDecl *BestTemplate
+ = cast<FunctionDecl>(*Best)->getPrimaryTemplate();
+ assert(BestTemplate && "Not a function template specialization?");
+ for (UnresolvedSetIterator I = SpecBegin + 1; I != SpecEnd; ++I) {
+ FunctionTemplateDecl *Challenger
+ = cast<FunctionDecl>(*I)->getPrimaryTemplate();
+ assert(Challenger && "Not a function template specialization?");
+ if (isSameTemplate(getMoreSpecializedTemplate(BestTemplate, Challenger,
+ Loc, TPOC_Other, 0, 0),
+ Challenger)) {
+ Best = I;
+ BestTemplate = Challenger;
+ }
+ }
+
+ // Make sure that the "best" function template is more specialized than all
+ // of the others.
+ bool Ambiguous = false;
+ for (UnresolvedSetIterator I = SpecBegin; I != SpecEnd; ++I) {
+ FunctionTemplateDecl *Challenger
+ = cast<FunctionDecl>(*I)->getPrimaryTemplate();
+ if (I != Best &&
+ !isSameTemplate(getMoreSpecializedTemplate(BestTemplate, Challenger,
+ Loc, TPOC_Other, 0, 0),
+ BestTemplate)) {
+ Ambiguous = true;
+ break;
+ }
+ }
+
+ if (!Ambiguous) {
+ // We found an answer. Return it.
+ return Best;
+ }
+
+ // Diagnose the ambiguity.
+ if (Complain) {
+ Diag(Loc, AmbigDiag);
+
+ // FIXME: Can we order the candidates in some sane way?
+ for (UnresolvedSetIterator I = SpecBegin; I != SpecEnd; ++I) {
+ PartialDiagnostic PD = CandidateDiag;
+ PD << getTemplateArgumentBindingsText(
+ cast<FunctionDecl>(*I)->getPrimaryTemplate()->getTemplateParameters(),
+ *cast<FunctionDecl>(*I)->getTemplateSpecializationArgs());
+ if (!TargetType.isNull())
+ HandleFunctionTypeMismatch(PD, cast<FunctionDecl>(*I)->getType(),
+ TargetType);
+ Diag((*I)->getLocation(), PD);
+ }
+ }
+
+ return SpecEnd;
+}
+
+/// \brief Returns the more specialized class template partial specialization
+/// according to the rules of partial ordering of class template partial
+/// specializations (C++ [temp.class.order]).
+///
+/// \param PS1 the first class template partial specialization
+///
+/// \param PS2 the second class template partial specialization
+///
+/// \returns the more specialized class template partial specialization. If
+/// neither partial specialization is more specialized, returns NULL.
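+///
+/// For illustration, a sketch (hypothetical declarations):
+/// \code
+///   template<class T, class U> struct A;            // primary
+///   template<class T, class U> struct A<T*, U> {};  // #1
+///   template<class T, class U> struct A<T*, U*> {}; // #2
+///   A<int*, int*> a; // instantiates #2, the more specialized partial
+///                    // specialization
+/// \endcode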
+ClassTemplatePartialSpecializationDecl *
+Sema::getMoreSpecializedPartialSpecialization(
+ ClassTemplatePartialSpecializationDecl *PS1,
+ ClassTemplatePartialSpecializationDecl *PS2,
+ SourceLocation Loc) {
+ // C++ [temp.class.order]p1:
+ // For two class template partial specializations, the first is at least as
+ // specialized as the second if, given the following rewrite to two
+ // function templates, the first function template is at least as
+ // specialized as the second according to the ordering rules for function
+ // templates (14.6.6.2):
+ // - the first function template has the same template parameters as the
+ // first partial specialization and has a single function parameter
+ // whose type is a class template specialization with the template
+ // arguments of the first partial specialization, and
+ // - the second function template has the same template parameters as the
+ // second partial specialization and has a single function parameter
+ // whose type is a class template specialization with the template
+ // arguments of the second partial specialization.
+ //
+ // Rather than synthesize function templates, we merely perform the
+ // equivalent partial ordering by performing deduction directly on
+ // the template arguments of the class template partial
+ // specializations. This computation is slightly simpler than the
+ // general problem of function template partial ordering, because
+ // class template partial specializations are more constrained. We
+ // know that every template parameter is deducible from the class
+ // template partial specialization's template arguments, for
+ // example.
+ SmallVector<DeducedTemplateArgument, 4> Deduced;
+ TemplateDeductionInfo Info(Loc);
+
+ QualType PT1 = PS1->getInjectedSpecializationType();
+ QualType PT2 = PS2->getInjectedSpecializationType();
+
+ // Determine whether PS1 is at least as specialized as PS2
+ Deduced.resize(PS2->getTemplateParameters()->size());
+ bool Better1 = !DeduceTemplateArgumentsByTypeMatch(*this,
+ PS2->getTemplateParameters(),
+ PT2, PT1, Info, Deduced, TDF_None,
+ /*PartialOrdering=*/true);
+ if (Better1) {
+    SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(),
+                                                 Deduced.end());
+ InstantiatingTemplate Inst(*this, Loc, PS2, DeducedArgs, Info);
+ Better1 = !::FinishTemplateArgumentDeduction(
+ *this, PS2, PS1->getTemplateArgs(), Deduced, Info);
+ }
+
+ // Determine whether PS2 is at least as specialized as PS1
+ Deduced.clear();
+ Deduced.resize(PS1->getTemplateParameters()->size());
+ bool Better2 = !DeduceTemplateArgumentsByTypeMatch(
+ *this, PS1->getTemplateParameters(), PT1, PT2, Info, Deduced, TDF_None,
+ /*PartialOrdering=*/true);
+ if (Better2) {
+ SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(),
+ Deduced.end());
+ InstantiatingTemplate Inst(*this, Loc, PS1, DeducedArgs, Info);
+ Better2 = !::FinishTemplateArgumentDeduction(
+ *this, PS1, PS2->getTemplateArgs(), Deduced, Info);
+ }
+
+ if (Better1 == Better2)
+ return nullptr;
+
+ return Better1 ? PS1 : PS2;
+}
+
+/// TODO: Unify with ClassTemplatePartialSpecializationDecl version?
+/// May require unifying ClassTemplate(Partial)SpecializationDecl and
+/// VarTemplate(Partial)SpecializationDecl with a new data
+/// structure Template(Partial)SpecializationDecl, and
+/// using Template(Partial)SpecializationDecl as input type.
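+///
+/// A variable-template sketch (hypothetical declarations) of the same
+/// ordering:
+/// \code
+///   template<class T, class U> constexpr int v = 0;          // primary
+///   template<class T, class U> constexpr int v<T*, U> = 1;   // #1
+///   template<class T, class U> constexpr int v<T*, U*> = 2;  // #2
+///   static_assert(v<int*, int*> == 2, "#2 is more specialized");
+/// \endcode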
+VarTemplatePartialSpecializationDecl *
+Sema::getMoreSpecializedPartialSpecialization(
+ VarTemplatePartialSpecializationDecl *PS1,
+ VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc) {
+ SmallVector<DeducedTemplateArgument, 4> Deduced;
+ TemplateDeductionInfo Info(Loc);
+
+ assert(PS1->getSpecializedTemplate() == PS2->getSpecializedTemplate() &&
+ "the partial specializations being compared should specialize"
+ " the same template.");
+ TemplateName Name(PS1->getSpecializedTemplate());
+ TemplateName CanonTemplate = Context.getCanonicalTemplateName(Name);
+ QualType PT1 = Context.getTemplateSpecializationType(
+ CanonTemplate, PS1->getTemplateArgs().data(),
+ PS1->getTemplateArgs().size());
+ QualType PT2 = Context.getTemplateSpecializationType(
+ CanonTemplate, PS2->getTemplateArgs().data(),
+ PS2->getTemplateArgs().size());
+
+ // Determine whether PS1 is at least as specialized as PS2
+ Deduced.resize(PS2->getTemplateParameters()->size());
+ bool Better1 = !DeduceTemplateArgumentsByTypeMatch(
+ *this, PS2->getTemplateParameters(), PT2, PT1, Info, Deduced, TDF_None,
+ /*PartialOrdering=*/true);
+ if (Better1) {
+ SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(),
+ Deduced.end());
+ InstantiatingTemplate Inst(*this, Loc, PS2, DeducedArgs, Info);
+ Better1 = !::FinishTemplateArgumentDeduction(*this, PS2,
+ PS1->getTemplateArgs(),
+ Deduced, Info);
+ }
+
+ // Determine whether PS2 is at least as specialized as PS1
+ Deduced.clear();
+ Deduced.resize(PS1->getTemplateParameters()->size());
+ bool Better2 = !DeduceTemplateArgumentsByTypeMatch(*this,
+ PS1->getTemplateParameters(),
+ PT1, PT2, Info, Deduced, TDF_None,
+ /*PartialOrdering=*/true);
+ if (Better2) {
+    SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(),
+                                                 Deduced.end());
+ InstantiatingTemplate Inst(*this, Loc, PS1, DeducedArgs, Info);
+ Better2 = !::FinishTemplateArgumentDeduction(*this, PS1,
+ PS2->getTemplateArgs(),
+ Deduced, Info);
+ }
+
+ if (Better1 == Better2)
+ return nullptr;
+
+  return Better1 ? PS1 : PS2;
+}
+
+static void
+MarkUsedTemplateParameters(ASTContext &Ctx,
+ const TemplateArgument &TemplateArg,
+ bool OnlyDeduced,
+ unsigned Depth,
+ llvm::SmallBitVector &Used);
+
+/// \brief Mark the template parameters that are used by the given
+/// expression.
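+///
+/// E.g., a sketch: in 'template<int N> void f(int (&)[N])', the reference to
+/// 'N' inside the array bound marks parameter 0 at the template's depth.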
+static void
+MarkUsedTemplateParameters(ASTContext &Ctx,
+ const Expr *E,
+ bool OnlyDeduced,
+ unsigned Depth,
+ llvm::SmallBitVector &Used) {
+ // We can deduce from a pack expansion.
+ if (const PackExpansionExpr *Expansion = dyn_cast<PackExpansionExpr>(E))
+ E = Expansion->getPattern();
+
+ // Skip through any implicit casts we added while type-checking, and any
+ // substitutions performed by template alias expansion.
+ while (1) {
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
+ E = ICE->getSubExpr();
+ else if (const SubstNonTypeTemplateParmExpr *Subst =
+ dyn_cast<SubstNonTypeTemplateParmExpr>(E))
+ E = Subst->getReplacement();
+ else
+ break;
+ }
+
+ // FIXME: if !OnlyDeduced, we have to walk the whole subexpression to
+ // find other occurrences of template parameters.
+ const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E);
+ if (!DRE)
+ return;
+
+ const NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(DRE->getDecl());
+ if (!NTTP)
+ return;
+
+ if (NTTP->getDepth() == Depth)
+ Used[NTTP->getIndex()] = true;
+}
+
+/// \brief Mark the template parameters that are used by the given
+/// nested name specifier.
+static void
+MarkUsedTemplateParameters(ASTContext &Ctx,
+ NestedNameSpecifier *NNS,
+ bool OnlyDeduced,
+ unsigned Depth,
+ llvm::SmallBitVector &Used) {
+ if (!NNS)
+ return;
+
+ MarkUsedTemplateParameters(Ctx, NNS->getPrefix(), OnlyDeduced, Depth,
+ Used);
+ MarkUsedTemplateParameters(Ctx, QualType(NNS->getAsType(), 0),
+ OnlyDeduced, Depth, Used);
+}
+
+/// \brief Mark the template parameters that are used by the given
+/// template name.
+static void
+MarkUsedTemplateParameters(ASTContext &Ctx,
+ TemplateName Name,
+ bool OnlyDeduced,
+ unsigned Depth,
+ llvm::SmallBitVector &Used) {
+ if (TemplateDecl *Template = Name.getAsTemplateDecl()) {
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(Template)) {
+ if (TTP->getDepth() == Depth)
+ Used[TTP->getIndex()] = true;
+ }
+ return;
+ }
+
+ if (QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName())
+ MarkUsedTemplateParameters(Ctx, QTN->getQualifier(), OnlyDeduced,
+ Depth, Used);
+ if (DependentTemplateName *DTN = Name.getAsDependentTemplateName())
+ MarkUsedTemplateParameters(Ctx, DTN->getQualifier(), OnlyDeduced,
+ Depth, Used);
+}
+
+/// \brief Mark the template parameters that are used by the given
+/// type.
+static void
+MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
+ bool OnlyDeduced,
+ unsigned Depth,
+ llvm::SmallBitVector &Used) {
+ if (T.isNull())
+ return;
+
+ // Non-dependent types have nothing deducible
+ if (!T->isDependentType())
+ return;
+
+ T = Ctx.getCanonicalType(T);
+ switch (T->getTypeClass()) {
+ case Type::Pointer:
+ MarkUsedTemplateParameters(Ctx,
+ cast<PointerType>(T)->getPointeeType(),
+ OnlyDeduced,
+ Depth,
+ Used);
+ break;
+
+ case Type::BlockPointer:
+ MarkUsedTemplateParameters(Ctx,
+ cast<BlockPointerType>(T)->getPointeeType(),
+ OnlyDeduced,
+ Depth,
+ Used);
+ break;
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ MarkUsedTemplateParameters(Ctx,
+ cast<ReferenceType>(T)->getPointeeType(),
+ OnlyDeduced,
+ Depth,
+ Used);
+ break;
+
+ case Type::MemberPointer: {
+ const MemberPointerType *MemPtr = cast<MemberPointerType>(T.getTypePtr());
+ MarkUsedTemplateParameters(Ctx, MemPtr->getPointeeType(), OnlyDeduced,
+ Depth, Used);
+ MarkUsedTemplateParameters(Ctx, QualType(MemPtr->getClass(), 0),
+ OnlyDeduced, Depth, Used);
+ break;
+ }
+
+ case Type::DependentSizedArray:
+ MarkUsedTemplateParameters(Ctx,
+ cast<DependentSizedArrayType>(T)->getSizeExpr(),
+ OnlyDeduced, Depth, Used);
+ // Fall through to check the element type
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ MarkUsedTemplateParameters(Ctx,
+ cast<ArrayType>(T)->getElementType(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::Vector:
+ case Type::ExtVector:
+ MarkUsedTemplateParameters(Ctx,
+ cast<VectorType>(T)->getElementType(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::DependentSizedExtVector: {
+ const DependentSizedExtVectorType *VecType
+ = cast<DependentSizedExtVectorType>(T);
+ MarkUsedTemplateParameters(Ctx, VecType->getElementType(), OnlyDeduced,
+ Depth, Used);
+ MarkUsedTemplateParameters(Ctx, VecType->getSizeExpr(), OnlyDeduced,
+ Depth, Used);
+ break;
+ }
+
+ case Type::FunctionProto: {
+ const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
+ MarkUsedTemplateParameters(Ctx, Proto->getReturnType(), OnlyDeduced, Depth,
+ Used);
+ for (unsigned I = 0, N = Proto->getNumParams(); I != N; ++I)
+ MarkUsedTemplateParameters(Ctx, Proto->getParamType(I), OnlyDeduced,
+ Depth, Used);
+ break;
+ }
+
+ case Type::TemplateTypeParm: {
+ const TemplateTypeParmType *TTP = cast<TemplateTypeParmType>(T);
+ if (TTP->getDepth() == Depth)
+ Used[TTP->getIndex()] = true;
+ break;
+ }
+
+ case Type::SubstTemplateTypeParmPack: {
+ const SubstTemplateTypeParmPackType *Subst
+ = cast<SubstTemplateTypeParmPackType>(T);
+ MarkUsedTemplateParameters(Ctx,
+ QualType(Subst->getReplacedParameter(), 0),
+ OnlyDeduced, Depth, Used);
+ MarkUsedTemplateParameters(Ctx, Subst->getArgumentPack(),
+ OnlyDeduced, Depth, Used);
+ break;
+ }
+
+ case Type::InjectedClassName:
+ T = cast<InjectedClassNameType>(T)->getInjectedSpecializationType();
+ // fall through
+
+ case Type::TemplateSpecialization: {
+ const TemplateSpecializationType *Spec
+ = cast<TemplateSpecializationType>(T);
+ MarkUsedTemplateParameters(Ctx, Spec->getTemplateName(), OnlyDeduced,
+ Depth, Used);
+
+ // C++0x [temp.deduct.type]p9:
+ // If the template argument list of P contains a pack expansion that is
+ // not the last template argument, the entire template argument list is a
+ // non-deduced context.
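+    //
+    // For instance (sketch): in 'A<Ts..., U>' nothing in the argument list is
+    // deducible, because the expansion 'Ts...' is not the last argument.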
+ if (OnlyDeduced &&
+ hasPackExpansionBeforeEnd(Spec->getArgs(), Spec->getNumArgs()))
+ break;
+
+ for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I)
+ MarkUsedTemplateParameters(Ctx, Spec->getArg(I), OnlyDeduced, Depth,
+ Used);
+ break;
+ }
+
+ case Type::Complex:
+ if (!OnlyDeduced)
+ MarkUsedTemplateParameters(Ctx,
+ cast<ComplexType>(T)->getElementType(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::Atomic:
+ if (!OnlyDeduced)
+ MarkUsedTemplateParameters(Ctx,
+ cast<AtomicType>(T)->getValueType(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::DependentName:
+ if (!OnlyDeduced)
+ MarkUsedTemplateParameters(Ctx,
+ cast<DependentNameType>(T)->getQualifier(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::DependentTemplateSpecialization: {
+ // C++14 [temp.deduct.type]p5:
+ // The non-deduced contexts are:
+ // -- The nested-name-specifier of a type that was specified using a
+ // qualified-id
+ //
+ // C++14 [temp.deduct.type]p6:
+ // When a type name is specified in a way that includes a non-deduced
+ // context, all of the types that comprise that type name are also
+ // non-deduced.
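+    //
+    // E.g., a sketch: in 'typename T::template B<U>', neither T nor U is
+    // deducible; the qualified-id makes the entire name a non-deduced
+    // context.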
+ if (OnlyDeduced)
+ break;
+
+ const DependentTemplateSpecializationType *Spec
+ = cast<DependentTemplateSpecializationType>(T);
+
+ MarkUsedTemplateParameters(Ctx, Spec->getQualifier(),
+ OnlyDeduced, Depth, Used);
+
+ for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I)
+ MarkUsedTemplateParameters(Ctx, Spec->getArg(I), OnlyDeduced, Depth,
+ Used);
+ break;
+ }
+
+ case Type::TypeOf:
+ if (!OnlyDeduced)
+ MarkUsedTemplateParameters(Ctx,
+ cast<TypeOfType>(T)->getUnderlyingType(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::TypeOfExpr:
+ if (!OnlyDeduced)
+ MarkUsedTemplateParameters(Ctx,
+ cast<TypeOfExprType>(T)->getUnderlyingExpr(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::Decltype:
+ if (!OnlyDeduced)
+ MarkUsedTemplateParameters(Ctx,
+ cast<DecltypeType>(T)->getUnderlyingExpr(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::UnaryTransform:
+ if (!OnlyDeduced)
+ MarkUsedTemplateParameters(Ctx,
+ cast<UnaryTransformType>(T)->getUnderlyingType(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::PackExpansion:
+ MarkUsedTemplateParameters(Ctx,
+ cast<PackExpansionType>(T)->getPattern(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::Auto:
+    MarkUsedTemplateParameters(Ctx,
+                               cast<AutoType>(T)->getDeducedType(),
+                               OnlyDeduced, Depth, Used);
+    break;
+
+ // None of these types have any template parameters in them.
+ case Type::Builtin:
+ case Type::VariableArray:
+ case Type::FunctionNoProto:
+ case Type::Record:
+ case Type::Enum:
+ case Type::ObjCInterface:
+ case Type::ObjCObject:
+ case Type::ObjCObjectPointer:
+ case Type::UnresolvedUsing:
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ break;
+ }
+}
+
+/// \brief Mark the template parameters that are used by this
+/// template argument.
+static void
+MarkUsedTemplateParameters(ASTContext &Ctx,
+ const TemplateArgument &TemplateArg,
+ bool OnlyDeduced,
+ unsigned Depth,
+ llvm::SmallBitVector &Used) {
+ switch (TemplateArg.getKind()) {
+ case TemplateArgument::Null:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Declaration:
+ break;
+
+ case TemplateArgument::NullPtr:
+ MarkUsedTemplateParameters(Ctx, TemplateArg.getNullPtrType(), OnlyDeduced,
+ Depth, Used);
+ break;
+
+ case TemplateArgument::Type:
+ MarkUsedTemplateParameters(Ctx, TemplateArg.getAsType(), OnlyDeduced,
+ Depth, Used);
+ break;
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ MarkUsedTemplateParameters(Ctx,
+ TemplateArg.getAsTemplateOrTemplatePattern(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case TemplateArgument::Expression:
+ MarkUsedTemplateParameters(Ctx, TemplateArg.getAsExpr(), OnlyDeduced,
+ Depth, Used);
+ break;
+
+ case TemplateArgument::Pack:
+ for (const auto &P : TemplateArg.pack_elements())
+ MarkUsedTemplateParameters(Ctx, P, OnlyDeduced, Depth, Used);
+ break;
+ }
+}
+
+/// \brief Mark which template parameters can be deduced from a given
+/// template argument list.
+///
+/// \param TemplateArgs the template argument list from which template
+/// parameters will be deduced.
+///
+/// \param Used a bit vector whose elements will be set to \c true
+/// to indicate when the corresponding template parameter will be
+/// deduced.
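+///
+/// For illustration, a sketch: for the argument list of a partial
+/// specialization written 'A<T*, int>', this marks T as deducible and leaves
+/// every template parameter that does not appear in the list unmarked.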
+void
+Sema::MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
+ bool OnlyDeduced, unsigned Depth,
+ llvm::SmallBitVector &Used) {
+ // C++0x [temp.deduct.type]p9:
+ // If the template argument list of P contains a pack expansion that is not
+ // the last template argument, the entire template argument list is a
+ // non-deduced context.
+ if (OnlyDeduced &&
+ hasPackExpansionBeforeEnd(TemplateArgs.data(), TemplateArgs.size()))
+ return;
+
+ for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
+ ::MarkUsedTemplateParameters(Context, TemplateArgs[I], OnlyDeduced,
+ Depth, Used);
+}
+
+/// \brief Marks all of the template parameters that will be deduced by a
+/// call to the given function template.
+void Sema::MarkDeducedTemplateParameters(
+ ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate,
+ llvm::SmallBitVector &Deduced) {
+ TemplateParameterList *TemplateParams
+ = FunctionTemplate->getTemplateParameters();
+ Deduced.clear();
+ Deduced.resize(TemplateParams->size());
+
+ FunctionDecl *Function = FunctionTemplate->getTemplatedDecl();
+ for (unsigned I = 0, N = Function->getNumParams(); I != N; ++I)
+ ::MarkUsedTemplateParameters(Ctx, Function->getParamDecl(I)->getType(),
+ true, TemplateParams->getDepth(), Deduced);
+}
+
+bool hasDeducibleTemplateParameters(Sema &S,
+ FunctionTemplateDecl *FunctionTemplate,
+ QualType T) {
+ if (!T->isDependentType())
+ return false;
+
+ TemplateParameterList *TemplateParams
+ = FunctionTemplate->getTemplateParameters();
+ llvm::SmallBitVector Deduced(TemplateParams->size());
+ ::MarkUsedTemplateParameters(S.Context, T, true, TemplateParams->getDepth(),
+ Deduced);
+
+ return Deduced.any();
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
new file mode 100644
index 0000000..fb7fc10
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -0,0 +1,2868 @@
+//===------ SemaTemplateInstantiate.cpp - C++ Template Instantiation ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+//
+// This file implements C++ template instantiation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "TreeTransform.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTLambda.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Template.h"
+#include "clang/Sema/TemplateDeduction.h"
+
+using namespace clang;
+using namespace sema;
+
+//===----------------------------------------------------------------------===//
+// Template Instantiation Support
+//===----------------------------------------------------------------------===//
+
+/// \brief Retrieve the template argument list(s) that should be used to
+/// instantiate the definition of the given declaration.
+///
+/// \param D the declaration for which we are computing template instantiation
+/// arguments.
+///
+/// \param Innermost if non-NULL, the innermost template argument list.
+///
+/// \param RelativeToPrimary true if we should get the template
+/// arguments relative to the primary template, even when we're
+/// dealing with a specialization. This is only relevant for function
+/// template specializations.
+///
+/// \param Pattern If non-NULL, indicates the pattern from which we will be
+/// instantiating the definition of the given declaration, \p D. This is
+/// used to determine the proper set of template instantiation arguments for
+/// friend function template specializations.
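+///
+/// A sketch (hypothetical declarations) of the multi-level case this
+/// accumulates:
+/// \code
+///   template<class T> struct Outer {
+///     template<class U> void f(U);
+///   };
+///   // Instantiating the body of Outer<int>::f<double> needs two template
+///   // argument lists: the innermost {double} for f, then {int} for Outer.
+/// \endcode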
+MultiLevelTemplateArgumentList
+Sema::getTemplateInstantiationArgs(NamedDecl *D,
+ const TemplateArgumentList *Innermost,
+ bool RelativeToPrimary,
+ const FunctionDecl *Pattern) {
+ // Accumulate the set of template argument lists in this structure.
+ MultiLevelTemplateArgumentList Result;
+
+ if (Innermost)
+ Result.addOuterTemplateArguments(Innermost);
+
+ DeclContext *Ctx = dyn_cast<DeclContext>(D);
+ if (!Ctx) {
+ Ctx = D->getDeclContext();
+
+ // Add template arguments from a variable template instantiation.
+ if (VarTemplateSpecializationDecl *Spec =
+ dyn_cast<VarTemplateSpecializationDecl>(D)) {
+ // We're done when we hit an explicit specialization.
+ if (Spec->getSpecializationKind() == TSK_ExplicitSpecialization &&
+ !isa<VarTemplatePartialSpecializationDecl>(Spec))
+ return Result;
+
+ Result.addOuterTemplateArguments(&Spec->getTemplateInstantiationArgs());
+
+ // If this variable template specialization was instantiated from a
+ // specialized member that is a variable template, we're done.
+ assert(Spec->getSpecializedTemplate() && "No variable template?");
+ llvm::PointerUnion<VarTemplateDecl*,
+ VarTemplatePartialSpecializationDecl*> Specialized
+ = Spec->getSpecializedTemplateOrPartial();
+ if (VarTemplatePartialSpecializationDecl *Partial =
+ Specialized.dyn_cast<VarTemplatePartialSpecializationDecl *>()) {
+ if (Partial->isMemberSpecialization())
+ return Result;
+ } else {
+ VarTemplateDecl *Tmpl = Specialized.get<VarTemplateDecl *>();
+ if (Tmpl->isMemberSpecialization())
+ return Result;
+ }
+ }
+
+ // If we have a template template parameter with translation unit context,
+ // then we're performing substitution into a default template argument of
+ // this template template parameter before we've constructed the template
+ // that will own this template template parameter. In this case, we
+ // use empty template parameter lists for all of the outer templates
+ // to avoid performing any substitutions.
+ if (Ctx->isTranslationUnit()) {
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(D)) {
+ for (unsigned I = 0, N = TTP->getDepth() + 1; I != N; ++I)
+ Result.addOuterTemplateArguments(None);
+ return Result;
+ }
+ }
+ }
+
+ while (!Ctx->isFileContext()) {
+ // Add template arguments from a class template instantiation.
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Ctx)) {
+ // We're done when we hit an explicit specialization.
+ if (Spec->getSpecializationKind() == TSK_ExplicitSpecialization &&
+ !isa<ClassTemplatePartialSpecializationDecl>(Spec))
+ break;
+
+ Result.addOuterTemplateArguments(&Spec->getTemplateInstantiationArgs());
+
+ // If this class template specialization was instantiated from a
+ // specialized member that is a class template, we're done.
+ assert(Spec->getSpecializedTemplate() && "No class template?");
+ if (Spec->getSpecializedTemplate()->isMemberSpecialization())
+ break;
+ }
+ // Add template arguments from a function template specialization.
+ else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Ctx)) {
+ if (!RelativeToPrimary &&
+ (Function->getTemplateSpecializationKind() ==
+ TSK_ExplicitSpecialization &&
+ !Function->getClassScopeSpecializationPattern()))
+ break;
+
+ if (const TemplateArgumentList *TemplateArgs
+ = Function->getTemplateSpecializationArgs()) {
+ // Add the template arguments for this specialization.
+ Result.addOuterTemplateArguments(TemplateArgs);
+
+ // If this function was instantiated from a specialized member that is
+ // a function template, we're done.
+ assert(Function->getPrimaryTemplate() && "No function template?");
+ if (Function->getPrimaryTemplate()->isMemberSpecialization())
+ break;
+
+ // If this function is a generic lambda specialization, we are done.
+ if (isGenericLambdaCallOperatorSpecialization(Function))
+ break;
+
+ } else if (FunctionTemplateDecl *FunTmpl
+ = Function->getDescribedFunctionTemplate()) {
+ // Add the "injected" template arguments.
+ Result.addOuterTemplateArguments(FunTmpl->getInjectedTemplateArgs());
+ }
+
+ // If this is a friend declaration and it declares an entity at
+ // namespace scope, take arguments from its lexical parent
+ // instead of its semantic parent, unless of course the pattern we're
+ // instantiating actually comes from the file's context!
+ if (Function->getFriendObjectKind() &&
+ Function->getDeclContext()->isFileContext() &&
+ (!Pattern || !Pattern->getLexicalDeclContext()->isFileContext())) {
+ Ctx = Function->getLexicalDeclContext();
+ RelativeToPrimary = false;
+ continue;
+ }
+ } else if (CXXRecordDecl *Rec = dyn_cast<CXXRecordDecl>(Ctx)) {
+ if (ClassTemplateDecl *ClassTemplate = Rec->getDescribedClassTemplate()) {
+ QualType T = ClassTemplate->getInjectedClassNameSpecialization();
+ const TemplateSpecializationType *TST =
+ cast<TemplateSpecializationType>(Context.getCanonicalType(T));
+ Result.addOuterTemplateArguments(
+ llvm::makeArrayRef(TST->getArgs(), TST->getNumArgs()));
+ if (ClassTemplate->isMemberSpecialization())
+ break;
+ }
+ }
+
+ Ctx = Ctx->getParent();
+ RelativeToPrimary = false;
+ }
+
+ return Result;
+}
+
+bool Sema::ActiveTemplateInstantiation::isInstantiationRecord() const {
+ switch (Kind) {
+ case TemplateInstantiation:
+ case ExceptionSpecInstantiation:
+ case DefaultTemplateArgumentInstantiation:
+ case DefaultFunctionArgumentInstantiation:
+ case ExplicitTemplateArgumentSubstitution:
+ case DeducedTemplateArgumentSubstitution:
+ case PriorTemplateArgumentSubstitution:
+ return true;
+
+ case DefaultTemplateArgumentChecking:
+ return false;
+ }
+
+ llvm_unreachable("Invalid InstantiationKind!");
+}
+
+Sema::InstantiatingTemplate::InstantiatingTemplate(
+ Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind,
+ SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
+ Decl *Entity, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs,
+ sema::TemplateDeductionInfo *DeductionInfo)
+ : SemaRef(SemaRef), SavedInNonInstantiationSFINAEContext(
+ SemaRef.InNonInstantiationSFINAEContext) {
+  // Don't allow further instantiation if a fatal error has occurred. Any
+ // diagnostics we might have raised will not be visible.
+ if (SemaRef.Diags.hasFatalErrorOccurred()) {
+ Invalid = true;
+ return;
+ }
+ Invalid = CheckInstantiationDepth(PointOfInstantiation, InstantiationRange);
+ if (!Invalid) {
+ ActiveTemplateInstantiation Inst;
+ Inst.Kind = Kind;
+ Inst.PointOfInstantiation = PointOfInstantiation;
+ Inst.Entity = Entity;
+ Inst.Template = Template;
+ Inst.TemplateArgs = TemplateArgs.data();
+ Inst.NumTemplateArgs = TemplateArgs.size();
+ Inst.DeductionInfo = DeductionInfo;
+ Inst.InstantiationRange = InstantiationRange;
+ SemaRef.InNonInstantiationSFINAEContext = false;
+ SemaRef.ActiveTemplateInstantiations.push_back(Inst);
+ if (!Inst.isInstantiationRecord())
+ ++SemaRef.NonInstantiationEntries;
+ }
+}
+
+Sema::InstantiatingTemplate::InstantiatingTemplate(
+ Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity,
+ SourceRange InstantiationRange)
+ : InstantiatingTemplate(SemaRef,
+ ActiveTemplateInstantiation::TemplateInstantiation,
+ PointOfInstantiation, InstantiationRange, Entity) {}
+
+Sema::InstantiatingTemplate::InstantiatingTemplate(
+ Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity,
+ ExceptionSpecification, SourceRange InstantiationRange)
+ : InstantiatingTemplate(
+ SemaRef, ActiveTemplateInstantiation::ExceptionSpecInstantiation,
+ PointOfInstantiation, InstantiationRange, Entity) {}
+
+Sema::InstantiatingTemplate::InstantiatingTemplate(
+ Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template,
+ ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange)
+ : InstantiatingTemplate(
+ SemaRef,
+ ActiveTemplateInstantiation::DefaultTemplateArgumentInstantiation,
+ PointOfInstantiation, InstantiationRange, Template, nullptr,
+ TemplateArgs) {}
+
+Sema::InstantiatingTemplate::InstantiatingTemplate(
+ Sema &SemaRef, SourceLocation PointOfInstantiation,
+ FunctionTemplateDecl *FunctionTemplate,
+ ArrayRef<TemplateArgument> TemplateArgs,
+ ActiveTemplateInstantiation::InstantiationKind Kind,
+ sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange)
+ : InstantiatingTemplate(SemaRef, Kind, PointOfInstantiation,
+ InstantiationRange, FunctionTemplate, nullptr,
+ TemplateArgs, &DeductionInfo) {}
+
+Sema::InstantiatingTemplate::InstantiatingTemplate(
+ Sema &SemaRef, SourceLocation PointOfInstantiation,
+ ClassTemplatePartialSpecializationDecl *PartialSpec,
+ ArrayRef<TemplateArgument> TemplateArgs,
+ sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange)
+ : InstantiatingTemplate(
+ SemaRef,
+ ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution,
+ PointOfInstantiation, InstantiationRange, PartialSpec, nullptr,
+ TemplateArgs, &DeductionInfo) {}
+
+Sema::InstantiatingTemplate::InstantiatingTemplate(
+ Sema &SemaRef, SourceLocation PointOfInstantiation,
+ VarTemplatePartialSpecializationDecl *PartialSpec,
+ ArrayRef<TemplateArgument> TemplateArgs,
+ sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange)
+ : InstantiatingTemplate(
+ SemaRef,
+ ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution,
+ PointOfInstantiation, InstantiationRange, PartialSpec, nullptr,
+ TemplateArgs, &DeductionInfo) {}
+
+Sema::InstantiatingTemplate::InstantiatingTemplate(
+ Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param,
+ ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange)
+ : InstantiatingTemplate(
+ SemaRef,
+ ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation,
+ PointOfInstantiation, InstantiationRange, Param, nullptr,
+ TemplateArgs) {}
+
+Sema::InstantiatingTemplate::InstantiatingTemplate(
+ Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template,
+ NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs,
+ SourceRange InstantiationRange)
+ : InstantiatingTemplate(
+ SemaRef,
+ ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution,
+ PointOfInstantiation, InstantiationRange, Param, Template,
+ TemplateArgs) {}
+
+Sema::InstantiatingTemplate::InstantiatingTemplate(
+ Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template,
+ TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs,
+ SourceRange InstantiationRange)
+ : InstantiatingTemplate(
+ SemaRef,
+ ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution,
+ PointOfInstantiation, InstantiationRange, Param, Template,
+ TemplateArgs) {}
+
+Sema::InstantiatingTemplate::InstantiatingTemplate(
+ Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template,
+ NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs,
+ SourceRange InstantiationRange)
+ : InstantiatingTemplate(
+ SemaRef, ActiveTemplateInstantiation::DefaultTemplateArgumentChecking,
+ PointOfInstantiation, InstantiationRange, Param, Template,
+ TemplateArgs) {}
+
+void Sema::InstantiatingTemplate::Clear() {
+ if (!Invalid) {
+ if (!SemaRef.ActiveTemplateInstantiations.back().isInstantiationRecord()) {
+ assert(SemaRef.NonInstantiationEntries > 0);
+ --SemaRef.NonInstantiationEntries;
+ }
+ SemaRef.InNonInstantiationSFINAEContext
+ = SavedInNonInstantiationSFINAEContext;
+
+ // Name lookup no longer looks in this template's defining module.
+ assert(SemaRef.ActiveTemplateInstantiations.size() >=
+ SemaRef.ActiveTemplateInstantiationLookupModules.size() &&
+ "forgot to remove a lookup module for a template instantiation");
+ if (SemaRef.ActiveTemplateInstantiations.size() ==
+ SemaRef.ActiveTemplateInstantiationLookupModules.size()) {
+ if (Module *M = SemaRef.ActiveTemplateInstantiationLookupModules.back())
+ SemaRef.LookupModulesCache.erase(M);
+ SemaRef.ActiveTemplateInstantiationLookupModules.pop_back();
+ }
+
+ SemaRef.ActiveTemplateInstantiations.pop_back();
+ Invalid = true;
+ }
+}
+
+bool Sema::InstantiatingTemplate::CheckInstantiationDepth(
+ SourceLocation PointOfInstantiation,
+ SourceRange InstantiationRange) {
+ assert(SemaRef.NonInstantiationEntries <=
+ SemaRef.ActiveTemplateInstantiations.size());
+ if ((SemaRef.ActiveTemplateInstantiations.size() -
+ SemaRef.NonInstantiationEntries)
+ <= SemaRef.getLangOpts().InstantiationDepth)
+ return false;
+
+ SemaRef.Diag(PointOfInstantiation,
+ diag::err_template_recursion_depth_exceeded)
+ << SemaRef.getLangOpts().InstantiationDepth
+ << InstantiationRange;
+ SemaRef.Diag(PointOfInstantiation, diag::note_template_recursion_depth)
+ << SemaRef.getLangOpts().InstantiationDepth;
+ return true;
+}
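+
+// For illustration, a sketch (hypothetical declarations) of what trips the
+// depth check above:
+//   template<int N> struct S { static const int v = S<N + 1>::v; };
+//   int x = S<0>::v; // recurses past -ftemplate-depth and is diagnosed
+//                    // with err_template_recursion_depth_exceeded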
+
+/// \brief Prints the current instantiation stack through a series of
+/// notes.
+void Sema::PrintInstantiationStack() {
+ // Determine which template instantiations to skip, if any.
+ unsigned SkipStart = ActiveTemplateInstantiations.size(), SkipEnd = SkipStart;
+ unsigned Limit = Diags.getTemplateBacktraceLimit();
+ if (Limit && Limit < ActiveTemplateInstantiations.size()) {
+ SkipStart = Limit / 2 + Limit % 2;
+ SkipEnd = ActiveTemplateInstantiations.size() - Limit / 2;
+ }
+
+ // FIXME: In all of these cases, we need to show the template arguments
+ unsigned InstantiationIdx = 0;
+ for (SmallVectorImpl<ActiveTemplateInstantiation>::reverse_iterator
+ Active = ActiveTemplateInstantiations.rbegin(),
+ ActiveEnd = ActiveTemplateInstantiations.rend();
+ Active != ActiveEnd;
+ ++Active, ++InstantiationIdx) {
+ // Skip this instantiation?
+ if (InstantiationIdx >= SkipStart && InstantiationIdx < SkipEnd) {
+ if (InstantiationIdx == SkipStart) {
+ // Note that we're skipping instantiations.
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_instantiation_contexts_suppressed)
+ << unsigned(ActiveTemplateInstantiations.size() - Limit);
+ }
+ continue;
+ }
+
+ switch (Active->Kind) {
+ case ActiveTemplateInstantiation::TemplateInstantiation: {
+ Decl *D = Active->Entity;
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) {
+ unsigned DiagID = diag::note_template_member_class_here;
+ if (isa<ClassTemplateSpecializationDecl>(Record))
+ DiagID = diag::note_template_class_instantiation_here;
+ Diags.Report(Active->PointOfInstantiation, DiagID)
+ << Context.getTypeDeclType(Record)
+ << Active->InstantiationRange;
+ } else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
+ unsigned DiagID;
+ if (Function->getPrimaryTemplate())
+ DiagID = diag::note_function_template_spec_here;
+ else
+ DiagID = diag::note_template_member_function_here;
+ Diags.Report(Active->PointOfInstantiation, DiagID)
+ << Function
+ << Active->InstantiationRange;
+ } else if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ Diags.Report(Active->PointOfInstantiation,
+ VD->isStaticDataMember()?
+ diag::note_template_static_data_member_def_here
+ : diag::note_template_variable_def_here)
+ << VD
+ << Active->InstantiationRange;
+ } else if (EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_template_enum_def_here)
+ << ED
+ << Active->InstantiationRange;
+ } else if (FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_template_nsdmi_here)
+ << FD << Active->InstantiationRange;
+ } else {
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_template_type_alias_instantiation_here)
+ << cast<TypeAliasTemplateDecl>(D)
+ << Active->InstantiationRange;
+ }
+ break;
+ }
+
+ case ActiveTemplateInstantiation::DefaultTemplateArgumentInstantiation: {
+ TemplateDecl *Template = cast<TemplateDecl>(Active->Entity);
+ SmallVector<char, 128> TemplateArgsStr;
+ llvm::raw_svector_ostream OS(TemplateArgsStr);
+ Template->printName(OS);
+ TemplateSpecializationType::PrintTemplateArgumentList(OS,
+ Active->TemplateArgs,
+ Active->NumTemplateArgs,
+ getPrintingPolicy());
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_default_arg_instantiation_here)
+ << OS.str()
+ << Active->InstantiationRange;
+ break;
+ }
+
+ case ActiveTemplateInstantiation::ExplicitTemplateArgumentSubstitution: {
+ FunctionTemplateDecl *FnTmpl = cast<FunctionTemplateDecl>(Active->Entity);
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_explicit_template_arg_substitution_here)
+ << FnTmpl
+ << getTemplateArgumentBindingsText(FnTmpl->getTemplateParameters(),
+ Active->TemplateArgs,
+ Active->NumTemplateArgs)
+ << Active->InstantiationRange;
+ break;
+ }
+
+ case ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution:
+ if (ClassTemplatePartialSpecializationDecl *PartialSpec =
+ dyn_cast<ClassTemplatePartialSpecializationDecl>(Active->Entity)) {
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_partial_spec_deduct_instantiation_here)
+ << Context.getTypeDeclType(PartialSpec)
+ << getTemplateArgumentBindingsText(
+ PartialSpec->getTemplateParameters(),
+ Active->TemplateArgs,
+ Active->NumTemplateArgs)
+ << Active->InstantiationRange;
+ } else {
+ FunctionTemplateDecl *FnTmpl
+ = cast<FunctionTemplateDecl>(Active->Entity);
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_function_template_deduction_instantiation_here)
+ << FnTmpl
+ << getTemplateArgumentBindingsText(FnTmpl->getTemplateParameters(),
+ Active->TemplateArgs,
+ Active->NumTemplateArgs)
+ << Active->InstantiationRange;
+ }
+ break;
+
+ case ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation: {
+ ParmVarDecl *Param = cast<ParmVarDecl>(Active->Entity);
+ FunctionDecl *FD = cast<FunctionDecl>(Param->getDeclContext());
+
+ SmallVector<char, 128> TemplateArgsStr;
+ llvm::raw_svector_ostream OS(TemplateArgsStr);
+ FD->printName(OS);
+ TemplateSpecializationType::PrintTemplateArgumentList(OS,
+ Active->TemplateArgs,
+ Active->NumTemplateArgs,
+ getPrintingPolicy());
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_default_function_arg_instantiation_here)
+ << OS.str()
+ << Active->InstantiationRange;
+ break;
+ }
+
+ case ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution: {
+ NamedDecl *Parm = cast<NamedDecl>(Active->Entity);
+ std::string Name;
+ if (!Parm->getName().empty())
+ Name = std::string(" '") + Parm->getName().str() + "'";
+
+ TemplateParameterList *TemplateParams = nullptr;
+ if (TemplateDecl *Template = dyn_cast<TemplateDecl>(Active->Template))
+ TemplateParams = Template->getTemplateParameters();
+ else
+ TemplateParams =
+ cast<ClassTemplatePartialSpecializationDecl>(Active->Template)
+ ->getTemplateParameters();
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_prior_template_arg_substitution)
+ << isa<TemplateTemplateParmDecl>(Parm)
+ << Name
+ << getTemplateArgumentBindingsText(TemplateParams,
+ Active->TemplateArgs,
+ Active->NumTemplateArgs)
+ << Active->InstantiationRange;
+ break;
+ }
+
+ case ActiveTemplateInstantiation::DefaultTemplateArgumentChecking: {
+ TemplateParameterList *TemplateParams = nullptr;
+ if (TemplateDecl *Template = dyn_cast<TemplateDecl>(Active->Template))
+ TemplateParams = Template->getTemplateParameters();
+ else
+ TemplateParams =
+ cast<ClassTemplatePartialSpecializationDecl>(Active->Template)
+ ->getTemplateParameters();
+
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_template_default_arg_checking)
+ << getTemplateArgumentBindingsText(TemplateParams,
+ Active->TemplateArgs,
+ Active->NumTemplateArgs)
+ << Active->InstantiationRange;
+ break;
+ }
+
+ case ActiveTemplateInstantiation::ExceptionSpecInstantiation:
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_template_exception_spec_instantiation_here)
+ << cast<FunctionDecl>(Active->Entity)
+ << Active->InstantiationRange;
+ break;
+ }
+ }
+}
+
+Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
+ if (InNonInstantiationSFINAEContext)
+ return Optional<TemplateDeductionInfo *>(nullptr);
+
+ for (SmallVectorImpl<ActiveTemplateInstantiation>::const_reverse_iterator
+ Active = ActiveTemplateInstantiations.rbegin(),
+ ActiveEnd = ActiveTemplateInstantiations.rend();
+ Active != ActiveEnd;
+ ++Active)
+ {
+ switch(Active->Kind) {
+ case ActiveTemplateInstantiation::TemplateInstantiation:
+ // An instantiation of an alias template may or may not be a SFINAE
+ // context, depending on what else is on the stack.
+ if (isa<TypeAliasTemplateDecl>(Active->Entity))
+ break;
+ // Fall through.
+ case ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation:
+ case ActiveTemplateInstantiation::ExceptionSpecInstantiation:
+ // This is a template instantiation, so there is no SFINAE.
+ return None;
+
+ case ActiveTemplateInstantiation::DefaultTemplateArgumentInstantiation:
+ case ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution:
+ case ActiveTemplateInstantiation::DefaultTemplateArgumentChecking:
+ // A default template argument instantiation and substitution into
+ // template parameters with arguments for prior parameters may or may
+ // not be a SFINAE context; look further up the stack.
+ break;
+
+ case ActiveTemplateInstantiation::ExplicitTemplateArgumentSubstitution:
+ case ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution:
+      // We're either substituting explicitly-specified template arguments
+      // or deduced template arguments, so SFINAE applies.
+ assert(Active->DeductionInfo && "Missing deduction info pointer");
+ return Active->DeductionInfo;
+ }
+ }
+
+ return None;
+}
+
+/// \brief Retrieve the depth and index of a parameter pack.
+static std::pair<unsigned, unsigned>
+getDepthAndIndex(NamedDecl *ND) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(ND))
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
+
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(ND))
+ return std::make_pair(NTTP->getDepth(), NTTP->getIndex());
+
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(ND);
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
+}
+
+//===----------------------------------------------------------------------===//
+// Template Instantiation for Types
+//===----------------------------------------------------------------------===//
+namespace {
+ class TemplateInstantiator : public TreeTransform<TemplateInstantiator> {
+ const MultiLevelTemplateArgumentList &TemplateArgs;
+ SourceLocation Loc;
+ DeclarationName Entity;
+
+ public:
+ typedef TreeTransform<TemplateInstantiator> inherited;
+
+ TemplateInstantiator(Sema &SemaRef,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ SourceLocation Loc,
+ DeclarationName Entity)
+ : inherited(SemaRef), TemplateArgs(TemplateArgs), Loc(Loc),
+ Entity(Entity) { }
+
+ /// \brief Determine whether the given type \p T has already been
+ /// transformed.
+ ///
+ /// For the purposes of template instantiation, a type has already been
+ /// transformed if it is NULL or if it is not dependent.
+ bool AlreadyTransformed(QualType T);
+
+ /// \brief Returns the location of the entity being instantiated, if known.
+ SourceLocation getBaseLocation() { return Loc; }
+
+ /// \brief Returns the name of the entity being instantiated, if any.
+ DeclarationName getBaseEntity() { return Entity; }
+
+ /// \brief Sets the "base" location and entity when that
+ /// information is known based on another transformation.
+ void setBase(SourceLocation Loc, DeclarationName Entity) {
+ this->Loc = Loc;
+ this->Entity = Entity;
+ }
+
+ bool TryExpandParameterPacks(SourceLocation EllipsisLoc,
+ SourceRange PatternRange,
+ ArrayRef<UnexpandedParameterPack> Unexpanded,
+ bool &ShouldExpand, bool &RetainExpansion,
+ Optional<unsigned> &NumExpansions) {
+ return getSema().CheckParameterPacksForExpansion(EllipsisLoc,
+ PatternRange, Unexpanded,
+ TemplateArgs,
+ ShouldExpand,
+ RetainExpansion,
+ NumExpansions);
+ }
+
+ void ExpandingFunctionParameterPack(ParmVarDecl *Pack) {
+ SemaRef.CurrentInstantiationScope->MakeInstantiatedLocalArgPack(Pack);
+ }
+
+ TemplateArgument ForgetPartiallySubstitutedPack() {
+ TemplateArgument Result;
+ if (NamedDecl *PartialPack
+ = SemaRef.CurrentInstantiationScope->getPartiallySubstitutedPack()){
+ MultiLevelTemplateArgumentList &TemplateArgs
+ = const_cast<MultiLevelTemplateArgumentList &>(this->TemplateArgs);
+ unsigned Depth, Index;
+ std::tie(Depth, Index) = getDepthAndIndex(PartialPack);
+ if (TemplateArgs.hasTemplateArgument(Depth, Index)) {
+ Result = TemplateArgs(Depth, Index);
+ TemplateArgs.setArgument(Depth, Index, TemplateArgument());
+ }
+ }
+
+ return Result;
+ }
+
+ void RememberPartiallySubstitutedPack(TemplateArgument Arg) {
+ if (Arg.isNull())
+ return;
+
+ if (NamedDecl *PartialPack
+ = SemaRef.CurrentInstantiationScope->getPartiallySubstitutedPack()){
+ MultiLevelTemplateArgumentList &TemplateArgs
+ = const_cast<MultiLevelTemplateArgumentList &>(this->TemplateArgs);
+ unsigned Depth, Index;
+ std::tie(Depth, Index) = getDepthAndIndex(PartialPack);
+ TemplateArgs.setArgument(Depth, Index, Arg);
+ }
+ }
+
+ /// \brief Transform the given declaration by instantiating a reference to
+ /// this declaration.
+ Decl *TransformDecl(SourceLocation Loc, Decl *D);
+
+ void transformAttrs(Decl *Old, Decl *New) {
+ SemaRef.InstantiateAttrs(TemplateArgs, Old, New);
+ }
+
+ void transformedLocalDecl(Decl *Old, Decl *New) {
+ // If we've instantiated the call operator of a lambda or the call
+ // operator template of a generic lambda, update the "instantiation of"
+ // information.
+ auto *NewMD = dyn_cast<CXXMethodDecl>(New);
+ if (NewMD && isLambdaCallOperator(NewMD)) {
+ auto *OldMD = dyn_cast<CXXMethodDecl>(Old);
+ if (auto *NewTD = NewMD->getDescribedFunctionTemplate())
+ NewTD->setInstantiatedFromMemberTemplate(
+ OldMD->getDescribedFunctionTemplate());
+ else
+ NewMD->setInstantiationOfMemberFunction(OldMD,
+ TSK_ImplicitInstantiation);
+ }
+
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(Old, New);
+ }
+
+ /// \brief Transform the definition of the given declaration by
+ /// instantiating it.
+ Decl *TransformDefinition(SourceLocation Loc, Decl *D);
+
+ /// \brief Transform the first qualifier within a scope by instantiating the
+ /// declaration.
+ NamedDecl *TransformFirstQualifierInScope(NamedDecl *D, SourceLocation Loc);
+
+ /// \brief Rebuild the exception declaration and register the declaration
+ /// as an instantiated local.
+ VarDecl *RebuildExceptionDecl(VarDecl *ExceptionDecl,
+ TypeSourceInfo *Declarator,
+ SourceLocation StartLoc,
+ SourceLocation NameLoc,
+ IdentifierInfo *Name);
+
+ /// \brief Rebuild the Objective-C exception declaration and register the
+ /// declaration as an instantiated local.
+ VarDecl *RebuildObjCExceptionDecl(VarDecl *ExceptionDecl,
+ TypeSourceInfo *TSInfo, QualType T);
+
+ /// \brief Check for tag mismatches when instantiating an
+ /// elaborated type.
+ QualType RebuildElaboratedType(SourceLocation KeywordLoc,
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifierLoc QualifierLoc,
+ QualType T);
+
+ TemplateName
+ TransformTemplateName(CXXScopeSpec &SS, TemplateName Name,
+ SourceLocation NameLoc,
+ QualType ObjectType = QualType(),
+ NamedDecl *FirstQualifierInScope = nullptr);
+
+ const LoopHintAttr *TransformLoopHintAttr(const LoopHintAttr *LH);
+
+ ExprResult TransformPredefinedExpr(PredefinedExpr *E);
+ ExprResult TransformDeclRefExpr(DeclRefExpr *E);
+ ExprResult TransformCXXDefaultArgExpr(CXXDefaultArgExpr *E);
+
+ ExprResult TransformTemplateParmRefExpr(DeclRefExpr *E,
+ NonTypeTemplateParmDecl *D);
+ ExprResult TransformSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *E);
+
+ /// \brief Rebuild a DeclRefExpr for a ParmVarDecl reference.
+ ExprResult RebuildParmVarDeclRefExpr(ParmVarDecl *PD, SourceLocation Loc);
+
+ /// \brief Transform a reference to a function parameter pack.
+ ExprResult TransformFunctionParmPackRefExpr(DeclRefExpr *E,
+ ParmVarDecl *PD);
+
+ /// \brief Transform a FunctionParmPackExpr which was built when we couldn't
+ /// expand a function parameter pack reference which refers to an expanded
+ /// pack.
+ ExprResult TransformFunctionParmPackExpr(FunctionParmPackExpr *E);
+
+ QualType TransformFunctionProtoType(TypeLocBuilder &TLB,
+ FunctionProtoTypeLoc TL) {
+ // Call the base version; it will forward to our overridden version below.
+ return inherited::TransformFunctionProtoType(TLB, TL);
+ }
+
+ template<typename Fn>
+ QualType TransformFunctionProtoType(TypeLocBuilder &TLB,
+ FunctionProtoTypeLoc TL,
+ CXXRecordDecl *ThisContext,
+ unsigned ThisTypeQuals,
+ Fn TransformExceptionSpec);
+
+ ParmVarDecl *TransformFunctionTypeParam(ParmVarDecl *OldParm,
+ int indexAdjustment,
+ Optional<unsigned> NumExpansions,
+ bool ExpectParameterPack);
+
+ /// \brief Transforms a template type parameter type by performing
+ /// substitution of the corresponding template type argument.
+ QualType TransformTemplateTypeParmType(TypeLocBuilder &TLB,
+ TemplateTypeParmTypeLoc TL);
+
+ /// \brief Transforms an already-substituted template type parameter pack
+ /// into either itself (if we aren't substituting into its pack expansion)
+ /// or the appropriate substituted argument.
+ QualType TransformSubstTemplateTypeParmPackType(TypeLocBuilder &TLB,
+ SubstTemplateTypeParmPackTypeLoc TL);
+
+ ExprResult TransformLambdaExpr(LambdaExpr *E) {
+ LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true);
+ return TreeTransform<TemplateInstantiator>::TransformLambdaExpr(E);
+ }
+
+ TemplateParameterList *TransformTemplateParameterList(
+ TemplateParameterList *OrigTPL) {
+ if (!OrigTPL || !OrigTPL->size()) return OrigTPL;
+
+ DeclContext *Owner = OrigTPL->getParam(0)->getDeclContext();
+ TemplateDeclInstantiator DeclInstantiator(getSema(),
+ /* DeclContext *Owner */ Owner, TemplateArgs);
+ return DeclInstantiator.SubstTemplateParams(OrigTPL);
+ }
+ private:
+ ExprResult transformNonTypeTemplateParmRef(NonTypeTemplateParmDecl *parm,
+ SourceLocation loc,
+ TemplateArgument arg);
+ };
+}
+
+bool TemplateInstantiator::AlreadyTransformed(QualType T) {
+ if (T.isNull())
+ return true;
+
+ if (T->isInstantiationDependentType() || T->isVariablyModifiedType())
+ return false;
+
+ getSema().MarkDeclarationsReferencedInType(Loc, T);
+ return true;
+}
+
+static TemplateArgument
+getPackSubstitutedTemplateArgument(Sema &S, TemplateArgument Arg) {
+ assert(S.ArgumentPackSubstitutionIndex >= 0);
+ assert(S.ArgumentPackSubstitutionIndex < (int)Arg.pack_size());
+ Arg = Arg.pack_begin()[S.ArgumentPackSubstitutionIndex];
+ if (Arg.isPackExpansion())
+ Arg = Arg.getPackExpansionPattern();
+ return Arg;
+}
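+
+// Sketch of the helper above: while substituting into 'f(Ts()...)' with
+// Ts = {int, char} and ArgumentPackSubstitutionIndex == 1, it yields the
+// 'char' element of the pack (and the pattern if that element is itself a
+// pack expansion).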
+
+Decl *TemplateInstantiator::TransformDecl(SourceLocation Loc, Decl *D) {
+ if (!D)
+ return nullptr;
+
+ if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(D)) {
+ if (TTP->getDepth() < TemplateArgs.getNumLevels()) {
+ // If the corresponding template argument is NULL or non-existent, it's
+ // because we are performing instantiation from explicitly-specified
+ // template arguments in a function template, but there were some
+ // arguments left unspecified.
+ if (!TemplateArgs.hasTemplateArgument(TTP->getDepth(),
+ TTP->getPosition()))
+ return D;
+
+ TemplateArgument Arg = TemplateArgs(TTP->getDepth(), TTP->getPosition());
+
+ if (TTP->isParameterPack()) {
+ assert(Arg.getKind() == TemplateArgument::Pack &&
+ "Missing argument pack");
+ Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
+ }
+
+ TemplateName Template = Arg.getAsTemplate();
+ assert(!Template.isNull() && Template.getAsTemplateDecl() &&
+ "Wrong kind of template template argument");
+ return Template.getAsTemplateDecl();
+ }
+
+ // Fall through to find the instantiated declaration for this template
+ // template parameter.
+ }
+
+ return SemaRef.FindInstantiatedDecl(Loc, cast<NamedDecl>(D), TemplateArgs);
+}
+
+Decl *TemplateInstantiator::TransformDefinition(SourceLocation Loc, Decl *D) {
+ Decl *Inst = getSema().SubstDecl(D, getSema().CurContext, TemplateArgs);
+ if (!Inst)
+ return nullptr;
+
+ getSema().CurrentInstantiationScope->InstantiatedLocal(D, Inst);
+ return Inst;
+}
+
+NamedDecl *
+TemplateInstantiator::TransformFirstQualifierInScope(NamedDecl *D,
+ SourceLocation Loc) {
+ // If the first part of the nested-name-specifier was a template type
+ // parameter, instantiate that type parameter down to a tag type.
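+  //
+  // E.g., a sketch: for a qualifier written 'T::' with T substituted by a
+  // class C, the qualifier is mapped to the tag C here.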
+ if (TemplateTypeParmDecl *TTPD = dyn_cast_or_null<TemplateTypeParmDecl>(D)) {
+ const TemplateTypeParmType *TTP
+ = cast<TemplateTypeParmType>(getSema().Context.getTypeDeclType(TTPD));
+
+ if (TTP->getDepth() < TemplateArgs.getNumLevels()) {
+ // FIXME: This needs testing w/ member access expressions.
+ TemplateArgument Arg = TemplateArgs(TTP->getDepth(), TTP->getIndex());
+
+ if (TTP->isParameterPack()) {
+ assert(Arg.getKind() == TemplateArgument::Pack &&
+ "Missing argument pack");
+
+ if (getSema().ArgumentPackSubstitutionIndex == -1)
+ return nullptr;
+
+ Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
+ }
+
+ QualType T = Arg.getAsType();
+ if (T.isNull())
+ return cast_or_null<NamedDecl>(TransformDecl(Loc, D));
+
+ if (const TagType *Tag = T->getAs<TagType>())
+ return Tag->getDecl();
+
+ // The resulting type is not a tag; complain.
+ getSema().Diag(Loc, diag::err_nested_name_spec_non_tag) << T;
+ return nullptr;
+ }
+ }
+
+ return cast_or_null<NamedDecl>(TransformDecl(Loc, D));
+}
+
+VarDecl *
+TemplateInstantiator::RebuildExceptionDecl(VarDecl *ExceptionDecl,
+ TypeSourceInfo *Declarator,
+ SourceLocation StartLoc,
+ SourceLocation NameLoc,
+ IdentifierInfo *Name) {
+ VarDecl *Var = inherited::RebuildExceptionDecl(ExceptionDecl, Declarator,
+ StartLoc, NameLoc, Name);
+ if (Var)
+ getSema().CurrentInstantiationScope->InstantiatedLocal(ExceptionDecl, Var);
+ return Var;
+}
+
+VarDecl *TemplateInstantiator::RebuildObjCExceptionDecl(VarDecl *ExceptionDecl,
+ TypeSourceInfo *TSInfo,
+ QualType T) {
+ VarDecl *Var = inherited::RebuildObjCExceptionDecl(ExceptionDecl, TSInfo, T);
+ if (Var)
+ getSema().CurrentInstantiationScope->InstantiatedLocal(ExceptionDecl, Var);
+ return Var;
+}
+
+QualType
+TemplateInstantiator::RebuildElaboratedType(SourceLocation KeywordLoc,
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifierLoc QualifierLoc,
+ QualType T) {
+ if (const TagType *TT = T->getAs<TagType>()) {
+ TagDecl* TD = TT->getDecl();
+
+ SourceLocation TagLocation = KeywordLoc;
+
+ IdentifierInfo *Id = TD->getIdentifier();
+
+ // TODO: should we even warn on struct/class mismatches for this? Seems
+ // like it's likely to produce a lot of spurious errors.
+ if (Id && Keyword != ETK_None && Keyword != ETK_Typename) {
+ TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForKeyword(Keyword);
+ if (!SemaRef.isAcceptableTagRedeclaration(TD, Kind, /*isDefinition*/false,
+ TagLocation, Id)) {
+ SemaRef.Diag(TagLocation, diag::err_use_with_wrong_tag)
+ << Id
+ << FixItHint::CreateReplacement(SourceRange(TagLocation),
+ TD->getKindName());
+ SemaRef.Diag(TD->getLocation(), diag::note_previous_use);
+ }
+ }
+ }
+
+ return TreeTransform<TemplateInstantiator>::RebuildElaboratedType(KeywordLoc,
+ Keyword,
+ QualifierLoc,
+ T);
+}
+
+TemplateName TemplateInstantiator::TransformTemplateName(CXXScopeSpec &SS,
+ TemplateName Name,
+ SourceLocation NameLoc,
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope) {
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast_or_null<TemplateTemplateParmDecl>(Name.getAsTemplateDecl())) {
+ if (TTP->getDepth() < TemplateArgs.getNumLevels()) {
+ // If the corresponding template argument is NULL or non-existent, it's
+ // because we are performing instantiation from explicitly-specified
+ // template arguments in a function template, but there were some
+ // arguments left unspecified.
+ if (!TemplateArgs.hasTemplateArgument(TTP->getDepth(),
+ TTP->getPosition()))
+ return Name;
+
+ TemplateArgument Arg = TemplateArgs(TTP->getDepth(), TTP->getPosition());
+
+ if (TTP->isParameterPack()) {
+ assert(Arg.getKind() == TemplateArgument::Pack &&
+ "Missing argument pack");
+
+ if (getSema().ArgumentPackSubstitutionIndex == -1) {
+ // We have the template argument pack to substitute, but we're not
+ // actually expanding the enclosing pack expansion yet. So, just
+ // keep the entire argument pack.
+ return getSema().Context.getSubstTemplateTemplateParmPack(TTP, Arg);
+ }
+
+ Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
+ }
+
+ TemplateName Template = Arg.getAsTemplate();
+ assert(!Template.isNull() && "Null template template argument");
+
+ // We don't ever want to substitute for a qualified template name, since
+ // the qualifier is handled separately. So, look through the qualified
+ // template name to its underlying declaration.
+ if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
+ Template = TemplateName(QTN->getTemplateDecl());
+
+ Template = getSema().Context.getSubstTemplateTemplateParm(TTP, Template);
+ return Template;
+ }
+ }
+
+ if (SubstTemplateTemplateParmPackStorage *SubstPack
+ = Name.getAsSubstTemplateTemplateParmPack()) {
+ if (getSema().ArgumentPackSubstitutionIndex == -1)
+ return Name;
+
+ TemplateArgument Arg = SubstPack->getArgumentPack();
+ Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
+ return Arg.getAsTemplate();
+ }
+
+ return inherited::TransformTemplateName(SS, Name, NameLoc, ObjectType,
+ FirstQualifierInScope);
+}
+
+ExprResult
+TemplateInstantiator::TransformPredefinedExpr(PredefinedExpr *E) {
+ if (!E->isTypeDependent())
+ return E;
+
+ return getSema().BuildPredefinedExpr(E->getLocation(), E->getIdentType());
+}
+
+ExprResult
+TemplateInstantiator::TransformTemplateParmRefExpr(DeclRefExpr *E,
+ NonTypeTemplateParmDecl *NTTP) {
+ // If the corresponding template argument is NULL or non-existent, it's
+ // because we are performing instantiation from explicitly-specified
+ // template arguments in a function template, but there were some
+ // arguments left unspecified.
+ if (!TemplateArgs.hasTemplateArgument(NTTP->getDepth(),
+ NTTP->getPosition()))
+ return E;
+
+ TemplateArgument Arg = TemplateArgs(NTTP->getDepth(), NTTP->getPosition());
+ if (NTTP->isParameterPack()) {
+ assert(Arg.getKind() == TemplateArgument::Pack &&
+ "Missing argument pack");
+
+ if (getSema().ArgumentPackSubstitutionIndex == -1) {
+ // We have an argument pack, but we can't select a particular argument
+ // out of it yet. Therefore, we'll build an expression to hold on to that
+ // argument pack.
+ QualType TargetType = SemaRef.SubstType(NTTP->getType(), TemplateArgs,
+ E->getLocation(),
+ NTTP->getDeclName());
+ if (TargetType.isNull())
+ return ExprError();
+
+ return new (SemaRef.Context) SubstNonTypeTemplateParmPackExpr(TargetType,
+ NTTP,
+ E->getLocation(),
+ Arg);
+ }
+
+ Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
+ }
+
+ return transformNonTypeTemplateParmRef(NTTP, E->getLocation(), Arg);
+}
+
+const LoopHintAttr *
+TemplateInstantiator::TransformLoopHintAttr(const LoopHintAttr *LH) {
+ Expr *TransformedExpr = getDerived().TransformExpr(LH->getValue()).get();
+
+ if (TransformedExpr == LH->getValue())
+ return LH;
+
+ // Generate error if there is a problem with the value.
+ if (getSema().CheckLoopHintExpr(TransformedExpr, LH->getLocation()))
+ return LH;
+
+ // Create a new LoopHintAttr with the integral expression in place of the
+ // non-type template parameter.
+ return LoopHintAttr::CreateImplicit(
+ getSema().Context, LH->getSemanticSpelling(), LH->getOption(),
+ LH->getState(), TransformedExpr, LH->getRange());
+}
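+
+// For example, substituting N = 4 into:
+// @code
+//   template<int N> void f(int *p, int n) {
+// #pragma clang loop unroll_count(N)
+//     for (int i = 0; i != n; ++i) p[i] = 0;
+//   }
+// @endcode
+// rebuilds the loop hint with the literal 4 in place of N.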
+
+ExprResult TemplateInstantiator::transformNonTypeTemplateParmRef(
+ NonTypeTemplateParmDecl *parm,
+ SourceLocation loc,
+ TemplateArgument arg) {
+ ExprResult result;
+ QualType type;
+
+ // The template argument itself might be an expression, in which
+ // case we just return that expression.
+ if (arg.getKind() == TemplateArgument::Expression) {
+ Expr *argExpr = arg.getAsExpr();
+ result = argExpr;
+ type = argExpr->getType();
+
+ } else if (arg.getKind() == TemplateArgument::Declaration ||
+ arg.getKind() == TemplateArgument::NullPtr) {
+ ValueDecl *VD;
+ if (arg.getKind() == TemplateArgument::Declaration) {
+ VD = cast<ValueDecl>(arg.getAsDecl());
+
+ // Find the instantiation of the template argument. This is
+ // required for nested templates.
+ VD = cast_or_null<ValueDecl>(
+ getSema().FindInstantiatedDecl(loc, VD, TemplateArgs));
+ if (!VD)
+ return ExprError();
+ } else {
+ // Propagate NULL template argument.
+ VD = nullptr;
+ }
+
+ // Derive the type we want the substituted decl to have. This had
+ // better be non-dependent, or these checks will have serious problems.
+ if (parm->isExpandedParameterPack()) {
+ type = parm->getExpansionType(SemaRef.ArgumentPackSubstitutionIndex);
+ } else if (parm->isParameterPack() &&
+ isa<PackExpansionType>(parm->getType())) {
+ type = SemaRef.SubstType(
+ cast<PackExpansionType>(parm->getType())->getPattern(),
+ TemplateArgs, loc, parm->getDeclName());
+ } else {
+ type = SemaRef.SubstType(parm->getType(), TemplateArgs,
+ loc, parm->getDeclName());
+ }
+ assert(!type.isNull() && "type substitution failed for param type");
+ assert(!type->isDependentType() && "param type still dependent");
+ result = SemaRef.BuildExpressionFromDeclTemplateArgument(arg, type, loc);
+
+ if (!result.isInvalid()) type = result.get()->getType();
+ } else {
+ result = SemaRef.BuildExpressionFromIntegralTemplateArgument(arg, loc);
+
+ // Note that this type can be different from the type of 'result',
+ // e.g. if it's an enum type.
+ type = arg.getIntegralType();
+ }
+ if (result.isInvalid()) return ExprError();
+
+ Expr *resultExpr = result.get();
+ return new (SemaRef.Context) SubstNonTypeTemplateParmExpr(
+ type, resultExpr->getValueKind(), loc, parm, resultExpr);
+}
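+
+// For example, instantiating:
+// @code
+//   template<int N> int f() { return N; }
+//   int n = f<3>();
+// @endcode
+// replaces the reference to N with a SubstNonTypeTemplateParmExpr that
+// wraps the integer literal 3.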
+
+ExprResult
+TemplateInstantiator::TransformSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *E) {
+ if (getSema().ArgumentPackSubstitutionIndex == -1) {
+ // We aren't expanding the parameter pack, so just return ourselves.
+ return E;
+ }
+
+ TemplateArgument Arg = E->getArgumentPack();
+ Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
+ return transformNonTypeTemplateParmRef(E->getParameterPack(),
+ E->getParameterPackLocation(),
+ Arg);
+}
+
+ExprResult
+TemplateInstantiator::RebuildParmVarDeclRefExpr(ParmVarDecl *PD,
+ SourceLocation Loc) {
+ DeclarationNameInfo NameInfo(PD->getDeclName(), Loc);
+ return getSema().BuildDeclarationNameExpr(CXXScopeSpec(), NameInfo, PD);
+}
+
+ExprResult
+TemplateInstantiator::TransformFunctionParmPackExpr(FunctionParmPackExpr *E) {
+ if (getSema().ArgumentPackSubstitutionIndex != -1) {
+ // We can expand this parameter pack now.
+ ParmVarDecl *D = E->getExpansion(getSema().ArgumentPackSubstitutionIndex);
+ ValueDecl *VD = cast_or_null<ValueDecl>(TransformDecl(E->getExprLoc(), D));
+ if (!VD)
+ return ExprError();
+ return RebuildParmVarDeclRefExpr(cast<ParmVarDecl>(VD), E->getExprLoc());
+ }
+
+ QualType T = TransformType(E->getType());
+ if (T.isNull())
+ return ExprError();
+
+ // Transform each of the parameter expansions into the corresponding
+ // parameters in the instantiation of the function decl.
+ SmallVector<ParmVarDecl *, 8> Parms;
+ Parms.reserve(E->getNumExpansions());
+ for (FunctionParmPackExpr::iterator I = E->begin(), End = E->end();
+ I != End; ++I) {
+ ParmVarDecl *D =
+ cast_or_null<ParmVarDecl>(TransformDecl(E->getExprLoc(), *I));
+ if (!D)
+ return ExprError();
+ Parms.push_back(D);
+ }
+
+ return FunctionParmPackExpr::Create(getSema().Context, T,
+ E->getParameterPack(),
+ E->getParameterPackLocation(), Parms);
+}
+
+ExprResult
+TemplateInstantiator::TransformFunctionParmPackRefExpr(DeclRefExpr *E,
+ ParmVarDecl *PD) {
+ typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> *Found
+ = getSema().CurrentInstantiationScope->findInstantiationOf(PD);
+ assert(Found && "no instantiation for parameter pack");
+
+ Decl *TransformedDecl;
+ if (DeclArgumentPack *Pack = Found->dyn_cast<DeclArgumentPack *>()) {
+ // If this is a reference to a function parameter pack which we can
+ // substitute but can't yet expand, build a FunctionParmPackExpr for it.
+ if (getSema().ArgumentPackSubstitutionIndex == -1) {
+ QualType T = TransformType(E->getType());
+ if (T.isNull())
+ return ExprError();
+ return FunctionParmPackExpr::Create(getSema().Context, T, PD,
+ E->getExprLoc(), *Pack);
+ }
+
+ TransformedDecl = (*Pack)[getSema().ArgumentPackSubstitutionIndex];
+ } else {
+ TransformedDecl = Found->get<Decl*>();
+ }
+
+ // We have either an unexpanded pack or a specific expansion.
+ return RebuildParmVarDeclRefExpr(cast<ParmVarDecl>(TransformedDecl),
+ E->getExprLoc());
+}
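+
+// For example, when instantiating:
+// @code
+//   template<typename ...Us> void h(Us ...us);
+//   template<typename ...Ts> void g(Ts ...ts) { h(ts...); }
+// @endcode
+// the name 'ts' resolves to the pack of instantiated parameters; while
+// 'ts...' is being expanded, the parameter at ArgumentPackSubstitutionIndex
+// is the one referenced.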
+
+ExprResult
+TemplateInstantiator::TransformDeclRefExpr(DeclRefExpr *E) {
+ NamedDecl *D = E->getDecl();
+
+ // Handle references to non-type template parameters and non-type template
+ // parameter packs.
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D)) {
+ if (NTTP->getDepth() < TemplateArgs.getNumLevels())
+ return TransformTemplateParmRefExpr(E, NTTP);
+
+ // We have a non-type template parameter that isn't fully substituted;
+ // FindInstantiatedDecl will find it in the local instantiation scope.
+ }
+
+ // Handle references to function parameter packs.
+ if (ParmVarDecl *PD = dyn_cast<ParmVarDecl>(D))
+ if (PD->isParameterPack())
+ return TransformFunctionParmPackRefExpr(E, PD);
+
+ return TreeTransform<TemplateInstantiator>::TransformDeclRefExpr(E);
+}
+
+ExprResult TemplateInstantiator::TransformCXXDefaultArgExpr(
+ CXXDefaultArgExpr *E) {
+ assert(!cast<FunctionDecl>(E->getParam()->getDeclContext())->
+ getDescribedFunctionTemplate() &&
+ "Default arg expressions are never formed in dependent cases.");
+ return SemaRef.BuildCXXDefaultArgExpr(E->getUsedLocation(),
+ cast<FunctionDecl>(E->getParam()->getDeclContext()),
+ E->getParam());
+}
+
+template<typename Fn>
+QualType TemplateInstantiator::TransformFunctionProtoType(TypeLocBuilder &TLB,
+ FunctionProtoTypeLoc TL,
+ CXXRecordDecl *ThisContext,
+ unsigned ThisTypeQuals,
+ Fn TransformExceptionSpec) {
+ // We need a local instantiation scope for this function prototype.
+ LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true);
+ return inherited::TransformFunctionProtoType(
+ TLB, TL, ThisContext, ThisTypeQuals, TransformExceptionSpec);
+}
+
+ParmVarDecl *
+TemplateInstantiator::TransformFunctionTypeParam(ParmVarDecl *OldParm,
+ int indexAdjustment,
+ Optional<unsigned> NumExpansions,
+ bool ExpectParameterPack) {
+ return SemaRef.SubstParmVarDecl(OldParm, TemplateArgs, indexAdjustment,
+ NumExpansions, ExpectParameterPack);
+}
+
+QualType
+TemplateInstantiator::TransformTemplateTypeParmType(TypeLocBuilder &TLB,
+ TemplateTypeParmTypeLoc TL) {
+ const TemplateTypeParmType *T = TL.getTypePtr();
+ if (T->getDepth() < TemplateArgs.getNumLevels()) {
+ // Replace the template type parameter with its corresponding
+ // template argument.
+
+ // If the corresponding template argument is NULL or doesn't exist, it's
+ // because we are performing instantiation from explicitly-specified
+ // template arguments in a function template, but there were some
+ // arguments left unspecified.
+ if (!TemplateArgs.hasTemplateArgument(T->getDepth(), T->getIndex())) {
+ TemplateTypeParmTypeLoc NewTL
+ = TLB.push<TemplateTypeParmTypeLoc>(TL.getType());
+ NewTL.setNameLoc(TL.getNameLoc());
+ return TL.getType();
+ }
+
+ TemplateArgument Arg = TemplateArgs(T->getDepth(), T->getIndex());
+
+ if (T->isParameterPack()) {
+ assert(Arg.getKind() == TemplateArgument::Pack &&
+ "Missing argument pack");
+
+ if (getSema().ArgumentPackSubstitutionIndex == -1) {
+ // We have the template argument pack, but we're not expanding the
+ // enclosing pack expansion yet. Just save the template argument
+ // pack for later substitution.
+ QualType Result
+ = getSema().Context.getSubstTemplateTypeParmPackType(T, Arg);
+ SubstTemplateTypeParmPackTypeLoc NewTL
+ = TLB.push<SubstTemplateTypeParmPackTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+ }
+
+ Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
+ }
+
+ assert(Arg.getKind() == TemplateArgument::Type &&
+ "Template argument kind mismatch");
+
+ QualType Replacement = Arg.getAsType();
+
+ // TODO: only do this uniquing once, at the start of instantiation.
+ QualType Result
+ = getSema().Context.getSubstTemplateTypeParmType(T, Replacement);
+ SubstTemplateTypeParmTypeLoc NewTL
+ = TLB.push<SubstTemplateTypeParmTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+ }
+
+ // The template type parameter comes from an inner template (e.g.,
+ // the template parameter list of a member template inside the
+ // template we are instantiating). Create a new template type
+ // parameter with the template "level" reduced by one.
+ TemplateTypeParmDecl *NewTTPDecl = nullptr;
+ if (TemplateTypeParmDecl *OldTTPDecl = T->getDecl())
+ NewTTPDecl = cast_or_null<TemplateTypeParmDecl>(
+ TransformDecl(TL.getNameLoc(), OldTTPDecl));
+
+ QualType Result
+ = getSema().Context.getTemplateTypeParmType(T->getDepth()
+ - TemplateArgs.getNumLevels(),
+ T->getIndex(),
+ T->isParameterPack(),
+ NewTTPDecl);
+ TemplateTypeParmTypeLoc NewTL = TLB.push<TemplateTypeParmTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+}
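+
+// For example, instantiating the member template of:
+// @code
+//   template<typename T> struct A {
+//     template<typename U> static T f(U);
+//   };
+// @endcode
+// for A<int> rewrites T (depth 0) to 'int', while U belongs to the inner
+// template and is rebuilt with its depth reduced by one level.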
+
+QualType
+TemplateInstantiator::TransformSubstTemplateTypeParmPackType(
+ TypeLocBuilder &TLB,
+ SubstTemplateTypeParmPackTypeLoc TL) {
+ if (getSema().ArgumentPackSubstitutionIndex == -1) {
+ // We aren't expanding the parameter pack, so just return ourselves.
+ SubstTemplateTypeParmPackTypeLoc NewTL
+ = TLB.push<SubstTemplateTypeParmPackTypeLoc>(TL.getType());
+ NewTL.setNameLoc(TL.getNameLoc());
+ return TL.getType();
+ }
+
+ TemplateArgument Arg = TL.getTypePtr()->getArgumentPack();
+ Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
+ QualType Result = Arg.getAsType();
+
+ Result = getSema().Context.getSubstTemplateTypeParmType(
+ TL.getTypePtr()->getReplacedParameter(),
+ Result);
+ SubstTemplateTypeParmTypeLoc NewTL
+ = TLB.push<SubstTemplateTypeParmTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+}
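+
+// For example, if the pack <int, char> was earlier substituted for the
+// parameter pack Ts and the enclosing expansion 'Ts...' is only now being
+// expanded, selecting element 0 here produces 'int', wrapped in a
+// SubstTemplateTypeParmType that remembers it replaced Ts.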
+
+/// \brief Perform substitution on the type T with a given set of template
+/// arguments.
+///
+/// This routine substitutes the given template arguments into the
+/// type T and produces the instantiated type.
+///
+/// \param T the type into which the template arguments will be
+/// substituted. If this type is not dependent, it will be returned
+/// immediately.
+///
+/// \param Args the template arguments that will be
+/// substituted for the top-level template parameters within T.
+///
+/// \param Loc the location in the source code where this substitution
+/// is being performed. It will typically be the location of the
+/// declarator (if we're instantiating the type of some declaration)
+/// or the location of the type in the source code (if, e.g., we're
+/// instantiating the type of a cast expression).
+///
+/// \param Entity the name of the entity associated with a declaration
+/// being instantiated (if any). May be empty to indicate that there
+/// is no such entity (if, e.g., this is a type that occurs as part of
+/// a cast expression) or that the entity has no name (e.g., an
+/// unnamed function parameter).
+///
+/// \returns If the instantiation succeeds, the instantiated
+/// type. Otherwise, produces diagnostics and returns a NULL type.
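+///
+/// For example, substituting the template argument 'int' for the template
+/// parameter 'T' turns the dependent type 'T *' into 'int *'.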
+TypeSourceInfo *Sema::SubstType(TypeSourceInfo *T,
+ const MultiLevelTemplateArgumentList &Args,
+ SourceLocation Loc,
+ DeclarationName Entity) {
+ assert(!ActiveTemplateInstantiations.empty() &&
+ "Cannot perform an instantiation without some context on the "
+ "instantiation stack");
+
+ if (!T->getType()->isInstantiationDependentType() &&
+ !T->getType()->isVariablyModifiedType())
+ return T;
+
+ TemplateInstantiator Instantiator(*this, Args, Loc, Entity);
+ return Instantiator.TransformType(T);
+}
+
+TypeSourceInfo *Sema::SubstType(TypeLoc TL,
+ const MultiLevelTemplateArgumentList &Args,
+ SourceLocation Loc,
+ DeclarationName Entity) {
+ assert(!ActiveTemplateInstantiations.empty() &&
+ "Cannot perform an instantiation without some context on the "
+ "instantiation stack");
+
+ if (TL.getType().isNull())
+ return nullptr;
+
+ if (!TL.getType()->isInstantiationDependentType() &&
+ !TL.getType()->isVariablyModifiedType()) {
+ // FIXME: Make a copy of the TypeLoc data here, so that we can
+ // return a new TypeSourceInfo. Inefficient!
+ TypeLocBuilder TLB;
+ TLB.pushFullCopy(TL);
+ return TLB.getTypeSourceInfo(Context, TL.getType());
+ }
+
+ TemplateInstantiator Instantiator(*this, Args, Loc, Entity);
+ TypeLocBuilder TLB;
+ TLB.reserve(TL.getFullDataSize());
+ QualType Result = Instantiator.TransformType(TLB, TL);
+ if (Result.isNull())
+ return nullptr;
+
+ return TLB.getTypeSourceInfo(Context, Result);
+}
+
+/// Deprecated form of the above.
+QualType Sema::SubstType(QualType T,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ SourceLocation Loc, DeclarationName Entity) {
+ assert(!ActiveTemplateInstantiations.empty() &&
+ "Cannot perform an instantiation without some context on the "
+ "instantiation stack");
+
+ // If T is not a dependent type or a variably-modified type, there
+ // is nothing to do.
+ if (!T->isInstantiationDependentType() && !T->isVariablyModifiedType())
+ return T;
+
+ TemplateInstantiator Instantiator(*this, TemplateArgs, Loc, Entity);
+ return Instantiator.TransformType(T);
+}
+
+static bool NeedsInstantiationAsFunctionType(TypeSourceInfo *T) {
+ if (T->getType()->isInstantiationDependentType() ||
+ T->getType()->isVariablyModifiedType())
+ return true;
+
+ TypeLoc TL = T->getTypeLoc().IgnoreParens();
+ if (!TL.getAs<FunctionProtoTypeLoc>())
+ return false;
+
+ FunctionProtoTypeLoc FP = TL.castAs<FunctionProtoTypeLoc>();
+ for (unsigned I = 0, E = FP.getNumParams(); I != E; ++I) {
+ ParmVarDecl *P = FP.getParam(I);
+
+ // This must be synthesized from a typedef.
+ if (!P) continue;
+
+ // The parameter's type as written might be dependent even if the
+ // decayed type was not dependent.
+ if (TypeSourceInfo *TSInfo = P->getTypeSourceInfo())
+ if (TSInfo->getType()->isInstantiationDependentType())
+ return true;
+
+ // TODO: currently we always rebuild expressions. When we
+ // properly get lazier about this, we should use the same
+ // logic to avoid rebuilding prototypes here.
+ if (P->hasDefaultArg())
+ return true;
+ }
+
+ return false;
+}
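+
+// For example, in:
+// @code
+//   template<typename T> struct A { void f(int x = 0); };
+// @endcode
+// the type of A<T>::f is not dependent, but the default argument still
+// forces the function type to be rebuilt during instantiation.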
+
+/// A form of SubstType intended specifically for instantiating the
+/// type of a FunctionDecl. Its purpose is solely to force the
+/// instantiation of default-argument expressions and to avoid
+/// instantiating an exception-specification.
+TypeSourceInfo *Sema::SubstFunctionDeclType(TypeSourceInfo *T,
+ const MultiLevelTemplateArgumentList &Args,
+ SourceLocation Loc,
+ DeclarationName Entity,
+ CXXRecordDecl *ThisContext,
+ unsigned ThisTypeQuals) {
+ assert(!ActiveTemplateInstantiations.empty() &&
+ "Cannot perform an instantiation without some context on the "
+ "instantiation stack");
+
+ if (!NeedsInstantiationAsFunctionType(T))
+ return T;
+
+ TemplateInstantiator Instantiator(*this, Args, Loc, Entity);
+
+ TypeLocBuilder TLB;
+
+ TypeLoc TL = T->getTypeLoc();
+ TLB.reserve(TL.getFullDataSize());
+
+ QualType Result;
+
+ if (FunctionProtoTypeLoc Proto =
+ TL.IgnoreParens().getAs<FunctionProtoTypeLoc>()) {
+ // Instantiate the type, other than its exception specification. The
+ // exception specification is instantiated in InitFunctionInstantiation
+ // once we've built the FunctionDecl.
+ // FIXME: Set the exception specification to EST_Uninstantiated here,
+ // instead of rebuilding the function type again later.
+ Result = Instantiator.TransformFunctionProtoType(
+ TLB, Proto, ThisContext, ThisTypeQuals,
+ [](FunctionProtoType::ExceptionSpecInfo &ESI,
+ bool &Changed) { return false; });
+ } else {
+ Result = Instantiator.TransformType(TLB, TL);
+ }
+ if (Result.isNull())
+ return nullptr;
+
+ return TLB.getTypeSourceInfo(Context, Result);
+}
+
+void Sema::SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
+ const MultiLevelTemplateArgumentList &Args) {
+ FunctionProtoType::ExceptionSpecInfo ESI =
+ Proto->getExtProtoInfo().ExceptionSpec;
+ assert(ESI.Type != EST_Uninstantiated);
+
+ TemplateInstantiator Instantiator(*this, Args, New->getLocation(),
+ New->getDeclName());
+
+ SmallVector<QualType, 4> ExceptionStorage;
+ bool Changed = false;
+ if (Instantiator.TransformExceptionSpec(
+ New->getTypeSourceInfo()->getTypeLoc().getLocEnd(), ESI,
+ ExceptionStorage, Changed))
+ // On error, recover by dropping the exception specification.
+ ESI.Type = EST_None;
+
+ UpdateExceptionSpec(New, ESI);
+}
+
+ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ int indexAdjustment,
+ Optional<unsigned> NumExpansions,
+ bool ExpectParameterPack) {
+ TypeSourceInfo *OldDI = OldParm->getTypeSourceInfo();
+ TypeSourceInfo *NewDI = nullptr;
+
+ TypeLoc OldTL = OldDI->getTypeLoc();
+ if (PackExpansionTypeLoc ExpansionTL = OldTL.getAs<PackExpansionTypeLoc>()) {
+
+ // We have a function parameter pack. Substitute into the pattern of the
+ // expansion.
+ NewDI = SubstType(ExpansionTL.getPatternLoc(), TemplateArgs,
+ OldParm->getLocation(), OldParm->getDeclName());
+ if (!NewDI)
+ return nullptr;
+
+ if (NewDI->getType()->containsUnexpandedParameterPack()) {
+ // We still have unexpanded parameter packs, which means that
+ // our function parameter is still a function parameter pack.
+ // Therefore, make its type a pack expansion type.
+ NewDI = CheckPackExpansion(NewDI, ExpansionTL.getEllipsisLoc(),
+ NumExpansions);
+ } else if (ExpectParameterPack) {
+ // We expected to get a parameter pack but didn't (because the type
+ // itself is not a pack expansion type), so complain. This can occur when
+ // the substitution goes through an alias template that "loses" the
+ // pack expansion.
+ Diag(OldParm->getLocation(),
+ diag::err_function_parameter_pack_without_parameter_packs)
+ << NewDI->getType();
+ return nullptr;
+ }
+ } else {
+ NewDI = SubstType(OldDI, TemplateArgs, OldParm->getLocation(),
+ OldParm->getDeclName());
+ }
+
+ if (!NewDI)
+ return nullptr;
+
+ if (NewDI->getType()->isVoidType()) {
+ Diag(OldParm->getLocation(), diag::err_param_with_void_type);
+ return nullptr;
+ }
+
+ ParmVarDecl *NewParm = CheckParameter(Context.getTranslationUnitDecl(),
+ OldParm->getInnerLocStart(),
+ OldParm->getLocation(),
+ OldParm->getIdentifier(),
+ NewDI->getType(), NewDI,
+ OldParm->getStorageClass());
+ if (!NewParm)
+ return nullptr;
+
+ // Mark the (new) default argument as uninstantiated (if any).
+ if (OldParm->hasUninstantiatedDefaultArg()) {
+ Expr *Arg = OldParm->getUninstantiatedDefaultArg();
+ NewParm->setUninstantiatedDefaultArg(Arg);
+ } else if (OldParm->hasUnparsedDefaultArg()) {
+ NewParm->setUnparsedDefaultArg();
+ UnparsedDefaultArgInstantiations[OldParm].push_back(NewParm);
+ } else if (Expr *Arg = OldParm->getDefaultArg()) {
+ FunctionDecl *OwningFunc = cast<FunctionDecl>(OldParm->getDeclContext());
+ if (OwningFunc->isLexicallyWithinFunctionOrMethod()) {
+ // Instantiate default arguments for methods of local classes (DR1484)
+ // and non-defining declarations.
+ Sema::ContextRAII SavedContext(*this, OwningFunc);
+ LocalInstantiationScope Local(*this);
+ ExprResult NewArg = SubstExpr(Arg, TemplateArgs);
+ if (NewArg.isUsable()) {
+ // It would be nice if we still had the original '=' location here.
+ SourceLocation EqualLoc = NewArg.get()->getLocStart();
+ SetParamDefaultArgument(NewParm, NewArg.get(), EqualLoc);
+ }
+ } else {
+ // FIXME: if we non-lazily instantiated non-dependent default args for
+ // non-dependent parameter types we could remove a bunch of duplicate
+ // conversion warnings for such arguments.
+ NewParm->setUninstantiatedDefaultArg(Arg);
+ }
+ }
+
+ NewParm->setHasInheritedDefaultArg(OldParm->hasInheritedDefaultArg());
+
+ if (OldParm->isParameterPack() && !NewParm->isParameterPack()) {
+ // Add the new parameter to the instantiated parameter pack.
+ CurrentInstantiationScope->InstantiatedLocalPackArg(OldParm, NewParm);
+ } else {
+ // Introduce an Old -> New mapping
+ CurrentInstantiationScope->InstantiatedLocal(OldParm, NewParm);
+ }
+
+ // FIXME: OldParm may come from a FunctionProtoType, in which case CurContext
+ // can be anything. Is this right?
+ NewParm->setDeclContext(CurContext);
+
+ NewParm->setScopeInfo(OldParm->getFunctionScopeDepth(),
+ OldParm->getFunctionScopeIndex() + indexAdjustment);
+
+ InstantiateAttrs(TemplateArgs, OldParm, NewParm);
+
+ return NewParm;
+}
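+
+// For example, substituting T = int into the parameter 'T value = T()' of a
+// function template yields a ParmVarDecl of type 'int' whose default
+// argument is recorded as uninstantiated and rebuilt only when it is
+// actually used.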
+
+/// \brief Substitute the given template arguments into the given set of
+/// parameters, producing the set of parameter types that would be generated
+/// from such a substitution.
+bool Sema::SubstParmTypes(SourceLocation Loc,
+ ParmVarDecl **Params, unsigned NumParams,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ SmallVectorImpl<QualType> &ParamTypes,
+ SmallVectorImpl<ParmVarDecl *> *OutParams) {
+ assert(!ActiveTemplateInstantiations.empty() &&
+ "Cannot perform an instantiation without some context on the "
+ "instantiation stack");
+
+ TemplateInstantiator Instantiator(*this, TemplateArgs, Loc,
+ DeclarationName());
+ return Instantiator.TransformFunctionTypeParams(Loc, Params, NumParams,
+ nullptr, ParamTypes,
+ OutParams);
+}
+
+/// \brief Perform substitution on the base class specifiers of the
+/// given class template specialization.
+///
+/// Produces a diagnostic and returns true on error, returns false and
+/// attaches the instantiated base classes to the class template
+/// specialization if successful.
+bool
+Sema::SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
+ CXXRecordDecl *Pattern,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ bool Invalid = false;
+ SmallVector<CXXBaseSpecifier*, 4> InstantiatedBases;
+ for (const auto &Base : Pattern->bases()) {
+ if (!Base.getType()->isDependentType()) {
+ if (const CXXRecordDecl *RD = Base.getType()->getAsCXXRecordDecl()) {
+ if (RD->isInvalidDecl())
+ Instantiation->setInvalidDecl();
+ }
+ InstantiatedBases.push_back(new (Context) CXXBaseSpecifier(Base));
+ continue;
+ }
+
+ SourceLocation EllipsisLoc;
+ TypeSourceInfo *BaseTypeLoc;
+ if (Base.isPackExpansion()) {
+ // This is a pack expansion. See whether we should expand it now, or
+ // wait until later.
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ collectUnexpandedParameterPacks(Base.getTypeSourceInfo()->getTypeLoc(),
+ Unexpanded);
+ bool ShouldExpand = false;
+ bool RetainExpansion = false;
+ Optional<unsigned> NumExpansions;
+ if (CheckParameterPacksForExpansion(Base.getEllipsisLoc(),
+ Base.getSourceRange(),
+ Unexpanded,
+ TemplateArgs, ShouldExpand,
+ RetainExpansion,
+ NumExpansions)) {
+ Invalid = true;
+ continue;
+ }
+
+ // If we should expand this pack expansion now, do so.
+ if (ShouldExpand) {
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this, I);
+
+ TypeSourceInfo *BaseTypeLoc = SubstType(Base.getTypeSourceInfo(),
+ TemplateArgs,
+ Base.getSourceRange().getBegin(),
+ DeclarationName());
+ if (!BaseTypeLoc) {
+ Invalid = true;
+ continue;
+ }
+
+ if (CXXBaseSpecifier *InstantiatedBase
+ = CheckBaseSpecifier(Instantiation,
+ Base.getSourceRange(),
+ Base.isVirtual(),
+ Base.getAccessSpecifierAsWritten(),
+ BaseTypeLoc,
+ SourceLocation()))
+ InstantiatedBases.push_back(InstantiatedBase);
+ else
+ Invalid = true;
+ }
+
+ continue;
+ }
+
+ // The resulting base specifier will (still) be a pack expansion.
+ EllipsisLoc = Base.getEllipsisLoc();
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this, -1);
+ BaseTypeLoc = SubstType(Base.getTypeSourceInfo(),
+ TemplateArgs,
+ Base.getSourceRange().getBegin(),
+ DeclarationName());
+ } else {
+ BaseTypeLoc = SubstType(Base.getTypeSourceInfo(),
+ TemplateArgs,
+ Base.getSourceRange().getBegin(),
+ DeclarationName());
+ }
+
+ if (!BaseTypeLoc) {
+ Invalid = true;
+ continue;
+ }
+
+ if (CXXBaseSpecifier *InstantiatedBase
+ = CheckBaseSpecifier(Instantiation,
+ Base.getSourceRange(),
+ Base.isVirtual(),
+ Base.getAccessSpecifierAsWritten(),
+ BaseTypeLoc,
+ EllipsisLoc))
+ InstantiatedBases.push_back(InstantiatedBase);
+ else
+ Invalid = true;
+ }
+
+ if (!Invalid && AttachBaseSpecifiers(Instantiation, InstantiatedBases))
+ Invalid = true;
+
+ return Invalid;
+}
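+
+// For example, instantiating:
+// @code
+//   struct B1 {}; struct B2 {};
+//   template<typename ...Ts> struct D : Ts... {};
+//   D<B1, B2> d;
+// @endcode
+// expands the pack-expansion base 'Ts...' into the two base specifiers
+// 'B1' and 'B2'.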
+
+// Defined via #include from SemaTemplateInstantiateDecl.cpp
+namespace clang {
+ namespace sema {
+ Attr *instantiateTemplateAttribute(const Attr *At, ASTContext &C, Sema &S,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+ }
+}
+
+/// Determine whether we would be unable to instantiate this template (because
+/// it either has no definition, or is in the process of being instantiated).
+static bool DiagnoseUninstantiableTemplate(Sema &S,
+ SourceLocation PointOfInstantiation,
+ TagDecl *Instantiation,
+ bool InstantiatedFromMember,
+ TagDecl *Pattern,
+ TagDecl *PatternDef,
+ TemplateSpecializationKind TSK,
+ bool Complain = true) {
+ if (PatternDef && !PatternDef->isBeingDefined())
+ return false;
+
+ if (!Complain || (PatternDef && PatternDef->isInvalidDecl())) {
+ // Say nothing
+ } else if (PatternDef) {
+ assert(PatternDef->isBeingDefined());
+ S.Diag(PointOfInstantiation,
+ diag::err_template_instantiate_within_definition)
+ << (TSK != TSK_ImplicitInstantiation)
+ << S.Context.getTypeDeclType(Instantiation);
+ // Not much point in noting the template declaration here, since
+ // we're lexically inside it.
+ Instantiation->setInvalidDecl();
+ } else if (InstantiatedFromMember) {
+ S.Diag(PointOfInstantiation,
+ diag::err_implicit_instantiate_member_undefined)
+ << S.Context.getTypeDeclType(Instantiation);
+ S.Diag(Pattern->getLocation(), diag::note_member_declared_at);
+ } else {
+ S.Diag(PointOfInstantiation, diag::err_template_instantiate_undefined)
+ << (TSK != TSK_ImplicitInstantiation)
+ << S.Context.getTypeDeclType(Instantiation);
+ S.Diag(Pattern->getLocation(), diag::note_template_decl_here);
+ }
+
+ // In general, Instantiation isn't marked invalid, so that we can report an
+ // error at each of multiple undefined instantiations. But the code that
+ // converts an explicit instantiation declaration into an explicit
+ // instantiation definition can't handle invalid declarations, so mark it
+ // invalid in that case.
+ if (TSK == TSK_ExplicitInstantiationDeclaration)
+ Instantiation->setInvalidDecl();
+ return true;
+}
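+
+// For example:
+// @code
+//   template<typename T> struct Fwd;  // declared but never defined
+//   Fwd<int> f;                       // error: implicit instantiation of
+//                                     // undefined template 'Fwd<int>'
+// @endcode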
+
+/// \brief Instantiate the definition of a class from a given pattern.
+///
+/// \param PointOfInstantiation The point of instantiation within the
+/// source code.
+///
+/// \param Instantiation is the declaration whose definition is being
+/// instantiated. This will be either a class template specialization
+/// or a member class of a class template specialization.
+///
+/// \param Pattern is the pattern from which the instantiation
+/// occurs. This will be either the declaration of a class template or
+/// the declaration of a member class of a class template.
+///
+/// \param TemplateArgs The template arguments to be substituted into
+/// the pattern.
+///
+/// \param TSK the kind of implicit or explicit instantiation to perform.
+///
+/// \param Complain whether to complain if the class cannot be instantiated due
+/// to the lack of a definition.
+///
+/// \returns true if an error occurred, false otherwise.
+bool
+Sema::InstantiateClass(SourceLocation PointOfInstantiation,
+ CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateSpecializationKind TSK,
+ bool Complain) {
+ CXXRecordDecl *PatternDef
+ = cast_or_null<CXXRecordDecl>(Pattern->getDefinition());
+ if (DiagnoseUninstantiableTemplate(*this, PointOfInstantiation, Instantiation,
+ Instantiation->getInstantiatedFromMemberClass(),
+ Pattern, PatternDef, TSK, Complain))
+ return true;
+ Pattern = PatternDef;
+
+ // Record the point of instantiation.
+ if (MemberSpecializationInfo *MSInfo
+ = Instantiation->getMemberSpecializationInfo()) {
+ MSInfo->setTemplateSpecializationKind(TSK);
+ MSInfo->setPointOfInstantiation(PointOfInstantiation);
+ } else if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Instantiation)) {
+ Spec->setTemplateSpecializationKind(TSK);
+ Spec->setPointOfInstantiation(PointOfInstantiation);
+ }
+
+ InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
+ if (Inst.isInvalid())
+ return true;
+
+ // Enter the scope of this instantiation. We don't use
+ // PushDeclContext because we don't have a scope.
+ ContextRAII SavedContext(*this, Instantiation);
+ EnterExpressionEvaluationContext EvalContext(*this,
+ Sema::PotentiallyEvaluated);
+
+ // If this is an instantiation of a local class, merge this local
+ // instantiation scope with the enclosing scope. Otherwise, every
+ // instantiation of a class has its own local instantiation scope.
+ bool MergeWithParentScope = !Instantiation->isDefinedOutsideFunctionOrMethod();
+ LocalInstantiationScope Scope(*this, MergeWithParentScope);
+
+ // Pull attributes from the pattern onto the instantiation.
+ InstantiateAttrs(TemplateArgs, Pattern, Instantiation);
+
+ // Start the definition of this instantiation.
+ Instantiation->startDefinition();
+
+ // The instantiation is visible here, even if it was first declared in an
+ // unimported module.
+ Instantiation->setHidden(false);
+
+ // FIXME: This loses the as-written tag kind for an explicit instantiation.
+ Instantiation->setTagKind(Pattern->getTagKind());
+
+ // Do substitution on the base class specifiers.
+ if (SubstBaseSpecifiers(Instantiation, Pattern, TemplateArgs))
+ Instantiation->setInvalidDecl();
+
+ TemplateDeclInstantiator Instantiator(*this, Instantiation, TemplateArgs);
+ SmallVector<Decl*, 4> Fields;
+ // Delay instantiation of late parsed attributes.
+ LateInstantiatedAttrVec LateAttrs;
+ Instantiator.enableLateAttributeInstantiation(&LateAttrs);
+
+ for (auto *Member : Pattern->decls()) {
+ // Don't instantiate members not belonging in this semantic context.
+ // e.g. for:
+ // @code
+ // template <int i> class A {
+ // class B *g;
+ // };
+ // @endcode
+ // 'class B' has the template as lexical context but semantically it is
+ // introduced in namespace scope.
+ if (Member->getDeclContext() != Pattern)
+ continue;
+
+ if (Member->isInvalidDecl()) {
+ Instantiation->setInvalidDecl();
+ continue;
+ }
+
+ Decl *NewMember = Instantiator.Visit(Member);
+ if (NewMember) {
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(NewMember)) {
+ Fields.push_back(Field);
+ } else if (EnumDecl *Enum = dyn_cast<EnumDecl>(NewMember)) {
+ // C++11 [temp.inst]p1: The implicit instantiation of a class template
+ // specialization causes the implicit instantiation of the definitions
+ // of unscoped member enumerations.
+ // Record a point of instantiation for this implicit instantiation.
+ if (TSK == TSK_ImplicitInstantiation && !Enum->isScoped() &&
+ Enum->isCompleteDefinition()) {
+ MemberSpecializationInfo *MSInfo =Enum->getMemberSpecializationInfo();
+ assert(MSInfo && "no spec info for member enum specialization");
+ MSInfo->setTemplateSpecializationKind(TSK_ImplicitInstantiation);
+ MSInfo->setPointOfInstantiation(PointOfInstantiation);
+ }
+ } else if (StaticAssertDecl *SA = dyn_cast<StaticAssertDecl>(NewMember)) {
+ if (SA->isFailed()) {
+ // A static_assert failed. Bail out; instantiating this
+ // class is probably not meaningful.
+ Instantiation->setInvalidDecl();
+ break;
+ }
+ }
+
+ if (NewMember->isInvalidDecl())
+ Instantiation->setInvalidDecl();
+ } else {
+ // FIXME: Eventually, a NULL return will mean that one of the
+ // instantiations was a semantic disaster, and we'll want to mark the
+ // declaration invalid.
+ // For now, we expect to skip some members that we can't yet handle.
+ }
+ }
+
+ // Finish checking fields.
+ ActOnFields(nullptr, Instantiation->getLocation(), Instantiation, Fields,
+ SourceLocation(), SourceLocation(), nullptr);
+ CheckCompletedCXXClass(Instantiation);
+
+ // Default arguments have been parsed, even if not yet instantiated. We can
+ // now instantiate the default argument expressions for default constructors,
+ // if necessary.
+ ActOnFinishCXXNonNestedClass(Instantiation);
+
+ // Instantiate late parsed attributes, and attach them to their decls.
+ // See Sema::InstantiateAttrs
+ for (LateInstantiatedAttrVec::iterator I = LateAttrs.begin(),
+ E = LateAttrs.end(); I != E; ++I) {
+ assert(CurrentInstantiationScope == Instantiator.getStartingScope());
+ CurrentInstantiationScope = I->Scope;
+
+ // Allow 'this' within late-parsed attributes.
+ NamedDecl *ND = dyn_cast<NamedDecl>(I->NewDecl);
+ CXXRecordDecl *ThisContext =
+ dyn_cast_or_null<CXXRecordDecl>(ND->getDeclContext());
+ CXXThisScopeRAII ThisScope(*this, ThisContext, /*TypeQuals*/0,
+ ND && ND->isCXXInstanceMember());
+
+ Attr *NewAttr =
+ instantiateTemplateAttribute(I->TmplAttr, Context, *this, TemplateArgs);
+ I->NewDecl->addAttr(NewAttr);
+ LocalInstantiationScope::deleteScopes(I->Scope,
+ Instantiator.getStartingScope());
+ }
+ Instantiator.disableLateAttributeInstantiation();
+ LateAttrs.clear();
+
+ ActOnFinishDelayedMemberInitializers(Instantiation);
+
+ // FIXME: We should do something similar for explicit instantiations so they
+ // end up in the right module.
+ if (TSK == TSK_ImplicitInstantiation) {
+ Instantiation->setLocation(Pattern->getLocation());
+ Instantiation->setLocStart(Pattern->getInnerLocStart());
+ Instantiation->setRBraceLoc(Pattern->getRBraceLoc());
+ }
+
+ if (!Instantiation->isInvalidDecl()) {
+ // Perform any dependent diagnostics from the pattern.
+ PerformDependentDiagnostics(Pattern, TemplateArgs);
+
+ // Instantiate any out-of-line class template partial
+ // specializations now.
+ for (TemplateDeclInstantiator::delayed_partial_spec_iterator
+ P = Instantiator.delayed_partial_spec_begin(),
+ PEnd = Instantiator.delayed_partial_spec_end();
+ P != PEnd; ++P) {
+ if (!Instantiator.InstantiateClassTemplatePartialSpecialization(
+ P->first, P->second)) {
+ Instantiation->setInvalidDecl();
+ break;
+ }
+ }
+
+ // Instantiate any out-of-line variable template partial
+ // specializations now.
+ for (TemplateDeclInstantiator::delayed_var_partial_spec_iterator
+ P = Instantiator.delayed_var_partial_spec_begin(),
+ PEnd = Instantiator.delayed_var_partial_spec_end();
+ P != PEnd; ++P) {
+ if (!Instantiator.InstantiateVarTemplatePartialSpecialization(
+ P->first, P->second)) {
+ Instantiation->setInvalidDecl();
+ break;
+ }
+ }
+ }
+
+ // Exit the scope of this instantiation.
+ SavedContext.pop();
+
+ if (!Instantiation->isInvalidDecl()) {
+ Consumer.HandleTagDeclDefinition(Instantiation);
+
+ // Always emit the vtable for an explicit instantiation definition
+ // of a polymorphic class template specialization.
+ if (TSK == TSK_ExplicitInstantiationDefinition)
+ MarkVTableUsed(PointOfInstantiation, Instantiation, true);
+ }
+
+ return Instantiation->isInvalidDecl();
+}
+
+/// \brief Instantiate the definition of an enum from a given pattern.
+///
+/// \param PointOfInstantiation The point of instantiation within the
+/// source code.
+/// \param Instantiation is the declaration whose definition is being
+/// instantiated. This will be a member enumeration of a class
+/// temploid specialization, or a local enumeration within a
+/// function temploid specialization.
+/// \param Pattern The templated declaration from which the instantiation
+/// occurs.
+/// \param TemplateArgs The template arguments to be substituted into
+/// the pattern.
+/// \param TSK The kind of implicit or explicit instantiation to perform.
+///
+/// \return \c true if an error occurred, \c false otherwise.
+bool Sema::InstantiateEnum(SourceLocation PointOfInstantiation,
+ EnumDecl *Instantiation, EnumDecl *Pattern,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateSpecializationKind TSK) {
+ EnumDecl *PatternDef = Pattern->getDefinition();
+ if (DiagnoseUninstantiableTemplate(*this, PointOfInstantiation, Instantiation,
+ Instantiation->getInstantiatedFromMemberEnum(),
+ Pattern, PatternDef, TSK,/*Complain*/true))
+ return true;
+ Pattern = PatternDef;
+
+ // Record the point of instantiation.
+ if (MemberSpecializationInfo *MSInfo
+ = Instantiation->getMemberSpecializationInfo()) {
+ MSInfo->setTemplateSpecializationKind(TSK);
+ MSInfo->setPointOfInstantiation(PointOfInstantiation);
+ }
+
+ InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
+ if (Inst.isInvalid())
+ return true;
+
+ // The instantiation is visible here, even if it was first declared in an
+ // unimported module.
+ Instantiation->setHidden(false);
+
+ // Enter the scope of this instantiation. We don't use
+ // PushDeclContext because we don't have a scope.
+ ContextRAII SavedContext(*this, Instantiation);
+ EnterExpressionEvaluationContext EvalContext(*this,
+ Sema::PotentiallyEvaluated);
+
+ LocalInstantiationScope Scope(*this, /*MergeWithParentScope*/true);
+
+ // Pull attributes from the pattern onto the instantiation.
+ InstantiateAttrs(TemplateArgs, Pattern, Instantiation);
+
+ TemplateDeclInstantiator Instantiator(*this, Instantiation, TemplateArgs);
+ Instantiator.InstantiateEnumDefinition(Instantiation, Pattern);
+
+ // Exit the scope of this instantiation.
+ SavedContext.pop();
+
+ return Instantiation->isInvalidDecl();
+}
+
+
+/// \brief Instantiate the definition of a field from the given pattern.
+///
+/// \param PointOfInstantiation The point of instantiation within the
+/// source code.
+/// \param Instantiation is the declaration whose definition is being
+/// instantiated. This will be a field of a class temploid
+/// specialization.
+/// \param Pattern The templated declaration from which the instantiation
+/// occurs.
+/// \param TemplateArgs The template arguments to be substituted into
+/// the pattern.
+///
+/// \return \c true if an error occurred, \c false otherwise.
+bool Sema::InstantiateInClassInitializer(
+ SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
+ FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs) {
+ // If there is no initializer, we don't need to do anything.
+ if (!Pattern->hasInClassInitializer())
+ return false;
+
+ assert(Instantiation->getInClassInitStyle() ==
+ Pattern->getInClassInitStyle() &&
+ "pattern and instantiation disagree about init style");
+
+ // Error out if we haven't parsed the initializer of the pattern yet because
+ // we are waiting for the closing brace of the outer class.
+ Expr *OldInit = Pattern->getInClassInitializer();
+ if (!OldInit) {
+ RecordDecl *PatternRD = Pattern->getParent();
+ RecordDecl *OutermostClass = PatternRD->getOuterLexicalRecordContext();
+ if (OutermostClass == PatternRD) {
+ Diag(Pattern->getLocEnd(), diag::err_in_class_initializer_not_yet_parsed)
+ << PatternRD << Pattern;
+ } else {
+ Diag(Pattern->getLocEnd(),
+ diag::err_in_class_initializer_not_yet_parsed_outer_class)
+ << PatternRD << OutermostClass << Pattern;
+ }
+ Instantiation->setInvalidDecl();
+ return true;
+ }
+
+ InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
+ if (Inst.isInvalid())
+ return true;
+
+ // Enter the scope of this instantiation. We don't use PushDeclContext because
+ // we don't have a scope.
+ ContextRAII SavedContext(*this, Instantiation->getParent());
+ EnterExpressionEvaluationContext EvalContext(*this,
+ Sema::PotentiallyEvaluated);
+
+ LocalInstantiationScope Scope(*this, true);
+
+ // Instantiate the initializer.
+ ActOnStartCXXInClassMemberInitializer();
+ CXXThisScopeRAII ThisScope(*this, Instantiation->getParent(), /*TypeQuals=*/0);
+
+ ExprResult NewInit = SubstInitializer(OldInit, TemplateArgs,
+ /*CXXDirectInit=*/false);
+ Expr *Init = NewInit.get();
+ assert((!Init || !isa<ParenListExpr>(Init)) && "call-style init in class");
+ ActOnFinishCXXInClassMemberInitializer(
+ Instantiation, Init ? Init->getLocStart() : SourceLocation(), Init);
+
+ // Exit the scope of this instantiation.
+ SavedContext.pop();
+
+ // Return true if the in-class initializer is still missing.
+ return !Instantiation->getInClassInitializer();
+}
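+
+// For example, given:
+// @code
+//   template<typename T> struct A { T value = T(); };
+//   A<int> a;
+// @endcode
+// when the initializer of A<int>::value is needed, T = int is substituted
+// into the pattern's initializer, producing 'int value = int()'.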
+
+namespace {
+ /// \brief A partial specialization whose template arguments have matched
+ /// a given template-id.
+ struct PartialSpecMatchResult {
+ ClassTemplatePartialSpecializationDecl *Partial;
+ TemplateArgumentList *Args;
+ };
+}
+
+bool Sema::InstantiateClassTemplateSpecialization(
+ SourceLocation PointOfInstantiation,
+ ClassTemplateSpecializationDecl *ClassTemplateSpec,
+ TemplateSpecializationKind TSK, bool Complain) {
+ // Perform the actual instantiation on the canonical declaration.
+ ClassTemplateSpec = cast<ClassTemplateSpecializationDecl>(
+ ClassTemplateSpec->getCanonicalDecl());
+ if (ClassTemplateSpec->isInvalidDecl())
+ return true;
+
+ ClassTemplateDecl *Template = ClassTemplateSpec->getSpecializedTemplate();
+ CXXRecordDecl *Pattern = nullptr;
+
+ // C++ [temp.class.spec.match]p1:
+ // When a class template is used in a context that requires an
+ // instantiation of the class, it is necessary to determine
+ // whether the instantiation is to be generated using the primary
+ // template or one of the partial specializations. This is done by
+ // matching the template arguments of the class template
+ // specialization with the template argument lists of the partial
+ // specializations.
+ typedef PartialSpecMatchResult MatchResult;
+ SmallVector<MatchResult, 4> Matched;
+ SmallVector<ClassTemplatePartialSpecializationDecl *, 4> PartialSpecs;
+ Template->getPartialSpecializations(PartialSpecs);
+ TemplateSpecCandidateSet FailedCandidates(PointOfInstantiation);
+ for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I) {
+ ClassTemplatePartialSpecializationDecl *Partial = PartialSpecs[I];
+ TemplateDeductionInfo Info(FailedCandidates.getLocation());
+ if (TemplateDeductionResult Result
+ = DeduceTemplateArguments(Partial,
+ ClassTemplateSpec->getTemplateArgs(),
+ Info)) {
+ // Store the failed-deduction information for use in diagnostics, later.
+ // TODO: Actually use the failed-deduction info?
+ FailedCandidates.addCandidate()
+ .set(Partial, MakeDeductionFailureInfo(Context, Result, Info));
+ (void)Result;
+ } else {
+ Matched.push_back(PartialSpecMatchResult());
+ Matched.back().Partial = Partial;
+ Matched.back().Args = Info.take();
+ }
+ }
+
+ // If we're dealing with a member template where the template parameters
+ // have been instantiated, this provides the original template parameters
+ // from which the member template's parameters were instantiated.
+
+ if (Matched.size() >= 1) {
+ SmallVectorImpl<MatchResult>::iterator Best = Matched.begin();
+ if (Matched.size() == 1) {
+ // -- If exactly one matching specialization is found, the
+ // instantiation is generated from that specialization.
+ // We don't need to do anything for this.
+ } else {
+ // -- If more than one matching specialization is found, the
+ // partial order rules (14.5.4.2) are used to determine
+ // whether one of the specializations is more specialized
+ // than the others. If none of the specializations is more
+ // specialized than all of the other matching
+ // specializations, then the use of the class template is
+ // ambiguous and the program is ill-formed.
+ for (SmallVectorImpl<MatchResult>::iterator P = Best + 1,
+ PEnd = Matched.end();
+ P != PEnd; ++P) {
+ if (getMoreSpecializedPartialSpecialization(P->Partial, Best->Partial,
+ PointOfInstantiation)
+ == P->Partial)
+ Best = P;
+ }
+
+ // Determine if the best partial specialization is more specialized than
+ // the others.
+ bool Ambiguous = false;
+ for (SmallVectorImpl<MatchResult>::iterator P = Matched.begin(),
+ PEnd = Matched.end();
+ P != PEnd; ++P) {
+ if (P != Best &&
+ getMoreSpecializedPartialSpecialization(P->Partial, Best->Partial,
+ PointOfInstantiation)
+ != Best->Partial) {
+ Ambiguous = true;
+ break;
+ }
+ }
+
+ if (Ambiguous) {
+ // Partial ordering did not produce a clear winner. Complain.
+ ClassTemplateSpec->setInvalidDecl();
+ Diag(PointOfInstantiation, diag::err_partial_spec_ordering_ambiguous)
+ << ClassTemplateSpec;
+
+ // Print the matching partial specializations.
+ for (SmallVectorImpl<MatchResult>::iterator P = Matched.begin(),
+ PEnd = Matched.end();
+ P != PEnd; ++P)
+ Diag(P->Partial->getLocation(), diag::note_partial_spec_match)
+ << getTemplateArgumentBindingsText(
+ P->Partial->getTemplateParameters(),
+ *P->Args);
+
+ return true;
+ }
+ }
+
+ // Instantiate using the best class template partial specialization.
+ ClassTemplatePartialSpecializationDecl *OrigPartialSpec = Best->Partial;
+ while (OrigPartialSpec->getInstantiatedFromMember()) {
+ // If we've found an explicit specialization of this class template,
+ // stop here and use that as the pattern.
+ if (OrigPartialSpec->isMemberSpecialization())
+ break;
+
+ OrigPartialSpec = OrigPartialSpec->getInstantiatedFromMember();
+ }
+
+ Pattern = OrigPartialSpec;
+ ClassTemplateSpec->setInstantiationOf(Best->Partial, Best->Args);
+ } else {
+ // -- If no matches are found, the instantiation is generated
+ // from the primary template.
+ ClassTemplateDecl *OrigTemplate = Template;
+ while (OrigTemplate->getInstantiatedFromMemberTemplate()) {
+ // If we've found an explicit specialization of this class template,
+ // stop here and use that as the pattern.
+ if (OrigTemplate->isMemberSpecialization())
+ break;
+
+ OrigTemplate = OrigTemplate->getInstantiatedFromMemberTemplate();
+ }
+
+ Pattern = OrigTemplate->getTemplatedDecl();
+ }
+
+ bool Result = InstantiateClass(PointOfInstantiation, ClassTemplateSpec,
+ Pattern,
+ getTemplateInstantiationArgs(ClassTemplateSpec),
+ TSK,
+ Complain);
+
+ return Result;
+}
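+
+// For example, in:
+// @code
+//   template<typename T> struct A {};      // primary template
+//   template<typename T> struct A<T *> {}; // partial specialization
+//   A<int *> a;
+// @endcode
+// deduction matches the partial specialization with T = int, so the
+// definition of A<int *> is instantiated from A<T *> rather than from the
+// primary template.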
+
+/// \brief Instantiates the definitions of all of the members
+/// of the given class, which is an instantiation of a class template
+/// or a member class of a template.
+void
+Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
+ CXXRecordDecl *Instantiation,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateSpecializationKind TSK) {
+ // FIXME: We need to notify the ASTMutationListener that we did all of these
+ // things, in case we have an explicit instantiation definition in a PCM, a
+ // module, or preamble, and the declaration is in an imported AST.
+ assert(
+ (TSK == TSK_ExplicitInstantiationDefinition ||
+ TSK == TSK_ExplicitInstantiationDeclaration ||
+ (TSK == TSK_ImplicitInstantiation && Instantiation->isLocalClass())) &&
+ "Unexpected template specialization kind!");
+ for (auto *D : Instantiation->decls()) {
+ bool SuppressNew = false;
+ if (auto *Function = dyn_cast<FunctionDecl>(D)) {
+ if (FunctionDecl *Pattern
+ = Function->getInstantiatedFromMemberFunction()) {
+ MemberSpecializationInfo *MSInfo
+ = Function->getMemberSpecializationInfo();
+ assert(MSInfo && "No member specialization information?");
+ if (MSInfo->getTemplateSpecializationKind()
+ == TSK_ExplicitSpecialization)
+ continue;
+
+ if (CheckSpecializationInstantiationRedecl(PointOfInstantiation, TSK,
+ Function,
+ MSInfo->getTemplateSpecializationKind(),
+ MSInfo->getPointOfInstantiation(),
+ SuppressNew) ||
+ SuppressNew)
+ continue;
+
+ // C++11 [temp.explicit]p8:
+ // An explicit instantiation definition that names a class template
+ // specialization explicitly instantiates the class template
+ // specialization and is only an explicit instantiation definition
+ // of members whose definition is visible at the point of
+ // instantiation.
+ if (TSK == TSK_ExplicitInstantiationDefinition && !Pattern->isDefined())
+ continue;
+
+ Function->setTemplateSpecializationKind(TSK, PointOfInstantiation);
+
+ if (Function->isDefined()) {
+ // Let the ASTConsumer know that this function has been explicitly
+ // instantiated now, and its linkage might have changed.
+ Consumer.HandleTopLevelDecl(DeclGroupRef(Function));
+ } else if (TSK == TSK_ExplicitInstantiationDefinition) {
+ InstantiateFunctionDefinition(PointOfInstantiation, Function);
+ } else if (TSK == TSK_ImplicitInstantiation) {
+ PendingLocalImplicitInstantiations.push_back(
+ std::make_pair(Function, PointOfInstantiation));
+ }
+ }
+ } else if (auto *Var = dyn_cast<VarDecl>(D)) {
+ if (isa<VarTemplateSpecializationDecl>(Var))
+ continue;
+
+ if (Var->isStaticDataMember()) {
+ MemberSpecializationInfo *MSInfo = Var->getMemberSpecializationInfo();
+ assert(MSInfo && "No member specialization information?");
+ if (MSInfo->getTemplateSpecializationKind()
+ == TSK_ExplicitSpecialization)
+ continue;
+
+ if (CheckSpecializationInstantiationRedecl(PointOfInstantiation, TSK,
+ Var,
+ MSInfo->getTemplateSpecializationKind(),
+ MSInfo->getPointOfInstantiation(),
+ SuppressNew) ||
+ SuppressNew)
+ continue;
+
+ if (TSK == TSK_ExplicitInstantiationDefinition) {
+ // C++11 [temp.explicit]p8:
+ // An explicit instantiation definition that names a class template
+ // specialization explicitly instantiates the class template
+ // specialization and is only an explicit instantiation definition
+ // of members whose definition is visible at the point of
+ // instantiation.
+ if (!Var->getInstantiatedFromStaticDataMember()
+ ->getOutOfLineDefinition())
+ continue;
+
+ Var->setTemplateSpecializationKind(TSK, PointOfInstantiation);
+ InstantiateStaticDataMemberDefinition(PointOfInstantiation, Var);
+ } else {
+ Var->setTemplateSpecializationKind(TSK, PointOfInstantiation);
+ }
+ }
+ } else if (auto *Record = dyn_cast<CXXRecordDecl>(D)) {
+ // Always skip the injected-class-name, along with any
+ // redeclarations of nested classes, since both would cause us
+ // to try to instantiate the members of a class twice.
+ // Skip closure types; they'll get instantiated when we instantiate
+ // the corresponding lambda-expression.
+ if (Record->isInjectedClassName() || Record->getPreviousDecl() ||
+ Record->isLambda())
+ continue;
+
+ MemberSpecializationInfo *MSInfo = Record->getMemberSpecializationInfo();
+ assert(MSInfo && "No member specialization information?");
+
+ if (MSInfo->getTemplateSpecializationKind()
+ == TSK_ExplicitSpecialization)
+ continue;
+
+ if (CheckSpecializationInstantiationRedecl(PointOfInstantiation, TSK,
+ Record,
+ MSInfo->getTemplateSpecializationKind(),
+ MSInfo->getPointOfInstantiation(),
+ SuppressNew) ||
+ SuppressNew)
+ continue;
+
+ CXXRecordDecl *Pattern = Record->getInstantiatedFromMemberClass();
+ assert(Pattern && "Missing instantiated-from-template information");
+
+ if (!Record->getDefinition()) {
+ if (!Pattern->getDefinition()) {
+ // C++0x [temp.explicit]p8:
+ // An explicit instantiation definition that names a class template
+ // specialization explicitly instantiates the class template
+ // specialization and is only an explicit instantiation definition
+ // of members whose definition is visible at the point of
+ // instantiation.
+ if (TSK == TSK_ExplicitInstantiationDeclaration) {
+ MSInfo->setTemplateSpecializationKind(TSK);
+ MSInfo->setPointOfInstantiation(PointOfInstantiation);
+ }
+
+ continue;
+ }
+
+ InstantiateClass(PointOfInstantiation, Record, Pattern,
+ TemplateArgs,
+ TSK);
+ } else {
+ if (TSK == TSK_ExplicitInstantiationDefinition &&
+ Record->getTemplateSpecializationKind() ==
+ TSK_ExplicitInstantiationDeclaration) {
+ Record->setTemplateSpecializationKind(TSK);
+ MarkVTableUsed(PointOfInstantiation, Record, true);
+ }
+ }
+
+ Pattern = cast_or_null<CXXRecordDecl>(Record->getDefinition());
+ if (Pattern)
+ InstantiateClassMembers(PointOfInstantiation, Pattern, TemplateArgs,
+ TSK);
+ } else if (auto *Enum = dyn_cast<EnumDecl>(D)) {
+ MemberSpecializationInfo *MSInfo = Enum->getMemberSpecializationInfo();
+ assert(MSInfo && "No member specialization information?");
+
+ if (MSInfo->getTemplateSpecializationKind()
+ == TSK_ExplicitSpecialization)
+ continue;
+
+ if (CheckSpecializationInstantiationRedecl(
+ PointOfInstantiation, TSK, Enum,
+ MSInfo->getTemplateSpecializationKind(),
+ MSInfo->getPointOfInstantiation(), SuppressNew) ||
+ SuppressNew)
+ continue;
+
+ if (Enum->getDefinition())
+ continue;
+
+ EnumDecl *Pattern = Enum->getInstantiatedFromMemberEnum();
+ assert(Pattern && "Missing instantiated-from-template information");
+
+ if (TSK == TSK_ExplicitInstantiationDefinition) {
+ if (!Pattern->getDefinition())
+ continue;
+
+ InstantiateEnum(PointOfInstantiation, Enum, Pattern, TemplateArgs, TSK);
+ } else {
+ MSInfo->setTemplateSpecializationKind(TSK);
+ MSInfo->setPointOfInstantiation(PointOfInstantiation);
+ }
+ } else if (auto *Field = dyn_cast<FieldDecl>(D)) {
+ // No need to instantiate in-class initializers during explicit
+ // instantiation.
+ if (Field->hasInClassInitializer() && TSK == TSK_ImplicitInstantiation) {
+ CXXRecordDecl *ClassPattern =
+ Instantiation->getTemplateInstantiationPattern();
+ DeclContext::lookup_result Lookup =
+ ClassPattern->lookup(Field->getDeclName());
+ assert(Lookup.size() == 1);
+ FieldDecl *Pattern = cast<FieldDecl>(Lookup[0]);
+ InstantiateInClassInitializer(PointOfInstantiation, Field, Pattern,
+ TemplateArgs);
+ }
+ }
+ }
+}
+
+/// \brief Instantiate the definitions of all of the members of the
+/// given class template specialization, which was named as part of an
+/// explicit instantiation.
+void
+Sema::InstantiateClassTemplateSpecializationMembers(
+ SourceLocation PointOfInstantiation,
+ ClassTemplateSpecializationDecl *ClassTemplateSpec,
+ TemplateSpecializationKind TSK) {
+ // C++0x [temp.explicit]p7:
+ // An explicit instantiation that names a class template
+ // specialization is an explicit instantiation of the same kind
+ // (declaration or definition) of each of its members (not
+ // including members inherited from base classes) that has not
+ // been previously explicitly specialized in the translation unit
+ // containing the explicit instantiation, except as described
+ // below.
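+ //
+ // For example, 'template struct std::pair<int, int>;' is an explicit
+ // instantiation definition and therefore also explicitly instantiates the
+ // definitions of pair's member functions.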
+ InstantiateClassMembers(PointOfInstantiation, ClassTemplateSpec,
+ getTemplateInstantiationArgs(ClassTemplateSpec),
+ TSK);
+}
+
+StmtResult
+Sema::SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs) {
+ if (!S)
+ return S;
+
+ TemplateInstantiator Instantiator(*this, TemplateArgs,
+ SourceLocation(),
+ DeclarationName());
+ return Instantiator.TransformStmt(S);
+}
+
+ExprResult
+Sema::SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs) {
+ if (!E)
+ return E;
+
+ TemplateInstantiator Instantiator(*this, TemplateArgs,
+ SourceLocation(),
+ DeclarationName());
+ return Instantiator.TransformExpr(E);
+}
+
+ExprResult Sema::SubstInitializer(Expr *Init,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ bool CXXDirectInit) {
+ TemplateInstantiator Instantiator(*this, TemplateArgs,
+ SourceLocation(),
+ DeclarationName());
+ return Instantiator.TransformInitializer(Init, CXXDirectInit);
+}
+
+bool Sema::SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ SmallVectorImpl<Expr *> &Outputs) {
+ if (Exprs.empty())
+ return false;
+
+ TemplateInstantiator Instantiator(*this, TemplateArgs,
+ SourceLocation(),
+ DeclarationName());
+ return Instantiator.TransformExprs(Exprs.data(), Exprs.size(),
+ IsCall, Outputs);
+}
+
+NestedNameSpecifierLoc
+Sema::SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ if (!NNS)
+ return NestedNameSpecifierLoc();
+
+ TemplateInstantiator Instantiator(*this, TemplateArgs, NNS.getBeginLoc(),
+ DeclarationName());
+ return Instantiator.TransformNestedNameSpecifierLoc(NNS);
+}
+
+/// \brief Do template substitution on declaration name info.
+DeclarationNameInfo
+Sema::SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ TemplateInstantiator Instantiator(*this, TemplateArgs, NameInfo.getLoc(),
+ NameInfo.getName());
+ return Instantiator.TransformDeclarationNameInfo(NameInfo);
+}
+
+TemplateName
+Sema::SubstTemplateName(NestedNameSpecifierLoc QualifierLoc,
+ TemplateName Name, SourceLocation Loc,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ TemplateInstantiator Instantiator(*this, TemplateArgs, Loc,
+ DeclarationName());
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ return Instantiator.TransformTemplateName(SS, Name, Loc);
+}
+
+bool Sema::Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
+ TemplateArgumentListInfo &Result,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ TemplateInstantiator Instantiator(*this, TemplateArgs, SourceLocation(),
+ DeclarationName());
+
+ return Instantiator.TransformTemplateArguments(Args, NumArgs, Result);
+}
+
+static const Decl *getCanonicalParmVarDecl(const Decl *D) {
+ // When storing ParmVarDecls in the local instantiation scope, we always
+ // want to use the ParmVarDecl from the canonical function declaration,
+ // since the map is then valid for any redeclaration or definition of that
+ // function.
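+ //
+ // For example (illustrative), given
+ //   void f(int x); // canonical declaration
+ //   void f(int x) { } // definition
+ // both 'x' parameters map to the canonical declaration's ParmVarDecl.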
+ if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(D)) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
+ unsigned i = PV->getFunctionScopeIndex();
+ // This parameter might be from a freestanding function type within the
+ // function and isn't necessarily referring to one of FD's parameters.
+ if (FD->getParamDecl(i) == PV)
+ return FD->getCanonicalDecl()->getParamDecl(i);
+ }
+ }
+ return D;
+}
+
+
+llvm::PointerUnion<Decl *, LocalInstantiationScope::DeclArgumentPack *> *
+LocalInstantiationScope::findInstantiationOf(const Decl *D) {
+ D = getCanonicalParmVarDecl(D);
+ for (LocalInstantiationScope *Current = this; Current;
+ Current = Current->Outer) {
+
+ // Check if we found something within this scope.
+ const Decl *CheckD = D;
+ do {
+ LocalDeclsMap::iterator Found = Current->LocalDecls.find(CheckD);
+ if (Found != Current->LocalDecls.end())
+ return &Found->second;
+
+ // If this is a tag declaration, it's possible that we need to look for
+ // a previous declaration.
+ if (const TagDecl *Tag = dyn_cast<TagDecl>(CheckD))
+ CheckD = Tag->getPreviousDecl();
+ else
+ CheckD = nullptr;
+ } while (CheckD);
+
+ // If we aren't combined with our outer scope, we're done.
+ if (!Current->CombineWithOuterScope)
+ break;
+ }
+
+ // If we're performing a partial substitution during template argument
+ // deduction, we may not have values for template parameters yet.
+ if (isa<NonTypeTemplateParmDecl>(D) || isa<TemplateTypeParmDecl>(D) ||
+ isa<TemplateTemplateParmDecl>(D))
+ return nullptr;
+
+ // Local types referenced prior to definition may require instantiation.
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D))
+ if (RD->isLocalClass())
+ return nullptr;
+
+ // Enumeration types referenced prior to definition may appear as a result of
+ // error recovery.
+ if (isa<EnumDecl>(D))
+ return nullptr;
+
+ // If we didn't find the decl, then we either have a sema bug, or we have a
+ // forward reference to a label declaration. Return null to indicate that
+ // we have an uninstantiated label.
+ assert(isa<LabelDecl>(D) && "declaration not instantiated in this scope");
+ return nullptr;
+}
+
+void LocalInstantiationScope::InstantiatedLocal(const Decl *D, Decl *Inst) {
+ D = getCanonicalParmVarDecl(D);
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> &Stored = LocalDecls[D];
+ if (Stored.isNull()) {
+#ifndef NDEBUG
+ // It should not be present in any surrounding scope either.
+ LocalInstantiationScope *Current = this;
+ while (Current->CombineWithOuterScope && Current->Outer) {
+ Current = Current->Outer;
+ assert(Current->LocalDecls.find(D) == Current->LocalDecls.end() &&
+ "Instantiated local in inner and outer scopes");
+ }
+#endif
+ Stored = Inst;
+ } else if (DeclArgumentPack *Pack = Stored.dyn_cast<DeclArgumentPack *>()) {
+ Pack->push_back(cast<ParmVarDecl>(Inst));
+ } else {
+ assert(Stored.get<Decl *>() == Inst && "Already instantiated this local");
+ }
+}
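+
+// The pack-related entry points below cooperate with InstantiatedLocal:
+// e.g. (illustrative) for 'template<typename ...Ts> void f(Ts ...ts)', the
+// pattern parameter 'ts' is first given a DeclArgumentPack via
+// MakeInstantiatedLocalArgPack, and each instantiated parameter is then
+// appended via InstantiatedLocalPackArg.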
+
+void LocalInstantiationScope::InstantiatedLocalPackArg(const Decl *D,
+ ParmVarDecl *Inst) {
+ D = getCanonicalParmVarDecl(D);
+ DeclArgumentPack *Pack = LocalDecls[D].get<DeclArgumentPack *>();
+ Pack->push_back(Inst);
+}
+
+void LocalInstantiationScope::MakeInstantiatedLocalArgPack(const Decl *D) {
+#ifndef NDEBUG
+ // This should be the first time we've been told about this decl.
+ for (LocalInstantiationScope *Current = this;
+ Current && Current->CombineWithOuterScope; Current = Current->Outer)
+ assert(Current->LocalDecls.find(D) == Current->LocalDecls.end() &&
+ "Creating local pack after instantiation of local");
+#endif
+
+ D = getCanonicalParmVarDecl(D);
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> &Stored = LocalDecls[D];
+ DeclArgumentPack *Pack = new DeclArgumentPack;
+ Stored = Pack;
+ ArgumentPacks.push_back(Pack);
+}
+
+void LocalInstantiationScope::SetPartiallySubstitutedPack(NamedDecl *Pack,
+ const TemplateArgument *ExplicitArgs,
+ unsigned NumExplicitArgs) {
+ assert((!PartiallySubstitutedPack || PartiallySubstitutedPack == Pack) &&
+ "Already have a partially-substituted pack");
+ assert((!PartiallySubstitutedPack
+ || NumArgsInPartiallySubstitutedPack == NumExplicitArgs) &&
+ "Wrong number of arguments in partially-substituted pack");
+ PartiallySubstitutedPack = Pack;
+ ArgsInPartiallySubstitutedPack = ExplicitArgs;
+ NumArgsInPartiallySubstitutedPack = NumExplicitArgs;
+}
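+
+// e.g. (illustrative) for 'template<typename ...Ts> void g(Ts ...);' called
+// as 'g<int>(0, 1.0)', deduction records Ts as the partially-substituted
+// pack whose single explicit argument is 'int' while the rest is still being
+// deduced; getPartiallySubstitutedPack below retrieves that state.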
+
+NamedDecl *LocalInstantiationScope::getPartiallySubstitutedPack(
+ const TemplateArgument **ExplicitArgs,
+ unsigned *NumExplicitArgs) const {
+ if (ExplicitArgs)
+ *ExplicitArgs = nullptr;
+ if (NumExplicitArgs)
+ *NumExplicitArgs = 0;
+
+ for (const LocalInstantiationScope *Current = this; Current;
+ Current = Current->Outer) {
+ if (Current->PartiallySubstitutedPack) {
+ if (ExplicitArgs)
+ *ExplicitArgs = Current->ArgsInPartiallySubstitutedPack;
+ if (NumExplicitArgs)
+ *NumExplicitArgs = Current->NumArgsInPartiallySubstitutedPack;
+
+ return Current->PartiallySubstitutedPack;
+ }
+
+ if (!Current->CombineWithOuterScope)
+ break;
+ }
+
+ return nullptr;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
new file mode 100644
index 0000000..7a452af
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -0,0 +1,4726 @@
+//===--- SemaTemplateInstantiateDecl.cpp - C++ Template Decl Instantiation ===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+//
+// This file implements C++ template instantiation for declarations.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/DependentDiagnostic.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/PrettyDeclStackTrace.h"
+#include "clang/Sema/Template.h"
+
+using namespace clang;
+
+static bool isDeclWithinFunction(const Decl *D) {
+ const DeclContext *DC = D->getDeclContext();
+ if (DC->isFunctionOrMethod())
+ return true;
+
+ if (DC->isRecord())
+ return cast<CXXRecordDecl>(DC)->isLocalClass();
+
+ return false;
+}
+
+template<typename DeclT>
+static bool SubstQualifier(Sema &SemaRef, const DeclT *OldDecl, DeclT *NewDecl,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ if (!OldDecl->getQualifierLoc())
+ return false;
+
+ assert((NewDecl->getFriendObjectKind() ||
+ !OldDecl->getLexicalDeclContext()->isDependentContext()) &&
+ "non-friend with qualified name defined in dependent context");
+ Sema::ContextRAII SavedContext(
+ SemaRef,
+ const_cast<DeclContext *>(NewDecl->getFriendObjectKind()
+ ? NewDecl->getLexicalDeclContext()
+ : OldDecl->getLexicalDeclContext()));
+
+ NestedNameSpecifierLoc NewQualifierLoc
+ = SemaRef.SubstNestedNameSpecifierLoc(OldDecl->getQualifierLoc(),
+ TemplateArgs);
+
+ if (!NewQualifierLoc)
+ return true;
+
+ NewDecl->setQualifierInfo(NewQualifierLoc);
+ return false;
+}
+
+bool TemplateDeclInstantiator::SubstQualifier(const DeclaratorDecl *OldDecl,
+ DeclaratorDecl *NewDecl) {
+ return ::SubstQualifier(SemaRef, OldDecl, NewDecl, TemplateArgs);
+}
+
+bool TemplateDeclInstantiator::SubstQualifier(const TagDecl *OldDecl,
+ TagDecl *NewDecl) {
+ return ::SubstQualifier(SemaRef, OldDecl, NewDecl, TemplateArgs);
+}
+
+// Include attribute instantiation code.
+#include "clang/Sema/AttrTemplateInstantiate.inc"
+
+static void instantiateDependentAlignedAttr(
+ Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
+ const AlignedAttr *Aligned, Decl *New, bool IsPackExpansion) {
+ if (Aligned->isAlignmentExpr()) {
+ // The alignment expression is a constant expression.
+ EnterExpressionEvaluationContext Unevaluated(S, Sema::ConstantEvaluated);
+ ExprResult Result = S.SubstExpr(Aligned->getAlignmentExpr(), TemplateArgs);
+ if (!Result.isInvalid())
+ S.AddAlignedAttr(Aligned->getLocation(), New, Result.getAs<Expr>(),
+ Aligned->getSpellingListIndex(), IsPackExpansion);
+ } else {
+ TypeSourceInfo *Result = S.SubstType(Aligned->getAlignmentType(),
+ TemplateArgs, Aligned->getLocation(),
+ DeclarationName());
+ if (Result)
+ S.AddAlignedAttr(Aligned->getLocation(), New, Result,
+ Aligned->getSpellingListIndex(), IsPackExpansion);
+ }
+}
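+
+// e.g. 'template<int N> struct alignas(N) A {};' (alignment expression) and
+// 'template<typename T> struct alignas(T) B {};' (alignment type) exercise
+// the two branches above; both forms stay dependent until instantiation.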
+
+static void instantiateDependentAlignedAttr(
+ Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
+ const AlignedAttr *Aligned, Decl *New) {
+ if (!Aligned->isPackExpansion()) {
+ instantiateDependentAlignedAttr(S, TemplateArgs, Aligned, New, false);
+ return;
+ }
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ if (Aligned->isAlignmentExpr())
+ S.collectUnexpandedParameterPacks(Aligned->getAlignmentExpr(),
+ Unexpanded);
+ else
+ S.collectUnexpandedParameterPacks(Aligned->getAlignmentType()->getTypeLoc(),
+ Unexpanded);
+ assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
+
+ // Determine whether we can expand this attribute pack yet.
+ bool Expand = true, RetainExpansion = false;
+ Optional<unsigned> NumExpansions;
+ // FIXME: Use the actual location of the ellipsis.
+ SourceLocation EllipsisLoc = Aligned->getLocation();
+ if (S.CheckParameterPacksForExpansion(EllipsisLoc, Aligned->getRange(),
+ Unexpanded, TemplateArgs, Expand,
+ RetainExpansion, NumExpansions))
+ return;
+
+ if (!Expand) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, -1);
+ instantiateDependentAlignedAttr(S, TemplateArgs, Aligned, New, true);
+ } else {
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, I);
+ instantiateDependentAlignedAttr(S, TemplateArgs, Aligned, New, false);
+ }
+ }
+}
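+
+// e.g. 'template<typename ...Ts> struct alignas(Ts...) C {};' is a pack
+// expansion: once the size of Ts is known, the loop above emits one
+// AlignedAttr per element of the pack.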
+
+static void instantiateDependentAssumeAlignedAttr(
+ Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
+ const AssumeAlignedAttr *Aligned, Decl *New) {
+ // The alignment expression is a constant expression.
+ EnterExpressionEvaluationContext Unevaluated(S, Sema::ConstantEvaluated);
+
+ Expr *E, *OE = nullptr;
+ ExprResult Result = S.SubstExpr(Aligned->getAlignment(), TemplateArgs);
+ if (Result.isInvalid())
+ return;
+ E = Result.getAs<Expr>();
+
+ if (Aligned->getOffset()) {
+ Result = S.SubstExpr(Aligned->getOffset(), TemplateArgs);
+ if (Result.isInvalid())
+ return;
+ OE = Result.getAs<Expr>();
+ }
+
+ S.AddAssumeAlignedAttr(Aligned->getLocation(), New, E, OE,
+ Aligned->getSpellingListIndex());
+}
+
+static void instantiateDependentAlignValueAttr(
+ Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
+ const AlignValueAttr *Aligned, Decl *New) {
+ // The alignment expression is a constant expression.
+ EnterExpressionEvaluationContext Unevaluated(S, Sema::ConstantEvaluated);
+ ExprResult Result = S.SubstExpr(Aligned->getAlignment(), TemplateArgs);
+ if (!Result.isInvalid())
+ S.AddAlignValueAttr(Aligned->getLocation(), New, Result.getAs<Expr>(),
+ Aligned->getSpellingListIndex());
+}
+
+static void instantiateDependentEnableIfAttr(
+ Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
+ const EnableIfAttr *A, const Decl *Tmpl, Decl *New) {
+ Expr *Cond = nullptr;
+ {
+ EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated);
+ ExprResult Result = S.SubstExpr(A->getCond(), TemplateArgs);
+ if (Result.isInvalid())
+ return;
+ Cond = Result.getAs<Expr>();
+ }
+ if (A->getCond()->isTypeDependent() && !Cond->isTypeDependent()) {
+ ExprResult Converted = S.PerformContextuallyConvertToBool(Cond);
+ if (Converted.isInvalid())
+ return;
+ Cond = Converted.get();
+ }
+
+ SmallVector<PartialDiagnosticAt, 8> Diags;
+ if (A->getCond()->isValueDependent() && !Cond->isValueDependent() &&
+ !Expr::isPotentialConstantExprUnevaluated(Cond, cast<FunctionDecl>(Tmpl),
+ Diags)) {
+ S.Diag(A->getLocation(), diag::err_enable_if_never_constant_expr);
+ for (int I = 0, N = Diags.size(); I != N; ++I)
+ S.Diag(Diags[I].first, Diags[I].second);
+ return;
+ }
+
+ EnableIfAttr *EIA = new (S.getASTContext())
+ EnableIfAttr(A->getLocation(), S.getASTContext(), Cond,
+ A->getMessage(),
+ A->getSpellingListIndex());
+ New->addAttr(EIA);
+}
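+
+// e.g. (illustrative) 'template<typename T> void h(T n)
+// __attribute__((enable_if(n > 0, "n must be positive")));' has a
+// value-dependent condition; the function above substitutes it, converts it
+// to bool if needed, and checks that it can still be a constant expression.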
+
+// Constructs a new instance of CUDALaunchBoundsAttr from the template
+// attribute Attr, using arguments substituted via TemplateArgs, and adds it
+// to New.
+static void instantiateDependentCUDALaunchBoundsAttr(
+ Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
+ const CUDALaunchBoundsAttr &Attr, Decl *New) {
+ // The launch-bounds expressions are constant expressions.
+ EnterExpressionEvaluationContext Unevaluated(S, Sema::ConstantEvaluated);
+
+ ExprResult Result = S.SubstExpr(Attr.getMaxThreads(), TemplateArgs);
+ if (Result.isInvalid())
+ return;
+ Expr *MaxThreads = Result.getAs<Expr>();
+
+ Expr *MinBlocks = nullptr;
+ if (Attr.getMinBlocks()) {
+ Result = S.SubstExpr(Attr.getMinBlocks(), TemplateArgs);
+ if (Result.isInvalid())
+ return;
+ MinBlocks = Result.getAs<Expr>();
+ }
+
+ S.AddLaunchBoundsAttr(Attr.getLocation(), New, MaxThreads, MinBlocks,
+ Attr.getSpellingListIndex());
+}
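+
+// e.g. (illustrative CUDA) 'template<int N> __global__ void
+// __launch_bounds__(N) kern();' carries a dependent max-threads expression
+// that the function above substitutes before re-adding the attribute.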
+
+void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
+ const Decl *Tmpl, Decl *New,
+ LateInstantiatedAttrVec *LateAttrs,
+ LocalInstantiationScope *OuterMostScope) {
+ for (const auto *TmplAttr : Tmpl->attrs()) {
+ // FIXME: This should be generalized to more than just the AlignedAttr.
+ const AlignedAttr *Aligned = dyn_cast<AlignedAttr>(TmplAttr);
+ if (Aligned && Aligned->isAlignmentDependent()) {
+ instantiateDependentAlignedAttr(*this, TemplateArgs, Aligned, New);
+ continue;
+ }
+
+ const AssumeAlignedAttr *AssumeAligned = dyn_cast<AssumeAlignedAttr>(TmplAttr);
+ if (AssumeAligned) {
+ instantiateDependentAssumeAlignedAttr(*this, TemplateArgs, AssumeAligned, New);
+ continue;
+ }
+
+ const AlignValueAttr *AlignValue = dyn_cast<AlignValueAttr>(TmplAttr);
+ if (AlignValue) {
+ instantiateDependentAlignValueAttr(*this, TemplateArgs, AlignValue, New);
+ continue;
+ }
+
+ const EnableIfAttr *EnableIf = dyn_cast<EnableIfAttr>(TmplAttr);
+ if (EnableIf && EnableIf->getCond()->isValueDependent()) {
+ instantiateDependentEnableIfAttr(*this, TemplateArgs, EnableIf, Tmpl,
+ New);
+ continue;
+ }
+
+ if (const CUDALaunchBoundsAttr *CUDALaunchBounds =
+ dyn_cast<CUDALaunchBoundsAttr>(TmplAttr)) {
+ instantiateDependentCUDALaunchBoundsAttr(*this, TemplateArgs,
+ *CUDALaunchBounds, New);
+ continue;
+ }
+
+ // Existing DLL attribute on the instantiation takes precedence.
+ if (TmplAttr->getKind() == attr::DLLExport ||
+ TmplAttr->getKind() == attr::DLLImport) {
+ if (New->hasAttr<DLLExportAttr>() || New->hasAttr<DLLImportAttr>()) {
+ continue;
+ }
+ }
+
+ assert(!TmplAttr->isPackExpansion());
+ if (TmplAttr->isLateParsed() && LateAttrs) {
+ // Late parsed attributes must be instantiated and attached after the
+ // enclosing class has been instantiated. See Sema::InstantiateClass.
+ LocalInstantiationScope *Saved = nullptr;
+ if (CurrentInstantiationScope)
+ Saved = CurrentInstantiationScope->cloneScopes(OuterMostScope);
+ LateAttrs->push_back(LateInstantiatedAttribute(TmplAttr, Saved, New));
+ } else {
+ // Allow 'this' within late-parsed attributes.
+ // Note: guard ND before dereferencing it; New need not be a NamedDecl.
+ NamedDecl *ND = dyn_cast<NamedDecl>(New);
+ CXXRecordDecl *ThisContext = ND
+ ? dyn_cast_or_null<CXXRecordDecl>(ND->getDeclContext())
+ : nullptr;
+ CXXThisScopeRAII ThisScope(*this, ThisContext, /*TypeQuals*/0,
+ ND && ND->isCXXInstanceMember());
+
+ Attr *NewAttr = sema::instantiateTemplateAttribute(TmplAttr, Context,
+ *this, TemplateArgs);
+ if (NewAttr)
+ New->addAttr(NewAttr);
+ }
+ }
+}
+
+/// Get the previous declaration of a declaration for the purposes of template
+/// instantiation. If this finds a previous declaration, then the previous
+/// declaration of the instantiation of D should be an instantiation of the
+/// result of this function.
+template<typename DeclT>
+static DeclT *getPreviousDeclForInstantiation(DeclT *D) {
+ DeclT *Result = D->getPreviousDecl();
+
+ // If the declaration is within a class, and the previous declaration was
+ // merged from a different definition of that class, then we don't have a
+ // previous declaration for the purpose of template instantiation.
+ if (Result && isa<CXXRecordDecl>(D->getDeclContext()) &&
+ D->getLexicalDeclContext() != Result->getLexicalDeclContext())
+ return nullptr;
+
+ return Result;
+}
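+
+// e.g. if two modules each define 'template<typename T> struct S { void f(); };'
+// and Clang merges the definitions, f's previous declaration may live in the
+// other definition of S; the check above ignores such a declaration when
+// picking an instantiation pattern.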
+
+Decl *
+TemplateDeclInstantiator::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
+ llvm_unreachable("Translation units cannot be instantiated");
+}
+
+Decl *
+TemplateDeclInstantiator::VisitExternCContextDecl(ExternCContextDecl *D) {
+ llvm_unreachable("extern \"C\" context cannot be instantiated");
+}
+
+Decl *
+TemplateDeclInstantiator::VisitLabelDecl(LabelDecl *D) {
+ LabelDecl *Inst = LabelDecl::Create(SemaRef.Context, Owner, D->getLocation(),
+ D->getIdentifier());
+ Owner->addDecl(Inst);
+ return Inst;
+}
+
+Decl *
+TemplateDeclInstantiator::VisitNamespaceDecl(NamespaceDecl *D) {
+ llvm_unreachable("Namespaces cannot be instantiated");
+}
+
+Decl *
+TemplateDeclInstantiator::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
+ NamespaceAliasDecl *Inst
+ = NamespaceAliasDecl::Create(SemaRef.Context, Owner,
+ D->getNamespaceLoc(),
+ D->getAliasLoc(),
+ D->getIdentifier(),
+ D->getQualifierLoc(),
+ D->getTargetNameLoc(),
+ D->getNamespace());
+ Owner->addDecl(Inst);
+ return Inst;
+}
+
+Decl *TemplateDeclInstantiator::InstantiateTypedefNameDecl(TypedefNameDecl *D,
+ bool IsTypeAlias) {
+ bool Invalid = false;
+ TypeSourceInfo *DI = D->getTypeSourceInfo();
+ if (DI->getType()->isInstantiationDependentType() ||
+ DI->getType()->isVariablyModifiedType()) {
+ DI = SemaRef.SubstType(DI, TemplateArgs,
+ D->getLocation(), D->getDeclName());
+ if (!DI) {
+ Invalid = true;
+ DI = SemaRef.Context.getTrivialTypeSourceInfo(SemaRef.Context.IntTy);
+ }
+ } else {
+ SemaRef.MarkDeclarationsReferencedInType(D->getLocation(), DI->getType());
+ }
+
+ // HACK: g++ has a bug where it gets the value kind of ?: wrong.
+ // libstdc++ relies upon this bug in its implementation of common_type.
+ // If we happen to be processing that implementation, fake up the g++ ?:
+ // semantics. See LWG issue 2141 for more information on the bug.
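+ // e.g. libstdc++'s common_type member is (roughly)
+ //   typedef decltype(true ? declval<_Tp>() : declval<_Up>()) type;
+ // which under Clang's correct ?: rules can yield a reference type; the
+ // code below folds it back to the non-reference type g++ produces.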
+ const DecltypeType *DT = DI->getType()->getAs<DecltypeType>();
+ CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext());
+ if (DT && RD && isa<ConditionalOperator>(DT->getUnderlyingExpr()) &&
+ DT->isReferenceType() &&
+ RD->getEnclosingNamespaceContext() == SemaRef.getStdNamespace() &&
+ RD->getIdentifier() && RD->getIdentifier()->isStr("common_type") &&
+ D->getIdentifier() && D->getIdentifier()->isStr("type") &&
+ SemaRef.getSourceManager().isInSystemHeader(D->getLocStart()))
+ // Fold it to the (non-reference) type which g++ would have produced.
+ DI = SemaRef.Context.getTrivialTypeSourceInfo(
+ DI->getType().getNonReferenceType());
+
+ // Create the new typedef
+ TypedefNameDecl *Typedef;
+ if (IsTypeAlias)
+ Typedef = TypeAliasDecl::Create(SemaRef.Context, Owner, D->getLocStart(),
+ D->getLocation(), D->getIdentifier(), DI);
+ else
+ Typedef = TypedefDecl::Create(SemaRef.Context, Owner, D->getLocStart(),
+ D->getLocation(), D->getIdentifier(), DI);
+ if (Invalid)
+ Typedef->setInvalidDecl();
+
+ // If the old typedef was the name for linkage purposes of an anonymous
+ // tag decl, re-establish that relationship for the new typedef.
+ if (const TagType *oldTagType = D->getUnderlyingType()->getAs<TagType>()) {
+ TagDecl *oldTag = oldTagType->getDecl();
+ if (oldTag->getTypedefNameForAnonDecl() == D && !Invalid) {
+ TagDecl *newTag = DI->getType()->castAs<TagType>()->getDecl();
+ assert(!newTag->hasNameForLinkage());
+ newTag->setTypedefNameForAnonDecl(Typedef);
+ }
+ }
+
+ if (TypedefNameDecl *Prev = getPreviousDeclForInstantiation(D)) {
+ NamedDecl *InstPrev = SemaRef.FindInstantiatedDecl(D->getLocation(), Prev,
+ TemplateArgs);
+ if (!InstPrev)
+ return nullptr;
+
+ TypedefNameDecl *InstPrevTypedef = cast<TypedefNameDecl>(InstPrev);
+
+ // If the typedef types are not identical, reject them.
+ SemaRef.isIncompatibleTypedef(InstPrevTypedef, Typedef);
+
+ Typedef->setPreviousDecl(InstPrevTypedef);
+ }
+
+ SemaRef.InstantiateAttrs(TemplateArgs, D, Typedef);
+
+ Typedef->setAccess(D->getAccess());
+
+ return Typedef;
+}
+
+Decl *TemplateDeclInstantiator::VisitTypedefDecl(TypedefDecl *D) {
+ Decl *Typedef = InstantiateTypedefNameDecl(D, /*IsTypeAlias=*/false);
+ if (Typedef)
+ Owner->addDecl(Typedef);
+ return Typedef;
+}
+
+Decl *TemplateDeclInstantiator::VisitTypeAliasDecl(TypeAliasDecl *D) {
+ Decl *Typedef = InstantiateTypedefNameDecl(D, /*IsTypeAlias=*/true);
+ if (Typedef)
+ Owner->addDecl(Typedef);
+ return Typedef;
+}
+
+Decl *
+TemplateDeclInstantiator::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
+ // Create a local instantiation scope for this type alias template, which
+ // will contain the instantiations of the template parameters.
+ LocalInstantiationScope Scope(SemaRef);
+
+ TemplateParameterList *TempParams = D->getTemplateParameters();
+ TemplateParameterList *InstParams = SubstTemplateParams(TempParams);
+ if (!InstParams)
+ return nullptr;
+
+ TypeAliasDecl *Pattern = D->getTemplatedDecl();
+
+ TypeAliasTemplateDecl *PrevAliasTemplate = nullptr;
+ if (getPreviousDeclForInstantiation<TypedefNameDecl>(Pattern)) {
+ DeclContext::lookup_result Found = Owner->lookup(Pattern->getDeclName());
+ if (!Found.empty()) {
+ PrevAliasTemplate = dyn_cast<TypeAliasTemplateDecl>(Found.front());
+ }
+ }
+
+ TypeAliasDecl *AliasInst = cast_or_null<TypeAliasDecl>(
+ InstantiateTypedefNameDecl(Pattern, /*IsTypeAlias=*/true));
+ if (!AliasInst)
+ return nullptr;
+
+ TypeAliasTemplateDecl *Inst
+ = TypeAliasTemplateDecl::Create(SemaRef.Context, Owner, D->getLocation(),
+ D->getDeclName(), InstParams, AliasInst);
+ AliasInst->setDescribedAliasTemplate(Inst);
+ if (PrevAliasTemplate)
+ Inst->setPreviousDecl(PrevAliasTemplate);
+
+ Inst->setAccess(D->getAccess());
+
+ if (!PrevAliasTemplate)
+ Inst->setInstantiatedFromMemberTemplate(D);
+
+ Owner->addDecl(Inst);
+
+ return Inst;
+}
+
+Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D) {
+ return VisitVarDecl(D, /*InstantiatingVarTemplate=*/false);
+}
+
+Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D,
+ bool InstantiatingVarTemplate) {
+
+ // If this is the variable for an anonymous struct or union,
+ // instantiate the anonymous struct/union type first.
+ if (const RecordType *RecordTy = D->getType()->getAs<RecordType>())
+ if (RecordTy->getDecl()->isAnonymousStructOrUnion())
+ if (!VisitCXXRecordDecl(cast<CXXRecordDecl>(RecordTy->getDecl())))
+ return nullptr;
+
+ // Do substitution on the type of the declaration
+ TypeSourceInfo *DI = SemaRef.SubstType(D->getTypeSourceInfo(),
+ TemplateArgs,
+ D->getTypeSpecStartLoc(),
+ D->getDeclName());
+ if (!DI)
+ return nullptr;
+
+ if (DI->getType()->isFunctionType()) {
+ SemaRef.Diag(D->getLocation(), diag::err_variable_instantiates_to_function)
+ << D->isStaticDataMember() << DI->getType();
+ return nullptr;
+ }
+
+ DeclContext *DC = Owner;
+ if (D->isLocalExternDecl())
+ SemaRef.adjustContextForLocalExternDecl(DC);
+
+ // Build the instantiated declaration.
+ VarDecl *Var = VarDecl::Create(SemaRef.Context, DC, D->getInnerLocStart(),
+ D->getLocation(), D->getIdentifier(),
+ DI->getType(), DI, D->getStorageClass());
+
+ // In ARC, infer 'retaining' for variables of retainable type.
+ if (SemaRef.getLangOpts().ObjCAutoRefCount &&
+ SemaRef.inferObjCARCLifetime(Var))
+ Var->setInvalidDecl();
+
+ // Substitute the nested name specifier, if any.
+ if (SubstQualifier(D, Var))
+ return nullptr;
+
+ SemaRef.BuildVariableInstantiation(Var, D, TemplateArgs, LateAttrs, Owner,
+ StartingScope, InstantiatingVarTemplate);
+
+ if (D->isNRVOVariable()) {
+ QualType ReturnType = cast<FunctionDecl>(DC)->getReturnType();
+ if (SemaRef.isCopyElisionCandidate(ReturnType, Var, false))
+ Var->setNRVOVariable(true);
+ }
+
+ Var->setImplicit(D->isImplicit());
+
+ return Var;
+}
+
+Decl *TemplateDeclInstantiator::VisitAccessSpecDecl(AccessSpecDecl *D) {
+ AccessSpecDecl* AD
+ = AccessSpecDecl::Create(SemaRef.Context, D->getAccess(), Owner,
+ D->getAccessSpecifierLoc(), D->getColonLoc());
+ Owner->addHiddenDecl(AD);
+ return AD;
+}
+
+Decl *TemplateDeclInstantiator::VisitFieldDecl(FieldDecl *D) {
+ bool Invalid = false;
+ TypeSourceInfo *DI = D->getTypeSourceInfo();
+ if (DI->getType()->isInstantiationDependentType() ||
+ DI->getType()->isVariablyModifiedType()) {
+ DI = SemaRef.SubstType(DI, TemplateArgs,
+ D->getLocation(), D->getDeclName());
+ if (!DI) {
+ DI = D->getTypeSourceInfo();
+ Invalid = true;
+ } else if (DI->getType()->isFunctionType()) {
+ // C++ [temp.arg.type]p3:
+ // If a declaration acquires a function type through a type
+ // dependent on a template-parameter and this causes a
+ // declaration that does not use the syntactic form of a
+ // function declarator to have function type, the program is
+ // ill-formed.
+ SemaRef.Diag(D->getLocation(), diag::err_field_instantiates_to_function)
+ << DI->getType();
+ Invalid = true;
+ }
+ } else {
+ SemaRef.MarkDeclarationsReferencedInType(D->getLocation(), DI->getType());
+ }
+
+ Expr *BitWidth = D->getBitWidth();
+ if (Invalid)
+ BitWidth = nullptr;
+ else if (BitWidth) {
+ // The bit-width expression is a constant expression.
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
+
+ ExprResult InstantiatedBitWidth
+ = SemaRef.SubstExpr(BitWidth, TemplateArgs);
+ if (InstantiatedBitWidth.isInvalid()) {
+ Invalid = true;
+ BitWidth = nullptr;
+ } else
+ BitWidth = InstantiatedBitWidth.getAs<Expr>();
+ }
+
+ FieldDecl *Field = SemaRef.CheckFieldDecl(D->getDeclName(),
+ DI->getType(), DI,
+ cast<RecordDecl>(Owner),
+ D->getLocation(),
+ D->isMutable(),
+ BitWidth,
+ D->getInClassInitStyle(),
+ D->getInnerLocStart(),
+ D->getAccess(),
+ nullptr);
+ if (!Field) {
+ cast<Decl>(Owner)->setInvalidDecl();
+ return nullptr;
+ }
+
+ SemaRef.InstantiateAttrs(TemplateArgs, D, Field, LateAttrs, StartingScope);
+
+ if (Field->hasAttrs())
+ SemaRef.CheckAlignasUnderalignment(Field);
+
+ if (Invalid)
+ Field->setInvalidDecl();
+
+ if (!Field->getDeclName()) {
+ // Keep track of where this decl came from.
+ SemaRef.Context.setInstantiatedFromUnnamedFieldDecl(Field, D);
+ }
+ if (CXXRecordDecl *Parent = dyn_cast<CXXRecordDecl>(Field->getDeclContext())) {
+ if (Parent->isAnonymousStructOrUnion() &&
+ Parent->getRedeclContext()->isFunctionOrMethod())
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Field);
+ }
+
+ Field->setImplicit(D->isImplicit());
+ Field->setAccess(D->getAccess());
+ Owner->addDecl(Field);
+
+ return Field;
+}
+
+Decl *TemplateDeclInstantiator::VisitMSPropertyDecl(MSPropertyDecl *D) {
+ bool Invalid = false;
+ TypeSourceInfo *DI = D->getTypeSourceInfo();
+
+ if (DI->getType()->isVariablyModifiedType()) {
+ SemaRef.Diag(D->getLocation(), diag::err_property_is_variably_modified)
+ << D;
+ Invalid = true;
+ } else if (DI->getType()->isInstantiationDependentType()) {
+ DI = SemaRef.SubstType(DI, TemplateArgs,
+ D->getLocation(), D->getDeclName());
+ if (!DI) {
+ DI = D->getTypeSourceInfo();
+ Invalid = true;
+ } else if (DI->getType()->isFunctionType()) {
+ // C++ [temp.arg.type]p3:
+ // If a declaration acquires a function type through a type
+ // dependent on a template-parameter and this causes a
+ // declaration that does not use the syntactic form of a
+ // function declarator to have function type, the program is
+ // ill-formed.
+ SemaRef.Diag(D->getLocation(), diag::err_field_instantiates_to_function)
+ << DI->getType();
+ Invalid = true;
+ }
+ } else {
+ SemaRef.MarkDeclarationsReferencedInType(D->getLocation(), DI->getType());
+ }
+
+ MSPropertyDecl *Property = MSPropertyDecl::Create(
+ SemaRef.Context, Owner, D->getLocation(), D->getDeclName(), DI->getType(),
+ DI, D->getLocStart(), D->getGetterId(), D->getSetterId());
+
+ SemaRef.InstantiateAttrs(TemplateArgs, D, Property, LateAttrs,
+ StartingScope);
+
+ if (Invalid)
+ Property->setInvalidDecl();
+
+ Property->setAccess(D->getAccess());
+ Owner->addDecl(Property);
+
+ return Property;
+}
+
+Decl *TemplateDeclInstantiator::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
+ NamedDecl **NamedChain =
+ new (SemaRef.Context)NamedDecl*[D->getChainingSize()];
+
+ int i = 0;
+ for (auto *PI : D->chain()) {
+ NamedDecl *Next = SemaRef.FindInstantiatedDecl(D->getLocation(), PI,
+ TemplateArgs);
+ if (!Next)
+ return nullptr;
+
+ NamedChain[i++] = Next;
+ }
+
+ QualType T = cast<FieldDecl>(NamedChain[i-1])->getType();
+ IndirectFieldDecl *IndirectField = IndirectFieldDecl::Create(
+ SemaRef.Context, Owner, D->getLocation(), D->getIdentifier(), T,
+ NamedChain, D->getChainingSize());
+
+ for (const auto *Attr : D->attrs())
+ IndirectField->addAttr(Attr->clone(SemaRef.Context));
+
+ IndirectField->setImplicit(D->isImplicit());
+ IndirectField->setAccess(D->getAccess());
+ Owner->addDecl(IndirectField);
+ return IndirectField;
+}
+
+Decl *TemplateDeclInstantiator::VisitFriendDecl(FriendDecl *D) {
+ // Handle friend type expressions by simply substituting template
+ // parameters into the pattern type and checking the result.
+ if (TypeSourceInfo *Ty = D->getFriendType()) {
+ TypeSourceInfo *InstTy;
+ // If this is an unsupported friend, don't bother substituting template
+ // arguments into it. The actual type referred to won't be used by any
+ // parts of Clang, and may not be valid for instantiating. Just use the
+ // same info for the instantiated friend.
+ if (D->isUnsupportedFriend()) {
+ InstTy = Ty;
+ } else {
+ InstTy = SemaRef.SubstType(Ty, TemplateArgs,
+ D->getLocation(), DeclarationName());
+ }
+ if (!InstTy)
+ return nullptr;
+
+ FriendDecl *FD = SemaRef.CheckFriendTypeDecl(D->getLocStart(),
+ D->getFriendLoc(), InstTy);
+ if (!FD)
+ return nullptr;
+
+ FD->setAccess(AS_public);
+ FD->setUnsupportedFriend(D->isUnsupportedFriend());
+ Owner->addDecl(FD);
+ return FD;
+ }
+
+ NamedDecl *ND = D->getFriendDecl();
+ assert(ND && "friend decl must be a decl or a type!");
+
+ // All of the Visit implementations for the various potential friend
+ // declarations have to be carefully written to work for friend
+ // objects, with the most important detail being that the target
+ // decl should almost certainly not be placed in Owner.
+ Decl *NewND = Visit(ND);
+ if (!NewND) return nullptr;
+
+ FriendDecl *FD =
+ FriendDecl::Create(SemaRef.Context, Owner, D->getLocation(),
+ cast<NamedDecl>(NewND), D->getFriendLoc());
+ FD->setAccess(AS_public);
+ FD->setUnsupportedFriend(D->isUnsupportedFriend());
+ Owner->addDecl(FD);
+ return FD;
+}
+
+Decl *TemplateDeclInstantiator::VisitStaticAssertDecl(StaticAssertDecl *D) {
+ Expr *AssertExpr = D->getAssertExpr();
+
+ // The expression in a static assertion is a constant expression.
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
+
+ ExprResult InstantiatedAssertExpr
+ = SemaRef.SubstExpr(AssertExpr, TemplateArgs);
+ if (InstantiatedAssertExpr.isInvalid())
+ return nullptr;
+
+ return SemaRef.BuildStaticAssertDeclaration(D->getLocation(),
+ InstantiatedAssertExpr.get(),
+ D->getMessage(),
+ D->getRParenLoc(),
+ D->isFailed());
+}
+
+Decl *TemplateDeclInstantiator::VisitEnumDecl(EnumDecl *D) {
+ EnumDecl *PrevDecl = nullptr;
+ if (EnumDecl *PatternPrev = getPreviousDeclForInstantiation(D)) {
+ NamedDecl *Prev = SemaRef.FindInstantiatedDecl(D->getLocation(),
+ PatternPrev,
+ TemplateArgs);
+ if (!Prev) return nullptr;
+ PrevDecl = cast<EnumDecl>(Prev);
+ }
+
+ EnumDecl *Enum = EnumDecl::Create(SemaRef.Context, Owner, D->getLocStart(),
+ D->getLocation(), D->getIdentifier(),
+ PrevDecl, D->isScoped(),
+ D->isScopedUsingClassTag(), D->isFixed());
+ if (D->isFixed()) {
+ if (TypeSourceInfo *TI = D->getIntegerTypeSourceInfo()) {
+ // If we have type source information for the underlying type, it means it
+ // has been explicitly set by the user. Perform substitution on it before
+ // moving on.
+ SourceLocation UnderlyingLoc = TI->getTypeLoc().getBeginLoc();
+ TypeSourceInfo *NewTI = SemaRef.SubstType(TI, TemplateArgs, UnderlyingLoc,
+ DeclarationName());
+ if (!NewTI || SemaRef.CheckEnumUnderlyingType(NewTI))
+ Enum->setIntegerType(SemaRef.Context.IntTy);
+ else
+ Enum->setIntegerTypeSourceInfo(NewTI);
+ } else {
+ assert(!D->getIntegerType()->isDependentType()
+ && "Dependent type without type source info");
+ Enum->setIntegerType(D->getIntegerType());
+ }
+ }
+
+ SemaRef.InstantiateAttrs(TemplateArgs, D, Enum);
+
+ Enum->setInstantiationOfMemberEnum(D, TSK_ImplicitInstantiation);
+ Enum->setAccess(D->getAccess());
+ // Forward the mangling number from the template to the instantiated decl.
+ SemaRef.Context.setManglingNumber(Enum, SemaRef.Context.getManglingNumber(D));
+ // See if the old tag was defined along with a declarator.
+ // If it did, mark the new tag as being associated with that declarator.
+ if (DeclaratorDecl *DD = SemaRef.Context.getDeclaratorForUnnamedTagDecl(D))
+ SemaRef.Context.addDeclaratorForUnnamedTagDecl(Enum, DD);
+ // See if the old tag was defined along with a typedef.
+ // If it did, mark the new tag as being associated with that typedef.
+ if (TypedefNameDecl *TND = SemaRef.Context.getTypedefNameForUnnamedTagDecl(D))
+ SemaRef.Context.addTypedefNameForUnnamedTagDecl(Enum, TND);
+ if (SubstQualifier(D, Enum)) return nullptr;
+ Owner->addDecl(Enum);
+
+ EnumDecl *Def = D->getDefinition();
+ if (Def && Def != D) {
+ // If this is an out-of-line definition of an enum member template, check
+ // that the underlying types match in the instantiation of both
+ // declarations.
+ if (TypeSourceInfo *TI = Def->getIntegerTypeSourceInfo()) {
+ SourceLocation UnderlyingLoc = TI->getTypeLoc().getBeginLoc();
+ QualType DefnUnderlying =
+ SemaRef.SubstType(TI->getType(), TemplateArgs,
+ UnderlyingLoc, DeclarationName());
+ SemaRef.CheckEnumRedeclaration(Def->getLocation(), Def->isScoped(),
+ DefnUnderlying,
+ /*EnumUnderlyingIsImplicit=*/false, Enum);
+ }
+ }
+
+ // C++11 [temp.inst]p1: The implicit instantiation of a class template
+ // specialization causes the implicit instantiation of the declarations, but
+ // not the definitions of scoped member enumerations.
+ //
+ // DR1484 clarifies that enumeration definitions inside of a template
+ // declaration aren't considered entities that can be separately instantiated
+ // from the rest of the entity they are declared inside of.
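+ //
+ // e.g. (illustrative) for 'template<typename T> struct A {
+ // enum class E : T { e }; };', implicitly instantiating A<int> declares
+ // A<int>::E but defers its enumerators, while an unscoped member enum
+ // defined in the pattern is defined here along with the class.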
+ if (isDeclWithinFunction(D) ? D == Def : Def && !Enum->isScoped()) {
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Enum);
+ InstantiateEnumDefinition(Enum, Def);
+ }
+
+ return Enum;
+}
+
+void TemplateDeclInstantiator::InstantiateEnumDefinition(
+ EnumDecl *Enum, EnumDecl *Pattern) {
+ Enum->startDefinition();
+
+ // Update the location to refer to the definition.
+ Enum->setLocation(Pattern->getLocation());
+
+ SmallVector<Decl*, 4> Enumerators;
+
+ EnumConstantDecl *LastEnumConst = nullptr;
+ for (auto *EC : Pattern->enumerators()) {
+ // The specified value for the enumerator.
+ ExprResult Value((Expr *)nullptr);
+ if (Expr *UninstValue = EC->getInitExpr()) {
+ // The enumerator's value expression is a constant expression.
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
+
+ Value = SemaRef.SubstExpr(UninstValue, TemplateArgs);
+ }
+
+ // Drop the initial value and continue.
+ bool isInvalid = false;
+ if (Value.isInvalid()) {
+ Value = nullptr;
+ isInvalid = true;
+ }
+
+ EnumConstantDecl *EnumConst
+ = SemaRef.CheckEnumConstant(Enum, LastEnumConst,
+ EC->getLocation(), EC->getIdentifier(),
+ Value.get());
+
+ if (isInvalid) {
+ if (EnumConst)
+ EnumConst->setInvalidDecl();
+ Enum->setInvalidDecl();
+ }
+
+ if (EnumConst) {
+ SemaRef.InstantiateAttrs(TemplateArgs, EC, EnumConst);
+
+ EnumConst->setAccess(Enum->getAccess());
+ Enum->addDecl(EnumConst);
+ Enumerators.push_back(EnumConst);
+ LastEnumConst = EnumConst;
+
+ if (Pattern->getDeclContext()->isFunctionOrMethod() &&
+ !Enum->isScoped()) {
+ // If the enumeration is within a function or method, record the enum
+ // constant as a local.
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(EC, EnumConst);
+ }
+ }
+ }
+
+ // FIXME: Fixup LBraceLoc
+ SemaRef.ActOnEnumBody(Enum->getLocation(), SourceLocation(),
+ Enum->getRBraceLoc(), Enum,
+ Enumerators,
+ nullptr, nullptr);
+}
+
+Decl *TemplateDeclInstantiator::VisitEnumConstantDecl(EnumConstantDecl *D) {
+ llvm_unreachable("EnumConstantDecls can only occur within EnumDecls.");
+}
+
+Decl *
+TemplateDeclInstantiator::VisitBuiltinTemplateDecl(BuiltinTemplateDecl *D) {
+ llvm_unreachable("BuiltinTemplateDecls cannot be instantiated.");
+}
+
+Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
+ bool isFriend = (D->getFriendObjectKind() != Decl::FOK_None);
+
+ // Create a local instantiation scope for this class template, which
+ // will contain the instantiations of the template parameters.
+ LocalInstantiationScope Scope(SemaRef);
+ TemplateParameterList *TempParams = D->getTemplateParameters();
+ TemplateParameterList *InstParams = SubstTemplateParams(TempParams);
+ if (!InstParams)
+ return nullptr;
+
+ CXXRecordDecl *Pattern = D->getTemplatedDecl();
+
+ // Instantiate the qualifier. We have to do this first in case
+ // we're a friend declaration, because if we are then we need to put
+ // the new declaration in the appropriate context.
+ NestedNameSpecifierLoc QualifierLoc = Pattern->getQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc = SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc,
+ TemplateArgs);
+ if (!QualifierLoc)
+ return nullptr;
+ }
+
+ CXXRecordDecl *PrevDecl = nullptr;
+ ClassTemplateDecl *PrevClassTemplate = nullptr;
+
+ if (!isFriend && getPreviousDeclForInstantiation(Pattern)) {
+ DeclContext::lookup_result Found = Owner->lookup(Pattern->getDeclName());
+ if (!Found.empty()) {
+ PrevClassTemplate = dyn_cast<ClassTemplateDecl>(Found.front());
+ if (PrevClassTemplate)
+ PrevDecl = PrevClassTemplate->getTemplatedDecl();
+ }
+ }
+
+ // If this isn't a friend, then it's a member template, in which
+ // case we just want to build the instantiation in the
+ // specialization. If it is a friend, we want to build it in
+ // the appropriate context.
+ DeclContext *DC = Owner;
+ if (isFriend) {
+ if (QualifierLoc) {
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ DC = SemaRef.computeDeclContext(SS);
+ if (!DC) return nullptr;
+ } else {
+ DC = SemaRef.FindInstantiatedContext(Pattern->getLocation(),
+ Pattern->getDeclContext(),
+ TemplateArgs);
+ }
+
+ // Look for a previous declaration of the template in the owning
+ // context.
+ LookupResult R(SemaRef, Pattern->getDeclName(), Pattern->getLocation(),
+ Sema::LookupOrdinaryName, Sema::ForRedeclaration);
+ SemaRef.LookupQualifiedName(R, DC);
+
+ if (R.isSingleResult()) {
+ PrevClassTemplate = R.getAsSingle<ClassTemplateDecl>();
+ if (PrevClassTemplate)
+ PrevDecl = PrevClassTemplate->getTemplatedDecl();
+ }
+
+ if (!PrevClassTemplate && QualifierLoc) {
+ SemaRef.Diag(Pattern->getLocation(), diag::err_not_tag_in_scope)
+ << D->getTemplatedDecl()->getTagKind() << Pattern->getDeclName() << DC
+ << QualifierLoc.getSourceRange();
+ return nullptr;
+ }
+
+ bool AdoptedPreviousTemplateParams = false;
+ if (PrevClassTemplate) {
+ bool Complain = true;
+
+ // HACK: libstdc++ 4.2.1 contains an ill-formed friend class
+ // template for struct std::tr1::__detail::_Map_base, where the
+ // template parameters of the friend declaration don't match the
+ // template parameters of the original declaration. In this one
+ // case, we don't complain about the ill-formed friend
+ // declaration.
+ if (isFriend && Pattern->getIdentifier() &&
+ Pattern->getIdentifier()->isStr("_Map_base") &&
+ DC->isNamespace() &&
+ cast<NamespaceDecl>(DC)->getIdentifier() &&
+ cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__detail")) {
+ DeclContext *DCParent = DC->getParent();
+ if (DCParent->isNamespace() &&
+ cast<NamespaceDecl>(DCParent)->getIdentifier() &&
+ cast<NamespaceDecl>(DCParent)->getIdentifier()->isStr("tr1")) {
+ if (cast<Decl>(DCParent)->isInStdNamespace())
+ Complain = false;
+ }
+ }
+
+ TemplateParameterList *PrevParams
+ = PrevClassTemplate->getTemplateParameters();
+
+ // Make sure the parameter lists match.
+ if (!SemaRef.TemplateParameterListsAreEqual(InstParams, PrevParams,
+ Complain,
+ Sema::TPL_TemplateMatch)) {
+ if (Complain)
+ return nullptr;
+
+ AdoptedPreviousTemplateParams = true;
+ InstParams = PrevParams;
+ }
+
+ // Do some additional validation, then merge default arguments
+ // from the existing declarations.
+ if (!AdoptedPreviousTemplateParams &&
+ SemaRef.CheckTemplateParameterList(InstParams, PrevParams,
+ Sema::TPC_ClassTemplate))
+ return nullptr;
+ }
+ }
+
+ CXXRecordDecl *RecordInst
+ = CXXRecordDecl::Create(SemaRef.Context, Pattern->getTagKind(), DC,
+ Pattern->getLocStart(), Pattern->getLocation(),
+ Pattern->getIdentifier(), PrevDecl,
+ /*DelayTypeCreation=*/true);
+
+ if (QualifierLoc)
+ RecordInst->setQualifierInfo(QualifierLoc);
+
+ ClassTemplateDecl *Inst
+ = ClassTemplateDecl::Create(SemaRef.Context, DC, D->getLocation(),
+ D->getIdentifier(), InstParams, RecordInst,
+ PrevClassTemplate);
+ RecordInst->setDescribedClassTemplate(Inst);
+
+ if (isFriend) {
+ if (PrevClassTemplate)
+ Inst->setAccess(PrevClassTemplate->getAccess());
+ else
+ Inst->setAccess(D->getAccess());
+
+ Inst->setObjectOfFriendDecl();
+ // TODO: do we want to track the instantiation progeny of this
+ // friend target decl?
+ } else {
+ Inst->setAccess(D->getAccess());
+ if (!PrevClassTemplate)
+ Inst->setInstantiatedFromMemberTemplate(D);
+ }
+
+ // Trigger creation of the type for the instantiation.
+ SemaRef.Context.getInjectedClassNameType(RecordInst,
+ Inst->getInjectedClassNameSpecialization());
+
+ // Finish handling of friends.
+ if (isFriend) {
+ DC->makeDeclVisibleInContext(Inst);
+ Inst->setLexicalDeclContext(Owner);
+ RecordInst->setLexicalDeclContext(Owner);
+ return Inst;
+ }
+
+ if (D->isOutOfLine()) {
+ Inst->setLexicalDeclContext(D->getLexicalDeclContext());
+ RecordInst->setLexicalDeclContext(D->getLexicalDeclContext());
+ }
+
+ Owner->addDecl(Inst);
+
+ if (!PrevClassTemplate) {
+ // Queue up any out-of-line partial specializations of this member
+ // class template; the client will force their instantiation once
+ // the enclosing class has been instantiated.
+ SmallVector<ClassTemplatePartialSpecializationDecl *, 4> PartialSpecs;
+ D->getPartialSpecializations(PartialSpecs);
+ for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I)
+ if (PartialSpecs[I]->getFirstDecl()->isOutOfLine())
+ OutOfLinePartialSpecs.push_back(std::make_pair(Inst, PartialSpecs[I]));
+ }
+
+ return Inst;
+}
+
+Decl *
+TemplateDeclInstantiator::VisitClassTemplatePartialSpecializationDecl(
+ ClassTemplatePartialSpecializationDecl *D) {
+ ClassTemplateDecl *ClassTemplate = D->getSpecializedTemplate();
+
+ // Lookup the already-instantiated declaration in the instantiation
+ // of the class template and return that.
+ DeclContext::lookup_result Found
+ = Owner->lookup(ClassTemplate->getDeclName());
+ if (Found.empty())
+ return nullptr;
+
+ ClassTemplateDecl *InstClassTemplate
+ = dyn_cast<ClassTemplateDecl>(Found.front());
+ if (!InstClassTemplate)
+ return nullptr;
+
+ if (ClassTemplatePartialSpecializationDecl *Result
+ = InstClassTemplate->findPartialSpecInstantiatedFromMember(D))
+ return Result;
+
+ return InstantiateClassTemplatePartialSpecialization(InstClassTemplate, D);
+}
+
+Decl *TemplateDeclInstantiator::VisitVarTemplateDecl(VarTemplateDecl *D) {
+ assert(D->getTemplatedDecl()->isStaticDataMember() &&
+ "Only static data member templates are allowed.");
+
+ // Create a local instantiation scope for this variable template, which
+ // will contain the instantiations of the template parameters.
+ LocalInstantiationScope Scope(SemaRef);
+ TemplateParameterList *TempParams = D->getTemplateParameters();
+ TemplateParameterList *InstParams = SubstTemplateParams(TempParams);
+ if (!InstParams)
+ return nullptr;
+
+ VarDecl *Pattern = D->getTemplatedDecl();
+ VarTemplateDecl *PrevVarTemplate = nullptr;
+
+ if (getPreviousDeclForInstantiation(Pattern)) {
+ DeclContext::lookup_result Found = Owner->lookup(Pattern->getDeclName());
+ if (!Found.empty())
+ PrevVarTemplate = dyn_cast<VarTemplateDecl>(Found.front());
+ }
+
+ VarDecl *VarInst =
+ cast_or_null<VarDecl>(VisitVarDecl(Pattern,
+ /*InstantiatingVarTemplate=*/true));
+ if (!VarInst) return nullptr;
+
+ DeclContext *DC = Owner;
+
+ VarTemplateDecl *Inst = VarTemplateDecl::Create(
+ SemaRef.Context, DC, D->getLocation(), D->getIdentifier(), InstParams,
+ VarInst);
+ VarInst->setDescribedVarTemplate(Inst);
+ Inst->setPreviousDecl(PrevVarTemplate);
+
+ Inst->setAccess(D->getAccess());
+ if (!PrevVarTemplate)
+ Inst->setInstantiatedFromMemberTemplate(D);
+
+ if (D->isOutOfLine()) {
+ Inst->setLexicalDeclContext(D->getLexicalDeclContext());
+ VarInst->setLexicalDeclContext(D->getLexicalDeclContext());
+ }
+
+ Owner->addDecl(Inst);
+
+ if (!PrevVarTemplate) {
+ // Queue up any out-of-line partial specializations of this member
+ // variable template; the client will force their instantiation once
+ // the enclosing class has been instantiated.
+ SmallVector<VarTemplatePartialSpecializationDecl *, 4> PartialSpecs;
+ D->getPartialSpecializations(PartialSpecs);
+ for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I)
+ if (PartialSpecs[I]->getFirstDecl()->isOutOfLine())
+ OutOfLineVarPartialSpecs.push_back(
+ std::make_pair(Inst, PartialSpecs[I]));
+ }
+
+ return Inst;
+}
+
+Decl *TemplateDeclInstantiator::VisitVarTemplatePartialSpecializationDecl(
+ VarTemplatePartialSpecializationDecl *D) {
+ assert(D->isStaticDataMember() &&
+ "Only static data member templates are allowed.");
+
+ VarTemplateDecl *VarTemplate = D->getSpecializedTemplate();
+
+ // Lookup the already-instantiated declaration and return that.
+ DeclContext::lookup_result Found = Owner->lookup(VarTemplate->getDeclName());
+ assert(!Found.empty() && "Instantiation found nothing?");
+
+ VarTemplateDecl *InstVarTemplate = dyn_cast<VarTemplateDecl>(Found.front());
+ assert(InstVarTemplate && "Instantiation did not find a variable template?");
+
+ if (VarTemplatePartialSpecializationDecl *Result =
+ InstVarTemplate->findPartialSpecInstantiatedFromMember(D))
+ return Result;
+
+ return InstantiateVarTemplatePartialSpecialization(InstVarTemplate, D);
+}
+
+Decl *
+TemplateDeclInstantiator::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
+ // Create a local instantiation scope for this function template, which
+ // will contain the instantiations of the template parameters and then get
+ // merged with the local instantiation scope for the function template
+ // itself.
+ LocalInstantiationScope Scope(SemaRef);
+
+ TemplateParameterList *TempParams = D->getTemplateParameters();
+ TemplateParameterList *InstParams = SubstTemplateParams(TempParams);
+ if (!InstParams)
+ return nullptr;
+
+ FunctionDecl *Instantiated = nullptr;
+ if (CXXMethodDecl *DMethod = dyn_cast<CXXMethodDecl>(D->getTemplatedDecl()))
+ Instantiated = cast_or_null<FunctionDecl>(VisitCXXMethodDecl(DMethod,
+ InstParams));
+ else
+ Instantiated = cast_or_null<FunctionDecl>(VisitFunctionDecl(
+ D->getTemplatedDecl(),
+ InstParams));
+
+ if (!Instantiated)
+ return nullptr;
+
+ // Link the instantiated function template declaration to the function
+ // template from which it was instantiated.
+ FunctionTemplateDecl *InstTemplate
+ = Instantiated->getDescribedFunctionTemplate();
+ assert(InstTemplate &&
+ "VisitFunctionDecl/CXXMethodDecl didn't create a template!");
+ InstTemplate->setAccess(D->getAccess());
+
+ bool isFriend = (InstTemplate->getFriendObjectKind() != Decl::FOK_None);
+
+ // Link the instantiation back to the pattern *unless* this is a
+ // non-definition friend declaration.
+ if (!InstTemplate->getInstantiatedFromMemberTemplate() &&
+ !(isFriend && !D->getTemplatedDecl()->isThisDeclarationADefinition()))
+ InstTemplate->setInstantiatedFromMemberTemplate(D);
+
+ // Make declarations visible in the appropriate context.
+ if (!isFriend) {
+ Owner->addDecl(InstTemplate);
+ } else if (InstTemplate->getDeclContext()->isRecord() &&
+ !getPreviousDeclForInstantiation(D)) {
+ SemaRef.CheckFriendAccess(InstTemplate);
+ }
+
+ return InstTemplate;
+}
+
+Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) {
+ CXXRecordDecl *PrevDecl = nullptr;
+ if (D->isInjectedClassName())
+ PrevDecl = cast<CXXRecordDecl>(Owner);
+ else if (CXXRecordDecl *PatternPrev = getPreviousDeclForInstantiation(D)) {
+ NamedDecl *Prev = SemaRef.FindInstantiatedDecl(D->getLocation(),
+ PatternPrev,
+ TemplateArgs);
+ if (!Prev) return nullptr;
+ PrevDecl = cast<CXXRecordDecl>(Prev);
+ }
+
+ CXXRecordDecl *Record
+ = CXXRecordDecl::Create(SemaRef.Context, D->getTagKind(), Owner,
+ D->getLocStart(), D->getLocation(),
+ D->getIdentifier(), PrevDecl);
+
+ // Substitute the nested name specifier, if any.
+ if (SubstQualifier(D, Record))
+ return nullptr;
+
+ Record->setImplicit(D->isImplicit());
+  // FIXME: Checking against AS_none is an ugly hack to work around the issue
+  // that the tag decls introduced by friend class declarations don't have an
+  // access specifier. Remove once this area of the code gets sorted out.
+ if (D->getAccess() != AS_none)
+ Record->setAccess(D->getAccess());
+ if (!D->isInjectedClassName())
+ Record->setInstantiationOfMemberClass(D, TSK_ImplicitInstantiation);
+
+  // If the original declaration was part of a friend declaration,
+  // inherit its namespace state.
+ if (D->getFriendObjectKind())
+ Record->setObjectOfFriendDecl();
+
+ // Make sure that anonymous structs and unions are recorded.
+ if (D->isAnonymousStructOrUnion())
+ Record->setAnonymousStructOrUnion(true);
+
+ if (D->isLocalClass())
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Record);
+
+ // Forward the mangling number from the template to the instantiated decl.
+ SemaRef.Context.setManglingNumber(Record,
+ SemaRef.Context.getManglingNumber(D));
+
+  // See if the old tag was defined along with a declarator.
+  // If so, mark the new tag as being associated with that declarator.
+ if (DeclaratorDecl *DD = SemaRef.Context.getDeclaratorForUnnamedTagDecl(D))
+ SemaRef.Context.addDeclaratorForUnnamedTagDecl(Record, DD);
+
+  // See if the old tag was defined along with a typedef.
+  // If so, mark the new tag as being associated with that typedef.
+ if (TypedefNameDecl *TND = SemaRef.Context.getTypedefNameForUnnamedTagDecl(D))
+ SemaRef.Context.addTypedefNameForUnnamedTagDecl(Record, TND);
+
+ Owner->addDecl(Record);
+
+ // DR1484 clarifies that the members of a local class are instantiated as part
+ // of the instantiation of their enclosing entity.
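+  // For example (a sketch; 'g' and 'Local' are invented names):
+  //   template<typename T> void g() {
+  //     struct Local { T get() { return T(); } }; // members instantiated
+  //   }                                           // along with g<T>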
+ if (D->isCompleteDefinition() && D->isLocalClass()) {
+ Sema::SavePendingLocalImplicitInstantiationsRAII
+ SavedPendingLocalImplicitInstantiations(SemaRef);
+
+ SemaRef.InstantiateClass(D->getLocation(), Record, D, TemplateArgs,
+ TSK_ImplicitInstantiation,
+ /*Complain=*/true);
+
+ SemaRef.InstantiateClassMembers(D->getLocation(), Record, TemplateArgs,
+ TSK_ImplicitInstantiation);
+
+ // This class may have local implicit instantiations that need to be
+ // performed within this scope.
+ SemaRef.PerformPendingInstantiations(/*LocalOnly=*/true);
+ }
+
+ SemaRef.DiagnoseUnusedNestedTypedefs(Record);
+
+ return Record;
+}
+
+/// \brief Adjust the given function type for an instantiation of the
+/// given declaration, to cope with modifications to the function's type that
+/// aren't reflected in the type-source information.
+///
+/// \param D The declaration we're instantiating.
+/// \param TInfo The already-instantiated type.
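+///
+/// For example (a sketch): a GNU attribute such as __attribute__((noreturn))
+/// is carried in the function type's ExtInfo but not in the written type, so
+/// the freshly substituted type must be patched to preserve it.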
+static QualType adjustFunctionTypeForInstantiation(ASTContext &Context,
+ FunctionDecl *D,
+ TypeSourceInfo *TInfo) {
+ const FunctionProtoType *OrigFunc
+ = D->getType()->castAs<FunctionProtoType>();
+ const FunctionProtoType *NewFunc
+ = TInfo->getType()->castAs<FunctionProtoType>();
+ if (OrigFunc->getExtInfo() == NewFunc->getExtInfo())
+ return TInfo->getType();
+
+ FunctionProtoType::ExtProtoInfo NewEPI = NewFunc->getExtProtoInfo();
+ NewEPI.ExtInfo = OrigFunc->getExtInfo();
+ return Context.getFunctionType(NewFunc->getReturnType(),
+ NewFunc->getParamTypes(), NewEPI);
+}
+
+/// Normal class members are of more specific types and therefore
+/// don't make it here. This function serves two purposes:
+/// 1) instantiating function templates
+/// 2) substituting friend declarations
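+///
+/// Illustration (a sketch; 'W' and 'h' are invented names):
+///   template<typename T> struct W {
+///     friend void h(W);  // substituted via this path, never a member
+///   };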
+Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
+ TemplateParameterList *TemplateParams) {
+ // Check whether there is already a function template specialization for
+ // this declaration.
+ FunctionTemplateDecl *FunctionTemplate = D->getDescribedFunctionTemplate();
+ if (FunctionTemplate && !TemplateParams) {
+ ArrayRef<TemplateArgument> Innermost = TemplateArgs.getInnermost();
+
+ void *InsertPos = nullptr;
+ FunctionDecl *SpecFunc
+ = FunctionTemplate->findSpecialization(Innermost, InsertPos);
+
+ // If we already have a function template specialization, return it.
+ if (SpecFunc)
+ return SpecFunc;
+ }
+
+ bool isFriend;
+ if (FunctionTemplate)
+ isFriend = (FunctionTemplate->getFriendObjectKind() != Decl::FOK_None);
+ else
+ isFriend = (D->getFriendObjectKind() != Decl::FOK_None);
+
+ bool MergeWithParentScope = (TemplateParams != nullptr) ||
+ Owner->isFunctionOrMethod() ||
+ !(isa<Decl>(Owner) &&
+ cast<Decl>(Owner)->isDefinedOutsideFunctionOrMethod());
+ LocalInstantiationScope Scope(SemaRef, MergeWithParentScope);
+
+ SmallVector<ParmVarDecl *, 4> Params;
+ TypeSourceInfo *TInfo = SubstFunctionType(D, Params);
+ if (!TInfo)
+ return nullptr;
+ QualType T = adjustFunctionTypeForInstantiation(SemaRef.Context, D, TInfo);
+
+ NestedNameSpecifierLoc QualifierLoc = D->getQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc = SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc,
+ TemplateArgs);
+ if (!QualifierLoc)
+ return nullptr;
+ }
+
+ // If we're instantiating a local function declaration, put the result
+ // in the enclosing namespace; otherwise we need to find the instantiated
+ // context.
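+  //
+  // For example (a sketch): in
+  //   template<typename T> void f() { void g(T); }
+  // the instantiated 'g' is a local extern declaration whose semantic
+  // context is the enclosing namespace.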
+ DeclContext *DC;
+ if (D->isLocalExternDecl()) {
+ DC = Owner;
+ SemaRef.adjustContextForLocalExternDecl(DC);
+ } else if (isFriend && QualifierLoc) {
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ DC = SemaRef.computeDeclContext(SS);
+ if (!DC) return nullptr;
+ } else {
+ DC = SemaRef.FindInstantiatedContext(D->getLocation(), D->getDeclContext(),
+ TemplateArgs);
+ }
+
+ FunctionDecl *Function =
+ FunctionDecl::Create(SemaRef.Context, DC, D->getInnerLocStart(),
+ D->getNameInfo(), T, TInfo,
+ D->getCanonicalDecl()->getStorageClass(),
+ D->isInlineSpecified(), D->hasWrittenPrototype(),
+ D->isConstexpr());
+ Function->setRangeEnd(D->getSourceRange().getEnd());
+
+ if (D->isInlined())
+ Function->setImplicitlyInline();
+
+ if (QualifierLoc)
+ Function->setQualifierInfo(QualifierLoc);
+
+ if (D->isLocalExternDecl())
+ Function->setLocalExternDecl();
+
+ DeclContext *LexicalDC = Owner;
+ if (!isFriend && D->isOutOfLine() && !D->isLocalExternDecl()) {
+ assert(D->getDeclContext()->isFileContext());
+ LexicalDC = D->getDeclContext();
+ }
+
+ Function->setLexicalDeclContext(LexicalDC);
+
+ // Attach the parameters
+ for (unsigned P = 0; P < Params.size(); ++P)
+ if (Params[P])
+ Params[P]->setOwningFunction(Function);
+ Function->setParams(Params);
+
+ SourceLocation InstantiateAtPOI;
+ if (TemplateParams) {
+ // Our resulting instantiation is actually a function template, since we
+ // are substituting only the outer template parameters. For example, given
+ //
+ // template<typename T>
+ // struct X {
+ // template<typename U> friend void f(T, U);
+ // };
+ //
+ // X<int> x;
+ //
+ // We are instantiating the friend function template "f" within X<int>,
+ // which means substituting int for T, but leaving "f" as a friend function
+ // template.
+ // Build the function template itself.
+ FunctionTemplate = FunctionTemplateDecl::Create(SemaRef.Context, DC,
+ Function->getLocation(),
+ Function->getDeclName(),
+ TemplateParams, Function);
+ Function->setDescribedFunctionTemplate(FunctionTemplate);
+
+ FunctionTemplate->setLexicalDeclContext(LexicalDC);
+
+ if (isFriend && D->isThisDeclarationADefinition()) {
+ // TODO: should we remember this connection regardless of whether
+ // the friend declaration provided a body?
+ FunctionTemplate->setInstantiatedFromMemberTemplate(
+ D->getDescribedFunctionTemplate());
+ }
+ } else if (FunctionTemplate) {
+ // Record this function template specialization.
+ ArrayRef<TemplateArgument> Innermost = TemplateArgs.getInnermost();
+ Function->setFunctionTemplateSpecialization(FunctionTemplate,
+ TemplateArgumentList::CreateCopy(SemaRef.Context,
+ Innermost.begin(),
+ Innermost.size()),
+ /*InsertPos=*/nullptr);
+ } else if (isFriend) {
+ // Note, we need this connection even if the friend doesn't have a body.
+ // Its body may exist but not have been attached yet due to deferred
+ // parsing.
+ // FIXME: It might be cleaner to set this when attaching the body to the
+ // friend function declaration, however that would require finding all the
+ // instantiations and modifying them.
+ Function->setInstantiationOfMemberFunction(D, TSK_ImplicitInstantiation);
+ }
+
+ if (InitFunctionInstantiation(Function, D))
+ Function->setInvalidDecl();
+
+ bool isExplicitSpecialization = false;
+
+ LookupResult Previous(
+ SemaRef, Function->getDeclName(), SourceLocation(),
+ D->isLocalExternDecl() ? Sema::LookupRedeclarationWithLinkage
+ : Sema::LookupOrdinaryName,
+ Sema::ForRedeclaration);
+
+ if (DependentFunctionTemplateSpecializationInfo *Info
+ = D->getDependentSpecializationInfo()) {
+ assert(isFriend && "non-friend has dependent specialization info?");
+
+    // This needs to be set now; later friend-related checks rely on it.
+ Function->setObjectOfFriendDecl();
+
+ // Instantiate the explicit template arguments.
+ TemplateArgumentListInfo ExplicitArgs(Info->getLAngleLoc(),
+ Info->getRAngleLoc());
+ if (SemaRef.Subst(Info->getTemplateArgs(), Info->getNumTemplateArgs(),
+ ExplicitArgs, TemplateArgs))
+ return nullptr;
+
+ // Map the candidate templates to their instantiations.
+ for (unsigned I = 0, E = Info->getNumTemplates(); I != E; ++I) {
+ Decl *Temp = SemaRef.FindInstantiatedDecl(D->getLocation(),
+ Info->getTemplate(I),
+ TemplateArgs);
+ if (!Temp) return nullptr;
+
+ Previous.addDecl(cast<FunctionTemplateDecl>(Temp));
+ }
+
+ if (SemaRef.CheckFunctionTemplateSpecialization(Function,
+ &ExplicitArgs,
+ Previous))
+ Function->setInvalidDecl();
+
+ isExplicitSpecialization = true;
+
+ } else if (TemplateParams || !FunctionTemplate) {
+ // Look only into the namespace where the friend would be declared to
+ // find a previous declaration. This is the innermost enclosing namespace,
+ // as described in ActOnFriendFunctionDecl.
+ SemaRef.LookupQualifiedName(Previous, DC);
+
+ // In C++, the previous declaration we find might be a tag type
+ // (class or enum). In this case, the new declaration will hide the
+    // tag type. Note that this does not apply if we're declaring a
+ // typedef (C++ [dcl.typedef]p4).
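+    //
+    // For example (a sketch): a prior 'struct f;' found here is hidden by
+    // the instantiated function 'f' rather than conflicting with it.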
+ if (Previous.isSingleTagDecl())
+ Previous.clear();
+ }
+
+ SemaRef.CheckFunctionDeclaration(/*Scope*/ nullptr, Function, Previous,
+ isExplicitSpecialization);
+
+ NamedDecl *PrincipalDecl = (TemplateParams
+ ? cast<NamedDecl>(FunctionTemplate)
+ : Function);
+
+ // If the original function was part of a friend declaration,
+ // inherit its namespace state and add it to the owner.
+ if (isFriend) {
+ PrincipalDecl->setObjectOfFriendDecl();
+ DC->makeDeclVisibleInContext(PrincipalDecl);
+
+ bool QueuedInstantiation = false;
+
+ // C++11 [temp.friend]p4 (DR329):
+ // When a function is defined in a friend function declaration in a class
+ // template, the function is instantiated when the function is odr-used.
+ // The same restrictions on multiple declarations and definitions that
+ // apply to non-template function declarations and definitions also apply
+ // to these implicit definitions.
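+    //
+    // For example (a sketch; 'F' and 'k' are invented names):
+    //   template<typename T> struct F { friend void k() {} };
+    //   F<int> a;
+    //   F<long> b; // error: redefinition of 'k'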
+ if (D->isThisDeclarationADefinition()) {
+ // Check for a function body.
+ const FunctionDecl *Definition = nullptr;
+ if (Function->isDefined(Definition) &&
+ Definition->getTemplateSpecializationKind() == TSK_Undeclared) {
+ SemaRef.Diag(Function->getLocation(), diag::err_redefinition)
+ << Function->getDeclName();
+ SemaRef.Diag(Definition->getLocation(), diag::note_previous_definition);
+ }
+ // Check for redefinitions due to other instantiations of this or
+ // a similar friend function.
+ else for (auto R : Function->redecls()) {
+ if (R == Function)
+ continue;
+
+ // If some prior declaration of this function has been used, we need
+ // to instantiate its definition.
+ if (!QueuedInstantiation && R->isUsed(false)) {
+ if (MemberSpecializationInfo *MSInfo =
+ Function->getMemberSpecializationInfo()) {
+ if (MSInfo->getPointOfInstantiation().isInvalid()) {
+ SourceLocation Loc = R->getLocation(); // FIXME
+ MSInfo->setPointOfInstantiation(Loc);
+ SemaRef.PendingLocalImplicitInstantiations.push_back(
+ std::make_pair(Function, Loc));
+ QueuedInstantiation = true;
+ }
+ }
+ }
+
+ // If some prior declaration of this function was a friend with an
+ // uninstantiated definition, reject it.
+ if (R->getFriendObjectKind()) {
+ if (const FunctionDecl *RPattern =
+ R->getTemplateInstantiationPattern()) {
+ if (RPattern->isDefined(RPattern)) {
+ SemaRef.Diag(Function->getLocation(), diag::err_redefinition)
+ << Function->getDeclName();
+ SemaRef.Diag(R->getLocation(), diag::note_previous_definition);
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (Function->isLocalExternDecl() && !Function->getPreviousDecl())
+ DC->makeDeclVisibleInContext(PrincipalDecl);
+
+ if (Function->isOverloadedOperator() && !DC->isRecord() &&
+ PrincipalDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+ PrincipalDecl->setNonMemberOperator();
+
+ assert(!D->isDefaulted() && "only methods should be defaulted");
+ return Function;
+}
+
+Decl *
+TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
+ TemplateParameterList *TemplateParams,
+ bool IsClassScopeSpecialization) {
+ FunctionTemplateDecl *FunctionTemplate = D->getDescribedFunctionTemplate();
+ if (FunctionTemplate && !TemplateParams) {
+ // We are creating a function template specialization from a function
+ // template. Check whether there is already a function template
+ // specialization for this particular set of template arguments.
+ ArrayRef<TemplateArgument> Innermost = TemplateArgs.getInnermost();
+
+ void *InsertPos = nullptr;
+ FunctionDecl *SpecFunc
+ = FunctionTemplate->findSpecialization(Innermost, InsertPos);
+
+ // If we already have a function template specialization, return it.
+ if (SpecFunc)
+ return SpecFunc;
+ }
+
+ bool isFriend;
+ if (FunctionTemplate)
+ isFriend = (FunctionTemplate->getFriendObjectKind() != Decl::FOK_None);
+ else
+ isFriend = (D->getFriendObjectKind() != Decl::FOK_None);
+
+ bool MergeWithParentScope = (TemplateParams != nullptr) ||
+ !(isa<Decl>(Owner) &&
+ cast<Decl>(Owner)->isDefinedOutsideFunctionOrMethod());
+ LocalInstantiationScope Scope(SemaRef, MergeWithParentScope);
+
+ // Instantiate enclosing template arguments for friends.
+ SmallVector<TemplateParameterList *, 4> TempParamLists;
+ unsigned NumTempParamLists = 0;
+ if (isFriend && (NumTempParamLists = D->getNumTemplateParameterLists())) {
+ TempParamLists.resize(NumTempParamLists);
+ for (unsigned I = 0; I != NumTempParamLists; ++I) {
+ TemplateParameterList *TempParams = D->getTemplateParameterList(I);
+ TemplateParameterList *InstParams = SubstTemplateParams(TempParams);
+ if (!InstParams)
+ return nullptr;
+ TempParamLists[I] = InstParams;
+ }
+ }
+
+ SmallVector<ParmVarDecl *, 4> Params;
+ TypeSourceInfo *TInfo = SubstFunctionType(D, Params);
+ if (!TInfo)
+ return nullptr;
+ QualType T = adjustFunctionTypeForInstantiation(SemaRef.Context, D, TInfo);
+
+ NestedNameSpecifierLoc QualifierLoc = D->getQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc = SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc,
+ TemplateArgs);
+ if (!QualifierLoc)
+ return nullptr;
+ }
+
+ DeclContext *DC = Owner;
+ if (isFriend) {
+ if (QualifierLoc) {
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ DC = SemaRef.computeDeclContext(SS);
+
+ if (DC && SemaRef.RequireCompleteDeclContext(SS, DC))
+ return nullptr;
+ } else {
+ DC = SemaRef.FindInstantiatedContext(D->getLocation(),
+ D->getDeclContext(),
+ TemplateArgs);
+ }
+ if (!DC) return nullptr;
+ }
+
+ // Build the instantiated method declaration.
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(DC);
+ CXXMethodDecl *Method = nullptr;
+
+ SourceLocation StartLoc = D->getInnerLocStart();
+ DeclarationNameInfo NameInfo
+ = SemaRef.SubstDeclarationNameInfo(D->getNameInfo(), TemplateArgs);
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
+ Method = CXXConstructorDecl::Create(SemaRef.Context, Record,
+ StartLoc, NameInfo, T, TInfo,
+ Constructor->isExplicit(),
+ Constructor->isInlineSpecified(),
+ false, Constructor->isConstexpr());
+
+ // Claim that the instantiation of a constructor or constructor template
+ // inherits the same constructor that the template does.
+ if (CXXConstructorDecl *Inh = const_cast<CXXConstructorDecl *>(
+ Constructor->getInheritedConstructor())) {
+ // If we're instantiating a specialization of a function template, our
+ // "inherited constructor" will actually itself be a function template.
+ // Instantiate a declaration of it, too.
+ if (FunctionTemplate) {
+ assert(!TemplateParams && Inh->getDescribedFunctionTemplate() &&
+ !Inh->getParent()->isDependentContext() &&
+ "inheriting constructor template in dependent context?");
+ Sema::InstantiatingTemplate Inst(SemaRef, Constructor->getLocation(),
+ Inh);
+ if (Inst.isInvalid())
+ return nullptr;
+ Sema::ContextRAII SavedContext(SemaRef, Inh->getDeclContext());
+ LocalInstantiationScope LocalScope(SemaRef);
+
+ // Use the same template arguments that we deduced for the inheriting
+ // constructor. There's no way they could be deduced differently.
+ MultiLevelTemplateArgumentList InheritedArgs;
+ InheritedArgs.addOuterTemplateArguments(TemplateArgs.getInnermost());
+ Inh = cast_or_null<CXXConstructorDecl>(
+ SemaRef.SubstDecl(Inh, Inh->getDeclContext(), InheritedArgs));
+ if (!Inh)
+ return nullptr;
+ }
+ cast<CXXConstructorDecl>(Method)->setInheritedConstructor(Inh);
+ }
+ } else if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(D)) {
+ Method = CXXDestructorDecl::Create(SemaRef.Context, Record,
+ StartLoc, NameInfo, T, TInfo,
+ Destructor->isInlineSpecified(),
+ false);
+ } else if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(D)) {
+ Method = CXXConversionDecl::Create(SemaRef.Context, Record,
+ StartLoc, NameInfo, T, TInfo,
+ Conversion->isInlineSpecified(),
+ Conversion->isExplicit(),
+ Conversion->isConstexpr(),
+ Conversion->getLocEnd());
+ } else {
+ StorageClass SC = D->isStatic() ? SC_Static : SC_None;
+ Method = CXXMethodDecl::Create(SemaRef.Context, Record,
+ StartLoc, NameInfo, T, TInfo,
+ SC, D->isInlineSpecified(),
+ D->isConstexpr(), D->getLocEnd());
+ }
+
+ if (D->isInlined())
+ Method->setImplicitlyInline();
+
+ if (QualifierLoc)
+ Method->setQualifierInfo(QualifierLoc);
+
+ if (TemplateParams) {
+ // Our resulting instantiation is actually a function template, since we
+ // are substituting only the outer template parameters. For example, given
+ //
+ // template<typename T>
+ // struct X {
+ // template<typename U> void f(T, U);
+ // };
+ //
+ // X<int> x;
+ //
+ // We are instantiating the member template "f" within X<int>, which means
+ // substituting int for T, but leaving "f" as a member function template.
+ // Build the function template itself.
+ FunctionTemplate = FunctionTemplateDecl::Create(SemaRef.Context, Record,
+ Method->getLocation(),
+ Method->getDeclName(),
+ TemplateParams, Method);
+ if (isFriend) {
+ FunctionTemplate->setLexicalDeclContext(Owner);
+ FunctionTemplate->setObjectOfFriendDecl();
+ } else if (D->isOutOfLine())
+ FunctionTemplate->setLexicalDeclContext(D->getLexicalDeclContext());
+ Method->setDescribedFunctionTemplate(FunctionTemplate);
+ } else if (FunctionTemplate) {
+ // Record this function template specialization.
+ ArrayRef<TemplateArgument> Innermost = TemplateArgs.getInnermost();
+ Method->setFunctionTemplateSpecialization(FunctionTemplate,
+ TemplateArgumentList::CreateCopy(SemaRef.Context,
+ Innermost.begin(),
+ Innermost.size()),
+ /*InsertPos=*/nullptr);
+ } else if (!isFriend) {
+ // Record that this is an instantiation of a member function.
+ Method->setInstantiationOfMemberFunction(D, TSK_ImplicitInstantiation);
+ }
+
+ // If we are instantiating a member function defined
+ // out-of-line, the instantiation will have the same lexical
+ // context (which will be a namespace scope) as the template.
+ if (isFriend) {
+ if (NumTempParamLists)
+ Method->setTemplateParameterListsInfo(
+ SemaRef.Context,
+ llvm::makeArrayRef(TempParamLists.data(), NumTempParamLists));
+
+ Method->setLexicalDeclContext(Owner);
+ Method->setObjectOfFriendDecl();
+ } else if (D->isOutOfLine())
+ Method->setLexicalDeclContext(D->getLexicalDeclContext());
+
+ // Attach the parameters
+ for (unsigned P = 0; P < Params.size(); ++P)
+ Params[P]->setOwningFunction(Method);
+ Method->setParams(Params);
+
+ if (InitMethodInstantiation(Method, D))
+ Method->setInvalidDecl();
+
+ LookupResult Previous(SemaRef, NameInfo, Sema::LookupOrdinaryName,
+ Sema::ForRedeclaration);
+
+ if (!FunctionTemplate || TemplateParams || isFriend) {
+ SemaRef.LookupQualifiedName(Previous, Record);
+
+ // In C++, the previous declaration we find might be a tag type
+ // (class or enum). In this case, the new declaration will hide the
+    // tag type. Note that this does not apply if we're declaring a
+ // typedef (C++ [dcl.typedef]p4).
+ if (Previous.isSingleTagDecl())
+ Previous.clear();
+ }
+
+ if (!IsClassScopeSpecialization)
+ SemaRef.CheckFunctionDeclaration(nullptr, Method, Previous, false);
+
+ if (D->isPure())
+ SemaRef.CheckPureMethod(Method, SourceRange());
+
+ // Propagate access. For a non-friend declaration, the access is
+ // whatever we're propagating from. For a friend, it should be the
+ // previous declaration we just found.
+ if (isFriend && Method->getPreviousDecl())
+ Method->setAccess(Method->getPreviousDecl()->getAccess());
+ else
+ Method->setAccess(D->getAccess());
+ if (FunctionTemplate)
+ FunctionTemplate->setAccess(Method->getAccess());
+
+ SemaRef.CheckOverrideControl(Method);
+
+ // If a function is defined as defaulted or deleted, mark it as such now.
+ if (D->isExplicitlyDefaulted())
+ SemaRef.SetDeclDefaulted(Method, Method->getLocation());
+ if (D->isDeletedAsWritten())
+ SemaRef.SetDeclDeleted(Method, Method->getLocation());
+
+ // If there's a function template, let our caller handle it.
+ if (FunctionTemplate) {
+ // do nothing
+
+ // Don't hide a (potentially) valid declaration with an invalid one.
+ } else if (Method->isInvalidDecl() && !Previous.empty()) {
+ // do nothing
+
+ // Otherwise, check access to friends and make them visible.
+ } else if (isFriend) {
+ // We only need to re-check access for methods which we didn't
+ // manage to match during parsing.
+ if (!D->getPreviousDecl())
+ SemaRef.CheckFriendAccess(Method);
+
+ Record->makeDeclVisibleInContext(Method);
+
+ // Otherwise, add the declaration. We don't need to do this for
+ // class-scope specializations because we'll have matched them with
+ // the appropriate template.
+ } else if (!IsClassScopeSpecialization) {
+ Owner->addDecl(Method);
+ }
+
+ return Method;
+}
+
+Decl *TemplateDeclInstantiator::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
+ return VisitCXXMethodDecl(D);
+}
+
+Decl *TemplateDeclInstantiator::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
+ return VisitCXXMethodDecl(D);
+}
+
+Decl *TemplateDeclInstantiator::VisitCXXConversionDecl(CXXConversionDecl *D) {
+ return VisitCXXMethodDecl(D);
+}
+
+Decl *TemplateDeclInstantiator::VisitParmVarDecl(ParmVarDecl *D) {
+ return SemaRef.SubstParmVarDecl(D, TemplateArgs, /*indexAdjustment*/ 0, None,
+ /*ExpectParameterPack=*/ false);
+}
+
+Decl *TemplateDeclInstantiator::VisitTemplateTypeParmDecl(
+ TemplateTypeParmDecl *D) {
+ // TODO: don't always clone when decls are refcounted.
+ assert(D->getTypeForDecl()->isTemplateTypeParmType());
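+  // The instantiated parameter keeps its index but loses the template levels
+  // that were just substituted: e.g. (a sketch) a parameter written at depth
+  // 1 inside a member template of a class template sits at depth 0 once the
+  // enclosing class's arguments have been applied.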
+
+ TemplateTypeParmDecl *Inst =
+ TemplateTypeParmDecl::Create(SemaRef.Context, Owner,
+ D->getLocStart(), D->getLocation(),
+ D->getDepth() - TemplateArgs.getNumLevels(),
+ D->getIndex(), D->getIdentifier(),
+ D->wasDeclaredWithTypename(),
+ D->isParameterPack());
+ Inst->setAccess(AS_public);
+
+ if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited()) {
+ TypeSourceInfo *InstantiatedDefaultArg =
+ SemaRef.SubstType(D->getDefaultArgumentInfo(), TemplateArgs,
+ D->getDefaultArgumentLoc(), D->getDeclName());
+ if (InstantiatedDefaultArg)
+ Inst->setDefaultArgument(InstantiatedDefaultArg);
+ }
+
+ // Introduce this template parameter's instantiation into the instantiation
+ // scope.
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Inst);
+
+ return Inst;
+}
+
+Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl(
+ NonTypeTemplateParmDecl *D) {
+ // Substitute into the type of the non-type template parameter.
+ TypeLoc TL = D->getTypeSourceInfo()->getTypeLoc();
+ SmallVector<TypeSourceInfo *, 4> ExpandedParameterPackTypesAsWritten;
+ SmallVector<QualType, 4> ExpandedParameterPackTypes;
+ bool IsExpandedParameterPack = false;
+ TypeSourceInfo *DI;
+ QualType T;
+ bool Invalid = false;
+
+ if (D->isExpandedParameterPack()) {
+ // The non-type template parameter pack is an already-expanded pack
+ // expansion of types. Substitute into each of the expanded types.
+ ExpandedParameterPackTypes.reserve(D->getNumExpansionTypes());
+ ExpandedParameterPackTypesAsWritten.reserve(D->getNumExpansionTypes());
+ for (unsigned I = 0, N = D->getNumExpansionTypes(); I != N; ++I) {
+      TypeSourceInfo *NewDI =
+          SemaRef.SubstType(D->getExpansionTypeSourceInfo(I), TemplateArgs,
+                            D->getLocation(), D->getDeclName());
+ if (!NewDI)
+ return nullptr;
+
+ ExpandedParameterPackTypesAsWritten.push_back(NewDI);
+      QualType NewT =
+          SemaRef.CheckNonTypeTemplateParameterType(NewDI->getType(),
+                                                    D->getLocation());
+ if (NewT.isNull())
+ return nullptr;
+ ExpandedParameterPackTypes.push_back(NewT);
+ }
+
+ IsExpandedParameterPack = true;
+ DI = D->getTypeSourceInfo();
+ T = DI->getType();
+ } else if (D->isPackExpansion()) {
+ // The non-type template parameter pack's type is a pack expansion of types.
+ // Determine whether we need to expand this parameter pack into separate
+ // types.
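+    //
+    // For example (a sketch; 'P' and 'Q' are invented names):
+    //   template<typename... Ts> struct P {
+    //     template<Ts... Vals> struct Q; // pack expansion of NTTP types
+    //   };
+    // P<int, char> expands 'Ts... Vals' into an int and a char parameter.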
+ PackExpansionTypeLoc Expansion = TL.castAs<PackExpansionTypeLoc>();
+ TypeLoc Pattern = Expansion.getPatternLoc();
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ SemaRef.collectUnexpandedParameterPacks(Pattern, Unexpanded);
+
+ // Determine whether the set of unexpanded parameter packs can and should
+ // be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ Optional<unsigned> OrigNumExpansions
+ = Expansion.getTypePtr()->getNumExpansions();
+ Optional<unsigned> NumExpansions = OrigNumExpansions;
+ if (SemaRef.CheckParameterPacksForExpansion(Expansion.getEllipsisLoc(),
+ Pattern.getSourceRange(),
+ Unexpanded,
+ TemplateArgs,
+ Expand, RetainExpansion,
+ NumExpansions))
+ return nullptr;
+
+ if (Expand) {
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
+ TypeSourceInfo *NewDI = SemaRef.SubstType(Pattern, TemplateArgs,
+ D->getLocation(),
+ D->getDeclName());
+ if (!NewDI)
+ return nullptr;
+
+ ExpandedParameterPackTypesAsWritten.push_back(NewDI);
+ QualType NewT = SemaRef.CheckNonTypeTemplateParameterType(
+ NewDI->getType(),
+ D->getLocation());
+ if (NewT.isNull())
+ return nullptr;
+ ExpandedParameterPackTypes.push_back(NewT);
+ }
+
+ // Note that we have an expanded parameter pack. The "type" of this
+ // expanded parameter pack is the original expansion type, but callers
+ // will end up using the expanded parameter pack types for type-checking.
+ IsExpandedParameterPack = true;
+ DI = D->getTypeSourceInfo();
+ T = DI->getType();
+ } else {
+ // We cannot fully expand the pack expansion now, so substitute into the
+ // pattern and create a new pack expansion type.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, -1);
+ TypeSourceInfo *NewPattern = SemaRef.SubstType(Pattern, TemplateArgs,
+ D->getLocation(),
+ D->getDeclName());
+ if (!NewPattern)
+ return nullptr;
+
+ DI = SemaRef.CheckPackExpansion(NewPattern, Expansion.getEllipsisLoc(),
+ NumExpansions);
+ if (!DI)
+ return nullptr;
+
+ T = DI->getType();
+ }
+ } else {
+ // Simple case: substitution into a parameter that is not a parameter pack.
+ DI = SemaRef.SubstType(D->getTypeSourceInfo(), TemplateArgs,
+ D->getLocation(), D->getDeclName());
+ if (!DI)
+ return nullptr;
+
+ // Check that this type is acceptable for a non-type template parameter.
+ T = SemaRef.CheckNonTypeTemplateParameterType(DI->getType(),
+ D->getLocation());
+ if (T.isNull()) {
+ T = SemaRef.Context.IntTy;
+ Invalid = true;
+ }
+ }
+
+ NonTypeTemplateParmDecl *Param;
+ if (IsExpandedParameterPack)
+ Param = NonTypeTemplateParmDecl::Create(SemaRef.Context, Owner,
+ D->getInnerLocStart(),
+ D->getLocation(),
+ D->getDepth() - TemplateArgs.getNumLevels(),
+ D->getPosition(),
+ D->getIdentifier(), T,
+ DI,
+ ExpandedParameterPackTypes.data(),
+ ExpandedParameterPackTypes.size(),
+ ExpandedParameterPackTypesAsWritten.data());
+ else
+ Param = NonTypeTemplateParmDecl::Create(SemaRef.Context, Owner,
+ D->getInnerLocStart(),
+ D->getLocation(),
+ D->getDepth() - TemplateArgs.getNumLevels(),
+ D->getPosition(),
+ D->getIdentifier(), T,
+ D->isParameterPack(), DI);
+
+ Param->setAccess(AS_public);
+ if (Invalid)
+ Param->setInvalidDecl();
+
+ if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited()) {
+ ExprResult Value = SemaRef.SubstExpr(D->getDefaultArgument(), TemplateArgs);
+ if (!Value.isInvalid())
+ Param->setDefaultArgument(Value.get());
+ }
+
+ // Introduce this template parameter's instantiation into the instantiation
+ // scope.
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Param);
+ return Param;
+}
+
+static void collectUnexpandedParameterPacks(
+ Sema &S,
+ TemplateParameterList *Params,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ for (const auto &P : *Params) {
+ if (P->isTemplateParameterPack())
+ continue;
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(P))
+ S.collectUnexpandedParameterPacks(NTTP->getTypeSourceInfo()->getTypeLoc(),
+ Unexpanded);
+ if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(P))
+ collectUnexpandedParameterPacks(S, TTP->getTemplateParameters(),
+ Unexpanded);
+ }
+}
+
+Decl *
+TemplateDeclInstantiator::VisitTemplateTemplateParmDecl(
+ TemplateTemplateParmDecl *D) {
+ // Instantiate the template parameter list of the template template parameter.
+ TemplateParameterList *TempParams = D->getTemplateParameters();
+ TemplateParameterList *InstParams;
+ SmallVector<TemplateParameterList*, 8> ExpandedParams;
+
+ bool IsExpandedParameterPack = false;
+
+ if (D->isExpandedParameterPack()) {
+ // The template template parameter pack is an already-expanded pack
+ // expansion of template parameters. Substitute into each of the expanded
+ // parameters.
+ ExpandedParams.reserve(D->getNumExpansionTemplateParameters());
+ for (unsigned I = 0, N = D->getNumExpansionTemplateParameters();
+ I != N; ++I) {
+ LocalInstantiationScope Scope(SemaRef);
+ TemplateParameterList *Expansion =
+ SubstTemplateParams(D->getExpansionTemplateParameters(I));
+ if (!Expansion)
+ return nullptr;
+ ExpandedParams.push_back(Expansion);
+ }
+
+ IsExpandedParameterPack = true;
+ InstParams = TempParams;
+ } else if (D->isPackExpansion()) {
+ // The template template parameter pack expands to a pack of template
+ // template parameters. Determine whether we need to expand this parameter
+ // pack into separate parameters.
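+    //
+    // For instance (a sketch; 'R' and 'S' are invented names):
+    //   template<typename... Ts> struct R {
+    //     template<template<Ts> class... TTs> struct S;
+    //   };
+    // R<int, long>::S takes a template<int> and a template<long> parameter.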
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ collectUnexpandedParameterPacks(SemaRef, D->getTemplateParameters(),
+ Unexpanded);
+
+ // Determine whether the set of unexpanded parameter packs can and should
+ // be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ Optional<unsigned> NumExpansions;
+ if (SemaRef.CheckParameterPacksForExpansion(D->getLocation(),
+ TempParams->getSourceRange(),
+ Unexpanded,
+ TemplateArgs,
+ Expand, RetainExpansion,
+ NumExpansions))
+ return nullptr;
+
+ if (Expand) {
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
+ LocalInstantiationScope Scope(SemaRef);
+ TemplateParameterList *Expansion = SubstTemplateParams(TempParams);
+ if (!Expansion)
+ return nullptr;
+ ExpandedParams.push_back(Expansion);
+ }
+
+ // Note that we have an expanded parameter pack. The "type" of this
+ // expanded parameter pack is the original expansion type, but callers
+ // will end up using the expanded parameter pack types for type-checking.
+ IsExpandedParameterPack = true;
+ InstParams = TempParams;
+ } else {
+ // We cannot fully expand the pack expansion now, so just substitute
+ // into the pattern.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, -1);
+
+ LocalInstantiationScope Scope(SemaRef);
+ InstParams = SubstTemplateParams(TempParams);
+ if (!InstParams)
+ return nullptr;
+ }
+ } else {
+ // Perform the actual substitution of template parameters within a new,
+ // local instantiation scope.
+ LocalInstantiationScope Scope(SemaRef);
+ InstParams = SubstTemplateParams(TempParams);
+ if (!InstParams)
+ return nullptr;
+ }
+
+ // Build the template template parameter.
+ TemplateTemplateParmDecl *Param;
+ if (IsExpandedParameterPack)
+ Param = TemplateTemplateParmDecl::Create(SemaRef.Context, Owner,
+ D->getLocation(),
+ D->getDepth() - TemplateArgs.getNumLevels(),
+ D->getPosition(),
+ D->getIdentifier(), InstParams,
+ ExpandedParams);
+ else
+ Param = TemplateTemplateParmDecl::Create(SemaRef.Context, Owner,
+ D->getLocation(),
+ D->getDepth() - TemplateArgs.getNumLevels(),
+ D->getPosition(),
+ D->isParameterPack(),
+ D->getIdentifier(), InstParams);
+ if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited()) {
+ NestedNameSpecifierLoc QualifierLoc =
+ D->getDefaultArgument().getTemplateQualifierLoc();
+ QualifierLoc =
+ SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc, TemplateArgs);
+ TemplateName TName = SemaRef.SubstTemplateName(
+ QualifierLoc, D->getDefaultArgument().getArgument().getAsTemplate(),
+ D->getDefaultArgument().getTemplateNameLoc(), TemplateArgs);
+ if (!TName.isNull())
+ Param->setDefaultArgument(
+ SemaRef.Context,
+ TemplateArgumentLoc(TemplateArgument(TName),
+ D->getDefaultArgument().getTemplateQualifierLoc(),
+ D->getDefaultArgument().getTemplateNameLoc()));
+ }
+ Param->setAccess(AS_public);
+
+ // Introduce this template parameter's instantiation into the instantiation
+ // scope.
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Param);
+
+ return Param;
+}
+
+Decl *TemplateDeclInstantiator::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
+ // Using directives are never dependent (and never contain any types or
+ // expressions), so they require no explicit instantiation work.
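+  //
+  // For example (a sketch): in
+  //   template<typename T> void f() { using namespace N; }
+  // the directive is simply recreated for each instantiation of f.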
+
+ UsingDirectiveDecl *Inst
+ = UsingDirectiveDecl::Create(SemaRef.Context, Owner, D->getLocation(),
+ D->getNamespaceKeyLocation(),
+ D->getQualifierLoc(),
+ D->getIdentLocation(),
+ D->getNominatedNamespace(),
+ D->getCommonAncestor());
+
+ // Add the using directive to its declaration context
+ // only if this is not a function or method.
+ if (!Owner->isFunctionOrMethod())
+ Owner->addDecl(Inst);
+
+ return Inst;
+}
+
+Decl *TemplateDeclInstantiator::VisitUsingDecl(UsingDecl *D) {
+ // The nested name specifier may be dependent, for example
+ // template <typename T> struct t {
+ // struct s1 { T f1(); };
+ // struct s2 : s1 { using s1::f1; };
+ // };
+ // template struct t<int>;
+ // Here, in using s1::f1, s1 refers to t<T>::s1;
+ // we need to substitute for t<int>::s1.
+ NestedNameSpecifierLoc QualifierLoc
+ = SemaRef.SubstNestedNameSpecifierLoc(D->getQualifierLoc(),
+ TemplateArgs);
+ if (!QualifierLoc)
+ return nullptr;
+
+ // The name info is non-dependent, so no transformation
+ // is required.
+ DeclarationNameInfo NameInfo = D->getNameInfo();
+
+ // We only need to do redeclaration lookups if we're in a class
+ // scope (in fact, it's not really even possible in non-class
+ // scopes).
+ bool CheckRedeclaration = Owner->isRecord();
+
+ LookupResult Prev(SemaRef, NameInfo, Sema::LookupUsingDeclName,
+ Sema::ForRedeclaration);
+
+ UsingDecl *NewUD = UsingDecl::Create(SemaRef.Context, Owner,
+ D->getUsingLoc(),
+ QualifierLoc,
+ NameInfo,
+ D->hasTypename());
+
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ if (CheckRedeclaration) {
+ Prev.setHideTags(false);
+ SemaRef.LookupQualifiedName(Prev, Owner);
+
+ // Check for invalid redeclarations.
+ if (SemaRef.CheckUsingDeclRedeclaration(D->getUsingLoc(),
+ D->hasTypename(), SS,
+ D->getLocation(), Prev))
+ NewUD->setInvalidDecl();
+
+ }
+
+ if (!NewUD->isInvalidDecl() &&
+ SemaRef.CheckUsingDeclQualifier(D->getUsingLoc(), SS, NameInfo,
+ D->getLocation()))
+ NewUD->setInvalidDecl();
+
+ SemaRef.Context.setInstantiatedFromUsingDecl(NewUD, D);
+ NewUD->setAccess(D->getAccess());
+ Owner->addDecl(NewUD);
+
+ // Don't process the shadow decls for an invalid decl.
+ if (NewUD->isInvalidDecl())
+ return NewUD;
+
+ if (NameInfo.getName().getNameKind() == DeclarationName::CXXConstructorName) {
+ SemaRef.CheckInheritingConstructorUsingDecl(NewUD);
+ return NewUD;
+ }
+
+ bool isFunctionScope = Owner->isFunctionOrMethod();
+
+ // Process the shadow decls.
+ for (auto *Shadow : D->shadows()) {
+ NamedDecl *InstTarget =
+ cast_or_null<NamedDecl>(SemaRef.FindInstantiatedDecl(
+ Shadow->getLocation(), Shadow->getTargetDecl(), TemplateArgs));
+ if (!InstTarget)
+ return nullptr;
+
+ UsingShadowDecl *PrevDecl = nullptr;
+ if (CheckRedeclaration) {
+ if (SemaRef.CheckUsingShadowDecl(NewUD, InstTarget, Prev, PrevDecl))
+ continue;
+ } else if (UsingShadowDecl *OldPrev =
+ getPreviousDeclForInstantiation(Shadow)) {
+ PrevDecl = cast_or_null<UsingShadowDecl>(SemaRef.FindInstantiatedDecl(
+ Shadow->getLocation(), OldPrev, TemplateArgs));
+ }
+
+ UsingShadowDecl *InstShadow =
+ SemaRef.BuildUsingShadowDecl(/*Scope*/nullptr, NewUD, InstTarget,
+ PrevDecl);
+ SemaRef.Context.setInstantiatedFromUsingShadowDecl(InstShadow, Shadow);
+
+ if (isFunctionScope)
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(Shadow, InstShadow);
+ }
+
+ return NewUD;
+}
+
+Decl *TemplateDeclInstantiator::VisitUsingShadowDecl(UsingShadowDecl *D) {
+ // Ignore these; we handle them in bulk when processing the UsingDecl.
+ return nullptr;
+}
+
+Decl *TemplateDeclInstantiator::VisitUnresolvedUsingTypenameDecl(
+    UnresolvedUsingTypenameDecl *D) {
+ NestedNameSpecifierLoc QualifierLoc
+ = SemaRef.SubstNestedNameSpecifierLoc(D->getQualifierLoc(),
+ TemplateArgs);
+ if (!QualifierLoc)
+ return nullptr;
+
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+
+ // Since NameInfo refers to a typename, it cannot be a C++ special name.
+ // Hence, no transformation is required for it.
+ DeclarationNameInfo NameInfo(D->getDeclName(), D->getLocation());
+ NamedDecl *UD =
+ SemaRef.BuildUsingDeclaration(/*Scope*/ nullptr, D->getAccess(),
+ D->getUsingLoc(), SS, NameInfo, nullptr,
+ /*instantiation*/ true,
+ /*typename*/ true, D->getTypenameLoc());
+ if (UD)
+ SemaRef.Context.setInstantiatedFromUsingDecl(cast<UsingDecl>(UD), D);
+
+ return UD;
+}
+
+Decl *TemplateDeclInstantiator::VisitUnresolvedUsingValueDecl(
+    UnresolvedUsingValueDecl *D) {
+ NestedNameSpecifierLoc QualifierLoc
+ = SemaRef.SubstNestedNameSpecifierLoc(D->getQualifierLoc(), TemplateArgs);
+ if (!QualifierLoc)
+ return nullptr;
+
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+
+ DeclarationNameInfo NameInfo
+ = SemaRef.SubstDeclarationNameInfo(D->getNameInfo(), TemplateArgs);
+
+ NamedDecl *UD =
+ SemaRef.BuildUsingDeclaration(/*Scope*/ nullptr, D->getAccess(),
+ D->getUsingLoc(), SS, NameInfo, nullptr,
+ /*instantiation*/ true,
+ /*typename*/ false, SourceLocation());
+ if (UD)
+ SemaRef.Context.setInstantiatedFromUsingDecl(cast<UsingDecl>(UD), D);
+
+ return UD;
+}
+
+Decl *TemplateDeclInstantiator::VisitClassScopeFunctionSpecializationDecl(
+ ClassScopeFunctionSpecializationDecl *Decl) {
+ CXXMethodDecl *OldFD = Decl->getSpecialization();
+ CXXMethodDecl *NewFD =
+ cast_or_null<CXXMethodDecl>(VisitCXXMethodDecl(OldFD, nullptr, true));
+ if (!NewFD)
+ return nullptr;
+
+ LookupResult Previous(SemaRef, NewFD->getNameInfo(), Sema::LookupOrdinaryName,
+ Sema::ForRedeclaration);
+
+ TemplateArgumentListInfo TemplateArgs;
+ TemplateArgumentListInfo *TemplateArgsPtr = nullptr;
+ if (Decl->hasExplicitTemplateArgs()) {
+ TemplateArgs = Decl->templateArgs();
+ TemplateArgsPtr = &TemplateArgs;
+ }
+
+ SemaRef.LookupQualifiedName(Previous, SemaRef.CurContext);
+ if (SemaRef.CheckFunctionTemplateSpecialization(NewFD, TemplateArgsPtr,
+ Previous)) {
+ NewFD->setInvalidDecl();
+ return NewFD;
+ }
+
+ // Associate the specialization with the pattern.
+ FunctionDecl *Specialization = cast<FunctionDecl>(Previous.getFoundDecl());
+ assert(Specialization && "Class scope Specialization is null");
+ SemaRef.Context.setClassScopeSpecializationPattern(Specialization, OldFD);
+
+ return NewFD;
+}
+
+Decl *TemplateDeclInstantiator::VisitOMPThreadPrivateDecl(
+ OMPThreadPrivateDecl *D) {
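+  // Rebuild each variable named by the pragma; e.g. (a sketch):
+  //   template <typename T> struct S {
+  //     static T s;
+  //   #pragma omp threadprivate(s)
+  //   };
+  // where 's' must be substituted to refer to S<int>::s.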
+ SmallVector<Expr *, 5> Vars;
+ for (auto *I : D->varlists()) {
+ Expr *Var = SemaRef.SubstExpr(I, TemplateArgs).get();
+ assert(isa<DeclRefExpr>(Var) && "threadprivate arg is not a DeclRefExpr");
+ Vars.push_back(Var);
+ }
+
+ OMPThreadPrivateDecl *TD =
+ SemaRef.CheckOMPThreadPrivateDecl(D->getLocation(), Vars);
+
+ TD->setAccess(AS_public);
+ Owner->addDecl(TD);
+
+ return TD;
+}
+
+Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D) {
+ return VisitFunctionDecl(D, nullptr);
+}
+
+Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D) {
+ return VisitCXXMethodDecl(D, nullptr);
+}
+
+Decl *TemplateDeclInstantiator::VisitRecordDecl(RecordDecl *D) {
+ llvm_unreachable("There are only CXXRecordDecls in C++");
+}
+
+Decl *
+TemplateDeclInstantiator::VisitClassTemplateSpecializationDecl(
+ ClassTemplateSpecializationDecl *D) {
+ // As a MS extension, we permit class-scope explicit specialization
+ // of member class templates.
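+  //
+  // For example (a sketch; accepted under -fms-extensions):
+  //   template<typename T> struct Outer {
+  //     template<typename U> struct Inner {};
+  //     template<> struct Inner<int> {}; // class-scope explicit spec.
+  //   };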
+ ClassTemplateDecl *ClassTemplate = D->getSpecializedTemplate();
+ assert(ClassTemplate->getDeclContext()->isRecord() &&
+ D->getTemplateSpecializationKind() == TSK_ExplicitSpecialization &&
+ "can only instantiate an explicit specialization "
+ "for a member class template");
+
+  // Look up the already-instantiated declaration in the instantiation
+ // of the class template. FIXME: Diagnose or assert if this fails?
+ DeclContext::lookup_result Found
+ = Owner->lookup(ClassTemplate->getDeclName());
+ if (Found.empty())
+ return nullptr;
+ ClassTemplateDecl *InstClassTemplate
+ = dyn_cast<ClassTemplateDecl>(Found.front());
+ if (!InstClassTemplate)
+ return nullptr;
+
+ // Substitute into the template arguments of the class template explicit
+ // specialization.
+ TemplateSpecializationTypeLoc Loc = D->getTypeAsWritten()->getTypeLoc().
+ castAs<TemplateSpecializationTypeLoc>();
+ TemplateArgumentListInfo InstTemplateArgs(Loc.getLAngleLoc(),
+ Loc.getRAngleLoc());
+ SmallVector<TemplateArgumentLoc, 4> ArgLocs;
+ for (unsigned I = 0; I != Loc.getNumArgs(); ++I)
+ ArgLocs.push_back(Loc.getArgLoc(I));
+ if (SemaRef.Subst(ArgLocs.data(), ArgLocs.size(),
+ InstTemplateArgs, TemplateArgs))
+ return nullptr;
+
+ // Check that the template argument list is well-formed for this
+ // class template.
+ SmallVector<TemplateArgument, 4> Converted;
+ if (SemaRef.CheckTemplateArgumentList(InstClassTemplate,
+ D->getLocation(),
+ InstTemplateArgs,
+ false,
+ Converted))
+ return nullptr;
+
+ // Figure out where to insert this class template explicit specialization
+ // in the member template's set of class template explicit specializations.
+ void *InsertPos = nullptr;
+ ClassTemplateSpecializationDecl *PrevDecl =
+ InstClassTemplate->findSpecialization(Converted, InsertPos);
+
+ // Check whether we've already seen a conflicting instantiation of this
+ // declaration (for instance, if there was a prior implicit instantiation).
+ bool Ignored;
+ if (PrevDecl &&
+ SemaRef.CheckSpecializationInstantiationRedecl(D->getLocation(),
+ D->getSpecializationKind(),
+ PrevDecl,
+ PrevDecl->getSpecializationKind(),
+ PrevDecl->getPointOfInstantiation(),
+ Ignored))
+ return nullptr;
+
+ // If PrevDecl was a definition and D is also a definition, diagnose.
+ // This happens in cases like:
+ //
+ // template<typename T, typename U>
+ // struct Outer {
+ // template<typename X> struct Inner;
+ // template<> struct Inner<T> {};
+ // template<> struct Inner<U> {};
+ // };
+ //
+ // Outer<int, int> outer; // error: the explicit specializations of Inner
+ // // have the same signature.
+ if (PrevDecl && PrevDecl->getDefinition() &&
+ D->isThisDeclarationADefinition()) {
+ SemaRef.Diag(D->getLocation(), diag::err_redefinition) << PrevDecl;
+ SemaRef.Diag(PrevDecl->getDefinition()->getLocation(),
+ diag::note_previous_definition);
+ return nullptr;
+ }
+
+  // Create the class template explicit specialization declaration.
+ ClassTemplateSpecializationDecl *InstD
+ = ClassTemplateSpecializationDecl::Create(SemaRef.Context,
+ D->getTagKind(),
+ Owner,
+ D->getLocStart(),
+ D->getLocation(),
+ InstClassTemplate,
+ Converted.data(),
+ Converted.size(),
+ PrevDecl);
+
+  // Add this explicit specialization to the class template's set of
+  // specializations.
+ if (!PrevDecl)
+ InstClassTemplate->AddSpecialization(InstD, InsertPos);
+
+ // Substitute the nested name specifier, if any.
+ if (SubstQualifier(D, InstD))
+ return nullptr;
+
+ // Build the canonical type that describes the converted template
+ // arguments of the class template explicit specialization.
+ QualType CanonType = SemaRef.Context.getTemplateSpecializationType(
+ TemplateName(InstClassTemplate), Converted.data(), Converted.size(),
+ SemaRef.Context.getRecordType(InstD));
+
+ // Build the fully-sugared type for this class template
+ // specialization as the user wrote in the specialization
+ // itself. This means that we'll pretty-print the type retrieved
+ // from the specialization's declaration the way that the user
+ // actually wrote the specialization, rather than formatting the
+ // name based on the "canonical" representation used to store the
+ // template arguments in the specialization.
+ TypeSourceInfo *WrittenTy = SemaRef.Context.getTemplateSpecializationTypeInfo(
+ TemplateName(InstClassTemplate), D->getLocation(), InstTemplateArgs,
+ CanonType);
+
+ InstD->setAccess(D->getAccess());
+ InstD->setInstantiationOfMemberClass(D, TSK_ImplicitInstantiation);
+ InstD->setSpecializationKind(D->getSpecializationKind());
+ InstD->setTypeAsWritten(WrittenTy);
+ InstD->setExternLoc(D->getExternLoc());
+ InstD->setTemplateKeywordLoc(D->getTemplateKeywordLoc());
+
+ Owner->addDecl(InstD);
+
+ // Instantiate the members of the class-scope explicit specialization eagerly.
+ // We don't have support for lazy instantiation of an explicit specialization
+ // yet, and MSVC eagerly instantiates in this case.
+ if (D->isThisDeclarationADefinition() &&
+ SemaRef.InstantiateClass(D->getLocation(), InstD, D, TemplateArgs,
+ TSK_ImplicitInstantiation,
+ /*Complain=*/true))
+ return nullptr;
+
+ return InstD;
+}
+
+Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
+ VarTemplateSpecializationDecl *D) {
+ TemplateArgumentListInfo VarTemplateArgsInfo;
+ VarTemplateDecl *VarTemplate = D->getSpecializedTemplate();
+ assert(VarTemplate &&
+ "A template specialization without specialized template?");
+
+ // Substitute the current template arguments.
+ const TemplateArgumentListInfo &TemplateArgsInfo = D->getTemplateArgsInfo();
+ VarTemplateArgsInfo.setLAngleLoc(TemplateArgsInfo.getLAngleLoc());
+ VarTemplateArgsInfo.setRAngleLoc(TemplateArgsInfo.getRAngleLoc());
+
+ if (SemaRef.Subst(TemplateArgsInfo.getArgumentArray(),
+ TemplateArgsInfo.size(), VarTemplateArgsInfo, TemplateArgs))
+ return nullptr;
+
+ // Check that the template argument list is well-formed for this template.
+ SmallVector<TemplateArgument, 4> Converted;
+ if (SemaRef.CheckTemplateArgumentList(
+ VarTemplate, VarTemplate->getLocStart(),
+ const_cast<TemplateArgumentListInfo &>(VarTemplateArgsInfo), false,
+ Converted))
+ return nullptr;
+
+ // Find the variable template specialization declaration that
+ // corresponds to these arguments.
+ void *InsertPos = nullptr;
+ if (VarTemplateSpecializationDecl *VarSpec = VarTemplate->findSpecialization(
+ Converted, InsertPos))
+ // If we already have a variable template specialization, return it.
+ return VarSpec;
+
+ return VisitVarTemplateSpecializationDecl(VarTemplate, D, InsertPos,
+ VarTemplateArgsInfo, Converted);
+}
+
+Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
+ VarTemplateDecl *VarTemplate, VarDecl *D, void *InsertPos,
+ const TemplateArgumentListInfo &TemplateArgsInfo,
+ ArrayRef<TemplateArgument> Converted) {
+
+ // If this is the variable for an anonymous struct or union,
+ // instantiate the anonymous struct/union type first.
+ if (const RecordType *RecordTy = D->getType()->getAs<RecordType>())
+ if (RecordTy->getDecl()->isAnonymousStructOrUnion())
+ if (!VisitCXXRecordDecl(cast<CXXRecordDecl>(RecordTy->getDecl())))
+ return nullptr;
+
+ // Do substitution on the type of the declaration
+ TypeSourceInfo *DI =
+ SemaRef.SubstType(D->getTypeSourceInfo(), TemplateArgs,
+ D->getTypeSpecStartLoc(), D->getDeclName());
+ if (!DI)
+ return nullptr;
+
+ if (DI->getType()->isFunctionType()) {
+ SemaRef.Diag(D->getLocation(), diag::err_variable_instantiates_to_function)
+ << D->isStaticDataMember() << DI->getType();
+ return nullptr;
+ }
+
+ // Build the instantiated declaration
+ VarTemplateSpecializationDecl *Var = VarTemplateSpecializationDecl::Create(
+ SemaRef.Context, Owner, D->getInnerLocStart(), D->getLocation(),
+ VarTemplate, DI->getType(), DI, D->getStorageClass(), Converted.data(),
+ Converted.size());
+ Var->setTemplateArgsInfo(TemplateArgsInfo);
+ if (InsertPos)
+ VarTemplate->AddSpecialization(Var, InsertPos);
+
+ // Substitute the nested name specifier, if any.
+ if (SubstQualifier(D, Var))
+ return nullptr;
+
+ SemaRef.BuildVariableInstantiation(Var, D, TemplateArgs, LateAttrs,
+ Owner, StartingScope);
+
+ return Var;
+}
+
+Decl *
+TemplateDeclInstantiator::VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *D) {
+ llvm_unreachable("@defs is not supported in Objective-C++");
+}
+
+Decl *TemplateDeclInstantiator::VisitFriendTemplateDecl(FriendTemplateDecl *D) {
+ // FIXME: We need to be able to instantiate FriendTemplateDecls.
+ unsigned DiagID = SemaRef.getDiagnostics().getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "cannot instantiate %0 yet");
+ SemaRef.Diag(D->getLocation(), DiagID)
+ << D->getDeclKindName();
+
+ return nullptr;
+}
+
+Decl *TemplateDeclInstantiator::VisitDecl(Decl *D) {
+ llvm_unreachable("Unexpected decl");
+}
+
+Decl *Sema::SubstDecl(Decl *D, DeclContext *Owner,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ TemplateDeclInstantiator Instantiator(*this, Owner, TemplateArgs);
+ if (D->isInvalidDecl())
+ return nullptr;
+
+ return Instantiator.Visit(D);
+}
+
+/// \brief Instantiates a nested template parameter list in the current
+/// instantiation context.
+///
+/// \param L The parameter list to instantiate
+///
+/// \returns NULL if there was an error
+TemplateParameterList *
+TemplateDeclInstantiator::SubstTemplateParams(TemplateParameterList *L) {
+ // Get errors for all the parameters before bailing out.
+ bool Invalid = false;
+
+ unsigned N = L->size();
+ typedef SmallVector<NamedDecl *, 8> ParamVector;
+ ParamVector Params;
+ Params.reserve(N);
+ for (auto &P : *L) {
+ NamedDecl *D = cast_or_null<NamedDecl>(Visit(P));
+ Params.push_back(D);
+ Invalid = Invalid || !D || D->isInvalidDecl();
+ }
+
+ // Clean up if we had an error.
+ if (Invalid)
+ return nullptr;
+
+ TemplateParameterList *InstL
+ = TemplateParameterList::Create(SemaRef.Context, L->getTemplateLoc(),
+ L->getLAngleLoc(), Params,
+ L->getRAngleLoc());
+ return InstL;
+}
+
+/// \brief Instantiate the declaration of a class template partial
+/// specialization.
+///
+/// \param ClassTemplate the (instantiated) class template that is partially
+/// specialized by the instantiation of \p PartialSpec.
+///
+/// \param PartialSpec the (uninstantiated) class template partial
+/// specialization that we are instantiating.
+///
+/// \returns The instantiated partial specialization, if successful; otherwise,
+/// NULL to indicate an error.
+ClassTemplatePartialSpecializationDecl *
+TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
+ ClassTemplateDecl *ClassTemplate,
+ ClassTemplatePartialSpecializationDecl *PartialSpec) {
+ // Create a local instantiation scope for this class template partial
+ // specialization, which will contain the instantiations of the template
+ // parameters.
+ LocalInstantiationScope Scope(SemaRef);
+
+ // Substitute into the template parameters of the class template partial
+ // specialization.
+ TemplateParameterList *TempParams = PartialSpec->getTemplateParameters();
+ TemplateParameterList *InstParams = SubstTemplateParams(TempParams);
+ if (!InstParams)
+ return nullptr;
+
+ // Substitute into the template arguments of the class template partial
+ // specialization.
+ const ASTTemplateArgumentListInfo *TemplArgInfo
+ = PartialSpec->getTemplateArgsAsWritten();
+ TemplateArgumentListInfo InstTemplateArgs(TemplArgInfo->LAngleLoc,
+ TemplArgInfo->RAngleLoc);
+ if (SemaRef.Subst(TemplArgInfo->getTemplateArgs(),
+ TemplArgInfo->NumTemplateArgs,
+ InstTemplateArgs, TemplateArgs))
+ return nullptr;
+
+ // Check that the template argument list is well-formed for this
+ // class template.
+ SmallVector<TemplateArgument, 4> Converted;
+ if (SemaRef.CheckTemplateArgumentList(ClassTemplate,
+ PartialSpec->getLocation(),
+ InstTemplateArgs,
+ false,
+ Converted))
+ return nullptr;
+
+ // Figure out where to insert this class template partial specialization
+ // in the member template's set of class template partial specializations.
+ void *InsertPos = nullptr;
+ ClassTemplateSpecializationDecl *PrevDecl
+ = ClassTemplate->findPartialSpecialization(Converted, InsertPos);
+
+ // Build the canonical type that describes the converted template
+ // arguments of the class template partial specialization.
+ QualType CanonType
+ = SemaRef.Context.getTemplateSpecializationType(TemplateName(ClassTemplate),
+ Converted.data(),
+ Converted.size());
+
+ // Build the fully-sugared type for this class template
+ // specialization as the user wrote in the specialization
+ // itself. This means that we'll pretty-print the type retrieved
+ // from the specialization's declaration the way that the user
+ // actually wrote the specialization, rather than formatting the
+ // name based on the "canonical" representation used to store the
+ // template arguments in the specialization.
+ TypeSourceInfo *WrittenTy
+ = SemaRef.Context.getTemplateSpecializationTypeInfo(
+ TemplateName(ClassTemplate),
+ PartialSpec->getLocation(),
+ InstTemplateArgs,
+ CanonType);
+
+ if (PrevDecl) {
+ // We've already seen a partial specialization with the same template
+ // parameters and template arguments. This can happen, for example, when
+ // substituting the outer template arguments ends up causing two
+ // class template partial specializations of a member class template
+ // to have identical forms, e.g.,
+ //
+ // template<typename T, typename U>
+ // struct Outer {
+ // template<typename X, typename Y> struct Inner;
+ // template<typename Y> struct Inner<T, Y>;
+ // template<typename Y> struct Inner<U, Y>;
+ // };
+ //
+ // Outer<int, int> outer; // error: the partial specializations of Inner
+ // // have the same signature.
+ SemaRef.Diag(PartialSpec->getLocation(), diag::err_partial_spec_redeclared)
+ << WrittenTy->getType();
+ SemaRef.Diag(PrevDecl->getLocation(), diag::note_prev_partial_spec_here)
+ << SemaRef.Context.getTypeDeclType(PrevDecl);
+ return nullptr;
+ }
+
+ // Create the class template partial specialization declaration.
+ ClassTemplatePartialSpecializationDecl *InstPartialSpec
+ = ClassTemplatePartialSpecializationDecl::Create(SemaRef.Context,
+ PartialSpec->getTagKind(),
+ Owner,
+ PartialSpec->getLocStart(),
+ PartialSpec->getLocation(),
+ InstParams,
+ ClassTemplate,
+ Converted.data(),
+ Converted.size(),
+ InstTemplateArgs,
+ CanonType,
+ nullptr);
+ // Substitute the nested name specifier, if any.
+ if (SubstQualifier(PartialSpec, InstPartialSpec))
+ return nullptr;
+
+ InstPartialSpec->setInstantiatedFromMember(PartialSpec);
+ InstPartialSpec->setTypeAsWritten(WrittenTy);
+
+ // Add this partial specialization to the set of class template partial
+ // specializations.
+ ClassTemplate->AddPartialSpecialization(InstPartialSpec,
+ /*InsertPos=*/nullptr);
+ return InstPartialSpec;
+}
+
+/// \brief Instantiate the declaration of a variable template partial
+/// specialization.
+///
+/// \param VarTemplate the (instantiated) variable template that is partially
+/// specialized by the instantiation of \p PartialSpec.
+///
+/// \param PartialSpec the (uninstantiated) variable template partial
+/// specialization that we are instantiating.
+///
+/// \returns The instantiated partial specialization, if successful; otherwise,
+/// NULL to indicate an error.
+VarTemplatePartialSpecializationDecl *
+TemplateDeclInstantiator::InstantiateVarTemplatePartialSpecialization(
+ VarTemplateDecl *VarTemplate,
+ VarTemplatePartialSpecializationDecl *PartialSpec) {
+ // Create a local instantiation scope for this variable template partial
+ // specialization, which will contain the instantiations of the template
+ // parameters.
+ LocalInstantiationScope Scope(SemaRef);
+
+ // Substitute into the template parameters of the variable template partial
+ // specialization.
+ TemplateParameterList *TempParams = PartialSpec->getTemplateParameters();
+ TemplateParameterList *InstParams = SubstTemplateParams(TempParams);
+ if (!InstParams)
+ return nullptr;
+
+ // Substitute into the template arguments of the variable template partial
+ // specialization.
+ const ASTTemplateArgumentListInfo *TemplArgInfo
+ = PartialSpec->getTemplateArgsAsWritten();
+ TemplateArgumentListInfo InstTemplateArgs(TemplArgInfo->LAngleLoc,
+ TemplArgInfo->RAngleLoc);
+ if (SemaRef.Subst(TemplArgInfo->getTemplateArgs(),
+ TemplArgInfo->NumTemplateArgs,
+ InstTemplateArgs, TemplateArgs))
+ return nullptr;
+
+ // Check that the template argument list is well-formed for this
+ // class template.
+ SmallVector<TemplateArgument, 4> Converted;
+ if (SemaRef.CheckTemplateArgumentList(VarTemplate, PartialSpec->getLocation(),
+ InstTemplateArgs, false, Converted))
+ return nullptr;
+
+ // Figure out where to insert this variable template partial specialization
+ // in the member template's set of variable template partial specializations.
+ void *InsertPos = nullptr;
+ VarTemplateSpecializationDecl *PrevDecl =
+ VarTemplate->findPartialSpecialization(Converted, InsertPos);
+
+ // Build the canonical type that describes the converted template
+ // arguments of the variable template partial specialization.
+ QualType CanonType = SemaRef.Context.getTemplateSpecializationType(
+ TemplateName(VarTemplate), Converted.data(), Converted.size());
+
+ // Build the fully-sugared type for this variable template
+ // specialization as the user wrote in the specialization
+ // itself. This means that we'll pretty-print the type retrieved
+ // from the specialization's declaration the way that the user
+ // actually wrote the specialization, rather than formatting the
+ // name based on the "canonical" representation used to store the
+ // template arguments in the specialization.
+ TypeSourceInfo *WrittenTy = SemaRef.Context.getTemplateSpecializationTypeInfo(
+ TemplateName(VarTemplate), PartialSpec->getLocation(), InstTemplateArgs,
+ CanonType);
+
+ if (PrevDecl) {
+ // We've already seen a partial specialization with the same template
+ // parameters and template arguments. This can happen, for example, when
+ // substituting the outer template arguments ends up causing two
+ // variable template partial specializations of a member variable template
+ // to have identical forms, e.g.,
+ //
+ // template<typename T, typename U>
+ // struct Outer {
+  //     template<typename X, typename Y> pair<X, Y> p;
+  //     template<typename Y> pair<T, Y> p<T, Y>;
+  //     template<typename Y> pair<U, Y> p<U, Y>;
+  //   };
+  //
+  //   Outer<int, int> outer; // error: the partial specializations of 'p'
+  //                          // have the same signature.
+ SemaRef.Diag(PartialSpec->getLocation(),
+ diag::err_var_partial_spec_redeclared)
+ << WrittenTy->getType();
+ SemaRef.Diag(PrevDecl->getLocation(),
+ diag::note_var_prev_partial_spec_here);
+ return nullptr;
+ }
+
+ // Do substitution on the type of the declaration
+ TypeSourceInfo *DI = SemaRef.SubstType(
+ PartialSpec->getTypeSourceInfo(), TemplateArgs,
+ PartialSpec->getTypeSpecStartLoc(), PartialSpec->getDeclName());
+ if (!DI)
+ return nullptr;
+
+ if (DI->getType()->isFunctionType()) {
+ SemaRef.Diag(PartialSpec->getLocation(),
+ diag::err_variable_instantiates_to_function)
+ << PartialSpec->isStaticDataMember() << DI->getType();
+ return nullptr;
+ }
+
+ // Create the variable template partial specialization declaration.
+ VarTemplatePartialSpecializationDecl *InstPartialSpec =
+ VarTemplatePartialSpecializationDecl::Create(
+ SemaRef.Context, Owner, PartialSpec->getInnerLocStart(),
+ PartialSpec->getLocation(), InstParams, VarTemplate, DI->getType(),
+ DI, PartialSpec->getStorageClass(), Converted.data(),
+ Converted.size(), InstTemplateArgs);
+
+ // Substitute the nested name specifier, if any.
+ if (SubstQualifier(PartialSpec, InstPartialSpec))
+ return nullptr;
+
+ InstPartialSpec->setInstantiatedFromMember(PartialSpec);
+ InstPartialSpec->setTypeAsWritten(WrittenTy);
+
+ // Add this partial specialization to the set of variable template partial
+ // specializations. The instantiation of the initializer is not necessary.
+ VarTemplate->AddPartialSpecialization(InstPartialSpec, /*InsertPos=*/nullptr);
+
+ SemaRef.BuildVariableInstantiation(InstPartialSpec, PartialSpec, TemplateArgs,
+ LateAttrs, Owner, StartingScope);
+
+ return InstPartialSpec;
+}
+
+TypeSourceInfo*
+TemplateDeclInstantiator::SubstFunctionType(FunctionDecl *D,
+ SmallVectorImpl<ParmVarDecl *> &Params) {
+ TypeSourceInfo *OldTInfo = D->getTypeSourceInfo();
+ assert(OldTInfo && "substituting function without type source info");
+ assert(Params.empty() && "parameter vector is non-empty at start");
+
+ CXXRecordDecl *ThisContext = nullptr;
+ unsigned ThisTypeQuals = 0;
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ ThisContext = cast<CXXRecordDecl>(Owner);
+ ThisTypeQuals = Method->getTypeQualifiers();
+ }
+
+ TypeSourceInfo *NewTInfo
+ = SemaRef.SubstFunctionDeclType(OldTInfo, TemplateArgs,
+ D->getTypeSpecStartLoc(),
+ D->getDeclName(),
+ ThisContext, ThisTypeQuals);
+ if (!NewTInfo)
+ return nullptr;
+
+ TypeLoc OldTL = OldTInfo->getTypeLoc().IgnoreParens();
+ if (FunctionProtoTypeLoc OldProtoLoc = OldTL.getAs<FunctionProtoTypeLoc>()) {
+ if (NewTInfo != OldTInfo) {
+ // Get parameters from the new type info.
+ TypeLoc NewTL = NewTInfo->getTypeLoc().IgnoreParens();
+ FunctionProtoTypeLoc NewProtoLoc = NewTL.castAs<FunctionProtoTypeLoc>();
+ unsigned NewIdx = 0;
+ for (unsigned OldIdx = 0, NumOldParams = OldProtoLoc.getNumParams();
+ OldIdx != NumOldParams; ++OldIdx) {
+ ParmVarDecl *OldParam = OldProtoLoc.getParam(OldIdx);
+ LocalInstantiationScope *Scope = SemaRef.CurrentInstantiationScope;
+
+ Optional<unsigned> NumArgumentsInExpansion;
+ if (OldParam->isParameterPack())
+ NumArgumentsInExpansion =
+ SemaRef.getNumArgumentsInExpansion(OldParam->getType(),
+ TemplateArgs);
+ if (!NumArgumentsInExpansion) {
+ // Simple case: normal parameter, or a parameter pack that's
+ // instantiated to a (still-dependent) parameter pack.
+ ParmVarDecl *NewParam = NewProtoLoc.getParam(NewIdx++);
+ Params.push_back(NewParam);
+ Scope->InstantiatedLocal(OldParam, NewParam);
+ } else {
+ // Parameter pack expansion: make the instantiation an argument pack.
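+          // (Illustrative example: substituting into
+          //    template<typename ...Ts> void f(Ts ...ts);
+          // with Ts = {int, float} yields two new ParmVarDecls, both mapped
+          // back to the single pattern parameter 'ts' as an argument pack.)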
+ Scope->MakeInstantiatedLocalArgPack(OldParam);
+ for (unsigned I = 0; I != *NumArgumentsInExpansion; ++I) {
+ ParmVarDecl *NewParam = NewProtoLoc.getParam(NewIdx++);
+ Params.push_back(NewParam);
+ Scope->InstantiatedLocalPackArg(OldParam, NewParam);
+ }
+ }
+ }
+ } else {
+ // The function type itself was not dependent and therefore no
+ // substitution occurred. However, we still need to instantiate
+ // the function parameters themselves.
+ const FunctionProtoType *OldProto =
+ cast<FunctionProtoType>(OldProtoLoc.getType());
+ for (unsigned i = 0, i_end = OldProtoLoc.getNumParams(); i != i_end;
+ ++i) {
+ ParmVarDecl *OldParam = OldProtoLoc.getParam(i);
+ if (!OldParam) {
+ Params.push_back(SemaRef.BuildParmVarDeclForTypedef(
+ D, D->getLocation(), OldProto->getParamType(i)));
+ continue;
+ }
+
+ ParmVarDecl *Parm =
+ cast_or_null<ParmVarDecl>(VisitParmVarDecl(OldParam));
+ if (!Parm)
+ return nullptr;
+ Params.push_back(Parm);
+ }
+ }
+ } else {
+ // If the type of this function, after ignoring parentheses, is not
+ // *directly* a function type, then we're instantiating a function that
+ // was declared via a typedef or with attributes, e.g.,
+ //
+ // typedef int functype(int, int);
+ // functype func;
+ // int __cdecl meth(int, int);
+ //
+ // In this case, we'll just go instantiate the ParmVarDecls that we
+ // synthesized in the method declaration.
+ SmallVector<QualType, 4> ParamTypes;
+ if (SemaRef.SubstParmTypes(D->getLocation(), D->param_begin(),
+ D->getNumParams(), TemplateArgs, ParamTypes,
+ &Params))
+ return nullptr;
+ }
+
+ return NewTInfo;
+}
+
+/// Introduce the instantiated function parameters into the local
+/// instantiation scope, and set the parameter names to those used
+/// in the template.
+static bool addInstantiatedParametersToScope(Sema &S, FunctionDecl *Function,
+ const FunctionDecl *PatternDecl,
+ LocalInstantiationScope &Scope,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ unsigned FParamIdx = 0;
+ for (unsigned I = 0, N = PatternDecl->getNumParams(); I != N; ++I) {
+ const ParmVarDecl *PatternParam = PatternDecl->getParamDecl(I);
+ if (!PatternParam->isParameterPack()) {
+ // Simple case: not a parameter pack.
+ assert(FParamIdx < Function->getNumParams());
+ ParmVarDecl *FunctionParam = Function->getParamDecl(FParamIdx);
+ FunctionParam->setDeclName(PatternParam->getDeclName());
+ // If the parameter's type is not dependent, update it to match the type
+ // in the pattern. They can differ in top-level cv-qualifiers, and we want
+ // the pattern's type here. If the type is dependent, they can't differ,
+ // per core issue 1668. Substitute into the type from the pattern, in case
+ // it's instantiation-dependent.
+ // FIXME: Updating the type to work around this is at best fragile.
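+      // (Illustrative example: for a pattern parameter declared 'const int p',
+      // the function's type stores the adjusted parameter type 'int', so the
+      // instantiated parameter would otherwise lose the top-level 'const'
+      // that the pattern parameter's declared type still carries.)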
+ if (!PatternDecl->getType()->isDependentType()) {
+ QualType T = S.SubstType(PatternParam->getType(), TemplateArgs,
+ FunctionParam->getLocation(),
+ FunctionParam->getDeclName());
+ if (T.isNull())
+ return true;
+ FunctionParam->setType(T);
+ }
+
+ Scope.InstantiatedLocal(PatternParam, FunctionParam);
+ ++FParamIdx;
+ continue;
+ }
+
+ // Expand the parameter pack.
+ Scope.MakeInstantiatedLocalArgPack(PatternParam);
+ Optional<unsigned> NumArgumentsInExpansion
+ = S.getNumArgumentsInExpansion(PatternParam->getType(), TemplateArgs);
+ assert(NumArgumentsInExpansion &&
+ "should only be called when all template arguments are known");
+ QualType PatternType =
+ PatternParam->getType()->castAs<PackExpansionType>()->getPattern();
+ for (unsigned Arg = 0; Arg < *NumArgumentsInExpansion; ++Arg) {
+ ParmVarDecl *FunctionParam = Function->getParamDecl(FParamIdx);
+ FunctionParam->setDeclName(PatternParam->getDeclName());
+ if (!PatternDecl->getType()->isDependentType()) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, Arg);
+ QualType T = S.SubstType(PatternType, TemplateArgs,
+ FunctionParam->getLocation(),
+ FunctionParam->getDeclName());
+ if (T.isNull())
+ return true;
+ FunctionParam->setType(T);
+ }
+
+ Scope.InstantiatedLocalPackArg(PatternParam, FunctionParam);
+ ++FParamIdx;
+ }
+ }
+
+ return false;
+}
+
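+/// \brief Instantiate the exception specification of the given function
+/// declaration if it was deferred (EST_Uninstantiated) when the declaration
+/// itself was instantiated; see the DR1330 handling in
+/// InitFunctionInstantiation below.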
+void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
+ FunctionDecl *Decl) {
+ const FunctionProtoType *Proto = Decl->getType()->castAs<FunctionProtoType>();
+ if (Proto->getExceptionSpecType() != EST_Uninstantiated)
+ return;
+
+ InstantiatingTemplate Inst(*this, PointOfInstantiation, Decl,
+ InstantiatingTemplate::ExceptionSpecification());
+ if (Inst.isInvalid()) {
+ // We hit the instantiation depth limit. Clear the exception specification
+ // so that our callers don't have to cope with EST_Uninstantiated.
+ UpdateExceptionSpec(Decl, EST_None);
+ return;
+ }
+
+ // Enter the scope of this instantiation. We don't use
+ // PushDeclContext because we don't have a scope.
+ Sema::ContextRAII savedContext(*this, Decl);
+ LocalInstantiationScope Scope(*this);
+
+ MultiLevelTemplateArgumentList TemplateArgs =
+ getTemplateInstantiationArgs(Decl, nullptr, /*RelativeToPrimary*/true);
+
+ FunctionDecl *Template = Proto->getExceptionSpecTemplate();
+ if (addInstantiatedParametersToScope(*this, Decl, Template, Scope,
+ TemplateArgs)) {
+ UpdateExceptionSpec(Decl, EST_None);
+ return;
+ }
+
+ SubstExceptionSpec(Decl, Template->getType()->castAs<FunctionProtoType>(),
+ TemplateArgs);
+}
+
+/// \brief Initializes the common fields of an instantiated function
+/// declaration (New) from the corresponding fields of its template (Tmpl).
+///
+/// \returns true if there was an error
+bool
+TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
+ FunctionDecl *Tmpl) {
+ if (Tmpl->isDeleted())
+ New->setDeletedAsWritten();
+
+ // Forward the mangling number from the template to the instantiated decl.
+ SemaRef.Context.setManglingNumber(New,
+ SemaRef.Context.getManglingNumber(Tmpl));
+
+  // If we are substituting explicitly-specified template arguments or
+  // deduced template arguments into a function template and we reach this
+ // point, we are now past the point where SFINAE applies and have committed
+ // to keeping the new function template specialization. We therefore
+ // convert the active template instantiation for the function template
+ // into a template instantiation for this specific function template
+ // specialization, which is not a SFINAE context, so that we diagnose any
+ // further errors in the declaration itself.
+ typedef Sema::ActiveTemplateInstantiation ActiveInstType;
+ ActiveInstType &ActiveInst = SemaRef.ActiveTemplateInstantiations.back();
+ if (ActiveInst.Kind == ActiveInstType::ExplicitTemplateArgumentSubstitution ||
+ ActiveInst.Kind == ActiveInstType::DeducedTemplateArgumentSubstitution) {
+ if (FunctionTemplateDecl *FunTmpl
+ = dyn_cast<FunctionTemplateDecl>(ActiveInst.Entity)) {
+ assert(FunTmpl->getTemplatedDecl() == Tmpl &&
+ "Deduction from the wrong function template?");
+ (void) FunTmpl;
+ ActiveInst.Kind = ActiveInstType::TemplateInstantiation;
+ ActiveInst.Entity = New;
+ }
+ }
+
+ const FunctionProtoType *Proto = Tmpl->getType()->getAs<FunctionProtoType>();
+ assert(Proto && "Function template without prototype?");
+
+ if (Proto->hasExceptionSpec() || Proto->getNoReturnAttr()) {
+ FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
+
+ // DR1330: In C++11, defer instantiation of a non-trivial
+ // exception specification.
+ // DR1484: Local classes and their members are instantiated along with the
+ // containing function.
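+    // (Illustrative example: for
+    //    template<typename T> void f(T) noexcept(noexcept(T()));
+    // the instantiated declaration f<int> is marked EST_Uninstantiated here,
+    // and the noexcept operand is only substituted later, on demand, in
+    // Sema::InstantiateExceptionSpec above.)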
+ if (SemaRef.getLangOpts().CPlusPlus11 &&
+ EPI.ExceptionSpec.Type != EST_None &&
+ EPI.ExceptionSpec.Type != EST_DynamicNone &&
+ EPI.ExceptionSpec.Type != EST_BasicNoexcept &&
+ !Tmpl->isLexicallyWithinFunctionOrMethod()) {
+ FunctionDecl *ExceptionSpecTemplate = Tmpl;
+ if (EPI.ExceptionSpec.Type == EST_Uninstantiated)
+ ExceptionSpecTemplate = EPI.ExceptionSpec.SourceTemplate;
+ ExceptionSpecificationType NewEST = EST_Uninstantiated;
+ if (EPI.ExceptionSpec.Type == EST_Unevaluated)
+ NewEST = EST_Unevaluated;
+
+      // Mark the function as having an uninstantiated exception specification.
+ const FunctionProtoType *NewProto
+ = New->getType()->getAs<FunctionProtoType>();
+ assert(NewProto && "Template instantiation without function prototype?");
+ EPI = NewProto->getExtProtoInfo();
+ EPI.ExceptionSpec.Type = NewEST;
+ EPI.ExceptionSpec.SourceDecl = New;
+ EPI.ExceptionSpec.SourceTemplate = ExceptionSpecTemplate;
+ New->setType(SemaRef.Context.getFunctionType(
+ NewProto->getReturnType(), NewProto->getParamTypes(), EPI));
+ } else {
+ SemaRef.SubstExceptionSpec(New, Proto, TemplateArgs);
+ }
+ }
+
+ // Get the definition. Leaves the variable unchanged if undefined.
+ const FunctionDecl *Definition = Tmpl;
+ Tmpl->isDefined(Definition);
+
+ SemaRef.InstantiateAttrs(TemplateArgs, Definition, New,
+ LateAttrs, StartingScope);
+
+ return false;
+}
+
+/// \brief Initializes common fields of an instantiated method
+/// declaration (New) from the corresponding fields of its template
+/// (Tmpl).
+///
+/// \returns true if there was an error
+bool
+TemplateDeclInstantiator::InitMethodInstantiation(CXXMethodDecl *New,
+ CXXMethodDecl *Tmpl) {
+ if (InitFunctionInstantiation(New, Tmpl))
+ return true;
+
+ New->setAccess(Tmpl->getAccess());
+ if (Tmpl->isVirtualAsWritten())
+ New->setVirtualAsWritten(true);
+
+ // FIXME: New needs a pointer to Tmpl
+ return false;
+}
+
+/// \brief Instantiate the definition of the given function from its
+/// template.
+///
+/// \param PointOfInstantiation the point at which the instantiation was
+/// required. Note that this is not precisely a "point of instantiation"
+/// for the function, but it's close.
+///
+/// \param Function the already-instantiated declaration of a
+/// function template specialization or member function of a class template
+/// specialization.
+///
+/// \param Recursive if true, recursively instantiates any functions that
+/// are required by this instantiation.
+///
+/// \param DefinitionRequired if true, then we are performing an explicit
+/// instantiation where the body of the function is required. Complain if
+/// there is no such body.
+void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
+ FunctionDecl *Function,
+ bool Recursive,
+ bool DefinitionRequired) {
+ if (Function->isInvalidDecl() || Function->isDefined())
+ return;
+
+  // Never instantiate an explicit specialization, unless it is a class-scope
+  // explicit specialization.
+ if (Function->getTemplateSpecializationKind() == TSK_ExplicitSpecialization &&
+ !Function->getClassScopeSpecializationPattern())
+ return;
+
+ // Find the function body that we'll be substituting.
+ const FunctionDecl *PatternDecl = Function->getTemplateInstantiationPattern();
+ assert(PatternDecl && "instantiating a non-template");
+
+ Stmt *Pattern = PatternDecl->getBody(PatternDecl);
+ assert(PatternDecl && "template definition is not a template");
+ if (!Pattern) {
+ // Try to find a defaulted definition
+ PatternDecl->isDefined(PatternDecl);
+ }
+ assert(PatternDecl && "template definition is not a template");
+
+ // Postpone late parsed template instantiations.
+ if (PatternDecl->isLateTemplateParsed() &&
+ !LateTemplateParser) {
+ PendingInstantiations.push_back(
+ std::make_pair(Function, PointOfInstantiation));
+ return;
+ }
+
+ // If we're performing recursive template instantiation, create our own
+ // queue of pending implicit instantiations that we will instantiate later,
+ // while we're still within our own instantiation context.
+ // This has to happen before LateTemplateParser below is called, so that
+ // it marks vtables used in late parsed templates as used.
+ SavePendingLocalImplicitInstantiationsRAII
+ SavedPendingLocalImplicitInstantiations(*this);
+ SavePendingInstantiationsAndVTableUsesRAII
+ SavePendingInstantiationsAndVTableUses(*this, /*Enabled=*/Recursive);
+
+ // Call the LateTemplateParser callback if there is a need to late parse
+ // a templated function definition.
+ if (!Pattern && PatternDecl->isLateTemplateParsed() &&
+ LateTemplateParser) {
+ // FIXME: Optimize to allow individual templates to be deserialized.
+ if (PatternDecl->isFromASTFile())
+ ExternalSource->ReadLateParsedTemplates(LateParsedTemplateMap);
+
+ LateParsedTemplate *LPT = LateParsedTemplateMap.lookup(PatternDecl);
+ assert(LPT && "missing LateParsedTemplate");
+ LateTemplateParser(OpaqueParser, *LPT);
+ Pattern = PatternDecl->getBody(PatternDecl);
+ }
+
+ if (!Pattern && !PatternDecl->isDefaulted()) {
+ if (DefinitionRequired) {
+ if (Function->getPrimaryTemplate())
+ Diag(PointOfInstantiation,
+ diag::err_explicit_instantiation_undefined_func_template)
+ << Function->getPrimaryTemplate();
+ else
+ Diag(PointOfInstantiation,
+ diag::err_explicit_instantiation_undefined_member)
+ << 1 << Function->getDeclName() << Function->getDeclContext();
+
+ if (PatternDecl)
+ Diag(PatternDecl->getLocation(),
+ diag::note_explicit_instantiation_here);
+ Function->setInvalidDecl();
+ } else if (Function->getTemplateSpecializationKind()
+ == TSK_ExplicitInstantiationDefinition) {
+ assert(!Recursive);
+ PendingInstantiations.push_back(
+ std::make_pair(Function, PointOfInstantiation));
+ }
+
+ return;
+ }
+
+ // C++1y [temp.explicit]p10:
+ // Except for inline functions, declarations with types deduced from their
+ // initializer or return value, and class template specializations, other
+ // explicit instantiation declarations have the effect of suppressing the
+ // implicit instantiation of the entity to which they refer.
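+  // (Illustrative example: 'extern template int min<int>(int, int);'
+  // suppresses the implicit instantiation of min<int> in this translation
+  // unit, unless min is declared inline or has a deduced return type.)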
+ if (Function->getTemplateSpecializationKind() ==
+ TSK_ExplicitInstantiationDeclaration &&
+ !PatternDecl->isInlined() &&
+ !PatternDecl->getReturnType()->getContainedAutoType())
+ return;
+
+ if (PatternDecl->isInlined()) {
+ // Function, and all later redeclarations of it (from imported modules,
+ // for instance), are now implicitly inline.
+ for (auto *D = Function->getMostRecentDecl(); /**/;
+ D = D->getPreviousDecl()) {
+ D->setImplicitlyInline();
+ if (D == Function)
+ break;
+ }
+ }
+
+ InstantiatingTemplate Inst(*this, PointOfInstantiation, Function);
+ if (Inst.isInvalid())
+ return;
+
+ // Copy the inner loc start from the pattern.
+ Function->setInnerLocStart(PatternDecl->getInnerLocStart());
+
+ EnterExpressionEvaluationContext EvalContext(*this,
+ Sema::PotentiallyEvaluated);
+
+ // Introduce a new scope where local variable instantiations will be
+ // recorded, unless we're actually a member function within a local
+ // class, in which case we need to merge our results with the parent
+ // scope (of the enclosing function).
+ bool MergeWithParentScope = false;
+ if (CXXRecordDecl *Rec = dyn_cast<CXXRecordDecl>(Function->getDeclContext()))
+ MergeWithParentScope = Rec->isLocalClass();
+
+ LocalInstantiationScope Scope(*this, MergeWithParentScope);
+
+ if (PatternDecl->isDefaulted())
+ SetDeclDefaulted(Function, PatternDecl->getLocation());
+ else {
+ MultiLevelTemplateArgumentList TemplateArgs =
+ getTemplateInstantiationArgs(Function, nullptr, false, PatternDecl);
+
+ // Substitute into the qualifier; we can get a substitution failure here
+ // through evil use of alias templates.
+ // FIXME: Is CurContext correct for this? Should we go to the (instantiation
+ // of the) lexical context of the pattern?
+ SubstQualifier(*this, PatternDecl, Function, TemplateArgs);
+
+ ActOnStartOfFunctionDef(nullptr, Function);
+
+ // Enter the scope of this instantiation. We don't use
+ // PushDeclContext because we don't have a scope.
+ Sema::ContextRAII savedContext(*this, Function);
+
+ if (addInstantiatedParametersToScope(*this, Function, PatternDecl, Scope,
+ TemplateArgs))
+ return;
+
+ // If this is a constructor, instantiate the member initializers.
+ if (const CXXConstructorDecl *Ctor =
+ dyn_cast<CXXConstructorDecl>(PatternDecl)) {
+ InstantiateMemInitializers(cast<CXXConstructorDecl>(Function), Ctor,
+ TemplateArgs);
+ }
+
+ // Instantiate the function body.
+ StmtResult Body = SubstStmt(Pattern, TemplateArgs);
+
+ if (Body.isInvalid())
+ Function->setInvalidDecl();
+
+ ActOnFinishFunctionBody(Function, Body.get(),
+ /*IsInstantiation=*/true);
+
+ PerformDependentDiagnostics(PatternDecl, TemplateArgs);
+
+ if (auto *Listener = getASTMutationListener())
+ Listener->FunctionDefinitionInstantiated(Function);
+
+ savedContext.pop();
+ }
+
+ DeclGroupRef DG(Function);
+ Consumer.HandleTopLevelDecl(DG);
+
+  // This function may have local implicit instantiations that need to be
+  // instantiated within this scope.
+ PerformPendingInstantiations(/*LocalOnly=*/true);
+ Scope.Exit();
+
+ if (Recursive) {
+ // Define any pending vtables.
+ DefineUsedVTables();
+
+ // Instantiate any pending implicit instantiations found during the
+ // instantiation of this template.
+ PerformPendingInstantiations();
+
+ // PendingInstantiations and VTableUses are restored through
+ // SavePendingInstantiationsAndVTableUses's destructor.
+ }
+}
+
+VarTemplateSpecializationDecl *Sema::BuildVarTemplateInstantiation(
+ VarTemplateDecl *VarTemplate, VarDecl *FromVar,
+ const TemplateArgumentList &TemplateArgList,
+ const TemplateArgumentListInfo &TemplateArgsInfo,
+ SmallVectorImpl<TemplateArgument> &Converted,
+ SourceLocation PointOfInstantiation, void *InsertPos,
+ LateInstantiatedAttrVec *LateAttrs,
+ LocalInstantiationScope *StartingScope) {
+ if (FromVar->isInvalidDecl())
+ return nullptr;
+
+ InstantiatingTemplate Inst(*this, PointOfInstantiation, FromVar);
+ if (Inst.isInvalid())
+ return nullptr;
+
+ MultiLevelTemplateArgumentList TemplateArgLists;
+ TemplateArgLists.addOuterTemplateArguments(&TemplateArgList);
+
+ // Instantiate the first declaration of the variable template: for a partial
+ // specialization of a static data member template, the first declaration may
+ // or may not be the declaration in the class; if it's in the class, we want
+ // to instantiate a member in the class (a declaration), and if it's outside,
+ // we want to instantiate a definition.
+ //
+ // If we're instantiating an explicitly-specialized member template or member
+ // partial specialization, don't do this. The member specialization completely
+ // replaces the original declaration in this case.
+ bool IsMemberSpec = false;
+ if (VarTemplatePartialSpecializationDecl *PartialSpec =
+ dyn_cast<VarTemplatePartialSpecializationDecl>(FromVar))
+ IsMemberSpec = PartialSpec->isMemberSpecialization();
+ else if (VarTemplateDecl *FromTemplate = FromVar->getDescribedVarTemplate())
+ IsMemberSpec = FromTemplate->isMemberSpecialization();
+ if (!IsMemberSpec)
+ FromVar = FromVar->getFirstDecl();
+
+ MultiLevelTemplateArgumentList MultiLevelList(TemplateArgList);
+ TemplateDeclInstantiator Instantiator(*this, FromVar->getDeclContext(),
+ MultiLevelList);
+
+ // TODO: Set LateAttrs and StartingScope ...
+
+ return cast_or_null<VarTemplateSpecializationDecl>(
+ Instantiator.VisitVarTemplateSpecializationDecl(
+ VarTemplate, FromVar, InsertPos, TemplateArgsInfo, Converted));
+}
+
+/// \brief Instantiates a variable template specialization by completing it
+/// with appropriate type information and initializer.
+VarTemplateSpecializationDecl *Sema::CompleteVarTemplateSpecializationDecl(
+ VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+
+ // Do substitution on the type of the declaration
+ TypeSourceInfo *DI =
+ SubstType(PatternDecl->getTypeSourceInfo(), TemplateArgs,
+ PatternDecl->getTypeSpecStartLoc(), PatternDecl->getDeclName());
+ if (!DI)
+ return nullptr;
+
+ // Update the type of this variable template specialization.
+ VarSpec->setType(DI->getType());
+
+ // Instantiate the initializer.
+ InstantiateVariableInitializer(VarSpec, PatternDecl, TemplateArgs);
+
+ return VarSpec;
+}
+
+/// BuildVariableInstantiation - Used after a new variable has been created.
+/// Sets basic variable data and decides whether to postpone the
+/// variable instantiation.
+void Sema::BuildVariableInstantiation(
+ VarDecl *NewVar, VarDecl *OldVar,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner,
+ LocalInstantiationScope *StartingScope,
+ bool InstantiatingVarTemplate) {
+
+ // If we are instantiating a local extern declaration, the
+ // instantiation belongs lexically to the containing function.
+ // If we are instantiating a static data member defined
+ // out-of-line, the instantiation will have the same lexical
+ // context (which will be a namespace scope) as the template.
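+  // (Illustrative example: for 'template<typename T> void f() { extern int n; }',
+  // the instantiation of 'n' lexically belongs inside the instantiation of
+  // 'f', whereas the out-of-line 'template<typename T> int S<T>::n = 0;'
+  // keeps its namespace-scope lexical context.)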
+ if (OldVar->isLocalExternDecl()) {
+ NewVar->setLocalExternDecl();
+ NewVar->setLexicalDeclContext(Owner);
+ } else if (OldVar->isOutOfLine())
+ NewVar->setLexicalDeclContext(OldVar->getLexicalDeclContext());
+ NewVar->setTSCSpec(OldVar->getTSCSpec());
+ NewVar->setInitStyle(OldVar->getInitStyle());
+ NewVar->setCXXForRangeDecl(OldVar->isCXXForRangeDecl());
+ NewVar->setConstexpr(OldVar->isConstexpr());
+ NewVar->setInitCapture(OldVar->isInitCapture());
+ NewVar->setPreviousDeclInSameBlockScope(
+ OldVar->isPreviousDeclInSameBlockScope());
+ NewVar->setAccess(OldVar->getAccess());
+
+ if (!OldVar->isStaticDataMember()) {
+ if (OldVar->isUsed(false))
+ NewVar->setIsUsed();
+ NewVar->setReferenced(OldVar->isReferenced());
+ }
+
+ InstantiateAttrs(TemplateArgs, OldVar, NewVar, LateAttrs, StartingScope);
+
+ LookupResult Previous(
+ *this, NewVar->getDeclName(), NewVar->getLocation(),
+ NewVar->isLocalExternDecl() ? Sema::LookupRedeclarationWithLinkage
+ : Sema::LookupOrdinaryName,
+ Sema::ForRedeclaration);
+
+ if (NewVar->isLocalExternDecl() && OldVar->getPreviousDecl() &&
+ (!OldVar->getPreviousDecl()->getDeclContext()->isDependentContext() ||
+ OldVar->getPreviousDecl()->getDeclContext()==OldVar->getDeclContext())) {
+ // We have a previous declaration. Use that one, so we merge with the
+ // right type.
+ if (NamedDecl *NewPrev = FindInstantiatedDecl(
+ NewVar->getLocation(), OldVar->getPreviousDecl(), TemplateArgs))
+ Previous.addDecl(NewPrev);
+ } else if (!isa<VarTemplateSpecializationDecl>(NewVar) &&
+ OldVar->hasLinkage())
+ LookupQualifiedName(Previous, NewVar->getDeclContext(), false);
+ CheckVariableDeclaration(NewVar, Previous);
+
+ if (!InstantiatingVarTemplate) {
+ NewVar->getLexicalDeclContext()->addHiddenDecl(NewVar);
+ if (!NewVar->isLocalExternDecl() || !NewVar->getPreviousDecl())
+ NewVar->getDeclContext()->makeDeclVisibleInContext(NewVar);
+ }
+
+ if (!OldVar->isOutOfLine()) {
+ if (NewVar->getDeclContext()->isFunctionOrMethod())
+ CurrentInstantiationScope->InstantiatedLocal(OldVar, NewVar);
+ }
+
+ // Link instantiations of static data members back to the template from
+ // which they were instantiated.
+ if (NewVar->isStaticDataMember() && !InstantiatingVarTemplate)
+ NewVar->setInstantiationOfStaticDataMember(OldVar,
+ TSK_ImplicitInstantiation);
+
+ // Forward the mangling number from the template to the instantiated decl.
+ Context.setManglingNumber(NewVar, Context.getManglingNumber(OldVar));
+ Context.setStaticLocalNumber(NewVar, Context.getStaticLocalNumber(OldVar));
+
+ // Delay instantiation of the initializer for variable templates until a
+ // definition of the variable is needed. We need it right away if the type
+ // contains 'auto'.
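+  // (Illustrative example: for 'template<typename T> auto v = T();', the
+  // initializer of v<int> must be instantiated right away so that 'auto' can
+  // be deduced, whereas for 'template<typename T> int w = sizeof(T);' the
+  // initializer of w<int> can wait until a definition of w<int> is needed.)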
+ if ((!isa<VarTemplateSpecializationDecl>(NewVar) &&
+ !InstantiatingVarTemplate) ||
+ NewVar->getType()->isUndeducedType())
+ InstantiateVariableInitializer(NewVar, OldVar, TemplateArgs);
+
+ // Diagnose unused local variables with dependent types, where the diagnostic
+ // will have been deferred.
+ if (!NewVar->isInvalidDecl() &&
+ NewVar->getDeclContext()->isFunctionOrMethod() &&
+ OldVar->getType()->isDependentType())
+ DiagnoseUnusedDecl(NewVar);
+}
+
+/// \brief Instantiate the initializer of a variable.
+void Sema::InstantiateVariableInitializer(
+ VarDecl *Var, VarDecl *OldVar,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+
+ if (Var->getAnyInitializer())
+ // We already have an initializer in the class.
+ return;
+
+ if (OldVar->getInit()) {
+ if (Var->isStaticDataMember() && !OldVar->isOutOfLine())
+ PushExpressionEvaluationContext(Sema::ConstantEvaluated, OldVar);
+ else
+ PushExpressionEvaluationContext(Sema::PotentiallyEvaluated, OldVar);
+
+ // Instantiate the initializer.
+ ExprResult Init =
+ SubstInitializer(OldVar->getInit(), TemplateArgs,
+ OldVar->getInitStyle() == VarDecl::CallInit);
+ if (!Init.isInvalid()) {
+ bool TypeMayContainAuto = true;
+ Expr *InitExpr = Init.get();
+
+ if (Var->hasAttr<DLLImportAttr>() &&
+ (!InitExpr ||
+ !InitExpr->isConstantInitializer(getASTContext(), false))) {
+ // Do not dynamically initialize dllimport variables.
+ } else if (InitExpr) {
+ bool DirectInit = OldVar->isDirectInit();
+ AddInitializerToDecl(Var, InitExpr, DirectInit, TypeMayContainAuto);
+ } else
+ ActOnUninitializedDecl(Var, TypeMayContainAuto);
+ } else {
+ // FIXME: Not too happy about invalidating the declaration
+ // because of a bogus initializer.
+ Var->setInvalidDecl();
+ }
+
+ PopExpressionEvaluationContext();
+ } else if ((!Var->isStaticDataMember() || Var->isOutOfLine()) &&
+ !Var->isCXXForRangeDecl())
+ ActOnUninitializedDecl(Var, false);
+}
+
+/// \brief Instantiate the definition of the given variable from its
+/// template.
+///
+/// \param PointOfInstantiation the point at which the instantiation was
+/// required. Note that this is not precisely a "point of instantiation"
+/// for the variable, but it's close.
+///
+/// \param Var the already-instantiated declaration of a static member
+/// variable of a class template specialization.
+///
+/// \param Recursive if true, recursively instantiates any functions that
+/// are required by this instantiation.
+///
+/// \param DefinitionRequired if true, then we are performing an explicit
+/// instantiation where an out-of-line definition of the member variable
+/// is required. Complain if there is no such definition.
+void Sema::InstantiateStaticDataMemberDefinition(
+ SourceLocation PointOfInstantiation,
+ VarDecl *Var,
+ bool Recursive,
+ bool DefinitionRequired) {
+ InstantiateVariableDefinition(PointOfInstantiation, Var, Recursive,
+ DefinitionRequired);
+}
+
+void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
+ VarDecl *Var, bool Recursive,
+ bool DefinitionRequired) {
+ if (Var->isInvalidDecl())
+ return;
+
+ VarTemplateSpecializationDecl *VarSpec =
+ dyn_cast<VarTemplateSpecializationDecl>(Var);
+ VarDecl *PatternDecl = nullptr, *Def = nullptr;
+ MultiLevelTemplateArgumentList TemplateArgs =
+ getTemplateInstantiationArgs(Var);
+
+ if (VarSpec) {
+ // If this is a variable template specialization, make sure that it is
+ // non-dependent, then find its instantiation pattern.
+ bool InstantiationDependent = false;
+ assert(!TemplateSpecializationType::anyDependentTemplateArguments(
+ VarSpec->getTemplateArgsInfo(), InstantiationDependent) &&
+ "Only instantiate variable template specializations that are "
+ "not type-dependent");
+ (void)InstantiationDependent;
+
+ // Find the variable initialization that we'll be substituting. If the
+ // pattern was instantiated from a member template, look back further to
+ // find the real pattern.
+ assert(VarSpec->getSpecializedTemplate() &&
+ "Specialization without specialized template?");
+ llvm::PointerUnion<VarTemplateDecl *,
+ VarTemplatePartialSpecializationDecl *> PatternPtr =
+ VarSpec->getSpecializedTemplateOrPartial();
+ if (PatternPtr.is<VarTemplatePartialSpecializationDecl *>()) {
+ VarTemplatePartialSpecializationDecl *Tmpl =
+ PatternPtr.get<VarTemplatePartialSpecializationDecl *>();
+ while (VarTemplatePartialSpecializationDecl *From =
+ Tmpl->getInstantiatedFromMember()) {
+ if (Tmpl->isMemberSpecialization())
+ break;
+
+ Tmpl = From;
+ }
+ PatternDecl = Tmpl;
+ } else {
+ VarTemplateDecl *Tmpl = PatternPtr.get<VarTemplateDecl *>();
+ while (VarTemplateDecl *From =
+ Tmpl->getInstantiatedFromMemberTemplate()) {
+ if (Tmpl->isMemberSpecialization())
+ break;
+
+ Tmpl = From;
+ }
+ PatternDecl = Tmpl->getTemplatedDecl();
+ }
+
+ // If this is a static data member template, there might be an
+ // uninstantiated initializer on the declaration. If so, instantiate
+ // it now.
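+    // (Illustrative example: given
+    //    struct S { template<typename T> static constexpr T x = T(); };
+    // a use of S::x<int> reaches here with the in-class initializer 'T()'
+    // not yet instantiated, and it is substituted at this point.)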
+ if (PatternDecl->isStaticDataMember() &&
+ (PatternDecl = PatternDecl->getFirstDecl())->hasInit() &&
+ !Var->hasInit()) {
+ // FIXME: Factor out the duplicated instantiation context setup/tear down
+ // code here.
+ InstantiatingTemplate Inst(*this, PointOfInstantiation, Var);
+ if (Inst.isInvalid())
+ return;
+
+ // If we're performing recursive template instantiation, create our own
+ // queue of pending implicit instantiations that we will instantiate
+ // later, while we're still within our own instantiation context.
+ SavePendingInstantiationsAndVTableUsesRAII
+ SavePendingInstantiationsAndVTableUses(*this, /*Enabled=*/Recursive);
+
+ LocalInstantiationScope Local(*this);
+
+ // Enter the scope of this instantiation. We don't use
+ // PushDeclContext because we don't have a scope.
+ ContextRAII PreviousContext(*this, Var->getDeclContext());
+ InstantiateVariableInitializer(Var, PatternDecl, TemplateArgs);
+ PreviousContext.pop();
+
+ // FIXME: Need to inform the ASTConsumer that we instantiated the
+ // initializer?
+
+ // This variable may have local implicit instantiations that need to be
+ // instantiated within this scope.
+ PerformPendingInstantiations(/*LocalOnly=*/true);
+
+ Local.Exit();
+
+ if (Recursive) {
+ // Define any newly required vtables.
+ DefineUsedVTables();
+
+ // Instantiate any pending implicit instantiations found during the
+ // instantiation of this template.
+ PerformPendingInstantiations();
+
+ // PendingInstantiations and VTableUses are restored through
+ // SavePendingInstantiationsAndVTableUses's destructor.
+ }
+ }
+
+    // Find the actual definition.
+ Def = PatternDecl->getDefinition(getASTContext());
+ } else {
+ // If this is a static data member, find its out-of-line definition.
+ assert(Var->isStaticDataMember() && "not a static data member?");
+ PatternDecl = Var->getInstantiatedFromStaticDataMember();
+
+ assert(PatternDecl && "data member was not instantiated from a template?");
+ assert(PatternDecl->isStaticDataMember() && "not a static data member?");
+ Def = PatternDecl->getOutOfLineDefinition();
+ }
+
+ // If we don't have a definition of the variable template, we won't perform
+ // any instantiation. Rather, we rely on the user to instantiate this
+ // definition (or provide a specialization for it) in another translation
+ // unit.
+ if (!Def) {
+ if (DefinitionRequired) {
+ if (VarSpec)
+ Diag(PointOfInstantiation,
+ diag::err_explicit_instantiation_undefined_var_template) << Var;
+ else
+ Diag(PointOfInstantiation,
+ diag::err_explicit_instantiation_undefined_member)
+ << 2 << Var->getDeclName() << Var->getDeclContext();
+ Diag(PatternDecl->getLocation(),
+ diag::note_explicit_instantiation_here);
+ if (VarSpec)
+ Var->setInvalidDecl();
+ } else if (Var->getTemplateSpecializationKind()
+ == TSK_ExplicitInstantiationDefinition) {
+ PendingInstantiations.push_back(
+ std::make_pair(Var, PointOfInstantiation));
+ }
+
+ return;
+ }
+
+ TemplateSpecializationKind TSK = Var->getTemplateSpecializationKind();
+
+ // Never instantiate an explicit specialization.
+ if (TSK == TSK_ExplicitSpecialization)
+ return;
+
+ // C++11 [temp.explicit]p10:
+ // Except for inline functions, [...] explicit instantiation declarations
+ // have the effect of suppressing the implicit instantiation of the entity
+ // to which they refer.
+ if (TSK == TSK_ExplicitInstantiationDeclaration)
+ return;
+
+ // Make sure to pass the instantiated variable to the consumer at the end.
+ struct PassToConsumerRAII {
+ ASTConsumer &Consumer;
+ VarDecl *Var;
+
+ PassToConsumerRAII(ASTConsumer &Consumer, VarDecl *Var)
+ : Consumer(Consumer), Var(Var) { }
+
+ ~PassToConsumerRAII() {
+ Consumer.HandleCXXStaticMemberVarInstantiation(Var);
+ }
+ } PassToConsumerRAII(Consumer, Var);
+
+ // If we already have a definition, we're done.
+ if (VarDecl *Def = Var->getDefinition()) {
+ // We may be explicitly instantiating something we've already implicitly
+ // instantiated.
+ Def->setTemplateSpecializationKind(Var->getTemplateSpecializationKind(),
+ PointOfInstantiation);
+ return;
+ }
+
+ InstantiatingTemplate Inst(*this, PointOfInstantiation, Var);
+ if (Inst.isInvalid())
+ return;
+
+ // If we're performing recursive template instantiation, create our own
+ // queue of pending implicit instantiations that we will instantiate later,
+ // while we're still within our own instantiation context.
+ SavePendingLocalImplicitInstantiationsRAII
+ SavedPendingLocalImplicitInstantiations(*this);
+ SavePendingInstantiationsAndVTableUsesRAII
+ SavePendingInstantiationsAndVTableUses(*this, /*Enabled=*/Recursive);
+
+ // Enter the scope of this instantiation. We don't use
+ // PushDeclContext because we don't have a scope.
+ ContextRAII PreviousContext(*this, Var->getDeclContext());
+ LocalInstantiationScope Local(*this);
+
+ VarDecl *OldVar = Var;
+ if (!VarSpec)
+ Var = cast_or_null<VarDecl>(SubstDecl(Def, Var->getDeclContext(),
+ TemplateArgs));
+ else if (Var->isStaticDataMember() &&
+ Var->getLexicalDeclContext()->isRecord()) {
+ // We need to instantiate the definition of a static data member template,
+ // and all we have is the in-class declaration of it. Instantiate a separate
+ // declaration of the definition.
+ TemplateDeclInstantiator Instantiator(*this, Var->getDeclContext(),
+ TemplateArgs);
+ Var = cast_or_null<VarDecl>(Instantiator.VisitVarTemplateSpecializationDecl(
+ VarSpec->getSpecializedTemplate(), Def, nullptr,
+ VarSpec->getTemplateArgsInfo(), VarSpec->getTemplateArgs().asArray()));
+ if (Var) {
+ llvm::PointerUnion<VarTemplateDecl *,
+ VarTemplatePartialSpecializationDecl *> PatternPtr =
+ VarSpec->getSpecializedTemplateOrPartial();
+ if (VarTemplatePartialSpecializationDecl *Partial =
+ PatternPtr.dyn_cast<VarTemplatePartialSpecializationDecl *>())
+ cast<VarTemplateSpecializationDecl>(Var)->setInstantiationOf(
+ Partial, &VarSpec->getTemplateInstantiationArgs());
+
+ // Merge the definition with the declaration.
+ LookupResult R(*this, Var->getDeclName(), Var->getLocation(),
+ LookupOrdinaryName, ForRedeclaration);
+ R.addDecl(OldVar);
+ MergeVarDecl(Var, R);
+
+ // Attach the initializer.
+ InstantiateVariableInitializer(Var, Def, TemplateArgs);
+ }
+ } else
+ // Complete the existing variable's definition with an appropriately
+ // substituted type and initializer.
+ Var = CompleteVarTemplateSpecializationDecl(VarSpec, Def, TemplateArgs);
+
+ PreviousContext.pop();
+
+ if (Var) {
+ PassToConsumerRAII.Var = Var;
+ Var->setTemplateSpecializationKind(OldVar->getTemplateSpecializationKind(),
+ OldVar->getPointOfInstantiation());
+ }
+
+ // This variable may have local implicit instantiations that need to be
+ // instantiated within this scope.
+ PerformPendingInstantiations(/*LocalOnly=*/true);
+
+ Local.Exit();
+
+ if (Recursive) {
+ // Define any newly required vtables.
+ DefineUsedVTables();
+
+ // Instantiate any pending implicit instantiations found during the
+ // instantiation of this template.
+ PerformPendingInstantiations();
+
+ // PendingInstantiations and VTableUses are restored through
+ // SavePendingInstantiationsAndVTableUses's destructor.
+ }
+}
+
+void
+Sema::InstantiateMemInitializers(CXXConstructorDecl *New,
+ const CXXConstructorDecl *Tmpl,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+
+ SmallVector<CXXCtorInitializer*, 4> NewInits;
+ bool AnyErrors = Tmpl->isInvalidDecl();
+
+ // Instantiate all the initializers.
+ for (const auto *Init : Tmpl->inits()) {
+ // Only instantiate written initializers, let Sema re-construct implicit
+ // ones.
+ if (!Init->isWritten())
+ continue;
+
+ SourceLocation EllipsisLoc;
+
+ if (Init->isPackExpansion()) {
+ // This is a pack expansion. We should expand it now.
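+      // (Illustrative example: for
+      //    template<typename ...Bases> struct D : Bases... {
+      //      D() : Bases()... {}
+      //    };
+      // the single written initializer 'Bases()...' expands here into one
+      // base initializer per element of the instantiated pack.)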
+ TypeLoc BaseTL = Init->getTypeSourceInfo()->getTypeLoc();
+ SmallVector<UnexpandedParameterPack, 4> Unexpanded;
+ collectUnexpandedParameterPacks(BaseTL, Unexpanded);
+ collectUnexpandedParameterPacks(Init->getInit(), Unexpanded);
+ bool ShouldExpand = false;
+ bool RetainExpansion = false;
+ Optional<unsigned> NumExpansions;
+ if (CheckParameterPacksForExpansion(Init->getEllipsisLoc(),
+ BaseTL.getSourceRange(),
+ Unexpanded,
+ TemplateArgs, ShouldExpand,
+ RetainExpansion,
+ NumExpansions)) {
+ AnyErrors = true;
+ New->setInvalidDecl();
+ continue;
+ }
+ assert(ShouldExpand && "Partial instantiation of base initializer?");
+
+      // Loop over all of the arguments in the argument pack(s).
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this, I);
+
+ // Instantiate the initializer.
+ ExprResult TempInit = SubstInitializer(Init->getInit(), TemplateArgs,
+ /*CXXDirectInit=*/true);
+ if (TempInit.isInvalid()) {
+ AnyErrors = true;
+ break;
+ }
+
+ // Instantiate the base type.
+ TypeSourceInfo *BaseTInfo = SubstType(Init->getTypeSourceInfo(),
+ TemplateArgs,
+ Init->getSourceLocation(),
+ New->getDeclName());
+ if (!BaseTInfo) {
+ AnyErrors = true;
+ break;
+ }
+
+ // Build the initializer.
+ MemInitResult NewInit = BuildBaseInitializer(BaseTInfo->getType(),
+ BaseTInfo, TempInit.get(),
+ New->getParent(),
+ SourceLocation());
+ if (NewInit.isInvalid()) {
+ AnyErrors = true;
+ break;
+ }
+
+ NewInits.push_back(NewInit.get());
+ }
+
+ continue;
+ }
+
+ // Instantiate the initializer.
+ ExprResult TempInit = SubstInitializer(Init->getInit(), TemplateArgs,
+ /*CXXDirectInit=*/true);
+ if (TempInit.isInvalid()) {
+ AnyErrors = true;
+ continue;
+ }
+
+ MemInitResult NewInit;
+ if (Init->isDelegatingInitializer() || Init->isBaseInitializer()) {
+ TypeSourceInfo *TInfo = SubstType(Init->getTypeSourceInfo(),
+ TemplateArgs,
+ Init->getSourceLocation(),
+ New->getDeclName());
+ if (!TInfo) {
+ AnyErrors = true;
+ New->setInvalidDecl();
+ continue;
+ }
+
+ if (Init->isBaseInitializer())
+ NewInit = BuildBaseInitializer(TInfo->getType(), TInfo, TempInit.get(),
+ New->getParent(), EllipsisLoc);
+ else
+ NewInit = BuildDelegatingInitializer(TInfo, TempInit.get(),
+ cast<CXXRecordDecl>(CurContext->getParent()));
+ } else if (Init->isMemberInitializer()) {
+ FieldDecl *Member = cast_or_null<FieldDecl>(FindInstantiatedDecl(
+ Init->getMemberLocation(),
+ Init->getMember(),
+ TemplateArgs));
+ if (!Member) {
+ AnyErrors = true;
+ New->setInvalidDecl();
+ continue;
+ }
+
+ NewInit = BuildMemberInitializer(Member, TempInit.get(),
+ Init->getSourceLocation());
+ } else if (Init->isIndirectMemberInitializer()) {
+ IndirectFieldDecl *IndirectMember =
+ cast_or_null<IndirectFieldDecl>(FindInstantiatedDecl(
+ Init->getMemberLocation(),
+ Init->getIndirectMember(), TemplateArgs));
+
+ if (!IndirectMember) {
+ AnyErrors = true;
+ New->setInvalidDecl();
+ continue;
+ }
+
+ NewInit = BuildMemberInitializer(IndirectMember, TempInit.get(),
+ Init->getSourceLocation());
+ }
+
+ if (NewInit.isInvalid()) {
+ AnyErrors = true;
+ New->setInvalidDecl();
+ } else {
+ NewInits.push_back(NewInit.get());
+ }
+ }
+
+ // Assign all the initializers to the new constructor.
+ ActOnMemInitializers(New,
+ /*FIXME: ColonLoc */
+ SourceLocation(),
+ NewInits,
+ AnyErrors);
+}
+
+// TODO: this could be templated if the various decl types used the
+// same method name.
+static bool isInstantiationOf(ClassTemplateDecl *Pattern,
+ ClassTemplateDecl *Instance) {
+ Pattern = Pattern->getCanonicalDecl();
+
+ do {
+ Instance = Instance->getCanonicalDecl();
+ if (Pattern == Instance) return true;
+ Instance = Instance->getInstantiatedFromMemberTemplate();
+ } while (Instance);
+
+ return false;
+}
+
+static bool isInstantiationOf(FunctionTemplateDecl *Pattern,
+ FunctionTemplateDecl *Instance) {
+ Pattern = Pattern->getCanonicalDecl();
+
+ do {
+ Instance = Instance->getCanonicalDecl();
+ if (Pattern == Instance) return true;
+ Instance = Instance->getInstantiatedFromMemberTemplate();
+ } while (Instance);
+
+ return false;
+}
+
+static bool
+isInstantiationOf(ClassTemplatePartialSpecializationDecl *Pattern,
+ ClassTemplatePartialSpecializationDecl *Instance) {
+ Pattern
+ = cast<ClassTemplatePartialSpecializationDecl>(Pattern->getCanonicalDecl());
+ do {
+ Instance = cast<ClassTemplatePartialSpecializationDecl>(
+ Instance->getCanonicalDecl());
+ if (Pattern == Instance)
+ return true;
+ Instance = Instance->getInstantiatedFromMember();
+ } while (Instance);
+
+ return false;
+}
+
+static bool isInstantiationOf(CXXRecordDecl *Pattern,
+ CXXRecordDecl *Instance) {
+ Pattern = Pattern->getCanonicalDecl();
+
+ do {
+ Instance = Instance->getCanonicalDecl();
+ if (Pattern == Instance) return true;
+ Instance = Instance->getInstantiatedFromMemberClass();
+ } while (Instance);
+
+ return false;
+}
+
+static bool isInstantiationOf(FunctionDecl *Pattern,
+ FunctionDecl *Instance) {
+ Pattern = Pattern->getCanonicalDecl();
+
+ do {
+ Instance = Instance->getCanonicalDecl();
+ if (Pattern == Instance) return true;
+ Instance = Instance->getInstantiatedFromMemberFunction();
+ } while (Instance);
+
+ return false;
+}
+
+static bool isInstantiationOf(EnumDecl *Pattern,
+ EnumDecl *Instance) {
+ Pattern = Pattern->getCanonicalDecl();
+
+ do {
+ Instance = Instance->getCanonicalDecl();
+ if (Pattern == Instance) return true;
+ Instance = Instance->getInstantiatedFromMemberEnum();
+ } while (Instance);
+
+ return false;
+}
+
+static bool isInstantiationOf(UsingShadowDecl *Pattern,
+ UsingShadowDecl *Instance,
+ ASTContext &C) {
+ return declaresSameEntity(C.getInstantiatedFromUsingShadowDecl(Instance),
+ Pattern);
+}
+
+static bool isInstantiationOf(UsingDecl *Pattern,
+ UsingDecl *Instance,
+ ASTContext &C) {
+ return declaresSameEntity(C.getInstantiatedFromUsingDecl(Instance), Pattern);
+}
+
+static bool isInstantiationOf(UnresolvedUsingValueDecl *Pattern,
+ UsingDecl *Instance,
+ ASTContext &C) {
+ return declaresSameEntity(C.getInstantiatedFromUsingDecl(Instance), Pattern);
+}
+
+static bool isInstantiationOf(UnresolvedUsingTypenameDecl *Pattern,
+ UsingDecl *Instance,
+ ASTContext &C) {
+ return declaresSameEntity(C.getInstantiatedFromUsingDecl(Instance), Pattern);
+}
+
+static bool isInstantiationOfStaticDataMember(VarDecl *Pattern,
+ VarDecl *Instance) {
+ assert(Instance->isStaticDataMember());
+
+ Pattern = Pattern->getCanonicalDecl();
+
+ do {
+ Instance = Instance->getCanonicalDecl();
+ if (Pattern == Instance) return true;
+ Instance = Instance->getInstantiatedFromStaticDataMember();
+ } while (Instance);
+
+ return false;
+}
+
+// Other is the prospective instantiation
+// D is the prospective pattern
+static bool isInstantiationOf(ASTContext &Ctx, NamedDecl *D, Decl *Other) {
+ if (D->getKind() != Other->getKind()) {
+ if (UnresolvedUsingTypenameDecl *UUD
+ = dyn_cast<UnresolvedUsingTypenameDecl>(D)) {
+ if (UsingDecl *UD = dyn_cast<UsingDecl>(Other)) {
+ return isInstantiationOf(UUD, UD, Ctx);
+ }
+ }
+
+ if (UnresolvedUsingValueDecl *UUD
+ = dyn_cast<UnresolvedUsingValueDecl>(D)) {
+ if (UsingDecl *UD = dyn_cast<UsingDecl>(Other)) {
+ return isInstantiationOf(UUD, UD, Ctx);
+ }
+ }
+
+ return false;
+ }
+
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Other))
+ return isInstantiationOf(cast<CXXRecordDecl>(D), Record);
+
+ if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Other))
+ return isInstantiationOf(cast<FunctionDecl>(D), Function);
+
+ if (EnumDecl *Enum = dyn_cast<EnumDecl>(Other))
+ return isInstantiationOf(cast<EnumDecl>(D), Enum);
+
+ if (VarDecl *Var = dyn_cast<VarDecl>(Other))
+ if (Var->isStaticDataMember())
+ return isInstantiationOfStaticDataMember(cast<VarDecl>(D), Var);
+
+ if (ClassTemplateDecl *Temp = dyn_cast<ClassTemplateDecl>(Other))
+ return isInstantiationOf(cast<ClassTemplateDecl>(D), Temp);
+
+ if (FunctionTemplateDecl *Temp = dyn_cast<FunctionTemplateDecl>(Other))
+ return isInstantiationOf(cast<FunctionTemplateDecl>(D), Temp);
+
+ if (ClassTemplatePartialSpecializationDecl *PartialSpec
+ = dyn_cast<ClassTemplatePartialSpecializationDecl>(Other))
+ return isInstantiationOf(cast<ClassTemplatePartialSpecializationDecl>(D),
+ PartialSpec);
+
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(Other)) {
+ if (!Field->getDeclName()) {
+ // This is an unnamed field.
+ return declaresSameEntity(Ctx.getInstantiatedFromUnnamedFieldDecl(Field),
+ cast<FieldDecl>(D));
+ }
+ }
+
+ if (UsingDecl *Using = dyn_cast<UsingDecl>(Other))
+ return isInstantiationOf(cast<UsingDecl>(D), Using, Ctx);
+
+ if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(Other))
+ return isInstantiationOf(cast<UsingShadowDecl>(D), Shadow, Ctx);
+
+ return D->getDeclName() && isa<NamedDecl>(Other) &&
+ D->getDeclName() == cast<NamedDecl>(Other)->getDeclName();
+}
+
+template<typename ForwardIterator>
+static NamedDecl *findInstantiationOf(ASTContext &Ctx,
+ NamedDecl *D,
+ ForwardIterator first,
+ ForwardIterator last) {
+ for (; first != last; ++first)
+ if (isInstantiationOf(Ctx, D, *first))
+ return cast<NamedDecl>(*first);
+
+ return nullptr;
+}
+
+/// \brief Finds the instantiation of the given declaration context
+/// within the current instantiation.
+///
+/// \returns NULL if there was an error
+DeclContext *Sema::FindInstantiatedContext(SourceLocation Loc, DeclContext* DC,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ if (NamedDecl *D = dyn_cast<NamedDecl>(DC)) {
+ Decl* ID = FindInstantiatedDecl(Loc, D, TemplateArgs);
+ return cast_or_null<DeclContext>(ID);
+ } else return DC;
+}
+
+/// \brief Find the instantiation of the given declaration within the
+/// current instantiation.
+///
+/// This routine is intended to be used when \p D is a declaration
+/// referenced from within a template that needs to be mapped into the
+/// corresponding declaration within an instantiation. For example,
+/// given:
+///
+/// \code
+/// template<typename T>
+/// struct X {
+/// enum Kind {
+/// KnownValue = sizeof(T)
+/// };
+///
+/// bool getKind() const { return KnownValue; }
+/// };
+///
+/// template struct X<int>;
+/// \endcode
+///
+/// In the instantiation of <tt>X<int>::getKind()</tt>, we need to map the
+/// \p EnumConstantDecl for \p KnownValue (which refers to
+/// <tt>X<T>::<Kind>::KnownValue</tt>) to its instantiation
+/// (<tt>X<int>::<Kind>::KnownValue</tt>). \p FindInstantiatedDecl performs
+/// this mapping from within the instantiation of <tt>X<int></tt>.
+NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ DeclContext *ParentDC = D->getDeclContext();
+  // FIXME: Parameters of pointers to functions (y below) that are themselves
+  // parameters (p below) can have their ParentDC set to the translation
+  // unit, so we cannot consistently check whether the ParentDC of such a
+  // parameter is dependent and/or a FunctionOrMethod. For example, in the
+  // following code, template argument deduction tries to find an
+  // instantiated decl for (T y) while the ParentDC of y is the translation
+  // unit:
+  //   template <class T> void Foo(auto (*p)(T y) -> decltype(y())) {}
+  //   float baz(float(*)()) { return 0.0; }
+  //   Foo(baz);
+  // The better fix here is perhaps to ensure that a ParmVarDecl, by the time
+  // it gets here, always has a FunctionOrMethod as its ParentDC. For now:
+  // - as long as we have a ParmVarDecl whose parent is non-dependent and
+  //   whose type is not instantiation-dependent, do nothing to the decl;
+  // - otherwise, find its instantiated decl.
+ if (isa<ParmVarDecl>(D) && !ParentDC->isDependentContext() &&
+ !cast<ParmVarDecl>(D)->getType()->isInstantiationDependentType())
+ return D;
+ if (isa<ParmVarDecl>(D) || isa<NonTypeTemplateParmDecl>(D) ||
+ isa<TemplateTypeParmDecl>(D) || isa<TemplateTemplateParmDecl>(D) ||
+ (ParentDC->isFunctionOrMethod() && ParentDC->isDependentContext()) ||
+ (isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda())) {
+ // D is a local of some kind. Look into the map of local
+ // declarations to their instantiations.
+ if (CurrentInstantiationScope) {
+ if (auto Found = CurrentInstantiationScope->findInstantiationOf(D)) {
+ if (Decl *FD = Found->dyn_cast<Decl *>())
+ return cast<NamedDecl>(FD);
+
+ int PackIdx = ArgumentPackSubstitutionIndex;
+ assert(PackIdx != -1 &&
+ "found declaration pack but not pack expanding");
+ typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
+ return cast<NamedDecl>((*Found->get<DeclArgumentPack *>())[PackIdx]);
+ }
+ }
+
+ // If we're performing a partial substitution during template argument
+ // deduction, we may not have values for template parameters yet. They
+ // just map to themselves.
+ if (isa<NonTypeTemplateParmDecl>(D) || isa<TemplateTypeParmDecl>(D) ||
+ isa<TemplateTemplateParmDecl>(D))
+ return D;
+
+ if (D->isInvalidDecl())
+ return nullptr;
+
+    // Normally this function only searches for already-instantiated
+    // declarations; however, we have to make an exception for local types
+    // used before their definition, as in this code:
+ //
+ // template<typename T> void f1() {
+ // void g1(struct x1);
+ // struct x1 {};
+ // }
+ //
+ // In this case instantiation of the type of 'g1' requires definition of
+ // 'x1', which is defined later. Error recovery may produce an enum used
+ // before definition. In these cases we need to instantiate relevant
+ // declarations here.
+ bool NeedInstantiate = false;
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D))
+ NeedInstantiate = RD->isLocalClass();
+ else
+ NeedInstantiate = isa<EnumDecl>(D);
+ if (NeedInstantiate) {
+ Decl *Inst = SubstDecl(D, CurContext, TemplateArgs);
+ CurrentInstantiationScope->InstantiatedLocal(D, Inst);
+ return cast<TypeDecl>(Inst);
+ }
+
+ // If we didn't find the decl, then we must have a label decl that hasn't
+ // been found yet. Lazily instantiate it and return it now.
+ assert(isa<LabelDecl>(D));
+
+ Decl *Inst = SubstDecl(D, CurContext, TemplateArgs);
+ assert(Inst && "Failed to instantiate label??");
+
+ CurrentInstantiationScope->InstantiatedLocal(D, Inst);
+ return cast<LabelDecl>(Inst);
+ }
+
+ // For variable template specializations, update those that are still
+ // type-dependent.
+ if (VarTemplateSpecializationDecl *VarSpec =
+ dyn_cast<VarTemplateSpecializationDecl>(D)) {
+ bool InstantiationDependent = false;
+ const TemplateArgumentListInfo &VarTemplateArgs =
+ VarSpec->getTemplateArgsInfo();
+ if (TemplateSpecializationType::anyDependentTemplateArguments(
+ VarTemplateArgs, InstantiationDependent))
+ D = cast<NamedDecl>(
+ SubstDecl(D, VarSpec->getDeclContext(), TemplateArgs));
+ return D;
+ }
+
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) {
+ if (!Record->isDependentContext())
+ return D;
+
+ // Determine whether this record is the "templated" declaration describing
+ // a class template or class template partial specialization.
+ ClassTemplateDecl *ClassTemplate = Record->getDescribedClassTemplate();
+ if (ClassTemplate)
+ ClassTemplate = ClassTemplate->getCanonicalDecl();
+ else if (ClassTemplatePartialSpecializationDecl *PartialSpec
+ = dyn_cast<ClassTemplatePartialSpecializationDecl>(Record))
+ ClassTemplate = PartialSpec->getSpecializedTemplate()->getCanonicalDecl();
+
+ // Walk the current context to find either the record or an instantiation of
+ // it.
+ DeclContext *DC = CurContext;
+ while (!DC->isFileContext()) {
+ // If we're performing substitution while we're inside the template
+ // definition, we'll find our own context. We're done.
+ if (DC->Equals(Record))
+ return Record;
+
+ if (CXXRecordDecl *InstRecord = dyn_cast<CXXRecordDecl>(DC)) {
+ // Check whether we're in the process of instantiating a class template
+ // specialization of the template we're mapping.
+ if (ClassTemplateSpecializationDecl *InstSpec
+ = dyn_cast<ClassTemplateSpecializationDecl>(InstRecord)){
+ ClassTemplateDecl *SpecTemplate = InstSpec->getSpecializedTemplate();
+ if (ClassTemplate && isInstantiationOf(ClassTemplate, SpecTemplate))
+ return InstRecord;
+ }
+
+ // Check whether we're in the process of instantiating a member class.
+ if (isInstantiationOf(Record, InstRecord))
+ return InstRecord;
+ }
+
+ // Move to the outer template scope.
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(DC)) {
+ if (FD->getFriendObjectKind() && FD->getDeclContext()->isFileContext()){
+ DC = FD->getLexicalDeclContext();
+ continue;
+ }
+ }
+
+ DC = DC->getParent();
+ }
+
+ // Fall through to deal with other dependent record types (e.g.,
+ // anonymous unions in class templates).
+ }
+
+ if (!ParentDC->isDependentContext())
+ return D;
+
+ ParentDC = FindInstantiatedContext(Loc, ParentDC, TemplateArgs);
+ if (!ParentDC)
+ return nullptr;
+
+ if (ParentDC != D->getDeclContext()) {
+ // We performed some kind of instantiation in the parent context,
+ // so now we need to look into the instantiated parent context to
+ // find the instantiation of the declaration D.
+
+ // If our context used to be dependent, we may need to instantiate
+ // it before performing lookup into that context.
+ bool IsBeingInstantiated = false;
+ if (CXXRecordDecl *Spec = dyn_cast<CXXRecordDecl>(ParentDC)) {
+ if (!Spec->isDependentContext()) {
+ QualType T = Context.getTypeDeclType(Spec);
+ const RecordType *Tag = T->getAs<RecordType>();
+ assert(Tag && "type of non-dependent record is not a RecordType");
+ if (Tag->isBeingDefined())
+ IsBeingInstantiated = true;
+ if (!Tag->isBeingDefined() &&
+ RequireCompleteType(Loc, T, diag::err_incomplete_type))
+ return nullptr;
+
+ ParentDC = Tag->getDecl();
+ }
+ }
+
+ NamedDecl *Result = nullptr;
+ if (D->getDeclName()) {
+ DeclContext::lookup_result Found = ParentDC->lookup(D->getDeclName());
+ Result = findInstantiationOf(Context, D, Found.begin(), Found.end());
+ } else {
+ // Since we don't have a name for the entity we're looking for,
+ // our only option is to walk through all of the declarations to
+ // find that name. This will occur in a few cases:
+ //
+ // - anonymous struct/union within a template
+ // - unnamed class/struct/union/enum within a template
+ //
+ // FIXME: Find a better way to find these instantiations!
+ Result = findInstantiationOf(Context, D,
+ ParentDC->decls_begin(),
+ ParentDC->decls_end());
+ }
+
+ if (!Result) {
+ if (isa<UsingShadowDecl>(D)) {
+ // UsingShadowDecls can instantiate to nothing because of using hiding.
+ } else if (Diags.hasErrorOccurred()) {
+ // We've already complained about something, so most likely this
+ // declaration failed to instantiate. There's no point in complaining
+ // further, since this is normal in invalid code.
+ } else if (IsBeingInstantiated) {
+ // The class in which this member exists is currently being
+ // instantiated, and we haven't gotten around to instantiating this
+ // member yet. This can happen when the code uses forward declarations
+ // of member classes, and introduces ordering dependencies via
+ // template instantiation.
+ Diag(Loc, diag::err_member_not_yet_instantiated)
+ << D->getDeclName()
+ << Context.getTypeDeclType(cast<CXXRecordDecl>(ParentDC));
+ Diag(D->getLocation(), diag::note_non_instantiated_member_here);
+ } else if (EnumConstantDecl *ED = dyn_cast<EnumConstantDecl>(D)) {
+ // This enumeration constant was found when the template was defined,
+ // but can't be found in the instantiation. This can happen if an
+ // unscoped enumeration member is explicitly specialized.
+ EnumDecl *Enum = cast<EnumDecl>(ED->getLexicalDeclContext());
+ EnumDecl *Spec = cast<EnumDecl>(FindInstantiatedDecl(Loc, Enum,
+ TemplateArgs));
+ assert(Spec->getTemplateSpecializationKind() ==
+ TSK_ExplicitSpecialization);
+ Diag(Loc, diag::err_enumerator_does_not_exist)
+ << D->getDeclName()
+ << Context.getTypeDeclType(cast<TypeDecl>(Spec->getDeclContext()));
+ Diag(Spec->getLocation(), diag::note_enum_specialized_here)
+ << Context.getTypeDeclType(Spec);
+ } else {
+ // We should have found something, but didn't.
+ llvm_unreachable("Unable to find instantiation of declaration!");
+ }
+ }
+
+ D = Result;
+ }
+
+ return D;
+}
+
+/// \brief Performs template instantiation for all implicit template
+/// instantiations we have seen up to this point.
+void Sema::PerformPendingInstantiations(bool LocalOnly) {
+ while (!PendingLocalImplicitInstantiations.empty() ||
+ (!LocalOnly && !PendingInstantiations.empty())) {
+ PendingImplicitInstantiation Inst;
+
+ if (PendingLocalImplicitInstantiations.empty()) {
+ Inst = PendingInstantiations.front();
+ PendingInstantiations.pop_front();
+ } else {
+ Inst = PendingLocalImplicitInstantiations.front();
+ PendingLocalImplicitInstantiations.pop_front();
+ }
+
+ // Instantiate function definitions
+ if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Inst.first)) {
+ PrettyDeclStackTraceEntry CrashInfo(*this, Function, SourceLocation(),
+ "instantiating function definition");
+ bool DefinitionRequired = Function->getTemplateSpecializationKind() ==
+ TSK_ExplicitInstantiationDefinition;
+ InstantiateFunctionDefinition(/*FIXME:*/Inst.second, Function, true,
+ DefinitionRequired);
+ continue;
+ }
+
+ // Instantiate variable definitions
+ VarDecl *Var = cast<VarDecl>(Inst.first);
+
+ assert((Var->isStaticDataMember() ||
+ isa<VarTemplateSpecializationDecl>(Var)) &&
+ "Not a static data member, nor a variable template"
+ " specialization?");
+
+ // Don't try to instantiate declarations if the most recent redeclaration
+ // is invalid.
+ if (Var->getMostRecentDecl()->isInvalidDecl())
+ continue;
+
+ // Check if the most recent declaration has changed the specialization kind
+ // and removed the need for implicit instantiation.
+ switch (Var->getMostRecentDecl()->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ llvm_unreachable("Cannot instantitiate an undeclared specialization.");
+ case TSK_ExplicitInstantiationDeclaration:
+ case TSK_ExplicitSpecialization:
+ continue; // No longer need to instantiate this type.
+ case TSK_ExplicitInstantiationDefinition:
+ // We only need an instantiation if the pending instantiation *is* the
+ // explicit instantiation.
+ if (Var != Var->getMostRecentDecl()) continue;
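+      // Fall through to instantiate the definition.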
+ case TSK_ImplicitInstantiation:
+ break;
+ }
+
+ PrettyDeclStackTraceEntry CrashInfo(*this, Var, SourceLocation(),
+ "instantiating variable definition");
+ bool DefinitionRequired = Var->getTemplateSpecializationKind() ==
+ TSK_ExplicitInstantiationDefinition;
+
+ // Instantiate static data member definitions or variable template
+ // specializations.
+ InstantiateVariableDefinition(/*FIXME:*/ Inst.second, Var, true,
+ DefinitionRequired);
+ }
+}
+
+void Sema::PerformDependentDiagnostics(const DeclContext *Pattern,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ for (auto DD : Pattern->ddiags()) {
+ switch (DD->getKind()) {
+ case DependentDiagnostic::Access:
+ HandleDependentAccessCheck(*DD, TemplateArgs);
+ break;
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp
new file mode 100644
index 0000000..61052f0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp
@@ -0,0 +1,1038 @@
+//===------ SemaTemplateVariadic.cpp - C++ Variadic Templates ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for C++0x variadic templates.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/Sema.h"
+#include "TypeLocBuilder.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Template.h"
+
+using namespace clang;
+
+//----------------------------------------------------------------------------
+// Visitor that collects unexpanded parameter packs
+//----------------------------------------------------------------------------
+
+namespace {
+ /// \brief A class that collects unexpanded parameter packs.
+ class CollectUnexpandedParameterPacksVisitor :
+ public RecursiveASTVisitor<CollectUnexpandedParameterPacksVisitor>
+ {
+ typedef RecursiveASTVisitor<CollectUnexpandedParameterPacksVisitor>
+ inherited;
+
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded;
+
+ bool InLambda;
+
+ public:
+ explicit CollectUnexpandedParameterPacksVisitor(
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded)
+ : Unexpanded(Unexpanded), InLambda(false) { }
+
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ //------------------------------------------------------------------------
+ // Recording occurrences of (unexpanded) parameter packs.
+ //------------------------------------------------------------------------
+
+ /// \brief Record occurrences of template type parameter packs.
+ bool VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
+ if (TL.getTypePtr()->isParameterPack())
+ Unexpanded.push_back(std::make_pair(TL.getTypePtr(), TL.getNameLoc()));
+ return true;
+ }
+
+ /// \brief Record occurrences of template type parameter packs
+ /// when we don't have proper source-location information for
+ /// them.
+ ///
+ /// Ideally, this routine would never be used.
+ bool VisitTemplateTypeParmType(TemplateTypeParmType *T) {
+ if (T->isParameterPack())
+ Unexpanded.push_back(std::make_pair(T, SourceLocation()));
+
+ return true;
+ }
+
+ /// \brief Record occurrences of function and non-type template
+ /// parameter packs in an expression.
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ if (E->getDecl()->isParameterPack())
+ Unexpanded.push_back(std::make_pair(E->getDecl(), E->getLocation()));
+
+ return true;
+ }
+
+ /// \brief Record occurrences of template template parameter packs.
+ bool TraverseTemplateName(TemplateName Template) {
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast_or_null<TemplateTemplateParmDecl>(
+ Template.getAsTemplateDecl()))
+ if (TTP->isParameterPack())
+ Unexpanded.push_back(std::make_pair(TTP, SourceLocation()));
+
+ return inherited::TraverseTemplateName(Template);
+ }
+
+ /// \brief Suppress traversal into Objective-C container literal
+ /// elements that are pack expansions.
+ bool TraverseObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
+ if (!E->containsUnexpandedParameterPack())
+ return true;
+
+ for (unsigned I = 0, N = E->getNumElements(); I != N; ++I) {
+ ObjCDictionaryElement Element = E->getKeyValueElement(I);
+ if (Element.isPackExpansion())
+ continue;
+
+ TraverseStmt(Element.Key);
+ TraverseStmt(Element.Value);
+ }
+ return true;
+ }
+ //------------------------------------------------------------------------
+ // Pruning the search for unexpanded parameter packs.
+ //------------------------------------------------------------------------
+
+ /// \brief Suppress traversal into statements and expressions that
+ /// do not contain unexpanded parameter packs.
+ bool TraverseStmt(Stmt *S) {
+ Expr *E = dyn_cast_or_null<Expr>(S);
+ if ((E && E->containsUnexpandedParameterPack()) || InLambda)
+ return inherited::TraverseStmt(S);
+
+ return true;
+ }
+
+ /// \brief Suppress traversal into types that do not contain
+ /// unexpanded parameter packs.
+ bool TraverseType(QualType T) {
+ if ((!T.isNull() && T->containsUnexpandedParameterPack()) || InLambda)
+ return inherited::TraverseType(T);
+
+ return true;
+ }
+
+    /// \brief Suppress traversal into types with location information
+ /// that do not contain unexpanded parameter packs.
+ bool TraverseTypeLoc(TypeLoc TL) {
+ if ((!TL.getType().isNull() &&
+ TL.getType()->containsUnexpandedParameterPack()) ||
+ InLambda)
+ return inherited::TraverseTypeLoc(TL);
+
+ return true;
+ }
+
+ /// \brief Suppress traversal of non-parameter declarations, since
+ /// they cannot contain unexpanded parameter packs.
+ bool TraverseDecl(Decl *D) {
+ if ((D && isa<ParmVarDecl>(D)) || InLambda)
+ return inherited::TraverseDecl(D);
+
+ return true;
+ }
+
+ /// \brief Suppress traversal of template argument pack expansions.
+ bool TraverseTemplateArgument(const TemplateArgument &Arg) {
+ if (Arg.isPackExpansion())
+ return true;
+
+ return inherited::TraverseTemplateArgument(Arg);
+ }
+
+ /// \brief Suppress traversal of template argument pack expansions.
+ bool TraverseTemplateArgumentLoc(const TemplateArgumentLoc &ArgLoc) {
+ if (ArgLoc.getArgument().isPackExpansion())
+ return true;
+
+ return inherited::TraverseTemplateArgumentLoc(ArgLoc);
+ }
+
+ /// \brief Note whether we're traversing a lambda containing an unexpanded
+ /// parameter pack. In this case, the unexpanded pack can occur anywhere,
+ /// including all the places where we normally wouldn't look. Within a
+ /// lambda, we don't propagate the 'contains unexpanded parameter pack' bit
+ /// outside an expression.
+ bool TraverseLambdaExpr(LambdaExpr *Lambda) {
+ // The ContainsUnexpandedParameterPack bit on a lambda is always correct,
+ // even if it's contained within another lambda.
+ if (!Lambda->containsUnexpandedParameterPack())
+ return true;
+
+ bool WasInLambda = InLambda;
+ InLambda = true;
+
+ // If any capture names a function parameter pack, that pack is expanded
+ // when the lambda is expanded.
+ for (LambdaExpr::capture_iterator I = Lambda->capture_begin(),
+ E = Lambda->capture_end();
+ I != E; ++I) {
+ if (I->capturesVariable()) {
+ VarDecl *VD = I->getCapturedVar();
+ if (VD->isParameterPack())
+ Unexpanded.push_back(std::make_pair(VD, I->getLocation()));
+ }
+ }
+
+ inherited::TraverseLambdaExpr(Lambda);
+
+ InLambda = WasInLambda;
+ return true;
+ }
+ };
+}
+
+/// \brief Determine whether it's possible for an unexpanded parameter pack to
+/// be valid in this location. This only happens when we're in a declaration
+/// that is nested within an expression that could be expanded, such as a
+/// lambda-expression within a function call.
+///
+/// This is conservatively correct, but may claim that some unexpanded packs are
+/// permitted when they are not.
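+///
+/// For example (illustrative):
+/// \code
+///   template<typename ...Ts> void g(Ts ...ts) {
+///     h([&] { return ts; }() ...);  // 'ts' is unexpanded inside the lambda
+///   }
+/// \endcode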
+bool Sema::isUnexpandedParameterPackPermitted() {
+ for (auto *SI : FunctionScopes)
+ if (isa<sema::LambdaScopeInfo>(SI))
+ return true;
+ return false;
+}
+
+/// \brief Diagnose all of the unexpanded parameter packs in the given
+/// vector.
+bool
+Sema::DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
+ UnexpandedParameterPackContext UPPC,
+ ArrayRef<UnexpandedParameterPack> Unexpanded) {
+ if (Unexpanded.empty())
+ return false;
+
+  // If we are within a lambda expression, record that the lambda contains an
+  // unexpanded parameter pack, and we are done.
+ // FIXME: Store 'Unexpanded' on the lambda so we don't need to recompute it
+ // later.
+ for (unsigned N = FunctionScopes.size(); N; --N) {
+ if (sema::LambdaScopeInfo *LSI =
+ dyn_cast<sema::LambdaScopeInfo>(FunctionScopes[N-1])) {
+ LSI->ContainsUnexpandedParameterPack = true;
+ return false;
+ }
+ }
+
+ SmallVector<SourceLocation, 4> Locations;
+ SmallVector<IdentifierInfo *, 4> Names;
+ llvm::SmallPtrSet<IdentifierInfo *, 4> NamesKnown;
+
+ for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
+ IdentifierInfo *Name = nullptr;
+ if (const TemplateTypeParmType *TTP
+ = Unexpanded[I].first.dyn_cast<const TemplateTypeParmType *>())
+ Name = TTP->getIdentifier();
+ else
+ Name = Unexpanded[I].first.get<NamedDecl *>()->getIdentifier();
+
+ if (Name && NamesKnown.insert(Name).second)
+ Names.push_back(Name);
+
+ if (Unexpanded[I].second.isValid())
+ Locations.push_back(Unexpanded[I].second);
+ }
+
+ DiagnosticBuilder DB = Diag(Loc, diag::err_unexpanded_parameter_pack)
+ << (int)UPPC << (int)Names.size();
+ for (size_t I = 0, E = std::min(Names.size(), (size_t)2); I != E; ++I)
+ DB << Names[I];
+
+ for (unsigned I = 0, N = Locations.size(); I != N; ++I)
+ DB << SourceRange(Locations[I]);
+ return true;
+}
+
+bool Sema::DiagnoseUnexpandedParameterPack(SourceLocation Loc,
+ TypeSourceInfo *T,
+ UnexpandedParameterPackContext UPPC) {
+ // C++0x [temp.variadic]p5:
+ // An appearance of a name of a parameter pack that is not expanded is
+ // ill-formed.
+ if (!T->getType()->containsUnexpandedParameterPack())
+ return false;
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseTypeLoc(
+ T->getTypeLoc());
+ assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
+ return DiagnoseUnexpandedParameterPacks(Loc, UPPC, Unexpanded);
+}
+
+bool Sema::DiagnoseUnexpandedParameterPack(Expr *E,
+ UnexpandedParameterPackContext UPPC) {
+ // C++0x [temp.variadic]p5:
+ // An appearance of a name of a parameter pack that is not expanded is
+ // ill-formed.
+ if (!E->containsUnexpandedParameterPack())
+ return false;
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseStmt(E);
+ assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
+ return DiagnoseUnexpandedParameterPacks(E->getLocStart(), UPPC, Unexpanded);
+}
+
+bool Sema::DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
+ UnexpandedParameterPackContext UPPC) {
+ // C++0x [temp.variadic]p5:
+ // An appearance of a name of a parameter pack that is not expanded is
+ // ill-formed.
+ if (!SS.getScopeRep() ||
+ !SS.getScopeRep()->containsUnexpandedParameterPack())
+ return false;
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseNestedNameSpecifier(SS.getScopeRep());
+ assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
+ return DiagnoseUnexpandedParameterPacks(SS.getRange().getBegin(),
+ UPPC, Unexpanded);
+}
+
+bool Sema::DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
+ UnexpandedParameterPackContext UPPC) {
+ // C++0x [temp.variadic]p5:
+ // An appearance of a name of a parameter pack that is not expanded is
+ // ill-formed.
+ switch (NameInfo.getName().getNameKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXOperatorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXUsingDirective:
+ return false;
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ // FIXME: We shouldn't need this null check!
+ if (TypeSourceInfo *TSInfo = NameInfo.getNamedTypeInfo())
+ return DiagnoseUnexpandedParameterPack(NameInfo.getLoc(), TSInfo, UPPC);
+
+ if (!NameInfo.getName().getCXXNameType()->containsUnexpandedParameterPack())
+ return false;
+
+ break;
+ }
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseType(NameInfo.getName().getCXXNameType());
+ assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
+ return DiagnoseUnexpandedParameterPacks(NameInfo.getLoc(), UPPC, Unexpanded);
+}
+
+bool Sema::DiagnoseUnexpandedParameterPack(SourceLocation Loc,
+ TemplateName Template,
+ UnexpandedParameterPackContext UPPC) {
+
+ if (Template.isNull() || !Template.containsUnexpandedParameterPack())
+ return false;
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseTemplateName(Template);
+ assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
+ return DiagnoseUnexpandedParameterPacks(Loc, UPPC, Unexpanded);
+}
+
+bool Sema::DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
+ UnexpandedParameterPackContext UPPC) {
+ if (Arg.getArgument().isNull() ||
+ !Arg.getArgument().containsUnexpandedParameterPack())
+ return false;
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseTemplateArgumentLoc(Arg);
+ assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
+ return DiagnoseUnexpandedParameterPacks(Arg.getLocation(), UPPC, Unexpanded);
+}
+
+void Sema::collectUnexpandedParameterPacks(TemplateArgument Arg,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseTemplateArgument(Arg);
+}
+
+void Sema::collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseTemplateArgumentLoc(Arg);
+}
+
+void Sema::collectUnexpandedParameterPacks(QualType T,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseType(T);
+}
+
+void Sema::collectUnexpandedParameterPacks(TypeLoc TL,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseTypeLoc(TL);
+}
+
+void Sema::collectUnexpandedParameterPacks(CXXScopeSpec &SS,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ NestedNameSpecifier *Qualifier = SS.getScopeRep();
+ if (!Qualifier)
+ return;
+
+ NestedNameSpecifierLoc QualifierLoc(Qualifier, SS.location_data());
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseNestedNameSpecifierLoc(QualifierLoc);
+}
+
+void Sema::collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseDeclarationNameInfo(NameInfo);
+}
+
+
+ParsedTemplateArgument
+Sema::ActOnPackExpansion(const ParsedTemplateArgument &Arg,
+ SourceLocation EllipsisLoc) {
+ if (Arg.isInvalid())
+ return Arg;
+
+ switch (Arg.getKind()) {
+ case ParsedTemplateArgument::Type: {
+ TypeResult Result = ActOnPackExpansion(Arg.getAsType(), EllipsisLoc);
+ if (Result.isInvalid())
+ return ParsedTemplateArgument();
+
+ return ParsedTemplateArgument(Arg.getKind(), Result.get().getAsOpaquePtr(),
+ Arg.getLocation());
+ }
+
+ case ParsedTemplateArgument::NonType: {
+ ExprResult Result = ActOnPackExpansion(Arg.getAsExpr(), EllipsisLoc);
+ if (Result.isInvalid())
+ return ParsedTemplateArgument();
+
+ return ParsedTemplateArgument(Arg.getKind(), Result.get(),
+ Arg.getLocation());
+ }
+
+ case ParsedTemplateArgument::Template:
+ if (!Arg.getAsTemplate().get().containsUnexpandedParameterPack()) {
+ SourceRange R(Arg.getLocation());
+ if (Arg.getScopeSpec().isValid())
+ R.setBegin(Arg.getScopeSpec().getBeginLoc());
+ Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << R;
+ return ParsedTemplateArgument();
+ }
+
+ return Arg.getTemplatePackExpansion(EllipsisLoc);
+ }
+ llvm_unreachable("Unhandled template argument kind?");
+}
+
+TypeResult Sema::ActOnPackExpansion(ParsedType Type,
+ SourceLocation EllipsisLoc) {
+ TypeSourceInfo *TSInfo;
+ GetTypeFromParser(Type, &TSInfo);
+ if (!TSInfo)
+ return true;
+
+ TypeSourceInfo *TSResult = CheckPackExpansion(TSInfo, EllipsisLoc, None);
+ if (!TSResult)
+ return true;
+
+ return CreateParsedType(TSResult->getType(), TSResult);
+}
+
+TypeSourceInfo *
+Sema::CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc,
+ Optional<unsigned> NumExpansions) {
+ // Create the pack expansion type and source-location information.
+ QualType Result = CheckPackExpansion(Pattern->getType(),
+ Pattern->getTypeLoc().getSourceRange(),
+ EllipsisLoc, NumExpansions);
+ if (Result.isNull())
+ return nullptr;
+
+ TypeLocBuilder TLB;
+ TLB.pushFullCopy(Pattern->getTypeLoc());
+ PackExpansionTypeLoc TL = TLB.push<PackExpansionTypeLoc>(Result);
+ TL.setEllipsisLoc(EllipsisLoc);
+
+ return TLB.getTypeSourceInfo(Context, Result);
+}
+
+QualType Sema::CheckPackExpansion(QualType Pattern, SourceRange PatternRange,
+ SourceLocation EllipsisLoc,
+ Optional<unsigned> NumExpansions) {
+ // C++0x [temp.variadic]p5:
+ // The pattern of a pack expansion shall name one or more
+ // parameter packs that are not expanded by a nested pack
+ // expansion.
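+  // For example (illustrative): the pattern in tuple<Ts...> names the pack
+  // Ts, but a pattern like tuple<int...> names no pack and is diagnosed
+  // below.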
+ if (!Pattern->containsUnexpandedParameterPack()) {
+ Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << PatternRange;
+ return QualType();
+ }
+
+ return Context.getPackExpansionType(Pattern, NumExpansions);
+}
+
+ExprResult Sema::ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc) {
+ return CheckPackExpansion(Pattern, EllipsisLoc, None);
+}
+
+ExprResult Sema::CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
+ Optional<unsigned> NumExpansions) {
+ if (!Pattern)
+ return ExprError();
+
+ // C++0x [temp.variadic]p5:
+ // The pattern of a pack expansion shall name one or more
+ // parameter packs that are not expanded by a nested pack
+ // expansion.
+ if (!Pattern->containsUnexpandedParameterPack()) {
+ Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << Pattern->getSourceRange();
+ return ExprError();
+ }
+
+ // Create the pack expansion expression and source-location information.
+ return new (Context)
+ PackExpansionExpr(Context.DependentTy, Pattern, EllipsisLoc, NumExpansions);
+}
+
+/// \brief Retrieve the depth and index of a parameter pack.
+static std::pair<unsigned, unsigned>
+getDepthAndIndex(NamedDecl *ND) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(ND))
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
+
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(ND))
+ return std::make_pair(NTTP->getDepth(), NTTP->getIndex());
+
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(ND);
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
+}
+
+bool Sema::CheckParameterPacksForExpansion(
+ SourceLocation EllipsisLoc, SourceRange PatternRange,
+ ArrayRef<UnexpandedParameterPack> Unexpanded,
+ const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand,
+ bool &RetainExpansion, Optional<unsigned> &NumExpansions) {
+ ShouldExpand = true;
+ RetainExpansion = false;
+ std::pair<IdentifierInfo *, SourceLocation> FirstPack;
+ bool HaveFirstPack = false;
+
+ for (ArrayRef<UnexpandedParameterPack>::iterator i = Unexpanded.begin(),
+ end = Unexpanded.end();
+ i != end; ++i) {
+ // Compute the depth and index for this parameter pack.
+ unsigned Depth = 0, Index = 0;
+ IdentifierInfo *Name;
+ bool IsFunctionParameterPack = false;
+
+ if (const TemplateTypeParmType *TTP
+ = i->first.dyn_cast<const TemplateTypeParmType *>()) {
+ Depth = TTP->getDepth();
+ Index = TTP->getIndex();
+ Name = TTP->getIdentifier();
+ } else {
+ NamedDecl *ND = i->first.get<NamedDecl *>();
+ if (isa<ParmVarDecl>(ND))
+ IsFunctionParameterPack = true;
+ else
+ std::tie(Depth, Index) = getDepthAndIndex(ND);
+
+ Name = ND->getIdentifier();
+ }
+
+ // Determine the size of this argument pack.
+ unsigned NewPackSize;
+ if (IsFunctionParameterPack) {
+ // Figure out whether we're instantiating to an argument pack or not.
+ typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
+
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> *Instantiation
+ = CurrentInstantiationScope->findInstantiationOf(
+ i->first.get<NamedDecl *>());
+ if (Instantiation->is<DeclArgumentPack *>()) {
+ // We could expand this function parameter pack.
+ NewPackSize = Instantiation->get<DeclArgumentPack *>()->size();
+ } else {
+ // We can't expand this function parameter pack, so we can't expand
+ // the pack expansion.
+ ShouldExpand = false;
+ continue;
+ }
+ } else {
+ // If we don't have a template argument at this depth/index, then we
+ // cannot expand the pack expansion. Make a note of this, but we still
+ // want to check any parameter packs we *do* have arguments for.
+ if (Depth >= TemplateArgs.getNumLevels() ||
+ !TemplateArgs.hasTemplateArgument(Depth, Index)) {
+ ShouldExpand = false;
+ continue;
+ }
+
+ // Determine the size of the argument pack.
+ NewPackSize = TemplateArgs(Depth, Index).pack_size();
+ }
+
+ // C++0x [temp.arg.explicit]p9:
+ // Template argument deduction can extend the sequence of template
+ // arguments corresponding to a template parameter pack, even when the
+ // sequence contains explicitly specified template arguments.
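+    // For example (illustrative): given
+    //   template<typename ...Ts> void f(Ts...);
+    // the call f<int>(1, 2.0) extends the explicitly specified {int} to
+    // {int, double}, so a partially-substituted pack must retain its
+    // expansion.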
+ if (!IsFunctionParameterPack) {
+ if (NamedDecl *PartialPack
+ = CurrentInstantiationScope->getPartiallySubstitutedPack()){
+ unsigned PartialDepth, PartialIndex;
+ std::tie(PartialDepth, PartialIndex) = getDepthAndIndex(PartialPack);
+ if (PartialDepth == Depth && PartialIndex == Index)
+ RetainExpansion = true;
+ }
+ }
+
+ if (!NumExpansions) {
+      // This is the first pack we've seen for which we have an argument.
+ // Record it.
+ NumExpansions = NewPackSize;
+ FirstPack.first = Name;
+ FirstPack.second = i->second;
+ HaveFirstPack = true;
+ continue;
+ }
+
+ if (NewPackSize != *NumExpansions) {
+ // C++0x [temp.variadic]p5:
+ // All of the parameter packs expanded by a pack expansion shall have
+ // the same number of arguments specified.
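+      // For example (illustrative): expanding pair<As, Bs>... with
+      // As = {int, long} but Bs = {float} conflicts: lengths 2 vs. 1.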
+ if (HaveFirstPack)
+ Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict)
+ << FirstPack.first << Name << *NumExpansions << NewPackSize
+ << SourceRange(FirstPack.second) << SourceRange(i->second);
+ else
+ Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict_multilevel)
+ << Name << *NumExpansions << NewPackSize
+ << SourceRange(i->second);
+ return true;
+ }
+ }
+
+ return false;
+}
+
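+/// \brief Compute the number of arguments that a pack expansion of the given
+/// type would produce, if known. For example (illustrative): if the pattern
+/// names only a pack Ts currently bound to {int, char}, the result is 2;
+/// None is returned while any referenced pack cannot be substituted yet.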
+Optional<unsigned> Sema::getNumArgumentsInExpansion(QualType T,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ QualType Pattern = cast<PackExpansionType>(T)->getPattern();
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseType(Pattern);
+
+ Optional<unsigned> Result;
+ for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
+ // Compute the depth and index for this parameter pack.
+ unsigned Depth;
+ unsigned Index;
+
+ if (const TemplateTypeParmType *TTP
+ = Unexpanded[I].first.dyn_cast<const TemplateTypeParmType *>()) {
+ Depth = TTP->getDepth();
+ Index = TTP->getIndex();
+ } else {
+ NamedDecl *ND = Unexpanded[I].first.get<NamedDecl *>();
+ if (isa<ParmVarDecl>(ND)) {
+ // Function parameter pack.
+ typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
+
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> *Instantiation
+ = CurrentInstantiationScope->findInstantiationOf(
+ Unexpanded[I].first.get<NamedDecl *>());
+ if (Instantiation->is<Decl*>())
+ // The pattern refers to an unexpanded pack. We're not ready to expand
+ // this pack yet.
+ return None;
+
+ unsigned Size = Instantiation->get<DeclArgumentPack *>()->size();
+ assert((!Result || *Result == Size) && "inconsistent pack sizes");
+ Result = Size;
+ continue;
+ }
+
+ std::tie(Depth, Index) = getDepthAndIndex(ND);
+ }
+ if (Depth >= TemplateArgs.getNumLevels() ||
+ !TemplateArgs.hasTemplateArgument(Depth, Index))
+ // The pattern refers to an unknown template argument. We're not ready to
+ // expand this pack yet.
+ return None;
+
+ // Determine the size of the argument pack.
+ unsigned Size = TemplateArgs(Depth, Index).pack_size();
+ assert((!Result || *Result == Size) && "inconsistent pack sizes");
+ Result = Size;
+ }
+
+ return Result;
+}
+
+bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
+ const DeclSpec &DS = D.getDeclSpec();
+ switch (DS.getTypeSpecType()) {
+ case TST_typename:
+ case TST_typeofType:
+ case TST_underlyingType:
+ case TST_atomic: {
+ QualType T = DS.getRepAsType().get();
+ if (!T.isNull() && T->containsUnexpandedParameterPack())
+ return true;
+ break;
+ }
+
+ case TST_typeofExpr:
+ case TST_decltype:
+ if (DS.getRepAsExpr() &&
+ DS.getRepAsExpr()->containsUnexpandedParameterPack())
+ return true;
+ break;
+
+ case TST_unspecified:
+ case TST_void:
+ case TST_char:
+ case TST_wchar:
+ case TST_char16:
+ case TST_char32:
+ case TST_int:
+ case TST_int128:
+ case TST_half:
+ case TST_float:
+ case TST_double:
+ case TST_bool:
+ case TST_decimal32:
+ case TST_decimal64:
+ case TST_decimal128:
+ case TST_enum:
+ case TST_union:
+ case TST_struct:
+ case TST_interface:
+ case TST_class:
+ case TST_auto:
+ case TST_auto_type:
+ case TST_decltype_auto:
+ case TST_unknown_anytype:
+ case TST_error:
+ break;
+ }
+
+ for (unsigned I = 0, N = D.getNumTypeObjects(); I != N; ++I) {
+ const DeclaratorChunk &Chunk = D.getTypeObject(I);
+ switch (Chunk.Kind) {
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::Paren:
+ case DeclaratorChunk::BlockPointer:
+ // These declarator chunks cannot contain any parameter packs.
+ break;
+
+ case DeclaratorChunk::Array:
+ if (Chunk.Arr.NumElts &&
+ Chunk.Arr.NumElts->containsUnexpandedParameterPack())
+ return true;
+ break;
+ case DeclaratorChunk::Function:
+ for (unsigned i = 0, e = Chunk.Fun.NumParams; i != e; ++i) {
+ ParmVarDecl *Param = cast<ParmVarDecl>(Chunk.Fun.Params[i].Param);
+ QualType ParamTy = Param->getType();
+ assert(!ParamTy.isNull() && "Couldn't parse type?");
+ if (ParamTy->containsUnexpandedParameterPack()) return true;
+ }
+
+ if (Chunk.Fun.getExceptionSpecType() == EST_Dynamic) {
+ for (unsigned i = 0; i != Chunk.Fun.NumExceptions; ++i) {
+ if (Chunk.Fun.Exceptions[i]
+ .Ty.get()
+ ->containsUnexpandedParameterPack())
+ return true;
+ }
+ } else if (Chunk.Fun.getExceptionSpecType() == EST_ComputedNoexcept &&
+ Chunk.Fun.NoexceptExpr->containsUnexpandedParameterPack())
+ return true;
+
+ if (Chunk.Fun.hasTrailingReturnType()) {
+ QualType T = Chunk.Fun.getTrailingReturnType().get();
+ if (!T.isNull() && T->containsUnexpandedParameterPack())
+ return true;
+ }
+ break;
+
+ case DeclaratorChunk::MemberPointer:
+ if (Chunk.Mem.Scope().getScopeRep() &&
+ Chunk.Mem.Scope().getScopeRep()->containsUnexpandedParameterPack())
+ return true;
+ break;
+ }
+ }
+
+ return false;
+}
+
+namespace {
+
+// Callback to only accept typo corrections that refer to parameter packs.
+class ParameterPackValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ bool ValidateCandidate(const TypoCorrection &candidate) override {
+ NamedDecl *ND = candidate.getCorrectionDecl();
+ return ND && ND->isParameterPack();
+ }
+};
+
+}
+
+/// \brief Called when an expression computing the size of a parameter pack
+/// is parsed.
+///
+/// \code
+/// template<typename ...Types> struct count {
+/// static const unsigned value = sizeof...(Types);
+/// };
+/// \endcode
+///
+/// \param OpLoc The location of the "sizeof" keyword.
+/// \param Name The name of the parameter pack whose size will be determined.
+/// \param NameLoc The source location of the name of the parameter pack.
+/// \param RParenLoc The location of the closing parenthesis.
+ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S,
+ SourceLocation OpLoc,
+ IdentifierInfo &Name,
+ SourceLocation NameLoc,
+ SourceLocation RParenLoc) {
+ // C++0x [expr.sizeof]p5:
+ // The identifier in a sizeof... expression shall name a parameter pack.
+ LookupResult R(*this, &Name, NameLoc, LookupOrdinaryName);
+ LookupName(R, S);
+
+ NamedDecl *ParameterPack = nullptr;
+ switch (R.getResultKind()) {
+ case LookupResult::Found:
+ ParameterPack = R.getFoundDecl();
+ break;
+
+ case LookupResult::NotFound:
+ case LookupResult::NotFoundInCurrentInstantiation:
+ if (TypoCorrection Corrected =
+ CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S, nullptr,
+ llvm::make_unique<ParameterPackValidatorCCC>(),
+ CTK_ErrorRecovery)) {
+ diagnoseTypo(Corrected,
+ PDiag(diag::err_sizeof_pack_no_pack_name_suggest) << &Name,
+ PDiag(diag::note_parameter_pack_here));
+ ParameterPack = Corrected.getCorrectionDecl();
+ }
+
+ case LookupResult::FoundOverloaded:
+ case LookupResult::FoundUnresolvedValue:
+ break;
+
+ case LookupResult::Ambiguous:
+ DiagnoseAmbiguousLookup(R);
+ return ExprError();
+ }
+
+ if (!ParameterPack || !ParameterPack->isParameterPack()) {
+ Diag(NameLoc, diag::err_sizeof_pack_no_pack_name)
+ << &Name;
+ return ExprError();
+ }
+
+ MarkAnyDeclReferenced(OpLoc, ParameterPack, true);
+
+ return SizeOfPackExpr::Create(Context, OpLoc, ParameterPack, NameLoc,
+ RParenLoc);
+}
+
+TemplateArgumentLoc
+Sema::getTemplateArgumentPackExpansionPattern(
+ TemplateArgumentLoc OrigLoc,
+ SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const {
+ const TemplateArgument &Argument = OrigLoc.getArgument();
+ assert(Argument.isPackExpansion());
+ switch (Argument.getKind()) {
+ case TemplateArgument::Type: {
+ // FIXME: We shouldn't ever have to worry about missing
+ // type-source info!
+ TypeSourceInfo *ExpansionTSInfo = OrigLoc.getTypeSourceInfo();
+ if (!ExpansionTSInfo)
+ ExpansionTSInfo = Context.getTrivialTypeSourceInfo(Argument.getAsType(),
+ Ellipsis);
+ PackExpansionTypeLoc Expansion =
+ ExpansionTSInfo->getTypeLoc().castAs<PackExpansionTypeLoc>();
+ Ellipsis = Expansion.getEllipsisLoc();
+
+ TypeLoc Pattern = Expansion.getPatternLoc();
+ NumExpansions = Expansion.getTypePtr()->getNumExpansions();
+
+ // We need to copy the TypeLoc because TemplateArgumentLocs store a
+ // TypeSourceInfo.
+ // FIXME: Find some way to avoid the copy?
+ TypeLocBuilder TLB;
+ TLB.pushFullCopy(Pattern);
+ TypeSourceInfo *PatternTSInfo =
+ TLB.getTypeSourceInfo(Context, Pattern.getType());
+ return TemplateArgumentLoc(TemplateArgument(Pattern.getType()),
+ PatternTSInfo);
+ }
+
+ case TemplateArgument::Expression: {
+ PackExpansionExpr *Expansion
+ = cast<PackExpansionExpr>(Argument.getAsExpr());
+ Expr *Pattern = Expansion->getPattern();
+ Ellipsis = Expansion->getEllipsisLoc();
+ NumExpansions = Expansion->getNumExpansions();
+ return TemplateArgumentLoc(Pattern, Pattern);
+ }
+
+ case TemplateArgument::TemplateExpansion:
+ Ellipsis = OrigLoc.getTemplateEllipsisLoc();
+ NumExpansions = Argument.getNumTemplateExpansions();
+ return TemplateArgumentLoc(Argument.getPackExpansionPattern(),
+ OrigLoc.getTemplateQualifierLoc(),
+ OrigLoc.getTemplateNameLoc());
+
+ case TemplateArgument::Declaration:
+ case TemplateArgument::NullPtr:
+ case TemplateArgument::Template:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Pack:
+ case TemplateArgument::Null:
+ return TemplateArgumentLoc();
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
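+/// \brief Diagnose a fold-expression operand that is itself an unparenthesized
+/// binary or conditional operator. For example (illustrative), in
+/// (args + 1 + ...) the operand 'args + 1' is rejected, with fix-it hints
+/// suggesting ((args + 1) + ...).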
+static void CheckFoldOperand(Sema &S, Expr *E) {
+ if (!E)
+ return;
+
+ E = E->IgnoreImpCasts();
+ if (isa<BinaryOperator>(E) || isa<AbstractConditionalOperator>(E)) {
+ S.Diag(E->getExprLoc(), diag::err_fold_expression_bad_operand)
+ << E->getSourceRange()
+ << FixItHint::CreateInsertion(E->getLocStart(), "(")
+ << FixItHint::CreateInsertion(E->getLocEnd(), ")");
+ }
+}
+
+ExprResult Sema::ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
+ tok::TokenKind Operator,
+ SourceLocation EllipsisLoc, Expr *RHS,
+ SourceLocation RParenLoc) {
+ // LHS and RHS must be cast-expressions. We allow an arbitrary expression
+ // in the parser and reduce down to just cast-expressions here.
+ CheckFoldOperand(*this, LHS);
+ CheckFoldOperand(*this, RHS);
+
+ // [expr.prim.fold]p3:
+ // In a binary fold, op1 and op2 shall be the same fold-operator, and
+ // either e1 shall contain an unexpanded parameter pack or e2 shall contain
+ // an unexpanded parameter pack, but not both.
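+  // For example (illustrative): (args + ... + 0) is a valid binary fold,
+  // but (args + ... + args) names a pack on both sides and (1 + ... + 2)
+  // names no pack at all; both are rejected here.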
+ if (LHS && RHS &&
+ LHS->containsUnexpandedParameterPack() ==
+ RHS->containsUnexpandedParameterPack()) {
+ return Diag(EllipsisLoc,
+ LHS->containsUnexpandedParameterPack()
+ ? diag::err_fold_expression_packs_both_sides
+ : diag::err_pack_expansion_without_parameter_packs)
+ << LHS->getSourceRange() << RHS->getSourceRange();
+ }
+
+ // [expr.prim.fold]p2:
+ // In a unary fold, the cast-expression shall contain an unexpanded
+ // parameter pack.
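+  // For example (illustrative): (args && ...) is valid, whereas (true && ...)
+  // contains no parameter pack and is diagnosed below.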
+ if (!LHS || !RHS) {
+ Expr *Pack = LHS ? LHS : RHS;
+ assert(Pack && "fold expression with neither LHS nor RHS");
+ if (!Pack->containsUnexpandedParameterPack())
+ return Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << Pack->getSourceRange();
+ }
+
+ BinaryOperatorKind Opc = ConvertTokenKindToBinaryOpcode(Operator);
+ return BuildCXXFoldExpr(LParenLoc, LHS, Opc, EllipsisLoc, RHS, RParenLoc);
+}
+
+ExprResult Sema::BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
+ BinaryOperatorKind Operator,
+ SourceLocation EllipsisLoc, Expr *RHS,
+ SourceLocation RParenLoc) {
+ return new (Context) CXXFoldExpr(Context.DependentTy, LParenLoc, LHS,
+ Operator, EllipsisLoc, RHS, RParenLoc);
+}
+
+ExprResult Sema::BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
+ BinaryOperatorKind Operator) {
+ // [temp.variadic]p9:
+ // If N is zero for a unary fold-expression, the value of the expression is
+ // * -> 1
+ // + -> int()
+ // & -> -1
+ // | -> int()
+ // && -> true
+ // || -> false
+ // , -> void()
+ // if the operator is not listed [above], the instantiation is ill-formed.
+ //
+ // Note that we need to use something like int() here, not merely 0, to
+ // prevent the result from being a null pointer constant.
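+  //
+  // For example (illustrative): given template<int ...Ns> int sum()
+  // { return (Ns + ...); }, the call sum<>() folds zero operands over '+'
+  // and yields int(), i.e. the value 0 that is not a null pointer constant.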
+ QualType ScalarType;
+ switch (Operator) {
+ case BO_Add:
+ ScalarType = Context.IntTy;
+ break;
+ case BO_Mul:
+ return ActOnIntegerConstant(EllipsisLoc, 1);
+ case BO_Or:
+ ScalarType = Context.IntTy;
+ break;
+ case BO_And:
+ return CreateBuiltinUnaryOp(EllipsisLoc, UO_Minus,
+ ActOnIntegerConstant(EllipsisLoc, 1).get());
+ case BO_LOr:
+ return ActOnCXXBoolLiteral(EllipsisLoc, tok::kw_false);
+ case BO_LAnd:
+ return ActOnCXXBoolLiteral(EllipsisLoc, tok::kw_true);
+ case BO_Comma:
+ ScalarType = Context.VoidTy;
+ break;
+
+ default:
+ return Diag(EllipsisLoc, diag::err_fold_expression_empty)
+ << BinaryOperator::getOpcodeStr(Operator);
+ }
+
+ return new (Context) CXXScalarValueInitExpr(
+ ScalarType, Context.getTrivialTypeSourceInfo(ScalarType, EllipsisLoc),
+ EllipsisLoc);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp
new file mode 100644
index 0000000..c70568c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp
@@ -0,0 +1,7029 @@
+//===--- SemaType.cpp - Semantic Analysis for Types -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements type-related semantic analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "TypeLocBuilder.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/TypeLocVisitor.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/DelayedDiagnostic.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/Template.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace clang;
+
+enum TypeDiagSelector {
+ TDS_Function,
+ TDS_Pointer,
+ TDS_ObjCObjOrBlock
+};
+
+/// isOmittedBlockReturnType - Return true if this declarator is missing a
+/// return type because it is an omitted return type on a block literal.
+static bool isOmittedBlockReturnType(const Declarator &D) {
+ if (D.getContext() != Declarator::BlockLiteralContext ||
+ D.getDeclSpec().hasTypeSpecifier())
+ return false;
+
+ if (D.getNumTypeObjects() == 0)
+ return true; // ^{ ... }
+
+ if (D.getNumTypeObjects() == 1 &&
+ D.getTypeObject(0).Kind == DeclaratorChunk::Function)
+ return true; // ^(int X, float Y) { ... }
+
+ return false;
+}
+
+/// diagnoseBadTypeAttribute - Diagnoses a type attribute which
+/// doesn't apply to the given type.
+static void diagnoseBadTypeAttribute(Sema &S, const AttributeList &attr,
+ QualType type) {
+ TypeDiagSelector WhichType;
+ bool useExpansionLoc = true;
+ switch (attr.getKind()) {
+ case AttributeList::AT_ObjCGC: WhichType = TDS_Pointer; break;
+ case AttributeList::AT_ObjCOwnership: WhichType = TDS_ObjCObjOrBlock; break;
+ default:
+ // Assume everything else was a function attribute.
+ WhichType = TDS_Function;
+ useExpansionLoc = false;
+ break;
+ }
+
+ SourceLocation loc = attr.getLoc();
+ StringRef name = attr.getName()->getName();
+
+ // The GC attributes are usually written with macros; special-case them.
+ IdentifierInfo *II = attr.isArgIdent(0) ? attr.getArgAsIdent(0)->Ident
+ : nullptr;
+ if (useExpansionLoc && loc.isMacroID() && II) {
+ if (II->isStr("strong")) {
+ if (S.findMacroSpelling(loc, "__strong")) name = "__strong";
+ } else if (II->isStr("weak")) {
+ if (S.findMacroSpelling(loc, "__weak")) name = "__weak";
+ }
+ }
+
+ S.Diag(loc, diag::warn_type_attribute_wrong_type) << name << WhichType
+ << type;
+}
+
+// objc_gc applies to Objective-C pointers or, otherwise, to the
+// smallest available pointer type (i.e. 'void*' in 'void**').
+#define OBJC_POINTER_TYPE_ATTRS_CASELIST \
+ case AttributeList::AT_ObjCGC: \
+ case AttributeList::AT_ObjCOwnership
+
+// Function type attributes.
+#define FUNCTION_TYPE_ATTRS_CASELIST \
+ case AttributeList::AT_NoReturn: \
+ case AttributeList::AT_CDecl: \
+ case AttributeList::AT_FastCall: \
+ case AttributeList::AT_StdCall: \
+ case AttributeList::AT_ThisCall: \
+ case AttributeList::AT_Pascal: \
+ case AttributeList::AT_VectorCall: \
+ case AttributeList::AT_MSABI: \
+ case AttributeList::AT_SysVABI: \
+ case AttributeList::AT_Regparm: \
+ case AttributeList::AT_Pcs: \
+ case AttributeList::AT_IntelOclBicc
+
+// Microsoft-specific type qualifiers.
+#define MS_TYPE_ATTRS_CASELIST \
+ case AttributeList::AT_Ptr32: \
+ case AttributeList::AT_Ptr64: \
+ case AttributeList::AT_SPtr: \
+ case AttributeList::AT_UPtr
+
+// Nullability qualifiers.
+#define NULLABILITY_TYPE_ATTRS_CASELIST \
+ case AttributeList::AT_TypeNonNull: \
+ case AttributeList::AT_TypeNullable: \
+ case AttributeList::AT_TypeNullUnspecified
+
+namespace {
+ /// An object which stores processing state for the entire
+ /// GetTypeForDeclarator process.
+ class TypeProcessingState {
+ Sema &sema;
+
+ /// The declarator being processed.
+ Declarator &declarator;
+
+ /// The index of the declarator chunk we're currently processing.
+ /// May be the total number of valid chunks, indicating the
+ /// DeclSpec.
+ unsigned chunkIndex;
+
+ /// Whether there are non-trivial modifications to the decl spec.
+ bool trivial;
+
+ /// Whether we saved the attributes in the decl spec.
+ bool hasSavedAttrs;
+
+ /// The original set of attributes on the DeclSpec.
+ SmallVector<AttributeList*, 2> savedAttrs;
+
+ /// A list of attributes to diagnose the uselessness of when the
+ /// processing is complete.
+ SmallVector<AttributeList*, 2> ignoredTypeAttrs;
+
+ public:
+ TypeProcessingState(Sema &sema, Declarator &declarator)
+ : sema(sema), declarator(declarator),
+ chunkIndex(declarator.getNumTypeObjects()),
+ trivial(true), hasSavedAttrs(false) {}
+
+ Sema &getSema() const {
+ return sema;
+ }
+
+ Declarator &getDeclarator() const {
+ return declarator;
+ }
+
+ bool isProcessingDeclSpec() const {
+ return chunkIndex == declarator.getNumTypeObjects();
+ }
+
+ unsigned getCurrentChunkIndex() const {
+ return chunkIndex;
+ }
+
+ void setCurrentChunkIndex(unsigned idx) {
+ assert(idx <= declarator.getNumTypeObjects());
+ chunkIndex = idx;
+ }
+
+ AttributeList *&getCurrentAttrListRef() const {
+ if (isProcessingDeclSpec())
+ return getMutableDeclSpec().getAttributes().getListRef();
+ return declarator.getTypeObject(chunkIndex).getAttrListRef();
+ }
+
+ /// Save the current set of attributes on the DeclSpec.
+ void saveDeclSpecAttrs() {
+ // Don't try to save them multiple times.
+ if (hasSavedAttrs) return;
+
+ DeclSpec &spec = getMutableDeclSpec();
+ for (AttributeList *attr = spec.getAttributes().getList(); attr;
+ attr = attr->getNext())
+ savedAttrs.push_back(attr);
+ trivial &= savedAttrs.empty();
+ hasSavedAttrs = true;
+ }
+
+ /// Record that we had nowhere to put the given type attribute.
+ /// We will diagnose such attributes later.
+ void addIgnoredTypeAttr(AttributeList &attr) {
+ ignoredTypeAttrs.push_back(&attr);
+ }
+
+ /// Diagnose all the ignored type attributes, given that the
+ /// declarator worked out to the given type.
+ void diagnoseIgnoredTypeAttrs(QualType type) const {
+ for (auto *Attr : ignoredTypeAttrs)
+ diagnoseBadTypeAttribute(getSema(), *Attr, type);
+ }
+
+ ~TypeProcessingState() {
+ if (trivial) return;
+
+ restoreDeclSpecAttrs();
+ }
+
+ private:
+ DeclSpec &getMutableDeclSpec() const {
+ return const_cast<DeclSpec&>(declarator.getDeclSpec());
+ }
+
+ void restoreDeclSpecAttrs() {
+ assert(hasSavedAttrs);
+
+ if (savedAttrs.empty()) {
+ getMutableDeclSpec().getAttributes().set(nullptr);
+ return;
+ }
+
+ getMutableDeclSpec().getAttributes().set(savedAttrs[0]);
+ for (unsigned i = 0, e = savedAttrs.size() - 1; i != e; ++i)
+ savedAttrs[i]->setNext(savedAttrs[i+1]);
+ savedAttrs.back()->setNext(nullptr);
+ }
+ };
+}
+
+static void spliceAttrIntoList(AttributeList &attr, AttributeList *&head) {
+ attr.setNext(head);
+ head = &attr;
+}
+
+static void spliceAttrOutOfList(AttributeList &attr, AttributeList *&head) {
+ if (head == &attr) {
+ head = attr.getNext();
+ return;
+ }
+
+ AttributeList *cur = head;
+ while (true) {
+ assert(cur && cur->getNext() && "ran out of attrs?");
+ if (cur->getNext() == &attr) {
+ cur->setNext(attr.getNext());
+ return;
+ }
+ cur = cur->getNext();
+ }
+}
+
+static void moveAttrFromListToList(AttributeList &attr,
+ AttributeList *&fromList,
+ AttributeList *&toList) {
+ spliceAttrOutOfList(attr, fromList);
+ spliceAttrIntoList(attr, toList);
+}
+
+/// The location of a type attribute.
+enum TypeAttrLocation {
+ /// The attribute is in the decl-specifier-seq.
+ TAL_DeclSpec,
+ /// The attribute is part of a DeclaratorChunk.
+ TAL_DeclChunk,
+ /// The attribute is immediately after the declaration's name.
+ TAL_DeclName
+};
+
+static void processTypeAttrs(TypeProcessingState &state,
+ QualType &type, TypeAttrLocation TAL,
+ AttributeList *attrs);
+
+static bool handleFunctionTypeAttr(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &type);
+
+static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &type);
+
+static bool handleObjCGCTypeAttr(TypeProcessingState &state,
+ AttributeList &attr, QualType &type);
+
+static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
+ AttributeList &attr, QualType &type);
+
+static bool handleObjCPointerTypeAttr(TypeProcessingState &state,
+ AttributeList &attr, QualType &type) {
+ if (attr.getKind() == AttributeList::AT_ObjCGC)
+ return handleObjCGCTypeAttr(state, attr, type);
+ assert(attr.getKind() == AttributeList::AT_ObjCOwnership);
+ return handleObjCOwnershipTypeAttr(state, attr, type);
+}
+
+/// Given the index of a declarator chunk, check whether that chunk
+/// directly specifies the return type of a function and, if so, find
+/// an appropriate place for it.
+///
+/// \param i - a notional index which the search will start
+/// immediately inside
+///
+/// \param onlyBlockPointers Whether we should only look into block
+/// pointer types (vs. all pointer types).
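+///
+/// For example (illustrative): when an ownership attribute written in the
+/// decl-spec of a block literal really belongs on the block pointer returned
+/// by that block, this search walks past the function chunk to the returned
+/// (block-)pointer declarator.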
+static DeclaratorChunk *maybeMovePastReturnType(Declarator &declarator,
+ unsigned i,
+ bool onlyBlockPointers) {
+ assert(i <= declarator.getNumTypeObjects());
+
+ DeclaratorChunk *result = nullptr;
+
+ // First, look inwards past parens for a function declarator.
+ for (; i != 0; --i) {
+ DeclaratorChunk &fnChunk = declarator.getTypeObject(i-1);
+ switch (fnChunk.Kind) {
+ case DeclaratorChunk::Paren:
+ continue;
+
+ // If we find anything except a function, bail out.
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::BlockPointer:
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::MemberPointer:
+ return result;
+
+ // If we do find a function declarator, scan inwards from that,
+ // looking for a (block-)pointer declarator.
+ case DeclaratorChunk::Function:
+ for (--i; i != 0; --i) {
+ DeclaratorChunk &ptrChunk = declarator.getTypeObject(i-1);
+ switch (ptrChunk.Kind) {
+ case DeclaratorChunk::Paren:
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::Function:
+ case DeclaratorChunk::Reference:
+ continue;
+
+ case DeclaratorChunk::MemberPointer:
+ case DeclaratorChunk::Pointer:
+ if (onlyBlockPointers)
+ continue;
+
+ // fallthrough
+
+ case DeclaratorChunk::BlockPointer:
+ result = &ptrChunk;
+ goto continue_outer;
+ }
+ llvm_unreachable("bad declarator chunk kind");
+ }
+
+ // If we run out of declarators doing that, we're done.
+ return result;
+ }
+ llvm_unreachable("bad declarator chunk kind");
+
+ // Okay, reconsider from our new point.
+ continue_outer: ;
+ }
+
+ // Ran out of chunks, bail out.
+ return result;
+}
+
+/// Given that an objc_gc attribute was written somewhere on a
+/// declaration *other* than on the declarator itself (for which, use
+/// distributeObjCPointerTypeAttrFromDeclarator), and given that it
+/// didn't apply in whatever position it was written in, try to move
+/// it to a more appropriate position.
+static void distributeObjCPointerTypeAttr(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType type) {
+ Declarator &declarator = state.getDeclarator();
+
+ // Move it to the outermost normal or block pointer declarator.
+ for (unsigned i = state.getCurrentChunkIndex(); i != 0; --i) {
+ DeclaratorChunk &chunk = declarator.getTypeObject(i-1);
+ switch (chunk.Kind) {
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::BlockPointer: {
+ // But don't move an ARC ownership attribute to the return type
+ // of a block.
+ DeclaratorChunk *destChunk = nullptr;
+ if (state.isProcessingDeclSpec() &&
+ attr.getKind() == AttributeList::AT_ObjCOwnership)
+ destChunk = maybeMovePastReturnType(declarator, i - 1,
+ /*onlyBlockPointers=*/true);
+ if (!destChunk) destChunk = &chunk;
+
+ moveAttrFromListToList(attr, state.getCurrentAttrListRef(),
+ destChunk->getAttrListRef());
+ return;
+ }
+
+ case DeclaratorChunk::Paren:
+ case DeclaratorChunk::Array:
+ continue;
+
+ // We may be starting at the return type of a block.
+ case DeclaratorChunk::Function:
+ if (state.isProcessingDeclSpec() &&
+ attr.getKind() == AttributeList::AT_ObjCOwnership) {
+ if (DeclaratorChunk *dest = maybeMovePastReturnType(
+ declarator, i,
+ /*onlyBlockPointers=*/true)) {
+ moveAttrFromListToList(attr, state.getCurrentAttrListRef(),
+ dest->getAttrListRef());
+ return;
+ }
+ }
+ goto error;
+
+ // Don't walk through these.
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::MemberPointer:
+ goto error;
+ }
+ }
+ error:
+
+ diagnoseBadTypeAttribute(state.getSema(), attr, type);
+}
+
+/// Distribute an objc_gc type attribute that was written on the
+/// declarator.
+static void
+distributeObjCPointerTypeAttrFromDeclarator(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &declSpecType) {
+ Declarator &declarator = state.getDeclarator();
+
+ // objc_gc goes on the innermost pointer to something that's not a
+ // pointer.
+ unsigned innermost = -1U;
+ bool considerDeclSpec = true;
+ for (unsigned i = 0, e = declarator.getNumTypeObjects(); i != e; ++i) {
+ DeclaratorChunk &chunk = declarator.getTypeObject(i);
+ switch (chunk.Kind) {
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::BlockPointer:
+ innermost = i;
+ continue;
+
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::MemberPointer:
+ case DeclaratorChunk::Paren:
+ case DeclaratorChunk::Array:
+ continue;
+
+ case DeclaratorChunk::Function:
+ considerDeclSpec = false;
+ goto done;
+ }
+ }
+ done:
+
+ // That might actually be the decl spec if we weren't blocked by
+ // anything in the declarator.
+ if (considerDeclSpec) {
+ if (handleObjCPointerTypeAttr(state, attr, declSpecType)) {
+ // Splice the attribute into the decl spec. Prevents the
+ // attribute from being applied multiple times and gives
+ // the source-location-filler something to work with.
+ state.saveDeclSpecAttrs();
+ moveAttrFromListToList(attr, declarator.getAttrListRef(),
+ declarator.getMutableDeclSpec().getAttributes().getListRef());
+ return;
+ }
+ }
+
+ // Otherwise, if we found an appropriate chunk, splice the attribute
+ // into it.
+ if (innermost != -1U) {
+ moveAttrFromListToList(attr, declarator.getAttrListRef(),
+ declarator.getTypeObject(innermost).getAttrListRef());
+ return;
+ }
+
+ // Otherwise, diagnose when we're done building the type.
+ spliceAttrOutOfList(attr, declarator.getAttrListRef());
+ state.addIgnoredTypeAttr(attr);
+}
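+
+// For illustration (hypothetical declaration): given
+//   id obj __attribute__((objc_ownership(strong)));
+// no function chunk blocks the scan, so the attribute is first tried
+// against the decl spec type 'id'; it applies there and is spliced into
+// the decl spec. Only if the decl spec type rejects the attribute does
+// it fall back to the recorded innermost pointer chunk.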
+
+/// A function type attribute was written somewhere in a declaration
+/// *other* than on the declarator itself or in the decl spec. Given
+/// that it didn't apply in whatever position it was written in, try
+/// to move it to a more appropriate position.
+static void distributeFunctionTypeAttr(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType type) {
+ Declarator &declarator = state.getDeclarator();
+
+ // Try to push the attribute from the return type of a function to
+ // the function itself.
+ for (unsigned i = state.getCurrentChunkIndex(); i != 0; --i) {
+ DeclaratorChunk &chunk = declarator.getTypeObject(i-1);
+ switch (chunk.Kind) {
+ case DeclaratorChunk::Function:
+ moveAttrFromListToList(attr, state.getCurrentAttrListRef(),
+ chunk.getAttrListRef());
+ return;
+
+ case DeclaratorChunk::Paren:
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::BlockPointer:
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::MemberPointer:
+ continue;
+ }
+ }
+
+ diagnoseBadTypeAttribute(state.getSema(), attr, type);
+}
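+
+// For illustration (a rough sketch of the effect): in
+//   int *__attribute__((stdcall)) f(void);
+// the calling-convention attribute cannot apply to the returned
+// pointer, so the walk above carries it inward to the function chunk,
+// where it modifies the function type of 'f' instead.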
+
+/// Try to distribute a function type attribute to the innermost
+/// function chunk or type. Returns true if the attribute was
+/// distributed, false if no location was found.
+static bool
+distributeFunctionTypeAttrToInnermost(TypeProcessingState &state,
+ AttributeList &attr,
+ AttributeList *&attrList,
+ QualType &declSpecType) {
+ Declarator &declarator = state.getDeclarator();
+
+ // Put it on the innermost function chunk, if there is one.
+ for (unsigned i = 0, e = declarator.getNumTypeObjects(); i != e; ++i) {
+ DeclaratorChunk &chunk = declarator.getTypeObject(i);
+ if (chunk.Kind != DeclaratorChunk::Function) continue;
+
+ moveAttrFromListToList(attr, attrList, chunk.getAttrListRef());
+ return true;
+ }
+
+ return handleFunctionTypeAttr(state, attr, declSpecType);
+}
+
+/// A function type attribute was written in the decl spec. Try to
+/// apply it somewhere.
+static void
+distributeFunctionTypeAttrFromDeclSpec(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &declSpecType) {
+ state.saveDeclSpecAttrs();
+
+ // C++11 attributes before the decl specifiers actually appertain to
+ // the declarators. Move them straight there. We don't support the
+ // 'put them wherever you like' semantics we allow for GNU attributes.
+ if (attr.isCXX11Attribute()) {
+ moveAttrFromListToList(attr, state.getCurrentAttrListRef(),
+ state.getDeclarator().getAttrListRef());
+ return;
+ }
+
+ // Try to distribute to the innermost.
+ if (distributeFunctionTypeAttrToInnermost(state, attr,
+ state.getCurrentAttrListRef(),
+ declSpecType))
+ return;
+
+ // If that failed, diagnose the bad attribute when the declarator is
+ // fully built.
+ state.addIgnoredTypeAttr(attr);
+}
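+
+// For illustration: a GNU attribute written in the decl spec, as in
+//   __attribute__((stdcall)) void (*fp)(int);
+// is distributed to the innermost function chunk, so 'fp' becomes a
+// pointer to an stdcall function rather than leaving the attribute
+// stranded on the return type.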
+
+/// A function type attribute was written on the declarator. Try to
+/// apply it somewhere.
+static void
+distributeFunctionTypeAttrFromDeclarator(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &declSpecType) {
+ Declarator &declarator = state.getDeclarator();
+
+ // Try to distribute to the innermost.
+ if (distributeFunctionTypeAttrToInnermost(state, attr,
+ declarator.getAttrListRef(),
+ declSpecType))
+ return;
+
+ // If that failed, diagnose the bad attribute when the declarator is
+ // fully built.
+ spliceAttrOutOfList(attr, declarator.getAttrListRef());
+ state.addIgnoredTypeAttr(attr);
+}
+
+/// \brief Given that there are attributes written on the declarator
+/// itself, try to distribute any type attributes to the appropriate
+/// declarator chunk.
+///
+/// These are attributes like the following:
+/// int f ATTR;
+/// int (f ATTR)();
+/// but not necessarily this:
+/// int f() ATTR;
+static void distributeTypeAttrsFromDeclarator(TypeProcessingState &state,
+ QualType &declSpecType) {
+ // Collect all the type attributes from the declarator itself.
+ assert(state.getDeclarator().getAttributes() && "declarator has no attrs!");
+ AttributeList *attr = state.getDeclarator().getAttributes();
+ AttributeList *next;
+ do {
+ next = attr->getNext();
+
+ // Do not distribute C++11 attributes. They have strict rules for what
+ // they appertain to.
+ if (attr->isCXX11Attribute())
+ continue;
+
+ switch (attr->getKind()) {
+ OBJC_POINTER_TYPE_ATTRS_CASELIST:
+ distributeObjCPointerTypeAttrFromDeclarator(state, *attr, declSpecType);
+ break;
+
+ case AttributeList::AT_NSReturnsRetained:
+ if (!state.getSema().getLangOpts().ObjCAutoRefCount)
+ break;
+ // fallthrough
+
+ FUNCTION_TYPE_ATTRS_CASELIST:
+ distributeFunctionTypeAttrFromDeclarator(state, *attr, declSpecType);
+ break;
+
+ MS_TYPE_ATTRS_CASELIST:
+ // Microsoft type attributes cannot go after the declarator-id.
+ continue;
+
+    NULLABILITY_TYPE_ATTRS_CASELIST:
+      // Nullability specifiers cannot go after the declarator-id.
+      continue;
+
+ // Objective-C __kindof does not get distributed.
+ case AttributeList::AT_ObjCKindOf:
+ continue;
+
+ default:
+ break;
+ }
+ } while ((attr = next));
+}
+
+/// Add a synthetic '()' to a block-literal declarator if it is
+/// required, given the return type.
+static void maybeSynthesizeBlockSignature(TypeProcessingState &state,
+ QualType declSpecType) {
+ Declarator &declarator = state.getDeclarator();
+
+ // First, check whether the declarator would produce a function,
+ // i.e. whether the innermost semantic chunk is a function.
+ if (declarator.isFunctionDeclarator()) {
+ // If so, make that declarator a prototyped declarator.
+ declarator.getFunctionTypeInfo().hasPrototype = true;
+ return;
+ }
+
+ // If there are any type objects, the type as written won't name a
+ // function, regardless of the decl spec type. This is because a
+ // block signature declarator is always an abstract-declarator, and
+ // abstract-declarators can't just be parentheses chunks. Therefore
+ // we need to build a function chunk unless there are no type
+ // objects and the decl spec type is a function.
+ if (!declarator.getNumTypeObjects() && declSpecType->isFunctionType())
+ return;
+
+ // Note that there *are* cases with invalid declarators where
+ // declarators consist solely of parentheses. In general, these
+ // occur only in failed efforts to make function declarators, so
+ // faking up the function chunk is still the right thing to do.
+
+ // Otherwise, we need to fake up a function declarator.
+ SourceLocation loc = declarator.getLocStart();
+
+ // ...and *prepend* it to the declarator.
+ SourceLocation NoLoc;
+ declarator.AddInnermostTypeInfo(DeclaratorChunk::getFunction(
+ /*HasProto=*/true,
+ /*IsAmbiguous=*/false,
+ /*LParenLoc=*/NoLoc,
+ /*ArgInfo=*/nullptr,
+ /*NumArgs=*/0,
+ /*EllipsisLoc=*/NoLoc,
+ /*RParenLoc=*/NoLoc,
+ /*TypeQuals=*/0,
+ /*RefQualifierIsLvalueRef=*/true,
+ /*RefQualifierLoc=*/NoLoc,
+ /*ConstQualifierLoc=*/NoLoc,
+ /*VolatileQualifierLoc=*/NoLoc,
+ /*RestrictQualifierLoc=*/NoLoc,
+ /*MutableLoc=*/NoLoc, EST_None,
+ /*ESpecRange=*/SourceRange(),
+ /*Exceptions=*/nullptr,
+ /*ExceptionRanges=*/nullptr,
+ /*NumExceptions=*/0,
+ /*NoexceptExpr=*/nullptr,
+ /*ExceptionSpecTokens=*/nullptr,
+ loc, loc, declarator));
+
+ // For consistency, make sure the state still has us as processing
+ // the decl spec.
+ assert(state.getCurrentChunkIndex() == declarator.getNumTypeObjects() - 1);
+ state.setCurrentChunkIndex(declarator.getNumTypeObjects());
+}
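+
+// For illustration: a block literal written without a parameter list,
+//   ^ { return 42; }
+// has no function chunk of its own, so a prototyped zero-argument
+// function chunk is prepended here, as if '^ (void) { return 42; }'
+// had been written.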
+
+static void diagnoseAndRemoveTypeQualifiers(Sema &S, const DeclSpec &DS,
+ unsigned &TypeQuals,
+ QualType TypeSoFar,
+ unsigned RemoveTQs,
+ unsigned DiagID) {
+ // If this occurs outside a template instantiation, warn the user about
+ // it; they probably didn't mean to specify a redundant qualifier.
+ typedef std::pair<DeclSpec::TQ, SourceLocation> QualLoc;
+ for (QualLoc Qual : {QualLoc(DeclSpec::TQ_const, DS.getConstSpecLoc()),
+ QualLoc(DeclSpec::TQ_volatile, DS.getVolatileSpecLoc()),
+ QualLoc(DeclSpec::TQ_atomic, DS.getAtomicSpecLoc())}) {
+ if (!(RemoveTQs & Qual.first))
+ continue;
+
+ if (S.ActiveTemplateInstantiations.empty()) {
+ if (TypeQuals & Qual.first)
+ S.Diag(Qual.second, DiagID)
+ << DeclSpec::getSpecifierName(Qual.first) << TypeSoFar
+ << FixItHint::CreateRemoval(Qual.second);
+ }
+
+ TypeQuals &= ~Qual.first;
+ }
+}
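+
+// For illustration (hypothetical typedefs): given
+//   typedef void F();
+//   const F f;   // C++: 'const' on a function type, ignored with a warning
+//   typedef int &IR;
+//   const IR r;  // 'const' on a reference type, also dropped
+// the redundant qualifiers are diagnosed (outside template
+// instantiations) and stripped from TypeQuals.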
+
+/// Apply Objective-C type arguments to the given type.
+static QualType applyObjCTypeArgs(Sema &S, SourceLocation loc, QualType type,
+ ArrayRef<TypeSourceInfo *> typeArgs,
+ SourceRange typeArgsRange,
+ bool failOnError = false) {
+ // We can only apply type arguments to an Objective-C class type.
+ const auto *objcObjectType = type->getAs<ObjCObjectType>();
+ if (!objcObjectType || !objcObjectType->getInterface()) {
+ S.Diag(loc, diag::err_objc_type_args_non_class)
+ << type
+ << typeArgsRange;
+
+ if (failOnError)
+ return QualType();
+ return type;
+ }
+
+ // The class type must be parameterized.
+ ObjCInterfaceDecl *objcClass = objcObjectType->getInterface();
+ ObjCTypeParamList *typeParams = objcClass->getTypeParamList();
+ if (!typeParams) {
+ S.Diag(loc, diag::err_objc_type_args_non_parameterized_class)
+ << objcClass->getDeclName()
+ << FixItHint::CreateRemoval(typeArgsRange);
+
+ if (failOnError)
+ return QualType();
+
+ return type;
+ }
+
+ // The type must not already be specialized.
+ if (objcObjectType->isSpecialized()) {
+ S.Diag(loc, diag::err_objc_type_args_specialized_class)
+ << type
+ << FixItHint::CreateRemoval(typeArgsRange);
+
+ if (failOnError)
+ return QualType();
+
+ return type;
+ }
+
+ // Check the type arguments.
+ SmallVector<QualType, 4> finalTypeArgs;
+ unsigned numTypeParams = typeParams->size();
+ bool anyPackExpansions = false;
+ for (unsigned i = 0, n = typeArgs.size(); i != n; ++i) {
+ TypeSourceInfo *typeArgInfo = typeArgs[i];
+ QualType typeArg = typeArgInfo->getType();
+
+ // Type arguments cannot have explicit qualifiers or nullability.
+ // We ignore indirect sources of these, e.g. behind typedefs or
+ // template arguments.
+ if (TypeLoc qual = typeArgInfo->getTypeLoc().findExplicitQualifierLoc()) {
+ bool diagnosed = false;
+ SourceRange rangeToRemove;
+ if (auto attr = qual.getAs<AttributedTypeLoc>()) {
+ rangeToRemove = attr.getLocalSourceRange();
+ if (attr.getTypePtr()->getImmediateNullability()) {
+ typeArg = attr.getTypePtr()->getModifiedType();
+ S.Diag(attr.getLocStart(),
+ diag::err_objc_type_arg_explicit_nullability)
+ << typeArg << FixItHint::CreateRemoval(rangeToRemove);
+ diagnosed = true;
+ }
+ }
+
+ if (!diagnosed) {
+ S.Diag(qual.getLocStart(), diag::err_objc_type_arg_qualified)
+ << typeArg << typeArg.getQualifiers().getAsString()
+ << FixItHint::CreateRemoval(rangeToRemove);
+ }
+ }
+
+ // Remove qualifiers even if they're non-local.
+ typeArg = typeArg.getUnqualifiedType();
+
+ finalTypeArgs.push_back(typeArg);
+
+ if (typeArg->getAs<PackExpansionType>())
+ anyPackExpansions = true;
+
+ // Find the corresponding type parameter, if there is one.
+ ObjCTypeParamDecl *typeParam = nullptr;
+ if (!anyPackExpansions) {
+ if (i < numTypeParams) {
+ typeParam = typeParams->begin()[i];
+ } else {
+ // Too many arguments.
+ S.Diag(loc, diag::err_objc_type_args_wrong_arity)
+ << false
+ << objcClass->getDeclName()
+ << (unsigned)typeArgs.size()
+ << numTypeParams;
+ S.Diag(objcClass->getLocation(), diag::note_previous_decl)
+ << objcClass;
+
+ if (failOnError)
+ return QualType();
+
+ return type;
+ }
+ }
+
+ // Objective-C object pointer types must be substitutable for the bounds.
+ if (const auto *typeArgObjC = typeArg->getAs<ObjCObjectPointerType>()) {
+ // If we don't have a type parameter to match against, assume
+ // everything is fine. There was a prior pack expansion that
+ // means we won't be able to match anything.
+ if (!typeParam) {
+ assert(anyPackExpansions && "Too many arguments?");
+ continue;
+ }
+
+ // Retrieve the bound.
+ QualType bound = typeParam->getUnderlyingType();
+ const auto *boundObjC = bound->getAs<ObjCObjectPointerType>();
+
+ // Determine whether the type argument is substitutable for the bound.
+ if (typeArgObjC->isObjCIdType()) {
+ // When the type argument is 'id', the only acceptable type
+ // parameter bound is 'id'.
+ if (boundObjC->isObjCIdType())
+ continue;
+ } else if (S.Context.canAssignObjCInterfaces(boundObjC, typeArgObjC)) {
+ // Otherwise, we follow the assignability rules.
+ continue;
+ }
+
+ // Diagnose the mismatch.
+ S.Diag(typeArgInfo->getTypeLoc().getLocStart(),
+ diag::err_objc_type_arg_does_not_match_bound)
+ << typeArg << bound << typeParam->getDeclName();
+ S.Diag(typeParam->getLocation(), diag::note_objc_type_param_here)
+ << typeParam->getDeclName();
+
+ if (failOnError)
+ return QualType();
+
+ return type;
+ }
+
+ // Block pointer types are permitted for unqualified 'id' bounds.
+ if (typeArg->isBlockPointerType()) {
+ // If we don't have a type parameter to match against, assume
+ // everything is fine. There was a prior pack expansion that
+ // means we won't be able to match anything.
+ if (!typeParam) {
+ assert(anyPackExpansions && "Too many arguments?");
+ continue;
+ }
+
+ // Retrieve the bound.
+ QualType bound = typeParam->getUnderlyingType();
+ if (bound->isBlockCompatibleObjCPointerType(S.Context))
+ continue;
+
+ // Diagnose the mismatch.
+ S.Diag(typeArgInfo->getTypeLoc().getLocStart(),
+ diag::err_objc_type_arg_does_not_match_bound)
+ << typeArg << bound << typeParam->getDeclName();
+ S.Diag(typeParam->getLocation(), diag::note_objc_type_param_here)
+ << typeParam->getDeclName();
+
+ if (failOnError)
+ return QualType();
+
+ return type;
+ }
+
+ // Dependent types will be checked at instantiation time.
+ if (typeArg->isDependentType()) {
+ continue;
+ }
+
+ // Diagnose non-id-compatible type arguments.
+ S.Diag(typeArgInfo->getTypeLoc().getLocStart(),
+ diag::err_objc_type_arg_not_id_compatible)
+ << typeArg
+ << typeArgInfo->getTypeLoc().getSourceRange();
+
+ if (failOnError)
+ return QualType();
+
+ return type;
+ }
+
+ // Make sure we didn't have the wrong number of arguments.
+ if (!anyPackExpansions && finalTypeArgs.size() != numTypeParams) {
+ S.Diag(loc, diag::err_objc_type_args_wrong_arity)
+ << (typeArgs.size() < typeParams->size())
+ << objcClass->getDeclName()
+ << (unsigned)finalTypeArgs.size()
+ << (unsigned)numTypeParams;
+ S.Diag(objcClass->getLocation(), diag::note_previous_decl)
+ << objcClass;
+
+ if (failOnError)
+ return QualType();
+
+ return type;
+ }
+
+ // Success. Form the specialized type.
+ return S.Context.getObjCObjectType(type, finalTypeArgs, { }, false);
+}
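+
+// For illustration, assuming NSArray's single type parameter with its
+// implicit 'id' bound:
+//   NSArray<NSString *> *strings;        // OK: NSString * satisfies the bound
+//   NSArray<int> *bad;                   // error: 'int' is not id-compatible
+//   NSArray<NSString *, NSString *> *x;  // error: wrong arity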
+
+/// Apply Objective-C protocol qualifiers to the given type.
+static QualType applyObjCProtocolQualifiers(
+ Sema &S, SourceLocation loc, SourceRange range, QualType type,
+ ArrayRef<ObjCProtocolDecl *> protocols,
+ const SourceLocation *protocolLocs,
+ bool failOnError = false) {
+ ASTContext &ctx = S.Context;
+ if (const ObjCObjectType *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){
+ // FIXME: Check for protocols to which the class type is already
+ // known to conform.
+
+ return ctx.getObjCObjectType(objT->getBaseType(),
+ objT->getTypeArgsAsWritten(),
+ protocols,
+ objT->isKindOfTypeAsWritten());
+ }
+
+ if (type->isObjCObjectType()) {
+ // Silently overwrite any existing protocol qualifiers.
+ // TODO: determine whether that's the right thing to do.
+
+ // FIXME: Check for protocols to which the class type is already
+ // known to conform.
+ return ctx.getObjCObjectType(type, { }, protocols, false);
+ }
+
+ // id<protocol-list>
+ if (type->isObjCIdType()) {
+ const ObjCObjectPointerType *objPtr = type->castAs<ObjCObjectPointerType>();
+ type = ctx.getObjCObjectType(ctx.ObjCBuiltinIdTy, { }, protocols,
+ objPtr->isKindOfType());
+ return ctx.getObjCObjectPointerType(type);
+ }
+
+ // Class<protocol-list>
+ if (type->isObjCClassType()) {
+ const ObjCObjectPointerType *objPtr = type->castAs<ObjCObjectPointerType>();
+ type = ctx.getObjCObjectType(ctx.ObjCBuiltinClassTy, { }, protocols,
+ objPtr->isKindOfType());
+ return ctx.getObjCObjectPointerType(type);
+ }
+
+ S.Diag(loc, diag::err_invalid_protocol_qualifiers)
+ << range;
+
+ if (failOnError)
+ return QualType();
+
+ return type;
+}
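+
+// For illustration, the branches above correspond to:
+//   NSObject<NSCopying> *obj;  // protocols added to an object type
+//   id<NSCopying> any;         // rebuilt from ObjCBuiltinIdTy
+//   Class<NSCopying> cls;      // rebuilt from ObjCBuiltinClassTy
+// and applying a protocol list to any other type is diagnosed with
+// err_invalid_protocol_qualifiers.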
+
+QualType Sema::BuildObjCObjectType(QualType BaseType,
+ SourceLocation Loc,
+ SourceLocation TypeArgsLAngleLoc,
+ ArrayRef<TypeSourceInfo *> TypeArgs,
+ SourceLocation TypeArgsRAngleLoc,
+ SourceLocation ProtocolLAngleLoc,
+ ArrayRef<ObjCProtocolDecl *> Protocols,
+ ArrayRef<SourceLocation> ProtocolLocs,
+ SourceLocation ProtocolRAngleLoc,
+ bool FailOnError) {
+ QualType Result = BaseType;
+ if (!TypeArgs.empty()) {
+ Result = applyObjCTypeArgs(*this, Loc, Result, TypeArgs,
+ SourceRange(TypeArgsLAngleLoc,
+ TypeArgsRAngleLoc),
+ FailOnError);
+ if (FailOnError && Result.isNull())
+ return QualType();
+ }
+
+ if (!Protocols.empty()) {
+ Result = applyObjCProtocolQualifiers(*this, Loc,
+ SourceRange(ProtocolLAngleLoc,
+ ProtocolRAngleLoc),
+ Result, Protocols,
+ ProtocolLocs.data(),
+ FailOnError);
+ if (FailOnError && Result.isNull())
+ return QualType();
+ }
+
+ return Result;
+}
+
+TypeResult Sema::actOnObjCProtocolQualifierType(
+ SourceLocation lAngleLoc,
+ ArrayRef<Decl *> protocols,
+ ArrayRef<SourceLocation> protocolLocs,
+ SourceLocation rAngleLoc) {
+ // Form id<protocol-list>.
+ QualType Result = Context.getObjCObjectType(
+ Context.ObjCBuiltinIdTy, { },
+ llvm::makeArrayRef(
+ (ObjCProtocolDecl * const *)protocols.data(),
+ protocols.size()),
+ false);
+ Result = Context.getObjCObjectPointerType(Result);
+
+ TypeSourceInfo *ResultTInfo = Context.CreateTypeSourceInfo(Result);
+ TypeLoc ResultTL = ResultTInfo->getTypeLoc();
+
+ auto ObjCObjectPointerTL = ResultTL.castAs<ObjCObjectPointerTypeLoc>();
+ ObjCObjectPointerTL.setStarLoc(SourceLocation()); // implicit
+
+ auto ObjCObjectTL = ObjCObjectPointerTL.getPointeeLoc()
+ .castAs<ObjCObjectTypeLoc>();
+ ObjCObjectTL.setHasBaseTypeAsWritten(false);
+ ObjCObjectTL.getBaseLoc().initialize(Context, SourceLocation());
+
+ // No type arguments.
+ ObjCObjectTL.setTypeArgsLAngleLoc(SourceLocation());
+ ObjCObjectTL.setTypeArgsRAngleLoc(SourceLocation());
+
+ // Fill in protocol qualifiers.
+ ObjCObjectTL.setProtocolLAngleLoc(lAngleLoc);
+ ObjCObjectTL.setProtocolRAngleLoc(rAngleLoc);
+ for (unsigned i = 0, n = protocols.size(); i != n; ++i)
+ ObjCObjectTL.setProtocolLoc(i, protocolLocs[i]);
+
+ // We're done. Return the completed type to the parser.
+ return CreateParsedType(Result, ResultTInfo);
+}
+
+TypeResult Sema::actOnObjCTypeArgsAndProtocolQualifiers(
+ Scope *S,
+ SourceLocation Loc,
+ ParsedType BaseType,
+ SourceLocation TypeArgsLAngleLoc,
+ ArrayRef<ParsedType> TypeArgs,
+ SourceLocation TypeArgsRAngleLoc,
+ SourceLocation ProtocolLAngleLoc,
+ ArrayRef<Decl *> Protocols,
+ ArrayRef<SourceLocation> ProtocolLocs,
+ SourceLocation ProtocolRAngleLoc) {
+ TypeSourceInfo *BaseTypeInfo = nullptr;
+ QualType T = GetTypeFromParser(BaseType, &BaseTypeInfo);
+ if (T.isNull())
+ return true;
+
+ // Handle missing type-source info.
+ if (!BaseTypeInfo)
+ BaseTypeInfo = Context.getTrivialTypeSourceInfo(T, Loc);
+
+ // Extract type arguments.
+ SmallVector<TypeSourceInfo *, 4> ActualTypeArgInfos;
+ for (unsigned i = 0, n = TypeArgs.size(); i != n; ++i) {
+ TypeSourceInfo *TypeArgInfo = nullptr;
+ QualType TypeArg = GetTypeFromParser(TypeArgs[i], &TypeArgInfo);
+ if (TypeArg.isNull()) {
+ ActualTypeArgInfos.clear();
+ break;
+ }
+
+ assert(TypeArgInfo && "No type source info?");
+ ActualTypeArgInfos.push_back(TypeArgInfo);
+ }
+
+ // Build the object type.
+ QualType Result = BuildObjCObjectType(
+ T, BaseTypeInfo->getTypeLoc().getSourceRange().getBegin(),
+ TypeArgsLAngleLoc, ActualTypeArgInfos, TypeArgsRAngleLoc,
+ ProtocolLAngleLoc,
+ llvm::makeArrayRef((ObjCProtocolDecl * const *)Protocols.data(),
+ Protocols.size()),
+ ProtocolLocs, ProtocolRAngleLoc,
+ /*FailOnError=*/false);
+
+ if (Result == T)
+ return BaseType;
+
+ // Create source information for this type.
+ TypeSourceInfo *ResultTInfo = Context.CreateTypeSourceInfo(Result);
+ TypeLoc ResultTL = ResultTInfo->getTypeLoc();
+
+ // For id<Proto1, Proto2> or Class<Proto1, Proto2>, we'll have an
+ // object pointer type. Fill in source information for it.
+ if (auto ObjCObjectPointerTL = ResultTL.getAs<ObjCObjectPointerTypeLoc>()) {
+ // The '*' is implicit.
+ ObjCObjectPointerTL.setStarLoc(SourceLocation());
+ ResultTL = ObjCObjectPointerTL.getPointeeLoc();
+ }
+
+ auto ObjCObjectTL = ResultTL.castAs<ObjCObjectTypeLoc>();
+
+ // Type argument information.
+ if (ObjCObjectTL.getNumTypeArgs() > 0) {
+ assert(ObjCObjectTL.getNumTypeArgs() == ActualTypeArgInfos.size());
+ ObjCObjectTL.setTypeArgsLAngleLoc(TypeArgsLAngleLoc);
+ ObjCObjectTL.setTypeArgsRAngleLoc(TypeArgsRAngleLoc);
+ for (unsigned i = 0, n = ActualTypeArgInfos.size(); i != n; ++i)
+ ObjCObjectTL.setTypeArgTInfo(i, ActualTypeArgInfos[i]);
+ } else {
+ ObjCObjectTL.setTypeArgsLAngleLoc(SourceLocation());
+ ObjCObjectTL.setTypeArgsRAngleLoc(SourceLocation());
+ }
+
+ // Protocol qualifier information.
+ if (ObjCObjectTL.getNumProtocols() > 0) {
+ assert(ObjCObjectTL.getNumProtocols() == Protocols.size());
+ ObjCObjectTL.setProtocolLAngleLoc(ProtocolLAngleLoc);
+ ObjCObjectTL.setProtocolRAngleLoc(ProtocolRAngleLoc);
+ for (unsigned i = 0, n = Protocols.size(); i != n; ++i)
+ ObjCObjectTL.setProtocolLoc(i, ProtocolLocs[i]);
+ } else {
+ ObjCObjectTL.setProtocolLAngleLoc(SourceLocation());
+ ObjCObjectTL.setProtocolRAngleLoc(SourceLocation());
+ }
+
+ // Base type.
+ ObjCObjectTL.setHasBaseTypeAsWritten(true);
+ if (ObjCObjectTL.getType() == T)
+ ObjCObjectTL.getBaseLoc().initializeFullCopy(BaseTypeInfo->getTypeLoc());
+ else
+ ObjCObjectTL.getBaseLoc().initialize(Context, Loc);
+
+ // We're done. Return the completed type to the parser.
+ return CreateParsedType(Result, ResultTInfo);
+}
+
+/// \brief Convert the specified declspec to the appropriate type
+/// object.
+/// \param state Specifies the declarator containing the declaration specifier
+/// to be converted, along with other associated processing state.
+/// \returns The type described by the declaration specifiers. This function
+/// never returns null.
+static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
+ // FIXME: Should move the logic from DeclSpec::Finish to here for validity
+ // checking.
+
+ Sema &S = state.getSema();
+ Declarator &declarator = state.getDeclarator();
+ const DeclSpec &DS = declarator.getDeclSpec();
+ SourceLocation DeclLoc = declarator.getIdentifierLoc();
+ if (DeclLoc.isInvalid())
+ DeclLoc = DS.getLocStart();
+
+ ASTContext &Context = S.Context;
+
+ QualType Result;
+ switch (DS.getTypeSpecType()) {
+ case DeclSpec::TST_void:
+ Result = Context.VoidTy;
+ break;
+ case DeclSpec::TST_char:
+ if (DS.getTypeSpecSign() == DeclSpec::TSS_unspecified)
+ Result = Context.CharTy;
+ else if (DS.getTypeSpecSign() == DeclSpec::TSS_signed)
+ Result = Context.SignedCharTy;
+ else {
+ assert(DS.getTypeSpecSign() == DeclSpec::TSS_unsigned &&
+ "Unknown TSS value");
+ Result = Context.UnsignedCharTy;
+ }
+ break;
+ case DeclSpec::TST_wchar:
+ if (DS.getTypeSpecSign() == DeclSpec::TSS_unspecified)
+ Result = Context.WCharTy;
+ else if (DS.getTypeSpecSign() == DeclSpec::TSS_signed) {
+ S.Diag(DS.getTypeSpecSignLoc(), diag::ext_invalid_sign_spec)
+ << DS.getSpecifierName(DS.getTypeSpecType(),
+ Context.getPrintingPolicy());
+ Result = Context.getSignedWCharType();
+ } else {
+ assert(DS.getTypeSpecSign() == DeclSpec::TSS_unsigned &&
+ "Unknown TSS value");
+ S.Diag(DS.getTypeSpecSignLoc(), diag::ext_invalid_sign_spec)
+ << DS.getSpecifierName(DS.getTypeSpecType(),
+ Context.getPrintingPolicy());
+ Result = Context.getUnsignedWCharType();
+ }
+ break;
+ case DeclSpec::TST_char16:
+ assert(DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
+ "Unknown TSS value");
+ Result = Context.Char16Ty;
+ break;
+ case DeclSpec::TST_char32:
+ assert(DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
+ "Unknown TSS value");
+ Result = Context.Char32Ty;
+ break;
+ case DeclSpec::TST_unspecified:
+ // If this is a missing declspec in a block literal return context, then it
+ // is inferred from the return statements inside the block.
+ // The declspec is always missing in a lambda expr context; it is either
+ // specified with a trailing return type or inferred.
+ if (S.getLangOpts().CPlusPlus14 &&
+ declarator.getContext() == Declarator::LambdaExprContext) {
+ // In C++1y, a lambda's implicit return type is 'auto'.
+ Result = Context.getAutoDeductType();
+ break;
+ } else if (declarator.getContext() == Declarator::LambdaExprContext ||
+ isOmittedBlockReturnType(declarator)) {
+ Result = Context.DependentTy;
+ break;
+ }
+
+ // Unspecified typespec defaults to int in C90. However, the C90 grammar
+ // [C90 6.5] only allows a decl-spec if there was *some* type-specifier,
+ // type-qualifier, or storage-class-specifier. If not, emit an extwarn.
+ // Note that the one exception to this is function definitions, which are
+ // allowed to be completely missing a declspec. This is handled in the
+ // parser already though by it pretending to have seen an 'int' in this
+ // case.
+ if (S.getLangOpts().ImplicitInt) {
+ // In C89 mode, we only warn if there is a completely missing declspec
+ // when one is not allowed.
+ if (DS.isEmpty()) {
+ S.Diag(DeclLoc, diag::ext_missing_declspec)
+ << DS.getSourceRange()
+ << FixItHint::CreateInsertion(DS.getLocStart(), "int");
+ }
+ } else if (!DS.hasTypeSpecifier()) {
+ // C99 and C++ require a type specifier. For example, C99 6.7.2p2 says:
+ // "At least one type specifier shall be given in the declaration
+ // specifiers in each declaration, and in the specifier-qualifier list in
+ // each struct declaration and type name."
+ if (S.getLangOpts().CPlusPlus) {
+ S.Diag(DeclLoc, diag::err_missing_type_specifier)
+ << DS.getSourceRange();
+
+ // When this occurs in C++ code, often something is very broken with the
+ // value being declared, poison it as invalid so we don't get chains of
+ // errors.
+ declarator.setInvalidType(true);
+ } else {
+ S.Diag(DeclLoc, diag::ext_missing_type_specifier)
+ << DS.getSourceRange();
+ }
+ }
+
+ // FALL THROUGH.
+ case DeclSpec::TST_int: {
+ if (DS.getTypeSpecSign() != DeclSpec::TSS_unsigned) {
+ switch (DS.getTypeSpecWidth()) {
+ case DeclSpec::TSW_unspecified: Result = Context.IntTy; break;
+ case DeclSpec::TSW_short: Result = Context.ShortTy; break;
+ case DeclSpec::TSW_long: Result = Context.LongTy; break;
+ case DeclSpec::TSW_longlong:
+ Result = Context.LongLongTy;
+
+ // 'long long' is a C99 or C++11 feature.
+ if (!S.getLangOpts().C99) {
+ if (S.getLangOpts().CPlusPlus)
+ S.Diag(DS.getTypeSpecWidthLoc(),
+ S.getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong);
+ else
+ S.Diag(DS.getTypeSpecWidthLoc(), diag::ext_c99_longlong);
+ }
+ break;
+ }
+ } else {
+ switch (DS.getTypeSpecWidth()) {
+ case DeclSpec::TSW_unspecified: Result = Context.UnsignedIntTy; break;
+ case DeclSpec::TSW_short: Result = Context.UnsignedShortTy; break;
+ case DeclSpec::TSW_long: Result = Context.UnsignedLongTy; break;
+ case DeclSpec::TSW_longlong:
+ Result = Context.UnsignedLongLongTy;
+
+ // 'long long' is a C99 or C++11 feature.
+ if (!S.getLangOpts().C99) {
+ if (S.getLangOpts().CPlusPlus)
+ S.Diag(DS.getTypeSpecWidthLoc(),
+ S.getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong);
+ else
+ S.Diag(DS.getTypeSpecWidthLoc(), diag::ext_c99_longlong);
+ }
+ break;
+ }
+ }
+ break;
+ }
+ case DeclSpec::TST_int128:
+ if (!S.Context.getTargetInfo().hasInt128Type())
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_int128_unsupported);
+ if (DS.getTypeSpecSign() == DeclSpec::TSS_unsigned)
+ Result = Context.UnsignedInt128Ty;
+ else
+ Result = Context.Int128Ty;
+ break;
+ case DeclSpec::TST_half: Result = Context.HalfTy; break;
+ case DeclSpec::TST_float: Result = Context.FloatTy; break;
+ case DeclSpec::TST_double:
+ if (DS.getTypeSpecWidth() == DeclSpec::TSW_long)
+ Result = Context.LongDoubleTy;
+ else
+ Result = Context.DoubleTy;
+
+ if (S.getLangOpts().OpenCL &&
+ !((S.getLangOpts().OpenCLVersion >= 120) ||
+ S.getOpenCLOptions().cl_khr_fp64)) {
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension)
+ << Result << "cl_khr_fp64";
+ declarator.setInvalidType(true);
+ }
+ break;
+ case DeclSpec::TST_bool: Result = Context.BoolTy; break; // _Bool or bool
+ case DeclSpec::TST_decimal32: // _Decimal32
+ case DeclSpec::TST_decimal64: // _Decimal64
+ case DeclSpec::TST_decimal128: // _Decimal128
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_decimal_unsupported);
+ Result = Context.IntTy;
+ declarator.setInvalidType(true);
+ break;
+ case DeclSpec::TST_class:
+ case DeclSpec::TST_enum:
+ case DeclSpec::TST_union:
+ case DeclSpec::TST_struct:
+ case DeclSpec::TST_interface: {
+ TypeDecl *D = dyn_cast_or_null<TypeDecl>(DS.getRepAsDecl());
+ if (!D) {
+ // This can happen in C++ with ambiguous lookups.
+ Result = Context.IntTy;
+ declarator.setInvalidType(true);
+ break;
+ }
+
+ // If the type is deprecated or unavailable, diagnose it.
+ S.DiagnoseUseOfDecl(D, DS.getTypeSpecTypeNameLoc());
+
+ assert(DS.getTypeSpecWidth() == 0 && DS.getTypeSpecComplex() == 0 &&
+ DS.getTypeSpecSign() == 0 && "No qualifiers on tag names!");
+
+ // TypeQuals handled by caller.
+ Result = Context.getTypeDeclType(D);
+
+ // In both C and C++, make an ElaboratedType.
+ ElaboratedTypeKeyword Keyword
+ = ElaboratedType::getKeywordForTypeSpec(DS.getTypeSpecType());
+ Result = S.getElaboratedType(Keyword, DS.getTypeSpecScope(), Result);
+ break;
+ }
+ case DeclSpec::TST_typename: {
+ assert(DS.getTypeSpecWidth() == 0 && DS.getTypeSpecComplex() == 0 &&
+ DS.getTypeSpecSign() == 0 &&
+ "Can't handle qualifiers on typedef names yet!");
+ Result = S.GetTypeFromParser(DS.getRepAsType());
+ if (Result.isNull()) {
+ declarator.setInvalidType(true);
+ } else if (S.getLangOpts().OpenCL) {
+ if (Result->getAs<AtomicType>()) {
+ StringRef TypeName = Result.getBaseTypeIdentifier()->getName();
+ bool NoExtTypes =
+ llvm::StringSwitch<bool>(TypeName)
+ .Cases("atomic_int", "atomic_uint", "atomic_float",
+ "atomic_flag", true)
+ .Default(false);
+ if (!S.getOpenCLOptions().cl_khr_int64_base_atomics && !NoExtTypes) {
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension)
+ << Result << "cl_khr_int64_base_atomics";
+ declarator.setInvalidType(true);
+ }
+ if (!S.getOpenCLOptions().cl_khr_int64_extended_atomics &&
+ !NoExtTypes) {
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension)
+ << Result << "cl_khr_int64_extended_atomics";
+ declarator.setInvalidType(true);
+ }
+ if (!S.getOpenCLOptions().cl_khr_fp64 &&
+ !TypeName.compare("atomic_double")) {
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension)
+ << Result << "cl_khr_fp64";
+ declarator.setInvalidType(true);
+ }
+ } else if (!S.getOpenCLOptions().cl_khr_gl_msaa_sharing &&
+ (Result->isImage2dMSAAT() || Result->isImage2dArrayMSAAT() ||
+ Result->isImage2dArrayMSAATDepth() ||
+ Result->isImage2dMSAATDepth())) {
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension)
+ << Result << "cl_khr_gl_msaa_sharing";
+ declarator.setInvalidType(true);
+ }
+ }
+
+ // TypeQuals handled by caller.
+ break;
+ }
+ case DeclSpec::TST_typeofType:
+ // FIXME: Preserve type source info.
+ Result = S.GetTypeFromParser(DS.getRepAsType());
+ assert(!Result.isNull() && "Didn't get a type for typeof?");
+ if (!Result->isDependentType())
+ if (const TagType *TT = Result->getAs<TagType>())
+ S.DiagnoseUseOfDecl(TT->getDecl(), DS.getTypeSpecTypeLoc());
+ // TypeQuals handled by caller.
+ Result = Context.getTypeOfType(Result);
+ break;
+ case DeclSpec::TST_typeofExpr: {
+ Expr *E = DS.getRepAsExpr();
+ assert(E && "Didn't get an expression for typeof?");
+ // TypeQuals handled by caller.
+ Result = S.BuildTypeofExprType(E, DS.getTypeSpecTypeLoc());
+ if (Result.isNull()) {
+ Result = Context.IntTy;
+ declarator.setInvalidType(true);
+ }
+ break;
+ }
+ case DeclSpec::TST_decltype: {
+ Expr *E = DS.getRepAsExpr();
+ assert(E && "Didn't get an expression for decltype?");
+ // TypeQuals handled by caller.
+ Result = S.BuildDecltypeType(E, DS.getTypeSpecTypeLoc());
+ if (Result.isNull()) {
+ Result = Context.IntTy;
+ declarator.setInvalidType(true);
+ }
+ break;
+ }
+ case DeclSpec::TST_underlyingType:
+ Result = S.GetTypeFromParser(DS.getRepAsType());
+ assert(!Result.isNull() && "Didn't get a type for __underlying_type?");
+ Result = S.BuildUnaryTransformType(Result,
+ UnaryTransformType::EnumUnderlyingType,
+ DS.getTypeSpecTypeLoc());
+ if (Result.isNull()) {
+ Result = Context.IntTy;
+ declarator.setInvalidType(true);
+ }
+ break;
+
+ case DeclSpec::TST_auto:
+ // TypeQuals handled by caller.
+ // If auto is mentioned in a lambda parameter context, convert it to a
+ // template parameter type immediately, with the appropriate depth and
+ // index, and update sema's state (LambdaScopeInfo) for the current lambda
+ // being analyzed (which tracks the invented type template parameter).
+ if (declarator.getContext() == Declarator::LambdaExprParameterContext) {
+ sema::LambdaScopeInfo *LSI = S.getCurLambda();
+ assert(LSI && "No LambdaScopeInfo on the stack!");
+ const unsigned TemplateParameterDepth = LSI->AutoTemplateParameterDepth;
+ const unsigned AutoParameterPosition = LSI->AutoTemplateParams.size();
+ const bool IsParameterPack = declarator.hasEllipsis();
+
+ // Turns out we must create the TemplateTypeParmDecl here to
+ // retrieve the corresponding template parameter type.
+ TemplateTypeParmDecl *CorrespondingTemplateParam =
+ TemplateTypeParmDecl::Create(Context,
+ // Temporarily add to the TranslationUnit DeclContext. When the
+ // associated TemplateParameterList is attached to a template
+ // declaration (such as FunctionTemplateDecl), the DeclContext
+ // for each template parameter gets updated appropriately via
+ // a call to AdoptTemplateParameterList.
+ Context.getTranslationUnitDecl(),
+ /*KeyLoc*/ SourceLocation(),
+ /*NameLoc*/ declarator.getLocStart(),
+ TemplateParameterDepth,
+ AutoParameterPosition, // our template param index
+ /* Identifier*/ nullptr, false, IsParameterPack);
+ LSI->AutoTemplateParams.push_back(CorrespondingTemplateParam);
+ // Replace the 'auto' in the function parameter with this invented
+ // template type parameter.
+ Result = QualType(CorrespondingTemplateParam->getTypeForDecl(), 0);
+ } else {
+ Result = Context.getAutoType(QualType(), AutoTypeKeyword::Auto, false);
+ }
+ break;
+
+ case DeclSpec::TST_auto_type:
+ Result = Context.getAutoType(QualType(), AutoTypeKeyword::GNUAutoType, false);
+ break;
+
+ case DeclSpec::TST_decltype_auto:
+ Result = Context.getAutoType(QualType(), AutoTypeKeyword::DecltypeAuto,
+ /*IsDependent*/ false);
+ break;
+
+ case DeclSpec::TST_unknown_anytype:
+ Result = Context.UnknownAnyTy;
+ break;
+
+ case DeclSpec::TST_atomic:
+ Result = S.GetTypeFromParser(DS.getRepAsType());
+ assert(!Result.isNull() && "Didn't get a type for _Atomic?");
+ Result = S.BuildAtomicType(Result, DS.getTypeSpecTypeLoc());
+ if (Result.isNull()) {
+ Result = Context.IntTy;
+ declarator.setInvalidType(true);
+ }
+ break;
+
+ case DeclSpec::TST_error:
+ Result = Context.IntTy;
+ declarator.setInvalidType(true);
+ break;
+ }
+
+ // Handle complex types.
+ if (DS.getTypeSpecComplex() == DeclSpec::TSC_complex) {
+ if (S.getLangOpts().Freestanding)
+ S.Diag(DS.getTypeSpecComplexLoc(), diag::ext_freestanding_complex);
+ Result = Context.getComplexType(Result);
+ } else if (DS.isTypeAltiVecVector()) {
+ unsigned typeSize = static_cast<unsigned>(Context.getTypeSize(Result));
+ assert(typeSize > 0 && "type size for vector must be greater than 0 bits");
+ VectorType::VectorKind VecKind = VectorType::AltiVecVector;
+ if (DS.isTypeAltiVecPixel())
+ VecKind = VectorType::AltiVecPixel;
+ else if (DS.isTypeAltiVecBool())
+ VecKind = VectorType::AltiVecBool;
+ Result = Context.getVectorType(Result, 128/typeSize, VecKind);
+ }
+
+ // FIXME: Imaginary.
+ if (DS.getTypeSpecComplex() == DeclSpec::TSC_imaginary)
+ S.Diag(DS.getTypeSpecComplexLoc(), diag::err_imaginary_not_supported);
+
+ // Before we process any type attributes, synthesize a block literal
+ // function declarator if necessary.
+ if (declarator.getContext() == Declarator::BlockLiteralContext)
+ maybeSynthesizeBlockSignature(state, Result);
+
+ // Apply any type attributes from the decl spec. This may cause the
+ // list of type attributes to be temporarily saved while the type
+ // attributes are pushed around.
+ processTypeAttrs(state, Result, TAL_DeclSpec, DS.getAttributes().getList());
+
+ // Apply const/volatile/restrict qualifiers to T.
+ if (unsigned TypeQuals = DS.getTypeQualifiers()) {
+ // Warn about CV qualifiers on function types.
+ // C99 6.7.3p8:
+ // If the specification of a function type includes any type qualifiers,
+ // the behavior is undefined.
+ // C++11 [dcl.fct]p7:
+ // The effect of a cv-qualifier-seq in a function declarator is not the
+ // same as adding cv-qualification on top of the function type. In the
+ // latter case, the cv-qualifiers are ignored.
+ if (TypeQuals && Result->isFunctionType()) {
+ diagnoseAndRemoveTypeQualifiers(
+ S, DS, TypeQuals, Result, DeclSpec::TQ_const | DeclSpec::TQ_volatile,
+ S.getLangOpts().CPlusPlus
+ ? diag::warn_typecheck_function_qualifiers_ignored
+ : diag::warn_typecheck_function_qualifiers_unspecified);
+ // No diagnostic for 'restrict' or '_Atomic' applied to a
+ // function type; we'll diagnose those later, in BuildQualifiedType.
+ }
+
+ // C++11 [dcl.ref]p1:
+ // Cv-qualified references are ill-formed except when the
+ // cv-qualifiers are introduced through the use of a typedef-name
+ // or decltype-specifier, in which case the cv-qualifiers are ignored.
+ //
+ // There don't appear to be any other contexts in which a cv-qualified
+ // reference type could be formed, so the 'ill-formed' clause here appears
+ // to never happen.
+ if (TypeQuals && Result->isReferenceType()) {
+ diagnoseAndRemoveTypeQualifiers(
+ S, DS, TypeQuals, Result,
+ DeclSpec::TQ_const | DeclSpec::TQ_volatile | DeclSpec::TQ_atomic,
+ diag::warn_typecheck_reference_qualifiers);
+ }
+
+ // C90 6.5.3 constraints: "The same type qualifier shall not appear more
+ // than once in the same specifier-list or qualifier-list, either directly
+ // or via one or more typedefs."
+ if (!S.getLangOpts().C99 && !S.getLangOpts().CPlusPlus
+ && TypeQuals & Result.getCVRQualifiers()) {
+ if (TypeQuals & DeclSpec::TQ_const && Result.isConstQualified()) {
+ S.Diag(DS.getConstSpecLoc(), diag::ext_duplicate_declspec)
+ << "const";
+ }
+
+ if (TypeQuals & DeclSpec::TQ_volatile && Result.isVolatileQualified()) {
+ S.Diag(DS.getVolatileSpecLoc(), diag::ext_duplicate_declspec)
+ << "volatile";
+ }
+
+      // C90 doesn't have restrict or _Atomic, so it doesn't force us to
+      // produce a warning in this case.
+ }
+
+ QualType Qualified = S.BuildQualifiedType(Result, DeclLoc, TypeQuals, &DS);
+
+ // If adding qualifiers fails, just use the unqualified type.
+ if (Qualified.isNull())
+ declarator.setInvalidType(true);
+ else
+ Result = Qualified;
+ }
+
+ assert(!Result.isNull() && "This function should not return a null type");
+ return Result;
+}
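+
+// For illustration of the TST_unspecified handling above: a declaration
+// with no type specifier, such as 'static x;', defaults to int under
+// C89's implicit-int rule, draws ext_missing_type_specifier in C99, and
+// is rejected in C++ with err_missing_type_specifier (the declarator is
+// also marked invalid there).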
+
+static std::string getPrintableNameForEntity(DeclarationName Entity) {
+ if (Entity)
+ return Entity.getAsString();
+
+ return "type name";
+}
+
+QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
+ Qualifiers Qs, const DeclSpec *DS) {
+ if (T.isNull())
+ return QualType();
+
+ // Enforce C99 6.7.3p2: "Types other than pointer types derived from
+ // object or incomplete types shall not be restrict-qualified."
+ if (Qs.hasRestrict()) {
+ unsigned DiagID = 0;
+ QualType ProblemTy;
+
+ if (T->isAnyPointerType() || T->isReferenceType() ||
+ T->isMemberPointerType()) {
+ QualType EltTy;
+ if (T->isObjCObjectPointerType())
+ EltTy = T;
+ else if (const MemberPointerType *PTy = T->getAs<MemberPointerType>())
+ EltTy = PTy->getPointeeType();
+ else
+ EltTy = T->getPointeeType();
+
+      // If we have a pointer or reference, the pointee must have an
+      // object or incomplete type.
+ if (!EltTy->isIncompleteOrObjectType()) {
+ DiagID = diag::err_typecheck_invalid_restrict_invalid_pointee;
+ ProblemTy = EltTy;
+ }
+ } else if (!T->isDependentType()) {
+ DiagID = diag::err_typecheck_invalid_restrict_not_pointer;
+ ProblemTy = T;
+ }
+
+ if (DiagID) {
+ Diag(DS ? DS->getRestrictSpecLoc() : Loc, DiagID) << ProblemTy;
+ Qs.removeRestrict();
+ }
+ }
+
+ return Context.getQualifiedType(T, Qs);
+}
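+
+// For illustration, per C99 6.7.3p2:
+//   int *restrict p;            // OK: pointer to an object type
+//   int restrict i;             // error: 'restrict' on a non-pointer
+//   void (*restrict fp)(void);  // error: pointee is a function type
+// In the error cases the qualifier is dropped and the type is kept.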
+
+QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
+ unsigned CVRA, const DeclSpec *DS) {
+ if (T.isNull())
+ return QualType();
+
+ // Convert from DeclSpec::TQ to Qualifiers::TQ by just dropping TQ_atomic.
+ unsigned CVR = CVRA & ~DeclSpec::TQ_atomic;
+
+ // C11 6.7.3/5:
+ // If the same qualifier appears more than once in the same
+ // specifier-qualifier-list, either directly or via one or more typedefs,
+ // the behavior is the same as if it appeared only once.
+ //
+ // It's not specified what happens when the _Atomic qualifier is applied to
+ // a type specified with the _Atomic specifier, but we assume that this
+ // should be treated as if the _Atomic qualifier appeared multiple times.
+ if (CVRA & DeclSpec::TQ_atomic && !T->isAtomicType()) {
+ // C11 6.7.3/5:
+ // If other qualifiers appear along with the _Atomic qualifier in a
+ // specifier-qualifier-list, the resulting type is the so-qualified
+ // atomic type.
+ //
+ // Don't need to worry about array types here, since _Atomic can't be
+ // applied to such types.
+ SplitQualType Split = T.getSplitUnqualifiedType();
+ T = BuildAtomicType(QualType(Split.Ty, 0),
+ DS ? DS->getAtomicSpecLoc() : Loc);
+ if (T.isNull())
+ return T;
+ Split.Quals.addCVRQualifiers(CVR);
+ return BuildQualifiedType(T, Loc, Split.Quals);
+ }
+
+ return BuildQualifiedType(T, Loc, Qualifiers::fromCVRMask(CVR), DS);
+}
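+
+// For illustration of the C11 6.7.3/5 interpretation above:
+//   _Atomic const int x;
+// builds the atomic type first and then applies 'const' outside it,
+// yielding 'const _Atomic(int)', the same type that 'const _Atomic(int) y;'
+// names directly.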
+
+/// \brief Build a paren type including \p T.
+QualType Sema::BuildParenType(QualType T) {
+ return Context.getParenType(T);
+}
+
+/// Given that we're building a pointer or reference to the given type,
+/// infer an implicit ARC ownership qualifier for the pointee if one is
+/// required but was not written.
+static QualType inferARCLifetimeForPointee(Sema &S, QualType type,
+ SourceLocation loc,
+ bool isReference) {
+ // Bail out if retention is unrequired or already specified.
+ if (!type->isObjCLifetimeType() ||
+ type.getObjCLifetime() != Qualifiers::OCL_None)
+ return type;
+
+ Qualifiers::ObjCLifetime implicitLifetime = Qualifiers::OCL_None;
+
+ // If the object type is const-qualified, we can safely use
+ // __unsafe_unretained. This is safe (because there are no read
+ // barriers), and it'll be safe to coerce anything but __weak* to
+ // the resulting type.
+ if (type.isConstQualified()) {
+ implicitLifetime = Qualifiers::OCL_ExplicitNone;
+
+ // Otherwise, check whether the static type does not require
+ // retaining. This currently only triggers for Class (possibly
+  // protocol-qualified, and arrays thereof).
+ } else if (type->isObjCARCImplicitlyUnretainedType()) {
+ implicitLifetime = Qualifiers::OCL_ExplicitNone;
+
+ // If we are in an unevaluated context, like sizeof, skip adding a
+ // qualification.
+ } else if (S.isUnevaluatedContext()) {
+ return type;
+
+ // If that failed, give an error and recover using __strong. __strong
+ // is the option most likely to prevent spurious second-order diagnostics,
+ // like when binding a reference to a field.
+ } else {
+ // These types can show up in private ivars in system headers, so
+ // we need this to not be an error in those cases. Instead we
+ // want to delay.
+ if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
+ S.DelayedDiagnostics.add(
+ sema::DelayedDiagnostic::makeForbiddenType(loc,
+ diag::err_arc_indirect_no_ownership, type, isReference));
+ } else {
+ S.Diag(loc, diag::err_arc_indirect_no_ownership) << type << isReference;
+ }
+ implicitLifetime = Qualifiers::OCL_Strong;
+ }
+ assert(implicitLifetime && "didn't infer any lifetime!");
+
+ Qualifiers qs;
+ qs.addObjCLifetime(implicitLifetime);
+ return S.Context.getQualifiedType(type, qs);
+}
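+
+// For illustration, under ARC (hypothetical declarations):
+//   id const *p1;  // const pointee: implicitly __unsafe_unretained
+//   Class *p2;     // Class needs no retains: implicitly __unsafe_unretained
+//   id *p3;        // error: no explicit ownership; recovers as __strong
+//                  // to limit follow-on diagnostics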
+
+static std::string getFunctionQualifiersAsString(const FunctionProtoType *FnTy){
+ std::string Quals =
+ Qualifiers::fromCVRMask(FnTy->getTypeQuals()).getAsString();
+
+ switch (FnTy->getRefQualifier()) {
+ case RQ_None:
+ break;
+
+ case RQ_LValue:
+ if (!Quals.empty())
+ Quals += ' ';
+ Quals += '&';
+ break;
+
+ case RQ_RValue:
+ if (!Quals.empty())
+ Quals += ' ';
+ Quals += "&&";
+ break;
+ }
+
+ return Quals;
+}
+
+namespace {
+/// Kinds of declarator that cannot contain a qualified function type.
+///
+/// C++98 [dcl.fct]p4 / C++11 [dcl.fct]p6:
+/// a function type with a cv-qualifier or a ref-qualifier can only appear
+/// at the topmost level of a type.
+///
+/// Parens and member pointers are permitted. We don't diagnose array and
+/// function declarators, because they don't allow function types at all.
+///
+/// The values of this enum are used in diagnostics.
+enum QualifiedFunctionKind { QFK_BlockPointer, QFK_Pointer, QFK_Reference };
+}
+
+/// Check whether the type T is a qualified function type, and if it is,
+/// diagnose that it cannot be contained within the given kind of declarator.
+static bool checkQualifiedFunction(Sema &S, QualType T, SourceLocation Loc,
+ QualifiedFunctionKind QFK) {
+ // Does T refer to a function type with a cv-qualifier or a ref-qualifier?
+ const FunctionProtoType *FPT = T->getAs<FunctionProtoType>();
+ if (!FPT || (FPT->getTypeQuals() == 0 && FPT->getRefQualifier() == RQ_None))
+ return false;
+
+ S.Diag(Loc, diag::err_compound_qualified_function_type)
+ << QFK << isa<FunctionType>(T.IgnoreParens()) << T
+ << getFunctionQualifiersAsString(FPT);
+ return true;
+}
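+
+// For illustration, per C++11 [dcl.fct]p6 (hypothetical typedef):
+//   typedef void F() const;
+//   F *pf;  // error: pointer to a const-qualified function type
+//   F &rf;  // error: reference to such a type
+// A cv- or ref-qualified function type is usable only for a non-static
+// member function or through a pointer to member.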
+
+/// \brief Build a pointer type.
+///
+/// \param T The type to which we'll be building a pointer.
+///
+/// \param Loc The location of the entity whose type involves this
+/// pointer type or, if there is no such entity, the location of the
+/// type that will have pointer type.
+///
+/// \param Entity The name of the entity that involves the pointer
+/// type, if known.
+///
+/// \returns A suitable pointer type, if there are no
+/// errors. Otherwise, returns a NULL type.
+QualType Sema::BuildPointerType(QualType T,
+ SourceLocation Loc, DeclarationName Entity) {
+ if (T->isReferenceType()) {
+ // C++ 8.3.2p4: There shall be no ... pointers to references ...
+ Diag(Loc, diag::err_illegal_decl_pointer_to_reference)
+ << getPrintableNameForEntity(Entity) << T;
+ return QualType();
+ }
+
+ if (checkQualifiedFunction(*this, T, Loc, QFK_Pointer))
+ return QualType();
+
+ assert(!T->isObjCObjectType() && "Should build ObjCObjectPointerType");
+
+ // In ARC, it is forbidden to build pointers to unqualified pointers.
+ if (getLangOpts().ObjCAutoRefCount)
+ T = inferARCLifetimeForPointee(*this, T, Loc, /*reference*/ false);
+
+ // Build the pointer type.
+ return Context.getPointerType(T);
+}
+
+/// \brief Build a reference type.
+///
+/// \param T The type to which we'll be building a reference.
+///
+/// \param Loc The location of the entity whose type involves this
+/// reference type or, if there is no such entity, the location of the
+/// type that will have reference type.
+///
+/// \param Entity The name of the entity that involves the reference
+/// type, if known.
+///
+/// \returns A suitable reference type, if there are no
+/// errors. Otherwise, returns a NULL type.
+QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
+ SourceLocation Loc,
+ DeclarationName Entity) {
+ assert(Context.getCanonicalType(T) != Context.OverloadTy &&
+ "Unresolved overloaded function type");
+
+ // C++0x [dcl.ref]p6:
+ // If a typedef (7.1.3), a type template-parameter (14.3.1), or a
+ // decltype-specifier (7.1.6.2) denotes a type TR that is a reference to a
+ // type T, an attempt to create the type "lvalue reference to cv TR" creates
+ // the type "lvalue reference to T", while an attempt to create the type
+ // "rvalue reference to cv TR" creates the type TR.
+ bool LValueRef = SpelledAsLValue || T->getAs<LValueReferenceType>();
+
+ // C++ [dcl.ref]p4: There shall be no references to references.
+ //
+ // According to C++ DR 106, references to references are only
+ // diagnosed when they are written directly (e.g., "int & &"),
+ // but not when they happen via a typedef:
+ //
+ // typedef int& intref;
+ // typedef intref& intref2;
+ //
+ // Parser::ParseDeclaratorInternal diagnoses the case where
+ // references are written directly; here, we handle the
+ // collapsing of references-to-references as described in C++0x.
+ // DR 106 and 540 introduce reference-collapsing into C++98/03.
+
+ // C++ [dcl.ref]p1:
+ // A declarator that specifies the type "reference to cv void"
+ // is ill-formed.
+ if (T->isVoidType()) {
+ Diag(Loc, diag::err_reference_to_void);
+ return QualType();
+ }
+
+ if (checkQualifiedFunction(*this, T, Loc, QFK_Reference))
+ return QualType();
+
+ // In ARC, it is forbidden to build references to unqualified pointers.
+ if (getLangOpts().ObjCAutoRefCount)
+ T = inferARCLifetimeForPointee(*this, T, Loc, /*reference*/ true);
+
+ // Handle restrict on references.
+ if (LValueRef)
+ return Context.getLValueReferenceType(T, SpelledAsLValue);
+ return Context.getRValueReferenceType(T);
+}
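+
+// For illustration of the collapsing rule above:
+//   typedef int &intref;
+//   intref &r1 = n;   // lvalue ref to lvalue ref: still int&
+//   intref &&r2 = n;  // rvalue ref to 'intref': also collapses to int&
+// since LValueRef stays true whenever T is already an lvalue reference.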
+
+/// Check whether the specified array size makes the array type a VLA. If so,
+/// return true; if not, return the size of the array in SizeVal.
+static bool isArraySizeVLA(Sema &S, Expr *ArraySize, llvm::APSInt &SizeVal) {
+ // If the size is an ICE, it certainly isn't a VLA. If we're in a GNU mode
+ // (like gnu99, but not c99) accept any evaluatable value as an extension.
+ class VLADiagnoser : public Sema::VerifyICEDiagnoser {
+ public:
+ VLADiagnoser() : Sema::VerifyICEDiagnoser(true) {}
+
+ void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) override {
+ }
+
+ void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR) override {
+ S.Diag(Loc, diag::ext_vla_folded_to_constant) << SR;
+ }
+ } Diagnoser;
+
+ return S.VerifyIntegerConstantExpression(ArraySize, &SizeVal, Diagnoser,
+ S.LangOpts.GNUMode).isInvalid();
+}
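+
+// For illustration: in C, 'enum { N = 8 }; int a[N];' has an ICE size
+// and is a constant array, while 'const int m = 8; int b[m];' is not an
+// ICE in C and so declares a VLA; in GNU modes the latter folds to a
+// constant and is accepted with ext_vla_folded_to_constant instead.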
+
+
+/// \brief Build an array type.
+///
+/// \param T The type of each element in the array.
+///
+/// \param ASM C99 array size modifier (e.g., '*', 'static').
+///
+/// \param ArraySize Expression describing the size of the array.
+///
+/// \param Brackets The range from the opening '[' to the closing ']'.
+///
+/// \param Entity The name of the entity that involves the array
+/// type, if known.
+///
+/// \returns A suitable array type, if there are no errors. Otherwise,
+/// returns a NULL type.
+QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
+ Expr *ArraySize, unsigned Quals,
+ SourceRange Brackets, DeclarationName Entity) {
+
+ SourceLocation Loc = Brackets.getBegin();
+ if (getLangOpts().CPlusPlus) {
+ // C++ [dcl.array]p1:
+ // T is called the array element type; this type shall not be a reference
+ // type, the (possibly cv-qualified) type void, a function type or an
+ // abstract class type.
+ //
+ // C++ [dcl.array]p3:
+ // When several "array of" specifications are adjacent, [...] only the
+ // first of the constant expressions that specify the bounds of the arrays
+ // may be omitted.
+ //
+ // Note: function types are handled in the common path with C.
+ if (T->isReferenceType()) {
+ Diag(Loc, diag::err_illegal_decl_array_of_references)
+ << getPrintableNameForEntity(Entity) << T;
+ return QualType();
+ }
+
+ if (T->isVoidType() || T->isIncompleteArrayType()) {
+ Diag(Loc, diag::err_illegal_decl_array_incomplete_type) << T;
+ return QualType();
+ }
+
+ if (RequireNonAbstractType(Brackets.getBegin(), T,
+ diag::err_array_of_abstract_type))
+ return QualType();
+
+ // Mentioning a member pointer type for an array type causes us to lock in
+ // an inheritance model, even if it's inside an unused typedef.
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft())
+ if (const MemberPointerType *MPTy = T->getAs<MemberPointerType>())
+ if (!MPTy->getClass()->isDependentType())
+ (void)isCompleteType(Loc, T);
+
+ } else {
+ // C99 6.7.5.2p1: If the element type is an incomplete or function type,
+ // reject it (e.g. void ary[7], struct foo ary[7], void ary[7]())
+ if (RequireCompleteType(Loc, T,
+ diag::err_illegal_decl_array_incomplete_type))
+ return QualType();
+ }
+
+ if (T->isFunctionType()) {
+ Diag(Loc, diag::err_illegal_decl_array_of_functions)
+ << getPrintableNameForEntity(Entity) << T;
+ return QualType();
+ }
+
+ if (const RecordType *EltTy = T->getAs<RecordType>()) {
+ // If the element type is a struct or union that contains a variadic
+ // array, accept it as a GNU extension: C99 6.7.2.1p2.
+ if (EltTy->getDecl()->hasFlexibleArrayMember())
+ Diag(Loc, diag::ext_flexible_array_in_array) << T;
+ } else if (T->isObjCObjectType()) {
+ Diag(Loc, diag::err_objc_array_of_interfaces) << T;
+ return QualType();
+ }
+
+ // Do placeholder conversions on the array size expression.
+ if (ArraySize && ArraySize->hasPlaceholderType()) {
+ ExprResult Result = CheckPlaceholderExpr(ArraySize);
+ if (Result.isInvalid()) return QualType();
+ ArraySize = Result.get();
+ }
+
+ // Do lvalue-to-rvalue conversions on the array size expression.
+ if (ArraySize && !ArraySize->isRValue()) {
+ ExprResult Result = DefaultLvalueConversion(ArraySize);
+ if (Result.isInvalid())
+ return QualType();
+
+ ArraySize = Result.get();
+ }
+
+ // C99 6.7.5.2p1: The size expression shall have integer type.
+ // C++11 allows contextual conversions to such types.
+ if (!getLangOpts().CPlusPlus11 &&
+ ArraySize && !ArraySize->isTypeDependent() &&
+ !ArraySize->getType()->isIntegralOrUnscopedEnumerationType()) {
+ Diag(ArraySize->getLocStart(), diag::err_array_size_non_int)
+ << ArraySize->getType() << ArraySize->getSourceRange();
+ return QualType();
+ }
+
+ llvm::APSInt ConstVal(Context.getTypeSize(Context.getSizeType()));
+ if (!ArraySize) {
+ if (ASM == ArrayType::Star)
+ T = Context.getVariableArrayType(T, nullptr, ASM, Quals, Brackets);
+ else
+ T = Context.getIncompleteArrayType(T, ASM, Quals);
+ } else if (ArraySize->isTypeDependent() || ArraySize->isValueDependent()) {
+ T = Context.getDependentSizedArrayType(T, ArraySize, ASM, Quals, Brackets);
+ } else if ((!T->isDependentType() && !T->isIncompleteType() &&
+ !T->isConstantSizeType()) ||
+ isArraySizeVLA(*this, ArraySize, ConstVal)) {
+ // Even in C++11, don't allow contextual conversions in the array bound
+ // of a VLA.
+ if (getLangOpts().CPlusPlus11 &&
+ !ArraySize->getType()->isIntegralOrUnscopedEnumerationType()) {
+ Diag(ArraySize->getLocStart(), diag::err_array_size_non_int)
+ << ArraySize->getType() << ArraySize->getSourceRange();
+ return QualType();
+ }
+
+ // C99: an array with an element type that has a non-constant-size is a VLA.
+ // C99: an array with a non-ICE size is a VLA. We accept any expression
+ // that we can fold to a non-zero positive value as an extension.
+ T = Context.getVariableArrayType(T, ArraySize, ASM, Quals, Brackets);
+ } else {
+ // C99 6.7.5.2p1: If the expression is a constant expression, it shall
+ // have a value greater than zero.
+ if (ConstVal.isSigned() && ConstVal.isNegative()) {
+ if (Entity)
+ Diag(ArraySize->getLocStart(), diag::err_decl_negative_array_size)
+ << getPrintableNameForEntity(Entity) << ArraySize->getSourceRange();
+ else
+ Diag(ArraySize->getLocStart(), diag::err_typecheck_negative_array_size)
+ << ArraySize->getSourceRange();
+ return QualType();
+ }
+ if (ConstVal == 0) {
+ // GCC accepts zero-sized static arrays. We allow them when
+ // we're not in a SFINAE context.
+ Diag(ArraySize->getLocStart(),
+ isSFINAEContext()? diag::err_typecheck_zero_array_size
+ : diag::ext_typecheck_zero_array_size)
+ << ArraySize->getSourceRange();
+
+ if (ASM == ArrayType::Static) {
+ Diag(ArraySize->getLocStart(),
+ diag::warn_typecheck_zero_static_array_size)
+ << ArraySize->getSourceRange();
+ ASM = ArrayType::Normal;
+ }
+ } else if (!T->isDependentType() && !T->isVariablyModifiedType() &&
+ !T->isIncompleteType() && !T->isUndeducedType()) {
+ // Is the array too large?
+ unsigned ActiveSizeBits
+ = ConstantArrayType::getNumAddressingBits(Context, T, ConstVal);
+ if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) {
+ Diag(ArraySize->getLocStart(), diag::err_array_too_large)
+ << ConstVal.toString(10)
+ << ArraySize->getSourceRange();
+ return QualType();
+ }
+ }
+
+ T = Context.getConstantArrayType(T, ConstVal, ASM, Quals);
+ }
+
+ // OpenCL v1.2 s6.9.d: variable length arrays are not supported.
+ if (getLangOpts().OpenCL && T->isVariableArrayType()) {
+ Diag(Loc, diag::err_opencl_vla);
+ return QualType();
+ }
+ // If this is not C99, extwarn about VLAs and C99 array size modifiers.
+ if (!getLangOpts().C99) {
+ if (T->isVariableArrayType()) {
+ // Prohibit the use of non-POD types in VLAs.
+ QualType BaseT = Context.getBaseElementType(T);
+ if (!T->isDependentType() && isCompleteType(Loc, BaseT) &&
+ !BaseT.isPODType(Context) && !BaseT->isObjCLifetimeType()) {
+ Diag(Loc, diag::err_vla_non_pod) << BaseT;
+ return QualType();
+ }
+ // Prohibit the use of VLAs during template argument deduction.
+ else if (isSFINAEContext()) {
+ Diag(Loc, diag::err_vla_in_sfinae);
+ return QualType();
+ }
+ // Just extwarn about VLAs.
+ else
+ Diag(Loc, diag::ext_vla);
+ } else if (ASM != ArrayType::Normal || Quals != 0)
+ Diag(Loc,
+ getLangOpts().CPlusPlus? diag::err_c99_array_usage_cxx
+ : diag::ext_c99_array_usage) << ASM;
+ }
+
+ if (T->isVariableArrayType()) {
+ // Warn about VLAs for -Wvla.
+ Diag(Loc, diag::warn_vla_used);
+ }
+
+ return T;
+}
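+
+ // Illustrative examples (not part of the original source): the branches
+ // above map array declarations onto the corresponding type nodes, e.g.
+ //   int fixed[4];   // ICE bound       -> ConstantArrayType
+ //   int vla[n];     // non-ICE bound   -> VariableArrayType (VLA)
+ //   int inc[];      // no bound        -> IncompleteArrayType
+ //   int bad[-1];    // negative bound  -> error, null QualType returned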
+
+/// \brief Build an ext-vector type.
+///
+/// Run the required checks for the extended vector type.
+QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize,
+ SourceLocation AttrLoc) {
+ // unlike gcc's vector_size attribute, we do not allow vectors to be defined
+ // in conjunction with complex types (pointers, arrays, functions, etc.).
+ if (!T->isDependentType() &&
+ !T->isIntegerType() && !T->isRealFloatingType()) {
+ Diag(AttrLoc, diag::err_attribute_invalid_vector_type) << T;
+ return QualType();
+ }
+
+ if (!ArraySize->isTypeDependent() && !ArraySize->isValueDependent()) {
+ llvm::APSInt vecSize(32);
+ if (!ArraySize->isIntegerConstantExpr(vecSize, Context)) {
+ Diag(AttrLoc, diag::err_attribute_argument_type)
+ << "ext_vector_type" << AANT_ArgumentIntegerConstant
+ << ArraySize->getSourceRange();
+ return QualType();
+ }
+
+ // unlike gcc's vector_size attribute, the size is specified as the
+ // number of elements, not the number of bytes.
+ unsigned vectorSize = static_cast<unsigned>(vecSize.getZExtValue());
+
+ if (vectorSize == 0) {
+ Diag(AttrLoc, diag::err_attribute_zero_size)
+ << ArraySize->getSourceRange();
+ return QualType();
+ }
+
+ if (VectorType::isVectorSizeTooLarge(vectorSize)) {
+ Diag(AttrLoc, diag::err_attribute_size_too_large)
+ << ArraySize->getSourceRange();
+ return QualType();
+ }
+
+ return Context.getExtVectorType(T, vectorSize);
+ }
+
+ return Context.getDependentSizedExtVectorType(T, ArraySize, AttrLoc);
+}
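+
+ // Hypothetical usage (not from the original source): the checks above accept
+ // a positive integer-constant element count, e.g.
+ //   typedef float float4 __attribute__((ext_vector_type(4)));
+ // while a zero, oversized, or non-constant count is diagnosed as shown.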
+
+bool Sema::CheckFunctionReturnType(QualType T, SourceLocation Loc) {
+ if (T->isArrayType() || T->isFunctionType()) {
+ Diag(Loc, diag::err_func_returning_array_function)
+ << T->isFunctionType() << T;
+ return true;
+ }
+
+ // Functions cannot return half FP.
+ if (T->isHalfType() && !getLangOpts().HalfArgsAndReturns) {
+ Diag(Loc, diag::err_parameters_retval_cannot_have_fp16_type) << 1 <<
+ FixItHint::CreateInsertion(Loc, "*");
+ return true;
+ }
+
+ // Methods cannot return interface types. All ObjC objects are
+ // passed by reference.
+ if (T->isObjCObjectType()) {
+ Diag(Loc, diag::err_object_cannot_be_passed_returned_by_value) << 0 << T;
+ return true;
+ }
+
+ return false;
+}
+
+QualType Sema::BuildFunctionType(QualType T,
+ MutableArrayRef<QualType> ParamTypes,
+ SourceLocation Loc, DeclarationName Entity,
+ const FunctionProtoType::ExtProtoInfo &EPI) {
+ bool Invalid = false;
+
+ Invalid |= CheckFunctionReturnType(T, Loc);
+
+ for (unsigned Idx = 0, Cnt = ParamTypes.size(); Idx < Cnt; ++Idx) {
+ // FIXME: Loc is too imprecise here; should use proper locations for args.
+ QualType ParamType = Context.getAdjustedParameterType(ParamTypes[Idx]);
+ if (ParamType->isVoidType()) {
+ Diag(Loc, diag::err_param_with_void_type);
+ Invalid = true;
+ } else if (ParamType->isHalfType() && !getLangOpts().HalfArgsAndReturns) {
+ // Disallow half FP arguments.
+ Diag(Loc, diag::err_parameters_retval_cannot_have_fp16_type) << 0 <<
+ FixItHint::CreateInsertion(Loc, "*");
+ Invalid = true;
+ }
+
+ ParamTypes[Idx] = ParamType;
+ }
+
+ if (Invalid)
+ return QualType();
+
+ return Context.getFunctionType(T, ParamTypes, EPI);
+}
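+
+ // Illustrative sketch (not from the original source): a parameter list such
+ // as (int [10], void ()) is adjusted per the usual decay rules before the
+ // function type is formed, yielding (int *, void (*)()).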
+
+/// \brief Build a member pointer type \c T Class::*.
+///
+/// \param T the type to which the member pointer refers.
+/// \param Class the class type into which the member pointer points.
+/// \param Loc the location where this type begins
+/// \param Entity the name of the entity that will have this member pointer type
+///
+/// \returns a member pointer type, if successful, or a NULL type if there was
+/// an error.
+QualType Sema::BuildMemberPointerType(QualType T, QualType Class,
+ SourceLocation Loc,
+ DeclarationName Entity) {
+ // Verify that we're not building a pointer to pointer to function with
+ // exception specification.
+ if (CheckDistantExceptionSpec(T)) {
+ Diag(Loc, diag::err_distant_exception_spec);
+ return QualType();
+ }
+
+ // C++ 8.3.3p3: A pointer to member shall not point to ... a member
+ // with reference type, or "cv void."
+ if (T->isReferenceType()) {
+ Diag(Loc, diag::err_illegal_decl_mempointer_to_reference)
+ << getPrintableNameForEntity(Entity) << T;
+ return QualType();
+ }
+
+ if (T->isVoidType()) {
+ Diag(Loc, diag::err_illegal_decl_mempointer_to_void)
+ << getPrintableNameForEntity(Entity);
+ return QualType();
+ }
+
+ if (!Class->isDependentType() && !Class->isRecordType()) {
+ Diag(Loc, diag::err_mempointer_in_nonclass_type) << Class;
+ return QualType();
+ }
+
+ // Adjust the default free function calling convention to the default method
+ // calling convention.
+ bool IsCtorOrDtor =
+ (Entity.getNameKind() == DeclarationName::CXXConstructorName) ||
+ (Entity.getNameKind() == DeclarationName::CXXDestructorName);
+ if (T->isFunctionType())
+ adjustMemberFunctionCC(T, /*IsStatic=*/false, IsCtorOrDtor, Loc);
+
+ return Context.getMemberPointerType(T, Class.getTypePtr());
+}
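+
+ // Illustrative examples (not from the original source):
+ //   struct S { };
+ //   int S::*pm;     // OK: pointer to int member of S
+ //   int &S::*bad;   // error: member pointer to a reference, rejected above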
+
+/// \brief Build a block pointer type.
+///
+/// \param T The type to which we'll be building a block pointer.
+///
+/// \param Loc The source location, used for diagnostics.
+///
+/// \param Entity The name of the entity that involves the block pointer
+/// type, if known.
+///
+/// \returns A suitable block pointer type, if there are no
+/// errors. Otherwise, returns a NULL type.
+QualType Sema::BuildBlockPointerType(QualType T,
+ SourceLocation Loc,
+ DeclarationName Entity) {
+ if (!T->isFunctionType()) {
+ Diag(Loc, diag::err_nonfunction_block_type);
+ return QualType();
+ }
+
+ if (checkQualifiedFunction(*this, T, Loc, QFK_BlockPointer))
+ return QualType();
+
+ return Context.getBlockPointerType(T);
+}
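+
+ // Illustrative examples (not from the original source): block pointers may
+ // only point to function types, e.g.
+ //   void (^handler)(int);   // OK
+ //   int ^bad;               // error: err_nonfunction_block_type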
+
+QualType Sema::GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo) {
+ QualType QT = Ty.get();
+ if (QT.isNull()) {
+ if (TInfo) *TInfo = nullptr;
+ return QualType();
+ }
+
+ TypeSourceInfo *DI = nullptr;
+ if (const LocInfoType *LIT = dyn_cast<LocInfoType>(QT)) {
+ QT = LIT->getType();
+ DI = LIT->getTypeSourceInfo();
+ }
+
+ if (TInfo) *TInfo = DI;
+ return QT;
+}
+
+static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state,
+ Qualifiers::ObjCLifetime ownership,
+ unsigned chunkIndex);
+
+/// Given that this is the declaration of a parameter under ARC,
+/// attempt to infer attributes and such for pointer-to-whatever
+/// types.
+static void inferARCWriteback(TypeProcessingState &state,
+ QualType &declSpecType) {
+ Sema &S = state.getSema();
+ Declarator &declarator = state.getDeclarator();
+
+ // TODO: should we care about decl qualifiers?
+
+ // Check whether the declarator has the expected form. We walk
+ // from the inside out in order to make the block logic work.
+ unsigned outermostPointerIndex = 0;
+ bool isBlockPointer = false;
+ unsigned numPointers = 0;
+ for (unsigned i = 0, e = declarator.getNumTypeObjects(); i != e; ++i) {
+ unsigned chunkIndex = i;
+ DeclaratorChunk &chunk = declarator.getTypeObject(chunkIndex);
+ switch (chunk.Kind) {
+ case DeclaratorChunk::Paren:
+ // Ignore parens.
+ break;
+
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::Pointer:
+ // Count the number of pointers. Treat references
+ // interchangeably as pointers; if they're mis-ordered, normal
+ // type building will discover that.
+ outermostPointerIndex = chunkIndex;
+ numPointers++;
+ break;
+
+ case DeclaratorChunk::BlockPointer:
+ // If we have a pointer to block pointer, that's an acceptable
+ // indirect reference; anything else is not an application of
+ // the rules.
+ if (numPointers != 1) return;
+ numPointers++;
+ outermostPointerIndex = chunkIndex;
+ isBlockPointer = true;
+
+ // We don't care about pointer structure in return values here.
+ goto done;
+
+ case DeclaratorChunk::Array: // suppress if written (id[])?
+ case DeclaratorChunk::Function:
+ case DeclaratorChunk::MemberPointer:
+ return;
+ }
+ }
+ done:
+
+ // If we have *one* pointer, then we want to throw the qualifier on
+ // the declaration-specifiers, which means that it needs to be a
+ // retainable object type.
+ if (numPointers == 1) {
+ // If it's not a retainable object type, the rule doesn't apply.
+ if (!declSpecType->isObjCRetainableType()) return;
+
+ // If it already has lifetime, don't do anything.
+ if (declSpecType.getObjCLifetime()) return;
+
+ // Otherwise, modify the type in-place.
+ Qualifiers qs;
+
+ if (declSpecType->isObjCARCImplicitlyUnretainedType())
+ qs.addObjCLifetime(Qualifiers::OCL_ExplicitNone);
+ else
+ qs.addObjCLifetime(Qualifiers::OCL_Autoreleasing);
+ declSpecType = S.Context.getQualifiedType(declSpecType, qs);
+
+ // If we have *two* pointers, then we want to throw the qualifier on
+ // the outermost pointer.
+ } else if (numPointers == 2) {
+ // If we don't have a block pointer, we need to check whether the
+ // declaration-specifiers gave us something that will turn into a
+ // retainable object pointer after we slap the first pointer on it.
+ if (!isBlockPointer && !declSpecType->isObjCObjectType())
+ return;
+
+ // Look for an explicit lifetime attribute there.
+ DeclaratorChunk &chunk = declarator.getTypeObject(outermostPointerIndex);
+ if (chunk.Kind != DeclaratorChunk::Pointer &&
+ chunk.Kind != DeclaratorChunk::BlockPointer)
+ return;
+ for (const AttributeList *attr = chunk.getAttrs(); attr;
+ attr = attr->getNext())
+ if (attr->getKind() == AttributeList::AT_ObjCOwnership)
+ return;
+
+ transferARCOwnershipToDeclaratorChunk(state, Qualifiers::OCL_Autoreleasing,
+ outermostPointerIndex);
+
+ // Any other number of pointers/references does not trigger the rule.
+ } else return;
+
+ // TODO: mark whether we did this inference?
+}
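+
+ // Illustrative example (not from the original source): under ARC, a method
+ // parameter written as
+ //   - (BOOL)save:(NSError **)error;
+ // has two pointer levels and no explicit lifetime, so the two-pointer rule
+ // above infers __autoreleasing, yielding NSError *__autoreleasing *.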
+
+void Sema::diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
+ SourceLocation FallbackLoc,
+ SourceLocation ConstQualLoc,
+ SourceLocation VolatileQualLoc,
+ SourceLocation RestrictQualLoc,
+ SourceLocation AtomicQualLoc) {
+ if (!Quals)
+ return;
+
+ struct Qual {
+ const char *Name;
+ unsigned Mask;
+ SourceLocation Loc;
+ } const QualKinds[4] = {
+ { "const", DeclSpec::TQ_const, ConstQualLoc },
+ { "volatile", DeclSpec::TQ_volatile, VolatileQualLoc },
+ { "restrict", DeclSpec::TQ_restrict, RestrictQualLoc },
+ { "_Atomic", DeclSpec::TQ_atomic, AtomicQualLoc }
+ };
+
+ SmallString<32> QualStr;
+ unsigned NumQuals = 0;
+ SourceLocation Loc;
+ FixItHint FixIts[4];
+
+ // Build a string naming the redundant qualifiers.
+ for (unsigned I = 0; I != 4; ++I) {
+ if (Quals & QualKinds[I].Mask) {
+ if (!QualStr.empty()) QualStr += ' ';
+ QualStr += QualKinds[I].Name;
+
+ // If we have a location for the qualifier, offer a fixit.
+ SourceLocation QualLoc = QualKinds[I].Loc;
+ if (QualLoc.isValid()) {
+ FixIts[NumQuals] = FixItHint::CreateRemoval(QualLoc);
+ if (Loc.isInvalid() ||
+ getSourceManager().isBeforeInTranslationUnit(QualLoc, Loc))
+ Loc = QualLoc;
+ }
+
+ ++NumQuals;
+ }
+ }
+
+ Diag(Loc.isInvalid() ? FallbackLoc : Loc, DiagID)
+ << QualStr << NumQuals << FixIts[0] << FixIts[1] << FixIts[2] << FixIts[3];
+}
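+
+ // Illustrative example (not from the original source): when the caller below
+ // passes warn_qual_return_type for a declaration such as
+ //   const int f();
+ // this helper names the ignored "const" and offers a fix-it removing it.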
+
+// Diagnose pointless type qualifiers on the return type of a function.
+static void diagnoseRedundantReturnTypeQualifiers(Sema &S, QualType RetTy,
+ Declarator &D,
+ unsigned FunctionChunkIndex) {
+ if (D.getTypeObject(FunctionChunkIndex).Fun.hasTrailingReturnType()) {
+ // FIXME: TypeSourceInfo doesn't preserve location information for
+ // qualifiers.
+ S.diagnoseIgnoredQualifiers(diag::warn_qual_return_type,
+ RetTy.getLocalCVRQualifiers(),
+ D.getIdentifierLoc());
+ return;
+ }
+
+ for (unsigned OuterChunkIndex = FunctionChunkIndex + 1,
+ End = D.getNumTypeObjects();
+ OuterChunkIndex != End; ++OuterChunkIndex) {
+ DeclaratorChunk &OuterChunk = D.getTypeObject(OuterChunkIndex);
+ switch (OuterChunk.Kind) {
+ case DeclaratorChunk::Paren:
+ continue;
+
+ case DeclaratorChunk::Pointer: {
+ DeclaratorChunk::PointerTypeInfo &PTI = OuterChunk.Ptr;
+ S.diagnoseIgnoredQualifiers(
+ diag::warn_qual_return_type,
+ PTI.TypeQuals,
+ SourceLocation(),
+ SourceLocation::getFromRawEncoding(PTI.ConstQualLoc),
+ SourceLocation::getFromRawEncoding(PTI.VolatileQualLoc),
+ SourceLocation::getFromRawEncoding(PTI.RestrictQualLoc),
+ SourceLocation::getFromRawEncoding(PTI.AtomicQualLoc));
+ return;
+ }
+
+ case DeclaratorChunk::Function:
+ case DeclaratorChunk::BlockPointer:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::MemberPointer:
+ // FIXME: We can't currently provide an accurate source location and a
+ // fix-it hint for these.
+ unsigned AtomicQual = RetTy->isAtomicType() ? DeclSpec::TQ_atomic : 0;
+ S.diagnoseIgnoredQualifiers(diag::warn_qual_return_type,
+ RetTy.getCVRQualifiers() | AtomicQual,
+ D.getIdentifierLoc());
+ return;
+ }
+
+ llvm_unreachable("unknown declarator chunk kind");
+ }
+
+ // If the qualifiers come from a conversion function type, don't diagnose
+ // them -- they're not necessarily redundant, since such a conversion
+ // operator can be explicitly called as "x.operator const int()".
+ if (D.getName().getKind() == UnqualifiedId::IK_ConversionFunctionId)
+ return;
+
+ // Only parens remain between the function chunk and the decl specifiers.
+ // Diagnose any qualifiers that are present there.
+ S.diagnoseIgnoredQualifiers(diag::warn_qual_return_type,
+ D.getDeclSpec().getTypeQualifiers(),
+ D.getIdentifierLoc(),
+ D.getDeclSpec().getConstSpecLoc(),
+ D.getDeclSpec().getVolatileSpecLoc(),
+ D.getDeclSpec().getRestrictSpecLoc(),
+ D.getDeclSpec().getAtomicSpecLoc());
+}
+
+static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
+ TypeSourceInfo *&ReturnTypeInfo) {
+ Sema &SemaRef = state.getSema();
+ Declarator &D = state.getDeclarator();
+ QualType T;
+ ReturnTypeInfo = nullptr;
+
+ // The TagDecl owned by the DeclSpec.
+ TagDecl *OwnedTagDecl = nullptr;
+
+ switch (D.getName().getKind()) {
+ case UnqualifiedId::IK_ImplicitSelfParam:
+ case UnqualifiedId::IK_OperatorFunctionId:
+ case UnqualifiedId::IK_Identifier:
+ case UnqualifiedId::IK_LiteralOperatorId:
+ case UnqualifiedId::IK_TemplateId:
+ T = ConvertDeclSpecToType(state);
+
+ if (!D.isInvalidType() && D.getDeclSpec().isTypeSpecOwned()) {
+ OwnedTagDecl = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
+ // Owned declaration is embedded in declarator.
+ OwnedTagDecl->setEmbeddedInDeclarator(true);
+ }
+ break;
+
+ case UnqualifiedId::IK_ConstructorName:
+ case UnqualifiedId::IK_ConstructorTemplateId:
+ case UnqualifiedId::IK_DestructorName:
+ // Constructors and destructors don't have return types. Use
+ // "void" instead.
+ T = SemaRef.Context.VoidTy;
+ processTypeAttrs(state, T, TAL_DeclSpec,
+ D.getDeclSpec().getAttributes().getList());
+ break;
+
+ case UnqualifiedId::IK_ConversionFunctionId:
+ // The result type of a conversion function is the type that it
+ // converts to.
+ T = SemaRef.GetTypeFromParser(D.getName().ConversionFunctionId,
+ &ReturnTypeInfo);
+ break;
+ }
+
+ if (D.getAttributes())
+ distributeTypeAttrsFromDeclarator(state, T);
+
+ // C++11 [dcl.spec.auto]p5: reject 'auto' if it is not in an allowed context.
+ if (D.getDeclSpec().containsPlaceholderType()) {
+ int Error = -1;
+
+ switch (D.getContext()) {
+ case Declarator::LambdaExprContext:
+ llvm_unreachable("Can't specify a type specifier in lambda grammar");
+ case Declarator::ObjCParameterContext:
+ case Declarator::ObjCResultContext:
+ case Declarator::PrototypeContext:
+ Error = 0;
+ break;
+ case Declarator::LambdaExprParameterContext:
+ // In C++14, generic lambdas allow 'auto' in their parameters.
+ if (!(SemaRef.getLangOpts().CPlusPlus14
+ && D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto))
+ Error = 16;
+ break;
+ case Declarator::MemberContext: {
+ if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static ||
+ D.isFunctionDeclarator())
+ break;
+ bool Cxx = SemaRef.getLangOpts().CPlusPlus;
+ switch (cast<TagDecl>(SemaRef.CurContext)->getTagKind()) {
+ case TTK_Enum: llvm_unreachable("unhandled tag kind");
+ case TTK_Struct: Error = Cxx ? 1 : 2; /* Struct member */ break;
+ case TTK_Union: Error = Cxx ? 3 : 4; /* Union member */ break;
+ case TTK_Class: Error = 5; /* Class member */ break;
+ case TTK_Interface: Error = 6; /* Interface member */ break;
+ }
+ break;
+ }
+ case Declarator::CXXCatchContext:
+ case Declarator::ObjCCatchContext:
+ Error = 7; // Exception declaration
+ break;
+ case Declarator::TemplateParamContext:
+ Error = 8; // Template parameter
+ break;
+ case Declarator::BlockLiteralContext:
+ Error = 9; // Block literal
+ break;
+ case Declarator::TemplateTypeArgContext:
+ Error = 10; // Template type argument
+ break;
+ case Declarator::AliasDeclContext:
+ case Declarator::AliasTemplateContext:
+ Error = 12; // Type alias
+ break;
+ case Declarator::TrailingReturnContext:
+ if (!SemaRef.getLangOpts().CPlusPlus14 ||
+ D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto_type)
+ Error = 13; // Function return type
+ break;
+ case Declarator::ConversionIdContext:
+ if (!SemaRef.getLangOpts().CPlusPlus14 ||
+ D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto_type)
+ Error = 14; // conversion-type-id
+ break;
+ case Declarator::TypeNameContext:
+ Error = 15; // Generic
+ break;
+ case Declarator::FileContext:
+ case Declarator::BlockContext:
+ case Declarator::ForContext:
+ case Declarator::ConditionContext:
+ break;
+ case Declarator::CXXNewContext:
+ if (D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto_type)
+ Error = 17; // 'new' type
+ break;
+ case Declarator::KNRTypeListContext:
+ Error = 18; // K&R function parameter
+ break;
+ }
+
+ if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef)
+ Error = 11;
+
+ // Outside of C++11, it is an error to use 'auto' on a function declarator
+ // (and, in any mode, for '__auto_type').
+ if (D.isFunctionDeclarator() &&
+ (!SemaRef.getLangOpts().CPlusPlus11 ||
+ D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto_type))
+ Error = 13;
+
+ bool HaveTrailing = false;
+
+ // C++11 [dcl.spec.auto]p2: 'auto' is always fine if the declarator
+ // contains a trailing return type. That is only legal at the outermost
+ // level. Check all declarator chunks (outermost first) anyway, to give
+ // better diagnostics.
+ // We don't support '__auto_type' with trailing return types.
+ if (SemaRef.getLangOpts().CPlusPlus11 &&
+ D.getDeclSpec().getTypeSpecType() != DeclSpec::TST_auto_type) {
+ for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
+ unsigned chunkIndex = e - i - 1;
+ state.setCurrentChunkIndex(chunkIndex);
+ DeclaratorChunk &DeclType = D.getTypeObject(chunkIndex);
+ if (DeclType.Kind == DeclaratorChunk::Function) {
+ const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
+ if (FTI.hasTrailingReturnType()) {
+ HaveTrailing = true;
+ Error = -1;
+ break;
+ }
+ }
+ }
+ }
+
+ SourceRange AutoRange = D.getDeclSpec().getTypeSpecTypeLoc();
+ if (D.getName().getKind() == UnqualifiedId::IK_ConversionFunctionId)
+ AutoRange = D.getName().getSourceRange();
+
+ if (Error != -1) {
+ unsigned Keyword;
+ switch (D.getDeclSpec().getTypeSpecType()) {
+ case DeclSpec::TST_auto: Keyword = 0; break;
+ case DeclSpec::TST_decltype_auto: Keyword = 1; break;
+ case DeclSpec::TST_auto_type: Keyword = 2; break;
+ default: llvm_unreachable("unknown auto TypeSpecType");
+ }
+ SemaRef.Diag(AutoRange.getBegin(), diag::err_auto_not_allowed)
+ << Keyword << Error << AutoRange;
+ T = SemaRef.Context.IntTy;
+ D.setInvalidType(true);
+ } else if (!HaveTrailing) {
+ // If there was a trailing return type, we already got
+ // warn_cxx98_compat_trailing_return_type in the parser.
+ SemaRef.Diag(AutoRange.getBegin(),
+ diag::warn_cxx98_compat_auto_type_specifier)
+ << AutoRange;
+ }
+ }
+
+ if (SemaRef.getLangOpts().CPlusPlus &&
+ OwnedTagDecl && OwnedTagDecl->isCompleteDefinition()) {
+ // Check the contexts where C++ forbids the declaration of a new class
+ // or enumeration in a type-specifier-seq.
+ unsigned DiagID = 0;
+ switch (D.getContext()) {
+ case Declarator::TrailingReturnContext:
+ // Class and enumeration definitions are syntactically not allowed in
+ // trailing return types.
+ llvm_unreachable("parser should not have allowed this");
+ break;
+ case Declarator::FileContext:
+ case Declarator::MemberContext:
+ case Declarator::BlockContext:
+ case Declarator::ForContext:
+ case Declarator::BlockLiteralContext:
+ case Declarator::LambdaExprContext:
+ // C++11 [dcl.type]p3:
+ // A type-specifier-seq shall not define a class or enumeration unless
+ // it appears in the type-id of an alias-declaration (7.1.3) that is not
+ // the declaration of a template-declaration.
+ case Declarator::AliasDeclContext:
+ break;
+ case Declarator::AliasTemplateContext:
+ DiagID = diag::err_type_defined_in_alias_template;
+ break;
+ case Declarator::TypeNameContext:
+ case Declarator::ConversionIdContext:
+ case Declarator::TemplateParamContext:
+ case Declarator::CXXNewContext:
+ case Declarator::CXXCatchContext:
+ case Declarator::ObjCCatchContext:
+ case Declarator::TemplateTypeArgContext:
+ DiagID = diag::err_type_defined_in_type_specifier;
+ break;
+ case Declarator::PrototypeContext:
+ case Declarator::LambdaExprParameterContext:
+ case Declarator::ObjCParameterContext:
+ case Declarator::ObjCResultContext:
+ case Declarator::KNRTypeListContext:
+ // C++ [dcl.fct]p6:
+ // Types shall not be defined in return or parameter types.
+ DiagID = diag::err_type_defined_in_param_type;
+ break;
+ case Declarator::ConditionContext:
+ // C++ 6.4p2:
+ // The type-specifier-seq shall not contain typedef and shall not declare
+ // a new class or enumeration.
+ DiagID = diag::err_type_defined_in_condition;
+ break;
+ }
+
+ if (DiagID != 0) {
+ SemaRef.Diag(OwnedTagDecl->getLocation(), DiagID)
+ << SemaRef.Context.getTypeDeclType(OwnedTagDecl);
+ D.setInvalidType(true);
+ }
+ }
+
+ assert(!T.isNull() && "This function should not return a null type");
+ return T;
+}
+
+/// Produce an appropriate diagnostic for an ambiguity between a function
+/// declarator and a C++ direct-initializer.
+static void warnAboutAmbiguousFunction(Sema &S, Declarator &D,
+ DeclaratorChunk &DeclType, QualType RT) {
+ const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
+ assert(FTI.isAmbiguous && "no direct-initializer / function ambiguity");
+
+ // If the return type is void there is no ambiguity.
+ if (RT->isVoidType())
+ return;
+
+ // An initializer for a non-class type can have at most one argument.
+ if (!RT->isRecordType() && FTI.NumParams > 1)
+ return;
+
+ // An initializer for a reference must have exactly one argument.
+ if (RT->isReferenceType() && FTI.NumParams != 1)
+ return;
+
+ // Only warn if this declarator is declaring a function at block scope, and
+ // doesn't have a storage class (such as 'extern') specified.
+ if (!D.isFunctionDeclarator() ||
+ D.getFunctionDefinitionKind() != FDK_Declaration ||
+ !S.CurContext->isFunctionOrMethod() ||
+ D.getDeclSpec().getStorageClassSpec()
+ != DeclSpec::SCS_unspecified)
+ return;
+
+ // Inside a condition, a direct initializer is not permitted. We allow one to
+ // be parsed in order to give better diagnostics in condition parsing.
+ if (D.getContext() == Declarator::ConditionContext)
+ return;
+
+ SourceRange ParenRange(DeclType.Loc, DeclType.EndLoc);
+
+ S.Diag(DeclType.Loc,
+ FTI.NumParams ? diag::warn_parens_disambiguated_as_function_declaration
+ : diag::warn_empty_parens_are_function_decl)
+ << ParenRange;
+
+ // If the declaration looks like:
+ // T var1,
+ // f();
+ // and name lookup finds a function named 'f', then the ',' was
+ // probably intended to be a ';'.
+ if (!D.isFirstDeclarator() && D.getIdentifier()) {
+ FullSourceLoc Comma(D.getCommaLoc(), S.SourceMgr);
+ FullSourceLoc Name(D.getIdentifierLoc(), S.SourceMgr);
+ if (Comma.getFileID() != Name.getFileID() ||
+ Comma.getSpellingLineNumber() != Name.getSpellingLineNumber()) {
+ LookupResult Result(S, D.getIdentifier(), SourceLocation(),
+ Sema::LookupOrdinaryName);
+ if (S.LookupName(Result, S.getCurScope()))
+ S.Diag(D.getCommaLoc(), diag::note_empty_parens_function_call)
+ << FixItHint::CreateReplacement(D.getCommaLoc(), ";")
+ << D.getIdentifier();
+ }
+ }
+
+ if (FTI.NumParams > 0) {
+ // For a declaration with parameters, e.g. "T var(T());", suggest adding
+ // parens around the first parameter to turn the declaration into a
+ // variable declaration.
+ SourceRange Range = FTI.Params[0].Param->getSourceRange();
+ SourceLocation B = Range.getBegin();
+ SourceLocation E = S.getLocForEndOfToken(Range.getEnd());
+ // FIXME: Maybe we should suggest adding braces instead of parens
+ // in C++11 for classes that don't have an initializer_list constructor.
+ S.Diag(B, diag::note_additional_parens_for_variable_declaration)
+ << FixItHint::CreateInsertion(B, "(")
+ << FixItHint::CreateInsertion(E, ")");
+ } else {
+ // For a declaration without parameters, e.g. "T var();", suggest replacing
+ // the parens with an initializer to turn the declaration into a variable
+ // declaration.
+ const CXXRecordDecl *RD = RT->getAsCXXRecordDecl();
+
+ // Empty parens mean value-initialization, and no parens mean
+ // default initialization. These are equivalent if the default
+ // constructor is user-provided or if zero-initialization is a
+ // no-op.
+ if (RD && RD->hasDefinition() &&
+ (RD->isEmpty() || RD->hasUserProvidedDefaultConstructor()))
+ S.Diag(DeclType.Loc, diag::note_empty_parens_default_ctor)
+ << FixItHint::CreateRemoval(ParenRange);
+ else {
+ std::string Init =
+ S.getFixItZeroInitializerForType(RT, ParenRange.getBegin());
+ if (Init.empty() && S.LangOpts.CPlusPlus11)
+ Init = "{}";
+ if (!Init.empty())
+ S.Diag(DeclType.Loc, diag::note_empty_parens_zero_initialize)
+ << FixItHint::CreateReplacement(ParenRange, Init);
+ }
+ }
+}
+
+/// Helper for figuring out the default CC for a function declarator type. If
+/// this is the outermost chunk, then we can determine the CC from the
+/// declarator context. If not, then this could be either a member function
+/// type or normal function type.
+static CallingConv
+getCCForDeclaratorChunk(Sema &S, Declarator &D,
+ const DeclaratorChunk::FunctionTypeInfo &FTI,
+ unsigned ChunkIndex) {
+ assert(D.getTypeObject(ChunkIndex).Kind == DeclaratorChunk::Function);
+
+ bool IsCXXInstanceMethod = false;
+
+ if (S.getLangOpts().CPlusPlus) {
+ // Look inwards through parentheses to see if this chunk will form a
+ // member pointer type or if we're the declarator. Any type attributes
+ // between here and there will override the CC we choose here.
+ unsigned I = ChunkIndex;
+ bool FoundNonParen = false;
+ while (I && !FoundNonParen) {
+ --I;
+ if (D.getTypeObject(I).Kind != DeclaratorChunk::Paren)
+ FoundNonParen = true;
+ }
+
+ if (FoundNonParen) {
+ // If we're not the declarator, we're a regular function type unless we're
+ // in a member pointer.
+ IsCXXInstanceMethod =
+ D.getTypeObject(I).Kind == DeclaratorChunk::MemberPointer;
+ } else if (D.getContext() == Declarator::LambdaExprContext) {
+ // This can only be a call operator for a lambda, which is an instance
+ // method.
+ IsCXXInstanceMethod = true;
+ } else {
+ // We're the innermost decl chunk, so must be a function declarator.
+ assert(D.isFunctionDeclarator());
+
+ // If we're inside a record, we're declaring a method, but it could be
+ // explicitly or implicitly static.
+ IsCXXInstanceMethod =
+ D.isFirstDeclarationOfMember() &&
+ D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef &&
+ !D.isStaticMember();
+ }
+ }
+
+ CallingConv CC = S.Context.getDefaultCallingConvention(FTI.isVariadic,
+ IsCXXInstanceMethod);
+
+ // Attribute AT_OpenCLKernel affects the calling convention only on
+ // the SPIR target, hence it cannot be treated as a calling
+ // convention attribute. This is the simplest place to infer
+ // "spir_kernel" for OpenCL kernels on SPIR.
+ if (CC == CC_SpirFunction) {
+ for (const AttributeList *Attr = D.getDeclSpec().getAttributes().getList();
+ Attr; Attr = Attr->getNext()) {
+ if (Attr->getKind() == AttributeList::AT_OpenCLKernel) {
+ CC = CC_SpirKernel;
+ break;
+ }
+ }
+ }
+
+ return CC;
+}
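+
+ // Illustrative example (an assumption about one target, not from the
+ // original source): on 32-bit Windows, a chunk that forms a member pointer,
+ // as in
+ //   void (S::*pmf)();
+ // is treated as an instance method and so defaults to thiscall, not cdecl.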
+
+namespace {
+ /// A simple notion of pointer kinds, which matches up with the various
+ /// pointer declarators.
+ enum class SimplePointerKind {
+ Pointer,
+ BlockPointer,
+ MemberPointer,
+ };
+}
+
+IdentifierInfo *Sema::getNullabilityKeyword(NullabilityKind nullability) {
+ switch (nullability) {
+ case NullabilityKind::NonNull:
+ if (!Ident__Nonnull)
+ Ident__Nonnull = PP.getIdentifierInfo("_Nonnull");
+ return Ident__Nonnull;
+
+ case NullabilityKind::Nullable:
+ if (!Ident__Nullable)
+ Ident__Nullable = PP.getIdentifierInfo("_Nullable");
+ return Ident__Nullable;
+
+ case NullabilityKind::Unspecified:
+ if (!Ident__Null_unspecified)
+ Ident__Null_unspecified = PP.getIdentifierInfo("_Null_unspecified");
+ return Ident__Null_unspecified;
+ }
+ llvm_unreachable("Unknown nullability kind.");
+}
+
+/// Retrieve the identifier "NSError".
+IdentifierInfo *Sema::getNSErrorIdent() {
+ if (!Ident_NSError)
+ Ident_NSError = PP.getIdentifierInfo("NSError");
+
+ return Ident_NSError;
+}
+
+/// Check whether there is a nullability attribute of any kind in the given
+/// attribute list.
+static bool hasNullabilityAttr(const AttributeList *attrs) {
+ for (const AttributeList *attr = attrs; attr;
+ attr = attr->getNext()) {
+ if (attr->getKind() == AttributeList::AT_TypeNonNull ||
+ attr->getKind() == AttributeList::AT_TypeNullable ||
+ attr->getKind() == AttributeList::AT_TypeNullUnspecified)
+ return true;
+ }
+
+ return false;
+}
+
+namespace {
+ /// Describes the kind of a pointer a declarator describes.
+ enum class PointerDeclaratorKind {
+ // Not a pointer.
+ NonPointer,
+ // Single-level pointer.
+ SingleLevelPointer,
+ // Multi-level pointer (of any pointer kind).
+ MultiLevelPointer,
+ // CFFooRef*
+ MaybePointerToCFRef,
+ // CFErrorRef*
+ CFErrorRefPointer,
+ // NSError**
+ NSErrorPointerPointer,
+ };
+}
+
+/// Classify the given declarator, whose specified type is \c type, based on
+/// what kind of pointer it refers to.
+///
+/// This is used to determine the default nullability.
+static PointerDeclaratorKind classifyPointerDeclarator(Sema &S,
+ QualType type,
+ Declarator &declarator) {
+ unsigned numNormalPointers = 0;
+
+ // For any dependent type, we consider it a non-pointer.
+ if (type->isDependentType())
+ return PointerDeclaratorKind::NonPointer;
+
+ // Look through the declarator chunks to identify pointers.
+ for (unsigned i = 0, n = declarator.getNumTypeObjects(); i != n; ++i) {
+ DeclaratorChunk &chunk = declarator.getTypeObject(i);
+ switch (chunk.Kind) {
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::Function:
+ break;
+
+ case DeclaratorChunk::BlockPointer:
+ case DeclaratorChunk::MemberPointer:
+ return numNormalPointers > 0 ? PointerDeclaratorKind::MultiLevelPointer
+ : PointerDeclaratorKind::SingleLevelPointer;
+
+ case DeclaratorChunk::Paren:
+ case DeclaratorChunk::Reference:
+ continue;
+
+ case DeclaratorChunk::Pointer:
+ ++numNormalPointers;
+ if (numNormalPointers > 2)
+ return PointerDeclaratorKind::MultiLevelPointer;
+ continue;
+ }
+ }
+
+ // Then, dig into the type specifier itself.
+ unsigned numTypeSpecifierPointers = 0;
+ do {
+ // Decompose normal pointers.
+ if (auto ptrType = type->getAs<PointerType>()) {
+ ++numNormalPointers;
+
+ if (numNormalPointers > 2)
+ return PointerDeclaratorKind::MultiLevelPointer;
+
+ type = ptrType->getPointeeType();
+ ++numTypeSpecifierPointers;
+ continue;
+ }
+
+ // Decompose block pointers.
+ if (type->getAs<BlockPointerType>()) {
+ return numNormalPointers > 0 ? PointerDeclaratorKind::MultiLevelPointer
+ : PointerDeclaratorKind::SingleLevelPointer;
+ }
+
+ // Decompose member pointers.
+ if (type->getAs<MemberPointerType>()) {
+ return numNormalPointers > 0 ? PointerDeclaratorKind::MultiLevelPointer
+ : PointerDeclaratorKind::SingleLevelPointer;
+ }
+
+ // Look at Objective-C object pointers.
+ if (auto objcObjectPtr = type->getAs<ObjCObjectPointerType>()) {
+ ++numNormalPointers;
+ ++numTypeSpecifierPointers;
+
+ // If this is NSError**, report that.
+ if (auto objcClassDecl = objcObjectPtr->getInterfaceDecl()) {
+ if (objcClassDecl->getIdentifier() == S.getNSErrorIdent() &&
+ numNormalPointers == 2 && numTypeSpecifierPointers < 2) {
+ return PointerDeclaratorKind::NSErrorPointerPointer;
+ }
+ }
+
+ break;
+ }
+
+ // Look at Objective-C class types.
+ if (auto objcClass = type->getAs<ObjCInterfaceType>()) {
+ if (objcClass->getInterface()->getIdentifier() == S.getNSErrorIdent()) {
+ if (numNormalPointers == 2 && numTypeSpecifierPointers < 2)
+ return PointerDeclaratorKind::NSErrorPointerPointer;
+ }
+
+ break;
+ }
+
+ // If at this point we haven't seen a pointer, we won't see one.
+ if (numNormalPointers == 0)
+ return PointerDeclaratorKind::NonPointer;
+
+ if (auto recordType = type->getAs<RecordType>()) {
+ RecordDecl *recordDecl = recordType->getDecl();
+
+ bool isCFError = false;
+ if (S.CFError) {
+ // If we already know about CFError, test it directly.
+ isCFError = (S.CFError == recordDecl);
+ } else {
+ // Check whether this is CFError, which we identify based on its bridge
+ // to NSError.
+ if (recordDecl->getTagKind() == TTK_Struct && numNormalPointers > 0) {
+ if (auto bridgeAttr = recordDecl->getAttr<ObjCBridgeAttr>()) {
+ if (bridgeAttr->getBridgedType() == S.getNSErrorIdent()) {
+ S.CFError = recordDecl;
+ isCFError = true;
+ }
+ }
+ }
+ }
+
+ // If this is CFErrorRef*, report it as such.
+ if (isCFError && numNormalPointers == 2 && numTypeSpecifierPointers < 2) {
+ return PointerDeclaratorKind::CFErrorRefPointer;
+ }
+ break;
+ }
+
+ break;
+ } while (true);
+
+ switch (numNormalPointers) {
+ case 0:
+ return PointerDeclaratorKind::NonPointer;
+
+ case 1:
+ return PointerDeclaratorKind::SingleLevelPointer;
+
+ case 2:
+ return PointerDeclaratorKind::MaybePointerToCFRef;
+
+ default:
+ return PointerDeclaratorKind::MultiLevelPointer;
+ }
+}
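+
+ // Illustrative classifications (not from the original source):
+ //   id obj;              // SingleLevelPointer
+ //   NSError **outError;  // NSErrorPointerPointer
+ //   CFErrorRef *outErr;  // CFErrorRefPointer (via the NSError bridge attr)
+ //   int i;               // NonPointer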
+
+static FileID getNullabilityCompletenessCheckFileID(Sema &S,
+ SourceLocation loc) {
+ // If we're anywhere in a function, method, or closure context, don't perform
+ // completeness checks.
+ for (DeclContext *ctx = S.CurContext; ctx; ctx = ctx->getParent()) {
+ if (ctx->isFunctionOrMethod())
+ return FileID();
+
+ if (ctx->isFileContext())
+ break;
+ }
+
+ // We only care about the expansion location.
+ loc = S.SourceMgr.getExpansionLoc(loc);
+ FileID file = S.SourceMgr.getFileID(loc);
+ if (file.isInvalid())
+ return FileID();
+
+ // Retrieve file information.
+ bool invalid = false;
+ const SrcMgr::SLocEntry &sloc = S.SourceMgr.getSLocEntry(file, &invalid);
+ if (invalid || !sloc.isFile())
+ return FileID();
+
+ // We don't want to perform completeness checks on the main file or in
+ // system headers.
+ const SrcMgr::FileInfo &fileInfo = sloc.getFile();
+ if (fileInfo.getIncludeLoc().isInvalid())
+ return FileID();
+ if (fileInfo.getFileCharacteristic() != SrcMgr::C_User &&
+ S.Diags.getSuppressSystemWarnings()) {
+ return FileID();
+ }
+
+ return file;
+}
+
+/// Check for consistent use of nullability.
+static void checkNullabilityConsistency(TypeProcessingState &state,
+ SimplePointerKind pointerKind,
+ SourceLocation pointerLoc) {
+ Sema &S = state.getSema();
+
+ // Determine which file we're performing consistency checking for.
+ FileID file = getNullabilityCompletenessCheckFileID(S, pointerLoc);
+ if (file.isInvalid())
+ return;
+
+ // If we haven't seen any type nullability in this file, we won't warn now
+ // about anything.
+ FileNullability &fileNullability = S.NullabilityMap[file];
+ if (!fileNullability.SawTypeNullability) {
+ // If this is the first pointer declarator in the file, record it.
+ if (fileNullability.PointerLoc.isInvalid() &&
+ !S.Context.getDiagnostics().isIgnored(diag::warn_nullability_missing,
+ pointerLoc)) {
+ fileNullability.PointerLoc = pointerLoc;
+ fileNullability.PointerKind = static_cast<unsigned>(pointerKind);
+ }
+
+ return;
+ }
+
+ // Complain about missing nullability.
+ S.Diag(pointerLoc, diag::warn_nullability_missing)
+ << static_cast<unsigned>(pointerKind);
+}
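+
+ // Illustrative example (not from the original source): once a header spells
+ // out nullability anywhere, e.g.
+ //   void f(int *_Nonnull p);
+ // a later unannotated pointer in that same header, such as
+ //   void g(int *q);
+ // triggers warn_nullability_missing on q's pointer type.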
+
+static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
+ QualType declSpecType,
+ TypeSourceInfo *TInfo) {
+ // The TypeSourceInfo that this function returns will not be a null type.
+ // If there is an error, this function will fill in a dummy type as fallback.
+ QualType T = declSpecType;
+ Declarator &D = state.getDeclarator();
+ Sema &S = state.getSema();
+ ASTContext &Context = S.Context;
+ const LangOptions &LangOpts = S.getLangOpts();
+
+ // The name we're declaring, if any.
+ DeclarationName Name;
+ if (D.getIdentifier())
+ Name = D.getIdentifier();
+
+ // Does this declaration declare a typedef-name?
+ bool IsTypedefName =
+ D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef ||
+ D.getContext() == Declarator::AliasDeclContext ||
+ D.getContext() == Declarator::AliasTemplateContext;
+
+ // Does T refer to a function type with a cv-qualifier or a ref-qualifier?
+ bool IsQualifiedFunction = T->isFunctionProtoType() &&
+ (T->castAs<FunctionProtoType>()->getTypeQuals() != 0 ||
+ T->castAs<FunctionProtoType>()->getRefQualifier() != RQ_None);
+
+ // If T is 'decltype(auto)', the only declarators we can have are parens
+ // and at most one function declarator if this is a function declaration.
+ if (const AutoType *AT = T->getAs<AutoType>()) {
+ if (AT->isDecltypeAuto()) {
+ for (unsigned I = 0, E = D.getNumTypeObjects(); I != E; ++I) {
+ unsigned Index = E - I - 1;
+ DeclaratorChunk &DeclChunk = D.getTypeObject(Index);
+ unsigned DiagId = diag::err_decltype_auto_compound_type;
+ unsigned DiagKind = 0;
+ switch (DeclChunk.Kind) {
+ case DeclaratorChunk::Paren:
+ continue;
+ case DeclaratorChunk::Function: {
+ unsigned FnIndex;
+ if (D.isFunctionDeclarationContext() &&
+ D.isFunctionDeclarator(FnIndex) && FnIndex == Index)
+ continue;
+ DiagId = diag::err_decltype_auto_function_declarator_not_declaration;
+ break;
+ }
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::BlockPointer:
+ case DeclaratorChunk::MemberPointer:
+ DiagKind = 0;
+ break;
+ case DeclaratorChunk::Reference:
+ DiagKind = 1;
+ break;
+ case DeclaratorChunk::Array:
+ DiagKind = 2;
+ break;
+ }
+
+ S.Diag(DeclChunk.Loc, DiagId) << DiagKind;
+ D.setInvalidType(true);
+ break;
+ }
+ }
+ }
+
+ // Determine whether we should infer _Nonnull on pointer types.
+ Optional<NullabilityKind> inferNullability;
+ bool inferNullabilityCS = false;
+ bool inferNullabilityInnerOnly = false;
+ bool inferNullabilityInnerOnlyComplete = false;
+
+ // Are we in an assume-nonnull region?
+ bool inAssumeNonNullRegion = false;
+ if (S.PP.getPragmaAssumeNonNullLoc().isValid()) {
+ inAssumeNonNullRegion = true;
+ // Determine which file we saw the assume-nonnull region in.
+ FileID file = getNullabilityCompletenessCheckFileID(
+ S, S.PP.getPragmaAssumeNonNullLoc());
+ if (file.isValid()) {
+ FileNullability &fileNullability = S.NullabilityMap[file];
+
+ // If we haven't seen any type nullability before, now we have.
+ if (!fileNullability.SawTypeNullability) {
+ if (fileNullability.PointerLoc.isValid()) {
+ S.Diag(fileNullability.PointerLoc, diag::warn_nullability_missing)
+ << static_cast<unsigned>(fileNullability.PointerKind);
+ }
+
+ fileNullability.SawTypeNullability = true;
+ }
+ }
+ }
+
+ // Whether to complain about missing nullability specifiers or not.
+ enum {
+ /// Never complain.
+ CAMN_No,
+ /// Complain on the inner pointers (but not the outermost
+ /// pointer).
+ CAMN_InnerPointers,
+ /// Complain about any pointers that don't have nullability
+ /// specified or inferred.
+ CAMN_Yes
+ } complainAboutMissingNullability = CAMN_No;
+ unsigned NumPointersRemaining = 0;
+
+ if (IsTypedefName) {
+ // For typedefs, we do not infer any nullability (the default),
+ // and we only complain about missing nullability specifiers on
+ // inner pointers.
+ complainAboutMissingNullability = CAMN_InnerPointers;
+
+ if (T->canHaveNullability() && !T->getNullability(S.Context)) {
+ ++NumPointersRemaining;
+ }
+
+ for (unsigned i = 0, n = D.getNumTypeObjects(); i != n; ++i) {
+ DeclaratorChunk &chunk = D.getTypeObject(i);
+ switch (chunk.Kind) {
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::Function:
+ break;
+
+ case DeclaratorChunk::BlockPointer:
+ case DeclaratorChunk::MemberPointer:
+ ++NumPointersRemaining;
+ break;
+
+ case DeclaratorChunk::Paren:
+ case DeclaratorChunk::Reference:
+ continue;
+
+ case DeclaratorChunk::Pointer:
+ ++NumPointersRemaining;
+ continue;
+ }
+ }
+ } else {
+ bool isFunctionOrMethod = false;
+ switch (auto context = state.getDeclarator().getContext()) {
+ case Declarator::ObjCParameterContext:
+ case Declarator::ObjCResultContext:
+ case Declarator::PrototypeContext:
+ case Declarator::TrailingReturnContext:
+ isFunctionOrMethod = true;
+ // fallthrough
+
+ case Declarator::MemberContext:
+ if (state.getDeclarator().isObjCIvar() && !isFunctionOrMethod) {
+ complainAboutMissingNullability = CAMN_No;
+ break;
+ }
+
+ // Weak properties are inferred to be nullable.
+ if (state.getDeclarator().isObjCWeakProperty() && inAssumeNonNullRegion) {
+ inferNullability = NullabilityKind::Nullable;
+ break;
+ }
+
+ // fallthrough
+
+ case Declarator::FileContext:
+ case Declarator::KNRTypeListContext:
+ complainAboutMissingNullability = CAMN_Yes;
+
+ // Nullability inference depends on the type and declarator.
+ switch (classifyPointerDeclarator(S, T, D)) {
+ case PointerDeclaratorKind::NonPointer:
+ case PointerDeclaratorKind::MultiLevelPointer:
+ // Cannot infer nullability.
+ break;
+
+ case PointerDeclaratorKind::SingleLevelPointer:
+ // Infer _Nonnull if we are in an assumes-nonnull region.
+ if (inAssumeNonNullRegion) {
+ inferNullability = NullabilityKind::NonNull;
+ inferNullabilityCS = (context == Declarator::ObjCParameterContext ||
+ context == Declarator::ObjCResultContext);
+ }
+ break;
+
+ case PointerDeclaratorKind::CFErrorRefPointer:
+ case PointerDeclaratorKind::NSErrorPointerPointer:
+ // Within a function or method signature, infer _Nullable at both
+ // levels.
+ if (isFunctionOrMethod && inAssumeNonNullRegion)
+ inferNullability = NullabilityKind::Nullable;
+ break;
+
+ case PointerDeclaratorKind::MaybePointerToCFRef:
+ if (isFunctionOrMethod) {
+ // On pointer-to-pointer parameters marked cf_returns_retained or
+ // cf_returns_not_retained, if the outer pointer is explicit then
+ // infer the inner pointer as _Nullable.
+ auto hasCFReturnsAttr = [](const AttributeList *NextAttr) -> bool {
+ while (NextAttr) {
+ if (NextAttr->getKind() == AttributeList::AT_CFReturnsRetained ||
+ NextAttr->getKind() == AttributeList::AT_CFReturnsNotRetained)
+ return true;
+ NextAttr = NextAttr->getNext();
+ }
+ return false;
+ };
+ if (const auto *InnermostChunk = D.getInnermostNonParenChunk()) {
+ if (hasCFReturnsAttr(D.getAttributes()) ||
+ hasCFReturnsAttr(InnermostChunk->getAttrs()) ||
+ hasCFReturnsAttr(D.getDeclSpec().getAttributes().getList())) {
+ inferNullability = NullabilityKind::Nullable;
+ inferNullabilityInnerOnly = true;
+ }
+ }
+ }
+ break;
+ }
+ break;
+
+ case Declarator::ConversionIdContext:
+ complainAboutMissingNullability = CAMN_Yes;
+ break;
+
+ case Declarator::AliasDeclContext:
+ case Declarator::AliasTemplateContext:
+ case Declarator::BlockContext:
+ case Declarator::BlockLiteralContext:
+ case Declarator::ConditionContext:
+ case Declarator::CXXCatchContext:
+ case Declarator::CXXNewContext:
+ case Declarator::ForContext:
+ case Declarator::LambdaExprContext:
+ case Declarator::LambdaExprParameterContext:
+ case Declarator::ObjCCatchContext:
+ case Declarator::TemplateParamContext:
+ case Declarator::TemplateTypeArgContext:
+ case Declarator::TypeNameContext:
+ // Don't infer in these contexts.
+ break;
+ }
+ }
+
+ // Local function that checks the nullability for a given pointer declarator.
+ // Returns the inferred nullability attribute, or null if none was inferred.
+ auto inferPointerNullability = [&](SimplePointerKind pointerKind,
+ SourceLocation pointerLoc,
+ AttributeList *&attrs) -> AttributeList * {
+ // We've seen a pointer.
+ if (NumPointersRemaining > 0)
+ --NumPointersRemaining;
+
+ // If a nullability attribute is present, there's nothing to do.
+ if (hasNullabilityAttr(attrs))
+ return nullptr;
+
+ // If we're supposed to infer nullability, do so now.
+ if (inferNullability && !inferNullabilityInnerOnlyComplete) {
+ AttributeList::Syntax syntax
+ = inferNullabilityCS ? AttributeList::AS_ContextSensitiveKeyword
+ : AttributeList::AS_Keyword;
+ AttributeList *nullabilityAttr = state.getDeclarator().getAttributePool()
+ .create(
+ S.getNullabilityKeyword(
+ *inferNullability),
+ SourceRange(pointerLoc),
+ nullptr, SourceLocation(),
+ nullptr, 0, syntax);
+
+ spliceAttrIntoList(*nullabilityAttr, attrs);
+
+ if (inferNullabilityCS) {
+ state.getDeclarator().getMutableDeclSpec().getObjCQualifiers()
+ ->setObjCDeclQualifier(ObjCDeclSpec::DQ_CSNullability);
+ }
+
+ if (inferNullabilityInnerOnly)
+ inferNullabilityInnerOnlyComplete = true;
+ return nullabilityAttr;
+ }
+
+ // If we're supposed to complain about missing nullability, do so
+ // now if it's truly missing.
+ switch (complainAboutMissingNullability) {
+ case CAMN_No:
+ break;
+
+ case CAMN_InnerPointers:
+ if (NumPointersRemaining == 0)
+ break;
+ // Fallthrough.
+
+ case CAMN_Yes:
+ checkNullabilityConsistency(state, pointerKind, pointerLoc);
+ }
+ return nullptr;
+ };
+
+ // If the type itself could have nullability but does not, infer pointer
+ // nullability and perform consistency checking.
+ if (T->canHaveNullability() && S.ActiveTemplateInstantiations.empty() &&
+ !T->getNullability(S.Context)) {
+ SimplePointerKind pointerKind = SimplePointerKind::Pointer;
+ if (T->isBlockPointerType())
+ pointerKind = SimplePointerKind::BlockPointer;
+ else if (T->isMemberPointerType())
+ pointerKind = SimplePointerKind::MemberPointer;
+
+ if (auto *attr = inferPointerNullability(
+ pointerKind, D.getDeclSpec().getTypeSpecTypeLoc(),
+ D.getMutableDeclSpec().getAttributes().getListRef())) {
+ T = Context.getAttributedType(
+ AttributedType::getNullabilityAttrKind(*inferNullability), T, T);
+ attr->setUsedAsTypeAttr();
+ }
+ }
+
+ // Walk the DeclTypeInfo, building the recursive type as we go.
+ // DeclTypeInfos are ordered from the identifier out, which is
+ // opposite of what we want :).
+ for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
+ unsigned chunkIndex = e - i - 1;
+ state.setCurrentChunkIndex(chunkIndex);
+ DeclaratorChunk &DeclType = D.getTypeObject(chunkIndex);
+ IsQualifiedFunction &= DeclType.Kind == DeclaratorChunk::Paren;
+ switch (DeclType.Kind) {
+ case DeclaratorChunk::Paren:
+ T = S.BuildParenType(T);
+ break;
+ case DeclaratorChunk::BlockPointer:
+ // If blocks are disabled, emit an error.
+ if (!LangOpts.Blocks)
+ S.Diag(DeclType.Loc, diag::err_blocks_disable);
+
+ // Handle pointer nullability.
+ inferPointerNullability(SimplePointerKind::BlockPointer,
+ DeclType.Loc, DeclType.getAttrListRef());
+
+ T = S.BuildBlockPointerType(T, D.getIdentifierLoc(), Name);
+ if (DeclType.Cls.TypeQuals)
+ T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Cls.TypeQuals);
+ break;
+ case DeclaratorChunk::Pointer:
+ // Verify that we're not building a pointer to pointer to function with
+ // exception specification.
+ if (LangOpts.CPlusPlus && S.CheckDistantExceptionSpec(T)) {
+ S.Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec);
+ D.setInvalidType(true);
+ // Build the type anyway.
+ }
+
+ // Handle pointer nullability
+ inferPointerNullability(SimplePointerKind::Pointer, DeclType.Loc,
+ DeclType.getAttrListRef());
+
+ if (LangOpts.ObjC1 && T->getAs<ObjCObjectType>()) {
+ T = Context.getObjCObjectPointerType(T);
+ if (DeclType.Ptr.TypeQuals)
+ T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals);
+ break;
+ }
+ T = S.BuildPointerType(T, DeclType.Loc, Name);
+ if (DeclType.Ptr.TypeQuals)
+ T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals);
+
+ break;
+ case DeclaratorChunk::Reference: {
+ // Verify that we're not building a reference to pointer to function with
+ // exception specification.
+ if (LangOpts.CPlusPlus && S.CheckDistantExceptionSpec(T)) {
+ S.Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec);
+ D.setInvalidType(true);
+ // Build the type anyway.
+ }
+ T = S.BuildReferenceType(T, DeclType.Ref.LValueRef, DeclType.Loc, Name);
+
+ if (DeclType.Ref.HasRestrict)
+ T = S.BuildQualifiedType(T, DeclType.Loc, Qualifiers::Restrict);
+ break;
+ }
+ case DeclaratorChunk::Array: {
+ // Verify that we're not building an array of pointers to function with
+ // exception specification.
+ if (LangOpts.CPlusPlus && S.CheckDistantExceptionSpec(T)) {
+ S.Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec);
+ D.setInvalidType(true);
+ // Build the type anyway.
+ }
+ DeclaratorChunk::ArrayTypeInfo &ATI = DeclType.Arr;
+ Expr *ArraySize = static_cast<Expr*>(ATI.NumElts);
+ ArrayType::ArraySizeModifier ASM;
+ if (ATI.isStar)
+ ASM = ArrayType::Star;
+ else if (ATI.hasStatic)
+ ASM = ArrayType::Static;
+ else
+ ASM = ArrayType::Normal;
+ if (ASM == ArrayType::Star && !D.isPrototypeContext()) {
+ // FIXME: This check isn't quite right: it allows star in prototypes
+ // for function definitions, and disallows some edge cases detailed
+ // in http://gcc.gnu.org/ml/gcc-patches/2009-02/msg00133.html
+ S.Diag(DeclType.Loc, diag::err_array_star_outside_prototype);
+ ASM = ArrayType::Normal;
+ D.setInvalidType(true);
+ }
+
+ // C99 6.7.5.2p1: The optional type qualifiers and the keyword static
+ // shall appear only in a declaration of a function parameter with an
+ // array type, ...
+ if (ASM == ArrayType::Static || ATI.TypeQuals) {
+ if (!(D.isPrototypeContext() ||
+ D.getContext() == Declarator::KNRTypeListContext)) {
+ S.Diag(DeclType.Loc, diag::err_array_static_outside_prototype) <<
+ (ASM == ArrayType::Static ? "'static'" : "type qualifier");
+ // Remove the 'static' and the type qualifiers.
+ if (ASM == ArrayType::Static)
+ ASM = ArrayType::Normal;
+ ATI.TypeQuals = 0;
+ D.setInvalidType(true);
+ }
+
+ // C99 6.7.5.2p1: ... and then only in the outermost array type
+ // derivation.
+ unsigned x = chunkIndex;
+ while (x != 0) {
+ // Walk outwards along the declarator chunks.
+ x--;
+ const DeclaratorChunk &DC = D.getTypeObject(x);
+ switch (DC.Kind) {
+ case DeclaratorChunk::Paren:
+ continue;
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::MemberPointer:
+ S.Diag(DeclType.Loc, diag::err_array_static_not_outermost) <<
+ (ASM == ArrayType::Static ? "'static'" : "type qualifier");
+ if (ASM == ArrayType::Static)
+ ASM = ArrayType::Normal;
+ ATI.TypeQuals = 0;
+ D.setInvalidType(true);
+ break;
+ case DeclaratorChunk::Function:
+ case DeclaratorChunk::BlockPointer:
+ // These are invalid anyway, so just ignore.
+ break;
+ }
+ }
+ }
+ const AutoType *AT = T->getContainedAutoType();
+ // Allow arrays of auto if we are a generic lambda parameter.
+ // e.g. [](auto (&array)[5]) { return array[0]; }; OK
+ if (AT && D.getContext() != Declarator::LambdaExprParameterContext) {
+ // We've already diagnosed this for decltype(auto).
+ if (!AT->isDecltypeAuto())
+ S.Diag(DeclType.Loc, diag::err_illegal_decl_array_of_auto)
+ << getPrintableNameForEntity(Name) << T;
+ T = QualType();
+ break;
+ }
+
+ T = S.BuildArrayType(T, ASM, ArraySize, ATI.TypeQuals,
+ SourceRange(DeclType.Loc, DeclType.EndLoc), Name);
+ break;
+ }
+ case DeclaratorChunk::Function: {
+ // If the function declarator has a prototype (i.e. it is not () and
+ // does not have a K&R-style identifier list), then the arguments are part
+ // of the type, otherwise the argument list is ().
+ const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
+ IsQualifiedFunction = FTI.TypeQuals || FTI.hasRefQualifier();
+
+ // Check for auto functions and trailing return type and adjust the
+ // return type accordingly.
+ if (!D.isInvalidType()) {
+ // trailing-return-type is only required if we're declaring a function,
+ // and not, for instance, a pointer to a function.
+ if (D.getDeclSpec().containsPlaceholderType() &&
+ !FTI.hasTrailingReturnType() && chunkIndex == 0 &&
+ !S.getLangOpts().CPlusPlus14) {
+ S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
+ D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto
+ ? diag::err_auto_missing_trailing_return
+ : diag::err_deduced_return_type);
+ T = Context.IntTy;
+ D.setInvalidType(true);
+ } else if (FTI.hasTrailingReturnType()) {
+ // T must be exactly 'auto' at this point. See CWG issue 681.
+ if (isa<ParenType>(T)) {
+ S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
+ diag::err_trailing_return_in_parens)
+ << T << D.getDeclSpec().getSourceRange();
+ D.setInvalidType(true);
+ } else if (D.getContext() != Declarator::LambdaExprContext &&
+ (T.hasQualifiers() || !isa<AutoType>(T) ||
+ cast<AutoType>(T)->getKeyword() != AutoTypeKeyword::Auto)) {
+ S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
+ diag::err_trailing_return_without_auto)
+ << T << D.getDeclSpec().getSourceRange();
+ D.setInvalidType(true);
+ }
+ T = S.GetTypeFromParser(FTI.getTrailingReturnType(), &TInfo);
+ if (T.isNull()) {
+ // An error occurred parsing the trailing return type.
+ T = Context.IntTy;
+ D.setInvalidType(true);
+ }
+ }
+ }
+
+ // C99 6.7.5.3p1: The return type may not be a function or array type.
+ // For conversion functions, we'll diagnose this particular error later.
+ if ((T->isArrayType() || T->isFunctionType()) &&
+ (D.getName().getKind() != UnqualifiedId::IK_ConversionFunctionId)) {
+ unsigned diagID = diag::err_func_returning_array_function;
+ // Last processing chunk in block context means this function chunk
+ // represents the block.
+ if (chunkIndex == 0 &&
+ D.getContext() == Declarator::BlockLiteralContext)
+ diagID = diag::err_block_returning_array_function;
+ S.Diag(DeclType.Loc, diagID) << T->isFunctionType() << T;
+ T = Context.IntTy;
+ D.setInvalidType(true);
+ }
+
+ // Do not allow returning half FP value.
+ // FIXME: This really should be in BuildFunctionType.
+ if (T->isHalfType()) {
+ if (S.getLangOpts().OpenCL) {
+ if (!S.getOpenCLOptions().cl_khr_fp16) {
+ S.Diag(D.getIdentifierLoc(), diag::err_opencl_half_return) << T;
+ D.setInvalidType(true);
+ }
+ } else if (!S.getLangOpts().HalfArgsAndReturns) {
+ S.Diag(D.getIdentifierLoc(),
+ diag::err_parameters_retval_cannot_have_fp16_type) << 1;
+ D.setInvalidType(true);
+ }
+ }
+
+ // Methods cannot return interface types. All ObjC objects are
+ // passed by reference.
+ if (T->isObjCObjectType()) {
+ SourceLocation DiagLoc, FixitLoc;
+ if (TInfo) {
+ DiagLoc = TInfo->getTypeLoc().getLocStart();
+ FixitLoc = S.getLocForEndOfToken(TInfo->getTypeLoc().getLocEnd());
+ } else {
+ DiagLoc = D.getDeclSpec().getTypeSpecTypeLoc();
+ FixitLoc = S.getLocForEndOfToken(D.getDeclSpec().getLocEnd());
+ }
+ S.Diag(DiagLoc, diag::err_object_cannot_be_passed_returned_by_value)
+ << 0 << T
+ << FixItHint::CreateInsertion(FixitLoc, "*");
+
+ T = Context.getObjCObjectPointerType(T);
+ if (TInfo) {
+ TypeLocBuilder TLB;
+ TLB.pushFullCopy(TInfo->getTypeLoc());
+ ObjCObjectPointerTypeLoc TLoc = TLB.push<ObjCObjectPointerTypeLoc>(T);
+ TLoc.setStarLoc(FixitLoc);
+ TInfo = TLB.getTypeSourceInfo(Context, T);
+ }
+
+ D.setInvalidType(true);
+ }
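+      // e.g. '- (NSObject)method;' is diagnosed, and we recover as if
+      // '- (NSObject *)method;' had been written.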
+
+ // cv-qualifiers on return types are pointless except when the type is a
+ // class type in C++.
+ if ((T.getCVRQualifiers() || T->isAtomicType()) &&
+ !(S.getLangOpts().CPlusPlus &&
+ (T->isDependentType() || T->isRecordType()))) {
+ if (T->isVoidType() && !S.getLangOpts().CPlusPlus &&
+ D.getFunctionDefinitionKind() == FDK_Definition) {
+ // [6.9.1/3] qualified void return is invalid on a C
+ // function definition. Apparently ok on declarations and
+ // in C++ though (!)
+ S.Diag(DeclType.Loc, diag::err_func_returning_qualified_void) << T;
+ } else
+ diagnoseRedundantReturnTypeQualifiers(S, T, D, chunkIndex);
+ }
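+      // e.g. 'const int f();' warns that the qualifier is ignored, while
+      // 'const void g() { }' is an error in a C function definition.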
+
+ // Objective-C ARC ownership qualifiers are ignored on the function
+ // return type (by type canonicalization). Complain if this attribute
+ // was written here.
+ if (T.getQualifiers().hasObjCLifetime()) {
+ SourceLocation AttrLoc;
+ if (chunkIndex + 1 < D.getNumTypeObjects()) {
+ DeclaratorChunk ReturnTypeChunk = D.getTypeObject(chunkIndex + 1);
+ for (const AttributeList *Attr = ReturnTypeChunk.getAttrs();
+ Attr; Attr = Attr->getNext()) {
+ if (Attr->getKind() == AttributeList::AT_ObjCOwnership) {
+ AttrLoc = Attr->getLoc();
+ break;
+ }
+ }
+ }
+ if (AttrLoc.isInvalid()) {
+ for (const AttributeList *Attr
+ = D.getDeclSpec().getAttributes().getList();
+ Attr; Attr = Attr->getNext()) {
+ if (Attr->getKind() == AttributeList::AT_ObjCOwnership) {
+ AttrLoc = Attr->getLoc();
+ break;
+ }
+ }
+ }
+
+ if (AttrLoc.isValid()) {
+        // The ownership attributes are almost always written via the
+        // predefined __strong/__weak/__autoreleasing/__unsafe_unretained.
+ if (AttrLoc.isMacroID())
+ AttrLoc = S.SourceMgr.getImmediateExpansionRange(AttrLoc).first;
+
+ S.Diag(AttrLoc, diag::warn_arc_lifetime_result_type)
+ << T.getQualifiers().getObjCLifetime();
+ }
+ }
+
+ if (LangOpts.CPlusPlus && D.getDeclSpec().hasTagDefinition()) {
+ // C++ [dcl.fct]p6:
+ // Types shall not be defined in return or parameter types.
+ TagDecl *Tag = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
+ S.Diag(Tag->getLocation(), diag::err_type_defined_in_result_type)
+ << Context.getTypeDeclType(Tag);
+ }
+
+ // Exception specs are not allowed in typedefs. Complain, but add it
+ // anyway.
+ if (IsTypedefName && FTI.getExceptionSpecType())
+ S.Diag(FTI.getExceptionSpecLocBeg(),
+ diag::err_exception_spec_in_typedef)
+ << (D.getContext() == Declarator::AliasDeclContext ||
+ D.getContext() == Declarator::AliasTemplateContext);
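+      // e.g. 'typedef void F() throw(int);' is diagnosed here; the alias
+      // form 'using F = void() throw(int);' selects the 'type alias'
+      // wording of the diagnostic.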
+
+ // If we see "T var();" or "T var(T());" at block scope, it is probably
+ // an attempt to initialize a variable, not a function declaration.
+ if (FTI.isAmbiguous)
+ warnAboutAmbiguousFunction(S, D, DeclType, T);
+
+ FunctionType::ExtInfo EI(getCCForDeclaratorChunk(S, D, FTI, chunkIndex));
+
+ if (!FTI.NumParams && !FTI.isVariadic && !LangOpts.CPlusPlus) {
+ // Simple void foo(), where the incoming T is the result type.
+ T = Context.getFunctionNoProtoType(T, EI);
+ } else {
+ // We allow a zero-parameter variadic function in C if the
+ // function is marked with the "overloadable" attribute. Scan
+ // for this attribute now.
+ if (!FTI.NumParams && FTI.isVariadic && !LangOpts.CPlusPlus) {
+ bool Overloadable = false;
+ for (const AttributeList *Attrs = D.getAttributes();
+ Attrs; Attrs = Attrs->getNext()) {
+ if (Attrs->getKind() == AttributeList::AT_Overloadable) {
+ Overloadable = true;
+ break;
+ }
+ }
+
+ if (!Overloadable)
+ S.Diag(FTI.getEllipsisLoc(), diag::err_ellipsis_first_param);
+ }
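+        // e.g. in C, 'void f(...);' is rejected here, while
+        // 'void f(...) __attribute__((overloadable));' is accepted.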
+
+ if (FTI.NumParams && FTI.Params[0].Param == nullptr) {
+ // C99 6.7.5.3p3: Reject int(x,y,z) when it's not a function
+ // definition.
+ S.Diag(FTI.Params[0].IdentLoc,
+ diag::err_ident_list_in_fn_declaration);
+ D.setInvalidType(true);
+ // Recover by creating a K&R-style function type.
+ T = Context.getFunctionNoProtoType(T, EI);
+ break;
+ }
+
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExtInfo = EI;
+ EPI.Variadic = FTI.isVariadic;
+ EPI.HasTrailingReturn = FTI.hasTrailingReturnType();
+ EPI.TypeQuals = FTI.TypeQuals;
+ EPI.RefQualifier = !FTI.hasRefQualifier()? RQ_None
+ : FTI.RefQualifierIsLValueRef? RQ_LValue
+ : RQ_RValue;
+
+ // Otherwise, we have a function with a parameter list that is
+ // potentially variadic.
+ SmallVector<QualType, 16> ParamTys;
+ ParamTys.reserve(FTI.NumParams);
+
+ SmallVector<bool, 16> ConsumedParameters;
+ ConsumedParameters.reserve(FTI.NumParams);
+ bool HasAnyConsumedParameters = false;
+
+ for (unsigned i = 0, e = FTI.NumParams; i != e; ++i) {
+ ParmVarDecl *Param = cast<ParmVarDecl>(FTI.Params[i].Param);
+ QualType ParamTy = Param->getType();
+ assert(!ParamTy.isNull() && "Couldn't parse type?");
+
+ // Look for 'void'. void is allowed only as a single parameter to a
+ // function with no other parameters (C99 6.7.5.3p10). We record
+ // int(void) as a FunctionProtoType with an empty parameter list.
+ if (ParamTy->isVoidType()) {
+ // If this is something like 'float(int, void)', reject it. 'void'
+ // is an incomplete type (C99 6.2.5p19) and function decls cannot
+ // have parameters of incomplete type.
+ if (FTI.NumParams != 1 || FTI.isVariadic) {
+ S.Diag(DeclType.Loc, diag::err_void_only_param);
+ ParamTy = Context.IntTy;
+ Param->setType(ParamTy);
+ } else if (FTI.Params[i].Ident) {
+ // Reject, but continue to parse 'int(void abc)'.
+ S.Diag(FTI.Params[i].IdentLoc, diag::err_param_with_void_type);
+ ParamTy = Context.IntTy;
+ Param->setType(ParamTy);
+ } else {
+ // Reject, but continue to parse 'float(const void)'.
+ if (ParamTy.hasQualifiers())
+ S.Diag(DeclType.Loc, diag::err_void_param_qualified);
+
+ // Do not add 'void' to the list.
+ break;
+ }
+ } else if (ParamTy->isHalfType()) {
+ // Disallow half FP parameters.
+ // FIXME: This really should be in BuildFunctionType.
+ if (S.getLangOpts().OpenCL) {
+ if (!S.getOpenCLOptions().cl_khr_fp16) {
+ S.Diag(Param->getLocation(),
+ diag::err_opencl_half_param) << ParamTy;
+ D.setInvalidType();
+ Param->setInvalidDecl();
+ }
+ } else if (!S.getLangOpts().HalfArgsAndReturns) {
+ S.Diag(Param->getLocation(),
+ diag::err_parameters_retval_cannot_have_fp16_type) << 0;
+ D.setInvalidType();
+ }
+ } else if (!FTI.hasPrototype) {
+ if (ParamTy->isPromotableIntegerType()) {
+ ParamTy = Context.getPromotedIntegerType(ParamTy);
+ Param->setKNRPromoted(true);
+ } else if (const BuiltinType* BTy = ParamTy->getAs<BuiltinType>()) {
+ if (BTy->getKind() == BuiltinType::Float) {
+ ParamTy = Context.DoubleTy;
+ Param->setKNRPromoted(true);
+ }
+ }
+ }
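+          // e.g. in the K&R definition 'int f(c) char c; { ... }' the
+          // parameter 'c' is recorded with the promoted type 'int', and a
+          // 'float' parameter would be promoted to 'double'.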
+
+ if (LangOpts.ObjCAutoRefCount) {
+ bool Consumed = Param->hasAttr<NSConsumedAttr>();
+ ConsumedParameters.push_back(Consumed);
+ HasAnyConsumedParameters |= Consumed;
+ }
+
+ ParamTys.push_back(ParamTy);
+ }
+
+ if (HasAnyConsumedParameters)
+ EPI.ConsumedParameters = ConsumedParameters.data();
+
+ SmallVector<QualType, 4> Exceptions;
+ SmallVector<ParsedType, 2> DynamicExceptions;
+ SmallVector<SourceRange, 2> DynamicExceptionRanges;
+ Expr *NoexceptExpr = nullptr;
+
+ if (FTI.getExceptionSpecType() == EST_Dynamic) {
+ // FIXME: It's rather inefficient to have to split into two vectors
+ // here.
+ unsigned N = FTI.NumExceptions;
+ DynamicExceptions.reserve(N);
+ DynamicExceptionRanges.reserve(N);
+ for (unsigned I = 0; I != N; ++I) {
+ DynamicExceptions.push_back(FTI.Exceptions[I].Ty);
+ DynamicExceptionRanges.push_back(FTI.Exceptions[I].Range);
+ }
+ } else if (FTI.getExceptionSpecType() == EST_ComputedNoexcept) {
+ NoexceptExpr = FTI.NoexceptExpr;
+ }
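+        // e.g. 'void f() throw(int, float);' fills DynamicExceptions with
+        // the two types, while 'void g() noexcept(E);' takes the
+        // EST_ComputedNoexcept path with 'E' as NoexceptExpr.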
+
+ S.checkExceptionSpecification(D.isFunctionDeclarationContext(),
+ FTI.getExceptionSpecType(),
+ DynamicExceptions,
+ DynamicExceptionRanges,
+ NoexceptExpr,
+ Exceptions,
+ EPI.ExceptionSpec);
+
+ T = Context.getFunctionType(T, ParamTys, EPI);
+ }
+
+ break;
+ }
+ case DeclaratorChunk::MemberPointer:
+ // The scope spec must refer to a class, or be dependent.
+ CXXScopeSpec &SS = DeclType.Mem.Scope();
+ QualType ClsType;
+
+ // Handle pointer nullability.
+ inferPointerNullability(SimplePointerKind::MemberPointer,
+ DeclType.Loc, DeclType.getAttrListRef());
+
+ if (SS.isInvalid()) {
+ // Avoid emitting extra errors if we already errored on the scope.
+ D.setInvalidType(true);
+ } else if (S.isDependentScopeSpecifier(SS) ||
+ dyn_cast_or_null<CXXRecordDecl>(S.computeDeclContext(SS))) {
+ NestedNameSpecifier *NNS = SS.getScopeRep();
+ NestedNameSpecifier *NNSPrefix = NNS->getPrefix();
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ ClsType = Context.getDependentNameType(ETK_None, NNSPrefix,
+ NNS->getAsIdentifier());
+ break;
+
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Super:
+ llvm_unreachable("Nested-name-specifier must name a type");
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ ClsType = QualType(NNS->getAsType(), 0);
+ // Note: if the NNS has a prefix and ClsType is a nondependent
+ // TemplateSpecializationType, then the NNS prefix is NOT included
+ // in ClsType; hence we wrap ClsType into an ElaboratedType.
+ // NOTE: in particular, no wrap occurs if ClsType already is an
+ // Elaborated, DependentName, or DependentTemplateSpecialization.
+ if (NNSPrefix && isa<TemplateSpecializationType>(NNS->getAsType()))
+ ClsType = Context.getElaboratedType(ETK_None, NNSPrefix, ClsType);
+ break;
+ }
+ } else {
+ S.Diag(DeclType.Mem.Scope().getBeginLoc(),
+ diag::err_illegal_decl_mempointer_in_nonclass)
+ << (D.getIdentifier() ? D.getIdentifier()->getName() : "type name")
+ << DeclType.Mem.Scope().getRange();
+ D.setInvalidType(true);
+ }
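+    // e.g. 'int S::*pm;' is fine when 'S' names a class (or is dependent),
+    // while 'int N::*pm;' with 'N' a namespace hits the diagnostic above.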
+
+ if (!ClsType.isNull())
+ T = S.BuildMemberPointerType(T, ClsType, DeclType.Loc,
+ D.getIdentifier());
+ if (T.isNull()) {
+ T = Context.IntTy;
+ D.setInvalidType(true);
+ } else if (DeclType.Mem.TypeQuals) {
+ T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Mem.TypeQuals);
+ }
+ break;
+ }
+
+ if (T.isNull()) {
+ D.setInvalidType(true);
+ T = Context.IntTy;
+ }
+
+ // See if there are any attributes on this declarator chunk.
+ processTypeAttrs(state, T, TAL_DeclChunk,
+ const_cast<AttributeList *>(DeclType.getAttrs()));
+ }
+
+ assert(!T.isNull() && "T must not be null after this point");
+
+ if (LangOpts.CPlusPlus && T->isFunctionType()) {
+ const FunctionProtoType *FnTy = T->getAs<FunctionProtoType>();
+ assert(FnTy && "Why oh why is there not a FunctionProtoType here?");
+
+ // C++ 8.3.5p4:
+ // A cv-qualifier-seq shall only be part of the function type
+ // for a nonstatic member function, the function type to which a pointer
+ // to member refers, or the top-level function type of a function typedef
+ // declaration.
+ //
+ // Core issue 547 also allows cv-qualifiers on function types that are
+ // top-level template type arguments.
+ bool FreeFunction;
+ if (!D.getCXXScopeSpec().isSet()) {
+ FreeFunction = ((D.getContext() != Declarator::MemberContext &&
+ D.getContext() != Declarator::LambdaExprContext) ||
+ D.getDeclSpec().isFriendSpecified());
+ } else {
+ DeclContext *DC = S.computeDeclContext(D.getCXXScopeSpec());
+ FreeFunction = (DC && !DC->isRecord());
+ }
+
+ // C++11 [dcl.fct]p6 (w/DR1417):
+ // An attempt to specify a function type with a cv-qualifier-seq or a
+ // ref-qualifier (including by typedef-name) is ill-formed unless it is:
+ // - the function type for a non-static member function,
+ // - the function type to which a pointer to member refers,
+ // - the top-level function type of a function typedef declaration or
+ // alias-declaration,
+ // - the type-id in the default argument of a type-parameter, or
+ // - the type-id of a template-argument for a type-parameter
+ //
+ // FIXME: Checking this here is insufficient. We accept-invalid on:
+ //
+ // template<typename T> struct S { void f(T); };
+ // S<int() const> s;
+ //
+ // ... for instance.
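+  // e.g. 'typedef void F() const;' and 'void (S::*pm)() const;' are fine,
+  // but 'void (*fp)() const;' declares a pointer to a const-qualified
+  // function type and is diagnosed here.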
+ if (IsQualifiedFunction &&
+ !(!FreeFunction &&
+ D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static) &&
+ !IsTypedefName &&
+ D.getContext() != Declarator::TemplateTypeArgContext) {
+ SourceLocation Loc = D.getLocStart();
+ SourceRange RemovalRange;
+ unsigned I;
+ if (D.isFunctionDeclarator(I)) {
+ SmallVector<SourceLocation, 4> RemovalLocs;
+ const DeclaratorChunk &Chunk = D.getTypeObject(I);
+ assert(Chunk.Kind == DeclaratorChunk::Function);
+ if (Chunk.Fun.hasRefQualifier())
+ RemovalLocs.push_back(Chunk.Fun.getRefQualifierLoc());
+ if (Chunk.Fun.TypeQuals & Qualifiers::Const)
+ RemovalLocs.push_back(Chunk.Fun.getConstQualifierLoc());
+ if (Chunk.Fun.TypeQuals & Qualifiers::Volatile)
+ RemovalLocs.push_back(Chunk.Fun.getVolatileQualifierLoc());
+ if (Chunk.Fun.TypeQuals & Qualifiers::Restrict)
+ RemovalLocs.push_back(Chunk.Fun.getRestrictQualifierLoc());
+ if (!RemovalLocs.empty()) {
+ std::sort(RemovalLocs.begin(), RemovalLocs.end(),
+ BeforeThanCompare<SourceLocation>(S.getSourceManager()));
+ RemovalRange = SourceRange(RemovalLocs.front(), RemovalLocs.back());
+ Loc = RemovalLocs.front();
+ }
+ }
+
+ S.Diag(Loc, diag::err_invalid_qualified_function_type)
+ << FreeFunction << D.isFunctionDeclarator() << T
+ << getFunctionQualifiersAsString(FnTy)
+ << FixItHint::CreateRemoval(RemovalRange);
+
+ // Strip the cv-qualifiers and ref-qualifiers from the type.
+ FunctionProtoType::ExtProtoInfo EPI = FnTy->getExtProtoInfo();
+ EPI.TypeQuals = 0;
+ EPI.RefQualifier = RQ_None;
+
+ T = Context.getFunctionType(FnTy->getReturnType(), FnTy->getParamTypes(),
+ EPI);
+ // Rebuild any parens around the identifier in the function type.
+ for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
+ if (D.getTypeObject(i).Kind != DeclaratorChunk::Paren)
+ break;
+ T = S.BuildParenType(T);
+ }
+ }
+ }
+
+ // Apply any undistributed attributes from the declarator.
+ processTypeAttrs(state, T, TAL_DeclName, D.getAttributes());
+
+ // Diagnose any ignored type attributes.
+ state.diagnoseIgnoredTypeAttrs(T);
+
+ // C++0x [dcl.constexpr]p9:
+ // A constexpr specifier used in an object declaration declares the object
+ // as const.
+ if (D.getDeclSpec().isConstexprSpecified() && T->isObjectType()) {
+ T.addConst();
+ }
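+  // e.g. 'constexpr int n = 5;' declares 'n' with the type 'const int'.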
+
+ // If there was an ellipsis in the declarator, the declaration declares a
+ // parameter pack whose type may be a pack expansion type.
+ if (D.hasEllipsis()) {
+ // C++0x [dcl.fct]p13:
+ // A declarator-id or abstract-declarator containing an ellipsis shall
+ // only be used in a parameter-declaration. Such a parameter-declaration
+ // is a parameter pack (14.5.3). [...]
+ switch (D.getContext()) {
+ case Declarator::PrototypeContext:
+ case Declarator::LambdaExprParameterContext:
+ // C++0x [dcl.fct]p13:
+ // [...] When it is part of a parameter-declaration-clause, the
+ // parameter pack is a function parameter pack (14.5.3). The type T
+ // of the declarator-id of the function parameter pack shall contain
+ // a template parameter pack; each template parameter pack in T is
+ // expanded by the function parameter pack.
+ //
+ // We represent function parameter packs as function parameters whose
+ // type is a pack expansion.
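+      // e.g. in 'template<typename ...Ts> void f(Ts ...args);' the
+      // parameter 'args' gets the pack expansion type 'Ts...', while
+      // 'void g(int ...xs);' is rejected because 'int' contains no
+      // unexpanded parameter packs.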
+ if (!T->containsUnexpandedParameterPack()) {
+ S.Diag(D.getEllipsisLoc(),
+ diag::err_function_parameter_pack_without_parameter_packs)
+ << T << D.getSourceRange();
+ D.setEllipsisLoc(SourceLocation());
+ } else {
+ T = Context.getPackExpansionType(T, None);
+ }
+ break;
+ case Declarator::TemplateParamContext:
+ // C++0x [temp.param]p15:
+ // If a template-parameter is a [...] is a parameter-declaration that
+ // declares a parameter pack (8.3.5), then the template-parameter is a
+ // template parameter pack (14.5.3).
+ //
+ // Note: core issue 778 clarifies that, if there are any unexpanded
+ // parameter packs in the type of the non-type template parameter, then
+ // it expands those parameter packs.
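+      // e.g. 'template<int ...Ns> struct A;' declares a non-type template
+      // parameter pack; in 'template<typename ...Ts, Ts ...Vals> struct B;'
+      // each pack in the type 'Ts' is expanded by 'Vals'.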
+ if (T->containsUnexpandedParameterPack())
+ T = Context.getPackExpansionType(T, None);
+ else
+ S.Diag(D.getEllipsisLoc(),
+ LangOpts.CPlusPlus11
+ ? diag::warn_cxx98_compat_variadic_templates
+ : diag::ext_variadic_templates);
+ break;
+
+ case Declarator::FileContext:
+ case Declarator::KNRTypeListContext:
+ case Declarator::ObjCParameterContext: // FIXME: special diagnostic here?
+ case Declarator::ObjCResultContext: // FIXME: special diagnostic here?
+ case Declarator::TypeNameContext:
+ case Declarator::CXXNewContext:
+ case Declarator::AliasDeclContext:
+ case Declarator::AliasTemplateContext:
+ case Declarator::MemberContext:
+ case Declarator::BlockContext:
+ case Declarator::ForContext:
+ case Declarator::ConditionContext:
+ case Declarator::CXXCatchContext:
+ case Declarator::ObjCCatchContext:
+ case Declarator::BlockLiteralContext:
+ case Declarator::LambdaExprContext:
+ case Declarator::ConversionIdContext:
+ case Declarator::TrailingReturnContext:
+ case Declarator::TemplateTypeArgContext:
+ // FIXME: We may want to allow parameter packs in block-literal contexts
+ // in the future.
+ S.Diag(D.getEllipsisLoc(),
+ diag::err_ellipsis_in_declarator_not_parameter);
+ D.setEllipsisLoc(SourceLocation());
+ break;
+ }
+ }
+
+ assert(!T.isNull() && "T must not be null at the end of this function");
+ if (D.isInvalidType())
+ return Context.getTrivialTypeSourceInfo(T);
+
+ return S.GetTypeSourceInfoForDeclarator(D, T, TInfo);
+}
+
+/// GetTypeForDeclarator - Convert the type for the specified
+/// declarator to Type instances.
+///
+/// The result of this call will never be null, but the associated
+/// type may be a null type if there's an unrecoverable error.
+TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S) {
+ // Determine the type of the declarator. Not all forms of declarator
+ // have a type.
+
+ TypeProcessingState state(*this, D);
+
+ TypeSourceInfo *ReturnTypeInfo = nullptr;
+ QualType T = GetDeclSpecTypeForDeclarator(state, ReturnTypeInfo);
+
+ if (D.isPrototypeContext() && getLangOpts().ObjCAutoRefCount)
+ inferARCWriteback(state, T);
+
+ return GetFullTypeForDeclarator(state, T, ReturnTypeInfo);
+}
+
+static void transferARCOwnershipToDeclSpec(Sema &S,
+ QualType &declSpecTy,
+ Qualifiers::ObjCLifetime ownership) {
+ if (declSpecTy->isObjCRetainableType() &&
+ declSpecTy.getObjCLifetime() == Qualifiers::OCL_None) {
+ Qualifiers qs;
+ qs.addObjCLifetime(ownership);
+ declSpecTy = S.Context.getQualifiedType(declSpecTy, qs);
+ }
+}
+
+static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state,
+ Qualifiers::ObjCLifetime ownership,
+ unsigned chunkIndex) {
+ Sema &S = state.getSema();
+ Declarator &D = state.getDeclarator();
+
+ // Look for an explicit lifetime attribute.
+ DeclaratorChunk &chunk = D.getTypeObject(chunkIndex);
+ for (const AttributeList *attr = chunk.getAttrs(); attr;
+ attr = attr->getNext())
+ if (attr->getKind() == AttributeList::AT_ObjCOwnership)
+ return;
+
+ const char *attrStr = nullptr;
+ switch (ownership) {
+ case Qualifiers::OCL_None: llvm_unreachable("no ownership!");
+ case Qualifiers::OCL_ExplicitNone: attrStr = "none"; break;
+ case Qualifiers::OCL_Strong: attrStr = "strong"; break;
+ case Qualifiers::OCL_Weak: attrStr = "weak"; break;
+ case Qualifiers::OCL_Autoreleasing: attrStr = "autoreleasing"; break;
+ }
+
+ IdentifierLoc *Arg = new (S.Context) IdentifierLoc;
+ Arg->Ident = &S.Context.Idents.get(attrStr);
+ Arg->Loc = SourceLocation();
+
+ ArgsUnion Args(Arg);
+
+ // If there wasn't one, add one (with an invalid source location
+ // so that we don't make an AttributedType for it).
+ AttributeList *attr = D.getAttributePool()
+ .create(&S.Context.Idents.get("objc_ownership"), SourceLocation(),
+ /*scope*/ nullptr, SourceLocation(),
+ /*args*/ &Args, 1, AttributeList::AS_GNU);
+ spliceAttrIntoList(*attr, chunk.getAttrListRef());
+
+ // TODO: mark whether we did this inference?
+}
+
+/// \brief Used for transferring ownership in casts resulting in l-values.
+static void transferARCOwnership(TypeProcessingState &state,
+ QualType &declSpecTy,
+ Qualifiers::ObjCLifetime ownership) {
+ Sema &S = state.getSema();
+ Declarator &D = state.getDeclarator();
+
+ int inner = -1;
+ bool hasIndirection = false;
+ for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
+ DeclaratorChunk &chunk = D.getTypeObject(i);
+ switch (chunk.Kind) {
+ case DeclaratorChunk::Paren:
+ // Ignore parens.
+ break;
+
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::Pointer:
+ if (inner != -1)
+ hasIndirection = true;
+ inner = i;
+ break;
+
+ case DeclaratorChunk::BlockPointer:
+ if (inner != -1)
+ transferARCOwnershipToDeclaratorChunk(state, ownership, i);
+ return;
+
+ case DeclaratorChunk::Function:
+ case DeclaratorChunk::MemberPointer:
+ return;
+ }
+ }
+
+ if (inner == -1)
+ return;
+
+ DeclaratorChunk &chunk = D.getTypeObject(inner);
+ if (chunk.Kind == DeclaratorChunk::Pointer) {
+ if (declSpecTy->isObjCRetainableType())
+ return transferARCOwnershipToDeclSpec(S, declSpecTy, ownership);
+ if (declSpecTy->isObjCObjectType() && hasIndirection)
+ return transferARCOwnershipToDeclaratorChunk(state, ownership, inner);
+ } else {
+ assert(chunk.Kind == DeclaratorChunk::Array ||
+ chunk.Kind == DeclaratorChunk::Reference);
+ return transferARCOwnershipToDeclSpec(S, declSpecTy, ownership);
+ }
+}
+
+TypeSourceInfo *Sema::GetTypeForDeclaratorCast(Declarator &D, QualType FromTy) {
+ TypeProcessingState state(*this, D);
+
+ TypeSourceInfo *ReturnTypeInfo = nullptr;
+ QualType declSpecTy = GetDeclSpecTypeForDeclarator(state, ReturnTypeInfo);
+
+ if (getLangOpts().ObjC1) {
+ Qualifiers::ObjCLifetime ownership = Context.getInnerObjCOwnership(FromTy);
+ if (ownership != Qualifiers::OCL_None)
+ transferARCOwnership(state, declSpecTy, ownership);
+ }
+
+ return GetFullTypeForDeclarator(state, declSpecTy, ReturnTypeInfo);
+}
+
+/// Map an AttributedType::Kind to an AttributeList::Kind.
+static AttributeList::Kind getAttrListKind(AttributedType::Kind kind) {
+ switch (kind) {
+ case AttributedType::attr_address_space:
+ return AttributeList::AT_AddressSpace;
+ case AttributedType::attr_regparm:
+ return AttributeList::AT_Regparm;
+ case AttributedType::attr_vector_size:
+ return AttributeList::AT_VectorSize;
+ case AttributedType::attr_neon_vector_type:
+ return AttributeList::AT_NeonVectorType;
+ case AttributedType::attr_neon_polyvector_type:
+ return AttributeList::AT_NeonPolyVectorType;
+ case AttributedType::attr_objc_gc:
+ return AttributeList::AT_ObjCGC;
+ case AttributedType::attr_objc_ownership:
+ case AttributedType::attr_objc_inert_unsafe_unretained:
+ return AttributeList::AT_ObjCOwnership;
+ case AttributedType::attr_noreturn:
+ return AttributeList::AT_NoReturn;
+ case AttributedType::attr_cdecl:
+ return AttributeList::AT_CDecl;
+ case AttributedType::attr_fastcall:
+ return AttributeList::AT_FastCall;
+ case AttributedType::attr_stdcall:
+ return AttributeList::AT_StdCall;
+ case AttributedType::attr_thiscall:
+ return AttributeList::AT_ThisCall;
+ case AttributedType::attr_pascal:
+ return AttributeList::AT_Pascal;
+ case AttributedType::attr_vectorcall:
+ return AttributeList::AT_VectorCall;
+ case AttributedType::attr_pcs:
+ case AttributedType::attr_pcs_vfp:
+ return AttributeList::AT_Pcs;
+ case AttributedType::attr_inteloclbicc:
+ return AttributeList::AT_IntelOclBicc;
+ case AttributedType::attr_ms_abi:
+ return AttributeList::AT_MSABI;
+ case AttributedType::attr_sysv_abi:
+ return AttributeList::AT_SysVABI;
+ case AttributedType::attr_ptr32:
+ return AttributeList::AT_Ptr32;
+ case AttributedType::attr_ptr64:
+ return AttributeList::AT_Ptr64;
+ case AttributedType::attr_sptr:
+ return AttributeList::AT_SPtr;
+ case AttributedType::attr_uptr:
+ return AttributeList::AT_UPtr;
+ case AttributedType::attr_nonnull:
+ return AttributeList::AT_TypeNonNull;
+ case AttributedType::attr_nullable:
+ return AttributeList::AT_TypeNullable;
+ case AttributedType::attr_null_unspecified:
+ return AttributeList::AT_TypeNullUnspecified;
+ case AttributedType::attr_objc_kindof:
+ return AttributeList::AT_ObjCKindOf;
+ }
+ llvm_unreachable("unexpected attribute kind!");
+}
+
+static void fillAttributedTypeLoc(AttributedTypeLoc TL,
+ const AttributeList *attrs,
+ const AttributeList *DeclAttrs = nullptr) {
+  // attrs and DeclAttrs cannot both be null.
+ assert((attrs || DeclAttrs) &&
+ "no type attributes in the expected location!");
+
+ AttributeList::Kind parsedKind = getAttrListKind(TL.getAttrKind());
+  // Search the attrs list for an attribute of the matching kind.
+ while (attrs && attrs->getKind() != parsedKind)
+ attrs = attrs->getNext();
+ if (!attrs) {
+    // No matching type attribute found in the attrs list.
+ // Try searching through C++11 attributes in the declarator attribute list.
+ while (DeclAttrs && (!DeclAttrs->isCXX11Attribute() ||
+ DeclAttrs->getKind() != parsedKind))
+ DeclAttrs = DeclAttrs->getNext();
+ attrs = DeclAttrs;
+ }
+
+ assert(attrs && "no matching type attribute in expected location!");
+
+ TL.setAttrNameLoc(attrs->getLoc());
+ if (TL.hasAttrExprOperand()) {
+ assert(attrs->isArgExpr(0) && "mismatched attribute operand kind");
+ TL.setAttrExprOperand(attrs->getArgAsExpr(0));
+ } else if (TL.hasAttrEnumOperand()) {
+ assert((attrs->isArgIdent(0) || attrs->isArgExpr(0)) &&
+ "unexpected attribute operand kind");
+ if (attrs->isArgIdent(0))
+ TL.setAttrEnumOperandLoc(attrs->getArgAsIdent(0)->Loc);
+ else
+ TL.setAttrEnumOperandLoc(attrs->getArgAsExpr(0)->getExprLoc());
+ }
+
+  // FIXME: preserve this information (the operand parens range) to here.
+ if (TL.hasAttrOperand())
+ TL.setAttrOperandParensRange(SourceRange());
+}
+
+namespace {
+ class TypeSpecLocFiller : public TypeLocVisitor<TypeSpecLocFiller> {
+ ASTContext &Context;
+ const DeclSpec &DS;
+
+ public:
+ TypeSpecLocFiller(ASTContext &Context, const DeclSpec &DS)
+ : Context(Context), DS(DS) {}
+
+ void VisitAttributedTypeLoc(AttributedTypeLoc TL) {
+ fillAttributedTypeLoc(TL, DS.getAttributes().getList());
+ Visit(TL.getModifiedLoc());
+ }
+ void VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
+ Visit(TL.getUnqualifiedLoc());
+ }
+ void VisitTypedefTypeLoc(TypedefTypeLoc TL) {
+ TL.setNameLoc(DS.getTypeSpecTypeLoc());
+ }
+ void VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
+ TL.setNameLoc(DS.getTypeSpecTypeLoc());
+      // FIXME: We should have DS.getTypeSpecTypeEndLoc(), but that requires
+      // an additional field. What we have is good enough for display of the
+      // location of the 'fixit' on the interface name.
+ TL.setNameEndLoc(DS.getLocEnd());
+ }
+ void VisitObjCObjectTypeLoc(ObjCObjectTypeLoc TL) {
+ TypeSourceInfo *RepTInfo = nullptr;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &RepTInfo);
+ TL.copy(RepTInfo->getTypeLoc());
+ }
+ void VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) {
+ TypeSourceInfo *RepTInfo = nullptr;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &RepTInfo);
+ TL.copy(RepTInfo->getTypeLoc());
+ }
+ void VisitTemplateSpecializationTypeLoc(TemplateSpecializationTypeLoc TL) {
+ TypeSourceInfo *TInfo = nullptr;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+
+ // If we got no declarator info from previous Sema routines,
+ // just fill with the typespec loc.
+ if (!TInfo) {
+ TL.initialize(Context, DS.getTypeSpecTypeNameLoc());
+ return;
+ }
+
+ TypeLoc OldTL = TInfo->getTypeLoc();
+ if (TInfo->getType()->getAs<ElaboratedType>()) {
+ ElaboratedTypeLoc ElabTL = OldTL.castAs<ElaboratedTypeLoc>();
+ TemplateSpecializationTypeLoc NamedTL = ElabTL.getNamedTypeLoc()
+ .castAs<TemplateSpecializationTypeLoc>();
+ TL.copy(NamedTL);
+ } else {
+ TL.copy(OldTL.castAs<TemplateSpecializationTypeLoc>());
+        assert(TL.getRAngleLoc() ==
+               OldTL.castAs<TemplateSpecializationTypeLoc>().getRAngleLoc());
+ }
+
+ }
+ void VisitTypeOfExprTypeLoc(TypeOfExprTypeLoc TL) {
+ assert(DS.getTypeSpecType() == DeclSpec::TST_typeofExpr);
+ TL.setTypeofLoc(DS.getTypeSpecTypeLoc());
+ TL.setParensRange(DS.getTypeofParensRange());
+ }
+ void VisitTypeOfTypeLoc(TypeOfTypeLoc TL) {
+ assert(DS.getTypeSpecType() == DeclSpec::TST_typeofType);
+ TL.setTypeofLoc(DS.getTypeSpecTypeLoc());
+ TL.setParensRange(DS.getTypeofParensRange());
+ assert(DS.getRepAsType());
+ TypeSourceInfo *TInfo = nullptr;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ TL.setUnderlyingTInfo(TInfo);
+ }
+ void VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
+ // FIXME: This holds only because we only have one unary transform.
+ assert(DS.getTypeSpecType() == DeclSpec::TST_underlyingType);
+ TL.setKWLoc(DS.getTypeSpecTypeLoc());
+ TL.setParensRange(DS.getTypeofParensRange());
+ assert(DS.getRepAsType());
+ TypeSourceInfo *TInfo = nullptr;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ TL.setUnderlyingTInfo(TInfo);
+ }
+ void VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
+ // By default, use the source location of the type specifier.
+ TL.setBuiltinLoc(DS.getTypeSpecTypeLoc());
+ if (TL.needsExtraLocalData()) {
+ // Set info for the written builtin specifiers.
+ TL.getWrittenBuiltinSpecs() = DS.getWrittenBuiltinSpecs();
+ // Try to have a meaningful source location.
+ if (TL.getWrittenSignSpec() != TSS_unspecified)
+ // Sign spec loc overrides the others (e.g., 'unsigned long').
+ TL.setBuiltinLoc(DS.getTypeSpecSignLoc());
+ else if (TL.getWrittenWidthSpec() != TSW_unspecified)
+ // Width spec loc overrides type spec loc (e.g., 'short int').
+ TL.setBuiltinLoc(DS.getTypeSpecWidthLoc());
+ }
+ }
+ void VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
+ ElaboratedTypeKeyword Keyword
+ = TypeWithKeyword::getKeywordForTypeSpec(DS.getTypeSpecType());
+ if (DS.getTypeSpecType() == TST_typename) {
+ TypeSourceInfo *TInfo = nullptr;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ if (TInfo) {
+ TL.copy(TInfo->getTypeLoc().castAs<ElaboratedTypeLoc>());
+ return;
+ }
+ }
+ TL.setElaboratedKeywordLoc(Keyword != ETK_None
+ ? DS.getTypeSpecTypeLoc()
+ : SourceLocation());
+ const CXXScopeSpec& SS = DS.getTypeSpecScope();
+ TL.setQualifierLoc(SS.getWithLocInContext(Context));
+ Visit(TL.getNextTypeLoc().getUnqualifiedLoc());
+ }
+ void VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
+ assert(DS.getTypeSpecType() == TST_typename);
+ TypeSourceInfo *TInfo = nullptr;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ assert(TInfo);
+ TL.copy(TInfo->getTypeLoc().castAs<DependentNameTypeLoc>());
+ }
+ void VisitDependentTemplateSpecializationTypeLoc(
+ DependentTemplateSpecializationTypeLoc TL) {
+ assert(DS.getTypeSpecType() == TST_typename);
+ TypeSourceInfo *TInfo = nullptr;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ assert(TInfo);
+ TL.copy(
+ TInfo->getTypeLoc().castAs<DependentTemplateSpecializationTypeLoc>());
+ }
+ void VisitTagTypeLoc(TagTypeLoc TL) {
+ TL.setNameLoc(DS.getTypeSpecTypeNameLoc());
+ }
+ void VisitAtomicTypeLoc(AtomicTypeLoc TL) {
+ // An AtomicTypeLoc can come from either an _Atomic(...) type specifier
+ // or an _Atomic qualifier.
+ if (DS.getTypeSpecType() == DeclSpec::TST_atomic) {
+ TL.setKWLoc(DS.getTypeSpecTypeLoc());
+ TL.setParensRange(DS.getTypeofParensRange());
+
+ TypeSourceInfo *TInfo = nullptr;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ assert(TInfo);
+ TL.getValueLoc().initializeFullCopy(TInfo->getTypeLoc());
+ } else {
+ TL.setKWLoc(DS.getAtomicSpecLoc());
+ // No parens, to indicate this was spelled as an _Atomic qualifier.
+ TL.setParensRange(SourceRange());
+ Visit(TL.getValueLoc());
+ }
+ }
+
+ void VisitTypeLoc(TypeLoc TL) {
+ // FIXME: add other typespec types and change this to an assert.
+ TL.initialize(Context, DS.getTypeSpecTypeLoc());
+ }
+ };
+
+ class DeclaratorLocFiller : public TypeLocVisitor<DeclaratorLocFiller> {
+ ASTContext &Context;
+ const DeclaratorChunk &Chunk;
+
+ public:
+ DeclaratorLocFiller(ASTContext &Context, const DeclaratorChunk &Chunk)
+ : Context(Context), Chunk(Chunk) {}
+
+ void VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
+ llvm_unreachable("qualified type locs not expected here!");
+ }
+ void VisitDecayedTypeLoc(DecayedTypeLoc TL) {
+ llvm_unreachable("decayed type locs not expected here!");
+ }
+
+ void VisitAttributedTypeLoc(AttributedTypeLoc TL) {
+ fillAttributedTypeLoc(TL, Chunk.getAttrs());
+ }
+ void VisitAdjustedTypeLoc(AdjustedTypeLoc TL) {
+ // nothing
+ }
+ void VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) {
+ assert(Chunk.Kind == DeclaratorChunk::BlockPointer);
+ TL.setCaretLoc(Chunk.Loc);
+ }
+ void VisitPointerTypeLoc(PointerTypeLoc TL) {
+ assert(Chunk.Kind == DeclaratorChunk::Pointer);
+ TL.setStarLoc(Chunk.Loc);
+ }
+ void VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) {
+ assert(Chunk.Kind == DeclaratorChunk::Pointer);
+ TL.setStarLoc(Chunk.Loc);
+ }
+ void VisitMemberPointerTypeLoc(MemberPointerTypeLoc TL) {
+ assert(Chunk.Kind == DeclaratorChunk::MemberPointer);
+ const CXXScopeSpec& SS = Chunk.Mem.Scope();
+ NestedNameSpecifierLoc NNSLoc = SS.getWithLocInContext(Context);
+
+ const Type* ClsTy = TL.getClass();
+ QualType ClsQT = QualType(ClsTy, 0);
+ TypeSourceInfo *ClsTInfo = Context.CreateTypeSourceInfo(ClsQT, 0);
+ // Now copy source location info into the type loc component.
+ TypeLoc ClsTL = ClsTInfo->getTypeLoc();
+ switch (NNSLoc.getNestedNameSpecifier()->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ assert(isa<DependentNameType>(ClsTy) && "Unexpected TypeLoc");
+ {
+ DependentNameTypeLoc DNTLoc = ClsTL.castAs<DependentNameTypeLoc>();
+ DNTLoc.setElaboratedKeywordLoc(SourceLocation());
+ DNTLoc.setQualifierLoc(NNSLoc.getPrefix());
+ DNTLoc.setNameLoc(NNSLoc.getLocalBeginLoc());
+ }
+ break;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ if (isa<ElaboratedType>(ClsTy)) {
+ ElaboratedTypeLoc ETLoc = ClsTL.castAs<ElaboratedTypeLoc>();
+ ETLoc.setElaboratedKeywordLoc(SourceLocation());
+ ETLoc.setQualifierLoc(NNSLoc.getPrefix());
+ TypeLoc NamedTL = ETLoc.getNamedTypeLoc();
+ NamedTL.initializeFullCopy(NNSLoc.getTypeLoc());
+ } else {
+ ClsTL.initializeFullCopy(NNSLoc.getTypeLoc());
+ }
+ break;
+
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Super:
+ llvm_unreachable("Nested-name-specifier must name a type");
+ }
+
+ // Finally fill in MemberPointerLocInfo fields.
+ TL.setStarLoc(Chunk.Loc);
+ TL.setClassTInfo(ClsTInfo);
+ }
+ void VisitLValueReferenceTypeLoc(LValueReferenceTypeLoc TL) {
+ assert(Chunk.Kind == DeclaratorChunk::Reference);
+      // 'Amp' is misleading: this might have been originally
+      // spelled with AmpAmp.
+ TL.setAmpLoc(Chunk.Loc);
+ }
+ void VisitRValueReferenceTypeLoc(RValueReferenceTypeLoc TL) {
+ assert(Chunk.Kind == DeclaratorChunk::Reference);
+ assert(!Chunk.Ref.LValueRef);
+ TL.setAmpAmpLoc(Chunk.Loc);
+ }
+ void VisitArrayTypeLoc(ArrayTypeLoc TL) {
+ assert(Chunk.Kind == DeclaratorChunk::Array);
+ TL.setLBracketLoc(Chunk.Loc);
+ TL.setRBracketLoc(Chunk.EndLoc);
+ TL.setSizeExpr(static_cast<Expr*>(Chunk.Arr.NumElts));
+ }
+ void VisitFunctionTypeLoc(FunctionTypeLoc TL) {
+ assert(Chunk.Kind == DeclaratorChunk::Function);
+ TL.setLocalRangeBegin(Chunk.Loc);
+ TL.setLocalRangeEnd(Chunk.EndLoc);
+
+ const DeclaratorChunk::FunctionTypeInfo &FTI = Chunk.Fun;
+ TL.setLParenLoc(FTI.getLParenLoc());
+ TL.setRParenLoc(FTI.getRParenLoc());
+ for (unsigned i = 0, e = TL.getNumParams(), tpi = 0; i != e; ++i) {
+ ParmVarDecl *Param = cast<ParmVarDecl>(FTI.Params[i].Param);
+ TL.setParam(tpi++, Param);
+ }
+ // FIXME: exception specs
+ }
+ void VisitParenTypeLoc(ParenTypeLoc TL) {
+ assert(Chunk.Kind == DeclaratorChunk::Paren);
+ TL.setLParenLoc(Chunk.Loc);
+ TL.setRParenLoc(Chunk.EndLoc);
+ }
+
+ void VisitTypeLoc(TypeLoc TL) {
+ llvm_unreachable("unsupported TypeLoc kind in declarator!");
+ }
+ };
+}
+
+static void fillAtomicQualLoc(AtomicTypeLoc ATL, const DeclaratorChunk &Chunk) {
+ SourceLocation Loc;
+ switch (Chunk.Kind) {
+ case DeclaratorChunk::Function:
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::Paren:
+ llvm_unreachable("cannot be _Atomic qualified");
+
+ case DeclaratorChunk::Pointer:
+ Loc = SourceLocation::getFromRawEncoding(Chunk.Ptr.AtomicQualLoc);
+ break;
+
+ case DeclaratorChunk::BlockPointer:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::MemberPointer:
+ // FIXME: Provide a source location for the _Atomic keyword.
+ break;
+ }
+
+ ATL.setKWLoc(Loc);
+ ATL.setParensRange(SourceRange());
+}
+
+/// \brief Create and instantiate a TypeSourceInfo with type source information.
+///
+/// \param T QualType referring to the type as written in source code.
+///
+/// \param ReturnTypeInfo For declarators whose return type does not show
+/// up in the normal place in the declaration specifiers (such as a C++
+/// conversion function), this pointer will refer to a type source information
+/// for that return type.
+TypeSourceInfo *
+Sema::GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
+ TypeSourceInfo *ReturnTypeInfo) {
+ TypeSourceInfo *TInfo = Context.CreateTypeSourceInfo(T);
+ UnqualTypeLoc CurrTL = TInfo->getTypeLoc().getUnqualifiedLoc();
+ const AttributeList *DeclAttrs = D.getAttributes();
+
+ // Handle parameter packs whose type is a pack expansion.
+ if (isa<PackExpansionType>(T)) {
+ CurrTL.castAs<PackExpansionTypeLoc>().setEllipsisLoc(D.getEllipsisLoc());
+ CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc();
+ }
+
+ for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
+ // An AtomicTypeLoc might be produced by an atomic qualifier in this
+ // declarator chunk.
+ if (AtomicTypeLoc ATL = CurrTL.getAs<AtomicTypeLoc>()) {
+ fillAtomicQualLoc(ATL, D.getTypeObject(i));
+ CurrTL = ATL.getValueLoc().getUnqualifiedLoc();
+ }
+
+ while (AttributedTypeLoc TL = CurrTL.getAs<AttributedTypeLoc>()) {
+ fillAttributedTypeLoc(TL, D.getTypeObject(i).getAttrs(), DeclAttrs);
+ CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
+ }
+
+ // FIXME: Ordering here?
+ while (AdjustedTypeLoc TL = CurrTL.getAs<AdjustedTypeLoc>())
+ CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
+
+ DeclaratorLocFiller(Context, D.getTypeObject(i)).Visit(CurrTL);
+ CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc();
+ }
+
+ // If we have different source information for the return type, use
+ // that. This really only applies to C++ conversion functions.
+ if (ReturnTypeInfo) {
+ TypeLoc TL = ReturnTypeInfo->getTypeLoc();
+ assert(TL.getFullDataSize() == CurrTL.getFullDataSize());
+ memcpy(CurrTL.getOpaqueData(), TL.getOpaqueData(), TL.getFullDataSize());
+ } else {
+ TypeSpecLocFiller(Context, D.getDeclSpec()).Visit(CurrTL);
+ }
+
+ return TInfo;
+}
+
+/// \brief Create a LocInfoType to hold the given QualType and TypeSourceInfo.
+ParsedType Sema::CreateParsedType(QualType T, TypeSourceInfo *TInfo) {
+ // FIXME: LocInfoTypes are "transient", only needed for passing to/from Parser
+ // and Sema during declaration parsing. Try deallocating/caching them when
+ // it's appropriate, instead of allocating them and keeping them around.
+ LocInfoType *LocT = (LocInfoType*)BumpAlloc.Allocate(sizeof(LocInfoType),
+ TypeAlignment);
+ new (LocT) LocInfoType(T, TInfo);
+ assert(LocT->getTypeClass() != T->getTypeClass() &&
+ "LocInfoType's TypeClass conflicts with an existing Type class");
+ return ParsedType::make(QualType(LocT, 0));
+}
+
+void LocInfoType::getAsStringInternal(std::string &Str,
+ const PrintingPolicy &Policy) const {
+ llvm_unreachable("LocInfoType leaked into the type system; an opaque TypeTy*"
+ " was used directly instead of getting the QualType through"
+ " GetTypeFromParser");
+}
+
+TypeResult Sema::ActOnTypeName(Scope *S, Declarator &D) {
+ // C99 6.7.6: Type names have no identifier. This is already validated by
+ // the parser.
+ assert(D.getIdentifier() == nullptr &&
+ "Type name should have no identifier!");
+
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType T = TInfo->getType();
+ if (D.isInvalidType())
+ return true;
+
+ // Make sure there are no unused decl attributes on the declarator.
+ // We don't want to do this for ObjC parameters because we're going
+ // to apply them to the actual parameter declaration.
+ // Likewise, we don't want to do this for alias declarations, because
+ // we are actually going to build a declaration from this eventually.
+ if (D.getContext() != Declarator::ObjCParameterContext &&
+ D.getContext() != Declarator::AliasDeclContext &&
+ D.getContext() != Declarator::AliasTemplateContext)
+ checkUnusedDeclAttributes(D);
+
+ if (getLangOpts().CPlusPlus) {
+ // Check that there are no default arguments (C++ only).
+ CheckExtraCXXDefaultArguments(D);
+ }
+
+ return CreateParsedType(T, TInfo);
+}
+
+ParsedType Sema::ActOnObjCInstanceType(SourceLocation Loc) {
+ QualType T = Context.getObjCInstanceType();
+ TypeSourceInfo *TInfo = Context.getTrivialTypeSourceInfo(T, Loc);
+ return CreateParsedType(T, TInfo);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Type Attribute Processing
+//===----------------------------------------------------------------------===//
+
+/// HandleAddressSpaceTypeAttribute - Process an address_space attribute on the
+/// specified type. The attribute contains 1 argument, the id of the address
+/// space for the type.
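+///
+/// e.g. 'int __attribute__((address_space(1))) *p;' qualifies the pointee
+/// type with address space 1, and in OpenCL '__global int *p;' maps to
+/// LangAS::opencl_global via the keyword branch below.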
+static void HandleAddressSpaceTypeAttribute(QualType &Type,
+ const AttributeList &Attr, Sema &S){
+
+ // If this type is already address space qualified, reject it.
+ // ISO/IEC TR 18037 S5.3 (amending C99 6.7.3): "No type shall be qualified by
+ // qualifiers for two or more different address spaces."
+ if (Type.getAddressSpace()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_address_multiple_qualifiers);
+ Attr.setInvalid();
+ return;
+ }
+
+ // ISO/IEC TR 18037 S5.3 (amending C99 6.7.3): "A function type shall not be
+ // qualified by an address-space qualifier."
+ if (Type->isFunctionType()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_address_function_type);
+ Attr.setInvalid();
+ return;
+ }
+
+ unsigned ASIdx;
+ if (Attr.getKind() == AttributeList::AT_AddressSpace) {
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << Attr.getName() << 1;
+ Attr.setInvalid();
+ return;
+ }
+ Expr *ASArgExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
+ llvm::APSInt addrSpace(32);
+ if (ASArgExpr->isTypeDependent() || ASArgExpr->isValueDependent() ||
+ !ASArgExpr->isIntegerConstantExpr(addrSpace, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
+ << Attr.getName() << AANT_ArgumentIntegerConstant
+ << ASArgExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+
+ // Bounds checking.
+ if (addrSpace.isSigned()) {
+ if (addrSpace.isNegative()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_address_space_negative)
+ << ASArgExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+ addrSpace.setIsSigned(false);
+ }
+ llvm::APSInt max(addrSpace.getBitWidth());
+ max = Qualifiers::MaxAddressSpace;
+ if (addrSpace > max) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_address_space_too_high)
+ << int(Qualifiers::MaxAddressSpace) << ASArgExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+ ASIdx = static_cast<unsigned>(addrSpace.getZExtValue());
+ } else {
+ // The keyword-based type attributes imply which address space to use.
+ switch (Attr.getKind()) {
+ case AttributeList::AT_OpenCLGlobalAddressSpace:
+ ASIdx = LangAS::opencl_global; break;
+ case AttributeList::AT_OpenCLLocalAddressSpace:
+ ASIdx = LangAS::opencl_local; break;
+ case AttributeList::AT_OpenCLConstantAddressSpace:
+ ASIdx = LangAS::opencl_constant; break;
+ case AttributeList::AT_OpenCLGenericAddressSpace:
+ ASIdx = LangAS::opencl_generic; break;
+ default:
+ assert(Attr.getKind() == AttributeList::AT_OpenCLPrivateAddressSpace);
+ ASIdx = 0; break;
+ }
+ }
+
+ Type = S.Context.getAddrSpaceQualType(Type, ASIdx);
+}
+
+/// Does this type have a "direct" ownership qualifier? That is,
+/// is it written like "__strong id", as opposed to something like
+/// "typeof(foo)", where that happens to be strong?
+static bool hasDirectOwnershipQualifier(QualType type) {
+  // We are only called when an ObjC lifetime qualifier is known to be
+  // present somewhere in the type.
+  assert(type.getQualifiers().hasObjCLifetime());
+
+ while (true) {
+ // __strong id
+ if (const AttributedType *attr = dyn_cast<AttributedType>(type)) {
+ if (attr->getAttrKind() == AttributedType::attr_objc_ownership)
+ return true;
+
+ type = attr->getModifiedType();
+
+ // X *__strong (...)
+ } else if (const ParenType *paren = dyn_cast<ParenType>(type)) {
+ type = paren->getInnerType();
+
+ // That's it for things we want to complain about. In particular,
+ // we do not want to look through typedefs, typeof(expr),
+ // typeof(type), or any other way that the type is somehow
+ // abstracted.
+ } else {
+
+ return false;
+ }
+ }
+}
+
+/// handleObjCOwnershipTypeAttr - Process an objc_ownership
+/// attribute on the specified type.
+///
+/// Returns 'true' if the attribute was handled.
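+///
+/// e.g. this handles the qualifiers written as '__strong NSString *s;' or
+/// '__weak id w;'.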
+static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &type) {
+ bool NonObjCPointer = false;
+
+ if (!type->isDependentType() && !type->isUndeducedType()) {
+ if (const PointerType *ptr = type->getAs<PointerType>()) {
+ QualType pointee = ptr->getPointeeType();
+ if (pointee->isObjCRetainableType() || pointee->isPointerType())
+ return false;
+      // It is important not to lose the source info that there was an
+      // attribute applied to a non-ObjC pointer. We will create an
+      // attributed type, but its type will be the same as the original type.
+ NonObjCPointer = true;
+ } else if (!type->isObjCRetainableType()) {
+ return false;
+ }
+
+ // Don't accept an ownership attribute in the declspec if it would
+ // just be the return type of a block pointer.
+ if (state.isProcessingDeclSpec()) {
+ Declarator &D = state.getDeclarator();
+ if (maybeMovePastReturnType(D, D.getNumTypeObjects(),
+ /*onlyBlockPointers=*/true))
+ return false;
+ }
+ }
+
+ Sema &S = state.getSema();
+ SourceLocation AttrLoc = attr.getLoc();
+ if (AttrLoc.isMacroID())
+ AttrLoc = S.getSourceManager().getImmediateExpansionRange(AttrLoc).first;
+
+ if (!attr.isArgIdent(0)) {
+ S.Diag(AttrLoc, diag::err_attribute_argument_type)
+ << attr.getName() << AANT_ArgumentString;
+ attr.setInvalid();
+ return true;
+ }
+
+ IdentifierInfo *II = attr.getArgAsIdent(0)->Ident;
+ Qualifiers::ObjCLifetime lifetime;
+ if (II->isStr("none"))
+ lifetime = Qualifiers::OCL_ExplicitNone;
+ else if (II->isStr("strong"))
+ lifetime = Qualifiers::OCL_Strong;
+ else if (II->isStr("weak"))
+ lifetime = Qualifiers::OCL_Weak;
+ else if (II->isStr("autoreleasing"))
+ lifetime = Qualifiers::OCL_Autoreleasing;
+ else {
+ S.Diag(AttrLoc, diag::warn_attribute_type_not_supported)
+ << attr.getName() << II;
+ attr.setInvalid();
+ return true;
+ }
+
+ // Just ignore lifetime attributes other than __weak and __unsafe_unretained
+ // outside of ARC mode.
+ if (!S.getLangOpts().ObjCAutoRefCount &&
+ lifetime != Qualifiers::OCL_Weak &&
+ lifetime != Qualifiers::OCL_ExplicitNone) {
+ return true;
+ }
+
+ SplitQualType underlyingType = type.split();
+
+ // Check for redundant/conflicting ownership qualifiers.
+ if (Qualifiers::ObjCLifetime previousLifetime
+ = type.getQualifiers().getObjCLifetime()) {
+ // If it's written directly, that's an error.
+ if (hasDirectOwnershipQualifier(type)) {
+ S.Diag(AttrLoc, diag::err_attr_objc_ownership_redundant)
+ << type;
+ return true;
+ }
+
+ // Otherwise, if the qualifiers actually conflict, pull sugar off
+ // until we reach a type that is directly qualified.
+ if (previousLifetime != lifetime) {
+ // This should always terminate: the canonical type is
+ // qualified, so some bit of sugar must be hiding it.
+ while (!underlyingType.Quals.hasObjCLifetime()) {
+ underlyingType = underlyingType.getSingleStepDesugaredType();
+ }
+ underlyingType.Quals.removeObjCLifetime();
+ }
+ }
+
+ underlyingType.Quals.addObjCLifetime(lifetime);
+
+ if (NonObjCPointer) {
+ StringRef name = attr.getName()->getName();
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ break;
+ case Qualifiers::OCL_Strong: name = "__strong"; break;
+ case Qualifiers::OCL_Weak: name = "__weak"; break;
+ case Qualifiers::OCL_Autoreleasing: name = "__autoreleasing"; break;
+ }
+ S.Diag(AttrLoc, diag::warn_type_attribute_wrong_type) << name
+ << TDS_ObjCObjOrBlock << type;
+ }
+
+ // Don't actually add the __unsafe_unretained qualifier in non-ARC files,
+ // because having both 'T' and '__unsafe_unretained T' exist in the type
+ // system causes unfortunate widespread consistency problems. (For example,
+  // they're not considered compatible types, and we mangle them identically
+ // as template arguments.) These problems are all individually fixable,
+ // but it's easier to just not add the qualifier and instead sniff it out
+ // in specific places using isObjCInertUnsafeUnretainedType().
+ //
+  // Doing this does mean we miss some trivial consistency checks that
+ // would've triggered in ARC, but that's better than trying to solve all
+ // the coexistence problems with __unsafe_unretained.
+ if (!S.getLangOpts().ObjCAutoRefCount &&
+ lifetime == Qualifiers::OCL_ExplicitNone) {
+ type = S.Context.getAttributedType(
+ AttributedType::attr_objc_inert_unsafe_unretained,
+ type, type);
+ return true;
+ }
+
+ QualType origType = type;
+ if (!NonObjCPointer)
+ type = S.Context.getQualifiedType(underlyingType);
+
+ // If we have a valid source location for the attribute, use an
+ // AttributedType instead.
+ if (AttrLoc.isValid())
+ type = S.Context.getAttributedType(AttributedType::attr_objc_ownership,
+ origType, type);
+
+ auto diagnoseOrDelay = [](Sema &S, SourceLocation loc,
+ unsigned diagnostic, QualType type) {
+ if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
+ S.DelayedDiagnostics.add(
+ sema::DelayedDiagnostic::makeForbiddenType(
+ S.getSourceManager().getExpansionLoc(loc),
+ diagnostic, type, /*ignored*/ 0));
+ } else {
+ S.Diag(loc, diagnostic);
+ }
+ };
+
+ // Sometimes, __weak isn't allowed.
+ if (lifetime == Qualifiers::OCL_Weak &&
+ !S.getLangOpts().ObjCWeak && !NonObjCPointer) {
+
+ // Use a specialized diagnostic if the runtime just doesn't support them.
+ unsigned diagnostic =
+ (S.getLangOpts().ObjCWeakRuntime ? diag::err_arc_weak_disabled
+ : diag::err_arc_weak_no_runtime);
+
+ // In any case, delay the diagnostic until we know what we're parsing.
+ diagnoseOrDelay(S, AttrLoc, diagnostic, type);
+
+ attr.setInvalid();
+ return true;
+ }
+
+ // Forbid __weak for class objects marked as
+ // objc_arc_weak_reference_unavailable
+ if (lifetime == Qualifiers::OCL_Weak) {
+ if (const ObjCObjectPointerType *ObjT =
+ type->getAs<ObjCObjectPointerType>()) {
+ if (ObjCInterfaceDecl *Class = ObjT->getInterfaceDecl()) {
+ if (Class->isArcWeakrefUnavailable()) {
+ S.Diag(AttrLoc, diag::err_arc_unsupported_weak_class);
+ S.Diag(ObjT->getInterfaceDecl()->getLocation(),
+ diag::note_class_declared);
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+/// handleObjCGCTypeAttr - Process the __attribute__((objc_gc)) type
+/// attribute on the specified type. Returns true to indicate that
+/// the attribute was handled, false to indicate that the type does
+/// not permit the attribute.
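+///
+/// e.g. 'id __attribute__((objc_gc(weak))) x;' under -fobjc-gc.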
+static bool handleObjCGCTypeAttr(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &type) {
+ Sema &S = state.getSema();
+
+ // Delay if this isn't some kind of pointer.
+ if (!type->isPointerType() &&
+ !type->isObjCObjectPointerType() &&
+ !type->isBlockPointerType())
+ return false;
+
+ if (type.getObjCGCAttr() != Qualifiers::GCNone) {
+ S.Diag(attr.getLoc(), diag::err_attribute_multiple_objc_gc);
+ attr.setInvalid();
+ return true;
+ }
+
+ // Check the attribute arguments.
+ if (!attr.isArgIdent(0)) {
+ S.Diag(attr.getLoc(), diag::err_attribute_argument_type)
+ << attr.getName() << AANT_ArgumentString;
+ attr.setInvalid();
+ return true;
+ }
+ Qualifiers::GC GCAttr;
+ if (attr.getNumArgs() > 1) {
+ S.Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << attr.getName() << 1;
+ attr.setInvalid();
+ return true;
+ }
+
+ IdentifierInfo *II = attr.getArgAsIdent(0)->Ident;
+ if (II->isStr("weak"))
+ GCAttr = Qualifiers::Weak;
+ else if (II->isStr("strong"))
+ GCAttr = Qualifiers::Strong;
+ else {
+ S.Diag(attr.getLoc(), diag::warn_attribute_type_not_supported)
+ << attr.getName() << II;
+ attr.setInvalid();
+ return true;
+ }
+
+ QualType origType = type;
+ type = S.Context.getObjCGCQualType(origType, GCAttr);
+
+ // Make an attributed type to preserve the source information.
+ if (attr.getLoc().isValid())
+ type = S.Context.getAttributedType(AttributedType::attr_objc_gc,
+ origType, type);
+
+ return true;
+}
+
+namespace {
+ /// A helper class to unwrap a type down to a function for the
+ /// purposes of applying attributes there.
+ ///
+ /// Use:
+ /// FunctionTypeUnwrapper unwrapped(SemaRef, T);
+ /// if (unwrapped.isFunctionType()) {
+ /// const FunctionType *fn = unwrapped.get();
+ /// // change fn somehow
+ /// T = unwrapped.wrap(fn);
+ /// }
+ struct FunctionTypeUnwrapper {
+ enum WrapKind {
+ Desugar,
+ Parens,
+ Pointer,
+ BlockPointer,
+ Reference,
+ MemberPointer
+ };
+
+ QualType Original;
+ const FunctionType *Fn;
+ SmallVector<unsigned char /*WrapKind*/, 8> Stack;
+
+ FunctionTypeUnwrapper(Sema &S, QualType T) : Original(T) {
+ while (true) {
+ const Type *Ty = T.getTypePtr();
+ if (isa<FunctionType>(Ty)) {
+ Fn = cast<FunctionType>(Ty);
+ return;
+ } else if (isa<ParenType>(Ty)) {
+ T = cast<ParenType>(Ty)->getInnerType();
+ Stack.push_back(Parens);
+ } else if (isa<PointerType>(Ty)) {
+ T = cast<PointerType>(Ty)->getPointeeType();
+ Stack.push_back(Pointer);
+ } else if (isa<BlockPointerType>(Ty)) {
+ T = cast<BlockPointerType>(Ty)->getPointeeType();
+ Stack.push_back(BlockPointer);
+ } else if (isa<MemberPointerType>(Ty)) {
+ T = cast<MemberPointerType>(Ty)->getPointeeType();
+ Stack.push_back(MemberPointer);
+ } else if (isa<ReferenceType>(Ty)) {
+ T = cast<ReferenceType>(Ty)->getPointeeType();
+ Stack.push_back(Reference);
+ } else {
+ const Type *DTy = Ty->getUnqualifiedDesugaredType();
+ if (Ty == DTy) {
+ Fn = nullptr;
+ return;
+ }
+
+ T = QualType(DTy, 0);
+ Stack.push_back(Desugar);
+ }
+ }
+ }
+
+ bool isFunctionType() const { return (Fn != nullptr); }
+ const FunctionType *get() const { return Fn; }
+
+ QualType wrap(Sema &S, const FunctionType *New) {
+ // If T wasn't modified from the unwrapped type, do nothing.
+ if (New == get()) return Original;
+
+ Fn = New;
+ return wrap(S.Context, Original, 0);
+ }
+
+ private:
+ QualType wrap(ASTContext &C, QualType Old, unsigned I) {
+ if (I == Stack.size())
+ return C.getQualifiedType(Fn, Old.getQualifiers());
+
+ // Build up the inner type, applying the qualifiers from the old
+ // type to the new type.
+ SplitQualType SplitOld = Old.split();
+
+ // As a special case, tail-recurse if there are no qualifiers.
+ if (SplitOld.Quals.empty())
+ return wrap(C, SplitOld.Ty, I);
+ return C.getQualifiedType(wrap(C, SplitOld.Ty, I), SplitOld.Quals);
+ }
+
+ QualType wrap(ASTContext &C, const Type *Old, unsigned I) {
+ if (I == Stack.size()) return QualType(Fn, 0);
+
+ switch (static_cast<WrapKind>(Stack[I++])) {
+ case Desugar:
+ // This is the point at which we potentially lose source
+ // information.
+ return wrap(C, Old->getUnqualifiedDesugaredType(), I);
+
+ case Parens: {
+ QualType New = wrap(C, cast<ParenType>(Old)->getInnerType(), I);
+ return C.getParenType(New);
+ }
+
+ case Pointer: {
+ QualType New = wrap(C, cast<PointerType>(Old)->getPointeeType(), I);
+ return C.getPointerType(New);
+ }
+
+ case BlockPointer: {
+ QualType New = wrap(C, cast<BlockPointerType>(Old)->getPointeeType(),I);
+ return C.getBlockPointerType(New);
+ }
+
+ case MemberPointer: {
+ const MemberPointerType *OldMPT = cast<MemberPointerType>(Old);
+ QualType New = wrap(C, OldMPT->getPointeeType(), I);
+ return C.getMemberPointerType(New, OldMPT->getClass());
+ }
+
+ case Reference: {
+ const ReferenceType *OldRef = cast<ReferenceType>(Old);
+ QualType New = wrap(C, OldRef->getPointeeType(), I);
+ if (isa<LValueReferenceType>(OldRef))
+ return C.getLValueReferenceType(New, OldRef->isSpelledAsLValue());
+ else
+ return C.getRValueReferenceType(New);
+ }
+ }
+
+ llvm_unreachable("unknown wrapping kind");
+ }
+ };
+}
+
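+// Process one of the Microsoft pointer qualifiers, e.g. 'int * __ptr32 p;'
+// is accepted, while combining '__sptr' with '__uptr' or '__ptr32' with
+// '__ptr64' on one type is rejected below as incompatible.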
+static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State,
+ AttributeList &Attr,
+ QualType &Type) {
+ Sema &S = State.getSema();
+
+ AttributeList::Kind Kind = Attr.getKind();
+ QualType Desugared = Type;
+ const AttributedType *AT = dyn_cast<AttributedType>(Type);
+ while (AT) {
+ AttributedType::Kind CurAttrKind = AT->getAttrKind();
+
+ // You cannot specify duplicate type attributes, so if the attribute has
+ // already been applied, flag it.
+ if (getAttrListKind(CurAttrKind) == Kind) {
+ S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute_exact)
+ << Attr.getName();
+ return true;
+ }
+
+ // You cannot have both __sptr and __uptr on the same type, nor can you
+ // have __ptr32 and __ptr64.
+ if ((CurAttrKind == AttributedType::attr_ptr32 &&
+ Kind == AttributeList::AT_Ptr64) ||
+ (CurAttrKind == AttributedType::attr_ptr64 &&
+ Kind == AttributeList::AT_Ptr32)) {
+ S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible)
+ << "'__ptr32'" << "'__ptr64'";
+ return true;
+ } else if ((CurAttrKind == AttributedType::attr_sptr &&
+ Kind == AttributeList::AT_UPtr) ||
+ (CurAttrKind == AttributedType::attr_uptr &&
+ Kind == AttributeList::AT_SPtr)) {
+ S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible)
+ << "'__sptr'" << "'__uptr'";
+ return true;
+ }
+
+ Desugared = AT->getEquivalentType();
+ AT = dyn_cast<AttributedType>(Desugared);
+ }
+
+  // Pointer type qualifiers can only operate on pointer types, not
+  // pointer-to-member types.
+ if (!isa<PointerType>(Desugared)) {
+ if (Type->isMemberPointerType())
+ S.Diag(Attr.getLoc(), diag::err_attribute_no_member_pointers)
+ << Attr.getName();
+ else
+ S.Diag(Attr.getLoc(), diag::err_attribute_pointers_only)
+ << Attr.getName() << 0;
+ return true;
+ }
+
+ AttributedType::Kind TAK;
+ switch (Kind) {
+ default: llvm_unreachable("Unknown attribute kind");
+ case AttributeList::AT_Ptr32: TAK = AttributedType::attr_ptr32; break;
+ case AttributeList::AT_Ptr64: TAK = AttributedType::attr_ptr64; break;
+ case AttributeList::AT_SPtr: TAK = AttributedType::attr_sptr; break;
+ case AttributeList::AT_UPtr: TAK = AttributedType::attr_uptr; break;
+ }
+
+ Type = S.Context.getAttributedType(TAK, Type, Type);
+ return false;
+}
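+
+// An illustrative sketch of the declarations the routine above accepts and
+// rejects, assuming MSVC-compatibility mode (-fms-extensions):
+//
+//   int * __ptr32 a;            // ok: pointer with 32-bit representation
+//   int * __ptr32 __ptr32 b;    // warning: duplicate attribute
+//   int * __ptr32 __ptr64 c;    // error: __ptr32 and __ptr64 conflict
+//   int * __sptr __uptr d;      // error: __sptr and __uptr conflict
+//   struct S;
+//   int S::* __ptr32 e;         // error: not allowed on member pointers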
+
+bool Sema::checkNullabilityTypeSpecifier(QualType &type,
+ NullabilityKind nullability,
+ SourceLocation nullabilityLoc,
+ bool isContextSensitive) {
+ // We saw a nullability type specifier. If this is the first one for
+ // this file, note that.
+ FileID file = getNullabilityCompletenessCheckFileID(*this, nullabilityLoc);
+ if (!file.isInvalid()) {
+ FileNullability &fileNullability = NullabilityMap[file];
+ if (!fileNullability.SawTypeNullability) {
+ // If we have already seen a pointer declarator without a nullability
+ // annotation, complain about it.
+ if (fileNullability.PointerLoc.isValid()) {
+ Diag(fileNullability.PointerLoc, diag::warn_nullability_missing)
+ << static_cast<unsigned>(fileNullability.PointerKind);
+ }
+
+ fileNullability.SawTypeNullability = true;
+ }
+ }
+
+ // Check for existing nullability attributes on the type.
+ QualType desugared = type;
+ while (auto attributed = dyn_cast<AttributedType>(desugared.getTypePtr())) {
+    // Check whether there is already a nullability attribute on the type.
+ if (auto existingNullability = attributed->getImmediateNullability()) {
+ // Duplicated nullability.
+ if (nullability == *existingNullability) {
+ Diag(nullabilityLoc, diag::warn_nullability_duplicate)
+ << DiagNullabilityKind(nullability, isContextSensitive)
+ << FixItHint::CreateRemoval(nullabilityLoc);
+
+ break;
+ }
+
+ // Conflicting nullability.
+ Diag(nullabilityLoc, diag::err_nullability_conflicting)
+ << DiagNullabilityKind(nullability, isContextSensitive)
+ << DiagNullabilityKind(*existingNullability, false);
+ return true;
+ }
+
+ desugared = attributed->getModifiedType();
+ }
+
+ // If there is already a different nullability specifier, complain.
+ // This (unlike the code above) looks through typedefs that might
+ // have nullability specifiers on them, which means we cannot
+ // provide a useful Fix-It.
+ if (auto existingNullability = desugared->getNullability(Context)) {
+ if (nullability != *existingNullability) {
+ Diag(nullabilityLoc, diag::err_nullability_conflicting)
+ << DiagNullabilityKind(nullability, isContextSensitive)
+ << DiagNullabilityKind(*existingNullability, false);
+
+ // Try to find the typedef with the existing nullability specifier.
+ if (auto typedefType = desugared->getAs<TypedefType>()) {
+ TypedefNameDecl *typedefDecl = typedefType->getDecl();
+ QualType underlyingType = typedefDecl->getUnderlyingType();
+ if (auto typedefNullability
+ = AttributedType::stripOuterNullability(underlyingType)) {
+ if (*typedefNullability == *existingNullability) {
+ Diag(typedefDecl->getLocation(), diag::note_nullability_here)
+ << DiagNullabilityKind(*existingNullability, false);
+ }
+ }
+ }
+
+ return true;
+ }
+ }
+
+ // If this definitely isn't a pointer type, reject the specifier.
+ if (!desugared->canHaveNullability()) {
+ Diag(nullabilityLoc, diag::err_nullability_nonpointer)
+ << DiagNullabilityKind(nullability, isContextSensitive) << type;
+ return true;
+ }
+
+ // For the context-sensitive keywords/Objective-C property
+ // attributes, require that the type be a single-level pointer.
+ if (isContextSensitive) {
+ // Make sure that the pointee isn't itself a pointer type.
+ QualType pointeeType = desugared->getPointeeType();
+ if (pointeeType->isAnyPointerType() ||
+ pointeeType->isObjCObjectPointerType() ||
+ pointeeType->isMemberPointerType()) {
+ Diag(nullabilityLoc, diag::err_nullability_cs_multilevel)
+ << DiagNullabilityKind(nullability, true)
+ << type;
+ Diag(nullabilityLoc, diag::note_nullability_type_specifier)
+ << DiagNullabilityKind(nullability, false)
+ << type
+ << FixItHint::CreateReplacement(nullabilityLoc,
+ getNullabilitySpelling(nullability));
+ return true;
+ }
+ }
+
+ // Form the attributed type.
+ type = Context.getAttributedType(
+ AttributedType::getNullabilityAttrKind(nullability), type, type);
+ return false;
+}
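+
+// An illustrative sketch of the cases this check handles, assuming the
+// standard _Nonnull/_Nullable spellings:
+//
+//   int * _Nonnull p;            // ok: pointer attributed with nullability
+//   int * _Nonnull _Nonnull q;   // warning: duplicate, with a removal fix-it
+//   int * _Nonnull _Nullable r;  // error: conflicting nullability
+//   int _Nonnull i;              // error: nullability on a non-pointer type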
+
+bool Sema::checkObjCKindOfType(QualType &type, SourceLocation loc) {
+  // Find out if it's an Objective-C object or object pointer type.
+ const ObjCObjectPointerType *ptrType = type->getAs<ObjCObjectPointerType>();
+ const ObjCObjectType *objType = ptrType ? ptrType->getObjectType()
+ : type->getAs<ObjCObjectType>();
+
+ // If not, we can't apply __kindof.
+ if (!objType) {
+ // FIXME: Handle dependent types that aren't yet object types.
+ Diag(loc, diag::err_objc_kindof_nonobject)
+ << type;
+ return true;
+ }
+
+ // Rebuild the "equivalent" type, which pushes __kindof down into
+ // the object type.
+ QualType equivType = Context.getObjCObjectType(objType->getBaseType(),
+ objType->getTypeArgsAsWritten(),
+ objType->getProtocols(),
+ /*isKindOf=*/true);
+
+ // If we started with an object pointer type, rebuild it.
+ if (ptrType) {
+ equivType = Context.getObjCObjectPointerType(equivType);
+ if (auto nullability = type->getNullability(Context)) {
+ auto attrKind = AttributedType::getNullabilityAttrKind(*nullability);
+ equivType = Context.getAttributedType(attrKind, equivType, equivType);
+ }
+ }
+
+ // Build the attributed type to record where __kindof occurred.
+ type = Context.getAttributedType(AttributedType::attr_objc_kindof,
+ type,
+ equivType);
+
+ return false;
+}
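+
+// An illustrative sketch (Objective-C) of the two paths above:
+//
+//   __kindof NSObject *obj;  // ok: rebuilt with __kindof pushed down into
+//                            // the object type, then back into the pointer
+//   __kindof int bad;        // error: __kindof on a non-object type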
+
+/// Map a nullability attribute kind to a nullability kind.
+static NullabilityKind mapNullabilityAttrKind(AttributeList::Kind kind) {
+ switch (kind) {
+ case AttributeList::AT_TypeNonNull:
+ return NullabilityKind::NonNull;
+
+ case AttributeList::AT_TypeNullable:
+ return NullabilityKind::Nullable;
+
+ case AttributeList::AT_TypeNullUnspecified:
+ return NullabilityKind::Unspecified;
+
+ default:
+ llvm_unreachable("not a nullability attribute kind");
+ }
+}
+
+/// Distribute a nullability type attribute that cannot be applied to
+/// the type specifier to a pointer, block pointer, or member pointer
+/// declarator, complaining if necessary.
+///
+/// \returns true if the nullability annotation was distributed, false
+/// otherwise.
+static bool distributeNullabilityTypeAttr(TypeProcessingState &state,
+ QualType type,
+ AttributeList &attr) {
+ Declarator &declarator = state.getDeclarator();
+
+ /// Attempt to move the attribute to the specified chunk.
+ auto moveToChunk = [&](DeclaratorChunk &chunk, bool inFunction) -> bool {
+ // If there is already a nullability attribute there, don't add
+ // one.
+ if (hasNullabilityAttr(chunk.getAttrListRef()))
+ return false;
+
+ // Complain about the nullability qualifier being in the wrong
+ // place.
+ enum {
+ PK_Pointer,
+ PK_BlockPointer,
+ PK_MemberPointer,
+ PK_FunctionPointer,
+ PK_MemberFunctionPointer,
+ } pointerKind
+ = chunk.Kind == DeclaratorChunk::Pointer ? (inFunction ? PK_FunctionPointer
+ : PK_Pointer)
+ : chunk.Kind == DeclaratorChunk::BlockPointer ? PK_BlockPointer
+      : inFunction ? PK_MemberFunctionPointer : PK_MemberPointer;
+
+ auto diag = state.getSema().Diag(attr.getLoc(),
+ diag::warn_nullability_declspec)
+ << DiagNullabilityKind(mapNullabilityAttrKind(attr.getKind()),
+ attr.isContextSensitiveKeywordAttribute())
+ << type
+ << static_cast<unsigned>(pointerKind);
+
+ // FIXME: MemberPointer chunks don't carry the location of the *.
+ if (chunk.Kind != DeclaratorChunk::MemberPointer) {
+ diag << FixItHint::CreateRemoval(attr.getLoc())
+ << FixItHint::CreateInsertion(
+ state.getSema().getPreprocessor()
+ .getLocForEndOfToken(chunk.Loc),
+ " " + attr.getName()->getName().str() + " ");
+ }
+
+ moveAttrFromListToList(attr, state.getCurrentAttrListRef(),
+ chunk.getAttrListRef());
+ return true;
+ };
+
+ // Move it to the outermost pointer, member pointer, or block
+ // pointer declarator.
+ for (unsigned i = state.getCurrentChunkIndex(); i != 0; --i) {
+ DeclaratorChunk &chunk = declarator.getTypeObject(i-1);
+ switch (chunk.Kind) {
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::BlockPointer:
+ case DeclaratorChunk::MemberPointer:
+ return moveToChunk(chunk, false);
+
+ case DeclaratorChunk::Paren:
+ case DeclaratorChunk::Array:
+ continue;
+
+ case DeclaratorChunk::Function:
+ // Try to move past the return type to a function/block/member
+ // function pointer.
+ if (DeclaratorChunk *dest = maybeMovePastReturnType(
+ declarator, i,
+ /*onlyBlockPointers=*/false)) {
+ return moveToChunk(*dest, true);
+ }
+
+ return false;
+
+ // Don't walk through these.
+ case DeclaratorChunk::Reference:
+ return false;
+ }
+ }
+
+ return false;
+}
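+
+// An illustrative sketch: a nullability specifier written on the type
+// specifier is moved out to the outermost pointer declarator, with a warning
+// and a fix-it (assuming the _Nonnull spelling):
+//
+//   _Nonnull int *p;   // warning: specifier belongs on the pointer; treated
+//                      // as if written "int * _Nonnull p;"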
+
+static AttributedType::Kind getCCTypeAttrKind(AttributeList &Attr) {
+ assert(!Attr.isInvalid());
+ switch (Attr.getKind()) {
+ default:
+ llvm_unreachable("not a calling convention attribute");
+ case AttributeList::AT_CDecl:
+ return AttributedType::attr_cdecl;
+ case AttributeList::AT_FastCall:
+ return AttributedType::attr_fastcall;
+ case AttributeList::AT_StdCall:
+ return AttributedType::attr_stdcall;
+ case AttributeList::AT_ThisCall:
+ return AttributedType::attr_thiscall;
+ case AttributeList::AT_Pascal:
+ return AttributedType::attr_pascal;
+ case AttributeList::AT_VectorCall:
+ return AttributedType::attr_vectorcall;
+ case AttributeList::AT_Pcs: {
+ // The attribute may have had a fixit applied where we treated an
+ // identifier as a string literal. The contents of the string are valid,
+ // but the form may not be.
+ StringRef Str;
+ if (Attr.isArgExpr(0))
+ Str = cast<StringLiteral>(Attr.getArgAsExpr(0))->getString();
+ else
+ Str = Attr.getArgAsIdent(0)->Ident->getName();
+ return llvm::StringSwitch<AttributedType::Kind>(Str)
+ .Case("aapcs", AttributedType::attr_pcs)
+ .Case("aapcs-vfp", AttributedType::attr_pcs_vfp);
+ }
+ case AttributeList::AT_IntelOclBicc:
+ return AttributedType::attr_inteloclbicc;
+ case AttributeList::AT_MSABI:
+ return AttributedType::attr_ms_abi;
+ case AttributeList::AT_SysVABI:
+ return AttributedType::attr_sysv_abi;
+ }
+ llvm_unreachable("unexpected attribute kind!");
+}
+
+/// Process an individual function attribute. Returns true to
+/// indicate that the attribute was handled, false if it wasn't.
+static bool handleFunctionTypeAttr(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &type) {
+ Sema &S = state.getSema();
+
+ FunctionTypeUnwrapper unwrapped(S, type);
+
+ if (attr.getKind() == AttributeList::AT_NoReturn) {
+ if (S.CheckNoReturnAttr(attr))
+ return true;
+
+ // Delay if this is not a function type.
+ if (!unwrapped.isFunctionType())
+ return false;
+
+ // Otherwise we can process right away.
+ FunctionType::ExtInfo EI = unwrapped.get()->getExtInfo().withNoReturn(true);
+ type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
+ return true;
+ }
+
+ // ns_returns_retained is not always a type attribute, but if we got
+ // here, we're treating it as one right now.
+ if (attr.getKind() == AttributeList::AT_NSReturnsRetained) {
+ assert(S.getLangOpts().ObjCAutoRefCount &&
+ "ns_returns_retained treated as type attribute in non-ARC");
+ if (attr.getNumArgs()) return true;
+
+ // Delay if this is not a function type.
+ if (!unwrapped.isFunctionType())
+ return false;
+
+ FunctionType::ExtInfo EI
+ = unwrapped.get()->getExtInfo().withProducesResult(true);
+ type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
+ return true;
+ }
+
+ if (attr.getKind() == AttributeList::AT_Regparm) {
+ unsigned value;
+ if (S.CheckRegparmAttr(attr, value))
+ return true;
+
+ // Delay if this is not a function type.
+ if (!unwrapped.isFunctionType())
+ return false;
+
+ // Diagnose regparm with fastcall.
+ const FunctionType *fn = unwrapped.get();
+ CallingConv CC = fn->getCallConv();
+ if (CC == CC_X86FastCall) {
+ S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
+ << FunctionType::getNameForCallConv(CC)
+ << "regparm";
+ attr.setInvalid();
+ return true;
+ }
+
+ FunctionType::ExtInfo EI =
+ unwrapped.get()->getExtInfo().withRegParm(value);
+ type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
+ return true;
+ }
+
+ // Delay if the type didn't work out to a function.
+ if (!unwrapped.isFunctionType()) return false;
+
+ // Otherwise, a calling convention.
+ CallingConv CC;
+ if (S.CheckCallingConvAttr(attr, CC))
+ return true;
+
+ const FunctionType *fn = unwrapped.get();
+ CallingConv CCOld = fn->getCallConv();
+ AttributedType::Kind CCAttrKind = getCCTypeAttrKind(attr);
+
+ if (CCOld != CC) {
+    // Error out when there's already an attribute on the type
+ // and the CCs don't match.
+ const AttributedType *AT = S.getCallingConvAttributedType(type);
+ if (AT && AT->getAttrKind() != CCAttrKind) {
+ S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
+ << FunctionType::getNameForCallConv(CC)
+ << FunctionType::getNameForCallConv(CCOld);
+ attr.setInvalid();
+ return true;
+ }
+ }
+
+ // Diagnose use of callee-cleanup calling convention on variadic functions.
+ if (!supportsVariadicCall(CC)) {
+ const FunctionProtoType *FnP = dyn_cast<FunctionProtoType>(fn);
+ if (FnP && FnP->isVariadic()) {
+ unsigned DiagID = diag::err_cconv_varargs;
+ // stdcall and fastcall are ignored with a warning for GCC and MS
+ // compatibility.
+ if (CC == CC_X86StdCall || CC == CC_X86FastCall)
+ DiagID = diag::warn_cconv_varargs;
+
+ S.Diag(attr.getLoc(), DiagID) << FunctionType::getNameForCallConv(CC);
+ attr.setInvalid();
+ return true;
+ }
+ }
+
+ // Also diagnose fastcall with regparm.
+ if (CC == CC_X86FastCall && fn->getHasRegParm()) {
+ S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
+ << "regparm" << FunctionType::getNameForCallConv(CC_X86FastCall);
+ attr.setInvalid();
+ return true;
+ }
+
+ // Modify the CC from the wrapped function type, wrap it all back, and then
+ // wrap the whole thing in an AttributedType as written. The modified type
+ // might have a different CC if we ignored the attribute.
+ FunctionType::ExtInfo EI = unwrapped.get()->getExtInfo().withCallingConv(CC);
+ QualType Equivalent =
+ unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
+ type = S.Context.getAttributedType(CCAttrKind, type, Equivalent);
+ return true;
+}
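+
+// An illustrative sketch of the function type attributes handled above:
+//
+//   typedef void (__attribute__((stdcall)) *sfp)(int);  // CC on the fn type
+//   typedef void (__attribute__((fastcall)) *vfp)(int, ...);
+//                       // warning: fastcall ignored on a variadic function
+//   __attribute__((noreturn)) void die(void);  // noreturn as a type attr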
+
+bool Sema::hasExplicitCallingConv(QualType &T) {
+ QualType R = T.IgnoreParens();
+ while (const AttributedType *AT = dyn_cast<AttributedType>(R)) {
+ if (AT->isCallingConv())
+ return true;
+ R = AT->getModifiedType().IgnoreParens();
+ }
+ return false;
+}
+
+void Sema::adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
+ SourceLocation Loc) {
+ FunctionTypeUnwrapper Unwrapped(*this, T);
+ const FunctionType *FT = Unwrapped.get();
+ bool IsVariadic = (isa<FunctionProtoType>(FT) &&
+ cast<FunctionProtoType>(FT)->isVariadic());
+ CallingConv CurCC = FT->getCallConv();
+ CallingConv ToCC = Context.getDefaultCallingConvention(IsVariadic, !IsStatic);
+
+ if (CurCC == ToCC)
+ return;
+
+ // MS compiler ignores explicit calling convention attributes on structors. We
+ // should do the same.
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft() && IsCtorOrDtor) {
+    // Issue a warning on ignored calling convention -- except for __stdcall.
+ // Again, this is what MS compiler does.
+ if (CurCC != CC_X86StdCall)
+ Diag(Loc, diag::warn_cconv_structors)
+ << FunctionType::getNameForCallConv(CurCC);
+ // Default adjustment.
+ } else {
+ // Only adjust types with the default convention. For example, on Windows
+ // we should adjust a __cdecl type to __thiscall for instance methods, and a
+ // __thiscall type to __cdecl for static methods.
+ CallingConv DefaultCC =
+ Context.getDefaultCallingConvention(IsVariadic, IsStatic);
+
+ if (CurCC != DefaultCC || DefaultCC == ToCC)
+ return;
+
+ if (hasExplicitCallingConv(T))
+ return;
+ }
+
+ FT = Context.adjustFunctionType(FT, FT->getExtInfo().withCallingConv(ToCC));
+ QualType Wrapped = Unwrapped.wrap(*this, FT);
+ T = Context.getAdjustedType(T, Wrapped);
+}
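+
+// An illustrative sketch, assuming a 32-bit Windows target where instance
+// methods default to __thiscall and free functions to __cdecl:
+//
+//   struct S {
+//     void m();         // built with the __cdecl default; adjusted here to
+//                       // __thiscall
+//     static void f();  // IsStatic: adjusted toward __cdecl instead
+//   };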
+
+/// HandleVectorSizeAttribute - this attribute is only applicable to integral
+/// and float scalars, although arrays, pointers, and function return values are
+/// allowed in conjunction with this construct. Aggregates with this attribute
+/// are invalid, even if they are of the same size as a corresponding scalar.
+/// The raw attribute should contain precisely 1 argument, the vector size for
+/// the variable, measured in bytes. If curType and rawAttr are well formed,
+/// this routine will return a new vector type.
+static void HandleVectorSizeAttr(QualType& CurType, const AttributeList &Attr,
+ Sema &S) {
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << Attr.getName() << 1;
+ Attr.setInvalid();
+ return;
+ }
+ Expr *sizeExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
+ llvm::APSInt vecSize(32);
+ if (sizeExpr->isTypeDependent() || sizeExpr->isValueDependent() ||
+ !sizeExpr->isIntegerConstantExpr(vecSize, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
+ << Attr.getName() << AANT_ArgumentIntegerConstant
+ << sizeExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+ // The base type must be integer (not Boolean or enumeration) or float, and
+ // can't already be a vector.
+ if (!CurType->isBuiltinType() || CurType->isBooleanType() ||
+ (!CurType->isIntegerType() && !CurType->isRealFloatingType())) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_invalid_vector_type) << CurType;
+ Attr.setInvalid();
+ return;
+ }
+ unsigned typeSize = static_cast<unsigned>(S.Context.getTypeSize(CurType));
+ // vecSize is specified in bytes - convert to bits.
+ unsigned vectorSize = static_cast<unsigned>(vecSize.getZExtValue() * 8);
+
+  // The vector size needs to be an integral multiple of the type size.
+ if (vectorSize % typeSize) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_invalid_size)
+ << sizeExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+ if (VectorType::isVectorSizeTooLarge(vectorSize / typeSize)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_size_too_large)
+ << sizeExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+ if (vectorSize == 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_zero_size)
+ << sizeExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+
+  // Success! Instantiate the vector type. The number of elements is > 0 and,
+  // unlike GCC, not required to be a power of 2.
+ CurType = S.Context.getVectorType(CurType, vectorSize/typeSize,
+ VectorType::GenericVector);
+}
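+
+// An illustrative sketch of vector_size inputs, per the rules above:
+//
+//   typedef int v4si __attribute__((vector_size(16)));    // ok: 4 x int
+//   typedef float v3f __attribute__((vector_size(12)));   // ok: 3 elements;
+//                                          // no power-of-2 requirement
+//   struct S { int x; };
+//   typedef struct S sv __attribute__((vector_size(16))); // error: not a
+//                                          // scalar integer/float type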
+
+/// \brief Process the OpenCL-like ext_vector_type attribute when it occurs on
+/// a type.
+static void HandleExtVectorTypeAttr(QualType &CurType,
+ const AttributeList &Attr,
+ Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << Attr.getName() << 1;
+ return;
+ }
+
+ Expr *sizeExpr;
+
+ // Special case where the argument is a template id.
+ if (Attr.isArgIdent(0)) {
+ CXXScopeSpec SS;
+ SourceLocation TemplateKWLoc;
+ UnqualifiedId id;
+ id.setIdentifier(Attr.getArgAsIdent(0)->Ident, Attr.getLoc());
+
+ ExprResult Size = S.ActOnIdExpression(S.getCurScope(), SS, TemplateKWLoc,
+ id, false, false);
+ if (Size.isInvalid())
+ return;
+
+ sizeExpr = Size.get();
+ } else {
+ sizeExpr = Attr.getArgAsExpr(0);
+ }
+
+ // Create the vector type.
+ QualType T = S.BuildExtVectorType(CurType, sizeExpr, Attr.getLoc());
+ if (!T.isNull())
+ CurType = T;
+}
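+
+// An illustrative sketch: unlike vector_size, ext_vector_type takes an
+// element count, and the argument may name a template parameter (the
+// identifier special case above):
+//
+//   typedef float float4 __attribute__((ext_vector_type(4)));  // 4 x float
+//   template <int N> struct V {
+//     typedef float type __attribute__((ext_vector_type(N)));  // dependent
+//   };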
+
+static bool isPermittedNeonBaseType(QualType &Ty,
+ VectorType::VectorKind VecKind, Sema &S) {
+ const BuiltinType *BTy = Ty->getAs<BuiltinType>();
+ if (!BTy)
+ return false;
+
+ llvm::Triple Triple = S.Context.getTargetInfo().getTriple();
+
+ // Signed poly is mathematically wrong, but has been baked into some ABIs by
+ // now.
+ bool IsPolyUnsigned = Triple.getArch() == llvm::Triple::aarch64 ||
+ Triple.getArch() == llvm::Triple::aarch64_be;
+ if (VecKind == VectorType::NeonPolyVector) {
+ if (IsPolyUnsigned) {
+ // AArch64 polynomial vectors are unsigned and support poly64.
+ return BTy->getKind() == BuiltinType::UChar ||
+ BTy->getKind() == BuiltinType::UShort ||
+ BTy->getKind() == BuiltinType::ULong ||
+ BTy->getKind() == BuiltinType::ULongLong;
+ } else {
+      // AArch32 polynomial vectors are signed.
+ return BTy->getKind() == BuiltinType::SChar ||
+ BTy->getKind() == BuiltinType::Short;
+ }
+ }
+
+ // Non-polynomial vector types: the usual suspects are allowed, as well as
+ // float64_t on AArch64.
+ bool Is64Bit = Triple.getArch() == llvm::Triple::aarch64 ||
+ Triple.getArch() == llvm::Triple::aarch64_be;
+
+ if (Is64Bit && BTy->getKind() == BuiltinType::Double)
+ return true;
+
+ return BTy->getKind() == BuiltinType::SChar ||
+ BTy->getKind() == BuiltinType::UChar ||
+ BTy->getKind() == BuiltinType::Short ||
+ BTy->getKind() == BuiltinType::UShort ||
+ BTy->getKind() == BuiltinType::Int ||
+ BTy->getKind() == BuiltinType::UInt ||
+ BTy->getKind() == BuiltinType::Long ||
+ BTy->getKind() == BuiltinType::ULong ||
+ BTy->getKind() == BuiltinType::LongLong ||
+ BTy->getKind() == BuiltinType::ULongLong ||
+ BTy->getKind() == BuiltinType::Float ||
+ BTy->getKind() == BuiltinType::Half;
+}
+
+/// HandleNeonVectorTypeAttr - The "neon_vector_type" and
+/// "neon_polyvector_type" attributes are used to create vector types that
+/// are mangled according to ARM's ABI. Otherwise, these types are identical
+/// to those created with the "vector_size" attribute. Unlike "vector_size"
+/// the argument to these Neon attributes is the number of vector elements,
+/// not the vector size in bytes. The vector width and element type must
+/// match one of the standard Neon vector types.
+static void HandleNeonVectorTypeAttr(QualType& CurType,
+ const AttributeList &Attr, Sema &S,
+ VectorType::VectorKind VecKind) {
+ // Target must have NEON
+ if (!S.Context.getTargetInfo().hasFeature("neon")) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_unsupported) << Attr.getName();
+ Attr.setInvalid();
+ return;
+ }
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << Attr.getName() << 1;
+ Attr.setInvalid();
+ return;
+ }
+ // The number of elements must be an ICE.
+ Expr *numEltsExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
+ llvm::APSInt numEltsInt(32);
+ if (numEltsExpr->isTypeDependent() || numEltsExpr->isValueDependent() ||
+ !numEltsExpr->isIntegerConstantExpr(numEltsInt, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
+ << Attr.getName() << AANT_ArgumentIntegerConstant
+ << numEltsExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+ // Only certain element types are supported for Neon vectors.
+ if (!isPermittedNeonBaseType(CurType, VecKind, S)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_invalid_vector_type) << CurType;
+ Attr.setInvalid();
+ return;
+ }
+
+ // The total size of the vector must be 64 or 128 bits.
+ unsigned typeSize = static_cast<unsigned>(S.Context.getTypeSize(CurType));
+ unsigned numElts = static_cast<unsigned>(numEltsInt.getZExtValue());
+ unsigned vecSize = typeSize * numElts;
+ if (vecSize != 64 && vecSize != 128) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_bad_neon_vector_size) << CurType;
+ Attr.setInvalid();
+ return;
+ }
+
+ CurType = S.Context.getVectorType(CurType, numElts, VecKind);
+}
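+
+// An illustrative sketch, assuming an AArch64 target with the "neon"
+// feature: the argument is an element count, and the total vector size
+// must come to 64 or 128 bits:
+//
+//   typedef int elt;  // 32-bit element type
+//   typedef elt int32x4_t __attribute__((neon_vector_type(4)));  // 128 bits
+//   typedef elt int32x2_t __attribute__((neon_vector_type(2)));  // 64 bits
+//   typedef elt bad_t __attribute__((neon_vector_type(3)));      // error:
+//                                          // 96 bits is not a Neon size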
+
+static void processTypeAttrs(TypeProcessingState &state, QualType &type,
+ TypeAttrLocation TAL, AttributeList *attrs) {
+ // Scan through and apply attributes to this type where it makes sense. Some
+ // attributes (such as __address_space__, __vector_size__, etc) apply to the
+ // type, but others can be present in the type specifiers even though they
+ // apply to the decl. Here we apply type attributes and ignore the rest.
+
+ bool hasOpenCLAddressSpace = false;
+ while (attrs) {
+ AttributeList &attr = *attrs;
+    attrs = attr.getNext(); // Advance now, since some cases below
+                            // continue the loop early.
+
+ // Skip attributes that were marked to be invalid.
+ if (attr.isInvalid())
+ continue;
+
+ if (attr.isCXX11Attribute()) {
+ // [[gnu::...]] attributes are treated as declaration attributes, so may
+ // not appertain to a DeclaratorChunk, even if we handle them as type
+ // attributes.
+ if (attr.getScopeName() && attr.getScopeName()->isStr("gnu")) {
+ if (TAL == TAL_DeclChunk) {
+ state.getSema().Diag(attr.getLoc(),
+ diag::warn_cxx11_gnu_attribute_on_type)
+ << attr.getName();
+ continue;
+ }
+ } else if (TAL != TAL_DeclChunk) {
+ // Otherwise, only consider type processing for a C++11 attribute if
+ // it's actually been applied to a type.
+ continue;
+ }
+ }
+
+ // If this is an attribute we can handle, do so now,
+ // otherwise, add it to the FnAttrs list for rechaining.
+ switch (attr.getKind()) {
+ default:
+ // A C++11 attribute on a declarator chunk must appertain to a type.
+ if (attr.isCXX11Attribute() && TAL == TAL_DeclChunk) {
+ state.getSema().Diag(attr.getLoc(), diag::err_attribute_not_type_attr)
+ << attr.getName();
+ attr.setUsedAsTypeAttr();
+ }
+ break;
+
+ case AttributeList::UnknownAttribute:
+ if (attr.isCXX11Attribute() && TAL == TAL_DeclChunk)
+ state.getSema().Diag(attr.getLoc(),
+ diag::warn_unknown_attribute_ignored)
+ << attr.getName();
+ break;
+
+ case AttributeList::IgnoredAttribute:
+ break;
+
+ case AttributeList::AT_MayAlias:
+ // FIXME: This attribute needs to actually be handled, but if we ignore
+ // it it breaks large amounts of Linux software.
+ attr.setUsedAsTypeAttr();
+ break;
+ case AttributeList::AT_OpenCLPrivateAddressSpace:
+ case AttributeList::AT_OpenCLGlobalAddressSpace:
+ case AttributeList::AT_OpenCLLocalAddressSpace:
+ case AttributeList::AT_OpenCLConstantAddressSpace:
+ case AttributeList::AT_OpenCLGenericAddressSpace:
+ case AttributeList::AT_AddressSpace:
+ HandleAddressSpaceTypeAttribute(type, attr, state.getSema());
+ attr.setUsedAsTypeAttr();
+ hasOpenCLAddressSpace = true;
+ break;
+ OBJC_POINTER_TYPE_ATTRS_CASELIST:
+ if (!handleObjCPointerTypeAttr(state, attr, type))
+ distributeObjCPointerTypeAttr(state, attr, type);
+ attr.setUsedAsTypeAttr();
+ break;
+ case AttributeList::AT_VectorSize:
+ HandleVectorSizeAttr(type, attr, state.getSema());
+ attr.setUsedAsTypeAttr();
+ break;
+ case AttributeList::AT_ExtVectorType:
+ HandleExtVectorTypeAttr(type, attr, state.getSema());
+ attr.setUsedAsTypeAttr();
+ break;
+ case AttributeList::AT_NeonVectorType:
+ HandleNeonVectorTypeAttr(type, attr, state.getSema(),
+ VectorType::NeonVector);
+ attr.setUsedAsTypeAttr();
+ break;
+ case AttributeList::AT_NeonPolyVectorType:
+ HandleNeonVectorTypeAttr(type, attr, state.getSema(),
+ VectorType::NeonPolyVector);
+ attr.setUsedAsTypeAttr();
+ break;
+ case AttributeList::AT_OpenCLImageAccess:
+ // FIXME: there should be some type checking happening here, I would
+ // imagine, but the original handler's checking was entirely superfluous.
+ attr.setUsedAsTypeAttr();
+ break;
+
+ MS_TYPE_ATTRS_CASELIST:
+ if (!handleMSPointerTypeQualifierAttr(state, attr, type))
+ attr.setUsedAsTypeAttr();
+ break;
+
+ NULLABILITY_TYPE_ATTRS_CASELIST:
+ // Either add nullability here or try to distribute it. We
+ // don't want to distribute the nullability specifier past any
+ // dependent type, because that complicates the user model.
+ if (type->canHaveNullability() || type->isDependentType() ||
+ !distributeNullabilityTypeAttr(state, type, attr)) {
+ if (state.getSema().checkNullabilityTypeSpecifier(
+ type,
+ mapNullabilityAttrKind(attr.getKind()),
+ attr.getLoc(),
+ attr.isContextSensitiveKeywordAttribute())) {
+ attr.setInvalid();
+ }
+
+ attr.setUsedAsTypeAttr();
+ }
+ break;
+
+ case AttributeList::AT_ObjCKindOf:
+ // '__kindof' must be part of the decl-specifiers.
+ switch (TAL) {
+ case TAL_DeclSpec:
+ break;
+
+ case TAL_DeclChunk:
+ case TAL_DeclName:
+ state.getSema().Diag(attr.getLoc(),
+ diag::err_objc_kindof_wrong_position)
+ << FixItHint::CreateRemoval(attr.getLoc())
+ << FixItHint::CreateInsertion(
+ state.getDeclarator().getDeclSpec().getLocStart(), "__kindof ");
+ break;
+ }
+
+ // Apply it regardless.
+ if (state.getSema().checkObjCKindOfType(type, attr.getLoc()))
+ attr.setInvalid();
+ attr.setUsedAsTypeAttr();
+ break;
+
+ case AttributeList::AT_NSReturnsRetained:
+ if (!state.getSema().getLangOpts().ObjCAutoRefCount)
+ break;
+ // fallthrough into the function attrs
+
+ FUNCTION_TYPE_ATTRS_CASELIST:
+ attr.setUsedAsTypeAttr();
+
+ // Never process function type attributes as part of the
+ // declaration-specifiers.
+ if (TAL == TAL_DeclSpec)
+ distributeFunctionTypeAttrFromDeclSpec(state, attr, type);
+
+ // Otherwise, handle the possible delays.
+ else if (!handleFunctionTypeAttr(state, attr, type))
+ distributeFunctionTypeAttr(state, attr, type);
+ break;
+ }
+ }
+
+  // If address space is not set, OpenCL 2.0 defines non-private default
+ // address spaces for some cases:
+ // OpenCL 2.0, section 6.5:
+ // The address space for a variable at program scope or a static variable
+ // inside a function can either be __global or __constant, but defaults to
+ // __global if not specified.
+ // (...)
+ // Pointers that are declared without pointing to a named address space point
+ // to the generic address space.
+ if (state.getSema().getLangOpts().OpenCLVersion >= 200 &&
+ !hasOpenCLAddressSpace && type.getAddressSpace() == 0 &&
+ (TAL == TAL_DeclSpec || TAL == TAL_DeclChunk)) {
+ Declarator &D = state.getDeclarator();
+ if (state.getCurrentChunkIndex() > 0 &&
+ D.getTypeObject(state.getCurrentChunkIndex() - 1).Kind ==
+ DeclaratorChunk::Pointer) {
+ type = state.getSema().Context.getAddrSpaceQualType(
+ type, LangAS::opencl_generic);
+ } else if (state.getCurrentChunkIndex() == 0 &&
+ D.getContext() == Declarator::FileContext &&
+ !D.isFunctionDeclarator() && !D.isFunctionDefinition() &&
+ D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef &&
+ !type->isSamplerT())
+ type = state.getSema().Context.getAddrSpaceQualType(
+ type, LangAS::opencl_global);
+ else if (state.getCurrentChunkIndex() == 0 &&
+ D.getContext() == Declarator::BlockContext &&
+ D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static)
+ type = state.getSema().Context.getAddrSpaceQualType(
+ type, LangAS::opencl_global);
+ }
+}
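+
+// An illustrative sketch of the OpenCL 2.0 defaulting above, for a kernel
+// source file compiled with -cl-std=CL2.0:
+//
+//   int g;            // program scope, no address space: defaults to __global
+//   void f(int *p) {  // pointer with no named address space: points to the
+//                     // __generic address space
+//     static int s;   // static local: defaults to __global
+//   }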
+
+void Sema::completeExprArrayBound(Expr *E) {
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParens())) {
+ if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (isTemplateInstantiation(Var->getTemplateSpecializationKind())) {
+ SourceLocation PointOfInstantiation = E->getExprLoc();
+
+ if (MemberSpecializationInfo *MSInfo =
+ Var->getMemberSpecializationInfo()) {
+ // If we don't already have a point of instantiation, this is it.
+ if (MSInfo->getPointOfInstantiation().isInvalid()) {
+ MSInfo->setPointOfInstantiation(PointOfInstantiation);
+
+ // This is a modification of an existing AST node. Notify
+ // listeners.
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->StaticDataMemberInstantiated(Var);
+ }
+ } else {
+ VarTemplateSpecializationDecl *VarSpec =
+ cast<VarTemplateSpecializationDecl>(Var);
+ if (VarSpec->getPointOfInstantiation().isInvalid())
+ VarSpec->setPointOfInstantiation(PointOfInstantiation);
+ }
+
+ InstantiateVariableDefinition(PointOfInstantiation, Var);
+
+ // Update the type to the newly instantiated definition's type both
+ // here and within the expression.
+ if (VarDecl *Def = Var->getDefinition()) {
+ DRE->setDecl(Def);
+ QualType T = Def->getType();
+ DRE->setType(T);
+ // FIXME: Update the type on all intervening expressions.
+ E->setType(T);
+ }
+
+ // We still go on to try to complete the type independently, as it
+ // may also require instantiations or diagnostics if it remains
+ // incomplete.
+ }
+ }
+ }
+}
+
+/// \brief Ensure that the type of the given expression is complete.
+///
+/// This routine checks whether the expression \p E has a complete type. If the
+/// expression refers to an instantiable construct, that instantiation is
+/// performed as needed to complete its type. Furthermore,
+/// Sema::RequireCompleteType is called for the expression's type (or in the
+/// case of a reference type, the referred-to type).
+///
+/// \param E The expression whose type is required to be complete.
+/// \param Diagnoser The object that will emit a diagnostic if the type is
+/// incomplete.
+///
+/// \returns \c true if the type of \p E is incomplete and diagnosed, \c false
+/// otherwise.
+bool Sema::RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser) {
+ QualType T = E->getType();
+
+ // Incomplete array types may be completed by the initializer attached to
+ // their definitions. For static data members of class templates and for
+ // variable templates, we need to instantiate the definition to get this
+ // initializer and complete the type.
+ if (T->isIncompleteArrayType()) {
+ completeExprArrayBound(E);
+ T = E->getType();
+ }
+
+ // FIXME: Are there other cases which require instantiating something other
+ // than the type to complete the type of an expression?
+
+ return RequireCompleteType(E->getExprLoc(), T, Diagnoser);
+}
+
+bool Sema::RequireCompleteExprType(Expr *E, unsigned DiagID) {
+ BoundTypeDiagnoser<> Diagnoser(DiagID);
+ return RequireCompleteExprType(E, Diagnoser);
+}
+
+/// @brief Ensure that the type T is a complete type.
+///
+/// This routine checks whether the type @p T is complete in any
+/// context where a complete type is required. If @p T is a complete
+/// type, returns false. If @p T is a class template specialization,
+/// this routine then attempts to perform class template
+/// instantiation. If instantiation fails, or if @p T is incomplete
+/// and cannot be completed, issues the diagnostic @p diag (giving it
+/// the type @p T) and returns true.
+///
+/// @param Loc The location in the source that the incomplete type
+/// diagnostic should refer to.
+///
+/// @param T The type that this routine is examining for completeness.
+///
+/// @returns @c true if @p T is incomplete and a diagnostic was emitted,
+/// @c false otherwise.
+bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
+ TypeDiagnoser &Diagnoser) {
+ if (RequireCompleteTypeImpl(Loc, T, &Diagnoser))
+ return true;
+ if (const TagType *Tag = T->getAs<TagType>()) {
+ if (!Tag->getDecl()->isCompleteDefinitionRequired()) {
+ Tag->getDecl()->setCompleteDefinitionRequired();
+ Consumer.HandleTagDeclRequiredDefinition(Tag->getDecl());
+ }
+ }
+ return false;
+}
+
+/// \brief Determine whether there is any declaration of \p D that was ever a
+/// definition (perhaps before module merging) and is currently visible.
+/// \param D The definition of the entity.
+/// \param Suggested Filled in with the declaration that should be made visible
+/// in order to provide a definition of this entity.
+/// \param OnlyNeedComplete If \c true, we only need the type to be complete,
+/// not defined. This only matters for enums with a fixed underlying
+/// type, since in all other cases, a type is complete if and only if it
+/// is defined.
+bool Sema::hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
+ bool OnlyNeedComplete) {
+ // Easy case: if we don't have modules, all declarations are visible.
+ if (!getLangOpts().Modules && !getLangOpts().ModulesLocalVisibility)
+ return true;
+
+ // If this definition was instantiated from a template, map back to the
+ // pattern from which it was instantiated.
+ if (isa<TagDecl>(D) && cast<TagDecl>(D)->isBeingDefined()) {
+ // We're in the middle of defining it; this definition should be treated
+ // as visible.
+ return true;
+ } else if (auto *RD = dyn_cast<CXXRecordDecl>(D)) {
+ if (auto *Pattern = RD->getTemplateInstantiationPattern())
+ RD = Pattern;
+ D = RD->getDefinition();
+ } else if (auto *ED = dyn_cast<EnumDecl>(D)) {
+ while (auto *NewED = ED->getInstantiatedFromMemberEnum())
+ ED = NewED;
+ if (OnlyNeedComplete && ED->isFixed()) {
+ // If the enum has a fixed underlying type, and we're only looking for a
+ // complete type (not a definition), any visible declaration of it will
+ // do.
+ *Suggested = nullptr;
+ for (auto *Redecl : ED->redecls()) {
+ if (isVisible(Redecl))
+ return true;
+ if (Redecl->isThisDeclarationADefinition() ||
+ (Redecl->isCanonicalDecl() && !*Suggested))
+ *Suggested = Redecl;
+ }
+ return false;
+ }
+ D = ED->getDefinition();
+ }
+ assert(D && "missing definition for pattern of instantiated definition");
+
+ *Suggested = D;
+ if (isVisible(D))
+ return true;
+
+ // The external source may have additional definitions of this type that are
+ // visible, so complete the redeclaration chain now and ask again.
+ if (auto *Source = Context.getExternalSource()) {
+ Source->CompleteRedeclChain(D);
+ return isVisible(D);
+ }
+
+ return false;
+}
+
+/// Locks in the inheritance model for the given class and all of its bases.
+static void assignInheritanceModel(Sema &S, CXXRecordDecl *RD) {
+ RD = RD->getMostRecentDecl();
+ if (!RD->hasAttr<MSInheritanceAttr>()) {
+ MSInheritanceAttr::Spelling IM;
+
+ switch (S.MSPointerToMemberRepresentationMethod) {
+ case LangOptions::PPTMK_BestCase:
+ IM = RD->calculateInheritanceModel();
+ break;
+ case LangOptions::PPTMK_FullGeneralitySingleInheritance:
+ IM = MSInheritanceAttr::Keyword_single_inheritance;
+ break;
+ case LangOptions::PPTMK_FullGeneralityMultipleInheritance:
+ IM = MSInheritanceAttr::Keyword_multiple_inheritance;
+ break;
+ case LangOptions::PPTMK_FullGeneralityVirtualInheritance:
+ IM = MSInheritanceAttr::Keyword_unspecified_inheritance;
+ break;
+ }
+
+ RD->addAttr(MSInheritanceAttr::CreateImplicit(
+ S.getASTContext(), IM,
+ /*BestCase=*/S.MSPointerToMemberRepresentationMethod ==
+ LangOptions::PPTMK_BestCase,
+ S.ImplicitMSInheritanceAttrLoc.isValid()
+ ? S.ImplicitMSInheritanceAttrLoc
+ : RD->getSourceRange()));
+ }
+}
+
+/// \brief The implementation of RequireCompleteType
+bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
+ TypeDiagnoser *Diagnoser) {
+ // FIXME: Add this assertion to make sure we always get instantiation points.
+ // assert(!Loc.isInvalid() && "Invalid location in RequireCompleteType");
+ // FIXME: Add this assertion to help us flush out problems with
+ // checking for dependent types and type-dependent expressions.
+ //
+ // assert(!T->isDependentType() &&
+ // "Can't ask whether a dependent type is complete");
+
+ // We lock in the inheritance model once somebody has asked us to ensure
+ // that a pointer-to-member type is complete.
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ if (const MemberPointerType *MPTy = T->getAs<MemberPointerType>()) {
+ if (!MPTy->getClass()->isDependentType()) {
+ (void)isCompleteType(Loc, QualType(MPTy->getClass(), 0));
+ assignInheritanceModel(*this, MPTy->getMostRecentCXXRecordDecl());
+ }
+ }
+ }
+
+ // If we have a complete type, we're done.
+ NamedDecl *Def = nullptr;
+ if (!T->isIncompleteType(&Def)) {
+ // If we know about the definition but it is not visible, complain.
+ NamedDecl *SuggestedDef = nullptr;
+ if (Def &&
+ !hasVisibleDefinition(Def, &SuggestedDef, /*OnlyNeedComplete*/true)) {
+ // If the user is going to see an error here, recover by making the
+ // definition visible.
+ bool TreatAsComplete = Diagnoser && !isSFINAEContext();
+ if (Diagnoser)
+ diagnoseMissingImport(Loc, SuggestedDef, /*NeedDefinition*/true,
+ /*Recover*/TreatAsComplete);
+ return !TreatAsComplete;
+ }
+
+ return false;
+ }
+
+ const TagType *Tag = T->getAs<TagType>();
+ const ObjCInterfaceType *IFace = T->getAs<ObjCInterfaceType>();
+
+ // If there's an unimported definition of this type in a module (for
+ // instance, because we forward declared it, then imported the definition),
+ // import that definition now.
+ //
+ // FIXME: What about other cases where an import extends a redeclaration
+ // chain for a declaration that can be accessed through a mechanism other
+ // than name lookup (eg, referenced in a template, or a variable whose type
+ // could be completed by the module)?
+ //
+ // FIXME: Should we map through to the base array element type before
+ // checking for a tag type?
+ if (Tag || IFace) {
+ NamedDecl *D =
+ Tag ? static_cast<NamedDecl *>(Tag->getDecl()) : IFace->getDecl();
+
+ // Avoid diagnosing invalid decls as incomplete.
+ if (D->isInvalidDecl())
+ return true;
+
+ // Give the external AST source a chance to complete the type.
+ if (auto *Source = Context.getExternalSource()) {
+ if (Tag)
+ Source->CompleteType(Tag->getDecl());
+ else
+ Source->CompleteType(IFace->getDecl());
+
+ // If the external source completed the type, go through the motions
+ // again to ensure we're allowed to use the completed type.
+ if (!T->isIncompleteType())
+ return RequireCompleteTypeImpl(Loc, T, Diagnoser);
+ }
+ }
+
+ // If we have a class template specialization or a class member of a
+ // class template specialization, or an array with known size of such,
+ // try to instantiate it.
+ QualType MaybeTemplate = T;
+ while (const ConstantArrayType *Array
+ = Context.getAsConstantArrayType(MaybeTemplate))
+ MaybeTemplate = Array->getElementType();
+ if (const RecordType *Record = MaybeTemplate->getAs<RecordType>()) {
+ bool Instantiated = false;
+ bool Diagnosed = false;
+ if (ClassTemplateSpecializationDecl *ClassTemplateSpec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Record->getDecl())) {
+ if (ClassTemplateSpec->getSpecializationKind() == TSK_Undeclared) {
+ Diagnosed = InstantiateClassTemplateSpecialization(
+ Loc, ClassTemplateSpec, TSK_ImplicitInstantiation,
+ /*Complain=*/Diagnoser);
+ Instantiated = true;
+ }
+ } else if (CXXRecordDecl *Rec
+ = dyn_cast<CXXRecordDecl>(Record->getDecl())) {
+ CXXRecordDecl *Pattern = Rec->getInstantiatedFromMemberClass();
+ if (!Rec->isBeingDefined() && Pattern) {
+ MemberSpecializationInfo *MSI = Rec->getMemberSpecializationInfo();
+ assert(MSI && "Missing member specialization information?");
+ // This record was instantiated from a class within a template.
+ if (MSI->getTemplateSpecializationKind() !=
+ TSK_ExplicitSpecialization) {
+ Diagnosed = InstantiateClass(Loc, Rec, Pattern,
+ getTemplateInstantiationArgs(Rec),
+ TSK_ImplicitInstantiation,
+ /*Complain=*/Diagnoser);
+ Instantiated = true;
+ }
+ }
+ }
+
+ if (Instantiated) {
+ // Instantiate* might have already complained that the template is not
+ // defined, if we asked it to.
+ if (Diagnoser && Diagnosed)
+ return true;
+ // If we instantiated a definition, check that it's usable, even if
+ // instantiation produced an error, so that repeated calls to this
+ // function give consistent answers.
+ if (!T->isIncompleteType())
+ return RequireCompleteTypeImpl(Loc, T, Diagnoser);
+ }
+ }
+
+ if (!Diagnoser)
+ return true;
+
+ // We have an incomplete type. Produce a diagnostic.
+ if (Ident___float128 &&
+ T == Context.getTypeDeclType(Context.getFloat128StubType())) {
+ Diag(Loc, diag::err_typecheck_decl_incomplete_type___float128);
+ return true;
+ }
+
+ Diagnoser->diagnose(*this, Loc, T);
+
+ // If the type was a forward declaration of a class/struct/union
+ // type, produce a note.
+ if (Tag && !Tag->getDecl()->isInvalidDecl())
+ Diag(Tag->getDecl()->getLocation(),
+ Tag->isBeingDefined() ? diag::note_type_being_defined
+ : diag::note_forward_declaration)
+ << QualType(Tag, 0);
+
+ // If the Objective-C class was a forward declaration, produce a note.
+ if (IFace && !IFace->getDecl()->isInvalidDecl())
+ Diag(IFace->getDecl()->getLocation(), diag::note_forward_class);
+
+ // If we have external information that we can use to suggest a fix,
+ // produce a note.
+ if (ExternalSource)
+ ExternalSource->MaybeDiagnoseMissingCompleteType(Loc, T);
+
+ return true;
+}
+
+bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
+ unsigned DiagID) {
+ BoundTypeDiagnoser<> Diagnoser(DiagID);
+ return RequireCompleteType(Loc, T, Diagnoser);
+}
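+
+// An illustrative sketch of what RequireCompleteType diagnoses or triggers:
+//
+//   struct Fwd;
+//   Fwd f1;        // error: incomplete type, plus a note pointing at the
+//                  // forward declaration of 'Fwd'
+//   template <class T> struct Box { T t; };
+//   Box<int> f2;   // ok: requiring completeness implicitly instantiates
+//                  // Box<int> via the template path above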
+
+/// \brief Get diagnostic %select index for tag kind for
+/// literal type diagnostic message.
+/// WARNING: Indexes apply to particular diagnostics only!
+///
+/// \returns diagnostic %select index.
+static unsigned getLiteralDiagFromTagKind(TagTypeKind Tag) {
+ switch (Tag) {
+ case TTK_Struct: return 0;
+ case TTK_Interface: return 1;
+ case TTK_Class: return 2;
+ default: llvm_unreachable("Invalid tag kind for literal type diagnostic!");
+ }
+}
+
+/// @brief Ensure that the type T is a literal type.
+///
+/// This routine checks whether the type @p T is a literal type. If @p T is an
+/// incomplete type, an attempt is made to complete it. If @p T is a literal
+/// type, or @p AllowIncompleteType is true and @p T is an incomplete type,
+/// returns false. Otherwise, this routine issues the diagnostic @p PD (giving
+/// it the type @p T), along with notes explaining why the type is not a
+/// literal type, and returns true.
+///
+/// @param Loc The location in the source that the non-literal type
+/// diagnostic should refer to.
+///
+/// @param T The type that this routine is examining for literalness.
+///
+/// @param Diagnoser Emits a diagnostic if T is not a literal type.
+///
+/// @returns @c true if @p T is not a literal type and a diagnostic was emitted,
+/// @c false otherwise.
+bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
+ TypeDiagnoser &Diagnoser) {
+ assert(!T->isDependentType() && "type should not be dependent");
+
+ QualType ElemType = Context.getBaseElementType(T);
+ if ((isCompleteType(Loc, ElemType) || ElemType->isVoidType()) &&
+ T->isLiteralType(Context))
+ return false;
+
+ Diagnoser.diagnose(*this, Loc, T);
+
+ if (T->isVariableArrayType())
+ return true;
+
+ const RecordType *RT = ElemType->getAs<RecordType>();
+ if (!RT)
+ return true;
+
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ // A partially-defined class type can't be a literal type, because a literal
+ // class type must have a trivial destructor (which can't be checked until
+ // the class definition is complete).
+ if (RequireCompleteType(Loc, ElemType, diag::note_non_literal_incomplete, T))
+ return true;
+
+ // If the class has virtual base classes, then it's not an aggregate, and
+ // cannot have any constexpr constructors or a trivial default constructor,
+ // so is non-literal. This is better to diagnose than the resulting absence
+ // of constexpr constructors.
+ if (RD->getNumVBases()) {
+ Diag(RD->getLocation(), diag::note_non_literal_virtual_base)
+ << getLiteralDiagFromTagKind(RD->getTagKind()) << RD->getNumVBases();
+ for (const auto &I : RD->vbases())
+ Diag(I.getLocStart(), diag::note_constexpr_virtual_base_here)
+ << I.getSourceRange();
+ } else if (!RD->isAggregate() && !RD->hasConstexprNonCopyMoveConstructor() &&
+ !RD->hasTrivialDefaultConstructor()) {
+ Diag(RD->getLocation(), diag::note_non_literal_no_constexpr_ctors) << RD;
+ } else if (RD->hasNonLiteralTypeFieldsOrBases()) {
+ for (const auto &I : RD->bases()) {
+ if (!I.getType()->isLiteralType(Context)) {
+ Diag(I.getLocStart(),
+ diag::note_non_literal_base_class)
+ << RD << I.getType() << I.getSourceRange();
+ return true;
+ }
+ }
+ for (const auto *I : RD->fields()) {
+ if (!I->getType()->isLiteralType(Context) ||
+ I->getType().isVolatileQualified()) {
+ Diag(I->getLocation(), diag::note_non_literal_field)
+ << RD << I << I->getType()
+ << I->getType().isVolatileQualified();
+ return true;
+ }
+ }
+ } else if (!RD->hasTrivialDestructor()) {
+ // All fields and bases are of literal types, so have trivial destructors.
+ // If this class's destructor is non-trivial it must be user-declared.
+ CXXDestructorDecl *Dtor = RD->getDestructor();
+ assert(Dtor && "class has literal fields and bases but no dtor?");
+ if (!Dtor)
+ return true;
+
+ Diag(Dtor->getLocation(), Dtor->isUserProvided() ?
+ diag::note_non_literal_user_provided_dtor :
+ diag::note_non_literal_nontrivial_dtor) << RD;
+ if (!Dtor->isUserProvided())
+ SpecialMemberIsTrivial(Dtor, CXXDestructor, /*Diagnose*/true);
+ }
+
+ return true;
+}
+
+bool Sema::RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID) {
+ BoundTypeDiagnoser<> Diagnoser(DiagID);
+ return RequireLiteralType(Loc, T, Diagnoser);
+}
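+
+// An illustrative sketch of a non-literal type this routine explains:
+//
+//   struct NonLit { ~NonLit(); };  // user-provided destructor
+//   constexpr NonLit nl{};         // error: non-literal type, with a note
+//                                  // pointing at the destructor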
+
+/// \brief Retrieve a version of the type 'T' that is elaborated by Keyword
+/// and qualified by the nested-name-specifier contained in SS.
+QualType Sema::getElaboratedType(ElaboratedTypeKeyword Keyword,
+ const CXXScopeSpec &SS, QualType T) {
+ if (T.isNull())
+ return T;
+ NestedNameSpecifier *NNS;
+ if (SS.isValid())
+ NNS = SS.getScopeRep();
+ else {
+ if (Keyword == ETK_None)
+ return T;
+ NNS = nullptr;
+ }
+ return Context.getElaboratedType(Keyword, NNS, T);
+}
+
+QualType Sema::BuildTypeofExprType(Expr *E, SourceLocation Loc) {
+ ExprResult ER = CheckPlaceholderExpr(E);
+ if (ER.isInvalid()) return QualType();
+ E = ER.get();
+
+ if (!getLangOpts().CPlusPlus && E->refersToBitField())
+ Diag(E->getExprLoc(), diag::err_sizeof_alignof_typeof_bitfield) << 2;
+
+ if (!E->isTypeDependent()) {
+ QualType T = E->getType();
+ if (const TagType *TT = T->getAs<TagType>())
+ DiagnoseUseOfDecl(TT->getDecl(), E->getExprLoc());
+ }
+ return Context.getTypeOfExprType(E);
+}
+
+/// getDecltypeForExpr - Given an expr, will return the decltype for
+/// that expression, according to the rules in C++11
+/// [dcl.type.simple]p4 and C++11 [expr.lambda.prim]p18.
+static QualType getDecltypeForExpr(Sema &S, Expr *E) {
+ if (E->isTypeDependent())
+ return S.Context.DependentTy;
+
+ // C++11 [dcl.type.simple]p4:
+ // The type denoted by decltype(e) is defined as follows:
+ //
+ // - if e is an unparenthesized id-expression or an unparenthesized class
+ // member access (5.2.5), decltype(e) is the type of the entity named
+ // by e. If there is no such entity, or if e names a set of overloaded
+ // functions, the program is ill-formed;
+ //
+ // We apply the same rules for Objective-C ivar and property references.
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (const ValueDecl *VD = dyn_cast<ValueDecl>(DRE->getDecl()))
+ return VD->getType();
+ } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()))
+ return FD->getType();
+ } else if (const ObjCIvarRefExpr *IR = dyn_cast<ObjCIvarRefExpr>(E)) {
+ return IR->getDecl()->getType();
+ } else if (const ObjCPropertyRefExpr *PR = dyn_cast<ObjCPropertyRefExpr>(E)) {
+ if (PR->isExplicitProperty())
+ return PR->getExplicitProperty()->getType();
+ } else if (auto *PE = dyn_cast<PredefinedExpr>(E)) {
+ return PE->getType();
+ }
+
+ // C++11 [expr.lambda.prim]p18:
+ // Every occurrence of decltype((x)) where x is a possibly
+ // parenthesized id-expression that names an entity of automatic
+ // storage duration is treated as if x were transformed into an
+ // access to a corresponding data member of the closure type that
+ // would have been declared if x were an odr-use of the denoted
+ // entity.
+ using namespace sema;
+ if (S.getCurLambda()) {
+ if (isa<ParenExpr>(E)) {
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParens())) {
+ if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
+ QualType T = S.getCapturedDeclRefType(Var, DRE->getLocation());
+ if (!T.isNull())
+ return S.Context.getLValueReferenceType(T);
+ }
+ }
+ }
+ }
+
+ // C++11 [dcl.type.simple]p4:
+ // [...]
+ QualType T = E->getType();
+ switch (E->getValueKind()) {
+ // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
+ // type of e;
+ case VK_XValue: T = S.Context.getRValueReferenceType(T); break;
+ // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
+ // type of e;
+ case VK_LValue: T = S.Context.getLValueReferenceType(T); break;
+ // - otherwise, decltype(e) is the type of e.
+ case VK_RValue: break;
+ }
+
+ return T;
+}
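+
+// An illustrative sketch of the rules implemented above:
+//
+//   int n;
+//   decltype(n) a;        // int   (unparenthesized id-expression)
+//   decltype((n)) b = n;  // int&  (parenthesized lvalue)
+//   decltype(static_cast<int&&>(n)) c = static_cast<int&&>(n);  // int&&
+//   decltype(n + 1) d;    // int   (prvalue)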
+
+QualType Sema::BuildDecltypeType(Expr *E, SourceLocation Loc,
+ bool AsUnevaluated) {
+ ExprResult ER = CheckPlaceholderExpr(E);
+ if (ER.isInvalid()) return QualType();
+ E = ER.get();
+
+ if (AsUnevaluated && ActiveTemplateInstantiations.empty() &&
+ E->HasSideEffects(Context, false)) {
+ // The expression operand for decltype is in an unevaluated expression
+ // context, so side effects could result in unintended consequences.
+ Diag(E->getExprLoc(), diag::warn_side_effects_unevaluated_context);
+ }
+
+ return Context.getDecltypeType(E, getDecltypeForExpr(*this, E));
+}
+
+QualType Sema::BuildUnaryTransformType(QualType BaseType,
+ UnaryTransformType::UTTKind UKind,
+ SourceLocation Loc) {
+ switch (UKind) {
+ case UnaryTransformType::EnumUnderlyingType:
+ if (!BaseType->isDependentType() && !BaseType->isEnumeralType()) {
+ Diag(Loc, diag::err_only_enums_have_underlying_types);
+ return QualType();
+ } else {
+ QualType Underlying = BaseType;
+ if (!BaseType->isDependentType()) {
+ // The enum could be incomplete if we're parsing its definition or
+ // recovering from an error.
+ NamedDecl *FwdDecl = nullptr;
+ if (BaseType->isIncompleteType(&FwdDecl)) {
+ Diag(Loc, diag::err_underlying_type_of_incomplete_enum) << BaseType;
+ Diag(FwdDecl->getLocation(), diag::note_forward_declaration) << FwdDecl;
+ return QualType();
+ }
+
+ EnumDecl *ED = BaseType->getAs<EnumType>()->getDecl();
+ assert(ED && "EnumType has no EnumDecl");
+
+ DiagnoseUseOfDecl(ED, Loc);
+
+ Underlying = ED->getIntegerType();
+ assert(!Underlying.isNull());
+ }
+ return Context.getUnaryTransformType(BaseType, Underlying,
+ UnaryTransformType::EnumUnderlyingType);
+ }
+ }
+ llvm_unreachable("unknown unary transform type");
+}
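+
+// An illustrative sketch of __underlying_type, the one transform above:
+//
+//   enum E : short { e };
+//   __underlying_type(E) x;    // ok: 'x' has type 'short'
+//   __underlying_type(int) y;  // error: only enums have underlying types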
+
+QualType Sema::BuildAtomicType(QualType T, SourceLocation Loc) {
+ if (!T->isDependentType()) {
+ // FIXME: It isn't entirely clear whether incomplete atomic types
+ // are allowed or not; for simplicity, ban them for the moment.
+ if (RequireCompleteType(Loc, T, diag::err_atomic_specifier_bad_type, 0))
+ return QualType();
+
+ int DisallowedKind = -1;
+ if (T->isArrayType())
+ DisallowedKind = 1;
+ else if (T->isFunctionType())
+ DisallowedKind = 2;
+ else if (T->isReferenceType())
+ DisallowedKind = 3;
+ else if (T->isAtomicType())
+ DisallowedKind = 4;
+ else if (T.hasQualifiers())
+ DisallowedKind = 5;
+ else if (!T.isTriviallyCopyableType(Context))
+ // Some other non-trivially-copyable type (probably a C++ class)
+ DisallowedKind = 6;
+
+ if (DisallowedKind != -1) {
+ Diag(Loc, diag::err_atomic_specifier_bad_type) << DisallowedKind << T;
+ return QualType();
+ }
+
+ // FIXME: Do we need any handling for ARC here?
+ }
+
+  // Build the atomic type.
+ return Context.getAtomicType(T);
+}
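+
+// An illustrative sketch of what BuildAtomicType allows and rejects:
+//
+//   _Atomic(int) i;         // ok
+//   _Atomic(int[4]) a;      // error: array type
+//   _Atomic(int (void)) f;  // error: function type
+//   _Atomic(const int) c;   // error: qualified type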
diff --git a/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h b/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h
new file mode 100644
index 0000000..e0a9653
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h
@@ -0,0 +1,11603 @@
+//===------- TreeTransform.h - Semantic Tree Transformation -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+//
+// This file implements a semantic tree transformation that takes a given
+// AST and rebuilds it, possibly transforming some nodes in the process.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_SEMA_TREETRANSFORM_H
+#define LLVM_CLANG_LIB_SEMA_TREETRANSFORM_H
+
+#include "TypeLocBuilder.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprOpenMP.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtOpenMP.h"
+#include "clang/Sema/Designator.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Ownership.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Sema/SemaInternal.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+
+namespace clang {
+using namespace sema;
+
+/// \brief A semantic tree transformation that allows one to transform one
+/// abstract syntax tree into another.
+///
+/// A new tree transformation is defined by creating a new subclass \c X of
+/// \c TreeTransform<X> and then overriding certain operations to provide
+/// behavior specific to that transformation. For example, template
+/// instantiation is implemented as a tree transformation where the
+/// transformation of TemplateTypeParmType nodes involves substituting the
+/// template arguments for their corresponding template parameters; a similar
+/// transformation is performed for non-type template parameters and
+/// template template parameters.
+///
+/// This tree-transformation template uses static polymorphism to allow
+/// subclasses to customize any of its operations. Thus, a subclass can
+/// override any of the transformation or rebuild operators by providing an
+/// operation with the same signature as the default implementation. The
+/// overriding function should not be virtual.
+///
+/// Semantic tree transformations are split into two stages, either of which
+/// can be replaced by a subclass. The "transform" step transforms an AST node
+/// or the parts of an AST node using the various transformation functions,
+/// then passes the pieces on to the "rebuild" step, which constructs a new AST
+/// node of the appropriate kind from the pieces. The default transformation
+/// routines recursively transform the operands to composite AST nodes (e.g.,
+/// the pointee type of a PointerType node) and, if any of those operand nodes
+/// were changed by the transformation, invokes the rebuild operation to create
+/// a new AST node.
+///
+/// Subclasses can customize the transformation at various levels. The
+/// most coarse-grained transformations involve replacing TransformType(),
+/// TransformExpr(), TransformDecl(), TransformNestedNameSpecifierLoc(),
+/// TransformTemplateName(), or TransformTemplateArgument() with entirely
+/// new implementations.
+///
+/// For more fine-grained transformations, subclasses can replace any of the
+/// \c TransformXXX functions (where XXX is the name of an AST node, e.g.,
+/// PointerType, StmtExpr) to alter the transformation. As mentioned previously,
+/// replacing TransformTemplateTypeParmType() allows template instantiation
+/// to substitute template arguments for their corresponding template
+/// parameters. Additionally, subclasses can override the \c RebuildXXX
+/// functions to control how AST nodes are rebuilt when their operands change.
+/// By default, \c TreeTransform will invoke semantic analysis to rebuild
+/// AST nodes. However, certain other tree transformations (e.g, cloning) may
+/// be able to use more efficient rebuild steps.
+///
+/// There are a handful of other functions that can be overridden, allowing one
+/// to avoid traversing nodes that don't need any transformation
+/// (\c AlreadyTransformed()), force rebuilding AST nodes even when their
+/// operands have not changed (\c AlwaysRebuild()), and customize the
+/// default locations and entity names used for type-checking
+/// (\c getBaseLocation(), \c getBaseEntity()).
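+///
+/// A minimal sketch (hypothetical subclass, not from the original source)
+/// of the intended usage pattern:
+/// \code
+///   class RebuildEverything : public TreeTransform<RebuildEverything> {
+///   public:
+///     RebuildEverything(Sema &SemaRef)
+///         : TreeTransform<RebuildEverything>(SemaRef) {}
+///
+///     // Rebuild every node, even when none of its children changed.
+///     bool AlwaysRebuild() { return true; }
+///   };
+/// \endcode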
+template<typename Derived>
+class TreeTransform {
+ /// \brief Private RAII object that helps us forget and then re-remember
+ /// the template argument corresponding to a partially-substituted parameter
+ /// pack.
+ class ForgetPartiallySubstitutedPackRAII {
+ Derived &Self;
+ TemplateArgument Old;
+
+ public:
+ ForgetPartiallySubstitutedPackRAII(Derived &Self) : Self(Self) {
+ Old = Self.ForgetPartiallySubstitutedPack();
+ }
+
+ ~ForgetPartiallySubstitutedPackRAII() {
+ Self.RememberPartiallySubstitutedPack(Old);
+ }
+ };
+
+protected:
+ Sema &SemaRef;
+
+ /// \brief The set of local declarations that have been transformed, for
+ /// cases where we are forced to build new declarations within the transformer
+ /// rather than in the subclass (e.g., lambda closure types).
+ llvm::DenseMap<Decl *, Decl *> TransformedLocalDecls;
+
+public:
+ /// \brief Initializes a new tree transformer.
+ TreeTransform(Sema &SemaRef) : SemaRef(SemaRef) { }
+
+ /// \brief Retrieves a reference to the derived class.
+ Derived &getDerived() { return static_cast<Derived&>(*this); }
+
+ /// \brief Retrieves a reference to the derived class.
+ const Derived &getDerived() const {
+ return static_cast<const Derived&>(*this);
+ }
+
+ static inline ExprResult Owned(Expr *E) { return E; }
+ static inline StmtResult Owned(Stmt *S) { return S; }
+
+ /// \brief Retrieves a reference to the semantic analysis object used for
+ /// this tree transform.
+ Sema &getSema() const { return SemaRef; }
+
+ /// \brief Whether the transformation should always rebuild AST nodes, even
+ /// if none of the children have changed.
+ ///
+ /// Subclasses may override this function to specify when the transformation
+ /// should rebuild all AST nodes.
+ ///
+ /// We must always rebuild all AST nodes when performing variadic template
+ /// pack expansion, in order to avoid violating the AST invariant that each
+ /// statement node appears at most once in its containing declaration.
+ bool AlwaysRebuild() { return SemaRef.ArgumentPackSubstitutionIndex != -1; }
+
+ /// \brief Returns the location of the entity being transformed, if that
+ /// information was not available elsewhere in the AST.
+ ///
+ /// By default, returns no source-location information. Subclasses can
+ /// provide an alternative implementation that provides better location
+ /// information.
+ SourceLocation getBaseLocation() { return SourceLocation(); }
+
+ /// \brief Returns the name of the entity being transformed, if that
+ /// information was not available elsewhere in the AST.
+ ///
+ /// By default, returns an empty name. Subclasses can provide an alternative
+ /// implementation with a more precise name.
+ DeclarationName getBaseEntity() { return DeclarationName(); }
+
+ /// \brief Sets the "base" location and entity when that
+ /// information is known based on another transformation.
+ ///
+ /// By default, the source location and entity are ignored. Subclasses can
+ /// override this function to provide a customized implementation.
+ void setBase(SourceLocation Loc, DeclarationName Entity) { }
+
+ /// \brief RAII object that temporarily sets the base location and entity
+ /// used for reporting diagnostics in types.
+ class TemporaryBase {
+ TreeTransform &Self;
+ SourceLocation OldLocation;
+ DeclarationName OldEntity;
+
+ public:
+ TemporaryBase(TreeTransform &Self, SourceLocation Location,
+ DeclarationName Entity) : Self(Self) {
+ OldLocation = Self.getDerived().getBaseLocation();
+ OldEntity = Self.getDerived().getBaseEntity();
+
+ if (Location.isValid())
+ Self.getDerived().setBase(Location, Entity);
+ }
+
+ ~TemporaryBase() {
+ Self.getDerived().setBase(OldLocation, OldEntity);
+ }
+ };
+
+ /// \brief Determine whether the given type \p T has already been
+ /// transformed.
+ ///
+ /// Subclasses can provide an alternative implementation of this routine
+ /// to short-circuit evaluation when it is known that a given type will
+ /// not change. For example, template instantiation need not traverse
+ /// non-dependent types.
+ bool AlreadyTransformed(QualType T) {
+ return T.isNull();
+ }
+
+ /// \brief Determine whether the given call argument should be dropped, e.g.,
+ /// because it is a default argument.
+ ///
+ /// Subclasses can provide an alternative implementation of this routine to
+ /// determine which kinds of call arguments get dropped. By default,
+ /// CXXDefaultArgument nodes are dropped (prior to transformation).
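+ ///
+ /// For example (illustrative, not from the original source): when
+ /// transforming a call to 'void f(int a, int b = 2)' written as 'f(1)',
+ /// the CXXDefaultArgExpr for 'b' is dropped here, so that the default
+ /// argument is instead rebuilt when the new call expression is formed.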
+ bool DropCallArgument(Expr *E) {
+ return E->isDefaultArgument();
+ }
+
+ /// \brief Determine whether we should expand a pack expansion with the
+ /// given set of parameter packs into separate arguments by repeatedly
+ /// transforming the pattern.
+ ///
+ /// By default, the transformer never tries to expand pack expansions.
+ /// Subclasses can override this routine to provide different behavior.
+ ///
+ /// \param EllipsisLoc The location of the ellipsis that identifies the
+ /// pack expansion.
+ ///
+ /// \param PatternRange The source range that covers the entire pattern of
+ /// the pack expansion.
+ ///
+ /// \param Unexpanded The set of unexpanded parameter packs within the
+ /// pattern.
+ ///
+ /// \param ShouldExpand Will be set to \c true if the transformer should
+ /// expand the corresponding pack expansions into separate arguments. When
+ /// set, \c NumExpansions must also be set.
+ ///
+ /// \param RetainExpansion Whether the caller should add an unexpanded
+ /// pack expansion after all of the expanded arguments. This is used
+ /// when extending explicitly-specified template argument packs per
+ /// C++0x [temp.arg.explicit]p9.
+ ///
+ /// \param NumExpansions The number of separate arguments that will be in
+ /// the expanded form of the corresponding pack expansion. This is both an
+ /// input and an output parameter, which can be set by the caller if the
+ /// number of expansions is known a priori (e.g., due to a prior substitution)
+ /// and will be set by the callee when the number of expansions is known.
+ /// The callee must set this value when \c ShouldExpand is \c true; it may
+ /// set this value in other cases.
+ ///
+ /// \returns true if an error occurred (e.g., because the parameter packs
+ /// are to be instantiated with arguments of different lengths), false
+ /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
+ /// must be set.
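+ ///
+ /// For example (illustrative, not from the original source): when
+ /// instantiating 'f(args...)' with a pack bound to three arguments, a
+ /// template instantiator would set \c ShouldExpand to \c true and
+ /// \c NumExpansions to 3, causing the pattern to be transformed three
+ /// times.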
+ bool TryExpandParameterPacks(SourceLocation EllipsisLoc,
+ SourceRange PatternRange,
+ ArrayRef<UnexpandedParameterPack> Unexpanded,
+ bool &ShouldExpand,
+ bool &RetainExpansion,
+ Optional<unsigned> &NumExpansions) {
+ ShouldExpand = false;
+ return false;
+ }
+
+ /// \brief "Forget" about the partially-substituted pack template argument,
+ /// when performing an instantiation that must preserve the parameter pack
+ /// use.
+ ///
+ /// This routine is meant to be overridden by the template instantiator.
+ TemplateArgument ForgetPartiallySubstitutedPack() {
+ return TemplateArgument();
+ }
+
+ /// \brief "Remember" the partially-substituted pack template argument
+ /// after performing an instantiation that must preserve the parameter pack
+ /// use.
+ ///
+ /// This routine is meant to be overridden by the template instantiator.
+ void RememberPartiallySubstitutedPack(TemplateArgument Arg) { }
+
+ /// \brief Note to the derived class when a function parameter pack is
+ /// being expanded.
+ void ExpandingFunctionParameterPack(ParmVarDecl *Pack) { }
+
+ /// \brief Transforms the given type into another type.
+ ///
+ /// By default, this routine transforms a type by creating a
+ /// TypeSourceInfo for it and delegating to the appropriate
+ /// function. This is expensive, but we don't mind, because
+ /// this method is deprecated anyway; all users should be
+ /// switched to storing TypeSourceInfos.
+ ///
+ /// \returns the transformed type.
+ QualType TransformType(QualType T);
+
+ /// \brief Transforms the given type-with-location into a new
+ /// type-with-location.
+ ///
+ /// By default, this routine transforms a type by delegating to the
+ /// appropriate TransformXXXType to build a new type. Subclasses
+ /// may override this function (to take over all type
+ /// transformations) or some set of the TransformXXXType functions
+ /// to alter the transformation.
+ TypeSourceInfo *TransformType(TypeSourceInfo *DI);
+
+ /// \brief Transform the given type-with-location into a new
+ /// type, collecting location information in the given builder
+ /// as necessary.
+ ///
+ QualType TransformType(TypeLocBuilder &TLB, TypeLoc TL);
+
+ /// \brief Transform the given statement.
+ ///
+ /// By default, this routine transforms a statement by delegating to the
+ /// appropriate TransformXXXStmt function to transform a specific kind of
+ /// statement or the TransformExpr() function to transform an expression.
+ /// Subclasses may override this function to transform statements using some
+ /// other mechanism.
+ ///
+ /// \returns the transformed statement.
+ StmtResult TransformStmt(Stmt *S);
+
+ /// \brief Transform the given OpenMP clause.
+ ///
+ /// By default, this routine transforms a clause by delegating to the
+ /// appropriate TransformOMPXXXClause function to transform a specific kind
+ /// of clause. Subclasses may override this function to transform clauses
+ ///
+ /// \returns the transformed OpenMP clause.
+ OMPClause *TransformOMPClause(OMPClause *S);
+
+ /// \brief Transform the given attribute.
+ ///
+ /// By default, this routine transforms an attribute by delegating to the
+ /// appropriate TransformXXXAttr function to transform a specific kind
+ /// of attribute. Subclasses may override this function to transform
+ /// attributed statements using some other mechanism.
+ ///
+ /// \returns the transformed attribute.
+ const Attr *TransformAttr(const Attr *S);
+
+/// \brief Transform the specified attribute.
+///
+/// Subclasses should override the transformation of attributes with a pragma
+/// spelling to transform expressions stored within the attribute.
+///
+/// \returns the transformed attribute.
+#define ATTR(X)
+#define PRAGMA_SPELLING_ATTR(X) \
+ const X##Attr *Transform##X##Attr(const X##Attr *R) { return R; }
+#include "clang/Basic/AttrList.inc"
+
+ /// \brief Transform the given expression.
+ ///
+ /// By default, this routine transforms an expression by delegating to the
+ /// appropriate TransformXXXExpr function to build a new expression.
+ /// Subclasses may override this function to transform expressions using some
+ /// other mechanism.
+ ///
+ /// \returns the transformed expression.
+ ExprResult TransformExpr(Expr *E);
+
+ /// \brief Transform the given initializer.
+ ///
+ /// By default, this routine transforms an initializer by stripping off the
+ /// semantic nodes added by initialization, then passing the result to
+ /// TransformExpr or TransformExprs.
+ ///
+ /// \returns the transformed initializer.
+ ExprResult TransformInitializer(Expr *Init, bool NotCopyInit);
+
+ /// \brief Transform the given list of expressions.
+ ///
+ /// This routine transforms a list of expressions by invoking
+ /// \c TransformExpr() for each subexpression. However, it also provides
+ /// support for variadic templates by expanding any pack expansions (if the
+ /// derived class permits such expansion) along the way. When pack expansions
+ /// are present, the number of outputs may not equal the number of inputs.
+ ///
+ /// \param Inputs The set of expressions to be transformed.
+ ///
+ /// \param NumInputs The number of expressions in \c Inputs.
+ ///
+ /// \param IsCall If \c true, then this transform is being performed on
+ /// function-call arguments, and any arguments that should be dropped will
+ /// be.
+ ///
+ /// \param Outputs The transformed input expressions will be added to this
+ /// vector.
+ ///
+ /// \param ArgChanged If non-NULL, will be set \c true if any argument changed
+ /// due to transformation.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool TransformExprs(Expr *const *Inputs, unsigned NumInputs, bool IsCall,
+ SmallVectorImpl<Expr *> &Outputs,
+ bool *ArgChanged = nullptr);
+
+ /// \brief Transform the given declaration, which is referenced from a type
+ /// or expression.
+ ///
+ /// By default, acts as the identity function on declarations, unless the
+ /// transformer has had to transform the declaration itself. Subclasses
+ /// may override this function to provide alternate behavior.
+ Decl *TransformDecl(SourceLocation Loc, Decl *D) {
+ llvm::DenseMap<Decl *, Decl *>::iterator Known
+ = TransformedLocalDecls.find(D);
+ if (Known != TransformedLocalDecls.end())
+ return Known->second;
+
+ return D;
+ }
+
+ /// \brief Transform the attributes associated with the given declaration and
+ /// place them on the new declaration.
+ ///
+ /// By default, this operation does nothing. Subclasses may override this
+ /// behavior to transform attributes.
+ void transformAttrs(Decl *Old, Decl *New) { }
+
+ /// \brief Note that a local declaration has been transformed by this
+ /// transformer.
+ ///
+ /// Local declarations are typically transformed via a call to
+ /// TransformDefinition. However, in some cases (e.g., lambda expressions),
+ /// the transformer itself has to transform the declarations. This routine
+ /// can be overridden by a subclass that keeps track of such mappings.
+ void transformedLocalDecl(Decl *Old, Decl *New) {
+ TransformedLocalDecls[Old] = New;
+ }
+
+ /// \brief Transform the definition of the given declaration.
+ ///
+ /// By default, invokes TransformDecl() to transform the declaration.
+ /// Subclasses may override this function to provide alternate behavior.
+ Decl *TransformDefinition(SourceLocation Loc, Decl *D) {
+ return getDerived().TransformDecl(Loc, D);
+ }
+
+ /// \brief Transform the given declaration, which was the first part of a
+ /// nested-name-specifier in a member access expression.
+ ///
+ /// This specific declaration transformation only applies to the first
+ /// identifier in a nested-name-specifier of a member access expression, e.g.,
+ /// the \c T in \c x->T::member
+ ///
+ /// By default, invokes TransformDecl() to transform the declaration.
+ /// Subclasses may override this function to provide alternate behavior.
+ NamedDecl *TransformFirstQualifierInScope(NamedDecl *D, SourceLocation Loc) {
+ return cast_or_null<NamedDecl>(getDerived().TransformDecl(Loc, D));
+ }
+
+ /// \brief Transform the given nested-name-specifier with source-location
+ /// information.
+ ///
+ /// By default, transforms all of the types and declarations within the
+ /// nested-name-specifier. Subclasses may override this function to provide
+ /// alternate behavior.
+ NestedNameSpecifierLoc
+ TransformNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
+ QualType ObjectType = QualType(),
+ NamedDecl *FirstQualifierInScope = nullptr);
+
+ /// \brief Transform the given declaration name.
+ ///
+ /// By default, transforms the types of conversion function, constructor,
+ /// and destructor names and then (if needed) rebuilds the declaration name.
+ /// Identifiers and selectors are returned unmodified. Subclasses may
+ /// override this function to provide alternate behavior.
+ DeclarationNameInfo
+ TransformDeclarationNameInfo(const DeclarationNameInfo &NameInfo);
+
+ /// \brief Transform the given template name.
+ ///
+ /// \param SS The nested-name-specifier that qualifies the template
+ /// name. This nested-name-specifier must already have been transformed.
+ ///
+ /// \param Name The template name to transform.
+ ///
+ /// \param NameLoc The source location of the template name.
+ ///
+ /// \param ObjectType If we're translating a template name within a member
+ /// access expression, this is the type of the object whose member template
+ /// is being referenced.
+ ///
+ /// \param FirstQualifierInScope If the first part of a nested-name-specifier
+ /// also refers to a name within the current (lexical) scope, this is the
+ /// declaration it refers to.
+ ///
+ /// By default, transforms the template name by transforming the declarations
+ /// and nested-name-specifiers that occur within the template name.
+ /// Subclasses may override this function to provide alternate behavior.
+ TemplateName
+ TransformTemplateName(CXXScopeSpec &SS, TemplateName Name,
+ SourceLocation NameLoc,
+ QualType ObjectType = QualType(),
+ NamedDecl *FirstQualifierInScope = nullptr);
+
+ /// \brief Transform the given template argument.
+ ///
+ /// By default, this operation transforms the type, expression, or
+ /// declaration stored within the template argument and constructs a
+ /// new template argument from the transformed result. Subclasses may
+ /// override this function to provide alternate behavior.
+ ///
+ /// Returns true if there was an error.
+ bool TransformTemplateArgument(const TemplateArgumentLoc &Input,
+ TemplateArgumentLoc &Output,
+ bool Uneval = false);
+
+ /// \brief Transform the given set of template arguments.
+ ///
+ /// By default, this operation transforms all of the template arguments
+ /// in the input set using \c TransformTemplateArgument(), and appends
+ /// the transformed arguments to the output list.
+ ///
+ /// Note that this overload of \c TransformTemplateArguments() is merely
+ /// a convenience function. Subclasses that wish to override this behavior
+ /// should override the iterator-based member template version.
+ ///
+ /// \param Inputs The set of template arguments to be transformed.
+ ///
+ /// \param NumInputs The number of template arguments in \p Inputs.
+ ///
+ /// \param Outputs The set of transformed template arguments output by this
+ /// routine.
+ ///
+ /// Returns true if an error occurred.
+ bool TransformTemplateArguments(const TemplateArgumentLoc *Inputs,
+ unsigned NumInputs,
+ TemplateArgumentListInfo &Outputs,
+ bool Uneval = false) {
+ return TransformTemplateArguments(Inputs, Inputs + NumInputs, Outputs,
+ Uneval);
+ }
+
+ /// \brief Transform the given set of template arguments.
+ ///
+ /// By default, this operation transforms all of the template arguments
+ /// in the input set using \c TransformTemplateArgument(), and appends
+ /// the transformed arguments to the output list.
+ ///
+ /// \param First An iterator to the first template argument.
+ ///
+ /// \param Last An iterator one step past the last template argument.
+ ///
+ /// \param Outputs The set of transformed template arguments output by this
+ /// routine.
+ ///
+ /// Returns true if an error occurred.
+ template<typename InputIterator>
+ bool TransformTemplateArguments(InputIterator First,
+ InputIterator Last,
+ TemplateArgumentListInfo &Outputs,
+ bool Uneval = false);
+
+ /// \brief Fakes up a TemplateArgumentLoc for a given TemplateArgument.
+ void InventTemplateArgumentLoc(const TemplateArgument &Arg,
+ TemplateArgumentLoc &ArgLoc);
+
+ /// \brief Fakes up a TypeSourceInfo for a type.
+ TypeSourceInfo *InventTypeSourceInfo(QualType T) {
+ return SemaRef.Context.getTrivialTypeSourceInfo(T,
+ getDerived().getBaseLocation());
+ }
+
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ QualType Transform##CLASS##Type(TypeLocBuilder &TLB, CLASS##TypeLoc T);
+#include "clang/AST/TypeLocNodes.def"
+
+ template<typename Fn>
+ QualType TransformFunctionProtoType(TypeLocBuilder &TLB,
+ FunctionProtoTypeLoc TL,
+ CXXRecordDecl *ThisContext,
+ unsigned ThisTypeQuals,
+ Fn TransformExceptionSpec);
+
+ bool TransformExceptionSpec(SourceLocation Loc,
+ FunctionProtoType::ExceptionSpecInfo &ESI,
+ SmallVectorImpl<QualType> &Exceptions,
+ bool &Changed);
+
+ StmtResult TransformSEHHandler(Stmt *Handler);
+
+ QualType
+ TransformTemplateSpecializationType(TypeLocBuilder &TLB,
+ TemplateSpecializationTypeLoc TL,
+ TemplateName Template);
+
+ QualType
+ TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
+ DependentTemplateSpecializationTypeLoc TL,
+ TemplateName Template,
+ CXXScopeSpec &SS);
+
+ QualType TransformDependentTemplateSpecializationType(
+ TypeLocBuilder &TLB, DependentTemplateSpecializationTypeLoc TL,
+ NestedNameSpecifierLoc QualifierLoc);
+
+ /// \brief Transforms the parameters of a function type into the
+ /// given vectors.
+ ///
+ /// The result vectors should be kept in sync; null entries in the
+ /// variables vector are acceptable.
+ ///
+ /// Return true on error.
+ bool TransformFunctionTypeParams(SourceLocation Loc,
+ ParmVarDecl **Params, unsigned NumParams,
+ const QualType *ParamTypes,
+ SmallVectorImpl<QualType> &PTypes,
+ SmallVectorImpl<ParmVarDecl*> *PVars);
+
+ /// \brief Transforms a single function-type parameter. Return null
+ /// on error.
+ ///
+ /// \param indexAdjustment - A number to add to the parameter's
+ /// scope index; can be negative
+ ParmVarDecl *TransformFunctionTypeParam(ParmVarDecl *OldParm,
+ int indexAdjustment,
+ Optional<unsigned> NumExpansions,
+ bool ExpectParameterPack);
+
+ QualType TransformReferenceType(TypeLocBuilder &TLB, ReferenceTypeLoc TL);
+
+ StmtResult TransformCompoundStmt(CompoundStmt *S, bool IsStmtExpr);
+ ExprResult TransformCXXNamedCastExpr(CXXNamedCastExpr *E);
+
+ TemplateParameterList *TransformTemplateParameterList(
+ TemplateParameterList *TPL) {
+ return TPL;
+ }
+
+ ExprResult TransformAddressOfOperand(Expr *E);
+
+ ExprResult TransformDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E,
+ bool IsAddressOfOperand,
+ TypeSourceInfo **RecoveryTSI);
+
+ ExprResult TransformParenDependentScopeDeclRefExpr(
+ ParenExpr *PE, DependentScopeDeclRefExpr *DRE, bool IsAddressOfOperand,
+ TypeSourceInfo **RecoveryTSI);
+
+ StmtResult TransformOMPExecutableDirective(OMPExecutableDirective *S);
+
+// FIXME: We use LLVM_ATTRIBUTE_NOINLINE because inlining causes a ridiculous
+// amount of stack usage with clang.
+#define STMT(Node, Parent) \
+ LLVM_ATTRIBUTE_NOINLINE \
+ StmtResult Transform##Node(Node *S);
+#define EXPR(Node, Parent) \
+ LLVM_ATTRIBUTE_NOINLINE \
+ ExprResult Transform##Node(Node *E);
+#define ABSTRACT_STMT(Stmt)
+#include "clang/AST/StmtNodes.inc"
+
+#define OPENMP_CLAUSE(Name, Class) \
+ LLVM_ATTRIBUTE_NOINLINE \
+ OMPClause *Transform ## Class(Class *S);
+#include "clang/Basic/OpenMPKinds.def"
+
+ /// \brief Build a new pointer type given its pointee type.
+ ///
+ /// By default, performs semantic analysis when building the pointer type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildPointerType(QualType PointeeType, SourceLocation Sigil);
+
+ /// \brief Build a new block pointer type given its pointee type.
+ ///
+ /// By default, performs semantic analysis when building the block pointer
+ /// type. Subclasses may override this routine to provide different behavior.
+ QualType RebuildBlockPointerType(QualType PointeeType, SourceLocation Sigil);
+
+ /// \brief Build a new reference type given the type it references.
+ ///
+ /// By default, performs semantic analysis when building the
+ /// reference type. Subclasses may override this routine to provide
+ /// different behavior.
+ ///
+ /// \param LValue whether the type was written with an lvalue sigil
+ /// or an rvalue sigil.
+ QualType RebuildReferenceType(QualType ReferentType,
+ bool LValue,
+ SourceLocation Sigil);
+
+ /// \brief Build a new member pointer type given the pointee type and the
+ /// class type it refers into.
+ ///
+ /// By default, performs semantic analysis when building the member pointer
+ /// type. Subclasses may override this routine to provide different behavior.
+ QualType RebuildMemberPointerType(QualType PointeeType, QualType ClassType,
+ SourceLocation Sigil);
+
+ /// \brief Build an Objective-C object type.
+ ///
+ /// By default, performs semantic analysis when building the object type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildObjCObjectType(QualType BaseType,
+ SourceLocation Loc,
+ SourceLocation TypeArgsLAngleLoc,
+ ArrayRef<TypeSourceInfo *> TypeArgs,
+ SourceLocation TypeArgsRAngleLoc,
+ SourceLocation ProtocolLAngleLoc,
+ ArrayRef<ObjCProtocolDecl *> Protocols,
+ ArrayRef<SourceLocation> ProtocolLocs,
+ SourceLocation ProtocolRAngleLoc);
+
+ /// \brief Build a new Objective-C object pointer type given the pointee type.
+ ///
+ /// By default, directly builds the pointer type, with no additional semantic
+ /// analysis.
+ QualType RebuildObjCObjectPointerType(QualType PointeeType,
+ SourceLocation Star);
+
+ /// \brief Build a new array type given the element type, size
+ /// modifier, size of the array (if known), size expression, and index type
+ /// qualifiers.
+ ///
+ /// By default, performs semantic analysis when building the array type.
+ /// Subclasses may override this routine to provide different behavior.
+ /// Also by default, all of the other Rebuild*Array routines delegate to
+ /// this routine to build the array type.
+ QualType RebuildArrayType(QualType ElementType,
+ ArrayType::ArraySizeModifier SizeMod,
+ const llvm::APInt *Size,
+ Expr *SizeExpr,
+ unsigned IndexTypeQuals,
+ SourceRange BracketsRange);
+
+ /// \brief Build a new constant array type given the element type, size
+ /// modifier, (known) size of the array, and index type qualifiers.
+ ///
+ /// By default, performs semantic analysis when building the array type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildConstantArrayType(QualType ElementType,
+ ArrayType::ArraySizeModifier SizeMod,
+ const llvm::APInt &Size,
+ unsigned IndexTypeQuals,
+ SourceRange BracketsRange);
+
+ /// \brief Build a new incomplete array type given the element type, size
+ /// modifier, and index type qualifiers.
+ ///
+ /// By default, performs semantic analysis when building the array type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildIncompleteArrayType(QualType ElementType,
+ ArrayType::ArraySizeModifier SizeMod,
+ unsigned IndexTypeQuals,
+ SourceRange BracketsRange);
+
+ /// \brief Build a new variable-length array type given the element type,
+ /// size modifier, size expression, and index type qualifiers.
+ ///
+ /// By default, performs semantic analysis when building the array type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildVariableArrayType(QualType ElementType,
+ ArrayType::ArraySizeModifier SizeMod,
+ Expr *SizeExpr,
+ unsigned IndexTypeQuals,
+ SourceRange BracketsRange);
+
+ /// \brief Build a new dependent-sized array type given the element type,
+ /// size modifier, size expression, and index type qualifiers.
+ ///
+ /// By default, performs semantic analysis when building the array type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildDependentSizedArrayType(QualType ElementType,
+ ArrayType::ArraySizeModifier SizeMod,
+ Expr *SizeExpr,
+ unsigned IndexTypeQuals,
+ SourceRange BracketsRange);
+
+ /// \brief Build a new vector type given the element type and
+ /// number of elements.
+ ///
+ /// By default, performs semantic analysis when building the vector type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildVectorType(QualType ElementType, unsigned NumElements,
+ VectorType::VectorKind VecKind);
+
+ /// \brief Build a new extended vector type given the element type and
+ /// number of elements.
+ ///
+ /// By default, performs semantic analysis when building the vector type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildExtVectorType(QualType ElementType, unsigned NumElements,
+ SourceLocation AttributeLoc);
+
+ /// \brief Build a new potentially dependently-sized extended vector type
+ /// given the element type and number of elements.
+ ///
+ /// By default, performs semantic analysis when building the vector type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildDependentSizedExtVectorType(QualType ElementType,
+ Expr *SizeExpr,
+ SourceLocation AttributeLoc);
+
+ /// \brief Build a new function type.
+ ///
+ /// By default, performs semantic analysis when building the function type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildFunctionProtoType(QualType T,
+ MutableArrayRef<QualType> ParamTypes,
+ const FunctionProtoType::ExtProtoInfo &EPI);
+
+ /// \brief Build a new unprototyped function type.
+ QualType RebuildFunctionNoProtoType(QualType ResultType);
+
+ /// \brief Rebuild an unresolved typename type, given the decl that
+ /// the UnresolvedUsingTypenameDecl was transformed to.
+ QualType RebuildUnresolvedUsingType(Decl *D);
+
+ /// \brief Build a new typedef type.
+ QualType RebuildTypedefType(TypedefNameDecl *Typedef) {
+ return SemaRef.Context.getTypeDeclType(Typedef);
+ }
+
+ /// \brief Build a new class/struct/union type.
+ QualType RebuildRecordType(RecordDecl *Record) {
+ return SemaRef.Context.getTypeDeclType(Record);
+ }
+
+ /// \brief Build a new Enum type.
+ QualType RebuildEnumType(EnumDecl *Enum) {
+ return SemaRef.Context.getTypeDeclType(Enum);
+ }
+
+ /// \brief Build a new typeof(expr) type.
+ ///
+ /// By default, performs semantic analysis when building the typeof type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildTypeOfExprType(Expr *Underlying, SourceLocation Loc);
+
+ /// \brief Build a new typeof(type) type.
+ ///
+ /// By default, builds a new TypeOfType with the given underlying type.
+ QualType RebuildTypeOfType(QualType Underlying);
+
+ /// \brief Build a new unary transform type.
+ QualType RebuildUnaryTransformType(QualType BaseType,
+ UnaryTransformType::UTTKind UKind,
+ SourceLocation Loc);
+
+ /// \brief Build a new C++11 decltype type.
+ ///
+ /// By default, performs semantic analysis when building the decltype type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildDecltypeType(Expr *Underlying, SourceLocation Loc);
+
+ /// \brief Build a new C++11 auto type.
+ ///
+ /// By default, builds a new AutoType with the given deduced type.
+ QualType RebuildAutoType(QualType Deduced, AutoTypeKeyword Keyword) {
+ // Note, IsDependent is always false here: we implicitly convert an 'auto'
+ // which has been deduced to a dependent type into an undeduced 'auto', so
+ // that we'll retry deduction after the transformation.
+ return SemaRef.Context.getAutoType(Deduced, Keyword,
+ /*IsDependent*/ false);
+ }
+
+ /// \brief Build a new template specialization type.
+ ///
+ /// By default, performs semantic analysis when building the template
+ /// specialization type. Subclasses may override this routine to provide
+ /// different behavior.
+ QualType RebuildTemplateSpecializationType(TemplateName Template,
+ SourceLocation TemplateLoc,
+ TemplateArgumentListInfo &Args);
+
+ /// \brief Build a new parenthesized type.
+ ///
+ /// By default, builds a new ParenType type from the inner type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildParenType(QualType InnerType) {
+ return SemaRef.Context.getParenType(InnerType);
+ }
+
+ /// \brief Build a new qualified name type.
+ ///
+ /// By default, builds a new ElaboratedType type from the keyword,
+ /// the nested-name-specifier and the named type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildElaboratedType(SourceLocation KeywordLoc,
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifierLoc QualifierLoc,
+ QualType Named) {
+ return SemaRef.Context.getElaboratedType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
+ Named);
+ }
+
+ /// \brief Build a new typename type that refers to a template-id.
+ ///
+ /// By default, builds a new DependentTemplateSpecializationType (or an
+ /// elaborated specialization type) from the nested-name-specifier and the
+ /// template-id. Subclasses may override this routine to provide different
+ /// behavior.
+ QualType RebuildDependentTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifierLoc QualifierLoc,
+ const IdentifierInfo *Name,
+ SourceLocation NameLoc,
+ TemplateArgumentListInfo &Args) {
+ // Rebuild the template name.
+ // TODO: avoid TemplateName abstraction
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ TemplateName InstName
+ = getDerived().RebuildTemplateName(SS, *Name, NameLoc, QualType(),
+ nullptr);
+
+ if (InstName.isNull())
+ return QualType();
+
+ // If it's still dependent, make a dependent specialization.
+ if (InstName.getAsDependentTemplateName())
+ return SemaRef.Context.getDependentTemplateSpecializationType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
+ Name,
+ Args);
+
+ // Otherwise, make an elaborated type wrapping a non-dependent
+ // specialization.
+ QualType T =
+ getDerived().RebuildTemplateSpecializationType(InstName, NameLoc, Args);
+ if (T.isNull()) return QualType();
+
+ if (Keyword == ETK_None && QualifierLoc.getNestedNameSpecifier() == nullptr)
+ return T;
+
+ return SemaRef.Context.getElaboratedType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
+ T);
+ }
+
+ /// \brief Build a new typename type that refers to an identifier.
+ ///
+ /// By default, performs semantic analysis when building the typename type
+ /// (or elaborated type). Subclasses may override this routine to provide
+ /// different behavior.
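+ ///
+ /// For example (illustrative, not from the original source): given
+ /// 'typename T::type' where T now refers to a concrete class, the name is
+ /// resolved via CheckTypenameType below; given 'enum T::E', the tag is
+ /// looked up and wrapped in an ElaboratedType instead.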
+ QualType RebuildDependentNameType(ElaboratedTypeKeyword Keyword,
+ SourceLocation KeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ const IdentifierInfo *Id,
+ SourceLocation IdLoc) {
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+
+ if (QualifierLoc.getNestedNameSpecifier()->isDependent()) {
+ // If the name is still dependent, just build a new dependent name type.
+ if (!SemaRef.computeDeclContext(SS))
+ return SemaRef.Context.getDependentNameType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
+ Id);
+ }
+
+ if (Keyword == ETK_None || Keyword == ETK_Typename)
+ return SemaRef.CheckTypenameType(Keyword, KeywordLoc, QualifierLoc,
+ *Id, IdLoc);
+
+ TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForKeyword(Keyword);
+
+ // We had a dependent elaborated-type-specifier that has been transformed
+ // into a non-dependent elaborated-type-specifier. Find the tag we're
+ // referring to.
+ LookupResult Result(SemaRef, Id, IdLoc, Sema::LookupTagName);
+ DeclContext *DC = SemaRef.computeDeclContext(SS, false);
+ if (!DC)
+ return QualType();
+
+ if (SemaRef.RequireCompleteDeclContext(SS, DC))
+ return QualType();
+
+ TagDecl *Tag = nullptr;
+ SemaRef.LookupQualifiedName(Result, DC);
+ switch (Result.getResultKind()) {
+ case LookupResult::NotFound:
+ case LookupResult::NotFoundInCurrentInstantiation:
+ break;
+
+ case LookupResult::Found:
+ Tag = Result.getAsSingle<TagDecl>();
+ break;
+
+ case LookupResult::FoundOverloaded:
+ case LookupResult::FoundUnresolvedValue:
+ llvm_unreachable("Tag lookup cannot find non-tags");
+
+ case LookupResult::Ambiguous:
+ // Let the LookupResult structure handle ambiguities.
+ return QualType();
+ }
+
+ if (!Tag) {
+ // Check where the name exists but isn't a tag type and use that to emit
+ // better diagnostics.
+ LookupResult Result(SemaRef, Id, IdLoc, Sema::LookupTagName);
+ SemaRef.LookupQualifiedName(Result, DC);
+ switch (Result.getResultKind()) {
+ case LookupResult::Found:
+ case LookupResult::FoundOverloaded:
+ case LookupResult::FoundUnresolvedValue: {
+ NamedDecl *SomeDecl = Result.getRepresentativeDecl();
+ unsigned Kind = 0;
+ if (isa<TypedefDecl>(SomeDecl)) Kind = 1;
+ else if (isa<TypeAliasDecl>(SomeDecl)) Kind = 2;
+ else if (isa<ClassTemplateDecl>(SomeDecl)) Kind = 3;
+ SemaRef.Diag(IdLoc, diag::err_tag_reference_non_tag) << Kind;
+ SemaRef.Diag(SomeDecl->getLocation(), diag::note_declared_at);
+ break;
+ }
+ default:
+ SemaRef.Diag(IdLoc, diag::err_not_tag_in_scope)
+ << Kind << Id << DC << QualifierLoc.getSourceRange();
+ break;
+ }
+ return QualType();
+ }
+
+ if (!SemaRef.isAcceptableTagRedeclaration(Tag, Kind, /*isDefinition*/false,
+ IdLoc, Id)) {
+ SemaRef.Diag(KeywordLoc, diag::err_use_with_wrong_tag) << Id;
+ SemaRef.Diag(Tag->getLocation(), diag::note_previous_use);
+ return QualType();
+ }
+
+ // Build the elaborated-type-specifier type.
+ QualType T = SemaRef.Context.getTypeDeclType(Tag);
+ return SemaRef.Context.getElaboratedType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
+ T);
+ }
+
+ /// \brief Build a new pack expansion type.
+ ///
+ /// By default, builds a new PackExpansionType type from the given pattern.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildPackExpansionType(QualType Pattern,
+ SourceRange PatternRange,
+ SourceLocation EllipsisLoc,
+ Optional<unsigned> NumExpansions) {
+ return getSema().CheckPackExpansion(Pattern, PatternRange, EllipsisLoc,
+ NumExpansions);
+ }
+
+ /// \brief Build a new atomic type given its value type.
+ ///
+ /// By default, performs semantic analysis when building the atomic type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildAtomicType(QualType ValueType, SourceLocation KWLoc);
+
+ /// \brief Build a new template name given a nested name specifier, a flag
+ /// indicating whether the "template" keyword was provided, and the template
+ /// that the template name refers to.
+ ///
+ /// By default, builds the new template name directly. Subclasses may override
+ /// this routine to provide different behavior.
+ TemplateName RebuildTemplateName(CXXScopeSpec &SS,
+ bool TemplateKW,
+ TemplateDecl *Template);
+
+ /// \brief Build a new template name given a nested name specifier and the
+ /// name that is referred to as a template.
+ ///
+ /// By default, performs semantic analysis to determine whether the name can
+ /// be resolved to a specific template, then builds the appropriate kind of
+ /// template name. Subclasses may override this routine to provide different
+ /// behavior.
+ TemplateName RebuildTemplateName(CXXScopeSpec &SS,
+ const IdentifierInfo &Name,
+ SourceLocation NameLoc,
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope);
+
+ /// \brief Build a new template name given a nested name specifier and the
+ /// overloaded operator name that is referred to as a template.
+ ///
+ /// By default, performs semantic analysis to determine whether the name can
+ /// be resolved to a specific template, then builds the appropriate kind of
+ /// template name. Subclasses may override this routine to provide different
+ /// behavior.
+ TemplateName RebuildTemplateName(CXXScopeSpec &SS,
+ OverloadedOperatorKind Operator,
+ SourceLocation NameLoc,
+ QualType ObjectType);
+
+ /// \brief Build a new template name given a template template parameter pack
+ /// and the argument pack that it should be substituted with.
+ ///
+ /// By default, builds the new template name directly. Subclasses may
+ /// override this routine to provide different behavior.
+ TemplateName RebuildTemplateName(TemplateTemplateParmDecl *Param,
+ const TemplateArgument &ArgPack) {
+ return getSema().Context.getSubstTemplateTemplateParmPack(Param, ArgPack);
+ }
+
+ /// \brief Build a new compound statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildCompoundStmt(SourceLocation LBraceLoc,
+ MultiStmtArg Statements,
+ SourceLocation RBraceLoc,
+ bool IsStmtExpr) {
+ return getSema().ActOnCompoundStmt(LBraceLoc, RBraceLoc, Statements,
+ IsStmtExpr);
+ }
+
+ /// \brief Build a new case statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildCaseStmt(SourceLocation CaseLoc,
+ Expr *LHS,
+ SourceLocation EllipsisLoc,
+ Expr *RHS,
+ SourceLocation ColonLoc) {
+ return getSema().ActOnCaseStmt(CaseLoc, LHS, EllipsisLoc, RHS,
+ ColonLoc);
+ }
+
+ /// \brief Attach the body to a new case statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildCaseStmtBody(Stmt *S, Stmt *Body) {
+ getSema().ActOnCaseStmtBody(S, Body);
+ return S;
+ }
+
+ /// \brief Build a new default statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildDefaultStmt(SourceLocation DefaultLoc,
+ SourceLocation ColonLoc,
+ Stmt *SubStmt) {
+ return getSema().ActOnDefaultStmt(DefaultLoc, ColonLoc, SubStmt,
+ /*CurScope=*/nullptr);
+ }
+
+ /// \brief Build a new label statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildLabelStmt(SourceLocation IdentLoc, LabelDecl *L,
+ SourceLocation ColonLoc, Stmt *SubStmt) {
+ return SemaRef.ActOnLabelStmt(IdentLoc, L, ColonLoc, SubStmt);
+ }
+
+ /// \brief Build a new attributed statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildAttributedStmt(SourceLocation AttrLoc,
+ ArrayRef<const Attr*> Attrs,
+ Stmt *SubStmt) {
+ return SemaRef.ActOnAttributedStmt(AttrLoc, Attrs, SubStmt);
+ }
+
+ /// \brief Build a new "if" statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildIfStmt(SourceLocation IfLoc, Sema::FullExprArg Cond,
+ VarDecl *CondVar, Stmt *Then,
+ SourceLocation ElseLoc, Stmt *Else) {
+ return getSema().ActOnIfStmt(IfLoc, Cond, CondVar, Then, ElseLoc, Else);
+ }
+
+ /// \brief Start building a new switch statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildSwitchStmtStart(SourceLocation SwitchLoc,
+ Expr *Cond, VarDecl *CondVar) {
+ return getSema().ActOnStartOfSwitchStmt(SwitchLoc, Cond,
+ CondVar);
+ }
+
+ /// \brief Attach the body to the switch statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildSwitchStmtBody(SourceLocation SwitchLoc,
+ Stmt *Switch, Stmt *Body) {
+ return getSema().ActOnFinishSwitchStmt(SwitchLoc, Switch, Body);
+ }
+
+ /// \brief Build a new while statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildWhileStmt(SourceLocation WhileLoc, Sema::FullExprArg Cond,
+ VarDecl *CondVar, Stmt *Body) {
+ return getSema().ActOnWhileStmt(WhileLoc, Cond, CondVar, Body);
+ }
+
+ /// \brief Build a new do-while statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildDoStmt(SourceLocation DoLoc, Stmt *Body,
+ SourceLocation WhileLoc, SourceLocation LParenLoc,
+ Expr *Cond, SourceLocation RParenLoc) {
+ return getSema().ActOnDoStmt(DoLoc, Body, WhileLoc, LParenLoc,
+ Cond, RParenLoc);
+ }
+
+ /// \brief Build a new for statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
+ Stmt *Init, Sema::FullExprArg Cond,
+ VarDecl *CondVar, Sema::FullExprArg Inc,
+ SourceLocation RParenLoc, Stmt *Body) {
+ return getSema().ActOnForStmt(ForLoc, LParenLoc, Init, Cond,
+ CondVar, Inc, RParenLoc, Body);
+ }
+
+ /// \brief Build a new goto statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc,
+ LabelDecl *Label) {
+ return getSema().ActOnGotoStmt(GotoLoc, LabelLoc, Label);
+ }
+
+ /// \brief Build a new indirect goto statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildIndirectGotoStmt(SourceLocation GotoLoc,
+ SourceLocation StarLoc,
+ Expr *Target) {
+ return getSema().ActOnIndirectGotoStmt(GotoLoc, StarLoc, Target);
+ }
+
+ /// \brief Build a new return statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildReturnStmt(SourceLocation ReturnLoc, Expr *Result) {
+ return getSema().BuildReturnStmt(ReturnLoc, Result);
+ }
+
+ /// \brief Build a new declaration statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildDeclStmt(MutableArrayRef<Decl *> Decls,
+ SourceLocation StartLoc, SourceLocation EndLoc) {
+ Sema::DeclGroupPtrTy DG = getSema().BuildDeclaratorGroup(Decls);
+ return getSema().ActOnDeclStmt(DG, StartLoc, EndLoc);
+ }
+
+ /// \brief Build a new inline asm statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
+ bool IsVolatile, unsigned NumOutputs,
+ unsigned NumInputs, IdentifierInfo **Names,
+ MultiExprArg Constraints, MultiExprArg Exprs,
+ Expr *AsmString, MultiExprArg Clobbers,
+ SourceLocation RParenLoc) {
+ return getSema().ActOnGCCAsmStmt(AsmLoc, IsSimple, IsVolatile, NumOutputs,
+ NumInputs, Names, Constraints, Exprs,
+ AsmString, Clobbers, RParenLoc);
+ }
+
+ /// \brief Build a new MS style inline asm statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
+ ArrayRef<Token> AsmToks,
+ StringRef AsmString,
+ unsigned NumOutputs, unsigned NumInputs,
+ ArrayRef<StringRef> Constraints,
+ ArrayRef<StringRef> Clobbers,
+ ArrayRef<Expr*> Exprs,
+ SourceLocation EndLoc) {
+ return getSema().ActOnMSAsmStmt(AsmLoc, LBraceLoc, AsmToks, AsmString,
+ NumOutputs, NumInputs,
+ Constraints, Clobbers, Exprs, EndLoc);
+ }
+
+ /// \brief Build a new co_return statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildCoreturnStmt(SourceLocation CoreturnLoc, Expr *Result) {
+ return getSema().BuildCoreturnStmt(CoreturnLoc, Result);
+ }
+
+ /// \brief Build a new co_await expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCoawaitExpr(SourceLocation CoawaitLoc, Expr *Result) {
+ return getSema().BuildCoawaitExpr(CoawaitLoc, Result);
+ }
+
+ /// \brief Build a new co_yield expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCoyieldExpr(SourceLocation CoyieldLoc, Expr *Result) {
+ return getSema().BuildCoyieldExpr(CoyieldLoc, Result);
+ }
+
+ /// \brief Build a new Objective-C \@try statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildObjCAtTryStmt(SourceLocation AtLoc,
+ Stmt *TryBody,
+ MultiStmtArg CatchStmts,
+ Stmt *Finally) {
+ return getSema().ActOnObjCAtTryStmt(AtLoc, TryBody, CatchStmts,
+ Finally);
+ }
+
+ /// \brief Rebuild an Objective-C exception declaration.
+ ///
+ /// By default, performs semantic analysis to build the new declaration.
+ /// Subclasses may override this routine to provide different behavior.
+ VarDecl *RebuildObjCExceptionDecl(VarDecl *ExceptionDecl,
+ TypeSourceInfo *TInfo, QualType T) {
+ return getSema().BuildObjCExceptionDecl(TInfo, T,
+ ExceptionDecl->getInnerLocStart(),
+ ExceptionDecl->getLocation(),
+ ExceptionDecl->getIdentifier());
+ }
+
+ /// \brief Build a new Objective-C \@catch statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildObjCAtCatchStmt(SourceLocation AtLoc,
+ SourceLocation RParenLoc,
+ VarDecl *Var,
+ Stmt *Body) {
+ return getSema().ActOnObjCAtCatchStmt(AtLoc, RParenLoc,
+ Var, Body);
+ }
+
+ /// \brief Build a new Objective-C \@finally statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildObjCAtFinallyStmt(SourceLocation AtLoc,
+ Stmt *Body) {
+ return getSema().ActOnObjCAtFinallyStmt(AtLoc, Body);
+ }
+
+ /// \brief Build a new Objective-C \@throw statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildObjCAtThrowStmt(SourceLocation AtLoc,
+ Expr *Operand) {
+ return getSema().BuildObjCAtThrowStmt(AtLoc, Operand);
+ }
+
+ /// \brief Build a new OpenMP executable directive.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildOMPExecutableDirective(OpenMPDirectiveKind Kind,
+ DeclarationNameInfo DirName,
+ OpenMPDirectiveKind CancelRegion,
+ ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPExecutableDirective(
+ Kind, DirName, CancelRegion, Clauses, AStmt, StartLoc, EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'if' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPIfClause(OpenMPDirectiveKind NameModifier,
+ Expr *Condition, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation NameModifierLoc,
+ SourceLocation ColonLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPIfClause(NameModifier, Condition, StartLoc,
+ LParenLoc, NameModifierLoc, ColonLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'final' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPFinalClause(Expr *Condition, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPFinalClause(Condition, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'num_threads' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPNumThreadsClause(Expr *NumThreads,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPNumThreadsClause(NumThreads, StartLoc,
+ LParenLoc, EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'safelen' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPSafelenClause(Expr *Len, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPSafelenClause(Len, StartLoc, LParenLoc, EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'simdlen' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPSimdlenClause(Expr *Len, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPSimdlenClause(Len, StartLoc, LParenLoc, EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'collapse' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPCollapseClause(Expr *Num, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPCollapseClause(Num, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'default' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPDefaultClause(OpenMPDefaultClauseKind Kind,
+ SourceLocation KindKwLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPDefaultClause(Kind, KindKwLoc,
+ StartLoc, LParenLoc, EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'proc_bind' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPProcBindClause(OpenMPProcBindClauseKind Kind,
+ SourceLocation KindKwLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPProcBindClause(Kind, KindKwLoc,
+ StartLoc, LParenLoc, EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'schedule' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPScheduleClause(
+ OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
+ OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
+ SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPScheduleClause(
+ M1, M2, Kind, ChunkSize, StartLoc, LParenLoc, M1Loc, M2Loc, KindLoc,
+ CommaLoc, EndLoc);
+ }
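+
+ // Illustrative note (OpenMP 4.5): a schedule clause may carry up to two
+ // modifiers, e.g. 'schedule(nonmonotonic: dynamic, ChunkSize)', which
+ // arrive here as M1/M2 alongside the schedule kind and the optional chunk
+ // expression.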
+
+ /// \brief Build a new OpenMP 'ordered' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPOrderedClause(SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ SourceLocation LParenLoc, Expr *Num) {
+ return getSema().ActOnOpenMPOrderedClause(StartLoc, EndLoc, LParenLoc, Num);
+ }
+
+ /// \brief Build a new OpenMP 'private' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPPrivateClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPPrivateClause(VarList, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'firstprivate' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPFirstprivateClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPFirstprivateClause(VarList, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'lastprivate' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPLastprivateClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPLastprivateClause(VarList, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'shared' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPSharedClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPSharedClause(VarList, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'reduction' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPReductionClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation ColonLoc,
+ SourceLocation EndLoc,
+ CXXScopeSpec &ReductionIdScopeSpec,
+ const DeclarationNameInfo &ReductionId) {
+ return getSema().ActOnOpenMPReductionClause(
+ VarList, StartLoc, LParenLoc, ColonLoc, EndLoc, ReductionIdScopeSpec,
+ ReductionId);
+ }
+
+ /// \brief Build a new OpenMP 'linear' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ OpenMPLinearClauseKind Modifier,
+ SourceLocation ModifierLoc,
+ SourceLocation ColonLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPLinearClause(VarList, Step, StartLoc, LParenLoc,
+ Modifier, ModifierLoc, ColonLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'aligned' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation ColonLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPAlignedClause(VarList, Alignment, StartLoc,
+ LParenLoc, ColonLoc, EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'copyin' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPCopyinClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPCopyinClause(VarList, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'copyprivate' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPCopyprivateClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPCopyprivateClause(VarList, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'flush' pseudo clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPFlushClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPFlushClause(VarList, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'depend' pseudo clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *
+ RebuildOMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
+ SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPDependClause(DepKind, DepLoc, ColonLoc, VarList,
+ StartLoc, LParenLoc, EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'device' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPDeviceClause(Expr *Device, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPDeviceClause(Device, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'map' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPMapClause(
+ OpenMPMapClauseKind MapTypeModifier, OpenMPMapClauseKind MapType,
+ SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPMapClause(MapTypeModifier, MapType, MapLoc,
+                                           ColonLoc, VarList, StartLoc,
+ LParenLoc, EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'num_teams' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPNumTeamsClause(NumTeams, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'thread_limit' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPThreadLimitClause(Expr *ThreadLimit,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPThreadLimitClause(ThreadLimit, StartLoc,
+ LParenLoc, EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'priority' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPPriorityClause(Priority, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'grainsize' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPGrainsizeClause(Expr *Grainsize, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPGrainsizeClause(Grainsize, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'num_tasks' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPNumTasksClause(NumTasks, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'hint' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPHintClause(Expr *Hint, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPHintClause(Hint, StartLoc, LParenLoc, EndLoc);
+ }
+
+ /// \brief Rebuild the operand to an Objective-C \@synchronized statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCAtSynchronizedOperand(SourceLocation atLoc,
+ Expr *object) {
+ return getSema().ActOnObjCAtSynchronizedOperand(atLoc, object);
+ }
+
+ /// \brief Build a new Objective-C \@synchronized statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildObjCAtSynchronizedStmt(SourceLocation AtLoc,
+ Expr *Object, Stmt *Body) {
+ return getSema().ActOnObjCAtSynchronizedStmt(AtLoc, Object, Body);
+ }
+
+ /// \brief Build a new Objective-C \@autoreleasepool statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildObjCAutoreleasePoolStmt(SourceLocation AtLoc,
+ Stmt *Body) {
+ return getSema().ActOnObjCAutoreleasePoolStmt(AtLoc, Body);
+ }
+
+ /// \brief Build a new Objective-C fast enumeration statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildObjCForCollectionStmt(SourceLocation ForLoc,
+ Stmt *Element,
+ Expr *Collection,
+ SourceLocation RParenLoc,
+ Stmt *Body) {
+ StmtResult ForEachStmt = getSema().ActOnObjCForCollectionStmt(ForLoc,
+ Element,
+ Collection,
+ RParenLoc);
+ if (ForEachStmt.isInvalid())
+ return StmtError();
+
+ return getSema().FinishObjCForCollectionStmt(ForEachStmt.get(), Body);
+ }
+
+ /// \brief Build a new C++ exception declaration.
+ ///
+ /// By default, performs semantic analysis to build the new declaration.
+ /// Subclasses may override this routine to provide different behavior.
+ VarDecl *RebuildExceptionDecl(VarDecl *ExceptionDecl,
+ TypeSourceInfo *Declarator,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc,
+ IdentifierInfo *Id) {
+ VarDecl *Var = getSema().BuildExceptionDeclaration(nullptr, Declarator,
+ StartLoc, IdLoc, Id);
+ if (Var)
+ getSema().CurContext->addDecl(Var);
+ return Var;
+ }
+
+ /// \brief Build a new C++ catch statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildCXXCatchStmt(SourceLocation CatchLoc,
+ VarDecl *ExceptionDecl,
+ Stmt *Handler) {
+ return Owned(new (getSema().Context) CXXCatchStmt(CatchLoc, ExceptionDecl,
+ Handler));
+ }
+
+ /// \brief Build a new C++ try statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildCXXTryStmt(SourceLocation TryLoc, Stmt *TryBlock,
+ ArrayRef<Stmt *> Handlers) {
+ return getSema().ActOnCXXTryBlock(TryLoc, TryBlock, Handlers);
+ }
+
+ /// \brief Build a new C++11 range-based for statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildCXXForRangeStmt(SourceLocation ForLoc,
+ SourceLocation CoawaitLoc,
+ SourceLocation ColonLoc,
+ Stmt *Range, Stmt *BeginEnd,
+ Expr *Cond, Expr *Inc,
+ Stmt *LoopVar,
+ SourceLocation RParenLoc) {
+ // If we've just learned that the range is actually an Objective-C
+ // collection, treat this as an Objective-C fast enumeration loop.
+ if (DeclStmt *RangeStmt = dyn_cast<DeclStmt>(Range)) {
+ if (RangeStmt->isSingleDecl()) {
+ if (VarDecl *RangeVar = dyn_cast<VarDecl>(RangeStmt->getSingleDecl())) {
+ if (RangeVar->isInvalidDecl())
+ return StmtError();
+
+ Expr *RangeExpr = RangeVar->getInit();
+ if (!RangeExpr->isTypeDependent() &&
+ RangeExpr->getType()->isObjCObjectPointerType())
+ return getSema().ActOnObjCForCollectionStmt(ForLoc, LoopVar, RangeExpr,
+ RParenLoc);
+ }
+ }
+ }
+
+ return getSema().BuildCXXForRangeStmt(ForLoc, CoawaitLoc, ColonLoc,
+ Range, BeginEnd,
+ Cond, Inc, LoopVar, RParenLoc,
+ Sema::BFRK_Rebuild);
+ }
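+
+ // Illustrative sketch: given a dependent loop such as
+ //   for (id Elem : Collection) { ... }
+ // instantiation may reveal that the range has Objective-C object pointer
+ // type, in which case the loop is rebuilt as a fast-enumeration statement
+ // rather than a C++ range-based for.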
+
+ /// \brief Build a new Microsoft __if_exists or __if_not_exists statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildMSDependentExistsStmt(SourceLocation KeywordLoc,
+ bool IsIfExists,
+ NestedNameSpecifierLoc QualifierLoc,
+ DeclarationNameInfo NameInfo,
+ Stmt *Nested) {
+ return getSema().BuildMSDependentExistsStmt(KeywordLoc, IsIfExists,
+ QualifierLoc, NameInfo, Nested);
+ }
+
+ /// \brief Attach body to a C++11 range-based for statement.
+ ///
+ /// By default, performs semantic analysis to finish the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body) {
+ return getSema().FinishCXXForRangeStmt(ForRange, Body);
+ }
+
+ StmtResult RebuildSEHTryStmt(bool IsCXXTry, SourceLocation TryLoc,
+ Stmt *TryBlock, Stmt *Handler) {
+ return getSema().ActOnSEHTryBlock(IsCXXTry, TryLoc, TryBlock, Handler);
+ }
+
+ StmtResult RebuildSEHExceptStmt(SourceLocation Loc, Expr *FilterExpr,
+ Stmt *Block) {
+ return getSema().ActOnSEHExceptBlock(Loc, FilterExpr, Block);
+ }
+
+ StmtResult RebuildSEHFinallyStmt(SourceLocation Loc, Stmt *Block) {
+ return SEHFinallyStmt::Create(getSema().getASTContext(), Loc, Block);
+ }
+
+ /// \brief Build a new predefined expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildPredefinedExpr(SourceLocation Loc,
+ PredefinedExpr::IdentType IT) {
+ return getSema().BuildPredefinedExpr(Loc, IT);
+ }
+
+ /// \brief Build a new expression that references a declaration.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildDeclarationNameExpr(const CXXScopeSpec &SS,
+ LookupResult &R,
+ bool RequiresADL) {
+ return getSema().BuildDeclarationNameExpr(SS, R, RequiresADL);
+ }
+
+ /// \brief Build a new expression that references a declaration.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildDeclRefExpr(NestedNameSpecifierLoc QualifierLoc,
+ ValueDecl *VD,
+ const DeclarationNameInfo &NameInfo,
+ TemplateArgumentListInfo *TemplateArgs) {
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+
+ // FIXME: loses template args.
+
+ return getSema().BuildDeclarationNameExpr(SS, NameInfo, VD);
+ }
+
+ /// \brief Build a new expression in parentheses.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildParenExpr(Expr *SubExpr, SourceLocation LParen,
+ SourceLocation RParen) {
+ return getSema().ActOnParenExpr(LParen, RParen, SubExpr);
+ }
+
+ /// \brief Build a new pseudo-destructor expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXPseudoDestructorExpr(Expr *Base,
+ SourceLocation OperatorLoc,
+ bool isArrow,
+ CXXScopeSpec &SS,
+ TypeSourceInfo *ScopeType,
+ SourceLocation CCLoc,
+ SourceLocation TildeLoc,
+ PseudoDestructorTypeStorage Destroyed);
+
+ /// \brief Build a new unary operator expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildUnaryOperator(SourceLocation OpLoc,
+ UnaryOperatorKind Opc,
+ Expr *SubExpr) {
+ return getSema().BuildUnaryOp(/*Scope=*/nullptr, OpLoc, Opc, SubExpr);
+ }
+
+ /// \brief Build a new builtin offsetof expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildOffsetOfExpr(SourceLocation OperatorLoc,
+ TypeSourceInfo *Type,
+ ArrayRef<Sema::OffsetOfComponent> Components,
+ SourceLocation RParenLoc) {
+ return getSema().BuildBuiltinOffsetOf(OperatorLoc, Type, Components,
+ RParenLoc);
+ }
+
+ /// \brief Build a new sizeof, alignof or vec_step expression with a
+ /// type argument.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildUnaryExprOrTypeTrait(TypeSourceInfo *TInfo,
+ SourceLocation OpLoc,
+ UnaryExprOrTypeTrait ExprKind,
+ SourceRange R) {
+ return getSema().CreateUnaryExprOrTypeTraitExpr(TInfo, OpLoc, ExprKind, R);
+ }
+
+ /// \brief Build a new sizeof, alignof or vec_step expression with an
+ /// expression argument.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildUnaryExprOrTypeTrait(Expr *SubExpr, SourceLocation OpLoc,
+ UnaryExprOrTypeTrait ExprKind,
+ SourceRange R) {
+ ExprResult Result
+ = getSema().CreateUnaryExprOrTypeTraitExpr(SubExpr, OpLoc, ExprKind);
+ if (Result.isInvalid())
+ return ExprError();
+
+ return Result;
+ }
+
+ /// \brief Build a new array subscript expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildArraySubscriptExpr(Expr *LHS,
+ SourceLocation LBracketLoc,
+ Expr *RHS,
+ SourceLocation RBracketLoc) {
+ return getSema().ActOnArraySubscriptExpr(/*Scope=*/nullptr, LHS,
+ LBracketLoc, RHS,
+ RBracketLoc);
+ }
+
+ /// \brief Build a new array section expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildOMPArraySectionExpr(Expr *Base, SourceLocation LBracketLoc,
+ Expr *LowerBound,
+ SourceLocation ColonLoc, Expr *Length,
+ SourceLocation RBracketLoc) {
+ return getSema().ActOnOMPArraySectionExpr(Base, LBracketLoc, LowerBound,
+ ColonLoc, Length, RBracketLoc);
+ }
+
+ /// \brief Build a new call expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCallExpr(Expr *Callee, SourceLocation LParenLoc,
+ MultiExprArg Args,
+ SourceLocation RParenLoc,
+ Expr *ExecConfig = nullptr) {
+ return getSema().ActOnCallExpr(/*Scope=*/nullptr, Callee, LParenLoc,
+ Args, RParenLoc, ExecConfig);
+ }
+
+ /// \brief Build a new member access expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildMemberExpr(Expr *Base, SourceLocation OpLoc,
+ bool isArrow,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &MemberNameInfo,
+ ValueDecl *Member,
+ NamedDecl *FoundDecl,
+ const TemplateArgumentListInfo *ExplicitTemplateArgs,
+ NamedDecl *FirstQualifierInScope) {
+ ExprResult BaseResult = getSema().PerformMemberExprBaseConversion(Base,
+ isArrow);
+ if (!Member->getDeclName()) {
+ // We have a reference to an unnamed field. This is always the
+ // base of an anonymous struct/union member access, i.e. the
+ // field is always of record type.
+ assert(!QualifierLoc && "Can't have an unnamed field with a qualifier!");
+ assert(Member->getType()->isRecordType() &&
+ "unnamed member not of record type?");
+
+ BaseResult =
+ getSema().PerformObjectMemberConversion(BaseResult.get(),
+ QualifierLoc.getNestedNameSpecifier(),
+ FoundDecl, Member);
+ if (BaseResult.isInvalid())
+ return ExprError();
+ Base = BaseResult.get();
+ ExprValueKind VK = isArrow ? VK_LValue : Base->getValueKind();
+ MemberExpr *ME = new (getSema().Context)
+ MemberExpr(Base, isArrow, OpLoc, Member, MemberNameInfo,
+ cast<FieldDecl>(Member)->getType(), VK, OK_Ordinary);
+ return ME;
+ }
+
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+
+ Base = BaseResult.get();
+ QualType BaseType = Base->getType();
+
+ // FIXME: this involves duplicating earlier analysis in a lot of
+ // cases; we should avoid this when possible.
+ LookupResult R(getSema(), MemberNameInfo, Sema::LookupMemberName);
+ R.addDecl(FoundDecl);
+ R.resolveKind();
+
+ return getSema().BuildMemberReferenceExpr(Base, BaseType, OpLoc, isArrow,
+ SS, TemplateKWLoc,
+ FirstQualifierInScope,
+ R, ExplicitTemplateArgs,
+ /*S*/nullptr);
+ }
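+
+ // Illustrative example of the unnamed-field case handled above:
+ //   struct S { struct { int i; }; };
+ //   S s; s.i = 0;  // 's.i' is rebuilt through the unnamed field of record
+ //                  // type that represents the anonymous struct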
+
+ /// \brief Build a new binary operator expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildBinaryOperator(SourceLocation OpLoc,
+ BinaryOperatorKind Opc,
+ Expr *LHS, Expr *RHS) {
+ return getSema().BuildBinOp(/*Scope=*/nullptr, OpLoc, Opc, LHS, RHS);
+ }
+
+ /// \brief Build a new conditional operator expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildConditionalOperator(Expr *Cond,
+ SourceLocation QuestionLoc,
+ Expr *LHS,
+ SourceLocation ColonLoc,
+ Expr *RHS) {
+ return getSema().ActOnConditionalOp(QuestionLoc, ColonLoc, Cond,
+ LHS, RHS);
+ }
+
+ /// \brief Build a new C-style cast expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCStyleCastExpr(SourceLocation LParenLoc,
+ TypeSourceInfo *TInfo,
+ SourceLocation RParenLoc,
+ Expr *SubExpr) {
+ return getSema().BuildCStyleCastExpr(LParenLoc, TInfo, RParenLoc,
+ SubExpr);
+ }
+
+ /// \brief Build a new compound literal expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCompoundLiteralExpr(SourceLocation LParenLoc,
+ TypeSourceInfo *TInfo,
+ SourceLocation RParenLoc,
+ Expr *Init) {
+ return getSema().BuildCompoundLiteralExpr(LParenLoc, TInfo, RParenLoc,
+ Init);
+ }
+
+ /// \brief Build a new extended vector element access expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildExtVectorElementExpr(Expr *Base,
+ SourceLocation OpLoc,
+ SourceLocation AccessorLoc,
+ IdentifierInfo &Accessor) {
+ CXXScopeSpec SS;
+ DeclarationNameInfo NameInfo(&Accessor, AccessorLoc);
+ return getSema().BuildMemberReferenceExpr(Base, Base->getType(),
+ OpLoc, /*IsArrow*/ false,
+ SS, SourceLocation(),
+ /*FirstQualifierInScope*/ nullptr,
+ NameInfo,
+ /* TemplateArgs */ nullptr,
+ /*S*/ nullptr);
+ }
+
+ /// \brief Build a new initializer list expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildInitList(SourceLocation LBraceLoc,
+ MultiExprArg Inits,
+ SourceLocation RBraceLoc,
+ QualType ResultTy) {
+ ExprResult Result
+ = SemaRef.ActOnInitList(LBraceLoc, Inits, RBraceLoc);
+ if (Result.isInvalid() || ResultTy->isDependentType())
+ return Result;
+
+ // Patch in the result type we were given, which may have been computed
+ // when the initial InitListExpr was built.
+ InitListExpr *ILE = cast<InitListExpr>((Expr *)Result.get());
+ ILE->setType(ResultTy);
+ return Result;
+ }
+
+ /// \brief Build a new designated initializer expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildDesignatedInitExpr(Designation &Desig,
+ MultiExprArg ArrayExprs,
+ SourceLocation EqualOrColonLoc,
+ bool GNUSyntax,
+ Expr *Init) {
+ ExprResult Result
+ = SemaRef.ActOnDesignatedInitializer(Desig, EqualOrColonLoc, GNUSyntax,
+ Init);
+ if (Result.isInvalid())
+ return ExprError();
+
+ return Result;
+ }
+
+ /// \brief Build a new value-initialized expression.
+ ///
+ /// By default, builds the implicit value initialization without performing
+ /// any semantic analysis. Subclasses may override this routine to provide
+ /// different behavior.
+ ExprResult RebuildImplicitValueInitExpr(QualType T) {
+ return new (SemaRef.Context) ImplicitValueInitExpr(T);
+ }
+
+ /// \brief Build a new \c va_arg expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildVAArgExpr(SourceLocation BuiltinLoc,
+ Expr *SubExpr, TypeSourceInfo *TInfo,
+ SourceLocation RParenLoc) {
+ return getSema().BuildVAArgExpr(BuiltinLoc,
+ SubExpr, TInfo,
+ RParenLoc);
+ }
+
+ /// \brief Build a new expression list in parentheses.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildParenListExpr(SourceLocation LParenLoc,
+ MultiExprArg SubExprs,
+ SourceLocation RParenLoc) {
+ return getSema().ActOnParenListExpr(LParenLoc, RParenLoc, SubExprs);
+ }
+
+ /// \brief Build a new address-of-label expression.
+ ///
+ /// By default, performs semantic analysis, using the name of the label
+ /// rather than attempting to map the label statement itself.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildAddrLabelExpr(SourceLocation AmpAmpLoc,
+ SourceLocation LabelLoc, LabelDecl *Label) {
+ return getSema().ActOnAddrLabel(AmpAmpLoc, LabelLoc, Label);
+ }
+
+ /// \brief Build a new GNU statement expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildStmtExpr(SourceLocation LParenLoc,
+ Stmt *SubStmt,
+ SourceLocation RParenLoc) {
+ return getSema().ActOnStmtExpr(LParenLoc, SubStmt, RParenLoc);
+ }
+
+ /// \brief Build a new __builtin_choose_expr expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildChooseExpr(SourceLocation BuiltinLoc,
+ Expr *Cond, Expr *LHS, Expr *RHS,
+ SourceLocation RParenLoc) {
+ return SemaRef.ActOnChooseExpr(BuiltinLoc,
+ Cond, LHS, RHS,
+ RParenLoc);
+ }
+
+ /// \brief Build a new generic selection expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildGenericSelectionExpr(SourceLocation KeyLoc,
+ SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ Expr *ControllingExpr,
+ ArrayRef<TypeSourceInfo *> Types,
+ ArrayRef<Expr *> Exprs) {
+ return getSema().CreateGenericSelectionExpr(KeyLoc, DefaultLoc, RParenLoc,
+ ControllingExpr, Types, Exprs);
+ }
+
+ /// \brief Build a new overloaded operator call expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// The semantic analysis provides the behavior of template instantiation,
+ /// copying with transformations that turn what looks like an overloaded
+ /// operator call into a use of a builtin operator, performing
+ /// argument-dependent lookup, etc. Subclasses may override this routine to
+ /// provide different behavior.
+ ExprResult RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
+ SourceLocation OpLoc,
+ Expr *Callee,
+ Expr *First,
+ Expr *Second);
+
+ /// \brief Build a new C++ "named" cast expression, such as static_cast or
+ /// reinterpret_cast.
+ ///
+ /// By default, this routine dispatches to one of the more-specific routines
+ /// for a particular named cast, e.g., RebuildCXXStaticCastExpr().
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXNamedCastExpr(SourceLocation OpLoc,
+ Stmt::StmtClass Class,
+ SourceLocation LAngleLoc,
+ TypeSourceInfo *TInfo,
+ SourceLocation RAngleLoc,
+ SourceLocation LParenLoc,
+ Expr *SubExpr,
+ SourceLocation RParenLoc) {
+ switch (Class) {
+ case Stmt::CXXStaticCastExprClass:
+ return getDerived().RebuildCXXStaticCastExpr(OpLoc, LAngleLoc, TInfo,
+ RAngleLoc, LParenLoc,
+ SubExpr, RParenLoc);
+
+ case Stmt::CXXDynamicCastExprClass:
+ return getDerived().RebuildCXXDynamicCastExpr(OpLoc, LAngleLoc, TInfo,
+ RAngleLoc, LParenLoc,
+ SubExpr, RParenLoc);
+
+ case Stmt::CXXReinterpretCastExprClass:
+ return getDerived().RebuildCXXReinterpretCastExpr(OpLoc, LAngleLoc, TInfo,
+ RAngleLoc, LParenLoc,
+ SubExpr,
+ RParenLoc);
+
+ case Stmt::CXXConstCastExprClass:
+ return getDerived().RebuildCXXConstCastExpr(OpLoc, LAngleLoc, TInfo,
+ RAngleLoc, LParenLoc,
+ SubExpr, RParenLoc);
+
+ default:
+ llvm_unreachable("Invalid C++ named cast");
+ }
+ }
+
+ /// \brief Build a new C++ static_cast expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXStaticCastExpr(SourceLocation OpLoc,
+ SourceLocation LAngleLoc,
+ TypeSourceInfo *TInfo,
+ SourceLocation RAngleLoc,
+ SourceLocation LParenLoc,
+ Expr *SubExpr,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXNamedCast(OpLoc, tok::kw_static_cast,
+ TInfo, SubExpr,
+ SourceRange(LAngleLoc, RAngleLoc),
+ SourceRange(LParenLoc, RParenLoc));
+ }
+
+ /// \brief Build a new C++ dynamic_cast expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXDynamicCastExpr(SourceLocation OpLoc,
+ SourceLocation LAngleLoc,
+ TypeSourceInfo *TInfo,
+ SourceLocation RAngleLoc,
+ SourceLocation LParenLoc,
+ Expr *SubExpr,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXNamedCast(OpLoc, tok::kw_dynamic_cast,
+ TInfo, SubExpr,
+ SourceRange(LAngleLoc, RAngleLoc),
+ SourceRange(LParenLoc, RParenLoc));
+ }
+
+ /// \brief Build a new C++ reinterpret_cast expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXReinterpretCastExpr(SourceLocation OpLoc,
+ SourceLocation LAngleLoc,
+ TypeSourceInfo *TInfo,
+ SourceLocation RAngleLoc,
+ SourceLocation LParenLoc,
+ Expr *SubExpr,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXNamedCast(OpLoc, tok::kw_reinterpret_cast,
+ TInfo, SubExpr,
+ SourceRange(LAngleLoc, RAngleLoc),
+ SourceRange(LParenLoc, RParenLoc));
+ }
+
+ /// \brief Build a new C++ const_cast expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXConstCastExpr(SourceLocation OpLoc,
+ SourceLocation LAngleLoc,
+ TypeSourceInfo *TInfo,
+ SourceLocation RAngleLoc,
+ SourceLocation LParenLoc,
+ Expr *SubExpr,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXNamedCast(OpLoc, tok::kw_const_cast,
+ TInfo, SubExpr,
+ SourceRange(LAngleLoc, RAngleLoc),
+ SourceRange(LParenLoc, RParenLoc));
+ }
+
+ /// \brief Build a new C++ functional-style cast expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
+ SourceLocation LParenLoc,
+ Expr *Sub,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXTypeConstructExpr(TInfo, LParenLoc,
+ MultiExprArg(&Sub, 1),
+ RParenLoc);
+ }
+
+ /// \brief Build a new C++ typeid(type) expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXTypeidExpr(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ TypeSourceInfo *Operand,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXTypeId(TypeInfoType, TypeidLoc, Operand,
+ RParenLoc);
+ }
+
+ /// \brief Build a new C++ typeid(expr) expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXTypeidExpr(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ Expr *Operand,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXTypeId(TypeInfoType, TypeidLoc, Operand,
+ RParenLoc);
+ }
+
+ /// \brief Build a new C++ __uuidof(type) expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXUuidofExpr(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ TypeSourceInfo *Operand,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXUuidof(TypeInfoType, TypeidLoc, Operand,
+ RParenLoc);
+ }
+
+ /// \brief Build a new C++ __uuidof(expr) expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXUuidofExpr(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ Expr *Operand,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXUuidof(TypeInfoType, TypeidLoc, Operand,
+ RParenLoc);
+ }
+
+ /// \brief Build a new C++ "this" expression.
+ ///
+ /// By default, builds a new "this" expression without performing any
+ /// semantic analysis. Subclasses may override this routine to provide
+ /// different behavior.
+ ExprResult RebuildCXXThisExpr(SourceLocation ThisLoc,
+ QualType ThisType,
+ bool isImplicit) {
+ getSema().CheckCXXThisCapture(ThisLoc);
+ return new (getSema().Context) CXXThisExpr(ThisLoc, ThisType, isImplicit);
+ }
+
+ /// \brief Build a new C++ throw expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXThrowExpr(SourceLocation ThrowLoc, Expr *Sub,
+ bool IsThrownVariableInScope) {
+ return getSema().BuildCXXThrow(ThrowLoc, Sub, IsThrownVariableInScope);
+ }
+
+ /// \brief Build a new C++ default-argument expression.
+ ///
+ /// By default, builds a new default-argument expression, which does not
+ /// require any semantic analysis. Subclasses may override this routine to
+ /// provide different behavior.
+ ExprResult RebuildCXXDefaultArgExpr(SourceLocation Loc,
+ ParmVarDecl *Param) {
+ return CXXDefaultArgExpr::Create(getSema().Context, Loc, Param);
+ }
+
+ /// \brief Build a new C++11 default-initialization expression.
+ ///
+ /// By default, builds a new default field initialization expression, which
+ /// does not require any semantic analysis. Subclasses may override this
+ /// routine to provide different behavior.
+ ExprResult RebuildCXXDefaultInitExpr(SourceLocation Loc,
+ FieldDecl *Field) {
+ return CXXDefaultInitExpr::Create(getSema().Context, Loc, Field);
+ }
+
+ /// \brief Build a new C++ zero-initialization expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXScalarValueInitExpr(TypeSourceInfo *TSInfo,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXTypeConstructExpr(TSInfo, LParenLoc,
+ None, RParenLoc);
+ }
+
+ /// \brief Build a new C++ "new" expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXNewExpr(SourceLocation StartLoc,
+ bool UseGlobal,
+ SourceLocation PlacementLParen,
+ MultiExprArg PlacementArgs,
+ SourceLocation PlacementRParen,
+ SourceRange TypeIdParens,
+ QualType AllocatedType,
+ TypeSourceInfo *AllocatedTypeInfo,
+ Expr *ArraySize,
+ SourceRange DirectInitRange,
+ Expr *Initializer) {
+ return getSema().BuildCXXNew(StartLoc, UseGlobal,
+ PlacementLParen,
+ PlacementArgs,
+ PlacementRParen,
+ TypeIdParens,
+ AllocatedType,
+ AllocatedTypeInfo,
+ ArraySize,
+ DirectInitRange,
+ Initializer);
+ }
+
+ /// \brief Build a new C++ "delete" expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXDeleteExpr(SourceLocation StartLoc,
+ bool IsGlobalDelete,
+ bool IsArrayForm,
+ Expr *Operand) {
+ return getSema().ActOnCXXDelete(StartLoc, IsGlobalDelete, IsArrayForm,
+ Operand);
+ }
+
+ /// \brief Build a new type trait expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildTypeTrait(TypeTrait Trait,
+ SourceLocation StartLoc,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc) {
+ return getSema().BuildTypeTrait(Trait, StartLoc, Args, RParenLoc);
+ }
+
+ /// \brief Build a new array type trait expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildArrayTypeTrait(ArrayTypeTrait Trait,
+ SourceLocation StartLoc,
+ TypeSourceInfo *TSInfo,
+ Expr *DimExpr,
+ SourceLocation RParenLoc) {
+ return getSema().BuildArrayTypeTrait(Trait, StartLoc, TSInfo, DimExpr,
+                                          RParenLoc);
+ }
+
+ /// \brief Build a new expression trait expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildExpressionTrait(ExpressionTrait Trait,
+ SourceLocation StartLoc,
+ Expr *Queried,
+ SourceLocation RParenLoc) {
+ return getSema().BuildExpressionTrait(Trait, StartLoc, Queried, RParenLoc);
+ }
+
+ /// \brief Build a new (previously unresolved) declaration reference
+ /// expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildDependentScopeDeclRefExpr(
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs,
+ bool IsAddressOfOperand,
+ TypeSourceInfo **RecoveryTSI) {
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+
+ if (TemplateArgs || TemplateKWLoc.isValid())
+ return getSema().BuildQualifiedTemplateIdExpr(SS, TemplateKWLoc, NameInfo,
+ TemplateArgs);
+
+ return getSema().BuildQualifiedDeclarationNameExpr(
+ SS, NameInfo, IsAddressOfOperand, /*S*/nullptr, RecoveryTSI);
+ }
+
+ /// \brief Build a new template-id expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildTemplateIdExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ LookupResult &R,
+ bool RequiresADL,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ return getSema().BuildTemplateIdExpr(SS, TemplateKWLoc, R, RequiresADL,
+ TemplateArgs);
+ }
+
+ /// \brief Build a new object-construction expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXConstructExpr(QualType T,
+ SourceLocation Loc,
+ CXXConstructorDecl *Constructor,
+ bool IsElidable,
+ MultiExprArg Args,
+ bool HadMultipleCandidates,
+ bool ListInitialization,
+ bool StdInitListInitialization,
+ bool RequiresZeroInit,
+ CXXConstructExpr::ConstructionKind ConstructKind,
+ SourceRange ParenRange) {
+ SmallVector<Expr*, 8> ConvertedArgs;
+ if (getSema().CompleteConstructorCall(Constructor, Args, Loc,
+ ConvertedArgs))
+ return ExprError();
+
+ return getSema().BuildCXXConstructExpr(Loc, T, Constructor, IsElidable,
+ ConvertedArgs,
+ HadMultipleCandidates,
+ ListInitialization,
+ StdInitListInitialization,
+ RequiresZeroInit, ConstructKind,
+ ParenRange);
+ }
+
+ /// \brief Build a new C++ temporary-object expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXTemporaryObjectExpr(TypeSourceInfo *TSInfo,
+ SourceLocation LParenLoc,
+ MultiExprArg Args,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXTypeConstructExpr(TSInfo,
+ LParenLoc,
+ Args,
+ RParenLoc);
+ }
+
+ /// \brief Build a new unresolved object-construction expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXUnresolvedConstructExpr(TypeSourceInfo *TSInfo,
+ SourceLocation LParenLoc,
+ MultiExprArg Args,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXTypeConstructExpr(TSInfo,
+ LParenLoc,
+ Args,
+ RParenLoc);
+ }
+
+ /// \brief Build a new member reference expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXDependentScopeMemberExpr(Expr *BaseE,
+ QualType BaseType,
+ bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierInScope,
+ const DeclarationNameInfo &MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+
+ return SemaRef.BuildMemberReferenceExpr(BaseE, BaseType,
+ OperatorLoc, IsArrow,
+ SS, TemplateKWLoc,
+ FirstQualifierInScope,
+ MemberNameInfo,
+ TemplateArgs, /*S*/nullptr);
+ }
+
+ /// \brief Build a new member reference expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildUnresolvedMemberExpr(Expr *BaseE, QualType BaseType,
+ SourceLocation OperatorLoc,
+ bool IsArrow,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierInScope,
+ LookupResult &R,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+
+ return SemaRef.BuildMemberReferenceExpr(BaseE, BaseType,
+ OperatorLoc, IsArrow,
+ SS, TemplateKWLoc,
+ FirstQualifierInScope,
+ R, TemplateArgs, /*S*/nullptr);
+ }
+
+ /// \brief Build a new noexcept expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXNoexceptExpr(SourceRange Range, Expr *Arg) {
+ return SemaRef.BuildCXXNoexceptExpr(Range.getBegin(), Arg, Range.getEnd());
+ }
+
+ /// \brief Build a new expression to compute the length of a parameter pack.
+ ExprResult RebuildSizeOfPackExpr(SourceLocation OperatorLoc,
+ NamedDecl *Pack,
+ SourceLocation PackLoc,
+ SourceLocation RParenLoc,
+ Optional<unsigned> Length,
+ ArrayRef<TemplateArgument> PartialArgs) {
+ return SizeOfPackExpr::Create(SemaRef.Context, OperatorLoc, Pack, PackLoc,
+ RParenLoc, Length, PartialArgs);
+ }
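+
+ // Illustrative example: within 'template <typename ...Ts>', the expression
+ // 'sizeof...(Ts)' is rebuilt here; Length carries the pack size once it is
+ // known, and PartialArgs any partially-substituted arguments.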
+
+ /// \brief Build a new Objective-C boxed expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
+ return getSema().BuildObjCBoxedExpr(SR, ValueExpr);
+ }
+
+ /// \brief Build a new Objective-C array literal.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCArrayLiteral(SourceRange Range,
+ Expr **Elements, unsigned NumElements) {
+ return getSema().BuildObjCArrayLiteral(Range,
+ MultiExprArg(Elements, NumElements));
+ }
+
+ ExprResult RebuildObjCSubscriptRefExpr(SourceLocation RB,
+ Expr *Base, Expr *Key,
+ ObjCMethodDecl *getterMethod,
+ ObjCMethodDecl *setterMethod) {
+ return getSema().BuildObjCSubscriptExpression(RB, Base, Key,
+ getterMethod, setterMethod);
+ }
+
+ /// \brief Build a new Objective-C dictionary literal.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCDictionaryLiteral(SourceRange Range,
+ MutableArrayRef<ObjCDictionaryElement> Elements) {
+ return getSema().BuildObjCDictionaryLiteral(Range, Elements);
+ }
+
+ /// \brief Build a new Objective-C \@encode expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCEncodeExpr(SourceLocation AtLoc,
+ TypeSourceInfo *EncodeTypeInfo,
+ SourceLocation RParenLoc) {
+ return SemaRef.BuildObjCEncodeExpression(AtLoc, EncodeTypeInfo, RParenLoc);
+ }
+
+ /// \brief Build a new Objective-C class message.
+ ExprResult RebuildObjCMessageExpr(TypeSourceInfo *ReceiverTypeInfo,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelectorLocs,
+ ObjCMethodDecl *Method,
+ SourceLocation LBracLoc,
+ MultiExprArg Args,
+ SourceLocation RBracLoc) {
+ return SemaRef.BuildClassMessage(ReceiverTypeInfo,
+ ReceiverTypeInfo->getType(),
+ /*SuperLoc=*/SourceLocation(),
+ Sel, Method, LBracLoc, SelectorLocs,
+ RBracLoc, Args);
+ }
+
+ /// \brief Build a new Objective-C instance message.
+ ExprResult RebuildObjCMessageExpr(Expr *Receiver,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelectorLocs,
+ ObjCMethodDecl *Method,
+ SourceLocation LBracLoc,
+ MultiExprArg Args,
+ SourceLocation RBracLoc) {
+ return SemaRef.BuildInstanceMessage(Receiver,
+ Receiver->getType(),
+ /*SuperLoc=*/SourceLocation(),
+ Sel, Method, LBracLoc, SelectorLocs,
+ RBracLoc, Args);
+ }
+
+ /// \brief Build a new Objective-C instance/class message to 'super'.
+ ExprResult RebuildObjCMessageExpr(SourceLocation SuperLoc,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelectorLocs,
+ QualType SuperType,
+ ObjCMethodDecl *Method,
+ SourceLocation LBracLoc,
+ MultiExprArg Args,
+ SourceLocation RBracLoc) {
+ return Method->isInstanceMethod() ? SemaRef.BuildInstanceMessage(nullptr,
+ SuperType,
+ SuperLoc,
+ Sel, Method, LBracLoc, SelectorLocs,
+ RBracLoc, Args)
+ : SemaRef.BuildClassMessage(nullptr,
+ SuperType,
+ SuperLoc,
+ Sel, Method, LBracLoc, SelectorLocs,
+ RBracLoc, Args);
+ }
+
+ /// \brief Build a new Objective-C ivar reference expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCIvarRefExpr(Expr *BaseArg, ObjCIvarDecl *Ivar,
+ SourceLocation IvarLoc,
+ bool IsArrow, bool IsFreeIvar) {
+ // FIXME: We lose track of the IsFreeIvar bit.
+ CXXScopeSpec SS;
+ DeclarationNameInfo NameInfo(Ivar->getDeclName(), IvarLoc);
+ return getSema().BuildMemberReferenceExpr(BaseArg, BaseArg->getType(),
+ /*FIXME:*/IvarLoc, IsArrow,
+ SS, SourceLocation(),
+ /*FirstQualifierInScope=*/nullptr,
+ NameInfo,
+ /*TemplateArgs=*/nullptr,
+ /*S=*/nullptr);
+ }
+
+ /// \brief Build a new Objective-C property reference expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCPropertyRefExpr(Expr *BaseArg,
+ ObjCPropertyDecl *Property,
+ SourceLocation PropertyLoc) {
+ CXXScopeSpec SS;
+ DeclarationNameInfo NameInfo(Property->getDeclName(), PropertyLoc);
+ return getSema().BuildMemberReferenceExpr(BaseArg, BaseArg->getType(),
+ /*FIXME:*/PropertyLoc,
+ /*IsArrow=*/false,
+ SS, SourceLocation(),
+ /*FirstQualifierInScope=*/nullptr,
+ NameInfo,
+ /*TemplateArgs=*/nullptr,
+ /*S=*/nullptr);
+ }
+
+ /// \brief Build a new Objective-C property reference expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCPropertyRefExpr(Expr *Base, QualType T,
+ ObjCMethodDecl *Getter,
+ ObjCMethodDecl *Setter,
+ SourceLocation PropertyLoc) {
+ // Since these expressions can only be value-dependent, we do not
+ // need to perform semantic analysis again.
+ return Owned(
+ new (getSema().Context) ObjCPropertyRefExpr(Getter, Setter, T,
+ VK_LValue, OK_ObjCProperty,
+ PropertyLoc, Base));
+ }
+
+ /// \brief Build a new Objective-C "isa" expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCIsaExpr(Expr *BaseArg, SourceLocation IsaLoc,
+ SourceLocation OpLoc, bool IsArrow) {
+ CXXScopeSpec SS;
+ DeclarationNameInfo NameInfo(&getSema().Context.Idents.get("isa"), IsaLoc);
+ return getSema().BuildMemberReferenceExpr(BaseArg, BaseArg->getType(),
+ OpLoc, IsArrow,
+ SS, SourceLocation(),
+ /*FirstQualifierInScope=*/nullptr,
+ NameInfo,
+ /*TemplateArgs=*/nullptr,
+ /*S=*/nullptr);
+ }
+
+ /// \brief Build a new shuffle vector expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildShuffleVectorExpr(SourceLocation BuiltinLoc,
+ MultiExprArg SubExprs,
+ SourceLocation RParenLoc) {
+ // Find the declaration for __builtin_shufflevector
+ const IdentifierInfo &Name
+ = SemaRef.Context.Idents.get("__builtin_shufflevector");
+ TranslationUnitDecl *TUDecl = SemaRef.Context.getTranslationUnitDecl();
+ DeclContext::lookup_result Lookup = TUDecl->lookup(DeclarationName(&Name));
+ assert(!Lookup.empty() && "No __builtin_shufflevector?");
+
+ // Build a reference to the __builtin_shufflevector builtin
+ FunctionDecl *Builtin = cast<FunctionDecl>(Lookup.front());
+ Expr *Callee = new (SemaRef.Context) DeclRefExpr(Builtin, false,
+ SemaRef.Context.BuiltinFnTy,
+ VK_RValue, BuiltinLoc);
+ QualType CalleePtrTy = SemaRef.Context.getPointerType(Builtin->getType());
+ Callee = SemaRef.ImpCastExprToType(Callee, CalleePtrTy,
+ CK_BuiltinFnToFnPtr).get();
+
+ // Build the CallExpr
+ ExprResult TheCall = new (SemaRef.Context) CallExpr(
+ SemaRef.Context, Callee, SubExprs, Builtin->getCallResultType(),
+ Expr::getValueKindForType(Builtin->getReturnType()), RParenLoc);
+
+ // Type-check the __builtin_shufflevector expression.
+ return SemaRef.SemaBuiltinShuffleVector(cast<CallExpr>(TheCall.get()));
+ }
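+
+ // Illustrative usage of the builtin re-type-checked above:
+ //   typedef int v4 __attribute__((vector_size(16)));
+ //   v4 r = __builtin_shufflevector(a, b, 0, 1, 4, 5);
+ // Indices 0-3 select lanes of 'a' and 4-7 lanes of 'b', so 'r' holds
+ // { a[0], a[1], b[0], b[1] }.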
+
+ /// \brief Build a new convert vector expression.
+ ExprResult RebuildConvertVectorExpr(SourceLocation BuiltinLoc,
+ Expr *SrcExpr, TypeSourceInfo *DstTInfo,
+ SourceLocation RParenLoc) {
+ return SemaRef.SemaConvertVectorExpr(SrcExpr, DstTInfo,
+ BuiltinLoc, RParenLoc);
+ }
+
+ /// \brief Build a new template argument pack expansion.
+ ///
+ /// By default, performs semantic analysis to build a new pack expansion
+ /// for a template argument. Subclasses may override this routine to provide
+ /// different behavior.
+ TemplateArgumentLoc RebuildPackExpansion(TemplateArgumentLoc Pattern,
+ SourceLocation EllipsisLoc,
+ Optional<unsigned> NumExpansions) {
+ switch (Pattern.getArgument().getKind()) {
+ case TemplateArgument::Expression: {
+ ExprResult Result
+ = getSema().CheckPackExpansion(Pattern.getSourceExpression(),
+ EllipsisLoc, NumExpansions);
+ if (Result.isInvalid())
+ return TemplateArgumentLoc();
+
+ return TemplateArgumentLoc(Result.get(), Result.get());
+ }
+
+ case TemplateArgument::Template:
+ return TemplateArgumentLoc(TemplateArgument(
+ Pattern.getArgument().getAsTemplate(),
+ NumExpansions),
+ Pattern.getTemplateQualifierLoc(),
+ Pattern.getTemplateNameLoc(),
+ EllipsisLoc);
+
+ case TemplateArgument::Null:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Pack:
+ case TemplateArgument::TemplateExpansion:
+ case TemplateArgument::NullPtr:
+ llvm_unreachable("Pack expansion pattern has no parameter packs");
+
+ case TemplateArgument::Type:
+ if (TypeSourceInfo *Expansion
+ = getSema().CheckPackExpansion(Pattern.getTypeSourceInfo(),
+ EllipsisLoc,
+ NumExpansions))
+ return TemplateArgumentLoc(TemplateArgument(Expansion->getType()),
+ Expansion);
+ break;
+ }
+
+ return TemplateArgumentLoc();
+ }
+
+ /// \brief Build a new expression pack expansion.
+ ///
+ /// By default, performs semantic analysis to build a new pack expansion
+ /// for an expression. Subclasses may override this routine to provide
+ /// different behavior.
+ ExprResult RebuildPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
+ Optional<unsigned> NumExpansions) {
+ return getSema().CheckPackExpansion(Pattern, EllipsisLoc, NumExpansions);
+ }
+
+ /// \brief Build a new C++1z fold-expression.
+ ///
+ /// By default, performs semantic analysis in order to build a new fold
+ /// expression.
+ ExprResult RebuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
+ BinaryOperatorKind Operator,
+ SourceLocation EllipsisLoc, Expr *RHS,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXFoldExpr(LParenLoc, LHS, Operator, EllipsisLoc,
+ RHS, RParenLoc);
+ }
+
+ /// \brief Build an empty C++1z fold-expression with the given operator.
+ ///
+ /// By default, produces the fallback value for the fold-expression, or
+  /// produces an error if there is no fallback value.
+ ExprResult RebuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
+ BinaryOperatorKind Operator) {
+ return getSema().BuildEmptyCXXFoldExpr(EllipsisLoc, Operator);
+ }
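+
+  // Source forms handled by the two fold-expression rebuilds (sketch):
+  //
+  //   template<typename ...Ts> auto sum(Ts ...ts) { return (ts + ...); }
+  //
+  // When the pack is empty, only '&&' (true), '||' (false) and ',' (void())
+  // have a fallback value; for any other operator, such as the '+' above,
+  // RebuildEmptyCXXFoldExpr is expected to diagnose the empty expansion.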
+
+ /// \brief Build a new atomic operation expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildAtomicExpr(SourceLocation BuiltinLoc,
+ MultiExprArg SubExprs,
+ QualType RetTy,
+ AtomicExpr::AtomicOp Op,
+ SourceLocation RParenLoc) {
+    // Just create the expression; there is no interesting semantic
+    // analysis here because we can't actually build an AtomicExpr until
+    // we know it is semantically sound.
+ return new (SemaRef.Context) AtomicExpr(BuiltinLoc, SubExprs, RetTy, Op,
+ RParenLoc);
+ }
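+
+  // The expressions reaching this rebuild come from the __c11_atomic_* and
+  // __atomic_* builtin families, e.g. (illustration):
+  //
+  //   __c11_atomic_load(&obj, __ATOMIC_SEQ_CST)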
+
+private:
+ TypeLoc TransformTypeInObjectScope(TypeLoc TL,
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope,
+ CXXScopeSpec &SS);
+
+ TypeSourceInfo *TransformTypeInObjectScope(TypeSourceInfo *TSInfo,
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope,
+ CXXScopeSpec &SS);
+
+ TypeSourceInfo *TransformTSIInObjectScope(TypeLoc TL, QualType ObjectType,
+ NamedDecl *FirstQualifierInScope,
+ CXXScopeSpec &SS);
+};
+
+template<typename Derived>
+StmtResult TreeTransform<Derived>::TransformStmt(Stmt *S) {
+ if (!S)
+ return S;
+
+ switch (S->getStmtClass()) {
+ case Stmt::NoStmtClass: break;
+
+ // Transform individual statement nodes
+#define STMT(Node, Parent) \
+ case Stmt::Node##Class: return getDerived().Transform##Node(cast<Node>(S));
+#define ABSTRACT_STMT(Node)
+#define EXPR(Node, Parent)
+#include "clang/AST/StmtNodes.inc"
+
+ // Transform expressions by calling TransformExpr.
+#define STMT(Node, Parent)
+#define ABSTRACT_STMT(Stmt)
+#define EXPR(Node, Parent) case Stmt::Node##Class:
+#include "clang/AST/StmtNodes.inc"
+ {
+ ExprResult E = getDerived().TransformExpr(cast<Expr>(S));
+ if (E.isInvalid())
+ return StmtError();
+
+ return getSema().ActOnExprStmt(E);
+ }
+ }
+
+ return S;
+}
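+
+// The switch above is generated from StmtNodes.inc. For a statement node
+// such as IfStmt, the first include expands to (roughly):
+//
+//   case Stmt::IfStmtClass:
+//     return getDerived().TransformIfStmt(cast<IfStmt>(S));
+//
+// while every expression node falls through to the TransformExpr call.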
+
+template<typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPClause(OMPClause *S) {
+ if (!S)
+ return S;
+
+ switch (S->getClauseKind()) {
+ default: break;
+ // Transform individual clause nodes
+#define OPENMP_CLAUSE(Name, Class) \
+ case OMPC_ ## Name : \
+ return getDerived().Transform ## Class(cast<Class>(S));
+#include "clang/Basic/OpenMPKinds.def"
+ }
+
+ return S;
+}
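+
+// Likewise generated from OpenMPKinds.def; for example,
+// OPENMP_CLAUSE(private, OMPPrivateClause) expands to (roughly):
+//
+//   case OMPC_private:
+//     return getDerived().TransformOMPPrivateClause(cast<OMPPrivateClause>(S));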
+
+template<typename Derived>
+ExprResult TreeTransform<Derived>::TransformExpr(Expr *E) {
+ if (!E)
+ return E;
+
+ switch (E->getStmtClass()) {
+ case Stmt::NoStmtClass: break;
+#define STMT(Node, Parent) case Stmt::Node##Class: break;
+#define ABSTRACT_STMT(Stmt)
+#define EXPR(Node, Parent) \
+ case Stmt::Node##Class: return getDerived().Transform##Node(cast<Node>(E));
+#include "clang/AST/StmtNodes.inc"
+ }
+
+ return E;
+}
+
+template<typename Derived>
+ExprResult TreeTransform<Derived>::TransformInitializer(Expr *Init,
+ bool NotCopyInit) {
+ // Initializers are instantiated like expressions, except that various outer
+ // layers are stripped.
+ if (!Init)
+ return Init;
+
+ if (ExprWithCleanups *ExprTemp = dyn_cast<ExprWithCleanups>(Init))
+ Init = ExprTemp->getSubExpr();
+
+ if (MaterializeTemporaryExpr *MTE = dyn_cast<MaterializeTemporaryExpr>(Init))
+ Init = MTE->GetTemporaryExpr();
+
+ while (CXXBindTemporaryExpr *Binder = dyn_cast<CXXBindTemporaryExpr>(Init))
+ Init = Binder->getSubExpr();
+
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Init))
+ Init = ICE->getSubExprAsWritten();
+
+ if (CXXStdInitializerListExpr *ILE =
+ dyn_cast<CXXStdInitializerListExpr>(Init))
+ return TransformInitializer(ILE->getSubExpr(), NotCopyInit);
+
+ // If this is copy-initialization, we only need to reconstruct
+ // InitListExprs. Other forms of copy-initialization will be a no-op if
+ // the initializer is already the right type.
+ CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init);
+ if (!NotCopyInit && !(Construct && Construct->isListInitialization()))
+ return getDerived().TransformExpr(Init);
+
+ // Revert value-initialization back to empty parens.
+ if (CXXScalarValueInitExpr *VIE = dyn_cast<CXXScalarValueInitExpr>(Init)) {
+ SourceRange Parens = VIE->getSourceRange();
+ return getDerived().RebuildParenListExpr(Parens.getBegin(), None,
+ Parens.getEnd());
+ }
+
+ // FIXME: We shouldn't build ImplicitValueInitExprs for direct-initialization.
+ if (isa<ImplicitValueInitExpr>(Init))
+ return getDerived().RebuildParenListExpr(SourceLocation(), None,
+ SourceLocation());
+
+ // Revert initialization by constructor back to a parenthesized or braced list
+ // of expressions. Any other form of initializer can just be reused directly.
+ if (!Construct || isa<CXXTemporaryObjectExpr>(Construct))
+ return getDerived().TransformExpr(Init);
+
+ // If the initialization implicitly converted an initializer list to a
+ // std::initializer_list object, unwrap the std::initializer_list too.
+ if (Construct && Construct->isStdInitListInitialization())
+ return TransformInitializer(Construct->getArg(0), NotCopyInit);
+
+ SmallVector<Expr*, 8> NewArgs;
+ bool ArgChanged = false;
+ if (getDerived().TransformExprs(Construct->getArgs(), Construct->getNumArgs(),
+ /*IsCall*/true, NewArgs, &ArgChanged))
+ return ExprError();
+
+ // If this was list initialization, revert to list form.
+ if (Construct->isListInitialization())
+ return getDerived().RebuildInitList(Construct->getLocStart(), NewArgs,
+ Construct->getLocEnd(),
+ Construct->getType());
+
+ // Build a ParenListExpr to represent anything else.
+ SourceRange Parens = Construct->getParenOrBraceRange();
+ if (Parens.isInvalid()) {
+ // This was a variable declaration's initialization for which no initializer
+ // was specified.
+ assert(NewArgs.empty() &&
+ "no parens or braces but have direct init with arguments?");
+ return ExprEmpty();
+ }
+ return getDerived().RebuildParenListExpr(Parens.getBegin(), NewArgs,
+ Parens.getEnd());
+}
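+
+// Illustration of the stripping above: an initializer written as '"hi"' for
+// a std::string variable typically arrives wrapped in layers such as
+// ExprWithCleanups and CXXConstructExpr; the walk peels these off down to
+// the syntactic argument so the initialization can be redone from the form
+// the user actually wrote.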
+
+template<typename Derived>
+bool TreeTransform<Derived>::TransformExprs(Expr *const *Inputs,
+ unsigned NumInputs,
+ bool IsCall,
+ SmallVectorImpl<Expr *> &Outputs,
+ bool *ArgChanged) {
+ for (unsigned I = 0; I != NumInputs; ++I) {
+ // If requested, drop call arguments that need to be dropped.
+ if (IsCall && getDerived().DropCallArgument(Inputs[I])) {
+ if (ArgChanged)
+ *ArgChanged = true;
+
+ break;
+ }
+
+ if (PackExpansionExpr *Expansion = dyn_cast<PackExpansionExpr>(Inputs[I])) {
+ Expr *Pattern = Expansion->getPattern();
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded);
+ assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
+
+ // Determine whether the set of unexpanded parameter packs can and should
+ // be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ Optional<unsigned> OrigNumExpansions = Expansion->getNumExpansions();
+ Optional<unsigned> NumExpansions = OrigNumExpansions;
+ if (getDerived().TryExpandParameterPacks(Expansion->getEllipsisLoc(),
+ Pattern->getSourceRange(),
+ Unexpanded,
+ Expand, RetainExpansion,
+ NumExpansions))
+ return true;
+
+ if (!Expand) {
+ // The transform has determined that we should perform a simple
+ // transformation on the pack expansion, producing another pack
+ // expansion.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ ExprResult OutPattern = getDerived().TransformExpr(Pattern);
+ if (OutPattern.isInvalid())
+ return true;
+
+ ExprResult Out = getDerived().RebuildPackExpansion(OutPattern.get(),
+ Expansion->getEllipsisLoc(),
+ NumExpansions);
+ if (Out.isInvalid())
+ return true;
+
+ if (ArgChanged)
+ *ArgChanged = true;
+ Outputs.push_back(Out.get());
+ continue;
+ }
+
+ // Record right away that the argument was changed. This needs
+ // to happen even if the array expands to nothing.
+ if (ArgChanged) *ArgChanged = true;
+
+ // The transform has determined that we should perform an elementwise
+ // expansion of the pattern. Do so.
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+ ExprResult Out = getDerived().TransformExpr(Pattern);
+ if (Out.isInvalid())
+ return true;
+
+ // FIXME: Can this happen? We should not try to expand the pack
+ // in this case.
+ if (Out.get()->containsUnexpandedParameterPack()) {
+ Out = getDerived().RebuildPackExpansion(
+ Out.get(), Expansion->getEllipsisLoc(), OrigNumExpansions);
+ if (Out.isInvalid())
+ return true;
+ }
+
+ Outputs.push_back(Out.get());
+ }
+
+ // If we're supposed to retain a pack expansion, do so by temporarily
+ // forgetting the partially-substituted parameter pack.
+ if (RetainExpansion) {
+ ForgetPartiallySubstitutedPackRAII Forget(getDerived());
+
+ ExprResult Out = getDerived().TransformExpr(Pattern);
+ if (Out.isInvalid())
+ return true;
+
+ Out = getDerived().RebuildPackExpansion(
+ Out.get(), Expansion->getEllipsisLoc(), OrigNumExpansions);
+ if (Out.isInvalid())
+ return true;
+
+ Outputs.push_back(Out.get());
+ }
+
+ continue;
+ }
+
+ ExprResult Result =
+ IsCall ? getDerived().TransformInitializer(Inputs[I], /*DirectInit*/false)
+ : getDerived().TransformExpr(Inputs[I]);
+ if (Result.isInvalid())
+ return true;
+
+ if (Result.get() != Inputs[I] && ArgChanged)
+ *ArgChanged = true;
+
+ Outputs.push_back(Result.get());
+ }
+
+ return false;
+}
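+
+// Sketch of the elementwise path above: for a call 'f(args...)' whose pack
+// is known to expand to three elements, the loop transforms the pattern
+// three times under substitution indices 0, 1 and 2, appending three
+// separate arguments to Outputs.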
+
+template<typename Derived>
+NestedNameSpecifierLoc
+TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
+ NestedNameSpecifierLoc NNS,
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope) {
+ SmallVector<NestedNameSpecifierLoc, 4> Qualifiers;
+ for (NestedNameSpecifierLoc Qualifier = NNS; Qualifier;
+ Qualifier = Qualifier.getPrefix())
+ Qualifiers.push_back(Qualifier);
+
+ CXXScopeSpec SS;
+ while (!Qualifiers.empty()) {
+ NestedNameSpecifierLoc Q = Qualifiers.pop_back_val();
+ NestedNameSpecifier *QNNS = Q.getNestedNameSpecifier();
+
+ switch (QNNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ if (SemaRef.BuildCXXNestedNameSpecifier(/*Scope=*/nullptr,
+ *QNNS->getAsIdentifier(),
+ Q.getLocalBeginLoc(),
+ Q.getLocalEndLoc(),
+ ObjectType, false, SS,
+ FirstQualifierInScope, false))
+ return NestedNameSpecifierLoc();
+
+ break;
+
+ case NestedNameSpecifier::Namespace: {
+ NamespaceDecl *NS
+ = cast_or_null<NamespaceDecl>(
+ getDerived().TransformDecl(
+ Q.getLocalBeginLoc(),
+ QNNS->getAsNamespace()));
+ SS.Extend(SemaRef.Context, NS, Q.getLocalBeginLoc(), Q.getLocalEndLoc());
+ break;
+ }
+
+ case NestedNameSpecifier::NamespaceAlias: {
+ NamespaceAliasDecl *Alias
+ = cast_or_null<NamespaceAliasDecl>(
+ getDerived().TransformDecl(Q.getLocalBeginLoc(),
+ QNNS->getAsNamespaceAlias()));
+ SS.Extend(SemaRef.Context, Alias, Q.getLocalBeginLoc(),
+ Q.getLocalEndLoc());
+ break;
+ }
+
+ case NestedNameSpecifier::Global:
+ // There is no meaningful transformation that one could perform on the
+ // global scope.
+ SS.MakeGlobal(SemaRef.Context, Q.getBeginLoc());
+ break;
+
+ case NestedNameSpecifier::Super: {
+ CXXRecordDecl *RD =
+ cast_or_null<CXXRecordDecl>(getDerived().TransformDecl(
+ SourceLocation(), QNNS->getAsRecordDecl()));
+ SS.MakeSuper(SemaRef.Context, RD, Q.getBeginLoc(), Q.getEndLoc());
+ break;
+ }
+
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ case NestedNameSpecifier::TypeSpec: {
+ TypeLoc TL = TransformTypeInObjectScope(Q.getTypeLoc(), ObjectType,
+ FirstQualifierInScope, SS);
+
+ if (!TL)
+ return NestedNameSpecifierLoc();
+
+ if (TL.getType()->isDependentType() || TL.getType()->isRecordType() ||
+ (SemaRef.getLangOpts().CPlusPlus11 &&
+ TL.getType()->isEnumeralType())) {
+ assert(!TL.getType().hasLocalQualifiers() &&
+ "Can't get cv-qualifiers here");
+ if (TL.getType()->isEnumeralType())
+ SemaRef.Diag(TL.getBeginLoc(),
+ diag::warn_cxx98_compat_enum_nested_name_spec);
+ SS.Extend(SemaRef.Context, /*FIXME:*/SourceLocation(), TL,
+ Q.getLocalEndLoc());
+ break;
+ }
+      // If the nested-name-specifier is an invalid typedef, don't emit an
+      // error because a previous error should have already been emitted.
+ TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>();
+ if (!TTL || !TTL.getTypedefNameDecl()->isInvalidDecl()) {
+ SemaRef.Diag(TL.getBeginLoc(), diag::err_nested_name_spec_non_tag)
+ << TL.getType() << SS.getRange();
+ }
+ return NestedNameSpecifierLoc();
+ }
+ }
+
+ // The qualifier-in-scope and object type only apply to the leftmost entity.
+ FirstQualifierInScope = nullptr;
+ ObjectType = QualType();
+ }
+
+ // Don't rebuild the nested-name-specifier if we don't have to.
+ if (SS.getScopeRep() == NNS.getNestedNameSpecifier() &&
+ !getDerived().AlwaysRebuild())
+ return NNS;
+
+ // If we can re-use the source-location data from the original
+ // nested-name-specifier, do so.
+ if (SS.location_size() == NNS.getDataLength() &&
+ memcmp(SS.location_data(), NNS.getOpaqueData(), SS.location_size()) == 0)
+ return NestedNameSpecifierLoc(SS.getScopeRep(), NNS.getOpaqueData());
+
+ // Allocate new nested-name-specifier location information.
+ return SS.getWithLocInContext(SemaRef.Context);
+}
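+
+// For a specifier such as 'A::B::C::' the loop above visits the qualifiers
+// left-to-right (A, then B, then C), extending SS as it goes; only the
+// leftmost qualifier may consult the object type or the unqualified lookup
+// result, hence the resets at the bottom of the loop body.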
+
+template<typename Derived>
+DeclarationNameInfo
+TreeTransform<Derived>
+::TransformDeclarationNameInfo(const DeclarationNameInfo &NameInfo) {
+ DeclarationName Name = NameInfo.getName();
+ if (!Name)
+ return DeclarationNameInfo();
+
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXOperatorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXUsingDirective:
+ return NameInfo;
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName: {
+ TypeSourceInfo *NewTInfo;
+ CanQualType NewCanTy;
+ if (TypeSourceInfo *OldTInfo = NameInfo.getNamedTypeInfo()) {
+ NewTInfo = getDerived().TransformType(OldTInfo);
+ if (!NewTInfo)
+ return DeclarationNameInfo();
+ NewCanTy = SemaRef.Context.getCanonicalType(NewTInfo->getType());
+ }
+ else {
+ NewTInfo = nullptr;
+ TemporaryBase Rebase(*this, NameInfo.getLoc(), Name);
+ QualType NewT = getDerived().TransformType(Name.getCXXNameType());
+ if (NewT.isNull())
+ return DeclarationNameInfo();
+ NewCanTy = SemaRef.Context.getCanonicalType(NewT);
+ }
+
+ DeclarationName NewName
+ = SemaRef.Context.DeclarationNames.getCXXSpecialName(Name.getNameKind(),
+ NewCanTy);
+ DeclarationNameInfo NewNameInfo(NameInfo);
+ NewNameInfo.setName(NewName);
+ NewNameInfo.setNamedTypeInfo(NewTInfo);
+ return NewNameInfo;
+ }
+ }
+
+ llvm_unreachable("Unknown name kind.");
+}
+
+template<typename Derived>
+TemplateName
+TreeTransform<Derived>::TransformTemplateName(CXXScopeSpec &SS,
+ TemplateName Name,
+ SourceLocation NameLoc,
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope) {
+ if (QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName()) {
+ TemplateDecl *Template = QTN->getTemplateDecl();
+ assert(Template && "qualified template name must refer to a template");
+
+ TemplateDecl *TransTemplate
+ = cast_or_null<TemplateDecl>(getDerived().TransformDecl(NameLoc,
+ Template));
+ if (!TransTemplate)
+ return TemplateName();
+
+ if (!getDerived().AlwaysRebuild() &&
+ SS.getScopeRep() == QTN->getQualifier() &&
+ TransTemplate == Template)
+ return Name;
+
+ return getDerived().RebuildTemplateName(SS, QTN->hasTemplateKeyword(),
+ TransTemplate);
+ }
+
+ if (DependentTemplateName *DTN = Name.getAsDependentTemplateName()) {
+ if (SS.getScopeRep()) {
+ // These apply to the scope specifier, not the template.
+ ObjectType = QualType();
+ FirstQualifierInScope = nullptr;
+ }
+
+ if (!getDerived().AlwaysRebuild() &&
+ SS.getScopeRep() == DTN->getQualifier() &&
+ ObjectType.isNull())
+ return Name;
+
+ if (DTN->isIdentifier()) {
+ return getDerived().RebuildTemplateName(SS,
+ *DTN->getIdentifier(),
+ NameLoc,
+ ObjectType,
+ FirstQualifierInScope);
+ }
+
+ return getDerived().RebuildTemplateName(SS, DTN->getOperator(), NameLoc,
+ ObjectType);
+ }
+
+ if (TemplateDecl *Template = Name.getAsTemplateDecl()) {
+ TemplateDecl *TransTemplate
+ = cast_or_null<TemplateDecl>(getDerived().TransformDecl(NameLoc,
+ Template));
+ if (!TransTemplate)
+ return TemplateName();
+
+ if (!getDerived().AlwaysRebuild() &&
+ TransTemplate == Template)
+ return Name;
+
+ return TemplateName(TransTemplate);
+ }
+
+ if (SubstTemplateTemplateParmPackStorage *SubstPack
+ = Name.getAsSubstTemplateTemplateParmPack()) {
+ TemplateTemplateParmDecl *TransParam
+ = cast_or_null<TemplateTemplateParmDecl>(
+ getDerived().TransformDecl(NameLoc, SubstPack->getParameterPack()));
+ if (!TransParam)
+ return TemplateName();
+
+ if (!getDerived().AlwaysRebuild() &&
+ TransParam == SubstPack->getParameterPack())
+ return Name;
+
+ return getDerived().RebuildTemplateName(TransParam,
+ SubstPack->getArgumentPack());
+ }
+
+ // These should be getting filtered out before they reach the AST.
+ llvm_unreachable("overloaded function decl survived to here");
+}
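+
+// Example of a dependent template name handled above (illustration):
+//
+//   template<typename T> void g() { typename T::template apply<int> x; }
+//
+// 'T::template apply' is a DependentTemplateName whose qualifier is 'T::';
+// once T is known, the RebuildTemplateName call can perform the lookup.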
+
+template<typename Derived>
+void TreeTransform<Derived>::InventTemplateArgumentLoc(
+ const TemplateArgument &Arg,
+ TemplateArgumentLoc &Output) {
+ SourceLocation Loc = getDerived().getBaseLocation();
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("null template argument in TreeTransform");
+ break;
+
+ case TemplateArgument::Type:
+ Output = TemplateArgumentLoc(Arg,
+ SemaRef.Context.getTrivialTypeSourceInfo(Arg.getAsType(), Loc));
+
+ break;
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion: {
+ NestedNameSpecifierLocBuilder Builder;
+ TemplateName Template = Arg.getAsTemplate();
+ if (DependentTemplateName *DTN = Template.getAsDependentTemplateName())
+ Builder.MakeTrivial(SemaRef.Context, DTN->getQualifier(), Loc);
+ else if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
+ Builder.MakeTrivial(SemaRef.Context, QTN->getQualifier(), Loc);
+
+ if (Arg.getKind() == TemplateArgument::Template)
+ Output = TemplateArgumentLoc(Arg,
+ Builder.getWithLocInContext(SemaRef.Context),
+ Loc);
+ else
+ Output = TemplateArgumentLoc(Arg,
+ Builder.getWithLocInContext(SemaRef.Context),
+ Loc, Loc);
+
+ break;
+ }
+
+ case TemplateArgument::Expression:
+ Output = TemplateArgumentLoc(Arg, Arg.getAsExpr());
+ break;
+
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Pack:
+ case TemplateArgument::NullPtr:
+ Output = TemplateArgumentLoc(Arg, TemplateArgumentLocInfo());
+ break;
+ }
+}
+
+template<typename Derived>
+bool TreeTransform<Derived>::TransformTemplateArgument(
+ const TemplateArgumentLoc &Input,
+ TemplateArgumentLoc &Output, bool Uneval) {
+ const TemplateArgument &Arg = Input.getArgument();
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Pack:
+ case TemplateArgument::Declaration:
+ case TemplateArgument::NullPtr:
+ llvm_unreachable("Unexpected TemplateArgument");
+
+ case TemplateArgument::Type: {
+ TypeSourceInfo *DI = Input.getTypeSourceInfo();
+ if (!DI)
+ DI = InventTypeSourceInfo(Input.getArgument().getAsType());
+
+ DI = getDerived().TransformType(DI);
+ if (!DI) return true;
+
+ Output = TemplateArgumentLoc(TemplateArgument(DI->getType()), DI);
+ return false;
+ }
+
+ case TemplateArgument::Template: {
+ NestedNameSpecifierLoc QualifierLoc = Input.getTemplateQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(QualifierLoc);
+ if (!QualifierLoc)
+ return true;
+ }
+
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ TemplateName Template
+ = getDerived().TransformTemplateName(SS, Arg.getAsTemplate(),
+ Input.getTemplateNameLoc());
+ if (Template.isNull())
+ return true;
+
+ Output = TemplateArgumentLoc(TemplateArgument(Template), QualifierLoc,
+ Input.getTemplateNameLoc());
+ return false;
+ }
+
+ case TemplateArgument::TemplateExpansion:
+ llvm_unreachable("Caller should expand pack expansions");
+
+ case TemplateArgument::Expression: {
+ // Template argument expressions are constant expressions.
+ EnterExpressionEvaluationContext Unevaluated(
+ getSema(), Uneval ? Sema::Unevaluated : Sema::ConstantEvaluated);
+
+ Expr *InputExpr = Input.getSourceExpression();
+ if (!InputExpr) InputExpr = Input.getArgument().getAsExpr();
+
+ ExprResult E = getDerived().TransformExpr(InputExpr);
+ E = SemaRef.ActOnConstantExpression(E);
+ if (E.isInvalid()) return true;
+ Output = TemplateArgumentLoc(TemplateArgument(E.get()), E.get());
+ return false;
+ }
+ }
+
+ // Work around bogus GCC warning
+ return true;
+}
+
+/// \brief Iterator adaptor that invents template argument location information
+/// for each of the template arguments in its underlying iterator.
+template<typename Derived, typename InputIterator>
+class TemplateArgumentLocInventIterator {
+ TreeTransform<Derived> &Self;
+ InputIterator Iter;
+
+public:
+ typedef TemplateArgumentLoc value_type;
+ typedef TemplateArgumentLoc reference;
+ typedef typename std::iterator_traits<InputIterator>::difference_type
+ difference_type;
+ typedef std::input_iterator_tag iterator_category;
+
+ class pointer {
+ TemplateArgumentLoc Arg;
+
+ public:
+ explicit pointer(TemplateArgumentLoc Arg) : Arg(Arg) { }
+
+ const TemplateArgumentLoc *operator->() const { return &Arg; }
+ };
+
+ TemplateArgumentLocInventIterator() { }
+
+ explicit TemplateArgumentLocInventIterator(TreeTransform<Derived> &Self,
+ InputIterator Iter)
+ : Self(Self), Iter(Iter) { }
+
+ TemplateArgumentLocInventIterator &operator++() {
+ ++Iter;
+ return *this;
+ }
+
+ TemplateArgumentLocInventIterator operator++(int) {
+ TemplateArgumentLocInventIterator Old(*this);
+ ++(*this);
+ return Old;
+ }
+
+ reference operator*() const {
+ TemplateArgumentLoc Result;
+ Self.InventTemplateArgumentLoc(*Iter, Result);
+ return Result;
+ }
+
+ pointer operator->() const { return pointer(**this); }
+
+ friend bool operator==(const TemplateArgumentLocInventIterator &X,
+ const TemplateArgumentLocInventIterator &Y) {
+ return X.Iter == Y.Iter;
+ }
+
+ friend bool operator!=(const TemplateArgumentLocInventIterator &X,
+ const TemplateArgumentLocInventIterator &Y) {
+ return X.Iter != Y.Iter;
+ }
+};
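+
+// Typical use (see TransformTemplateArguments below): wrapping a pack's raw
+// argument iterators so that each TemplateArgument is given trivial
+// location information on the fly, e.g.
+//
+//   PackLocIterator(*this, In.getArgument().pack_begin())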
+
+template<typename Derived>
+template<typename InputIterator>
+bool TreeTransform<Derived>::TransformTemplateArguments(
+ InputIterator First, InputIterator Last, TemplateArgumentListInfo &Outputs,
+ bool Uneval) {
+ for (; First != Last; ++First) {
+ TemplateArgumentLoc Out;
+ TemplateArgumentLoc In = *First;
+
+ if (In.getArgument().getKind() == TemplateArgument::Pack) {
+      // Unpack argument packs, translating them into separate
+      // arguments.
+ // FIXME: We could do much better if we could guarantee that the
+ // TemplateArgumentLocInfo for the pack expansion would be usable for
+ // all of the template arguments in the argument pack.
+ typedef TemplateArgumentLocInventIterator<Derived,
+ TemplateArgument::pack_iterator>
+ PackLocIterator;
+ if (TransformTemplateArguments(PackLocIterator(*this,
+ In.getArgument().pack_begin()),
+ PackLocIterator(*this,
+ In.getArgument().pack_end()),
+ Outputs, Uneval))
+ return true;
+
+ continue;
+ }
+
+ if (In.getArgument().isPackExpansion()) {
+ // We have a pack expansion, for which we will be substituting into
+ // the pattern.
+ SourceLocation Ellipsis;
+ Optional<unsigned> OrigNumExpansions;
+ TemplateArgumentLoc Pattern
+ = getSema().getTemplateArgumentPackExpansionPattern(
+ In, Ellipsis, OrigNumExpansions);
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded);
+ assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
+
+ // Determine whether the set of unexpanded parameter packs can and should
+ // be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ Optional<unsigned> NumExpansions = OrigNumExpansions;
+ if (getDerived().TryExpandParameterPacks(Ellipsis,
+ Pattern.getSourceRange(),
+ Unexpanded,
+ Expand,
+ RetainExpansion,
+ NumExpansions))
+ return true;
+
+ if (!Expand) {
+ // The transform has determined that we should perform a simple
+ // transformation on the pack expansion, producing another pack
+ // expansion.
+ TemplateArgumentLoc OutPattern;
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ if (getDerived().TransformTemplateArgument(Pattern, OutPattern, Uneval))
+ return true;
+
+ Out = getDerived().RebuildPackExpansion(OutPattern, Ellipsis,
+ NumExpansions);
+ if (Out.getArgument().isNull())
+ return true;
+
+ Outputs.addArgument(Out);
+ continue;
+ }
+
+ // The transform has determined that we should perform an elementwise
+ // expansion of the pattern. Do so.
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+
+ if (getDerived().TransformTemplateArgument(Pattern, Out, Uneval))
+ return true;
+
+ if (Out.getArgument().containsUnexpandedParameterPack()) {
+ Out = getDerived().RebuildPackExpansion(Out, Ellipsis,
+ OrigNumExpansions);
+ if (Out.getArgument().isNull())
+ return true;
+ }
+
+ Outputs.addArgument(Out);
+ }
+
+ // If we're supposed to retain a pack expansion, do so by temporarily
+ // forgetting the partially-substituted parameter pack.
+ if (RetainExpansion) {
+ ForgetPartiallySubstitutedPackRAII Forget(getDerived());
+
+ if (getDerived().TransformTemplateArgument(Pattern, Out, Uneval))
+ return true;
+
+ Out = getDerived().RebuildPackExpansion(Out, Ellipsis,
+ OrigNumExpansions);
+ if (Out.getArgument().isNull())
+ return true;
+
+ Outputs.addArgument(Out);
+ }
+
+ continue;
+ }
+
+ // The simple case:
+ if (getDerived().TransformTemplateArgument(In, Out, Uneval))
+ return true;
+
+ Outputs.addArgument(Out);
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Type transformation
+//===----------------------------------------------------------------------===//
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformType(QualType T) {
+ if (getDerived().AlreadyTransformed(T))
+ return T;
+
+ // Temporary workaround. All of these transformations should
+ // eventually turn into transformations on TypeLocs.
+ TypeSourceInfo *DI = getSema().Context.getTrivialTypeSourceInfo(T,
+ getDerived().getBaseLocation());
+
+ TypeSourceInfo *NewDI = getDerived().TransformType(DI);
+
+ if (!NewDI)
+ return QualType();
+
+ return NewDI->getType();
+}
+
+template<typename Derived>
+TypeSourceInfo *TreeTransform<Derived>::TransformType(TypeSourceInfo *DI) {
+ // Refine the base location to the type's location.
+ TemporaryBase Rebase(*this, DI->getTypeLoc().getBeginLoc(),
+ getDerived().getBaseEntity());
+ if (getDerived().AlreadyTransformed(DI->getType()))
+ return DI;
+
+ TypeLocBuilder TLB;
+
+ TypeLoc TL = DI->getTypeLoc();
+ TLB.reserve(TL.getFullDataSize());
+
+ QualType Result = getDerived().TransformType(TLB, TL);
+ if (Result.isNull())
+ return nullptr;
+
+ return TLB.getTypeSourceInfo(SemaRef.Context, Result);
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformType(TypeLocBuilder &TLB, TypeLoc T) {
+ switch (T.getTypeLocClass()) {
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ case TypeLoc::CLASS: \
+ return getDerived().Transform##CLASS##Type(TLB, \
+ T.castAs<CLASS##TypeLoc>());
+#include "clang/AST/TypeLocNodes.def"
+ }
+
+ llvm_unreachable("unhandled type loc!");
+}
+
+/// FIXME: By default, this routine adds type qualifiers only to types
+/// that can have qualifiers, and silently suppresses those qualifiers
+/// that are not permitted (e.g., qualifiers on reference or function
+/// types). This is the right thing for template instantiation, but
+/// probably not for other clients.
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformQualifiedType(TypeLocBuilder &TLB,
+ QualifiedTypeLoc T) {
+ Qualifiers Quals = T.getType().getLocalQualifiers();
+
+ QualType Result = getDerived().TransformType(TLB, T.getUnqualifiedLoc());
+ if (Result.isNull())
+ return QualType();
+
+ // Silently suppress qualifiers if the result type can't be qualified.
+ // FIXME: this is the right thing for template instantiation, but
+ // probably not for other clients.
+ if (Result->isFunctionType() || Result->isReferenceType())
+ return Result;
+
+ // Suppress Objective-C lifetime qualifiers if they don't make sense for the
+ // resulting type.
+ if (Quals.hasObjCLifetime()) {
+ if (!Result->isObjCLifetimeType() && !Result->isDependentType())
+ Quals.removeObjCLifetime();
+ else if (Result.getObjCLifetime()) {
+ // Objective-C ARC:
+ // A lifetime qualifier applied to a substituted template parameter
+ // overrides the lifetime qualifier from the template argument.
+ const AutoType *AutoTy;
+ if (const SubstTemplateTypeParmType *SubstTypeParam
+ = dyn_cast<SubstTemplateTypeParmType>(Result)) {
+ QualType Replacement = SubstTypeParam->getReplacementType();
+ Qualifiers Qs = Replacement.getQualifiers();
+ Qs.removeObjCLifetime();
+ Replacement
+ = SemaRef.Context.getQualifiedType(Replacement.getUnqualifiedType(),
+ Qs);
+ Result = SemaRef.Context.getSubstTemplateTypeParmType(
+ SubstTypeParam->getReplacedParameter(),
+ Replacement);
+ TLB.TypeWasModifiedSafely(Result);
+ } else if ((AutoTy = dyn_cast<AutoType>(Result)) && AutoTy->isDeduced()) {
+ // 'auto' types behave the same way as template parameters.
+ QualType Deduced = AutoTy->getDeducedType();
+ Qualifiers Qs = Deduced.getQualifiers();
+ Qs.removeObjCLifetime();
+ Deduced = SemaRef.Context.getQualifiedType(Deduced.getUnqualifiedType(),
+ Qs);
+ Result = SemaRef.Context.getAutoType(Deduced, AutoTy->getKeyword(),
+ AutoTy->isDependentType());
+ TLB.TypeWasModifiedSafely(Result);
+ } else {
+ // Otherwise, complain about the addition of a qualifier to an
+ // already-qualified type.
+ SourceRange R = T.getUnqualifiedLoc().getSourceRange();
+ SemaRef.Diag(R.getBegin(), diag::err_attr_objc_ownership_redundant)
+ << Result << R;
+
+ Quals.removeObjCLifetime();
+ }
+ }
+ }
+ if (!Quals.empty()) {
+ Result = SemaRef.BuildQualifiedType(Result, T.getBeginLoc(), Quals);
+ // BuildQualifiedType might not add qualifiers if they are invalid.
+ if (Result.hasLocalQualifiers())
+ TLB.push<QualifiedTypeLoc>(Result);
+ // No location information to preserve.
+ }
+
+ return Result;
+}
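+
+// Example of the ARC rule above (illustration): instantiating
+//
+//   template<typename T> void f(__strong T x);
+//
+// with T = '__weak id' keeps the '__strong' written on the parameter and
+// silently drops the '__weak' carried by the argument, instead of
+// diagnosing a redundant ownership qualifier.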
+
+template<typename Derived>
+TypeLoc
+TreeTransform<Derived>::TransformTypeInObjectScope(TypeLoc TL,
+ QualType ObjectType,
+ NamedDecl *UnqualLookup,
+ CXXScopeSpec &SS) {
+ if (getDerived().AlreadyTransformed(TL.getType()))
+ return TL;
+
+ TypeSourceInfo *TSI =
+ TransformTSIInObjectScope(TL, ObjectType, UnqualLookup, SS);
+ if (TSI)
+ return TSI->getTypeLoc();
+ return TypeLoc();
+}
+
+template<typename Derived>
+TypeSourceInfo *
+TreeTransform<Derived>::TransformTypeInObjectScope(TypeSourceInfo *TSInfo,
+ QualType ObjectType,
+ NamedDecl *UnqualLookup,
+ CXXScopeSpec &SS) {
+ if (getDerived().AlreadyTransformed(TSInfo->getType()))
+ return TSInfo;
+
+ return TransformTSIInObjectScope(TSInfo->getTypeLoc(), ObjectType,
+ UnqualLookup, SS);
+}
+
+template <typename Derived>
+TypeSourceInfo *TreeTransform<Derived>::TransformTSIInObjectScope(
+ TypeLoc TL, QualType ObjectType, NamedDecl *UnqualLookup,
+ CXXScopeSpec &SS) {
+ QualType T = TL.getType();
+ assert(!getDerived().AlreadyTransformed(T));
+
+ TypeLocBuilder TLB;
+ QualType Result;
+
+ if (isa<TemplateSpecializationType>(T)) {
+ TemplateSpecializationTypeLoc SpecTL =
+ TL.castAs<TemplateSpecializationTypeLoc>();
+
+ TemplateName Template
+ = getDerived().TransformTemplateName(SS,
+ SpecTL.getTypePtr()->getTemplateName(),
+ SpecTL.getTemplateNameLoc(),
+ ObjectType, UnqualLookup);
+ if (Template.isNull())
+ return nullptr;
+
+ Result = getDerived().TransformTemplateSpecializationType(TLB, SpecTL,
+ Template);
+ } else if (isa<DependentTemplateSpecializationType>(T)) {
+ DependentTemplateSpecializationTypeLoc SpecTL =
+ TL.castAs<DependentTemplateSpecializationTypeLoc>();
+
+ TemplateName Template
+ = getDerived().RebuildTemplateName(SS,
+ *SpecTL.getTypePtr()->getIdentifier(),
+ SpecTL.getTemplateNameLoc(),
+ ObjectType, UnqualLookup);
+ if (Template.isNull())
+ return nullptr;
+
+ Result = getDerived().TransformDependentTemplateSpecializationType(TLB,
+ SpecTL,
+ Template,
+ SS);
+ } else {
+ // Nothing special needs to be done for these.
+ Result = getDerived().TransformType(TLB, TL);
+ }
+
+ if (Result.isNull())
+ return nullptr;
+
+ return TLB.getTypeSourceInfo(SemaRef.Context, Result);
+}
+
+template <class TyLoc> static inline
+QualType TransformTypeSpecType(TypeLocBuilder &TLB, TyLoc T) {
+ TyLoc NewT = TLB.push<TyLoc>(T.getType());
+ NewT.setNameLoc(T.getNameLoc());
+ return T.getType();
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformBuiltinType(TypeLocBuilder &TLB,
+ BuiltinTypeLoc T) {
+ BuiltinTypeLoc NewT = TLB.push<BuiltinTypeLoc>(T.getType());
+ NewT.setBuiltinLoc(T.getBuiltinLoc());
+ if (T.needsExtraLocalData())
+ NewT.getWrittenBuiltinSpecs() = T.getWrittenBuiltinSpecs();
+ return T.getType();
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformComplexType(TypeLocBuilder &TLB,
+ ComplexTypeLoc T) {
+ // FIXME: recurse?
+ return TransformTypeSpecType(TLB, T);
+}
+
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformAdjustedType(TypeLocBuilder &TLB,
+ AdjustedTypeLoc TL) {
+ // Adjustments applied during transformation are handled elsewhere.
+ return getDerived().TransformType(TLB, TL.getOriginalLoc());
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformDecayedType(TypeLocBuilder &TLB,
+ DecayedTypeLoc TL) {
+ QualType OriginalType = getDerived().TransformType(TLB, TL.getOriginalLoc());
+ if (OriginalType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ OriginalType != TL.getOriginalLoc().getType())
+ Result = SemaRef.Context.getDecayedType(OriginalType);
+ TLB.push<DecayedTypeLoc>(Result);
+ // Nothing to set for DecayedTypeLoc.
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformPointerType(TypeLocBuilder &TLB,
+ PointerTypeLoc TL) {
+ QualType PointeeType
+ = getDerived().TransformType(TLB, TL.getPointeeLoc());
+ if (PointeeType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (PointeeType->getAs<ObjCObjectType>()) {
+    // A dependent pointer type 'T *' is being transformed such that an
+    // Objective-C class type is substituted for 'T'. The resulting
+    // pointer type is an ObjCObjectPointerType, not a PointerType.
+ Result = SemaRef.Context.getObjCObjectPointerType(PointeeType);
+
+ ObjCObjectPointerTypeLoc NewT = TLB.push<ObjCObjectPointerTypeLoc>(Result);
+ NewT.setStarLoc(TL.getStarLoc());
+ return Result;
+ }
+
+ if (getDerived().AlwaysRebuild() ||
+ PointeeType != TL.getPointeeLoc().getType()) {
+ Result = getDerived().RebuildPointerType(PointeeType, TL.getSigilLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ // Objective-C ARC can add lifetime qualifiers to the type that we're
+ // pointing to.
+ TLB.TypeWasModifiedSafely(Result->getPointeeType());
+
+ PointerTypeLoc NewT = TLB.push<PointerTypeLoc>(Result);
+ NewT.setSigilLoc(TL.getSigilLoc());
+ return Result;
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformBlockPointerType(TypeLocBuilder &TLB,
+ BlockPointerTypeLoc TL) {
+ QualType PointeeType
+ = getDerived().TransformType(TLB, TL.getPointeeLoc());
+ if (PointeeType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ PointeeType != TL.getPointeeLoc().getType()) {
+ Result = getDerived().RebuildBlockPointerType(PointeeType,
+ TL.getSigilLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ BlockPointerTypeLoc NewT = TLB.push<BlockPointerTypeLoc>(Result);
+ NewT.setSigilLoc(TL.getSigilLoc());
+ return Result;
+}
+
+/// Transforms a reference type. Note that somewhat paradoxically we
+/// don't care whether the type itself is an l-value type or an r-value
+/// type; we only care if the type was *written* as an l-value type
+/// or an r-value type.
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformReferenceType(TypeLocBuilder &TLB,
+ ReferenceTypeLoc TL) {
+ const ReferenceType *T = TL.getTypePtr();
+
+ // Note that this works with the pointee-as-written.
+ QualType PointeeType = getDerived().TransformType(TLB, TL.getPointeeLoc());
+ if (PointeeType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ PointeeType != T->getPointeeTypeAsWritten()) {
+ Result = getDerived().RebuildReferenceType(PointeeType,
+ T->isSpelledAsLValue(),
+ TL.getSigilLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ // Objective-C ARC can add lifetime qualifiers to the type that we're
+ // referring to.
+ TLB.TypeWasModifiedSafely(
+ Result->getAs<ReferenceType>()->getPointeeTypeAsWritten());
+
+ // r-value references can be rebuilt as l-value references.
+ ReferenceTypeLoc NewTL;
+ if (isa<LValueReferenceType>(Result))
+ NewTL = TLB.push<LValueReferenceTypeLoc>(Result);
+ else
+ NewTL = TLB.push<RValueReferenceTypeLoc>(Result);
+ NewTL.setSigilLoc(TL.getSigilLoc());
+
+ return Result;
+}
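+
+// Reference collapsing in action (illustration): instantiating a written
+// 'T &&' with T = 'int &' rebuilds the l-value reference 'int &', which is
+// why the TypeLoc pushed above is chosen from the rebuilt type rather than
+// from the original TypeLoc's kind.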
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformLValueReferenceType(TypeLocBuilder &TLB,
+ LValueReferenceTypeLoc TL) {
+ return TransformReferenceType(TLB, TL);
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformRValueReferenceType(TypeLocBuilder &TLB,
+ RValueReferenceTypeLoc TL) {
+ return TransformReferenceType(TLB, TL);
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformMemberPointerType(TypeLocBuilder &TLB,
+ MemberPointerTypeLoc TL) {
+ QualType PointeeType = getDerived().TransformType(TLB, TL.getPointeeLoc());
+ if (PointeeType.isNull())
+ return QualType();
+
+ TypeSourceInfo* OldClsTInfo = TL.getClassTInfo();
+ TypeSourceInfo *NewClsTInfo = nullptr;
+ if (OldClsTInfo) {
+ NewClsTInfo = getDerived().TransformType(OldClsTInfo);
+ if (!NewClsTInfo)
+ return QualType();
+ }
+
+ const MemberPointerType *T = TL.getTypePtr();
+ QualType OldClsType = QualType(T->getClass(), 0);
+ QualType NewClsType;
+ if (NewClsTInfo)
+ NewClsType = NewClsTInfo->getType();
+ else {
+ NewClsType = getDerived().TransformType(OldClsType);
+ if (NewClsType.isNull())
+ return QualType();
+ }
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ PointeeType != T->getPointeeType() ||
+ NewClsType != OldClsType) {
+ Result = getDerived().RebuildMemberPointerType(PointeeType, NewClsType,
+ TL.getStarLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ // If we had to adjust the pointee type when building a member pointer, make
+ // sure to push TypeLoc info for it.
+ const MemberPointerType *MPT = Result->getAs<MemberPointerType>();
+ if (MPT && PointeeType != MPT->getPointeeType()) {
+ assert(isa<AdjustedType>(MPT->getPointeeType()));
+ TLB.push<AdjustedTypeLoc>(MPT->getPointeeType());
+ }
+
+ MemberPointerTypeLoc NewTL = TLB.push<MemberPointerTypeLoc>(Result);
+ NewTL.setSigilLoc(TL.getSigilLoc());
+ NewTL.setClassTInfo(NewClsTInfo);
+
+ return Result;
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformConstantArrayType(TypeLocBuilder &TLB,
+ ConstantArrayTypeLoc TL) {
+ const ConstantArrayType *T = TL.getTypePtr();
+ QualType ElementType = getDerived().TransformType(TLB, TL.getElementLoc());
+ if (ElementType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ ElementType != T->getElementType()) {
+ Result = getDerived().RebuildConstantArrayType(ElementType,
+ T->getSizeModifier(),
+ T->getSize(),
+ T->getIndexTypeCVRQualifiers(),
+ TL.getBracketsRange());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ // We might have either a ConstantArrayType or a VariableArrayType now:
+ // a ConstantArrayType is allowed to have an element type which is a
+ // VariableArrayType if the type is dependent. Fortunately, all array
+ // types have the same location layout.
+ ArrayTypeLoc NewTL = TLB.push<ArrayTypeLoc>(Result);
+ NewTL.setLBracketLoc(TL.getLBracketLoc());
+ NewTL.setRBracketLoc(TL.getRBracketLoc());
+
+ Expr *Size = TL.getSizeExpr();
+ if (Size) {
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
+ Size = getDerived().TransformExpr(Size).template getAs<Expr>();
+ Size = SemaRef.ActOnConstantExpression(Size).get();
+ }
+ NewTL.setSizeExpr(Size);
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformIncompleteArrayType(
+ TypeLocBuilder &TLB,
+ IncompleteArrayTypeLoc TL) {
+ const IncompleteArrayType *T = TL.getTypePtr();
+ QualType ElementType = getDerived().TransformType(TLB, TL.getElementLoc());
+ if (ElementType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ ElementType != T->getElementType()) {
+ Result = getDerived().RebuildIncompleteArrayType(ElementType,
+ T->getSizeModifier(),
+ T->getIndexTypeCVRQualifiers(),
+ TL.getBracketsRange());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ IncompleteArrayTypeLoc NewTL = TLB.push<IncompleteArrayTypeLoc>(Result);
+ NewTL.setLBracketLoc(TL.getLBracketLoc());
+ NewTL.setRBracketLoc(TL.getRBracketLoc());
+ NewTL.setSizeExpr(nullptr);
+
+ return Result;
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformVariableArrayType(TypeLocBuilder &TLB,
+ VariableArrayTypeLoc TL) {
+ const VariableArrayType *T = TL.getTypePtr();
+ QualType ElementType = getDerived().TransformType(TLB, TL.getElementLoc());
+ if (ElementType.isNull())
+ return QualType();
+
+ ExprResult SizeResult
+ = getDerived().TransformExpr(T->getSizeExpr());
+ if (SizeResult.isInvalid())
+ return QualType();
+
+ Expr *Size = SizeResult.get();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ ElementType != T->getElementType() ||
+ Size != T->getSizeExpr()) {
+ Result = getDerived().RebuildVariableArrayType(ElementType,
+ T->getSizeModifier(),
+ Size,
+ T->getIndexTypeCVRQualifiers(),
+ TL.getBracketsRange());
+ if (Result.isNull())
+ return QualType();
+ }
+
+  // We might have a constant-size array now, but fortunately it has the
+  // same location layout.
+ ArrayTypeLoc NewTL = TLB.push<ArrayTypeLoc>(Result);
+ NewTL.setLBracketLoc(TL.getLBracketLoc());
+ NewTL.setRBracketLoc(TL.getRBracketLoc());
+ NewTL.setSizeExpr(Size);
+
+ return Result;
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformDependentSizedArrayType(TypeLocBuilder &TLB,
+ DependentSizedArrayTypeLoc TL) {
+ const DependentSizedArrayType *T = TL.getTypePtr();
+ QualType ElementType = getDerived().TransformType(TLB, TL.getElementLoc());
+ if (ElementType.isNull())
+ return QualType();
+
+ // Array bounds are constant expressions.
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
+
+ // Prefer the expression from the TypeLoc; the other may have been uniqued.
+ Expr *origSize = TL.getSizeExpr();
+ if (!origSize) origSize = T->getSizeExpr();
+
+ ExprResult sizeResult
+ = getDerived().TransformExpr(origSize);
+ sizeResult = SemaRef.ActOnConstantExpression(sizeResult);
+ if (sizeResult.isInvalid())
+ return QualType();
+
+ Expr *size = sizeResult.get();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ ElementType != T->getElementType() ||
+ size != origSize) {
+ Result = getDerived().RebuildDependentSizedArrayType(ElementType,
+ T->getSizeModifier(),
+ size,
+ T->getIndexTypeCVRQualifiers(),
+ TL.getBracketsRange());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ // We might have any sort of array type now, but fortunately they
+ // all have the same location layout.
+ ArrayTypeLoc NewTL = TLB.push<ArrayTypeLoc>(Result);
+ NewTL.setLBracketLoc(TL.getLBracketLoc());
+ NewTL.setRBracketLoc(TL.getRBracketLoc());
+ NewTL.setSizeExpr(size);
+
+ return Result;
+}
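+
+// Illustration: inside a template, 'int a[N]' with a dependent 'N' is a
+// DependentSizedArrayType; once 'N' is known, the rebuild above typically
+// yields a ConstantArrayType, which is why only the common ArrayTypeLoc
+// layout is assumed when pushing location information.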
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformDependentSizedExtVectorType(
+ TypeLocBuilder &TLB,
+ DependentSizedExtVectorTypeLoc TL) {
+ const DependentSizedExtVectorType *T = TL.getTypePtr();
+
+ // FIXME: ext vector locs should be nested
+ QualType ElementType = getDerived().TransformType(T->getElementType());
+ if (ElementType.isNull())
+ return QualType();
+
+ // Vector sizes are constant expressions.
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
+
+ ExprResult Size = getDerived().TransformExpr(T->getSizeExpr());
+ Size = SemaRef.ActOnConstantExpression(Size);
+ if (Size.isInvalid())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ ElementType != T->getElementType() ||
+ Size.get() != T->getSizeExpr()) {
+ Result = getDerived().RebuildDependentSizedExtVectorType(ElementType,
+ Size.get(),
+ T->getAttributeLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ // Result might be dependent or not.
+ if (isa<DependentSizedExtVectorType>(Result)) {
+ DependentSizedExtVectorTypeLoc NewTL
+ = TLB.push<DependentSizedExtVectorTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ } else {
+ ExtVectorTypeLoc NewTL = TLB.push<ExtVectorTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ }
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformVectorType(TypeLocBuilder &TLB,
+ VectorTypeLoc TL) {
+ const VectorType *T = TL.getTypePtr();
+ QualType ElementType = getDerived().TransformType(T->getElementType());
+ if (ElementType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ ElementType != T->getElementType()) {
+ Result = getDerived().RebuildVectorType(ElementType, T->getNumElements(),
+ T->getVectorKind());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ VectorTypeLoc NewTL = TLB.push<VectorTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformExtVectorType(TypeLocBuilder &TLB,
+ ExtVectorTypeLoc TL) {
+ const VectorType *T = TL.getTypePtr();
+ QualType ElementType = getDerived().TransformType(T->getElementType());
+ if (ElementType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ ElementType != T->getElementType()) {
+ Result = getDerived().RebuildExtVectorType(ElementType,
+ T->getNumElements(),
+ /*FIXME*/ SourceLocation());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ ExtVectorTypeLoc NewTL = TLB.push<ExtVectorTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+
+ return Result;
+}
+
+template <typename Derived>
+ParmVarDecl *TreeTransform<Derived>::TransformFunctionTypeParam(
+ ParmVarDecl *OldParm, int indexAdjustment, Optional<unsigned> NumExpansions,
+ bool ExpectParameterPack) {
+ TypeSourceInfo *OldDI = OldParm->getTypeSourceInfo();
+ TypeSourceInfo *NewDI = nullptr;
+
+ if (NumExpansions && isa<PackExpansionType>(OldDI->getType())) {
+ // If we're substituting into a pack expansion type and we know the
+ // length we want to expand to, just substitute for the pattern.
+ TypeLoc OldTL = OldDI->getTypeLoc();
+ PackExpansionTypeLoc OldExpansionTL = OldTL.castAs<PackExpansionTypeLoc>();
+
+ TypeLocBuilder TLB;
+ TypeLoc NewTL = OldDI->getTypeLoc();
+ TLB.reserve(NewTL.getFullDataSize());
+
+ QualType Result = getDerived().TransformType(TLB,
+ OldExpansionTL.getPatternLoc());
+ if (Result.isNull())
+ return nullptr;
+
+ Result = RebuildPackExpansionType(Result,
+ OldExpansionTL.getPatternLoc().getSourceRange(),
+ OldExpansionTL.getEllipsisLoc(),
+ NumExpansions);
+ if (Result.isNull())
+ return nullptr;
+
+ PackExpansionTypeLoc NewExpansionTL
+ = TLB.push<PackExpansionTypeLoc>(Result);
+ NewExpansionTL.setEllipsisLoc(OldExpansionTL.getEllipsisLoc());
+ NewDI = TLB.getTypeSourceInfo(SemaRef.Context, Result);
+ } else
+ NewDI = getDerived().TransformType(OldDI);
+ if (!NewDI)
+ return nullptr;
+
+ if (NewDI == OldDI && indexAdjustment == 0)
+ return OldParm;
+
+ ParmVarDecl *newParm = ParmVarDecl::Create(SemaRef.Context,
+ OldParm->getDeclContext(),
+ OldParm->getInnerLocStart(),
+ OldParm->getLocation(),
+ OldParm->getIdentifier(),
+ NewDI->getType(),
+ NewDI,
+ OldParm->getStorageClass(),
+ /* DefArg */ nullptr);
+ newParm->setScopeInfo(OldParm->getFunctionScopeDepth(),
+ OldParm->getFunctionScopeIndex() + indexAdjustment);
+ return newParm;
+}
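+
+// Sketch of the fast path above: for a declared pack parameter 'Ts ...ts'
+// whose expansion length is already known, the pattern 'Ts' is transformed
+// directly and the PackExpansionType is rebuilt with that fixed length,
+// avoiding a full TypeLoc transform of the expansion wrapper.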
+
+template<typename Derived>
+bool TreeTransform<Derived>::
+ TransformFunctionTypeParams(SourceLocation Loc,
+ ParmVarDecl **Params, unsigned NumParams,
+ const QualType *ParamTypes,
+ SmallVectorImpl<QualType> &OutParamTypes,
+ SmallVectorImpl<ParmVarDecl*> *PVars) {
+ int indexAdjustment = 0;
+
+ for (unsigned i = 0; i != NumParams; ++i) {
+ if (ParmVarDecl *OldParm = Params[i]) {
+ assert(OldParm->getFunctionScopeIndex() == i);
+
+ Optional<unsigned> NumExpansions;
+ ParmVarDecl *NewParm = nullptr;
+ if (OldParm->isParameterPack()) {
+ // We have a function parameter pack that may need to be expanded.
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+
+ // Find the parameter packs that could be expanded.
+ TypeLoc TL = OldParm->getTypeSourceInfo()->getTypeLoc();
+ PackExpansionTypeLoc ExpansionTL = TL.castAs<PackExpansionTypeLoc>();
+ TypeLoc Pattern = ExpansionTL.getPatternLoc();
+ SemaRef.collectUnexpandedParameterPacks(Pattern, Unexpanded);
+ assert(Unexpanded.size() > 0 && "Could not find parameter packs!");
+
+ // Determine whether we should expand the parameter packs.
+ bool ShouldExpand = false;
+ bool RetainExpansion = false;
+ Optional<unsigned> OrigNumExpansions =
+ ExpansionTL.getTypePtr()->getNumExpansions();
+ NumExpansions = OrigNumExpansions;
+ if (getDerived().TryExpandParameterPacks(ExpansionTL.getEllipsisLoc(),
+ Pattern.getSourceRange(),
+ Unexpanded,
+ ShouldExpand,
+ RetainExpansion,
+ NumExpansions)) {
+ return true;
+ }
+
+ if (ShouldExpand) {
+ // Expand the function parameter pack into multiple, separate
+ // parameters.
+ getDerived().ExpandingFunctionParameterPack(OldParm);
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+ ParmVarDecl *NewParm
+ = getDerived().TransformFunctionTypeParam(OldParm,
+ indexAdjustment++,
+ OrigNumExpansions,
+ /*ExpectParameterPack=*/false);
+ if (!NewParm)
+ return true;
+
+ OutParamTypes.push_back(NewParm->getType());
+ if (PVars)
+ PVars->push_back(NewParm);
+ }
+
+ // If we're supposed to retain a pack expansion, do so by temporarily
+ // forgetting the partially-substituted parameter pack.
+ if (RetainExpansion) {
+ ForgetPartiallySubstitutedPackRAII Forget(getDerived());
+ ParmVarDecl *NewParm
+ = getDerived().TransformFunctionTypeParam(OldParm,
+ indexAdjustment++,
+ OrigNumExpansions,
+ /*ExpectParameterPack=*/false);
+ if (!NewParm)
+ return true;
+
+ OutParamTypes.push_back(NewParm->getType());
+ if (PVars)
+ PVars->push_back(NewParm);
+ }
+
+ // The next parameter should have the same adjustment as the
+ // last thing we pushed, but we post-incremented indexAdjustment
+ // on every push. Also, if we push nothing, the adjustment should
+ // go down by one.
+ indexAdjustment--;
+
+ // We're done with the pack expansion.
+ continue;
+ }
+
+ // We'll substitute the parameter now without expanding the pack
+ // expansion.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ NewParm = getDerived().TransformFunctionTypeParam(OldParm,
+ indexAdjustment,
+ NumExpansions,
+ /*ExpectParameterPack=*/true);
+ } else {
+ NewParm = getDerived().TransformFunctionTypeParam(
+ OldParm, indexAdjustment, None, /*ExpectParameterPack=*/ false);
+ }
+
+ if (!NewParm)
+ return true;
+
+ OutParamTypes.push_back(NewParm->getType());
+ if (PVars)
+ PVars->push_back(NewParm);
+ continue;
+ }
+
+ // Deal with the possibility that we don't have a parameter
+ // declaration for this parameter.
+ QualType OldType = ParamTypes[i];
+ bool IsPackExpansion = false;
+ Optional<unsigned> NumExpansions;
+ QualType NewType;
+ if (const PackExpansionType *Expansion
+ = dyn_cast<PackExpansionType>(OldType)) {
+ // We have a function parameter pack that may need to be expanded.
+ QualType Pattern = Expansion->getPattern();
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded);
+
+ // Determine whether we should expand the parameter packs.
+ bool ShouldExpand = false;
+ bool RetainExpansion = false;
+ if (getDerived().TryExpandParameterPacks(Loc, SourceRange(),
+ Unexpanded,
+ ShouldExpand,
+ RetainExpansion,
+ NumExpansions)) {
+ return true;
+ }
+
+ if (ShouldExpand) {
+ // Expand the function parameter pack into multiple, separate
+ // parameters.
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+ QualType NewType = getDerived().TransformType(Pattern);
+ if (NewType.isNull())
+ return true;
+
+ OutParamTypes.push_back(NewType);
+ if (PVars)
+ PVars->push_back(nullptr);
+ }
+
+ // We're done with the pack expansion.
+ continue;
+ }
+
+ // If we're supposed to retain a pack expansion, do so by temporarily
+ // forgetting the partially-substituted parameter pack.
+ if (RetainExpansion) {
+ ForgetPartiallySubstitutedPackRAII Forget(getDerived());
+ QualType NewType = getDerived().TransformType(Pattern);
+ if (NewType.isNull())
+ return true;
+
+ OutParamTypes.push_back(NewType);
+ if (PVars)
+ PVars->push_back(nullptr);
+ }
+
+ // We'll substitute the parameter now without expanding the pack
+ // expansion.
+ OldType = Expansion->getPattern();
+ IsPackExpansion = true;
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ NewType = getDerived().TransformType(OldType);
+ } else {
+ NewType = getDerived().TransformType(OldType);
+ }
+
+ if (NewType.isNull())
+ return true;
+
+ if (IsPackExpansion)
+ NewType = getSema().Context.getPackExpansionType(NewType,
+ NumExpansions);
+
+ OutParamTypes.push_back(NewType);
+ if (PVars)
+ PVars->push_back(nullptr);
+ }
+
+#ifndef NDEBUG
+ if (PVars) {
+ for (unsigned i = 0, e = PVars->size(); i != e; ++i)
+ if (ParmVarDecl *parm = (*PVars)[i])
+ assert(parm->getFunctionScopeIndex() == i);
+ }
+#endif
+
+ return false;
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB,
+ FunctionProtoTypeLoc TL) {
+ SmallVector<QualType, 4> ExceptionStorage;
+ TreeTransform *This = this; // Work around gcc.gnu.org/PR56135.
+ return getDerived().TransformFunctionProtoType(
+ TLB, TL, nullptr, 0,
+ [&](FunctionProtoType::ExceptionSpecInfo &ESI, bool &Changed) {
+ return This->TransformExceptionSpec(TL.getBeginLoc(), ESI,
+ ExceptionStorage, Changed);
+ });
+}
+
+template<typename Derived> template<typename Fn>
+QualType TreeTransform<Derived>::TransformFunctionProtoType(
+ TypeLocBuilder &TLB, FunctionProtoTypeLoc TL, CXXRecordDecl *ThisContext,
+ unsigned ThisTypeQuals, Fn TransformExceptionSpec) {
+ // Transform the parameters and return type.
+ //
+ // We are required to instantiate the params and return type in source order.
+ // When the function has a trailing return type, we instantiate the
+ // parameters before the return type, since the return type can then refer
+ // to the parameters themselves (via decltype, sizeof, etc.).
+ //
+ SmallVector<QualType, 4> ParamTypes;
+ SmallVector<ParmVarDecl*, 4> ParamDecls;
+ const FunctionProtoType *T = TL.getTypePtr();
+
+ QualType ResultType;
+
+ if (T->hasTrailingReturn()) {
+ if (getDerived().TransformFunctionTypeParams(
+ TL.getBeginLoc(), TL.getParmArray(), TL.getNumParams(),
+ TL.getTypePtr()->param_type_begin(), ParamTypes, &ParamDecls))
+ return QualType();
+
+ {
+ // C++11 [expr.prim.general]p3:
+ // If a declaration declares a member function or member function
+ // template of a class X, the expression this is a prvalue of type
+ // "pointer to cv-qualifier-seq X" between the optional cv-qualifer-seq
+ // and the end of the function-definition, member-declarator, or
+ // declarator.
+ Sema::CXXThisScopeRAII ThisScope(SemaRef, ThisContext, ThisTypeQuals);
+
+ ResultType = getDerived().TransformType(TLB, TL.getReturnLoc());
+ if (ResultType.isNull())
+ return QualType();
+ }
+ }
+ else {
+ ResultType = getDerived().TransformType(TLB, TL.getReturnLoc());
+ if (ResultType.isNull())
+ return QualType();
+
+ if (getDerived().TransformFunctionTypeParams(
+ TL.getBeginLoc(), TL.getParmArray(), TL.getNumParams(),
+ TL.getTypePtr()->param_type_begin(), ParamTypes, &ParamDecls))
+ return QualType();
+ }
+
+ FunctionProtoType::ExtProtoInfo EPI = T->getExtProtoInfo();
+
+ bool EPIChanged = false;
+ if (TransformExceptionSpec(EPI.ExceptionSpec, EPIChanged))
+ return QualType();
+
+ // FIXME: Need to transform ConsumedParameters for variadic template
+ // expansion.
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() || ResultType != T->getReturnType() ||
+ T->getParamTypes() != llvm::makeArrayRef(ParamTypes) || EPIChanged) {
+ Result = getDerived().RebuildFunctionProtoType(ResultType, ParamTypes, EPI);
+ if (Result.isNull())
+ return QualType();
+ }
+
+ FunctionProtoTypeLoc NewTL = TLB.push<FunctionProtoTypeLoc>(Result);
+ NewTL.setLocalRangeBegin(TL.getLocalRangeBegin());
+ NewTL.setLParenLoc(TL.getLParenLoc());
+ NewTL.setRParenLoc(TL.getRParenLoc());
+ NewTL.setLocalRangeEnd(TL.getLocalRangeEnd());
+ for (unsigned i = 0, e = NewTL.getNumParams(); i != e; ++i)
+ NewTL.setParam(i, ParamDecls[i]);
+
+ return Result;
+}
+
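+/// \brief Transform an exception specification: instantiate a computed
+/// noexcept expression, and substitute into (or expand parameter packs
+/// within) the types of a dynamic exception specification.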
+template<typename Derived>
+bool TreeTransform<Derived>::TransformExceptionSpec(
+ SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI,
+ SmallVectorImpl<QualType> &Exceptions, bool &Changed) {
+ assert(ESI.Type != EST_Uninstantiated && ESI.Type != EST_Unevaluated);
+
+  // Instantiate a computed noexcept expression, if any.
+ if (ESI.Type == EST_ComputedNoexcept) {
+ EnterExpressionEvaluationContext Unevaluated(getSema(),
+ Sema::ConstantEvaluated);
+ ExprResult NoexceptExpr = getDerived().TransformExpr(ESI.NoexceptExpr);
+ if (NoexceptExpr.isInvalid())
+ return true;
+
+ NoexceptExpr = getSema().CheckBooleanCondition(
+ NoexceptExpr.get(), NoexceptExpr.get()->getLocStart());
+ if (NoexceptExpr.isInvalid())
+ return true;
+
+ if (!NoexceptExpr.get()->isValueDependent()) {
+ NoexceptExpr = getSema().VerifyIntegerConstantExpression(
+ NoexceptExpr.get(), nullptr,
+ diag::err_noexcept_needs_constant_expression,
+ /*AllowFold*/false);
+ if (NoexceptExpr.isInvalid())
+ return true;
+ }
+
+ if (ESI.NoexceptExpr != NoexceptExpr.get())
+ Changed = true;
+ ESI.NoexceptExpr = NoexceptExpr.get();
+ }
+
+ if (ESI.Type != EST_Dynamic)
+ return false;
+
+ // Instantiate a dynamic exception specification's type.
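+  // A pack expansion such as 'throw(Ts...)' may contribute zero or more
+  // types here once the pack is expanded.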
+ for (QualType T : ESI.Exceptions) {
+ if (const PackExpansionType *PackExpansion =
+ T->getAs<PackExpansionType>()) {
+ Changed = true;
+
+ // We have a pack expansion. Instantiate it.
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ SemaRef.collectUnexpandedParameterPacks(PackExpansion->getPattern(),
+ Unexpanded);
+ assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
+
+      // Determine whether the set of unexpanded parameter packs can and
+      // should be expanded.
+ bool Expand = false;
+ bool RetainExpansion = false;
+ Optional<unsigned> NumExpansions = PackExpansion->getNumExpansions();
+ // FIXME: Track the location of the ellipsis (and track source location
+ // information for the types in the exception specification in general).
+ if (getDerived().TryExpandParameterPacks(
+ Loc, SourceRange(), Unexpanded, Expand,
+ RetainExpansion, NumExpansions))
+ return true;
+
+ if (!Expand) {
+ // We can't expand this pack expansion into separate arguments yet;
+ // just substitute into the pattern and create a new pack expansion
+ // type.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ QualType U = getDerived().TransformType(PackExpansion->getPattern());
+ if (U.isNull())
+ return true;
+
+ U = SemaRef.Context.getPackExpansionType(U, NumExpansions);
+ Exceptions.push_back(U);
+ continue;
+ }
+
+ // Substitute into the pack expansion pattern for each slice of the
+ // pack.
+ for (unsigned ArgIdx = 0; ArgIdx != *NumExpansions; ++ArgIdx) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), ArgIdx);
+
+ QualType U = getDerived().TransformType(PackExpansion->getPattern());
+ if (U.isNull() || SemaRef.CheckSpecifiedExceptionType(U, Loc))
+ return true;
+
+ Exceptions.push_back(U);
+ }
+ } else {
+ QualType U = getDerived().TransformType(T);
+ if (U.isNull() || SemaRef.CheckSpecifiedExceptionType(U, Loc))
+ return true;
+ if (T != U)
+ Changed = true;
+
+ Exceptions.push_back(U);
+ }
+ }
+
+ ESI.Exceptions = Exceptions;
+ return false;
+}
+
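+/// \brief Transform a function type that lacks a prototype (a K&R-style
+/// declaration), which carries a result type but no parameter information.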
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformFunctionNoProtoType(
+ TypeLocBuilder &TLB,
+ FunctionNoProtoTypeLoc TL) {
+ const FunctionNoProtoType *T = TL.getTypePtr();
+ QualType ResultType = getDerived().TransformType(TLB, TL.getReturnLoc());
+ if (ResultType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() || ResultType != T->getReturnType())
+ Result = getDerived().RebuildFunctionNoProtoType(ResultType);
+
+ FunctionNoProtoTypeLoc NewTL = TLB.push<FunctionNoProtoTypeLoc>(Result);
+ NewTL.setLocalRangeBegin(TL.getLocalRangeBegin());
+ NewTL.setLParenLoc(TL.getLParenLoc());
+ NewTL.setRParenLoc(TL.getRParenLoc());
+ NewTL.setLocalRangeEnd(TL.getLocalRangeEnd());
+
+ return Result;
+}
+
+template<typename Derived> QualType
+TreeTransform<Derived>::TransformUnresolvedUsingType(TypeLocBuilder &TLB,
+ UnresolvedUsingTypeLoc TL) {
+ const UnresolvedUsingType *T = TL.getTypePtr();
+ Decl *D = getDerived().TransformDecl(TL.getNameLoc(), T->getDecl());
+ if (!D)
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() || D != T->getDecl()) {
+ Result = getDerived().RebuildUnresolvedUsingType(D);
+ if (Result.isNull())
+ return QualType();
+ }
+
+ // We might get an arbitrary type spec type back. We should at
+ // least always get a type spec type, though.
+ TypeSpecTypeLoc NewTL = TLB.pushTypeSpec(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformTypedefType(TypeLocBuilder &TLB,
+ TypedefTypeLoc TL) {
+ const TypedefType *T = TL.getTypePtr();
+ TypedefNameDecl *Typedef
+ = cast_or_null<TypedefNameDecl>(getDerived().TransformDecl(TL.getNameLoc(),
+ T->getDecl()));
+ if (!Typedef)
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ Typedef != T->getDecl()) {
+ Result = getDerived().RebuildTypedefType(Typedef);
+ if (Result.isNull())
+ return QualType();
+ }
+
+ TypedefTypeLoc NewTL = TLB.push<TypedefTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformTypeOfExprType(TypeLocBuilder &TLB,
+ TypeOfExprTypeLoc TL) {
+ // typeof expressions are not potentially evaluated contexts
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated,
+ Sema::ReuseLambdaContextDecl);
+
+ ExprResult E = getDerived().TransformExpr(TL.getUnderlyingExpr());
+ if (E.isInvalid())
+ return QualType();
+
+ E = SemaRef.HandleExprEvaluationContextForTypeof(E.get());
+ if (E.isInvalid())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ E.get() != TL.getUnderlyingExpr()) {
+ Result = getDerived().RebuildTypeOfExprType(E.get(), TL.getTypeofLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+ else E.get();
+
+ TypeOfExprTypeLoc NewTL = TLB.push<TypeOfExprTypeLoc>(Result);
+ NewTL.setTypeofLoc(TL.getTypeofLoc());
+ NewTL.setLParenLoc(TL.getLParenLoc());
+ NewTL.setRParenLoc(TL.getRParenLoc());
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformTypeOfType(TypeLocBuilder &TLB,
+ TypeOfTypeLoc TL) {
+ TypeSourceInfo* Old_Under_TI = TL.getUnderlyingTInfo();
+ TypeSourceInfo* New_Under_TI = getDerived().TransformType(Old_Under_TI);
+ if (!New_Under_TI)
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() || New_Under_TI != Old_Under_TI) {
+ Result = getDerived().RebuildTypeOfType(New_Under_TI->getType());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ TypeOfTypeLoc NewTL = TLB.push<TypeOfTypeLoc>(Result);
+ NewTL.setTypeofLoc(TL.getTypeofLoc());
+ NewTL.setLParenLoc(TL.getLParenLoc());
+ NewTL.setRParenLoc(TL.getRParenLoc());
+ NewTL.setUnderlyingTInfo(New_Under_TI);
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformDecltypeType(TypeLocBuilder &TLB,
+ DecltypeTypeLoc TL) {
+ const DecltypeType *T = TL.getTypePtr();
+
+ // decltype expressions are not potentially evaluated contexts
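+  // e.g. 'decltype(f())' names the return type of 'f' without evaluating
+  // the call.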
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated,
+ nullptr, /*IsDecltype=*/ true);
+
+ ExprResult E = getDerived().TransformExpr(T->getUnderlyingExpr());
+ if (E.isInvalid())
+ return QualType();
+
+ E = getSema().ActOnDecltypeExpression(E.get());
+ if (E.isInvalid())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ E.get() != T->getUnderlyingExpr()) {
+ Result = getDerived().RebuildDecltypeType(E.get(), TL.getNameLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+ else E.get();
+
+ DecltypeTypeLoc NewTL = TLB.push<DecltypeTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformUnaryTransformType(
+ TypeLocBuilder &TLB,
+ UnaryTransformTypeLoc TL) {
+ QualType Result = TL.getType();
+ if (Result->isDependentType()) {
+ const UnaryTransformType *T = TL.getTypePtr();
+ QualType NewBase =
+ getDerived().TransformType(TL.getUnderlyingTInfo())->getType();
+ Result = getDerived().RebuildUnaryTransformType(NewBase,
+ T->getUTTKind(),
+ TL.getKWLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ UnaryTransformTypeLoc NewTL = TLB.push<UnaryTransformTypeLoc>(Result);
+ NewTL.setKWLoc(TL.getKWLoc());
+ NewTL.setParensRange(TL.getParensRange());
+ NewTL.setUnderlyingTInfo(TL.getUnderlyingTInfo());
+ return Result;
+}
+
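+/// \brief Transform an 'auto' type, substituting into the deduced type if
+/// deduction has already been performed.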
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformAutoType(TypeLocBuilder &TLB,
+ AutoTypeLoc TL) {
+ const AutoType *T = TL.getTypePtr();
+ QualType OldDeduced = T->getDeducedType();
+ QualType NewDeduced;
+ if (!OldDeduced.isNull()) {
+ NewDeduced = getDerived().TransformType(OldDeduced);
+ if (NewDeduced.isNull())
+ return QualType();
+ }
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() || NewDeduced != OldDeduced ||
+ T->isDependentType()) {
+ Result = getDerived().RebuildAutoType(NewDeduced, T->getKeyword());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ AutoTypeLoc NewTL = TLB.push<AutoTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformRecordType(TypeLocBuilder &TLB,
+ RecordTypeLoc TL) {
+ const RecordType *T = TL.getTypePtr();
+ RecordDecl *Record
+ = cast_or_null<RecordDecl>(getDerived().TransformDecl(TL.getNameLoc(),
+ T->getDecl()));
+ if (!Record)
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ Record != T->getDecl()) {
+ Result = getDerived().RebuildRecordType(Record);
+ if (Result.isNull())
+ return QualType();
+ }
+
+ RecordTypeLoc NewTL = TLB.push<RecordTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformEnumType(TypeLocBuilder &TLB,
+ EnumTypeLoc TL) {
+ const EnumType *T = TL.getTypePtr();
+ EnumDecl *Enum
+ = cast_or_null<EnumDecl>(getDerived().TransformDecl(TL.getNameLoc(),
+ T->getDecl()));
+ if (!Enum)
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ Enum != T->getDecl()) {
+ Result = getDerived().RebuildEnumType(Enum);
+ if (Result.isNull())
+ return QualType();
+ }
+
+ EnumTypeLoc NewTL = TLB.push<EnumTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformInjectedClassNameType(
+ TypeLocBuilder &TLB,
+ InjectedClassNameTypeLoc TL) {
+ Decl *D = getDerived().TransformDecl(TL.getNameLoc(),
+ TL.getTypePtr()->getDecl());
+ if (!D) return QualType();
+
+ QualType T = SemaRef.Context.getTypeDeclType(cast<TypeDecl>(D));
+ TLB.pushTypeSpec(T).setNameLoc(TL.getNameLoc());
+ return T;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformTemplateTypeParmType(
+ TypeLocBuilder &TLB,
+ TemplateTypeParmTypeLoc TL) {
+ return TransformTypeSpecType(TLB, TL);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformSubstTemplateTypeParmType(
+ TypeLocBuilder &TLB,
+ SubstTemplateTypeParmTypeLoc TL) {
+ const SubstTemplateTypeParmType *T = TL.getTypePtr();
+
+ // Substitute into the replacement type, which itself might involve something
+ // that needs to be transformed. This only tends to occur with default
+ // template arguments of template template parameters.
+ TemporaryBase Rebase(*this, TL.getNameLoc(), DeclarationName());
+ QualType Replacement = getDerived().TransformType(T->getReplacementType());
+ if (Replacement.isNull())
+ return QualType();
+
+ // Always canonicalize the replacement type.
+ Replacement = SemaRef.Context.getCanonicalType(Replacement);
+ QualType Result
+ = SemaRef.Context.getSubstTemplateTypeParmType(T->getReplacedParameter(),
+ Replacement);
+
+ // Propagate type-source information.
+ SubstTemplateTypeParmTypeLoc NewTL
+ = TLB.push<SubstTemplateTypeParmTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformSubstTemplateTypeParmPackType(
+ TypeLocBuilder &TLB,
+ SubstTemplateTypeParmPackTypeLoc TL) {
+ return TransformTypeSpecType(TLB, TL);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
+ TypeLocBuilder &TLB,
+ TemplateSpecializationTypeLoc TL) {
+ const TemplateSpecializationType *T = TL.getTypePtr();
+
+ // The nested-name-specifier never matters in a TemplateSpecializationType,
+ // because we can't have a dependent nested-name-specifier anyway.
+ CXXScopeSpec SS;
+ TemplateName Template
+ = getDerived().TransformTemplateName(SS, T->getTemplateName(),
+ TL.getTemplateNameLoc());
+ if (Template.isNull())
+ return QualType();
+
+ return getDerived().TransformTemplateSpecializationType(TLB, TL, Template);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformAtomicType(TypeLocBuilder &TLB,
+ AtomicTypeLoc TL) {
+ QualType ValueType = getDerived().TransformType(TLB, TL.getValueLoc());
+ if (ValueType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ ValueType != TL.getValueLoc().getType()) {
+ Result = getDerived().RebuildAtomicType(ValueType, TL.getKWLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ AtomicTypeLoc NewTL = TLB.push<AtomicTypeLoc>(Result);
+ NewTL.setKWLoc(TL.getKWLoc());
+ NewTL.setLParenLoc(TL.getLParenLoc());
+ NewTL.setRParenLoc(TL.getRParenLoc());
+
+ return Result;
+}
+
+ /// \brief Simple iterator that traverses the template arguments in a
+ /// container that provides a \c getArgLoc() member function.
+ ///
+ /// This iterator is intended to be used with the iterator form of
+ /// \c TreeTransform<Derived>::TransformTemplateArguments().
+ template<typename ArgLocContainer>
+ class TemplateArgumentLocContainerIterator {
+ ArgLocContainer *Container;
+ unsigned Index;
+
+ public:
+ typedef TemplateArgumentLoc value_type;
+ typedef TemplateArgumentLoc reference;
+ typedef int difference_type;
+ typedef std::input_iterator_tag iterator_category;
+
+ class pointer {
+ TemplateArgumentLoc Arg;
+
+ public:
+ explicit pointer(TemplateArgumentLoc Arg) : Arg(Arg) { }
+
+ const TemplateArgumentLoc *operator->() const {
+ return &Arg;
+ }
+ };
+
+ TemplateArgumentLocContainerIterator() {}
+
+ TemplateArgumentLocContainerIterator(ArgLocContainer &Container,
+ unsigned Index)
+ : Container(&Container), Index(Index) { }
+
+ TemplateArgumentLocContainerIterator &operator++() {
+ ++Index;
+ return *this;
+ }
+
+ TemplateArgumentLocContainerIterator operator++(int) {
+ TemplateArgumentLocContainerIterator Old(*this);
+ ++(*this);
+ return Old;
+ }
+
+ TemplateArgumentLoc operator*() const {
+ return Container->getArgLoc(Index);
+ }
+
+ pointer operator->() const {
+ return pointer(Container->getArgLoc(Index));
+ }
+
+ friend bool operator==(const TemplateArgumentLocContainerIterator &X,
+ const TemplateArgumentLocContainerIterator &Y) {
+ return X.Container == Y.Container && X.Index == Y.Index;
+ }
+
+ friend bool operator!=(const TemplateArgumentLocContainerIterator &X,
+ const TemplateArgumentLocContainerIterator &Y) {
+ return !(X == Y);
+ }
+ };
+
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
+ TypeLocBuilder &TLB,
+ TemplateSpecializationTypeLoc TL,
+ TemplateName Template) {
+ TemplateArgumentListInfo NewTemplateArgs;
+ NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc());
+ NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
+ typedef TemplateArgumentLocContainerIterator<TemplateSpecializationTypeLoc>
+ ArgIterator;
+ if (getDerived().TransformTemplateArguments(ArgIterator(TL, 0),
+ ArgIterator(TL, TL.getNumArgs()),
+ NewTemplateArgs))
+ return QualType();
+
+ // FIXME: maybe don't rebuild if all the template arguments are the same.
+
+ QualType Result =
+ getDerived().RebuildTemplateSpecializationType(Template,
+ TL.getTemplateNameLoc(),
+ NewTemplateArgs);
+
+ if (!Result.isNull()) {
+ // Specializations of template template parameters are represented as
+ // TemplateSpecializationTypes, and substitution of type alias templates
+ // within a dependent context can transform them into
+ // DependentTemplateSpecializationTypes.
+ if (isa<DependentTemplateSpecializationType>(Result)) {
+ DependentTemplateSpecializationTypeLoc NewTL
+ = TLB.push<DependentTemplateSpecializationTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(SourceLocation());
+ NewTL.setQualifierLoc(NestedNameSpecifierLoc());
+ NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
+ NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
+ NewTL.setLAngleLoc(TL.getLAngleLoc());
+ NewTL.setRAngleLoc(TL.getRAngleLoc());
+ for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i)
+ NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo());
+ return Result;
+ }
+
+ TemplateSpecializationTypeLoc NewTL
+ = TLB.push<TemplateSpecializationTypeLoc>(Result);
+ NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
+ NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
+ NewTL.setLAngleLoc(TL.getLAngleLoc());
+ NewTL.setRAngleLoc(TL.getRAngleLoc());
+ for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i)
+ NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo());
+ }
+
+ return Result;
+}
+
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformDependentTemplateSpecializationType(
+ TypeLocBuilder &TLB,
+ DependentTemplateSpecializationTypeLoc TL,
+ TemplateName Template,
+ CXXScopeSpec &SS) {
+ TemplateArgumentListInfo NewTemplateArgs;
+ NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc());
+ NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
+ typedef TemplateArgumentLocContainerIterator<
+ DependentTemplateSpecializationTypeLoc> ArgIterator;
+ if (getDerived().TransformTemplateArguments(ArgIterator(TL, 0),
+ ArgIterator(TL, TL.getNumArgs()),
+ NewTemplateArgs))
+ return QualType();
+
+ // FIXME: maybe don't rebuild if all the template arguments are the same.
+
+ if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
+ QualType Result
+ = getSema().Context.getDependentTemplateSpecializationType(
+ TL.getTypePtr()->getKeyword(),
+ DTN->getQualifier(),
+ DTN->getIdentifier(),
+ NewTemplateArgs);
+
+ DependentTemplateSpecializationTypeLoc NewTL
+ = TLB.push<DependentTemplateSpecializationTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
+ NewTL.setQualifierLoc(SS.getWithLocInContext(SemaRef.Context));
+ NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
+ NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
+ NewTL.setLAngleLoc(TL.getLAngleLoc());
+ NewTL.setRAngleLoc(TL.getRAngleLoc());
+ for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i)
+ NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo());
+ return Result;
+ }
+
+ QualType Result
+ = getDerived().RebuildTemplateSpecializationType(Template,
+ TL.getTemplateNameLoc(),
+ NewTemplateArgs);
+
+ if (!Result.isNull()) {
+    // FIXME: Wrap this in an elaborated-type-specifier?
+ TemplateSpecializationTypeLoc NewTL
+ = TLB.push<TemplateSpecializationTypeLoc>(Result);
+ NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
+ NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
+ NewTL.setLAngleLoc(TL.getLAngleLoc());
+ NewTL.setRAngleLoc(TL.getRAngleLoc());
+ for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i)
+ NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo());
+ }
+
+ return Result;
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformElaboratedType(TypeLocBuilder &TLB,
+ ElaboratedTypeLoc TL) {
+ const ElaboratedType *T = TL.getTypePtr();
+
+ NestedNameSpecifierLoc QualifierLoc;
+ // NOTE: the qualifier in an ElaboratedType is optional.
+ if (TL.getQualifierLoc()) {
+ QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(TL.getQualifierLoc());
+ if (!QualifierLoc)
+ return QualType();
+ }
+
+ QualType NamedT = getDerived().TransformType(TLB, TL.getNamedTypeLoc());
+ if (NamedT.isNull())
+ return QualType();
+
+ // C++0x [dcl.type.elab]p2:
+ // If the identifier resolves to a typedef-name or the simple-template-id
+ // resolves to an alias template specialization, the
+ // elaborated-type-specifier is ill-formed.
+ if (T->getKeyword() != ETK_None && T->getKeyword() != ETK_Typename) {
+ if (const TemplateSpecializationType *TST =
+ NamedT->getAs<TemplateSpecializationType>()) {
+ TemplateName Template = TST->getTemplateName();
+ if (TypeAliasTemplateDecl *TAT = dyn_cast_or_null<TypeAliasTemplateDecl>(
+ Template.getAsTemplateDecl())) {
+ SemaRef.Diag(TL.getNamedTypeLoc().getBeginLoc(),
+ diag::err_tag_reference_non_tag) << 4;
+ SemaRef.Diag(TAT->getLocation(), diag::note_declared_at);
+ }
+ }
+ }
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ QualifierLoc != TL.getQualifierLoc() ||
+ NamedT != T->getNamedType()) {
+ Result = getDerived().RebuildElaboratedType(TL.getElaboratedKeywordLoc(),
+ T->getKeyword(),
+ QualifierLoc, NamedT);
+ if (Result.isNull())
+ return QualType();
+ }
+
+ ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
+ NewTL.setQualifierLoc(QualifierLoc);
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformAttributedType(
+ TypeLocBuilder &TLB,
+ AttributedTypeLoc TL) {
+ const AttributedType *oldType = TL.getTypePtr();
+ QualType modifiedType = getDerived().TransformType(TLB, TL.getModifiedLoc());
+ if (modifiedType.isNull())
+ return QualType();
+
+ QualType result = TL.getType();
+
+ // FIXME: dependent operand expressions?
+ if (getDerived().AlwaysRebuild() ||
+ modifiedType != oldType->getModifiedType()) {
+ // TODO: this is really lame; we should really be rebuilding the
+ // equivalent type from first principles.
+ QualType equivalentType
+ = getDerived().TransformType(oldType->getEquivalentType());
+ if (equivalentType.isNull())
+ return QualType();
+
+ // Check whether we can add nullability; it is only represented as
+ // type sugar, and therefore cannot be diagnosed in any other way.
+ if (auto nullability = oldType->getImmediateNullability()) {
+ if (!modifiedType->canHaveNullability()) {
+ SemaRef.Diag(TL.getAttrNameLoc(), diag::err_nullability_nonpointer)
+ << DiagNullabilityKind(*nullability, false) << modifiedType;
+ return QualType();
+ }
+ }
+
+ result = SemaRef.Context.getAttributedType(oldType->getAttrKind(),
+ modifiedType,
+ equivalentType);
+ }
+
+ AttributedTypeLoc newTL = TLB.push<AttributedTypeLoc>(result);
+ newTL.setAttrNameLoc(TL.getAttrNameLoc());
+ if (TL.hasAttrOperand())
+ newTL.setAttrOperandParensRange(TL.getAttrOperandParensRange());
+ if (TL.hasAttrExprOperand())
+ newTL.setAttrExprOperand(TL.getAttrExprOperand());
+ else if (TL.hasAttrEnumOperand())
+ newTL.setAttrEnumOperandLoc(TL.getAttrEnumOperandLoc());
+
+ return result;
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformParenType(TypeLocBuilder &TLB,
+ ParenTypeLoc TL) {
+ QualType Inner = getDerived().TransformType(TLB, TL.getInnerLoc());
+ if (Inner.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ Inner != TL.getInnerLoc().getType()) {
+ Result = getDerived().RebuildParenType(Inner);
+ if (Result.isNull())
+ return QualType();
+ }
+
+ ParenTypeLoc NewTL = TLB.push<ParenTypeLoc>(Result);
+ NewTL.setLParenLoc(TL.getLParenLoc());
+ NewTL.setRParenLoc(TL.getRParenLoc());
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformDependentNameType(TypeLocBuilder &TLB,
+ DependentNameTypeLoc TL) {
+ const DependentNameType *T = TL.getTypePtr();
+
+ NestedNameSpecifierLoc QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(TL.getQualifierLoc());
+ if (!QualifierLoc)
+ return QualType();
+
+ QualType Result
+ = getDerived().RebuildDependentNameType(T->getKeyword(),
+ TL.getElaboratedKeywordLoc(),
+ QualifierLoc,
+ T->getIdentifier(),
+ TL.getNameLoc());
+ if (Result.isNull())
+ return QualType();
+
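+  // If the dependent name now resolves to a known type, the rebuilt type is
+  // wrapped in an ElaboratedType; record source locations for both layers.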
+ if (const ElaboratedType* ElabT = Result->getAs<ElaboratedType>()) {
+ QualType NamedT = ElabT->getNamedType();
+ TLB.pushTypeSpec(NamedT).setNameLoc(TL.getNameLoc());
+
+ ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
+ NewTL.setQualifierLoc(QualifierLoc);
+ } else {
+ DependentNameTypeLoc NewTL = TLB.push<DependentNameTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
+ NewTL.setQualifierLoc(QualifierLoc);
+ NewTL.setNameLoc(TL.getNameLoc());
+ }
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::
+ TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
+ DependentTemplateSpecializationTypeLoc TL) {
+ NestedNameSpecifierLoc QualifierLoc;
+ if (TL.getQualifierLoc()) {
+ QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(TL.getQualifierLoc());
+ if (!QualifierLoc)
+ return QualType();
+ }
+
+ return getDerived()
+ .TransformDependentTemplateSpecializationType(TLB, TL, QualifierLoc);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::
+TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
+ DependentTemplateSpecializationTypeLoc TL,
+ NestedNameSpecifierLoc QualifierLoc) {
+ const DependentTemplateSpecializationType *T = TL.getTypePtr();
+
+ TemplateArgumentListInfo NewTemplateArgs;
+ NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc());
+ NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
+
+ typedef TemplateArgumentLocContainerIterator<
+ DependentTemplateSpecializationTypeLoc> ArgIterator;
+ if (getDerived().TransformTemplateArguments(ArgIterator(TL, 0),
+ ArgIterator(TL, TL.getNumArgs()),
+ NewTemplateArgs))
+ return QualType();
+
+ QualType Result
+ = getDerived().RebuildDependentTemplateSpecializationType(T->getKeyword(),
+ QualifierLoc,
+ T->getIdentifier(),
+ TL.getTemplateNameLoc(),
+ NewTemplateArgs);
+ if (Result.isNull())
+ return QualType();
+
+ if (const ElaboratedType *ElabT = dyn_cast<ElaboratedType>(Result)) {
+ QualType NamedT = ElabT->getNamedType();
+
+ // Copy information relevant to the template specialization.
+ TemplateSpecializationTypeLoc NamedTL
+ = TLB.push<TemplateSpecializationTypeLoc>(NamedT);
+ NamedTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
+ NamedTL.setTemplateNameLoc(TL.getTemplateNameLoc());
+ NamedTL.setLAngleLoc(TL.getLAngleLoc());
+ NamedTL.setRAngleLoc(TL.getRAngleLoc());
+ for (unsigned I = 0, E = NewTemplateArgs.size(); I != E; ++I)
+ NamedTL.setArgLocInfo(I, NewTemplateArgs[I].getLocInfo());
+
+ // Copy information relevant to the elaborated type.
+ ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
+ NewTL.setQualifierLoc(QualifierLoc);
+ } else if (isa<DependentTemplateSpecializationType>(Result)) {
+ DependentTemplateSpecializationTypeLoc SpecTL
+ = TLB.push<DependentTemplateSpecializationTypeLoc>(Result);
+ SpecTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
+ SpecTL.setQualifierLoc(QualifierLoc);
+ SpecTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
+ SpecTL.setTemplateNameLoc(TL.getTemplateNameLoc());
+ SpecTL.setLAngleLoc(TL.getLAngleLoc());
+ SpecTL.setRAngleLoc(TL.getRAngleLoc());
+ for (unsigned I = 0, E = NewTemplateArgs.size(); I != E; ++I)
+ SpecTL.setArgLocInfo(I, NewTemplateArgs[I].getLocInfo());
+ } else {
+ TemplateSpecializationTypeLoc SpecTL
+ = TLB.push<TemplateSpecializationTypeLoc>(Result);
+ SpecTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
+ SpecTL.setTemplateNameLoc(TL.getTemplateNameLoc());
+ SpecTL.setLAngleLoc(TL.getLAngleLoc());
+ SpecTL.setRAngleLoc(TL.getRAngleLoc());
+ for (unsigned I = 0, E = NewTemplateArgs.size(); I != E; ++I)
+ SpecTL.setArgLocInfo(I, NewTemplateArgs[I].getLocInfo());
+ }
+ return Result;
+}
+
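+/// \brief Transform a pack expansion type such as 'T...', transforming its
+/// pattern while preserving the recorded number of expansions.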
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformPackExpansionType(TypeLocBuilder &TLB,
+ PackExpansionTypeLoc TL) {
+ QualType Pattern
+ = getDerived().TransformType(TLB, TL.getPatternLoc());
+ if (Pattern.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ Pattern != TL.getPatternLoc().getType()) {
+ Result = getDerived().RebuildPackExpansionType(Pattern,
+ TL.getPatternLoc().getSourceRange(),
+ TL.getEllipsisLoc(),
+ TL.getTypePtr()->getNumExpansions());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ PackExpansionTypeLoc NewT = TLB.push<PackExpansionTypeLoc>(Result);
+ NewT.setEllipsisLoc(TL.getEllipsisLoc());
+ return Result;
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformObjCInterfaceType(TypeLocBuilder &TLB,
+ ObjCInterfaceTypeLoc TL) {
+ // ObjCInterfaceType is never dependent.
+ TLB.pushFullCopy(TL);
+ return TL.getType();
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformObjCObjectType(TypeLocBuilder &TLB,
+ ObjCObjectTypeLoc TL) {
+ // Transform base type.
+ QualType BaseType = getDerived().TransformType(TLB, TL.getBaseLoc());
+ if (BaseType.isNull())
+ return QualType();
+
+ bool AnyChanged = BaseType != TL.getBaseLoc().getType();
+
+ // Transform type arguments.
+ SmallVector<TypeSourceInfo *, 4> NewTypeArgInfos;
+ for (unsigned i = 0, n = TL.getNumTypeArgs(); i != n; ++i) {
+ TypeSourceInfo *TypeArgInfo = TL.getTypeArgTInfo(i);
+ TypeLoc TypeArgLoc = TypeArgInfo->getTypeLoc();
+ QualType TypeArg = TypeArgInfo->getType();
+ if (auto PackExpansionLoc = TypeArgLoc.getAs<PackExpansionTypeLoc>()) {
+ AnyChanged = true;
+
+ // We have a pack expansion. Instantiate it.
+ const auto *PackExpansion = PackExpansionLoc.getType()
+ ->castAs<PackExpansionType>();
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ SemaRef.collectUnexpandedParameterPacks(PackExpansion->getPattern(),
+ Unexpanded);
+ assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
+
+ // Determine whether the set of unexpanded parameter packs can
+ // and should be expanded.
+ TypeLoc PatternLoc = PackExpansionLoc.getPatternLoc();
+ bool Expand = false;
+ bool RetainExpansion = false;
+ Optional<unsigned> NumExpansions = PackExpansion->getNumExpansions();
+ if (getDerived().TryExpandParameterPacks(
+ PackExpansionLoc.getEllipsisLoc(), PatternLoc.getSourceRange(),
+ Unexpanded, Expand, RetainExpansion, NumExpansions))
+ return QualType();
+
+ if (!Expand) {
+ // We can't expand this pack expansion into separate arguments yet;
+ // just substitute into the pattern and create a new pack expansion
+ // type.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+
+ TypeLocBuilder TypeArgBuilder;
+ TypeArgBuilder.reserve(PatternLoc.getFullDataSize());
+ QualType NewPatternType = getDerived().TransformType(TypeArgBuilder,
+ PatternLoc);
+ if (NewPatternType.isNull())
+ return QualType();
+
+ QualType NewExpansionType = SemaRef.Context.getPackExpansionType(
+ NewPatternType, NumExpansions);
+ auto NewExpansionLoc = TLB.push<PackExpansionTypeLoc>(NewExpansionType);
+ NewExpansionLoc.setEllipsisLoc(PackExpansionLoc.getEllipsisLoc());
+ NewTypeArgInfos.push_back(
+ TypeArgBuilder.getTypeSourceInfo(SemaRef.Context, NewExpansionType));
+ continue;
+ }
+
+ // Substitute into the pack expansion pattern for each slice of the
+ // pack.
+ for (unsigned ArgIdx = 0; ArgIdx != *NumExpansions; ++ArgIdx) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), ArgIdx);
+
+ TypeLocBuilder TypeArgBuilder;
+ TypeArgBuilder.reserve(PatternLoc.getFullDataSize());
+
+ QualType NewTypeArg = getDerived().TransformType(TypeArgBuilder,
+ PatternLoc);
+ if (NewTypeArg.isNull())
+ return QualType();
+
+ NewTypeArgInfos.push_back(
+ TypeArgBuilder.getTypeSourceInfo(SemaRef.Context, NewTypeArg));
+ }
+
+ continue;
+ }
+
+ TypeLocBuilder TypeArgBuilder;
+ TypeArgBuilder.reserve(TypeArgLoc.getFullDataSize());
+ QualType NewTypeArg = getDerived().TransformType(TypeArgBuilder, TypeArgLoc);
+ if (NewTypeArg.isNull())
+ return QualType();
+
+ // If nothing changed, just keep the old TypeSourceInfo.
+ if (NewTypeArg == TypeArg) {
+ NewTypeArgInfos.push_back(TypeArgInfo);
+ continue;
+ }
+
+ NewTypeArgInfos.push_back(
+ TypeArgBuilder.getTypeSourceInfo(SemaRef.Context, NewTypeArg));
+ AnyChanged = true;
+ }
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() || AnyChanged) {
+ // Rebuild the type.
+ Result = getDerived().RebuildObjCObjectType(
+ BaseType,
+ TL.getLocStart(),
+ TL.getTypeArgsLAngleLoc(),
+ NewTypeArgInfos,
+ TL.getTypeArgsRAngleLoc(),
+ TL.getProtocolLAngleLoc(),
+ llvm::makeArrayRef(TL.getTypePtr()->qual_begin(),
+ TL.getNumProtocols()),
+ TL.getProtocolLocs(),
+ TL.getProtocolRAngleLoc());
+
+ if (Result.isNull())
+ return QualType();
+ }
+
+ ObjCObjectTypeLoc NewT = TLB.push<ObjCObjectTypeLoc>(Result);
+ assert(TL.hasBaseTypeAsWritten() && "Can't be dependent");
+ NewT.setHasBaseTypeAsWritten(true);
+ NewT.setTypeArgsLAngleLoc(TL.getTypeArgsLAngleLoc());
+ for (unsigned i = 0, n = TL.getNumTypeArgs(); i != n; ++i)
+ NewT.setTypeArgTInfo(i, NewTypeArgInfos[i]);
+ NewT.setTypeArgsRAngleLoc(TL.getTypeArgsRAngleLoc());
+ NewT.setProtocolLAngleLoc(TL.getProtocolLAngleLoc());
+ for (unsigned i = 0, n = TL.getNumProtocols(); i != n; ++i)
+ NewT.setProtocolLoc(i, TL.getProtocolLoc(i));
+ NewT.setProtocolRAngleLoc(TL.getProtocolRAngleLoc());
+ return Result;
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformObjCObjectPointerType(TypeLocBuilder &TLB,
+ ObjCObjectPointerTypeLoc TL) {
+ QualType PointeeType = getDerived().TransformType(TLB, TL.getPointeeLoc());
+ if (PointeeType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ PointeeType != TL.getPointeeLoc().getType()) {
+ Result = getDerived().RebuildObjCObjectPointerType(PointeeType,
+ TL.getStarLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ ObjCObjectPointerTypeLoc NewT = TLB.push<ObjCObjectPointerTypeLoc>(Result);
+ NewT.setStarLoc(TL.getStarLoc());
+ return Result;
+}
+
+//===----------------------------------------------------------------------===//
+// Statement transformation
+//===----------------------------------------------------------------------===//
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformNullStmt(NullStmt *S) {
+ return S;
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformCompoundStmt(CompoundStmt *S) {
+ return getDerived().TransformCompoundStmt(S, false);
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformCompoundStmt(CompoundStmt *S,
+ bool IsStmtExpr) {
+ Sema::CompoundScopeRAII CompoundScope(getSema());
+
+ bool SubStmtInvalid = false;
+ bool SubStmtChanged = false;
+ SmallVector<Stmt*, 8> Statements;
+ for (auto *B : S->body()) {
+ StmtResult Result = getDerived().TransformStmt(B);
+ if (Result.isInvalid()) {
+ // Immediately fail if this was a DeclStmt, since it's very
+ // likely that this will cause problems for future statements.
+ if (isa<DeclStmt>(B))
+ return StmtError();
+
+ // Otherwise, just keep processing substatements and fail later.
+ SubStmtInvalid = true;
+ continue;
+ }
+
+ SubStmtChanged = SubStmtChanged || Result.get() != B;
+ Statements.push_back(Result.getAs<Stmt>());
+ }
+
+ if (SubStmtInvalid)
+ return StmtError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ !SubStmtChanged)
+ return S;
+
+ return getDerived().RebuildCompoundStmt(S->getLBracLoc(),
+ Statements,
+ S->getRBracLoc(),
+ IsStmtExpr);
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformCaseStmt(CaseStmt *S) {
+ ExprResult LHS, RHS;
+ {
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
+
+ // Transform the left-hand case value.
+ LHS = getDerived().TransformExpr(S->getLHS());
+ LHS = SemaRef.ActOnConstantExpression(LHS);
+ if (LHS.isInvalid())
+ return StmtError();
+
+ // Transform the right-hand case value (for the GNU case-range extension).
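+    // e.g. 'case 1 ... 5:' has both a low and a high bound.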
+ RHS = getDerived().TransformExpr(S->getRHS());
+ RHS = SemaRef.ActOnConstantExpression(RHS);
+ if (RHS.isInvalid())
+ return StmtError();
+ }
+
+ // Build the case statement.
+  // Case statements are always rebuilt so that they will be attached to their
+ // transformed switch statement.
+ StmtResult Case = getDerived().RebuildCaseStmt(S->getCaseLoc(),
+ LHS.get(),
+ S->getEllipsisLoc(),
+ RHS.get(),
+ S->getColonLoc());
+ if (Case.isInvalid())
+ return StmtError();
+
+ // Transform the statement following the case
+ StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt());
+ if (SubStmt.isInvalid())
+ return StmtError();
+
+ // Attach the body to the case statement
+ return getDerived().RebuildCaseStmtBody(Case.get(), SubStmt.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformDefaultStmt(DefaultStmt *S) {
+ // Transform the statement following the default case
+ StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt());
+ if (SubStmt.isInvalid())
+ return StmtError();
+
+ // Default statements are always rebuilt
+ return getDerived().RebuildDefaultStmt(S->getDefaultLoc(), S->getColonLoc(),
+ SubStmt.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformLabelStmt(LabelStmt *S) {
+ StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt());
+ if (SubStmt.isInvalid())
+ return StmtError();
+
+ Decl *LD = getDerived().TransformDecl(S->getDecl()->getLocation(),
+ S->getDecl());
+ if (!LD)
+ return StmtError();
+
+ // FIXME: Pass the real colon location in.
+ return getDerived().RebuildLabelStmt(S->getIdentLoc(),
+ cast<LabelDecl>(LD), SourceLocation(),
+ SubStmt.get());
+}
+
+template <typename Derived>
+const Attr *TreeTransform<Derived>::TransformAttr(const Attr *R) {
+ if (!R)
+ return R;
+
+ switch (R->getKind()) {
+// Transform attributes with a pragma spelling by calling TransformXXXAttr.
+#define ATTR(X)
+#define PRAGMA_SPELLING_ATTR(X) \
+ case attr::X: \
+ return getDerived().Transform##X##Attr(cast<X##Attr>(R));
+#include "clang/Basic/AttrList.inc"
+ default:
+ return R;
+ }
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformAttributedStmt(AttributedStmt *S) {
+ bool AttrsChanged = false;
+ SmallVector<const Attr *, 1> Attrs;
+
+ // Visit attributes and keep track if any are transformed.
+ for (const auto *I : S->getAttrs()) {
+ const Attr *R = getDerived().TransformAttr(I);
+ AttrsChanged |= (I != R);
+ Attrs.push_back(R);
+ }
+
+ StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt());
+ if (SubStmt.isInvalid())
+ return StmtError();
+
+ if (SubStmt.get() == S->getSubStmt() && !AttrsChanged)
+ return S;
+
+ return getDerived().RebuildAttributedStmt(S->getAttrLoc(), Attrs,
+ SubStmt.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformIfStmt(IfStmt *S) {
+ // Transform the condition
+ ExprResult Cond;
+ VarDecl *ConditionVar = nullptr;
+ if (S->getConditionVariable()) {
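+    // e.g. 'if (T *p = get())' declares a condition variable; transform the
+    // declaration rather than a plain condition expression.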
+ ConditionVar
+ = cast_or_null<VarDecl>(
+ getDerived().TransformDefinition(
+ S->getConditionVariable()->getLocation(),
+ S->getConditionVariable()));
+ if (!ConditionVar)
+ return StmtError();
+ } else {
+ Cond = getDerived().TransformExpr(S->getCond());
+
+ if (Cond.isInvalid())
+ return StmtError();
+
+ // Convert the condition to a boolean value.
+ if (S->getCond()) {
+ ExprResult CondE = getSema().ActOnBooleanCondition(nullptr, S->getIfLoc(),
+ Cond.get());
+ if (CondE.isInvalid())
+ return StmtError();
+
+ Cond = CondE.get();
+ }
+ }
+
+ Sema::FullExprArg FullCond(getSema().MakeFullExpr(Cond.get()));
+ if (!S->getConditionVariable() && S->getCond() && !FullCond.get())
+ return StmtError();
+
+ // Transform the "then" branch.
+ StmtResult Then = getDerived().TransformStmt(S->getThen());
+ if (Then.isInvalid())
+ return StmtError();
+
+ // Transform the "else" branch.
+ StmtResult Else = getDerived().TransformStmt(S->getElse());
+ if (Else.isInvalid())
+ return StmtError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ FullCond.get() == S->getCond() &&
+ ConditionVar == S->getConditionVariable() &&
+ Then.get() == S->getThen() &&
+ Else.get() == S->getElse())
+ return S;
+
+ return getDerived().RebuildIfStmt(S->getIfLoc(), FullCond, ConditionVar,
+ Then.get(),
+ S->getElseLoc(), Else.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformSwitchStmt(SwitchStmt *S) {
+ // Transform the condition.
+ ExprResult Cond;
+ VarDecl *ConditionVar = nullptr;
+ if (S->getConditionVariable()) {
+ ConditionVar
+ = cast_or_null<VarDecl>(
+ getDerived().TransformDefinition(
+ S->getConditionVariable()->getLocation(),
+ S->getConditionVariable()));
+ if (!ConditionVar)
+ return StmtError();
+ } else {
+ Cond = getDerived().TransformExpr(S->getCond());
+
+ if (Cond.isInvalid())
+ return StmtError();
+ }
+
+ // Rebuild the switch statement.
+ StmtResult Switch
+ = getDerived().RebuildSwitchStmtStart(S->getSwitchLoc(), Cond.get(),
+ ConditionVar);
+ if (Switch.isInvalid())
+ return StmtError();
+
+ // Transform the body of the switch statement.
+ StmtResult Body = getDerived().TransformStmt(S->getBody());
+ if (Body.isInvalid())
+ return StmtError();
+
+ // Complete the switch statement.
+ return getDerived().RebuildSwitchStmtBody(S->getSwitchLoc(), Switch.get(),
+ Body.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformWhileStmt(WhileStmt *S) {
+ // Transform the condition
+ ExprResult Cond;
+ VarDecl *ConditionVar = nullptr;
+ if (S->getConditionVariable()) {
+ ConditionVar
+ = cast_or_null<VarDecl>(
+ getDerived().TransformDefinition(
+ S->getConditionVariable()->getLocation(),
+ S->getConditionVariable()));
+ if (!ConditionVar)
+ return StmtError();
+ } else {
+ Cond = getDerived().TransformExpr(S->getCond());
+
+ if (Cond.isInvalid())
+ return StmtError();
+
+ if (S->getCond()) {
+ // Convert the condition to a boolean value.
+ ExprResult CondE = getSema().ActOnBooleanCondition(nullptr,
+ S->getWhileLoc(),
+ Cond.get());
+ if (CondE.isInvalid())
+ return StmtError();
+ Cond = CondE;
+ }
+ }
+
+ Sema::FullExprArg FullCond(getSema().MakeFullExpr(Cond.get()));
+ if (!S->getConditionVariable() && S->getCond() && !FullCond.get())
+ return StmtError();
+
+ // Transform the body
+ StmtResult Body = getDerived().TransformStmt(S->getBody());
+ if (Body.isInvalid())
+ return StmtError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ FullCond.get() == S->getCond() &&
+ ConditionVar == S->getConditionVariable() &&
+ Body.get() == S->getBody())
+    return S;
+
+ return getDerived().RebuildWhileStmt(S->getWhileLoc(), FullCond,
+ ConditionVar, Body.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformDoStmt(DoStmt *S) {
+ // Transform the body
+ StmtResult Body = getDerived().TransformStmt(S->getBody());
+ if (Body.isInvalid())
+ return StmtError();
+
+ // Transform the condition
+ ExprResult Cond = getDerived().TransformExpr(S->getCond());
+ if (Cond.isInvalid())
+ return StmtError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Cond.get() == S->getCond() &&
+ Body.get() == S->getBody())
+ return S;
+
+ return getDerived().RebuildDoStmt(S->getDoLoc(), Body.get(), S->getWhileLoc(),
+ /*FIXME:*/S->getWhileLoc(), Cond.get(),
+ S->getRParenLoc());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformForStmt(ForStmt *S) {
+ // Transform the initialization statement
+ StmtResult Init = getDerived().TransformStmt(S->getInit());
+ if (Init.isInvalid())
+ return StmtError();
+
+  // In an OpenMP loop region, the loop control variable must be captured
+  // and be private. Perform analysis of the first part (if any).
+ if (getSema().getLangOpts().OpenMP && Init.isUsable())
+ getSema().ActOnOpenMPLoopInitialization(S->getForLoc(), Init.get());
+
+ // Transform the condition
+ ExprResult Cond;
+ VarDecl *ConditionVar = nullptr;
+ if (S->getConditionVariable()) {
+ ConditionVar
+ = cast_or_null<VarDecl>(
+ getDerived().TransformDefinition(
+ S->getConditionVariable()->getLocation(),
+ S->getConditionVariable()));
+ if (!ConditionVar)
+ return StmtError();
+ } else {
+ Cond = getDerived().TransformExpr(S->getCond());
+
+ if (Cond.isInvalid())
+ return StmtError();
+
+ if (S->getCond()) {
+ // Convert the condition to a boolean value.
+ ExprResult CondE = getSema().ActOnBooleanCondition(nullptr,
+ S->getForLoc(),
+ Cond.get());
+ if (CondE.isInvalid())
+ return StmtError();
+
+ Cond = CondE.get();
+ }
+ }
+
+ Sema::FullExprArg FullCond(getSema().MakeFullExpr(Cond.get()));
+ if (!S->getConditionVariable() && S->getCond() && !FullCond.get())
+ return StmtError();
+
+ // Transform the increment
+ ExprResult Inc = getDerived().TransformExpr(S->getInc());
+ if (Inc.isInvalid())
+ return StmtError();
+
+ Sema::FullExprArg FullInc(getSema().MakeFullDiscardedValueExpr(Inc.get()));
+ if (S->getInc() && !FullInc.get())
+ return StmtError();
+
+ // Transform the body
+ StmtResult Body = getDerived().TransformStmt(S->getBody());
+ if (Body.isInvalid())
+ return StmtError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Init.get() == S->getInit() &&
+ FullCond.get() == S->getCond() &&
+ Inc.get() == S->getInc() &&
+ Body.get() == S->getBody())
+ return S;
+
+ return getDerived().RebuildForStmt(S->getForLoc(), S->getLParenLoc(),
+ Init.get(), FullCond, ConditionVar,
+ FullInc, S->getRParenLoc(), Body.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformGotoStmt(GotoStmt *S) {
+ Decl *LD = getDerived().TransformDecl(S->getLabel()->getLocation(),
+ S->getLabel());
+ if (!LD)
+ return StmtError();
+
+ // Goto statements must always be rebuilt, to resolve the label.
+ return getDerived().RebuildGotoStmt(S->getGotoLoc(), S->getLabelLoc(),
+ cast<LabelDecl>(LD));
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformIndirectGotoStmt(IndirectGotoStmt *S) {
+ ExprResult Target = getDerived().TransformExpr(S->getTarget());
+ if (Target.isInvalid())
+ return StmtError();
+ Target = SemaRef.MaybeCreateExprWithCleanups(Target.get());
+
+ if (!getDerived().AlwaysRebuild() &&
+ Target.get() == S->getTarget())
+ return S;
+
+ return getDerived().RebuildIndirectGotoStmt(S->getGotoLoc(), S->getStarLoc(),
+ Target.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformContinueStmt(ContinueStmt *S) {
+ return S;
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformBreakStmt(BreakStmt *S) {
+ return S;
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformReturnStmt(ReturnStmt *S) {
+ ExprResult Result = getDerived().TransformInitializer(S->getRetValue(),
+ /*NotCopyInit*/false);
+ if (Result.isInvalid())
+ return StmtError();
+
+ // FIXME: We always rebuild the return statement because there is no way
+ // to tell whether the return type of the function has changed.
+ return getDerived().RebuildReturnStmt(S->getReturnLoc(), Result.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformDeclStmt(DeclStmt *S) {
+ bool DeclChanged = false;
+ SmallVector<Decl *, 4> Decls;
+ for (auto *D : S->decls()) {
+ Decl *Transformed = getDerived().TransformDefinition(D->getLocation(), D);
+ if (!Transformed)
+ return StmtError();
+
+ if (Transformed != D)
+ DeclChanged = true;
+
+ Decls.push_back(Transformed);
+ }
+
+ if (!getDerived().AlwaysRebuild() && !DeclChanged)
+ return S;
+
+ return getDerived().RebuildDeclStmt(Decls, S->getStartLoc(), S->getEndLoc());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformGCCAsmStmt(GCCAsmStmt *S) {
+ SmallVector<Expr*, 8> Constraints;
+ SmallVector<Expr*, 8> Exprs;
+ SmallVector<IdentifierInfo *, 4> Names;
+
+ ExprResult AsmString;
+ SmallVector<Expr*, 8> Clobbers;
+
+ bool ExprsChanged = false;
+
+ // Go through the outputs.
+ for (unsigned I = 0, E = S->getNumOutputs(); I != E; ++I) {
+ Names.push_back(S->getOutputIdentifier(I));
+
+ // No need to transform the constraint literal.
+ Constraints.push_back(S->getOutputConstraintLiteral(I));
+
+ // Transform the output expr.
+ Expr *OutputExpr = S->getOutputExpr(I);
+ ExprResult Result = getDerived().TransformExpr(OutputExpr);
+ if (Result.isInvalid())
+ return StmtError();
+
+ ExprsChanged |= Result.get() != OutputExpr;
+
+ Exprs.push_back(Result.get());
+ }
+
+ // Go through the inputs.
+ for (unsigned I = 0, E = S->getNumInputs(); I != E; ++I) {
+ Names.push_back(S->getInputIdentifier(I));
+
+ // No need to transform the constraint literal.
+ Constraints.push_back(S->getInputConstraintLiteral(I));
+
+ // Transform the input expr.
+ Expr *InputExpr = S->getInputExpr(I);
+ ExprResult Result = getDerived().TransformExpr(InputExpr);
+ if (Result.isInvalid())
+ return StmtError();
+
+ ExprsChanged |= Result.get() != InputExpr;
+
+ Exprs.push_back(Result.get());
+ }
+
+ if (!getDerived().AlwaysRebuild() && !ExprsChanged)
+ return S;
+
+ // Go through the clobbers.
+ for (unsigned I = 0, E = S->getNumClobbers(); I != E; ++I)
+ Clobbers.push_back(S->getClobberStringLiteral(I));
+
+ // No need to transform the asm string literal.
+ AsmString = S->getAsmString();
+ return getDerived().RebuildGCCAsmStmt(S->getAsmLoc(), S->isSimple(),
+ S->isVolatile(), S->getNumOutputs(),
+ S->getNumInputs(), Names.data(),
+ Constraints, Exprs, AsmString.get(),
+ Clobbers, S->getRParenLoc());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformMSAsmStmt(MSAsmStmt *S) {
+ ArrayRef<Token> AsmToks =
+ llvm::makeArrayRef(S->getAsmToks(), S->getNumAsmToks());
+
+ bool HadError = false, HadChange = false;
+
+ ArrayRef<Expr*> SrcExprs = S->getAllExprs();
+ SmallVector<Expr*, 8> TransformedExprs;
+ TransformedExprs.reserve(SrcExprs.size());
+ for (unsigned i = 0, e = SrcExprs.size(); i != e; ++i) {
+ ExprResult Result = getDerived().TransformExpr(SrcExprs[i]);
+ if (!Result.isUsable()) {
+ HadError = true;
+ } else {
+ HadChange |= (Result.get() != SrcExprs[i]);
+ TransformedExprs.push_back(Result.get());
+ }
+ }
+
+ if (HadError) return StmtError();
+ if (!HadChange && !getDerived().AlwaysRebuild())
+    return S;
+
+ return getDerived().RebuildMSAsmStmt(S->getAsmLoc(), S->getLBraceLoc(),
+ AsmToks, S->getAsmString(),
+ S->getNumOutputs(), S->getNumInputs(),
+ S->getAllConstraints(), S->getClobbers(),
+ TransformedExprs, S->getEndLoc());
+}
+
+// C++ Coroutines TS
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformCoroutineBodyStmt(CoroutineBodyStmt *S) {
+ // The coroutine body should be re-formed by the caller if necessary.
+ return getDerived().TransformStmt(S->getBody());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformCoreturnStmt(CoreturnStmt *S) {
+ ExprResult Result = getDerived().TransformInitializer(S->getOperand(),
+ /*NotCopyInit*/false);
+ if (Result.isInvalid())
+ return StmtError();
+
+ // Always rebuild; we don't know if this needs to be injected into a new
+ // context or if the promise type has changed.
+ return getDerived().RebuildCoreturnStmt(S->getKeywordLoc(), Result.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCoawaitExpr(CoawaitExpr *E) {
+ ExprResult Result = getDerived().TransformInitializer(E->getOperand(),
+ /*NotCopyInit*/false);
+ if (Result.isInvalid())
+ return ExprError();
+
+ // Always rebuild; we don't know if this needs to be injected into a new
+ // context or if the promise type has changed.
+ return getDerived().RebuildCoawaitExpr(E->getKeywordLoc(), Result.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCoyieldExpr(CoyieldExpr *E) {
+ ExprResult Result = getDerived().TransformInitializer(E->getOperand(),
+ /*NotCopyInit*/false);
+ if (Result.isInvalid())
+ return ExprError();
+
+ // Always rebuild; we don't know if this needs to be injected into a new
+ // context or if the promise type has changed.
+ return getDerived().RebuildCoyieldExpr(E->getKeywordLoc(), Result.get());
+}
+
+// Objective-C Statements.
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformObjCAtTryStmt(ObjCAtTryStmt *S) {
+ // Transform the body of the @try.
+ StmtResult TryBody = getDerived().TransformStmt(S->getTryBody());
+ if (TryBody.isInvalid())
+ return StmtError();
+
+ // Transform the @catch statements (if present).
+ bool AnyCatchChanged = false;
+ SmallVector<Stmt*, 8> CatchStmts;
+ for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) {
+ StmtResult Catch = getDerived().TransformStmt(S->getCatchStmt(I));
+ if (Catch.isInvalid())
+ return StmtError();
+ if (Catch.get() != S->getCatchStmt(I))
+ AnyCatchChanged = true;
+ CatchStmts.push_back(Catch.get());
+ }
+
+ // Transform the @finally statement (if present).
+ StmtResult Finally;
+ if (S->getFinallyStmt()) {
+ Finally = getDerived().TransformStmt(S->getFinallyStmt());
+ if (Finally.isInvalid())
+ return StmtError();
+ }
+
+ // If nothing changed, just retain this statement.
+ if (!getDerived().AlwaysRebuild() &&
+ TryBody.get() == S->getTryBody() &&
+ !AnyCatchChanged &&
+ Finally.get() == S->getFinallyStmt())
+ return S;
+
+ // Build a new statement.
+ return getDerived().RebuildObjCAtTryStmt(S->getAtTryLoc(), TryBody.get(),
+ CatchStmts, Finally.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+ // Transform the @catch parameter, if there is one.
+ VarDecl *Var = nullptr;
+ if (VarDecl *FromVar = S->getCatchParamDecl()) {
+ TypeSourceInfo *TSInfo = nullptr;
+ if (FromVar->getTypeSourceInfo()) {
+ TSInfo = getDerived().TransformType(FromVar->getTypeSourceInfo());
+ if (!TSInfo)
+ return StmtError();
+ }
+
+ QualType T;
+ if (TSInfo)
+ T = TSInfo->getType();
+ else {
+ T = getDerived().TransformType(FromVar->getType());
+ if (T.isNull())
+ return StmtError();
+ }
+
+ Var = getDerived().RebuildObjCExceptionDecl(FromVar, TSInfo, T);
+ if (!Var)
+ return StmtError();
+ }
+
+ StmtResult Body = getDerived().TransformStmt(S->getCatchBody());
+ if (Body.isInvalid())
+ return StmtError();
+
+ return getDerived().RebuildObjCAtCatchStmt(S->getAtCatchLoc(),
+ S->getRParenLoc(),
+ Var, Body.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
+ // Transform the body.
+ StmtResult Body = getDerived().TransformStmt(S->getFinallyBody());
+ if (Body.isInvalid())
+ return StmtError();
+
+ // If nothing changed, just retain this statement.
+ if (!getDerived().AlwaysRebuild() &&
+ Body.get() == S->getFinallyBody())
+ return S;
+
+ // Build a new statement.
+ return getDerived().RebuildObjCAtFinallyStmt(S->getAtFinallyLoc(),
+ Body.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformObjCAtThrowStmt(ObjCAtThrowStmt *S) {
+ ExprResult Operand;
+ if (S->getThrowExpr()) {
+ Operand = getDerived().TransformExpr(S->getThrowExpr());
+ if (Operand.isInvalid())
+ return StmtError();
+ }
+
+ if (!getDerived().AlwaysRebuild() &&
+ Operand.get() == S->getThrowExpr())
+ return S;
+
+ return getDerived().RebuildObjCAtThrowStmt(S->getThrowLoc(), Operand.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformObjCAtSynchronizedStmt(
+ ObjCAtSynchronizedStmt *S) {
+ // Transform the object we are locking.
+ ExprResult Object = getDerived().TransformExpr(S->getSynchExpr());
+ if (Object.isInvalid())
+ return StmtError();
+ Object =
+ getDerived().RebuildObjCAtSynchronizedOperand(S->getAtSynchronizedLoc(),
+ Object.get());
+ if (Object.isInvalid())
+ return StmtError();
+
+ // Transform the body.
+ StmtResult Body = getDerived().TransformStmt(S->getSynchBody());
+ if (Body.isInvalid())
+ return StmtError();
+
+ // If nothing changed, just retain this statement.
+ if (!getDerived().AlwaysRebuild() &&
+ Object.get() == S->getSynchExpr() &&
+ Body.get() == S->getSynchBody())
+ return S;
+
+ // Build a new statement.
+ return getDerived().RebuildObjCAtSynchronizedStmt(S->getAtSynchronizedLoc(),
+ Object.get(), Body.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformObjCAutoreleasePoolStmt(
+ ObjCAutoreleasePoolStmt *S) {
+ // Transform the body.
+ StmtResult Body = getDerived().TransformStmt(S->getSubStmt());
+ if (Body.isInvalid())
+ return StmtError();
+
+ // If nothing changed, just retain this statement.
+ if (!getDerived().AlwaysRebuild() &&
+ Body.get() == S->getSubStmt())
+ return S;
+
+ // Build a new statement.
+ return getDerived().RebuildObjCAutoreleasePoolStmt(
+ S->getAtLoc(), Body.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformObjCForCollectionStmt(
+ ObjCForCollectionStmt *S) {
+ // Transform the element statement.
+ StmtResult Element = getDerived().TransformStmt(S->getElement());
+ if (Element.isInvalid())
+ return StmtError();
+
+ // Transform the collection expression.
+ ExprResult Collection = getDerived().TransformExpr(S->getCollection());
+ if (Collection.isInvalid())
+ return StmtError();
+
+ // Transform the body.
+ StmtResult Body = getDerived().TransformStmt(S->getBody());
+ if (Body.isInvalid())
+ return StmtError();
+
+ // If nothing changed, just retain this statement.
+ if (!getDerived().AlwaysRebuild() &&
+ Element.get() == S->getElement() &&
+ Collection.get() == S->getCollection() &&
+ Body.get() == S->getBody())
+ return S;
+
+ // Build a new statement.
+ return getDerived().RebuildObjCForCollectionStmt(S->getForLoc(),
+ Element.get(),
+ Collection.get(),
+ S->getRParenLoc(),
+ Body.get());
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformCXXCatchStmt(CXXCatchStmt *S) {
+ // Transform the exception declaration, if any.
+ VarDecl *Var = nullptr;
+ if (VarDecl *ExceptionDecl = S->getExceptionDecl()) {
+ TypeSourceInfo *T =
+ getDerived().TransformType(ExceptionDecl->getTypeSourceInfo());
+ if (!T)
+ return StmtError();
+
+ Var = getDerived().RebuildExceptionDecl(
+ ExceptionDecl, T, ExceptionDecl->getInnerLocStart(),
+ ExceptionDecl->getLocation(), ExceptionDecl->getIdentifier());
+ if (!Var || Var->isInvalidDecl())
+ return StmtError();
+ }
+
+ // Transform the actual exception handler.
+ StmtResult Handler = getDerived().TransformStmt(S->getHandlerBlock());
+ if (Handler.isInvalid())
+ return StmtError();
+
+ if (!getDerived().AlwaysRebuild() && !Var &&
+ Handler.get() == S->getHandlerBlock())
+ return S;
+
+ return getDerived().RebuildCXXCatchStmt(S->getCatchLoc(), Var, Handler.get());
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformCXXTryStmt(CXXTryStmt *S) {
+ // Transform the try block itself.
+ StmtResult TryBlock = getDerived().TransformCompoundStmt(S->getTryBlock());
+ if (TryBlock.isInvalid())
+ return StmtError();
+
+ // Transform the handlers.
+ bool HandlerChanged = false;
+ SmallVector<Stmt *, 8> Handlers;
+ for (unsigned I = 0, N = S->getNumHandlers(); I != N; ++I) {
+ StmtResult Handler = getDerived().TransformCXXCatchStmt(S->getHandler(I));
+ if (Handler.isInvalid())
+ return StmtError();
+
+ HandlerChanged = HandlerChanged || Handler.get() != S->getHandler(I);
+ Handlers.push_back(Handler.getAs<Stmt>());
+ }
+
+ if (!getDerived().AlwaysRebuild() && TryBlock.get() == S->getTryBlock() &&
+ !HandlerChanged)
+ return S;
+
+ return getDerived().RebuildCXXTryStmt(S->getTryLoc(), TryBlock.get(),
+ Handlers);
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformCXXForRangeStmt(CXXForRangeStmt *S) {
+ StmtResult Range = getDerived().TransformStmt(S->getRangeStmt());
+ if (Range.isInvalid())
+ return StmtError();
+
+ StmtResult BeginEnd = getDerived().TransformStmt(S->getBeginEndStmt());
+ if (BeginEnd.isInvalid())
+ return StmtError();
+
+ ExprResult Cond = getDerived().TransformExpr(S->getCond());
+ if (Cond.isInvalid())
+ return StmtError();
+ if (Cond.get())
+ Cond = SemaRef.CheckBooleanCondition(Cond.get(), S->getColonLoc());
+ if (Cond.isInvalid())
+ return StmtError();
+ if (Cond.get())
+ Cond = SemaRef.MaybeCreateExprWithCleanups(Cond.get());
+
+ ExprResult Inc = getDerived().TransformExpr(S->getInc());
+ if (Inc.isInvalid())
+ return StmtError();
+ if (Inc.get())
+ Inc = SemaRef.MaybeCreateExprWithCleanups(Inc.get());
+
+ StmtResult LoopVar = getDerived().TransformStmt(S->getLoopVarStmt());
+ if (LoopVar.isInvalid())
+ return StmtError();
+
+ StmtResult NewStmt = S;
+ if (getDerived().AlwaysRebuild() ||
+ Range.get() != S->getRangeStmt() ||
+ BeginEnd.get() != S->getBeginEndStmt() ||
+ Cond.get() != S->getCond() ||
+ Inc.get() != S->getInc() ||
+ LoopVar.get() != S->getLoopVarStmt()) {
+ NewStmt = getDerived().RebuildCXXForRangeStmt(S->getForLoc(),
+ S->getCoawaitLoc(),
+ S->getColonLoc(), Range.get(),
+ BeginEnd.get(), Cond.get(),
+ Inc.get(), LoopVar.get(),
+ S->getRParenLoc());
+ if (NewStmt.isInvalid())
+ return StmtError();
+ }
+
+ StmtResult Body = getDerived().TransformStmt(S->getBody());
+ if (Body.isInvalid())
+ return StmtError();
+
+ // Body has changed but we didn't rebuild the for-range statement. Rebuild
+ // it now so we have a new statement to attach the body to.
+ if (Body.get() != S->getBody() && NewStmt.get() == S) {
+ NewStmt = getDerived().RebuildCXXForRangeStmt(S->getForLoc(),
+ S->getCoawaitLoc(),
+ S->getColonLoc(), Range.get(),
+ BeginEnd.get(), Cond.get(),
+ Inc.get(), LoopVar.get(),
+ S->getRParenLoc());
+ if (NewStmt.isInvalid())
+ return StmtError();
+ }
+
+ if (NewStmt.get() == S)
+ return S;
+
+ return FinishCXXForRangeStmt(NewStmt.get(), Body.get());
+}
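+
+// A hypothetical example of the two-phase rebuild above: in
+//   template <typename T> void g(T t) { for (auto x : t) use(x); }
+// the range, condition, and increment may be unchanged after substitution
+// while the body still changes (e.g. 'use(x)' resolves differently), so the
+// for-range statement is rebuilt late to provide a fresh statement to
+// attach the transformed body to. ('use' is an assumed helper, not part of
+// this code.)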
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformMSDependentExistsStmt(
+ MSDependentExistsStmt *S) {
+ // Transform the nested-name-specifier, if any.
+ NestedNameSpecifierLoc QualifierLoc;
+ if (S->getQualifierLoc()) {
+ QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(S->getQualifierLoc());
+ if (!QualifierLoc)
+ return StmtError();
+ }
+
+ // Transform the declaration name.
+ DeclarationNameInfo NameInfo = S->getNameInfo();
+ if (NameInfo.getName()) {
+ NameInfo = getDerived().TransformDeclarationNameInfo(NameInfo);
+ if (!NameInfo.getName())
+ return StmtError();
+ }
+
+ // Check whether anything changed.
+ if (!getDerived().AlwaysRebuild() &&
+ QualifierLoc == S->getQualifierLoc() &&
+ NameInfo.getName() == S->getNameInfo().getName())
+ return S;
+
+ // Determine whether this name exists, if we can.
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ bool Dependent = false;
+ switch (getSema().CheckMicrosoftIfExistsSymbol(/*S=*/nullptr, SS, NameInfo)) {
+ case Sema::IER_Exists:
+ if (S->isIfExists())
+ break;
+
+ return new (getSema().Context) NullStmt(S->getKeywordLoc());
+
+ case Sema::IER_DoesNotExist:
+ if (S->isIfNotExists())
+ break;
+
+ return new (getSema().Context) NullStmt(S->getKeywordLoc());
+
+ case Sema::IER_Dependent:
+ Dependent = true;
+ break;
+
+ case Sema::IER_Error:
+ return StmtError();
+ }
+
+ // We need to continue with the instantiation, so do so now.
+ StmtResult SubStmt = getDerived().TransformCompoundStmt(S->getSubStmt());
+ if (SubStmt.isInvalid())
+ return StmtError();
+
+ // If we have resolved the name, just transform to the substatement.
+ if (!Dependent)
+ return SubStmt;
+
+ // The name is still dependent, so build a dependent expression again.
+ return getDerived().RebuildMSDependentExistsStmt(S->getKeywordLoc(),
+ S->isIfExists(),
+ QualifierLoc,
+ NameInfo,
+ SubStmt.get());
+}
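+
+// A hypothetical illustration: for
+//   template <typename T> void h(T t) { __if_exists(T::foo) { t.foo(); } }
+// instantiating with a T that has a member 'foo' yields IER_Exists, so the
+// braced substatement is transformed and returned directly; with no such
+// member, the whole construct collapses to a NullStmt. Only when the name
+// is still dependent is the __if_exists statement rebuilt as-is.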
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformMSPropertyRefExpr(MSPropertyRefExpr *E) {
+ NestedNameSpecifierLoc QualifierLoc;
+ if (E->getQualifierLoc()) {
+ QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(E->getQualifierLoc());
+ if (!QualifierLoc)
+ return ExprError();
+ }
+
+ MSPropertyDecl *PD = cast_or_null<MSPropertyDecl>(
+ getDerived().TransformDecl(E->getMemberLoc(), E->getPropertyDecl()));
+ if (!PD)
+ return ExprError();
+
+ ExprResult Base = getDerived().TransformExpr(E->getBaseExpr());
+ if (Base.isInvalid())
+ return ExprError();
+
+ return new (SemaRef.getASTContext())
+ MSPropertyRefExpr(Base.get(), PD, E->isArrow(),
+ SemaRef.getASTContext().PseudoObjectTy, VK_LValue,
+ QualifierLoc, E->getMemberLoc());
+}
+
+template <typename Derived>
+ExprResult TreeTransform<Derived>::TransformMSPropertySubscriptExpr(
+ MSPropertySubscriptExpr *E) {
+ auto BaseRes = getDerived().TransformExpr(E->getBase());
+ if (BaseRes.isInvalid())
+ return ExprError();
+ auto IdxRes = getDerived().TransformExpr(E->getIdx());
+ if (IdxRes.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ BaseRes.get() == E->getBase() &&
+ IdxRes.get() == E->getIdx())
+ return E;
+
+ return getDerived().RebuildArraySubscriptExpr(
+ BaseRes.get(), SourceLocation(), IdxRes.get(), E->getRBracketLoc());
+}
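+
+// A hypothetical illustration: given the MS extension
+//   struct A { __declspec(property(get = GetAt)) int Item[]; int GetAt(int); };
+// an access like 'a.Item[i]' is an MSPropertySubscriptExpr; rebuilding it
+// as a plain array subscript lets the pseudo-object machinery re-resolve
+// the accessor call against the transformed base and index.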
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformSEHTryStmt(SEHTryStmt *S) {
+ StmtResult TryBlock = getDerived().TransformCompoundStmt(S->getTryBlock());
+ if (TryBlock.isInvalid())
+ return StmtError();
+
+ StmtResult Handler = getDerived().TransformSEHHandler(S->getHandler());
+ if (Handler.isInvalid())
+ return StmtError();
+
+ if (!getDerived().AlwaysRebuild() && TryBlock.get() == S->getTryBlock() &&
+ Handler.get() == S->getHandler())
+ return S;
+
+ return getDerived().RebuildSEHTryStmt(S->getIsCXXTry(), S->getTryLoc(),
+ TryBlock.get(), Handler.get());
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformSEHFinallyStmt(SEHFinallyStmt *S) {
+ StmtResult Block = getDerived().TransformCompoundStmt(S->getBlock());
+ if (Block.isInvalid())
+ return StmtError();
+
+ return getDerived().RebuildSEHFinallyStmt(S->getFinallyLoc(), Block.get());
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformSEHExceptStmt(SEHExceptStmt *S) {
+ ExprResult FilterExpr = getDerived().TransformExpr(S->getFilterExpr());
+ if (FilterExpr.isInvalid())
+ return StmtError();
+
+ StmtResult Block = getDerived().TransformCompoundStmt(S->getBlock());
+ if (Block.isInvalid())
+ return StmtError();
+
+ return getDerived().RebuildSEHExceptStmt(S->getExceptLoc(), FilterExpr.get(),
+ Block.get());
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformSEHHandler(Stmt *Handler) {
+ if (isa<SEHFinallyStmt>(Handler))
+ return getDerived().TransformSEHFinallyStmt(cast<SEHFinallyStmt>(Handler));
+ else
+ return getDerived().TransformSEHExceptStmt(cast<SEHExceptStmt>(Handler));
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformSEHLeaveStmt(SEHLeaveStmt *S) {
+ return S;
+}
+
+//===----------------------------------------------------------------------===//
+// OpenMP directive transformation
+//===----------------------------------------------------------------------===//
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPExecutableDirective(
+ OMPExecutableDirective *D) {
+
+ // Transform the clauses.
+ llvm::SmallVector<OMPClause *, 16> TClauses;
+ ArrayRef<OMPClause *> Clauses = D->clauses();
+ TClauses.reserve(Clauses.size());
+ for (ArrayRef<OMPClause *>::iterator I = Clauses.begin(), E = Clauses.end();
+ I != E; ++I) {
+ if (*I) {
+ getDerived().getSema().StartOpenMPClause((*I)->getClauseKind());
+ OMPClause *Clause = getDerived().TransformOMPClause(*I);
+ getDerived().getSema().EndOpenMPClause();
+ if (Clause)
+ TClauses.push_back(Clause);
+ } else {
+ TClauses.push_back(nullptr);
+ }
+ }
+ StmtResult AssociatedStmt;
+ if (D->hasAssociatedStmt() && D->getAssociatedStmt()) {
+ getDerived().getSema().ActOnOpenMPRegionStart(D->getDirectiveKind(),
+ /*CurScope=*/nullptr);
+ StmtResult Body;
+ {
+ Sema::CompoundScopeRAII CompoundScope(getSema());
+ Body = getDerived().TransformStmt(
+ cast<CapturedStmt>(D->getAssociatedStmt())->getCapturedStmt());
+ }
+ AssociatedStmt =
+ getDerived().getSema().ActOnOpenMPRegionEnd(Body, TClauses);
+ if (AssociatedStmt.isInvalid()) {
+ return StmtError();
+ }
+ }
+ if (TClauses.size() != Clauses.size()) {
+ return StmtError();
+ }
+
+ // Transform the directive name for the 'omp critical' directive.
+ DeclarationNameInfo DirName;
+ if (D->getDirectiveKind() == OMPD_critical) {
+ DirName = cast<OMPCriticalDirective>(D)->getDirectiveName();
+ DirName = getDerived().TransformDeclarationNameInfo(DirName);
+ }
+ OpenMPDirectiveKind CancelRegion = OMPD_unknown;
+ if (D->getDirectiveKind() == OMPD_cancellation_point) {
+ CancelRegion = cast<OMPCancellationPointDirective>(D)->getCancelRegion();
+ } else if (D->getDirectiveKind() == OMPD_cancel) {
+ CancelRegion = cast<OMPCancelDirective>(D)->getCancelRegion();
+ }
+
+ return getDerived().RebuildOMPExecutableDirective(
+ D->getDirectiveKind(), DirName, CancelRegion, TClauses,
+ AssociatedStmt.get(), D->getLocStart(), D->getLocEnd());
+}
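+
+// A hypothetical illustration of the clause/region interplay above: in
+//   template <int N> void f() {
+//   #pragma omp parallel num_threads(N)
+//     {}
+//   }
+// instantiating f<4> transforms the num_threads clause (N becomes 4), and
+// ActOnOpenMPRegionEnd then re-attaches the captured region against the
+// concrete clause list. The per-directive wrappers below all funnel into
+// this function, bracketed by StartOpenMPDSABlock/EndOpenMPDSABlock.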
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPParallelDirective(OMPParallelDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPSimdDirective(OMPSimdDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_simd, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPForDirective(OMPForDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_for, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPForSimdDirective(OMPForSimdDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_for_simd, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPSectionsDirective(OMPSectionsDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_sections, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPSectionDirective(OMPSectionDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_section, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPSingleDirective(OMPSingleDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_single, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPMasterDirective(OMPMasterDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_master, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPCriticalDirective(OMPCriticalDirective *D) {
+ getDerived().getSema().StartOpenMPDSABlock(
+ OMPD_critical, D->getDirectiveName(), nullptr, D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPParallelForDirective(
+ OMPParallelForDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_for, DirName,
+ nullptr, D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPParallelForSimdDirective(
+ OMPParallelForSimdDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_for_simd, DirName,
+ nullptr, D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPParallelSectionsDirective(
+ OMPParallelSectionsDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_sections, DirName,
+ nullptr, D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPTaskDirective(OMPTaskDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_task, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPTaskyieldDirective(
+ OMPTaskyieldDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_taskyield, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPBarrierDirective(OMPBarrierDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_barrier, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPTaskwaitDirective(OMPTaskwaitDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_taskwait, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPTaskgroupDirective(
+ OMPTaskgroupDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_taskgroup, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPFlushDirective(OMPFlushDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_flush, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPOrderedDirective(OMPOrderedDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_ordered, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPAtomicDirective(OMPAtomicDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_atomic, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPTargetDirective(OMPTargetDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_target, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPTargetDataDirective(
+ OMPTargetDataDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_target_data, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPTeamsDirective(OMPTeamsDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_teams, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPCancellationPointDirective(
+ OMPCancellationPointDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_cancellation_point, DirName,
+ nullptr, D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPCancelDirective(OMPCancelDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_cancel, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPTaskLoopDirective(OMPTaskLoopDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_taskloop, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPTaskLoopSimdDirective(
+ OMPTaskLoopSimdDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_taskloop_simd, DirName,
+ nullptr, D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPDistributeDirective(
+ OMPDistributeDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_distribute, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+//===----------------------------------------------------------------------===//
+// OpenMP clause transformation
+//===----------------------------------------------------------------------===//
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPIfClause(OMPIfClause *C) {
+ ExprResult Cond = getDerived().TransformExpr(C->getCondition());
+ if (Cond.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPIfClause(
+ C->getNameModifier(), Cond.get(), C->getLocStart(), C->getLParenLoc(),
+ C->getNameModifierLoc(), C->getColonLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPFinalClause(OMPFinalClause *C) {
+ ExprResult Cond = getDerived().TransformExpr(C->getCondition());
+ if (Cond.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPFinalClause(Cond.get(), C->getLocStart(),
+ C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPNumThreadsClause(OMPNumThreadsClause *C) {
+ ExprResult NumThreads = getDerived().TransformExpr(C->getNumThreads());
+ if (NumThreads.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPNumThreadsClause(
+ NumThreads.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPSafelenClause(OMPSafelenClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getSafelen());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPSafelenClause(
+ E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPSimdlenClause(OMPSimdlenClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getSimdlen());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPSimdlenClause(
+ E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPCollapseClause(OMPCollapseClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getNumForLoops());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPCollapseClause(
+ E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPDefaultClause(OMPDefaultClause *C) {
+ return getDerived().RebuildOMPDefaultClause(
+ C->getDefaultKind(), C->getDefaultKindKwLoc(), C->getLocStart(),
+ C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPProcBindClause(OMPProcBindClause *C) {
+ return getDerived().RebuildOMPProcBindClause(
+ C->getProcBindKind(), C->getProcBindKindKwLoc(), C->getLocStart(),
+ C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPScheduleClause(OMPScheduleClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getChunkSize());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPScheduleClause(
+ C->getFirstScheduleModifier(), C->getSecondScheduleModifier(),
+ C->getScheduleKind(), E.get(), C->getLocStart(), C->getLParenLoc(),
+ C->getFirstScheduleModifierLoc(), C->getSecondScheduleModifierLoc(),
+ C->getScheduleKindLoc(), C->getCommaLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPOrderedClause(OMPOrderedClause *C) {
+ ExprResult E;
+ if (auto *Num = C->getNumForLoops()) {
+ E = getDerived().TransformExpr(Num);
+ if (E.isInvalid())
+ return nullptr;
+ }
+ return getDerived().RebuildOMPOrderedClause(C->getLocStart(), C->getLocEnd(),
+ C->getLParenLoc(), E.get());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPNowaitClause(OMPNowaitClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPUntiedClause(OMPUntiedClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPMergeableClause(OMPMergeableClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPReadClause(OMPReadClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPWriteClause(OMPWriteClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPUpdateClause(OMPUpdateClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPCaptureClause(OMPCaptureClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPSeqCstClause(OMPSeqCstClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPThreadsClause(OMPThreadsClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPSIMDClause(OMPSIMDClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPNogroupClause(OMPNogroupClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPPrivateClause(OMPPrivateClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ return getDerived().RebuildOMPPrivateClause(
+ Vars, C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
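+
+// The varlist clauses below (firstprivate, lastprivate, shared, reduction,
+// linear, aligned, copyin, copyprivate, flush, depend, map) follow this
+// same shape: transform each variable reference, bail out on the first
+// invalid one, and hand the rebuilt list to the matching Rebuild* hook. A
+// hypothetical example:
+//   template <typename T> void f(T x) {
+//   #pragma omp parallel private(x)
+//     {}
+//   }
+// where instantiation rewrites the DeclRefExpr for 'x' to refer to the
+// instantiated parameter.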
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPFirstprivateClause(
+ OMPFirstprivateClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ return getDerived().RebuildOMPFirstprivateClause(
+ Vars, C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPLastprivateClause(OMPLastprivateClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ return getDerived().RebuildOMPLastprivateClause(
+ Vars, C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPSharedClause(OMPSharedClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ return getDerived().RebuildOMPSharedClause(Vars, C->getLocStart(),
+ C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPReductionClause(OMPReductionClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ CXXScopeSpec ReductionIdScopeSpec;
+ ReductionIdScopeSpec.Adopt(C->getQualifierLoc());
+
+ DeclarationNameInfo NameInfo = C->getNameInfo();
+ if (NameInfo.getName()) {
+ NameInfo = getDerived().TransformDeclarationNameInfo(NameInfo);
+ if (!NameInfo.getName())
+ return nullptr;
+ }
+ return getDerived().RebuildOMPReductionClause(
+ Vars, C->getLocStart(), C->getLParenLoc(), C->getColonLoc(),
+ C->getLocEnd(), ReductionIdScopeSpec, NameInfo);
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPLinearClause(OMPLinearClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ ExprResult Step = getDerived().TransformExpr(C->getStep());
+ if (Step.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPLinearClause(
+ Vars, Step.get(), C->getLocStart(), C->getLParenLoc(), C->getModifier(),
+ C->getModifierLoc(), C->getColonLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPAlignedClause(OMPAlignedClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ ExprResult Alignment = getDerived().TransformExpr(C->getAlignment());
+ if (Alignment.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPAlignedClause(
+ Vars, Alignment.get(), C->getLocStart(), C->getLParenLoc(),
+ C->getColonLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPCopyinClause(OMPCopyinClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ return getDerived().RebuildOMPCopyinClause(Vars, C->getLocStart(),
+ C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPCopyprivateClause(OMPCopyprivateClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ return getDerived().RebuildOMPCopyprivateClause(
+ Vars, C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPFlushClause(OMPFlushClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ return getDerived().RebuildOMPFlushClause(Vars, C->getLocStart(),
+ C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPDependClause(OMPDependClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ return getDerived().RebuildOMPDependClause(
+ C->getDependencyKind(), C->getDependencyLoc(), C->getColonLoc(), Vars,
+ C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPDeviceClause(OMPDeviceClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getDevice());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPDeviceClause(
+ E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPMapClause(OMPMapClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ return getDerived().RebuildOMPMapClause(
+ C->getMapTypeModifier(), C->getMapType(), C->getMapLoc(),
+ C->getColonLoc(), Vars, C->getLocStart(), C->getLParenLoc(),
+ C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPNumTeamsClause(OMPNumTeamsClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getNumTeams());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPNumTeamsClause(
+ E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPThreadLimitClause(OMPThreadLimitClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getThreadLimit());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPThreadLimitClause(
+ E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPPriorityClause(OMPPriorityClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getPriority());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPPriorityClause(
+ E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPGrainsizeClause(OMPGrainsizeClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getGrainsize());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPGrainsizeClause(
+ E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPNumTasksClause(OMPNumTasksClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getNumTasks());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPNumTasksClause(
+ E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPHintClause(OMPHintClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getHint());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPHintClause(E.get(), C->getLocStart(),
+ C->getLParenLoc(), C->getLocEnd());
+}
+
+//===----------------------------------------------------------------------===//
+// Expression transformation
+//===----------------------------------------------------------------------===//
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformPredefinedExpr(PredefinedExpr *E) {
+ if (!E->isTypeDependent())
+ return E;
+
+ return getDerived().RebuildPredefinedExpr(E->getLocation(),
+ E->getIdentType());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformDeclRefExpr(DeclRefExpr *E) {
+ NestedNameSpecifierLoc QualifierLoc;
+ if (E->getQualifierLoc()) {
+ QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(E->getQualifierLoc());
+ if (!QualifierLoc)
+ return ExprError();
+ }
+
+ ValueDecl *ND
+ = cast_or_null<ValueDecl>(getDerived().TransformDecl(E->getLocation(),
+ E->getDecl()));
+ if (!ND)
+ return ExprError();
+
+ DeclarationNameInfo NameInfo = E->getNameInfo();
+ if (NameInfo.getName()) {
+ NameInfo = getDerived().TransformDeclarationNameInfo(NameInfo);
+ if (!NameInfo.getName())
+ return ExprError();
+ }
+
+ if (!getDerived().AlwaysRebuild() &&
+ QualifierLoc == E->getQualifierLoc() &&
+ ND == E->getDecl() &&
+ NameInfo.getName() == E->getDecl()->getDeclName() &&
+ !E->hasExplicitTemplateArgs()) {
+
+ // Mark it referenced in the new context regardless.
+ // FIXME: this is a bit instantiation-specific.
+ SemaRef.MarkDeclRefReferenced(E);
+
+ return E;
+ }
+
+ TemplateArgumentListInfo TransArgs, *TemplateArgs = nullptr;
+ if (E->hasExplicitTemplateArgs()) {
+ TemplateArgs = &TransArgs;
+ TransArgs.setLAngleLoc(E->getLAngleLoc());
+ TransArgs.setRAngleLoc(E->getRAngleLoc());
+ if (getDerived().TransformTemplateArguments(E->getTemplateArgs(),
+ E->getNumTemplateArgs(),
+ TransArgs))
+ return ExprError();
+ }
+
+ return getDerived().RebuildDeclRefExpr(QualifierLoc, ND, NameInfo,
+ TemplateArgs);
+}
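+
+// A hypothetical example: transforming the body of
+//   template <typename T> T twice(T v) { return v + v; }
+// rewrites each DeclRefExpr for 'v' to refer to the instantiated
+// ParmVarDecl; in the unchanged case the original expression is reused,
+// but it is still marked referenced so the use is recorded in the new
+// context.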
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformIntegerLiteral(IntegerLiteral *E) {
+ return E;
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformFloatingLiteral(FloatingLiteral *E) {
+ return E;
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformImaginaryLiteral(ImaginaryLiteral *E) {
+ return E;
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformStringLiteral(StringLiteral *E) {
+ return E;
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCharacterLiteral(CharacterLiteral *E) {
+ return E;
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformUserDefinedLiteral(UserDefinedLiteral *E) {
+ if (FunctionDecl *FD = E->getDirectCallee())
+ SemaRef.MarkFunctionReferenced(E->getLocStart(), FD);
+ return SemaRef.MaybeBindToTemporary(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformGenericSelectionExpr(GenericSelectionExpr *E) {
+ ExprResult ControllingExpr =
+ getDerived().TransformExpr(E->getControllingExpr());
+ if (ControllingExpr.isInvalid())
+ return ExprError();
+
+ SmallVector<Expr *, 4> AssocExprs;
+ SmallVector<TypeSourceInfo *, 4> AssocTypes;
+ for (unsigned i = 0; i != E->getNumAssocs(); ++i) {
+ TypeSourceInfo *TS = E->getAssocTypeSourceInfo(i);
+ if (TS) {
+ TypeSourceInfo *AssocType = getDerived().TransformType(TS);
+ if (!AssocType)
+ return ExprError();
+ AssocTypes.push_back(AssocType);
+ } else {
+ AssocTypes.push_back(nullptr);
+ }
+
+ ExprResult AssocExpr = getDerived().TransformExpr(E->getAssocExpr(i));
+ if (AssocExpr.isInvalid())
+ return ExprError();
+ AssocExprs.push_back(AssocExpr.get());
+ }
+
+ return getDerived().RebuildGenericSelectionExpr(E->getGenericLoc(),
+ E->getDefaultLoc(),
+ E->getRParenLoc(),
+ ControllingExpr.get(),
+ AssocTypes,
+ AssocExprs);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformParenExpr(ParenExpr *E) {
+ ExprResult SubExpr = getDerived().TransformExpr(E->getSubExpr());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getSubExpr())
+ return E;
+
+ return getDerived().RebuildParenExpr(SubExpr.get(), E->getLParen(),
+ E->getRParen());
+}
+
+/// \brief The operand of a unary address-of operator has special rules: it's
+/// allowed to refer to a non-static member of a class even if there's no 'this'
+/// object available.
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformAddressOfOperand(Expr *E) {
+ if (DependentScopeDeclRefExpr *DRE = dyn_cast<DependentScopeDeclRefExpr>(E))
+ return getDerived().TransformDependentScopeDeclRefExpr(DRE, true, nullptr);
+ else
+ return getDerived().TransformExpr(E);
+}
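+
+// A hypothetical example: in
+//   struct S { int m; };
+//   template <typename T> auto ptr() { return &T::m; }
+// the operand T::m is a DependentScopeDeclRefExpr naming a non-static data
+// member with no 'this' object; taking the IsAddressOfOperand=true path
+// lets the rebuilt reference form a pointer-to-member rather than being
+// diagnosed.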
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformUnaryOperator(UnaryOperator *E) {
+ ExprResult SubExpr;
+ if (E->getOpcode() == UO_AddrOf)
+ SubExpr = TransformAddressOfOperand(E->getSubExpr());
+ else
+ SubExpr = TransformExpr(E->getSubExpr());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getSubExpr())
+ return E;
+
+ return getDerived().RebuildUnaryOperator(E->getOperatorLoc(),
+ E->getOpcode(),
+ SubExpr.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformOffsetOfExpr(OffsetOfExpr *E) {
+ // Transform the type.
+ TypeSourceInfo *Type = getDerived().TransformType(E->getTypeSourceInfo());
+ if (!Type)
+ return ExprError();
+
+ // Transform all of the components into components similar to what the
+ // parser uses.
+ // FIXME: It would be slightly more efficient in the non-dependent case to
+ // just map FieldDecls, rather than requiring the rebuilder to look for
+ // the fields again. However, __builtin_offsetof is rare enough in
+ // template code that we don't care.
+ bool ExprChanged = false;
+ typedef Sema::OffsetOfComponent Component;
+ SmallVector<Component, 4> Components;
+ for (unsigned I = 0, N = E->getNumComponents(); I != N; ++I) {
+ const OffsetOfNode &ON = E->getComponent(I);
+ Component Comp;
+ Comp.isBrackets = true;
+ Comp.LocStart = ON.getSourceRange().getBegin();
+ Comp.LocEnd = ON.getSourceRange().getEnd();
+ switch (ON.getKind()) {
+ case OffsetOfNode::Array: {
+ Expr *FromIndex = E->getIndexExpr(ON.getArrayExprIndex());
+ ExprResult Index = getDerived().TransformExpr(FromIndex);
+ if (Index.isInvalid())
+ return ExprError();
+
+ ExprChanged = ExprChanged || Index.get() != FromIndex;
+ Comp.isBrackets = true;
+ Comp.U.E = Index.get();
+ break;
+ }
+
+ case OffsetOfNode::Field:
+ case OffsetOfNode::Identifier:
+ Comp.isBrackets = false;
+ Comp.U.IdentInfo = ON.getFieldName();
+ if (!Comp.U.IdentInfo)
+ continue;
+
+ break;
+
+ case OffsetOfNode::Base:
+ // Will be recomputed during the rebuild.
+ continue;
+ }
+
+ Components.push_back(Comp);
+ }
+
+ // If nothing changed, retain the existing expression.
+ if (!getDerived().AlwaysRebuild() &&
+ Type == E->getTypeSourceInfo() &&
+ !ExprChanged)
+ return E;
+
+ // Build a new offsetof expression.
+ return getDerived().RebuildOffsetOfExpr(E->getOperatorLoc(), Type,
+ Components, E->getRParenLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformOpaqueValueExpr(OpaqueValueExpr *E) {
+ assert((!E->getSourceExpr() || getDerived().AlreadyTransformed(E->getType())) &&
+ "opaque value expression requires transformation");
+ return E;
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformTypoExpr(TypoExpr *E) {
+ return E;
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformPseudoObjectExpr(PseudoObjectExpr *E) {
+ // Rebuild the syntactic form. The original syntactic form has
+ // opaque-value expressions in it, so strip those away and rebuild
+ // the result. This is a really awful way of doing this, but the
+ // better solution (rebuilding the semantic expressions and
+ // rebinding OVEs as necessary) doesn't work; we'd need
+ // TreeTransform to not strip away implicit conversions.
+ Expr *newSyntacticForm = SemaRef.recreateSyntacticForm(E);
+ ExprResult result = getDerived().TransformExpr(newSyntacticForm);
+ if (result.isInvalid()) return ExprError();
+
+ // If that gives us a pseudo-object result back, the pseudo-object
+ // expression must have been an lvalue-to-rvalue conversion which we
+ // should reapply.
+ if (result.get()->hasPlaceholderType(BuiltinType::PseudoObject))
+ result = SemaRef.checkPseudoObjectRValue(result.get());
+
+ return result;
+}
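+
+// A hypothetical illustration: for an Objective-C property compound
+// assignment such as 'obj.count += 1', the semantic form carries
+// OpaqueValueExprs binding the getter result and setter argument;
+// recreateSyntacticForm strips those so the whole operation is re-analyzed
+// from its written form after transformation.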
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformUnaryExprOrTypeTraitExpr(
+ UnaryExprOrTypeTraitExpr *E) {
+ if (E->isArgumentType()) {
+ TypeSourceInfo *OldT = E->getArgumentTypeInfo();
+
+ TypeSourceInfo *NewT = getDerived().TransformType(OldT);
+ if (!NewT)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && OldT == NewT)
+ return E;
+
+ return getDerived().RebuildUnaryExprOrTypeTrait(NewT, E->getOperatorLoc(),
+ E->getKind(),
+ E->getSourceRange());
+ }
+
+ // C++0x [expr.sizeof]p1:
+ // The operand is either an expression, which is an unevaluated operand
+ // [...]
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated,
+ Sema::ReuseLambdaContextDecl);
+
+ // Try to recover if we have something like sizeof(T::X) where X is a type.
+ // Notably, there must be *exactly* one set of parens if X is a type.
+ TypeSourceInfo *RecoveryTSI = nullptr;
+ ExprResult SubExpr;
+ auto *PE = dyn_cast<ParenExpr>(E->getArgumentExpr());
+ if (auto *DRE =
+ PE ? dyn_cast<DependentScopeDeclRefExpr>(PE->getSubExpr()) : nullptr)
+ SubExpr = getDerived().TransformParenDependentScopeDeclRefExpr(
+ PE, DRE, false, &RecoveryTSI);
+ else
+ SubExpr = getDerived().TransformExpr(E->getArgumentExpr());
+
+ if (RecoveryTSI) {
+ return getDerived().RebuildUnaryExprOrTypeTrait(
+ RecoveryTSI, E->getOperatorLoc(), E->getKind(), E->getSourceRange());
+ } else if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getArgumentExpr())
+ return E;
+
+ return getDerived().RebuildUnaryExprOrTypeTrait(SubExpr.get(),
+ E->getOperatorLoc(),
+ E->getKind(),
+ E->getSourceRange());
+}
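+
+// A hypothetical example of the sizeof(T::X) recovery above: for
+//   template <typename T> const int n = sizeof(T::X);
+// if instantiation discovers that T::X names a type, the parenthesized
+// operand is recovered as a type operand (via RecoveryTSI) and the trait is
+// rebuilt as sizeof(type) instead of diagnosing a malformed expression.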
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformArraySubscriptExpr(ArraySubscriptExpr *E) {
+ ExprResult LHS = getDerived().TransformExpr(E->getLHS());
+ if (LHS.isInvalid())
+ return ExprError();
+
+ ExprResult RHS = getDerived().TransformExpr(E->getRHS());
+ if (RHS.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ LHS.get() == E->getLHS() &&
+ RHS.get() == E->getRHS())
+ return E;
+
+ return getDerived().RebuildArraySubscriptExpr(LHS.get(),
+ /*FIXME:*/E->getLHS()->getLocStart(),
+ RHS.get(),
+ E->getRBracketLoc());
+}
+
+template <typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformOMPArraySectionExpr(OMPArraySectionExpr *E) {
+ ExprResult Base = getDerived().TransformExpr(E->getBase());
+ if (Base.isInvalid())
+ return ExprError();
+
+ ExprResult LowerBound;
+ if (E->getLowerBound()) {
+ LowerBound = getDerived().TransformExpr(E->getLowerBound());
+ if (LowerBound.isInvalid())
+ return ExprError();
+ }
+
+ ExprResult Length;
+ if (E->getLength()) {
+ Length = getDerived().TransformExpr(E->getLength());
+ if (Length.isInvalid())
+ return ExprError();
+ }
+
+ if (!getDerived().AlwaysRebuild() && Base.get() == E->getBase() &&
+ LowerBound.get() == E->getLowerBound() && Length.get() == E->getLength())
+ return E;
+
+ return getDerived().RebuildOMPArraySectionExpr(
+ Base.get(), E->getBase()->getLocEnd(), LowerBound.get(), E->getColonLoc(),
+ Length.get(), E->getRBracketLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCallExpr(CallExpr *E) {
+ // Transform the callee.
+ ExprResult Callee = getDerived().TransformExpr(E->getCallee());
+ if (Callee.isInvalid())
+ return ExprError();
+
+ // Transform arguments.
+ bool ArgChanged = false;
+ SmallVector<Expr*, 8> Args;
+ if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
+ &ArgChanged))
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Callee.get() == E->getCallee() &&
+ !ArgChanged)
+ return SemaRef.MaybeBindToTemporary(E);
+
+ // FIXME: Wrong source location information for the '('.
+ SourceLocation FakeLParenLoc
+ = ((Expr *)Callee.get())->getSourceRange().getBegin();
+ return getDerived().RebuildCallExpr(Callee.get(), FakeLParenLoc,
+ Args,
+ E->getRParenLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformMemberExpr(MemberExpr *E) {
+ ExprResult Base = getDerived().TransformExpr(E->getBase());
+ if (Base.isInvalid())
+ return ExprError();
+
+ NestedNameSpecifierLoc QualifierLoc;
+ if (E->hasQualifier()) {
+ QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(E->getQualifierLoc());
+
+ if (!QualifierLoc)
+ return ExprError();
+ }
+ SourceLocation TemplateKWLoc = E->getTemplateKeywordLoc();
+
+ ValueDecl *Member
+ = cast_or_null<ValueDecl>(getDerived().TransformDecl(E->getMemberLoc(),
+ E->getMemberDecl()));
+ if (!Member)
+ return ExprError();
+
+ NamedDecl *FoundDecl = E->getFoundDecl();
+ if (FoundDecl == E->getMemberDecl()) {
+ FoundDecl = Member;
+ } else {
+ FoundDecl = cast_or_null<NamedDecl>(
+ getDerived().TransformDecl(E->getMemberLoc(), FoundDecl));
+ if (!FoundDecl)
+ return ExprError();
+ }
+
+ if (!getDerived().AlwaysRebuild() &&
+ Base.get() == E->getBase() &&
+ QualifierLoc == E->getQualifierLoc() &&
+ Member == E->getMemberDecl() &&
+ FoundDecl == E->getFoundDecl() &&
+ !E->hasExplicitTemplateArgs()) {
+
+ // Mark it referenced in the new context regardless.
+ // FIXME: this is a bit instantiation-specific.
+ SemaRef.MarkMemberReferenced(E);
+
+ return E;
+ }
+
+ TemplateArgumentListInfo TransArgs;
+ if (E->hasExplicitTemplateArgs()) {
+ TransArgs.setLAngleLoc(E->getLAngleLoc());
+ TransArgs.setRAngleLoc(E->getRAngleLoc());
+ if (getDerived().TransformTemplateArguments(E->getTemplateArgs(),
+ E->getNumTemplateArgs(),
+ TransArgs))
+ return ExprError();
+ }
+
+ // FIXME: Bogus source location for the operator
+ SourceLocation FakeOperatorLoc =
+ SemaRef.getLocForEndOfToken(E->getBase()->getSourceRange().getEnd());
+
+ // FIXME: to do this check properly, we will need to preserve the
+ // first-qualifier-in-scope here, just in case we had a dependent
+ // base (and therefore couldn't do the check) and a
+ // nested-name-qualifier (and therefore could do the lookup).
+ NamedDecl *FirstQualifierInScope = nullptr;
+
+ return getDerived().RebuildMemberExpr(Base.get(), FakeOperatorLoc,
+ E->isArrow(),
+ QualifierLoc,
+ TemplateKWLoc,
+ E->getMemberNameInfo(),
+ Member,
+ FoundDecl,
+ (E->hasExplicitTemplateArgs()
+ ? &TransArgs : nullptr),
+ FirstQualifierInScope);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformBinaryOperator(BinaryOperator *E) {
+ ExprResult LHS = getDerived().TransformExpr(E->getLHS());
+ if (LHS.isInvalid())
+ return ExprError();
+
+ ExprResult RHS = getDerived().TransformExpr(E->getRHS());
+ if (RHS.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ LHS.get() == E->getLHS() &&
+ RHS.get() == E->getRHS())
+ return E;
+
+ Sema::FPContractStateRAII FPContractState(getSema());
+ getSema().FPFeatures.fp_contract = E->isFPContractable();
+
+ return getDerived().RebuildBinaryOperator(E->getOperatorLoc(), E->getOpcode(),
+ LHS.get(), RHS.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCompoundAssignOperator(
+ CompoundAssignOperator *E) {
+ return getDerived().TransformBinaryOperator(E);
+}
+
+template<typename Derived>
+ExprResult TreeTransform<Derived>::
+TransformBinaryConditionalOperator(BinaryConditionalOperator *e) {
+ // Just rebuild the common and RHS expressions and see whether we
+ // get any changes.
+
+ ExprResult commonExpr = getDerived().TransformExpr(e->getCommon());
+ if (commonExpr.isInvalid())
+ return ExprError();
+
+ ExprResult rhs = getDerived().TransformExpr(e->getFalseExpr());
+ if (rhs.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ commonExpr.get() == e->getCommon() &&
+ rhs.get() == e->getFalseExpr())
+ return e;
+
+ return getDerived().RebuildConditionalOperator(commonExpr.get(),
+ e->getQuestionLoc(),
+ nullptr,
+ e->getColonLoc(),
+ rhs.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformConditionalOperator(ConditionalOperator *E) {
+ ExprResult Cond = getDerived().TransformExpr(E->getCond());
+ if (Cond.isInvalid())
+ return ExprError();
+
+ ExprResult LHS = getDerived().TransformExpr(E->getLHS());
+ if (LHS.isInvalid())
+ return ExprError();
+
+ ExprResult RHS = getDerived().TransformExpr(E->getRHS());
+ if (RHS.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Cond.get() == E->getCond() &&
+ LHS.get() == E->getLHS() &&
+ RHS.get() == E->getRHS())
+ return E;
+
+ return getDerived().RebuildConditionalOperator(Cond.get(),
+ E->getQuestionLoc(),
+ LHS.get(),
+ E->getColonLoc(),
+ RHS.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformImplicitCastExpr(ImplicitCastExpr *E) {
+ // Implicit casts are eliminated during transformation, since they
+ // will be recomputed by semantic analysis after transformation.
+ return getDerived().TransformExpr(E->getSubExprAsWritten());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCStyleCastExpr(CStyleCastExpr *E) {
+ TypeSourceInfo *Type = getDerived().TransformType(E->getTypeInfoAsWritten());
+ if (!Type)
+ return ExprError();
+
+ ExprResult SubExpr
+ = getDerived().TransformExpr(E->getSubExprAsWritten());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Type == E->getTypeInfoAsWritten() &&
+ SubExpr.get() == E->getSubExpr())
+ return E;
+
+ return getDerived().RebuildCStyleCastExpr(E->getLParenLoc(),
+ Type,
+ E->getRParenLoc(),
+ SubExpr.get());
+}
+
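+/// \brief Transform a C99 compound literal.
+///
+/// An illustrative example (not from the original source):
+/// \code
+///   int *p = (int[]){1, 2, 3};  // the written type and the initializer
+///                               // list are transformed separately
+/// \endcode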
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ TypeSourceInfo *OldT = E->getTypeSourceInfo();
+ TypeSourceInfo *NewT = getDerived().TransformType(OldT);
+ if (!NewT)
+ return ExprError();
+
+ ExprResult Init = getDerived().TransformExpr(E->getInitializer());
+ if (Init.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ OldT == NewT &&
+ Init.get() == E->getInitializer())
+ return SemaRef.MaybeBindToTemporary(E);
+
+ // Note: the expression type doesn't necessarily match the
+ // type-as-written, but that's okay, because it should always be
+ // derivable from the initializer.
+
+ return getDerived().RebuildCompoundLiteralExpr(E->getLParenLoc(), NewT,
+ /*FIXME:*/E->getInitializer()->getLocEnd(),
+ Init.get());
+}
+
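+/// \brief Transform an ext_vector_type element accessor.
+///
+/// An illustrative example (not from the original source):
+/// \code
+///   typedef float float4 __attribute__((ext_vector_type(4)));
+///   float4 v;
+///   float f = v.x;  // '.x' is the accessor rebuilt below
+/// \endcode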
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformExtVectorElementExpr(ExtVectorElementExpr *E) {
+ ExprResult Base = getDerived().TransformExpr(E->getBase());
+ if (Base.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Base.get() == E->getBase())
+ return E;
+
+ // FIXME: Bad source location
+ SourceLocation FakeOperatorLoc =
+ SemaRef.getLocForEndOfToken(E->getBase()->getLocEnd());
+ return getDerived().RebuildExtVectorElementExpr(Base.get(), FakeOperatorLoc,
+ E->getAccessorLoc(),
+ E->getAccessor());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformInitListExpr(InitListExpr *E) {
+ if (InitListExpr *Syntactic = E->getSyntacticForm())
+ E = Syntactic;
+
+ bool InitChanged = false;
+
+ SmallVector<Expr*, 4> Inits;
+ if (getDerived().TransformExprs(E->getInits(), E->getNumInits(), false,
+ Inits, &InitChanged))
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && !InitChanged) {
+ // FIXME: Attempt to reuse the existing syntactic form of the InitListExpr
+ // in some cases. We can't reuse it in general, because the syntactic and
+ // semantic forms are linked, and we can't know that semantic form will
+ // match even if the syntactic form does.
+ }
+
+ return getDerived().RebuildInitList(E->getLBraceLoc(), Inits,
+ E->getRBraceLoc(), E->getType());
+}
+
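+/// \brief Transform a C99 designated initializer.
+///
+/// An illustrative example (not from the original source):
+/// \code
+///   int a[4] = { [0] = 1, [2 ... 3] = 5 };  // array and GNU array-range
+///                                           // designators
+/// \endcode
+/// Each designator is rebuilt below together with its transformed index
+/// (or range) expressions and the transformed initializer value.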
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformDesignatedInitExpr(DesignatedInitExpr *E) {
+ Designation Desig;
+
+  // Transform the initializer value.
+ ExprResult Init = getDerived().TransformExpr(E->getInit());
+ if (Init.isInvalid())
+ return ExprError();
+
+  // Transform the designators.
+ SmallVector<Expr*, 4> ArrayExprs;
+ bool ExprChanged = false;
+ for (DesignatedInitExpr::designators_iterator D = E->designators_begin(),
+ DEnd = E->designators_end();
+ D != DEnd; ++D) {
+ if (D->isFieldDesignator()) {
+ Desig.AddDesignator(Designator::getField(D->getFieldName(),
+ D->getDotLoc(),
+ D->getFieldLoc()));
+ continue;
+ }
+
+ if (D->isArrayDesignator()) {
+ ExprResult Index = getDerived().TransformExpr(E->getArrayIndex(*D));
+ if (Index.isInvalid())
+ return ExprError();
+
+ Desig.AddDesignator(Designator::getArray(Index.get(),
+ D->getLBracketLoc()));
+
+      ExprChanged = ExprChanged || Index.get() != E->getArrayIndex(*D);
+ ArrayExprs.push_back(Index.get());
+ continue;
+ }
+
+ assert(D->isArrayRangeDesignator() && "New kind of designator?");
+ ExprResult Start
+ = getDerived().TransformExpr(E->getArrayRangeStart(*D));
+ if (Start.isInvalid())
+ return ExprError();
+
+ ExprResult End = getDerived().TransformExpr(E->getArrayRangeEnd(*D));
+ if (End.isInvalid())
+ return ExprError();
+
+ Desig.AddDesignator(Designator::getArrayRange(Start.get(),
+ End.get(),
+ D->getLBracketLoc(),
+ D->getEllipsisLoc()));
+
+ ExprChanged = ExprChanged || Start.get() != E->getArrayRangeStart(*D) ||
+ End.get() != E->getArrayRangeEnd(*D);
+
+ ArrayExprs.push_back(Start.get());
+ ArrayExprs.push_back(End.get());
+ }
+
+ if (!getDerived().AlwaysRebuild() &&
+ Init.get() == E->getInit() &&
+ !ExprChanged)
+ return E;
+
+ return getDerived().RebuildDesignatedInitExpr(Desig, ArrayExprs,
+ E->getEqualOrColonLoc(),
+ E->usesGNUSyntax(), Init.get());
+}
+
+// If TransformInitListExpr() only works on the syntactic form of an
+// InitListExpr, then a DesignatedInitUpdateExpr is never encountered here.
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformDesignatedInitUpdateExpr(
+ DesignatedInitUpdateExpr *E) {
+ llvm_unreachable("Unexpected DesignatedInitUpdateExpr in syntactic form of "
+ "initializer");
+ return ExprError();
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformNoInitExpr(
+ NoInitExpr *E) {
+ llvm_unreachable("Unexpected NoInitExpr in syntactic form of initializer");
+ return ExprError();
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformImplicitValueInitExpr(
+ ImplicitValueInitExpr *E) {
+ TemporaryBase Rebase(*this, E->getLocStart(), DeclarationName());
+
+ // FIXME: Will we ever have proper type location here? Will we actually
+ // need to transform the type?
+ QualType T = getDerived().TransformType(E->getType());
+ if (T.isNull())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ T == E->getType())
+ return E;
+
+ return getDerived().RebuildImplicitValueInitExpr(T);
+}
+
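+/// \brief Transform a va_arg expression.
+///
+/// An illustrative example (not from the original source):
+/// \code
+///   int n = va_arg(ap, int);  // the written type 'int' is what gets
+///                             // transformed below
+/// \endcode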
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformVAArgExpr(VAArgExpr *E) {
+ TypeSourceInfo *TInfo = getDerived().TransformType(E->getWrittenTypeInfo());
+ if (!TInfo)
+ return ExprError();
+
+ ExprResult SubExpr = getDerived().TransformExpr(E->getSubExpr());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ TInfo == E->getWrittenTypeInfo() &&
+ SubExpr.get() == E->getSubExpr())
+ return E;
+
+ return getDerived().RebuildVAArgExpr(E->getBuiltinLoc(), SubExpr.get(),
+ TInfo, E->getRParenLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformParenListExpr(ParenListExpr *E) {
+ bool ArgumentChanged = false;
+ SmallVector<Expr*, 4> Inits;
+ if (TransformExprs(E->getExprs(), E->getNumExprs(), true, Inits,
+ &ArgumentChanged))
+ return ExprError();
+
+ return getDerived().RebuildParenListExpr(E->getLParenLoc(),
+ Inits,
+ E->getRParenLoc());
+}
+
+/// \brief Transform an address-of-label expression.
+///
+/// By default, the transformation of an address-of-label expression always
+/// rebuilds the expression, so that the label identifier can be resolved to
+/// the corresponding label statement by semantic analysis.
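+///
+/// An illustrative example (not from the original source):
+/// \code
+///   void *p = &&done;  // GNU address-of-label; 'done' must resolve to a
+///                      // label statement in the new context
+/// \endcode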
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformAddrLabelExpr(AddrLabelExpr *E) {
+ Decl *LD = getDerived().TransformDecl(E->getLabel()->getLocation(),
+ E->getLabel());
+ if (!LD)
+ return ExprError();
+
+ return getDerived().RebuildAddrLabelExpr(E->getAmpAmpLoc(), E->getLabelLoc(),
+ cast<LabelDecl>(LD));
+}
+
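+/// \brief Transform a GNU statement expression.
+///
+/// An illustrative example (not from the original source):
+/// \code
+///   int x = ({ int t = f(); t + 1; });  // the braced compound statement
+///                                       // is the sub-statement below
+/// \endcode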
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformStmtExpr(StmtExpr *E) {
+ SemaRef.ActOnStartStmtExpr();
+ StmtResult SubStmt
+ = getDerived().TransformCompoundStmt(E->getSubStmt(), true);
+ if (SubStmt.isInvalid()) {
+ SemaRef.ActOnStmtExprError();
+ return ExprError();
+ }
+
+ if (!getDerived().AlwaysRebuild() &&
+ SubStmt.get() == E->getSubStmt()) {
+ // Calling this an 'error' is unintuitive, but it does the right thing.
+ SemaRef.ActOnStmtExprError();
+ return SemaRef.MaybeBindToTemporary(E);
+ }
+
+ return getDerived().RebuildStmtExpr(E->getLParenLoc(),
+ SubStmt.get(),
+ E->getRParenLoc());
+}
+
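+/// \brief Transform a __builtin_choose_expr expression.
+///
+/// An illustrative example (not from the original source, with hypothetical
+/// callees f64 and f32):
+/// \code
+///   int r = __builtin_choose_expr(sizeof(long) == 8, f64(), f32());
+/// \endcode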
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformChooseExpr(ChooseExpr *E) {
+ ExprResult Cond = getDerived().TransformExpr(E->getCond());
+ if (Cond.isInvalid())
+ return ExprError();
+
+ ExprResult LHS = getDerived().TransformExpr(E->getLHS());
+ if (LHS.isInvalid())
+ return ExprError();
+
+ ExprResult RHS = getDerived().TransformExpr(E->getRHS());
+ if (RHS.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Cond.get() == E->getCond() &&
+ LHS.get() == E->getLHS() &&
+ RHS.get() == E->getRHS())
+ return E;
+
+ return getDerived().RebuildChooseExpr(E->getBuiltinLoc(),
+ Cond.get(), LHS.get(), RHS.get(),
+ E->getRParenLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformGNUNullExpr(GNUNullExpr *E) {
+ return E;
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
+ switch (E->getOperator()) {
+ case OO_New:
+ case OO_Delete:
+ case OO_Array_New:
+ case OO_Array_Delete:
+ llvm_unreachable("new and delete operators cannot use CXXOperatorCallExpr");
+
+ case OO_Call: {
+ // This is a call to an object's operator().
+ assert(E->getNumArgs() >= 1 && "Object call is missing arguments");
+
+ // Transform the object itself.
+ ExprResult Object = getDerived().TransformExpr(E->getArg(0));
+ if (Object.isInvalid())
+ return ExprError();
+
+ // FIXME: Poor location information
+ SourceLocation FakeLParenLoc = SemaRef.getLocForEndOfToken(
+ static_cast<Expr *>(Object.get())->getLocEnd());
+
+ // Transform the call arguments.
+ SmallVector<Expr*, 8> Args;
+ if (getDerived().TransformExprs(E->getArgs() + 1, E->getNumArgs() - 1, true,
+ Args))
+ return ExprError();
+
+ return getDerived().RebuildCallExpr(Object.get(), FakeLParenLoc,
+ Args,
+ E->getLocEnd());
+ }
+
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ case OO_##Name:
+#define OVERLOADED_OPERATOR_MULTI(Name,Spelling,Unary,Binary,MemberOnly)
+#include "clang/Basic/OperatorKinds.def"
+ case OO_Subscript:
+ // Handled below.
+ break;
+
+ case OO_Conditional:
+ llvm_unreachable("conditional operator is not actually overloadable");
+
+ case OO_None:
+ case NUM_OVERLOADED_OPERATORS:
+ llvm_unreachable("not an overloaded operator?");
+ }
+
+ ExprResult Callee = getDerived().TransformExpr(E->getCallee());
+ if (Callee.isInvalid())
+ return ExprError();
+
+ ExprResult First;
+ if (E->getOperator() == OO_Amp)
+ First = getDerived().TransformAddressOfOperand(E->getArg(0));
+ else
+ First = getDerived().TransformExpr(E->getArg(0));
+ if (First.isInvalid())
+ return ExprError();
+
+ ExprResult Second;
+ if (E->getNumArgs() == 2) {
+ Second = getDerived().TransformExpr(E->getArg(1));
+ if (Second.isInvalid())
+ return ExprError();
+ }
+
+ if (!getDerived().AlwaysRebuild() &&
+ Callee.get() == E->getCallee() &&
+ First.get() == E->getArg(0) &&
+ (E->getNumArgs() != 2 || Second.get() == E->getArg(1)))
+ return SemaRef.MaybeBindToTemporary(E);
+
+ Sema::FPContractStateRAII FPContractState(getSema());
+ getSema().FPFeatures.fp_contract = E->isFPContractable();
+
+ return getDerived().RebuildCXXOperatorCallExpr(E->getOperator(),
+ E->getOperatorLoc(),
+ Callee.get(),
+ First.get(),
+ Second.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXMemberCallExpr(CXXMemberCallExpr *E) {
+ return getDerived().TransformCallExpr(E);
+}
+
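+/// \brief Transform a CUDA kernel call.
+///
+/// An illustrative example (not from the original source):
+/// \code
+///   kernel<<<blocks, threads>>>(arg);  // '<<<...>>>' is the exec config,
+///                                      // transformed as a call below
+/// \endcode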
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
+ // Transform the callee.
+ ExprResult Callee = getDerived().TransformExpr(E->getCallee());
+ if (Callee.isInvalid())
+ return ExprError();
+
+ // Transform exec config.
+ ExprResult EC = getDerived().TransformCallExpr(E->getConfig());
+ if (EC.isInvalid())
+ return ExprError();
+
+ // Transform arguments.
+ bool ArgChanged = false;
+ SmallVector<Expr*, 8> Args;
+ if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
+ &ArgChanged))
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Callee.get() == E->getCallee() &&
+ !ArgChanged)
+ return SemaRef.MaybeBindToTemporary(E);
+
+ // FIXME: Wrong source location information for the '('.
+ SourceLocation FakeLParenLoc
+ = ((Expr *)Callee.get())->getSourceRange().getBegin();
+ return getDerived().RebuildCallExpr(Callee.get(), FakeLParenLoc,
+ Args,
+ E->getRParenLoc(), EC.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXNamedCastExpr(CXXNamedCastExpr *E) {
+ TypeSourceInfo *Type = getDerived().TransformType(E->getTypeInfoAsWritten());
+ if (!Type)
+ return ExprError();
+
+ ExprResult SubExpr
+ = getDerived().TransformExpr(E->getSubExprAsWritten());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Type == E->getTypeInfoAsWritten() &&
+ SubExpr.get() == E->getSubExpr())
+ return E;
+ return getDerived().RebuildCXXNamedCastExpr(
+ E->getOperatorLoc(), E->getStmtClass(), E->getAngleBrackets().getBegin(),
+ Type, E->getAngleBrackets().getEnd(),
+      // FIXME: this should be the '(' location.
+ E->getAngleBrackets().getEnd(), SubExpr.get(), E->getRParenLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXStaticCastExpr(CXXStaticCastExpr *E) {
+ return getDerived().TransformCXXNamedCastExpr(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXDynamicCastExpr(CXXDynamicCastExpr *E) {
+ return getDerived().TransformCXXNamedCastExpr(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXReinterpretCastExpr(
+ CXXReinterpretCastExpr *E) {
+ return getDerived().TransformCXXNamedCastExpr(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXConstCastExpr(CXXConstCastExpr *E) {
+ return getDerived().TransformCXXNamedCastExpr(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXFunctionalCastExpr(
+ CXXFunctionalCastExpr *E) {
+ TypeSourceInfo *Type = getDerived().TransformType(E->getTypeInfoAsWritten());
+ if (!Type)
+ return ExprError();
+
+ ExprResult SubExpr
+ = getDerived().TransformExpr(E->getSubExprAsWritten());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Type == E->getTypeInfoAsWritten() &&
+ SubExpr.get() == E->getSubExpr())
+ return E;
+
+ return getDerived().RebuildCXXFunctionalCastExpr(Type,
+ E->getLParenLoc(),
+ SubExpr.get(),
+ E->getRParenLoc());
+}
+
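+/// \brief Transform a typeid expression with either a type or an expression
+/// operand.
+///
+/// An illustrative example (not from the original source):
+/// \code
+///   const std::type_info &ti = typeid(*ptr);  // potentially evaluated
+///                                             // when '*ptr' is polymorphic
+/// \endcode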
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXTypeidExpr(CXXTypeidExpr *E) {
+ if (E->isTypeOperand()) {
+ TypeSourceInfo *TInfo
+ = getDerived().TransformType(E->getTypeOperandSourceInfo());
+ if (!TInfo)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ TInfo == E->getTypeOperandSourceInfo())
+ return E;
+
+ return getDerived().RebuildCXXTypeidExpr(E->getType(),
+ E->getLocStart(),
+ TInfo,
+ E->getLocEnd());
+ }
+
+ // We don't know whether the subexpression is potentially evaluated until
+ // after we perform semantic analysis. We speculatively assume it is
+ // unevaluated; it will get fixed later if the subexpression is in fact
+ // potentially evaluated.
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated,
+ Sema::ReuseLambdaContextDecl);
+
+ ExprResult SubExpr = getDerived().TransformExpr(E->getExprOperand());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ SubExpr.get() == E->getExprOperand())
+ return E;
+
+ return getDerived().RebuildCXXTypeidExpr(E->getType(),
+ E->getLocStart(),
+ SubExpr.get(),
+ E->getLocEnd());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXUuidofExpr(CXXUuidofExpr *E) {
+ if (E->isTypeOperand()) {
+ TypeSourceInfo *TInfo
+ = getDerived().TransformType(E->getTypeOperandSourceInfo());
+ if (!TInfo)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ TInfo == E->getTypeOperandSourceInfo())
+ return E;
+
+ return getDerived().RebuildCXXUuidofExpr(E->getType(),
+ E->getLocStart(),
+ TInfo,
+ E->getLocEnd());
+ }
+
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+
+ ExprResult SubExpr = getDerived().TransformExpr(E->getExprOperand());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ SubExpr.get() == E->getExprOperand())
+ return E;
+
+ return getDerived().RebuildCXXUuidofExpr(E->getType(),
+ E->getLocStart(),
+ SubExpr.get(),
+ E->getLocEnd());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
+ return E;
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXNullPtrLiteralExpr(
+ CXXNullPtrLiteralExpr *E) {
+ return E;
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXThisExpr(CXXThisExpr *E) {
+ QualType T = getSema().getCurrentThisType();
+
+ if (!getDerived().AlwaysRebuild() && T == E->getType()) {
+ // Make sure that we capture 'this'.
+ getSema().CheckCXXThisCapture(E->getLocStart());
+ return E;
+ }
+
+ return getDerived().RebuildCXXThisExpr(E->getLocStart(), T, E->isImplicit());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXThrowExpr(CXXThrowExpr *E) {
+ ExprResult SubExpr = getDerived().TransformExpr(E->getSubExpr());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ SubExpr.get() == E->getSubExpr())
+ return E;
+
+ return getDerived().RebuildCXXThrowExpr(E->getThrowLoc(), SubExpr.get(),
+ E->isThrownVariableInScope());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
+ ParmVarDecl *Param
+ = cast_or_null<ParmVarDecl>(getDerived().TransformDecl(E->getLocStart(),
+ E->getParam()));
+ if (!Param)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Param == E->getParam())
+ return E;
+
+ return getDerived().RebuildCXXDefaultArgExpr(E->getUsedLocation(), Param);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
+ FieldDecl *Field
+ = cast_or_null<FieldDecl>(getDerived().TransformDecl(E->getLocStart(),
+ E->getField()));
+ if (!Field)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && Field == E->getField())
+ return E;
+
+ return getDerived().RebuildCXXDefaultInitExpr(E->getExprLoc(), Field);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXScalarValueInitExpr(
+ CXXScalarValueInitExpr *E) {
+ TypeSourceInfo *T = getDerived().TransformType(E->getTypeSourceInfo());
+ if (!T)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ T == E->getTypeSourceInfo())
+ return E;
+
+ return getDerived().RebuildCXXScalarValueInitExpr(T,
+ /*FIXME:*/T->getTypeLoc().getEndLoc(),
+ E->getRParenLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
+ // Transform the type that we're allocating
+ TypeSourceInfo *AllocTypeInfo
+ = getDerived().TransformType(E->getAllocatedTypeSourceInfo());
+ if (!AllocTypeInfo)
+ return ExprError();
+
+ // Transform the size of the array we're allocating (if any).
+ ExprResult ArraySize = getDerived().TransformExpr(E->getArraySize());
+ if (ArraySize.isInvalid())
+ return ExprError();
+
+ // Transform the placement arguments (if any).
+ bool ArgumentChanged = false;
+ SmallVector<Expr*, 8> PlacementArgs;
+ if (getDerived().TransformExprs(E->getPlacementArgs(),
+ E->getNumPlacementArgs(), true,
+ PlacementArgs, &ArgumentChanged))
+ return ExprError();
+
+ // Transform the initializer (if any).
+ Expr *OldInit = E->getInitializer();
+ ExprResult NewInit;
+ if (OldInit)
+ NewInit = getDerived().TransformInitializer(OldInit, true);
+ if (NewInit.isInvalid())
+ return ExprError();
+
+ // Transform new operator and delete operator.
+ FunctionDecl *OperatorNew = nullptr;
+ if (E->getOperatorNew()) {
+ OperatorNew = cast_or_null<FunctionDecl>(
+ getDerived().TransformDecl(E->getLocStart(),
+ E->getOperatorNew()));
+ if (!OperatorNew)
+ return ExprError();
+ }
+
+ FunctionDecl *OperatorDelete = nullptr;
+ if (E->getOperatorDelete()) {
+ OperatorDelete = cast_or_null<FunctionDecl>(
+ getDerived().TransformDecl(E->getLocStart(),
+ E->getOperatorDelete()));
+ if (!OperatorDelete)
+ return ExprError();
+ }
+
+ if (!getDerived().AlwaysRebuild() &&
+ AllocTypeInfo == E->getAllocatedTypeSourceInfo() &&
+ ArraySize.get() == E->getArraySize() &&
+ NewInit.get() == OldInit &&
+ OperatorNew == E->getOperatorNew() &&
+ OperatorDelete == E->getOperatorDelete() &&
+ !ArgumentChanged) {
+ // Mark any declarations we need as referenced.
+ // FIXME: instantiation-specific.
+ if (OperatorNew)
+ SemaRef.MarkFunctionReferenced(E->getLocStart(), OperatorNew);
+ if (OperatorDelete)
+ SemaRef.MarkFunctionReferenced(E->getLocStart(), OperatorDelete);
+
+ if (E->isArray() && !E->getAllocatedType()->isDependentType()) {
+ QualType ElementType
+ = SemaRef.Context.getBaseElementType(E->getAllocatedType());
+ if (const RecordType *RecordT = ElementType->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordT->getDecl());
+ if (CXXDestructorDecl *Destructor = SemaRef.LookupDestructor(Record)) {
+ SemaRef.MarkFunctionReferenced(E->getLocStart(), Destructor);
+ }
+ }
+ }
+
+ return E;
+ }
+
+ QualType AllocType = AllocTypeInfo->getType();
+ if (!ArraySize.get()) {
+ // If no array size was specified, but the new expression was
+ // instantiated with an array type (e.g., "new T" where T is
+ // instantiated with "int[4]"), extract the outer bound from the
+    // array type as our array size. We do this for both constant-sized and
+    // dependently-sized array types.
+ const ArrayType *ArrayT = SemaRef.Context.getAsArrayType(AllocType);
+ if (!ArrayT) {
+ // Do nothing
+ } else if (const ConstantArrayType *ConsArrayT
+ = dyn_cast<ConstantArrayType>(ArrayT)) {
+ ArraySize = IntegerLiteral::Create(SemaRef.Context, ConsArrayT->getSize(),
+ SemaRef.Context.getSizeType(),
+ /*FIXME:*/ E->getLocStart());
+ AllocType = ConsArrayT->getElementType();
+ } else if (const DependentSizedArrayType *DepArrayT
+ = dyn_cast<DependentSizedArrayType>(ArrayT)) {
+ if (DepArrayT->getSizeExpr()) {
+ ArraySize = DepArrayT->getSizeExpr();
+ AllocType = DepArrayT->getElementType();
+ }
+ }
+ }
+
+ return getDerived().RebuildCXXNewExpr(E->getLocStart(),
+ E->isGlobalNew(),
+ /*FIXME:*/E->getLocStart(),
+ PlacementArgs,
+ /*FIXME:*/E->getLocStart(),
+ E->getTypeIdParens(),
+ AllocType,
+ AllocTypeInfo,
+ ArraySize.get(),
+ E->getDirectInitRange(),
+ NewInit.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXDeleteExpr(CXXDeleteExpr *E) {
+ ExprResult Operand = getDerived().TransformExpr(E->getArgument());
+ if (Operand.isInvalid())
+ return ExprError();
+
+ // Transform the delete operator, if known.
+ FunctionDecl *OperatorDelete = nullptr;
+ if (E->getOperatorDelete()) {
+ OperatorDelete = cast_or_null<FunctionDecl>(
+ getDerived().TransformDecl(E->getLocStart(),
+ E->getOperatorDelete()));
+ if (!OperatorDelete)
+ return ExprError();
+ }
+
+ if (!getDerived().AlwaysRebuild() &&
+ Operand.get() == E->getArgument() &&
+ OperatorDelete == E->getOperatorDelete()) {
+ // Mark any declarations we need as referenced.
+ // FIXME: instantiation-specific.
+ if (OperatorDelete)
+ SemaRef.MarkFunctionReferenced(E->getLocStart(), OperatorDelete);
+
+ if (!E->getArgument()->isTypeDependent()) {
+ QualType Destroyed = SemaRef.Context.getBaseElementType(
+ E->getDestroyedType());
+ if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
+ SemaRef.MarkFunctionReferenced(E->getLocStart(),
+ SemaRef.LookupDestructor(Record));
+ }
+ }
+
+ return E;
+ }
+
+ return getDerived().RebuildCXXDeleteExpr(E->getLocStart(),
+ E->isGlobalDelete(),
+ E->isArrayForm(),
+ Operand.get());
+}
+
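+/// \brief Transform a pseudo-destructor call.
+///
+/// An illustrative example (not from the original source):
+/// \code
+///   template<typename T> void destroy(T *p) { p->~T(); }
+/// \endcode
+/// When T is a scalar type, 'p->~T()' is a pseudo-destructor call.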
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXPseudoDestructorExpr(
+ CXXPseudoDestructorExpr *E) {
+ ExprResult Base = getDerived().TransformExpr(E->getBase());
+ if (Base.isInvalid())
+ return ExprError();
+
+ ParsedType ObjectTypePtr;
+ bool MayBePseudoDestructor = false;
+ Base = SemaRef.ActOnStartCXXMemberReference(nullptr, Base.get(),
+ E->getOperatorLoc(),
+ E->isArrow()? tok::arrow : tok::period,
+ ObjectTypePtr,
+ MayBePseudoDestructor);
+ if (Base.isInvalid())
+ return ExprError();
+
+ QualType ObjectType = ObjectTypePtr.get();
+ NestedNameSpecifierLoc QualifierLoc = E->getQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(QualifierLoc, ObjectType);
+ if (!QualifierLoc)
+ return ExprError();
+ }
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+
+ PseudoDestructorTypeStorage Destroyed;
+ if (E->getDestroyedTypeInfo()) {
+ TypeSourceInfo *DestroyedTypeInfo
+ = getDerived().TransformTypeInObjectScope(E->getDestroyedTypeInfo(),
+ ObjectType, nullptr, SS);
+ if (!DestroyedTypeInfo)
+ return ExprError();
+ Destroyed = DestroyedTypeInfo;
+ } else if (!ObjectType.isNull() && ObjectType->isDependentType()) {
+ // We aren't likely to be able to resolve the identifier down to a type
+ // now anyway, so just retain the identifier.
+ Destroyed = PseudoDestructorTypeStorage(E->getDestroyedTypeIdentifier(),
+ E->getDestroyedTypeLoc());
+ } else {
+ // Look for a destructor known with the given name.
+ ParsedType T = SemaRef.getDestructorName(E->getTildeLoc(),
+ *E->getDestroyedTypeIdentifier(),
+ E->getDestroyedTypeLoc(),
+ /*Scope=*/nullptr,
+ SS, ObjectTypePtr,
+ false);
+ if (!T)
+ return ExprError();
+
+ Destroyed
+ = SemaRef.Context.getTrivialTypeSourceInfo(SemaRef.GetTypeFromParser(T),
+ E->getDestroyedTypeLoc());
+ }
+
+ TypeSourceInfo *ScopeTypeInfo = nullptr;
+ if (E->getScopeTypeInfo()) {
+ CXXScopeSpec EmptySS;
+ ScopeTypeInfo = getDerived().TransformTypeInObjectScope(
+ E->getScopeTypeInfo(), ObjectType, nullptr, EmptySS);
+ if (!ScopeTypeInfo)
+ return ExprError();
+ }
+
+ return getDerived().RebuildCXXPseudoDestructorExpr(Base.get(),
+ E->getOperatorLoc(),
+ E->isArrow(),
+ SS,
+ ScopeTypeInfo,
+ E->getColonColonLoc(),
+ E->getTildeLoc(),
+ Destroyed);
+}
+
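+/// \brief Transform a reference to an unresolved set of declarations, such
+/// as a call to an overload set from within a template.
+///
+/// An illustrative example (not from the original source):
+/// \code
+///   void g(int);
+///   void g(double);
+///   template<typename T> void call(T t) { g(t); }  // 'g' is unresolved
+/// \endcode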
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformUnresolvedLookupExpr(
+ UnresolvedLookupExpr *Old) {
+ LookupResult R(SemaRef, Old->getName(), Old->getNameLoc(),
+ Sema::LookupOrdinaryName);
+
+ // Transform all the decls.
+ for (UnresolvedLookupExpr::decls_iterator I = Old->decls_begin(),
+ E = Old->decls_end(); I != E; ++I) {
+ NamedDecl *InstD = static_cast<NamedDecl*>(
+ getDerived().TransformDecl(Old->getNameLoc(),
+ *I));
+ if (!InstD) {
+      // Silently ignore a UsingShadowDecl that instantiated to nothing;
+      // this can happen because of dependent hiding.
+ if (isa<UsingShadowDecl>(*I))
+ continue;
+ else {
+ R.clear();
+ return ExprError();
+ }
+ }
+
+ // Expand using declarations.
+ if (isa<UsingDecl>(InstD)) {
+ UsingDecl *UD = cast<UsingDecl>(InstD);
+ for (auto *I : UD->shadows())
+ R.addDecl(I);
+ continue;
+ }
+
+ R.addDecl(InstD);
+ }
+
+ // Resolve a kind, but don't do any further analysis. If it's
+ // ambiguous, the callee needs to deal with it.
+ R.resolveKind();
+
+ // Rebuild the nested-name qualifier, if present.
+ CXXScopeSpec SS;
+ if (Old->getQualifierLoc()) {
+ NestedNameSpecifierLoc QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(Old->getQualifierLoc());
+ if (!QualifierLoc)
+ return ExprError();
+
+ SS.Adopt(QualifierLoc);
+ }
+
+ if (Old->getNamingClass()) {
+ CXXRecordDecl *NamingClass
+ = cast_or_null<CXXRecordDecl>(getDerived().TransformDecl(
+ Old->getNameLoc(),
+ Old->getNamingClass()));
+ if (!NamingClass) {
+ R.clear();
+ return ExprError();
+ }
+
+ R.setNamingClass(NamingClass);
+ }
+
+ SourceLocation TemplateKWLoc = Old->getTemplateKeywordLoc();
+
+ // If we have neither explicit template arguments, nor the template keyword,
+ // it's a normal declaration name or member reference.
+ if (!Old->hasExplicitTemplateArgs() && !TemplateKWLoc.isValid()) {
+ NamedDecl *D = R.getAsSingle<NamedDecl>();
+ // In a C++11 unevaluated context, an UnresolvedLookupExpr might refer to an
+ // instance member. In other contexts, BuildPossibleImplicitMemberExpr will
+ // give a good diagnostic.
+ if (D && D->isCXXInstanceMember()) {
+ return SemaRef.BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc, R,
+ /*TemplateArgs=*/nullptr,
+ /*Scope=*/nullptr);
+ }
+
+ return getDerived().RebuildDeclarationNameExpr(SS, R, Old->requiresADL());
+ }
+
+ // If we have template arguments, rebuild them, then rebuild the
+ // templateid expression.
+ TemplateArgumentListInfo TransArgs(Old->getLAngleLoc(), Old->getRAngleLoc());
+ if (Old->hasExplicitTemplateArgs() &&
+ getDerived().TransformTemplateArguments(Old->getTemplateArgs(),
+ Old->getNumTemplateArgs(),
+ TransArgs)) {
+ R.clear();
+ return ExprError();
+ }
+
+ return getDerived().RebuildTemplateIdExpr(SS, TemplateKWLoc, R,
+ Old->requiresADL(), &TransArgs);
+}
+
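+/// \brief Transform a type trait expression, expanding any pack-expansion
+/// arguments.
+///
+/// An illustrative example (not from the original source):
+/// \code
+///   template<typename T, typename ...Ts>
+///   struct B { static const bool value = __is_constructible(T, Ts...); };
+/// \endcode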
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformTypeTraitExpr(TypeTraitExpr *E) {
+ bool ArgChanged = false;
+ SmallVector<TypeSourceInfo *, 4> Args;
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) {
+ TypeSourceInfo *From = E->getArg(I);
+ TypeLoc FromTL = From->getTypeLoc();
+ if (!FromTL.getAs<PackExpansionTypeLoc>()) {
+ TypeLocBuilder TLB;
+ TLB.reserve(FromTL.getFullDataSize());
+ QualType To = getDerived().TransformType(TLB, FromTL);
+ if (To.isNull())
+ return ExprError();
+
+ if (To == From->getType())
+ Args.push_back(From);
+ else {
+ Args.push_back(TLB.getTypeSourceInfo(SemaRef.Context, To));
+ ArgChanged = true;
+ }
+ continue;
+ }
+
+ ArgChanged = true;
+
+ // We have a pack expansion. Instantiate it.
+ PackExpansionTypeLoc ExpansionTL = FromTL.castAs<PackExpansionTypeLoc>();
+ TypeLoc PatternTL = ExpansionTL.getPatternLoc();
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ SemaRef.collectUnexpandedParameterPacks(PatternTL, Unexpanded);
+
+ // Determine whether the set of unexpanded parameter packs can and should
+ // be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ Optional<unsigned> OrigNumExpansions =
+ ExpansionTL.getTypePtr()->getNumExpansions();
+ Optional<unsigned> NumExpansions = OrigNumExpansions;
+ if (getDerived().TryExpandParameterPacks(ExpansionTL.getEllipsisLoc(),
+ PatternTL.getSourceRange(),
+ Unexpanded,
+ Expand, RetainExpansion,
+ NumExpansions))
+ return ExprError();
+
+ if (!Expand) {
+ // The transform has determined that we should perform a simple
+ // transformation on the pack expansion, producing another pack
+ // expansion.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+
+ TypeLocBuilder TLB;
+ TLB.reserve(From->getTypeLoc().getFullDataSize());
+
+ QualType To = getDerived().TransformType(TLB, PatternTL);
+ if (To.isNull())
+ return ExprError();
+
+ To = getDerived().RebuildPackExpansionType(To,
+ PatternTL.getSourceRange(),
+ ExpansionTL.getEllipsisLoc(),
+ NumExpansions);
+ if (To.isNull())
+ return ExprError();
+
+ PackExpansionTypeLoc ToExpansionTL
+ = TLB.push<PackExpansionTypeLoc>(To);
+ ToExpansionTL.setEllipsisLoc(ExpansionTL.getEllipsisLoc());
+ Args.push_back(TLB.getTypeSourceInfo(SemaRef.Context, To));
+ continue;
+ }
+
+ // Expand the pack expansion by substituting for each argument in the
+ // pack(s).
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
+ TypeLocBuilder TLB;
+ TLB.reserve(PatternTL.getFullDataSize());
+ QualType To = getDerived().TransformType(TLB, PatternTL);
+ if (To.isNull())
+ return ExprError();
+
+ if (To->containsUnexpandedParameterPack()) {
+ To = getDerived().RebuildPackExpansionType(To,
+ PatternTL.getSourceRange(),
+ ExpansionTL.getEllipsisLoc(),
+ NumExpansions);
+ if (To.isNull())
+ return ExprError();
+
+ PackExpansionTypeLoc ToExpansionTL
+ = TLB.push<PackExpansionTypeLoc>(To);
+ ToExpansionTL.setEllipsisLoc(ExpansionTL.getEllipsisLoc());
+ }
+
+ Args.push_back(TLB.getTypeSourceInfo(SemaRef.Context, To));
+ }
+
+ if (!RetainExpansion)
+ continue;
+
+ // If we're supposed to retain a pack expansion, do so by temporarily
+ // forgetting the partially-substituted parameter pack.
+ ForgetPartiallySubstitutedPackRAII Forget(getDerived());
+
+ TypeLocBuilder TLB;
+ TLB.reserve(From->getTypeLoc().getFullDataSize());
+
+ QualType To = getDerived().TransformType(TLB, PatternTL);
+ if (To.isNull())
+ return ExprError();
+
+ To = getDerived().RebuildPackExpansionType(To,
+ PatternTL.getSourceRange(),
+ ExpansionTL.getEllipsisLoc(),
+ NumExpansions);
+ if (To.isNull())
+ return ExprError();
+
+ PackExpansionTypeLoc ToExpansionTL
+ = TLB.push<PackExpansionTypeLoc>(To);
+ ToExpansionTL.setEllipsisLoc(ExpansionTL.getEllipsisLoc());
+ Args.push_back(TLB.getTypeSourceInfo(SemaRef.Context, To));
+ }
+
+ if (!getDerived().AlwaysRebuild() && !ArgChanged)
+ return E;
+
+ return getDerived().RebuildTypeTrait(E->getTrait(),
+ E->getLocStart(),
+ Args,
+ E->getLocEnd());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
+ TypeSourceInfo *T = getDerived().TransformType(E->getQueriedTypeSourceInfo());
+ if (!T)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ T == E->getQueriedTypeSourceInfo())
+ return E;
+
+ ExprResult SubExpr;
+ {
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+ SubExpr = getDerived().TransformExpr(E->getDimensionExpression());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+    if (!getDerived().AlwaysRebuild() &&
+        SubExpr.get() == E->getDimensionExpression())
+ return E;
+ }
+
+ return getDerived().RebuildArrayTypeTrait(E->getTrait(),
+ E->getLocStart(),
+ T,
+ SubExpr.get(),
+ E->getLocEnd());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformExpressionTraitExpr(ExpressionTraitExpr *E) {
+ ExprResult SubExpr;
+ {
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+ SubExpr = getDerived().TransformExpr(E->getQueriedExpression());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+    if (!getDerived().AlwaysRebuild() &&
+        SubExpr.get() == E->getQueriedExpression())
+ return E;
+ }
+
+ return getDerived().RebuildExpressionTrait(
+ E->getTrait(), E->getLocStart(), SubExpr.get(), E->getLocEnd());
+}
+
+template <typename Derived>
+ExprResult TreeTransform<Derived>::TransformParenDependentScopeDeclRefExpr(
+ ParenExpr *PE, DependentScopeDeclRefExpr *DRE, bool AddrTaken,
+ TypeSourceInfo **RecoveryTSI) {
+ ExprResult NewDRE = getDerived().TransformDependentScopeDeclRefExpr(
+ DRE, AddrTaken, RecoveryTSI);
+
+ // Propagate both errors and recovered types, which return ExprEmpty.
+ if (!NewDRE.isUsable())
+ return NewDRE;
+
+ // We got an expr, wrap it up in parens.
+ if (!getDerived().AlwaysRebuild() && NewDRE.get() == DRE)
+ return PE;
+ return getDerived().RebuildParenExpr(NewDRE.get(), PE->getLParen(),
+ PE->getRParen());
+}
+
+template <typename Derived>
+ExprResult TreeTransform<Derived>::TransformDependentScopeDeclRefExpr(
+ DependentScopeDeclRefExpr *E) {
+ return TransformDependentScopeDeclRefExpr(E, /*IsAddressOfOperand=*/false,
+ nullptr);
+}
+
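+/// \brief Transform a qualified declaration reference into a dependent
+/// scope.
+///
+/// An illustrative example (not from the original source):
+/// \code
+///   template<typename T> int get() { return T::value; }  // 'T::value'
+/// \endcode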
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformDependentScopeDeclRefExpr(
+ DependentScopeDeclRefExpr *E,
+ bool IsAddressOfOperand,
+ TypeSourceInfo **RecoveryTSI) {
+ assert(E->getQualifierLoc());
+ NestedNameSpecifierLoc QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(E->getQualifierLoc());
+ if (!QualifierLoc)
+ return ExprError();
+ SourceLocation TemplateKWLoc = E->getTemplateKeywordLoc();
+
+ // TODO: If this is a conversion-function-id, verify that the
+ // destination type name (if present) resolves the same way after
+ // instantiation as it did in the local scope.
+
+ DeclarationNameInfo NameInfo
+ = getDerived().TransformDeclarationNameInfo(E->getNameInfo());
+ if (!NameInfo.getName())
+ return ExprError();
+
+ if (!E->hasExplicitTemplateArgs()) {
+ if (!getDerived().AlwaysRebuild() &&
+ QualifierLoc == E->getQualifierLoc() &&
+ // Note: it is sufficient to compare the Name component of NameInfo:
+        // if the name has not changed, DNLoc has not changed either.
+ NameInfo.getName() == E->getDeclName())
+ return E;
+
+ return getDerived().RebuildDependentScopeDeclRefExpr(
+ QualifierLoc, TemplateKWLoc, NameInfo, /*TemplateArgs=*/nullptr,
+ IsAddressOfOperand, RecoveryTSI);
+ }
+
+ TemplateArgumentListInfo TransArgs(E->getLAngleLoc(), E->getRAngleLoc());
+ if (getDerived().TransformTemplateArguments(E->getTemplateArgs(),
+ E->getNumTemplateArgs(),
+ TransArgs))
+ return ExprError();
+
+ return getDerived().RebuildDependentScopeDeclRefExpr(
+ QualifierLoc, TemplateKWLoc, NameInfo, &TransArgs, IsAddressOfOperand,
+ RecoveryTSI);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXConstructExpr(CXXConstructExpr *E) {
+  // CXXConstructExprs other than those for list-initialization and
+  // CXXTemporaryObjectExprs are always implicit, so when we have
+  // a 1-argument construction we just transform that argument.
+ if ((E->getNumArgs() == 1 ||
+ (E->getNumArgs() > 1 && getDerived().DropCallArgument(E->getArg(1)))) &&
+ (!getDerived().DropCallArgument(E->getArg(0))) &&
+ !E->isListInitialization())
+ return getDerived().TransformExpr(E->getArg(0));
+
+ TemporaryBase Rebase(*this, /*FIXME*/E->getLocStart(), DeclarationName());
+
+ QualType T = getDerived().TransformType(E->getType());
+ if (T.isNull())
+ return ExprError();
+
+ CXXConstructorDecl *Constructor
+ = cast_or_null<CXXConstructorDecl>(
+ getDerived().TransformDecl(E->getLocStart(),
+ E->getConstructor()));
+ if (!Constructor)
+ return ExprError();
+
+ bool ArgumentChanged = false;
+ SmallVector<Expr*, 8> Args;
+ if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
+ &ArgumentChanged))
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ T == E->getType() &&
+ Constructor == E->getConstructor() &&
+ !ArgumentChanged) {
+ // Mark the constructor as referenced.
+ // FIXME: Instantiation-specific
+ SemaRef.MarkFunctionReferenced(E->getLocStart(), Constructor);
+ return E;
+ }
+
+ return getDerived().RebuildCXXConstructExpr(T, /*FIXME:*/E->getLocStart(),
+ Constructor, E->isElidable(),
+ Args,
+ E->hadMultipleCandidates(),
+ E->isListInitialization(),
+ E->isStdInitListInitialization(),
+ E->requiresZeroInitialization(),
+ E->getConstructionKind(),
+ E->getParenOrBraceRange());
+}
+
+/// \brief Transform a C++ temporary-binding expression.
+///
+/// Since CXXBindTemporaryExpr nodes are implicitly generated, we just
+/// transform the subexpression and return that.
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ return getDerived().TransformExpr(E->getSubExpr());
+}
+
+/// \brief Transform a C++ expression that contains cleanups that should
+/// be run after the expression is evaluated.
+///
+/// Since ExprWithCleanups nodes are implicitly generated, we
+/// just transform the subexpression and return that.
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformExprWithCleanups(ExprWithCleanups *E) {
+ return getDerived().TransformExpr(E->getSubExpr());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXTemporaryObjectExpr(
+ CXXTemporaryObjectExpr *E) {
+ TypeSourceInfo *T = getDerived().TransformType(E->getTypeSourceInfo());
+ if (!T)
+ return ExprError();
+
+ CXXConstructorDecl *Constructor
+ = cast_or_null<CXXConstructorDecl>(
+ getDerived().TransformDecl(E->getLocStart(),
+ E->getConstructor()));
+ if (!Constructor)
+ return ExprError();
+
+ bool ArgumentChanged = false;
+ SmallVector<Expr*, 8> Args;
+ Args.reserve(E->getNumArgs());
+ if (TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
+ &ArgumentChanged))
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ T == E->getTypeSourceInfo() &&
+ Constructor == E->getConstructor() &&
+ !ArgumentChanged) {
+ // FIXME: Instantiation-specific
+ SemaRef.MarkFunctionReferenced(E->getLocStart(), Constructor);
+ return SemaRef.MaybeBindToTemporary(E);
+ }
+
+ // FIXME: Pass in E->isListInitialization().
+ return getDerived().RebuildCXXTemporaryObjectExpr(T,
+ /*FIXME:*/T->getTypeLoc().getEndLoc(),
+ Args,
+ E->getLocEnd());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
+ // Transform any init-capture expressions before entering the scope of the
+ // lambda body, because they are not semantically within that scope.
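+  // An illustrative example (not from the original source, with a
+  // hypothetical callee computeOnce):
+  //   auto l = [n = computeOnce()] { return n; };
+  // Here 'computeOnce()' must be transformed in the enclosing context
+  // rather than inside the lambda's own scope.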
+ typedef std::pair<ExprResult, QualType> InitCaptureInfoTy;
+ SmallVector<InitCaptureInfoTy, 8> InitCaptureExprsAndTypes;
+ InitCaptureExprsAndTypes.resize(E->explicit_capture_end() -
+ E->explicit_capture_begin());
+ for (LambdaExpr::capture_iterator C = E->capture_begin(),
+ CEnd = E->capture_end();
+ C != CEnd; ++C) {
+ if (!E->isInitCapture(C))
+ continue;
+ EnterExpressionEvaluationContext EEEC(getSema(),
+ Sema::PotentiallyEvaluated);
+ ExprResult NewExprInitResult = getDerived().TransformInitializer(
+ C->getCapturedVar()->getInit(),
+ C->getCapturedVar()->getInitStyle() == VarDecl::CallInit);
+
+ if (NewExprInitResult.isInvalid())
+ return ExprError();
+ Expr *NewExprInit = NewExprInitResult.get();
+
+ VarDecl *OldVD = C->getCapturedVar();
+ QualType NewInitCaptureType =
+ getSema().buildLambdaInitCaptureInitialization(
+ C->getLocation(), OldVD->getType()->isReferenceType(),
+ OldVD->getIdentifier(),
+ C->getCapturedVar()->getInitStyle() != VarDecl::CInit, NewExprInit);
+ NewExprInitResult = NewExprInit;
+ InitCaptureExprsAndTypes[C - E->capture_begin()] =
+ std::make_pair(NewExprInitResult, NewInitCaptureType);
+ }
+
+ // Transform the template parameters, and add them to the current
+ // instantiation scope. The null case is handled correctly.
+ auto TPL = getDerived().TransformTemplateParameterList(
+ E->getTemplateParameterList());
+
+ // Transform the type of the original lambda's call operator.
+ // The transformation MUST be done in the CurrentInstantiationScope since
+ // it introduces a mapping of the original to the newly created
+ // transformed parameters.
+ TypeSourceInfo *NewCallOpTSI = nullptr;
+ {
+ TypeSourceInfo *OldCallOpTSI = E->getCallOperator()->getTypeSourceInfo();
+ FunctionProtoTypeLoc OldCallOpFPTL =
+ OldCallOpTSI->getTypeLoc().getAs<FunctionProtoTypeLoc>();
+
+ TypeLocBuilder NewCallOpTLBuilder;
+ SmallVector<QualType, 4> ExceptionStorage;
+ TreeTransform *This = this; // Work around gcc.gnu.org/PR56135.
+ QualType NewCallOpType = TransformFunctionProtoType(
+ NewCallOpTLBuilder, OldCallOpFPTL, nullptr, 0,
+ [&](FunctionProtoType::ExceptionSpecInfo &ESI, bool &Changed) {
+ return This->TransformExceptionSpec(OldCallOpFPTL.getBeginLoc(), ESI,
+ ExceptionStorage, Changed);
+ });
+ if (NewCallOpType.isNull())
+ return ExprError();
+ NewCallOpTSI = NewCallOpTLBuilder.getTypeSourceInfo(getSema().Context,
+ NewCallOpType);
+ }
+
+ LambdaScopeInfo *LSI = getSema().PushLambdaScope();
+ Sema::FunctionScopeRAII FuncScopeCleanup(getSema());
+ LSI->GLTemplateParameterList = TPL;
+
+ // Create the local class that will describe the lambda.
+ CXXRecordDecl *Class
+ = getSema().createLambdaClosureType(E->getIntroducerRange(),
+ NewCallOpTSI,
+ /*KnownDependent=*/false,
+ E->getCaptureDefault());
+ getDerived().transformedLocalDecl(E->getLambdaClass(), Class);
+
+ // Build the call operator.
+ CXXMethodDecl *NewCallOperator = getSema().startLambdaDefinition(
+ Class, E->getIntroducerRange(), NewCallOpTSI,
+ E->getCallOperator()->getLocEnd(),
+ NewCallOpTSI->getTypeLoc().castAs<FunctionProtoTypeLoc>().getParams());
+ LSI->CallOperator = NewCallOperator;
+
+ getDerived().transformAttrs(E->getCallOperator(), NewCallOperator);
+ getDerived().transformedLocalDecl(E->getCallOperator(), NewCallOperator);
+
+ // Introduce the context of the call operator.
+ Sema::ContextRAII SavedContext(getSema(), NewCallOperator,
+ /*NewThisContext*/false);
+
+ // Enter the scope of the lambda.
+ getSema().buildLambdaScope(LSI, NewCallOperator,
+ E->getIntroducerRange(),
+ E->getCaptureDefault(),
+ E->getCaptureDefaultLoc(),
+ E->hasExplicitParameters(),
+ E->hasExplicitResultType(),
+ E->isMutable());
+
+ bool Invalid = false;
+
+ // Transform captures.
+ bool FinishedExplicitCaptures = false;
+ for (LambdaExpr::capture_iterator C = E->capture_begin(),
+ CEnd = E->capture_end();
+ C != CEnd; ++C) {
+ // When we hit the first implicit capture, tell Sema that we've finished
+ // the list of explicit captures.
+ if (!FinishedExplicitCaptures && C->isImplicit()) {
+ getSema().finishLambdaExplicitCaptures(LSI);
+ FinishedExplicitCaptures = true;
+ }
+
+ // Capturing 'this' is trivial.
+ if (C->capturesThis()) {
+ getSema().CheckCXXThisCapture(C->getLocation(), C->isExplicit());
+ continue;
+ }
+ // Captured expression will be recaptured during captured variables
+ // rebuilding.
+ if (C->capturesVLAType())
+ continue;
+
+ // Rebuild init-captures, including the implied field declaration.
+ if (E->isInitCapture(C)) {
+ InitCaptureInfoTy InitExprTypePair =
+ InitCaptureExprsAndTypes[C - E->capture_begin()];
+ ExprResult Init = InitExprTypePair.first;
+ QualType InitQualType = InitExprTypePair.second;
+ if (Init.isInvalid() || InitQualType.isNull()) {
+ Invalid = true;
+ continue;
+ }
+ VarDecl *OldVD = C->getCapturedVar();
+ VarDecl *NewVD = getSema().createLambdaInitCaptureVarDecl(
+ OldVD->getLocation(), InitExprTypePair.second, OldVD->getIdentifier(),
+ OldVD->getInitStyle(), Init.get());
+ if (!NewVD)
+ Invalid = true;
+ else {
+ getDerived().transformedLocalDecl(OldVD, NewVD);
+ }
+ getSema().buildInitCaptureField(LSI, NewVD);
+ continue;
+ }
+
+ assert(C->capturesVariable() && "unexpected kind of lambda capture");
+
+ // Determine the capture kind for Sema.
+ Sema::TryCaptureKind Kind
+ = C->isImplicit()? Sema::TryCapture_Implicit
+ : C->getCaptureKind() == LCK_ByCopy
+ ? Sema::TryCapture_ExplicitByVal
+ : Sema::TryCapture_ExplicitByRef;
+ SourceLocation EllipsisLoc;
+ if (C->isPackExpansion()) {
+ UnexpandedParameterPack Unexpanded(C->getCapturedVar(), C->getLocation());
+ bool ShouldExpand = false;
+ bool RetainExpansion = false;
+ Optional<unsigned> NumExpansions;
+ if (getDerived().TryExpandParameterPacks(C->getEllipsisLoc(),
+ C->getLocation(),
+ Unexpanded,
+ ShouldExpand, RetainExpansion,
+ NumExpansions)) {
+ Invalid = true;
+ continue;
+ }
+
+ if (ShouldExpand) {
+        // The transform has determined that we should perform an expansion;
+        // transform and capture each of the arguments in the pack expansion.
+ VarDecl *Pack = C->getCapturedVar();
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+ VarDecl *CapturedVar
+ = cast_or_null<VarDecl>(getDerived().TransformDecl(C->getLocation(),
+ Pack));
+ if (!CapturedVar) {
+ Invalid = true;
+ continue;
+ }
+
+ // Capture the transformed variable.
+ getSema().tryCaptureVariable(CapturedVar, C->getLocation(), Kind);
+ }
+
+ // FIXME: Retain a pack expansion if RetainExpansion is true.
+
+ continue;
+ }
+
+ EllipsisLoc = C->getEllipsisLoc();
+ }
+
+ // Transform the captured variable.
+ VarDecl *CapturedVar
+ = cast_or_null<VarDecl>(getDerived().TransformDecl(C->getLocation(),
+ C->getCapturedVar()));
+ if (!CapturedVar || CapturedVar->isInvalidDecl()) {
+ Invalid = true;
+ continue;
+ }
+
+ // Capture the transformed variable.
+ getSema().tryCaptureVariable(CapturedVar, C->getLocation(), Kind,
+ EllipsisLoc);
+ }
+ if (!FinishedExplicitCaptures)
+ getSema().finishLambdaExplicitCaptures(LSI);
+
+ // Enter a new evaluation context to insulate the lambda from any
+ // cleanups from the enclosing full-expression.
+ getSema().PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
+
+ // Instantiate the body of the lambda expression.
+ StmtResult Body =
+ Invalid ? StmtError() : getDerived().TransformStmt(E->getBody());
+
+ // ActOnLambda* will pop the function scope for us.
+ FuncScopeCleanup.disable();
+
+ if (Body.isInvalid()) {
+ SavedContext.pop();
+ getSema().ActOnLambdaError(E->getLocStart(), /*CurScope=*/nullptr,
+ /*IsInstantiation=*/true);
+ return ExprError();
+ }
+
+ // Copy the LSI before ActOnFinishFunctionBody removes it.
+ // FIXME: This is dumb. Store the lambda information somewhere that outlives
+ // the call operator.
+ auto LSICopy = *LSI;
+ getSema().ActOnFinishFunctionBody(NewCallOperator, Body.get(),
+ /*IsInstantiation*/ true);
+ SavedContext.pop();
+
+ return getSema().BuildLambdaExpr(E->getLocStart(), Body.get()->getLocEnd(),
+ &LSICopy);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXUnresolvedConstructExpr(
+ CXXUnresolvedConstructExpr *E) {
+ TypeSourceInfo *T = getDerived().TransformType(E->getTypeSourceInfo());
+ if (!T)
+ return ExprError();
+
+ bool ArgumentChanged = false;
+ SmallVector<Expr*, 8> Args;
+ Args.reserve(E->arg_size());
+ if (getDerived().TransformExprs(E->arg_begin(), E->arg_size(), true, Args,
+ &ArgumentChanged))
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ T == E->getTypeSourceInfo() &&
+ !ArgumentChanged)
+ return E;
+
+ // FIXME: we're faking the locations of the commas
+ return getDerived().RebuildCXXUnresolvedConstructExpr(T,
+ E->getLParenLoc(),
+ Args,
+ E->getRParenLoc());
+}
+
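+/// \brief Transform a member access into a dependent base.
+///
+/// An illustrative example (not from the original source):
+/// \code
+///   template<typename T> void f(T t) { t.clear(); }  // 't.clear' is
+///                                                    // dependent on T
+/// \endcode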
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXDependentScopeMemberExpr(
+ CXXDependentScopeMemberExpr *E) {
+ // Transform the base of the expression.
+ ExprResult Base((Expr*) nullptr);
+ Expr *OldBase;
+ QualType BaseType;
+ QualType ObjectType;
+ if (!E->isImplicitAccess()) {
+ OldBase = E->getBase();
+ Base = getDerived().TransformExpr(OldBase);
+ if (Base.isInvalid())
+ return ExprError();
+
+ // Start the member reference and compute the object's type.
+ ParsedType ObjectTy;
+ bool MayBePseudoDestructor = false;
+ Base = SemaRef.ActOnStartCXXMemberReference(nullptr, Base.get(),
+ E->getOperatorLoc(),
+ E->isArrow()? tok::arrow : tok::period,
+ ObjectTy,
+ MayBePseudoDestructor);
+ if (Base.isInvalid())
+ return ExprError();
+
+ ObjectType = ObjectTy.get();
+ BaseType = ((Expr*) Base.get())->getType();
+ } else {
+ OldBase = nullptr;
+ BaseType = getDerived().TransformType(E->getBaseType());
+ ObjectType = BaseType->getAs<PointerType>()->getPointeeType();
+ }
+
+ // Transform the first part of the nested-name-specifier that qualifies
+ // the member name.
+ NamedDecl *FirstQualifierInScope
+ = getDerived().TransformFirstQualifierInScope(
+ E->getFirstQualifierFoundInScope(),
+ E->getQualifierLoc().getBeginLoc());
+
+ NestedNameSpecifierLoc QualifierLoc;
+ if (E->getQualifier()) {
+ QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(E->getQualifierLoc(),
+ ObjectType,
+ FirstQualifierInScope);
+ if (!QualifierLoc)
+ return ExprError();
+ }
+
+ SourceLocation TemplateKWLoc = E->getTemplateKeywordLoc();
+
+ // TODO: If this is a conversion-function-id, verify that the
+ // destination type name (if present) resolves the same way after
+ // instantiation as it did in the local scope.
+
+ DeclarationNameInfo NameInfo
+ = getDerived().TransformDeclarationNameInfo(E->getMemberNameInfo());
+ if (!NameInfo.getName())
+ return ExprError();
+
+ if (!E->hasExplicitTemplateArgs()) {
+ // This is a reference to a member without an explicitly-specified
+ // template argument list. Optimize for this common case.
+ if (!getDerived().AlwaysRebuild() &&
+ Base.get() == OldBase &&
+ BaseType == E->getBaseType() &&
+ QualifierLoc == E->getQualifierLoc() &&
+ NameInfo.getName() == E->getMember() &&
+ FirstQualifierInScope == E->getFirstQualifierFoundInScope())
+ return E;
+
+ return getDerived().RebuildCXXDependentScopeMemberExpr(Base.get(),
+ BaseType,
+ E->isArrow(),
+ E->getOperatorLoc(),
+ QualifierLoc,
+ TemplateKWLoc,
+ FirstQualifierInScope,
+ NameInfo,
+ /*TemplateArgs*/nullptr);
+ }
+
+ TemplateArgumentListInfo TransArgs(E->getLAngleLoc(), E->getRAngleLoc());
+ if (getDerived().TransformTemplateArguments(E->getTemplateArgs(),
+ E->getNumTemplateArgs(),
+ TransArgs))
+ return ExprError();
+
+ return getDerived().RebuildCXXDependentScopeMemberExpr(Base.get(),
+ BaseType,
+ E->isArrow(),
+ E->getOperatorLoc(),
+ QualifierLoc,
+ TemplateKWLoc,
+ FirstQualifierInScope,
+ NameInfo,
+ &TransArgs);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformUnresolvedMemberExpr(UnresolvedMemberExpr *Old) {
+ // Transform the base of the expression.
+ ExprResult Base((Expr*) nullptr);
+ QualType BaseType;
+ if (!Old->isImplicitAccess()) {
+ Base = getDerived().TransformExpr(Old->getBase());
+ if (Base.isInvalid())
+ return ExprError();
+ Base = getSema().PerformMemberExprBaseConversion(Base.get(),
+ Old->isArrow());
+ if (Base.isInvalid())
+ return ExprError();
+ BaseType = Base.get()->getType();
+ } else {
+ BaseType = getDerived().TransformType(Old->getBaseType());
+ }
+
+ NestedNameSpecifierLoc QualifierLoc;
+ if (Old->getQualifierLoc()) {
+ QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(Old->getQualifierLoc());
+ if (!QualifierLoc)
+ return ExprError();
+ }
+
+ SourceLocation TemplateKWLoc = Old->getTemplateKeywordLoc();
+
+ LookupResult R(SemaRef, Old->getMemberNameInfo(),
+ Sema::LookupOrdinaryName);
+
+ // Transform all the decls.
+ for (UnresolvedMemberExpr::decls_iterator I = Old->decls_begin(),
+ E = Old->decls_end(); I != E; ++I) {
+ NamedDecl *InstD = static_cast<NamedDecl*>(
+ getDerived().TransformDecl(Old->getMemberLoc(),
+ *I));
+ if (!InstD) {
+      // Silently ignore a UsingShadowDecl that instantiated to nothing;
+      // this can happen because of dependent hiding.
+ if (isa<UsingShadowDecl>(*I))
+ continue;
+ else {
+ R.clear();
+ return ExprError();
+ }
+ }
+
+ // Expand using declarations.
+ if (isa<UsingDecl>(InstD)) {
+ UsingDecl *UD = cast<UsingDecl>(InstD);
+ for (auto *I : UD->shadows())
+ R.addDecl(I);
+ continue;
+ }
+
+ R.addDecl(InstD);
+ }
+
+ R.resolveKind();
+
+ // Determine the naming class.
+ if (Old->getNamingClass()) {
+ CXXRecordDecl *NamingClass
+ = cast_or_null<CXXRecordDecl>(getDerived().TransformDecl(
+ Old->getMemberLoc(),
+ Old->getNamingClass()));
+ if (!NamingClass)
+ return ExprError();
+
+ R.setNamingClass(NamingClass);
+ }
+
+ TemplateArgumentListInfo TransArgs;
+ if (Old->hasExplicitTemplateArgs()) {
+ TransArgs.setLAngleLoc(Old->getLAngleLoc());
+ TransArgs.setRAngleLoc(Old->getRAngleLoc());
+ if (getDerived().TransformTemplateArguments(Old->getTemplateArgs(),
+ Old->getNumTemplateArgs(),
+ TransArgs))
+ return ExprError();
+ }
+
+ // FIXME: to do this check properly, we will need to preserve the
+ // first-qualifier-in-scope here, just in case we had a dependent
+ // base (and therefore couldn't do the check) and a
+ // nested-name-qualifier (and therefore could do the lookup).
+ NamedDecl *FirstQualifierInScope = nullptr;
+
+ return getDerived().RebuildUnresolvedMemberExpr(Base.get(),
+ BaseType,
+ Old->getOperatorLoc(),
+ Old->isArrow(),
+ QualifierLoc,
+ TemplateKWLoc,
+ FirstQualifierInScope,
+ R,
+ (Old->hasExplicitTemplateArgs()
+ ? &TransArgs : nullptr));
+}
+
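+/// \brief Transform a noexcept operator.
+///
+/// An illustrative example (not from the original source):
+/// \code
+///   static_assert(noexcept(f()), "");  // 'f()' is an unevaluated operand
+/// \endcode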
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXNoexceptExpr(CXXNoexceptExpr *E) {
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+ ExprResult SubExpr = getDerived().TransformExpr(E->getOperand());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getOperand())
+ return E;
+
+ return getDerived().RebuildCXXNoexceptExpr(E->getSourceRange(),SubExpr.get());
+}
+
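+/// \brief Transform a pack expansion of an expression pattern.
+///
+/// An illustrative example (not from the original source, with a
+/// hypothetical callee use):
+/// \code
+///   template<typename ...Ts> void wrap(Ts ...ts) { use(ts...); }
+/// \endcode
+/// Here 'ts' is the pattern transformed below and '...' the expansion.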
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformPackExpansionExpr(PackExpansionExpr *E) {
+ ExprResult Pattern = getDerived().TransformExpr(E->getPattern());
+ if (Pattern.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && Pattern.get() == E->getPattern())
+ return E;
+
+ return getDerived().RebuildPackExpansion(Pattern.get(), E->getEllipsisLoc(),
+ E->getNumExpansions());
+}
+
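+/// \brief Transform a sizeof...(pack) expression.
+///
+/// An illustrative example (not from the original source):
+/// \code
+///   template<typename ...Ts> struct N {
+///     static const unsigned value = sizeof...(Ts);
+///   };
+/// \endcode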
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformSizeOfPackExpr(SizeOfPackExpr *E) {
+ // If E is not value-dependent, then nothing will change when we transform it.
+ // Note: This is an instantiation-centric view.
+ if (!E->isValueDependent())
+ return E;
+
+ EnterExpressionEvaluationContext Unevaluated(getSema(), Sema::Unevaluated);
+
+ ArrayRef<TemplateArgument> PackArgs;
+ TemplateArgument ArgStorage;
+
+ // Find the argument list to transform.
+ if (E->isPartiallySubstituted()) {
+ PackArgs = E->getPartialArguments();
+ } else if (E->isValueDependent()) {
+ UnexpandedParameterPack Unexpanded(E->getPack(), E->getPackLoc());
+ bool ShouldExpand = false;
+ bool RetainExpansion = false;
+ Optional<unsigned> NumExpansions;
+ if (getDerived().TryExpandParameterPacks(E->getOperatorLoc(), E->getPackLoc(),
+ Unexpanded,
+ ShouldExpand, RetainExpansion,
+ NumExpansions))
+ return ExprError();
+
+ // If we need to expand the pack, build a template argument from it and
+ // expand that.
+ if (ShouldExpand) {
+ auto *Pack = E->getPack();
+ if (auto *TTPD = dyn_cast<TemplateTypeParmDecl>(Pack)) {
+ ArgStorage = getSema().Context.getPackExpansionType(
+ getSema().Context.getTypeDeclType(TTPD), None);
+ } else if (auto *TTPD = dyn_cast<TemplateTemplateParmDecl>(Pack)) {
+ ArgStorage = TemplateArgument(TemplateName(TTPD), None);
+ } else {
+ auto *VD = cast<ValueDecl>(Pack);
+ ExprResult DRE = getSema().BuildDeclRefExpr(VD, VD->getType(),
+ VK_RValue, E->getPackLoc());
+ if (DRE.isInvalid())
+ return ExprError();
+ ArgStorage = new (getSema().Context) PackExpansionExpr(
+ getSema().Context.DependentTy, DRE.get(), E->getPackLoc(), None);
+ }
+ PackArgs = ArgStorage;
+ }
+ }
+
+ // If we're not expanding the pack, just transform the decl.
+ if (!PackArgs.size()) {
+ auto *Pack = cast_or_null<NamedDecl>(
+ getDerived().TransformDecl(E->getPackLoc(), E->getPack()));
+ if (!Pack)
+ return ExprError();
+ return getDerived().RebuildSizeOfPackExpr(E->getOperatorLoc(), Pack,
+ E->getPackLoc(),
+ E->getRParenLoc(), None, None);
+ }
+
+ TemplateArgumentListInfo TransformedPackArgs(E->getPackLoc(),
+ E->getPackLoc());
+ {
+ TemporaryBase Rebase(*this, E->getPackLoc(), getBaseEntity());
+ typedef TemplateArgumentLocInventIterator<
+ Derived, const TemplateArgument*> PackLocIterator;
+ if (TransformTemplateArguments(PackLocIterator(*this, PackArgs.begin()),
+ PackLocIterator(*this, PackArgs.end()),
+ TransformedPackArgs, /*Uneval*/true))
+ return ExprError();
+ }
+
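+  // If any transformed argument is still a pack expansion, the substitution
+  // was only partial, so rebuild sizeof... with the partially-substituted
+  // arguments instead of a fixed length.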
+ SmallVector<TemplateArgument, 8> Args;
+ bool PartialSubstitution = false;
+ for (auto &Loc : TransformedPackArgs.arguments()) {
+ Args.push_back(Loc.getArgument());
+ if (Loc.getArgument().isPackExpansion())
+ PartialSubstitution = true;
+ }
+
+ if (PartialSubstitution)
+ return getDerived().RebuildSizeOfPackExpr(E->getOperatorLoc(), E->getPack(),
+ E->getPackLoc(),
+ E->getRParenLoc(), None, Args);
+
+ return getDerived().RebuildSizeOfPackExpr(E->getOperatorLoc(), E->getPack(),
+ E->getPackLoc(), E->getRParenLoc(),
+ Args.size(), None);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *E) {
+ // Default behavior is to do nothing with this transformation.
+ return E;
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformSubstNonTypeTemplateParmExpr(
+ SubstNonTypeTemplateParmExpr *E) {
+ // Default behavior is to do nothing with this transformation.
+ return E;
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformFunctionParmPackExpr(FunctionParmPackExpr *E) {
+ // Default behavior is to do nothing with this transformation.
+ return E;
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformMaterializeTemporaryExpr(
+ MaterializeTemporaryExpr *E) {
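+  // Drop the materialization; it is an implicit node that will be
+  // recreated when the rebuilt expression is used again.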
+ return getDerived().TransformExpr(E->GetTemporaryExpr());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
+ Expr *Pattern = E->getPattern();
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded);
+ assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
+
+ // Determine whether the set of unexpanded parameter packs can and should
+ // be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ Optional<unsigned> NumExpansions;
+ if (getDerived().TryExpandParameterPacks(E->getEllipsisLoc(),
+ Pattern->getSourceRange(),
+ Unexpanded,
+ Expand, RetainExpansion,
+ NumExpansions))
+ return true;
+
+ if (!Expand) {
+ // Do not expand any packs here, just transform and rebuild a fold
+ // expression.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+
+ ExprResult LHS =
+ E->getLHS() ? getDerived().TransformExpr(E->getLHS()) : ExprResult();
+ if (LHS.isInvalid())
+ return true;
+
+ ExprResult RHS =
+ E->getRHS() ? getDerived().TransformExpr(E->getRHS()) : ExprResult();
+ if (RHS.isInvalid())
+ return true;
+
+ if (!getDerived().AlwaysRebuild() &&
+ LHS.get() == E->getLHS() && RHS.get() == E->getRHS())
+ return E;
+
+ return getDerived().RebuildCXXFoldExpr(
+ E->getLocStart(), LHS.get(), E->getOperator(), E->getEllipsisLoc(),
+ RHS.get(), E->getLocEnd());
+ }
+
+ // The transform has determined that we should perform an elementwise
+ // expansion of the pattern. Do so.
+ ExprResult Result = getDerived().TransformExpr(E->getInit());
+ if (Result.isInvalid())
+ return true;
+ bool LeftFold = E->isLeftFold();
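+  // The fold direction determines how the result accumulates: for a left
+  // fold each step's result becomes the LHS of the next operator, while a
+  // right fold iterates the pack in reverse and accumulates on the RHS.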
+
+ // If we're retaining an expansion for a right fold, it is the innermost
+ // component and takes the init (if any).
+ if (!LeftFold && RetainExpansion) {
+ ForgetPartiallySubstitutedPackRAII Forget(getDerived());
+
+ ExprResult Out = getDerived().TransformExpr(Pattern);
+ if (Out.isInvalid())
+ return true;
+
+ Result = getDerived().RebuildCXXFoldExpr(
+ E->getLocStart(), Out.get(), E->getOperator(), E->getEllipsisLoc(),
+ Result.get(), E->getLocEnd());
+ if (Result.isInvalid())
+ return true;
+ }
+
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(
+ getSema(), LeftFold ? I : *NumExpansions - I - 1);
+ ExprResult Out = getDerived().TransformExpr(Pattern);
+ if (Out.isInvalid())
+ return true;
+
+ if (Out.get()->containsUnexpandedParameterPack()) {
+ // We still have a pack; retain a pack expansion for this slice.
+ Result = getDerived().RebuildCXXFoldExpr(
+ E->getLocStart(),
+ LeftFold ? Result.get() : Out.get(),
+ E->getOperator(), E->getEllipsisLoc(),
+ LeftFold ? Out.get() : Result.get(),
+ E->getLocEnd());
+ } else if (Result.isUsable()) {
+ // We've got down to a single element; build a binary operator.
+ Result = getDerived().RebuildBinaryOperator(
+ E->getEllipsisLoc(), E->getOperator(),
+ LeftFold ? Result.get() : Out.get(),
+ LeftFold ? Out.get() : Result.get());
+ } else
+ Result = Out;
+
+ if (Result.isInvalid())
+ return true;
+ }
+
+ // If we're retaining an expansion for a left fold, it is the outermost
+ // component and takes the complete expansion so far as its init (if any).
+ if (LeftFold && RetainExpansion) {
+ ForgetPartiallySubstitutedPackRAII Forget(getDerived());
+
+ ExprResult Out = getDerived().TransformExpr(Pattern);
+ if (Out.isInvalid())
+ return true;
+
+ Result = getDerived().RebuildCXXFoldExpr(
+ E->getLocStart(), Result.get(),
+ E->getOperator(), E->getEllipsisLoc(),
+ Out.get(), E->getLocEnd());
+ if (Result.isInvalid())
+ return true;
+ }
+
+ // If we had no init and an empty pack, and we're not retaining an expansion,
+ // then produce a fallback value or error.
+ if (Result.isUnset())
+ return getDerived().RebuildEmptyCXXFoldExpr(E->getEllipsisLoc(),
+ E->getOperator());
+
+ return Result;
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXStdInitializerListExpr(
+ CXXStdInitializerListExpr *E) {
+ return getDerived().TransformExpr(E->getSubExpr());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCStringLiteral(ObjCStringLiteral *E) {
+ return SemaRef.MaybeBindToTemporary(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCBoolLiteralExpr(ObjCBoolLiteralExpr *E) {
+ return E;
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCBoxedExpr(ObjCBoxedExpr *E) {
+ ExprResult SubExpr = getDerived().TransformExpr(E->getSubExpr());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ SubExpr.get() == E->getSubExpr())
+ return E;
+
+ return getDerived().RebuildObjCBoxedExpr(E->getSourceRange(), SubExpr.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCArrayLiteral(ObjCArrayLiteral *E) {
+ // Transform each of the elements.
+ SmallVector<Expr *, 8> Elements;
+ bool ArgChanged = false;
+ if (getDerived().TransformExprs(E->getElements(), E->getNumElements(),
+ /*IsCall=*/false, Elements, &ArgChanged))
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && !ArgChanged)
+ return SemaRef.MaybeBindToTemporary(E);
+
+ return getDerived().RebuildObjCArrayLiteral(E->getSourceRange(),
+ Elements.data(),
+ Elements.size());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCDictionaryLiteral(
+ ObjCDictionaryLiteral *E) {
+ // Transform each of the elements.
+ SmallVector<ObjCDictionaryElement, 8> Elements;
+ bool ArgChanged = false;
+ for (unsigned I = 0, N = E->getNumElements(); I != N; ++I) {
+ ObjCDictionaryElement OrigElement = E->getKeyValueElement(I);
+
+ if (OrigElement.isPackExpansion()) {
+ // This key/value element is a pack expansion.
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ getSema().collectUnexpandedParameterPacks(OrigElement.Key, Unexpanded);
+ getSema().collectUnexpandedParameterPacks(OrigElement.Value, Unexpanded);
+ assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
+
+ // Determine whether the set of unexpanded parameter packs can
+ // and should be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ Optional<unsigned> OrigNumExpansions = OrigElement.NumExpansions;
+ Optional<unsigned> NumExpansions = OrigNumExpansions;
+ SourceRange PatternRange(OrigElement.Key->getLocStart(),
+ OrigElement.Value->getLocEnd());
+ if (getDerived().TryExpandParameterPacks(OrigElement.EllipsisLoc,
+ PatternRange,
+ Unexpanded,
+ Expand, RetainExpansion,
+ NumExpansions))
+ return ExprError();
+
+ if (!Expand) {
+ // The transform has determined that we should perform a simple
+ // transformation on the pack expansion, producing another pack
+ // expansion.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ ExprResult Key = getDerived().TransformExpr(OrigElement.Key);
+ if (Key.isInvalid())
+ return ExprError();
+
+ if (Key.get() != OrigElement.Key)
+ ArgChanged = true;
+
+ ExprResult Value = getDerived().TransformExpr(OrigElement.Value);
+ if (Value.isInvalid())
+ return ExprError();
+
+ if (Value.get() != OrigElement.Value)
+ ArgChanged = true;
+
+ ObjCDictionaryElement Expansion = {
+ Key.get(), Value.get(), OrigElement.EllipsisLoc, NumExpansions
+ };
+ Elements.push_back(Expansion);
+ continue;
+ }
+
+ // Record right away that the argument was changed. This needs
+ // to happen even if the array expands to nothing.
+ ArgChanged = true;
+
+ // The transform has determined that we should perform an elementwise
+ // expansion of the pattern. Do so.
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+ ExprResult Key = getDerived().TransformExpr(OrigElement.Key);
+ if (Key.isInvalid())
+ return ExprError();
+
+ ExprResult Value = getDerived().TransformExpr(OrigElement.Value);
+ if (Value.isInvalid())
+ return ExprError();
+
+ ObjCDictionaryElement Element = {
+ Key.get(), Value.get(), SourceLocation(), NumExpansions
+ };
+
+ // If any unexpanded parameter packs remain, we still have a
+ // pack expansion.
+ // FIXME: Can this really happen?
+ if (Key.get()->containsUnexpandedParameterPack() ||
+ Value.get()->containsUnexpandedParameterPack())
+ Element.EllipsisLoc = OrigElement.EllipsisLoc;
+
+ Elements.push_back(Element);
+ }
+
+ // FIXME: Retain a pack expansion if RetainExpansion is true.
+
+ // We've finished with this pack expansion.
+ continue;
+ }
+
+ // Transform and check key.
+ ExprResult Key = getDerived().TransformExpr(OrigElement.Key);
+ if (Key.isInvalid())
+ return ExprError();
+
+ if (Key.get() != OrigElement.Key)
+ ArgChanged = true;
+
+ // Transform and check value.
+ ExprResult Value
+ = getDerived().TransformExpr(OrigElement.Value);
+ if (Value.isInvalid())
+ return ExprError();
+
+ if (Value.get() != OrigElement.Value)
+ ArgChanged = true;
+
+ ObjCDictionaryElement Element = {
+ Key.get(), Value.get(), SourceLocation(), None
+ };
+ Elements.push_back(Element);
+ }
+
+ if (!getDerived().AlwaysRebuild() && !ArgChanged)
+ return SemaRef.MaybeBindToTemporary(E);
+
+ return getDerived().RebuildObjCDictionaryLiteral(E->getSourceRange(),
+ Elements);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCEncodeExpr(ObjCEncodeExpr *E) {
+ TypeSourceInfo *EncodedTypeInfo
+ = getDerived().TransformType(E->getEncodedTypeSourceInfo());
+ if (!EncodedTypeInfo)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ EncodedTypeInfo == E->getEncodedTypeSourceInfo())
+ return E;
+
+ return getDerived().RebuildObjCEncodeExpr(E->getAtLoc(),
+ EncodedTypeInfo,
+ E->getRParenLoc());
+}
+
+template<typename Derived>
+ExprResult TreeTransform<Derived>::
+TransformObjCIndirectCopyRestoreExpr(ObjCIndirectCopyRestoreExpr *E) {
+  // This is a kind of implicit conversion, and it needs to get dropped
+  // and recomputed for the same general reasons that ImplicitCastExprs
+  // do, as well as for a more specific one: this expression is only valid
+  // when it appears *immediately* as an argument expression.
+ return getDerived().TransformExpr(E->getSubExpr());
+}
+
+template<typename Derived>
+ExprResult TreeTransform<Derived>::
+TransformObjCBridgedCastExpr(ObjCBridgedCastExpr *E) {
+ TypeSourceInfo *TSInfo
+ = getDerived().TransformType(E->getTypeInfoAsWritten());
+ if (!TSInfo)
+ return ExprError();
+
+ ExprResult Result = getDerived().TransformExpr(E->getSubExpr());
+ if (Result.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ TSInfo == E->getTypeInfoAsWritten() &&
+ Result.get() == E->getSubExpr())
+ return E;
+
+ return SemaRef.BuildObjCBridgedCast(E->getLParenLoc(), E->getBridgeKind(),
+ E->getBridgeKeywordLoc(), TSInfo,
+ Result.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCMessageExpr(ObjCMessageExpr *E) {
+ // Transform arguments.
+ bool ArgChanged = false;
+ SmallVector<Expr*, 8> Args;
+ Args.reserve(E->getNumArgs());
+ if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), false, Args,
+ &ArgChanged))
+ return ExprError();
+
+ if (E->getReceiverKind() == ObjCMessageExpr::Class) {
+ // Class message: transform the receiver type.
+ TypeSourceInfo *ReceiverTypeInfo
+ = getDerived().TransformType(E->getClassReceiverTypeInfo());
+ if (!ReceiverTypeInfo)
+ return ExprError();
+
+ // If nothing changed, just retain the existing message send.
+ if (!getDerived().AlwaysRebuild() &&
+ ReceiverTypeInfo == E->getClassReceiverTypeInfo() && !ArgChanged)
+ return SemaRef.MaybeBindToTemporary(E);
+
+ // Build a new class message send.
+ SmallVector<SourceLocation, 16> SelLocs;
+ E->getSelectorLocs(SelLocs);
+ return getDerived().RebuildObjCMessageExpr(ReceiverTypeInfo,
+ E->getSelector(),
+ SelLocs,
+ E->getMethodDecl(),
+ E->getLeftLoc(),
+ Args,
+ E->getRightLoc());
+ }
+ else if (E->getReceiverKind() == ObjCMessageExpr::SuperClass ||
+ E->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
+ // Build a new class message send to 'super'.
+ SmallVector<SourceLocation, 16> SelLocs;
+ E->getSelectorLocs(SelLocs);
+ return getDerived().RebuildObjCMessageExpr(E->getSuperLoc(),
+ E->getSelector(),
+ SelLocs,
+ E->getReceiverType(),
+ E->getMethodDecl(),
+ E->getLeftLoc(),
+ Args,
+ E->getRightLoc());
+ }
+
+ // Instance message: transform the receiver
+ assert(E->getReceiverKind() == ObjCMessageExpr::Instance &&
+ "Only class and instance messages may be instantiated");
+ ExprResult Receiver
+ = getDerived().TransformExpr(E->getInstanceReceiver());
+ if (Receiver.isInvalid())
+ return ExprError();
+
+ // If nothing changed, just retain the existing message send.
+ if (!getDerived().AlwaysRebuild() &&
+ Receiver.get() == E->getInstanceReceiver() && !ArgChanged)
+ return SemaRef.MaybeBindToTemporary(E);
+
+ // Build a new instance message send.
+ SmallVector<SourceLocation, 16> SelLocs;
+ E->getSelectorLocs(SelLocs);
+ return getDerived().RebuildObjCMessageExpr(Receiver.get(),
+ E->getSelector(),
+ SelLocs,
+ E->getMethodDecl(),
+ E->getLeftLoc(),
+ Args,
+ E->getRightLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCSelectorExpr(ObjCSelectorExpr *E) {
+ return E;
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCProtocolExpr(ObjCProtocolExpr *E) {
+ return E;
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ // Transform the base expression.
+ ExprResult Base = getDerived().TransformExpr(E->getBase());
+ if (Base.isInvalid())
+ return ExprError();
+
+ // We don't need to transform the ivar; it will never change.
+
+ // If nothing changed, just retain the existing expression.
+ if (!getDerived().AlwaysRebuild() &&
+ Base.get() == E->getBase())
+ return E;
+
+ return getDerived().RebuildObjCIvarRefExpr(Base.get(), E->getDecl(),
+ E->getLocation(),
+ E->isArrow(), E->isFreeIvar());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+  // 'super' receivers and type receivers never change, and neither does
+  // the property itself. Just retain the existing expression.
+ if (!E->isObjectReceiver())
+ return E;
+
+ // Transform the base expression.
+ ExprResult Base = getDerived().TransformExpr(E->getBase());
+ if (Base.isInvalid())
+ return ExprError();
+
+ // We don't need to transform the property; it will never change.
+
+ // If nothing changed, just retain the existing expression.
+ if (!getDerived().AlwaysRebuild() &&
+ Base.get() == E->getBase())
+ return E;
+
+ if (E->isExplicitProperty())
+ return getDerived().RebuildObjCPropertyRefExpr(Base.get(),
+ E->getExplicitProperty(),
+ E->getLocation());
+
+ return getDerived().RebuildObjCPropertyRefExpr(Base.get(),
+ SemaRef.Context.PseudoObjectTy,
+ E->getImplicitPropertyGetter(),
+ E->getImplicitPropertySetter(),
+ E->getLocation());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCSubscriptRefExpr(ObjCSubscriptRefExpr *E) {
+ // Transform the base expression.
+ ExprResult Base = getDerived().TransformExpr(E->getBaseExpr());
+ if (Base.isInvalid())
+ return ExprError();
+
+ // Transform the key expression.
+ ExprResult Key = getDerived().TransformExpr(E->getKeyExpr());
+ if (Key.isInvalid())
+ return ExprError();
+
+ // If nothing changed, just retain the existing expression.
+ if (!getDerived().AlwaysRebuild() &&
+ Key.get() == E->getKeyExpr() && Base.get() == E->getBaseExpr())
+ return E;
+
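+  // Note: despite its name, setAtIndexMethodDecl() is an accessor that
+  // returns the 'set' method, mirroring getAtIndexMethodDecl().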
+ return getDerived().RebuildObjCSubscriptRefExpr(E->getRBracket(),
+ Base.get(), Key.get(),
+ E->getAtIndexMethodDecl(),
+ E->setAtIndexMethodDecl());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCIsaExpr(ObjCIsaExpr *E) {
+ // Transform the base expression.
+ ExprResult Base = getDerived().TransformExpr(E->getBase());
+ if (Base.isInvalid())
+ return ExprError();
+
+ // If nothing changed, just retain the existing expression.
+ if (!getDerived().AlwaysRebuild() &&
+ Base.get() == E->getBase())
+ return E;
+
+ return getDerived().RebuildObjCIsaExpr(Base.get(), E->getIsaMemberLoc(),
+ E->getOpLoc(),
+ E->isArrow());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformShuffleVectorExpr(ShuffleVectorExpr *E) {
+ bool ArgumentChanged = false;
+ SmallVector<Expr*, 8> SubExprs;
+ SubExprs.reserve(E->getNumSubExprs());
+ if (getDerived().TransformExprs(E->getSubExprs(), E->getNumSubExprs(), false,
+ SubExprs, &ArgumentChanged))
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ !ArgumentChanged)
+ return E;
+
+ return getDerived().RebuildShuffleVectorExpr(E->getBuiltinLoc(),
+ SubExprs,
+ E->getRParenLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformConvertVectorExpr(ConvertVectorExpr *E) {
+ ExprResult SrcExpr = getDerived().TransformExpr(E->getSrcExpr());
+ if (SrcExpr.isInvalid())
+ return ExprError();
+
+ TypeSourceInfo *Type = getDerived().TransformType(E->getTypeSourceInfo());
+ if (!Type)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Type == E->getTypeSourceInfo() &&
+ SrcExpr.get() == E->getSrcExpr())
+ return E;
+
+ return getDerived().RebuildConvertVectorExpr(E->getBuiltinLoc(),
+ SrcExpr.get(), Type,
+ E->getRParenLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformBlockExpr(BlockExpr *E) {
+ BlockDecl *oldBlock = E->getBlockDecl();
+
+ SemaRef.ActOnBlockStart(E->getCaretLocation(), /*Scope=*/nullptr);
+ BlockScopeInfo *blockScope = SemaRef.getCurBlock();
+
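+  // ActOnBlockStart pushed a fresh block scope; mirror the old block's
+  // variadic-ness and missing-return-type state onto it before
+  // transforming the parameters and body.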
+ blockScope->TheDecl->setIsVariadic(oldBlock->isVariadic());
+ blockScope->TheDecl->setBlockMissingReturnType(
+ oldBlock->blockMissingReturnType());
+
+ SmallVector<ParmVarDecl*, 4> params;
+ SmallVector<QualType, 4> paramTypes;
+
+ // Parameter substitution.
+ if (getDerived().TransformFunctionTypeParams(E->getCaretLocation(),
+ oldBlock->param_begin(),
+ oldBlock->param_size(),
+ nullptr, paramTypes, &params)) {
+ getSema().ActOnBlockError(E->getCaretLocation(), /*Scope=*/nullptr);
+ return ExprError();
+ }
+
+ const FunctionProtoType *exprFunctionType = E->getFunctionType();
+ QualType exprResultType =
+ getDerived().TransformType(exprFunctionType->getReturnType());
+
+ QualType functionType =
+ getDerived().RebuildFunctionProtoType(exprResultType, paramTypes,
+ exprFunctionType->getExtProtoInfo());
+ blockScope->FunctionType = functionType;
+
+ // Set the parameters on the block decl.
+ if (!params.empty())
+ blockScope->TheDecl->setParams(params);
+
+ if (!oldBlock->blockMissingReturnType()) {
+ blockScope->HasImplicitReturnType = false;
+ blockScope->ReturnType = exprResultType;
+ }
+
+ // Transform the body
+ StmtResult body = getDerived().TransformStmt(E->getBody());
+ if (body.isInvalid()) {
+ getSema().ActOnBlockError(E->getCaretLocation(), /*Scope=*/nullptr);
+ return ExprError();
+ }
+
+#ifndef NDEBUG
+  // In builds with assertions, make sure that we captured the same set of
+  // variables that the original block captured.
+ if (!SemaRef.getDiagnostics().hasErrorOccurred()) {
+ for (const auto &I : oldBlock->captures()) {
+ VarDecl *oldCapture = I.getVariable();
+
+ // Ignore parameter packs.
+ if (isa<ParmVarDecl>(oldCapture) &&
+ cast<ParmVarDecl>(oldCapture)->isParameterPack())
+ continue;
+
+ VarDecl *newCapture =
+ cast<VarDecl>(getDerived().TransformDecl(E->getCaretLocation(),
+ oldCapture));
+ assert(blockScope->CaptureMap.count(newCapture));
+ }
+ assert(oldBlock->capturesCXXThis() == blockScope->isCXXThisCaptured());
+ }
+#endif
+
+ return SemaRef.ActOnBlockStmtExpr(E->getCaretLocation(), body.get(),
+ /*Scope=*/nullptr);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformAsTypeExpr(AsTypeExpr *E) {
+ llvm_unreachable("Cannot transform asType expressions yet");
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformAtomicExpr(AtomicExpr *E) {
+ QualType RetTy = getDerived().TransformType(E->getType());
+ bool ArgumentChanged = false;
+ SmallVector<Expr*, 8> SubExprs;
+ SubExprs.reserve(E->getNumSubExprs());
+ if (getDerived().TransformExprs(E->getSubExprs(), E->getNumSubExprs(), false,
+ SubExprs, &ArgumentChanged))
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ !ArgumentChanged)
+ return E;
+
+ return getDerived().RebuildAtomicExpr(E->getBuiltinLoc(), SubExprs,
+ RetTy, E->getOp(), E->getRParenLoc());
+}
+
+//===----------------------------------------------------------------------===//
+// Type reconstruction
+//===----------------------------------------------------------------------===//
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildPointerType(QualType PointeeType,
+ SourceLocation Star) {
+ return SemaRef.BuildPointerType(PointeeType, Star,
+ getDerived().getBaseEntity());
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildBlockPointerType(QualType PointeeType,
+ SourceLocation Star) {
+ return SemaRef.BuildBlockPointerType(PointeeType, Star,
+ getDerived().getBaseEntity());
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::RebuildReferenceType(QualType ReferentType,
+ bool WrittenAsLValue,
+ SourceLocation Sigil) {
+ return SemaRef.BuildReferenceType(ReferentType, WrittenAsLValue,
+ Sigil, getDerived().getBaseEntity());
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::RebuildMemberPointerType(QualType PointeeType,
+ QualType ClassType,
+ SourceLocation Sigil) {
+ return SemaRef.BuildMemberPointerType(PointeeType, ClassType, Sigil,
+ getDerived().getBaseEntity());
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildObjCObjectType(
+ QualType BaseType,
+ SourceLocation Loc,
+ SourceLocation TypeArgsLAngleLoc,
+ ArrayRef<TypeSourceInfo *> TypeArgs,
+ SourceLocation TypeArgsRAngleLoc,
+ SourceLocation ProtocolLAngleLoc,
+ ArrayRef<ObjCProtocolDecl *> Protocols,
+ ArrayRef<SourceLocation> ProtocolLocs,
+ SourceLocation ProtocolRAngleLoc) {
+ return SemaRef.BuildObjCObjectType(BaseType, Loc, TypeArgsLAngleLoc,
+ TypeArgs, TypeArgsRAngleLoc,
+ ProtocolLAngleLoc, Protocols, ProtocolLocs,
+ ProtocolRAngleLoc,
+ /*FailOnError=*/true);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildObjCObjectPointerType(
+ QualType PointeeType,
+ SourceLocation Star) {
+ return SemaRef.Context.getObjCObjectPointerType(PointeeType);
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::RebuildArrayType(QualType ElementType,
+ ArrayType::ArraySizeModifier SizeMod,
+ const llvm::APInt *Size,
+ Expr *SizeExpr,
+ unsigned IndexTypeQuals,
+ SourceRange BracketsRange) {
+ if (SizeExpr || !Size)
+ return SemaRef.BuildArrayType(ElementType, SizeMod, SizeExpr,
+ IndexTypeQuals, BracketsRange,
+ getDerived().getBaseEntity());
+
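+  // Pick the canonical unsigned type whose width matches the size's bit
+  // width, so we can wrap the constant bound in an IntegerLiteral below.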
+ QualType Types[] = {
+ SemaRef.Context.UnsignedCharTy, SemaRef.Context.UnsignedShortTy,
+ SemaRef.Context.UnsignedIntTy, SemaRef.Context.UnsignedLongTy,
+ SemaRef.Context.UnsignedLongLongTy, SemaRef.Context.UnsignedInt128Ty
+ };
+ const unsigned NumTypes = llvm::array_lengthof(Types);
+ QualType SizeType;
+ for (unsigned I = 0; I != NumTypes; ++I)
+ if (Size->getBitWidth() == SemaRef.Context.getIntWidth(Types[I])) {
+ SizeType = Types[I];
+ break;
+ }
+
+ // Note that we can return a VariableArrayType here in the case where
+ // the element type was a dependent VariableArrayType.
+ IntegerLiteral *ArraySize
+ = IntegerLiteral::Create(SemaRef.Context, *Size, SizeType,
+ /*FIXME*/BracketsRange.getBegin());
+ return SemaRef.BuildArrayType(ElementType, SizeMod, ArraySize,
+ IndexTypeQuals, BracketsRange,
+ getDerived().getBaseEntity());
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::RebuildConstantArrayType(QualType ElementType,
+ ArrayType::ArraySizeModifier SizeMod,
+ const llvm::APInt &Size,
+ unsigned IndexTypeQuals,
+ SourceRange BracketsRange) {
+ return getDerived().RebuildArrayType(ElementType, SizeMod, &Size, nullptr,
+ IndexTypeQuals, BracketsRange);
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::RebuildIncompleteArrayType(QualType ElementType,
+ ArrayType::ArraySizeModifier SizeMod,
+ unsigned IndexTypeQuals,
+ SourceRange BracketsRange) {
+ return getDerived().RebuildArrayType(ElementType, SizeMod, nullptr, nullptr,
+ IndexTypeQuals, BracketsRange);
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::RebuildVariableArrayType(QualType ElementType,
+ ArrayType::ArraySizeModifier SizeMod,
+ Expr *SizeExpr,
+ unsigned IndexTypeQuals,
+ SourceRange BracketsRange) {
+ return getDerived().RebuildArrayType(ElementType, SizeMod, nullptr,
+ SizeExpr,
+ IndexTypeQuals, BracketsRange);
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::RebuildDependentSizedArrayType(QualType ElementType,
+ ArrayType::ArraySizeModifier SizeMod,
+ Expr *SizeExpr,
+ unsigned IndexTypeQuals,
+ SourceRange BracketsRange) {
+ return getDerived().RebuildArrayType(ElementType, SizeMod, nullptr,
+ SizeExpr,
+ IndexTypeQuals, BracketsRange);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildVectorType(QualType ElementType,
+ unsigned NumElements,
+ VectorType::VectorKind VecKind) {
+ // FIXME: semantic checking!
+ return SemaRef.Context.getVectorType(ElementType, NumElements, VecKind);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildExtVectorType(QualType ElementType,
+ unsigned NumElements,
+ SourceLocation AttributeLoc) {
+ llvm::APInt numElements(SemaRef.Context.getIntWidth(SemaRef.Context.IntTy),
+ NumElements, true);
+ IntegerLiteral *VectorSize
+ = IntegerLiteral::Create(SemaRef.Context, numElements, SemaRef.Context.IntTy,
+ AttributeLoc);
+ return SemaRef.BuildExtVectorType(ElementType, VectorSize, AttributeLoc);
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::RebuildDependentSizedExtVectorType(QualType ElementType,
+ Expr *SizeExpr,
+ SourceLocation AttributeLoc) {
+ return SemaRef.BuildExtVectorType(ElementType, SizeExpr, AttributeLoc);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildFunctionProtoType(
+ QualType T,
+ MutableArrayRef<QualType> ParamTypes,
+ const FunctionProtoType::ExtProtoInfo &EPI) {
+ return SemaRef.BuildFunctionType(T, ParamTypes,
+ getDerived().getBaseLocation(),
+ getDerived().getBaseEntity(),
+ EPI);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildFunctionNoProtoType(QualType T) {
+ return SemaRef.Context.getFunctionNoProtoType(T);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(Decl *D) {
+ assert(D && "no decl found");
+ if (D->isInvalidDecl()) return QualType();
+
+ // FIXME: Doesn't account for ObjCInterfaceDecl!
+ TypeDecl *Ty;
+ if (isa<UsingDecl>(D)) {
+ UsingDecl *Using = cast<UsingDecl>(D);
+ assert(Using->hasTypename() &&
+ "UnresolvedUsingTypenameDecl transformed to non-typename using");
+
+ // A valid resolved using typename decl points to exactly one type decl.
+ assert(++Using->shadow_begin() == Using->shadow_end());
+ Ty = cast<TypeDecl>((*Using->shadow_begin())->getTargetDecl());
+
+ } else {
+ assert(isa<UnresolvedUsingTypenameDecl>(D) &&
+ "UnresolvedUsingTypenameDecl transformed to non-using decl");
+ Ty = cast<UnresolvedUsingTypenameDecl>(D);
+ }
+
+ return SemaRef.Context.getTypeDeclType(Ty);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildTypeOfExprType(Expr *E,
+ SourceLocation Loc) {
+ return SemaRef.BuildTypeofExprType(E, Loc);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildTypeOfType(QualType Underlying) {
+ return SemaRef.Context.getTypeOfType(Underlying);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildDecltypeType(Expr *E,
+ SourceLocation Loc) {
+ return SemaRef.BuildDecltypeType(E, Loc);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildUnaryTransformType(QualType BaseType,
+ UnaryTransformType::UTTKind UKind,
+ SourceLocation Loc) {
+ return SemaRef.BuildUnaryTransformType(BaseType, UKind, Loc);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildTemplateSpecializationType(
+ TemplateName Template,
+ SourceLocation TemplateNameLoc,
+ TemplateArgumentListInfo &TemplateArgs) {
+ return SemaRef.CheckTemplateIdType(Template, TemplateNameLoc, TemplateArgs);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildAtomicType(QualType ValueType,
+ SourceLocation KWLoc) {
+ return SemaRef.BuildAtomicType(ValueType, KWLoc);
+}
+
+template<typename Derived>
+TemplateName
+TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
+ bool TemplateKW,
+ TemplateDecl *Template) {
+ return SemaRef.Context.getQualifiedTemplateName(SS.getScopeRep(), TemplateKW,
+ Template);
+}
+
+template<typename Derived>
+TemplateName
+TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
+ const IdentifierInfo &Name,
+ SourceLocation NameLoc,
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope) {
+ UnqualifiedId TemplateName;
+ TemplateName.setIdentifier(&Name, NameLoc);
+ Sema::TemplateTy Template;
+ SourceLocation TemplateKWLoc; // FIXME: retrieve it from caller.
+ getSema().ActOnDependentTemplateName(/*Scope=*/nullptr,
+ SS, TemplateKWLoc, TemplateName,
+ ParsedType::make(ObjectType),
+ /*EnteringContext=*/false,
+ Template);
+ return Template.get();
+}
+
+template<typename Derived>
+TemplateName
+TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
+ OverloadedOperatorKind Operator,
+ SourceLocation NameLoc,
+ QualType ObjectType) {
+ UnqualifiedId Name;
+ // FIXME: Bogus location information.
+ SourceLocation SymbolLocations[3] = { NameLoc, NameLoc, NameLoc };
+ Name.setOperatorFunctionId(NameLoc, Operator, SymbolLocations);
+ SourceLocation TemplateKWLoc; // FIXME: retrieve it from caller.
+ Sema::TemplateTy Template;
+ getSema().ActOnDependentTemplateName(/*Scope=*/nullptr,
+ SS, TemplateKWLoc, Name,
+ ParsedType::make(ObjectType),
+ /*EnteringContext=*/false,
+ Template);
+ return Template.get();
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
+ SourceLocation OpLoc,
+ Expr *OrigCallee,
+ Expr *First,
+ Expr *Second) {
+ Expr *Callee = OrigCallee->IgnoreParenCasts();
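+  // Overloaded postfix ++/-- are spelled as two-argument operator calls
+  // whose second argument is a dummy int, so the presence of a second
+  // argument marks the postfix form.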
+ bool isPostIncDec = Second && (Op == OO_PlusPlus || Op == OO_MinusMinus);
+
+ if (First->getObjectKind() == OK_ObjCProperty) {
+ BinaryOperatorKind Opc = BinaryOperator::getOverloadedOpcode(Op);
+ if (BinaryOperator::isAssignmentOp(Opc))
+ return SemaRef.checkPseudoObjectAssignment(/*Scope=*/nullptr, OpLoc, Opc,
+ First, Second);
+ ExprResult Result = SemaRef.CheckPlaceholderExpr(First);
+ if (Result.isInvalid())
+ return ExprError();
+ First = Result.get();
+ }
+
+ if (Second && Second->getObjectKind() == OK_ObjCProperty) {
+ ExprResult Result = SemaRef.CheckPlaceholderExpr(Second);
+ if (Result.isInvalid())
+ return ExprError();
+ Second = Result.get();
+ }
+
+ // Determine whether this should be a builtin operation.
+ if (Op == OO_Subscript) {
+ if (!First->getType()->isOverloadableType() &&
+ !Second->getType()->isOverloadableType())
+ return getSema().CreateBuiltinArraySubscriptExpr(First,
+ Callee->getLocStart(),
+ Second, OpLoc);
+ } else if (Op == OO_Arrow) {
+ // -> is never a builtin operation.
+ return SemaRef.BuildOverloadedArrowExpr(nullptr, First, OpLoc);
+ } else if (Second == nullptr || isPostIncDec) {
+ if (!First->getType()->isOverloadableType()) {
+ // The argument is not of overloadable type, so try to create a
+ // built-in unary operation.
+ UnaryOperatorKind Opc
+ = UnaryOperator::getOverloadedOpcode(Op, isPostIncDec);
+
+ return getSema().CreateBuiltinUnaryOp(OpLoc, Opc, First);
+ }
+ } else {
+ if (!First->getType()->isOverloadableType() &&
+ !Second->getType()->isOverloadableType()) {
+ // Neither of the arguments is an overloadable type, so try to
+ // create a built-in binary operation.
+ BinaryOperatorKind Opc = BinaryOperator::getOverloadedOpcode(Op);
+ ExprResult Result
+ = SemaRef.CreateBuiltinBinOp(OpLoc, Opc, First, Second);
+ if (Result.isInvalid())
+ return ExprError();
+
+ return Result;
+ }
+ }
+
+ // Compute the transformed set of functions (and function templates) to be
+ // used during overload resolution.
+ UnresolvedSet<16> Functions;
+
+ if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(Callee)) {
+ assert(ULE->requiresADL());
+ Functions.append(ULE->decls_begin(), ULE->decls_end());
+ } else {
+ // If we've resolved this to a particular non-member function, just call
+ // that function. If we resolved it to a member function,
+ // CreateOverloaded* will find that function for us.
+ NamedDecl *ND = cast<DeclRefExpr>(Callee)->getDecl();
+ if (!isa<CXXMethodDecl>(ND))
+ Functions.addDecl(ND);
+ }
+
+  // Set up the argument array; argument-dependent lookup itself happens
+  // inside the CreateOverloaded* calls below.
+ Expr *Args[2] = { First, Second };
+ unsigned NumArgs = 1 + (Second != nullptr);
+
+ // Create the overloaded operator invocation for unary operators.
+ if (NumArgs == 1 || isPostIncDec) {
+ UnaryOperatorKind Opc
+ = UnaryOperator::getOverloadedOpcode(Op, isPostIncDec);
+ return SemaRef.CreateOverloadedUnaryOp(OpLoc, Opc, Functions, First);
+ }
+
+ if (Op == OO_Subscript) {
+ SourceLocation LBrace;
+ SourceLocation RBrace;
+
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Callee)) {
+ DeclarationNameLoc NameLoc = DRE->getNameInfo().getInfo();
+ LBrace = SourceLocation::getFromRawEncoding(
+ NameLoc.CXXOperatorName.BeginOpNameLoc);
+ RBrace = SourceLocation::getFromRawEncoding(
+ NameLoc.CXXOperatorName.EndOpNameLoc);
+ } else {
+ LBrace = Callee->getLocStart();
+ RBrace = OpLoc;
+ }
+
+ return SemaRef.CreateOverloadedArraySubscriptExpr(LBrace, RBrace,
+ First, Second);
+ }
+
+ // Create the overloaded operator invocation for binary operators.
+ BinaryOperatorKind Opc = BinaryOperator::getOverloadedOpcode(Op);
+ ExprResult Result
+ = SemaRef.CreateOverloadedBinOp(OpLoc, Opc, Functions, Args[0], Args[1]);
+ if (Result.isInvalid())
+ return ExprError();
+
+ return Result;
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::RebuildCXXPseudoDestructorExpr(Expr *Base,
+ SourceLocation OperatorLoc,
+ bool isArrow,
+ CXXScopeSpec &SS,
+ TypeSourceInfo *ScopeType,
+ SourceLocation CCLoc,
+ SourceLocation TildeLoc,
+ PseudoDestructorTypeStorage Destroyed) {
+ QualType BaseType = Base->getType();
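+  // If the base is still dependent, the destroyed type is still just an
+  // identifier, or the base is not (a pointer to) a record type, we cannot
+  // resolve a real destructor yet and must rebuild a pseudo-destructor.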
+ if (Base->isTypeDependent() || Destroyed.getIdentifier() ||
+ (!isArrow && !BaseType->getAs<RecordType>()) ||
+ (isArrow && BaseType->getAs<PointerType>() &&
+ !BaseType->getAs<PointerType>()->getPointeeType()
+ ->template getAs<RecordType>())){
+ // This pseudo-destructor expression is still a pseudo-destructor.
+ return SemaRef.BuildPseudoDestructorExpr(
+ Base, OperatorLoc, isArrow ? tok::arrow : tok::period, SS, ScopeType,
+ CCLoc, TildeLoc, Destroyed);
+ }
+
+ TypeSourceInfo *DestroyedType = Destroyed.getTypeSourceInfo();
+ DeclarationName Name(SemaRef.Context.DeclarationNames.getCXXDestructorName(
+ SemaRef.Context.getCanonicalType(DestroyedType->getType())));
+ DeclarationNameInfo NameInfo(Name, Destroyed.getLocation());
+ NameInfo.setNamedTypeInfo(DestroyedType);
+
+ // The scope type is now known to be a valid nested name specifier
+ // component. Tack it on to the end of the nested name specifier.
+ if (ScopeType) {
+ if (!ScopeType->getType()->getAs<TagType>()) {
+ getSema().Diag(ScopeType->getTypeLoc().getBeginLoc(),
+ diag::err_expected_class_or_namespace)
+ << ScopeType->getType() << getSema().getLangOpts().CPlusPlus;
+ return ExprError();
+ }
+ SS.Extend(SemaRef.Context, SourceLocation(), ScopeType->getTypeLoc(),
+ CCLoc);
+ }
+
+ SourceLocation TemplateKWLoc; // FIXME: retrieve it from caller.
+ return getSema().BuildMemberReferenceExpr(Base, BaseType,
+ OperatorLoc, isArrow,
+ SS, TemplateKWLoc,
+ /*FIXME: FirstQualifier*/ nullptr,
+ NameInfo,
+ /*TemplateArgs*/ nullptr,
+ /*S*/nullptr);
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformCapturedStmt(CapturedStmt *S) {
+ SourceLocation Loc = S->getLocStart();
+ CapturedDecl *CD = S->getCapturedDecl();
+ unsigned NumParams = CD->getNumParams();
+ unsigned ContextParamPos = CD->getContextParamPosition();
+ SmallVector<Sema::CapturedParamNameType, 4> Params;
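+  // Rebuild the captured region's parameter list, transforming each
+  // parameter's type; the context parameter's slot is left as an empty
+  // name/type pair so that ActOnCapturedRegionStart recreates it.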
+ for (unsigned I = 0; I < NumParams; ++I) {
+ if (I != ContextParamPos) {
+ Params.push_back(
+ std::make_pair(
+ CD->getParam(I)->getName(),
+ getDerived().TransformType(CD->getParam(I)->getType())));
+ } else {
+ Params.push_back(std::make_pair(StringRef(), QualType()));
+ }
+ }
+ getSema().ActOnCapturedRegionStart(Loc, /*CurScope*/nullptr,
+ S->getCapturedRegionKind(), Params);
+ StmtResult Body;
+ {
+ Sema::CompoundScopeRAII CompoundScope(getSema());
+ Body = getDerived().TransformStmt(S->getCapturedStmt());
+ }
+
+ if (Body.isInvalid()) {
+ getSema().ActOnCapturedRegionError();
+ return StmtError();
+ }
+
+ return getSema().ActOnCapturedRegionEnd(Body.get());
+}
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_SEMA_TREETRANSFORM_H
diff --git a/contrib/llvm/tools/clang/lib/Sema/TypeLocBuilder.cpp b/contrib/llvm/tools/clang/lib/Sema/TypeLocBuilder.cpp
new file mode 100644
index 0000000..be99540
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/TypeLocBuilder.cpp
@@ -0,0 +1,136 @@
+//===--- TypeLocBuilder.cpp - Type Source Info collector ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines TypeLocBuilder, a class for building TypeLocs
+// bottom-up.
+//
+//===----------------------------------------------------------------------===//
+
+#include "TypeLocBuilder.h"
+
+using namespace clang;
+
+void TypeLocBuilder::pushFullCopy(TypeLoc L) {
+ size_t Size = L.getFullDataSize();
+ reserve(Size);
+
+ SmallVector<TypeLoc, 4> TypeLocs;
+ TypeLoc CurTL = L;
+ while (CurTL) {
+ TypeLocs.push_back(CurTL);
+ CurTL = CurTL.getNextTypeLoc();
+ }
+
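+  // The TypeLocs were collected outermost-first; push them back in reverse
+  // (innermost first), since the builder fills its buffer from the end.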
+ for (unsigned i = 0, e = TypeLocs.size(); i < e; ++i) {
+ TypeLoc CurTL = TypeLocs[e-i-1];
+ switch (CurTL.getTypeLocClass()) {
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ case TypeLoc::CLASS: { \
+ CLASS##TypeLoc NewTL = push<class CLASS##TypeLoc>(CurTL.getType()); \
+ memcpy(NewTL.getOpaqueData(), CurTL.getOpaqueData(), NewTL.getLocalDataSize()); \
+ break; \
+ }
+#include "clang/AST/TypeLocNodes.def"
+ }
+ }
+}
+
+void TypeLocBuilder::grow(size_t NewCapacity) {
+ assert(NewCapacity > Capacity);
+
+ // Allocate the new buffer and copy the old data into it.
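+  // Location data lives at the *end* of the buffer, so the old contents
+  // move to the tail of the new allocation and Index grows by the
+  // capacity delta.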
+ char *NewBuffer = new char[NewCapacity];
+ unsigned NewIndex = Index + NewCapacity - Capacity;
+ memcpy(&NewBuffer[NewIndex],
+ &Buffer[Index],
+ Capacity - Index);
+
+ if (Buffer != InlineBuffer.buffer)
+ delete[] Buffer;
+
+ Buffer = NewBuffer;
+ Capacity = NewCapacity;
+ Index = NewIndex;
+}
+
+TypeLoc TypeLocBuilder::pushImpl(QualType T, size_t LocalSize, unsigned LocalAlignment) {
+#ifndef NDEBUG
+ QualType TLast = TypeLoc(T, nullptr).getNextTypeLoc().getType();
+ assert(TLast == LastTy &&
+ "mismatch between last type and new type's inner type");
+ LastTy = T;
+#endif
+
+ assert(LocalAlignment <= BufferMaxAlignment && "Unexpected alignment");
+
+ // If we need to grow, grow by a factor of 2.
+ if (LocalSize > Index) {
+ size_t RequiredCapacity = Capacity + (LocalSize - Index);
+ size_t NewCapacity = Capacity * 2;
+ while (RequiredCapacity > NewCapacity)
+ NewCapacity *= 2;
+ grow(NewCapacity);
+ }
+
+ // Because we're adding elements to the TypeLoc backwards, we have to
+ // do some extra work to keep everything aligned appropriately.
+  // FIXME: This algorithm is an absolute mess because every TypeLoc returned
+ // needs to be valid. Partial TypeLocs are a terrible idea.
+ // FIXME: 4 and 8 are sufficient at the moment, but it's pretty ugly to
+ // hardcode them.
+ if (LocalAlignment == 4) {
+ if (NumBytesAtAlign8 == 0) {
+ NumBytesAtAlign4 += LocalSize;
+ } else {
+ unsigned Padding = NumBytesAtAlign4 % 8;
+ if (Padding == 0) {
+ if (LocalSize % 8 == 0) {
+ // Everything is set: there's no padding and we don't need to add
+ // any.
+ } else {
+ assert(LocalSize % 8 == 4);
+          // No existing padding; add in 4 bytes of padding.
+ memmove(&Buffer[Index - 4], &Buffer[Index], NumBytesAtAlign4);
+ Index -= 4;
+ }
+ } else {
+ assert(Padding == 4);
+ if (LocalSize % 8 == 0) {
+        // Everything is set: there's 4 bytes of padding and we don't need
+ // to add any.
+ } else {
+ assert(LocalSize % 8 == 4);
+          // There are 4 bytes of padding, but we don't need any; remove them.
+ memmove(&Buffer[Index + 4], &Buffer[Index], NumBytesAtAlign4);
+ Index += 4;
+ }
+ }
+ NumBytesAtAlign4 += LocalSize;
+ }
+ } else if (LocalAlignment == 8) {
+ if (!NumBytesAtAlign8 && NumBytesAtAlign4 % 8 != 0) {
+      // No existing padding and misaligned members; add in 4 bytes of padding.
+ memmove(&Buffer[Index - 4], &Buffer[Index], NumBytesAtAlign4);
+ Index -= 4;
+ }
+ // Forget about any padding.
+ NumBytesAtAlign4 = 0;
+ NumBytesAtAlign8 += LocalSize;
+ } else {
+ assert(LocalSize == 0);
+ }
+
+ Index -= LocalSize;
+
+ assert(Capacity - Index == TypeLoc::getFullDataSizeForType(T) &&
+ "incorrect data size provided to CreateTypeSourceInfo!");
+
+ return getTemporaryTypeLoc(T);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/TypeLocBuilder.h b/contrib/llvm/tools/clang/lib/Sema/TypeLocBuilder.h
new file mode 100644
index 0000000..82844b3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/TypeLocBuilder.h
@@ -0,0 +1,151 @@
+//===--- TypeLocBuilder.h - Type Source Info collector ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines TypeLocBuilder, a class for building TypeLocs
+// bottom-up.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_SEMA_TYPELOCBUILDER_H
+#define LLVM_CLANG_LIB_SEMA_TYPELOCBUILDER_H
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/TypeLoc.h"
+
+namespace clang {
+
+class TypeLocBuilder {
+ enum { InlineCapacity = 8 * sizeof(SourceLocation) };
+
+ /// The underlying location-data buffer. Data grows from the end
+ /// of the buffer backwards.
+ char *Buffer;
+
+ /// The capacity of the current buffer.
+ size_t Capacity;
+
+ /// The index of the first occupied byte in the buffer.
+ size_t Index;
+
+#ifndef NDEBUG
+ /// The last type pushed on this builder.
+ QualType LastTy;
+#endif
+
+ /// The inline buffer.
+ enum { BufferMaxAlignment = llvm::AlignOf<void*>::Alignment };
+ llvm::AlignedCharArray<BufferMaxAlignment, InlineCapacity> InlineBuffer;
+ unsigned NumBytesAtAlign4, NumBytesAtAlign8;
+
+ public:
+ TypeLocBuilder()
+ : Buffer(InlineBuffer.buffer), Capacity(InlineCapacity),
+ Index(InlineCapacity), NumBytesAtAlign4(0), NumBytesAtAlign8(0)
+ {
+ }
+
+ ~TypeLocBuilder() {
+ if (Buffer != InlineBuffer.buffer)
+ delete[] Buffer;
+ }
+
+ /// Ensures that this buffer has at least as much capacity as described.
+ void reserve(size_t Requested) {
+ if (Requested > Capacity)
+ // For now, match the request exactly.
+ grow(Requested);
+ }
+
+ /// Pushes a copy of the given TypeLoc onto this builder. The builder
+ /// must be empty for this to work.
+ void pushFullCopy(TypeLoc L);
+
+ /// Pushes space for a typespec TypeLoc. Invalidates any TypeLocs
+ /// previously retrieved from this builder.
+ TypeSpecTypeLoc pushTypeSpec(QualType T) {
+ size_t LocalSize = TypeSpecTypeLoc::LocalDataSize;
+ unsigned LocalAlign = TypeSpecTypeLoc::LocalDataAlignment;
+ return pushImpl(T, LocalSize, LocalAlign).castAs<TypeSpecTypeLoc>();
+ }
+
+ /// Resets this builder to the newly-initialized state.
+ void clear() {
+#ifndef NDEBUG
+ LastTy = QualType();
+#endif
+ Index = Capacity;
+ NumBytesAtAlign4 = NumBytesAtAlign8 = 0;
+ }
+
+ /// \brief Tell the TypeLocBuilder that the type it is storing has been
+ /// modified in some safe way that doesn't affect type-location information.
+ void TypeWasModifiedSafely(QualType T) {
+#ifndef NDEBUG
+ LastTy = T;
+#endif
+ }
+
+ /// Pushes space for a new TypeLoc of the given type. Invalidates
+ /// any TypeLocs previously retrieved from this builder.
+ template <class TyLocType> TyLocType push(QualType T) {
+ TyLocType Loc = TypeLoc(T, nullptr).castAs<TyLocType>();
+ size_t LocalSize = Loc.getLocalDataSize();
+ unsigned LocalAlign = Loc.getLocalDataAlignment();
+ return pushImpl(T, LocalSize, LocalAlign).castAs<TyLocType>();
+ }
+
+ /// Creates a TypeSourceInfo for the given type.
+ TypeSourceInfo *getTypeSourceInfo(ASTContext& Context, QualType T) {
+#ifndef NDEBUG
+ assert(T == LastTy && "type doesn't match last type pushed!");
+#endif
+
+ size_t FullDataSize = Capacity - Index;
+ TypeSourceInfo *DI = Context.CreateTypeSourceInfo(T, FullDataSize);
+ memcpy(DI->getTypeLoc().getOpaqueData(), &Buffer[Index], FullDataSize);
+ return DI;
+ }
+
+ /// \brief Copies the type-location information to the given AST context and
+ /// returns a \c TypeLoc referring into the AST context.
+ TypeLoc getTypeLocInContext(ASTContext &Context, QualType T) {
+#ifndef NDEBUG
+ assert(T == LastTy && "type doesn't match last type pushed!");
+#endif
+
+ size_t FullDataSize = Capacity - Index;
+ void *Mem = Context.Allocate(FullDataSize);
+ memcpy(Mem, &Buffer[Index], FullDataSize);
+ return TypeLoc(T, Mem);
+ }
+
+private:
+
+ TypeLoc pushImpl(QualType T, size_t LocalSize, unsigned LocalAlignment);
+
+ /// Grow to the given capacity.
+ void grow(size_t NewCapacity);
+
+ /// \brief Retrieve a temporary TypeLoc that refers into this \c TypeLocBuilder
+ /// object.
+ ///
+ /// The resulting \c TypeLoc should only be used so long as the
+ /// \c TypeLocBuilder is active and has not had more type information
+ /// pushed into it.
+ TypeLoc getTemporaryTypeLoc(QualType T) {
+#ifndef NDEBUG
+ assert(LastTy == T && "type doesn't match last type pushed!");
+#endif
+ return TypeLoc(T, &Buffer[Index]);
+ }
+};
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_SEMA_TYPELOCBUILDER_H
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp
new file mode 100644
index 0000000..2b78d74
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp
@@ -0,0 +1,369 @@
+//===--- ASTCommon.cpp - Common stuff for ASTReader/ASTWriter ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines common functions that both ASTReader and ASTWriter use.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ASTCommon.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Serialization/ASTDeserializationListener.h"
+#include "llvm/ADT/StringExtras.h"
+
+using namespace clang;
+
+// Give ASTDeserializationListener's VTable a home.
+ASTDeserializationListener::~ASTDeserializationListener() { }
+
+serialization::TypeIdx
+serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
+ unsigned ID = 0;
+ switch (BT->getKind()) {
+ case BuiltinType::Void:
+ ID = PREDEF_TYPE_VOID_ID;
+ break;
+ case BuiltinType::Bool:
+ ID = PREDEF_TYPE_BOOL_ID;
+ break;
+ case BuiltinType::Char_U:
+ ID = PREDEF_TYPE_CHAR_U_ID;
+ break;
+ case BuiltinType::UChar:
+ ID = PREDEF_TYPE_UCHAR_ID;
+ break;
+ case BuiltinType::UShort:
+ ID = PREDEF_TYPE_USHORT_ID;
+ break;
+ case BuiltinType::UInt:
+ ID = PREDEF_TYPE_UINT_ID;
+ break;
+ case BuiltinType::ULong:
+ ID = PREDEF_TYPE_ULONG_ID;
+ break;
+ case BuiltinType::ULongLong:
+ ID = PREDEF_TYPE_ULONGLONG_ID;
+ break;
+ case BuiltinType::UInt128:
+ ID = PREDEF_TYPE_UINT128_ID;
+ break;
+ case BuiltinType::Char_S:
+ ID = PREDEF_TYPE_CHAR_S_ID;
+ break;
+ case BuiltinType::SChar:
+ ID = PREDEF_TYPE_SCHAR_ID;
+ break;
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ ID = PREDEF_TYPE_WCHAR_ID;
+ break;
+ case BuiltinType::Short:
+ ID = PREDEF_TYPE_SHORT_ID;
+ break;
+ case BuiltinType::Int:
+ ID = PREDEF_TYPE_INT_ID;
+ break;
+ case BuiltinType::Long:
+ ID = PREDEF_TYPE_LONG_ID;
+ break;
+ case BuiltinType::LongLong:
+ ID = PREDEF_TYPE_LONGLONG_ID;
+ break;
+ case BuiltinType::Int128:
+ ID = PREDEF_TYPE_INT128_ID;
+ break;
+ case BuiltinType::Half:
+ ID = PREDEF_TYPE_HALF_ID;
+ break;
+ case BuiltinType::Float:
+ ID = PREDEF_TYPE_FLOAT_ID;
+ break;
+ case BuiltinType::Double:
+ ID = PREDEF_TYPE_DOUBLE_ID;
+ break;
+ case BuiltinType::LongDouble:
+ ID = PREDEF_TYPE_LONGDOUBLE_ID;
+ break;
+ case BuiltinType::NullPtr:
+ ID = PREDEF_TYPE_NULLPTR_ID;
+ break;
+ case BuiltinType::Char16:
+ ID = PREDEF_TYPE_CHAR16_ID;
+ break;
+ case BuiltinType::Char32:
+ ID = PREDEF_TYPE_CHAR32_ID;
+ break;
+ case BuiltinType::Overload:
+ ID = PREDEF_TYPE_OVERLOAD_ID;
+ break;
+ case BuiltinType::BoundMember:
+ ID = PREDEF_TYPE_BOUND_MEMBER;
+ break;
+ case BuiltinType::PseudoObject:
+ ID = PREDEF_TYPE_PSEUDO_OBJECT;
+ break;
+ case BuiltinType::Dependent:
+ ID = PREDEF_TYPE_DEPENDENT_ID;
+ break;
+ case BuiltinType::UnknownAny:
+ ID = PREDEF_TYPE_UNKNOWN_ANY;
+ break;
+ case BuiltinType::ARCUnbridgedCast:
+ ID = PREDEF_TYPE_ARC_UNBRIDGED_CAST;
+ break;
+ case BuiltinType::ObjCId:
+ ID = PREDEF_TYPE_OBJC_ID;
+ break;
+ case BuiltinType::ObjCClass:
+ ID = PREDEF_TYPE_OBJC_CLASS;
+ break;
+ case BuiltinType::ObjCSel:
+ ID = PREDEF_TYPE_OBJC_SEL;
+ break;
+ case BuiltinType::OCLImage1d:
+ ID = PREDEF_TYPE_IMAGE1D_ID;
+ break;
+ case BuiltinType::OCLImage1dArray:
+ ID = PREDEF_TYPE_IMAGE1D_ARR_ID;
+ break;
+ case BuiltinType::OCLImage1dBuffer:
+ ID = PREDEF_TYPE_IMAGE1D_BUFF_ID;
+ break;
+ case BuiltinType::OCLImage2d:
+ ID = PREDEF_TYPE_IMAGE2D_ID;
+ break;
+ case BuiltinType::OCLImage2dArray:
+ ID = PREDEF_TYPE_IMAGE2D_ARR_ID;
+ break;
+ case BuiltinType::OCLImage2dDepth:
+ ID = PREDEF_TYPE_IMAGE2D_DEP_ID;
+ break;
+ case BuiltinType::OCLImage2dArrayDepth:
+ ID = PREDEF_TYPE_IMAGE2D_ARR_DEP_ID;
+ break;
+ case BuiltinType::OCLImage2dMSAA:
+ ID = PREDEF_TYPE_IMAGE2D_MSAA_ID;
+ break;
+ case BuiltinType::OCLImage2dArrayMSAA:
+ ID = PREDEF_TYPE_IMAGE2D_ARR_MSAA_ID;
+ break;
+ case BuiltinType::OCLImage2dMSAADepth:
+ ID = PREDEF_TYPE_IMAGE2D_MSAA_DEP_ID;
+ break;
+ case BuiltinType::OCLImage2dArrayMSAADepth:
+ ID = PREDEF_TYPE_IMAGE2D_ARR_MSAA_DEPTH_ID;
+ break;
+ case BuiltinType::OCLImage3d:
+ ID = PREDEF_TYPE_IMAGE3D_ID;
+ break;
+ case BuiltinType::OCLSampler:
+ ID = PREDEF_TYPE_SAMPLER_ID;
+ break;
+ case BuiltinType::OCLEvent:
+ ID = PREDEF_TYPE_EVENT_ID;
+ break;
+ case BuiltinType::OCLClkEvent:
+ ID = PREDEF_TYPE_CLK_EVENT_ID;
+ break;
+ case BuiltinType::OCLQueue:
+ ID = PREDEF_TYPE_QUEUE_ID;
+ break;
+ case BuiltinType::OCLNDRange:
+ ID = PREDEF_TYPE_NDRANGE_ID;
+ break;
+ case BuiltinType::OCLReserveID:
+ ID = PREDEF_TYPE_RESERVE_ID_ID;
+ break;
+ case BuiltinType::BuiltinFn:
+ ID = PREDEF_TYPE_BUILTIN_FN;
+ break;
+ case BuiltinType::OMPArraySection:
+ ID = PREDEF_TYPE_OMP_ARRAY_SECTION;
+ break;
+ }
+
+ return TypeIdx(ID);
+}
+
+unsigned serialization::ComputeHash(Selector Sel) {
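+  // Hash every identifier slot with a chained Bernstein-style string hash
+  // (llvm::HashString, seeded here with 5381). A nullary selector still
+  // has one identifier slot, hence N is bumped from 0 to 1.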
+ unsigned N = Sel.getNumArgs();
+ if (N == 0)
+ ++N;
+ unsigned R = 5381;
+ for (unsigned I = 0; I != N; ++I)
+ if (IdentifierInfo *II = Sel.getIdentifierInfoForSlot(I))
+ R = llvm::HashString(II->getName(), R);
+ return R;
+}
+
+const DeclContext *
+serialization::getDefinitiveDeclContext(const DeclContext *DC) {
+ switch (DC->getDeclKind()) {
+ // These entities may have multiple definitions.
+ case Decl::TranslationUnit:
+ case Decl::ExternCContext:
+ case Decl::Namespace:
+ case Decl::LinkageSpec:
+ return nullptr;
+
+ // C/C++ tag types can only be defined in one place.
+ case Decl::Enum:
+ case Decl::Record:
+ if (const TagDecl *Def = cast<TagDecl>(DC)->getDefinition())
+ return Def;
+ return nullptr;
+
+ // FIXME: These can be defined in one place... except special member
+ // functions and out-of-line definitions.
+ case Decl::CXXRecord:
+ case Decl::ClassTemplateSpecialization:
+ case Decl::ClassTemplatePartialSpecialization:
+ return nullptr;
+
+ // Each function, method, and block declaration is its own DeclContext.
+ case Decl::Function:
+ case Decl::CXXMethod:
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor:
+ case Decl::CXXConversion:
+ case Decl::ObjCMethod:
+ case Decl::Block:
+ case Decl::Captured:
+ // Objective C categories, category implementations, and class
+ // implementations can only be defined in one place.
+ case Decl::ObjCCategory:
+ case Decl::ObjCCategoryImpl:
+ case Decl::ObjCImplementation:
+ return DC;
+
+ case Decl::ObjCProtocol:
+ if (const ObjCProtocolDecl *Def
+ = cast<ObjCProtocolDecl>(DC)->getDefinition())
+ return Def;
+ return nullptr;
+
+ // FIXME: These are defined in one place, but properties in class extensions
+ // end up being back-patched into the main interface. See
+ // Sema::HandlePropertyInClassExtension for the offending code.
+ case Decl::ObjCInterface:
+ return nullptr;
+
+ default:
+ llvm_unreachable("Unhandled DeclContext in AST reader");
+ }
+
+ llvm_unreachable("Unhandled decl kind");
+}
+
+bool serialization::isRedeclarableDeclKind(unsigned Kind) {
+ switch (static_cast<Decl::Kind>(Kind)) {
+ case Decl::TranslationUnit:
+ case Decl::ExternCContext:
+ // Special case of a "merged" declaration.
+ return true;
+
+ case Decl::Namespace:
+ case Decl::NamespaceAlias:
+ case Decl::Typedef:
+ case Decl::TypeAlias:
+ case Decl::Enum:
+ case Decl::Record:
+ case Decl::CXXRecord:
+ case Decl::ClassTemplateSpecialization:
+ case Decl::ClassTemplatePartialSpecialization:
+ case Decl::VarTemplateSpecialization:
+ case Decl::VarTemplatePartialSpecialization:
+ case Decl::Function:
+ case Decl::CXXMethod:
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor:
+ case Decl::CXXConversion:
+ case Decl::UsingShadow:
+ case Decl::Var:
+ case Decl::FunctionTemplate:
+ case Decl::ClassTemplate:
+ case Decl::VarTemplate:
+ case Decl::TypeAliasTemplate:
+ case Decl::ObjCProtocol:
+ case Decl::ObjCInterface:
+ case Decl::Empty:
+ return true;
+
+ // Never redeclarable.
+ case Decl::UsingDirective:
+ case Decl::Label:
+ case Decl::UnresolvedUsingTypename:
+ case Decl::TemplateTypeParm:
+ case Decl::EnumConstant:
+ case Decl::UnresolvedUsingValue:
+ case Decl::IndirectField:
+ case Decl::Field:
+ case Decl::MSProperty:
+ case Decl::ObjCIvar:
+ case Decl::ObjCAtDefsField:
+ case Decl::NonTypeTemplateParm:
+ case Decl::TemplateTemplateParm:
+ case Decl::Using:
+ case Decl::ObjCMethod:
+ case Decl::ObjCCategory:
+ case Decl::ObjCCategoryImpl:
+ case Decl::ObjCImplementation:
+ case Decl::ObjCProperty:
+ case Decl::ObjCCompatibleAlias:
+ case Decl::LinkageSpec:
+ case Decl::ObjCPropertyImpl:
+ case Decl::FileScopeAsm:
+ case Decl::AccessSpec:
+ case Decl::Friend:
+ case Decl::FriendTemplate:
+ case Decl::StaticAssert:
+ case Decl::Block:
+ case Decl::Captured:
+ case Decl::ClassScopeFunctionSpecialization:
+ case Decl::Import:
+ case Decl::OMPThreadPrivate:
+ case Decl::BuiltinTemplate:
+ return false;
+
+ // These indirectly derive from Redeclarable<T> but are not actually
+ // redeclarable.
+ case Decl::ImplicitParam:
+ case Decl::ParmVar:
+ case Decl::ObjCTypeParam:
+ return false;
+ }
+
+ llvm_unreachable("Unhandled declaration kind");
+}
+
+bool serialization::needsAnonymousDeclarationNumber(const NamedDecl *D) {
+ // Friend declarations in dependent contexts aren't anonymous in the usual
+ // sense, but they cannot be found by name lookup in their semantic context
+ // (or indeed in any context), so we treat them as anonymous.
+ //
+ // This doesn't apply to friend tag decls; Sema makes those available to name
+ // lookup in the surrounding context.
+ if (D->getFriendObjectKind() &&
+ D->getLexicalDeclContext()->isDependentContext() && !isa<TagDecl>(D)) {
+ // For function templates and class templates, the template is numbered and
+ // not its pattern.
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ return !FD->getDescribedFunctionTemplate();
+ if (auto *RD = dyn_cast<CXXRecordDecl>(D))
+ return !RD->getDescribedClassTemplate();
+ return true;
+ }
+
+ // Otherwise, we only care about anonymous class members.
+ if (D->getDeclName() || !isa<CXXRecordDecl>(D->getLexicalDeclContext()))
+ return false;
+ return isa<TagDecl>(D) || isa<FieldDecl>(D);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h
new file mode 100644
index 0000000..e59bc89
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h
@@ -0,0 +1,112 @@
+//===--- ASTCommon.h - Common stuff for ASTReader/ASTWriter ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines common functions that both ASTReader and ASTWriter use.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_SERIALIZATION_ASTCOMMON_H
+#define LLVM_CLANG_LIB_SERIALIZATION_ASTCOMMON_H
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/Serialization/ASTBitCodes.h"
+
+namespace clang {
+
+namespace serialization {
+
+enum DeclUpdateKind {
+ UPD_CXX_ADDED_IMPLICIT_MEMBER,
+ UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION,
+ UPD_CXX_ADDED_ANONYMOUS_NAMESPACE,
+ UPD_CXX_ADDED_FUNCTION_DEFINITION,
+ UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER,
+ UPD_CXX_INSTANTIATED_CLASS_DEFINITION,
+ UPD_CXX_RESOLVED_DTOR_DELETE,
+ UPD_CXX_RESOLVED_EXCEPTION_SPEC,
+ UPD_CXX_DEDUCED_RETURN_TYPE,
+ UPD_DECL_MARKED_USED,
+ UPD_MANGLING_NUMBER,
+ UPD_STATIC_LOCAL_NUMBER,
+ UPD_DECL_MARKED_OPENMP_THREADPRIVATE,
+ UPD_DECL_EXPORTED,
+ UPD_ADDED_ATTR_TO_RECORD
+};
+
+TypeIdx TypeIdxFromBuiltin(const BuiltinType *BT);
+
+template <typename IdxForTypeTy>
+TypeID MakeTypeID(ASTContext &Context, QualType T, IdxForTypeTy IdxForType) {
+ if (T.isNull())
+ return PREDEF_TYPE_NULL_ID;
+
+ unsigned FastQuals = T.getLocalFastQualifiers();
+ T.removeLocalFastQualifiers();
+
+ if (T.hasLocalNonFastQualifiers())
+ return IdxForType(T).asTypeID(FastQuals);
+
+ assert(!T.hasLocalQualifiers());
+
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(T.getTypePtr()))
+ return TypeIdxFromBuiltin(BT).asTypeID(FastQuals);
+
+ if (T == Context.AutoDeductTy)
+ return TypeIdx(PREDEF_TYPE_AUTO_DEDUCT).asTypeID(FastQuals);
+ if (T == Context.AutoRRefDeductTy)
+ return TypeIdx(PREDEF_TYPE_AUTO_RREF_DEDUCT).asTypeID(FastQuals);
+
+ return IdxForType(T).asTypeID(FastQuals);
+}
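+
+// A minimal usage sketch (the lambda and the NextIdx counter below are
+// illustrative, not the real writer's bookkeeping):
+//
+//   unsigned NextIdx = NUM_PREDEF_TYPE_IDS;
+//   TypeID ID = MakeTypeID(Context, T,
+//                          [&](QualType T) { return TypeIdx(NextIdx++); });
+//
+// Any fast qualifiers (e.g. 'const') are folded into the low bits of the
+// returned TypeID by TypeIdx::asTypeID.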
+
+unsigned ComputeHash(Selector Sel);
+
+/// \brief Retrieve the "definitive" declaration that provides all of the
+/// visible entries for the given declaration context, if there is one.
+///
+/// The "definitive" declaration is the only place where we need to look to
+/// find information about the declarations within the given declaration
+/// context. For example, C++ and Objective-C classes, C structs/unions, and
+/// Objective-C protocols, categories, and extensions are all defined in a
+/// single place in the source code, so they have definitive declarations
+/// associated with them. C++ namespaces, on the other hand, can have
+/// multiple definitions.
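+///
+/// For example, an illustrative sketch of how a caller can use this:
+/// \code
+///   if (const DeclContext *Def = getDefinitiveDeclContext(DC))
+///     DC = Def; // every visible entry is attached to the definition
+/// \endcode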
+const DeclContext *getDefinitiveDeclContext(const DeclContext *DC);
+
+/// \brief Determine whether the given declaration kind is redeclarable.
+bool isRedeclarableDeclKind(unsigned Kind);
+
+/// \brief Determine whether the given declaration needs an anonymous
+/// declaration number.
+bool needsAnonymousDeclarationNumber(const NamedDecl *D);
+
+/// \brief Visit each declaration within \c DC that needs an anonymous
+/// declaration number and call \p Visit with the declaration and its number.
+template<typename Fn> void numberAnonymousDeclsWithin(const DeclContext *DC,
+ Fn Visit) {
+ unsigned Index = 0;
+ for (Decl *LexicalD : DC->decls()) {
+ // For a friend decl, we care about the declaration within it, if any.
+ if (auto *FD = dyn_cast<FriendDecl>(LexicalD))
+ LexicalD = FD->getFriendDecl();
+
+ auto *ND = dyn_cast_or_null<NamedDecl>(LexicalD);
+ if (!ND || !needsAnonymousDeclarationNumber(ND))
+ continue;
+
+ Visit(ND, Index++);
+ }
+}
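+
+// A minimal usage sketch (the map name is illustrative), assuming RD is a
+// CXXRecordDecl being serialized: number its anonymous members so reader and
+// writer agree on their order.
+//
+//   llvm::DenseMap<const NamedDecl *, unsigned> AnonNumbers;
+//   numberAnonymousDeclsWithin(RD, [&](NamedDecl *ND, unsigned Number) {
+//     AnonNumbers[ND] = Number;
+//   });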
+
+} // namespace serialization
+
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp
new file mode 100644
index 0000000..a279475
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp
@@ -0,0 +1,8687 @@
+//===-- ASTReader.cpp - AST File Reader ----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTReader class, which reads AST files.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Serialization/ASTReader.h"
+#include "ASTCommon.h"
+#include "ASTReaderInternals.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Frontend/PCHContainerOperations.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLocVisitor.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/SourceManagerInternals.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
+#include "clang/Basic/Version.h"
+#include "clang/Basic/VersionTuple.h"
+#include "clang/Frontend/Utils.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/HeaderSearchOptions.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/PreprocessingRecord.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorOptions.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Serialization/ASTDeserializationListener.h"
+#include "clang/Serialization/GlobalModuleIndex.h"
+#include "clang/Serialization/ModuleManager.h"
+#include "clang/Serialization/SerializationDiagnostic.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Bitcode/BitstreamReader.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/SaveAndRestore.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cstdio>
+#include <iterator>
+#include <system_error>
+
+using namespace clang;
+using namespace clang::serialization;
+using namespace clang::serialization::reader;
+using llvm::BitstreamCursor;
+
+//===----------------------------------------------------------------------===//
+// ChainedASTReaderListener implementation
+//===----------------------------------------------------------------------===//
+
+bool
+ChainedASTReaderListener::ReadFullVersionInformation(StringRef FullVersion) {
+ return First->ReadFullVersionInformation(FullVersion) ||
+ Second->ReadFullVersionInformation(FullVersion);
+}
+void ChainedASTReaderListener::ReadModuleName(StringRef ModuleName) {
+ First->ReadModuleName(ModuleName);
+ Second->ReadModuleName(ModuleName);
+}
+void ChainedASTReaderListener::ReadModuleMapFile(StringRef ModuleMapPath) {
+ First->ReadModuleMapFile(ModuleMapPath);
+ Second->ReadModuleMapFile(ModuleMapPath);
+}
+bool
+ChainedASTReaderListener::ReadLanguageOptions(const LangOptions &LangOpts,
+ bool Complain,
+ bool AllowCompatibleDifferences) {
+ return First->ReadLanguageOptions(LangOpts, Complain,
+ AllowCompatibleDifferences) ||
+ Second->ReadLanguageOptions(LangOpts, Complain,
+ AllowCompatibleDifferences);
+}
+bool ChainedASTReaderListener::ReadTargetOptions(
+ const TargetOptions &TargetOpts, bool Complain,
+ bool AllowCompatibleDifferences) {
+ return First->ReadTargetOptions(TargetOpts, Complain,
+ AllowCompatibleDifferences) ||
+ Second->ReadTargetOptions(TargetOpts, Complain,
+ AllowCompatibleDifferences);
+}
+bool ChainedASTReaderListener::ReadDiagnosticOptions(
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts, bool Complain) {
+ return First->ReadDiagnosticOptions(DiagOpts, Complain) ||
+ Second->ReadDiagnosticOptions(DiagOpts, Complain);
+}
+bool
+ChainedASTReaderListener::ReadFileSystemOptions(const FileSystemOptions &FSOpts,
+ bool Complain) {
+ return First->ReadFileSystemOptions(FSOpts, Complain) ||
+ Second->ReadFileSystemOptions(FSOpts, Complain);
+}
+
+bool ChainedASTReaderListener::ReadHeaderSearchOptions(
+ const HeaderSearchOptions &HSOpts, StringRef SpecificModuleCachePath,
+ bool Complain) {
+ return First->ReadHeaderSearchOptions(HSOpts, SpecificModuleCachePath,
+ Complain) ||
+ Second->ReadHeaderSearchOptions(HSOpts, SpecificModuleCachePath,
+ Complain);
+}
+bool ChainedASTReaderListener::ReadPreprocessorOptions(
+ const PreprocessorOptions &PPOpts, bool Complain,
+ std::string &SuggestedPredefines) {
+ return First->ReadPreprocessorOptions(PPOpts, Complain,
+ SuggestedPredefines) ||
+ Second->ReadPreprocessorOptions(PPOpts, Complain, SuggestedPredefines);
+}
+void ChainedASTReaderListener::ReadCounter(const serialization::ModuleFile &M,
+ unsigned Value) {
+ First->ReadCounter(M, Value);
+ Second->ReadCounter(M, Value);
+}
+bool ChainedASTReaderListener::needsInputFileVisitation() {
+ return First->needsInputFileVisitation() ||
+ Second->needsInputFileVisitation();
+}
+bool ChainedASTReaderListener::needsSystemInputFileVisitation() {
+ return First->needsSystemInputFileVisitation() ||
+ Second->needsSystemInputFileVisitation();
+}
+void ChainedASTReaderListener::visitModuleFile(StringRef Filename,
+ ModuleKind Kind) {
+ First->visitModuleFile(Filename, Kind);
+ Second->visitModuleFile(Filename, Kind);
+}
+bool ChainedASTReaderListener::visitInputFile(StringRef Filename,
+ bool isSystem,
+ bool isOverridden,
+ bool isExplicitModule) {
+ bool Continue = false;
+ if (First->needsInputFileVisitation() &&
+ (!isSystem || First->needsSystemInputFileVisitation()))
+ Continue |= First->visitInputFile(Filename, isSystem, isOverridden,
+ isExplicitModule);
+ if (Second->needsInputFileVisitation() &&
+ (!isSystem || Second->needsSystemInputFileVisitation()))
+ Continue |= Second->visitInputFile(Filename, isSystem, isOverridden,
+ isExplicitModule);
+ return Continue;
+}
+
+void ChainedASTReaderListener::readModuleFileExtension(
+ const ModuleFileExtensionMetadata &Metadata) {
+ First->readModuleFileExtension(Metadata);
+ Second->readModuleFileExtension(Metadata);
+}
+
+//===----------------------------------------------------------------------===//
+// PCH validator implementation
+//===----------------------------------------------------------------------===//
+
+ASTReaderListener::~ASTReaderListener() {}
+
+/// \brief Compare the given set of language options against an existing set of
+/// language options.
+///
+/// \param Diags If non-NULL, diagnostics will be emitted via this engine.
+/// \param AllowCompatibleDifferences If true, differences between compatible
+/// language options will be permitted.
+///
+/// \returns true if the language options mismatch, false otherwise.
+static bool checkLanguageOptions(const LangOptions &LangOpts,
+ const LangOptions &ExistingLangOpts,
+ DiagnosticsEngine *Diags,
+ bool AllowCompatibleDifferences = true) {
+#define LANGOPT(Name, Bits, Default, Description) \
+ if (ExistingLangOpts.Name != LangOpts.Name) { \
+ if (Diags) \
+ Diags->Report(diag::err_pch_langopt_mismatch) \
+ << Description << LangOpts.Name << ExistingLangOpts.Name; \
+ return true; \
+ }
+
+#define VALUE_LANGOPT(Name, Bits, Default, Description) \
+ if (ExistingLangOpts.Name != LangOpts.Name) { \
+ if (Diags) \
+ Diags->Report(diag::err_pch_langopt_value_mismatch) \
+ << Description; \
+ return true; \
+ }
+
+#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
+ if (ExistingLangOpts.get##Name() != LangOpts.get##Name()) { \
+ if (Diags) \
+ Diags->Report(diag::err_pch_langopt_value_mismatch) \
+ << Description; \
+ return true; \
+ }
+
+#define COMPATIBLE_LANGOPT(Name, Bits, Default, Description) \
+ if (!AllowCompatibleDifferences) \
+ LANGOPT(Name, Bits, Default, Description)
+
+#define COMPATIBLE_ENUM_LANGOPT(Name, Bits, Default, Description) \
+ if (!AllowCompatibleDifferences) \
+ ENUM_LANGOPT(Name, Bits, Default, Description)
+
+#define BENIGN_LANGOPT(Name, Bits, Default, Description)
+#define BENIGN_ENUM_LANGOPT(Name, Type, Bits, Default, Description)
+#include "clang/Basic/LangOptions.def"
+
+ if (ExistingLangOpts.ModuleFeatures != LangOpts.ModuleFeatures) {
+ if (Diags)
+ Diags->Report(diag::err_pch_langopt_value_mismatch) << "module features";
+ return true;
+ }
+
+ if (ExistingLangOpts.ObjCRuntime != LangOpts.ObjCRuntime) {
+ if (Diags)
+ Diags->Report(diag::err_pch_langopt_value_mismatch)
+ << "target Objective-C runtime";
+ return true;
+ }
+
+ if (ExistingLangOpts.CommentOpts.BlockCommandNames !=
+ LangOpts.CommentOpts.BlockCommandNames) {
+ if (Diags)
+ Diags->Report(diag::err_pch_langopt_value_mismatch)
+ << "block command names";
+ return true;
+ }
+
+ return false;
+}
+
+/// \brief Compare the given set of target options against an existing set of
+/// target options.
+///
+/// \param Diags If non-NULL, diagnostics will be emitted via this engine.
+///
+/// \returns true if the target options mismatch, false otherwise.
+static bool checkTargetOptions(const TargetOptions &TargetOpts,
+ const TargetOptions &ExistingTargetOpts,
+ DiagnosticsEngine *Diags,
+ bool AllowCompatibleDifferences = true) {
+#define CHECK_TARGET_OPT(Field, Name) \
+ if (TargetOpts.Field != ExistingTargetOpts.Field) { \
+ if (Diags) \
+ Diags->Report(diag::err_pch_targetopt_mismatch) \
+ << Name << TargetOpts.Field << ExistingTargetOpts.Field; \
+ return true; \
+ }
+
+ // The triple and ABI must match exactly.
+ CHECK_TARGET_OPT(Triple, "target");
+ CHECK_TARGET_OPT(ABI, "target ABI");
+
+ // We can tolerate different CPUs in many cases, notably when one CPU
+ // supports a strict superset of another. When allowing compatible
+  // differences, skip this check.
+ if (!AllowCompatibleDifferences)
+ CHECK_TARGET_OPT(CPU, "target CPU");
+
+#undef CHECK_TARGET_OPT
+
+ // Compare feature sets.
+ SmallVector<StringRef, 4> ExistingFeatures(
+ ExistingTargetOpts.FeaturesAsWritten.begin(),
+ ExistingTargetOpts.FeaturesAsWritten.end());
+ SmallVector<StringRef, 4> ReadFeatures(TargetOpts.FeaturesAsWritten.begin(),
+ TargetOpts.FeaturesAsWritten.end());
+ std::sort(ExistingFeatures.begin(), ExistingFeatures.end());
+ std::sort(ReadFeatures.begin(), ReadFeatures.end());
+
+ // We compute the set difference in both directions explicitly so that we can
+ // diagnose the differences differently.
+ SmallVector<StringRef, 4> UnmatchedExistingFeatures, UnmatchedReadFeatures;
+ std::set_difference(
+ ExistingFeatures.begin(), ExistingFeatures.end(), ReadFeatures.begin(),
+ ReadFeatures.end(), std::back_inserter(UnmatchedExistingFeatures));
+ std::set_difference(ReadFeatures.begin(), ReadFeatures.end(),
+ ExistingFeatures.begin(), ExistingFeatures.end(),
+ std::back_inserter(UnmatchedReadFeatures));
+
+ // If we are allowing compatible differences and the read feature set is
+ // a strict subset of the existing feature set, there is nothing to diagnose.
+ if (AllowCompatibleDifferences && UnmatchedReadFeatures.empty())
+ return false;
+
+ if (Diags) {
+ for (StringRef Feature : UnmatchedReadFeatures)
+ Diags->Report(diag::err_pch_targetopt_feature_mismatch)
+ << /* is-existing-feature */ false << Feature;
+ for (StringRef Feature : UnmatchedExistingFeatures)
+ Diags->Report(diag::err_pch_targetopt_feature_mismatch)
+ << /* is-existing-feature */ true << Feature;
+ }
+
+ return !UnmatchedReadFeatures.empty() || !UnmatchedExistingFeatures.empty();
+}
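+
+// E.g. with compatible differences allowed, an AST file built with features
+// {+sse2} loads into a compile whose target enables {+sse2, +avx} (the read
+// set is a subset), but the reverse is rejected, with "+avx" diagnosed as an
+// unmatched read feature.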
+
+bool
+PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts,
+ bool Complain,
+ bool AllowCompatibleDifferences) {
+ const LangOptions &ExistingLangOpts = PP.getLangOpts();
+ return checkLanguageOptions(LangOpts, ExistingLangOpts,
+ Complain ? &Reader.Diags : nullptr,
+ AllowCompatibleDifferences);
+}
+
+bool PCHValidator::ReadTargetOptions(const TargetOptions &TargetOpts,
+ bool Complain,
+ bool AllowCompatibleDifferences) {
+ const TargetOptions &ExistingTargetOpts = PP.getTargetInfo().getTargetOpts();
+ return checkTargetOptions(TargetOpts, ExistingTargetOpts,
+ Complain ? &Reader.Diags : nullptr,
+ AllowCompatibleDifferences);
+}
+
+namespace {
+ typedef llvm::StringMap<std::pair<StringRef, bool /*IsUndef*/> >
+ MacroDefinitionsMap;
+ typedef llvm::DenseMap<DeclarationName, SmallVector<NamedDecl *, 8> >
+ DeclsMap;
+}
+
+static bool checkDiagnosticGroupMappings(DiagnosticsEngine &StoredDiags,
+ DiagnosticsEngine &Diags,
+ bool Complain) {
+ typedef DiagnosticsEngine::Level Level;
+
+  // Check the current mappings for new -Werror mappings, and check the
+  // stored mappings for diagnostics that were explicitly mapped *not* to be
+  // errors but have since become errors because of options like -Werror.
+ DiagnosticsEngine *MappingSources[] = { &Diags, &StoredDiags };
+
+ for (DiagnosticsEngine *MappingSource : MappingSources) {
+ for (auto DiagIDMappingPair : MappingSource->getDiagnosticMappings()) {
+ diag::kind DiagID = DiagIDMappingPair.first;
+ Level CurLevel = Diags.getDiagnosticLevel(DiagID, SourceLocation());
+ if (CurLevel < DiagnosticsEngine::Error)
+ continue; // not significant
+ Level StoredLevel =
+ StoredDiags.getDiagnosticLevel(DiagID, SourceLocation());
+ if (StoredLevel < DiagnosticsEngine::Error) {
+ if (Complain)
+ Diags.Report(diag::err_pch_diagopt_mismatch) << "-Werror=" +
+ Diags.getDiagnosticIDs()->getWarningOptionForDiag(DiagID).str();
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static bool isExtHandlingFromDiagsError(DiagnosticsEngine &Diags) {
+ diag::Severity Ext = Diags.getExtensionHandlingBehavior();
+ if (Ext == diag::Severity::Warning && Diags.getWarningsAsErrors())
+ return true;
+ return Ext >= diag::Severity::Error;
+}
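+
+// E.g. -pedantic-errors maps extension diagnostics to errors, and combining
+// -pedantic with -Werror promotes the resulting extension warnings; either
+// configuration makes this predicate return true.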
+
+static bool checkDiagnosticMappings(DiagnosticsEngine &StoredDiags,
+ DiagnosticsEngine &Diags,
+ bool IsSystem, bool Complain) {
+ // Top-level options
+ if (IsSystem) {
+ if (Diags.getSuppressSystemWarnings())
+ return false;
+ // If -Wsystem-headers was not enabled before, be conservative
+ if (StoredDiags.getSuppressSystemWarnings()) {
+ if (Complain)
+ Diags.Report(diag::err_pch_diagopt_mismatch) << "-Wsystem-headers";
+ return true;
+ }
+ }
+
+ if (Diags.getWarningsAsErrors() && !StoredDiags.getWarningsAsErrors()) {
+ if (Complain)
+ Diags.Report(diag::err_pch_diagopt_mismatch) << "-Werror";
+ return true;
+ }
+
+ if (Diags.getWarningsAsErrors() && Diags.getEnableAllWarnings() &&
+ !StoredDiags.getEnableAllWarnings()) {
+ if (Complain)
+ Diags.Report(diag::err_pch_diagopt_mismatch) << "-Weverything -Werror";
+ return true;
+ }
+
+ if (isExtHandlingFromDiagsError(Diags) &&
+ !isExtHandlingFromDiagsError(StoredDiags)) {
+ if (Complain)
+ Diags.Report(diag::err_pch_diagopt_mismatch) << "-pedantic-errors";
+ return true;
+ }
+
+ return checkDiagnosticGroupMappings(StoredDiags, Diags, Complain);
+}
+
+bool PCHValidator::ReadDiagnosticOptions(
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts, bool Complain) {
+ DiagnosticsEngine &ExistingDiags = PP.getDiagnostics();
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagIDs(ExistingDiags.getDiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagIDs, DiagOpts.get()));
+ // This should never fail, because we would have processed these options
+ // before writing them to an ASTFile.
+ ProcessWarningOptions(*Diags, *DiagOpts, /*Report*/false);
+
+ ModuleManager &ModuleMgr = Reader.getModuleManager();
+ assert(ModuleMgr.size() >= 1 && "what ASTFile is this then");
+
+ // If the original import came from a file explicitly generated by the user,
+ // don't check the diagnostic mappings.
+ // FIXME: currently this is approximated by checking whether this is not a
+ // module import of an implicitly-loaded module file.
+ // Note: ModuleMgr.rbegin() may not be the current module, but it must be in
+ // the transitive closure of its imports, since unrelated modules cannot be
+ // imported until after this module finishes validation.
+ ModuleFile *TopImport = *ModuleMgr.rbegin();
+ while (!TopImport->ImportedBy.empty())
+ TopImport = TopImport->ImportedBy[0];
+ if (TopImport->Kind != MK_ImplicitModule)
+ return false;
+
+ StringRef ModuleName = TopImport->ModuleName;
+ assert(!ModuleName.empty() && "diagnostic options read before module name");
+
+ Module *M = PP.getHeaderSearchInfo().lookupModule(ModuleName);
+ assert(M && "missing module");
+
+ // FIXME: if the diagnostics are incompatible, save a DiagnosticOptions that
+ // contains the union of their flags.
+ return checkDiagnosticMappings(*Diags, ExistingDiags, M->IsSystem, Complain);
+}
+
+/// \brief Collect the macro definitions provided by the given preprocessor
+/// options.
+static void
+collectMacroDefinitions(const PreprocessorOptions &PPOpts,
+ MacroDefinitionsMap &Macros,
+ SmallVectorImpl<StringRef> *MacroNames = nullptr) {
+ for (unsigned I = 0, N = PPOpts.Macros.size(); I != N; ++I) {
+ StringRef Macro = PPOpts.Macros[I].first;
+ bool IsUndef = PPOpts.Macros[I].second;
+
+ std::pair<StringRef, StringRef> MacroPair = Macro.split('=');
+ StringRef MacroName = MacroPair.first;
+ StringRef MacroBody = MacroPair.second;
+
+ // For an #undef'd macro, we only care about the name.
+ if (IsUndef) {
+ if (MacroNames && !Macros.count(MacroName))
+ MacroNames->push_back(MacroName);
+
+ Macros[MacroName] = std::make_pair("", true);
+ continue;
+ }
+
+ // For a #define'd macro, figure out the actual definition.
+ if (MacroName.size() == Macro.size())
+ MacroBody = "1";
+ else {
+ // Note: GCC drops anything following an end-of-line character.
+ StringRef::size_type End = MacroBody.find_first_of("\n\r");
+ MacroBody = MacroBody.substr(0, End);
+ }
+
+ if (MacroNames && !Macros.count(MacroName))
+ MacroNames->push_back(MacroName);
+ Macros[MacroName] = std::make_pair(MacroBody, false);
+ }
+}
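+
+// For example (sketch), entries in PPOpts.Macros map as follows:
+//   {"DEBUG",   IsUndef=false} -> Macros["DEBUG"] == {"1",   false}
+//   {"MAX=100", IsUndef=false} -> Macros["MAX"]   == {"100", false}
+//   {"TRACE",   IsUndef=true } -> Macros["TRACE"] == {"",    true}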
+
+/// \brief Check the preprocessor options deserialized from the control block
+/// against the preprocessor options in an existing preprocessor.
+///
+/// \param Diags If non-null, produce diagnostics for any mismatches incurred.
+static bool checkPreprocessorOptions(const PreprocessorOptions &PPOpts,
+ const PreprocessorOptions &ExistingPPOpts,
+ DiagnosticsEngine *Diags,
+ FileManager &FileMgr,
+ std::string &SuggestedPredefines,
+ const LangOptions &LangOpts) {
+ // Check macro definitions.
+ MacroDefinitionsMap ASTFileMacros;
+ collectMacroDefinitions(PPOpts, ASTFileMacros);
+ MacroDefinitionsMap ExistingMacros;
+ SmallVector<StringRef, 4> ExistingMacroNames;
+ collectMacroDefinitions(ExistingPPOpts, ExistingMacros, &ExistingMacroNames);
+
+ for (unsigned I = 0, N = ExistingMacroNames.size(); I != N; ++I) {
+ // Dig out the macro definition in the existing preprocessor options.
+ StringRef MacroName = ExistingMacroNames[I];
+ std::pair<StringRef, bool> Existing = ExistingMacros[MacroName];
+
+ // Check whether we know anything about this macro name or not.
+    MacroDefinitionsMap::iterator Known = ASTFileMacros.find(MacroName);
+ if (Known == ASTFileMacros.end()) {
+ // FIXME: Check whether this identifier was referenced anywhere in the
+ // AST file. If so, we should reject the AST file. Unfortunately, this
+ // information isn't in the control block. What shall we do about it?
+
+ if (Existing.second) {
+ SuggestedPredefines += "#undef ";
+ SuggestedPredefines += MacroName.str();
+ SuggestedPredefines += '\n';
+ } else {
+ SuggestedPredefines += "#define ";
+ SuggestedPredefines += MacroName.str();
+ SuggestedPredefines += ' ';
+ SuggestedPredefines += Existing.first.str();
+ SuggestedPredefines += '\n';
+ }
+ continue;
+ }
+
+ // If the macro was defined in one but undef'd in the other, we have a
+ // conflict.
+ if (Existing.second != Known->second.second) {
+ if (Diags) {
+ Diags->Report(diag::err_pch_macro_def_undef)
+ << MacroName << Known->second.second;
+ }
+ return true;
+ }
+
+ // If the macro was #undef'd in both, or if the macro bodies are identical,
+ // it's fine.
+ if (Existing.second || Existing.first == Known->second.first)
+ continue;
+
+ // The macro bodies differ; complain.
+ if (Diags) {
+ Diags->Report(diag::err_pch_macro_def_conflict)
+ << MacroName << Known->second.first << Existing.first;
+ }
+ return true;
+ }
+
+ // Check whether we're using predefines.
+ if (PPOpts.UsePredefines != ExistingPPOpts.UsePredefines) {
+ if (Diags) {
+ Diags->Report(diag::err_pch_undef) << ExistingPPOpts.UsePredefines;
+ }
+ return true;
+ }
+
+ // Detailed record is important since it is used for the module cache hash.
+ if (LangOpts.Modules &&
+ PPOpts.DetailedRecord != ExistingPPOpts.DetailedRecord) {
+ if (Diags) {
+ Diags->Report(diag::err_pch_pp_detailed_record) << PPOpts.DetailedRecord;
+ }
+ return true;
+ }
+
+ // Compute the #include and #include_macros lines we need.
+ for (unsigned I = 0, N = ExistingPPOpts.Includes.size(); I != N; ++I) {
+ StringRef File = ExistingPPOpts.Includes[I];
+ if (File == ExistingPPOpts.ImplicitPCHInclude)
+ continue;
+
+ if (std::find(PPOpts.Includes.begin(), PPOpts.Includes.end(), File)
+ != PPOpts.Includes.end())
+ continue;
+
+ SuggestedPredefines += "#include \"";
+ SuggestedPredefines += File;
+ SuggestedPredefines += "\"\n";
+ }
+
+ for (unsigned I = 0, N = ExistingPPOpts.MacroIncludes.size(); I != N; ++I) {
+ StringRef File = ExistingPPOpts.MacroIncludes[I];
+ if (std::find(PPOpts.MacroIncludes.begin(), PPOpts.MacroIncludes.end(),
+ File)
+ != PPOpts.MacroIncludes.end())
+ continue;
+
+ SuggestedPredefines += "#__include_macros \"";
+ SuggestedPredefines += File;
+ SuggestedPredefines += "\"\n##\n";
+ }
+
+ return false;
+}
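+
+// E.g. if the current invocation passes -DDEBUG=1 but the AST file was built
+// without it, the file is not rejected outright; "#define DEBUG 1" is
+// appended to SuggestedPredefines so the macro takes effect after the AST
+// file is loaded.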
+
+bool PCHValidator::ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
+ bool Complain,
+ std::string &SuggestedPredefines) {
+ const PreprocessorOptions &ExistingPPOpts = PP.getPreprocessorOpts();
+
+ return checkPreprocessorOptions(PPOpts, ExistingPPOpts,
+ Complain? &Reader.Diags : nullptr,
+ PP.getFileManager(),
+ SuggestedPredefines,
+ PP.getLangOpts());
+}
+
+/// Check the header search options deserialized from the control block
+/// against the header search options in an existing preprocessor.
+///
+/// \param Diags If non-null, produce diagnostics for any mismatches incurred.
+static bool checkHeaderSearchOptions(const HeaderSearchOptions &HSOpts,
+ StringRef SpecificModuleCachePath,
+ StringRef ExistingModuleCachePath,
+ DiagnosticsEngine *Diags,
+ const LangOptions &LangOpts) {
+ if (LangOpts.Modules) {
+ if (SpecificModuleCachePath != ExistingModuleCachePath) {
+ if (Diags)
+ Diags->Report(diag::err_pch_modulecache_mismatch)
+ << SpecificModuleCachePath << ExistingModuleCachePath;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool PCHValidator::ReadHeaderSearchOptions(const HeaderSearchOptions &HSOpts,
+ StringRef SpecificModuleCachePath,
+ bool Complain) {
+ return checkHeaderSearchOptions(HSOpts, SpecificModuleCachePath,
+ PP.getHeaderSearchInfo().getModuleCachePath(),
+ Complain ? &Reader.Diags : nullptr,
+ PP.getLangOpts());
+}
+
+void PCHValidator::ReadCounter(const ModuleFile &M, unsigned Value) {
+ PP.setCounterValue(Value);
+}
+
+//===----------------------------------------------------------------------===//
+// AST reader implementation
+//===----------------------------------------------------------------------===//
+
+void ASTReader::setDeserializationListener(ASTDeserializationListener *Listener,
+ bool TakeOwnership) {
+ DeserializationListener = Listener;
+ OwnsDeserializationListener = TakeOwnership;
+}
+
+unsigned ASTSelectorLookupTrait::ComputeHash(Selector Sel) {
+ return serialization::ComputeHash(Sel);
+}
+
+std::pair<unsigned, unsigned>
+ASTSelectorLookupTrait::ReadKeyDataLength(const unsigned char*& d) {
+ using namespace llvm::support;
+ unsigned KeyLen = endian::readNext<uint16_t, little, unaligned>(d);
+ unsigned DataLen = endian::readNext<uint16_t, little, unaligned>(d);
+ return std::make_pair(KeyLen, DataLen);
+}
+
+ASTSelectorLookupTrait::internal_key_type
+ASTSelectorLookupTrait::ReadKey(const unsigned char* d, unsigned) {
+ using namespace llvm::support;
+ SelectorTable &SelTable = Reader.getContext().Selectors;
+ unsigned N = endian::readNext<uint16_t, little, unaligned>(d);
+ IdentifierInfo *FirstII = Reader.getLocalIdentifier(
+ F, endian::readNext<uint32_t, little, unaligned>(d));
+ if (N == 0)
+ return SelTable.getNullarySelector(FirstII);
+ else if (N == 1)
+ return SelTable.getUnarySelector(FirstII);
+
+ SmallVector<IdentifierInfo *, 16> Args;
+ Args.push_back(FirstII);
+ for (unsigned I = 1; I != N; ++I)
+ Args.push_back(Reader.getLocalIdentifier(
+ F, endian::readNext<uint32_t, little, unaligned>(d)));
+
+ return SelTable.getSelector(N, Args.data());
+}
+
+ASTSelectorLookupTrait::data_type
+ASTSelectorLookupTrait::ReadData(Selector, const unsigned char* d,
+ unsigned DataLen) {
+ using namespace llvm::support;
+
+ data_type Result;
+
+ Result.ID = Reader.getGlobalSelectorID(
+ F, endian::readNext<uint32_t, little, unaligned>(d));
+ unsigned FullInstanceBits = endian::readNext<uint16_t, little, unaligned>(d);
+ unsigned FullFactoryBits = endian::readNext<uint16_t, little, unaligned>(d);
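+  // Each 16-bit word is packed as: bits [0,1] the stored method-list bits,
+  // bit 2 whether more than one declaration was seen, bits [3,15] the number
+  // of serialized methods.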
+ Result.InstanceBits = FullInstanceBits & 0x3;
+ Result.InstanceHasMoreThanOneDecl = (FullInstanceBits >> 2) & 0x1;
+ Result.FactoryBits = FullFactoryBits & 0x3;
+ Result.FactoryHasMoreThanOneDecl = (FullFactoryBits >> 2) & 0x1;
+ unsigned NumInstanceMethods = FullInstanceBits >> 3;
+ unsigned NumFactoryMethods = FullFactoryBits >> 3;
+
+ // Load instance methods
+ for (unsigned I = 0; I != NumInstanceMethods; ++I) {
+ if (ObjCMethodDecl *Method = Reader.GetLocalDeclAs<ObjCMethodDecl>(
+ F, endian::readNext<uint32_t, little, unaligned>(d)))
+ Result.Instance.push_back(Method);
+ }
+
+ // Load factory methods
+ for (unsigned I = 0; I != NumFactoryMethods; ++I) {
+ if (ObjCMethodDecl *Method = Reader.GetLocalDeclAs<ObjCMethodDecl>(
+ F, endian::readNext<uint32_t, little, unaligned>(d)))
+ Result.Factory.push_back(Method);
+ }
+
+ return Result;
+}
+
+unsigned ASTIdentifierLookupTraitBase::ComputeHash(const internal_key_type& a) {
+ return llvm::HashString(a);
+}
+
+std::pair<unsigned, unsigned>
+ASTIdentifierLookupTraitBase::ReadKeyDataLength(const unsigned char*& d) {
+ using namespace llvm::support;
+ unsigned DataLen = endian::readNext<uint16_t, little, unaligned>(d);
+ unsigned KeyLen = endian::readNext<uint16_t, little, unaligned>(d);
+ return std::make_pair(KeyLen, DataLen);
+}
+
+ASTIdentifierLookupTraitBase::internal_key_type
+ASTIdentifierLookupTraitBase::ReadKey(const unsigned char* d, unsigned n) {
+ assert(n >= 2 && d[n-1] == '\0');
+ return StringRef((const char*) d, n-1);
+}
+
+/// \brief Whether the given identifier is "interesting", i.e. carries state
+/// (a macro definition, poisoning, a builtin or Objective-C keyword ID, or
+/// frontend token info) beyond the name itself.
+static bool isInterestingIdentifier(ASTReader &Reader, IdentifierInfo &II,
+ bool IsModule) {
+ return II.hadMacroDefinition() ||
+ II.isPoisoned() ||
+ (IsModule ? II.hasRevertedBuiltin() : II.getObjCOrBuiltinID()) ||
+ II.hasRevertedTokenIDToIdentifier() ||
+ (!(IsModule && Reader.getContext().getLangOpts().CPlusPlus) &&
+ II.getFETokenInfo<void>());
+}
+
+static bool readBit(unsigned &Bits) {
+ bool Value = Bits & 0x1;
+ Bits >>= 1;
+ return Value;
+}
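+
+// readBit consumes flag bits from the least-significant end upward; e.g.
+// with Bits == 5 (binary 101), three successive calls yield true, false,
+// true.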
+
+IdentID ASTIdentifierLookupTrait::ReadIdentifierID(const unsigned char *d) {
+ using namespace llvm::support;
+ unsigned RawID = endian::readNext<uint32_t, little, unaligned>(d);
+ return Reader.getGlobalIdentifierID(F, RawID >> 1);
+}
+
+IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
+ const unsigned char* d,
+ unsigned DataLen) {
+ using namespace llvm::support;
+ unsigned RawID = endian::readNext<uint32_t, little, unaligned>(d);
+ bool IsInteresting = RawID & 0x01;
+
+ // Wipe out the "is interesting" bit.
+ RawID = RawID >> 1;
+
+ // Build the IdentifierInfo and link the identifier ID with it.
+ IdentifierInfo *II = KnownII;
+ if (!II) {
+ II = &Reader.getIdentifierTable().getOwn(k);
+ KnownII = II;
+ }
+ if (!II->isFromAST()) {
+ II->setIsFromAST();
+ bool IsModule = Reader.PP.getCurrentModule() != nullptr;
+ if (isInterestingIdentifier(Reader, *II, IsModule))
+ II->setChangedSinceDeserialization();
+ }
+ Reader.markIdentifierUpToDate(II);
+
+ IdentID ID = Reader.getGlobalIdentifierID(F, RawID);
+ if (!IsInteresting) {
+ // For uninteresting identifiers, there's nothing else to do. Just notify
+ // the reader that we've finished loading this identifier.
+ Reader.SetIdentifierInfo(ID, II);
+ return II;
+ }
+
+ unsigned ObjCOrBuiltinID = endian::readNext<uint16_t, little, unaligned>(d);
+ unsigned Bits = endian::readNext<uint16_t, little, unaligned>(d);
+ bool CPlusPlusOperatorKeyword = readBit(Bits);
+ bool HasRevertedTokenIDToIdentifier = readBit(Bits);
+ bool HasRevertedBuiltin = readBit(Bits);
+ bool Poisoned = readBit(Bits);
+ bool ExtensionToken = readBit(Bits);
+ bool HadMacroDefinition = readBit(Bits);
+
+ assert(Bits == 0 && "Extra bits in the identifier?");
+ DataLen -= 8;
+
+ // Set or check the various bits in the IdentifierInfo structure.
+ // Token IDs are read-only.
+ if (HasRevertedTokenIDToIdentifier && II->getTokenID() != tok::identifier)
+ II->revertTokenIDToIdentifier();
+ if (!F.isModule())
+ II->setObjCOrBuiltinID(ObjCOrBuiltinID);
+ else if (HasRevertedBuiltin && II->getBuiltinID()) {
+ II->revertBuiltin();
+ assert((II->hasRevertedBuiltin() ||
+ II->getObjCOrBuiltinID() == ObjCOrBuiltinID) &&
+ "Incorrect ObjC keyword or builtin ID");
+ }
+ assert(II->isExtensionToken() == ExtensionToken &&
+ "Incorrect extension token flag");
+ (void)ExtensionToken;
+ if (Poisoned)
+ II->setIsPoisoned(true);
+ assert(II->isCPlusPlusOperatorKeyword() == CPlusPlusOperatorKeyword &&
+ "Incorrect C++ operator keyword flag");
+ (void)CPlusPlusOperatorKeyword;
+
+ // If this identifier is a macro, deserialize the macro
+ // definition.
+ if (HadMacroDefinition) {
+ uint32_t MacroDirectivesOffset =
+ endian::readNext<uint32_t, little, unaligned>(d);
+ DataLen -= 4;
+
+ Reader.addPendingMacro(II, &F, MacroDirectivesOffset);
+ }
+
+ Reader.SetIdentifierInfo(ID, II);
+
+ // Read all of the declarations visible at global scope with this
+ // name.
+ if (DataLen > 0) {
+ SmallVector<uint32_t, 4> DeclIDs;
+ for (; DataLen > 0; DataLen -= 4)
+ DeclIDs.push_back(Reader.getGlobalDeclID(
+ F, endian::readNext<uint32_t, little, unaligned>(d)));
+ Reader.SetGloballyVisibleDecls(II, DeclIDs);
+ }
+
+ return II;
+}
+
+DeclarationNameKey::DeclarationNameKey(DeclarationName Name)
+ : Kind(Name.getNameKind()) {
+ switch (Kind) {
+ case DeclarationName::Identifier:
+ Data = (uint64_t)Name.getAsIdentifierInfo();
+ break;
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ Data = (uint64_t)Name.getObjCSelector().getAsOpaquePtr();
+ break;
+ case DeclarationName::CXXOperatorName:
+ Data = Name.getCXXOverloadedOperator();
+ break;
+ case DeclarationName::CXXLiteralOperatorName:
+ Data = (uint64_t)Name.getCXXLiteralIdentifier();
+ break;
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ case DeclarationName::CXXUsingDirective:
+ Data = 0;
+ break;
+ }
+}
+
+unsigned DeclarationNameKey::getHash() const {
+ llvm::FoldingSetNodeID ID;
+ ID.AddInteger(Kind);
+
+ switch (Kind) {
+ case DeclarationName::Identifier:
+ case DeclarationName::CXXLiteralOperatorName:
+ ID.AddString(((IdentifierInfo*)Data)->getName());
+ break;
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ ID.AddInteger(serialization::ComputeHash(Selector(Data)));
+ break;
+ case DeclarationName::CXXOperatorName:
+ ID.AddInteger((OverloadedOperatorKind)Data);
+ break;
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ case DeclarationName::CXXUsingDirective:
+ break;
+ }
+
+ return ID.ComputeHash();
+}
+
+ModuleFile *
+ASTDeclContextNameLookupTrait::ReadFileRef(const unsigned char *&d) {
+ using namespace llvm::support;
+ uint32_t ModuleFileID = endian::readNext<uint32_t, little, unaligned>(d);
+ return Reader.getLocalModuleFile(F, ModuleFileID);
+}
+
+std::pair<unsigned, unsigned>
+ASTDeclContextNameLookupTrait::ReadKeyDataLength(const unsigned char *&d) {
+ using namespace llvm::support;
+ unsigned KeyLen = endian::readNext<uint16_t, little, unaligned>(d);
+ unsigned DataLen = endian::readNext<uint16_t, little, unaligned>(d);
+ return std::make_pair(KeyLen, DataLen);
+}
+
+ASTDeclContextNameLookupTrait::internal_key_type
+ASTDeclContextNameLookupTrait::ReadKey(const unsigned char *d, unsigned) {
+ using namespace llvm::support;
+
+ auto Kind = (DeclarationName::NameKind)*d++;
+ uint64_t Data;
+ switch (Kind) {
+ case DeclarationName::Identifier:
+ Data = (uint64_t)Reader.getLocalIdentifier(
+ F, endian::readNext<uint32_t, little, unaligned>(d));
+ break;
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ Data =
+ (uint64_t)Reader.getLocalSelector(
+ F, endian::readNext<uint32_t, little, unaligned>(
+ d)).getAsOpaquePtr();
+ break;
+ case DeclarationName::CXXOperatorName:
+ Data = *d++; // OverloadedOperatorKind
+ break;
+ case DeclarationName::CXXLiteralOperatorName:
+ Data = (uint64_t)Reader.getLocalIdentifier(
+ F, endian::readNext<uint32_t, little, unaligned>(d));
+ break;
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ case DeclarationName::CXXUsingDirective:
+ Data = 0;
+ break;
+ }
+
+ return DeclarationNameKey(Kind, Data);
+}
+
+void ASTDeclContextNameLookupTrait::ReadDataInto(internal_key_type,
+ const unsigned char *d,
+ unsigned DataLen,
+ data_type_builder &Val) {
+ using namespace llvm::support;
+ for (unsigned NumDecls = DataLen / 4; NumDecls; --NumDecls) {
+ uint32_t LocalID = endian::readNext<uint32_t, little, unaligned>(d);
+ Val.insert(Reader.getGlobalDeclID(F, LocalID));
+ }
+}
+
+bool ASTReader::ReadLexicalDeclContextStorage(ModuleFile &M,
+ BitstreamCursor &Cursor,
+ uint64_t Offset,
+ DeclContext *DC) {
+ assert(Offset != 0);
+
+ SavedStreamPosition SavedPosition(Cursor);
+ Cursor.JumpToBit(Offset);
+
+ RecordData Record;
+ StringRef Blob;
+ unsigned Code = Cursor.ReadCode();
+ unsigned RecCode = Cursor.readRecord(Code, Record, &Blob);
+ if (RecCode != DECL_CONTEXT_LEXICAL) {
+ Error("Expected lexical block");
+ return true;
+ }
+
+ assert(!isa<TranslationUnitDecl>(DC) &&
+ "expected a TU_UPDATE_LEXICAL record for TU");
+ // If we are handling a C++ class template instantiation, we can see multiple
+ // lexical updates for the same record. It's important that we select only one
+ // of them, so that field numbering works properly. Just pick the first one we
+ // see.
+ auto &Lex = LexicalDecls[DC];
+ if (!Lex.first) {
+ Lex = std::make_pair(
+ &M, llvm::makeArrayRef(
+ reinterpret_cast<const llvm::support::unaligned_uint32_t *>(
+ Blob.data()),
+ Blob.size() / 4));
+ }
+ DC->setHasExternalLexicalStorage(true);
+ return false;
+}
+
+bool ASTReader::ReadVisibleDeclContextStorage(ModuleFile &M,
+ BitstreamCursor &Cursor,
+ uint64_t Offset,
+ DeclID ID) {
+ assert(Offset != 0);
+
+ SavedStreamPosition SavedPosition(Cursor);
+ Cursor.JumpToBit(Offset);
+
+ RecordData Record;
+ StringRef Blob;
+ unsigned Code = Cursor.ReadCode();
+ unsigned RecCode = Cursor.readRecord(Code, Record, &Blob);
+ if (RecCode != DECL_CONTEXT_VISIBLE) {
+ Error("Expected visible lookup table block");
+ return true;
+ }
+
+ // We can't safely determine the primary context yet, so delay attaching the
+ // lookup table until we're done with recursive deserialization.
+ auto *Data = (const unsigned char*)Blob.data();
+ PendingVisibleUpdates[ID].push_back(PendingVisibleUpdate{&M, Data});
+ return false;
+}
+
+void ASTReader::Error(StringRef Msg) {
+ Error(diag::err_fe_pch_malformed, Msg);
+ if (Context.getLangOpts().Modules && !Diags.isDiagnosticInFlight() &&
+ !PP.getHeaderSearchInfo().getModuleCachePath().empty()) {
+ Diag(diag::note_module_cache_path)
+ << PP.getHeaderSearchInfo().getModuleCachePath();
+ }
+}
+
+void ASTReader::Error(unsigned DiagID,
+ StringRef Arg1, StringRef Arg2) {
+ if (Diags.isDiagnosticInFlight())
+ Diags.SetDelayedDiagnostic(DiagID, Arg1, Arg2);
+ else
+ Diag(DiagID) << Arg1 << Arg2;
+}
+
+//===----------------------------------------------------------------------===//
+// Source Manager Deserialization
+//===----------------------------------------------------------------------===//
+
+/// \brief Read the line table in the source manager block.
+/// \returns true if there was an error.
+bool ASTReader::ParseLineTable(ModuleFile &F,
+ const RecordData &Record) {
+ unsigned Idx = 0;
+ LineTableInfo &LineTable = SourceMgr.getLineTable();
+
+ // Parse the file names
+ std::map<int, int> FileIDs;
+ for (unsigned I = 0; Record[Idx]; ++I) {
+ // Extract the file name
+ auto Filename = ReadPath(F, Record, Idx);
+ FileIDs[I] = LineTable.getLineTableFilenameID(Filename);
+ }
+ ++Idx;
+
+ // Parse the line entries
+ std::vector<LineEntry> Entries;
+ while (Idx < Record.size()) {
+ int FID = Record[Idx++];
+ assert(FID >= 0 && "Serialized line entries for non-local file.");
+    // Remap the FileID from the module file's local 1-based numbering into
+    // the global FileID space.
+ FID += F.SLocEntryBaseID - 1;
+
+ // Extract the line entries
+ unsigned NumEntries = Record[Idx++];
+ assert(NumEntries && "no line entries for file ID");
+ Entries.clear();
+ Entries.reserve(NumEntries);
+ for (unsigned I = 0; I != NumEntries; ++I) {
+ unsigned FileOffset = Record[Idx++];
+ unsigned LineNo = Record[Idx++];
+ int FilenameID = FileIDs[Record[Idx++]];
+ SrcMgr::CharacteristicKind FileKind
+ = (SrcMgr::CharacteristicKind)Record[Idx++];
+ unsigned IncludeOffset = Record[Idx++];
+ Entries.push_back(LineEntry::get(FileOffset, LineNo, FilenameID,
+ FileKind, IncludeOffset));
+ }
+ LineTable.AddEntry(FileID::get(FID), Entries);
+ }
+
+ return false;
+}
+
+/// \brief Read a source manager block
+bool ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
+ using namespace SrcMgr;
+
+ BitstreamCursor &SLocEntryCursor = F.SLocEntryCursor;
+
+ // Set the source-location entry cursor to the current position in
+ // the stream. This cursor will be used to read the contents of the
+ // source manager block initially, and then lazily read
+ // source-location entries as needed.
+ SLocEntryCursor = F.Stream;
+
+ // The stream itself is going to skip over the source manager block.
+ if (F.Stream.SkipBlock()) {
+ Error("malformed block record in AST file");
+ return true;
+ }
+
+ // Enter the source manager block.
+ if (SLocEntryCursor.EnterSubBlock(SOURCE_MANAGER_BLOCK_ID)) {
+ Error("malformed source manager block record in AST file");
+ return true;
+ }
+
+ RecordData Record;
+ while (true) {
+ llvm::BitstreamEntry E = SLocEntryCursor.advanceSkippingSubblocks();
+
+ switch (E.Kind) {
+ case llvm::BitstreamEntry::SubBlock: // Handled for us already.
+ case llvm::BitstreamEntry::Error:
+ Error("malformed block record in AST file");
+ return true;
+ case llvm::BitstreamEntry::EndBlock:
+ return false;
+ case llvm::BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ // Read a record.
+ Record.clear();
+ StringRef Blob;
+ switch (SLocEntryCursor.readRecord(E.ID, Record, &Blob)) {
+ default: // Default behavior: ignore.
+ break;
+
+ case SM_SLOC_FILE_ENTRY:
+ case SM_SLOC_BUFFER_ENTRY:
+ case SM_SLOC_EXPANSION_ENTRY:
+ // Once we hit one of the source location entries, we're done.
+ return false;
+ }
+ }
+}
+
+/// \brief If a header file is not found at the path that we expect it to be
+/// and the PCH file was moved from its original location, try to resolve the
+/// file by assuming that header+PCH were moved together and the header is in
+/// the same place relative to the PCH.
+static std::string
+resolveFileRelativeToOriginalDir(const std::string &Filename,
+ const std::string &OriginalDir,
+ const std::string &CurrDir) {
+ assert(OriginalDir != CurrDir &&
+ "No point trying to resolve the file if the PCH dir didn't change");
+ using namespace llvm::sys;
+ SmallString<128> filePath(Filename);
+ fs::make_absolute(filePath);
+ assert(path::is_absolute(OriginalDir));
+ SmallString<128> currPCHPath(CurrDir);
+
+ path::const_iterator fileDirI = path::begin(path::parent_path(filePath)),
+ fileDirE = path::end(path::parent_path(filePath));
+ path::const_iterator origDirI = path::begin(OriginalDir),
+ origDirE = path::end(OriginalDir);
+ // Skip the common path components from filePath and OriginalDir.
+ while (fileDirI != fileDirE && origDirI != origDirE &&
+ *fileDirI == *origDirI) {
+ ++fileDirI;
+ ++origDirI;
+ }
+ for (; origDirI != origDirE; ++origDirI)
+ path::append(currPCHPath, "..");
+ path::append(currPCHPath, fileDirI, fileDirE);
+ path::append(currPCHPath, path::filename(Filename));
+ return currPCHPath.str();
+}
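+
+// E.g. with OriginalDir == "/old/proj" and CurrDir == "/new/proj", the header
+// "/old/proj/include/a.h" is resolved to "/new/proj/include/a.h".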
+
+bool ASTReader::ReadSLocEntry(int ID) {
+ if (ID == 0)
+ return false;
+
+ if (unsigned(-ID) - 2 >= getTotalNumSLocs() || ID > 0) {
+ Error("source location entry ID out-of-range for AST file");
+ return true;
+ }
+
+ ModuleFile *F = GlobalSLocEntryMap.find(-ID)->second;
+ F->SLocEntryCursor.JumpToBit(F->SLocEntryOffsets[ID - F->SLocEntryBaseID]);
+ BitstreamCursor &SLocEntryCursor = F->SLocEntryCursor;
+ unsigned BaseOffset = F->SLocEntryBaseOffset;
+
+ ++NumSLocEntriesRead;
+ llvm::BitstreamEntry Entry = SLocEntryCursor.advance();
+ if (Entry.Kind != llvm::BitstreamEntry::Record) {
+ Error("incorrectly-formatted source location entry in AST file");
+ return true;
+ }
+
+ RecordData Record;
+ StringRef Blob;
+ switch (SLocEntryCursor.readRecord(Entry.ID, Record, &Blob)) {
+ default:
+ Error("incorrectly-formatted source location entry in AST file");
+ return true;
+
+ case SM_SLOC_FILE_ENTRY: {
+ // We will detect whether a file changed and return 'Failure' for it, but
+ // we will also try to fail gracefully by setting up the SLocEntry.
+ unsigned InputID = Record[4];
+ InputFile IF = getInputFile(*F, InputID);
+ const FileEntry *File = IF.getFile();
+ bool OverriddenBuffer = IF.isOverridden();
+
+    // Note that we only check whether a File was returned. If it was
+    // out-of-date, we have already complained, but we will continue creating
+    // a FileID to recover gracefully.
+ if (!File)
+ return true;
+
+ SourceLocation IncludeLoc = ReadSourceLocation(*F, Record[1]);
+ if (IncludeLoc.isInvalid() && F->Kind != MK_MainFile) {
+ // This is the module's main file.
+ IncludeLoc = getImportLocation(F);
+ }
+ SrcMgr::CharacteristicKind
+ FileCharacter = (SrcMgr::CharacteristicKind)Record[2];
+ FileID FID = SourceMgr.createFileID(File, IncludeLoc, FileCharacter,
+ ID, BaseOffset + Record[0]);
+ SrcMgr::FileInfo &FileInfo =
+ const_cast<SrcMgr::FileInfo&>(SourceMgr.getSLocEntry(FID).getFile());
+ FileInfo.NumCreatedFIDs = Record[5];
+ if (Record[3])
+ FileInfo.setHasLineDirectives();
+
+ const DeclID *FirstDecl = F->FileSortedDecls + Record[6];
+ unsigned NumFileDecls = Record[7];
+ if (NumFileDecls) {
+ assert(F->FileSortedDecls && "FILE_SORTED_DECLS not encountered yet ?");
+ FileDeclIDs[FID] = FileDeclsInfo(F, llvm::makeArrayRef(FirstDecl,
+ NumFileDecls));
+ }
+
+ const SrcMgr::ContentCache *ContentCache
+ = SourceMgr.getOrCreateContentCache(File,
+ /*isSystemFile=*/FileCharacter != SrcMgr::C_User);
+ if (OverriddenBuffer && !ContentCache->BufferOverridden &&
+ ContentCache->ContentsEntry == ContentCache->OrigEntry &&
+ !ContentCache->getRawBuffer()) {
+ unsigned Code = SLocEntryCursor.ReadCode();
+ Record.clear();
+ unsigned RecCode = SLocEntryCursor.readRecord(Code, Record, &Blob);
+
+ if (RecCode != SM_SLOC_BUFFER_BLOB) {
+ Error("AST record has invalid code");
+ return true;
+ }
+
+ std::unique_ptr<llvm::MemoryBuffer> Buffer
+ = llvm::MemoryBuffer::getMemBuffer(Blob.drop_back(1), File->getName());
+ SourceMgr.overrideFileContents(File, std::move(Buffer));
+ }
+
+ break;
+ }
+
+ case SM_SLOC_BUFFER_ENTRY: {
+ const char *Name = Blob.data();
+ unsigned Offset = Record[0];
+ SrcMgr::CharacteristicKind
+ FileCharacter = (SrcMgr::CharacteristicKind)Record[2];
+ SourceLocation IncludeLoc = ReadSourceLocation(*F, Record[1]);
+ if (IncludeLoc.isInvalid() &&
+ (F->Kind == MK_ImplicitModule || F->Kind == MK_ExplicitModule)) {
+ IncludeLoc = getImportLocation(F);
+ }
+ unsigned Code = SLocEntryCursor.ReadCode();
+ Record.clear();
+ unsigned RecCode
+ = SLocEntryCursor.readRecord(Code, Record, &Blob);
+
+ if (RecCode != SM_SLOC_BUFFER_BLOB) {
+ Error("AST record has invalid code");
+ return true;
+ }
+
+ std::unique_ptr<llvm::MemoryBuffer> Buffer =
+ llvm::MemoryBuffer::getMemBuffer(Blob.drop_back(1), Name);
+ SourceMgr.createFileID(std::move(Buffer), FileCharacter, ID,
+ BaseOffset + Offset, IncludeLoc);
+ break;
+ }
+
+ case SM_SLOC_EXPANSION_ENTRY: {
+ SourceLocation SpellingLoc = ReadSourceLocation(*F, Record[1]);
+ SourceMgr.createExpansionLoc(SpellingLoc,
+ ReadSourceLocation(*F, Record[2]),
+ ReadSourceLocation(*F, Record[3]),
+ Record[4],
+ ID,
+ BaseOffset + Record[0]);
+ break;
+ }
+ }
+
+ return false;
+}
+
+std::pair<SourceLocation, StringRef> ASTReader::getModuleImportLoc(int ID) {
+ if (ID == 0)
+ return std::make_pair(SourceLocation(), "");
+
+ if (unsigned(-ID) - 2 >= getTotalNumSLocs() || ID > 0) {
+ Error("source location entry ID out-of-range for AST file");
+ return std::make_pair(SourceLocation(), "");
+ }
+
+ // Find which module file this entry lands in.
+ ModuleFile *M = GlobalSLocEntryMap.find(-ID)->second;
+ if (M->Kind != MK_ImplicitModule && M->Kind != MK_ExplicitModule)
+ return std::make_pair(SourceLocation(), "");
+
+ // FIXME: Can we map this down to a particular submodule? That would be
+ // ideal.
+ return std::make_pair(M->ImportLoc, StringRef(M->ModuleName));
+}
+
+/// \brief Find the location where the module F is imported.
+SourceLocation ASTReader::getImportLocation(ModuleFile *F) {
+ if (F->ImportLoc.isValid())
+ return F->ImportLoc;
+
+ // Otherwise we have a PCH. It's considered to be "imported" at the first
+ // location of its includer.
+ if (F->ImportedBy.empty() || !F->ImportedBy[0]) {
+ // Main file is the importer.
+ assert(SourceMgr.getMainFileID().isValid() && "missing main file");
+ return SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
+ }
+ return F->ImportedBy[0]->FirstLoc;
+}
+
+/// ReadBlockAbbrevs - Enter a subblock of the specified BlockID with the
+/// specified cursor. Read the abbreviations that are at the top of the block
+/// and then leave the cursor pointing into the block.
+bool ASTReader::ReadBlockAbbrevs(BitstreamCursor &Cursor, unsigned BlockID) {
+ if (Cursor.EnterSubBlock(BlockID))
+ return true;
+
+ while (true) {
+ uint64_t Offset = Cursor.GetCurrentBitNo();
+ unsigned Code = Cursor.ReadCode();
+
+ // We expect all abbrevs to be at the start of the block.
+ if (Code != llvm::bitc::DEFINE_ABBREV) {
+ Cursor.JumpToBit(Offset);
+ return false;
+ }
+ Cursor.ReadAbbrevRecord();
+ }
+}
+
+Token ASTReader::ReadToken(ModuleFile &F, const RecordDataImpl &Record,
+ unsigned &Idx) {
+ Token Tok;
+ Tok.startToken();
+ Tok.setLocation(ReadSourceLocation(F, Record, Idx));
+ Tok.setLength(Record[Idx++]);
+ if (IdentifierInfo *II = getLocalIdentifier(F, Record[Idx++]))
+ Tok.setIdentifierInfo(II);
+ Tok.setKind((tok::TokenKind)Record[Idx++]);
+ Tok.setFlag((Token::TokenFlags)Record[Idx++]);
+ return Tok;
+}
+
+MacroInfo *ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) {
+ BitstreamCursor &Stream = F.MacroCursor;
+
+ // Keep track of where we are in the stream, then jump back there
+ // after reading this macro.
+ SavedStreamPosition SavedPosition(Stream);
+
+ Stream.JumpToBit(Offset);
+ RecordData Record;
+ SmallVector<IdentifierInfo*, 16> MacroArgs;
+ MacroInfo *Macro = nullptr;
+
+ while (true) {
+ // Advance to the next record, but if we get to the end of the block, don't
+ // pop it (removing all the abbreviations from the cursor) since we want to
+ // be able to reseek within the block and read entries.
+ unsigned Flags = BitstreamCursor::AF_DontPopBlockAtEnd;
+ llvm::BitstreamEntry Entry = Stream.advanceSkippingSubblocks(Flags);
+
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::SubBlock: // Handled for us already.
+ case llvm::BitstreamEntry::Error:
+ Error("malformed block record in AST file");
+ return Macro;
+ case llvm::BitstreamEntry::EndBlock:
+ return Macro;
+ case llvm::BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ // Read a record.
+ Record.clear();
+ PreprocessorRecordTypes RecType =
+ (PreprocessorRecordTypes)Stream.readRecord(Entry.ID, Record);
+ switch (RecType) {
+ case PP_MODULE_MACRO:
+ case PP_MACRO_DIRECTIVE_HISTORY:
+ return Macro;
+
+ case PP_MACRO_OBJECT_LIKE:
+ case PP_MACRO_FUNCTION_LIKE: {
+ // If we already have a macro, that means that we've hit the end
+ // of the definition of the macro we were looking for. We're
+ // done.
+ if (Macro)
+ return Macro;
+
+ unsigned NextIndex = 1; // Skip identifier ID.
+ SubmoduleID SubModID = getGlobalSubmoduleID(F, Record[NextIndex++]);
+ SourceLocation Loc = ReadSourceLocation(F, Record, NextIndex);
+ MacroInfo *MI = PP.AllocateDeserializedMacroInfo(Loc, SubModID);
+ MI->setDefinitionEndLoc(ReadSourceLocation(F, Record, NextIndex));
+ MI->setIsUsed(Record[NextIndex++]);
+ MI->setUsedForHeaderGuard(Record[NextIndex++]);
+
+ if (RecType == PP_MACRO_FUNCTION_LIKE) {
+ // Decode function-like macro info.
+ bool isC99VarArgs = Record[NextIndex++];
+ bool isGNUVarArgs = Record[NextIndex++];
+ bool hasCommaPasting = Record[NextIndex++];
+ MacroArgs.clear();
+ unsigned NumArgs = Record[NextIndex++];
+ for (unsigned i = 0; i != NumArgs; ++i)
+ MacroArgs.push_back(getLocalIdentifier(F, Record[NextIndex++]));
+
+ // Install function-like macro info.
+ MI->setIsFunctionLike();
+ if (isC99VarArgs) MI->setIsC99Varargs();
+ if (isGNUVarArgs) MI->setIsGNUVarargs();
+ if (hasCommaPasting) MI->setHasCommaPasting();
+ MI->setArgumentList(MacroArgs, PP.getPreprocessorAllocator());
+ }
+
+ // Remember that we saw this macro last so that we add the tokens that
+ // form its body to it.
+ Macro = MI;
+
+ if (NextIndex + 1 == Record.size() && PP.getPreprocessingRecord() &&
+ Record[NextIndex]) {
+ // We have a macro definition. Register the association.
+ PreprocessedEntityID
+ GlobalID = getGlobalPreprocessedEntityID(F, Record[NextIndex]);
+ PreprocessingRecord &PPRec = *PP.getPreprocessingRecord();
+ PreprocessingRecord::PPEntityID PPID =
+ PPRec.getPPEntityID(GlobalID - 1, /*isLoaded=*/true);
+ MacroDefinitionRecord *PPDef = cast_or_null<MacroDefinitionRecord>(
+ PPRec.getPreprocessedEntity(PPID));
+ if (PPDef)
+ PPRec.RegisterMacroDefinition(Macro, PPDef);
+ }
+
+ ++NumMacrosRead;
+ break;
+ }
+
+ case PP_TOKEN: {
+ // If we see a TOKEN before a PP_MACRO_*, then the file is
+ // erroneous; just pretend we didn't see this.
+ if (!Macro) break;
+
+ unsigned Idx = 0;
+ Token Tok = ReadToken(F, Record, Idx);
+ Macro->AddTokenToBody(Tok);
+ break;
+ }
+ }
+ }
+}
+
+PreprocessedEntityID
+ASTReader::getGlobalPreprocessedEntityID(ModuleFile &M, unsigned LocalID) const {
+ ContinuousRangeMap<uint32_t, int, 2>::const_iterator
+ I = M.PreprocessedEntityRemap.find(LocalID - NUM_PREDEF_PP_ENTITY_IDS);
+ assert(I != M.PreprocessedEntityRemap.end()
+ && "Invalid index into preprocessed entity index remap");
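+ // The remap stores one delta per contiguous range of local IDs, so
+ // rebasing a local ID into the global numbering is a single addition.
+ // For illustration (made-up numbers): if this module's entities start at
+ // local ID 1 and global ID 501, the stored delta is 500.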
+
+ return LocalID + I->second;
+}
+
+unsigned HeaderFileInfoTrait::ComputeHash(internal_key_ref ikey) {
+ return llvm::hash_combine(ikey.Size, ikey.ModTime);
+}
+
+HeaderFileInfoTrait::internal_key_type
+HeaderFileInfoTrait::GetInternalKey(const FileEntry *FE) {
+ internal_key_type ikey = {FE->getSize(),
+ M.HasTimestamps ? FE->getModificationTime() : 0,
+ FE->getName(), /*Imported*/ false};
+ return ikey;
+}
+
+bool HeaderFileInfoTrait::EqualKey(internal_key_ref a, internal_key_ref b) {
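+ // A ModTime of 0 means no timestamp was serialized (GetInternalKey above
+ // stores 0 when the module was built without timestamps), so modification
+ // times only take part in the comparison when both keys carry one.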
+ if (a.Size != b.Size || (a.ModTime && b.ModTime && a.ModTime != b.ModTime))
+ return false;
+
+ if (llvm::sys::path::is_absolute(a.Filename) &&
+ strcmp(a.Filename, b.Filename) == 0)
+ return true;
+
+ // Determine whether the actual files are equivalent.
+ FileManager &FileMgr = Reader.getFileManager();
+ auto GetFile = [&](const internal_key_type &Key) -> const FileEntry* {
+ if (!Key.Imported)
+ return FileMgr.getFile(Key.Filename);
+
+ std::string Resolved = Key.Filename;
+ Reader.ResolveImportedPath(M, Resolved);
+ return FileMgr.getFile(Resolved);
+ };
+
+ const FileEntry *FEA = GetFile(a);
+ const FileEntry *FEB = GetFile(b);
+ return FEA && FEA == FEB;
+}
+
+std::pair<unsigned, unsigned>
+HeaderFileInfoTrait::ReadKeyDataLength(const unsigned char*& d) {
+ using namespace llvm::support;
+ unsigned KeyLen = (unsigned) endian::readNext<uint16_t, little, unaligned>(d);
+ unsigned DataLen = (unsigned) *d++;
+ return std::make_pair(KeyLen, DataLen);
+}
+
+HeaderFileInfoTrait::internal_key_type
+HeaderFileInfoTrait::ReadKey(const unsigned char *d, unsigned) {
+ using namespace llvm::support;
+ internal_key_type ikey;
+ ikey.Size = off_t(endian::readNext<uint64_t, little, unaligned>(d));
+ ikey.ModTime = time_t(endian::readNext<uint64_t, little, unaligned>(d));
+ ikey.Filename = (const char *)d;
+ ikey.Imported = true;
+ return ikey;
+}
+
+HeaderFileInfoTrait::data_type
+HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
+ unsigned DataLen) {
+ const unsigned char *End = d + DataLen;
+ using namespace llvm::support;
+ HeaderFileInfo HFI;
+ unsigned Flags = *d++;
+ // FIXME: Refactor with mergeHeaderFileInfo in HeaderSearch.cpp.
+ HFI.isImport |= (Flags >> 4) & 0x01;
+ HFI.isPragmaOnce |= (Flags >> 3) & 0x01;
+ HFI.DirInfo = (Flags >> 1) & 0x03;
+ HFI.IndexHeaderMapHeader = Flags & 0x01;
+ // FIXME: Find a better way to handle this. Maybe just store a
+ // "has been included" flag?
+ HFI.NumIncludes = std::max(endian::readNext<uint16_t, little, unaligned>(d),
+ HFI.NumIncludes);
+ HFI.ControllingMacroID = Reader.getGlobalIdentifierID(
+ M, endian::readNext<uint32_t, little, unaligned>(d));
+ if (unsigned FrameworkOffset =
+ endian::readNext<uint32_t, little, unaligned>(d)) {
+ // The framework offset is 1 greater than the actual offset,
+ // since 0 is used as an indicator for "no framework name".
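+ // For illustration: a stored offset of 1 names the string beginning at
+ // FrameworkStrings[0].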
+ StringRef FrameworkName(FrameworkStrings + FrameworkOffset - 1);
+ HFI.Framework = HS->getUniqueFrameworkName(FrameworkName);
+ }
+
+ assert((End - d) % 4 == 0 &&
+ "Wrong data length in HeaderFileInfo deserialization");
+ while (d != End) {
+ uint32_t LocalSMID = endian::readNext<uint32_t, little, unaligned>(d);
+ auto HeaderRole = static_cast<ModuleMap::ModuleHeaderRole>(LocalSMID & 3);
+ LocalSMID >>= 2;
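+ // For illustration (made-up value): a stored word of 0x0D decodes to
+ // role (0x0D & 3) == 1 and local submodule ID (0x0D >> 2) == 3.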
+
+ // This header is part of a module. Associate it with the module to enable
+ // implicit module import.
+ SubmoduleID GlobalSMID = Reader.getGlobalSubmoduleID(M, LocalSMID);
+ Module *Mod = Reader.getSubmodule(GlobalSMID);
+ FileManager &FileMgr = Reader.getFileManager();
+ ModuleMap &ModMap =
+ Reader.getPreprocessor().getHeaderSearchInfo().getModuleMap();
+
+ std::string Filename = key.Filename;
+ if (key.Imported)
+ Reader.ResolveImportedPath(M, Filename);
+ // FIXME: This is not always the right filename-as-written, but we're not
+ // going to use this information to rebuild the module, so it doesn't make
+ // a lot of difference.
+ Module::Header H = { key.Filename, FileMgr.getFile(Filename) };
+ ModMap.addHeader(Mod, H, HeaderRole, /*Imported*/true);
+ HFI.isModuleHeader |= !(HeaderRole & ModuleMap::TextualHeader);
+ }
+
+ // This HeaderFileInfo was externally loaded.
+ HFI.External = true;
+ HFI.IsValid = true;
+ return HFI;
+}
+
+void ASTReader::addPendingMacro(IdentifierInfo *II,
+ ModuleFile *M,
+ uint64_t MacroDirectivesOffset) {
+ assert(NumCurrentElementsDeserializing > 0 && "Missing deserialization guard");
+ PendingMacroIDs[II].push_back(PendingMacroInfo(M, MacroDirectivesOffset));
+}
+
+void ASTReader::ReadDefinedMacros() {
+ // Note that we are loading defined macros.
+ Deserializing Macros(this);
+
+ for (auto &I : llvm::reverse(ModuleMgr)) {
+ BitstreamCursor &MacroCursor = I->MacroCursor;
+
+ // If there was no preprocessor block, skip this file.
+ if (!MacroCursor.getBitStreamReader())
+ continue;
+
+ BitstreamCursor Cursor = MacroCursor;
+ Cursor.JumpToBit(I->MacroStartOffset);
+
+ RecordData Record;
+ while (true) {
+ llvm::BitstreamEntry E = Cursor.advanceSkippingSubblocks();
+
+ switch (E.Kind) {
+ case llvm::BitstreamEntry::SubBlock: // Handled for us already.
+ case llvm::BitstreamEntry::Error:
+ Error("malformed block record in AST file");
+ return;
+ case llvm::BitstreamEntry::EndBlock:
+ goto NextCursor;
+
+ case llvm::BitstreamEntry::Record:
+ Record.clear();
+ switch (Cursor.readRecord(E.ID, Record)) {
+ default: // Default behavior: ignore.
+ break;
+
+ case PP_MACRO_OBJECT_LIKE:
+ case PP_MACRO_FUNCTION_LIKE:
+ getLocalIdentifier(*I, Record[0]);
+ break;
+
+ case PP_TOKEN:
+ // Ignore tokens.
+ break;
+ }
+ break;
+ }
+ }
+ NextCursor: ;
+ }
+}
+
+namespace {
+ /// \brief Visitor class used to look up identifiers in an AST file.
+ class IdentifierLookupVisitor {
+ StringRef Name;
+ unsigned NameHash;
+ unsigned PriorGeneration;
+ unsigned &NumIdentifierLookups;
+ unsigned &NumIdentifierLookupHits;
+ IdentifierInfo *Found;
+
+ public:
+ IdentifierLookupVisitor(StringRef Name, unsigned PriorGeneration,
+ unsigned &NumIdentifierLookups,
+ unsigned &NumIdentifierLookupHits)
+ : Name(Name), NameHash(ASTIdentifierLookupTrait::ComputeHash(Name)),
+ PriorGeneration(PriorGeneration),
+ NumIdentifierLookups(NumIdentifierLookups),
+ NumIdentifierLookupHits(NumIdentifierLookupHits),
+ Found()
+ {
+ }
+
+ bool operator()(ModuleFile &M) {
+ // If we've already searched this module file, skip it now.
+ if (M.Generation <= PriorGeneration)
+ return true;
+
+ ASTIdentifierLookupTable *IdTable
+ = (ASTIdentifierLookupTable *)M.IdentifierLookupTable;
+ if (!IdTable)
+ return false;
+
+ ASTIdentifierLookupTrait Trait(IdTable->getInfoObj().getReader(), M,
+ Found);
+ ++NumIdentifierLookups;
+ ASTIdentifierLookupTable::iterator Pos =
+ IdTable->find_hashed(Name, NameHash, &Trait);
+ if (Pos == IdTable->end())
+ return false;
+
+ // Dereferencing the iterator has the effect of building the
+ // IdentifierInfo node and populating it with the various
+ // declarations it needs.
+ ++NumIdentifierLookupHits;
+ Found = *Pos;
+ return true;
+ }
+
+ /// \brief Retrieve the identifier info found within the module
+ /// files.
+ IdentifierInfo *getIdentifierInfo() const { return Found; }
+ };
+}
+
+void ASTReader::updateOutOfDateIdentifier(IdentifierInfo &II) {
+ // Note that we are loading an identifier.
+ Deserializing AnIdentifier(this);
+
+ unsigned PriorGeneration = 0;
+ if (getContext().getLangOpts().Modules)
+ PriorGeneration = IdentifierGeneration[&II];
+
+ // If there is a global index, look there first to determine which modules
+ // provably do not have any results for this identifier.
+ GlobalModuleIndex::HitSet Hits;
+ GlobalModuleIndex::HitSet *HitsPtr = nullptr;
+ if (!loadGlobalIndex()) {
+ if (GlobalIndex->lookupIdentifier(II.getName(), Hits)) {
+ HitsPtr = &Hits;
+ }
+ }
+
+ IdentifierLookupVisitor Visitor(II.getName(), PriorGeneration,
+ NumIdentifierLookups,
+ NumIdentifierLookupHits);
+ ModuleMgr.visit(Visitor, HitsPtr);
+ markIdentifierUpToDate(&II);
+}
+
+void ASTReader::markIdentifierUpToDate(IdentifierInfo *II) {
+ if (!II)
+ return;
+
+ II->setOutOfDate(false);
+
+ // Update the generation for this identifier.
+ if (getContext().getLangOpts().Modules)
+ IdentifierGeneration[II] = getGeneration();
+}
+
+void ASTReader::resolvePendingMacro(IdentifierInfo *II,
+ const PendingMacroInfo &PMInfo) {
+ ModuleFile &M = *PMInfo.M;
+
+ BitstreamCursor &Cursor = M.MacroCursor;
+ SavedStreamPosition SavedPosition(Cursor);
+ Cursor.JumpToBit(PMInfo.MacroDirectivesOffset);
+
+ struct ModuleMacroRecord {
+ SubmoduleID SubModID;
+ MacroInfo *MI;
+ SmallVector<SubmoduleID, 8> Overrides;
+ };
+ llvm::SmallVector<ModuleMacroRecord, 8> ModuleMacros;
+
+ // We expect to see a sequence of PP_MODULE_MACRO records listing exported
+ // macros, followed by a PP_MACRO_DIRECTIVE_HISTORY record with the complete
+ // macro history.
+ RecordData Record;
+ while (true) {
+ llvm::BitstreamEntry Entry =
+ Cursor.advance(BitstreamCursor::AF_DontPopBlockAtEnd);
+ if (Entry.Kind != llvm::BitstreamEntry::Record) {
+ Error("malformed block record in AST file");
+ return;
+ }
+
+ Record.clear();
+ switch ((PreprocessorRecordTypes)Cursor.readRecord(Entry.ID, Record)) {
+ case PP_MACRO_DIRECTIVE_HISTORY:
+ break;
+
+ case PP_MODULE_MACRO: {
+ ModuleMacros.push_back(ModuleMacroRecord());
+ auto &Info = ModuleMacros.back();
+ Info.SubModID = getGlobalSubmoduleID(M, Record[0]);
+ Info.MI = getMacro(getGlobalMacroID(M, Record[1]));
+ for (int I = 2, N = Record.size(); I != N; ++I)
+ Info.Overrides.push_back(getGlobalSubmoduleID(M, Record[I]));
+ continue;
+ }
+
+ default:
+ Error("malformed block record in AST file");
+ return;
+ }
+
+ // We found the macro directive history; that's the last record
+ // for this macro.
+ break;
+ }
+
+ // Module macros are listed in reverse dependency order.
+ {
+ std::reverse(ModuleMacros.begin(), ModuleMacros.end());
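+ // After the reversal we visit macros in dependency order, so each
+ // override named in MMR.Overrides has already been registered with the
+ // preprocessor by the time we look it up below.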
+ llvm::SmallVector<ModuleMacro*, 8> Overrides;
+ for (auto &MMR : ModuleMacros) {
+ Overrides.clear();
+ for (unsigned ModID : MMR.Overrides) {
+ Module *Mod = getSubmodule(ModID);
+ auto *Macro = PP.getModuleMacro(Mod, II);
+ assert(Macro && "missing definition for overridden macro");
+ Overrides.push_back(Macro);
+ }
+
+ bool Inserted = false;
+ Module *Owner = getSubmodule(MMR.SubModID);
+ PP.addModuleMacro(Owner, II, MMR.MI, Overrides, Inserted);
+ }
+ }
+
+ // Don't read the directive history for a module; we don't have anywhere
+ // to put it.
+ if (M.Kind == MK_ImplicitModule || M.Kind == MK_ExplicitModule)
+ return;
+
+ // Deserialize the macro directives history in reverse source-order.
+ MacroDirective *Latest = nullptr, *Earliest = nullptr;
+ unsigned Idx = 0, N = Record.size();
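+ // The record stores directives newest-first: the first directive we
+ // decode becomes Latest, and each subsequent (older) one is chained in
+ // as the previous directive of the one decoded before it.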
+ while (Idx < N) {
+ MacroDirective *MD = nullptr;
+ SourceLocation Loc = ReadSourceLocation(M, Record, Idx);
+ MacroDirective::Kind K = (MacroDirective::Kind)Record[Idx++];
+ switch (K) {
+ case MacroDirective::MD_Define: {
+ MacroInfo *MI = getMacro(getGlobalMacroID(M, Record[Idx++]));
+ MD = PP.AllocateDefMacroDirective(MI, Loc);
+ break;
+ }
+ case MacroDirective::MD_Undefine: {
+ MD = PP.AllocateUndefMacroDirective(Loc);
+ break;
+ }
+ case MacroDirective::MD_Visibility: {
+ bool isPublic = Record[Idx++];
+ MD = PP.AllocateVisibilityMacroDirective(Loc, isPublic);
+ break;
+ }
+ }
+
+ if (!Latest)
+ Latest = MD;
+ if (Earliest)
+ Earliest->setPrevious(MD);
+ Earliest = MD;
+ }
+
+ if (Latest)
+ PP.setLoadedMacroDirective(II, Latest);
+}
+
+ASTReader::InputFileInfo
+ASTReader::readInputFileInfo(ModuleFile &F, unsigned ID) {
+ // Go find this input file.
+ BitstreamCursor &Cursor = F.InputFilesCursor;
+ SavedStreamPosition SavedPosition(Cursor);
+ Cursor.JumpToBit(F.InputFileOffsets[ID-1]);
+
+ unsigned Code = Cursor.ReadCode();
+ RecordData Record;
+ StringRef Blob;
+
+ unsigned Result = Cursor.readRecord(Code, Record, &Blob);
+ assert(static_cast<InputFileRecordTypes>(Result) == INPUT_FILE &&
+ "invalid record type for input file");
+ (void)Result;
+
+ assert(Record[0] == ID && "Bogus stored ID or offset");
+ InputFileInfo R;
+ R.StoredSize = static_cast<off_t>(Record[1]);
+ R.StoredTime = static_cast<time_t>(Record[2]);
+ R.Overridden = static_cast<bool>(Record[3]);
+ R.Transient = static_cast<bool>(Record[4]);
+ R.Filename = Blob;
+ ResolveImportedPath(F, R.Filename);
+ return R;
+}
+
+InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
+ // If this ID is bogus, just return an empty input file.
+ if (ID == 0 || ID > F.InputFilesLoaded.size())
+ return InputFile();
+
+ // If we've already loaded this input file, return it.
+ if (F.InputFilesLoaded[ID-1].getFile())
+ return F.InputFilesLoaded[ID-1];
+
+ if (F.InputFilesLoaded[ID-1].isNotFound())
+ return InputFile();
+
+ // Go find this input file.
+ BitstreamCursor &Cursor = F.InputFilesCursor;
+ SavedStreamPosition SavedPosition(Cursor);
+ Cursor.JumpToBit(F.InputFileOffsets[ID-1]);
+
+ InputFileInfo FI = readInputFileInfo(F, ID);
+ off_t StoredSize = FI.StoredSize;
+ time_t StoredTime = FI.StoredTime;
+ bool Overridden = FI.Overridden;
+ bool Transient = FI.Transient;
+ StringRef Filename = FI.Filename;
+
+ const FileEntry *File = FileMgr.getFile(Filename, /*OpenFile=*/false);
+
+ // If we didn't find the file, resolve it relative to the
+ // original directory from which this AST file was created.
+ if (File == nullptr && !F.OriginalDir.empty() && !CurrentDir.empty() &&
+ F.OriginalDir != CurrentDir) {
+ std::string Resolved = resolveFileRelativeToOriginalDir(Filename,
+ F.OriginalDir,
+ CurrentDir);
+ if (!Resolved.empty())
+ File = FileMgr.getFile(Resolved);
+ }
+
+ // For an overridden file, create a virtual file with the stored
+ // size/timestamp.
+ if ((Overridden || Transient) && File == nullptr)
+ File = FileMgr.getVirtualFile(Filename, StoredSize, StoredTime);
+
+ if (File == nullptr) {
+ if (Complain) {
+ std::string ErrorStr = "could not find file '";
+ ErrorStr += Filename;
+ ErrorStr += "' referenced by AST file '";
+ ErrorStr += F.FileName;
+ ErrorStr += "'";
+ Error(ErrorStr.c_str());
+ }
+ // Record that we didn't find the file.
+ F.InputFilesLoaded[ID-1] = InputFile::getNotFound();
+ return InputFile();
+ }
+
+ // Check if there was a request to override the contents of the file
+ // that was part of the precompiled header. Overriding such a file
+ // can lead to problems when lexing using the source locations from the
+ // PCH.
+ SourceManager &SM = getSourceManager();
+ // FIXME: Reject if the overrides are different.
+ if ((!Overridden && !Transient) && SM.isFileOverridden(File)) {
+ if (Complain)
+ Error(diag::err_fe_pch_file_overridden, Filename);
+ // After emitting the diagnostic, recover by disabling the override so
+ // that the original file will be used.
+ //
+ // FIXME: This recovery is just as broken as the original state; there may
+ // be another precompiled module that's using the overridden contents, or
+ // we might be half way through parsing it. Instead, we should treat the
+ // overridden contents as belonging to a separate FileEntry.
+ SM.disableFileContentsOverride(File);
+ // The FileEntry is a virtual file entry with the size of the contents
+ // that would override the original contents. Set it to the original's
+ // size/time.
+ FileMgr.modifyFileEntry(const_cast<FileEntry*>(File),
+ StoredSize, StoredTime);
+ }
+
+ bool IsOutOfDate = false;
+
+ // For an overridden file, there is nothing to validate.
+ if (!Overridden && //
+ (StoredSize != File->getSize() ||
+#if defined(LLVM_ON_WIN32)
+ false
+#else
+ // In our regression testing, the Windows file system seems to
+ // have inconsistent modification times that sometimes
+ // erroneously trigger this error-handling path.
+ //
+ // FIXME: This probably also breaks HeaderFileInfo lookups on Windows.
+ (StoredTime && StoredTime != File->getModificationTime() &&
+ !DisableValidation)
+#endif
+ )) {
+ if (Complain) {
+ // Build a list of the PCH imports that got us here (in reverse).
+ SmallVector<ModuleFile *, 4> ImportStack(1, &F);
+ while (ImportStack.back()->ImportedBy.size() > 0)
+ ImportStack.push_back(ImportStack.back()->ImportedBy[0]);
+
+ // The top-level PCH is stale.
+ StringRef TopLevelPCHName(ImportStack.back()->FileName);
+ Error(diag::err_fe_pch_file_modified, Filename, TopLevelPCHName);
+
+ // Print the import stack.
+ if (ImportStack.size() > 1 && !Diags.isDiagnosticInFlight()) {
+ Diag(diag::note_pch_required_by)
+ << Filename << ImportStack[0]->FileName;
+ for (unsigned I = 1; I < ImportStack.size(); ++I)
+ Diag(diag::note_pch_required_by)
+ << ImportStack[I-1]->FileName << ImportStack[I]->FileName;
+ }
+
+ if (!Diags.isDiagnosticInFlight())
+ Diag(diag::note_pch_rebuild_required) << TopLevelPCHName;
+ }
+
+ IsOutOfDate = true;
+ }
+ // FIXME: If the file is overridden and we've already opened it,
+ // issue an error (or split it into a separate FileEntry).
+
+ InputFile IF = InputFile(File, Overridden || Transient, IsOutOfDate);
+
+ // Note that we've loaded this input file.
+ F.InputFilesLoaded[ID-1] = IF;
+ return IF;
+}
+
+/// \brief If we are loading a relocatable PCH or module file, and the filename
+/// is not an absolute path, add the system or module root to the beginning of
+/// the file name.
+void ASTReader::ResolveImportedPath(ModuleFile &M, std::string &Filename) {
+ // Resolve relative to the base directory, if we have one.
+ if (!M.BaseDirectory.empty())
+ return ResolveImportedPath(Filename, M.BaseDirectory);
+}
+
+void ASTReader::ResolveImportedPath(std::string &Filename, StringRef Prefix) {
+ if (Filename.empty() || llvm::sys::path::is_absolute(Filename))
+ return;
+
+ SmallString<128> Buffer;
+ llvm::sys::path::append(Buffer, Prefix, Filename);
+ Filename.assign(Buffer.begin(), Buffer.end());
+}
+
+static bool isDiagnosedResult(ASTReader::ASTReadResult ARR, unsigned Caps) {
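+ // A result counts as "diagnosed" when the client cannot recover from it;
+ // e.g. OutOfDate is diagnosed unless the caller passed ARR_OutOfDate in
+ // its capability bits.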
+ switch (ARR) {
+ case ASTReader::Failure: return true;
+ case ASTReader::Missing: return !(Caps & ASTReader::ARR_Missing);
+ case ASTReader::OutOfDate: return !(Caps & ASTReader::ARR_OutOfDate);
+ case ASTReader::VersionMismatch: return !(Caps & ASTReader::ARR_VersionMismatch);
+ case ASTReader::ConfigurationMismatch:
+ return !(Caps & ASTReader::ARR_ConfigurationMismatch);
+ case ASTReader::HadErrors: return true;
+ case ASTReader::Success: return false;
+ }
+
+ llvm_unreachable("unknown ASTReadResult");
+}
+
+ASTReader::ASTReadResult ASTReader::ReadOptionsBlock(
+ BitstreamCursor &Stream, unsigned ClientLoadCapabilities,
+ bool AllowCompatibleConfigurationMismatch, ASTReaderListener &Listener,
+ std::string &SuggestedPredefines) {
+ if (Stream.EnterSubBlock(OPTIONS_BLOCK_ID))
+ return Failure;
+
+ // Read all of the records in the options block.
+ RecordData Record;
+ ASTReadResult Result = Success;
+ while (true) {
+ llvm::BitstreamEntry Entry = Stream.advance();
+
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::Error:
+ case llvm::BitstreamEntry::SubBlock:
+ return Failure;
+
+ case llvm::BitstreamEntry::EndBlock:
+ return Result;
+
+ case llvm::BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ // Read and process a record.
+ Record.clear();
+ switch ((OptionsRecordTypes)Stream.readRecord(Entry.ID, Record)) {
+ case LANGUAGE_OPTIONS: {
+ bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
+ if (ParseLanguageOptions(Record, Complain, Listener,
+ AllowCompatibleConfigurationMismatch))
+ Result = ConfigurationMismatch;
+ break;
+ }
+
+ case TARGET_OPTIONS: {
+ bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
+ if (ParseTargetOptions(Record, Complain, Listener,
+ AllowCompatibleConfigurationMismatch))
+ Result = ConfigurationMismatch;
+ break;
+ }
+
+ case DIAGNOSTIC_OPTIONS: {
+ bool Complain = (ClientLoadCapabilities & ARR_OutOfDate) == 0;
+ if (!AllowCompatibleConfigurationMismatch &&
+ ParseDiagnosticOptions(Record, Complain, Listener))
+ return OutOfDate;
+ break;
+ }
+
+ case FILE_SYSTEM_OPTIONS: {
+ bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
+ if (!AllowCompatibleConfigurationMismatch &&
+ ParseFileSystemOptions(Record, Complain, Listener))
+ Result = ConfigurationMismatch;
+ break;
+ }
+
+ case HEADER_SEARCH_OPTIONS: {
+ bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
+ if (!AllowCompatibleConfigurationMismatch &&
+ ParseHeaderSearchOptions(Record, Complain, Listener))
+ Result = ConfigurationMismatch;
+ break;
+ }
+
+ case PREPROCESSOR_OPTIONS: {
+ bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
+ if (!AllowCompatibleConfigurationMismatch &&
+ ParsePreprocessorOptions(Record, Complain, Listener,
+ SuggestedPredefines))
+ Result = ConfigurationMismatch;
+ break;
+ }
+ }
+ }
+}
+
+ASTReader::ASTReadResult
+ASTReader::ReadControlBlock(ModuleFile &F,
+ SmallVectorImpl<ImportedModule> &Loaded,
+ const ModuleFile *ImportedBy,
+ unsigned ClientLoadCapabilities) {
+ BitstreamCursor &Stream = F.Stream;
+ ASTReadResult Result = Success;
+
+ if (Stream.EnterSubBlock(CONTROL_BLOCK_ID)) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+
+ // Read all of the records and blocks in the control block.
+ RecordData Record;
+ unsigned NumInputs = 0;
+ unsigned NumUserInputs = 0;
+ while (true) {
+ llvm::BitstreamEntry Entry = Stream.advance();
+
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::Error:
+ Error("malformed block record in AST file");
+ return Failure;
+ case llvm::BitstreamEntry::EndBlock: {
+ // Validate input files.
+ const HeaderSearchOptions &HSOpts =
+ PP.getHeaderSearchInfo().getHeaderSearchOpts();
+
+ // All user input files reside at the index range [0, NumUserInputs), and
+ // system input files reside at [NumUserInputs, NumInputs). For explicitly
+ // loaded module files, ignore missing inputs.
+ if (!DisableValidation && F.Kind != MK_ExplicitModule) {
+ bool Complain = (ClientLoadCapabilities & ARR_OutOfDate) == 0;
+
+ // If we are reading a module, we will create a verification timestamp,
+ // so we verify all input files. Otherwise, verify only user input
+ // files.
+
+ unsigned N = NumUserInputs;
+ if (ValidateSystemInputs ||
+ (HSOpts.ModulesValidateOncePerBuildSession &&
+ F.InputFilesValidationTimestamp <= HSOpts.BuildSessionTimestamp &&
+ F.Kind == MK_ImplicitModule))
+ N = NumInputs;
+
+ for (unsigned I = 0; I < N; ++I) {
+ InputFile IF = getInputFile(F, I+1, Complain);
+ if (!IF.getFile() || IF.isOutOfDate())
+ return OutOfDate;
+ }
+ }
+
+ if (Listener)
+ Listener->visitModuleFile(F.FileName, F.Kind);
+
+ if (Listener && Listener->needsInputFileVisitation()) {
+ unsigned N = Listener->needsSystemInputFileVisitation() ? NumInputs
+ : NumUserInputs;
+ for (unsigned I = 0; I < N; ++I) {
+ bool IsSystem = I >= NumUserInputs;
+ InputFileInfo FI = readInputFileInfo(F, I+1);
+ Listener->visitInputFile(FI.Filename, IsSystem, FI.Overridden,
+ F.Kind == MK_ExplicitModule);
+ }
+ }
+
+ return Result;
+ }
+
+ case llvm::BitstreamEntry::SubBlock:
+ switch (Entry.ID) {
+ case INPUT_FILES_BLOCK_ID:
+ F.InputFilesCursor = Stream;
+ if (Stream.SkipBlock() || // Skip with the main cursor
+ // Read the abbreviations
+ ReadBlockAbbrevs(F.InputFilesCursor, INPUT_FILES_BLOCK_ID)) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+ continue;
+
+ case OPTIONS_BLOCK_ID:
+ // If we're reading the first module for this group, check its options
+ // are compatible with ours. For modules it imports, no further checking
+ // is required, because we checked them when we built it.
+ if (Listener && !ImportedBy) {
+ // Should we allow the configuration of the module file to differ from
+ // the configuration of the current translation unit in a compatible
+ // way?
+ //
+ // FIXME: Allow this for files explicitly specified with -include-pch.
+ bool AllowCompatibleConfigurationMismatch =
+ F.Kind == MK_ExplicitModule;
+
+ Result = ReadOptionsBlock(Stream, ClientLoadCapabilities,
+ AllowCompatibleConfigurationMismatch,
+ *Listener, SuggestedPredefines);
+ if (Result == Failure) {
+ Error("malformed block record in AST file");
+ return Result;
+ }
+
+ if (DisableValidation ||
+ (AllowConfigurationMismatch && Result == ConfigurationMismatch))
+ Result = Success;
+
+ // If we've diagnosed a problem, we're done.
+ if (Result != Success &&
+ isDiagnosedResult(Result, ClientLoadCapabilities))
+ return Result;
+ } else if (Stream.SkipBlock()) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+ continue;
+
+ default:
+ if (Stream.SkipBlock()) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+ continue;
+ }
+
+ case llvm::BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ // Read and process a record.
+ Record.clear();
+ StringRef Blob;
+ switch ((ControlRecordTypes)Stream.readRecord(Entry.ID, Record, &Blob)) {
+ case METADATA: {
+ if (Record[0] != VERSION_MAJOR && !DisableValidation) {
+ if ((ClientLoadCapabilities & ARR_VersionMismatch) == 0)
+ Diag(Record[0] < VERSION_MAJOR? diag::err_pch_version_too_old
+ : diag::err_pch_version_too_new);
+ return VersionMismatch;
+ }
+
+ bool hasErrors = Record[6];
+ if (hasErrors && !DisableValidation && !AllowASTWithCompilerErrors) {
+ Diag(diag::err_pch_with_compiler_errors);
+ return HadErrors;
+ }
+
+ F.RelocatablePCH = Record[4];
+ // Relative paths in a relocatable PCH are relative to our sysroot.
+ if (F.RelocatablePCH)
+ F.BaseDirectory = isysroot.empty() ? "/" : isysroot;
+
+ F.HasTimestamps = Record[5];
+
+ const std::string &CurBranch = getClangFullRepositoryVersion();
+ StringRef ASTBranch = Blob;
+ if (StringRef(CurBranch) != ASTBranch && !DisableValidation) {
+ if ((ClientLoadCapabilities & ARR_VersionMismatch) == 0)
+ Diag(diag::err_pch_different_branch) << ASTBranch << CurBranch;
+ return VersionMismatch;
+ }
+ break;
+ }
+
+ case SIGNATURE:
+ assert((!F.Signature || F.Signature == Record[0]) && "signature changed");
+ F.Signature = Record[0];
+ break;
+
+ case IMPORTS: {
+ // Load each of the imported PCH files.
+ unsigned Idx = 0, N = Record.size();
+ while (Idx < N) {
+ // Read information about the AST file.
+ ModuleKind ImportedKind = (ModuleKind)Record[Idx++];
+ // The import location will be the local one for now; we will adjust
+ // all import locations of module imports after the global source
+ // location info is set up.
+ SourceLocation ImportLoc =
+ SourceLocation::getFromRawEncoding(Record[Idx++]);
+ off_t StoredSize = (off_t)Record[Idx++];
+ time_t StoredModTime = (time_t)Record[Idx++];
+ ASTFileSignature StoredSignature = Record[Idx++];
+ auto ImportedFile = ReadPath(F, Record, Idx);
+
+ // If our client can't cope with us being out of date, we can't cope with
+ // our dependency being missing.
+ unsigned Capabilities = ClientLoadCapabilities;
+ if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
+ Capabilities &= ~ARR_Missing;
+
+ // Load the AST file.
+ auto Result = ReadASTCore(ImportedFile, ImportedKind, ImportLoc, &F,
+ Loaded, StoredSize, StoredModTime,
+ StoredSignature, Capabilities);
+
+ // If we diagnosed a problem, produce a backtrace.
+ if (isDiagnosedResult(Result, Capabilities))
+ Diag(diag::note_module_file_imported_by)
+ << F.FileName << !F.ModuleName.empty() << F.ModuleName;
+
+ switch (Result) {
+ case Failure: return Failure;
+ // If we have to ignore the dependency, we'll have to ignore this too.
+ case Missing:
+ case OutOfDate: return OutOfDate;
+ case VersionMismatch: return VersionMismatch;
+ case ConfigurationMismatch: return ConfigurationMismatch;
+ case HadErrors: return HadErrors;
+ case Success: break;
+ }
+ }
+ break;
+ }
+
+ case ORIGINAL_FILE:
+ F.OriginalSourceFileID = FileID::get(Record[0]);
+ F.ActualOriginalSourceFileName = Blob;
+ F.OriginalSourceFileName = F.ActualOriginalSourceFileName;
+ ResolveImportedPath(F, F.OriginalSourceFileName);
+ break;
+
+ case ORIGINAL_FILE_ID:
+ F.OriginalSourceFileID = FileID::get(Record[0]);
+ break;
+
+ case ORIGINAL_PCH_DIR:
+ F.OriginalDir = Blob;
+ break;
+
+ case MODULE_NAME:
+ F.ModuleName = Blob;
+ if (Listener)
+ Listener->ReadModuleName(F.ModuleName);
+ break;
+
+ case MODULE_DIRECTORY: {
+ assert(!F.ModuleName.empty() &&
+ "MODULE_DIRECTORY found before MODULE_NAME");
+ // If we've already loaded a module map file covering this module, we may
+ // have a better path for it (relative to the current build).
+ Module *M = PP.getHeaderSearchInfo().lookupModule(F.ModuleName);
+ if (M && M->Directory) {
+ // If we're implicitly loading a module, the base directory can't
+ // change between the build and use.
+ if (F.Kind != MK_ExplicitModule) {
+ const DirectoryEntry *BuildDir =
+ PP.getFileManager().getDirectory(Blob);
+ if (!BuildDir || BuildDir != M->Directory) {
+ if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
+ Diag(diag::err_imported_module_relocated)
+ << F.ModuleName << Blob << M->Directory->getName();
+ return OutOfDate;
+ }
+ }
+ F.BaseDirectory = M->Directory->getName();
+ } else {
+ F.BaseDirectory = Blob;
+ }
+ break;
+ }
+
+ case MODULE_MAP_FILE:
+ if (ASTReadResult Result =
+ ReadModuleMapFileBlock(Record, F, ImportedBy, ClientLoadCapabilities))
+ return Result;
+ break;
+
+ case INPUT_FILE_OFFSETS:
+ NumInputs = Record[0];
+ NumUserInputs = Record[1];
+ F.InputFileOffsets =
+ (const llvm::support::unaligned_uint64_t *)Blob.data();
+ F.InputFilesLoaded.resize(NumInputs);
+ break;
+ }
+ }
+}
+
+ASTReader::ASTReadResult
+ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
+ BitstreamCursor &Stream = F.Stream;
+
+ if (Stream.EnterSubBlock(AST_BLOCK_ID)) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+
+ // Read all of the records and blocks for the AST file.
+ RecordData Record;
+ while (true) {
+ llvm::BitstreamEntry Entry = Stream.advance();
+
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::Error:
+ Error("error at end of module block in AST file");
+ return Failure;
+ case llvm::BitstreamEntry::EndBlock: {
+ // Outside of C++, we do not store a lookup map for the translation unit.
+ // Instead, mark it as needing a lookup map to be built if this module
+ // contains any declarations lexically within it (which it always does!).
+ // This usually has no cost, since we very rarely need the lookup map for
+ // the translation unit outside C++.
+ DeclContext *DC = Context.getTranslationUnitDecl();
+ if (DC->hasExternalLexicalStorage() &&
+ !getContext().getLangOpts().CPlusPlus)
+ DC->setMustBuildLookupTable();
+
+ return Success;
+ }
+ case llvm::BitstreamEntry::SubBlock:
+ switch (Entry.ID) {
+ case DECLTYPES_BLOCK_ID:
+ // We lazily load the decls block, but we want to set up the
+ // DeclsCursor cursor to point into it. Clone our current bitcode
+ // cursor to it, enter the block and read the abbrevs in that block.
+ // With the main cursor, we just skip over it.
+ F.DeclsCursor = Stream;
+ if (Stream.SkipBlock() || // Skip with the main cursor.
+ // Read the abbrevs.
+ ReadBlockAbbrevs(F.DeclsCursor, DECLTYPES_BLOCK_ID)) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+ break;
+
+ case PREPROCESSOR_BLOCK_ID:
+ F.MacroCursor = Stream;
+ if (!PP.getExternalSource())
+ PP.setExternalSource(this);
+
+ if (Stream.SkipBlock() ||
+ ReadBlockAbbrevs(F.MacroCursor, PREPROCESSOR_BLOCK_ID)) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+ F.MacroStartOffset = F.MacroCursor.GetCurrentBitNo();
+ break;
+
+ case PREPROCESSOR_DETAIL_BLOCK_ID:
+ F.PreprocessorDetailCursor = Stream;
+ if (Stream.SkipBlock() ||
+ ReadBlockAbbrevs(F.PreprocessorDetailCursor,
+ PREPROCESSOR_DETAIL_BLOCK_ID)) {
+ Error("malformed preprocessor detail record in AST file");
+ return Failure;
+ }
+ F.PreprocessorDetailStartOffset
+ = F.PreprocessorDetailCursor.GetCurrentBitNo();
+
+ if (!PP.getPreprocessingRecord())
+ PP.createPreprocessingRecord();
+ if (!PP.getPreprocessingRecord()->getExternalSource())
+ PP.getPreprocessingRecord()->SetExternalSource(*this);
+ break;
+
+ case SOURCE_MANAGER_BLOCK_ID:
+ if (ReadSourceManagerBlock(F))
+ return Failure;
+ break;
+
+ case SUBMODULE_BLOCK_ID:
+ if (ASTReadResult Result = ReadSubmoduleBlock(F, ClientLoadCapabilities))
+ return Result;
+ break;
+
+ case COMMENTS_BLOCK_ID: {
+ BitstreamCursor C = Stream;
+ if (Stream.SkipBlock() ||
+ ReadBlockAbbrevs(C, COMMENTS_BLOCK_ID)) {
+ Error("malformed comments block in AST file");
+ return Failure;
+ }
+ CommentsCursors.push_back(std::make_pair(C, &F));
+ break;
+ }
+
+ default:
+ if (Stream.SkipBlock()) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+ break;
+ }
+ continue;
+
+ case llvm::BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ // Read and process a record.
+ Record.clear();
+ StringRef Blob;
+ switch ((ASTRecordTypes)Stream.readRecord(Entry.ID, Record, &Blob)) {
+ default: // Default behavior: ignore.
+ break;
+
+ case TYPE_OFFSET: {
+ if (F.LocalNumTypes != 0) {
+ Error("duplicate TYPE_OFFSET record in AST file");
+ return Failure;
+ }
+ F.TypeOffsets = (const uint32_t *)Blob.data();
+ F.LocalNumTypes = Record[0];
+ unsigned LocalBaseTypeIndex = Record[1];
+ F.BaseTypeIndex = getTotalNumTypes();
+
+ if (F.LocalNumTypes > 0) {
+ // Introduce the global -> local mapping for types within this module.
+ GlobalTypeMap.insert(std::make_pair(getTotalNumTypes(), &F));
+
+ // Introduce the local -> global mapping for types within this module.
+ F.TypeRemap.insertOrReplace(
+ std::make_pair(LocalBaseTypeIndex,
+ F.BaseTypeIndex - LocalBaseTypeIndex));
+
+ TypesLoaded.resize(TypesLoaded.size() + F.LocalNumTypes);
+ }
+ break;
+ }
+
+ case DECL_OFFSET: {
+ if (F.LocalNumDecls != 0) {
+ Error("duplicate DECL_OFFSET record in AST file");
+ return Failure;
+ }
+ F.DeclOffsets = (const DeclOffset *)Blob.data();
+ F.LocalNumDecls = Record[0];
+ unsigned LocalBaseDeclID = Record[1];
+ F.BaseDeclID = getTotalNumDecls();
+
+ if (F.LocalNumDecls > 0) {
+ // Introduce the global -> local mapping for declarations within this
+ // module.
+ GlobalDeclMap.insert(
+ std::make_pair(getTotalNumDecls() + NUM_PREDEF_DECL_IDS, &F));
+
+ // Introduce the local -> global mapping for declarations within this
+ // module.
+ F.DeclRemap.insertOrReplace(
+ std::make_pair(LocalBaseDeclID, F.BaseDeclID - LocalBaseDeclID));
+
+ // Introduce the global -> local mapping for declarations within this
+ // module.
+ F.GlobalToLocalDeclIDs[&F] = LocalBaseDeclID;
+
+ DeclsLoaded.resize(DeclsLoaded.size() + F.LocalNumDecls);
+ }
+ break;
+ }
+
+ case TU_UPDATE_LEXICAL: {
+ DeclContext *TU = Context.getTranslationUnitDecl();
+ LexicalContents Contents(
+ reinterpret_cast<const llvm::support::unaligned_uint32_t *>(
+ Blob.data()),
+ static_cast<unsigned int>(Blob.size() / 4));
+ TULexicalDecls.push_back(std::make_pair(&F, Contents));
+ TU->setHasExternalLexicalStorage(true);
+ break;
+ }
+
+ case UPDATE_VISIBLE: {
+ unsigned Idx = 0;
+ serialization::DeclID ID = ReadDeclID(F, Record, Idx);
+ auto *Data = (const unsigned char*)Blob.data();
+ PendingVisibleUpdates[ID].push_back(PendingVisibleUpdate{&F, Data});
+ // If we've already loaded the decl, perform the updates when we finish
+ // loading this block.
+ if (Decl *D = GetExistingDecl(ID))
+ PendingUpdateRecords.push_back(std::make_pair(ID, D));
+ break;
+ }
+
+ case IDENTIFIER_TABLE:
+ F.IdentifierTableData = Blob.data();
+ if (Record[0]) {
+ F.IdentifierLookupTable = ASTIdentifierLookupTable::Create(
+ (const unsigned char *)F.IdentifierTableData + Record[0],
+ (const unsigned char *)F.IdentifierTableData + sizeof(uint32_t),
+ (const unsigned char *)F.IdentifierTableData,
+ ASTIdentifierLookupTrait(*this, F));
+
+ PP.getIdentifierTable().setExternalIdentifierLookup(this);
+ }
+ break;
+
+ case IDENTIFIER_OFFSET: {
+ if (F.LocalNumIdentifiers != 0) {
+ Error("duplicate IDENTIFIER_OFFSET record in AST file");
+ return Failure;
+ }
+ F.IdentifierOffsets = (const uint32_t *)Blob.data();
+ F.LocalNumIdentifiers = Record[0];
+ unsigned LocalBaseIdentifierID = Record[1];
+ F.BaseIdentifierID = getTotalNumIdentifiers();
+
+ if (F.LocalNumIdentifiers > 0) {
+ // Introduce the global -> local mapping for identifiers within this
+ // module.
+ GlobalIdentifierMap.insert(std::make_pair(getTotalNumIdentifiers() + 1,
+ &F));
+
+ // Introduce the local -> global mapping for identifiers within this
+ // module.
+ F.IdentifierRemap.insertOrReplace(
+ std::make_pair(LocalBaseIdentifierID,
+ F.BaseIdentifierID - LocalBaseIdentifierID));
+
+ IdentifiersLoaded.resize(IdentifiersLoaded.size()
+ + F.LocalNumIdentifiers);
+ }
+ break;
+ }
+
+ case INTERESTING_IDENTIFIERS:
+ F.PreloadIdentifierOffsets.assign(Record.begin(), Record.end());
+ break;
+
+ case EAGERLY_DESERIALIZED_DECLS:
+ // FIXME: Skip reading this record if our ASTConsumer doesn't care
+ // about "interesting" decls (for instance, if we're building a module).
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ EagerlyDeserializedDecls.push_back(getGlobalDeclID(F, Record[I]));
+ break;
+
+ case SPECIAL_TYPES:
+ if (SpecialTypes.empty()) {
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ SpecialTypes.push_back(getGlobalTypeID(F, Record[I]));
+ break;
+ }
+
+ if (SpecialTypes.size() != Record.size()) {
+ Error("invalid special-types record");
+ return Failure;
+ }
+
+ for (unsigned I = 0, N = Record.size(); I != N; ++I) {
+ serialization::TypeID ID = getGlobalTypeID(F, Record[I]);
+ if (!SpecialTypes[I])
+ SpecialTypes[I] = ID;
+ // FIXME: If ID && SpecialTypes[I] != ID, do we need a separate
+ // merge step?
+ }
+ break;
+
+ case STATISTICS:
+ TotalNumStatements += Record[0];
+ TotalNumMacros += Record[1];
+ TotalLexicalDeclContexts += Record[2];
+ TotalVisibleDeclContexts += Record[3];
+ break;
+
+ case UNUSED_FILESCOPED_DECLS:
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ UnusedFileScopedDecls.push_back(getGlobalDeclID(F, Record[I]));
+ break;
+
+ case DELEGATING_CTORS:
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ DelegatingCtorDecls.push_back(getGlobalDeclID(F, Record[I]));
+ break;
+
+ case WEAK_UNDECLARED_IDENTIFIERS:
+ if (Record.size() % 4 != 0) {
+ Error("invalid weak identifiers record");
+ return Failure;
+ }
+
+ // FIXME: Ignore weak undeclared identifiers from non-original PCH
+ // files. This isn't the way to do it :)
+ WeakUndeclaredIdentifiers.clear();
+
+ // Translate the weak, undeclared identifiers into global IDs.
+ for (unsigned I = 0, N = Record.size(); I < N; /* in loop */) {
+ WeakUndeclaredIdentifiers.push_back(
+ getGlobalIdentifierID(F, Record[I++]));
+ WeakUndeclaredIdentifiers.push_back(
+ getGlobalIdentifierID(F, Record[I++]));
+ WeakUndeclaredIdentifiers.push_back(
+ ReadSourceLocation(F, Record, I).getRawEncoding());
+ WeakUndeclaredIdentifiers.push_back(Record[I++]);
+ }
+ break;
+
+ case SELECTOR_OFFSETS: {
+ F.SelectorOffsets = (const uint32_t *)Blob.data();
+ F.LocalNumSelectors = Record[0];
+ unsigned LocalBaseSelectorID = Record[1];
+ F.BaseSelectorID = getTotalNumSelectors();
+
+ if (F.LocalNumSelectors > 0) {
+ // Introduce the global -> local mapping for selectors within this
+ // module.
+ GlobalSelectorMap.insert(std::make_pair(getTotalNumSelectors()+1, &F));
+
+ // Introduce the local -> global mapping for selectors within this
+ // module.
+ F.SelectorRemap.insertOrReplace(
+ std::make_pair(LocalBaseSelectorID,
+ F.BaseSelectorID - LocalBaseSelectorID));
+
+ SelectorsLoaded.resize(SelectorsLoaded.size() + F.LocalNumSelectors);
+ }
+ break;
+ }
+
+ case METHOD_POOL:
+ F.SelectorLookupTableData = (const unsigned char *)Blob.data();
+ if (Record[0])
+ F.SelectorLookupTable
+ = ASTSelectorLookupTable::Create(
+ F.SelectorLookupTableData + Record[0],
+ F.SelectorLookupTableData,
+ ASTSelectorLookupTrait(*this, F));
+ TotalNumMethodPoolEntries += Record[1];
+ break;
+
+ case REFERENCED_SELECTOR_POOL:
+ if (!Record.empty()) {
+ for (unsigned Idx = 0, N = Record.size() - 1; Idx < N; /* in loop */) {
+ ReferencedSelectorsData.push_back(getGlobalSelectorID(F,
+ Record[Idx++]));
+ ReferencedSelectorsData.push_back(ReadSourceLocation(F, Record, Idx).
+ getRawEncoding());
+ }
+ }
+ break;
+
+ case PP_COUNTER_VALUE:
+ if (!Record.empty() && Listener)
+ Listener->ReadCounter(F, Record[0]);
+ break;
+
+ case FILE_SORTED_DECLS:
+ F.FileSortedDecls = (const DeclID *)Blob.data();
+ F.NumFileSortedDecls = Record[0];
+ break;
+
+ case SOURCE_LOCATION_OFFSETS: {
+ F.SLocEntryOffsets = (const uint32_t *)Blob.data();
+ F.LocalNumSLocEntries = Record[0];
+ unsigned SLocSpaceSize = Record[1];
+ std::tie(F.SLocEntryBaseID, F.SLocEntryBaseOffset) =
+ SourceMgr.AllocateLoadedSLocEntries(F.LocalNumSLocEntries,
+ SLocSpaceSize);
+ if (!F.SLocEntryBaseID) {
+ Error("ran out of source locations");
+ break;
+ }
+ // Make our entry in the range map. BaseID is negative and growing, so
+ // we invert it. Because we invert it, though, we need the other end of
+ // the range.
+ unsigned RangeStart =
+ unsigned(-F.SLocEntryBaseID) - F.LocalNumSLocEntries + 1;
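+ // For illustration (made-up numbers): with SLocEntryBaseID == -4 and 3
+ // local entries, this module owns IDs -4..-2, i.e. the inverted range
+ // [2, 4], so RangeStart is 4 - 3 + 1 == 2.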
+ GlobalSLocEntryMap.insert(std::make_pair(RangeStart, &F));
+ F.FirstLoc = SourceLocation::getFromRawEncoding(F.SLocEntryBaseOffset);
+
+ // SLocEntryBaseOffset is lower than MaxLoadedOffset and decreasing.
+ assert((F.SLocEntryBaseOffset & (1U << 31U)) == 0);
+ GlobalSLocOffsetMap.insert(
+ std::make_pair(SourceManager::MaxLoadedOffset - F.SLocEntryBaseOffset
+ - SLocSpaceSize,&F));
+
+ // Initialize the remapping table.
+ // Invalid stays invalid.
+ F.SLocRemap.insertOrReplace(std::make_pair(0U, 0));
+ // This module's own entries: their base offset was 2 when it was compiled.
+ F.SLocRemap.insertOrReplace(std::make_pair(2U,
+ static_cast<int>(F.SLocEntryBaseOffset - 2)));
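+ // For illustration (made-up numbers): if SLocEntryBaseOffset is 1000, the
+ // stored delta is 998, so a local offset of 2 maps to global offset 1000
+ // and a local offset of 50 maps to 1048.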
+
+ TotalNumSLocEntries += F.LocalNumSLocEntries;
+ break;
+ }
+
+ case MODULE_OFFSET_MAP: {
+ // Additional remapping information.
+ const unsigned char *Data = (const unsigned char*)Blob.data();
+ const unsigned char *DataEnd = Data + Blob.size();
+
+ // If we see this entry before SOURCE_LOCATION_OFFSETS, add placeholders.
+ if (F.SLocRemap.find(0) == F.SLocRemap.end()) {
+ F.SLocRemap.insert(std::make_pair(0U, 0));
+ F.SLocRemap.insert(std::make_pair(2U, 1));
+ }
+
+ // Continuous range maps we may be updating in our module.
+ typedef ContinuousRangeMap<uint32_t, int, 2>::Builder
+ RemapBuilder;
+ RemapBuilder SLocRemap(F.SLocRemap);
+ RemapBuilder IdentifierRemap(F.IdentifierRemap);
+ RemapBuilder MacroRemap(F.MacroRemap);
+ RemapBuilder PreprocessedEntityRemap(F.PreprocessedEntityRemap);
+ RemapBuilder SubmoduleRemap(F.SubmoduleRemap);
+ RemapBuilder SelectorRemap(F.SelectorRemap);
+ RemapBuilder DeclRemap(F.DeclRemap);
+ RemapBuilder TypeRemap(F.TypeRemap);
+
+ while (Data < DataEnd) {
+ using namespace llvm::support;
+ uint16_t Len = endian::readNext<uint16_t, little, unaligned>(Data);
+ StringRef Name = StringRef((const char*)Data, Len);
+ Data += Len;
+ ModuleFile *OM = ModuleMgr.lookup(Name);
+ if (!OM) {
+ Error("SourceLocation remap refers to unknown module");
+ return Failure;
+ }
+
+ uint32_t SLocOffset =
+ endian::readNext<uint32_t, little, unaligned>(Data);
+ uint32_t IdentifierIDOffset =
+ endian::readNext<uint32_t, little, unaligned>(Data);
+ uint32_t MacroIDOffset =
+ endian::readNext<uint32_t, little, unaligned>(Data);
+ uint32_t PreprocessedEntityIDOffset =
+ endian::readNext<uint32_t, little, unaligned>(Data);
+ uint32_t SubmoduleIDOffset =
+ endian::readNext<uint32_t, little, unaligned>(Data);
+ uint32_t SelectorIDOffset =
+ endian::readNext<uint32_t, little, unaligned>(Data);
+ uint32_t DeclIDOffset =
+ endian::readNext<uint32_t, little, unaligned>(Data);
+ uint32_t TypeIndexOffset =
+ endian::readNext<uint32_t, little, unaligned>(Data);
+
+ uint32_t None = std::numeric_limits<uint32_t>::max();
+
+ auto mapOffset = [&](uint32_t Offset, uint32_t BaseOffset,
+ RemapBuilder &Remap) {
+ if (Offset != None)
+ Remap.insert(std::make_pair(Offset,
+ static_cast<int>(BaseOffset - Offset)));
+ };
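+ // Each delta is "global base minus local offset", so translating a local
+ // ID later is a single addition. For illustration (made-up numbers): if
+ // OM's declarations start at local ID 100 and global ID 4000, the stored
+ // delta is 3900 and local ID 125 maps to global ID 4025.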
+ mapOffset(SLocOffset, OM->SLocEntryBaseOffset, SLocRemap);
+ mapOffset(IdentifierIDOffset, OM->BaseIdentifierID, IdentifierRemap);
+ mapOffset(MacroIDOffset, OM->BaseMacroID, MacroRemap);
+ mapOffset(PreprocessedEntityIDOffset, OM->BasePreprocessedEntityID,
+ PreprocessedEntityRemap);
+ mapOffset(SubmoduleIDOffset, OM->BaseSubmoduleID, SubmoduleRemap);
+ mapOffset(SelectorIDOffset, OM->BaseSelectorID, SelectorRemap);
+ mapOffset(DeclIDOffset, OM->BaseDeclID, DeclRemap);
+ mapOffset(TypeIndexOffset, OM->BaseTypeIndex, TypeRemap);
+
+ // Global -> local mappings.
+ F.GlobalToLocalDeclIDs[OM] = DeclIDOffset;
+ }
+ break;
+ }
+
+ case SOURCE_MANAGER_LINE_TABLE:
+ if (ParseLineTable(F, Record))
+ return Failure;
+ break;
+
+ case SOURCE_LOCATION_PRELOADS: {
+ // Need to transform from the local view (1-based IDs) to the global view,
+ // which is based on F.SLocEntryBaseID.
+ if (!F.PreloadSLocEntries.empty()) {
+ Error("Multiple SOURCE_LOCATION_PRELOADS records in AST file");
+ return Failure;
+ }
+
+ F.PreloadSLocEntries.swap(Record);
+ break;
+ }
+
+ case EXT_VECTOR_DECLS:
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ ExtVectorDecls.push_back(getGlobalDeclID(F, Record[I]));
+ break;
+
+ case VTABLE_USES:
+ if (Record.size() % 3 != 0) {
+ Error("Invalid VTABLE_USES record");
+ return Failure;
+ }
+
+ // Later tables overwrite earlier ones.
+ // FIXME: Modules will have some trouble with this. This is clearly not
+ // the right way to do this.
+ VTableUses.clear();
+
+ for (unsigned Idx = 0, N = Record.size(); Idx != N; /* In loop */) {
+ VTableUses.push_back(getGlobalDeclID(F, Record[Idx++]));
+ VTableUses.push_back(
+ ReadSourceLocation(F, Record, Idx).getRawEncoding());
+ VTableUses.push_back(Record[Idx++]);
+ }
+ break;
+
+ case PENDING_IMPLICIT_INSTANTIATIONS:
+ if (PendingInstantiations.size() % 2 != 0) {
+ Error("Invalid existing PendingInstantiations");
+ return Failure;
+ }
+
+ if (Record.size() % 2 != 0) {
+ Error("Invalid PENDING_IMPLICIT_INSTANTIATIONS block");
+ return Failure;
+ }
+
+ for (unsigned I = 0, N = Record.size(); I != N; /* in loop */) {
+ PendingInstantiations.push_back(getGlobalDeclID(F, Record[I++]));
+ PendingInstantiations.push_back(
+ ReadSourceLocation(F, Record, I).getRawEncoding());
+ }
+ break;
+
+ case SEMA_DECL_REFS:
+ if (Record.size() != 2) {
+ Error("Invalid SEMA_DECL_REFS block");
+ return Failure;
+ }
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ SemaDeclRefs.push_back(getGlobalDeclID(F, Record[I]));
+ break;
+
+ case PPD_ENTITIES_OFFSETS: {
+ F.PreprocessedEntityOffsets = (const PPEntityOffset *)Blob.data();
+ assert(Blob.size() % sizeof(PPEntityOffset) == 0);
+ F.NumPreprocessedEntities = Blob.size() / sizeof(PPEntityOffset);
+
+ unsigned LocalBasePreprocessedEntityID = Record[0];
+
+ unsigned StartingID;
+ if (!PP.getPreprocessingRecord())
+ PP.createPreprocessingRecord();
+ if (!PP.getPreprocessingRecord()->getExternalSource())
+ PP.getPreprocessingRecord()->SetExternalSource(*this);
+ StartingID
+ = PP.getPreprocessingRecord()
+ ->allocateLoadedEntities(F.NumPreprocessedEntities);
+ F.BasePreprocessedEntityID = StartingID;
+
+ if (F.NumPreprocessedEntities > 0) {
+ // Introduce the global -> local mapping for preprocessed entities in
+ // this module.
+ GlobalPreprocessedEntityMap.insert(std::make_pair(StartingID, &F));
+
+ // Introduce the local -> global mapping for preprocessed entities in
+ // this module.
+ F.PreprocessedEntityRemap.insertOrReplace(
+ std::make_pair(LocalBasePreprocessedEntityID,
+ F.BasePreprocessedEntityID - LocalBasePreprocessedEntityID));
+ }
+
+ break;
+ }
+
+ case DECL_UPDATE_OFFSETS: {
+ if (Record.size() % 2 != 0) {
+ Error("invalid DECL_UPDATE_OFFSETS block in AST file");
+ return Failure;
+ }
+ for (unsigned I = 0, N = Record.size(); I != N; I += 2) {
+ GlobalDeclID ID = getGlobalDeclID(F, Record[I]);
+ DeclUpdateOffsets[ID].push_back(std::make_pair(&F, Record[I + 1]));
+
+ // If we've already loaded the decl, perform the updates when we finish
+ // loading this block.
+ if (Decl *D = GetExistingDecl(ID))
+ PendingUpdateRecords.push_back(std::make_pair(ID, D));
+ }
+ break;
+ }
+
+ case DECL_REPLACEMENTS: {
+ if (Record.size() % 3 != 0) {
+ Error("invalid DECL_REPLACEMENTS block in AST file");
+ return Failure;
+ }
+ for (unsigned I = 0, N = Record.size(); I != N; I += 3)
+ ReplacedDecls[getGlobalDeclID(F, Record[I])]
+ = ReplacedDeclInfo(&F, Record[I+1], Record[I+2]);
+ break;
+ }
+
+ case OBJC_CATEGORIES_MAP: {
+ if (F.LocalNumObjCCategoriesInMap != 0) {
+ Error("duplicate OBJC_CATEGORIES_MAP record in AST file");
+ return Failure;
+ }
+
+ F.LocalNumObjCCategoriesInMap = Record[0];
+ F.ObjCCategoriesMap = (const ObjCCategoriesInfo *)Blob.data();
+ break;
+ }
+
+ case OBJC_CATEGORIES:
+ F.ObjCCategories.swap(Record);
+ break;
+
+ case CXX_BASE_SPECIFIER_OFFSETS: {
+ if (F.LocalNumCXXBaseSpecifiers != 0) {
+ Error("duplicate CXX_BASE_SPECIFIER_OFFSETS record in AST file");
+ return Failure;
+ }
+
+ F.LocalNumCXXBaseSpecifiers = Record[0];
+ F.CXXBaseSpecifiersOffsets = (const uint32_t *)Blob.data();
+ break;
+ }
+
+ case CXX_CTOR_INITIALIZERS_OFFSETS: {
+ if (F.LocalNumCXXCtorInitializers != 0) {
+ Error("duplicate CXX_CTOR_INITIALIZERS_OFFSETS record in AST file");
+ return Failure;
+ }
+
+ F.LocalNumCXXCtorInitializers = Record[0];
+ F.CXXCtorInitializersOffsets = (const uint32_t *)Blob.data();
+ break;
+ }
+
+ case DIAG_PRAGMA_MAPPINGS:
+ if (F.PragmaDiagMappings.empty())
+ F.PragmaDiagMappings.swap(Record);
+ else
+ F.PragmaDiagMappings.insert(F.PragmaDiagMappings.end(),
+ Record.begin(), Record.end());
+ break;
+
+ case CUDA_SPECIAL_DECL_REFS:
+ // Later tables overwrite earlier ones.
+ // FIXME: Modules will have trouble with this.
+ CUDASpecialDeclRefs.clear();
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ CUDASpecialDeclRefs.push_back(getGlobalDeclID(F, Record[I]));
+ break;
+
+ case HEADER_SEARCH_TABLE: {
+ F.HeaderFileInfoTableData = Blob.data();
+ F.LocalNumHeaderFileInfos = Record[1];
+ if (Record[0]) {
+ F.HeaderFileInfoTable
+ = HeaderFileInfoLookupTable::Create(
+ (const unsigned char *)F.HeaderFileInfoTableData + Record[0],
+ (const unsigned char *)F.HeaderFileInfoTableData,
+ HeaderFileInfoTrait(*this, F,
+ &PP.getHeaderSearchInfo(),
+ Blob.data() + Record[2]));
+
+ PP.getHeaderSearchInfo().SetExternalSource(this);
+ if (!PP.getHeaderSearchInfo().getExternalLookup())
+ PP.getHeaderSearchInfo().SetExternalLookup(this);
+ }
+ break;
+ }
+
+ case FP_PRAGMA_OPTIONS:
+ // Later tables overwrite earlier ones.
+ FPPragmaOptions.swap(Record);
+ break;
+
+ case OPENCL_EXTENSIONS:
+ // Later tables overwrite earlier ones.
+ OpenCLExtensions.swap(Record);
+ break;
+
+ case TENTATIVE_DEFINITIONS:
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ TentativeDefinitions.push_back(getGlobalDeclID(F, Record[I]));
+ break;
+
+ case KNOWN_NAMESPACES:
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ KnownNamespaces.push_back(getGlobalDeclID(F, Record[I]));
+ break;
+
+ case UNDEFINED_BUT_USED:
+ if (UndefinedButUsed.size() % 2 != 0) {
+ Error("Invalid existing UndefinedButUsed");
+ return Failure;
+ }
+
+ if (Record.size() % 2 != 0) {
+ Error("invalid undefined-but-used record");
+ return Failure;
+ }
+ for (unsigned I = 0, N = Record.size(); I != N; /* in loop */) {
+ UndefinedButUsed.push_back(getGlobalDeclID(F, Record[I++]));
+ UndefinedButUsed.push_back(
+ ReadSourceLocation(F, Record, I).getRawEncoding());
+ }
+ break;
+ case DELETE_EXPRS_TO_ANALYZE:
+ for (unsigned I = 0, N = Record.size(); I != N;) {
+ DelayedDeleteExprs.push_back(getGlobalDeclID(F, Record[I++]));
+ const uint64_t Count = Record[I++];
+ DelayedDeleteExprs.push_back(Count);
+ for (uint64_t C = 0; C < Count; ++C) {
+ DelayedDeleteExprs.push_back(ReadSourceLocation(F, Record, I).getRawEncoding());
+ bool IsArrayForm = Record[I++] == 1;
+ DelayedDeleteExprs.push_back(IsArrayForm);
+ }
+ }
+ break;
+
+ case IMPORTED_MODULES: {
+ if (F.Kind != MK_ImplicitModule && F.Kind != MK_ExplicitModule) {
+ // If we aren't loading a module (which has its own exports), make
+ // all of the imported modules visible.
+ // FIXME: Deal with macros-only imports.
+ for (unsigned I = 0, N = Record.size(); I != N; /**/) {
+ unsigned GlobalID = getGlobalSubmoduleID(F, Record[I++]);
+ SourceLocation Loc = ReadSourceLocation(F, Record, I);
+ if (GlobalID)
+ ImportedModules.push_back(ImportedSubmodule(GlobalID, Loc));
+ }
+ }
+ break;
+ }
+
+ case MACRO_OFFSET: {
+ if (F.LocalNumMacros != 0) {
+ Error("duplicate MACRO_OFFSET record in AST file");
+ return Failure;
+ }
+ F.MacroOffsets = (const uint32_t *)Blob.data();
+ F.LocalNumMacros = Record[0];
+ unsigned LocalBaseMacroID = Record[1];
+ F.BaseMacroID = getTotalNumMacros();
+
+ if (F.LocalNumMacros > 0) {
+ // Introduce the global -> local mapping for macros within this module.
+ GlobalMacroMap.insert(std::make_pair(getTotalNumMacros() + 1, &F));
+
+ // Introduce the local -> global mapping for macros within this module.
+ F.MacroRemap.insertOrReplace(
+ std::make_pair(LocalBaseMacroID,
+ F.BaseMacroID - LocalBaseMacroID));
+
+ MacrosLoaded.resize(MacrosLoaded.size() + F.LocalNumMacros);
+ }
+ break;
+ }
+
+ case LATE_PARSED_TEMPLATE: {
+ LateParsedTemplates.append(Record.begin(), Record.end());
+ break;
+ }
+
+ case OPTIMIZE_PRAGMA_OPTIONS:
+ if (Record.size() != 1) {
+ Error("invalid pragma optimize record");
+ return Failure;
+ }
+ OptimizeOffPragmaLocation = ReadSourceLocation(F, Record[0]);
+ break;
+
+ case UNUSED_LOCAL_TYPEDEF_NAME_CANDIDATES:
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ UnusedLocalTypedefNameCandidates.push_back(
+ getGlobalDeclID(F, Record[I]));
+ break;
+ }
+ }
+}
+
+ASTReader::ASTReadResult
+ASTReader::ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
+ const ModuleFile *ImportedBy,
+ unsigned ClientLoadCapabilities) {
+ unsigned Idx = 0;
+ F.ModuleMapPath = ReadPath(F, Record, Idx);
+
+ if (F.Kind == MK_ExplicitModule) {
+ // For an explicitly-loaded module, we don't care whether the original
+ // module map file exists or matches.
+ return Success;
+ }
+
+ // Try to resolve ModuleName in the current header search context and
+ // verify that it is found in the same module map file as we saved. If the
+ // top-level AST file is a main file, skip this check because there is no
+ // usable header search context.
+ assert(!F.ModuleName.empty() &&
+ "MODULE_NAME should come before MODULE_MAP_FILE");
+ if (F.Kind == MK_ImplicitModule &&
+ (*ModuleMgr.begin())->Kind != MK_MainFile) {
+ // An implicitly-loaded module file should have its module listed in some
+ // module map file that we've already loaded.
+ Module *M = PP.getHeaderSearchInfo().lookupModule(F.ModuleName);
+ auto &Map = PP.getHeaderSearchInfo().getModuleMap();
+ const FileEntry *ModMap = M ? Map.getModuleMapFileForUniquing(M) : nullptr;
+ if (!ModMap) {
+ assert(ImportedBy && "top-level import should be verified");
+ if ((ClientLoadCapabilities & ARR_OutOfDate) == 0) {
+ if (auto *ASTFE = M ? M->getASTFile() : nullptr)
+ // This module was defined by an imported (explicit) module.
+ Diag(diag::err_module_file_conflict) << F.ModuleName << F.FileName
+ << ASTFE->getName();
+ else
+ // This module was built with a different module map.
+ Diag(diag::err_imported_module_not_found)
+ << F.ModuleName << F.FileName << ImportedBy->FileName
+ << F.ModuleMapPath;
+ }
+ return OutOfDate;
+ }
+
+ assert(M->Name == F.ModuleName && "found module with different name");
+
+ // Check the primary module map file.
+ const FileEntry *StoredModMap = FileMgr.getFile(F.ModuleMapPath);
+ if (StoredModMap == nullptr || StoredModMap != ModMap) {
+ assert(ModMap && "found module is missing module map file");
+ assert(ImportedBy && "top-level import should be verified");
+ if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
+ Diag(diag::err_imported_module_modmap_changed)
+ << F.ModuleName << ImportedBy->FileName
+ << ModMap->getName() << F.ModuleMapPath;
+ return OutOfDate;
+ }
+
+ llvm::SmallPtrSet<const FileEntry *, 1> AdditionalStoredMaps;
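+    // Gather the additional module map files stored in the AST file, then
+    // reconcile them against the ones header search currently knows about;
+    // a file present on only one side marks the module out of date.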
+ for (unsigned I = 0, N = Record[Idx++]; I < N; ++I) {
+ // FIXME: we should use input files rather than storing names.
+ std::string Filename = ReadPath(F, Record, Idx);
+      const FileEntry *MapFile =
+          FileMgr.getFile(Filename, false, false);
+      if (MapFile == nullptr) {
+        if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
+          Error("could not find file '" + Filename + "' referenced by AST file");
+        return OutOfDate;
+      }
+      AdditionalStoredMaps.insert(MapFile);
+ }
+
+ // Check any additional module map files (e.g. module.private.modulemap)
+ // that are not in the pcm.
+ if (auto *AdditionalModuleMaps = Map.getAdditionalModuleMapFiles(M)) {
+ for (const FileEntry *ModMap : *AdditionalModuleMaps) {
+        // Remove files that match.
+        // Note: SmallPtrSet::erase behaves like a remove operation here.
+ if (!AdditionalStoredMaps.erase(ModMap)) {
+ if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
+ Diag(diag::err_module_different_modmap)
+ << F.ModuleName << /*new*/0 << ModMap->getName();
+ return OutOfDate;
+ }
+ }
+ }
+
+ // Check any additional module map files that are in the pcm, but not
+ // found in header search. Cases that match are already removed.
+ for (const FileEntry *ModMap : AdditionalStoredMaps) {
+ if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
+ Diag(diag::err_module_different_modmap)
+ << F.ModuleName << /*not new*/1 << ModMap->getName();
+ return OutOfDate;
+ }
+ }
+
+ if (Listener)
+ Listener->ReadModuleMapFile(F.ModuleMapPath);
+ return Success;
+}
+
+/// \brief Move the given method to the back of the global list of methods.
+static void moveMethodToBackOfGlobalList(Sema &S, ObjCMethodDecl *Method) {
+ // Find the entry for this selector in the method pool.
+ Sema::GlobalMethodPool::iterator Known
+ = S.MethodPool.find(Method->getSelector());
+ if (Known == S.MethodPool.end())
+ return;
+
+ // Retrieve the appropriate method list.
+ ObjCMethodList &Start = Method->isInstanceMethod()? Known->second.first
+ : Known->second.second;
+ bool Found = false;
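+  // Once Method is found, copy each successor's method one slot forward and
+  // store Method in the final slot, rotating it to the back of the list.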
+ for (ObjCMethodList *List = &Start; List; List = List->getNext()) {
+ if (!Found) {
+ if (List->getMethod() == Method) {
+ Found = true;
+ } else {
+ // Keep searching.
+ continue;
+ }
+ }
+
+ if (List->getNext())
+ List->setMethod(List->getNext()->getMethod());
+ else
+ List->setMethod(Method);
+ }
+}
+
+void ASTReader::makeNamesVisible(const HiddenNames &Names, Module *Owner) {
+ assert(Owner->NameVisibility != Module::Hidden && "nothing to make visible?");
+ for (Decl *D : Names) {
+ bool wasHidden = D->Hidden;
+ D->Hidden = false;
+
+ if (wasHidden && SemaObj) {
+ if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(D)) {
+ moveMethodToBackOfGlobalList(*SemaObj, Method);
+ }
+ }
+ }
+}
+
+void ASTReader::makeModuleVisible(Module *Mod,
+ Module::NameVisibilityKind NameVisibility,
+ SourceLocation ImportLoc) {
+ llvm::SmallPtrSet<Module *, 4> Visited;
+ SmallVector<Module *, 4> Stack;
+ Stack.push_back(Mod);
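+  // Process the module and its transitive exports with an explicit worklist,
+  // skipping modules that already have the requested visibility.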
+ while (!Stack.empty()) {
+ Mod = Stack.pop_back_val();
+
+ if (NameVisibility <= Mod->NameVisibility) {
+ // This module already has this level of visibility (or greater), so
+ // there is nothing more to do.
+ continue;
+ }
+
+ if (!Mod->isAvailable()) {
+ // Modules that aren't available cannot be made visible.
+ continue;
+ }
+
+ // Update the module's name visibility.
+ Mod->NameVisibility = NameVisibility;
+
+ // If we've already deserialized any names from this module,
+ // mark them as visible.
+ HiddenNamesMapType::iterator Hidden = HiddenNamesMap.find(Mod);
+ if (Hidden != HiddenNamesMap.end()) {
+ auto HiddenNames = std::move(*Hidden);
+ HiddenNamesMap.erase(Hidden);
+ makeNamesVisible(HiddenNames.second, HiddenNames.first);
+ assert(HiddenNamesMap.find(Mod) == HiddenNamesMap.end() &&
+ "making names visible added hidden names");
+ }
+
+ // Push any exported modules onto the stack to be marked as visible.
+ SmallVector<Module *, 16> Exports;
+ Mod->getExportedModules(Exports);
+ for (SmallVectorImpl<Module *>::iterator
+ I = Exports.begin(), E = Exports.end(); I != E; ++I) {
+ Module *Exported = *I;
+ if (Visited.insert(Exported).second)
+ Stack.push_back(Exported);
+ }
+ }
+}
+
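+/// \brief Attempt to load the global module index if it has not been loaded
+/// already, returning false if the index is now available and true otherwise.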
+bool ASTReader::loadGlobalIndex() {
+ if (GlobalIndex)
+ return false;
+
+ if (TriedLoadingGlobalIndex || !UseGlobalIndex ||
+ !Context.getLangOpts().Modules)
+ return true;
+
+ // Try to load the global index.
+ TriedLoadingGlobalIndex = true;
+ StringRef ModuleCachePath
+ = getPreprocessor().getHeaderSearchInfo().getModuleCachePath();
+ std::pair<GlobalModuleIndex *, GlobalModuleIndex::ErrorCode> Result
+ = GlobalModuleIndex::readIndex(ModuleCachePath);
+ if (!Result.first)
+ return true;
+
+ GlobalIndex.reset(Result.first);
+ ModuleMgr.setGlobalIndex(GlobalIndex.get());
+ return false;
+}
+
+bool ASTReader::isGlobalIndexUnavailable() const {
+ return Context.getLangOpts().Modules && UseGlobalIndex &&
+ !hasGlobalIndex() && TriedLoadingGlobalIndex;
+}
+
+static void updateModuleTimestamp(ModuleFile &MF) {
+  // Overwrite the timestamp file contents so that the file's mtime changes.
+ std::string TimestampFilename = MF.getTimestampFilename();
+ std::error_code EC;
+ llvm::raw_fd_ostream OS(TimestampFilename, EC, llvm::sys::fs::F_Text);
+ if (EC)
+ return;
+ OS << "Timestamp file\n";
+}
+
+/// \brief Given a cursor at the start of an AST file, scan ahead and drop the
+/// cursor into the start of the given block ID, returning false on success and
+/// true on failure.
+static bool SkipCursorToBlock(BitstreamCursor &Cursor, unsigned BlockID) {
+ while (1) {
+ llvm::BitstreamEntry Entry = Cursor.advance();
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::Error:
+ case llvm::BitstreamEntry::EndBlock:
+ return true;
+
+ case llvm::BitstreamEntry::Record:
+ // Ignore top-level records.
+ Cursor.skipRecord(Entry.ID);
+ break;
+
+ case llvm::BitstreamEntry::SubBlock:
+ if (Entry.ID == BlockID) {
+ if (Cursor.EnterSubBlock(BlockID))
+ return true;
+ // Found it!
+ return false;
+ }
+
+ if (Cursor.SkipBlock())
+ return true;
+ }
+ }
+}
+
+ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName,
+ ModuleKind Type,
+ SourceLocation ImportLoc,
+ unsigned ClientLoadCapabilities) {
+ llvm::SaveAndRestore<SourceLocation>
+ SetCurImportLocRAII(CurrentImportLoc, ImportLoc);
+
+ // Defer any pending actions until we get to the end of reading the AST file.
+ Deserializing AnASTFile(this);
+
+ // Bump the generation number.
+ unsigned PreviousGeneration = incrementGeneration(Context);
+
+ unsigned NumModules = ModuleMgr.size();
+ SmallVector<ImportedModule, 4> Loaded;
+ switch(ASTReadResult ReadResult = ReadASTCore(FileName, Type, ImportLoc,
+ /*ImportedBy=*/nullptr, Loaded,
+ 0, 0, 0,
+ ClientLoadCapabilities)) {
+ case Failure:
+ case Missing:
+ case OutOfDate:
+ case VersionMismatch:
+ case ConfigurationMismatch:
+ case HadErrors: {
+ llvm::SmallPtrSet<ModuleFile *, 4> LoadedSet;
+ for (const ImportedModule &IM : Loaded)
+ LoadedSet.insert(IM.Mod);
+
+ ModuleMgr.removeModules(ModuleMgr.begin() + NumModules, ModuleMgr.end(),
+ LoadedSet,
+ Context.getLangOpts().Modules
+ ? &PP.getHeaderSearchInfo().getModuleMap()
+ : nullptr);
+
+ // If we find that any modules are unusable, the global index is going
+ // to be out-of-date. Just remove it.
+ GlobalIndex.reset();
+ ModuleMgr.setGlobalIndex(nullptr);
+ return ReadResult;
+ }
+ case Success:
+ break;
+ }
+
+ // Here comes stuff that we only do once the entire chain is loaded.
+
+ // Load the AST blocks of all of the modules that we loaded.
+ for (SmallVectorImpl<ImportedModule>::iterator M = Loaded.begin(),
+ MEnd = Loaded.end();
+ M != MEnd; ++M) {
+ ModuleFile &F = *M->Mod;
+
+ // Read the AST block.
+ if (ASTReadResult Result = ReadASTBlock(F, ClientLoadCapabilities))
+ return Result;
+
+ // Read the extension blocks.
+ while (!SkipCursorToBlock(F.Stream, EXTENSION_BLOCK_ID)) {
+ if (ASTReadResult Result = ReadExtensionBlock(F))
+ return Result;
+ }
+
+    // Once read, set the ModuleFile's base bit offset and add its size to the
+    // running total size in bits of all files we've seen.
+ F.GlobalBitOffset = TotalModulesSizeInBits;
+ TotalModulesSizeInBits += F.SizeInBits;
+ GlobalBitOffsetsMap.insert(std::make_pair(F.GlobalBitOffset, &F));
+
+ // Preload SLocEntries.
+ for (unsigned I = 0, N = F.PreloadSLocEntries.size(); I != N; ++I) {
+ int Index = int(F.PreloadSLocEntries[I] - 1) + F.SLocEntryBaseID;
+      // Load it through the SourceManager rather than calling ReadSLocEntry()
+      // directly: the entry may have already been loaded, in which case
+      // calling ReadSLocEntry() directly would trigger an assertion in
+      // SourceManager.
+ SourceMgr.getLoadedSLocEntryByID(Index);
+ }
+
+ // Preload all the pending interesting identifiers by marking them out of
+ // date.
+ for (auto Offset : F.PreloadIdentifierOffsets) {
+ const unsigned char *Data = reinterpret_cast<const unsigned char *>(
+ F.IdentifierTableData + Offset);
+
+ ASTIdentifierLookupTrait Trait(*this, F);
+ auto KeyDataLen = Trait.ReadKeyDataLength(Data);
+ auto Key = Trait.ReadKey(Data, KeyDataLen.first);
+ auto &II = PP.getIdentifierTable().getOwn(Key);
+ II.setOutOfDate(true);
+
+ // Mark this identifier as being from an AST file so that we can track
+ // whether we need to serialize it.
+ if (!II.isFromAST()) {
+ II.setIsFromAST();
+ bool IsModule = PP.getCurrentModule() != nullptr;
+ if (isInterestingIdentifier(*this, II, IsModule))
+ II.setChangedSinceDeserialization();
+ }
+
+ // Associate the ID with the identifier so that the writer can reuse it.
+ auto ID = Trait.ReadIdentifierID(Data + KeyDataLen.first);
+ SetIdentifierInfo(ID, &II);
+ }
+ }
+
+  // Set up the import locations and notify the module manager that we've
+ // committed to these module files.
+ for (SmallVectorImpl<ImportedModule>::iterator M = Loaded.begin(),
+ MEnd = Loaded.end();
+ M != MEnd; ++M) {
+ ModuleFile &F = *M->Mod;
+
+ ModuleMgr.moduleFileAccepted(&F);
+
+ // Set the import location.
+ F.DirectImportLoc = ImportLoc;
+ if (!M->ImportedBy)
+ F.ImportLoc = M->ImportLoc;
+ else
+ F.ImportLoc = ReadSourceLocation(*M->ImportedBy,
+ M->ImportLoc.getRawEncoding());
+ }
+
+ if (!Context.getLangOpts().CPlusPlus ||
+ (Type != MK_ImplicitModule && Type != MK_ExplicitModule)) {
+ // Mark all of the identifiers in the identifier table as being out of date,
+ // so that various accessors know to check the loaded modules when the
+ // identifier is used.
+ //
+ // For C++ modules, we don't need information on many identifiers (just
+ // those that provide macros or are poisoned), so we mark all of
+ // the interesting ones via PreloadIdentifierOffsets.
+ for (IdentifierTable::iterator Id = PP.getIdentifierTable().begin(),
+ IdEnd = PP.getIdentifierTable().end();
+ Id != IdEnd; ++Id)
+ Id->second->setOutOfDate(true);
+ }
+
+ // Resolve any unresolved module exports.
+ for (unsigned I = 0, N = UnresolvedModuleRefs.size(); I != N; ++I) {
+ UnresolvedModuleRef &Unresolved = UnresolvedModuleRefs[I];
+ SubmoduleID GlobalID = getGlobalSubmoduleID(*Unresolved.File,Unresolved.ID);
+ Module *ResolvedMod = getSubmodule(GlobalID);
+
+ switch (Unresolved.Kind) {
+ case UnresolvedModuleRef::Conflict:
+ if (ResolvedMod) {
+ Module::Conflict Conflict;
+ Conflict.Other = ResolvedMod;
+ Conflict.Message = Unresolved.String.str();
+ Unresolved.Mod->Conflicts.push_back(Conflict);
+ }
+ continue;
+
+ case UnresolvedModuleRef::Import:
+ if (ResolvedMod)
+ Unresolved.Mod->Imports.insert(ResolvedMod);
+ continue;
+
+ case UnresolvedModuleRef::Export:
+ if (ResolvedMod || Unresolved.IsWildcard)
+ Unresolved.Mod->Exports.push_back(
+ Module::ExportDecl(ResolvedMod, Unresolved.IsWildcard));
+ continue;
+ }
+ }
+ UnresolvedModuleRefs.clear();
+
+ // FIXME: How do we load the 'use'd modules? They may not be submodules.
+ // Might be unnecessary as use declarations are only used to build the
+ // module itself.
+
+ InitializeContext();
+
+ if (SemaObj)
+ UpdateSema();
+
+ if (DeserializationListener)
+ DeserializationListener->ReaderInitialized(this);
+
+ ModuleFile &PrimaryModule = ModuleMgr.getPrimaryModule();
+ if (PrimaryModule.OriginalSourceFileID.isValid()) {
+ PrimaryModule.OriginalSourceFileID
+ = FileID::get(PrimaryModule.SLocEntryBaseID
+ + PrimaryModule.OriginalSourceFileID.getOpaqueValue() - 1);
+
+    // If this AST file is a precompiled preamble, then set the
+    // preamble file ID of the source manager to the source file from
+    // which the preamble was built.
+ if (Type == MK_Preamble) {
+ SourceMgr.setPreambleFileID(PrimaryModule.OriginalSourceFileID);
+ } else if (Type == MK_MainFile) {
+ SourceMgr.setMainFileID(PrimaryModule.OriginalSourceFileID);
+ }
+ }
+
+ // For any Objective-C class definitions we have already loaded, make sure
+ // that we load any additional categories.
+ for (unsigned I = 0, N = ObjCClassesLoaded.size(); I != N; ++I) {
+ loadObjCCategories(ObjCClassesLoaded[I]->getGlobalID(),
+ ObjCClassesLoaded[I],
+ PreviousGeneration);
+ }
+
+ if (PP.getHeaderSearchInfo()
+ .getHeaderSearchOpts()
+ .ModulesValidateOncePerBuildSession) {
+ // Now we are certain that the module and all modules it depends on are
+ // up to date. Create or update timestamp files for modules that are
+ // located in the module cache (not for PCH files that could be anywhere
+ // in the filesystem).
+ for (unsigned I = 0, N = Loaded.size(); I != N; ++I) {
+ ImportedModule &M = Loaded[I];
+ if (M.Mod->Kind == MK_ImplicitModule) {
+ updateModuleTimestamp(*M.Mod);
+ }
+ }
+ }
+
+ return Success;
+}
+
+static ASTFileSignature readASTFileSignature(llvm::BitstreamReader &StreamFile);
+
+/// \brief Whether \p Stream starts with the AST/PCH file magic number 'CPCH'.
+static bool startsWithASTFileMagic(BitstreamCursor &Stream) {
+ return Stream.Read(8) == 'C' &&
+ Stream.Read(8) == 'P' &&
+ Stream.Read(8) == 'C' &&
+ Stream.Read(8) == 'H';
+}
+
+static unsigned moduleKindForDiagnostic(ModuleKind Kind) {
+ switch (Kind) {
+ case MK_PCH:
+ return 0; // PCH
+ case MK_ImplicitModule:
+ case MK_ExplicitModule:
+ return 1; // module
+ case MK_MainFile:
+ case MK_Preamble:
+ return 2; // main source file
+ }
+ llvm_unreachable("unknown module kind");
+}
+
+ASTReader::ASTReadResult
+ASTReader::ReadASTCore(StringRef FileName,
+ ModuleKind Type,
+ SourceLocation ImportLoc,
+ ModuleFile *ImportedBy,
+ SmallVectorImpl<ImportedModule> &Loaded,
+ off_t ExpectedSize, time_t ExpectedModTime,
+ ASTFileSignature ExpectedSignature,
+ unsigned ClientLoadCapabilities) {
+ ModuleFile *M;
+ std::string ErrorStr;
+ ModuleManager::AddModuleResult AddResult
+ = ModuleMgr.addModule(FileName, Type, ImportLoc, ImportedBy,
+ getGeneration(), ExpectedSize, ExpectedModTime,
+ ExpectedSignature, readASTFileSignature,
+ M, ErrorStr);
+
+ switch (AddResult) {
+ case ModuleManager::AlreadyLoaded:
+ return Success;
+
+ case ModuleManager::NewlyLoaded:
+ // Load module file below.
+ break;
+
+ case ModuleManager::Missing:
+ // The module file was missing; if the client can handle that, return
+ // it.
+ if (ClientLoadCapabilities & ARR_Missing)
+ return Missing;
+
+ // Otherwise, return an error.
+ Diag(diag::err_module_file_not_found) << moduleKindForDiagnostic(Type)
+ << FileName << ErrorStr.empty()
+ << ErrorStr;
+ return Failure;
+
+ case ModuleManager::OutOfDate:
+ // We couldn't load the module file because it is out-of-date. If the
+ // client can handle out-of-date, return it.
+ if (ClientLoadCapabilities & ARR_OutOfDate)
+ return OutOfDate;
+
+ // Otherwise, return an error.
+ Diag(diag::err_module_file_out_of_date) << moduleKindForDiagnostic(Type)
+ << FileName << ErrorStr.empty()
+ << ErrorStr;
+ return Failure;
+ }
+
+ assert(M && "Missing module file");
+
+ // FIXME: This seems rather a hack. Should CurrentDir be part of the
+ // module?
+ if (FileName != "-") {
+ CurrentDir = llvm::sys::path::parent_path(FileName);
+ if (CurrentDir.empty()) CurrentDir = ".";
+ }
+
+ ModuleFile &F = *M;
+ BitstreamCursor &Stream = F.Stream;
+ PCHContainerRdr.ExtractPCH(F.Buffer->getMemBufferRef(), F.StreamFile);
+ Stream.init(&F.StreamFile);
+ F.SizeInBits = F.Buffer->getBufferSize() * 8;
+
+ // Sniff for the signature.
+ if (!startsWithASTFileMagic(Stream)) {
+ Diag(diag::err_module_file_invalid) << moduleKindForDiagnostic(Type)
+ << FileName;
+ return Failure;
+ }
+
+ // This is used for compatibility with older PCH formats.
+ bool HaveReadControlBlock = false;
+ while (1) {
+ llvm::BitstreamEntry Entry = Stream.advance();
+
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::Error:
+ case llvm::BitstreamEntry::Record:
+ case llvm::BitstreamEntry::EndBlock:
+ Error("invalid record at top-level of AST file");
+ return Failure;
+
+ case llvm::BitstreamEntry::SubBlock:
+ break;
+ }
+
+ switch (Entry.ID) {
+ case CONTROL_BLOCK_ID:
+ HaveReadControlBlock = true;
+ switch (ReadControlBlock(F, Loaded, ImportedBy, ClientLoadCapabilities)) {
+ case Success:
+ // Check that we didn't try to load a non-module AST file as a module.
+ //
+ // FIXME: Should we also perform the converse check? Loading a module as
+ // a PCH file sort of works, but it's a bit wonky.
+ if ((Type == MK_ImplicitModule || Type == MK_ExplicitModule) &&
+ F.ModuleName.empty()) {
+ auto Result = (Type == MK_ImplicitModule) ? OutOfDate : Failure;
+ if (Result != OutOfDate ||
+ (ClientLoadCapabilities & ARR_OutOfDate) == 0)
+ Diag(diag::err_module_file_not_module) << FileName;
+ return Result;
+ }
+ break;
+
+ case Failure: return Failure;
+ case Missing: return Missing;
+ case OutOfDate: return OutOfDate;
+ case VersionMismatch: return VersionMismatch;
+ case ConfigurationMismatch: return ConfigurationMismatch;
+ case HadErrors: return HadErrors;
+ }
+ break;
+
+ case AST_BLOCK_ID:
+ if (!HaveReadControlBlock) {
+ if ((ClientLoadCapabilities & ARR_VersionMismatch) == 0)
+ Diag(diag::err_pch_version_too_old);
+ return VersionMismatch;
+ }
+
+ // Record that we've loaded this module.
+ Loaded.push_back(ImportedModule(M, ImportedBy, ImportLoc));
+ return Success;
+
+ default:
+ if (Stream.SkipBlock()) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+ break;
+ }
+ }
+
+ return Success;
+}
+
+/// Parse a record and blob containing module file extension metadata.
+static bool parseModuleFileExtensionMetadata(
+ const SmallVectorImpl<uint64_t> &Record,
+ StringRef Blob,
+ ModuleFileExtensionMetadata &Metadata) {
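+  // The record layout is [MajorVersion, MinorVersion, BlockNameLen,
+  // UserInfoLen]; the block name and then the user info are stored
+  // back-to-back in the blob.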
+ if (Record.size() < 4) return true;
+
+ Metadata.MajorVersion = Record[0];
+ Metadata.MinorVersion = Record[1];
+
+ unsigned BlockNameLen = Record[2];
+ unsigned UserInfoLen = Record[3];
+
+ if (BlockNameLen + UserInfoLen > Blob.size()) return true;
+
+ Metadata.BlockName = std::string(Blob.data(), Blob.data() + BlockNameLen);
+ Metadata.UserInfo = std::string(Blob.data() + BlockNameLen,
+ Blob.data() + BlockNameLen + UserInfoLen);
+ return false;
+}
+
+ASTReader::ASTReadResult ASTReader::ReadExtensionBlock(ModuleFile &F) {
+ BitstreamCursor &Stream = F.Stream;
+
+ RecordData Record;
+ while (true) {
+ llvm::BitstreamEntry Entry = Stream.advance();
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::SubBlock:
+ if (Stream.SkipBlock())
+ return Failure;
+
+ continue;
+
+ case llvm::BitstreamEntry::EndBlock:
+ return Success;
+
+ case llvm::BitstreamEntry::Error:
+ return HadErrors;
+
+ case llvm::BitstreamEntry::Record:
+ break;
+ }
+
+ Record.clear();
+ StringRef Blob;
+ unsigned RecCode = Stream.readRecord(Entry.ID, Record, &Blob);
+ switch (RecCode) {
+ case EXTENSION_METADATA: {
+ ModuleFileExtensionMetadata Metadata;
+ if (parseModuleFileExtensionMetadata(Record, Blob, Metadata))
+ return Failure;
+
+ // Find a module file extension with this block name.
+ auto Known = ModuleFileExtensions.find(Metadata.BlockName);
+ if (Known == ModuleFileExtensions.end()) break;
+
+ // Form a reader.
+ if (auto Reader = Known->second->createExtensionReader(Metadata, *this,
+ F, Stream)) {
+ F.ExtensionReaders.push_back(std::move(Reader));
+ }
+
+ break;
+ }
+ }
+ }
+
+ return Success;
+}
+
+void ASTReader::InitializeContext() {
+ // If there's a listener, notify them that we "read" the translation unit.
+ if (DeserializationListener)
+ DeserializationListener->DeclRead(PREDEF_DECL_TRANSLATION_UNIT_ID,
+ Context.getTranslationUnitDecl());
+
+ // FIXME: Find a better way to deal with collisions between these
+ // built-in types. Right now, we just ignore the problem.
+
+ // Load the special types.
+ if (SpecialTypes.size() >= NumSpecialTypeIDs) {
+ if (unsigned String = SpecialTypes[SPECIAL_TYPE_CF_CONSTANT_STRING]) {
+ if (!Context.CFConstantStringTypeDecl)
+ Context.setCFConstantStringType(GetType(String));
+ }
+
+ if (unsigned File = SpecialTypes[SPECIAL_TYPE_FILE]) {
+ QualType FileType = GetType(File);
+ if (FileType.isNull()) {
+ Error("FILE type is NULL");
+ return;
+ }
+
+ if (!Context.FILEDecl) {
+ if (const TypedefType *Typedef = FileType->getAs<TypedefType>())
+ Context.setFILEDecl(Typedef->getDecl());
+ else {
+ const TagType *Tag = FileType->getAs<TagType>();
+ if (!Tag) {
+ Error("Invalid FILE type in AST file");
+ return;
+ }
+ Context.setFILEDecl(Tag->getDecl());
+ }
+ }
+ }
+
+ if (unsigned Jmp_buf = SpecialTypes[SPECIAL_TYPE_JMP_BUF]) {
+ QualType Jmp_bufType = GetType(Jmp_buf);
+ if (Jmp_bufType.isNull()) {
+ Error("jmp_buf type is NULL");
+ return;
+ }
+
+ if (!Context.jmp_bufDecl) {
+ if (const TypedefType *Typedef = Jmp_bufType->getAs<TypedefType>())
+ Context.setjmp_bufDecl(Typedef->getDecl());
+ else {
+ const TagType *Tag = Jmp_bufType->getAs<TagType>();
+ if (!Tag) {
+ Error("Invalid jmp_buf type in AST file");
+ return;
+ }
+ Context.setjmp_bufDecl(Tag->getDecl());
+ }
+ }
+ }
+
+ if (unsigned Sigjmp_buf = SpecialTypes[SPECIAL_TYPE_SIGJMP_BUF]) {
+ QualType Sigjmp_bufType = GetType(Sigjmp_buf);
+ if (Sigjmp_bufType.isNull()) {
+ Error("sigjmp_buf type is NULL");
+ return;
+ }
+
+ if (!Context.sigjmp_bufDecl) {
+ if (const TypedefType *Typedef = Sigjmp_bufType->getAs<TypedefType>())
+ Context.setsigjmp_bufDecl(Typedef->getDecl());
+ else {
+ const TagType *Tag = Sigjmp_bufType->getAs<TagType>();
+ assert(Tag && "Invalid sigjmp_buf type in AST file");
+ Context.setsigjmp_bufDecl(Tag->getDecl());
+ }
+ }
+ }
+
+ if (unsigned ObjCIdRedef
+ = SpecialTypes[SPECIAL_TYPE_OBJC_ID_REDEFINITION]) {
+ if (Context.ObjCIdRedefinitionType.isNull())
+ Context.ObjCIdRedefinitionType = GetType(ObjCIdRedef);
+ }
+
+ if (unsigned ObjCClassRedef
+ = SpecialTypes[SPECIAL_TYPE_OBJC_CLASS_REDEFINITION]) {
+ if (Context.ObjCClassRedefinitionType.isNull())
+ Context.ObjCClassRedefinitionType = GetType(ObjCClassRedef);
+ }
+
+ if (unsigned ObjCSelRedef
+ = SpecialTypes[SPECIAL_TYPE_OBJC_SEL_REDEFINITION]) {
+ if (Context.ObjCSelRedefinitionType.isNull())
+ Context.ObjCSelRedefinitionType = GetType(ObjCSelRedef);
+ }
+
+ if (unsigned Ucontext_t = SpecialTypes[SPECIAL_TYPE_UCONTEXT_T]) {
+ QualType Ucontext_tType = GetType(Ucontext_t);
+ if (Ucontext_tType.isNull()) {
+ Error("ucontext_t type is NULL");
+ return;
+ }
+
+ if (!Context.ucontext_tDecl) {
+ if (const TypedefType *Typedef = Ucontext_tType->getAs<TypedefType>())
+ Context.setucontext_tDecl(Typedef->getDecl());
+ else {
+ const TagType *Tag = Ucontext_tType->getAs<TagType>();
+ assert(Tag && "Invalid ucontext_t type in AST file");
+ Context.setucontext_tDecl(Tag->getDecl());
+ }
+ }
+ }
+ }
+
+ ReadPragmaDiagnosticMappings(Context.getDiagnostics());
+
+ // If there were any CUDA special declarations, deserialize them.
+ if (!CUDASpecialDeclRefs.empty()) {
+ assert(CUDASpecialDeclRefs.size() == 1 && "More decl refs than expected!");
+ Context.setcudaConfigureCallDecl(
+ cast<FunctionDecl>(GetDecl(CUDASpecialDeclRefs[0])));
+ }
+
+ // Re-export any modules that were imported by a non-module AST file.
+ // FIXME: This does not make macro-only imports visible again.
+ for (auto &Import : ImportedModules) {
+ if (Module *Imported = getSubmodule(Import.ID)) {
+ makeModuleVisible(Imported, Module::AllVisible,
+ /*ImportLoc=*/Import.ImportLoc);
+ PP.makeModuleVisible(Imported, Import.ImportLoc);
+ }
+ }
+ ImportedModules.clear();
+}
+
+void ASTReader::finalizeForWriting() {
+ // Nothing to do for now.
+}
+
+/// \brief Read and return the signature record from \p StreamFile's control
+/// block, or 0 if there is none.
+static ASTFileSignature readASTFileSignature(llvm::BitstreamReader &StreamFile){
+ BitstreamCursor Stream(StreamFile);
+ if (!startsWithASTFileMagic(Stream))
+ return 0;
+
+ // Scan for the CONTROL_BLOCK_ID block.
+ if (SkipCursorToBlock(Stream, CONTROL_BLOCK_ID))
+ return 0;
+
+ // Scan for SIGNATURE inside the control block.
+ ASTReader::RecordData Record;
+ while (1) {
+ llvm::BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+ if (Entry.Kind == llvm::BitstreamEntry::EndBlock ||
+ Entry.Kind != llvm::BitstreamEntry::Record)
+ return 0;
+
+ Record.clear();
+ StringRef Blob;
+ if (SIGNATURE == Stream.readRecord(Entry.ID, Record, &Blob))
+ return Record[0];
+ }
+}
+
+/// \brief Retrieve the name of the original source file directly
+/// from the AST file, without actually loading the AST file.
+std::string ASTReader::getOriginalSourceFile(
+ const std::string &ASTFileName, FileManager &FileMgr,
+ const PCHContainerReader &PCHContainerRdr, DiagnosticsEngine &Diags) {
+ // Open the AST file.
+ auto Buffer = FileMgr.getBufferForFile(ASTFileName);
+ if (!Buffer) {
+ Diags.Report(diag::err_fe_unable_to_read_pch_file)
+ << ASTFileName << Buffer.getError().message();
+ return std::string();
+ }
+
+ // Initialize the stream
+ llvm::BitstreamReader StreamFile;
+ PCHContainerRdr.ExtractPCH((*Buffer)->getMemBufferRef(), StreamFile);
+ BitstreamCursor Stream(StreamFile);
+
+ // Sniff for the signature.
+ if (!startsWithASTFileMagic(Stream)) {
+ Diags.Report(diag::err_fe_not_a_pch_file) << ASTFileName;
+ return std::string();
+ }
+
+ // Scan for the CONTROL_BLOCK_ID block.
+ if (SkipCursorToBlock(Stream, CONTROL_BLOCK_ID)) {
+ Diags.Report(diag::err_fe_pch_malformed_block) << ASTFileName;
+ return std::string();
+ }
+
+ // Scan for ORIGINAL_FILE inside the control block.
+ RecordData Record;
+ while (1) {
+ llvm::BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+ if (Entry.Kind == llvm::BitstreamEntry::EndBlock)
+ return std::string();
+
+ if (Entry.Kind != llvm::BitstreamEntry::Record) {
+ Diags.Report(diag::err_fe_pch_malformed_block) << ASTFileName;
+ return std::string();
+ }
+
+ Record.clear();
+ StringRef Blob;
+ if (Stream.readRecord(Entry.ID, Record, &Blob) == ORIGINAL_FILE)
+ return Blob.str();
+ }
+}
+
+namespace {
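+  // Checks an AST file's options against the given existing options, passing
+  // a null diagnostics pointer to each check so that mismatches are detected
+  // silently rather than reported.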
+ class SimplePCHValidator : public ASTReaderListener {
+ const LangOptions &ExistingLangOpts;
+ const TargetOptions &ExistingTargetOpts;
+ const PreprocessorOptions &ExistingPPOpts;
+ std::string ExistingModuleCachePath;
+ FileManager &FileMgr;
+
+ public:
+ SimplePCHValidator(const LangOptions &ExistingLangOpts,
+ const TargetOptions &ExistingTargetOpts,
+ const PreprocessorOptions &ExistingPPOpts,
+ StringRef ExistingModuleCachePath,
+ FileManager &FileMgr)
+ : ExistingLangOpts(ExistingLangOpts),
+ ExistingTargetOpts(ExistingTargetOpts),
+ ExistingPPOpts(ExistingPPOpts),
+ ExistingModuleCachePath(ExistingModuleCachePath),
+ FileMgr(FileMgr)
+ {
+ }
+
+ bool ReadLanguageOptions(const LangOptions &LangOpts, bool Complain,
+ bool AllowCompatibleDifferences) override {
+ return checkLanguageOptions(ExistingLangOpts, LangOpts, nullptr,
+ AllowCompatibleDifferences);
+ }
+ bool ReadTargetOptions(const TargetOptions &TargetOpts, bool Complain,
+ bool AllowCompatibleDifferences) override {
+ return checkTargetOptions(ExistingTargetOpts, TargetOpts, nullptr,
+ AllowCompatibleDifferences);
+ }
+ bool ReadHeaderSearchOptions(const HeaderSearchOptions &HSOpts,
+ StringRef SpecificModuleCachePath,
+ bool Complain) override {
+ return checkHeaderSearchOptions(HSOpts, SpecificModuleCachePath,
+ ExistingModuleCachePath,
+ nullptr, ExistingLangOpts);
+ }
+ bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
+ bool Complain,
+ std::string &SuggestedPredefines) override {
+ return checkPreprocessorOptions(ExistingPPOpts, PPOpts, nullptr, FileMgr,
+ SuggestedPredefines, ExistingLangOpts);
+ }
+ };
+}
+
+bool ASTReader::readASTFileControlBlock(
+ StringRef Filename, FileManager &FileMgr,
+ const PCHContainerReader &PCHContainerRdr,
+ bool FindModuleFileExtensions,
+ ASTReaderListener &Listener) {
+ // Open the AST file.
+ // FIXME: This allows use of the VFS; we do not allow use of the
+ // VFS when actually loading a module.
+ auto Buffer = FileMgr.getBufferForFile(Filename);
+ if (!Buffer) {
+ return true;
+ }
+
+ // Initialize the stream
+ llvm::BitstreamReader StreamFile;
+ PCHContainerRdr.ExtractPCH((*Buffer)->getMemBufferRef(), StreamFile);
+ BitstreamCursor Stream(StreamFile);
+
+ // Sniff for the signature.
+ if (!startsWithASTFileMagic(Stream))
+ return true;
+
+ // Scan for the CONTROL_BLOCK_ID block.
+ if (SkipCursorToBlock(Stream, CONTROL_BLOCK_ID))
+ return true;
+
+ bool NeedsInputFiles = Listener.needsInputFileVisitation();
+ bool NeedsSystemInputFiles = Listener.needsSystemInputFileVisitation();
+ bool NeedsImports = Listener.needsImportVisitation();
+ BitstreamCursor InputFilesCursor;
+
+ RecordData Record;
+ std::string ModuleDir;
+ bool DoneWithControlBlock = false;
+ while (!DoneWithControlBlock) {
+ llvm::BitstreamEntry Entry = Stream.advance();
+
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::SubBlock: {
+ switch (Entry.ID) {
+ case OPTIONS_BLOCK_ID: {
+ std::string IgnoredSuggestedPredefines;
+ if (ReadOptionsBlock(Stream, ARR_ConfigurationMismatch | ARR_OutOfDate,
+ /*AllowCompatibleConfigurationMismatch*/ false,
+ Listener, IgnoredSuggestedPredefines) != Success)
+ return true;
+ break;
+ }
+
+ case INPUT_FILES_BLOCK_ID:
+ InputFilesCursor = Stream;
+ if (Stream.SkipBlock() ||
+ (NeedsInputFiles &&
+ ReadBlockAbbrevs(InputFilesCursor, INPUT_FILES_BLOCK_ID)))
+ return true;
+ break;
+
+ default:
+ if (Stream.SkipBlock())
+ return true;
+ break;
+ }
+
+ continue;
+ }
+
+ case llvm::BitstreamEntry::EndBlock:
+ DoneWithControlBlock = true;
+ break;
+
+ case llvm::BitstreamEntry::Error:
+ return true;
+
+ case llvm::BitstreamEntry::Record:
+ break;
+ }
+
+ if (DoneWithControlBlock) break;
+
+ Record.clear();
+ StringRef Blob;
+ unsigned RecCode = Stream.readRecord(Entry.ID, Record, &Blob);
+ switch ((ControlRecordTypes)RecCode) {
+ case METADATA: {
+ if (Record[0] != VERSION_MAJOR)
+ return true;
+
+ if (Listener.ReadFullVersionInformation(Blob))
+ return true;
+
+ break;
+ }
+ case MODULE_NAME:
+ Listener.ReadModuleName(Blob);
+ break;
+ case MODULE_DIRECTORY:
+ ModuleDir = Blob;
+ break;
+ case MODULE_MAP_FILE: {
+ unsigned Idx = 0;
+ auto Path = ReadString(Record, Idx);
+ ResolveImportedPath(Path, ModuleDir);
+ Listener.ReadModuleMapFile(Path);
+ break;
+ }
+ case INPUT_FILE_OFFSETS: {
+ if (!NeedsInputFiles)
+ break;
+
+ unsigned NumInputFiles = Record[0];
+ unsigned NumUserFiles = Record[1];
+ const uint64_t *InputFileOffs = (const uint64_t *)Blob.data();
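+      // The blob holds a table of bit offsets, one per input file, pointing
+      // at the corresponding INPUT_FILE records; jump to each offset to read
+      // the record it references.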
+ for (unsigned I = 0; I != NumInputFiles; ++I) {
+ // Go find this input file.
+ bool isSystemFile = I >= NumUserFiles;
+
+ if (isSystemFile && !NeedsSystemInputFiles)
+ break; // the rest are system input files
+
+ BitstreamCursor &Cursor = InputFilesCursor;
+ SavedStreamPosition SavedPosition(Cursor);
+ Cursor.JumpToBit(InputFileOffs[I]);
+
+ unsigned Code = Cursor.ReadCode();
+ RecordData Record;
+ StringRef Blob;
+ bool shouldContinue = false;
+ switch ((InputFileRecordTypes)Cursor.readRecord(Code, Record, &Blob)) {
+ case INPUT_FILE:
+ bool Overridden = static_cast<bool>(Record[3]);
+ std::string Filename = Blob;
+ ResolveImportedPath(Filename, ModuleDir);
+ shouldContinue = Listener.visitInputFile(
+ Filename, isSystemFile, Overridden, /*IsExplicitModule*/false);
+ break;
+ }
+ if (!shouldContinue)
+ break;
+ }
+ break;
+ }
+
+ case IMPORTS: {
+ if (!NeedsImports)
+ break;
+
+ unsigned Idx = 0, N = Record.size();
+ while (Idx < N) {
+ // Read information about the AST file.
+        Idx += 5; // Kind, ImportLoc, Size, ModTime, Signature
+ std::string Filename = ReadString(Record, Idx);
+ ResolveImportedPath(Filename, ModuleDir);
+ Listener.visitImport(Filename);
+ }
+ break;
+ }
+
+ default:
+ // No other validation to perform.
+ break;
+ }
+ }
+
+ // Look for module file extension blocks, if requested.
+ if (FindModuleFileExtensions) {
+ while (!SkipCursorToBlock(Stream, EXTENSION_BLOCK_ID)) {
+ bool DoneWithExtensionBlock = false;
+ while (!DoneWithExtensionBlock) {
+ llvm::BitstreamEntry Entry = Stream.advance();
+
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::SubBlock:
+ if (Stream.SkipBlock())
+ return true;
+
+ continue;
+
+ case llvm::BitstreamEntry::EndBlock:
+ DoneWithExtensionBlock = true;
+ continue;
+
+ case llvm::BitstreamEntry::Error:
+ return true;
+
+ case llvm::BitstreamEntry::Record:
+ break;
+ }
+
+ Record.clear();
+ StringRef Blob;
+ unsigned RecCode = Stream.readRecord(Entry.ID, Record, &Blob);
+ switch (RecCode) {
+ case EXTENSION_METADATA: {
+ ModuleFileExtensionMetadata Metadata;
+ if (parseModuleFileExtensionMetadata(Record, Blob, Metadata))
+ return true;
+
+ Listener.readModuleFileExtension(Metadata);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
+bool ASTReader::isAcceptableASTFile(
+ StringRef Filename, FileManager &FileMgr,
+ const PCHContainerReader &PCHContainerRdr, const LangOptions &LangOpts,
+ const TargetOptions &TargetOpts, const PreprocessorOptions &PPOpts,
+ std::string ExistingModuleCachePath) {
+ SimplePCHValidator validator(LangOpts, TargetOpts, PPOpts,
+ ExistingModuleCachePath, FileMgr);
+ return !readASTFileControlBlock(Filename, FileMgr, PCHContainerRdr,
+ /*FindModuleFileExtensions=*/false,
+ validator);
+}
+
+ASTReader::ASTReadResult
+ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
+ // Enter the submodule block.
+ if (F.Stream.EnterSubBlock(SUBMODULE_BLOCK_ID)) {
+ Error("malformed submodule block record in AST file");
+ return Failure;
+ }
+
+ ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();
+ bool First = true;
+ Module *CurrentModule = nullptr;
+ RecordData Record;
+ while (true) {
+ llvm::BitstreamEntry Entry = F.Stream.advanceSkippingSubblocks();
+
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::SubBlock: // Handled for us already.
+ case llvm::BitstreamEntry::Error:
+ Error("malformed block record in AST file");
+ return Failure;
+ case llvm::BitstreamEntry::EndBlock:
+ return Success;
+ case llvm::BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ // Read a record.
+ StringRef Blob;
+ Record.clear();
+ auto Kind = F.Stream.readRecord(Entry.ID, Record, &Blob);
+
+ if ((Kind == SUBMODULE_METADATA) != First) {
+ Error("submodule metadata record should be at beginning of block");
+ return Failure;
+ }
+ First = false;
+
+ // Submodule information is only valid if we have a current module.
+ // FIXME: Should we error on these cases?
+ if (!CurrentModule && Kind != SUBMODULE_METADATA &&
+ Kind != SUBMODULE_DEFINITION)
+ continue;
+
+ switch (Kind) {
+ default: // Default behavior: ignore.
+ break;
+
+ case SUBMODULE_DEFINITION: {
+ if (Record.size() < 8) {
+ Error("malformed module definition");
+ return Failure;
+ }
+
+ StringRef Name = Blob;
+ unsigned Idx = 0;
+ SubmoduleID GlobalID = getGlobalSubmoduleID(F, Record[Idx++]);
+ SubmoduleID Parent = getGlobalSubmoduleID(F, Record[Idx++]);
+ bool IsFramework = Record[Idx++];
+ bool IsExplicit = Record[Idx++];
+ bool IsSystem = Record[Idx++];
+ bool IsExternC = Record[Idx++];
+ bool InferSubmodules = Record[Idx++];
+ bool InferExplicitSubmodules = Record[Idx++];
+ bool InferExportWildcard = Record[Idx++];
+ bool ConfigMacrosExhaustive = Record[Idx++];
+
+ Module *ParentModule = nullptr;
+ if (Parent)
+ ParentModule = getSubmodule(Parent);
+
+ // Retrieve this (sub)module from the module map, creating it if
+ // necessary.
+ CurrentModule = ModMap.findOrCreateModule(Name, ParentModule, IsFramework,
+ IsExplicit).first;
+
+ // FIXME: set the definition loc for CurrentModule, or call
+ // ModMap.setInferredModuleAllowedBy()
+
+ SubmoduleID GlobalIndex = GlobalID - NUM_PREDEF_SUBMODULE_IDS;
+ if (GlobalIndex >= SubmodulesLoaded.size() ||
+ SubmodulesLoaded[GlobalIndex]) {
+ Error("too many submodules");
+ return Failure;
+ }
+
+ if (!ParentModule) {
+ if (const FileEntry *CurFile = CurrentModule->getASTFile()) {
+ if (CurFile != F.File) {
+ if (!Diags.isDiagnosticInFlight()) {
+ Diag(diag::err_module_file_conflict)
+ << CurrentModule->getTopLevelModuleName()
+ << CurFile->getName()
+ << F.File->getName();
+ }
+ return Failure;
+ }
+ }
+
+ CurrentModule->setASTFile(F.File);
+ }
+
+ CurrentModule->Signature = F.Signature;
+ CurrentModule->IsFromModuleFile = true;
+ CurrentModule->IsSystem = IsSystem || CurrentModule->IsSystem;
+ CurrentModule->IsExternC = IsExternC;
+ CurrentModule->InferSubmodules = InferSubmodules;
+ CurrentModule->InferExplicitSubmodules = InferExplicitSubmodules;
+ CurrentModule->InferExportWildcard = InferExportWildcard;
+ CurrentModule->ConfigMacrosExhaustive = ConfigMacrosExhaustive;
+ if (DeserializationListener)
+ DeserializationListener->ModuleRead(GlobalID, CurrentModule);
+
+ SubmodulesLoaded[GlobalIndex] = CurrentModule;
+
+ // Clear out data that will be replaced by what is the module file.
+ CurrentModule->LinkLibraries.clear();
+ CurrentModule->ConfigMacros.clear();
+ CurrentModule->UnresolvedConflicts.clear();
+ CurrentModule->Conflicts.clear();
+ break;
+ }
+
+ case SUBMODULE_UMBRELLA_HEADER: {
+ std::string Filename = Blob;
+ ResolveImportedPath(F, Filename);
+ if (auto *Umbrella = PP.getFileManager().getFile(Filename)) {
+ if (!CurrentModule->getUmbrellaHeader())
+ ModMap.setUmbrellaHeader(CurrentModule, Umbrella, Blob);
+ else if (CurrentModule->getUmbrellaHeader().Entry != Umbrella) {
+          // This can be a spurious difference caused by changing the VFS to
+          // point to a different copy of the file, and it is too late to
+          // rebuild safely.
+ // FIXME: If we wrote the virtual paths instead of the 'real' paths,
+ // after input file validation only real problems would remain and we
+ // could just error. For now, assume it's okay.
+ break;
+ }
+ }
+ break;
+ }
+
+ case SUBMODULE_HEADER:
+ case SUBMODULE_EXCLUDED_HEADER:
+ case SUBMODULE_PRIVATE_HEADER:
+ // We lazily associate headers with their modules via the HeaderInfo table.
+ // FIXME: Re-evaluate this section; maybe only store InputFile IDs instead
+ // of complete filenames or remove it entirely.
+ break;
+
+ case SUBMODULE_TEXTUAL_HEADER:
+ case SUBMODULE_PRIVATE_TEXTUAL_HEADER:
+ // FIXME: Textual headers are not marked in the HeaderInfo table. Load
+ // them here.
+ break;
+
+ case SUBMODULE_TOPHEADER: {
+ CurrentModule->addTopHeaderFilename(Blob);
+ break;
+ }
+
+ case SUBMODULE_UMBRELLA_DIR: {
+ std::string Dirname = Blob;
+ ResolveImportedPath(F, Dirname);
+ if (auto *Umbrella = PP.getFileManager().getDirectory(Dirname)) {
+ if (!CurrentModule->getUmbrellaDir())
+ ModMap.setUmbrellaDir(CurrentModule, Umbrella, Blob);
+ else if (CurrentModule->getUmbrellaDir().Entry != Umbrella) {
+ if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
+ Error("mismatched umbrella directories in submodule");
+ return OutOfDate;
+ }
+ }
+ break;
+ }
+
+ case SUBMODULE_METADATA: {
+ F.BaseSubmoduleID = getTotalNumSubmodules();
+ F.LocalNumSubmodules = Record[0];
+ unsigned LocalBaseSubmoduleID = Record[1];
+ if (F.LocalNumSubmodules > 0) {
+ // Introduce the global -> local mapping for submodules within this
+ // module.
+ GlobalSubmoduleMap.insert(std::make_pair(getTotalNumSubmodules()+1,&F));
+
+ // Introduce the local -> global mapping for submodules within this
+ // module.
+ F.SubmoduleRemap.insertOrReplace(
+ std::make_pair(LocalBaseSubmoduleID,
+ F.BaseSubmoduleID - LocalBaseSubmoduleID));
+
+ SubmodulesLoaded.resize(SubmodulesLoaded.size() + F.LocalNumSubmodules);
+ }
+ break;
+ }
+
+ case SUBMODULE_IMPORTS: {
+ for (unsigned Idx = 0; Idx != Record.size(); ++Idx) {
+ UnresolvedModuleRef Unresolved;
+ Unresolved.File = &F;
+ Unresolved.Mod = CurrentModule;
+ Unresolved.ID = Record[Idx];
+ Unresolved.Kind = UnresolvedModuleRef::Import;
+ Unresolved.IsWildcard = false;
+ UnresolvedModuleRefs.push_back(Unresolved);
+ }
+ break;
+ }
+
+ case SUBMODULE_EXPORTS: {
+ for (unsigned Idx = 0; Idx + 1 < Record.size(); Idx += 2) {
+ UnresolvedModuleRef Unresolved;
+ Unresolved.File = &F;
+ Unresolved.Mod = CurrentModule;
+ Unresolved.ID = Record[Idx];
+ Unresolved.Kind = UnresolvedModuleRef::Export;
+ Unresolved.IsWildcard = Record[Idx + 1];
+ UnresolvedModuleRefs.push_back(Unresolved);
+ }
+
+ // Once we've loaded the set of exports, there's no reason to keep
+ // the parsed, unresolved exports around.
+ CurrentModule->UnresolvedExports.clear();
+ break;
+ }
+ case SUBMODULE_REQUIRES: {
+ CurrentModule->addRequirement(Blob, Record[0], Context.getLangOpts(),
+ Context.getTargetInfo());
+ break;
+ }
+
+ case SUBMODULE_LINK_LIBRARY:
+ CurrentModule->LinkLibraries.push_back(
+ Module::LinkLibrary(Blob, Record[0]));
+ break;
+
+ case SUBMODULE_CONFIG_MACRO:
+ CurrentModule->ConfigMacros.push_back(Blob.str());
+ break;
+
+ case SUBMODULE_CONFLICT: {
+ UnresolvedModuleRef Unresolved;
+ Unresolved.File = &F;
+ Unresolved.Mod = CurrentModule;
+ Unresolved.ID = Record[0];
+ Unresolved.Kind = UnresolvedModuleRef::Conflict;
+ Unresolved.IsWildcard = false;
+ Unresolved.String = Blob;
+ UnresolvedModuleRefs.push_back(Unresolved);
+ break;
+ }
+ }
+ }
+}
+
+/// \brief Parse the record that corresponds to a LangOptions data
+/// structure.
+///
+/// This routine parses the language options from the AST file and then gives
+/// them to the AST listener if one is set.
+///
+/// \returns true if the listener deems the file unacceptable, false otherwise.
+bool ASTReader::ParseLanguageOptions(const RecordData &Record,
+ bool Complain,
+ ASTReaderListener &Listener,
+ bool AllowCompatibleDifferences) {
+ LangOptions LangOpts;
+ unsigned Idx = 0;
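+  // The fields are serialized in the order in which the LANGOPT/ENUM_LANGOPT
+  // macros appear in LangOptions.def, so expanding the .def file here replays
+  // exactly the sequence the AST writer emitted.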
+#define LANGOPT(Name, Bits, Default, Description) \
+ LangOpts.Name = Record[Idx++];
+#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
+ LangOpts.set##Name(static_cast<LangOptions::Type>(Record[Idx++]));
+#include "clang/Basic/LangOptions.def"
+#define SANITIZER(NAME, ID) \
+ LangOpts.Sanitize.set(SanitizerKind::ID, Record[Idx++]);
+#include "clang/Basic/Sanitizers.def"
+
+ for (unsigned N = Record[Idx++]; N; --N)
+ LangOpts.ModuleFeatures.push_back(ReadString(Record, Idx));
+
+ ObjCRuntime::Kind runtimeKind = (ObjCRuntime::Kind) Record[Idx++];
+ VersionTuple runtimeVersion = ReadVersionTuple(Record, Idx);
+ LangOpts.ObjCRuntime = ObjCRuntime(runtimeKind, runtimeVersion);
+
+ LangOpts.CurrentModule = ReadString(Record, Idx);
+
+ // Comment options.
+ for (unsigned N = Record[Idx++]; N; --N) {
+ LangOpts.CommentOpts.BlockCommandNames.push_back(
+ ReadString(Record, Idx));
+ }
+ LangOpts.CommentOpts.ParseAllComments = Record[Idx++];
+
+ // OpenMP offloading options.
+ for (unsigned N = Record[Idx++]; N; --N) {
+ LangOpts.OMPTargetTriples.push_back(llvm::Triple(ReadString(Record, Idx)));
+ }
+
+ LangOpts.OMPHostIRFile = ReadString(Record, Idx);
+
+ return Listener.ReadLanguageOptions(LangOpts, Complain,
+ AllowCompatibleDifferences);
+}
+
+bool ASTReader::ParseTargetOptions(const RecordData &Record, bool Complain,
+ ASTReaderListener &Listener,
+ bool AllowCompatibleDifferences) {
+ unsigned Idx = 0;
+ TargetOptions TargetOpts;
+ TargetOpts.Triple = ReadString(Record, Idx);
+ TargetOpts.CPU = ReadString(Record, Idx);
+ TargetOpts.ABI = ReadString(Record, Idx);
+ for (unsigned N = Record[Idx++]; N; --N) {
+ TargetOpts.FeaturesAsWritten.push_back(ReadString(Record, Idx));
+ }
+ for (unsigned N = Record[Idx++]; N; --N) {
+ TargetOpts.Features.push_back(ReadString(Record, Idx));
+ }
+
+ return Listener.ReadTargetOptions(TargetOpts, Complain,
+ AllowCompatibleDifferences);
+}
+
+bool ASTReader::ParseDiagnosticOptions(const RecordData &Record, bool Complain,
+ ASTReaderListener &Listener) {
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts(new DiagnosticOptions);
+ unsigned Idx = 0;
+#define DIAGOPT(Name, Bits, Default) DiagOpts->Name = Record[Idx++];
+#define ENUM_DIAGOPT(Name, Type, Bits, Default) \
+ DiagOpts->set##Name(static_cast<Type>(Record[Idx++]));
+#include "clang/Basic/DiagnosticOptions.def"
+
+ for (unsigned N = Record[Idx++]; N; --N)
+ DiagOpts->Warnings.push_back(ReadString(Record, Idx));
+ for (unsigned N = Record[Idx++]; N; --N)
+ DiagOpts->Remarks.push_back(ReadString(Record, Idx));
+
+ return Listener.ReadDiagnosticOptions(DiagOpts, Complain);
+}
+
+bool ASTReader::ParseFileSystemOptions(const RecordData &Record, bool Complain,
+ ASTReaderListener &Listener) {
+ FileSystemOptions FSOpts;
+ unsigned Idx = 0;
+ FSOpts.WorkingDir = ReadString(Record, Idx);
+ return Listener.ReadFileSystemOptions(FSOpts, Complain);
+}
+
+bool ASTReader::ParseHeaderSearchOptions(const RecordData &Record,
+ bool Complain,
+ ASTReaderListener &Listener) {
+ HeaderSearchOptions HSOpts;
+ unsigned Idx = 0;
+ HSOpts.Sysroot = ReadString(Record, Idx);
+
+ // Include entries.
+ for (unsigned N = Record[Idx++]; N; --N) {
+ std::string Path = ReadString(Record, Idx);
+ frontend::IncludeDirGroup Group
+ = static_cast<frontend::IncludeDirGroup>(Record[Idx++]);
+ bool IsFramework = Record[Idx++];
+ bool IgnoreSysRoot = Record[Idx++];
+ HSOpts.UserEntries.emplace_back(std::move(Path), Group, IsFramework,
+ IgnoreSysRoot);
+ }
+
+ // System header prefixes.
+ for (unsigned N = Record[Idx++]; N; --N) {
+ std::string Prefix = ReadString(Record, Idx);
+ bool IsSystemHeader = Record[Idx++];
+ HSOpts.SystemHeaderPrefixes.emplace_back(std::move(Prefix), IsSystemHeader);
+ }
+
+ HSOpts.ResourceDir = ReadString(Record, Idx);
+ HSOpts.ModuleCachePath = ReadString(Record, Idx);
+ HSOpts.ModuleUserBuildPath = ReadString(Record, Idx);
+ HSOpts.DisableModuleHash = Record[Idx++];
+ HSOpts.UseBuiltinIncludes = Record[Idx++];
+ HSOpts.UseStandardSystemIncludes = Record[Idx++];
+ HSOpts.UseStandardCXXIncludes = Record[Idx++];
+ HSOpts.UseLibcxx = Record[Idx++];
+ std::string SpecificModuleCachePath = ReadString(Record, Idx);
+
+ return Listener.ReadHeaderSearchOptions(HSOpts, SpecificModuleCachePath,
+ Complain);
+}
+
+bool ASTReader::ParsePreprocessorOptions(const RecordData &Record,
+ bool Complain,
+ ASTReaderListener &Listener,
+ std::string &SuggestedPredefines) {
+ PreprocessorOptions PPOpts;
+ unsigned Idx = 0;
+
+ // Macro definitions/undefs
+ for (unsigned N = Record[Idx++]; N; --N) {
+ std::string Macro = ReadString(Record, Idx);
+ bool IsUndef = Record[Idx++];
+ PPOpts.Macros.push_back(std::make_pair(Macro, IsUndef));
+ }
+
+ // Includes
+ for (unsigned N = Record[Idx++]; N; --N) {
+ PPOpts.Includes.push_back(ReadString(Record, Idx));
+ }
+
+ // Macro Includes
+ for (unsigned N = Record[Idx++]; N; --N) {
+ PPOpts.MacroIncludes.push_back(ReadString(Record, Idx));
+ }
+
+ PPOpts.UsePredefines = Record[Idx++];
+ PPOpts.DetailedRecord = Record[Idx++];
+ PPOpts.ImplicitPCHInclude = ReadString(Record, Idx);
+ PPOpts.ImplicitPTHInclude = ReadString(Record, Idx);
+ PPOpts.ObjCXXARCStandardLibrary =
+ static_cast<ObjCXXARCStandardLibraryKind>(Record[Idx++]);
+ SuggestedPredefines.clear();
+ return Listener.ReadPreprocessorOptions(PPOpts, Complain,
+ SuggestedPredefines);
+}
+
+std::pair<ModuleFile *, unsigned>
+ASTReader::getModulePreprocessedEntity(unsigned GlobalIndex) {
+ GlobalPreprocessedEntityMapType::iterator
+ I = GlobalPreprocessedEntityMap.find(GlobalIndex);
+ assert(I != GlobalPreprocessedEntityMap.end() &&
+ "Corrupted global preprocessed entity map");
+ ModuleFile *M = I->second;
+ unsigned LocalIndex = GlobalIndex - M->BasePreprocessedEntityID;
+ return std::make_pair(M, LocalIndex);
+}
+
+llvm::iterator_range<PreprocessingRecord::iterator>
+ASTReader::getModulePreprocessedEntities(ModuleFile &Mod) const {
+ if (PreprocessingRecord *PPRec = PP.getPreprocessingRecord())
+ return PPRec->getIteratorsForLoadedRange(Mod.BasePreprocessedEntityID,
+ Mod.NumPreprocessedEntities);
+
+ return llvm::make_range(PreprocessingRecord::iterator(),
+ PreprocessingRecord::iterator());
+}
+
+llvm::iterator_range<ASTReader::ModuleDeclIterator>
+ASTReader::getModuleFileLevelDecls(ModuleFile &Mod) {
+ return llvm::make_range(
+ ModuleDeclIterator(this, &Mod, Mod.FileSortedDecls),
+ ModuleDeclIterator(this, &Mod,
+ Mod.FileSortedDecls + Mod.NumFileSortedDecls));
+}
+
+PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
+ PreprocessedEntityID PPID = Index+1;
+ std::pair<ModuleFile *, unsigned> PPInfo = getModulePreprocessedEntity(Index);
+ ModuleFile &M = *PPInfo.first;
+ unsigned LocalIndex = PPInfo.second;
+ const PPEntityOffset &PPOffs = M.PreprocessedEntityOffsets[LocalIndex];
+
+ if (!PP.getPreprocessingRecord()) {
+ Error("no preprocessing record");
+ return nullptr;
+ }
+
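+  // SavedStreamPosition restores the detail cursor to its current position
+  // once this entity has been read.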
+ SavedStreamPosition SavedPosition(M.PreprocessorDetailCursor);
+ M.PreprocessorDetailCursor.JumpToBit(PPOffs.BitOffset);
+
+ llvm::BitstreamEntry Entry =
+ M.PreprocessorDetailCursor.advance(BitstreamCursor::AF_DontPopBlockAtEnd);
+ if (Entry.Kind != llvm::BitstreamEntry::Record)
+ return nullptr;
+
+ // Read the record.
+ SourceRange Range(ReadSourceLocation(M, PPOffs.Begin),
+ ReadSourceLocation(M, PPOffs.End));
+ PreprocessingRecord &PPRec = *PP.getPreprocessingRecord();
+ StringRef Blob;
+ RecordData Record;
+ PreprocessorDetailRecordTypes RecType =
+ (PreprocessorDetailRecordTypes)M.PreprocessorDetailCursor.readRecord(
+ Entry.ID, Record, &Blob);
+ switch (RecType) {
+ case PPD_MACRO_EXPANSION: {
+ bool isBuiltin = Record[0];
+ IdentifierInfo *Name = nullptr;
+ MacroDefinitionRecord *Def = nullptr;
+ if (isBuiltin)
+ Name = getLocalIdentifier(M, Record[1]);
+ else {
+ PreprocessedEntityID GlobalID =
+ getGlobalPreprocessedEntityID(M, Record[1]);
+ Def = cast<MacroDefinitionRecord>(
+ PPRec.getLoadedPreprocessedEntity(GlobalID - 1));
+ }
+
+ MacroExpansion *ME;
+ if (isBuiltin)
+ ME = new (PPRec) MacroExpansion(Name, Range);
+ else
+ ME = new (PPRec) MacroExpansion(Def, Range);
+
+ return ME;
+ }
+
+ case PPD_MACRO_DEFINITION: {
+    // Decode the identifier info and create the macro definition record,
+    // notifying the deserialization listener if one is attached.
+ IdentifierInfo *II = getLocalIdentifier(M, Record[0]);
+ MacroDefinitionRecord *MD = new (PPRec) MacroDefinitionRecord(II, Range);
+
+ if (DeserializationListener)
+ DeserializationListener->MacroDefinitionRead(PPID, MD);
+
+ return MD;
+ }
+
+ case PPD_INCLUSION_DIRECTIVE: {
+ const char *FullFileNameStart = Blob.data() + Record[0];
+ StringRef FullFileName(FullFileNameStart, Blob.size() - Record[0]);
+ const FileEntry *File = nullptr;
+ if (!FullFileName.empty())
+ File = PP.getFileManager().getFile(FullFileName);
+
+ // FIXME: Stable encoding
+ InclusionDirective::InclusionKind Kind
+ = static_cast<InclusionDirective::InclusionKind>(Record[2]);
+ InclusionDirective *ID
+ = new (PPRec) InclusionDirective(PPRec, Kind,
+ StringRef(Blob.data(), Record[0]),
+ Record[1], Record[3],
+ File,
+ Range);
+ return ID;
+ }
+ }
+
+ llvm_unreachable("Invalid PreprocessorDetailRecordTypes");
+}
+
+/// \brief \arg SLocMapI points at a chunk of a module that contains no
+/// preprocessed entities or the entities it contains are not the ones we are
+/// looking for. Find the next module that contains entities and return the ID
+/// of the first entry.
+PreprocessedEntityID ASTReader::findNextPreprocessedEntity(
+ GlobalSLocOffsetMapType::const_iterator SLocMapI) const {
+ ++SLocMapI;
+ for (GlobalSLocOffsetMapType::const_iterator
+ EndI = GlobalSLocOffsetMap.end(); SLocMapI != EndI; ++SLocMapI) {
+ ModuleFile &M = *SLocMapI->second;
+ if (M.NumPreprocessedEntities)
+ return M.BasePreprocessedEntityID;
+ }
+
+ return getTotalNumPreprocessedEntities();
+}
+
+namespace {
+
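+// Orders PPEntityOffsets by whichever source location member (Begin or End)
+// the PPLoc pointer-to-member selects, using the SourceManager's
+// translation-unit ordering.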
+template <unsigned PPEntityOffset::*PPLoc>
+struct PPEntityComp {
+ const ASTReader &Reader;
+ ModuleFile &M;
+
+ PPEntityComp(const ASTReader &Reader, ModuleFile &M) : Reader(Reader), M(M) { }
+
+ bool operator()(const PPEntityOffset &L, const PPEntityOffset &R) const {
+ SourceLocation LHS = getLoc(L);
+ SourceLocation RHS = getLoc(R);
+ return Reader.getSourceManager().isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ bool operator()(const PPEntityOffset &L, SourceLocation RHS) const {
+ SourceLocation LHS = getLoc(L);
+ return Reader.getSourceManager().isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ bool operator()(SourceLocation LHS, const PPEntityOffset &R) const {
+ SourceLocation RHS = getLoc(R);
+ return Reader.getSourceManager().isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ SourceLocation getLoc(const PPEntityOffset &PPE) const {
+ return Reader.ReadSourceLocation(M, PPE.*PPLoc);
+ }
+};
+
+}
+
+PreprocessedEntityID ASTReader::findPreprocessedEntity(SourceLocation Loc,
+ bool EndsAfter) const {
+ if (SourceMgr.isLocalSourceLocation(Loc))
+ return getTotalNumPreprocessedEntities();
+
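+  // Loaded source locations are allocated downward from MaxLoadedOffset, so
+  // the global offset map is keyed by the distance from MaxLoadedOffset
+  // rather than by the raw offset.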
+ GlobalSLocOffsetMapType::const_iterator SLocMapI = GlobalSLocOffsetMap.find(
+ SourceManager::MaxLoadedOffset - Loc.getOffset() - 1);
+ assert(SLocMapI != GlobalSLocOffsetMap.end() &&
+ "Corrupted global sloc offset map");
+
+ if (SLocMapI->second->NumPreprocessedEntities == 0)
+ return findNextPreprocessedEntity(SLocMapI);
+
+ ModuleFile &M = *SLocMapI->second;
+ typedef const PPEntityOffset *pp_iterator;
+ pp_iterator pp_begin = M.PreprocessedEntityOffsets;
+ pp_iterator pp_end = pp_begin + M.NumPreprocessedEntities;
+
+ size_t Count = M.NumPreprocessedEntities;
+ size_t Half;
+ pp_iterator First = pp_begin;
+ pp_iterator PPI;
+
+ if (EndsAfter) {
+ PPI = std::upper_bound(pp_begin, pp_end, Loc,
+ PPEntityComp<&PPEntityOffset::Begin>(*this, M));
+ } else {
+    // Do a binary search manually instead of using std::lower_bound because
+    // the end locations of entities may be unordered (when a macro expansion
+    // is inside another macro argument), but for this case it is not important
+    // whether we get the first macro expansion or its containing macro.
+ while (Count > 0) {
+ Half = Count / 2;
+ PPI = First;
+ std::advance(PPI, Half);
+ if (SourceMgr.isBeforeInTranslationUnit(ReadSourceLocation(M, PPI->End),
+ Loc)) {
+ First = PPI;
+ ++First;
+ Count = Count - Half - 1;
+ } else
+ Count = Half;
+ }
+ }
+
+ if (PPI == pp_end)
+ return findNextPreprocessedEntity(SLocMapI);
+
+ return M.BasePreprocessedEntityID + (PPI - pp_begin);
+}
+
+/// \brief Returns a pair of [Begin, End) indices of preallocated
+/// preprocessed entities that \arg Range encompasses.
+std::pair<unsigned, unsigned>
+ ASTReader::findPreprocessedEntitiesInRange(SourceRange Range) {
+ if (Range.isInvalid())
+ return std::make_pair(0,0);
+ assert(!SourceMgr.isBeforeInTranslationUnit(Range.getEnd(),Range.getBegin()));
+
+ PreprocessedEntityID BeginID =
+ findPreprocessedEntity(Range.getBegin(), false);
+ PreprocessedEntityID EndID = findPreprocessedEntity(Range.getEnd(), true);
+ return std::make_pair(BeginID, EndID);
+}
+
+/// \brief Determine whether the preallocated preprocessed entity with
+/// index \arg Index came from file \arg FID, if this can be determined.
+Optional<bool> ASTReader::isPreprocessedEntityInFileID(unsigned Index,
+ FileID FID) {
+ if (FID.isInvalid())
+ return false;
+
+ std::pair<ModuleFile *, unsigned> PPInfo = getModulePreprocessedEntity(Index);
+ ModuleFile &M = *PPInfo.first;
+ unsigned LocalIndex = PPInfo.second;
+ const PPEntityOffset &PPOffs = M.PreprocessedEntityOffsets[LocalIndex];
+
+ SourceLocation Loc = ReadSourceLocation(M, PPOffs.Begin);
+ if (Loc.isInvalid())
+ return false;
+
+ if (SourceMgr.isInFileID(SourceMgr.getFileLoc(Loc), FID))
+ return true;
+ else
+ return false;
+}
+
+namespace {
+ /// \brief Visitor used to search for information about a header file.
+ class HeaderFileInfoVisitor {
+ const FileEntry *FE;
+
+ Optional<HeaderFileInfo> HFI;
+
+ public:
+ explicit HeaderFileInfoVisitor(const FileEntry *FE)
+ : FE(FE) { }
+
+ bool operator()(ModuleFile &M) {
+ HeaderFileInfoLookupTable *Table
+ = static_cast<HeaderFileInfoLookupTable *>(M.HeaderFileInfoTable);
+ if (!Table)
+ return false;
+
+ // Look in the on-disk hash table for an entry for this file name.
+ HeaderFileInfoLookupTable::iterator Pos = Table->find(FE);
+ if (Pos == Table->end())
+ return false;
+
+ HFI = *Pos;
+ return true;
+ }
+
+ Optional<HeaderFileInfo> getHeaderFileInfo() const { return HFI; }
+ };
+}
+
+HeaderFileInfo ASTReader::GetHeaderFileInfo(const FileEntry *FE) {
+ HeaderFileInfoVisitor Visitor(FE);
+ ModuleMgr.visit(Visitor);
+ if (Optional<HeaderFileInfo> HFI = Visitor.getHeaderFileInfo())
+ return *HFI;
+
+ return HeaderFileInfo();
+}
+
+void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
+ // FIXME: Make it work properly with modules.
+ SmallVector<DiagnosticsEngine::DiagState *, 32> DiagStates;
+ for (ModuleIterator I = ModuleMgr.begin(), E = ModuleMgr.end(); I != E; ++I) {
+ ModuleFile &F = *(*I);
+ unsigned Idx = 0;
+ DiagStates.clear();
+ assert(!Diag.DiagStates.empty());
+ DiagStates.push_back(&Diag.DiagStates.front()); // the command-line one.
+ while (Idx < F.PragmaDiagMappings.size()) {
+ SourceLocation Loc = ReadSourceLocation(F, F.PragmaDiagMappings[Idx++]);
+ unsigned DiagStateID = F.PragmaDiagMappings[Idx++];
+ if (DiagStateID != 0) {
+ Diag.DiagStatePoints.push_back(
+ DiagnosticsEngine::DiagStatePoint(DiagStates[DiagStateID-1],
+ FullSourceLoc(Loc, SourceMgr)));
+ continue;
+ }
+
+ assert(DiagStateID == 0);
+      // A new DiagState is created below.
+ Diag.DiagStates.push_back(*Diag.GetCurDiagState());
+ DiagnosticsEngine::DiagState *NewState = &Diag.DiagStates.back();
+ DiagStates.push_back(NewState);
+ Diag.DiagStatePoints.push_back(
+ DiagnosticsEngine::DiagStatePoint(NewState,
+ FullSourceLoc(Loc, SourceMgr)));
+ while (1) {
+ assert(Idx < F.PragmaDiagMappings.size() &&
+ "Invalid data, didn't find '-1' marking end of diag/map pairs");
+        if (Idx >= F.PragmaDiagMappings.size()) {
+          // The data is malformed, but at least avoid an infinite loop in a
+          // release build.
+          break;
+        }
+ unsigned DiagID = F.PragmaDiagMappings[Idx++];
+ if (DiagID == (unsigned)-1) {
+ break; // no more diag/map pairs for this location.
+ }
+ diag::Severity Map = (diag::Severity)F.PragmaDiagMappings[Idx++];
+ DiagnosticMapping Mapping = Diag.makeUserMapping(Map, Loc);
+ Diag.GetCurDiagState()->setMapping(DiagID, Mapping);
+ }
+ }
+ }
+}
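+
+// Sketch of the flat PragmaDiagMappings stream consumed above (an
+// illustrative summary of this loop, not a normative format description):
+//
+//   Loc, DiagStateID                          // nonzero ID: reuse a state
+//   Loc, 0, DiagID, Map, DiagID, Map, ..., -1 // zero: create a new state,
+//                                             // then diag/severity pairs
+//                                             // until the -1 marker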
+
+/// \brief Get the correct cursor and offset for loading a type.
+ASTReader::RecordLocation ASTReader::TypeCursorForIndex(unsigned Index) {
+ GlobalTypeMapType::iterator I = GlobalTypeMap.find(Index);
+ assert(I != GlobalTypeMap.end() && "Corrupted global type map");
+ ModuleFile *M = I->second;
+ return RecordLocation(M, M->TypeOffsets[Index - M->BaseTypeIndex]);
+}
+
+/// \brief Read and return the type with the given index.
+///
+/// The index is the type ID with the fast qualifier bits shifted off and the
+/// number of predefined type IDs subtracted. This routine actually reads the
+/// record corresponding to the type at the given location. It is a helper
+/// routine for GetType, which deals with reading type IDs.
+QualType ASTReader::readTypeRecord(unsigned Index) {
+ RecordLocation Loc = TypeCursorForIndex(Index);
+ BitstreamCursor &DeclsCursor = Loc.F->DeclsCursor;
+
+ // Keep track of where we are in the stream, then jump back there
+ // after reading this type.
+ SavedStreamPosition SavedPosition(DeclsCursor);
+
+ ReadingKindTracker ReadingKind(Read_Type, *this);
+
+ // Note that we are loading a type record.
+ Deserializing AType(this);
+
+ unsigned Idx = 0;
+ DeclsCursor.JumpToBit(Loc.Offset);
+ RecordData Record;
+ unsigned Code = DeclsCursor.ReadCode();
+ switch ((TypeCode)DeclsCursor.readRecord(Code, Record)) {
+ case TYPE_EXT_QUAL: {
+ if (Record.size() != 2) {
+ Error("Incorrect encoding of extended qualifier type");
+ return QualType();
+ }
+ QualType Base = readType(*Loc.F, Record, Idx);
+ Qualifiers Quals = Qualifiers::fromOpaqueValue(Record[Idx++]);
+ return Context.getQualifiedType(Base, Quals);
+ }
+
+ case TYPE_COMPLEX: {
+ if (Record.size() != 1) {
+ Error("Incorrect encoding of complex type");
+ return QualType();
+ }
+ QualType ElemType = readType(*Loc.F, Record, Idx);
+ return Context.getComplexType(ElemType);
+ }
+
+ case TYPE_POINTER: {
+ if (Record.size() != 1) {
+ Error("Incorrect encoding of pointer type");
+ return QualType();
+ }
+ QualType PointeeType = readType(*Loc.F, Record, Idx);
+ return Context.getPointerType(PointeeType);
+ }
+
+ case TYPE_DECAYED: {
+ if (Record.size() != 1) {
+ Error("Incorrect encoding of decayed type");
+ return QualType();
+ }
+ QualType OriginalType = readType(*Loc.F, Record, Idx);
+ QualType DT = Context.getAdjustedParameterType(OriginalType);
+ if (!isa<DecayedType>(DT))
+ Error("Decayed type does not decay");
+ return DT;
+ }
+
+ case TYPE_ADJUSTED: {
+ if (Record.size() != 2) {
+ Error("Incorrect encoding of adjusted type");
+ return QualType();
+ }
+ QualType OriginalTy = readType(*Loc.F, Record, Idx);
+ QualType AdjustedTy = readType(*Loc.F, Record, Idx);
+ return Context.getAdjustedType(OriginalTy, AdjustedTy);
+ }
+
+ case TYPE_BLOCK_POINTER: {
+ if (Record.size() != 1) {
+ Error("Incorrect encoding of block pointer type");
+ return QualType();
+ }
+ QualType PointeeType = readType(*Loc.F, Record, Idx);
+ return Context.getBlockPointerType(PointeeType);
+ }
+
+ case TYPE_LVALUE_REFERENCE: {
+ if (Record.size() != 2) {
+ Error("Incorrect encoding of lvalue reference type");
+ return QualType();
+ }
+ QualType PointeeType = readType(*Loc.F, Record, Idx);
+ return Context.getLValueReferenceType(PointeeType, Record[1]);
+ }
+
+ case TYPE_RVALUE_REFERENCE: {
+ if (Record.size() != 1) {
+ Error("Incorrect encoding of rvalue reference type");
+ return QualType();
+ }
+ QualType PointeeType = readType(*Loc.F, Record, Idx);
+ return Context.getRValueReferenceType(PointeeType);
+ }
+
+ case TYPE_MEMBER_POINTER: {
+ if (Record.size() != 2) {
+ Error("Incorrect encoding of member pointer type");
+ return QualType();
+ }
+ QualType PointeeType = readType(*Loc.F, Record, Idx);
+ QualType ClassType = readType(*Loc.F, Record, Idx);
+ if (PointeeType.isNull() || ClassType.isNull())
+ return QualType();
+
+ return Context.getMemberPointerType(PointeeType, ClassType.getTypePtr());
+ }
+
+ case TYPE_CONSTANT_ARRAY: {
+ QualType ElementType = readType(*Loc.F, Record, Idx);
+ ArrayType::ArraySizeModifier ASM = (ArrayType::ArraySizeModifier)Record[1];
+ unsigned IndexTypeQuals = Record[2];
+ unsigned Idx = 3;
+ llvm::APInt Size = ReadAPInt(Record, Idx);
+ return Context.getConstantArrayType(ElementType, Size,
+ ASM, IndexTypeQuals);
+ }
+
+ case TYPE_INCOMPLETE_ARRAY: {
+ QualType ElementType = readType(*Loc.F, Record, Idx);
+ ArrayType::ArraySizeModifier ASM = (ArrayType::ArraySizeModifier)Record[1];
+ unsigned IndexTypeQuals = Record[2];
+ return Context.getIncompleteArrayType(ElementType, ASM, IndexTypeQuals);
+ }
+
+ case TYPE_VARIABLE_ARRAY: {
+ QualType ElementType = readType(*Loc.F, Record, Idx);
+ ArrayType::ArraySizeModifier ASM = (ArrayType::ArraySizeModifier)Record[1];
+ unsigned IndexTypeQuals = Record[2];
+ SourceLocation LBLoc = ReadSourceLocation(*Loc.F, Record[3]);
+ SourceLocation RBLoc = ReadSourceLocation(*Loc.F, Record[4]);
+ return Context.getVariableArrayType(ElementType, ReadExpr(*Loc.F),
+ ASM, IndexTypeQuals,
+ SourceRange(LBLoc, RBLoc));
+ }
+
+ case TYPE_VECTOR: {
+ if (Record.size() != 3) {
+ Error("incorrect encoding of vector type in AST file");
+ return QualType();
+ }
+
+ QualType ElementType = readType(*Loc.F, Record, Idx);
+ unsigned NumElements = Record[1];
+ unsigned VecKind = Record[2];
+ return Context.getVectorType(ElementType, NumElements,
+ (VectorType::VectorKind)VecKind);
+ }
+
+ case TYPE_EXT_VECTOR: {
+ if (Record.size() != 3) {
+ Error("incorrect encoding of extended vector type in AST file");
+ return QualType();
+ }
+
+ QualType ElementType = readType(*Loc.F, Record, Idx);
+ unsigned NumElements = Record[1];
+ return Context.getExtVectorType(ElementType, NumElements);
+ }
+
+ case TYPE_FUNCTION_NO_PROTO: {
+ if (Record.size() != 6) {
+ Error("incorrect encoding of no-proto function type");
+ return QualType();
+ }
+ QualType ResultType = readType(*Loc.F, Record, Idx);
+ FunctionType::ExtInfo Info(Record[1], Record[2], Record[3],
+ (CallingConv)Record[4], Record[5]);
+ return Context.getFunctionNoProtoType(ResultType, Info);
+ }
+
+ case TYPE_FUNCTION_PROTO: {
+ QualType ResultType = readType(*Loc.F, Record, Idx);
+
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExtInfo = FunctionType::ExtInfo(/*noreturn*/ Record[1],
+ /*hasregparm*/ Record[2],
+ /*regparm*/ Record[3],
+ static_cast<CallingConv>(Record[4]),
+ /*produces*/ Record[5]);
+
+ unsigned Idx = 6;
+
+ EPI.Variadic = Record[Idx++];
+ EPI.HasTrailingReturn = Record[Idx++];
+ EPI.TypeQuals = Record[Idx++];
+ EPI.RefQualifier = static_cast<RefQualifierKind>(Record[Idx++]);
+ SmallVector<QualType, 8> ExceptionStorage;
+ readExceptionSpec(*Loc.F, ExceptionStorage, EPI.ExceptionSpec, Record, Idx);
+
+ unsigned NumParams = Record[Idx++];
+ SmallVector<QualType, 16> ParamTypes;
+ for (unsigned I = 0; I != NumParams; ++I)
+ ParamTypes.push_back(readType(*Loc.F, Record, Idx));
+
+ return Context.getFunctionType(ResultType, ParamTypes, EPI);
+ }
+
+ case TYPE_UNRESOLVED_USING: {
+ unsigned Idx = 0;
+ return Context.getTypeDeclType(
+ ReadDeclAs<UnresolvedUsingTypenameDecl>(*Loc.F, Record, Idx));
+ }
+
+ case TYPE_TYPEDEF: {
+ if (Record.size() != 2) {
+ Error("incorrect encoding of typedef type");
+ return QualType();
+ }
+ unsigned Idx = 0;
+ TypedefNameDecl *Decl = ReadDeclAs<TypedefNameDecl>(*Loc.F, Record, Idx);
+ QualType Canonical = readType(*Loc.F, Record, Idx);
+ if (!Canonical.isNull())
+ Canonical = Context.getCanonicalType(Canonical);
+ return Context.getTypedefType(Decl, Canonical);
+ }
+
+ case TYPE_TYPEOF_EXPR:
+ return Context.getTypeOfExprType(ReadExpr(*Loc.F));
+
+ case TYPE_TYPEOF: {
+ if (Record.size() != 1) {
+ Error("incorrect encoding of typeof(type) in AST file");
+ return QualType();
+ }
+ QualType UnderlyingType = readType(*Loc.F, Record, Idx);
+ return Context.getTypeOfType(UnderlyingType);
+ }
+
+ case TYPE_DECLTYPE: {
+ QualType UnderlyingType = readType(*Loc.F, Record, Idx);
+ return Context.getDecltypeType(ReadExpr(*Loc.F), UnderlyingType);
+ }
+
+ case TYPE_UNARY_TRANSFORM: {
+ QualType BaseType = readType(*Loc.F, Record, Idx);
+ QualType UnderlyingType = readType(*Loc.F, Record, Idx);
+ UnaryTransformType::UTTKind UKind = (UnaryTransformType::UTTKind)Record[2];
+ return Context.getUnaryTransformType(BaseType, UnderlyingType, UKind);
+ }
+
+ case TYPE_AUTO: {
+ QualType Deduced = readType(*Loc.F, Record, Idx);
+ AutoTypeKeyword Keyword = (AutoTypeKeyword)Record[Idx++];
+ bool IsDependent = Deduced.isNull() ? Record[Idx++] : false;
+ return Context.getAutoType(Deduced, Keyword, IsDependent);
+ }
+
+ case TYPE_RECORD: {
+ if (Record.size() != 2) {
+ Error("incorrect encoding of record type");
+ return QualType();
+ }
+ unsigned Idx = 0;
+ bool IsDependent = Record[Idx++];
+ RecordDecl *RD = ReadDeclAs<RecordDecl>(*Loc.F, Record, Idx);
+ RD = cast_or_null<RecordDecl>(RD->getCanonicalDecl());
+ QualType T = Context.getRecordType(RD);
+ const_cast<Type*>(T.getTypePtr())->setDependent(IsDependent);
+ return T;
+ }
+
+ case TYPE_ENUM: {
+ if (Record.size() != 2) {
+ Error("incorrect encoding of enum type");
+ return QualType();
+ }
+ unsigned Idx = 0;
+ bool IsDependent = Record[Idx++];
+ QualType T
+ = Context.getEnumType(ReadDeclAs<EnumDecl>(*Loc.F, Record, Idx));
+ const_cast<Type*>(T.getTypePtr())->setDependent(IsDependent);
+ return T;
+ }
+
+ case TYPE_ATTRIBUTED: {
+ if (Record.size() != 3) {
+ Error("incorrect encoding of attributed type");
+ return QualType();
+ }
+ QualType modifiedType = readType(*Loc.F, Record, Idx);
+ QualType equivalentType = readType(*Loc.F, Record, Idx);
+ AttributedType::Kind kind = static_cast<AttributedType::Kind>(Record[2]);
+ return Context.getAttributedType(kind, modifiedType, equivalentType);
+ }
+
+ case TYPE_PAREN: {
+ if (Record.size() != 1) {
+ Error("incorrect encoding of paren type");
+ return QualType();
+ }
+ QualType InnerType = readType(*Loc.F, Record, Idx);
+ return Context.getParenType(InnerType);
+ }
+
+ case TYPE_PACK_EXPANSION: {
+ if (Record.size() != 2) {
+ Error("incorrect encoding of pack expansion type");
+ return QualType();
+ }
+ QualType Pattern = readType(*Loc.F, Record, Idx);
+ if (Pattern.isNull())
+ return QualType();
+ Optional<unsigned> NumExpansions;
+ if (Record[1])
+ NumExpansions = Record[1] - 1;
+ return Context.getPackExpansionType(Pattern, NumExpansions);
+ }
+
+ case TYPE_ELABORATED: {
+ unsigned Idx = 0;
+ ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++];
+ NestedNameSpecifier *NNS = ReadNestedNameSpecifier(*Loc.F, Record, Idx);
+ QualType NamedType = readType(*Loc.F, Record, Idx);
+ return Context.getElaboratedType(Keyword, NNS, NamedType);
+ }
+
+ case TYPE_OBJC_INTERFACE: {
+ unsigned Idx = 0;
+ ObjCInterfaceDecl *ItfD
+ = ReadDeclAs<ObjCInterfaceDecl>(*Loc.F, Record, Idx);
+ return Context.getObjCInterfaceType(ItfD->getCanonicalDecl());
+ }
+
+ case TYPE_OBJC_OBJECT: {
+ unsigned Idx = 0;
+ QualType Base = readType(*Loc.F, Record, Idx);
+ unsigned NumTypeArgs = Record[Idx++];
+ SmallVector<QualType, 4> TypeArgs;
+ for (unsigned I = 0; I != NumTypeArgs; ++I)
+ TypeArgs.push_back(readType(*Loc.F, Record, Idx));
+ unsigned NumProtos = Record[Idx++];
+ SmallVector<ObjCProtocolDecl*, 4> Protos;
+ for (unsigned I = 0; I != NumProtos; ++I)
+ Protos.push_back(ReadDeclAs<ObjCProtocolDecl>(*Loc.F, Record, Idx));
+ bool IsKindOf = Record[Idx++];
+ return Context.getObjCObjectType(Base, TypeArgs, Protos, IsKindOf);
+ }
+
+ case TYPE_OBJC_OBJECT_POINTER: {
+ unsigned Idx = 0;
+ QualType Pointee = readType(*Loc.F, Record, Idx);
+ return Context.getObjCObjectPointerType(Pointee);
+ }
+
+ case TYPE_SUBST_TEMPLATE_TYPE_PARM: {
+ unsigned Idx = 0;
+ QualType Parm = readType(*Loc.F, Record, Idx);
+ QualType Replacement = readType(*Loc.F, Record, Idx);
+ return Context.getSubstTemplateTypeParmType(
+ cast<TemplateTypeParmType>(Parm),
+ Context.getCanonicalType(Replacement));
+ }
+
+ case TYPE_SUBST_TEMPLATE_TYPE_PARM_PACK: {
+ unsigned Idx = 0;
+ QualType Parm = readType(*Loc.F, Record, Idx);
+ TemplateArgument ArgPack = ReadTemplateArgument(*Loc.F, Record, Idx);
+ return Context.getSubstTemplateTypeParmPackType(
+ cast<TemplateTypeParmType>(Parm),
+ ArgPack);
+ }
+
+ case TYPE_INJECTED_CLASS_NAME: {
+ CXXRecordDecl *D = ReadDeclAs<CXXRecordDecl>(*Loc.F, Record, Idx);
+ QualType TST = readType(*Loc.F, Record, Idx); // probably derivable
+    // FIXME: ASTContext::getInjectedClassNameType is not currently suitable
+    // for AST reading; there are too many interdependencies.
+ const Type *T = nullptr;
+ for (auto *DI = D; DI; DI = DI->getPreviousDecl()) {
+ if (const Type *Existing = DI->getTypeForDecl()) {
+ T = Existing;
+ break;
+ }
+ }
+ if (!T) {
+ T = new (Context, TypeAlignment) InjectedClassNameType(D, TST);
+ for (auto *DI = D; DI; DI = DI->getPreviousDecl())
+ DI->setTypeForDecl(T);
+ }
+ return QualType(T, 0);
+ }
+
+ case TYPE_TEMPLATE_TYPE_PARM: {
+ unsigned Idx = 0;
+ unsigned Depth = Record[Idx++];
+ unsigned Index = Record[Idx++];
+ bool Pack = Record[Idx++];
+ TemplateTypeParmDecl *D
+ = ReadDeclAs<TemplateTypeParmDecl>(*Loc.F, Record, Idx);
+ return Context.getTemplateTypeParmType(Depth, Index, Pack, D);
+ }
+
+ case TYPE_DEPENDENT_NAME: {
+ unsigned Idx = 0;
+ ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++];
+ NestedNameSpecifier *NNS = ReadNestedNameSpecifier(*Loc.F, Record, Idx);
+ const IdentifierInfo *Name = GetIdentifierInfo(*Loc.F, Record, Idx);
+ QualType Canon = readType(*Loc.F, Record, Idx);
+ if (!Canon.isNull())
+ Canon = Context.getCanonicalType(Canon);
+ return Context.getDependentNameType(Keyword, NNS, Name, Canon);
+ }
+
+ case TYPE_DEPENDENT_TEMPLATE_SPECIALIZATION: {
+ unsigned Idx = 0;
+ ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++];
+ NestedNameSpecifier *NNS = ReadNestedNameSpecifier(*Loc.F, Record, Idx);
+ const IdentifierInfo *Name = GetIdentifierInfo(*Loc.F, Record, Idx);
+ unsigned NumArgs = Record[Idx++];
+ SmallVector<TemplateArgument, 8> Args;
+ Args.reserve(NumArgs);
+ while (NumArgs--)
+ Args.push_back(ReadTemplateArgument(*Loc.F, Record, Idx));
+ return Context.getDependentTemplateSpecializationType(Keyword, NNS, Name,
+ Args.size(), Args.data());
+ }
+
+ case TYPE_DEPENDENT_SIZED_ARRAY: {
+ unsigned Idx = 0;
+
+ // ArrayType
+ QualType ElementType = readType(*Loc.F, Record, Idx);
+ ArrayType::ArraySizeModifier ASM
+ = (ArrayType::ArraySizeModifier)Record[Idx++];
+ unsigned IndexTypeQuals = Record[Idx++];
+
+ // DependentSizedArrayType
+ Expr *NumElts = ReadExpr(*Loc.F);
+ SourceRange Brackets = ReadSourceRange(*Loc.F, Record, Idx);
+
+ return Context.getDependentSizedArrayType(ElementType, NumElts, ASM,
+ IndexTypeQuals, Brackets);
+ }
+
+ case TYPE_TEMPLATE_SPECIALIZATION: {
+ unsigned Idx = 0;
+ bool IsDependent = Record[Idx++];
+ TemplateName Name = ReadTemplateName(*Loc.F, Record, Idx);
+ SmallVector<TemplateArgument, 8> Args;
+ ReadTemplateArgumentList(Args, *Loc.F, Record, Idx);
+ QualType Underlying = readType(*Loc.F, Record, Idx);
+ QualType T;
+ if (Underlying.isNull())
+ T = Context.getCanonicalTemplateSpecializationType(Name, Args.data(),
+ Args.size());
+ else
+ T = Context.getTemplateSpecializationType(Name, Args.data(),
+ Args.size(), Underlying);
+ const_cast<Type*>(T.getTypePtr())->setDependent(IsDependent);
+ return T;
+ }
+
+ case TYPE_ATOMIC: {
+ if (Record.size() != 1) {
+ Error("Incorrect encoding of atomic type");
+ return QualType();
+ }
+ QualType ValueType = readType(*Loc.F, Record, Idx);
+ return Context.getAtomicType(ValueType);
+ }
+ }
+ llvm_unreachable("Invalid TypeCode!");
+}
+
+void ASTReader::readExceptionSpec(ModuleFile &ModuleFile,
+ SmallVectorImpl<QualType> &Exceptions,
+ FunctionProtoType::ExceptionSpecInfo &ESI,
+ const RecordData &Record, unsigned &Idx) {
+ ExceptionSpecificationType EST =
+ static_cast<ExceptionSpecificationType>(Record[Idx++]);
+ ESI.Type = EST;
+ if (EST == EST_Dynamic) {
+ for (unsigned I = 0, N = Record[Idx++]; I != N; ++I)
+ Exceptions.push_back(readType(ModuleFile, Record, Idx));
+ ESI.Exceptions = Exceptions;
+ } else if (EST == EST_ComputedNoexcept) {
+ ESI.NoexceptExpr = ReadExpr(ModuleFile);
+ } else if (EST == EST_Uninstantiated) {
+ ESI.SourceDecl = ReadDeclAs<FunctionDecl>(ModuleFile, Record, Idx);
+ ESI.SourceTemplate = ReadDeclAs<FunctionDecl>(ModuleFile, Record, Idx);
+ } else if (EST == EST_Unevaluated) {
+ ESI.SourceDecl = ReadDeclAs<FunctionDecl>(ModuleFile, Record, Idx);
+ }
+}
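+
+// Sketch of the exception-spec encoding decoded above (an illustrative
+// summary of this function, not a normative format description):
+//
+//   EST                                     always present
+//   EST_Dynamic:           N, Type_1 ... Type_N
+//   EST_ComputedNoexcept:  a noexcept expression follows in the stream
+//   EST_Uninstantiated:    SourceDecl, SourceTemplate
+//   EST_Unevaluated:       SourceDecl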
+
+class clang::TypeLocReader : public TypeLocVisitor<TypeLocReader> {
+ ASTReader &Reader;
+ ModuleFile &F;
+ const ASTReader::RecordData &Record;
+ unsigned &Idx;
+
+ SourceLocation ReadSourceLocation(const ASTReader::RecordData &R,
+ unsigned &I) {
+ return Reader.ReadSourceLocation(F, R, I);
+ }
+
+ template<typename T>
+ T *ReadDeclAs(const ASTReader::RecordData &Record, unsigned &Idx) {
+ return Reader.ReadDeclAs<T>(F, Record, Idx);
+ }
+
+public:
+ TypeLocReader(ASTReader &Reader, ModuleFile &F,
+ const ASTReader::RecordData &Record, unsigned &Idx)
+ : Reader(Reader), F(F), Record(Record), Idx(Idx)
+ { }
+
+ // We want compile-time assurance that we've enumerated all of
+ // these, so unfortunately we have to declare them first, then
+ // define them out-of-line.
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ void Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc);
+#include "clang/AST/TypeLocNodes.def"
+
+ void VisitFunctionTypeLoc(FunctionTypeLoc);
+ void VisitArrayTypeLoc(ArrayTypeLoc);
+};
+
+void TypeLocReader::VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
+ // nothing to do
+}
+void TypeLocReader::VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
+ TL.setBuiltinLoc(ReadSourceLocation(Record, Idx));
+ if (TL.needsExtraLocalData()) {
+ TL.setWrittenTypeSpec(static_cast<DeclSpec::TST>(Record[Idx++]));
+ TL.setWrittenSignSpec(static_cast<DeclSpec::TSS>(Record[Idx++]));
+ TL.setWrittenWidthSpec(static_cast<DeclSpec::TSW>(Record[Idx++]));
+ TL.setModeAttr(Record[Idx++]);
+ }
+}
+void TypeLocReader::VisitComplexTypeLoc(ComplexTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitPointerTypeLoc(PointerTypeLoc TL) {
+ TL.setStarLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitDecayedTypeLoc(DecayedTypeLoc TL) {
+ // nothing to do
+}
+void TypeLocReader::VisitAdjustedTypeLoc(AdjustedTypeLoc TL) {
+ // nothing to do
+}
+void TypeLocReader::VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) {
+ TL.setCaretLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitLValueReferenceTypeLoc(LValueReferenceTypeLoc TL) {
+ TL.setAmpLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitRValueReferenceTypeLoc(RValueReferenceTypeLoc TL) {
+ TL.setAmpAmpLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitMemberPointerTypeLoc(MemberPointerTypeLoc TL) {
+ TL.setStarLoc(ReadSourceLocation(Record, Idx));
+ TL.setClassTInfo(Reader.GetTypeSourceInfo(F, Record, Idx));
+}
+void TypeLocReader::VisitArrayTypeLoc(ArrayTypeLoc TL) {
+ TL.setLBracketLoc(ReadSourceLocation(Record, Idx));
+ TL.setRBracketLoc(ReadSourceLocation(Record, Idx));
+ if (Record[Idx++])
+ TL.setSizeExpr(Reader.ReadExpr(F));
+ else
+ TL.setSizeExpr(nullptr);
+}
+void TypeLocReader::VisitConstantArrayTypeLoc(ConstantArrayTypeLoc TL) {
+ VisitArrayTypeLoc(TL);
+}
+void TypeLocReader::VisitIncompleteArrayTypeLoc(IncompleteArrayTypeLoc TL) {
+ VisitArrayTypeLoc(TL);
+}
+void TypeLocReader::VisitVariableArrayTypeLoc(VariableArrayTypeLoc TL) {
+ VisitArrayTypeLoc(TL);
+}
+void TypeLocReader::VisitDependentSizedArrayTypeLoc(
+ DependentSizedArrayTypeLoc TL) {
+ VisitArrayTypeLoc(TL);
+}
+void TypeLocReader::VisitDependentSizedExtVectorTypeLoc(
+ DependentSizedExtVectorTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitVectorTypeLoc(VectorTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
+ TL.setLocalRangeBegin(ReadSourceLocation(Record, Idx));
+ TL.setLParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setRParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setLocalRangeEnd(ReadSourceLocation(Record, Idx));
+ for (unsigned i = 0, e = TL.getNumParams(); i != e; ++i) {
+ TL.setParam(i, ReadDeclAs<ParmVarDecl>(Record, Idx));
+ }
+}
+void TypeLocReader::VisitFunctionProtoTypeLoc(FunctionProtoTypeLoc TL) {
+ VisitFunctionTypeLoc(TL);
+}
+void TypeLocReader::VisitFunctionNoProtoTypeLoc(FunctionNoProtoTypeLoc TL) {
+ VisitFunctionTypeLoc(TL);
+}
+void TypeLocReader::VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitTypedefTypeLoc(TypedefTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitTypeOfExprTypeLoc(TypeOfExprTypeLoc TL) {
+ TL.setTypeofLoc(ReadSourceLocation(Record, Idx));
+ TL.setLParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitTypeOfTypeLoc(TypeOfTypeLoc TL) {
+ TL.setTypeofLoc(ReadSourceLocation(Record, Idx));
+ TL.setLParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setRParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setUnderlyingTInfo(Reader.GetTypeSourceInfo(F, Record, Idx));
+}
+void TypeLocReader::VisitDecltypeTypeLoc(DecltypeTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
+ TL.setKWLoc(ReadSourceLocation(Record, Idx));
+ TL.setLParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setRParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setUnderlyingTInfo(Reader.GetTypeSourceInfo(F, Record, Idx));
+}
+void TypeLocReader::VisitAutoTypeLoc(AutoTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitRecordTypeLoc(RecordTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitEnumTypeLoc(EnumTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
+ TL.setAttrNameLoc(ReadSourceLocation(Record, Idx));
+ if (TL.hasAttrOperand()) {
+ SourceRange range;
+ range.setBegin(ReadSourceLocation(Record, Idx));
+ range.setEnd(ReadSourceLocation(Record, Idx));
+ TL.setAttrOperandParensRange(range);
+ }
+ if (TL.hasAttrExprOperand()) {
+ if (Record[Idx++])
+ TL.setAttrExprOperand(Reader.ReadExpr(F));
+ else
+ TL.setAttrExprOperand(nullptr);
+ } else if (TL.hasAttrEnumOperand())
+ TL.setAttrEnumOperandLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitSubstTemplateTypeParmTypeLoc(
+ SubstTemplateTypeParmTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitSubstTemplateTypeParmPackTypeLoc(
+ SubstTemplateTypeParmPackTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitTemplateSpecializationTypeLoc(
+ TemplateSpecializationTypeLoc TL) {
+ TL.setTemplateKeywordLoc(ReadSourceLocation(Record, Idx));
+ TL.setTemplateNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setLAngleLoc(ReadSourceLocation(Record, Idx));
+ TL.setRAngleLoc(ReadSourceLocation(Record, Idx));
+ for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
+ TL.setArgLocInfo(i,
+ Reader.GetTemplateArgumentLocInfo(F,
+ TL.getTypePtr()->getArg(i).getKind(),
+ Record, Idx));
+}
+void TypeLocReader::VisitParenTypeLoc(ParenTypeLoc TL) {
+ TL.setLParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
+ TL.setElaboratedKeywordLoc(ReadSourceLocation(Record, Idx));
+ TL.setQualifierLoc(Reader.ReadNestedNameSpecifierLoc(F, Record, Idx));
+}
+void TypeLocReader::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
+ TL.setElaboratedKeywordLoc(ReadSourceLocation(Record, Idx));
+ TL.setQualifierLoc(Reader.ReadNestedNameSpecifierLoc(F, Record, Idx));
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitDependentTemplateSpecializationTypeLoc(
+ DependentTemplateSpecializationTypeLoc TL) {
+ TL.setElaboratedKeywordLoc(ReadSourceLocation(Record, Idx));
+ TL.setQualifierLoc(Reader.ReadNestedNameSpecifierLoc(F, Record, Idx));
+ TL.setTemplateKeywordLoc(ReadSourceLocation(Record, Idx));
+ TL.setTemplateNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setLAngleLoc(ReadSourceLocation(Record, Idx));
+ TL.setRAngleLoc(ReadSourceLocation(Record, Idx));
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
+ TL.setArgLocInfo(I,
+ Reader.GetTemplateArgumentLocInfo(F,
+ TL.getTypePtr()->getArg(I).getKind(),
+ Record, Idx));
+}
+void TypeLocReader::VisitPackExpansionTypeLoc(PackExpansionTypeLoc TL) {
+ TL.setEllipsisLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitObjCObjectTypeLoc(ObjCObjectTypeLoc TL) {
+ TL.setHasBaseTypeAsWritten(Record[Idx++]);
+ TL.setTypeArgsLAngleLoc(ReadSourceLocation(Record, Idx));
+ TL.setTypeArgsRAngleLoc(ReadSourceLocation(Record, Idx));
+ for (unsigned i = 0, e = TL.getNumTypeArgs(); i != e; ++i)
+ TL.setTypeArgTInfo(i, Reader.GetTypeSourceInfo(F, Record, Idx));
+ TL.setProtocolLAngleLoc(ReadSourceLocation(Record, Idx));
+ TL.setProtocolRAngleLoc(ReadSourceLocation(Record, Idx));
+ for (unsigned i = 0, e = TL.getNumProtocols(); i != e; ++i)
+ TL.setProtocolLoc(i, ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) {
+ TL.setStarLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitAtomicTypeLoc(AtomicTypeLoc TL) {
+ TL.setKWLoc(ReadSourceLocation(Record, Idx));
+ TL.setLParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+TypeSourceInfo *ASTReader::GetTypeSourceInfo(ModuleFile &F,
+ const RecordData &Record,
+ unsigned &Idx) {
+ QualType InfoTy = readType(F, Record, Idx);
+ if (InfoTy.isNull())
+ return nullptr;
+
+ TypeSourceInfo *TInfo = getContext().CreateTypeSourceInfo(InfoTy);
+ TypeLocReader TLR(*this, F, Record, Idx);
+ for (TypeLoc TL = TInfo->getTypeLoc(); !TL.isNull(); TL = TL.getNextTypeLoc())
+ TLR.Visit(TL);
+ return TInfo;
+}
+
+QualType ASTReader::GetType(TypeID ID) {
+ unsigned FastQuals = ID & Qualifiers::FastMask;
+ unsigned Index = ID >> Qualifiers::FastWidth;
+
+ if (Index < NUM_PREDEF_TYPE_IDS) {
+ QualType T;
+ switch ((PredefinedTypeIDs)Index) {
+ case PREDEF_TYPE_NULL_ID:
+ return QualType();
+ case PREDEF_TYPE_VOID_ID:
+ T = Context.VoidTy;
+ break;
+ case PREDEF_TYPE_BOOL_ID:
+ T = Context.BoolTy;
+ break;
+
+ case PREDEF_TYPE_CHAR_U_ID:
+ case PREDEF_TYPE_CHAR_S_ID:
+ // FIXME: Check that the signedness of CharTy is correct!
+ T = Context.CharTy;
+ break;
+
+ case PREDEF_TYPE_UCHAR_ID:
+ T = Context.UnsignedCharTy;
+ break;
+ case PREDEF_TYPE_USHORT_ID:
+ T = Context.UnsignedShortTy;
+ break;
+ case PREDEF_TYPE_UINT_ID:
+ T = Context.UnsignedIntTy;
+ break;
+ case PREDEF_TYPE_ULONG_ID:
+ T = Context.UnsignedLongTy;
+ break;
+ case PREDEF_TYPE_ULONGLONG_ID:
+ T = Context.UnsignedLongLongTy;
+ break;
+ case PREDEF_TYPE_UINT128_ID:
+ T = Context.UnsignedInt128Ty;
+ break;
+ case PREDEF_TYPE_SCHAR_ID:
+ T = Context.SignedCharTy;
+ break;
+ case PREDEF_TYPE_WCHAR_ID:
+ T = Context.WCharTy;
+ break;
+ case PREDEF_TYPE_SHORT_ID:
+ T = Context.ShortTy;
+ break;
+ case PREDEF_TYPE_INT_ID:
+ T = Context.IntTy;
+ break;
+ case PREDEF_TYPE_LONG_ID:
+ T = Context.LongTy;
+ break;
+ case PREDEF_TYPE_LONGLONG_ID:
+ T = Context.LongLongTy;
+ break;
+ case PREDEF_TYPE_INT128_ID:
+ T = Context.Int128Ty;
+ break;
+ case PREDEF_TYPE_HALF_ID:
+ T = Context.HalfTy;
+ break;
+ case PREDEF_TYPE_FLOAT_ID:
+ T = Context.FloatTy;
+ break;
+ case PREDEF_TYPE_DOUBLE_ID:
+ T = Context.DoubleTy;
+ break;
+ case PREDEF_TYPE_LONGDOUBLE_ID:
+ T = Context.LongDoubleTy;
+ break;
+ case PREDEF_TYPE_OVERLOAD_ID:
+ T = Context.OverloadTy;
+ break;
+ case PREDEF_TYPE_BOUND_MEMBER:
+ T = Context.BoundMemberTy;
+ break;
+ case PREDEF_TYPE_PSEUDO_OBJECT:
+ T = Context.PseudoObjectTy;
+ break;
+ case PREDEF_TYPE_DEPENDENT_ID:
+ T = Context.DependentTy;
+ break;
+ case PREDEF_TYPE_UNKNOWN_ANY:
+ T = Context.UnknownAnyTy;
+ break;
+ case PREDEF_TYPE_NULLPTR_ID:
+ T = Context.NullPtrTy;
+ break;
+ case PREDEF_TYPE_CHAR16_ID:
+ T = Context.Char16Ty;
+ break;
+ case PREDEF_TYPE_CHAR32_ID:
+ T = Context.Char32Ty;
+ break;
+ case PREDEF_TYPE_OBJC_ID:
+ T = Context.ObjCBuiltinIdTy;
+ break;
+ case PREDEF_TYPE_OBJC_CLASS:
+ T = Context.ObjCBuiltinClassTy;
+ break;
+ case PREDEF_TYPE_OBJC_SEL:
+ T = Context.ObjCBuiltinSelTy;
+ break;
+ case PREDEF_TYPE_IMAGE1D_ID:
+ T = Context.OCLImage1dTy;
+ break;
+ case PREDEF_TYPE_IMAGE1D_ARR_ID:
+ T = Context.OCLImage1dArrayTy;
+ break;
+ case PREDEF_TYPE_IMAGE1D_BUFF_ID:
+ T = Context.OCLImage1dBufferTy;
+ break;
+ case PREDEF_TYPE_IMAGE2D_ID:
+ T = Context.OCLImage2dTy;
+ break;
+ case PREDEF_TYPE_IMAGE2D_ARR_ID:
+ T = Context.OCLImage2dArrayTy;
+ break;
+ case PREDEF_TYPE_IMAGE2D_DEP_ID:
+ T = Context.OCLImage2dDepthTy;
+ break;
+ case PREDEF_TYPE_IMAGE2D_ARR_DEP_ID:
+ T = Context.OCLImage2dArrayDepthTy;
+ break;
+ case PREDEF_TYPE_IMAGE2D_MSAA_ID:
+ T = Context.OCLImage2dMSAATy;
+ break;
+ case PREDEF_TYPE_IMAGE2D_ARR_MSAA_ID:
+ T = Context.OCLImage2dArrayMSAATy;
+ break;
+ case PREDEF_TYPE_IMAGE2D_MSAA_DEP_ID:
+ T = Context.OCLImage2dMSAADepthTy;
+ break;
+ case PREDEF_TYPE_IMAGE2D_ARR_MSAA_DEPTH_ID:
+ T = Context.OCLImage2dArrayMSAADepthTy;
+ break;
+ case PREDEF_TYPE_IMAGE3D_ID:
+ T = Context.OCLImage3dTy;
+ break;
+ case PREDEF_TYPE_SAMPLER_ID:
+ T = Context.OCLSamplerTy;
+ break;
+ case PREDEF_TYPE_EVENT_ID:
+ T = Context.OCLEventTy;
+ break;
+ case PREDEF_TYPE_CLK_EVENT_ID:
+ T = Context.OCLClkEventTy;
+ break;
+ case PREDEF_TYPE_QUEUE_ID:
+ T = Context.OCLQueueTy;
+ break;
+ case PREDEF_TYPE_NDRANGE_ID:
+ T = Context.OCLNDRangeTy;
+ break;
+ case PREDEF_TYPE_RESERVE_ID_ID:
+ T = Context.OCLReserveIDTy;
+ break;
+ case PREDEF_TYPE_AUTO_DEDUCT:
+ T = Context.getAutoDeductType();
+ break;
+
+ case PREDEF_TYPE_AUTO_RREF_DEDUCT:
+ T = Context.getAutoRRefDeductType();
+ break;
+
+ case PREDEF_TYPE_ARC_UNBRIDGED_CAST:
+ T = Context.ARCUnbridgedCastTy;
+ break;
+
+ case PREDEF_TYPE_BUILTIN_FN:
+ T = Context.BuiltinFnTy;
+ break;
+
+ case PREDEF_TYPE_OMP_ARRAY_SECTION:
+ T = Context.OMPArraySectionTy;
+ break;
+ }
+
+ assert(!T.isNull() && "Unknown predefined type");
+ return T.withFastQualifiers(FastQuals);
+ }
+
+ Index -= NUM_PREDEF_TYPE_IDS;
+ assert(Index < TypesLoaded.size() && "Type index out-of-range");
+ if (TypesLoaded[Index].isNull()) {
+ TypesLoaded[Index] = readTypeRecord(Index);
+ if (TypesLoaded[Index].isNull())
+ return QualType();
+
+ TypesLoaded[Index]->setFromAST();
+ if (DeserializationListener)
+ DeserializationListener->TypeRead(TypeIdx::fromTypeID(ID),
+ TypesLoaded[Index]);
+ }
+
+ return TypesLoaded[Index].withFastQualifiers(FastQuals);
+}
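+
+// Sketch of the TypeID packing decoded above (derived from this function):
+//
+//   TypeID ID = (Index << Qualifiers::FastWidth) | FastQuals;
+//   // Index < NUM_PREDEF_TYPE_IDS names a builtin type; otherwise
+//   // Index - NUM_PREDEF_TYPE_IDS selects a slot in TypesLoaded.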
+
+QualType ASTReader::getLocalType(ModuleFile &F, unsigned LocalID) {
+ return GetType(getGlobalTypeID(F, LocalID));
+}
+
+serialization::TypeID
+ASTReader::getGlobalTypeID(ModuleFile &F, unsigned LocalID) const {
+ unsigned FastQuals = LocalID & Qualifiers::FastMask;
+ unsigned LocalIndex = LocalID >> Qualifiers::FastWidth;
+
+ if (LocalIndex < NUM_PREDEF_TYPE_IDS)
+ return LocalID;
+
+ ContinuousRangeMap<uint32_t, int, 2>::iterator I
+ = F.TypeRemap.find(LocalIndex - NUM_PREDEF_TYPE_IDS);
+ assert(I != F.TypeRemap.end() && "Invalid index into type index remap");
+
+ unsigned GlobalIndex = LocalIndex + I->second;
+ return (GlobalIndex << Qualifiers::FastWidth) | FastQuals;
+}
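+
+// Worked example with hypothetical numbers: if this module file's TypeRemap
+// maps its local range at an offset of +100, a local index of
+// NUM_PREDEF_TYPE_IDS + 4 becomes global index NUM_PREDEF_TYPE_IDS + 104,
+// and the fast qualifier bits are reattached unchanged.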
+
+TemplateArgumentLocInfo
+ASTReader::GetTemplateArgumentLocInfo(ModuleFile &F,
+ TemplateArgument::ArgKind Kind,
+ const RecordData &Record,
+ unsigned &Index) {
+ switch (Kind) {
+ case TemplateArgument::Expression:
+ return ReadExpr(F);
+ case TemplateArgument::Type:
+ return GetTypeSourceInfo(F, Record, Index);
+ case TemplateArgument::Template: {
+ NestedNameSpecifierLoc QualifierLoc = ReadNestedNameSpecifierLoc(F, Record,
+ Index);
+ SourceLocation TemplateNameLoc = ReadSourceLocation(F, Record, Index);
+ return TemplateArgumentLocInfo(QualifierLoc, TemplateNameLoc,
+ SourceLocation());
+ }
+ case TemplateArgument::TemplateExpansion: {
+ NestedNameSpecifierLoc QualifierLoc = ReadNestedNameSpecifierLoc(F, Record,
+ Index);
+ SourceLocation TemplateNameLoc = ReadSourceLocation(F, Record, Index);
+ SourceLocation EllipsisLoc = ReadSourceLocation(F, Record, Index);
+ return TemplateArgumentLocInfo(QualifierLoc, TemplateNameLoc,
+ EllipsisLoc);
+ }
+ case TemplateArgument::Null:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Declaration:
+ case TemplateArgument::NullPtr:
+ case TemplateArgument::Pack:
+ // FIXME: Is this right?
+ return TemplateArgumentLocInfo();
+ }
+ llvm_unreachable("unexpected template argument loc");
+}
+
+TemplateArgumentLoc
+ASTReader::ReadTemplateArgumentLoc(ModuleFile &F,
+ const RecordData &Record, unsigned &Index) {
+ TemplateArgument Arg = ReadTemplateArgument(F, Record, Index);
+
+ if (Arg.getKind() == TemplateArgument::Expression) {
+ if (Record[Index++]) // bool InfoHasSameExpr.
+ return TemplateArgumentLoc(Arg, TemplateArgumentLocInfo(Arg.getAsExpr()));
+ }
+ return TemplateArgumentLoc(Arg, GetTemplateArgumentLocInfo(F, Arg.getKind(),
+ Record, Index));
+}
+
+const ASTTemplateArgumentListInfo*
+ASTReader::ReadASTTemplateArgumentListInfo(ModuleFile &F,
+ const RecordData &Record,
+ unsigned &Index) {
+ SourceLocation LAngleLoc = ReadSourceLocation(F, Record, Index);
+ SourceLocation RAngleLoc = ReadSourceLocation(F, Record, Index);
+ unsigned NumArgsAsWritten = Record[Index++];
+ TemplateArgumentListInfo TemplArgsInfo(LAngleLoc, RAngleLoc);
+ for (unsigned i = 0; i != NumArgsAsWritten; ++i)
+ TemplArgsInfo.addArgument(ReadTemplateArgumentLoc(F, Record, Index));
+ return ASTTemplateArgumentListInfo::Create(getContext(), TemplArgsInfo);
+}
+
+Decl *ASTReader::GetExternalDecl(uint32_t ID) {
+ return GetDecl(ID);
+}
+
+template<typename TemplateSpecializationDecl>
+static void completeRedeclChainForTemplateSpecialization(Decl *D) {
+ if (auto *TSD = dyn_cast<TemplateSpecializationDecl>(D))
+ TSD->getSpecializedTemplate()->LoadLazySpecializations();
+}
+
+void ASTReader::CompleteRedeclChain(const Decl *D) {
+ if (NumCurrentElementsDeserializing) {
+    // We defer completing redeclaration chains while we're deserializing.
+    // Just remember that the AST has marked this one as complete but that
+    // it's not actually complete yet, so we know we still need to complete
+    // it later.
+ PendingIncompleteDeclChains.push_back(const_cast<Decl*>(D));
+ return;
+ }
+
+ const DeclContext *DC = D->getDeclContext()->getRedeclContext();
+
+ // If this is a named declaration, complete it by looking it up
+ // within its context.
+ //
+ // FIXME: Merging a function definition should merge
+ // all mergeable entities within it.
+ if (isa<TranslationUnitDecl>(DC) || isa<NamespaceDecl>(DC) ||
+ isa<CXXRecordDecl>(DC) || isa<EnumDecl>(DC)) {
+ if (DeclarationName Name = cast<NamedDecl>(D)->getDeclName()) {
+ if (!getContext().getLangOpts().CPlusPlus &&
+ isa<TranslationUnitDecl>(DC)) {
+ // Outside of C++, we don't have a lookup table for the TU, so update
+ // the identifier instead. (For C++ modules, we don't store decls
+ // in the serialized identifier table, so we do the lookup in the TU.)
+ auto *II = Name.getAsIdentifierInfo();
+ assert(II && "non-identifier name in C?");
+ if (II->isOutOfDate())
+ updateOutOfDateIdentifier(*II);
+ } else
+ DC->lookup(Name);
+ } else if (needsAnonymousDeclarationNumber(cast<NamedDecl>(D))) {
+ // Find all declarations of this kind from the relevant context.
+ for (auto *DCDecl : cast<Decl>(D->getLexicalDeclContext())->redecls()) {
+ auto *DC = cast<DeclContext>(DCDecl);
+ SmallVector<Decl*, 8> Decls;
+ FindExternalLexicalDecls(
+ DC, [&](Decl::Kind K) { return K == D->getKind(); }, Decls);
+ }
+ }
+ }
+
+ if (auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D))
+ CTSD->getSpecializedTemplate()->LoadLazySpecializations();
+ if (auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(D))
+ VTSD->getSpecializedTemplate()->LoadLazySpecializations();
+ if (auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (auto *Template = FD->getPrimaryTemplate())
+ Template->LoadLazySpecializations();
+ }
+}
+
+uint64_t ASTReader::ReadCXXCtorInitializersRef(ModuleFile &M,
+ const RecordData &Record,
+ unsigned &Idx) {
+ if (Idx >= Record.size() || Record[Idx] > M.LocalNumCXXCtorInitializers) {
+ Error("malformed AST file: missing C++ ctor initializers");
+ return 0;
+ }
+
+ unsigned LocalID = Record[Idx++];
+ return getGlobalBitOffset(M, M.CXXCtorInitializersOffsets[LocalID - 1]);
+}
+
+CXXCtorInitializer **
+ASTReader::GetExternalCXXCtorInitializers(uint64_t Offset) {
+ RecordLocation Loc = getLocalBitOffset(Offset);
+ BitstreamCursor &Cursor = Loc.F->DeclsCursor;
+ SavedStreamPosition SavedPosition(Cursor);
+ Cursor.JumpToBit(Loc.Offset);
+ ReadingKindTracker ReadingKind(Read_Decl, *this);
+
+ RecordData Record;
+ unsigned Code = Cursor.ReadCode();
+ unsigned RecCode = Cursor.readRecord(Code, Record);
+ if (RecCode != DECL_CXX_CTOR_INITIALIZERS) {
+ Error("malformed AST file: missing C++ ctor initializers");
+ return nullptr;
+ }
+
+ unsigned Idx = 0;
+ return ReadCXXCtorInitializers(*Loc.F, Record, Idx);
+}
+
+uint64_t ASTReader::readCXXBaseSpecifiers(ModuleFile &M,
+ const RecordData &Record,
+ unsigned &Idx) {
+ if (Idx >= Record.size() || Record[Idx] > M.LocalNumCXXBaseSpecifiers) {
+ Error("malformed AST file: missing C++ base specifier");
+ return 0;
+ }
+
+ unsigned LocalID = Record[Idx++];
+ return getGlobalBitOffset(M, M.CXXBaseSpecifiersOffsets[LocalID - 1]);
+}
+
+CXXBaseSpecifier *ASTReader::GetExternalCXXBaseSpecifiers(uint64_t Offset) {
+ RecordLocation Loc = getLocalBitOffset(Offset);
+ BitstreamCursor &Cursor = Loc.F->DeclsCursor;
+ SavedStreamPosition SavedPosition(Cursor);
+ Cursor.JumpToBit(Loc.Offset);
+ ReadingKindTracker ReadingKind(Read_Decl, *this);
+ RecordData Record;
+ unsigned Code = Cursor.ReadCode();
+ unsigned RecCode = Cursor.readRecord(Code, Record);
+ if (RecCode != DECL_CXX_BASE_SPECIFIERS) {
+ Error("malformed AST file: missing C++ base specifiers");
+ return nullptr;
+ }
+
+ unsigned Idx = 0;
+ unsigned NumBases = Record[Idx++];
+ void *Mem = Context.Allocate(sizeof(CXXBaseSpecifier) * NumBases);
+ CXXBaseSpecifier *Bases = new (Mem) CXXBaseSpecifier [NumBases];
+ for (unsigned I = 0; I != NumBases; ++I)
+ Bases[I] = ReadCXXBaseSpecifier(*Loc.F, Record, Idx);
+ return Bases;
+}
+
+serialization::DeclID
+ASTReader::getGlobalDeclID(ModuleFile &F, LocalDeclID LocalID) const {
+ if (LocalID < NUM_PREDEF_DECL_IDS)
+ return LocalID;
+
+ ContinuousRangeMap<uint32_t, int, 2>::iterator I
+ = F.DeclRemap.find(LocalID - NUM_PREDEF_DECL_IDS);
+ assert(I != F.DeclRemap.end() && "Invalid index into decl index remap");
+
+ return LocalID + I->second;
+}
+
+bool ASTReader::isDeclIDFromModule(serialization::GlobalDeclID ID,
+ ModuleFile &M) const {
+ // Predefined decls aren't from any module.
+ if (ID < NUM_PREDEF_DECL_IDS)
+ return false;
+
+ return ID - NUM_PREDEF_DECL_IDS >= M.BaseDeclID &&
+ ID - NUM_PREDEF_DECL_IDS < M.BaseDeclID + M.LocalNumDecls;
+}
+
+ModuleFile *ASTReader::getOwningModuleFile(const Decl *D) {
+ if (!D->isFromASTFile())
+ return nullptr;
+ GlobalDeclMapType::const_iterator I = GlobalDeclMap.find(D->getGlobalID());
+ assert(I != GlobalDeclMap.end() && "Corrupted global declaration map");
+ return I->second;
+}
+
+SourceLocation ASTReader::getSourceLocationForDeclID(GlobalDeclID ID) {
+ if (ID < NUM_PREDEF_DECL_IDS)
+ return SourceLocation();
+
+ unsigned Index = ID - NUM_PREDEF_DECL_IDS;
+
+  if (Index >= DeclsLoaded.size()) {
+ Error("declaration ID out-of-range for AST file");
+ return SourceLocation();
+ }
+
+ if (Decl *D = DeclsLoaded[Index])
+ return D->getLocation();
+
+ unsigned RawLocation = 0;
+ RecordLocation Rec = DeclCursorForID(ID, RawLocation);
+ return ReadSourceLocation(*Rec.F, RawLocation);
+}
+
+static Decl *getPredefinedDecl(ASTContext &Context, PredefinedDeclIDs ID) {
+ switch (ID) {
+ case PREDEF_DECL_NULL_ID:
+ return nullptr;
+
+ case PREDEF_DECL_TRANSLATION_UNIT_ID:
+ return Context.getTranslationUnitDecl();
+
+ case PREDEF_DECL_OBJC_ID_ID:
+ return Context.getObjCIdDecl();
+
+ case PREDEF_DECL_OBJC_SEL_ID:
+ return Context.getObjCSelDecl();
+
+ case PREDEF_DECL_OBJC_CLASS_ID:
+ return Context.getObjCClassDecl();
+
+ case PREDEF_DECL_OBJC_PROTOCOL_ID:
+ return Context.getObjCProtocolDecl();
+
+ case PREDEF_DECL_INT_128_ID:
+ return Context.getInt128Decl();
+
+ case PREDEF_DECL_UNSIGNED_INT_128_ID:
+ return Context.getUInt128Decl();
+
+ case PREDEF_DECL_OBJC_INSTANCETYPE_ID:
+ return Context.getObjCInstanceTypeDecl();
+
+ case PREDEF_DECL_BUILTIN_VA_LIST_ID:
+ return Context.getBuiltinVaListDecl();
+
+ case PREDEF_DECL_VA_LIST_TAG:
+ return Context.getVaListTagDecl();
+
+ case PREDEF_DECL_BUILTIN_MS_VA_LIST_ID:
+ return Context.getBuiltinMSVaListDecl();
+
+ case PREDEF_DECL_EXTERN_C_CONTEXT_ID:
+ return Context.getExternCContextDecl();
+
+ case PREDEF_DECL_MAKE_INTEGER_SEQ_ID:
+ return Context.getMakeIntegerSeqDecl();
+ }
+  llvm_unreachable("unknown PredefinedDeclIDs enum value");
+}
+
+Decl *ASTReader::GetExistingDecl(DeclID ID) {
+ if (ID < NUM_PREDEF_DECL_IDS) {
+ Decl *D = getPredefinedDecl(Context, (PredefinedDeclIDs)ID);
+ if (D) {
+ // Track that we have merged the declaration with ID \p ID into the
+ // pre-existing predefined declaration \p D.
+ auto &Merged = KeyDecls[D->getCanonicalDecl()];
+ if (Merged.empty())
+ Merged.push_back(ID);
+ }
+ return D;
+ }
+
+ unsigned Index = ID - NUM_PREDEF_DECL_IDS;
+
+ if (Index >= DeclsLoaded.size()) {
+ assert(0 && "declaration ID out-of-range for AST file");
+ Error("declaration ID out-of-range for AST file");
+ return nullptr;
+ }
+
+ return DeclsLoaded[Index];
+}
+
+Decl *ASTReader::GetDecl(DeclID ID) {
+ if (ID < NUM_PREDEF_DECL_IDS)
+ return GetExistingDecl(ID);
+
+ unsigned Index = ID - NUM_PREDEF_DECL_IDS;
+
+ if (Index >= DeclsLoaded.size()) {
+ assert(0 && "declaration ID out-of-range for AST file");
+ Error("declaration ID out-of-range for AST file");
+ return nullptr;
+ }
+
+ if (!DeclsLoaded[Index]) {
+ ReadDeclRecord(ID);
+ if (DeserializationListener)
+ DeserializationListener->DeclRead(ID, DeclsLoaded[Index]);
+ }
+
+ return DeclsLoaded[Index];
+}
+
+DeclID ASTReader::mapGlobalIDToModuleFileGlobalID(ModuleFile &M,
+ DeclID GlobalID) {
+ if (GlobalID < NUM_PREDEF_DECL_IDS)
+ return GlobalID;
+
+ GlobalDeclMapType::const_iterator I = GlobalDeclMap.find(GlobalID);
+ assert(I != GlobalDeclMap.end() && "Corrupted global declaration map");
+ ModuleFile *Owner = I->second;
+
+ llvm::DenseMap<ModuleFile *, serialization::DeclID>::iterator Pos
+ = M.GlobalToLocalDeclIDs.find(Owner);
+ if (Pos == M.GlobalToLocalDeclIDs.end())
+ return 0;
+
+ return GlobalID - Owner->BaseDeclID + Pos->second;
+}
+
+serialization::DeclID ASTReader::ReadDeclID(ModuleFile &F,
+ const RecordData &Record,
+ unsigned &Idx) {
+ if (Idx >= Record.size()) {
+ Error("Corrupted AST file");
+ return 0;
+ }
+
+ return getGlobalDeclID(F, Record[Idx++]);
+}
+
+/// \brief Resolve the offset of a statement in the AST file into the
+/// statement itself.
+///
+/// This operation will read a new statement from the external
+/// source each time it is called, and is meant to be used via a
+/// LazyOffsetPtr (which is used by Decls for the bodies of functions, etc.).
+Stmt *ASTReader::GetExternalDeclStmt(uint64_t Offset) {
+ // Switch case IDs are per Decl.
+ ClearSwitchCaseIDs();
+
+ // Offset here is a global offset across the entire chain.
+ RecordLocation Loc = getLocalBitOffset(Offset);
+ Loc.F->DeclsCursor.JumpToBit(Loc.Offset);
+ return ReadStmtFromStream(*Loc.F);
+}
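+
+// Illustrative sketch of how this entry point is typically reached
+// (simplified; LazyDeclStmtPtr is the LazyOffsetPtr instantiation that
+// Decls use for function bodies):
+//
+//   LazyDeclStmtPtr Body = /* global bit offset recorded at decl read */;
+//   Stmt *S = Body.get(Source); // invokes GetExternalDeclStmt(Offset)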
+
+void ASTReader::FindExternalLexicalDecls(
+ const DeclContext *DC, llvm::function_ref<bool(Decl::Kind)> IsKindWeWant,
+ SmallVectorImpl<Decl *> &Decls) {
+ bool PredefsVisited[NUM_PREDEF_DECL_IDS] = {};
+
+ auto Visit = [&] (ModuleFile *M, LexicalContents LexicalDecls) {
+ assert(LexicalDecls.size() % 2 == 0 && "expected an even number of entries");
+ for (int I = 0, N = LexicalDecls.size(); I != N; I += 2) {
+ auto K = (Decl::Kind)+LexicalDecls[I];
+ if (!IsKindWeWant(K))
+ continue;
+
+ auto ID = (serialization::DeclID)+LexicalDecls[I + 1];
+
+ // Don't add predefined declarations to the lexical context more
+ // than once.
+ if (ID < NUM_PREDEF_DECL_IDS) {
+ if (PredefsVisited[ID])
+ continue;
+
+ PredefsVisited[ID] = true;
+ }
+
+ if (Decl *D = GetLocalDecl(*M, ID)) {
+ assert(D->getKind() == K && "wrong kind for lexical decl");
+ if (!DC->isDeclInLexicalTraversal(D))
+ Decls.push_back(D);
+ }
+ }
+ };
+
+ if (isa<TranslationUnitDecl>(DC)) {
+ for (auto Lexical : TULexicalDecls)
+ Visit(Lexical.first, Lexical.second);
+ } else {
+ auto I = LexicalDecls.find(DC);
+ if (I != LexicalDecls.end())
+ Visit(I->second.first, I->second.second);
+ }
+
+ ++NumLexicalDeclContextsRead;
+}
+
+namespace {
+
+class DeclIDComp {
+ ASTReader &Reader;
+ ModuleFile &Mod;
+
+public:
+ DeclIDComp(ASTReader &Reader, ModuleFile &M) : Reader(Reader), Mod(M) {}
+
+ bool operator()(LocalDeclID L, LocalDeclID R) const {
+ SourceLocation LHS = getLocation(L);
+ SourceLocation RHS = getLocation(R);
+ return Reader.getSourceManager().isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ bool operator()(SourceLocation LHS, LocalDeclID R) const {
+ SourceLocation RHS = getLocation(R);
+ return Reader.getSourceManager().isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ bool operator()(LocalDeclID L, SourceLocation RHS) const {
+ SourceLocation LHS = getLocation(L);
+ return Reader.getSourceManager().isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ SourceLocation getLocation(LocalDeclID ID) const {
+ return Reader.getSourceManager().getFileLoc(
+ Reader.getSourceLocationForDeclID(Reader.getGlobalDeclID(Mod, ID)));
+ }
+};
+
+}
+
+void ASTReader::FindFileRegionDecls(FileID File,
+ unsigned Offset, unsigned Length,
+ SmallVectorImpl<Decl *> &Decls) {
+ SourceManager &SM = getSourceManager();
+
+ llvm::DenseMap<FileID, FileDeclsInfo>::iterator I = FileDeclIDs.find(File);
+ if (I == FileDeclIDs.end())
+ return;
+
+ FileDeclsInfo &DInfo = I->second;
+ if (DInfo.Decls.empty())
+ return;
+
+ SourceLocation
+ BeginLoc = SM.getLocForStartOfFile(File).getLocWithOffset(Offset);
+ SourceLocation EndLoc = BeginLoc.getLocWithOffset(Length);
+
+ DeclIDComp DIDComp(*this, *DInfo.Mod);
+ ArrayRef<serialization::LocalDeclID>::iterator
+ BeginIt = std::lower_bound(DInfo.Decls.begin(), DInfo.Decls.end(),
+ BeginLoc, DIDComp);
+ if (BeginIt != DInfo.Decls.begin())
+ --BeginIt;
+
+  // If we are pointing at a top-level decl inside an objc container, we need
+  // to backtrack until we find it; otherwise we will fail to report that the
+  // region overlaps with an objc container.
+ while (BeginIt != DInfo.Decls.begin() &&
+ GetDecl(getGlobalDeclID(*DInfo.Mod, *BeginIt))
+ ->isTopLevelDeclInObjCContainer())
+ --BeginIt;
+
+ ArrayRef<serialization::LocalDeclID>::iterator
+ EndIt = std::upper_bound(DInfo.Decls.begin(), DInfo.Decls.end(),
+ EndLoc, DIDComp);
+ if (EndIt != DInfo.Decls.end())
+ ++EndIt;
+
+ for (ArrayRef<serialization::LocalDeclID>::iterator
+ DIt = BeginIt; DIt != EndIt; ++DIt)
+ Decls.push_back(GetDecl(getGlobalDeclID(*DInfo.Mod, *DIt)));
+}
+
+bool
+ASTReader::FindExternalVisibleDeclsByName(const DeclContext *DC,
+ DeclarationName Name) {
+ assert(DC->hasExternalVisibleStorage() && DC == DC->getPrimaryContext() &&
+ "DeclContext has no visible decls in storage");
+ if (!Name)
+ return false;
+
+ auto It = Lookups.find(DC);
+ if (It == Lookups.end())
+ return false;
+
+ Deserializing LookupResults(this);
+
+ // Load the list of declarations.
+ SmallVector<NamedDecl *, 64> Decls;
+ for (DeclID ID : It->second.Table.find(Name)) {
+ NamedDecl *ND = cast<NamedDecl>(GetDecl(ID));
+ if (ND->getDeclName() == Name)
+ Decls.push_back(ND);
+ }
+
+ ++NumVisibleDeclContextsRead;
+ SetExternalVisibleDeclsForName(DC, Name, Decls);
+ return !Decls.empty();
+}
+
+void ASTReader::completeVisibleDeclsMap(const DeclContext *DC) {
+ if (!DC->hasExternalVisibleStorage())
+ return;
+
+ auto It = Lookups.find(DC);
+ assert(It != Lookups.end() &&
+ "have external visible storage but no lookup tables");
+
+ DeclsMap Decls;
+
+ for (DeclID ID : It->second.Table.findAll()) {
+ NamedDecl *ND = cast<NamedDecl>(GetDecl(ID));
+ Decls[ND->getDeclName()].push_back(ND);
+ }
+
+ ++NumVisibleDeclContextsRead;
+
+ for (DeclsMap::iterator I = Decls.begin(), E = Decls.end(); I != E; ++I) {
+ SetExternalVisibleDeclsForName(DC, I->first, I->second);
+ }
+ const_cast<DeclContext *>(DC)->setHasExternalVisibleStorage(false);
+}
+
+const serialization::reader::DeclContextLookupTable *
+ASTReader::getLoadedLookupTables(DeclContext *Primary) const {
+ auto I = Lookups.find(Primary);
+ return I == Lookups.end() ? nullptr : &I->second;
+}
+
+/// \brief Under non-PCH compilation the consumer receives the objc methods
+/// before receiving the implementation, and codegen depends on this ordering.
+/// We simulate this by deserializing the implementation's methods and passing
+/// them to the consumer before passing the deserialized implementation decl
+/// itself.
+static void PassObjCImplDeclToConsumer(ObjCImplDecl *ImplD,
+ ASTConsumer *Consumer) {
+ assert(ImplD && Consumer);
+
+ for (auto *I : ImplD->methods())
+ Consumer->HandleInterestingDecl(DeclGroupRef(I));
+
+ Consumer->HandleInterestingDecl(DeclGroupRef(ImplD));
+}
+
+void ASTReader::PassInterestingDeclsToConsumer() {
+ assert(Consumer);
+
+ if (PassingDeclsToConsumer)
+ return;
+
+  // Guard variable to avoid recursively re-entering the process of passing
+  // decls to the consumer.
+ SaveAndRestore<bool> GuardPassingDeclsToConsumer(PassingDeclsToConsumer,
+ true);
+
+ // Ensure that we've loaded all potentially-interesting declarations
+ // that need to be eagerly loaded.
+ for (auto ID : EagerlyDeserializedDecls)
+ GetDecl(ID);
+ EagerlyDeserializedDecls.clear();
+
+ while (!InterestingDecls.empty()) {
+ Decl *D = InterestingDecls.front();
+ InterestingDecls.pop_front();
+
+ PassInterestingDeclToConsumer(D);
+ }
+}
+
+void ASTReader::PassInterestingDeclToConsumer(Decl *D) {
+ if (ObjCImplDecl *ImplD = dyn_cast<ObjCImplDecl>(D))
+ PassObjCImplDeclToConsumer(ImplD, Consumer);
+ else
+ Consumer->HandleInterestingDecl(DeclGroupRef(D));
+}
+
+void ASTReader::StartTranslationUnit(ASTConsumer *Consumer) {
+ this->Consumer = Consumer;
+
+ if (Consumer)
+ PassInterestingDeclsToConsumer();
+
+ if (DeserializationListener)
+ DeserializationListener->ReaderInitialized(this);
+}
+
+void ASTReader::PrintStats() {
+ std::fprintf(stderr, "*** AST File Statistics:\n");
+
+ unsigned NumTypesLoaded
+ = TypesLoaded.size() - std::count(TypesLoaded.begin(), TypesLoaded.end(),
+ QualType());
+ unsigned NumDeclsLoaded
+ = DeclsLoaded.size() - std::count(DeclsLoaded.begin(), DeclsLoaded.end(),
+ (Decl *)nullptr);
+ unsigned NumIdentifiersLoaded
+ = IdentifiersLoaded.size() - std::count(IdentifiersLoaded.begin(),
+ IdentifiersLoaded.end(),
+ (IdentifierInfo *)nullptr);
+ unsigned NumMacrosLoaded
+ = MacrosLoaded.size() - std::count(MacrosLoaded.begin(),
+ MacrosLoaded.end(),
+ (MacroInfo *)nullptr);
+ unsigned NumSelectorsLoaded
+ = SelectorsLoaded.size() - std::count(SelectorsLoaded.begin(),
+ SelectorsLoaded.end(),
+ Selector());
+
+ if (unsigned TotalNumSLocEntries = getTotalNumSLocs())
+ std::fprintf(stderr, " %u/%u source location entries read (%f%%)\n",
+ NumSLocEntriesRead, TotalNumSLocEntries,
+ ((float)NumSLocEntriesRead/TotalNumSLocEntries * 100));
+ if (!TypesLoaded.empty())
+ std::fprintf(stderr, " %u/%u types read (%f%%)\n",
+ NumTypesLoaded, (unsigned)TypesLoaded.size(),
+ ((float)NumTypesLoaded/TypesLoaded.size() * 100));
+ if (!DeclsLoaded.empty())
+ std::fprintf(stderr, " %u/%u declarations read (%f%%)\n",
+ NumDeclsLoaded, (unsigned)DeclsLoaded.size(),
+ ((float)NumDeclsLoaded/DeclsLoaded.size() * 100));
+ if (!IdentifiersLoaded.empty())
+ std::fprintf(stderr, " %u/%u identifiers read (%f%%)\n",
+ NumIdentifiersLoaded, (unsigned)IdentifiersLoaded.size(),
+ ((float)NumIdentifiersLoaded/IdentifiersLoaded.size() * 100));
+ if (!MacrosLoaded.empty())
+ std::fprintf(stderr, " %u/%u macros read (%f%%)\n",
+ NumMacrosLoaded, (unsigned)MacrosLoaded.size(),
+ ((float)NumMacrosLoaded/MacrosLoaded.size() * 100));
+ if (!SelectorsLoaded.empty())
+ std::fprintf(stderr, " %u/%u selectors read (%f%%)\n",
+ NumSelectorsLoaded, (unsigned)SelectorsLoaded.size(),
+ ((float)NumSelectorsLoaded/SelectorsLoaded.size() * 100));
+ if (TotalNumStatements)
+ std::fprintf(stderr, " %u/%u statements read (%f%%)\n",
+ NumStatementsRead, TotalNumStatements,
+ ((float)NumStatementsRead/TotalNumStatements * 100));
+ if (TotalNumMacros)
+ std::fprintf(stderr, " %u/%u macros read (%f%%)\n",
+ NumMacrosRead, TotalNumMacros,
+ ((float)NumMacrosRead/TotalNumMacros * 100));
+ if (TotalLexicalDeclContexts)
+ std::fprintf(stderr, " %u/%u lexical declcontexts read (%f%%)\n",
+ NumLexicalDeclContextsRead, TotalLexicalDeclContexts,
+ ((float)NumLexicalDeclContextsRead/TotalLexicalDeclContexts
+ * 100));
+ if (TotalVisibleDeclContexts)
+ std::fprintf(stderr, " %u/%u visible declcontexts read (%f%%)\n",
+ NumVisibleDeclContextsRead, TotalVisibleDeclContexts,
+ ((float)NumVisibleDeclContextsRead/TotalVisibleDeclContexts
+ * 100));
+ if (TotalNumMethodPoolEntries) {
+ std::fprintf(stderr, " %u/%u method pool entries read (%f%%)\n",
+ NumMethodPoolEntriesRead, TotalNumMethodPoolEntries,
+ ((float)NumMethodPoolEntriesRead/TotalNumMethodPoolEntries
+ * 100));
+ }
+ if (NumMethodPoolLookups) {
+ std::fprintf(stderr, " %u/%u method pool lookups succeeded (%f%%)\n",
+ NumMethodPoolHits, NumMethodPoolLookups,
+ ((float)NumMethodPoolHits/NumMethodPoolLookups * 100.0));
+ }
+ if (NumMethodPoolTableLookups) {
+ std::fprintf(stderr, " %u/%u method pool table lookups succeeded (%f%%)\n",
+ NumMethodPoolTableHits, NumMethodPoolTableLookups,
+ ((float)NumMethodPoolTableHits/NumMethodPoolTableLookups
+ * 100.0));
+ }
+
+ if (NumIdentifierLookupHits) {
+ std::fprintf(stderr,
+ " %u / %u identifier table lookups succeeded (%f%%)\n",
+ NumIdentifierLookupHits, NumIdentifierLookups,
+ (double)NumIdentifierLookupHits*100.0/NumIdentifierLookups);
+ }
+
+ if (GlobalIndex) {
+ std::fprintf(stderr, "\n");
+ GlobalIndex->printStats();
+ }
+
+ std::fprintf(stderr, "\n");
+ dump();
+ std::fprintf(stderr, "\n");
+}
+
+template<typename Key, typename ModuleFile, unsigned InitialCapacity>
+static void
+dumpModuleIDMap(StringRef Name,
+ const ContinuousRangeMap<Key, ModuleFile *,
+ InitialCapacity> &Map) {
+ if (Map.begin() == Map.end())
+ return;
+
+ typedef ContinuousRangeMap<Key, ModuleFile *, InitialCapacity> MapType;
+ llvm::errs() << Name << ":\n";
+ for (typename MapType::const_iterator I = Map.begin(), IEnd = Map.end();
+ I != IEnd; ++I) {
+ llvm::errs() << " " << I->first << " -> " << I->second->FileName
+ << "\n";
+ }
+}
+
+void ASTReader::dump() {
+ llvm::errs() << "*** PCH/ModuleFile Remappings:\n";
+ dumpModuleIDMap("Global bit offset map", GlobalBitOffsetsMap);
+ dumpModuleIDMap("Global source location entry map", GlobalSLocEntryMap);
+ dumpModuleIDMap("Global type map", GlobalTypeMap);
+ dumpModuleIDMap("Global declaration map", GlobalDeclMap);
+ dumpModuleIDMap("Global identifier map", GlobalIdentifierMap);
+ dumpModuleIDMap("Global macro map", GlobalMacroMap);
+ dumpModuleIDMap("Global submodule map", GlobalSubmoduleMap);
+ dumpModuleIDMap("Global selector map", GlobalSelectorMap);
+ dumpModuleIDMap("Global preprocessed entity map",
+ GlobalPreprocessedEntityMap);
+
+ llvm::errs() << "\n*** PCH/Modules Loaded:";
+ for (ModuleManager::ModuleConstIterator M = ModuleMgr.begin(),
+ MEnd = ModuleMgr.end();
+ M != MEnd; ++M)
+ (*M)->dump();
+}
+
+/// Return the amount of memory used by memory buffers, breaking down
+/// by heap-backed versus mmap'ed memory.
+void ASTReader::getMemoryBufferSizes(MemoryBufferSizes &sizes) const {
+ for (ModuleConstIterator I = ModuleMgr.begin(),
+ E = ModuleMgr.end(); I != E; ++I) {
+ if (llvm::MemoryBuffer *buf = (*I)->Buffer.get()) {
+ size_t bytes = buf->getBufferSize();
+ switch (buf->getBufferKind()) {
+ case llvm::MemoryBuffer::MemoryBuffer_Malloc:
+ sizes.malloc_bytes += bytes;
+ break;
+ case llvm::MemoryBuffer::MemoryBuffer_MMap:
+ sizes.mmap_bytes += bytes;
+ break;
+ }
+ }
+ }
+}
+
+void ASTReader::InitializeSema(Sema &S) {
+ SemaObj = &S;
+ S.addExternalSource(this);
+
+ // Make sure any declarations that were deserialized "too early"
+ // still get added to the identifier's declaration chains.
+ for (uint64_t ID : PreloadedDeclIDs) {
+ NamedDecl *D = cast<NamedDecl>(GetDecl(ID));
+ pushExternalDeclIntoScope(D, D->getDeclName());
+ }
+ PreloadedDeclIDs.clear();
+
+ // FIXME: What happens if these are changed by a module import?
+ if (!FPPragmaOptions.empty()) {
+ assert(FPPragmaOptions.size() == 1 && "Wrong number of FP_PRAGMA_OPTIONS");
+ SemaObj->FPFeatures.fp_contract = FPPragmaOptions[0];
+ }
+
+ // FIXME: What happens if these are changed by a module import?
+ if (!OpenCLExtensions.empty()) {
+ unsigned I = 0;
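+ // OPENCLEXT expands once per extension listed in OpenCLExtensions.def,
+ // consuming the serialized flags in that fixed order.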
+#define OPENCLEXT(nm) SemaObj->OpenCLFeatures.nm = OpenCLExtensions[I++];
+#include "clang/Basic/OpenCLExtensions.def"
+
+ assert(OpenCLExtensions.size() == I && "Wrong number of OPENCL_EXTENSIONS");
+ }
+
+ UpdateSema();
+}
+
+void ASTReader::UpdateSema() {
+ assert(SemaObj && "no Sema to update");
+
+ // Load the offsets of the declarations that Sema references.
+ // They will be lazily deserialized when needed.
+ if (!SemaDeclRefs.empty()) {
+ assert(SemaDeclRefs.size() % 2 == 0);
+ for (unsigned I = 0; I != SemaDeclRefs.size(); I += 2) {
+ if (!SemaObj->StdNamespace)
+ SemaObj->StdNamespace = SemaDeclRefs[I];
+ if (!SemaObj->StdBadAlloc)
+ SemaObj->StdBadAlloc = SemaDeclRefs[I+1];
+ }
+ SemaDeclRefs.clear();
+ }
+
+ // Update the state of 'pragma clang optimize'. Use the same API as if we had
+ // encountered the pragma in the source.
+ if (OptimizeOffPragmaLocation.isValid())
+ SemaObj->ActOnPragmaOptimize(/* IsOn = */ false, OptimizeOffPragmaLocation);
+}
+
+IdentifierInfo *ASTReader::get(StringRef Name) {
+ // Note that we are loading an identifier.
+ Deserializing AnIdentifier(this);
+
+ IdentifierLookupVisitor Visitor(Name, /*PriorGeneration=*/0,
+ NumIdentifierLookups,
+ NumIdentifierLookupHits);
+
+ // We don't need to do identifier table lookups in C++ modules (we preload
+ // all interesting declarations, and don't need to use the scope for name
+ // lookups). Perform the lookup in PCH files, though, since we don't build
+ // a complete initial identifier table if we're carrying on from a PCH.
+ if (Context.getLangOpts().CPlusPlus) {
+ for (auto F : ModuleMgr.pch_modules())
+ if (Visitor(*F))
+ break;
+ } else {
+ // If there is a global index, look there first to determine which modules
+ // provably do not have any results for this identifier.
+ GlobalModuleIndex::HitSet Hits;
+ GlobalModuleIndex::HitSet *HitsPtr = nullptr;
+ if (!loadGlobalIndex()) {
+ if (GlobalIndex->lookupIdentifier(Name, Hits)) {
+ HitsPtr = &Hits;
+ }
+ }
+
+ ModuleMgr.visit(Visitor, HitsPtr);
+ }
+
+ IdentifierInfo *II = Visitor.getIdentifierInfo();
+ markIdentifierUpToDate(II);
+ return II;
+}
+
+namespace clang {
+ /// \brief An identifier-lookup iterator that enumerates all of the
+ /// identifiers stored within a set of AST files.
+ class ASTIdentifierIterator : public IdentifierIterator {
+ /// \brief The AST reader whose identifiers are being enumerated.
+ const ASTReader &Reader;
+
+ /// \brief The current index into the chain of AST files stored in
+ /// the AST reader.
+ unsigned Index;
+
+ /// \brief The current position within the identifier lookup table
+ /// of the current AST file.
+ ASTIdentifierLookupTable::key_iterator Current;
+
+ /// \brief The end position within the identifier lookup table of
+ /// the current AST file.
+ ASTIdentifierLookupTable::key_iterator End;
+
+ public:
+ explicit ASTIdentifierIterator(const ASTReader &Reader);
+
+ StringRef Next() override;
+ };
+}
+
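+// Iteration starts at the most recently loaded AST file and walks
+// backwards toward the first one.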
+ASTIdentifierIterator::ASTIdentifierIterator(const ASTReader &Reader)
+ : Reader(Reader), Index(Reader.ModuleMgr.size() - 1) {
+ ASTIdentifierLookupTable *IdTable
+ = (ASTIdentifierLookupTable *)Reader.ModuleMgr[Index].IdentifierLookupTable;
+ Current = IdTable->key_begin();
+ End = IdTable->key_end();
+}
+
+StringRef ASTIdentifierIterator::Next() {
+ while (Current == End) {
+ // If we have exhausted all of our AST files, we're done.
+ if (Index == 0)
+ return StringRef();
+
+ --Index;
+ ASTIdentifierLookupTable *IdTable
+ = (ASTIdentifierLookupTable *)Reader.ModuleMgr[Index].
+ IdentifierLookupTable;
+ Current = IdTable->key_begin();
+ End = IdTable->key_end();
+ }
+
+ // We have identifiers remaining in the current AST file; return
+ // the next one.
+ StringRef Result = *Current;
+ ++Current;
+ return Result;
+}
+
+IdentifierIterator *ASTReader::getIdentifiers() {
+ if (!loadGlobalIndex())
+ return GlobalIndex->createIdentifierIterator();
+
+ return new ASTIdentifierIterator(*this);
+}
+
+namespace clang { namespace serialization {
+ class ReadMethodPoolVisitor {
+ ASTReader &Reader;
+ Selector Sel;
+ unsigned PriorGeneration;
+ unsigned InstanceBits;
+ unsigned FactoryBits;
+ bool InstanceHasMoreThanOneDecl;
+ bool FactoryHasMoreThanOneDecl;
+ SmallVector<ObjCMethodDecl *, 4> InstanceMethods;
+ SmallVector<ObjCMethodDecl *, 4> FactoryMethods;
+
+ public:
+ ReadMethodPoolVisitor(ASTReader &Reader, Selector Sel,
+ unsigned PriorGeneration)
+ : Reader(Reader), Sel(Sel), PriorGeneration(PriorGeneration),
+ InstanceBits(0), FactoryBits(0), InstanceHasMoreThanOneDecl(false),
+ FactoryHasMoreThanOneDecl(false) {}
+
+ bool operator()(ModuleFile &M) {
+ if (!M.SelectorLookupTable)
+ return false;
+
+ // If we've already searched this module file, skip it now.
+ if (M.Generation <= PriorGeneration)
+ return true;
+
+ ++Reader.NumMethodPoolTableLookups;
+ ASTSelectorLookupTable *PoolTable
+ = (ASTSelectorLookupTable*)M.SelectorLookupTable;
+ ASTSelectorLookupTable::iterator Pos = PoolTable->find(Sel);
+ if (Pos == PoolTable->end())
+ return false;
+
+ ++Reader.NumMethodPoolTableHits;
+ ++Reader.NumSelectorsRead;
+ // FIXME: Not quite happy with the statistics here. We probably should
+ // disable this tracking when called via LoadSelector.
+ // Also, should entries without methods count as misses?
+ ++Reader.NumMethodPoolEntriesRead;
+ ASTSelectorLookupTrait::data_type Data = *Pos;
+ if (Reader.DeserializationListener)
+ Reader.DeserializationListener->SelectorRead(Data.ID, Sel);
+
+ InstanceMethods.append(Data.Instance.begin(), Data.Instance.end());
+ FactoryMethods.append(Data.Factory.begin(), Data.Factory.end());
+ InstanceBits = Data.InstanceBits;
+ FactoryBits = Data.FactoryBits;
+ InstanceHasMoreThanOneDecl = Data.InstanceHasMoreThanOneDecl;
+ FactoryHasMoreThanOneDecl = Data.FactoryHasMoreThanOneDecl;
+ return true;
+ }
+
+ /// \brief Retrieve the instance methods found by this visitor.
+ ArrayRef<ObjCMethodDecl *> getInstanceMethods() const {
+ return InstanceMethods;
+ }
+
+ /// \brief Retrieve the factory methods found by this visitor.
+ ArrayRef<ObjCMethodDecl *> getFactoryMethods() const {
+ return FactoryMethods;
+ }
+
+ unsigned getInstanceBits() const { return InstanceBits; }
+ unsigned getFactoryBits() const { return FactoryBits; }
+ bool instanceHasMoreThanOneDecl() const {
+ return InstanceHasMoreThanOneDecl;
+ }
+ bool factoryHasMoreThanOneDecl() const { return FactoryHasMoreThanOneDecl; }
+ };
+} } // end namespace clang::serialization
+
+/// \brief Add the given set of methods to the method list.
+static void addMethodsToPool(Sema &S, ArrayRef<ObjCMethodDecl *> Methods,
+ ObjCMethodList &List) {
+ for (unsigned I = 0, N = Methods.size(); I != N; ++I) {
+ S.addMethodToGlobalList(&List, Methods[I]);
+ }
+}
+
+void ASTReader::ReadMethodPool(Selector Sel) {
+ // Get the selector generation and update it to the current generation.
+ unsigned &Generation = SelectorGeneration[Sel];
+ unsigned PriorGeneration = Generation;
+ Generation = getGeneration();
+
+ // Search for methods defined with this selector.
+ ++NumMethodPoolLookups;
+ ReadMethodPoolVisitor Visitor(*this, Sel, PriorGeneration);
+ ModuleMgr.visit(Visitor);
+
+ if (Visitor.getInstanceMethods().empty() &&
+ Visitor.getFactoryMethods().empty())
+ return;
+
+ ++NumMethodPoolHits;
+
+ if (!getSema())
+ return;
+
+ Sema &S = *getSema();
+ Sema::GlobalMethodPool::iterator Pos
+ = S.MethodPool.insert(std::make_pair(Sel, Sema::GlobalMethods())).first;
+
+ Pos->second.first.setBits(Visitor.getInstanceBits());
+ Pos->second.first.setHasMoreThanOneDecl(Visitor.instanceHasMoreThanOneDecl());
+ Pos->second.second.setBits(Visitor.getFactoryBits());
+ Pos->second.second.setHasMoreThanOneDecl(Visitor.factoryHasMoreThanOneDecl());
+
+ // Add methods to the global pool *after* setting hasMoreThanOneDecl, since
+ // when building a module we keep every method individually and may need to
+ // update hasMoreThanOneDecl as we add the methods.
+ addMethodsToPool(S, Visitor.getInstanceMethods(), Pos->second.first);
+ addMethodsToPool(S, Visitor.getFactoryMethods(), Pos->second.second);
+}
+
+void ASTReader::ReadKnownNamespaces(
+ SmallVectorImpl<NamespaceDecl *> &Namespaces) {
+ Namespaces.clear();
+
+ for (unsigned I = 0, N = KnownNamespaces.size(); I != N; ++I) {
+ if (NamespaceDecl *Namespace
+ = dyn_cast_or_null<NamespaceDecl>(GetDecl(KnownNamespaces[I])))
+ Namespaces.push_back(Namespace);
+ }
+}
+
+void ASTReader::ReadUndefinedButUsed(
+ llvm::DenseMap<NamedDecl*, SourceLocation> &Undefined) {
+ for (unsigned Idx = 0, N = UndefinedButUsed.size(); Idx != N;) {
+ NamedDecl *D = cast<NamedDecl>(GetDecl(UndefinedButUsed[Idx++]));
+ SourceLocation Loc =
+ SourceLocation::getFromRawEncoding(UndefinedButUsed[Idx++]);
+ Undefined.insert(std::make_pair(D, Loc));
+ }
+}
+
+void ASTReader::ReadMismatchingDeleteExpressions(llvm::MapVector<
+ FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &
+ Exprs) {
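+ // Each entry is laid out as: field declaration ID, a count, then that
+ // many (source location, is-array-form) pairs.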
+ for (unsigned Idx = 0, N = DelayedDeleteExprs.size(); Idx != N;) {
+ FieldDecl *FD = cast<FieldDecl>(GetDecl(DelayedDeleteExprs[Idx++]));
+ uint64_t Count = DelayedDeleteExprs[Idx++];
+ for (uint64_t C = 0; C < Count; ++C) {
+ SourceLocation DeleteLoc =
+ SourceLocation::getFromRawEncoding(DelayedDeleteExprs[Idx++]);
+ const bool IsArrayForm = DelayedDeleteExprs[Idx++];
+ Exprs[FD].push_back(std::make_pair(DeleteLoc, IsArrayForm));
+ }
+ }
+}
+
+void ASTReader::ReadTentativeDefinitions(
+ SmallVectorImpl<VarDecl *> &TentativeDefs) {
+ for (unsigned I = 0, N = TentativeDefinitions.size(); I != N; ++I) {
+ VarDecl *Var = dyn_cast_or_null<VarDecl>(GetDecl(TentativeDefinitions[I]));
+ if (Var)
+ TentativeDefs.push_back(Var);
+ }
+ TentativeDefinitions.clear();
+}
+
+void ASTReader::ReadUnusedFileScopedDecls(
+ SmallVectorImpl<const DeclaratorDecl *> &Decls) {
+ for (unsigned I = 0, N = UnusedFileScopedDecls.size(); I != N; ++I) {
+ DeclaratorDecl *D
+ = dyn_cast_or_null<DeclaratorDecl>(GetDecl(UnusedFileScopedDecls[I]));
+ if (D)
+ Decls.push_back(D);
+ }
+ UnusedFileScopedDecls.clear();
+}
+
+void ASTReader::ReadDelegatingConstructors(
+ SmallVectorImpl<CXXConstructorDecl *> &Decls) {
+ for (unsigned I = 0, N = DelegatingCtorDecls.size(); I != N; ++I) {
+ CXXConstructorDecl *D
+ = dyn_cast_or_null<CXXConstructorDecl>(GetDecl(DelegatingCtorDecls[I]));
+ if (D)
+ Decls.push_back(D);
+ }
+ DelegatingCtorDecls.clear();
+}
+
+void ASTReader::ReadExtVectorDecls(SmallVectorImpl<TypedefNameDecl *> &Decls) {
+ for (unsigned I = 0, N = ExtVectorDecls.size(); I != N; ++I) {
+ TypedefNameDecl *D
+ = dyn_cast_or_null<TypedefNameDecl>(GetDecl(ExtVectorDecls[I]));
+ if (D)
+ Decls.push_back(D);
+ }
+ ExtVectorDecls.clear();
+}
+
+void ASTReader::ReadUnusedLocalTypedefNameCandidates(
+ llvm::SmallSetVector<const TypedefNameDecl *, 4> &Decls) {
+ for (unsigned I = 0, N = UnusedLocalTypedefNameCandidates.size(); I != N;
+ ++I) {
+ TypedefNameDecl *D = dyn_cast_or_null<TypedefNameDecl>(
+ GetDecl(UnusedLocalTypedefNameCandidates[I]));
+ if (D)
+ Decls.insert(D);
+ }
+ UnusedLocalTypedefNameCandidates.clear();
+}
+
+void ASTReader::ReadReferencedSelectors(
+ SmallVectorImpl<std::pair<Selector, SourceLocation> > &Sels) {
+ if (ReferencedSelectorsData.empty())
+ return;
+
+ // If there are @selector references, add them to the pool. This is for
+ // the implementation of -Wselector.
+ unsigned int DataSize = ReferencedSelectorsData.size()-1;
+ unsigned I = 0;
+ while (I < DataSize) {
+ Selector Sel = DecodeSelector(ReferencedSelectorsData[I++]);
+ SourceLocation SelLoc
+ = SourceLocation::getFromRawEncoding(ReferencedSelectorsData[I++]);
+ Sels.push_back(std::make_pair(Sel, SelLoc));
+ }
+ ReferencedSelectorsData.clear();
+}
+
+void ASTReader::ReadWeakUndeclaredIdentifiers(
+ SmallVectorImpl<std::pair<IdentifierInfo *, WeakInfo> > &WeakIDs) {
+ if (WeakUndeclaredIdentifiers.empty())
+ return;
+
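+ // Each entry is a quadruple: weak identifier, alias identifier, source
+ // location, and a 'used' flag.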
+ for (unsigned I = 0, N = WeakUndeclaredIdentifiers.size(); I < N; /*none*/) {
+ IdentifierInfo *WeakId
+ = DecodeIdentifierInfo(WeakUndeclaredIdentifiers[I++]);
+ IdentifierInfo *AliasId
+ = DecodeIdentifierInfo(WeakUndeclaredIdentifiers[I++]);
+ SourceLocation Loc
+ = SourceLocation::getFromRawEncoding(WeakUndeclaredIdentifiers[I++]);
+ bool Used = WeakUndeclaredIdentifiers[I++];
+ WeakInfo WI(AliasId, Loc);
+ WI.setUsed(Used);
+ WeakIDs.push_back(std::make_pair(WeakId, WI));
+ }
+ WeakUndeclaredIdentifiers.clear();
+}
+
+void ASTReader::ReadUsedVTables(SmallVectorImpl<ExternalVTableUse> &VTables) {
+ for (unsigned Idx = 0, N = VTableUses.size(); Idx < N; /* In loop */) {
+ ExternalVTableUse VT;
+ VT.Record = dyn_cast_or_null<CXXRecordDecl>(GetDecl(VTableUses[Idx++]));
+ VT.Location = SourceLocation::getFromRawEncoding(VTableUses[Idx++]);
+ VT.DefinitionRequired = VTableUses[Idx++];
+ VTables.push_back(VT);
+ }
+
+ VTableUses.clear();
+}
+
+void ASTReader::ReadPendingInstantiations(
+ SmallVectorImpl<std::pair<ValueDecl *, SourceLocation> > &Pending) {
+ for (unsigned Idx = 0, N = PendingInstantiations.size(); Idx < N;) {
+ ValueDecl *D = cast<ValueDecl>(GetDecl(PendingInstantiations[Idx++]));
+ SourceLocation Loc
+ = SourceLocation::getFromRawEncoding(PendingInstantiations[Idx++]);
+
+ Pending.push_back(std::make_pair(D, Loc));
+ }
+ PendingInstantiations.clear();
+}
+
+void ASTReader::ReadLateParsedTemplates(
+ llvm::MapVector<const FunctionDecl *, LateParsedTemplate *> &LPTMap) {
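+ // Each entry is laid out as: function declaration ID, declaration ID,
+ // token count, then the tokens themselves.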
+ for (unsigned Idx = 0, N = LateParsedTemplates.size(); Idx < N;
+ /* In loop */) {
+ FunctionDecl *FD = cast<FunctionDecl>(GetDecl(LateParsedTemplates[Idx++]));
+
+ LateParsedTemplate *LT = new LateParsedTemplate;
+ LT->D = GetDecl(LateParsedTemplates[Idx++]);
+
+ ModuleFile *F = getOwningModuleFile(LT->D);
+ assert(F && "No module");
+
+ unsigned TokN = LateParsedTemplates[Idx++];
+ LT->Toks.reserve(TokN);
+ for (unsigned T = 0; T < TokN; ++T)
+ LT->Toks.push_back(ReadToken(*F, LateParsedTemplates, Idx));
+
+ LPTMap.insert(std::make_pair(FD, LT));
+ }
+
+ LateParsedTemplates.clear();
+}
+
+void ASTReader::LoadSelector(Selector Sel) {
+ // It would be complicated to avoid reading the methods anyway. So don't.
+ ReadMethodPool(Sel);
+}
+
+void ASTReader::SetIdentifierInfo(IdentifierID ID, IdentifierInfo *II) {
+ assert(ID && "Non-zero identifier ID required");
+ assert(ID <= IdentifiersLoaded.size() && "identifier ID out of range");
+ IdentifiersLoaded[ID - 1] = II;
+ if (DeserializationListener)
+ DeserializationListener->IdentifierRead(ID, II);
+}
+
+/// \brief Set the globally-visible declarations associated with the given
+/// identifier.
+///
+/// If the AST reader is currently in a state where the given declaration IDs
+/// cannot safely be resolved, they are queued until it is safe to resolve
+/// them.
+///
+/// \param II an IdentifierInfo that refers to one or more globally-visible
+/// declarations.
+///
+/// \param DeclIDs the set of declaration IDs with the name @p II that are
+/// visible at global scope.
+///
+/// \param Decls if non-null, this vector will be populated with the set of
+/// deserialized declarations. These declarations will not be pushed into
+/// scope.
+void
+ASTReader::SetGloballyVisibleDecls(IdentifierInfo *II,
+ const SmallVectorImpl<uint32_t> &DeclIDs,
+ SmallVectorImpl<Decl *> *Decls) {
+ if (NumCurrentElementsDeserializing && !Decls) {
+ PendingIdentifierInfos[II].append(DeclIDs.begin(), DeclIDs.end());
+ return;
+ }
+
+ for (unsigned I = 0, N = DeclIDs.size(); I != N; ++I) {
+ if (!SemaObj) {
+ // Queue this declaration so that it will be added to the
+ // translation unit scope and identifier's declaration chain
+ // once a Sema object is known.
+ PreloadedDeclIDs.push_back(DeclIDs[I]);
+ continue;
+ }
+
+ NamedDecl *D = cast<NamedDecl>(GetDecl(DeclIDs[I]));
+
+ // If we're simply supposed to record the declarations, do so now.
+ if (Decls) {
+ Decls->push_back(D);
+ continue;
+ }
+
+ // Introduce this declaration into the translation-unit scope
+ // and add it to the declaration chain for this identifier, so
+ // that (unqualified) name lookup will find it.
+ pushExternalDeclIntoScope(D, II);
+ }
+}
+
+IdentifierInfo *ASTReader::DecodeIdentifierInfo(IdentifierID ID) {
+ if (ID == 0)
+ return nullptr;
+
+ if (IdentifiersLoaded.empty()) {
+ Error("no identifier table in AST file");
+ return nullptr;
+ }
+
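+ // Identifier IDs are 1-based; rebase to index into IdentifiersLoaded.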
+ ID -= 1;
+ if (!IdentifiersLoaded[ID]) {
+ GlobalIdentifierMapType::iterator I = GlobalIdentifierMap.find(ID + 1);
+ assert(I != GlobalIdentifierMap.end() && "Corrupted global identifier map");
+ ModuleFile *M = I->second;
+ unsigned Index = ID - M->BaseIdentifierID;
+ const char *Str = M->IdentifierTableData + M->IdentifierOffsets[Index];
+
+ // All of the strings in the AST file are preceded by a 16-bit length.
+ // Extract that 16-bit length to avoid having to execute strlen().
+ // NOTE: 'StrLenPtr' is an 'unsigned char*' so that we load bytes as
+ // unsigned integers. This is important to avoid integer overflow when
+ // we cast them to 'unsigned'.
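+ // The stored length includes the terminating NUL, hence the '- 1' below.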
+ const unsigned char *StrLenPtr = (const unsigned char*) Str - 2;
+ unsigned StrLen = (((unsigned) StrLenPtr[0])
+ | (((unsigned) StrLenPtr[1]) << 8)) - 1;
+ IdentifiersLoaded[ID]
+ = &PP.getIdentifierTable().get(StringRef(Str, StrLen));
+ if (DeserializationListener)
+ DeserializationListener->IdentifierRead(ID + 1, IdentifiersLoaded[ID]);
+ }
+
+ return IdentifiersLoaded[ID];
+}
+
+IdentifierInfo *ASTReader::getLocalIdentifier(ModuleFile &M, unsigned LocalID) {
+ return DecodeIdentifierInfo(getGlobalIdentifierID(M, LocalID));
+}
+
+IdentifierID ASTReader::getGlobalIdentifierID(ModuleFile &M, unsigned LocalID) {
+ if (LocalID < NUM_PREDEF_IDENT_IDS)
+ return LocalID;
+
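+ // The remap stores a signed offset per range of local IDs; adding it
+ // rebases the module-local ID into the global ID space.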
+ ContinuousRangeMap<uint32_t, int, 2>::iterator I
+ = M.IdentifierRemap.find(LocalID - NUM_PREDEF_IDENT_IDS);
+ assert(I != M.IdentifierRemap.end()
+ && "Invalid index into identifier index remap");
+
+ return LocalID + I->second;
+}
+
+MacroInfo *ASTReader::getMacro(MacroID ID) {
+ if (ID == 0)
+ return nullptr;
+
+ if (MacrosLoaded.empty()) {
+ Error("no macro table in AST file");
+ return nullptr;
+ }
+
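+ // Strip off the predefined macro IDs to index into MacrosLoaded.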
+ ID -= NUM_PREDEF_MACRO_IDS;
+ if (!MacrosLoaded[ID]) {
+ GlobalMacroMapType::iterator I
+ = GlobalMacroMap.find(ID + NUM_PREDEF_MACRO_IDS);
+ assert(I != GlobalMacroMap.end() && "Corrupted global macro map");
+ ModuleFile *M = I->second;
+ unsigned Index = ID - M->BaseMacroID;
+ MacrosLoaded[ID] = ReadMacroRecord(*M, M->MacroOffsets[Index]);
+
+ if (DeserializationListener)
+ DeserializationListener->MacroRead(ID + NUM_PREDEF_MACRO_IDS,
+ MacrosLoaded[ID]);
+ }
+
+ return MacrosLoaded[ID];
+}
+
+MacroID ASTReader::getGlobalMacroID(ModuleFile &M, unsigned LocalID) {
+ if (LocalID < NUM_PREDEF_MACRO_IDS)
+ return LocalID;
+
+ ContinuousRangeMap<uint32_t, int, 2>::iterator I
+ = M.MacroRemap.find(LocalID - NUM_PREDEF_MACRO_IDS);
+ assert(I != M.MacroRemap.end() && "Invalid index into macro index remap");
+
+ return LocalID + I->second;
+}
+
+serialization::SubmoduleID
+ASTReader::getGlobalSubmoduleID(ModuleFile &M, unsigned LocalID) {
+ if (LocalID < NUM_PREDEF_SUBMODULE_IDS)
+ return LocalID;
+
+ ContinuousRangeMap<uint32_t, int, 2>::iterator I
+ = M.SubmoduleRemap.find(LocalID - NUM_PREDEF_SUBMODULE_IDS);
+ assert(I != M.SubmoduleRemap.end()
+ && "Invalid index into submodule index remap");
+
+ return LocalID + I->second;
+}
+
+Module *ASTReader::getSubmodule(SubmoduleID GlobalID) {
+ if (GlobalID < NUM_PREDEF_SUBMODULE_IDS) {
+ assert(GlobalID == 0 && "Unhandled global submodule ID");
+ return nullptr;
+ }
+
+ if (GlobalID > SubmodulesLoaded.size()) {
+ Error("submodule ID out of range in AST file");
+ return nullptr;
+ }
+
+ return SubmodulesLoaded[GlobalID - NUM_PREDEF_SUBMODULE_IDS];
+}
+
+Module *ASTReader::getModule(unsigned ID) {
+ return getSubmodule(ID);
+}
+
+ModuleFile *ASTReader::getLocalModuleFile(ModuleFile &F, unsigned ID) {
+ if (ID & 1) {
+ // It's a module, look it up by submodule ID.
+ auto I = GlobalSubmoduleMap.find(getGlobalSubmoduleID(F, ID >> 1));
+ return I == GlobalSubmoduleMap.end() ? nullptr : I->second;
+ } else {
+ // It's a prefix (preamble, PCH, ...). Look it up by index.
+ unsigned IndexFromEnd = ID >> 1;
+ assert(IndexFromEnd && "got reference to unknown module file");
+ return getModuleManager().pch_modules().end()[-IndexFromEnd];
+ }
+}
+
+unsigned ASTReader::getModuleFileID(ModuleFile *F) {
+ if (!F)
+ return 1;
+
+ // For a file representing a module, use the submodule ID of the top-level
+ // module as the file ID. For any other kind of file, the number of such
+ // files loaded beforehand will be the same on reload.
+ // FIXME: Is this true even if we have an explicit module file and a PCH?
+ if (F->isModule())
+ return ((F->BaseSubmoduleID + NUM_PREDEF_SUBMODULE_IDS) << 1) | 1;
+
+ auto PCHModules = getModuleManager().pch_modules();
+ auto I = std::find(PCHModules.begin(), PCHModules.end(), F);
+ assert(I != PCHModules.end() && "emitting reference to unknown file");
+ return (I - PCHModules.end()) << 1;
+}
+
+llvm::Optional<ExternalASTSource::ASTSourceDescriptor>
+ASTReader::getSourceDescriptor(unsigned ID) {
+ if (const Module *M = getSubmodule(ID))
+ return ExternalASTSource::ASTSourceDescriptor(*M);
+
+ // If there is only a single PCH, return it instead.
+ // Chained PCHs are not supported.
+ if (ModuleMgr.size() == 1) {
+ ModuleFile &MF = ModuleMgr.getPrimaryModule();
+ return ASTReader::ASTSourceDescriptor(
+ MF.OriginalSourceFileName, MF.OriginalDir, MF.FileName, MF.Signature);
+ }
+ return None;
+}
+
+Selector ASTReader::getLocalSelector(ModuleFile &M, unsigned LocalID) {
+ return DecodeSelector(getGlobalSelectorID(M, LocalID));
+}
+
+Selector ASTReader::DecodeSelector(serialization::SelectorID ID) {
+ if (ID == 0)
+ return Selector();
+
+ if (ID > SelectorsLoaded.size()) {
+ Error("selector ID out of range in AST file");
+ return Selector();
+ }
+
+ if (SelectorsLoaded[ID - 1].getAsOpaquePtr() == nullptr) {
+ // Load this selector from the selector table.
+ GlobalSelectorMapType::iterator I = GlobalSelectorMap.find(ID);
+ assert(I != GlobalSelectorMap.end() && "Corrupted global selector map");
+ ModuleFile &M = *I->second;
+ ASTSelectorLookupTrait Trait(*this, M);
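+ // Rebase the global selector ID to this module's local offset table.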
+ unsigned Idx = ID - M.BaseSelectorID - NUM_PREDEF_SELECTOR_IDS;
+ SelectorsLoaded[ID - 1] =
+ Trait.ReadKey(M.SelectorLookupTableData + M.SelectorOffsets[Idx], 0);
+ if (DeserializationListener)
+ DeserializationListener->SelectorRead(ID, SelectorsLoaded[ID - 1]);
+ }
+
+ return SelectorsLoaded[ID - 1];
+}
+
+Selector ASTReader::GetExternalSelector(serialization::SelectorID ID) {
+ return DecodeSelector(ID);
+}
+
+uint32_t ASTReader::GetNumExternalSelectors() {
+ // ID 0 (the null selector) is considered an external selector.
+ return getTotalNumSelectors() + 1;
+}
+
+serialization::SelectorID
+ASTReader::getGlobalSelectorID(ModuleFile &M, unsigned LocalID) const {
+ if (LocalID < NUM_PREDEF_SELECTOR_IDS)
+ return LocalID;
+
+ ContinuousRangeMap<uint32_t, int, 2>::iterator I
+ = M.SelectorRemap.find(LocalID - NUM_PREDEF_SELECTOR_IDS);
+ assert(I != M.SelectorRemap.end()
+ && "Invalid index into selector index remap");
+
+ return LocalID + I->second;
+}
+
+DeclarationName
+ASTReader::ReadDeclarationName(ModuleFile &F,
+ const RecordData &Record, unsigned &Idx) {
+ DeclarationName::NameKind Kind = (DeclarationName::NameKind)Record[Idx++];
+ switch (Kind) {
+ case DeclarationName::Identifier:
+ return DeclarationName(GetIdentifierInfo(F, Record, Idx));
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ return DeclarationName(ReadSelector(F, Record, Idx));
+
+ case DeclarationName::CXXConstructorName:
+ return Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(readType(F, Record, Idx)));
+
+ case DeclarationName::CXXDestructorName:
+ return Context.DeclarationNames.getCXXDestructorName(
+ Context.getCanonicalType(readType(F, Record, Idx)));
+
+ case DeclarationName::CXXConversionFunctionName:
+ return Context.DeclarationNames.getCXXConversionFunctionName(
+ Context.getCanonicalType(readType(F, Record, Idx)));
+
+ case DeclarationName::CXXOperatorName:
+ return Context.DeclarationNames.getCXXOperatorName(
+ (OverloadedOperatorKind)Record[Idx++]);
+
+ case DeclarationName::CXXLiteralOperatorName:
+ return Context.DeclarationNames.getCXXLiteralOperatorName(
+ GetIdentifierInfo(F, Record, Idx));
+
+ case DeclarationName::CXXUsingDirective:
+ return DeclarationName::getUsingDirectiveName();
+ }
+
+ llvm_unreachable("Invalid NameKind!");
+}
+
+void ASTReader::ReadDeclarationNameLoc(ModuleFile &F,
+ DeclarationNameLoc &DNLoc,
+ DeclarationName Name,
+ const RecordData &Record, unsigned &Idx) {
+ switch (Name.getNameKind()) {
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ DNLoc.NamedType.TInfo = GetTypeSourceInfo(F, Record, Idx);
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ DNLoc.CXXOperatorName.BeginOpNameLoc
+ = ReadSourceLocation(F, Record, Idx).getRawEncoding();
+ DNLoc.CXXOperatorName.EndOpNameLoc
+ = ReadSourceLocation(F, Record, Idx).getRawEncoding();
+ break;
+
+ case DeclarationName::CXXLiteralOperatorName:
+ DNLoc.CXXLiteralOperatorName.OpNameLoc
+ = ReadSourceLocation(F, Record, Idx).getRawEncoding();
+ break;
+
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXUsingDirective:
+ break;
+ }
+}
+
+void ASTReader::ReadDeclarationNameInfo(ModuleFile &F,
+ DeclarationNameInfo &NameInfo,
+ const RecordData &Record, unsigned &Idx) {
+ NameInfo.setName(ReadDeclarationName(F, Record, Idx));
+ NameInfo.setLoc(ReadSourceLocation(F, Record, Idx));
+ DeclarationNameLoc DNLoc;
+ ReadDeclarationNameLoc(F, DNLoc, NameInfo.getName(), Record, Idx);
+ NameInfo.setInfo(DNLoc);
+}
+
+void ASTReader::ReadQualifierInfo(ModuleFile &F, QualifierInfo &Info,
+ const RecordData &Record, unsigned &Idx) {
+ Info.QualifierLoc = ReadNestedNameSpecifierLoc(F, Record, Idx);
+ unsigned NumTPLists = Record[Idx++];
+ Info.NumTemplParamLists = NumTPLists;
+ if (NumTPLists) {
+ Info.TemplParamLists = new (Context) TemplateParameterList*[NumTPLists];
+ for (unsigned i=0; i != NumTPLists; ++i)
+ Info.TemplParamLists[i] = ReadTemplateParameterList(F, Record, Idx);
+ }
+}
+
+TemplateName
+ASTReader::ReadTemplateName(ModuleFile &F, const RecordData &Record,
+ unsigned &Idx) {
+ TemplateName::NameKind Kind = (TemplateName::NameKind)Record[Idx++];
+ switch (Kind) {
+ case TemplateName::Template:
+ return TemplateName(ReadDeclAs<TemplateDecl>(F, Record, Idx));
+
+ case TemplateName::OverloadedTemplate: {
+ unsigned size = Record[Idx++];
+ UnresolvedSet<8> Decls;
+ while (size--)
+ Decls.addDecl(ReadDeclAs<NamedDecl>(F, Record, Idx));
+
+ return Context.getOverloadedTemplateName(Decls.begin(), Decls.end());
+ }
+
+ case TemplateName::QualifiedTemplate: {
+ NestedNameSpecifier *NNS = ReadNestedNameSpecifier(F, Record, Idx);
+ bool hasTemplKeyword = Record[Idx++];
+ TemplateDecl *Template = ReadDeclAs<TemplateDecl>(F, Record, Idx);
+ return Context.getQualifiedTemplateName(NNS, hasTemplKeyword, Template);
+ }
+
+ case TemplateName::DependentTemplate: {
+ NestedNameSpecifier *NNS = ReadNestedNameSpecifier(F, Record, Idx);
+ if (Record[Idx++]) // isIdentifier
+ return Context.getDependentTemplateName(NNS,
+ GetIdentifierInfo(F, Record,
+ Idx));
+ return Context.getDependentTemplateName(NNS,
+ (OverloadedOperatorKind)Record[Idx++]);
+ }
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ TemplateTemplateParmDecl *param
+ = ReadDeclAs<TemplateTemplateParmDecl>(F, Record, Idx);
+ if (!param) return TemplateName();
+ TemplateName replacement = ReadTemplateName(F, Record, Idx);
+ return Context.getSubstTemplateTemplateParm(param, replacement);
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ TemplateTemplateParmDecl *Param
+ = ReadDeclAs<TemplateTemplateParmDecl>(F, Record, Idx);
+ if (!Param)
+ return TemplateName();
+
+ TemplateArgument ArgPack = ReadTemplateArgument(F, Record, Idx);
+ if (ArgPack.getKind() != TemplateArgument::Pack)
+ return TemplateName();
+
+ return Context.getSubstTemplateTemplateParmPack(Param, ArgPack);
+ }
+ }
+
+ llvm_unreachable("Unhandled template name kind!");
+}
+
+TemplateArgument ASTReader::ReadTemplateArgument(ModuleFile &F,
+ const RecordData &Record,
+ unsigned &Idx,
+ bool Canonicalize) {
+ if (Canonicalize) {
+ // The caller wants a canonical template argument. Sometimes the AST only
+ // wants template arguments in canonical form (particularly as the template
+ // argument lists of template specializations) so ensure we preserve that
+ // canonical form across serialization.
+ TemplateArgument Arg = ReadTemplateArgument(F, Record, Idx, false);
+ return Context.getCanonicalTemplateArgument(Arg);
+ }
+
+ TemplateArgument::ArgKind Kind = (TemplateArgument::ArgKind)Record[Idx++];
+ switch (Kind) {
+ case TemplateArgument::Null:
+ return TemplateArgument();
+ case TemplateArgument::Type:
+ return TemplateArgument(readType(F, Record, Idx));
+ case TemplateArgument::Declaration: {
+ ValueDecl *D = ReadDeclAs<ValueDecl>(F, Record, Idx);
+ return TemplateArgument(D, readType(F, Record, Idx));
+ }
+ case TemplateArgument::NullPtr:
+ return TemplateArgument(readType(F, Record, Idx), /*isNullPtr*/true);
+ case TemplateArgument::Integral: {
+ llvm::APSInt Value = ReadAPSInt(Record, Idx);
+ QualType T = readType(F, Record, Idx);
+ return TemplateArgument(Context, Value, T);
+ }
+ case TemplateArgument::Template:
+ return TemplateArgument(ReadTemplateName(F, Record, Idx));
+ case TemplateArgument::TemplateExpansion: {
+ TemplateName Name = ReadTemplateName(F, Record, Idx);
+ Optional<unsigned> NumTemplateExpansions;
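+ // The expansion count is stored incremented by one; zero means the
+ // number of expansions is unknown.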
+ if (unsigned NumExpansions = Record[Idx++])
+ NumTemplateExpansions = NumExpansions - 1;
+ return TemplateArgument(Name, NumTemplateExpansions);
+ }
+ case TemplateArgument::Expression:
+ return TemplateArgument(ReadExpr(F));
+ case TemplateArgument::Pack: {
+ unsigned NumArgs = Record[Idx++];
+ TemplateArgument *Args = new (Context) TemplateArgument[NumArgs];
+ for (unsigned I = 0; I != NumArgs; ++I)
+ Args[I] = ReadTemplateArgument(F, Record, Idx);
+ return TemplateArgument(llvm::makeArrayRef(Args, NumArgs));
+ }
+ }
+
+ llvm_unreachable("Unhandled template argument kind!");
+}
+
+TemplateParameterList *
+ASTReader::ReadTemplateParameterList(ModuleFile &F,
+ const RecordData &Record, unsigned &Idx) {
+ SourceLocation TemplateLoc = ReadSourceLocation(F, Record, Idx);
+ SourceLocation LAngleLoc = ReadSourceLocation(F, Record, Idx);
+ SourceLocation RAngleLoc = ReadSourceLocation(F, Record, Idx);
+
+ unsigned NumParams = Record[Idx++];
+ SmallVector<NamedDecl *, 16> Params;
+ Params.reserve(NumParams);
+ while (NumParams--)
+ Params.push_back(ReadDeclAs<NamedDecl>(F, Record, Idx));
+
+ TemplateParameterList* TemplateParams =
+ TemplateParameterList::Create(Context, TemplateLoc, LAngleLoc,
+ Params, RAngleLoc);
+ return TemplateParams;
+}
+
+void ASTReader::ReadTemplateArgumentList(
+ SmallVectorImpl<TemplateArgument> &TemplArgs, ModuleFile &F,
+ const RecordData &Record, unsigned &Idx, bool Canonicalize) {
+ unsigned NumTemplateArgs = Record[Idx++];
+ TemplArgs.reserve(NumTemplateArgs);
+ while (NumTemplateArgs--)
+ TemplArgs.push_back(ReadTemplateArgument(F, Record, Idx, Canonicalize));
+}
+
+/// \brief Read a UnresolvedSet structure.
+void ASTReader::ReadUnresolvedSet(ModuleFile &F, LazyASTUnresolvedSet &Set,
+ const RecordData &Record, unsigned &Idx) {
+ unsigned NumDecls = Record[Idx++];
+ Set.reserve(Context, NumDecls);
+ while (NumDecls--) {
+ DeclID ID = ReadDeclID(F, Record, Idx);
+ AccessSpecifier AS = (AccessSpecifier)Record[Idx++];
+ Set.addLazyDecl(Context, ID, AS);
+ }
+}
+
+CXXBaseSpecifier
+ASTReader::ReadCXXBaseSpecifier(ModuleFile &F,
+ const RecordData &Record, unsigned &Idx) {
+ bool isVirtual = static_cast<bool>(Record[Idx++]);
+ bool isBaseOfClass = static_cast<bool>(Record[Idx++]);
+ AccessSpecifier AS = static_cast<AccessSpecifier>(Record[Idx++]);
+ bool inheritConstructors = static_cast<bool>(Record[Idx++]);
+ TypeSourceInfo *TInfo = GetTypeSourceInfo(F, Record, Idx);
+ SourceRange Range = ReadSourceRange(F, Record, Idx);
+ SourceLocation EllipsisLoc = ReadSourceLocation(F, Record, Idx);
+ CXXBaseSpecifier Result(Range, isVirtual, isBaseOfClass, AS, TInfo,
+ EllipsisLoc);
+ Result.setInheritConstructors(inheritConstructors);
+ return Result;
+}
+
+CXXCtorInitializer **
+ASTReader::ReadCXXCtorInitializers(ModuleFile &F, const RecordData &Record,
+ unsigned &Idx) {
+ unsigned NumInitializers = Record[Idx++];
+ assert(NumInitializers && "wrote ctor initializers but have no inits");
+ auto **CtorInitializers = new (Context) CXXCtorInitializer*[NumInitializers];
+ for (unsigned i = 0; i != NumInitializers; ++i) {
+ TypeSourceInfo *TInfo = nullptr;
+ bool IsBaseVirtual = false;
+ FieldDecl *Member = nullptr;
+ IndirectFieldDecl *IndirectMember = nullptr;
+
+ CtorInitializerType Type = (CtorInitializerType)Record[Idx++];
+ switch (Type) {
+ case CTOR_INITIALIZER_BASE:
+ TInfo = GetTypeSourceInfo(F, Record, Idx);
+ IsBaseVirtual = Record[Idx++];
+ break;
+
+ case CTOR_INITIALIZER_DELEGATING:
+ TInfo = GetTypeSourceInfo(F, Record, Idx);
+ break;
+
+ case CTOR_INITIALIZER_MEMBER:
+ Member = ReadDeclAs<FieldDecl>(F, Record, Idx);
+ break;
+
+ case CTOR_INITIALIZER_INDIRECT_MEMBER:
+ IndirectMember = ReadDeclAs<IndirectFieldDecl>(F, Record, Idx);
+ break;
+ }
+
+ SourceLocation MemberOrEllipsisLoc = ReadSourceLocation(F, Record, Idx);
+ Expr *Init = ReadExpr(F);
+ SourceLocation LParenLoc = ReadSourceLocation(F, Record, Idx);
+ SourceLocation RParenLoc = ReadSourceLocation(F, Record, Idx);
+ bool IsWritten = Record[Idx++];
+ unsigned SourceOrderOrNumArrayIndices;
+ SmallVector<VarDecl *, 8> Indices;
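+ // The next record value is overloaded: the source order for explicitly
+ // written initializers, or the number of array index variables otherwise.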
+ if (IsWritten) {
+ SourceOrderOrNumArrayIndices = Record[Idx++];
+ } else {
+ SourceOrderOrNumArrayIndices = Record[Idx++];
+ Indices.reserve(SourceOrderOrNumArrayIndices);
+ for (unsigned i=0; i != SourceOrderOrNumArrayIndices; ++i)
+ Indices.push_back(ReadDeclAs<VarDecl>(F, Record, Idx));
+ }
+
+ CXXCtorInitializer *BOMInit;
+ if (Type == CTOR_INITIALIZER_BASE) {
+ BOMInit = new (Context)
+ CXXCtorInitializer(Context, TInfo, IsBaseVirtual, LParenLoc, Init,
+ RParenLoc, MemberOrEllipsisLoc);
+ } else if (Type == CTOR_INITIALIZER_DELEGATING) {
+ BOMInit = new (Context)
+ CXXCtorInitializer(Context, TInfo, LParenLoc, Init, RParenLoc);
+ } else if (IsWritten) {
+ if (Member)
+ BOMInit = new (Context) CXXCtorInitializer(
+ Context, Member, MemberOrEllipsisLoc, LParenLoc, Init, RParenLoc);
+ else
+ BOMInit = new (Context)
+ CXXCtorInitializer(Context, IndirectMember, MemberOrEllipsisLoc,
+ LParenLoc, Init, RParenLoc);
+ } else {
+ if (IndirectMember) {
+ assert(Indices.empty() && "Indirect field improperly initialized");
+ BOMInit = new (Context)
+ CXXCtorInitializer(Context, IndirectMember, MemberOrEllipsisLoc,
+ LParenLoc, Init, RParenLoc);
+ } else {
+ BOMInit = CXXCtorInitializer::Create(
+ Context, Member, MemberOrEllipsisLoc, LParenLoc, Init, RParenLoc,
+ Indices.data(), Indices.size());
+ }
+ }
+
+ if (IsWritten)
+ BOMInit->setSourceOrder(SourceOrderOrNumArrayIndices);
+ CtorInitializers[i] = BOMInit;
+ }
+
+ return CtorInitializers;
+}
+
+NestedNameSpecifier *
+ASTReader::ReadNestedNameSpecifier(ModuleFile &F,
+ const RecordData &Record, unsigned &Idx) {
+ unsigned N = Record[Idx++];
+ NestedNameSpecifier *NNS = nullptr, *Prev = nullptr;
+ for (unsigned I = 0; I != N; ++I) {
+ NestedNameSpecifier::SpecifierKind Kind
+ = (NestedNameSpecifier::SpecifierKind)Record[Idx++];
+ switch (Kind) {
+ case NestedNameSpecifier::Identifier: {
+ IdentifierInfo *II = GetIdentifierInfo(F, Record, Idx);
+ NNS = NestedNameSpecifier::Create(Context, Prev, II);
+ break;
+ }
+
+ case NestedNameSpecifier::Namespace: {
+ NamespaceDecl *NS = ReadDeclAs<NamespaceDecl>(F, Record, Idx);
+ NNS = NestedNameSpecifier::Create(Context, Prev, NS);
+ break;
+ }
+
+ case NestedNameSpecifier::NamespaceAlias: {
+ NamespaceAliasDecl *Alias = ReadDeclAs<NamespaceAliasDecl>(F, Record, Idx);
+ NNS = NestedNameSpecifier::Create(Context, Prev, Alias);
+ break;
+ }
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ const Type *T = readType(F, Record, Idx).getTypePtrOrNull();
+ if (!T)
+ return nullptr;
+
+ bool Template = Record[Idx++];
+ NNS = NestedNameSpecifier::Create(Context, Prev, Template, T);
+ break;
+ }
+
+ case NestedNameSpecifier::Global: {
+ NNS = NestedNameSpecifier::GlobalSpecifier(Context);
+ // No associated value, and there can't be a prefix.
+ break;
+ }
+
+ case NestedNameSpecifier::Super: {
+ CXXRecordDecl *RD = ReadDeclAs<CXXRecordDecl>(F, Record, Idx);
+ NNS = NestedNameSpecifier::SuperSpecifier(Context, RD);
+ break;
+ }
+ }
+ Prev = NNS;
+ }
+ return NNS;
+}
+
+NestedNameSpecifierLoc
+ASTReader::ReadNestedNameSpecifierLoc(ModuleFile &F, const RecordData &Record,
+ unsigned &Idx) {
+ unsigned N = Record[Idx++];
+ NestedNameSpecifierLocBuilder Builder;
+ for (unsigned I = 0; I != N; ++I) {
+ NestedNameSpecifier::SpecifierKind Kind
+ = (NestedNameSpecifier::SpecifierKind)Record[Idx++];
+ switch (Kind) {
+ case NestedNameSpecifier::Identifier: {
+ IdentifierInfo *II = GetIdentifierInfo(F, Record, Idx);
+ SourceRange Range = ReadSourceRange(F, Record, Idx);
+ Builder.Extend(Context, II, Range.getBegin(), Range.getEnd());
+ break;
+ }
+
+ case NestedNameSpecifier::Namespace: {
+ NamespaceDecl *NS = ReadDeclAs<NamespaceDecl>(F, Record, Idx);
+ SourceRange Range = ReadSourceRange(F, Record, Idx);
+ Builder.Extend(Context, NS, Range.getBegin(), Range.getEnd());
+ break;
+ }
+
+ case NestedNameSpecifier::NamespaceAlias: {
+ NamespaceAliasDecl *Alias = ReadDeclAs<NamespaceAliasDecl>(F, Record, Idx);
+ SourceRange Range = ReadSourceRange(F, Record, Idx);
+ Builder.Extend(Context, Alias, Range.getBegin(), Range.getEnd());
+ break;
+ }
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ bool Template = Record[Idx++];
+ TypeSourceInfo *T = GetTypeSourceInfo(F, Record, Idx);
+ if (!T)
+ return NestedNameSpecifierLoc();
+ SourceLocation ColonColonLoc = ReadSourceLocation(F, Record, Idx);
+
+ // FIXME: 'template' keyword location not saved anywhere, so we fake it.
+ Builder.Extend(Context,
+ Template? T->getTypeLoc().getBeginLoc() : SourceLocation(),
+ T->getTypeLoc(), ColonColonLoc);
+ break;
+ }
+
+ case NestedNameSpecifier::Global: {
+ SourceLocation ColonColonLoc = ReadSourceLocation(F, Record, Idx);
+ Builder.MakeGlobal(Context, ColonColonLoc);
+ break;
+ }
+
+ case NestedNameSpecifier::Super: {
+ CXXRecordDecl *RD = ReadDeclAs<CXXRecordDecl>(F, Record, Idx);
+ SourceRange Range = ReadSourceRange(F, Record, Idx);
+ Builder.MakeSuper(Context, RD, Range.getBegin(), Range.getEnd());
+ break;
+ }
+ }
+ }
+
+ return Builder.getWithLocInContext(Context);
+}
+
+SourceRange
+ASTReader::ReadSourceRange(ModuleFile &F, const RecordData &Record,
+ unsigned &Idx) {
+ SourceLocation beg = ReadSourceLocation(F, Record, Idx);
+ SourceLocation end = ReadSourceLocation(F, Record, Idx);
+ return SourceRange(beg, end);
+}
+
+/// \brief Read an integral value
+llvm::APInt ASTReader::ReadAPInt(const RecordData &Record, unsigned &Idx) {
+ unsigned BitWidth = Record[Idx++];
+ unsigned NumWords = llvm::APInt::getNumWords(BitWidth);
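+ // The words of the value follow in the record as 64-bit chunks.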
+ llvm::APInt Result(BitWidth, NumWords, &Record[Idx]);
+ Idx += NumWords;
+ return Result;
+}
+
+/// \brief Read a signed integral value
+llvm::APSInt ASTReader::ReadAPSInt(const RecordData &Record, unsigned &Idx) {
+ bool isUnsigned = Record[Idx++];
+ return llvm::APSInt(ReadAPInt(Record, Idx), isUnsigned);
+}
+
+/// \brief Read a floating-point value
+llvm::APFloat ASTReader::ReadAPFloat(const RecordData &Record,
+ const llvm::fltSemantics &Sem,
+ unsigned &Idx) {
+ return llvm::APFloat(Sem, ReadAPInt(Record, Idx));
+}
+
+/// \brief Read a string
+std::string ASTReader::ReadString(const RecordData &Record, unsigned &Idx) {
+ unsigned Len = Record[Idx++];
+ std::string Result(Record.data() + Idx, Record.data() + Idx + Len);
+ Idx += Len;
+ return Result;
+}
+
+std::string ASTReader::ReadPath(ModuleFile &F, const RecordData &Record,
+ unsigned &Idx) {
+ std::string Filename = ReadString(Record, Idx);
+ ResolveImportedPath(F, Filename);
+ return Filename;
+}
+
+VersionTuple ASTReader::ReadVersionTuple(const RecordData &Record,
+ unsigned &Idx) {
+ unsigned Major = Record[Idx++];
+ unsigned Minor = Record[Idx++];
+ unsigned Subminor = Record[Idx++];
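+ // Minor and Subminor are stored incremented by one, so a stored zero
+ // means that component is absent.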
+ if (Minor == 0)
+ return VersionTuple(Major);
+ if (Subminor == 0)
+ return VersionTuple(Major, Minor - 1);
+ return VersionTuple(Major, Minor - 1, Subminor - 1);
+}
+
+CXXTemporary *ASTReader::ReadCXXTemporary(ModuleFile &F,
+ const RecordData &Record,
+ unsigned &Idx) {
+ CXXDestructorDecl *Decl = ReadDeclAs<CXXDestructorDecl>(F, Record, Idx);
+ return CXXTemporary::Create(Context, Decl);
+}
+
+DiagnosticBuilder ASTReader::Diag(unsigned DiagID) {
+ return Diag(CurrentImportLoc, DiagID);
+}
+
+DiagnosticBuilder ASTReader::Diag(SourceLocation Loc, unsigned DiagID) {
+ return Diags.Report(Loc, DiagID);
+}
+
+/// \brief Retrieve the identifier table associated with the
+/// preprocessor.
+IdentifierTable &ASTReader::getIdentifierTable() {
+ return PP.getIdentifierTable();
+}
+
+/// \brief Record that the given ID maps to the given switch-case
+/// statement.
+void ASTReader::RecordSwitchCaseID(SwitchCase *SC, unsigned ID) {
+ assert((*CurrSwitchCaseStmts)[ID] == nullptr &&
+ "Already have a SwitchCase with this ID");
+ (*CurrSwitchCaseStmts)[ID] = SC;
+}
+
+/// \brief Retrieve the switch-case statement with the given ID.
+SwitchCase *ASTReader::getSwitchCaseWithID(unsigned ID) {
+ assert((*CurrSwitchCaseStmts)[ID] != nullptr && "No SwitchCase with this ID");
+ return (*CurrSwitchCaseStmts)[ID];
+}
+
+void ASTReader::ClearSwitchCaseIDs() {
+ CurrSwitchCaseStmts->clear();
+}
+
+void ASTReader::ReadComments() {
+ std::vector<RawComment *> Comments;
+ for (SmallVectorImpl<std::pair<BitstreamCursor,
+ serialization::ModuleFile *> >::iterator
+ I = CommentsCursors.begin(),
+ E = CommentsCursors.end();
+ I != E; ++I) {
+ Comments.clear();
+ BitstreamCursor &Cursor = I->first;
+ serialization::ModuleFile &F = *I->second;
+ SavedStreamPosition SavedPosition(Cursor);
+
+ RecordData Record;
+ while (true) {
+ llvm::BitstreamEntry Entry =
+ Cursor.advanceSkippingSubblocks(BitstreamCursor::AF_DontPopBlockAtEnd);
+
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::SubBlock: // Handled for us already.
+ case llvm::BitstreamEntry::Error:
+ Error("malformed block record in AST file");
+ return;
+ case llvm::BitstreamEntry::EndBlock:
+ goto NextCursor;
+ case llvm::BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ // Read a record.
+ Record.clear();
+ switch ((CommentRecordTypes)Cursor.readRecord(Entry.ID, Record)) {
+ case COMMENTS_RAW_COMMENT: {
+ unsigned Idx = 0;
+ SourceRange SR = ReadSourceRange(F, Record, Idx);
+ RawComment::CommentKind Kind =
+ (RawComment::CommentKind) Record[Idx++];
+ bool IsTrailingComment = Record[Idx++];
+ bool IsAlmostTrailingComment = Record[Idx++];
+ Comments.push_back(new (Context) RawComment(
+ SR, Kind, IsTrailingComment, IsAlmostTrailingComment,
+ Context.getLangOpts().CommentOpts.ParseAllComments));
+ break;
+ }
+ }
+ }
+ NextCursor:
+ Context.Comments.addDeserializedComments(Comments);
+ }
+}
+
+std::string ASTReader::getOwningModuleNameForDiagnostic(const Decl *D) {
+ // If we know the owning module, use it.
+ if (Module *M = D->getImportedOwningModule())
+ return M->getFullModuleName();
+
+ // Otherwise, use the name of the top-level module the decl is within.
+ if (ModuleFile *M = getOwningModuleFile(D))
+ return M->ModuleName;
+
+ // Not from a module.
+ return "";
+}
+
+void ASTReader::finishPendingActions() {
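+ // Each pass below can trigger further deserialization that refills these
+ // pending lists, so loop until we reach a fixed point.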
+ while (!PendingIdentifierInfos.empty() ||
+ !PendingIncompleteDeclChains.empty() || !PendingDeclChains.empty() ||
+ !PendingMacroIDs.empty() || !PendingDeclContextInfos.empty() ||
+ !PendingUpdateRecords.empty()) {
+ // If any identifiers with corresponding top-level declarations have
+ // been loaded, load those declarations now.
+ typedef llvm::DenseMap<IdentifierInfo *, SmallVector<Decl *, 2> >
+ TopLevelDeclsMap;
+ TopLevelDeclsMap TopLevelDecls;
+
+ while (!PendingIdentifierInfos.empty()) {
+ IdentifierInfo *II = PendingIdentifierInfos.back().first;
+ SmallVector<uint32_t, 4> DeclIDs =
+ std::move(PendingIdentifierInfos.back().second);
+ PendingIdentifierInfos.pop_back();
+
+ SetGloballyVisibleDecls(II, DeclIDs, &TopLevelDecls[II]);
+ }
+
+ // For each decl chain that we wanted to complete while deserializing, mark
+ // it as "still needs to be completed".
+ for (unsigned I = 0; I != PendingIncompleteDeclChains.size(); ++I) {
+ markIncompleteDeclChain(PendingIncompleteDeclChains[I]);
+ }
+ PendingIncompleteDeclChains.clear();
+
+ // Load pending declaration chains.
+ for (unsigned I = 0; I != PendingDeclChains.size(); ++I)
+ loadPendingDeclChain(PendingDeclChains[I].first,
+ PendingDeclChains[I].second);
+ PendingDeclChains.clear();
+
+ // Make the most recent of the top-level declarations visible.
+ for (TopLevelDeclsMap::iterator TLD = TopLevelDecls.begin(),
+ TLDEnd = TopLevelDecls.end(); TLD != TLDEnd; ++TLD) {
+ IdentifierInfo *II = TLD->first;
+ for (unsigned I = 0, N = TLD->second.size(); I != N; ++I) {
+ pushExternalDeclIntoScope(cast<NamedDecl>(TLD->second[I]), II);
+ }
+ }
+
+ // Load any pending macro definitions.
+ for (unsigned I = 0; I != PendingMacroIDs.size(); ++I) {
+ IdentifierInfo *II = PendingMacroIDs.begin()[I].first;
+ SmallVector<PendingMacroInfo, 2> GlobalIDs;
+ GlobalIDs.swap(PendingMacroIDs.begin()[I].second);
+ // Initialize the macro history from chained-PCHs ahead of module imports.
+ for (unsigned IDIdx = 0, NumIDs = GlobalIDs.size(); IDIdx != NumIDs;
+ ++IDIdx) {
+ const PendingMacroInfo &Info = GlobalIDs[IDIdx];
+ if (Info.M->Kind != MK_ImplicitModule &&
+ Info.M->Kind != MK_ExplicitModule)
+ resolvePendingMacro(II, Info);
+ }
+ // Handle module imports.
+ for (unsigned IDIdx = 0, NumIDs = GlobalIDs.size(); IDIdx != NumIDs;
+ ++IDIdx) {
+ const PendingMacroInfo &Info = GlobalIDs[IDIdx];
+ if (Info.M->Kind == MK_ImplicitModule ||
+ Info.M->Kind == MK_ExplicitModule)
+ resolvePendingMacro(II, Info);
+ }
+ }
+ PendingMacroIDs.clear();
+
+ // Wire up the DeclContexts for Decls that we delayed setting until
+ // recursive loading is completed.
+ while (!PendingDeclContextInfos.empty()) {
+ PendingDeclContextInfo Info = PendingDeclContextInfos.front();
+ PendingDeclContextInfos.pop_front();
+ DeclContext *SemaDC = cast<DeclContext>(GetDecl(Info.SemaDC));
+ DeclContext *LexicalDC = cast<DeclContext>(GetDecl(Info.LexicalDC));
+ Info.D->setDeclContextsImpl(SemaDC, LexicalDC, getContext());
+ }
+
+ // Perform any pending declaration updates.
+ while (!PendingUpdateRecords.empty()) {
+ auto Update = PendingUpdateRecords.pop_back_val();
+ ReadingKindTracker ReadingKind(Read_Decl, *this);
+ loadDeclUpdateRecords(Update.first, Update.second);
+ }
+ }
+
+ // At this point, all update records for loaded decls are in place, so any
+ // fake class definitions should have become real.
+ assert(PendingFakeDefinitionData.empty() &&
+ "faked up a class definition but never saw the real one");
+
+ // If we deserialized any C++ or Objective-C class definitions, any
+ // Objective-C protocol definitions, or any redeclarable templates, make sure
+ // that all redeclarations point to the definitions. Note that this can only
+ // happen now, after the redeclaration chains have been fully wired.
+ for (Decl *D : PendingDefinitions) {
+ if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ if (const TagType *TagT = dyn_cast<TagType>(TD->getTypeForDecl())) {
+ // Make sure that the TagType points at the definition.
+ const_cast<TagType*>(TagT)->decl = TD;
+ }
+
+ if (auto RD = dyn_cast<CXXRecordDecl>(D)) {
+ for (auto *R = getMostRecentExistingDecl(RD); R;
+ R = R->getPreviousDecl()) {
+ assert((R == D) ==
+ cast<CXXRecordDecl>(R)->isThisDeclarationADefinition() &&
+ "declaration thinks it's the definition but it isn't");
+ cast<CXXRecordDecl>(R)->DefinitionData = RD->DefinitionData;
+ }
+ }
+
+ continue;
+ }
+
+ if (auto ID = dyn_cast<ObjCInterfaceDecl>(D)) {
+ // Make sure that the ObjCInterfaceType points at the definition.
+ const_cast<ObjCInterfaceType *>(cast<ObjCInterfaceType>(ID->TypeForDecl))
+ ->Decl = ID;
+
+ for (auto *R = getMostRecentExistingDecl(ID); R; R = R->getPreviousDecl())
+ cast<ObjCInterfaceDecl>(R)->Data = ID->Data;
+
+ continue;
+ }
+
+ if (auto PD = dyn_cast<ObjCProtocolDecl>(D)) {
+ for (auto *R = getMostRecentExistingDecl(PD); R; R = R->getPreviousDecl())
+ cast<ObjCProtocolDecl>(R)->Data = PD->Data;
+
+ continue;
+ }
+
+ auto RTD = cast<RedeclarableTemplateDecl>(D)->getCanonicalDecl();
+ for (auto *R = getMostRecentExistingDecl(RTD); R; R = R->getPreviousDecl())
+ cast<RedeclarableTemplateDecl>(R)->Common = RTD->Common;
+ }
+ PendingDefinitions.clear();
+
+ // Load the bodies of any functions or methods we've encountered. We do
+ // this now (delayed) so that we can be sure that the declaration chains
+ // have been fully wired up (hasBody relies on this).
+ // FIXME: We shouldn't require complete redeclaration chains here.
+ for (PendingBodiesMap::iterator PB = PendingBodies.begin(),
+ PBEnd = PendingBodies.end();
+ PB != PBEnd; ++PB) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(PB->first)) {
+ // FIXME: Check for =delete/=default?
+ // FIXME: Complain about ODR violations here?
+ if (!getContext().getLangOpts().Modules || !FD->hasBody())
+ FD->setLazyBody(PB->second);
+ continue;
+ }
+
+ ObjCMethodDecl *MD = cast<ObjCMethodDecl>(PB->first);
+ if (!getContext().getLangOpts().Modules || !MD->hasBody())
+ MD->setLazyBody(PB->second);
+ }
+ PendingBodies.clear();
+
+ // Do some cleanup.
+ for (auto *ND : PendingMergedDefinitionsToDeduplicate)
+ getContext().deduplicateMergedDefinitonsFor(ND);
+ PendingMergedDefinitionsToDeduplicate.clear();
+}
+
+void ASTReader::diagnoseOdrViolations() {
+ if (PendingOdrMergeFailures.empty() && PendingOdrMergeChecks.empty())
+ return;
+
+ // Trigger the import of the full definition of each class that had any
+ // odr-merging problems, so we can produce better diagnostics for them.
+ // These updates may in turn find and diagnose some ODR failures, so take
+ // ownership of the set first.
+ auto OdrMergeFailures = std::move(PendingOdrMergeFailures);
+ PendingOdrMergeFailures.clear();
+ for (auto &Merge : OdrMergeFailures) {
+ Merge.first->buildLookup();
+ Merge.first->decls_begin();
+ Merge.first->bases_begin();
+ Merge.first->vbases_begin();
+ for (auto *RD : Merge.second) {
+ RD->decls_begin();
+ RD->bases_begin();
+ RD->vbases_begin();
+ }
+ }
+
+ // For each declaration from a merged context, check that the canonical
+ // definition of that context also contains a declaration of the same
+ // entity.
+ //
+ // Caution: this loop does things that might invalidate iterators into
+ // PendingOdrMergeChecks. Don't turn this into a range-based for loop!
+ while (!PendingOdrMergeChecks.empty()) {
+ NamedDecl *D = PendingOdrMergeChecks.pop_back_val();
+
+ // FIXME: Skip over implicit declarations for now. This matters for things
+ // like implicitly-declared special member functions. This isn't entirely
+ // correct; we can end up with multiple unmerged declarations of the same
+ // implicit entity.
+ if (D->isImplicit())
+ continue;
+
+ DeclContext *CanonDef = D->getDeclContext();
+
+ bool Found = false;
+ const Decl *DCanon = D->getCanonicalDecl();
+
+ for (auto RI : D->redecls()) {
+ if (RI->getLexicalDeclContext() == CanonDef) {
+ Found = true;
+ break;
+ }
+ }
+ if (Found)
+ continue;
+
+ // Quick check failed, time to do the slow thing. Note, we can't just
+ // look up the name of D in CanonDef here, because the member that is
+ // in CanonDef might not be found by name lookup (it might have been
+ // replaced by a more recent declaration in the lookup table), and we
+ // can't necessarily find it in the redeclaration chain because it might
+ // be merely mergeable, not redeclarable.
+ llvm::SmallVector<const NamedDecl*, 4> Candidates;
+ for (auto *CanonMember : CanonDef->decls()) {
+ if (CanonMember->getCanonicalDecl() == DCanon) {
+ // This can happen if the declaration is merely mergeable and not
+ // actually redeclarable (we looked for redeclarations earlier).
+ //
+ // FIXME: We should be able to detect this more efficiently, without
+ // pulling in all of the members of CanonDef.
+ Found = true;
+ break;
+ }
+ if (auto *ND = dyn_cast<NamedDecl>(CanonMember))
+ if (ND->getDeclName() == D->getDeclName())
+ Candidates.push_back(ND);
+ }
+
+ if (!Found) {
+ // The AST doesn't like TagDecls becoming invalid after they've been
+ // completed. We only really need to mark FieldDecls as invalid here.
+ if (!isa<TagDecl>(D))
+ D->setInvalidDecl();
+
+ // Ensure we don't accidentally recursively enter deserialization while
+ // we're producing our diagnostic.
+ Deserializing RecursionGuard(this);
+
+ std::string CanonDefModule =
+ getOwningModuleNameForDiagnostic(cast<Decl>(CanonDef));
+ Diag(D->getLocation(), diag::err_module_odr_violation_missing_decl)
+ << D << getOwningModuleNameForDiagnostic(D)
+ << CanonDef << CanonDefModule.empty() << CanonDefModule;
+
+ if (Candidates.empty())
+ Diag(cast<Decl>(CanonDef)->getLocation(),
+ diag::note_module_odr_violation_no_possible_decls) << D;
+ else {
+ for (unsigned I = 0, N = Candidates.size(); I != N; ++I)
+ Diag(Candidates[I]->getLocation(),
+ diag::note_module_odr_violation_possible_decl)
+ << Candidates[I];
+ }
+
+ DiagnosedOdrMergeFailures.insert(CanonDef);
+ }
+ }
+
+ if (OdrMergeFailures.empty())
+ return;
+
+ // Ensure we don't accidentally recursively enter deserialization while
+ // we're producing our diagnostics.
+ Deserializing RecursionGuard(this);
+
+ // Issue any pending ODR-failure diagnostics.
+ for (auto &Merge : OdrMergeFailures) {
+ // If we've already pointed out a specific problem with this class, don't
+ // bother issuing a general "something's different" diagnostic.
+ if (!DiagnosedOdrMergeFailures.insert(Merge.first).second)
+ continue;
+
+ bool Diagnosed = false;
+ for (auto *RD : Merge.second) {
+ // Multiple different declarations got merged together; tell the user
+ // where they came from.
+ if (Merge.first != RD) {
+ // FIXME: Walk the definition, figure out what's different,
+ // and diagnose that.
+ if (!Diagnosed) {
+ std::string Module = getOwningModuleNameForDiagnostic(Merge.first);
+ Diag(Merge.first->getLocation(),
+ diag::err_module_odr_violation_different_definitions)
+ << Merge.first << Module.empty() << Module;
+ Diagnosed = true;
+ }
+
+ Diag(RD->getLocation(),
+ diag::note_module_odr_violation_different_definitions)
+ << getOwningModuleNameForDiagnostic(RD);
+ }
+ }
+
+ if (!Diagnosed) {
+ // All definitions are updates to the same declaration. This happens if a
+ // module instantiates the declaration of a class template specialization
+ // and two or more other modules instantiate its definition.
+ //
+ // FIXME: Indicate which modules had instantiations of this definition.
+ // FIXME: How can this even happen?
+ Diag(Merge.first->getLocation(),
+ diag::err_module_odr_violation_different_instantiations)
+ << Merge.first;
+ }
+ }
+}
+
+void ASTReader::StartedDeserializing() {
+ if (++NumCurrentElementsDeserializing == 1 && ReadTimer.get())
+ ReadTimer->startTimer();
+}
+
+void ASTReader::FinishedDeserializing() {
+ assert(NumCurrentElementsDeserializing &&
+ "FinishedDeserializing not paired with StartedDeserializing");
+ if (NumCurrentElementsDeserializing == 1) {
+ // We decrease NumCurrentElementsDeserializing only after pending actions
+ // are finished, to avoid recursively re-calling finishPendingActions().
+ finishPendingActions();
+ }
+ --NumCurrentElementsDeserializing;
+
+ if (NumCurrentElementsDeserializing == 0) {
+ // Propagate exception specification updates along redeclaration chains.
+ while (!PendingExceptionSpecUpdates.empty()) {
+ auto Updates = std::move(PendingExceptionSpecUpdates);
+ PendingExceptionSpecUpdates.clear();
+ for (auto Update : Updates) {
+ auto *FPT = Update.second->getType()->castAs<FunctionProtoType>();
+ auto ESI = FPT->getExtProtoInfo().ExceptionSpec;
+ if (auto *Listener = Context.getASTMutationListener())
+ Listener->ResolvedExceptionSpec(cast<FunctionDecl>(Update.second));
+ for (auto *Redecl : Update.second->redecls())
+ Context.adjustExceptionSpec(cast<FunctionDecl>(Redecl), ESI);
+ }
+ }
+
+ if (ReadTimer)
+ ReadTimer->stopTimer();
+
+ diagnoseOdrViolations();
+
+ // We are not in recursive loading, so it's safe to pass the "interesting"
+ // decls to the consumer.
+ if (Consumer)
+ PassInterestingDeclsToConsumer();
+ }
+}
+
+void ASTReader::pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name) {
+ if (IdentifierInfo *II = Name.getAsIdentifierInfo()) {
+ // Remove any fake results before adding any real ones.
+ auto It = PendingFakeLookupResults.find(II);
+ if (It != PendingFakeLookupResults.end()) {
+ for (auto *ND : It->second)
+ SemaObj->IdResolver.RemoveDecl(ND);
+      // FIXME: this works around a module+PCH performance issue.
+ // Rather than erase the result from the map, which is O(n), just clear
+ // the vector of NamedDecls.
+ It->second.clear();
+ }
+ }
+
+ if (SemaObj->IdResolver.tryAddTopLevelDecl(D, Name) && SemaObj->TUScope) {
+ SemaObj->TUScope->AddDecl(D);
+ } else if (SemaObj->TUScope) {
+    // Adding the decl to IdResolver may have failed because it was already
+    // present (even though it was not added to the scope). If so, make sure
+    // it gets added to the scope as well.
+ if (std::find(SemaObj->IdResolver.begin(Name),
+ SemaObj->IdResolver.end(), D) != SemaObj->IdResolver.end())
+ SemaObj->TUScope->AddDecl(D);
+ }
+}
+
+ASTReader::ASTReader(
+ Preprocessor &PP, ASTContext &Context,
+ const PCHContainerReader &PCHContainerRdr,
+ ArrayRef<IntrusiveRefCntPtr<ModuleFileExtension>> Extensions,
+ StringRef isysroot, bool DisableValidation,
+ bool AllowASTWithCompilerErrors,
+ bool AllowConfigurationMismatch, bool ValidateSystemInputs,
+ bool UseGlobalIndex,
+ std::unique_ptr<llvm::Timer> ReadTimer)
+ : Listener(new PCHValidator(PP, *this)), DeserializationListener(nullptr),
+ OwnsDeserializationListener(false), SourceMgr(PP.getSourceManager()),
+ FileMgr(PP.getFileManager()), PCHContainerRdr(PCHContainerRdr),
+ Diags(PP.getDiagnostics()), SemaObj(nullptr), PP(PP), Context(Context),
+ Consumer(nullptr), ModuleMgr(PP.getFileManager(), PCHContainerRdr),
+ ReadTimer(std::move(ReadTimer)),
+ isysroot(isysroot), DisableValidation(DisableValidation),
+ AllowASTWithCompilerErrors(AllowASTWithCompilerErrors),
+ AllowConfigurationMismatch(AllowConfigurationMismatch),
+ ValidateSystemInputs(ValidateSystemInputs),
+ UseGlobalIndex(UseGlobalIndex), TriedLoadingGlobalIndex(false),
+ CurrSwitchCaseStmts(&SwitchCaseStmts), NumSLocEntriesRead(0),
+ TotalNumSLocEntries(0), NumStatementsRead(0), TotalNumStatements(0),
+ NumMacrosRead(0), TotalNumMacros(0), NumIdentifierLookups(0),
+ NumIdentifierLookupHits(0), NumSelectorsRead(0),
+ NumMethodPoolEntriesRead(0), NumMethodPoolLookups(0),
+ NumMethodPoolHits(0), NumMethodPoolTableLookups(0),
+ NumMethodPoolTableHits(0), TotalNumMethodPoolEntries(0),
+ NumLexicalDeclContextsRead(0), TotalLexicalDeclContexts(0),
+ NumVisibleDeclContextsRead(0), TotalVisibleDeclContexts(0),
+ TotalModulesSizeInBits(0), NumCurrentElementsDeserializing(0),
+ PassingDeclsToConsumer(false), ReadingKind(Read_None) {
+ SourceMgr.setExternalSLocEntrySource(this);
+
+ for (const auto &Ext : Extensions) {
+ auto BlockName = Ext->getExtensionMetadata().BlockName;
+ auto Known = ModuleFileExtensions.find(BlockName);
+ if (Known != ModuleFileExtensions.end()) {
+ Diags.Report(diag::warn_duplicate_module_file_extension)
+ << BlockName;
+ continue;
+ }
+
+ ModuleFileExtensions.insert({BlockName, Ext});
+ }
+}
+
+ASTReader::~ASTReader() {
+ if (OwnsDeserializationListener)
+ delete DeserializationListener;
+}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp
new file mode 100644
index 0000000..8fb110e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -0,0 +1,3816 @@
+//===--- ASTReaderDecl.cpp - Decl Deserialization ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ASTReader::ReadDeclRecord method, which is the
+// entrypoint for loading a decl.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Serialization/ASTReader.h"
+#include "ASTCommon.h"
+#include "ASTReaderInternals.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/Expr.h"
+#include "clang/Sema/IdentifierResolver.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/Support/SaveAndRestore.h"
+
+using namespace clang;
+using namespace clang::serialization;
+
+//===----------------------------------------------------------------------===//
+// Declaration deserialization
+//===----------------------------------------------------------------------===//
+
+namespace clang {
+ class ASTDeclReader : public DeclVisitor<ASTDeclReader, void> {
+ ASTReader &Reader;
+ ModuleFile &F;
+ const DeclID ThisDeclID;
+ const unsigned RawLocation;
+ typedef ASTReader::RecordData RecordData;
+ const RecordData &Record;
+ unsigned &Idx;
+ TypeID TypeIDForTypeDecl;
+ unsigned AnonymousDeclNumber;
+ GlobalDeclID NamedDeclForTagDecl;
+ IdentifierInfo *TypedefNameForLinkage;
+
+ bool HasPendingBody;
+
+ uint64_t GetCurrentCursorOffset();
+
+ SourceLocation ReadSourceLocation(const RecordData &R, unsigned &I) {
+ return Reader.ReadSourceLocation(F, R, I);
+ }
+
+ SourceRange ReadSourceRange(const RecordData &R, unsigned &I) {
+ return Reader.ReadSourceRange(F, R, I);
+ }
+
+ TypeSourceInfo *GetTypeSourceInfo(const RecordData &R, unsigned &I) {
+ return Reader.GetTypeSourceInfo(F, R, I);
+ }
+
+ serialization::DeclID ReadDeclID(const RecordData &R, unsigned &I) {
+ return Reader.ReadDeclID(F, R, I);
+ }
+
+ void ReadDeclIDList(SmallVectorImpl<DeclID> &IDs) {
+ for (unsigned I = 0, Size = Record[Idx++]; I != Size; ++I)
+ IDs.push_back(ReadDeclID(Record, Idx));
+ }
+
+ Decl *ReadDecl(const RecordData &R, unsigned &I) {
+ return Reader.ReadDecl(F, R, I);
+ }
+
+ template<typename T>
+ T *ReadDeclAs(const RecordData &R, unsigned &I) {
+ return Reader.ReadDeclAs<T>(F, R, I);
+ }
+
+ void ReadQualifierInfo(QualifierInfo &Info,
+ const RecordData &R, unsigned &I) {
+ Reader.ReadQualifierInfo(F, Info, R, I);
+ }
+
+ void ReadDeclarationNameLoc(DeclarationNameLoc &DNLoc, DeclarationName Name,
+ const RecordData &R, unsigned &I) {
+ Reader.ReadDeclarationNameLoc(F, DNLoc, Name, R, I);
+ }
+
+ void ReadDeclarationNameInfo(DeclarationNameInfo &NameInfo,
+ const RecordData &R, unsigned &I) {
+ Reader.ReadDeclarationNameInfo(F, NameInfo, R, I);
+ }
+
+ serialization::SubmoduleID readSubmoduleID(const RecordData &R,
+ unsigned &I) {
+ if (I >= R.size())
+ return 0;
+
+ return Reader.getGlobalSubmoduleID(F, R[I++]);
+ }
+
+ Module *readModule(const RecordData &R, unsigned &I) {
+ return Reader.getSubmodule(readSubmoduleID(R, I));
+ }
+
+ void ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update);
+ void ReadCXXDefinitionData(struct CXXRecordDecl::DefinitionData &Data,
+ const RecordData &R, unsigned &I);
+ void MergeDefinitionData(CXXRecordDecl *D,
+ struct CXXRecordDecl::DefinitionData &&NewDD);
+
+ static NamedDecl *getAnonymousDeclForMerging(ASTReader &Reader,
+ DeclContext *DC,
+ unsigned Index);
+ static void setAnonymousDeclForMerging(ASTReader &Reader, DeclContext *DC,
+ unsigned Index, NamedDecl *D);
+
+ /// Results from loading a RedeclarableDecl.
+ class RedeclarableResult {
+ GlobalDeclID FirstID;
+ Decl *MergeWith;
+ bool IsKeyDecl;
+
+ public:
+ RedeclarableResult(GlobalDeclID FirstID, Decl *MergeWith, bool IsKeyDecl)
+ : FirstID(FirstID), MergeWith(MergeWith), IsKeyDecl(IsKeyDecl) {}
+
+ /// \brief Retrieve the first ID.
+ GlobalDeclID getFirstID() const { return FirstID; }
+
+ /// \brief Is this declaration a key declaration?
+ bool isKeyDecl() const { return IsKeyDecl; }
+
+ /// \brief Get a known declaration that this should be merged with, if
+ /// any.
+ Decl *getKnownMergeTarget() const { return MergeWith; }
+ };
+
+ /// \brief Class used to capture the result of searching for an existing
+ /// declaration of a specific kind and name, along with the ability
+ /// to update the place where this result was found (the declaration
+ /// chain hanging off an identifier or the DeclContext we searched in)
+ /// if requested.
+ class FindExistingResult {
+ ASTReader &Reader;
+ NamedDecl *New;
+ NamedDecl *Existing;
+ mutable bool AddResult;
+
+ unsigned AnonymousDeclNumber;
+ IdentifierInfo *TypedefNameForLinkage;
+
+ void operator=(FindExistingResult&) = delete;
+
+ public:
+ FindExistingResult(ASTReader &Reader)
+ : Reader(Reader), New(nullptr), Existing(nullptr), AddResult(false),
+ AnonymousDeclNumber(0), TypedefNameForLinkage(nullptr) {}
+
+ FindExistingResult(ASTReader &Reader, NamedDecl *New, NamedDecl *Existing,
+ unsigned AnonymousDeclNumber,
+ IdentifierInfo *TypedefNameForLinkage)
+ : Reader(Reader), New(New), Existing(Existing), AddResult(true),
+ AnonymousDeclNumber(AnonymousDeclNumber),
+ TypedefNameForLinkage(TypedefNameForLinkage) {}
+
+ FindExistingResult(const FindExistingResult &Other)
+ : Reader(Other.Reader), New(Other.New), Existing(Other.Existing),
+ AddResult(Other.AddResult),
+ AnonymousDeclNumber(Other.AnonymousDeclNumber),
+ TypedefNameForLinkage(Other.TypedefNameForLinkage) {
+ Other.AddResult = false;
+ }
+
+ ~FindExistingResult();
+
+ /// \brief Suppress the addition of this result into the known set of
+ /// names.
+ void suppress() { AddResult = false; }
+
+ operator NamedDecl*() const { return Existing; }
+
+ template<typename T>
+ operator T*() const { return dyn_cast_or_null<T>(Existing); }
+ };
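+
+    // Hypothetical usage sketch (exposition only; 'mergeWith' is a
+    // placeholder, not a helper defined in this file):
+    //   FindExistingResult Result = findExisting(ND);
+    //   if (NamedDecl *Existing = Result) // via operator NamedDecl*()
+    //     mergeWith(ND, Existing);
+    //   else
+    //     ; // on destruction, Result registers ND as the found decl,
+    //       // unless suppress() was called.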
+
+ static DeclContext *getPrimaryContextForMerging(ASTReader &Reader,
+ DeclContext *DC);
+ FindExistingResult findExisting(NamedDecl *D);
+
+ public:
+ ASTDeclReader(ASTReader &Reader, ModuleFile &F, DeclID thisDeclID,
+ unsigned RawLocation, const RecordData &Record, unsigned &Idx)
+ : Reader(Reader), F(F), ThisDeclID(thisDeclID),
+ RawLocation(RawLocation), Record(Record), Idx(Idx),
+ TypeIDForTypeDecl(0), NamedDeclForTagDecl(0),
+ TypedefNameForLinkage(nullptr), HasPendingBody(false) {}
+
+ template <typename DeclT>
+ static Decl *getMostRecentDeclImpl(Redeclarable<DeclT> *D);
+ static Decl *getMostRecentDeclImpl(...);
+ static Decl *getMostRecentDecl(Decl *D);
+
+ template <typename DeclT>
+ static void attachPreviousDeclImpl(ASTReader &Reader,
+ Redeclarable<DeclT> *D, Decl *Previous,
+ Decl *Canon);
+ static void attachPreviousDeclImpl(ASTReader &Reader, ...);
+ static void attachPreviousDecl(ASTReader &Reader, Decl *D, Decl *Previous,
+ Decl *Canon);
+
+ template <typename DeclT>
+ static void attachLatestDeclImpl(Redeclarable<DeclT> *D, Decl *Latest);
+ static void attachLatestDeclImpl(...);
+ static void attachLatestDecl(Decl *D, Decl *latest);
+
+ template <typename DeclT>
+ static void markIncompleteDeclChainImpl(Redeclarable<DeclT> *D);
+ static void markIncompleteDeclChainImpl(...);
+
+ /// \brief Determine whether this declaration has a pending body.
+ bool hasPendingBody() const { return HasPendingBody; }
+
+ void Visit(Decl *D);
+
+ void UpdateDecl(Decl *D, ModuleFile &ModuleFile,
+ const RecordData &Record);
+
+ static void setNextObjCCategory(ObjCCategoryDecl *Cat,
+ ObjCCategoryDecl *Next) {
+ Cat->NextClassCategory = Next;
+ }
+
+ void VisitDecl(Decl *D);
+ void VisitTranslationUnitDecl(TranslationUnitDecl *TU);
+ void VisitNamedDecl(NamedDecl *ND);
+ void VisitLabelDecl(LabelDecl *LD);
+ void VisitNamespaceDecl(NamespaceDecl *D);
+ void VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
+ void VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
+ void VisitTypeDecl(TypeDecl *TD);
+ RedeclarableResult VisitTypedefNameDecl(TypedefNameDecl *TD);
+ void VisitTypedefDecl(TypedefDecl *TD);
+ void VisitTypeAliasDecl(TypeAliasDecl *TD);
+ void VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
+ RedeclarableResult VisitTagDecl(TagDecl *TD);
+ void VisitEnumDecl(EnumDecl *ED);
+ RedeclarableResult VisitRecordDeclImpl(RecordDecl *RD);
+ void VisitRecordDecl(RecordDecl *RD) { VisitRecordDeclImpl(RD); }
+ RedeclarableResult VisitCXXRecordDeclImpl(CXXRecordDecl *D);
+ void VisitCXXRecordDecl(CXXRecordDecl *D) { VisitCXXRecordDeclImpl(D); }
+ RedeclarableResult VisitClassTemplateSpecializationDeclImpl(
+ ClassTemplateSpecializationDecl *D);
+ void VisitClassTemplateSpecializationDecl(
+ ClassTemplateSpecializationDecl *D) {
+ VisitClassTemplateSpecializationDeclImpl(D);
+ }
+ void VisitClassTemplatePartialSpecializationDecl(
+ ClassTemplatePartialSpecializationDecl *D);
+ void VisitClassScopeFunctionSpecializationDecl(
+ ClassScopeFunctionSpecializationDecl *D);
+ RedeclarableResult
+ VisitVarTemplateSpecializationDeclImpl(VarTemplateSpecializationDecl *D);
+ void VisitVarTemplateSpecializationDecl(VarTemplateSpecializationDecl *D) {
+ VisitVarTemplateSpecializationDeclImpl(D);
+ }
+ void VisitVarTemplatePartialSpecializationDecl(
+ VarTemplatePartialSpecializationDecl *D);
+ void VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
+ void VisitValueDecl(ValueDecl *VD);
+ void VisitEnumConstantDecl(EnumConstantDecl *ECD);
+ void VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
+ void VisitDeclaratorDecl(DeclaratorDecl *DD);
+ void VisitFunctionDecl(FunctionDecl *FD);
+ void VisitCXXMethodDecl(CXXMethodDecl *D);
+ void VisitCXXConstructorDecl(CXXConstructorDecl *D);
+ void VisitCXXDestructorDecl(CXXDestructorDecl *D);
+ void VisitCXXConversionDecl(CXXConversionDecl *D);
+ void VisitFieldDecl(FieldDecl *FD);
+ void VisitMSPropertyDecl(MSPropertyDecl *FD);
+ void VisitIndirectFieldDecl(IndirectFieldDecl *FD);
+ RedeclarableResult VisitVarDeclImpl(VarDecl *D);
+ void VisitVarDecl(VarDecl *VD) { VisitVarDeclImpl(VD); }
+ void VisitImplicitParamDecl(ImplicitParamDecl *PD);
+ void VisitParmVarDecl(ParmVarDecl *PD);
+ void VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
+ DeclID VisitTemplateDecl(TemplateDecl *D);
+ RedeclarableResult VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D);
+ void VisitClassTemplateDecl(ClassTemplateDecl *D);
+ void VisitBuiltinTemplateDecl(BuiltinTemplateDecl *D);
+ void VisitVarTemplateDecl(VarTemplateDecl *D);
+ void VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
+ void VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
+ void VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D);
+ void VisitUsingDecl(UsingDecl *D);
+ void VisitUsingShadowDecl(UsingShadowDecl *D);
+ void VisitLinkageSpecDecl(LinkageSpecDecl *D);
+ void VisitFileScopeAsmDecl(FileScopeAsmDecl *AD);
+ void VisitImportDecl(ImportDecl *D);
+ void VisitAccessSpecDecl(AccessSpecDecl *D);
+ void VisitFriendDecl(FriendDecl *D);
+ void VisitFriendTemplateDecl(FriendTemplateDecl *D);
+ void VisitStaticAssertDecl(StaticAssertDecl *D);
+ void VisitBlockDecl(BlockDecl *BD);
+ void VisitCapturedDecl(CapturedDecl *CD);
+ void VisitEmptyDecl(EmptyDecl *D);
+
+ std::pair<uint64_t, uint64_t> VisitDeclContext(DeclContext *DC);
+
+ template<typename T>
+ RedeclarableResult VisitRedeclarable(Redeclarable<T> *D);
+
+ template<typename T>
+ void mergeRedeclarable(Redeclarable<T> *D, RedeclarableResult &Redecl,
+ DeclID TemplatePatternID = 0);
+
+ template<typename T>
+ void mergeRedeclarable(Redeclarable<T> *D, T *Existing,
+ RedeclarableResult &Redecl,
+ DeclID TemplatePatternID = 0);
+
+ template<typename T>
+ void mergeMergeable(Mergeable<T> *D);
+
+ void mergeTemplatePattern(RedeclarableTemplateDecl *D,
+ RedeclarableTemplateDecl *Existing,
+ DeclID DsID, bool IsKeyDecl);
+
+ ObjCTypeParamList *ReadObjCTypeParamList();
+
+ // FIXME: Reorder according to DeclNodes.td?
+ void VisitObjCMethodDecl(ObjCMethodDecl *D);
+ void VisitObjCTypeParamDecl(ObjCTypeParamDecl *D);
+ void VisitObjCContainerDecl(ObjCContainerDecl *D);
+ void VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
+ void VisitObjCIvarDecl(ObjCIvarDecl *D);
+ void VisitObjCProtocolDecl(ObjCProtocolDecl *D);
+ void VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *D);
+ void VisitObjCCategoryDecl(ObjCCategoryDecl *D);
+ void VisitObjCImplDecl(ObjCImplDecl *D);
+ void VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
+ void VisitObjCImplementationDecl(ObjCImplementationDecl *D);
+ void VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *D);
+ void VisitObjCPropertyDecl(ObjCPropertyDecl *D);
+ void VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
+ void VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D);
+
+ /// We've merged the definition \p MergedDef into the existing definition
+ /// \p Def. Ensure that \p Def is made visible whenever \p MergedDef is made
+ /// visible.
+ void mergeDefinitionVisibility(NamedDecl *Def, NamedDecl *MergedDef) {
+ if (Def->isHidden()) {
+      // If MergedDef is visible or becomes visible, make the definition
+      // visible.
+ if (!MergedDef->isHidden())
+ Def->Hidden = false;
+ else if (Reader.getContext().getLangOpts().ModulesLocalVisibility) {
+ Reader.getContext().mergeDefinitionIntoModule(
+ Def, MergedDef->getImportedOwningModule(),
+ /*NotifyListeners*/ false);
+ Reader.PendingMergedDefinitionsToDeduplicate.insert(Def);
+ } else {
+ auto SubmoduleID = MergedDef->getOwningModuleID();
+ assert(SubmoduleID && "hidden definition in no module");
+ Reader.HiddenNamesMap[Reader.getSubmodule(SubmoduleID)].push_back(Def);
+ }
+ }
+ }
+ };
+} // end namespace clang
+
+namespace {
+/// Iterator over the redeclarations of a declaration that have already
+/// been merged into the same redeclaration chain.
+template<typename DeclT>
+class MergedRedeclIterator {
+ DeclT *Start, *Canonical, *Current;
+public:
+  MergedRedeclIterator()
+      : Start(nullptr), Canonical(nullptr), Current(nullptr) {}
+ MergedRedeclIterator(DeclT *Start)
+ : Start(Start), Canonical(nullptr), Current(Start) {}
+
+ DeclT *operator*() { return Current; }
+
+ MergedRedeclIterator &operator++() {
+ if (Current->isFirstDecl()) {
+ Canonical = Current;
+ Current = Current->getMostRecentDecl();
+ } else
+ Current = Current->getPreviousDecl();
+
+ // If we started in the merged portion, we'll reach our start position
+ // eventually. Otherwise, we'll never reach it, but the second declaration
+ // we reached was the canonical declaration, so stop when we see that one
+ // again.
+ if (Current == Start || Current == Canonical)
+ Current = nullptr;
+ return *this;
+ }
+
+ friend bool operator!=(const MergedRedeclIterator &A,
+ const MergedRedeclIterator &B) {
+ return A.Current != B.Current;
+ }
+};
+} // end anonymous namespace
+
+template<typename DeclT>
+llvm::iterator_range<MergedRedeclIterator<DeclT>> merged_redecls(DeclT *D) {
+ return llvm::make_range(MergedRedeclIterator<DeclT>(D),
+ MergedRedeclIterator<DeclT>());
+}
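+
+// Illustrative use (hedged sketch mirroring the loop in VisitEnumDecl
+// below): walk every already-merged redeclaration of a canonical decl.
+//   for (auto *R : merged_redecls(ED->getCanonicalDecl()))
+//     if (!R->isFromASTFile() && R->isCompleteDefinition())
+//       /* found a local definition */;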
+
+uint64_t ASTDeclReader::GetCurrentCursorOffset() {
+ return F.DeclsCursor.GetCurrentBitNo() + F.GlobalBitOffset;
+}
+
+void ASTDeclReader::Visit(Decl *D) {
+ DeclVisitor<ASTDeclReader, void>::Visit(D);
+
+ if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D)) {
+ if (DD->DeclInfo) {
+ DeclaratorDecl::ExtInfo *Info =
+ DD->DeclInfo.get<DeclaratorDecl::ExtInfo *>();
+      Info->TInfo = GetTypeSourceInfo(Record, Idx);
+    } else {
+      DD->DeclInfo = GetTypeSourceInfo(Record, Idx);
+    }
+ }
+
+ if (TypeDecl *TD = dyn_cast<TypeDecl>(D)) {
+ // We have a fully initialized TypeDecl. Read its type now.
+ TD->setTypeForDecl(Reader.GetType(TypeIDForTypeDecl).getTypePtrOrNull());
+
+ // If this is a tag declaration with a typedef name for linkage, it's safe
+ // to load that typedef now.
+ if (NamedDeclForTagDecl)
+ cast<TagDecl>(D)->TypedefNameDeclOrQualifier =
+ cast<TypedefNameDecl>(Reader.GetDecl(NamedDeclForTagDecl));
+ } else if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
+    // We have a fully initialized ObjCInterfaceDecl; read its type now.
+ ID->TypeForDecl = Reader.GetType(TypeIDForTypeDecl).getTypePtrOrNull();
+ } else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // FunctionDecl's body was written last after all other Stmts/Exprs.
+ // We only read it if FD doesn't already have a body (e.g., from another
+ // module).
+ // FIXME: Can we diagnose ODR violations somehow?
+ if (Record[Idx++]) {
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(FD)) {
+ CD->NumCtorInitializers = Record[Idx++];
+ if (CD->NumCtorInitializers)
+ CD->CtorInitializers =
+ Reader.ReadCXXCtorInitializersRef(F, Record, Idx);
+ }
+ Reader.PendingBodies[FD] = GetCurrentCursorOffset();
+ HasPendingBody = true;
+ }
+ }
+}
+
+void ASTDeclReader::VisitDecl(Decl *D) {
+ if (D->isTemplateParameter() || D->isTemplateParameterPack() ||
+ isa<ParmVarDecl>(D)) {
+ // We don't want to deserialize the DeclContext of a template
+ // parameter or of a parameter of a function template immediately. These
+ // entities might be used in the formulation of its DeclContext (for
+ // example, a function parameter can be used in decltype() in trailing
+ // return type of the function). Use the translation unit DeclContext as a
+ // placeholder.
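+    // For instance (illustrative only), in
+    //   template<typename T> auto f(T x) -> decltype(x);
+    // the parameter 'x' participates in forming f's type, so f (the
+    // eventual DeclContext) cannot be deserialized before 'x' exists.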
+ GlobalDeclID SemaDCIDForTemplateParmDecl = ReadDeclID(Record, Idx);
+ GlobalDeclID LexicalDCIDForTemplateParmDecl = ReadDeclID(Record, Idx);
+ if (!LexicalDCIDForTemplateParmDecl)
+ LexicalDCIDForTemplateParmDecl = SemaDCIDForTemplateParmDecl;
+ Reader.addPendingDeclContextInfo(D,
+ SemaDCIDForTemplateParmDecl,
+ LexicalDCIDForTemplateParmDecl);
+ D->setDeclContext(Reader.getContext().getTranslationUnitDecl());
+ } else {
+ DeclContext *SemaDC = ReadDeclAs<DeclContext>(Record, Idx);
+ DeclContext *LexicalDC = ReadDeclAs<DeclContext>(Record, Idx);
+ if (!LexicalDC)
+ LexicalDC = SemaDC;
+ DeclContext *MergedSemaDC = Reader.MergedDeclContexts.lookup(SemaDC);
+    // Avoid calling setLexicalDeclContext() directly because it uses
+    // Decl::getASTContext() internally, which is unsafe during
+    // deserialization.
+ D->setDeclContextsImpl(MergedSemaDC ? MergedSemaDC : SemaDC, LexicalDC,
+ Reader.getContext());
+ }
+ D->setLocation(Reader.ReadSourceLocation(F, RawLocation));
+ D->setInvalidDecl(Record[Idx++]);
+ if (Record[Idx++]) { // hasAttrs
+ AttrVec Attrs;
+ Reader.ReadAttributes(F, Attrs, Record, Idx);
+ // Avoid calling setAttrs() directly because it uses Decl::getASTContext()
+    // internally, which is unsafe during deserialization.
+ D->setAttrsImpl(Attrs, Reader.getContext());
+ }
+ D->setImplicit(Record[Idx++]);
+ D->Used = Record[Idx++];
+ D->setReferenced(Record[Idx++]);
+ D->setTopLevelDeclInObjCContainer(Record[Idx++]);
+ D->setAccess((AccessSpecifier)Record[Idx++]);
+ D->FromASTFile = true;
+ D->setModulePrivate(Record[Idx++]);
+ D->Hidden = D->isModulePrivate();
+
+ // Determine whether this declaration is part of a (sub)module. If so, it
+ // may not yet be visible.
+ if (unsigned SubmoduleID = readSubmoduleID(Record, Idx)) {
+ // Store the owning submodule ID in the declaration.
+ D->setOwningModuleID(SubmoduleID);
+
+ if (D->Hidden) {
+      // Module-private declarations are never visible, so there is no work
+      // to do.
+ } else if (Reader.getContext().getLangOpts().ModulesLocalVisibility) {
+ // If local visibility is being tracked, this declaration will become
+ // hidden and visible as the owning module does. Inform Sema that this
+ // declaration might not be visible.
+ D->Hidden = true;
+ } else if (Module *Owner = Reader.getSubmodule(SubmoduleID)) {
+ if (Owner->NameVisibility != Module::AllVisible) {
+ // The owning module is not visible. Mark this declaration as hidden.
+ D->Hidden = true;
+
+ // Note that this declaration was hidden because its owning module is
+ // not yet visible.
+ Reader.HiddenNamesMap[Owner].push_back(D);
+ }
+ }
+ }
+}
+
+void ASTDeclReader::VisitTranslationUnitDecl(TranslationUnitDecl *TU) {
+ llvm_unreachable("Translation units are not serialized");
+}
+
+void ASTDeclReader::VisitNamedDecl(NamedDecl *ND) {
+ VisitDecl(ND);
+ ND->setDeclName(Reader.ReadDeclarationName(F, Record, Idx));
+ AnonymousDeclNumber = Record[Idx++];
+}
+
+void ASTDeclReader::VisitTypeDecl(TypeDecl *TD) {
+ VisitNamedDecl(TD);
+ TD->setLocStart(ReadSourceLocation(Record, Idx));
+ // Delay type reading until after we have fully initialized the decl.
+ TypeIDForTypeDecl = Reader.getGlobalTypeID(F, Record[Idx++]);
+}
+
+ASTDeclReader::RedeclarableResult
+ASTDeclReader::VisitTypedefNameDecl(TypedefNameDecl *TD) {
+ RedeclarableResult Redecl = VisitRedeclarable(TD);
+ VisitTypeDecl(TD);
+ TypeSourceInfo *TInfo = GetTypeSourceInfo(Record, Idx);
+ if (Record[Idx++]) { // isModed
+ QualType modedT = Reader.readType(F, Record, Idx);
+ TD->setModedTypeSourceInfo(TInfo, modedT);
+ } else
+ TD->setTypeSourceInfo(TInfo);
+ return Redecl;
+}
+
+void ASTDeclReader::VisitTypedefDecl(TypedefDecl *TD) {
+ RedeclarableResult Redecl = VisitTypedefNameDecl(TD);
+ mergeRedeclarable(TD, Redecl);
+}
+
+void ASTDeclReader::VisitTypeAliasDecl(TypeAliasDecl *TD) {
+ RedeclarableResult Redecl = VisitTypedefNameDecl(TD);
+ if (auto *Template = ReadDeclAs<TypeAliasTemplateDecl>(Record, Idx))
+ // Merged when we merge the template.
+ TD->setDescribedAliasTemplate(Template);
+ else
+ mergeRedeclarable(TD, Redecl);
+}
+
+ASTDeclReader::RedeclarableResult ASTDeclReader::VisitTagDecl(TagDecl *TD) {
+ RedeclarableResult Redecl = VisitRedeclarable(TD);
+ VisitTypeDecl(TD);
+
+ TD->IdentifierNamespace = Record[Idx++];
+ TD->setTagKind((TagDecl::TagKind)Record[Idx++]);
+ if (!isa<CXXRecordDecl>(TD))
+ TD->setCompleteDefinition(Record[Idx++]);
+ TD->setEmbeddedInDeclarator(Record[Idx++]);
+ TD->setFreeStanding(Record[Idx++]);
+ TD->setCompleteDefinitionRequired(Record[Idx++]);
+ TD->setRBraceLoc(ReadSourceLocation(Record, Idx));
+
+ switch (Record[Idx++]) {
+ case 0:
+ break;
+ case 1: { // ExtInfo
+ TagDecl::ExtInfo *Info = new (Reader.getContext()) TagDecl::ExtInfo();
+ ReadQualifierInfo(*Info, Record, Idx);
+ TD->TypedefNameDeclOrQualifier = Info;
+ break;
+ }
+ case 2: // TypedefNameForAnonDecl
+ NamedDeclForTagDecl = ReadDeclID(Record, Idx);
+ TypedefNameForLinkage = Reader.GetIdentifierInfo(F, Record, Idx);
+ break;
+ default:
+ llvm_unreachable("unexpected tag info kind");
+ }
+
+ if (!isa<CXXRecordDecl>(TD))
+ mergeRedeclarable(TD, Redecl);
+ return Redecl;
+}
+
+void ASTDeclReader::VisitEnumDecl(EnumDecl *ED) {
+ VisitTagDecl(ED);
+ if (TypeSourceInfo *TI = Reader.GetTypeSourceInfo(F, Record, Idx))
+ ED->setIntegerTypeSourceInfo(TI);
+ else
+ ED->setIntegerType(Reader.readType(F, Record, Idx));
+ ED->setPromotionType(Reader.readType(F, Record, Idx));
+ ED->setNumPositiveBits(Record[Idx++]);
+ ED->setNumNegativeBits(Record[Idx++]);
+ ED->IsScoped = Record[Idx++];
+ ED->IsScopedUsingClassTag = Record[Idx++];
+ ED->IsFixed = Record[Idx++];
+
+ // If this is a definition subject to the ODR, and we already have a
+ // definition, merge this one into it.
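+  // For example (hedged sketch): if modules A and B both provide a complete
+  // 'enum E : int { e1 };', the definition imported second is demoted
+  // (IsCompleteDefinition = false) and recorded in MergedDeclContexts so
+  // that lookups funnel to the surviving definition.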
+ if (ED->IsCompleteDefinition &&
+ Reader.getContext().getLangOpts().Modules &&
+ Reader.getContext().getLangOpts().CPlusPlus) {
+ EnumDecl *&OldDef = Reader.EnumDefinitions[ED->getCanonicalDecl()];
+ if (!OldDef) {
+ // This is the first time we've seen an imported definition. Look for a
+ // local definition before deciding that we are the first definition.
+ for (auto *D : merged_redecls(ED->getCanonicalDecl())) {
+ if (!D->isFromASTFile() && D->isCompleteDefinition()) {
+ OldDef = D;
+ break;
+ }
+ }
+ }
+ if (OldDef) {
+ Reader.MergedDeclContexts.insert(std::make_pair(ED, OldDef));
+ ED->IsCompleteDefinition = false;
+ mergeDefinitionVisibility(OldDef, ED);
+ } else {
+ OldDef = ED;
+ }
+ }
+
+ if (EnumDecl *InstED = ReadDeclAs<EnumDecl>(Record, Idx)) {
+ TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
+ SourceLocation POI = ReadSourceLocation(Record, Idx);
+ ED->setInstantiationOfMemberEnum(Reader.getContext(), InstED, TSK);
+ ED->getMemberSpecializationInfo()->setPointOfInstantiation(POI);
+ }
+}
+
+ASTDeclReader::RedeclarableResult
+ASTDeclReader::VisitRecordDeclImpl(RecordDecl *RD) {
+ RedeclarableResult Redecl = VisitTagDecl(RD);
+ RD->setHasFlexibleArrayMember(Record[Idx++]);
+ RD->setAnonymousStructOrUnion(Record[Idx++]);
+ RD->setHasObjectMember(Record[Idx++]);
+ RD->setHasVolatileMember(Record[Idx++]);
+ return Redecl;
+}
+
+void ASTDeclReader::VisitValueDecl(ValueDecl *VD) {
+ VisitNamedDecl(VD);
+ VD->setType(Reader.readType(F, Record, Idx));
+}
+
+void ASTDeclReader::VisitEnumConstantDecl(EnumConstantDecl *ECD) {
+ VisitValueDecl(ECD);
+ if (Record[Idx++])
+ ECD->setInitExpr(Reader.ReadExpr(F));
+ ECD->setInitVal(Reader.ReadAPSInt(Record, Idx));
+ mergeMergeable(ECD);
+}
+
+void ASTDeclReader::VisitDeclaratorDecl(DeclaratorDecl *DD) {
+ VisitValueDecl(DD);
+ DD->setInnerLocStart(ReadSourceLocation(Record, Idx));
+ if (Record[Idx++]) { // hasExtInfo
+ DeclaratorDecl::ExtInfo *Info
+ = new (Reader.getContext()) DeclaratorDecl::ExtInfo();
+ ReadQualifierInfo(*Info, Record, Idx);
+ DD->DeclInfo = Info;
+ }
+}
+
+void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
+ RedeclarableResult Redecl = VisitRedeclarable(FD);
+ VisitDeclaratorDecl(FD);
+
+ ReadDeclarationNameLoc(FD->DNLoc, FD->getDeclName(), Record, Idx);
+ FD->IdentifierNamespace = Record[Idx++];
+
+ // FunctionDecl's body is handled last at ASTDeclReader::Visit,
+ // after everything else is read.
+
+ FD->SClass = (StorageClass)Record[Idx++];
+ FD->IsInline = Record[Idx++];
+ FD->IsInlineSpecified = Record[Idx++];
+ FD->IsVirtualAsWritten = Record[Idx++];
+ FD->IsPure = Record[Idx++];
+ FD->HasInheritedPrototype = Record[Idx++];
+ FD->HasWrittenPrototype = Record[Idx++];
+ FD->IsDeleted = Record[Idx++];
+ FD->IsTrivial = Record[Idx++];
+ FD->IsDefaulted = Record[Idx++];
+ FD->IsExplicitlyDefaulted = Record[Idx++];
+ FD->HasImplicitReturnZero = Record[Idx++];
+ FD->IsConstexpr = Record[Idx++];
+ FD->HasSkippedBody = Record[Idx++];
+ FD->IsLateTemplateParsed = Record[Idx++];
+ FD->setCachedLinkage(Linkage(Record[Idx++]));
+ FD->EndRangeLoc = ReadSourceLocation(Record, Idx);
+
+ switch ((FunctionDecl::TemplatedKind)Record[Idx++]) {
+ case FunctionDecl::TK_NonTemplate:
+ mergeRedeclarable(FD, Redecl);
+ break;
+ case FunctionDecl::TK_FunctionTemplate:
+ // Merged when we merge the template.
+ FD->setDescribedFunctionTemplate(ReadDeclAs<FunctionTemplateDecl>(Record,
+ Idx));
+ break;
+ case FunctionDecl::TK_MemberSpecialization: {
+ FunctionDecl *InstFD = ReadDeclAs<FunctionDecl>(Record, Idx);
+ TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
+ SourceLocation POI = ReadSourceLocation(Record, Idx);
+ FD->setInstantiationOfMemberFunction(Reader.getContext(), InstFD, TSK);
+ FD->getMemberSpecializationInfo()->setPointOfInstantiation(POI);
+ mergeRedeclarable(FD, Redecl);
+ break;
+ }
+ case FunctionDecl::TK_FunctionTemplateSpecialization: {
+ FunctionTemplateDecl *Template = ReadDeclAs<FunctionTemplateDecl>(Record,
+ Idx);
+ TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
+
+ // Template arguments.
+ SmallVector<TemplateArgument, 8> TemplArgs;
+ Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx,
+ /*Canonicalize*/ true);
+
+ // Template args as written.
+ SmallVector<TemplateArgumentLoc, 8> TemplArgLocs;
+ SourceLocation LAngleLoc, RAngleLoc;
+ bool HasTemplateArgumentsAsWritten = Record[Idx++];
+ if (HasTemplateArgumentsAsWritten) {
+ unsigned NumTemplateArgLocs = Record[Idx++];
+ TemplArgLocs.reserve(NumTemplateArgLocs);
+      for (unsigned i = 0; i != NumTemplateArgLocs; ++i)
+ TemplArgLocs.push_back(
+ Reader.ReadTemplateArgumentLoc(F, Record, Idx));
+
+ LAngleLoc = ReadSourceLocation(Record, Idx);
+ RAngleLoc = ReadSourceLocation(Record, Idx);
+ }
+
+ SourceLocation POI = ReadSourceLocation(Record, Idx);
+
+ ASTContext &C = Reader.getContext();
+ TemplateArgumentList *TemplArgList
+ = TemplateArgumentList::CreateCopy(C, TemplArgs.data(), TemplArgs.size());
+ TemplateArgumentListInfo TemplArgsInfo(LAngleLoc, RAngleLoc);
+    for (unsigned i = 0, e = TemplArgLocs.size(); i != e; ++i)
+ TemplArgsInfo.addArgument(TemplArgLocs[i]);
+ FunctionTemplateSpecializationInfo *FTInfo
+ = FunctionTemplateSpecializationInfo::Create(C, FD, Template, TSK,
+ TemplArgList,
+ HasTemplateArgumentsAsWritten ? &TemplArgsInfo
+ : nullptr,
+ POI);
+ FD->TemplateOrSpecialization = FTInfo;
+
+    if (FD->isCanonicalDecl()) { // If canonical, add to the template's set.
+ // The template that contains the specializations set. It's not safe to
+ // use getCanonicalDecl on Template since it may still be initializing.
+ FunctionTemplateDecl *CanonTemplate
+ = ReadDeclAs<FunctionTemplateDecl>(Record, Idx);
+ // Get the InsertPos by FindNodeOrInsertPos() instead of calling
+ // InsertNode(FTInfo) directly to avoid the getASTContext() call in
+ // FunctionTemplateSpecializationInfo's Profile().
+ // We avoid getASTContext because a decl in the parent hierarchy may
+ // be initializing.
+ llvm::FoldingSetNodeID ID;
+ FunctionTemplateSpecializationInfo::Profile(ID, TemplArgs, C);
+ void *InsertPos = nullptr;
+ FunctionTemplateDecl::Common *CommonPtr = CanonTemplate->getCommonPtr();
+ FunctionTemplateSpecializationInfo *ExistingInfo =
+ CommonPtr->Specializations.FindNodeOrInsertPos(ID, InsertPos);
+ if (InsertPos)
+ CommonPtr->Specializations.InsertNode(FTInfo, InsertPos);
+ else {
+ assert(Reader.getContext().getLangOpts().Modules &&
+ "already deserialized this template specialization");
+ mergeRedeclarable(FD, ExistingInfo->Function, Redecl);
+ }
+ }
+ break;
+ }
+ case FunctionDecl::TK_DependentFunctionTemplateSpecialization: {
+ // Templates.
+ UnresolvedSet<8> TemplDecls;
+ unsigned NumTemplates = Record[Idx++];
+ while (NumTemplates--)
+ TemplDecls.addDecl(ReadDeclAs<NamedDecl>(Record, Idx));
+
+ // Templates args.
+ TemplateArgumentListInfo TemplArgs;
+ unsigned NumArgs = Record[Idx++];
+ while (NumArgs--)
+ TemplArgs.addArgument(Reader.ReadTemplateArgumentLoc(F, Record, Idx));
+ TemplArgs.setLAngleLoc(ReadSourceLocation(Record, Idx));
+ TemplArgs.setRAngleLoc(ReadSourceLocation(Record, Idx));
+
+ FD->setDependentTemplateSpecialization(Reader.getContext(),
+ TemplDecls, TemplArgs);
+ // These are not merged; we don't need to merge redeclarations of dependent
+ // template friends.
+ break;
+ }
+ }
+
+ // Read in the parameters.
+ unsigned NumParams = Record[Idx++];
+ SmallVector<ParmVarDecl *, 16> Params;
+ Params.reserve(NumParams);
+ for (unsigned I = 0; I != NumParams; ++I)
+ Params.push_back(ReadDeclAs<ParmVarDecl>(Record, Idx));
+ FD->setParams(Reader.getContext(), Params);
+}
+
+void ASTDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) {
+ VisitNamedDecl(MD);
+ if (Record[Idx++]) {
+ // Load the body on-demand. Most clients won't care, because method
+ // definitions rarely show up in headers.
+ Reader.PendingBodies[MD] = GetCurrentCursorOffset();
+ HasPendingBody = true;
+ MD->setSelfDecl(ReadDeclAs<ImplicitParamDecl>(Record, Idx));
+ MD->setCmdDecl(ReadDeclAs<ImplicitParamDecl>(Record, Idx));
+ }
+ MD->setInstanceMethod(Record[Idx++]);
+ MD->setVariadic(Record[Idx++]);
+ MD->setPropertyAccessor(Record[Idx++]);
+ MD->setDefined(Record[Idx++]);
+ MD->IsOverriding = Record[Idx++];
+ MD->HasSkippedBody = Record[Idx++];
+
+ MD->IsRedeclaration = Record[Idx++];
+ MD->HasRedeclaration = Record[Idx++];
+ if (MD->HasRedeclaration)
+ Reader.getContext().setObjCMethodRedeclaration(MD,
+ ReadDeclAs<ObjCMethodDecl>(Record, Idx));
+
+ MD->setDeclImplementation((ObjCMethodDecl::ImplementationControl)Record[Idx++]);
+ MD->setObjCDeclQualifier((Decl::ObjCDeclQualifier)Record[Idx++]);
+ MD->SetRelatedResultType(Record[Idx++]);
+ MD->setReturnType(Reader.readType(F, Record, Idx));
+ MD->setReturnTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
+ MD->DeclEndLoc = ReadSourceLocation(Record, Idx);
+ unsigned NumParams = Record[Idx++];
+ SmallVector<ParmVarDecl *, 16> Params;
+ Params.reserve(NumParams);
+ for (unsigned I = 0; I != NumParams; ++I)
+ Params.push_back(ReadDeclAs<ParmVarDecl>(Record, Idx));
+
+ MD->SelLocsKind = Record[Idx++];
+ unsigned NumStoredSelLocs = Record[Idx++];
+ SmallVector<SourceLocation, 16> SelLocs;
+ SelLocs.reserve(NumStoredSelLocs);
+ for (unsigned i = 0; i != NumStoredSelLocs; ++i)
+ SelLocs.push_back(ReadSourceLocation(Record, Idx));
+
+ MD->setParamsAndSelLocs(Reader.getContext(), Params, SelLocs);
+}
+
+void ASTDeclReader::VisitObjCTypeParamDecl(ObjCTypeParamDecl *D) {
+ VisitTypedefNameDecl(D);
+
+ D->Variance = Record[Idx++];
+ D->Index = Record[Idx++];
+ D->VarianceLoc = ReadSourceLocation(Record, Idx);
+ D->ColonLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTDeclReader::VisitObjCContainerDecl(ObjCContainerDecl *CD) {
+ VisitNamedDecl(CD);
+ CD->setAtStartLoc(ReadSourceLocation(Record, Idx));
+ CD->setAtEndRange(ReadSourceRange(Record, Idx));
+}
+
+ObjCTypeParamList *ASTDeclReader::ReadObjCTypeParamList() {
+ unsigned numParams = Record[Idx++];
+ if (numParams == 0)
+ return nullptr;
+
+ SmallVector<ObjCTypeParamDecl *, 4> typeParams;
+ typeParams.reserve(numParams);
+ for (unsigned i = 0; i != numParams; ++i) {
+ auto typeParam = ReadDeclAs<ObjCTypeParamDecl>(Record, Idx);
+ if (!typeParam)
+ return nullptr;
+
+ typeParams.push_back(typeParam);
+ }
+
+ SourceLocation lAngleLoc = ReadSourceLocation(Record, Idx);
+ SourceLocation rAngleLoc = ReadSourceLocation(Record, Idx);
+
+ return ObjCTypeParamList::create(Reader.getContext(), lAngleLoc,
+ typeParams, rAngleLoc);
+}
+
+void ASTDeclReader::VisitObjCInterfaceDecl(ObjCInterfaceDecl *ID) {
+ RedeclarableResult Redecl = VisitRedeclarable(ID);
+ VisitObjCContainerDecl(ID);
+ TypeIDForTypeDecl = Reader.getGlobalTypeID(F, Record[Idx++]);
+ mergeRedeclarable(ID, Redecl);
+
+ ID->TypeParamList = ReadObjCTypeParamList();
+ if (Record[Idx++]) {
+ // Read the definition.
+ ID->allocateDefinitionData();
+
+ // Set the definition data of the canonical declaration, so other
+ // redeclarations will see it.
+ ID->getCanonicalDecl()->Data = ID->Data;
+
+ ObjCInterfaceDecl::DefinitionData &Data = ID->data();
+
+ // Read the superclass.
+ Data.SuperClassTInfo = GetTypeSourceInfo(Record, Idx);
+
+ Data.EndLoc = ReadSourceLocation(Record, Idx);
+ Data.HasDesignatedInitializers = Record[Idx++];
+
+ // Read the directly referenced protocols and their SourceLocations.
+ unsigned NumProtocols = Record[Idx++];
+ SmallVector<ObjCProtocolDecl *, 16> Protocols;
+ Protocols.reserve(NumProtocols);
+ for (unsigned I = 0; I != NumProtocols; ++I)
+ Protocols.push_back(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
+ SmallVector<SourceLocation, 16> ProtoLocs;
+ ProtoLocs.reserve(NumProtocols);
+ for (unsigned I = 0; I != NumProtocols; ++I)
+ ProtoLocs.push_back(ReadSourceLocation(Record, Idx));
+ ID->setProtocolList(Protocols.data(), NumProtocols, ProtoLocs.data(),
+ Reader.getContext());
+
+ // Read the transitive closure of protocols referenced by this class.
+ NumProtocols = Record[Idx++];
+ Protocols.clear();
+ Protocols.reserve(NumProtocols);
+ for (unsigned I = 0; I != NumProtocols; ++I)
+ Protocols.push_back(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
+ ID->data().AllReferencedProtocols.set(Protocols.data(), NumProtocols,
+ Reader.getContext());
+
+ // We will rebuild this list lazily.
+ ID->setIvarList(nullptr);
+
+ // Note that we have deserialized a definition.
+ Reader.PendingDefinitions.insert(ID);
+
+ // Note that we've loaded this Objective-C class.
+ Reader.ObjCClassesLoaded.push_back(ID);
+ } else {
+ ID->Data = ID->getCanonicalDecl()->Data;
+ }
+}
+
+void ASTDeclReader::VisitObjCIvarDecl(ObjCIvarDecl *IVD) {
+ VisitFieldDecl(IVD);
+ IVD->setAccessControl((ObjCIvarDecl::AccessControl)Record[Idx++]);
+ // This field will be built lazily.
+ IVD->setNextIvar(nullptr);
+ bool synth = Record[Idx++];
+ IVD->setSynthesize(synth);
+}
+
+void ASTDeclReader::VisitObjCProtocolDecl(ObjCProtocolDecl *PD) {
+ RedeclarableResult Redecl = VisitRedeclarable(PD);
+ VisitObjCContainerDecl(PD);
+ mergeRedeclarable(PD, Redecl);
+
+ if (Record[Idx++]) {
+ // Read the definition.
+ PD->allocateDefinitionData();
+
+ // Set the definition data of the canonical declaration, so other
+ // redeclarations will see it.
+ PD->getCanonicalDecl()->Data = PD->Data;
+
+ unsigned NumProtoRefs = Record[Idx++];
+ SmallVector<ObjCProtocolDecl *, 16> ProtoRefs;
+ ProtoRefs.reserve(NumProtoRefs);
+ for (unsigned I = 0; I != NumProtoRefs; ++I)
+ ProtoRefs.push_back(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
+ SmallVector<SourceLocation, 16> ProtoLocs;
+ ProtoLocs.reserve(NumProtoRefs);
+ for (unsigned I = 0; I != NumProtoRefs; ++I)
+ ProtoLocs.push_back(ReadSourceLocation(Record, Idx));
+ PD->setProtocolList(ProtoRefs.data(), NumProtoRefs, ProtoLocs.data(),
+ Reader.getContext());
+
+ // Note that we have deserialized a definition.
+ Reader.PendingDefinitions.insert(PD);
+ } else {
+ PD->Data = PD->getCanonicalDecl()->Data;
+ }
+}
+
+void ASTDeclReader::VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *FD) {
+ VisitFieldDecl(FD);
+}
+
+void ASTDeclReader::VisitObjCCategoryDecl(ObjCCategoryDecl *CD) {
+ VisitObjCContainerDecl(CD);
+ CD->setCategoryNameLoc(ReadSourceLocation(Record, Idx));
+ CD->setIvarLBraceLoc(ReadSourceLocation(Record, Idx));
+ CD->setIvarRBraceLoc(ReadSourceLocation(Record, Idx));
+
+ // Note that this category has been deserialized. We do this before
+ // deserializing the interface declaration, so that it will consider this
+  // category.
+ Reader.CategoriesDeserialized.insert(CD);
+
+ CD->ClassInterface = ReadDeclAs<ObjCInterfaceDecl>(Record, Idx);
+ CD->TypeParamList = ReadObjCTypeParamList();
+ unsigned NumProtoRefs = Record[Idx++];
+ SmallVector<ObjCProtocolDecl *, 16> ProtoRefs;
+ ProtoRefs.reserve(NumProtoRefs);
+ for (unsigned I = 0; I != NumProtoRefs; ++I)
+ ProtoRefs.push_back(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
+ SmallVector<SourceLocation, 16> ProtoLocs;
+ ProtoLocs.reserve(NumProtoRefs);
+ for (unsigned I = 0; I != NumProtoRefs; ++I)
+ ProtoLocs.push_back(ReadSourceLocation(Record, Idx));
+ CD->setProtocolList(ProtoRefs.data(), NumProtoRefs, ProtoLocs.data(),
+ Reader.getContext());
+}
+
+void ASTDeclReader::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *CAD) {
+ VisitNamedDecl(CAD);
+ CAD->setClassInterface(ReadDeclAs<ObjCInterfaceDecl>(Record, Idx));
+}
+
+void ASTDeclReader::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
+ VisitNamedDecl(D);
+ D->setAtLoc(ReadSourceLocation(Record, Idx));
+ D->setLParenLoc(ReadSourceLocation(Record, Idx));
+ QualType T = Reader.readType(F, Record, Idx);
+ TypeSourceInfo *TSI = GetTypeSourceInfo(Record, Idx);
+ D->setType(T, TSI);
+ D->setPropertyAttributes(
+ (ObjCPropertyDecl::PropertyAttributeKind)Record[Idx++]);
+ D->setPropertyAttributesAsWritten(
+ (ObjCPropertyDecl::PropertyAttributeKind)Record[Idx++]);
+ D->setPropertyImplementation(
+ (ObjCPropertyDecl::PropertyControl)Record[Idx++]);
+  D->setGetterName(
+      Reader.ReadDeclarationName(F, Record, Idx).getObjCSelector());
+  D->setSetterName(
+      Reader.ReadDeclarationName(F, Record, Idx).getObjCSelector());
+ D->setGetterMethodDecl(ReadDeclAs<ObjCMethodDecl>(Record, Idx));
+ D->setSetterMethodDecl(ReadDeclAs<ObjCMethodDecl>(Record, Idx));
+ D->setPropertyIvarDecl(ReadDeclAs<ObjCIvarDecl>(Record, Idx));
+}
+
+void ASTDeclReader::VisitObjCImplDecl(ObjCImplDecl *D) {
+ VisitObjCContainerDecl(D);
+ D->setClassInterface(ReadDeclAs<ObjCInterfaceDecl>(Record, Idx));
+}
+
+void ASTDeclReader::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
+ VisitObjCImplDecl(D);
+ D->setIdentifier(Reader.GetIdentifierInfo(F, Record, Idx));
+ D->CategoryNameLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTDeclReader::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
+ VisitObjCImplDecl(D);
+ D->setSuperClass(ReadDeclAs<ObjCInterfaceDecl>(Record, Idx));
+ D->SuperLoc = ReadSourceLocation(Record, Idx);
+ D->setIvarLBraceLoc(ReadSourceLocation(Record, Idx));
+ D->setIvarRBraceLoc(ReadSourceLocation(Record, Idx));
+ D->setHasNonZeroConstructors(Record[Idx++]);
+ D->setHasDestructors(Record[Idx++]);
+ D->NumIvarInitializers = Record[Idx++];
+ if (D->NumIvarInitializers)
+ D->IvarInitializers = Reader.ReadCXXCtorInitializersRef(F, Record, Idx);
+}
+
+void ASTDeclReader::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
+ VisitDecl(D);
+ D->setAtLoc(ReadSourceLocation(Record, Idx));
+ D->setPropertyDecl(ReadDeclAs<ObjCPropertyDecl>(Record, Idx));
+ D->PropertyIvarDecl = ReadDeclAs<ObjCIvarDecl>(Record, Idx);
+ D->IvarLoc = ReadSourceLocation(Record, Idx);
+ D->setGetterCXXConstructor(Reader.ReadExpr(F));
+ D->setSetterCXXAssignment(Reader.ReadExpr(F));
+}
+
+void ASTDeclReader::VisitFieldDecl(FieldDecl *FD) {
+ VisitDeclaratorDecl(FD);
+ FD->Mutable = Record[Idx++];
+ if (int BitWidthOrInitializer = Record[Idx++]) {
+ FD->InitStorage.setInt(
+ static_cast<FieldDecl::InitStorageKind>(BitWidthOrInitializer - 1));
+ if (FD->InitStorage.getInt() == FieldDecl::ISK_CapturedVLAType) {
+ // Read captured variable length array.
+ FD->InitStorage.setPointer(
+ Reader.readType(F, Record, Idx).getAsOpaquePtr());
+ } else {
+ FD->InitStorage.setPointer(Reader.ReadExpr(F));
+ }
+ }
+ if (!FD->getDeclName()) {
+ if (FieldDecl *Tmpl = ReadDeclAs<FieldDecl>(Record, Idx))
+ Reader.getContext().setInstantiatedFromUnnamedFieldDecl(FD, Tmpl);
+ }
+ mergeMergeable(FD);
+}
+
+void ASTDeclReader::VisitMSPropertyDecl(MSPropertyDecl *PD) {
+ VisitDeclaratorDecl(PD);
+ PD->GetterId = Reader.GetIdentifierInfo(F, Record, Idx);
+ PD->SetterId = Reader.GetIdentifierInfo(F, Record, Idx);
+}
+
+void ASTDeclReader::VisitIndirectFieldDecl(IndirectFieldDecl *FD) {
+ VisitValueDecl(FD);
+
+ FD->ChainingSize = Record[Idx++];
+ assert(FD->ChainingSize >= 2 && "Anonymous chaining must be >= 2");
+  FD->Chaining = new (Reader.getContext()) NamedDecl*[FD->ChainingSize];
+
+ for (unsigned I = 0; I != FD->ChainingSize; ++I)
+ FD->Chaining[I] = ReadDeclAs<NamedDecl>(Record, Idx);
+
+ mergeMergeable(FD);
+}
+
+ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
+ RedeclarableResult Redecl = VisitRedeclarable(VD);
+ VisitDeclaratorDecl(VD);
+
+ VD->VarDeclBits.SClass = (StorageClass)Record[Idx++];
+ VD->VarDeclBits.TSCSpec = Record[Idx++];
+ VD->VarDeclBits.InitStyle = Record[Idx++];
+ if (!isa<ParmVarDecl>(VD)) {
+ VD->NonParmVarDeclBits.ExceptionVar = Record[Idx++];
+ VD->NonParmVarDeclBits.NRVOVariable = Record[Idx++];
+ VD->NonParmVarDeclBits.CXXForRangeDecl = Record[Idx++];
+ VD->NonParmVarDeclBits.ARCPseudoStrong = Record[Idx++];
+ VD->NonParmVarDeclBits.IsConstexpr = Record[Idx++];
+ VD->NonParmVarDeclBits.IsInitCapture = Record[Idx++];
+ VD->NonParmVarDeclBits.PreviousDeclInSameBlockScope = Record[Idx++];
+ }
+ Linkage VarLinkage = Linkage(Record[Idx++]);
+ VD->setCachedLinkage(VarLinkage);
+
+ // Reconstruct the one piece of the IdentifierNamespace that we need.
+ if (VD->getStorageClass() == SC_Extern && VarLinkage != NoLinkage &&
+ VD->getLexicalDeclContext()->isFunctionOrMethod())
+ VD->setLocalExternDecl();
+
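+  // Initializer encoding, inferred from the checks below (not a quoted
+  // spec): 0 = no initializer, 1 = initializer present but ICE not yet
+  // checked, 2 = checked and not an ICE, 3 = checked and an ICE.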
+ if (uint64_t Val = Record[Idx++]) {
+ VD->setInit(Reader.ReadExpr(F));
+ if (Val > 1) {
+ EvaluatedStmt *Eval = VD->ensureEvaluatedStmt();
+ Eval->CheckedICE = true;
+ Eval->IsICE = Val == 3;
+ }
+ }
+
+ enum VarKind {
+ VarNotTemplate = 0, VarTemplate, StaticDataMemberSpecialization
+ };
+ switch ((VarKind)Record[Idx++]) {
+ case VarNotTemplate:
+ // Only true variables (not parameters or implicit parameters) can be
+ // merged; the other kinds are not really redeclarable at all.
+ if (!isa<ParmVarDecl>(VD) && !isa<ImplicitParamDecl>(VD) &&
+ !isa<VarTemplateSpecializationDecl>(VD))
+ mergeRedeclarable(VD, Redecl);
+ break;
+ case VarTemplate:
+ // Merged when we merge the template.
+ VD->setDescribedVarTemplate(ReadDeclAs<VarTemplateDecl>(Record, Idx));
+ break;
+ case StaticDataMemberSpecialization: { // HasMemberSpecializationInfo.
+ VarDecl *Tmpl = ReadDeclAs<VarDecl>(Record, Idx);
+ TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
+ SourceLocation POI = ReadSourceLocation(Record, Idx);
+    Reader.getContext().setInstantiatedFromStaticDataMember(VD, Tmpl, TSK, POI);
+ mergeRedeclarable(VD, Redecl);
+ break;
+ }
+ }
+
+ return Redecl;
+}
+
+void ASTDeclReader::VisitImplicitParamDecl(ImplicitParamDecl *PD) {
+ VisitVarDecl(PD);
+}
+
+void ASTDeclReader::VisitParmVarDecl(ParmVarDecl *PD) {
+ VisitVarDecl(PD);
+ unsigned isObjCMethodParam = Record[Idx++];
+ unsigned scopeDepth = Record[Idx++];
+ unsigned scopeIndex = Record[Idx++];
+ unsigned declQualifier = Record[Idx++];
+ if (isObjCMethodParam) {
+ assert(scopeDepth == 0);
+ PD->setObjCMethodScopeInfo(scopeIndex);
+ PD->ParmVarDeclBits.ScopeDepthOrObjCQuals = declQualifier;
+ } else {
+ PD->setScopeInfo(scopeDepth, scopeIndex);
+ }
+ PD->ParmVarDeclBits.IsKNRPromoted = Record[Idx++];
+ PD->ParmVarDeclBits.HasInheritedDefaultArg = Record[Idx++];
+ if (Record[Idx++]) // hasUninstantiatedDefaultArg.
+ PD->setUninstantiatedDefaultArg(Reader.ReadExpr(F));
+
+ // FIXME: If this is a redeclaration of a function from another module, handle
+ // inheritance of default arguments.
+}
+
+void ASTDeclReader::VisitFileScopeAsmDecl(FileScopeAsmDecl *AD) {
+ VisitDecl(AD);
+ AD->setAsmString(cast<StringLiteral>(Reader.ReadExpr(F)));
+ AD->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTDeclReader::VisitBlockDecl(BlockDecl *BD) {
+ VisitDecl(BD);
+ BD->setBody(cast_or_null<CompoundStmt>(Reader.ReadStmt(F)));
+ BD->setSignatureAsWritten(GetTypeSourceInfo(Record, Idx));
+ unsigned NumParams = Record[Idx++];
+ SmallVector<ParmVarDecl *, 16> Params;
+ Params.reserve(NumParams);
+ for (unsigned I = 0; I != NumParams; ++I)
+ Params.push_back(ReadDeclAs<ParmVarDecl>(Record, Idx));
+ BD->setParams(Params);
+
+ BD->setIsVariadic(Record[Idx++]);
+ BD->setBlockMissingReturnType(Record[Idx++]);
+ BD->setIsConversionFromLambda(Record[Idx++]);
+
+ bool capturesCXXThis = Record[Idx++];
+ unsigned numCaptures = Record[Idx++];
+ SmallVector<BlockDecl::Capture, 16> captures;
+ captures.reserve(numCaptures);
+ for (unsigned i = 0; i != numCaptures; ++i) {
+ VarDecl *decl = ReadDeclAs<VarDecl>(Record, Idx);
+ unsigned flags = Record[Idx++];
+ bool byRef = (flags & 1);
+ bool nested = (flags & 2);
+ Expr *copyExpr = ((flags & 4) ? Reader.ReadExpr(F) : nullptr);
+
+ captures.push_back(BlockDecl::Capture(decl, byRef, nested, copyExpr));
+ }
+ BD->setCaptures(Reader.getContext(), captures, capturesCXXThis);
+}
+
+void ASTDeclReader::VisitCapturedDecl(CapturedDecl *CD) {
+ VisitDecl(CD);
+ unsigned ContextParamPos = Record[Idx++];
+ CD->setNothrow(Record[Idx++] != 0);
+ // Body is set by VisitCapturedStmt.
+ for (unsigned I = 0; I < CD->NumParams; ++I) {
+ if (I != ContextParamPos)
+ CD->setParam(I, ReadDeclAs<ImplicitParamDecl>(Record, Idx));
+ else
+ CD->setContextParam(I, ReadDeclAs<ImplicitParamDecl>(Record, Idx));
+ }
+}
+
+void ASTDeclReader::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
+ VisitDecl(D);
+ D->setLanguage((LinkageSpecDecl::LanguageIDs)Record[Idx++]);
+ D->setExternLoc(ReadSourceLocation(Record, Idx));
+ D->setRBraceLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTDeclReader::VisitLabelDecl(LabelDecl *D) {
+ VisitNamedDecl(D);
+ D->setLocStart(ReadSourceLocation(Record, Idx));
+}
+
+void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) {
+ RedeclarableResult Redecl = VisitRedeclarable(D);
+ VisitNamedDecl(D);
+ D->setInline(Record[Idx++]);
+ D->LocStart = ReadSourceLocation(Record, Idx);
+ D->RBraceLoc = ReadSourceLocation(Record, Idx);
+
+ // Defer loading the anonymous namespace until we've finished merging
+ // this namespace; loading it might load a later declaration of the
+ // same namespace, and we have an invariant that older declarations
+ // get merged before newer ones try to merge.
+ GlobalDeclID AnonNamespace = 0;
+ if (Redecl.getFirstID() == ThisDeclID) {
+ AnonNamespace = ReadDeclID(Record, Idx);
+ } else {
+ // Link this namespace back to the first declaration, which has already
+ // been deserialized.
+ D->AnonOrFirstNamespaceAndInline.setPointer(D->getFirstDecl());
+ }
+
+ mergeRedeclarable(D, Redecl);
+
+ if (AnonNamespace) {
+ // Each module has its own anonymous namespace, which is disjoint from
+ // any other module's anonymous namespaces, so don't attach the anonymous
+ // namespace at all.
+ NamespaceDecl *Anon = cast<NamespaceDecl>(Reader.GetDecl(AnonNamespace));
+ if (F.Kind != MK_ImplicitModule && F.Kind != MK_ExplicitModule)
+ D->setAnonymousNamespace(Anon);
+ }
+}
+
+void ASTDeclReader::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
+ RedeclarableResult Redecl = VisitRedeclarable(D);
+ VisitNamedDecl(D);
+ D->NamespaceLoc = ReadSourceLocation(Record, Idx);
+ D->IdentLoc = ReadSourceLocation(Record, Idx);
+ D->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ D->Namespace = ReadDeclAs<NamedDecl>(Record, Idx);
+ mergeRedeclarable(D, Redecl);
+}
+
+void ASTDeclReader::VisitUsingDecl(UsingDecl *D) {
+ VisitNamedDecl(D);
+ D->setUsingLoc(ReadSourceLocation(Record, Idx));
+ D->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ ReadDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record, Idx);
+ D->FirstUsingShadow.setPointer(ReadDeclAs<UsingShadowDecl>(Record, Idx));
+ D->setTypename(Record[Idx++]);
+ if (NamedDecl *Pattern = ReadDeclAs<NamedDecl>(Record, Idx))
+ Reader.getContext().setInstantiatedFromUsingDecl(D, Pattern);
+ mergeMergeable(D);
+}
+
+void ASTDeclReader::VisitUsingShadowDecl(UsingShadowDecl *D) {
+ RedeclarableResult Redecl = VisitRedeclarable(D);
+ VisitNamedDecl(D);
+ D->setTargetDecl(ReadDeclAs<NamedDecl>(Record, Idx));
+ D->UsingOrNextShadow = ReadDeclAs<NamedDecl>(Record, Idx);
+ UsingShadowDecl *Pattern = ReadDeclAs<UsingShadowDecl>(Record, Idx);
+ if (Pattern)
+ Reader.getContext().setInstantiatedFromUsingShadowDecl(D, Pattern);
+ mergeRedeclarable(D, Redecl);
+}
+
+void ASTDeclReader::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
+ VisitNamedDecl(D);
+ D->UsingLoc = ReadSourceLocation(Record, Idx);
+ D->NamespaceLoc = ReadSourceLocation(Record, Idx);
+ D->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ D->NominatedNamespace = ReadDeclAs<NamedDecl>(Record, Idx);
+ D->CommonAncestor = ReadDeclAs<DeclContext>(Record, Idx);
+}
+
+void ASTDeclReader::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
+ VisitValueDecl(D);
+ D->setUsingLoc(ReadSourceLocation(Record, Idx));
+ D->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ ReadDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record, Idx);
+ mergeMergeable(D);
+}
+
+void ASTDeclReader::VisitUnresolvedUsingTypenameDecl(
+ UnresolvedUsingTypenameDecl *D) {
+ VisitTypeDecl(D);
+ D->TypenameLocation = ReadSourceLocation(Record, Idx);
+ D->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ mergeMergeable(D);
+}
+
+void ASTDeclReader::ReadCXXDefinitionData(
+ struct CXXRecordDecl::DefinitionData &Data,
+ const RecordData &Record, unsigned &Idx) {
+ // Note: the caller has deserialized the IsLambda bit already.
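+ // The fields below must be read in exactly the order in which ASTDeclWriter
+ // emitted them; each Record[Idx++] consumes the next serialized value.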
+ Data.UserDeclaredConstructor = Record[Idx++];
+ Data.UserDeclaredSpecialMembers = Record[Idx++];
+ Data.Aggregate = Record[Idx++];
+ Data.PlainOldData = Record[Idx++];
+ Data.Empty = Record[Idx++];
+ Data.Polymorphic = Record[Idx++];
+ Data.Abstract = Record[Idx++];
+ Data.IsStandardLayout = Record[Idx++];
+ Data.HasNoNonEmptyBases = Record[Idx++];
+ Data.HasPrivateFields = Record[Idx++];
+ Data.HasProtectedFields = Record[Idx++];
+ Data.HasPublicFields = Record[Idx++];
+ Data.HasMutableFields = Record[Idx++];
+ Data.HasVariantMembers = Record[Idx++];
+ Data.HasOnlyCMembers = Record[Idx++];
+ Data.HasInClassInitializer = Record[Idx++];
+ Data.HasUninitializedReferenceMember = Record[Idx++];
+ Data.NeedOverloadResolutionForMoveConstructor = Record[Idx++];
+ Data.NeedOverloadResolutionForMoveAssignment = Record[Idx++];
+ Data.NeedOverloadResolutionForDestructor = Record[Idx++];
+ Data.DefaultedMoveConstructorIsDeleted = Record[Idx++];
+ Data.DefaultedMoveAssignmentIsDeleted = Record[Idx++];
+ Data.DefaultedDestructorIsDeleted = Record[Idx++];
+ Data.HasTrivialSpecialMembers = Record[Idx++];
+ Data.DeclaredNonTrivialSpecialMembers = Record[Idx++];
+ Data.HasIrrelevantDestructor = Record[Idx++];
+ Data.HasConstexprNonCopyMoveConstructor = Record[Idx++];
+ Data.DefaultedDefaultConstructorIsConstexpr = Record[Idx++];
+ Data.HasConstexprDefaultConstructor = Record[Idx++];
+ Data.HasNonLiteralTypeFieldsOrBases = Record[Idx++];
+ Data.ComputedVisibleConversions = Record[Idx++];
+ Data.UserProvidedDefaultConstructor = Record[Idx++];
+ Data.DeclaredSpecialMembers = Record[Idx++];
+ Data.ImplicitCopyConstructorHasConstParam = Record[Idx++];
+ Data.ImplicitCopyAssignmentHasConstParam = Record[Idx++];
+ Data.HasDeclaredCopyConstructorWithConstParam = Record[Idx++];
+ Data.HasDeclaredCopyAssignmentWithConstParam = Record[Idx++];
+
+ Data.NumBases = Record[Idx++];
+ if (Data.NumBases)
+ Data.Bases = Reader.readCXXBaseSpecifiers(F, Record, Idx);
+ Data.NumVBases = Record[Idx++];
+ if (Data.NumVBases)
+ Data.VBases = Reader.readCXXBaseSpecifiers(F, Record, Idx);
+
+ Reader.ReadUnresolvedSet(F, Data.Conversions, Record, Idx);
+ Reader.ReadUnresolvedSet(F, Data.VisibleConversions, Record, Idx);
+ assert(Data.Definition && "Data.Definition should be already set!");
+ Data.FirstFriend = ReadDeclID(Record, Idx);
+
+ if (Data.IsLambda) {
+ typedef LambdaCapture Capture;
+ CXXRecordDecl::LambdaDefinitionData &Lambda
+ = static_cast<CXXRecordDecl::LambdaDefinitionData &>(Data);
+ Lambda.Dependent = Record[Idx++];
+ Lambda.IsGenericLambda = Record[Idx++];
+ Lambda.CaptureDefault = Record[Idx++];
+ Lambda.NumCaptures = Record[Idx++];
+ Lambda.NumExplicitCaptures = Record[Idx++];
+ Lambda.ManglingNumber = Record[Idx++];
+ Lambda.ContextDecl = ReadDecl(Record, Idx);
+ Lambda.Captures
+ = (Capture*)Reader.Context.Allocate(sizeof(Capture)*Lambda.NumCaptures);
+ Capture *ToCapture = Lambda.Captures;
+ Lambda.MethodTyInfo = GetTypeSourceInfo(Record, Idx);
+ for (unsigned I = 0, N = Lambda.NumCaptures; I != N; ++I) {
+ SourceLocation Loc = ReadSourceLocation(Record, Idx);
+ bool IsImplicit = Record[Idx++];
+ LambdaCaptureKind Kind = static_cast<LambdaCaptureKind>(Record[Idx++]);
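+ // 'this' and VLA-type captures carry no captured variable; by-copy and
+ // by-reference captures also record an ellipsis location for pack
+ // expansions.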
+ switch (Kind) {
+ case LCK_This:
+ case LCK_VLAType:
+ *ToCapture++ = Capture(Loc, IsImplicit, Kind, nullptr, SourceLocation());
+ break;
+ case LCK_ByCopy:
+ case LCK_ByRef:
+ VarDecl *Var = ReadDeclAs<VarDecl>(Record, Idx);
+ SourceLocation EllipsisLoc = ReadSourceLocation(Record, Idx);
+ *ToCapture++ = Capture(Loc, IsImplicit, Kind, Var, EllipsisLoc);
+ break;
+ }
+ }
+ }
+}
+
+void ASTDeclReader::MergeDefinitionData(
+ CXXRecordDecl *D, struct CXXRecordDecl::DefinitionData &&MergeDD) {
+ assert(D->DefinitionData.getNotUpdated() &&
+ "merging class definition into non-definition");
+ auto &DD = *D->DefinitionData.getNotUpdated();
+
+ if (DD.Definition != MergeDD.Definition) {
+ // Track that we merged the definitions.
+ Reader.MergedDeclContexts.insert(std::make_pair(MergeDD.Definition,
+ DD.Definition));
+ Reader.PendingDefinitions.erase(MergeDD.Definition);
+ MergeDD.Definition->IsCompleteDefinition = false;
+ mergeDefinitionVisibility(DD.Definition, MergeDD.Definition);
+ assert(Reader.Lookups.find(MergeDD.Definition) == Reader.Lookups.end() &&
+ "already loaded pending lookups for merged definition");
+ }
+
+ auto PFDI = Reader.PendingFakeDefinitionData.find(&DD);
+ if (PFDI != Reader.PendingFakeDefinitionData.end() &&
+ PFDI->second == ASTReader::PendingFakeDefinitionKind::Fake) {
+ // We faked up this definition data because we found a class for which we'd
+ // not yet loaded the definition. Replace it with the real thing now.
+ assert(!DD.IsLambda && !MergeDD.IsLambda && "faked up lambda definition?");
+ PFDI->second = ASTReader::PendingFakeDefinitionKind::FakeLoaded;
+
+ // Don't change which declaration is the definition; that is required
+ // to be invariant once we select it.
+ auto *Def = DD.Definition;
+ DD = std::move(MergeDD);
+ DD.Definition = Def;
+ return;
+ }
+
+ // FIXME: Move this out into a .def file?
+ bool DetectedOdrViolation = false;
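+ // MATCH_FIELD flags an ODR violation when the two definitions disagree on a
+ // bit before merging it; OR_FIELD merely accumulates, for bits that may
+ // legitimately differ between definitions.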
+#define OR_FIELD(Field) DD.Field |= MergeDD.Field;
+#define MATCH_FIELD(Field) \
+ DetectedOdrViolation |= DD.Field != MergeDD.Field; \
+ OR_FIELD(Field)
+ MATCH_FIELD(UserDeclaredConstructor)
+ MATCH_FIELD(UserDeclaredSpecialMembers)
+ MATCH_FIELD(Aggregate)
+ MATCH_FIELD(PlainOldData)
+ MATCH_FIELD(Empty)
+ MATCH_FIELD(Polymorphic)
+ MATCH_FIELD(Abstract)
+ MATCH_FIELD(IsStandardLayout)
+ MATCH_FIELD(HasNoNonEmptyBases)
+ MATCH_FIELD(HasPrivateFields)
+ MATCH_FIELD(HasProtectedFields)
+ MATCH_FIELD(HasPublicFields)
+ MATCH_FIELD(HasMutableFields)
+ MATCH_FIELD(HasVariantMembers)
+ MATCH_FIELD(HasOnlyCMembers)
+ MATCH_FIELD(HasInClassInitializer)
+ MATCH_FIELD(HasUninitializedReferenceMember)
+ MATCH_FIELD(NeedOverloadResolutionForMoveConstructor)
+ MATCH_FIELD(NeedOverloadResolutionForMoveAssignment)
+ MATCH_FIELD(NeedOverloadResolutionForDestructor)
+ MATCH_FIELD(DefaultedMoveConstructorIsDeleted)
+ MATCH_FIELD(DefaultedMoveAssignmentIsDeleted)
+ MATCH_FIELD(DefaultedDestructorIsDeleted)
+ OR_FIELD(HasTrivialSpecialMembers)
+ OR_FIELD(DeclaredNonTrivialSpecialMembers)
+ MATCH_FIELD(HasIrrelevantDestructor)
+ OR_FIELD(HasConstexprNonCopyMoveConstructor)
+ MATCH_FIELD(DefaultedDefaultConstructorIsConstexpr)
+ OR_FIELD(HasConstexprDefaultConstructor)
+ MATCH_FIELD(HasNonLiteralTypeFieldsOrBases)
+ // ComputedVisibleConversions is handled below.
+ MATCH_FIELD(UserProvidedDefaultConstructor)
+ OR_FIELD(DeclaredSpecialMembers)
+ MATCH_FIELD(ImplicitCopyConstructorHasConstParam)
+ MATCH_FIELD(ImplicitCopyAssignmentHasConstParam)
+ OR_FIELD(HasDeclaredCopyConstructorWithConstParam)
+ OR_FIELD(HasDeclaredCopyAssignmentWithConstParam)
+ MATCH_FIELD(IsLambda)
+#undef OR_FIELD
+#undef MATCH_FIELD
+
+ if (DD.NumBases != MergeDD.NumBases || DD.NumVBases != MergeDD.NumVBases)
+ DetectedOdrViolation = true;
+ // FIXME: Issue a diagnostic if the base classes don't match when we come
+ // to lazily load them.
+
+ // FIXME: Issue a diagnostic if the list of conversion functions doesn't
+ // match when we come to lazily load them.
+ if (MergeDD.ComputedVisibleConversions && !DD.ComputedVisibleConversions) {
+ DD.VisibleConversions = std::move(MergeDD.VisibleConversions);
+ DD.ComputedVisibleConversions = true;
+ }
+
+ // FIXME: Issue a diagnostic if FirstFriend doesn't match when we come to
+ // lazily load it.
+
+ if (DD.IsLambda) {
+ // FIXME: ODR-checking for merging lambdas (this happens, for instance,
+ // when they occur within the body of a function template specialization).
+ }
+
+ if (DetectedOdrViolation)
+ Reader.PendingOdrMergeFailures[DD.Definition].push_back(MergeDD.Definition);
+}
+
+void ASTDeclReader::ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update) {
+ struct CXXRecordDecl::DefinitionData *DD;
+ ASTContext &C = Reader.getContext();
+
+ // Determine whether this is a lambda closure type, so that we can
+ // allocate the appropriate DefinitionData structure.
+ bool IsLambda = Record[Idx++];
+ if (IsLambda)
+ DD = new (C) CXXRecordDecl::LambdaDefinitionData(D, nullptr, false, false,
+ LCD_None);
+ else
+ DD = new (C) struct CXXRecordDecl::DefinitionData(D);
+
+ ReadCXXDefinitionData(*DD, Record, Idx);
+
+ // We might already have a definition for this record. This can happen either
+ // because we're reading an update record, or because we've already done some
+ // merging. Either way, just merge into it.
+ CXXRecordDecl *Canon = D->getCanonicalDecl();
+ if (Canon->DefinitionData.getNotUpdated()) {
+ MergeDefinitionData(Canon, std::move(*DD));
+ D->DefinitionData = Canon->DefinitionData;
+ return;
+ }
+
+ // Mark this declaration as being a definition.
+ D->IsCompleteDefinition = true;
+ D->DefinitionData = DD;
+
+ // If this is not the first declaration or is an update record, we can have
+ // other redeclarations already. Make a note that we need to propagate the
+ // DefinitionData pointer onto them.
+ if (Update || Canon != D) {
+ Canon->DefinitionData = D->DefinitionData;
+ Reader.PendingDefinitions.insert(D);
+ }
+}
+
+ASTDeclReader::RedeclarableResult
+ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
+ RedeclarableResult Redecl = VisitRecordDeclImpl(D);
+
+ ASTContext &C = Reader.getContext();
+
+ enum CXXRecKind {
+ CXXRecNotTemplate = 0, CXXRecTemplate, CXXRecMemberSpecialization
+ };
+ switch ((CXXRecKind)Record[Idx++]) {
+ case CXXRecNotTemplate:
+ // Merged when we merge the folding set entry in the primary template.
+ if (!isa<ClassTemplateSpecializationDecl>(D))
+ mergeRedeclarable(D, Redecl);
+ break;
+ case CXXRecTemplate: {
+ // Merged when we merge the template.
+ ClassTemplateDecl *Template = ReadDeclAs<ClassTemplateDecl>(Record, Idx);
+ D->TemplateOrInstantiation = Template;
+ if (!Template->getTemplatedDecl()) {
+ // We've not actually loaded the ClassTemplateDecl yet, because we're
+ // currently being loaded as its pattern. Rely on it to set up our
+ // TypeForDecl (see VisitClassTemplateDecl).
+ //
+ // Beware: we do not yet know our canonical declaration, and may still
+ // get merged once the surrounding class template has got off the ground.
+ TypeIDForTypeDecl = 0;
+ }
+ break;
+ }
+ case CXXRecMemberSpecialization: {
+ CXXRecordDecl *RD = ReadDeclAs<CXXRecordDecl>(Record, Idx);
+ TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
+ SourceLocation POI = ReadSourceLocation(Record, Idx);
+ MemberSpecializationInfo *MSI = new (C) MemberSpecializationInfo(RD, TSK);
+ MSI->setPointOfInstantiation(POI);
+ D->TemplateOrInstantiation = MSI;
+ mergeRedeclarable(D, Redecl);
+ break;
+ }
+ }
+
+ bool WasDefinition = Record[Idx++];
+ if (WasDefinition)
+ ReadCXXRecordDefinition(D, /*Update*/false);
+ else
+ // Propagate DefinitionData pointer from the canonical declaration.
+ D->DefinitionData = D->getCanonicalDecl()->DefinitionData;
+
+ // Lazily load the key function so that we avoid deserializing every method
+ // just to compute it.
+ if (WasDefinition) {
+ DeclID KeyFn = ReadDeclID(Record, Idx);
+ if (KeyFn && D->IsCompleteDefinition)
+ // FIXME: This is wrong for the ARM ABI, where some other module may have
+ // made this function no longer be a key function. We need an update
+ // record or similar for that case.
+ C.KeyFunctions[D] = KeyFn;
+ }
+
+ return Redecl;
+}
+
+void ASTDeclReader::VisitCXXMethodDecl(CXXMethodDecl *D) {
+ VisitFunctionDecl(D);
+
+ unsigned NumOverriddenMethods = Record[Idx++];
+ if (D->isCanonicalDecl()) {
+ while (NumOverriddenMethods--) {
+ // Avoid the invariant checking of CXXMethodDecl::addOverriddenMethod,
+ // since MD may still be initializing.
+ if (CXXMethodDecl *MD = ReadDeclAs<CXXMethodDecl>(Record, Idx))
+ Reader.getContext().addOverriddenMethod(D, MD->getCanonicalDecl());
+ }
+ } else {
+ // We don't care about which declarations this used to override; we get
+ // the relevant information from the canonical declaration.
+ Idx += NumOverriddenMethods;
+ }
+}
+
+void ASTDeclReader::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
+ VisitCXXMethodDecl(D);
+
+ if (auto *CD = ReadDeclAs<CXXConstructorDecl>(Record, Idx))
+ if (D->isCanonicalDecl())
+ D->setInheritedConstructor(CD->getCanonicalDecl());
+ D->IsExplicitSpecified = Record[Idx++];
+}
+
+void ASTDeclReader::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
+ VisitCXXMethodDecl(D);
+
+ if (auto *OperatorDelete = ReadDeclAs<FunctionDecl>(Record, Idx)) {
+ auto *Canon = cast<CXXDestructorDecl>(D->getCanonicalDecl());
+ // FIXME: Check consistency if we have an old and new operator delete.
+ if (!Canon->OperatorDelete)
+ Canon->OperatorDelete = OperatorDelete;
+ }
+}
+
+void ASTDeclReader::VisitCXXConversionDecl(CXXConversionDecl *D) {
+ VisitCXXMethodDecl(D);
+ D->IsExplicitSpecified = Record[Idx++];
+}
+
+void ASTDeclReader::VisitImportDecl(ImportDecl *D) {
+ VisitDecl(D);
+ D->ImportedAndComplete.setPointer(readModule(Record, Idx));
+ D->ImportedAndComplete.setInt(Record[Idx++]);
+ SourceLocation *StoredLocs = D->getTrailingObjects<SourceLocation>();
+ for (unsigned I = 0, N = Record.back(); I != N; ++I)
+ StoredLocs[I] = ReadSourceLocation(Record, Idx);
+ ++Idx; // Skip the record entry holding the number of stored source locations.
+}
+
+void ASTDeclReader::VisitAccessSpecDecl(AccessSpecDecl *D) {
+ VisitDecl(D);
+ D->setColonLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTDeclReader::VisitFriendDecl(FriendDecl *D) {
+ VisitDecl(D);
+ if (Record[Idx++]) // hasFriendDecl
+ D->Friend = ReadDeclAs<NamedDecl>(Record, Idx);
+ else
+ D->Friend = GetTypeSourceInfo(Record, Idx);
+ for (unsigned i = 0; i != D->NumTPLists; ++i)
+ D->getTrailingObjects<TemplateParameterList *>()[i] =
+ Reader.ReadTemplateParameterList(F, Record, Idx);
+ D->NextFriend = ReadDeclID(Record, Idx);
+ D->UnsupportedFriend = (Record[Idx++] != 0);
+ D->FriendLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTDeclReader::VisitFriendTemplateDecl(FriendTemplateDecl *D) {
+ VisitDecl(D);
+ unsigned NumParams = Record[Idx++];
+ D->NumParams = NumParams;
+ D->Params = new TemplateParameterList*[NumParams];
+ for (unsigned i = 0; i != NumParams; ++i)
+ D->Params[i] = Reader.ReadTemplateParameterList(F, Record, Idx);
+ if (Record[Idx++]) // HasFriendDecl
+ D->Friend = ReadDeclAs<NamedDecl>(Record, Idx);
+ else
+ D->Friend = GetTypeSourceInfo(Record, Idx);
+ D->FriendLoc = ReadSourceLocation(Record, Idx);
+}
+
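+/// Read the templated declaration and template parameter list for a
+/// TemplateDecl, returning the DeclID of the pattern so that callers can
+/// merge template patterns.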
+DeclID ASTDeclReader::VisitTemplateDecl(TemplateDecl *D) {
+ VisitNamedDecl(D);
+
+ DeclID PatternID = ReadDeclID(Record, Idx);
+ NamedDecl *TemplatedDecl = cast_or_null<NamedDecl>(Reader.GetDecl(PatternID));
+ TemplateParameterList* TemplateParams
+ = Reader.ReadTemplateParameterList(F, Record, Idx);
+ D->init(TemplatedDecl, TemplateParams);
+
+ return PatternID;
+}
+
+ASTDeclReader::RedeclarableResult
+ASTDeclReader::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
+ RedeclarableResult Redecl = VisitRedeclarable(D);
+
+ // Make sure we've allocated the Common pointer first. We do this before
+ // VisitTemplateDecl so that getCommonPtr() can be used during initialization.
+ RedeclarableTemplateDecl *CanonD = D->getCanonicalDecl();
+ if (!CanonD->Common) {
+ CanonD->Common = CanonD->newCommon(Reader.getContext());
+ Reader.PendingDefinitions.insert(CanonD);
+ }
+ D->Common = CanonD->Common;
+
+ // If this is the first declaration of the template, fill in the information
+ // for the 'common' pointer.
+ if (ThisDeclID == Redecl.getFirstID()) {
+ if (RedeclarableTemplateDecl *RTD
+ = ReadDeclAs<RedeclarableTemplateDecl>(Record, Idx)) {
+ assert(RTD->getKind() == D->getKind() &&
+ "InstantiatedFromMemberTemplate kind mismatch");
+ D->setInstantiatedFromMemberTemplate(RTD);
+ if (Record[Idx++])
+ D->setMemberSpecialization();
+ }
+ }
+
+ DeclID PatternID = VisitTemplateDecl(D);
+ D->IdentifierNamespace = Record[Idx++];
+
+ mergeRedeclarable(D, Redecl, PatternID);
+
+ // If we merged the template with a prior declaration chain, merge the common
+ // pointer.
+ // FIXME: Actually merge here, don't just overwrite.
+ D->Common = D->getCanonicalDecl()->Common;
+
+ return Redecl;
+}
+
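+/// Build a DeclID list in the serialized format: element 0 holds the count,
+/// followed by the sorted and uniqued IDs, merged with the old list if one
+/// is given.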
+static DeclID *newDeclIDList(ASTContext &Context, DeclID *Old,
+ SmallVectorImpl<DeclID> &IDs) {
+ assert(!IDs.empty() && "no IDs to add to list");
+ if (Old) {
+ IDs.insert(IDs.end(), Old + 1, Old + 1 + Old[0]);
+ std::sort(IDs.begin(), IDs.end());
+ IDs.erase(std::unique(IDs.begin(), IDs.end()), IDs.end());
+ }
+
+ auto *Result = new (Context) DeclID[1 + IDs.size()];
+ *Result = IDs.size();
+ std::copy(IDs.begin(), IDs.end(), Result + 1);
+ return Result;
+}
+
+void ASTDeclReader::VisitClassTemplateDecl(ClassTemplateDecl *D) {
+ RedeclarableResult Redecl = VisitRedeclarableTemplateDecl(D);
+
+ if (ThisDeclID == Redecl.getFirstID()) {
+ // This ClassTemplateDecl owns a CommonPtr; read it to keep track of all of
+ // the specializations.
+ SmallVector<serialization::DeclID, 32> SpecIDs;
+ ReadDeclIDList(SpecIDs);
+
+ if (!SpecIDs.empty()) {
+ auto *CommonPtr = D->getCommonPtr();
+ CommonPtr->LazySpecializations = newDeclIDList(
+ Reader.getContext(), CommonPtr->LazySpecializations, SpecIDs);
+ }
+ }
+
+ if (D->getTemplatedDecl()->TemplateOrInstantiation) {
+ // We were loaded before our templated declaration was. We've not set up
+ // its corresponding type yet (see VisitCXXRecordDeclImpl), so reconstruct
+ // it now.
+ Reader.Context.getInjectedClassNameType(
+ D->getTemplatedDecl(), D->getInjectedClassNameSpecialization());
+ }
+}
+
+void ASTDeclReader::VisitBuiltinTemplateDecl(BuiltinTemplateDecl *D) {
+ llvm_unreachable("BuiltinTemplates are not serialized");
+}
+
+/// TODO: Unify with ClassTemplateDecl version?
+/// May require unifying ClassTemplateDecl and
+/// VarTemplateDecl beyond TemplateDecl...
+void ASTDeclReader::VisitVarTemplateDecl(VarTemplateDecl *D) {
+ RedeclarableResult Redecl = VisitRedeclarableTemplateDecl(D);
+
+ if (ThisDeclID == Redecl.getFirstID()) {
+ // This VarTemplateDecl owns a CommonPtr; read it to keep track of all of
+ // the specializations.
+ SmallVector<serialization::DeclID, 32> SpecIDs;
+ ReadDeclIDList(SpecIDs);
+
+ if (!SpecIDs.empty()) {
+ auto *CommonPtr = D->getCommonPtr();
+ CommonPtr->LazySpecializations = newDeclIDList(
+ Reader.getContext(), CommonPtr->LazySpecializations, SpecIDs);
+ }
+ }
+}
+
+ASTDeclReader::RedeclarableResult
+ASTDeclReader::VisitClassTemplateSpecializationDeclImpl(
+ ClassTemplateSpecializationDecl *D) {
+ RedeclarableResult Redecl = VisitCXXRecordDeclImpl(D);
+
+ ASTContext &C = Reader.getContext();
+ if (Decl *InstD = ReadDecl(Record, Idx)) {
+ if (ClassTemplateDecl *CTD = dyn_cast<ClassTemplateDecl>(InstD)) {
+ D->SpecializedTemplate = CTD;
+ } else {
+ SmallVector<TemplateArgument, 8> TemplArgs;
+ Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx);
+ TemplateArgumentList *ArgList
+ = TemplateArgumentList::CreateCopy(C, TemplArgs.data(),
+ TemplArgs.size());
+ ClassTemplateSpecializationDecl::SpecializedPartialSpecialization *PS
+ = new (C) ClassTemplateSpecializationDecl::
+ SpecializedPartialSpecialization();
+ PS->PartialSpecialization
+ = cast<ClassTemplatePartialSpecializationDecl>(InstD);
+ PS->TemplateArgs = ArgList;
+ D->SpecializedTemplate = PS;
+ }
+ }
+
+ SmallVector<TemplateArgument, 8> TemplArgs;
+ Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx,
+ /*Canonicalize*/ true);
+ D->TemplateArgs = TemplateArgumentList::CreateCopy(C, TemplArgs.data(),
+ TemplArgs.size());
+ D->PointOfInstantiation = ReadSourceLocation(Record, Idx);
+ D->SpecializationKind = (TemplateSpecializationKind)Record[Idx++];
+
+ bool writtenAsCanonicalDecl = Record[Idx++];
+ if (writtenAsCanonicalDecl) {
+ ClassTemplateDecl *CanonPattern = ReadDeclAs<ClassTemplateDecl>(Record,Idx);
+ if (D->isCanonicalDecl()) { // It's kept in the folding set.
+ // Set this as, or find, the canonical declaration for this specialization
+ ClassTemplateSpecializationDecl *CanonSpec;
+ if (ClassTemplatePartialSpecializationDecl *Partial =
+ dyn_cast<ClassTemplatePartialSpecializationDecl>(D)) {
+ CanonSpec = CanonPattern->getCommonPtr()->PartialSpecializations
+ .GetOrInsertNode(Partial);
+ } else {
+ CanonSpec =
+ CanonPattern->getCommonPtr()->Specializations.GetOrInsertNode(D);
+ }
+ // If there was already a canonical specialization, merge into it.
+ if (CanonSpec != D) {
+ mergeRedeclarable<TagDecl>(D, CanonSpec, Redecl);
+
+ // This declaration might be a definition. Merge with any existing
+ // definition.
+ if (auto *DDD = D->DefinitionData.getNotUpdated()) {
+ if (CanonSpec->DefinitionData.getNotUpdated())
+ MergeDefinitionData(CanonSpec, std::move(*DDD));
+ else
+ CanonSpec->DefinitionData = D->DefinitionData;
+ }
+ D->DefinitionData = CanonSpec->DefinitionData;
+ }
+ }
+ }
+
+ // Explicit info.
+ if (TypeSourceInfo *TyInfo = GetTypeSourceInfo(Record, Idx)) {
+ ClassTemplateSpecializationDecl::ExplicitSpecializationInfo *ExplicitInfo
+ = new (C) ClassTemplateSpecializationDecl::ExplicitSpecializationInfo;
+ ExplicitInfo->TypeAsWritten = TyInfo;
+ ExplicitInfo->ExternLoc = ReadSourceLocation(Record, Idx);
+ ExplicitInfo->TemplateKeywordLoc = ReadSourceLocation(Record, Idx);
+ D->ExplicitInfo = ExplicitInfo;
+ }
+
+ return Redecl;
+}
+
+void ASTDeclReader::VisitClassTemplatePartialSpecializationDecl(
+ ClassTemplatePartialSpecializationDecl *D) {
+ RedeclarableResult Redecl = VisitClassTemplateSpecializationDeclImpl(D);
+
+ D->TemplateParams = Reader.ReadTemplateParameterList(F, Record, Idx);
+ D->ArgsAsWritten = Reader.ReadASTTemplateArgumentListInfo(F, Record, Idx);
+
+ // These are read/set from/to the first declaration.
+ if (ThisDeclID == Redecl.getFirstID()) {
+ D->InstantiatedFromMember.setPointer(
+ ReadDeclAs<ClassTemplatePartialSpecializationDecl>(Record, Idx));
+ D->InstantiatedFromMember.setInt(Record[Idx++]);
+ }
+}
+
+void ASTDeclReader::VisitClassScopeFunctionSpecializationDecl(
+ ClassScopeFunctionSpecializationDecl *D) {
+ VisitDecl(D);
+ D->Specialization = ReadDeclAs<CXXMethodDecl>(Record, Idx);
+}
+
+void ASTDeclReader::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
+ RedeclarableResult Redecl = VisitRedeclarableTemplateDecl(D);
+
+ if (ThisDeclID == Redecl.getFirstID()) {
+ // This FunctionTemplateDecl owns a CommonPtr; read it.
+ SmallVector<serialization::DeclID, 32> SpecIDs;
+ ReadDeclIDList(SpecIDs);
+
+ if (!SpecIDs.empty()) {
+ auto *CommonPtr = D->getCommonPtr();
+ CommonPtr->LazySpecializations = newDeclIDList(
+ Reader.getContext(), CommonPtr->LazySpecializations, SpecIDs);
+ }
+ }
+}
+
+/// TODO: Unify with ClassTemplateSpecializationDecl version?
+/// May require unifying ClassTemplate(Partial)SpecializationDecl and
+/// VarTemplate(Partial)SpecializationDecl with a new data
+/// structure Template(Partial)SpecializationDecl, and
+/// using Template(Partial)SpecializationDecl as input type.
+ASTDeclReader::RedeclarableResult
+ASTDeclReader::VisitVarTemplateSpecializationDeclImpl(
+ VarTemplateSpecializationDecl *D) {
+ RedeclarableResult Redecl = VisitVarDeclImpl(D);
+
+ ASTContext &C = Reader.getContext();
+ if (Decl *InstD = ReadDecl(Record, Idx)) {
+ if (VarTemplateDecl *VTD = dyn_cast<VarTemplateDecl>(InstD)) {
+ D->SpecializedTemplate = VTD;
+ } else {
+ SmallVector<TemplateArgument, 8> TemplArgs;
+ Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx);
+ TemplateArgumentList *ArgList = TemplateArgumentList::CreateCopy(
+ C, TemplArgs.data(), TemplArgs.size());
+ VarTemplateSpecializationDecl::SpecializedPartialSpecialization *PS =
+ new (C)
+ VarTemplateSpecializationDecl::SpecializedPartialSpecialization();
+ PS->PartialSpecialization =
+ cast<VarTemplatePartialSpecializationDecl>(InstD);
+ PS->TemplateArgs = ArgList;
+ D->SpecializedTemplate = PS;
+ }
+ }
+
+ // Explicit info.
+ if (TypeSourceInfo *TyInfo = GetTypeSourceInfo(Record, Idx)) {
+ VarTemplateSpecializationDecl::ExplicitSpecializationInfo *ExplicitInfo =
+ new (C) VarTemplateSpecializationDecl::ExplicitSpecializationInfo;
+ ExplicitInfo->TypeAsWritten = TyInfo;
+ ExplicitInfo->ExternLoc = ReadSourceLocation(Record, Idx);
+ ExplicitInfo->TemplateKeywordLoc = ReadSourceLocation(Record, Idx);
+ D->ExplicitInfo = ExplicitInfo;
+ }
+
+ SmallVector<TemplateArgument, 8> TemplArgs;
+ Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx,
+ /*Canonicalize*/ true);
+ D->TemplateArgs =
+ TemplateArgumentList::CreateCopy(C, TemplArgs.data(), TemplArgs.size());
+ D->PointOfInstantiation = ReadSourceLocation(Record, Idx);
+ D->SpecializationKind = (TemplateSpecializationKind)Record[Idx++];
+
+ bool writtenAsCanonicalDecl = Record[Idx++];
+ if (writtenAsCanonicalDecl) {
+ VarTemplateDecl *CanonPattern = ReadDeclAs<VarTemplateDecl>(Record, Idx);
+ if (D->isCanonicalDecl()) { // It's kept in the folding set.
+ // FIXME: If it's already present, merge it.
+ if (VarTemplatePartialSpecializationDecl *Partial =
+ dyn_cast<VarTemplatePartialSpecializationDecl>(D)) {
+ CanonPattern->getCommonPtr()->PartialSpecializations
+ .GetOrInsertNode(Partial);
+ } else {
+ CanonPattern->getCommonPtr()->Specializations.GetOrInsertNode(D);
+ }
+ }
+ }
+
+ return Redecl;
+}
+
+/// TODO: Unify with ClassTemplatePartialSpecializationDecl version?
+/// May require unifying ClassTemplate(Partial)SpecializationDecl and
+/// VarTemplate(Partial)SpecializationDecl with a new data
+/// structure Template(Partial)SpecializationDecl, and
+/// using Template(Partial)SpecializationDecl as input type.
+void ASTDeclReader::VisitVarTemplatePartialSpecializationDecl(
+ VarTemplatePartialSpecializationDecl *D) {
+ RedeclarableResult Redecl = VisitVarTemplateSpecializationDeclImpl(D);
+
+ D->TemplateParams = Reader.ReadTemplateParameterList(F, Record, Idx);
+ D->ArgsAsWritten = Reader.ReadASTTemplateArgumentListInfo(F, Record, Idx);
+
+ // These are read/set from/to the first declaration.
+ if (ThisDeclID == Redecl.getFirstID()) {
+ D->InstantiatedFromMember.setPointer(
+ ReadDeclAs<VarTemplatePartialSpecializationDecl>(Record, Idx));
+ D->InstantiatedFromMember.setInt(Record[Idx++]);
+ }
+}
+
+void ASTDeclReader::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
+ VisitTypeDecl(D);
+
+ D->setDeclaredWithTypename(Record[Idx++]);
+
+ if (Record[Idx++])
+ D->setDefaultArgument(GetTypeSourceInfo(Record, Idx));
+}
+
+void ASTDeclReader::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
+ VisitDeclaratorDecl(D);
+ // TemplateParmPosition.
+ D->setDepth(Record[Idx++]);
+ D->setPosition(Record[Idx++]);
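+ // An expanded parameter pack stores its expansion types and their type
+ // source info as trailing objects, one pair per expansion.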
+ if (D->isExpandedParameterPack()) {
+ auto TypesAndInfos =
+ D->getTrailingObjects<std::pair<QualType, TypeSourceInfo *>>();
+ for (unsigned I = 0, N = D->getNumExpansionTypes(); I != N; ++I) {
+ new (&TypesAndInfos[I].first) QualType(Reader.readType(F, Record, Idx));
+ TypesAndInfos[I].second = GetTypeSourceInfo(Record, Idx);
+ }
+ } else {
+ // Rest of NonTypeTemplateParmDecl.
+ D->ParameterPack = Record[Idx++];
+ if (Record[Idx++])
+ D->setDefaultArgument(Reader.ReadExpr(F));
+ }
+}
+
+void ASTDeclReader::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
+ VisitTemplateDecl(D);
+ // TemplateParmPosition.
+ D->setDepth(Record[Idx++]);
+ D->setPosition(Record[Idx++]);
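+ // An expanded parameter pack stores one template parameter list per
+ // expansion as a trailing object.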
+ if (D->isExpandedParameterPack()) {
+ TemplateParameterList **Data =
+ D->getTrailingObjects<TemplateParameterList *>();
+ for (unsigned I = 0, N = D->getNumExpansionTemplateParameters();
+ I != N; ++I)
+ Data[I] = Reader.ReadTemplateParameterList(F, Record, Idx);
+ } else {
+ // Rest of TemplateTemplateParmDecl.
+ D->ParameterPack = Record[Idx++];
+ if (Record[Idx++])
+ D->setDefaultArgument(Reader.getContext(),
+ Reader.ReadTemplateArgumentLoc(F, Record, Idx));
+ }
+}
+
+void ASTDeclReader::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
+ VisitRedeclarableTemplateDecl(D);
+}
+
+void ASTDeclReader::VisitStaticAssertDecl(StaticAssertDecl *D) {
+ VisitDecl(D);
+ D->AssertExprAndFailed.setPointer(Reader.ReadExpr(F));
+ D->AssertExprAndFailed.setInt(Record[Idx++]);
+ D->Message = cast<StringLiteral>(Reader.ReadExpr(F));
+ D->RParenLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTDeclReader::VisitEmptyDecl(EmptyDecl *D) {
+ VisitDecl(D);
+}
+
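+/// Read the offsets of the lexical and visible storage blocks for a
+/// DeclContext from the record.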
+std::pair<uint64_t, uint64_t>
+ASTDeclReader::VisitDeclContext(DeclContext *DC) {
+ uint64_t LexicalOffset = Record[Idx++];
+ uint64_t VisibleOffset = Record[Idx++];
+ return std::make_pair(LexicalOffset, VisibleOffset);
+}
+
+template <typename T>
+ASTDeclReader::RedeclarableResult
+ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
+ DeclID FirstDeclID = ReadDeclID(Record, Idx);
+ Decl *MergeWith = nullptr;
+
+ bool IsKeyDecl = ThisDeclID == FirstDeclID;
+ bool IsFirstLocalDecl = false;
+
+ uint64_t RedeclOffset = 0;
+
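+ // The record encodes redeclaration chains in one of three forms, handled by
+ // the branches below.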
+ // A first-declaration ID of 0 indicates that this declaration was the only
+ // declaration of its entity, and is used to save space in that common case.
+ if (FirstDeclID == 0) {
+ FirstDeclID = ThisDeclID;
+ IsKeyDecl = true;
+ IsFirstLocalDecl = true;
+ } else if (unsigned N = Record[Idx++]) {
+ // This declaration was the first local declaration, but may have imported
+ // other declarations.
+ IsKeyDecl = N == 1;
+ IsFirstLocalDecl = true;
+
+ // We have some declarations that must be before us in our redeclaration
+ // chain. Read them now, and remember that we ought to merge with one of
+ // them.
+ // FIXME: Provide a known merge target to the second and subsequent such
+ // declaration.
+ for (unsigned I = 0; I != N - 1; ++I)
+ MergeWith = ReadDecl(Record, Idx/*, MergeWith*/);
+
+ RedeclOffset = Record[Idx++];
+ } else {
+ // This declaration was not the first local declaration. Read the first
+ // local declaration now, to trigger the import of other redeclarations.
+ (void)ReadDecl(Record, Idx);
+ }
+
+ T *FirstDecl = cast_or_null<T>(Reader.GetDecl(FirstDeclID));
+ if (FirstDecl != D) {
+ // We delay loading of the redeclaration chain to avoid deeply nested calls.
+ // We temporarily set the first (canonical) declaration as the previous one,
+ // which is the one that matters, and mark the real previous DeclID to be
+ // loaded and attached later on.
+ D->RedeclLink = Redeclarable<T>::PreviousDeclLink(FirstDecl);
+ D->First = FirstDecl->getCanonicalDecl();
+ }
+
+ T *DAsT = static_cast<T*>(D);
+
+ // Note that we need to load local redeclarations of this decl and build a
+ // decl chain for them. This must happen *after* we perform the preloading
+ // above; this ensures that the redeclaration chain is built in the correct
+ // order.
+ if (IsFirstLocalDecl)
+ Reader.PendingDeclChains.push_back(std::make_pair(DAsT, RedeclOffset));
+
+ return RedeclarableResult(FirstDeclID, MergeWith, IsKeyDecl);
+}
+
+/// \brief Attempts to merge the given declaration (D) with another declaration
+/// of the same entity.
+template<typename T>
+void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *DBase,
+ RedeclarableResult &Redecl,
+ DeclID TemplatePatternID) {
+ T *D = static_cast<T*>(DBase);
+
+ // If modules are not available, there is no reason to perform this merge.
+ if (!Reader.getContext().getLangOpts().Modules)
+ return;
+
+ // If we're not the canonical declaration, we don't need to merge.
+ if (!DBase->isFirstDecl())
+ return;
+
+ if (auto *Existing = Redecl.getKnownMergeTarget())
+ // We already know of an existing declaration we should merge with.
+ mergeRedeclarable(D, cast<T>(Existing), Redecl, TemplatePatternID);
+ else if (FindExistingResult ExistingRes = findExisting(D))
+ if (T *Existing = ExistingRes)
+ mergeRedeclarable(D, Existing, Redecl, TemplatePatternID);
+}
+
+/// \brief "Cast" to type T, asserting if we don't have an implicit conversion.
+/// We use this to put code in a template that will only be valid for certain
+/// instantiations.
+template<typename T> static T assert_cast(T t) { return t; }
+template<typename T> static T assert_cast(...) {
+ llvm_unreachable("bad assert_cast");
+}
+
+/// \brief Merge together the pattern declarations from two template
+/// declarations.
+void ASTDeclReader::mergeTemplatePattern(RedeclarableTemplateDecl *D,
+ RedeclarableTemplateDecl *Existing,
+ DeclID DsID, bool IsKeyDecl) {
+ auto *DPattern = D->getTemplatedDecl();
+ auto *ExistingPattern = Existing->getTemplatedDecl();
+ RedeclarableResult Result(DPattern->getCanonicalDecl()->getGlobalID(),
+ /*MergeWith*/ ExistingPattern, IsKeyDecl);
+
+ if (auto *DClass = dyn_cast<CXXRecordDecl>(DPattern)) {
+ // Merge with any existing definition.
+ // FIXME: This is duplicated in several places. Refactor.
+ auto *ExistingClass =
+ cast<CXXRecordDecl>(ExistingPattern)->getCanonicalDecl();
+ if (auto *DDD = DClass->DefinitionData.getNotUpdated()) {
+ if (ExistingClass->DefinitionData.getNotUpdated()) {
+ MergeDefinitionData(ExistingClass, std::move(*DDD));
+ } else {
+ ExistingClass->DefinitionData = DClass->DefinitionData;
+ // We may have skipped this before because we thought that DClass
+ // was the canonical declaration.
+ Reader.PendingDefinitions.insert(DClass);
+ }
+ }
+ DClass->DefinitionData = ExistingClass->DefinitionData;
+
+ return mergeRedeclarable(DClass, cast<TagDecl>(ExistingPattern),
+ Result);
+ }
+ if (auto *DFunction = dyn_cast<FunctionDecl>(DPattern))
+ return mergeRedeclarable(DFunction, cast<FunctionDecl>(ExistingPattern),
+ Result);
+ if (auto *DVar = dyn_cast<VarDecl>(DPattern))
+ return mergeRedeclarable(DVar, cast<VarDecl>(ExistingPattern), Result);
+ if (auto *DAlias = dyn_cast<TypeAliasDecl>(DPattern))
+ return mergeRedeclarable(DAlias, cast<TypedefNameDecl>(ExistingPattern),
+ Result);
+ llvm_unreachable("merged an unknown kind of redeclarable template");
+}
+
+/// \brief Attempts to merge the given declaration (D) with another declaration
+/// of the same entity.
+template<typename T>
+void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *DBase, T *Existing,
+ RedeclarableResult &Redecl,
+ DeclID TemplatePatternID) {
+ T *D = static_cast<T*>(DBase);
+ T *ExistingCanon = Existing->getCanonicalDecl();
+ T *DCanon = D->getCanonicalDecl();
+ if (ExistingCanon != DCanon) {
+ assert(DCanon->getGlobalID() == Redecl.getFirstID() &&
+ "already merged this declaration");
+
+ // Have our redeclaration link point back at the canonical declaration
+ // of the existing declaration, so that this declaration has the
+ // appropriate canonical declaration.
+ D->RedeclLink = Redeclarable<T>::PreviousDeclLink(ExistingCanon);
+ D->First = ExistingCanon;
+
+ // When we merge a namespace, update its pointer to the first namespace.
+ // We cannot have loaded any redeclarations of this declaration yet, so
+ // there's nothing else that needs to be updated.
+ if (auto *Namespace = dyn_cast<NamespaceDecl>(D))
+ Namespace->AnonOrFirstNamespaceAndInline.setPointer(
+ assert_cast<NamespaceDecl*>(ExistingCanon));
+
+ // When we merge a template, merge its pattern.
+ if (auto *DTemplate = dyn_cast<RedeclarableTemplateDecl>(D))
+ mergeTemplatePattern(
+ DTemplate, assert_cast<RedeclarableTemplateDecl*>(ExistingCanon),
+ TemplatePatternID, Redecl.isKeyDecl());
+
+ // If this declaration is a key declaration, make a note of that.
+ if (Redecl.isKeyDecl())
+ Reader.KeyDecls[ExistingCanon].push_back(Redecl.getFirstID());
+ }
+}
+
+/// \brief Attempts to merge the given declaration (D) with another declaration
+/// of the same entity, for the case where the entity is not actually
+/// redeclarable. This happens, for instance, when merging the fields of
+/// identical class definitions from two different modules.
+template<typename T>
+void ASTDeclReader::mergeMergeable(Mergeable<T> *D) {
+ // If modules are not available, there is no reason to perform this merge.
+ if (!Reader.getContext().getLangOpts().Modules)
+ return;
+
+ // ODR-based merging is only performed in C++. In C, identically-named things
+ // in different translation units are not redeclarations (but may still have
+ // compatible types).
+ if (!Reader.getContext().getLangOpts().CPlusPlus)
+ return;
+
+ if (FindExistingResult ExistingRes = findExisting(static_cast<T*>(D)))
+ if (T *Existing = ExistingRes)
+ Reader.Context.setPrimaryMergedDecl(static_cast<T*>(D),
+ Existing->getCanonicalDecl());
+}
+
+void ASTDeclReader::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) {
+ VisitDecl(D);
+ unsigned NumVars = D->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i) {
+ Vars.push_back(Reader.ReadExpr(F));
+ }
+ D->setVars(Vars);
+}
+
+//===----------------------------------------------------------------------===//
+// Attribute Reading
+//===----------------------------------------------------------------------===//
+
+/// \brief Reads attributes from the current stream position.
+void ASTReader::ReadAttributes(ModuleFile &F, AttrVec &Attrs,
+ const RecordData &Record, unsigned &Idx) {
+ for (unsigned i = 0, e = Record[Idx++]; i != e; ++i) {
+ Attr *New = nullptr;
+ attr::Kind Kind = (attr::Kind)Record[Idx++];
+ SourceRange Range = ReadSourceRange(F, Record, Idx);
+
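+ // AttrPCHRead.inc is generated by TableGen; it expands to code that
+ // switches over Kind, deserializes the attribute's payload, and assigns New.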
+#include "clang/Serialization/AttrPCHRead.inc"
+
+ assert(New && "Unable to decode attribute?");
+ Attrs.push_back(New);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// ASTReader Implementation
+//===----------------------------------------------------------------------===//
+
+/// \brief Note that we have loaded the declaration with the given
+/// Index.
+///
+/// This routine notes that this declaration has already been loaded,
+/// so that future GetDecl calls will return this declaration rather
+/// than trying to load a new declaration.
+inline void ASTReader::LoadedDecl(unsigned Index, Decl *D) {
+ assert(!DeclsLoaded[Index] && "Decl loaded twice?");
+ DeclsLoaded[Index] = D;
+}
+
+/// \brief Determine whether the consumer will be interested in seeing
+/// this declaration (via HandleTopLevelDecl).
+///
+/// This routine should return true for anything that might affect
+/// code generation, e.g., inline function definitions, Objective-C
+/// declarations with metadata, etc.
+static bool isConsumerInterestedIn(Decl *D, bool HasBody) {
+ // An ObjCMethodDecl is never considered "interesting" because its
+ // implementation container always is.
+
+ if (isa<FileScopeAsmDecl>(D) ||
+ isa<ObjCProtocolDecl>(D) ||
+ isa<ObjCImplDecl>(D) ||
+ isa<ImportDecl>(D) ||
+ isa<OMPThreadPrivateDecl>(D))
+ return true;
+ if (VarDecl *Var = dyn_cast<VarDecl>(D))
+ return Var->isFileVarDecl() &&
+ Var->isThisDeclarationADefinition() == VarDecl::Definition;
+ if (FunctionDecl *Func = dyn_cast<FunctionDecl>(D))
+ return Func->doesThisDeclarationHaveABody() || HasBody;
+
+ return false;
+}
+
+/// \brief Get the correct cursor and offset for loading a declaration.
+ASTReader::RecordLocation
+ASTReader::DeclCursorForID(DeclID ID, unsigned &RawLocation) {
+ // See if there's an override.
+ DeclReplacementMap::iterator It = ReplacedDecls.find(ID);
+ if (It != ReplacedDecls.end()) {
+ RawLocation = It->second.RawLoc;
+ return RecordLocation(It->second.Mod, It->second.Offset);
+ }
+
+ GlobalDeclMapType::iterator I = GlobalDeclMap.find(ID);
+ assert(I != GlobalDeclMap.end() && "Corrupted global declaration map");
+ ModuleFile *M = I->second;
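+ // Decl offsets are stored per module, indexed by the ID relative to the
+ // module's base decl ID, past the predefined declarations.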
+ const DeclOffset &DOffs =
+ M->DeclOffsets[ID - M->BaseDeclID - NUM_PREDEF_DECL_IDS];
+ RawLocation = DOffs.Loc;
+ return RecordLocation(M, DOffs.BitOffset);
+}
+
+ASTReader::RecordLocation ASTReader::getLocalBitOffset(uint64_t GlobalOffset) {
+ ContinuousRangeMap<uint64_t, ModuleFile*, 4>::iterator I
+ = GlobalBitOffsetsMap.find(GlobalOffset);
+
+ assert(I != GlobalBitOffsetsMap.end() && "Corrupted global bit offsets map");
+ return RecordLocation(I->second, GlobalOffset - I->second->GlobalBitOffset);
+}
+
+uint64_t ASTReader::getGlobalBitOffset(ModuleFile &M, uint32_t LocalOffset) {
+ return LocalOffset + M.GlobalBitOffset;
+}
+
+static bool isSameTemplateParameterList(const TemplateParameterList *X,
+ const TemplateParameterList *Y);
+
+/// \brief Determine whether two template parameters are similar enough
+/// that they may be used in declarations of the same template.
+static bool isSameTemplateParameter(const NamedDecl *X,
+ const NamedDecl *Y) {
+ if (X->getKind() != Y->getKind())
+ return false;
+
+ if (const TemplateTypeParmDecl *TX = dyn_cast<TemplateTypeParmDecl>(X)) {
+ const TemplateTypeParmDecl *TY = cast<TemplateTypeParmDecl>(Y);
+ return TX->isParameterPack() == TY->isParameterPack();
+ }
+
+ if (const NonTypeTemplateParmDecl *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
+ const NonTypeTemplateParmDecl *TY = cast<NonTypeTemplateParmDecl>(Y);
+ return TX->isParameterPack() == TY->isParameterPack() &&
+ TX->getASTContext().hasSameType(TX->getType(), TY->getType());
+ }
+
+ const TemplateTemplateParmDecl *TX = cast<TemplateTemplateParmDecl>(X);
+ const TemplateTemplateParmDecl *TY = cast<TemplateTemplateParmDecl>(Y);
+ return TX->isParameterPack() == TY->isParameterPack() &&
+ isSameTemplateParameterList(TX->getTemplateParameters(),
+ TY->getTemplateParameters());
+}
+
+static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) {
+ if (auto *NS = X->getAsNamespace())
+ return NS;
+ if (auto *NAS = X->getAsNamespaceAlias())
+ return NAS->getNamespace();
+ return nullptr;
+}
+
+static bool isSameQualifier(const NestedNameSpecifier *X,
+ const NestedNameSpecifier *Y) {
+ if (auto *NSX = getNamespace(X)) {
+ auto *NSY = getNamespace(Y);
+ if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl())
+ return false;
+ } else if (X->getKind() != Y->getKind())
+ return false;
+
+ // FIXME: For namespaces and types, we're permitted to check that the entity
+ // is named via the same tokens. We should probably do so.
+ switch (X->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ if (X->getAsIdentifier() != Y->getAsIdentifier())
+ return false;
+ break;
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ // We've already checked that we named the same namespace.
+ break;
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ if (X->getAsType()->getCanonicalTypeInternal() !=
+ Y->getAsType()->getCanonicalTypeInternal())
+ return false;
+ break;
+ case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Super:
+ return true;
+ }
+
+ // Recurse into earlier portion of NNS, if any.
+ auto *PX = X->getPrefix();
+ auto *PY = Y->getPrefix();
+ if (PX && PY)
+ return isSameQualifier(PX, PY);
+ return !PX && !PY;
+}
+
+/// \brief Determine whether two template parameter lists are similar enough
+/// that they may be used in declarations of the same template.
+static bool isSameTemplateParameterList(const TemplateParameterList *X,
+ const TemplateParameterList *Y) {
+ if (X->size() != Y->size())
+ return false;
+
+ for (unsigned I = 0, N = X->size(); I != N; ++I)
+ if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I)))
+ return false;
+
+ return true;
+}
+
+/// \brief Determine whether the two declarations refer to the same entity.
+static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
+ assert(X->getDeclName() == Y->getDeclName() && "Declaration name mismatch!");
+
+ if (X == Y)
+ return true;
+
+ // Must be in the same context.
+ if (!X->getDeclContext()->getRedeclContext()->Equals(
+ Y->getDeclContext()->getRedeclContext()))
+ return false;
+
+ // Two typedefs refer to the same entity if they have the same underlying
+ // type.
+ if (TypedefNameDecl *TypedefX = dyn_cast<TypedefNameDecl>(X))
+ if (TypedefNameDecl *TypedefY = dyn_cast<TypedefNameDecl>(Y))
+ return X->getASTContext().hasSameType(TypedefX->getUnderlyingType(),
+ TypedefY->getUnderlyingType());
+
+ // Must have the same kind.
+ if (X->getKind() != Y->getKind())
+ return false;
+
+ // Objective-C classes and protocols with the same name always match.
+ if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X))
+ return true;
+
+ if (isa<ClassTemplateSpecializationDecl>(X)) {
+ // No need to handle these here: we merge them when adding them to the
+ // template.
+ return false;
+ }
+
+ // Compatible tags match.
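+ // For example, a 'struct X' in one module and a 'class X' in another are
+ // treated as declaring the same entity.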
+ if (TagDecl *TagX = dyn_cast<TagDecl>(X)) {
+ TagDecl *TagY = cast<TagDecl>(Y);
+ return (TagX->getTagKind() == TagY->getTagKind()) ||
+ ((TagX->getTagKind() == TTK_Struct || TagX->getTagKind() == TTK_Class ||
+ TagX->getTagKind() == TTK_Interface) &&
+ (TagY->getTagKind() == TTK_Struct || TagY->getTagKind() == TTK_Class ||
+ TagY->getTagKind() == TTK_Interface));
+ }
+
+ // Functions with the same type and linkage match.
+ // FIXME: This needs to cope with merging of prototyped/non-prototyped
+ // functions, etc.
+ if (FunctionDecl *FuncX = dyn_cast<FunctionDecl>(X)) {
+ FunctionDecl *FuncY = cast<FunctionDecl>(Y);
+ return (FuncX->getLinkageInternal() == FuncY->getLinkageInternal()) &&
+ FuncX->getASTContext().hasSameType(FuncX->getType(), FuncY->getType());
+ }
+
+ // Variables with the same type and linkage match.
+ if (VarDecl *VarX = dyn_cast<VarDecl>(X)) {
+ VarDecl *VarY = cast<VarDecl>(Y);
+ return (VarX->getLinkageInternal() == VarY->getLinkageInternal()) &&
+ VarX->getASTContext().hasSameType(VarX->getType(), VarY->getType());
+ }
+
+ // Namespaces with the same name and inlinedness match.
+ if (NamespaceDecl *NamespaceX = dyn_cast<NamespaceDecl>(X)) {
+ NamespaceDecl *NamespaceY = cast<NamespaceDecl>(Y);
+ return NamespaceX->isInline() == NamespaceY->isInline();
+ }
+
+ // Identical template names and kinds match if their template parameter lists
+ // and patterns match.
+ if (TemplateDecl *TemplateX = dyn_cast<TemplateDecl>(X)) {
+ TemplateDecl *TemplateY = cast<TemplateDecl>(Y);
+ return isSameEntity(TemplateX->getTemplatedDecl(),
+ TemplateY->getTemplatedDecl()) &&
+ isSameTemplateParameterList(TemplateX->getTemplateParameters(),
+ TemplateY->getTemplateParameters());
+ }
+
+ // Fields with the same name and the same type match.
+ if (FieldDecl *FDX = dyn_cast<FieldDecl>(X)) {
+ FieldDecl *FDY = cast<FieldDecl>(Y);
+ // FIXME: Also check the bitwidth is odr-equivalent, if any.
+ return X->getASTContext().hasSameType(FDX->getType(), FDY->getType());
+ }
+
+ // Indirect fields with the same target field match.
+ if (auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) {
+ auto *IFDY = cast<IndirectFieldDecl>(Y);
+ return IFDX->getAnonField()->getCanonicalDecl() ==
+ IFDY->getAnonField()->getCanonicalDecl();
+ }
+
+ // Enumerators with the same name match.
+ if (isa<EnumConstantDecl>(X))
+ // FIXME: Also check the value is odr-equivalent.
+ return true;
+
+ // Using shadow declarations with the same target match.
+ if (UsingShadowDecl *USX = dyn_cast<UsingShadowDecl>(X)) {
+ UsingShadowDecl *USY = cast<UsingShadowDecl>(Y);
+ return USX->getTargetDecl() == USY->getTargetDecl();
+ }
+
+ // Using declarations with the same qualifier match. (We already know that
+ // the name matches.)
+ if (auto *UX = dyn_cast<UsingDecl>(X)) {
+ auto *UY = cast<UsingDecl>(Y);
+ return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
+ UX->hasTypename() == UY->hasTypename() &&
+ UX->isAccessDeclaration() == UY->isAccessDeclaration();
+ }
+ if (auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) {
+ auto *UY = cast<UnresolvedUsingValueDecl>(Y);
+ return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
+ UX->isAccessDeclaration() == UY->isAccessDeclaration();
+ }
+ if (auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X))
+ return isSameQualifier(
+ UX->getQualifier(),
+ cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier());
+
+ // Namespace alias definitions with the same target match.
+ if (auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) {
+ auto *NAY = cast<NamespaceAliasDecl>(Y);
+ return NAX->getNamespace()->Equals(NAY->getNamespace());
+ }
+
+ return false;
+}
+
+/// Find the context in which we should search for previous declarations when
+/// looking for declarations to merge.
+DeclContext *ASTDeclReader::getPrimaryContextForMerging(ASTReader &Reader,
+ DeclContext *DC) {
+ if (NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC))
+ return ND->getOriginalNamespace();
+
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) {
+ // Try to dig out the definition.
+ auto *DD = RD->DefinitionData.getNotUpdated();
+ if (!DD)
+ DD = RD->getCanonicalDecl()->DefinitionData.getNotUpdated();
+
+ // If there's no definition yet, then DC's definition is added by an update
+ // record, but we've not yet loaded that update record. In this case, we
+ // commit to DC being the canonical definition now, and will fix this when
+ // we load the update record.
+ if (!DD) {
+ DD = new (Reader.Context) struct CXXRecordDecl::DefinitionData(RD);
+ RD->IsCompleteDefinition = true;
+ RD->DefinitionData = DD;
+ RD->getCanonicalDecl()->DefinitionData = DD;
+
+ // Track that we did this horrible thing so that we can fix it later.
+ Reader.PendingFakeDefinitionData.insert(
+ std::make_pair(DD, ASTReader::PendingFakeDefinitionKind::Fake));
+ }
+
+ return DD->Definition;
+ }
+
+ if (EnumDecl *ED = dyn_cast<EnumDecl>(DC))
+ return ED->getASTContext().getLangOpts().CPlusPlus? ED->getDefinition()
+ : nullptr;
+
+ // We can see the TU here only if we have no Sema object. In that case,
+ // there's no TU scope to look in, so using the DC alone is sufficient.
+ if (auto *TU = dyn_cast<TranslationUnitDecl>(DC))
+ return TU;
+
+ return nullptr;
+}
+
+ASTDeclReader::FindExistingResult::~FindExistingResult() {
+ // Record that we had a typedef name for linkage whether or not we merge
+ // with that declaration.
+ if (TypedefNameForLinkage) {
+ DeclContext *DC = New->getDeclContext()->getRedeclContext();
+ Reader.ImportedTypedefNamesForLinkage.insert(
+ std::make_pair(std::make_pair(DC, TypedefNameForLinkage), New));
+ return;
+ }
+
+ if (!AddResult || Existing)
+ return;
+
+ DeclarationName Name = New->getDeclName();
+ DeclContext *DC = New->getDeclContext()->getRedeclContext();
+ if (needsAnonymousDeclarationNumber(New)) {
+ setAnonymousDeclForMerging(Reader, New->getLexicalDeclContext(),
+ AnonymousDeclNumber, New);
+ } else if (DC->isTranslationUnit() && Reader.SemaObj &&
+ !Reader.getContext().getLangOpts().CPlusPlus) {
+ if (Reader.SemaObj->IdResolver.tryAddTopLevelDecl(New, Name))
+ Reader.PendingFakeLookupResults[Name.getAsIdentifierInfo()]
+ .push_back(New);
+ } else if (DeclContext *MergeDC = getPrimaryContextForMerging(Reader, DC)) {
+ // Add the declaration to its redeclaration context so later merging
+ // lookups will find it.
+ MergeDC->makeDeclVisibleInContextImpl(New, /*Internal*/true);
+ }
+}
+
+/// Find the declaration that should be merged into, given the declaration found
+/// by name lookup. If we're merging an anonymous declaration within a typedef,
+/// we need a matching typedef, and we merge with the type inside it.
+static NamedDecl *getDeclForMerging(NamedDecl *Found,
+ bool IsTypedefNameForLinkage) {
+ if (!IsTypedefNameForLinkage)
+ return Found;
+
+ // If we found a typedef declaration that gives a name to some other
+ // declaration, then we want that inner declaration. Declarations from
+ // AST files are handled via ImportedTypedefNamesForLinkage.
+ if (Found->isFromASTFile())
+ return nullptr;
+
+ if (auto *TND = dyn_cast<TypedefNameDecl>(Found))
+ return TND->getAnonDeclWithTypedefName();
+
+ return nullptr;
+}
+
+NamedDecl *ASTDeclReader::getAnonymousDeclForMerging(ASTReader &Reader,
+ DeclContext *DC,
+ unsigned Index) {
+ // If the lexical context has been merged, look into the now-canonical
+ // definition.
+ if (auto *Merged = Reader.MergedDeclContexts.lookup(DC))
+ DC = Merged;
+
+ // If we've seen this before, return the canonical declaration.
+ auto &Previous = Reader.AnonymousDeclarationsForMerging[DC];
+ if (Index < Previous.size() && Previous[Index])
+ return Previous[Index];
+
+ // If this is the first time, but we have parsed a declaration of the context,
+ // build the anonymous declaration list from the parsed declaration.
+ if (!cast<Decl>(DC)->isFromASTFile()) {
+ numberAnonymousDeclsWithin(DC, [&](NamedDecl *ND, unsigned Number) {
+ if (Previous.size() == Number)
+ Previous.push_back(cast<NamedDecl>(ND->getCanonicalDecl()));
+ else
+ Previous[Number] = cast<NamedDecl>(ND->getCanonicalDecl());
+ });
+ }
+
+ return Index < Previous.size() ? Previous[Index] : nullptr;
+}
+
+void ASTDeclReader::setAnonymousDeclForMerging(ASTReader &Reader,
+ DeclContext *DC, unsigned Index,
+ NamedDecl *D) {
+ if (auto *Merged = Reader.MergedDeclContexts.lookup(DC))
+ DC = Merged;
+
+ auto &Previous = Reader.AnonymousDeclarationsForMerging[DC];
+ if (Index >= Previous.size())
+ Previous.resize(Index + 1);
+ if (!Previous[Index])
+ Previous[Index] = D;
+}
+
+ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) {
+ DeclarationName Name = TypedefNameForLinkage ? TypedefNameForLinkage
+ : D->getDeclName();
+
+ if (!Name && !needsAnonymousDeclarationNumber(D)) {
+ // Don't bother trying to find unnamed declarations that are in
+ // unmergeable contexts.
+ FindExistingResult Result(Reader, D, /*Existing=*/nullptr,
+ AnonymousDeclNumber, TypedefNameForLinkage);
+ Result.suppress();
+ return Result;
+ }
+
+ DeclContext *DC = D->getDeclContext()->getRedeclContext();
+ if (TypedefNameForLinkage) {
+ auto It = Reader.ImportedTypedefNamesForLinkage.find(
+ std::make_pair(DC, TypedefNameForLinkage));
+ if (It != Reader.ImportedTypedefNamesForLinkage.end())
+ if (isSameEntity(It->second, D))
+ return FindExistingResult(Reader, D, It->second, AnonymousDeclNumber,
+ TypedefNameForLinkage);
+ // Go on to check in other places in case an existing typedef name
+ // was not imported.
+ }
+
+ if (needsAnonymousDeclarationNumber(D)) {
+ // This is an anonymous declaration that we may need to merge. Look it up
+ // in its context by number.
+ if (auto *Existing = getAnonymousDeclForMerging(
+ Reader, D->getLexicalDeclContext(), AnonymousDeclNumber))
+ if (isSameEntity(Existing, D))
+ return FindExistingResult(Reader, D, Existing, AnonymousDeclNumber,
+ TypedefNameForLinkage);
+ } else if (DC->isTranslationUnit() && Reader.SemaObj &&
+ !Reader.getContext().getLangOpts().CPlusPlus) {
+ IdentifierResolver &IdResolver = Reader.SemaObj->IdResolver;
+
+ // Temporarily consider the identifier to be up-to-date. We don't want to
+ // cause additional lookups here.
+ class UpToDateIdentifierRAII {
+ IdentifierInfo *II;
+ bool WasOutOfDate;
+
+ public:
+ explicit UpToDateIdentifierRAII(IdentifierInfo *II)
+ : II(II), WasOutOfDate(false) {
+ if (II) {
+ WasOutOfDate = II->isOutOfDate();
+ if (WasOutOfDate)
+ II->setOutOfDate(false);
+ }
+ }
+
+ ~UpToDateIdentifierRAII() {
+ if (WasOutOfDate)
+ II->setOutOfDate(true);
+ }
+ } UpToDate(Name.getAsIdentifierInfo());
+
+ for (IdentifierResolver::iterator I = IdResolver.begin(Name),
+ IEnd = IdResolver.end();
+ I != IEnd; ++I) {
+ if (NamedDecl *Existing = getDeclForMerging(*I, TypedefNameForLinkage))
+ if (isSameEntity(Existing, D))
+ return FindExistingResult(Reader, D, Existing, AnonymousDeclNumber,
+ TypedefNameForLinkage);
+ }
+ } else if (DeclContext *MergeDC = getPrimaryContextForMerging(Reader, DC)) {
+ DeclContext::lookup_result R = MergeDC->noload_lookup(Name);
+ for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E; ++I) {
+ if (NamedDecl *Existing = getDeclForMerging(*I, TypedefNameForLinkage))
+ if (isSameEntity(Existing, D))
+ return FindExistingResult(Reader, D, Existing, AnonymousDeclNumber,
+ TypedefNameForLinkage);
+ }
+ } else {
+ // Not in a mergeable context.
+ return FindExistingResult(Reader);
+ }
+
+ // If this declaration is from a merged context, make a note that we need to
+ // check that the canonical definition of that context contains the decl.
+ //
+ // FIXME: We should do something similar if we merge two definitions of the
+ // same template specialization into the same CXXRecordDecl.
+ auto MergedDCIt = Reader.MergedDeclContexts.find(D->getLexicalDeclContext());
+ if (MergedDCIt != Reader.MergedDeclContexts.end() &&
+ MergedDCIt->second == D->getDeclContext())
+ Reader.PendingOdrMergeChecks.push_back(D);
+
+ return FindExistingResult(Reader, D, /*Existing=*/nullptr,
+ AnonymousDeclNumber, TypedefNameForLinkage);
+}
+
+template<typename DeclT>
+Decl *ASTDeclReader::getMostRecentDeclImpl(Redeclarable<DeclT> *D) {
+ return D->RedeclLink.getLatestNotUpdated();
+}
+Decl *ASTDeclReader::getMostRecentDeclImpl(...) {
+ llvm_unreachable("getMostRecentDecl on non-redeclarable declaration");
+}
+
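+// getMostRecentDecl, attachPreviousDecl, attachLatestDecl, and
+// markIncompleteDeclChain below all use the same dispatch idiom: a switch
+// generated from DeclNodes.inc casts to the concrete Decl subclass, and
+// overload resolution then picks either the Redeclarable<DeclT> template or
+// the "..." fallback for declaration kinds that are not redeclarable.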
+Decl *ASTDeclReader::getMostRecentDecl(Decl *D) {
+ assert(D);
+
+ switch (D->getKind()) {
+#define ABSTRACT_DECL(TYPE)
+#define DECL(TYPE, BASE) \
+ case Decl::TYPE: \
+ return getMostRecentDeclImpl(cast<TYPE##Decl>(D));
+#include "clang/AST/DeclNodes.inc"
+ }
+ llvm_unreachable("unknown decl kind");
+}
+
+Decl *ASTReader::getMostRecentExistingDecl(Decl *D) {
+ return ASTDeclReader::getMostRecentDecl(D->getCanonicalDecl());
+}
+
+template<typename DeclT>
+void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader,
+ Redeclarable<DeclT> *D,
+ Decl *Previous, Decl *Canon) {
+ D->RedeclLink.setPrevious(cast<DeclT>(Previous));
+ D->First = cast<DeclT>(Previous)->First;
+}
+
+namespace clang {
+template<>
+void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader,
+ Redeclarable<FunctionDecl> *D,
+ Decl *Previous, Decl *Canon) {
+ FunctionDecl *FD = static_cast<FunctionDecl*>(D);
+ FunctionDecl *PrevFD = cast<FunctionDecl>(Previous);
+
+ FD->RedeclLink.setPrevious(PrevFD);
+ FD->First = PrevFD->First;
+
+ // If the previous declaration is an inline function declaration, then this
+ // declaration is too.
+ if (PrevFD->IsInline != FD->IsInline) {
+ // FIXME: [dcl.fct.spec]p4:
+ // If a function with external linkage is declared inline in one
+ // translation unit, it shall be declared inline in all translation
+ // units in which it appears.
+ //
+ // Be careful of this case:
+ //
+ // module A:
+ // template<typename T> struct X { void f(); };
+ // template<typename T> inline void X<T>::f() {}
+ //
+ // module B instantiates the declaration of X<int>::f
+ // module C instantiates the definition of X<int>::f
+ //
+    // If modules B and C are merged, we do not have a violation of this rule.
+ FD->IsInline = true;
+ }
+
+ // If we need to propagate an exception specification along the redecl
+ // chain, make a note of that so that we can do so later.
+ auto *FPT = FD->getType()->getAs<FunctionProtoType>();
+ auto *PrevFPT = PrevFD->getType()->getAs<FunctionProtoType>();
+ if (FPT && PrevFPT) {
+ bool IsUnresolved = isUnresolvedExceptionSpec(FPT->getExceptionSpecType());
+ bool WasUnresolved =
+ isUnresolvedExceptionSpec(PrevFPT->getExceptionSpecType());
+ if (IsUnresolved != WasUnresolved)
+ Reader.PendingExceptionSpecUpdates.insert(
+ std::make_pair(Canon, IsUnresolved ? PrevFD : FD));
+ }
+}
+} // end namespace clang
+
+void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader, ...) {
+ llvm_unreachable("attachPreviousDecl on non-redeclarable declaration");
+}
+
+/// Inherit the default template argument from \p From to \p To. Returns
+/// \c false if there is no default template argument for \p From.
+template <typename ParmDecl>
+static bool inheritDefaultTemplateArgument(ASTContext &Context, ParmDecl *From,
+ Decl *ToD) {
+ auto *To = cast<ParmDecl>(ToD);
+ if (!From->hasDefaultArgument())
+ return false;
+ To->setInheritedDefaultArgument(Context, From);
+ return true;
+}
+
+static void inheritDefaultTemplateArguments(ASTContext &Context,
+ TemplateDecl *From,
+ TemplateDecl *To) {
+ auto *FromTP = From->getTemplateParameters();
+ auto *ToTP = To->getTemplateParameters();
+ assert(FromTP->size() == ToTP->size() && "merged mismatched templates?");
+
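+  // Walk the parameters from last to first, stopping at the first one that
+  // has no default argument to inherit: since default template arguments
+  // normally trail, an earlier default is of no use once a later one is
+  // missing.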
+ for (unsigned I = 0, N = FromTP->size(); I != N; ++I) {
+ NamedDecl *FromParam = FromTP->getParam(N - I - 1);
+ NamedDecl *ToParam = ToTP->getParam(N - I - 1);
+
+ if (auto *FTTP = dyn_cast<TemplateTypeParmDecl>(FromParam)) {
+ if (!inheritDefaultTemplateArgument(Context, FTTP, ToParam))
+ break;
+ } else if (auto *FNTTP = dyn_cast<NonTypeTemplateParmDecl>(FromParam)) {
+ if (!inheritDefaultTemplateArgument(Context, FNTTP, ToParam))
+ break;
+ } else {
+ if (!inheritDefaultTemplateArgument(
+ Context, cast<TemplateTemplateParmDecl>(FromParam), ToParam))
+ break;
+ }
+ }
+}
+
+void ASTDeclReader::attachPreviousDecl(ASTReader &Reader, Decl *D,
+ Decl *Previous, Decl *Canon) {
+ assert(D && Previous);
+
+ switch (D->getKind()) {
+#define ABSTRACT_DECL(TYPE)
+#define DECL(TYPE, BASE) \
+ case Decl::TYPE: \
+ attachPreviousDeclImpl(Reader, cast<TYPE##Decl>(D), Previous, Canon); \
+ break;
+#include "clang/AST/DeclNodes.inc"
+ }
+
+ // If the declaration was visible in one module, a redeclaration of it in
+ // another module remains visible even if it wouldn't be visible by itself.
+ //
+ // FIXME: In this case, the declaration should only be visible if a module
+ // that makes it visible has been imported.
+ D->IdentifierNamespace |=
+ Previous->IdentifierNamespace &
+ (Decl::IDNS_Ordinary | Decl::IDNS_Tag | Decl::IDNS_Type);
+
+ // If the previous declaration is marked as used, then this declaration should
+ // be too.
+ if (Previous->Used)
+ D->Used = true;
+
+ // If the declaration declares a template, it may inherit default arguments
+ // from the previous declaration.
+ if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D))
+ inheritDefaultTemplateArguments(Reader.getContext(),
+ cast<TemplateDecl>(Previous), TD);
+}
+
+template<typename DeclT>
+void ASTDeclReader::attachLatestDeclImpl(Redeclarable<DeclT> *D, Decl *Latest) {
+ D->RedeclLink.setLatest(cast<DeclT>(Latest));
+}
+void ASTDeclReader::attachLatestDeclImpl(...) {
+ llvm_unreachable("attachLatestDecl on non-redeclarable declaration");
+}
+
+void ASTDeclReader::attachLatestDecl(Decl *D, Decl *Latest) {
+ assert(D && Latest);
+
+ switch (D->getKind()) {
+#define ABSTRACT_DECL(TYPE)
+#define DECL(TYPE, BASE) \
+ case Decl::TYPE: \
+ attachLatestDeclImpl(cast<TYPE##Decl>(D), Latest); \
+ break;
+#include "clang/AST/DeclNodes.inc"
+ }
+}
+
+template<typename DeclT>
+void ASTDeclReader::markIncompleteDeclChainImpl(Redeclarable<DeclT> *D) {
+ D->RedeclLink.markIncomplete();
+}
+void ASTDeclReader::markIncompleteDeclChainImpl(...) {
+ llvm_unreachable("markIncompleteDeclChain on non-redeclarable declaration");
+}
+
+void ASTReader::markIncompleteDeclChain(Decl *D) {
+ switch (D->getKind()) {
+#define ABSTRACT_DECL(TYPE)
+#define DECL(TYPE, BASE) \
+ case Decl::TYPE: \
+ ASTDeclReader::markIncompleteDeclChainImpl(cast<TYPE##Decl>(D)); \
+ break;
+#include "clang/AST/DeclNodes.inc"
+ }
+}
+
+/// \brief Read the declaration at the given offset from the AST file.
+Decl *ASTReader::ReadDeclRecord(DeclID ID) {
+ unsigned Index = ID - NUM_PREDEF_DECL_IDS;
+ unsigned RawLocation = 0;
+ RecordLocation Loc = DeclCursorForID(ID, RawLocation);
+ llvm::BitstreamCursor &DeclsCursor = Loc.F->DeclsCursor;
+ // Keep track of where we are in the stream, then jump back there
+ // after reading this declaration.
+ SavedStreamPosition SavedPosition(DeclsCursor);
+
+ ReadingKindTracker ReadingKind(Read_Decl, *this);
+
+ // Note that we are loading a declaration record.
+ Deserializing ADecl(this);
+
+ DeclsCursor.JumpToBit(Loc.Offset);
+ RecordData Record;
+ unsigned Code = DeclsCursor.ReadCode();
+ unsigned Idx = 0;
+  ASTDeclReader Reader(*this, *Loc.F, ID, RawLocation, Record, Idx);
+
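+  // Allocate an empty Decl of the kind named by the record; its fields are
+  // filled in by Reader.Visit(D) below.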
+ Decl *D = nullptr;
+ switch ((DeclCode)DeclsCursor.readRecord(Code, Record)) {
+ case DECL_CONTEXT_LEXICAL:
+ case DECL_CONTEXT_VISIBLE:
+ llvm_unreachable("Record cannot be de-serialized with ReadDeclRecord");
+ case DECL_TYPEDEF:
+ D = TypedefDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_TYPEALIAS:
+ D = TypeAliasDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_ENUM:
+ D = EnumDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_RECORD:
+ D = RecordDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_ENUM_CONSTANT:
+ D = EnumConstantDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_FUNCTION:
+ D = FunctionDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_LINKAGE_SPEC:
+ D = LinkageSpecDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_LABEL:
+ D = LabelDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_NAMESPACE:
+ D = NamespaceDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_NAMESPACE_ALIAS:
+ D = NamespaceAliasDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_USING:
+ D = UsingDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_USING_SHADOW:
+ D = UsingShadowDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_USING_DIRECTIVE:
+ D = UsingDirectiveDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_UNRESOLVED_USING_VALUE:
+ D = UnresolvedUsingValueDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_UNRESOLVED_USING_TYPENAME:
+ D = UnresolvedUsingTypenameDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_CXX_RECORD:
+ D = CXXRecordDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_CXX_METHOD:
+ D = CXXMethodDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_CXX_CONSTRUCTOR:
+ D = CXXConstructorDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_CXX_DESTRUCTOR:
+ D = CXXDestructorDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_CXX_CONVERSION:
+ D = CXXConversionDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_ACCESS_SPEC:
+ D = AccessSpecDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_FRIEND:
+ D = FriendDecl::CreateDeserialized(Context, ID, Record[Idx++]);
+ break;
+ case DECL_FRIEND_TEMPLATE:
+ D = FriendTemplateDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_CLASS_TEMPLATE:
+ D = ClassTemplateDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_CLASS_TEMPLATE_SPECIALIZATION:
+ D = ClassTemplateSpecializationDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_CLASS_TEMPLATE_PARTIAL_SPECIALIZATION:
+ D = ClassTemplatePartialSpecializationDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_VAR_TEMPLATE:
+ D = VarTemplateDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_VAR_TEMPLATE_SPECIALIZATION:
+ D = VarTemplateSpecializationDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_VAR_TEMPLATE_PARTIAL_SPECIALIZATION:
+ D = VarTemplatePartialSpecializationDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_CLASS_SCOPE_FUNCTION_SPECIALIZATION:
+ D = ClassScopeFunctionSpecializationDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_FUNCTION_TEMPLATE:
+ D = FunctionTemplateDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_TEMPLATE_TYPE_PARM:
+ D = TemplateTypeParmDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_NON_TYPE_TEMPLATE_PARM:
+ D = NonTypeTemplateParmDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK:
+ D = NonTypeTemplateParmDecl::CreateDeserialized(Context, ID, Record[Idx++]);
+ break;
+ case DECL_TEMPLATE_TEMPLATE_PARM:
+ D = TemplateTemplateParmDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_EXPANDED_TEMPLATE_TEMPLATE_PARM_PACK:
+ D = TemplateTemplateParmDecl::CreateDeserialized(Context, ID,
+ Record[Idx++]);
+ break;
+ case DECL_TYPE_ALIAS_TEMPLATE:
+ D = TypeAliasTemplateDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_STATIC_ASSERT:
+ D = StaticAssertDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_METHOD:
+ D = ObjCMethodDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_INTERFACE:
+ D = ObjCInterfaceDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_IVAR:
+ D = ObjCIvarDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_PROTOCOL:
+ D = ObjCProtocolDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_AT_DEFS_FIELD:
+ D = ObjCAtDefsFieldDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_CATEGORY:
+ D = ObjCCategoryDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_CATEGORY_IMPL:
+ D = ObjCCategoryImplDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_IMPLEMENTATION:
+ D = ObjCImplementationDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_COMPATIBLE_ALIAS:
+ D = ObjCCompatibleAliasDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_PROPERTY:
+ D = ObjCPropertyDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_PROPERTY_IMPL:
+ D = ObjCPropertyImplDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_FIELD:
+ D = FieldDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_INDIRECTFIELD:
+ D = IndirectFieldDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_VAR:
+ D = VarDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_IMPLICIT_PARAM:
+ D = ImplicitParamDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_PARM_VAR:
+ D = ParmVarDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_FILE_SCOPE_ASM:
+ D = FileScopeAsmDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_BLOCK:
+ D = BlockDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_MS_PROPERTY:
+ D = MSPropertyDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_CAPTURED:
+ D = CapturedDecl::CreateDeserialized(Context, ID, Record[Idx++]);
+ break;
+ case DECL_CXX_BASE_SPECIFIERS:
+ Error("attempt to read a C++ base-specifier record as a declaration");
+ return nullptr;
+ case DECL_CXX_CTOR_INITIALIZERS:
+ Error("attempt to read a C++ ctor initializer record as a declaration");
+ return nullptr;
+ case DECL_IMPORT:
+    // Note: the last entry of the ImportDecl record is the number of stored
+    // source locations.
+ D = ImportDecl::CreateDeserialized(Context, ID, Record.back());
+ break;
+ case DECL_OMP_THREADPRIVATE:
+ D = OMPThreadPrivateDecl::CreateDeserialized(Context, ID, Record[Idx++]);
+ break;
+ case DECL_EMPTY:
+ D = EmptyDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_TYPE_PARAM:
+ D = ObjCTypeParamDecl::CreateDeserialized(Context, ID);
+ break;
+ }
+
+ assert(D && "Unknown declaration reading AST file");
+ LoadedDecl(Index, D);
+ // Set the DeclContext before doing any deserialization, to make sure internal
+ // calls to Decl::getASTContext() by Decl's methods will find the
+ // TranslationUnitDecl without crashing.
+ D->setDeclContext(Context.getTranslationUnitDecl());
+ Reader.Visit(D);
+
+ // If this declaration is also a declaration context, get the
+ // offsets for its tables of lexical and visible declarations.
+ if (DeclContext *DC = dyn_cast<DeclContext>(D)) {
+ std::pair<uint64_t, uint64_t> Offsets = Reader.VisitDeclContext(DC);
+ if (Offsets.first &&
+ ReadLexicalDeclContextStorage(*Loc.F, DeclsCursor, Offsets.first, DC))
+ return nullptr;
+ if (Offsets.second &&
+ ReadVisibleDeclContextStorage(*Loc.F, DeclsCursor, Offsets.second, ID))
+ return nullptr;
+ }
+ assert(Idx == Record.size());
+
+ // Load any relevant update records.
+ PendingUpdateRecords.push_back(std::make_pair(ID, D));
+
+ // Load the categories after recursive loading is finished.
+ if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(D))
+ if (Class->isThisDeclarationADefinition())
+ loadObjCCategories(ID, Class);
+
+ // If we have deserialized a declaration that has a definition the
+ // AST consumer might need to know about, queue it.
+ // We don't pass it to the consumer immediately because we may be in recursive
+ // loading, and some declarations may still be initializing.
+ if (isConsumerInterestedIn(D, Reader.hasPendingBody()))
+ InterestingDecls.push_back(D);
+
+ return D;
+}
+
+void ASTReader::loadDeclUpdateRecords(serialization::DeclID ID, Decl *D) {
+ // Load the pending visible updates for this decl context, if it has any.
+ auto I = PendingVisibleUpdates.find(ID);
+ if (I != PendingVisibleUpdates.end()) {
+ auto VisibleUpdates = std::move(I->second);
+ PendingVisibleUpdates.erase(I);
+
+ auto *DC = cast<DeclContext>(D)->getPrimaryContext();
+ for (const PendingVisibleUpdate &Update : VisibleUpdates)
+ Lookups[DC].Table.add(
+ Update.Mod, Update.Data,
+ reader::ASTDeclContextNameLookupTrait(*this, *Update.Mod));
+ DC->setHasExternalVisibleStorage(true);
+ }
+
+ // The declaration may have been modified by files later in the chain.
+ // If this is the case, read the record containing the updates from each file
+ // and pass it to ASTDeclReader to make the modifications.
+ DeclUpdateOffsetsMap::iterator UpdI = DeclUpdateOffsets.find(ID);
+ if (UpdI != DeclUpdateOffsets.end()) {
+ auto UpdateOffsets = std::move(UpdI->second);
+ DeclUpdateOffsets.erase(UpdI);
+
+ bool WasInteresting = isConsumerInterestedIn(D, false);
+ for (auto &FileAndOffset : UpdateOffsets) {
+ ModuleFile *F = FileAndOffset.first;
+ uint64_t Offset = FileAndOffset.second;
+ llvm::BitstreamCursor &Cursor = F->DeclsCursor;
+ SavedStreamPosition SavedPosition(Cursor);
+ Cursor.JumpToBit(Offset);
+ RecordData Record;
+ unsigned Code = Cursor.ReadCode();
+ unsigned RecCode = Cursor.readRecord(Code, Record);
+ (void)RecCode;
+ assert(RecCode == DECL_UPDATES && "Expected DECL_UPDATES record!");
+
+ unsigned Idx = 0;
+ ASTDeclReader Reader(*this, *F, ID, 0, Record, Idx);
+ Reader.UpdateDecl(D, *F, Record);
+
+ // We might have made this declaration interesting. If so, remember that
+ // we need to hand it off to the consumer.
+ if (!WasInteresting &&
+ isConsumerInterestedIn(D, Reader.hasPendingBody())) {
+ InterestingDecls.push_back(D);
+ WasInteresting = true;
+ }
+ }
+ }
+}
+
+void ASTReader::loadPendingDeclChain(Decl *FirstLocal, uint64_t LocalOffset) {
+ // Attach FirstLocal to the end of the decl chain.
+ Decl *CanonDecl = FirstLocal->getCanonicalDecl();
+ if (FirstLocal != CanonDecl) {
+ Decl *PrevMostRecent = ASTDeclReader::getMostRecentDecl(CanonDecl);
+ ASTDeclReader::attachPreviousDecl(
+ *this, FirstLocal, PrevMostRecent ? PrevMostRecent : CanonDecl,
+ CanonDecl);
+ }
+
+ if (!LocalOffset) {
+ ASTDeclReader::attachLatestDecl(CanonDecl, FirstLocal);
+ return;
+ }
+
+ // Load the list of other redeclarations from this module file.
+ ModuleFile *M = getOwningModuleFile(FirstLocal);
+ assert(M && "imported decl from no module file");
+
+ llvm::BitstreamCursor &Cursor = M->DeclsCursor;
+ SavedStreamPosition SavedPosition(Cursor);
+ Cursor.JumpToBit(LocalOffset);
+
+ RecordData Record;
+ unsigned Code = Cursor.ReadCode();
+ unsigned RecCode = Cursor.readRecord(Code, Record);
+ (void)RecCode;
+  assert(RecCode == LOCAL_REDECLARATIONS &&
+         "expected LOCAL_REDECLARATIONS record!");
+
+ // FIXME: We have several different dispatches on decl kind here; maybe
+ // we should instead generate one loop per kind and dispatch up-front?
+ Decl *MostRecent = FirstLocal;
+ for (unsigned I = 0, N = Record.size(); I != N; ++I) {
+ auto *D = GetLocalDecl(*M, Record[N - I - 1]);
+ ASTDeclReader::attachPreviousDecl(*this, D, MostRecent, CanonDecl);
+ MostRecent = D;
+ }
+ ASTDeclReader::attachLatestDecl(CanonDecl, MostRecent);
+}
+
+namespace {
+  /// \brief Given an ObjC interface, walks the module files and attaches all
+  /// of the categories found for it to the interface.
+ class ObjCCategoriesVisitor {
+ ASTReader &Reader;
+ serialization::GlobalDeclID InterfaceID;
+ ObjCInterfaceDecl *Interface;
+ llvm::SmallPtrSetImpl<ObjCCategoryDecl *> &Deserialized;
+ unsigned PreviousGeneration;
+ ObjCCategoryDecl *Tail;
+ llvm::DenseMap<DeclarationName, ObjCCategoryDecl *> NameCategoryMap;
+
+ void add(ObjCCategoryDecl *Cat) {
+ // Only process each category once.
+ if (!Deserialized.erase(Cat))
+ return;
+
+ // Check for duplicate categories.
+ if (Cat->getDeclName()) {
+ ObjCCategoryDecl *&Existing = NameCategoryMap[Cat->getDeclName()];
+ if (Existing &&
+ Reader.getOwningModuleFile(Existing)
+ != Reader.getOwningModuleFile(Cat)) {
+          // FIXME: We should not warn for duplicates in a diamond hierarchy:
+ //
+ // MT //
+ // / \ //
+ // ML MR //
+ // \ / //
+ // MB //
+ //
+          // If there are duplicates in ML/MR, there will be a warning when
+ // creating MB *and* when importing MB. We should not warn when
+ // importing.
+ Reader.Diag(Cat->getLocation(), diag::warn_dup_category_def)
+ << Interface->getDeclName() << Cat->getDeclName();
+ Reader.Diag(Existing->getLocation(), diag::note_previous_definition);
+ } else if (!Existing) {
+ // Record this category.
+ Existing = Cat;
+ }
+ }
+
+ // Add this category to the end of the chain.
+ if (Tail)
+ ASTDeclReader::setNextObjCCategory(Tail, Cat);
+ else
+ Interface->setCategoryListRaw(Cat);
+ Tail = Cat;
+ }
+
+ public:
+ ObjCCategoriesVisitor(ASTReader &Reader,
+ serialization::GlobalDeclID InterfaceID,
+ ObjCInterfaceDecl *Interface,
+ llvm::SmallPtrSetImpl<ObjCCategoryDecl *> &Deserialized,
+ unsigned PreviousGeneration)
+ : Reader(Reader), InterfaceID(InterfaceID), Interface(Interface),
+ Deserialized(Deserialized), PreviousGeneration(PreviousGeneration),
+ Tail(nullptr)
+ {
+ // Populate the name -> category map with the set of known categories.
+ for (auto *Cat : Interface->known_categories()) {
+ if (Cat->getDeclName())
+ NameCategoryMap[Cat->getDeclName()] = Cat;
+
+ // Keep track of the tail of the category list.
+ Tail = Cat;
+ }
+ }
+
+ bool operator()(ModuleFile &M) {
+ // If we've loaded all of the category information we care about from
+ // this module file, we're done.
+ if (M.Generation <= PreviousGeneration)
+ return true;
+
+ // Map global ID of the definition down to the local ID used in this
+ // module file. If there is no such mapping, we'll find nothing here
+ // (or in any module it imports).
+ DeclID LocalID = Reader.mapGlobalIDToModuleFileGlobalID(M, InterfaceID);
+ if (!LocalID)
+ return true;
+
+      // Perform a binary search to find the categories this module file
+      // attaches to the interface (if any).
+ const ObjCCategoriesInfo Compare = { LocalID, 0 };
+ const ObjCCategoriesInfo *Result
+ = std::lower_bound(M.ObjCCategoriesMap,
+ M.ObjCCategoriesMap + M.LocalNumObjCCategoriesInMap,
+ Compare);
+ if (Result == M.ObjCCategoriesMap + M.LocalNumObjCCategoriesInMap ||
+ Result->DefinitionID != LocalID) {
+ // We didn't find anything. If the class definition is in this module
+ // file, then the module files it depends on cannot have any categories,
+ // so suppress further lookup.
+ return Reader.isDeclIDFromModule(InterfaceID, M);
+ }
+
+ // We found something. Dig out all of the categories.
+ unsigned Offset = Result->Offset;
+ unsigned N = M.ObjCCategories[Offset];
+ M.ObjCCategories[Offset++] = 0; // Don't try to deserialize again
+ for (unsigned I = 0; I != N; ++I)
+ add(cast_or_null<ObjCCategoryDecl>(
+ Reader.GetLocalDecl(M, M.ObjCCategories[Offset++])));
+ return true;
+ }
+ };
+} // end anonymous namespace
+
+void ASTReader::loadObjCCategories(serialization::GlobalDeclID ID,
+ ObjCInterfaceDecl *D,
+ unsigned PreviousGeneration) {
+ ObjCCategoriesVisitor Visitor(*this, ID, D, CategoriesDeserialized,
+ PreviousGeneration);
+ ModuleMgr.visit(Visitor);
+}
+
+template<typename DeclT, typename Fn>
+static void forAllLaterRedecls(DeclT *D, Fn F) {
+ F(D);
+
+  // Check whether we've already merged D into its redeclaration chain.
+  // getMostRecentDecl may return a chain head that does not reach D, so walk
+  // the merged redecl chain and check whether D is actually on it.
+ auto *MostRecent = D->getMostRecentDecl();
+ bool Found = false;
+ for (auto *Redecl = MostRecent; Redecl && !Found;
+ Redecl = Redecl->getPreviousDecl())
+ Found = (Redecl == D);
+
+ // If this declaration is merged, apply the functor to all later decls.
+ if (Found) {
+ for (auto *Redecl = MostRecent; Redecl != D;
+ Redecl = Redecl->getPreviousDecl())
+ F(Redecl);
+ }
+}
+
+void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
+ const RecordData &Record) {
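+  // A DECL_UPDATES record is a sequence of updates; each one starts with its
+  // DeclUpdateKind tag followed by a kind-specific payload.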
+ while (Idx < Record.size()) {
+ switch ((DeclUpdateKind)Record[Idx++]) {
+ case UPD_CXX_ADDED_IMPLICIT_MEMBER: {
+ auto *RD = cast<CXXRecordDecl>(D);
+ // FIXME: If we also have an update record for instantiating the
+ // definition of D, we need that to happen before we get here.
+ Decl *MD = Reader.ReadDecl(ModuleFile, Record, Idx);
+ assert(MD && "couldn't read decl from update record");
+ // FIXME: We should call addHiddenDecl instead, to add the member
+ // to its DeclContext.
+ RD->addedMember(MD);
+ break;
+ }
+
+ case UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION:
+ // It will be added to the template's specializations set when loaded.
+ (void)Reader.ReadDecl(ModuleFile, Record, Idx);
+ break;
+
+ case UPD_CXX_ADDED_ANONYMOUS_NAMESPACE: {
+ NamespaceDecl *Anon
+ = Reader.ReadDeclAs<NamespaceDecl>(ModuleFile, Record, Idx);
+
+ // Each module has its own anonymous namespace, which is disjoint from
+ // any other module's anonymous namespaces, so don't attach the anonymous
+ // namespace at all.
+ if (ModuleFile.Kind != MK_ImplicitModule &&
+ ModuleFile.Kind != MK_ExplicitModule) {
+ if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(D))
+ TU->setAnonymousNamespace(Anon);
+ else
+ cast<NamespaceDecl>(D)->setAnonymousNamespace(Anon);
+ }
+ break;
+ }
+
+ case UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER:
+ cast<VarDecl>(D)->getMemberSpecializationInfo()->setPointOfInstantiation(
+ Reader.ReadSourceLocation(ModuleFile, Record, Idx));
+ break;
+
+ case UPD_CXX_ADDED_FUNCTION_DEFINITION: {
+ FunctionDecl *FD = cast<FunctionDecl>(D);
+ if (Reader.PendingBodies[FD]) {
+ // FIXME: Maybe check for ODR violations.
+ // It's safe to stop now because this update record is always last.
+ return;
+ }
+
+ if (Record[Idx++]) {
+ // Maintain AST consistency: any later redeclarations of this function
+ // are inline if this one is. (We might have merged another declaration
+ // into this one.)
+ forAllLaterRedecls(FD, [](FunctionDecl *FD) {
+ FD->setImplicitlyInline();
+ });
+ }
+ FD->setInnerLocStart(Reader.ReadSourceLocation(ModuleFile, Record, Idx));
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(FD)) {
+ CD->NumCtorInitializers = Record[Idx++];
+ if (CD->NumCtorInitializers)
+ CD->CtorInitializers =
+ Reader.ReadCXXCtorInitializersRef(F, Record, Idx);
+ }
+ // Store the offset of the body so we can lazily load it later.
+ Reader.PendingBodies[FD] = GetCurrentCursorOffset();
+ HasPendingBody = true;
+ assert(Idx == Record.size() && "lazy body must be last");
+ break;
+ }
+
+ case UPD_CXX_INSTANTIATED_CLASS_DEFINITION: {
+ auto *RD = cast<CXXRecordDecl>(D);
+ auto *OldDD = RD->DefinitionData.getNotUpdated();
+ bool HadRealDefinition =
+ OldDD && (OldDD->Definition != RD ||
+ !Reader.PendingFakeDefinitionData.count(OldDD));
+ ReadCXXRecordDefinition(RD, /*Update*/true);
+
+ // Visible update is handled separately.
+ uint64_t LexicalOffset = Record[Idx++];
+ if (!HadRealDefinition && LexicalOffset) {
+ Reader.ReadLexicalDeclContextStorage(ModuleFile, ModuleFile.DeclsCursor,
+ LexicalOffset, RD);
+ Reader.PendingFakeDefinitionData.erase(OldDD);
+ }
+
+ auto TSK = (TemplateSpecializationKind)Record[Idx++];
+ SourceLocation POI = Reader.ReadSourceLocation(ModuleFile, Record, Idx);
+ if (MemberSpecializationInfo *MSInfo =
+ RD->getMemberSpecializationInfo()) {
+ MSInfo->setTemplateSpecializationKind(TSK);
+ MSInfo->setPointOfInstantiation(POI);
+ } else {
+ ClassTemplateSpecializationDecl *Spec =
+ cast<ClassTemplateSpecializationDecl>(RD);
+ Spec->setTemplateSpecializationKind(TSK);
+ Spec->setPointOfInstantiation(POI);
+
+ if (Record[Idx++]) {
+ auto PartialSpec =
+ ReadDeclAs<ClassTemplatePartialSpecializationDecl>(Record, Idx);
+ SmallVector<TemplateArgument, 8> TemplArgs;
+ Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx);
+ auto *TemplArgList = TemplateArgumentList::CreateCopy(
+ Reader.getContext(), TemplArgs.data(), TemplArgs.size());
+
+ // FIXME: If we already have a partial specialization set,
+ // check that it matches.
+ if (!Spec->getSpecializedTemplateOrPartial()
+ .is<ClassTemplatePartialSpecializationDecl *>())
+ Spec->setInstantiationOf(PartialSpec, TemplArgList);
+ }
+ }
+
+ RD->setTagKind((TagTypeKind)Record[Idx++]);
+ RD->setLocation(Reader.ReadSourceLocation(ModuleFile, Record, Idx));
+ RD->setLocStart(Reader.ReadSourceLocation(ModuleFile, Record, Idx));
+ RD->setRBraceLoc(Reader.ReadSourceLocation(ModuleFile, Record, Idx));
+
+ if (Record[Idx++]) {
+ AttrVec Attrs;
+ Reader.ReadAttributes(F, Attrs, Record, Idx);
+ D->setAttrsImpl(Attrs, Reader.getContext());
+ }
+ break;
+ }
+
+ case UPD_CXX_RESOLVED_DTOR_DELETE: {
+ // Set the 'operator delete' directly to avoid emitting another update
+ // record.
+ auto *Del = Reader.ReadDeclAs<FunctionDecl>(ModuleFile, Record, Idx);
+ auto *First = cast<CXXDestructorDecl>(D->getCanonicalDecl());
+ // FIXME: Check consistency if we have an old and new operator delete.
+ if (!First->OperatorDelete)
+ First->OperatorDelete = Del;
+ break;
+ }
+
+ case UPD_CXX_RESOLVED_EXCEPTION_SPEC: {
+ FunctionProtoType::ExceptionSpecInfo ESI;
+ SmallVector<QualType, 8> ExceptionStorage;
+ Reader.readExceptionSpec(ModuleFile, ExceptionStorage, ESI, Record, Idx);
+
+ // Update this declaration's exception specification, if needed.
+ auto *FD = cast<FunctionDecl>(D);
+ auto *FPT = FD->getType()->castAs<FunctionProtoType>();
+ // FIXME: If the exception specification is already present, check that it
+ // matches.
+ if (isUnresolvedExceptionSpec(FPT->getExceptionSpecType())) {
+ FD->setType(Reader.Context.getFunctionType(
+ FPT->getReturnType(), FPT->getParamTypes(),
+ FPT->getExtProtoInfo().withExceptionSpec(ESI)));
+
+ // When we get to the end of deserializing, see if there are other decls
+ // that we need to propagate this exception specification onto.
+ Reader.PendingExceptionSpecUpdates.insert(
+ std::make_pair(FD->getCanonicalDecl(), FD));
+ }
+ break;
+ }
+
+ case UPD_CXX_DEDUCED_RETURN_TYPE: {
+ // FIXME: Also do this when merging redecls.
+ QualType DeducedResultType = Reader.readType(ModuleFile, Record, Idx);
+ for (auto *Redecl : merged_redecls(D)) {
+ // FIXME: If the return type is already deduced, check that it matches.
+ FunctionDecl *FD = cast<FunctionDecl>(Redecl);
+ Reader.Context.adjustDeducedFunctionResultType(FD, DeducedResultType);
+ }
+ break;
+ }
+
+ case UPD_DECL_MARKED_USED: {
+ // FIXME: This doesn't send the right notifications if there are
+ // ASTMutationListeners other than an ASTWriter.
+
+ // Maintain AST consistency: any later redeclarations are used too.
+ forAllLaterRedecls(D, [](Decl *D) { D->Used = true; });
+ break;
+ }
+
+ case UPD_MANGLING_NUMBER:
+ Reader.Context.setManglingNumber(cast<NamedDecl>(D), Record[Idx++]);
+ break;
+
+ case UPD_STATIC_LOCAL_NUMBER:
+ Reader.Context.setStaticLocalNumber(cast<VarDecl>(D), Record[Idx++]);
+ break;
+
+ case UPD_DECL_MARKED_OPENMP_THREADPRIVATE:
+ D->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
+ Reader.Context, ReadSourceRange(Record, Idx)));
+ break;
+
+ case UPD_DECL_EXPORTED: {
+ unsigned SubmoduleID = readSubmoduleID(Record, Idx);
+ auto *Exported = cast<NamedDecl>(D);
+ if (auto *TD = dyn_cast<TagDecl>(Exported))
+ Exported = TD->getDefinition();
+ Module *Owner = SubmoduleID ? Reader.getSubmodule(SubmoduleID) : nullptr;
+ if (Reader.getContext().getLangOpts().ModulesLocalVisibility) {
+ // FIXME: This doesn't send the right notifications if there are
+ // ASTMutationListeners other than an ASTWriter.
+ Reader.getContext().mergeDefinitionIntoModule(
+ cast<NamedDecl>(Exported), Owner,
+ /*NotifyListeners*/ false);
+ Reader.PendingMergedDefinitionsToDeduplicate.insert(
+ cast<NamedDecl>(Exported));
+ } else if (Owner && Owner->NameVisibility != Module::AllVisible) {
+ // If Owner is made visible at some later point, make this declaration
+ // visible too.
+ Reader.HiddenNamesMap[Owner].push_back(Exported);
+ } else {
+ // The declaration is now visible.
+ Exported->Hidden = false;
+ }
+ break;
+ }
+
+ case UPD_ADDED_ATTR_TO_RECORD:
+ AttrVec Attrs;
+ Reader.ReadAttributes(F, Attrs, Record, Idx);
+ assert(Attrs.size() == 1);
+ D->addAttr(Attrs[0]);
+ break;
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderInternals.h b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderInternals.h
new file mode 100644
index 0000000..d392364
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderInternals.h
@@ -0,0 +1,296 @@
+//===--- ASTReaderInternals.h - AST Reader Internals ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides internal definitions used in the AST reader.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_LIB_SERIALIZATION_ASTREADERINTERNALS_H
+#define LLVM_CLANG_LIB_SERIALIZATION_ASTREADERINTERNALS_H
+
+#include "clang/AST/DeclarationName.h"
+#include "clang/Serialization/ASTBitCodes.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/TinyPtrVector.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/OnDiskHashTable.h"
+#include "MultiOnDiskHashTable.h"
+#include <utility>
+
+namespace clang {
+
+class ASTReader;
+class HeaderSearch;
+struct HeaderFileInfo;
+class FileEntry;
+
+namespace serialization {
+
+class ModuleFile;
+
+namespace reader {
+
+/// \brief Class that performs name lookup into a DeclContext stored
+/// in an AST file.
+class ASTDeclContextNameLookupTrait {
+ ASTReader &Reader;
+ ModuleFile &F;
+
+public:
+ // Maximum number of lookup tables we allow before condensing the tables.
+ static const int MaxTables = 4;
+
+ /// The lookup result is a list of global declaration IDs.
+ typedef llvm::SmallVector<DeclID, 4> data_type;
+ struct data_type_builder {
+ data_type &Data;
+ llvm::DenseSet<DeclID> Found;
+
+ data_type_builder(data_type &D) : Data(D) {}
+ void insert(DeclID ID) {
+ // Just use a linear scan unless we have more than a few IDs.
+ if (Found.empty() && !Data.empty()) {
+ if (Data.size() <= 4) {
+          // Scan the IDs already in Data (Found is empty in this branch).
+          for (auto I : Data)
+ if (I == ID)
+ return;
+ Data.push_back(ID);
+ return;
+ }
+
+ // Switch to tracking found IDs in the set.
+ Found.insert(Data.begin(), Data.end());
+ }
+
+ if (Found.insert(ID).second)
+ Data.push_back(ID);
+ }
+ };
+ typedef unsigned hash_value_type;
+ typedef unsigned offset_type;
+ typedef ModuleFile *file_type;
+
+ typedef DeclarationName external_key_type;
+ typedef DeclarationNameKey internal_key_type;
+
+ explicit ASTDeclContextNameLookupTrait(ASTReader &Reader, ModuleFile &F)
+ : Reader(Reader), F(F) { }
+
+ static bool EqualKey(const internal_key_type &a, const internal_key_type &b) {
+ return a == b;
+ }
+
+ static hash_value_type ComputeHash(const internal_key_type &Key) {
+ return Key.getHash();
+ }
+ static internal_key_type GetInternalKey(const external_key_type &Name) {
+ return Name;
+ }
+
+ static std::pair<unsigned, unsigned>
+ ReadKeyDataLength(const unsigned char *&d);
+
+ internal_key_type ReadKey(const unsigned char *d, unsigned);
+
+ void ReadDataInto(internal_key_type, const unsigned char *d,
+ unsigned DataLen, data_type_builder &Val);
+
+ static void MergeDataInto(const data_type &From, data_type_builder &To) {
+ To.Data.reserve(To.Data.size() + From.size());
+ for (DeclID ID : From)
+ To.insert(ID);
+ }
+
+ file_type ReadFileRef(const unsigned char *&d);
+};
+
+struct DeclContextLookupTable {
+ MultiOnDiskHashTable<ASTDeclContextNameLookupTrait> Table;
+
+ // These look redundant, but don't remove them -- they work around MSVC 2013's
+ // inability to synthesize move operations. Without them, the
+ // MultiOnDiskHashTable will be copied (despite being move-only!).
+ DeclContextLookupTable() : Table() {}
+ DeclContextLookupTable(DeclContextLookupTable &&O)
+ : Table(std::move(O.Table)) {}
+ DeclContextLookupTable &operator=(DeclContextLookupTable &&O) {
+ Table = std::move(O.Table);
+ return *this;
+ }
+};
+
+/// \brief Base class for the trait describing the on-disk hash table for the
+/// identifiers in an AST file.
+///
+/// This class is not useful by itself; rather, it provides common
+/// functionality for accessing the on-disk hash table of identifiers
+/// in an AST file. Different subclasses customize that functionality
+/// based on what information they are interested in. Those subclasses
+/// must provide the \c data_type typedef and the ReadData operation,
+/// only.
+class ASTIdentifierLookupTraitBase {
+public:
+ typedef StringRef external_key_type;
+ typedef StringRef internal_key_type;
+ typedef unsigned hash_value_type;
+ typedef unsigned offset_type;
+
+ static bool EqualKey(const internal_key_type& a, const internal_key_type& b) {
+ return a == b;
+ }
+
+ static hash_value_type ComputeHash(const internal_key_type& a);
+
+ static std::pair<unsigned, unsigned>
+ ReadKeyDataLength(const unsigned char*& d);
+
+ // This hopefully will just get inlined and removed by the optimizer.
+ static const internal_key_type&
+ GetInternalKey(const external_key_type& x) { return x; }
+
+ // This hopefully will just get inlined and removed by the optimizer.
+ static const external_key_type&
+ GetExternalKey(const internal_key_type& x) { return x; }
+
+ static internal_key_type ReadKey(const unsigned char* d, unsigned n);
+};
+
+/// \brief Class that performs lookup for an identifier stored in an AST file.
+class ASTIdentifierLookupTrait : public ASTIdentifierLookupTraitBase {
+ ASTReader &Reader;
+ ModuleFile &F;
+
+ // If we know the IdentifierInfo in advance, it is here and we will
+ // not build a new one. Used when deserializing information about an
+ // identifier that was constructed before the AST file was read.
+ IdentifierInfo *KnownII;
+
+public:
+ typedef IdentifierInfo * data_type;
+
+ ASTIdentifierLookupTrait(ASTReader &Reader, ModuleFile &F,
+ IdentifierInfo *II = nullptr)
+ : Reader(Reader), F(F), KnownII(II) { }
+
+ data_type ReadData(const internal_key_type& k,
+ const unsigned char* d,
+ unsigned DataLen);
+
+ IdentID ReadIdentifierID(const unsigned char *d);
+
+ ASTReader &getReader() const { return Reader; }
+};
+
+/// \brief The on-disk hash table used to contain information about
+/// all of the identifiers in the program.
+typedef llvm::OnDiskIterableChainedHashTable<ASTIdentifierLookupTrait>
+ ASTIdentifierLookupTable;
+
+/// \brief Class that performs lookup for a selector's entries in the global
+/// method pool stored in an AST file.
+class ASTSelectorLookupTrait {
+ ASTReader &Reader;
+ ModuleFile &F;
+
+public:
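+  /// A deserialized global method pool entry for a single selector.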
+ struct data_type {
+ SelectorID ID;
+ unsigned InstanceBits;
+ unsigned FactoryBits;
+ bool InstanceHasMoreThanOneDecl;
+ bool FactoryHasMoreThanOneDecl;
+ SmallVector<ObjCMethodDecl *, 2> Instance;
+ SmallVector<ObjCMethodDecl *, 2> Factory;
+ };
+
+ typedef Selector external_key_type;
+ typedef external_key_type internal_key_type;
+ typedef unsigned hash_value_type;
+ typedef unsigned offset_type;
+
+ ASTSelectorLookupTrait(ASTReader &Reader, ModuleFile &F)
+ : Reader(Reader), F(F) { }
+
+ static bool EqualKey(const internal_key_type& a,
+ const internal_key_type& b) {
+ return a == b;
+ }
+
+ static hash_value_type ComputeHash(Selector Sel);
+
+ static const internal_key_type&
+ GetInternalKey(const external_key_type& x) { return x; }
+
+ static std::pair<unsigned, unsigned>
+ ReadKeyDataLength(const unsigned char*& d);
+
+ internal_key_type ReadKey(const unsigned char* d, unsigned);
+ data_type ReadData(Selector, const unsigned char* d, unsigned DataLen);
+};
+
+/// \brief The on-disk hash table used for the global method pool.
+typedef llvm::OnDiskChainedHashTable<ASTSelectorLookupTrait>
+ ASTSelectorLookupTable;
+
+/// \brief Trait class used to search the on-disk hash table containing all of
+/// the header search information.
+///
+/// The on-disk hash table contains a mapping from each header path to
+/// information about that header (how many times it has been included, its
+/// controlling macro, etc.). Note that we actually hash based on the size
+/// and mtime, and support "deep" comparisons of file names based on current
+/// inode numbers, so that the search can cope with non-normalized path names
+/// and symlinks.
+class HeaderFileInfoTrait {
+ ASTReader &Reader;
+ ModuleFile &M;
+ HeaderSearch *HS;
+ const char *FrameworkStrings;
+
+public:
+ typedef const FileEntry *external_key_type;
+
+ struct internal_key_type {
+ off_t Size;
+ time_t ModTime;
+ const char *Filename;
+ bool Imported;
+ };
+ typedef const internal_key_type &internal_key_ref;
+
+ typedef HeaderFileInfo data_type;
+ typedef unsigned hash_value_type;
+ typedef unsigned offset_type;
+
+ HeaderFileInfoTrait(ASTReader &Reader, ModuleFile &M, HeaderSearch *HS,
+ const char *FrameworkStrings)
+ : Reader(Reader), M(M), HS(HS), FrameworkStrings(FrameworkStrings) { }
+
+ static hash_value_type ComputeHash(internal_key_ref ikey);
+ internal_key_type GetInternalKey(const FileEntry *FE);
+ bool EqualKey(internal_key_ref a, internal_key_ref b);
+
+ static std::pair<unsigned, unsigned>
+ ReadKeyDataLength(const unsigned char*& d);
+
+ static internal_key_type ReadKey(const unsigned char *d, unsigned);
+
+  data_type ReadData(internal_key_ref, const unsigned char *d,
+                     unsigned DataLen);
+};
+
+/// \brief The on-disk hash table used for known header files.
+typedef llvm::OnDiskChainedHashTable<HeaderFileInfoTrait>
+ HeaderFileInfoLookupTable;
+
+} // end namespace clang::serialization::reader
+} // end namespace clang::serialization
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp
new file mode 100644
index 0000000..bc678af
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -0,0 +1,3378 @@
+//===--- ASTReaderStmt.cpp - Stmt/Expr Deserialization ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Statement/expression deserialization. This implements the
+// ASTReader::ReadStmt method.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Serialization/ASTReader.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Lex/Token.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+using namespace clang::serialization;
+
+namespace clang {
+
+ class ASTStmtReader : public StmtVisitor<ASTStmtReader> {
+ friend class OMPClauseReader;
+ typedef ASTReader::RecordData RecordData;
+
+ ASTReader &Reader;
+ ModuleFile &F;
+ llvm::BitstreamCursor &DeclsCursor;
+ const ASTReader::RecordData &Record;
+ unsigned &Idx;
+
+ Token ReadToken(const RecordData &R, unsigned &I) {
+ return Reader.ReadToken(F, R, I);
+ }
+
+ SourceLocation ReadSourceLocation(const RecordData &R, unsigned &I) {
+ return Reader.ReadSourceLocation(F, R, I);
+ }
+
+ SourceRange ReadSourceRange(const RecordData &R, unsigned &I) {
+ return Reader.ReadSourceRange(F, R, I);
+ }
+
+ std::string ReadString(const RecordData &R, unsigned &I) {
+ return Reader.ReadString(R, I);
+ }
+
+ TypeSourceInfo *GetTypeSourceInfo(const RecordData &R, unsigned &I) {
+ return Reader.GetTypeSourceInfo(F, R, I);
+ }
+
+ serialization::DeclID ReadDeclID(const RecordData &R, unsigned &I) {
+ return Reader.ReadDeclID(F, R, I);
+ }
+
+ Decl *ReadDecl(const RecordData &R, unsigned &I) {
+ return Reader.ReadDecl(F, R, I);
+ }
+
+ template<typename T>
+ T *ReadDeclAs(const RecordData &R, unsigned &I) {
+ return Reader.ReadDeclAs<T>(F, R, I);
+ }
+
+ void ReadDeclarationNameLoc(DeclarationNameLoc &DNLoc, DeclarationName Name,
+ const ASTReader::RecordData &R, unsigned &I) {
+ Reader.ReadDeclarationNameLoc(F, DNLoc, Name, R, I);
+ }
+
+ void ReadDeclarationNameInfo(DeclarationNameInfo &NameInfo,
+ const ASTReader::RecordData &R, unsigned &I) {
+ Reader.ReadDeclarationNameInfo(F, NameInfo, R, I);
+ }
+
+ public:
+ ASTStmtReader(ASTReader &Reader, ModuleFile &F,
+ llvm::BitstreamCursor &Cursor,
+ const ASTReader::RecordData &Record, unsigned &Idx)
+ : Reader(Reader), F(F), DeclsCursor(Cursor), Record(Record), Idx(Idx) { }
+
+ /// \brief The number of record fields required for the Stmt class
+ /// itself.
+ static const unsigned NumStmtFields = 0;
+
+ /// \brief The number of record fields required for the Expr class
+ /// itself.
+ static const unsigned NumExprFields = NumStmtFields + 7;
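+    // (The type plus the six dependence/kind fields read in VisitExpr.)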
+
+    /// \brief Read and initialize an ExplicitTemplateArgumentList structure.
+ void ReadTemplateKWAndArgsInfo(ASTTemplateKWAndArgsInfo &Args,
+ TemplateArgumentLoc *ArgsLocArray,
+ unsigned NumTemplateArgs);
+    /// \brief Read and initialize an ExplicitTemplateArgumentList structure.
+ void ReadExplicitTemplateArgumentList(ASTTemplateArgumentListInfo &ArgList,
+ unsigned NumTemplateArgs);
+
+ void VisitStmt(Stmt *S);
+#define STMT(Type, Base) \
+ void Visit##Type(Type *);
+#include "clang/AST/StmtNodes.inc"
+ };
+} // end namespace clang
+
+void ASTStmtReader::ReadTemplateKWAndArgsInfo(ASTTemplateKWAndArgsInfo &Args,
+ TemplateArgumentLoc *ArgsLocArray,
+ unsigned NumTemplateArgs) {
+ SourceLocation TemplateKWLoc = ReadSourceLocation(Record, Idx);
+ TemplateArgumentListInfo ArgInfo;
+ ArgInfo.setLAngleLoc(ReadSourceLocation(Record, Idx));
+ ArgInfo.setRAngleLoc(ReadSourceLocation(Record, Idx));
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ ArgInfo.addArgument(
+ Reader.ReadTemplateArgumentLoc(F, Record, Idx));
+ Args.initializeFrom(TemplateKWLoc, ArgInfo, ArgsLocArray);
+}
+
+void ASTStmtReader::VisitStmt(Stmt *S) {
+ assert(Idx == NumStmtFields && "Incorrect statement field count");
+}
+
+void ASTStmtReader::VisitNullStmt(NullStmt *S) {
+ VisitStmt(S);
+ S->setSemiLoc(ReadSourceLocation(Record, Idx));
+ S->HasLeadingEmptyMacro = Record[Idx++];
+}
+
+void ASTStmtReader::VisitCompoundStmt(CompoundStmt *S) {
+ VisitStmt(S);
+ SmallVector<Stmt *, 16> Stmts;
+ unsigned NumStmts = Record[Idx++];
+ while (NumStmts--)
+ Stmts.push_back(Reader.ReadSubStmt());
+ S->setStmts(Reader.getContext(), Stmts);
+ S->LBraceLoc = ReadSourceLocation(Record, Idx);
+ S->RBraceLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitSwitchCase(SwitchCase *S) {
+ VisitStmt(S);
+ Reader.RecordSwitchCaseID(S, Record[Idx++]);
+ S->setKeywordLoc(ReadSourceLocation(Record, Idx));
+ S->setColonLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitCaseStmt(CaseStmt *S) {
+ VisitSwitchCase(S);
+ S->setLHS(Reader.ReadSubExpr());
+ S->setRHS(Reader.ReadSubExpr());
+ S->setSubStmt(Reader.ReadSubStmt());
+ S->setEllipsisLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitDefaultStmt(DefaultStmt *S) {
+ VisitSwitchCase(S);
+ S->setSubStmt(Reader.ReadSubStmt());
+}
+
+void ASTStmtReader::VisitLabelStmt(LabelStmt *S) {
+ VisitStmt(S);
+ LabelDecl *LD = ReadDeclAs<LabelDecl>(Record, Idx);
+ LD->setStmt(S);
+ S->setDecl(LD);
+ S->setSubStmt(Reader.ReadSubStmt());
+ S->setIdentLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitAttributedStmt(AttributedStmt *S) {
+ VisitStmt(S);
+ uint64_t NumAttrs = Record[Idx++];
+ AttrVec Attrs;
+ Reader.ReadAttributes(F, Attrs, Record, Idx);
+ (void)NumAttrs;
+ assert(NumAttrs == S->NumAttrs);
+ assert(NumAttrs == Attrs.size());
+ std::copy(Attrs.begin(), Attrs.end(), S->getAttrArrayPtr());
+ S->SubStmt = Reader.ReadSubStmt();
+ S->AttrLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitIfStmt(IfStmt *S) {
+ VisitStmt(S);
+ S->setConditionVariable(Reader.getContext(),
+ ReadDeclAs<VarDecl>(Record, Idx));
+ S->setCond(Reader.ReadSubExpr());
+ S->setThen(Reader.ReadSubStmt());
+ S->setElse(Reader.ReadSubStmt());
+ S->setIfLoc(ReadSourceLocation(Record, Idx));
+ S->setElseLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitSwitchStmt(SwitchStmt *S) {
+ VisitStmt(S);
+ S->setConditionVariable(Reader.getContext(),
+ ReadDeclAs<VarDecl>(Record, Idx));
+ S->setCond(Reader.ReadSubExpr());
+ S->setBody(Reader.ReadSubStmt());
+ S->setSwitchLoc(ReadSourceLocation(Record, Idx));
+ if (Record[Idx++])
+ S->setAllEnumCasesCovered();
+
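+  // The remainder of the record is the flat list of this switch's case IDs;
+  // rebuild the singly-linked SwitchCase list from them.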
+ SwitchCase *PrevSC = nullptr;
+ for (unsigned N = Record.size(); Idx != N; ++Idx) {
+ SwitchCase *SC = Reader.getSwitchCaseWithID(Record[Idx]);
+ if (PrevSC)
+ PrevSC->setNextSwitchCase(SC);
+ else
+ S->setSwitchCaseList(SC);
+
+ PrevSC = SC;
+ }
+}
+
+void ASTStmtReader::VisitWhileStmt(WhileStmt *S) {
+ VisitStmt(S);
+ S->setConditionVariable(Reader.getContext(),
+ ReadDeclAs<VarDecl>(Record, Idx));
+
+ S->setCond(Reader.ReadSubExpr());
+ S->setBody(Reader.ReadSubStmt());
+ S->setWhileLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitDoStmt(DoStmt *S) {
+ VisitStmt(S);
+ S->setCond(Reader.ReadSubExpr());
+ S->setBody(Reader.ReadSubStmt());
+ S->setDoLoc(ReadSourceLocation(Record, Idx));
+ S->setWhileLoc(ReadSourceLocation(Record, Idx));
+ S->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitForStmt(ForStmt *S) {
+ VisitStmt(S);
+ S->setInit(Reader.ReadSubStmt());
+ S->setCond(Reader.ReadSubExpr());
+ S->setConditionVariable(Reader.getContext(),
+ ReadDeclAs<VarDecl>(Record, Idx));
+ S->setInc(Reader.ReadSubExpr());
+ S->setBody(Reader.ReadSubStmt());
+ S->setForLoc(ReadSourceLocation(Record, Idx));
+ S->setLParenLoc(ReadSourceLocation(Record, Idx));
+ S->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitGotoStmt(GotoStmt *S) {
+ VisitStmt(S);
+ S->setLabel(ReadDeclAs<LabelDecl>(Record, Idx));
+ S->setGotoLoc(ReadSourceLocation(Record, Idx));
+ S->setLabelLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
+ VisitStmt(S);
+ S->setGotoLoc(ReadSourceLocation(Record, Idx));
+ S->setStarLoc(ReadSourceLocation(Record, Idx));
+ S->setTarget(Reader.ReadSubExpr());
+}
+
+void ASTStmtReader::VisitContinueStmt(ContinueStmt *S) {
+ VisitStmt(S);
+ S->setContinueLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitBreakStmt(BreakStmt *S) {
+ VisitStmt(S);
+ S->setBreakLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitReturnStmt(ReturnStmt *S) {
+ VisitStmt(S);
+ S->setRetValue(Reader.ReadSubExpr());
+ S->setReturnLoc(ReadSourceLocation(Record, Idx));
+ S->setNRVOCandidate(ReadDeclAs<VarDecl>(Record, Idx));
+}
+
+void ASTStmtReader::VisitDeclStmt(DeclStmt *S) {
+ VisitStmt(S);
+ S->setStartLoc(ReadSourceLocation(Record, Idx));
+ S->setEndLoc(ReadSourceLocation(Record, Idx));
+
+ if (Idx + 1 == Record.size()) {
+ // Single declaration
+ S->setDeclGroup(DeclGroupRef(ReadDecl(Record, Idx)));
+ } else {
+ SmallVector<Decl *, 16> Decls;
+ Decls.reserve(Record.size() - Idx);
+ for (unsigned N = Record.size(); Idx != N; )
+ Decls.push_back(ReadDecl(Record, Idx));
+ S->setDeclGroup(DeclGroupRef(DeclGroup::Create(Reader.getContext(),
+ Decls.data(),
+ Decls.size())));
+ }
+}
+
+void ASTStmtReader::VisitAsmStmt(AsmStmt *S) {
+ VisitStmt(S);
+ S->NumOutputs = Record[Idx++];
+ S->NumInputs = Record[Idx++];
+ S->NumClobbers = Record[Idx++];
+ S->setAsmLoc(ReadSourceLocation(Record, Idx));
+ S->setVolatile(Record[Idx++]);
+ S->setSimple(Record[Idx++]);
+}
+
+void ASTStmtReader::VisitGCCAsmStmt(GCCAsmStmt *S) {
+ VisitAsmStmt(S);
+ S->setRParenLoc(ReadSourceLocation(Record, Idx));
+ S->setAsmString(cast_or_null<StringLiteral>(Reader.ReadSubStmt()));
+
+ unsigned NumOutputs = S->getNumOutputs();
+ unsigned NumInputs = S->getNumInputs();
+ unsigned NumClobbers = S->getNumClobbers();
+
+ // Outputs and inputs
+ SmallVector<IdentifierInfo *, 16> Names;
+ SmallVector<StringLiteral*, 16> Constraints;
+ SmallVector<Stmt*, 16> Exprs;
+ for (unsigned I = 0, N = NumOutputs + NumInputs; I != N; ++I) {
+ Names.push_back(Reader.GetIdentifierInfo(F, Record, Idx));
+ Constraints.push_back(cast_or_null<StringLiteral>(Reader.ReadSubStmt()));
+ Exprs.push_back(Reader.ReadSubStmt());
+ }
+
+  // Clobbers
+ SmallVector<StringLiteral*, 16> Clobbers;
+ for (unsigned I = 0; I != NumClobbers; ++I)
+ Clobbers.push_back(cast_or_null<StringLiteral>(Reader.ReadSubStmt()));
+
+ S->setOutputsAndInputsAndClobbers(Reader.getContext(),
+ Names.data(), Constraints.data(),
+ Exprs.data(), NumOutputs, NumInputs,
+ Clobbers.data(), NumClobbers);
+}
+
+void ASTStmtReader::VisitMSAsmStmt(MSAsmStmt *S) {
+ VisitAsmStmt(S);
+ S->LBraceLoc = ReadSourceLocation(Record, Idx);
+ S->EndLoc = ReadSourceLocation(Record, Idx);
+ S->NumAsmToks = Record[Idx++];
+ std::string AsmStr = ReadString(Record, Idx);
+
+ // Read the tokens.
+ SmallVector<Token, 16> AsmToks;
+ AsmToks.reserve(S->NumAsmToks);
+ for (unsigned i = 0, e = S->NumAsmToks; i != e; ++i) {
+ AsmToks.push_back(ReadToken(Record, Idx));
+ }
+
+  // The calls to reserve() for the *Data vectors are mandatory: without them
+  // a reallocation would leave dangling StringRefs in the corresponding
+  // Clobbers/Constraints vectors.
+
+ // Read the clobbers.
+ SmallVector<std::string, 16> ClobbersData;
+ SmallVector<StringRef, 16> Clobbers;
+ ClobbersData.reserve(S->NumClobbers);
+ Clobbers.reserve(S->NumClobbers);
+ for (unsigned i = 0, e = S->NumClobbers; i != e; ++i) {
+ ClobbersData.push_back(ReadString(Record, Idx));
+ Clobbers.push_back(ClobbersData.back());
+ }
+
+ // Read the operands.
+ unsigned NumOperands = S->NumOutputs + S->NumInputs;
+ SmallVector<Expr*, 16> Exprs;
+ SmallVector<std::string, 16> ConstraintsData;
+ SmallVector<StringRef, 16> Constraints;
+ Exprs.reserve(NumOperands);
+ ConstraintsData.reserve(NumOperands);
+ Constraints.reserve(NumOperands);
+ for (unsigned i = 0; i != NumOperands; ++i) {
+ Exprs.push_back(cast<Expr>(Reader.ReadSubStmt()));
+ ConstraintsData.push_back(ReadString(Record, Idx));
+ Constraints.push_back(ConstraintsData.back());
+ }
+
+ S->initialize(Reader.getContext(), AsmStr, AsmToks,
+ Constraints, Exprs, Clobbers);
+}
+
+void ASTStmtReader::VisitCoroutineBodyStmt(CoroutineBodyStmt *S) {
+ // FIXME: Implement coroutine serialization.
+ llvm_unreachable("unimplemented");
+}
+
+void ASTStmtReader::VisitCoreturnStmt(CoreturnStmt *S) {
+ // FIXME: Implement coroutine serialization.
+ llvm_unreachable("unimplemented");
+}
+
+void ASTStmtReader::VisitCoawaitExpr(CoawaitExpr *S) {
+ // FIXME: Implement coroutine serialization.
+ llvm_unreachable("unimplemented");
+}
+
+void ASTStmtReader::VisitCoyieldExpr(CoyieldExpr *S) {
+ // FIXME: Implement coroutine serialization.
+ llvm_unreachable("unimplemented");
+}
+
+void ASTStmtReader::VisitCapturedStmt(CapturedStmt *S) {
+ VisitStmt(S);
+  ++Idx; // Skip NumCaptures; it was already used to allocate the statement.
+ S->setCapturedDecl(ReadDeclAs<CapturedDecl>(Record, Idx));
+ S->setCapturedRegionKind(static_cast<CapturedRegionKind>(Record[Idx++]));
+ S->setCapturedRecordDecl(ReadDeclAs<RecordDecl>(Record, Idx));
+
+ // Capture inits
+ for (CapturedStmt::capture_init_iterator I = S->capture_init_begin(),
+ E = S->capture_init_end();
+ I != E; ++I)
+ *I = Reader.ReadSubExpr();
+
+ // Body
+ S->setCapturedStmt(Reader.ReadSubStmt());
+ S->getCapturedDecl()->setBody(S->getCapturedStmt());
+
+ // Captures
+ for (auto &I : S->captures()) {
+ I.VarAndKind.setPointer(ReadDeclAs<VarDecl>(Record, Idx));
+ I.VarAndKind
+ .setInt(static_cast<CapturedStmt::VariableCaptureKind>(Record[Idx++]));
+ I.Loc = ReadSourceLocation(Record, Idx);
+ }
+}
+
+void ASTStmtReader::VisitExpr(Expr *E) {
+ VisitStmt(E);
+ E->setType(Reader.readType(F, Record, Idx));
+ E->setTypeDependent(Record[Idx++]);
+ E->setValueDependent(Record[Idx++]);
+ E->setInstantiationDependent(Record[Idx++]);
+ E->ExprBits.ContainsUnexpandedParameterPack = Record[Idx++];
+ E->setValueKind(static_cast<ExprValueKind>(Record[Idx++]));
+ E->setObjectKind(static_cast<ExprObjectKind>(Record[Idx++]));
+ assert(Idx == NumExprFields && "Incorrect expression field count");
+}
+
+void ASTStmtReader::VisitPredefinedExpr(PredefinedExpr *E) {
+ VisitExpr(E);
+ E->setLocation(ReadSourceLocation(Record, Idx));
+ E->Type = (PredefinedExpr::IdentType)Record[Idx++];
+ E->FnName = cast_or_null<StringLiteral>(Reader.ReadSubExpr());
+}
+
+void ASTStmtReader::VisitDeclRefExpr(DeclRefExpr *E) {
+ VisitExpr(E);
+
+ E->DeclRefExprBits.HasQualifier = Record[Idx++];
+ E->DeclRefExprBits.HasFoundDecl = Record[Idx++];
+ E->DeclRefExprBits.HasTemplateKWAndArgsInfo = Record[Idx++];
+ E->DeclRefExprBits.HadMultipleCandidates = Record[Idx++];
+ E->DeclRefExprBits.RefersToEnclosingVariableOrCapture = Record[Idx++];
+ unsigned NumTemplateArgs = 0;
+ if (E->hasTemplateKWAndArgsInfo())
+ NumTemplateArgs = Record[Idx++];
+
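+  // Each trailing object is only present when the corresponding flag read
+  // above is set; deserialize them in the order they are laid out in memory.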
+ if (E->hasQualifier())
+ new (E->getTrailingObjects<NestedNameSpecifierLoc>())
+ NestedNameSpecifierLoc(
+ Reader.ReadNestedNameSpecifierLoc(F, Record, Idx));
+
+ if (E->hasFoundDecl())
+ *E->getTrailingObjects<NamedDecl *>() = ReadDeclAs<NamedDecl>(Record, Idx);
+
+ if (E->hasTemplateKWAndArgsInfo())
+ ReadTemplateKWAndArgsInfo(
+ *E->getTrailingObjects<ASTTemplateKWAndArgsInfo>(),
+ E->getTrailingObjects<TemplateArgumentLoc>(), NumTemplateArgs);
+
+ E->setDecl(ReadDeclAs<ValueDecl>(Record, Idx));
+ E->setLocation(ReadSourceLocation(Record, Idx));
+ ReadDeclarationNameLoc(E->DNLoc, E->getDecl()->getDeclName(), Record, Idx);
+}
+
+void ASTStmtReader::VisitIntegerLiteral(IntegerLiteral *E) {
+ VisitExpr(E);
+ E->setLocation(ReadSourceLocation(Record, Idx));
+ E->setValue(Reader.getContext(), Reader.ReadAPInt(Record, Idx));
+}
+
+void ASTStmtReader::VisitFloatingLiteral(FloatingLiteral *E) {
+ VisitExpr(E);
+ E->setRawSemantics(static_cast<Stmt::APFloatSemantics>(Record[Idx++]));
+ E->setExact(Record[Idx++]);
+ E->setValue(Reader.getContext(),
+ Reader.ReadAPFloat(Record, E->getSemantics(), Idx));
+ E->setLocation(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitImaginaryLiteral(ImaginaryLiteral *E) {
+ VisitExpr(E);
+ E->setSubExpr(Reader.ReadSubExpr());
+}
+
+void ASTStmtReader::VisitStringLiteral(StringLiteral *E) {
+ VisitExpr(E);
+ unsigned Len = Record[Idx++];
+ assert(Record[Idx] == E->getNumConcatenated() &&
+ "Wrong number of concatenated tokens!");
+ ++Idx;
+ StringLiteral::StringKind kind =
+ static_cast<StringLiteral::StringKind>(Record[Idx++]);
+ bool isPascal = Record[Idx++];
+
+ // Read string data
+ SmallString<16> Str(&Record[Idx], &Record[Idx] + Len);
+ E->setString(Reader.getContext(), Str, kind, isPascal);
+ Idx += Len;
+
+ // Read source locations
+ for (unsigned I = 0, N = E->getNumConcatenated(); I != N; ++I)
+ E->setStrTokenLoc(I, ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitCharacterLiteral(CharacterLiteral *E) {
+ VisitExpr(E);
+ E->setValue(Record[Idx++]);
+ E->setLocation(ReadSourceLocation(Record, Idx));
+ E->setKind(static_cast<CharacterLiteral::CharacterKind>(Record[Idx++]));
+}
+
+void ASTStmtReader::VisitParenExpr(ParenExpr *E) {
+ VisitExpr(E);
+ E->setLParen(ReadSourceLocation(Record, Idx));
+ E->setRParen(ReadSourceLocation(Record, Idx));
+ E->setSubExpr(Reader.ReadSubExpr());
+}
+
+void ASTStmtReader::VisitParenListExpr(ParenListExpr *E) {
+ VisitExpr(E);
+ unsigned NumExprs = Record[Idx++];
+ E->Exprs = new (Reader.getContext()) Stmt*[NumExprs];
+ for (unsigned i = 0; i != NumExprs; ++i)
+ E->Exprs[i] = Reader.ReadSubStmt();
+ E->NumExprs = NumExprs;
+ E->LParenLoc = ReadSourceLocation(Record, Idx);
+ E->RParenLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitUnaryOperator(UnaryOperator *E) {
+ VisitExpr(E);
+ E->setSubExpr(Reader.ReadSubExpr());
+ E->setOpcode((UnaryOperator::Opcode)Record[Idx++]);
+ E->setOperatorLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitOffsetOfExpr(OffsetOfExpr *E) {
+ VisitExpr(E);
+ assert(E->getNumComponents() == Record[Idx]);
+ ++Idx;
+ assert(E->getNumExpressions() == Record[Idx]);
+ ++Idx;
+ E->setOperatorLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+ E->setTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
+ for (unsigned I = 0, N = E->getNumComponents(); I != N; ++I) {
+ OffsetOfNode::Kind Kind = static_cast<OffsetOfNode::Kind>(Record[Idx++]);
+ SourceLocation Start = ReadSourceLocation(Record, Idx);
+ SourceLocation End = ReadSourceLocation(Record, Idx);
+ switch (Kind) {
+ case OffsetOfNode::Array:
+ E->setComponent(I, OffsetOfNode(Start, Record[Idx++], End));
+ break;
+
+ case OffsetOfNode::Field:
+ E->setComponent(
+ I, OffsetOfNode(Start, ReadDeclAs<FieldDecl>(Record, Idx), End));
+ break;
+
+ case OffsetOfNode::Identifier:
+ E->setComponent(
+ I,
+ OffsetOfNode(Start, Reader.GetIdentifierInfo(F, Record, Idx), End));
+ break;
+
+ case OffsetOfNode::Base: {
+ CXXBaseSpecifier *Base = new (Reader.getContext()) CXXBaseSpecifier();
+ *Base = Reader.ReadCXXBaseSpecifier(F, Record, Idx);
+ E->setComponent(I, OffsetOfNode(Base));
+ break;
+ }
+ }
+ }
+
+ for (unsigned I = 0, N = E->getNumExpressions(); I != N; ++I)
+ E->setIndexExpr(I, Reader.ReadSubExpr());
+}
+
+void ASTStmtReader::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
+ VisitExpr(E);
+ E->setKind(static_cast<UnaryExprOrTypeTrait>(Record[Idx++]));
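+ // A zero record entry means the argument is an expression; otherwise it
+ // encodes a type.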
+ if (Record[Idx] == 0) {
+ E->setArgument(Reader.ReadSubExpr());
+ ++Idx;
+ } else {
+ E->setArgument(GetTypeSourceInfo(Record, Idx));
+ }
+ E->setOperatorLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ VisitExpr(E);
+ E->setLHS(Reader.ReadSubExpr());
+ E->setRHS(Reader.ReadSubExpr());
+ E->setRBracketLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitOMPArraySectionExpr(OMPArraySectionExpr *E) {
+ VisitExpr(E);
+ E->setBase(Reader.ReadSubExpr());
+ E->setLowerBound(Reader.ReadSubExpr());
+ E->setLength(Reader.ReadSubExpr());
+ E->setColonLoc(ReadSourceLocation(Record, Idx));
+ E->setRBracketLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitCallExpr(CallExpr *E) {
+ VisitExpr(E);
+ E->setNumArgs(Reader.getContext(), Record[Idx++]);
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+ E->setCallee(Reader.ReadSubExpr());
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ E->setArg(I, Reader.ReadSubExpr());
+}
+
+void ASTStmtReader::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
+ VisitCallExpr(E);
+}
+
+void ASTStmtReader::VisitMemberExpr(MemberExpr *E) {
+ // Don't call VisitExpr; this node was fully initialized at creation.
+ assert(E->getStmtClass() == Stmt::MemberExprClass &&
+ "Subclasses must advance Idx themselves!");
+}
+
+void ASTStmtReader::VisitObjCIsaExpr(ObjCIsaExpr *E) {
+ VisitExpr(E);
+ E->setBase(Reader.ReadSubExpr());
+ E->setIsaMemberLoc(ReadSourceLocation(Record, Idx));
+ E->setOpLoc(ReadSourceLocation(Record, Idx));
+ E->setArrow(Record[Idx++]);
+}
+
+void ASTStmtReader::
+VisitObjCIndirectCopyRestoreExpr(ObjCIndirectCopyRestoreExpr *E) {
+ VisitExpr(E);
+ E->Operand = Reader.ReadSubExpr();
+ E->setShouldCopy(Record[Idx++]);
+}
+
+void ASTStmtReader::VisitObjCBridgedCastExpr(ObjCBridgedCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ E->LParenLoc = ReadSourceLocation(Record, Idx);
+ E->BridgeKeywordLoc = ReadSourceLocation(Record, Idx);
+ E->Kind = Record[Idx++];
+}
+
+void ASTStmtReader::VisitCastExpr(CastExpr *E) {
+ VisitExpr(E);
+ unsigned NumBaseSpecs = Record[Idx++];
+ assert(NumBaseSpecs == E->path_size());
+ E->setSubExpr(Reader.ReadSubExpr());
+ E->setCastKind((CastKind)Record[Idx++]);
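+ // Rebuild the base-class path used by casts that traverse a class
+ // hierarchy.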
+ CastExpr::path_iterator BaseI = E->path_begin();
+ while (NumBaseSpecs--) {
+ CXXBaseSpecifier *BaseSpec = new (Reader.getContext()) CXXBaseSpecifier;
+ *BaseSpec = Reader.ReadCXXBaseSpecifier(F, Record, Idx);
+ *BaseI++ = BaseSpec;
+ }
+}
+
+void ASTStmtReader::VisitBinaryOperator(BinaryOperator *E) {
+ VisitExpr(E);
+ E->setLHS(Reader.ReadSubExpr());
+ E->setRHS(Reader.ReadSubExpr());
+ E->setOpcode((BinaryOperator::Opcode)Record[Idx++]);
+ E->setOperatorLoc(ReadSourceLocation(Record, Idx));
+ E->setFPContractable((bool)Record[Idx++]);
+}
+
+void ASTStmtReader::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
+ VisitBinaryOperator(E);
+ E->setComputationLHSType(Reader.readType(F, Record, Idx));
+ E->setComputationResultType(Reader.readType(F, Record, Idx));
+}
+
+void ASTStmtReader::VisitConditionalOperator(ConditionalOperator *E) {
+ VisitExpr(E);
+ E->SubExprs[ConditionalOperator::COND] = Reader.ReadSubExpr();
+ E->SubExprs[ConditionalOperator::LHS] = Reader.ReadSubExpr();
+ E->SubExprs[ConditionalOperator::RHS] = Reader.ReadSubExpr();
+ E->QuestionLoc = ReadSourceLocation(Record, Idx);
+ E->ColonLoc = ReadSourceLocation(Record, Idx);
+}
+
+void
+ASTStmtReader::VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
+ VisitExpr(E);
+ E->OpaqueValue = cast<OpaqueValueExpr>(Reader.ReadSubExpr());
+ E->SubExprs[BinaryConditionalOperator::COMMON] = Reader.ReadSubExpr();
+ E->SubExprs[BinaryConditionalOperator::COND] = Reader.ReadSubExpr();
+ E->SubExprs[BinaryConditionalOperator::LHS] = Reader.ReadSubExpr();
+ E->SubExprs[BinaryConditionalOperator::RHS] = Reader.ReadSubExpr();
+ E->QuestionLoc = ReadSourceLocation(Record, Idx);
+ E->ColonLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ VisitCastExpr(E);
+}
+
+void ASTStmtReader::VisitExplicitCastExpr(ExplicitCastExpr *E) {
+ VisitCastExpr(E);
+ E->setTypeInfoAsWritten(GetTypeSourceInfo(Record, Idx));
+}
+
+void ASTStmtReader::VisitCStyleCastExpr(CStyleCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ E->setLParenLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ VisitExpr(E);
+ E->setLParenLoc(ReadSourceLocation(Record, Idx));
+ E->setTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
+ E->setInitializer(Reader.ReadSubExpr());
+ E->setFileScope(Record[Idx++]);
+}
+
+void ASTStmtReader::VisitExtVectorElementExpr(ExtVectorElementExpr *E) {
+ VisitExpr(E);
+ E->setBase(Reader.ReadSubExpr());
+ E->setAccessor(Reader.GetIdentifierInfo(F, Record, Idx));
+ E->setAccessorLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitInitListExpr(InitListExpr *E) {
+ VisitExpr(E);
+ if (InitListExpr *SyntForm = cast_or_null<InitListExpr>(Reader.ReadSubStmt()))
+ E->setSyntacticForm(SyntForm);
+ E->setLBraceLoc(ReadSourceLocation(Record, Idx));
+ E->setRBraceLoc(ReadSourceLocation(Record, Idx));
+ bool isArrayFiller = Record[Idx++];
+ Expr *filler = nullptr;
+ if (isArrayFiller) {
+ filler = Reader.ReadSubExpr();
+ E->ArrayFillerOrUnionFieldInit = filler;
+ } else
+ E->ArrayFillerOrUnionFieldInit = ReadDeclAs<FieldDecl>(Record, Idx);
+ E->sawArrayRangeDesignator(Record[Idx++]);
+ unsigned NumInits = Record[Idx++];
+ E->reserveInits(Reader.getContext(), NumInits);
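+ // When the list has an array filler, a null serialized init stands for
+ // the filler itself.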
+ if (isArrayFiller) {
+ for (unsigned I = 0; I != NumInits; ++I) {
+ Expr *init = Reader.ReadSubExpr();
+ E->updateInit(Reader.getContext(), I, init ? init : filler);
+ }
+ } else {
+ for (unsigned I = 0; I != NumInits; ++I)
+ E->updateInit(Reader.getContext(), I, Reader.ReadSubExpr());
+ }
+}
+
+void ASTStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
+ typedef DesignatedInitExpr::Designator Designator;
+
+ VisitExpr(E);
+ unsigned NumSubExprs = Record[Idx++];
+ assert(NumSubExprs == E->getNumSubExprs() && "Wrong number of subexprs");
+ for (unsigned I = 0; I != NumSubExprs; ++I)
+ E->setSubExpr(I, Reader.ReadSubExpr());
+ E->setEqualOrColonLoc(ReadSourceLocation(Record, Idx));
+ E->setGNUSyntax(Record[Idx++]);
+
+ SmallVector<Designator, 4> Designators;
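+ // The remaining entries in the record encode the designators.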
+ while (Idx < Record.size()) {
+ switch ((DesignatorTypes)Record[Idx++]) {
+ case DESIG_FIELD_DECL: {
+ FieldDecl *Field = ReadDeclAs<FieldDecl>(Record, Idx);
+ SourceLocation DotLoc = ReadSourceLocation(Record, Idx);
+ SourceLocation FieldLoc = ReadSourceLocation(Record, Idx);
+ Designators.push_back(Designator(Field->getIdentifier(), DotLoc,
+ FieldLoc));
+ Designators.back().setField(Field);
+ break;
+ }
+
+ case DESIG_FIELD_NAME: {
+ const IdentifierInfo *Name = Reader.GetIdentifierInfo(F, Record, Idx);
+ SourceLocation DotLoc = ReadSourceLocation(Record, Idx);
+ SourceLocation FieldLoc = ReadSourceLocation(Record, Idx);
+ Designators.push_back(Designator(Name, DotLoc, FieldLoc));
+ break;
+ }
+
+ case DESIG_ARRAY: {
+ unsigned Index = Record[Idx++];
+ SourceLocation LBracketLoc = ReadSourceLocation(Record, Idx);
+ SourceLocation RBracketLoc = ReadSourceLocation(Record, Idx);
+ Designators.push_back(Designator(Index, LBracketLoc, RBracketLoc));
+ break;
+ }
+
+ case DESIG_ARRAY_RANGE: {
+ unsigned Index = Record[Idx++];
+ SourceLocation LBracketLoc = ReadSourceLocation(Record, Idx);
+ SourceLocation EllipsisLoc = ReadSourceLocation(Record, Idx);
+ SourceLocation RBracketLoc = ReadSourceLocation(Record, Idx);
+ Designators.push_back(Designator(Index, LBracketLoc, EllipsisLoc,
+ RBracketLoc));
+ break;
+ }
+ }
+ }
+ E->setDesignators(Reader.getContext(),
+ Designators.data(), Designators.size());
+}
+
+void ASTStmtReader::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
+ VisitExpr(E);
+ E->setBase(Reader.ReadSubExpr());
+ E->setUpdater(Reader.ReadSubExpr());
+}
+
+void ASTStmtReader::VisitNoInitExpr(NoInitExpr *E) {
+ VisitExpr(E);
+}
+
+void ASTStmtReader::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+ VisitExpr(E);
+}
+
+void ASTStmtReader::VisitVAArgExpr(VAArgExpr *E) {
+ VisitExpr(E);
+ E->setSubExpr(Reader.ReadSubExpr());
+ E->setWrittenTypeInfo(GetTypeSourceInfo(Record, Idx));
+ E->setBuiltinLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+ E->setIsMicrosoftABI(Record[Idx++]);
+}
+
+void ASTStmtReader::VisitAddrLabelExpr(AddrLabelExpr *E) {
+ VisitExpr(E);
+ E->setAmpAmpLoc(ReadSourceLocation(Record, Idx));
+ E->setLabelLoc(ReadSourceLocation(Record, Idx));
+ E->setLabel(ReadDeclAs<LabelDecl>(Record, Idx));
+}
+
+void ASTStmtReader::VisitStmtExpr(StmtExpr *E) {
+ VisitExpr(E);
+ E->setLParenLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+ E->setSubStmt(cast_or_null<CompoundStmt>(Reader.ReadSubStmt()));
+}
+
+void ASTStmtReader::VisitChooseExpr(ChooseExpr *E) {
+ VisitExpr(E);
+ E->setCond(Reader.ReadSubExpr());
+ E->setLHS(Reader.ReadSubExpr());
+ E->setRHS(Reader.ReadSubExpr());
+ E->setBuiltinLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+ E->setIsConditionTrue(Record[Idx++]);
+}
+
+void ASTStmtReader::VisitGNUNullExpr(GNUNullExpr *E) {
+ VisitExpr(E);
+ E->setTokenLocation(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
+ VisitExpr(E);
+ SmallVector<Expr *, 16> Exprs;
+ unsigned NumExprs = Record[Idx++];
+ while (NumExprs--)
+ Exprs.push_back(Reader.ReadSubExpr());
+ E->setExprs(Reader.getContext(), Exprs);
+ E->setBuiltinLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitConvertVectorExpr(ConvertVectorExpr *E) {
+ VisitExpr(E);
+ E->BuiltinLoc = ReadSourceLocation(Record, Idx);
+ E->RParenLoc = ReadSourceLocation(Record, Idx);
+ E->TInfo = GetTypeSourceInfo(Record, Idx);
+ E->SrcExpr = Reader.ReadSubExpr();
+}
+
+void ASTStmtReader::VisitBlockExpr(BlockExpr *E) {
+ VisitExpr(E);
+ E->setBlockDecl(ReadDeclAs<BlockDecl>(Record, Idx));
+}
+
+void ASTStmtReader::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
+ VisitExpr(E);
+ E->NumAssocs = Record[Idx++];
+ E->AssocTypes = new (Reader.getContext()) TypeSourceInfo*[E->NumAssocs];
+ E->SubExprs = new (Reader.getContext())
+ Stmt*[GenericSelectionExpr::END_EXPR + E->NumAssocs];
+
+ E->SubExprs[GenericSelectionExpr::CONTROLLING] = Reader.ReadSubExpr();
+ for (unsigned I = 0, N = E->getNumAssocs(); I != N; ++I) {
+ E->AssocTypes[I] = GetTypeSourceInfo(Record, Idx);
+ E->SubExprs[GenericSelectionExpr::END_EXPR+I] = Reader.ReadSubExpr();
+ }
+ E->ResultIndex = Record[Idx++];
+
+ E->GenericLoc = ReadSourceLocation(Record, Idx);
+ E->DefaultLoc = ReadSourceLocation(Record, Idx);
+ E->RParenLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ VisitExpr(E);
+ unsigned numSemanticExprs = Record[Idx++];
+ assert(numSemanticExprs + 1 == E->PseudoObjectExprBits.NumSubExprs);
+ E->PseudoObjectExprBits.ResultIndex = Record[Idx++];
+
+ // Read the syntactic expression.
+ E->getSubExprsBuffer()[0] = Reader.ReadSubExpr();
+
+ // Read all the semantic expressions.
+ for (unsigned i = 0; i != numSemanticExprs; ++i) {
+ Expr *subExpr = Reader.ReadSubExpr();
+ E->getSubExprsBuffer()[i+1] = subExpr;
+ }
+}
+
+void ASTStmtReader::VisitAtomicExpr(AtomicExpr *E) {
+ VisitExpr(E);
+ E->Op = AtomicExpr::AtomicOp(Record[Idx++]);
+ E->NumSubExprs = AtomicExpr::getNumSubExprs(E->Op);
+ for (unsigned I = 0; I != E->NumSubExprs; ++I)
+ E->SubExprs[I] = Reader.ReadSubExpr();
+ E->BuiltinLoc = ReadSourceLocation(Record, Idx);
+ E->RParenLoc = ReadSourceLocation(Record, Idx);
+}
+
+//===----------------------------------------------------------------------===//
+// Objective-C Expressions and Statements
+//===----------------------------------------------------------------------===//
+
+void ASTStmtReader::VisitObjCStringLiteral(ObjCStringLiteral *E) {
+ VisitExpr(E);
+ E->setString(cast<StringLiteral>(Reader.ReadSubStmt()));
+ E->setAtLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
+ VisitExpr(E);
+ // The boxed subexpression may be one of several literal kinds
+ // (IntegerLiteral, FloatingLiteral, StringLiteral, etc.).
+ E->SubExpr = Reader.ReadSubStmt();
+ E->BoxingMethod = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+ E->Range = ReadSourceRange(Record, Idx);
+}
+
+void ASTStmtReader::VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
+ VisitExpr(E);
+ unsigned NumElements = Record[Idx++];
+ assert(NumElements == E->getNumElements() && "Wrong number of elements");
+ Expr **Elements = E->getElements();
+ for (unsigned I = 0, N = NumElements; I != N; ++I)
+ Elements[I] = Reader.ReadSubExpr();
+ E->ArrayWithObjectsMethod = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+ E->Range = ReadSourceRange(Record, Idx);
+}
+
+void ASTStmtReader::VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
+ VisitExpr(E);
+ unsigned NumElements = Record[Idx++];
+ assert(NumElements == E->getNumElements() && "Wrong number of elements");
+ bool HasPackExpansions = Record[Idx++];
+ assert(HasPackExpansions == E->HasPackExpansions &&
+ "Pack expansion mismatch");
+ ObjCDictionaryLiteral::KeyValuePair *KeyValues =
+ E->getTrailingObjects<ObjCDictionaryLiteral::KeyValuePair>();
+ ObjCDictionaryLiteral::ExpansionData *Expansions =
+ E->getTrailingObjects<ObjCDictionaryLiteral::ExpansionData>();
+ for (unsigned I = 0; I != NumElements; ++I) {
+ KeyValues[I].Key = Reader.ReadSubExpr();
+ KeyValues[I].Value = Reader.ReadSubExpr();
+ if (HasPackExpansions) {
+ Expansions[I].EllipsisLoc = ReadSourceLocation(Record, Idx);
+ Expansions[I].NumExpansionsPlusOne = Record[Idx++];
+ }
+ }
+ E->DictWithObjectsMethod = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+ E->Range = ReadSourceRange(Record, Idx);
+}
+
+void ASTStmtReader::VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
+ VisitExpr(E);
+ E->setEncodedTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
+ E->setAtLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
+ VisitExpr(E);
+ E->setSelector(Reader.ReadSelector(F, Record, Idx));
+ E->setAtLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
+ VisitExpr(E);
+ E->setProtocol(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
+ E->setAtLoc(ReadSourceLocation(Record, Idx));
+ E->ProtoLoc = ReadSourceLocation(Record, Idx);
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ VisitExpr(E);
+ E->setDecl(ReadDeclAs<ObjCIvarDecl>(Record, Idx));
+ E->setLocation(ReadSourceLocation(Record, Idx));
+ E->setOpLoc(ReadSourceLocation(Record, Idx));
+ E->setBase(Reader.ReadSubExpr());
+ E->setIsArrow(Record[Idx++]);
+ E->setIsFreeIvar(Record[Idx++]);
+}
+
+void ASTStmtReader::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+ VisitExpr(E);
+ unsigned MethodRefFlags = Record[Idx++];
+ bool Implicit = Record[Idx++] != 0;
+ if (Implicit) {
+ ObjCMethodDecl *Getter = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+ ObjCMethodDecl *Setter = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+ E->setImplicitProperty(Getter, Setter, MethodRefFlags);
+ } else {
+ E->setExplicitProperty(ReadDeclAs<ObjCPropertyDecl>(Record, Idx),
+ MethodRefFlags);
+ }
+ E->setLocation(ReadSourceLocation(Record, Idx));
+ E->setReceiverLocation(ReadSourceLocation(Record, Idx));
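+ // Receiver kind: 0 = base expression, 1 = super type, 2 = class interface.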
+ switch (Record[Idx++]) {
+ case 0:
+ E->setBase(Reader.ReadSubExpr());
+ break;
+ case 1:
+ E->setSuperReceiver(Reader.readType(F, Record, Idx));
+ break;
+ case 2:
+ E->setClassReceiver(ReadDeclAs<ObjCInterfaceDecl>(Record, Idx));
+ break;
+ }
+}
+
+void ASTStmtReader::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *E) {
+ VisitExpr(E);
+ E->setRBracket(ReadSourceLocation(Record, Idx));
+ E->setBaseExpr(Reader.ReadSubExpr());
+ E->setKeyExpr(Reader.ReadSubExpr());
+ E->GetAtIndexMethodDecl = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+ E->SetAtIndexMethodDecl = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+}
+
+void ASTStmtReader::VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ VisitExpr(E);
+ assert(Record[Idx] == E->getNumArgs());
+ ++Idx;
+ unsigned NumStoredSelLocs = Record[Idx++];
+ E->SelLocsKind = Record[Idx++];
+ E->setDelegateInitCall(Record[Idx++]);
+ E->IsImplicit = Record[Idx++];
+ ObjCMessageExpr::ReceiverKind Kind
+ = static_cast<ObjCMessageExpr::ReceiverKind>(Record[Idx++]);
+ switch (Kind) {
+ case ObjCMessageExpr::Instance:
+ E->setInstanceReceiver(Reader.ReadSubExpr());
+ break;
+
+ case ObjCMessageExpr::Class:
+ E->setClassReceiver(GetTypeSourceInfo(Record, Idx));
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ case ObjCMessageExpr::SuperInstance: {
+ QualType T = Reader.readType(F, Record, Idx);
+ SourceLocation SuperLoc = ReadSourceLocation(Record, Idx);
+ E->setSuper(SuperLoc, T, Kind == ObjCMessageExpr::SuperInstance);
+ break;
+ }
+ }
+
+ assert(Kind == E->getReceiverKind());
+
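+ // A nonzero flag means the resolved method declaration was serialized;
+ // otherwise only the selector was recorded.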
+ if (Record[Idx++])
+ E->setMethodDecl(ReadDeclAs<ObjCMethodDecl>(Record, Idx));
+ else
+ E->setSelector(Reader.ReadSelector(F, Record, Idx));
+
+ E->LBracLoc = ReadSourceLocation(Record, Idx);
+ E->RBracLoc = ReadSourceLocation(Record, Idx);
+
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ E->setArg(I, Reader.ReadSubExpr());
+
+ SourceLocation *Locs = E->getStoredSelLocs();
+ for (unsigned I = 0; I != NumStoredSelLocs; ++I)
+ Locs[I] = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
+ VisitStmt(S);
+ S->setElement(Reader.ReadSubStmt());
+ S->setCollection(Reader.ReadSubExpr());
+ S->setBody(Reader.ReadSubStmt());
+ S->setForLoc(ReadSourceLocation(Record, Idx));
+ S->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+ VisitStmt(S);
+ S->setCatchBody(Reader.ReadSubStmt());
+ S->setCatchParamDecl(ReadDeclAs<VarDecl>(Record, Idx));
+ S->setAtCatchLoc(ReadSourceLocation(Record, Idx));
+ S->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
+ VisitStmt(S);
+ S->setFinallyBody(Reader.ReadSubStmt());
+ S->setAtFinallyLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
+ VisitStmt(S);
+ S->setSubStmt(Reader.ReadSubStmt());
+ S->setAtLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
+ VisitStmt(S);
+ assert(Record[Idx] == S->getNumCatchStmts());
+ ++Idx;
+ bool HasFinally = Record[Idx++];
+ S->setTryBody(Reader.ReadSubStmt());
+ for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I)
+ S->setCatchStmt(I, cast_or_null<ObjCAtCatchStmt>(Reader.ReadSubStmt()));
+
+ if (HasFinally)
+ S->setFinallyStmt(Reader.ReadSubStmt());
+ S->setAtTryLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
+ VisitStmt(S);
+ S->setSynchExpr(Reader.ReadSubStmt());
+ S->setSynchBody(Reader.ReadSubStmt());
+ S->setAtSynchronizedLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
+ VisitStmt(S);
+ S->setThrowExpr(Reader.ReadSubStmt());
+ S->setThrowLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCBoolLiteralExpr(ObjCBoolLiteralExpr *E) {
+ VisitExpr(E);
+ E->setValue(Record[Idx++]);
+ E->setLocation(ReadSourceLocation(Record, Idx));
+}
+
+//===----------------------------------------------------------------------===//
+// C++ Expressions and Statements
+//===----------------------------------------------------------------------===//
+
+void ASTStmtReader::VisitCXXCatchStmt(CXXCatchStmt *S) {
+ VisitStmt(S);
+ S->CatchLoc = ReadSourceLocation(Record, Idx);
+ S->ExceptionDecl = ReadDeclAs<VarDecl>(Record, Idx);
+ S->HandlerBlock = Reader.ReadSubStmt();
+}
+
+void ASTStmtReader::VisitCXXTryStmt(CXXTryStmt *S) {
+ VisitStmt(S);
+ assert(Record[Idx] == S->getNumHandlers() && "NumStmtFields is wrong?");
+ ++Idx;
+ S->TryLoc = ReadSourceLocation(Record, Idx);
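+ // Slot 0 holds the try block; the handlers follow it in order.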
+ S->getStmts()[0] = Reader.ReadSubStmt();
+ for (unsigned i = 0, e = S->getNumHandlers(); i != e; ++i)
+ S->getStmts()[i + 1] = Reader.ReadSubStmt();
+}
+
+void ASTStmtReader::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
+ VisitStmt(S);
+ S->ForLoc = ReadSourceLocation(Record, Idx);
+ S->CoawaitLoc = ReadSourceLocation(Record, Idx);
+ S->ColonLoc = ReadSourceLocation(Record, Idx);
+ S->RParenLoc = ReadSourceLocation(Record, Idx);
+ S->setRangeStmt(Reader.ReadSubStmt());
+ S->setBeginEndStmt(Reader.ReadSubStmt());
+ S->setCond(Reader.ReadSubExpr());
+ S->setInc(Reader.ReadSubExpr());
+ S->setLoopVarStmt(Reader.ReadSubStmt());
+ S->setBody(Reader.ReadSubStmt());
+}
+
+void ASTStmtReader::VisitMSDependentExistsStmt(MSDependentExistsStmt *S) {
+ VisitStmt(S);
+ S->KeywordLoc = ReadSourceLocation(Record, Idx);
+ S->IsIfExists = Record[Idx++];
+ S->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ ReadDeclarationNameInfo(S->NameInfo, Record, Idx);
+ S->SubStmt = Reader.ReadSubStmt();
+}
+
+void ASTStmtReader::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
+ VisitCallExpr(E);
+ E->Operator = (OverloadedOperatorKind)Record[Idx++];
+ E->Range = Reader.ReadSourceRange(F, Record, Idx);
+ E->setFPContractable((bool)Record[Idx++]);
+}
+
+void ASTStmtReader::VisitCXXConstructExpr(CXXConstructExpr *E) {
+ VisitExpr(E);
+ E->NumArgs = Record[Idx++];
+ if (E->NumArgs)
+ E->Args = new (Reader.getContext()) Stmt*[E->NumArgs];
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ E->setArg(I, Reader.ReadSubExpr());
+ E->setConstructor(ReadDeclAs<CXXConstructorDecl>(Record, Idx));
+ E->setLocation(ReadSourceLocation(Record, Idx));
+ E->setElidable(Record[Idx++]);
+ E->setHadMultipleCandidates(Record[Idx++]);
+ E->setListInitialization(Record[Idx++]);
+ E->setStdInitListInitialization(Record[Idx++]);
+ E->setRequiresZeroInitialization(Record[Idx++]);
+ E->setConstructionKind((CXXConstructExpr::ConstructionKind)Record[Idx++]);
+ E->ParenOrBraceRange = ReadSourceRange(Record, Idx);
+}
+
+void ASTStmtReader::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
+ VisitCXXConstructExpr(E);
+ E->Type = GetTypeSourceInfo(Record, Idx);
+}
+
+void ASTStmtReader::VisitLambdaExpr(LambdaExpr *E) {
+ VisitExpr(E);
+ unsigned NumCaptures = Record[Idx++];
+ assert(NumCaptures == E->NumCaptures);
+ (void)NumCaptures;
+ unsigned NumArrayIndexVars = Record[Idx++];
+ E->IntroducerRange = ReadSourceRange(Record, Idx);
+ E->CaptureDefault = static_cast<LambdaCaptureDefault>(Record[Idx++]);
+ E->CaptureDefaultLoc = ReadSourceLocation(Record, Idx);
+ E->ExplicitParams = Record[Idx++];
+ E->ExplicitResultType = Record[Idx++];
+ E->ClosingBrace = ReadSourceLocation(Record, Idx);
+
+ // Read capture initializers.
+ for (LambdaExpr::capture_init_iterator C = E->capture_init_begin(),
+ CEnd = E->capture_init_end();
+ C != CEnd; ++C)
+ *C = Reader.ReadSubExpr();
+
+ // Read array capture index variables.
+ if (NumArrayIndexVars > 0) {
+ unsigned *ArrayIndexStarts = E->getArrayIndexStarts();
+ for (unsigned I = 0; I != NumCaptures + 1; ++I)
+ ArrayIndexStarts[I] = Record[Idx++];
+
+ VarDecl **ArrayIndexVars = E->getArrayIndexVars();
+ for (unsigned I = 0; I != NumArrayIndexVars; ++I)
+ ArrayIndexVars[I] = ReadDeclAs<VarDecl>(Record, Idx);
+ }
+}
+
+void
+ASTStmtReader::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
+ VisitExpr(E);
+ E->SubExpr = Reader.ReadSubExpr();
+}
+
+void ASTStmtReader::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ SourceRange R = ReadSourceRange(Record, Idx);
+ E->Loc = R.getBegin();
+ E->RParenLoc = R.getEnd();
+ R = ReadSourceRange(Record, Idx);
+ E->AngleBrackets = R;
+}
+
+void ASTStmtReader::VisitCXXStaticCastExpr(CXXStaticCastExpr *E) {
+ return VisitCXXNamedCastExpr(E);
+}
+
+void ASTStmtReader::VisitCXXDynamicCastExpr(CXXDynamicCastExpr *E) {
+ return VisitCXXNamedCastExpr(E);
+}
+
+void ASTStmtReader::VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *E) {
+ return VisitCXXNamedCastExpr(E);
+}
+
+void ASTStmtReader::VisitCXXConstCastExpr(CXXConstCastExpr *E) {
+ return VisitCXXNamedCastExpr(E);
+}
+
+void ASTStmtReader::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ E->setLParenLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitUserDefinedLiteral(UserDefinedLiteral *E) {
+ VisitCallExpr(E);
+ E->UDSuffixLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
+ VisitExpr(E);
+ E->setValue(Record[Idx++]);
+ E->setLocation(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) {
+ VisitExpr(E);
+ E->setLocation(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
+ VisitExpr(E);
+ E->setSourceRange(ReadSourceRange(Record, Idx));
+ if (E->isTypeOperand()) { // typeid(int)
+ E->setTypeOperandSourceInfo(GetTypeSourceInfo(Record, Idx));
+ return;
+ }
+
+ // typeid(42+2)
+ E->setExprOperand(Reader.ReadSubExpr());
+}
+
+void ASTStmtReader::VisitCXXThisExpr(CXXThisExpr *E) {
+ VisitExpr(E);
+ E->setLocation(ReadSourceLocation(Record, Idx));
+ E->setImplicit(Record[Idx++]);
+}
+
+void ASTStmtReader::VisitCXXThrowExpr(CXXThrowExpr *E) {
+ VisitExpr(E);
+ E->ThrowLoc = ReadSourceLocation(Record, Idx);
+ E->Op = Reader.ReadSubExpr();
+ E->IsThrownVariableInScope = Record[Idx++];
+}
+
+void ASTStmtReader::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
+ VisitExpr(E);
+
+ assert((bool)Record[Idx] == E->Param.getInt() && "We messed up at creation?");
+ ++Idx; // HasOtherExprStored and SubExpr were handled during creation.
+ E->Param.setPointer(ReadDeclAs<ParmVarDecl>(Record, Idx));
+ E->Loc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
+ VisitExpr(E);
+ E->Field = ReadDeclAs<FieldDecl>(Record, Idx);
+ E->Loc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ VisitExpr(E);
+ E->setTemporary(Reader.ReadCXXTemporary(F, Record, Idx));
+ E->setSubExpr(Reader.ReadSubExpr());
+}
+
+void ASTStmtReader::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
+ VisitExpr(E);
+ E->TypeInfo = GetTypeSourceInfo(Record, Idx);
+ E->RParenLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitCXXNewExpr(CXXNewExpr *E) {
+ VisitExpr(E);
+ E->GlobalNew = Record[Idx++];
+ bool isArray = Record[Idx++];
+ E->UsualArrayDeleteWantsSize = Record[Idx++];
+ unsigned NumPlacementArgs = Record[Idx++];
+ E->StoredInitializationStyle = Record[Idx++];
+ E->setOperatorNew(ReadDeclAs<FunctionDecl>(Record, Idx));
+ E->setOperatorDelete(ReadDeclAs<FunctionDecl>(Record, Idx));
+ E->AllocatedTypeInfo = GetTypeSourceInfo(Record, Idx);
+ E->TypeIdParens = ReadSourceRange(Record, Idx);
+ E->Range = ReadSourceRange(Record, Idx);
+ E->DirectInitRange = ReadSourceRange(Record, Idx);
+
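+ // Allocate the combined subexpression array (array size, placement args,
+ // and initializer) before filling it in below.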
+ E->AllocateArgsArray(Reader.getContext(), isArray, NumPlacementArgs,
+ E->StoredInitializationStyle != 0);
+
+ // Install all the subexpressions.
+ for (CXXNewExpr::raw_arg_iterator I = E->raw_arg_begin(),e = E->raw_arg_end();
+ I != e; ++I)
+ *I = Reader.ReadSubStmt();
+}
+
+void ASTStmtReader::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ VisitExpr(E);
+ E->GlobalDelete = Record[Idx++];
+ E->ArrayForm = Record[Idx++];
+ E->ArrayFormAsWritten = Record[Idx++];
+ E->UsualArrayDeleteWantsSize = Record[Idx++];
+ E->OperatorDelete = ReadDeclAs<FunctionDecl>(Record, Idx);
+ E->Argument = Reader.ReadSubExpr();
+ E->Loc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
+ VisitExpr(E);
+
+ E->Base = Reader.ReadSubExpr();
+ E->IsArrow = Record[Idx++];
+ E->OperatorLoc = ReadSourceLocation(Record, Idx);
+ E->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ E->ScopeType = GetTypeSourceInfo(Record, Idx);
+ E->ColonColonLoc = ReadSourceLocation(Record, Idx);
+ E->TildeLoc = ReadSourceLocation(Record, Idx);
+
+ IdentifierInfo *II = Reader.GetIdentifierInfo(F, Record, Idx);
+ if (II)
+ E->setDestroyedType(II, ReadSourceLocation(Record, Idx));
+ else
+ E->setDestroyedType(GetTypeSourceInfo(Record, Idx));
+}
+
+void ASTStmtReader::VisitExprWithCleanups(ExprWithCleanups *E) {
+ VisitExpr(E);
+
+ unsigned NumObjects = Record[Idx++];
+ assert(NumObjects == E->getNumObjects());
+ for (unsigned i = 0; i != NumObjects; ++i)
+ E->getTrailingObjects<BlockDecl *>()[i] =
+ ReadDeclAs<BlockDecl>(Record, Idx);
+
+ E->SubExpr = Reader.ReadSubExpr();
+}
+
+void
+ASTStmtReader::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E){
+ VisitExpr(E);
+
+ if (Record[Idx++]) // HasTemplateKWAndArgsInfo
+ ReadTemplateKWAndArgsInfo(
+ *E->getTrailingObjects<ASTTemplateKWAndArgsInfo>(),
+ E->getTrailingObjects<TemplateArgumentLoc>(),
+ /*NumTemplateArgs=*/Record[Idx++]);
+
+ E->Base = Reader.ReadSubExpr();
+ E->BaseType = Reader.readType(F, Record, Idx);
+ E->IsArrow = Record[Idx++];
+ E->OperatorLoc = ReadSourceLocation(Record, Idx);
+ E->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ E->FirstQualifierFoundInScope = ReadDeclAs<NamedDecl>(Record, Idx);
+ ReadDeclarationNameInfo(E->MemberNameInfo, Record, Idx);
+}
+
+void
+ASTStmtReader::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
+ VisitExpr(E);
+
+ if (Record[Idx++]) // HasTemplateKWAndArgsInfo
+ ReadTemplateKWAndArgsInfo(
+ *E->getTrailingObjects<ASTTemplateKWAndArgsInfo>(),
+ E->getTrailingObjects<TemplateArgumentLoc>(),
+ /*NumTemplateArgs=*/Record[Idx++]);
+
+ E->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ ReadDeclarationNameInfo(E->NameInfo, Record, Idx);
+}
+
+void
+ASTStmtReader::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
+ VisitExpr(E);
+ assert(Record[Idx] == E->arg_size() && "Read wrong record during creation?");
+ ++Idx; // Skip NumArgs; it was consumed at creation.
+ for (unsigned I = 0, N = E->arg_size(); I != N; ++I)
+ E->setArg(I, Reader.ReadSubExpr());
+ E->Type = GetTypeSourceInfo(Record, Idx);
+ E->setLParenLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitOverloadExpr(OverloadExpr *E) {
+ VisitExpr(E);
+
+ if (Record[Idx++]) // HasTemplateKWAndArgsInfo
+ ReadTemplateKWAndArgsInfo(*E->getTrailingASTTemplateKWAndArgsInfo(),
+ E->getTrailingTemplateArgumentLoc(),
+ /*NumTemplateArgs=*/Record[Idx++]);
+
+ unsigned NumDecls = Record[Idx++];
+ UnresolvedSet<8> Decls;
+ for (unsigned i = 0; i != NumDecls; ++i) {
+ NamedDecl *D = ReadDeclAs<NamedDecl>(Record, Idx);
+ AccessSpecifier AS = (AccessSpecifier)Record[Idx++];
+ Decls.addDecl(D, AS);
+ }
+ E->initializeResults(Reader.getContext(), Decls.begin(), Decls.end());
+
+ ReadDeclarationNameInfo(E->NameInfo, Record, Idx);
+ E->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+}
+
+void ASTStmtReader::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
+ VisitOverloadExpr(E);
+ E->IsArrow = Record[Idx++];
+ E->HasUnresolvedUsing = Record[Idx++];
+ E->Base = Reader.ReadSubExpr();
+ E->BaseType = Reader.readType(F, Record, Idx);
+ E->OperatorLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
+ VisitOverloadExpr(E);
+ E->RequiresADL = Record[Idx++];
+ E->Overloaded = Record[Idx++];
+ E->NamingClass = ReadDeclAs<CXXRecordDecl>(Record, Idx);
+}
+
+void ASTStmtReader::VisitTypeTraitExpr(TypeTraitExpr *E) {
+ VisitExpr(E);
+ E->TypeTraitExprBits.NumArgs = Record[Idx++];
+ E->TypeTraitExprBits.Kind = Record[Idx++];
+ E->TypeTraitExprBits.Value = Record[Idx++];
+ SourceRange Range = ReadSourceRange(Record, Idx);
+ E->Loc = Range.getBegin();
+ E->RParenLoc = Range.getEnd();
+
+ TypeSourceInfo **Args = E->getTrailingObjects<TypeSourceInfo *>();
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ Args[I] = GetTypeSourceInfo(Record, Idx);
+}
+
+void ASTStmtReader::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
+ VisitExpr(E);
+ E->ATT = (ArrayTypeTrait)Record[Idx++];
+ E->Value = (unsigned int)Record[Idx++];
+ SourceRange Range = ReadSourceRange(Record, Idx);
+ E->Loc = Range.getBegin();
+ E->RParen = Range.getEnd();
+ E->QueriedType = GetTypeSourceInfo(Record, Idx);
+}
+
+void ASTStmtReader::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
+ VisitExpr(E);
+ E->ET = (ExpressionTrait)Record[Idx++];
+ E->Value = (bool)Record[Idx++];
+ SourceRange Range = ReadSourceRange(Record, Idx);
+ E->QueriedExpression = Reader.ReadSubExpr();
+ E->Loc = Range.getBegin();
+ E->RParen = Range.getEnd();
+}
+
+void ASTStmtReader::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
+ VisitExpr(E);
+ E->Value = (bool)Record[Idx++];
+ E->Range = ReadSourceRange(Record, Idx);
+ E->Operand = Reader.ReadSubExpr();
+}
+
+void ASTStmtReader::VisitPackExpansionExpr(PackExpansionExpr *E) {
+ VisitExpr(E);
+ E->EllipsisLoc = ReadSourceLocation(Record, Idx);
+ E->NumExpansions = Record[Idx++];
+ E->Pattern = Reader.ReadSubExpr();
+}
+
+void ASTStmtReader::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
+ VisitExpr(E);
+ unsigned NumPartialArgs = Record[Idx++];
+ E->OperatorLoc = ReadSourceLocation(Record, Idx);
+ E->PackLoc = ReadSourceLocation(Record, Idx);
+ E->RParenLoc = ReadSourceLocation(Record, Idx);
+ E->Pack = Reader.ReadDeclAs<NamedDecl>(F, Record, Idx);
+ if (E->isPartiallySubstituted()) {
+ assert(E->Length == NumPartialArgs);
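+ // Construct the partially-substituted arguments in the trailing storage.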
+ for (auto *I = E->getTrailingObjects<TemplateArgument>(),
+ *End = I + NumPartialArgs;
+ I != End; ++I)
+ new (I) TemplateArgument(Reader.ReadTemplateArgument(F, Record, Idx));
+ } else if (!E->isValueDependent()) {
+ E->Length = Record[Idx++];
+ }
+}
+
+void ASTStmtReader::VisitSubstNonTypeTemplateParmExpr(
+ SubstNonTypeTemplateParmExpr *E) {
+ VisitExpr(E);
+ E->Param = ReadDeclAs<NonTypeTemplateParmDecl>(Record, Idx);
+ E->NameLoc = ReadSourceLocation(Record, Idx);
+ E->Replacement = Reader.ReadSubExpr();
+}
+
+void ASTStmtReader::VisitSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *E) {
+ VisitExpr(E);
+ E->Param = ReadDeclAs<NonTypeTemplateParmDecl>(Record, Idx);
+ TemplateArgument ArgPack = Reader.ReadTemplateArgument(F, Record, Idx);
+ if (ArgPack.getKind() != TemplateArgument::Pack)
+ return;
+
+ E->Arguments = ArgPack.pack_begin();
+ E->NumArguments = ArgPack.pack_size();
+ E->NameLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitFunctionParmPackExpr(FunctionParmPackExpr *E) {
+ VisitExpr(E);
+ E->NumParameters = Record[Idx++];
+ E->ParamPack = ReadDeclAs<ParmVarDecl>(Record, Idx);
+ E->NameLoc = ReadSourceLocation(Record, Idx);
+ ParmVarDecl **Parms = E->getTrailingObjects<ParmVarDecl *>();
+ for (unsigned i = 0, n = E->NumParameters; i != n; ++i)
+ Parms[i] = ReadDeclAs<ParmVarDecl>(Record, Idx);
+}
+
+void ASTStmtReader::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
+ VisitExpr(E);
+ E->State = Reader.ReadSubExpr();
+ auto VD = ReadDeclAs<ValueDecl>(Record, Idx);
+ unsigned ManglingNumber = Record[Idx++];
+ E->setExtendingDecl(VD, ManglingNumber);
+}
+
+void ASTStmtReader::VisitCXXFoldExpr(CXXFoldExpr *E) {
+ VisitExpr(E);
+ E->LParenLoc = ReadSourceLocation(Record, Idx);
+ E->EllipsisLoc = ReadSourceLocation(Record, Idx);
+ E->RParenLoc = ReadSourceLocation(Record, Idx);
+ E->SubExprs[0] = Reader.ReadSubExpr();
+ E->SubExprs[1] = Reader.ReadSubExpr();
+ E->Opcode = (BinaryOperatorKind)Record[Idx++];
+}
+
+void ASTStmtReader::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+ VisitExpr(E);
+ E->SourceExpr = Reader.ReadSubExpr();
+ E->Loc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitTypoExpr(TypoExpr *E) {
+ llvm_unreachable("Cannot read TypoExpr nodes");
+}
+
+//===----------------------------------------------------------------------===//
+// Microsoft Expressions and Statements
+//===----------------------------------------------------------------------===//
+void ASTStmtReader::VisitMSPropertyRefExpr(MSPropertyRefExpr *E) {
+ VisitExpr(E);
+ E->IsArrow = (Record[Idx++] != 0);
+ E->BaseExpr = Reader.ReadSubExpr();
+ E->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ E->MemberLoc = ReadSourceLocation(Record, Idx);
+ E->TheDecl = ReadDeclAs<MSPropertyDecl>(Record, Idx);
+}
+
+void ASTStmtReader::VisitMSPropertySubscriptExpr(MSPropertySubscriptExpr *E) {
+ VisitExpr(E);
+ E->setBase(Reader.ReadSubExpr());
+ E->setIdx(Reader.ReadSubExpr());
+ E->setRBracketLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitCXXUuidofExpr(CXXUuidofExpr *E) {
+ VisitExpr(E);
+ E->setSourceRange(ReadSourceRange(Record, Idx));
+ if (E->isTypeOperand()) { // __uuidof(ComType)
+ E->setTypeOperandSourceInfo(GetTypeSourceInfo(Record, Idx));
+ return;
+ }
+
+ // __uuidof(expr)
+ E->setExprOperand(Reader.ReadSubExpr());
+}
+
+void ASTStmtReader::VisitSEHLeaveStmt(SEHLeaveStmt *S) {
+ VisitStmt(S);
+ S->setLeaveLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitSEHExceptStmt(SEHExceptStmt *S) {
+ VisitStmt(S);
+ S->Loc = ReadSourceLocation(Record, Idx);
+ S->Children[SEHExceptStmt::FILTER_EXPR] = Reader.ReadSubStmt();
+ S->Children[SEHExceptStmt::BLOCK] = Reader.ReadSubStmt();
+}
+
+void ASTStmtReader::VisitSEHFinallyStmt(SEHFinallyStmt *S) {
+ VisitStmt(S);
+ S->Loc = ReadSourceLocation(Record, Idx);
+ S->Block = Reader.ReadSubStmt();
+}
+
+void ASTStmtReader::VisitSEHTryStmt(SEHTryStmt *S) {
+ VisitStmt(S);
+ S->IsCXXTry = Record[Idx++];
+ S->TryLoc = ReadSourceLocation(Record, Idx);
+ S->Children[SEHTryStmt::TRY] = Reader.ReadSubStmt();
+ S->Children[SEHTryStmt::HANDLER] = Reader.ReadSubStmt();
+}
+
+//===----------------------------------------------------------------------===//
+// CUDA Expressions and Statements
+//===----------------------------------------------------------------------===//
+
+void ASTStmtReader::VisitCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
+ VisitCallExpr(E);
+ E->setConfig(cast<CallExpr>(Reader.ReadSubExpr()));
+}
+
+//===----------------------------------------------------------------------===//
+// OpenCL Expressions and Statements.
+//===----------------------------------------------------------------------===//
+void ASTStmtReader::VisitAsTypeExpr(AsTypeExpr *E) {
+ VisitExpr(E);
+ E->BuiltinLoc = ReadSourceLocation(Record, Idx);
+ E->RParenLoc = ReadSourceLocation(Record, Idx);
+ E->SrcExpr = Reader.ReadSubExpr();
+}
+
+//===----------------------------------------------------------------------===//
+// OpenMP Clauses.
+//===----------------------------------------------------------------------===//
+
+namespace clang {
+class OMPClauseReader : public OMPClauseVisitor<OMPClauseReader> {
+ ASTStmtReader *Reader;
+ ASTContext &Context;
+ const ASTReader::RecordData &Record;
+ unsigned &Idx;
+public:
+ OMPClauseReader(ASTStmtReader *R, ASTContext &C,
+ const ASTReader::RecordData &Record, unsigned &Idx)
+ : Reader(R), Context(C), Record(Record), Idx(Idx) { }
+#define OPENMP_CLAUSE(Name, Class) \
+ void Visit##Class(Class *S);
+#include "clang/Basic/OpenMPKinds.def"
+ OMPClause *readClause();
+};
+}
+
+OMPClause *OMPClauseReader::readClause() {
+ OMPClause *C;
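+ // The first record entry selects the clause kind; list-like clauses also
+ // carry their variable count.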
+ switch (Record[Idx++]) {
+ case OMPC_if:
+ C = new (Context) OMPIfClause();
+ break;
+ case OMPC_final:
+ C = new (Context) OMPFinalClause();
+ break;
+ case OMPC_num_threads:
+ C = new (Context) OMPNumThreadsClause();
+ break;
+ case OMPC_safelen:
+ C = new (Context) OMPSafelenClause();
+ break;
+ case OMPC_simdlen:
+ C = new (Context) OMPSimdlenClause();
+ break;
+ case OMPC_collapse:
+ C = new (Context) OMPCollapseClause();
+ break;
+ case OMPC_default:
+ C = new (Context) OMPDefaultClause();
+ break;
+ case OMPC_proc_bind:
+ C = new (Context) OMPProcBindClause();
+ break;
+ case OMPC_schedule:
+ C = new (Context) OMPScheduleClause();
+ break;
+ case OMPC_ordered:
+ C = new (Context) OMPOrderedClause();
+ break;
+ case OMPC_nowait:
+ C = new (Context) OMPNowaitClause();
+ break;
+ case OMPC_untied:
+ C = new (Context) OMPUntiedClause();
+ break;
+ case OMPC_mergeable:
+ C = new (Context) OMPMergeableClause();
+ break;
+ case OMPC_read:
+ C = new (Context) OMPReadClause();
+ break;
+ case OMPC_write:
+ C = new (Context) OMPWriteClause();
+ break;
+ case OMPC_update:
+ C = new (Context) OMPUpdateClause();
+ break;
+ case OMPC_capture:
+ C = new (Context) OMPCaptureClause();
+ break;
+ case OMPC_seq_cst:
+ C = new (Context) OMPSeqCstClause();
+ break;
+ case OMPC_threads:
+ C = new (Context) OMPThreadsClause();
+ break;
+ case OMPC_simd:
+ C = new (Context) OMPSIMDClause();
+ break;
+ case OMPC_nogroup:
+ C = new (Context) OMPNogroupClause();
+ break;
+ case OMPC_private:
+ C = OMPPrivateClause::CreateEmpty(Context, Record[Idx++]);
+ break;
+ case OMPC_firstprivate:
+ C = OMPFirstprivateClause::CreateEmpty(Context, Record[Idx++]);
+ break;
+ case OMPC_lastprivate:
+ C = OMPLastprivateClause::CreateEmpty(Context, Record[Idx++]);
+ break;
+ case OMPC_shared:
+ C = OMPSharedClause::CreateEmpty(Context, Record[Idx++]);
+ break;
+ case OMPC_reduction:
+ C = OMPReductionClause::CreateEmpty(Context, Record[Idx++]);
+ break;
+ case OMPC_linear:
+ C = OMPLinearClause::CreateEmpty(Context, Record[Idx++]);
+ break;
+ case OMPC_aligned:
+ C = OMPAlignedClause::CreateEmpty(Context, Record[Idx++]);
+ break;
+ case OMPC_copyin:
+ C = OMPCopyinClause::CreateEmpty(Context, Record[Idx++]);
+ break;
+ case OMPC_copyprivate:
+ C = OMPCopyprivateClause::CreateEmpty(Context, Record[Idx++]);
+ break;
+ case OMPC_flush:
+ C = OMPFlushClause::CreateEmpty(Context, Record[Idx++]);
+ break;
+ case OMPC_depend:
+ C = OMPDependClause::CreateEmpty(Context, Record[Idx++]);
+ break;
+ case OMPC_device:
+ C = new (Context) OMPDeviceClause();
+ break;
+ case OMPC_map:
+ C = OMPMapClause::CreateEmpty(Context, Record[Idx++]);
+ break;
+ case OMPC_num_teams:
+ C = new (Context) OMPNumTeamsClause();
+ break;
+ case OMPC_thread_limit:
+ C = new (Context) OMPThreadLimitClause();
+ break;
+ case OMPC_priority:
+ C = new (Context) OMPPriorityClause();
+ break;
+ case OMPC_grainsize:
+ C = new (Context) OMPGrainsizeClause();
+ break;
+ case OMPC_num_tasks:
+ C = new (Context) OMPNumTasksClause();
+ break;
+ case OMPC_hint:
+ C = new (Context) OMPHintClause();
+ break;
+ }
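+ // Read the clause-specific payload, then the locations common to all
+ // clauses.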
+ Visit(C);
+ C->setLocStart(Reader->ReadSourceLocation(Record, Idx));
+ C->setLocEnd(Reader->ReadSourceLocation(Record, Idx));
+
+ return C;
+}
+
+void OMPClauseReader::VisitOMPIfClause(OMPIfClause *C) {
+ C->setNameModifier(static_cast<OpenMPDirectiveKind>(Record[Idx++]));
+ C->setNameModifierLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setColonLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setCondition(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPFinalClause(OMPFinalClause *C) {
+ C->setCondition(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPNumThreadsClause(OMPNumThreadsClause *C) {
+ C->setNumThreads(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPSafelenClause(OMPSafelenClause *C) {
+ C->setSafelen(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPSimdlenClause(OMPSimdlenClause *C) {
+ C->setSimdlen(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPCollapseClause(OMPCollapseClause *C) {
+ C->setNumForLoops(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPDefaultClause(OMPDefaultClause *C) {
+ C->setDefaultKind(static_cast<OpenMPDefaultClauseKind>(Record[Idx++]));
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setDefaultKindKwLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPProcBindClause(OMPProcBindClause *C) {
+ C->setProcBindKind(static_cast<OpenMPProcBindClauseKind>(Record[Idx++]));
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setProcBindKindKwLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPScheduleClause(OMPScheduleClause *C) {
+ C->setScheduleKind(static_cast<OpenMPScheduleClauseKind>(Record[Idx++]));
+ C->setFirstScheduleModifier(
+ static_cast<OpenMPScheduleClauseModifier>(Record[Idx++]));
+ C->setSecondScheduleModifier(
+ static_cast<OpenMPScheduleClauseModifier>(Record[Idx++]));
+ C->setChunkSize(Reader->Reader.ReadSubExpr());
+ C->setHelperChunkSize(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setFirstScheduleModifierLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setSecondScheduleModifierLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setScheduleKindLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setCommaLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPOrderedClause(OMPOrderedClause *C) {
+ C->setNumForLoops(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPNowaitClause(OMPNowaitClause *) {}
+
+void OMPClauseReader::VisitOMPUntiedClause(OMPUntiedClause *) {}
+
+void OMPClauseReader::VisitOMPMergeableClause(OMPMergeableClause *) {}
+
+void OMPClauseReader::VisitOMPReadClause(OMPReadClause *) {}
+
+void OMPClauseReader::VisitOMPWriteClause(OMPWriteClause *) {}
+
+void OMPClauseReader::VisitOMPUpdateClause(OMPUpdateClause *) {}
+
+void OMPClauseReader::VisitOMPCaptureClause(OMPCaptureClause *) {}
+
+void OMPClauseReader::VisitOMPSeqCstClause(OMPSeqCstClause *) {}
+
+void OMPClauseReader::VisitOMPThreadsClause(OMPThreadsClause *) {}
+
+void OMPClauseReader::VisitOMPSIMDClause(OMPSIMDClause *) {}
+
+void OMPClauseReader::VisitOMPNogroupClause(OMPNogroupClause *) {}
+
+void OMPClauseReader::VisitOMPPrivateClause(OMPPrivateClause *C) {
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setVarRefs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setPrivateCopies(Vars);
+}
+
+void OMPClauseReader::VisitOMPFirstprivateClause(OMPFirstprivateClause *C) {
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setVarRefs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setPrivateCopies(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setInits(Vars);
+}
+
+void OMPClauseReader::VisitOMPLastprivateClause(OMPLastprivateClause *C) {
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setVarRefs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setPrivateCopies(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setSourceExprs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setDestinationExprs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setAssignmentOps(Vars);
+}
+
+void OMPClauseReader::VisitOMPSharedClause(OMPSharedClause *C) {
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setVarRefs(Vars);
+}
+
+void OMPClauseReader::VisitOMPReductionClause(OMPReductionClause *C) {
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setColonLoc(Reader->ReadSourceLocation(Record, Idx));
+ NestedNameSpecifierLoc NNSL =
+ Reader->Reader.ReadNestedNameSpecifierLoc(Reader->F, Record, Idx);
+ DeclarationNameInfo DNI;
+ Reader->ReadDeclarationNameInfo(DNI, Record, Idx);
+ C->setQualifierLoc(NNSL);
+ C->setNameInfo(DNI);
+
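+ // Five parallel lists follow: the variables, their private copies, and
+ // the LHS, RHS, and reduction-op expressions.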
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setVarRefs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setPrivates(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setLHSExprs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setRHSExprs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setReductionOps(Vars);
+}
+
+void OMPClauseReader::VisitOMPLinearClause(OMPLinearClause *C) {
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setColonLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setModifier(static_cast<OpenMPLinearClauseKind>(Record[Idx++]));
+ C->setModifierLoc(Reader->ReadSourceLocation(Record, Idx));
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setVarRefs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setPrivates(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setInits(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setUpdates(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setFinals(Vars);
+ C->setStep(Reader->Reader.ReadSubExpr());
+ C->setCalcStep(Reader->Reader.ReadSubExpr());
+}
+
+void OMPClauseReader::VisitOMPAlignedClause(OMPAlignedClause *C) {
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setColonLoc(Reader->ReadSourceLocation(Record, Idx));
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setVarRefs(Vars);
+ C->setAlignment(Reader->Reader.ReadSubExpr());
+}
+
+void OMPClauseReader::VisitOMPCopyinClause(OMPCopyinClause *C) {
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Exprs;
+ Exprs.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Exprs.push_back(Reader->Reader.ReadSubExpr());
+ C->setVarRefs(Exprs);
+ Exprs.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Exprs.push_back(Reader->Reader.ReadSubExpr());
+ C->setSourceExprs(Exprs);
+ Exprs.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Exprs.push_back(Reader->Reader.ReadSubExpr());
+ C->setDestinationExprs(Exprs);
+ Exprs.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Exprs.push_back(Reader->Reader.ReadSubExpr());
+ C->setAssignmentOps(Exprs);
+}
+
+void OMPClauseReader::VisitOMPCopyprivateClause(OMPCopyprivateClause *C) {
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Exprs;
+ Exprs.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Exprs.push_back(Reader->Reader.ReadSubExpr());
+ C->setVarRefs(Exprs);
+ Exprs.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Exprs.push_back(Reader->Reader.ReadSubExpr());
+ C->setSourceExprs(Exprs);
+ Exprs.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Exprs.push_back(Reader->Reader.ReadSubExpr());
+ C->setDestinationExprs(Exprs);
+ Exprs.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Exprs.push_back(Reader->Reader.ReadSubExpr());
+ C->setAssignmentOps(Exprs);
+}
+
+void OMPClauseReader::VisitOMPFlushClause(OMPFlushClause *C) {
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setVarRefs(Vars);
+}
+
+void OMPClauseReader::VisitOMPDependClause(OMPDependClause *C) {
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setDependencyKind(static_cast<OpenMPDependClauseKind>(Record[Idx++]));
+ C->setDependencyLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setColonLoc(Reader->ReadSourceLocation(Record, Idx));
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setVarRefs(Vars);
+}
+
+void OMPClauseReader::VisitOMPDeviceClause(OMPDeviceClause *C) {
+ C->setDevice(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPMapClause(OMPMapClause *C) {
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+  C->setMapTypeModifier(static_cast<OpenMPMapClauseKind>(Record[Idx++]));
+  C->setMapType(static_cast<OpenMPMapClauseKind>(Record[Idx++]));
+ C->setMapLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setColonLoc(Reader->ReadSourceLocation(Record, Idx));
+  unsigned NumVars = C->varlist_size();
+  SmallVector<Expr *, 16> Vars;
+  Vars.reserve(NumVars);
+  for (unsigned i = 0; i != NumVars; ++i)
+    Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setVarRefs(Vars);
+}
+
+void OMPClauseReader::VisitOMPNumTeamsClause(OMPNumTeamsClause *C) {
+ C->setNumTeams(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPThreadLimitClause(OMPThreadLimitClause *C) {
+ C->setThreadLimit(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPPriorityClause(OMPPriorityClause *C) {
+ C->setPriority(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPGrainsizeClause(OMPGrainsizeClause *C) {
+ C->setGrainsize(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPNumTasksClause(OMPNumTasksClause *C) {
+ C->setNumTasks(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPHintClause(OMPHintClause *C) {
+ C->setHint(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+//===----------------------------------------------------------------------===//
+// OpenMP Directives.
+//===----------------------------------------------------------------------===//
+void ASTStmtReader::VisitOMPExecutableDirective(OMPExecutableDirective *E) {
+ E->setLocStart(ReadSourceLocation(Record, Idx));
+ E->setLocEnd(ReadSourceLocation(Record, Idx));
+ OMPClauseReader ClauseReader(this, Reader.getContext(), Record, Idx);
+ SmallVector<OMPClause *, 5> Clauses;
+ for (unsigned i = 0; i < E->getNumClauses(); ++i)
+ Clauses.push_back(ClauseReader.readClause());
+ E->setClauses(Clauses);
+ if (E->hasAssociatedStmt())
+ E->setAssociatedStmt(Reader.ReadSubStmt());
+}
+
+void ASTStmtReader::VisitOMPLoopDirective(OMPLoopDirective *D) {
+ VisitStmt(D);
+ // Two fields (NumClauses and CollapsedNum) were read in ReadStmtFromStream.
+ Idx += 2;
+ VisitOMPExecutableDirective(D);
+ D->setIterationVariable(Reader.ReadSubExpr());
+ D->setLastIteration(Reader.ReadSubExpr());
+ D->setCalcLastIteration(Reader.ReadSubExpr());
+ D->setPreCond(Reader.ReadSubExpr());
+ D->setCond(Reader.ReadSubExpr());
+ D->setInit(Reader.ReadSubExpr());
+ D->setInc(Reader.ReadSubExpr());
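+  // Worksharing loop directives additionally serialize the helper variables
+  // used to carve up the iteration space (bounds, stride, and the
+  // expressions that advance them).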
+ if (isOpenMPWorksharingDirective(D->getDirectiveKind())) {
+ D->setIsLastIterVariable(Reader.ReadSubExpr());
+ D->setLowerBoundVariable(Reader.ReadSubExpr());
+ D->setUpperBoundVariable(Reader.ReadSubExpr());
+ D->setStrideVariable(Reader.ReadSubExpr());
+ D->setEnsureUpperBound(Reader.ReadSubExpr());
+ D->setNextLowerBound(Reader.ReadSubExpr());
+ D->setNextUpperBound(Reader.ReadSubExpr());
+ }
+ SmallVector<Expr *, 4> Sub;
+ unsigned CollapsedNum = D->getCollapsedNumber();
+ Sub.reserve(CollapsedNum);
+ for (unsigned i = 0; i < CollapsedNum; ++i)
+ Sub.push_back(Reader.ReadSubExpr());
+ D->setCounters(Sub);
+ Sub.clear();
+ for (unsigned i = 0; i < CollapsedNum; ++i)
+ Sub.push_back(Reader.ReadSubExpr());
+ D->setPrivateCounters(Sub);
+ Sub.clear();
+ for (unsigned i = 0; i < CollapsedNum; ++i)
+ Sub.push_back(Reader.ReadSubExpr());
+ D->setInits(Sub);
+ Sub.clear();
+ for (unsigned i = 0; i < CollapsedNum; ++i)
+ Sub.push_back(Reader.ReadSubExpr());
+ D->setUpdates(Sub);
+ Sub.clear();
+ for (unsigned i = 0; i < CollapsedNum; ++i)
+ Sub.push_back(Reader.ReadSubExpr());
+ D->setFinals(Sub);
+}
+
+void ASTStmtReader::VisitOMPParallelDirective(OMPParallelDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ ++Idx;
+ VisitOMPExecutableDirective(D);
+ D->setHasCancel(Record[Idx++]);
+}
+
+void ASTStmtReader::VisitOMPSimdDirective(OMPSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPForDirective(OMPForDirective *D) {
+ VisitOMPLoopDirective(D);
+ D->setHasCancel(Record[Idx++]);
+}
+
+void ASTStmtReader::VisitOMPForSimdDirective(OMPForSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPSectionsDirective(OMPSectionsDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ ++Idx;
+ VisitOMPExecutableDirective(D);
+ D->setHasCancel(Record[Idx++]);
+}
+
+void ASTStmtReader::VisitOMPSectionDirective(OMPSectionDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+ D->setHasCancel(Record[Idx++]);
+}
+
+void ASTStmtReader::VisitOMPSingleDirective(OMPSingleDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ ++Idx;
+ VisitOMPExecutableDirective(D);
+}
+
+void ASTStmtReader::VisitOMPMasterDirective(OMPMasterDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+}
+
+void ASTStmtReader::VisitOMPCriticalDirective(OMPCriticalDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ ++Idx;
+ VisitOMPExecutableDirective(D);
+ ReadDeclarationNameInfo(D->DirName, Record, Idx);
+}
+
+void ASTStmtReader::VisitOMPParallelForDirective(OMPParallelForDirective *D) {
+ VisitOMPLoopDirective(D);
+ D->setHasCancel(Record[Idx++]);
+}
+
+void ASTStmtReader::VisitOMPParallelForSimdDirective(
+ OMPParallelForSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPParallelSectionsDirective(
+ OMPParallelSectionsDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ ++Idx;
+ VisitOMPExecutableDirective(D);
+ D->setHasCancel(Record[Idx++]);
+}
+
+void ASTStmtReader::VisitOMPTaskDirective(OMPTaskDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ ++Idx;
+ VisitOMPExecutableDirective(D);
+ D->setHasCancel(Record[Idx++]);
+}
+
+void ASTStmtReader::VisitOMPTaskyieldDirective(OMPTaskyieldDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+}
+
+void ASTStmtReader::VisitOMPBarrierDirective(OMPBarrierDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+}
+
+void ASTStmtReader::VisitOMPTaskwaitDirective(OMPTaskwaitDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+}
+
+void ASTStmtReader::VisitOMPTaskgroupDirective(OMPTaskgroupDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+}
+
+void ASTStmtReader::VisitOMPFlushDirective(OMPFlushDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ ++Idx;
+ VisitOMPExecutableDirective(D);
+}
+
+void ASTStmtReader::VisitOMPOrderedDirective(OMPOrderedDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ ++Idx;
+ VisitOMPExecutableDirective(D);
+}
+
+void ASTStmtReader::VisitOMPAtomicDirective(OMPAtomicDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ ++Idx;
+ VisitOMPExecutableDirective(D);
+ D->setX(Reader.ReadSubExpr());
+ D->setV(Reader.ReadSubExpr());
+ D->setExpr(Reader.ReadSubExpr());
+ D->setUpdateExpr(Reader.ReadSubExpr());
+ D->IsXLHSInRHSPart = Record[Idx++] != 0;
+ D->IsPostfixUpdate = Record[Idx++] != 0;
+}
+
+void ASTStmtReader::VisitOMPTargetDirective(OMPTargetDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ ++Idx;
+ VisitOMPExecutableDirective(D);
+}
+
+void ASTStmtReader::VisitOMPTargetDataDirective(OMPTargetDataDirective *D) {
+ VisitStmt(D);
+ ++Idx;
+ VisitOMPExecutableDirective(D);
+}
+
+void ASTStmtReader::VisitOMPTeamsDirective(OMPTeamsDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ ++Idx;
+ VisitOMPExecutableDirective(D);
+}
+
+void ASTStmtReader::VisitOMPCancellationPointDirective(
+ OMPCancellationPointDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+ D->setCancelRegion(static_cast<OpenMPDirectiveKind>(Record[Idx++]));
+}
+
+void ASTStmtReader::VisitOMPCancelDirective(OMPCancelDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ ++Idx;
+ VisitOMPExecutableDirective(D);
+ D->setCancelRegion(static_cast<OpenMPDirectiveKind>(Record[Idx++]));
+}
+
+void ASTStmtReader::VisitOMPTaskLoopDirective(OMPTaskLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPTaskLoopSimdDirective(OMPTaskLoopSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPDistributeDirective(OMPDistributeDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+//===----------------------------------------------------------------------===//
+// ASTReader Implementation
+//===----------------------------------------------------------------------===//
+
+Stmt *ASTReader::ReadStmt(ModuleFile &F) {
+ switch (ReadingKind) {
+ case Read_None:
+ llvm_unreachable("should not call this when not reading anything");
+ case Read_Decl:
+ case Read_Type:
+ return ReadStmtFromStream(F);
+ case Read_Stmt:
+ return ReadSubStmt();
+ }
+
+  llvm_unreachable("ReadingKind not set?");
+}
+
+Expr *ASTReader::ReadExpr(ModuleFile &F) {
+ return cast_or_null<Expr>(ReadStmt(F));
+}
+
+Expr *ASTReader::ReadSubExpr() {
+ return cast_or_null<Expr>(ReadSubStmt());
+}
+
+// Within the bitstream, expressions are stored in Reverse Polish Notation,
+// with each subexpression preceding the expression that contains it.
+// Subexpressions are stored from last to first. To evaluate expressions, we
+// continue reading expressions and placing them on the stack; expressions
+// that have operands remove those operands from the stack. Evaluation
+// terminates when we see a STMT_STOP record, and the single remaining
+// expression on the stack is our result.
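+//
+// As an illustrative sketch (not a normative description of the on-disk
+// format): for an expression such as `a + b`, the stream carries the records
+// for the operand subexpressions before the record for the BinaryOperator
+// itself, followed eventually by a STMT_STOP record. The loop below pushes
+// each deserialized node onto StmtStack, and parent nodes pop their operands
+// back off via ReadSubExpr()/ReadSubStmt().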
+Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
+ ReadingKindTracker ReadingKind(Read_Stmt, *this);
+ llvm::BitstreamCursor &Cursor = F.DeclsCursor;
+
+  // Map of offset to previously deserialized stmt. The offset points
+  // just after the stmt record.
+ llvm::DenseMap<uint64_t, Stmt *> StmtEntries;
+
+#ifndef NDEBUG
+ unsigned PrevNumStmts = StmtStack.size();
+#endif
+
+ RecordData Record;
+ unsigned Idx;
+ ASTStmtReader Reader(*this, F, Cursor, Record, Idx);
+ Stmt::EmptyShell Empty;
+
+ while (true) {
+ llvm::BitstreamEntry Entry = Cursor.advanceSkippingSubblocks();
+
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::SubBlock: // Handled for us already.
+ case llvm::BitstreamEntry::Error:
+ Error("malformed block record in AST file");
+ return nullptr;
+ case llvm::BitstreamEntry::EndBlock:
+ goto Done;
+ case llvm::BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ Stmt *S = nullptr;
+ Idx = 0;
+ Record.clear();
+ bool Finished = false;
+ bool IsStmtReference = false;
+ switch ((StmtCode)Cursor.readRecord(Entry.ID, Record)) {
+ case STMT_STOP:
+ Finished = true;
+ break;
+
+ case STMT_REF_PTR:
+ IsStmtReference = true;
+ assert(StmtEntries.find(Record[0]) != StmtEntries.end() &&
+ "No stmt was recorded for this offset reference!");
+ S = StmtEntries[Record[Idx++]];
+ break;
+
+ case STMT_NULL_PTR:
+ S = nullptr;
+ break;
+
+ case STMT_NULL:
+ S = new (Context) NullStmt(Empty);
+ break;
+
+ case STMT_COMPOUND:
+ S = new (Context) CompoundStmt(Empty);
+ break;
+
+ case STMT_CASE:
+ S = new (Context) CaseStmt(Empty);
+ break;
+
+ case STMT_DEFAULT:
+ S = new (Context) DefaultStmt(Empty);
+ break;
+
+ case STMT_LABEL:
+ S = new (Context) LabelStmt(Empty);
+ break;
+
+ case STMT_ATTRIBUTED:
+ S = AttributedStmt::CreateEmpty(
+ Context,
+ /*NumAttrs*/Record[ASTStmtReader::NumStmtFields]);
+ break;
+
+ case STMT_IF:
+ S = new (Context) IfStmt(Empty);
+ break;
+
+ case STMT_SWITCH:
+ S = new (Context) SwitchStmt(Empty);
+ break;
+
+ case STMT_WHILE:
+ S = new (Context) WhileStmt(Empty);
+ break;
+
+ case STMT_DO:
+ S = new (Context) DoStmt(Empty);
+ break;
+
+ case STMT_FOR:
+ S = new (Context) ForStmt(Empty);
+ break;
+
+ case STMT_GOTO:
+ S = new (Context) GotoStmt(Empty);
+ break;
+
+ case STMT_INDIRECT_GOTO:
+ S = new (Context) IndirectGotoStmt(Empty);
+ break;
+
+ case STMT_CONTINUE:
+ S = new (Context) ContinueStmt(Empty);
+ break;
+
+ case STMT_BREAK:
+ S = new (Context) BreakStmt(Empty);
+ break;
+
+ case STMT_RETURN:
+ S = new (Context) ReturnStmt(Empty);
+ break;
+
+ case STMT_DECL:
+ S = new (Context) DeclStmt(Empty);
+ break;
+
+ case STMT_GCCASM:
+ S = new (Context) GCCAsmStmt(Empty);
+ break;
+
+ case STMT_MSASM:
+ S = new (Context) MSAsmStmt(Empty);
+ break;
+
+ case STMT_CAPTURED:
+ S = CapturedStmt::CreateDeserialized(Context,
+ Record[ASTStmtReader::NumStmtFields]);
+ break;
+
+ case EXPR_PREDEFINED:
+ S = new (Context) PredefinedExpr(Empty);
+ break;
+
+ case EXPR_DECL_REF:
+ S = DeclRefExpr::CreateEmpty(
+ Context,
+ /*HasQualifier=*/Record[ASTStmtReader::NumExprFields],
+ /*HasFoundDecl=*/Record[ASTStmtReader::NumExprFields + 1],
+ /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields + 2],
+ /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields + 2] ?
+ Record[ASTStmtReader::NumExprFields + 5] : 0);
+ break;
+
+ case EXPR_INTEGER_LITERAL:
+ S = IntegerLiteral::Create(Context, Empty);
+ break;
+
+ case EXPR_FLOATING_LITERAL:
+ S = FloatingLiteral::Create(Context, Empty);
+ break;
+
+ case EXPR_IMAGINARY_LITERAL:
+ S = new (Context) ImaginaryLiteral(Empty);
+ break;
+
+ case EXPR_STRING_LITERAL:
+ S = StringLiteral::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields + 1]);
+ break;
+
+ case EXPR_CHARACTER_LITERAL:
+ S = new (Context) CharacterLiteral(Empty);
+ break;
+
+ case EXPR_PAREN:
+ S = new (Context) ParenExpr(Empty);
+ break;
+
+ case EXPR_PAREN_LIST:
+ S = new (Context) ParenListExpr(Empty);
+ break;
+
+ case EXPR_UNARY_OPERATOR:
+ S = new (Context) UnaryOperator(Empty);
+ break;
+
+ case EXPR_OFFSETOF:
+ S = OffsetOfExpr::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields],
+ Record[ASTStmtReader::NumExprFields + 1]);
+ break;
+
+ case EXPR_SIZEOF_ALIGN_OF:
+ S = new (Context) UnaryExprOrTypeTraitExpr(Empty);
+ break;
+
+ case EXPR_ARRAY_SUBSCRIPT:
+ S = new (Context) ArraySubscriptExpr(Empty);
+ break;
+
+ case EXPR_OMP_ARRAY_SECTION:
+ S = new (Context) OMPArraySectionExpr(Empty);
+ break;
+
+ case EXPR_CALL:
+ S = new (Context) CallExpr(Context, Stmt::CallExprClass, Empty);
+ break;
+
+ case EXPR_MEMBER: {
+ // We load everything here and fully initialize it at creation.
+ // That way we can use MemberExpr::Create and don't have to duplicate its
+ // logic with a MemberExpr::CreateEmpty.
+
+ assert(Idx == 0);
+ NestedNameSpecifierLoc QualifierLoc;
+ if (Record[Idx++]) { // HasQualifier.
+ QualifierLoc = ReadNestedNameSpecifierLoc(F, Record, Idx);
+ }
+
+ SourceLocation TemplateKWLoc;
+ TemplateArgumentListInfo ArgInfo;
+ bool HasTemplateKWAndArgsInfo = Record[Idx++];
+ if (HasTemplateKWAndArgsInfo) {
+ TemplateKWLoc = ReadSourceLocation(F, Record, Idx);
+ unsigned NumTemplateArgs = Record[Idx++];
+ ArgInfo.setLAngleLoc(ReadSourceLocation(F, Record, Idx));
+ ArgInfo.setRAngleLoc(ReadSourceLocation(F, Record, Idx));
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ ArgInfo.addArgument(ReadTemplateArgumentLoc(F, Record, Idx));
+ }
+
+ bool HadMultipleCandidates = Record[Idx++];
+
+ NamedDecl *FoundD = ReadDeclAs<NamedDecl>(F, Record, Idx);
+ AccessSpecifier AS = (AccessSpecifier)Record[Idx++];
+ DeclAccessPair FoundDecl = DeclAccessPair::make(FoundD, AS);
+
+ QualType T = readType(F, Record, Idx);
+ ExprValueKind VK = static_cast<ExprValueKind>(Record[Idx++]);
+ ExprObjectKind OK = static_cast<ExprObjectKind>(Record[Idx++]);
+ Expr *Base = ReadSubExpr();
+ ValueDecl *MemberD = ReadDeclAs<ValueDecl>(F, Record, Idx);
+ SourceLocation MemberLoc = ReadSourceLocation(F, Record, Idx);
+ DeclarationNameInfo MemberNameInfo(MemberD->getDeclName(), MemberLoc);
+ bool IsArrow = Record[Idx++];
+ SourceLocation OperatorLoc = ReadSourceLocation(F, Record, Idx);
+
+ S = MemberExpr::Create(Context, Base, IsArrow, OperatorLoc, QualifierLoc,
+ TemplateKWLoc, MemberD, FoundDecl, MemberNameInfo,
+ HasTemplateKWAndArgsInfo ? &ArgInfo : nullptr, T,
+ VK, OK);
+ ReadDeclarationNameLoc(F, cast<MemberExpr>(S)->MemberDNLoc,
+ MemberD->getDeclName(), Record, Idx);
+ if (HadMultipleCandidates)
+ cast<MemberExpr>(S)->setHadMultipleCandidates(true);
+ break;
+ }
+
+ case EXPR_BINARY_OPERATOR:
+ S = new (Context) BinaryOperator(Empty);
+ break;
+
+ case EXPR_COMPOUND_ASSIGN_OPERATOR:
+ S = new (Context) CompoundAssignOperator(Empty);
+ break;
+
+ case EXPR_CONDITIONAL_OPERATOR:
+ S = new (Context) ConditionalOperator(Empty);
+ break;
+
+ case EXPR_BINARY_CONDITIONAL_OPERATOR:
+ S = new (Context) BinaryConditionalOperator(Empty);
+ break;
+
+ case EXPR_IMPLICIT_CAST:
+ S = ImplicitCastExpr::CreateEmpty(Context,
+ /*PathSize*/ Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_CSTYLE_CAST:
+ S = CStyleCastExpr::CreateEmpty(Context,
+ /*PathSize*/ Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_COMPOUND_LITERAL:
+ S = new (Context) CompoundLiteralExpr(Empty);
+ break;
+
+ case EXPR_EXT_VECTOR_ELEMENT:
+ S = new (Context) ExtVectorElementExpr(Empty);
+ break;
+
+ case EXPR_INIT_LIST:
+ S = new (Context) InitListExpr(Empty);
+ break;
+
+ case EXPR_DESIGNATED_INIT:
+ S = DesignatedInitExpr::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields] - 1);
+ break;
+
+ case EXPR_DESIGNATED_INIT_UPDATE:
+ S = new (Context) DesignatedInitUpdateExpr(Empty);
+ break;
+
+ case EXPR_IMPLICIT_VALUE_INIT:
+ S = new (Context) ImplicitValueInitExpr(Empty);
+ break;
+
+ case EXPR_NO_INIT:
+ S = new (Context) NoInitExpr(Empty);
+ break;
+
+ case EXPR_VA_ARG:
+ S = new (Context) VAArgExpr(Empty);
+ break;
+
+ case EXPR_ADDR_LABEL:
+ S = new (Context) AddrLabelExpr(Empty);
+ break;
+
+ case EXPR_STMT:
+ S = new (Context) StmtExpr(Empty);
+ break;
+
+ case EXPR_CHOOSE:
+ S = new (Context) ChooseExpr(Empty);
+ break;
+
+ case EXPR_GNU_NULL:
+ S = new (Context) GNUNullExpr(Empty);
+ break;
+
+ case EXPR_SHUFFLE_VECTOR:
+ S = new (Context) ShuffleVectorExpr(Empty);
+ break;
+
+ case EXPR_CONVERT_VECTOR:
+ S = new (Context) ConvertVectorExpr(Empty);
+ break;
+
+ case EXPR_BLOCK:
+ S = new (Context) BlockExpr(Empty);
+ break;
+
+ case EXPR_GENERIC_SELECTION:
+ S = new (Context) GenericSelectionExpr(Empty);
+ break;
+
+ case EXPR_OBJC_STRING_LITERAL:
+ S = new (Context) ObjCStringLiteral(Empty);
+ break;
+ case EXPR_OBJC_BOXED_EXPRESSION:
+ S = new (Context) ObjCBoxedExpr(Empty);
+ break;
+ case EXPR_OBJC_ARRAY_LITERAL:
+ S = ObjCArrayLiteral::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields]);
+ break;
+ case EXPR_OBJC_DICTIONARY_LITERAL:
+ S = ObjCDictionaryLiteral::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields],
+ Record[ASTStmtReader::NumExprFields + 1]);
+ break;
+ case EXPR_OBJC_ENCODE:
+ S = new (Context) ObjCEncodeExpr(Empty);
+ break;
+ case EXPR_OBJC_SELECTOR_EXPR:
+ S = new (Context) ObjCSelectorExpr(Empty);
+ break;
+ case EXPR_OBJC_PROTOCOL_EXPR:
+ S = new (Context) ObjCProtocolExpr(Empty);
+ break;
+ case EXPR_OBJC_IVAR_REF_EXPR:
+ S = new (Context) ObjCIvarRefExpr(Empty);
+ break;
+ case EXPR_OBJC_PROPERTY_REF_EXPR:
+ S = new (Context) ObjCPropertyRefExpr(Empty);
+ break;
+ case EXPR_OBJC_SUBSCRIPT_REF_EXPR:
+ S = new (Context) ObjCSubscriptRefExpr(Empty);
+ break;
+ case EXPR_OBJC_KVC_REF_EXPR:
+ llvm_unreachable("mismatching AST file");
+ case EXPR_OBJC_MESSAGE_EXPR:
+ S = ObjCMessageExpr::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields],
+ Record[ASTStmtReader::NumExprFields + 1]);
+ break;
+ case EXPR_OBJC_ISA:
+ S = new (Context) ObjCIsaExpr(Empty);
+ break;
+ case EXPR_OBJC_INDIRECT_COPY_RESTORE:
+ S = new (Context) ObjCIndirectCopyRestoreExpr(Empty);
+ break;
+ case EXPR_OBJC_BRIDGED_CAST:
+ S = new (Context) ObjCBridgedCastExpr(Empty);
+ break;
+ case STMT_OBJC_FOR_COLLECTION:
+ S = new (Context) ObjCForCollectionStmt(Empty);
+ break;
+ case STMT_OBJC_CATCH:
+ S = new (Context) ObjCAtCatchStmt(Empty);
+ break;
+ case STMT_OBJC_FINALLY:
+ S = new (Context) ObjCAtFinallyStmt(Empty);
+ break;
+ case STMT_OBJC_AT_TRY:
+ S = ObjCAtTryStmt::CreateEmpty(Context,
+ Record[ASTStmtReader::NumStmtFields],
+ Record[ASTStmtReader::NumStmtFields + 1]);
+ break;
+ case STMT_OBJC_AT_SYNCHRONIZED:
+ S = new (Context) ObjCAtSynchronizedStmt(Empty);
+ break;
+ case STMT_OBJC_AT_THROW:
+ S = new (Context) ObjCAtThrowStmt(Empty);
+ break;
+ case STMT_OBJC_AUTORELEASE_POOL:
+ S = new (Context) ObjCAutoreleasePoolStmt(Empty);
+ break;
+ case EXPR_OBJC_BOOL_LITERAL:
+ S = new (Context) ObjCBoolLiteralExpr(Empty);
+ break;
+ case STMT_SEH_LEAVE:
+ S = new (Context) SEHLeaveStmt(Empty);
+ break;
+ case STMT_SEH_EXCEPT:
+ S = new (Context) SEHExceptStmt(Empty);
+ break;
+ case STMT_SEH_FINALLY:
+ S = new (Context) SEHFinallyStmt(Empty);
+ break;
+ case STMT_SEH_TRY:
+ S = new (Context) SEHTryStmt(Empty);
+ break;
+ case STMT_CXX_CATCH:
+ S = new (Context) CXXCatchStmt(Empty);
+ break;
+
+ case STMT_CXX_TRY:
+ S = CXXTryStmt::Create(Context, Empty,
+ /*NumHandlers=*/Record[ASTStmtReader::NumStmtFields]);
+ break;
+
+ case STMT_CXX_FOR_RANGE:
+ S = new (Context) CXXForRangeStmt(Empty);
+ break;
+
+ case STMT_MS_DEPENDENT_EXISTS:
+ S = new (Context) MSDependentExistsStmt(SourceLocation(), true,
+ NestedNameSpecifierLoc(),
+ DeclarationNameInfo(),
+ nullptr);
+ break;
+
+ case STMT_OMP_PARALLEL_DIRECTIVE:
+      S = OMPParallelDirective::CreateEmpty(
+          Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
+ case STMT_OMP_SIMD_DIRECTIVE: {
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPSimdDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
+ }
+
+ case STMT_OMP_FOR_DIRECTIVE: {
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPForDirective::CreateEmpty(Context, NumClauses, CollapsedNum,
+ Empty);
+ break;
+ }
+
+ case STMT_OMP_FOR_SIMD_DIRECTIVE: {
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPForSimdDirective::CreateEmpty(Context, NumClauses, CollapsedNum,
+ Empty);
+ break;
+ }
+
+ case STMT_OMP_SECTIONS_DIRECTIVE:
+ S = OMPSectionsDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
+ case STMT_OMP_SECTION_DIRECTIVE:
+ S = OMPSectionDirective::CreateEmpty(Context, Empty);
+ break;
+
+ case STMT_OMP_SINGLE_DIRECTIVE:
+ S = OMPSingleDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
+ case STMT_OMP_MASTER_DIRECTIVE:
+ S = OMPMasterDirective::CreateEmpty(Context, Empty);
+ break;
+
+ case STMT_OMP_CRITICAL_DIRECTIVE:
+ S = OMPCriticalDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
+ case STMT_OMP_PARALLEL_FOR_DIRECTIVE: {
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPParallelForDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
+ }
+
+ case STMT_OMP_PARALLEL_FOR_SIMD_DIRECTIVE: {
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPParallelForSimdDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
+ }
+
+ case STMT_OMP_PARALLEL_SECTIONS_DIRECTIVE:
+ S = OMPParallelSectionsDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
+ case STMT_OMP_TASK_DIRECTIVE:
+ S = OMPTaskDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
+ case STMT_OMP_TASKYIELD_DIRECTIVE:
+ S = OMPTaskyieldDirective::CreateEmpty(Context, Empty);
+ break;
+
+ case STMT_OMP_BARRIER_DIRECTIVE:
+ S = OMPBarrierDirective::CreateEmpty(Context, Empty);
+ break;
+
+ case STMT_OMP_TASKWAIT_DIRECTIVE:
+ S = OMPTaskwaitDirective::CreateEmpty(Context, Empty);
+ break;
+
+ case STMT_OMP_TASKGROUP_DIRECTIVE:
+ S = OMPTaskgroupDirective::CreateEmpty(Context, Empty);
+ break;
+
+ case STMT_OMP_FLUSH_DIRECTIVE:
+ S = OMPFlushDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
+ case STMT_OMP_ORDERED_DIRECTIVE:
+ S = OMPOrderedDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
+ case STMT_OMP_ATOMIC_DIRECTIVE:
+ S = OMPAtomicDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
+ case STMT_OMP_TARGET_DIRECTIVE:
+ S = OMPTargetDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
+ case STMT_OMP_TARGET_DATA_DIRECTIVE:
+ S = OMPTargetDataDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
+ case STMT_OMP_TEAMS_DIRECTIVE:
+ S = OMPTeamsDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
+ case STMT_OMP_CANCELLATION_POINT_DIRECTIVE:
+ S = OMPCancellationPointDirective::CreateEmpty(Context, Empty);
+ break;
+
+ case STMT_OMP_CANCEL_DIRECTIVE:
+ S = OMPCancelDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
+ case STMT_OMP_TASKLOOP_DIRECTIVE: {
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPTaskLoopDirective::CreateEmpty(Context, NumClauses, CollapsedNum,
+ Empty);
+ break;
+ }
+
+ case STMT_OMP_TASKLOOP_SIMD_DIRECTIVE: {
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPTaskLoopSimdDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
+ }
+
+ case STMT_OMP_DISTRIBUTE_DIRECTIVE: {
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPDistributeDirective::CreateEmpty(Context, NumClauses, CollapsedNum,
+ Empty);
+ break;
+ }
+
+ case EXPR_CXX_OPERATOR_CALL:
+ S = new (Context) CXXOperatorCallExpr(Context, Empty);
+ break;
+
+ case EXPR_CXX_MEMBER_CALL:
+ S = new (Context) CXXMemberCallExpr(Context, Empty);
+ break;
+
+ case EXPR_CXX_CONSTRUCT:
+ S = new (Context) CXXConstructExpr(Empty);
+ break;
+
+ case EXPR_CXX_TEMPORARY_OBJECT:
+ S = new (Context) CXXTemporaryObjectExpr(Empty);
+ break;
+
+ case EXPR_CXX_STATIC_CAST:
+ S = CXXStaticCastExpr::CreateEmpty(Context,
+ /*PathSize*/ Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_CXX_DYNAMIC_CAST:
+ S = CXXDynamicCastExpr::CreateEmpty(Context,
+ /*PathSize*/ Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_CXX_REINTERPRET_CAST:
+ S = CXXReinterpretCastExpr::CreateEmpty(Context,
+ /*PathSize*/ Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_CXX_CONST_CAST:
+ S = CXXConstCastExpr::CreateEmpty(Context);
+ break;
+
+ case EXPR_CXX_FUNCTIONAL_CAST:
+ S = CXXFunctionalCastExpr::CreateEmpty(Context,
+ /*PathSize*/ Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_USER_DEFINED_LITERAL:
+ S = new (Context) UserDefinedLiteral(Context, Empty);
+ break;
+
+ case EXPR_CXX_STD_INITIALIZER_LIST:
+ S = new (Context) CXXStdInitializerListExpr(Empty);
+ break;
+
+ case EXPR_CXX_BOOL_LITERAL:
+ S = new (Context) CXXBoolLiteralExpr(Empty);
+ break;
+
+ case EXPR_CXX_NULL_PTR_LITERAL:
+ S = new (Context) CXXNullPtrLiteralExpr(Empty);
+ break;
+ case EXPR_CXX_TYPEID_EXPR:
+ S = new (Context) CXXTypeidExpr(Empty, true);
+ break;
+ case EXPR_CXX_TYPEID_TYPE:
+ S = new (Context) CXXTypeidExpr(Empty, false);
+ break;
+ case EXPR_CXX_UUIDOF_EXPR:
+ S = new (Context) CXXUuidofExpr(Empty, true);
+ break;
+ case EXPR_CXX_PROPERTY_REF_EXPR:
+ S = new (Context) MSPropertyRefExpr(Empty);
+ break;
+ case EXPR_CXX_PROPERTY_SUBSCRIPT_EXPR:
+ S = new (Context) MSPropertySubscriptExpr(Empty);
+ break;
+ case EXPR_CXX_UUIDOF_TYPE:
+ S = new (Context) CXXUuidofExpr(Empty, false);
+ break;
+ case EXPR_CXX_THIS:
+ S = new (Context) CXXThisExpr(Empty);
+ break;
+ case EXPR_CXX_THROW:
+ S = new (Context) CXXThrowExpr(Empty);
+ break;
+ case EXPR_CXX_DEFAULT_ARG: {
+ bool HasOtherExprStored = Record[ASTStmtReader::NumExprFields];
+ if (HasOtherExprStored) {
+ Expr *SubExpr = ReadSubExpr();
+ S = CXXDefaultArgExpr::Create(Context, SourceLocation(), nullptr,
+ SubExpr);
+ } else
+ S = new (Context) CXXDefaultArgExpr(Empty);
+ break;
+ }
+ case EXPR_CXX_DEFAULT_INIT:
+ S = new (Context) CXXDefaultInitExpr(Empty);
+ break;
+ case EXPR_CXX_BIND_TEMPORARY:
+ S = new (Context) CXXBindTemporaryExpr(Empty);
+ break;
+
+ case EXPR_CXX_SCALAR_VALUE_INIT:
+ S = new (Context) CXXScalarValueInitExpr(Empty);
+ break;
+ case EXPR_CXX_NEW:
+ S = new (Context) CXXNewExpr(Empty);
+ break;
+ case EXPR_CXX_DELETE:
+ S = new (Context) CXXDeleteExpr(Empty);
+ break;
+ case EXPR_CXX_PSEUDO_DESTRUCTOR:
+ S = new (Context) CXXPseudoDestructorExpr(Empty);
+ break;
+
+ case EXPR_EXPR_WITH_CLEANUPS:
+ S = ExprWithCleanups::Create(Context, Empty,
+ Record[ASTStmtReader::NumExprFields]);
+ break;
+
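+    // For the dependent/unresolved C++ expressions below, the record only
+    // carries a template-argument count when the HasTemplateKWAndArgsInfo
+    // flag is set; otherwise the count is taken to be 0.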
+ case EXPR_CXX_DEPENDENT_SCOPE_MEMBER:
+ S = CXXDependentScopeMemberExpr::CreateEmpty(Context,
+ /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
+ /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
+ ? Record[ASTStmtReader::NumExprFields + 1]
+ : 0);
+ break;
+
+ case EXPR_CXX_DEPENDENT_SCOPE_DECL_REF:
+ S = DependentScopeDeclRefExpr::CreateEmpty(Context,
+ /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
+ /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
+ ? Record[ASTStmtReader::NumExprFields + 1]
+ : 0);
+ break;
+
+ case EXPR_CXX_UNRESOLVED_CONSTRUCT:
+ S = CXXUnresolvedConstructExpr::CreateEmpty(Context,
+ /*NumArgs=*/Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_CXX_UNRESOLVED_MEMBER:
+ S = UnresolvedMemberExpr::CreateEmpty(Context,
+ /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
+ /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
+ ? Record[ASTStmtReader::NumExprFields + 1]
+ : 0);
+ break;
+
+ case EXPR_CXX_UNRESOLVED_LOOKUP:
+ S = UnresolvedLookupExpr::CreateEmpty(Context,
+ /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
+ /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
+ ? Record[ASTStmtReader::NumExprFields + 1]
+ : 0);
+ break;
+
+ case EXPR_TYPE_TRAIT:
+ S = TypeTraitExpr::CreateDeserialized(Context,
+ Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_ARRAY_TYPE_TRAIT:
+ S = new (Context) ArrayTypeTraitExpr(Empty);
+ break;
+
+ case EXPR_CXX_EXPRESSION_TRAIT:
+ S = new (Context) ExpressionTraitExpr(Empty);
+ break;
+
+ case EXPR_CXX_NOEXCEPT:
+ S = new (Context) CXXNoexceptExpr(Empty);
+ break;
+
+ case EXPR_PACK_EXPANSION:
+ S = new (Context) PackExpansionExpr(Empty);
+ break;
+
+ case EXPR_SIZEOF_PACK:
+ S = SizeOfPackExpr::CreateDeserialized(
+ Context,
+ /*NumPartialArgs=*/Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_SUBST_NON_TYPE_TEMPLATE_PARM:
+ S = new (Context) SubstNonTypeTemplateParmExpr(Empty);
+ break;
+
+ case EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK:
+ S = new (Context) SubstNonTypeTemplateParmPackExpr(Empty);
+ break;
+
+ case EXPR_FUNCTION_PARM_PACK:
+ S = FunctionParmPackExpr::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_MATERIALIZE_TEMPORARY:
+ S = new (Context) MaterializeTemporaryExpr(Empty);
+ break;
+
+ case EXPR_CXX_FOLD:
+ S = new (Context) CXXFoldExpr(Empty);
+ break;
+
+ case EXPR_OPAQUE_VALUE:
+ S = new (Context) OpaqueValueExpr(Empty);
+ break;
+
+ case EXPR_CUDA_KERNEL_CALL:
+ S = new (Context) CUDAKernelCallExpr(Context, Empty);
+ break;
+
+ case EXPR_ASTYPE:
+ S = new (Context) AsTypeExpr(Empty);
+ break;
+
+ case EXPR_PSEUDO_OBJECT: {
+ unsigned numSemanticExprs = Record[ASTStmtReader::NumExprFields];
+ S = PseudoObjectExpr::Create(Context, Empty, numSemanticExprs);
+ break;
+ }
+
+ case EXPR_ATOMIC:
+ S = new (Context) AtomicExpr(Empty);
+ break;
+
+ case EXPR_LAMBDA: {
+ unsigned NumCaptures = Record[ASTStmtReader::NumExprFields];
+ unsigned NumArrayIndexVars = Record[ASTStmtReader::NumExprFields + 1];
+ S = LambdaExpr::CreateDeserialized(Context, NumCaptures,
+ NumArrayIndexVars);
+ break;
+ }
+ }
+
+ // We hit a STMT_STOP, so we're done with this expression.
+ if (Finished)
+ break;
+
+ ++NumStatementsRead;
+
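+    // Visit the new node to deserialize its fields, then remember it by its
+    // end offset so that later STMT_REF_PTR records can refer back to it.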
+ if (S && !IsStmtReference) {
+ Reader.Visit(S);
+ StmtEntries[Cursor.GetCurrentBitNo()] = S;
+ }
+
+ assert(Idx == Record.size() && "Invalid deserialization of statement");
+ StmtStack.push_back(S);
+ }
+Done:
+ assert(StmtStack.size() > PrevNumStmts && "Read too many sub-stmts!");
+ assert(StmtStack.size() == PrevNumStmts + 1 && "Extra expressions on stack!");
+ return StmtStack.pop_back_val();
+}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp
new file mode 100644
index 0000000..0f50d7a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp
@@ -0,0 +1,5821 @@
+//===--- ASTWriter.cpp - AST File Writer ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTWriter class, which writes AST files.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Serialization/ASTWriter.h"
+#include "clang/Serialization/ModuleFileExtension.h"
+#include "ASTCommon.h"
+#include "ASTReaderInternals.h"
+#include "MultiOnDiskHashTable.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclContextInternals.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclLookups.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLocVisitor.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemStatCache.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/SourceManagerInternals.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
+#include "clang/Basic/Version.h"
+#include "clang/Basic/VersionTuple.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/HeaderSearchOptions.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/PreprocessingRecord.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorOptions.h"
+#include "clang/Sema/IdentifierResolver.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Serialization/ASTReader.h"
+#include "clang/Serialization/SerializationDiagnostic.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/OnDiskHashTable.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Process.h"
+#include <algorithm>
+#include <cstdio>
+#include <cstring>
+#include <utility>
+
+using namespace clang;
+using namespace clang::serialization;
+
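+// Reinterpret a container's contiguous element storage as a raw byte
+// StringRef so that homogeneous data can be emitted in bulk (e.g. as a
+// record blob) rather than element by element. Only meaningful for
+// trivially copyable element types.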
+template <typename T, typename Allocator>
+static StringRef bytes(const std::vector<T, Allocator> &v) {
+ if (v.empty()) return StringRef();
+ return StringRef(reinterpret_cast<const char*>(&v[0]),
+ sizeof(T) * v.size());
+}
+
+template <typename T>
+static StringRef bytes(const SmallVectorImpl<T> &v) {
+ return StringRef(reinterpret_cast<const char*>(v.data()),
+ sizeof(T) * v.size());
+}
+
+//===----------------------------------------------------------------------===//
+// Type serialization
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class ASTTypeWriter {
+ ASTWriter &Writer;
+ ASTWriter::RecordDataImpl &Record;
+
+ public:
+ /// \brief Type code that corresponds to the record generated.
+ TypeCode Code;
+ /// \brief Abbreviation to use for the record, if any.
+ unsigned AbbrevToUse;
+
+ ASTTypeWriter(ASTWriter &Writer, ASTWriter::RecordDataImpl &Record)
+ : Writer(Writer), Record(Record), Code(TYPE_EXT_QUAL) { }
+
+ void VisitArrayType(const ArrayType *T);
+ void VisitFunctionType(const FunctionType *T);
+ void VisitTagType(const TagType *T);
+
+#define TYPE(Class, Base) void Visit##Class##Type(const Class##Type *T);
+#define ABSTRACT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+ };
+} // end anonymous namespace
+
+void ASTTypeWriter::VisitBuiltinType(const BuiltinType *T) {
+ llvm_unreachable("Built-in types are never serialized");
+}
+
+void ASTTypeWriter::VisitComplexType(const ComplexType *T) {
+ Writer.AddTypeRef(T->getElementType(), Record);
+ Code = TYPE_COMPLEX;
+}
+
+void ASTTypeWriter::VisitPointerType(const PointerType *T) {
+ Writer.AddTypeRef(T->getPointeeType(), Record);
+ Code = TYPE_POINTER;
+}
+
+void ASTTypeWriter::VisitDecayedType(const DecayedType *T) {
+ Writer.AddTypeRef(T->getOriginalType(), Record);
+ Code = TYPE_DECAYED;
+}
+
+void ASTTypeWriter::VisitAdjustedType(const AdjustedType *T) {
+ Writer.AddTypeRef(T->getOriginalType(), Record);
+ Writer.AddTypeRef(T->getAdjustedType(), Record);
+ Code = TYPE_ADJUSTED;
+}
+
+void ASTTypeWriter::VisitBlockPointerType(const BlockPointerType *T) {
+ Writer.AddTypeRef(T->getPointeeType(), Record);
+ Code = TYPE_BLOCK_POINTER;
+}
+
+void ASTTypeWriter::VisitLValueReferenceType(const LValueReferenceType *T) {
+ Writer.AddTypeRef(T->getPointeeTypeAsWritten(), Record);
+ Record.push_back(T->isSpelledAsLValue());
+ Code = TYPE_LVALUE_REFERENCE;
+}
+
+void ASTTypeWriter::VisitRValueReferenceType(const RValueReferenceType *T) {
+ Writer.AddTypeRef(T->getPointeeTypeAsWritten(), Record);
+ Code = TYPE_RVALUE_REFERENCE;
+}
+
+void ASTTypeWriter::VisitMemberPointerType(const MemberPointerType *T) {
+ Writer.AddTypeRef(T->getPointeeType(), Record);
+ Writer.AddTypeRef(QualType(T->getClass(), 0), Record);
+ Code = TYPE_MEMBER_POINTER;
+}
+
+void ASTTypeWriter::VisitArrayType(const ArrayType *T) {
+ Writer.AddTypeRef(T->getElementType(), Record);
+ Record.push_back(T->getSizeModifier()); // FIXME: stable values
+ Record.push_back(T->getIndexTypeCVRQualifiers()); // FIXME: stable values
+}
+
+void ASTTypeWriter::VisitConstantArrayType(const ConstantArrayType *T) {
+ VisitArrayType(T);
+ Writer.AddAPInt(T->getSize(), Record);
+ Code = TYPE_CONSTANT_ARRAY;
+}
+
+void ASTTypeWriter::VisitIncompleteArrayType(const IncompleteArrayType *T) {
+ VisitArrayType(T);
+ Code = TYPE_INCOMPLETE_ARRAY;
+}
+
+void ASTTypeWriter::VisitVariableArrayType(const VariableArrayType *T) {
+ VisitArrayType(T);
+ Writer.AddSourceLocation(T->getLBracketLoc(), Record);
+ Writer.AddSourceLocation(T->getRBracketLoc(), Record);
+ Writer.AddStmt(T->getSizeExpr());
+ Code = TYPE_VARIABLE_ARRAY;
+}
+
+void ASTTypeWriter::VisitVectorType(const VectorType *T) {
+ Writer.AddTypeRef(T->getElementType(), Record);
+ Record.push_back(T->getNumElements());
+ Record.push_back(T->getVectorKind());
+ Code = TYPE_VECTOR;
+}
+
+void ASTTypeWriter::VisitExtVectorType(const ExtVectorType *T) {
+ VisitVectorType(T);
+ Code = TYPE_EXT_VECTOR;
+}
+
+void ASTTypeWriter::VisitFunctionType(const FunctionType *T) {
+ Writer.AddTypeRef(T->getReturnType(), Record);
+ FunctionType::ExtInfo C = T->getExtInfo();
+ Record.push_back(C.getNoReturn());
+ Record.push_back(C.getHasRegParm());
+ Record.push_back(C.getRegParm());
+ // FIXME: need to stabilize encoding of calling convention...
+ Record.push_back(C.getCC());
+ Record.push_back(C.getProducesResult());
+
+ if (C.getHasRegParm() || C.getRegParm() || C.getProducesResult())
+ AbbrevToUse = 0;
+}
+
+void ASTTypeWriter::VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
+ VisitFunctionType(T);
+ Code = TYPE_FUNCTION_NO_PROTO;
+}
+
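+// Serialize the exception specification of a function prototype. The payload
+// depends on the specification kind: dynamic specs carry their exception
+// types, a computed noexcept carries its expression, and uninstantiated or
+// unevaluated specs carry references to the declarations they come from.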
+static void addExceptionSpec(ASTWriter &Writer, const FunctionProtoType *T,
+ ASTWriter::RecordDataImpl &Record) {
+ Record.push_back(T->getExceptionSpecType());
+ if (T->getExceptionSpecType() == EST_Dynamic) {
+ Record.push_back(T->getNumExceptions());
+ for (unsigned I = 0, N = T->getNumExceptions(); I != N; ++I)
+ Writer.AddTypeRef(T->getExceptionType(I), Record);
+ } else if (T->getExceptionSpecType() == EST_ComputedNoexcept) {
+ Writer.AddStmt(T->getNoexceptExpr());
+ } else if (T->getExceptionSpecType() == EST_Uninstantiated) {
+ Writer.AddDeclRef(T->getExceptionSpecDecl(), Record);
+ Writer.AddDeclRef(T->getExceptionSpecTemplate(), Record);
+ } else if (T->getExceptionSpecType() == EST_Unevaluated) {
+ Writer.AddDeclRef(T->getExceptionSpecDecl(), Record);
+ }
+}
+
+void ASTTypeWriter::VisitFunctionProtoType(const FunctionProtoType *T) {
+ VisitFunctionType(T);
+
+ Record.push_back(T->isVariadic());
+ Record.push_back(T->hasTrailingReturn());
+ Record.push_back(T->getTypeQuals());
+ Record.push_back(static_cast<unsigned>(T->getRefQualifier()));
+ addExceptionSpec(Writer, T, Record);
+
+ Record.push_back(T->getNumParams());
+ for (unsigned I = 0, N = T->getNumParams(); I != N; ++I)
+ Writer.AddTypeRef(T->getParamType(I), Record);
+
+ if (T->isVariadic() || T->hasTrailingReturn() || T->getTypeQuals() ||
+ T->getRefQualifier() || T->getExceptionSpecType() != EST_None)
+ AbbrevToUse = 0;
+
+ Code = TYPE_FUNCTION_PROTO;
+}
+
+void ASTTypeWriter::VisitUnresolvedUsingType(const UnresolvedUsingType *T) {
+ Writer.AddDeclRef(T->getDecl(), Record);
+ Code = TYPE_UNRESOLVED_USING;
+}
+
+void ASTTypeWriter::VisitTypedefType(const TypedefType *T) {
+ Writer.AddDeclRef(T->getDecl(), Record);
+  assert(!T->isCanonicalUnqualified() && "Invalid typedef?");
+ Writer.AddTypeRef(T->getCanonicalTypeInternal(), Record);
+ Code = TYPE_TYPEDEF;
+}
+
+void ASTTypeWriter::VisitTypeOfExprType(const TypeOfExprType *T) {
+ Writer.AddStmt(T->getUnderlyingExpr());
+ Code = TYPE_TYPEOF_EXPR;
+}
+
+void ASTTypeWriter::VisitTypeOfType(const TypeOfType *T) {
+ Writer.AddTypeRef(T->getUnderlyingType(), Record);
+ Code = TYPE_TYPEOF;
+}
+
+void ASTTypeWriter::VisitDecltypeType(const DecltypeType *T) {
+ Writer.AddTypeRef(T->getUnderlyingType(), Record);
+ Writer.AddStmt(T->getUnderlyingExpr());
+ Code = TYPE_DECLTYPE;
+}
+
+void ASTTypeWriter::VisitUnaryTransformType(const UnaryTransformType *T) {
+ Writer.AddTypeRef(T->getBaseType(), Record);
+ Writer.AddTypeRef(T->getUnderlyingType(), Record);
+ Record.push_back(T->getUTTKind());
+ Code = TYPE_UNARY_TRANSFORM;
+}
+
+void ASTTypeWriter::VisitAutoType(const AutoType *T) {
+ Writer.AddTypeRef(T->getDeducedType(), Record);
+ Record.push_back((unsigned)T->getKeyword());
+ if (T->getDeducedType().isNull())
+ Record.push_back(T->isDependentType());
+ Code = TYPE_AUTO;
+}
+
+void ASTTypeWriter::VisitTagType(const TagType *T) {
+ Record.push_back(T->isDependentType());
+ Writer.AddDeclRef(T->getDecl()->getCanonicalDecl(), Record);
+ assert(!T->isBeingDefined() &&
+ "Cannot serialize in the middle of a type definition");
+}
+
+void ASTTypeWriter::VisitRecordType(const RecordType *T) {
+ VisitTagType(T);
+ Code = TYPE_RECORD;
+}
+
+void ASTTypeWriter::VisitEnumType(const EnumType *T) {
+ VisitTagType(T);
+ Code = TYPE_ENUM;
+}
+
+void ASTTypeWriter::VisitAttributedType(const AttributedType *T) {
+ Writer.AddTypeRef(T->getModifiedType(), Record);
+ Writer.AddTypeRef(T->getEquivalentType(), Record);
+ Record.push_back(T->getAttrKind());
+ Code = TYPE_ATTRIBUTED;
+}
+
+void
+ASTTypeWriter::VisitSubstTemplateTypeParmType(
+ const SubstTemplateTypeParmType *T) {
+ Writer.AddTypeRef(QualType(T->getReplacedParameter(), 0), Record);
+ Writer.AddTypeRef(T->getReplacementType(), Record);
+ Code = TYPE_SUBST_TEMPLATE_TYPE_PARM;
+}
+
+void
+ASTTypeWriter::VisitSubstTemplateTypeParmPackType(
+ const SubstTemplateTypeParmPackType *T) {
+ Writer.AddTypeRef(QualType(T->getReplacedParameter(), 0), Record);
+ Writer.AddTemplateArgument(T->getArgumentPack(), Record);
+ Code = TYPE_SUBST_TEMPLATE_TYPE_PARM_PACK;
+}
+
+void
+ASTTypeWriter::VisitTemplateSpecializationType(
+ const TemplateSpecializationType *T) {
+ Record.push_back(T->isDependentType());
+ Writer.AddTemplateName(T->getTemplateName(), Record);
+ Record.push_back(T->getNumArgs());
+ for (const auto &ArgI : *T)
+ Writer.AddTemplateArgument(ArgI, Record);
+ Writer.AddTypeRef(T->isTypeAlias() ? T->getAliasedType() :
+ T->isCanonicalUnqualified() ? QualType()
+ : T->getCanonicalTypeInternal(),
+ Record);
+ Code = TYPE_TEMPLATE_SPECIALIZATION;
+}
+
+void
+ASTTypeWriter::VisitDependentSizedArrayType(const DependentSizedArrayType *T) {
+ VisitArrayType(T);
+ Writer.AddStmt(T->getSizeExpr());
+ Writer.AddSourceRange(T->getBracketsRange(), Record);
+ Code = TYPE_DEPENDENT_SIZED_ARRAY;
+}
+
+void
+ASTTypeWriter::VisitDependentSizedExtVectorType(
+ const DependentSizedExtVectorType *T) {
+ // FIXME: Serialize this type (C++ only)
+ llvm_unreachable("Cannot serialize dependent sized extended vector types");
+}
+
+void
+ASTTypeWriter::VisitTemplateTypeParmType(const TemplateTypeParmType *T) {
+ Record.push_back(T->getDepth());
+ Record.push_back(T->getIndex());
+ Record.push_back(T->isParameterPack());
+ Writer.AddDeclRef(T->getDecl(), Record);
+ Code = TYPE_TEMPLATE_TYPE_PARM;
+}
+
+void
+ASTTypeWriter::VisitDependentNameType(const DependentNameType *T) {
+ Record.push_back(T->getKeyword());
+ Writer.AddNestedNameSpecifier(T->getQualifier(), Record);
+ Writer.AddIdentifierRef(T->getIdentifier(), Record);
+ Writer.AddTypeRef(T->isCanonicalUnqualified() ? QualType()
+ : T->getCanonicalTypeInternal(),
+ Record);
+ Code = TYPE_DEPENDENT_NAME;
+}
+
+void
+ASTTypeWriter::VisitDependentTemplateSpecializationType(
+ const DependentTemplateSpecializationType *T) {
+ Record.push_back(T->getKeyword());
+ Writer.AddNestedNameSpecifier(T->getQualifier(), Record);
+ Writer.AddIdentifierRef(T->getIdentifier(), Record);
+ Record.push_back(T->getNumArgs());
+ for (const auto &I : *T)
+ Writer.AddTemplateArgument(I, Record);
+ Code = TYPE_DEPENDENT_TEMPLATE_SPECIALIZATION;
+}
+
+void ASTTypeWriter::VisitPackExpansionType(const PackExpansionType *T) {
+ Writer.AddTypeRef(T->getPattern(), Record);
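+  // Encode a known expansion count N as N + 1 and an unknown count as 0, so
+  // the reader can distinguish the two without an extra flag.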
+ if (Optional<unsigned> NumExpansions = T->getNumExpansions())
+ Record.push_back(*NumExpansions + 1);
+ else
+ Record.push_back(0);
+ Code = TYPE_PACK_EXPANSION;
+}
+
+void ASTTypeWriter::VisitParenType(const ParenType *T) {
+ Writer.AddTypeRef(T->getInnerType(), Record);
+ Code = TYPE_PAREN;
+}
+
+void ASTTypeWriter::VisitElaboratedType(const ElaboratedType *T) {
+ Record.push_back(T->getKeyword());
+ Writer.AddNestedNameSpecifier(T->getQualifier(), Record);
+ Writer.AddTypeRef(T->getNamedType(), Record);
+ Code = TYPE_ELABORATED;
+}
+
+void ASTTypeWriter::VisitInjectedClassNameType(const InjectedClassNameType *T) {
+ Writer.AddDeclRef(T->getDecl()->getCanonicalDecl(), Record);
+ Writer.AddTypeRef(T->getInjectedSpecializationType(), Record);
+ Code = TYPE_INJECTED_CLASS_NAME;
+}
+
+void ASTTypeWriter::VisitObjCInterfaceType(const ObjCInterfaceType *T) {
+ Writer.AddDeclRef(T->getDecl()->getCanonicalDecl(), Record);
+ Code = TYPE_OBJC_INTERFACE;
+}
+
+void ASTTypeWriter::VisitObjCObjectType(const ObjCObjectType *T) {
+ Writer.AddTypeRef(T->getBaseType(), Record);
+ Record.push_back(T->getTypeArgsAsWritten().size());
+ for (auto TypeArg : T->getTypeArgsAsWritten())
+ Writer.AddTypeRef(TypeArg, Record);
+ Record.push_back(T->getNumProtocols());
+ for (const auto *I : T->quals())
+ Writer.AddDeclRef(I, Record);
+ Record.push_back(T->isKindOfTypeAsWritten());
+ Code = TYPE_OBJC_OBJECT;
+}
+
+void
+ASTTypeWriter::VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
+ Writer.AddTypeRef(T->getPointeeType(), Record);
+ Code = TYPE_OBJC_OBJECT_POINTER;
+}
+
+void
+ASTTypeWriter::VisitAtomicType(const AtomicType *T) {
+ Writer.AddTypeRef(T->getValueType(), Record);
+ Code = TYPE_ATOMIC;
+}
+
+namespace {
+
+class TypeLocWriter : public TypeLocVisitor<TypeLocWriter> {
+ ASTWriter &Writer;
+ ASTWriter::RecordDataImpl &Record;
+
+public:
+ TypeLocWriter(ASTWriter &Writer, ASTWriter::RecordDataImpl &Record)
+ : Writer(Writer), Record(Record) { }
+
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ void Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc);
+#include "clang/AST/TypeLocNodes.def"
+
+ void VisitArrayTypeLoc(ArrayTypeLoc TyLoc);
+ void VisitFunctionTypeLoc(FunctionTypeLoc TyLoc);
+};
+
+} // end anonymous namespace
+
+void TypeLocWriter::VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
+ // nothing to do
+}
+void TypeLocWriter::VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getBuiltinLoc(), Record);
+ if (TL.needsExtraLocalData()) {
+ Record.push_back(TL.getWrittenTypeSpec());
+ Record.push_back(TL.getWrittenSignSpec());
+ Record.push_back(TL.getWrittenWidthSpec());
+ Record.push_back(TL.hasModeAttr());
+ }
+}
+void TypeLocWriter::VisitComplexTypeLoc(ComplexTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitPointerTypeLoc(PointerTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getStarLoc(), Record);
+}
+void TypeLocWriter::VisitDecayedTypeLoc(DecayedTypeLoc TL) {
+ // nothing to do
+}
+void TypeLocWriter::VisitAdjustedTypeLoc(AdjustedTypeLoc TL) {
+ // nothing to do
+}
+void TypeLocWriter::VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getCaretLoc(), Record);
+}
+void TypeLocWriter::VisitLValueReferenceTypeLoc(LValueReferenceTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getAmpLoc(), Record);
+}
+void TypeLocWriter::VisitRValueReferenceTypeLoc(RValueReferenceTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getAmpAmpLoc(), Record);
+}
+void TypeLocWriter::VisitMemberPointerTypeLoc(MemberPointerTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getStarLoc(), Record);
+ Writer.AddTypeSourceInfo(TL.getClassTInfo(), Record);
+}
+void TypeLocWriter::VisitArrayTypeLoc(ArrayTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getLBracketLoc(), Record);
+ Writer.AddSourceLocation(TL.getRBracketLoc(), Record);
+ Record.push_back(TL.getSizeExpr() ? 1 : 0);
+ if (TL.getSizeExpr())
+ Writer.AddStmt(TL.getSizeExpr());
+}
+void TypeLocWriter::VisitConstantArrayTypeLoc(ConstantArrayTypeLoc TL) {
+ VisitArrayTypeLoc(TL);
+}
+void TypeLocWriter::VisitIncompleteArrayTypeLoc(IncompleteArrayTypeLoc TL) {
+ VisitArrayTypeLoc(TL);
+}
+void TypeLocWriter::VisitVariableArrayTypeLoc(VariableArrayTypeLoc TL) {
+ VisitArrayTypeLoc(TL);
+}
+void TypeLocWriter::VisitDependentSizedArrayTypeLoc(
+ DependentSizedArrayTypeLoc TL) {
+ VisitArrayTypeLoc(TL);
+}
+void TypeLocWriter::VisitDependentSizedExtVectorTypeLoc(
+ DependentSizedExtVectorTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitVectorTypeLoc(VectorTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getLocalRangeBegin(), Record);
+ Writer.AddSourceLocation(TL.getLParenLoc(), Record);
+ Writer.AddSourceLocation(TL.getRParenLoc(), Record);
+ Writer.AddSourceLocation(TL.getLocalRangeEnd(), Record);
+ for (unsigned i = 0, e = TL.getNumParams(); i != e; ++i)
+ Writer.AddDeclRef(TL.getParam(i), Record);
+}
+void TypeLocWriter::VisitFunctionProtoTypeLoc(FunctionProtoTypeLoc TL) {
+ VisitFunctionTypeLoc(TL);
+}
+void TypeLocWriter::VisitFunctionNoProtoTypeLoc(FunctionNoProtoTypeLoc TL) {
+ VisitFunctionTypeLoc(TL);
+}
+void TypeLocWriter::VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitTypedefTypeLoc(TypedefTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitTypeOfExprTypeLoc(TypeOfExprTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getTypeofLoc(), Record);
+ Writer.AddSourceLocation(TL.getLParenLoc(), Record);
+ Writer.AddSourceLocation(TL.getRParenLoc(), Record);
+}
+void TypeLocWriter::VisitTypeOfTypeLoc(TypeOfTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getTypeofLoc(), Record);
+ Writer.AddSourceLocation(TL.getLParenLoc(), Record);
+ Writer.AddSourceLocation(TL.getRParenLoc(), Record);
+ Writer.AddTypeSourceInfo(TL.getUnderlyingTInfo(), Record);
+}
+void TypeLocWriter::VisitDecltypeTypeLoc(DecltypeTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getKWLoc(), Record);
+ Writer.AddSourceLocation(TL.getLParenLoc(), Record);
+ Writer.AddSourceLocation(TL.getRParenLoc(), Record);
+ Writer.AddTypeSourceInfo(TL.getUnderlyingTInfo(), Record);
+}
+void TypeLocWriter::VisitAutoTypeLoc(AutoTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitRecordTypeLoc(RecordTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitEnumTypeLoc(EnumTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getAttrNameLoc(), Record);
+ if (TL.hasAttrOperand()) {
+ SourceRange range = TL.getAttrOperandParensRange();
+ Writer.AddSourceLocation(range.getBegin(), Record);
+ Writer.AddSourceLocation(range.getEnd(), Record);
+ }
+ if (TL.hasAttrExprOperand()) {
+ Expr *operand = TL.getAttrExprOperand();
+ Record.push_back(operand ? 1 : 0);
+ if (operand) Writer.AddStmt(operand);
+ } else if (TL.hasAttrEnumOperand()) {
+ Writer.AddSourceLocation(TL.getAttrEnumOperandLoc(), Record);
+ }
+}
+void TypeLocWriter::VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitSubstTemplateTypeParmTypeLoc(
+ SubstTemplateTypeParmTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitSubstTemplateTypeParmPackTypeLoc(
+ SubstTemplateTypeParmPackTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitTemplateSpecializationTypeLoc(
+ TemplateSpecializationTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getTemplateKeywordLoc(), Record);
+ Writer.AddSourceLocation(TL.getTemplateNameLoc(), Record);
+ Writer.AddSourceLocation(TL.getLAngleLoc(), Record);
+ Writer.AddSourceLocation(TL.getRAngleLoc(), Record);
+ for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
+ Writer.AddTemplateArgumentLocInfo(TL.getArgLoc(i).getArgument().getKind(),
+ TL.getArgLoc(i).getLocInfo(), Record);
+}
+void TypeLocWriter::VisitParenTypeLoc(ParenTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getLParenLoc(), Record);
+ Writer.AddSourceLocation(TL.getRParenLoc(), Record);
+}
+void TypeLocWriter::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getElaboratedKeywordLoc(), Record);
+ Writer.AddNestedNameSpecifierLoc(TL.getQualifierLoc(), Record);
+}
+void TypeLocWriter::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getElaboratedKeywordLoc(), Record);
+ Writer.AddNestedNameSpecifierLoc(TL.getQualifierLoc(), Record);
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitDependentTemplateSpecializationTypeLoc(
+ DependentTemplateSpecializationTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getElaboratedKeywordLoc(), Record);
+ Writer.AddNestedNameSpecifierLoc(TL.getQualifierLoc(), Record);
+ Writer.AddSourceLocation(TL.getTemplateKeywordLoc(), Record);
+ Writer.AddSourceLocation(TL.getTemplateNameLoc(), Record);
+ Writer.AddSourceLocation(TL.getLAngleLoc(), Record);
+ Writer.AddSourceLocation(TL.getRAngleLoc(), Record);
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
+ Writer.AddTemplateArgumentLocInfo(TL.getArgLoc(I).getArgument().getKind(),
+ TL.getArgLoc(I).getLocInfo(), Record);
+}
+void TypeLocWriter::VisitPackExpansionTypeLoc(PackExpansionTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getEllipsisLoc(), Record);
+}
+void TypeLocWriter::VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitObjCObjectTypeLoc(ObjCObjectTypeLoc TL) {
+ Record.push_back(TL.hasBaseTypeAsWritten());
+ Writer.AddSourceLocation(TL.getTypeArgsLAngleLoc(), Record);
+ Writer.AddSourceLocation(TL.getTypeArgsRAngleLoc(), Record);
+ for (unsigned i = 0, e = TL.getNumTypeArgs(); i != e; ++i)
+ Writer.AddTypeSourceInfo(TL.getTypeArgTInfo(i), Record);
+ Writer.AddSourceLocation(TL.getProtocolLAngleLoc(), Record);
+ Writer.AddSourceLocation(TL.getProtocolRAngleLoc(), Record);
+ for (unsigned i = 0, e = TL.getNumProtocols(); i != e; ++i)
+ Writer.AddSourceLocation(TL.getProtocolLoc(i), Record);
+}
+void TypeLocWriter::VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getStarLoc(), Record);
+}
+void TypeLocWriter::VisitAtomicTypeLoc(AtomicTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getKWLoc(), Record);
+ Writer.AddSourceLocation(TL.getLParenLoc(), Record);
+ Writer.AddSourceLocation(TL.getRParenLoc(), Record);
+}
+
+void ASTWriter::WriteTypeAbbrevs() {
+ using namespace llvm;
+
+ BitCodeAbbrev *Abv;
+
+ // Abbreviation for TYPE_EXT_QUAL
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::TYPE_EXT_QUAL));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 3)); // Quals
+ TypeExtQualAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for TYPE_FUNCTION_PROTO
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::TYPE_FUNCTION_PROTO));
+ // FunctionType
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ReturnType
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // NoReturn
+ Abv->Add(BitCodeAbbrevOp(0)); // HasRegParm
+ Abv->Add(BitCodeAbbrevOp(0)); // RegParm
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // CC
+ Abv->Add(BitCodeAbbrevOp(0)); // ProducesResult
+ // FunctionProtoType
+ Abv->Add(BitCodeAbbrevOp(0)); // IsVariadic
+ Abv->Add(BitCodeAbbrevOp(0)); // HasTrailingReturn
+ Abv->Add(BitCodeAbbrevOp(0)); // TypeQuals
+ Abv->Add(BitCodeAbbrevOp(0)); // RefQualifier
+ Abv->Add(BitCodeAbbrevOp(EST_None)); // ExceptionSpec
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // NumParams
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Params
+ TypeFunctionProtoAbbrev = Stream.EmitAbbrev(Abv);
+}
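+
+// Illustration (hypothetical IDs ReturnTypeID, Param0ID, Param1ID): a
+// two-parameter prototype emitted through the abbreviation above looks
+// roughly like the following; operands declared with a literal value,
+// e.g. BitCodeAbbrevOp(0) for HasRegParm, cost no bits in the output but
+// must still match the corresponding record element:
+//
+//   RecordData Record = {serialization::TYPE_FUNCTION_PROTO, ReturnTypeID,
+//                        /*NoReturn=*/0, /*HasRegParm=*/0, /*RegParm=*/0,
+//                        CC, /*ProducesResult=*/0, /*IsVariadic=*/0,
+//                        /*HasTrailingReturn=*/0, /*TypeQuals=*/0,
+//                        /*RefQualifier=*/0, EST_None, /*NumParams=*/2,
+//                        Param0ID, Param1ID};
+//   Stream.EmitRecordWithAbbrev(TypeFunctionProtoAbbrev, Record);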
+
+//===----------------------------------------------------------------------===//
+// ASTWriter Implementation
+//===----------------------------------------------------------------------===//
+
+static void EmitBlockID(unsigned ID, const char *Name,
+ llvm::BitstreamWriter &Stream,
+ ASTWriter::RecordDataImpl &Record) {
+ Record.clear();
+ Record.push_back(ID);
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_SETBID, Record);
+
+ // Emit the block name if present.
+ if (!Name || Name[0] == 0)
+ return;
+ Record.clear();
+ while (*Name)
+ Record.push_back(*Name++);
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_BLOCKNAME, Record);
+}
+
+static void EmitRecordID(unsigned ID, const char *Name,
+ llvm::BitstreamWriter &Stream,
+ ASTWriter::RecordDataImpl &Record) {
+ Record.clear();
+ Record.push_back(ID);
+ while (*Name)
+ Record.push_back(*Name++);
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_SETRECORDNAME, Record);
+}
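+
+// For example, EmitRecordID(TYPE_EXT_QUAL, "TYPE_EXT_QUAL", Stream, Record)
+// emits the numeric ID followed by one record element per character of the
+// name; tools such as llvm-bcanalyzer use these BLOCKINFO records to print
+// symbolic block and record names when dumping an AST file.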
+
+static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
+ ASTWriter::RecordDataImpl &Record) {
+#define RECORD(X) EmitRecordID(X, #X, Stream, Record)
+ RECORD(STMT_STOP);
+ RECORD(STMT_NULL_PTR);
+ RECORD(STMT_REF_PTR);
+ RECORD(STMT_NULL);
+ RECORD(STMT_COMPOUND);
+ RECORD(STMT_CASE);
+ RECORD(STMT_DEFAULT);
+ RECORD(STMT_LABEL);
+ RECORD(STMT_ATTRIBUTED);
+ RECORD(STMT_IF);
+ RECORD(STMT_SWITCH);
+ RECORD(STMT_WHILE);
+ RECORD(STMT_DO);
+ RECORD(STMT_FOR);
+ RECORD(STMT_GOTO);
+ RECORD(STMT_INDIRECT_GOTO);
+ RECORD(STMT_CONTINUE);
+ RECORD(STMT_BREAK);
+ RECORD(STMT_RETURN);
+ RECORD(STMT_DECL);
+ RECORD(STMT_GCCASM);
+ RECORD(STMT_MSASM);
+ RECORD(EXPR_PREDEFINED);
+ RECORD(EXPR_DECL_REF);
+ RECORD(EXPR_INTEGER_LITERAL);
+ RECORD(EXPR_FLOATING_LITERAL);
+ RECORD(EXPR_IMAGINARY_LITERAL);
+ RECORD(EXPR_STRING_LITERAL);
+ RECORD(EXPR_CHARACTER_LITERAL);
+ RECORD(EXPR_PAREN);
+ RECORD(EXPR_PAREN_LIST);
+ RECORD(EXPR_UNARY_OPERATOR);
+ RECORD(EXPR_SIZEOF_ALIGN_OF);
+ RECORD(EXPR_ARRAY_SUBSCRIPT);
+ RECORD(EXPR_CALL);
+ RECORD(EXPR_MEMBER);
+ RECORD(EXPR_BINARY_OPERATOR);
+ RECORD(EXPR_COMPOUND_ASSIGN_OPERATOR);
+ RECORD(EXPR_CONDITIONAL_OPERATOR);
+ RECORD(EXPR_IMPLICIT_CAST);
+ RECORD(EXPR_CSTYLE_CAST);
+ RECORD(EXPR_COMPOUND_LITERAL);
+ RECORD(EXPR_EXT_VECTOR_ELEMENT);
+ RECORD(EXPR_INIT_LIST);
+ RECORD(EXPR_DESIGNATED_INIT);
+ RECORD(EXPR_DESIGNATED_INIT_UPDATE);
+ RECORD(EXPR_IMPLICIT_VALUE_INIT);
+ RECORD(EXPR_NO_INIT);
+ RECORD(EXPR_VA_ARG);
+ RECORD(EXPR_ADDR_LABEL);
+ RECORD(EXPR_STMT);
+ RECORD(EXPR_CHOOSE);
+ RECORD(EXPR_GNU_NULL);
+ RECORD(EXPR_SHUFFLE_VECTOR);
+ RECORD(EXPR_BLOCK);
+ RECORD(EXPR_GENERIC_SELECTION);
+ RECORD(EXPR_OBJC_STRING_LITERAL);
+ RECORD(EXPR_OBJC_BOXED_EXPRESSION);
+ RECORD(EXPR_OBJC_ARRAY_LITERAL);
+ RECORD(EXPR_OBJC_DICTIONARY_LITERAL);
+ RECORD(EXPR_OBJC_ENCODE);
+ RECORD(EXPR_OBJC_SELECTOR_EXPR);
+ RECORD(EXPR_OBJC_PROTOCOL_EXPR);
+ RECORD(EXPR_OBJC_IVAR_REF_EXPR);
+ RECORD(EXPR_OBJC_PROPERTY_REF_EXPR);
+ RECORD(EXPR_OBJC_KVC_REF_EXPR);
+ RECORD(EXPR_OBJC_MESSAGE_EXPR);
+ RECORD(STMT_OBJC_FOR_COLLECTION);
+ RECORD(STMT_OBJC_CATCH);
+ RECORD(STMT_OBJC_FINALLY);
+ RECORD(STMT_OBJC_AT_TRY);
+ RECORD(STMT_OBJC_AT_SYNCHRONIZED);
+ RECORD(STMT_OBJC_AT_THROW);
+ RECORD(EXPR_OBJC_BOOL_LITERAL);
+ RECORD(STMT_CXX_CATCH);
+ RECORD(STMT_CXX_TRY);
+ RECORD(STMT_CXX_FOR_RANGE);
+ RECORD(EXPR_CXX_OPERATOR_CALL);
+ RECORD(EXPR_CXX_MEMBER_CALL);
+ RECORD(EXPR_CXX_CONSTRUCT);
+ RECORD(EXPR_CXX_TEMPORARY_OBJECT);
+ RECORD(EXPR_CXX_STATIC_CAST);
+ RECORD(EXPR_CXX_DYNAMIC_CAST);
+ RECORD(EXPR_CXX_REINTERPRET_CAST);
+ RECORD(EXPR_CXX_CONST_CAST);
+ RECORD(EXPR_CXX_FUNCTIONAL_CAST);
+ RECORD(EXPR_USER_DEFINED_LITERAL);
+ RECORD(EXPR_CXX_STD_INITIALIZER_LIST);
+ RECORD(EXPR_CXX_BOOL_LITERAL);
+ RECORD(EXPR_CXX_NULL_PTR_LITERAL);
+ RECORD(EXPR_CXX_TYPEID_EXPR);
+ RECORD(EXPR_CXX_TYPEID_TYPE);
+ RECORD(EXPR_CXX_THIS);
+ RECORD(EXPR_CXX_THROW);
+ RECORD(EXPR_CXX_DEFAULT_ARG);
+ RECORD(EXPR_CXX_DEFAULT_INIT);
+ RECORD(EXPR_CXX_BIND_TEMPORARY);
+ RECORD(EXPR_CXX_SCALAR_VALUE_INIT);
+ RECORD(EXPR_CXX_NEW);
+ RECORD(EXPR_CXX_DELETE);
+ RECORD(EXPR_CXX_PSEUDO_DESTRUCTOR);
+ RECORD(EXPR_EXPR_WITH_CLEANUPS);
+ RECORD(EXPR_CXX_DEPENDENT_SCOPE_MEMBER);
+ RECORD(EXPR_CXX_DEPENDENT_SCOPE_DECL_REF);
+ RECORD(EXPR_CXX_UNRESOLVED_CONSTRUCT);
+ RECORD(EXPR_CXX_UNRESOLVED_MEMBER);
+ RECORD(EXPR_CXX_UNRESOLVED_LOOKUP);
+ RECORD(EXPR_CXX_EXPRESSION_TRAIT);
+ RECORD(EXPR_CXX_NOEXCEPT);
+ RECORD(EXPR_OPAQUE_VALUE);
+ RECORD(EXPR_BINARY_CONDITIONAL_OPERATOR);
+ RECORD(EXPR_TYPE_TRAIT);
+ RECORD(EXPR_ARRAY_TYPE_TRAIT);
+ RECORD(EXPR_PACK_EXPANSION);
+ RECORD(EXPR_SIZEOF_PACK);
+ RECORD(EXPR_SUBST_NON_TYPE_TEMPLATE_PARM);
+ RECORD(EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK);
+ RECORD(EXPR_FUNCTION_PARM_PACK);
+ RECORD(EXPR_MATERIALIZE_TEMPORARY);
+ RECORD(EXPR_CUDA_KERNEL_CALL);
+ RECORD(EXPR_CXX_UUIDOF_EXPR);
+ RECORD(EXPR_CXX_UUIDOF_TYPE);
+ RECORD(EXPR_LAMBDA);
+#undef RECORD
+}
+
+void ASTWriter::WriteBlockInfoBlock() {
+ RecordData Record;
+ Stream.EnterSubblock(llvm::bitc::BLOCKINFO_BLOCK_ID, 3);
+
+#define BLOCK(X) EmitBlockID(X ## _ID, #X, Stream, Record)
+#define RECORD(X) EmitRecordID(X, #X, Stream, Record)
+
+ // Control Block.
+ BLOCK(CONTROL_BLOCK);
+ RECORD(METADATA);
+ RECORD(SIGNATURE);
+ RECORD(MODULE_NAME);
+ RECORD(MODULE_DIRECTORY);
+ RECORD(MODULE_MAP_FILE);
+ RECORD(IMPORTS);
+ RECORD(ORIGINAL_FILE);
+ RECORD(ORIGINAL_PCH_DIR);
+ RECORD(ORIGINAL_FILE_ID);
+ RECORD(INPUT_FILE_OFFSETS);
+
+ BLOCK(OPTIONS_BLOCK);
+ RECORD(LANGUAGE_OPTIONS);
+ RECORD(TARGET_OPTIONS);
+ RECORD(DIAGNOSTIC_OPTIONS);
+ RECORD(FILE_SYSTEM_OPTIONS);
+ RECORD(HEADER_SEARCH_OPTIONS);
+ RECORD(PREPROCESSOR_OPTIONS);
+
+ BLOCK(INPUT_FILES_BLOCK);
+ RECORD(INPUT_FILE);
+
+ // AST Top-Level Block.
+ BLOCK(AST_BLOCK);
+ RECORD(TYPE_OFFSET);
+ RECORD(DECL_OFFSET);
+ RECORD(IDENTIFIER_OFFSET);
+ RECORD(IDENTIFIER_TABLE);
+ RECORD(EAGERLY_DESERIALIZED_DECLS);
+ RECORD(SPECIAL_TYPES);
+ RECORD(STATISTICS);
+ RECORD(TENTATIVE_DEFINITIONS);
+ RECORD(SELECTOR_OFFSETS);
+ RECORD(METHOD_POOL);
+ RECORD(PP_COUNTER_VALUE);
+ RECORD(SOURCE_LOCATION_OFFSETS);
+ RECORD(SOURCE_LOCATION_PRELOADS);
+ RECORD(EXT_VECTOR_DECLS);
+ RECORD(UNUSED_FILESCOPED_DECLS);
+ RECORD(PPD_ENTITIES_OFFSETS);
+ RECORD(VTABLE_USES);
+ RECORD(REFERENCED_SELECTOR_POOL);
+ RECORD(TU_UPDATE_LEXICAL);
+ RECORD(SEMA_DECL_REFS);
+ RECORD(WEAK_UNDECLARED_IDENTIFIERS);
+ RECORD(PENDING_IMPLICIT_INSTANTIATIONS);
+ RECORD(DECL_REPLACEMENTS);
+ RECORD(UPDATE_VISIBLE);
+ RECORD(DECL_UPDATE_OFFSETS);
+ RECORD(DECL_UPDATES);
+ RECORD(CXX_BASE_SPECIFIER_OFFSETS);
+ RECORD(DIAG_PRAGMA_MAPPINGS);
+ RECORD(CUDA_SPECIAL_DECL_REFS);
+ RECORD(HEADER_SEARCH_TABLE);
+ RECORD(FP_PRAGMA_OPTIONS);
+ RECORD(OPENCL_EXTENSIONS);
+ RECORD(DELEGATING_CTORS);
+ RECORD(KNOWN_NAMESPACES);
+ RECORD(MODULE_OFFSET_MAP);
+ RECORD(SOURCE_MANAGER_LINE_TABLE);
+ RECORD(OBJC_CATEGORIES_MAP);
+ RECORD(FILE_SORTED_DECLS);
+ RECORD(IMPORTED_MODULES);
+ RECORD(OBJC_CATEGORIES);
+ RECORD(MACRO_OFFSET);
+ RECORD(INTERESTING_IDENTIFIERS);
+ RECORD(UNDEFINED_BUT_USED);
+ RECORD(LATE_PARSED_TEMPLATE);
+ RECORD(OPTIMIZE_PRAGMA_OPTIONS);
+ RECORD(UNUSED_LOCAL_TYPEDEF_NAME_CANDIDATES);
+ RECORD(CXX_CTOR_INITIALIZERS_OFFSETS);
+ RECORD(DELETE_EXPRS_TO_ANALYZE);
+
+ // SourceManager Block.
+ BLOCK(SOURCE_MANAGER_BLOCK);
+ RECORD(SM_SLOC_FILE_ENTRY);
+ RECORD(SM_SLOC_BUFFER_ENTRY);
+ RECORD(SM_SLOC_BUFFER_BLOB);
+ RECORD(SM_SLOC_EXPANSION_ENTRY);
+
+ // Preprocessor Block.
+ BLOCK(PREPROCESSOR_BLOCK);
+ RECORD(PP_MACRO_DIRECTIVE_HISTORY);
+ RECORD(PP_MACRO_FUNCTION_LIKE);
+ RECORD(PP_MACRO_OBJECT_LIKE);
+ RECORD(PP_MODULE_MACRO);
+ RECORD(PP_TOKEN);
+
+ // Submodule Block.
+ BLOCK(SUBMODULE_BLOCK);
+ RECORD(SUBMODULE_METADATA);
+ RECORD(SUBMODULE_DEFINITION);
+ RECORD(SUBMODULE_UMBRELLA_HEADER);
+ RECORD(SUBMODULE_HEADER);
+ RECORD(SUBMODULE_TOPHEADER);
+ RECORD(SUBMODULE_UMBRELLA_DIR);
+ RECORD(SUBMODULE_IMPORTS);
+ RECORD(SUBMODULE_EXPORTS);
+ RECORD(SUBMODULE_REQUIRES);
+ RECORD(SUBMODULE_EXCLUDED_HEADER);
+ RECORD(SUBMODULE_LINK_LIBRARY);
+ RECORD(SUBMODULE_CONFIG_MACRO);
+ RECORD(SUBMODULE_CONFLICT);
+ RECORD(SUBMODULE_PRIVATE_HEADER);
+ RECORD(SUBMODULE_TEXTUAL_HEADER);
+ RECORD(SUBMODULE_PRIVATE_TEXTUAL_HEADER);
+
+ // Comments Block.
+ BLOCK(COMMENTS_BLOCK);
+ RECORD(COMMENTS_RAW_COMMENT);
+
+ // Decls and Types block.
+ BLOCK(DECLTYPES_BLOCK);
+ RECORD(TYPE_EXT_QUAL);
+ RECORD(TYPE_COMPLEX);
+ RECORD(TYPE_POINTER);
+ RECORD(TYPE_BLOCK_POINTER);
+ RECORD(TYPE_LVALUE_REFERENCE);
+ RECORD(TYPE_RVALUE_REFERENCE);
+ RECORD(TYPE_MEMBER_POINTER);
+ RECORD(TYPE_CONSTANT_ARRAY);
+ RECORD(TYPE_INCOMPLETE_ARRAY);
+ RECORD(TYPE_VARIABLE_ARRAY);
+ RECORD(TYPE_VECTOR);
+ RECORD(TYPE_EXT_VECTOR);
+ RECORD(TYPE_FUNCTION_NO_PROTO);
+ RECORD(TYPE_FUNCTION_PROTO);
+ RECORD(TYPE_TYPEDEF);
+ RECORD(TYPE_TYPEOF_EXPR);
+ RECORD(TYPE_TYPEOF);
+ RECORD(TYPE_RECORD);
+ RECORD(TYPE_ENUM);
+ RECORD(TYPE_OBJC_INTERFACE);
+ RECORD(TYPE_OBJC_OBJECT_POINTER);
+ RECORD(TYPE_DECLTYPE);
+ RECORD(TYPE_ELABORATED);
+ RECORD(TYPE_SUBST_TEMPLATE_TYPE_PARM);
+ RECORD(TYPE_UNRESOLVED_USING);
+ RECORD(TYPE_INJECTED_CLASS_NAME);
+ RECORD(TYPE_OBJC_OBJECT);
+ RECORD(TYPE_TEMPLATE_TYPE_PARM);
+ RECORD(TYPE_TEMPLATE_SPECIALIZATION);
+ RECORD(TYPE_DEPENDENT_NAME);
+ RECORD(TYPE_DEPENDENT_TEMPLATE_SPECIALIZATION);
+ RECORD(TYPE_DEPENDENT_SIZED_ARRAY);
+ RECORD(TYPE_PAREN);
+ RECORD(TYPE_PACK_EXPANSION);
+ RECORD(TYPE_ATTRIBUTED);
+ RECORD(TYPE_SUBST_TEMPLATE_TYPE_PARM_PACK);
+ RECORD(TYPE_AUTO);
+ RECORD(TYPE_UNARY_TRANSFORM);
+ RECORD(TYPE_ATOMIC);
+ RECORD(TYPE_DECAYED);
+ RECORD(TYPE_ADJUSTED);
+ RECORD(LOCAL_REDECLARATIONS);
+ RECORD(DECL_TYPEDEF);
+ RECORD(DECL_TYPEALIAS);
+ RECORD(DECL_ENUM);
+ RECORD(DECL_RECORD);
+ RECORD(DECL_ENUM_CONSTANT);
+ RECORD(DECL_FUNCTION);
+ RECORD(DECL_OBJC_METHOD);
+ RECORD(DECL_OBJC_INTERFACE);
+ RECORD(DECL_OBJC_PROTOCOL);
+ RECORD(DECL_OBJC_IVAR);
+ RECORD(DECL_OBJC_AT_DEFS_FIELD);
+ RECORD(DECL_OBJC_CATEGORY);
+ RECORD(DECL_OBJC_CATEGORY_IMPL);
+ RECORD(DECL_OBJC_IMPLEMENTATION);
+ RECORD(DECL_OBJC_COMPATIBLE_ALIAS);
+ RECORD(DECL_OBJC_PROPERTY);
+ RECORD(DECL_OBJC_PROPERTY_IMPL);
+ RECORD(DECL_FIELD);
+ RECORD(DECL_MS_PROPERTY);
+ RECORD(DECL_VAR);
+ RECORD(DECL_IMPLICIT_PARAM);
+ RECORD(DECL_PARM_VAR);
+ RECORD(DECL_FILE_SCOPE_ASM);
+ RECORD(DECL_BLOCK);
+ RECORD(DECL_CONTEXT_LEXICAL);
+ RECORD(DECL_CONTEXT_VISIBLE);
+ RECORD(DECL_NAMESPACE);
+ RECORD(DECL_NAMESPACE_ALIAS);
+ RECORD(DECL_USING);
+ RECORD(DECL_USING_SHADOW);
+ RECORD(DECL_USING_DIRECTIVE);
+ RECORD(DECL_UNRESOLVED_USING_VALUE);
+ RECORD(DECL_UNRESOLVED_USING_TYPENAME);
+ RECORD(DECL_LINKAGE_SPEC);
+ RECORD(DECL_CXX_RECORD);
+ RECORD(DECL_CXX_METHOD);
+ RECORD(DECL_CXX_CONSTRUCTOR);
+ RECORD(DECL_CXX_DESTRUCTOR);
+ RECORD(DECL_CXX_CONVERSION);
+ RECORD(DECL_ACCESS_SPEC);
+ RECORD(DECL_FRIEND);
+ RECORD(DECL_FRIEND_TEMPLATE);
+ RECORD(DECL_CLASS_TEMPLATE);
+ RECORD(DECL_CLASS_TEMPLATE_SPECIALIZATION);
+ RECORD(DECL_CLASS_TEMPLATE_PARTIAL_SPECIALIZATION);
+ RECORD(DECL_VAR_TEMPLATE);
+ RECORD(DECL_VAR_TEMPLATE_SPECIALIZATION);
+ RECORD(DECL_VAR_TEMPLATE_PARTIAL_SPECIALIZATION);
+ RECORD(DECL_FUNCTION_TEMPLATE);
+ RECORD(DECL_TEMPLATE_TYPE_PARM);
+ RECORD(DECL_NON_TYPE_TEMPLATE_PARM);
+ RECORD(DECL_TEMPLATE_TEMPLATE_PARM);
+ RECORD(DECL_STATIC_ASSERT);
+ RECORD(DECL_CXX_BASE_SPECIFIERS);
+ RECORD(DECL_INDIRECTFIELD);
+ RECORD(DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK);
+
+ // Statements and Exprs can occur in the Decls and Types block.
+ AddStmtsExprs(Stream, Record);
+
+ BLOCK(PREPROCESSOR_DETAIL_BLOCK);
+ RECORD(PPD_MACRO_EXPANSION);
+ RECORD(PPD_MACRO_DEFINITION);
+ RECORD(PPD_INCLUSION_DIRECTIVE);
+
+ // Decls and Types block.
+ BLOCK(EXTENSION_BLOCK);
+ RECORD(EXTENSION_METADATA);
+
+#undef RECORD
+#undef BLOCK
+ Stream.ExitBlock();
+}
+
+/// \brief Prepares a path for being written to an AST file by converting it
+/// to an absolute path and removing nested './'s.
+///
+/// \return \c true if the path was changed.
+static bool cleanPathForOutput(FileManager &FileMgr,
+ SmallVectorImpl<char> &Path) {
+ bool Changed = FileMgr.makeAbsolutePath(Path);
+ return Changed | llvm::sys::path::remove_dots(Path);
+}
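+
+// Usage sketch (hypothetical input, assuming a working directory of "/tmp"):
+//
+//   SmallString<128> Path("foo/./bar.h");
+//   bool Changed = cleanPathForOutput(FileMgr, Path);
+//   // Path is now "/tmp/foo/bar.h" and Changed is true.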
+
+/// \brief Adjusts the given filename to only write out the portion of the
+/// filename that is not part of the system root directory.
+///
+/// \param Filename the file name to adjust.
+///
+/// \param BaseDir When non-NULL, the PCH file is a relocatable AST file and
+/// the returned filename will be adjusted by this root directory.
+///
+/// \returns either the original filename (if it needs no adjustment) or the
+/// adjusted filename (which points into the @p Filename parameter).
+static const char *
+adjustFilenameForRelocatableAST(const char *Filename, StringRef BaseDir) {
+ assert(Filename && "No file name to adjust?");
+
+ if (BaseDir.empty())
+ return Filename;
+
+ // Verify that the filename and the base directory have the same prefix.
+ unsigned Pos = 0;
+ for (; Filename[Pos] && Pos < BaseDir.size(); ++Pos)
+ if (Filename[Pos] != BaseDir[Pos])
+ return Filename; // Prefixes don't match.
+
+ // We hit the end of the filename before we hit the end of the base directory.
+ if (!Filename[Pos])
+ return Filename;
+
+ // If there's no path separator at the end of the base directory and none
+ // immediately after it in the filename, then this isn't within the base
+ // directory.
+ if (!llvm::sys::path::is_separator(Filename[Pos])) {
+ if (!llvm::sys::path::is_separator(BaseDir.back()))
+ return Filename;
+ } else {
+ // If the file name has a '/' at the current position, skip over the '/'.
+ // We distinguish relative paths from absolute paths by the
+ // absence of '/' at the beginning of relative paths.
+ //
+ // FIXME: This is wrong. We distinguish them by asking if the path is
+ // absolute, which isn't the same thing. And there might be multiple '/'s
+ // in a row. Use a better mechanism to indicate whether we have emitted an
+ // absolute or relative path.
+ ++Pos;
+ }
+
+ return Filename + Pos;
+}
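+
+// Worked example (hypothetical paths): with BaseDir == "/usr/lib/clang" and
+// Filename == "/usr/lib/clang/include/stddef.h", the prefixes match, the
+// separator after the prefix is skipped, and the function returns a pointer
+// to "include/stddef.h" inside the Filename buffer.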
+
+static ASTFileSignature getSignature() {
+ while (true) {
+ if (ASTFileSignature S = llvm::sys::Process::GetRandomNumber())
+ return S;
+ // Rely on GetRandomNumber to eventually return non-zero...
+ }
+}
+
+/// \brief Write the control block.
+uint64_t ASTWriter::WriteControlBlock(Preprocessor &PP,
+ ASTContext &Context,
+ StringRef isysroot,
+ const std::string &OutputFile) {
+ ASTFileSignature Signature = 0;
+
+ using namespace llvm;
+ Stream.EnterSubblock(CONTROL_BLOCK_ID, 5);
+ RecordData Record;
+
+ // Metadata
+ auto *MetadataAbbrev = new BitCodeAbbrev();
+ MetadataAbbrev->Add(BitCodeAbbrevOp(METADATA));
+ MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Major
+ MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Minor
+ MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang maj.
+ MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang min.
+ MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Relocatable
+ MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Timestamps
+ MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Errors
+ MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // SVN branch/tag
+ unsigned MetadataAbbrevCode = Stream.EmitAbbrev(MetadataAbbrev);
+ assert((!WritingModule || isysroot.empty()) &&
+ "writing module as a relocatable PCH?");
+ {
+ RecordData::value_type Record[] = {METADATA, VERSION_MAJOR, VERSION_MINOR,
+ CLANG_VERSION_MAJOR, CLANG_VERSION_MINOR,
+ !isysroot.empty(), IncludeTimestamps,
+ ASTHasCompilerErrors};
+ Stream.EmitRecordWithBlob(MetadataAbbrevCode, Record,
+ getClangFullRepositoryVersion());
+ }
+ if (WritingModule) {
+ // For implicit modules we output a signature that we can use to ensure
+ // duplicate module builds don't collide in the cache, as their output
+ // order is non-deterministic.
+ // FIXME: Remove this when output is deterministic.
+ if (Context.getLangOpts().ImplicitModules) {
+ Signature = getSignature();
+ RecordData::value_type Record[] = {Signature};
+ Stream.EmitRecord(SIGNATURE, Record);
+ }
+
+ // Module name
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(MODULE_NAME));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned AbbrevCode = Stream.EmitAbbrev(Abbrev);
+ RecordData::value_type Record[] = {MODULE_NAME};
+ Stream.EmitRecordWithBlob(AbbrevCode, Record, WritingModule->Name);
+ }
+
+ if (WritingModule && WritingModule->Directory) {
+ SmallString<128> BaseDir(WritingModule->Directory->getName());
+ cleanPathForOutput(Context.getSourceManager().getFileManager(), BaseDir);
+
+ // If the home of the module is the current working directory, then we
+ // want to pick up the cwd of the build process loading the module, not
+ // our cwd, when we load this module.
+ if (!PP.getHeaderSearchInfo()
+ .getHeaderSearchOpts()
+ .ModuleMapFileHomeIsCwd ||
+ WritingModule->Directory->getName() != StringRef(".")) {
+ // Module directory.
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(MODULE_DIRECTORY));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Directory
+ unsigned AbbrevCode = Stream.EmitAbbrev(Abbrev);
+
+ RecordData::value_type Record[] = {MODULE_DIRECTORY};
+ Stream.EmitRecordWithBlob(AbbrevCode, Record, BaseDir);
+ }
+
+ // Write out all other paths relative to the base directory if possible.
+ BaseDirectory.assign(BaseDir.begin(), BaseDir.end());
+ } else if (!isysroot.empty()) {
+ // Write out paths relative to the sysroot if possible.
+ BaseDirectory = isysroot;
+ }
+
+ // Module map file
+ if (WritingModule) {
+ Record.clear();
+
+ auto &Map = PP.getHeaderSearchInfo().getModuleMap();
+
+ // Primary module map file.
+ AddPath(Map.getModuleMapFileForUniquing(WritingModule)->getName(), Record);
+
+ // Additional module map files.
+ if (auto *AdditionalModMaps =
+ Map.getAdditionalModuleMapFiles(WritingModule)) {
+ Record.push_back(AdditionalModMaps->size());
+ for (const FileEntry *F : *AdditionalModMaps)
+ AddPath(F->getName(), Record);
+ } else {
+ Record.push_back(0);
+ }
+
+ Stream.EmitRecord(MODULE_MAP_FILE, Record);
+ }
+
+ // Imports
+ if (Chain) {
+ serialization::ModuleManager &Mgr = Chain->getModuleManager();
+ Record.clear();
+
+ for (auto *M : Mgr) {
+ // Skip modules that weren't directly imported.
+ if (!M->isDirectlyImported())
+ continue;
+
+ Record.push_back((unsigned)M->Kind); // FIXME: Stable encoding
+ AddSourceLocation(M->ImportLoc, Record);
+ Record.push_back(M->File->getSize());
+ Record.push_back(getTimestampForOutput(M->File));
+ Record.push_back(M->Signature);
+ AddPath(M->FileName, Record);
+ }
+ Stream.EmitRecord(IMPORTS, Record);
+ }
+
+ // Write the options block.
+ Stream.EnterSubblock(OPTIONS_BLOCK_ID, 4);
+
+ // Language options.
+ Record.clear();
+ const LangOptions &LangOpts = Context.getLangOpts();
+#define LANGOPT(Name, Bits, Default, Description) \
+ Record.push_back(LangOpts.Name);
+#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
+ Record.push_back(static_cast<unsigned>(LangOpts.get##Name()));
+#include "clang/Basic/LangOptions.def"
+#define SANITIZER(NAME, ID) \
+ Record.push_back(LangOpts.Sanitize.has(SanitizerKind::ID));
+#include "clang/Basic/Sanitizers.def"
+
+ Record.push_back(LangOpts.ModuleFeatures.size());
+ for (StringRef Feature : LangOpts.ModuleFeatures)
+ AddString(Feature, Record);
+
+ Record.push_back((unsigned) LangOpts.ObjCRuntime.getKind());
+ AddVersionTuple(LangOpts.ObjCRuntime.getVersion(), Record);
+
+ AddString(LangOpts.CurrentModule, Record);
+
+ // Comment options.
+ Record.push_back(LangOpts.CommentOpts.BlockCommandNames.size());
+ for (const auto &I : LangOpts.CommentOpts.BlockCommandNames) {
+ AddString(I, Record);
+ }
+ Record.push_back(LangOpts.CommentOpts.ParseAllComments);
+
+ // OpenMP offloading options.
+ Record.push_back(LangOpts.OMPTargetTriples.size());
+ for (auto &T : LangOpts.OMPTargetTriples)
+ AddString(T.getTriple(), Record);
+
+ AddString(LangOpts.OMPHostIRFile, Record);
+
+ Stream.EmitRecord(LANGUAGE_OPTIONS, Record);
+
+ // Target options.
+ Record.clear();
+ const TargetInfo &Target = Context.getTargetInfo();
+ const TargetOptions &TargetOpts = Target.getTargetOpts();
+ AddString(TargetOpts.Triple, Record);
+ AddString(TargetOpts.CPU, Record);
+ AddString(TargetOpts.ABI, Record);
+ Record.push_back(TargetOpts.FeaturesAsWritten.size());
+ for (unsigned I = 0, N = TargetOpts.FeaturesAsWritten.size(); I != N; ++I) {
+ AddString(TargetOpts.FeaturesAsWritten[I], Record);
+ }
+ Record.push_back(TargetOpts.Features.size());
+ for (unsigned I = 0, N = TargetOpts.Features.size(); I != N; ++I) {
+ AddString(TargetOpts.Features[I], Record);
+ }
+ Stream.EmitRecord(TARGET_OPTIONS, Record);
+
+ // Diagnostic options.
+ Record.clear();
+ const DiagnosticOptions &DiagOpts
+ = Context.getDiagnostics().getDiagnosticOptions();
+#define DIAGOPT(Name, Bits, Default) Record.push_back(DiagOpts.Name);
+#define ENUM_DIAGOPT(Name, Type, Bits, Default) \
+ Record.push_back(static_cast<unsigned>(DiagOpts.get##Name()));
+#include "clang/Basic/DiagnosticOptions.def"
+ Record.push_back(DiagOpts.Warnings.size());
+ for (unsigned I = 0, N = DiagOpts.Warnings.size(); I != N; ++I)
+ AddString(DiagOpts.Warnings[I], Record);
+ Record.push_back(DiagOpts.Remarks.size());
+ for (unsigned I = 0, N = DiagOpts.Remarks.size(); I != N; ++I)
+ AddString(DiagOpts.Remarks[I], Record);
+ // Note: we don't serialize the log or serialization file names, because they
+ // are generally transient files and will almost always be overridden.
+ Stream.EmitRecord(DIAGNOSTIC_OPTIONS, Record);
+
+ // File system options.
+ Record.clear();
+ const FileSystemOptions &FSOpts =
+ Context.getSourceManager().getFileManager().getFileSystemOpts();
+ AddString(FSOpts.WorkingDir, Record);
+ Stream.EmitRecord(FILE_SYSTEM_OPTIONS, Record);
+
+ // Header search options.
+ Record.clear();
+ const HeaderSearchOptions &HSOpts
+ = PP.getHeaderSearchInfo().getHeaderSearchOpts();
+ AddString(HSOpts.Sysroot, Record);
+
+ // Include entries.
+ Record.push_back(HSOpts.UserEntries.size());
+ for (unsigned I = 0, N = HSOpts.UserEntries.size(); I != N; ++I) {
+ const HeaderSearchOptions::Entry &Entry = HSOpts.UserEntries[I];
+ AddString(Entry.Path, Record);
+ Record.push_back(static_cast<unsigned>(Entry.Group));
+ Record.push_back(Entry.IsFramework);
+ Record.push_back(Entry.IgnoreSysRoot);
+ }
+
+ // System header prefixes.
+ Record.push_back(HSOpts.SystemHeaderPrefixes.size());
+ for (unsigned I = 0, N = HSOpts.SystemHeaderPrefixes.size(); I != N; ++I) {
+ AddString(HSOpts.SystemHeaderPrefixes[I].Prefix, Record);
+ Record.push_back(HSOpts.SystemHeaderPrefixes[I].IsSystemHeader);
+ }
+
+ AddString(HSOpts.ResourceDir, Record);
+ AddString(HSOpts.ModuleCachePath, Record);
+ AddString(HSOpts.ModuleUserBuildPath, Record);
+ Record.push_back(HSOpts.DisableModuleHash);
+ Record.push_back(HSOpts.UseBuiltinIncludes);
+ Record.push_back(HSOpts.UseStandardSystemIncludes);
+ Record.push_back(HSOpts.UseStandardCXXIncludes);
+ Record.push_back(HSOpts.UseLibcxx);
+ // Write out the specific module cache path that contains the module files.
+ AddString(PP.getHeaderSearchInfo().getModuleCachePath(), Record);
+ Stream.EmitRecord(HEADER_SEARCH_OPTIONS, Record);
+
+ // Preprocessor options.
+ Record.clear();
+ const PreprocessorOptions &PPOpts = PP.getPreprocessorOpts();
+
+ // Macro definitions.
+ Record.push_back(PPOpts.Macros.size());
+ for (unsigned I = 0, N = PPOpts.Macros.size(); I != N; ++I) {
+ AddString(PPOpts.Macros[I].first, Record);
+ Record.push_back(PPOpts.Macros[I].second);
+ }
+
+ // Includes
+ Record.push_back(PPOpts.Includes.size());
+ for (unsigned I = 0, N = PPOpts.Includes.size(); I != N; ++I)
+ AddString(PPOpts.Includes[I], Record);
+
+ // Macro includes
+ Record.push_back(PPOpts.MacroIncludes.size());
+ for (unsigned I = 0, N = PPOpts.MacroIncludes.size(); I != N; ++I)
+ AddString(PPOpts.MacroIncludes[I], Record);
+
+ Record.push_back(PPOpts.UsePredefines);
+ // Detailed record is important since it is used for the module cache hash.
+ Record.push_back(PPOpts.DetailedRecord);
+ AddString(PPOpts.ImplicitPCHInclude, Record);
+ AddString(PPOpts.ImplicitPTHInclude, Record);
+ Record.push_back(static_cast<unsigned>(PPOpts.ObjCXXARCStandardLibrary));
+ Stream.EmitRecord(PREPROCESSOR_OPTIONS, Record);
+
+ // Leave the options block.
+ Stream.ExitBlock();
+
+ // Original file name and file ID
+ SourceManager &SM = Context.getSourceManager();
+ if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
+ auto *FileAbbrev = new BitCodeAbbrev();
+ FileAbbrev->Add(BitCodeAbbrevOp(ORIGINAL_FILE));
+ FileAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // File ID
+ FileAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
+ unsigned FileAbbrevCode = Stream.EmitAbbrev(FileAbbrev);
+
+ Record.clear();
+ Record.push_back(ORIGINAL_FILE);
+ Record.push_back(SM.getMainFileID().getOpaqueValue());
+ EmitRecordWithPath(FileAbbrevCode, Record, MainFile->getName());
+ }
+
+ Record.clear();
+ Record.push_back(SM.getMainFileID().getOpaqueValue());
+ Stream.EmitRecord(ORIGINAL_FILE_ID, Record);
+
+ // Original PCH directory
+ if (!OutputFile.empty() && OutputFile != "-") {
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(ORIGINAL_PCH_DIR));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
+ unsigned AbbrevCode = Stream.EmitAbbrev(Abbrev);
+
+ SmallString<128> OutputPath(OutputFile);
+
+ SM.getFileManager().makeAbsolutePath(OutputPath);
+ StringRef origDir = llvm::sys::path::parent_path(OutputPath);
+
+ RecordData::value_type Record[] = {ORIGINAL_PCH_DIR};
+ Stream.EmitRecordWithBlob(AbbrevCode, Record, origDir);
+ }
+
+ WriteInputFiles(Context.SourceMgr,
+ PP.getHeaderSearchInfo().getHeaderSearchOpts(),
+ PP.getLangOpts().Modules);
+ Stream.ExitBlock();
+ return Signature;
+}
+
+namespace {
+ /// \brief An input file.
+ struct InputFileEntry {
+ const FileEntry *File;
+ bool IsSystemFile;
+ bool IsTransient;
+ bool BufferOverridden;
+ };
+} // end anonymous namespace
+
+void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
+ HeaderSearchOptions &HSOpts,
+ bool Modules) {
+ using namespace llvm;
+ Stream.EnterSubblock(INPUT_FILES_BLOCK_ID, 4);
+
+ // Create input-file abbreviation.
+ auto *IFAbbrev = new BitCodeAbbrev();
+ IFAbbrev->Add(BitCodeAbbrevOp(INPUT_FILE));
+ IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ID
+ IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 12)); // Size
+ IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 32)); // Modification time
+ IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Overridden
+ IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Transient
+ IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
+ unsigned IFAbbrevCode = Stream.EmitAbbrev(IFAbbrev);
+
+ // Get all ContentCache objects for files, sorted by whether the file is a
+ // system one or not. System files go at the back, user files at the front.
+ std::deque<InputFileEntry> SortedFiles;
+ for (unsigned I = 1, N = SourceMgr.local_sloc_entry_size(); I != N; ++I) {
+ // Get this source location entry.
+ const SrcMgr::SLocEntry *SLoc = &SourceMgr.getLocalSLocEntry(I);
+ assert(&SourceMgr.getSLocEntry(FileID::get(I)) == SLoc);
+
+ // We only care about file entries that were not overridden.
+ if (!SLoc->isFile())
+ continue;
+ const SrcMgr::ContentCache *Cache = SLoc->getFile().getContentCache();
+ if (!Cache->OrigEntry)
+ continue;
+
+ InputFileEntry Entry;
+ Entry.File = Cache->OrigEntry;
+ Entry.IsSystemFile = Cache->IsSystemFile;
+ Entry.IsTransient = Cache->IsTransient;
+ Entry.BufferOverridden = Cache->BufferOverridden;
+ if (Cache->IsSystemFile)
+ SortedFiles.push_back(Entry);
+ else
+ SortedFiles.push_front(Entry);
+ }
+
+ unsigned UserFilesNum = 0;
+ // Write out all of the input files.
+ std::vector<uint64_t> InputFileOffsets;
+ for (const auto &Entry : SortedFiles) {
+ uint32_t &InputFileID = InputFileIDs[Entry.File];
+ if (InputFileID != 0)
+ continue; // already recorded this file.
+
+ // Record this entry's offset.
+ InputFileOffsets.push_back(Stream.GetCurrentBitNo());
+
+ InputFileID = InputFileOffsets.size();
+
+ if (!Entry.IsSystemFile)
+ ++UserFilesNum;
+
+ // Emit the size/modification time of this file, and whether it was
+ // overridden.
+ RecordData::value_type Record[] = {
+ INPUT_FILE,
+ InputFileOffsets.size(),
+ (uint64_t)Entry.File->getSize(),
+ (uint64_t)getTimestampForOutput(Entry.File),
+ Entry.BufferOverridden,
+ Entry.IsTransient};
+
+ EmitRecordWithPath(IFAbbrevCode, Record, Entry.File->getName());
+ }
+
+ Stream.ExitBlock();
+
+ // Create input file offsets abbreviation.
+ auto *OffsetsAbbrev = new BitCodeAbbrev();
+ OffsetsAbbrev->Add(BitCodeAbbrevOp(INPUT_FILE_OFFSETS));
+ OffsetsAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // # input files
+ OffsetsAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // # non-system
+ // input files
+ OffsetsAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Array
+ unsigned OffsetsAbbrevCode = Stream.EmitAbbrev(OffsetsAbbrev);
+
+ // Write input file offsets.
+ RecordData::value_type Record[] = {INPUT_FILE_OFFSETS,
+ InputFileOffsets.size(), UserFilesNum};
+ Stream.EmitRecordWithBlob(OffsetsAbbrevCode, Record, bytes(InputFileOffsets));
+}
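+
+// Resulting layout (illustrative): the INPUT_FILES block holds one
+// INPUT_FILE record per unique file, and the INPUT_FILE_OFFSETS record in
+// the enclosing block maps the 1-based input-file ID to the bit offset of
+// its record, i.e. InputFileOffsets[ID - 1] is where record ID begins.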
+
+//===----------------------------------------------------------------------===//
+// Source Manager Serialization
+//===----------------------------------------------------------------------===//
+
+/// \brief Create an abbreviation for the SLocEntry that refers to a
+/// file.
+static unsigned CreateSLocFileAbbrev(llvm::BitstreamWriter &Stream) {
+ using namespace llvm;
+
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SM_SLOC_FILE_ENTRY));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Offset
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Include location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // Characteristic
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Line directives
+ // FileEntry fields.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Input File ID
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // NumCreatedFIDs
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 24)); // FirstDeclIndex
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // NumDecls
+ return Stream.EmitAbbrev(Abbrev);
+}
+
+/// \brief Create an abbreviation for the SLocEntry that refers to a
+/// buffer.
+static unsigned CreateSLocBufferAbbrev(llvm::BitstreamWriter &Stream) {
+ using namespace llvm;
+
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SM_SLOC_BUFFER_ENTRY));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Offset
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Include location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // Characteristic
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Line directives
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Buffer name blob
+ return Stream.EmitAbbrev(Abbrev);
+}
+
+/// \brief Create an abbreviation for the SLocEntry that refers to a
+/// buffer's blob.
+static unsigned CreateSLocBufferBlobAbbrev(llvm::BitstreamWriter &Stream) {
+ using namespace llvm;
+
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SM_SLOC_BUFFER_BLOB));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Blob
+ return Stream.EmitAbbrev(Abbrev);
+}
+
+/// \brief Create an abbreviation for the SLocEntry that refers to a macro
+/// expansion.
+static unsigned CreateSLocExpansionAbbrev(llvm::BitstreamWriter &Stream) {
+ using namespace llvm;
+
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SM_SLOC_EXPANSION_ENTRY));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Offset
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Spelling location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Start location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // End location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Token length
+ return Stream.EmitAbbrev(Abbrev);
+}
+
+namespace {
+ // Trait used for the on-disk hash table of header search information.
+ class HeaderFileInfoTrait {
+ ASTWriter &Writer;
+ const HeaderSearch &HS;
+
+ // Keep track of the framework names we've used during serialization.
+ SmallVector<char, 128> FrameworkStringData;
+ llvm::StringMap<unsigned> FrameworkNameOffset;
+
+ public:
+ HeaderFileInfoTrait(ASTWriter &Writer, const HeaderSearch &HS)
+ : Writer(Writer), HS(HS) { }
+
+ struct key_type {
+ const FileEntry *FE;
+ const char *Filename;
+ };
+ typedef const key_type &key_type_ref;
+
+ typedef HeaderFileInfo data_type;
+ typedef const data_type &data_type_ref;
+ typedef unsigned hash_value_type;
+ typedef unsigned offset_type;
+
+ hash_value_type ComputeHash(key_type_ref key) {
+ // The hash is based only on size/time of the file, so that the reader can
+ // match even when symlinking or excess path elements ("foo/../", "../")
+ // change the form of the name. However, the complete path is still the key.
+ return llvm::hash_combine(key.FE->getSize(),
+ Writer.getTimestampForOutput(key.FE));
+ }
+
+ std::pair<unsigned,unsigned>
+ EmitKeyDataLength(raw_ostream& Out, key_type_ref key, data_type_ref Data) {
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+ unsigned KeyLen = strlen(key.Filename) + 1 + 8 + 8;
+ LE.write<uint16_t>(KeyLen);
+ unsigned DataLen = 1 + 2 + 4 + 4;
+ for (auto ModInfo : HS.getModuleMap().findAllModulesForHeader(key.FE))
+ if (Writer.getLocalOrImportedSubmoduleID(ModInfo.getModule()))
+ DataLen += 4;
+ LE.write<uint8_t>(DataLen);
+ return std::make_pair(KeyLen, DataLen);
+ }
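+
+ // Illustrative sizes (hypothetical key "a.h"): KeyLen is 3 + 1 + 8 + 8 ==
+ // 20 bytes (name, NUL terminator, size, mtime), and DataLen is 11 bytes
+ // plus 4 for each module that is known to own this header.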
+
+ void EmitKey(raw_ostream& Out, key_type_ref key, unsigned KeyLen) {
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+ LE.write<uint64_t>(key.FE->getSize());
+ KeyLen -= 8;
+ LE.write<uint64_t>(Writer.getTimestampForOutput(key.FE));
+ KeyLen -= 8;
+ Out.write(key.Filename, KeyLen);
+ }
+
+ void EmitData(raw_ostream &Out, key_type_ref key,
+ data_type_ref Data, unsigned DataLen) {
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+ uint64_t Start = Out.tell(); (void)Start;
+
+ unsigned char Flags = (Data.isImport << 4)
+ | (Data.isPragmaOnce << 3)
+ | (Data.DirInfo << 1)
+ | Data.IndexHeaderMapHeader;
+ LE.write<uint8_t>(Flags);
+ LE.write<uint16_t>(Data.NumIncludes);
+
+ if (!Data.ControllingMacro)
+ LE.write<uint32_t>(Data.ControllingMacroID);
+ else
+ LE.write<uint32_t>(Writer.getIdentifierRef(Data.ControllingMacro));
+
+ unsigned Offset = 0;
+ if (!Data.Framework.empty()) {
+ // If this header refers into a framework, save the framework name.
+ llvm::StringMap<unsigned>::iterator Pos
+ = FrameworkNameOffset.find(Data.Framework);
+ if (Pos == FrameworkNameOffset.end()) {
+ Offset = FrameworkStringData.size() + 1;
+ FrameworkStringData.append(Data.Framework.begin(),
+ Data.Framework.end());
+ FrameworkStringData.push_back(0);
+
+ FrameworkNameOffset[Data.Framework] = Offset;
+ } else
+ Offset = Pos->second;
+ }
+ LE.write<uint32_t>(Offset);
+
+ // FIXME: If the header is excluded, we should write out some
+ // record of that fact.
+ for (auto ModInfo : HS.getModuleMap().findAllModulesForHeader(key.FE)) {
+ if (uint32_t ModID =
+ Writer.getLocalOrImportedSubmoduleID(ModInfo.getModule())) {
+ uint32_t Value = (ModID << 2) | (unsigned)ModInfo.getRole();
+ assert((Value >> 2) == ModID && "overflow in header module info");
+ LE.write<uint32_t>(Value);
+ }
+ }
+
+ assert(Out.tell() - Start == DataLen && "Wrong data length");
+ }
+
+ const char *strings_begin() const { return FrameworkStringData.begin(); }
+ const char *strings_end() const { return FrameworkStringData.end(); }
+ };
+} // end anonymous namespace
+
+/// \brief Write the header search block for the list of files for which
+/// header search information has been computed.
+///
+/// \param HS The header search structure to save.
+void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
+ SmallVector<const FileEntry *, 16> FilesByUID;
+ HS.getFileMgr().GetUniqueIDMapping(FilesByUID);
+
+ if (FilesByUID.size() > HS.header_file_size())
+ FilesByUID.resize(HS.header_file_size());
+
+ HeaderFileInfoTrait GeneratorTrait(*this, HS);
+ llvm::OnDiskChainedHashTableGenerator<HeaderFileInfoTrait> Generator;
+ SmallVector<const char *, 4> SavedStrings;
+ unsigned NumHeaderSearchEntries = 0;
+ for (unsigned UID = 0, LastUID = FilesByUID.size(); UID != LastUID; ++UID) {
+ const FileEntry *File = FilesByUID[UID];
+ if (!File)
+ continue;
+
+ // Get the file info. This will load info from the external source if
+ // necessary. Skip emitting this file if we have no information on it
+ // as a header file (in which case HFI will be null) or if it hasn't
+ // changed since it was loaded. Also skip it if it's for a modular header
+ // from a different module; in that case, we rely on the module(s)
+ // containing the header to provide this information.
+ const HeaderFileInfo *HFI =
+ HS.getExistingFileInfo(File, /*WantExternal*/!Chain);
+ if (!HFI || (HFI->isModuleHeader && !HFI->isCompilingModuleHeader))
+ continue;
+
+ // Massage the file path into an appropriate form.
+ const char *Filename = File->getName();
+ SmallString<128> FilenameTmp(Filename);
+ if (PreparePathForOutput(FilenameTmp)) {
+ // If we performed any translation on the file name at all, we need to
+ // save this string, since the generator will refer to it later.
+ Filename = strdup(FilenameTmp.c_str());
+ SavedStrings.push_back(Filename);
+ }
+
+ HeaderFileInfoTrait::key_type key = { File, Filename };
+ Generator.insert(key, *HFI, GeneratorTrait);
+ ++NumHeaderSearchEntries;
+ }
+
+ // Create the on-disk hash table in a buffer.
+ SmallString<4096> TableData;
+ uint32_t BucketOffset;
+ {
+ using namespace llvm::support;
+ llvm::raw_svector_ostream Out(TableData);
+ // Make sure that no bucket is at offset 0
+ endian::Writer<little>(Out).write<uint32_t>(0);
+ BucketOffset = Generator.Emit(Out, GeneratorTrait);
+ }
+
+ // Create a blob abbreviation
+ using namespace llvm;
+
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(HEADER_SEARCH_TABLE));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Bucket offset
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of entries
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Strings offset
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Table + strings
+ unsigned TableAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the header search table
+ RecordData::value_type Record[] = {HEADER_SEARCH_TABLE, BucketOffset,
+ NumHeaderSearchEntries, TableData.size()};
+ TableData.append(GeneratorTrait.strings_begin(),GeneratorTrait.strings_end());
+ Stream.EmitRecordWithBlob(TableAbbrev, Record, TableData);
+
+ // Free all of the strings we had to duplicate.
+ for (unsigned I = 0, N = SavedStrings.size(); I != N; ++I)
+ free(const_cast<char *>(SavedStrings[I]));
+}
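+
+// On-disk layout sketch (illustrative): the HEADER_SEARCH_TABLE blob begins
+// with a 32-bit zero pad (so that no bucket lands at offset 0), followed by
+// the chained hash table itself, followed by the framework name strings; the
+// record carries the bucket offset, the entry count, and the strings offset
+// so the reader can locate each region directly.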
+
+/// \brief Writes the block containing the serialized form of the
+/// source manager.
+///
+/// TODO: We should probably use an on-disk hash table (stored in a
+/// blob), indexed based on the file name, so that we only create
+/// entries for files that we actually need. In the common case (no
+/// errors), we probably won't have to create file entries for any of
+/// the files in the AST.
+void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
+ const Preprocessor &PP) {
+ RecordData Record;
+
+ // Enter the source manager block.
+ Stream.EnterSubblock(SOURCE_MANAGER_BLOCK_ID, 3);
+
+ // Abbreviations for the various kinds of source-location entries.
+ unsigned SLocFileAbbrv = CreateSLocFileAbbrev(Stream);
+ unsigned SLocBufferAbbrv = CreateSLocBufferAbbrev(Stream);
+ unsigned SLocBufferBlobAbbrv = CreateSLocBufferBlobAbbrev(Stream);
+ unsigned SLocExpansionAbbrv = CreateSLocExpansionAbbrev(Stream);
+
+ // Write out the source location entry table. We skip the first
+ // entry, which is always the same dummy entry.
+ std::vector<uint32_t> SLocEntryOffsets;
+ RecordData PreloadSLocs;
+ SLocEntryOffsets.reserve(SourceMgr.local_sloc_entry_size() - 1);
+ for (unsigned I = 1, N = SourceMgr.local_sloc_entry_size();
+ I != N; ++I) {
+ // Get this source location entry.
+ const SrcMgr::SLocEntry *SLoc = &SourceMgr.getLocalSLocEntry(I);
+ FileID FID = FileID::get(I);
+ assert(&SourceMgr.getSLocEntry(FID) == SLoc);
+
+ // Record the offset of this source-location entry.
+ SLocEntryOffsets.push_back(Stream.GetCurrentBitNo());
+
+ // Figure out which record code to use.
+ unsigned Code;
+ if (SLoc->isFile()) {
+ const SrcMgr::ContentCache *Cache = SLoc->getFile().getContentCache();
+ if (Cache->OrigEntry)
+ Code = SM_SLOC_FILE_ENTRY;
+ else
+ Code = SM_SLOC_BUFFER_ENTRY;
+ } else
+ Code = SM_SLOC_EXPANSION_ENTRY;
+ Record.clear();
+ Record.push_back(Code);
+
+ // Starting offset of this entry within this module, adjusted to skip the
+ // dummy entry at the start of the offset space.
+ Record.push_back(SLoc->getOffset() - 2);
+ if (SLoc->isFile()) {
+ const SrcMgr::FileInfo &File = SLoc->getFile();
+ Record.push_back(File.getIncludeLoc().getRawEncoding());
+ Record.push_back(File.getFileCharacteristic()); // FIXME: stable encoding
+ Record.push_back(File.hasLineDirectives());
+
+ const SrcMgr::ContentCache *Content = File.getContentCache();
+ if (Content->OrigEntry) {
+ assert(Content->OrigEntry == Content->ContentsEntry &&
+ "Writing to AST an overridden file is not supported");
+
+ // The source location entry is a file. Emit input file ID.
+ assert(InputFileIDs[Content->OrigEntry] != 0 && "Missed file entry");
+ Record.push_back(InputFileIDs[Content->OrigEntry]);
+
+ Record.push_back(File.NumCreatedFIDs);
+
+ FileDeclIDsTy::iterator FDI = FileDeclIDs.find(FID);
+ if (FDI != FileDeclIDs.end()) {
+ Record.push_back(FDI->second->FirstDeclIndex);
+ Record.push_back(FDI->second->DeclIDs.size());
+ } else {
+ Record.push_back(0);
+ Record.push_back(0);
+ }
+
+ Stream.EmitRecordWithAbbrev(SLocFileAbbrv, Record);
+
+ if (Content->BufferOverridden || Content->IsTransient) {
+ RecordData::value_type Record[] = {SM_SLOC_BUFFER_BLOB};
+ const llvm::MemoryBuffer *Buffer
+ = Content->getBuffer(PP.getDiagnostics(), PP.getSourceManager());
+ Stream.EmitRecordWithBlob(SLocBufferBlobAbbrv, Record,
+ StringRef(Buffer->getBufferStart(),
+ Buffer->getBufferSize() + 1));
+ }
+ } else {
+ // The source location entry is a buffer. The blob associated
+ // with this entry contains the contents of the buffer.
+
+ // We add one to the size so that we capture the trailing NUL
+ // that is required by llvm::MemoryBuffer::getMemBuffer (on
+ // the reader side).
+ const llvm::MemoryBuffer *Buffer
+ = Content->getBuffer(PP.getDiagnostics(), PP.getSourceManager());
+ const char *Name = Buffer->getBufferIdentifier();
+ Stream.EmitRecordWithBlob(SLocBufferAbbrv, Record,
+ StringRef(Name, strlen(Name) + 1));
+ RecordData::value_type Record[] = {SM_SLOC_BUFFER_BLOB};
+ Stream.EmitRecordWithBlob(SLocBufferBlobAbbrv, Record,
+ StringRef(Buffer->getBufferStart(),
+ Buffer->getBufferSize() + 1));
+
+ if (strcmp(Name, "<built-in>") == 0) {
+ PreloadSLocs.push_back(SLocEntryOffsets.size());
+ }
+ }
+ } else {
+ // The source location entry is a macro expansion.
+ const SrcMgr::ExpansionInfo &Expansion = SLoc->getExpansion();
+ Record.push_back(Expansion.getSpellingLoc().getRawEncoding());
+ Record.push_back(Expansion.getExpansionLocStart().getRawEncoding());
+ Record.push_back(Expansion.isMacroArgExpansion() ? 0
+ : Expansion.getExpansionLocEnd().getRawEncoding());
+
+ // Compute the token length for this macro expansion.
+ unsigned NextOffset = SourceMgr.getNextLocalOffset();
+ if (I + 1 != N)
+ NextOffset = SourceMgr.getLocalSLocEntry(I + 1).getOffset();
+ Record.push_back(NextOffset - SLoc->getOffset() - 1);
+ Stream.EmitRecordWithAbbrev(SLocExpansionAbbrv, Record);
+ }
+ }
+
+ Stream.ExitBlock();
+
+ if (SLocEntryOffsets.empty())
+ return;
+
+ // Write the source-location offsets table into the AST block. This
+ // table is used for lazily loading source-location information.
+ using namespace llvm;
+
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SOURCE_LOCATION_OFFSETS));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 16)); // # of slocs
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 16)); // total size
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // offsets
+ unsigned SLocOffsetsAbbrev = Stream.EmitAbbrev(Abbrev);
+ {
+ RecordData::value_type Record[] = {
+ SOURCE_LOCATION_OFFSETS, SLocEntryOffsets.size(),
+ SourceMgr.getNextLocalOffset() - 1 /* skip dummy */};
+ Stream.EmitRecordWithBlob(SLocOffsetsAbbrev, Record,
+ bytes(SLocEntryOffsets));
+ }
+ // Write the source location entry preloads array, telling the AST
+ // reader which source locations entries it should load eagerly.
+ Stream.EmitRecord(SOURCE_LOCATION_PRELOADS, PreloadSLocs);
+
+ // Write the line table. It depends on remapping working, so it must come
+ // after the source location offsets.
+ if (SourceMgr.hasLineTable()) {
+ LineTableInfo &LineTable = SourceMgr.getLineTable();
+
+ Record.clear();
+
+ // Emit the needed file names.
+ llvm::DenseMap<int, int> FilenameMap;
+ for (const auto &L : LineTable) {
+ if (L.first.ID < 0)
+ continue;
+ for (auto &LE : L.second) {
+ if (FilenameMap.insert(std::make_pair(LE.FilenameID,
+ FilenameMap.size())).second)
+ AddPath(LineTable.getFilename(LE.FilenameID), Record);
+ }
+ }
+ Record.push_back(0);
+
+ // Emit the line entries
+ for (const auto &L : LineTable) {
+ // Only emit entries for local files.
+ if (L.first.ID < 0)
+ continue;
+
+ // Emit the file ID
+ Record.push_back(L.first.ID);
+
+ // Emit the line entries
+ Record.push_back(L.second.size());
+ for (const auto &LE : L.second) {
+ Record.push_back(LE.FileOffset);
+ Record.push_back(LE.LineNo);
+ Record.push_back(FilenameMap[LE.FilenameID]);
+ Record.push_back((unsigned)LE.FileKind);
+ Record.push_back(LE.IncludeOffset);
+ }
+ }
+
+ Stream.EmitRecord(SOURCE_MANAGER_LINE_TABLE, Record);
+ }
+}
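+
+// Lazy-loading sketch (illustrative): SOURCE_LOCATION_OFFSETS records the
+// bit offset of each SLocEntry, so the reader can map a global offset to an
+// entry and deserialize only that entry on demand rather than the whole
+// source manager block.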
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Serialization
+//===----------------------------------------------------------------------===//
+
+static bool shouldIgnoreMacro(MacroDirective *MD, bool IsModule,
+ const Preprocessor &PP) {
+ if (MacroInfo *MI = MD->getMacroInfo())
+ if (MI->isBuiltinMacro())
+ return true;
+
+ if (IsModule) {
+ SourceLocation Loc = MD->getLocation();
+ if (Loc.isInvalid())
+ return true;
+ if (PP.getSourceManager().getFileID(Loc) == PP.getPredefinesFileID())
+ return true;
+ }
+
+ return false;
+}
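+
+// For instance (illustrative): a builtin such as __FILE__ is never written,
+// and when writing a module a macro defined on the command line, which lives
+// in the predefines file, is skipped as well.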
+
+/// \brief Writes the block containing the serialized form of the
+/// preprocessor.
+///
+void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
+ PreprocessingRecord *PPRec = PP.getPreprocessingRecord();
+ if (PPRec)
+ WritePreprocessorDetail(*PPRec);
+
+ RecordData Record;
+ RecordData ModuleMacroRecord;
+
+ // If the preprocessor __COUNTER__ value has been bumped, remember it.
+ if (PP.getCounterValue() != 0) {
+ RecordData::value_type Record[] = {PP.getCounterValue()};
+ Stream.EmitRecord(PP_COUNTER_VALUE, Record);
+ }
+
+ // Enter the preprocessor block.
+ Stream.EnterSubblock(PREPROCESSOR_BLOCK_ID, 3);
+
+ // If the AST file contains __DATE__ or __TIME__, emit a warning about this.
+ // FIXME: Include a location for the use, and say which one was used.
+ if (PP.SawDateOrTime())
+ PP.Diag(SourceLocation(), diag::warn_module_uses_date_time) << IsModule;
+
+ // Loop over all the macro directives that are live at the end of the file,
+ // emitting each to the PP section.
+
+ // Construct the list of identifiers with macro directives that need to be
+ // serialized.
+ SmallVector<const IdentifierInfo *, 128> MacroIdentifiers;
+ for (auto &Id : PP.getIdentifierTable())
+ if (Id.second->hadMacroDefinition() &&
+ (!Id.second->isFromAST() ||
+ Id.second->hasChangedSinceDeserialization()))
+ MacroIdentifiers.push_back(Id.second);
+ // Sort the set of macro definitions that need to be serialized by the
+ // name of the macro, to provide a stable ordering.
+ std::sort(MacroIdentifiers.begin(), MacroIdentifiers.end(),
+ llvm::less_ptr<IdentifierInfo>());
+
+ // Emit the macro directives as a list and associate the offset with the
+ // identifier they belong to.
+ for (const IdentifierInfo *Name : MacroIdentifiers) {
+ MacroDirective *MD = PP.getLocalMacroDirectiveHistory(Name);
+ auto StartOffset = Stream.GetCurrentBitNo();
+
+ // Emit the macro directives in reverse source order.
+ for (; MD; MD = MD->getPrevious()) {
+ // Once we hit an ignored macro, we're done: the rest of the chain
+ // will all be ignored macros.
+ if (shouldIgnoreMacro(MD, IsModule, PP))
+ break;
+
+ AddSourceLocation(MD->getLocation(), Record);
+ Record.push_back(MD->getKind());
+ if (auto *DefMD = dyn_cast<DefMacroDirective>(MD)) {
+ Record.push_back(getMacroRef(DefMD->getInfo(), Name));
+ } else if (auto *VisMD = dyn_cast<VisibilityMacroDirective>(MD)) {
+ Record.push_back(VisMD->isPublic());
+ }
+ }
+
+ // Write out any exported module macros.
+ bool EmittedModuleMacros = false;
+ if (IsModule) {
+ auto Leafs = PP.getLeafModuleMacros(Name);
+ SmallVector<ModuleMacro*, 8> Worklist(Leafs.begin(), Leafs.end());
+ llvm::DenseMap<ModuleMacro*, unsigned> Visits;
+ while (!Worklist.empty()) {
+ auto *Macro = Worklist.pop_back_val();
+
+ // Emit a record indicating this submodule exports this macro.
+ ModuleMacroRecord.push_back(
+ getSubmoduleID(Macro->getOwningModule()));
+ ModuleMacroRecord.push_back(getMacroRef(Macro->getMacroInfo(), Name));
+ for (auto *M : Macro->overrides())
+ ModuleMacroRecord.push_back(getSubmoduleID(M->getOwningModule()));
+
+ Stream.EmitRecord(PP_MODULE_MACRO, ModuleMacroRecord);
+ ModuleMacroRecord.clear();
+
+ // Enqueue overridden macros once we've visited all their ancestors.
+ for (auto *M : Macro->overrides())
+ if (++Visits[M] == M->getNumOverridingMacros())
+ Worklist.push_back(M);
+
+ EmittedModuleMacros = true;
+ }
+ }
+
+ if (Record.empty() && !EmittedModuleMacros)
+ continue;
+
+ IdentMacroDirectivesOffsetMap[Name] = StartOffset;
+ Stream.EmitRecord(PP_MACRO_DIRECTIVE_HISTORY, Record);
+ Record.clear();
+ }
+
+ /// \brief Offsets of each of the macros into the bitstream, indexed by
+ /// the local macro ID
+ ///
+ /// For each identifier that is associated with a macro, this map
+ /// provides the offset into the bitstream where that macro is
+ /// defined.
+ std::vector<uint32_t> MacroOffsets;
+
+ for (unsigned I = 0, N = MacroInfosToEmit.size(); I != N; ++I) {
+ const IdentifierInfo *Name = MacroInfosToEmit[I].Name;
+ MacroInfo *MI = MacroInfosToEmit[I].MI;
+ MacroID ID = MacroInfosToEmit[I].ID;
+
+ if (ID < FirstMacroID) {
+ assert(0 && "Loaded MacroInfo entered MacroInfosToEmit ?");
+ continue;
+ }
+
+ // Record the local offset of this macro.
+ unsigned Index = ID - FirstMacroID;
+ if (Index == MacroOffsets.size())
+ MacroOffsets.push_back(Stream.GetCurrentBitNo());
+ else {
+ if (Index > MacroOffsets.size())
+ MacroOffsets.resize(Index + 1);
+
+ MacroOffsets[Index] = Stream.GetCurrentBitNo();
+ }
+
+ AddIdentifierRef(Name, Record);
+ Record.push_back(inferSubmoduleIDFromLocation(MI->getDefinitionLoc()));
+ AddSourceLocation(MI->getDefinitionLoc(), Record);
+ AddSourceLocation(MI->getDefinitionEndLoc(), Record);
+ Record.push_back(MI->isUsed());
+ Record.push_back(MI->isUsedForHeaderGuard());
+ unsigned Code;
+ if (MI->isObjectLike()) {
+ Code = PP_MACRO_OBJECT_LIKE;
+ } else {
+ Code = PP_MACRO_FUNCTION_LIKE;
+
+ Record.push_back(MI->isC99Varargs());
+ Record.push_back(MI->isGNUVarargs());
+ Record.push_back(MI->hasCommaPasting());
+ Record.push_back(MI->getNumArgs());
+ for (const IdentifierInfo *Arg : MI->args())
+ AddIdentifierRef(Arg, Record);
+ }
+
+ // If we have a detailed preprocessing record, record the macro definition
+ // ID that corresponds to this macro.
+ if (PPRec)
+ Record.push_back(MacroDefinitions[PPRec->findMacroDefinition(MI)]);
+
+ Stream.EmitRecord(Code, Record);
+ Record.clear();
+
+ // Emit the tokens array.
+ for (unsigned TokNo = 0, e = MI->getNumTokens(); TokNo != e; ++TokNo) {
+ // Note that we know that the preprocessor does not have any annotation
+ // tokens in it because they are created by the parser, and thus can't
+ // be in a macro definition.
+ const Token &Tok = MI->getReplacementToken(TokNo);
+ AddToken(Tok, Record);
+ Stream.EmitRecord(PP_TOKEN, Record);
+ Record.clear();
+ }
+ ++NumMacros;
+ }
+
+ Stream.ExitBlock();
+
+ // Write the offsets table for macro IDs.
+ using namespace llvm;
+
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(MACRO_OFFSET));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of macros
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // first ID
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+
+ unsigned MacroOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
+ {
+ RecordData::value_type Record[] = {MACRO_OFFSET, MacroOffsets.size(),
+ FirstMacroID - NUM_PREDEF_MACRO_IDS};
+ Stream.EmitRecordWithBlob(MacroOffsetAbbrev, Record, bytes(MacroOffsets));
+ }
+}
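+
+// Reader-side sketch (illustrative, not part of this code): a local macro ID
+// is resolved through the table emitted above roughly as
+//
+//   uint64_t BitOffset = MacroOffsets[ID - FirstMacroID];
+//
+// after which the cursor jumps to BitOffset and deserializes the
+// PP_MACRO_OBJECT_LIKE or PP_MACRO_FUNCTION_LIKE record and its PP_TOKENs.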
+
+void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
+ if (PPRec.local_begin() == PPRec.local_end())
+ return;
+
+ SmallVector<PPEntityOffset, 64> PreprocessedEntityOffsets;
+
+ // Enter the preprocessor block.
+ Stream.EnterSubblock(PREPROCESSOR_DETAIL_BLOCK_ID, 3);
+
+ // If the preprocessor has a preprocessing record, emit it.
+ unsigned NumPreprocessingRecords = 0;
+ using namespace llvm;
+
+ // Set up the abbreviation for inclusion directives.
+ unsigned InclusionAbbrev = 0;
+ {
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(PPD_INCLUSION_DIRECTIVE));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // filename length
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // in quotes
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // kind
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // imported module
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ InclusionAbbrev = Stream.EmitAbbrev(Abbrev);
+ }
+
+ unsigned FirstPreprocessorEntityID
+ = (Chain ? PPRec.getNumLoadedPreprocessedEntities() : 0)
+ + NUM_PREDEF_PP_ENTITY_IDS;
+ unsigned NextPreprocessorEntityID = FirstPreprocessorEntityID;
+ RecordData Record;
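+ // Walk the local preprocessed entities in order, assigning each one the
+ // next sequential entity ID and recording its bit offset for the
+ // PPD_ENTITIES_OFFSETS table written below.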
+ for (PreprocessingRecord::iterator E = PPRec.local_begin(),
+ EEnd = PPRec.local_end();
+ E != EEnd;
+ (void)++E, ++NumPreprocessingRecords, ++NextPreprocessorEntityID) {
+ Record.clear();
+
+ PreprocessedEntityOffsets.push_back(
+ PPEntityOffset((*E)->getSourceRange(), Stream.GetCurrentBitNo()));
+
+ if (auto *MD = dyn_cast<MacroDefinitionRecord>(*E)) {
+ // Record this macro definition's ID.
+ MacroDefinitions[MD] = NextPreprocessorEntityID;
+
+ AddIdentifierRef(MD->getName(), Record);
+ Stream.EmitRecord(PPD_MACRO_DEFINITION, Record);
+ continue;
+ }
+
+ if (auto *ME = dyn_cast<MacroExpansion>(*E)) {
+ Record.push_back(ME->isBuiltinMacro());
+ if (ME->isBuiltinMacro())
+ AddIdentifierRef(ME->getName(), Record);
+ else
+ Record.push_back(MacroDefinitions[ME->getDefinition()]);
+ Stream.EmitRecord(PPD_MACRO_EXPANSION, Record);
+ continue;
+ }
+
+ if (auto *ID = dyn_cast<InclusionDirective>(*E)) {
+ Record.push_back(PPD_INCLUSION_DIRECTIVE);
+ Record.push_back(ID->getFileName().size());
+ Record.push_back(ID->wasInQuotes());
+ Record.push_back(static_cast<unsigned>(ID->getKind()));
+ Record.push_back(ID->importedModule());
+ SmallString<64> Buffer;
+ Buffer += ID->getFileName();
+ // The FileEntry may be null if the file could not be resolved; this can
+ // happen because we create a PCH even in the presence of compiler errors.
+ if (ID->getFile())
+ Buffer += ID->getFile()->getName();
+ Stream.EmitRecordWithBlob(InclusionAbbrev, Record, Buffer);
+ continue;
+ }
+
+ llvm_unreachable("Unhandled PreprocessedEntity in ASTWriter");
+ }
+ Stream.ExitBlock();
+
+ // Write the offsets table for the preprocessing record.
+ if (NumPreprocessingRecords > 0) {
+ assert(PreprocessedEntityOffsets.size() == NumPreprocessingRecords);
+
+ // Write the offsets table for identifier IDs.
+ using namespace llvm;
+
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(PPD_ENTITIES_OFFSETS));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // first pp entity
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned PPEOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ RecordData::value_type Record[] = {PPD_ENTITIES_OFFSETS,
+ FirstPreprocessorEntityID -
+ NUM_PREDEF_PP_ENTITY_IDS};
+ Stream.EmitRecordWithBlob(PPEOffsetAbbrev, Record,
+ bytes(PreprocessedEntityOffsets));
+ }
+}
+
+unsigned ASTWriter::getLocalOrImportedSubmoduleID(Module *Mod) {
+ if (!Mod)
+ return 0;
+
+ llvm::DenseMap<Module *, unsigned>::iterator Known = SubmoduleIDs.find(Mod);
+ if (Known != SubmoduleIDs.end())
+ return Known->second;
+
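+ // Only submodules of the module currently being written receive fresh
+ // local IDs; any other module either was found in SubmoduleIDs above
+ // (imported) or gets 0 here.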
+ if (Mod->getTopLevelModule() != WritingModule)
+ return 0;
+
+ return SubmoduleIDs[Mod] = NextSubmoduleID++;
+}
+
+unsigned ASTWriter::getSubmoduleID(Module *Mod) {
+ // FIXME: The assert below can fire easily: we may have a reference to a
+ // submodule that did not result in us loading a module file for that
+ // submodule. For instance, a cross-top-level-module 'conflict' declaration
+ // will hit this.
+ unsigned ID = getLocalOrImportedSubmoduleID(Mod);
+ assert((ID || !Mod) &&
+ "asked for module ID for non-local, non-imported module");
+ return ID;
+}
+
+/// \brief Compute the number of modules within the given tree (including the
+/// given module).
+static unsigned getNumberOfModules(Module *Mod) {
+ unsigned ChildModules = 0;
+ for (auto Sub = Mod->submodule_begin(), SubEnd = Mod->submodule_end();
+ Sub != SubEnd; ++Sub)
+ ChildModules += getNumberOfModules(*Sub);
+
+ return ChildModules + 1;
+}
+
+void ASTWriter::WriteSubmodules(Module *WritingModule) {
+ // Enter the submodule description block.
+ Stream.EnterSubblock(SUBMODULE_BLOCK_ID, /*bits for abbreviations*/5);
+
+ // Write the abbreviations needed for the submodules block.
+ using namespace llvm;
+
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_DEFINITION));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ID
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Parent
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsFramework
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsExplicit
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsSystem
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsExternC
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InferSubmodules...
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InferExplicit...
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InferExportWild...
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ConfigMacrosExh...
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned DefinitionAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_UMBRELLA_HEADER));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned UmbrellaAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_HEADER));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned HeaderAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_TOPHEADER));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned TopHeaderAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_UMBRELLA_DIR));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned UmbrellaDirAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_REQUIRES));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // State
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Feature
+ unsigned RequiresAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_EXCLUDED_HEADER));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned ExcludedHeaderAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_TEXTUAL_HEADER));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned TextualHeaderAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_PRIVATE_HEADER));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned PrivateHeaderAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_PRIVATE_TEXTUAL_HEADER));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned PrivateTextualHeaderAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_LINK_LIBRARY));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsFramework
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned LinkLibraryAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_CONFIG_MACRO));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Macro name
+ unsigned ConfigMacroAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_CONFLICT));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Other module
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Message
+ unsigned ConflictAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the submodule metadata block.
+ RecordData::value_type Record[] = {getNumberOfModules(WritingModule),
+ FirstSubmoduleID -
+ NUM_PREDEF_SUBMODULE_IDS};
+ Stream.EmitRecord(SUBMODULE_METADATA, Record);
+
+ // Write all of the submodules.
+ std::queue<Module *> Q;
+ Q.push(WritingModule);
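+ // Process the tree breadth-first so that a parent module is always
+ // emitted, and therefore assigned an ID, before any of its submodules.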
+ while (!Q.empty()) {
+ Module *Mod = Q.front();
+ Q.pop();
+ unsigned ID = getSubmoduleID(Mod);
+
+ uint64_t ParentID = 0;
+ if (Mod->Parent) {
+ assert(SubmoduleIDs[Mod->Parent] && "Submodule parent not written?");
+ ParentID = SubmoduleIDs[Mod->Parent];
+ }
+
+ // Emit the definition of the block.
+ {
+ RecordData::value_type Record[] = {
+ SUBMODULE_DEFINITION, ID, ParentID, Mod->IsFramework, Mod->IsExplicit,
+ Mod->IsSystem, Mod->IsExternC, Mod->InferSubmodules,
+ Mod->InferExplicitSubmodules, Mod->InferExportWildcard,
+ Mod->ConfigMacrosExhaustive};
+ Stream.EmitRecordWithBlob(DefinitionAbbrev, Record, Mod->Name);
+ }
+
+ // Emit the requirements.
+ for (const auto &R : Mod->Requirements) {
+ RecordData::value_type Record[] = {SUBMODULE_REQUIRES, R.second};
+ Stream.EmitRecordWithBlob(RequiresAbbrev, Record, R.first);
+ }
+
+ // Emit the umbrella header, if there is one.
+ if (auto UmbrellaHeader = Mod->getUmbrellaHeader()) {
+ RecordData::value_type Record[] = {SUBMODULE_UMBRELLA_HEADER};
+ Stream.EmitRecordWithBlob(UmbrellaAbbrev, Record,
+ UmbrellaHeader.NameAsWritten);
+ } else if (auto UmbrellaDir = Mod->getUmbrellaDir()) {
+ RecordData::value_type Record[] = {SUBMODULE_UMBRELLA_DIR};
+ Stream.EmitRecordWithBlob(UmbrellaDirAbbrev, Record,
+ UmbrellaDir.NameAsWritten);
+ }
+
+ // Emit the headers.
+ struct {
+ unsigned RecordKind;
+ unsigned Abbrev;
+ Module::HeaderKind HeaderKind;
+ } HeaderLists[] = {
+ {SUBMODULE_HEADER, HeaderAbbrev, Module::HK_Normal},
+ {SUBMODULE_TEXTUAL_HEADER, TextualHeaderAbbrev, Module::HK_Textual},
+ {SUBMODULE_PRIVATE_HEADER, PrivateHeaderAbbrev, Module::HK_Private},
+ {SUBMODULE_PRIVATE_TEXTUAL_HEADER, PrivateTextualHeaderAbbrev,
+ Module::HK_PrivateTextual},
+ {SUBMODULE_EXCLUDED_HEADER, ExcludedHeaderAbbrev, Module::HK_Excluded}
+ };
+ for (auto &HL : HeaderLists) {
+ RecordData::value_type Record[] = {HL.RecordKind};
+ for (auto &H : Mod->Headers[HL.HeaderKind])
+ Stream.EmitRecordWithBlob(HL.Abbrev, Record, H.NameAsWritten);
+ }
+
+ // Emit the top headers.
+ {
+ auto TopHeaders = Mod->getTopHeaders(PP->getFileManager());
+ RecordData::value_type Record[] = {SUBMODULE_TOPHEADER};
+ for (auto *H : TopHeaders)
+ Stream.EmitRecordWithBlob(TopHeaderAbbrev, Record, H->getName());
+ }
+
+ // Emit the imports.
+ if (!Mod->Imports.empty()) {
+ RecordData Record;
+ for (auto *I : Mod->Imports)
+ Record.push_back(getSubmoduleID(I));
+ Stream.EmitRecord(SUBMODULE_IMPORTS, Record);
+ }
+
+ // Emit the exports.
+ if (!Mod->Exports.empty()) {
+ RecordData Record;
+ for (const auto &E : Mod->Exports) {
+ // FIXME: This may fail; we don't require that all exported modules
+ // are local or imported.
+ Record.push_back(getSubmoduleID(E.getPointer()));
+ Record.push_back(E.getInt());
+ }
+ Stream.EmitRecord(SUBMODULE_EXPORTS, Record);
+ }
+
+ // FIXME: How do we emit the 'use'd modules? They may not be submodules.
+ // This might be unnecessary, as use declarations are only used while
+ // building the module itself.
+
+ // Emit the link libraries.
+ for (const auto &LL : Mod->LinkLibraries) {
+ RecordData::value_type Record[] = {SUBMODULE_LINK_LIBRARY,
+ LL.IsFramework};
+ Stream.EmitRecordWithBlob(LinkLibraryAbbrev, Record, LL.Library);
+ }
+
+ // Emit the conflicts.
+ for (const auto &C : Mod->Conflicts) {
+ // FIXME: This may fail; we don't require that all conflicting modules
+ // are local or imported.
+ RecordData::value_type Record[] = {SUBMODULE_CONFLICT,
+ getSubmoduleID(C.Other)};
+ Stream.EmitRecordWithBlob(ConflictAbbrev, Record, C.Message);
+ }
+
+ // Emit the configuration macros.
+ for (const auto &CM : Mod->ConfigMacros) {
+ RecordData::value_type Record[] = {SUBMODULE_CONFIG_MACRO};
+ Stream.EmitRecordWithBlob(ConfigMacroAbbrev, Record, CM);
+ }
+
+ // Queue up the submodules of this module.
+ for (auto *M : Mod->submodules())
+ Q.push(M);
+ }
+
+ Stream.ExitBlock();
+
+ assert((NextSubmoduleID - FirstSubmoduleID ==
+ getNumberOfModules(WritingModule)) &&
+ "Wrong # of submodules; found a reference to a non-local, "
+ "non-imported submodule?");
+}
+
+serialization::SubmoduleID
+ASTWriter::inferSubmoduleIDFromLocation(SourceLocation Loc) {
+ if (Loc.isInvalid() || !WritingModule)
+ return 0; // No submodule
+
+ // Find the module that owns this location.
+ ModuleMap &ModMap = PP->getHeaderSearchInfo().getModuleMap();
+ Module *OwningMod
+ = ModMap.inferModuleFromLocation(FullSourceLoc(Loc,PP->getSourceManager()));
+ if (!OwningMod)
+ return 0;
+
+ // Check whether this submodule is part of our own module.
+ if (WritingModule != OwningMod && !OwningMod->isSubModuleOf(WritingModule))
+ return 0;
+
+ return getSubmoduleID(OwningMod);
+}
+
+void ASTWriter::WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag,
+ bool isModule) {
+ // Make sure set diagnostic pragmas don't affect the translation unit that
+ // imports the module.
+ // FIXME: Make diagnostic pragma sections work properly with modules.
+ if (isModule)
+ return;
+
+ llvm::SmallDenseMap<const DiagnosticsEngine::DiagState *, unsigned, 64>
+ DiagStateIDMap;
+ unsigned CurrID = 0;
+ DiagStateIDMap[&Diag.DiagStates.front()] = ++CurrID; // the command-line one.
+ RecordData Record;
+ for (DiagnosticsEngine::DiagStatePointsTy::const_iterator
+ I = Diag.DiagStatePoints.begin(), E = Diag.DiagStatePoints.end();
+ I != E; ++I) {
+ const DiagnosticsEngine::DiagStatePoint &point = *I;
+ if (point.Loc.isInvalid())
+ continue;
+
+ Record.push_back(point.Loc.getRawEncoding());
+ unsigned &DiagStateID = DiagStateIDMap[point.State];
+ Record.push_back(DiagStateID);
+
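+ // An ID of 0 means this state has not been emitted yet: assign it the next
+ // ID and emit its pragma-modified severity mappings inline, terminated by -1.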
+ if (DiagStateID == 0) {
+ DiagStateID = ++CurrID;
+ for (const auto &I : *(point.State)) {
+ if (I.second.isPragma()) {
+ Record.push_back(I.first);
+ Record.push_back((unsigned)I.second.getSeverity());
+ }
+ }
+ Record.push_back(-1); // Mark the end of the diag/map pairs for this location.
+ }
+ }
+
+ if (!Record.empty())
+ Stream.EmitRecord(DIAG_PRAGMA_MAPPINGS, Record);
+}
+
+void ASTWriter::WriteCXXCtorInitializersOffsets() {
+ if (CXXCtorInitializersOffsets.empty())
+ return;
+
+ // Create a blob abbreviation for the C++ ctor initializer offsets.
+ using namespace llvm;
+
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(CXX_CTOR_INITIALIZERS_OFFSETS));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // size
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned CtorInitializersOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the base specifier offsets table.
+ RecordData::value_type Record[] = {CXX_CTOR_INITIALIZERS_OFFSETS,
+ CXXCtorInitializersOffsets.size()};
+ Stream.EmitRecordWithBlob(CtorInitializersOffsetAbbrev, Record,
+ bytes(CXXCtorInitializersOffsets));
+}
+
+void ASTWriter::WriteCXXBaseSpecifiersOffsets() {
+ if (CXXBaseSpecifiersOffsets.empty())
+ return;
+
+ // Create a blob abbreviation for the C++ base specifiers offsets.
+ using namespace llvm;
+
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(CXX_BASE_SPECIFIER_OFFSETS));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // size
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned BaseSpecifierOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the base specifier offsets table.
+ RecordData::value_type Record[] = {CXX_BASE_SPECIFIER_OFFSETS,
+ CXXBaseSpecifiersOffsets.size()};
+ Stream.EmitRecordWithBlob(BaseSpecifierOffsetAbbrev, Record,
+ bytes(CXXBaseSpecifiersOffsets));
+}
+
+//===----------------------------------------------------------------------===//
+// Type Serialization
+//===----------------------------------------------------------------------===//
+
+/// \brief Write the representation of a type to the AST stream.
+void ASTWriter::WriteType(QualType T) {
+ TypeIdx &Idx = TypeIdxs[T];
+ if (Idx.getIndex() == 0) // we haven't seen this type before.
+ Idx = TypeIdx(NextTypeID++);
+
+ assert(Idx.getIndex() >= FirstTypeID && "Re-writing a type from a prior AST");
+
+ // Record the offset for this type.
+ unsigned Index = Idx.getIndex() - FirstTypeID;
+ if (TypeOffsets.size() <= Index)
+ TypeOffsets.resize(Index + 1);
+ TypeOffsets[Index] = Stream.GetCurrentBitNo();
+
+ RecordData Record;
+
+ // Emit the type's representation.
+ ASTTypeWriter W(*this, Record);
+ W.AbbrevToUse = 0;
+
+ if (T.hasLocalNonFastQualifiers()) {
+ Qualifiers Qs = T.getLocalQualifiers();
+ AddTypeRef(T.getLocalUnqualifiedType(), Record);
+ Record.push_back(Qs.getAsOpaqueValue());
+ W.Code = TYPE_EXT_QUAL;
+ W.AbbrevToUse = TypeExtQualAbbrev;
+ } else {
+ switch (T->getTypeClass()) {
+ // For all of the concrete, non-dependent types, call the
+ // appropriate visitor function.
+#define TYPE(Class, Base) \
+ case Type::Class: W.Visit##Class##Type(cast<Class##Type>(T)); break;
+#define ABSTRACT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+ }
+ }
+
+ // Emit the serialized record.
+ Stream.EmitRecord(W.Code, Record, W.AbbrevToUse);
+
+ // Flush any expressions that were written as part of this type.
+ FlushStmts();
+}
+
+//===----------------------------------------------------------------------===//
+// Declaration Serialization
+//===----------------------------------------------------------------------===//
+
+/// \brief Write the block containing all of the declaration IDs
+/// lexically declared within the given DeclContext.
+///
+/// \returns the offset of the DECL_CONTEXT_LEXICAL block within the
+ /// bitstream, or 0 if no block was written.
+uint64_t ASTWriter::WriteDeclContextLexicalBlock(ASTContext &Context,
+ DeclContext *DC) {
+ if (DC->decls_empty())
+ return 0;
+
+ uint64_t Offset = Stream.GetCurrentBitNo();
+ SmallVector<uint32_t, 128> KindDeclPairs;
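+ // Collect a flat (declaration kind, declaration ID) pair for each nested
+ // declaration; the pairs are then emitted as a single blob.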
+ for (const auto *D : DC->decls()) {
+ KindDeclPairs.push_back(D->getKind());
+ KindDeclPairs.push_back(GetDeclRef(D));
+ }
+
+ ++NumLexicalDeclContexts;
+ RecordData::value_type Record[] = {DECL_CONTEXT_LEXICAL};
+ Stream.EmitRecordWithBlob(DeclContextLexicalAbbrev, Record,
+ bytes(KindDeclPairs));
+ return Offset;
+}
+
+void ASTWriter::WriteTypeDeclOffsets() {
+ using namespace llvm;
+
+ // Write the type offsets array
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(TYPE_OFFSET));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of types
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // base type index
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // types block
+ unsigned TypeOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
+ {
+ RecordData::value_type Record[] = {TYPE_OFFSET, TypeOffsets.size(),
+ FirstTypeID - NUM_PREDEF_TYPE_IDS};
+ Stream.EmitRecordWithBlob(TypeOffsetAbbrev, Record, bytes(TypeOffsets));
+ }
+
+ // Write the declaration offsets array
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(DECL_OFFSET));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of declarations
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // base decl ID
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // declarations block
+ unsigned DeclOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
+ {
+ RecordData::value_type Record[] = {DECL_OFFSET, DeclOffsets.size(),
+ FirstDeclID - NUM_PREDEF_DECL_IDS};
+ Stream.EmitRecordWithBlob(DeclOffsetAbbrev, Record, bytes(DeclOffsets));
+ }
+}
+
+void ASTWriter::WriteFileDeclIDsMap() {
+ using namespace llvm;
+
+ SmallVector<std::pair<FileID, DeclIDInFileInfo *>, 64> SortedFileDeclIDs(
+ FileDeclIDs.begin(), FileDeclIDs.end());
+ std::sort(SortedFileDeclIDs.begin(), SortedFileDeclIDs.end(),
+ llvm::less_first());
+
+ // Join the vectors of DeclIDs from all files.
+ SmallVector<DeclID, 256> FileGroupedDeclIDs;
+ for (auto &FileDeclEntry : SortedFileDeclIDs) {
+ DeclIDInFileInfo &Info = *FileDeclEntry.second;
+ Info.FirstDeclIndex = FileGroupedDeclIDs.size();
+ for (auto &LocDeclEntry : Info.DeclIDs)
+ FileGroupedDeclIDs.push_back(LocDeclEntry.second);
+ }
+
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(FILE_SORTED_DECLS));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned AbbrevCode = Stream.EmitAbbrev(Abbrev);
+ RecordData::value_type Record[] = {FILE_SORTED_DECLS,
+ FileGroupedDeclIDs.size()};
+ Stream.EmitRecordWithBlob(AbbrevCode, Record, bytes(FileGroupedDeclIDs));
+}
+
+void ASTWriter::WriteComments() {
+ Stream.EnterSubblock(COMMENTS_BLOCK_ID, 3);
+ ArrayRef<RawComment *> RawComments = Context->Comments.getComments();
+ RecordData Record;
+ for (const auto *I : RawComments) {
+ Record.clear();
+ AddSourceRange(I->getSourceRange(), Record);
+ Record.push_back(I->getKind());
+ Record.push_back(I->isTrailingComment());
+ Record.push_back(I->isAlmostTrailingComment());
+ Stream.EmitRecord(COMMENTS_RAW_COMMENT, Record);
+ }
+ Stream.ExitBlock();
+}
+
+//===----------------------------------------------------------------------===//
+// Global Method Pool and Selector Serialization
+//===----------------------------------------------------------------------===//
+
+namespace {
+// Trait used for the on-disk hash table used in the method pool.
+class ASTMethodPoolTrait {
+ ASTWriter &Writer;
+
+public:
+ typedef Selector key_type;
+ typedef key_type key_type_ref;
+
+ struct data_type {
+ SelectorID ID;
+ ObjCMethodList Instance, Factory;
+ };
+ typedef const data_type& data_type_ref;
+
+ typedef unsigned hash_value_type;
+ typedef unsigned offset_type;
+
+ explicit ASTMethodPoolTrait(ASTWriter &Writer) : Writer(Writer) { }
+
+ static hash_value_type ComputeHash(Selector Sel) {
+ return serialization::ComputeHash(Sel);
+ }
+
+ std::pair<unsigned,unsigned>
+ EmitKeyDataLength(raw_ostream& Out, Selector Sel,
+ data_type_ref Methods) {
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+ unsigned KeyLen = 2 + (Sel.getNumArgs()? Sel.getNumArgs() * 4 : 4);
+ LE.write<uint16_t>(KeyLen);
+ unsigned DataLen = 4 + 2 + 2; // 4 bytes for the selector ID, plus 2 bytes for each of the two method-count words
+ for (const ObjCMethodList *Method = &Methods.Instance; Method;
+ Method = Method->getNext())
+ if (Method->getMethod())
+ DataLen += 4;
+ for (const ObjCMethodList *Method = &Methods.Factory; Method;
+ Method = Method->getNext())
+ if (Method->getMethod())
+ DataLen += 4;
+ LE.write<uint16_t>(DataLen);
+ return std::make_pair(KeyLen, DataLen);
+ }
+
+ void EmitKey(raw_ostream& Out, Selector Sel, unsigned) {
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+ uint64_t Start = Out.tell();
+ assert((Start >> 32) == 0 && "Selector key offset too large");
+ Writer.SetSelectorOffset(Sel, Start);
+ unsigned N = Sel.getNumArgs();
+ LE.write<uint16_t>(N);
+ if (N == 0)
+ N = 1;
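+ // A zero-argument selector still occupies one slot: its single identifier
+ // piece.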
+ for (unsigned I = 0; I != N; ++I)
+ LE.write<uint32_t>(
+ Writer.getIdentifierRef(Sel.getIdentifierInfoForSlot(I)));
+ }
+
+ void EmitData(raw_ostream& Out, key_type_ref,
+ data_type_ref Methods, unsigned DataLen) {
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+ uint64_t Start = Out.tell(); (void)Start;
+ LE.write<uint32_t>(Methods.ID);
+ unsigned NumInstanceMethods = 0;
+ for (const ObjCMethodList *Method = &Methods.Instance; Method;
+ Method = Method->getNext())
+ if (Method->getMethod())
+ ++NumInstanceMethods;
+
+ unsigned NumFactoryMethods = 0;
+ for (const ObjCMethodList *Method = &Methods.Factory; Method;
+ Method = Method->getNext())
+ if (Method->getMethod())
+ ++NumFactoryMethods;
+
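+ // Pack each method count together with the has-more-than-one-decl flag and
+ // the two flag bits carried by the ObjCMethodList itself.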
+ unsigned InstanceBits = Methods.Instance.getBits();
+ assert(InstanceBits < 4);
+ unsigned InstanceHasMoreThanOneDeclBit =
+ Methods.Instance.hasMoreThanOneDecl();
+ unsigned FullInstanceBits = (NumInstanceMethods << 3) |
+ (InstanceHasMoreThanOneDeclBit << 2) |
+ InstanceBits;
+ unsigned FactoryBits = Methods.Factory.getBits();
+ assert(FactoryBits < 4);
+ unsigned FactoryHasMoreThanOneDeclBit =
+ Methods.Factory.hasMoreThanOneDecl();
+ unsigned FullFactoryBits = (NumFactoryMethods << 3) |
+ (FactoryHasMoreThanOneDeclBit << 2) |
+ FactoryBits;
+ LE.write<uint16_t>(FullInstanceBits);
+ LE.write<uint16_t>(FullFactoryBits);
+ for (const ObjCMethodList *Method = &Methods.Instance; Method;
+ Method = Method->getNext())
+ if (Method->getMethod())
+ LE.write<uint32_t>(Writer.getDeclID(Method->getMethod()));
+ for (const ObjCMethodList *Method = &Methods.Factory; Method;
+ Method = Method->getNext())
+ if (Method->getMethod())
+ LE.write<uint32_t>(Writer.getDeclID(Method->getMethod()));
+
+ assert(Out.tell() - Start == DataLen && "Data length is wrong");
+ }
+};
+} // end anonymous namespace
+
+/// \brief Write ObjC data: selectors and the method pool.
+///
+/// The method pool contains both instance and factory methods, stored
+/// in an on-disk hash table indexed by the selector. The hash table also
+/// contains an empty entry for every other selector known to Sema.
+void ASTWriter::WriteSelectors(Sema &SemaRef) {
+ using namespace llvm;
+
+ // Do we have to do anything at all?
+ if (SemaRef.MethodPool.empty() && SelectorIDs.empty())
+ return;
+ unsigned NumTableEntries = 0;
+ // Create and write out the blob that contains selectors and the method pool.
+ {
+ llvm::OnDiskChainedHashTableGenerator<ASTMethodPoolTrait> Generator;
+ ASTMethodPoolTrait Trait(*this);
+
+ // Create the on-disk hash table representation. We walk through every
+ // selector we've seen and look it up in the method pool.
+ SelectorOffsets.resize(NextSelectorID - FirstSelectorID);
+ for (auto &SelectorAndID : SelectorIDs) {
+ Selector S = SelectorAndID.first;
+ SelectorID ID = SelectorAndID.second;
+ Sema::GlobalMethodPool::iterator F = SemaRef.MethodPool.find(S);
+ ASTMethodPoolTrait::data_type Data = {
+ ID,
+ ObjCMethodList(),
+ ObjCMethodList()
+ };
+ if (F != SemaRef.MethodPool.end()) {
+ Data.Instance = F->second.first;
+ Data.Factory = F->second.second;
+ }
+ // Only write this selector if it's not in an existing AST or something
+ // changed.
+ if (Chain && ID < FirstSelectorID) {
+ // Selector already exists. Did it change?
+ bool changed = false;
+ for (ObjCMethodList *M = &Data.Instance;
+ !changed && M && M->getMethod(); M = M->getNext()) {
+ if (!M->getMethod()->isFromASTFile())
+ changed = true;
+ }
+ for (ObjCMethodList *M = &Data.Factory; !changed && M && M->getMethod();
+ M = M->getNext()) {
+ if (!M->getMethod()->isFromASTFile())
+ changed = true;
+ }
+ if (!changed)
+ continue;
+ } else if (Data.Instance.getMethod() || Data.Factory.getMethod()) {
+ // A new method pool entry.
+ ++NumTableEntries;
+ }
+ Generator.insert(S, Data, Trait);
+ }
+
+ // Create the on-disk hash table in a buffer.
+ SmallString<4096> MethodPool;
+ uint32_t BucketOffset;
+ {
+ using namespace llvm::support;
+ ASTMethodPoolTrait Trait(*this);
+ llvm::raw_svector_ostream Out(MethodPool);
+ // Make sure that no bucket is at offset 0
+ endian::Writer<little>(Out).write<uint32_t>(0);
+ BucketOffset = Generator.Emit(Out, Trait);
+ }
+
+ // Create a blob abbreviation
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(METHOD_POOL));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned MethodPoolAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the method pool
+ {
+ RecordData::value_type Record[] = {METHOD_POOL, BucketOffset,
+ NumTableEntries};
+ Stream.EmitRecordWithBlob(MethodPoolAbbrev, Record, MethodPool);
+ }
+
+ // Create a blob abbreviation for the selector table offsets.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SELECTOR_OFFSETS));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // size
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // first ID
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned SelectorOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the selector offsets table.
+ {
+ RecordData::value_type Record[] = {
+ SELECTOR_OFFSETS, SelectorOffsets.size(),
+ FirstSelectorID - NUM_PREDEF_SELECTOR_IDS};
+ Stream.EmitRecordWithBlob(SelectorOffsetAbbrev, Record,
+ bytes(SelectorOffsets));
+ }
+ }
+}
+
+/// \brief Write the selectors referenced in @selector expression into AST file.
+void ASTWriter::WriteReferencedSelectorsPool(Sema &SemaRef) {
+ using namespace llvm;
+ if (SemaRef.ReferencedSelectors.empty())
+ return;
+
+ RecordData Record;
+
+ // Note: this writes out all references even for a dependent AST. That is
+ // very tricky to fix, and given that @selector shouldn't really appear in
+ // headers, probably not worth it. It's not a correctness issue.
+ for (auto &SelectorAndLocation : SemaRef.ReferencedSelectors) {
+ Selector Sel = SelectorAndLocation.first;
+ SourceLocation Loc = SelectorAndLocation.second;
+ AddSelectorRef(Sel, Record);
+ AddSourceLocation(Loc, Record);
+ }
+ Stream.EmitRecord(REFERENCED_SELECTOR_POOL, Record);
+}
+
+//===----------------------------------------------------------------------===//
+// Identifier Table Serialization
+//===----------------------------------------------------------------------===//
+
+/// Determine the declaration that should be put into the name lookup table to
+/// represent the given declaration in this module. This is usually D itself,
+/// but if D was imported and merged into a local declaration, we want the most
+/// recent local declaration instead. The chosen declaration will be the most
+/// recent declaration in any module that imports this one.
+static NamedDecl *getDeclForLocalLookup(const LangOptions &LangOpts,
+ NamedDecl *D) {
+ if (!LangOpts.Modules || !D->isFromASTFile())
+ return D;
+
+ if (Decl *Redecl = D->getPreviousDecl()) {
+ // For Redeclarable decls, a prior declaration might be local.
+ for (; Redecl; Redecl = Redecl->getPreviousDecl()) {
+ if (!Redecl->isFromASTFile())
+ return cast<NamedDecl>(Redecl);
+ // If we find a decl from a (chained-)PCH, stop, since we won't find a
+ // local one.
+ if (D->getOwningModuleID() == 0)
+ break;
+ }
+ } else if (Decl *First = D->getCanonicalDecl()) {
+ // For Mergeable decls, the first decl might be local.
+ if (!First->isFromASTFile())
+ return cast<NamedDecl>(First);
+ }
+
+ // All declarations are imported. Our most recent declaration will also be
+ // the most recent one in anyone who imports us.
+ return D;
+}
+
+namespace {
+class ASTIdentifierTableTrait {
+ ASTWriter &Writer;
+ Preprocessor &PP;
+ IdentifierResolver &IdResolver;
+ bool IsModule;
+ bool NeedDecls;
+ ASTWriter::RecordData *InterestingIdentifierOffsets;
+
+ /// \brief Determines whether this is an "interesting" identifier that needs a
+ /// full IdentifierInfo structure written into the hash table. Notably, this
+ /// doesn't check whether the name has macros defined; use PublicMacroIterator
+ /// to check that.
+ bool isInterestingIdentifier(const IdentifierInfo *II, uint64_t MacroOffset) {
+ if (MacroOffset ||
+ II->isPoisoned() ||
+ (IsModule ? II->hasRevertedBuiltin() : II->getObjCOrBuiltinID()) ||
+ II->hasRevertedTokenIDToIdentifier() ||
+ (NeedDecls && II->getFETokenInfo<void>()))
+ return true;
+
+ return false;
+ }
+
+public:
+ typedef IdentifierInfo* key_type;
+ typedef key_type key_type_ref;
+
+ typedef IdentID data_type;
+ typedef data_type data_type_ref;
+
+ typedef unsigned hash_value_type;
+ typedef unsigned offset_type;
+
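+ // Note: identifier-attached decls are presumably only skippable for C++
+ // modules, where name lookup goes through the DeclContext lookup tables
+ // instead; hence the NeedDecls flag below.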
+ ASTIdentifierTableTrait(ASTWriter &Writer, Preprocessor &PP,
+ IdentifierResolver &IdResolver, bool IsModule,
+ ASTWriter::RecordData *InterestingIdentifierOffsets)
+ : Writer(Writer), PP(PP), IdResolver(IdResolver), IsModule(IsModule),
+ NeedDecls(!IsModule || !Writer.getLangOpts().CPlusPlus),
+ InterestingIdentifierOffsets(InterestingIdentifierOffsets) {}
+
+ static hash_value_type ComputeHash(const IdentifierInfo* II) {
+ return llvm::HashString(II->getName());
+ }
+
+ bool isInterestingIdentifier(const IdentifierInfo *II) {
+ auto MacroOffset = Writer.getMacroDirectivesOffset(II);
+ return isInterestingIdentifier(II, MacroOffset);
+ }
+ bool isInterestingNonMacroIdentifier(const IdentifierInfo *II) {
+ return isInterestingIdentifier(II, 0);
+ }
+
+ std::pair<unsigned,unsigned>
+ EmitKeyDataLength(raw_ostream& Out, IdentifierInfo* II, IdentID ID) {
+ unsigned KeyLen = II->getLength() + 1;
+ unsigned DataLen = 4; // 4 bytes for the persistent ID << 1
+ auto MacroOffset = Writer.getMacroDirectivesOffset(II);
+ if (isInterestingIdentifier(II, MacroOffset)) {
+ DataLen += 2; // 2 bytes for builtin ID
+ DataLen += 2; // 2 bytes for flags
+ if (MacroOffset)
+ DataLen += 4; // MacroDirectives offset.
+
+ if (NeedDecls) {
+ for (IdentifierResolver::iterator D = IdResolver.begin(II),
+ DEnd = IdResolver.end();
+ D != DEnd; ++D)
+ DataLen += 4;
+ }
+ }
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+
+ assert((uint16_t)DataLen == DataLen && (uint16_t)KeyLen == KeyLen);
+ LE.write<uint16_t>(DataLen);
+ // We emit the key length after the data length so that every
+ // string is preceded by a 16-bit length. This matches the PTH
+ // format for storing identifiers.
+ LE.write<uint16_t>(KeyLen);
+ return std::make_pair(KeyLen, DataLen);
+ }
+
+ void EmitKey(raw_ostream& Out, const IdentifierInfo* II,
+ unsigned KeyLen) {
+ // Record the location of the key data. This is used when generating
+ // the mapping from persistent IDs to strings.
+ Writer.SetIdentifierOffset(II, Out.tell());
+
+ // Emit the offset of the key/data length information to the interesting
+ // identifiers table if necessary.
+ if (InterestingIdentifierOffsets && isInterestingIdentifier(II))
+ InterestingIdentifierOffsets->push_back(Out.tell() - 4);
+
+ Out.write(II->getNameStart(), KeyLen);
+ }
+
+ void EmitData(raw_ostream& Out, IdentifierInfo* II,
+ IdentID ID, unsigned) {
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+
+ auto MacroOffset = Writer.getMacroDirectivesOffset(II);
+ if (!isInterestingIdentifier(II, MacroOffset)) {
+ LE.write<uint32_t>(ID << 1);
+ return;
+ }
+
+ LE.write<uint32_t>((ID << 1) | 0x01);
+ uint32_t Bits = (uint32_t)II->getObjCOrBuiltinID();
+ assert((Bits & 0xffff) == Bits && "ObjCOrBuiltinID too big for ASTReader.");
+ LE.write<uint16_t>(Bits);
+ Bits = 0;
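+ // Pack the remaining boolean properties of the identifier into a single
+ // 16-bit flag word.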
+ bool HadMacroDefinition = MacroOffset != 0;
+ Bits = (Bits << 1) | unsigned(HadMacroDefinition);
+ Bits = (Bits << 1) | unsigned(II->isExtensionToken());
+ Bits = (Bits << 1) | unsigned(II->isPoisoned());
+ Bits = (Bits << 1) | unsigned(II->hasRevertedBuiltin());
+ Bits = (Bits << 1) | unsigned(II->hasRevertedTokenIDToIdentifier());
+ Bits = (Bits << 1) | unsigned(II->isCPlusPlusOperatorKeyword());
+ LE.write<uint16_t>(Bits);
+
+ if (HadMacroDefinition)
+ LE.write<uint32_t>(MacroOffset);
+
+ if (NeedDecls) {
+ // Emit the declaration IDs in reverse order, because the
+ // IdentifierResolver provides the declarations as they would be
+ // visible (e.g., the function "stat" would come before the struct
+ // "stat"), but the ASTReader adds declarations to the end of the list
+ // (so we need to see the struct "stat" before the function "stat").
+ // Only emit declarations that aren't from a chained PCH, though.
+ SmallVector<NamedDecl *, 16> Decls(IdResolver.begin(II),
+ IdResolver.end());
+ for (SmallVectorImpl<NamedDecl *>::reverse_iterator D = Decls.rbegin(),
+ DEnd = Decls.rend();
+ D != DEnd; ++D)
+ LE.write<uint32_t>(
+ Writer.getDeclID(getDeclForLocalLookup(PP.getLangOpts(), *D)));
+ }
+ }
+};
+} // end anonymous namespace
+
+/// \brief Write the identifier table into the AST file.
+///
+/// The identifier table consists of a blob containing string data
+/// (the actual identifiers themselves) and a separate "offsets" index
+/// that maps identifier IDs to locations within the blob.
+void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
+ IdentifierResolver &IdResolver,
+ bool IsModule) {
+ using namespace llvm;
+
+ RecordData InterestingIdents;
+
+ // Create and write out the blob that contains the identifier
+ // strings.
+ {
+ llvm::OnDiskChainedHashTableGenerator<ASTIdentifierTableTrait> Generator;
+ ASTIdentifierTableTrait Trait(
+ *this, PP, IdResolver, IsModule,
+ (getLangOpts().CPlusPlus && IsModule) ? &InterestingIdents : nullptr);
+
+ // Look for any identifiers that were named while processing the
+ // headers, but are otherwise not needed. We add these to the hash
+ // table to enable checking of the predefines buffer in the case
+ // where the user adds new macro definitions when building the AST
+ // file.
+ SmallVector<const IdentifierInfo *, 128> IIs;
+ for (const auto &ID : PP.getIdentifierTable())
+ IIs.push_back(ID.second);
+ // Sort the identifiers lexicographically before assigning them references
+ // so that their order is stable.
+ std::sort(IIs.begin(), IIs.end(), llvm::less_ptr<IdentifierInfo>());
+ for (const IdentifierInfo *II : IIs)
+ if (Trait.isInterestingNonMacroIdentifier(II))
+ getIdentifierRef(II);
+
+ // Create the on-disk hash table representation. We only store offsets
+ // for identifiers that appear here for the first time.
+ IdentifierOffsets.resize(NextIdentID - FirstIdentID);
+ for (auto IdentIDPair : IdentifierIDs) {
+ auto *II = const_cast<IdentifierInfo *>(IdentIDPair.first);
+ IdentID ID = IdentIDPair.second;
+ assert(II && "NULL identifier in identifier table");
+ if (!Chain || !II->isFromAST() || II->hasChangedSinceDeserialization())
+ Generator.insert(II, ID, Trait);
+ }
+
+ // Create the on-disk hash table in a buffer.
+ SmallString<4096> IdentifierTable;
+ uint32_t BucketOffset;
+ {
+ using namespace llvm::support;
+ llvm::raw_svector_ostream Out(IdentifierTable);
+ // Make sure that no bucket is at offset 0
+ endian::Writer<little>(Out).write<uint32_t>(0);
+ BucketOffset = Generator.Emit(Out, Trait);
+ }
+
+ // Create a blob abbreviation
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(IDENTIFIER_TABLE));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned IDTableAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the identifier table
+ RecordData::value_type Record[] = {IDENTIFIER_TABLE, BucketOffset};
+ Stream.EmitRecordWithBlob(IDTableAbbrev, Record, IdentifierTable);
+ }
+
+ // Write the offsets table for identifier IDs.
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(IDENTIFIER_OFFSET));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of identifiers
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // first ID
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned IdentifierOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
+
+#ifndef NDEBUG
+ for (unsigned I = 0, N = IdentifierOffsets.size(); I != N; ++I)
+ assert(IdentifierOffsets[I] && "Missing identifier offset?");
+#endif
+
+ RecordData::value_type Record[] = {IDENTIFIER_OFFSET,
+ IdentifierOffsets.size(),
+ FirstIdentID - NUM_PREDEF_IDENT_IDS};
+ Stream.EmitRecordWithBlob(IdentifierOffsetAbbrev, Record,
+ bytes(IdentifierOffsets));
+
+ // In C++, write the list of interesting identifiers (those that are
+ // defined as macros, poisoned, or similar unusual things).
+ if (!InterestingIdents.empty())
+ Stream.EmitRecord(INTERESTING_IDENTIFIERS, InterestingIdents);
+}
+
+//===----------------------------------------------------------------------===//
+// DeclContext's Name Lookup Table Serialization
+//===----------------------------------------------------------------------===//
+
+namespace {
+ // Trait used for the on-disk hash table used for a DeclContext's name
+ // lookup table.
+class ASTDeclContextNameLookupTrait {
+ ASTWriter &Writer;
+ llvm::SmallVector<DeclID, 64> DeclIDs;
+
+public:
+ typedef DeclarationNameKey key_type;
+ typedef key_type key_type_ref;
+
+ /// A start and end index into DeclIDs, representing a sequence of decls.
+ typedef std::pair<unsigned, unsigned> data_type;
+ typedef const data_type& data_type_ref;
+
+ typedef unsigned hash_value_type;
+ typedef unsigned offset_type;
+
+ explicit ASTDeclContextNameLookupTrait(ASTWriter &Writer) : Writer(Writer) { }
+
+ template<typename Coll>
+ data_type getData(const Coll &Decls) {
+ unsigned Start = DeclIDs.size();
+ for (NamedDecl *D : Decls) {
+ DeclIDs.push_back(
+ Writer.GetDeclRef(getDeclForLocalLookup(Writer.getLangOpts(), D)));
+ }
+ return std::make_pair(Start, DeclIDs.size());
+ }
+
+ data_type ImportData(const reader::ASTDeclContextNameLookupTrait::data_type &FromReader) {
+ unsigned Start = DeclIDs.size();
+ for (auto ID : FromReader)
+ DeclIDs.push_back(ID);
+ return std::make_pair(Start, DeclIDs.size());
+ }
+
+ static bool EqualKey(key_type_ref a, key_type_ref b) {
+ return a == b;
+ }
+
+ hash_value_type ComputeHash(DeclarationNameKey Name) {
+ return Name.getHash();
+ }
+
+ void EmitFileRef(raw_ostream &Out, ModuleFile *F) const {
+ assert(Writer.hasChain() &&
+ "have reference to loaded module file but no chain?");
+
+ using namespace llvm::support;
+ endian::Writer<little>(Out)
+ .write<uint32_t>(Writer.getChain()->getModuleFileID(F));
+ }
+
+ std::pair<unsigned, unsigned> EmitKeyDataLength(raw_ostream &Out,
+ DeclarationNameKey Name,
+ data_type_ref Lookup) {
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+ unsigned KeyLen = 1;
+ switch (Name.getKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXLiteralOperatorName:
+ KeyLen += 4;
+ break;
+ case DeclarationName::CXXOperatorName:
+ KeyLen += 1;
+ break;
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ case DeclarationName::CXXUsingDirective:
+ break;
+ }
+ LE.write<uint16_t>(KeyLen);
+
+ // 4 bytes for each DeclID.
+ unsigned DataLen = 4 * (Lookup.second - Lookup.first);
+ assert(uint16_t(DataLen) == DataLen &&
+ "too many decls for serialized lookup result");
+ LE.write<uint16_t>(DataLen);
+
+ return std::make_pair(KeyLen, DataLen);
+ }
+
+ void EmitKey(raw_ostream &Out, DeclarationNameKey Name, unsigned) {
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+ LE.write<uint8_t>(Name.getKind());
+ switch (Name.getKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::CXXLiteralOperatorName:
+ LE.write<uint32_t>(Writer.getIdentifierRef(Name.getIdentifier()));
+ return;
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ LE.write<uint32_t>(Writer.getSelectorRef(Name.getSelector()));
+ return;
+ case DeclarationName::CXXOperatorName:
+ assert(Name.getOperatorKind() < NUM_OVERLOADED_OPERATORS &&
+ "Invalid operator?");
+ LE.write<uint8_t>(Name.getOperatorKind());
+ return;
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ case DeclarationName::CXXUsingDirective:
+ return;
+ }
+
+ llvm_unreachable("Invalid name kind?");
+ }
+
+ void EmitData(raw_ostream &Out, key_type_ref, data_type Lookup,
+ unsigned DataLen) {
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+ uint64_t Start = Out.tell(); (void)Start;
+ for (unsigned I = Lookup.first, N = Lookup.second; I != N; ++I)
+ LE.write<uint32_t>(DeclIDs[I]);
+ assert(Out.tell() - Start == DataLen && "Data length is wrong");
+ }
+};
+} // end anonymous namespace
+
+bool ASTWriter::isLookupResultExternal(StoredDeclsList &Result,
+ DeclContext *DC) {
+ return Result.hasExternalDecls() && DC->NeedToReconcileExternalVisibleStorage;
+}
+
+bool ASTWriter::isLookupResultEntirelyExternal(StoredDeclsList &Result,
+ DeclContext *DC) {
+ for (auto *D : Result.getLookupResult())
+ if (!getDeclForLocalLookup(getLangOpts(), D)->isFromASTFile())
+ return false;
+
+ return true;
+}
+
+void
+ASTWriter::GenerateNameLookupTable(const DeclContext *ConstDC,
+ llvm::SmallVectorImpl<char> &LookupTable) {
+ assert(!ConstDC->HasLazyLocalLexicalLookups &&
+ !ConstDC->HasLazyExternalLexicalLookups &&
+ "must call buildLookups first");
+
+ // FIXME: We need to build the lookups table, which is logically const.
+ auto *DC = const_cast<DeclContext*>(ConstDC);
+ assert(DC == DC->getPrimaryContext() && "only primary DC has lookup table");
+
+ // Create the on-disk hash table representation.
+ MultiOnDiskHashTableGenerator<reader::ASTDeclContextNameLookupTrait,
+ ASTDeclContextNameLookupTrait> Generator;
+ ASTDeclContextNameLookupTrait Trait(*this);
+
+ // The first step is to collect the declaration names which we need to
+ // serialize into the name lookup table, and to collect them in a stable
+ // order.
+ SmallVector<DeclarationName, 16> Names;
+
+ // We also build up small sets of the constructor and conversion function
+ // names which are visible.
+ llvm::SmallSet<DeclarationName, 8> ConstructorNameSet, ConversionNameSet;
+
+ for (auto &Lookup : *DC->buildLookup()) {
+ auto &Name = Lookup.first;
+ auto &Result = Lookup.second;
+
+ // If there are no local declarations in our lookup result, we
+ // don't need to write an entry for the name at all. If we can't
+ // write out a lookup set without performing more deserialization,
+ // just skip this entry.
+ if (isLookupResultExternal(Result, DC) &&
+ isLookupResultEntirelyExternal(Result, DC))
+ continue;
+
+ // We also skip empty results. If any of the results could be external and
+ // the currently available results are empty, then all of the results are
+ // external and we skip it above. So the only way we get here with empty
+ // results is when none of the results could have been external *and* the
+ // lookup genuinely produced no results.
+ //
+ // FIXME: While we might want to start emitting on-disk entries for negative
+ // lookups into a decl context as an optimization, today we *have* to skip
+ // them because there are names with empty lookup results in decl contexts
+ // which we can't emit in any stable ordering: we lookup constructors and
+ // conversion functions in the enclosing namespace scope, creating empty
+ // results for them. This is almost certainly a bug in Clang's name lookup,
+ // but that is likely to be hard or impossible to fix, so we tolerate it
+ // here by omitting lookups with empty results.
+ if (Lookup.second.getLookupResult().empty())
+ continue;
+
+ switch (Lookup.first.getNameKind()) {
+ default:
+ Names.push_back(Lookup.first);
+ break;
+
+ case DeclarationName::CXXConstructorName:
+ assert(isa<CXXRecordDecl>(DC) &&
+ "Cannot have a constructor name outside of a class!");
+ ConstructorNameSet.insert(Name);
+ break;
+
+ case DeclarationName::CXXConversionFunctionName:
+ assert(isa<CXXRecordDecl>(DC) &&
+ "Cannot have a conversion function name outside of a class!");
+ ConversionNameSet.insert(Name);
+ break;
+ }
+ }
+
+ // Sort the names into a stable order.
+ std::sort(Names.begin(), Names.end());
+
+ if (auto *D = dyn_cast<CXXRecordDecl>(DC)) {
+ // We need to establish an ordering of constructor and conversion function
+ // names, and they don't have an intrinsic ordering.
+
+ // First we try the easy case by forming the current context's constructor
+ // name and adding that name first. This is a very useful optimization to
+ // avoid walking the lexical declarations in many cases, and it also
+ // handles the only case where a constructor name can come from some other
+ // lexical context -- when that name is an implicit constructor merged from
+ // another declaration in the redecl chain. Any non-implicit constructor or
+ // conversion function which doesn't occur in all the lexical contexts
+ // would be an ODR violation.
+ auto ImplicitCtorName = Context->DeclarationNames.getCXXConstructorName(
+ Context->getCanonicalType(Context->getRecordType(D)));
+ if (ConstructorNameSet.erase(ImplicitCtorName))
+ Names.push_back(ImplicitCtorName);
+
+ // If we still have constructors or conversion functions, we walk all the
+ // names in the decl and add the constructors and conversion functions
+ // which are visible in the order they lexically occur within the context.
+ if (!ConstructorNameSet.empty() || !ConversionNameSet.empty())
+ for (Decl *ChildD : cast<CXXRecordDecl>(DC)->decls())
+ if (auto *ChildND = dyn_cast<NamedDecl>(ChildD)) {
+ auto Name = ChildND->getDeclName();
+ switch (Name.getNameKind()) {
+ default:
+ continue;
+
+ case DeclarationName::CXXConstructorName:
+ if (ConstructorNameSet.erase(Name))
+ Names.push_back(Name);
+ break;
+
+ case DeclarationName::CXXConversionFunctionName:
+ if (ConversionNameSet.erase(Name))
+ Names.push_back(Name);
+ break;
+ }
+
+ if (ConstructorNameSet.empty() && ConversionNameSet.empty())
+ break;
+ }
+
+ assert(ConstructorNameSet.empty() && "Failed to find all of the visible "
+ "constructors by walking all the "
+ "lexical members of the context.");
+ assert(ConversionNameSet.empty() && "Failed to find all of the visible "
+ "conversion functions by walking all "
+ "the lexical members of the context.");
+ }
+
+ // Next we need to do a lookup with each name into this decl context to fully
+ // populate any results from external sources. We don't actually use the
+ // results of these lookups because we only want to use the results after all
+ // results have been loaded and the pointers into them will be stable.
+ for (auto &Name : Names)
+ DC->lookup(Name);
+
+ // Now we need to insert the results for each name into the hash table. For
+ // constructor names and conversion function names, we actually need to merge
+ // all of the results for them into one list of results each and insert
+ // those.
+ SmallVector<NamedDecl *, 8> ConstructorDecls;
+ SmallVector<NamedDecl *, 8> ConversionDecls;
+
+ // Now loop over the names, either inserting them or appending for the two
+ // special cases.
+ for (auto &Name : Names) {
+ DeclContext::lookup_result Result = DC->noload_lookup(Name);
+
+ switch (Name.getNameKind()) {
+ default:
+ Generator.insert(Name, Trait.getData(Result), Trait);
+ break;
+
+ case DeclarationName::CXXConstructorName:
+ ConstructorDecls.append(Result.begin(), Result.end());
+ break;
+
+ case DeclarationName::CXXConversionFunctionName:
+ ConversionDecls.append(Result.begin(), Result.end());
+ break;
+ }
+ }
+
+ // Handle our two special cases if we ended up having any. We arbitrarily use
+ // the first declaration's name here because the name itself isn't part of
+ // the key, only the kind of name is used.
+ if (!ConstructorDecls.empty())
+ Generator.insert(ConstructorDecls.front()->getDeclName(),
+ Trait.getData(ConstructorDecls), Trait);
+ if (!ConversionDecls.empty())
+ Generator.insert(ConversionDecls.front()->getDeclName(),
+ Trait.getData(ConversionDecls), Trait);
+
+ // Create the on-disk hash table. Also emit the existing imported and
+ // merged table if there is one.
+ auto *Lookups = Chain ? Chain->getLoadedLookupTables(DC) : nullptr;
+ Generator.emit(LookupTable, Trait, Lookups ? &Lookups->Table : nullptr);
+}
+
+/// \brief Write the block containing all of the declaration IDs
+/// visible from the given DeclContext.
+///
+/// \returns the offset of the DECL_CONTEXT_VISIBLE block within the
+/// bitstream, or 0 if no block was written.
+uint64_t ASTWriter::WriteDeclContextVisibleBlock(ASTContext &Context,
+ DeclContext *DC) {
+ // If we imported a key declaration of this namespace, write the visible
+ // lookup results as an update record for it rather than including them
+ // on this declaration. We will only look at key declarations on reload.
+ if (isa<NamespaceDecl>(DC) && Chain &&
+ Chain->getKeyDeclaration(cast<Decl>(DC))->isFromASTFile()) {
+ // Only do this once, for the first local declaration of the namespace.
+ for (auto *Prev = cast<NamespaceDecl>(DC)->getPreviousDecl(); Prev;
+ Prev = Prev->getPreviousDecl())
+ if (!Prev->isFromASTFile())
+ return 0;
+
+ // Note that we need to emit an update record for the primary context.
+ UpdatedDeclContexts.insert(DC->getPrimaryContext());
+
+ // Make sure all visible decls are written. They will be recorded later. We
+ // do this using a side data structure so we can sort the names into
+ // a deterministic order.
+ StoredDeclsMap *Map = DC->getPrimaryContext()->buildLookup();
+ SmallVector<std::pair<DeclarationName, DeclContext::lookup_result>, 16>
+ LookupResults;
+ if (Map) {
+ LookupResults.reserve(Map->size());
+ for (auto &Entry : *Map)
+ LookupResults.push_back(
+ std::make_pair(Entry.first, Entry.second.getLookupResult()));
+ }
+
+ std::sort(LookupResults.begin(), LookupResults.end(), llvm::less_first());
+ for (auto &NameAndResult : LookupResults) {
+ DeclarationName Name = NameAndResult.first;
+ DeclContext::lookup_result Result = NameAndResult.second;
+ if (Name.getNameKind() == DeclarationName::CXXConstructorName ||
+ Name.getNameKind() == DeclarationName::CXXConversionFunctionName) {
+ // We have to work around a name lookup bug here where negative lookup
+ // results for these names get cached in namespace lookup tables (these
+ // names should never be looked up in a namespace).
+ assert(Result.empty() && "Cannot have a constructor or conversion "
+ "function name in a namespace!");
+ continue;
+ }
+
+ for (NamedDecl *ND : Result)
+ if (!ND->isFromASTFile())
+ GetDeclRef(ND);
+ }
+
+ return 0;
+ }
+
+ if (DC->getPrimaryContext() != DC)
+ return 0;
+
+ // Skip contexts which don't support name lookup.
+ if (!DC->isLookupContext())
+ return 0;
+
+ // If not in C++, we perform name lookup for the translation unit via the
+ // IdentifierInfo chains, so don't bother building a visible-declarations
+ // table.
+ if (DC->isTranslationUnit() && !Context.getLangOpts().CPlusPlus)
+ return 0;
+
+ // Serialize the contents of the mapping used for lookup. Note that,
+ // although we have two very different code paths, the serialized
+ // representation is the same for both cases: a declaration name,
+ // followed by a size, followed by references to the visible
+ // declarations that have that name.
+ uint64_t Offset = Stream.GetCurrentBitNo();
+ StoredDeclsMap *Map = DC->buildLookup();
+ if (!Map || Map->empty())
+ return 0;
+
+ // Create the on-disk hash table in a buffer.
+ SmallString<4096> LookupTable;
+ GenerateNameLookupTable(DC, LookupTable);
+
+ // Write the lookup table
+ RecordData::value_type Record[] = {DECL_CONTEXT_VISIBLE};
+ Stream.EmitRecordWithBlob(DeclContextVisibleLookupAbbrev, Record,
+ LookupTable);
+ ++NumVisibleDeclContexts;
+ return Offset;
+}
+
+/// \brief Write an UPDATE_VISIBLE block for the given context.
+///
+/// UPDATE_VISIBLE blocks contain the declarations that are added to an existing
+/// DeclContext in a dependent AST file. As such, they only exist for the TU
+/// (in C++), for namespaces, and for classes with forward-declared unscoped
+/// enumeration members (in C++11).
+void ASTWriter::WriteDeclContextVisibleUpdate(const DeclContext *DC) {
+ StoredDeclsMap *Map = DC->getLookupPtr();
+ if (!Map || Map->empty())
+ return;
+
+ // Create the on-disk hash table in a buffer.
+ SmallString<4096> LookupTable;
+ GenerateNameLookupTable(DC, LookupTable);
+
+ // If we're updating a namespace, select a key declaration as the key for the
+ // update record; those are the only ones that will be checked on reload.
+ if (isa<NamespaceDecl>(DC))
+ DC = cast<DeclContext>(Chain->getKeyDeclaration(cast<Decl>(DC)));
+
+ // Write the lookup table
+ RecordData::value_type Record[] = {UPDATE_VISIBLE, getDeclID(cast<Decl>(DC))};
+ Stream.EmitRecordWithBlob(UpdateVisibleAbbrev, Record, LookupTable);
+}
+
+/// \brief Write an FP_PRAGMA_OPTIONS block for the given FPOptions.
+void ASTWriter::WriteFPPragmaOptions(const FPOptions &Opts) {
+ RecordData::value_type Record[] = {Opts.fp_contract};
+ Stream.EmitRecord(FP_PRAGMA_OPTIONS, Record);
+}
+
+/// \brief Write an OPENCL_EXTENSIONS block for the given OpenCLOptions.
+void ASTWriter::WriteOpenCLExtensions(Sema &SemaRef) {
+ if (!SemaRef.Context.getLangOpts().OpenCL)
+ return;
+
+ const OpenCLOptions &Opts = SemaRef.getOpenCLOptions();
+ RecordData Record;
+#define OPENCLEXT(nm) Record.push_back(Opts.nm);
+#include "clang/Basic/OpenCLExtensions.def"
+ Stream.EmitRecord(OPENCL_EXTENSIONS, Record);
+}
+
+void ASTWriter::WriteObjCCategories() {
+ SmallVector<ObjCCategoriesInfo, 2> CategoriesMap;
+ RecordData Categories;
+
+ for (unsigned I = 0, N = ObjCClassesWithCategories.size(); I != N; ++I) {
+ unsigned Size = 0;
+ unsigned StartIndex = Categories.size();
+
+ ObjCInterfaceDecl *Class = ObjCClassesWithCategories[I];
+
+ // Allocate space for the size.
+ Categories.push_back(0);
+
+ // Add the categories.
+ for (ObjCInterfaceDecl::known_categories_iterator
+ Cat = Class->known_categories_begin(),
+ CatEnd = Class->known_categories_end();
+ Cat != CatEnd; ++Cat, ++Size) {
+ assert(getDeclID(*Cat) != 0 && "Bogus category");
+ AddDeclRef(*Cat, Categories);
+ }
+
+ // Update the size.
+ Categories[StartIndex] = Size;
+
+ // Record this interface -> category map.
+ ObjCCategoriesInfo CatInfo = { getDeclID(Class), StartIndex };
+ CategoriesMap.push_back(CatInfo);
+ }
+
+ // Sort the categories map by the definition ID, since the reader will be
+ // performing binary searches on this information.
+ llvm::array_pod_sort(CategoriesMap.begin(), CategoriesMap.end());
+
+ // Emit the categories map.
+ using namespace llvm;
+
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(OBJC_CATEGORIES_MAP));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // # of entries
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned AbbrevID = Stream.EmitAbbrev(Abbrev);
+
+ RecordData::value_type Record[] = {OBJC_CATEGORIES_MAP, CategoriesMap.size()};
+ Stream.EmitRecordWithBlob(AbbrevID, Record,
+ reinterpret_cast<char *>(CategoriesMap.data()),
+ CategoriesMap.size() * sizeof(ObjCCategoriesInfo));
+
+ // Emit the category lists.
+ Stream.EmitRecord(OBJC_CATEGORIES, Categories);
+}
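+
+// Editorial sketch: because CategoriesMap is sorted by definition ID, a
+// reader can find an interface's categories with a binary search and then
+// index into the OBJC_CATEGORIES record, whose entry at the stored offset is
+// the count written into the placeholder slot above. Roughly (Map, MapEnd,
+// and LocalID are stand-ins for the reader-side state):
+//
+//   const ObjCCategoriesInfo *Known =
+//       std::lower_bound(Map, MapEnd, LocalID,
+//                        [](const ObjCCategoriesInfo &Info, unsigned ID) {
+//                          return Info.getDefinitionID() < ID;
+//                        });
+//   // Known->Offset indexes Categories; Categories[Known->Offset] is the
+//   // category count, followed by that many category decl IDs.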
+
+void ASTWriter::WriteLateParsedTemplates(Sema &SemaRef) {
+ Sema::LateParsedTemplateMapT &LPTMap = SemaRef.LateParsedTemplateMap;
+
+ if (LPTMap.empty())
+ return;
+
+ RecordData Record;
+ for (auto LPTMapEntry : LPTMap) {
+ const FunctionDecl *FD = LPTMapEntry.first;
+ LateParsedTemplate *LPT = LPTMapEntry.second;
+ AddDeclRef(FD, Record);
+ AddDeclRef(LPT->D, Record);
+ Record.push_back(LPT->Toks.size());
+
+ for (const auto &Tok : LPT->Toks) {
+ AddToken(Tok, Record);
+ }
+ }
+ Stream.EmitRecord(LATE_PARSED_TEMPLATE, Record);
+}
+
+/// \brief Write the state of 'pragma clang optimize' at the end of the module.
+void ASTWriter::WriteOptimizePragmaOptions(Sema &SemaRef) {
+ RecordData Record;
+ SourceLocation PragmaLoc = SemaRef.getOptimizeOffPragmaLocation();
+ AddSourceLocation(PragmaLoc, Record);
+ Stream.EmitRecord(OPTIMIZE_PRAGMA_OPTIONS, Record);
+}
+
+void ASTWriter::WriteModuleFileExtension(Sema &SemaRef,
+ ModuleFileExtensionWriter &Writer) {
+ // Enter the extension block.
+ Stream.EnterSubblock(EXTENSION_BLOCK_ID, 4);
+
+ // Emit the metadata record abbreviation.
+ auto *Abv = new llvm::BitCodeAbbrev();
+ Abv->Add(llvm::BitCodeAbbrevOp(EXTENSION_METADATA));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, 6));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, 6));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, 6));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, 6));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Blob));
+ unsigned Abbrev = Stream.EmitAbbrev(Abv);
+
+ // Emit the metadata record.
+ RecordData Record;
+ auto Metadata = Writer.getExtension()->getExtensionMetadata();
+ Record.push_back(EXTENSION_METADATA);
+ Record.push_back(Metadata.MajorVersion);
+ Record.push_back(Metadata.MinorVersion);
+ Record.push_back(Metadata.BlockName.size());
+ Record.push_back(Metadata.UserInfo.size());
+ SmallString<64> Buffer;
+ Buffer += Metadata.BlockName;
+ Buffer += Metadata.UserInfo;
+ Stream.EmitRecordWithBlob(Abbrev, Record, Buffer);
+
+ // Emit the contents of the extension block.
+ Writer.writeExtensionContents(SemaRef, Stream);
+
+ // Exit the extension block.
+ Stream.ExitBlock();
+}
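+
+// Editorial note: BlockName and UserInfo are concatenated into a single blob,
+// with their lengths carried in the record, so a reader can split the blob
+// without any separator bytes. A sketch, assuming the fields come back in the
+// order they were pushed (code, major, minor, name length, user-info length):
+//
+//   size_t NameLen = Record[3], InfoLen = Record[4];
+//   StringRef BlockName = Blob.substr(0, NameLen);
+//   StringRef UserInfo = Blob.substr(NameLen, InfoLen);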
+
+//===----------------------------------------------------------------------===//
+// General Serialization Routines
+//===----------------------------------------------------------------------===//
+
+/// \brief Write a record containing the given attributes.
+void ASTWriter::WriteAttributes(ArrayRef<const Attr*> Attrs,
+ RecordDataImpl &Record) {
+ Record.push_back(Attrs.size());
+ for (const auto *A : Attrs) {
+ Record.push_back(A->getKind()); // FIXME: stable encoding, target attrs
+ AddSourceRange(A->getRange(), Record);
+
+#include "clang/Serialization/AttrPCHWrite.inc"
+
+ }
+}
+
+void ASTWriter::AddToken(const Token &Tok, RecordDataImpl &Record) {
+ AddSourceLocation(Tok.getLocation(), Record);
+ Record.push_back(Tok.getLength());
+
+ // FIXME: When reading literal tokens, reconstruct the literal pointer
+ // if it is needed.
+ AddIdentifierRef(Tok.getIdentifierInfo(), Record);
+ // FIXME: Should translate token kind to a stable encoding.
+ Record.push_back(Tok.getKind());
+ // FIXME: Should translate token flags to a stable encoding.
+ Record.push_back(Tok.getFlags());
+}
+
+void ASTWriter::AddString(StringRef Str, RecordDataImpl &Record) {
+ Record.push_back(Str.size());
+ Record.insert(Record.end(), Str.begin(), Str.end());
+}
+
+bool ASTWriter::PreparePathForOutput(SmallVectorImpl<char> &Path) {
+ assert(Context && "should have context when outputting path");
+
+ bool Changed =
+ cleanPathForOutput(Context->getSourceManager().getFileManager(), Path);
+
+ // Remove a prefix to make the path relative, if relevant.
+ const char *PathBegin = Path.data();
+ const char *PathPtr =
+ adjustFilenameForRelocatableAST(PathBegin, BaseDirectory);
+ if (PathPtr != PathBegin) {
+ Path.erase(Path.begin(), Path.begin() + (PathPtr - PathBegin));
+ Changed = true;
+ }
+
+ return Changed;
+}
+
+void ASTWriter::AddPath(StringRef Path, RecordDataImpl &Record) {
+ SmallString<128> FilePath(Path);
+ PreparePathForOutput(FilePath);
+ AddString(FilePath, Record);
+}
+
+void ASTWriter::EmitRecordWithPath(unsigned Abbrev, RecordDataRef Record,
+ StringRef Path) {
+ SmallString<128> FilePath(Path);
+ PreparePathForOutput(FilePath);
+ Stream.EmitRecordWithBlob(Abbrev, Record, FilePath);
+}
+
+void ASTWriter::AddVersionTuple(const VersionTuple &Version,
+ RecordDataImpl &Record) {
+ Record.push_back(Version.getMajor());
+ if (Optional<unsigned> Minor = Version.getMinor())
+ Record.push_back(*Minor + 1);
+ else
+ Record.push_back(0);
+ if (Optional<unsigned> Subminor = Version.getSubminor())
+ Record.push_back(*Subminor + 1);
+ else
+ Record.push_back(0);
+}
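+
+// Editorial sketch: the minor and subminor components are biased by one so
+// that zero can encode "component absent". A matching decoder (Idx is a
+// cursor into the record) would look like:
+//
+//   VersionTuple ReadVersionTuple(const RecordData &Record, unsigned &Idx) {
+//     unsigned Major = Record[Idx++];
+//     unsigned Minor = Record[Idx++], Subminor = Record[Idx++];
+//     if (Minor == 0)
+//       return VersionTuple(Major);
+//     if (Subminor == 0)
+//       return VersionTuple(Major, Minor - 1);
+//     return VersionTuple(Major, Minor - 1, Subminor - 1);
+//   }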
+
+/// \brief Note that the identifier II occurs at the given offset
+/// within the identifier table.
+void ASTWriter::SetIdentifierOffset(const IdentifierInfo *II, uint32_t Offset) {
+ IdentID ID = IdentifierIDs[II];
+ // Only store offsets new to this AST file. Other identifier names are looked
+ // up earlier in the chain and thus don't need an offset.
+ if (ID >= FirstIdentID)
+ IdentifierOffsets[ID - FirstIdentID] = Offset;
+}
+
+/// \brief Note that the selector Sel occurs at the given offset
+/// within the method pool/selector table.
+void ASTWriter::SetSelectorOffset(Selector Sel, uint32_t Offset) {
+ unsigned ID = SelectorIDs[Sel];
+ assert(ID && "Unknown selector");
+ // Don't record offsets for selectors that are also available in a different
+ // file.
+ if (ID < FirstSelectorID)
+ return;
+ SelectorOffsets[ID - FirstSelectorID] = Offset;
+}
+
+ASTWriter::ASTWriter(
+ llvm::BitstreamWriter &Stream,
+ ArrayRef<llvm::IntrusiveRefCntPtr<ModuleFileExtension>> Extensions,
+ bool IncludeTimestamps)
+ : Stream(Stream), Context(nullptr), PP(nullptr), Chain(nullptr),
+ WritingModule(nullptr), IncludeTimestamps(IncludeTimestamps),
+ WritingAST(false), DoneWritingDeclsAndTypes(false),
+ ASTHasCompilerErrors(false), FirstDeclID(NUM_PREDEF_DECL_IDS),
+ NextDeclID(FirstDeclID), FirstTypeID(NUM_PREDEF_TYPE_IDS),
+ NextTypeID(FirstTypeID), FirstIdentID(NUM_PREDEF_IDENT_IDS),
+ NextIdentID(FirstIdentID), FirstMacroID(NUM_PREDEF_MACRO_IDS),
+ NextMacroID(FirstMacroID), FirstSubmoduleID(NUM_PREDEF_SUBMODULE_IDS),
+ NextSubmoduleID(FirstSubmoduleID),
+ FirstSelectorID(NUM_PREDEF_SELECTOR_IDS), NextSelectorID(FirstSelectorID),
+ CollectedStmts(&StmtsToEmit), NumStatements(0), NumMacros(0),
+ NumLexicalDeclContexts(0), NumVisibleDeclContexts(0),
+ NextCXXBaseSpecifiersID(1), NextCXXCtorInitializersID(1),
+ TypeExtQualAbbrev(0), TypeFunctionProtoAbbrev(0), DeclParmVarAbbrev(0),
+ DeclContextLexicalAbbrev(0), DeclContextVisibleLookupAbbrev(0),
+ UpdateVisibleAbbrev(0), DeclRecordAbbrev(0), DeclTypedefAbbrev(0),
+ DeclVarAbbrev(0), DeclFieldAbbrev(0), DeclEnumAbbrev(0),
+ DeclObjCIvarAbbrev(0), DeclCXXMethodAbbrev(0), DeclRefExprAbbrev(0),
+ CharacterLiteralAbbrev(0), IntegerLiteralAbbrev(0),
+ ExprImplicitCastAbbrev(0) {
+ for (const auto &Ext : Extensions) {
+ if (auto Writer = Ext->createExtensionWriter(*this))
+ ModuleFileExtensionWriters.push_back(std::move(Writer));
+ }
+}
+
+ASTWriter::~ASTWriter() {
+ llvm::DeleteContainerSeconds(FileDeclIDs);
+}
+
+const LangOptions &ASTWriter::getLangOpts() const {
+ assert(WritingAST && "can't determine lang opts when not writing AST");
+ return Context->getLangOpts();
+}
+
+time_t ASTWriter::getTimestampForOutput(const FileEntry *E) const {
+ return IncludeTimestamps ? E->getModificationTime() : 0;
+}
+
+uint64_t ASTWriter::WriteAST(Sema &SemaRef, const std::string &OutputFile,
+ Module *WritingModule, StringRef isysroot,
+ bool hasErrors) {
+ WritingAST = true;
+
+ ASTHasCompilerErrors = hasErrors;
+
+ // Emit the file header.
+ Stream.Emit((unsigned)'C', 8);
+ Stream.Emit((unsigned)'P', 8);
+ Stream.Emit((unsigned)'C', 8);
+ Stream.Emit((unsigned)'H', 8);
+
+ WriteBlockInfoBlock();
+
+ Context = &SemaRef.Context;
+ PP = &SemaRef.PP;
+ this->WritingModule = WritingModule;
+ ASTFileSignature Signature =
+ WriteASTCore(SemaRef, isysroot, OutputFile, WritingModule);
+ Context = nullptr;
+ PP = nullptr;
+ this->WritingModule = nullptr;
+ this->BaseDirectory.clear();
+
+ WritingAST = false;
+ return Signature;
+}
+
+template<typename Vector>
+static void AddLazyVectorDecls(ASTWriter &Writer, Vector &Vec,
+ ASTWriter::RecordData &Record) {
+ for (typename Vector::iterator I = Vec.begin(nullptr, true), E = Vec.end();
+ I != E; ++I) {
+ Writer.AddDeclRef(*I, Record);
+ }
+}
+
+uint64_t ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
+ const std::string &OutputFile,
+ Module *WritingModule) {
+ using namespace llvm;
+
+ bool isModule = WritingModule != nullptr;
+
+ // Make sure that the AST reader knows to finalize itself.
+ if (Chain)
+ Chain->finalizeForWriting();
+
+ ASTContext &Context = SemaRef.Context;
+ Preprocessor &PP = SemaRef.PP;
+
+ // Set up predefined declaration IDs.
+ auto RegisterPredefDecl = [&] (Decl *D, PredefinedDeclIDs ID) {
+ if (D) {
+ assert(D->isCanonicalDecl() && "predefined decl is not canonical");
+ DeclIDs[D] = ID;
+ }
+ };
+ RegisterPredefDecl(Context.getTranslationUnitDecl(),
+ PREDEF_DECL_TRANSLATION_UNIT_ID);
+ RegisterPredefDecl(Context.ObjCIdDecl, PREDEF_DECL_OBJC_ID_ID);
+ RegisterPredefDecl(Context.ObjCSelDecl, PREDEF_DECL_OBJC_SEL_ID);
+ RegisterPredefDecl(Context.ObjCClassDecl, PREDEF_DECL_OBJC_CLASS_ID);
+ RegisterPredefDecl(Context.ObjCProtocolClassDecl,
+ PREDEF_DECL_OBJC_PROTOCOL_ID);
+ RegisterPredefDecl(Context.Int128Decl, PREDEF_DECL_INT_128_ID);
+ RegisterPredefDecl(Context.UInt128Decl, PREDEF_DECL_UNSIGNED_INT_128_ID);
+ RegisterPredefDecl(Context.ObjCInstanceTypeDecl,
+ PREDEF_DECL_OBJC_INSTANCETYPE_ID);
+ RegisterPredefDecl(Context.BuiltinVaListDecl, PREDEF_DECL_BUILTIN_VA_LIST_ID);
+ RegisterPredefDecl(Context.VaListTagDecl, PREDEF_DECL_VA_LIST_TAG);
+ RegisterPredefDecl(Context.BuiltinMSVaListDecl,
+ PREDEF_DECL_BUILTIN_MS_VA_LIST_ID);
+ RegisterPredefDecl(Context.ExternCContext, PREDEF_DECL_EXTERN_C_CONTEXT_ID);
+ RegisterPredefDecl(Context.MakeIntegerSeqDecl,
+ PREDEF_DECL_MAKE_INTEGER_SEQ_ID);
+
+ // Build a record containing all of the tentative definitions in this file, in
+ // TentativeDefinitions order. Generally, this record will be empty for
+ // headers.
+ RecordData TentativeDefinitions;
+ AddLazyVectorDecls(*this, SemaRef.TentativeDefinitions, TentativeDefinitions);
+
+ // Build a record containing all of the file scoped decls in this file.
+ RecordData UnusedFileScopedDecls;
+ if (!isModule)
+ AddLazyVectorDecls(*this, SemaRef.UnusedFileScopedDecls,
+ UnusedFileScopedDecls);
+
+ // Build a record containing all of the delegating constructors we still need
+ // to resolve.
+ RecordData DelegatingCtorDecls;
+ if (!isModule)
+ AddLazyVectorDecls(*this, SemaRef.DelegatingCtorDecls, DelegatingCtorDecls);
+
+ // Write the set of weak, undeclared identifiers. We always write the
+ // entire table, since later PCH files in a PCH chain are only interested in
+ // the results at the end of the chain.
+ RecordData WeakUndeclaredIdentifiers;
+ for (auto &WeakUndeclaredIdentifier : SemaRef.WeakUndeclaredIdentifiers) {
+ IdentifierInfo *II = WeakUndeclaredIdentifier.first;
+ WeakInfo &WI = WeakUndeclaredIdentifier.second;
+ AddIdentifierRef(II, WeakUndeclaredIdentifiers);
+ AddIdentifierRef(WI.getAlias(), WeakUndeclaredIdentifiers);
+ AddSourceLocation(WI.getLocation(), WeakUndeclaredIdentifiers);
+ WeakUndeclaredIdentifiers.push_back(WI.getUsed());
+ }
+
+ // Build a record containing all of the ext_vector declarations.
+ RecordData ExtVectorDecls;
+ AddLazyVectorDecls(*this, SemaRef.ExtVectorDecls, ExtVectorDecls);
+
+ // Build a record containing all of the VTable uses information.
+ RecordData VTableUses;
+ if (!SemaRef.VTableUses.empty()) {
+ for (unsigned I = 0, N = SemaRef.VTableUses.size(); I != N; ++I) {
+ AddDeclRef(SemaRef.VTableUses[I].first, VTableUses);
+ AddSourceLocation(SemaRef.VTableUses[I].second, VTableUses);
+ VTableUses.push_back(SemaRef.VTablesUsed[SemaRef.VTableUses[I].first]);
+ }
+ }
+
+ // Build a record containing all of the UnusedLocalTypedefNameCandidates.
+ RecordData UnusedLocalTypedefNameCandidates;
+ for (const TypedefNameDecl *TD : SemaRef.UnusedLocalTypedefNameCandidates)
+ AddDeclRef(TD, UnusedLocalTypedefNameCandidates);
+
+ // Build a record containing all of the pending implicit instantiations.
+ RecordData PendingInstantiations;
+ for (const auto &I : SemaRef.PendingInstantiations) {
+ AddDeclRef(I.first, PendingInstantiations);
+ AddSourceLocation(I.second, PendingInstantiations);
+ }
+ assert(SemaRef.PendingLocalImplicitInstantiations.empty() &&
+ "There are local ones at end of translation unit!");
+
+ // Build a record containing some declaration references.
+ RecordData SemaDeclRefs;
+ if (SemaRef.StdNamespace || SemaRef.StdBadAlloc) {
+ AddDeclRef(SemaRef.getStdNamespace(), SemaDeclRefs);
+ AddDeclRef(SemaRef.getStdBadAlloc(), SemaDeclRefs);
+ }
+
+ RecordData CUDASpecialDeclRefs;
+ if (Context.getcudaConfigureCallDecl()) {
+ AddDeclRef(Context.getcudaConfigureCallDecl(), CUDASpecialDeclRefs);
+ }
+
+ // Build a record containing all of the known namespaces.
+ RecordData KnownNamespaces;
+ for (const auto &I : SemaRef.KnownNamespaces) {
+ if (!I.second)
+ AddDeclRef(I.first, KnownNamespaces);
+ }
+
+ // Build a record of all used, undefined objects that require definitions.
+ RecordData UndefinedButUsed;
+
+ SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
+ SemaRef.getUndefinedButUsed(Undefined);
+ for (const auto &I : Undefined) {
+ AddDeclRef(I.first, UndefinedButUsed);
+ AddSourceLocation(I.second, UndefinedButUsed);
+ }
+
+ // Build a record containing all delete-expressions that we would like to
+ // analyze later in the AST.
+ RecordData DeleteExprsToAnalyze;
+
+ for (const auto &DeleteExprsInfo :
+ SemaRef.getMismatchingDeleteExpressions()) {
+ AddDeclRef(DeleteExprsInfo.first, DeleteExprsToAnalyze);
+ DeleteExprsToAnalyze.push_back(DeleteExprsInfo.second.size());
+ for (const auto &DeleteLoc : DeleteExprsInfo.second) {
+ AddSourceLocation(DeleteLoc.first, DeleteExprsToAnalyze);
+ DeleteExprsToAnalyze.push_back(DeleteLoc.second);
+ }
+ }
+
+ // Write the control block
+ uint64_t Signature = WriteControlBlock(PP, Context, isysroot, OutputFile);
+
+ // Write the remaining AST contents.
+ Stream.EnterSubblock(AST_BLOCK_ID, 5);
+
+ // This is so that older clang versions, before the introduction
+ // of the control block, can read and reject the newer PCH format.
+ {
+ RecordData Record = {VERSION_MAJOR};
+ Stream.EmitRecord(METADATA_OLD_FORMAT, Record);
+ }
+
+ // Create a lexical update block containing all of the declarations in the
+ // translation unit that do not come from other AST files.
+ const TranslationUnitDecl *TU = Context.getTranslationUnitDecl();
+ SmallVector<uint32_t, 128> NewGlobalKindDeclPairs;
+ for (const auto *D : TU->noload_decls()) {
+ if (!D->isFromASTFile()) {
+ NewGlobalKindDeclPairs.push_back(D->getKind());
+ NewGlobalKindDeclPairs.push_back(GetDeclRef(D));
+ }
+ }
+
+ auto *Abv = new llvm::BitCodeAbbrev();
+ Abv->Add(llvm::BitCodeAbbrevOp(TU_UPDATE_LEXICAL));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Blob));
+ unsigned TuUpdateLexicalAbbrev = Stream.EmitAbbrev(Abv);
+ {
+ RecordData::value_type Record[] = {TU_UPDATE_LEXICAL};
+ Stream.EmitRecordWithBlob(TuUpdateLexicalAbbrev, Record,
+ bytes(NewGlobalKindDeclPairs));
+ }
+
+ // And a visible updates block for the translation unit.
+ Abv = new llvm::BitCodeAbbrev();
+ Abv->Add(llvm::BitCodeAbbrevOp(UPDATE_VISIBLE));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, 6));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Blob));
+ UpdateVisibleAbbrev = Stream.EmitAbbrev(Abv);
+ WriteDeclContextVisibleUpdate(TU);
+
+ // If we have any extern "C" names, write out a visible update for them.
+ if (Context.ExternCContext)
+ WriteDeclContextVisibleUpdate(Context.ExternCContext);
+
+ // If the translation unit has an anonymous namespace, and we don't already
+ // have an update block for it, write it as an update block.
+ // FIXME: Why do we not do this if there's already an update block?
+ if (NamespaceDecl *NS = TU->getAnonymousNamespace()) {
+ ASTWriter::UpdateRecord &Record = DeclUpdates[TU];
+ if (Record.empty())
+ Record.push_back(DeclUpdate(UPD_CXX_ADDED_ANONYMOUS_NAMESPACE, NS));
+ }
+
+ // Add update records for all mangling numbers and static local numbers.
+ // These aren't really update records, but this is a convenient way of
+ // tagging this rare extra data onto the declarations.
+ for (const auto &Number : Context.MangleNumbers)
+ if (!Number.first->isFromASTFile())
+ DeclUpdates[Number.first].push_back(DeclUpdate(UPD_MANGLING_NUMBER,
+ Number.second));
+ for (const auto &Number : Context.StaticLocalNumbers)
+ if (!Number.first->isFromASTFile())
+ DeclUpdates[Number.first].push_back(DeclUpdate(UPD_STATIC_LOCAL_NUMBER,
+ Number.second));
+
+ // Make sure visible decls, added to DeclContexts previously loaded from
+ // an AST file, are registered for serialization.
+ for (const auto *I : UpdatingVisibleDecls) {
+ GetDeclRef(I);
+ }
+
+ // Make sure all decls associated with an identifier are registered for
+ // serialization, if we're storing decls with identifiers.
+ if (!WritingModule || !getLangOpts().CPlusPlus) {
+ llvm::SmallVector<const IdentifierInfo*, 256> IIs;
+ for (const auto &ID : PP.getIdentifierTable()) {
+ const IdentifierInfo *II = ID.second;
+ if (!Chain || !II->isFromAST() || II->hasChangedSinceDeserialization())
+ IIs.push_back(II);
+ }
+ // Sort the identifiers to visit based on their name.
+ std::sort(IIs.begin(), IIs.end(), llvm::less_ptr<IdentifierInfo>());
+ for (const IdentifierInfo *II : IIs) {
+ for (IdentifierResolver::iterator D = SemaRef.IdResolver.begin(II),
+ DEnd = SemaRef.IdResolver.end();
+ D != DEnd; ++D) {
+ GetDeclRef(*D);
+ }
+ }
+ }
+
+ // Form the record of special types.
+ RecordData SpecialTypes;
+ AddTypeRef(Context.getRawCFConstantStringType(), SpecialTypes);
+ AddTypeRef(Context.getFILEType(), SpecialTypes);
+ AddTypeRef(Context.getjmp_bufType(), SpecialTypes);
+ AddTypeRef(Context.getsigjmp_bufType(), SpecialTypes);
+ AddTypeRef(Context.ObjCIdRedefinitionType, SpecialTypes);
+ AddTypeRef(Context.ObjCClassRedefinitionType, SpecialTypes);
+ AddTypeRef(Context.ObjCSelRedefinitionType, SpecialTypes);
+ AddTypeRef(Context.getucontext_tType(), SpecialTypes);
+
+ if (Chain) {
+ // Write the mapping information describing our module dependencies and how
+ // each of those modules were mapped into our own offset/ID space, so that
+ // the reader can build the appropriate mapping to its own offset/ID space.
+ // The map consists solely of a blob with the following format:
+ // *(module-name-len:i16 module-name:len*i8
+ // source-location-offset:i32
+ // identifier-id:i32
+ // preprocessed-entity-id:i32
+ // macro-definition-id:i32
+ // submodule-id:i32
+ // selector-id:i32
+ // declaration-id:i32
+ // c++-base-specifiers-id:i32
+ // type-id:i32)
+ //
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(MODULE_OFFSET_MAP));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned ModuleOffsetMapAbbrev = Stream.EmitAbbrev(Abbrev);
+ SmallString<2048> Buffer;
+ {
+ llvm::raw_svector_ostream Out(Buffer);
+ for (ModuleFile *M : Chain->ModuleMgr) {
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+ StringRef FileName = M->FileName;
+ LE.write<uint16_t>(FileName.size());
+ Out.write(FileName.data(), FileName.size());
+
+ // Note: if a base ID were UINT32_MAX, it would not be possible to load
+ // another module after it or to have more than one entity inside it.
+ uint32_t None = std::numeric_limits<uint32_t>::max();
+
+ auto writeBaseIDOrNone = [&](uint32_t BaseID, bool ShouldWrite) {
+ assert(BaseID < std::numeric_limits<uint32_t>::max() &&
+        "base id too high");
+ if (ShouldWrite)
+ LE.write<uint32_t>(BaseID);
+ else
+ LE.write<uint32_t>(None);
+ };
+
+ // These values should be unique within a chain, since they will be read
+ // as keys into ContinuousRangeMaps.
+ writeBaseIDOrNone(M->SLocEntryBaseOffset, M->LocalNumSLocEntries);
+ writeBaseIDOrNone(M->BaseIdentifierID, M->LocalNumIdentifiers);
+ writeBaseIDOrNone(M->BaseMacroID, M->LocalNumMacros);
+ writeBaseIDOrNone(M->BasePreprocessedEntityID,
+ M->NumPreprocessedEntities);
+ writeBaseIDOrNone(M->BaseSubmoduleID, M->LocalNumSubmodules);
+ writeBaseIDOrNone(M->BaseSelectorID, M->LocalNumSelectors);
+ writeBaseIDOrNone(M->BaseDeclID, M->LocalNumDecls);
+ writeBaseIDOrNone(M->BaseTypeIndex, M->LocalNumTypes);
+ }
+ }
+ RecordData::value_type Record[] = {MODULE_OFFSET_MAP};
+ Stream.EmitRecordWithBlob(ModuleOffsetMapAbbrev, Record,
+ Buffer.data(), Buffer.size());
+ }
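+
+ // Editorial sketch: the reader walks this blob with matching little-endian
+ // reads, a length-prefixed module name followed by eight fixed-width base
+ // IDs, where UINT32_MAX means "no entities of that kind in this module":
+ //
+ //   using namespace llvm::support;
+ //   const unsigned char *P = Start; // Start: first byte of the blob
+ //   uint16_t Len = endian::readNext<uint16_t, little, unaligned>(P);
+ //   StringRef Name(reinterpret_cast<const char *>(P), Len);
+ //   P += Len;
+ //   uint32_t SLocOffset = endian::readNext<uint32_t, little, unaligned>(P);
+ //   // ...then seven more uint32 reads, in the order written above.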
+
+ RecordData DeclUpdatesOffsetsRecord;
+
+ // Keep writing types, declarations, and declaration update records
+ // until we've emitted all of them.
+ Stream.EnterSubblock(DECLTYPES_BLOCK_ID, /*bits for abbreviations*/5);
+ WriteTypeAbbrevs();
+ WriteDeclAbbrevs();
+ do {
+ WriteDeclUpdatesBlocks(DeclUpdatesOffsetsRecord);
+ while (!DeclTypesToEmit.empty()) {
+ DeclOrType DOT = DeclTypesToEmit.front();
+ DeclTypesToEmit.pop();
+ if (DOT.isType())
+ WriteType(DOT.getType());
+ else
+ WriteDecl(Context, DOT.getDecl());
+ }
+ } while (!DeclUpdates.empty());
+ Stream.ExitBlock();
+
+ DoneWritingDeclsAndTypes = true;
+
+ // These things can only be done once we've written out decls and types.
+ WriteTypeDeclOffsets();
+ if (!DeclUpdatesOffsetsRecord.empty())
+ Stream.EmitRecord(DECL_UPDATE_OFFSETS, DeclUpdatesOffsetsRecord);
+ WriteCXXBaseSpecifiersOffsets();
+ WriteCXXCtorInitializersOffsets();
+ WriteFileDeclIDsMap();
+ WriteSourceManagerBlock(Context.getSourceManager(), PP);
+ WriteComments();
+ WritePreprocessor(PP, isModule);
+ WriteHeaderSearch(PP.getHeaderSearchInfo());
+ WriteSelectors(SemaRef);
+ WriteReferencedSelectorsPool(SemaRef);
+ WriteLateParsedTemplates(SemaRef);
+ WriteIdentifierTable(PP, SemaRef.IdResolver, isModule);
+ WriteFPPragmaOptions(SemaRef.getFPOptions());
+ WriteOpenCLExtensions(SemaRef);
+ WritePragmaDiagnosticMappings(Context.getDiagnostics(), isModule);
+
+ // If we're emitting a module, write out the submodule information.
+ if (WritingModule)
+ WriteSubmodules(WritingModule);
+
+ Stream.EmitRecord(SPECIAL_TYPES, SpecialTypes);
+
+ // Write the record containing external, unnamed definitions.
+ if (!EagerlyDeserializedDecls.empty())
+ Stream.EmitRecord(EAGERLY_DESERIALIZED_DECLS, EagerlyDeserializedDecls);
+
+ // Write the record containing tentative definitions.
+ if (!TentativeDefinitions.empty())
+ Stream.EmitRecord(TENTATIVE_DEFINITIONS, TentativeDefinitions);
+
+ // Write the record containing unused file scoped decls.
+ if (!UnusedFileScopedDecls.empty())
+ Stream.EmitRecord(UNUSED_FILESCOPED_DECLS, UnusedFileScopedDecls);
+
+ // Write the record containing weak undeclared identifiers.
+ if (!WeakUndeclaredIdentifiers.empty())
+ Stream.EmitRecord(WEAK_UNDECLARED_IDENTIFIERS,
+ WeakUndeclaredIdentifiers);
+
+ // Write the record containing ext_vector type names.
+ if (!ExtVectorDecls.empty())
+ Stream.EmitRecord(EXT_VECTOR_DECLS, ExtVectorDecls);
+
+ // Write the record containing VTable uses information.
+ if (!VTableUses.empty())
+ Stream.EmitRecord(VTABLE_USES, VTableUses);
+
+ // Write the record containing potentially unused local typedefs.
+ if (!UnusedLocalTypedefNameCandidates.empty())
+ Stream.EmitRecord(UNUSED_LOCAL_TYPEDEF_NAME_CANDIDATES,
+ UnusedLocalTypedefNameCandidates);
+
+ // Write the record containing pending implicit instantiations.
+ if (!PendingInstantiations.empty())
+ Stream.EmitRecord(PENDING_IMPLICIT_INSTANTIATIONS, PendingInstantiations);
+
+ // Write the record containing declaration references of Sema.
+ if (!SemaDeclRefs.empty())
+ Stream.EmitRecord(SEMA_DECL_REFS, SemaDeclRefs);
+
+ // Write the record containing CUDA-specific declaration references.
+ if (!CUDASpecialDeclRefs.empty())
+ Stream.EmitRecord(CUDA_SPECIAL_DECL_REFS, CUDASpecialDeclRefs);
+
+ // Write the delegating constructors.
+ if (!DelegatingCtorDecls.empty())
+ Stream.EmitRecord(DELEGATING_CTORS, DelegatingCtorDecls);
+
+ // Write the known namespaces.
+ if (!KnownNamespaces.empty())
+ Stream.EmitRecord(KNOWN_NAMESPACES, KnownNamespaces);
+
+ // Write the undefined internal functions and variables, and inline functions.
+ if (!UndefinedButUsed.empty())
+ Stream.EmitRecord(UNDEFINED_BUT_USED, UndefinedButUsed);
+
+ if (!DeleteExprsToAnalyze.empty())
+ Stream.EmitRecord(DELETE_EXPRS_TO_ANALYZE, DeleteExprsToAnalyze);
+
+ // Write the visible updates to DeclContexts.
+ for (auto *DC : UpdatedDeclContexts)
+ WriteDeclContextVisibleUpdate(DC);
+
+ if (!WritingModule) {
+ // Write the submodules that were imported, if any.
+ struct ModuleInfo {
+ uint64_t ID;
+ Module *M;
+ ModuleInfo(uint64_t ID, Module *M) : ID(ID), M(M) {}
+ };
+ llvm::SmallVector<ModuleInfo, 64> Imports;
+ for (const auto *I : Context.local_imports()) {
+ assert(SubmoduleIDs.find(I->getImportedModule()) != SubmoduleIDs.end());
+ Imports.push_back(ModuleInfo(SubmoduleIDs[I->getImportedModule()],
+ I->getImportedModule()));
+ }
+
+ if (!Imports.empty()) {
+ auto Cmp = [](const ModuleInfo &A, const ModuleInfo &B) {
+ return A.ID < B.ID;
+ };
+ auto Eq = [](const ModuleInfo &A, const ModuleInfo &B) {
+ return A.ID == B.ID;
+ };
+
+ // Sort and deduplicate module IDs.
+ std::sort(Imports.begin(), Imports.end(), Cmp);
+ Imports.erase(std::unique(Imports.begin(), Imports.end(), Eq),
+ Imports.end());
+
+ RecordData ImportedModules;
+ for (const auto &Import : Imports) {
+ ImportedModules.push_back(Import.ID);
+ // FIXME: If the module has macros imported then later has declarations
+ // imported, this location won't be the right one as a location for the
+ // declaration imports.
+ AddSourceLocation(PP.getModuleImportLoc(Import.M), ImportedModules);
+ }
+
+ Stream.EmitRecord(IMPORTED_MODULES, ImportedModules);
+ }
+ }
+
+ WriteDeclReplacementsBlock();
+ WriteObjCCategories();
+ if (!WritingModule)
+ WriteOptimizePragmaOptions(SemaRef);
+
+ // Some simple statistics
+ RecordData::value_type Record[] = {
+ NumStatements, NumMacros, NumLexicalDeclContexts, NumVisibleDeclContexts};
+ Stream.EmitRecord(STATISTICS, Record);
+ Stream.ExitBlock();
+
+ // Write the module file extension blocks.
+ for (const auto &ExtWriter : ModuleFileExtensionWriters)
+ WriteModuleFileExtension(SemaRef, *ExtWriter);
+
+ return Signature;
+}
+
+void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
+ if (DeclUpdates.empty())
+ return;
+
+ DeclUpdateMap LocalUpdates;
+ LocalUpdates.swap(DeclUpdates);
+
+ for (auto &DeclUpdate : LocalUpdates) {
+ const Decl *D = DeclUpdate.first;
+
+ bool HasUpdatedBody = false;
+ RecordData Record;
+ for (auto &Update : DeclUpdate.second) {
+ DeclUpdateKind Kind = (DeclUpdateKind)Update.getKind();
+
+ Record.push_back(Kind);
+ switch (Kind) {
+ case UPD_CXX_ADDED_IMPLICIT_MEMBER:
+ case UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION:
+ case UPD_CXX_ADDED_ANONYMOUS_NAMESPACE:
+ assert(Update.getDecl() && "no decl to add?");
+ Record.push_back(GetDeclRef(Update.getDecl()));
+ break;
+
+ case UPD_CXX_ADDED_FUNCTION_DEFINITION:
+ // An updated body is emitted last, so that the reader doesn't need
+ // to skip over the lazy body to reach statements for other records.
+ Record.pop_back();
+ HasUpdatedBody = true;
+ break;
+
+ case UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER:
+ AddSourceLocation(Update.getLoc(), Record);
+ break;
+
+ case UPD_CXX_INSTANTIATED_CLASS_DEFINITION: {
+ auto *RD = cast<CXXRecordDecl>(D);
+ UpdatedDeclContexts.insert(RD->getPrimaryContext());
+ AddCXXDefinitionData(RD, Record);
+ Record.push_back(WriteDeclContextLexicalBlock(
+ *Context, const_cast<CXXRecordDecl *>(RD)));
+
+ // This state is sometimes updated by template instantiation, when we
+ // switch from the specialization referring to the template declaration
+ // to it referring to the template definition.
+ if (auto *MSInfo = RD->getMemberSpecializationInfo()) {
+ Record.push_back(MSInfo->getTemplateSpecializationKind());
+ AddSourceLocation(MSInfo->getPointOfInstantiation(), Record);
+ } else {
+ auto *Spec = cast<ClassTemplateSpecializationDecl>(RD);
+ Record.push_back(Spec->getTemplateSpecializationKind());
+ AddSourceLocation(Spec->getPointOfInstantiation(), Record);
+
+ // The instantiation might have been resolved to a partial
+ // specialization. If so, record which one.
+ auto From = Spec->getInstantiatedFrom();
+ if (auto PartialSpec =
+ From.dyn_cast<ClassTemplatePartialSpecializationDecl*>()) {
+ Record.push_back(true);
+ AddDeclRef(PartialSpec, Record);
+ AddTemplateArgumentList(&Spec->getTemplateInstantiationArgs(),
+ Record);
+ } else {
+ Record.push_back(false);
+ }
+ }
+ Record.push_back(RD->getTagKind());
+ AddSourceLocation(RD->getLocation(), Record);
+ AddSourceLocation(RD->getLocStart(), Record);
+ AddSourceLocation(RD->getRBraceLoc(), Record);
+
+ // Instantiation may change attributes; write them all out afresh.
+ Record.push_back(D->hasAttrs());
+ if (Record.back())
+ WriteAttributes(llvm::makeArrayRef(D->getAttrs().begin(),
+ D->getAttrs().size()), Record);
+
+ // FIXME: Ensure we don't get here for explicit instantiations.
+ break;
+ }
+
+ case UPD_CXX_RESOLVED_DTOR_DELETE:
+ AddDeclRef(Update.getDecl(), Record);
+ break;
+
+ case UPD_CXX_RESOLVED_EXCEPTION_SPEC:
+ addExceptionSpec(
+ *this,
+ cast<FunctionDecl>(D)->getType()->castAs<FunctionProtoType>(),
+ Record);
+ break;
+
+ case UPD_CXX_DEDUCED_RETURN_TYPE:
+ Record.push_back(GetOrCreateTypeID(Update.getType()));
+ break;
+
+ case UPD_DECL_MARKED_USED:
+ break;
+
+ case UPD_MANGLING_NUMBER:
+ case UPD_STATIC_LOCAL_NUMBER:
+ Record.push_back(Update.getNumber());
+ break;
+
+ case UPD_DECL_MARKED_OPENMP_THREADPRIVATE:
+ AddSourceRange(D->getAttr<OMPThreadPrivateDeclAttr>()->getRange(),
+ Record);
+ break;
+
+ case UPD_DECL_EXPORTED:
+ Record.push_back(getSubmoduleID(Update.getModule()));
+ break;
+
+ case UPD_ADDED_ATTR_TO_RECORD:
+ WriteAttributes(llvm::makeArrayRef(Update.getAttr()), Record);
+ break;
+ }
+ }
+
+ if (HasUpdatedBody) {
+ const auto *Def = cast<FunctionDecl>(D);
+ Record.push_back(UPD_CXX_ADDED_FUNCTION_DEFINITION);
+ Record.push_back(Def->isInlined());
+ AddSourceLocation(Def->getInnerLocStart(), Record);
+ AddFunctionDefinition(Def, Record);
+ }
+
+ OffsetsRecord.push_back(GetDeclRef(D));
+ OffsetsRecord.push_back(Stream.GetCurrentBitNo());
+
+ Stream.EmitRecord(DECL_UPDATES, Record);
+
+ FlushPendingAfterDecl();
+ }
+}
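+
+// Editorial sketch: OffsetsRecord accumulates (declaration ID, bit offset)
+// pairs, so the DECL_UPDATE_OFFSETS record emitted later can be walked two
+// entries at a time to seek directly to each DECL_UPDATES record:
+//
+//   for (unsigned I = 0, N = Record.size(); I != N; I += 2) {
+//     DeclID ID = Record[I];
+//     uint64_t BitOffset = Record[I + 1];
+//     // JumpToBit(BitOffset), then read the DECL_UPDATES record for ID.
+//   }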
+
+void ASTWriter::WriteDeclReplacementsBlock() {
+ if (ReplacedDecls.empty())
+ return;
+
+ RecordData Record;
+ for (const auto &I : ReplacedDecls) {
+ Record.push_back(I.ID);
+ Record.push_back(I.Offset);
+ Record.push_back(I.Loc);
+ }
+ Stream.EmitRecord(DECL_REPLACEMENTS, Record);
+}
+
+void ASTWriter::AddSourceLocation(SourceLocation Loc, RecordDataImpl &Record) {
+ Record.push_back(Loc.getRawEncoding());
+}
+
+void ASTWriter::AddSourceRange(SourceRange Range, RecordDataImpl &Record) {
+ AddSourceLocation(Range.getBegin(), Record);
+ AddSourceLocation(Range.getEnd(), Record);
+}
+
+void ASTWriter::AddAPInt(const llvm::APInt &Value, RecordDataImpl &Record) {
+ Record.push_back(Value.getBitWidth());
+ const uint64_t *Words = Value.getRawData();
+ Record.append(Words, Words + Value.getNumWords());
+}
+
+void ASTWriter::AddAPSInt(const llvm::APSInt &Value, RecordDataImpl &Record) {
+ Record.push_back(Value.isUnsigned());
+ AddAPInt(Value, Record);
+}
+
+void ASTWriter::AddAPFloat(const llvm::APFloat &Value, RecordDataImpl &Record) {
+ AddAPInt(Value.bitcastToAPInt(), Record);
+}
+
+void ASTWriter::AddIdentifierRef(const IdentifierInfo *II, RecordDataImpl &Record) {
+ Record.push_back(getIdentifierRef(II));
+}
+
+IdentID ASTWriter::getIdentifierRef(const IdentifierInfo *II) {
+ if (!II)
+ return 0;
+
+ IdentID &ID = IdentifierIDs[II];
+ if (ID == 0)
+ ID = NextIdentID++;
+ return ID;
+}
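+
+// Editorial note: this is the lazy ID-assignment idiom used throughout the
+// writer (identifiers, macros, selectors, declarations, types). The map's
+// operator[] value-initializes a missing entry to 0, ID 0 is reserved for
+// "null", and a zero entry is replaced with the next free ID. A generic
+// sketch of the pattern:
+//
+//   template <typename T>
+//   unsigned getOrAssignID(llvm::DenseMap<T, unsigned> &IDs, T Key,
+//                          unsigned &NextID) {
+//     unsigned &ID = IDs[Key]; // 0 if Key was not present
+//     if (ID == 0)
+//       ID = NextID++;         // first time seen: allocate a fresh ID
+//     return ID;
+//   }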
+
+MacroID ASTWriter::getMacroRef(MacroInfo *MI, const IdentifierInfo *Name) {
+ // Don't emit builtin macros like __LINE__ to the AST file unless they
+ // have been redefined by the header (in which case they are not
+ // isBuiltinMacro).
+ if (!MI || MI->isBuiltinMacro())
+ return 0;
+
+ MacroID &ID = MacroIDs[MI];
+ if (ID == 0) {
+ ID = NextMacroID++;
+ MacroInfoToEmitData Info = { Name, MI, ID };
+ MacroInfosToEmit.push_back(Info);
+ }
+ return ID;
+}
+
+MacroID ASTWriter::getMacroID(MacroInfo *MI) {
+ if (!MI || MI->isBuiltinMacro())
+ return 0;
+
+ assert(MacroIDs.find(MI) != MacroIDs.end() && "Macro not emitted!");
+ return MacroIDs[MI];
+}
+
+uint64_t ASTWriter::getMacroDirectivesOffset(const IdentifierInfo *Name) {
+ return IdentMacroDirectivesOffsetMap.lookup(Name);
+}
+
+void ASTWriter::AddSelectorRef(const Selector SelRef, RecordDataImpl &Record) {
+ Record.push_back(getSelectorRef(SelRef));
+}
+
+SelectorID ASTWriter::getSelectorRef(Selector Sel) {
+ if (Sel.getAsOpaquePtr() == nullptr) {
+ return 0;
+ }
+
+ SelectorID SID = SelectorIDs[Sel];
+ if (SID == 0 && Chain) {
+ // This might trigger a ReadSelector callback, which will set the ID for
+ // this selector.
+ Chain->LoadSelector(Sel);
+ SID = SelectorIDs[Sel];
+ }
+ if (SID == 0) {
+ SID = NextSelectorID++;
+ SelectorIDs[Sel] = SID;
+ }
+ return SID;
+}
+
+void ASTWriter::AddCXXTemporary(const CXXTemporary *Temp, RecordDataImpl &Record) {
+ AddDeclRef(Temp->getDestructor(), Record);
+}
+
+void ASTWriter::AddCXXCtorInitializersRef(ArrayRef<CXXCtorInitializer *> Inits,
+ RecordDataImpl &Record) {
+ assert(!Inits.empty() && "Empty ctor initializer sets are not recorded");
+ CXXCtorInitializersToWrite.push_back(
+ QueuedCXXCtorInitializers(NextCXXCtorInitializersID, Inits));
+ Record.push_back(NextCXXCtorInitializersID++);
+}
+
+void ASTWriter::AddCXXBaseSpecifiersRef(CXXBaseSpecifier const *Bases,
+ CXXBaseSpecifier const *BasesEnd,
+ RecordDataImpl &Record) {
+ assert(Bases != BasesEnd && "Empty base-specifier sets are not recorded");
+ CXXBaseSpecifiersToWrite.push_back(
+ QueuedCXXBaseSpecifiers(NextCXXBaseSpecifiersID,
+ Bases, BasesEnd));
+ Record.push_back(NextCXXBaseSpecifiersID++);
+}
+
+void ASTWriter::AddTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind,
+ const TemplateArgumentLocInfo &Arg,
+ RecordDataImpl &Record) {
+ switch (Kind) {
+ case TemplateArgument::Expression:
+ AddStmt(Arg.getAsExpr());
+ break;
+ case TemplateArgument::Type:
+ AddTypeSourceInfo(Arg.getAsTypeSourceInfo(), Record);
+ break;
+ case TemplateArgument::Template:
+ AddNestedNameSpecifierLoc(Arg.getTemplateQualifierLoc(), Record);
+ AddSourceLocation(Arg.getTemplateNameLoc(), Record);
+ break;
+ case TemplateArgument::TemplateExpansion:
+ AddNestedNameSpecifierLoc(Arg.getTemplateQualifierLoc(), Record);
+ AddSourceLocation(Arg.getTemplateNameLoc(), Record);
+ AddSourceLocation(Arg.getTemplateEllipsisLoc(), Record);
+ break;
+ case TemplateArgument::Null:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Declaration:
+ case TemplateArgument::NullPtr:
+ case TemplateArgument::Pack:
+ // FIXME: Is this right?
+ break;
+ }
+}
+
+void ASTWriter::AddTemplateArgumentLoc(const TemplateArgumentLoc &Arg,
+ RecordDataImpl &Record) {
+ AddTemplateArgument(Arg.getArgument(), Record);
+
+ if (Arg.getArgument().getKind() == TemplateArgument::Expression) {
+ bool InfoHasSameExpr
+ = Arg.getArgument().getAsExpr() == Arg.getLocInfo().getAsExpr();
+ Record.push_back(InfoHasSameExpr);
+ if (InfoHasSameExpr)
+ return; // Avoid storing the same expr twice.
+ }
+ AddTemplateArgumentLocInfo(Arg.getArgument().getKind(), Arg.getLocInfo(),
+ Record);
+}
+
+void ASTWriter::AddTypeSourceInfo(TypeSourceInfo *TInfo,
+ RecordDataImpl &Record) {
+ if (!TInfo) {
+ AddTypeRef(QualType(), Record);
+ return;
+ }
+
+ AddTypeLoc(TInfo->getTypeLoc(), Record);
+}
+
+void ASTWriter::AddTypeLoc(TypeLoc TL, RecordDataImpl &Record) {
+ AddTypeRef(TL.getType(), Record);
+
+ TypeLocWriter TLW(*this, Record);
+ for (; !TL.isNull(); TL = TL.getNextTypeLoc())
+ TLW.Visit(TL);
+}
+
+void ASTWriter::AddTypeRef(QualType T, RecordDataImpl &Record) {
+ Record.push_back(GetOrCreateTypeID(T));
+}
+
+TypeID ASTWriter::GetOrCreateTypeID(QualType T) {
+ assert(Context);
+ return MakeTypeID(*Context, T, [&](QualType T) -> TypeIdx {
+ if (T.isNull())
+ return TypeIdx();
+ assert(!T.getLocalFastQualifiers());
+
+ TypeIdx &Idx = TypeIdxs[T];
+ if (Idx.getIndex() == 0) {
+ if (DoneWritingDeclsAndTypes) {
+ assert(0 && "New type seen after serializing all the types to emit!");
+ return TypeIdx();
+ }
+
+ // We haven't seen this type before. Assign it a new ID and put it
+ // into the queue of types to emit.
+ Idx = TypeIdx(NextTypeID++);
+ DeclTypesToEmit.push(T);
+ }
+ return Idx;
+ });
+}
+
+TypeID ASTWriter::getTypeID(QualType T) const {
+ assert(Context);
+ return MakeTypeID(*Context, T, [&](QualType T) -> TypeIdx {
+ if (T.isNull())
+ return TypeIdx();
+ assert(!T.getLocalFastQualifiers());
+
+ TypeIdxMap::const_iterator I = TypeIdxs.find(T);
+ assert(I != TypeIdxs.end() && "Type not emitted!");
+ return I->second;
+ });
+}
+
+void ASTWriter::AddDeclRef(const Decl *D, RecordDataImpl &Record) {
+ Record.push_back(GetDeclRef(D));
+}
+
+DeclID ASTWriter::GetDeclRef(const Decl *D) {
+ assert(WritingAST && "Cannot request a declaration ID before AST writing");
+
+ if (!D) {
+ return 0;
+ }
+
+ // If D comes from an AST file, its declaration ID is already known and
+ // fixed.
+ if (D->isFromASTFile())
+ return D->getGlobalID();
+
+ assert(!(reinterpret_cast<uintptr_t>(D) & 0x01) && "Invalid decl pointer");
+ DeclID &ID = DeclIDs[D];
+ if (ID == 0) {
+ if (DoneWritingDeclsAndTypes) {
+ assert(0 && "New decl seen after serializing all the decls to emit!");
+ return 0;
+ }
+
+ // We haven't seen this declaration before. Give it a new ID and
+ // enqueue it in the list of declarations to emit.
+ ID = NextDeclID++;
+ DeclTypesToEmit.push(const_cast<Decl *>(D));
+ }
+
+ return ID;
+}
+
+DeclID ASTWriter::getDeclID(const Decl *D) {
+ if (!D)
+ return 0;
+
+ // If D comes from an AST file, its declaration ID is already known and
+ // fixed.
+ if (D->isFromASTFile())
+ return D->getGlobalID();
+
+ assert(DeclIDs.find(D) != DeclIDs.end() && "Declaration not emitted!");
+ return DeclIDs[D];
+}
+
+void ASTWriter::associateDeclWithFile(const Decl *D, DeclID ID) {
+ assert(ID);
+ assert(D);
+
+ SourceLocation Loc = D->getLocation();
+ if (Loc.isInvalid())
+ return;
+
+ // We only keep track of the file-level declarations of each file.
+ if (!D->getLexicalDeclContext()->isFileContext())
+ return;
+ // FIXME: ParmVarDecls that are part of a function type of a parameter of
+ // a function/objc method, should not have TU as lexical context.
+ if (isa<ParmVarDecl>(D))
+ return;
+
+ SourceManager &SM = Context->getSourceManager();
+ SourceLocation FileLoc = SM.getFileLoc(Loc);
+ assert(SM.isLocalSourceLocation(FileLoc));
+ FileID FID;
+ unsigned Offset;
+ std::tie(FID, Offset) = SM.getDecomposedLoc(FileLoc);
+ if (FID.isInvalid())
+ return;
+ assert(SM.getSLocEntry(FID).isFile());
+
+ DeclIDInFileInfo *&Info = FileDeclIDs[FID];
+ if (!Info)
+ Info = new DeclIDInFileInfo();
+
+ std::pair<unsigned, serialization::DeclID> LocDecl(Offset, ID);
+ LocDeclIDsTy &Decls = Info->DeclIDs;
+
+ if (Decls.empty() || Decls.back().first <= Offset) {
+ Decls.push_back(LocDecl);
+ return;
+ }
+
+ LocDeclIDsTy::iterator I =
+ std::upper_bound(Decls.begin(), Decls.end(), LocDecl, llvm::less_first());
+
+ Decls.insert(I, LocDecl);
+}
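+
+// Editorial note: the list is kept sorted by offset so the reader can
+// binary-search for file-level declarations. Appending is the common case;
+// the upper_bound insertion handles out-of-order offsets while placing a new
+// pair after any existing pairs with an equal offset. For example:
+//
+//   Decls:          (10,d1) (20,d2) (20,d3) (35,d4)
+//   insert (20,d5): (10,d1) (20,d2) (20,d3) (20,d5) (35,d4)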
+
+void ASTWriter::AddDeclarationName(DeclarationName Name, RecordDataImpl &Record) {
+ // FIXME: Emit a stable enum for NameKind. 0 = Identifier etc.
+ Record.push_back(Name.getNameKind());
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ AddIdentifierRef(Name.getAsIdentifierInfo(), Record);
+ break;
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ AddSelectorRef(Name.getObjCSelector(), Record);
+ break;
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ AddTypeRef(Name.getCXXNameType(), Record);
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ Record.push_back(Name.getCXXOverloadedOperator());
+ break;
+
+ case DeclarationName::CXXLiteralOperatorName:
+ AddIdentifierRef(Name.getCXXLiteralIdentifier(), Record);
+ break;
+
+ case DeclarationName::CXXUsingDirective:
+ // No extra data to emit
+ break;
+ }
+}
+
+unsigned ASTWriter::getAnonymousDeclarationNumber(const NamedDecl *D) {
+ assert(needsAnonymousDeclarationNumber(D) &&
+ "expected an anonymous declaration");
+
+ // Number the anonymous declarations within this context, if we've not
+ // already done so.
+ auto It = AnonymousDeclarationNumbers.find(D);
+ if (It == AnonymousDeclarationNumbers.end()) {
+ auto *DC = D->getLexicalDeclContext();
+ numberAnonymousDeclsWithin(DC, [&](const NamedDecl *ND, unsigned Number) {
+ AnonymousDeclarationNumbers[ND] = Number;
+ });
+
+ It = AnonymousDeclarationNumbers.find(D);
+ assert(It != AnonymousDeclarationNumbers.end() &&
+ "declaration not found within its lexical context");
+ }
+
+ return It->second;
+}
+
+void ASTWriter::AddDeclarationNameLoc(const DeclarationNameLoc &DNLoc,
+ DeclarationName Name, RecordDataImpl &Record) {
+ switch (Name.getNameKind()) {
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ AddTypeSourceInfo(DNLoc.NamedType.TInfo, Record);
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ AddSourceLocation(
+ SourceLocation::getFromRawEncoding(DNLoc.CXXOperatorName.BeginOpNameLoc),
+ Record);
+ AddSourceLocation(
+ SourceLocation::getFromRawEncoding(DNLoc.CXXOperatorName.EndOpNameLoc),
+ Record);
+ break;
+
+ case DeclarationName::CXXLiteralOperatorName:
+ AddSourceLocation(
+ SourceLocation::getFromRawEncoding(DNLoc.CXXLiteralOperatorName.OpNameLoc),
+ Record);
+ break;
+
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXUsingDirective:
+ break;
+ }
+}
+
+void ASTWriter::AddDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
+ RecordDataImpl &Record) {
+ AddDeclarationName(NameInfo.getName(), Record);
+ AddSourceLocation(NameInfo.getLoc(), Record);
+ AddDeclarationNameLoc(NameInfo.getInfo(), NameInfo.getName(), Record);
+}
+
+void ASTWriter::AddQualifierInfo(const QualifierInfo &Info,
+ RecordDataImpl &Record) {
+ AddNestedNameSpecifierLoc(Info.QualifierLoc, Record);
+ Record.push_back(Info.NumTemplParamLists);
+ for (unsigned i=0, e=Info.NumTemplParamLists; i != e; ++i)
+ AddTemplateParameterList(Info.TemplParamLists[i], Record);
+}
+
+void ASTWriter::AddNestedNameSpecifier(NestedNameSpecifier *NNS,
+ RecordDataImpl &Record) {
+ // Nested name specifiers usually aren't too long; a depth of 8 typically
+ // accommodates the vast majority.
+ SmallVector<NestedNameSpecifier *, 8> NestedNames;
+
+ // Push each of the NNS's onto a stack for serialization in reverse order.
+ while (NNS) {
+ NestedNames.push_back(NNS);
+ NNS = NNS->getPrefix();
+ }
+
+ Record.push_back(NestedNames.size());
+ while (!NestedNames.empty()) {
+ NNS = NestedNames.pop_back_val();
+ NestedNameSpecifier::SpecifierKind Kind = NNS->getKind();
+ Record.push_back(Kind);
+ switch (Kind) {
+ case NestedNameSpecifier::Identifier:
+ AddIdentifierRef(NNS->getAsIdentifier(), Record);
+ break;
+
+ case NestedNameSpecifier::Namespace:
+ AddDeclRef(NNS->getAsNamespace(), Record);
+ break;
+
+ case NestedNameSpecifier::NamespaceAlias:
+ AddDeclRef(NNS->getAsNamespaceAlias(), Record);
+ break;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ AddTypeRef(QualType(NNS->getAsType(), 0), Record);
+ Record.push_back(Kind == NestedNameSpecifier::TypeSpecWithTemplate);
+ break;
+
+ case NestedNameSpecifier::Global:
+ // Don't need to write an associated value.
+ break;
+
+ case NestedNameSpecifier::Super:
+ AddDeclRef(NNS->getAsRecordDecl(), Record);
+ break;
+ }
+ }
+}
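+
+// Editorial sketch: for a specifier such as A::B::C::, the prefix walk above
+// pushes C, then B, then A, so popping the stack serializes outermost-first.
+// That is the order a reader needs to rebuild the chain front to back
+// (ReadOneSpecifier is a hypothetical helper decoding one kind + payload):
+//
+//   NestedNameSpecifier *Prev = nullptr;
+//   for (unsigned I = 0, N = Record[Idx++]; I != N; ++I)
+//     Prev = ReadOneSpecifier(Record, Idx, Prev);
+//   return Prev; // innermost specifier, linked back through its prefix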
+
+void ASTWriter::AddNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
+ RecordDataImpl &Record) {
+ // Nested name specifiers usually aren't too long; a depth of 8 typically
+ // accommodates the vast majority.
+ SmallVector<NestedNameSpecifierLoc , 8> NestedNames;
+
+ // Push each of the nested-name-specifiers onto a stack for
+ // serialization in reverse order.
+ while (NNS) {
+ NestedNames.push_back(NNS);
+ NNS = NNS.getPrefix();
+ }
+
+ Record.push_back(NestedNames.size());
+ while (!NestedNames.empty()) {
+ NNS = NestedNames.pop_back_val();
+ NestedNameSpecifier::SpecifierKind Kind
+ = NNS.getNestedNameSpecifier()->getKind();
+ Record.push_back(Kind);
+ switch (Kind) {
+ case NestedNameSpecifier::Identifier:
+ AddIdentifierRef(NNS.getNestedNameSpecifier()->getAsIdentifier(), Record);
+ AddSourceRange(NNS.getLocalSourceRange(), Record);
+ break;
+
+ case NestedNameSpecifier::Namespace:
+ AddDeclRef(NNS.getNestedNameSpecifier()->getAsNamespace(), Record);
+ AddSourceRange(NNS.getLocalSourceRange(), Record);
+ break;
+
+ case NestedNameSpecifier::NamespaceAlias:
+ AddDeclRef(NNS.getNestedNameSpecifier()->getAsNamespaceAlias(), Record);
+ AddSourceRange(NNS.getLocalSourceRange(), Record);
+ break;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ Record.push_back(Kind == NestedNameSpecifier::TypeSpecWithTemplate);
+ AddTypeLoc(NNS.getTypeLoc(), Record);
+ AddSourceLocation(NNS.getLocalSourceRange().getEnd(), Record);
+ break;
+
+ case NestedNameSpecifier::Global:
+ AddSourceLocation(NNS.getLocalSourceRange().getEnd(), Record);
+ break;
+
+ case NestedNameSpecifier::Super:
+ AddDeclRef(NNS.getNestedNameSpecifier()->getAsRecordDecl(), Record);
+ AddSourceRange(NNS.getLocalSourceRange(), Record);
+ break;
+ }
+ }
+}
+
+void ASTWriter::AddTemplateName(TemplateName Name, RecordDataImpl &Record) {
+ TemplateName::NameKind Kind = Name.getKind();
+ Record.push_back(Kind);
+ switch (Kind) {
+ case TemplateName::Template:
+ AddDeclRef(Name.getAsTemplateDecl(), Record);
+ break;
+
+ case TemplateName::OverloadedTemplate: {
+ OverloadedTemplateStorage *OvT = Name.getAsOverloadedTemplate();
+ Record.push_back(OvT->size());
+ for (const auto &I : *OvT)
+ AddDeclRef(I, Record);
+ break;
+ }
+
+ case TemplateName::QualifiedTemplate: {
+ QualifiedTemplateName *QualT = Name.getAsQualifiedTemplateName();
+ AddNestedNameSpecifier(QualT->getQualifier(), Record);
+ Record.push_back(QualT->hasTemplateKeyword());
+ AddDeclRef(QualT->getTemplateDecl(), Record);
+ break;
+ }
+
+ case TemplateName::DependentTemplate: {
+ DependentTemplateName *DepT = Name.getAsDependentTemplateName();
+ AddNestedNameSpecifier(DepT->getQualifier(), Record);
+ Record.push_back(DepT->isIdentifier());
+ if (DepT->isIdentifier())
+ AddIdentifierRef(DepT->getIdentifier(), Record);
+ else
+ Record.push_back(DepT->getOperator());
+ break;
+ }
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ SubstTemplateTemplateParmStorage *subst
+ = Name.getAsSubstTemplateTemplateParm();
+ AddDeclRef(subst->getParameter(), Record);
+ AddTemplateName(subst->getReplacement(), Record);
+ break;
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ SubstTemplateTemplateParmPackStorage *SubstPack
+ = Name.getAsSubstTemplateTemplateParmPack();
+ AddDeclRef(SubstPack->getParameterPack(), Record);
+ AddTemplateArgument(SubstPack->getArgumentPack(), Record);
+ break;
+ }
+ }
+}
+
+void ASTWriter::AddTemplateArgument(const TemplateArgument &Arg,
+ RecordDataImpl &Record) {
+ Record.push_back(Arg.getKind());
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ break;
+ case TemplateArgument::Type:
+ AddTypeRef(Arg.getAsType(), Record);
+ break;
+ case TemplateArgument::Declaration:
+ AddDeclRef(Arg.getAsDecl(), Record);
+ AddTypeRef(Arg.getParamTypeForDecl(), Record);
+ break;
+ case TemplateArgument::NullPtr:
+ AddTypeRef(Arg.getNullPtrType(), Record);
+ break;
+ case TemplateArgument::Integral:
+ AddAPSInt(Arg.getAsIntegral(), Record);
+ AddTypeRef(Arg.getIntegralType(), Record);
+ break;
+ case TemplateArgument::Template:
+ AddTemplateName(Arg.getAsTemplateOrTemplatePattern(), Record);
+ break;
+ case TemplateArgument::TemplateExpansion:
+ AddTemplateName(Arg.getAsTemplateOrTemplatePattern(), Record);
+ if (Optional<unsigned> NumExpansions = Arg.getNumTemplateExpansions())
+ Record.push_back(*NumExpansions + 1);
+ else
+ Record.push_back(0);
+ break;
+ case TemplateArgument::Expression:
+ AddStmt(Arg.getAsExpr());
+ break;
+ case TemplateArgument::Pack:
+ Record.push_back(Arg.pack_size());
+ for (const auto &P : Arg.pack_elements())
+ AddTemplateArgument(P, Record);
+ break;
+ }
+}
+
+void
+ASTWriter::AddTemplateParameterList(const TemplateParameterList *TemplateParams,
+ RecordDataImpl &Record) {
+ assert(TemplateParams && "No TemplateParams!");
+ AddSourceLocation(TemplateParams->getTemplateLoc(), Record);
+ AddSourceLocation(TemplateParams->getLAngleLoc(), Record);
+ AddSourceLocation(TemplateParams->getRAngleLoc(), Record);
+ Record.push_back(TemplateParams->size());
+ for (const auto &P : *TemplateParams)
+ AddDeclRef(P, Record);
+}
+
+/// \brief Emit a template argument list.
+void
+ASTWriter::AddTemplateArgumentList(const TemplateArgumentList *TemplateArgs,
+ RecordDataImpl &Record) {
+ assert(TemplateArgs && "No TemplateArgs!");
+ Record.push_back(TemplateArgs->size());
+ for (unsigned i = 0, e = TemplateArgs->size(); i != e; ++i)
+ AddTemplateArgument(TemplateArgs->get(i), Record);
+}
+
+void
+ASTWriter::AddASTTemplateArgumentListInfo
+(const ASTTemplateArgumentListInfo *ASTTemplArgList, RecordDataImpl &Record) {
+ assert(ASTTemplArgList && "No ASTTemplArgList!");
+ AddSourceLocation(ASTTemplArgList->LAngleLoc, Record);
+ AddSourceLocation(ASTTemplArgList->RAngleLoc, Record);
+ Record.push_back(ASTTemplArgList->NumTemplateArgs);
+ const TemplateArgumentLoc *TemplArgs = ASTTemplArgList->getTemplateArgs();
+ for (unsigned i = 0, e = ASTTemplArgList->NumTemplateArgs; i != e; ++i)
+ AddTemplateArgumentLoc(TemplArgs[i], Record);
+}
+
+void
+ASTWriter::AddUnresolvedSet(const ASTUnresolvedSet &Set, RecordDataImpl &Record) {
+ Record.push_back(Set.size());
+ for (ASTUnresolvedSet::const_iterator
+ I = Set.begin(), E = Set.end(); I != E; ++I) {
+ AddDeclRef(I.getDecl(), Record);
+ Record.push_back(I.getAccess());
+ }
+}
+
+void ASTWriter::AddCXXBaseSpecifier(const CXXBaseSpecifier &Base,
+ RecordDataImpl &Record) {
+ Record.push_back(Base.isVirtual());
+ Record.push_back(Base.isBaseOfClass());
+ Record.push_back(Base.getAccessSpecifierAsWritten());
+ Record.push_back(Base.getInheritConstructors());
+ AddTypeSourceInfo(Base.getTypeSourceInfo(), Record);
+ AddSourceRange(Base.getSourceRange(), Record);
+ AddSourceLocation(Base.isPackExpansion()? Base.getEllipsisLoc()
+ : SourceLocation(),
+ Record);
+}
+
+void ASTWriter::FlushCXXBaseSpecifiers() {
+ RecordData Record;
+ unsigned N = CXXBaseSpecifiersToWrite.size();
+ for (unsigned I = 0; I != N; ++I) {
+ Record.clear();
+
+ // Record the offset of this base-specifier set.
+ unsigned Index = CXXBaseSpecifiersToWrite[I].ID - 1;
+ if (Index == CXXBaseSpecifiersOffsets.size())
+ CXXBaseSpecifiersOffsets.push_back(Stream.GetCurrentBitNo());
+ else {
+ if (Index > CXXBaseSpecifiersOffsets.size())
+ CXXBaseSpecifiersOffsets.resize(Index + 1);
+ CXXBaseSpecifiersOffsets[Index] = Stream.GetCurrentBitNo();
+ }
+
+ const CXXBaseSpecifier *B = CXXBaseSpecifiersToWrite[I].Bases,
+ *BEnd = CXXBaseSpecifiersToWrite[I].BasesEnd;
+ Record.push_back(BEnd - B);
+ for (; B != BEnd; ++B)
+ AddCXXBaseSpecifier(*B, Record);
+ Stream.EmitRecord(serialization::DECL_CXX_BASE_SPECIFIERS, Record);
+
+ // Flush any expressions that were written as part of the base specifiers.
+ FlushStmts();
+ }
+
+ assert(N == CXXBaseSpecifiersToWrite.size() &&
+ "added more base specifiers while writing base specifiers");
+ CXXBaseSpecifiersToWrite.clear();
+}
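+
+// Editorial note: the offset-table discipline above allocates IDs from 1
+// (0 is reserved), so entry ID lands at index ID - 1. The resize covers sets
+// flushed out of ID order; any gap it creates is filled in when the
+// lower-numbered set is flushed later. For example, flushing IDs 1, 3, 2:
+//
+//   after ID 1: Offsets = [off1]
+//   after ID 3: Offsets = [off1, 0, off3]    // resized to cover index 2
+//   after ID 2: Offsets = [off1, off2, off3]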
+
+void ASTWriter::AddCXXCtorInitializers(
+ const CXXCtorInitializer * const *CtorInitializers,
+ unsigned NumCtorInitializers,
+ RecordDataImpl &Record) {
+ Record.push_back(NumCtorInitializers);
+ for (unsigned i=0; i != NumCtorInitializers; ++i) {
+ const CXXCtorInitializer *Init = CtorInitializers[i];
+
+ if (Init->isBaseInitializer()) {
+ Record.push_back(CTOR_INITIALIZER_BASE);
+ AddTypeSourceInfo(Init->getTypeSourceInfo(), Record);
+ Record.push_back(Init->isBaseVirtual());
+ } else if (Init->isDelegatingInitializer()) {
+ Record.push_back(CTOR_INITIALIZER_DELEGATING);
+ AddTypeSourceInfo(Init->getTypeSourceInfo(), Record);
+ } else if (Init->isMemberInitializer()) {
+ Record.push_back(CTOR_INITIALIZER_MEMBER);
+ AddDeclRef(Init->getMember(), Record);
+ } else {
+ Record.push_back(CTOR_INITIALIZER_INDIRECT_MEMBER);
+ AddDeclRef(Init->getIndirectMember(), Record);
+ }
+
+ AddSourceLocation(Init->getMemberLocation(), Record);
+ AddStmt(Init->getInit());
+ AddSourceLocation(Init->getLParenLoc(), Record);
+ AddSourceLocation(Init->getRParenLoc(), Record);
+ Record.push_back(Init->isWritten());
+ if (Init->isWritten()) {
+ Record.push_back(Init->getSourceOrder());
+ } else {
+ Record.push_back(Init->getNumArrayIndices());
+ for (unsigned i=0, e=Init->getNumArrayIndices(); i != e; ++i)
+ AddDeclRef(Init->getArrayIndex(i), Record);
+ }
+ }
+}
+
+void ASTWriter::FlushCXXCtorInitializers() {
+ RecordData Record;
+
+ unsigned N = CXXCtorInitializersToWrite.size();
+ (void)N; // Silence unused warning in non-assert builds.
+ for (auto &Init : CXXCtorInitializersToWrite) {
+ Record.clear();
+
+ // Record the offset of this mem-initializer list.
+ unsigned Index = Init.ID - 1;
+ if (Index == CXXCtorInitializersOffsets.size())
+ CXXCtorInitializersOffsets.push_back(Stream.GetCurrentBitNo());
+ else {
+ if (Index > CXXCtorInitializersOffsets.size())
+ CXXCtorInitializersOffsets.resize(Index + 1);
+ CXXCtorInitializersOffsets[Index] = Stream.GetCurrentBitNo();
+ }
+
+ AddCXXCtorInitializers(Init.Inits.data(), Init.Inits.size(), Record);
+ Stream.EmitRecord(serialization::DECL_CXX_CTOR_INITIALIZERS, Record);
+
+ // Flush any expressions that were written as part of the initializers.
+ FlushStmts();
+ }
+
+ assert(N == CXXCtorInitializersToWrite.size() &&
+ "added more ctor initializers while writing ctor initializers");
+ CXXCtorInitializersToWrite.clear();
+}
+
+void ASTWriter::AddCXXDefinitionData(const CXXRecordDecl *D, RecordDataImpl &Record) {
+ auto &Data = D->data();
+ Record.push_back(Data.IsLambda);
+ Record.push_back(Data.UserDeclaredConstructor);
+ Record.push_back(Data.UserDeclaredSpecialMembers);
+ Record.push_back(Data.Aggregate);
+ Record.push_back(Data.PlainOldData);
+ Record.push_back(Data.Empty);
+ Record.push_back(Data.Polymorphic);
+ Record.push_back(Data.Abstract);
+ Record.push_back(Data.IsStandardLayout);
+ Record.push_back(Data.HasNoNonEmptyBases);
+ Record.push_back(Data.HasPrivateFields);
+ Record.push_back(Data.HasProtectedFields);
+ Record.push_back(Data.HasPublicFields);
+ Record.push_back(Data.HasMutableFields);
+ Record.push_back(Data.HasVariantMembers);
+ Record.push_back(Data.HasOnlyCMembers);
+ Record.push_back(Data.HasInClassInitializer);
+ Record.push_back(Data.HasUninitializedReferenceMember);
+ Record.push_back(Data.NeedOverloadResolutionForMoveConstructor);
+ Record.push_back(Data.NeedOverloadResolutionForMoveAssignment);
+ Record.push_back(Data.NeedOverloadResolutionForDestructor);
+ Record.push_back(Data.DefaultedMoveConstructorIsDeleted);
+ Record.push_back(Data.DefaultedMoveAssignmentIsDeleted);
+ Record.push_back(Data.DefaultedDestructorIsDeleted);
+ Record.push_back(Data.HasTrivialSpecialMembers);
+ Record.push_back(Data.DeclaredNonTrivialSpecialMembers);
+ Record.push_back(Data.HasIrrelevantDestructor);
+ Record.push_back(Data.HasConstexprNonCopyMoveConstructor);
+ Record.push_back(Data.DefaultedDefaultConstructorIsConstexpr);
+ Record.push_back(Data.HasConstexprDefaultConstructor);
+ Record.push_back(Data.HasNonLiteralTypeFieldsOrBases);
+ Record.push_back(Data.ComputedVisibleConversions);
+ Record.push_back(Data.UserProvidedDefaultConstructor);
+ Record.push_back(Data.DeclaredSpecialMembers);
+ Record.push_back(Data.ImplicitCopyConstructorHasConstParam);
+ Record.push_back(Data.ImplicitCopyAssignmentHasConstParam);
+ Record.push_back(Data.HasDeclaredCopyConstructorWithConstParam);
+ Record.push_back(Data.HasDeclaredCopyAssignmentWithConstParam);
+ // IsLambda bit is already saved.
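+ // (Note, illustrative: the reader consumes these flag bits positionally,
+ // so this sequence and ASTReader's counterpart must stay in lockstep.)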
+
+ Record.push_back(Data.NumBases);
+ if (Data.NumBases > 0)
+ AddCXXBaseSpecifiersRef(Data.getBases(), Data.getBases() + Data.NumBases,
+ Record);
+
+ // FIXME: Make VBases lazily computed when needed to avoid storing them.
+ Record.push_back(Data.NumVBases);
+ if (Data.NumVBases > 0)
+ AddCXXBaseSpecifiersRef(Data.getVBases(), Data.getVBases() + Data.NumVBases,
+ Record);
+
+ AddUnresolvedSet(Data.Conversions.get(*Context), Record);
+ AddUnresolvedSet(Data.VisibleConversions.get(*Context), Record);
+ // Data.Definition is the owning decl, no need to write it.
+ AddDeclRef(D->getFirstFriend(), Record);
+
+ // Add lambda-specific data.
+ if (Data.IsLambda) {
+ auto &Lambda = D->getLambdaData();
+ Record.push_back(Lambda.Dependent);
+ Record.push_back(Lambda.IsGenericLambda);
+ Record.push_back(Lambda.CaptureDefault);
+ Record.push_back(Lambda.NumCaptures);
+ Record.push_back(Lambda.NumExplicitCaptures);
+ Record.push_back(Lambda.ManglingNumber);
+ AddDeclRef(Lambda.ContextDecl, Record);
+ AddTypeSourceInfo(Lambda.MethodTyInfo, Record);
+ for (unsigned I = 0, N = Lambda.NumCaptures; I != N; ++I) {
+ const LambdaCapture &Capture = Lambda.Captures[I];
+ AddSourceLocation(Capture.getLocation(), Record);
+ Record.push_back(Capture.isImplicit());
+ Record.push_back(Capture.getCaptureKind());
+ switch (Capture.getCaptureKind()) {
+ case LCK_This:
+ case LCK_VLAType:
+ break;
+ case LCK_ByCopy:
+ case LCK_ByRef:
+ VarDecl *Var =
+ Capture.capturesVariable() ? Capture.getCapturedVar() : nullptr;
+ AddDeclRef(Var, Record);
+ AddSourceLocation(Capture.isPackExpansion() ? Capture.getEllipsisLoc()
+ : SourceLocation(),
+ Record);
+ break;
+ }
+ }
+ }
+}
+
+void ASTWriter::ReaderInitialized(ASTReader *Reader) {
+ assert(Reader && "Cannot remove chain");
+ assert((!Chain || Chain == Reader) && "Cannot replace chain");
+ assert(FirstDeclID == NextDeclID &&
+ FirstTypeID == NextTypeID &&
+ FirstIdentID == NextIdentID &&
+ FirstMacroID == NextMacroID &&
+ FirstSubmoduleID == NextSubmoduleID &&
+ FirstSelectorID == NextSelectorID &&
+ "Setting chain after writing has started.");
+
+ Chain = Reader;
+
+ // Note: this will get called multiple times, once when the reader starts up
+ // and again each time it finishes reading a PCH or module.
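+ // (Illustrative: if the chain so far holds 1200 decls, FirstDeclID becomes
+ // NUM_PREDEF_DECL_IDS + 1200, so locally minted IDs can never collide with
+ // IDs already claimed by the imported files.)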
+ FirstDeclID = NUM_PREDEF_DECL_IDS + Chain->getTotalNumDecls();
+ FirstTypeID = NUM_PREDEF_TYPE_IDS + Chain->getTotalNumTypes();
+ FirstIdentID = NUM_PREDEF_IDENT_IDS + Chain->getTotalNumIdentifiers();
+ FirstMacroID = NUM_PREDEF_MACRO_IDS + Chain->getTotalNumMacros();
+ FirstSubmoduleID = NUM_PREDEF_SUBMODULE_IDS + Chain->getTotalNumSubmodules();
+ FirstSelectorID = NUM_PREDEF_SELECTOR_IDS + Chain->getTotalNumSelectors();
+ NextDeclID = FirstDeclID;
+ NextTypeID = FirstTypeID;
+ NextIdentID = FirstIdentID;
+ NextMacroID = FirstMacroID;
+ NextSelectorID = FirstSelectorID;
+ NextSubmoduleID = FirstSubmoduleID;
+}
+
+void ASTWriter::IdentifierRead(IdentID ID, IdentifierInfo *II) {
+ // Always keep the highest ID. See \p TypeRead() for more information.
+ IdentID &StoredID = IdentifierIDs[II];
+ if (ID > StoredID)
+ StoredID = ID;
+}
+
+void ASTWriter::MacroRead(serialization::MacroID ID, MacroInfo *MI) {
+ // Always keep the highest ID. See \p TypeRead() for more information.
+ MacroID &StoredID = MacroIDs[MI];
+ if (ID > StoredID)
+ StoredID = ID;
+}
+
+void ASTWriter::TypeRead(TypeIdx Idx, QualType T) {
+ // Always take the highest-numbered type index. This copes with an interesting
+ // case for chained AST writing where we schedule writing the type and then,
+ // later, deserialize the type from another AST. In this case, we want to
+ // keep the higher-numbered entry so that we can properly write it out to
+ // the AST file.
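+ // (Hypothetical sequence: we schedule T locally as index 7, then a chained
+ // reader hands us T from another AST as index 31; keeping 31 ensures the
+ // offset entry we emit matches the index readers will actually use.)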
+ TypeIdx &StoredIdx = TypeIdxs[T];
+ if (Idx.getIndex() >= StoredIdx.getIndex())
+ StoredIdx = Idx;
+}
+
+void ASTWriter::SelectorRead(SelectorID ID, Selector S) {
+ // Always keep the highest ID. See \p TypeRead() for more information.
+ SelectorID &StoredID = SelectorIDs[S];
+ if (ID > StoredID)
+ StoredID = ID;
+}
+
+void ASTWriter::MacroDefinitionRead(serialization::PreprocessedEntityID ID,
+ MacroDefinitionRecord *MD) {
+ assert(MacroDefinitions.find(MD) == MacroDefinitions.end());
+ MacroDefinitions[MD] = ID;
+}
+
+void ASTWriter::ModuleRead(serialization::SubmoduleID ID, Module *Mod) {
+ assert(SubmoduleIDs.find(Mod) == SubmoduleIDs.end());
+ SubmoduleIDs[Mod] = ID;
+}
+
+void ASTWriter::CompletedTagDefinition(const TagDecl *D) {
+ assert(D->isCompleteDefinition());
+ assert(!WritingAST && "Already writing the AST!");
+ if (auto *RD = dyn_cast<CXXRecordDecl>(D)) {
+ // We are interested when a PCH decl is modified.
+ if (RD->isFromASTFile()) {
+ // A forward reference was mutated into a definition. Rewrite it.
+ // FIXME: This happens during template instantiation; should we
+ // have created a new definition decl instead?
+ assert(isTemplateInstantiation(RD->getTemplateSpecializationKind()) &&
+ "completed a tag from another module but not by instantiation?");
+ DeclUpdates[RD].push_back(
+ DeclUpdate(UPD_CXX_INSTANTIATED_CLASS_DEFINITION));
+ }
+ }
+}
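+// Most of the listener callbacks below share one pattern (informal sketch):
+// a decl owned by an imported AST file was mutated locally, so we queue a
+// DeclUpdate keyed by that decl and emit the corresponding update records
+// when the AST is finally written out.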
+
+static bool isImportedDeclContext(ASTReader *Chain, const Decl *D) {
+ if (D->isFromASTFile())
+ return true;
+
+ // If we've not loaded any modules, this can't be imported.
+ if (!Chain || !Chain->getModuleManager().size())
+ return false;
+
+ // The predefined __va_list_tag struct is imported if we imported any decls.
+ // FIXME: This is a gross hack.
+ return D == D->getASTContext().getVaListTagDecl();
+}
+
+void ASTWriter::AddedVisibleDecl(const DeclContext *DC, const Decl *D) {
+ // TU and namespaces are handled elsewhere.
+ if (isa<TranslationUnitDecl>(DC) || isa<NamespaceDecl>(DC))
+ return;
+
+ // We're only interested in cases where a local declaration is added to an
+ // imported context.
+ if (D->isFromASTFile() || !isImportedDeclContext(Chain, cast<Decl>(DC)))
+ return;
+
+ assert(DC == DC->getPrimaryContext() && "added to non-primary context");
+ assert(!getDefinitiveDeclContext(DC) && "DeclContext not definitive!");
+ assert(!WritingAST && "Already writing the AST!");
+ if (UpdatedDeclContexts.insert(DC) && !cast<Decl>(DC)->isFromASTFile()) {
+ // We're adding a visible declaration to a predefined decl context. Ensure
+ // that we write out all of its lookup results so we don't get a nasty
+ // surprise when we try to emit its lookup table.
+ for (auto *Child : DC->decls())
+ UpdatingVisibleDecls.push_back(Child);
+ }
+ UpdatingVisibleDecls.push_back(D);
+}
+
+void ASTWriter::AddedCXXImplicitMember(const CXXRecordDecl *RD, const Decl *D) {
+ assert(D->isImplicit());
+
+ // We're only interested in cases where a local declaration is added to an
+ // imported context.
+ if (D->isFromASTFile() || !isImportedDeclContext(Chain, RD))
+ return;
+
+ if (!isa<CXXMethodDecl>(D))
+ return;
+
+ // A decl coming from PCH was modified.
+ assert(RD->isCompleteDefinition());
+ assert(!WritingAST && "Already writing the AST!");
+ DeclUpdates[RD].push_back(DeclUpdate(UPD_CXX_ADDED_IMPLICIT_MEMBER, D));
+}
+
+void ASTWriter::ResolvedExceptionSpec(const FunctionDecl *FD) {
+ assert(!DoneWritingDeclsAndTypes && "Already done writing updates!");
+ if (!Chain) return;
+ Chain->forEachImportedKeyDecl(FD, [&](const Decl *D) {
+ // If we don't already know the exception specification for this redecl
+ // chain, add an update record for it.
+ if (isUnresolvedExceptionSpec(cast<FunctionDecl>(D)
+ ->getType()
+ ->castAs<FunctionProtoType>()
+ ->getExceptionSpecType()))
+ DeclUpdates[D].push_back(UPD_CXX_RESOLVED_EXCEPTION_SPEC);
+ });
+}
+
+void ASTWriter::DeducedReturnType(const FunctionDecl *FD, QualType ReturnType) {
+ assert(!WritingAST && "Already writing the AST!");
+ if (!Chain) return;
+ Chain->forEachImportedKeyDecl(FD, [&](const Decl *D) {
+ DeclUpdates[D].push_back(
+ DeclUpdate(UPD_CXX_DEDUCED_RETURN_TYPE, ReturnType));
+ });
+}
+
+void ASTWriter::ResolvedOperatorDelete(const CXXDestructorDecl *DD,
+ const FunctionDecl *Delete) {
+ assert(!WritingAST && "Already writing the AST!");
+ assert(Delete && "Not given an operator delete");
+ if (!Chain) return;
+ Chain->forEachImportedKeyDecl(DD, [&](const Decl *D) {
+ DeclUpdates[D].push_back(DeclUpdate(UPD_CXX_RESOLVED_DTOR_DELETE, Delete));
+ });
+}
+
+void ASTWriter::CompletedImplicitDefinition(const FunctionDecl *D) {
+ assert(!WritingAST && "Already writing the AST!");
+ if (!D->isFromASTFile())
+ return; // Declaration not imported from PCH.
+
+ // Implicit function decl from a PCH was defined.
+ DeclUpdates[D].push_back(DeclUpdate(UPD_CXX_ADDED_FUNCTION_DEFINITION));
+}
+
+void ASTWriter::FunctionDefinitionInstantiated(const FunctionDecl *D) {
+ assert(!WritingAST && "Already writing the AST!");
+ if (!D->isFromASTFile())
+ return;
+
+ DeclUpdates[D].push_back(DeclUpdate(UPD_CXX_ADDED_FUNCTION_DEFINITION));
+}
+
+void ASTWriter::StaticDataMemberInstantiated(const VarDecl *D) {
+ assert(!WritingAST && "Already writing the AST!");
+ if (!D->isFromASTFile())
+ return;
+
+ // Since the actual instantiation is delayed, this really means that we need
+ // to update the instantiation location.
+ DeclUpdates[D].push_back(
+ DeclUpdate(UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER,
+ D->getMemberSpecializationInfo()->getPointOfInstantiation()));
+}
+
+void ASTWriter::AddedObjCCategoryToInterface(const ObjCCategoryDecl *CatD,
+ const ObjCInterfaceDecl *IFD) {
+ assert(!WritingAST && "Already writing the AST!");
+ if (!IFD->isFromASTFile())
+ return; // Declaration not imported from PCH.
+
+ assert(IFD->getDefinition() && "Category on a class without a definition?");
+ ObjCClassesWithCategories.insert(
+ const_cast<ObjCInterfaceDecl *>(IFD->getDefinition()));
+}
+
+void ASTWriter::DeclarationMarkedUsed(const Decl *D) {
+ assert(!WritingAST && "Already writing the AST!");
+ if (!D->isFromASTFile())
+ return;
+
+ DeclUpdates[D].push_back(DeclUpdate(UPD_DECL_MARKED_USED));
+}
+
+void ASTWriter::DeclarationMarkedOpenMPThreadPrivate(const Decl *D) {
+ assert(!WritingAST && "Already writing the AST!");
+ if (!D->isFromASTFile())
+ return;
+
+ DeclUpdates[D].push_back(DeclUpdate(UPD_DECL_MARKED_OPENMP_THREADPRIVATE));
+}
+
+void ASTWriter::RedefinedHiddenDefinition(const NamedDecl *D, Module *M) {
+ assert(!WritingAST && "Already writing the AST!");
+ assert(D->isHidden() && "expected a hidden declaration");
+ DeclUpdates[D].push_back(DeclUpdate(UPD_DECL_EXPORTED, M));
+}
+
+void ASTWriter::AddedAttributeToRecord(const Attr *Attr,
+ const RecordDecl *Record) {
+ assert(!WritingAST && "Already writing the AST!");
+ if (!Record->isFromASTFile())
+ return;
+ DeclUpdates[Record].push_back(DeclUpdate(UPD_ADDED_ATTR_TO_RECORD, Attr));
+}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp
new file mode 100644
index 0000000..20ca6d6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -0,0 +1,2187 @@
+//===--- ASTWriterDecl.cpp - Declaration Serialization --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements serialization for Declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Serialization/ASTWriter.h"
+#include "ASTCommon.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclContextInternals.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Serialization/ASTReader.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+using namespace serialization;
+
+//===----------------------------------------------------------------------===//
+// Declaration serialization
+//===----------------------------------------------------------------------===//
+
+namespace clang {
+ class ASTDeclWriter : public DeclVisitor<ASTDeclWriter, void> {
+
+ ASTWriter &Writer;
+ ASTContext &Context;
+ typedef ASTWriter::RecordData RecordData;
+ RecordData &Record;
+
+ public:
+ serialization::DeclCode Code;
+ unsigned AbbrevToUse;
+
+ ASTDeclWriter(ASTWriter &Writer, ASTContext &Context, RecordData &Record)
+ : Writer(Writer), Context(Context), Record(Record) {
+ }
+
+ void Visit(Decl *D);
+
+ void VisitDecl(Decl *D);
+ void VisitTranslationUnitDecl(TranslationUnitDecl *D);
+ void VisitNamedDecl(NamedDecl *D);
+ void VisitLabelDecl(LabelDecl *LD);
+ void VisitNamespaceDecl(NamespaceDecl *D);
+ void VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
+ void VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
+ void VisitTypeDecl(TypeDecl *D);
+ void VisitTypedefNameDecl(TypedefNameDecl *D);
+ void VisitTypedefDecl(TypedefDecl *D);
+ void VisitTypeAliasDecl(TypeAliasDecl *D);
+ void VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
+ void VisitTagDecl(TagDecl *D);
+ void VisitEnumDecl(EnumDecl *D);
+ void VisitRecordDecl(RecordDecl *D);
+ void VisitCXXRecordDecl(CXXRecordDecl *D);
+ void VisitClassTemplateSpecializationDecl(
+ ClassTemplateSpecializationDecl *D);
+ void VisitClassTemplatePartialSpecializationDecl(
+ ClassTemplatePartialSpecializationDecl *D);
+ void VisitVarTemplateSpecializationDecl(VarTemplateSpecializationDecl *D);
+ void VisitVarTemplatePartialSpecializationDecl(
+ VarTemplatePartialSpecializationDecl *D);
+ void VisitClassScopeFunctionSpecializationDecl(
+ ClassScopeFunctionSpecializationDecl *D);
+ void VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
+ void VisitValueDecl(ValueDecl *D);
+ void VisitEnumConstantDecl(EnumConstantDecl *D);
+ void VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
+ void VisitDeclaratorDecl(DeclaratorDecl *D);
+ void VisitFunctionDecl(FunctionDecl *D);
+ void VisitCXXMethodDecl(CXXMethodDecl *D);
+ void VisitCXXConstructorDecl(CXXConstructorDecl *D);
+ void VisitCXXDestructorDecl(CXXDestructorDecl *D);
+ void VisitCXXConversionDecl(CXXConversionDecl *D);
+ void VisitFieldDecl(FieldDecl *D);
+ void VisitMSPropertyDecl(MSPropertyDecl *D);
+ void VisitIndirectFieldDecl(IndirectFieldDecl *D);
+ void VisitVarDecl(VarDecl *D);
+ void VisitImplicitParamDecl(ImplicitParamDecl *D);
+ void VisitParmVarDecl(ParmVarDecl *D);
+ void VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
+ void VisitTemplateDecl(TemplateDecl *D);
+ void VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D);
+ void VisitClassTemplateDecl(ClassTemplateDecl *D);
+ void VisitVarTemplateDecl(VarTemplateDecl *D);
+ void VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
+ void VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
+ void VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D);
+ void VisitUsingDecl(UsingDecl *D);
+ void VisitUsingShadowDecl(UsingShadowDecl *D);
+ void VisitLinkageSpecDecl(LinkageSpecDecl *D);
+ void VisitFileScopeAsmDecl(FileScopeAsmDecl *D);
+ void VisitImportDecl(ImportDecl *D);
+ void VisitAccessSpecDecl(AccessSpecDecl *D);
+ void VisitFriendDecl(FriendDecl *D);
+ void VisitFriendTemplateDecl(FriendTemplateDecl *D);
+ void VisitStaticAssertDecl(StaticAssertDecl *D);
+ void VisitBlockDecl(BlockDecl *D);
+ void VisitCapturedDecl(CapturedDecl *D);
+ void VisitEmptyDecl(EmptyDecl *D);
+
+ void VisitDeclContext(DeclContext *DC, uint64_t LexicalOffset,
+ uint64_t VisibleOffset);
+ template <typename T> void VisitRedeclarable(Redeclarable<T> *D);
+
+ // FIXME: Put in the same order as DeclNodes.td?
+ void VisitObjCMethodDecl(ObjCMethodDecl *D);
+ void VisitObjCTypeParamDecl(ObjCTypeParamDecl *D);
+ void VisitObjCContainerDecl(ObjCContainerDecl *D);
+ void VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
+ void VisitObjCIvarDecl(ObjCIvarDecl *D);
+ void VisitObjCProtocolDecl(ObjCProtocolDecl *D);
+ void VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *D);
+ void VisitObjCCategoryDecl(ObjCCategoryDecl *D);
+ void VisitObjCImplDecl(ObjCImplDecl *D);
+ void VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
+ void VisitObjCImplementationDecl(ObjCImplementationDecl *D);
+ void VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *D);
+ void VisitObjCPropertyDecl(ObjCPropertyDecl *D);
+ void VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
+ void VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D);
+
+ /// Add an Objective-C type parameter list to the given record.
+ void AddObjCTypeParamList(ObjCTypeParamList *typeParams) {
+ // Empty type parameter list.
+ if (!typeParams) {
+ Record.push_back(0);
+ return;
+ }
+
+ Record.push_back(typeParams->size());
+ for (auto typeParam : *typeParams) {
+ Writer.AddDeclRef(typeParam, Record);
+ }
+ Writer.AddSourceLocation(typeParams->getLAngleLoc(), Record);
+ Writer.AddSourceLocation(typeParams->getRAngleLoc(), Record);
+ }
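+ // (Illustrative layout, assuming the reader mirrors this order: a list
+ // `<T, U>` is emitted as [2, <DeclRef T>, <DeclRef U>, LAngle, RAngle],
+ // while an absent list collapses to the single sentinel [0].)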
+
+ void AddFunctionDefinition(const FunctionDecl *FD) {
+ assert(FD->doesThisDeclarationHaveABody());
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(FD)) {
+ Record.push_back(CD->NumCtorInitializers);
+ if (CD->NumCtorInitializers)
+ Writer.AddCXXCtorInitializersRef(
+ llvm::makeArrayRef(CD->init_begin(), CD->init_end()), Record);
+ }
+ Writer.AddStmt(FD->getBody());
+ }
+
+ /// Add to the record the first declaration from each module file that
+ /// provides a declaration of D. The intent is to provide a sufficient
+ /// set such that reloading this set will load all current redeclarations.
+ void AddFirstDeclFromEachModule(const Decl *D, bool IncludeLocal) {
+ llvm::MapVector<ModuleFile*, const Decl*> Firsts;
+ // FIXME: We can skip entries that we know are implied by others.
+ for (const Decl *R = D->getMostRecentDecl(); R; R = R->getPreviousDecl()) {
+ if (R->isFromASTFile())
+ Firsts[Writer.Chain->getOwningModuleFile(R)] = R;
+ else if (IncludeLocal)
+ Firsts[nullptr] = R;
+ }
+ for (const auto &F : Firsts)
+ Writer.AddDeclRef(F.second, Record);
+ }
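+ // (Example, purely illustrative: if D has redecls in module files A and B
+ // plus a local one, we emit one DeclRef for each module file's first decl
+ // and, when IncludeLocal is set, one for the local redecl.)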
+
+ /// Get the specialization decl from an entry in the specialization list.
+ template <typename EntryType>
+ typename RedeclarableTemplateDecl::SpecEntryTraits<EntryType>::DeclType *
+ getSpecializationDecl(EntryType &T) {
+ return RedeclarableTemplateDecl::SpecEntryTraits<EntryType>::getDecl(&T);
+ }
+
+ /// Get the list of partial specializations from a template's common ptr.
+ template<typename T>
+ decltype(T::PartialSpecializations) &getPartialSpecializations(T *Common) {
+ return Common->PartialSpecializations;
+ }
+ ArrayRef<Decl> getPartialSpecializations(FunctionTemplateDecl::Common *) {
+ return None;
+ }
+
+ template<typename Decl>
+ void AddTemplateSpecializations(Decl *D) {
+ auto *Common = D->getCommonPtr();
+
+ // If we have any lazy specializations, and the external AST source is
+ // our chained AST reader, we can just write out the DeclIDs. Otherwise,
+ // we need to resolve them to actual declarations.
+ if (Writer.Chain != Writer.Context->getExternalSource() &&
+ Common->LazySpecializations) {
+ D->LoadLazySpecializations();
+ assert(!Common->LazySpecializations);
+ }
+
+ auto &Specializations = Common->Specializations;
+ auto &&PartialSpecializations = getPartialSpecializations(Common);
+ ArrayRef<DeclID> LazySpecializations;
+ if (auto *LS = Common->LazySpecializations)
+ LazySpecializations = llvm::makeArrayRef(LS + 1, LS[0]);
+
+ // Add a slot to the record for the number of specializations.
+ unsigned I = Record.size();
+ Record.push_back(0);
+
+ for (auto &Entry : Specializations) {
+ auto *D = getSpecializationDecl(Entry);
+ assert(D->isCanonicalDecl() && "non-canonical decl in set");
+ AddFirstDeclFromEachModule(D, /*IncludeLocal*/true);
+ }
+ for (auto &Entry : PartialSpecializations) {
+ auto *D = getSpecializationDecl(Entry);
+ assert(D->isCanonicalDecl() && "non-canonical decl in set");
+ AddFirstDeclFromEachModule(D, /*IncludeLocal*/true);
+ }
+ Record.append(LazySpecializations.begin(), LazySpecializations.end());
+
+ // Update the size entry we added earlier.
+ Record[I] = Record.size() - I - 1;
+ }
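+ // (Resulting record shape, sketched: [N, <spec refs...>, <lazy DeclIDs...>]
+ // where N is patched in afterwards to cover everything following the
+ // reserved slot, letting a reader consume the block without pre-counting.)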
+
+ /// Ensure that this template specialization is associated with the specified
+ /// template on reload.
+ void RegisterTemplateSpecialization(const Decl *Template,
+ const Decl *Specialization) {
+ Template = Template->getCanonicalDecl();
+
+ // If the canonical template is local, we'll write out this specialization
+ // when we emit it.
+ // FIXME: We can do the same thing if there is any local declaration of
+ // the template, to avoid emitting an update record.
+ if (!Template->isFromASTFile())
+ return;
+
+ // We only need to associate the first local declaration of the
+ // specialization. The other declarations will get pulled in by it.
+ if (Writer.getFirstLocalDecl(Specialization) != Specialization)
+ return;
+
+ Writer.DeclUpdates[Template].push_back(ASTWriter::DeclUpdate(
+ UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION, Specialization));
+ }
+ };
+}
+
+void ASTDeclWriter::Visit(Decl *D) {
+ DeclVisitor<ASTDeclWriter>::Visit(D);
+
+ // Source locations require array (variable-length) abbreviations. The
+ // abbreviation infrastructure requires that arrays are encoded last, so
+ // we handle them here for the classes derived from DeclaratorDecl.
+ if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D)) {
+ Writer.AddTypeSourceInfo(DD->getTypeSourceInfo(), Record);
+ }
+
+ // Handle FunctionDecl's body here and write it after all other Stmts/Exprs
+ // have been written. We want it last because we will not read it back when
+ // retrieving it from the AST; we'll just lazily set the offset.
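+ // (On reload the body is then materialized on demand from that offset,
+ // e.g. only when someone actually calls FD->getBody(); illustrative note.)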
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ Record.push_back(FD->doesThisDeclarationHaveABody());
+ if (FD->doesThisDeclarationHaveABody())
+ AddFunctionDefinition(FD);
+ }
+}
+
+void ASTDeclWriter::VisitDecl(Decl *D) {
+ Writer.AddDeclRef(cast_or_null<Decl>(D->getDeclContext()), Record);
+ if (D->getDeclContext() != D->getLexicalDeclContext())
+ Writer.AddDeclRef(cast_or_null<Decl>(D->getLexicalDeclContext()), Record);
+ else
+ Record.push_back(0);
+ Record.push_back(D->isInvalidDecl());
+ Record.push_back(D->hasAttrs());
+ if (D->hasAttrs())
+ Writer.WriteAttributes(llvm::makeArrayRef(D->getAttrs().begin(),
+ D->getAttrs().size()), Record);
+ Record.push_back(D->isImplicit());
+ Record.push_back(D->isUsed(false));
+ Record.push_back(D->isReferenced());
+ Record.push_back(D->isTopLevelDeclInObjCContainer());
+ Record.push_back(D->getAccess());
+ Record.push_back(D->isModulePrivate());
+ Record.push_back(Writer.inferSubmoduleIDFromLocation(D->getLocation()));
+
+ // If this declaration injected a name into a context different from its
+ // lexical context, and that context is an imported namespace, we need to
+ // update its visible declarations to include this name.
+ //
+ // This happens when we instantiate a class with a friend declaration or a
+ // function with a local extern declaration, for instance.
+ //
+ // FIXME: Can we handle this in AddedVisibleDecl instead?
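+ // (Concrete illustration: instantiating `template<class T> struct X {
+ // friend void f(X); };` injects `f` into X's enclosing namespace; if that
+ // namespace lives in an imported module, its visible-decl table must be
+ // updated to know about `f`.)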
+ if (D->isOutOfLine()) {
+ auto *DC = D->getDeclContext();
+ while (auto *NS = dyn_cast<NamespaceDecl>(DC->getRedeclContext())) {
+ if (!NS->isFromASTFile())
+ break;
+ Writer.UpdatedDeclContexts.insert(NS->getPrimaryContext());
+ if (!NS->isInlineNamespace())
+ break;
+ DC = NS->getParent();
+ }
+ }
+}
+
+void ASTDeclWriter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
+ llvm_unreachable("Translation units aren't directly serialized");
+}
+
+void ASTDeclWriter::VisitNamedDecl(NamedDecl *D) {
+ VisitDecl(D);
+ Writer.AddDeclarationName(D->getDeclName(), Record);
+ Record.push_back(needsAnonymousDeclarationNumber(D)
+ ? Writer.getAnonymousDeclarationNumber(D)
+ : 0);
+}
+
+void ASTDeclWriter::VisitTypeDecl(TypeDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddSourceLocation(D->getLocStart(), Record);
+ Writer.AddTypeRef(QualType(D->getTypeForDecl(), 0), Record);
+}
+
+void ASTDeclWriter::VisitTypedefNameDecl(TypedefNameDecl *D) {
+ VisitRedeclarable(D);
+ VisitTypeDecl(D);
+ Writer.AddTypeSourceInfo(D->getTypeSourceInfo(), Record);
+ Record.push_back(D->isModed());
+ if (D->isModed())
+ Writer.AddTypeRef(D->getUnderlyingType(), Record);
+}
+
+void ASTDeclWriter::VisitTypedefDecl(TypedefDecl *D) {
+ VisitTypedefNameDecl(D);
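+ // An abbreviation is a fixed-shape bitstream fast path, so it is only
+ // usable when every optional payload is known absent; that is what the
+ // long conjunction below checks (the same pattern recurs for other kinds).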
+ if (D->getDeclContext() == D->getLexicalDeclContext() &&
+ !D->hasAttrs() &&
+ !D->isImplicit() &&
+ D->getFirstDecl() == D->getMostRecentDecl() &&
+ !D->isInvalidDecl() &&
+ !D->isTopLevelDeclInObjCContainer() &&
+ !D->isModulePrivate() &&
+ !needsAnonymousDeclarationNumber(D) &&
+ D->getDeclName().getNameKind() == DeclarationName::Identifier)
+ AbbrevToUse = Writer.getDeclTypedefAbbrev();
+
+ Code = serialization::DECL_TYPEDEF;
+}
+
+void ASTDeclWriter::VisitTypeAliasDecl(TypeAliasDecl *D) {
+ VisitTypedefNameDecl(D);
+ Writer.AddDeclRef(D->getDescribedAliasTemplate(), Record);
+ Code = serialization::DECL_TYPEALIAS;
+}
+
+void ASTDeclWriter::VisitTagDecl(TagDecl *D) {
+ VisitRedeclarable(D);
+ VisitTypeDecl(D);
+ Record.push_back(D->getIdentifierNamespace());
+ Record.push_back((unsigned)D->getTagKind()); // FIXME: stable encoding
+ if (!isa<CXXRecordDecl>(D))
+ Record.push_back(D->isCompleteDefinition());
+ Record.push_back(D->isEmbeddedInDeclarator());
+ Record.push_back(D->isFreeStanding());
+ Record.push_back(D->isCompleteDefinitionRequired());
+ Writer.AddSourceLocation(D->getRBraceLoc(), Record);
+
+ if (D->hasExtInfo()) {
+ Record.push_back(1);
+ Writer.AddQualifierInfo(*D->getExtInfo(), Record);
+ } else if (auto *TD = D->getTypedefNameForAnonDecl()) {
+ Record.push_back(2);
+ Writer.AddDeclRef(TD, Record);
+ Writer.AddIdentifierRef(TD->getDeclName().getAsIdentifierInfo(), Record);
+ } else {
+ Record.push_back(0);
+ }
+}
+
+void ASTDeclWriter::VisitEnumDecl(EnumDecl *D) {
+ VisitTagDecl(D);
+ Writer.AddTypeSourceInfo(D->getIntegerTypeSourceInfo(), Record);
+ if (!D->getIntegerTypeSourceInfo())
+ Writer.AddTypeRef(D->getIntegerType(), Record);
+ Writer.AddTypeRef(D->getPromotionType(), Record);
+ Record.push_back(D->getNumPositiveBits());
+ Record.push_back(D->getNumNegativeBits());
+ Record.push_back(D->isScoped());
+ Record.push_back(D->isScopedUsingClassTag());
+ Record.push_back(D->isFixed());
+ if (MemberSpecializationInfo *MemberInfo = D->getMemberSpecializationInfo()) {
+ Writer.AddDeclRef(MemberInfo->getInstantiatedFrom(), Record);
+ Record.push_back(MemberInfo->getTemplateSpecializationKind());
+ Writer.AddSourceLocation(MemberInfo->getPointOfInstantiation(), Record);
+ } else {
+ Writer.AddDeclRef(nullptr, Record);
+ }
+
+ if (D->getDeclContext() == D->getLexicalDeclContext() &&
+ !D->hasAttrs() &&
+ !D->isImplicit() &&
+ !D->isUsed(false) &&
+ !D->hasExtInfo() &&
+ !D->getTypedefNameForAnonDecl() &&
+ D->getFirstDecl() == D->getMostRecentDecl() &&
+ !D->isInvalidDecl() &&
+ !D->isReferenced() &&
+ !D->isTopLevelDeclInObjCContainer() &&
+ D->getAccess() == AS_none &&
+ !D->isModulePrivate() &&
+ !CXXRecordDecl::classofKind(D->getKind()) &&
+ !D->getIntegerTypeSourceInfo() &&
+ !D->getMemberSpecializationInfo() &&
+ !needsAnonymousDeclarationNumber(D) &&
+ D->getDeclName().getNameKind() == DeclarationName::Identifier)
+ AbbrevToUse = Writer.getDeclEnumAbbrev();
+
+ Code = serialization::DECL_ENUM;
+}
+
+void ASTDeclWriter::VisitRecordDecl(RecordDecl *D) {
+ VisitTagDecl(D);
+ Record.push_back(D->hasFlexibleArrayMember());
+ Record.push_back(D->isAnonymousStructOrUnion());
+ Record.push_back(D->hasObjectMember());
+ Record.push_back(D->hasVolatileMember());
+
+ if (D->getDeclContext() == D->getLexicalDeclContext() &&
+ !D->hasAttrs() &&
+ !D->isImplicit() &&
+ !D->isUsed(false) &&
+ !D->hasExtInfo() &&
+ !D->getTypedefNameForAnonDecl() &&
+ D->getFirstDecl() == D->getMostRecentDecl() &&
+ !D->isInvalidDecl() &&
+ !D->isReferenced() &&
+ !D->isTopLevelDeclInObjCContainer() &&
+ D->getAccess() == AS_none &&
+ !D->isModulePrivate() &&
+ !CXXRecordDecl::classofKind(D->getKind()) &&
+ !needsAnonymousDeclarationNumber(D) &&
+ D->getDeclName().getNameKind() == DeclarationName::Identifier)
+ AbbrevToUse = Writer.getDeclRecordAbbrev();
+
+ Code = serialization::DECL_RECORD;
+}
+
+void ASTDeclWriter::VisitValueDecl(ValueDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddTypeRef(D->getType(), Record);
+}
+
+void ASTDeclWriter::VisitEnumConstantDecl(EnumConstantDecl *D) {
+ VisitValueDecl(D);
+ Record.push_back(D->getInitExpr()? 1 : 0);
+ if (D->getInitExpr())
+ Writer.AddStmt(D->getInitExpr());
+ Writer.AddAPSInt(D->getInitVal(), Record);
+
+ Code = serialization::DECL_ENUM_CONSTANT;
+}
+
+void ASTDeclWriter::VisitDeclaratorDecl(DeclaratorDecl *D) {
+ VisitValueDecl(D);
+ Writer.AddSourceLocation(D->getInnerLocStart(), Record);
+ Record.push_back(D->hasExtInfo());
+ if (D->hasExtInfo())
+ Writer.AddQualifierInfo(*D->getExtInfo(), Record);
+}
+
+void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
+ VisitRedeclarable(D);
+ VisitDeclaratorDecl(D);
+ Writer.AddDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record);
+ Record.push_back(D->getIdentifierNamespace());
+
+ // FunctionDecl's body is handled last at ASTWriterDecl::Visit,
+ // after everything else is written.
+
+ Record.push_back((int)D->SClass); // FIXME: stable encoding
+ Record.push_back(D->IsInline);
+ Record.push_back(D->IsInlineSpecified);
+ Record.push_back(D->IsVirtualAsWritten);
+ Record.push_back(D->IsPure);
+ Record.push_back(D->HasInheritedPrototype);
+ Record.push_back(D->HasWrittenPrototype);
+ Record.push_back(D->IsDeleted);
+ Record.push_back(D->IsTrivial);
+ Record.push_back(D->IsDefaulted);
+ Record.push_back(D->IsExplicitlyDefaulted);
+ Record.push_back(D->HasImplicitReturnZero);
+ Record.push_back(D->IsConstexpr);
+ Record.push_back(D->HasSkippedBody);
+ Record.push_back(D->IsLateTemplateParsed);
+ Record.push_back(D->getLinkageInternal());
+ Writer.AddSourceLocation(D->getLocEnd(), Record);
+
+ Record.push_back(D->getTemplatedKind());
+ switch (D->getTemplatedKind()) {
+ case FunctionDecl::TK_NonTemplate:
+ break;
+ case FunctionDecl::TK_FunctionTemplate:
+ Writer.AddDeclRef(D->getDescribedFunctionTemplate(), Record);
+ break;
+ case FunctionDecl::TK_MemberSpecialization: {
+ MemberSpecializationInfo *MemberInfo = D->getMemberSpecializationInfo();
+ Writer.AddDeclRef(MemberInfo->getInstantiatedFrom(), Record);
+ Record.push_back(MemberInfo->getTemplateSpecializationKind());
+ Writer.AddSourceLocation(MemberInfo->getPointOfInstantiation(), Record);
+ break;
+ }
+ case FunctionDecl::TK_FunctionTemplateSpecialization: {
+ FunctionTemplateSpecializationInfo *
+ FTSInfo = D->getTemplateSpecializationInfo();
+
+ RegisterTemplateSpecialization(FTSInfo->getTemplate(), D);
+
+ Writer.AddDeclRef(FTSInfo->getTemplate(), Record);
+ Record.push_back(FTSInfo->getTemplateSpecializationKind());
+
+ // Template arguments.
+ Writer.AddTemplateArgumentList(FTSInfo->TemplateArguments, Record);
+
+ // Template args as written.
+ Record.push_back(FTSInfo->TemplateArgumentsAsWritten != nullptr);
+ if (FTSInfo->TemplateArgumentsAsWritten) {
+ Record.push_back(FTSInfo->TemplateArgumentsAsWritten->NumTemplateArgs);
+ for (int i=0, e = FTSInfo->TemplateArgumentsAsWritten->NumTemplateArgs;
+ i!=e; ++i)
+ Writer.AddTemplateArgumentLoc((*FTSInfo->TemplateArgumentsAsWritten)[i],
+ Record);
+ Writer.AddSourceLocation(FTSInfo->TemplateArgumentsAsWritten->LAngleLoc,
+ Record);
+ Writer.AddSourceLocation(FTSInfo->TemplateArgumentsAsWritten->RAngleLoc,
+ Record);
+ }
+
+ Writer.AddSourceLocation(FTSInfo->getPointOfInstantiation(), Record);
+
+ if (D->isCanonicalDecl()) {
+ // Write the template that contains the specializations set. We will
+ // add a FunctionTemplateSpecializationInfo to it when reading.
+ Writer.AddDeclRef(FTSInfo->getTemplate()->getCanonicalDecl(), Record);
+ }
+ break;
+ }
+ case FunctionDecl::TK_DependentFunctionTemplateSpecialization: {
+ DependentFunctionTemplateSpecializationInfo *
+ DFTSInfo = D->getDependentSpecializationInfo();
+
+ // Templates.
+ Record.push_back(DFTSInfo->getNumTemplates());
+ for (int i=0, e = DFTSInfo->getNumTemplates(); i != e; ++i)
+ Writer.AddDeclRef(DFTSInfo->getTemplate(i), Record);
+
+ // Templates args.
+ Record.push_back(DFTSInfo->getNumTemplateArgs());
+ for (int i=0, e = DFTSInfo->getNumTemplateArgs(); i != e; ++i)
+ Writer.AddTemplateArgumentLoc(DFTSInfo->getTemplateArg(i), Record);
+ Writer.AddSourceLocation(DFTSInfo->getLAngleLoc(), Record);
+ Writer.AddSourceLocation(DFTSInfo->getRAngleLoc(), Record);
+ break;
+ }
+ }
+
+ Record.push_back(D->param_size());
+ for (auto P : D->params())
+ Writer.AddDeclRef(P, Record);
+ Code = serialization::DECL_FUNCTION;
+}
+
+void ASTDeclWriter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
+ VisitNamedDecl(D);
+ // FIXME: convert to LazyStmtPtr?
+ // Unlike C/C++, method bodies will never be in header files.
+ bool HasBodyStuff = D->getBody() != nullptr ||
+ D->getSelfDecl() != nullptr || D->getCmdDecl() != nullptr;
+ Record.push_back(HasBodyStuff);
+ if (HasBodyStuff) {
+ Writer.AddStmt(D->getBody());
+ Writer.AddDeclRef(D->getSelfDecl(), Record);
+ Writer.AddDeclRef(D->getCmdDecl(), Record);
+ }
+ Record.push_back(D->isInstanceMethod());
+ Record.push_back(D->isVariadic());
+ Record.push_back(D->isPropertyAccessor());
+ Record.push_back(D->isDefined());
+ Record.push_back(D->IsOverriding);
+ Record.push_back(D->HasSkippedBody);
+
+ Record.push_back(D->IsRedeclaration);
+ Record.push_back(D->HasRedeclaration);
+ if (D->HasRedeclaration) {
+ assert(Context.getObjCMethodRedeclaration(D));
+ Writer.AddDeclRef(Context.getObjCMethodRedeclaration(D), Record);
+ }
+
+ // FIXME: stable encoding for @required/@optional
+ Record.push_back(D->getImplementationControl());
+ // FIXME: stable encoding for in/out/inout/bycopy/byref/oneway/nullability
+ Record.push_back(D->getObjCDeclQualifier());
+ Record.push_back(D->hasRelatedResultType());
+ Writer.AddTypeRef(D->getReturnType(), Record);
+ Writer.AddTypeSourceInfo(D->getReturnTypeSourceInfo(), Record);
+ Writer.AddSourceLocation(D->getLocEnd(), Record);
+ Record.push_back(D->param_size());
+ for (const auto *P : D->params())
+ Writer.AddDeclRef(P, Record);
+
+ Record.push_back(D->SelLocsKind);
+ unsigned NumStoredSelLocs = D->getNumStoredSelLocs();
+ SourceLocation *SelLocs = D->getStoredSelLocs();
+ Record.push_back(NumStoredSelLocs);
+ for (unsigned i = 0; i != NumStoredSelLocs; ++i)
+ Writer.AddSourceLocation(SelLocs[i], Record);
+
+ Code = serialization::DECL_OBJC_METHOD;
+}
+
+void ASTDeclWriter::VisitObjCTypeParamDecl(ObjCTypeParamDecl *D) {
+ VisitTypedefNameDecl(D);
+ Record.push_back(D->Variance);
+ Record.push_back(D->Index);
+ Writer.AddSourceLocation(D->VarianceLoc, Record);
+ Writer.AddSourceLocation(D->ColonLoc, Record);
+
+ Code = serialization::DECL_OBJC_TYPE_PARAM;
+}
+
+void ASTDeclWriter::VisitObjCContainerDecl(ObjCContainerDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddSourceLocation(D->getAtStartLoc(), Record);
+ Writer.AddSourceRange(D->getAtEndRange(), Record);
+ // Abstract class (no need to define a stable serialization::DECL code).
+}
+
+void ASTDeclWriter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
+ VisitRedeclarable(D);
+ VisitObjCContainerDecl(D);
+ Writer.AddTypeRef(QualType(D->getTypeForDecl(), 0), Record);
+ AddObjCTypeParamList(D->TypeParamList);
+
+ Record.push_back(D->isThisDeclarationADefinition());
+ if (D->isThisDeclarationADefinition()) {
+ // Write the DefinitionData
+ ObjCInterfaceDecl::DefinitionData &Data = D->data();
+
+ Writer.AddTypeSourceInfo(D->getSuperClassTInfo(), Record);
+ Writer.AddSourceLocation(D->getEndOfDefinitionLoc(), Record);
+ Record.push_back(Data.HasDesignatedInitializers);
+
+ // Write out the protocols that are directly referenced by the @interface.
+ Record.push_back(Data.ReferencedProtocols.size());
+ for (const auto *P : D->protocols())
+ Writer.AddDeclRef(P, Record);
+ for (const auto &PL : D->protocol_locs())
+ Writer.AddSourceLocation(PL, Record);
+
+ // Write out the protocols that are transitively referenced.
+ Record.push_back(Data.AllReferencedProtocols.size());
+ for (ObjCList<ObjCProtocolDecl>::iterator
+ P = Data.AllReferencedProtocols.begin(),
+ PEnd = Data.AllReferencedProtocols.end();
+ P != PEnd; ++P)
+ Writer.AddDeclRef(*P, Record);
+
+ if (ObjCCategoryDecl *Cat = D->getCategoryListRaw()) {
+ // Ensure that we write out the set of categories for this class.
+ Writer.ObjCClassesWithCategories.insert(D);
+
+ // Make sure that the categories get serialized.
+ for (; Cat; Cat = Cat->getNextClassCategoryRaw())
+ (void)Writer.GetDeclRef(Cat);
+ }
+ }
+
+ Code = serialization::DECL_OBJC_INTERFACE;
+}
+
+void ASTDeclWriter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
+ VisitFieldDecl(D);
+ // FIXME: stable encoding for @public/@private/@protected/@package
+ Record.push_back(D->getAccessControl());
+ Record.push_back(D->getSynthesize());
+
+ if (D->getDeclContext() == D->getLexicalDeclContext() &&
+ !D->hasAttrs() &&
+ !D->isImplicit() &&
+ !D->isUsed(false) &&
+ !D->isInvalidDecl() &&
+ !D->isReferenced() &&
+ !D->isModulePrivate() &&
+ !D->getBitWidth() &&
+ !D->hasExtInfo() &&
+ D->getDeclName())
+ AbbrevToUse = Writer.getDeclObjCIvarAbbrev();
+
+ Code = serialization::DECL_OBJC_IVAR;
+}
+
+void ASTDeclWriter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
+ VisitRedeclarable(D);
+ VisitObjCContainerDecl(D);
+
+ Record.push_back(D->isThisDeclarationADefinition());
+ if (D->isThisDeclarationADefinition()) {
+ Record.push_back(D->protocol_size());
+ for (const auto *I : D->protocols())
+ Writer.AddDeclRef(I, Record);
+ for (const auto &PL : D->protocol_locs())
+ Writer.AddSourceLocation(PL, Record);
+ }
+
+ Code = serialization::DECL_OBJC_PROTOCOL;
+}
+
+void ASTDeclWriter::VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *D) {
+ VisitFieldDecl(D);
+ Code = serialization::DECL_OBJC_AT_DEFS_FIELD;
+}
+
+void ASTDeclWriter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
+ VisitObjCContainerDecl(D);
+ Writer.AddSourceLocation(D->getCategoryNameLoc(), Record);
+ Writer.AddSourceLocation(D->getIvarLBraceLoc(), Record);
+ Writer.AddSourceLocation(D->getIvarRBraceLoc(), Record);
+ Writer.AddDeclRef(D->getClassInterface(), Record);
+ AddObjCTypeParamList(D->TypeParamList);
+ Record.push_back(D->protocol_size());
+ for (const auto *I : D->protocols())
+ Writer.AddDeclRef(I, Record);
+ for (const auto &PL : D->protocol_locs())
+ Writer.AddSourceLocation(PL, Record);
+ Code = serialization::DECL_OBJC_CATEGORY;
+}
+
+void ASTDeclWriter::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddDeclRef(D->getClassInterface(), Record);
+ Code = serialization::DECL_OBJC_COMPATIBLE_ALIAS;
+}
+
+void ASTDeclWriter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddSourceLocation(D->getAtLoc(), Record);
+ Writer.AddSourceLocation(D->getLParenLoc(), Record);
+ Writer.AddTypeRef(D->getType(), Record);
+ Writer.AddTypeSourceInfo(D->getTypeSourceInfo(), Record);
+ // FIXME: stable encoding
+ Record.push_back((unsigned)D->getPropertyAttributes());
+ Record.push_back((unsigned)D->getPropertyAttributesAsWritten());
+ // FIXME: stable encoding
+ Record.push_back((unsigned)D->getPropertyImplementation());
+ Writer.AddDeclarationName(D->getGetterName(), Record);
+ Writer.AddDeclarationName(D->getSetterName(), Record);
+ Writer.AddDeclRef(D->getGetterMethodDecl(), Record);
+ Writer.AddDeclRef(D->getSetterMethodDecl(), Record);
+ Writer.AddDeclRef(D->getPropertyIvarDecl(), Record);
+ Code = serialization::DECL_OBJC_PROPERTY;
+}
+
+void ASTDeclWriter::VisitObjCImplDecl(ObjCImplDecl *D) {
+ VisitObjCContainerDecl(D);
+ Writer.AddDeclRef(D->getClassInterface(), Record);
+ // Abstract class (no need to define a stable serialization::DECL code).
+}
+
+void ASTDeclWriter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
+ VisitObjCImplDecl(D);
+ Writer.AddIdentifierRef(D->getIdentifier(), Record);
+ Writer.AddSourceLocation(D->getCategoryNameLoc(), Record);
+ Code = serialization::DECL_OBJC_CATEGORY_IMPL;
+}
+
+void ASTDeclWriter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
+ VisitObjCImplDecl(D);
+ Writer.AddDeclRef(D->getSuperClass(), Record);
+ Writer.AddSourceLocation(D->getSuperClassLoc(), Record);
+ Writer.AddSourceLocation(D->getIvarLBraceLoc(), Record);
+ Writer.AddSourceLocation(D->getIvarRBraceLoc(), Record);
+ Record.push_back(D->hasNonZeroConstructors());
+ Record.push_back(D->hasDestructors());
+ Record.push_back(D->NumIvarInitializers);
+ if (D->NumIvarInitializers)
+ Writer.AddCXXCtorInitializersRef(
+ llvm::makeArrayRef(D->init_begin(), D->init_end()), Record);
+ Code = serialization::DECL_OBJC_IMPLEMENTATION;
+}
+
+void ASTDeclWriter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
+ VisitDecl(D);
+ Writer.AddSourceLocation(D->getLocStart(), Record);
+ Writer.AddDeclRef(D->getPropertyDecl(), Record);
+ Writer.AddDeclRef(D->getPropertyIvarDecl(), Record);
+ Writer.AddSourceLocation(D->getPropertyIvarDeclLoc(), Record);
+ Writer.AddStmt(D->getGetterCXXConstructor());
+ Writer.AddStmt(D->getSetterCXXAssignment());
+ Code = serialization::DECL_OBJC_PROPERTY_IMPL;
+}
+
+void ASTDeclWriter::VisitFieldDecl(FieldDecl *D) {
+ VisitDeclaratorDecl(D);
+ Record.push_back(D->isMutable());
+ if (D->InitStorage.getInt() == FieldDecl::ISK_BitWidthOrNothing &&
+ D->InitStorage.getPointer() == nullptr) {
+ Record.push_back(0);
+ } else if (D->InitStorage.getInt() == FieldDecl::ISK_CapturedVLAType) {
+ Record.push_back(D->InitStorage.getInt() + 1);
+ Writer.AddTypeRef(
+ QualType(static_cast<Type *>(D->InitStorage.getPointer()), 0),
+ Record);
+ } else {
+ Record.push_back(D->InitStorage.getInt() + 1);
+ Writer.AddStmt(static_cast<Expr *>(D->InitStorage.getPointer()));
+ }
+ if (!D->getDeclName())
+ Writer.AddDeclRef(Context.getInstantiatedFromUnnamedFieldDecl(D), Record);
+
+ if (D->getDeclContext() == D->getLexicalDeclContext() &&
+ !D->hasAttrs() &&
+ !D->isImplicit() &&
+ !D->isUsed(false) &&
+ !D->isInvalidDecl() &&
+ !D->isReferenced() &&
+ !D->isTopLevelDeclInObjCContainer() &&
+ !D->isModulePrivate() &&
+ !D->getBitWidth() &&
+ !D->hasInClassInitializer() &&
+ !D->hasExtInfo() &&
+ !ObjCIvarDecl::classofKind(D->getKind()) &&
+ !ObjCAtDefsFieldDecl::classofKind(D->getKind()) &&
+ D->getDeclName())
+ AbbrevToUse = Writer.getDeclFieldAbbrev();
+
+ Code = serialization::DECL_FIELD;
+}
+
+void ASTDeclWriter::VisitMSPropertyDecl(MSPropertyDecl *D) {
+ VisitDeclaratorDecl(D);
+ Writer.AddIdentifierRef(D->getGetterId(), Record);
+ Writer.AddIdentifierRef(D->getSetterId(), Record);
+ Code = serialization::DECL_MS_PROPERTY;
+}
+
+void ASTDeclWriter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
+ VisitValueDecl(D);
+ Record.push_back(D->getChainingSize());
+
+ for (const auto *P : D->chain())
+ Writer.AddDeclRef(P, Record);
+ Code = serialization::DECL_INDIRECTFIELD;
+}
+
+void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
+ VisitRedeclarable(D);
+ VisitDeclaratorDecl(D);
+ Record.push_back(D->getStorageClass());
+ Record.push_back(D->getTSCSpec());
+ Record.push_back(D->getInitStyle());
+ if (!isa<ParmVarDecl>(D)) {
+ Record.push_back(D->isExceptionVariable());
+ Record.push_back(D->isNRVOVariable());
+ Record.push_back(D->isCXXForRangeDecl());
+ Record.push_back(D->isARCPseudoStrong());
+ Record.push_back(D->isConstexpr());
+ Record.push_back(D->isInitCapture());
+ Record.push_back(D->isPreviousDeclInSameBlockScope());
+ }
+ Record.push_back(D->getLinkageInternal());
+
+ if (D->getInit()) {
+ Record.push_back(!D->isInitKnownICE() ? 1 : (D->isInitICE() ? 3 : 2));
+ Writer.AddStmt(D->getInit());
+ } else {
+ Record.push_back(0);
+ }
+
+ enum {
+ VarNotTemplate = 0, VarTemplate, StaticDataMemberSpecialization
+ };
+ if (VarTemplateDecl *TemplD = D->getDescribedVarTemplate()) {
+ Record.push_back(VarTemplate);
+ Writer.AddDeclRef(TemplD, Record);
+ } else if (MemberSpecializationInfo *SpecInfo
+ = D->getMemberSpecializationInfo()) {
+ Record.push_back(StaticDataMemberSpecialization);
+ Writer.AddDeclRef(SpecInfo->getInstantiatedFrom(), Record);
+ Record.push_back(SpecInfo->getTemplateSpecializationKind());
+ Writer.AddSourceLocation(SpecInfo->getPointOfInstantiation(), Record);
+ } else {
+ Record.push_back(VarNotTemplate);
+ }
+
+ if (D->getDeclContext() == D->getLexicalDeclContext() &&
+ !D->hasAttrs() &&
+ !D->isImplicit() &&
+ !D->isUsed(false) &&
+ !D->isInvalidDecl() &&
+ !D->isReferenced() &&
+ !D->isTopLevelDeclInObjCContainer() &&
+ D->getAccess() == AS_none &&
+ !D->isModulePrivate() &&
+ !needsAnonymousDeclarationNumber(D) &&
+ D->getDeclName().getNameKind() == DeclarationName::Identifier &&
+ !D->hasExtInfo() &&
+ D->getFirstDecl() == D->getMostRecentDecl() &&
+ D->getInitStyle() == VarDecl::CInit &&
+ D->getInit() == nullptr &&
+ !isa<ParmVarDecl>(D) &&
+ !isa<VarTemplateSpecializationDecl>(D) &&
+ !D->isConstexpr() &&
+ !D->isInitCapture() &&
+ !D->isPreviousDeclInSameBlockScope() &&
+ !D->getMemberSpecializationInfo())
+ AbbrevToUse = Writer.getDeclVarAbbrev();
+
+ Code = serialization::DECL_VAR;
+}
+
+void ASTDeclWriter::VisitImplicitParamDecl(ImplicitParamDecl *D) {
+ VisitVarDecl(D);
+ Code = serialization::DECL_IMPLICIT_PARAM;
+}
+
+void ASTDeclWriter::VisitParmVarDecl(ParmVarDecl *D) {
+ VisitVarDecl(D);
+ Record.push_back(D->isObjCMethodParameter());
+ Record.push_back(D->getFunctionScopeDepth());
+ Record.push_back(D->getFunctionScopeIndex());
+ Record.push_back(D->getObjCDeclQualifier()); // FIXME: stable encoding
+ Record.push_back(D->isKNRPromoted());
+ Record.push_back(D->hasInheritedDefaultArg());
+ Record.push_back(D->hasUninstantiatedDefaultArg());
+ if (D->hasUninstantiatedDefaultArg())
+ Writer.AddStmt(D->getUninstantiatedDefaultArg());
+ Code = serialization::DECL_PARM_VAR;
+
+ assert(!D->isARCPseudoStrong()); // can be true of ImplicitParamDecl
+
+ // If the assumptions about the DECL_PARM_VAR abbrev are true, use it. Here
+ // we dynamically check for the properties the abbreviation optimizes for,
+ // since they are not guaranteed to hold for every PARM_VAR_DECL.
+ if (D->getDeclContext() == D->getLexicalDeclContext() &&
+ !D->hasAttrs() &&
+ !D->hasExtInfo() &&
+ !D->isImplicit() &&
+ !D->isUsed(false) &&
+ !D->isInvalidDecl() &&
+ !D->isReferenced() &&
+ D->getAccess() == AS_none &&
+ !D->isModulePrivate() &&
+ D->getStorageClass() == 0 &&
+ D->getInitStyle() == VarDecl::CInit && // Can params have anything else?
+ D->getFunctionScopeDepth() == 0 &&
+ D->getObjCDeclQualifier() == 0 &&
+ !D->isKNRPromoted() &&
+ !D->hasInheritedDefaultArg() &&
+ D->getInit() == nullptr &&
+ !D->hasUninstantiatedDefaultArg()) // No default expr.
+ AbbrevToUse = Writer.getDeclParmVarAbbrev();
+
+ // Check things we know are true of *every* PARM_VAR_DECL, rather than
+ // merely assuming them.
+ assert(!D->getTSCSpec() && "PARM_VAR_DECL can't use TLS");
+ assert(D->getAccess() == AS_none && "PARM_VAR_DECL can't be public/private");
+ assert(!D->isExceptionVariable() && "PARM_VAR_DECL can't be exception var");
+ assert(D->getPreviousDecl() == nullptr && "PARM_VAR_DECL can't be redecl");
+ assert(!D->isStaticDataMember() &&
+ "PARM_VAR_DECL can't be static data member");
+}
+
+void ASTDeclWriter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) {
+ VisitDecl(D);
+ Writer.AddStmt(D->getAsmString());
+ Writer.AddSourceLocation(D->getRParenLoc(), Record);
+ Code = serialization::DECL_FILE_SCOPE_ASM;
+}
+
+void ASTDeclWriter::VisitEmptyDecl(EmptyDecl *D) {
+ VisitDecl(D);
+ Code = serialization::DECL_EMPTY;
+}
+
+void ASTDeclWriter::VisitBlockDecl(BlockDecl *D) {
+ VisitDecl(D);
+ Writer.AddStmt(D->getBody());
+ Writer.AddTypeSourceInfo(D->getSignatureAsWritten(), Record);
+ Record.push_back(D->param_size());
+ for (FunctionDecl::param_iterator P = D->param_begin(), PEnd = D->param_end();
+ P != PEnd; ++P)
+ Writer.AddDeclRef(*P, Record);
+ Record.push_back(D->isVariadic());
+ Record.push_back(D->blockMissingReturnType());
+ Record.push_back(D->isConversionFromLambda());
+ Record.push_back(D->capturesCXXThis());
+ Record.push_back(D->getNumCaptures());
+ for (const auto &capture : D->captures()) {
+ Writer.AddDeclRef(capture.getVariable(), Record);
+
+ unsigned flags = 0;
+ if (capture.isByRef()) flags |= 1;
+ if (capture.isNested()) flags |= 2;
+ if (capture.hasCopyExpr()) flags |= 4;
+ Record.push_back(flags);
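+ // (Illustrative: a nested capture that carries a copy expression encodes
+ // as flags == 2|4 == 6; the reader must decode the same bit assignments.)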
+
+ if (capture.hasCopyExpr()) Writer.AddStmt(capture.getCopyExpr());
+ }
+
+ Code = serialization::DECL_BLOCK;
+}
+
+void ASTDeclWriter::VisitCapturedDecl(CapturedDecl *CD) {
+ Record.push_back(CD->getNumParams());
+ VisitDecl(CD);
+ Record.push_back(CD->getContextParamPosition());
+ Record.push_back(CD->isNothrow() ? 1 : 0);
+ // Body is stored by VisitCapturedStmt.
+ for (unsigned I = 0; I < CD->getNumParams(); ++I)
+ Writer.AddDeclRef(CD->getParam(I), Record);
+ Code = serialization::DECL_CAPTURED;
+}
+
+void ASTDeclWriter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
+ VisitDecl(D);
+ Record.push_back(D->getLanguage());
+ Writer.AddSourceLocation(D->getExternLoc(), Record);
+ Writer.AddSourceLocation(D->getRBraceLoc(), Record);
+ Code = serialization::DECL_LINKAGE_SPEC;
+}
+
+void ASTDeclWriter::VisitLabelDecl(LabelDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddSourceLocation(D->getLocStart(), Record);
+ Code = serialization::DECL_LABEL;
+}
+
+void ASTDeclWriter::VisitNamespaceDecl(NamespaceDecl *D) {
+ VisitRedeclarable(D);
+ VisitNamedDecl(D);
+ Record.push_back(D->isInline());
+ Writer.AddSourceLocation(D->getLocStart(), Record);
+ Writer.AddSourceLocation(D->getRBraceLoc(), Record);
+
+ if (D->isOriginalNamespace())
+ Writer.AddDeclRef(D->getAnonymousNamespace(), Record);
+ Code = serialization::DECL_NAMESPACE;
+
+ if (Writer.hasChain() && D->isAnonymousNamespace() &&
+ D == D->getMostRecentDecl()) {
+ // This is the most recent reopening of the anonymous namespace. If its parent
+ // is in a previous PCH (or is the TU), mark that parent for update, because
+ // the original namespace always points to the latest re-opening of its
+ // anonymous namespace.
+ Decl *Parent = cast<Decl>(
+ D->getParent()->getRedeclContext()->getPrimaryContext());
+ if (Parent->isFromASTFile() || isa<TranslationUnitDecl>(Parent)) {
+ Writer.DeclUpdates[Parent].push_back(
+ ASTWriter::DeclUpdate(UPD_CXX_ADDED_ANONYMOUS_NAMESPACE, D));
+ }
+ }
+}
+
+void ASTDeclWriter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
+ VisitRedeclarable(D);
+ VisitNamedDecl(D);
+ Writer.AddSourceLocation(D->getNamespaceLoc(), Record);
+ Writer.AddSourceLocation(D->getTargetNameLoc(), Record);
+ Writer.AddNestedNameSpecifierLoc(D->getQualifierLoc(), Record);
+ Writer.AddDeclRef(D->getNamespace(), Record);
+ Code = serialization::DECL_NAMESPACE_ALIAS;
+}
+
+void ASTDeclWriter::VisitUsingDecl(UsingDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddSourceLocation(D->getUsingLoc(), Record);
+ Writer.AddNestedNameSpecifierLoc(D->getQualifierLoc(), Record);
+ Writer.AddDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record);
+ Writer.AddDeclRef(D->FirstUsingShadow.getPointer(), Record);
+ Record.push_back(D->hasTypename());
+ Writer.AddDeclRef(Context.getInstantiatedFromUsingDecl(D), Record);
+ Code = serialization::DECL_USING;
+}
+
+void ASTDeclWriter::VisitUsingShadowDecl(UsingShadowDecl *D) {
+ VisitRedeclarable(D);
+ VisitNamedDecl(D);
+ Writer.AddDeclRef(D->getTargetDecl(), Record);
+ Writer.AddDeclRef(D->UsingOrNextShadow, Record);
+ Writer.AddDeclRef(Context.getInstantiatedFromUsingShadowDecl(D), Record);
+ Code = serialization::DECL_USING_SHADOW;
+}
+
+void ASTDeclWriter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddSourceLocation(D->getUsingLoc(), Record);
+ Writer.AddSourceLocation(D->getNamespaceKeyLocation(), Record);
+ Writer.AddNestedNameSpecifierLoc(D->getQualifierLoc(), Record);
+ Writer.AddDeclRef(D->getNominatedNamespace(), Record);
+ Writer.AddDeclRef(dyn_cast<Decl>(D->getCommonAncestor()), Record);
+ Code = serialization::DECL_USING_DIRECTIVE;
+}
+
+void ASTDeclWriter::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
+ VisitValueDecl(D);
+ Writer.AddSourceLocation(D->getUsingLoc(), Record);
+ Writer.AddNestedNameSpecifierLoc(D->getQualifierLoc(), Record);
+ Writer.AddDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record);
+ Code = serialization::DECL_UNRESOLVED_USING_VALUE;
+}
+
+void ASTDeclWriter::VisitUnresolvedUsingTypenameDecl(
+ UnresolvedUsingTypenameDecl *D) {
+ VisitTypeDecl(D);
+ Writer.AddSourceLocation(D->getTypenameLoc(), Record);
+ Writer.AddNestedNameSpecifierLoc(D->getQualifierLoc(), Record);
+ Code = serialization::DECL_UNRESOLVED_USING_TYPENAME;
+}
+
+void ASTDeclWriter::VisitCXXRecordDecl(CXXRecordDecl *D) {
+ VisitRecordDecl(D);
+
+ enum {
+ CXXRecNotTemplate = 0, CXXRecTemplate, CXXRecMemberSpecialization
+ };
+ if (ClassTemplateDecl *TemplD = D->getDescribedClassTemplate()) {
+ Record.push_back(CXXRecTemplate);
+ Writer.AddDeclRef(TemplD, Record);
+ } else if (MemberSpecializationInfo *MSInfo
+ = D->getMemberSpecializationInfo()) {
+ Record.push_back(CXXRecMemberSpecialization);
+ Writer.AddDeclRef(MSInfo->getInstantiatedFrom(), Record);
+ Record.push_back(MSInfo->getTemplateSpecializationKind());
+ Writer.AddSourceLocation(MSInfo->getPointOfInstantiation(), Record);
+ } else {
+ Record.push_back(CXXRecNotTemplate);
+ }
+
+ Record.push_back(D->isThisDeclarationADefinition());
+ if (D->isThisDeclarationADefinition())
+ Writer.AddCXXDefinitionData(D, Record);
+
+ // Store (what we currently believe to be) the key function to avoid
+ // deserializing every method so we can compute it.
+ if (D->IsCompleteDefinition)
+ Writer.AddDeclRef(Context.getCurrentKeyFunction(D), Record);
+
+ Code = serialization::DECL_CXX_RECORD;
+}
+
+void ASTDeclWriter::VisitCXXMethodDecl(CXXMethodDecl *D) {
+ VisitFunctionDecl(D);
+ if (D->isCanonicalDecl()) {
+ Record.push_back(D->size_overridden_methods());
+ for (CXXMethodDecl::method_iterator
+ I = D->begin_overridden_methods(), E = D->end_overridden_methods();
+ I != E; ++I)
+ Writer.AddDeclRef(*I, Record);
+ } else {
+ // We only need to record overridden methods once for the canonical decl.
+ Record.push_back(0);
+ }
+
+ if (D->getDeclContext() == D->getLexicalDeclContext() &&
+ D->getFirstDecl() == D->getMostRecentDecl() &&
+ !D->isInvalidDecl() &&
+ !D->hasAttrs() &&
+ !D->isTopLevelDeclInObjCContainer() &&
+ D->getDeclName().getNameKind() == DeclarationName::Identifier &&
+ !D->hasExtInfo() &&
+ !D->hasInheritedPrototype() &&
+ D->hasWrittenPrototype())
+ AbbrevToUse = Writer.getDeclCXXMethodAbbrev();
+
+ Code = serialization::DECL_CXX_METHOD;
+}
+
+void ASTDeclWriter::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
+ VisitCXXMethodDecl(D);
+
+ Writer.AddDeclRef(D->getInheritedConstructor(), Record);
+ Record.push_back(D->IsExplicitSpecified);
+
+ Code = serialization::DECL_CXX_CONSTRUCTOR;
+}
+
+void ASTDeclWriter::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
+ VisitCXXMethodDecl(D);
+
+ Writer.AddDeclRef(D->getOperatorDelete(), Record);
+
+ Code = serialization::DECL_CXX_DESTRUCTOR;
+}
+
+void ASTDeclWriter::VisitCXXConversionDecl(CXXConversionDecl *D) {
+ VisitCXXMethodDecl(D);
+ Record.push_back(D->IsExplicitSpecified);
+ Code = serialization::DECL_CXX_CONVERSION;
+}
+
+void ASTDeclWriter::VisitImportDecl(ImportDecl *D) {
+ VisitDecl(D);
+ Record.push_back(Writer.getSubmoduleID(D->getImportedModule()));
+ ArrayRef<SourceLocation> IdentifierLocs = D->getIdentifierLocs();
+ Record.push_back(!IdentifierLocs.empty());
+ if (IdentifierLocs.empty()) {
+ Writer.AddSourceLocation(D->getLocEnd(), Record);
+ Record.push_back(1);
+ } else {
+ for (unsigned I = 0, N = IdentifierLocs.size(); I != N; ++I)
+ Writer.AddSourceLocation(IdentifierLocs[I], Record);
+ Record.push_back(IdentifierLocs.size());
+ }
+ // Note: the number of source locations must always be the last element in
+ // the record.
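+ // (Sketch: an import with two written identifier locs ends [..., loc1,
+ // loc2, 2], while the implicit form ends [..., endLoc, 1]; keeping the
+ // count last lets a reader find it at a fixed distance from the end.)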
+ Code = serialization::DECL_IMPORT;
+}
+
+void ASTDeclWriter::VisitAccessSpecDecl(AccessSpecDecl *D) {
+ VisitDecl(D);
+ Writer.AddSourceLocation(D->getColonLoc(), Record);
+ Code = serialization::DECL_ACCESS_SPEC;
+}
+
+void ASTDeclWriter::VisitFriendDecl(FriendDecl *D) {
+ // Record the number of friend type template parameter lists here
+ // so as to simplify memory allocation during deserialization.
+ Record.push_back(D->NumTPLists);
+ VisitDecl(D);
+ bool hasFriendDecl = D->Friend.is<NamedDecl*>();
+ Record.push_back(hasFriendDecl);
+ if (hasFriendDecl)
+ Writer.AddDeclRef(D->getFriendDecl(), Record);
+ else
+ Writer.AddTypeSourceInfo(D->getFriendType(), Record);
+ for (unsigned i = 0; i < D->NumTPLists; ++i)
+ Writer.AddTemplateParameterList(D->getFriendTypeTemplateParameterList(i),
+ Record);
+ Writer.AddDeclRef(D->getNextFriend(), Record);
+ Record.push_back(D->UnsupportedFriend);
+ Writer.AddSourceLocation(D->FriendLoc, Record);
+ Code = serialization::DECL_FRIEND;
+}
+
+void ASTDeclWriter::VisitFriendTemplateDecl(FriendTemplateDecl *D) {
+ VisitDecl(D);
+ Record.push_back(D->getNumTemplateParameters());
+ for (unsigned i = 0, e = D->getNumTemplateParameters(); i != e; ++i)
+ Writer.AddTemplateParameterList(D->getTemplateParameterList(i), Record);
+ Record.push_back(D->getFriendDecl() != nullptr);
+ if (D->getFriendDecl())
+ Writer.AddDeclRef(D->getFriendDecl(), Record);
+ else
+ Writer.AddTypeSourceInfo(D->getFriendType(), Record);
+ Writer.AddSourceLocation(D->getFriendLoc(), Record);
+ Code = serialization::DECL_FRIEND_TEMPLATE;
+}
+
+void ASTDeclWriter::VisitTemplateDecl(TemplateDecl *D) {
+ VisitNamedDecl(D);
+
+ Writer.AddDeclRef(D->getTemplatedDecl(), Record);
+ Writer.AddTemplateParameterList(D->getTemplateParameters(), Record);
+}
+
+void ASTDeclWriter::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
+ VisitRedeclarable(D);
+
+ // Emit data to initialize CommonOrPrev before VisitTemplateDecl so that
+ // getCommonPtr() can be used while this is still initializing.
+ if (D->isFirstDecl()) {
+ // This declaration owns the 'common' pointer, so serialize that data now.
+ Writer.AddDeclRef(D->getInstantiatedFromMemberTemplate(), Record);
+ if (D->getInstantiatedFromMemberTemplate())
+ Record.push_back(D->isMemberSpecialization());
+ }
+
+ VisitTemplateDecl(D);
+ Record.push_back(D->getIdentifierNamespace());
+}
+
+void ASTDeclWriter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
+ VisitRedeclarableTemplateDecl(D);
+
+ if (D->isFirstDecl())
+ AddTemplateSpecializations(D);
+ Code = serialization::DECL_CLASS_TEMPLATE;
+}
+
+void ASTDeclWriter::VisitClassTemplateSpecializationDecl(
+ ClassTemplateSpecializationDecl *D) {
+ RegisterTemplateSpecialization(D->getSpecializedTemplate(), D);
+
+ VisitCXXRecordDecl(D);
+
+ llvm::PointerUnion<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *> InstFrom
+ = D->getSpecializedTemplateOrPartial();
+ if (Decl *InstFromD = InstFrom.dyn_cast<ClassTemplateDecl *>()) {
+ Writer.AddDeclRef(InstFromD, Record);
+ } else {
+ Writer.AddDeclRef(InstFrom.get<ClassTemplatePartialSpecializationDecl *>(),
+ Record);
+ Writer.AddTemplateArgumentList(&D->getTemplateInstantiationArgs(), Record);
+ }
+
+ Writer.AddTemplateArgumentList(&D->getTemplateArgs(), Record);
+ Writer.AddSourceLocation(D->getPointOfInstantiation(), Record);
+ Record.push_back(D->getSpecializationKind());
+ Record.push_back(D->isCanonicalDecl());
+
+ if (D->isCanonicalDecl()) {
+ // When reading, we'll add it to the folding set of the following template.
+ Writer.AddDeclRef(D->getSpecializedTemplate()->getCanonicalDecl(), Record);
+ }
+
+ // Explicit info.
+ Writer.AddTypeSourceInfo(D->getTypeAsWritten(), Record);
+ if (D->getTypeAsWritten()) {
+ Writer.AddSourceLocation(D->getExternLoc(), Record);
+ Writer.AddSourceLocation(D->getTemplateKeywordLoc(), Record);
+ }
+
+ Code = serialization::DECL_CLASS_TEMPLATE_SPECIALIZATION;
+}
+
+void ASTDeclWriter::VisitClassTemplatePartialSpecializationDecl(
+ ClassTemplatePartialSpecializationDecl *D) {
+ VisitClassTemplateSpecializationDecl(D);
+
+ Writer.AddTemplateParameterList(D->getTemplateParameters(), Record);
+ Writer.AddASTTemplateArgumentListInfo(D->getTemplateArgsAsWritten(), Record);
+
+ // These are read/set from/to the first declaration.
+ if (D->getPreviousDecl() == nullptr) {
+ Writer.AddDeclRef(D->getInstantiatedFromMember(), Record);
+ Record.push_back(D->isMemberSpecialization());
+ }
+
+ Code = serialization::DECL_CLASS_TEMPLATE_PARTIAL_SPECIALIZATION;
+}
+
+void ASTDeclWriter::VisitVarTemplateDecl(VarTemplateDecl *D) {
+ VisitRedeclarableTemplateDecl(D);
+
+ if (D->isFirstDecl())
+ AddTemplateSpecializations(D);
+ Code = serialization::DECL_VAR_TEMPLATE;
+}
+
+void ASTDeclWriter::VisitVarTemplateSpecializationDecl(
+ VarTemplateSpecializationDecl *D) {
+ RegisterTemplateSpecialization(D->getSpecializedTemplate(), D);
+
+ VisitVarDecl(D);
+
+ llvm::PointerUnion<VarTemplateDecl *, VarTemplatePartialSpecializationDecl *>
+ InstFrom = D->getSpecializedTemplateOrPartial();
+ if (Decl *InstFromD = InstFrom.dyn_cast<VarTemplateDecl *>()) {
+ Writer.AddDeclRef(InstFromD, Record);
+ } else {
+ Writer.AddDeclRef(InstFrom.get<VarTemplatePartialSpecializationDecl *>(),
+ Record);
+ Writer.AddTemplateArgumentList(&D->getTemplateInstantiationArgs(), Record);
+ }
+
+ // Explicit info.
+ Writer.AddTypeSourceInfo(D->getTypeAsWritten(), Record);
+ if (D->getTypeAsWritten()) {
+ Writer.AddSourceLocation(D->getExternLoc(), Record);
+ Writer.AddSourceLocation(D->getTemplateKeywordLoc(), Record);
+ }
+
+ Writer.AddTemplateArgumentList(&D->getTemplateArgs(), Record);
+ Writer.AddSourceLocation(D->getPointOfInstantiation(), Record);
+ Record.push_back(D->getSpecializationKind());
+ Record.push_back(D->isCanonicalDecl());
+
+ if (D->isCanonicalDecl()) {
+ // When reading, we'll add it to the folding set of the following template.
+ Writer.AddDeclRef(D->getSpecializedTemplate()->getCanonicalDecl(), Record);
+ }
+
+ Code = serialization::DECL_VAR_TEMPLATE_SPECIALIZATION;
+}
+
+void ASTDeclWriter::VisitVarTemplatePartialSpecializationDecl(
+ VarTemplatePartialSpecializationDecl *D) {
+ VisitVarTemplateSpecializationDecl(D);
+
+ Writer.AddTemplateParameterList(D->getTemplateParameters(), Record);
+ Writer.AddASTTemplateArgumentListInfo(D->getTemplateArgsAsWritten(), Record);
+
+ // These are read/set from/to the first declaration.
+ if (D->getPreviousDecl() == nullptr) {
+ Writer.AddDeclRef(D->getInstantiatedFromMember(), Record);
+ Record.push_back(D->isMemberSpecialization());
+ }
+
+ Code = serialization::DECL_VAR_TEMPLATE_PARTIAL_SPECIALIZATION;
+}
+
+void ASTDeclWriter::VisitClassScopeFunctionSpecializationDecl(
+ ClassScopeFunctionSpecializationDecl *D) {
+ VisitDecl(D);
+ Writer.AddDeclRef(D->getSpecialization(), Record);
+ Code = serialization::DECL_CLASS_SCOPE_FUNCTION_SPECIALIZATION;
+}
+
+
+void ASTDeclWriter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
+ VisitRedeclarableTemplateDecl(D);
+
+ if (D->isFirstDecl())
+ AddTemplateSpecializations(D);
+ Code = serialization::DECL_FUNCTION_TEMPLATE;
+}
+
+void ASTDeclWriter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
+ VisitTypeDecl(D);
+
+ Record.push_back(D->wasDeclaredWithTypename());
+
+ bool OwnsDefaultArg = D->hasDefaultArgument() &&
+ !D->defaultArgumentWasInherited();
+ Record.push_back(OwnsDefaultArg);
+ if (OwnsDefaultArg)
+ Writer.AddTypeSourceInfo(D->getDefaultArgumentInfo(), Record);
+
+ Code = serialization::DECL_TEMPLATE_TYPE_PARM;
+}
+
+void ASTDeclWriter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
+ // For an expanded parameter pack, record the number of expansion types here
+ // so that it's easier for deserialization to allocate the right amount of
+ // memory.
+ if (D->isExpandedParameterPack())
+ Record.push_back(D->getNumExpansionTypes());
+
+ VisitDeclaratorDecl(D);
+ // TemplateParmPosition.
+ Record.push_back(D->getDepth());
+ Record.push_back(D->getPosition());
+
+ if (D->isExpandedParameterPack()) {
+ for (unsigned I = 0, N = D->getNumExpansionTypes(); I != N; ++I) {
+ Writer.AddTypeRef(D->getExpansionType(I), Record);
+ Writer.AddTypeSourceInfo(D->getExpansionTypeSourceInfo(I), Record);
+ }
+
+ Code = serialization::DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK;
+ } else {
+ // Rest of NonTypeTemplateParmDecl.
+ Record.push_back(D->isParameterPack());
+ bool OwnsDefaultArg = D->hasDefaultArgument() &&
+ !D->defaultArgumentWasInherited();
+ Record.push_back(OwnsDefaultArg);
+ if (OwnsDefaultArg)
+ Writer.AddStmt(D->getDefaultArgument());
+ Code = serialization::DECL_NON_TYPE_TEMPLATE_PARM;
+ }
+}
+
+void ASTDeclWriter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
+ // For an expanded parameter pack, record the number of expansion types here
+ // so that it's easier for deserialization to allocate the right amount of
+ // memory.
+ if (D->isExpandedParameterPack())
+ Record.push_back(D->getNumExpansionTemplateParameters());
+
+ VisitTemplateDecl(D);
+ // TemplateParmPosition.
+ Record.push_back(D->getDepth());
+ Record.push_back(D->getPosition());
+
+ if (D->isExpandedParameterPack()) {
+ for (unsigned I = 0, N = D->getNumExpansionTemplateParameters();
+ I != N; ++I)
+ Writer.AddTemplateParameterList(D->getExpansionTemplateParameters(I),
+ Record);
+ Code = serialization::DECL_EXPANDED_TEMPLATE_TEMPLATE_PARM_PACK;
+ } else {
+ // Rest of TemplateTemplateParmDecl.
+ Record.push_back(D->isParameterPack());
+ bool OwnsDefaultArg = D->hasDefaultArgument() &&
+ !D->defaultArgumentWasInherited();
+ Record.push_back(OwnsDefaultArg);
+ if (OwnsDefaultArg)
+ Writer.AddTemplateArgumentLoc(D->getDefaultArgument(), Record);
+ Code = serialization::DECL_TEMPLATE_TEMPLATE_PARM;
+ }
+}
+
+void ASTDeclWriter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
+ VisitRedeclarableTemplateDecl(D);
+ Code = serialization::DECL_TYPE_ALIAS_TEMPLATE;
+}
+
+void ASTDeclWriter::VisitStaticAssertDecl(StaticAssertDecl *D) {
+ VisitDecl(D);
+ Writer.AddStmt(D->getAssertExpr());
+ Record.push_back(D->isFailed());
+ Writer.AddStmt(D->getMessage());
+ Writer.AddSourceLocation(D->getRParenLoc(), Record);
+ Code = serialization::DECL_STATIC_ASSERT;
+}
+
+/// \brief Emit the DeclContext part of a declaration context decl.
+///
+/// \param LexicalOffset the offset at which the DECL_CONTEXT_LEXICAL
+/// block for this declaration context is stored. May be 0 to indicate
+/// that there are no declarations stored within this context.
+///
+/// \param VisibleOffset the offset at which the DECL_CONTEXT_VISIBLE
+/// block for this declaration context is stored. May be 0 to indicate
+/// that there are no declarations visible from this context. Note
+/// that this value will not be emitted for non-primary declaration
+/// contexts.
+void ASTDeclWriter::VisitDeclContext(DeclContext *DC, uint64_t LexicalOffset,
+ uint64_t VisibleOffset) {
+ Record.push_back(LexicalOffset);
+ Record.push_back(VisibleOffset);
+}
+
+const Decl *ASTWriter::getFirstLocalDecl(const Decl *D) {
+ /// \brief Is this a local declaration (that is, one that will be written to
+ /// our AST file)? This is the case for declarations that are neither imported
+ /// from another AST file nor predefined.
+ auto IsLocalDecl = [&](const Decl *D) -> bool {
+ if (D->isFromASTFile())
+ return false;
+ auto I = DeclIDs.find(D);
+ return (I == DeclIDs.end() || I->second >= NUM_PREDEF_DECL_IDS);
+ };
+
+ assert(IsLocalDecl(D) && "expected a local declaration");
+
+ const Decl *Canon = D->getCanonicalDecl();
+ if (IsLocalDecl(Canon))
+ return Canon;
+
+ const Decl *&CacheEntry = FirstLocalDeclCache[Canon];
+ if (CacheEntry)
+ return CacheEntry;
+
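+ // Walk backward through the redeclaration chain; the last local
+ // declaration we see is the earliest one, which becomes the cached answer.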
+ for (const Decl *Redecl = D; Redecl; Redecl = Redecl->getPreviousDecl())
+ if (IsLocalDecl(Redecl))
+ D = Redecl;
+ return CacheEntry = D;
+}
+
+template <typename T>
+void ASTDeclWriter::VisitRedeclarable(Redeclarable<T> *D) {
+ T *First = D->getFirstDecl();
+ T *MostRecent = First->getMostRecentDecl();
+ T *DAsT = static_cast<T *>(D);
+ if (MostRecent != First) {
+ assert(isRedeclarableDeclKind(DAsT->getKind()) &&
+ "Not considered redeclarable?");
+
+ Writer.AddDeclRef(First, Record);
+
+ // Write out a list of local redeclarations of this declaration if it's the
+ // first local declaration in the chain.
+ const Decl *FirstLocal = Writer.getFirstLocalDecl(DAsT);
+ if (DAsT == FirstLocal) {
+ // Emit a list of all imported first declarations so that we can be sure
+ // that all redeclarations visible to this module are before D in the
+ // redecl chain.
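+ // Reserve a slot for the count and patch it below, once we know how many
+ // references were emitted.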
+ unsigned I = Record.size();
+ Record.push_back(0);
+ if (Writer.Chain)
+ AddFirstDeclFromEachModule(DAsT, /*IncludeLocal*/false);
+ // This is the number of imported first declarations + 1.
+ Record[I] = Record.size() - I;
+
+ // Collect the set of local redeclarations of this declaration, from
+ // newest to oldest.
+ RecordData LocalRedecls;
+ for (const Decl *Prev = FirstLocal->getMostRecentDecl();
+ Prev != FirstLocal; Prev = Prev->getPreviousDecl())
+ if (!Prev->isFromASTFile())
+ Writer.AddDeclRef(Prev, LocalRedecls);
+
+ // If we have any redecls, write them now as a separate record preceding
+ // the declaration itself.
+ if (LocalRedecls.empty())
+ Record.push_back(0);
+ else {
+ Record.push_back(Writer.Stream.GetCurrentBitNo());
+ Writer.Stream.EmitRecord(LOCAL_REDECLARATIONS, LocalRedecls);
+ }
+ } else {
+ Record.push_back(0);
+ Writer.AddDeclRef(FirstLocal, Record);
+ }
+
+ // Make sure that we serialize both the previous and the most-recent
+ // declarations, which (transitively) ensures that all declarations in the
+ // chain get serialized.
+ //
+ // FIXME: This is not correct; when we reach an imported declaration we
+ // won't emit its previous declaration.
+ (void)Writer.GetDeclRef(D->getPreviousDecl());
+ (void)Writer.GetDeclRef(MostRecent);
+ } else {
+ // We use the sentinel value 0 to indicate a declaration that has no
+ // redeclarations.
+ Record.push_back(0);
+ }
+}
+
+void ASTDeclWriter::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) {
+ Record.push_back(D->varlist_size());
+ VisitDecl(D);
+ for (auto *I : D->varlists())
+ Writer.AddStmt(I);
+ Code = serialization::DECL_OMP_THREADPRIVATE;
+}
+
+//===----------------------------------------------------------------------===//
+// ASTWriter Implementation
+//===----------------------------------------------------------------------===//
+
+void ASTWriter::WriteDeclAbbrevs() {
+ using namespace llvm;
+
+ BitCodeAbbrev *Abv;
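+ // Each abbreviation below fixes the layout of a frequently emitted record
+ // kind so the bitstream writer can encode it compactly. Operands declared
+ // with a literal value (e.g. 0) become part of the abbreviation definition
+ // and occupy no space in the emitted records.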
+
+ // Abbreviation for DECL_FIELD
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_FIELD));
+ // Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
+ Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
+ Abv->Add(BitCodeAbbrevOp(0)); // isUsed
+ Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // AccessSpecifier
+ Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
+ // NamedDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
+ Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber
+ // ValueDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ // DeclaratorDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerStartLoc
+ Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
+ // FieldDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isMutable
+ Abv->Add(BitCodeAbbrevOp(0)); // getBitWidth
+ // Type Source Info
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeLoc
+ DeclFieldAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for DECL_OBJC_IVAR
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_OBJC_IVAR));
+ // Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
+ Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
+ Abv->Add(BitCodeAbbrevOp(0)); // isUsed
+ Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // AccessSpecifier
+ Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
+ // NamedDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
+ Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber
+ // ValueDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ // DeclaratorDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerStartLoc
+ Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
+ // FieldDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isMutable
+ Abv->Add(BitCodeAbbrevOp(0)); // getBitWidth
+ // ObjC Ivar
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getAccessControl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getSynthesize
+ // Type Source Info
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeLoc
+ DeclObjCIvarAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for DECL_ENUM
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_ENUM));
+ // Redeclarable
+ Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
+ // Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
+ Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
+ Abv->Add(BitCodeAbbrevOp(0)); // isUsed
+ Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
+ Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
+ Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
+ // NamedDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
+ Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber
+ // TypeDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type Ref
+ // TagDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // IdentifierNamespace
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getTagKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isCompleteDefinition
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // EmbeddedInDeclarator
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsFreeStanding
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsCompleteDefinitionRequired
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SourceLocation
+ Abv->Add(BitCodeAbbrevOp(0)); // ExtInfoKind
+ // EnumDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // AddTypeRef
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // IntegerType
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getPromotionType
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getNumPositiveBits
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getNumNegativeBits
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isScoped
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isScopedUsingClassTag
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isFixed
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InstantiatedMembEnum
+ // DC
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalOffset
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // VisibleOffset
+ DeclEnumAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for DECL_RECORD
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_RECORD));
+ // Redeclarable
+ Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
+ // Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
+ Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
+ Abv->Add(BitCodeAbbrevOp(0)); // isUsed
+ Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
+ Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
+ Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
+ // NamedDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
+ Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber
+ // TypeDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type Ref
+ // TagDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // IdentifierNamespace
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getTagKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isCompleteDefinition
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // EmbeddedInDeclarator
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsFreeStanding
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsCompleteDefinitionRequired
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SourceLocation
+ Abv->Add(BitCodeAbbrevOp(0)); // ExtInfoKind
+ // RecordDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // FlexibleArrayMember
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // AnonymousStructUnion
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // hasObjectMember
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // hasVolatileMember
+ // DC
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalOffset
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // VisibleOffset
+ DeclRecordAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for DECL_PARM_VAR
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_PARM_VAR));
+ // Redeclarable
+ Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
+ // Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
+ Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
+ Abv->Add(BitCodeAbbrevOp(0)); // isUsed
+ Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
+ Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
+ Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
+ // NamedDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
+ Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber
+ // ValueDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ // DeclaratorDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerStartLoc
+ Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
+ // VarDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // StorageClass
+ Abv->Add(BitCodeAbbrevOp(0)); // getTSCSpec
+ Abv->Add(BitCodeAbbrevOp(0)); // hasCXXDirectInitializer
+ Abv->Add(BitCodeAbbrevOp(0)); // Linkage
+ Abv->Add(BitCodeAbbrevOp(0)); // HasInit
+ Abv->Add(BitCodeAbbrevOp(0)); // HasMemberSpecializationInfo
+ // ParmVarDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsObjCMethodParameter
+ Abv->Add(BitCodeAbbrevOp(0)); // ScopeDepth
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ScopeIndex
+ Abv->Add(BitCodeAbbrevOp(0)); // ObjCDeclQualifier
+ Abv->Add(BitCodeAbbrevOp(0)); // KNRPromoted
+ Abv->Add(BitCodeAbbrevOp(0)); // HasInheritedDefaultArg
+ Abv->Add(BitCodeAbbrevOp(0)); // HasUninstantiatedDefaultArg
+ // Type Source Info
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeLoc
+ DeclParmVarAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for DECL_TYPEDEF
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_TYPEDEF));
+ // Redeclarable
+ Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
+ // Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
+ Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isUsed
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // C++ AccessSpecifier
+ Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
+ // NamedDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
+ Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber
+ // TypeDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type Ref
+ // TypedefDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeLoc
+ DeclTypedefAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for DECL_VAR
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_VAR));
+ // Redeclarable
+ Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
+ // Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
+ Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
+ Abv->Add(BitCodeAbbrevOp(0)); // isUsed
+ Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
+ Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
+ Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
+ // NamedDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
+ Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber
+ // ValueDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ // DeclaratorDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerStartLoc
+ Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
+ // VarDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // StorageClass
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // getTSCSpec
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // CXXDirectInitializer
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isExceptionVariable
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isNRVOVariable
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isCXXForRangeDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isARCPseudoStrong
+ Abv->Add(BitCodeAbbrevOp(0)); // isConstexpr
+ Abv->Add(BitCodeAbbrevOp(0)); // isInitCapture
+ Abv->Add(BitCodeAbbrevOp(0)); // isPrevDeclInSameScope
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // Linkage
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // HasInit
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // HasMemberSpecInfo
+ // Type Source Info
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeLoc
+ DeclVarAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for DECL_CXX_METHOD
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_CXX_METHOD));
+ // RedeclarableDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // CanonicalDecl
+ // Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // Invalid
+ Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Implicit
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Used
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Referenced
+ Abv->Add(BitCodeAbbrevOp(0)); // InObjCContainer
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // Access
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
+ // NamedDecl
+ Abv->Add(BitCodeAbbrevOp(DeclarationName::Identifier)); // NameKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Identifier
+ Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber
+ // ValueDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ // DeclaratorDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerLocStart
+ Abv->Add(BitCodeAbbrevOp(0)); // HasExtInfo
+ // FunctionDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 11)); // IDNS
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // StorageClass
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Inline
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InlineSpecified
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // VirtualAsWritten
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Pure
+ Abv->Add(BitCodeAbbrevOp(0)); // HasInheritedProto
+ Abv->Add(BitCodeAbbrevOp(1)); // HasWrittenProto
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Deleted
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Trivial
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Defaulted
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ExplicitlyDefaulted
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ImplicitReturnZero
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Constexpr
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // SkippedBody
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // LateParsed
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // Linkage
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LocEnd
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // TemplateKind
+ // This Array slurps the rest of the record. Fortunately we want to encode
+ // (nearly) all the remaining (variable number of) fields in the same way.
+ //
+ // This is the function template information if any, then
+ // NumParams and Params[] from FunctionDecl, and
+ // NumOverriddenMethods, OverriddenMethods[] from CXXMethodDecl.
+ //
+ // TODO: Add an AbbrevOp for 'size then elements' and use it here.
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+ DeclCXXMethodAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for EXPR_DECL_REF
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::EXPR_DECL_REF));
+ // Stmt
+ // Expr
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // TypeDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ValueDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InstantiationDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // UnexpandedParamPack
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // GetValueKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // GetObjectKind
+ // DeclRefExpr
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // HasQualifier
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // GetDeclFound
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ExplicitTemplateArgs
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // HadMultipleCandidates
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+ 1)); // RefersToEnclosingVariableOrCapture
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclRef
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
+ DeclRefExprAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for EXPR_INTEGER_LITERAL
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::EXPR_INTEGER_LITERAL));
+ // Stmt
+ // Expr
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // TypeDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ValueDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InstantiationDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // UnexpandedParamPack
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // GetValueKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // GetObjectKind
+ // Integer Literal
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
+ Abv->Add(BitCodeAbbrevOp(32)); // Bit Width
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Value
+ IntegerLiteralAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for EXPR_CHARACTER_LITERAL
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::EXPR_CHARACTER_LITERAL));
+ // Stmt
+ // Expr
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // TypeDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ValueDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InstantiationDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // UnexpandedParamPack
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // GetValueKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // GetObjectKind
+ // Character Literal
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getValue
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // getKind
+ CharacterLiteralAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for EXPR_IMPLICIT_CAST
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::EXPR_IMPLICIT_CAST));
+ // Stmt
+ // Expr
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // TypeDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ValueDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InstantiationDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // UnexpandedParamPack
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // GetValueKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // GetObjectKind
+ // CastExpr
+ Abv->Add(BitCodeAbbrevOp(0)); // PathSize
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 6)); // CastKind
+ // ImplicitCastExpr
+ ExprImplicitCastAbbrev = Stream.EmitAbbrev(Abv);
+
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_CONTEXT_LEXICAL));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ DeclContextLexicalAbbrev = Stream.EmitAbbrev(Abv);
+
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_CONTEXT_VISIBLE));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ DeclContextVisibleLookupAbbrev = Stream.EmitAbbrev(Abv);
+}
+
+/// isRequiredDecl - Check if this is a "required" Decl, which must be seen by
+/// consumers of the AST.
+///
+/// Such decls will always be deserialized from the AST file, so we would like
+/// this to be as restrictive as possible. Currently the predicate is driven by
+/// code generation requirements; if other clients have a different notion of
+/// what is "required", then we may have to consider an alternate scheme where
+/// clients can iterate over the top-level decls and get information on them
+/// without necessarily deserializing them. We could explicitly require such
+/// clients to use a separate API call to "realize" the decl. This should be
+/// relatively painless since they would presumably only do it for top-level
+/// decls.
+static bool isRequiredDecl(const Decl *D, ASTContext &Context,
+ bool WritingModule) {
+ // An ObjCMethodDecl is never considered "required" because its
+ // implementation container always is.
+
+ // File scoped assembly or obj-c implementation must be seen.
+ if (isa<FileScopeAsmDecl>(D) || isa<ObjCImplDecl>(D))
+ return true;
+
+ // ImportDecl is used by codegen to determine the set of imported modules to
+ // search for inputs for automatic linking; include it if it has a semantic
+ // effect.
+ if (isa<ImportDecl>(D) && !WritingModule)
+ return true;
+
+ return Context.DeclMustBeEmitted(D);
+}
+
+void ASTWriter::WriteDecl(ASTContext &Context, Decl *D) {
+ // Switch case IDs are per Decl.
+ ClearSwitchCaseIDs();
+
+ RecordData Record;
+ ASTDeclWriter W(*this, Context, Record);
+
+ // Determine the ID for this declaration.
+ serialization::DeclID ID;
+ assert(!D->isFromASTFile() && "should not be emitting imported decl");
+ serialization::DeclID &IDR = DeclIDs[D];
+ if (IDR == 0)
+ IDR = NextDeclID++;
+
+ ID = IDR;
+
+ bool isReplacingADecl = ID < FirstDeclID;
+
+ // If this declaration is also a DeclContext, write blocks for the
+ // declarations that are lexically stored inside its context and those
+ // declarations that are visible from its context. These blocks
+ // are written before the declaration itself so that we can put
+ // their offsets into the record for the declaration.
+ uint64_t LexicalOffset = 0;
+ uint64_t VisibleOffset = 0;
+ DeclContext *DC = dyn_cast<DeclContext>(D);
+ if (DC) {
+ if (isReplacingADecl) {
+ // It is replacing a decl from a chained PCH; make sure that the
+ // DeclContext is fully loaded.
+ if (DC->hasExternalLexicalStorage())
+ DC->LoadLexicalDeclsFromExternalStorage();
+ if (DC->hasExternalVisibleStorage())
+ Chain->completeVisibleDeclsMap(DC);
+ }
+ LexicalOffset = WriteDeclContextLexicalBlock(Context, DC);
+ VisibleOffset = WriteDeclContextVisibleBlock(Context, DC);
+ }
+
+ // Build a record for this declaration
+ Record.clear();
+ W.Code = (serialization::DeclCode)0;
+ W.AbbrevToUse = 0;
+ W.Visit(D);
+ if (DC) W.VisitDeclContext(DC, LexicalOffset, VisibleOffset);
+
+ if (isReplacingADecl) {
+ // We're replacing a decl in a previous file.
+ ReplacedDecls.push_back(ReplacedDeclInfo(ID, Stream.GetCurrentBitNo(),
+ D->getLocation()));
+ } else {
+ unsigned Index = ID - FirstDeclID;
+
+ // Record the offset for this declaration
+ SourceLocation Loc = D->getLocation();
+ if (DeclOffsets.size() == Index)
+ DeclOffsets.push_back(DeclOffset(Loc, Stream.GetCurrentBitNo()));
+ else if (DeclOffsets.size() < Index) {
+ DeclOffsets.resize(Index+1);
+ DeclOffsets[Index].setLocation(Loc);
+ DeclOffsets[Index].BitOffset = Stream.GetCurrentBitNo();
+ }
+
+ SourceManager &SM = Context.getSourceManager();
+ if (Loc.isValid() && SM.isLocalSourceLocation(Loc))
+ associateDeclWithFile(D, ID);
+ }
+
+ if (!W.Code)
+ llvm::report_fatal_error(StringRef("unexpected declaration kind '") +
+ D->getDeclKindName() + "'");
+ Stream.EmitRecord(W.Code, Record, W.AbbrevToUse);
+
+ // Flush any expressions, base specifiers, and ctor initializers that
+ // were written as part of this declaration.
+ FlushPendingAfterDecl();
+
+ // Note declarations that should be deserialized eagerly so that we can add
+ // them to a record in the AST file later.
+ if (isRequiredDecl(D, Context, WritingModule))
+ EagerlyDeserializedDecls.push_back(ID);
+}
+
+void ASTWriter::AddFunctionDefinition(const FunctionDecl *FD,
+ RecordData &Record) {
+ ClearSwitchCaseIDs();
+
+ ASTDeclWriter W(*this, FD->getASTContext(), Record);
+ W.AddFunctionDefinition(FD);
+}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
new file mode 100644
index 0000000..e52ed05
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -0,0 +1,2411 @@
+//===--- ASTWriterStmt.cpp - Statement and Expression Serialization -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Implements serialization for Statements and Expressions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/Serialization/ASTWriter.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Lex/Token.h"
+#include "llvm/Bitcode/BitstreamWriter.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Statement/expression serialization
+//===----------------------------------------------------------------------===//
+
+namespace clang {
+
+ class ASTStmtWriter : public StmtVisitor<ASTStmtWriter, void> {
+ friend class OMPClauseWriter;
+ ASTWriter &Writer;
+ ASTWriter::RecordData &Record;
+
+ public:
+ serialization::StmtCode Code;
+ unsigned AbbrevToUse;
+
+ ASTStmtWriter(ASTWriter &Writer, ASTWriter::RecordData &Record)
+ : Writer(Writer), Record(Record) { }
+
+ void AddTemplateKWAndArgsInfo(const ASTTemplateKWAndArgsInfo &ArgInfo,
+ const TemplateArgumentLoc *Args);
+
+ void VisitStmt(Stmt *S);
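+ // The include below declares one Visit method per concrete statement and
+ // expression node; their definitions make up the rest of this file.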
+#define STMT(Type, Base) \
+ void Visit##Type(Type *);
+#include "clang/AST/StmtNodes.inc"
+ };
+}
+
+void ASTStmtWriter::AddTemplateKWAndArgsInfo(
+ const ASTTemplateKWAndArgsInfo &ArgInfo, const TemplateArgumentLoc *Args) {
+ Writer.AddSourceLocation(ArgInfo.TemplateKWLoc, Record);
+ Writer.AddSourceLocation(ArgInfo.LAngleLoc, Record);
+ Writer.AddSourceLocation(ArgInfo.RAngleLoc, Record);
+ for (unsigned i = 0; i != ArgInfo.NumTemplateArgs; ++i)
+ Writer.AddTemplateArgumentLoc(Args[i], Record);
+}
+
+void ASTStmtWriter::VisitStmt(Stmt *S) {
+}
+
+void ASTStmtWriter::VisitNullStmt(NullStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getSemiLoc(), Record);
+ Record.push_back(S->HasLeadingEmptyMacro);
+ Code = serialization::STMT_NULL;
+}
+
+void ASTStmtWriter::VisitCompoundStmt(CompoundStmt *S) {
+ VisitStmt(S);
+ Record.push_back(S->size());
+ for (auto *CS : S->body())
+ Writer.AddStmt(CS);
+ Writer.AddSourceLocation(S->getLBracLoc(), Record);
+ Writer.AddSourceLocation(S->getRBracLoc(), Record);
+ Code = serialization::STMT_COMPOUND;
+}
+
+void ASTStmtWriter::VisitSwitchCase(SwitchCase *S) {
+ VisitStmt(S);
+ Record.push_back(Writer.getSwitchCaseID(S));
+ Writer.AddSourceLocation(S->getKeywordLoc(), Record);
+ Writer.AddSourceLocation(S->getColonLoc(), Record);
+}
+
+void ASTStmtWriter::VisitCaseStmt(CaseStmt *S) {
+ VisitSwitchCase(S);
+ Writer.AddStmt(S->getLHS());
+ Writer.AddStmt(S->getRHS());
+ Writer.AddStmt(S->getSubStmt());
+ Writer.AddSourceLocation(S->getEllipsisLoc(), Record);
+ Code = serialization::STMT_CASE;
+}
+
+void ASTStmtWriter::VisitDefaultStmt(DefaultStmt *S) {
+ VisitSwitchCase(S);
+ Writer.AddStmt(S->getSubStmt());
+ Code = serialization::STMT_DEFAULT;
+}
+
+void ASTStmtWriter::VisitLabelStmt(LabelStmt *S) {
+ VisitStmt(S);
+ Writer.AddDeclRef(S->getDecl(), Record);
+ Writer.AddStmt(S->getSubStmt());
+ Writer.AddSourceLocation(S->getIdentLoc(), Record);
+ Code = serialization::STMT_LABEL;
+}
+
+void ASTStmtWriter::VisitAttributedStmt(AttributedStmt *S) {
+ VisitStmt(S);
+ Record.push_back(S->getAttrs().size());
+ Writer.WriteAttributes(S->getAttrs(), Record);
+ Writer.AddStmt(S->getSubStmt());
+ Writer.AddSourceLocation(S->getAttrLoc(), Record);
+ Code = serialization::STMT_ATTRIBUTED;
+}
+
+void ASTStmtWriter::VisitIfStmt(IfStmt *S) {
+ VisitStmt(S);
+ Writer.AddDeclRef(S->getConditionVariable(), Record);
+ Writer.AddStmt(S->getCond());
+ Writer.AddStmt(S->getThen());
+ Writer.AddStmt(S->getElse());
+ Writer.AddSourceLocation(S->getIfLoc(), Record);
+ Writer.AddSourceLocation(S->getElseLoc(), Record);
+ Code = serialization::STMT_IF;
+}
+
+void ASTStmtWriter::VisitSwitchStmt(SwitchStmt *S) {
+ VisitStmt(S);
+ Writer.AddDeclRef(S->getConditionVariable(), Record);
+ Writer.AddStmt(S->getCond());
+ Writer.AddStmt(S->getBody());
+ Writer.AddSourceLocation(S->getSwitchLoc(), Record);
+ Record.push_back(S->isAllEnumCasesCovered());
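+ // Assign an ID to each case up front; the case/default statements
+ // themselves, serialized as part of the body, record the same IDs so the
+ // reader can rebuild the switch-case list.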
+ for (SwitchCase *SC = S->getSwitchCaseList(); SC;
+ SC = SC->getNextSwitchCase())
+ Record.push_back(Writer.RecordSwitchCaseID(SC));
+ Code = serialization::STMT_SWITCH;
+}
+
+void ASTStmtWriter::VisitWhileStmt(WhileStmt *S) {
+ VisitStmt(S);
+ Writer.AddDeclRef(S->getConditionVariable(), Record);
+ Writer.AddStmt(S->getCond());
+ Writer.AddStmt(S->getBody());
+ Writer.AddSourceLocation(S->getWhileLoc(), Record);
+ Code = serialization::STMT_WHILE;
+}
+
+void ASTStmtWriter::VisitDoStmt(DoStmt *S) {
+ VisitStmt(S);
+ Writer.AddStmt(S->getCond());
+ Writer.AddStmt(S->getBody());
+ Writer.AddSourceLocation(S->getDoLoc(), Record);
+ Writer.AddSourceLocation(S->getWhileLoc(), Record);
+ Writer.AddSourceLocation(S->getRParenLoc(), Record);
+ Code = serialization::STMT_DO;
+}
+
+void ASTStmtWriter::VisitForStmt(ForStmt *S) {
+ VisitStmt(S);
+ Writer.AddStmt(S->getInit());
+ Writer.AddStmt(S->getCond());
+ Writer.AddDeclRef(S->getConditionVariable(), Record);
+ Writer.AddStmt(S->getInc());
+ Writer.AddStmt(S->getBody());
+ Writer.AddSourceLocation(S->getForLoc(), Record);
+ Writer.AddSourceLocation(S->getLParenLoc(), Record);
+ Writer.AddSourceLocation(S->getRParenLoc(), Record);
+ Code = serialization::STMT_FOR;
+}
+
+void ASTStmtWriter::VisitGotoStmt(GotoStmt *S) {
+ VisitStmt(S);
+ Writer.AddDeclRef(S->getLabel(), Record);
+ Writer.AddSourceLocation(S->getGotoLoc(), Record);
+ Writer.AddSourceLocation(S->getLabelLoc(), Record);
+ Code = serialization::STMT_GOTO;
+}
+
+void ASTStmtWriter::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getGotoLoc(), Record);
+ Writer.AddSourceLocation(S->getStarLoc(), Record);
+ Writer.AddStmt(S->getTarget());
+ Code = serialization::STMT_INDIRECT_GOTO;
+}
+
+void ASTStmtWriter::VisitContinueStmt(ContinueStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getContinueLoc(), Record);
+ Code = serialization::STMT_CONTINUE;
+}
+
+void ASTStmtWriter::VisitBreakStmt(BreakStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getBreakLoc(), Record);
+ Code = serialization::STMT_BREAK;
+}
+
+void ASTStmtWriter::VisitReturnStmt(ReturnStmt *S) {
+ VisitStmt(S);
+ Writer.AddStmt(S->getRetValue());
+ Writer.AddSourceLocation(S->getReturnLoc(), Record);
+ Writer.AddDeclRef(S->getNRVOCandidate(), Record);
+ Code = serialization::STMT_RETURN;
+}
+
+void ASTStmtWriter::VisitDeclStmt(DeclStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getStartLoc(), Record);
+ Writer.AddSourceLocation(S->getEndLoc(), Record);
+ DeclGroupRef DG = S->getDeclGroup();
+ for (DeclGroupRef::iterator D = DG.begin(), DEnd = DG.end(); D != DEnd; ++D)
+ Writer.AddDeclRef(*D, Record);
+ Code = serialization::STMT_DECL;
+}
+
+void ASTStmtWriter::VisitAsmStmt(AsmStmt *S) {
+ VisitStmt(S);
+ Record.push_back(S->getNumOutputs());
+ Record.push_back(S->getNumInputs());
+ Record.push_back(S->getNumClobbers());
+ Writer.AddSourceLocation(S->getAsmLoc(), Record);
+ Record.push_back(S->isVolatile());
+ Record.push_back(S->isSimple());
+}
+
+void ASTStmtWriter::VisitGCCAsmStmt(GCCAsmStmt *S) {
+ VisitAsmStmt(S);
+ Writer.AddSourceLocation(S->getRParenLoc(), Record);
+ Writer.AddStmt(S->getAsmString());
+
+ // Outputs
+ for (unsigned I = 0, N = S->getNumOutputs(); I != N; ++I) {
+ Writer.AddIdentifierRef(S->getOutputIdentifier(I), Record);
+ Writer.AddStmt(S->getOutputConstraintLiteral(I));
+ Writer.AddStmt(S->getOutputExpr(I));
+ }
+
+ // Inputs
+ for (unsigned I = 0, N = S->getNumInputs(); I != N; ++I) {
+ Writer.AddIdentifierRef(S->getInputIdentifier(I), Record);
+ Writer.AddStmt(S->getInputConstraintLiteral(I));
+ Writer.AddStmt(S->getInputExpr(I));
+ }
+
+ // Clobbers
+ for (unsigned I = 0, N = S->getNumClobbers(); I != N; ++I)
+ Writer.AddStmt(S->getClobberStringLiteral(I));
+
+ Code = serialization::STMT_GCCASM;
+}
+
+void ASTStmtWriter::VisitMSAsmStmt(MSAsmStmt *S) {
+ VisitAsmStmt(S);
+ Writer.AddSourceLocation(S->getLBraceLoc(), Record);
+ Writer.AddSourceLocation(S->getEndLoc(), Record);
+ Record.push_back(S->getNumAsmToks());
+ Writer.AddString(S->getAsmString(), Record);
+
+ // Tokens
+ for (unsigned I = 0, N = S->getNumAsmToks(); I != N; ++I) {
+ Writer.AddToken(S->getAsmToks()[I], Record);
+ }
+
+ // Clobbers
+ for (unsigned I = 0, N = S->getNumClobbers(); I != N; ++I) {
+ Writer.AddString(S->getClobber(I), Record);
+ }
+
+ // Outputs
+ for (unsigned I = 0, N = S->getNumOutputs(); I != N; ++I) {
+ Writer.AddStmt(S->getOutputExpr(I));
+ Writer.AddString(S->getOutputConstraint(I), Record);
+ }
+
+ // Inputs
+ for (unsigned I = 0, N = S->getNumInputs(); I != N; ++I) {
+ Writer.AddStmt(S->getInputExpr(I));
+ Writer.AddString(S->getInputConstraint(I), Record);
+ }
+
+ Code = serialization::STMT_MSASM;
+}
+
+void ASTStmtWriter::VisitCoroutineBodyStmt(CoroutineBodyStmt *S) {
+ // FIXME: Implement coroutine serialization.
+ llvm_unreachable("unimplemented");
+}
+
+void ASTStmtWriter::VisitCoreturnStmt(CoreturnStmt *S) {
+ // FIXME: Implement coroutine serialization.
+ llvm_unreachable("unimplemented");
+}
+
+void ASTStmtWriter::VisitCoawaitExpr(CoawaitExpr *S) {
+ // FIXME: Implement coroutine serialization.
+ llvm_unreachable("unimplemented");
+}
+
+void ASTStmtWriter::VisitCoyieldExpr(CoyieldExpr *S) {
+ // FIXME: Implement coroutine serialization.
+ llvm_unreachable("unimplemented");
+}
+
+void ASTStmtWriter::VisitCapturedStmt(CapturedStmt *S) {
+ VisitStmt(S);
+ // NumCaptures
+ Record.push_back(std::distance(S->capture_begin(), S->capture_end()));
+
+ // CapturedDecl and captured region kind
+ Writer.AddDeclRef(S->getCapturedDecl(), Record);
+ Record.push_back(S->getCapturedRegionKind());
+
+ Writer.AddDeclRef(S->getCapturedRecordDecl(), Record);
+
+ // Capture inits
+ for (auto *I : S->capture_inits())
+ Writer.AddStmt(I);
+
+ // Body
+ Writer.AddStmt(S->getCapturedStmt());
+
+ // Captures
+ for (const auto &I : S->captures()) {
+ if (I.capturesThis() || I.capturesVariableArrayType())
+ Writer.AddDeclRef(nullptr, Record);
+ else
+ Writer.AddDeclRef(I.getCapturedVar(), Record);
+ Record.push_back(I.getCaptureKind());
+ Writer.AddSourceLocation(I.getLocation(), Record);
+ }
+
+ Code = serialization::STMT_CAPTURED;
+}
+
+void ASTStmtWriter::VisitExpr(Expr *E) {
+ VisitStmt(E);
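+ // Every expression record starts with this common prologue: type,
+ // dependence bits, value kind, and object kind. The EXPR_* abbreviations
+ // defined in WriteDeclAbbrevs mirror exactly this layout.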
+ Writer.AddTypeRef(E->getType(), Record);
+ Record.push_back(E->isTypeDependent());
+ Record.push_back(E->isValueDependent());
+ Record.push_back(E->isInstantiationDependent());
+ Record.push_back(E->containsUnexpandedParameterPack());
+ Record.push_back(E->getValueKind());
+ Record.push_back(E->getObjectKind());
+}
+
+void ASTStmtWriter::VisitPredefinedExpr(PredefinedExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Record.push_back(E->getIdentType()); // FIXME: stable encoding
+ Writer.AddStmt(E->getFunctionName());
+ Code = serialization::EXPR_PREDEFINED;
+}
+
+void ASTStmtWriter::VisitDeclRefExpr(DeclRefExpr *E) {
+ VisitExpr(E);
+
+ Record.push_back(E->hasQualifier());
+ Record.push_back(E->getDecl() != E->getFoundDecl());
+ Record.push_back(E->hasTemplateKWAndArgsInfo());
+ Record.push_back(E->hadMultipleCandidates());
+ Record.push_back(E->refersToEnclosingVariableOrCapture());
+
+ if (E->hasTemplateKWAndArgsInfo()) {
+ unsigned NumTemplateArgs = E->getNumTemplateArgs();
+ Record.push_back(NumTemplateArgs);
+ }
+
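+ // The compact EXPR_DECL_REF abbreviation only fits the common case: no
+ // qualifier, no template keyword or arguments, the found decl is the
+ // referenced decl, and the name is a plain identifier.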
+ DeclarationName::NameKind nk = (E->getDecl()->getDeclName().getNameKind());
+
+ if ((!E->hasTemplateKWAndArgsInfo()) && (!E->hasQualifier()) &&
+ (E->getDecl() == E->getFoundDecl()) &&
+ nk == DeclarationName::Identifier) {
+ AbbrevToUse = Writer.getDeclRefExprAbbrev();
+ }
+
+ if (E->hasQualifier())
+ Writer.AddNestedNameSpecifierLoc(E->getQualifierLoc(), Record);
+
+ if (E->getDecl() != E->getFoundDecl())
+ Writer.AddDeclRef(E->getFoundDecl(), Record);
+
+ if (E->hasTemplateKWAndArgsInfo())
+ AddTemplateKWAndArgsInfo(*E->getTrailingObjects<ASTTemplateKWAndArgsInfo>(),
+ E->getTrailingObjects<TemplateArgumentLoc>());
+
+ Writer.AddDeclRef(E->getDecl(), Record);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Writer.AddDeclarationNameLoc(E->DNLoc, E->getDecl()->getDeclName(), Record);
+ Code = serialization::EXPR_DECL_REF;
+}
+
+void ASTStmtWriter::VisitIntegerLiteral(IntegerLiteral *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Writer.AddAPInt(E->getValue(), Record);
+
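+ // The integer literal abbreviation hard-codes a 32-bit value width, so it
+ // can only be used when the APInt is exactly 32 bits wide.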
+ if (E->getValue().getBitWidth() == 32) {
+ AbbrevToUse = Writer.getIntegerLiteralAbbrev();
+ }
+
+ Code = serialization::EXPR_INTEGER_LITERAL;
+}
+
+void ASTStmtWriter::VisitFloatingLiteral(FloatingLiteral *E) {
+ VisitExpr(E);
+ Record.push_back(E->getRawSemantics());
+ Record.push_back(E->isExact());
+ Writer.AddAPFloat(E->getValue(), Record);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Code = serialization::EXPR_FLOATING_LITERAL;
+}
+
+void ASTStmtWriter::VisitImaginaryLiteral(ImaginaryLiteral *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getSubExpr());
+ Code = serialization::EXPR_IMAGINARY_LITERAL;
+}
+
+void ASTStmtWriter::VisitStringLiteral(StringLiteral *E) {
+ VisitExpr(E);
+ Record.push_back(E->getByteLength());
+ Record.push_back(E->getNumConcatenated());
+ Record.push_back(E->getKind());
+ Record.push_back(E->isPascal());
+ // FIXME: String data should be stored as a blob at the end of the
+ // StringLiteral. However, we can't do so now because we have no
+ // provision for coping with abbreviations when we're jumping around
+ // the AST file during deserialization.
+ Record.append(E->getBytes().begin(), E->getBytes().end());
+ for (unsigned I = 0, N = E->getNumConcatenated(); I != N; ++I)
+ Writer.AddSourceLocation(E->getStrTokenLoc(I), Record);
+ Code = serialization::EXPR_STRING_LITERAL;
+}
+
+void ASTStmtWriter::VisitCharacterLiteral(CharacterLiteral *E) {
+ VisitExpr(E);
+ Record.push_back(E->getValue());
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Record.push_back(E->getKind());
+
+ AbbrevToUse = Writer.getCharacterLiteralAbbrev();
+
+ Code = serialization::EXPR_CHARACTER_LITERAL;
+}
+
+void ASTStmtWriter::VisitParenExpr(ParenExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getLParen(), Record);
+ Writer.AddSourceLocation(E->getRParen(), Record);
+ Writer.AddStmt(E->getSubExpr());
+ Code = serialization::EXPR_PAREN;
+}
+
+void ASTStmtWriter::VisitParenListExpr(ParenListExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->NumExprs);
+ for (unsigned i=0; i != E->NumExprs; ++i)
+ Writer.AddStmt(E->Exprs[i]);
+ Writer.AddSourceLocation(E->LParenLoc, Record);
+ Writer.AddSourceLocation(E->RParenLoc, Record);
+ Code = serialization::EXPR_PAREN_LIST;
+}
+
+void ASTStmtWriter::VisitUnaryOperator(UnaryOperator *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getSubExpr());
+ Record.push_back(E->getOpcode()); // FIXME: stable encoding
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Code = serialization::EXPR_UNARY_OPERATOR;
+}
+
+void ASTStmtWriter::VisitOffsetOfExpr(OffsetOfExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumComponents());
+ Record.push_back(E->getNumExpressions());
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Writer.AddTypeSourceInfo(E->getTypeSourceInfo(), Record);
+ for (unsigned I = 0, N = E->getNumComponents(); I != N; ++I) {
+ const OffsetOfNode &ON = E->getComponent(I);
+ Record.push_back(ON.getKind()); // FIXME: Stable encoding
+ Writer.AddSourceLocation(ON.getSourceRange().getBegin(), Record);
+ Writer.AddSourceLocation(ON.getSourceRange().getEnd(), Record);
+ switch (ON.getKind()) {
+ case OffsetOfNode::Array:
+ Record.push_back(ON.getArrayExprIndex());
+ break;
+
+ case OffsetOfNode::Field:
+ Writer.AddDeclRef(ON.getField(), Record);
+ break;
+
+ case OffsetOfNode::Identifier:
+ Writer.AddIdentifierRef(ON.getFieldName(), Record);
+ break;
+
+ case OffsetOfNode::Base:
+ Writer.AddCXXBaseSpecifier(*ON.getBase(), Record);
+ break;
+ }
+ }
+ for (unsigned I = 0, N = E->getNumExpressions(); I != N; ++I)
+ Writer.AddStmt(E->getIndexExpr(I));
+ Code = serialization::EXPR_OFFSETOF;
+}
+
+void ASTStmtWriter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getKind());
+ if (E->isArgumentType())
+ Writer.AddTypeSourceInfo(E->getArgumentTypeInfo(), Record);
+ else {
+ Record.push_back(0);
+ Writer.AddStmt(E->getArgumentExpr());
+ }
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_SIZEOF_ALIGN_OF;
+}
+
+void ASTStmtWriter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getLHS());
+ Writer.AddStmt(E->getRHS());
+ Writer.AddSourceLocation(E->getRBracketLoc(), Record);
+ Code = serialization::EXPR_ARRAY_SUBSCRIPT;
+}
+
+void ASTStmtWriter::VisitOMPArraySectionExpr(OMPArraySectionExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getBase());
+ Writer.AddStmt(E->getLowerBound());
+ Writer.AddStmt(E->getLength());
+ Writer.AddSourceLocation(E->getColonLoc(), Record);
+ Writer.AddSourceLocation(E->getRBracketLoc(), Record);
+ Code = serialization::EXPR_OMP_ARRAY_SECTION;
+}
+
+void ASTStmtWriter::VisitCallExpr(CallExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumArgs());
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Writer.AddStmt(E->getCallee());
+ for (CallExpr::arg_iterator Arg = E->arg_begin(), ArgEnd = E->arg_end();
+ Arg != ArgEnd; ++Arg)
+ Writer.AddStmt(*Arg);
+ Code = serialization::EXPR_CALL;
+}
+
+void ASTStmtWriter::VisitMemberExpr(MemberExpr *E) {
+ // Don't call VisitExpr, we'll write everything here.
+
+ Record.push_back(E->hasQualifier());
+ if (E->hasQualifier())
+ Writer.AddNestedNameSpecifierLoc(E->getQualifierLoc(), Record);
+
+ Record.push_back(E->HasTemplateKWAndArgsInfo);
+ if (E->HasTemplateKWAndArgsInfo) {
+ Writer.AddSourceLocation(E->getTemplateKeywordLoc(), Record);
+ unsigned NumTemplateArgs = E->getNumTemplateArgs();
+ Record.push_back(NumTemplateArgs);
+ Writer.AddSourceLocation(E->getLAngleLoc(), Record);
+ Writer.AddSourceLocation(E->getRAngleLoc(), Record);
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ Writer.AddTemplateArgumentLoc(E->getTemplateArgs()[i], Record);
+ }
+
+ Record.push_back(E->hadMultipleCandidates());
+
+ DeclAccessPair FoundDecl = E->getFoundDecl();
+ Writer.AddDeclRef(FoundDecl.getDecl(), Record);
+ Record.push_back(FoundDecl.getAccess());
+
+ Writer.AddTypeRef(E->getType(), Record);
+ Record.push_back(E->getValueKind());
+ Record.push_back(E->getObjectKind());
+ Writer.AddStmt(E->getBase());
+ Writer.AddDeclRef(E->getMemberDecl(), Record);
+ Writer.AddSourceLocation(E->getMemberLoc(), Record);
+ Record.push_back(E->isArrow());
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Writer.AddDeclarationNameLoc(E->MemberDNLoc,
+ E->getMemberDecl()->getDeclName(), Record);
+ Code = serialization::EXPR_MEMBER;
+}
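+
+// Note: MemberExpr deliberately bypasses VisitExpr: the reader has to
+// see the optional pieces (qualifier, template keyword/arguments,
+// found-decl) before it can build the node, so those are written
+// first, followed by a hand-picked subset of the ordinary Expr fields.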
+
+void ASTStmtWriter::VisitObjCIsaExpr(ObjCIsaExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getBase());
+ Writer.AddSourceLocation(E->getIsaMemberLoc(), Record);
+ Writer.AddSourceLocation(E->getOpLoc(), Record);
+ Record.push_back(E->isArrow());
+ Code = serialization::EXPR_OBJC_ISA;
+}
+
+void ASTStmtWriter::
+VisitObjCIndirectCopyRestoreExpr(ObjCIndirectCopyRestoreExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getSubExpr());
+ Record.push_back(E->shouldCopy());
+ Code = serialization::EXPR_OBJC_INDIRECT_COPY_RESTORE;
+}
+
+void ASTStmtWriter::VisitObjCBridgedCastExpr(ObjCBridgedCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ Writer.AddSourceLocation(E->getLParenLoc(), Record);
+ Writer.AddSourceLocation(E->getBridgeKeywordLoc(), Record);
+ Record.push_back(E->getBridgeKind()); // FIXME: Stable encoding
+ Code = serialization::EXPR_OBJC_BRIDGED_CAST;
+}
+
+void ASTStmtWriter::VisitCastExpr(CastExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->path_size());
+ Writer.AddStmt(E->getSubExpr());
+ Record.push_back(E->getCastKind()); // FIXME: stable encoding
+
+ for (CastExpr::path_iterator
+ PI = E->path_begin(), PE = E->path_end(); PI != PE; ++PI)
+ Writer.AddCXXBaseSpecifier(**PI, Record);
+}
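+
+// Note: VisitCastExpr writes the inheritance-path length before the
+// sub-expression so the reader can size the trailing base-specifier
+// storage up front, and it deliberately leaves Code unset: every
+// concrete cast visitor appends its own fields and stamps its own
+// record code.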
+
+void ASTStmtWriter::VisitBinaryOperator(BinaryOperator *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getLHS());
+ Writer.AddStmt(E->getRHS());
+ Record.push_back(E->getOpcode()); // FIXME: stable encoding
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Record.push_back(E->isFPContractable());
+ Code = serialization::EXPR_BINARY_OPERATOR;
+}
+
+void ASTStmtWriter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
+ VisitBinaryOperator(E);
+ Writer.AddTypeRef(E->getComputationLHSType(), Record);
+ Writer.AddTypeRef(E->getComputationResultType(), Record);
+ Code = serialization::EXPR_COMPOUND_ASSIGN_OPERATOR;
+}
+
+void ASTStmtWriter::VisitConditionalOperator(ConditionalOperator *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getCond());
+ Writer.AddStmt(E->getLHS());
+ Writer.AddStmt(E->getRHS());
+ Writer.AddSourceLocation(E->getQuestionLoc(), Record);
+ Writer.AddSourceLocation(E->getColonLoc(), Record);
+ Code = serialization::EXPR_CONDITIONAL_OPERATOR;
+}
+
+void
+ASTStmtWriter::VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getOpaqueValue());
+ Writer.AddStmt(E->getCommon());
+ Writer.AddStmt(E->getCond());
+ Writer.AddStmt(E->getTrueExpr());
+ Writer.AddStmt(E->getFalseExpr());
+ Writer.AddSourceLocation(E->getQuestionLoc(), Record);
+ Writer.AddSourceLocation(E->getColonLoc(), Record);
+ Code = serialization::EXPR_BINARY_CONDITIONAL_OPERATOR;
+}
+
+void ASTStmtWriter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ VisitCastExpr(E);
+
+ if (E->path_size() == 0)
+ AbbrevToUse = Writer.getExprImplicitCastAbbrev();
+
+ Code = serialization::EXPR_IMPLICIT_CAST;
+}
+
+void ASTStmtWriter::VisitExplicitCastExpr(ExplicitCastExpr *E) {
+ VisitCastExpr(E);
+ Writer.AddTypeSourceInfo(E->getTypeInfoAsWritten(), Record);
+}
+
+void ASTStmtWriter::VisitCStyleCastExpr(CStyleCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ Writer.AddSourceLocation(E->getLParenLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_CSTYLE_CAST;
+}
+
+void ASTStmtWriter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getLParenLoc(), Record);
+ Writer.AddTypeSourceInfo(E->getTypeSourceInfo(), Record);
+ Writer.AddStmt(E->getInitializer());
+ Record.push_back(E->isFileScope());
+ Code = serialization::EXPR_COMPOUND_LITERAL;
+}
+
+void ASTStmtWriter::VisitExtVectorElementExpr(ExtVectorElementExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getBase());
+ Writer.AddIdentifierRef(&E->getAccessor(), Record);
+ Writer.AddSourceLocation(E->getAccessorLoc(), Record);
+ Code = serialization::EXPR_EXT_VECTOR_ELEMENT;
+}
+
+void ASTStmtWriter::VisitInitListExpr(InitListExpr *E) {
+ VisitExpr(E);
+ // NOTE: only add the (possibly null) syntactic form.
+ // No need to serialize the isSemanticForm flag and the semantic form.
+ Writer.AddStmt(E->getSyntacticForm());
+ Writer.AddSourceLocation(E->getLBraceLoc(), Record);
+ Writer.AddSourceLocation(E->getRBraceLoc(), Record);
+ bool isArrayFiller = E->ArrayFillerOrUnionFieldInit.is<Expr*>();
+ Record.push_back(isArrayFiller);
+ if (isArrayFiller)
+ Writer.AddStmt(E->getArrayFiller());
+ else
+ Writer.AddDeclRef(E->getInitializedFieldInUnion(), Record);
+ Record.push_back(E->hadArrayRangeDesignator());
+ Record.push_back(E->getNumInits());
+ if (isArrayFiller) {
+ // ArrayFiller may have filled "holes" due to designated initializers.
+ // Write those inits as null to indicate that the filler goes in their
+ // place.
+ Expr *filler = E->getArrayFiller();
+ for (unsigned I = 0, N = E->getNumInits(); I != N; ++I)
+ Writer.AddStmt(E->getInit(I) != filler ? E->getInit(I) : nullptr);
+ } else {
+ for (unsigned I = 0, N = E->getNumInits(); I != N; ++I)
+ Writer.AddStmt(E->getInit(I));
+ }
+ Code = serialization::EXPR_INIT_LIST;
+}
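+
+// Example (sketch): for `int a[3] = { [2] = 7 }`, the semantic form
+// holds three inits, two of which are the shared array filler; those
+// slots are written as null above so the reader can re-install the
+// single deserialized filler rather than cloning it per element.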
+
+void ASTStmtWriter::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumSubExprs());
+ for (unsigned I = 0, N = E->getNumSubExprs(); I != N; ++I)
+ Writer.AddStmt(E->getSubExpr(I));
+ Writer.AddSourceLocation(E->getEqualOrColonLoc(), Record);
+ Record.push_back(E->usesGNUSyntax());
+ for (DesignatedInitExpr::designators_iterator D = E->designators_begin(),
+ DEnd = E->designators_end();
+ D != DEnd; ++D) {
+ if (D->isFieldDesignator()) {
+ if (FieldDecl *Field = D->getField()) {
+ Record.push_back(serialization::DESIG_FIELD_DECL);
+ Writer.AddDeclRef(Field, Record);
+ } else {
+ Record.push_back(serialization::DESIG_FIELD_NAME);
+ Writer.AddIdentifierRef(D->getFieldName(), Record);
+ }
+ Writer.AddSourceLocation(D->getDotLoc(), Record);
+ Writer.AddSourceLocation(D->getFieldLoc(), Record);
+ } else if (D->isArrayDesignator()) {
+ Record.push_back(serialization::DESIG_ARRAY);
+ Record.push_back(D->getFirstExprIndex());
+ Writer.AddSourceLocation(D->getLBracketLoc(), Record);
+ Writer.AddSourceLocation(D->getRBracketLoc(), Record);
+ } else {
+ assert(D->isArrayRangeDesignator() && "Unknown designator");
+ Record.push_back(serialization::DESIG_ARRAY_RANGE);
+ Record.push_back(D->getFirstExprIndex());
+ Writer.AddSourceLocation(D->getLBracketLoc(), Record);
+ Writer.AddSourceLocation(D->getEllipsisLoc(), Record);
+ Writer.AddSourceLocation(D->getRBracketLoc(), Record);
+ }
+ }
+ Code = serialization::EXPR_DESIGNATED_INIT;
+}
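+
+// Note: designators are heterogeneous, so each one is prefixed with a
+// serialization::DESIG_* tag (field-by-decl, field-by-name, array,
+// array range) telling the reader which payload layout follows.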
+
+void ASTStmtWriter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getBase());
+ Writer.AddStmt(E->getUpdater());
+ Code = serialization::EXPR_DESIGNATED_INIT_UPDATE;
+}
+
+void ASTStmtWriter::VisitNoInitExpr(NoInitExpr *E) {
+ VisitExpr(E);
+ Code = serialization::EXPR_NO_INIT;
+}
+
+void ASTStmtWriter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+ VisitExpr(E);
+ Code = serialization::EXPR_IMPLICIT_VALUE_INIT;
+}
+
+void ASTStmtWriter::VisitVAArgExpr(VAArgExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getSubExpr());
+ Writer.AddTypeSourceInfo(E->getWrittenTypeInfo(), Record);
+ Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Record.push_back(E->isMicrosoftABI());
+ Code = serialization::EXPR_VA_ARG;
+}
+
+void ASTStmtWriter::VisitAddrLabelExpr(AddrLabelExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getAmpAmpLoc(), Record);
+ Writer.AddSourceLocation(E->getLabelLoc(), Record);
+ Writer.AddDeclRef(E->getLabel(), Record);
+ Code = serialization::EXPR_ADDR_LABEL;
+}
+
+void ASTStmtWriter::VisitStmtExpr(StmtExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getSubStmt());
+ Writer.AddSourceLocation(E->getLParenLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_STMT;
+}
+
+void ASTStmtWriter::VisitChooseExpr(ChooseExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getCond());
+ Writer.AddStmt(E->getLHS());
+ Writer.AddStmt(E->getRHS());
+ Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Record.push_back(E->isConditionDependent() ? false : E->isConditionTrue());
+ Code = serialization::EXPR_CHOOSE;
+}
+
+void ASTStmtWriter::VisitGNUNullExpr(GNUNullExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getTokenLocation(), Record);
+ Code = serialization::EXPR_GNU_NULL;
+}
+
+void ASTStmtWriter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumSubExprs());
+ for (unsigned I = 0, N = E->getNumSubExprs(); I != N; ++I)
+ Writer.AddStmt(E->getExpr(I));
+ Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_SHUFFLE_VECTOR;
+}
+
+void ASTStmtWriter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Writer.AddTypeSourceInfo(E->getTypeSourceInfo(), Record);
+ Writer.AddStmt(E->getSrcExpr());
+ Code = serialization::EXPR_CONVERT_VECTOR;
+}
+
+void ASTStmtWriter::VisitBlockExpr(BlockExpr *E) {
+ VisitExpr(E);
+ Writer.AddDeclRef(E->getBlockDecl(), Record);
+ Code = serialization::EXPR_BLOCK;
+}
+
+void ASTStmtWriter::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumAssocs());
+
+ Writer.AddStmt(E->getControllingExpr());
+ for (unsigned I = 0, N = E->getNumAssocs(); I != N; ++I) {
+ Writer.AddTypeSourceInfo(E->getAssocTypeSourceInfo(I), Record);
+ Writer.AddStmt(E->getAssocExpr(I));
+ }
+ Record.push_back(E->isResultDependent() ? -1U : E->getResultIndex());
+
+ Writer.AddSourceLocation(E->getGenericLoc(), Record);
+ Writer.AddSourceLocation(E->getDefaultLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_GENERIC_SELECTION;
+}
+
+void ASTStmtWriter::VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumSemanticExprs());
+
+ // Push the result index. Currently, this needs to exactly match
+ // the encoding used internally for ResultIndex.
+ unsigned result = E->getResultExprIndex();
+ result = (result == PseudoObjectExpr::NoResult ? 0 : result + 1);
+ Record.push_back(result);
+
+ Writer.AddStmt(E->getSyntacticForm());
+ for (PseudoObjectExpr::semantics_iterator
+ i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
+ Writer.AddStmt(*i);
+ }
+ Code = serialization::EXPR_PSEUDO_OBJECT;
+}
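+
+// Note: the +1 bias above reserves 0 for "no result expression"; the
+// reader presumably subtracts it back out, which is why the encoding
+// has to match the internal ResultIndex representation exactly.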
+
+void ASTStmtWriter::VisitAtomicExpr(AtomicExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getOp());
+ for (unsigned I = 0, N = E->getNumSubExprs(); I != N; ++I)
+ Writer.AddStmt(E->getSubExprs()[I]);
+ Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_ATOMIC;
+}
+
+//===----------------------------------------------------------------------===//
+// Objective-C Expressions and Statements.
+//===----------------------------------------------------------------------===//
+
+void ASTStmtWriter::VisitObjCStringLiteral(ObjCStringLiteral *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getString());
+ Writer.AddSourceLocation(E->getAtLoc(), Record);
+ Code = serialization::EXPR_OBJC_STRING_LITERAL;
+}
+
+void ASTStmtWriter::VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getSubExpr());
+ Writer.AddDeclRef(E->getBoxingMethod(), Record);
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ Code = serialization::EXPR_OBJC_BOXED_EXPRESSION;
+}
+
+void ASTStmtWriter::VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumElements());
+ for (unsigned i = 0; i < E->getNumElements(); i++)
+ Writer.AddStmt(E->getElement(i));
+ Writer.AddDeclRef(E->getArrayWithObjectsMethod(), Record);
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ Code = serialization::EXPR_OBJC_ARRAY_LITERAL;
+}
+
+void ASTStmtWriter::VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumElements());
+ Record.push_back(E->HasPackExpansions);
+ for (unsigned i = 0; i < E->getNumElements(); i++) {
+ ObjCDictionaryElement Element = E->getKeyValueElement(i);
+ Writer.AddStmt(Element.Key);
+ Writer.AddStmt(Element.Value);
+ if (E->HasPackExpansions) {
+ Writer.AddSourceLocation(Element.EllipsisLoc, Record);
+ unsigned NumExpansions = 0;
+ if (Element.NumExpansions)
+ NumExpansions = *Element.NumExpansions + 1;
+ Record.push_back(NumExpansions);
+ }
+ }
+
+ Writer.AddDeclRef(E->getDictWithObjectsMethod(), Record);
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ Code = serialization::EXPR_OBJC_DICTIONARY_LITERAL;
+}
+
+void ASTStmtWriter::VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
+ VisitExpr(E);
+ Writer.AddTypeSourceInfo(E->getEncodedTypeSourceInfo(), Record);
+ Writer.AddSourceLocation(E->getAtLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_OBJC_ENCODE;
+}
+
+void ASTStmtWriter::VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
+ VisitExpr(E);
+ Writer.AddSelectorRef(E->getSelector(), Record);
+ Writer.AddSourceLocation(E->getAtLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_OBJC_SELECTOR_EXPR;
+}
+
+void ASTStmtWriter::VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
+ VisitExpr(E);
+ Writer.AddDeclRef(E->getProtocol(), Record);
+ Writer.AddSourceLocation(E->getAtLoc(), Record);
+ Writer.AddSourceLocation(E->ProtoLoc, Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_OBJC_PROTOCOL_EXPR;
+}
+
+void ASTStmtWriter::VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ VisitExpr(E);
+ Writer.AddDeclRef(E->getDecl(), Record);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Writer.AddSourceLocation(E->getOpLoc(), Record);
+ Writer.AddStmt(E->getBase());
+ Record.push_back(E->isArrow());
+ Record.push_back(E->isFreeIvar());
+ Code = serialization::EXPR_OBJC_IVAR_REF_EXPR;
+}
+
+void ASTStmtWriter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->SetterAndMethodRefFlags.getInt());
+ Record.push_back(E->isImplicitProperty());
+ if (E->isImplicitProperty()) {
+ Writer.AddDeclRef(E->getImplicitPropertyGetter(), Record);
+ Writer.AddDeclRef(E->getImplicitPropertySetter(), Record);
+ } else {
+ Writer.AddDeclRef(E->getExplicitProperty(), Record);
+ }
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Writer.AddSourceLocation(E->getReceiverLocation(), Record);
+ if (E->isObjectReceiver()) {
+ Record.push_back(0);
+ Writer.AddStmt(E->getBase());
+ } else if (E->isSuperReceiver()) {
+ Record.push_back(1);
+ Writer.AddTypeRef(E->getSuperReceiverType(), Record);
+ } else {
+ Record.push_back(2);
+ Writer.AddDeclRef(E->getClassReceiver(), Record);
+ }
+
+ Code = serialization::EXPR_OBJC_PROPERTY_REF_EXPR;
+}
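+
+// Note: the receiver is discriminated by a small tag (0 = object
+// expression, 1 = super receiver type, 2 = class declaration) followed
+// by the matching payload, echoing the if/else chain above.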
+
+void ASTStmtWriter::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getRBracket(), Record);
+ Writer.AddStmt(E->getBaseExpr());
+ Writer.AddStmt(E->getKeyExpr());
+ Writer.AddDeclRef(E->getAtIndexMethodDecl(), Record);
+ Writer.AddDeclRef(E->setAtIndexMethodDecl(), Record);
+
+ Code = serialization::EXPR_OBJC_SUBSCRIPT_REF_EXPR;
+}
+
+void ASTStmtWriter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumArgs());
+ Record.push_back(E->getNumStoredSelLocs());
+ Record.push_back(E->SelLocsKind);
+ Record.push_back(E->isDelegateInitCall());
+ Record.push_back(E->IsImplicit);
+ Record.push_back((unsigned)E->getReceiverKind()); // FIXME: stable encoding
+ switch (E->getReceiverKind()) {
+ case ObjCMessageExpr::Instance:
+ Writer.AddStmt(E->getInstanceReceiver());
+ break;
+
+ case ObjCMessageExpr::Class:
+ Writer.AddTypeSourceInfo(E->getClassReceiverTypeInfo(), Record);
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ case ObjCMessageExpr::SuperInstance:
+ Writer.AddTypeRef(E->getSuperType(), Record);
+ Writer.AddSourceLocation(E->getSuperLoc(), Record);
+ break;
+ }
+
+ if (E->getMethodDecl()) {
+ Record.push_back(1);
+ Writer.AddDeclRef(E->getMethodDecl(), Record);
+ } else {
+ Record.push_back(0);
+ Writer.AddSelectorRef(E->getSelector(), Record);
+ }
+
+ Writer.AddSourceLocation(E->getLeftLoc(), Record);
+ Writer.AddSourceLocation(E->getRightLoc(), Record);
+
+ for (CallExpr::arg_iterator Arg = E->arg_begin(), ArgEnd = E->arg_end();
+ Arg != ArgEnd; ++Arg)
+ Writer.AddStmt(*Arg);
+
+ SourceLocation *Locs = E->getStoredSelLocs();
+ for (unsigned i = 0, e = E->getNumStoredSelLocs(); i != e; ++i)
+ Writer.AddSourceLocation(Locs[i], Record);
+
+ Code = serialization::EXPR_OBJC_MESSAGE_EXPR;
+}
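+
+// Note: a one-bit flag distinguishes a resolved method declaration
+// from a bare selector, telling the reader whether a DeclRef or a
+// SelectorRef comes next; the receiver kind likewise precedes its
+// kind-specific payload.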
+
+void ASTStmtWriter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
+ VisitStmt(S);
+ Writer.AddStmt(S->getElement());
+ Writer.AddStmt(S->getCollection());
+ Writer.AddStmt(S->getBody());
+ Writer.AddSourceLocation(S->getForLoc(), Record);
+ Writer.AddSourceLocation(S->getRParenLoc(), Record);
+ Code = serialization::STMT_OBJC_FOR_COLLECTION;
+}
+
+void ASTStmtWriter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+ Writer.AddStmt(S->getCatchBody());
+ Writer.AddDeclRef(S->getCatchParamDecl(), Record);
+ Writer.AddSourceLocation(S->getAtCatchLoc(), Record);
+ Writer.AddSourceLocation(S->getRParenLoc(), Record);
+ Code = serialization::STMT_OBJC_CATCH;
+}
+
+void ASTStmtWriter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
+ Writer.AddStmt(S->getFinallyBody());
+ Writer.AddSourceLocation(S->getAtFinallyLoc(), Record);
+ Code = serialization::STMT_OBJC_FINALLY;
+}
+
+void ASTStmtWriter::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
+ Writer.AddStmt(S->getSubStmt());
+ Writer.AddSourceLocation(S->getAtLoc(), Record);
+ Code = serialization::STMT_OBJC_AUTORELEASE_POOL;
+}
+
+void ASTStmtWriter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
+ Record.push_back(S->getNumCatchStmts());
+ Record.push_back(S->getFinallyStmt() != nullptr);
+ Writer.AddStmt(S->getTryBody());
+ for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I)
+ Writer.AddStmt(S->getCatchStmt(I));
+ if (S->getFinallyStmt())
+ Writer.AddStmt(S->getFinallyStmt());
+ Writer.AddSourceLocation(S->getAtTryLoc(), Record);
+ Code = serialization::STMT_OBJC_AT_TRY;
+}
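+
+// Note: the catch count and a has-finally flag are written before the
+// sub-statements so the reader can allocate an ObjCAtTryStmt of the
+// right size before filling in its bodies.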
+
+void ASTStmtWriter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
+ Writer.AddStmt(S->getSynchExpr());
+ Writer.AddStmt(S->getSynchBody());
+ Writer.AddSourceLocation(S->getAtSynchronizedLoc(), Record);
+ Code = serialization::STMT_OBJC_AT_SYNCHRONIZED;
+}
+
+void ASTStmtWriter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
+ Writer.AddStmt(S->getThrowExpr());
+ Writer.AddSourceLocation(S->getThrowLoc(), Record);
+ Code = serialization::STMT_OBJC_AT_THROW;
+}
+
+void ASTStmtWriter::VisitObjCBoolLiteralExpr(ObjCBoolLiteralExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getValue());
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Code = serialization::EXPR_OBJC_BOOL_LITERAL;
+}
+
+//===----------------------------------------------------------------------===//
+// C++ Expressions and Statements.
+//===----------------------------------------------------------------------===//
+
+void ASTStmtWriter::VisitCXXCatchStmt(CXXCatchStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getCatchLoc(), Record);
+ Writer.AddDeclRef(S->getExceptionDecl(), Record);
+ Writer.AddStmt(S->getHandlerBlock());
+ Code = serialization::STMT_CXX_CATCH;
+}
+
+void ASTStmtWriter::VisitCXXTryStmt(CXXTryStmt *S) {
+ VisitStmt(S);
+ Record.push_back(S->getNumHandlers());
+ Writer.AddSourceLocation(S->getTryLoc(), Record);
+ Writer.AddStmt(S->getTryBlock());
+ for (unsigned i = 0, e = S->getNumHandlers(); i != e; ++i)
+ Writer.AddStmt(S->getHandler(i));
+ Code = serialization::STMT_CXX_TRY;
+}
+
+void ASTStmtWriter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getForLoc(), Record);
+ Writer.AddSourceLocation(S->getCoawaitLoc(), Record);
+ Writer.AddSourceLocation(S->getColonLoc(), Record);
+ Writer.AddSourceLocation(S->getRParenLoc(), Record);
+ Writer.AddStmt(S->getRangeStmt());
+ Writer.AddStmt(S->getBeginEndStmt());
+ Writer.AddStmt(S->getCond());
+ Writer.AddStmt(S->getInc());
+ Writer.AddStmt(S->getLoopVarStmt());
+ Writer.AddStmt(S->getBody());
+ Code = serialization::STMT_CXX_FOR_RANGE;
+}
+
+void ASTStmtWriter::VisitMSDependentExistsStmt(MSDependentExistsStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getKeywordLoc(), Record);
+ Record.push_back(S->isIfExists());
+ Writer.AddNestedNameSpecifierLoc(S->getQualifierLoc(), Record);
+ Writer.AddDeclarationNameInfo(S->getNameInfo(), Record);
+ Writer.AddStmt(S->getSubStmt());
+ Code = serialization::STMT_MS_DEPENDENT_EXISTS;
+}
+
+void ASTStmtWriter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
+ VisitCallExpr(E);
+ Record.push_back(E->getOperator());
+ Writer.AddSourceRange(E->Range, Record);
+ Record.push_back(E->isFPContractable());
+ Code = serialization::EXPR_CXX_OPERATOR_CALL;
+}
+
+void ASTStmtWriter::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
+ VisitCallExpr(E);
+ Code = serialization::EXPR_CXX_MEMBER_CALL;
+}
+
+void ASTStmtWriter::VisitCXXConstructExpr(CXXConstructExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumArgs());
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ Writer.AddStmt(E->getArg(I));
+ Writer.AddDeclRef(E->getConstructor(), Record);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Record.push_back(E->isElidable());
+ Record.push_back(E->hadMultipleCandidates());
+ Record.push_back(E->isListInitialization());
+ Record.push_back(E->isStdInitListInitialization());
+ Record.push_back(E->requiresZeroInitialization());
+ Record.push_back(E->getConstructionKind()); // FIXME: stable encoding
+ Writer.AddSourceRange(E->getParenOrBraceRange(), Record);
+ Code = serialization::EXPR_CXX_CONSTRUCT;
+}
+
+void ASTStmtWriter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
+ VisitCXXConstructExpr(E);
+ Writer.AddTypeSourceInfo(E->getTypeSourceInfo(), Record);
+ Code = serialization::EXPR_CXX_TEMPORARY_OBJECT;
+}
+
+void ASTStmtWriter::VisitLambdaExpr(LambdaExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->NumCaptures);
+ unsigned NumArrayIndexVars = 0;
+ if (E->HasArrayIndexVars)
+ NumArrayIndexVars = E->getArrayIndexStarts()[E->NumCaptures];
+ Record.push_back(NumArrayIndexVars);
+ Writer.AddSourceRange(E->IntroducerRange, Record);
+ Record.push_back(E->CaptureDefault); // FIXME: stable encoding
+ Writer.AddSourceLocation(E->CaptureDefaultLoc, Record);
+ Record.push_back(E->ExplicitParams);
+ Record.push_back(E->ExplicitResultType);
+ Writer.AddSourceLocation(E->ClosingBrace, Record);
+
+ // Add capture initializers.
+ for (LambdaExpr::capture_init_iterator C = E->capture_init_begin(),
+ CEnd = E->capture_init_end();
+ C != CEnd; ++C) {
+ Writer.AddStmt(*C);
+ }
+
+ // Add array index variables, if any.
+ if (NumArrayIndexVars) {
+ Record.append(E->getArrayIndexStarts(),
+ E->getArrayIndexStarts() + E->NumCaptures + 1);
+ VarDecl **ArrayIndexVars = E->getArrayIndexVars();
+ for (unsigned I = 0; I != NumArrayIndexVars; ++I)
+ Writer.AddDeclRef(ArrayIndexVars[I], Record);
+ }
+
+ Code = serialization::EXPR_LAMBDA;
+}
+
+void ASTStmtWriter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getSubExpr());
+ Code = serialization::EXPR_CXX_STD_INITIALIZER_LIST;
+}
+
+void ASTStmtWriter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ Writer.AddSourceRange(SourceRange(E->getOperatorLoc(), E->getRParenLoc()),
+ Record);
+ Writer.AddSourceRange(E->getAngleBrackets(), Record);
+}
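+
+// Note: VisitCXXNamedCastExpr only adds the operator/r-paren range and
+// the angle brackets; the four named casts below share this layout and
+// differ only in the record code they stamp.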
+
+void ASTStmtWriter::VisitCXXStaticCastExpr(CXXStaticCastExpr *E) {
+ VisitCXXNamedCastExpr(E);
+ Code = serialization::EXPR_CXX_STATIC_CAST;
+}
+
+void ASTStmtWriter::VisitCXXDynamicCastExpr(CXXDynamicCastExpr *E) {
+ VisitCXXNamedCastExpr(E);
+ Code = serialization::EXPR_CXX_DYNAMIC_CAST;
+}
+
+void ASTStmtWriter::VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *E) {
+ VisitCXXNamedCastExpr(E);
+ Code = serialization::EXPR_CXX_REINTERPRET_CAST;
+}
+
+void ASTStmtWriter::VisitCXXConstCastExpr(CXXConstCastExpr *E) {
+ VisitCXXNamedCastExpr(E);
+ Code = serialization::EXPR_CXX_CONST_CAST;
+}
+
+void ASTStmtWriter::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ Writer.AddSourceLocation(E->getLParenLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_CXX_FUNCTIONAL_CAST;
+}
+
+void ASTStmtWriter::VisitUserDefinedLiteral(UserDefinedLiteral *E) {
+ VisitCallExpr(E);
+ Writer.AddSourceLocation(E->UDSuffixLoc, Record);
+ Code = serialization::EXPR_USER_DEFINED_LITERAL;
+}
+
+void ASTStmtWriter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getValue());
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Code = serialization::EXPR_CXX_BOOL_LITERAL;
+}
+
+void ASTStmtWriter::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Code = serialization::EXPR_CXX_NULL_PTR_LITERAL;
+}
+
+void ASTStmtWriter::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ if (E->isTypeOperand()) {
+ Writer.AddTypeSourceInfo(E->getTypeOperandSourceInfo(), Record);
+ Code = serialization::EXPR_CXX_TYPEID_TYPE;
+ } else {
+ Writer.AddStmt(E->getExprOperand());
+ Code = serialization::EXPR_CXX_TYPEID_EXPR;
+ }
+}
+
+void ASTStmtWriter::VisitCXXThisExpr(CXXThisExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Record.push_back(E->isImplicit());
+ Code = serialization::EXPR_CXX_THIS;
+}
+
+void ASTStmtWriter::VisitCXXThrowExpr(CXXThrowExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getThrowLoc(), Record);
+ Writer.AddStmt(E->getSubExpr());
+ Record.push_back(E->isThrownVariableInScope());
+ Code = serialization::EXPR_CXX_THROW;
+}
+
+void ASTStmtWriter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
+ VisitExpr(E);
+
+ bool HasOtherExprStored = E->Param.getInt();
+ // Store these first; the reader reads them before creating the node.
+ Record.push_back(HasOtherExprStored);
+ if (HasOtherExprStored)
+ Writer.AddStmt(E->getExpr());
+ Writer.AddDeclRef(E->getParam(), Record);
+ Writer.AddSourceLocation(E->getUsedLocation(), Record);
+
+ Code = serialization::EXPR_CXX_DEFAULT_ARG;
+}
+
+void ASTStmtWriter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
+ VisitExpr(E);
+ Writer.AddDeclRef(E->getField(), Record);
+ Writer.AddSourceLocation(E->getExprLoc(), Record);
+ Code = serialization::EXPR_CXX_DEFAULT_INIT;
+}
+
+void ASTStmtWriter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ VisitExpr(E);
+ Writer.AddCXXTemporary(E->getTemporary(), Record);
+ Writer.AddStmt(E->getSubExpr());
+ Code = serialization::EXPR_CXX_BIND_TEMPORARY;
+}
+
+void ASTStmtWriter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
+ VisitExpr(E);
+ Writer.AddTypeSourceInfo(E->getTypeSourceInfo(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_CXX_SCALAR_VALUE_INIT;
+}
+
+void ASTStmtWriter::VisitCXXNewExpr(CXXNewExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->isGlobalNew());
+ Record.push_back(E->isArray());
+ Record.push_back(E->doesUsualArrayDeleteWantSize());
+ Record.push_back(E->getNumPlacementArgs());
+ Record.push_back(E->StoredInitializationStyle);
+ Writer.AddDeclRef(E->getOperatorNew(), Record);
+ Writer.AddDeclRef(E->getOperatorDelete(), Record);
+ Writer.AddTypeSourceInfo(E->getAllocatedTypeSourceInfo(), Record);
+ Writer.AddSourceRange(E->getTypeIdParens(), Record);
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ Writer.AddSourceRange(E->getDirectInitRange(), Record);
+ for (CXXNewExpr::arg_iterator I = E->raw_arg_begin(), e = E->raw_arg_end();
+ I != e; ++I)
+ Writer.AddStmt(*I);
+
+ Code = serialization::EXPR_CXX_NEW;
+}
+
+void ASTStmtWriter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->isGlobalDelete());
+ Record.push_back(E->isArrayForm());
+ Record.push_back(E->isArrayFormAsWritten());
+ Record.push_back(E->doesUsualArrayDeleteWantSize());
+ Writer.AddDeclRef(E->getOperatorDelete(), Record);
+ Writer.AddStmt(E->getArgument());
+ Writer.AddSourceLocation(E->getSourceRange().getBegin(), Record);
+
+ Code = serialization::EXPR_CXX_DELETE;
+}
+
+void ASTStmtWriter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
+ VisitExpr(E);
+
+ Writer.AddStmt(E->getBase());
+ Record.push_back(E->isArrow());
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Writer.AddNestedNameSpecifierLoc(E->getQualifierLoc(), Record);
+ Writer.AddTypeSourceInfo(E->getScopeTypeInfo(), Record);
+ Writer.AddSourceLocation(E->getColonColonLoc(), Record);
+ Writer.AddSourceLocation(E->getTildeLoc(), Record);
+
+ // PseudoDestructorTypeStorage.
+ Writer.AddIdentifierRef(E->getDestroyedTypeIdentifier(), Record);
+ if (E->getDestroyedTypeIdentifier())
+ Writer.AddSourceLocation(E->getDestroyedTypeLoc(), Record);
+ else
+ Writer.AddTypeSourceInfo(E->getDestroyedTypeInfo(), Record);
+
+ Code = serialization::EXPR_CXX_PSEUDO_DESTRUCTOR;
+}
+
+void ASTStmtWriter::VisitExprWithCleanups(ExprWithCleanups *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumObjects());
+ for (unsigned i = 0, e = E->getNumObjects(); i != e; ++i)
+ Writer.AddDeclRef(E->getObject(i), Record);
+
+ Writer.AddStmt(E->getSubExpr());
+ Code = serialization::EXPR_EXPR_WITH_CLEANUPS;
+}
+
+void
+ASTStmtWriter::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E) {
+ VisitExpr(E);
+
+ // Don't emit anything here; HasTemplateKWAndArgsInfo must be
+ // emitted first.
+
+ Record.push_back(E->HasTemplateKWAndArgsInfo);
+ if (E->HasTemplateKWAndArgsInfo) {
+ const ASTTemplateKWAndArgsInfo &ArgInfo =
+ *E->getTrailingObjects<ASTTemplateKWAndArgsInfo>();
+ Record.push_back(ArgInfo.NumTemplateArgs);
+ AddTemplateKWAndArgsInfo(ArgInfo,
+ E->getTrailingObjects<TemplateArgumentLoc>());
+ }
+
+ if (!E->isImplicitAccess())
+ Writer.AddStmt(E->getBase());
+ else
+ Writer.AddStmt(nullptr);
+ Writer.AddTypeRef(E->getBaseType(), Record);
+ Record.push_back(E->isArrow());
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Writer.AddNestedNameSpecifierLoc(E->getQualifierLoc(), Record);
+ Writer.AddDeclRef(E->getFirstQualifierFoundInScope(), Record);
+ Writer.AddDeclarationNameInfo(E->MemberNameInfo, Record);
+ Code = serialization::EXPR_CXX_DEPENDENT_SCOPE_MEMBER;
+}
+
+void
+ASTStmtWriter::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
+ VisitExpr(E);
+
+ // Don't emit anything here; HasTemplateKWAndArgsInfo must be
+ // emitted first.
+
+ Record.push_back(E->HasTemplateKWAndArgsInfo);
+ if (E->HasTemplateKWAndArgsInfo) {
+ const ASTTemplateKWAndArgsInfo &ArgInfo =
+ *E->getTrailingObjects<ASTTemplateKWAndArgsInfo>();
+ Record.push_back(ArgInfo.NumTemplateArgs);
+ AddTemplateKWAndArgsInfo(ArgInfo,
+ E->getTrailingObjects<TemplateArgumentLoc>());
+ }
+
+ Writer.AddNestedNameSpecifierLoc(E->getQualifierLoc(), Record);
+ Writer.AddDeclarationNameInfo(E->NameInfo, Record);
+ Code = serialization::EXPR_CXX_DEPENDENT_SCOPE_DECL_REF;
+}
+
+void
+ASTStmtWriter::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->arg_size());
+ for (CXXUnresolvedConstructExpr::arg_iterator
+ ArgI = E->arg_begin(), ArgE = E->arg_end(); ArgI != ArgE; ++ArgI)
+ Writer.AddStmt(*ArgI);
+ Writer.AddTypeSourceInfo(E->getTypeSourceInfo(), Record);
+ Writer.AddSourceLocation(E->getLParenLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_CXX_UNRESOLVED_CONSTRUCT;
+}
+
+void ASTStmtWriter::VisitOverloadExpr(OverloadExpr *E) {
+ VisitExpr(E);
+
+ // Don't emit anything here; HasTemplateKWAndArgsInfo must be
+ // emitted first.
+
+ Record.push_back(E->HasTemplateKWAndArgsInfo);
+ if (E->HasTemplateKWAndArgsInfo) {
+ const ASTTemplateKWAndArgsInfo &ArgInfo =
+ *E->getTrailingASTTemplateKWAndArgsInfo();
+ Record.push_back(ArgInfo.NumTemplateArgs);
+ AddTemplateKWAndArgsInfo(ArgInfo, E->getTrailingTemplateArgumentLoc());
+ }
+
+ Record.push_back(E->getNumDecls());
+ for (OverloadExpr::decls_iterator
+ OvI = E->decls_begin(), OvE = E->decls_end(); OvI != OvE; ++OvI) {
+ Writer.AddDeclRef(OvI.getDecl(), Record);
+ Record.push_back(OvI.getAccess());
+ }
+
+ Writer.AddDeclarationNameInfo(E->NameInfo, Record);
+ Writer.AddNestedNameSpecifierLoc(E->getQualifierLoc(), Record);
+}
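+
+// Note: like the dependent-scope visitors above, VisitOverloadExpr
+// front-loads HasTemplateKWAndArgsInfo and the declaration count so the
+// reader can allocate the trailing-object storage before reading the
+// shared name and qualifier; the two subclasses below then append
+// their own bits and set Code.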
+
+void ASTStmtWriter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
+ VisitOverloadExpr(E);
+ Record.push_back(E->isArrow());
+ Record.push_back(E->hasUnresolvedUsing());
+ Writer.AddStmt(!E->isImplicitAccess() ? E->getBase() : nullptr);
+ Writer.AddTypeRef(E->getBaseType(), Record);
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Code = serialization::EXPR_CXX_UNRESOLVED_MEMBER;
+}
+
+void ASTStmtWriter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
+ VisitOverloadExpr(E);
+ Record.push_back(E->requiresADL());
+ Record.push_back(E->isOverloaded());
+ Writer.AddDeclRef(E->getNamingClass(), Record);
+ Code = serialization::EXPR_CXX_UNRESOLVED_LOOKUP;
+}
+
+void ASTStmtWriter::VisitTypeTraitExpr(TypeTraitExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->TypeTraitExprBits.NumArgs);
+ Record.push_back(E->TypeTraitExprBits.Kind); // FIXME: Stable encoding
+ Record.push_back(E->TypeTraitExprBits.Value);
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ Writer.AddTypeSourceInfo(E->getArg(I), Record);
+ Code = serialization::EXPR_TYPE_TRAIT;
+}
+
+void ASTStmtWriter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getTrait());
+ Record.push_back(E->getValue());
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ Writer.AddTypeSourceInfo(E->getQueriedTypeSourceInfo(), Record);
+ Code = serialization::EXPR_ARRAY_TYPE_TRAIT;
+}
+
+void ASTStmtWriter::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getTrait());
+ Record.push_back(E->getValue());
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ Writer.AddStmt(E->getQueriedExpression());
+ Code = serialization::EXPR_CXX_EXPRESSION_TRAIT;
+}
+
+void ASTStmtWriter::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getValue());
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ Writer.AddStmt(E->getOperand());
+ Code = serialization::EXPR_CXX_NOEXCEPT;
+}
+
+void ASTStmtWriter::VisitPackExpansionExpr(PackExpansionExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getEllipsisLoc(), Record);
+ Record.push_back(E->NumExpansions);
+ Writer.AddStmt(E->getPattern());
+ Code = serialization::EXPR_PACK_EXPANSION;
+}
+
+void ASTStmtWriter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->isPartiallySubstituted() ? E->getPartialArguments().size()
+ : 0);
+ Writer.AddSourceLocation(E->OperatorLoc, Record);
+ Writer.AddSourceLocation(E->PackLoc, Record);
+ Writer.AddSourceLocation(E->RParenLoc, Record);
+ Writer.AddDeclRef(E->Pack, Record);
+ if (E->isPartiallySubstituted()) {
+ for (const auto &TA : E->getPartialArguments())
+ Writer.AddTemplateArgument(TA, Record);
+ } else if (!E->isValueDependent()) {
+ Record.push_back(E->getPackLength());
+ }
+ Code = serialization::EXPR_SIZEOF_PACK;
+}
+
+void ASTStmtWriter::VisitSubstNonTypeTemplateParmExpr(
+ SubstNonTypeTemplateParmExpr *E) {
+ VisitExpr(E);
+ Writer.AddDeclRef(E->getParameter(), Record);
+ Writer.AddSourceLocation(E->getNameLoc(), Record);
+ Writer.AddStmt(E->getReplacement());
+ Code = serialization::EXPR_SUBST_NON_TYPE_TEMPLATE_PARM;
+}
+
+void ASTStmtWriter::VisitSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *E) {
+ VisitExpr(E);
+ Writer.AddDeclRef(E->getParameterPack(), Record);
+ Writer.AddTemplateArgument(E->getArgumentPack(), Record);
+ Writer.AddSourceLocation(E->getParameterPackLocation(), Record);
+ Code = serialization::EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK;
+}
+
+void ASTStmtWriter::VisitFunctionParmPackExpr(FunctionParmPackExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumExpansions());
+ Writer.AddDeclRef(E->getParameterPack(), Record);
+ Writer.AddSourceLocation(E->getParameterPackLocation(), Record);
+ for (FunctionParmPackExpr::iterator I = E->begin(), End = E->end();
+ I != End; ++I)
+ Writer.AddDeclRef(*I, Record);
+ Code = serialization::EXPR_FUNCTION_PARM_PACK;
+}
+
+void ASTStmtWriter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getTemporary());
+ Writer.AddDeclRef(E->getExtendingDecl(), Record);
+ Record.push_back(E->getManglingNumber());
+ Code = serialization::EXPR_MATERIALIZE_TEMPORARY;
+}
+
+void ASTStmtWriter::VisitCXXFoldExpr(CXXFoldExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->LParenLoc, Record);
+ Writer.AddSourceLocation(E->EllipsisLoc, Record);
+ Writer.AddSourceLocation(E->RParenLoc, Record);
+ Writer.AddStmt(E->SubExprs[0]);
+ Writer.AddStmt(E->SubExprs[1]);
+ Record.push_back(E->Opcode);
+ Code = serialization::EXPR_CXX_FOLD;
+}
+
+void ASTStmtWriter::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getSourceExpr());
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Code = serialization::EXPR_OPAQUE_VALUE;
+}
+
+void ASTStmtWriter::VisitTypoExpr(TypoExpr *E) {
+ VisitExpr(E);
+ // TODO: Figure out sane writer behavior for a TypoExpr, if necessary
+ assert(false && "Cannot write TypoExpr nodes");
+}
+
+//===----------------------------------------------------------------------===//
+// CUDA Expressions and Statements.
+//===----------------------------------------------------------------------===//
+
+void ASTStmtWriter::VisitCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
+ VisitCallExpr(E);
+ Writer.AddStmt(E->getConfig());
+ Code = serialization::EXPR_CUDA_KERNEL_CALL;
+}
+
+//===----------------------------------------------------------------------===//
+// OpenCL Expressions and Statements.
+//===----------------------------------------------------------------------===//
+void ASTStmtWriter::VisitAsTypeExpr(AsTypeExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Writer.AddStmt(E->getSrcExpr());
+ Code = serialization::EXPR_ASTYPE;
+}
+
+//===----------------------------------------------------------------------===//
+// Microsoft Expressions and Statements.
+//===----------------------------------------------------------------------===//
+void ASTStmtWriter::VisitMSPropertyRefExpr(MSPropertyRefExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->isArrow());
+ Writer.AddStmt(E->getBaseExpr());
+ Writer.AddNestedNameSpecifierLoc(E->getQualifierLoc(), Record);
+ Writer.AddSourceLocation(E->getMemberLoc(), Record);
+ Writer.AddDeclRef(E->getPropertyDecl(), Record);
+ Code = serialization::EXPR_CXX_PROPERTY_REF_EXPR;
+}
+
+void ASTStmtWriter::VisitMSPropertySubscriptExpr(MSPropertySubscriptExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getBase());
+ Writer.AddStmt(E->getIdx());
+ Writer.AddSourceLocation(E->getRBracketLoc(), Record);
+ Code = serialization::EXPR_CXX_PROPERTY_SUBSCRIPT_EXPR;
+}
+
+void ASTStmtWriter::VisitCXXUuidofExpr(CXXUuidofExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ if (E->isTypeOperand()) {
+ Writer.AddTypeSourceInfo(E->getTypeOperandSourceInfo(), Record);
+ Code = serialization::EXPR_CXX_UUIDOF_TYPE;
+ } else {
+ Writer.AddStmt(E->getExprOperand());
+ Code = serialization::EXPR_CXX_UUIDOF_EXPR;
+ }
+}
+
+void ASTStmtWriter::VisitSEHExceptStmt(SEHExceptStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getExceptLoc(), Record);
+ Writer.AddStmt(S->getFilterExpr());
+ Writer.AddStmt(S->getBlock());
+ Code = serialization::STMT_SEH_EXCEPT;
+}
+
+void ASTStmtWriter::VisitSEHFinallyStmt(SEHFinallyStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getFinallyLoc(), Record);
+ Writer.AddStmt(S->getBlock());
+ Code = serialization::STMT_SEH_FINALLY;
+}
+
+void ASTStmtWriter::VisitSEHTryStmt(SEHTryStmt *S) {
+ VisitStmt(S);
+ Record.push_back(S->getIsCXXTry());
+ Writer.AddSourceLocation(S->getTryLoc(), Record);
+ Writer.AddStmt(S->getTryBlock());
+ Writer.AddStmt(S->getHandler());
+ Code = serialization::STMT_SEH_TRY;
+}
+
+void ASTStmtWriter::VisitSEHLeaveStmt(SEHLeaveStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getLeaveLoc(), Record);
+ Code = serialization::STMT_SEH_LEAVE;
+}
+
+//===----------------------------------------------------------------------===//
+// OpenMP Clauses.
+//===----------------------------------------------------------------------===//
+
+namespace clang {
+class OMPClauseWriter : public OMPClauseVisitor<OMPClauseWriter> {
+ ASTStmtWriter *Writer;
+ ASTWriter::RecordData &Record;
+public:
+ OMPClauseWriter(ASTStmtWriter *W, ASTWriter::RecordData &Record)
+ : Writer(W), Record(Record) { }
+#define OPENMP_CLAUSE(Name, Class) \
+ void Visit##Class(Class *S);
+#include "clang/Basic/OpenMPKinds.def"
+ void writeClause(OMPClause *C);
+};
+}
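+
+// Note: the OPENMP_CLAUSE X-macro from OpenMPKinds.def declares one
+// Visit method per clause class, keeping this writer (and the matching
+// reader) mechanically in sync with the list of known clauses.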
+
+void OMPClauseWriter::writeClause(OMPClause *C) {
+ Record.push_back(C->getClauseKind());
+ Visit(C);
+ Writer->Writer.AddSourceLocation(C->getLocStart(), Record);
+ Writer->Writer.AddSourceLocation(C->getLocEnd(), Record);
+}
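+
+// Note: the clause kind goes out first so the reader can construct the
+// right OMPClause subclass, then read the kind-specific fields and
+// finally the common start/end locations, in exactly this order.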
+
+void OMPClauseWriter::VisitOMPIfClause(OMPIfClause *C) {
+ Record.push_back(C->getNameModifier());
+ Writer->Writer.AddSourceLocation(C->getNameModifierLoc(), Record);
+ Writer->Writer.AddSourceLocation(C->getColonLoc(), Record);
+ Writer->Writer.AddStmt(C->getCondition());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPFinalClause(OMPFinalClause *C) {
+ Writer->Writer.AddStmt(C->getCondition());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPNumThreadsClause(OMPNumThreadsClause *C) {
+ Writer->Writer.AddStmt(C->getNumThreads());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPSafelenClause(OMPSafelenClause *C) {
+ Writer->Writer.AddStmt(C->getSafelen());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPSimdlenClause(OMPSimdlenClause *C) {
+ Writer->Writer.AddStmt(C->getSimdlen());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPCollapseClause(OMPCollapseClause *C) {
+ Writer->Writer.AddStmt(C->getNumForLoops());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPDefaultClause(OMPDefaultClause *C) {
+ Record.push_back(C->getDefaultKind());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+ Writer->Writer.AddSourceLocation(C->getDefaultKindKwLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPProcBindClause(OMPProcBindClause *C) {
+ Record.push_back(C->getProcBindKind());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+ Writer->Writer.AddSourceLocation(C->getProcBindKindKwLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPScheduleClause(OMPScheduleClause *C) {
+ Record.push_back(C->getScheduleKind());
+ Record.push_back(C->getFirstScheduleModifier());
+ Record.push_back(C->getSecondScheduleModifier());
+ Writer->Writer.AddStmt(C->getChunkSize());
+ Writer->Writer.AddStmt(C->getHelperChunkSize());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+ Writer->Writer.AddSourceLocation(C->getFirstScheduleModifierLoc(), Record);
+ Writer->Writer.AddSourceLocation(C->getSecondScheduleModifierLoc(), Record);
+ Writer->Writer.AddSourceLocation(C->getScheduleKindLoc(), Record);
+ Writer->Writer.AddSourceLocation(C->getCommaLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPOrderedClause(OMPOrderedClause *C) {
+ Writer->Writer.AddStmt(C->getNumForLoops());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPNowaitClause(OMPNowaitClause *) {}
+
+void OMPClauseWriter::VisitOMPUntiedClause(OMPUntiedClause *) {}
+
+void OMPClauseWriter::VisitOMPMergeableClause(OMPMergeableClause *) {}
+
+void OMPClauseWriter::VisitOMPReadClause(OMPReadClause *) {}
+
+void OMPClauseWriter::VisitOMPWriteClause(OMPWriteClause *) {}
+
+void OMPClauseWriter::VisitOMPUpdateClause(OMPUpdateClause *) {}
+
+void OMPClauseWriter::VisitOMPCaptureClause(OMPCaptureClause *) {}
+
+void OMPClauseWriter::VisitOMPSeqCstClause(OMPSeqCstClause *) {}
+
+void OMPClauseWriter::VisitOMPThreadsClause(OMPThreadsClause *) {}
+
+void OMPClauseWriter::VisitOMPSIMDClause(OMPSIMDClause *) {}
+
+void OMPClauseWriter::VisitOMPNogroupClause(OMPNogroupClause *) {}
+
+void OMPClauseWriter::VisitOMPPrivateClause(OMPPrivateClause *C) {
+ Record.push_back(C->varlist_size());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+ for (auto *VE : C->varlists()) {
+ Writer->Writer.AddStmt(VE);
+ }
+ for (auto *VE : C->private_copies()) {
+ Writer->Writer.AddStmt(VE);
+ }
+}
+
+void OMPClauseWriter::VisitOMPFirstprivateClause(OMPFirstprivateClause *C) {
+ Record.push_back(C->varlist_size());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+ for (auto *VE : C->varlists()) {
+ Writer->Writer.AddStmt(VE);
+ }
+ for (auto *VE : C->private_copies()) {
+ Writer->Writer.AddStmt(VE);
+ }
+ for (auto *VE : C->inits()) {
+ Writer->Writer.AddStmt(VE);
+ }
+}
+
+void OMPClauseWriter::VisitOMPLastprivateClause(OMPLastprivateClause *C) {
+ Record.push_back(C->varlist_size());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+ for (auto *VE : C->varlists())
+ Writer->Writer.AddStmt(VE);
+ for (auto *E : C->private_copies())
+ Writer->Writer.AddStmt(E);
+ for (auto *E : C->source_exprs())
+ Writer->Writer.AddStmt(E);
+ for (auto *E : C->destination_exprs())
+ Writer->Writer.AddStmt(E);
+ for (auto *E : C->assignment_ops())
+ Writer->Writer.AddStmt(E);
+}
+
+void OMPClauseWriter::VisitOMPSharedClause(OMPSharedClause *C) {
+ Record.push_back(C->varlist_size());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+ for (auto *VE : C->varlists())
+ Writer->Writer.AddStmt(VE);
+}
+
+void OMPClauseWriter::VisitOMPReductionClause(OMPReductionClause *C) {
+ Record.push_back(C->varlist_size());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+ Writer->Writer.AddSourceLocation(C->getColonLoc(), Record);
+ Writer->Writer.AddNestedNameSpecifierLoc(C->getQualifierLoc(), Record);
+ Writer->Writer.AddDeclarationNameInfo(C->getNameInfo(), Record);
+ for (auto *VE : C->varlists())
+ Writer->Writer.AddStmt(VE);
+ for (auto *VE : C->privates())
+ Writer->Writer.AddStmt(VE);
+ for (auto *E : C->lhs_exprs())
+ Writer->Writer.AddStmt(E);
+ for (auto *E : C->rhs_exprs())
+ Writer->Writer.AddStmt(E);
+ for (auto *E : C->reduction_ops())
+ Writer->Writer.AddStmt(E);
+}
+
+void OMPClauseWriter::VisitOMPLinearClause(OMPLinearClause *C) {
+ Record.push_back(C->varlist_size());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+ Writer->Writer.AddSourceLocation(C->getColonLoc(), Record);
+ Record.push_back(C->getModifier());
+ Writer->Writer.AddSourceLocation(C->getModifierLoc(), Record);
+ for (auto *VE : C->varlists()) {
+ Writer->Writer.AddStmt(VE);
+ }
+ for (auto *VE : C->privates()) {
+ Writer->Writer.AddStmt(VE);
+ }
+ for (auto *VE : C->inits()) {
+ Writer->Writer.AddStmt(VE);
+ }
+ for (auto *VE : C->updates()) {
+ Writer->Writer.AddStmt(VE);
+ }
+ for (auto *VE : C->finals()) {
+ Writer->Writer.AddStmt(VE);
+ }
+ Writer->Writer.AddStmt(C->getStep());
+ Writer->Writer.AddStmt(C->getCalcStep());
+}
+
+void OMPClauseWriter::VisitOMPAlignedClause(OMPAlignedClause *C) {
+ Record.push_back(C->varlist_size());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+ Writer->Writer.AddSourceLocation(C->getColonLoc(), Record);
+ for (auto *VE : C->varlists())
+ Writer->Writer.AddStmt(VE);
+ Writer->Writer.AddStmt(C->getAlignment());
+}
+
+void OMPClauseWriter::VisitOMPCopyinClause(OMPCopyinClause *C) {
+ Record.push_back(C->varlist_size());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+ for (auto *VE : C->varlists())
+ Writer->Writer.AddStmt(VE);
+ for (auto *E : C->source_exprs())
+ Writer->Writer.AddStmt(E);
+ for (auto *E : C->destination_exprs())
+ Writer->Writer.AddStmt(E);
+ for (auto *E : C->assignment_ops())
+ Writer->Writer.AddStmt(E);
+}
+
+void OMPClauseWriter::VisitOMPCopyprivateClause(OMPCopyprivateClause *C) {
+ Record.push_back(C->varlist_size());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+ for (auto *VE : C->varlists())
+ Writer->Writer.AddStmt(VE);
+ for (auto *E : C->source_exprs())
+ Writer->Writer.AddStmt(E);
+ for (auto *E : C->destination_exprs())
+ Writer->Writer.AddStmt(E);
+ for (auto *E : C->assignment_ops())
+ Writer->Writer.AddStmt(E);
+}
+
+void OMPClauseWriter::VisitOMPFlushClause(OMPFlushClause *C) {
+ Record.push_back(C->varlist_size());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+ for (auto *VE : C->varlists())
+ Writer->Writer.AddStmt(VE);
+}
+
+void OMPClauseWriter::VisitOMPDependClause(OMPDependClause *C) {
+ Record.push_back(C->varlist_size());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+ Record.push_back(C->getDependencyKind());
+ Writer->Writer.AddSourceLocation(C->getDependencyLoc(), Record);
+ Writer->Writer.AddSourceLocation(C->getColonLoc(), Record);
+ for (auto *VE : C->varlists())
+ Writer->Writer.AddStmt(VE);
+}
+
+void OMPClauseWriter::VisitOMPDeviceClause(OMPDeviceClause *C) {
+ Writer->Writer.AddStmt(C->getDevice());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPMapClause(OMPMapClause *C) {
+ Record.push_back(C->varlist_size());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+ Record.push_back(C->getMapTypeModifier());
+ Record.push_back(C->getMapType());
+ Writer->Writer.AddSourceLocation(C->getMapLoc(), Record);
+ Writer->Writer.AddSourceLocation(C->getColonLoc(), Record);
+ for (auto *VE : C->varlists())
+ Writer->Writer.AddStmt(VE);
+}
+
+void OMPClauseWriter::VisitOMPNumTeamsClause(OMPNumTeamsClause *C) {
+ Writer->Writer.AddStmt(C->getNumTeams());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPThreadLimitClause(OMPThreadLimitClause *C) {
+ Writer->Writer.AddStmt(C->getThreadLimit());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPPriorityClause(OMPPriorityClause *C) {
+ Writer->Writer.AddStmt(C->getPriority());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPGrainsizeClause(OMPGrainsizeClause *C) {
+ Writer->Writer.AddStmt(C->getGrainsize());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPNumTasksClause(OMPNumTasksClause *C) {
+ Writer->Writer.AddStmt(C->getNumTasks());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPHintClause(OMPHintClause *C) {
+ Writer->Writer.AddStmt(C->getHint());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+//===----------------------------------------------------------------------===//
+// OpenMP Directives.
+//===----------------------------------------------------------------------===//
+void ASTStmtWriter::VisitOMPExecutableDirective(OMPExecutableDirective *E) {
+ Writer.AddSourceLocation(E->getLocStart(), Record);
+ Writer.AddSourceLocation(E->getLocEnd(), Record);
+ OMPClauseWriter ClauseWriter(this, Record);
+ for (unsigned i = 0; i < E->getNumClauses(); ++i) {
+ ClauseWriter.writeClause(E->getClause(i));
+ }
+ if (E->hasAssociatedStmt())
+ Writer.AddStmt(E->getAssociatedStmt());
+}
+
+void ASTStmtWriter::VisitOMPLoopDirective(OMPLoopDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ Record.push_back(D->getCollapsedNumber());
+ VisitOMPExecutableDirective(D);
+ Writer.AddStmt(D->getIterationVariable());
+ Writer.AddStmt(D->getLastIteration());
+ Writer.AddStmt(D->getCalcLastIteration());
+ Writer.AddStmt(D->getPreCond());
+ Writer.AddStmt(D->getCond());
+ Writer.AddStmt(D->getInit());
+ Writer.AddStmt(D->getInc());
+ if (isOpenMPWorksharingDirective(D->getDirectiveKind())) {
+ Writer.AddStmt(D->getIsLastIterVariable());
+ Writer.AddStmt(D->getLowerBoundVariable());
+ Writer.AddStmt(D->getUpperBoundVariable());
+ Writer.AddStmt(D->getStrideVariable());
+ Writer.AddStmt(D->getEnsureUpperBound());
+ Writer.AddStmt(D->getNextLowerBound());
+ Writer.AddStmt(D->getNextUpperBound());
+ }
+ for (auto I : D->counters()) {
+ Writer.AddStmt(I);
+ }
+ for (auto I : D->private_counters()) {
+ Writer.AddStmt(I);
+ }
+ for (auto I : D->inits()) {
+ Writer.AddStmt(I);
+ }
+ for (auto I : D->updates()) {
+ Writer.AddStmt(I);
+ }
+ for (auto I : D->finals()) {
+ Writer.AddStmt(I);
+ }
+}
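+
+// Note: the worksharing-only helper expressions (bounds, stride, and
+// friends) are serialized conditionally, so the reader must apply the
+// same isOpenMPWorksharingDirective test to the directive kind to stay
+// in sync with this layout.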
+
+void ASTStmtWriter::VisitOMPParallelDirective(OMPParallelDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Record.push_back(D->hasCancel() ? 1 : 0);
+ Code = serialization::STMT_OMP_PARALLEL_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPSimdDirective(OMPSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_SIMD_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPForDirective(OMPForDirective *D) {
+ VisitOMPLoopDirective(D);
+ Record.push_back(D->hasCancel() ? 1 : 0);
+ Code = serialization::STMT_OMP_FOR_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPForSimdDirective(OMPForSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_FOR_SIMD_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPSectionsDirective(OMPSectionsDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Record.push_back(D->hasCancel() ? 1 : 0);
+ Code = serialization::STMT_OMP_SECTIONS_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPSectionDirective(OMPSectionDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+ Record.push_back(D->hasCancel() ? 1 : 0);
+ Code = serialization::STMT_OMP_SECTION_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPSingleDirective(OMPSingleDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_SINGLE_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPMasterDirective(OMPMasterDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_MASTER_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPCriticalDirective(OMPCriticalDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Writer.AddDeclarationNameInfo(D->getDirectiveName(), Record);
+ Code = serialization::STMT_OMP_CRITICAL_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPParallelForDirective(OMPParallelForDirective *D) {
+ VisitOMPLoopDirective(D);
+ Record.push_back(D->hasCancel() ? 1 : 0);
+ Code = serialization::STMT_OMP_PARALLEL_FOR_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPParallelForSimdDirective(
+ OMPParallelForSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_PARALLEL_FOR_SIMD_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPParallelSectionsDirective(
+ OMPParallelSectionsDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Record.push_back(D->hasCancel() ? 1 : 0);
+ Code = serialization::STMT_OMP_PARALLEL_SECTIONS_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTaskDirective(OMPTaskDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Record.push_back(D->hasCancel() ? 1 : 0);
+ Code = serialization::STMT_OMP_TASK_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPAtomicDirective(OMPAtomicDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Writer.AddStmt(D->getX());
+ Writer.AddStmt(D->getV());
+ Writer.AddStmt(D->getExpr());
+ Writer.AddStmt(D->getUpdateExpr());
+ Record.push_back(D->isXLHSInRHSPart() ? 1 : 0);
+ Record.push_back(D->isPostfixUpdate() ? 1 : 0);
+ Code = serialization::STMT_OMP_ATOMIC_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTargetDirective(OMPTargetDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_TARGET_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTargetDataDirective(OMPTargetDataDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_TARGET_DATA_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTaskyieldDirective(OMPTaskyieldDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_TASKYIELD_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPBarrierDirective(OMPBarrierDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_BARRIER_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTaskwaitDirective(OMPTaskwaitDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_TASKWAIT_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTaskgroupDirective(OMPTaskgroupDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_TASKGROUP_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPFlushDirective(OMPFlushDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_FLUSH_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPOrderedDirective(OMPOrderedDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_ORDERED_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTeamsDirective(OMPTeamsDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_TEAMS_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPCancellationPointDirective(
+ OMPCancellationPointDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+ Record.push_back(D->getCancelRegion());
+ Code = serialization::STMT_OMP_CANCELLATION_POINT_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPCancelDirective(OMPCancelDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Record.push_back(D->getCancelRegion());
+ Code = serialization::STMT_OMP_CANCEL_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTaskLoopDirective(OMPTaskLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_TASKLOOP_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTaskLoopSimdDirective(OMPTaskLoopSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_TASKLOOP_SIMD_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPDistributeDirective(OMPDistributeDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_DISTRIBUTE_DIRECTIVE;
+}
+
+//===----------------------------------------------------------------------===//
+// ASTWriter Implementation
+//===----------------------------------------------------------------------===//
+
+unsigned ASTWriter::RecordSwitchCaseID(SwitchCase *S) {
+ assert(SwitchCaseIDs.find(S) == SwitchCaseIDs.end() &&
+ "SwitchCase recorded twice");
+ unsigned NextID = SwitchCaseIDs.size();
+ SwitchCaseIDs[S] = NextID;
+ return NextID;
+}
+
+unsigned ASTWriter::getSwitchCaseID(SwitchCase *S) {
+ assert(SwitchCaseIDs.find(S) != SwitchCaseIDs.end() &&
+ "SwitchCase hasn't been seen yet");
+ return SwitchCaseIDs[S];
+}
+
+void ASTWriter::ClearSwitchCaseIDs() {
+ SwitchCaseIDs.clear();
+}
+
+/// \brief Write the given substatement or subexpression to the
+/// bitstream.
+void ASTWriter::WriteSubStmt(Stmt *S,
+ llvm::DenseMap<Stmt *, uint64_t> &SubStmtEntries,
+ llvm::DenseSet<Stmt *> &ParentStmts) {
+ RecordData Record;
+ ASTStmtWriter Writer(*this, Record);
+ ++NumStatements;
+
+ if (!S) {
+ Stream.EmitRecord(serialization::STMT_NULL_PTR, Record);
+ return;
+ }
+
+ llvm::DenseMap<Stmt *, uint64_t>::iterator I = SubStmtEntries.find(S);
+ if (I != SubStmtEntries.end()) {
+ Record.push_back(I->second);
+ Stream.EmitRecord(serialization::STMT_REF_PTR, Record);
+ return;
+ }
+
+#ifndef NDEBUG
+ assert(!ParentStmts.count(S) && "There is a Stmt cycle!");
+
+ struct ParentStmtInserterRAII {
+ Stmt *S;
+ llvm::DenseSet<Stmt *> &ParentStmts;
+
+ ParentStmtInserterRAII(Stmt *S, llvm::DenseSet<Stmt *> &ParentStmts)
+ : S(S), ParentStmts(ParentStmts) {
+ ParentStmts.insert(S);
+ }
+ ~ParentStmtInserterRAII() {
+ ParentStmts.erase(S);
+ }
+ };
+
+ ParentStmtInserterRAII ParentStmtInserter(S, ParentStmts);
+#endif
+
+ // Redirect ASTWriter::AddStmt to collect sub-stmts.
+ SmallVector<Stmt *, 16> SubStmts;
+ CollectedStmts = &SubStmts;
+
+ Writer.Code = serialization::STMT_NULL_PTR;
+ Writer.AbbrevToUse = 0;
+ Writer.Visit(S);
+
+#ifndef NDEBUG
+ if (Writer.Code == serialization::STMT_NULL_PTR) {
+ SourceManager &SrcMgr
+ = DeclIDs.begin()->first->getASTContext().getSourceManager();
+ S->dump(SrcMgr);
+ llvm_unreachable("Unhandled sub-statement writing AST file");
+ }
+#endif
+
+ // Revert ASTWriter::AddStmt.
+ CollectedStmts = &StmtsToEmit;
+
+  // Write the sub-stmts in reverse order, last to first. When reading them
+  // back we will read them in the correct order by popping them off the
+  // Stmts stack. This simplifies reading and allows storing a variable
+  // number of sub-stmts without knowing the count in advance.
+ while (!SubStmts.empty())
+ WriteSubStmt(SubStmts.pop_back_val(), SubStmtEntries, ParentStmts);
+
+ Stream.EmitRecord(Writer.Code, Record, Writer.AbbrevToUse);
+
+ SubStmtEntries[S] = Stream.GetCurrentBitNo();
+}
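
A minimal sketch, independent of the ASTWriter machinery, of the LIFO property the reverse-order comment above relies on: records land in the stream last-child-first, so a reader that pushes each decoded record onto a stack pops the children back in source order.

    #include <cassert>
    #include <stack>
    #include <string>
    #include <vector>

    int main() {
      // Children collected in source order, as CollectedStmts would hold them.
      std::vector<std::string> SubStmts = {"lhs", "rhs"};

      // "Emit" them last-to-first, mimicking the pop_back_val() loop above.
      std::vector<std::string> Bitstream;
      while (!SubStmts.empty()) {
        Bitstream.push_back(SubStmts.back());
        SubStmts.pop_back();
      }

      // A reader pushes each decoded record onto a stack...
      std::stack<std::string> Stmts;
      for (const std::string &Rec : Bitstream)
        Stmts.push(Rec);

      // ...and pops the operands back in their original order.
      assert(Stmts.top() == "lhs"); Stmts.pop();
      assert(Stmts.top() == "rhs"); Stmts.pop();
      return 0;
    }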
+
+/// \brief Flush all of the statements that have been added to the
+/// queue via AddStmt().
+void ASTWriter::FlushStmts() {
+ RecordData Record;
+
+ // We expect to be the only consumer of the two temporary statement maps,
+ // assert that they are empty.
+ assert(SubStmtEntries.empty() && "unexpected entries in sub-stmt map");
+ assert(ParentStmts.empty() && "unexpected entries in parent stmt map");
+
+ for (unsigned I = 0, N = StmtsToEmit.size(); I != N; ++I) {
+ WriteSubStmt(StmtsToEmit[I], SubStmtEntries, ParentStmts);
+
+ assert(N == StmtsToEmit.size() &&
+ "Substatement written via AddStmt rather than WriteSubStmt!");
+
+ // Note that we are at the end of a full expression. Any
+ // expression records that follow this one are part of a different
+ // expression.
+ Stream.EmitRecord(serialization::STMT_STOP, Record);
+
+ SubStmtEntries.clear();
+ ParentStmts.clear();
+ }
+
+ StmtsToEmit.clear();
+}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp b/contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp
new file mode 100644
index 0000000..4a2255a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp
@@ -0,0 +1,65 @@
+//===--- GeneratePCH.cpp - Sema Consumer for PCH Generation -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PCHGenerator, which is a SemaConsumer that generates
+// a PCH file.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Serialization/ASTWriter.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/SemaConsumer.h"
+#include "llvm/Bitcode/BitstreamWriter.h"
+#include <string>
+
+using namespace clang;
+
+PCHGenerator::PCHGenerator(
+ const Preprocessor &PP, StringRef OutputFile,
+ clang::Module *Module, StringRef isysroot,
+ std::shared_ptr<PCHBuffer> Buffer,
+ ArrayRef<llvm::IntrusiveRefCntPtr<ModuleFileExtension>> Extensions,
+ bool AllowASTWithErrors, bool IncludeTimestamps)
+ : PP(PP), OutputFile(OutputFile), Module(Module), isysroot(isysroot.str()),
+ SemaPtr(nullptr), Buffer(Buffer), Stream(Buffer->Data),
+ Writer(Stream, Extensions, IncludeTimestamps),
+ AllowASTWithErrors(AllowASTWithErrors) {
+ Buffer->IsComplete = false;
+}
+
+PCHGenerator::~PCHGenerator() {
+}
+
+void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) {
+ // Don't create a PCH if there were fatal failures during module loading.
+ if (PP.getModuleLoader().HadFatalFailure)
+ return;
+
+ bool hasErrors = PP.getDiagnostics().hasErrorOccurred();
+ if (hasErrors && !AllowASTWithErrors)
+ return;
+
+ // Emit the PCH file to the Buffer.
+ assert(SemaPtr && "No Sema?");
+ Buffer->Signature =
+ Writer.WriteAST(*SemaPtr, OutputFile, Module, isysroot, hasErrors);
+
+ Buffer->IsComplete = true;
+}
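
An illustrative fragment (not part of this commit) of how an embedder might consume the shared PCHBuffer once the generator has run; writeOutput is a hypothetical sink.

    auto Buffer = std::make_shared<PCHBuffer>();
    // ... construct a PCHGenerator over Buffer and run the frontend ...
    if (Buffer->IsComplete) {
      // Buffer->Data holds the serialized AST; Buffer->Signature names it.
      writeOutput(Buffer->Data);  // hypothetical sink
    }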
+
+ASTMutationListener *PCHGenerator::GetASTMutationListener() {
+ return &Writer;
+}
+
+ASTDeserializationListener *PCHGenerator::GetASTDeserializationListener() {
+ return &Writer;
+}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/GlobalModuleIndex.cpp b/contrib/llvm/tools/clang/lib/Serialization/GlobalModuleIndex.cpp
new file mode 100644
index 0000000..af5f94a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/GlobalModuleIndex.cpp
@@ -0,0 +1,889 @@
+//===--- GlobalModuleIndex.cpp - Global Module Index ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the GlobalModuleIndex class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ASTReaderInternals.h"
+#include "clang/Frontend/PCHContainerOperations.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Serialization/ASTBitCodes.h"
+#include "clang/Serialization/GlobalModuleIndex.h"
+#include "clang/Serialization/Module.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Bitcode/BitstreamReader.h"
+#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/LockFileManager.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/OnDiskHashTable.h"
+#include "llvm/Support/Path.h"
+#include <cstdio>
+using namespace clang;
+using namespace serialization;
+
+//----------------------------------------------------------------------------//
+// Shared constants
+//----------------------------------------------------------------------------//
+namespace {
+ enum {
+ /// \brief The block containing the index.
+ GLOBAL_INDEX_BLOCK_ID = llvm::bitc::FIRST_APPLICATION_BLOCKID
+ };
+
+ /// \brief Describes the record types in the index.
+ enum IndexRecordTypes {
+ /// \brief Contains version information and potentially other metadata,
+ /// used to determine if we can read this global index file.
+ INDEX_METADATA,
+ /// \brief Describes a module, including its file name and dependencies.
+ MODULE,
+ /// \brief The index for identifiers.
+ IDENTIFIER_INDEX
+ };
+}
+
+/// \brief The name of the global index file.
+static const char * const IndexFileName = "modules.idx";
+
+/// \brief The global index file version.
+static const unsigned CurrentVersion = 1;
+
+//----------------------------------------------------------------------------//
+// Global module index reader.
+//----------------------------------------------------------------------------//
+
+namespace {
+
+/// \brief Trait used to read the identifier index from the on-disk hash
+/// table.
+class IdentifierIndexReaderTrait {
+public:
+ typedef StringRef external_key_type;
+ typedef StringRef internal_key_type;
+ typedef SmallVector<unsigned, 2> data_type;
+ typedef unsigned hash_value_type;
+ typedef unsigned offset_type;
+
+ static bool EqualKey(const internal_key_type& a, const internal_key_type& b) {
+ return a == b;
+ }
+
+ static hash_value_type ComputeHash(const internal_key_type& a) {
+ return llvm::HashString(a);
+ }
+
+ static std::pair<unsigned, unsigned>
+ ReadKeyDataLength(const unsigned char*& d) {
+ using namespace llvm::support;
+ unsigned KeyLen = endian::readNext<uint16_t, little, unaligned>(d);
+ unsigned DataLen = endian::readNext<uint16_t, little, unaligned>(d);
+ return std::make_pair(KeyLen, DataLen);
+ }
+
+ static const internal_key_type&
+ GetInternalKey(const external_key_type& x) { return x; }
+
+ static const external_key_type&
+ GetExternalKey(const internal_key_type& x) { return x; }
+
+ static internal_key_type ReadKey(const unsigned char* d, unsigned n) {
+ return StringRef((const char *)d, n);
+ }
+
+ static data_type ReadData(const internal_key_type& k,
+ const unsigned char* d,
+ unsigned DataLen) {
+ using namespace llvm::support;
+
+ data_type Result;
+ while (DataLen > 0) {
+ unsigned ID = endian::readNext<uint32_t, little, unaligned>(d);
+ Result.push_back(ID);
+ DataLen -= 4;
+ }
+
+ return Result;
+ }
+};
+
+typedef llvm::OnDiskIterableChainedHashTable<IdentifierIndexReaderTrait>
+ IdentifierIndexTable;
+
+}
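
A hypothetical byte-level round trip of one table entry in the layout the trait above decodes: two little-endian uint16 length fields, the key bytes, then one little-endian uint32 module-file ID per four bytes of data. This illustrates the wire format only; the key and IDs are invented.

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <string>
    #include <vector>

    static void writeLE16(std::vector<uint8_t> &Out, uint16_t V) {
      Out.push_back(V & 0xFF);
      Out.push_back((V >> 8) & 0xFF);
    }

    static void writeLE32(std::vector<uint8_t> &Out, uint32_t V) {
      for (int I = 0; I < 4; ++I)
        Out.push_back((V >> (8 * I)) & 0xFF);
    }

    int main() {
      std::string Key = "printf";
      std::vector<uint32_t> ModuleIDs = {3, 7};

      std::vector<uint8_t> Entry;
      writeLE16(Entry, static_cast<uint16_t>(Key.size()));           // KeyLen
      writeLE16(Entry, static_cast<uint16_t>(ModuleIDs.size() * 4)); // DataLen
      Entry.insert(Entry.end(), Key.begin(), Key.end());
      for (uint32_t ID : ModuleIDs)
        writeLE32(Entry, ID);

      // ReadKeyDataLength / ReadKey / ReadData would walk the same bytes:
      uint16_t KeyLen = Entry[0] | (Entry[1] << 8);
      uint16_t DataLen = Entry[2] | (Entry[3] << 8);
      assert(KeyLen == 6 && DataLen == 8);
      assert(std::memcmp(&Entry[4], "printf", KeyLen) == 0);
      return 0;
    }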
+
+GlobalModuleIndex::GlobalModuleIndex(std::unique_ptr<llvm::MemoryBuffer> Buffer,
+ llvm::BitstreamCursor Cursor)
+ : Buffer(std::move(Buffer)), IdentifierIndex(), NumIdentifierLookups(),
+ NumIdentifierLookupHits() {
+ // Read the global index.
+ bool InGlobalIndexBlock = false;
+ bool Done = false;
+ while (!Done) {
+ llvm::BitstreamEntry Entry = Cursor.advance();
+
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::Error:
+ return;
+
+ case llvm::BitstreamEntry::EndBlock:
+ if (InGlobalIndexBlock) {
+ InGlobalIndexBlock = false;
+ Done = true;
+ continue;
+ }
+ return;
+
+ case llvm::BitstreamEntry::Record:
+ // Entries in the global index block are handled below.
+ if (InGlobalIndexBlock)
+ break;
+
+ return;
+
+ case llvm::BitstreamEntry::SubBlock:
+ if (!InGlobalIndexBlock && Entry.ID == GLOBAL_INDEX_BLOCK_ID) {
+ if (Cursor.EnterSubBlock(GLOBAL_INDEX_BLOCK_ID))
+ return;
+
+ InGlobalIndexBlock = true;
+ } else if (Cursor.SkipBlock()) {
+ return;
+ }
+ continue;
+ }
+
+ SmallVector<uint64_t, 64> Record;
+ StringRef Blob;
+ switch ((IndexRecordTypes)Cursor.readRecord(Entry.ID, Record, &Blob)) {
+ case INDEX_METADATA:
+ // Make sure that the version matches.
+ if (Record.size() < 1 || Record[0] != CurrentVersion)
+ return;
+ break;
+
+ case MODULE: {
+ unsigned Idx = 0;
+ unsigned ID = Record[Idx++];
+
+ // Make room for this module's information.
+ if (ID == Modules.size())
+ Modules.push_back(ModuleInfo());
+ else
+ Modules.resize(ID + 1);
+
+ // Size/modification time for this module file at the time the
+ // global index was built.
+ Modules[ID].Size = Record[Idx++];
+ Modules[ID].ModTime = Record[Idx++];
+
+ // File name.
+ unsigned NameLen = Record[Idx++];
+ Modules[ID].FileName.assign(Record.begin() + Idx,
+ Record.begin() + Idx + NameLen);
+ Idx += NameLen;
+
+ // Dependencies
+ unsigned NumDeps = Record[Idx++];
+ Modules[ID].Dependencies.insert(Modules[ID].Dependencies.end(),
+ Record.begin() + Idx,
+ Record.begin() + Idx + NumDeps);
+ Idx += NumDeps;
+
+ // Make sure we're at the end of the record.
+ assert(Idx == Record.size() && "More module info?");
+
+ // Record this module as an unresolved module.
+ // FIXME: this doesn't work correctly for module names containing path
+ // separators.
+ StringRef ModuleName = llvm::sys::path::stem(Modules[ID].FileName);
+ // Remove the -<hash of ModuleMapPath>
+ ModuleName = ModuleName.rsplit('-').first;
+ UnresolvedModules[ModuleName] = ID;
+ break;
+ }
+
+ case IDENTIFIER_INDEX:
+ // Wire up the identifier index.
+ if (Record[0]) {
+ IdentifierIndex = IdentifierIndexTable::Create(
+ (const unsigned char *)Blob.data() + Record[0],
+ (const unsigned char *)Blob.data() + sizeof(uint32_t),
+ (const unsigned char *)Blob.data(), IdentifierIndexReaderTrait());
+ }
+ break;
+ }
+ }
+}
+
+GlobalModuleIndex::~GlobalModuleIndex() {
+ delete static_cast<IdentifierIndexTable *>(IdentifierIndex);
+}
+
+std::pair<GlobalModuleIndex *, GlobalModuleIndex::ErrorCode>
+GlobalModuleIndex::readIndex(StringRef Path) {
+ // Load the index file, if it's there.
+ llvm::SmallString<128> IndexPath;
+ IndexPath += Path;
+ llvm::sys::path::append(IndexPath, IndexFileName);
+
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> BufferOrErr =
+ llvm::MemoryBuffer::getFile(IndexPath.c_str());
+ if (!BufferOrErr)
+ return std::make_pair(nullptr, EC_NotFound);
+ std::unique_ptr<llvm::MemoryBuffer> Buffer = std::move(BufferOrErr.get());
+
+  /// \brief The bitstream reader from which we'll read the index file.
+ llvm::BitstreamReader Reader((const unsigned char *)Buffer->getBufferStart(),
+ (const unsigned char *)Buffer->getBufferEnd());
+
+ /// \brief The main bitstream cursor for the main block.
+ llvm::BitstreamCursor Cursor(Reader);
+
+ // Sniff for the signature.
+ if (Cursor.Read(8) != 'B' ||
+ Cursor.Read(8) != 'C' ||
+ Cursor.Read(8) != 'G' ||
+ Cursor.Read(8) != 'I') {
+ return std::make_pair(nullptr, EC_IOError);
+ }
+
+ return std::make_pair(new GlobalModuleIndex(std::move(Buffer), Cursor),
+ EC_None);
+}
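
A hedged usage sketch of the API defined in this file; ModuleCachePath and the lookup key are placeholders, and error handling is abbreviated.

    #include "clang/Serialization/GlobalModuleIndex.h"
    #include <memory>

    void lookupInIndex(llvm::StringRef ModuleCachePath) {
      auto Result = clang::GlobalModuleIndex::readIndex(ModuleCachePath);
      if (Result.second != clang::GlobalModuleIndex::EC_None)
        return; // EC_NotFound, EC_IOError, ...
      std::unique_ptr<clang::GlobalModuleIndex> Index(Result.first);

      clang::GlobalModuleIndex::HitSet Hits;
      if (Index->lookupIdentifier("printf", Hits)) {
        // Hits holds the module files known to mention "printf"; an empty
        // set after a successful lookup is a definitive miss.
      }
    }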
+
+void
+GlobalModuleIndex::getKnownModules(SmallVectorImpl<ModuleFile *> &ModuleFiles) {
+ ModuleFiles.clear();
+ for (unsigned I = 0, N = Modules.size(); I != N; ++I) {
+ if (ModuleFile *MF = Modules[I].File)
+ ModuleFiles.push_back(MF);
+ }
+}
+
+void GlobalModuleIndex::getModuleDependencies(
+ ModuleFile *File,
+ SmallVectorImpl<ModuleFile *> &Dependencies) {
+ // Look for information about this module file.
+ llvm::DenseMap<ModuleFile *, unsigned>::iterator Known
+ = ModulesByFile.find(File);
+ if (Known == ModulesByFile.end())
+ return;
+
+ // Record dependencies.
+ Dependencies.clear();
+ ArrayRef<unsigned> StoredDependencies = Modules[Known->second].Dependencies;
+ for (unsigned I = 0, N = StoredDependencies.size(); I != N; ++I) {
+    if (ModuleFile *MF = Modules[StoredDependencies[I]].File)
+ Dependencies.push_back(MF);
+ }
+}
+
+bool GlobalModuleIndex::lookupIdentifier(StringRef Name, HitSet &Hits) {
+ Hits.clear();
+
+ // If there's no identifier index, there is nothing we can do.
+ if (!IdentifierIndex)
+ return false;
+
+ // Look into the identifier index.
+ ++NumIdentifierLookups;
+ IdentifierIndexTable &Table
+ = *static_cast<IdentifierIndexTable *>(IdentifierIndex);
+ IdentifierIndexTable::iterator Known = Table.find(Name);
+ if (Known == Table.end()) {
+ return true;
+ }
+
+ SmallVector<unsigned, 2> ModuleIDs = *Known;
+ for (unsigned I = 0, N = ModuleIDs.size(); I != N; ++I) {
+ if (ModuleFile *MF = Modules[ModuleIDs[I]].File)
+ Hits.insert(MF);
+ }
+
+ ++NumIdentifierLookupHits;
+ return true;
+}
+
+bool GlobalModuleIndex::loadedModuleFile(ModuleFile *File) {
+ // Look for the module in the global module index based on the module name.
+ StringRef Name = File->ModuleName;
+ llvm::StringMap<unsigned>::iterator Known = UnresolvedModules.find(Name);
+ if (Known == UnresolvedModules.end()) {
+ return true;
+ }
+
+ // Rectify this module with the global module index.
+ ModuleInfo &Info = Modules[Known->second];
+
+ // If the size and modification time match what we expected, record this
+ // module file.
+ bool Failed = true;
+ if (File->File->getSize() == Info.Size &&
+ File->File->getModificationTime() == Info.ModTime) {
+ Info.File = File;
+ ModulesByFile[File] = Known->second;
+
+ Failed = false;
+ }
+
+ // One way or another, we have resolved this module file.
+ UnresolvedModules.erase(Known);
+ return Failed;
+}
+
+void GlobalModuleIndex::printStats() {
+ std::fprintf(stderr, "*** Global Module Index Statistics:\n");
+ if (NumIdentifierLookups) {
+    std::fprintf(stderr, "  %u / %u identifier lookups succeeded (%f%%)\n",
+                 NumIdentifierLookupHits, NumIdentifierLookups,
+                 (double)NumIdentifierLookupHits*100.0/NumIdentifierLookups);
+ }
+ std::fprintf(stderr, "\n");
+}
+
+void GlobalModuleIndex::dump() {
+ llvm::errs() << "*** Global Module Index Dump:\n";
+ llvm::errs() << "Module files:\n";
+ for (auto &MI : Modules) {
+ llvm::errs() << "** " << MI.FileName << "\n";
+ if (MI.File)
+ MI.File->dump();
+ else
+ llvm::errs() << "\n";
+ }
+ llvm::errs() << "\n";
+}
+
+//----------------------------------------------------------------------------//
+// Global module index writer.
+//----------------------------------------------------------------------------//
+
+namespace {
+ /// \brief Provides information about a specific module file.
+ struct ModuleFileInfo {
+    /// \brief The numeric ID for this module file.
+ unsigned ID;
+
+ /// \brief The set of modules on which this module depends. Each entry is
+ /// a module ID.
+ SmallVector<unsigned, 4> Dependencies;
+ };
+
+ /// \brief Builder that generates the global module index file.
+ class GlobalModuleIndexBuilder {
+ FileManager &FileMgr;
+ const PCHContainerReader &PCHContainerRdr;
+
+ /// \brief Mapping from files to module file information.
+ typedef llvm::MapVector<const FileEntry *, ModuleFileInfo> ModuleFilesMap;
+
+ /// \brief Information about each of the known module files.
+ ModuleFilesMap ModuleFiles;
+
+ /// \brief Mapping from identifiers to the list of module file IDs that
+ /// consider this identifier to be interesting.
+ typedef llvm::StringMap<SmallVector<unsigned, 2> > InterestingIdentifierMap;
+
+ /// \brief A mapping from all interesting identifiers to the set of module
+ /// files in which those identifiers are considered interesting.
+ InterestingIdentifierMap InterestingIdentifiers;
+
+ /// \brief Write the block-info block for the global module index file.
+ void emitBlockInfoBlock(llvm::BitstreamWriter &Stream);
+
+ /// \brief Retrieve the module file information for the given file.
+ ModuleFileInfo &getModuleFileInfo(const FileEntry *File) {
+ llvm::MapVector<const FileEntry *, ModuleFileInfo>::iterator Known
+ = ModuleFiles.find(File);
+ if (Known != ModuleFiles.end())
+ return Known->second;
+
+ unsigned NewID = ModuleFiles.size();
+ ModuleFileInfo &Info = ModuleFiles[File];
+ Info.ID = NewID;
+ return Info;
+ }
+
+ public:
+ explicit GlobalModuleIndexBuilder(
+ FileManager &FileMgr, const PCHContainerReader &PCHContainerRdr)
+ : FileMgr(FileMgr), PCHContainerRdr(PCHContainerRdr) {}
+
+ /// \brief Load the contents of the given module file into the builder.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool loadModuleFile(const FileEntry *File);
+
+ /// \brief Write the index to the given bitstream.
+ void writeIndex(llvm::BitstreamWriter &Stream);
+ };
+}
+
+static void emitBlockID(unsigned ID, const char *Name,
+ llvm::BitstreamWriter &Stream,
+ SmallVectorImpl<uint64_t> &Record) {
+ Record.clear();
+ Record.push_back(ID);
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_SETBID, Record);
+
+ // Emit the block name if present.
+ if (!Name || Name[0] == 0) return;
+ Record.clear();
+ while (*Name)
+ Record.push_back(*Name++);
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_BLOCKNAME, Record);
+}
+
+static void emitRecordID(unsigned ID, const char *Name,
+ llvm::BitstreamWriter &Stream,
+ SmallVectorImpl<uint64_t> &Record) {
+ Record.clear();
+ Record.push_back(ID);
+ while (*Name)
+ Record.push_back(*Name++);
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_SETRECORDNAME, Record);
+}
+
+void
+GlobalModuleIndexBuilder::emitBlockInfoBlock(llvm::BitstreamWriter &Stream) {
+ SmallVector<uint64_t, 64> Record;
+ Stream.EnterSubblock(llvm::bitc::BLOCKINFO_BLOCK_ID, 3);
+
+#define BLOCK(X) emitBlockID(X ## _ID, #X, Stream, Record)
+#define RECORD(X) emitRecordID(X, #X, Stream, Record)
+ BLOCK(GLOBAL_INDEX_BLOCK);
+ RECORD(INDEX_METADATA);
+ RECORD(MODULE);
+ RECORD(IDENTIFIER_INDEX);
+#undef RECORD
+#undef BLOCK
+
+ Stream.ExitBlock();
+}
+
+namespace {
+ class InterestingASTIdentifierLookupTrait
+ : public serialization::reader::ASTIdentifierLookupTraitBase {
+
+ public:
+ /// \brief The identifier and whether it is "interesting".
+ typedef std::pair<StringRef, bool> data_type;
+
+ data_type ReadData(const internal_key_type& k,
+ const unsigned char* d,
+ unsigned DataLen) {
+ // The first bit indicates whether this identifier is interesting.
+ // That's all we care about.
+ using namespace llvm::support;
+ unsigned RawID = endian::readNext<uint32_t, little, unaligned>(d);
+ bool IsInteresting = RawID & 0x01;
+ return std::make_pair(k, IsInteresting);
+ }
+ };
+}
+
+bool GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
+ // Open the module file.
+
+ auto Buffer = FileMgr.getBufferForFile(File, /*isVolatile=*/true);
+ if (!Buffer) {
+ return true;
+ }
+
+ // Initialize the input stream
+ llvm::BitstreamReader InStreamFile;
+ PCHContainerRdr.ExtractPCH((*Buffer)->getMemBufferRef(), InStreamFile);
+ llvm::BitstreamCursor InStream(InStreamFile);
+
+ // Sniff for the signature.
+ if (InStream.Read(8) != 'C' ||
+ InStream.Read(8) != 'P' ||
+ InStream.Read(8) != 'C' ||
+ InStream.Read(8) != 'H') {
+ return true;
+ }
+
+ // Record this module file and assign it a unique ID (if it doesn't have
+ // one already).
+ unsigned ID = getModuleFileInfo(File).ID;
+
+ // Search for the blocks and records we care about.
+ enum { Other, ControlBlock, ASTBlock } State = Other;
+ bool Done = false;
+ while (!Done) {
+ llvm::BitstreamEntry Entry = InStream.advance();
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::Error:
+ Done = true;
+ continue;
+
+ case llvm::BitstreamEntry::Record:
+ // In the 'other' state, just skip the record. We don't care.
+ if (State == Other) {
+ InStream.skipRecord(Entry.ID);
+ continue;
+ }
+
+ // Handle potentially-interesting records below.
+ break;
+
+ case llvm::BitstreamEntry::SubBlock:
+ if (Entry.ID == CONTROL_BLOCK_ID) {
+ if (InStream.EnterSubBlock(CONTROL_BLOCK_ID))
+ return true;
+
+ // Found the control block.
+ State = ControlBlock;
+ continue;
+ }
+
+ if (Entry.ID == AST_BLOCK_ID) {
+ if (InStream.EnterSubBlock(AST_BLOCK_ID))
+ return true;
+
+ // Found the AST block.
+ State = ASTBlock;
+ continue;
+ }
+
+ if (InStream.SkipBlock())
+ return true;
+
+ continue;
+
+ case llvm::BitstreamEntry::EndBlock:
+ State = Other;
+ continue;
+ }
+
+ // Read the given record.
+ SmallVector<uint64_t, 64> Record;
+ StringRef Blob;
+ unsigned Code = InStream.readRecord(Entry.ID, Record, &Blob);
+
+ // Handle module dependencies.
+ if (State == ControlBlock && Code == IMPORTS) {
+ // Load each of the imported PCH files.
+ unsigned Idx = 0, N = Record.size();
+ while (Idx < N) {
+ // Read information about the AST file.
+
+ // Skip the imported kind
+ ++Idx;
+
+ // Skip the import location
+ ++Idx;
+
+ // Load stored size/modification time.
+ off_t StoredSize = (off_t)Record[Idx++];
+ time_t StoredModTime = (time_t)Record[Idx++];
+
+ // Skip the stored signature.
+ // FIXME: we could read the signature out of the import and validate it.
+ Idx++;
+
+ // Retrieve the imported file name.
+ unsigned Length = Record[Idx++];
+ SmallString<128> ImportedFile(Record.begin() + Idx,
+ Record.begin() + Idx + Length);
+ Idx += Length;
+
+ // Find the imported module file.
+ const FileEntry *DependsOnFile
+ = FileMgr.getFile(ImportedFile, /*openFile=*/false,
+ /*cacheFailure=*/false);
+ if (!DependsOnFile ||
+ (StoredSize != DependsOnFile->getSize()) ||
+ (StoredModTime != DependsOnFile->getModificationTime()))
+ return true;
+
+ // Record the dependency.
+ unsigned DependsOnID = getModuleFileInfo(DependsOnFile).ID;
+ getModuleFileInfo(File).Dependencies.push_back(DependsOnID);
+ }
+
+ continue;
+ }
+
+ // Handle the identifier table
+ if (State == ASTBlock && Code == IDENTIFIER_TABLE && Record[0] > 0) {
+ typedef llvm::OnDiskIterableChainedHashTable<
+ InterestingASTIdentifierLookupTrait> InterestingIdentifierTable;
+ std::unique_ptr<InterestingIdentifierTable> Table(
+ InterestingIdentifierTable::Create(
+ (const unsigned char *)Blob.data() + Record[0],
+ (const unsigned char *)Blob.data() + sizeof(uint32_t),
+ (const unsigned char *)Blob.data()));
+ for (InterestingIdentifierTable::data_iterator D = Table->data_begin(),
+ DEnd = Table->data_end();
+ D != DEnd; ++D) {
+ std::pair<StringRef, bool> Ident = *D;
+ if (Ident.second)
+ InterestingIdentifiers[Ident.first].push_back(ID);
+ else
+ (void)InterestingIdentifiers[Ident.first];
+ }
+ }
+
+ // We don't care about this record.
+ }
+
+ return false;
+}
+
+namespace {
+
+/// \brief Trait used to generate the identifier index as an on-disk hash
+/// table.
+class IdentifierIndexWriterTrait {
+public:
+ typedef StringRef key_type;
+ typedef StringRef key_type_ref;
+ typedef SmallVector<unsigned, 2> data_type;
+ typedef const SmallVector<unsigned, 2> &data_type_ref;
+ typedef unsigned hash_value_type;
+ typedef unsigned offset_type;
+
+ static hash_value_type ComputeHash(key_type_ref Key) {
+ return llvm::HashString(Key);
+ }
+
+ std::pair<unsigned,unsigned>
+ EmitKeyDataLength(raw_ostream& Out, key_type_ref Key, data_type_ref Data) {
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+ unsigned KeyLen = Key.size();
+ unsigned DataLen = Data.size() * 4;
+ LE.write<uint16_t>(KeyLen);
+ LE.write<uint16_t>(DataLen);
+ return std::make_pair(KeyLen, DataLen);
+ }
+
+ void EmitKey(raw_ostream& Out, key_type_ref Key, unsigned KeyLen) {
+ Out.write(Key.data(), KeyLen);
+ }
+
+ void EmitData(raw_ostream& Out, key_type_ref Key, data_type_ref Data,
+ unsigned DataLen) {
+ using namespace llvm::support;
+ for (unsigned I = 0, N = Data.size(); I != N; ++I)
+ endian::Writer<little>(Out).write<uint32_t>(Data[I]);
+ }
+};
+
+}
+
+void GlobalModuleIndexBuilder::writeIndex(llvm::BitstreamWriter &Stream) {
+ using namespace llvm;
+
+ // Emit the file header.
+ Stream.Emit((unsigned)'B', 8);
+ Stream.Emit((unsigned)'C', 8);
+ Stream.Emit((unsigned)'G', 8);
+ Stream.Emit((unsigned)'I', 8);
+
+ // Write the block-info block, which describes the records in this bitcode
+ // file.
+ emitBlockInfoBlock(Stream);
+
+ Stream.EnterSubblock(GLOBAL_INDEX_BLOCK_ID, 3);
+
+ // Write the metadata.
+ SmallVector<uint64_t, 2> Record;
+ Record.push_back(CurrentVersion);
+ Stream.EmitRecord(INDEX_METADATA, Record);
+
+ // Write the set of known module files.
+ for (ModuleFilesMap::iterator M = ModuleFiles.begin(),
+ MEnd = ModuleFiles.end();
+ M != MEnd; ++M) {
+ Record.clear();
+ Record.push_back(M->second.ID);
+ Record.push_back(M->first->getSize());
+ Record.push_back(M->first->getModificationTime());
+
+ // File name
+ StringRef Name(M->first->getName());
+ Record.push_back(Name.size());
+ Record.append(Name.begin(), Name.end());
+
+ // Dependencies
+ Record.push_back(M->second.Dependencies.size());
+ Record.append(M->second.Dependencies.begin(), M->second.Dependencies.end());
+ Stream.EmitRecord(MODULE, Record);
+ }
+
+ // Write the identifier -> module file mapping.
+ {
+ llvm::OnDiskChainedHashTableGenerator<IdentifierIndexWriterTrait> Generator;
+ IdentifierIndexWriterTrait Trait;
+
+ // Populate the hash table.
+ for (InterestingIdentifierMap::iterator I = InterestingIdentifiers.begin(),
+ IEnd = InterestingIdentifiers.end();
+ I != IEnd; ++I) {
+ Generator.insert(I->first(), I->second, Trait);
+ }
+
+ // Create the on-disk hash table in a buffer.
+ SmallString<4096> IdentifierTable;
+ uint32_t BucketOffset;
+ {
+ using namespace llvm::support;
+ llvm::raw_svector_ostream Out(IdentifierTable);
+ // Make sure that no bucket is at offset 0
+ endian::Writer<little>(Out).write<uint32_t>(0);
+ BucketOffset = Generator.Emit(Out, Trait);
+ }
+
+ // Create a blob abbreviation
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(IDENTIFIER_INDEX));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned IDTableAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the identifier table
+ uint64_t Record[] = {IDENTIFIER_INDEX, BucketOffset};
+ Stream.EmitRecordWithBlob(IDTableAbbrev, Record, IdentifierTable);
+ }
+
+ Stream.ExitBlock();
+}
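
A condensed sketch of how this emit side pairs with the read side earlier in the file, assuming the two traits defined here. The leading zero word guarantees BucketOffset != 0, which is what lets the reader treat Record[0] == 0 as "no identifier index present".

    llvm::OnDiskChainedHashTableGenerator<IdentifierIndexWriterTrait> Generator;
    IdentifierIndexWriterTrait Trait;
    Generator.insert("printf", {3, 7}, Trait);  // identifier -> module IDs

    SmallString<4096> Table;
    uint32_t BucketOffset;
    {
      using namespace llvm::support;
      llvm::raw_svector_ostream Out(Table);
      endian::Writer<little>(Out).write<uint32_t>(0); // sentinel word
      BucketOffset = Generator.Emit(Out, Trait);      // never 0 now
    }

    auto *Reader = IdentifierIndexTable::Create(
        (const unsigned char *)Table.data() + BucketOffset,
        (const unsigned char *)Table.data() + sizeof(uint32_t),
        (const unsigned char *)Table.data(), IdentifierIndexReaderTrait());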
+
+GlobalModuleIndex::ErrorCode
+GlobalModuleIndex::writeIndex(FileManager &FileMgr,
+ const PCHContainerReader &PCHContainerRdr,
+ StringRef Path) {
+ llvm::SmallString<128> IndexPath;
+ IndexPath += Path;
+ llvm::sys::path::append(IndexPath, IndexFileName);
+
+ // Coordinate building the global index file with other processes that might
+ // try to do the same.
+ llvm::LockFileManager Locked(IndexPath);
+ switch (Locked) {
+ case llvm::LockFileManager::LFS_Error:
+ return EC_IOError;
+
+ case llvm::LockFileManager::LFS_Owned:
+ // We're responsible for building the index ourselves. Do so below.
+ break;
+
+ case llvm::LockFileManager::LFS_Shared:
+ // Someone else is responsible for building the index. We don't care
+ // when they finish, so we're done.
+ return EC_Building;
+ }
+
+ // The module index builder.
+ GlobalModuleIndexBuilder Builder(FileMgr, PCHContainerRdr);
+
+ // Load each of the module files.
+ std::error_code EC;
+ for (llvm::sys::fs::directory_iterator D(Path, EC), DEnd;
+ D != DEnd && !EC;
+ D.increment(EC)) {
+ // If this isn't a module file, we don't care.
+ if (llvm::sys::path::extension(D->path()) != ".pcm") {
+ // ... unless it's a .pcm.lock file, which indicates that someone is
+ // in the process of rebuilding a module. They'll rebuild the index
+ // at the end of that translation unit, so we don't have to.
+      if (StringRef(D->path()).endswith(".pcm.lock"))
+ return EC_Building;
+
+ continue;
+ }
+
+ // If we can't find the module file, skip it.
+ const FileEntry *ModuleFile = FileMgr.getFile(D->path());
+ if (!ModuleFile)
+ continue;
+
+ // Load this module file.
+ if (Builder.loadModuleFile(ModuleFile))
+ return EC_IOError;
+ }
+
+ // The output buffer, into which the global index will be written.
+ SmallVector<char, 16> OutputBuffer;
+ {
+ llvm::BitstreamWriter OutputStream(OutputBuffer);
+ Builder.writeIndex(OutputStream);
+ }
+
+ // Write the global index file to a temporary file.
+ llvm::SmallString<128> IndexTmpPath;
+ int TmpFD;
+ if (llvm::sys::fs::createUniqueFile(IndexPath + "-%%%%%%%%", TmpFD,
+ IndexTmpPath))
+ return EC_IOError;
+
+ // Open the temporary global index file for output.
+ llvm::raw_fd_ostream Out(TmpFD, true);
+ if (Out.has_error())
+ return EC_IOError;
+
+ // Write the index.
+ Out.write(OutputBuffer.data(), OutputBuffer.size());
+ Out.close();
+ if (Out.has_error())
+ return EC_IOError;
+
+ // Remove the old index file. It isn't relevant any more.
+ llvm::sys::fs::remove(IndexPath);
+
+ // Rename the newly-written index file to the proper name.
+ if (llvm::sys::fs::rename(IndexTmpPath, IndexPath)) {
+    // Rename failed; just remove the temporary file.
+ llvm::sys::fs::remove(IndexTmpPath);
+ return EC_IOError;
+ }
+
+ // We're done.
+ return EC_None;
+}
+
+namespace {
+ class GlobalIndexIdentifierIterator : public IdentifierIterator {
+ /// \brief The current position within the identifier lookup table.
+ IdentifierIndexTable::key_iterator Current;
+
+ /// \brief The end position within the identifier lookup table.
+ IdentifierIndexTable::key_iterator End;
+
+ public:
+ explicit GlobalIndexIdentifierIterator(IdentifierIndexTable &Idx) {
+ Current = Idx.key_begin();
+ End = Idx.key_end();
+ }
+
+ StringRef Next() override {
+ if (Current == End)
+ return StringRef();
+
+ StringRef Result = *Current;
+ ++Current;
+ return Result;
+ }
+ };
+}
+
+IdentifierIterator *GlobalModuleIndex::createIdentifierIterator() const {
+ IdentifierIndexTable &Table =
+ *static_cast<IdentifierIndexTable *>(IdentifierIndex);
+ return new GlobalIndexIdentifierIterator(Table);
+}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/Module.cpp b/contrib/llvm/tools/clang/lib/Serialization/Module.cpp
new file mode 100644
index 0000000..4884f0b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/Module.cpp
@@ -0,0 +1,115 @@
+//===--- Module.cpp - Module description ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ModuleFile class, which describes a module that
+// has been loaded from an AST file.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Serialization/Module.h"
+#include "ASTReaderInternals.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace serialization;
+using namespace reader;
+
+ModuleFile::ModuleFile(ModuleKind Kind, unsigned Generation)
+ : Kind(Kind), File(nullptr), Signature(0), DirectlyImported(false),
+ Generation(Generation), SizeInBits(0),
+ LocalNumSLocEntries(0), SLocEntryBaseID(0),
+ SLocEntryBaseOffset(0), SLocEntryOffsets(nullptr),
+ LocalNumIdentifiers(0),
+ IdentifierOffsets(nullptr), BaseIdentifierID(0),
+ IdentifierTableData(nullptr), IdentifierLookupTable(nullptr),
+ LocalNumMacros(0), MacroOffsets(nullptr),
+ BasePreprocessedEntityID(0),
+ PreprocessedEntityOffsets(nullptr), NumPreprocessedEntities(0),
+ LocalNumHeaderFileInfos(0),
+ HeaderFileInfoTableData(nullptr), HeaderFileInfoTable(nullptr),
+ LocalNumSubmodules(0), BaseSubmoduleID(0),
+ LocalNumSelectors(0), SelectorOffsets(nullptr), BaseSelectorID(0),
+ SelectorLookupTableData(nullptr), SelectorLookupTable(nullptr),
+ LocalNumDecls(0), DeclOffsets(nullptr), BaseDeclID(0),
+ LocalNumCXXBaseSpecifiers(0), CXXBaseSpecifiersOffsets(nullptr),
+ LocalNumCXXCtorInitializers(0), CXXCtorInitializersOffsets(nullptr),
+ FileSortedDecls(nullptr), NumFileSortedDecls(0),
+ ObjCCategoriesMap(nullptr), LocalNumObjCCategoriesInMap(0),
+ LocalNumTypes(0), TypeOffsets(nullptr), BaseTypeIndex(0)
+{}
+
+ModuleFile::~ModuleFile() {
+ delete static_cast<ASTIdentifierLookupTable *>(IdentifierLookupTable);
+ delete static_cast<HeaderFileInfoLookupTable *>(HeaderFileInfoTable);
+ delete static_cast<ASTSelectorLookupTable *>(SelectorLookupTable);
+}
+
+template<typename Key, typename Offset, unsigned InitialCapacity>
+static void
+dumpLocalRemap(StringRef Name,
+ const ContinuousRangeMap<Key, Offset, InitialCapacity> &Map) {
+ if (Map.begin() == Map.end())
+ return;
+
+ typedef ContinuousRangeMap<Key, Offset, InitialCapacity> MapType;
+ llvm::errs() << " " << Name << ":\n";
+ for (typename MapType::const_iterator I = Map.begin(), IEnd = Map.end();
+ I != IEnd; ++I) {
+ llvm::errs() << " " << I->first << " -> " << I->second << "\n";
+ }
+}
+
+void ModuleFile::dump() {
+ llvm::errs() << "\nModule: " << FileName << "\n";
+ if (!Imports.empty()) {
+ llvm::errs() << " Imports: ";
+ for (unsigned I = 0, N = Imports.size(); I != N; ++I) {
+ if (I)
+ llvm::errs() << ", ";
+ llvm::errs() << Imports[I]->FileName;
+ }
+ llvm::errs() << "\n";
+ }
+
+ // Remapping tables.
+ llvm::errs() << " Base source location offset: " << SLocEntryBaseOffset
+ << '\n';
+ dumpLocalRemap("Source location offset local -> global map", SLocRemap);
+
+ llvm::errs() << " Base identifier ID: " << BaseIdentifierID << '\n'
+ << " Number of identifiers: " << LocalNumIdentifiers << '\n';
+ dumpLocalRemap("Identifier ID local -> global map", IdentifierRemap);
+
+ llvm::errs() << " Base macro ID: " << BaseMacroID << '\n'
+ << " Number of macros: " << LocalNumMacros << '\n';
+ dumpLocalRemap("Macro ID local -> global map", MacroRemap);
+
+ llvm::errs() << " Base submodule ID: " << BaseSubmoduleID << '\n'
+ << " Number of submodules: " << LocalNumSubmodules << '\n';
+ dumpLocalRemap("Submodule ID local -> global map", SubmoduleRemap);
+
+ llvm::errs() << " Base selector ID: " << BaseSelectorID << '\n'
+ << " Number of selectors: " << LocalNumSelectors << '\n';
+ dumpLocalRemap("Selector ID local -> global map", SelectorRemap);
+
+ llvm::errs() << " Base preprocessed entity ID: " << BasePreprocessedEntityID
+ << '\n'
+ << " Number of preprocessed entities: "
+ << NumPreprocessedEntities << '\n';
+ dumpLocalRemap("Preprocessed entity ID local -> global map",
+ PreprocessedEntityRemap);
+
+ llvm::errs() << " Base type index: " << BaseTypeIndex << '\n'
+ << " Number of types: " << LocalNumTypes << '\n';
+ dumpLocalRemap("Type index local -> global map", TypeRemap);
+
+ llvm::errs() << " Base decl ID: " << BaseDeclID << '\n'
+ << " Number of decls: " << LocalNumDecls << '\n';
+ dumpLocalRemap("Decl ID local -> global map", DeclRemap);
+}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ModuleFileExtension.cpp b/contrib/llvm/tools/clang/lib/Serialization/ModuleFileExtension.cpp
new file mode 100644
index 0000000..81dcfd6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ModuleFileExtension.cpp
@@ -0,0 +1,22 @@
+//===-- ModuleFileExtension.cpp - Module File Extensions ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Serialization/ModuleFileExtension.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+ModuleFileExtension::~ModuleFileExtension() { }
+
+llvm::hash_code ModuleFileExtension::hashExtension(llvm::hash_code Code) const {
+ return Code;
+}
+
+ModuleFileExtensionWriter::~ModuleFileExtensionWriter() { }
+
+ModuleFileExtensionReader::~ModuleFileExtensionReader() { }
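
A hypothetical subclass showing the chaining contract hashExtension is meant to support: mix anything that affects the extension's on-disk format into the incoming hash_code. The class name and version constant are invented, and the remaining pure virtual members are elided.

    class MyExtension : public clang::ModuleFileExtension {
      llvm::hash_code hashExtension(llvm::hash_code Code) const override {
        // Fold the format version in so a change forces a module rebuild.
        return llvm::hash_combine(Code, /*FormatVersion=*/1u);
      }
      // ... remaining pure virtual members elided ...
    };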
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp b/contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp
new file mode 100644
index 0000000..74f75a1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp
@@ -0,0 +1,476 @@
+//===--- ModuleManager.cpp - Module Manager ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ModuleManager class, which manages a set of loaded
+// modules for the ASTReader.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Frontend/PCHContainerOperations.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/ModuleMap.h"
+#include "clang/Serialization/GlobalModuleIndex.h"
+#include "clang/Serialization/ModuleManager.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include <system_error>
+
+#ifndef NDEBUG
+#include "llvm/Support/GraphWriter.h"
+#endif
+
+using namespace clang;
+using namespace serialization;
+
+ModuleFile *ModuleManager::lookup(StringRef Name) {
+ const FileEntry *Entry = FileMgr.getFile(Name, /*openFile=*/false,
+ /*cacheFailure=*/false);
+ if (Entry)
+ return lookup(Entry);
+
+ return nullptr;
+}
+
+ModuleFile *ModuleManager::lookup(const FileEntry *File) {
+ llvm::DenseMap<const FileEntry *, ModuleFile *>::iterator Known
+ = Modules.find(File);
+ if (Known == Modules.end())
+ return nullptr;
+
+ return Known->second;
+}
+
+std::unique_ptr<llvm::MemoryBuffer>
+ModuleManager::lookupBuffer(StringRef Name) {
+ const FileEntry *Entry = FileMgr.getFile(Name, /*openFile=*/false,
+ /*cacheFailure=*/false);
+ return std::move(InMemoryBuffers[Entry]);
+}
+
+ModuleManager::AddModuleResult
+ModuleManager::addModule(StringRef FileName, ModuleKind Type,
+ SourceLocation ImportLoc, ModuleFile *ImportedBy,
+ unsigned Generation,
+ off_t ExpectedSize, time_t ExpectedModTime,
+ ASTFileSignature ExpectedSignature,
+ ASTFileSignatureReader ReadSignature,
+ ModuleFile *&Module,
+ std::string &ErrorStr) {
+ Module = nullptr;
+
+ // Look for the file entry. This only fails if the expected size or
+ // modification time differ.
+ const FileEntry *Entry;
+ if (Type == MK_ExplicitModule) {
+ // If we're not expecting to pull this file out of the module cache, it
+ // might have a different mtime due to being moved across filesystems in
+ // a distributed build. The size must still match, though. (As must the
+ // contents, but we can't check that.)
+ ExpectedModTime = 0;
+ }
+ if (lookupModuleFile(FileName, ExpectedSize, ExpectedModTime, Entry)) {
+ ErrorStr = "module file out of date";
+ return OutOfDate;
+ }
+
+ if (!Entry && FileName != "-") {
+ ErrorStr = "module file not found";
+ return Missing;
+ }
+
+  // Check whether we have already loaded this module.
+ ModuleFile *&ModuleEntry = Modules[Entry];
+ bool NewModule = false;
+ if (!ModuleEntry) {
+ // Allocate a new module.
+ ModuleFile *New = new ModuleFile(Type, Generation);
+ New->Index = Chain.size();
+ New->FileName = FileName.str();
+ New->File = Entry;
+ New->ImportLoc = ImportLoc;
+ Chain.push_back(New);
+ if (!New->isModule())
+ PCHChain.push_back(New);
+ if (!ImportedBy)
+ Roots.push_back(New);
+ NewModule = true;
+ ModuleEntry = New;
+
+ New->InputFilesValidationTimestamp = 0;
+ if (New->Kind == MK_ImplicitModule) {
+ std::string TimestampFilename = New->getTimestampFilename();
+ vfs::Status Status;
+ // A cached stat value would be fine as well.
+ if (!FileMgr.getNoncachedStatValue(TimestampFilename, Status))
+ New->InputFilesValidationTimestamp =
+ Status.getLastModificationTime().toEpochTime();
+ }
+
+ // Load the contents of the module
+ if (std::unique_ptr<llvm::MemoryBuffer> Buffer = lookupBuffer(FileName)) {
+ // The buffer was already provided for us.
+ New->Buffer = std::move(Buffer);
+ } else {
+ // Open the AST file.
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Buf(
+ (std::error_code()));
+ if (FileName == "-") {
+ Buf = llvm::MemoryBuffer::getSTDIN();
+ } else {
+ // Leave the FileEntry open so if it gets read again by another
+ // ModuleManager it must be the same underlying file.
+ // FIXME: Because FileManager::getFile() doesn't guarantee that it will
+ // give us an open file, this may not be 100% reliable.
+ Buf = FileMgr.getBufferForFile(New->File,
+ /*IsVolatile=*/false,
+ /*ShouldClose=*/false);
+ }
+
+ if (!Buf) {
+ ErrorStr = Buf.getError().message();
+ return Missing;
+ }
+
+ New->Buffer = std::move(*Buf);
+ }
+
+ // Initialize the stream.
+ PCHContainerRdr.ExtractPCH(New->Buffer->getMemBufferRef(), New->StreamFile);
+ }
+
+ if (ExpectedSignature) {
+ if (NewModule)
+ ModuleEntry->Signature = ReadSignature(ModuleEntry->StreamFile);
+ else
+ assert(ModuleEntry->Signature == ReadSignature(ModuleEntry->StreamFile));
+
+ if (ModuleEntry->Signature != ExpectedSignature) {
+ ErrorStr = ModuleEntry->Signature ? "signature mismatch"
+ : "could not read module signature";
+
+ if (NewModule) {
+ // Remove the module file immediately, since removeModules might try to
+ // invalidate the file cache for Entry, and that is not safe if this
+ // module is *itself* up to date, but has an out-of-date importer.
+ Modules.erase(Entry);
+ assert(Chain.back() == ModuleEntry);
+ Chain.pop_back();
+ if (!ModuleEntry->isModule())
+ PCHChain.pop_back();
+ if (Roots.back() == ModuleEntry)
+ Roots.pop_back();
+ else
+ assert(ImportedBy);
+ delete ModuleEntry;
+ }
+ return OutOfDate;
+ }
+ }
+
+ if (ImportedBy) {
+ ModuleEntry->ImportedBy.insert(ImportedBy);
+ ImportedBy->Imports.insert(ModuleEntry);
+ } else {
+ if (!ModuleEntry->DirectlyImported)
+ ModuleEntry->ImportLoc = ImportLoc;
+
+ ModuleEntry->DirectlyImported = true;
+ }
+
+ Module = ModuleEntry;
+ return NewModule? NewlyLoaded : AlreadyLoaded;
+}
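
Illustrative caller-side handling of the four AddModuleResult outcomes, using only names visible in this function; Manager and the other surrounding variables are placeholders.

    ModuleFile *MF = nullptr;
    std::string ErrorStr;
    switch (Manager.addModule(FileName, MK_ImplicitModule, ImportLoc,
                              /*ImportedBy=*/nullptr, Generation, ExpectedSize,
                              ExpectedModTime, ExpectedSignature, ReadSignature,
                              MF, ErrorStr)) {
    case ModuleManager::NewlyLoaded:   /* first load: read its blocks */ break;
    case ModuleManager::AlreadyLoaded: /* shared: just record the import */ break;
    case ModuleManager::Missing:       /* fall through */
    case ModuleManager::OutOfDate:     /* diagnose via ErrorStr, rebuild */ break;
    }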
+
+void ModuleManager::removeModules(
+ ModuleIterator first, ModuleIterator last,
+ llvm::SmallPtrSetImpl<ModuleFile *> &LoadedSuccessfully,
+ ModuleMap *modMap) {
+ if (first == last)
+ return;
+
+ // Explicitly clear VisitOrder since we might not notice it is stale.
+ VisitOrder.clear();
+
+ // Collect the set of module file pointers that we'll be removing.
+ llvm::SmallPtrSet<ModuleFile *, 4> victimSet(first, last);
+
+ auto IsVictim = [&](ModuleFile *MF) {
+ return victimSet.count(MF);
+ };
+ // Remove any references to the now-destroyed modules.
+ for (unsigned i = 0, n = Chain.size(); i != n; ++i) {
+ Chain[i]->ImportedBy.remove_if(IsVictim);
+ }
+ Roots.erase(std::remove_if(Roots.begin(), Roots.end(), IsVictim),
+ Roots.end());
+
+ // Remove the modules from the PCH chain.
+ for (auto I = first; I != last; ++I) {
+ if (!(*I)->isModule()) {
+ PCHChain.erase(std::find(PCHChain.begin(), PCHChain.end(), *I),
+ PCHChain.end());
+ break;
+ }
+ }
+
+ // Delete the modules and erase them from the various structures.
+ for (ModuleIterator victim = first; victim != last; ++victim) {
+ Modules.erase((*victim)->File);
+
+ if (modMap) {
+ StringRef ModuleName = (*victim)->ModuleName;
+ if (Module *mod = modMap->findModule(ModuleName)) {
+ mod->setASTFile(nullptr);
+ }
+ }
+
+ // Files that didn't make it through ReadASTCore successfully will be
+ // rebuilt (or there was an error). Invalidate them so that we can load the
+ // new files that will be renamed over the old ones.
+ if (LoadedSuccessfully.count(*victim) == 0)
+ FileMgr.invalidateCache((*victim)->File);
+
+ delete *victim;
+ }
+
+ // Remove the modules from the chain.
+ Chain.erase(first, last);
+}
+
+void
+ModuleManager::addInMemoryBuffer(StringRef FileName,
+ std::unique_ptr<llvm::MemoryBuffer> Buffer) {
+
+ const FileEntry *Entry =
+ FileMgr.getVirtualFile(FileName, Buffer->getBufferSize(), 0);
+ InMemoryBuffers[Entry] = std::move(Buffer);
+}
+
+ModuleManager::VisitState *ModuleManager::allocateVisitState() {
+ // Fast path: if we have a cached state, use it.
+ if (FirstVisitState) {
+ VisitState *Result = FirstVisitState;
+ FirstVisitState = FirstVisitState->NextState;
+ Result->NextState = nullptr;
+ return Result;
+ }
+
+ // Allocate and return a new state.
+ return new VisitState(size());
+}
+
+void ModuleManager::returnVisitState(VisitState *State) {
+ assert(State->NextState == nullptr && "Visited state is in list?");
+ State->NextState = FirstVisitState;
+ FirstVisitState = State;
+}
+
+void ModuleManager::setGlobalIndex(GlobalModuleIndex *Index) {
+ GlobalIndex = Index;
+ if (!GlobalIndex) {
+ ModulesInCommonWithGlobalIndex.clear();
+ return;
+ }
+
+ // Notify the global module index about all of the modules we've already
+ // loaded.
+ for (unsigned I = 0, N = Chain.size(); I != N; ++I) {
+ if (!GlobalIndex->loadedModuleFile(Chain[I])) {
+ ModulesInCommonWithGlobalIndex.push_back(Chain[I]);
+ }
+ }
+}
+
+void ModuleManager::moduleFileAccepted(ModuleFile *MF) {
+ if (!GlobalIndex || GlobalIndex->loadedModuleFile(MF))
+ return;
+
+ ModulesInCommonWithGlobalIndex.push_back(MF);
+}
+
+ModuleManager::ModuleManager(FileManager &FileMgr,
+ const PCHContainerReader &PCHContainerRdr)
+ : FileMgr(FileMgr), PCHContainerRdr(PCHContainerRdr), GlobalIndex(),
+ FirstVisitState(nullptr) {}
+
+ModuleManager::~ModuleManager() {
+ for (unsigned i = 0, e = Chain.size(); i != e; ++i)
+ delete Chain[e - i - 1];
+ delete FirstVisitState;
+}
+
+void ModuleManager::visit(llvm::function_ref<bool(ModuleFile &M)> Visitor,
+ llvm::SmallPtrSetImpl<ModuleFile *> *ModuleFilesHit) {
+ // If the visitation order vector is the wrong size, recompute the order.
+ if (VisitOrder.size() != Chain.size()) {
+ unsigned N = size();
+ VisitOrder.clear();
+ VisitOrder.reserve(N);
+
+    // Record the number of incoming edges for each module. When we
+    // encounter a module with no incoming edges, push it onto the queue
+    // to seed the traversal.
+ SmallVector<ModuleFile *, 4> Queue;
+ Queue.reserve(N);
+ llvm::SmallVector<unsigned, 4> UnusedIncomingEdges;
+ UnusedIncomingEdges.resize(size());
+ for (auto M = rbegin(), MEnd = rend(); M != MEnd; ++M) {
+ unsigned Size = (*M)->ImportedBy.size();
+ UnusedIncomingEdges[(*M)->Index] = Size;
+ if (!Size)
+ Queue.push_back(*M);
+ }
+
+ // Traverse the graph, making sure to visit a module before visiting any
+ // of its dependencies.
+ while (!Queue.empty()) {
+ ModuleFile *CurrentModule = Queue.pop_back_val();
+ VisitOrder.push_back(CurrentModule);
+
+ // For any module that this module depends on, push it on the
+ // stack (if it hasn't already been marked as visited).
+ for (auto M = CurrentModule->Imports.rbegin(),
+ MEnd = CurrentModule->Imports.rend();
+ M != MEnd; ++M) {
+ // Remove our current module as an impediment to visiting the
+ // module we depend on. If we were the last unvisited module
+ // that depends on this particular module, push it into the
+ // queue to be visited.
+ unsigned &NumUnusedEdges = UnusedIncomingEdges[(*M)->Index];
+ if (NumUnusedEdges && (--NumUnusedEdges == 0))
+ Queue.push_back(*M);
+ }
+ }
+
+ assert(VisitOrder.size() == N && "Visitation order is wrong?");
+
+ delete FirstVisitState;
+ FirstVisitState = nullptr;
+ }
+
+ VisitState *State = allocateVisitState();
+ unsigned VisitNumber = State->NextVisitNumber++;
+
+ // If the caller has provided us with a hit-set that came from the global
+ // module index, mark every module file in common with the global module
+ // index that is *not* in that set as 'visited'.
+ if (ModuleFilesHit && !ModulesInCommonWithGlobalIndex.empty()) {
+ for (unsigned I = 0, N = ModulesInCommonWithGlobalIndex.size(); I != N; ++I)
+ {
+ ModuleFile *M = ModulesInCommonWithGlobalIndex[I];
+ if (!ModuleFilesHit->count(M))
+ State->VisitNumber[M->Index] = VisitNumber;
+ }
+ }
+
+ for (unsigned I = 0, N = VisitOrder.size(); I != N; ++I) {
+ ModuleFile *CurrentModule = VisitOrder[I];
+ // Should we skip this module file?
+ if (State->VisitNumber[CurrentModule->Index] == VisitNumber)
+ continue;
+
+ // Visit the module.
+ assert(State->VisitNumber[CurrentModule->Index] == VisitNumber - 1);
+ State->VisitNumber[CurrentModule->Index] = VisitNumber;
+ if (!Visitor(*CurrentModule))
+ continue;
+
+    // The visitor has requested that we cut off visitation of any
+    // module that the current module depends on. To indicate this
+    // behavior, we mark all of the reachable modules as having been visited.
+ ModuleFile *NextModule = CurrentModule;
+ do {
+ // For any module that this module depends on, push it on the
+ // stack (if it hasn't already been marked as visited).
+ for (llvm::SetVector<ModuleFile *>::iterator
+ M = NextModule->Imports.begin(),
+ MEnd = NextModule->Imports.end();
+ M != MEnd; ++M) {
+ if (State->VisitNumber[(*M)->Index] != VisitNumber) {
+ State->Stack.push_back(*M);
+ State->VisitNumber[(*M)->Index] = VisitNumber;
+ }
+ }
+
+ if (State->Stack.empty())
+ break;
+
+ // Pop the next module off the stack.
+ NextModule = State->Stack.pop_back_val();
+ } while (true);
+ }
+
+ returnVisitState(State);
+}
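
A standalone sketch of the topological ordering computed above (Kahn's algorithm): count incoming imported-by edges, seed the queue with zero-in-degree roots, and release each import once its last importer has been visited. The graph here is a stand-in, not ModuleManager data.

    #include <cassert>
    #include <vector>

    int main() {
      // Imports[i] lists the modules that module i imports.
      // 0 imports 1 and 2; 1 imports 2. Expected visit order: 0, 1, 2.
      std::vector<std::vector<unsigned>> Imports = {{1, 2}, {2}, {}};
      std::vector<unsigned> InDegree(3, 0);  // number of importers
      for (const auto &Edges : Imports)
        for (unsigned M : Edges)
          ++InDegree[M];

      std::vector<unsigned> Queue, Order;
      for (unsigned I = 0, N = Imports.size(); I != N; ++I)
        if (InDegree[I] == 0)
          Queue.push_back(I);  // roots: nothing imports them

      while (!Queue.empty()) {
        unsigned Cur = Queue.back();
        Queue.pop_back();
        Order.push_back(Cur);
        for (unsigned M : Imports[Cur])
          if (--InDegree[M] == 0)
            Queue.push_back(M);  // last importer visited; release it
      }

      assert((Order == std::vector<unsigned>{0, 1, 2}));
      return 0;
    }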
+
+bool ModuleManager::lookupModuleFile(StringRef FileName,
+ off_t ExpectedSize,
+ time_t ExpectedModTime,
+ const FileEntry *&File) {
+ // Open the file immediately to ensure there is no race between stat'ing and
+ // opening the file.
+ File = FileMgr.getFile(FileName, /*openFile=*/true, /*cacheFailure=*/false);
+
+ if (!File && FileName != "-") {
+ return false;
+ }
+
+ if ((ExpectedSize && ExpectedSize != File->getSize()) ||
+ (ExpectedModTime && ExpectedModTime != File->getModificationTime()))
+ // Do not destroy File, as it may be referenced. If we need to rebuild it,
+ // it will be destroyed by removeModules.
+ return true;
+
+ return false;
+}
+
+#ifndef NDEBUG
+namespace llvm {
+ template<>
+ struct GraphTraits<ModuleManager> {
+ typedef ModuleFile NodeType;
+ typedef llvm::SetVector<ModuleFile *>::const_iterator ChildIteratorType;
+ typedef ModuleManager::ModuleConstIterator nodes_iterator;
+
+ static ChildIteratorType child_begin(NodeType *Node) {
+ return Node->Imports.begin();
+ }
+
+ static ChildIteratorType child_end(NodeType *Node) {
+ return Node->Imports.end();
+ }
+
+ static nodes_iterator nodes_begin(const ModuleManager &Manager) {
+ return Manager.begin();
+ }
+
+ static nodes_iterator nodes_end(const ModuleManager &Manager) {
+ return Manager.end();
+ }
+ };
+
+ template<>
+ struct DOTGraphTraits<ModuleManager> : public DefaultDOTGraphTraits {
+ explicit DOTGraphTraits(bool IsSimple = false)
+ : DefaultDOTGraphTraits(IsSimple) { }
+
+ static bool renderGraphFromBottomUp() {
+ return true;
+ }
+
+ std::string getNodeLabel(ModuleFile *M, const ModuleManager&) {
+ return M->ModuleName;
+ }
+ };
+}
+
+void ModuleManager::viewGraph() {
+ llvm::ViewGraph(*this, "Modules");
+}
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Serialization/MultiOnDiskHashTable.h b/contrib/llvm/tools/clang/lib/Serialization/MultiOnDiskHashTable.h
new file mode 100644
index 0000000..04dea83
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/MultiOnDiskHashTable.h
@@ -0,0 +1,330 @@
+//===--- MultiOnDiskHashTable.h - Merged set of hash tables -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides a hash table data structure suitable for incremental and
+// distributed storage across a set of files.
+//
+// Multiple hash tables from different files are implicitly merged to improve
+// performance, and on reload the merged table will override those from other
+// files.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_LIB_SERIALIZATION_MULTIONDISKHASHTABLE_H
+#define LLVM_CLANG_LIB_SERIALIZATION_MULTIONDISKHASHTABLE_H
+
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/OnDiskHashTable.h"
+
+namespace clang {
+namespace serialization {
+
+class ModuleFile;
+
+/// \brief A collection of on-disk hash tables, merged when relevant for performance.
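+///
+/// (Illustrative: a newly loaded table may name earlier files as overridden;
+/// those files' tables are then dropped from lookups, and once too many
+/// tables accumulate they are condensed into one in-memory MergedTable.)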
+template<typename Info> class MultiOnDiskHashTable {
+public:
+ /// A handle to a file, used when overriding tables.
+ typedef typename Info::file_type file_type;
+ /// A pointer to an on-disk representation of the hash table.
+ typedef const unsigned char *storage_type;
+
+ typedef typename Info::external_key_type external_key_type;
+ typedef typename Info::internal_key_type internal_key_type;
+ typedef typename Info::data_type data_type;
+ typedef typename Info::data_type_builder data_type_builder;
+ typedef unsigned hash_value_type;
+
+private:
+ /// \brief A hash table stored on disk.
+ struct OnDiskTable {
+ typedef llvm::OnDiskIterableChainedHashTable<Info> HashTable;
+
+ file_type File;
+ HashTable Table;
+
+ OnDiskTable(file_type File, unsigned NumBuckets, unsigned NumEntries,
+ storage_type Buckets, storage_type Payload, storage_type Base,
+ const Info &InfoObj)
+ : File(File),
+ Table(NumBuckets, NumEntries, Buckets, Payload, Base, InfoObj) {}
+ };
+
+ struct MergedTable {
+ std::vector<file_type> Files;
+ llvm::DenseMap<internal_key_type, data_type> Data;
+ };
+
+ typedef llvm::PointerUnion<OnDiskTable*, MergedTable*> Table;
+ typedef llvm::TinyPtrVector<void*> TableVector;
+
+ /// \brief The current set of on-disk and merged tables.
+ /// We manually store the opaque value of the Table because TinyPtrVector
+ /// can't cope with holding a PointerUnion directly.
+ /// There can be at most one MergedTable in this vector, and if present,
+ /// it is the first table.
+ TableVector Tables;
+
+ /// \brief Files corresponding to overridden tables that we've not yet
+ /// discarded.
+ llvm::TinyPtrVector<file_type> PendingOverrides;
+
+ struct AsOnDiskTable {
+ typedef OnDiskTable *result_type;
+ result_type operator()(void *P) const {
+ return Table::getFromOpaqueValue(P).template get<OnDiskTable *>();
+ }
+ };
+ typedef llvm::mapped_iterator<TableVector::iterator, AsOnDiskTable>
+ table_iterator;
+ typedef llvm::iterator_range<table_iterator> table_range;
+
+ /// \brief The current set of on-disk tables.
+ table_range tables() {
+ auto Begin = Tables.begin(), End = Tables.end();
+ if (getMergedTable())
+ ++Begin;
+ return llvm::make_range(llvm::map_iterator(Begin, AsOnDiskTable()),
+ llvm::map_iterator(End, AsOnDiskTable()));
+ }
+
+ MergedTable *getMergedTable() const {
+ // If we already have a merged table, it's the first one.
+ return Tables.empty() ? nullptr : Table::getFromOpaqueValue(*Tables.begin())
+ .template dyn_cast<MergedTable*>();
+ }
+
+ /// \brief Delete all our current on-disk tables.
+ void clear() {
+ for (auto *T : tables())
+ delete T;
+ if (auto *M = getMergedTable())
+ delete M;
+ Tables.clear();
+ }
+
+ void removeOverriddenTables() {
+ llvm::DenseSet<file_type> Files;
+ Files.insert(PendingOverrides.begin(), PendingOverrides.end());
+ // Explicitly capture Files to work around an MSVC 2015 rejects-valid bug.
+ auto ShouldRemove = [&Files](void *T) -> bool {
+ auto *ODT = Table::getFromOpaqueValue(T).template get<OnDiskTable *>();
+ bool Remove = Files.count(ODT->File);
+ if (Remove)
+ delete ODT;
+ return Remove;
+ };
+ Tables.erase(std::remove_if(tables().begin().getCurrent(), Tables.end(),
+ ShouldRemove),
+ Tables.end());
+ PendingOverrides.clear();
+ }
+
+ void condense() {
+ MergedTable *Merged = getMergedTable();
+ if (!Merged)
+ Merged = new MergedTable;
+
+ // Read in all the tables and merge them together.
+ // FIXME: Be smarter about which tables we merge.
+ for (auto *ODT : tables()) {
+ auto &HT = ODT->Table;
+ Info &InfoObj = HT.getInfoObj();
+
+ for (auto I = HT.data_begin(), E = HT.data_end(); I != E; ++I) {
+ auto *LocalPtr = I.getItem();
+
+ // FIXME: Don't rely on the OnDiskHashTable format here.
+ auto L = InfoObj.ReadKeyDataLength(LocalPtr);
+ const internal_key_type &Key = InfoObj.ReadKey(LocalPtr, L.first);
+ data_type_builder ValueBuilder(Merged->Data[Key]);
+ InfoObj.ReadDataInto(Key, LocalPtr + L.first, L.second,
+ ValueBuilder);
+ }
+
+ Merged->Files.push_back(ODT->File);
+ delete ODT;
+ }
+
+ Tables.clear();
+ Tables.push_back(Table(Merged).getOpaqueValue());
+ }
+
+ /// The generator is permitted to read our merged table.
+ template<typename ReaderInfo, typename WriterInfo>
+ friend class MultiOnDiskHashTableGenerator;
+
+public:
+ MultiOnDiskHashTable() {}
+ MultiOnDiskHashTable(MultiOnDiskHashTable &&O)
+ : Tables(std::move(O.Tables)),
+ PendingOverrides(std::move(O.PendingOverrides)) {
+ O.Tables.clear();
+ }
+ MultiOnDiskHashTable &operator=(MultiOnDiskHashTable &&O) {
+ if (&O == this)
+ return *this;
+ clear();
+ Tables = std::move(O.Tables);
+ O.Tables.clear();
+ PendingOverrides = std::move(O.PendingOverrides);
+ return *this;
+ }
+ ~MultiOnDiskHashTable() { clear(); }
+
+ /// \brief Add the table \p Data loaded from file \p File.
+ void add(file_type File, storage_type Data, Info InfoObj = Info()) {
+ using namespace llvm::support;
+ storage_type Ptr = Data;
+
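+ // On-disk layout consumed here, and produced by the generator's emit():
+ // [uint32 BucketOffset][uint32 NumOverriddenFiles][file refs...]
+ // [hash table data], with the bucket array at Data + BucketOffset.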
+ uint32_t BucketOffset = endian::readNext<uint32_t, little, unaligned>(Ptr);
+
+ // Read the list of overridden files.
+ uint32_t NumFiles = endian::readNext<uint32_t, little, unaligned>(Ptr);
+ // FIXME: Add a reserve() to TinyPtrVector so that we don't need to make
+ // an additional copy.
+ llvm::SmallVector<file_type, 16> OverriddenFiles;
+ OverriddenFiles.reserve(NumFiles);
+ for (/**/; NumFiles != 0; --NumFiles)
+ OverriddenFiles.push_back(InfoObj.ReadFileRef(Ptr));
+ PendingOverrides.insert(PendingOverrides.end(), OverriddenFiles.begin(),
+ OverriddenFiles.end());
+
+ // Read the OnDiskChainedHashTable header.
+ storage_type Buckets = Data + BucketOffset;
+ auto NumBucketsAndEntries =
+ OnDiskTable::HashTable::readNumBucketsAndEntries(Buckets);
+
+ // Register the table.
+ Table NewTable = new OnDiskTable(File, NumBucketsAndEntries.first,
+ NumBucketsAndEntries.second,
+ Buckets, Ptr, Data, std::move(InfoObj));
+ Tables.push_back(NewTable.getOpaqueValue());
+ }
+
+ /// \brief Find and read the lookup results for \p EKey.
+ data_type find(const external_key_type &EKey) {
+ data_type Result;
+
+ if (!PendingOverrides.empty())
+ removeOverriddenTables();
+
+ if (Tables.size() > static_cast<unsigned>(Info::MaxTables))
+ condense();
+
+ internal_key_type Key = Info::GetInternalKey(EKey);
+ auto KeyHash = Info::ComputeHash(Key);
+
+ if (MergedTable *M = getMergedTable()) {
+ auto It = M->Data.find(Key);
+ if (It != M->Data.end())
+ Result = It->second;
+ }
+
+ data_type_builder ResultBuilder(Result);
+
+ for (auto *ODT : tables()) {
+ auto &HT = ODT->Table;
+ auto It = HT.find_hashed(Key, KeyHash);
+ if (It != HT.end())
+ HT.getInfoObj().ReadDataInto(Key, It.getDataPtr(), It.getDataLen(),
+ ResultBuilder);
+ }
+
+ return Result;
+ }
+
+ /// \brief Read all the lookup results into a single value. This only makes
+ /// sense if merging values across keys is meaningful.
+ data_type findAll() {
+ data_type Result;
+ data_type_builder ResultBuilder(Result);
+
+ if (!PendingOverrides.empty())
+ removeOverriddenTables();
+
+ if (MergedTable *M = getMergedTable()) {
+ for (auto &KV : M->Data)
+ Info::MergeDataInto(KV.second, ResultBuilder);
+ }
+
+ for (auto *ODT : tables()) {
+ auto &HT = ODT->Table;
+ Info &InfoObj = HT.getInfoObj();
+ for (auto I = HT.data_begin(), E = HT.data_end(); I != E; ++I) {
+ auto *LocalPtr = I.getItem();
+
+ // FIXME: Don't rely on the OnDiskHashTable format here.
+ auto L = InfoObj.ReadKeyDataLength(LocalPtr);
+ const internal_key_type &Key = InfoObj.ReadKey(LocalPtr, L.first);
+ InfoObj.ReadDataInto(Key, LocalPtr + L.first, L.second, ResultBuilder);
+ }
+ }
+
+ return Result;
+ }
+};
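+
+// Typical use (sketch): call add() once per loaded file's table blob, then
+// find()/findAll() for lookups. Overridden tables are pruned and excess
+// tables condensed lazily inside those lookups.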
+
+/// \brief Writer for the on-disk hash table.
+template<typename ReaderInfo, typename WriterInfo>
+class MultiOnDiskHashTableGenerator {
+ typedef MultiOnDiskHashTable<ReaderInfo> BaseTable;
+ typedef llvm::OnDiskChainedHashTableGenerator<WriterInfo> Generator;
+
+ Generator Gen;
+
+public:
+ MultiOnDiskHashTableGenerator() : Gen() {}
+
+ void insert(typename WriterInfo::key_type_ref Key,
+ typename WriterInfo::data_type_ref Data, WriterInfo &Info) {
+ Gen.insert(Key, Data, Info);
+ }
+
+ void emit(llvm::SmallVectorImpl<char> &Out, WriterInfo &Info,
+ const BaseTable *Base) {
+ using namespace llvm::support;
+ llvm::raw_svector_ostream OutStream(Out);
+
+ // Write our header information.
+ {
+ endian::Writer<little> Writer(OutStream);
+
+ // Reserve four bytes for the bucket offset.
+ Writer.write<uint32_t>(0);
+
+ if (auto *Merged = Base ? Base->getMergedTable() : nullptr) {
+ // Write list of overridden files.
+ Writer.write<uint32_t>(Merged->Files.size());
+ for (const auto &F : Merged->Files)
+ Info.EmitFileRef(OutStream, F);
+
+ // Add all merged entries from Base to the generator.
+ for (auto &KV : Merged->Data) {
+ if (!Gen.contains(KV.first, Info))
+ Gen.insert(KV.first, Info.ImportData(KV.second), Info);
+ }
+ } else {
+ Writer.write<uint32_t>(0);
+ }
+ }
+
+ // Write the table itself.
+ uint32_t BucketOffset = Gen.Emit(OutStream, Info);
+
+ // Replace the first four bytes with the bucket offset.
+ endian::write32le(Out.data(), BucketOffset);
+ }
+};
+
+} // end namespace serialization
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.cpp
new file mode 100644
index 0000000..3dec8a5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.cpp
@@ -0,0 +1,24 @@
+//=- AllocationDiagnostics.cpp - Config options for allocation diags *- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Declares the configuration functions for leaks/allocation diagnostics.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AllocationDiagnostics.h"
+
+namespace clang {
+namespace ento {
+
+bool shouldIncludeAllocationSiteInLeakDiagnostics(AnalyzerOptions &AOpts) {
+ return AOpts.getBooleanOption("leak-diagnostics-reference-allocation",
+ false);
+}
+
+}}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h
new file mode 100644
index 0000000..048418e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h
@@ -0,0 +1,31 @@
+//=--- AllocationDiagnostics.h - Config options for allocation diags *- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Declares the configuration functions for leaks/allocation diagnostics.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_ALLOCATIONDIAGNOSTICS_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_ALLOCATIONDIAGNOSTICS_H
+
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+
+namespace clang { namespace ento {
+
+/// \brief Returns true if leak diagnostics should directly reference
+/// the allocation site (where possible).
+///
+/// The default is false.
+///
+bool shouldIncludeAllocationSiteInLeakDiagnostics(AnalyzerOptions &AOpts);
+
+}}
+
+#endif
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
new file mode 100644
index 0000000..a052d83
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
@@ -0,0 +1,140 @@
+//==--AnalyzerStatsChecker.cpp - Analyzer visitation statistics --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file reports various statistics about analyzer visitation.
+//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+#define DEBUG_TYPE "StatsChecker"
+
+STATISTIC(NumBlocks,
+ "The # of blocks in top level functions");
+STATISTIC(NumBlocksUnreachable,
+ "The # of unreachable blocks in analyzing top level functions");
+
+namespace {
+class AnalyzerStatsChecker : public Checker<check::EndAnalysis> {
+public:
+ void checkEndAnalysis(ExplodedGraph &G, BugReporter &B,
+ ExprEngine &Eng) const;
+};
+}
+
+void AnalyzerStatsChecker::checkEndAnalysis(ExplodedGraph &G,
+ BugReporter &B,
+ ExprEngine &Eng) const {
+ const CFG *C = nullptr;
+ const SourceManager &SM = B.getSourceManager();
+ llvm::SmallPtrSet<const CFGBlock*, 256> reachable;
+
+ // Root node should have the location context of the top most function.
+ const ExplodedNode *GraphRoot = *G.roots_begin();
+ const LocationContext *LC = GraphRoot->getLocation().getLocationContext();
+
+ const Decl *D = LC->getDecl();
+
+ // Iterate over the exploded graph.
+ for (ExplodedGraph::node_iterator I = G.nodes_begin();
+ I != G.nodes_end(); ++I) {
+ const ProgramPoint &P = I->getLocation();
+
+ // Only check the coverage in the top level function (optimization).
+ if (D != P.getLocationContext()->getDecl())
+ continue;
+
+ if (Optional<BlockEntrance> BE = P.getAs<BlockEntrance>()) {
+ const CFGBlock *CB = BE->getBlock();
+ reachable.insert(CB);
+ }
+ }
+
+ // Get the CFG and the Decl of this block.
+ C = LC->getCFG();
+
+ unsigned total = 0, unreachable = 0;
+
+ // Find CFGBlocks that were not covered by any node
+ for (CFG::const_iterator I = C->begin(); I != C->end(); ++I) {
+ const CFGBlock *CB = *I;
+ ++total;
+ // Check if the block is unreachable
+ if (!reachable.count(CB)) {
+ ++unreachable;
+ }
+ }
+
+ // We never 'reach' the entry block, so correct the unreachable count.
+ unreachable--;
+ // Likewise, there is no BlockEntrance corresponding to the exit block, so
+ // assume it is reached as well.
+ unreachable--;
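+ // (E.g. in a CFG with entry, exit, and three ordinary blocks of which one
+ // is never visited, 'reachable' misses entry, exit, and that block; the
+ // two decrements above leave 'unreachable' == 1.)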
+
+ // Generate the warning string
+ SmallString<128> buf;
+ llvm::raw_svector_ostream output(buf);
+ PresumedLoc Loc = SM.getPresumedLoc(D->getLocation());
+ if (!Loc.isValid())
+ return;
+
+ if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
+ const NamedDecl *ND = cast<NamedDecl>(D);
+ output << *ND;
+ }
+ else if (isa<BlockDecl>(D)) {
+ output << "block(line:" << Loc.getLine() << ":col:" << Loc.getColumn();
+ }
+
+ NumBlocksUnreachable += unreachable;
+ NumBlocks += total;
+ std::string NameOfRootFunction = output.str();
+
+ output << " -> Total CFGBlocks: " << total << " | Unreachable CFGBlocks: "
+ << unreachable << " | Exhausted Block: "
+ << (Eng.wasBlocksExhausted() ? "yes" : "no")
+ << " | Empty WorkList: "
+ << (Eng.hasEmptyWorkList() ? "yes" : "no");
+
+ B.EmitBasicReport(D, this, "Analyzer Statistics", "Internal Statistics",
+ output.str(), PathDiagnosticLocation(D, SM));
+
+ // Emit warning for each block we bailed out on.
+ typedef CoreEngine::BlocksExhausted::const_iterator ExhaustedIterator;
+ const CoreEngine &CE = Eng.getCoreEngine();
+ for (ExhaustedIterator I = CE.blocks_exhausted_begin(),
+ E = CE.blocks_exhausted_end(); I != E; ++I) {
+ const BlockEdge &BE = I->first;
+ const CFGBlock *Exit = BE.getDst();
+ const CFGElement &Front = Exit->front();
+ if (Optional<CFGStmt> CS = Front.getAs<CFGStmt>()) {
+ SmallString<128> bufI;
+ llvm::raw_svector_ostream outputI(bufI);
+ outputI << "(" << NameOfRootFunction << ")" <<
+ ": The analyzer generated a sink at this point";
+ B.EmitBasicReport(
+ D, this, "Sink Point", "Internal Statistics", outputI.str(),
+ PathDiagnosticLocation::createBegin(CS->getStmt(), SM, LC));
+ }
+ }
+}
+
+void ento::registerAnalyzerStatsChecker(CheckerManager &mgr) {
+ mgr.registerChecker<AnalyzerStatsChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
new file mode 100644
index 0000000..c092610
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
@@ -0,0 +1,93 @@
+//== ArrayBoundChecker.cpp ------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ArrayBoundChecker, which is a path-sensitive check
+// which looks for an out-of-bound array element access.
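+// For example (illustrative):
+//
+//   int buf[2];
+//   buf[2] = 1; // warning: index 2 is past the end of the 2-element array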
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ArrayBoundChecker :
+ public Checker<check::Location> {
+ mutable std::unique_ptr<BuiltinBug> BT;
+
+public:
+ void checkLocation(SVal l, bool isLoad, const Stmt* S,
+ CheckerContext &C) const;
+};
+}
+
+void ArrayBoundChecker::checkLocation(SVal l, bool isLoad, const Stmt* LoadS,
+ CheckerContext &C) const {
+ // Check for out of bound array element access.
+ const MemRegion *R = l.getAsRegion();
+ if (!R)
+ return;
+
+ const ElementRegion *ER = dyn_cast<ElementRegion>(R);
+ if (!ER)
+ return;
+
+ // Get the index of the accessed element.
+ DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
+
+ // Zero index is always in bound, this also passes ElementRegions created for
+ // pointer casts.
+ if (Idx.isZeroConstant())
+ return;
+
+ ProgramStateRef state = C.getState();
+
+ // Get the size of the array.
+ DefinedOrUnknownSVal NumElements
+ = C.getStoreManager().getSizeInElements(state, ER->getSuperRegion(),
+ ER->getValueType());
+
+ ProgramStateRef StInBound = state->assumeInBound(Idx, NumElements, true);
+ ProgramStateRef StOutBound = state->assumeInBound(Idx, NumElements, false);
+ if (StOutBound && !StInBound) {
+ ExplodedNode *N = C.generateErrorNode(StOutBound);
+ if (!N)
+ return;
+
+ if (!BT)
+ BT.reset(new BuiltinBug(
+ this, "Out-of-bound array access",
+ "Access out-of-bound array element (buffer overflow)"));
+
+ // FIXME: It would be nice to eventually make this diagnostic more clear,
+ // e.g., by referencing the original declaration or by saying *why* this
+ // reference is outside the range.
+
+ // Generate a report for this bug.
+ auto report = llvm::make_unique<BugReport>(*BT, BT->getDescription(), N);
+
+ report->addRange(LoadS->getSourceRange());
+ C.emitReport(std::move(report));
+ return;
+ }
+
+ // Array bound check succeeded. From this point forward the array bound
+ // check should always succeed.
+ C.addTransition(StInBound);
+}
+
+void ento::registerArrayBoundChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ArrayBoundChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
new file mode 100644
index 0000000..f4de733
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -0,0 +1,306 @@
+//== ArrayBoundCheckerV2.cpp ------------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ArrayBoundCheckerV2, which is a path-sensitive check
+// which looks for an out-of-bound array element access.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ArrayBoundCheckerV2 :
+ public Checker<check::Location> {
+ mutable std::unique_ptr<BuiltinBug> BT;
+
+ enum OOB_Kind { OOB_Precedes, OOB_Excedes, OOB_Tainted };
+
+ void reportOOB(CheckerContext &C, ProgramStateRef errorState,
+ OOB_Kind kind) const;
+
+public:
+ void checkLocation(SVal l, bool isLoad, const Stmt*S,
+ CheckerContext &C) const;
+};
+
+// FIXME: Eventually replace RegionRawOffset with this class.
+class RegionRawOffsetV2 {
+private:
+ const SubRegion *baseRegion;
+ SVal byteOffset;
+
+ RegionRawOffsetV2()
+ : baseRegion(nullptr), byteOffset(UnknownVal()) {}
+
+public:
+ RegionRawOffsetV2(const SubRegion* base, SVal offset)
+ : baseRegion(base), byteOffset(offset) {}
+
+ NonLoc getByteOffset() const { return byteOffset.castAs<NonLoc>(); }
+ const SubRegion *getRegion() const { return baseRegion; }
+
+ static RegionRawOffsetV2 computeOffset(ProgramStateRef state,
+ SValBuilder &svalBuilder,
+ SVal location);
+
+ void dump() const;
+ void dumpToStream(raw_ostream &os) const;
+};
+}
+
+static SVal computeExtentBegin(SValBuilder &svalBuilder,
+ const MemRegion *region) {
+ while (true)
+ switch (region->getKind()) {
+ default:
+ return svalBuilder.makeZeroArrayIndex();
+ case MemRegion::SymbolicRegionKind:
+ // FIXME: improve this later by tracking symbolic lower bounds
+ // for symbolic regions.
+ return UnknownVal();
+ case MemRegion::ElementRegionKind:
+ region = cast<SubRegion>(region)->getSuperRegion();
+ continue;
+ }
+}
+
+void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
+ const Stmt* LoadS,
+ CheckerContext &checkerContext) const {
+
+ // NOTE: Instead of using ProgramState::assumeInBound(), we are prototyping
+ // some new logic here that reasons directly about memory region extents.
+ // Once that logic is more mature, we can bring it back to assumeInBound()
+ // for all clients to use.
+ //
+ // The algorithm we are using here for bounds checking is to see if the
+ // memory access is within the extent of the base region. Since we
+ // have some flexibility in defining the base region, we can achieve
+ // various levels of conservatism in our buffer overflow checking.
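+ //
+ // (Illustrative: for 'char buf[10]' and an access 'buf[i]', the raw byte
+ // offset is 'i' and the region extent is 10. We warn only if 'i < 0' or
+ // 'i >= 10' is definitely true, or, for tainted 'i', if 'i >= 10' is
+ // merely possible.)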
+ ProgramStateRef state = checkerContext.getState();
+ ProgramStateRef originalState = state;
+
+ SValBuilder &svalBuilder = checkerContext.getSValBuilder();
+ const RegionRawOffsetV2 &rawOffset =
+ RegionRawOffsetV2::computeOffset(state, svalBuilder, location);
+
+ if (!rawOffset.getRegion())
+ return;
+
+ // CHECK LOWER BOUND: Is byteOffset < extent begin?
+ // If so, we are doing a load/store
+ // before the first valid offset in the memory region.
+
+ SVal extentBegin = computeExtentBegin(svalBuilder, rawOffset.getRegion());
+
+ if (Optional<NonLoc> NV = extentBegin.getAs<NonLoc>()) {
+ SVal lowerBound =
+ svalBuilder.evalBinOpNN(state, BO_LT, rawOffset.getByteOffset(), *NV,
+ svalBuilder.getConditionType());
+
+ Optional<NonLoc> lowerBoundToCheck = lowerBound.getAs<NonLoc>();
+ if (!lowerBoundToCheck)
+ return;
+
+ ProgramStateRef state_precedesLowerBound, state_withinLowerBound;
+ std::tie(state_precedesLowerBound, state_withinLowerBound) =
+ state->assume(*lowerBoundToCheck);
+
+ // Are we constrained enough to definitely precede the lower bound?
+ if (state_precedesLowerBound && !state_withinLowerBound) {
+ reportOOB(checkerContext, state_precedesLowerBound, OOB_Precedes);
+ return;
+ }
+
+ // Otherwise, assume the constraint of the lower bound.
+ assert(state_withinLowerBound);
+ state = state_withinLowerBound;
+ }
+
+ do {
+ // CHECK UPPER BOUND: Is byteOffset >= extent(baseRegion)? If so,
+ // we are doing a load/store after the last valid offset.
+ DefinedOrUnknownSVal extentVal =
+ rawOffset.getRegion()->getExtent(svalBuilder);
+ if (!extentVal.getAs<NonLoc>())
+ break;
+
+ SVal upperbound
+ = svalBuilder.evalBinOpNN(state, BO_GE, rawOffset.getByteOffset(),
+ extentVal.castAs<NonLoc>(),
+ svalBuilder.getConditionType());
+
+ Optional<NonLoc> upperboundToCheck = upperbound.getAs<NonLoc>();
+ if (!upperboundToCheck)
+ break;
+
+ ProgramStateRef state_exceedsUpperBound, state_withinUpperBound;
+ std::tie(state_exceedsUpperBound, state_withinUpperBound) =
+ state->assume(*upperboundToCheck);
+
+ // If we are under-constrained and the index variables are tainted, report.
+ if (state_exceedsUpperBound && state_withinUpperBound) {
+ if (state->isTainted(rawOffset.getByteOffset()))
+ reportOOB(checkerContext, state_exceedsUpperBound, OOB_Tainted);
+ return;
+ }
+
+ // If we are constrained enough to definitely exceed the upper bound, report.
+ if (state_exceedsUpperBound) {
+ assert(!state_withinUpperBound);
+ reportOOB(checkerContext, state_exceedsUpperBound, OOB_Excedes);
+ return;
+ }
+
+ assert(state_withinUpperBound);
+ state = state_withinUpperBound;
+ } while (false);
+
+ if (state != originalState)
+ checkerContext.addTransition(state);
+}
+
+void ArrayBoundCheckerV2::reportOOB(CheckerContext &checkerContext,
+ ProgramStateRef errorState,
+ OOB_Kind kind) const {
+
+ ExplodedNode *errorNode = checkerContext.generateErrorNode(errorState);
+ if (!errorNode)
+ return;
+
+ if (!BT)
+ BT.reset(new BuiltinBug(this, "Out-of-bound access"));
+
+ // FIXME: These diagnostics are preliminary. We should get far better
+ // diagnostics for explaining buffer overruns.
+
+ SmallString<256> buf;
+ llvm::raw_svector_ostream os(buf);
+ os << "Out of bound memory access ";
+ switch (kind) {
+ case OOB_Precedes:
+ os << "(accessed memory precedes memory block)";
+ break;
+ case OOB_Excedes:
+ os << "(access exceeds upper limit of memory block)";
+ break;
+ case OOB_Tainted:
+ os << "(index is tainted)";
+ break;
+ }
+
+ checkerContext.emitReport(
+ llvm::make_unique<BugReport>(*BT, os.str(), errorNode));
+}
+
+void RegionRawOffsetV2::dump() const {
+ dumpToStream(llvm::errs());
+}
+
+void RegionRawOffsetV2::dumpToStream(raw_ostream &os) const {
+ os << "raw_offset_v2{" << getRegion() << ',' << getByteOffset() << '}';
+}
+
+
+// Computes a value to be used by 'computeOffset'. If 'val' is undefined,
+// we substitute '0'; otherwise we return 'val' unchanged.
+static inline SVal getValue(SVal val, SValBuilder &svalBuilder) {
+ return val.getAs<UndefinedVal>() ? svalBuilder.makeArrayIndex(0) : val;
+}
+
+// Scale a base value by a scaling factor, and return the scaled
+// value as an SVal. Used by 'computeOffset'.
+static inline SVal scaleValue(ProgramStateRef state,
+ NonLoc baseVal, CharUnits scaling,
+ SValBuilder &sb) {
+ return sb.evalBinOpNN(state, BO_Mul, baseVal,
+ sb.makeArrayIndex(scaling.getQuantity()),
+ sb.getArrayIndexType());
+}
+
+// Add an SVal to another, treating unknown and undefined values as
+// summing to UnknownVal. Used by 'computeOffset'.
+static SVal addValue(ProgramStateRef state, SVal x, SVal y,
+ SValBuilder &svalBuilder) {
+ // We treat UnknownVals and UndefinedVals the same here because we
+ // only care about computing offsets.
+ if (x.isUnknownOrUndef() || y.isUnknownOrUndef())
+ return UnknownVal();
+
+ return svalBuilder.evalBinOpNN(state, BO_Add, x.castAs<NonLoc>(),
+ y.castAs<NonLoc>(),
+ svalBuilder.getArrayIndexType());
+}
+
+/// Compute a raw byte offset from a base region. Used for array bounds
+/// checking.
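+///
+/// For example (illustrative, assuming 4-byte 'int'): for 'int a[3][4]' and
+/// the location '&a[1][2]', the walk scales the inner index by sizeof(int)
+/// and the outer index by sizeof(int[4]), accumulating 2*4 + 1*16 = 24 bytes
+/// from the base region 'a'.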
+RegionRawOffsetV2 RegionRawOffsetV2::computeOffset(ProgramStateRef state,
+ SValBuilder &svalBuilder,
+ SVal location)
+{
+ const MemRegion *region = location.getAsRegion();
+ SVal offset = UndefinedVal();
+
+ while (region) {
+ switch (region->getKind()) {
+ default: {
+ if (const SubRegion *subReg = dyn_cast<SubRegion>(region)) {
+ offset = getValue(offset, svalBuilder);
+ if (!offset.isUnknownOrUndef())
+ return RegionRawOffsetV2(subReg, offset);
+ }
+ return RegionRawOffsetV2();
+ }
+ case MemRegion::ElementRegionKind: {
+ const ElementRegion *elemReg = cast<ElementRegion>(region);
+ SVal index = elemReg->getIndex();
+ if (!index.getAs<NonLoc>())
+ return RegionRawOffsetV2();
+ QualType elemType = elemReg->getElementType();
+ // If the element is an incomplete type, go no further.
+ ASTContext &astContext = svalBuilder.getContext();
+ if (elemType->isIncompleteType())
+ return RegionRawOffsetV2();
+
+ // Update the offset.
+ offset = addValue(state,
+ getValue(offset, svalBuilder),
+ scaleValue(state,
+ index.castAs<NonLoc>(),
+ astContext.getTypeSizeInChars(elemType),
+ svalBuilder),
+ svalBuilder);
+
+ if (offset.isUnknownOrUndef())
+ return RegionRawOffsetV2();
+
+ region = elemReg->getSuperRegion();
+ continue;
+ }
+ }
+ }
+ return RegionRawOffsetV2();
+}
+
+void ento::registerArrayBoundCheckerV2(CheckerManager &mgr) {
+ mgr.registerChecker<ArrayBoundCheckerV2>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
new file mode 100644
index 0000000..26d42ba
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -0,0 +1,1302 @@
+//== BasicObjCFoundationChecks.cpp - Simple Apple-Foundation checks -*- C++ -*--
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BasicObjCFoundationChecks, a class that encapsulates
+// a set of simple checks to run on Objective-C code using Apple's Foundation
+// classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "SelectorExtras.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class APIMisuse : public BugType {
+public:
+ APIMisuse(const CheckerBase *checker, const char *name)
+ : BugType(checker, name, "API Misuse (Apple)") {}
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
+static StringRef GetReceiverInterfaceName(const ObjCMethodCall &msg) {
+ if (const ObjCInterfaceDecl *ID = msg.getReceiverInterface())
+ return ID->getIdentifier()->getName();
+ return StringRef();
+}
+
+enum FoundationClass {
+ FC_None,
+ FC_NSArray,
+ FC_NSDictionary,
+ FC_NSEnumerator,
+ FC_NSNull,
+ FC_NSOrderedSet,
+ FC_NSSet,
+ FC_NSString
+};
+
+static FoundationClass findKnownClass(const ObjCInterfaceDecl *ID,
+ bool IncludeSuperclasses = true) {
+ static llvm::StringMap<FoundationClass> Classes;
+ if (Classes.empty()) {
+ Classes["NSArray"] = FC_NSArray;
+ Classes["NSDictionary"] = FC_NSDictionary;
+ Classes["NSEnumerator"] = FC_NSEnumerator;
+ Classes["NSNull"] = FC_NSNull;
+ Classes["NSOrderedSet"] = FC_NSOrderedSet;
+ Classes["NSSet"] = FC_NSSet;
+ Classes["NSString"] = FC_NSString;
+ }
+
+ // FIXME: Should we cache this at all?
+ FoundationClass result = Classes.lookup(ID->getIdentifier()->getName());
+ if (result == FC_None && IncludeSuperclasses)
+ if (const ObjCInterfaceDecl *Super = ID->getSuperClass())
+ return findKnownClass(Super);
+
+ return result;
+}
+
+//===----------------------------------------------------------------------===//
+// NilArgChecker - Check for prohibited nil arguments to ObjC method calls.
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class NilArgChecker : public Checker<check::PreObjCMessage,
+ check::PostStmt<ObjCDictionaryLiteral>,
+ check::PostStmt<ObjCArrayLiteral> > {
+ mutable std::unique_ptr<APIMisuse> BT;
+
+ mutable llvm::SmallDenseMap<Selector, unsigned, 16> StringSelectors;
+ mutable Selector ArrayWithObjectSel;
+ mutable Selector AddObjectSel;
+ mutable Selector InsertObjectAtIndexSel;
+ mutable Selector ReplaceObjectAtIndexWithObjectSel;
+ mutable Selector SetObjectAtIndexedSubscriptSel;
+ mutable Selector ArrayByAddingObjectSel;
+ mutable Selector DictionaryWithObjectForKeySel;
+ mutable Selector SetObjectForKeySel;
+ mutable Selector SetObjectForKeyedSubscriptSel;
+ mutable Selector RemoveObjectForKeySel;
+
+ void warnIfNilExpr(const Expr *E,
+ const char *Msg,
+ CheckerContext &C) const;
+
+ void warnIfNilArg(CheckerContext &C,
+ const ObjCMethodCall &msg, unsigned Arg,
+ FoundationClass Class,
+ bool CanBeSubscript = false) const;
+
+ void generateBugReport(ExplodedNode *N,
+ StringRef Msg,
+ SourceRange Range,
+ const Expr *E,
+ CheckerContext &C) const;
+
+ public:
+ void checkPreObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
+ void checkPostStmt(const ObjCDictionaryLiteral *DL,
+ CheckerContext &C) const;
+ void checkPostStmt(const ObjCArrayLiteral *AL,
+ CheckerContext &C) const;
+ };
+}
+
+void NilArgChecker::warnIfNilExpr(const Expr *E,
+ const char *Msg,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ if (State->isNull(C.getSVal(E)).isConstrainedTrue()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
+ generateBugReport(N, Msg, E->getSourceRange(), E, C);
+ }
+ }
+}
+
+void NilArgChecker::warnIfNilArg(CheckerContext &C,
+ const ObjCMethodCall &msg,
+ unsigned int Arg,
+ FoundationClass Class,
+ bool CanBeSubscript) const {
+ // Check if the argument is nil.
+ ProgramStateRef State = C.getState();
+ if (!State->isNull(msg.getArgSVal(Arg)).isConstrainedTrue())
+ return;
+
+ if (ExplodedNode *N = C.generateErrorNode()) {
+ SmallString<128> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+
+ if (CanBeSubscript && msg.getMessageKind() == OCM_Subscript) {
+
+ if (Class == FC_NSArray) {
+ os << "Array element cannot be nil";
+ } else if (Class == FC_NSDictionary) {
+ if (Arg == 0) {
+ os << "Value stored into '";
+ os << GetReceiverInterfaceName(msg) << "' cannot be nil";
+ } else {
+ assert(Arg == 1);
+ os << "'"<< GetReceiverInterfaceName(msg) << "' key cannot be nil";
+ }
+ } else
+ llvm_unreachable("Missing foundation class for the subscript expr");
+
+ } else {
+ if (Class == FC_NSDictionary) {
+ if (Arg == 0)
+ os << "Value argument ";
+ else {
+ assert(Arg == 1);
+ os << "Key argument ";
+ }
+ os << "to '";
+ msg.getSelector().print(os);
+ os << "' cannot be nil";
+ } else {
+ os << "Argument to '" << GetReceiverInterfaceName(msg) << "' method '";
+ msg.getSelector().print(os);
+ os << "' cannot be nil";
+ }
+ }
+
+ generateBugReport(N, os.str(), msg.getArgSourceRange(Arg),
+ msg.getArgExpr(Arg), C);
+ }
+}
+
+void NilArgChecker::generateBugReport(ExplodedNode *N,
+ StringRef Msg,
+ SourceRange Range,
+ const Expr *E,
+ CheckerContext &C) const {
+ if (!BT)
+ BT.reset(new APIMisuse(this, "nil argument"));
+
+ auto R = llvm::make_unique<BugReport>(*BT, Msg, N);
+ R->addRange(Range);
+ bugreporter::trackNullOrUndefValue(N, E, *R);
+ C.emitReport(std::move(R));
+}
+
+void NilArgChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
+ CheckerContext &C) const {
+ const ObjCInterfaceDecl *ID = msg.getReceiverInterface();
+ if (!ID)
+ return;
+
+ FoundationClass Class = findKnownClass(ID);
+
+ static const unsigned InvalidArgIndex = UINT_MAX;
+ unsigned Arg = InvalidArgIndex;
+ bool CanBeSubscript = false;
+
+ if (Class == FC_NSString) {
+ Selector S = msg.getSelector();
+
+ if (S.isUnarySelector())
+ return;
+
+ if (StringSelectors.empty()) {
+ ASTContext &Ctx = C.getASTContext();
+ Selector Sels[] = {
+ getKeywordSelector(Ctx, "caseInsensitiveCompare", nullptr),
+ getKeywordSelector(Ctx, "compare", nullptr),
+ getKeywordSelector(Ctx, "compare", "options", nullptr),
+ getKeywordSelector(Ctx, "compare", "options", "range", nullptr),
+ getKeywordSelector(Ctx, "compare", "options", "range", "locale",
+ nullptr),
+ getKeywordSelector(Ctx, "componentsSeparatedByCharactersInSet",
+ nullptr),
+ getKeywordSelector(Ctx, "initWithFormat",
+ nullptr),
+ getKeywordSelector(Ctx, "localizedCaseInsensitiveCompare", nullptr),
+ getKeywordSelector(Ctx, "localizedCompare", nullptr),
+ getKeywordSelector(Ctx, "localizedStandardCompare", nullptr),
+ };
+ for (Selector KnownSel : Sels)
+ StringSelectors[KnownSel] = 0;
+ }
+ auto I = StringSelectors.find(S);
+ if (I == StringSelectors.end())
+ return;
+ Arg = I->second;
+ } else if (Class == FC_NSArray) {
+ Selector S = msg.getSelector();
+
+ if (S.isUnarySelector())
+ return;
+
+ if (ArrayWithObjectSel.isNull()) {
+ ASTContext &Ctx = C.getASTContext();
+ ArrayWithObjectSel = getKeywordSelector(Ctx, "arrayWithObject", nullptr);
+ AddObjectSel = getKeywordSelector(Ctx, "addObject", nullptr);
+ InsertObjectAtIndexSel =
+ getKeywordSelector(Ctx, "insertObject", "atIndex", nullptr);
+ ReplaceObjectAtIndexWithObjectSel =
+ getKeywordSelector(Ctx, "replaceObjectAtIndex", "withObject", nullptr);
+ SetObjectAtIndexedSubscriptSel =
+ getKeywordSelector(Ctx, "setObject", "atIndexedSubscript", nullptr);
+ ArrayByAddingObjectSel =
+ getKeywordSelector(Ctx, "arrayByAddingObject", nullptr);
+ }
+
+ if (S == ArrayWithObjectSel || S == AddObjectSel ||
+ S == InsertObjectAtIndexSel || S == ArrayByAddingObjectSel) {
+ Arg = 0;
+ } else if (S == SetObjectAtIndexedSubscriptSel) {
+ Arg = 0;
+ CanBeSubscript = true;
+ } else if (S == ReplaceObjectAtIndexWithObjectSel) {
+ Arg = 1;
+ }
+ } else if (Class == FC_NSDictionary) {
+ Selector S = msg.getSelector();
+
+ if (S.isUnarySelector())
+ return;
+
+ if (DictionaryWithObjectForKeySel.isNull()) {
+ ASTContext &Ctx = C.getASTContext();
+ DictionaryWithObjectForKeySel =
+ getKeywordSelector(Ctx, "dictionaryWithObject", "forKey", nullptr);
+ SetObjectForKeySel =
+ getKeywordSelector(Ctx, "setObject", "forKey", nullptr);
+ SetObjectForKeyedSubscriptSel =
+ getKeywordSelector(Ctx, "setObject", "forKeyedSubscript", nullptr);
+ RemoveObjectForKeySel =
+ getKeywordSelector(Ctx, "removeObjectForKey", nullptr);
+ }
+
+ if (S == DictionaryWithObjectForKeySel || S == SetObjectForKeySel) {
+ Arg = 0;
+ warnIfNilArg(C, msg, /* Arg */1, Class);
+ } else if (S == SetObjectForKeyedSubscriptSel) {
+ CanBeSubscript = true;
+ Arg = 1;
+ } else if (S == RemoveObjectForKeySel) {
+ Arg = 0;
+ }
+ }
+
+ // If a relevant argument index was found, check it and warn if nil.
+ if (Arg != InvalidArgIndex)
+ warnIfNilArg(C, msg, Arg, Class, CanBeSubscript);
+}
+
+void NilArgChecker::checkPostStmt(const ObjCArrayLiteral *AL,
+ CheckerContext &C) const {
+ unsigned NumOfElements = AL->getNumElements();
+ for (unsigned i = 0; i < NumOfElements; ++i) {
+ warnIfNilExpr(AL->getElement(i), "Array element cannot be nil", C);
+ }
+}
+
+void NilArgChecker::checkPostStmt(const ObjCDictionaryLiteral *DL,
+ CheckerContext &C) const {
+ unsigned NumOfElements = DL->getNumElements();
+ for (unsigned i = 0; i < NumOfElements; ++i) {
+ ObjCDictionaryElement Element = DL->getKeyValueElement(i);
+ warnIfNilExpr(Element.Key, "Dictionary key cannot be nil", C);
+ warnIfNilExpr(Element.Value, "Dictionary value cannot be nil", C);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Error reporting.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CFNumberCreateChecker : public Checker< check::PreStmt<CallExpr> > {
+ mutable std::unique_ptr<APIMisuse> BT;
+ mutable IdentifierInfo* II;
+public:
+ CFNumberCreateChecker() : II(nullptr) {}
+
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+
+private:
+ void EmitError(const TypedRegion* R, const Expr *Ex,
+ uint64_t SourceSize, uint64_t TargetSize, uint64_t NumberKind);
+};
+} // end anonymous namespace
+
+enum CFNumberType {
+ kCFNumberSInt8Type = 1,
+ kCFNumberSInt16Type = 2,
+ kCFNumberSInt32Type = 3,
+ kCFNumberSInt64Type = 4,
+ kCFNumberFloat32Type = 5,
+ kCFNumberFloat64Type = 6,
+ kCFNumberCharType = 7,
+ kCFNumberShortType = 8,
+ kCFNumberIntType = 9,
+ kCFNumberLongType = 10,
+ kCFNumberLongLongType = 11,
+ kCFNumberFloatType = 12,
+ kCFNumberDoubleType = 13,
+ kCFNumberCFIndexType = 14,
+ kCFNumberNSIntegerType = 15,
+ kCFNumberCGFloatType = 16
+};
+
+static Optional<uint64_t> GetCFNumberSize(ASTContext &Ctx, uint64_t i) {
+ static const unsigned char FixedSize[] = { 8, 16, 32, 64, 32, 64 };
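+ // FixedSize is indexed by (type - 1); the first six CFNumberType values
+ // have fixed bit widths independent of the target.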
+
+ if (i < kCFNumberCharType)
+ return FixedSize[i-1];
+
+ QualType T;
+
+ switch (i) {
+ case kCFNumberCharType: T = Ctx.CharTy; break;
+ case kCFNumberShortType: T = Ctx.ShortTy; break;
+ case kCFNumberIntType: T = Ctx.IntTy; break;
+ case kCFNumberLongType: T = Ctx.LongTy; break;
+ case kCFNumberLongLongType: T = Ctx.LongLongTy; break;
+ case kCFNumberFloatType: T = Ctx.FloatTy; break;
+ case kCFNumberDoubleType: T = Ctx.DoubleTy; break;
+ case kCFNumberCFIndexType:
+ case kCFNumberNSIntegerType:
+ case kCFNumberCGFloatType:
+ // FIXME: We need a way to map from names to Type*.
+ default:
+ return None;
+ }
+
+ return Ctx.getTypeSize(T);
+}
+
+#if 0
+static const char* GetCFNumberTypeStr(uint64_t i) {
+ static const char* Names[] = {
+ "kCFNumberSInt8Type",
+ "kCFNumberSInt16Type",
+ "kCFNumberSInt32Type",
+ "kCFNumberSInt64Type",
+ "kCFNumberFloat32Type",
+ "kCFNumberFloat64Type",
+ "kCFNumberCharType",
+ "kCFNumberShortType",
+ "kCFNumberIntType",
+ "kCFNumberLongType",
+ "kCFNumberLongLongType",
+ "kCFNumberFloatType",
+ "kCFNumberDoubleType",
+ "kCFNumberCFIndexType",
+ "kCFNumberNSIntegerType",
+ "kCFNumberCGFloatType"
+ };
+
+ return i <= kCFNumberCGFloatType ? Names[i-1] : "Invalid CFNumberType";
+}
+#endif
+
+void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD)
+ return;
+
+ ASTContext &Ctx = C.getASTContext();
+ if (!II)
+ II = &Ctx.Idents.get("CFNumberCreate");
+
+ if (FD->getIdentifier() != II || CE->getNumArgs() != 3)
+ return;
+
+ // Get the value of the "theType" argument.
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal TheTypeVal = state->getSVal(CE->getArg(1), LCtx);
+
+ // FIXME: We really should allow ranges of valid theType values, and
+ // bifurcate the state appropriately.
+ Optional<nonloc::ConcreteInt> V = TheTypeVal.getAs<nonloc::ConcreteInt>();
+ if (!V)
+ return;
+
+ uint64_t NumberKind = V->getValue().getLimitedValue();
+ Optional<uint64_t> OptTargetSize = GetCFNumberSize(Ctx, NumberKind);
+
+ // FIXME: In some cases we can emit an error.
+ if (!OptTargetSize)
+ return;
+
+ uint64_t TargetSize = *OptTargetSize;
+
+ // Look at the value of the integer being passed by reference. Essentially
+ // we want to catch cases where the value passed in is not equal to the
+ // size of the type being created.
+ SVal TheValueExpr = state->getSVal(CE->getArg(2), LCtx);
+
+ // FIXME: Eventually we should handle arbitrary locations. We can do this
+ // by having an enhanced memory model that does low-level typing.
+ Optional<loc::MemRegionVal> LV = TheValueExpr.getAs<loc::MemRegionVal>();
+ if (!LV)
+ return;
+
+ const TypedValueRegion* R = dyn_cast<TypedValueRegion>(LV->stripCasts());
+ if (!R)
+ return;
+
+ QualType T = Ctx.getCanonicalType(R->getValueType());
+
+ // FIXME: If the pointee isn't an integer type, should we flag a warning?
+ // People can do weird stuff with pointers.
+
+ if (!T->isIntegralOrEnumerationType())
+ return;
+
+ uint64_t SourceSize = Ctx.getTypeSize(T);
+
+ // CHECK: is SourceSize == TargetSize
+ if (SourceSize == TargetSize)
+ return;
+
+ // Generate an error. Only generate a sink error node
+ // if 'SourceSize < TargetSize'; otherwise generate a non-fatal error node.
+ //
+ // FIXME: We can actually create an abstract "CFNumber" object that has
+ // the bits initialized to the provided values.
+ //
+ ExplodedNode *N = SourceSize < TargetSize ? C.generateErrorNode()
+ : C.generateNonFatalErrorNode();
+ if (N) {
+ SmallString<128> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+
+ os << (SourceSize == 8 ? "An " : "A ")
+ << SourceSize << " bit integer is used to initialize a CFNumber "
+ "object that represents "
+ << (TargetSize == 8 ? "an " : "a ")
+ << TargetSize << " bit integer. ";
+
+ if (SourceSize < TargetSize)
+ os << (TargetSize - SourceSize)
+ << " bits of the CFNumber value will be garbage." ;
+ else
+ os << (SourceSize - TargetSize)
+ << " bits of the input integer will be lost.";
+
+ if (!BT)
+ BT.reset(new APIMisuse(this, "Bad use of CFNumberCreate"));
+
+ auto report = llvm::make_unique<BugReport>(*BT, os.str(), N);
+ report->addRange(CE->getArg(2)->getSourceRange());
+ C.emitReport(std::move(report));
+ }
+}
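+
+// Example of code this checker flags (illustrative, assuming 16-bit short):
+//
+//   short x = 0;
+//   CFNumberRef n = CFNumberCreate(kCFAllocatorDefault,
+//                                  kCFNumberSInt64Type, &x);
+//
+// Here a 16 bit integer initializes a CFNumber that represents a 64 bit
+// integer, so 48 bits of the CFNumber's value would be garbage.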
+
+//===----------------------------------------------------------------------===//
+// CFRetain/CFRelease/CFMakeCollectable/CFAutorelease checking for null arguments.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CFRetainReleaseChecker : public Checker< check::PreStmt<CallExpr> > {
+ mutable std::unique_ptr<APIMisuse> BT;
+ mutable IdentifierInfo *Retain, *Release, *MakeCollectable, *Autorelease;
+public:
+ CFRetainReleaseChecker()
+ : Retain(nullptr), Release(nullptr), MakeCollectable(nullptr),
+ Autorelease(nullptr) {}
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+
+void CFRetainReleaseChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ // If the CallExpr doesn't have exactly 1 argument, just give up checking.
+ if (CE->getNumArgs() != 1)
+ return;
+
+ ProgramStateRef state = C.getState();
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD)
+ return;
+
+ if (!BT) {
+ ASTContext &Ctx = C.getASTContext();
+ Retain = &Ctx.Idents.get("CFRetain");
+ Release = &Ctx.Idents.get("CFRelease");
+ MakeCollectable = &Ctx.Idents.get("CFMakeCollectable");
+ Autorelease = &Ctx.Idents.get("CFAutorelease");
+ BT.reset(new APIMisuse(
+ this, "null passed to CF memory management function"));
+ }
+
+ // Check if we called CFRetain/CFRelease/CFMakeCollectable/CFAutorelease.
+ const IdentifierInfo *FuncII = FD->getIdentifier();
+ if (!(FuncII == Retain || FuncII == Release || FuncII == MakeCollectable ||
+ FuncII == Autorelease))
+ return;
+
+ // FIXME: The rest of this just checks that the argument is non-null.
+ // It should probably be refactored and combined with NonNullParamChecker.
+
+ // Get the argument's value.
+ const Expr *Arg = CE->getArg(0);
+ SVal ArgVal = state->getSVal(Arg, C.getLocationContext());
+ Optional<DefinedSVal> DefArgVal = ArgVal.getAs<DefinedSVal>();
+ if (!DefArgVal)
+ return;
+
+ // Get a NULL value.
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ DefinedSVal zero =
+ svalBuilder.makeZeroVal(Arg->getType()).castAs<DefinedSVal>();
+
+ // Make an expression asserting that they're equal.
+ DefinedOrUnknownSVal ArgIsNull = svalBuilder.evalEQ(state, zero, *DefArgVal);
+
+ // Are they equal?
+ ProgramStateRef stateTrue, stateFalse;
+ std::tie(stateTrue, stateFalse) = state->assume(ArgIsNull);
+
+ if (stateTrue && !stateFalse) {
+ ExplodedNode *N = C.generateErrorNode(stateTrue);
+ if (!N)
+ return;
+
+ const char *description;
+ if (FuncII == Retain)
+ description = "Null pointer argument in call to CFRetain";
+ else if (FuncII == Release)
+ description = "Null pointer argument in call to CFRelease";
+ else if (FuncII == MakeCollectable)
+ description = "Null pointer argument in call to CFMakeCollectable";
+ else if (FuncII == Autorelease)
+ description = "Null pointer argument in call to CFAutorelease";
+ else
+ llvm_unreachable("impossible case");
+
+ auto report = llvm::make_unique<BugReport>(*BT, description, N);
+ report->addRange(Arg->getSourceRange());
+ bugreporter::trackNullOrUndefValue(N, Arg, *report);
+ C.emitReport(std::move(report));
+ return;
+ }
+
+ // From here on, we know the argument is non-null.
+ C.addTransition(stateFalse);
+}
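+
+// (Illustrative: 'CFRelease(NULL)', or any of the four functions above
+// called with an argument constrained to null, is flagged here.)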
+
+//===----------------------------------------------------------------------===//
+// Check for sending 'retain', 'release', or 'autorelease' directly to a Class.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ClassReleaseChecker : public Checker<check::PreObjCMessage> {
+ mutable Selector releaseS;
+ mutable Selector retainS;
+ mutable Selector autoreleaseS;
+ mutable Selector drainS;
+ mutable std::unique_ptr<BugType> BT;
+
+public:
+ void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
+};
+}
+
+void ClassReleaseChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
+ CheckerContext &C) const {
+
+ if (!BT) {
+ BT.reset(new APIMisuse(
+ this, "message incorrectly sent to class instead of class instance"));
+
+ ASTContext &Ctx = C.getASTContext();
+ releaseS = GetNullarySelector("release", Ctx);
+ retainS = GetNullarySelector("retain", Ctx);
+ autoreleaseS = GetNullarySelector("autorelease", Ctx);
+ drainS = GetNullarySelector("drain", Ctx);
+ }
+
+ if (msg.isInstanceMessage())
+ return;
+ const ObjCInterfaceDecl *Class = msg.getReceiverInterface();
+ assert(Class);
+
+ Selector S = msg.getSelector();
+ if (!(S == releaseS || S == retainS || S == autoreleaseS || S == drainS))
+ return;
+
+ if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
+ SmallString<200> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << "The '";
+ S.print(os);
+ os << "' message should be sent to instances "
+ "of class '" << Class->getName()
+ << "' and not the class directly";
+
+ auto report = llvm::make_unique<BugReport>(*BT, os.str(), N);
+ report->addRange(msg.getSourceRange());
+ C.emitReport(std::move(report));
+ }
+}
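+
+// (Illustrative: '[NSString release]' sends 'release' to the class object
+// itself rather than to an instance, and is flagged here.)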
+
+//===----------------------------------------------------------------------===//
+// Check for passing non-Objective-C types to variadic methods that expect
+// only Objective-C types.
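+// (Illustrative: '[NSArray arrayWithObjects:@"a", 42, nil]' is flagged
+// because '42' is not an Objective-C pointer type.)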
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VariadicMethodTypeChecker : public Checker<check::PreObjCMessage> {
+ mutable Selector arrayWithObjectsS;
+ mutable Selector dictionaryWithObjectsAndKeysS;
+ mutable Selector setWithObjectsS;
+ mutable Selector orderedSetWithObjectsS;
+ mutable Selector initWithObjectsS;
+ mutable Selector initWithObjectsAndKeysS;
+ mutable std::unique_ptr<BugType> BT;
+
+ bool isVariadicMessage(const ObjCMethodCall &msg) const;
+
+public:
+ void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
+};
+}
+
+/// isVariadicMessage - Returns whether the given message is a variadic message,
+/// where all arguments must be Objective-C types.
+bool
+VariadicMethodTypeChecker::isVariadicMessage(const ObjCMethodCall &msg) const {
+ const ObjCMethodDecl *MD = msg.getDecl();
+
+ if (!MD || !MD->isVariadic() || isa<ObjCProtocolDecl>(MD->getDeclContext()))
+ return false;
+
+ Selector S = msg.getSelector();
+
+ if (msg.isInstanceMessage()) {
+ // FIXME: Ideally we'd look at the receiver interface here, but that's not
+ // useful for init, because alloc returns 'id'. In theory, this could lead
+ // to false positives, for example if there existed a class that had an
+ // initWithObjects: implementation that does accept non-Objective-C pointer
+ // types, but the chance of that happening is pretty small compared to the
+ // gains that this analysis gives.
+ const ObjCInterfaceDecl *Class = MD->getClassInterface();
+
+ switch (findKnownClass(Class)) {
+ case FC_NSArray:
+ case FC_NSOrderedSet:
+ case FC_NSSet:
+ return S == initWithObjectsS;
+ case FC_NSDictionary:
+ return S == initWithObjectsAndKeysS;
+ default:
+ return false;
+ }
+ } else {
+ const ObjCInterfaceDecl *Class = msg.getReceiverInterface();
+
+ switch (findKnownClass(Class)) {
+ case FC_NSArray:
+ return S == arrayWithObjectsS;
+ case FC_NSOrderedSet:
+ return S == orderedSetWithObjectsS;
+ case FC_NSSet:
+ return S == setWithObjectsS;
+ case FC_NSDictionary:
+ return S == dictionaryWithObjectsAndKeysS;
+ default:
+ return false;
+ }
+ }
+}
+
+void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
+ CheckerContext &C) const {
+ if (!BT) {
+ BT.reset(new APIMisuse(this,
+ "Arguments passed to variadic method aren't all "
+ "Objective-C pointer types"));
+
+ ASTContext &Ctx = C.getASTContext();
+ arrayWithObjectsS = GetUnarySelector("arrayWithObjects", Ctx);
+ dictionaryWithObjectsAndKeysS =
+ GetUnarySelector("dictionaryWithObjectsAndKeys", Ctx);
+ setWithObjectsS = GetUnarySelector("setWithObjects", Ctx);
+ orderedSetWithObjectsS = GetUnarySelector("orderedSetWithObjects", Ctx);
+
+ initWithObjectsS = GetUnarySelector("initWithObjects", Ctx);
+ initWithObjectsAndKeysS = GetUnarySelector("initWithObjectsAndKeys", Ctx);
+ }
+
+ if (!isVariadicMessage(msg))
+ return;
+
+ // We are not interested in the selector arguments since they have
+ // well-defined types, so the compiler will issue a warning for them.
+ unsigned variadicArgsBegin = msg.getSelector().getNumArgs();
+
+ // We're not interested in the last argument since it has to be nil or the
+ // compiler would have issued a warning for it elsewhere.
+ unsigned variadicArgsEnd = msg.getNumArgs() - 1;
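+ // E.g. in a hypothetical [NSArray arrayWithObjects:x, y, nil] message,
+ // getNumArgs() == 3, so we check the half-open range [1, 2), i.e. only 'y';
+ // 'x' fills the typed selector slot and the trailing nil is the terminator.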
+
+ if (variadicArgsEnd <= variadicArgsBegin)
+ return;
+
+ // Verify that all arguments have Objective-C types.
+ Optional<ExplodedNode*> errorNode;
+
+ for (unsigned I = variadicArgsBegin; I != variadicArgsEnd; ++I) {
+ QualType ArgTy = msg.getArgExpr(I)->getType();
+ if (ArgTy->isObjCObjectPointerType())
+ continue;
+
+ // Block pointers are treated as Objective-C pointers.
+ if (ArgTy->isBlockPointerType())
+ continue;
+
+ // Ignore pointer constants.
+ if (msg.getArgSVal(I).getAs<loc::ConcreteInt>())
+ continue;
+
+ // Ignore pointer types annotated with 'NSObject' attribute.
+ if (C.getASTContext().isObjCNSObjectType(ArgTy))
+ continue;
+
+ // Ignore CF references, which can be toll-free bridged.
+ if (coreFoundation::isCFObjectRef(ArgTy))
+ continue;
+
+ // Generate only one error node to use for all bug reports.
+ if (!errorNode.hasValue())
+ errorNode = C.generateNonFatalErrorNode();
+
+ if (!errorNode.getValue())
+ continue;
+
+ SmallString<128> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+
+ StringRef TypeName = GetReceiverInterfaceName(msg);
+ if (!TypeName.empty())
+ os << "Argument to '" << TypeName << "' method '";
+ else
+ os << "Argument to method '";
+
+ msg.getSelector().print(os);
+ os << "' should be an Objective-C pointer type, not '";
+ ArgTy.print(os, C.getLangOpts());
+ os << "'";
+
+ auto R = llvm::make_unique<BugReport>(*BT, os.str(), errorNode.getValue());
+ R->addRange(msg.getArgSourceRange(I));
+ C.emitReport(std::move(R));
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Improves the modeling of loops over Cocoa collections.
+//===----------------------------------------------------------------------===//
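+
+// For illustration (hypothetical code): after
+//
+//   if ([array count] > 0) {
+//     for (id elem in array)
+//       use(elem);
+//   }
+//
+// the loop body is known to execute at least once, and 'elem' is assumed
+// non-nil when 'array' is one of the known Cocoa collection classes.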
+
+// A map from a container symbol to the symbol for that container's count.
+// We currently remember only the last container count symbol encountered.
+REGISTER_MAP_WITH_PROGRAMSTATE(ContainerCountMap, SymbolRef, SymbolRef)
+REGISTER_MAP_WITH_PROGRAMSTATE(ContainerNonEmptyMap, SymbolRef, bool)
+
+namespace {
+class ObjCLoopChecker
+ : public Checker<check::PostStmt<ObjCForCollectionStmt>,
+ check::PostObjCMessage,
+ check::DeadSymbols,
+ check::PointerEscape > {
+ mutable IdentifierInfo *CountSelectorII;
+
+ bool isCollectionCountMethod(const ObjCMethodCall &M,
+ CheckerContext &C) const;
+
+public:
+ ObjCLoopChecker() : CountSelectorII(nullptr) {}
+ void checkPostStmt(const ObjCForCollectionStmt *FCS, CheckerContext &C) const;
+ void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+ ProgramStateRef checkPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind) const;
+};
+}
+
+static bool isKnownNonNilCollectionType(QualType T) {
+ const ObjCObjectPointerType *PT = T->getAs<ObjCObjectPointerType>();
+ if (!PT)
+ return false;
+
+ const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
+ if (!ID)
+ return false;
+
+ switch (findKnownClass(ID)) {
+ case FC_NSArray:
+ case FC_NSDictionary:
+ case FC_NSEnumerator:
+ case FC_NSOrderedSet:
+ case FC_NSSet:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/// Assumes that the collection is non-nil.
+///
+/// If the collection is known to be nil, returns NULL to indicate an infeasible
+/// path.
+static ProgramStateRef checkCollectionNonNil(CheckerContext &C,
+ ProgramStateRef State,
+ const ObjCForCollectionStmt *FCS) {
+ if (!State)
+ return nullptr;
+
+ SVal CollectionVal = C.getSVal(FCS->getCollection());
+ Optional<DefinedSVal> KnownCollection = CollectionVal.getAs<DefinedSVal>();
+ if (!KnownCollection)
+ return State;
+
+ ProgramStateRef StNonNil, StNil;
+ std::tie(StNonNil, StNil) = State->assume(*KnownCollection);
+ if (StNil && !StNonNil) {
+ // The collection is nil. This path is infeasible.
+ return nullptr;
+ }
+
+ return StNonNil;
+}
+
+/// Assumes that the collection elements are non-nil.
+///
+/// This only applies if the collection is one of those known not to contain
+/// nil values.
+static ProgramStateRef checkElementNonNil(CheckerContext &C,
+ ProgramStateRef State,
+ const ObjCForCollectionStmt *FCS) {
+ if (!State)
+ return nullptr;
+
+ // See if the collection is one where we /know/ the elements are non-nil.
+ if (!isKnownNonNilCollectionType(FCS->getCollection()->getType()))
+ return State;
+
+ const LocationContext *LCtx = C.getLocationContext();
+ const Stmt *Element = FCS->getElement();
+
+ // FIXME: Copied from ExprEngineObjC.
+ Optional<Loc> ElementLoc;
+ if (const DeclStmt *DS = dyn_cast<DeclStmt>(Element)) {
+ const VarDecl *ElemDecl = cast<VarDecl>(DS->getSingleDecl());
+ assert(ElemDecl->getInit() == nullptr);
+ ElementLoc = State->getLValue(ElemDecl, LCtx);
+ } else {
+ ElementLoc = State->getSVal(Element, LCtx).getAs<Loc>();
+ }
+
+ if (!ElementLoc)
+ return State;
+
+ // Go ahead and assume the value is non-nil.
+ SVal Val = State->getSVal(*ElementLoc);
+ return State->assume(Val.castAs<DefinedOrUnknownSVal>(), true);
+}
+
+/// Assumes that the collection is non-empty (or empty, if the Assumption
+/// parameter is false), and returns a null state if that assumption
+/// contradicts what is already known about the collection.
+static ProgramStateRef
+assumeCollectionNonEmpty(CheckerContext &C, ProgramStateRef State,
+ SymbolRef CollectionS, bool Assumption) {
+ if (!State || !CollectionS)
+ return State;
+
+ const SymbolRef *CountS = State->get<ContainerCountMap>(CollectionS);
+ if (!CountS) {
+ const bool *KnownNonEmpty = State->get<ContainerNonEmptyMap>(CollectionS);
+ if (!KnownNonEmpty)
+ return State->set<ContainerNonEmptyMap>(CollectionS, Assumption);
+ return (Assumption == *KnownNonEmpty) ? State : nullptr;
+ }
+
+ SValBuilder &SvalBuilder = C.getSValBuilder();
+ SVal CountGreaterThanZeroVal =
+ SvalBuilder.evalBinOp(State, BO_GT,
+ nonloc::SymbolVal(*CountS),
+ SvalBuilder.makeIntVal(0, (*CountS)->getType()),
+ SvalBuilder.getConditionType());
+ Optional<DefinedSVal> CountGreaterThanZero =
+ CountGreaterThanZeroVal.getAs<DefinedSVal>();
+ if (!CountGreaterThanZero) {
+ // The SValBuilder cannot construct a valid SVal for this condition.
+ // This means we cannot properly reason about it.
+ return State;
+ }
+
+ return State->assume(*CountGreaterThanZero, Assumption);
+}
+
+static ProgramStateRef
+assumeCollectionNonEmpty(CheckerContext &C, ProgramStateRef State,
+ const ObjCForCollectionStmt *FCS,
+ bool Assumption) {
+ if (!State)
+ return nullptr;
+
+ SymbolRef CollectionS =
+ State->getSVal(FCS->getCollection(), C.getLocationContext()).getAsSymbol();
+ return assumeCollectionNonEmpty(C, State, CollectionS, Assumption);
+}
+
+
+/// If the first block edge is a back edge, we are reentering the loop.
+static bool alreadyExecutedAtLeastOneLoopIteration(const ExplodedNode *N,
+ const ObjCForCollectionStmt *FCS) {
+ if (!N)
+ return false;
+
+ ProgramPoint P = N->getLocation();
+ if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
+ return BE->getSrc()->getLoopTarget() == FCS;
+ }
+
+ // Keep looking for a block edge.
+ for (ExplodedNode::const_pred_iterator I = N->pred_begin(),
+ E = N->pred_end(); I != E; ++I) {
+ if (alreadyExecutedAtLeastOneLoopIteration(*I, FCS))
+ return true;
+ }
+
+ return false;
+}
+
+void ObjCLoopChecker::checkPostStmt(const ObjCForCollectionStmt *FCS,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ // Check if this is the branch for the end of the loop.
+ SVal CollectionSentinel = C.getSVal(FCS);
+ if (CollectionSentinel.isZeroConstant()) {
+ if (!alreadyExecutedAtLeastOneLoopIteration(C.getPredecessor(), FCS))
+ State = assumeCollectionNonEmpty(C, State, FCS, /*Assumption*/false);
+
+ // Otherwise, this is a branch that goes through the loop body.
+ } else {
+ State = checkCollectionNonNil(C, State, FCS);
+ State = checkElementNonNil(C, State, FCS);
+ State = assumeCollectionNonEmpty(C, State, FCS, /*Assumption*/true);
+ }
+
+ if (!State)
+ C.generateSink(C.getState(), C.getPredecessor());
+ else if (State != C.getState())
+ C.addTransition(State);
+}
+
+bool ObjCLoopChecker::isCollectionCountMethod(const ObjCMethodCall &M,
+ CheckerContext &C) const {
+ Selector S = M.getSelector();
+ // Initialize the identifiers on first use.
+ if (!CountSelectorII)
+ CountSelectorII = &C.getASTContext().Idents.get("count");
+
+ // If the method returns collection count, record the value.
+ return S.isUnarySelector() &&
+ (S.getIdentifierInfoForSlot(0) == CountSelectorII);
+}
+
+void ObjCLoopChecker::checkPostObjCMessage(const ObjCMethodCall &M,
+ CheckerContext &C) const {
+ if (!M.isInstanceMessage())
+ return;
+
+ const ObjCInterfaceDecl *ClassID = M.getReceiverInterface();
+ if (!ClassID)
+ return;
+
+ FoundationClass Class = findKnownClass(ClassID);
+ if (Class != FC_NSDictionary &&
+ Class != FC_NSArray &&
+ Class != FC_NSSet &&
+ Class != FC_NSOrderedSet)
+ return;
+
+ SymbolRef ContainerS = M.getReceiverSVal().getAsSymbol();
+ if (!ContainerS)
+ return;
+
+ // If this is a call to "count", retrieve the symbolic value it returns and
+ // add it to the map.
+ if (!isCollectionCountMethod(M, C))
+ return;
+
+ const Expr *MsgExpr = M.getOriginExpr();
+ SymbolRef CountS = C.getSVal(MsgExpr).getAsSymbol();
+ if (CountS) {
+ ProgramStateRef State = C.getState();
+
+ C.getSymbolManager().addSymbolDependency(ContainerS, CountS);
+ State = State->set<ContainerCountMap>(ContainerS, CountS);
+
+ if (const bool *NonEmpty = State->get<ContainerNonEmptyMap>(ContainerS)) {
+ State = State->remove<ContainerNonEmptyMap>(ContainerS);
+ State = assumeCollectionNonEmpty(C, State, ContainerS, *NonEmpty);
+ }
+
+ C.addTransition(State);
+ }
+ return;
+}
+
+static SymbolRef getMethodReceiverIfKnownImmutable(const CallEvent *Call) {
+ const ObjCMethodCall *Message = dyn_cast_or_null<ObjCMethodCall>(Call);
+ if (!Message)
+ return nullptr;
+
+ const ObjCMethodDecl *MD = Message->getDecl();
+ if (!MD)
+ return nullptr;
+
+ const ObjCInterfaceDecl *StaticClass;
+ if (isa<ObjCProtocolDecl>(MD->getDeclContext())) {
+ // We can't find out where the method was declared without doing more work.
+ // Instead, see if the receiver is statically typed as a known immutable
+ // collection.
+ StaticClass = Message->getOriginExpr()->getReceiverInterface();
+ } else {
+ StaticClass = MD->getClassInterface();
+ }
+
+ if (!StaticClass)
+ return nullptr;
+
+ switch (findKnownClass(StaticClass, /*IncludeSuper=*/false)) {
+ case FC_None:
+ return nullptr;
+ case FC_NSArray:
+ case FC_NSDictionary:
+ case FC_NSEnumerator:
+ case FC_NSNull:
+ case FC_NSOrderedSet:
+ case FC_NSSet:
+ case FC_NSString:
+ break;
+ }
+
+ return Message->getReceiverSVal().getAsSymbol();
+}
+
+ProgramStateRef
+ObjCLoopChecker::checkPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind) const {
+ SymbolRef ImmutableReceiver = getMethodReceiverIfKnownImmutable(Call);
+
+ // Remove the invalidated symbols from the collection count map.
+ for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
+ E = Escaped.end();
+ I != E; ++I) {
+ SymbolRef Sym = *I;
+
+ // Don't invalidate this symbol's count if we know the method being called
+ // is declared on an immutable class. This isn't completely correct if the
+ // receiver is also passed as an argument, but in most uses of NSArray,
+ // NSDictionary, etc. this isn't likely to happen in a dangerous way.
+ if (Sym == ImmutableReceiver)
+ continue;
+
+ // The symbol escaped. Pessimistically, assume that the count could have
+ // changed.
+ State = State->remove<ContainerCountMap>(Sym);
+ State = State->remove<ContainerNonEmptyMap>(Sym);
+ }
+ return State;
+}
+
+void ObjCLoopChecker::checkDeadSymbols(SymbolReaper &SymReaper,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ // Remove the dead symbols from the collection count map.
+ ContainerCountMapTy Tracked = State->get<ContainerCountMap>();
+ for (ContainerCountMapTy::iterator I = Tracked.begin(),
+ E = Tracked.end(); I != E; ++I) {
+ SymbolRef Sym = I->first;
+ if (SymReaper.isDead(Sym)) {
+ State = State->remove<ContainerCountMap>(Sym);
+ State = State->remove<ContainerNonEmptyMap>(Sym);
+ }
+ }
+
+ C.addTransition(State);
+}
+
+namespace {
+/// \class ObjCNonNilReturnValueChecker
+/// \brief The checker restricts the return values of APIs known to
+/// never (or almost never) return 'nil'.
+class ObjCNonNilReturnValueChecker
+ : public Checker<check::PostObjCMessage,
+ check::PostStmt<ObjCArrayLiteral>,
+ check::PostStmt<ObjCDictionaryLiteral>,
+ check::PostStmt<ObjCBoxedExpr> > {
+ mutable bool Initialized;
+ mutable Selector ObjectAtIndex;
+ mutable Selector ObjectAtIndexedSubscript;
+ mutable Selector NullSelector;
+
+public:
+ ObjCNonNilReturnValueChecker() : Initialized(false) {}
+
+ ProgramStateRef assumeExprIsNonNull(const Expr *NonNullExpr,
+ ProgramStateRef State,
+ CheckerContext &C) const;
+ void assumeExprIsNonNull(const Expr *E, CheckerContext &C) const {
+ C.addTransition(assumeExprIsNonNull(E, C.getState(), C));
+ }
+
+ void checkPostStmt(const ObjCArrayLiteral *E, CheckerContext &C) const {
+ assumeExprIsNonNull(E, C);
+ }
+ void checkPostStmt(const ObjCDictionaryLiteral *E, CheckerContext &C) const {
+ assumeExprIsNonNull(E, C);
+ }
+ void checkPostStmt(const ObjCBoxedExpr *E, CheckerContext &C) const {
+ assumeExprIsNonNull(E, C);
+ }
+
+ void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
+};
+}
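+
+// For illustration, hypothetical expressions whose results this checker
+// assumes to be non-nil:
+//
+//   @[ @1, @2 ];              // Objective-C array literal
+//   [NSNull null];            // the NSNull singleton
+//   [array objectAtIndex:i];  // on NSArray and NSOrderedSet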
+
+ProgramStateRef
+ObjCNonNilReturnValueChecker::assumeExprIsNonNull(const Expr *NonNullExpr,
+ ProgramStateRef State,
+ CheckerContext &C) const {
+ SVal Val = State->getSVal(NonNullExpr, C.getLocationContext());
+ if (Optional<DefinedOrUnknownSVal> DV = Val.getAs<DefinedOrUnknownSVal>())
+ return State->assume(*DV, true);
+ return State;
+}
+
+void ObjCNonNilReturnValueChecker::checkPostObjCMessage(const ObjCMethodCall &M,
+ CheckerContext &C)
+ const {
+ ProgramStateRef State = C.getState();
+
+ if (!Initialized) {
+ ASTContext &Ctx = C.getASTContext();
+ ObjectAtIndex = GetUnarySelector("objectAtIndex", Ctx);
+ ObjectAtIndexedSubscript = GetUnarySelector("objectAtIndexedSubscript", Ctx);
+ NullSelector = GetNullarySelector("null", Ctx);
+ Initialized = true; // Build the cached selectors only once.
+ }
+
+ // Check the receiver type.
+ if (const ObjCInterfaceDecl *Interface = M.getReceiverInterface()) {
+
+ // Assume that object returned from '[self init]' or '[super init]' is not
+ // 'nil' if we are processing an inlined function/method.
+ //
+ // A defensive callee will (and should) check if the object returned by
+ // '[super init]' is 'nil' before doing its own initialization. However,
+ // since 'nil' is rarely returned in practice, we should not warn when the
+ // caller to the defensive constructor uses the object in contexts where
+ // 'nil' is not accepted.
+ if (!C.inTopFrame() && M.getDecl() &&
+ M.getDecl()->getMethodFamily() == OMF_init &&
+ M.isReceiverSelfOrSuper()) {
+ State = assumeExprIsNonNull(M.getOriginExpr(), State, C);
+ }
+
+ FoundationClass Cl = findKnownClass(Interface);
+
+ // Objects returned from
+ // [NSArray|NSOrderedSet]::[ObjectAtIndex|ObjectAtIndexedSubscript]
+ // are never 'nil'.
+ if (Cl == FC_NSArray || Cl == FC_NSOrderedSet) {
+ Selector Sel = M.getSelector();
+ if (Sel == ObjectAtIndex || Sel == ObjectAtIndexedSubscript) {
+ // Go ahead and assume the value is non-nil.
+ State = assumeExprIsNonNull(M.getOriginExpr(), State, C);
+ }
+ }
+
+ // Objects returned from [NSNull null] are not nil.
+ if (Cl == FC_NSNull) {
+ if (M.getSelector() == NullSelector) {
+ // Go ahead and assume the value is non-nil.
+ State = assumeExprIsNonNull(M.getOriginExpr(), State, C);
+ }
+ }
+ }
+ C.addTransition(State);
+}
+
+//===----------------------------------------------------------------------===//
+// Check registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerNilArgChecker(CheckerManager &mgr) {
+ mgr.registerChecker<NilArgChecker>();
+}
+
+void ento::registerCFNumberCreateChecker(CheckerManager &mgr) {
+ mgr.registerChecker<CFNumberCreateChecker>();
+}
+
+void ento::registerCFRetainReleaseChecker(CheckerManager &mgr) {
+ mgr.registerChecker<CFRetainReleaseChecker>();
+}
+
+void ento::registerClassReleaseChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ClassReleaseChecker>();
+}
+
+void ento::registerVariadicMethodTypeChecker(CheckerManager &mgr) {
+ mgr.registerChecker<VariadicMethodTypeChecker>();
+}
+
+void ento::registerObjCLoopChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCLoopChecker>();
+}
+
+void
+ento::registerObjCNonNilReturnValueChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCNonNilReturnValueChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
new file mode 100644
index 0000000..f26f731
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
@@ -0,0 +1,157 @@
+//== BoolAssignmentChecker.cpp - Boolean assignment checker -----*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines BoolAssignmentChecker, a builtin check in ExprEngine that
+// performs checks for assignment of non-Boolean values to Boolean variables.
+//
+//===----------------------------------------------------------------------===//
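+
+// For illustration (hypothetical code): a store whose value is fully
+// constrained to lie outside {0, 1} is flagged, e.g.
+//
+//   BOOL b = -1;  // warning: assignment of a non-Boolean value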
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+ class BoolAssignmentChecker : public Checker< check::Bind > {
+ mutable std::unique_ptr<BuiltinBug> BT;
+ void emitReport(ProgramStateRef state, CheckerContext &C) const;
+ public:
+ void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
+ };
+} // end anonymous namespace
+
+void BoolAssignmentChecker::emitReport(ProgramStateRef state,
+ CheckerContext &C) const {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(state)) {
+ if (!BT)
+ BT.reset(new BuiltinBug(this, "Assignment of a non-Boolean value"));
+ C.emitReport(llvm::make_unique<BugReport>(*BT, BT->getDescription(), N));
+ }
+}
+
+static bool isBooleanType(QualType Ty) {
+ if (Ty->isBooleanType()) // C++ or C99
+ return true;
+
+ if (const TypedefType *TT = Ty->getAs<TypedefType>())
+ return TT->getDecl()->getName() == "BOOL" || // Objective-C
+ TT->getDecl()->getName() == "_Bool" || // stdbool.h < C99
+ TT->getDecl()->getName() == "Boolean"; // MacTypes.h
+
+ return false;
+}
+
+void BoolAssignmentChecker::checkBind(SVal loc, SVal val, const Stmt *S,
+ CheckerContext &C) const {
+
+ // We are only interested in stores into Booleans.
+ const TypedValueRegion *TR =
+ dyn_cast_or_null<TypedValueRegion>(loc.getAsRegion());
+
+ if (!TR)
+ return;
+
+ QualType valTy = TR->getValueType();
+
+ if (!isBooleanType(valTy))
+ return;
+
+ // Get the value of the right-hand side. We only care about values
+ // that are defined (UnknownVals and UndefinedVals are handled by other
+ // checkers).
+ Optional<DefinedSVal> DV = val.getAs<DefinedSVal>();
+ if (!DV)
+ return;
+
+ // Check if the assigned value meets our criteria for correctness. It must
+ // be a value that is either 0 or 1. One way to check this is to see if
+ // the value is possibly < 0 (for a negative value) or greater than 1.
+ ProgramStateRef state = C.getState();
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ ConstraintManager &CM = C.getConstraintManager();
+
+ // First, ensure that the value is >= 0.
+ DefinedSVal zeroVal = svalBuilder.makeIntVal(0, valTy);
+ SVal greaterThanOrEqualToZeroVal =
+ svalBuilder.evalBinOp(state, BO_GE, *DV, zeroVal,
+ svalBuilder.getConditionType());
+
+ Optional<DefinedSVal> greaterThanEqualToZero =
+ greaterThanOrEqualToZeroVal.getAs<DefinedSVal>();
+
+ if (!greaterThanEqualToZero) {
+ // The SValBuilder cannot construct a valid SVal for this condition.
+ // This means we cannot properly reason about it.
+ return;
+ }
+
+ ProgramStateRef stateLT, stateGE;
+ std::tie(stateGE, stateLT) = CM.assumeDual(state, *greaterThanEqualToZero);
+
+ // Is it possible for the value to be less than zero?
+ if (stateLT) {
+ // It is possible for the value to be less than zero. We only
+ // want to emit a warning, however, if that value is fully constrained.
+ // If it is possible for the value to be >= 0, then essentially the
+ // value is underconstrained and there is nothing left to be done.
+ if (!stateGE)
+ emitReport(stateLT, C);
+
+ // In either case, we are done.
+ return;
+ }
+
+ // If we reach here, it must be the case that the value is constrained
+ // to only be >= 0.
+ assert(stateGE == state);
+
+ // At this point we know that the value is >= 0.
+ // Now check to ensure that the value is <= 1.
+ DefinedSVal OneVal = svalBuilder.makeIntVal(1, valTy);
+ SVal lessThanEqToOneVal =
+ svalBuilder.evalBinOp(state, BO_LE, *DV, OneVal,
+ svalBuilder.getConditionType());
+
+ Optional<DefinedSVal> lessThanEqToOne =
+ lessThanEqToOneVal.getAs<DefinedSVal>();
+
+ if (!lessThanEqToOne) {
+ // The SValBuilder cannot construct a valid SVal for this condition.
+ // This means we cannot properly reason about it.
+ return;
+ }
+
+ ProgramStateRef stateGT, stateLE;
+ std::tie(stateLE, stateGT) = CM.assumeDual(state, *lessThanEqToOne);
+
+ // Is it possible for the value to be greater than one?
+ if (stateGT) {
+ // It is possible for the value to be greater than one. We only
+ // want to emit a warning, however, if that value is fully constrained.
+ // If it is possible for the value to be <= 1, then essentially the
+ // value is underconstrained and there is nothing left to be done.
+ if (!stateLE)
+ emitReport(stateGT, C);
+
+ // In either case, we are done.
+ return;
+ }
+
+ // If we reach here, it must be the case that the value is constrained
+ // to only be <= 1.
+ assert(stateLE == state);
+}
+
+void ento::registerBoolAssignmentChecker(CheckerManager &mgr) {
+ mgr.registerChecker<BoolAssignmentChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
new file mode 100644
index 0000000..dab2f61
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
@@ -0,0 +1,102 @@
+//=== BuiltinFunctionChecker.cpp --------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker evaluates clang builtin functions.
+//
+//===----------------------------------------------------------------------===//
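+
+// For illustration (hypothetical code): a call such as
+//
+//   int y = __builtin_expect(x, 1);
+//
+// is modeled as 'y = x', and __builtin_alloca(n) yields a fresh region whose
+// extent is constrained to equal 'n'.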
+
+#include "ClangSACheckers.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class BuiltinFunctionChecker : public Checker<eval::Call> {
+public:
+ bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+};
+
+}
+
+bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ const LocationContext *LCtx = C.getLocationContext();
+ if (!FD)
+ return false;
+
+ switch (FD->getBuiltinID()) {
+ default:
+ return false;
+
+ case Builtin::BI__builtin_unpredictable:
+ case Builtin::BI__builtin_expect:
+ case Builtin::BI__builtin_assume_aligned:
+ case Builtin::BI__builtin_addressof: {
+ // For __builtin_unpredictable, __builtin_expect, and
+ // __builtin_assume_aligned, just return the value of the subexpression.
+ // __builtin_addressof is going from a reference to a pointer, but those
+ // are represented the same way in the analyzer.
+ assert(CE->arg_begin() != CE->arg_end());
+ SVal X = state->getSVal(*(CE->arg_begin()), LCtx);
+ C.addTransition(state->BindExpr(CE, LCtx, X));
+ return true;
+ }
+
+ case Builtin::BI__builtin_alloca: {
+ // FIXME: Refactor into StoreManager itself?
+ MemRegionManager& RM = C.getStoreManager().getRegionManager();
+ const AllocaRegion* R =
+ RM.getAllocaRegion(CE, C.blockCount(), C.getLocationContext());
+
+ // Set the extent of the region in bytes. This enables us to use the
+ // SVal of the argument directly. If we save the extent in bits, we
+ // cannot represent values like symbol*8.
+ DefinedOrUnknownSVal Size =
+ state->getSVal(*(CE->arg_begin()), LCtx).castAs<DefinedOrUnknownSVal>();
+
+ SValBuilder& svalBuilder = C.getSValBuilder();
+ DefinedOrUnknownSVal Extent = R->getExtent(svalBuilder);
+ DefinedOrUnknownSVal extentMatchesSizeArg =
+ svalBuilder.evalEQ(state, Extent, Size);
+ state = state->assume(extentMatchesSizeArg, true);
+ assert(state && "The region should not have any previous constraints");
+
+ C.addTransition(state->BindExpr(CE, LCtx, loc::MemRegionVal(R)));
+ return true;
+ }
+
+ case Builtin::BI__builtin_object_size: {
+ // This must be resolvable at compile time, so we defer to the constant
+ // evaluator for a value.
+ SVal V = UnknownVal();
+ llvm::APSInt Result;
+ if (CE->EvaluateAsInt(Result, C.getASTContext(), Expr::SE_NoSideEffects)) {
+ // Make sure the result has the correct type.
+ SValBuilder &SVB = C.getSValBuilder();
+ BasicValueFactory &BVF = SVB.getBasicValueFactory();
+ BVF.getAPSIntType(CE->getType()).apply(Result);
+ V = SVB.makeIntVal(Result);
+ }
+
+ C.addTransition(state->BindExpr(CE, LCtx, V));
+ return true;
+ }
+ }
+}
+
+void ento::registerBuiltinFunctionChecker(CheckerManager &mgr) {
+ mgr.registerChecker<BuiltinFunctionChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
new file mode 100644
index 0000000..5d78d9b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -0,0 +1,2165 @@
+//= CStringChecker.cpp - Checks calls to C string functions --------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines CStringChecker, which is an assortment of checks on calls
+// to functions in <string.h>.
+//
+//===----------------------------------------------------------------------===//
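+
+// For illustration, hypothetical calls these checks can warn about:
+//
+//   memcpy(p, p + 2, 8);  // overlapping source and destination buffers
+//   strcpy(buf, s);       // null argument or out-of-bounds destination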
+
+#include "ClangSACheckers.h"
+#include "InterCheckerAPI.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class CStringChecker : public Checker< eval::Call,
+ check::PreStmt<DeclStmt>,
+ check::LiveSymbols,
+ check::DeadSymbols,
+ check::RegionChanges
+ > {
+ mutable std::unique_ptr<BugType> BT_Null, BT_Bounds, BT_Overlap,
+ BT_NotCString, BT_AdditionOverflow;
+
+ mutable const char *CurrentFunctionDescription;
+
+public:
+ /// The filter is used to suppress diagnostics that are not enabled by the
+ /// user.
+ struct CStringChecksFilter {
+ DefaultBool CheckCStringNullArg;
+ DefaultBool CheckCStringOutOfBounds;
+ DefaultBool CheckCStringBufferOverlap;
+ DefaultBool CheckCStringNotNullTerm;
+
+ CheckName CheckNameCStringNullArg;
+ CheckName CheckNameCStringOutOfBounds;
+ CheckName CheckNameCStringBufferOverlap;
+ CheckName CheckNameCStringNotNullTerm;
+ };
+
+ CStringChecksFilter Filter;
+
+ static void *getTag() { static int tag; return &tag; }
+
+ bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+ void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const;
+ void checkLiveSymbols(ProgramStateRef state, SymbolReaper &SR) const;
+ void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
+ bool wantsRegionChangeUpdate(ProgramStateRef state) const;
+
+ ProgramStateRef
+ checkRegionChanges(ProgramStateRef state,
+ const InvalidatedSymbols *,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallEvent *Call) const;
+
+ typedef void (CStringChecker::*FnCheck)(CheckerContext &,
+ const CallExpr *) const;
+
+ void evalMemcpy(CheckerContext &C, const CallExpr *CE) const;
+ void evalMempcpy(CheckerContext &C, const CallExpr *CE) const;
+ void evalMemmove(CheckerContext &C, const CallExpr *CE) const;
+ void evalBcopy(CheckerContext &C, const CallExpr *CE) const;
+ void evalCopyCommon(CheckerContext &C, const CallExpr *CE,
+ ProgramStateRef state,
+ const Expr *Size,
+ const Expr *Dest,
+ const Expr *Source,
+ bool Restricted = false,
+ bool IsMempcpy = false) const;
+
+ void evalMemcmp(CheckerContext &C, const CallExpr *CE) const;
+
+ void evalstrLength(CheckerContext &C, const CallExpr *CE) const;
+ void evalstrnLength(CheckerContext &C, const CallExpr *CE) const;
+ void evalstrLengthCommon(CheckerContext &C,
+ const CallExpr *CE,
+ bool IsStrnlen = false) const;
+
+ void evalStrcpy(CheckerContext &C, const CallExpr *CE) const;
+ void evalStrncpy(CheckerContext &C, const CallExpr *CE) const;
+ void evalStpcpy(CheckerContext &C, const CallExpr *CE) const;
+ void evalStrcpyCommon(CheckerContext &C,
+ const CallExpr *CE,
+ bool returnEnd,
+ bool isBounded,
+ bool isAppending) const;
+
+ void evalStrcat(CheckerContext &C, const CallExpr *CE) const;
+ void evalStrncat(CheckerContext &C, const CallExpr *CE) const;
+
+ void evalStrcmp(CheckerContext &C, const CallExpr *CE) const;
+ void evalStrncmp(CheckerContext &C, const CallExpr *CE) const;
+ void evalStrcasecmp(CheckerContext &C, const CallExpr *CE) const;
+ void evalStrncasecmp(CheckerContext &C, const CallExpr *CE) const;
+ void evalStrcmpCommon(CheckerContext &C,
+ const CallExpr *CE,
+ bool isBounded = false,
+ bool ignoreCase = false) const;
+
+ void evalStrsep(CheckerContext &C, const CallExpr *CE) const;
+
+ // Utility methods
+ std::pair<ProgramStateRef , ProgramStateRef >
+ static assumeZero(CheckerContext &C,
+ ProgramStateRef state, SVal V, QualType Ty);
+
+ static ProgramStateRef setCStringLength(ProgramStateRef state,
+ const MemRegion *MR,
+ SVal strLength);
+ static SVal getCStringLengthForRegion(CheckerContext &C,
+ ProgramStateRef &state,
+ const Expr *Ex,
+ const MemRegion *MR,
+ bool hypothetical);
+ SVal getCStringLength(CheckerContext &C,
+ ProgramStateRef &state,
+ const Expr *Ex,
+ SVal Buf,
+ bool hypothetical = false) const;
+
+ const StringLiteral *getCStringLiteral(CheckerContext &C,
+ ProgramStateRef &state,
+ const Expr *expr,
+ SVal val) const;
+
+ static ProgramStateRef InvalidateBuffer(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *Ex, SVal V,
+ bool IsSourceBuffer,
+ const Expr *Size);
+
+ static bool SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
+ const MemRegion *MR);
+
+ // Re-usable checks
+ ProgramStateRef checkNonNull(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *S,
+ SVal l) const;
+ ProgramStateRef CheckLocation(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *S,
+ SVal l,
+ const char *message = nullptr) const;
+ ProgramStateRef CheckBufferAccess(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *Size,
+ const Expr *FirstBuf,
+ const Expr *SecondBuf,
+ const char *firstMessage = nullptr,
+ const char *secondMessage = nullptr,
+ bool WarnAboutSize = false) const;
+
+ ProgramStateRef CheckBufferAccess(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *Size,
+ const Expr *Buf,
+ const char *message = nullptr,
+ bool WarnAboutSize = false) const {
+ // This is a convenience overload.
+ return CheckBufferAccess(C, state, Size, Buf, nullptr, message, nullptr,
+ WarnAboutSize);
+ }
+ ProgramStateRef CheckOverlap(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *Size,
+ const Expr *First,
+ const Expr *Second) const;
+ void emitOverlapBug(CheckerContext &C,
+ ProgramStateRef state,
+ const Stmt *First,
+ const Stmt *Second) const;
+
+ ProgramStateRef checkAdditionOverflow(CheckerContext &C,
+ ProgramStateRef state,
+ NonLoc left,
+ NonLoc right) const;
+
+ // Return true if the destination buffer of the copy function may be in
+ // bounds. Expects the SVal of Size to be positive and unsigned and the SVal
+ // of FirstBuf to be a FieldRegion.
+ static bool IsFirstBufInBound(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *FirstBuf,
+ const Expr *Size);
+};
+
+} //end anonymous namespace
+
+REGISTER_MAP_WITH_PROGRAMSTATE(CStringLength, const MemRegion *, SVal)
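+// E.g. after evaluating a hypothetical strlen(s), the region of 's' is mapped
+// to the symbolic length that was returned, so a later strlen(s) reuses the
+// same symbol instead of conjuring a new one.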
+
+//===----------------------------------------------------------------------===//
+// Individual checks and utility methods.
+//===----------------------------------------------------------------------===//
+
+std::pair<ProgramStateRef , ProgramStateRef >
+CStringChecker::assumeZero(CheckerContext &C, ProgramStateRef state, SVal V,
+ QualType Ty) {
+ Optional<DefinedSVal> val = V.getAs<DefinedSVal>();
+ if (!val)
+ return std::pair<ProgramStateRef , ProgramStateRef >(state, state);
+
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ DefinedOrUnknownSVal zero = svalBuilder.makeZeroVal(Ty);
+ return state->assume(svalBuilder.evalEQ(state, *val, zero));
+}
+
+ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *S, SVal l) const {
+ // If a previous check has failed, propagate the failure.
+ if (!state)
+ return nullptr;
+
+ ProgramStateRef stateNull, stateNonNull;
+ std::tie(stateNull, stateNonNull) = assumeZero(C, state, l, S->getType());
+
+ if (stateNull && !stateNonNull) {
+ if (!Filter.CheckCStringNullArg)
+ return nullptr;
+
+ ExplodedNode *N = C.generateErrorNode(stateNull);
+ if (!N)
+ return nullptr;
+
+ if (!BT_Null)
+ BT_Null.reset(new BuiltinBug(
+ Filter.CheckNameCStringNullArg, categories::UnixAPI,
+ "Null pointer argument in call to byte string function"));
+
+ SmallString<80> buf;
+ llvm::raw_svector_ostream os(buf);
+ assert(CurrentFunctionDescription);
+ os << "Null pointer argument in call to " << CurrentFunctionDescription;
+
+ // Generate a report for this bug.
+ BuiltinBug *BT = static_cast<BuiltinBug*>(BT_Null.get());
+ auto report = llvm::make_unique<BugReport>(*BT, os.str(), N);
+
+ report->addRange(S->getSourceRange());
+ bugreporter::trackNullOrUndefValue(N, S, *report);
+ C.emitReport(std::move(report));
+ return nullptr;
+ }
+
+ // From here on, assume that the value is non-null.
+ assert(stateNonNull);
+ return stateNonNull;
+}
+
+// FIXME: This was originally copied from ArrayBoundChecker.cpp. Refactor?
+ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *S, SVal l,
+ const char *warningMsg) const {
+ // If a previous check has failed, propagate the failure.
+ if (!state)
+ return nullptr;
+
+ // Check for out of bound array element access.
+ const MemRegion *R = l.getAsRegion();
+ if (!R)
+ return state;
+
+ const ElementRegion *ER = dyn_cast<ElementRegion>(R);
+ if (!ER)
+ return state;
+
+ assert(ER->getValueType() == C.getASTContext().CharTy &&
+ "CheckLocation should only be called with char* ElementRegions");
+
+ // Get the size of the array.
+ const SubRegion *superReg = cast<SubRegion>(ER->getSuperRegion());
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ SVal Extent =
+ svalBuilder.convertToArrayIndex(superReg->getExtent(svalBuilder));
+ DefinedOrUnknownSVal Size = Extent.castAs<DefinedOrUnknownSVal>();
+
+ // Get the index of the accessed element.
+ DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
+
+ ProgramStateRef StInBound = state->assumeInBound(Idx, Size, true);
+ ProgramStateRef StOutBound = state->assumeInBound(Idx, Size, false);
+ if (StOutBound && !StInBound) {
+ ExplodedNode *N = C.generateErrorNode(StOutBound);
+ if (!N)
+ return nullptr;
+
+ if (!BT_Bounds) {
+ BT_Bounds.reset(new BuiltinBug(
+ Filter.CheckNameCStringOutOfBounds, "Out-of-bound array access",
+ "Byte string function accesses out-of-bound array element"));
+ }
+ BuiltinBug *BT = static_cast<BuiltinBug*>(BT_Bounds.get());
+
+ // Generate a report for this bug.
+ std::unique_ptr<BugReport> report;
+ if (warningMsg) {
+ report = llvm::make_unique<BugReport>(*BT, warningMsg, N);
+ } else {
+ assert(CurrentFunctionDescription);
+ assert(CurrentFunctionDescription[0] != '\0');
+
+ SmallString<80> buf;
+ llvm::raw_svector_ostream os(buf);
+ os << toUppercase(CurrentFunctionDescription[0])
+ << &CurrentFunctionDescription[1]
+ << " accesses out-of-bound array element";
+ report = llvm::make_unique<BugReport>(*BT, os.str(), N);
+ }
+
+ // FIXME: It would be nice to eventually make this diagnostic more clear,
+ // e.g., by referencing the original declaration or by saying *why* this
+ // reference is outside the range.
+
+ report->addRange(S->getSourceRange());
+ C.emitReport(std::move(report));
+ return nullptr;
+ }
+
+ // Array bound check succeeded. From this point forward the array bound
+ // should always succeed.
+ return StInBound;
+}
+
+ProgramStateRef CStringChecker::CheckBufferAccess(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *Size,
+ const Expr *FirstBuf,
+ const Expr *SecondBuf,
+ const char *firstMessage,
+ const char *secondMessage,
+ bool WarnAboutSize) const {
+ // If a previous check has failed, propagate the failure.
+ if (!state)
+ return nullptr;
+
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ ASTContext &Ctx = svalBuilder.getContext();
+ const LocationContext *LCtx = C.getLocationContext();
+
+ QualType sizeTy = Size->getType();
+ QualType PtrTy = Ctx.getPointerType(Ctx.CharTy);
+
+ // Check that the first buffer is non-null.
+ SVal BufVal = state->getSVal(FirstBuf, LCtx);
+ state = checkNonNull(C, state, FirstBuf, BufVal);
+ if (!state)
+ return nullptr;
+
+ // If out-of-bounds checking is turned off, skip the rest.
+ if (!Filter.CheckCStringOutOfBounds)
+ return state;
+
+ // Get the access length and make sure it is known.
+ // FIXME: This assumes the caller has already checked that the access length
+ // is positive. And that it's unsigned.
+ SVal LengthVal = state->getSVal(Size, LCtx);
+ Optional<NonLoc> Length = LengthVal.getAs<NonLoc>();
+ if (!Length)
+ return state;
+
+ // Compute the offset of the last element to be accessed: size-1.
+ NonLoc One = svalBuilder.makeIntVal(1, sizeTy).castAs<NonLoc>();
+ NonLoc LastOffset = svalBuilder
+ .evalBinOpNN(state, BO_Sub, *Length, One, sizeTy).castAs<NonLoc>();
+
+ // Check that the first buffer is sufficiently long.
+ SVal BufStart = svalBuilder.evalCast(BufVal, PtrTy, FirstBuf->getType());
+ if (Optional<Loc> BufLoc = BufStart.getAs<Loc>()) {
+ const Expr *warningExpr = (WarnAboutSize ? Size : FirstBuf);
+
+ SVal BufEnd = svalBuilder.evalBinOpLN(state, BO_Add, *BufLoc,
+ LastOffset, PtrTy);
+ state = CheckLocation(C, state, warningExpr, BufEnd, firstMessage);
+
+ // If the buffer isn't large enough, abort.
+ if (!state)
+ return nullptr;
+ }
+
+ // If there's a second buffer, check it as well.
+ if (SecondBuf) {
+ BufVal = state->getSVal(SecondBuf, LCtx);
+ state = checkNonNull(C, state, SecondBuf, BufVal);
+ if (!state)
+ return nullptr;
+
+ BufStart = svalBuilder.evalCast(BufVal, PtrTy, SecondBuf->getType());
+ if (Optional<Loc> BufLoc = BufStart.getAs<Loc>()) {
+ const Expr *warningExpr = (WarnAboutSize ? Size : SecondBuf);
+
+ SVal BufEnd = svalBuilder.evalBinOpLN(state, BO_Add, *BufLoc,
+ LastOffset, PtrTy);
+ state = CheckLocation(C, state, warningExpr, BufEnd, secondMessage);
+ }
+ }
+
+ // Large enough or not, return this state!
+ return state;
+}
+
+ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *Size,
+ const Expr *First,
+ const Expr *Second) const {
+ if (!Filter.CheckCStringBufferOverlap)
+ return state;
+
+ // Do a simple check for overlap: if the two arguments are from the same
+ // buffer, see if the end of the first is greater than the start of the second
+ // or vice versa.
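+ // E.g. for a hypothetical memcpy(p, p + 2, 8): the end of the first buffer,
+ // p + 8, lies past the start of the second, p + 2, so the buffers overlap.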
+
+ // If a previous check has failed, propagate the failure.
+ if (!state)
+ return nullptr;
+
+ ProgramStateRef stateTrue, stateFalse;
+
+ // Get the buffer values and make sure they're known locations.
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal firstVal = state->getSVal(First, LCtx);
+ SVal secondVal = state->getSVal(Second, LCtx);
+
+ Optional<Loc> firstLoc = firstVal.getAs<Loc>();
+ if (!firstLoc)
+ return state;
+
+ Optional<Loc> secondLoc = secondVal.getAs<Loc>();
+ if (!secondLoc)
+ return state;
+
+ // Are the two values the same?
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ std::tie(stateTrue, stateFalse) =
+ state->assume(svalBuilder.evalEQ(state, *firstLoc, *secondLoc));
+
+ if (stateTrue && !stateFalse) {
+ // If the values are known to be equal, that's automatically an overlap.
+ emitOverlapBug(C, stateTrue, First, Second);
+ return nullptr;
+ }
+
+ // Assume the two expressions are not equal.
+ assert(stateFalse);
+ state = stateFalse;
+
+ // Which value comes first?
+ QualType cmpTy = svalBuilder.getConditionType();
+ SVal reverse = svalBuilder.evalBinOpLL(state, BO_GT,
+ *firstLoc, *secondLoc, cmpTy);
+ Optional<DefinedOrUnknownSVal> reverseTest =
+ reverse.getAs<DefinedOrUnknownSVal>();
+ if (!reverseTest)
+ return state;
+
+ std::tie(stateTrue, stateFalse) = state->assume(*reverseTest);
+ if (stateTrue) {
+ if (stateFalse) {
+ // If we don't know which one comes first, we can't perform this test.
+ return state;
+ } else {
+ // Switch the values so that firstVal is before secondVal.
+ std::swap(firstLoc, secondLoc);
+
+ // Switch the Exprs as well, so that they still correspond.
+ std::swap(First, Second);
+ }
+ }
+
+ // Get the length, and make sure it too is known.
+ SVal LengthVal = state->getSVal(Size, LCtx);
+ Optional<NonLoc> Length = LengthVal.getAs<NonLoc>();
+ if (!Length)
+ return state;
+
+ // Convert the first buffer's start address to char*.
+ // Bail out if the cast fails.
+ ASTContext &Ctx = svalBuilder.getContext();
+ QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy);
+ SVal FirstStart = svalBuilder.evalCast(*firstLoc, CharPtrTy,
+ First->getType());
+ Optional<Loc> FirstStartLoc = FirstStart.getAs<Loc>();
+ if (!FirstStartLoc)
+ return state;
+
+ // Compute the end of the first buffer. Bail out if THAT fails.
+ SVal FirstEnd = svalBuilder.evalBinOpLN(state, BO_Add,
+ *FirstStartLoc, *Length, CharPtrTy);
+ Optional<Loc> FirstEndLoc = FirstEnd.getAs<Loc>();
+ if (!FirstEndLoc)
+ return state;
+
+ // Is the end of the first buffer past the start of the second buffer?
+ SVal Overlap = svalBuilder.evalBinOpLL(state, BO_GT,
+ *FirstEndLoc, *secondLoc, cmpTy);
+ Optional<DefinedOrUnknownSVal> OverlapTest =
+ Overlap.getAs<DefinedOrUnknownSVal>();
+ if (!OverlapTest)
+ return state;
+
+ std::tie(stateTrue, stateFalse) = state->assume(*OverlapTest);
+
+ if (stateTrue && !stateFalse) {
+ // Overlap!
+ emitOverlapBug(C, stateTrue, First, Second);
+ return nullptr;
+ }
+
+ // Assume the two expressions don't overlap.
+ assert(stateFalse);
+ return stateFalse;
+}
+
+void CStringChecker::emitOverlapBug(CheckerContext &C, ProgramStateRef state,
+ const Stmt *First, const Stmt *Second) const {
+ ExplodedNode *N = C.generateErrorNode(state);
+ if (!N)
+ return;
+
+ if (!BT_Overlap)
+ BT_Overlap.reset(new BugType(Filter.CheckNameCStringBufferOverlap,
+ categories::UnixAPI, "Improper arguments"));
+
+ // Generate a report for this bug.
+ auto report = llvm::make_unique<BugReport>(
+ *BT_Overlap, "Arguments must not be overlapping buffers", N);
+ report->addRange(First->getSourceRange());
+ report->addRange(Second->getSourceRange());
+
+ C.emitReport(std::move(report));
+}
+
+ProgramStateRef CStringChecker::checkAdditionOverflow(CheckerContext &C,
+ ProgramStateRef state,
+ NonLoc left,
+ NonLoc right) const {
+ // If out-of-bounds checking is turned off, skip the rest.
+ if (!Filter.CheckCStringOutOfBounds)
+ return state;
+
+ // If a previous check has failed, propagate the failure.
+ if (!state)
+ return nullptr;
+
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ BasicValueFactory &BVF = svalBuilder.getBasicValueFactory();
+
+ QualType sizeTy = svalBuilder.getContext().getSizeType();
+ const llvm::APSInt &maxValInt = BVF.getMaxValue(sizeTy);
+ NonLoc maxVal = svalBuilder.makeIntVal(maxValInt);
+
+ SVal maxMinusRight;
+ if (right.getAs<nonloc::ConcreteInt>()) {
+ maxMinusRight = svalBuilder.evalBinOpNN(state, BO_Sub, maxVal, right,
+ sizeTy);
+ } else {
+ // Try switching the operands. (The order of these two assignments is
+ // important!)
+ maxMinusRight = svalBuilder.evalBinOpNN(state, BO_Sub, maxVal, left,
+ sizeTy);
+ left = right;
+ }
+
+ if (Optional<NonLoc> maxMinusRightNL = maxMinusRight.getAs<NonLoc>()) {
+ QualType cmpTy = svalBuilder.getConditionType();
+ // If left > max - right, we have an overflow.
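+ // E.g. left == 3 and right == SIZE_MAX - 1 gives 3 > 1, so the sum would
+ // overflow.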
+ SVal willOverflow = svalBuilder.evalBinOpNN(state, BO_GT, left,
+ *maxMinusRightNL, cmpTy);
+
+ ProgramStateRef stateOverflow, stateOkay;
+ std::tie(stateOverflow, stateOkay) =
+ state->assume(willOverflow.castAs<DefinedOrUnknownSVal>());
+
+ if (stateOverflow && !stateOkay) {
+ // We have an overflow. Emit a bug report.
+ ExplodedNode *N = C.generateErrorNode(stateOverflow);
+ if (!N)
+ return nullptr;
+
+ if (!BT_AdditionOverflow)
+ BT_AdditionOverflow.reset(
+ new BuiltinBug(Filter.CheckNameCStringOutOfBounds, "API",
+ "Sum of expressions causes overflow"));
+
+ // This isn't a great error message, but this should never occur in real
+ // code anyway -- you'd have to create a buffer longer than a size_t can
+ // represent, which is sort of a contradiction.
+ const char *warning =
+ "This expression will create a string whose length is too big to "
+ "be represented as a size_t";
+
+ // Generate a report for this bug.
+ C.emitReport(
+ llvm::make_unique<BugReport>(*BT_AdditionOverflow, warning, N));
+
+ return nullptr;
+ }
+
+ // From now on, assume an overflow didn't occur.
+ assert(stateOkay);
+ state = stateOkay;
+ }
+
+ return state;
+}
+
+ProgramStateRef CStringChecker::setCStringLength(ProgramStateRef state,
+ const MemRegion *MR,
+ SVal strLength) {
+ assert(!strLength.isUndef() && "Attempt to set an undefined string length");
+
+ MR = MR->StripCasts();
+
+ switch (MR->getKind()) {
+ case MemRegion::StringRegionKind:
+ // FIXME: This can happen if we strcpy() into a string region. This is
+ // undefined [C99 6.4.5p6], but we should still warn about it.
+ return state;
+
+ case MemRegion::SymbolicRegionKind:
+ case MemRegion::AllocaRegionKind:
+ case MemRegion::VarRegionKind:
+ case MemRegion::FieldRegionKind:
+ case MemRegion::ObjCIvarRegionKind:
+ // These are the types we can currently track string lengths for.
+ break;
+
+ case MemRegion::ElementRegionKind:
+ // FIXME: Handle element regions by upper-bounding the parent region's
+ // string length.
+ return state;
+
+ default:
+ // Other regions (mostly non-data) can't have a reliable C string length.
+ // For now, just ignore the change.
+ // FIXME: These are rare but not impossible. We should output some kind of
+ // warning for things like strcpy((char[]){'a', 0}, "b");
+ return state;
+ }
+
+ if (strLength.isUnknown())
+ return state->remove<CStringLength>(MR);
+
+ return state->set<CStringLength>(MR, strLength);
+}
+
+SVal CStringChecker::getCStringLengthForRegion(CheckerContext &C,
+ ProgramStateRef &state,
+ const Expr *Ex,
+ const MemRegion *MR,
+ bool hypothetical) {
+ if (!hypothetical) {
+ // If there's a recorded length, go ahead and return it.
+ const SVal *Recorded = state->get<CStringLength>(MR);
+ if (Recorded)
+ return *Recorded;
+ }
+
+ // Otherwise, get a new symbol and update the state.
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ QualType sizeTy = svalBuilder.getContext().getSizeType();
+ SVal strLength = svalBuilder.getMetadataSymbolVal(CStringChecker::getTag(),
+ MR, Ex, sizeTy,
+ C.blockCount());
+
+ if (!hypothetical) {
+ if (Optional<NonLoc> strLn = strLength.getAs<NonLoc>()) {
+ // In case of unbounded calls such as strlen, bound the range to SIZE_MAX/4.
+ BasicValueFactory &BVF = svalBuilder.getBasicValueFactory();
+ const llvm::APSInt &maxValInt = BVF.getMaxValue(sizeTy);
+ llvm::APSInt fourInt = APSIntType(maxValInt).getValue(4);
+ const llvm::APSInt *maxLengthInt = BVF.evalAPSInt(BO_Div, maxValInt,
+ fourInt);
+ NonLoc maxLength = svalBuilder.makeIntVal(*maxLengthInt);
+ SVal evalLength = svalBuilder.evalBinOpNN(state, BO_LE, *strLn,
+ maxLength, sizeTy);
+ state = state->assume(evalLength.castAs<DefinedOrUnknownSVal>(), true);
+ }
+ state = state->set<CStringLength>(MR, strLength);
+ }
+
+ return strLength;
+}
+
+SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
+ const Expr *Ex, SVal Buf,
+ bool hypothetical) const {
+ const MemRegion *MR = Buf.getAsRegion();
+ if (!MR) {
+ // If we can't get a region, see if it's something we /know/ isn't a
+ // C string. In the context of locations, the only time we can issue such
+ // a warning is for labels.
+ if (Optional<loc::GotoLabel> Label = Buf.getAs<loc::GotoLabel>()) {
+ if (!Filter.CheckCStringNotNullTerm)
+ return UndefinedVal();
+
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(state)) {
+ if (!BT_NotCString)
+ BT_NotCString.reset(new BuiltinBug(
+ Filter.CheckNameCStringNotNullTerm, categories::UnixAPI,
+ "Argument is not a null-terminated string."));
+
+ SmallString<120> buf;
+ llvm::raw_svector_ostream os(buf);
+ assert(CurrentFunctionDescription);
+ os << "Argument to " << CurrentFunctionDescription
+ << " is the address of the label '" << Label->getLabel()->getName()
+ << "', which is not a null-terminated string";
+
+ // Generate a report for this bug.
+ auto report = llvm::make_unique<BugReport>(*BT_NotCString, os.str(), N);
+
+ report->addRange(Ex->getSourceRange());
+ C.emitReport(std::move(report));
+ }
+ return UndefinedVal();
+
+ }
+
+ // If it's not a region and not a label, give up.
+ return UnknownVal();
+ }
+
+ // If we have a region, strip casts from it and see if we can figure out
+ // its length. For anything we can't figure out, just return UnknownVal.
+ MR = MR->StripCasts();
+
+ switch (MR->getKind()) {
+ case MemRegion::StringRegionKind: {
+ // Modifying the contents of string regions is undefined [C99 6.4.5p6],
+ // so we can assume that the byte length is the correct C string length.
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ QualType sizeTy = svalBuilder.getContext().getSizeType();
+ const StringLiteral *strLit = cast<StringRegion>(MR)->getStringLiteral();
+ return svalBuilder.makeIntVal(strLit->getByteLength(), sizeTy);
+ }
+ case MemRegion::SymbolicRegionKind:
+ case MemRegion::AllocaRegionKind:
+ case MemRegion::VarRegionKind:
+ case MemRegion::FieldRegionKind:
+ case MemRegion::ObjCIvarRegionKind:
+ return getCStringLengthForRegion(C, state, Ex, MR, hypothetical);
+ case MemRegion::CompoundLiteralRegionKind:
+ // FIXME: Can we track this? Is it necessary?
+ return UnknownVal();
+ case MemRegion::ElementRegionKind:
+ // FIXME: How can we handle this? It's not good enough to subtract the
+ // offset from the base string length; consider "123\x00567" and &a[5].
+ return UnknownVal();
+ default:
+ // Other regions (mostly non-data) can't have a reliable C string length.
+ // In this case, an error is emitted and UndefinedVal is returned.
+ // The caller should always be prepared to handle this case.
+ if (!Filter.CheckCStringNotNullTerm)
+ return UndefinedVal();
+
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(state)) {
+ if (!BT_NotCString)
+ BT_NotCString.reset(new BuiltinBug(
+ Filter.CheckNameCStringNotNullTerm, categories::UnixAPI,
+ "Argument is not a null-terminated string."));
+
+ SmallString<120> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ assert(CurrentFunctionDescription);
+ os << "Argument to " << CurrentFunctionDescription << " is ";
+
+ if (SummarizeRegion(os, C.getASTContext(), MR))
+ os << ", which is not a null-terminated string";
+ else
+ os << "not a null-terminated string";
+
+ // Generate a report for this bug.
+ auto report = llvm::make_unique<BugReport>(*BT_NotCString, os.str(), N);
+
+ report->addRange(Ex->getSourceRange());
+ C.emitReport(std::move(report));
+ }
+
+ return UndefinedVal();
+ }
+}
+
+const StringLiteral *CStringChecker::getCStringLiteral(CheckerContext &C,
+ ProgramStateRef &state, const Expr *expr, SVal val) const {
+
+ // Get the memory region pointed to by the val.
+ const MemRegion *bufRegion = val.getAsRegion();
+ if (!bufRegion)
+ return nullptr;
+
+ // Strip casts off the memory region.
+ bufRegion = bufRegion->StripCasts();
+
+ // Cast the memory region to a string region.
+ const StringRegion *strRegion= dyn_cast<StringRegion>(bufRegion);
+ if (!strRegion)
+ return nullptr;
+
+ // Return the actual string in the string region.
+ return strRegion->getStringLiteral();
+}
+
+bool CStringChecker::IsFirstBufInBound(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *FirstBuf,
+ const Expr *Size) {
+ // If we do not know that the buffer is long enough, we return 'true'; if
+ // we did not, the parent region of this field region would also get
+ // invalidated, which would lead to warnings based on an unknown state.
+
+ // Originally copied from CheckBufferAccess and CheckLocation.
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ ASTContext &Ctx = svalBuilder.getContext();
+ const LocationContext *LCtx = C.getLocationContext();
+
+ QualType sizeTy = Size->getType();
+ QualType PtrTy = Ctx.getPointerType(Ctx.CharTy);
+ SVal BufVal = state->getSVal(FirstBuf, LCtx);
+
+ SVal LengthVal = state->getSVal(Size, LCtx);
+ Optional<NonLoc> Length = LengthVal.getAs<NonLoc>();
+ if (!Length)
+ return true; // cf top comment.
+
+ // Compute the offset of the last element to be accessed: size-1.
+ NonLoc One = svalBuilder.makeIntVal(1, sizeTy).castAs<NonLoc>();
+ NonLoc LastOffset =
+ svalBuilder.evalBinOpNN(state, BO_Sub, *Length, One, sizeTy)
+ .castAs<NonLoc>();
+
+ // Check that the first buffer is sufficiently long.
+ SVal BufStart = svalBuilder.evalCast(BufVal, PtrTy, FirstBuf->getType());
+ Optional<Loc> BufLoc = BufStart.getAs<Loc>();
+ if (!BufLoc)
+ return true; // cf top comment.
+
+ SVal BufEnd =
+ svalBuilder.evalBinOpLN(state, BO_Add, *BufLoc, LastOffset, PtrTy);
+
+ // Check for out of bound array element access.
+ const MemRegion *R = BufEnd.getAsRegion();
+ if (!R)
+ return true; // cf top comment.
+
+ const ElementRegion *ER = dyn_cast<ElementRegion>(R);
+ if (!ER)
+ return true; // cf top comment.
+
+ assert(ER->getValueType() == C.getASTContext().CharTy &&
+ "IsFirstBufInBound should only be called with char* ElementRegions");
+
+ // Get the size of the array.
+ const SubRegion *superReg = cast<SubRegion>(ER->getSuperRegion());
+ SVal Extent =
+ svalBuilder.convertToArrayIndex(superReg->getExtent(svalBuilder));
+ DefinedOrUnknownSVal ExtentSize = Extent.castAs<DefinedOrUnknownSVal>();
+
+ // Get the index of the accessed element.
+ DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
+
+ ProgramStateRef StInBound = state->assumeInBound(Idx, ExtentSize, true);
+
+ return static_cast<bool>(StInBound);
+}
+
+ProgramStateRef CStringChecker::InvalidateBuffer(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *E, SVal V,
+ bool IsSourceBuffer,
+ const Expr *Size) {
+ Optional<Loc> L = V.getAs<Loc>();
+ if (!L)
+ return state;
+
+ // FIXME: This is a simplified version of what's in CFRefCount.cpp -- it makes
+ // some assumptions about the value that CFRefCount can't. Even so, it should
+ // probably be refactored.
+ if (Optional<loc::MemRegionVal> MR = L->getAs<loc::MemRegionVal>()) {
+ const MemRegion *R = MR->getRegion()->StripCasts();
+
+ // Are we dealing with an ElementRegion? If so, we should be invalidating
+ // the super-region.
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ R = ER->getSuperRegion();
+ // FIXME: What about layers of ElementRegions?
+ }
+
+ // Invalidate this region.
+ const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
+
+ bool CausesPointerEscape = false;
+ RegionAndSymbolInvalidationTraits ITraits;
+ // Invalidate and escape only indirect regions accessible through the source
+ // buffer.
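+    // For a call like memcpy(dst, src, n) the source bytes are only read, so
+    // the buffer's direct contents are preserved; however, any pointers
+    // stored inside it may now also be reachable through the destination,
+    // hence the pointer escape.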
+ if (IsSourceBuffer) {
+ ITraits.setTrait(R,
+ RegionAndSymbolInvalidationTraits::TK_PreserveContents);
+ ITraits.setTrait(R, RegionAndSymbolInvalidationTraits::TK_SuppressEscape);
+ CausesPointerEscape = true;
+ } else {
+ const MemRegion::Kind& K = R->getKind();
+ if (K == MemRegion::FieldRegionKind)
+ if (Size && IsFirstBufInBound(C, state, E, Size)) {
+ // If destination buffer is a field region and access is in bound,
+ // do not invalidate its super region.
+ ITraits.setTrait(
+ R,
+ RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
+ }
+ }
+
+ return state->invalidateRegions(R, E, C.blockCount(), LCtx,
+ CausesPointerEscape, nullptr, nullptr,
+ &ITraits);
+ }
+
+ // If we have a non-region value by chance, just remove the binding.
+ // FIXME: is this necessary or correct? This handles the non-Region
+ // cases. Is it ever valid to store to these?
+ return state->killBinding(*L);
+}
+
+bool CStringChecker::SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
+ const MemRegion *MR) {
+ const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(MR);
+
+ switch (MR->getKind()) {
+ case MemRegion::FunctionTextRegionKind: {
+ const NamedDecl *FD = cast<FunctionTextRegion>(MR)->getDecl();
+ if (FD)
+ os << "the address of the function '" << *FD << '\'';
+ else
+ os << "the address of a function";
+ return true;
+ }
+ case MemRegion::BlockTextRegionKind:
+ os << "block text";
+ return true;
+ case MemRegion::BlockDataRegionKind:
+ os << "a block";
+ return true;
+ case MemRegion::CXXThisRegionKind:
+ case MemRegion::CXXTempObjectRegionKind:
+ os << "a C++ temp object of type " << TVR->getValueType().getAsString();
+ return true;
+ case MemRegion::VarRegionKind:
+ os << "a variable of type" << TVR->getValueType().getAsString();
+ return true;
+ case MemRegion::FieldRegionKind:
+ os << "a field of type " << TVR->getValueType().getAsString();
+ return true;
+ case MemRegion::ObjCIvarRegionKind:
+ os << "an instance variable of type " << TVR->getValueType().getAsString();
+ return true;
+ default:
+ return false;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Evaluation of individual function calls.
+//===----------------------------------------------------------------------===//
+
+void CStringChecker::evalCopyCommon(CheckerContext &C,
+ const CallExpr *CE,
+ ProgramStateRef state,
+ const Expr *Size, const Expr *Dest,
+ const Expr *Source, bool Restricted,
+ bool IsMempcpy) const {
+ CurrentFunctionDescription = "memory copy function";
+
+ // See if the size argument is zero.
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal sizeVal = state->getSVal(Size, LCtx);
+ QualType sizeTy = Size->getType();
+
+ ProgramStateRef stateZeroSize, stateNonZeroSize;
+ std::tie(stateZeroSize, stateNonZeroSize) =
+ assumeZero(C, state, sizeVal, sizeTy);
+
+ // Get the value of the Dest.
+ SVal destVal = state->getSVal(Dest, LCtx);
+
+ // If the size is zero, there won't be any actual memory access, so
+ // just bind the return value to the destination buffer and return.
+ if (stateZeroSize && !stateNonZeroSize) {
+ stateZeroSize = stateZeroSize->BindExpr(CE, LCtx, destVal);
+ C.addTransition(stateZeroSize);
+ return;
+ }
+
+ // If the size can be nonzero, we have to check the other arguments.
+ if (stateNonZeroSize) {
+ state = stateNonZeroSize;
+
+    // Ensure the destination is not null. If it is NULL, there will be a
+    // NULL pointer dereference.
+ state = checkNonNull(C, state, Dest, destVal);
+ if (!state)
+ return;
+
+ // Get the value of the Src.
+ SVal srcVal = state->getSVal(Source, LCtx);
+
+    // Ensure the source is not null. If it is NULL, there will be a
+    // NULL pointer dereference.
+ state = checkNonNull(C, state, Source, srcVal);
+ if (!state)
+ return;
+
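+    // memcpy and mempcpy declare their pointer arguments 'restrict', so
+    // overlapping source and destination is undefined behavior; memmove
+    // permits overlap, which is why it is evaluated without the Restricted
+    // flag.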
+ // Ensure the accesses are valid and that the buffers do not overlap.
+ const char * const writeWarning =
+ "Memory copy function overflows destination buffer";
+ state = CheckBufferAccess(C, state, Size, Dest, Source,
+ writeWarning, /* sourceWarning = */ nullptr);
+ if (Restricted)
+ state = CheckOverlap(C, state, Size, Dest, Source);
+
+ if (!state)
+ return;
+
+ // If this is mempcpy, get the byte after the last byte copied and
+ // bind the expr.
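+    // For example, mempcpy(dst, src, n) evaluates to (char *)dst + n, while
+    // memcpy(dst, src, n) evaluates to dst itself.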
+ if (IsMempcpy) {
+ loc::MemRegionVal destRegVal = destVal.castAs<loc::MemRegionVal>();
+
+ // Get the length to copy.
+ if (Optional<NonLoc> lenValNonLoc = sizeVal.getAs<NonLoc>()) {
+ // Get the byte after the last byte copied.
+ SValBuilder &SvalBuilder = C.getSValBuilder();
+ ASTContext &Ctx = SvalBuilder.getContext();
+ QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy);
+ loc::MemRegionVal DestRegCharVal = SvalBuilder.evalCast(destRegVal,
+ CharPtrTy, Dest->getType()).castAs<loc::MemRegionVal>();
+ SVal lastElement = C.getSValBuilder().evalBinOpLN(state, BO_Add,
+ DestRegCharVal,
+ *lenValNonLoc,
+ Dest->getType());
+
+ // The byte after the last byte copied is the return value.
+ state = state->BindExpr(CE, LCtx, lastElement);
+ } else {
+ // If we don't know how much we copied, we can at least
+ // conjure a return value for later.
+ SVal result = C.getSValBuilder().conjureSymbolVal(nullptr, CE, LCtx,
+ C.blockCount());
+ state = state->BindExpr(CE, LCtx, result);
+ }
+
+ } else {
+ // All other copies return the destination buffer.
+ // (Well, bcopy() has a void return type, but this won't hurt.)
+ state = state->BindExpr(CE, LCtx, destVal);
+ }
+
+ // Invalidate the destination (regular invalidation without pointer-escaping
+ // the address of the top-level region).
+ // FIXME: Even if we can't perfectly model the copy, we should see if we
+ // can use LazyCompoundVals to copy the source values into the destination.
+ // This would probably remove any existing bindings past the end of the
+ // copied region, but that's still an improvement over blank invalidation.
+ state = InvalidateBuffer(C, state, Dest, C.getSVal(Dest),
+ /*IsSourceBuffer*/false, Size);
+
+ // Invalidate the source (const-invalidation without const-pointer-escaping
+ // the address of the top-level region).
+ state = InvalidateBuffer(C, state, Source, C.getSVal(Source),
+ /*IsSourceBuffer*/true, nullptr);
+
+ C.addTransition(state);
+ }
+}
+
+void CStringChecker::evalMemcpy(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
+ // void *memcpy(void *restrict dst, const void *restrict src, size_t n);
+ // The return value is the address of the destination buffer.
+ const Expr *Dest = CE->getArg(0);
+ ProgramStateRef state = C.getState();
+
+ evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1), true);
+}
+
+void CStringChecker::evalMempcpy(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
+ // void *mempcpy(void *restrict dst, const void *restrict src, size_t n);
+ // The return value is a pointer to the byte following the last written byte.
+ const Expr *Dest = CE->getArg(0);
+ ProgramStateRef state = C.getState();
+
+ evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1), true, true);
+}
+
+void CStringChecker::evalMemmove(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
+ // void *memmove(void *dst, const void *src, size_t n);
+ // The return value is the address of the destination buffer.
+ const Expr *Dest = CE->getArg(0);
+ ProgramStateRef state = C.getState();
+
+ evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1));
+}
+
+void CStringChecker::evalBcopy(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
+ // void bcopy(const void *src, void *dst, size_t n);
+ evalCopyCommon(C, CE, C.getState(),
+ CE->getArg(2), CE->getArg(1), CE->getArg(0));
+}
+
+void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
+ // int memcmp(const void *s1, const void *s2, size_t n);
+ CurrentFunctionDescription = "memory comparison function";
+
+ const Expr *Left = CE->getArg(0);
+ const Expr *Right = CE->getArg(1);
+ const Expr *Size = CE->getArg(2);
+
+ ProgramStateRef state = C.getState();
+ SValBuilder &svalBuilder = C.getSValBuilder();
+
+ // See if the size argument is zero.
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal sizeVal = state->getSVal(Size, LCtx);
+ QualType sizeTy = Size->getType();
+
+ ProgramStateRef stateZeroSize, stateNonZeroSize;
+ std::tie(stateZeroSize, stateNonZeroSize) =
+ assumeZero(C, state, sizeVal, sizeTy);
+
+ // If the size can be zero, the result will be 0 in that case, and we don't
+ // have to check either of the buffers.
+ if (stateZeroSize) {
+ state = stateZeroSize;
+ state = state->BindExpr(CE, LCtx,
+ svalBuilder.makeZeroVal(CE->getType()));
+ C.addTransition(state);
+ }
+
+ // If the size can be nonzero, we have to check the other arguments.
+ if (stateNonZeroSize) {
+ state = stateNonZeroSize;
+ // If we know the two buffers are the same, we know the result is 0.
+ // First, get the two buffers' addresses. Another checker will have already
+ // made sure they're not undefined.
+ DefinedOrUnknownSVal LV =
+ state->getSVal(Left, LCtx).castAs<DefinedOrUnknownSVal>();
+ DefinedOrUnknownSVal RV =
+ state->getSVal(Right, LCtx).castAs<DefinedOrUnknownSVal>();
+
+ // See if they are the same.
+ DefinedOrUnknownSVal SameBuf = svalBuilder.evalEQ(state, LV, RV);
+ ProgramStateRef StSameBuf, StNotSameBuf;
+ std::tie(StSameBuf, StNotSameBuf) = state->assume(SameBuf);
+
+ // If the two arguments might be the same buffer, we know the result is 0,
+ // and we only need to check one size.
+ if (StSameBuf) {
+ state = StSameBuf;
+ state = CheckBufferAccess(C, state, Size, Left);
+ if (state) {
+ state = StSameBuf->BindExpr(CE, LCtx,
+ svalBuilder.makeZeroVal(CE->getType()));
+ C.addTransition(state);
+ }
+ }
+
+ // If the two arguments might be different buffers, we have to check the
+ // size of both of them.
+ if (StNotSameBuf) {
+ state = StNotSameBuf;
+ state = CheckBufferAccess(C, state, Size, Left, Right);
+ if (state) {
+ // The return value is the comparison result, which we don't know.
+ SVal CmpV = svalBuilder.conjureSymbolVal(nullptr, CE, LCtx,
+ C.blockCount());
+ state = state->BindExpr(CE, LCtx, CmpV);
+ C.addTransition(state);
+ }
+ }
+ }
+}
+
+void CStringChecker::evalstrLength(CheckerContext &C,
+ const CallExpr *CE) const {
+ if (CE->getNumArgs() < 1)
+ return;
+
+ // size_t strlen(const char *s);
+ evalstrLengthCommon(C, CE, /* IsStrnlen = */ false);
+}
+
+void CStringChecker::evalstrnLength(CheckerContext &C,
+ const CallExpr *CE) const {
+ if (CE->getNumArgs() < 2)
+ return;
+
+ // size_t strnlen(const char *s, size_t maxlen);
+ evalstrLengthCommon(C, CE, /* IsStrnlen = */ true);
+}
+
+void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
+ bool IsStrnlen) const {
+ CurrentFunctionDescription = "string length function";
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+
+ if (IsStrnlen) {
+ const Expr *maxlenExpr = CE->getArg(1);
+ SVal maxlenVal = state->getSVal(maxlenExpr, LCtx);
+
+ ProgramStateRef stateZeroSize, stateNonZeroSize;
+ std::tie(stateZeroSize, stateNonZeroSize) =
+ assumeZero(C, state, maxlenVal, maxlenExpr->getType());
+
+ // If the size can be zero, the result will be 0 in that case, and we don't
+ // have to check the string itself.
+ if (stateZeroSize) {
+ SVal zero = C.getSValBuilder().makeZeroVal(CE->getType());
+ stateZeroSize = stateZeroSize->BindExpr(CE, LCtx, zero);
+ C.addTransition(stateZeroSize);
+ }
+
+ // If the size is GUARANTEED to be zero, we're done!
+ if (!stateNonZeroSize)
+ return;
+
+ // Otherwise, record the assumption that the size is nonzero.
+ state = stateNonZeroSize;
+ }
+
+ // Check that the string argument is non-null.
+ const Expr *Arg = CE->getArg(0);
+ SVal ArgVal = state->getSVal(Arg, LCtx);
+
+ state = checkNonNull(C, state, Arg, ArgVal);
+
+ if (!state)
+ return;
+
+ SVal strLength = getCStringLength(C, state, Arg, ArgVal);
+
+ // If the argument isn't a valid C string, there's no valid state to
+ // transition to.
+ if (strLength.isUndef())
+ return;
+
+ DefinedOrUnknownSVal result = UnknownVal();
+
+ // If the check is for strnlen() then bind the return value to no more than
+ // the maxlen value.
+ if (IsStrnlen) {
+ QualType cmpTy = C.getSValBuilder().getConditionType();
+
+ // It's a little unfortunate to be getting this again,
+ // but it's not that expensive...
+ const Expr *maxlenExpr = CE->getArg(1);
+ SVal maxlenVal = state->getSVal(maxlenExpr, LCtx);
+
+ Optional<NonLoc> strLengthNL = strLength.getAs<NonLoc>();
+ Optional<NonLoc> maxlenValNL = maxlenVal.getAs<NonLoc>();
+
+ if (strLengthNL && maxlenValNL) {
+ ProgramStateRef stateStringTooLong, stateStringNotTooLong;
+
+ // Check if the strLength is greater than the maxlen.
+ std::tie(stateStringTooLong, stateStringNotTooLong) = state->assume(
+ C.getSValBuilder()
+ .evalBinOpNN(state, BO_GT, *strLengthNL, *maxlenValNL, cmpTy)
+ .castAs<DefinedOrUnknownSVal>());
+
+ if (stateStringTooLong && !stateStringNotTooLong) {
+ // If the string is longer than maxlen, return maxlen.
+ result = *maxlenValNL;
+ } else if (stateStringNotTooLong && !stateStringTooLong) {
+ // If the string is shorter than maxlen, return its length.
+ result = *strLengthNL;
+ }
+ }
+
+ if (result.isUnknown()) {
+ // If we don't have enough information for a comparison, there's
+ // no guarantee the full string length will actually be returned.
+ // All we know is the return value is the min of the string length
+ // and the limit. This is better than nothing.
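+      // For example, for strnlen(s, 10) with unknown strlen(s), the conjured
+      // result r is constrained below so that r <= strlen(s) and r <= 10.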
+ result = C.getSValBuilder().conjureSymbolVal(nullptr, CE, LCtx,
+ C.blockCount());
+ NonLoc resultNL = result.castAs<NonLoc>();
+
+ if (strLengthNL) {
+ state = state->assume(C.getSValBuilder().evalBinOpNN(
+ state, BO_LE, resultNL, *strLengthNL, cmpTy)
+ .castAs<DefinedOrUnknownSVal>(), true);
+ }
+
+ if (maxlenValNL) {
+ state = state->assume(C.getSValBuilder().evalBinOpNN(
+ state, BO_LE, resultNL, *maxlenValNL, cmpTy)
+ .castAs<DefinedOrUnknownSVal>(), true);
+ }
+ }
+
+ } else {
+ // This is a plain strlen(), not strnlen().
+ result = strLength.castAs<DefinedOrUnknownSVal>();
+
+ // If we don't know the length of the string, conjure a return
+ // value, so it can be used in constraints, at least.
+ if (result.isUnknown()) {
+ result = C.getSValBuilder().conjureSymbolVal(nullptr, CE, LCtx,
+ C.blockCount());
+ }
+ }
+
+ // Bind the return value.
+ assert(!result.isUnknown() && "Should have conjured a value by now");
+ state = state->BindExpr(CE, LCtx, result);
+ C.addTransition(state);
+}
+
+void CStringChecker::evalStrcpy(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 2)
+ return;
+
+ // char *strcpy(char *restrict dst, const char *restrict src);
+ evalStrcpyCommon(C, CE,
+ /* returnEnd = */ false,
+ /* isBounded = */ false,
+ /* isAppending = */ false);
+}
+
+void CStringChecker::evalStrncpy(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
+ // char *strncpy(char *restrict dst, const char *restrict src, size_t n);
+ evalStrcpyCommon(C, CE,
+ /* returnEnd = */ false,
+ /* isBounded = */ true,
+ /* isAppending = */ false);
+}
+
+void CStringChecker::evalStpcpy(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 2)
+ return;
+
+ // char *stpcpy(char *restrict dst, const char *restrict src);
+ evalStrcpyCommon(C, CE,
+ /* returnEnd = */ true,
+ /* isBounded = */ false,
+ /* isAppending = */ false);
+}
+
+void CStringChecker::evalStrcat(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 2)
+ return;
+
+ //char *strcat(char *restrict s1, const char *restrict s2);
+ evalStrcpyCommon(C, CE,
+ /* returnEnd = */ false,
+ /* isBounded = */ false,
+ /* isAppending = */ true);
+}
+
+void CStringChecker::evalStrncat(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
+ //char *strncat(char *restrict s1, const char *restrict s2, size_t n);
+ evalStrcpyCommon(C, CE,
+ /* returnEnd = */ false,
+ /* isBounded = */ true,
+ /* isAppending = */ true);
+}
+
+void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
+ bool returnEnd, bool isBounded,
+ bool isAppending) const {
+ CurrentFunctionDescription = "string copy function";
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+
+ // Check that the destination is non-null.
+ const Expr *Dst = CE->getArg(0);
+ SVal DstVal = state->getSVal(Dst, LCtx);
+
+ state = checkNonNull(C, state, Dst, DstVal);
+ if (!state)
+ return;
+
+ // Check that the source is non-null.
+ const Expr *srcExpr = CE->getArg(1);
+ SVal srcVal = state->getSVal(srcExpr, LCtx);
+ state = checkNonNull(C, state, srcExpr, srcVal);
+ if (!state)
+ return;
+
+ // Get the string length of the source.
+ SVal strLength = getCStringLength(C, state, srcExpr, srcVal);
+
+ // If the source isn't a valid C string, give up.
+ if (strLength.isUndef())
+ return;
+
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ QualType cmpTy = svalBuilder.getConditionType();
+ QualType sizeTy = svalBuilder.getContext().getSizeType();
+
+ // These two values allow checking two kinds of errors:
+ // - actual overflows caused by a source that doesn't fit in the destination
+ // - potential overflows caused by a bound that could exceed the destination
+ SVal amountCopied = UnknownVal();
+ SVal maxLastElementIndex = UnknownVal();
+ const char *boundWarning = nullptr;
+
+ // If the function is strncpy, strncat, etc... it is bounded.
+ if (isBounded) {
+ // Get the max number of characters to copy.
+ const Expr *lenExpr = CE->getArg(2);
+ SVal lenVal = state->getSVal(lenExpr, LCtx);
+
+ // Protect against misdeclared strncpy().
+ lenVal = svalBuilder.evalCast(lenVal, sizeTy, lenExpr->getType());
+
+ Optional<NonLoc> strLengthNL = strLength.getAs<NonLoc>();
+ Optional<NonLoc> lenValNL = lenVal.getAs<NonLoc>();
+
+ // If we know both values, we might be able to figure out how much
+ // we're copying.
+ if (strLengthNL && lenValNL) {
+ ProgramStateRef stateSourceTooLong, stateSourceNotTooLong;
+
+ // Check if the max number to copy is less than the length of the src.
+ // If the bound is equal to the source length, strncpy won't null-
+ // terminate the result!
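+      // For example: char buf[3]; strncpy(buf, "abc", 3); copies 'a', 'b',
+      // 'c' but no NUL, so 'buf' is not a C string afterwards.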
+ std::tie(stateSourceTooLong, stateSourceNotTooLong) = state->assume(
+ svalBuilder.evalBinOpNN(state, BO_GE, *strLengthNL, *lenValNL, cmpTy)
+ .castAs<DefinedOrUnknownSVal>());
+
+ if (stateSourceTooLong && !stateSourceNotTooLong) {
+ // Max number to copy is less than the length of the src, so the actual
+ // strLength copied is the max number arg.
+ state = stateSourceTooLong;
+ amountCopied = lenVal;
+
+ } else if (!stateSourceTooLong && stateSourceNotTooLong) {
+ // The source buffer entirely fits in the bound.
+ state = stateSourceNotTooLong;
+ amountCopied = strLength;
+ }
+ }
+
+ // We still want to know if the bound is known to be too large.
+ if (lenValNL) {
+ if (isAppending) {
+ // For strncat, the check is strlen(dst) + lenVal < sizeof(dst)
+
+ // Get the string length of the destination. If the destination is
+ // memory that can't have a string length, we shouldn't be copying
+ // into it anyway.
+ SVal dstStrLength = getCStringLength(C, state, Dst, DstVal);
+ if (dstStrLength.isUndef())
+ return;
+
+ if (Optional<NonLoc> dstStrLengthNL = dstStrLength.getAs<NonLoc>()) {
+ maxLastElementIndex = svalBuilder.evalBinOpNN(state, BO_Add,
+ *lenValNL,
+ *dstStrLengthNL,
+ sizeTy);
+ boundWarning = "Size argument is greater than the free space in the "
+ "destination buffer";
+ }
+
+ } else {
+ // For strncpy, this is just checking that lenVal <= sizeof(dst)
+ // (Yes, strncpy and strncat differ in how they treat termination.
+ // strncat ALWAYS terminates, but strncpy doesn't.)
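+      // For example, strncat(dst, src, n) may write up to n + 1 bytes (n
+      // characters plus the terminating NUL), whereas strncpy(dst, src, n)
+      // writes exactly n bytes and leaves the result unterminated whenever
+      // strlen(src) >= n.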
+
+ // We need a special case for when the copy size is zero, in which
+ // case strncpy will do no work at all. Our bounds check uses n-1
+ // as the last element accessed, so n == 0 is problematic.
+ ProgramStateRef StateZeroSize, StateNonZeroSize;
+ std::tie(StateZeroSize, StateNonZeroSize) =
+ assumeZero(C, state, *lenValNL, sizeTy);
+
+ // If the size is known to be zero, we're done.
+ if (StateZeroSize && !StateNonZeroSize) {
+ StateZeroSize = StateZeroSize->BindExpr(CE, LCtx, DstVal);
+ C.addTransition(StateZeroSize);
+ return;
+ }
+
+ // Otherwise, go ahead and figure out the last element we'll touch.
+ // We don't record the non-zero assumption here because we can't
+ // be sure. We won't warn on a possible zero.
+ NonLoc one = svalBuilder.makeIntVal(1, sizeTy).castAs<NonLoc>();
+ maxLastElementIndex = svalBuilder.evalBinOpNN(state, BO_Sub, *lenValNL,
+ one, sizeTy);
+ boundWarning = "Size argument is greater than the length of the "
+ "destination buffer";
+ }
+ }
+
+ // If we couldn't pin down the copy length, at least bound it.
+ // FIXME: We should actually run this code path for append as well, but
+ // right now it creates problems with constraints (since we can end up
+ // trying to pass constraints from symbol to symbol).
+ if (amountCopied.isUnknown() && !isAppending) {
+ // Try to get a "hypothetical" string length symbol, which we can later
+ // set as a real value if that turns out to be the case.
+ amountCopied = getCStringLength(C, state, lenExpr, srcVal, true);
+ assert(!amountCopied.isUndef());
+
+ if (Optional<NonLoc> amountCopiedNL = amountCopied.getAs<NonLoc>()) {
+ if (lenValNL) {
+ // amountCopied <= lenVal
+ SVal copiedLessThanBound = svalBuilder.evalBinOpNN(state, BO_LE,
+ *amountCopiedNL,
+ *lenValNL,
+ cmpTy);
+ state = state->assume(
+ copiedLessThanBound.castAs<DefinedOrUnknownSVal>(), true);
+ if (!state)
+ return;
+ }
+
+ if (strLengthNL) {
+ // amountCopied <= strlen(source)
+ SVal copiedLessThanSrc = svalBuilder.evalBinOpNN(state, BO_LE,
+ *amountCopiedNL,
+ *strLengthNL,
+ cmpTy);
+ state = state->assume(
+ copiedLessThanSrc.castAs<DefinedOrUnknownSVal>(), true);
+ if (!state)
+ return;
+ }
+ }
+ }
+
+ } else {
+ // The function isn't bounded. The amount copied should match the length
+ // of the source buffer.
+ amountCopied = strLength;
+ }
+
+ assert(state);
+
+ // This represents the number of characters copied into the destination
+ // buffer. (It may not actually be the strlen if the destination buffer
+ // is not terminated.)
+ SVal finalStrLength = UnknownVal();
+
+ // If this is an appending function (strcat, strncat...) then set the
+ // string length to strlen(src) + strlen(dst) since the buffer will
+ // ultimately contain both.
+ if (isAppending) {
+ // Get the string length of the destination. If the destination is memory
+ // that can't have a string length, we shouldn't be copying into it anyway.
+ SVal dstStrLength = getCStringLength(C, state, Dst, DstVal);
+ if (dstStrLength.isUndef())
+ return;
+
+ Optional<NonLoc> srcStrLengthNL = amountCopied.getAs<NonLoc>();
+ Optional<NonLoc> dstStrLengthNL = dstStrLength.getAs<NonLoc>();
+
+ // If we know both string lengths, we might know the final string length.
+ if (srcStrLengthNL && dstStrLengthNL) {
+ // Make sure the two lengths together don't overflow a size_t.
+ state = checkAdditionOverflow(C, state, *srcStrLengthNL, *dstStrLengthNL);
+ if (!state)
+ return;
+
+ finalStrLength = svalBuilder.evalBinOpNN(state, BO_Add, *srcStrLengthNL,
+ *dstStrLengthNL, sizeTy);
+ }
+
+ // If we couldn't get a single value for the final string length,
+ // we can at least bound it by the individual lengths.
+ if (finalStrLength.isUnknown()) {
+ // Try to get a "hypothetical" string length symbol, which we can later
+ // set as a real value if that turns out to be the case.
+ finalStrLength = getCStringLength(C, state, CE, DstVal, true);
+ assert(!finalStrLength.isUndef());
+
+ if (Optional<NonLoc> finalStrLengthNL = finalStrLength.getAs<NonLoc>()) {
+ if (srcStrLengthNL) {
+ // finalStrLength >= srcStrLength
+ SVal sourceInResult = svalBuilder.evalBinOpNN(state, BO_GE,
+ *finalStrLengthNL,
+ *srcStrLengthNL,
+ cmpTy);
+ state = state->assume(sourceInResult.castAs<DefinedOrUnknownSVal>(),
+ true);
+ if (!state)
+ return;
+ }
+
+ if (dstStrLengthNL) {
+ // finalStrLength >= dstStrLength
+ SVal destInResult = svalBuilder.evalBinOpNN(state, BO_GE,
+ *finalStrLengthNL,
+ *dstStrLengthNL,
+ cmpTy);
+ state =
+ state->assume(destInResult.castAs<DefinedOrUnknownSVal>(), true);
+ if (!state)
+ return;
+ }
+ }
+ }
+
+ } else {
+ // Otherwise, this is a copy-over function (strcpy, strncpy, ...), and
+ // the final string length will match the input string length.
+ finalStrLength = amountCopied;
+ }
+
+ // The final result of the function will either be a pointer past the last
+ // copied element, or a pointer to the start of the destination buffer.
+ SVal Result = (returnEnd ? UnknownVal() : DstVal);
+
+ assert(state);
+
+ // If the destination is a MemRegion, try to check for a buffer overflow and
+ // record the new string length.
+ if (Optional<loc::MemRegionVal> dstRegVal =
+ DstVal.getAs<loc::MemRegionVal>()) {
+ QualType ptrTy = Dst->getType();
+
+ // If we have an exact value on a bounded copy, use that to check for
+ // overflows, rather than our estimate about how much is actually copied.
+ if (boundWarning) {
+ if (Optional<NonLoc> maxLastNL = maxLastElementIndex.getAs<NonLoc>()) {
+ SVal maxLastElement = svalBuilder.evalBinOpLN(state, BO_Add, *dstRegVal,
+ *maxLastNL, ptrTy);
+ state = CheckLocation(C, state, CE->getArg(2), maxLastElement,
+ boundWarning);
+ if (!state)
+ return;
+ }
+ }
+
+ // Then, if the final length is known...
+ if (Optional<NonLoc> knownStrLength = finalStrLength.getAs<NonLoc>()) {
+ SVal lastElement = svalBuilder.evalBinOpLN(state, BO_Add, *dstRegVal,
+ *knownStrLength, ptrTy);
+
+ // ...and we haven't checked the bound, we'll check the actual copy.
+ if (!boundWarning) {
+ const char * const warningMsg =
+ "String copy function overflows destination buffer";
+ state = CheckLocation(C, state, Dst, lastElement, warningMsg);
+ if (!state)
+ return;
+ }
+
+ // If this is a stpcpy-style copy, the last element is the return value.
+ if (returnEnd)
+ Result = lastElement;
+ }
+
+ // Invalidate the destination (regular invalidation without pointer-escaping
+ // the address of the top-level region). This must happen before we set the
+ // C string length because invalidation will clear the length.
+ // FIXME: Even if we can't perfectly model the copy, we should see if we
+ // can use LazyCompoundVals to copy the source values into the destination.
+ // This would probably remove any existing bindings past the end of the
+ // string, but that's still an improvement over blank invalidation.
+ state = InvalidateBuffer(C, state, Dst, *dstRegVal,
+ /*IsSourceBuffer*/false, nullptr);
+
+ // Invalidate the source (const-invalidation without const-pointer-escaping
+ // the address of the top-level region).
+ state = InvalidateBuffer(C, state, srcExpr, srcVal, /*IsSourceBuffer*/true,
+ nullptr);
+
+ // Set the C string length of the destination, if we know it.
+ if (isBounded && !isAppending) {
+ // strncpy is annoying in that it doesn't guarantee to null-terminate
+ // the result string. If the original string didn't fit entirely inside
+ // the bound (including the null-terminator), we don't know how long the
+ // result is.
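+      // For example: char buf[8]; strncpy(buf, "hi", 8); gives strlen(buf)
+      // == 2, but strncpy(buf, "too long!", 8) leaves no terminator at all.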
+ if (amountCopied != strLength)
+ finalStrLength = UnknownVal();
+ }
+ state = setCStringLength(state, dstRegVal->getRegion(), finalStrLength);
+ }
+
+ assert(state);
+
+ // If this is a stpcpy-style copy, but we were unable to check for a buffer
+ // overflow, we still need a result. Conjure a return value.
+ if (returnEnd && Result.isUnknown()) {
+ Result = svalBuilder.conjureSymbolVal(nullptr, CE, LCtx, C.blockCount());
+ }
+
+ // Set the return value.
+ state = state->BindExpr(CE, LCtx, Result);
+ C.addTransition(state);
+}
+
+void CStringChecker::evalStrcmp(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 2)
+ return;
+
+ //int strcmp(const char *s1, const char *s2);
+ evalStrcmpCommon(C, CE, /* isBounded = */ false, /* ignoreCase = */ false);
+}
+
+void CStringChecker::evalStrncmp(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
+ //int strncmp(const char *s1, const char *s2, size_t n);
+ evalStrcmpCommon(C, CE, /* isBounded = */ true, /* ignoreCase = */ false);
+}
+
+void CStringChecker::evalStrcasecmp(CheckerContext &C,
+ const CallExpr *CE) const {
+ if (CE->getNumArgs() < 2)
+ return;
+
+ //int strcasecmp(const char *s1, const char *s2);
+ evalStrcmpCommon(C, CE, /* isBounded = */ false, /* ignoreCase = */ true);
+}
+
+void CStringChecker::evalStrncasecmp(CheckerContext &C,
+ const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
+ //int strncasecmp(const char *s1, const char *s2, size_t n);
+ evalStrcmpCommon(C, CE, /* isBounded = */ true, /* ignoreCase = */ true);
+}
+
+void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
+ bool isBounded, bool ignoreCase) const {
+ CurrentFunctionDescription = "string comparison function";
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+
+  // Check that the first string is non-null.
+ const Expr *s1 = CE->getArg(0);
+ SVal s1Val = state->getSVal(s1, LCtx);
+ state = checkNonNull(C, state, s1, s1Val);
+ if (!state)
+ return;
+
+ // Check that the second string is non-null.
+ const Expr *s2 = CE->getArg(1);
+ SVal s2Val = state->getSVal(s2, LCtx);
+ state = checkNonNull(C, state, s2, s2Val);
+ if (!state)
+ return;
+
+ // Get the string length of the first string or give up.
+ SVal s1Length = getCStringLength(C, state, s1, s1Val);
+ if (s1Length.isUndef())
+ return;
+
+ // Get the string length of the second string or give up.
+ SVal s2Length = getCStringLength(C, state, s2, s2Val);
+ if (s2Length.isUndef())
+ return;
+
+ // If we know the two buffers are the same, we know the result is 0.
+ // First, get the two buffers' addresses. Another checker will have already
+ // made sure they're not undefined.
+ DefinedOrUnknownSVal LV = s1Val.castAs<DefinedOrUnknownSVal>();
+ DefinedOrUnknownSVal RV = s2Val.castAs<DefinedOrUnknownSVal>();
+
+ // See if they are the same.
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ DefinedOrUnknownSVal SameBuf = svalBuilder.evalEQ(state, LV, RV);
+ ProgramStateRef StSameBuf, StNotSameBuf;
+ std::tie(StSameBuf, StNotSameBuf) = state->assume(SameBuf);
+
+ // If the two arguments might be the same buffer, we know the result is 0,
+ // and we only need to check one size.
+ if (StSameBuf) {
+ StSameBuf = StSameBuf->BindExpr(CE, LCtx,
+ svalBuilder.makeZeroVal(CE->getType()));
+ C.addTransition(StSameBuf);
+
+ // If the two arguments are GUARANTEED to be the same, we're done!
+ if (!StNotSameBuf)
+ return;
+ }
+
+ assert(StNotSameBuf);
+ state = StNotSameBuf;
+
+ // At this point we can go about comparing the two buffers.
+ // For now, we only do this if they're both known string literals.
+
+ // Attempt to extract string literals from both expressions.
+ const StringLiteral *s1StrLiteral = getCStringLiteral(C, state, s1, s1Val);
+ const StringLiteral *s2StrLiteral = getCStringLiteral(C, state, s2, s2Val);
+ bool canComputeResult = false;
+
+ if (s1StrLiteral && s2StrLiteral) {
+ StringRef s1StrRef = s1StrLiteral->getString();
+ StringRef s2StrRef = s2StrLiteral->getString();
+
+ if (isBounded) {
+ // Get the max number of characters to compare.
+ const Expr *lenExpr = CE->getArg(2);
+ SVal lenVal = state->getSVal(lenExpr, LCtx);
+
+ // If the length is known, we can get the right substrings.
+ if (const llvm::APSInt *len = svalBuilder.getKnownValue(state, lenVal)) {
+ // Create substrings of each to compare the prefix.
+ s1StrRef = s1StrRef.substr(0, (size_t)len->getZExtValue());
+ s2StrRef = s2StrRef.substr(0, (size_t)len->getZExtValue());
+ canComputeResult = true;
+ }
+ } else {
+ // This is a normal, unbounded strcmp.
+ canComputeResult = true;
+ }
+
+ if (canComputeResult) {
+ // Real strcmp stops at null characters.
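+      // For example, the literals "ab\0junk" and "ab" compare equal here,
+      // because the embedded NUL terminates the string as far as strcmp is
+      // concerned.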
+ size_t s1Term = s1StrRef.find('\0');
+ if (s1Term != StringRef::npos)
+ s1StrRef = s1StrRef.substr(0, s1Term);
+
+ size_t s2Term = s2StrRef.find('\0');
+ if (s2Term != StringRef::npos)
+ s2StrRef = s2StrRef.substr(0, s2Term);
+
+ // Use StringRef's comparison methods to compute the actual result.
+ int result;
+
+ if (ignoreCase) {
+ // Compare string 1 to string 2 the same way strcasecmp() does.
+ result = s1StrRef.compare_lower(s2StrRef);
+ } else {
+ // Compare string 1 to string 2 the same way strcmp() does.
+ result = s1StrRef.compare(s2StrRef);
+ }
+
+ // Build the SVal of the comparison and bind the return value.
+ SVal resultVal = svalBuilder.makeIntVal(result, CE->getType());
+ state = state->BindExpr(CE, LCtx, resultVal);
+ }
+ }
+
+ if (!canComputeResult) {
+ // Conjure a symbolic value. It's the best we can do.
+ SVal resultVal = svalBuilder.conjureSymbolVal(nullptr, CE, LCtx,
+ C.blockCount());
+ state = state->BindExpr(CE, LCtx, resultVal);
+ }
+
+ // Record this as a possible path.
+ C.addTransition(state);
+}
+
+void CStringChecker::evalStrsep(CheckerContext &C, const CallExpr *CE) const {
+ //char *strsep(char **stringp, const char *delim);
+ if (CE->getNumArgs() < 2)
+ return;
+
+ // Sanity: does the search string parameter match the return type?
+ const Expr *SearchStrPtr = CE->getArg(0);
+ QualType CharPtrTy = SearchStrPtr->getType()->getPointeeType();
+ if (CharPtrTy.isNull() ||
+ CE->getType().getUnqualifiedType() != CharPtrTy.getUnqualifiedType())
+ return;
+
+ CurrentFunctionDescription = "strsep()";
+ ProgramStateRef State = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+
+ // Check that the search string pointer is non-null (though it may point to
+ // a null string).
+ SVal SearchStrVal = State->getSVal(SearchStrPtr, LCtx);
+ State = checkNonNull(C, State, SearchStrPtr, SearchStrVal);
+ if (!State)
+ return;
+
+ // Check that the delimiter string is non-null.
+ const Expr *DelimStr = CE->getArg(1);
+ SVal DelimStrVal = State->getSVal(DelimStr, LCtx);
+ State = checkNonNull(C, State, DelimStr, DelimStrVal);
+ if (!State)
+ return;
+
+ SValBuilder &SVB = C.getSValBuilder();
+ SVal Result;
+ if (Optional<Loc> SearchStrLoc = SearchStrVal.getAs<Loc>()) {
+ // Get the current value of the search string pointer, as a char*.
+ Result = State->getSVal(*SearchStrLoc, CharPtrTy);
+
+ // Invalidate the search string, representing the change of one delimiter
+ // character to NUL.
+ State = InvalidateBuffer(C, State, SearchStrPtr, Result,
+ /*IsSourceBuffer*/false, nullptr);
+
+ // Overwrite the search string pointer. The new value is either an address
+ // further along in the same string, or NULL if there are no more tokens.
+ State = State->bindLoc(*SearchStrLoc,
+ SVB.conjureSymbolVal(getTag(), CE, LCtx, CharPtrTy,
+ C.blockCount()));
+ } else {
+ assert(SearchStrVal.isUnknown());
+ // Conjure a symbolic value. It's the best we can do.
+ Result = SVB.conjureSymbolVal(nullptr, CE, LCtx, C.blockCount());
+ }
+
+ // Set the return value, and finish.
+ State = State->BindExpr(CE, LCtx, Result);
+ C.addTransition(State);
+}
+
+//===----------------------------------------------------------------------===//
+// The driver method, and other Checker callbacks.
+//===----------------------------------------------------------------------===//
+
+bool CStringChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
+ const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+
+ if (!FDecl)
+ return false;
+
+ // FIXME: Poorly-factored string switches are slow.
+ FnCheck evalFunction = nullptr;
+ if (C.isCLibraryFunction(FDecl, "memcpy"))
+ evalFunction = &CStringChecker::evalMemcpy;
+ else if (C.isCLibraryFunction(FDecl, "mempcpy"))
+ evalFunction = &CStringChecker::evalMempcpy;
+ else if (C.isCLibraryFunction(FDecl, "memcmp"))
+ evalFunction = &CStringChecker::evalMemcmp;
+ else if (C.isCLibraryFunction(FDecl, "memmove"))
+ evalFunction = &CStringChecker::evalMemmove;
+ else if (C.isCLibraryFunction(FDecl, "strcpy"))
+ evalFunction = &CStringChecker::evalStrcpy;
+ else if (C.isCLibraryFunction(FDecl, "strncpy"))
+ evalFunction = &CStringChecker::evalStrncpy;
+ else if (C.isCLibraryFunction(FDecl, "stpcpy"))
+ evalFunction = &CStringChecker::evalStpcpy;
+ else if (C.isCLibraryFunction(FDecl, "strcat"))
+ evalFunction = &CStringChecker::evalStrcat;
+ else if (C.isCLibraryFunction(FDecl, "strncat"))
+ evalFunction = &CStringChecker::evalStrncat;
+ else if (C.isCLibraryFunction(FDecl, "strlen"))
+ evalFunction = &CStringChecker::evalstrLength;
+ else if (C.isCLibraryFunction(FDecl, "strnlen"))
+ evalFunction = &CStringChecker::evalstrnLength;
+ else if (C.isCLibraryFunction(FDecl, "strcmp"))
+ evalFunction = &CStringChecker::evalStrcmp;
+ else if (C.isCLibraryFunction(FDecl, "strncmp"))
+ evalFunction = &CStringChecker::evalStrncmp;
+ else if (C.isCLibraryFunction(FDecl, "strcasecmp"))
+ evalFunction = &CStringChecker::evalStrcasecmp;
+ else if (C.isCLibraryFunction(FDecl, "strncasecmp"))
+ evalFunction = &CStringChecker::evalStrncasecmp;
+ else if (C.isCLibraryFunction(FDecl, "strsep"))
+ evalFunction = &CStringChecker::evalStrsep;
+ else if (C.isCLibraryFunction(FDecl, "bcopy"))
+ evalFunction = &CStringChecker::evalBcopy;
+ else if (C.isCLibraryFunction(FDecl, "bcmp"))
+ evalFunction = &CStringChecker::evalMemcmp;
+
+ // If the callee isn't a string function, let another checker handle it.
+ if (!evalFunction)
+ return false;
+
+ // Check and evaluate the call.
+ (this->*evalFunction)(C, CE);
+
+  // If evaluating the call resulted in no change, chain to the next evalCall
+  // handler.
+  // Note: the custom CString evaluation assumes that basic safety properties
+  // hold. However, if the user chooses to turn off some of these checks, we
+  // ignore the issues and leave the call evaluation to a generic handler.
+ return C.isDifferent();
+}
+
+void CStringChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
+ // Record string length for char a[] = "abc";
+ ProgramStateRef state = C.getState();
+
+ for (const auto *I : DS->decls()) {
+ const VarDecl *D = dyn_cast<VarDecl>(I);
+ if (!D)
+ continue;
+
+ // FIXME: Handle array fields of structs.
+ if (!D->getType()->isArrayType())
+ continue;
+
+ const Expr *Init = D->getInit();
+ if (!Init)
+ continue;
+ if (!isa<StringLiteral>(Init))
+ continue;
+
+ Loc VarLoc = state->getLValue(D, C.getLocationContext());
+ const MemRegion *MR = VarLoc.getAsRegion();
+ if (!MR)
+ continue;
+
+ SVal StrVal = state->getSVal(Init, C.getLocationContext());
+ assert(StrVal.isValid() && "Initializer string is unknown or undefined");
+ DefinedOrUnknownSVal strLength =
+ getCStringLength(C, state, Init, StrVal).castAs<DefinedOrUnknownSVal>();
+
+ state = state->set<CStringLength>(MR, strLength);
+ }
+
+ C.addTransition(state);
+}
+
+bool CStringChecker::wantsRegionChangeUpdate(ProgramStateRef state) const {
+ CStringLengthTy Entries = state->get<CStringLength>();
+ return !Entries.isEmpty();
+}
+
+ProgramStateRef
+CStringChecker::checkRegionChanges(ProgramStateRef state,
+ const InvalidatedSymbols *,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallEvent *Call) const {
+ CStringLengthTy Entries = state->get<CStringLength>();
+ if (Entries.isEmpty())
+ return state;
+
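+  // Invalidation is transitive in both directions: clobbering a struct
+  // discards the lengths recorded for its fields, and clobbering a field
+  // discards the length recorded for the enclosing struct.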
+ llvm::SmallPtrSet<const MemRegion *, 8> Invalidated;
+ llvm::SmallPtrSet<const MemRegion *, 32> SuperRegions;
+
+ // First build sets for the changed regions and their super-regions.
+ for (ArrayRef<const MemRegion *>::iterator
+ I = Regions.begin(), E = Regions.end(); I != E; ++I) {
+ const MemRegion *MR = *I;
+ Invalidated.insert(MR);
+
+ SuperRegions.insert(MR);
+ while (const SubRegion *SR = dyn_cast<SubRegion>(MR)) {
+ MR = SR->getSuperRegion();
+ SuperRegions.insert(MR);
+ }
+ }
+
+ CStringLengthTy::Factory &F = state->get_context<CStringLength>();
+
+ // Then loop over the entries in the current state.
+ for (CStringLengthTy::iterator I = Entries.begin(),
+ E = Entries.end(); I != E; ++I) {
+ const MemRegion *MR = I.getKey();
+
+ // Is this entry for a super-region of a changed region?
+ if (SuperRegions.count(MR)) {
+ Entries = F.remove(Entries, MR);
+ continue;
+ }
+
+ // Is this entry for a sub-region of a changed region?
+ const MemRegion *Super = MR;
+ while (const SubRegion *SR = dyn_cast<SubRegion>(Super)) {
+ Super = SR->getSuperRegion();
+ if (Invalidated.count(Super)) {
+ Entries = F.remove(Entries, MR);
+ break;
+ }
+ }
+ }
+
+ return state->set<CStringLength>(Entries);
+}
+
+void CStringChecker::checkLiveSymbols(ProgramStateRef state,
+ SymbolReaper &SR) const {
+ // Mark all symbols in our string length map as valid.
+ CStringLengthTy Entries = state->get<CStringLength>();
+
+ for (CStringLengthTy::iterator I = Entries.begin(), E = Entries.end();
+ I != E; ++I) {
+ SVal Len = I.getData();
+
+ for (SymExpr::symbol_iterator si = Len.symbol_begin(),
+ se = Len.symbol_end(); si != se; ++si)
+ SR.markInUse(*si);
+ }
+}
+
+void CStringChecker::checkDeadSymbols(SymbolReaper &SR,
+ CheckerContext &C) const {
+ if (!SR.hasDeadSymbols())
+ return;
+
+ ProgramStateRef state = C.getState();
+ CStringLengthTy Entries = state->get<CStringLength>();
+ if (Entries.isEmpty())
+ return;
+
+ CStringLengthTy::Factory &F = state->get_context<CStringLength>();
+ for (CStringLengthTy::iterator I = Entries.begin(), E = Entries.end();
+ I != E; ++I) {
+ SVal Len = I.getData();
+ if (SymbolRef Sym = Len.getAsSymbol()) {
+ if (SR.isDead(Sym))
+ Entries = F.remove(Entries, I.getKey());
+ }
+ }
+
+ state = state->set<CStringLength>(Entries);
+ C.addTransition(state);
+}
+
+#define REGISTER_CHECKER(name) \
+ void ento::register##name(CheckerManager &mgr) { \
+ CStringChecker *checker = mgr.registerChecker<CStringChecker>(); \
+ checker->Filter.Check##name = true; \
+ checker->Filter.CheckName##name = mgr.getCurrentCheckName(); \
+ }
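+// For example, REGISTER_CHECKER(CStringNullArg) defines
+// ento::registerCStringNullArg(), which registers the shared CStringChecker
+// instance and enables only its null-argument checks.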
+
+REGISTER_CHECKER(CStringNullArg)
+REGISTER_CHECKER(CStringOutOfBounds)
+REGISTER_CHECKER(CStringBufferOverlap)
+REGISTER_CHECKER(CStringNotNullTerm)
+
+void ento::registerCStringCheckerBasic(CheckerManager &Mgr) {
+ registerCStringNullArg(Mgr);
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
new file mode 100644
index 0000000..3db1994
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
@@ -0,0 +1,191 @@
+//== CStringSyntaxChecker.cpp - Common pitfalls of C string APIs -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// An AST checker that looks for common pitfalls when using C string APIs.
+// - Identifies erroneous patterns in the last argument to strncat - the number
+// of bytes to copy.
+//
+//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TypeTraits.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class WalkAST : public StmtVisitor<WalkAST> {
+ const CheckerBase *Checker;
+ BugReporter &BR;
+  AnalysisDeclContext *AC;
+
+ /// Check if two expressions refer to the same declaration.
+ inline bool sameDecl(const Expr *A1, const Expr *A2) {
+ if (const DeclRefExpr *D1 = dyn_cast<DeclRefExpr>(A1->IgnoreParenCasts()))
+ if (const DeclRefExpr *D2 = dyn_cast<DeclRefExpr>(A2->IgnoreParenCasts()))
+ return D1->getDecl() == D2->getDecl();
+ return false;
+ }
+
+ /// Check if the expression E is a sizeof(WithArg).
+ inline bool isSizeof(const Expr *E, const Expr *WithArg) {
+ if (const UnaryExprOrTypeTraitExpr *UE =
+ dyn_cast<UnaryExprOrTypeTraitExpr>(E))
+ if (UE->getKind() == UETT_SizeOf)
+ return sameDecl(UE->getArgumentExpr(), WithArg);
+ return false;
+ }
+
+ /// Check if the expression E is a strlen(WithArg).
+ inline bool isStrlen(const Expr *E, const Expr *WithArg) {
+ if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (!FD)
+ return false;
+ return (CheckerContext::isCLibraryFunction(FD, "strlen") &&
+ sameDecl(CE->getArg(0), WithArg));
+ }
+ return false;
+ }
+
+ /// Check if the expression is an integer literal with value 1.
+ inline bool isOne(const Expr *E) {
+ if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
+ return (IL->getValue().isIntN(1));
+ return false;
+ }
+
+ inline StringRef getPrintableName(const Expr *E) {
+ if (const DeclRefExpr *D = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()))
+ return D->getDecl()->getName();
+ return StringRef();
+ }
+
+ /// Identify erroneous patterns in the last argument to strncat - the number
+ /// of bytes to copy.
+ bool containsBadStrncatPattern(const CallExpr *CE);
+
+public:
+ WalkAST(const CheckerBase *checker, BugReporter &br, AnalysisDeclContext *ac)
+ : Checker(checker), BR(br), AC(ac) {}
+
+ // Statement visitor methods.
+ void VisitChildren(Stmt *S);
+ void VisitStmt(Stmt *S) {
+ VisitChildren(S);
+ }
+ void VisitCallExpr(CallExpr *CE);
+};
+} // end anonymous namespace
+
+// The correct size argument should look like the following:
+//   strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
+// We look for the following anti-patterns:
+// - strncat(dst, src, sizeof(dst) - strlen(dst));
+// - strncat(dst, src, sizeof(dst) - 1);
+// - strncat(dst, src, sizeof(dst));
+bool WalkAST::containsBadStrncatPattern(const CallExpr *CE) {
+ if (CE->getNumArgs() != 3)
+ return false;
+ const Expr *DstArg = CE->getArg(0);
+ const Expr *SrcArg = CE->getArg(1);
+ const Expr *LenArg = CE->getArg(2);
+
+ // Identify wrong size expressions, which are commonly used instead.
+ if (const BinaryOperator *BE =
+ dyn_cast<BinaryOperator>(LenArg->IgnoreParenCasts())) {
+ // - sizeof(dst) - strlen(dst)
+ if (BE->getOpcode() == BO_Sub) {
+ const Expr *L = BE->getLHS();
+ const Expr *R = BE->getRHS();
+ if (isSizeof(L, DstArg) && isStrlen(R, DstArg))
+ return true;
+
+ // - sizeof(dst) - 1
+ if (isSizeof(L, DstArg) && isOne(R->IgnoreParenCasts()))
+ return true;
+ }
+ }
+ // - sizeof(dst)
+ if (isSizeof(LenArg, DstArg))
+ return true;
+
+ // - sizeof(src)
+ if (isSizeof(LenArg, SrcArg))
+ return true;
+ return false;
+}
+
+void WalkAST::VisitCallExpr(CallExpr *CE) {
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (!FD)
+ return;
+
+ if (CheckerContext::isCLibraryFunction(FD, "strncat")) {
+ if (containsBadStrncatPattern(CE)) {
+ const Expr *DstArg = CE->getArg(0);
+ const Expr *LenArg = CE->getArg(2);
+ PathDiagnosticLocation Loc =
+ PathDiagnosticLocation::createBegin(LenArg, BR.getSourceManager(), AC);
+
+ StringRef DstName = getPrintableName(DstArg);
+
+ SmallString<256> S;
+ llvm::raw_svector_ostream os(S);
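+      // The message is assembled from pieces so that it reads either
+      // "Potential buffer overflow. Replace with 'sizeof(<dst>) -
+      // strlen(<dst>) - 1' or use a safer 'strlcat' API" when the destination
+      // can be named, or "Potential buffer overflow. Use a safer 'strlcat'
+      // API" when it cannot.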
+ os << "Potential buffer overflow. ";
+ if (!DstName.empty()) {
+ os << "Replace with 'sizeof(" << DstName << ") "
+ "- strlen(" << DstName <<") - 1'";
+ os << " or u";
+ } else
+ os << "U";
+ os << "se a safer 'strlcat' API";
+
+ BR.EmitBasicReport(FD, Checker, "Anti-pattern in the argument",
+ "C String API", os.str(), Loc,
+ LenArg->getSourceRange());
+ }
+ }
+
+ // Recurse and check children.
+ VisitChildren(CE);
+}
+
+void WalkAST::VisitChildren(Stmt *S) {
+ for (Stmt *Child : S->children())
+ if (Child)
+ Visit(Child);
+}
+
+namespace {
+class CStringSyntaxChecker : public Checker<check::ASTCodeBody> {
+public:
+
+ void checkASTCodeBody(const Decl *D, AnalysisManager& Mgr,
+ BugReporter &BR) const {
+ WalkAST walker(this, BR, Mgr.getAnalysisDeclContext(D));
+ walker.Visit(D->getBody());
+ }
+};
+} // end anonymous namespace
+
+void ento::registerCStringSyntaxChecker(CheckerManager &mgr) {
+ mgr.registerChecker<CStringSyntaxChecker>();
+}
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
new file mode 100644
index 0000000..1459083
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -0,0 +1,598 @@
+//===--- CallAndMessageChecker.cpp -----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines CallAndMessageChecker, a builtin checker that checks for various
+// errors of call and objc message expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+struct ChecksFilter {
+ DefaultBool Check_CallAndMessageUnInitRefArg;
+ DefaultBool Check_CallAndMessageChecker;
+
+ CheckName CheckName_CallAndMessageUnInitRefArg;
+ CheckName CheckName_CallAndMessageChecker;
+};
+
+class CallAndMessageChecker
+ : public Checker< check::PreStmt<CallExpr>,
+ check::PreStmt<CXXDeleteExpr>,
+ check::PreObjCMessage,
+ check::ObjCMessageNil,
+ check::PreCall > {
+ mutable std::unique_ptr<BugType> BT_call_null;
+ mutable std::unique_ptr<BugType> BT_call_undef;
+ mutable std::unique_ptr<BugType> BT_cxx_call_null;
+ mutable std::unique_ptr<BugType> BT_cxx_call_undef;
+ mutable std::unique_ptr<BugType> BT_call_arg;
+ mutable std::unique_ptr<BugType> BT_cxx_delete_undef;
+ mutable std::unique_ptr<BugType> BT_msg_undef;
+ mutable std::unique_ptr<BugType> BT_objc_prop_undef;
+ mutable std::unique_ptr<BugType> BT_objc_subscript_undef;
+ mutable std::unique_ptr<BugType> BT_msg_arg;
+ mutable std::unique_ptr<BugType> BT_msg_ret;
+ mutable std::unique_ptr<BugType> BT_call_few_args;
+
+public:
+ ChecksFilter Filter;
+
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkPreStmt(const CXXDeleteExpr *DE, CheckerContext &C) const;
+ void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
+
+ /// Fill in the return value that results from messaging nil based on the
+ /// return type and architecture and diagnose if the return value will be
+ /// garbage.
+ void checkObjCMessageNil(const ObjCMethodCall &msg, CheckerContext &C) const;
+
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+
+private:
+ bool PreVisitProcessArg(CheckerContext &C, SVal V, SourceRange ArgRange,
+ const Expr *ArgEx, bool IsFirstArgument,
+ bool CheckUninitFields, const CallEvent &Call,
+ std::unique_ptr<BugType> &BT,
+ const ParmVarDecl *ParamDecl) const;
+
+ static void emitBadCall(BugType *BT, CheckerContext &C, const Expr *BadE);
+ void emitNilReceiverBug(CheckerContext &C, const ObjCMethodCall &msg,
+ ExplodedNode *N) const;
+
+ void HandleNilReceiver(CheckerContext &C,
+ ProgramStateRef state,
+ const ObjCMethodCall &msg) const;
+
+ void LazyInit_BT(const char *desc, std::unique_ptr<BugType> &BT) const {
+ if (!BT)
+ BT.reset(new BuiltinBug(this, desc));
+ }
+ bool uninitRefOrPointer(CheckerContext &C, const SVal &V,
+ SourceRange ArgRange,
+ const Expr *ArgEx, std::unique_ptr<BugType> &BT,
+ const ParmVarDecl *ParamDecl, const char *BD) const;
+};
+} // end anonymous namespace
+
+void CallAndMessageChecker::emitBadCall(BugType *BT, CheckerContext &C,
+ const Expr *BadE) {
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return;
+
+ auto R = llvm::make_unique<BugReport>(*BT, BT->getName(), N);
+ if (BadE) {
+ R->addRange(BadE->getSourceRange());
+ if (BadE->isGLValue())
+ BadE = bugreporter::getDerefExpr(BadE);
+ bugreporter::trackNullOrUndefValue(N, BadE, *R);
+ }
+ C.emitReport(std::move(R));
+}
+
+static StringRef describeUninitializedArgumentInCall(const CallEvent &Call,
+ bool IsFirstArgument) {
+ switch (Call.getKind()) {
+ case CE_ObjCMessage: {
+ const ObjCMethodCall &Msg = cast<ObjCMethodCall>(Call);
+ switch (Msg.getMessageKind()) {
+ case OCM_Message:
+ return "Argument in message expression is an uninitialized value";
+ case OCM_PropertyAccess:
+ assert(Msg.isSetter() && "Getters have no args");
+ return "Argument for property setter is an uninitialized value";
+ case OCM_Subscript:
+ if (Msg.isSetter() && IsFirstArgument)
+ return "Argument for subscript setter is an uninitialized value";
+ return "Subscript index is an uninitialized value";
+ }
+ llvm_unreachable("Unknown message kind.");
+ }
+ case CE_Block:
+ return "Block call argument is an uninitialized value";
+ default:
+ return "Function call argument is an uninitialized value";
+ }
+}
+
+bool CallAndMessageChecker::uninitRefOrPointer(CheckerContext &C,
+ const SVal &V,
+ SourceRange ArgRange,
+ const Expr *ArgEx,
+ std::unique_ptr<BugType> &BT,
+ const ParmVarDecl *ParamDecl,
+ const char *BD) const {
+ if (!Filter.Check_CallAndMessageUnInitRefArg)
+ return false;
+
+ // No parameter declaration available, i.e. variadic function argument.
+  if (!ParamDecl)
+ return false;
+
+  // If the parameter is declared as a pointer to const in the function
+  // declaration, check whether the corresponding argument in the call points
+  // to an undefined symbolic value (uninitialized memory).
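+  //
+  // For example (illustrative):
+  //   void take(const int *p);
+  //   int x;      // never initialized
+  //   take(&x);   // the callee can only read *p, so this read is reported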
+ StringRef Message;
+
+ if (ParamDecl->getType()->isPointerType()) {
+ Message = "Function call argument is a pointer to uninitialized value";
+ } else if (ParamDecl->getType()->isReferenceType()) {
+ Message = "Function call argument is an uninitialized value";
+ } else
+ return false;
+
+  if (!ParamDecl->getType()->getPointeeType().isConstQualified())
+ return false;
+
+ if (const MemRegion *SValMemRegion = V.getAsRegion()) {
+ const ProgramStateRef State = C.getState();
+ const SVal PSV = State->getSVal(SValMemRegion);
+ if (PSV.isUndef()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
+ LazyInit_BT(BD, BT);
+ auto R = llvm::make_unique<BugReport>(*BT, Message, N);
+ R->addRange(ArgRange);
+ if (ArgEx) {
+ bugreporter::trackNullOrUndefValue(N, ArgEx, *R);
+ }
+ C.emitReport(std::move(R));
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
+ SVal V,
+ SourceRange ArgRange,
+ const Expr *ArgEx,
+ bool IsFirstArgument,
+ bool CheckUninitFields,
+ const CallEvent &Call,
+ std::unique_ptr<BugType> &BT,
+                                               const ParmVarDecl *ParamDecl) const {
+ const char *BD = "Uninitialized argument value";
+
+ if (uninitRefOrPointer(C, V, ArgRange, ArgEx, BT, ParamDecl, BD))
+ return true;
+
+ if (V.isUndef()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
+ LazyInit_BT(BD, BT);
+
+ // Generate a report for this bug.
+ StringRef Desc =
+ describeUninitializedArgumentInCall(Call, IsFirstArgument);
+ auto R = llvm::make_unique<BugReport>(*BT, Desc, N);
+ R->addRange(ArgRange);
+ if (ArgEx)
+ bugreporter::trackNullOrUndefValue(N, ArgEx, *R);
+ C.emitReport(std::move(R));
+ }
+ return true;
+ }
+
+ if (!CheckUninitFields)
+ return false;
+
+ if (Optional<nonloc::LazyCompoundVal> LV =
+ V.getAs<nonloc::LazyCompoundVal>()) {
+
+ class FindUninitializedField {
+ public:
+ SmallVector<const FieldDecl *, 10> FieldChain;
+ private:
+ StoreManager &StoreMgr;
+ MemRegionManager &MrMgr;
+ Store store;
+ public:
+ FindUninitializedField(StoreManager &storeMgr,
+ MemRegionManager &mrMgr, Store s)
+ : StoreMgr(storeMgr), MrMgr(mrMgr), store(s) {}
+
+ bool Find(const TypedValueRegion *R) {
+ QualType T = R->getValueType();
+ if (const RecordType *RT = T->getAsStructureType()) {
+ const RecordDecl *RD = RT->getDecl()->getDefinition();
+ assert(RD && "Referred record has no definition");
+ for (const auto *I : RD->fields()) {
+ const FieldRegion *FR = MrMgr.getFieldRegion(I, R);
+ FieldChain.push_back(I);
+ T = I->getType();
+ if (T->getAsStructureType()) {
+ if (Find(FR))
+ return true;
+ } else {
+ const SVal &V = StoreMgr.getBinding(store, loc::MemRegionVal(FR));
+ if (V.isUndef())
+ return true;
+ }
+ FieldChain.pop_back();
+ }
+ }
+
+ return false;
+ }
+ };
+
+ const LazyCompoundValData *D = LV->getCVData();
+ FindUninitializedField F(C.getState()->getStateManager().getStoreManager(),
+ C.getSValBuilder().getRegionManager(),
+ D->getStore());
+
+ if (F.Find(D->getRegion())) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
+ LazyInit_BT(BD, BT);
+ SmallString<512> Str;
+ llvm::raw_svector_ostream os(Str);
+ os << "Passed-by-value struct argument contains uninitialized data";
+
+ if (F.FieldChain.size() == 1)
+ os << " (e.g., field: '" << *F.FieldChain[0] << "')";
+ else {
+ os << " (e.g., via the field chain: '";
+ bool first = true;
+ for (SmallVectorImpl<const FieldDecl *>::iterator
+ DI = F.FieldChain.begin(), DE = F.FieldChain.end(); DI != DE; ++DI) {
+ if (first)
+ first = false;
+ else
+ os << '.';
+ os << **DI;
+ }
+ os << "')";
+ }
+
+ // Generate a report for this bug.
+ auto R = llvm::make_unique<BugReport>(*BT, os.str(), N);
+ R->addRange(ArgRange);
+
+ // FIXME: enhance tracking of uninitialized values back through arbitrary
+ // MemRegions.
+ C.emitReport(std::move(R));
+ }
+ return true;
+ }
+ }
+
+ return false;
+}
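+
+// A sketch of a call that should trip the uninitialized-field walk above:
+// \code
+//   struct Pair { int a, b; };
+//   void takePair(struct Pair p);
+//   void caller(void) {
+//     struct Pair p;
+//     p.a = 1;     // 'p.b' stays uninitialized
+//     takePair(p); // warning: struct argument contains uninitialized data
+//   }
+// \endcode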
+
+void CallAndMessageChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+
+ const Expr *Callee = CE->getCallee()->IgnoreParens();
+ ProgramStateRef State = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal L = State->getSVal(Callee, LCtx);
+
+ if (L.isUndef()) {
+ if (!BT_call_undef)
+ BT_call_undef.reset(new BuiltinBug(
+ this, "Called function pointer is an uninitalized pointer value"));
+ emitBadCall(BT_call_undef.get(), C, Callee);
+ return;
+ }
+
+ ProgramStateRef StNonNull, StNull;
+ std::tie(StNonNull, StNull) = State->assume(L.castAs<DefinedOrUnknownSVal>());
+
+ if (StNull && !StNonNull) {
+ if (!BT_call_null)
+ BT_call_null.reset(new BuiltinBug(
+ this, "Called function pointer is null (null dereference)"));
+ emitBadCall(BT_call_null.get(), C, Callee);
+ return;
+ }
+
+ C.addTransition(StNonNull);
+}
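+
+// Sketches of the two cases handled above, each on its own path:
+// \code
+//   void callUndef(void) {
+//     void (*fp)(void); // uninitialized
+//     fp();             // warning: uninitialized function pointer is called
+//   }
+//   void callNull(void) {
+//     void (*fp)(void) = 0;
+//     fp();             // warning: called function pointer is null
+//   }
+// \endcode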
+
+void CallAndMessageChecker::checkPreStmt(const CXXDeleteExpr *DE,
+ CheckerContext &C) const {
+
+ SVal Arg = C.getSVal(DE->getArgument());
+ if (Arg.isUndef()) {
+ StringRef Desc;
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return;
+ if (!BT_cxx_delete_undef)
+ BT_cxx_delete_undef.reset(
+ new BuiltinBug(this, "Uninitialized argument value"));
+ if (DE->isArrayFormAsWritten())
+ Desc = "Argument to 'delete[]' is uninitialized";
+ else
+ Desc = "Argument to 'delete' is uninitialized";
+ BugType *BT = BT_cxx_delete_undef.get();
+ auto R = llvm::make_unique<BugReport>(*BT, Desc, N);
+ bugreporter::trackNullOrUndefValue(N, DE, *R);
+ C.emitReport(std::move(R));
+ return;
+ }
+}
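+
+// A sketch of the C++ pattern flagged above:
+// \code
+//   void caller() {
+//     int *p;   // uninitialized
+//     delete p; // warning: argument to 'delete' is uninitialized
+//   }
+// \endcode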
+
+void CallAndMessageChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ // If this is a call to a C++ method, check if the callee is null or
+ // undefined.
+ if (const CXXInstanceCall *CC = dyn_cast<CXXInstanceCall>(&Call)) {
+ SVal V = CC->getCXXThisVal();
+ if (V.isUndef()) {
+ if (!BT_cxx_call_undef)
+ BT_cxx_call_undef.reset(
+ new BuiltinBug(this, "Called C++ object pointer is uninitialized"));
+ emitBadCall(BT_cxx_call_undef.get(), C, CC->getCXXThisExpr());
+ return;
+ }
+
+ ProgramStateRef StNonNull, StNull;
+ std::tie(StNonNull, StNull) =
+ State->assume(V.castAs<DefinedOrUnknownSVal>());
+
+ if (StNull && !StNonNull) {
+ if (!BT_cxx_call_null)
+ BT_cxx_call_null.reset(
+ new BuiltinBug(this, "Called C++ object pointer is null"));
+ emitBadCall(BT_cxx_call_null.get(), C, CC->getCXXThisExpr());
+ return;
+ }
+
+ State = StNonNull;
+ }
+
+ const Decl *D = Call.getDecl();
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (FD) {
+ // If we have a declaration, we can make sure we pass enough parameters to
+ // the function.
+ unsigned Params = FD->getNumParams();
+ if (Call.getNumArgs() < Params) {
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return;
+
+ LazyInit_BT("Function call with too few arguments", BT_call_few_args);
+
+ SmallString<512> Str;
+ llvm::raw_svector_ostream os(Str);
+ os << "Function taking " << Params << " argument"
+ << (Params == 1 ? "" : "s") << " is called with fewer ("
+ << Call.getNumArgs() << ")";
+
+ C.emitReport(
+ llvm::make_unique<BugReport>(*BT_call_few_args, os.str(), N));
+ }
+ }
+
+ // Don't check for uninitialized field values in arguments if the
+ // caller has a body that is available and we have the chance to inline it.
+ // This is a hack, but it is a reasonable compromise between sometimes
+ // warning and sometimes not, depending on whether we decide to inline a
+ // function.
+ const bool checkUninitFields =
+ !(C.getAnalysisManager().shouldInlineCall() && (D && D->getBody()));
+
+ std::unique_ptr<BugType> *BT;
+ if (isa<ObjCMethodCall>(Call))
+ BT = &BT_msg_arg;
+ else
+ BT = &BT_call_arg;
+
+ for (unsigned i = 0, e = Call.getNumArgs(); i != e; ++i) {
+ const ParmVarDecl *ParamDecl = nullptr;
+ if (FD && i < FD->getNumParams())
+ ParamDecl = FD->getParamDecl(i);
+ if (PreVisitProcessArg(C, Call.getArgSVal(i), Call.getArgSourceRange(i),
+ Call.getArgExpr(i), /*IsFirstArgument=*/i == 0,
+ checkUninitFields, Call, *BT, ParamDecl))
+ return;
+ }
+
+ // If we make it here, record our assumptions about the callee.
+ C.addTransition(State);
+}
+
+void CallAndMessageChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
+ CheckerContext &C) const {
+ SVal recVal = msg.getReceiverSVal();
+ if (recVal.isUndef()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
+ BugType *BT = nullptr;
+ switch (msg.getMessageKind()) {
+ case OCM_Message:
+ if (!BT_msg_undef)
+ BT_msg_undef.reset(new BuiltinBug(this,
+ "Receiver in message expression "
+ "is an uninitialized value"));
+ BT = BT_msg_undef.get();
+ break;
+ case OCM_PropertyAccess:
+ if (!BT_objc_prop_undef)
+ BT_objc_prop_undef.reset(new BuiltinBug(
+ this, "Property access on an uninitialized object pointer"));
+ BT = BT_objc_prop_undef.get();
+ break;
+ case OCM_Subscript:
+ if (!BT_objc_subscript_undef)
+ BT_objc_subscript_undef.reset(new BuiltinBug(
+ this, "Subscript access on an uninitialized object pointer"));
+ BT = BT_objc_subscript_undef.get();
+ break;
+ }
+ assert(BT && "Unknown message kind.");
+
+ auto R = llvm::make_unique<BugReport>(*BT, BT->getName(), N);
+ const ObjCMessageExpr *ME = msg.getOriginExpr();
+ R->addRange(ME->getReceiverRange());
+
+ // FIXME: getTrackNullOrUndefValueVisitor can't handle "super" yet.
+ if (const Expr *ReceiverE = ME->getInstanceReceiver())
+ bugreporter::trackNullOrUndefValue(N, ReceiverE, *R);
+ C.emitReport(std::move(R));
+ }
+ return;
+ }
+}
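+
+// A sketch of the receiver case handled above (non-ARC code and a
+// hypothetical selector assumed; under ARC the local would be implicitly
+// initialized to nil):
+// \code
+//   id obj;            // uninitialized object pointer
+//   [obj doSomething]; // warning: receiver is an uninitialized value
+// \endcode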
+
+void CallAndMessageChecker::checkObjCMessageNil(const ObjCMethodCall &msg,
+ CheckerContext &C) const {
+ HandleNilReceiver(C, C.getState(), msg);
+}
+
+void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C,
+ const ObjCMethodCall &msg,
+ ExplodedNode *N) const {
+
+ if (!BT_msg_ret)
+ BT_msg_ret.reset(
+ new BuiltinBug(this, "Receiver in message expression is 'nil'"));
+
+ const ObjCMessageExpr *ME = msg.getOriginExpr();
+
+ QualType ResTy = msg.getResultType();
+
+ SmallString<200> buf;
+ llvm::raw_svector_ostream os(buf);
+ os << "The receiver of message '";
+ ME->getSelector().print(os);
+ os << "' is nil";
+ if (ResTy->isReferenceType()) {
+ os << ", which results in forming a null reference";
+ } else {
+ os << " and returns a value of type '";
+ msg.getResultType().print(os, C.getLangOpts());
+ os << "' that will be garbage";
+ }
+
+ auto report = llvm::make_unique<BugReport>(*BT_msg_ret, os.str(), N);
+ report->addRange(ME->getReceiverRange());
+ // FIXME: This won't track "self" in messages to super.
+ if (const Expr *receiver = ME->getInstanceReceiver()) {
+ bugreporter::trackNullOrUndefValue(N, receiver, *report);
+ }
+ C.emitReport(std::move(report));
+}
+
+static bool supportsNilWithFloatRet(const llvm::Triple &triple) {
+ return (triple.getVendor() == llvm::Triple::Apple &&
+ (triple.isiOS() || triple.isWatchOS() ||
+ !triple.isMacOSXVersionLT(10,5)));
+}
+
+void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
+ ProgramStateRef state,
+ const ObjCMethodCall &Msg) const {
+ ASTContext &Ctx = C.getASTContext();
+ static CheckerProgramPointTag Tag(this, "NilReceiver");
+
+ // Check the return type of the message expression. A message to nil will
+ // return different values depending on the return type and the architecture.
+ QualType RetTy = Msg.getResultType();
+ CanQualType CanRetTy = Ctx.getCanonicalType(RetTy);
+ const LocationContext *LCtx = C.getLocationContext();
+
+ if (CanRetTy->isStructureOrClassType()) {
+ // Structure returns are safe since the compiler zeroes them out.
+ SVal V = C.getSValBuilder().makeZeroVal(RetTy);
+ C.addTransition(state->BindExpr(Msg.getOriginExpr(), LCtx, V), &Tag);
+ return;
+ }
+
+ // Other cases: check if sizeof(return type) > sizeof(void*)
+ if (CanRetTy != Ctx.VoidTy && C.getLocationContext()->getParentMap()
+ .isConsumedExpr(Msg.getOriginExpr())) {
+ // Compute: sizeof(void *) and sizeof(return type)
+ const uint64_t voidPtrSize = Ctx.getTypeSize(Ctx.VoidPtrTy);
+ const uint64_t returnTypeSize = Ctx.getTypeSize(CanRetTy);
+
+ if (CanRetTy.getTypePtr()->isReferenceType() ||
+ (voidPtrSize < returnTypeSize &&
+ !(supportsNilWithFloatRet(Ctx.getTargetInfo().getTriple()) &&
+ (Ctx.FloatTy == CanRetTy ||
+ Ctx.DoubleTy == CanRetTy ||
+ Ctx.LongDoubleTy == CanRetTy ||
+ Ctx.LongLongTy == CanRetTy ||
+ Ctx.UnsignedLongLongTy == CanRetTy)))) {
+ if (ExplodedNode *N = C.generateErrorNode(state, &Tag))
+ emitNilReceiverBug(C, Msg, N);
+ return;
+ }
+
+ // Handle the safe cases where the return value is 0 if the
+ // receiver is nil.
+ //
+ // FIXME: For now take the conservative approach that we only
+ // return null values if we *know* that the receiver is nil.
+ // This is because we can have surprises like:
+ //
+ // ... = [[NSScreen screens] objectAtIndex:0];
+ //
+ // What can happen is that [... screens] could return nil, but
+ // it most likely isn't nil. We should assume the semantics
+ // of this case unless we have *a lot* more knowledge.
+ //
+ SVal V = C.getSValBuilder().makeZeroVal(RetTy);
+ C.addTransition(state->BindExpr(Msg.getOriginExpr(), LCtx, V), &Tag);
+ return;
+ }
+
+ C.addTransition(state);
+}
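+
+// A sketch of the case that reaches emitNilReceiverBug above, assuming a
+// 32-bit target without nil-with-float support (selector name hypothetical):
+// \code
+//   id obj = nil;
+//   long long v = [obj longLongValue]; // sizeof(long long) > sizeof(void*):
+//                                      // warning: returned value is garbage
+// \endcode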
+
+#define REGISTER_CHECKER(name) \
+ void ento::register##name(CheckerManager &mgr) { \
+ CallAndMessageChecker *Checker = \
+ mgr.registerChecker<CallAndMessageChecker>(); \
+ Checker->Filter.Check_##name = true; \
+ Checker->Filter.CheckName_##name = mgr.getCurrentCheckName(); \
+ }
+
+REGISTER_CHECKER(CallAndMessageUnInitRefArg)
+REGISTER_CHECKER(CallAndMessageChecker)
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
new file mode 100644
index 0000000..2337400
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
@@ -0,0 +1,144 @@
+//=== CastSizeChecker.cpp ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// CastSizeChecker checks, when casting a malloc'ed symbolic region to type T,
+// whether the size of the symbolic region is a multiple of the size of T.
+//
+//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class CastSizeChecker : public Checker< check::PreStmt<CastExpr> > {
+ mutable std::unique_ptr<BuiltinBug> BT;
+
+public:
+ void checkPreStmt(const CastExpr *CE, CheckerContext &C) const;
+};
+}
+
+/// Check if we are casting to a struct with a flexible array at the end.
+/// \code
+/// struct foo {
+/// size_t len;
+/// struct bar data[];
+/// };
+/// \endcode
+/// or
+/// \code
+/// struct foo {
+/// size_t len;
+/// struct bar data[0];
+/// };
+/// \endcode
+/// In these cases it is also valid to allocate size of struct foo + a multiple
+/// of struct bar.
+static bool evenFlexibleArraySize(ASTContext &Ctx, CharUnits RegionSize,
+ CharUnits TypeSize, QualType ToPointeeTy) {
+ const RecordType *RT = ToPointeeTy->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ const RecordDecl *RD = RT->getDecl();
+ RecordDecl::field_iterator Iter(RD->field_begin());
+ RecordDecl::field_iterator End(RD->field_end());
+ const FieldDecl *Last = nullptr;
+ for (; Iter != End; ++Iter)
+ Last = *Iter;
+ assert(Last && "empty structs should already be handled");
+
+ const Type *ElemType = Last->getType()->getArrayElementTypeNoTypeQual();
+ CharUnits FlexSize;
+ if (const ConstantArrayType *ArrayTy =
+ Ctx.getAsConstantArrayType(Last->getType())) {
+ FlexSize = Ctx.getTypeSizeInChars(ElemType);
+ if (ArrayTy->getSize() == 1 && TypeSize > FlexSize)
+ TypeSize -= FlexSize;
+ else if (ArrayTy->getSize() != 0)
+ return false;
+ } else if (RD->hasFlexibleArrayMember()) {
+ FlexSize = Ctx.getTypeSizeInChars(ElemType);
+ } else {
+ return false;
+ }
+
+ if (FlexSize.isZero())
+ return false;
+
+ CharUnits Left = RegionSize - TypeSize;
+ if (Left.isNegative())
+ return false;
+
+ return Left % FlexSize == 0;
+}
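+
+// Given the 'struct foo'/'struct bar' shapes from the comment above, a sketch
+// of an allocation this helper is meant to accept:
+// \code
+//   struct foo *p = (struct foo *)
+//       malloc(sizeof(struct foo) + 3 * sizeof(struct bar));
+// \endcode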
+
+void CastSizeChecker::checkPreStmt(const CastExpr *CE,CheckerContext &C) const {
+ const Expr *E = CE->getSubExpr();
+ ASTContext &Ctx = C.getASTContext();
+ QualType ToTy = Ctx.getCanonicalType(CE->getType());
+ const PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());
+
+ if (!ToPTy)
+ return;
+
+ QualType ToPointeeTy = ToPTy->getPointeeType();
+
+ // Only perform the check if 'ToPointeeTy' is a complete type.
+ if (ToPointeeTy->isIncompleteType())
+ return;
+
+ ProgramStateRef state = C.getState();
+ const MemRegion *R = state->getSVal(E, C.getLocationContext()).getAsRegion();
+ if (!R)
+ return;
+
+ const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R);
+ if (!SR)
+ return;
+
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ SVal extent = SR->getExtent(svalBuilder);
+ const llvm::APSInt *extentInt = svalBuilder.getKnownValue(state, extent);
+ if (!extentInt)
+ return;
+
+ CharUnits regionSize = CharUnits::fromQuantity(extentInt->getSExtValue());
+ CharUnits typeSize = C.getASTContext().getTypeSizeInChars(ToPointeeTy);
+
+ // Ignore void, and a few other un-sizeable types.
+ if (typeSize.isZero())
+ return;
+
+ if (regionSize % typeSize == 0)
+ return;
+
+ if (evenFlexibleArraySize(Ctx, regionSize, typeSize, ToPointeeTy))
+ return;
+
+ if (ExplodedNode *errorNode = C.generateErrorNode()) {
+ if (!BT)
+ BT.reset(new BuiltinBug(this, "Cast region with wrong size.",
+ "Cast a region whose size is not a multiple"
+ " of the destination type size."));
+ auto R = llvm::make_unique<BugReport>(*BT, BT->getDescription(), errorNode);
+ R->addRange(CE->getSourceRange());
+ C.emitReport(std::move(R));
+ }
+}
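+
+// A sketch of an allocation/cast pair this checker should warn on, assuming a
+// typical layout with 4-byte ints and that the analyzer knows the extent of
+// the malloc'ed region:
+// \code
+//   struct S { int x; int y; };           // sizeof(struct S) == 8
+//   struct S *p = (struct S *)malloc(10); // 10 % 8 != 0 -> warning
+//   struct S *q = (struct S *)malloc(16); // 16 % 8 == 0 -> no warning
+// \endcode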
+
+void ento::registerCastSizeChecker(CheckerManager &mgr) {
+ mgr.registerChecker<CastSizeChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
new file mode 100644
index 0000000..fa78413
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
@@ -0,0 +1,75 @@
+//=== CastToStructChecker.cpp - Cast to struct checker ---------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CastToStructChecker, a builtin checker that checks for
+// casts from a non-struct pointer to a struct pointer.
+// This check corresponds to CWE-588.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class CastToStructChecker : public Checker< check::PreStmt<CastExpr> > {
+ mutable std::unique_ptr<BuiltinBug> BT;
+
+public:
+ void checkPreStmt(const CastExpr *CE, CheckerContext &C) const;
+};
+}
+
+void CastToStructChecker::checkPreStmt(const CastExpr *CE,
+ CheckerContext &C) const {
+ const Expr *E = CE->getSubExpr();
+ ASTContext &Ctx = C.getASTContext();
+ QualType OrigTy = Ctx.getCanonicalType(E->getType());
+ QualType ToTy = Ctx.getCanonicalType(CE->getType());
+
+ const PointerType *OrigPTy = dyn_cast<PointerType>(OrigTy.getTypePtr());
+ const PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());
+
+ if (!ToPTy || !OrigPTy)
+ return;
+
+ QualType OrigPointeeTy = OrigPTy->getPointeeType();
+ QualType ToPointeeTy = ToPTy->getPointeeType();
+
+ if (!ToPointeeTy->isStructureOrClassType())
+ return;
+
+ // We allow cast from void*.
+ if (OrigPointeeTy->isVoidType())
+ return;
+
+ // Now the cast-to-type is struct pointer, the original type is not void*.
+ if (!OrigPointeeTy->isRecordType()) {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
+ if (!BT)
+ BT.reset(
+ new BuiltinBug(this, "Cast from non-struct type to struct type",
+ "Casting a non-structure type to a structure type "
+ "and accessing a field can lead to memory access "
+ "errors or data corruption."));
+ auto R = llvm::make_unique<BugReport>(*BT, BT->getDescription(), N);
+ R->addRange(CE->getSourceRange());
+ C.emitReport(std::move(R));
+ }
+ }
+}
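+
+// A sketch of what this checker does and does not flag:
+// \code
+//   struct S { int a; int b; };
+//   int n = 0;
+//   struct S *p = (struct S *)&n; // warning: non-struct cast to struct
+//   void *v = &n;
+//   struct S *q = (struct S *)v;  // no warning: casts from void* are allowed
+// \endcode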
+
+void ento::registerCastToStructChecker(CheckerManager &mgr) {
+ mgr.registerChecker<CastToStructChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
new file mode 100644
index 0000000..25caa00
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -0,0 +1,248 @@
+//==- CheckObjCDealloc.cpp - Check ObjC -dealloc implementation --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CheckObjCDealloc, a checker that
+// analyzes an Objective-C class's implementation to determine if it
+// correctly implements -dealloc.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+static bool scan_ivar_release(Stmt *S, ObjCIvarDecl *ID,
+ const ObjCPropertyDecl *PD,
+ Selector Release,
+ IdentifierInfo* SelfII,
+ ASTContext &Ctx) {
+
+ // [mMyIvar release]
+ if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S))
+ if (ME->getSelector() == Release)
+ if (ME->getInstanceReceiver())
+ if (Expr *Receiver = ME->getInstanceReceiver()->IgnoreParenCasts())
+ if (ObjCIvarRefExpr *E = dyn_cast<ObjCIvarRefExpr>(Receiver))
+ if (E->getDecl() == ID)
+ return true;
+
+ // [self setMyIvar:nil];
+ if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S))
+ if (ME->getInstanceReceiver())
+ if (Expr *Receiver = ME->getInstanceReceiver()->IgnoreParenCasts())
+ if (DeclRefExpr *E = dyn_cast<DeclRefExpr>(Receiver))
+ if (E->getDecl()->getIdentifier() == SelfII)
+ if (ME->getMethodDecl() == PD->getSetterMethodDecl() &&
+ ME->getNumArgs() == 1 &&
+ ME->getArg(0)->isNullPointerConstant(Ctx,
+ Expr::NPC_ValueDependentIsNull))
+ return true;
+
+ // self.myIvar = nil;
+ if (BinaryOperator* BO = dyn_cast<BinaryOperator>(S))
+ if (BO->isAssignmentOp())
+ if (ObjCPropertyRefExpr *PRE =
+ dyn_cast<ObjCPropertyRefExpr>(BO->getLHS()->IgnoreParenCasts()))
+ if (PRE->isExplicitProperty() && PRE->getExplicitProperty() == PD)
+ if (BO->getRHS()->isNullPointerConstant(Ctx,
+ Expr::NPC_ValueDependentIsNull)) {
+ // This is only a 'release' if the property kind is not
+ // 'assign'.
+ return PD->getSetterKind() != ObjCPropertyDecl::Assign;
+ }
+
+ // Recurse to children.
+ for (Stmt *SubStmt : S->children())
+ if (SubStmt && scan_ivar_release(SubStmt, ID, PD, Release, SelfII, Ctx))
+ return true;
+
+ return false;
+}
+
+static void checkObjCDealloc(const CheckerBase *Checker,
+ const ObjCImplementationDecl *D,
+ const LangOptions &LOpts, BugReporter &BR) {
+
+ assert(LOpts.getGC() != LangOptions::GCOnly);
+
+ ASTContext &Ctx = BR.getContext();
+ const ObjCInterfaceDecl *ID = D->getClassInterface();
+
+ // Does the class contain any ivars that are pointers (or id<...>)?
+ // If not, skip the check entirely.
+ // NOTE: This is motivated by PR 2517:
+ // http://llvm.org/bugs/show_bug.cgi?id=2517
+
+ bool containsPointerIvar = false;
+
+ for (const auto *Ivar : ID->ivars()) {
+ QualType T = Ivar->getType();
+
+ if (!T->isObjCObjectPointerType() ||
+ Ivar->hasAttr<IBOutletAttr>() || // Skip IBOutlets.
+ Ivar->hasAttr<IBOutletCollectionAttr>()) // Skip IBOutletCollections.
+ continue;
+
+ containsPointerIvar = true;
+ break;
+ }
+
+ if (!containsPointerIvar)
+ return;
+
+ // Determine if the class subclasses NSObject.
+ IdentifierInfo* NSObjectII = &Ctx.Idents.get("NSObject");
+ IdentifierInfo* SenTestCaseII = &Ctx.Idents.get("SenTestCase");
+
+ for (; ID; ID = ID->getSuperClass()) {
+ IdentifierInfo *II = ID->getIdentifier();
+
+ if (II == NSObjectII)
+ break;
+
+ // FIXME: For now, ignore classes that subclass SenTestCase, as these don't
+ // need to implement -dealloc. They implement tear down in another way,
+ // which we should try and catch later.
+ // http://llvm.org/bugs/show_bug.cgi?id=3187
+ if (II == SenTestCaseII)
+ return;
+ }
+
+ if (!ID)
+ return;
+
+ // Get the "dealloc" selector.
+ IdentifierInfo* II = &Ctx.Idents.get("dealloc");
+ Selector S = Ctx.Selectors.getSelector(0, &II);
+ const ObjCMethodDecl *MD = nullptr;
+
+ // Scan the instance methods for "dealloc".
+ for (const auto *I : D->instance_methods()) {
+ if (I->getSelector() == S) {
+ MD = I;
+ break;
+ }
+ }
+
+ PathDiagnosticLocation DLoc =
+ PathDiagnosticLocation::createBegin(D, BR.getSourceManager());
+
+ if (!MD) { // No dealloc found.
+
+ const char* name = LOpts.getGC() == LangOptions::NonGC
+ ? "missing -dealloc"
+ : "missing -dealloc (Hybrid MM, non-GC)";
+
+ std::string buf;
+ llvm::raw_string_ostream os(buf);
+ os << "Objective-C class '" << *D << "' lacks a 'dealloc' instance method";
+
+ BR.EmitBasicReport(D, Checker, name, categories::CoreFoundationObjectiveC,
+ os.str(), DLoc);
+ return;
+ }
+
+ // Get the "release" selector.
+ IdentifierInfo* RII = &Ctx.Idents.get("release");
+ Selector RS = Ctx.Selectors.getSelector(0, &RII);
+
+ // Get the "self" identifier
+ IdentifierInfo* SelfII = &Ctx.Idents.get("self");
+
+ // Scan for missing and extra releases of ivars used by implementations
+ // of synthesized properties
+ for (const auto *I : D->property_impls()) {
+ // We can only check the synthesized properties
+ if (I->getPropertyImplementation() != ObjCPropertyImplDecl::Synthesize)
+ continue;
+
+ ObjCIvarDecl *ID = I->getPropertyIvarDecl();
+ if (!ID)
+ continue;
+
+ QualType T = ID->getType();
+ if (!T->isObjCObjectPointerType()) // Skip non-pointer ivars
+ continue;
+
+ const ObjCPropertyDecl *PD = I->getPropertyDecl();
+ if (!PD)
+ continue;
+
+ // ivars cannot be set via read-only properties, so we'll skip them
+ if (PD->isReadOnly())
+ continue;
+
+ // An ivar must be released if and only if the kind of setter is not 'assign'.
+ bool requiresRelease = PD->getSetterKind() != ObjCPropertyDecl::Assign;
+ if (scan_ivar_release(MD->getBody(), ID, PD, RS, SelfII, Ctx)
+ != requiresRelease) {
+ const char *name = nullptr;
+ std::string buf;
+ llvm::raw_string_ostream os(buf);
+
+ if (requiresRelease) {
+ name = LOpts.getGC() == LangOptions::NonGC
+ ? "missing ivar release (leak)"
+ : "missing ivar release (Hybrid MM, non-GC)";
+
+ os << "The '" << *ID
+ << "' instance variable was retained by a synthesized property but "
+ "wasn't released in 'dealloc'";
+ } else {
+ name = LOpts.getGC() == LangOptions::NonGC
+ ? "extra ivar release (use-after-release)"
+ : "extra ivar release (Hybrid MM, non-GC)";
+
+ os << "The '" << *ID
+ << "' instance variable was not retained by a synthesized property "
+ "but was released in 'dealloc'";
+ }
+
+ PathDiagnosticLocation SDLoc =
+ PathDiagnosticLocation::createBegin(I, BR.getSourceManager());
+
+ BR.EmitBasicReport(MD, Checker, name,
+ categories::CoreFoundationObjectiveC, os.str(), SDLoc);
+ }
+ }
+}
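+
+// A sketch of a missing-release report from the loop above (manual
+// retain/release, non-GC code assumed; class and property names hypothetical):
+// \code
+//   @interface Foo : NSObject {
+//     id bar;
+//   }
+//   @property (retain) id bar;
+//   @end
+//   @implementation Foo
+//   @synthesize bar;
+//   - (void)dealloc {
+//     [super dealloc]; // warning: 'bar' was retained by a synthesized
+//   }                  // property but is never released here
+//   @end
+// \endcode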
+
+//===----------------------------------------------------------------------===//
+// ObjCDeallocChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCDeallocChecker : public Checker<
+ check::ASTDecl<ObjCImplementationDecl> > {
+public:
+ void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (mgr.getLangOpts().getGC() == LangOptions::GCOnly)
+ return;
+ checkObjCDealloc(this, cast<ObjCImplementationDecl>(D), mgr.getLangOpts(),
+ BR);
+ }
+};
+}
+
+void ento::registerObjCDeallocChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCDeallocChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
new file mode 100644
index 0000000..cc4c0c3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
@@ -0,0 +1,140 @@
+//=- CheckObjCInstMethSignature.cpp - Check ObjC method signatures -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CheckObjCInstMethSignature, a flow-insensitive check
+// that determines if an Objective-C subclass incorrectly redefines the
+// signature of a method declared in one of its superclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Type.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+static bool AreTypesCompatible(QualType Derived, QualType Ancestor,
+ ASTContext &C) {
+
+ // Right now we don't compare the compatibility of pointers. That involves
+ // looking at subtyping relationships. FIXME: Future patch.
+ if (Derived->isAnyPointerType() && Ancestor->isAnyPointerType())
+ return true;
+
+ return C.typesAreCompatible(Derived, Ancestor);
+}
+
+static void CompareReturnTypes(const ObjCMethodDecl *MethDerived,
+ const ObjCMethodDecl *MethAncestor,
+ BugReporter &BR, ASTContext &Ctx,
+ const ObjCImplementationDecl *ID,
+ const CheckerBase *Checker) {
+
+ QualType ResDerived = MethDerived->getReturnType();
+ QualType ResAncestor = MethAncestor->getReturnType();
+
+ if (!AreTypesCompatible(ResDerived, ResAncestor, Ctx)) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "The Objective-C class '"
+ << *MethDerived->getClassInterface()
+ << "', which is derived from class '"
+ << *MethAncestor->getClassInterface()
+ << "', defines the instance method '";
+ MethDerived->getSelector().print(os);
+ os << "' whose return type is '"
+ << ResDerived.getAsString()
+ << "'. A method with the same name (same selector) is also defined in "
+ "class '"
+ << *MethAncestor->getClassInterface()
+ << "' and has a return type of '"
+ << ResAncestor.getAsString()
+ << "'. These two types are incompatible, and may result in undefined "
+ "behavior for clients of these classes.";
+
+ PathDiagnosticLocation MethDLoc =
+ PathDiagnosticLocation::createBegin(MethDerived,
+ BR.getSourceManager());
+
+ BR.EmitBasicReport(
+ MethDerived, Checker, "Incompatible instance method return type",
+ categories::CoreFoundationObjectiveC, os.str(), MethDLoc);
+ }
+}
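+
+// A sketch of an incompatible override this function reports (class and
+// selector names hypothetical):
+// \code
+//   @interface Base : NSObject
+//   - (int)value;
+//   @end
+//   @interface Derived : Base
+//   @end
+//   @implementation Derived
+//   - (float)value { return 0; } // warning: return type incompatible with
+//   @end                         // the declaration in 'Base'
+// \endcode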
+
+static void CheckObjCInstMethSignature(const ObjCImplementationDecl *ID,
+ BugReporter &BR,
+ const CheckerBase *Checker) {
+
+ const ObjCInterfaceDecl *D = ID->getClassInterface();
+ const ObjCInterfaceDecl *C = D->getSuperClass();
+
+ if (!C)
+ return;
+
+ ASTContext &Ctx = BR.getContext();
+
+ // Build a DenseMap of the methods for quick querying.
+ typedef llvm::DenseMap<Selector,ObjCMethodDecl*> MapTy;
+ MapTy IMeths;
+ unsigned NumMethods = 0;
+
+ for (auto *M : ID->instance_methods()) {
+ IMeths[M->getSelector()] = M;
+ ++NumMethods;
+ }
+
+ // Now recurse the class hierarchy chain looking for methods with the
+ // same signatures.
+ while (C && NumMethods) {
+ for (const auto *M : C->instance_methods()) {
+ Selector S = M->getSelector();
+
+ MapTy::iterator MI = IMeths.find(S);
+
+ if (MI == IMeths.end() || MI->second == nullptr)
+ continue;
+
+ --NumMethods;
+ ObjCMethodDecl *MethDerived = MI->second;
+ MI->second = nullptr;
+
+ CompareReturnTypes(MethDerived, M, BR, Ctx, ID, Checker);
+ }
+
+ C = C->getSuperClass();
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCMethSigsChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCMethSigsChecker : public Checker<
+ check::ASTDecl<ObjCImplementationDecl> > {
+public:
+ void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ CheckObjCInstMethSignature(D, BR, this);
+ }
+};
+}
+
+void ento::registerObjCMethSigsChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCMethSigsChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
new file mode 100644
index 0000000..60f1618
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
@@ -0,0 +1,778 @@
+//==- CheckSecuritySyntaxOnly.cpp - Basic security checks --------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a set of flow-insensitive security checks.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+static bool isArc4RandomAvailable(const ASTContext &Ctx) {
+ const llvm::Triple &T = Ctx.getTargetInfo().getTriple();
+ return T.getVendor() == llvm::Triple::Apple ||
+ T.getOS() == llvm::Triple::CloudABI ||
+ T.getOS() == llvm::Triple::FreeBSD ||
+ T.getOS() == llvm::Triple::NetBSD ||
+ T.getOS() == llvm::Triple::OpenBSD ||
+ T.getOS() == llvm::Triple::Bitrig ||
+ T.getOS() == llvm::Triple::DragonFly;
+}
+
+namespace {
+struct ChecksFilter {
+ DefaultBool check_gets;
+ DefaultBool check_getpw;
+ DefaultBool check_mktemp;
+ DefaultBool check_mkstemp;
+ DefaultBool check_strcpy;
+ DefaultBool check_rand;
+ DefaultBool check_vfork;
+ DefaultBool check_FloatLoopCounter;
+ DefaultBool check_UncheckedReturn;
+
+ CheckName checkName_gets;
+ CheckName checkName_getpw;
+ CheckName checkName_mktemp;
+ CheckName checkName_mkstemp;
+ CheckName checkName_strcpy;
+ CheckName checkName_rand;
+ CheckName checkName_vfork;
+ CheckName checkName_FloatLoopCounter;
+ CheckName checkName_UncheckedReturn;
+};
+
+class WalkAST : public StmtVisitor<WalkAST> {
+ BugReporter &BR;
+ AnalysisDeclContext* AC;
+ enum { num_setids = 6 };
+ IdentifierInfo *II_setid[num_setids];
+
+ const bool CheckRand;
+ const ChecksFilter &filter;
+
+public:
+ WalkAST(BugReporter &br, AnalysisDeclContext* ac,
+ const ChecksFilter &f)
+ : BR(br), AC(ac), II_setid(),
+ CheckRand(isArc4RandomAvailable(BR.getContext())),
+ filter(f) {}
+
+ // Statement visitor methods.
+ void VisitCallExpr(CallExpr *CE);
+ void VisitForStmt(ForStmt *S);
+ void VisitCompoundStmt (CompoundStmt *S);
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+
+ void VisitChildren(Stmt *S);
+
+ // Helpers.
+ bool checkCall_strCommon(const CallExpr *CE, const FunctionDecl *FD);
+
+ typedef void (WalkAST::*FnCheck)(const CallExpr *, const FunctionDecl *);
+
+ // Checker-specific methods.
+ void checkLoopConditionForFloat(const ForStmt *FS);
+ void checkCall_gets(const CallExpr *CE, const FunctionDecl *FD);
+ void checkCall_getpw(const CallExpr *CE, const FunctionDecl *FD);
+ void checkCall_mktemp(const CallExpr *CE, const FunctionDecl *FD);
+ void checkCall_mkstemp(const CallExpr *CE, const FunctionDecl *FD);
+ void checkCall_strcpy(const CallExpr *CE, const FunctionDecl *FD);
+ void checkCall_strcat(const CallExpr *CE, const FunctionDecl *FD);
+ void checkCall_rand(const CallExpr *CE, const FunctionDecl *FD);
+ void checkCall_random(const CallExpr *CE, const FunctionDecl *FD);
+ void checkCall_vfork(const CallExpr *CE, const FunctionDecl *FD);
+ void checkUncheckedReturnValue(CallExpr *CE);
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// AST walking.
+//===----------------------------------------------------------------------===//
+
+void WalkAST::VisitChildren(Stmt *S) {
+ for (Stmt *Child : S->children())
+ if (Child)
+ Visit(Child);
+}
+
+void WalkAST::VisitCallExpr(CallExpr *CE) {
+ // Get the callee.
+ const FunctionDecl *FD = CE->getDirectCallee();
+
+ if (!FD)
+ return;
+
+ // Get the name of the callee. If it's a builtin, strip off the prefix.
+ IdentifierInfo *II = FD->getIdentifier();
+ if (!II) // if no identifier, not a simple C function
+ return;
+ StringRef Name = II->getName();
+ if (Name.startswith("__builtin_"))
+ Name = Name.substr(10);
+
+ // Set the evaluation function by switching on the callee name.
+ FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
+ .Case("gets", &WalkAST::checkCall_gets)
+ .Case("getpw", &WalkAST::checkCall_getpw)
+ .Case("mktemp", &WalkAST::checkCall_mktemp)
+ .Case("mkstemp", &WalkAST::checkCall_mkstemp)
+ .Case("mkdtemp", &WalkAST::checkCall_mkstemp)
+ .Case("mkstemps", &WalkAST::checkCall_mkstemp)
+ .Cases("strcpy", "__strcpy_chk", &WalkAST::checkCall_strcpy)
+ .Cases("strcat", "__strcat_chk", &WalkAST::checkCall_strcat)
+ .Case("drand48", &WalkAST::checkCall_rand)
+ .Case("erand48", &WalkAST::checkCall_rand)
+ .Case("jrand48", &WalkAST::checkCall_rand)
+ .Case("lrand48", &WalkAST::checkCall_rand)
+ .Case("mrand48", &WalkAST::checkCall_rand)
+ .Case("nrand48", &WalkAST::checkCall_rand)
+ .Case("lcong48", &WalkAST::checkCall_rand)
+ .Case("rand", &WalkAST::checkCall_rand)
+ .Case("rand_r", &WalkAST::checkCall_rand)
+ .Case("random", &WalkAST::checkCall_random)
+ .Case("vfork", &WalkAST::checkCall_vfork)
+ .Default(nullptr);
+
+ // If no evaluation function matched, the callee is of no security concern.
+ // Otherwise, check and evaluate the call.
+ if (evalFunction)
+ (this->*evalFunction)(CE, FD);
+
+ // Recurse and check children.
+ VisitChildren(CE);
+}
+
+void WalkAST::VisitCompoundStmt(CompoundStmt *S) {
+ for (Stmt *Child : S->children())
+ if (Child) {
+ if (CallExpr *CE = dyn_cast<CallExpr>(Child))
+ checkUncheckedReturnValue(CE);
+ Visit(Child);
+ }
+}
+
+void WalkAST::VisitForStmt(ForStmt *FS) {
+ checkLoopConditionForFloat(FS);
+
+ // Recurse and check children.
+ VisitChildren(FS);
+}
+
+//===----------------------------------------------------------------------===//
+// Check: floating point variable used as loop counter.
+// Originally: <rdar://problem/6336718>
+// Implements: CERT security coding advisory FLP30-C.
+//===----------------------------------------------------------------------===//
+
+static const DeclRefExpr*
+getIncrementedVar(const Expr *expr, const VarDecl *x, const VarDecl *y) {
+ expr = expr->IgnoreParenCasts();
+
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(expr)) {
+ if (!(B->isAssignmentOp() || B->isCompoundAssignmentOp() ||
+ B->getOpcode() == BO_Comma))
+ return nullptr;
+
+ if (const DeclRefExpr *lhs = getIncrementedVar(B->getLHS(), x, y))
+ return lhs;
+
+ if (const DeclRefExpr *rhs = getIncrementedVar(B->getRHS(), x, y))
+ return rhs;
+
+ return nullptr;
+ }
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(expr)) {
+ const NamedDecl *ND = DR->getDecl();
+ return ND == x || ND == y ? DR : nullptr;
+ }
+
+ if (const UnaryOperator *U = dyn_cast<UnaryOperator>(expr))
+ return U->isIncrementDecrementOp()
+ ? getIncrementedVar(U->getSubExpr(), x, y) : nullptr;
+
+ return nullptr;
+}
+
+/// checkLoopConditionForFloat - This check looks for 'for' statements that
+/// use a floating point variable as a loop counter.
+/// CERT: FLP30-C, FLP30-CPP.
+///
+void WalkAST::checkLoopConditionForFloat(const ForStmt *FS) {
+ if (!filter.check_FloatLoopCounter)
+ return;
+
+ // Does the loop have a condition?
+ const Expr *condition = FS->getCond();
+
+ if (!condition)
+ return;
+
+ // Does the loop have an increment?
+ const Expr *increment = FS->getInc();
+
+ if (!increment)
+ return;
+
+ // Strip away '()' and casts.
+ condition = condition->IgnoreParenCasts();
+ increment = increment->IgnoreParenCasts();
+
+ // Is the loop condition a comparison?
+ const BinaryOperator *B = dyn_cast<BinaryOperator>(condition);
+
+ if (!B)
+ return;
+
+ // Is this a comparison?
+ if (!(B->isRelationalOp() || B->isEqualityOp()))
+ return;
+
+ // Are we comparing variables?
+ const DeclRefExpr *drLHS =
+ dyn_cast<DeclRefExpr>(B->getLHS()->IgnoreParenLValueCasts());
+ const DeclRefExpr *drRHS =
+ dyn_cast<DeclRefExpr>(B->getRHS()->IgnoreParenLValueCasts());
+
+ // Does at least one of the variables have a floating point type?
+ drLHS = drLHS && drLHS->getType()->isRealFloatingType() ? drLHS : nullptr;
+ drRHS = drRHS && drRHS->getType()->isRealFloatingType() ? drRHS : nullptr;
+
+ if (!drLHS && !drRHS)
+ return;
+
+ const VarDecl *vdLHS = drLHS ? dyn_cast<VarDecl>(drLHS->getDecl()) : nullptr;
+ const VarDecl *vdRHS = drRHS ? dyn_cast<VarDecl>(drRHS->getDecl()) : nullptr;
+
+ if (!vdLHS && !vdRHS)
+ return;
+
+ // Does either variable appear in increment?
+ const DeclRefExpr *drInc = getIncrementedVar(increment, vdLHS, vdRHS);
+
+ if (!drInc)
+ return;
+
+ // Emit the error. First figure out which DeclRefExpr in the condition
+ // referenced the compared variable.
+ assert(drInc->getDecl());
+ const DeclRefExpr *drCond = vdLHS == drInc->getDecl() ? drLHS : drRHS;
+
+ SmallVector<SourceRange, 2> ranges;
+ SmallString<256> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+
+ os << "Variable '" << drCond->getDecl()->getName()
+ << "' with floating point type '" << drCond->getType().getAsString()
+ << "' should not be used as a loop counter";
+
+ ranges.push_back(drCond->getSourceRange());
+ ranges.push_back(drInc->getSourceRange());
+
+ const char *bugType = "Floating point variable used as loop counter";
+
+ PathDiagnosticLocation FSLoc =
+ PathDiagnosticLocation::createBegin(FS, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), filter.checkName_FloatLoopCounter,
+ bugType, "Security", os.str(),
+ FSLoc, ranges);
+}
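+
+// A sketch of a loop this check flags (FLP30-C):
+// \code
+//   for (float f = 0.0f; f < 1.0f; f += 0.1f) { // warning: variable 'f' with
+//     /* ... */                                 // floating point type used
+//   }                                           // as loop counter
+// \endcode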
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of 'gets' is insecure.
+// Originally: <rdar://problem/6335715>
+// Implements (part of): 300-BSI (buildsecurityin.us-cert.gov)
+// CWE-242: Use of Inherently Dangerous Function
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkCall_gets(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_gets)
+ return;
+
+ const FunctionProtoType *FPT = FD->getType()->getAs<FunctionProtoType>();
+ if (!FPT)
+ return;
+
+ // Verify that the function takes a single argument.
+ if (FPT->getNumParams() != 1)
+ return;
+
+ // Is the argument a 'char*'?
+ const PointerType *PT = FPT->getParamType(0)->getAs<PointerType>();
+ if (!PT)
+ return;
+
+ if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().CharTy)
+ return;
+
+ // Issue a warning.
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), filter.checkName_gets,
+ "Potential buffer overflow in call to 'gets'",
+ "Security",
+ "Call to function 'gets' is extremely insecure as it can "
+ "always result in a buffer overflow",
+ CELoc, CE->getCallee()->getSourceRange());
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of 'getpwd' is insecure.
+// CWE-477: Use of Obsolete Functions
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkCall_getpw(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_getpw)
+ return;
+
+ const FunctionProtoType *FPT = FD->getType()->getAs<FunctionProtoType>();
+ if (!FPT)
+ return;
+
+ // Verify that the function takes two arguments.
+ if (FPT->getNumParams() != 2)
+ return;
+
+ // Verify the first argument type is integer.
+ if (!FPT->getParamType(0)->isIntegralOrUnscopedEnumerationType())
+ return;
+
+ // Verify the second argument type is char*.
+ const PointerType *PT = FPT->getParamType(1)->getAs<PointerType>();
+ if (!PT)
+ return;
+
+ if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().CharTy)
+ return;
+
+ // Issue a warning.
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), filter.checkName_getpw,
+ "Potential buffer overflow in call to 'getpw'",
+ "Security",
+ "The getpw() function is dangerous as it may overflow the "
+ "provided buffer. It is obsoleted by getpwuid().",
+ CELoc, CE->getCallee()->getSourceRange());
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of 'mktemp' is insecure. It is obsoleted by mkstemp().
+// CWE-377: Insecure Temporary File
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkCall_mktemp(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_mktemp) {
+ // Fall back to the security check of looking for enough 'X's in the
+ // format string, since that is a less severe warning.
+ checkCall_mkstemp(CE, FD);
+ return;
+ }
+
+ const FunctionProtoType *FPT = FD->getType()->getAs<FunctionProtoType>();
+ if (!FPT)
+ return;
+
+ // Verify that the function takes a single argument.
+ if (FPT->getNumParams() != 1)
+ return;
+
+ // Verify that the argument is Pointer Type.
+ const PointerType *PT = FPT->getParamType(0)->getAs<PointerType>();
+ if (!PT)
+ return;
+
+ // Verify that the argument is a 'char*'.
+ if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().CharTy)
+ return;
+
+ // Issue a warning.
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), filter.checkName_mktemp,
+ "Potential insecure temporary file in call 'mktemp'",
+ "Security",
+ "Call to function 'mktemp' is insecure as it always "
+ "creates or uses insecure temporary file. Use 'mkstemp' "
+ "instead",
+ CELoc, CE->getCallee()->getSourceRange());
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Use of 'mkstemp', 'mktemp', 'mkdtemp' should contain at least 6 X's.
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkCall_mkstemp(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_mkstemp)
+ return;
+
+ StringRef Name = FD->getIdentifier()->getName();
+ std::pair<signed, signed> ArgSuffix =
+ llvm::StringSwitch<std::pair<signed, signed> >(Name)
+ .Case("mktemp", std::make_pair(0,-1))
+ .Case("mkstemp", std::make_pair(0,-1))
+ .Case("mkdtemp", std::make_pair(0,-1))
+ .Case("mkstemps", std::make_pair(0,1))
+ .Default(std::make_pair(-1, -1));
+
+ assert(ArgSuffix.first >= 0 && "Unsupported function");
+
+ // Check if the number of arguments is consistent with our expectations.
+ unsigned numArgs = CE->getNumArgs();
+ if ((signed) numArgs <= ArgSuffix.first)
+ return;
+
+ const StringLiteral *strArg =
+ dyn_cast<StringLiteral>(CE->getArg((unsigned)ArgSuffix.first)
+ ->IgnoreParenImpCasts());
+
+ // Currently we only handle string literals. It is possible to do better,
+ // either by looking at references to const variables, or by doing real
+ // flow analysis.
+ if (!strArg || strArg->getCharByteWidth() != 1)
+ return;
+
+ // Count the number of X's, taking into account a possible cutoff suffix.
+ StringRef str = strArg->getString();
+ unsigned numX = 0;
+ unsigned n = str.size();
+
+ // Take into account the suffix.
+ unsigned suffix = 0;
+ if (ArgSuffix.second >= 0) {
+ const Expr *suffixEx = CE->getArg((unsigned)ArgSuffix.second);
+ llvm::APSInt Result;
+ if (!suffixEx->EvaluateAsInt(Result, BR.getContext()))
+ return;
+ // FIXME: Issue a warning.
+ if (Result.isNegative())
+ return;
+ suffix = (unsigned) Result.getZExtValue();
+ n = (n > suffix) ? n - suffix : 0;
+ }
+
+ for (unsigned i = 0; i < n; ++i)
+ if (str[i] == 'X') ++numX;
+
+ if (numX >= 6)
+ return;
+
+ // Issue a warning.
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ SmallString<512> buf;
+ llvm::raw_svector_ostream out(buf);
+ out << "Call to '" << Name << "' should have at least 6 'X's in the"
+ " format string to be secure (" << numX << " 'X'";
+ if (numX != 1)
+ out << 's';
+ out << " seen";
+ if (suffix) {
+ out << ", " << suffix << " character";
+ if (suffix > 1)
+ out << 's';
+ out << " used as a suffix";
+ }
+ out << ')';
+ BR.EmitBasicReport(AC->getDecl(), filter.checkName_mkstemp,
+ "Insecure temporary file creation", "Security",
+ out.str(), CELoc, strArg->getSourceRange());
+}
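+
+// Sketches of literal format strings and how they are counted (only string
+// literal arguments are inspected, per the comment above):
+// \code
+//   mkstemp("/tmp/fooXXXX");         // 4 'X's -> warning
+//   mkstemps("/tmp/fooXXXX.txt", 4); // suffix hides ".txt", 4 'X's -> warning
+//   mkstemp("/tmp/fooXXXXXX");       // 6 'X's -> no warning
+// \endcode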
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of 'strcpy' is insecure.
+//
+// CWE-119: Improper Restriction of Operations within
+// the Bounds of a Memory Buffer
+//===----------------------------------------------------------------------===//
+void WalkAST::checkCall_strcpy(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_strcpy)
+ return;
+
+ if (!checkCall_strCommon(CE, FD))
+ return;
+
+ // Issue a warning.
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), filter.checkName_strcpy,
+ "Potential insecure memory buffer bounds restriction in "
+ "call 'strcpy'",
+ "Security",
+ "Call to function 'strcpy' is insecure as it does not "
+ "provide bounding of the memory buffer. Replace "
+ "unbounded copy functions with analogous functions that "
+ "support length arguments such as 'strlcpy'. CWE-119.",
+ CELoc, CE->getCallee()->getSourceRange());
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of 'strcat' is insecure.
+//
+// CWE-119: Improper Restriction of Operations within
+// the Bounds of a Memory Buffer
+//===----------------------------------------------------------------------===//
+void WalkAST::checkCall_strcat(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_strcpy)
+ return;
+
+ if (!checkCall_strCommon(CE, FD))
+ return;
+
+ // Issue a warning.
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), filter.checkName_strcpy,
+ "Potential insecure memory buffer bounds restriction in "
+ "call 'strcat'",
+ "Security",
+ "Call to function 'strcat' is insecure as it does not "
+ "provide bounding of the memory buffer. Replace "
+ "unbounded copy functions with analogous functions that "
+ "support length arguments such as 'strlcat'. CWE-119.",
+ CELoc, CE->getCallee()->getSourceRange());
+}
+
+//===----------------------------------------------------------------------===//
+// Common check for str* functions with no bounds parameters.
+//===----------------------------------------------------------------------===//
+bool WalkAST::checkCall_strCommon(const CallExpr *CE, const FunctionDecl *FD) {
+ const FunctionProtoType *FPT = FD->getType()->getAs<FunctionProtoType>();
+ if (!FPT)
+ return false;
+
+ // Verify the function takes two arguments, three in the _chk version.
+ int numArgs = FPT->getNumParams();
+ if (numArgs != 2 && numArgs != 3)
+ return false;
+
+ // Verify the type for both arguments.
+ for (int i = 0; i < 2; i++) {
+ // Verify that the arguments are pointers.
+ const PointerType *PT = FPT->getParamType(i)->getAs<PointerType>();
+ if (!PT)
+ return false;
+
+ // Verify that the argument is a 'char*'.
+ if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().CharTy)
+ return false;
+ }
+
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Linear congruent random number generators should not be used
+// Originally: <rdar://problem/63371000>
+// CWE-338: Use of Cryptographically Weak PRNG
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkCall_rand(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_rand || !CheckRand)
+ return;
+
+ const FunctionProtoType *FTP = FD->getType()->getAs<FunctionProtoType>();
+ if (!FTP)
+ return;
+
+ if (FTP->getNumParams() == 1) {
+ // Is the argument an 'unsigned short *'?
+ // (Actually any integer type is allowed.)
+ const PointerType *PT = FTP->getParamType(0)->getAs<PointerType>();
+ if (!PT)
+ return;
+
+ if (!PT->getPointeeType()->isIntegralOrUnscopedEnumerationType())
+ return;
+ } else if (FTP->getNumParams() != 0)
+ return;
+
+ // Issue a warning.
+ SmallString<256> buf1;
+ llvm::raw_svector_ostream os1(buf1);
+ os1 << '\'' << *FD << "' is a poor random number generator";
+
+ SmallString<256> buf2;
+ llvm::raw_svector_ostream os2(buf2);
+ os2 << "Function '" << *FD
+ << "' is obsolete because it implements a poor random number generator."
+ << " Use 'arc4random' instead";
+
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), filter.checkName_rand, os1.str(),
+ "Security", os2.str(), CELoc,
+ CE->getCallee()->getSourceRange());
+}
+
+//===----------------------------------------------------------------------===//
+// Check: 'random' should not be used
+// Originally: <rdar://problem/63371000>
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkCall_random(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!CheckRand || !filter.check_rand)
+ return;
+
+ const FunctionProtoType *FTP = FD->getType()->getAs<FunctionProtoType>();
+ if (!FTP)
+ return;
+
+ // Verify that the function takes no argument.
+ if (FTP->getNumParams() != 0)
+ return;
+
+ // Issue a warning.
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), filter.checkName_rand,
+ "'random' is not a secure random number generator",
+ "Security",
+ "The 'random' function produces a sequence of values that "
+ "an adversary may be able to predict. Use 'arc4random' "
+ "instead", CELoc, CE->getCallee()->getSourceRange());
+}
+
+//===----------------------------------------------------------------------===//
+// Check: 'vfork' should not be used.
+// POS33-C: Do not use vfork().
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkCall_vfork(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_vfork)
+ return;
+
+ // All calls to vfork() are insecure, issue a warning.
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), filter.checkName_vfork,
+ "Potential insecure implementation-specific behavior in "
+ "call 'vfork'",
+ "Security",
+ "Call to function 'vfork' is insecure as it can lead to "
+ "denial of service situations in the parent process. "
+ "Replace calls to vfork with calls to the safer "
+ "'posix_spawn' function",
+ CELoc, CE->getCallee()->getSourceRange());
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Should check whether privileges are dropped successfully.
+// Originally: <rdar://problem/6337132>
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkUncheckedReturnValue(CallExpr *CE) {
+ if (!filter.check_UncheckedReturn)
+ return;
+
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (!FD)
+ return;
+
+ if (II_setid[0] == nullptr) {
+ static const char * const identifiers[num_setids] = {
+ "setuid", "setgid", "seteuid", "setegid",
+ "setreuid", "setregid"
+ };
+
+ for (size_t i = 0; i < num_setids; i++)
+ II_setid[i] = &BR.getContext().Idents.get(identifiers[i]);
+ }
+
+ const IdentifierInfo *id = FD->getIdentifier();
+ size_t identifierid;
+
+ for (identifierid = 0; identifierid < num_setids; identifierid++)
+ if (id == II_setid[identifierid])
+ break;
+
+ if (identifierid >= num_setids)
+ return;
+
+ const FunctionProtoType *FTP = FD->getType()->getAs<FunctionProtoType>();
+ if (!FTP)
+ return;
+
+ // Verify that the function takes one or two arguments (depending on
+ // the function).
+ if (FTP->getNumParams() != (identifierid < 4 ? 1 : 2))
+ return;
+
+ // The arguments must be integers.
+ for (unsigned i = 0; i < FTP->getNumParams(); i++)
+ if (!FTP->getParamType(i)->isIntegralOrUnscopedEnumerationType())
+ return;
+
+ // Issue a warning.
+ SmallString<256> buf1;
+ llvm::raw_svector_ostream os1(buf1);
+ os1 << "Return value is not checked in call to '" << *FD << '\'';
+
+ SmallString<256> buf2;
+ llvm::raw_svector_ostream os2(buf2);
+ os2 << "The return value from the call to '" << *FD
+ << "' is not checked. If an error occurs in '" << *FD
+ << "', the following code may execute with unexpected privileges";
+
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), filter.checkName_UncheckedReturn, os1.str(),
+ "Security", os2.str(), CELoc,
+ CE->getCallee()->getSourceRange());
+}
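+
+// Illustrative example (not from this file): the checker warns on the first
+// call below but stays silent on the second, where the result is checked:
+//
+//   setuid(1000);                            // warning: return value ignored
+//   if (setuid(1000) == -1) { /* handle */ } // no warning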
+
+//===----------------------------------------------------------------------===//
+// SecuritySyntaxChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class SecuritySyntaxChecker : public Checker<check::ASTCodeBody> {
+public:
+ ChecksFilter filter;
+
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ WalkAST walker(BR, mgr.getAnalysisDeclContext(D), filter);
+ walker.Visit(D->getBody());
+ }
+};
+}
+
+#define REGISTER_CHECKER(name) \
+ void ento::register##name(CheckerManager &mgr) { \
+ SecuritySyntaxChecker *checker = \
+ mgr.registerChecker<SecuritySyntaxChecker>(); \
+ checker->filter.check_##name = true; \
+ checker->filter.checkName_##name = mgr.getCurrentCheckName(); \
+ }
+
+REGISTER_CHECKER(gets)
+REGISTER_CHECKER(getpw)
+REGISTER_CHECKER(mkstemp)
+REGISTER_CHECKER(mktemp)
+REGISTER_CHECKER(strcpy)
+REGISTER_CHECKER(rand)
+REGISTER_CHECKER(vfork)
+REGISTER_CHECKER(FloatLoopCounter)
+REGISTER_CHECKER(UncheckedReturn)
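+
+// For illustration, REGISTER_CHECKER(gets) above expands to roughly:
+//
+//   void ento::registergets(CheckerManager &mgr) {
+//     SecuritySyntaxChecker *checker =
+//         mgr.registerChecker<SecuritySyntaxChecker>();
+//     checker->filter.check_gets = true;
+//     checker->filter.checkName_gets = mgr.getCurrentCheckName();
+//   }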
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
new file mode 100644
index 0000000..e079a8c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
@@ -0,0 +1,93 @@
+//==- CheckSizeofPointer.cpp - Check for sizeof on pointers ------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a check for unintended use of sizeof() on pointer
+// expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class WalkAST : public StmtVisitor<WalkAST> {
+ BugReporter &BR;
+ const CheckerBase *Checker;
+ AnalysisDeclContext* AC;
+
+public:
+ WalkAST(BugReporter &br, const CheckerBase *checker, AnalysisDeclContext *ac)
+ : BR(br), Checker(checker), AC(ac) {}
+ void VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E);
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+ void VisitChildren(Stmt *S);
+};
+}
+
+void WalkAST::VisitChildren(Stmt *S) {
+ for (Stmt *Child : S->children())
+ if (Child)
+ Visit(Child);
+}
+
+// CWE-467: Use of sizeof() on a Pointer Type
+void WalkAST::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
+ if (E->getKind() != UETT_SizeOf)
+ return;
+
+ // If an explicit type is used in the code, usually the coder knows what they are
+ // doing.
+ if (E->isArgumentType())
+ return;
+
+ QualType T = E->getTypeOfArgument();
+ if (T->isPointerType()) {
+
+ // Many false positives have the form 'sizeof *p'. This is reasonable
+ // because people know what they are doing when they intentionally
+ // dereference the pointer.
+ Expr *ArgEx = E->getArgumentExpr();
+ if (!isa<DeclRefExpr>(ArgEx->IgnoreParens()))
+ return;
+
+ PathDiagnosticLocation ELoc =
+ PathDiagnosticLocation::createBegin(E, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), Checker,
+ "Potential unintended use of sizeof() on pointer type",
+ categories::LogicError,
+ "The code calls sizeof() on a pointer type. "
+ "This can produce an unexpected result.",
+ ELoc, ArgEx->getSourceRange());
+ }
+}
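+
+// Illustrative examples (not part of this file): given 'int *p;', the checker
+// warns on the first expression below and stays silent on the other two:
+//
+//   sizeof(p);      // warning: sizeof() applied to a pointer variable
+//   sizeof(*p);     // no warning: the dereference is explicit
+//   sizeof(int *);  // no warning: an explicit type is used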
+
+//===----------------------------------------------------------------------===//
+// SizeofPointerChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class SizeofPointerChecker : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ WalkAST walker(BR, this, mgr.getAnalysisDeclContext(D));
+ walker.Visit(D->getBody());
+ }
+};
+}
+
+void ento::registerSizeofPointerChecker(CheckerManager &mgr) {
+ mgr.registerChecker<SizeofPointerChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
new file mode 100644
index 0000000..37b8448
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
@@ -0,0 +1,322 @@
+//= CheckerDocumentation.cpp - Documentation checker ---------------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker lists all the checker callbacks and provides documentation for
+// checker writers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+
+using namespace clang;
+using namespace ento;
+
+// All checkers should be placed into the anonymous namespace.
+// We place the CheckerDocumentation inside the ento namespace to make it
+// visible in doxygen.
+namespace clang {
+namespace ento {
+
+/// This checker documents the callback functions checkers can use to implement
+/// custom handling of specific events during path exploration as well
+/// as reporting bugs. Most of the callbacks are targeted at path-sensitive
+/// checking.
+///
+/// \sa CheckerContext
+class CheckerDocumentation : public Checker< check::PreStmt<ReturnStmt>,
+ check::PostStmt<DeclStmt>,
+ check::PreObjCMessage,
+ check::PostObjCMessage,
+ check::ObjCMessageNil,
+ check::PreCall,
+ check::PostCall,
+ check::BranchCondition,
+ check::Location,
+ check::Bind,
+ check::DeadSymbols,
+ check::EndFunction,
+ check::EndAnalysis,
+ check::EndOfTranslationUnit,
+ eval::Call,
+ eval::Assume,
+ check::LiveSymbols,
+ check::RegionChanges,
+ check::PointerEscape,
+ check::ConstPointerEscape,
+ check::Event<ImplicitNullDerefEvent>,
+ check::ASTDecl<FunctionDecl> > {
+public:
+
+ /// \brief Pre-visit the Statement.
+ ///
+ /// The method will be called before the analyzer core processes the
+ /// statement. The notification is performed for every explored CFGElement,
+ /// which does not include the control flow statements such as IfStmt. The
+ /// callback can be specialized to be called with any subclass of Stmt.
+ ///
+ /// See checkBranchCondition() callback for performing custom processing of
+ /// the branching statements.
+ ///
+ /// check::PreStmt<ReturnStmt>
+  void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const {}
+
+ /// \brief Post-visit the Statement.
+ ///
+ /// The method will be called after the analyzer core processes the
+ /// statement. The notification is performed for every explored CFGElement,
+ /// which does not include the control flow statements such as IfStmt. The
+ /// callback can be specialized to be called with any subclass of Stmt.
+ ///
+ /// check::PostStmt<DeclStmt>
+ void checkPostStmt(const DeclStmt *DS, CheckerContext &C) const;
+
+ /// \brief Pre-visit the Objective C message.
+ ///
+ /// This will be called before the analyzer core processes the method call.
+ /// This is called for any action which produces an Objective-C message send,
+ /// including explicit message syntax and property access.
+ ///
+ /// check::PreObjCMessage
+ void checkPreObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const {}
+
+ /// \brief Post-visit the Objective C message.
+ /// \sa checkPreObjCMessage()
+ ///
+ /// check::PostObjCMessage
+ void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const {}
+
+ /// \brief Visit an Objective-C message whose receiver is nil.
+ ///
+ /// This will be called when the analyzer core processes a method call whose
+ /// receiver is definitely nil. In this case, check{Pre/Post}ObjCMessage and
+ /// check{Pre/Post}Call will not be called.
+ ///
+ /// check::ObjCMessageNil
+ void checkObjCMessageNil(const ObjCMethodCall &M, CheckerContext &C) const {}
+
+ /// \brief Pre-visit an abstract "call" event.
+ ///
+ /// This is used for checkers that want to check arguments or attributed
+ /// behavior for functions and methods no matter how they are being invoked.
+ ///
+ /// Note that this includes ALL cross-body invocations, so if you want to
+ /// limit your checks to, say, function calls, you should test for that at the
+ /// beginning of your callback function.
+ ///
+ /// check::PreCall
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const {}
+
+ /// \brief Post-visit an abstract "call" event.
+ /// \sa checkPreObjCMessage()
+ ///
+ /// check::PostCall
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const {}
+
+ /// \brief Pre-visit of the condition statement of a branch (such as IfStmt).
+ void checkBranchCondition(const Stmt *Condition, CheckerContext &Ctx) const {}
+
+ /// \brief Called on a load from and a store to a location.
+ ///
+ /// The method will be called each time a location (pointer) value is
+ /// accessed.
+ /// \param Loc The value of the location (pointer).
+  /// \param IsLoad The flag specifying whether the access is a load or a store.
+  /// \param S The statement during which the access occurs.
+ ///
+ /// check::Location
+ void checkLocation(SVal Loc, bool IsLoad, const Stmt *S,
+ CheckerContext &) const {}
+
+ /// \brief Called on binding of a value to a location.
+ ///
+ /// \param Loc The value of the location (pointer).
+ /// \param Val The value which will be stored at the location Loc.
+ /// \param S The bind is performed while processing the statement S.
+ ///
+ /// check::Bind
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &) const {}
+
+
+ /// \brief Called whenever a symbol becomes dead.
+ ///
+ /// This callback should be used by the checkers to aggressively clean
+ /// up/reduce the checker state, which is important for reducing the overall
+ /// memory usage. Specifically, if a checker keeps symbol specific information
+  /// in the state, it can and should be dropped after the symbol becomes dead.
+  /// In addition, reporting a bug as soon as the symbol becomes dead leads to
+ /// more precise diagnostics. (For example, one should report that a malloced
+ /// variable is not freed right after it goes out of scope.)
+ ///
+ /// \param SR The SymbolReaper object can be queried to determine which
+ /// symbols are dead.
+ ///
+ /// check::DeadSymbols
+ void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const {}
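+
+  // A typical (hypothetical) implementation walks a checker-defined program
+  // state map (registered elsewhere, e.g. with REGISTER_MAP_WITH_PROGRAMSTATE)
+  // and prunes entries whose symbols are dead:
+  //
+  //   ProgramStateRef State = C.getState();
+  //   for (const auto &Entry : State->get<MySymbolMap>())
+  //     if (SR.isDead(Entry.first))
+  //       State = State->remove<MySymbolMap>(Entry.first);
+  //   C.addTransition(State);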
+
+ /// \brief Called when the analyzer core reaches the end of a
+ /// function being analyzed.
+ ///
+ /// check::EndFunction
+ void checkEndFunction(CheckerContext &Ctx) const {}
+
+  /// \brief Called after all the paths in the ExplodedGraph reach the end of
+  /// path, i.e. the symbolic execution graph is fully explored.
+ ///
+ /// This callback should be used in cases when a checker needs to have a
+ /// global view of the information generated on all paths. For example, to
+  /// compare execution summaries/results across several paths.
+ /// See IdempotentOperationChecker for a usage example.
+ ///
+ /// check::EndAnalysis
+ void checkEndAnalysis(ExplodedGraph &G,
+ BugReporter &BR,
+ ExprEngine &Eng) const {}
+
+ /// \brief Called after analysis of a TranslationUnit is complete.
+ ///
+ /// check::EndOfTranslationUnit
+ void checkEndOfTranslationUnit(const TranslationUnitDecl *TU,
+ AnalysisManager &Mgr,
+ BugReporter &BR) const {}
+
+
+ /// \brief Evaluates function call.
+ ///
+  /// The analysis core treats all function calls in the same way. However, some
+ /// functions have special meaning, which should be reflected in the program
+ /// state. This callback allows a checker to provide domain specific knowledge
+ /// about the particular functions it knows about.
+ ///
+ /// \returns true if the call has been successfully evaluated
+  /// and false otherwise. Note that only one checker can evaluate a call. If
+  /// more than one checker claims it can evaluate the same call, the
+  /// first one wins.
+ ///
+ /// eval::Call
+ bool evalCall(const CallExpr *CE, CheckerContext &C) const { return true; }
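+
+  // A hypothetical evalCall body would first identify the callee and return
+  // false for calls it does not model, e.g.:
+  //
+  //   const FunctionDecl *FD = C.getCalleeDecl(CE);
+  //   if (!FD || !FD->getIdentifier() ||
+  //       FD->getName() != "some_modeled_function")
+  //     return false;
+  //   // ... bind the modeled effect into the state, addTransition() ...
+  //   return true;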
+
+ /// \brief Handles assumptions on symbolic values.
+ ///
+ /// This method is called when a symbolic expression is assumed to be true or
+ /// false. For example, the assumptions are performed when evaluating a
+  /// condition at a branch. The callback allows checkers to track the assumptions
+ /// performed on the symbols of interest and change the state accordingly.
+ ///
+ /// eval::Assume
+ ProgramStateRef evalAssume(ProgramStateRef State,
+ SVal Cond,
+ bool Assumption) const { return State; }
+
+  /// \brief Allows modifying the SymbolReaper object. For example, checkers
+  /// can explicitly register symbols of interest as live. These symbols will
+  /// not be marked dead and removed.
+ ///
+ /// check::LiveSymbols
+ void checkLiveSymbols(ProgramStateRef State, SymbolReaper &SR) const {}
+
+  /// \brief Called to determine if the checker currently needs to know when
+  /// the contents of any regions change.
+ ///
+ /// Since it is not necessarily cheap to compute which regions are being
+ /// changed, this allows the analyzer core to skip the more expensive
+ /// #checkRegionChanges when no checkers are tracking any state.
+ bool wantsRegionChangeUpdate(ProgramStateRef St) const { return true; }
+
+ /// \brief Called when the contents of one or more regions change.
+ ///
+ /// This can occur in many different ways: an explicit bind, a blanket
+ /// invalidation of the region contents, or by passing a region to a function
+ /// call whose behavior the analyzer cannot model perfectly.
+ ///
+ /// \param State The current program state.
+ /// \param Invalidated A set of all symbols potentially touched by the change.
+ /// \param ExplicitRegions The regions explicitly requested for invalidation.
+ /// For a function call, this would be the arguments. For a bind, this
+ /// would be the region being bound to.
+  /// \param Regions The transitive closure of regions accessible from
+  ///        \p ExplicitRegions, i.e. all regions that may have been touched
+ /// by this change. For a simple bind, this list will be the same as
+ /// \p ExplicitRegions, since a bind does not affect the contents of
+ /// anything accessible through the base region.
+ /// \param Call The opaque call triggering this invalidation. Will be 0 if the
+ /// change was not triggered by a call.
+ ///
+ /// Note that this callback will not be invoked unless
+ /// #wantsRegionChangeUpdate returns \c true.
+ ///
+ /// check::RegionChanges
+ ProgramStateRef
+ checkRegionChanges(ProgramStateRef State,
+ const InvalidatedSymbols *Invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallEvent *Call) const {
+ return State;
+ }
+
+ /// \brief Called when pointers escape.
+ ///
+ /// This notifies the checkers about pointer escape, which occurs whenever
+ /// the analyzer cannot track the symbol any more. For example, as a
+ /// result of assigning a pointer into a global or when it's passed to a
+ /// function call the analyzer cannot model.
+ ///
+ /// \param State The state at the point of escape.
+ /// \param Escaped The list of escaped symbols.
+ /// \param Call The corresponding CallEvent, if the symbols escape as
+ /// parameters to the given call.
+ /// \param Kind How the symbols have escaped.
+ /// \returns Checkers can modify the state by returning a new state.
+ ProgramStateRef checkPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind) const {
+ return State;
+ }
+
+ /// \brief Called when const pointers escape.
+ ///
+ /// Note: in most cases checkPointerEscape callback is sufficient.
+ /// \sa checkPointerEscape
+ ProgramStateRef checkConstPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind) const {
+ return State;
+ }
+
+ /// check::Event<ImplicitNullDerefEvent>
+ void checkEvent(ImplicitNullDerefEvent Event) const {}
+
+ /// \brief Check every declaration in the AST.
+ ///
+ /// An AST traversal callback, which should only be used when the checker is
+ /// not path sensitive. It will be called for every Declaration in the AST and
+ /// can be specialized to only be called on subclasses of Decl, for example,
+ /// FunctionDecl.
+ ///
+ /// check::ASTDecl<FunctionDecl>
+ void checkASTDecl(const FunctionDecl *D,
+ AnalysisManager &Mgr,
+ BugReporter &BR) const {}
+
+};
+
+void CheckerDocumentation::checkPostStmt(const DeclStmt *DS,
+ CheckerContext &C) const {
+ return;
+}
+
+} // end namespace ento
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td
new file mode 100644
index 0000000..8133d29
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td
@@ -0,0 +1,647 @@
+//===--- Checkers.td - Static Analyzer Checkers --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+include "clang/StaticAnalyzer/Checkers/CheckerBase.td"
+
+//===----------------------------------------------------------------------===//
+// Packages.
+//===----------------------------------------------------------------------===//
+
+// The Alpha package is for checkers that have too many false positives to be
+// turned on by default. The hierarchy under Alpha should be organized in the
+// hierarchy checkers would have had if they were truly at the top level.
+// (For example, a Cocoa-specific checker that is alpha should be in
+// alpha.osx.cocoa).
+def Alpha : Package<"alpha">;
+
+def Core : Package<"core">;
+def CoreBuiltin : Package<"builtin">, InPackage<Core>;
+def CoreUninitialized : Package<"uninitialized">, InPackage<Core>;
+def CoreAlpha : Package<"core">, InPackage<Alpha>, Hidden;
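+
+// For example (illustrative), a checker defined as Checker<"Foo"> under
+// 'let ParentPackage = CoreAlpha' is surfaced to users as "alpha.core.Foo".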
+
+// The OptIn package is for checkers that are not alpha and that would normally
+// be on by default but where the driver does not have enough information to
+// determine when they are applicable. For example, localizability checkers fit
+// this criterion because the driver cannot determine whether a project is
+// localized or not -- this is best determined at the IDE or build-system level.
+//
+// The checker hierarchy under OptIn should mirror that in Alpha: checkers
+// should be organized as if they were at the top level.
+//
+// Note: OptIn is *not* intended for checkers that are too noisy to be on by
+// default. Such checkers belong in the alpha package.
+def OptIn : Package<"optin">;
+
+def Nullability : Package<"nullability">;
+
+def Cplusplus : Package<"cplusplus">;
+def CplusplusAlpha : Package<"cplusplus">, InPackage<Alpha>, Hidden;
+
+def DeadCode : Package<"deadcode">;
+def DeadCodeAlpha : Package<"deadcode">, InPackage<Alpha>, Hidden;
+
+def Performance : Package<"performance">, InPackage<OptIn>;
+
+def Security : Package <"security">;
+def InsecureAPI : Package<"insecureAPI">, InPackage<Security>;
+def SecurityAlpha : Package<"security">, InPackage<Alpha>, Hidden;
+def Taint : Package<"taint">, InPackage<SecurityAlpha>, Hidden;
+
+def Unix : Package<"unix">;
+def UnixAlpha : Package<"unix">, InPackage<Alpha>, Hidden;
+def CString : Package<"cstring">, InPackage<Unix>, Hidden;
+def CStringAlpha : Package<"cstring">, InPackage<UnixAlpha>, Hidden;
+
+def OSX : Package<"osx">;
+def OSXAlpha : Package<"osx">, InPackage<Alpha>, Hidden;
+def OSXOptIn : Package<"osx">, InPackage<OptIn>;
+
+def Cocoa : Package<"cocoa">, InPackage<OSX>;
+def CocoaAlpha : Package<"cocoa">, InPackage<OSXAlpha>, Hidden;
+def CocoaOptIn : Package<"cocoa">, InPackage<OSXOptIn>;
+
+def CoreFoundation : Package<"coreFoundation">, InPackage<OSX>;
+def Containers : Package<"containers">, InPackage<CoreFoundation>;
+
+def LocalizabilityAlpha : Package<"localizability">, InPackage<CocoaAlpha>;
+def LocalizabilityOptIn : Package<"localizability">, InPackage<CocoaOptIn>;
+
+def LLVM : Package<"llvm">;
+def Debug : Package<"debug">;
+
+//===----------------------------------------------------------------------===//
+// Core Checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = Core in {
+
+def DereferenceChecker : Checker<"NullDereference">,
+ HelpText<"Check for dereferences of null pointers">,
+ DescFile<"DereferenceChecker.cpp">;
+
+def CallAndMessageChecker : Checker<"CallAndMessage">,
+ HelpText<"Check for logical errors for function calls and Objective-C message expressions (e.g., uninitialized arguments, null function pointers)">,
+ DescFile<"CallAndMessageChecker.cpp">;
+
+def NonNullParamChecker : Checker<"NonNullParamChecker">,
+ HelpText<"Check for null pointers passed as arguments to a function whose arguments are references or marked with the 'nonnull' attribute">,
+ DescFile<"NonNullParamChecker.cpp">;
+
+def VLASizeChecker : Checker<"VLASize">,
+ HelpText<"Check for declarations of VLA of undefined or zero size">,
+ DescFile<"VLASizeChecker.cpp">;
+
+def DivZeroChecker : Checker<"DivideZero">,
+ HelpText<"Check for division by zero">,
+ DescFile<"DivZeroChecker.cpp">;
+
+def UndefResultChecker : Checker<"UndefinedBinaryOperatorResult">,
+ HelpText<"Check for undefined results of binary operators">,
+ DescFile<"UndefResultChecker.cpp">;
+
+def StackAddrEscapeChecker : Checker<"StackAddressEscape">,
+ HelpText<"Check that addresses to stack memory do not escape the function">,
+ DescFile<"StackAddrEscapeChecker.cpp">;
+
+def DynamicTypePropagation : Checker<"DynamicTypePropagation">,
+ HelpText<"Generate dynamic type information">,
+ DescFile<"DynamicTypePropagation.cpp">;
+
+} // end "core"
+
+let ParentPackage = CoreAlpha in {
+
+def BoolAssignmentChecker : Checker<"BoolAssignment">,
+ HelpText<"Warn about assigning non-{0,1} values to Boolean variables">,
+ DescFile<"BoolAssignmentChecker.cpp">;
+
+def CastSizeChecker : Checker<"CastSize">,
+ HelpText<"Check when casting a malloc'ed type T, whether the size is a multiple of the size of T">,
+ DescFile<"CastSizeChecker.cpp">;
+
+def CastToStructChecker : Checker<"CastToStruct">,
+ HelpText<"Check for cast from non-struct pointer to struct pointer">,
+ DescFile<"CastToStructChecker.cpp">;
+
+def IdenticalExprChecker : Checker<"IdenticalExpr">,
+ HelpText<"Warn about unintended use of identical expressions in operators">,
+ DescFile<"IdenticalExprChecker.cpp">;
+
+def FixedAddressChecker : Checker<"FixedAddr">,
+ HelpText<"Check for assignment of a fixed address to a pointer">,
+ DescFile<"FixedAddressChecker.cpp">;
+
+def PointerArithChecker : Checker<"PointerArithm">,
+ HelpText<"Check for pointer arithmetic on locations other than array elements">,
+  DescFile<"PointerArithChecker.cpp">;
+
+def PointerSubChecker : Checker<"PointerSub">,
+ HelpText<"Check for pointer subtractions on two pointers pointing to different memory chunks">,
+  DescFile<"PointerSubChecker.cpp">;
+
+def SizeofPointerChecker : Checker<"SizeofPtr">,
+ HelpText<"Warn about unintended use of sizeof() on pointer expressions">,
+ DescFile<"CheckSizeofPointer.cpp">;
+
+def CallAndMessageUnInitRefArg : Checker<"CallAndMessageUnInitRefArg">,
+ HelpText<"Check for logical errors for function calls and Objective-C message expressions (e.g., uninitialized arguments, null function pointers, and pointer to undefined variables)">,
+ DescFile<"CallAndMessageChecker.cpp">;
+
+def TestAfterDivZeroChecker : Checker<"TestAfterDivZero">,
+ HelpText<"Check for division by variable that is later compared against 0. Either the comparison is useless or there is division by zero.">,
+ DescFile<"TestAfterDivZeroChecker.cpp">;
+
+def DynamicTypeChecker : Checker<"DynamicTypeChecker">,
+ HelpText<"Check for cases where the dynamic and the static type of an object are unrelated.">,
+ DescFile<"DynamicTypeChecker.cpp">;
+
+} // end "alpha.core"
+
+let ParentPackage = Nullability in {
+
+def NullPassedToNonnullChecker : Checker<"NullPassedToNonnull">,
+ HelpText<"Warns when a null pointer is passed to a pointer which has a _Nonnull type.">,
+ DescFile<"NullabilityChecker.cpp">;
+
+def NullReturnedFromNonnullChecker : Checker<"NullReturnedFromNonnull">,
+ HelpText<"Warns when a null pointer is returned from a function that has _Nonnull return type.">,
+ DescFile<"NullabilityChecker.cpp">;
+
+def NullableDereferencedChecker : Checker<"NullableDereferenced">,
+ HelpText<"Warns when a nullable pointer is dereferenced.">,
+ DescFile<"NullabilityChecker.cpp">;
+
+def NullablePassedToNonnullChecker : Checker<"NullablePassedToNonnull">,
+ HelpText<"Warns when a nullable pointer is passed to a pointer which has a _Nonnull type.">,
+ DescFile<"NullabilityChecker.cpp">;
+
+def NullableReturnedFromNonnullChecker : Checker<"NullableReturnedFromNonnull">,
+ HelpText<"Warns when a nullable pointer is returned from a function that has _Nonnull return type.">,
+ DescFile<"NullabilityChecker.cpp">;
+
+} // end "nullability"
+
+//===----------------------------------------------------------------------===//
+// Evaluate "builtin" functions.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = CoreBuiltin in {
+
+def NoReturnFunctionChecker : Checker<"NoReturnFunctions">,
+ HelpText<"Evaluate \"panic\" functions that are known to not return to the caller">,
+ DescFile<"NoReturnFunctionChecker.cpp">;
+
+def BuiltinFunctionChecker : Checker<"BuiltinFunctions">,
+ HelpText<"Evaluate compiler builtin functions (e.g., alloca())">,
+ DescFile<"BuiltinFunctionChecker.cpp">;
+
+} // end "core.builtin"
+
+//===----------------------------------------------------------------------===//
+// Uninitialized values checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = CoreUninitialized in {
+
+def UndefinedArraySubscriptChecker : Checker<"ArraySubscript">,
+ HelpText<"Check for uninitialized values used as array subscripts">,
+ DescFile<"UndefinedArraySubscriptChecker.cpp">;
+
+def UndefinedAssignmentChecker : Checker<"Assign">,
+ HelpText<"Check for assigning uninitialized values">,
+ DescFile<"UndefinedAssignmentChecker.cpp">;
+
+def UndefBranchChecker : Checker<"Branch">,
+ HelpText<"Check for uninitialized values used as branch conditions">,
+ DescFile<"UndefBranchChecker.cpp">;
+
+def UndefCapturedBlockVarChecker : Checker<"CapturedBlockVariable">,
+ HelpText<"Check for blocks that capture uninitialized values">,
+ DescFile<"UndefCapturedBlockVarChecker.cpp">;
+
+def ReturnUndefChecker : Checker<"UndefReturn">,
+ HelpText<"Check for uninitialized values being returned to the caller">,
+ DescFile<"ReturnUndefChecker.cpp">;
+
+} // end "core.uninitialized"
+
+//===----------------------------------------------------------------------===//
+// C++ checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = Cplusplus in {
+
+def NewDeleteChecker : Checker<"NewDelete">,
+ HelpText<"Check for double-free and use-after-free problems. Traces memory managed by new/delete.">,
+ DescFile<"MallocChecker.cpp">;
+
+def NewDeleteLeaksChecker : Checker<"NewDeleteLeaks">,
+ HelpText<"Check for memory leaks. Traces memory managed by new/delete.">,
+ DescFile<"MallocChecker.cpp">;
+
+} // end: "cplusplus"
+
+let ParentPackage = CplusplusAlpha in {
+
+def VirtualCallChecker : Checker<"VirtualCall">,
+ HelpText<"Check virtual function calls during construction or destruction">,
+ DescFile<"VirtualCallChecker.cpp">;
+
+} // end: "alpha.cplusplus"
+
+//===----------------------------------------------------------------------===//
+// Deadcode checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = DeadCode in {
+
+def DeadStoresChecker : Checker<"DeadStores">,
+ HelpText<"Check for values stored to variables that are never read afterwards">,
+ DescFile<"DeadStoresChecker.cpp">;
+} // end DeadCode
+
+let ParentPackage = DeadCodeAlpha in {
+
+def UnreachableCodeChecker : Checker<"UnreachableCode">,
+ HelpText<"Check unreachable code">,
+ DescFile<"UnreachableCodeChecker.cpp">;
+
+} // end "alpha.deadcode"
+
+//===----------------------------------------------------------------------===//
+// Performance checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = Performance in {
+
+def PaddingChecker : Checker<"Padding">,
+ HelpText<"Check for excessively padded structs.">,
+ DescFile<"PaddingChecker.cpp">;
+
+} // end: "padding"
+
+//===----------------------------------------------------------------------===//
+// Security checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = InsecureAPI in {
+ def gets : Checker<"gets">,
+ HelpText<"Warn on uses of the 'gets' function">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def getpw : Checker<"getpw">,
+ HelpText<"Warn on uses of the 'getpw' function">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def mktemp : Checker<"mktemp">,
+ HelpText<"Warn on uses of the 'mktemp' function">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def mkstemp : Checker<"mkstemp">,
+ HelpText<"Warn when 'mkstemp' is passed fewer than 6 X's in the format string">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def rand : Checker<"rand">,
+ HelpText<"Warn on uses of the 'rand', 'random', and related functions">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def strcpy : Checker<"strcpy">,
+ HelpText<"Warn on uses of the 'strcpy' and 'strcat' functions">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def vfork : Checker<"vfork">,
+ HelpText<"Warn on uses of the 'vfork' function">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def UncheckedReturn : Checker<"UncheckedReturn">,
+    HelpText<"Warn on uses of functions whose return values must always be checked">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+}
+let ParentPackage = Security in {
+ def FloatLoopCounter : Checker<"FloatLoopCounter">,
+ HelpText<"Warn on using a floating point value as a loop counter (CERT: FLP30-C, FLP30-CPP)">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+}
+
+let ParentPackage = SecurityAlpha in {
+
+def ArrayBoundChecker : Checker<"ArrayBound">,
+ HelpText<"Warn about buffer overflows (older checker)">,
+ DescFile<"ArrayBoundChecker.cpp">;
+
+def ArrayBoundCheckerV2 : Checker<"ArrayBoundV2">,
+ HelpText<"Warn about buffer overflows (newer checker)">,
+ DescFile<"ArrayBoundCheckerV2.cpp">;
+
+def ReturnPointerRangeChecker : Checker<"ReturnPtrRange">,
+ HelpText<"Check for an out-of-bound pointer being returned to callers">,
+ DescFile<"ReturnPointerRangeChecker.cpp">;
+
+def MallocOverflowSecurityChecker : Checker<"MallocOverflow">,
+ HelpText<"Check for overflows in the arguments to malloc()">,
+ DescFile<"MallocOverflowSecurityChecker.cpp">;
+
+} // end "alpha.security"
+
+//===----------------------------------------------------------------------===//
+// Taint checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = Taint in {
+
+def GenericTaintChecker : Checker<"TaintPropagation">,
+ HelpText<"Generate taint information used by other checkers">,
+ DescFile<"GenericTaintChecker.cpp">;
+
+} // end "alpha.security.taint"
+
+//===----------------------------------------------------------------------===//
+// Unix API checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = Unix in {
+
+def UnixAPIChecker : Checker<"API">,
+ HelpText<"Check calls to various UNIX/Posix functions">,
+ DescFile<"UnixAPIChecker.cpp">;
+
+def MallocChecker: Checker<"Malloc">,
+ HelpText<"Check for memory leaks, double free, and use-after-free problems. Traces memory managed by malloc()/free().">,
+ DescFile<"MallocChecker.cpp">;
+
+def MallocSizeofChecker : Checker<"MallocSizeof">,
+ HelpText<"Check for dubious malloc arguments involving sizeof">,
+ DescFile<"MallocSizeofChecker.cpp">;
+
+def MismatchedDeallocatorChecker : Checker<"MismatchedDeallocator">,
+ HelpText<"Check for mismatched deallocators.">,
+ DescFile<"MallocChecker.cpp">;
+
+def VforkChecker : Checker<"Vfork">,
+ HelpText<"Check for proper usage of vfork">,
+ DescFile<"VforkChecker.cpp">;
+
+} // end "unix"
+
+let ParentPackage = UnixAlpha in {
+
+def ChrootChecker : Checker<"Chroot">,
+ HelpText<"Check improper use of chroot">,
+ DescFile<"ChrootChecker.cpp">;
+
+def PthreadLockChecker : Checker<"PthreadLock">,
+ HelpText<"Simple lock -> unlock checker">,
+ DescFile<"PthreadLockChecker.cpp">;
+
+def StreamChecker : Checker<"Stream">,
+ HelpText<"Check stream handling functions">,
+ DescFile<"StreamChecker.cpp">;
+
+def SimpleStreamChecker : Checker<"SimpleStream">,
+ HelpText<"Check for misuses of stream APIs">,
+ DescFile<"SimpleStreamChecker.cpp">;
+
+} // end "alpha.unix"
+
+let ParentPackage = CString in {
+
+def CStringNullArg : Checker<"NullArg">,
+ HelpText<"Check for null pointers being passed as arguments to C string functions">,
+ DescFile<"CStringChecker.cpp">;
+
+def CStringSyntaxChecker : Checker<"BadSizeArg">,
+ HelpText<"Check the size argument passed into C string functions for common erroneous patterns">,
+ DescFile<"CStringSyntaxChecker.cpp">;
+}
+
+let ParentPackage = CStringAlpha in {
+
+def CStringOutOfBounds : Checker<"OutOfBounds">,
+ HelpText<"Check for out-of-bounds access in string functions">,
+ DescFile<"CStringChecker.cpp">;
+
+def CStringBufferOverlap : Checker<"BufferOverlap">,
+ HelpText<"Checks for overlap in two buffer arguments">,
+ DescFile<"CStringChecker.cpp">;
+
+def CStringNotNullTerm : Checker<"NotNullTerminated">,
+ HelpText<"Check for arguments which are not null-terminating strings">,
+ DescFile<"CStringChecker.cpp">;
+}
+
+//===----------------------------------------------------------------------===//
+// Mac OS X, Cocoa, and Core Foundation checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = OSX in {
+
+def MacOSXAPIChecker : Checker<"API">,
+ InPackage<OSX>,
+ HelpText<"Check for proper uses of various Apple APIs">,
+ DescFile<"MacOSXAPIChecker.cpp">;
+
+def MacOSKeychainAPIChecker : Checker<"SecKeychainAPI">,
+ InPackage<OSX>,
+ HelpText<"Check for proper uses of Secure Keychain APIs">,
+ DescFile<"MacOSKeychainAPIChecker.cpp">;
+
+} // end "osx"
+
+let ParentPackage = Cocoa in {
+
+def ObjCAtSyncChecker : Checker<"AtSync">,
+ HelpText<"Check for nil pointers used as mutexes for @synchronized">,
+ DescFile<"ObjCAtSyncChecker.cpp">;
+
+def NilArgChecker : Checker<"NilArg">,
+ HelpText<"Check for prohibited nil arguments to ObjC method calls">,
+ DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def ClassReleaseChecker : Checker<"ClassRelease">,
+ HelpText<"Check for sending 'retain', 'release', or 'autorelease' directly to a Class">,
+ DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def VariadicMethodTypeChecker : Checker<"VariadicMethodTypes">,
+ HelpText<"Check for passing non-Objective-C types to variadic collection "
+ "initialization methods that expect only Objective-C types">,
+ DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def NSAutoreleasePoolChecker : Checker<"NSAutoreleasePool">,
+ HelpText<"Warn for suboptimal uses of NSAutoreleasePool in Objective-C GC mode">,
+ DescFile<"NSAutoreleasePoolChecker.cpp">;
+
+def ObjCMethSigsChecker : Checker<"IncompatibleMethodTypes">,
+ HelpText<"Warn about Objective-C method signatures with type incompatibilities">,
+ DescFile<"CheckObjCInstMethSignature.cpp">;
+
+def ObjCUnusedIvarsChecker : Checker<"UnusedIvars">,
+ HelpText<"Warn about private ivars that are never used">,
+ DescFile<"ObjCUnusedIVarsChecker.cpp">;
+
+def ObjCSelfInitChecker : Checker<"SelfInit">,
+ HelpText<"Check that 'self' is properly initialized inside an initializer method">,
+ DescFile<"ObjCSelfInitChecker.cpp">;
+
+def ObjCLoopChecker : Checker<"Loops">,
+ HelpText<"Improved modeling of loops using Cocoa collection types">,
+ DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def ObjCNonNilReturnValueChecker : Checker<"NonNilReturnValue">,
+ HelpText<"Model the APIs that are guaranteed to return a non-nil value">,
+ DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def ObjCSuperCallChecker : Checker<"MissingSuperCall">,
+ HelpText<"Warn about Objective-C methods that lack a necessary call to super">,
+ DescFile<"ObjCMissingSuperCallChecker.cpp">;
+
+def NSErrorChecker : Checker<"NSError">,
+ HelpText<"Check usage of NSError** parameters">,
+ DescFile<"NSErrorChecker.cpp">;
+
+def RetainCountChecker : Checker<"RetainCount">,
+ HelpText<"Check for leaks and improper reference count management">,
+ DescFile<"RetainCountChecker.cpp">;
+
+def ObjCGenericsChecker : Checker<"ObjCGenerics">,
+ HelpText<"Check for type errors when using Objective-C generics">,
+ DescFile<"DynamicTypePropagation.cpp">;
+
+} // end "osx.cocoa"
+
+let ParentPackage = CocoaAlpha in {
+
+def ObjCDeallocChecker : Checker<"Dealloc">,
+ HelpText<"Warn about Objective-C classes that lack a correct implementation of -dealloc">,
+ DescFile<"CheckObjCDealloc.cpp">;
+
+def InstanceVariableInvalidation : Checker<"InstanceVariableInvalidation">,
+ HelpText<"Check that the invalidatable instance variables are invalidated in the methods annotated with objc_instance_variable_invalidator">,
+ DescFile<"IvarInvalidationChecker.cpp">;
+
+def MissingInvalidationMethod : Checker<"MissingInvalidationMethod">,
+ HelpText<"Check that the invalidation methods are present in classes that contain invalidatable instance variables">,
+ DescFile<"IvarInvalidationChecker.cpp">;
+
+def DirectIvarAssignment : Checker<"DirectIvarAssignment">,
+ HelpText<"Check for direct assignments to instance variables">,
+ DescFile<"DirectIvarAssignment.cpp">;
+
+def DirectIvarAssignmentForAnnotatedFunctions : Checker<"DirectIvarAssignmentForAnnotatedFunctions">,
+ HelpText<"Check for direct assignments to instance variables in the methods annotated with objc_no_direct_instance_variable_assignment">,
+ DescFile<"DirectIvarAssignment.cpp">;
+
+} // end "alpha.osx.cocoa"
+
+let ParentPackage = CoreFoundation in {
+
+def CFNumberCreateChecker : Checker<"CFNumber">,
+ HelpText<"Check for proper uses of CFNumberCreate">,
+ DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def CFRetainReleaseChecker : Checker<"CFRetainRelease">,
+ HelpText<"Check for null arguments to CFRetain/CFRelease/CFMakeCollectable">,
+ DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def CFErrorChecker : Checker<"CFError">,
+ HelpText<"Check usage of CFErrorRef* parameters">,
+ DescFile<"NSErrorChecker.cpp">;
+}
+
+let ParentPackage = Containers in {
+def ObjCContainersASTChecker : Checker<"PointerSizedValues">,
+ HelpText<"Warns if 'CFArray', 'CFDictionary', 'CFSet' are created with non-pointer-size values">,
+ DescFile<"ObjCContainersASTChecker.cpp">;
+
+def ObjCContainersChecker : Checker<"OutOfBounds">,
+ HelpText<"Checks for index out-of-bounds when using 'CFArray' API">,
+ DescFile<"ObjCContainersChecker.cpp">;
+
+}
+
+let ParentPackage = LocalizabilityOptIn in {
+def NonLocalizedStringChecker : Checker<"NonLocalizedStringChecker">,
+ HelpText<"Warns about uses of non-localized NSStrings passed to UI methods expecting localized NSStrings">,
+ DescFile<"LocalizationChecker.cpp">;
+
+def EmptyLocalizationContextChecker : Checker<"EmptyLocalizationContextChecker">,
+ HelpText<"Check that NSLocalizedString macros include a comment for context">,
+ DescFile<"LocalizationChecker.cpp">;
+}
+
+let ParentPackage = LocalizabilityAlpha in {
+def PluralMisuseChecker : Checker<"PluralMisuseChecker">,
+  HelpText<"Warns against using the one vs. many plural pattern in code when generating localized strings.">,
+ DescFile<"LocalizationChecker.cpp">;
+}
+
+//===----------------------------------------------------------------------===//
+// Checkers for LLVM development.
+//===----------------------------------------------------------------------===//
+
+def LLVMConventionsChecker : Checker<"Conventions">,
+ InPackage<LLVM>,
+ HelpText<"Check code for LLVM codebase conventions">,
+ DescFile<"LLVMConventionsChecker.cpp">;
+
+//===----------------------------------------------------------------------===//
+// Debugging checkers (for analyzer development).
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = Debug in {
+
+def DominatorsTreeDumper : Checker<"DumpDominators">,
+ HelpText<"Print the dominance tree for a given CFG">,
+ DescFile<"DebugCheckers.cpp">;
+
+def LiveVariablesDumper : Checker<"DumpLiveVars">,
+ HelpText<"Print results of live variable analysis">,
+ DescFile<"DebugCheckers.cpp">;
+
+def CFGViewer : Checker<"ViewCFG">,
+ HelpText<"View Control-Flow Graphs using GraphViz">,
+ DescFile<"DebugCheckers.cpp">;
+
+def CFGDumper : Checker<"DumpCFG">,
+ HelpText<"Display Control-Flow Graphs">,
+ DescFile<"DebugCheckers.cpp">;
+
+def CallGraphViewer : Checker<"ViewCallGraph">,
+ HelpText<"View Call Graph using GraphViz">,
+ DescFile<"DebugCheckers.cpp">;
+
+def CallGraphDumper : Checker<"DumpCallGraph">,
+ HelpText<"Display Call Graph">,
+ DescFile<"DebugCheckers.cpp">;
+
+def ConfigDumper : Checker<"ConfigDumper">,
+ HelpText<"Dump config table">,
+ DescFile<"DebugCheckers.cpp">;
+
+def TraversalDumper : Checker<"DumpTraversal">,
+ HelpText<"Print branch conditions as they are traversed by the engine">,
+ DescFile<"TraversalChecker.cpp">;
+
+def CallDumper : Checker<"DumpCalls">,
+ HelpText<"Print calls as they are traversed by the engine">,
+ DescFile<"TraversalChecker.cpp">;
+
+def AnalyzerStatsChecker : Checker<"Stats">,
+ HelpText<"Emit warnings with analyzer statistics">,
+ DescFile<"AnalyzerStatsChecker.cpp">;
+
+def TaintTesterChecker : Checker<"TaintTest">,
+ HelpText<"Mark tainted symbols as such.">,
+ DescFile<"TaintTesterChecker.cpp">;
+
+def ExprInspectionChecker : Checker<"ExprInspection">,
+ HelpText<"Check the analyzer's understanding of expressions">,
+ DescFile<"ExprInspectionChecker.cpp">;
+
+def ExplodedGraphViewer : Checker<"ViewExplodedGraph">,
+ HelpText<"View Exploded Graphs using GraphViz">,
+ DescFile<"DebugCheckers.cpp">;
+
+def BugHashDumper : Checker<"DumpBugHash">,
+ HelpText<"Dump the bug hash for all statements.">,
+ DescFile<"DebugCheckers.cpp">;
+
+} // end "debug"
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
new file mode 100644
index 0000000..3ad1996
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
@@ -0,0 +1,157 @@
+//===- ChrootChecker.cpp -------- Basic security checks ----------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines chroot checker, which checks improper use of chroot.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/ImmutableMap.h"
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+// Enum values that represent the jail state.
+enum Kind { NO_CHROOT, ROOT_CHANGED, JAIL_ENTERED };
+
+bool isRootChanged(intptr_t k) { return k == ROOT_CHANGED; }
+//bool isJailEntered(intptr_t k) { return k == JAIL_ENTERED; }
+
+// This checker checks improper use of chroot.
+// The state transition:
+// NO_CHROOT ---chroot(path)--> ROOT_CHANGED ---chdir(/) --> JAIL_ENTERED
+// | |
+// ROOT_CHANGED<--chdir(..)-- JAIL_ENTERED<--chdir(..)--
+// | |
+// bug<--foo()-- JAIL_ENTERED<--foo()--
+class ChrootChecker : public Checker<eval::Call, check::PreStmt<CallExpr> > {
+ mutable IdentifierInfo *II_chroot, *II_chdir;
+  // This bug refers to possibly breaking out of a chroot() jail.
+ mutable std::unique_ptr<BuiltinBug> BT_BreakJail;
+
+public:
+ ChrootChecker() : II_chroot(nullptr), II_chdir(nullptr) {}
+
+ static void *getTag() {
+ static int x;
+ return &x;
+ }
+
+ bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+
+private:
+ void Chroot(CheckerContext &C, const CallExpr *CE) const;
+ void Chdir(CheckerContext &C, const CallExpr *CE) const;
+};
+
+} // end anonymous namespace
+
+bool ChrootChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD)
+ return false;
+
+ ASTContext &Ctx = C.getASTContext();
+ if (!II_chroot)
+ II_chroot = &Ctx.Idents.get("chroot");
+ if (!II_chdir)
+ II_chdir = &Ctx.Idents.get("chdir");
+
+ if (FD->getIdentifier() == II_chroot) {
+ Chroot(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_chdir) {
+ Chdir(C, CE);
+ return true;
+ }
+
+ return false;
+}
+
+void ChrootChecker::Chroot(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ ProgramStateManager &Mgr = state->getStateManager();
+
+  // Once we encounter a chroot() call, set the enum value ROOT_CHANGED
+  // directly in the GDM.
+ state = Mgr.addGDM(state, ChrootChecker::getTag(), (void*) ROOT_CHANGED);
+ C.addTransition(state);
+}
+
+void ChrootChecker::Chdir(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ ProgramStateManager &Mgr = state->getStateManager();
+
+  // If there is no jail state in the GDM, just return.
+ const void *k = state->FindGDM(ChrootChecker::getTag());
+ if (!k)
+ return;
+
+  // After chdir("/") we enter the jail; set the enum value JAIL_ENTERED.
+ const Expr *ArgExpr = CE->getArg(0);
+ SVal ArgVal = state->getSVal(ArgExpr, C.getLocationContext());
+
+ if (const MemRegion *R = ArgVal.getAsRegion()) {
+ R = R->StripCasts();
+    if (const StringRegion *StrRegion = dyn_cast<StringRegion>(R)) {
+ const StringLiteral* Str = StrRegion->getStringLiteral();
+ if (Str->getString() == "/")
+ state = Mgr.addGDM(state, ChrootChecker::getTag(),
+ (void*) JAIL_ENTERED);
+ }
+ }
+
+ C.addTransition(state);
+}
+
+// Check the jail state before any function call except chroot and chdir().
+void ChrootChecker::checkPreStmt(const CallExpr *CE, CheckerContext &C) const {
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD)
+ return;
+
+ ASTContext &Ctx = C.getASTContext();
+ if (!II_chroot)
+ II_chroot = &Ctx.Idents.get("chroot");
+ if (!II_chdir)
+ II_chdir = &Ctx.Idents.get("chdir");
+
+  // Ignore chroot and chdir.
+ if (FD->getIdentifier() == II_chroot || FD->getIdentifier() == II_chdir)
+ return;
+
+ // If jail state is ROOT_CHANGED, generate BugReport.
+ void *const* k = C.getState()->FindGDM(ChrootChecker::getTag());
+ if (k)
+ if (isRootChanged((intptr_t) *k))
+ if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
+ if (!BT_BreakJail)
+ BT_BreakJail.reset(new BuiltinBug(
+ this, "Break out of jail", "No call of chdir(\"/\") immediately "
+ "after chroot"));
+ C.emitReport(llvm::make_unique<BugReport>(
+ *BT_BreakJail, BT_BreakJail->getDescription(), N));
+ }
+
+ return;
+}
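+
+// For example (illustrative), the checker warns on the open() call below:
+// after chroot() the state is ROOT_CHANGED, and no chdir("/") intervenes:
+//
+//   chroot("/var/jail");
+//   open("f", O_RDONLY); // warning: break out of jail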
+
+void ento::registerChrootChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ChrootChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangCheckers.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangCheckers.cpp
new file mode 100644
index 0000000..77a5a72
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangCheckers.cpp
@@ -0,0 +1,32 @@
+//===--- ClangCheckers.cpp - Provides builtin checkers ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/ClangCheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerRegistry.h"
+
+// FIXME: This is only necessary as long as there are checker registration
+// functions that do additional work besides mgr.registerChecker<CLASS>().
+// The only checkers that currently do this are:
+// - NSAutoreleasePoolChecker
+// - NSErrorChecker
+// - ObjCAtSyncChecker
+// It's probably worth including this information in Checkers.td to minimize
+// boilerplate code.
+#include "ClangSACheckers.h"
+
+using namespace clang;
+using namespace ento;
+
+void ento::registerBuiltinCheckers(CheckerRegistry &registry) {
+#define GET_CHECKERS
+#define CHECKER(FULLNAME,CLASS,DESCFILE,HELPTEXT,GROUPINDEX,HIDDEN) \
+ registry.addChecker(register##CLASS, FULLNAME, HELPTEXT);
+#include "Checkers.inc"
+#undef GET_CHECKERS
+}
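+
+// For illustration, a generated Checkers.inc entry such as
+//
+//   CHECKER("alpha.core.SizeofPtr", SizeofPointerChecker,
+//           "CheckSizeofPointer.cpp",
+//           "Warn about unintended use of sizeof() on pointer expressions",
+//           GroupIndex, Hidden)
+//
+// expands above to roughly:
+//
+//   registry.addChecker(registerSizeofPointerChecker, "alpha.core.SizeofPtr",
+//                       "Warn about unintended use of sizeof() on pointer "
+//                       "expressions");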
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckers.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckers.h
new file mode 100644
index 0000000..05b4a61
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckers.h
@@ -0,0 +1,37 @@
+//===--- ClangSACheckers.h - Registration functions for Checkers *- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Declares the registration functions for the checkers defined in
+// libclangStaticAnalyzerCheckers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_CLANGSACHECKERS_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_CLANGSACHECKERS_H
+
+#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
+
+namespace clang {
+
+namespace ento {
+class CheckerManager;
+class CheckerRegistry;
+
+#define GET_CHECKERS
+#define CHECKER(FULLNAME,CLASS,CXXFILE,HELPTEXT,GROUPINDEX,HIDDEN) \
+ void register##CLASS(CheckerManager &mgr);
+#include "Checkers.inc"
+#undef CHECKER
+#undef GET_CHECKERS
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
new file mode 100644
index 0000000..f2a269a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
@@ -0,0 +1,479 @@
+//==- DeadStoresChecker.cpp - Check for stores to dead variables -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines DeadStores, a flow-sensitive checker that looks for
+// stores to variables that are no longer live.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/SaveAndRestore.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+/// A simple visitor to record what VarDecls occur in EH-handling code.
+class EHCodeVisitor : public RecursiveASTVisitor<EHCodeVisitor> {
+public:
+ bool inEH;
+ llvm::DenseSet<const VarDecl *> &S;
+
+ bool TraverseObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
+ SaveAndRestore<bool> inFinally(inEH, true);
+ return ::RecursiveASTVisitor<EHCodeVisitor>::TraverseObjCAtFinallyStmt(S);
+ }
+
+ bool TraverseObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+ SaveAndRestore<bool> inCatch(inEH, true);
+ return ::RecursiveASTVisitor<EHCodeVisitor>::TraverseObjCAtCatchStmt(S);
+ }
+
+ bool TraverseCXXCatchStmt(CXXCatchStmt *S) {
+ SaveAndRestore<bool> inCatch(inEH, true);
+ return TraverseStmt(S->getHandlerBlock());
+ }
+
+ bool VisitDeclRefExpr(DeclRefExpr *DR) {
+ if (inEH)
+ if (const VarDecl *D = dyn_cast<VarDecl>(DR->getDecl()))
+ S.insert(D);
+ return true;
+ }
+
+ EHCodeVisitor(llvm::DenseSet<const VarDecl *> &S) :
+ inEH(false), S(S) {}
+};
+
+// FIXME: Eventually migrate into its own file, and have it managed by
+// AnalysisManager.
+class ReachableCode {
+ const CFG &cfg;
+ llvm::BitVector reachable;
+public:
+ ReachableCode(const CFG &cfg)
+ : cfg(cfg), reachable(cfg.getNumBlockIDs(), false) {}
+
+ void computeReachableBlocks();
+
+ bool isReachable(const CFGBlock *block) const {
+ return reachable[block->getBlockID()];
+ }
+};
+}
+
+void ReachableCode::computeReachableBlocks() {
+ if (!cfg.getNumBlockIDs())
+ return;
+
+ SmallVector<const CFGBlock*, 10> worklist;
+ worklist.push_back(&cfg.getEntry());
+
+ while (!worklist.empty()) {
+ const CFGBlock *block = worklist.pop_back_val();
+ llvm::BitVector::reference isReachable = reachable[block->getBlockID()];
+ if (isReachable)
+ continue;
+ isReachable = true;
+ for (CFGBlock::const_succ_iterator i = block->succ_begin(),
+ e = block->succ_end(); i != e; ++i)
+ if (const CFGBlock *succ = *i)
+ worklist.push_back(succ);
+ }
+}
+
+static const Expr *
+LookThroughTransitiveAssignmentsAndCommaOperators(const Expr *Ex) {
+ while (Ex) {
+ const BinaryOperator *BO =
+ dyn_cast<BinaryOperator>(Ex->IgnoreParenCasts());
+ if (!BO)
+ break;
+ if (BO->getOpcode() == BO_Assign) {
+ Ex = BO->getRHS();
+ continue;
+ }
+ if (BO->getOpcode() == BO_Comma) {
+ Ex = BO->getRHS();
+ continue;
+ }
+ break;
+ }
+ return Ex;
+}
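+
+// For example, for 'a = b = foo()' the function above returns the 'foo()'
+// subexpression: each assignment (and each comma operator) is peeled off by
+// taking its right-hand side.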
+
+namespace {
+class DeadStoreObs : public LiveVariables::Observer {
+ const CFG &cfg;
+ ASTContext &Ctx;
+ BugReporter& BR;
+ const CheckerBase *Checker;
+ AnalysisDeclContext* AC;
+ ParentMap& Parents;
+ llvm::SmallPtrSet<const VarDecl*, 20> Escaped;
+ std::unique_ptr<ReachableCode> reachableCode;
+ const CFGBlock *currentBlock;
+ std::unique_ptr<llvm::DenseSet<const VarDecl *>> InEH;
+
+ enum DeadStoreKind { Standard, Enclosing, DeadIncrement, DeadInit };
+
+public:
+ DeadStoreObs(const CFG &cfg, ASTContext &ctx, BugReporter &br,
+ const CheckerBase *checker, AnalysisDeclContext *ac,
+ ParentMap &parents,
+ llvm::SmallPtrSet<const VarDecl *, 20> &escaped)
+ : cfg(cfg), Ctx(ctx), BR(br), Checker(checker), AC(ac), Parents(parents),
+ Escaped(escaped), currentBlock(nullptr) {}
+
+ ~DeadStoreObs() override {}
+
+ bool isLive(const LiveVariables::LivenessValues &Live, const VarDecl *D) {
+ if (Live.isLive(D))
+ return true;
+ // Lazily construct the set that records which VarDecls are in
+ // EH code.
+ if (!InEH.get()) {
+ InEH.reset(new llvm::DenseSet<const VarDecl *>());
+ EHCodeVisitor V(*InEH.get());
+ V.TraverseStmt(AC->getBody());
+ }
+    // Treat all VarDecls that occur in EH code as being "always live"
+    // when deciding whether to suppress dead stores. Frequently stores
+ // are followed by reads in EH code, but we don't have the ability
+ // to analyze that yet.
+ return InEH->count(D);
+ }
+
+ void Report(const VarDecl *V, DeadStoreKind dsk,
+ PathDiagnosticLocation L, SourceRange R) {
+ if (Escaped.count(V))
+ return;
+
+    // Compute reachable blocks within the CFG for trivial cases where a
+    // bogus dead store can be reported because the store itself is
+    // unreachable.
+ if (!reachableCode.get()) {
+ reachableCode.reset(new ReachableCode(cfg));
+ reachableCode->computeReachableBlocks();
+ }
+
+ if (!reachableCode->isReachable(currentBlock))
+ return;
+
+ SmallString<64> buf;
+ llvm::raw_svector_ostream os(buf);
+ const char *BugType = nullptr;
+
+ switch (dsk) {
+ case DeadInit:
+ BugType = "Dead initialization";
+ os << "Value stored to '" << *V
+ << "' during its initialization is never read";
+ break;
+
+ case DeadIncrement:
+ BugType = "Dead increment";
+ case Standard:
+ if (!BugType) BugType = "Dead assignment";
+ os << "Value stored to '" << *V << "' is never read";
+ break;
+
+ case Enclosing:
+ // Don't report issues in this case, e.g.: "if (x = foo())",
+ // where 'x' is unused later. We have yet to see a case where
+ // this is a real bug.
+ return;
+ }
+
+ BR.EmitBasicReport(AC->getDecl(), Checker, BugType, "Dead store", os.str(),
+ L, R);
+ }
+
+ void CheckVarDecl(const VarDecl *VD, const Expr *Ex, const Expr *Val,
+ DeadStoreKind dsk,
+ const LiveVariables::LivenessValues &Live) {
+
+ if (!VD->hasLocalStorage())
+ return;
+ // Reference types confuse the dead stores checker. Skip them
+ // for now.
+ if (VD->getType()->getAs<ReferenceType>())
+ return;
+
+ if (!isLive(Live, VD) &&
+ !(VD->hasAttr<UnusedAttr>() || VD->hasAttr<BlocksAttr>() ||
+ VD->hasAttr<ObjCPreciseLifetimeAttr>())) {
+
+ PathDiagnosticLocation ExLoc =
+ PathDiagnosticLocation::createBegin(Ex, BR.getSourceManager(), AC);
+ Report(VD, dsk, ExLoc, Val->getSourceRange());
+ }
+ }
+
+ void CheckDeclRef(const DeclRefExpr *DR, const Expr *Val, DeadStoreKind dsk,
+ const LiveVariables::LivenessValues& Live) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl()))
+ CheckVarDecl(VD, DR, Val, dsk, Live);
+ }
+
+ bool isIncrement(VarDecl *VD, const BinaryOperator* B) {
+ if (B->isCompoundAssignmentOp())
+ return true;
+
+ const Expr *RHS = B->getRHS()->IgnoreParenCasts();
+ const BinaryOperator* BRHS = dyn_cast<BinaryOperator>(RHS);
+
+ if (!BRHS)
+ return false;
+
+ const DeclRefExpr *DR;
+
+ if ((DR = dyn_cast<DeclRefExpr>(BRHS->getLHS()->IgnoreParenCasts())))
+ if (DR->getDecl() == VD)
+ return true;
+
+ if ((DR = dyn_cast<DeclRefExpr>(BRHS->getRHS()->IgnoreParenCasts())))
+ if (DR->getDecl() == VD)
+ return true;
+
+ return false;
+ }
+
+ void observeStmt(const Stmt *S, const CFGBlock *block,
+ const LiveVariables::LivenessValues &Live) override {
+
+ currentBlock = block;
+
+ // Skip statements in macros.
+ if (S->getLocStart().isMacroID())
+ return;
+
+ // Only cover dead stores from regular assignments. ++/-- dead stores
+ // have never flagged a real bug.
+ if (const BinaryOperator* B = dyn_cast<BinaryOperator>(S)) {
+ if (!B->isAssignmentOp()) return; // Skip non-assignments.
+
+ if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(B->getLHS()))
+ if (VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ // Special case: check for assigning null to a pointer.
+ // This is a common form of defensive programming.
+ const Expr *RHS =
+ LookThroughTransitiveAssignmentsAndCommaOperators(B->getRHS());
+ RHS = RHS->IgnoreParenCasts();
+
+ QualType T = VD->getType();
+ if (T->isPointerType() || T->isObjCObjectPointerType()) {
+ if (RHS->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNull))
+ return;
+ }
+
+ // Special case: self-assignments. These are often used to shut up
+ // "unused variable" compiler warnings.
+ if (const DeclRefExpr *RhsDR = dyn_cast<DeclRefExpr>(RHS))
+ if (VD == dyn_cast<VarDecl>(RhsDR->getDecl()))
+ return;
+
+ // Otherwise, issue a warning.
+ DeadStoreKind dsk = Parents.isConsumedExpr(B)
+ ? Enclosing
+ : (isIncrement(VD,B) ? DeadIncrement : Standard);
+
+ CheckVarDecl(VD, DR, B->getRHS(), dsk, Live);
+ }
+ }
+ else if (const UnaryOperator* U = dyn_cast<UnaryOperator>(S)) {
+ if (!U->isIncrementOp() || U->isPrefix())
+ return;
+
+ const Stmt *parent = Parents.getParentIgnoreParenCasts(U);
+ if (!parent || !isa<ReturnStmt>(parent))
+ return;
+
+ const Expr *Ex = U->getSubExpr()->IgnoreParenCasts();
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Ex))
+ CheckDeclRef(DR, U, DeadIncrement, Live);
+ }
+ else if (const DeclStmt *DS = dyn_cast<DeclStmt>(S))
+ // Iterate through the decls. Warn if any initializers are complex
+ // expressions that are not live (never used).
+ for (const auto *DI : DS->decls()) {
+ const auto *V = dyn_cast<VarDecl>(DI);
+
+ if (!V)
+ continue;
+
+ if (V->hasLocalStorage()) {
+ // Reference types confuse the dead stores checker. Skip them
+ // for now.
+ if (V->getType()->getAs<ReferenceType>())
+ return;
+
+ if (const Expr *E = V->getInit()) {
+ while (const ExprWithCleanups *exprClean =
+ dyn_cast<ExprWithCleanups>(E))
+ E = exprClean->getSubExpr();
+
+ // Look through transitive assignments, e.g.:
+ // int x = y = 0;
+ E = LookThroughTransitiveAssignmentsAndCommaOperators(E);
+
+ // Don't warn on C++ objects (yet) until we can show that their
+ // constructors/destructors don't have side effects.
+ if (isa<CXXConstructExpr>(E))
+ return;
+
+ // A dead initialization is a variable that is dead after it
+ // is initialized. We don't flag warnings for those variables
+ // marked 'unused' or 'objc_precise_lifetime'.
+ if (!isLive(Live, V) &&
+ !V->hasAttr<UnusedAttr>() &&
+ !V->hasAttr<ObjCPreciseLifetimeAttr>()) {
+ // Special case: check for initializations with constants.
+ //
+ // e.g. : int x = 0;
+ //
+ // If x is EVER assigned a new value later, don't issue
+ // a warning. This is because such initialization can be
+ // due to defensive programming.
+ if (E->isEvaluatable(Ctx))
+ return;
+
+ if (const DeclRefExpr *DRE =
+ dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()))
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ // Special case: check for initialization from constant
+ // variables.
+ //
+ // e.g. extern const int MyConstant;
+ // int x = MyConstant;
+ //
+ if (VD->hasGlobalStorage() &&
+ VD->getType().isConstQualified())
+ return;
+ // Special case: check for initialization from scalar
+ // parameters. This is often a form of defensive
+ // programming. Non-scalars are still an error, since
+ // they more likely represent an actual algorithmic bug.
+ if (isa<ParmVarDecl>(VD) && VD->getType()->isScalarType())
+ return;
+ }
+
+ PathDiagnosticLocation Loc =
+ PathDiagnosticLocation::create(V, BR.getSourceManager());
+ Report(V, DeadInit, Loc, E->getSourceRange());
+ }
+ }
+ }
+ }
+ }
+};
+
+} // end anonymous namespace
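+
+// Illustrative cases this observer flags (a sketch with hypothetical names):
+//   int x = compute();   // "Dead initialization" if 'x' is never read
+//   x = 1;               // "Dead assignment" if 'x' is not read afterwards
+//   return x++;          // "Dead increment" (the incremented value is lost)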
+
+//===----------------------------------------------------------------------===//
+// Driver function to invoke the Dead-Stores checker on a CFG.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class FindEscaped {
+public:
+ llvm::SmallPtrSet<const VarDecl*, 20> Escaped;
+
+ void operator()(const Stmt *S) {
+ // Check for '&'. Any VarDecl whose address has been taken we treat as
+ // escaped.
+ // FIXME: What about references?
+ if (auto *LE = dyn_cast<LambdaExpr>(S)) {
+ findLambdaReferenceCaptures(LE);
+ return;
+ }
+
+ const UnaryOperator *U = dyn_cast<UnaryOperator>(S);
+ if (!U)
+ return;
+ if (U->getOpcode() != UO_AddrOf)
+ return;
+
+ const Expr *E = U->getSubExpr()->IgnoreParenCasts();
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E))
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl()))
+ Escaped.insert(VD);
+ }
+
+ // Treat local variables captured by reference in C++ lambdas as escaped.
+ void findLambdaReferenceCaptures(const LambdaExpr *LE) {
+ const CXXRecordDecl *LambdaClass = LE->getLambdaClass();
+ llvm::DenseMap<const VarDecl *, FieldDecl *> CaptureFields;
+ FieldDecl *ThisCaptureField;
+ LambdaClass->getCaptureFields(CaptureFields, ThisCaptureField);
+
+ for (const LambdaCapture &C : LE->captures()) {
+ if (!C.capturesVariable())
+ continue;
+
+ VarDecl *VD = C.getCapturedVar();
+ const FieldDecl *FD = CaptureFields[VD];
+ if (!FD)
+ continue;
+
+ // If the capture field is a reference type, it is capture-by-reference.
+ if (FD->getType()->isReferenceType())
+ Escaped.insert(VD);
+ }
+ }
+};
+} // end anonymous namespace
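+
+// For illustration (a sketch with hypothetical names), both of the following
+// mark 'x' as escaped, exempting it from dead-store warnings:
+//   int *p = &x;               // address taken
+//   auto f = [&x] { x = 1; };  // captured by reference in a lambda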
+
+
+//===----------------------------------------------------------------------===//
+// DeadStoresChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class DeadStoresChecker : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+
+ // Don't do anything for template instantiations.
+ // Proving that code in a template instantiation is "dead"
+ // means proving that it is dead in all instantiations.
+ // This same problem exists with -Wunreachable-code.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isTemplateInstantiation())
+ return;
+
+ if (LiveVariables *L = mgr.getAnalysis<LiveVariables>(D)) {
+ CFG &cfg = *mgr.getCFG(D);
+ AnalysisDeclContext *AC = mgr.getAnalysisDeclContext(D);
+ ParentMap &pmap = mgr.getParentMap(D);
+ FindEscaped FS;
+ cfg.VisitBlockStmts(FS);
+ DeadStoreObs A(cfg, BR.getContext(), BR, this, AC, pmap, FS.Escaped);
+ L->runOnAllBlocks(A);
+ }
+ }
+};
+}
+
+void ento::registerDeadStoresChecker(CheckerManager &mgr) {
+ mgr.registerChecker<DeadStoresChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
new file mode 100644
index 0000000..2eef168
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
@@ -0,0 +1,247 @@
+//==- DebugCheckers.cpp - Debugging Checkers ---------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines checkers that display debugging information.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/Analysis/Analyses/Dominators.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/CallGraph.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/IssueHash.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/Support/Process.h"
+
+using namespace clang;
+using namespace ento;
+
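+// These checkers are registered under the analyzer's "debug" package; the
+// exact names live in Checkers.td. As a sketch (assuming the usual naming),
+// one of them can be enabled with:
+//   clang -cc1 -analyze -analyzer-checker=debug.DumpCFG input.c
+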
+//===----------------------------------------------------------------------===//
+// DominatorsTreeDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class DominatorsTreeDumper : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (AnalysisDeclContext *AC = mgr.getAnalysisDeclContext(D)) {
+ DominatorTree dom;
+ dom.buildDominatorTree(*AC);
+ dom.dump();
+ }
+ }
+};
+}
+
+void ento::registerDominatorsTreeDumper(CheckerManager &mgr) {
+ mgr.registerChecker<DominatorsTreeDumper>();
+}
+
+//===----------------------------------------------------------------------===//
+// LiveVariablesDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class LiveVariablesDumper : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (LiveVariables* L = mgr.getAnalysis<LiveVariables>(D)) {
+ L->dumpBlockLiveness(mgr.getSourceManager());
+ }
+ }
+};
+}
+
+void ento::registerLiveVariablesDumper(CheckerManager &mgr) {
+ mgr.registerChecker<LiveVariablesDumper>();
+}
+
+//===----------------------------------------------------------------------===//
+// CFGViewer
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CFGViewer : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (CFG *cfg = mgr.getCFG(D)) {
+ cfg->viewCFG(mgr.getLangOpts());
+ }
+ }
+};
+}
+
+void ento::registerCFGViewer(CheckerManager &mgr) {
+ mgr.registerChecker<CFGViewer>();
+}
+
+//===----------------------------------------------------------------------===//
+// CFGDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CFGDumper : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ PrintingPolicy Policy(mgr.getLangOpts());
+ Policy.TerseOutput = true;
+ Policy.PolishForDeclaration = true;
+ D->print(llvm::errs(), Policy);
+
+ if (CFG *cfg = mgr.getCFG(D)) {
+ cfg->dump(mgr.getLangOpts(),
+ llvm::sys::Process::StandardErrHasColors());
+ }
+ }
+};
+}
+
+void ento::registerCFGDumper(CheckerManager &mgr) {
+ mgr.registerChecker<CFGDumper>();
+}
+
+//===----------------------------------------------------------------------===//
+// CallGraphViewer
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CallGraphViewer : public Checker< check::ASTDecl<TranslationUnitDecl> > {
+public:
+ void checkASTDecl(const TranslationUnitDecl *TU, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ CallGraph CG;
+ CG.addToCallGraph(const_cast<TranslationUnitDecl*>(TU));
+ CG.viewGraph();
+ }
+};
+}
+
+void ento::registerCallGraphViewer(CheckerManager &mgr) {
+ mgr.registerChecker<CallGraphViewer>();
+}
+
+//===----------------------------------------------------------------------===//
+// CallGraphDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CallGraphDumper : public Checker< check::ASTDecl<TranslationUnitDecl> > {
+public:
+ void checkASTDecl(const TranslationUnitDecl *TU, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ CallGraph CG;
+ CG.addToCallGraph(const_cast<TranslationUnitDecl*>(TU));
+ CG.dump();
+ }
+};
+}
+
+void ento::registerCallGraphDumper(CheckerManager &mgr) {
+ mgr.registerChecker<CallGraphDumper>();
+}
+
+
+//===----------------------------------------------------------------------===//
+// ConfigDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ConfigDumper : public Checker< check::EndOfTranslationUnit > {
+ typedef AnalyzerOptions::ConfigTable Table;
+
+ static int compareEntry(const Table::MapEntryTy *const *LHS,
+ const Table::MapEntryTy *const *RHS) {
+ return (*LHS)->getKey().compare((*RHS)->getKey());
+ }
+
+public:
+ void checkEndOfTranslationUnit(const TranslationUnitDecl *TU,
+ AnalysisManager& mgr,
+ BugReporter &BR) const {
+ const Table &Config = mgr.options.Config;
+
+ SmallVector<const Table::MapEntryTy *, 32> Keys;
+ for (Table::const_iterator I = Config.begin(), E = Config.end(); I != E;
+ ++I)
+ Keys.push_back(&*I);
+ llvm::array_pod_sort(Keys.begin(), Keys.end(), compareEntry);
+
+ llvm::errs() << "[config]\n";
+ for (unsigned I = 0, E = Keys.size(); I != E; ++I)
+ llvm::errs() << Keys[I]->getKey() << " = " << Keys[I]->second << '\n';
+
+ llvm::errs() << "[stats]\n" << "num-entries = " << Keys.size() << '\n';
+ }
+};
+}
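+
+// Example output (a sketch; the key shown is purely illustrative):
+//   [config]
+//   max-nodes = 225000
+//   [stats]
+//   num-entries = 1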
+
+void ento::registerConfigDumper(CheckerManager &mgr) {
+ mgr.registerChecker<ConfigDumper>();
+}
+
+//===----------------------------------------------------------------------===//
+// ExplodedGraph Viewer
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ExplodedGraphViewer : public Checker< check::EndAnalysis > {
+public:
+ ExplodedGraphViewer() {}
+ void checkEndAnalysis(ExplodedGraph &G, BugReporter &B, ExprEngine &Eng) const {
+ Eng.ViewGraph(0);
+ }
+};
+
+}
+
+void ento::registerExplodedGraphViewer(CheckerManager &mgr) {
+ mgr.registerChecker<ExplodedGraphViewer>();
+}
+
+//===----------------------------------------------------------------------===//
+// DumpBugHash
+//===----------------------------------------------------------------------===//
+
+namespace {
+class BugHashDumper : public Checker<check::PostStmt<Stmt>> {
+public:
+ mutable std::unique_ptr<BugType> BT;
+
+ void checkPostStmt(const Stmt *S, CheckerContext &C) const {
+ if (!BT)
+ BT.reset(new BugType(this, "Dump hash components", "debug"));
+
+ ExplodedNode *N = C.generateNonFatalErrorNode();
+ if (!N)
+ return;
+
+ const LangOptions &Opts = C.getLangOpts();
+ const SourceManager &SM = C.getSourceManager();
+ FullSourceLoc FL(S->getLocStart(), SM);
+ std::string HashContent =
+ GetIssueString(SM, FL, getCheckName().getName(), BT->getCategory(),
+ C.getLocationContext()->getDecl(), Opts);
+
+ C.emitReport(llvm::make_unique<BugReport>(*BT, HashContent, N));
+ }
+};
+}
+
+void ento::registerBugHashDumper(CheckerManager &mgr) {
+ mgr.registerChecker<BugHashDumper>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
new file mode 100644
index 0000000..f216f69
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -0,0 +1,301 @@
+//== NullDerefChecker.cpp - Null dereference checker ------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines NullDerefChecker, a builtin check in ExprEngine that performs
+// checks for null pointers at loads and stores.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprOpenMP.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class DereferenceChecker
+ : public Checker< check::Location,
+ check::Bind,
+ EventDispatcher<ImplicitNullDerefEvent> > {
+ mutable std::unique_ptr<BuiltinBug> BT_null;
+ mutable std::unique_ptr<BuiltinBug> BT_undef;
+
+ void reportBug(ProgramStateRef State, const Stmt *S, CheckerContext &C) const;
+
+public:
+ void checkLocation(SVal location, bool isLoad, const Stmt* S,
+ CheckerContext &C) const;
+ void checkBind(SVal L, SVal V, const Stmt *S, CheckerContext &C) const;
+
+ static void AddDerefSource(raw_ostream &os,
+ SmallVectorImpl<SourceRange> &Ranges,
+ const Expr *Ex, const ProgramState *state,
+ const LocationContext *LCtx,
+ bool loadedFrom = false);
+};
+} // end anonymous namespace
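+
+// Illustrative case (a sketch with hypothetical names): the checker reports
+//   int *p = nullptr;
+//   *p = 1;     // explicit dereference of a null pointer
+// and, when 'p' may only possibly be null, records an implicit null
+// dereference event instead of reporting directly.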
+
+void
+DereferenceChecker::AddDerefSource(raw_ostream &os,
+ SmallVectorImpl<SourceRange> &Ranges,
+ const Expr *Ex,
+ const ProgramState *state,
+ const LocationContext *LCtx,
+ bool loadedFrom) {
+ Ex = Ex->IgnoreParenLValueCasts();
+ switch (Ex->getStmtClass()) {
+ default:
+ break;
+ case Stmt::DeclRefExprClass: {
+ const DeclRefExpr *DR = cast<DeclRefExpr>(Ex);
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ os << " (" << (loadedFrom ? "loaded from" : "from")
+ << " variable '" << VD->getName() << "')";
+ Ranges.push_back(DR->getSourceRange());
+ }
+ break;
+ }
+ case Stmt::MemberExprClass: {
+ const MemberExpr *ME = cast<MemberExpr>(Ex);
+ os << " (" << (loadedFrom ? "loaded from" : "via")
+ << " field '" << ME->getMemberNameInfo() << "')";
+ SourceLocation L = ME->getMemberLoc();
+ Ranges.push_back(SourceRange(L, L));
+ break;
+ }
+ case Stmt::ObjCIvarRefExprClass: {
+ const ObjCIvarRefExpr *IV = cast<ObjCIvarRefExpr>(Ex);
+ os << " (" << (loadedFrom ? "loaded from" : "via")
+ << " ivar '" << IV->getDecl()->getName() << "')";
+ SourceLocation L = IV->getLocation();
+ Ranges.push_back(SourceRange(L, L));
+ break;
+ }
+ }
+}
+
+static const Expr *getDereferenceExpr(const Stmt *S, bool IsBind = false) {
+ const Expr *E = nullptr;
+
+ // Walk through lvalue casts to get the original expression
+ // that syntactically caused the load.
+ if (const Expr *expr = dyn_cast<Expr>(S))
+ E = expr->IgnoreParenLValueCasts();
+
+ if (IsBind) {
+ const VarDecl *VD;
+ const Expr *Init;
+ std::tie(VD, Init) = parseAssignment(S);
+ if (VD && Init)
+ E = Init;
+ }
+ return E;
+}
+
+static bool suppressReport(const Expr *E) {
+ // Do not report dereferences on memory in non-default address spaces.
+ return E->getType().getQualifiers().hasAddressSpace();
+}
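+
+// For example (a sketch): given
+//   __attribute__((address_space(1))) int *p = 0;
+//   int v = *p;
+// no report is emitted, since 0 can be a valid address in a non-default
+// address space.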
+
+void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
+ CheckerContext &C) const {
+ // Generate an error node.
+ ExplodedNode *N = C.generateErrorNode(State);
+ if (!N)
+ return;
+
+ // We know that 'location' cannot be non-null. This is what
+ // we call an "explicit" null dereference.
+ if (!BT_null)
+ BT_null.reset(new BuiltinBug(this, "Dereference of null pointer"));
+
+ SmallString<100> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ SmallVector<SourceRange, 2> Ranges;
+
+ switch (S->getStmtClass()) {
+ case Stmt::ArraySubscriptExprClass: {
+ os << "Array access";
+ const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(S);
+ AddDerefSource(os, Ranges, AE->getBase()->IgnoreParenCasts(),
+ State.get(), N->getLocationContext());
+ os << " results in a null pointer dereference";
+ break;
+ }
+ case Stmt::OMPArraySectionExprClass: {
+ os << "Array access";
+ const OMPArraySectionExpr *AE = cast<OMPArraySectionExpr>(S);
+ AddDerefSource(os, Ranges, AE->getBase()->IgnoreParenCasts(),
+ State.get(), N->getLocationContext());
+ os << " results in a null pointer dereference";
+ break;
+ }
+ case Stmt::UnaryOperatorClass: {
+ os << "Dereference of null pointer";
+ const UnaryOperator *U = cast<UnaryOperator>(S);
+ AddDerefSource(os, Ranges, U->getSubExpr()->IgnoreParens(),
+ State.get(), N->getLocationContext(), true);
+ break;
+ }
+ case Stmt::MemberExprClass: {
+ const MemberExpr *M = cast<MemberExpr>(S);
+ if (M->isArrow() || bugreporter::isDeclRefExprToReference(M->getBase())) {
+ os << "Access to field '" << M->getMemberNameInfo()
+ << "' results in a dereference of a null pointer";
+ AddDerefSource(os, Ranges, M->getBase()->IgnoreParenCasts(),
+ State.get(), N->getLocationContext(), true);
+ }
+ break;
+ }
+ case Stmt::ObjCIvarRefExprClass: {
+ const ObjCIvarRefExpr *IV = cast<ObjCIvarRefExpr>(S);
+ os << "Access to instance variable '" << *IV->getDecl()
+ << "' results in a dereference of a null pointer";
+ AddDerefSource(os, Ranges, IV->getBase()->IgnoreParenCasts(),
+ State.get(), N->getLocationContext(), true);
+ break;
+ }
+ default:
+ break;
+ }
+
+ auto report = llvm::make_unique<BugReport>(
+ *BT_null, buf.empty() ? BT_null->getDescription() : StringRef(buf), N);
+
+ bugreporter::trackNullOrUndefValue(N, bugreporter::getDerefExpr(S), *report);
+
+ for (SmallVectorImpl<SourceRange>::iterator
+ I = Ranges.begin(), E = Ranges.end(); I!=E; ++I)
+ report->addRange(*I);
+
+ C.emitReport(std::move(report));
+}
+
+void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
+ CheckerContext &C) const {
+ // Check for dereference of an undefined value.
+ if (l.isUndef()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
+ if (!BT_undef)
+ BT_undef.reset(
+ new BuiltinBug(this, "Dereference of undefined pointer value"));
+
+ auto report =
+ llvm::make_unique<BugReport>(*BT_undef, BT_undef->getDescription(), N);
+ bugreporter::trackNullOrUndefValue(N, bugreporter::getDerefExpr(S),
+ *report);
+ C.emitReport(std::move(report));
+ }
+ return;
+ }
+
+ DefinedOrUnknownSVal location = l.castAs<DefinedOrUnknownSVal>();
+
+ // Check for null dereferences.
+ if (!location.getAs<Loc>())
+ return;
+
+ ProgramStateRef state = C.getState();
+
+ ProgramStateRef notNullState, nullState;
+ std::tie(notNullState, nullState) = state->assume(location);
+
+ // The explicit NULL case.
+ if (nullState) {
+ if (!notNullState) {
+ const Expr *expr = getDereferenceExpr(S);
+ if (!suppressReport(expr)) {
+ reportBug(nullState, expr, C);
+ return;
+ }
+ }
+
+ // Otherwise, we have the case where the location could either be
+ // null or not-null. Record the error node as an "implicit" null
+ // dereference.
+ if (ExplodedNode *N = C.generateSink(nullState, C.getPredecessor())) {
+ ImplicitNullDerefEvent event = {l, isLoad, N, &C.getBugReporter(),
+ /*IsDirectDereference=*/false};
+ dispatchEvent(event);
+ }
+ }
+
+ // From this point forward, we know that the location is not null.
+ C.addTransition(notNullState);
+}
+
+void DereferenceChecker::checkBind(SVal L, SVal V, const Stmt *S,
+ CheckerContext &C) const {
+ // If we're binding to a reference, check if the value is known to be null.
+ if (V.isUndef())
+ return;
+
+ const MemRegion *MR = L.getAsRegion();
+ const TypedValueRegion *TVR = dyn_cast_or_null<TypedValueRegion>(MR);
+ if (!TVR)
+ return;
+
+ if (!TVR->getValueType()->isReferenceType())
+ return;
+
+ ProgramStateRef State = C.getState();
+
+ ProgramStateRef StNonNull, StNull;
+ std::tie(StNonNull, StNull) = State->assume(V.castAs<DefinedOrUnknownSVal>());
+
+ if (StNull) {
+ if (!StNonNull) {
+ const Expr *expr = getDereferenceExpr(S, /*IsBind=*/true);
+ if (!suppressReport(expr)) {
+ reportBug(StNull, expr, C);
+ return;
+ }
+ }
+
+ // At this point the value could be either null or non-null.
+ // Record this as an "implicit" null dereference.
+ if (ExplodedNode *N = C.generateSink(StNull, C.getPredecessor())) {
+ ImplicitNullDerefEvent event = {V, /*isLoad=*/true, N,
+ &C.getBugReporter(),
+ /*IsDirectDereference=*/false};
+ dispatchEvent(event);
+ }
+ }
+
+ // Unlike a regular null dereference, initializing a reference with a
+ // dereferenced null pointer does not actually cause a runtime exception in
+ // Clang's implementation of references.
+ //
+ // int &r = *p; // safe??
+ // if (p != NULL) return; // uh-oh
+ // r = 5; // trap here
+ //
+ // The standard says this is invalid as soon as we try to create a "null
+ // reference" (there is no such thing), but turning this into an assumption
+ // that 'p' is never null will not match our actual runtime behavior.
+ // So we do not record this assumption, allowing us to warn on the last line
+ // of this example.
+ //
+ // We do need to add a transition because we may have generated a sink for
+ // the "implicit" null dereference.
+ C.addTransition(State, this);
+}
+
+void ento::registerDereferenceChecker(CheckerManager &mgr) {
+ mgr.registerChecker<DereferenceChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
new file mode 100644
index 0000000..ad478cb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
@@ -0,0 +1,227 @@
+//=- DirectIvarAssignment.cpp - Check rules on ObjC properties -*- C++ ----*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Check that Objective-C properties are set with the setter, not through a
+// direct assignment.
+//
+// Two versions of the checker exist: one that checks all methods and one
+// that only checks methods annotated with
+// __attribute__((annotate("objc_no_direct_instance_variable_assignment"))).
+//
+// The checker does not warn about assignments to ivars annotated with
+// __attribute__((annotate("objc_allow_direct_instance_variable_assignment"))).
+// This annotation serves as a false-positive suppression mechanism for the
+// checker. The annotation is allowed on properties and ivars.
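+//
+// For illustration (a sketch with hypothetical names):
+//   @implementation Foo
+//   - (void)update {
+//     _name = @"x";  // warning: use the 'name' setter instead
+//   }
+//   @end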
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/DenseMap.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+/// The default method filter, used to filter out methods on which the check
+/// should not be performed.
+///
+/// Matches init, dealloc, copy, mutableCopy, and any other method whose name
+/// suggests it may legitimately perform direct instance variable assignment.
+static bool DefaultMethodFilter(const ObjCMethodDecl *M) {
+ return M->getMethodFamily() == OMF_init ||
+ M->getMethodFamily() == OMF_dealloc ||
+ M->getMethodFamily() == OMF_copy ||
+ M->getMethodFamily() == OMF_mutableCopy ||
+ M->getSelector().getNameForSlot(0).find("init") != StringRef::npos ||
+ M->getSelector().getNameForSlot(0).find("Init") != StringRef::npos;
+}
+
+class DirectIvarAssignment :
+ public Checker<check::ASTDecl<ObjCImplementationDecl> > {
+
+ typedef llvm::DenseMap<const ObjCIvarDecl*,
+ const ObjCPropertyDecl*> IvarToPropertyMapTy;
+
+ /// A helper class, which walks the AST and locates all assignments to ivars
+ /// in the given function.
+ class MethodCrawler : public ConstStmtVisitor<MethodCrawler> {
+ const IvarToPropertyMapTy &IvarToPropMap;
+ const ObjCMethodDecl *MD;
+ const ObjCInterfaceDecl *InterfD;
+ BugReporter &BR;
+ const CheckerBase *Checker;
+ LocationOrAnalysisDeclContext DCtx;
+
+ public:
+ MethodCrawler(const IvarToPropertyMapTy &InMap, const ObjCMethodDecl *InMD,
+ const ObjCInterfaceDecl *InID, BugReporter &InBR,
+ const CheckerBase *Checker, AnalysisDeclContext *InDCtx)
+ : IvarToPropMap(InMap), MD(InMD), InterfD(InID), BR(InBR),
+ Checker(Checker), DCtx(InDCtx) {}
+
+ void VisitStmt(const Stmt *S) { VisitChildren(S); }
+
+ void VisitBinaryOperator(const BinaryOperator *BO);
+
+ void VisitChildren(const Stmt *S) {
+ for (const Stmt *Child : S->children())
+ if (Child)
+ this->Visit(Child);
+ }
+ };
+
+public:
+ bool (*ShouldSkipMethod)(const ObjCMethodDecl *);
+
+ DirectIvarAssignment() : ShouldSkipMethod(&DefaultMethodFilter) {}
+
+ void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& Mgr,
+ BugReporter &BR) const;
+};
+
+static const ObjCIvarDecl *findPropertyBackingIvar(const ObjCPropertyDecl *PD,
+ const ObjCInterfaceDecl *InterD,
+ ASTContext &Ctx) {
+ // Check for synthesized ivars.
+ ObjCIvarDecl *ID = PD->getPropertyIvarDecl();
+ if (ID)
+ return ID;
+
+ ObjCInterfaceDecl *NonConstInterD = const_cast<ObjCInterfaceDecl*>(InterD);
+
+ // Check for existing "_PropName".
+ ID = NonConstInterD->lookupInstanceVariable(PD->getDefaultSynthIvarName(Ctx));
+ if (ID)
+ return ID;
+
+ // Check for existing "PropName".
+ IdentifierInfo *PropIdent = PD->getIdentifier();
+ ID = NonConstInterD->lookupInstanceVariable(PropIdent);
+
+ return ID;
+}
+
+void DirectIvarAssignment::checkASTDecl(const ObjCImplementationDecl *D,
+ AnalysisManager& Mgr,
+ BugReporter &BR) const {
+ const ObjCInterfaceDecl *InterD = D->getClassInterface();
+
+ IvarToPropertyMapTy IvarToPropMap;
+
+ // Find all properties for this class.
+ for (const auto *PD : InterD->properties()) {
+ // Find the corresponding IVar.
+ const ObjCIvarDecl *ID = findPropertyBackingIvar(PD, InterD,
+ Mgr.getASTContext());
+
+ if (!ID)
+ continue;
+
+ // Store the IVar to property mapping.
+ IvarToPropMap[ID] = PD;
+ }
+
+ if (IvarToPropMap.empty())
+ return;
+
+ for (const auto *M : D->instance_methods()) {
+ AnalysisDeclContext *DCtx = Mgr.getAnalysisDeclContext(M);
+
+ if ((*ShouldSkipMethod)(M))
+ continue;
+
+ const Stmt *Body = M->getBody();
+ assert(Body);
+
+ MethodCrawler MC(IvarToPropMap, M->getCanonicalDecl(), InterD, BR, this,
+ DCtx);
+ MC.VisitStmt(Body);
+ }
+}
+
+static bool isAnnotatedToAllowDirectAssignment(const Decl *D) {
+ for (const auto *Ann : D->specific_attrs<AnnotateAttr>())
+ if (Ann->getAnnotation() ==
+ "objc_allow_direct_instance_variable_assignment")
+ return true;
+ return false;
+}
+
+void DirectIvarAssignment::MethodCrawler::VisitBinaryOperator(
+ const BinaryOperator *BO) {
+ if (!BO->isAssignmentOp())
+ return;
+
+ const ObjCIvarRefExpr *IvarRef =
+ dyn_cast<ObjCIvarRefExpr>(BO->getLHS()->IgnoreParenCasts());
+
+ if (!IvarRef)
+ return;
+
+ if (const ObjCIvarDecl *D = IvarRef->getDecl()) {
+ IvarToPropertyMapTy::const_iterator I = IvarToPropMap.find(D);
+
+ if (I != IvarToPropMap.end()) {
+ const ObjCPropertyDecl *PD = I->second;
+ // Skip warnings on ivars annotated with
+ // objc_allow_direct_instance_variable_assignment. This annotation
+ // serves as a false-positive suppression mechanism for the checker.
+ // The annotation is allowed on properties and ivars.
+ if (isAnnotatedToAllowDirectAssignment(PD) ||
+ isAnnotatedToAllowDirectAssignment(D))
+ return;
+
+ ObjCMethodDecl *GetterMethod =
+ InterfD->getInstanceMethod(PD->getGetterName());
+ ObjCMethodDecl *SetterMethod =
+ InterfD->getInstanceMethod(PD->getSetterName());
+
+ if (SetterMethod && SetterMethod->getCanonicalDecl() == MD)
+ return;
+
+ if (GetterMethod && GetterMethod->getCanonicalDecl() == MD)
+ return;
+
+ BR.EmitBasicReport(
+ MD, Checker, "Property access", categories::CoreFoundationObjectiveC,
+ "Direct assignment to an instance variable backing a property; "
+ "use the setter instead",
+ PathDiagnosticLocation(IvarRef, BR.getSourceManager(), DCtx));
+ }
+ }
+}
+}
+
+// Register the checker that checks for direct accesses in all functions,
+// except for the initialization and copy routines.
+void ento::registerDirectIvarAssignment(CheckerManager &mgr) {
+ mgr.registerChecker<DirectIvarAssignment>();
+}
+
+// Register the checker that checks for direct accesses in functions annotated
+// with __attribute__((annotate("objc_no_direct_instance_variable_assignment"))).
+static bool AttrFilter(const ObjCMethodDecl *M) {
+ for (const auto *Ann : M->specific_attrs<AnnotateAttr>())
+ if (Ann->getAnnotation() == "objc_no_direct_instance_variable_assignment")
+ return false;
+ return true;
+}
+
+void ento::registerDirectIvarAssignmentForAnnotatedFunctions(
+ CheckerManager &mgr) {
+ mgr.registerChecker<DirectIvarAssignment>()->ShouldSkipMethod = &AttrFilter;
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
new file mode 100644
index 0000000..5985023
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
@@ -0,0 +1,92 @@
+//== DivZeroChecker.cpp - Division by zero checker --------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines DivZeroChecker, a builtin check in ExprEngine that performs
+// checks for division by zeros.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class DivZeroChecker : public Checker< check::PreStmt<BinaryOperator> > {
+ mutable std::unique_ptr<BuiltinBug> BT;
+ void reportBug(const char *Msg,
+ ProgramStateRef StateZero,
+ CheckerContext &C) const;
+public:
+ void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
+};
+} // end anonymous namespace
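+
+// Illustrative case (a sketch with hypothetical names):
+//   int f(int n) {
+//     return 100 / n;  // warns on paths where 'n' is known to be zero
+//   }
+// The same applies to '%', '/=', '%=', and to tainted denominators that may
+// be zero.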
+
+void DivZeroChecker::reportBug(const char *Msg,
+ ProgramStateRef StateZero,
+ CheckerContext &C) const {
+ if (ExplodedNode *N = C.generateErrorNode(StateZero)) {
+ if (!BT)
+ BT.reset(new BuiltinBug(this, "Division by zero"));
+
+ auto R = llvm::make_unique<BugReport>(*BT, Msg, N);
+ bugreporter::trackNullOrUndefValue(N, bugreporter::GetDenomExpr(N), *R);
+ C.emitReport(std::move(R));
+ }
+}
+
+void DivZeroChecker::checkPreStmt(const BinaryOperator *B,
+ CheckerContext &C) const {
+ BinaryOperator::Opcode Op = B->getOpcode();
+ if (Op != BO_Div &&
+ Op != BO_Rem &&
+ Op != BO_DivAssign &&
+ Op != BO_RemAssign)
+ return;
+
+ if (!B->getRHS()->getType()->isScalarType())
+ return;
+
+ SVal Denom = C.getState()->getSVal(B->getRHS(), C.getLocationContext());
+ Optional<DefinedSVal> DV = Denom.getAs<DefinedSVal>();
+
+ // Divide-by-undefined handled in the generic checking for uses of
+ // undefined values.
+ if (!DV)
+ return;
+
+ // Check for divide by zero.
+ ConstraintManager &CM = C.getConstraintManager();
+ ProgramStateRef stateNotZero, stateZero;
+ std::tie(stateNotZero, stateZero) = CM.assumeDual(C.getState(), *DV);
+
+ if (!stateNotZero) {
+ assert(stateZero);
+ reportBug("Division by zero", stateZero, C);
+ return;
+ }
+
+ bool TaintedD = C.getState()->isTainted(*DV);
+ if (stateNotZero && stateZero && TaintedD) {
+ reportBug("Division by a tainted value, possibly zero", stateZero, C);
+ return;
+ }
+
+ // If we get here, then the denom should not be zero. We abandon the implicit
+ // zero denom case for now.
+ C.addTransition(stateNotZero);
+}
+
+void ento::registerDivZeroChecker(CheckerManager &mgr) {
+ mgr.registerChecker<DivZeroChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
new file mode 100644
index 0000000..7e0cb8e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
@@ -0,0 +1,213 @@
+//== DynamicTypeChecker.cpp ------------------------------------ -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker looks for cases where the dynamic type of an object is
+// unrelated to its static type. The type information used by this check is
+// collected by the DynamicTypePropagation checker. This check does not report
+// type errors for ObjC generic types, in order to avoid duplicate errors from
+// the ObjC Generics checker. This checker is not supposed to modify the
+// program state; it merely observes the type information provided by other
+// checkers.
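+//
+// Illustrative case (a sketch with hypothetical names):
+//   id obj = [NSNumber numberWithInt:1];  // dynamic type: NSNumber *
+//   NSString *str = obj;                  // implicit cast; NSNumber is
+//                                         // incompatible with NSString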
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeMap.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class DynamicTypeChecker : public Checker<check::PostStmt<ImplicitCastExpr>> {
+ mutable std::unique_ptr<BugType> BT;
+ void initBugType() const {
+ if (!BT)
+ BT.reset(
+ new BugType(this, "Dynamic and static type mismatch", "Type Error"));
+ }
+
+ class DynamicTypeBugVisitor
+ : public BugReporterVisitorImpl<DynamicTypeBugVisitor> {
+ public:
+ DynamicTypeBugVisitor(const MemRegion *Reg) : Reg(Reg) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ static int X = 0;
+ ID.AddPointer(&X);
+ ID.AddPointer(Reg);
+ }
+
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
+
+ private:
+ // The tracked region.
+ const MemRegion *Reg;
+ };
+
+ void reportTypeError(QualType DynamicType, QualType StaticType,
+ const MemRegion *Reg, const Stmt *ReportedNode,
+ CheckerContext &C) const;
+
+public:
+ void checkPostStmt(const ImplicitCastExpr *CE, CheckerContext &C) const;
+};
+}
+
+void DynamicTypeChecker::reportTypeError(QualType DynamicType,
+ QualType StaticType,
+ const MemRegion *Reg,
+ const Stmt *ReportedNode,
+ CheckerContext &C) const {
+ initBugType();
+ SmallString<192> Buf;
+ llvm::raw_svector_ostream OS(Buf);
+ OS << "Object has a dynamic type '";
+ QualType::print(DynamicType.getTypePtr(), Qualifiers(), OS, C.getLangOpts(),
+ llvm::Twine());
+ OS << "' which is incompatible with static type '";
+ QualType::print(StaticType.getTypePtr(), Qualifiers(), OS, C.getLangOpts(),
+ llvm::Twine());
+ OS << "'";
+ std::unique_ptr<BugReport> R(
+ new BugReport(*BT, OS.str(), C.generateNonFatalErrorNode()));
+ R->markInteresting(Reg);
+ R->addVisitor(llvm::make_unique<DynamicTypeBugVisitor>(Reg));
+ R->addRange(ReportedNode->getSourceRange());
+ C.emitReport(std::move(R));
+}
+
+PathDiagnosticPiece *DynamicTypeChecker::DynamicTypeBugVisitor::VisitNode(
+ const ExplodedNode *N, const ExplodedNode *PrevN, BugReporterContext &BRC,
+ BugReport &BR) {
+ ProgramStateRef State = N->getState();
+ ProgramStateRef StatePrev = PrevN->getState();
+
+ DynamicTypeInfo TrackedType = getDynamicTypeInfo(State, Reg);
+ DynamicTypeInfo TrackedTypePrev = getDynamicTypeInfo(StatePrev, Reg);
+ if (!TrackedType.isValid())
+ return nullptr;
+
+ if (TrackedTypePrev.isValid() &&
+ TrackedTypePrev.getType() == TrackedType.getType())
+ return nullptr;
+
+ // Retrieve the associated statement.
+ const Stmt *S = nullptr;
+ ProgramPoint ProgLoc = N->getLocation();
+ if (Optional<StmtPoint> SP = ProgLoc.getAs<StmtPoint>()) {
+ S = SP->getStmt();
+ }
+
+ if (!S)
+ return nullptr;
+
+ const LangOptions &LangOpts = BRC.getASTContext().getLangOpts();
+
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream OS(Buf);
+ OS << "Type '";
+ QualType::print(TrackedType.getType().getTypePtr(), Qualifiers(), OS,
+ LangOpts, llvm::Twine());
+ OS << "' is inferred from ";
+
+ if (const auto *ExplicitCast = dyn_cast<ExplicitCastExpr>(S)) {
+ OS << "explicit cast (from '";
+ QualType::print(ExplicitCast->getSubExpr()->getType().getTypePtr(),
+ Qualifiers(), OS, LangOpts, llvm::Twine());
+ OS << "' to '";
+ QualType::print(ExplicitCast->getType().getTypePtr(), Qualifiers(), OS,
+ LangOpts, llvm::Twine());
+ OS << "')";
+ } else if (const auto *ImplicitCast = dyn_cast<ImplicitCastExpr>(S)) {
+ OS << "implicit cast (from '";
+ QualType::print(ImplicitCast->getSubExpr()->getType().getTypePtr(),
+ Qualifiers(), OS, LangOpts, llvm::Twine());
+ OS << "' to '";
+ QualType::print(ImplicitCast->getType().getTypePtr(), Qualifiers(), OS,
+ LangOpts, llvm::Twine());
+ OS << "')";
+ } else {
+ OS << "this context";
+ }
+
+ // Generate the extra diagnostic.
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+ N->getLocationContext());
+ return new PathDiagnosticEventPiece(Pos, OS.str(), true, nullptr);
+}
+
+static bool hasDefinition(const ObjCObjectPointerType *ObjPtr) {
+ const ObjCInterfaceDecl *Decl = ObjPtr->getInterfaceDecl();
+ if (!Decl)
+ return false;
+
+ return Decl->getDefinition();
+}
+
+// TODO: consider checking explicit casts?
+void DynamicTypeChecker::checkPostStmt(const ImplicitCastExpr *CE,
+ CheckerContext &C) const {
+ // TODO: C++ support.
+ if (CE->getCastKind() != CK_BitCast)
+ return;
+
+ const MemRegion *Region = C.getSVal(CE).getAsRegion();
+ if (!Region)
+ return;
+
+ ProgramStateRef State = C.getState();
+ DynamicTypeInfo DynTypeInfo = getDynamicTypeInfo(State, Region);
+
+ if (!DynTypeInfo.isValid())
+ return;
+
+ QualType DynType = DynTypeInfo.getType();
+ QualType StaticType = CE->getType();
+
+ const auto *DynObjCType = DynType->getAs<ObjCObjectPointerType>();
+ const auto *StaticObjCType = StaticType->getAs<ObjCObjectPointerType>();
+
+ if (!DynObjCType || !StaticObjCType)
+ return;
+
+ if (!hasDefinition(DynObjCType) || !hasDefinition(StaticObjCType))
+ return;
+
+ ASTContext &ASTCtxt = C.getASTContext();
+
+ // Strip kindofness to correctly detect subtyping relationships.
+ DynObjCType = DynObjCType->stripObjCKindOfTypeAndQuals(ASTCtxt);
+ StaticObjCType = StaticObjCType->stripObjCKindOfTypeAndQuals(ASTCtxt);
+
+ // Specialized objects are handled by the generics checker.
+ if (StaticObjCType->isSpecialized())
+ return;
+
+ if (ASTCtxt.canAssignObjCInterfaces(StaticObjCType, DynObjCType))
+ return;
+
+ if (DynTypeInfo.canBeASubClass() &&
+ ASTCtxt.canAssignObjCInterfaces(DynObjCType, StaticObjCType))
+ return;
+
+ reportTypeError(DynType, StaticType, Region, CE, C);
+}
+
+void ento::registerDynamicTypeChecker(CheckerManager &mgr) {
+ mgr.registerChecker<DynamicTypeChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
new file mode 100644
index 0000000..30f6298
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
@@ -0,0 +1,940 @@
+//== DynamicTypePropagation.cpp -------------------------------- -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains two checkers. One helps the static analyzer core track
+// types; the other does type inference on Obj-C generics and reports type
+// errors.
+//
+// Dynamic Type Propagation:
+// This checker defines the rules for dynamic type gathering and propagation.
+//
+// Generics Checker for Objective-C:
+// This checker tries to find type errors that the compiler is not able to catch
+// due to the implicit conversions that were introduced for backward
+// compatibility.
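+//
+// Illustrative generics case (a sketch with hypothetical names):
+//   NSArray<NSNumber *> *nums = getNumbers();
+//   NSArray *untyped = nums;               // implicit conversion, legal
+//   NSArray<NSString *> *names = untyped;  // legal for the compiler, but
+//                                          // flagged by the checker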
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeMap.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+
+using namespace clang;
+using namespace ento;
+
+// ProgramState trait - The type information is tracked by DynamicTypeMap. This
+// is an auxiliary map that tracks more information about generic types,
+// because in some cases the most derived type is not the most informative one
+// about the type parameters. The types stored for each symbol in this map must
+// be specialized.
+// TODO: In some cases the type stored in this map is exactly the same as the
+// one stored in DynamicTypeMap. We should not store duplicated information in
+// those cases.
+REGISTER_MAP_WITH_PROGRAMSTATE(MostSpecializedTypeArgsMap, SymbolRef,
+ const ObjCObjectPointerType *)
+
+namespace {
+class DynamicTypePropagation:
+ public Checker< check::PreCall,
+ check::PostCall,
+ check::DeadSymbols,
+ check::PostStmt<CastExpr>,
+ check::PostStmt<CXXNewExpr>,
+ check::PreObjCMessage,
+ check::PostObjCMessage > {
+ const ObjCObjectType *getObjectTypeForAllocAndNew(const ObjCMessageExpr *MsgE,
+ CheckerContext &C) const;
+
+ /// \brief Return a better dynamic type if one can be derived from the cast.
+ const ObjCObjectPointerType *getBetterObjCType(const Expr *CastE,
+ CheckerContext &C) const;
+
+ ExplodedNode *dynamicTypePropagationOnCasts(const CastExpr *CE,
+ ProgramStateRef &State,
+ CheckerContext &C) const;
+
+ mutable std::unique_ptr<BugType> ObjCGenericsBugType;
+ void initBugType() const {
+ if (!ObjCGenericsBugType)
+ ObjCGenericsBugType.reset(
+ new BugType(this, "Generics", categories::CoreFoundationObjectiveC));
+ }
+
+ class GenericsBugVisitor : public BugReporterVisitorImpl<GenericsBugVisitor> {
+ public:
+ GenericsBugVisitor(SymbolRef S) : Sym(S) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ static int X = 0;
+ ID.AddPointer(&X);
+ ID.AddPointer(Sym);
+ }
+
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
+
+ private:
+ // The tracked symbol.
+ SymbolRef Sym;
+ };
+
+ void reportGenericsBug(const ObjCObjectPointerType *From,
+ const ObjCObjectPointerType *To, ExplodedNode *N,
+ SymbolRef Sym, CheckerContext &C,
+ const Stmt *ReportedNode = nullptr) const;
+public:
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPostStmt(const CastExpr *CastE, CheckerContext &C) const;
+ void checkPostStmt(const CXXNewExpr *NewE, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
+ void checkPreObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
+ void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
+
+ /// This value is set to true when the Generics checker is turned on.
+ DefaultBool CheckGenerics;
+};
+}
+
+void DynamicTypePropagation::checkDeadSymbols(SymbolReaper &SR,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ DynamicTypeMapImpl TypeMap = State->get<DynamicTypeMap>();
+ for (DynamicTypeMapImpl::iterator I = TypeMap.begin(), E = TypeMap.end();
+ I != E; ++I) {
+ if (!SR.isLiveRegion(I->first)) {
+ State = State->remove<DynamicTypeMap>(I->first);
+ }
+ }
+
+ if (!SR.hasDeadSymbols()) {
+ C.addTransition(State);
+ return;
+ }
+
+ MostSpecializedTypeArgsMapTy TyArgMap =
+ State->get<MostSpecializedTypeArgsMap>();
+ for (MostSpecializedTypeArgsMapTy::iterator I = TyArgMap.begin(),
+ E = TyArgMap.end();
+ I != E; ++I) {
+ if (SR.isDead(I->first)) {
+ State = State->remove<MostSpecializedTypeArgsMap>(I->first);
+ }
+ }
+
+ C.addTransition(State);
+}
+
+static void recordFixedType(const MemRegion *Region, const CXXMethodDecl *MD,
+ CheckerContext &C) {
+ assert(Region);
+ assert(MD);
+
+ ASTContext &Ctx = C.getASTContext();
+ QualType Ty = Ctx.getPointerType(Ctx.getRecordType(MD->getParent()));
+
+ ProgramStateRef State = C.getState();
+ State = setDynamicTypeInfo(State, Region, Ty, /*CanBeSubclass=*/false);
+ C.addTransition(State);
+ return;
+}
+
+void DynamicTypePropagation::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ if (const CXXConstructorCall *Ctor = dyn_cast<CXXConstructorCall>(&Call)) {
+ // C++11 [class.cdtor]p4: When a virtual function is called directly or
+ // indirectly from a constructor or from a destructor, including during
+ // the construction or destruction of the class's non-static data members,
+ // and the object to which the call applies is the object under
+ // construction or destruction, the function called is the final overrider
+ // in the constructor's or destructor's class and not one overriding it in
+ // a more-derived class.
+
+ switch (Ctor->getOriginExpr()->getConstructionKind()) {
+ case CXXConstructExpr::CK_Complete:
+ case CXXConstructExpr::CK_Delegating:
+ // No additional type info necessary.
+ return;
+ case CXXConstructExpr::CK_NonVirtualBase:
+ case CXXConstructExpr::CK_VirtualBase:
+ if (const MemRegion *Target = Ctor->getCXXThisVal().getAsRegion())
+ recordFixedType(Target, Ctor->getDecl(), C);
+ return;
+ }
+
+ return;
+ }
+
+ if (const CXXDestructorCall *Dtor = dyn_cast<CXXDestructorCall>(&Call)) {
+ // C++11 [class.cdtor]p4 (see above)
+ if (!Dtor->isBaseDestructor())
+ return;
+
+ const MemRegion *Target = Dtor->getCXXThisVal().getAsRegion();
+ if (!Target)
+ return;
+
+ const Decl *D = Dtor->getDecl();
+ if (!D)
+ return;
+
+ recordFixedType(Target, cast<CXXDestructorDecl>(D), C);
+ return;
+ }
+}
+
+void DynamicTypePropagation::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ // We can obtain perfect type info for return values from some calls.
+ if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
+
+ // Get the returned value if it's a region.
+ const MemRegion *RetReg = Call.getReturnValue().getAsRegion();
+ if (!RetReg)
+ return;
+
+ ProgramStateRef State = C.getState();
+ const ObjCMethodDecl *D = Msg->getDecl();
+
+ if (D && D->hasRelatedResultType()) {
+ switch (Msg->getMethodFamily()) {
+ default:
+ break;
+
+ // We assume that the type of the object returned by alloc and new is a
+ // pointer to an object of the class specified in the receiver of the
+ // message.
+ case OMF_alloc:
+ case OMF_new: {
+ // Get the type of object that will get created.
+ const ObjCMessageExpr *MsgE = Msg->getOriginExpr();
+ const ObjCObjectType *ObjTy = getObjectTypeForAllocAndNew(MsgE, C);
+ if (!ObjTy)
+ return;
+ QualType DynResTy =
+ C.getASTContext().getObjCObjectPointerType(QualType(ObjTy, 0));
+ C.addTransition(setDynamicTypeInfo(State, RetReg, DynResTy, false));
+ break;
+ }
+ case OMF_init: {
+ // Assume the result of the init method has the same dynamic type as
+ // the receiver, and propagate the dynamic type info.
+ const MemRegion *RecReg = Msg->getReceiverSVal().getAsRegion();
+ if (!RecReg)
+ return;
+ DynamicTypeInfo RecDynType = getDynamicTypeInfo(State, RecReg);
+ C.addTransition(setDynamicTypeInfo(State, RetReg, RecDynType));
+ break;
+ }
+ }
+ }
+ return;
+ }
+
+ if (const CXXConstructorCall *Ctor = dyn_cast<CXXConstructorCall>(&Call)) {
+ // We may need to undo the effects of our pre-call check.
+ switch (Ctor->getOriginExpr()->getConstructionKind()) {
+ case CXXConstructExpr::CK_Complete:
+ case CXXConstructExpr::CK_Delegating:
+ // No additional work necessary.
+ // Note: This will leave behind the actual type of the object for
+ // complete constructors, but arguably that's a good thing, since it
+ // means the dynamic type info will be correct even for objects
+ // constructed with operator new.
+ return;
+ case CXXConstructExpr::CK_NonVirtualBase:
+ case CXXConstructExpr::CK_VirtualBase:
+ if (const MemRegion *Target = Ctor->getCXXThisVal().getAsRegion()) {
+ // We just finished a base constructor. Now we can use the subclass's
+ // type when resolving virtual calls.
+ const Decl *D = C.getLocationContext()->getDecl();
+ recordFixedType(Target, cast<CXXConstructorDecl>(D), C);
+ }
+ return;
+ }
+ }
+}
+
+/// TODO: Handle explicit casts.
+/// Handle C++ casts.
+///
+/// Precondition: the cast is between ObjCObjectPointers.
+ExplodedNode *DynamicTypePropagation::dynamicTypePropagationOnCasts(
+ const CastExpr *CE, ProgramStateRef &State, CheckerContext &C) const {
+ // We only track type info for regions.
+ const MemRegion *ToR = C.getSVal(CE).getAsRegion();
+ if (!ToR)
+ return C.getPredecessor();
+
+ if (isa<ExplicitCastExpr>(CE))
+ return C.getPredecessor();
+
+ if (const Type *NewTy = getBetterObjCType(CE, C)) {
+ State = setDynamicTypeInfo(State, ToR, QualType(NewTy, 0));
+ return C.addTransition(State);
+ }
+ return C.getPredecessor();
+}
+
+void DynamicTypePropagation::checkPostStmt(const CXXNewExpr *NewE,
+ CheckerContext &C) const {
+ if (NewE->isArray())
+ return;
+
+ // We only track dynamic type info for regions.
+ const MemRegion *MR = C.getSVal(NewE).getAsRegion();
+ if (!MR)
+ return;
+
+ C.addTransition(setDynamicTypeInfo(C.getState(), MR, NewE->getType(),
+ /*CanBeSubclass=*/false));
+}
+
+const ObjCObjectType *
+DynamicTypePropagation::getObjectTypeForAllocAndNew(const ObjCMessageExpr *MsgE,
+ CheckerContext &C) const {
+ if (MsgE->getReceiverKind() == ObjCMessageExpr::Class) {
+ if (const ObjCObjectType *ObjTy
+ = MsgE->getClassReceiver()->getAs<ObjCObjectType>())
+ return ObjTy;
+ }
+
+ if (MsgE->getReceiverKind() == ObjCMessageExpr::SuperClass) {
+ if (const ObjCObjectType *ObjTy
+ = MsgE->getSuperType()->getAs<ObjCObjectType>())
+ return ObjTy;
+ }
+
+ const Expr *RecE = MsgE->getInstanceReceiver();
+ if (!RecE)
+ return nullptr;
+
+ RecE = RecE->IgnoreParenImpCasts();
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(RecE)) {
+ const StackFrameContext *SFCtx = C.getStackFrame();
+ // Are we calling [self alloc]? If this is self, get the type of the
+ // enclosing ObjC class.
+ if (DRE->getDecl() == SFCtx->getSelfDecl()) {
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(SFCtx->getDecl()))
+ if (const ObjCObjectType *ObjTy =
+ dyn_cast<ObjCObjectType>(MD->getClassInterface()->getTypeForDecl()))
+ return ObjTy;
+ }
+ }
+ return nullptr;
+}
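+
+// Illustrative receivers this resolves (hypothetical code):
+//   [MyClass alloc];  // class receiver -> the ObjCObjectType of MyClass
+//   [super alloc];    // super class receiver -> the superclass's type
+//   [self alloc];     // 'self' receiver -> the enclosing ObjC class's type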
+
+// Return a better dynamic type if one can be derived from the cast.
+// Compare the current dynamic type of the region and the new type to which we
+// are casting. If the new type is lower in the inheritance hierarchy, pick it.
+const ObjCObjectPointerType *
+DynamicTypePropagation::getBetterObjCType(const Expr *CastE,
+ CheckerContext &C) const {
+ const MemRegion *ToR = C.getSVal(CastE).getAsRegion();
+ assert(ToR);
+
+ // Get the old and new types.
+ const ObjCObjectPointerType *NewTy =
+ CastE->getType()->getAs<ObjCObjectPointerType>();
+ if (!NewTy)
+ return nullptr;
+ QualType OldDTy = getDynamicTypeInfo(C.getState(), ToR).getType();
+ if (OldDTy.isNull()) {
+ return NewTy;
+ }
+ const ObjCObjectPointerType *OldTy =
+ OldDTy->getAs<ObjCObjectPointerType>();
+ if (!OldTy)
+ return nullptr;
+
+ // If the old type is 'id', the new one is more precise.
+ if (OldTy->isObjCIdType() && !NewTy->isObjCIdType())
+ return NewTy;
+
+ // Return new if it's a subclass of old.
+ const ObjCInterfaceDecl *ToI = NewTy->getInterfaceDecl();
+ const ObjCInterfaceDecl *FromI = OldTy->getInterfaceDecl();
+ if (ToI && FromI && FromI->isSuperClassOf(ToI))
+ return NewTy;
+
+ return nullptr;
+}
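+
+// Illustrative (hypothetical code):
+//   id Obj = ...;
+//   MyClass *Ptr = (MyClass *)Obj;  // 'MyClass *' is more precise than the
+//                                   // tracked 'id', so it is picked
+// A cast to a subclass of the previously tracked type is preferred the same
+// way; casts up the hierarchy yield no better type and return null.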
+
+static const ObjCObjectPointerType *getMostInformativeDerivedClassImpl(
+ const ObjCObjectPointerType *From, const ObjCObjectPointerType *To,
+ const ObjCObjectPointerType *MostInformativeCandidate, ASTContext &C) {
+ // Check whether From and To are the same class modulo specialization.
+ if (From->getInterfaceDecl()->getCanonicalDecl() ==
+ To->getInterfaceDecl()->getCanonicalDecl()) {
+ if (To->isSpecialized()) {
+ assert(MostInformativeCandidate->isSpecialized());
+ return MostInformativeCandidate;
+ }
+ return From;
+ }
+ const auto *SuperOfTo =
+ To->getObjectType()->getSuperClassType()->getAs<ObjCObjectType>();
+ assert(SuperOfTo);
+ QualType SuperPtrOfToQual =
+ C.getObjCObjectPointerType(QualType(SuperOfTo, 0));
+ const auto *SuperPtrOfTo = SuperPtrOfToQual->getAs<ObjCObjectPointerType>();
+ if (To->isUnspecialized())
+ return getMostInformativeDerivedClassImpl(From, SuperPtrOfTo, SuperPtrOfTo,
+ C);
+ else
+ return getMostInformativeDerivedClassImpl(From, SuperPtrOfTo,
+ MostInformativeCandidate, C);
+}
+
+/// A downcast may lose specialization information, e.g.:
+///   MutableMap<T, U> : Map
+/// The downcast to MutableMap loses the information about the type arguments
+/// of Map (because the type parameters are not forwarded to Map), and in
+/// general there is no way to recover that information from the
+/// declaration. In order to keep the most information, let's find the most
+/// derived type that has all the type parameters forwarded.
+///
+/// Get a subclass of \p From (bounded below by \p To) that does not lose
+/// information about type parameters. \p To has to be a subclass of \p From.
+/// \p From has to be specialized.
+static const ObjCObjectPointerType *
+getMostInformativeDerivedClass(const ObjCObjectPointerType *From,
+ const ObjCObjectPointerType *To, ASTContext &C) {
+ return getMostInformativeDerivedClassImpl(From, To, To, C);
+}
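+
+// Illustrative sketch (hypothetical code):
+//   NSArray<NSString *> *Strings = ...;
+//   NSMutableArray *Mutable = (NSMutableArray *)Strings;
+// The destination type is unspecialized, so instead of settling for the bare
+// 'NSMutableArray' we look for the most derived type between the two bounds
+// that still carries the '<NSString *>' type-argument information.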
+
+/// Inputs:
+/// \param StaticLowerBound Static lower bound for a symbol. The dynamic lower
+/// bound might be a subclass of this type.
+/// \param StaticUpperBound A static upper bound for a symbol.
+/// \p StaticLowerBound is expected to be a subclass of \p StaticUpperBound.
+/// \param Current The type that was inferred for a symbol in a previous
+/// context. Might be null when this is the first time that inference happens.
+/// Precondition:
+/// \p StaticLowerBound or \p StaticUpperBound is specialized. If \p Current
+/// is not null, it is specialized.
+/// Possible cases:
+/// (1) \p Current is null and \p StaticLowerBound <: \p StaticUpperBound
+/// (2) \p StaticLowerBound <: \p Current <: \p StaticUpperBound
+/// (3) \p Current <: \p StaticLowerBound <: \p StaticUpperBound
+/// (4) \p StaticLowerBound <: \p StaticUpperBound <: \p Current
+/// Effect:
+/// Use getMostInformativeDerivedClass with the upper and lower bound of the
+/// set {\p StaticLowerBound, \p Current, \p StaticUpperBound}. The computed
+/// lower bound must be specialized. If the result differs from \p Current or
+/// \p Current is null, store the result.
+static bool
+storeWhenMoreInformative(ProgramStateRef &State, SymbolRef Sym,
+ const ObjCObjectPointerType *const *Current,
+ const ObjCObjectPointerType *StaticLowerBound,
+ const ObjCObjectPointerType *StaticUpperBound,
+ ASTContext &C) {
+ // Precondition
+ assert(StaticUpperBound->isSpecialized() ||
+ StaticLowerBound->isSpecialized());
+ assert(!Current || (*Current)->isSpecialized());
+
+ // Case (1)
+ if (!Current) {
+ if (StaticUpperBound->isUnspecialized()) {
+ State = State->set<MostSpecializedTypeArgsMap>(Sym, StaticLowerBound);
+ return true;
+ }
+ // Upper bound is specialized.
+ const ObjCObjectPointerType *WithMostInfo =
+ getMostInformativeDerivedClass(StaticUpperBound, StaticLowerBound, C);
+ State = State->set<MostSpecializedTypeArgsMap>(Sym, WithMostInfo);
+ return true;
+ }
+
+ // Case (3)
+ if (C.canAssignObjCInterfaces(StaticLowerBound, *Current)) {
+ return false;
+ }
+
+ // Case (4)
+ if (C.canAssignObjCInterfaces(*Current, StaticUpperBound)) {
+ // The type arguments might not be forwarded at any point of inheritance.
+ const ObjCObjectPointerType *WithMostInfo =
+ getMostInformativeDerivedClass(*Current, StaticUpperBound, C);
+ WithMostInfo =
+ getMostInformativeDerivedClass(WithMostInfo, StaticLowerBound, C);
+ if (WithMostInfo == *Current)
+ return false;
+ State = State->set<MostSpecializedTypeArgsMap>(Sym, WithMostInfo);
+ return true;
+ }
+
+ // Case (2)
+ const ObjCObjectPointerType *WithMostInfo =
+ getMostInformativeDerivedClass(*Current, StaticLowerBound, C);
+ if (WithMostInfo != *Current) {
+ State = State->set<MostSpecializedTypeArgsMap>(Sym, WithMostInfo);
+ return true;
+ }
+
+ return false;
+}
+
+/// Type inference based on static type information that is available for the
+/// cast and the tracked type information for the given symbol. When the tracked
+/// symbol and the destination type of the cast are unrelated, report an error.
+void DynamicTypePropagation::checkPostStmt(const CastExpr *CE,
+ CheckerContext &C) const {
+ if (CE->getCastKind() != CK_BitCast)
+ return;
+
+ QualType OriginType = CE->getSubExpr()->getType();
+ QualType DestType = CE->getType();
+
+ const auto *OrigObjectPtrType = OriginType->getAs<ObjCObjectPointerType>();
+ const auto *DestObjectPtrType = DestType->getAs<ObjCObjectPointerType>();
+
+ if (!OrigObjectPtrType || !DestObjectPtrType)
+ return;
+
+ ProgramStateRef State = C.getState();
+ ExplodedNode *AfterTypeProp = dynamicTypePropagationOnCasts(CE, State, C);
+
+ ASTContext &ASTCtxt = C.getASTContext();
+
+ // This checker detects subtyping relationships using the assignment rules.
+ // In order to do this, the kindofness must be stripped first. The checker
+ // treats every type as a kindof type anyway: when the tracked type is a
+ // subtype of the static type, it tries to look up the methods in the
+ // tracked type first.
+ OrigObjectPtrType = OrigObjectPtrType->stripObjCKindOfTypeAndQuals(ASTCtxt);
+ DestObjectPtrType = DestObjectPtrType->stripObjCKindOfTypeAndQuals(ASTCtxt);
+
+ // TODO: erase tracked information when there is a cast to an unrelated type
+ // and everything is unspecialized statically.
+ if (OrigObjectPtrType->isUnspecialized() &&
+ DestObjectPtrType->isUnspecialized())
+ return;
+
+ SymbolRef Sym = State->getSVal(CE, C.getLocationContext()).getAsSymbol();
+ if (!Sym)
+ return;
+
+ // Check which assignments are legal.
+ bool OrigToDest =
+ ASTCtxt.canAssignObjCInterfaces(DestObjectPtrType, OrigObjectPtrType);
+ bool DestToOrig =
+ ASTCtxt.canAssignObjCInterfaces(OrigObjectPtrType, DestObjectPtrType);
+ const ObjCObjectPointerType *const *TrackedType =
+ State->get<MostSpecializedTypeArgsMap>(Sym);
+
+ // Downcasts and upcasts are handled in a uniform way regardless of being
+ // explicit. Explicit casts, however, can happen between mismatched types.
+ if (isa<ExplicitCastExpr>(CE) && !OrigToDest && !DestToOrig) {
+ // Mismatched types. If the DestType is specialized, store it. Forget the
+ // tracked type otherwise.
+ if (DestObjectPtrType->isSpecialized()) {
+ State = State->set<MostSpecializedTypeArgsMap>(Sym, DestObjectPtrType);
+ C.addTransition(State, AfterTypeProp);
+ } else if (TrackedType) {
+ State = State->remove<MostSpecializedTypeArgsMap>(Sym);
+ C.addTransition(State, AfterTypeProp);
+ }
+ return;
+ }
+
+ // The tracked type should be a sub- or superclass of the static destination
+ // type. When an (implicit) upcast or a downcast happens according to the
+ // static types, and there is no subtyping relationship between the tracked
+ // and the static destination types, it indicates an error.
+ if (TrackedType &&
+ !ASTCtxt.canAssignObjCInterfaces(DestObjectPtrType, *TrackedType) &&
+ !ASTCtxt.canAssignObjCInterfaces(*TrackedType, DestObjectPtrType)) {
+ static CheckerProgramPointTag IllegalConv(this, "IllegalConversion");
+ ExplodedNode *N = C.addTransition(State, AfterTypeProp, &IllegalConv);
+ reportGenericsBug(*TrackedType, DestObjectPtrType, N, Sym, C);
+ return;
+ }
+
+ // Handle downcasts and upcasts.
+
+ const ObjCObjectPointerType *LowerBound = DestObjectPtrType;
+ const ObjCObjectPointerType *UpperBound = OrigObjectPtrType;
+ if (OrigToDest && !DestToOrig)
+ std::swap(LowerBound, UpperBound);
+
+ // The id type is not a real bound. Eliminate it.
+ LowerBound = LowerBound->isObjCIdType() ? UpperBound : LowerBound;
+ UpperBound = UpperBound->isObjCIdType() ? LowerBound : UpperBound;
+
+ if (storeWhenMoreInformative(State, Sym, TrackedType, LowerBound, UpperBound,
+ ASTCtxt)) {
+ C.addTransition(State, AfterTypeProp);
+ }
+}
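+
+// Illustrative warning case (hypothetical code):
+//   NSArray<NSString *> *Strings = ...;
+//   NSArray *Untyped = Strings;              // tracked: NSArray<NSString *>
+//   NSArray<NSNumber *> *Numbers = Untyped;  // neither assignment direction
+//                                            // relates this to the tracked
+//                                            // type -> conversion reported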
+
+static const Expr *stripCastsAndSugar(const Expr *E) {
+ E = E->IgnoreParenImpCasts();
+ if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E))
+ E = POE->getSyntacticForm()->IgnoreParenImpCasts();
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E))
+ E = OVE->getSourceExpr()->IgnoreParenImpCasts();
+ return E;
+}
+
+static bool isObjCTypeParamDependent(QualType Type) {
+ // It is illegal to typedef parameterized types inside an interface. Therefore
+ // an Objective-C type can only be dependent on a type parameter when the type
+ // parameter is structurally present in the type itself.
+ class IsObjCTypeParamDependentTypeVisitor
+ : public RecursiveASTVisitor<IsObjCTypeParamDependentTypeVisitor> {
+ public:
+ IsObjCTypeParamDependentTypeVisitor() : Result(false) {}
+ bool VisitTypedefType(const TypedefType *Type) {
+ if (isa<ObjCTypeParamDecl>(Type->getDecl())) {
+ Result = true;
+ return false;
+ }
+ return true;
+ }
+
+ bool Result;
+ };
+
+ IsObjCTypeParamDependentTypeVisitor Visitor;
+ Visitor.TraverseType(Type);
+ return Visitor.Result;
+}
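+
+// Illustrative (hypothetical declaration):
+//   @interface MyCollection<T> : NSObject
+//   - (void)add:(T)element;
+//   @end
+// Here the parameter type 'T' is a TypedefType whose declaration is an
+// ObjCTypeParamDecl, so the visitor flags it as type-parameter dependent.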
+
+/// A method might not be available in the interface indicated by the static
+/// type. However it might be available in the tracked type. In order to
+/// properly substitute the type parameters we need the declaration context of
+/// the method. The more specialized the enclosing class of the method is, the
+/// more likely that the parameter substitution will be successful.
+static const ObjCMethodDecl *
+findMethodDecl(const ObjCMessageExpr *MessageExpr,
+ const ObjCObjectPointerType *TrackedType, ASTContext &ASTCtxt) {
+ const ObjCMethodDecl *Method = nullptr;
+
+ QualType ReceiverType = MessageExpr->getReceiverType();
+ const auto *ReceiverObjectPtrType =
+ ReceiverType->getAs<ObjCObjectPointerType>();
+
+ // Do this "devirtualization" on instance and class methods only. Trust the
+ // static type on super and super class calls.
+ if (MessageExpr->getReceiverKind() == ObjCMessageExpr::Instance ||
+ MessageExpr->getReceiverKind() == ObjCMessageExpr::Class) {
+ // When the receiver type is id, Class, or some super class of the tracked
+ // type, look up the method in the tracked type, not in the receiver type.
+ // This way we preserve more information.
+ if (ReceiverType->isObjCIdType() || ReceiverType->isObjCClassType() ||
+ ASTCtxt.canAssignObjCInterfaces(ReceiverObjectPtrType, TrackedType)) {
+ const ObjCInterfaceDecl *InterfaceDecl = TrackedType->getInterfaceDecl();
+ // The method might not be found.
+ Selector Sel = MessageExpr->getSelector();
+ Method = InterfaceDecl->lookupInstanceMethod(Sel);
+ if (!Method)
+ Method = InterfaceDecl->lookupClassMethod(Sel);
+ }
+ }
+
+ // Fall back to static method lookup when the one based on the tracked type
+ // failed.
+ return Method ? Method : MessageExpr->getMethodDecl();
+}
+
+/// Get the return type of a method based on the tracked type information, or
+/// a null QualType when the return type is not an ObjCObjectPointerType.
+static QualType getReturnTypeForMethod(
+ const ObjCMethodDecl *Method, ArrayRef<QualType> TypeArgs,
+ const ObjCObjectPointerType *SelfType, ASTContext &C) {
+ QualType StaticResultType = Method->getReturnType();
+
+ // Is the return type declared as instancetype?
+ if (StaticResultType == C.getObjCInstanceType())
+ return QualType(SelfType, 0);
+
+ // Check whether the result type depends on a type parameter.
+ if (!isObjCTypeParamDependent(StaticResultType))
+ return QualType();
+
+ QualType ResultType = StaticResultType.substObjCTypeArgs(
+ C, TypeArgs, ObjCSubstitutionContext::Result);
+
+ return ResultType;
+}
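+
+// Illustrative (hypothetical method): with a tracked receiver type of
+// 'MyCollection<NSString *> *', a method declared as
+//   - (T)firstElement;
+// has its return type substituted to 'NSString *', while a method returning
+// 'instancetype' simply yields the tracked self type.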
+
+/// When the receiver has a tracked type, use that type to validate the
+/// arguments of the message expression and the return value.
+void DynamicTypePropagation::checkPreObjCMessage(const ObjCMethodCall &M,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SymbolRef Sym = M.getReceiverSVal().getAsSymbol();
+ if (!Sym)
+ return;
+
+ const ObjCObjectPointerType *const *TrackedType =
+ State->get<MostSpecializedTypeArgsMap>(Sym);
+ if (!TrackedType)
+ return;
+
+ // Get the type arguments from the tracked type and substitute them before
+ // doing the semantic check.
+
+ ASTContext &ASTCtxt = C.getASTContext();
+ const ObjCMessageExpr *MessageExpr = M.getOriginExpr();
+ const ObjCMethodDecl *Method =
+ findMethodDecl(MessageExpr, *TrackedType, ASTCtxt);
+
+ // It is possible to call non-existent methods in Obj-C.
+ if (!Method)
+ return;
+
+ Optional<ArrayRef<QualType>> TypeArgs =
+ (*TrackedType)->getObjCSubstitutions(Method->getDeclContext());
+ // This case might happen when there is an unspecialized override of a
+ // specialized method.
+ if (!TypeArgs)
+ return;
+
+ for (unsigned i = 0; i < Method->param_size(); i++) {
+ const Expr *Arg = MessageExpr->getArg(i);
+ const ParmVarDecl *Param = Method->parameters()[i];
+
+ QualType OrigParamType = Param->getType();
+ if (!isObjCTypeParamDependent(OrigParamType))
+ continue;
+
+ QualType ParamType = OrigParamType.substObjCTypeArgs(
+ ASTCtxt, *TypeArgs, ObjCSubstitutionContext::Parameter);
+ // Check if the argument can be assigned to the parameter.
+ const auto *ParamObjectPtrType = ParamType->getAs<ObjCObjectPointerType>();
+ const auto *ArgObjectPtrType =
+ stripCastsAndSugar(Arg)->getType()->getAs<ObjCObjectPointerType>();
+ if (!ParamObjectPtrType || !ArgObjectPtrType)
+ continue;
+
+ // Check if we have more concrete tracked type that is not a super type of
+ // the static argument type.
+ SVal ArgSVal = M.getArgSVal(i);
+ SymbolRef ArgSym = ArgSVal.getAsSymbol();
+ if (ArgSym) {
+ const ObjCObjectPointerType *const *TrackedArgType =
+ State->get<MostSpecializedTypeArgsMap>(ArgSym);
+ if (TrackedArgType &&
+ ASTCtxt.canAssignObjCInterfaces(ArgObjectPtrType, *TrackedArgType)) {
+ ArgObjectPtrType = *TrackedArgType;
+ }
+ }
+
+ // Warn when argument is incompatible with the parameter.
+ if (!ASTCtxt.canAssignObjCInterfaces(ParamObjectPtrType,
+ ArgObjectPtrType)) {
+ static CheckerProgramPointTag Tag(this, "ArgTypeMismatch");
+ ExplodedNode *N = C.addTransition(State, &Tag);
+ reportGenericsBug(ArgObjectPtrType, ParamObjectPtrType, N, Sym, C, Arg);
+ return;
+ }
+ }
+}
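+
+// Illustrative warning case (hypothetical code, assuming the receiver's
+// specialized type is tracked):
+//   NSMutableArray<NSString *> *Strings = ...;
+//   [Strings addObject:@1];  // the parameter type substitutes to
+//                            // 'NSString *'; an NSNumber argument is
+//                            // incompatible -> warning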
+
+/// This callback is used to infer the types for Class variables. This info is
+/// used later to validate messages that are sent to classes. Class variables
+/// are initialized by invoking the 'class' method on a class.
+/// This method is also used to infer the type information for the return
+/// types.
+// TODO: right now it only tracks generic types. Extend this to track every
+// type in the DynamicTypeMap and diagnose type errors!
+void DynamicTypePropagation::checkPostObjCMessage(const ObjCMethodCall &M,
+ CheckerContext &C) const {
+ const ObjCMessageExpr *MessageExpr = M.getOriginExpr();
+
+ SymbolRef RetSym = M.getReturnValue().getAsSymbol();
+ if (!RetSym)
+ return;
+
+ Selector Sel = MessageExpr->getSelector();
+ ProgramStateRef State = C.getState();
+ // Inference for class variables.
+ // We are only interested in cases where the 'class' method is invoked on a
+ // class. This method is provided by the runtime and available on all classes.
+ if (MessageExpr->getReceiverKind() == ObjCMessageExpr::Class &&
+ Sel.getAsString() == "class") {
+
+ QualType ReceiverType = MessageExpr->getClassReceiver();
+ const auto *ReceiverClassType = ReceiverType->getAs<ObjCObjectType>();
+ QualType ReceiverClassPointerType =
+ C.getASTContext().getObjCObjectPointerType(
+ QualType(ReceiverClassType, 0));
+
+ if (!ReceiverClassType->isSpecialized())
+ return;
+ const auto *InferredType =
+ ReceiverClassPointerType->getAs<ObjCObjectPointerType>();
+ assert(InferredType);
+
+ State = State->set<MostSpecializedTypeArgsMap>(RetSym, InferredType);
+ C.addTransition(State);
+ return;
+ }
+
+ // Tracking for return types.
+ SymbolRef RecSym = M.getReceiverSVal().getAsSymbol();
+ if (!RecSym)
+ return;
+
+ const ObjCObjectPointerType *const *TrackedType =
+ State->get<MostSpecializedTypeArgsMap>(RecSym);
+ if (!TrackedType)
+ return;
+
+ ASTContext &ASTCtxt = C.getASTContext();
+ const ObjCMethodDecl *Method =
+ findMethodDecl(MessageExpr, *TrackedType, ASTCtxt);
+ if (!Method)
+ return;
+
+ Optional<ArrayRef<QualType>> TypeArgs =
+ (*TrackedType)->getObjCSubstitutions(Method->getDeclContext());
+ if (!TypeArgs)
+ return;
+
+ QualType ResultType =
+ getReturnTypeForMethod(Method, *TypeArgs, *TrackedType, ASTCtxt);
+ // The static type is the same as the deduced type.
+ if (ResultType.isNull())
+ return;
+
+ const MemRegion *RetRegion = M.getReturnValue().getAsRegion();
+ ExplodedNode *Pred = C.getPredecessor();
+ // When there is an entry available for the return symbol in DynamicTypeMap,
+ // the call was inlined, and the information in the DynamicTypeMap should
+ // be precise.
+ if (RetRegion && !State->get<DynamicTypeMap>(RetRegion)) {
+ // TODO: we have duplicated information in DynamicTypeMap and
+ // MostSpecializedTypeArgsMap. We should only store anything in the latter
+ // if the stored data differs from the one stored in the former.
+ State = setDynamicTypeInfo(State, RetRegion, ResultType,
+ /*CanBeSubclass=*/true);
+ Pred = C.addTransition(State);
+ }
+
+ const auto *ResultPtrType = ResultType->getAs<ObjCObjectPointerType>();
+
+ if (!ResultPtrType || ResultPtrType->isUnspecialized())
+ return;
+
+ // When the result is a specialized type and it is not tracked yet, track it
+ // for the result symbol.
+ if (!State->get<MostSpecializedTypeArgsMap>(RetSym)) {
+ State = State->set<MostSpecializedTypeArgsMap>(RetSym, ResultPtrType);
+ C.addTransition(State, Pred);
+ }
+}
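+
+// Illustrative inference (hypothetical code):
+//   Class C = [NSArray<NSString *> class];  // the return symbol is tracked
+//                                           // as 'NSArray<NSString *> *'
+// For ordinary messages, the substituted return type is likewise tracked for
+// the returned symbol whenever it turns out to be specialized.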
+
+void DynamicTypePropagation::reportGenericsBug(
+ const ObjCObjectPointerType *From, const ObjCObjectPointerType *To,
+ ExplodedNode *N, SymbolRef Sym, CheckerContext &C,
+ const Stmt *ReportedNode) const {
+ if (!CheckGenerics)
+ return;
+
+ initBugType();
+ SmallString<192> Buf;
+ llvm::raw_svector_ostream OS(Buf);
+ OS << "Conversion from value of type '";
+ QualType::print(From, Qualifiers(), OS, C.getLangOpts(), llvm::Twine());
+ OS << "' to incompatible type '";
+ QualType::print(To, Qualifiers(), OS, C.getLangOpts(), llvm::Twine());
+ OS << "'";
+ std::unique_ptr<BugReport> R(
+ new BugReport(*ObjCGenericsBugType, OS.str(), N));
+ R->markInteresting(Sym);
+ R->addVisitor(llvm::make_unique<GenericsBugVisitor>(Sym));
+ if (ReportedNode)
+ R->addRange(ReportedNode->getSourceRange());
+ C.emitReport(std::move(R));
+}
+
+PathDiagnosticPiece *DynamicTypePropagation::GenericsBugVisitor::VisitNode(
+ const ExplodedNode *N, const ExplodedNode *PrevN, BugReporterContext &BRC,
+ BugReport &BR) {
+ ProgramStateRef state = N->getState();
+ ProgramStateRef statePrev = PrevN->getState();
+
+ const ObjCObjectPointerType *const *TrackedType =
+ state->get<MostSpecializedTypeArgsMap>(Sym);
+ const ObjCObjectPointerType *const *TrackedTypePrev =
+ statePrev->get<MostSpecializedTypeArgsMap>(Sym);
+ if (!TrackedType)
+ return nullptr;
+
+ if (TrackedTypePrev && *TrackedTypePrev == *TrackedType)
+ return nullptr;
+
+ // Retrieve the associated statement.
+ const Stmt *S = nullptr;
+ ProgramPoint ProgLoc = N->getLocation();
+ if (Optional<StmtPoint> SP = ProgLoc.getAs<StmtPoint>()) {
+ S = SP->getStmt();
+ }
+
+ if (!S)
+ return nullptr;
+
+ const LangOptions &LangOpts = BRC.getASTContext().getLangOpts();
+
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream OS(Buf);
+ OS << "Type '";
+ QualType::print(*TrackedType, Qualifiers(), OS, LangOpts, llvm::Twine());
+ OS << "' is inferred from ";
+
+ if (const auto *ExplicitCast = dyn_cast<ExplicitCastExpr>(S)) {
+ OS << "explicit cast (from '";
+ QualType::print(ExplicitCast->getSubExpr()->getType().getTypePtr(),
+ Qualifiers(), OS, LangOpts, llvm::Twine());
+ OS << "' to '";
+ QualType::print(ExplicitCast->getType().getTypePtr(), Qualifiers(), OS,
+ LangOpts, llvm::Twine());
+ OS << "')";
+ } else if (const auto *ImplicitCast = dyn_cast<ImplicitCastExpr>(S)) {
+ OS << "implicit cast (from '";
+ QualType::print(ImplicitCast->getSubExpr()->getType().getTypePtr(),
+ Qualifiers(), OS, LangOpts, llvm::Twine());
+ OS << "' to '";
+ QualType::print(ImplicitCast->getType().getTypePtr(), Qualifiers(), OS,
+ LangOpts, llvm::Twine());
+ OS << "')";
+ } else {
+ OS << "this context";
+ }
+
+ // Generate the extra diagnostic.
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+ N->getLocationContext());
+ return new PathDiagnosticEventPiece(Pos, OS.str(), true, nullptr);
+}
+
+/// Register checkers.
+void ento::registerObjCGenericsChecker(CheckerManager &mgr) {
+ DynamicTypePropagation *checker =
+ mgr.registerChecker<DynamicTypePropagation>();
+ checker->CheckGenerics = true;
+}
+
+void ento::registerDynamicTypePropagation(CheckerManager &mgr) {
+ mgr.registerChecker<DynamicTypePropagation>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
new file mode 100644
index 0000000..8f6c20a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
@@ -0,0 +1,190 @@
+//==- ExprInspectionChecker.cpp - Used for regression tests ------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/StringSwitch.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ExprInspectionChecker : public Checker<eval::Call, check::DeadSymbols> {
+ mutable std::unique_ptr<BugType> BT;
+
+ void analyzerEval(const CallExpr *CE, CheckerContext &C) const;
+ void analyzerCheckInlined(const CallExpr *CE, CheckerContext &C) const;
+ void analyzerWarnIfReached(const CallExpr *CE, CheckerContext &C) const;
+ void analyzerCrash(const CallExpr *CE, CheckerContext &C) const;
+ void analyzerWarnOnDeadSymbol(const CallExpr *CE, CheckerContext &C) const;
+
+ typedef void (ExprInspectionChecker::*FnCheck)(const CallExpr *,
+ CheckerContext &C) const;
+
+public:
+ bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+};
+}
+
+REGISTER_SET_WITH_PROGRAMSTATE(MarkedSymbols, const void *)
+
+bool ExprInspectionChecker::evalCall(const CallExpr *CE,
+ CheckerContext &C) const {
+ // These checks should have no effect on the surrounding environment
+ // (globals should not be invalidated, etc), hence the use of evalCall.
+ FnCheck Handler = llvm::StringSwitch<FnCheck>(C.getCalleeName(CE))
+ .Case("clang_analyzer_eval", &ExprInspectionChecker::analyzerEval)
+ .Case("clang_analyzer_checkInlined",
+ &ExprInspectionChecker::analyzerCheckInlined)
+ .Case("clang_analyzer_crash", &ExprInspectionChecker::analyzerCrash)
+ .Case("clang_analyzer_warnIfReached",
+ &ExprInspectionChecker::analyzerWarnIfReached)
+ .Case("clang_analyzer_warnOnDeadSymbol",
+ &ExprInspectionChecker::analyzerWarnOnDeadSymbol)
+ .Default(nullptr);
+
+ if (!Handler)
+ return false;
+
+ (this->*Handler)(CE, C);
+ return true;
+}
+
+static const char *getArgumentValueString(const CallExpr *CE,
+ CheckerContext &C) {
+ if (CE->getNumArgs() == 0)
+ return "Missing assertion argument";
+
+ ExplodedNode *N = C.getPredecessor();
+ const LocationContext *LC = N->getLocationContext();
+ ProgramStateRef State = N->getState();
+
+ const Expr *Assertion = CE->getArg(0);
+ SVal AssertionVal = State->getSVal(Assertion, LC);
+
+ if (AssertionVal.isUndef())
+ return "UNDEFINED";
+
+ ProgramStateRef StTrue, StFalse;
+ std::tie(StTrue, StFalse) =
+ State->assume(AssertionVal.castAs<DefinedOrUnknownSVal>());
+
+ if (StTrue) {
+ if (StFalse)
+ return "UNKNOWN";
+ else
+ return "TRUE";
+ } else {
+ if (StFalse)
+ return "FALSE";
+ else
+ llvm_unreachable("Invalid constraint; neither true nor false.");
+ }
+}
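+
+// Typical regression-test usage (illustrative, assuming the usual forward
+// declaration of the debug hook):
+//   void clang_analyzer_eval(int);
+//   void test(int x) {
+//     if (x > 0)
+//       clang_analyzer_eval(x > 0);   // expected-warning{{TRUE}}
+//     clang_analyzer_eval(x == 42);   // expected-warning{{UNKNOWN}}
+//   }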
+
+void ExprInspectionChecker::analyzerEval(const CallExpr *CE,
+ CheckerContext &C) const {
+ const LocationContext *LC = C.getPredecessor()->getLocationContext();
+
+ // A specific instantiation of an inlined function may have more constrained
+ // values than can generally be assumed. Skip the check.
+ if (LC->getCurrentStackFrame()->getParent() != nullptr)
+ return;
+
+ if (!BT)
+ BT.reset(new BugType(this, "Checking analyzer assumptions", "debug"));
+
+ ExplodedNode *N = C.generateNonFatalErrorNode();
+ if (!N)
+ return;
+ C.emitReport(
+ llvm::make_unique<BugReport>(*BT, getArgumentValueString(CE, C), N));
+}
+
+void ExprInspectionChecker::analyzerWarnIfReached(const CallExpr *CE,
+ CheckerContext &C) const {
+
+ if (!BT)
+ BT.reset(new BugType(this, "Checking analyzer assumptions", "debug"));
+
+ ExplodedNode *N = C.generateNonFatalErrorNode();
+ if (!N)
+ return;
+ C.emitReport(llvm::make_unique<BugReport>(*BT, "REACHABLE", N));
+}
+
+void ExprInspectionChecker::analyzerCheckInlined(const CallExpr *CE,
+ CheckerContext &C) const {
+ const LocationContext *LC = C.getPredecessor()->getLocationContext();
+
+ // An inlined function could conceivably also be analyzed as a top-level
+ // function. We ignore this case and only emit a message (TRUE or FALSE)
+ // when we are analyzing it as an inlined function. This means that
+ // clang_analyzer_checkInlined(true) should always print TRUE, but
+ // clang_analyzer_checkInlined(false) should never actually print anything.
+ if (LC->getCurrentStackFrame()->getParent() == nullptr)
+ return;
+
+ if (!BT)
+ BT.reset(new BugType(this, "Checking analyzer assumptions", "debug"));
+
+ ExplodedNode *N = C.generateNonFatalErrorNode();
+ if (!N)
+ return;
+ C.emitReport(
+ llvm::make_unique<BugReport>(*BT, getArgumentValueString(CE, C), N));
+}
+
+void ExprInspectionChecker::analyzerWarnOnDeadSymbol(const CallExpr *CE,
+ CheckerContext &C) const {
+ if (CE->getNumArgs() == 0)
+ return;
+ SVal Val = C.getSVal(CE->getArg(0));
+ SymbolRef Sym = Val.getAsSymbol();
+ if (!Sym)
+ return;
+
+ ProgramStateRef State = C.getState();
+ State = State->add<MarkedSymbols>(Sym);
+ C.addTransition(State);
+}
+
+void ExprInspectionChecker::checkDeadSymbols(SymbolReaper &SymReaper,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ const MarkedSymbolsTy &Syms = State->get<MarkedSymbols>();
+ for (auto I = Syms.begin(), E = Syms.end(); I != E; ++I) {
+ SymbolRef Sym = static_cast<SymbolRef>(*I);
+ if (!SymReaper.isDead(Sym))
+ continue;
+
+ if (!BT)
+ BT.reset(new BugType(this, "Checking analyzer assumptions", "debug"));
+
+ ExplodedNode *N = C.generateNonFatalErrorNode();
+ if (!N)
+ return;
+
+ C.emitReport(llvm::make_unique<BugReport>(*BT, "SYMBOL DEAD", N));
+ C.addTransition(State->remove<MarkedSymbols>(Sym), N);
+ }
+}
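+
+// Typical usage (illustrative):
+//   void clang_analyzer_warnOnDeadSymbol(int);
+//   int getInt(void);
+//   void test() {
+//     int x = getInt();
+//     clang_analyzer_warnOnDeadSymbol(x);
+//   }  // 'x' dies here -> expected-warning{{SYMBOL DEAD}}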
+
+void ExprInspectionChecker::analyzerCrash(const CallExpr *CE,
+ CheckerContext &C) const {
+ LLVM_BUILTIN_TRAP;
+}
+
+void ento::registerExprInspectionChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<ExprInspectionChecker>();
+}
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
new file mode 100644
index 0000000..3fe89f9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
@@ -0,0 +1,68 @@
+//=== FixedAddressChecker.cpp - Fixed address usage checker ----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines FixedAddressChecker, a builtin checker that checks for
+// assignment of a fixed address to a pointer.
+// This check corresponds to CWE-587.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class FixedAddressChecker
+ : public Checker< check::PreStmt<BinaryOperator> > {
+ mutable std::unique_ptr<BuiltinBug> BT;
+
+public:
+ void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
+};
+}
+
+void FixedAddressChecker::checkPreStmt(const BinaryOperator *B,
+ CheckerContext &C) const {
+ // Using a fixed address is not portable because that address will probably
+ // not be valid in all environments or platforms.
+
+ if (B->getOpcode() != BO_Assign)
+ return;
+
+ QualType T = B->getType();
+ if (!T->isPointerType())
+ return;
+
+ ProgramStateRef state = C.getState();
+ SVal RV = state->getSVal(B->getRHS(), C.getLocationContext());
+
+ if (!RV.isConstant() || RV.isZeroConstant())
+ return;
+
+ if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
+ if (!BT)
+ BT.reset(
+ new BuiltinBug(this, "Use fixed address",
+ "Using a fixed address is not portable because that "
+ "address will probably not be valid in all "
+ "environments or platforms."));
+ auto R = llvm::make_unique<BugReport>(*BT, BT->getDescription(), N);
+ R->addRange(B->getRHS()->getSourceRange());
+ C.emitReport(std::move(R));
+ }
+}
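+
+// Illustrative code this flags (hypothetical):
+//   int *p;
+//   p = (int *)0x10000;  // warning: use of a fixed address
+//   p = 0;               // not flagged: zero constants are skipped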
+
+void ento::registerFixedAddressChecker(CheckerManager &mgr) {
+ mgr.registerChecker<FixedAddressChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
new file mode 100644
index 0000000..8c8acc6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
@@ -0,0 +1,731 @@
+//== GenericTaintChecker.cpp ----------------------------------- -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker defines the attack surface for generic taint propagation.
+//
+// The taint information produced by it might be useful to other checkers. For
+// example, checkers should report errors which involve tainted data more
+// aggressively, even if the involved symbols are underconstrained.
+//
+//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/AST/Attr.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include <climits>
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class GenericTaintChecker : public Checker< check::PostStmt<CallExpr>,
+ check::PreStmt<CallExpr> > {
+public:
+ static void *getTag() { static int Tag; return &Tag; }
+
+ void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+
+private:
+ static const unsigned InvalidArgIndex = UINT_MAX;
+ /// Denotes the return value.
+ static const unsigned ReturnValueIndex = UINT_MAX - 1;
+
+ mutable std::unique_ptr<BugType> BT;
+ inline void initBugType() const {
+ if (!BT)
+ BT.reset(new BugType(this, "Use of Untrusted Data", "Untrusted Data"));
+ }
+
+ /// \brief Catch taint-related bugs. Check if tainted data is passed to a
+ /// system call, etc.
+ bool checkPre(const CallExpr *CE, CheckerContext &C) const;
+
+ /// \brief Add taint sources on a pre-visit.
+ void addSourcesPre(const CallExpr *CE, CheckerContext &C) const;
+
+ /// \brief Propagate taint generated at pre-visit.
+ bool propagateFromPre(const CallExpr *CE, CheckerContext &C) const;
+
+ /// \brief Add taint sources on a post visit.
+ void addSourcesPost(const CallExpr *CE, CheckerContext &C) const;
+
+ /// Check if the region the expression evaluates to is the standard input,
+ /// and thus, is tainted.
+ static bool isStdin(const Expr *E, CheckerContext &C);
+
+ /// \brief Given a pointer argument, get the symbol of the value it contains
+ /// (points to).
+ static SymbolRef getPointedToSymbol(CheckerContext &C, const Expr *Arg);
+
+ /// Functions defining the attack surface.
+ typedef ProgramStateRef (GenericTaintChecker::*FnCheck)(const CallExpr *,
+ CheckerContext &C) const;
+ ProgramStateRef postScanf(const CallExpr *CE, CheckerContext &C) const;
+ ProgramStateRef postSocket(const CallExpr *CE, CheckerContext &C) const;
+ ProgramStateRef postRetTaint(const CallExpr *CE, CheckerContext &C) const;
+
+ /// Taint the scanned input if the file is tainted.
+ ProgramStateRef preFscanf(const CallExpr *CE, CheckerContext &C) const;
+
+ /// Check for CWE-134: Uncontrolled Format String.
+ static const char MsgUncontrolledFormatString[];
+ bool checkUncontrolledFormatString(const CallExpr *CE,
+ CheckerContext &C) const;
+
+ /// Check for:
+ /// CERT/STR02-C. "Sanitize data passed to complex subsystems"
+ /// CWE-78, "Failure to Sanitize Data into an OS Command"
+ static const char MsgSanitizeSystemArgs[];
+ bool checkSystemCall(const CallExpr *CE, StringRef Name,
+ CheckerContext &C) const;
+
+ /// Check if tainted data is used as a buffer size in strn... functions
+ /// and allocators.
+ static const char MsgTaintedBufferSize[];
+ bool checkTaintedBufferSize(const CallExpr *CE, const FunctionDecl *FDecl,
+ CheckerContext &C) const;
+
+ /// Generate a report if the expression is tainted or points to tainted data.
+ bool generateReportIfTainted(const Expr *E, const char Msg[],
+ CheckerContext &C) const;
+
+
+ typedef SmallVector<unsigned, 2> ArgVector;
+
+ /// \brief A struct used to specify taint propagation rules for a function.
+ ///
+ /// If any of the possible taint source arguments is tainted, all of the
+ /// destination arguments should also be tainted. Use InvalidArgIndex in the
+ /// src list to specify that all of the arguments can introduce taint. Use
+ /// InvalidArgIndex in the dst arguments to signify that all the non-const
+ /// pointer and reference arguments might be tainted on return. If
+ /// ReturnValueIndex is added to the dst list, the return value will be
+ /// tainted.
+ struct TaintPropagationRule {
+ /// List of arguments which can be taint sources and should be checked.
+ ArgVector SrcArgs;
+ /// List of arguments which should be tainted on function return.
+ ArgVector DstArgs;
+ // TODO: Check if using other data structures would be more optimal.
+
+ TaintPropagationRule() {}
+
+ TaintPropagationRule(unsigned SArg,
+ unsigned DArg, bool TaintRet = false) {
+ SrcArgs.push_back(SArg);
+ DstArgs.push_back(DArg);
+ if (TaintRet)
+ DstArgs.push_back(ReturnValueIndex);
+ }
+
+ TaintPropagationRule(unsigned SArg1, unsigned SArg2,
+ unsigned DArg, bool TaintRet = false) {
+ SrcArgs.push_back(SArg1);
+ SrcArgs.push_back(SArg2);
+ DstArgs.push_back(DArg);
+ if (TaintRet)
+ DstArgs.push_back(ReturnValueIndex);
+ }
+
+ /// Get the propagation rule for a given function.
+ static TaintPropagationRule
+ getTaintPropagationRule(const FunctionDecl *FDecl,
+ StringRef Name,
+ CheckerContext &C);
+
+ inline void addSrcArg(unsigned A) { SrcArgs.push_back(A); }
+ inline void addDstArg(unsigned A) { DstArgs.push_back(A); }
+
+ inline bool isNull() const { return SrcArgs.empty(); }
+
+ inline bool isDestinationArgument(unsigned ArgNum) const {
+ return (std::find(DstArgs.begin(),
+ DstArgs.end(), ArgNum) != DstArgs.end());
+ }
+
+ static inline bool isTaintedOrPointsToTainted(const Expr *E,
+ ProgramStateRef State,
+ CheckerContext &C) {
+ return (State->isTainted(E, C.getLocationContext()) || isStdin(E, C) ||
+ (E->getType().getTypePtr()->isPointerType() &&
+ State->isTainted(getPointedToSymbol(C, E))));
+ }
+
+ /// \brief Pre-process a function which propagates taint according to the
+ /// taint rule.
+ ProgramStateRef process(const CallExpr *CE, CheckerContext &C) const;
+
+ };
+};
+
+const unsigned GenericTaintChecker::ReturnValueIndex;
+const unsigned GenericTaintChecker::InvalidArgIndex;
+
+const char GenericTaintChecker::MsgUncontrolledFormatString[] =
+ "Untrusted data is used as a format string "
+ "(CWE-134: Uncontrolled Format String)";
+
+const char GenericTaintChecker::MsgSanitizeSystemArgs[] =
+ "Untrusted data is passed to a system call "
+ "(CERT/STR02-C. Sanitize data passed to complex subsystems)";
+
+const char GenericTaintChecker::MsgTaintedBufferSize[] =
+ "Untrusted data is used to specify the buffer size "
+ "(CERT/STR31-C. Guarantee that storage for strings has sufficient space for "
+ "character data and the null terminator)";
+
+} // end of anonymous namespace
+
+/// A set used to pass information from the call pre-visit to the call
+/// post-visit. The values are unsigned integers: either ReturnValueIndex, or
+/// the index of a pointer/reference argument whose pointee should be tainted
+/// on return.
+REGISTER_SET_WITH_PROGRAMSTATE(TaintArgsOnPostVisit, unsigned)
+
+GenericTaintChecker::TaintPropagationRule
+GenericTaintChecker::TaintPropagationRule::getTaintPropagationRule(
+ const FunctionDecl *FDecl,
+ StringRef Name,
+ CheckerContext &C) {
+ // TODO: Currently, we might lose precision here: we always mark a return
+ // value as tainted even if it's just a pointer pointing to tainted data.
+
+ // Check for exact name match for functions without builtin substitutes.
+ TaintPropagationRule Rule = llvm::StringSwitch<TaintPropagationRule>(Name)
+ .Case("atoi", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("atol", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("atoll", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("getc", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("fgetc", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("getc_unlocked", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("getw", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("toupper", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("tolower", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("strchr", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("strrchr", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("read", TaintPropagationRule(0, 2, 1, true))
+ .Case("pread", TaintPropagationRule(InvalidArgIndex, 1, true))
+ .Case("gets", TaintPropagationRule(InvalidArgIndex, 0, true))
+ .Case("fgets", TaintPropagationRule(2, 0, true))
+ .Case("getline", TaintPropagationRule(2, 0))
+ .Case("getdelim", TaintPropagationRule(3, 0))
+ .Case("fgetln", TaintPropagationRule(0, ReturnValueIndex))
+ .Default(TaintPropagationRule());
+
+ if (!Rule.isNull())
+ return Rule;
+
+ // Check if it's one of the memory setting/copying functions.
+ // This check is specialized but faster than calling isCLibraryFunction.
+ unsigned BId = 0;
+ if ( (BId = FDecl->getMemoryFunctionKind()) )
+ switch(BId) {
+ case Builtin::BImemcpy:
+ case Builtin::BImemmove:
+ case Builtin::BIstrncpy:
+ case Builtin::BIstrncat:
+ return TaintPropagationRule(1, 2, 0, true);
+ case Builtin::BIstrlcpy:
+ case Builtin::BIstrlcat:
+ return TaintPropagationRule(1, 2, 0, false);
+ case Builtin::BIstrndup:
+ return TaintPropagationRule(0, 1, ReturnValueIndex);
+
+ default:
+ break;
+ };
+
+ // Process all other functions which could be defined as builtins.
+ if (Rule.isNull()) {
+ if (C.isCLibraryFunction(FDecl, "snprintf") ||
+ C.isCLibraryFunction(FDecl, "sprintf"))
+ return TaintPropagationRule(InvalidArgIndex, 0, true);
+ else if (C.isCLibraryFunction(FDecl, "strcpy") ||
+ C.isCLibraryFunction(FDecl, "stpcpy") ||
+ C.isCLibraryFunction(FDecl, "strcat"))
+ return TaintPropagationRule(1, 0, true);
+ else if (C.isCLibraryFunction(FDecl, "bcopy"))
+ return TaintPropagationRule(0, 2, 1, false);
+ else if (C.isCLibraryFunction(FDecl, "strdup") ||
+ C.isCLibraryFunction(FDecl, "strdupa"))
+ return TaintPropagationRule(0, ReturnValueIndex);
+ else if (C.isCLibraryFunction(FDecl, "wcsdup"))
+ return TaintPropagationRule(0, ReturnValueIndex);
+ }
+
+ // Skipping the following functions, since they might be used for cleansing
+ // or smart memory copy:
+ // - memccpy - copying until hitting a special character.
+
+ return TaintPropagationRule();
+}
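+
+// Illustrative propagation (hypothetical code): for
+//   char buf[64];
+//   ssize_t n = read(fd, buf, sizeof(buf));
+// the 'read' rule above has sources {0, 2} and destinations {1, return}, so
+// if the descriptor or the size argument is already tainted, the buffer
+// contents and the return value become tainted after the call.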
+
+void GenericTaintChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ // Check for errors first.
+ if (checkPre(CE, C))
+ return;
+
+ // Add taint second.
+ addSourcesPre(CE, C);
+}
+
+void GenericTaintChecker::checkPostStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ if (propagateFromPre(CE, C))
+ return;
+ addSourcesPost(CE, C);
+}
+
+void GenericTaintChecker::addSourcesPre(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef State = nullptr;
+ const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+ if (!FDecl || FDecl->getKind() != Decl::Function)
+ return;
+
+ StringRef Name = C.getCalleeName(FDecl);
+ if (Name.empty())
+ return;
+
+ // First, try generating a propagation rule for this function.
+ TaintPropagationRule Rule =
+ TaintPropagationRule::getTaintPropagationRule(FDecl, Name, C);
+ if (!Rule.isNull()) {
+ State = Rule.process(CE, C);
+ if (!State)
+ return;
+ C.addTransition(State);
+ return;
+ }
+
+ // Otherwise, check if we have custom pre-processing implemented.
+ FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
+ .Case("fscanf", &GenericTaintChecker::preFscanf)
+ .Default(nullptr);
+ // Check and evaluate the call.
+ if (evalFunction)
+ State = (this->*evalFunction)(CE, C);
+ if (!State)
+ return;
+ C.addTransition(State);
+}
+
+bool GenericTaintChecker::propagateFromPre(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ // Depending on what was tainted at pre-visit, we determined a set of
+ // arguments which should be tainted after the function returns. These are
+ // stored in the state as TaintArgsOnPostVisit set.
+ TaintArgsOnPostVisitTy TaintArgs = State->get<TaintArgsOnPostVisit>();
+ if (TaintArgs.isEmpty())
+ return false;
+
+ for (llvm::ImmutableSet<unsigned>::iterator
+ I = TaintArgs.begin(), E = TaintArgs.end(); I != E; ++I) {
+ unsigned ArgNum = *I;
+
+ // Special handling for the tainted return value.
+ if (ArgNum == ReturnValueIndex) {
+ State = State->addTaint(CE, C.getLocationContext());
+ continue;
+ }
+
+ // The arguments are pointer arguments. The data they are pointing at is
+ // tainted after the call.
+ if (CE->getNumArgs() < (ArgNum + 1))
+ return false;
+ const Expr* Arg = CE->getArg(ArgNum);
+ SymbolRef Sym = getPointedToSymbol(C, Arg);
+ if (Sym)
+ State = State->addTaint(Sym);
+ }
+
+ // Clear up the taint info from the state.
+ State = State->remove<TaintArgsOnPostVisit>();
+
+ if (State != C.getState()) {
+ C.addTransition(State);
+ return true;
+ }
+ return false;
+}
+
+void GenericTaintChecker::addSourcesPost(const CallExpr *CE,
+ CheckerContext &C) const {
+ // Define the attack surface.
+ // Set the evaluation function by switching on the callee name.
+ const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+ if (!FDecl || FDecl->getKind() != Decl::Function)
+ return;
+
+ StringRef Name = C.getCalleeName(FDecl);
+ if (Name.empty())
+ return;
+ FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
+ .Case("scanf", &GenericTaintChecker::postScanf)
+ // TODO: Add support for vfscanf & family.
+ .Case("getchar", &GenericTaintChecker::postRetTaint)
+ .Case("getchar_unlocked", &GenericTaintChecker::postRetTaint)
+ .Case("getenv", &GenericTaintChecker::postRetTaint)
+ .Case("fopen", &GenericTaintChecker::postRetTaint)
+ .Case("fdopen", &GenericTaintChecker::postRetTaint)
+ .Case("freopen", &GenericTaintChecker::postRetTaint)
+ .Case("getch", &GenericTaintChecker::postRetTaint)
+ .Case("wgetch", &GenericTaintChecker::postRetTaint)
+ .Case("socket", &GenericTaintChecker::postSocket)
+ .Default(nullptr);
+
+ // If the callee isn't defined, it is not of security concern.
+ // Check and evaluate the call.
+ ProgramStateRef State = nullptr;
+ if (evalFunction)
+ State = (this->*evalFunction)(CE, C);
+ if (!State)
+ return;
+
+ C.addTransition(State);
+}
+
+bool GenericTaintChecker::checkPre(const CallExpr *CE, CheckerContext &C) const {
+
+ if (checkUncontrolledFormatString(CE, C))
+ return true;
+
+ const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+ if (!FDecl || FDecl->getKind() != Decl::Function)
+ return false;
+
+ StringRef Name = C.getCalleeName(FDecl);
+ if (Name.empty())
+ return false;
+
+ if (checkSystemCall(CE, Name, C))
+ return true;
+
+ if (checkTaintedBufferSize(CE, FDecl, C))
+ return true;
+
+ return false;
+}
+
+SymbolRef GenericTaintChecker::getPointedToSymbol(CheckerContext &C,
+ const Expr* Arg) {
+ ProgramStateRef State = C.getState();
+ SVal AddrVal = State->getSVal(Arg->IgnoreParens(), C.getLocationContext());
+ if (AddrVal.isUnknownOrUndef())
+ return nullptr;
+
+ Optional<Loc> AddrLoc = AddrVal.getAs<Loc>();
+ if (!AddrLoc)
+ return nullptr;
+
+ const PointerType *ArgTy =
+ dyn_cast<PointerType>(Arg->getType().getCanonicalType().getTypePtr());
+ SVal Val = State->getSVal(*AddrLoc,
+ ArgTy ? ArgTy->getPointeeType(): QualType());
+ return Val.getAsSymbol();
+}
+
+ProgramStateRef
+GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ // Check for taint in arguments.
+ bool IsTainted = false;
+ for (ArgVector::const_iterator I = SrcArgs.begin(),
+ E = SrcArgs.end(); I != E; ++I) {
+ unsigned ArgNum = *I;
+
+ if (ArgNum == InvalidArgIndex) {
+ // Check if any of the arguments is tainted, but skip the
+ // destination arguments.
+ for (unsigned int i = 0; i < CE->getNumArgs(); ++i) {
+ if (isDestinationArgument(i))
+ continue;
+ if ((IsTainted = isTaintedOrPointsToTainted(CE->getArg(i), State, C)))
+ break;
+ }
+ break;
+ }
+
+ if (CE->getNumArgs() < (ArgNum + 1))
+ return State;
+ if ((IsTainted = isTaintedOrPointsToTainted(CE->getArg(ArgNum), State, C)))
+ break;
+ }
+ if (!IsTainted)
+ return State;
+
+ // Mark the arguments which should be tainted after the function returns.
+ for (ArgVector::const_iterator I = DstArgs.begin(),
+ E = DstArgs.end(); I != E; ++I) {
+ unsigned ArgNum = *I;
+
+ // Should we mark all arguments as tainted?
+ if (ArgNum == InvalidArgIndex) {
+ // For all pointer and references that were passed in:
+ // If they are not pointing to const data, mark data as tainted.
+ // TODO: So far we are just going one level down; ideally we'd need to
+ // recurse here.
+ for (unsigned int i = 0; i < CE->getNumArgs(); ++i) {
+ const Expr *Arg = CE->getArg(i);
+ // Process pointer argument.
+ const Type *ArgTy = Arg->getType().getTypePtr();
+ QualType PType = ArgTy->getPointeeType();
+ if ((!PType.isNull() && !PType.isConstQualified())
+ || (ArgTy->isReferenceType() && !Arg->getType().isConstQualified()))
+ State = State->add<TaintArgsOnPostVisit>(i);
+ }
+ continue;
+ }
+
+ // Should mark the return value?
+ if (ArgNum == ReturnValueIndex) {
+ State = State->add<TaintArgsOnPostVisit>(ReturnValueIndex);
+ continue;
+ }
+
+ // Mark the given argument.
+ assert(ArgNum < CE->getNumArgs());
+ State = State->add<TaintArgsOnPostVisit>(ArgNum);
+ }
+
+ return State;
+}
+
+// If argument 0 (file descriptor) is tainted, all arguments except for arg 0
+// and arg 1 should get taint.
+ProgramStateRef GenericTaintChecker::preFscanf(const CallExpr *CE,
+ CheckerContext &C) const {
+ assert(CE->getNumArgs() >= 2);
+ ProgramStateRef State = C.getState();
+
+ // Check if the file descriptor is tainted.
+ if (State->isTainted(CE->getArg(0), C.getLocationContext()) ||
+ isStdin(CE->getArg(0), C)) {
+ // All arguments except for the first two should get taint.
+ for (unsigned int i = 2; i < CE->getNumArgs(); ++i)
+ State = State->add<TaintArgsOnPostVisit>(i);
+ return State;
+ }
+
+ return nullptr;
+}
+
+// If argument 0 (protocol domain) is network, the return value should get
+// taint.
+ProgramStateRef GenericTaintChecker::postSocket(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ if (CE->getNumArgs() < 3)
+ return State;
+
+ SourceLocation DomLoc = CE->getArg(0)->getExprLoc();
+ StringRef DomName = C.getMacroNameOrSpelling(DomLoc);
+ // Whitelist the internal communication protocols.
+ if (DomName.equals("AF_SYSTEM") || DomName.equals("AF_LOCAL") ||
+ DomName.equals("AF_UNIX") || DomName.equals("AF_RESERVED_36"))
+ return State;
+ State = State->addTaint(CE, C.getLocationContext());
+ return State;
+}
+
+ProgramStateRef GenericTaintChecker::postScanf(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ if (CE->getNumArgs() < 2)
+ return State;
+
+ // All arguments except for the very first one should get taint.
+ for (unsigned int i = 1; i < CE->getNumArgs(); ++i) {
+ // The arguments are pointer arguments. The data they are pointing at is
+ // tainted after the call.
+ const Expr* Arg = CE->getArg(i);
+ SymbolRef Sym = getPointedToSymbol(C, Arg);
+ if (Sym)
+ State = State->addTaint(Sym);
+ }
+ return State;
+}
+
+ProgramStateRef GenericTaintChecker::postRetTaint(const CallExpr *CE,
+ CheckerContext &C) const {
+ return C.getState()->addTaint(CE, C.getLocationContext());
+}
+
+bool GenericTaintChecker::isStdin(const Expr *E, CheckerContext &C) {
+ ProgramStateRef State = C.getState();
+ SVal Val = State->getSVal(E, C.getLocationContext());
+
+ // stdin is a pointer, so it would be a region.
+ const MemRegion *MemReg = Val.getAsRegion();
+
+ // The region should be symbolic; we do not know its value.
+ const SymbolicRegion *SymReg = dyn_cast_or_null<SymbolicRegion>(MemReg);
+ if (!SymReg)
+ return false;
+
+ // Get its symbol and find the declaration region it points to.
+ const SymbolRegionValue *Sm =dyn_cast<SymbolRegionValue>(SymReg->getSymbol());
+ if (!Sm)
+ return false;
+ const DeclRegion *DeclReg = dyn_cast_or_null<DeclRegion>(Sm->getRegion());
+ if (!DeclReg)
+ return false;
+
+ // This region corresponds to a declaration; find out if it's a global/extern
+ // variable named stdin with the proper type.
+ if (const VarDecl *D = dyn_cast_or_null<VarDecl>(DeclReg->getDecl())) {
+ D = D->getCanonicalDecl();
+ if ((D->getName().find("stdin") != StringRef::npos) && D->isExternC())
+ if (const PointerType * PtrTy =
+ dyn_cast<PointerType>(D->getType().getTypePtr()))
+ if (PtrTy->getPointeeType() == C.getASTContext().getFILEType())
+ return true;
+ }
+ return false;
+}
+
+static bool getPrintfFormatArgumentNum(const CallExpr *CE,
+ const CheckerContext &C,
+ unsigned int &ArgNum) {
+ // Find if the function contains a format string argument.
+ // Handles: fprintf, printf, sprintf, snprintf, vfprintf, vprintf, vsprintf,
+ // vsnprintf, syslog, custom annotated functions.
+ const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+ if (!FDecl)
+ return false;
+ for (const auto *Format : FDecl->specific_attrs<FormatAttr>()) {
+ ArgNum = Format->getFormatIdx() - 1;
+ if ((Format->getType()->getName() == "printf") &&
+ CE->getNumArgs() > ArgNum)
+ return true;
+ }
+
+ // Or if a function is named setproctitle (this is a heuristic).
+ if (C.getCalleeName(CE).find("setproctitle") != StringRef::npos) {
+ ArgNum = 0;
+ return true;
+ }
+
+ return false;
+}
+
+bool GenericTaintChecker::generateReportIfTainted(const Expr *E,
+ const char Msg[],
+ CheckerContext &C) const {
+ assert(E);
+
+ // Check for taint.
+ ProgramStateRef State = C.getState();
+ if (!State->isTainted(getPointedToSymbol(C, E)) &&
+ !State->isTainted(E, C.getLocationContext()))
+ return false;
+
+ // Generate diagnostic.
+ if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
+ initBugType();
+ auto report = llvm::make_unique<BugReport>(*BT, Msg, N);
+ report->addRange(E->getSourceRange());
+ C.emitReport(std::move(report));
+ return true;
+ }
+ return false;
+}
+
+bool GenericTaintChecker::checkUncontrolledFormatString(const CallExpr *CE,
+ CheckerContext &C) const{
+ // Check if the function contains a format string argument.
+ unsigned int ArgNum = 0;
+ if (!getPrintfFormatArgumentNum(CE, C, ArgNum))
+ return false;
+
+ // If either the format string content or the pointer itself is tainted, warn.
+ return generateReportIfTainted(CE->getArg(ArgNum),
+ MsgUncontrolledFormatString, C);
+}
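+
+// Illustrative warning case (hypothetical code):
+//   char fmt[64];
+//   scanf("%63s", fmt);  // post-visit of scanf taints 'fmt'
+//   printf(fmt);         // tainted data as a format string -> warning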
+
+bool GenericTaintChecker::checkSystemCall(const CallExpr *CE,
+ StringRef Name,
+ CheckerContext &C) const {
+ // TODO: It might make sense to run this check on demand. In some cases,
+ // we should check if the environment has been cleansed here. We also might
+ // need to know if the user was reset before these calls (seteuid).
+ unsigned ArgNum = llvm::StringSwitch<unsigned>(Name)
+ .Case("system", 0)
+ .Case("popen", 0)
+ .Case("execl", 0)
+ .Case("execle", 0)
+ .Case("execlp", 0)
+ .Case("execv", 0)
+ .Case("execvp", 0)
+ .Case("execvP", 0)
+ .Case("execve", 0)
+ .Case("dlopen", 0)
+ .Default(UINT_MAX);
+
+ if (ArgNum == UINT_MAX || CE->getNumArgs() < (ArgNum + 1))
+ return false;
+
+ return generateReportIfTainted(CE->getArg(ArgNum), MsgSanitizeSystemArgs, C);
+}
+
+// TODO: Should this check be a part of the CString checker?
+// If yes, should taint be a global setting?
+bool GenericTaintChecker::checkTaintedBufferSize(const CallExpr *CE,
+ const FunctionDecl *FDecl,
+ CheckerContext &C) const {
+ // If the function has a buffer size argument, set ArgNum.
+ unsigned ArgNum = InvalidArgIndex;
+ unsigned BId = 0;
+ if ( (BId = FDecl->getMemoryFunctionKind()) )
+ switch(BId) {
+ case Builtin::BImemcpy:
+ case Builtin::BImemmove:
+ case Builtin::BIstrncpy:
+ ArgNum = 2;
+ break;
+ case Builtin::BIstrndup:
+ ArgNum = 1;
+ break;
+ default:
+ break;
+ };
+
+ if (ArgNum == InvalidArgIndex) {
+ if (C.isCLibraryFunction(FDecl, "malloc") ||
+ C.isCLibraryFunction(FDecl, "calloc") ||
+ C.isCLibraryFunction(FDecl, "alloca"))
+ ArgNum = 0;
+ else if (C.isCLibraryFunction(FDecl, "memccpy"))
+ ArgNum = 3;
+ else if (C.isCLibraryFunction(FDecl, "realloc"))
+ ArgNum = 1;
+ else if (C.isCLibraryFunction(FDecl, "bcopy"))
+ ArgNum = 2;
+ }
+
+ return ArgNum != InvalidArgIndex && CE->getNumArgs() > ArgNum &&
+ generateReportIfTainted(CE->getArg(ArgNum), MsgTaintedBufferSize, C);
+}
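+
+// Illustrative warning case (hypothetical code):
+//   size_t n;
+//   scanf("%zu", &n);      // taints 'n'
+//   char *p = malloc(n);   // tainted buffer size -> warning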
+
+void ento::registerGenericTaintChecker(CheckerManager &mgr) {
+ mgr.registerChecker<GenericTaintChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
new file mode 100644
index 0000000..0c3bff5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
@@ -0,0 +1,512 @@
+//== IdenticalExprChecker.cpp - Identical expression checker----------------==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This defines IdenticalExprChecker, a check that warns about
+/// unintended use of identical expressions.
+///
+/// It checks for use of identical expressions with comparison operators and
+/// inside conditional expressions.
+///
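+/// For example (an illustrative sketch, not an exhaustive list), both of
+/// the following are flagged:
+/// \code
+///   if (a == 4 && a == 4) {}  // identical operands of '&&'
+///   int x = c ? 42 : 42;      // identical branches of '?:'
+/// \endcode
+///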
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+static bool isIdenticalStmt(const ASTContext &Ctx, const Stmt *Stmt1,
+ const Stmt *Stmt2, bool IgnoreSideEffects = false);
+//===----------------------------------------------------------------------===//
+// FindIdenticalExprVisitor - Identify nodes using identical expressions.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class FindIdenticalExprVisitor
+ : public RecursiveASTVisitor<FindIdenticalExprVisitor> {
+ BugReporter &BR;
+ const CheckerBase *Checker;
+ AnalysisDeclContext *AC;
+public:
+ explicit FindIdenticalExprVisitor(BugReporter &B,
+ const CheckerBase *Checker,
+ AnalysisDeclContext *A)
+ : BR(B), Checker(Checker), AC(A) {}
+ // FindIdenticalExprVisitor only visits nodes that are binary operators,
+ // if statements, or conditional operators.
+ bool VisitBinaryOperator(const BinaryOperator *B);
+ bool VisitIfStmt(const IfStmt *I);
+ bool VisitConditionalOperator(const ConditionalOperator *C);
+
+private:
+ void reportIdenticalExpr(const BinaryOperator *B, bool CheckBitwise,
+ ArrayRef<SourceRange> Sr);
+ void checkBitwiseOrLogicalOp(const BinaryOperator *B, bool CheckBitwise);
+ void checkComparisonOp(const BinaryOperator *B);
+};
+} // end anonymous namespace
+
+void FindIdenticalExprVisitor::reportIdenticalExpr(const BinaryOperator *B,
+ bool CheckBitwise,
+ ArrayRef<SourceRange> Sr) {
+ StringRef Message;
+ if (CheckBitwise)
+ Message = "identical expressions on both sides of bitwise operator";
+ else
+ Message = "identical expressions on both sides of logical operator";
+
+ PathDiagnosticLocation ELoc =
+ PathDiagnosticLocation::createOperatorLoc(B, BR.getSourceManager());
+ BR.EmitBasicReport(AC->getDecl(), Checker,
+ "Use of identical expressions",
+ categories::LogicError,
+ Message, ELoc, Sr);
+}
+
+void FindIdenticalExprVisitor::checkBitwiseOrLogicalOp(const BinaryOperator *B,
+ bool CheckBitwise) {
+ SourceRange Sr[2];
+
+ const Expr *LHS = B->getLHS();
+ const Expr *RHS = B->getRHS();
+
+ // Split operators as long as we still have operators to split on. We will
+ // get called for every binary operator in an expression so there is no need
+ // to check every one against each other here, just the right most one with
+ // the others.
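+ // For example (illustrative), in 'a | b | a' the rightmost 'a' is first
+ // compared against 'b' and then against the leftmost 'a', which matches.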
+ while (const BinaryOperator *B2 = dyn_cast<BinaryOperator>(LHS)) {
+ if (B->getOpcode() != B2->getOpcode())
+ break;
+ if (isIdenticalStmt(AC->getASTContext(), RHS, B2->getRHS())) {
+ Sr[0] = RHS->getSourceRange();
+ Sr[1] = B2->getRHS()->getSourceRange();
+ reportIdenticalExpr(B, CheckBitwise, Sr);
+ }
+ LHS = B2->getLHS();
+ }
+
+ if (isIdenticalStmt(AC->getASTContext(), RHS, LHS)) {
+ Sr[0] = RHS->getSourceRange();
+ Sr[1] = LHS->getSourceRange();
+ reportIdenticalExpr(B, CheckBitwise, Sr);
+ }
+}
+
+bool FindIdenticalExprVisitor::VisitIfStmt(const IfStmt *I) {
+ const Stmt *Stmt1 = I->getThen();
+ const Stmt *Stmt2 = I->getElse();
+
+ // Check for identical inner condition:
+ //
+ // if (x<10) {
+ // if (x<10) {
+ // ..
+ if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(Stmt1)) {
+ if (!CS->body_empty()) {
+ const IfStmt *InnerIf = dyn_cast<IfStmt>(*CS->body_begin());
+ if (InnerIf && isIdenticalStmt(AC->getASTContext(), I->getCond(), InnerIf->getCond(), /*ignoreSideEffects=*/ false)) {
+ PathDiagnosticLocation ELoc(InnerIf->getCond(), BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), Checker, "Identical conditions",
+ categories::LogicError,
+ "conditions of the inner and outer statements are identical",
+ ELoc);
+ }
+ }
+ }
+
+ // Check for identical conditions:
+ //
+ // if (b) {
+ // foo1();
+ // } else if (b) {
+ // foo2();
+ // }
+ if (Stmt1 && Stmt2) {
+ const Expr *Cond1 = I->getCond();
+ const Stmt *Else = Stmt2;
+ while (const IfStmt *I2 = dyn_cast_or_null<IfStmt>(Else)) {
+ const Expr *Cond2 = I2->getCond();
+ if (isIdenticalStmt(AC->getASTContext(), Cond1, Cond2, false)) {
+ SourceRange Sr = Cond1->getSourceRange();
+ PathDiagnosticLocation ELoc(Cond2, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), Checker, "Identical conditions",
+ categories::LogicError,
+ "expression is identical to previous condition",
+ ELoc, Sr);
+ }
+ Else = I2->getElse();
+ }
+ }
+
+ if (!Stmt1 || !Stmt2)
+ return true;
+
+ // Special handling for code like:
+ //
+ // if (b) {
+ // i = 1;
+ // } else
+ // i = 1;
+ if (const CompoundStmt *CompStmt = dyn_cast<CompoundStmt>(Stmt1)) {
+ if (CompStmt->size() == 1)
+ Stmt1 = CompStmt->body_back();
+ }
+ if (const CompoundStmt *CompStmt = dyn_cast<CompoundStmt>(Stmt2)) {
+ if (CompStmt->size() == 1)
+ Stmt2 = CompStmt->body_back();
+ }
+
+ if (isIdenticalStmt(AC->getASTContext(), Stmt1, Stmt2, true)) {
+ PathDiagnosticLocation ELoc =
+ PathDiagnosticLocation::createBegin(I, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), Checker,
+ "Identical branches",
+ categories::LogicError,
+ "true and false branches are identical", ELoc);
+ }
+ return true;
+}
+
+bool FindIdenticalExprVisitor::VisitBinaryOperator(const BinaryOperator *B) {
+ BinaryOperator::Opcode Op = B->getOpcode();
+
+ if (BinaryOperator::isBitwiseOp(Op))
+ checkBitwiseOrLogicalOp(B, true);
+
+ if (BinaryOperator::isLogicalOp(Op))
+ checkBitwiseOrLogicalOp(B, false);
+
+ if (BinaryOperator::isComparisonOp(Op))
+ checkComparisonOp(B);
+
+ // We want to visit ALL nodes (including subexpressions of binary
+ // comparison expressions) that contain comparison operators.
+ // We always return true to traverse ALL nodes.
+ return true;
+}
+
+void FindIdenticalExprVisitor::checkComparisonOp(const BinaryOperator *B) {
+ BinaryOperator::Opcode Op = B->getOpcode();
+
+ //
+ // Special case for floating-point representation.
+ //
+ // If the expressions on both sides of a comparison operator are of float
+ // type, then for some comparison operators no warning is reported even if
+ // the expressions are identical from a symbolic point of view. Comparisons
+ // between expressions, declared variables, and literals are treated
+ // differently.
+ //
+ // != and == between float literals that have the same value should NOT warn.
+ // < > between float literals that have the same value SHOULD warn.
+ //
+ // != and == between the same float declaration should NOT warn.
+ // < > between the same float declaration SHOULD warn.
+ //
+ // != and == between equivalent expressions that evaluate to float
+ // should NOT warn.
+ // < > between equivalent expressions that evaluate to float
+ // should NOT warn.
+ //
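+ // Illustration (hypothetical): for 'float f;', 'f != f' is the idiomatic
+ // NaN test and is not reported, whereas 'f < f' is reported.
+ //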
+ const Expr *LHS = B->getLHS()->IgnoreParenImpCasts();
+ const Expr *RHS = B->getRHS()->IgnoreParenImpCasts();
+
+ const DeclRefExpr *DeclRef1 = dyn_cast<DeclRefExpr>(LHS);
+ const DeclRefExpr *DeclRef2 = dyn_cast<DeclRefExpr>(RHS);
+ const FloatingLiteral *FloatLit1 = dyn_cast<FloatingLiteral>(LHS);
+ const FloatingLiteral *FloatLit2 = dyn_cast<FloatingLiteral>(RHS);
+ if ((DeclRef1) && (DeclRef2)) {
+ if ((DeclRef1->getType()->hasFloatingRepresentation()) &&
+ (DeclRef2->getType()->hasFloatingRepresentation())) {
+ if (DeclRef1->getDecl() == DeclRef2->getDecl()) {
+ if ((Op == BO_EQ) || (Op == BO_NE)) {
+ return;
+ }
+ }
+ }
+ } else if ((FloatLit1) && (FloatLit2)) {
+ if (FloatLit1->getValue().bitwiseIsEqual(FloatLit2->getValue())) {
+ if ((Op == BO_EQ) || (Op == BO_NE)) {
+ return;
+ }
+ }
+ } else if (LHS->getType()->hasFloatingRepresentation()) {
+ // If any side of comparison operator still has floating-point
+ // representation, then it's an expression. Don't warn.
+ // Here only the LHS is checked since the RHS will be implicitly cast to float.
+ return;
+ } else {
+ // No special case with floating-point representation, report as usual.
+ }
+
+ if (isIdenticalStmt(AC->getASTContext(), B->getLHS(), B->getRHS())) {
+ PathDiagnosticLocation ELoc =
+ PathDiagnosticLocation::createOperatorLoc(B, BR.getSourceManager());
+ StringRef Message;
+ if (Op == BO_EQ || Op == BO_LE || Op == BO_GE)
+ Message = "comparison of identical expressions always evaluates to true";
+ else
+ Message = "comparison of identical expressions always evaluates to false";
+ BR.EmitBasicReport(AC->getDecl(), Checker,
+ "Compare of identical expressions",
+ categories::LogicError, Message, ELoc);
+ }
+}
+
+bool FindIdenticalExprVisitor::VisitConditionalOperator(
+ const ConditionalOperator *C) {
+
+ // Check if expressions in conditional expression are identical
+ // from a symbolic point of view.
+
+ if (isIdenticalStmt(AC->getASTContext(), C->getTrueExpr(),
+ C->getFalseExpr(), true)) {
+ PathDiagnosticLocation ELoc =
+ PathDiagnosticLocation::createConditionalColonLoc(
+ C, BR.getSourceManager());
+
+ SourceRange Sr[2];
+ Sr[0] = C->getTrueExpr()->getSourceRange();
+ Sr[1] = C->getFalseExpr()->getSourceRange();
+ BR.EmitBasicReport(
+ AC->getDecl(), Checker,
+ "Identical expressions in conditional expression",
+ categories::LogicError,
+ "identical expressions on both sides of ':' in conditional expression",
+ ELoc, Sr);
+ }
+ // We want to visit ALL nodes (including expressions inside conditional
+ // expressions) that contain conditional operators, thus we always
+ // return true to traverse ALL nodes.
+ return true;
+}
+
+/// \brief Determines whether two statement trees are identical regarding
+/// operators and symbols.
+///
+/// Exceptions: expressions containing macros or functions with possible side
+/// effects are never considered identical.
+/// Limitations: (t + u) and (u + t) are not considered identical.
+/// t*(u + t) and t*u + t*t are not considered identical.
+///
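+/// For example (illustrative), 'MAX(a, b) == MAX(a, b)' is not treated as
+/// identical, because both operands come from a macro expansion.
+///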
+static bool isIdenticalStmt(const ASTContext &Ctx, const Stmt *Stmt1,
+ const Stmt *Stmt2, bool IgnoreSideEffects) {
+
+ if (!Stmt1 || !Stmt2) {
+ return !Stmt1 && !Stmt2;
+ }
+
+ // If Stmt1 and Stmt2 are of different classes then they are not
+ // identical statements.
+ if (Stmt1->getStmtClass() != Stmt2->getStmtClass())
+ return false;
+
+ const Expr *Expr1 = dyn_cast<Expr>(Stmt1);
+ const Expr *Expr2 = dyn_cast<Expr>(Stmt2);
+
+ if (Expr1 && Expr2) {
+ // If Stmt1 has side effects then don't warn even if expressions
+ // are identical.
+ if (!IgnoreSideEffects && Expr1->HasSideEffects(Ctx))
+ return false;
+ // If either expression comes from a macro then don't warn even if
+ // the expressions are identical.
+ if ((Expr1->getExprLoc().isMacroID()) || (Expr2->getExprLoc().isMacroID()))
+ return false;
+
+ // If all children of two expressions are identical, return true.
+ Expr::const_child_iterator I1 = Expr1->child_begin();
+ Expr::const_child_iterator I2 = Expr2->child_begin();
+ while (I1 != Expr1->child_end() && I2 != Expr2->child_end()) {
+ if (!*I1 || !*I2 || !isIdenticalStmt(Ctx, *I1, *I2, IgnoreSideEffects))
+ return false;
+ ++I1;
+ ++I2;
+ }
+ // If the two expressions have different numbers of children, return
+ // false.
+ if (I1 != Expr1->child_end())
+ return false;
+ if (I2 != Expr2->child_end())
+ return false;
+ }
+
+ switch (Stmt1->getStmtClass()) {
+ default:
+ return false;
+ case Stmt::CallExprClass:
+ case Stmt::ArraySubscriptExprClass:
+ case Stmt::OMPArraySectionExprClass:
+ case Stmt::ImplicitCastExprClass:
+ case Stmt::ParenExprClass:
+ case Stmt::BreakStmtClass:
+ case Stmt::ContinueStmtClass:
+ case Stmt::NullStmtClass:
+ return true;
+ case Stmt::CStyleCastExprClass: {
+ const CStyleCastExpr* CastExpr1 = cast<CStyleCastExpr>(Stmt1);
+ const CStyleCastExpr* CastExpr2 = cast<CStyleCastExpr>(Stmt2);
+
+ return CastExpr1->getTypeAsWritten() == CastExpr2->getTypeAsWritten();
+ }
+ case Stmt::ReturnStmtClass: {
+ const ReturnStmt *ReturnStmt1 = cast<ReturnStmt>(Stmt1);
+ const ReturnStmt *ReturnStmt2 = cast<ReturnStmt>(Stmt2);
+
+ return isIdenticalStmt(Ctx, ReturnStmt1->getRetValue(),
+ ReturnStmt2->getRetValue(), IgnoreSideEffects);
+ }
+ case Stmt::ForStmtClass: {
+ const ForStmt *ForStmt1 = cast<ForStmt>(Stmt1);
+ const ForStmt *ForStmt2 = cast<ForStmt>(Stmt2);
+
+ if (!isIdenticalStmt(Ctx, ForStmt1->getInit(), ForStmt2->getInit(),
+ IgnoreSideEffects))
+ return false;
+ if (!isIdenticalStmt(Ctx, ForStmt1->getCond(), ForStmt2->getCond(),
+ IgnoreSideEffects))
+ return false;
+ if (!isIdenticalStmt(Ctx, ForStmt1->getInc(), ForStmt2->getInc(),
+ IgnoreSideEffects))
+ return false;
+ if (!isIdenticalStmt(Ctx, ForStmt1->getBody(), ForStmt2->getBody(),
+ IgnoreSideEffects))
+ return false;
+ return true;
+ }
+ case Stmt::DoStmtClass: {
+ const DoStmt *DStmt1 = cast<DoStmt>(Stmt1);
+ const DoStmt *DStmt2 = cast<DoStmt>(Stmt2);
+
+ if (!isIdenticalStmt(Ctx, DStmt1->getCond(), DStmt2->getCond(),
+ IgnoreSideEffects))
+ return false;
+ if (!isIdenticalStmt(Ctx, DStmt1->getBody(), DStmt2->getBody(),
+ IgnoreSideEffects))
+ return false;
+ return true;
+ }
+ case Stmt::WhileStmtClass: {
+ const WhileStmt *WStmt1 = cast<WhileStmt>(Stmt1);
+ const WhileStmt *WStmt2 = cast<WhileStmt>(Stmt2);
+
+ if (!isIdenticalStmt(Ctx, WStmt1->getCond(), WStmt2->getCond(),
+ IgnoreSideEffects))
+ return false;
+ if (!isIdenticalStmt(Ctx, WStmt1->getBody(), WStmt2->getBody(),
+ IgnoreSideEffects))
+ return false;
+ return true;
+ }
+ case Stmt::IfStmtClass: {
+ const IfStmt *IStmt1 = cast<IfStmt>(Stmt1);
+ const IfStmt *IStmt2 = cast<IfStmt>(Stmt2);
+
+ if (!isIdenticalStmt(Ctx, IStmt1->getCond(), IStmt2->getCond(),
+ IgnoreSideEffects))
+ return false;
+ if (!isIdenticalStmt(Ctx, IStmt1->getThen(), IStmt2->getThen(),
+ IgnoreSideEffects))
+ return false;
+ if (!isIdenticalStmt(Ctx, IStmt1->getElse(), IStmt2->getElse(),
+ IgnoreSideEffects))
+ return false;
+ return true;
+ }
+ case Stmt::CompoundStmtClass: {
+ const CompoundStmt *CompStmt1 = cast<CompoundStmt>(Stmt1);
+ const CompoundStmt *CompStmt2 = cast<CompoundStmt>(Stmt2);
+
+ if (CompStmt1->size() != CompStmt2->size())
+ return false;
+
+ CompoundStmt::const_body_iterator I1 = CompStmt1->body_begin();
+ CompoundStmt::const_body_iterator I2 = CompStmt2->body_begin();
+ while (I1 != CompStmt1->body_end() && I2 != CompStmt2->body_end()) {
+ if (!isIdenticalStmt(Ctx, *I1, *I2, IgnoreSideEffects))
+ return false;
+ ++I1;
+ ++I2;
+ }
+
+ return true;
+ }
+ case Stmt::CompoundAssignOperatorClass:
+ case Stmt::BinaryOperatorClass: {
+ const BinaryOperator *BinOp1 = cast<BinaryOperator>(Stmt1);
+ const BinaryOperator *BinOp2 = cast<BinaryOperator>(Stmt2);
+ return BinOp1->getOpcode() == BinOp2->getOpcode();
+ }
+ case Stmt::CharacterLiteralClass: {
+ const CharacterLiteral *CharLit1 = cast<CharacterLiteral>(Stmt1);
+ const CharacterLiteral *CharLit2 = cast<CharacterLiteral>(Stmt2);
+ return CharLit1->getValue() == CharLit2->getValue();
+ }
+ case Stmt::DeclRefExprClass: {
+ const DeclRefExpr *DeclRef1 = cast<DeclRefExpr>(Stmt1);
+ const DeclRefExpr *DeclRef2 = cast<DeclRefExpr>(Stmt2);
+ return DeclRef1->getDecl() == DeclRef2->getDecl();
+ }
+ case Stmt::IntegerLiteralClass: {
+ const IntegerLiteral *IntLit1 = cast<IntegerLiteral>(Stmt1);
+ const IntegerLiteral *IntLit2 = cast<IntegerLiteral>(Stmt2);
+
+ llvm::APInt I1 = IntLit1->getValue();
+ llvm::APInt I2 = IntLit2->getValue();
+ if (I1.getBitWidth() != I2.getBitWidth())
+ return false;
+ return I1 == I2;
+ }
+ case Stmt::FloatingLiteralClass: {
+ const FloatingLiteral *FloatLit1 = cast<FloatingLiteral>(Stmt1);
+ const FloatingLiteral *FloatLit2 = cast<FloatingLiteral>(Stmt2);
+ return FloatLit1->getValue().bitwiseIsEqual(FloatLit2->getValue());
+ }
+ case Stmt::StringLiteralClass: {
+ const StringLiteral *StringLit1 = cast<StringLiteral>(Stmt1);
+ const StringLiteral *StringLit2 = cast<StringLiteral>(Stmt2);
+ return StringLit1->getBytes() == StringLit2->getBytes();
+ }
+ case Stmt::MemberExprClass: {
+ const MemberExpr *MemberStmt1 = cast<MemberExpr>(Stmt1);
+ const MemberExpr *MemberStmt2 = cast<MemberExpr>(Stmt2);
+ return MemberStmt1->getMemberDecl() == MemberStmt2->getMemberDecl();
+ }
+ case Stmt::UnaryOperatorClass: {
+ const UnaryOperator *UnaryOp1 = cast<UnaryOperator>(Stmt1);
+ const UnaryOperator *UnaryOp2 = cast<UnaryOperator>(Stmt2);
+ return UnaryOp1->getOpcode() == UnaryOp2->getOpcode();
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// FindIdenticalExprChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class FindIdenticalExprChecker : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager &Mgr,
+ BugReporter &BR) const {
+ FindIdenticalExprVisitor Visitor(BR, this, Mgr.getAnalysisDeclContext(D));
+ Visitor.TraverseDecl(const_cast<Decl *>(D));
+ }
+};
+} // end anonymous namespace
+
+void ento::registerIdenticalExprChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<FindIdenticalExprChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
new file mode 100644
index 0000000..d38d63c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
@@ -0,0 +1,24 @@
+//==--- InterCheckerAPI.h ---------------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file allows introduction of checker dependencies. It contains APIs for
+// inter-checker communications.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_INTERCHECKERAPI_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_INTERCHECKERAPI_H
+namespace clang {
+class CheckerManager;
+
+namespace ento {
+
+/// Register the checker which evaluates CString API calls.
+void registerCStringCheckerBasic(CheckerManager &Mgr);
+
+}}
+#endif /* LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_INTERCHECKERAPI_H */
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
new file mode 100644
index 0000000..dffff38
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
@@ -0,0 +1,753 @@
+//=- IvarInvalidationChecker.cpp - -*- C++ -------------------------------*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker implements annotation driven invalidation checking. If a class
+// contains a method annotated with 'objc_instance_variable_invalidator',
+// - (void) foo
+// __attribute__((annotate("objc_instance_variable_invalidator")));
+// all the "ivalidatable" instance variables of this class should be
+// invalidated. We call an instance variable ivalidatable if it is an object of
+// a class which contains an invalidation method. There could be multiple
+// methods annotated with such annotations per class, either one can be used
+// to invalidate the ivar. An ivar or property are considered to be
+// invalidated if they are being assigned 'nil' or an invalidation method has
+// been called on them. An invalidation method should either invalidate all
+// the ivars or call another invalidation method (on self).
+//
+// The partial invalidator annotation allows addressing cases in which ivars
+// are invalidated by other methods, which may or may not be called from
+// the invalidation method. The checker verifies that each invalidation
+// method and all the partial methods cumulatively invalidate all ivars.
+// __attribute__((annotate("objc_instance_variable_invalidator_partial")));
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+struct ChecksFilter {
+ /// Check for missing invalidation method declarations.
+ DefaultBool check_MissingInvalidationMethod;
+ /// Check that all ivars are invalidated.
+ DefaultBool check_InstanceVariableInvalidation;
+
+ CheckName checkName_MissingInvalidationMethod;
+ CheckName checkName_InstanceVariableInvalidation;
+};
+
+class IvarInvalidationCheckerImpl {
+
+ typedef llvm::SmallSetVector<const ObjCMethodDecl*, 2> MethodSet;
+ typedef llvm::DenseMap<const ObjCMethodDecl*,
+ const ObjCIvarDecl*> MethToIvarMapTy;
+ typedef llvm::DenseMap<const ObjCPropertyDecl*,
+ const ObjCIvarDecl*> PropToIvarMapTy;
+ typedef llvm::DenseMap<const ObjCIvarDecl*,
+ const ObjCPropertyDecl*> IvarToPropMapTy;
+
+
+ struct InvalidationInfo {
+ /// Has the ivar been invalidated?
+ bool IsInvalidated;
+
+ /// The methods which can be used to invalidate the ivar.
+ MethodSet InvalidationMethods;
+
+ InvalidationInfo() : IsInvalidated(false) {}
+ void addInvalidationMethod(const ObjCMethodDecl *MD) {
+ InvalidationMethods.insert(MD);
+ }
+
+ bool needsInvalidation() const {
+ return !InvalidationMethods.empty();
+ }
+
+ bool hasMethod(const ObjCMethodDecl *MD) {
+ if (IsInvalidated)
+ return true;
+ for (MethodSet::iterator I = InvalidationMethods.begin(),
+ E = InvalidationMethods.end(); I != E; ++I) {
+ if (*I == MD) {
+ IsInvalidated = true;
+ return true;
+ }
+ }
+ return false;
+ }
+ };
+
+ typedef llvm::DenseMap<const ObjCIvarDecl*, InvalidationInfo> IvarSet;
+
+ /// Statement visitor, which walks the method body and flags the ivars
+ /// referenced in it (either directly or via property).
+ class MethodCrawler : public ConstStmtVisitor<MethodCrawler> {
+ /// The set of Ivars which need to be invalidated.
+ IvarSet &IVars;
+
+ /// Flag is set as the result of a message send to another
+ /// invalidation method.
+ bool &CalledAnotherInvalidationMethod;
+
+ /// Property setter to ivar mapping.
+ const MethToIvarMapTy &PropertySetterToIvarMap;
+
+ /// Property getter to ivar mapping.
+ const MethToIvarMapTy &PropertyGetterToIvarMap;
+
+ /// Property to ivar mapping.
+ const PropToIvarMapTy &PropertyToIvarMap;
+
+ /// The invalidation method being currently processed.
+ const ObjCMethodDecl *InvalidationMethod;
+
+ ASTContext &Ctx;
+
+ /// Peel off parens, casts, OpaqueValueExpr, and PseudoObjectExpr.
+ const Expr *peel(const Expr *E) const;
+
+ /// Does this expression represent zero: '0'?
+ bool isZero(const Expr *E) const;
+
+ /// Mark the given ivar as invalidated.
+ void markInvalidated(const ObjCIvarDecl *Iv);
+
+ /// Checks if IvarRef refers to the tracked IVar, if yes, marks it as
+ /// invalidated.
+ void checkObjCIvarRefExpr(const ObjCIvarRefExpr *IvarRef);
+
+ /// Checks if ObjCPropertyRefExpr refers to the tracked IVar, if yes, marks
+ /// it as invalidated.
+ void checkObjCPropertyRefExpr(const ObjCPropertyRefExpr *PA);
+
+ /// Checks if ObjCMessageExpr refers to (is a getter for) the tracked IVar,
+ /// if yes, marks it as invalidated.
+ void checkObjCMessageExpr(const ObjCMessageExpr *ME);
+
+ /// Checks if the Expr refers to an ivar, if yes, marks it as invalidated.
+ void check(const Expr *E);
+
+ public:
+ MethodCrawler(IvarSet &InIVars,
+ bool &InCalledAnotherInvalidationMethod,
+ const MethToIvarMapTy &InPropertySetterToIvarMap,
+ const MethToIvarMapTy &InPropertyGetterToIvarMap,
+ const PropToIvarMapTy &InPropertyToIvarMap,
+ ASTContext &InCtx)
+ : IVars(InIVars),
+ CalledAnotherInvalidationMethod(InCalledAnotherInvalidationMethod),
+ PropertySetterToIvarMap(InPropertySetterToIvarMap),
+ PropertyGetterToIvarMap(InPropertyGetterToIvarMap),
+ PropertyToIvarMap(InPropertyToIvarMap),
+ InvalidationMethod(nullptr),
+ Ctx(InCtx) {}
+
+ void VisitStmt(const Stmt *S) { VisitChildren(S); }
+
+ void VisitBinaryOperator(const BinaryOperator *BO);
+
+ void VisitObjCMessageExpr(const ObjCMessageExpr *ME);
+
+ void VisitChildren(const Stmt *S) {
+ for (const Stmt *Child : S->children()) {
+ if (Child)
+ this->Visit(Child);
+ if (CalledAnotherInvalidationMethod)
+ return;
+ }
+ }
+ };
+
+ /// Check if any of the methods inside the interface are annotated with
+ /// the invalidation annotation and update the IvarInfo accordingly.
+ /// \param LookForPartial is set when we are searching for partial
+ /// invalidators.
+ static void containsInvalidationMethod(const ObjCContainerDecl *D,
+ InvalidationInfo &Out,
+ bool LookForPartial);
+
+ /// Check if ivar should be tracked and add to TrackedIvars if positive.
+ /// Returns true if ivar should be tracked.
+ static bool trackIvar(const ObjCIvarDecl *Iv, IvarSet &TrackedIvars,
+ const ObjCIvarDecl **FirstIvarDecl);
+
+ /// Given the property declaration and the list of tracked ivars, finds
+ /// the ivar backing the property when possible. Returns null when no such
+ /// ivar can be found.
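+ /// For example (illustrative), a property named 'title' is matched to a
+ /// tracked ivar named 'title' or '_title'.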
+ static const ObjCIvarDecl *findPropertyBackingIvar(
+ const ObjCPropertyDecl *Prop,
+ const ObjCInterfaceDecl *InterfaceD,
+ IvarSet &TrackedIvars,
+ const ObjCIvarDecl **FirstIvarDecl);
+
+ /// Print ivar name or the property if the given ivar backs a property.
+ static void printIvar(llvm::raw_svector_ostream &os,
+ const ObjCIvarDecl *IvarDecl,
+ const IvarToPropMapTy &IvarToPopertyMap);
+
+ void reportNoInvalidationMethod(CheckName CheckName,
+ const ObjCIvarDecl *FirstIvarDecl,
+ const IvarToPropMapTy &IvarToPopertyMap,
+ const ObjCInterfaceDecl *InterfaceD,
+ bool MissingDeclaration) const;
+ void reportIvarNeedsInvalidation(const ObjCIvarDecl *IvarD,
+ const IvarToPropMapTy &IvarToPopertyMap,
+ const ObjCMethodDecl *MethodD) const;
+
+ AnalysisManager& Mgr;
+ BugReporter &BR;
+ /// Filter on the checks performed.
+ const ChecksFilter &Filter;
+
+public:
+ IvarInvalidationCheckerImpl(AnalysisManager& InMgr,
+ BugReporter &InBR,
+ const ChecksFilter &InFilter) :
+ Mgr (InMgr), BR(InBR), Filter(InFilter) {}
+
+ void visit(const ObjCImplementationDecl *D) const;
+};
+
+static bool isInvalidationMethod(const ObjCMethodDecl *M, bool LookForPartial) {
+ for (const auto *Ann : M->specific_attrs<AnnotateAttr>()) {
+ if (!LookForPartial &&
+ Ann->getAnnotation() == "objc_instance_variable_invalidator")
+ return true;
+ if (LookForPartial &&
+ Ann->getAnnotation() == "objc_instance_variable_invalidator_partial")
+ return true;
+ }
+ return false;
+}
+
+void IvarInvalidationCheckerImpl::containsInvalidationMethod(
+ const ObjCContainerDecl *D, InvalidationInfo &OutInfo, bool Partial) {
+
+ if (!D)
+ return;
+
+ assert(!isa<ObjCImplementationDecl>(D));
+ // TODO: Cache the results.
+
+ // Check all methods.
+ for (const auto *MDI : D->methods())
+ if (isInvalidationMethod(MDI, Partial))
+ OutInfo.addInvalidationMethod(
+ cast<ObjCMethodDecl>(MDI->getCanonicalDecl()));
+
+ // If interface, check all parent protocols and super.
+ if (const ObjCInterfaceDecl *InterfD = dyn_cast<ObjCInterfaceDecl>(D)) {
+
+ // Visit all protocols.
+ for (const auto *I : InterfD->protocols())
+ containsInvalidationMethod(I->getDefinition(), OutInfo, Partial);
+
+ // Visit all categories in case the invalidation method is declared in
+ // a category.
+ for (const auto *Ext : InterfD->visible_extensions())
+ containsInvalidationMethod(Ext, OutInfo, Partial);
+
+ containsInvalidationMethod(InterfD->getSuperClass(), OutInfo, Partial);
+ return;
+ }
+
+ // If protocol, check all parent protocols.
+ if (const ObjCProtocolDecl *ProtD = dyn_cast<ObjCProtocolDecl>(D)) {
+ for (const auto *I : ProtD->protocols()) {
+ containsInvalidationMethod(I->getDefinition(), OutInfo, Partial);
+ }
+ return;
+ }
+
+ return;
+}
+
+bool IvarInvalidationCheckerImpl::trackIvar(const ObjCIvarDecl *Iv,
+ IvarSet &TrackedIvars,
+ const ObjCIvarDecl **FirstIvarDecl) {
+ QualType IvQTy = Iv->getType();
+ const ObjCObjectPointerType *IvTy = IvQTy->getAs<ObjCObjectPointerType>();
+ if (!IvTy)
+ return false;
+ const ObjCInterfaceDecl *IvInterf = IvTy->getInterfaceDecl();
+
+ InvalidationInfo Info;
+ containsInvalidationMethod(IvInterf, Info, /*LookForPartial*/ false);
+ if (Info.needsInvalidation()) {
+ const ObjCIvarDecl *I = cast<ObjCIvarDecl>(Iv->getCanonicalDecl());
+ TrackedIvars[I] = Info;
+ if (!*FirstIvarDecl)
+ *FirstIvarDecl = I;
+ return true;
+ }
+ return false;
+}
+
+const ObjCIvarDecl *IvarInvalidationCheckerImpl::findPropertyBackingIvar(
+ const ObjCPropertyDecl *Prop,
+ const ObjCInterfaceDecl *InterfaceD,
+ IvarSet &TrackedIvars,
+ const ObjCIvarDecl **FirstIvarDecl) {
+ const ObjCIvarDecl *IvarD = nullptr;
+
+ // Look up the ivar for the synthesized case first.
+ IvarD = Prop->getPropertyIvarDecl();
+ // We only track the ivars/properties that are defined in the current
+ // class (not the parent).
+ if (IvarD && IvarD->getContainingInterface() == InterfaceD) {
+ if (TrackedIvars.count(IvarD)) {
+ return IvarD;
+ }
+ // If the ivar is synthesized we still want to track it.
+ if (trackIvar(IvarD, TrackedIvars, FirstIvarDecl))
+ return IvarD;
+ }
+
+ // Lookup IVars named "_PropName"or "PropName" among the tracked Ivars.
+ StringRef PropName = Prop->getIdentifier()->getName();
+ for (IvarSet::const_iterator I = TrackedIvars.begin(),
+ E = TrackedIvars.end(); I != E; ++I) {
+ const ObjCIvarDecl *Iv = I->first;
+ StringRef IvarName = Iv->getName();
+
+ if (IvarName == PropName)
+ return Iv;
+
+ SmallString<128> PropNameWithUnderscore;
+ {
+ llvm::raw_svector_ostream os(PropNameWithUnderscore);
+ os << '_' << PropName;
+ }
+ if (IvarName == PropNameWithUnderscore)
+ return Iv;
+ }
+
+ // Note, this is a possible source of false positives. We could look at the
+ // getter implementation to find the ivar when its name is not derived from
+ // the property name.
+ return nullptr;
+}
+
+void IvarInvalidationCheckerImpl::printIvar(llvm::raw_svector_ostream &os,
+ const ObjCIvarDecl *IvarDecl,
+ const IvarToPropMapTy &IvarToPopertyMap) {
+ if (IvarDecl->getSynthesize()) {
+ const ObjCPropertyDecl *PD = IvarToPopertyMap.lookup(IvarDecl);
+ assert(PD &&"Do we synthesize ivars for something other than properties?");
+ os << "Property "<< PD->getName() << " ";
+ } else {
+ os << "Instance variable "<< IvarDecl->getName() << " ";
+ }
+}
+
+// Check that the invalidatable interfaces with ivars/properties implement the
+// invalidation methods.
+void IvarInvalidationCheckerImpl::
+visit(const ObjCImplementationDecl *ImplD) const {
+ // Collect all ivars that need cleanup.
+ IvarSet Ivars;
+ // Record the first ivar needing invalidation; used in reporting when only
+ // one ivar is sufficient. We cannot simply take the first element of the
+ // Ivars set because its order is not deterministic.
+ const ObjCIvarDecl *FirstIvarDecl = nullptr;
+ const ObjCInterfaceDecl *InterfaceD = ImplD->getClassInterface();
+
+ // Collect ivars declared in this class, its extensions, and its implementation.
+ ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl *>(InterfaceD);
+ for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
+ Iv= Iv->getNextIvar())
+ trackIvar(Iv, Ivars, &FirstIvarDecl);
+
+ // Construct Property/Property Accessor to Ivar maps to assist checking if an
+ // ivar which is backing a property has been reset.
+ MethToIvarMapTy PropSetterToIvarMap;
+ MethToIvarMapTy PropGetterToIvarMap;
+ PropToIvarMapTy PropertyToIvarMap;
+ IvarToPropMapTy IvarToPopertyMap;
+
+ ObjCInterfaceDecl::PropertyMap PropMap;
+ ObjCInterfaceDecl::PropertyDeclOrder PropOrder;
+ InterfaceD->collectPropertiesToImplement(PropMap, PropOrder);
+
+ for (ObjCInterfaceDecl::PropertyMap::iterator
+ I = PropMap.begin(), E = PropMap.end(); I != E; ++I) {
+ const ObjCPropertyDecl *PD = I->second;
+
+ const ObjCIvarDecl *ID = findPropertyBackingIvar(PD, InterfaceD, Ivars,
+ &FirstIvarDecl);
+ if (!ID)
+ continue;
+
+ // Store the mappings.
+ PD = cast<ObjCPropertyDecl>(PD->getCanonicalDecl());
+ PropertyToIvarMap[PD] = ID;
+ IvarToPopertyMap[ID] = PD;
+
+ // Find the setter and the getter.
+ const ObjCMethodDecl *SetterD = PD->getSetterMethodDecl();
+ if (SetterD) {
+ SetterD = cast<ObjCMethodDecl>(SetterD->getCanonicalDecl());
+ PropSetterToIvarMap[SetterD] = ID;
+ }
+
+ const ObjCMethodDecl *GetterD = PD->getGetterMethodDecl();
+ if (GetterD) {
+ GetterD = cast<ObjCMethodDecl>(GetterD->getCanonicalDecl());
+ PropGetterToIvarMap[GetterD] = ID;
+ }
+ }
+
+ // If no ivars need invalidation, there is nothing to check here.
+ if (Ivars.empty())
+ return;
+
+ // Find all partial invalidation methods.
+ InvalidationInfo PartialInfo;
+ containsInvalidationMethod(InterfaceD, PartialInfo, /*LookForPartial*/ true);
+
+ // Remove ivars invalidated by the partial invalidation methods. They do not
+ // need to be invalidated in the regular invalidation methods.
+ bool AtImplementationContainsAtLeastOnePartialInvalidationMethod = false;
+ for (MethodSet::iterator
+ I = PartialInfo.InvalidationMethods.begin(),
+ E = PartialInfo.InvalidationMethods.end(); I != E; ++I) {
+ const ObjCMethodDecl *InterfD = *I;
+
+ // Get the corresponding method in the @implementation.
+ const ObjCMethodDecl *D = ImplD->getMethod(InterfD->getSelector(),
+ InterfD->isInstanceMethod());
+ if (D && D->hasBody()) {
+ AtImplementationContainsAtLeastOnePartialInvalidationMethod = true;
+
+ bool CalledAnotherInvalidationMethod = false;
+ // The MethodCrawler is going to remove the invalidated ivars.
+ MethodCrawler(Ivars,
+ CalledAnotherInvalidationMethod,
+ PropSetterToIvarMap,
+ PropGetterToIvarMap,
+ PropertyToIvarMap,
+ BR.getContext()).VisitStmt(D->getBody());
+ // If another invalidation method was called, trust that full invalidation
+ // has occurred.
+ if (CalledAnotherInvalidationMethod)
+ Ivars.clear();
+ }
+ }
+
+ // If all ivars have been invalidated by partial invalidators, there is
+ // nothing to check here.
+ if (Ivars.empty())
+ return;
+
+ // Find all invalidation methods in this @interface declaration and parents.
+ InvalidationInfo Info;
+ containsInvalidationMethod(InterfaceD, Info, /*LookForPartial*/ false);
+
+ // Report an error in case none of the invalidation methods are declared.
+ if (!Info.needsInvalidation() && !PartialInfo.needsInvalidation()) {
+ if (Filter.check_MissingInvalidationMethod)
+ reportNoInvalidationMethod(Filter.checkName_MissingInvalidationMethod,
+ FirstIvarDecl, IvarToPopertyMap, InterfaceD,
+ /*MissingDeclaration*/ true);
+ // If there are no invalidation methods, there is no ivar validation work
+ // to be done.
+ return;
+ }
+
+ // Only check if Ivars are invalidated when InstanceVariableInvalidation
+ // has been requested.
+ if (!Filter.check_InstanceVariableInvalidation)
+ return;
+
+ // Check that all ivars are invalidated by the invalidation methods.
+ bool AtImplementationContainsAtLeastOneInvalidationMethod = false;
+ for (MethodSet::iterator I = Info.InvalidationMethods.begin(),
+ E = Info.InvalidationMethods.end(); I != E; ++I) {
+ const ObjCMethodDecl *InterfD = *I;
+
+ // Get the corresponding method in the @implementation.
+ const ObjCMethodDecl *D = ImplD->getMethod(InterfD->getSelector(),
+ InterfD->isInstanceMethod());
+ if (D && D->hasBody()) {
+ AtImplementationContainsAtLeastOneInvalidationMethod = true;
+
+ // Get a copy of ivars needing invalidation.
+ IvarSet IvarsI = Ivars;
+
+ bool CalledAnotherInvalidationMethod = false;
+ MethodCrawler(IvarsI,
+ CalledAnotherInvalidationMethod,
+ PropSetterToIvarMap,
+ PropGetterToIvarMap,
+ PropertyToIvarMap,
+ BR.getContext()).VisitStmt(D->getBody());
+ // If another invalidation method was called, trust that full invalidation
+ // has occurred.
+ if (CalledAnotherInvalidationMethod)
+ continue;
+
+ // Warn on the ivars that were not invalidated by the method.
+ for (IvarSet::const_iterator
+ I = IvarsI.begin(), E = IvarsI.end(); I != E; ++I)
+ reportIvarNeedsInvalidation(I->first, IvarToPopertyMap, D);
+ }
+ }
+
+ // Report an error in case none of the invalidation methods are implemented.
+ if (!AtImplementationContainsAtLeastOneInvalidationMethod) {
+ if (AtImplementationContainsAtLeastOnePartialInvalidationMethod) {
+ // Warn on the ivars that were not invalidated by the partial
+ // invalidation methods.
+ for (IvarSet::const_iterator
+ I = Ivars.begin(), E = Ivars.end(); I != E; ++I)
+ reportIvarNeedsInvalidation(I->first, IvarToPopertyMap, nullptr);
+ } else {
+ // Otherwise, no invalidation methods were implemented.
+ reportNoInvalidationMethod(Filter.checkName_InstanceVariableInvalidation,
+ FirstIvarDecl, IvarToPopertyMap, InterfaceD,
+ /*MissingDeclaration*/ false);
+ }
+ }
+}
+
+void IvarInvalidationCheckerImpl::reportNoInvalidationMethod(
+ CheckName CheckName, const ObjCIvarDecl *FirstIvarDecl,
+ const IvarToPropMapTy &IvarToPopertyMap,
+ const ObjCInterfaceDecl *InterfaceD, bool MissingDeclaration) const {
+ SmallString<128> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+ assert(FirstIvarDecl);
+ printIvar(os, FirstIvarDecl, IvarToPopertyMap);
+ os << "needs to be invalidated; ";
+ if (MissingDeclaration)
+ os << "no invalidation method is declared for ";
+ else
+ os << "no invalidation method is defined in the @implementation for ";
+ os << InterfaceD->getName();
+
+ PathDiagnosticLocation IvarDecLocation =
+ PathDiagnosticLocation::createBegin(FirstIvarDecl, BR.getSourceManager());
+
+ BR.EmitBasicReport(FirstIvarDecl, CheckName, "Incomplete invalidation",
+ categories::CoreFoundationObjectiveC, os.str(),
+ IvarDecLocation);
+}
+
+void IvarInvalidationCheckerImpl::
+reportIvarNeedsInvalidation(const ObjCIvarDecl *IvarD,
+ const IvarToPropMapTy &IvarToPopertyMap,
+ const ObjCMethodDecl *MethodD) const {
+ SmallString<128> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+ printIvar(os, IvarD, IvarToPopertyMap);
+ os << "needs to be invalidated or set to nil";
+ if (MethodD) {
+ PathDiagnosticLocation MethodDecLocation =
+ PathDiagnosticLocation::createEnd(MethodD->getBody(),
+ BR.getSourceManager(),
+ Mgr.getAnalysisDeclContext(MethodD));
+ BR.EmitBasicReport(MethodD, Filter.checkName_InstanceVariableInvalidation,
+ "Incomplete invalidation",
+ categories::CoreFoundationObjectiveC, os.str(),
+ MethodDecLocation);
+ } else {
+ BR.EmitBasicReport(
+ IvarD, Filter.checkName_InstanceVariableInvalidation,
+ "Incomplete invalidation", categories::CoreFoundationObjectiveC,
+ os.str(),
+ PathDiagnosticLocation::createBegin(IvarD, BR.getSourceManager()));
+ }
+}
+
+void IvarInvalidationCheckerImpl::MethodCrawler::markInvalidated(
+ const ObjCIvarDecl *Iv) {
+ IvarSet::iterator I = IVars.find(Iv);
+ if (I != IVars.end()) {
+ // If InvalidationMethod is present, we are processing a message send and
+ // should ensure we are invalidating with the appropriate method;
+ // otherwise, we are processing an assignment to 'nil'.
+ if (!InvalidationMethod || I->second.hasMethod(InvalidationMethod))
+ IVars.erase(I);
+ }
+}
+
+const Expr *IvarInvalidationCheckerImpl::MethodCrawler::peel(const Expr *E) const {
+ E = E->IgnoreParenCasts();
+ if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E))
+ E = POE->getSyntacticForm()->IgnoreParenCasts();
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E))
+ E = OVE->getSourceExpr()->IgnoreParenCasts();
+ return E;
+}
+
+void IvarInvalidationCheckerImpl::MethodCrawler::checkObjCIvarRefExpr(
+ const ObjCIvarRefExpr *IvarRef) {
+ if (const Decl *D = IvarRef->getDecl())
+ markInvalidated(cast<ObjCIvarDecl>(D->getCanonicalDecl()));
+}
+
+void IvarInvalidationCheckerImpl::MethodCrawler::checkObjCMessageExpr(
+ const ObjCMessageExpr *ME) {
+ const ObjCMethodDecl *MD = ME->getMethodDecl();
+ if (MD) {
+ MD = cast<ObjCMethodDecl>(MD->getCanonicalDecl());
+ MethToIvarMapTy::const_iterator IvI = PropertyGetterToIvarMap.find(MD);
+ if (IvI != PropertyGetterToIvarMap.end())
+ markInvalidated(IvI->second);
+ }
+}
+
+void IvarInvalidationCheckerImpl::MethodCrawler::checkObjCPropertyRefExpr(
+ const ObjCPropertyRefExpr *PA) {
+
+ if (PA->isExplicitProperty()) {
+ const ObjCPropertyDecl *PD = PA->getExplicitProperty();
+ if (PD) {
+ PD = cast<ObjCPropertyDecl>(PD->getCanonicalDecl());
+ PropToIvarMapTy::const_iterator IvI = PropertyToIvarMap.find(PD);
+ if (IvI != PropertyToIvarMap.end())
+ markInvalidated(IvI->second);
+ return;
+ }
+ }
+
+ if (PA->isImplicitProperty()) {
+ const ObjCMethodDecl *MD = PA->getImplicitPropertySetter();
+ if (MD) {
+ MD = cast<ObjCMethodDecl>(MD->getCanonicalDecl());
+ MethToIvarMapTy::const_iterator IvI = PropertyGetterToIvarMap.find(MD);
+ if (IvI != PropertyGetterToIvarMap.end())
+ markInvalidated(IvI->second);
+ return;
+ }
+ }
+}
+
+bool IvarInvalidationCheckerImpl::MethodCrawler::isZero(const Expr *E) const {
+ E = peel(E);
+
+ return (E->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNotNull)
+ != Expr::NPCK_NotNull);
+}
+
+void IvarInvalidationCheckerImpl::MethodCrawler::check(const Expr *E) {
+ E = peel(E);
+
+ if (const ObjCIvarRefExpr *IvarRef = dyn_cast<ObjCIvarRefExpr>(E)) {
+ checkObjCIvarRefExpr(IvarRef);
+ return;
+ }
+
+ if (const ObjCPropertyRefExpr *PropRef = dyn_cast<ObjCPropertyRefExpr>(E)) {
+ checkObjCPropertyRefExpr(PropRef);
+ return;
+ }
+
+ if (const ObjCMessageExpr *MsgExpr = dyn_cast<ObjCMessageExpr>(E)) {
+ checkObjCMessageExpr(MsgExpr);
+ return;
+ }
+}
+
+void IvarInvalidationCheckerImpl::MethodCrawler::VisitBinaryOperator(
+ const BinaryOperator *BO) {
+ VisitStmt(BO);
+
+ // Do we assign/compare against zero? If yes, check the variable we are
+ // assigning to.
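+ // For instance (illustrative), both '_ivar = nil;' and 'if (nil == _ivar)'
+ // lead to a check of the ivar operand.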
+ BinaryOperatorKind Opcode = BO->getOpcode();
+ if (Opcode != BO_Assign &&
+ Opcode != BO_EQ &&
+ Opcode != BO_NE)
+ return;
+
+ if (isZero(BO->getRHS())) {
+ check(BO->getLHS());
+ return;
+ }
+
+ if (Opcode != BO_Assign && isZero(BO->getLHS())) {
+ check(BO->getRHS());
+ return;
+ }
+}
+
+void IvarInvalidationCheckerImpl::MethodCrawler::VisitObjCMessageExpr(
+ const ObjCMessageExpr *ME) {
+ const ObjCMethodDecl *MD = ME->getMethodDecl();
+ const Expr *Receiver = ME->getInstanceReceiver();
+
+ // Stop if we are calling '[self invalidate]'.
+ if (Receiver && isInvalidationMethod(MD, /*LookForPartial*/ false))
+ if (Receiver->isObjCSelfExpr()) {
+ CalledAnotherInvalidationMethod = true;
+ return;
+ }
+
+ // Check if we call a setter and set the property to 'nil'.
+ if (MD && (ME->getNumArgs() == 1) && isZero(ME->getArg(0))) {
+ MD = cast<ObjCMethodDecl>(MD->getCanonicalDecl());
+ MethToIvarMapTy::const_iterator IvI = PropertySetterToIvarMap.find(MD);
+ if (IvI != PropertySetterToIvarMap.end()) {
+ markInvalidated(IvI->second);
+ return;
+ }
+ }
+
+ // Check if we call the 'invalidation' routine on the ivar.
+ if (Receiver) {
+ InvalidationMethod = MD;
+ check(Receiver->IgnoreParenCasts());
+ InvalidationMethod = nullptr;
+ }
+
+ VisitStmt(ME);
+}
+}
+
+// Register the checkers.
+namespace {
+
+class IvarInvalidationChecker :
+ public Checker<check::ASTDecl<ObjCImplementationDecl> > {
+public:
+ ChecksFilter Filter;
+public:
+ void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& Mgr,
+ BugReporter &BR) const {
+ IvarInvalidationCheckerImpl Walker(Mgr, BR, Filter);
+ Walker.visit(D);
+ }
+};
+}
+
+#define REGISTER_CHECKER(name) \
+ void ento::register##name(CheckerManager &mgr) { \
+ IvarInvalidationChecker *checker = \
+ mgr.registerChecker<IvarInvalidationChecker>(); \
+ checker->Filter.check_##name = true; \
+ checker->Filter.checkName_##name = mgr.getCurrentCheckName(); \
+ }
+
+REGISTER_CHECKER(InstanceVariableInvalidation)
+REGISTER_CHECKER(MissingInvalidationMethod)
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
new file mode 100644
index 0000000..db4fbca
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
@@ -0,0 +1,317 @@
+//=== LLVMConventionsChecker.cpp - Check LLVM codebase conventions ---*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines LLVMConventionsChecker, a bunch of small little checks
+// for checking specific coding conventions in the LLVM/Clang codebase.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// Generic type checking routines.
+//===----------------------------------------------------------------------===//
+
+static bool IsLLVMStringRef(QualType T) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ return StringRef(QualType(RT, 0).getAsString()) ==
+ "class StringRef";
+}
+
+/// Check whether the declaration is semantically inside the top-level
+/// namespace named by NS.
+static bool InNamespace(const Decl *D, StringRef NS) {
+ const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(D->getDeclContext());
+ if (!ND)
+ return false;
+ const IdentifierInfo *II = ND->getIdentifier();
+ if (!II || !II->getName().equals(NS))
+ return false;
+ return isa<TranslationUnitDecl>(ND->getDeclContext());
+}
+
+static bool IsStdString(QualType T) {
+ if (const ElaboratedType *QT = T->getAs<ElaboratedType>())
+ T = QT->getNamedType();
+
+ const TypedefType *TT = T->getAs<TypedefType>();
+ if (!TT)
+ return false;
+
+ const TypedefNameDecl *TD = TT->getDecl();
+
+ if (!TD->isInStdNamespace())
+ return false;
+
+ return TD->getName() == "string";
+}
+
+static bool IsClangType(const RecordDecl *RD) {
+ return RD->getName() == "Type" && InNamespace(RD, "clang");
+}
+
+static bool IsClangDecl(const RecordDecl *RD) {
+ return RD->getName() == "Decl" && InNamespace(RD, "clang");
+}
+
+static bool IsClangStmt(const RecordDecl *RD) {
+ return RD->getName() == "Stmt" && InNamespace(RD, "clang");
+}
+
+static bool IsClangAttr(const RecordDecl *RD) {
+ return RD->getName() == "Attr" && InNamespace(RD, "clang");
+}
+
+static bool IsStdVector(QualType T) {
+ const TemplateSpecializationType *TS = T->getAs<TemplateSpecializationType>();
+ if (!TS)
+ return false;
+
+ TemplateName TM = TS->getTemplateName();
+ TemplateDecl *TD = TM.getAsTemplateDecl();
+
+ if (!TD || !InNamespace(TD, "std"))
+ return false;
+
+ return TD->getName() == "vector";
+}
+
+static bool IsSmallVector(QualType T) {
+ const TemplateSpecializationType *TS = T->getAs<TemplateSpecializationType>();
+ if (!TS)
+ return false;
+
+ TemplateName TM = TS->getTemplateName();
+ TemplateDecl *TD = TM.getAsTemplateDecl();
+
+ if (!TD || !InNamespace(TD, "llvm"))
+ return false;
+
+ return TD->getName() == "SmallVector";
+}
+
+//===----------------------------------------------------------------------===//
+// CHECK: a StringRef should not be bound to a temporary std::string whose
+// lifetime is shorter than the StringRef's.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class StringRefCheckerVisitor : public StmtVisitor<StringRefCheckerVisitor> {
+ const Decl *DeclWithIssue;
+ BugReporter &BR;
+ const CheckerBase *Checker;
+
+public:
+ StringRefCheckerVisitor(const Decl *declWithIssue, BugReporter &br,
+ const CheckerBase *checker)
+ : DeclWithIssue(declWithIssue), BR(br), Checker(checker) {}
+ void VisitChildren(Stmt *S) {
+ for (Stmt *Child : S->children())
+ if (Child)
+ Visit(Child);
+ }
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+ void VisitDeclStmt(DeclStmt *DS);
+private:
+ void VisitVarDecl(VarDecl *VD);
+};
+} // end anonymous namespace
+
+static void CheckStringRefAssignedTemporary(const Decl *D, BugReporter &BR,
+ const CheckerBase *Checker) {
+ StringRefCheckerVisitor walker(D, BR, Checker);
+ walker.Visit(D->getBody());
+}
+
+void StringRefCheckerVisitor::VisitDeclStmt(DeclStmt *S) {
+ VisitChildren(S);
+
+ for (auto *I : S->decls())
+ if (VarDecl *VD = dyn_cast<VarDecl>(I))
+ VisitVarDecl(VD);
+}
+
+void StringRefCheckerVisitor::VisitVarDecl(VarDecl *VD) {
+ Expr *Init = VD->getInit();
+ if (!Init)
+ return;
+
+ // Pattern match for:
+ // StringRef x = call() (where call returns std::string)
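+ // An illustrative instance of the bug (hypothetical code):
+ //   std::string getName();
+ //   StringRef Name = getName(); // Name points into a destroyed temporary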
+ if (!IsLLVMStringRef(VD->getType()))
+ return;
+ ExprWithCleanups *Ex1 = dyn_cast<ExprWithCleanups>(Init);
+ if (!Ex1)
+ return;
+ CXXConstructExpr *Ex2 = dyn_cast<CXXConstructExpr>(Ex1->getSubExpr());
+ if (!Ex2 || Ex2->getNumArgs() != 1)
+ return;
+ ImplicitCastExpr *Ex3 = dyn_cast<ImplicitCastExpr>(Ex2->getArg(0));
+ if (!Ex3)
+ return;
+ CXXConstructExpr *Ex4 = dyn_cast<CXXConstructExpr>(Ex3->getSubExpr());
+ if (!Ex4 || Ex4->getNumArgs() != 1)
+ return;
+ ImplicitCastExpr *Ex5 = dyn_cast<ImplicitCastExpr>(Ex4->getArg(0));
+ if (!Ex5)
+ return;
+ CXXBindTemporaryExpr *Ex6 = dyn_cast<CXXBindTemporaryExpr>(Ex5->getSubExpr());
+ if (!Ex6 || !IsStdString(Ex6->getType()))
+ return;
+
+ // Okay, badness! Report an error.
+ const char *desc = "StringRef should not be bound to temporary "
+ "std::string that it outlives";
+ PathDiagnosticLocation VDLoc =
+ PathDiagnosticLocation::createBegin(VD, BR.getSourceManager());
+ BR.EmitBasicReport(DeclWithIssue, Checker, desc, "LLVM Conventions", desc,
+ VDLoc, Init->getSourceRange());
+}
+
+//===----------------------------------------------------------------------===//
+// CHECK: Clang AST nodes should not have fields that can allocate
+// memory.
+//===----------------------------------------------------------------------===//
+
+static bool AllocatesMemory(QualType T) {
+ return IsStdVector(T) || IsStdString(T) || IsSmallVector(T);
+}
+
+// This type checking could be sped up via dynamic programming.
+static bool IsPartOfAST(const CXXRecordDecl *R) {
+ if (IsClangStmt(R) || IsClangType(R) || IsClangDecl(R) || IsClangAttr(R))
+ return true;
+
+ for (const auto &BS : R->bases()) {
+ QualType T = BS.getType();
+ if (const RecordType *baseT = T->getAs<RecordType>()) {
+ CXXRecordDecl *baseD = cast<CXXRecordDecl>(baseT->getDecl());
+ if (IsPartOfAST(baseD))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+namespace {
+class ASTFieldVisitor {
+ SmallVector<FieldDecl*, 10> FieldChain;
+ const CXXRecordDecl *Root;
+ BugReporter &BR;
+ const CheckerBase *Checker;
+
+public:
+ ASTFieldVisitor(const CXXRecordDecl *root, BugReporter &br,
+ const CheckerBase *checker)
+ : Root(root), BR(br), Checker(checker) {}
+
+ void Visit(FieldDecl *D);
+ void ReportError(QualType T);
+};
+} // end anonymous namespace
+
+static void CheckASTMemory(const CXXRecordDecl *R, BugReporter &BR,
+ const CheckerBase *Checker) {
+ if (!IsPartOfAST(R))
+ return;
+
+ for (auto *I : R->fields()) {
+ ASTFieldVisitor walker(R, BR, Checker);
+ walker.Visit(I);
+ }
+}
+
+void ASTFieldVisitor::Visit(FieldDecl *D) {
+ FieldChain.push_back(D);
+
+ QualType T = D->getType();
+
+ if (AllocatesMemory(T))
+ ReportError(T);
+
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl()->getDefinition();
+ for (auto *I : RD->fields())
+ Visit(I);
+ }
+
+ FieldChain.pop_back();
+}
+
+void ASTFieldVisitor::ReportError(QualType T) {
+ SmallString<1024> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << "AST class '" << Root->getName() << "' has a field '"
+ << FieldChain.front()->getName() << "' that allocates heap memory";
+ if (FieldChain.size() > 1) {
+ os << " via the following chain: ";
+ bool isFirst = true;
+ for (SmallVectorImpl<FieldDecl*>::iterator I=FieldChain.begin(),
+ E=FieldChain.end(); I!=E; ++I) {
+ if (!isFirst)
+ os << '.';
+ else
+ isFirst = false;
+ os << (*I)->getName();
+ }
+ }
+ os << " (type " << FieldChain.back()->getType().getAsString() << ")";
+
+ // Note that this will fire for every translation unit that uses this
+ // class. This is suboptimal, but at least scan-build will merge
+ // duplicate HTML reports. In the future we need a unified way of merging
+ // duplicate reports across translation units. For C++ classes we cannot
+ // just report warnings when we see an out-of-line method definition for a
+ // class, as that heuristic doesn't always work (the complete definition of
+ // the class may be in the header file, for example).
+ PathDiagnosticLocation L = PathDiagnosticLocation::createBegin(
+ FieldChain.front(), BR.getSourceManager());
+ BR.EmitBasicReport(Root, Checker, "AST node allocates heap memory",
+ "LLVM Conventions", os.str(), L);
+}
+
+//===----------------------------------------------------------------------===//
+// LLVMConventionsChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class LLVMConventionsChecker : public Checker<
+ check::ASTDecl<CXXRecordDecl>,
+ check::ASTCodeBody > {
+public:
+ void checkASTDecl(const CXXRecordDecl *R, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (R->isCompleteDefinition())
+ CheckASTMemory(R, BR, this);
+ }
+
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ CheckStringRefAssignedTemporary(D, BR, this);
+ }
+};
+}
+
+void ento::registerLLVMConventionsChecker(CheckerManager &mgr) {
+ mgr.registerChecker<LLVMConventionsChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
new file mode 100644
index 0000000..56346cd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
@@ -0,0 +1,1201 @@
+//=- LocalizationChecker.cpp -------------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a set of checks for localizability including:
+// 1) A checker that warns about uses of non-localized NSStrings passed to
+// UI methods expecting localized strings
+// 2) A syntactic checker that warns against the bad practice of
+// not including a comment in NSLocalizedString macros.
+//
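+// For example (an illustrative sketch), the first check would flag
+//   [controller setSearchResultsTitle:@"Results"];
+// because the literal never acquired localized state, while a string
+// returned by NSLocalizedString would not be flagged.
+//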
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Support/Unicode.h"
+#include "llvm/ADT/StringSet.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+struct LocalizedState {
+private:
+ enum Kind { NonLocalized, Localized } K;
+ LocalizedState(Kind InK) : K(InK) {}
+
+public:
+ bool isLocalized() const { return K == Localized; }
+ bool isNonLocalized() const { return K == NonLocalized; }
+
+ static LocalizedState getLocalized() { return LocalizedState(Localized); }
+ static LocalizedState getNonLocalized() {
+ return LocalizedState(NonLocalized);
+ }
+
+ // Overload the == operator
+ bool operator==(const LocalizedState &X) const { return K == X.K; }
+
+  // LLVM's equivalent of a hash function
+ void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(K); }
+};
+
+class NonLocalizedStringChecker
+ : public Checker<check::PostCall, check::PreObjCMessage,
+ check::PostObjCMessage,
+ check::PostStmt<ObjCStringLiteral>> {
+
+ mutable std::unique_ptr<BugType> BT;
+
+ // Methods that require a localized string
+ mutable llvm::DenseMap<const IdentifierInfo *,
+ llvm::DenseMap<Selector, uint8_t>> UIMethods;
+ // Methods that return a localized string
+ mutable llvm::SmallSet<std::pair<const IdentifierInfo *, Selector>, 12> LSM;
+ // C Functions that return a localized string
+ mutable llvm::SmallSet<const IdentifierInfo *, 5> LSF;
+
+ void initUIMethods(ASTContext &Ctx) const;
+ void initLocStringsMethods(ASTContext &Ctx) const;
+
+ bool hasNonLocalizedState(SVal S, CheckerContext &C) const;
+ bool hasLocalizedState(SVal S, CheckerContext &C) const;
+ void setNonLocalizedState(SVal S, CheckerContext &C) const;
+ void setLocalizedState(SVal S, CheckerContext &C) const;
+
+ bool isAnnotatedAsLocalized(const Decl *D) const;
+ void reportLocalizationError(SVal S, const ObjCMethodCall &M,
+ CheckerContext &C, int argumentNumber = 0) const;
+
+ int getLocalizedArgumentForSelector(const IdentifierInfo *Receiver,
+ Selector S) const;
+
+public:
+ NonLocalizedStringChecker();
+
+ // When this parameter is set to true, the checker assumes all
+ // methods that return NSStrings are unlocalized. Thus, more false
+ // positives will be reported.
+ DefaultBool IsAggressive;
+
+ void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
+ void checkPostObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
+ void checkPostStmt(const ObjCStringLiteral *SL, CheckerContext &C) const;
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+};
+
+} // end anonymous namespace
+
+REGISTER_MAP_WITH_PROGRAMSTATE(LocalizedMemMap, const MemRegion *,
+ LocalizedState)
+
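+// The REGISTER_MAP_WITH_PROGRAMSTATE invocation above defines a
+// program-state trait: an immutable map from MemRegion* to LocalizedState,
+// which the helpers below read and write through
+// State->get<LocalizedMemMap>(R) and State->set<LocalizedMemMap>(R, ...).
+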
+NonLocalizedStringChecker::NonLocalizedStringChecker() {
+ BT.reset(new BugType(this, "Unlocalizable string",
+ "Localizability Issue (Apple)"));
+}
+
+#define NEW_RECEIVER(receiver) \
+ llvm::DenseMap<Selector, uint8_t> &receiver##M = \
+ UIMethods.insert({&Ctx.Idents.get(#receiver), \
+ llvm::DenseMap<Selector, uint8_t>()}) \
+ .first->second;
+#define ADD_NULLARY_METHOD(receiver, method, argument) \
+ receiver##M.insert( \
+ {Ctx.Selectors.getNullarySelector(&Ctx.Idents.get(#method)), argument});
+#define ADD_UNARY_METHOD(receiver, method, argument) \
+ receiver##M.insert( \
+ {Ctx.Selectors.getUnarySelector(&Ctx.Idents.get(#method)), argument});
+#define ADD_METHOD(receiver, method_list, count, argument) \
+ receiver##M.insert({Ctx.Selectors.getSelector(count, method_list), argument});
+
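+// For illustration, a registration below such as
+//   NEW_RECEIVER(UILabel)
+//   ADD_UNARY_METHOD(UILabel, setText, 0)
+// records in UIMethods that argument 0 of -[UILabel setText:] must be a
+// localized string; the macros only populate the map, keyed by class name
+// and selector.
+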
+/// Initializes a list of methods that require a localized string
+/// Format: {"ClassName", {{"selectorName:", LocStringArg#}, ...}, ...}
+void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
+ if (!UIMethods.empty())
+ return;
+
+ // UI Methods
+ NEW_RECEIVER(UISearchDisplayController)
+ ADD_UNARY_METHOD(UISearchDisplayController, setSearchResultsTitle, 0)
+
+ NEW_RECEIVER(UITabBarItem)
+ IdentifierInfo *initWithTitleUITabBarItemTag[] = {
+ &Ctx.Idents.get("initWithTitle"), &Ctx.Idents.get("image"),
+ &Ctx.Idents.get("tag")};
+ ADD_METHOD(UITabBarItem, initWithTitleUITabBarItemTag, 3, 0)
+ IdentifierInfo *initWithTitleUITabBarItemImage[] = {
+ &Ctx.Idents.get("initWithTitle"), &Ctx.Idents.get("image"),
+ &Ctx.Idents.get("selectedImage")};
+ ADD_METHOD(UITabBarItem, initWithTitleUITabBarItemImage, 3, 0)
+
+ NEW_RECEIVER(NSDockTile)
+ ADD_UNARY_METHOD(NSDockTile, setBadgeLabel, 0)
+
+ NEW_RECEIVER(NSStatusItem)
+ ADD_UNARY_METHOD(NSStatusItem, setTitle, 0)
+ ADD_UNARY_METHOD(NSStatusItem, setToolTip, 0)
+
+ NEW_RECEIVER(UITableViewRowAction)
+ IdentifierInfo *rowActionWithStyleUITableViewRowAction[] = {
+ &Ctx.Idents.get("rowActionWithStyle"), &Ctx.Idents.get("title"),
+ &Ctx.Idents.get("handler")};
+ ADD_METHOD(UITableViewRowAction, rowActionWithStyleUITableViewRowAction, 3, 1)
+ ADD_UNARY_METHOD(UITableViewRowAction, setTitle, 0)
+
+ NEW_RECEIVER(NSBox)
+ ADD_UNARY_METHOD(NSBox, setTitle, 0)
+
+ NEW_RECEIVER(NSButton)
+ ADD_UNARY_METHOD(NSButton, setTitle, 0)
+ ADD_UNARY_METHOD(NSButton, setAlternateTitle, 0)
+
+ NEW_RECEIVER(NSSavePanel)
+ ADD_UNARY_METHOD(NSSavePanel, setPrompt, 0)
+ ADD_UNARY_METHOD(NSSavePanel, setTitle, 0)
+ ADD_UNARY_METHOD(NSSavePanel, setNameFieldLabel, 0)
+ ADD_UNARY_METHOD(NSSavePanel, setNameFieldStringValue, 0)
+ ADD_UNARY_METHOD(NSSavePanel, setMessage, 0)
+
+ NEW_RECEIVER(UIPrintInfo)
+ ADD_UNARY_METHOD(UIPrintInfo, setJobName, 0)
+
+ NEW_RECEIVER(NSTabViewItem)
+ ADD_UNARY_METHOD(NSTabViewItem, setLabel, 0)
+ ADD_UNARY_METHOD(NSTabViewItem, setToolTip, 0)
+
+ NEW_RECEIVER(NSBrowser)
+ IdentifierInfo *setTitleNSBrowser[] = {&Ctx.Idents.get("setTitle"),
+ &Ctx.Idents.get("ofColumn")};
+ ADD_METHOD(NSBrowser, setTitleNSBrowser, 2, 0)
+
+ NEW_RECEIVER(UIAccessibilityElement)
+ ADD_UNARY_METHOD(UIAccessibilityElement, setAccessibilityLabel, 0)
+ ADD_UNARY_METHOD(UIAccessibilityElement, setAccessibilityHint, 0)
+ ADD_UNARY_METHOD(UIAccessibilityElement, setAccessibilityValue, 0)
+
+ NEW_RECEIVER(UIAlertAction)
+ IdentifierInfo *actionWithTitleUIAlertAction[] = {
+ &Ctx.Idents.get("actionWithTitle"), &Ctx.Idents.get("style"),
+ &Ctx.Idents.get("handler")};
+ ADD_METHOD(UIAlertAction, actionWithTitleUIAlertAction, 3, 0)
+
+ NEW_RECEIVER(NSPopUpButton)
+ ADD_UNARY_METHOD(NSPopUpButton, addItemWithTitle, 0)
+ IdentifierInfo *insertItemWithTitleNSPopUpButton[] = {
+ &Ctx.Idents.get("insertItemWithTitle"), &Ctx.Idents.get("atIndex")};
+ ADD_METHOD(NSPopUpButton, insertItemWithTitleNSPopUpButton, 2, 0)
+ ADD_UNARY_METHOD(NSPopUpButton, removeItemWithTitle, 0)
+ ADD_UNARY_METHOD(NSPopUpButton, selectItemWithTitle, 0)
+ ADD_UNARY_METHOD(NSPopUpButton, setTitle, 0)
+
+ NEW_RECEIVER(NSTableViewRowAction)
+ IdentifierInfo *rowActionWithStyleNSTableViewRowAction[] = {
+ &Ctx.Idents.get("rowActionWithStyle"), &Ctx.Idents.get("title"),
+ &Ctx.Idents.get("handler")};
+ ADD_METHOD(NSTableViewRowAction, rowActionWithStyleNSTableViewRowAction, 3, 1)
+ ADD_UNARY_METHOD(NSTableViewRowAction, setTitle, 0)
+
+ NEW_RECEIVER(NSImage)
+ ADD_UNARY_METHOD(NSImage, setAccessibilityDescription, 0)
+
+ NEW_RECEIVER(NSUserActivity)
+ ADD_UNARY_METHOD(NSUserActivity, setTitle, 0)
+
+ NEW_RECEIVER(NSPathControlItem)
+ ADD_UNARY_METHOD(NSPathControlItem, setTitle, 0)
+
+ NEW_RECEIVER(NSCell)
+ ADD_UNARY_METHOD(NSCell, initTextCell, 0)
+ ADD_UNARY_METHOD(NSCell, setTitle, 0)
+ ADD_UNARY_METHOD(NSCell, setStringValue, 0)
+
+ NEW_RECEIVER(NSPathControl)
+ ADD_UNARY_METHOD(NSPathControl, setPlaceholderString, 0)
+
+ NEW_RECEIVER(UIAccessibility)
+ ADD_UNARY_METHOD(UIAccessibility, setAccessibilityLabel, 0)
+ ADD_UNARY_METHOD(UIAccessibility, setAccessibilityHint, 0)
+ ADD_UNARY_METHOD(UIAccessibility, setAccessibilityValue, 0)
+
+ NEW_RECEIVER(NSTableColumn)
+ ADD_UNARY_METHOD(NSTableColumn, setTitle, 0)
+ ADD_UNARY_METHOD(NSTableColumn, setHeaderToolTip, 0)
+
+ NEW_RECEIVER(NSSegmentedControl)
+ IdentifierInfo *setLabelNSSegmentedControl[] = {
+ &Ctx.Idents.get("setLabel"), &Ctx.Idents.get("forSegment")};
+ ADD_METHOD(NSSegmentedControl, setLabelNSSegmentedControl, 2, 0)
+
+ NEW_RECEIVER(NSButtonCell)
+ ADD_UNARY_METHOD(NSButtonCell, setTitle, 0)
+ ADD_UNARY_METHOD(NSButtonCell, setAlternateTitle, 0)
+
+ NEW_RECEIVER(NSSliderCell)
+ ADD_UNARY_METHOD(NSSliderCell, setTitle, 0)
+
+ NEW_RECEIVER(NSControl)
+ ADD_UNARY_METHOD(NSControl, setStringValue, 0)
+
+ NEW_RECEIVER(NSAccessibility)
+ ADD_UNARY_METHOD(NSAccessibility, setAccessibilityValueDescription, 0)
+ ADD_UNARY_METHOD(NSAccessibility, setAccessibilityLabel, 0)
+ ADD_UNARY_METHOD(NSAccessibility, setAccessibilityTitle, 0)
+ ADD_UNARY_METHOD(NSAccessibility, setAccessibilityPlaceholderValue, 0)
+ ADD_UNARY_METHOD(NSAccessibility, setAccessibilityHelp, 0)
+
+ NEW_RECEIVER(NSMatrix)
+ IdentifierInfo *setToolTipNSMatrix[] = {&Ctx.Idents.get("setToolTip"),
+ &Ctx.Idents.get("forCell")};
+ ADD_METHOD(NSMatrix, setToolTipNSMatrix, 2, 0)
+
+ NEW_RECEIVER(NSPrintPanel)
+ ADD_UNARY_METHOD(NSPrintPanel, setDefaultButtonTitle, 0)
+
+ NEW_RECEIVER(UILocalNotification)
+ ADD_UNARY_METHOD(UILocalNotification, setAlertBody, 0)
+ ADD_UNARY_METHOD(UILocalNotification, setAlertAction, 0)
+ ADD_UNARY_METHOD(UILocalNotification, setAlertTitle, 0)
+
+ NEW_RECEIVER(NSSlider)
+ ADD_UNARY_METHOD(NSSlider, setTitle, 0)
+
+ NEW_RECEIVER(UIMenuItem)
+ IdentifierInfo *initWithTitleUIMenuItem[] = {&Ctx.Idents.get("initWithTitle"),
+ &Ctx.Idents.get("action")};
+ ADD_METHOD(UIMenuItem, initWithTitleUIMenuItem, 2, 0)
+ ADD_UNARY_METHOD(UIMenuItem, setTitle, 0)
+
+ NEW_RECEIVER(UIAlertController)
+ IdentifierInfo *alertControllerWithTitleUIAlertController[] = {
+ &Ctx.Idents.get("alertControllerWithTitle"), &Ctx.Idents.get("message"),
+ &Ctx.Idents.get("preferredStyle")};
+ ADD_METHOD(UIAlertController, alertControllerWithTitleUIAlertController, 3, 1)
+ ADD_UNARY_METHOD(UIAlertController, setTitle, 0)
+ ADD_UNARY_METHOD(UIAlertController, setMessage, 0)
+
+ NEW_RECEIVER(UIApplicationShortcutItem)
+ IdentifierInfo *initWithTypeUIApplicationShortcutItemIcon[] = {
+ &Ctx.Idents.get("initWithType"), &Ctx.Idents.get("localizedTitle"),
+ &Ctx.Idents.get("localizedSubtitle"), &Ctx.Idents.get("icon"),
+ &Ctx.Idents.get("userInfo")};
+ ADD_METHOD(UIApplicationShortcutItem,
+ initWithTypeUIApplicationShortcutItemIcon, 5, 1)
+ IdentifierInfo *initWithTypeUIApplicationShortcutItem[] = {
+ &Ctx.Idents.get("initWithType"), &Ctx.Idents.get("localizedTitle")};
+ ADD_METHOD(UIApplicationShortcutItem, initWithTypeUIApplicationShortcutItem,
+ 2, 1)
+
+ NEW_RECEIVER(UIActionSheet)
+ IdentifierInfo *initWithTitleUIActionSheet[] = {
+ &Ctx.Idents.get("initWithTitle"), &Ctx.Idents.get("delegate"),
+ &Ctx.Idents.get("cancelButtonTitle"),
+ &Ctx.Idents.get("destructiveButtonTitle"),
+ &Ctx.Idents.get("otherButtonTitles")};
+ ADD_METHOD(UIActionSheet, initWithTitleUIActionSheet, 5, 0)
+ ADD_UNARY_METHOD(UIActionSheet, addButtonWithTitle, 0)
+ ADD_UNARY_METHOD(UIActionSheet, setTitle, 0)
+
+ NEW_RECEIVER(NSURLSessionTask)
+ ADD_UNARY_METHOD(NSURLSessionTask, setTaskDescription, 0)
+
+ NEW_RECEIVER(UIAccessibilityCustomAction)
+ IdentifierInfo *initWithNameUIAccessibilityCustomAction[] = {
+ &Ctx.Idents.get("initWithName"), &Ctx.Idents.get("target"),
+ &Ctx.Idents.get("selector")};
+ ADD_METHOD(UIAccessibilityCustomAction,
+ initWithNameUIAccessibilityCustomAction, 3, 0)
+ ADD_UNARY_METHOD(UIAccessibilityCustomAction, setName, 0)
+
+ NEW_RECEIVER(UISearchBar)
+ ADD_UNARY_METHOD(UISearchBar, setText, 0)
+ ADD_UNARY_METHOD(UISearchBar, setPrompt, 0)
+ ADD_UNARY_METHOD(UISearchBar, setPlaceholder, 0)
+
+ NEW_RECEIVER(UIBarItem)
+ ADD_UNARY_METHOD(UIBarItem, setTitle, 0)
+
+ NEW_RECEIVER(UITextView)
+ ADD_UNARY_METHOD(UITextView, setText, 0)
+
+ NEW_RECEIVER(NSView)
+ ADD_UNARY_METHOD(NSView, setToolTip, 0)
+
+ NEW_RECEIVER(NSTextField)
+ ADD_UNARY_METHOD(NSTextField, setPlaceholderString, 0)
+
+ NEW_RECEIVER(NSAttributedString)
+ ADD_UNARY_METHOD(NSAttributedString, initWithString, 0)
+ IdentifierInfo *initWithStringNSAttributedString[] = {
+ &Ctx.Idents.get("initWithString"), &Ctx.Idents.get("attributes")};
+ ADD_METHOD(NSAttributedString, initWithStringNSAttributedString, 2, 0)
+
+ NEW_RECEIVER(NSText)
+ ADD_UNARY_METHOD(NSText, setString, 0)
+
+ NEW_RECEIVER(UIKeyCommand)
+ IdentifierInfo *keyCommandWithInputUIKeyCommand[] = {
+ &Ctx.Idents.get("keyCommandWithInput"), &Ctx.Idents.get("modifierFlags"),
+ &Ctx.Idents.get("action"), &Ctx.Idents.get("discoverabilityTitle")};
+ ADD_METHOD(UIKeyCommand, keyCommandWithInputUIKeyCommand, 4, 3)
+ ADD_UNARY_METHOD(UIKeyCommand, setDiscoverabilityTitle, 0)
+
+ NEW_RECEIVER(UILabel)
+ ADD_UNARY_METHOD(UILabel, setText, 0)
+
+ NEW_RECEIVER(NSAlert)
+ IdentifierInfo *alertWithMessageTextNSAlert[] = {
+ &Ctx.Idents.get("alertWithMessageText"), &Ctx.Idents.get("defaultButton"),
+ &Ctx.Idents.get("alternateButton"), &Ctx.Idents.get("otherButton"),
+ &Ctx.Idents.get("informativeTextWithFormat")};
+ ADD_METHOD(NSAlert, alertWithMessageTextNSAlert, 5, 0)
+ ADD_UNARY_METHOD(NSAlert, addButtonWithTitle, 0)
+ ADD_UNARY_METHOD(NSAlert, setMessageText, 0)
+ ADD_UNARY_METHOD(NSAlert, setInformativeText, 0)
+ ADD_UNARY_METHOD(NSAlert, setHelpAnchor, 0)
+
+ NEW_RECEIVER(UIMutableApplicationShortcutItem)
+ ADD_UNARY_METHOD(UIMutableApplicationShortcutItem, setLocalizedTitle, 0)
+ ADD_UNARY_METHOD(UIMutableApplicationShortcutItem, setLocalizedSubtitle, 0)
+
+ NEW_RECEIVER(UIButton)
+ IdentifierInfo *setTitleUIButton[] = {&Ctx.Idents.get("setTitle"),
+ &Ctx.Idents.get("forState")};
+ ADD_METHOD(UIButton, setTitleUIButton, 2, 0)
+
+ NEW_RECEIVER(NSWindow)
+ ADD_UNARY_METHOD(NSWindow, setTitle, 0)
+ IdentifierInfo *minFrameWidthWithTitleNSWindow[] = {
+ &Ctx.Idents.get("minFrameWidthWithTitle"), &Ctx.Idents.get("styleMask")};
+ ADD_METHOD(NSWindow, minFrameWidthWithTitleNSWindow, 2, 0)
+ ADD_UNARY_METHOD(NSWindow, setMiniwindowTitle, 0)
+
+ NEW_RECEIVER(NSPathCell)
+ ADD_UNARY_METHOD(NSPathCell, setPlaceholderString, 0)
+
+ NEW_RECEIVER(UIDocumentMenuViewController)
+ IdentifierInfo *addOptionWithTitleUIDocumentMenuViewController[] = {
+ &Ctx.Idents.get("addOptionWithTitle"), &Ctx.Idents.get("image"),
+ &Ctx.Idents.get("order"), &Ctx.Idents.get("handler")};
+ ADD_METHOD(UIDocumentMenuViewController,
+ addOptionWithTitleUIDocumentMenuViewController, 4, 0)
+
+ NEW_RECEIVER(UINavigationItem)
+ ADD_UNARY_METHOD(UINavigationItem, initWithTitle, 0)
+ ADD_UNARY_METHOD(UINavigationItem, setTitle, 0)
+ ADD_UNARY_METHOD(UINavigationItem, setPrompt, 0)
+
+ NEW_RECEIVER(UIAlertView)
+ IdentifierInfo *initWithTitleUIAlertView[] = {
+ &Ctx.Idents.get("initWithTitle"), &Ctx.Idents.get("message"),
+ &Ctx.Idents.get("delegate"), &Ctx.Idents.get("cancelButtonTitle"),
+ &Ctx.Idents.get("otherButtonTitles")};
+ ADD_METHOD(UIAlertView, initWithTitleUIAlertView, 5, 0)
+ ADD_UNARY_METHOD(UIAlertView, addButtonWithTitle, 0)
+ ADD_UNARY_METHOD(UIAlertView, setTitle, 0)
+ ADD_UNARY_METHOD(UIAlertView, setMessage, 0)
+
+ NEW_RECEIVER(NSFormCell)
+ ADD_UNARY_METHOD(NSFormCell, initTextCell, 0)
+ ADD_UNARY_METHOD(NSFormCell, setTitle, 0)
+ ADD_UNARY_METHOD(NSFormCell, setPlaceholderString, 0)
+
+ NEW_RECEIVER(NSUserNotification)
+ ADD_UNARY_METHOD(NSUserNotification, setTitle, 0)
+ ADD_UNARY_METHOD(NSUserNotification, setSubtitle, 0)
+ ADD_UNARY_METHOD(NSUserNotification, setInformativeText, 0)
+ ADD_UNARY_METHOD(NSUserNotification, setActionButtonTitle, 0)
+ ADD_UNARY_METHOD(NSUserNotification, setOtherButtonTitle, 0)
+ ADD_UNARY_METHOD(NSUserNotification, setResponsePlaceholder, 0)
+
+ NEW_RECEIVER(NSToolbarItem)
+ ADD_UNARY_METHOD(NSToolbarItem, setLabel, 0)
+ ADD_UNARY_METHOD(NSToolbarItem, setPaletteLabel, 0)
+ ADD_UNARY_METHOD(NSToolbarItem, setToolTip, 0)
+
+ NEW_RECEIVER(NSProgress)
+ ADD_UNARY_METHOD(NSProgress, setLocalizedDescription, 0)
+ ADD_UNARY_METHOD(NSProgress, setLocalizedAdditionalDescription, 0)
+
+ NEW_RECEIVER(NSSegmentedCell)
+ IdentifierInfo *setLabelNSSegmentedCell[] = {&Ctx.Idents.get("setLabel"),
+ &Ctx.Idents.get("forSegment")};
+ ADD_METHOD(NSSegmentedCell, setLabelNSSegmentedCell, 2, 0)
+ IdentifierInfo *setToolTipNSSegmentedCell[] = {&Ctx.Idents.get("setToolTip"),
+ &Ctx.Idents.get("forSegment")};
+ ADD_METHOD(NSSegmentedCell, setToolTipNSSegmentedCell, 2, 0)
+
+ NEW_RECEIVER(NSUndoManager)
+ ADD_UNARY_METHOD(NSUndoManager, setActionName, 0)
+ ADD_UNARY_METHOD(NSUndoManager, undoMenuTitleForUndoActionName, 0)
+ ADD_UNARY_METHOD(NSUndoManager, redoMenuTitleForUndoActionName, 0)
+
+ NEW_RECEIVER(NSMenuItem)
+ IdentifierInfo *initWithTitleNSMenuItem[] = {
+ &Ctx.Idents.get("initWithTitle"), &Ctx.Idents.get("action"),
+ &Ctx.Idents.get("keyEquivalent")};
+ ADD_METHOD(NSMenuItem, initWithTitleNSMenuItem, 3, 0)
+ ADD_UNARY_METHOD(NSMenuItem, setTitle, 0)
+ ADD_UNARY_METHOD(NSMenuItem, setToolTip, 0)
+
+ NEW_RECEIVER(NSPopUpButtonCell)
+ IdentifierInfo *initTextCellNSPopUpButtonCell[] = {
+ &Ctx.Idents.get("initTextCell"), &Ctx.Idents.get("pullsDown")};
+ ADD_METHOD(NSPopUpButtonCell, initTextCellNSPopUpButtonCell, 2, 0)
+ ADD_UNARY_METHOD(NSPopUpButtonCell, addItemWithTitle, 0)
+ IdentifierInfo *insertItemWithTitleNSPopUpButtonCell[] = {
+ &Ctx.Idents.get("insertItemWithTitle"), &Ctx.Idents.get("atIndex")};
+ ADD_METHOD(NSPopUpButtonCell, insertItemWithTitleNSPopUpButtonCell, 2, 0)
+ ADD_UNARY_METHOD(NSPopUpButtonCell, removeItemWithTitle, 0)
+ ADD_UNARY_METHOD(NSPopUpButtonCell, selectItemWithTitle, 0)
+ ADD_UNARY_METHOD(NSPopUpButtonCell, setTitle, 0)
+
+ NEW_RECEIVER(NSViewController)
+ ADD_UNARY_METHOD(NSViewController, setTitle, 0)
+
+ NEW_RECEIVER(NSMenu)
+ ADD_UNARY_METHOD(NSMenu, initWithTitle, 0)
+ IdentifierInfo *insertItemWithTitleNSMenu[] = {
+ &Ctx.Idents.get("insertItemWithTitle"), &Ctx.Idents.get("action"),
+ &Ctx.Idents.get("keyEquivalent"), &Ctx.Idents.get("atIndex")};
+ ADD_METHOD(NSMenu, insertItemWithTitleNSMenu, 4, 0)
+ IdentifierInfo *addItemWithTitleNSMenu[] = {
+ &Ctx.Idents.get("addItemWithTitle"), &Ctx.Idents.get("action"),
+ &Ctx.Idents.get("keyEquivalent")};
+ ADD_METHOD(NSMenu, addItemWithTitleNSMenu, 3, 0)
+ ADD_UNARY_METHOD(NSMenu, setTitle, 0)
+
+ NEW_RECEIVER(UIMutableUserNotificationAction)
+ ADD_UNARY_METHOD(UIMutableUserNotificationAction, setTitle, 0)
+
+ NEW_RECEIVER(NSForm)
+ ADD_UNARY_METHOD(NSForm, addEntry, 0)
+ IdentifierInfo *insertEntryNSForm[] = {&Ctx.Idents.get("insertEntry"),
+ &Ctx.Idents.get("atIndex")};
+ ADD_METHOD(NSForm, insertEntryNSForm, 2, 0)
+
+ NEW_RECEIVER(NSTextFieldCell)
+ ADD_UNARY_METHOD(NSTextFieldCell, setPlaceholderString, 0)
+
+ NEW_RECEIVER(NSUserNotificationAction)
+ IdentifierInfo *actionWithIdentifierNSUserNotificationAction[] = {
+ &Ctx.Idents.get("actionWithIdentifier"), &Ctx.Idents.get("title")};
+ ADD_METHOD(NSUserNotificationAction,
+ actionWithIdentifierNSUserNotificationAction, 2, 1)
+
+ NEW_RECEIVER(NSURLSession)
+ ADD_UNARY_METHOD(NSURLSession, setSessionDescription, 0)
+
+ NEW_RECEIVER(UITextField)
+ ADD_UNARY_METHOD(UITextField, setText, 0)
+ ADD_UNARY_METHOD(UITextField, setPlaceholder, 0)
+
+ NEW_RECEIVER(UIBarButtonItem)
+ IdentifierInfo *initWithTitleUIBarButtonItem[] = {
+ &Ctx.Idents.get("initWithTitle"), &Ctx.Idents.get("style"),
+ &Ctx.Idents.get("target"), &Ctx.Idents.get("action")};
+ ADD_METHOD(UIBarButtonItem, initWithTitleUIBarButtonItem, 4, 0)
+
+ NEW_RECEIVER(UIViewController)
+ ADD_UNARY_METHOD(UIViewController, setTitle, 0)
+
+ NEW_RECEIVER(UISegmentedControl)
+ IdentifierInfo *insertSegmentWithTitleUISegmentedControl[] = {
+ &Ctx.Idents.get("insertSegmentWithTitle"), &Ctx.Idents.get("atIndex"),
+ &Ctx.Idents.get("animated")};
+ ADD_METHOD(UISegmentedControl, insertSegmentWithTitleUISegmentedControl, 3, 0)
+ IdentifierInfo *setTitleUISegmentedControl[] = {
+ &Ctx.Idents.get("setTitle"), &Ctx.Idents.get("forSegmentAtIndex")};
+ ADD_METHOD(UISegmentedControl, setTitleUISegmentedControl, 2, 0)
+}
+
+#define LSF_INSERT(function_name) LSF.insert(&Ctx.Idents.get(function_name));
+#define LSM_INSERT_NULLARY(receiver, method_name) \
+ LSM.insert({&Ctx.Idents.get(receiver), Ctx.Selectors.getNullarySelector( \
+ &Ctx.Idents.get(method_name))});
+#define LSM_INSERT_UNARY(receiver, method_name) \
+ LSM.insert({&Ctx.Idents.get(receiver), \
+ Ctx.Selectors.getUnarySelector(&Ctx.Idents.get(method_name))});
+#define LSM_INSERT_SELECTOR(receiver, method_list, arguments) \
+ LSM.insert({&Ctx.Idents.get(receiver), \
+ Ctx.Selectors.getSelector(arguments, method_list)});
+
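+// For illustration, LSM_INSERT_UNARY("NSDateFormatter", "stringFromDate")
+// below records that the value returned by -[NSDateFormatter
+// stringFromDate:] should be treated as localized; LSF_INSERT does the same
+// for C functions such as CFDateFormatterCreateStringWithDate.
+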
+/// Initializes a list of methods and C functions that return a localized string
+void NonLocalizedStringChecker::initLocStringsMethods(ASTContext &Ctx) const {
+ if (!LSM.empty())
+ return;
+
+ IdentifierInfo *LocalizedStringMacro[] = {
+ &Ctx.Idents.get("localizedStringForKey"), &Ctx.Idents.get("value"),
+ &Ctx.Idents.get("table")};
+ LSM_INSERT_SELECTOR("NSBundle", LocalizedStringMacro, 3)
+ LSM_INSERT_UNARY("NSDateFormatter", "stringFromDate")
+ IdentifierInfo *LocalizedStringFromDate[] = {
+ &Ctx.Idents.get("localizedStringFromDate"), &Ctx.Idents.get("dateStyle"),
+ &Ctx.Idents.get("timeStyle")};
+ LSM_INSERT_SELECTOR("NSDateFormatter", LocalizedStringFromDate, 3)
+ LSM_INSERT_UNARY("NSNumberFormatter", "stringFromNumber")
+ LSM_INSERT_NULLARY("UITextField", "text")
+ LSM_INSERT_NULLARY("UITextView", "text")
+ LSM_INSERT_NULLARY("UILabel", "text")
+
+ LSF_INSERT("CFDateFormatterCreateStringWithDate");
+ LSF_INSERT("CFDateFormatterCreateStringWithAbsoluteTime");
+ LSF_INSERT("CFNumberFormatterCreateStringWithNumber");
+}
+
+/// Checks to see if the method / function declaration includes
+/// __attribute__((annotate("returns_localized_nsstring")))
+bool NonLocalizedStringChecker::isAnnotatedAsLocalized(const Decl *D) const {
+ if (!D)
+ return false;
+ return std::any_of(
+ D->specific_attr_begin<AnnotateAttr>(),
+ D->specific_attr_end<AnnotateAttr>(), [](const AnnotateAttr *Ann) {
+ return Ann->getAnnotation() == "returns_localized_nsstring";
+ });
+}
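+
+// For illustration (hypothetical user code), a producer can opt in with:
+//   NSString *localizedGreeting(void)
+//       __attribute__((annotate("returns_localized_nsstring")));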
+
+/// Returns true if the given SVal is marked as Localized in the program state
+bool NonLocalizedStringChecker::hasLocalizedState(SVal S,
+ CheckerContext &C) const {
+ const MemRegion *mt = S.getAsRegion();
+ if (mt) {
+ const LocalizedState *LS = C.getState()->get<LocalizedMemMap>(mt);
+ if (LS && LS->isLocalized())
+ return true;
+ }
+ return false;
+}
+
+/// Returns true if the given SVal is marked as NonLocalized in the program
+/// state
+bool NonLocalizedStringChecker::hasNonLocalizedState(SVal S,
+ CheckerContext &C) const {
+ const MemRegion *mt = S.getAsRegion();
+ if (mt) {
+ const LocalizedState *LS = C.getState()->get<LocalizedMemMap>(mt);
+ if (LS && LS->isNonLocalized())
+ return true;
+ }
+ return false;
+}
+
+/// Marks the given SVal as Localized in the program state
+void NonLocalizedStringChecker::setLocalizedState(const SVal S,
+ CheckerContext &C) const {
+ const MemRegion *mt = S.getAsRegion();
+ if (mt) {
+ ProgramStateRef State =
+ C.getState()->set<LocalizedMemMap>(mt, LocalizedState::getLocalized());
+ C.addTransition(State);
+ }
+}
+
+/// Marks the given SVal as NonLocalized in the program state
+void NonLocalizedStringChecker::setNonLocalizedState(const SVal S,
+ CheckerContext &C) const {
+ const MemRegion *mt = S.getAsRegion();
+ if (mt) {
+ ProgramStateRef State = C.getState()->set<LocalizedMemMap>(
+ mt, LocalizedState::getNonLocalized());
+ C.addTransition(State);
+ }
+}
+
+/// Reports a localization error for the passed-in method call and SVal
+void NonLocalizedStringChecker::reportLocalizationError(
+ SVal S, const ObjCMethodCall &M, CheckerContext &C,
+ int argumentNumber) const {
+
+ ExplodedNode *ErrNode = C.getPredecessor();
+ static CheckerProgramPointTag Tag("NonLocalizedStringChecker",
+ "UnlocalizedString");
+ ErrNode = C.addTransition(C.getState(), C.getPredecessor(), &Tag);
+
+ if (!ErrNode)
+ return;
+
+ // Generate the bug report.
+ std::unique_ptr<BugReport> R(new BugReport(
+ *BT, "User-facing text should use localized string macro", ErrNode));
+ if (argumentNumber) {
+ R->addRange(M.getArgExpr(argumentNumber - 1)->getSourceRange());
+ } else {
+ R->addRange(M.getSourceRange());
+ }
+ R->markInteresting(S);
+ C.emitReport(std::move(R));
+}
+
+/// Returns the argument number requiring a localized string if it exists;
+/// otherwise, returns -1
+int NonLocalizedStringChecker::getLocalizedArgumentForSelector(
+ const IdentifierInfo *Receiver, Selector S) const {
+ auto method = UIMethods.find(Receiver);
+
+ if (method == UIMethods.end())
+ return -1;
+
+ auto argumentIterator = method->getSecond().find(S);
+
+ if (argumentIterator == method->getSecond().end())
+ return -1;
+
+ int argumentNumber = argumentIterator->getSecond();
+ return argumentNumber;
+}
+
+/// Check if the string being passed in has NonLocalized state
+void NonLocalizedStringChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
+ CheckerContext &C) const {
+ initUIMethods(C.getASTContext());
+
+ const ObjCInterfaceDecl *OD = msg.getReceiverInterface();
+ if (!OD)
+ return;
+ const IdentifierInfo *odInfo = OD->getIdentifier();
+
+ Selector S = msg.getSelector();
+
+ std::string SelectorString = S.getAsString();
+ StringRef SelectorName = SelectorString;
+ assert(!SelectorName.empty());
+
+ if (odInfo->isStr("NSString")) {
+ // Handle the case where the receiver is an NSString
+ // These special NSString methods draw to the screen
+
+ if (!(SelectorName.startswith("drawAtPoint") ||
+ SelectorName.startswith("drawInRect") ||
+ SelectorName.startswith("drawWithRect")))
+ return;
+
+ SVal svTitle = msg.getReceiverSVal();
+
+ bool isNonLocalized = hasNonLocalizedState(svTitle, C);
+
+ if (isNonLocalized) {
+ reportLocalizationError(svTitle, msg, C);
+ }
+ }
+
+ int argumentNumber = getLocalizedArgumentForSelector(odInfo, S);
+  // Walk up the superclass hierarchy, also checking each class's protocols
+ while (argumentNumber < 0 && OD->getSuperClass() != nullptr) {
+ for (const auto *P : OD->all_referenced_protocols()) {
+ argumentNumber = getLocalizedArgumentForSelector(P->getIdentifier(), S);
+ if (argumentNumber >= 0)
+ break;
+ }
+ if (argumentNumber < 0) {
+ OD = OD->getSuperClass();
+ argumentNumber = getLocalizedArgumentForSelector(OD->getIdentifier(), S);
+ }
+ }
+
+ if (argumentNumber < 0) // There was no match in UIMethods
+ return;
+
+ SVal svTitle = msg.getArgSVal(argumentNumber);
+
+ if (const ObjCStringRegion *SR =
+ dyn_cast_or_null<ObjCStringRegion>(svTitle.getAsRegion())) {
+ StringRef stringValue =
+ SR->getObjCStringLiteral()->getString()->getString();
+ if ((stringValue.trim().size() == 0 && stringValue.size() > 0) ||
+ stringValue.empty())
+ return;
+ if (!IsAggressive && llvm::sys::unicode::columnWidthUTF8(stringValue) < 2)
+ return;
+ }
+
+ bool isNonLocalized = hasNonLocalizedState(svTitle, C);
+
+ if (isNonLocalized) {
+ reportLocalizationError(svTitle, msg, C, argumentNumber + 1);
+ }
+}
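+
+// For illustration (hypothetical user code), given the registrations above:
+//   [myLabel setText:@"Hello world"];                             // warning
+//   [myLabel setText:NSLocalizedString(@"Hello world", @"greeting")]; // ok
+// The literal in the first call is marked NonLocalized by checkPostStmt
+// below, so it fails the localization precondition of -[UILabel setText:].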
+
+static inline bool isNSStringType(QualType T, ASTContext &Ctx) {
+
+ const ObjCObjectPointerType *PT = T->getAs<ObjCObjectPointerType>();
+ if (!PT)
+ return false;
+
+ ObjCInterfaceDecl *Cls = PT->getObjectType()->getInterface();
+ if (!Cls)
+ return false;
+
+ IdentifierInfo *ClsName = Cls->getIdentifier();
+
+ // FIXME: Should we walk the chain of classes?
+ return ClsName == &Ctx.Idents.get("NSString") ||
+ ClsName == &Ctx.Idents.get("NSMutableString");
+}
+
+/// Marks a string being returned by any call as localized
+/// if it is in LocStringFunctions (LSF) or the function is annotated.
+/// Otherwise, we mark it as NonLocalized (Aggressive) or
+/// NonLocalized only if it is not backed by a SymRegion (Non-Aggressive),
+/// basically leaving only string literals as NonLocalized.
+void NonLocalizedStringChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ initLocStringsMethods(C.getASTContext());
+
+ if (!Call.getOriginExpr())
+ return;
+
+  // Anything that takes in a localized NSString as an argument
+  // and returns an NSString is assumed to be returning a
+  // localized NSString. (Counterexample: incorrectly combining
+  // two localized strings.)
+ const QualType RT = Call.getResultType();
+ if (isNSStringType(RT, C.getASTContext())) {
+ for (unsigned i = 0; i < Call.getNumArgs(); ++i) {
+ SVal argValue = Call.getArgSVal(i);
+ if (hasLocalizedState(argValue, C)) {
+ SVal sv = Call.getReturnValue();
+ setLocalizedState(sv, C);
+ return;
+ }
+ }
+ }
+
+ const Decl *D = Call.getDecl();
+ if (!D)
+ return;
+
+ const IdentifierInfo *Identifier = Call.getCalleeIdentifier();
+
+ SVal sv = Call.getReturnValue();
+ if (isAnnotatedAsLocalized(D) || LSF.count(Identifier) != 0) {
+ setLocalizedState(sv, C);
+ } else if (isNSStringType(RT, C.getASTContext()) &&
+ !hasLocalizedState(sv, C)) {
+ if (IsAggressive) {
+ setNonLocalizedState(sv, C);
+ } else {
+ const SymbolicRegion *SymReg =
+ dyn_cast_or_null<SymbolicRegion>(sv.getAsRegion());
+ if (!SymReg)
+ setNonLocalizedState(sv, C);
+ }
+ }
+}
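+
+// For illustration, in non-aggressive mode a call to a function in LSF, such
+// as CFDateFormatterCreateStringWithDate, yields a value marked Localized,
+// while an NSString conjured from an unknown call is typically backed by a
+// SymbolicRegion and is left untracked rather than flagged.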
+
+/// Marks a string being returned by an ObjC method as localized
+/// if it is in LocStringMethods or the method is annotated
+void NonLocalizedStringChecker::checkPostObjCMessage(const ObjCMethodCall &msg,
+ CheckerContext &C) const {
+ initLocStringsMethods(C.getASTContext());
+
+ if (!msg.isInstanceMessage())
+ return;
+
+ const ObjCInterfaceDecl *OD = msg.getReceiverInterface();
+ if (!OD)
+ return;
+ const IdentifierInfo *odInfo = OD->getIdentifier();
+
+ Selector S = msg.getSelector();
+ std::string SelectorName = S.getAsString();
+
+ std::pair<const IdentifierInfo *, Selector> MethodDescription = {odInfo, S};
+
+ if (LSM.count(MethodDescription) || isAnnotatedAsLocalized(msg.getDecl())) {
+ SVal sv = msg.getReturnValue();
+ setLocalizedState(sv, C);
+ }
+}
+
+/// Marks all ObjC string literals as NonLocalized
+void NonLocalizedStringChecker::checkPostStmt(const ObjCStringLiteral *SL,
+ CheckerContext &C) const {
+ SVal sv = C.getSVal(SL);
+ setNonLocalizedState(sv, C);
+}
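+
+// Every ObjCStringLiteral is thus pessimistically treated as non-localized
+// text; a value only becomes "safe" by flowing through a localization API or
+// an annotated producer.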
+
+namespace {
+class EmptyLocalizationContextChecker
+ : public Checker<check::ASTDecl<ObjCImplementationDecl>> {
+
+ // A helper class, which walks the AST
+ class MethodCrawler : public ConstStmtVisitor<MethodCrawler> {
+ const ObjCMethodDecl *MD;
+ BugReporter &BR;
+ AnalysisManager &Mgr;
+ const CheckerBase *Checker;
+ LocationOrAnalysisDeclContext DCtx;
+
+ public:
+ MethodCrawler(const ObjCMethodDecl *InMD, BugReporter &InBR,
+ const CheckerBase *Checker, AnalysisManager &InMgr,
+ AnalysisDeclContext *InDCtx)
+ : MD(InMD), BR(InBR), Mgr(InMgr), Checker(Checker), DCtx(InDCtx) {}
+
+ void VisitStmt(const Stmt *S) { VisitChildren(S); }
+
+ void VisitObjCMessageExpr(const ObjCMessageExpr *ME);
+
+ void reportEmptyContextError(const ObjCMessageExpr *M) const;
+
+ void VisitChildren(const Stmt *S) {
+ for (const Stmt *Child : S->children()) {
+ if (Child)
+ this->Visit(Child);
+ }
+ }
+ };
+
+public:
+ void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager &Mgr,
+ BugReporter &BR) const;
+};
+} // end anonymous namespace
+
+void EmptyLocalizationContextChecker::checkASTDecl(
+ const ObjCImplementationDecl *D, AnalysisManager &Mgr,
+ BugReporter &BR) const {
+
+ for (const ObjCMethodDecl *M : D->methods()) {
+ AnalysisDeclContext *DCtx = Mgr.getAnalysisDeclContext(M);
+
+ const Stmt *Body = M->getBody();
+ assert(Body);
+
+ MethodCrawler MC(M->getCanonicalDecl(), BR, this, Mgr, DCtx);
+ MC.VisitStmt(Body);
+ }
+}
+
+/// This check attempts to match these macros, assuming they are defined as
+/// follows:
+///
+/// #define NSLocalizedString(key, comment) \
+/// [[NSBundle mainBundle] localizedStringForKey:(key) value:@"" table:nil]
+/// #define NSLocalizedStringFromTable(key, tbl, comment) \
+/// [[NSBundle mainBundle] localizedStringForKey:(key) value:@"" table:(tbl)]
+/// #define NSLocalizedStringFromTableInBundle(key, tbl, bundle, comment) \
+/// [bundle localizedStringForKey:(key) value:@"" table:(tbl)]
+/// #define NSLocalizedStringWithDefaultValue(key, tbl, bundle, val, comment)
+///
+/// We cannot use the path-sensitive check because the macro argument we are
+/// checking for (the comment) is not used and thus not present in the AST,
+/// so we run the Lexer on the original macro call and retrieve the value of
+/// the comment. If it's empty or nil, we raise a warning.
+void EmptyLocalizationContextChecker::MethodCrawler::VisitObjCMessageExpr(
+ const ObjCMessageExpr *ME) {
+
+ const ObjCInterfaceDecl *OD = ME->getReceiverInterface();
+ if (!OD)
+ return;
+
+ const IdentifierInfo *odInfo = OD->getIdentifier();
+
+ if (!(odInfo->isStr("NSBundle") &&
+ ME->getSelector().getAsString() ==
+ "localizedStringForKey:value:table:")) {
+ return;
+ }
+
+ SourceRange R = ME->getSourceRange();
+ if (!R.getBegin().isMacroID())
+ return;
+
+ // getImmediateMacroCallerLoc gets the location of the immediate macro
+ // caller, one level up the stack toward the initial macro typed into the
+ // source, so SL should point to the NSLocalizedString macro.
+ SourceLocation SL =
+ Mgr.getSourceManager().getImmediateMacroCallerLoc(R.getBegin());
+ std::pair<FileID, unsigned> SLInfo =
+ Mgr.getSourceManager().getDecomposedLoc(SL);
+
+ SrcMgr::SLocEntry SE = Mgr.getSourceManager().getSLocEntry(SLInfo.first);
+
+  // If the NSLocalizedString macro is wrapped in another macro, we need to
+  // unwrap the expansion until we get to the NSLocalizedString macro itself.
+ while (SE.isExpansion()) {
+ SL = SE.getExpansion().getSpellingLoc();
+ SLInfo = Mgr.getSourceManager().getDecomposedLoc(SL);
+ SE = Mgr.getSourceManager().getSLocEntry(SLInfo.first);
+ }
+
+ llvm::MemoryBuffer *BF = SE.getFile().getContentCache()->getRawBuffer();
+ Lexer TheLexer(SL, LangOptions(), BF->getBufferStart(),
+ BF->getBufferStart() + SLInfo.second, BF->getBufferEnd());
+
+ Token I;
+ Token Result; // This will hold the token just before the last ')'
+ int p_count = 0; // This is for parenthesis matching
+ while (!TheLexer.LexFromRawLexer(I)) {
+ if (I.getKind() == tok::l_paren)
+ ++p_count;
+ if (I.getKind() == tok::r_paren) {
+ if (p_count == 1)
+ break;
+ --p_count;
+ }
+ Result = I;
+ }
+
+ if (isAnyIdentifier(Result.getKind())) {
+ if (Result.getRawIdentifier().equals("nil")) {
+ reportEmptyContextError(ME);
+ return;
+ }
+ }
+
+ if (!isStringLiteral(Result.getKind()))
+ return;
+
+ StringRef Comment =
+ StringRef(Result.getLiteralData(), Result.getLength()).trim("\"");
+
+ if ((Comment.trim().size() == 0 && Comment.size() > 0) || // Is Whitespace
+ Comment.empty()) {
+ reportEmptyContextError(ME);
+ }
+}
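+
+// For illustration (hypothetical user code), the raw-lexing above reports:
+//   NSLocalizedString(@"Save", nil);   // comment argument is nil
+//   NSLocalizedString(@"Save", @"");   // comment argument is empty
+// but accepts a call whose last argument is a non-empty string literal.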
+
+void EmptyLocalizationContextChecker::MethodCrawler::reportEmptyContextError(
+ const ObjCMessageExpr *ME) const {
+ // Generate the bug report.
+ BR.EmitBasicReport(MD, Checker, "Context Missing",
+ "Localizability Issue (Apple)",
+ "Localized string macro should include a non-empty "
+ "comment for translators",
+ PathDiagnosticLocation(ME, BR.getSourceManager(), DCtx));
+}
+
+namespace {
+class PluralMisuseChecker : public Checker<check::ASTCodeBody> {
+
+ // A helper class, which walks the AST
+ class MethodCrawler : public RecursiveASTVisitor<MethodCrawler> {
+ BugReporter &BR;
+ const CheckerBase *Checker;
+ AnalysisDeclContext *AC;
+
+ // This functions like a stack. We push on any IfStmt or
+ // ConditionalOperator that matches the condition
+ // and pop it off when we leave that statement
+ llvm::SmallVector<const clang::Stmt *, 8> MatchingStatements;
+ // This is true when we are the direct-child of a
+ // matching statement
+ bool InMatchingStatement = false;
+
+ public:
+ explicit MethodCrawler(BugReporter &InBR, const CheckerBase *Checker,
+ AnalysisDeclContext *InAC)
+ : BR(InBR), Checker(Checker), AC(InAC) {}
+
+ bool VisitIfStmt(const IfStmt *I);
+ bool EndVisitIfStmt(IfStmt *I);
+ bool TraverseIfStmt(IfStmt *x);
+ bool VisitConditionalOperator(const ConditionalOperator *C);
+ bool TraverseConditionalOperator(ConditionalOperator *C);
+ bool VisitCallExpr(const CallExpr *CE);
+ bool VisitObjCMessageExpr(const ObjCMessageExpr *ME);
+
+ private:
+ void reportPluralMisuseError(const Stmt *S) const;
+ bool isCheckingPlurality(const Expr *E) const;
+ };
+
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager &Mgr,
+ BugReporter &BR) const {
+ MethodCrawler Visitor(BR, this, Mgr.getAnalysisDeclContext(D));
+ Visitor.TraverseDecl(const_cast<Decl *>(D));
+ }
+};
+} // end anonymous namespace
+
+// Checks the condition of the IfStmt and returns true if one
+// of the following heuristics is met:
+// 1) The condition is a variable with "singular" or "plural" in the name
+// 2) The condition is a binary operator with 1 or 2 on the right-hand side
+bool PluralMisuseChecker::MethodCrawler::isCheckingPlurality(
+ const Expr *Condition) const {
+ const BinaryOperator *BO = nullptr;
+ // Accounts for when a VarDecl represents a BinaryOperator
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Condition)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ const Expr *InitExpr = VD->getInit();
+ if (InitExpr) {
+ if (const BinaryOperator *B =
+ dyn_cast<BinaryOperator>(InitExpr->IgnoreParenImpCasts())) {
+ BO = B;
+ }
+ }
+ if (VD->getName().lower().find("plural") != StringRef::npos ||
+ VD->getName().lower().find("singular") != StringRef::npos) {
+ return true;
+ }
+ }
+ } else if (const BinaryOperator *B = dyn_cast<BinaryOperator>(Condition)) {
+ BO = B;
+ }
+
+ if (BO == nullptr)
+ return false;
+
+ if (IntegerLiteral *IL = dyn_cast_or_null<IntegerLiteral>(
+ BO->getRHS()->IgnoreParenImpCasts())) {
+ llvm::APInt Value = IL->getValue();
+ if (Value == 1 || Value == 2) {
+ return true;
+ }
+ }
+ return false;
+}
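+
+// For illustration (hypothetical user code), both conditions below match:
+//   BOOL isPlural = itemCount != 1;
+//   if (isPlural) { ... }        // heuristic 1: "plural" in the name
+//   if (itemCount > 1) { ... }   // heuristic 2: comparison against 1 or 2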
+
+// A CallExpr with "LOC" in its identifier that takes in a string literal
+// has been shown to almost always be a function that returns a localized
+// string. Raise a diagnostic when this is in a statement that matches
+// the condition.
+bool PluralMisuseChecker::MethodCrawler::VisitCallExpr(const CallExpr *CE) {
+ if (InMatchingStatement) {
+ if (const FunctionDecl *FD = CE->getDirectCallee()) {
+ std::string NormalizedName =
+ StringRef(FD->getNameInfo().getAsString()).lower();
+ if (NormalizedName.find("loc") != std::string::npos) {
+ for (const Expr *Arg : CE->arguments()) {
+ if (isa<ObjCStringLiteral>(Arg))
+ reportPluralMisuseError(CE);
+ }
+ }
+ }
+ }
+ return true;
+}
+
+// The other case is for NSLocalizedString, which also returns
+// a localized string. It's a macro for the ObjCMessageExpr
+// [NSBundle localizedStringForKey:value:table:]. Raise a
+// diagnostic when this is in a statement that matches
+// the condition.
+bool PluralMisuseChecker::MethodCrawler::VisitObjCMessageExpr(
+ const ObjCMessageExpr *ME) {
+ const ObjCInterfaceDecl *OD = ME->getReceiverInterface();
+ if (!OD)
+ return true;
+
+ const IdentifierInfo *odInfo = OD->getIdentifier();
+
+ if (odInfo->isStr("NSBundle") &&
+ ME->getSelector().getAsString() == "localizedStringForKey:value:table:") {
+ if (InMatchingStatement) {
+ reportPluralMisuseError(ME);
+ }
+ }
+ return true;
+}
+
+/// Override TraverseIfStmt so we know when we are done traversing an IfStmt
+bool PluralMisuseChecker::MethodCrawler::TraverseIfStmt(IfStmt *I) {
+ RecursiveASTVisitor<MethodCrawler>::TraverseIfStmt(I);
+ return EndVisitIfStmt(I);
+}
+
+// EndVisit callbacks are not provided by the RecursiveASTVisitor
+// so we override TraverseIfStmt and make a call to EndVisitIfStmt
+// after traversing the IfStmt
+bool PluralMisuseChecker::MethodCrawler::EndVisitIfStmt(IfStmt *I) {
+ MatchingStatements.pop_back();
+ if (!MatchingStatements.empty()) {
+ if (MatchingStatements.back() != nullptr) {
+ InMatchingStatement = true;
+ return true;
+ }
+ }
+ InMatchingStatement = false;
+ return true;
+}
+
+bool PluralMisuseChecker::MethodCrawler::VisitIfStmt(const IfStmt *I) {
+ const Expr *Condition = I->getCond()->IgnoreParenImpCasts();
+ if (isCheckingPlurality(Condition)) {
+ MatchingStatements.push_back(I);
+ InMatchingStatement = true;
+ } else {
+ MatchingStatements.push_back(nullptr);
+ InMatchingStatement = false;
+ }
+
+ return true;
+}
+
+// Preliminary support for conditional operators.
+bool PluralMisuseChecker::MethodCrawler::TraverseConditionalOperator(
+ ConditionalOperator *C) {
+ RecursiveASTVisitor<MethodCrawler>::TraverseConditionalOperator(C);
+ MatchingStatements.pop_back();
+ if (!MatchingStatements.empty()) {
+ if (MatchingStatements.back() != nullptr)
+ InMatchingStatement = true;
+ else
+ InMatchingStatement = false;
+ } else {
+ InMatchingStatement = false;
+ }
+ return true;
+}
+
+bool PluralMisuseChecker::MethodCrawler::VisitConditionalOperator(
+ const ConditionalOperator *C) {
+ const Expr *Condition = C->getCond()->IgnoreParenImpCasts();
+ if (isCheckingPlurality(Condition)) {
+ MatchingStatements.push_back(C);
+ InMatchingStatement = true;
+ } else {
+ MatchingStatements.push_back(nullptr);
+ InMatchingStatement = false;
+ }
+ return true;
+}
+
+void PluralMisuseChecker::MethodCrawler::reportPluralMisuseError(
+ const Stmt *S) const {
+ // Generate the bug report.
+  BR.EmitBasicReport(AC->getDecl(), Checker, "Plural Misuse",
+                     "Localizability Issue (Apple)",
+                     "Plural cases are not supported across all languages. "
+                     "Use a .stringsdict file instead",
+                     PathDiagnosticLocation(S, BR.getSourceManager(), AC));
+}
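+
+// For illustration (hypothetical user code), this is the reported pattern:
+//   NSString *msg = count == 1
+//       ? NSLocalizedString(@"1 item", @"singular")
+//       : NSLocalizedString(@"%d items", @"plural");
+// A .stringsdict entry expresses such plural rules per language instead.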
+
+//===----------------------------------------------------------------------===//
+// Checker registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerNonLocalizedStringChecker(CheckerManager &mgr) {
+ NonLocalizedStringChecker *checker =
+ mgr.registerChecker<NonLocalizedStringChecker>();
+ checker->IsAggressive =
+ mgr.getAnalyzerOptions().getBooleanOption("AggressiveReport", false);
+}
+
+void ento::registerEmptyLocalizationContextChecker(CheckerManager &mgr) {
+ mgr.registerChecker<EmptyLocalizationContextChecker>();
+}
+
+void ento::registerPluralMisuseChecker(CheckerManager &mgr) {
+ mgr.registerChecker<PluralMisuseChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
new file mode 100644
index 0000000..1e56d70
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
@@ -0,0 +1,623 @@
+//==--- MacOSKeychainAPIChecker.cpp ------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This checker flags misuses of the Keychain API. In particular, the
+// password data allocated/returned by SecKeychainItemCopyContent,
+// SecKeychainFindGenericPassword, SecKeychainFindInternetPassword functions has
+// to be freed using a call to SecKeychainItemFreeContent.
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class MacOSKeychainAPIChecker : public Checker<check::PreStmt<CallExpr>,
+ check::PostStmt<CallExpr>,
+ check::DeadSymbols> {
+ mutable std::unique_ptr<BugType> BT;
+
+public:
+  /// AllocationState is a part of the checker-specific state, kept together
+  /// with the MemRegion corresponding to the allocated data.
+ struct AllocationState {
+ /// The index of the allocator function.
+ unsigned int AllocatorIdx;
+ SymbolRef Region;
+
+ AllocationState(const Expr *E, unsigned int Idx, SymbolRef R) :
+ AllocatorIdx(Idx),
+ Region(R) {}
+
+ bool operator==(const AllocationState &X) const {
+ return (AllocatorIdx == X.AllocatorIdx &&
+ Region == X.Region);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(AllocatorIdx);
+ ID.AddPointer(Region);
+ }
+ };
+
+ void checkPreStmt(const CallExpr *S, CheckerContext &C) const;
+ void checkPostStmt(const CallExpr *S, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
+
+private:
+ typedef std::pair<SymbolRef, const AllocationState*> AllocationPair;
+ typedef SmallVector<AllocationPair, 2> AllocationPairVec;
+
+ enum APIKind {
+ /// Denotes functions tracked by this checker.
+ ValidAPI = 0,
+ /// The functions commonly/mistakenly used in place of the given API.
+ ErrorAPI = 1,
+ /// The functions which may allocate the data. These are tracked to reduce
+ /// the false alarm rate.
+ PossibleAPI = 2
+ };
+ /// Stores the information about the allocator and deallocator functions -
+ /// these are the functions the checker is tracking.
+ struct ADFunctionInfo {
+ const char* Name;
+ unsigned int Param;
+ unsigned int DeallocatorIdx;
+ APIKind Kind;
+ };
+ static const unsigned InvalidIdx = 100000;
+ static const unsigned FunctionsToTrackSize = 8;
+ static const ADFunctionInfo FunctionsToTrack[FunctionsToTrackSize];
+  /// The value that represents the no-error return value of allocator
+  /// functions.
+ static const unsigned NoErr = 0;
+
+ /// Given the function name, returns the index of the allocator/deallocator
+ /// function.
+ static unsigned getTrackedFunctionIndex(StringRef Name, bool IsAllocator);
+
+ inline void initBugType() const {
+ if (!BT)
+ BT.reset(new BugType(this, "Improper use of SecKeychain API",
+ "API Misuse (Apple)"));
+ }
+
+ void generateDeallocatorMismatchReport(const AllocationPair &AP,
+ const Expr *ArgExpr,
+ CheckerContext &C) const;
+
+ /// Find the allocation site for Sym on the path leading to the node N.
+ const ExplodedNode *getAllocationNode(const ExplodedNode *N, SymbolRef Sym,
+ CheckerContext &C) const;
+
+ std::unique_ptr<BugReport> generateAllocatedDataNotReleasedReport(
+ const AllocationPair &AP, ExplodedNode *N, CheckerContext &C) const;
+
+ /// Check if RetSym evaluates to an error value in the current state.
+ bool definitelyReturnedError(SymbolRef RetSym,
+ ProgramStateRef State,
+ SValBuilder &Builder,
+ bool noError = false) const;
+
+ /// Check if RetSym evaluates to a NoErr value in the current state.
+ bool definitelyDidnotReturnError(SymbolRef RetSym,
+ ProgramStateRef State,
+ SValBuilder &Builder) const {
+ return definitelyReturnedError(RetSym, State, Builder, true);
+ }
+
+ /// Mark an AllocationPair interesting for diagnostic reporting.
+ void markInteresting(BugReport *R, const AllocationPair &AP) const {
+ R->markInteresting(AP.first);
+ R->markInteresting(AP.second->Region);
+ }
+
+ /// The bug visitor which allows us to print extra diagnostics along the
+ /// BugReport path. For example, showing the allocation site of the leaked
+ /// region.
+ class SecKeychainBugVisitor
+ : public BugReporterVisitorImpl<SecKeychainBugVisitor> {
+ protected:
+ // The allocated region symbol tracked by the main analysis.
+ SymbolRef Sym;
+
+ public:
+ SecKeychainBugVisitor(SymbolRef S) : Sym(S) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ static int X = 0;
+ ID.AddPointer(&X);
+ ID.AddPointer(Sym);
+ }
+
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
+ };
+};
+}
+
+/// ProgramState traits to store the currently allocated (and not yet freed)
+/// symbols. This is a map from the allocated content symbol to the
+/// corresponding AllocationState.
+REGISTER_MAP_WITH_PROGRAMSTATE(AllocatedData,
+ SymbolRef,
+ MacOSKeychainAPIChecker::AllocationState)
+
+static bool isEnclosingFunctionParam(const Expr *E) {
+ E = E->IgnoreParenCasts();
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ const ValueDecl *VD = DRE->getDecl();
+ if (isa<ImplicitParamDecl>(VD) || isa<ParmVarDecl>(VD))
+ return true;
+ }
+ return false;
+}
+
+const MacOSKeychainAPIChecker::ADFunctionInfo
+ MacOSKeychainAPIChecker::FunctionsToTrack[FunctionsToTrackSize] = {
+ {"SecKeychainItemCopyContent", 4, 3, ValidAPI}, // 0
+ {"SecKeychainFindGenericPassword", 6, 3, ValidAPI}, // 1
+ {"SecKeychainFindInternetPassword", 13, 3, ValidAPI}, // 2
+ {"SecKeychainItemFreeContent", 1, InvalidIdx, ValidAPI}, // 3
+ {"SecKeychainItemCopyAttributesAndData", 5, 5, ValidAPI}, // 4
+ {"SecKeychainItemFreeAttributesAndData", 1, InvalidIdx, ValidAPI}, // 5
+ {"free", 0, InvalidIdx, ErrorAPI}, // 6
+ {"CFStringCreateWithBytesNoCopy", 1, InvalidIdx, PossibleAPI}, // 7
+};
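+
+// The DeallocatorIdx field pairs each allocator with its releasing function:
+// e.g. data from SecKeychainItemCopyContent (index 0) must be freed with
+// SecKeychainItemFreeContent (index 3). Entries whose DeallocatorIdx is
+// InvalidIdx are treated as (possible) deallocators rather than allocators.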
+
+unsigned MacOSKeychainAPIChecker::getTrackedFunctionIndex(StringRef Name,
+ bool IsAllocator) {
+ for (unsigned I = 0; I < FunctionsToTrackSize; ++I) {
+ ADFunctionInfo FI = FunctionsToTrack[I];
+ if (FI.Name != Name)
+ continue;
+ // Make sure the function is of the right type (allocator vs deallocator).
+ if (IsAllocator && (FI.DeallocatorIdx == InvalidIdx))
+ return InvalidIdx;
+ if (!IsAllocator && (FI.DeallocatorIdx != InvalidIdx))
+ return InvalidIdx;
+
+ return I;
+ }
+ // The function is not tracked.
+ return InvalidIdx;
+}
+
+static bool isBadDeallocationArgument(const MemRegion *Arg) {
+ if (!Arg)
+ return false;
+ return isa<AllocaRegion>(Arg) || isa<BlockDataRegion>(Arg) ||
+ isa<TypedRegion>(Arg);
+}
+
+/// Given the address expression, retrieve the value it's pointing to. Assume
+/// that value is itself an address, and return the corresponding symbol.
+static SymbolRef getAsPointeeSymbol(const Expr *Expr,
+ CheckerContext &C) {
+ ProgramStateRef State = C.getState();
+ SVal ArgV = State->getSVal(Expr, C.getLocationContext());
+
+ if (Optional<loc::MemRegionVal> X = ArgV.getAs<loc::MemRegionVal>()) {
+ StoreManager& SM = C.getStoreManager();
+ SymbolRef sym = SM.getBinding(State->getStore(), *X).getAsLocSymbol();
+ if (sym)
+ return sym;
+ }
+ return nullptr;
+}
+
+// When checking for error code, we need to consider the following cases:
+// 1) noErr / [0]
+// 2) someErr / [1, inf]
+// 3) unknown
+// If noError, returns true iff (1).
+// If !noError, returns true iff (2).
+bool MacOSKeychainAPIChecker::definitelyReturnedError(SymbolRef RetSym,
+ ProgramStateRef State,
+ SValBuilder &Builder,
+ bool noError) const {
+ DefinedOrUnknownSVal NoErrVal = Builder.makeIntVal(NoErr,
+ Builder.getSymbolManager().getType(RetSym));
+ DefinedOrUnknownSVal NoErr = Builder.evalEQ(State, NoErrVal,
+ nonloc::SymbolVal(RetSym));
+ ProgramStateRef ErrState = State->assume(NoErr, noError);
+ return ErrState == State;
+}
+
+// Report deallocator mismatch. Remove the region from tracking - reporting a
+// missing free error after this one is redundant.
+void MacOSKeychainAPIChecker::
+ generateDeallocatorMismatchReport(const AllocationPair &AP,
+ const Expr *ArgExpr,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ State = State->remove<AllocatedData>(AP.first);
+ ExplodedNode *N = C.generateNonFatalErrorNode(State);
+
+ if (!N)
+ return;
+ initBugType();
+ SmallString<80> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+ unsigned int PDeallocIdx =
+ FunctionsToTrack[AP.second->AllocatorIdx].DeallocatorIdx;
+
+ os << "Deallocator doesn't match the allocator: '"
+ << FunctionsToTrack[PDeallocIdx].Name << "' should be used.";
+ auto Report = llvm::make_unique<BugReport>(*BT, os.str(), N);
+ Report->addVisitor(llvm::make_unique<SecKeychainBugVisitor>(AP.first));
+ Report->addRange(ArgExpr->getSourceRange());
+ markInteresting(Report.get(), AP);
+ C.emitReport(std::move(Report));
+}
+
+void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ unsigned idx = InvalidIdx;
+ ProgramStateRef State = C.getState();
+
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD || FD->getKind() != Decl::Function)
+ return;
+
+ StringRef funName = C.getCalleeName(FD);
+ if (funName.empty())
+ return;
+
+ // If it is a call to an allocator function, it could be a double allocation.
+ idx = getTrackedFunctionIndex(funName, true);
+ if (idx != InvalidIdx) {
+ unsigned paramIdx = FunctionsToTrack[idx].Param;
+ if (CE->getNumArgs() <= paramIdx)
+ return;
+
+ const Expr *ArgExpr = CE->getArg(paramIdx);
+ if (SymbolRef V = getAsPointeeSymbol(ArgExpr, C))
+ if (const AllocationState *AS = State->get<AllocatedData>(V)) {
+ if (!definitelyReturnedError(AS->Region, State, C.getSValBuilder())) {
+ // Remove the value from the state. The new symbol will be added for
+ // tracking when the second allocator is processed in checkPostStmt().
+ State = State->remove<AllocatedData>(V);
+ ExplodedNode *N = C.generateNonFatalErrorNode(State);
+ if (!N)
+ return;
+ initBugType();
+ SmallString<128> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+ unsigned int DIdx = FunctionsToTrack[AS->AllocatorIdx].DeallocatorIdx;
+ os << "Allocated data should be released before another call to "
+ << "the allocator: missing a call to '"
+ << FunctionsToTrack[DIdx].Name
+ << "'.";
+ auto Report = llvm::make_unique<BugReport>(*BT, os.str(), N);
+ Report->addVisitor(llvm::make_unique<SecKeychainBugVisitor>(V));
+ Report->addRange(ArgExpr->getSourceRange());
+ Report->markInteresting(AS->Region);
+ C.emitReport(std::move(Report));
+ }
+ }
+ return;
+ }
+
+ // Is it a call to one of deallocator functions?
+ idx = getTrackedFunctionIndex(funName, false);
+ if (idx == InvalidIdx)
+ return;
+
+ unsigned paramIdx = FunctionsToTrack[idx].Param;
+ if (CE->getNumArgs() <= paramIdx)
+ return;
+
+ // Check the argument to the deallocator.
+ const Expr *ArgExpr = CE->getArg(paramIdx);
+ SVal ArgSVal = State->getSVal(ArgExpr, C.getLocationContext());
+
+ // Undef is reported by another checker.
+ if (ArgSVal.isUndef())
+ return;
+
+ SymbolRef ArgSM = ArgSVal.getAsLocSymbol();
+
+ // If the argument is coming from the heap, globals, or unknown, do not
+ // report it.
+ bool RegionArgIsBad = false;
+ if (!ArgSM) {
+ if (!isBadDeallocationArgument(ArgSVal.getAsRegion()))
+ return;
+ RegionArgIsBad = true;
+ }
+
+ // Is the argument to the call being tracked?
+ const AllocationState *AS = State->get<AllocatedData>(ArgSM);
+ if (!AS && FunctionsToTrack[idx].Kind != ValidAPI) {
+ return;
+ }
+ // If trying to free data which has not been allocated yet, report as a bug.
+ // TODO: We might want a more precise diagnostic for double free
+ // (that would involve tracking all the freed symbols in the checker state).
+ if (!AS || RegionArgIsBad) {
+ // It is possible that this is a false positive - the argument might
+ // have entered as an enclosing function parameter.
+ if (isEnclosingFunctionParam(ArgExpr))
+ return;
+
+ ExplodedNode *N = C.generateNonFatalErrorNode(State);
+ if (!N)
+ return;
+ initBugType();
+ auto Report = llvm::make_unique<BugReport>(
+ *BT, "Trying to free data which has not been allocated.", N);
+ Report->addRange(ArgExpr->getSourceRange());
+ if (AS)
+ Report->markInteresting(AS->Region);
+ C.emitReport(std::move(Report));
+ return;
+ }
+
+ // Process functions which might deallocate.
+ if (FunctionsToTrack[idx].Kind == PossibleAPI) {
+
+ if (funName == "CFStringCreateWithBytesNoCopy") {
+ const Expr *DeallocatorExpr = CE->getArg(5)->IgnoreParenCasts();
+ // NULL ~ default deallocator, so warn.
+ if (DeallocatorExpr->isNullPointerConstant(C.getASTContext(),
+ Expr::NPC_ValueDependentIsNotNull)) {
+ const AllocationPair AP = std::make_pair(ArgSM, AS);
+ generateDeallocatorMismatchReport(AP, ArgExpr, C);
+ return;
+ }
+ // One of the default allocators, so warn.
+ if (const DeclRefExpr *DE = dyn_cast<DeclRefExpr>(DeallocatorExpr)) {
+ StringRef DeallocatorName = DE->getFoundDecl()->getName();
+ if (DeallocatorName == "kCFAllocatorDefault" ||
+ DeallocatorName == "kCFAllocatorSystemDefault" ||
+ DeallocatorName == "kCFAllocatorMalloc") {
+ const AllocationPair AP = std::make_pair(ArgSM, AS);
+ generateDeallocatorMismatchReport(AP, ArgExpr, C);
+ return;
+ }
+ // If kCFAllocatorNull, which does not deallocate, we still have to
+ // find the deallocator.
+ if (DE->getFoundDecl()->getName() == "kCFAllocatorNull")
+ return;
+ }
+      // In all other cases, assume the user supplied a correct deallocator
+      // that will free the memory, so stop tracking.
+ State = State->remove<AllocatedData>(ArgSM);
+ C.addTransition(State);
+ return;
+ }
+
+ llvm_unreachable("We know of no other possible APIs.");
+ }
+
+ // The call is deallocating a value we previously allocated, so remove it
+ // from the next state.
+ State = State->remove<AllocatedData>(ArgSM);
+
+ // Check if the proper deallocator is used.
+ unsigned int PDeallocIdx = FunctionsToTrack[AS->AllocatorIdx].DeallocatorIdx;
+ if (PDeallocIdx != idx || (FunctionsToTrack[idx].Kind == ErrorAPI)) {
+ const AllocationPair AP = std::make_pair(ArgSM, AS);
+ generateDeallocatorMismatchReport(AP, ArgExpr, C);
+ return;
+ }
+
+ // If the buffer can be null and the return status can be an error,
+ // report a bad call to free.
+ if (State->assume(ArgSVal.castAs<DefinedSVal>(), false) &&
+ !definitelyDidnotReturnError(AS->Region, State, C.getSValBuilder())) {
+ ExplodedNode *N = C.generateNonFatalErrorNode(State);
+ if (!N)
+ return;
+ initBugType();
+ auto Report = llvm::make_unique<BugReport>(
+ *BT, "Only call free if a valid (non-NULL) buffer was returned.", N);
+ Report->addVisitor(llvm::make_unique<SecKeychainBugVisitor>(ArgSM));
+ Report->addRange(ArgExpr->getSourceRange());
+ Report->markInteresting(AS->Region);
+ C.emitReport(std::move(Report));
+ return;
+ }
+
+ C.addTransition(State);
+}
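+
+// For illustration (hypothetical user code), the mismatch path above flags:
+//   UInt32 length; void *data;
+//   SecKeychainItemCopyContent(item, NULL, NULL, &length, &data);
+//   free(data);  // warning: 'SecKeychainItemFreeContent' should be used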
+
+void MacOSKeychainAPIChecker::checkPostStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD || FD->getKind() != Decl::Function)
+ return;
+
+ StringRef funName = C.getCalleeName(FD);
+
+ // If a value has been allocated, add it to the set for tracking.
+ unsigned idx = getTrackedFunctionIndex(funName, true);
+ if (idx == InvalidIdx)
+ return;
+
+ const Expr *ArgExpr = CE->getArg(FunctionsToTrack[idx].Param);
+ // If the argument entered as an enclosing function parameter, skip it to
+ // avoid false positives.
+ if (isEnclosingFunctionParam(ArgExpr) &&
+ C.getLocationContext()->getParent() == nullptr)
+ return;
+
+ if (SymbolRef V = getAsPointeeSymbol(ArgExpr, C)) {
+ // If the argument points to something that's not a symbolic region, it
+ // can be:
+ // - unknown (cannot reason about it)
+    // - undefined (already reported by another checker)
+    // - constant (a null constant should not be tracked; any other
+    //     constant will generate a compiler warning)
+    // - goto (should be reported by another checker)
+
+    // The call return value symbol should stay alive for as long as the
+    // allocated value symbol, since our diagnostics depend on the value
+    // returned by the call. Ex: data should only be freed if noErr was
+    // returned during allocation.
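+    //
+    // For instance (hypothetical caller code):
+    //
+    //   void *data; UInt32 len;
+    //   OSStatus st = SecKeychainItemCopyContent(item, NULL, NULL,
+    //                                            &len, &data);
+    //   if (st == noErr)
+    //     SecKeychainItemFreeContent(NULL, data); // free only on success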
+ SymbolRef RetStatusSymbol =
+ State->getSVal(CE, C.getLocationContext()).getAsSymbol();
+ C.getSymbolManager().addSymbolDependency(V, RetStatusSymbol);
+
+ // Track the allocated value in the checker state.
+ State = State->set<AllocatedData>(V, AllocationState(ArgExpr, idx,
+ RetStatusSymbol));
+ assert(State);
+ C.addTransition(State);
+ }
+}
+
+// TODO: This logic is the same as in Malloc checker.
+const ExplodedNode *
+MacOSKeychainAPIChecker::getAllocationNode(const ExplodedNode *N,
+ SymbolRef Sym,
+ CheckerContext &C) const {
+ const LocationContext *LeakContext = N->getLocationContext();
+ // Walk the ExplodedGraph backwards and find the first node that referred to
+ // the tracked symbol.
+ const ExplodedNode *AllocNode = N;
+
+ while (N) {
+ if (!N->getState()->get<AllocatedData>(Sym))
+ break;
+    // The allocation node is the last node in the current or parent context
+    // in which the symbol was tracked.
+ const LocationContext *NContext = N->getLocationContext();
+ if (NContext == LeakContext ||
+ NContext->isParentOf(LeakContext))
+ AllocNode = N;
+ N = N->pred_empty() ? nullptr : *(N->pred_begin());
+ }
+
+ return AllocNode;
+}
+
+std::unique_ptr<BugReport>
+MacOSKeychainAPIChecker::generateAllocatedDataNotReleasedReport(
+ const AllocationPair &AP, ExplodedNode *N, CheckerContext &C) const {
+ const ADFunctionInfo &FI = FunctionsToTrack[AP.second->AllocatorIdx];
+ initBugType();
+ SmallString<70> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+ os << "Allocated data is not released: missing a call to '"
+ << FunctionsToTrack[FI.DeallocatorIdx].Name << "'.";
+
+ // Most bug reports are cached at the location where they occurred.
+ // With leaks, we want to unique them by the location where they were
+ // allocated, and only report a single path.
+ PathDiagnosticLocation LocUsedForUniqueing;
+ const ExplodedNode *AllocNode = getAllocationNode(N, AP.first, C);
+ const Stmt *AllocStmt = nullptr;
+ ProgramPoint P = AllocNode->getLocation();
+ if (Optional<CallExitEnd> Exit = P.getAs<CallExitEnd>())
+ AllocStmt = Exit->getCalleeContext()->getCallSite();
+ else if (Optional<clang::PostStmt> PS = P.getAs<clang::PostStmt>())
+ AllocStmt = PS->getStmt();
+
+ if (AllocStmt)
+ LocUsedForUniqueing = PathDiagnosticLocation::createBegin(AllocStmt,
+ C.getSourceManager(),
+ AllocNode->getLocationContext());
+
+ auto Report =
+ llvm::make_unique<BugReport>(*BT, os.str(), N, LocUsedForUniqueing,
+ AllocNode->getLocationContext()->getDecl());
+
+ Report->addVisitor(llvm::make_unique<SecKeychainBugVisitor>(AP.first));
+ markInteresting(Report.get(), AP);
+ return Report;
+}
+
+void MacOSKeychainAPIChecker::checkDeadSymbols(SymbolReaper &SR,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ AllocatedDataTy ASet = State->get<AllocatedData>();
+ if (ASet.isEmpty())
+ return;
+
+ bool Changed = false;
+ AllocationPairVec Errors;
+  for (AllocatedDataTy::iterator I = ASet.begin(), E = ASet.end();
+       I != E; ++I) {
+ if (SR.isLive(I->first))
+ continue;
+
+ Changed = true;
+ State = State->remove<AllocatedData>(I->first);
+ // If the allocated symbol is null or if the allocation call might have
+ // returned an error, do not report.
+ ConstraintManager &CMgr = State->getConstraintManager();
+ ConditionTruthVal AllocFailed = CMgr.isNull(State, I.getKey());
+ if (AllocFailed.isConstrainedTrue() ||
+ definitelyReturnedError(I->second.Region, State, C.getSValBuilder()))
+ continue;
+ Errors.push_back(std::make_pair(I->first, &I->second));
+ }
+ if (!Changed) {
+ // Generate the new, cleaned up state.
+ C.addTransition(State);
+ return;
+ }
+
+ static CheckerProgramPointTag Tag(this, "DeadSymbolsLeak");
+ ExplodedNode *N = C.generateNonFatalErrorNode(C.getState(), &Tag);
+ if (!N)
+ return;
+
+ // Generate the error reports.
+ for (const auto &P : Errors)
+ C.emitReport(generateAllocatedDataNotReleasedReport(P, N, C));
+
+ // Generate the new, cleaned up state.
+ C.addTransition(State, N);
+}
+
+PathDiagnosticPiece *MacOSKeychainAPIChecker::SecKeychainBugVisitor::VisitNode(
+ const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+ const AllocationState *AS = N->getState()->get<AllocatedData>(Sym);
+ if (!AS)
+ return nullptr;
+ const AllocationState *ASPrev = PrevN->getState()->get<AllocatedData>(Sym);
+ if (ASPrev)
+ return nullptr;
+
+  // (!ASPrev && AS) ~ We started tracking the symbol in node N, so it must
+  // be the allocation site.
+ const CallExpr *CE =
+ cast<CallExpr>(N->getLocation().castAs<StmtPoint>().getStmt());
+ const FunctionDecl *funDecl = CE->getDirectCallee();
+ assert(funDecl && "We do not support indirect function calls as of now.");
+ StringRef funName = funDecl->getName();
+
+ // Get the expression of the corresponding argument.
+ unsigned Idx = getTrackedFunctionIndex(funName, true);
+ assert(Idx != InvalidIdx && "This should be a call to an allocator.");
+ const Expr *ArgExpr = CE->getArg(FunctionsToTrack[Idx].Param);
+ PathDiagnosticLocation Pos(ArgExpr, BRC.getSourceManager(),
+ N->getLocationContext());
+ return new PathDiagnosticEventPiece(Pos, "Data is allocated here.");
+}
+
+void ento::registerMacOSKeychainAPIChecker(CheckerManager &mgr) {
+ mgr.registerChecker<MacOSKeychainAPIChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
new file mode 100644
index 0000000..4cbe97b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
@@ -0,0 +1,128 @@
+// MacOSXAPIChecker.cpp - Checks proper use of various MacOS X APIs -*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines MacOSXAPIChecker, which is an assortment of checks on calls
+// to various, widely used Apple APIs.
+//
+// FIXME: What's currently in BasicObjCFoundationChecks.cpp should be migrated
+// to here, using the new Checker interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class MacOSXAPIChecker : public Checker< check::PreStmt<CallExpr> > {
+ mutable std::unique_ptr<BugType> BT_dispatchOnce;
+
+public:
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+
+ void CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
+ StringRef FName) const;
+
+ typedef void (MacOSXAPIChecker::*SubChecker)(CheckerContext &,
+ const CallExpr *,
+ StringRef FName) const;
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// dispatch_once and dispatch_once_f
+//===----------------------------------------------------------------------===//
+
+void MacOSXAPIChecker::CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
+ StringRef FName) const {
+ if (CE->getNumArgs() < 1)
+ return;
+
+ // Check if the first argument is stack allocated. If so, issue a warning
+ // because that's likely to be bad news.
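+  //
+  // A sketch of the misuse being flagged (hypothetical user code):
+  //
+  //   void f() {
+  //     dispatch_once_t pred;               // stack storage: flagged
+  //     dispatch_once(&pred, ^{ setup(); });
+  //   }
+  //
+  //   static dispatch_once_t gPred;         // static storage: fine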
+ ProgramStateRef state = C.getState();
+ const MemRegion *R =
+ state->getSVal(CE->getArg(0), C.getLocationContext()).getAsRegion();
+ if (!R || !isa<StackSpaceRegion>(R->getMemorySpace()))
+ return;
+
+ ExplodedNode *N = C.generateErrorNode(state);
+ if (!N)
+ return;
+
+ if (!BT_dispatchOnce)
+ BT_dispatchOnce.reset(new BugType(this, "Improper use of 'dispatch_once'",
+ "API Misuse (Apple)"));
+
+ // Handle _dispatch_once. In some versions of the OS X SDK we have the case
+ // that dispatch_once is a macro that wraps a call to _dispatch_once.
+  // _dispatch_once is then a function which calls the real dispatch_once.
+ // Users do not care; they just want the warning at the top-level call.
+ if (CE->getLocStart().isMacroID()) {
+ StringRef TrimmedFName = FName.ltrim("_");
+ if (TrimmedFName != FName)
+ FName = TrimmedFName;
+ }
+
+ SmallString<256> S;
+ llvm::raw_svector_ostream os(S);
+ os << "Call to '" << FName << "' uses";
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R))
+ os << " the local variable '" << VR->getDecl()->getName() << '\'';
+ else
+ os << " stack allocated memory";
+ os << " for the predicate value. Using such transient memory for "
+ "the predicate is potentially dangerous.";
+ if (isa<VarRegion>(R) && isa<StackLocalsSpaceRegion>(R->getMemorySpace()))
+ os << " Perhaps you intended to declare the variable as 'static'?";
+
+ auto report = llvm::make_unique<BugReport>(*BT_dispatchOnce, os.str(), N);
+ report->addRange(CE->getArg(0)->getSourceRange());
+ C.emitReport(std::move(report));
+}
+
+//===----------------------------------------------------------------------===//
+// Central dispatch function.
+//===----------------------------------------------------------------------===//
+
+void MacOSXAPIChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ StringRef Name = C.getCalleeName(CE);
+ if (Name.empty())
+ return;
+
+ SubChecker SC =
+ llvm::StringSwitch<SubChecker>(Name)
+ .Cases("dispatch_once",
+ "_dispatch_once",
+ "dispatch_once_f",
+ &MacOSXAPIChecker::CheckDispatchOnce)
+ .Default(nullptr);
+
+ if (SC)
+ (this->*SC)(C, CE, Name);
+}
+
+//===----------------------------------------------------------------------===//
+// Registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerMacOSXAPIChecker(CheckerManager &mgr) {
+ mgr.registerChecker<MacOSXAPIChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
new file mode 100644
index 0000000..ce2c194
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -0,0 +1,2744 @@
+//=== MallocChecker.cpp - A malloc/free checker -------------------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the malloc/free checker, which checks for potential
+// memory leaks, double-free, and use-after-free problems.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "InterCheckerAPI.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include <climits>
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+// Used to check correspondence between allocators and deallocators.
+enum AllocationFamily {
+ AF_None,
+ AF_Malloc,
+ AF_CXXNew,
+ AF_CXXNewArray,
+ AF_IfNameIndex,
+ AF_Alloca
+};
+
+class RefState {
+ enum Kind { // Reference to allocated memory.
+ Allocated,
+ // Reference to zero-allocated memory.
+ AllocatedOfSizeZero,
+ // Reference to released/freed memory.
+ Released,
+ // The responsibility for freeing resources has transferred from
+ // this reference. A relinquished symbol should not be freed.
+ Relinquished,
+ // We are no longer guaranteed to have observed all manipulations
+ // of this pointer/memory. For example, it could have been
+ // passed as a parameter to an opaque function.
+ Escaped
+ };
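+
+  // Informal sketch of the transitions tracked below (not exhaustive):
+  //   (untracked) -> Allocated -> Released        normal lifetime
+  //   Allocated -> Relinquished                   ownership handed off
+  //   Allocated -> Escaped                        opaque manipulation seen
+  //   use after Released / AllocatedOfSizeZero => warning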
+
+ const Stmt *S;
+ unsigned K : 3; // Kind enum, but stored as a bitfield.
+ unsigned Family : 29; // Rest of 32-bit word, currently just an allocation
+ // family.
+
+ RefState(Kind k, const Stmt *s, unsigned family)
+ : S(s), K(k), Family(family) {
+ assert(family != AF_None);
+ }
+public:
+ bool isAllocated() const { return K == Allocated; }
+ bool isAllocatedOfSizeZero() const { return K == AllocatedOfSizeZero; }
+ bool isReleased() const { return K == Released; }
+ bool isRelinquished() const { return K == Relinquished; }
+ bool isEscaped() const { return K == Escaped; }
+ AllocationFamily getAllocationFamily() const {
+ return (AllocationFamily)Family;
+ }
+ const Stmt *getStmt() const { return S; }
+
+ bool operator==(const RefState &X) const {
+ return K == X.K && S == X.S && Family == X.Family;
+ }
+
+ static RefState getAllocated(unsigned family, const Stmt *s) {
+ return RefState(Allocated, s, family);
+ }
+ static RefState getAllocatedOfSizeZero(const RefState *RS) {
+ return RefState(AllocatedOfSizeZero, RS->getStmt(),
+ RS->getAllocationFamily());
+ }
+ static RefState getReleased(unsigned family, const Stmt *s) {
+ return RefState(Released, s, family);
+ }
+ static RefState getRelinquished(unsigned family, const Stmt *s) {
+ return RefState(Relinquished, s, family);
+ }
+ static RefState getEscaped(const RefState *RS) {
+ return RefState(Escaped, RS->getStmt(), RS->getAllocationFamily());
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(K);
+ ID.AddPointer(S);
+ ID.AddInteger(Family);
+ }
+
+ void dump(raw_ostream &OS) const {
+ switch (static_cast<Kind>(K)) {
+#define CASE(ID) case ID: OS << #ID; break;
+ CASE(Allocated)
+ CASE(AllocatedOfSizeZero)
+ CASE(Released)
+ CASE(Relinquished)
+ CASE(Escaped)
+ }
+ }
+
+ LLVM_DUMP_METHOD void dump() const { dump(llvm::errs()); }
+};
+
+enum ReallocPairKind {
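+  // The original symbol must still be freed if reallocation fails
+  // (realloc leaves the old allocation intact on failure).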
+ RPToBeFreedAfterFailure,
+ // The symbol has been freed when reallocation failed.
+ RPIsFreeOnFailure,
+ // The symbol does not need to be freed after reallocation fails.
+ RPDoNotTrackAfterFailure
+};
+
+/// \class ReallocPair
+/// \brief Stores information about the symbol being reallocated by a call to
+/// 'realloc' to allow modeling failed reallocation later in the path.
+struct ReallocPair {
+ // \brief The symbol which realloc reallocated.
+ SymbolRef ReallocatedSym;
+ ReallocPairKind Kind;
+
+ ReallocPair(SymbolRef S, ReallocPairKind K) :
+ ReallocatedSym(S), Kind(K) {}
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(Kind);
+ ID.AddPointer(ReallocatedSym);
+ }
+ bool operator==(const ReallocPair &X) const {
+ return ReallocatedSym == X.ReallocatedSym &&
+ Kind == X.Kind;
+ }
+};
+
+typedef std::pair<const ExplodedNode*, const MemRegion*> LeakInfo;
+
+class MallocChecker : public Checker<check::DeadSymbols,
+ check::PointerEscape,
+ check::ConstPointerEscape,
+ check::PreStmt<ReturnStmt>,
+ check::PreCall,
+ check::PostStmt<CallExpr>,
+ check::PostStmt<CXXNewExpr>,
+ check::PreStmt<CXXDeleteExpr>,
+ check::PostStmt<BlockExpr>,
+ check::PostObjCMessage,
+ check::Location,
+ eval::Assume>
+{
+public:
+ MallocChecker()
+ : II_alloca(nullptr), II_malloc(nullptr), II_free(nullptr),
+ II_realloc(nullptr), II_calloc(nullptr), II_valloc(nullptr),
+ II_reallocf(nullptr), II_strndup(nullptr), II_strdup(nullptr),
+ II_kmalloc(nullptr), II_if_nameindex(nullptr),
+ II_if_freenameindex(nullptr) {}
+
+ /// In pessimistic mode, the checker assumes that it does not know which
+ /// functions might free the memory.
+ enum CheckKind {
+ CK_MallocChecker,
+ CK_NewDeleteChecker,
+ CK_NewDeleteLeaksChecker,
+ CK_MismatchedDeallocatorChecker,
+ CK_NumCheckKinds
+ };
+
+ enum class MemoryOperationKind {
+ MOK_Allocate,
+ MOK_Free,
+ MOK_Any
+ };
+
+ DefaultBool IsOptimistic;
+
+ DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ CheckName CheckNames[CK_NumCheckKinds];
+
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkPostStmt(const CXXNewExpr *NE, CheckerContext &C) const;
+ void checkPreStmt(const CXXDeleteExpr *DE, CheckerContext &C) const;
+ void checkPostObjCMessage(const ObjCMethodCall &Call, CheckerContext &C) const;
+ void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+ void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
+ ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond,
+ bool Assumption) const;
+ void checkLocation(SVal l, bool isLoad, const Stmt *S,
+ CheckerContext &C) const;
+
+ ProgramStateRef checkPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind) const;
+ ProgramStateRef checkConstPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind) const;
+
+ void printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const override;
+
+private:
+ mutable std::unique_ptr<BugType> BT_DoubleFree[CK_NumCheckKinds];
+ mutable std::unique_ptr<BugType> BT_DoubleDelete;
+ mutable std::unique_ptr<BugType> BT_Leak[CK_NumCheckKinds];
+ mutable std::unique_ptr<BugType> BT_UseFree[CK_NumCheckKinds];
+ mutable std::unique_ptr<BugType> BT_BadFree[CK_NumCheckKinds];
+ mutable std::unique_ptr<BugType> BT_FreeAlloca[CK_NumCheckKinds];
+ mutable std::unique_ptr<BugType> BT_MismatchedDealloc;
+ mutable std::unique_ptr<BugType> BT_OffsetFree[CK_NumCheckKinds];
+ mutable std::unique_ptr<BugType> BT_UseZerroAllocated[CK_NumCheckKinds];
+ mutable IdentifierInfo *II_alloca, *II_malloc, *II_free, *II_realloc,
+ *II_calloc, *II_valloc, *II_reallocf, *II_strndup,
+ *II_strdup, *II_kmalloc, *II_if_nameindex,
+ *II_if_freenameindex;
+ mutable Optional<uint64_t> KernelZeroFlagVal;
+
+ void initIdentifierInfo(ASTContext &C) const;
+
+ /// \brief Determine family of a deallocation expression.
+ AllocationFamily getAllocationFamily(CheckerContext &C, const Stmt *S) const;
+
+ /// \brief Print names of allocators and deallocators.
+ ///
+ /// \returns true on success.
+ bool printAllocDeallocName(raw_ostream &os, CheckerContext &C,
+ const Expr *E) const;
+
+ /// \brief Print expected name of an allocator based on the deallocator's
+ /// family derived from the DeallocExpr.
+ void printExpectedAllocName(raw_ostream &os, CheckerContext &C,
+ const Expr *DeallocExpr) const;
+ /// \brief Print expected name of a deallocator based on the allocator's
+ /// family.
+ void printExpectedDeallocName(raw_ostream &os, AllocationFamily Family) const;
+
+ ///@{
+ /// Check if this is one of the functions which can allocate/reallocate memory
+ /// pointed to by one of its arguments.
+ bool isMemFunction(const FunctionDecl *FD, ASTContext &C) const;
+ bool isCMemFunction(const FunctionDecl *FD,
+ ASTContext &C,
+ AllocationFamily Family,
+ MemoryOperationKind MemKind) const;
+ bool isStandardNewDelete(const FunctionDecl *FD, ASTContext &C) const;
+ ///@}
+
+ /// \brief Perform a zero-allocation check.
+ ProgramStateRef ProcessZeroAllocation(CheckerContext &C, const Expr *E,
+ const unsigned AllocationSizeArg,
+ ProgramStateRef State) const;
+
+ ProgramStateRef MallocMemReturnsAttr(CheckerContext &C,
+ const CallExpr *CE,
+ const OwnershipAttr* Att,
+ ProgramStateRef State) const;
+ static ProgramStateRef MallocMemAux(CheckerContext &C, const CallExpr *CE,
+ const Expr *SizeEx, SVal Init,
+ ProgramStateRef State,
+ AllocationFamily Family = AF_Malloc);
+ static ProgramStateRef MallocMemAux(CheckerContext &C, const CallExpr *CE,
+ SVal SizeEx, SVal Init,
+ ProgramStateRef State,
+ AllocationFamily Family = AF_Malloc);
+
+  // Check if this is a malloc() with special flags. At present that means
+  // M_ZERO or __GFP_ZERO (in which case, treat it like calloc).
+ llvm::Optional<ProgramStateRef>
+ performKernelMalloc(const CallExpr *CE, CheckerContext &C,
+ const ProgramStateRef &State) const;
+
+ /// Update the RefState to reflect the new memory allocation.
+ static ProgramStateRef
+ MallocUpdateRefState(CheckerContext &C, const Expr *E, ProgramStateRef State,
+ AllocationFamily Family = AF_Malloc);
+
+ ProgramStateRef FreeMemAttr(CheckerContext &C, const CallExpr *CE,
+ const OwnershipAttr* Att,
+ ProgramStateRef State) const;
+ ProgramStateRef FreeMemAux(CheckerContext &C, const CallExpr *CE,
+ ProgramStateRef state, unsigned Num,
+ bool Hold,
+ bool &ReleasedAllocated,
+ bool ReturnsNullOnFailure = false) const;
+ ProgramStateRef FreeMemAux(CheckerContext &C, const Expr *Arg,
+ const Expr *ParentExpr,
+ ProgramStateRef State,
+ bool Hold,
+ bool &ReleasedAllocated,
+ bool ReturnsNullOnFailure = false) const;
+
+ ProgramStateRef ReallocMem(CheckerContext &C, const CallExpr *CE,
+ bool FreesMemOnFailure,
+ ProgramStateRef State) const;
+ static ProgramStateRef CallocMem(CheckerContext &C, const CallExpr *CE,
+ ProgramStateRef State);
+
+ ///\brief Check if the memory associated with this symbol was released.
+ bool isReleased(SymbolRef Sym, CheckerContext &C) const;
+
+ bool checkUseAfterFree(SymbolRef Sym, CheckerContext &C, const Stmt *S) const;
+
+ void checkUseZeroAllocated(SymbolRef Sym, CheckerContext &C,
+ const Stmt *S) const;
+
+ bool checkDoubleDelete(SymbolRef Sym, CheckerContext &C) const;
+
+  /// Check if the function is known to free memory, or if it is
+ /// "interesting" and should be modeled explicitly.
+ ///
+ /// \param [out] EscapingSymbol A function might not free memory in general,
+ /// but could be known to free a particular symbol. In this case, false is
+ /// returned and the single escaping symbol is returned through the out
+ /// parameter.
+ ///
+ /// We assume that pointers do not escape through calls to system functions
+ /// not handled by this checker.
+ bool mayFreeAnyEscapedMemoryOrIsModeledExplicitly(const CallEvent *Call,
+ ProgramStateRef State,
+ SymbolRef &EscapingSymbol) const;
+
+  // Implementation of the checkPointerEscape callbacks.
+ ProgramStateRef checkPointerEscapeAux(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind,
+ bool(*CheckRefState)(const RefState*)) const;
+
+ ///@{
+ /// Tells if a given family/call/symbol is tracked by the current checker.
+ /// Sets CheckKind to the kind of the checker responsible for this
+ /// family/call/symbol.
+ Optional<CheckKind> getCheckIfTracked(AllocationFamily Family,
+ bool IsALeakCheck = false) const;
+ Optional<CheckKind> getCheckIfTracked(CheckerContext &C,
+ const Stmt *AllocDeallocStmt,
+ bool IsALeakCheck = false) const;
+ Optional<CheckKind> getCheckIfTracked(CheckerContext &C, SymbolRef Sym,
+ bool IsALeakCheck = false) const;
+ ///@}
+ static bool SummarizeValue(raw_ostream &os, SVal V);
+ static bool SummarizeRegion(raw_ostream &os, const MemRegion *MR);
+ void ReportBadFree(CheckerContext &C, SVal ArgVal, SourceRange Range,
+ const Expr *DeallocExpr) const;
+ void ReportFreeAlloca(CheckerContext &C, SVal ArgVal,
+ SourceRange Range) const;
+ void ReportMismatchedDealloc(CheckerContext &C, SourceRange Range,
+ const Expr *DeallocExpr, const RefState *RS,
+ SymbolRef Sym, bool OwnershipTransferred) const;
+ void ReportOffsetFree(CheckerContext &C, SVal ArgVal, SourceRange Range,
+ const Expr *DeallocExpr,
+ const Expr *AllocExpr = nullptr) const;
+ void ReportUseAfterFree(CheckerContext &C, SourceRange Range,
+ SymbolRef Sym) const;
+ void ReportDoubleFree(CheckerContext &C, SourceRange Range, bool Released,
+ SymbolRef Sym, SymbolRef PrevSym) const;
+
+ void ReportDoubleDelete(CheckerContext &C, SymbolRef Sym) const;
+
+ void ReportUseZeroAllocated(CheckerContext &C, SourceRange Range,
+ SymbolRef Sym) const;
+
+ /// Find the location of the allocation for Sym on the path leading to the
+ /// exploded node N.
+ LeakInfo getAllocationSite(const ExplodedNode *N, SymbolRef Sym,
+ CheckerContext &C) const;
+
+ void reportLeak(SymbolRef Sym, ExplodedNode *N, CheckerContext &C) const;
+
+ /// The bug visitor which allows us to print extra diagnostics along the
+ /// BugReport path. For example, showing the allocation site of the leaked
+ /// region.
+ class MallocBugVisitor final
+ : public BugReporterVisitorImpl<MallocBugVisitor> {
+ protected:
+ enum NotificationMode {
+ Normal,
+ ReallocationFailed
+ };
+
+ // The allocated region symbol tracked by the main analysis.
+ SymbolRef Sym;
+
+ // The mode we are in, i.e. what kind of diagnostics will be emitted.
+ NotificationMode Mode;
+
+    // A symbol recorded at the point where the primary region should have
+    // been reallocated.
+ SymbolRef FailedReallocSymbol;
+
+ bool IsLeak;
+
+ public:
+ MallocBugVisitor(SymbolRef S, bool isLeak = false)
+ : Sym(S), Mode(Normal), FailedReallocSymbol(nullptr), IsLeak(isLeak) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ static int X = 0;
+ ID.AddPointer(&X);
+ ID.AddPointer(Sym);
+ }
+
+ inline bool isAllocated(const RefState *S, const RefState *SPrev,
+ const Stmt *Stmt) {
+ // Did not track -> allocated. Other state (released) -> allocated.
+ return (Stmt && (isa<CallExpr>(Stmt) || isa<CXXNewExpr>(Stmt)) &&
+ (S && (S->isAllocated() || S->isAllocatedOfSizeZero())) &&
+ (!SPrev || !(SPrev->isAllocated() ||
+ SPrev->isAllocatedOfSizeZero())));
+ }
+
+ inline bool isReleased(const RefState *S, const RefState *SPrev,
+ const Stmt *Stmt) {
+ // Did not track -> released. Other state (allocated) -> released.
+ return (Stmt && (isa<CallExpr>(Stmt) || isa<CXXDeleteExpr>(Stmt)) &&
+ (S && S->isReleased()) && (!SPrev || !SPrev->isReleased()));
+ }
+
+ inline bool isRelinquished(const RefState *S, const RefState *SPrev,
+ const Stmt *Stmt) {
+ // Did not track -> relinquished. Other state (allocated) -> relinquished.
+ return (Stmt && (isa<CallExpr>(Stmt) || isa<ObjCMessageExpr>(Stmt) ||
+ isa<ObjCPropertyRefExpr>(Stmt)) &&
+ (S && S->isRelinquished()) &&
+ (!SPrev || !SPrev->isRelinquished()));
+ }
+
+ inline bool isReallocFailedCheck(const RefState *S, const RefState *SPrev,
+ const Stmt *Stmt) {
+ // If the expression is not a call, and the state change is
+ // released -> allocated, it must be the realloc return value
+ // check. If we have to handle more cases here, it might be cleaner just
+ // to track this extra bit in the state itself.
+ return ((!Stmt || !isa<CallExpr>(Stmt)) &&
+ (S && (S->isAllocated() || S->isAllocatedOfSizeZero())) &&
+ (SPrev && !(SPrev->isAllocated() ||
+ SPrev->isAllocatedOfSizeZero())));
+ }
+
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
+
+ std::unique_ptr<PathDiagnosticPiece>
+ getEndPath(BugReporterContext &BRC, const ExplodedNode *EndPathNode,
+ BugReport &BR) override {
+ if (!IsLeak)
+ return nullptr;
+
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::createEndOfPath(EndPathNode,
+ BRC.getSourceManager());
+ // Do not add the statement itself as a range in case of leak.
+ return llvm::make_unique<PathDiagnosticEventPiece>(L, BR.getDescription(),
+ false);
+ }
+
+ private:
+ class StackHintGeneratorForReallocationFailed
+ : public StackHintGeneratorForSymbol {
+ public:
+ StackHintGeneratorForReallocationFailed(SymbolRef S, StringRef M)
+ : StackHintGeneratorForSymbol(S, M) {}
+
+ std::string getMessageForArg(const Expr *ArgE,
+ unsigned ArgIndex) override {
+ // Printed parameters start at 1, not 0.
+ ++ArgIndex;
+
+ SmallString<200> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << "Reallocation of " << ArgIndex << llvm::getOrdinalSuffix(ArgIndex)
+ << " parameter failed";
+
+ return os.str();
+ }
+
+ std::string getMessageForReturn(const CallExpr *CallExpr) override {
+ return "Reallocation of returned value failed";
+ }
+ };
+ };
+};
+} // end anonymous namespace
+
+REGISTER_MAP_WITH_PROGRAMSTATE(RegionState, SymbolRef, RefState)
+REGISTER_MAP_WITH_PROGRAMSTATE(ReallocPairs, SymbolRef, ReallocPair)
+REGISTER_SET_WITH_PROGRAMSTATE(ReallocSizeZeroSymbols, SymbolRef)
+
+// A map from the freed symbol to the symbol representing the return value of
+// the free function.
+REGISTER_MAP_WITH_PROGRAMSTATE(FreeReturnValue, SymbolRef, SymbolRef)
+
+namespace {
+class StopTrackingCallback final : public SymbolVisitor {
+ ProgramStateRef state;
+public:
+ StopTrackingCallback(ProgramStateRef st) : state(st) {}
+ ProgramStateRef getState() const { return state; }
+
+ bool VisitSymbol(SymbolRef sym) override {
+ state = state->remove<RegionState>(sym);
+ return true;
+ }
+};
+} // end anonymous namespace
+
+void MallocChecker::initIdentifierInfo(ASTContext &Ctx) const {
+ if (II_malloc)
+ return;
+ II_alloca = &Ctx.Idents.get("alloca");
+ II_malloc = &Ctx.Idents.get("malloc");
+ II_free = &Ctx.Idents.get("free");
+ II_realloc = &Ctx.Idents.get("realloc");
+ II_reallocf = &Ctx.Idents.get("reallocf");
+ II_calloc = &Ctx.Idents.get("calloc");
+ II_valloc = &Ctx.Idents.get("valloc");
+ II_strdup = &Ctx.Idents.get("strdup");
+ II_strndup = &Ctx.Idents.get("strndup");
+ II_kmalloc = &Ctx.Idents.get("kmalloc");
+ II_if_nameindex = &Ctx.Idents.get("if_nameindex");
+ II_if_freenameindex = &Ctx.Idents.get("if_freenameindex");
+}
+
+bool MallocChecker::isMemFunction(const FunctionDecl *FD, ASTContext &C) const {
+ if (isCMemFunction(FD, C, AF_Malloc, MemoryOperationKind::MOK_Any))
+ return true;
+
+ if (isCMemFunction(FD, C, AF_IfNameIndex, MemoryOperationKind::MOK_Any))
+ return true;
+
+ if (isCMemFunction(FD, C, AF_Alloca, MemoryOperationKind::MOK_Any))
+ return true;
+
+ if (isStandardNewDelete(FD, C))
+ return true;
+
+ return false;
+}
+
+bool MallocChecker::isCMemFunction(const FunctionDecl *FD,
+ ASTContext &C,
+ AllocationFamily Family,
+ MemoryOperationKind MemKind) const {
+ if (!FD)
+ return false;
+
+ bool CheckFree = (MemKind == MemoryOperationKind::MOK_Any ||
+ MemKind == MemoryOperationKind::MOK_Free);
+ bool CheckAlloc = (MemKind == MemoryOperationKind::MOK_Any ||
+ MemKind == MemoryOperationKind::MOK_Allocate);
+
+ if (FD->getKind() == Decl::Function) {
+ const IdentifierInfo *FunI = FD->getIdentifier();
+ initIdentifierInfo(C);
+
+ if (Family == AF_Malloc && CheckFree) {
+ if (FunI == II_free || FunI == II_realloc || FunI == II_reallocf)
+ return true;
+ }
+
+ if (Family == AF_Malloc && CheckAlloc) {
+ if (FunI == II_malloc || FunI == II_realloc || FunI == II_reallocf ||
+ FunI == II_calloc || FunI == II_valloc || FunI == II_strdup ||
+ FunI == II_strndup || FunI == II_kmalloc)
+ return true;
+ }
+
+ if (Family == AF_IfNameIndex && CheckFree) {
+ if (FunI == II_if_freenameindex)
+ return true;
+ }
+
+ if (Family == AF_IfNameIndex && CheckAlloc) {
+ if (FunI == II_if_nameindex)
+ return true;
+ }
+
+ if (Family == AF_Alloca && CheckAlloc) {
+ if (FunI == II_alloca)
+ return true;
+ }
+ }
+
+ if (Family != AF_Malloc)
+ return false;
+
+ if (IsOptimistic && FD->hasAttrs()) {
+ for (const auto *I : FD->specific_attrs<OwnershipAttr>()) {
+ OwnershipAttr::OwnershipKind OwnKind = I->getOwnKind();
+      if (OwnKind == OwnershipAttr::Takes ||
+          OwnKind == OwnershipAttr::Holds) {
+ if (CheckFree)
+ return true;
+ } else if (OwnKind == OwnershipAttr::Returns) {
+ if (CheckAlloc)
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+// Tells if the callee is one of the following:
+// 1) A global non-placement new/delete operator function.
+// 2) A global placement operator function with the single placement argument
+// of type std::nothrow_t.
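+//
+// For example (hypothetical declarations), these match:
+//
+//   void *operator new(std::size_t);
+//   void *operator new[](std::size_t, const std::nothrow_t &) noexcept;
+//
+// while class member operators and other placement forms, such as
+//
+//   void *operator new(std::size_t, void *);
+//
+// do not.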
+bool MallocChecker::isStandardNewDelete(const FunctionDecl *FD,
+ ASTContext &C) const {
+ if (!FD)
+ return false;
+
+ OverloadedOperatorKind Kind = FD->getOverloadedOperator();
+ if (Kind != OO_New && Kind != OO_Array_New &&
+ Kind != OO_Delete && Kind != OO_Array_Delete)
+ return false;
+
+ // Skip all operator new/delete methods.
+ if (isa<CXXMethodDecl>(FD))
+ return false;
+
+  // Return true if the tested operator is a standard placement nothrow
+  // operator.
+ if (FD->getNumParams() == 2) {
+ QualType T = FD->getParamDecl(1)->getType();
+ if (const IdentifierInfo *II = T.getBaseTypeIdentifier())
+ return II->getName().equals("nothrow_t");
+ }
+
+ // Skip placement operators.
+ if (FD->getNumParams() != 1 || FD->isVariadic())
+ return false;
+
+ // One of the standard new/new[]/delete/delete[] non-placement operators.
+ return true;
+}
+
+llvm::Optional<ProgramStateRef> MallocChecker::performKernelMalloc(
+ const CallExpr *CE, CheckerContext &C, const ProgramStateRef &State) const {
+ // 3-argument malloc(), as commonly used in {Free,Net,Open}BSD Kernels:
+ //
+ // void *malloc(unsigned long size, struct malloc_type *mtp, int flags);
+ //
+ // One of the possible flags is M_ZERO, which means 'give me back an
+ // allocation which is already zeroed', like calloc.
+
+ // 2-argument kmalloc(), as used in the Linux kernel:
+ //
+ // void *kmalloc(size_t size, gfp_t flags);
+ //
+  // It has a similar flag value, __GFP_ZERO.
+
+ // This logic is largely cloned from O_CREAT in UnixAPIChecker, maybe some
+ // code could be shared.
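+  //
+  // For example (hypothetical FreeBSD kernel code), this call is treated
+  // like calloc because M_ZERO is set:
+  //
+  //   buf = malloc(len, M_TEMP, M_WAITOK | M_ZERO);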
+
+ ASTContext &Ctx = C.getASTContext();
+ llvm::Triple::OSType OS = Ctx.getTargetInfo().getTriple().getOS();
+
+ if (!KernelZeroFlagVal.hasValue()) {
+ if (OS == llvm::Triple::FreeBSD)
+ KernelZeroFlagVal = 0x0100;
+ else if (OS == llvm::Triple::NetBSD)
+ KernelZeroFlagVal = 0x0002;
+ else if (OS == llvm::Triple::OpenBSD)
+ KernelZeroFlagVal = 0x0008;
+ else if (OS == llvm::Triple::Linux)
+ // __GFP_ZERO
+ KernelZeroFlagVal = 0x8000;
+ else
+ // FIXME: We need a more general way of getting the M_ZERO value.
+ // See also: O_CREAT in UnixAPIChecker.cpp.
+
+ // Fall back to normal malloc behavior on platforms where we don't
+ // know M_ZERO.
+ return None;
+ }
+
+  // We treat the last argument as the flags argument, and callers fall back
+  // to normal malloc behavior on a None return. This works for the FreeBSD
+  // kernel malloc as well as Linux kmalloc.
+ if (CE->getNumArgs() < 2)
+ return None;
+
+ const Expr *FlagsEx = CE->getArg(CE->getNumArgs() - 1);
+ const SVal V = State->getSVal(FlagsEx, C.getLocationContext());
+ if (!V.getAs<NonLoc>()) {
+ // The case where 'V' can be a location can only be due to a bad header,
+ // so in this case bail out.
+ return None;
+ }
+
+ NonLoc Flags = V.castAs<NonLoc>();
+ NonLoc ZeroFlag = C.getSValBuilder()
+ .makeIntVal(KernelZeroFlagVal.getValue(), FlagsEx->getType())
+ .castAs<NonLoc>();
+ SVal MaskedFlagsUC = C.getSValBuilder().evalBinOpNN(State, BO_And,
+ Flags, ZeroFlag,
+ FlagsEx->getType());
+ if (MaskedFlagsUC.isUnknownOrUndef())
+ return None;
+ DefinedSVal MaskedFlags = MaskedFlagsUC.castAs<DefinedSVal>();
+
+  // Check if MaskedFlags is non-zero.
+ ProgramStateRef TrueState, FalseState;
+ std::tie(TrueState, FalseState) = State->assume(MaskedFlags);
+
+ // If M_ZERO is set, treat this like calloc (initialized).
+ if (TrueState && !FalseState) {
+ SVal ZeroVal = C.getSValBuilder().makeZeroVal(Ctx.CharTy);
+ return MallocMemAux(C, CE, CE->getArg(0), ZeroVal, TrueState);
+ }
+
+ return None;
+}
+
+void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
+ if (C.wasInlined)
+ return;
+
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD)
+ return;
+
+ ProgramStateRef State = C.getState();
+ bool ReleasedAllocatedMemory = false;
+
+ if (FD->getKind() == Decl::Function) {
+ initIdentifierInfo(C.getASTContext());
+ IdentifierInfo *FunI = FD->getIdentifier();
+
+ if (FunI == II_malloc) {
+ if (CE->getNumArgs() < 1)
+ return;
+ if (CE->getNumArgs() < 3) {
+ State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
+ if (CE->getNumArgs() == 1)
+ State = ProcessZeroAllocation(C, CE, 0, State);
+ } else if (CE->getNumArgs() == 3) {
+ llvm::Optional<ProgramStateRef> MaybeState =
+ performKernelMalloc(CE, C, State);
+ if (MaybeState.hasValue())
+ State = MaybeState.getValue();
+ else
+ State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
+ }
+ } else if (FunI == II_kmalloc) {
+ llvm::Optional<ProgramStateRef> MaybeState =
+ performKernelMalloc(CE, C, State);
+ if (MaybeState.hasValue())
+ State = MaybeState.getValue();
+ else
+ State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
+ } else if (FunI == II_valloc) {
+ if (CE->getNumArgs() < 1)
+ return;
+ State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
+ State = ProcessZeroAllocation(C, CE, 0, State);
+ } else if (FunI == II_realloc) {
+ State = ReallocMem(C, CE, false, State);
+ State = ProcessZeroAllocation(C, CE, 1, State);
+ } else if (FunI == II_reallocf) {
+ State = ReallocMem(C, CE, true, State);
+ State = ProcessZeroAllocation(C, CE, 1, State);
+ } else if (FunI == II_calloc) {
+ State = CallocMem(C, CE, State);
+ State = ProcessZeroAllocation(C, CE, 0, State);
+ State = ProcessZeroAllocation(C, CE, 1, State);
+ } else if (FunI == II_free) {
+ State = FreeMemAux(C, CE, State, 0, false, ReleasedAllocatedMemory);
+ } else if (FunI == II_strdup) {
+ State = MallocUpdateRefState(C, CE, State);
+ } else if (FunI == II_strndup) {
+ State = MallocUpdateRefState(C, CE, State);
+ } else if (FunI == II_alloca) {
+ State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State,
+ AF_Alloca);
+ State = ProcessZeroAllocation(C, CE, 0, State);
+ } else if (isStandardNewDelete(FD, C.getASTContext())) {
+ // Process direct calls to operator new/new[]/delete/delete[] functions
+ // as distinct from new/new[]/delete/delete[] expressions that are
+ // processed by the checkPostStmt callbacks for CXXNewExpr and
+ // CXXDeleteExpr.
+ OverloadedOperatorKind K = FD->getOverloadedOperator();
+ if (K == OO_New) {
+ State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State,
+ AF_CXXNew);
+ State = ProcessZeroAllocation(C, CE, 0, State);
+ }
+ else if (K == OO_Array_New) {
+ State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State,
+ AF_CXXNewArray);
+ State = ProcessZeroAllocation(C, CE, 0, State);
+ }
+ else if (K == OO_Delete || K == OO_Array_Delete)
+ State = FreeMemAux(C, CE, State, 0, false, ReleasedAllocatedMemory);
+ else
+ llvm_unreachable("not a new/delete operator");
+ } else if (FunI == II_if_nameindex) {
+ // Should we model this differently? We can allocate a fixed number of
+ // elements with zeros in the last one.
+ State = MallocMemAux(C, CE, UnknownVal(), UnknownVal(), State,
+ AF_IfNameIndex);
+ } else if (FunI == II_if_freenameindex) {
+ State = FreeMemAux(C, CE, State, 0, false, ReleasedAllocatedMemory);
+ }
+ }
+
+ if (IsOptimistic || ChecksEnabled[CK_MismatchedDeallocatorChecker]) {
+ // Check all the attributes, if there are any.
+ // There can be multiple of these attributes.
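+    // For example (hypothetical annotated API):
+    //
+    //   void *my_alloc(size_t n)
+    //       __attribute__((ownership_returns(malloc)));
+    //   void my_free(void *p)
+    //       __attribute__((ownership_takes(malloc, 1)));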
+ if (FD->hasAttrs())
+ for (const auto *I : FD->specific_attrs<OwnershipAttr>()) {
+ switch (I->getOwnKind()) {
+ case OwnershipAttr::Returns:
+ State = MallocMemReturnsAttr(C, CE, I, State);
+ break;
+ case OwnershipAttr::Takes:
+ case OwnershipAttr::Holds:
+ State = FreeMemAttr(C, CE, I, State);
+ break;
+ }
+ }
+ }
+ C.addTransition(State);
+}
+
+// Performs a zero-sized allocation check.
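+// For instance (hypothetical user code), the result of 'malloc(0)' is
+// marked AllocatedOfSizeZero, so a later dereference can be reported:
+//
+//   char *p = malloc(0);
+//   p[0] = 'x'; // use of zero-allocated memory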
+ProgramStateRef MallocChecker::ProcessZeroAllocation(CheckerContext &C,
+ const Expr *E,
+ const unsigned AllocationSizeArg,
+ ProgramStateRef State) const {
+ if (!State)
+ return nullptr;
+
+ const Expr *Arg = nullptr;
+
+ if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
+ Arg = CE->getArg(AllocationSizeArg);
+ }
+ else if (const CXXNewExpr *NE = dyn_cast<CXXNewExpr>(E)) {
+ if (NE->isArray())
+ Arg = NE->getArraySize();
+ else
+ return State;
+ }
+ else
+ llvm_unreachable("not a CallExpr or CXXNewExpr");
+
+ assert(Arg);
+
+ Optional<DefinedSVal> DefArgVal =
+ State->getSVal(Arg, C.getLocationContext()).getAs<DefinedSVal>();
+
+ if (!DefArgVal)
+ return State;
+
+ // Check if the allocation size is 0.
+ ProgramStateRef TrueState, FalseState;
+ SValBuilder &SvalBuilder = C.getSValBuilder();
+ DefinedSVal Zero =
+ SvalBuilder.makeZeroVal(Arg->getType()).castAs<DefinedSVal>();
+
+ std::tie(TrueState, FalseState) =
+ State->assume(SvalBuilder.evalEQ(State, *DefArgVal, Zero));
+
+ if (TrueState && !FalseState) {
+ SVal retVal = State->getSVal(E, C.getLocationContext());
+ SymbolRef Sym = retVal.getAsLocSymbol();
+ if (!Sym)
+ return State;
+
+ const RefState *RS = State->get<RegionState>(Sym);
+ if (RS) {
+ if (RS->isAllocated())
+ return TrueState->set<RegionState>(Sym,
+ RefState::getAllocatedOfSizeZero(RS));
+ else
+ return State;
+ } else {
+ // Case of zero-size realloc. Historically 'realloc(ptr, 0)' is treated as
+ // 'free(ptr)' and the returned value from 'realloc(ptr, 0)' is not
+ // tracked. Add zero-reallocated Sym to the state to catch references
+ // to zero-allocated memory.
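+      //
+      // For instance (hypothetical user code):
+      //
+      //   q = realloc(p, 0); // acts like free(p); 'q' is recorded here
+      //   *q = 1;            // a later use of 'q' can then be reported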
+ return TrueState->add<ReallocSizeZeroSymbols>(Sym);
+ }
+ }
+
+ // Assume the value is non-zero going forward.
+ assert(FalseState);
+ return FalseState;
+}
+
+static QualType getDeepPointeeType(QualType T) {
+ QualType Result = T, PointeeType = T->getPointeeType();
+ while (!PointeeType.isNull()) {
+ Result = PointeeType;
+ PointeeType = PointeeType->getPointeeType();
+ }
+ return Result;
+}
+
+static bool treatUnusedNewEscaped(const CXXNewExpr *NE) {
+
+ const CXXConstructExpr *ConstructE = NE->getConstructExpr();
+ if (!ConstructE)
+ return false;
+
+ if (!NE->getAllocatedType()->getAsCXXRecordDecl())
+ return false;
+
+ const CXXConstructorDecl *CtorD = ConstructE->getConstructor();
+
+ // Iterate over the constructor parameters.
+ for (const auto *CtorParam : CtorD->params()) {
+
+ QualType CtorParamPointeeT = CtorParam->getType()->getPointeeType();
+ if (CtorParamPointeeT.isNull())
+ continue;
+
+ CtorParamPointeeT = getDeepPointeeType(CtorParamPointeeT);
+
+ if (CtorParamPointeeT->getAsCXXRecordDecl())
+ return true;
+ }
+
+ return false;
+}
+
+void MallocChecker::checkPostStmt(const CXXNewExpr *NE,
+ CheckerContext &C) const {
+
+ if (NE->getNumPlacementArgs())
+ for (CXXNewExpr::const_arg_iterator I = NE->placement_arg_begin(),
+ E = NE->placement_arg_end(); I != E; ++I)
+ if (SymbolRef Sym = C.getSVal(*I).getAsSymbol())
+ checkUseAfterFree(Sym, C, *I);
+
+ if (!isStandardNewDelete(NE->getOperatorNew(), C.getASTContext()))
+ return;
+
+ ParentMap &PM = C.getLocationContext()->getParentMap();
+ if (!PM.isConsumedExpr(NE) && treatUnusedNewEscaped(NE))
+ return;
+
+ ProgramStateRef State = C.getState();
+ // The return value from operator new is bound to a specified initialization
+  // value (if any) and we don't want to lose this value. So we call
+  // MallocUpdateRefState() instead of MallocMemAux() which breaks the
+ // existing binding.
+ State = MallocUpdateRefState(C, NE, State, NE->isArray() ? AF_CXXNewArray
+ : AF_CXXNew);
+ State = ProcessZeroAllocation(C, NE, 0, State);
+ C.addTransition(State);
+}
+
+void MallocChecker::checkPreStmt(const CXXDeleteExpr *DE,
+ CheckerContext &C) const {
+
+ if (!ChecksEnabled[CK_NewDeleteChecker])
+ if (SymbolRef Sym = C.getSVal(DE->getArgument()).getAsSymbol())
+ checkUseAfterFree(Sym, C, DE->getArgument());
+
+ if (!isStandardNewDelete(DE->getOperatorDelete(), C.getASTContext()))
+ return;
+
+ ProgramStateRef State = C.getState();
+ bool ReleasedAllocated;
+ State = FreeMemAux(C, DE->getArgument(), DE, State,
+ /*Hold*/false, ReleasedAllocated);
+
+ C.addTransition(State);
+}
+
+static bool isKnownDeallocObjCMethodName(const ObjCMethodCall &Call) {
+ // If the first selector piece is one of the names below, assume that the
+ // object takes ownership of the memory, promising to eventually deallocate it
+ // with free().
+ // Ex: [NSData dataWithBytesNoCopy:bytes length:10];
+ // (...unless a 'freeWhenDone' parameter is false, but that's checked later.)
+ StringRef FirstSlot = Call.getSelector().getNameForSlot(0);
+ return FirstSlot == "dataWithBytesNoCopy" ||
+ FirstSlot == "initWithBytesNoCopy" ||
+ FirstSlot == "initWithCharactersNoCopy";
+}
+
+static Optional<bool> getFreeWhenDoneArg(const ObjCMethodCall &Call) {
+ Selector S = Call.getSelector();
+
+ // FIXME: We should not rely on fully-constrained symbols being folded.
+ for (unsigned i = 1; i < S.getNumArgs(); ++i)
+ if (S.getNameForSlot(i).equals("freeWhenDone"))
+ return !Call.getArgSVal(i).isZeroConstant();
+
+ return None;
+}
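+
+// For example (hypothetical caller code), ownership of 'bytes' transfers
+// here, so the checker treats the memory as relinquished rather than
+// leaked:
+//
+//   [NSData dataWithBytesNoCopy:bytes length:len freeWhenDone:YES];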
+
+void MallocChecker::checkPostObjCMessage(const ObjCMethodCall &Call,
+ CheckerContext &C) const {
+ if (C.wasInlined)
+ return;
+
+ if (!isKnownDeallocObjCMethodName(Call))
+ return;
+
+ if (Optional<bool> FreeWhenDone = getFreeWhenDoneArg(Call))
+ if (!*FreeWhenDone)
+ return;
+
+ bool ReleasedAllocatedMemory;
+ ProgramStateRef State = FreeMemAux(C, Call.getArgExpr(0),
+ Call.getOriginExpr(), C.getState(),
+ /*Hold=*/true, ReleasedAllocatedMemory,
+ /*RetNullOnFailure=*/true);
+
+ C.addTransition(State);
+}
+
+ProgramStateRef
+MallocChecker::MallocMemReturnsAttr(CheckerContext &C, const CallExpr *CE,
+ const OwnershipAttr *Att,
+ ProgramStateRef State) const {
+ if (!State)
+ return nullptr;
+
+ if (Att->getModule() != II_malloc)
+ return nullptr;
+
+ OwnershipAttr::args_iterator I = Att->args_begin(), E = Att->args_end();
+ if (I != E) {
+ return MallocMemAux(C, CE, CE->getArg(*I), UndefinedVal(), State);
+ }
+ return MallocMemAux(C, CE, UnknownVal(), UndefinedVal(), State);
+}
+
+ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
+ const CallExpr *CE,
+ const Expr *SizeEx, SVal Init,
+ ProgramStateRef State,
+ AllocationFamily Family) {
+ if (!State)
+ return nullptr;
+
+ return MallocMemAux(C, CE, State->getSVal(SizeEx, C.getLocationContext()),
+ Init, State, Family);
+}
+
+ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
+ const CallExpr *CE,
+ SVal Size, SVal Init,
+ ProgramStateRef State,
+ AllocationFamily Family) {
+ if (!State)
+ return nullptr;
+
+ // We expect the malloc functions to return a pointer.
+ if (!Loc::isLocType(CE->getType()))
+ return nullptr;
+
+ // Bind the return value to the symbolic value from the heap region.
+ // TODO: We could rewrite post visit to eval call; 'malloc' does not have
+ // side effects other than what we model here.
+ unsigned Count = C.blockCount();
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
+ DefinedSVal RetVal = svalBuilder.getConjuredHeapSymbolVal(CE, LCtx, Count)
+ .castAs<DefinedSVal>();
+ State = State->BindExpr(CE, C.getLocationContext(), RetVal);
+
+ // Fill the region with the initialization value.
+ State = State->bindDefault(RetVal, Init);
+
+ // Set the region's extent equal to the Size parameter.
+ const SymbolicRegion *R =
+ dyn_cast_or_null<SymbolicRegion>(RetVal.getAsRegion());
+ if (!R)
+ return nullptr;
+ if (Optional<DefinedOrUnknownSVal> DefinedSize =
+ Size.getAs<DefinedOrUnknownSVal>()) {
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ DefinedOrUnknownSVal Extent = R->getExtent(svalBuilder);
+ DefinedOrUnknownSVal extentMatchesSize =
+ svalBuilder.evalEQ(State, Extent, *DefinedSize);
+
+ State = State->assume(extentMatchesSize, true);
+ assert(State);
+ }
+
+ return MallocUpdateRefState(C, CE, State, Family);
+}
+
+ProgramStateRef MallocChecker::MallocUpdateRefState(CheckerContext &C,
+ const Expr *E,
+ ProgramStateRef State,
+ AllocationFamily Family) {
+ if (!State)
+ return nullptr;
+
+ // Get the return value.
+ SVal retVal = State->getSVal(E, C.getLocationContext());
+
+ // We expect the malloc functions to return a pointer.
+ if (!retVal.getAs<Loc>())
+ return nullptr;
+
+ SymbolRef Sym = retVal.getAsLocSymbol();
+ assert(Sym);
+
+ // Set the symbol's state to Allocated.
+ return State->set<RegionState>(Sym, RefState::getAllocated(Family, E));
+}
+
+ProgramStateRef MallocChecker::FreeMemAttr(CheckerContext &C,
+ const CallExpr *CE,
+ const OwnershipAttr *Att,
+ ProgramStateRef State) const {
+ if (!State)
+ return nullptr;
+
+ if (Att->getModule() != II_malloc)
+ return nullptr;
+
+ bool ReleasedAllocated = false;
+
+ for (const auto &Arg : Att->args()) {
+ ProgramStateRef StateI = FreeMemAux(C, CE, State, Arg,
+ Att->getOwnKind() == OwnershipAttr::Holds,
+ ReleasedAllocated);
+ if (StateI)
+ State = StateI;
+ }
+ return State;
+}
+
+ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
+ const CallExpr *CE,
+ ProgramStateRef State,
+ unsigned Num,
+ bool Hold,
+ bool &ReleasedAllocated,
+ bool ReturnsNullOnFailure) const {
+ if (!State)
+ return nullptr;
+
+ if (CE->getNumArgs() < (Num + 1))
+ return nullptr;
+
+ return FreeMemAux(C, CE->getArg(Num), CE, State, Hold,
+ ReleasedAllocated, ReturnsNullOnFailure);
+}
+
+/// Checks if the previous call to free on the given symbol failed; if it
+/// did, returns true. Also returns the corresponding return value symbol.
+static bool didPreviousFreeFail(ProgramStateRef State,
+ SymbolRef Sym, SymbolRef &RetStatusSymbol) {
+ const SymbolRef *Ret = State->get<FreeReturnValue>(Sym);
+ if (Ret) {
+ assert(*Ret && "We should not store the null return symbol");
+ ConstraintManager &CMgr = State->getConstraintManager();
+ ConditionTruthVal FreeFailed = CMgr.isNull(State, *Ret);
+ RetStatusSymbol = *Ret;
+ return FreeFailed.isConstrainedTrue();
+ }
+ return false;
+}
+
+AllocationFamily MallocChecker::getAllocationFamily(CheckerContext &C,
+ const Stmt *S) const {
+ if (!S)
+ return AF_None;
+
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+
+ if (!FD)
+ FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
+
+ ASTContext &Ctx = C.getASTContext();
+
+ if (isCMemFunction(FD, Ctx, AF_Malloc, MemoryOperationKind::MOK_Any))
+ return AF_Malloc;
+
+ if (isStandardNewDelete(FD, Ctx)) {
+ OverloadedOperatorKind Kind = FD->getOverloadedOperator();
+ if (Kind == OO_New || Kind == OO_Delete)
+ return AF_CXXNew;
+ else if (Kind == OO_Array_New || Kind == OO_Array_Delete)
+ return AF_CXXNewArray;
+ }
+
+ if (isCMemFunction(FD, Ctx, AF_IfNameIndex, MemoryOperationKind::MOK_Any))
+ return AF_IfNameIndex;
+
+ if (isCMemFunction(FD, Ctx, AF_Alloca, MemoryOperationKind::MOK_Any))
+ return AF_Alloca;
+
+ return AF_None;
+ }
+
+ if (const CXXNewExpr *NE = dyn_cast<CXXNewExpr>(S))
+ return NE->isArray() ? AF_CXXNewArray : AF_CXXNew;
+
+ if (const CXXDeleteExpr *DE = dyn_cast<CXXDeleteExpr>(S))
+ return DE->isArrayForm() ? AF_CXXNewArray : AF_CXXNew;
+
+ if (isa<ObjCMessageExpr>(S))
+ return AF_Malloc;
+
+ return AF_None;
+}
+
+bool MallocChecker::printAllocDeallocName(raw_ostream &os, CheckerContext &C,
+ const Expr *E) const {
+ if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
+ // FIXME: This doesn't handle indirect calls.
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (!FD)
+ return false;
+
+ os << *FD;
+ if (!FD->isOverloadedOperator())
+ os << "()";
+ return true;
+ }
+
+ if (const ObjCMessageExpr *Msg = dyn_cast<ObjCMessageExpr>(E)) {
+ if (Msg->isInstanceMessage())
+ os << "-";
+ else
+ os << "+";
+ Msg->getSelector().print(os);
+ return true;
+ }
+
+ if (const CXXNewExpr *NE = dyn_cast<CXXNewExpr>(E)) {
+ os << "'"
+ << getOperatorSpelling(NE->getOperatorNew()->getOverloadedOperator())
+ << "'";
+ return true;
+ }
+
+ if (const CXXDeleteExpr *DE = dyn_cast<CXXDeleteExpr>(E)) {
+ os << "'"
+ << getOperatorSpelling(DE->getOperatorDelete()->getOverloadedOperator())
+ << "'";
+ return true;
+ }
+
+ return false;
+}
+
+void MallocChecker::printExpectedAllocName(raw_ostream &os, CheckerContext &C,
+ const Expr *E) const {
+ AllocationFamily Family = getAllocationFamily(C, E);
+
+  switch (Family) {
+ case AF_Malloc: os << "malloc()"; return;
+ case AF_CXXNew: os << "'new'"; return;
+ case AF_CXXNewArray: os << "'new[]'"; return;
+ case AF_IfNameIndex: os << "'if_nameindex()'"; return;
+ case AF_Alloca:
+ case AF_None: llvm_unreachable("not a deallocation expression");
+ }
+}
+
+void MallocChecker::printExpectedDeallocName(raw_ostream &os,
+ AllocationFamily Family) const {
+  switch (Family) {
+ case AF_Malloc: os << "free()"; return;
+ case AF_CXXNew: os << "'delete'"; return;
+ case AF_CXXNewArray: os << "'delete[]'"; return;
+ case AF_IfNameIndex: os << "'if_freenameindex()'"; return;
+ case AF_Alloca:
+ case AF_None: llvm_unreachable("suspicious argument");
+ }
+}
+
+ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
+ const Expr *ArgExpr,
+ const Expr *ParentExpr,
+ ProgramStateRef State,
+ bool Hold,
+ bool &ReleasedAllocated,
+ bool ReturnsNullOnFailure) const {
+
+ if (!State)
+ return nullptr;
+
+ SVal ArgVal = State->getSVal(ArgExpr, C.getLocationContext());
+ if (!ArgVal.getAs<DefinedOrUnknownSVal>())
+ return nullptr;
+ DefinedOrUnknownSVal location = ArgVal.castAs<DefinedOrUnknownSVal>();
+
+ // Check for null dereferences.
+ if (!location.getAs<Loc>())
+ return nullptr;
+
+  // In the explicit NULL case, no operation is performed.
+ ProgramStateRef notNullState, nullState;
+ std::tie(notNullState, nullState) = State->assume(location);
+ if (nullState && !notNullState)
+ return nullptr;
+
+  // Unknown values could easily be okay; undefined values are handled
+  // elsewhere.
+ if (ArgVal.isUnknownOrUndef())
+ return nullptr;
+
+ const MemRegion *R = ArgVal.getAsRegion();
+
+ // Nonlocs can't be freed, of course.
+ // Non-region locations (labels and fixed addresses) also shouldn't be freed.
+ if (!R) {
+ ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
+ return nullptr;
+ }
+
+ R = R->StripCasts();
+
+ // Blocks might show up as heap data, but should not be free()d
+ if (isa<BlockDataRegion>(R)) {
+ ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
+ return nullptr;
+ }
+
+ const MemSpaceRegion *MS = R->getMemorySpace();
+
+ // Parameters, locals, statics, globals, and memory returned by
+ // __builtin_alloca() shouldn't be freed.
+ if (!(isa<UnknownSpaceRegion>(MS) || isa<HeapSpaceRegion>(MS))) {
+ // FIXME: at the time this code was written, malloc() regions were
+ // represented by conjured symbols, which are all in UnknownSpaceRegion.
+ // This means that there isn't actually anything from HeapSpaceRegion
+ // that should be freed, even though we allow it here.
+ // Of course, free() can work on memory allocated outside the current
+ // function, so UnknownSpaceRegion is always a possibility.
+ // False negatives are better than false positives.
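+    //
+    // For instance (hypothetical user code), both calls below are flagged:
+    //
+    //   int x;
+    //   free(&x);        // freeing stack memory
+    //   free(alloca(8)); // freeing memory from alloca()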
+
+ if (isa<AllocaRegion>(R))
+ ReportFreeAlloca(C, ArgVal, ArgExpr->getSourceRange());
+ else
+ ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
+
+ return nullptr;
+ }
+
+ const SymbolicRegion *SrBase = dyn_cast<SymbolicRegion>(R->getBaseRegion());
+ // Various cases could lead to non-symbol values here.
+ // For now, ignore them.
+ if (!SrBase)
+ return nullptr;
+
+ SymbolRef SymBase = SrBase->getSymbol();
+ const RefState *RsBase = State->get<RegionState>(SymBase);
+ SymbolRef PreviousRetStatusSymbol = nullptr;
+
+ if (RsBase) {
+
+ // Memory returned by alloca() shouldn't be freed.
+ if (RsBase->getAllocationFamily() == AF_Alloca) {
+ ReportFreeAlloca(C, ArgVal, ArgExpr->getSourceRange());
+ return nullptr;
+ }
+
+ // Check for double free first.
+ if ((RsBase->isReleased() || RsBase->isRelinquished()) &&
+ !didPreviousFreeFail(State, SymBase, PreviousRetStatusSymbol)) {
+ ReportDoubleFree(C, ParentExpr->getSourceRange(), RsBase->isReleased(),
+ SymBase, PreviousRetStatusSymbol);
+ return nullptr;
+
+ // If the pointer is allocated or escaped, but we are now trying to free it,
+ // check that the call to free is proper.
+ } else if (RsBase->isAllocated() || RsBase->isAllocatedOfSizeZero() ||
+ RsBase->isEscaped()) {
+
+ // Check if an expected deallocation function matches the real one.
+ bool DeallocMatchesAlloc =
+ RsBase->getAllocationFamily() == getAllocationFamily(C, ParentExpr);
+ if (!DeallocMatchesAlloc) {
+ ReportMismatchedDealloc(C, ArgExpr->getSourceRange(),
+ ParentExpr, RsBase, SymBase, Hold);
+ return nullptr;
+ }
+
+ // Check if the memory location being freed is the actual location
+ // allocated, or an offset.
+ RegionOffset Offset = R->getAsOffset();
+ if (Offset.isValid() &&
+ !Offset.hasSymbolicOffset() &&
+ Offset.getOffset() != 0) {
+ const Expr *AllocExpr = cast<Expr>(RsBase->getStmt());
+ ReportOffsetFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr,
+ AllocExpr);
+ return nullptr;
+ }
+ }
+ }
+
+ ReleasedAllocated = (RsBase != nullptr) && (RsBase->isAllocated() ||
+ RsBase->isAllocatedOfSizeZero());
+
+ // Clean out the return-value info recorded for the previous call to free.
+ State = State->remove<FreeReturnValue>(SymBase);
+
+ // Keep track of the return value. If it is NULL, we will know that free
+ // failed.
+ if (ReturnsNullOnFailure) {
+ SVal RetVal = C.getSVal(ParentExpr);
+ SymbolRef RetStatusSymbol = RetVal.getAsSymbol();
+ if (RetStatusSymbol) {
+ C.getSymbolManager().addSymbolDependency(SymBase, RetStatusSymbol);
+ State = State->set<FreeReturnValue>(SymBase, RetStatusSymbol);
+ }
+ }
+
+ AllocationFamily Family = RsBase ? RsBase->getAllocationFamily()
+ : getAllocationFamily(C, ParentExpr);
+ // Normal free.
+ if (Hold)
+ return State->set<RegionState>(SymBase,
+ RefState::getRelinquished(Family,
+ ParentExpr));
+
+ return State->set<RegionState>(SymBase,
+ RefState::getReleased(Family, ParentExpr));
+}
+
+Optional<MallocChecker::CheckKind>
+MallocChecker::getCheckIfTracked(AllocationFamily Family,
+ bool IsALeakCheck) const {
+ switch (Family) {
+ case AF_Malloc:
+ case AF_Alloca:
+ case AF_IfNameIndex: {
+ if (ChecksEnabled[CK_MallocChecker])
+ return CK_MallocChecker;
+
+ return Optional<MallocChecker::CheckKind>();
+ }
+ case AF_CXXNew:
+ case AF_CXXNewArray: {
+ if (IsALeakCheck) {
+ if (ChecksEnabled[CK_NewDeleteLeaksChecker])
+ return CK_NewDeleteLeaksChecker;
+ } else {
+ if (ChecksEnabled[CK_NewDeleteChecker])
+ return CK_NewDeleteChecker;
+ }
+ return Optional<MallocChecker::CheckKind>();
+ }
+ case AF_None: {
+ llvm_unreachable("no family");
+ }
+ }
+ llvm_unreachable("unhandled family");
+}
+
+Optional<MallocChecker::CheckKind>
+MallocChecker::getCheckIfTracked(CheckerContext &C,
+ const Stmt *AllocDeallocStmt,
+ bool IsALeakCheck) const {
+ return getCheckIfTracked(getAllocationFamily(C, AllocDeallocStmt),
+ IsALeakCheck);
+}
+
+Optional<MallocChecker::CheckKind>
+MallocChecker::getCheckIfTracked(CheckerContext &C, SymbolRef Sym,
+ bool IsALeakCheck) const {
+ if (C.getState()->contains<ReallocSizeZeroSymbols>(Sym))
+ return CK_MallocChecker;
+
+ const RefState *RS = C.getState()->get<RegionState>(Sym);
+ assert(RS);
+ return getCheckIfTracked(RS->getAllocationFamily(), IsALeakCheck);
+}
+
+bool MallocChecker::SummarizeValue(raw_ostream &os, SVal V) {
+ if (Optional<nonloc::ConcreteInt> IntVal = V.getAs<nonloc::ConcreteInt>())
+ os << "an integer (" << IntVal->getValue() << ")";
+ else if (Optional<loc::ConcreteInt> ConstAddr = V.getAs<loc::ConcreteInt>())
+ os << "a constant address (" << ConstAddr->getValue() << ")";
+ else if (Optional<loc::GotoLabel> Label = V.getAs<loc::GotoLabel>())
+ os << "the address of the label '" << Label->getLabel()->getName() << "'";
+ else
+ return false;
+
+ return true;
+}
+
+bool MallocChecker::SummarizeRegion(raw_ostream &os,
+ const MemRegion *MR) {
+ switch (MR->getKind()) {
+ case MemRegion::FunctionTextRegionKind: {
+ const NamedDecl *FD = cast<FunctionTextRegion>(MR)->getDecl();
+ if (FD)
+ os << "the address of the function '" << *FD << '\'';
+ else
+ os << "the address of a function";
+ return true;
+ }
+ case MemRegion::BlockTextRegionKind:
+ os << "block text";
+ return true;
+ case MemRegion::BlockDataRegionKind:
+ // FIXME: where the block came from?
+ os << "a block";
+ return true;
+ default: {
+ const MemSpaceRegion *MS = MR->getMemorySpace();
+
+ if (isa<StackLocalsSpaceRegion>(MS)) {
+ const VarRegion *VR = dyn_cast<VarRegion>(MR);
+ const VarDecl *VD = VR ? VR->getDecl() : nullptr;
+
+ if (VD)
+ os << "the address of the local variable '" << VD->getName() << "'";
+ else
+ os << "the address of a local stack variable";
+ return true;
+ }
+
+ if (isa<StackArgumentsSpaceRegion>(MS)) {
+ const VarRegion *VR = dyn_cast<VarRegion>(MR);
+ const VarDecl *VD = VR ? VR->getDecl() : nullptr;
+
+ if (VD)
+ os << "the address of the parameter '" << VD->getName() << "'";
+ else
+ os << "the address of a parameter";
+ return true;
+ }
+
+ if (isa<GlobalsSpaceRegion>(MS)) {
+ const VarRegion *VR = dyn_cast<VarRegion>(MR);
+ const VarDecl *VD = VR ? VR->getDecl() : nullptr;
+
+ if (VD) {
+ if (VD->isStaticLocal())
+ os << "the address of the static variable '" << VD->getName() << "'";
+ else
+ os << "the address of the global variable '" << VD->getName() << "'";
+ } else
+ os << "the address of a global variable";
+ return true;
+ }
+
+ return false;
+ }
+ }
+}
+
+void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
+ SourceRange Range,
+ const Expr *DeallocExpr) const {
+
+ if (!ChecksEnabled[CK_MallocChecker] &&
+ !ChecksEnabled[CK_NewDeleteChecker])
+ return;
+
+ Optional<MallocChecker::CheckKind> CheckKind =
+ getCheckIfTracked(C, DeallocExpr);
+ if (!CheckKind.hasValue())
+ return;
+
+ if (ExplodedNode *N = C.generateErrorNode()) {
+ if (!BT_BadFree[*CheckKind])
+ BT_BadFree[*CheckKind].reset(
+ new BugType(CheckNames[*CheckKind], "Bad free", "Memory Error"));
+
+ SmallString<100> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ const MemRegion *MR = ArgVal.getAsRegion();
+ while (const ElementRegion *ER = dyn_cast_or_null<ElementRegion>(MR))
+ MR = ER->getSuperRegion();
+
+ os << "Argument to ";
+ if (!printAllocDeallocName(os, C, DeallocExpr))
+ os << "deallocator";
+
+ os << " is ";
+ bool Summarized = MR ? SummarizeRegion(os, MR)
+ : SummarizeValue(os, ArgVal);
+ if (Summarized)
+ os << ", which is not memory allocated by ";
+ else
+ os << "not memory allocated by ";
+
+ printExpectedAllocName(os, C, DeallocExpr);
+
+ auto R = llvm::make_unique<BugReport>(*BT_BadFree[*CheckKind], os.str(), N);
+ R->markInteresting(MR);
+ R->addRange(Range);
+ C.emitReport(std::move(R));
+ }
+}
+
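+// Example of the code this reports (sketch; the allocator/deallocator names
+// come from the print helpers used above):
+//   int x;
+//   free(&x); // "Argument to free() is the address of the local variable
+//             //  'x', which is not memory allocated by malloc()"
+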
+void MallocChecker::ReportFreeAlloca(CheckerContext &C, SVal ArgVal,
+ SourceRange Range) const {
+
+ Optional<MallocChecker::CheckKind> CheckKind;
+
+ if (ChecksEnabled[CK_MallocChecker])
+ CheckKind = CK_MallocChecker;
+ else if (ChecksEnabled[CK_MismatchedDeallocatorChecker])
+ CheckKind = CK_MismatchedDeallocatorChecker;
+ else
+ return;
+
+ if (ExplodedNode *N = C.generateErrorNode()) {
+ if (!BT_FreeAlloca[*CheckKind])
+ BT_FreeAlloca[*CheckKind].reset(
+ new BugType(CheckNames[*CheckKind], "Free alloca()", "Memory Error"));
+
+ auto R = llvm::make_unique<BugReport>(
+ *BT_FreeAlloca[*CheckKind],
+ "Memory allocated by alloca() should not be deallocated", N);
+ R->markInteresting(ArgVal.getAsRegion());
+ R->addRange(Range);
+ C.emitReport(std::move(R));
+ }
+}
+
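+// Example of the code this reports (sketch):
+//   char *p = (char *)alloca(10);
+//   free(p); // "Memory allocated by alloca() should not be deallocated"
+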
+void MallocChecker::ReportMismatchedDealloc(CheckerContext &C,
+ SourceRange Range,
+ const Expr *DeallocExpr,
+ const RefState *RS,
+ SymbolRef Sym,
+ bool OwnershipTransferred) const {
+
+ if (!ChecksEnabled[CK_MismatchedDeallocatorChecker])
+ return;
+
+ if (ExplodedNode *N = C.generateErrorNode()) {
+ if (!BT_MismatchedDealloc)
+ BT_MismatchedDealloc.reset(
+ new BugType(CheckNames[CK_MismatchedDeallocatorChecker],
+ "Bad deallocator", "Memory Error"));
+
+ SmallString<100> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ const Expr *AllocExpr = cast<Expr>(RS->getStmt());
+ SmallString<20> AllocBuf;
+ llvm::raw_svector_ostream AllocOs(AllocBuf);
+ SmallString<20> DeallocBuf;
+ llvm::raw_svector_ostream DeallocOs(DeallocBuf);
+
+ if (OwnershipTransferred) {
+ if (printAllocDeallocName(DeallocOs, C, DeallocExpr))
+ os << DeallocOs.str() << " cannot";
+ else
+ os << "Cannot";
+
+ os << " take ownership of memory";
+
+ if (printAllocDeallocName(AllocOs, C, AllocExpr))
+ os << " allocated by " << AllocOs.str();
+ } else {
+ os << "Memory";
+ if (printAllocDeallocName(AllocOs, C, AllocExpr))
+ os << " allocated by " << AllocOs.str();
+
+ os << " should be deallocated by ";
+ printExpectedDeallocName(os, RS->getAllocationFamily());
+
+ if (printAllocDeallocName(DeallocOs, C, DeallocExpr))
+ os << ", not " << DeallocOs.str();
+ }
+
+ auto R = llvm::make_unique<BugReport>(*BT_MismatchedDealloc, os.str(), N);
+ R->markInteresting(Sym);
+ R->addRange(Range);
+ R->addVisitor(llvm::make_unique<MallocBugVisitor>(Sym));
+ C.emitReport(std::move(R));
+ }
+}
+
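+// Example of the code this reports (sketch; diagnostic wording approximate):
+//   int *p = new int;
+//   free(p); // roughly: "Memory allocated by 'new' should be deallocated
+//            //           by 'delete', not free()"
+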
+void MallocChecker::ReportOffsetFree(CheckerContext &C, SVal ArgVal,
+ SourceRange Range, const Expr *DeallocExpr,
+ const Expr *AllocExpr) const {
+
+ if (!ChecksEnabled[CK_MallocChecker] &&
+ !ChecksEnabled[CK_NewDeleteChecker])
+ return;
+
+ Optional<MallocChecker::CheckKind> CheckKind =
+ getCheckIfTracked(C, AllocExpr);
+ if (!CheckKind.hasValue())
+ return;
+
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return;
+
+ if (!BT_OffsetFree[*CheckKind])
+ BT_OffsetFree[*CheckKind].reset(
+ new BugType(CheckNames[*CheckKind], "Offset free", "Memory Error"));
+
+ SmallString<100> buf;
+ llvm::raw_svector_ostream os(buf);
+ SmallString<20> AllocNameBuf;
+ llvm::raw_svector_ostream AllocNameOs(AllocNameBuf);
+
+ const MemRegion *MR = ArgVal.getAsRegion();
+ assert(MR && "Only MemRegion based symbols can have offset free errors");
+
+ RegionOffset Offset = MR->getAsOffset();
+ assert((Offset.isValid() &&
+ !Offset.hasSymbolicOffset() &&
+ Offset.getOffset() != 0) &&
+ "Only symbols with a valid offset can have offset free errors");
+
+ int offsetBytes = Offset.getOffset() / C.getASTContext().getCharWidth();
+
+ os << "Argument to ";
+ if (!printAllocDeallocName(os, C, DeallocExpr))
+ os << "deallocator";
+ os << " is offset by "
+ << offsetBytes
+ << " "
+ << ((abs(offsetBytes) > 1) ? "bytes" : "byte")
+ << " from the start of ";
+ if (AllocExpr && printAllocDeallocName(AllocNameOs, C, AllocExpr))
+ os << "memory allocated by " << AllocNameOs.str();
+ else
+ os << "allocated memory";
+
+ auto R = llvm::make_unique<BugReport>(*BT_OffsetFree[*CheckKind], os.str(), N);
+ R->markInteresting(MR->getBaseRegion());
+ R->addRange(Range);
+ C.emitReport(std::move(R));
+}
+
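+// Example of the code this reports (sketch):
+//   char *p = (char *)malloc(10);
+//   free(p + 4); // "Argument to free() is offset by 4 bytes from the start
+//                //  of memory allocated by malloc()"
+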
+void MallocChecker::ReportUseAfterFree(CheckerContext &C, SourceRange Range,
+ SymbolRef Sym) const {
+
+ if (!ChecksEnabled[CK_MallocChecker] &&
+ !ChecksEnabled[CK_NewDeleteChecker])
+ return;
+
+ Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
+ if (!CheckKind.hasValue())
+ return;
+
+ if (ExplodedNode *N = C.generateErrorNode()) {
+ if (!BT_UseFree[*CheckKind])
+ BT_UseFree[*CheckKind].reset(new BugType(
+ CheckNames[*CheckKind], "Use-after-free", "Memory Error"));
+
+ auto R = llvm::make_unique<BugReport>(*BT_UseFree[*CheckKind],
+ "Use of memory after it is freed", N);
+
+ R->markInteresting(Sym);
+ R->addRange(Range);
+ R->addVisitor(llvm::make_unique<MallocBugVisitor>(Sym));
+ C.emitReport(std::move(R));
+ }
+}
+
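+// Example of the code this reports (sketch):
+//   int *p = (int *)malloc(sizeof(int));
+//   free(p);
+//   int v = *p; // "Use of memory after it is freed"
+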
+void MallocChecker::ReportDoubleFree(CheckerContext &C, SourceRange Range,
+ bool Released, SymbolRef Sym,
+ SymbolRef PrevSym) const {
+
+ if (!ChecksEnabled[CK_MallocChecker] &&
+ !ChecksEnabled[CK_NewDeleteChecker])
+ return;
+
+ Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
+ if (!CheckKind.hasValue())
+ return;
+
+ if (ExplodedNode *N = C.generateErrorNode()) {
+ if (!BT_DoubleFree[*CheckKind])
+ BT_DoubleFree[*CheckKind].reset(
+ new BugType(CheckNames[*CheckKind], "Double free", "Memory Error"));
+
+ auto R = llvm::make_unique<BugReport>(
+ *BT_DoubleFree[*CheckKind],
+ (Released ? "Attempt to free released memory"
+ : "Attempt to free non-owned memory"),
+ N);
+ R->addRange(Range);
+ R->markInteresting(Sym);
+ if (PrevSym)
+ R->markInteresting(PrevSym);
+ R->addVisitor(llvm::make_unique<MallocBugVisitor>(Sym));
+ C.emitReport(std::move(R));
+ }
+}
+
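+// Example of the code this reports (sketch):
+//   int *p = (int *)malloc(4);
+//   free(p);
+//   free(p); // "Attempt to free released memory"
+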
+void MallocChecker::ReportDoubleDelete(CheckerContext &C, SymbolRef Sym) const {
+
+ if (!ChecksEnabled[CK_NewDeleteChecker])
+ return;
+
+ Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
+ if (!CheckKind.hasValue())
+ return;
+
+ if (ExplodedNode *N = C.generateErrorNode()) {
+ if (!BT_DoubleDelete)
+ BT_DoubleDelete.reset(new BugType(CheckNames[CK_NewDeleteChecker],
+ "Double delete", "Memory Error"));
+
+ auto R = llvm::make_unique<BugReport>(
+ *BT_DoubleDelete, "Attempt to delete released memory", N);
+
+ R->markInteresting(Sym);
+ R->addVisitor(llvm::make_unique<MallocBugVisitor>(Sym));
+ C.emitReport(std::move(R));
+ }
+}
+
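+// Example of the code this reports (sketch):
+//   int *q = new int;
+//   delete q;
+//   delete q; // "Attempt to delete released memory"
+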
+void MallocChecker::ReportUseZeroAllocated(CheckerContext &C,
+ SourceRange Range,
+ SymbolRef Sym) const {
+
+ if (!ChecksEnabled[CK_MallocChecker] &&
+ !ChecksEnabled[CK_NewDeleteChecker])
+ return;
+
+ Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
+
+ if (!CheckKind.hasValue())
+ return;
+
+ if (ExplodedNode *N = C.generateErrorNode()) {
+ if (!BT_UseZerroAllocated[*CheckKind])
+ BT_UseZerroAllocated[*CheckKind].reset(new BugType(
+ CheckNames[*CheckKind], "Use of zero allocated", "Memory Error"));
+
+ auto R = llvm::make_unique<BugReport>(*BT_UseZerroAllocated[*CheckKind],
+ "Use of zero-allocated memory", N);
+
+ R->addRange(Range);
+ if (Sym) {
+ R->markInteresting(Sym);
+ R->addVisitor(llvm::make_unique<MallocBugVisitor>(Sym));
+ }
+ C.emitReport(std::move(R));
+ }
+}
+
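+// Example of the code this reports (sketch):
+//   char *p = (char *)malloc(0);
+//   *p = 'x'; // "Use of zero-allocated memory"
+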
+ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C,
+ const CallExpr *CE,
+ bool FreesOnFail,
+ ProgramStateRef State) const {
+ if (!State)
+ return nullptr;
+
+ if (CE->getNumArgs() < 2)
+ return nullptr;
+
+ const Expr *arg0Expr = CE->getArg(0);
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal Arg0Val = State->getSVal(arg0Expr, LCtx);
+ if (!Arg0Val.getAs<DefinedOrUnknownSVal>())
+ return nullptr;
+ DefinedOrUnknownSVal arg0Val = Arg0Val.castAs<DefinedOrUnknownSVal>();
+
+ SValBuilder &svalBuilder = C.getSValBuilder();
+
+ DefinedOrUnknownSVal PtrEQ =
+ svalBuilder.evalEQ(State, arg0Val, svalBuilder.makeNull());
+
+ // Get the size argument. If there is no size arg then give up.
+ const Expr *Arg1 = CE->getArg(1);
+ if (!Arg1)
+ return nullptr;
+
+ // Get the value of the size argument.
+ SVal Arg1ValG = State->getSVal(Arg1, LCtx);
+ if (!Arg1ValG.getAs<DefinedOrUnknownSVal>())
+ return nullptr;
+ DefinedOrUnknownSVal Arg1Val = Arg1ValG.castAs<DefinedOrUnknownSVal>();
+
+ // Compare the size argument to 0.
+ DefinedOrUnknownSVal SizeZero =
+ svalBuilder.evalEQ(State, Arg1Val,
+ svalBuilder.makeIntValWithPtrWidth(0, false));
+
+ ProgramStateRef StatePtrIsNull, StatePtrNotNull;
+ std::tie(StatePtrIsNull, StatePtrNotNull) = State->assume(PtrEQ);
+ ProgramStateRef StateSizeIsZero, StateSizeNotZero;
+ std::tie(StateSizeIsZero, StateSizeNotZero) = State->assume(SizeZero);
+ // We only assume exceptional states if they are definitely true; if the
+ // state is under-constrained, assume regular realloc behavior.
+ bool PtrIsNull = StatePtrIsNull && !StatePtrNotNull;
+ bool SizeIsZero = StateSizeIsZero && !StateSizeNotZero;
+
+ // If the ptr is NULL and the size is not 0, the call is equivalent to
+ // malloc(size).
+ if (PtrIsNull && !SizeIsZero) {
+ ProgramStateRef stateMalloc = MallocMemAux(C, CE, CE->getArg(1),
+ UndefinedVal(), StatePtrIsNull);
+ return stateMalloc;
+ }
+
+ if (PtrIsNull && SizeIsZero)
+ return State;
+
+ // Get the from and to pointer symbols as in toPtr = realloc(fromPtr, size).
+ assert(!PtrIsNull);
+ SymbolRef FromPtr = arg0Val.getAsSymbol();
+ SVal RetVal = State->getSVal(CE, LCtx);
+ SymbolRef ToPtr = RetVal.getAsSymbol();
+ if (!FromPtr || !ToPtr)
+ return nullptr;
+
+ bool ReleasedAllocated = false;
+
+ // If the size is 0, free the memory.
+ if (SizeIsZero)
+ if (ProgramStateRef stateFree = FreeMemAux(C, CE, StateSizeIsZero, 0,
+ false, ReleasedAllocated)){
+ // The semantics of the return value are:
+ // If size was equal to 0, either NULL or a pointer suitable to be passed
+ // to free() is returned. We just free the input pointer and do not add
+ // any constraints on the output pointer.
+ return stateFree;
+ }
+
+ // Default behavior.
+ if (ProgramStateRef stateFree =
+ FreeMemAux(C, CE, State, 0, false, ReleasedAllocated)) {
+
+ ProgramStateRef stateRealloc = MallocMemAux(C, CE, CE->getArg(1),
+ UnknownVal(), stateFree);
+ if (!stateRealloc)
+ return nullptr;
+
+ ReallocPairKind Kind = RPToBeFreedAfterFailure;
+ if (FreesOnFail)
+ Kind = RPIsFreeOnFailure;
+ else if (!ReleasedAllocated)
+ Kind = RPDoNotTrackAfterFailure;
+
+ // Record the info about the reallocated symbol so that we could properly
+ // process failed reallocation.
+ stateRealloc = stateRealloc->set<ReallocPairs>(ToPtr,
+ ReallocPair(FromPtr, Kind));
+ // The reallocated symbol should stay alive for as long as the new symbol.
+ C.getSymbolManager().addSymbolDependency(ToPtr, FromPtr);
+ return stateRealloc;
+ }
+ return nullptr;
+}
+
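+// The realloc() model above, in brief:
+//   realloc(NULL, n) -> malloc(n)
+//   realloc(NULL, 0) -> no-op
+//   realloc(p, 0)    -> free(p)
+//   realloc(p, n)    -> free(p) + malloc(n), linked via ReallocPairs so the
+//                       old state can be restored if the result is NULL
+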
+ProgramStateRef MallocChecker::CallocMem(CheckerContext &C, const CallExpr *CE,
+ ProgramStateRef State) {
+ if (!State)
+ return nullptr;
+
+ if (CE->getNumArgs() < 2)
+ return nullptr;
+
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal count = State->getSVal(CE->getArg(0), LCtx);
+ SVal elementSize = State->getSVal(CE->getArg(1), LCtx);
+ SVal TotalSize = svalBuilder.evalBinOp(State, BO_Mul, count, elementSize,
+ svalBuilder.getContext().getSizeType());
+ SVal zeroVal = svalBuilder.makeZeroVal(svalBuilder.getContext().CharTy);
+
+ return MallocMemAux(C, CE, TotalSize, zeroVal, State);
+}
+
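+// calloc(count, size) is thus modeled as a zero-initialized allocation of
+// count * size bytes, e.g.:
+//   int *p = (int *)calloc(4, sizeof(int)); // tracked like a zeroed malloc(16)
+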
+LeakInfo
+MallocChecker::getAllocationSite(const ExplodedNode *N, SymbolRef Sym,
+ CheckerContext &C) const {
+ const LocationContext *LeakContext = N->getLocationContext();
+ // Walk the ExplodedGraph backwards and find the first node that referred to
+ // the tracked symbol.
+ const ExplodedNode *AllocNode = N;
+ const MemRegion *ReferenceRegion = nullptr;
+
+ while (N) {
+ ProgramStateRef State = N->getState();
+ if (!State->get<RegionState>(Sym))
+ break;
+
+ // Find the most recent expression bound to the symbol in the current
+ // context.
+ if (!ReferenceRegion) {
+ if (const MemRegion *MR = C.getLocationRegionIfPostStore(N)) {
+ SVal Val = State->getSVal(MR);
+ if (Val.getAsLocSymbol() == Sym) {
+ const VarRegion* VR = MR->getBaseRegion()->getAs<VarRegion>();
+ // Do not show local variables belonging to a function other than
+ // where the error is reported.
+ if (!VR ||
+ (VR->getStackFrame() == LeakContext->getCurrentStackFrame()))
+ ReferenceRegion = MR;
+ }
+ }
+ }
+
+ // The allocation node is the last node in the current or parent context
+ // in which the symbol was tracked.
+ const LocationContext *NContext = N->getLocationContext();
+ if (NContext == LeakContext ||
+ NContext->isParentOf(LeakContext))
+ AllocNode = N;
+ N = N->pred_empty() ? nullptr : *(N->pred_begin());
+ }
+
+ return LeakInfo(AllocNode, ReferenceRegion);
+}
+
+void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N,
+ CheckerContext &C) const {
+
+ if (!ChecksEnabled[CK_MallocChecker] &&
+ !ChecksEnabled[CK_NewDeleteLeaksChecker])
+ return;
+
+ const RefState *RS = C.getState()->get<RegionState>(Sym);
+ assert(RS && "cannot leak an untracked symbol");
+ AllocationFamily Family = RS->getAllocationFamily();
+
+ if (Family == AF_Alloca)
+ return;
+
+ Optional<MallocChecker::CheckKind>
+ CheckKind = getCheckIfTracked(Family, true);
+
+ if (!CheckKind.hasValue())
+ return;
+
+ assert(N);
+ if (!BT_Leak[*CheckKind]) {
+ BT_Leak[*CheckKind].reset(
+ new BugType(CheckNames[*CheckKind], "Memory leak", "Memory Error"));
+ // Leaks should not be reported if they are post-dominated by a sink:
+ // (1) Sinks are higher importance bugs.
+ // (2) NoReturnFunctionChecker uses sink nodes to represent paths ending
+ // with __noreturn functions such as assert() or exit(). We choose not
+ // to report leaks on such paths.
+ BT_Leak[*CheckKind]->setSuppressOnSink(true);
+ }
+
+ // Most bug reports are cached at the location where they occurred.
+ // With leaks, we want to unique them by the location where they were
+ // allocated, and only report a single path.
+ PathDiagnosticLocation LocUsedForUniqueing;
+ const ExplodedNode *AllocNode = nullptr;
+ const MemRegion *Region = nullptr;
+ std::tie(AllocNode, Region) = getAllocationSite(N, Sym, C);
+
+ ProgramPoint P = AllocNode->getLocation();
+ const Stmt *AllocationStmt = nullptr;
+ if (Optional<CallExitEnd> Exit = P.getAs<CallExitEnd>())
+ AllocationStmt = Exit->getCalleeContext()->getCallSite();
+ else if (Optional<StmtPoint> SP = P.getAs<StmtPoint>())
+ AllocationStmt = SP->getStmt();
+ if (AllocationStmt)
+ LocUsedForUniqueing = PathDiagnosticLocation::createBegin(AllocationStmt,
+ C.getSourceManager(),
+ AllocNode->getLocationContext());
+
+ SmallString<200> buf;
+ llvm::raw_svector_ostream os(buf);
+ if (Region && Region->canPrintPretty()) {
+ os << "Potential leak of memory pointed to by ";
+ Region->printPretty(os);
+ } else {
+ os << "Potential memory leak";
+ }
+
+ auto R = llvm::make_unique<BugReport>(
+ *BT_Leak[*CheckKind], os.str(), N, LocUsedForUniqueing,
+ AllocNode->getLocationContext()->getDecl());
+ R->markInteresting(Sym);
+ R->addVisitor(llvm::make_unique<MallocBugVisitor>(Sym, true));
+ C.emitReport(std::move(R));
+}
+
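+// Example of the leak path reported above (sketch; wording depends on whether
+// the region can be printed):
+//   void f() {
+//     int *p = (int *)malloc(4);
+//   } // 'p' dies while Allocated: "Potential leak of memory pointed to by 'p'"
+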
+void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
+ CheckerContext &C) const {
+ if (!SymReaper.hasDeadSymbols())
+ return;
+
+ ProgramStateRef state = C.getState();
+ RegionStateTy RS = state->get<RegionState>();
+ RegionStateTy::Factory &F = state->get_context<RegionState>();
+
+ SmallVector<SymbolRef, 2> Errors;
+ for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
+ if (SymReaper.isDead(I->first)) {
+ if (I->second.isAllocated() || I->second.isAllocatedOfSizeZero())
+ Errors.push_back(I->first);
+ // Remove the dead symbol from the map.
+ RS = F.remove(RS, I->first);
+ }
+ }
+
+ // Cleanup the Realloc Pairs Map.
+ ReallocPairsTy RP = state->get<ReallocPairs>();
+ for (ReallocPairsTy::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
+ if (SymReaper.isDead(I->first) ||
+ SymReaper.isDead(I->second.ReallocatedSym)) {
+ state = state->remove<ReallocPairs>(I->first);
+ }
+ }
+
+ // Cleanup the FreeReturnValue Map.
+ FreeReturnValueTy FR = state->get<FreeReturnValue>();
+ for (FreeReturnValueTy::iterator I = FR.begin(), E = FR.end(); I != E; ++I) {
+ if (SymReaper.isDead(I->first) ||
+ SymReaper.isDead(I->second)) {
+ state = state->remove<FreeReturnValue>(I->first);
+ }
+ }
+
+ // Generate leak node.
+ ExplodedNode *N = C.getPredecessor();
+ if (!Errors.empty()) {
+ static CheckerProgramPointTag Tag("MallocChecker", "DeadSymbolsLeak");
+ N = C.generateNonFatalErrorNode(C.getState(), &Tag);
+ if (N) {
+ for (SmallVectorImpl<SymbolRef>::iterator
+ I = Errors.begin(), E = Errors.end(); I != E; ++I) {
+ reportLeak(*I, N, C);
+ }
+ }
+ }
+
+ C.addTransition(state->set<RegionState>(RS), N);
+}
+
+void MallocChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+
+ if (const CXXDestructorCall *DC = dyn_cast<CXXDestructorCall>(&Call)) {
+ SymbolRef Sym = DC->getCXXThisVal().getAsSymbol();
+ if (!Sym || checkDoubleDelete(Sym, C))
+ return;
+ }
+
+ // We will check for double free in the post visit.
+ if (const AnyFunctionCall *FC = dyn_cast<AnyFunctionCall>(&Call)) {
+ const FunctionDecl *FD = FC->getDecl();
+ if (!FD)
+ return;
+
+ ASTContext &Ctx = C.getASTContext();
+ if (ChecksEnabled[CK_MallocChecker] &&
+ (isCMemFunction(FD, Ctx, AF_Malloc, MemoryOperationKind::MOK_Free) ||
+ isCMemFunction(FD, Ctx, AF_IfNameIndex,
+ MemoryOperationKind::MOK_Free)))
+ return;
+
+ if (ChecksEnabled[CK_NewDeleteChecker] &&
+ isStandardNewDelete(FD, Ctx))
+ return;
+ }
+
+ // Check if the object a method is called on ('this') has been deleted.
+ if (const CXXInstanceCall *CC = dyn_cast<CXXInstanceCall>(&Call)) {
+ SymbolRef Sym = CC->getCXXThisVal().getAsSymbol();
+ if (!Sym || checkUseAfterFree(Sym, C, CC->getCXXThisExpr()))
+ return;
+ }
+
+ // Check arguments for being used after free.
+ for (unsigned I = 0, E = Call.getNumArgs(); I != E; ++I) {
+ SVal ArgSVal = Call.getArgSVal(I);
+ if (ArgSVal.getAs<Loc>()) {
+ SymbolRef Sym = ArgSVal.getAsSymbol();
+ if (!Sym)
+ continue;
+ if (checkUseAfterFree(Sym, C, Call.getArgExpr(I)))
+ return;
+ }
+ }
+}
+
+void MallocChecker::checkPreStmt(const ReturnStmt *S, CheckerContext &C) const {
+ const Expr *E = S->getRetValue();
+ if (!E)
+ return;
+
+ // Check if we are returning a symbol.
+ ProgramStateRef State = C.getState();
+ SVal RetVal = State->getSVal(E, C.getLocationContext());
+ SymbolRef Sym = RetVal.getAsSymbol();
+ if (!Sym)
+ // If we are returning a field of the allocated struct or an array element,
+ // the callee could still free the memory.
+ // TODO: This logic should be a part of generic symbol escape callback.
+ if (const MemRegion *MR = RetVal.getAsRegion())
+ if (isa<FieldRegion>(MR) || isa<ElementRegion>(MR))
+ if (const SymbolicRegion *BMR =
+ dyn_cast<SymbolicRegion>(MR->getBaseRegion()))
+ Sym = BMR->getSymbol();
+
+ // Check if we are returning freed memory.
+ if (Sym)
+ checkUseAfterFree(Sym, C, E);
+}
+
+// TODO: Blocks should be either inlined or should call invalidate regions
+// upon invocation. After that's in place, special casing here will not be
+// needed.
+void MallocChecker::checkPostStmt(const BlockExpr *BE,
+ CheckerContext &C) const {
+
+ // Scan the block's captured regions for any object this checker
+ // may be tracking.
+ if (!BE->getBlockDecl()->hasCaptures())
+ return;
+
+ ProgramStateRef state = C.getState();
+ const BlockDataRegion *R =
+ cast<BlockDataRegion>(state->getSVal(BE,
+ C.getLocationContext()).getAsRegion());
+
+ BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
+ E = R->referenced_vars_end();
+
+ if (I == E)
+ return;
+
+ SmallVector<const MemRegion*, 10> Regions;
+ const LocationContext *LC = C.getLocationContext();
+ MemRegionManager &MemMgr = C.getSValBuilder().getRegionManager();
+
+ for ( ; I != E; ++I) {
+ const VarRegion *VR = I.getCapturedRegion();
+ if (VR->getSuperRegion() == R) {
+ VR = MemMgr.getVarRegion(VR->getDecl(), LC);
+ }
+ Regions.push_back(VR);
+ }
+
+ state =
+ state->scanReachableSymbols<StopTrackingCallback>(Regions.data(),
+ Regions.data() + Regions.size()).getState();
+ C.addTransition(state);
+}
+
+bool MallocChecker::isReleased(SymbolRef Sym, CheckerContext &C) const {
+ assert(Sym);
+ const RefState *RS = C.getState()->get<RegionState>(Sym);
+ return (RS && RS->isReleased());
+}
+
+bool MallocChecker::checkUseAfterFree(SymbolRef Sym, CheckerContext &C,
+ const Stmt *S) const {
+
+ if (isReleased(Sym, C)) {
+ ReportUseAfterFree(C, S->getSourceRange(), Sym);
+ return true;
+ }
+
+ return false;
+}
+
+void MallocChecker::checkUseZeroAllocated(SymbolRef Sym, CheckerContext &C,
+ const Stmt *S) const {
+ assert(Sym);
+
+ if (const RefState *RS = C.getState()->get<RegionState>(Sym)) {
+ if (RS->isAllocatedOfSizeZero())
+ ReportUseZeroAllocated(C, RS->getStmt()->getSourceRange(), Sym);
+ }
+ else if (C.getState()->contains<ReallocSizeZeroSymbols>(Sym)) {
+ ReportUseZeroAllocated(C, S->getSourceRange(), Sym);
+ }
+}
+
+bool MallocChecker::checkDoubleDelete(SymbolRef Sym, CheckerContext &C) const {
+
+ if (isReleased(Sym, C)) {
+ ReportDoubleDelete(C, Sym);
+ return true;
+ }
+ return false;
+}
+
+// Check if the location is a freed symbolic region.
+void MallocChecker::checkLocation(SVal l, bool isLoad, const Stmt *S,
+ CheckerContext &C) const {
+ SymbolRef Sym = l.getLocSymbolInBase();
+ if (Sym) {
+ checkUseAfterFree(Sym, C, S);
+ checkUseZeroAllocated(Sym, C, S);
+ }
+}
+
+// If a symbolic region is assumed to be NULL (or another constant), stop
+// tracking it, assuming that allocation failed on this path.
+ProgramStateRef MallocChecker::evalAssume(ProgramStateRef state,
+ SVal Cond,
+ bool Assumption) const {
+ RegionStateTy RS = state->get<RegionState>();
+ for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
+ // If the symbol is assumed to be NULL, remove it from consideration.
+ ConstraintManager &CMgr = state->getConstraintManager();
+ ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey());
+ if (AllocFailed.isConstrainedTrue())
+ state = state->remove<RegionState>(I.getKey());
+ }
+
+ // Realloc returns 0 when reallocation fails, which means that we should
+ // restore the state of the pointer being reallocated.
+ ReallocPairsTy RP = state->get<ReallocPairs>();
+ for (ReallocPairsTy::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
+ // If the symbol is assumed to be NULL, remove it from consideration.
+ ConstraintManager &CMgr = state->getConstraintManager();
+ ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey());
+ if (!AllocFailed.isConstrainedTrue())
+ continue;
+
+ SymbolRef ReallocSym = I.getData().ReallocatedSym;
+ if (const RefState *RS = state->get<RegionState>(ReallocSym)) {
+ if (RS->isReleased()) {
+ if (I.getData().Kind == RPToBeFreedAfterFailure)
+ state = state->set<RegionState>(ReallocSym,
+ RefState::getAllocated(RS->getAllocationFamily(), RS->getStmt()));
+ else if (I.getData().Kind == RPDoNotTrackAfterFailure)
+ state = state->remove<RegionState>(ReallocSym);
+ else
+ assert(I.getData().Kind == RPIsFreeOnFailure);
+ }
+ }
+ state = state->remove<ReallocPairs>(I.getKey());
+ }
+
+ return state;
+}
+
+bool MallocChecker::mayFreeAnyEscapedMemoryOrIsModeledExplicitly(
+ const CallEvent *Call,
+ ProgramStateRef State,
+ SymbolRef &EscapingSymbol) const {
+ assert(Call);
+ EscapingSymbol = nullptr;
+
+ // For now, assume that any C++ or block call can free memory.
+ // TODO: If we want to be more optimistic here, we'll need to make sure that
+ // regions escape to C++ containers. They seem to do that even now, but for
+ // mysterious reasons.
+ if (!(isa<SimpleFunctionCall>(Call) || isa<ObjCMethodCall>(Call)))
+ return true;
+
+ // Check Objective-C messages by selector name.
+ if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
+ // If it's not a framework call, or if it takes a callback, assume it
+ // can free memory.
+ if (!Call->isInSystemHeader() || Call->argumentsMayEscape())
+ return true;
+
+ // If it's a method we know about, handle it explicitly post-call.
+ // This should happen before the "freeWhenDone" check below.
+ if (isKnownDeallocObjCMethodName(*Msg))
+ return false;
+
+ // If there's a "freeWhenDone" parameter, but the method isn't one we know
+ // about, we can't be sure that the object will use free() to deallocate the
+ // memory, so we can't model it explicitly. The best we can do is use it to
+ // decide whether the pointer escapes.
+ if (Optional<bool> FreeWhenDone = getFreeWhenDoneArg(*Msg))
+ return *FreeWhenDone;
+
+ // If the first selector piece ends with "NoCopy", and there is no
+ // "freeWhenDone" parameter set to zero, we know ownership is being
+ // transferred. Again, though, we can't be sure that the object will use
+ // free() to deallocate the memory, so we can't model it explicitly.
+ StringRef FirstSlot = Msg->getSelector().getNameForSlot(0);
+ if (FirstSlot.endswith("NoCopy"))
+ return true;
+
+ // If the first selector starts with addPointer, insertPointer,
+ // or replacePointer, assume we are dealing with NSPointerArray or similar.
+ // This is similar to C++ containers (vector); we still might want to check
+ // that the pointers get freed by following the container itself.
+ if (FirstSlot.startswith("addPointer") ||
+ FirstSlot.startswith("insertPointer") ||
+ FirstSlot.startswith("replacePointer") ||
+ FirstSlot.equals("valueWithPointer")) {
+ return true;
+ }
+
+ // We should escape the receiver on a call to 'init', as the corresponding
+ // symbol is usually not referenced after the call.
+ if (Msg->getMethodFamily() == OMF_init) {
+ EscapingSymbol = Msg->getReceiverSVal().getAsSymbol();
+ return true;
+ }
+
+ // Otherwise, assume that the method does not free memory.
+ // Most framework methods do not free memory.
+ return false;
+ }
+
+ // At this point the only thing left to handle is straight function calls.
+ const FunctionDecl *FD = cast<SimpleFunctionCall>(Call)->getDecl();
+ if (!FD)
+ return true;
+
+ ASTContext &ASTC = State->getStateManager().getContext();
+
+ // If it's one of the allocation functions we can reason about, we model
+ // its behavior explicitly.
+ if (isMemFunction(FD, ASTC))
+ return false;
+
+ // If it's not a system call, assume it frees memory.
+ if (!Call->isInSystemHeader())
+ return true;
+
+ // White list the system functions whose arguments escape.
+ const IdentifierInfo *II = FD->getIdentifier();
+ if (!II)
+ return true;
+ StringRef FName = II->getName();
+
+ // White list the 'XXXNoCopy' CoreFoundation functions.
+ // We specifically check these before the generic escape check below.
+ if (FName.endswith("NoCopy")) {
+ // Look for the deallocator argument. We know that the memory ownership
+ // is not transferred only if the deallocator argument is
+ // 'kCFAllocatorNull'.
+ for (unsigned i = 1; i < Call->getNumArgs(); ++i) {
+ const Expr *ArgE = Call->getArgExpr(i)->IgnoreParenCasts();
+ if (const DeclRefExpr *DE = dyn_cast<DeclRefExpr>(ArgE)) {
+ StringRef DeallocatorName = DE->getFoundDecl()->getName();
+ if (DeallocatorName == "kCFAllocatorNull")
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // Associating streams with malloced buffers. The pointer can escape if
+ // 'closefn' is specified (and if that function does free memory),
+ // but it will not if closefn is not specified.
+ // Currently, we do not inspect the 'closefn' function (PR12101).
+ if (FName == "funopen")
+ if (Call->getNumArgs() >= 5 && Call->getArgSVal(4).isConstant(0))
+ return false;
+
+ // Do not warn on pointers passed to 'setbuf' when used with std streams,
+ // these leaks might be intentional when setting the buffer for stdio.
+ // http://stackoverflow.com/questions/2671151/who-frees-setvbuf-buffer
+ if (FName == "setbuf" || FName =="setbuffer" ||
+ FName == "setlinebuf" || FName == "setvbuf") {
+ if (Call->getNumArgs() >= 1) {
+ const Expr *ArgE = Call->getArgExpr(0)->IgnoreParenCasts();
+ if (const DeclRefExpr *ArgDRE = dyn_cast<DeclRefExpr>(ArgE))
+ if (const VarDecl *D = dyn_cast<VarDecl>(ArgDRE->getDecl()))
+ if (D->getCanonicalDecl()->getName().find("std") != StringRef::npos)
+ return true;
+ }
+ }
+
+ // A bunch of other functions which either take ownership of a pointer or
+ // wrap the result up in a struct or object, meaning it can be freed later.
+ // (See RetainCountChecker.) Not all the parameters here are invalidated,
+ // but the Malloc checker cannot differentiate between them. The right way
+ // of doing this would be to implement a pointer escapes callback.
+ if (FName == "CGBitmapContextCreate" ||
+ FName == "CGBitmapContextCreateWithData" ||
+ FName == "CVPixelBufferCreateWithBytes" ||
+ FName == "CVPixelBufferCreateWithPlanarBytes" ||
+ FName == "OSAtomicEnqueue") {
+ return true;
+ }
+
+ if (FName == "postEvent" &&
+ FD->getQualifiedNameAsString() == "QCoreApplication::postEvent") {
+ return true;
+ }
+
+ // Handle cases where we know a buffer's /address/ can escape.
+ // Note that the above checks handle some special cases where we know that
+ // even though the address escapes, it's still our responsibility to free the
+ // buffer.
+ if (Call->argumentsMayEscape())
+ return true;
+
+ // Otherwise, assume that the function does not free memory.
+ // Most system calls do not free the memory.
+ return false;
+}
+
+static bool retTrue(const RefState *RS) {
+ return true;
+}
+
+static bool checkIfNewOrNewArrayFamily(const RefState *RS) {
+ return (RS->getAllocationFamily() == AF_CXXNewArray ||
+ RS->getAllocationFamily() == AF_CXXNew);
+}
+
+ProgramStateRef MallocChecker::checkPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind) const {
+ return checkPointerEscapeAux(State, Escaped, Call, Kind, &retTrue);
+}
+
+ProgramStateRef MallocChecker::checkConstPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind) const {
+ return checkPointerEscapeAux(State, Escaped, Call, Kind,
+ &checkIfNewOrNewArrayFamily);
+}
+
+ProgramStateRef MallocChecker::checkPointerEscapeAux(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind,
+ bool(*CheckRefState)(const RefState*)) const {
+ // If we know that the call does not free memory, or we want to process the
+ // call later, keep tracking the top level arguments.
+ SymbolRef EscapingSymbol = nullptr;
+ if (Kind == PSK_DirectEscapeOnCall &&
+ !mayFreeAnyEscapedMemoryOrIsModeledExplicitly(Call, State,
+ EscapingSymbol) &&
+ !EscapingSymbol) {
+ return State;
+ }
+
+ for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
+ E = Escaped.end();
+ I != E; ++I) {
+ SymbolRef sym = *I;
+
+ if (EscapingSymbol && EscapingSymbol != sym)
+ continue;
+
+ if (const RefState *RS = State->get<RegionState>(sym)) {
+ if ((RS->isAllocated() || RS->isAllocatedOfSizeZero()) &&
+ CheckRefState(RS)) {
+ State = State->remove<RegionState>(sym);
+ State = State->set<RegionState>(sym, RefState::getEscaped(RS));
+ }
+ }
+ }
+ return State;
+}
+
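+// Note: on a const-pointer escape, only regions from the AF_CXXNew and
+// AF_CXXNewArray families are marked Escaped (checkIfNewOrNewArrayFamily);
+// other allocations stay tracked across calls taking a const pointer.
+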
+static SymbolRef findFailedReallocSymbol(ProgramStateRef currState,
+ ProgramStateRef prevState) {
+ ReallocPairsTy currMap = currState->get<ReallocPairs>();
+ ReallocPairsTy prevMap = prevState->get<ReallocPairs>();
+
+ for (ReallocPairsTy::iterator I = prevMap.begin(), E = prevMap.end();
+ I != E; ++I) {
+ SymbolRef sym = I.getKey();
+ if (!currMap.lookup(sym))
+ return sym;
+ }
+
+ return nullptr;
+}
+
+PathDiagnosticPiece *
+MallocChecker::MallocBugVisitor::VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+ ProgramStateRef state = N->getState();
+ ProgramStateRef statePrev = PrevN->getState();
+
+ const RefState *RS = state->get<RegionState>(Sym);
+ const RefState *RSPrev = statePrev->get<RegionState>(Sym);
+ if (!RS)
+ return nullptr;
+
+ const Stmt *S = nullptr;
+ const char *Msg = nullptr;
+ StackHintGeneratorForSymbol *StackHint = nullptr;
+
+ // Retrieve the associated statement.
+ ProgramPoint ProgLoc = N->getLocation();
+ if (Optional<StmtPoint> SP = ProgLoc.getAs<StmtPoint>()) {
+ S = SP->getStmt();
+ } else if (Optional<CallExitEnd> Exit = ProgLoc.getAs<CallExitEnd>()) {
+ S = Exit->getCalleeContext()->getCallSite();
+ } else if (Optional<BlockEdge> Edge = ProgLoc.getAs<BlockEdge>()) {
+ // If an assumption was made on a branch, it should be caught
+ // here by looking at the state transition.
+ S = Edge->getSrc()->getTerminator();
+ }
+
+ if (!S)
+ return nullptr;
+
+ // FIXME: We will eventually need to handle non-statement-based events
+ // (__attribute__((cleanup))).
+
+ // Find out if this is an interesting point and what is the kind.
+ if (Mode == Normal) {
+ if (isAllocated(RS, RSPrev, S)) {
+ Msg = "Memory is allocated";
+ StackHint = new StackHintGeneratorForSymbol(Sym,
+ "Returned allocated memory");
+ } else if (isReleased(RS, RSPrev, S)) {
+ Msg = "Memory is released";
+ StackHint = new StackHintGeneratorForSymbol(Sym,
+ "Returning; memory was released");
+ } else if (isRelinquished(RS, RSPrev, S)) {
+ Msg = "Memory ownership is transferred";
+ StackHint = new StackHintGeneratorForSymbol(Sym, "");
+ } else if (isReallocFailedCheck(RS, RSPrev, S)) {
+ Mode = ReallocationFailed;
+ Msg = "Reallocation failed";
+ StackHint = new StackHintGeneratorForReallocationFailed(Sym,
+ "Reallocation failed");
+
+ if (SymbolRef sym = findFailedReallocSymbol(state, statePrev)) {
+ // Is it possible to fail two reallocs WITHOUT testing in between?
+ assert((!FailedReallocSymbol || FailedReallocSymbol == sym) &&
+ "We only support one failed realloc at a time.");
+ BR.markInteresting(sym);
+ FailedReallocSymbol = sym;
+ }
+ }
+
+ // We are in a special mode if a reallocation failed later in the path.
+ } else if (Mode == ReallocationFailed) {
+ assert(FailedReallocSymbol && "No symbol to look for.");
+
+ // Is this the first appearance of the reallocated symbol?
+ if (!statePrev->get<RegionState>(FailedReallocSymbol)) {
+ // We're at the reallocation point.
+ Msg = "Attempt to reallocate memory";
+ StackHint = new StackHintGeneratorForSymbol(Sym,
+ "Returned reallocated memory");
+ FailedReallocSymbol = nullptr;
+ Mode = Normal;
+ }
+ }
+
+ if (!Msg)
+ return nullptr;
+ assert(StackHint);
+
+ // Generate the extra diagnostic.
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+ N->getLocationContext());
+ return new PathDiagnosticEventPiece(Pos, Msg, true, StackHint);
+}
+
+void MallocChecker::printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const {
+
+ RegionStateTy RS = State->get<RegionState>();
+
+ if (!RS.isEmpty()) {
+ Out << Sep << "MallocChecker :" << NL;
+ for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
+ const RefState *RefS = State->get<RegionState>(I.getKey());
+ AllocationFamily Family = RefS->getAllocationFamily();
+ Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
+ if (!CheckKind.hasValue())
+ CheckKind = getCheckIfTracked(Family, true);
+
+ I.getKey()->dumpToStream(Out);
+ Out << " : ";
+ I.getData().dump(Out);
+ if (CheckKind.hasValue())
+ Out << " (" << CheckNames[*CheckKind].getName() << ")";
+ Out << NL;
+ }
+ }
+}
+
+void ento::registerNewDeleteLeaksChecker(CheckerManager &mgr) {
+ registerCStringCheckerBasic(mgr);
+ MallocChecker *checker = mgr.registerChecker<MallocChecker>();
+ checker->IsOptimistic = mgr.getAnalyzerOptions().getBooleanOption(
+ "Optimistic", false, checker);
+ checker->ChecksEnabled[MallocChecker::CK_NewDeleteLeaksChecker] = true;
+ checker->CheckNames[MallocChecker::CK_NewDeleteLeaksChecker] =
+ mgr.getCurrentCheckName();
+ // We currently treat NewDeleteLeaks checker as a subchecker of NewDelete
+ // checker.
+ if (!checker->ChecksEnabled[MallocChecker::CK_NewDeleteChecker])
+ checker->ChecksEnabled[MallocChecker::CK_NewDeleteChecker] = true;
+}
+
+#define REGISTER_CHECKER(name) \
+ void ento::register##name(CheckerManager &mgr) { \
+ registerCStringCheckerBasic(mgr); \
+ MallocChecker *checker = mgr.registerChecker<MallocChecker>(); \
+ checker->IsOptimistic = mgr.getAnalyzerOptions().getBooleanOption( \
+ "Optimistic", false, checker); \
+ checker->ChecksEnabled[MallocChecker::CK_##name] = true; \
+ checker->CheckNames[MallocChecker::CK_##name] = mgr.getCurrentCheckName(); \
+ }
+
+REGISTER_CHECKER(MallocChecker)
+REGISTER_CHECKER(NewDeleteChecker)
+REGISTER_CHECKER(MismatchedDeallocatorChecker)
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
new file mode 100644
index 0000000..99ba90d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
@@ -0,0 +1,337 @@
+// MallocOverflowSecurityChecker.cpp - Check for malloc overflows -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker detects a common memory allocation security flaw.
+// Suppose 'unsigned int n' comes from an untrusted source. If the
+// code looks like 'malloc (n * 4)', and an attacker can make 'n' be
+// say MAX_UINT/4+2, then instead of allocating the correct 'n' 4-byte
+// elements, this will actually allocate only one because of overflow.
+// Then when the rest of the program attempts to store values past the
+// first element, these values will actually overwrite other items in
+// the heap, probably allowing the attacker to execute arbitrary code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/SmallVector.h"
+#include <functional>
+
+using namespace clang;
+using namespace ento;
+using llvm::APInt;
+using llvm::APSInt;
+
+namespace {
+struct MallocOverflowCheck {
+ const BinaryOperator *mulop;
+ const Expr *variable;
+ APSInt maxVal;
+
+ MallocOverflowCheck(const BinaryOperator *m, const Expr *v, APSInt val)
+ : mulop(m), variable(v), maxVal(val) {}
+};
+
+class MallocOverflowSecurityChecker : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager &mgr,
+ BugReporter &BR) const;
+
+ void CheckMallocArgument(
+ SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
+ const Expr *TheArgument, ASTContext &Context) const;
+
+ void OutputPossibleOverflows(
+ SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
+ const Decl *D, BugReporter &BR, AnalysisManager &mgr) const;
+
+};
+} // end anonymous namespace
+
+// Return true for computations which evaluate to zero: e.g., mult by 0.
+static inline bool EvaluatesToZero(APSInt &Val, BinaryOperatorKind op) {
+ return (op == BO_Mul) && (Val == 0);
+}
+
+void MallocOverflowSecurityChecker::CheckMallocArgument(
+ SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
+ const Expr *TheArgument,
+ ASTContext &Context) const {
+
+ /* Look for a linear combination with a single variable, and at least
+ one multiplication.
+ Reject anything that applies to the variable: an explicit cast,
+ conditional expression, an operation that could reduce the range
+ of the result, or anything too complicated :-). */
+ const Expr *e = TheArgument;
+ const BinaryOperator * mulop = nullptr;
+ APSInt maxVal;
+
+ for (;;) {
+ maxVal = 0;
+ e = e->IgnoreParenImpCasts();
+ if (const BinaryOperator *binop = dyn_cast<BinaryOperator>(e)) {
+ BinaryOperatorKind opc = binop->getOpcode();
+ // TODO: ignore multiplications by 1; multiplication by 0 is rejected below.
+ if (mulop == nullptr && opc == BO_Mul)
+ mulop = binop;
+ if (opc != BO_Mul && opc != BO_Add && opc != BO_Sub && opc != BO_Shl)
+ return;
+
+ const Expr *lhs = binop->getLHS();
+ const Expr *rhs = binop->getRHS();
+ if (rhs->isEvaluatable(Context)) {
+ e = lhs;
+ maxVal = rhs->EvaluateKnownConstInt(Context);
+ if (EvaluatesToZero(maxVal, opc))
+ return;
+ } else if ((opc == BO_Add || opc == BO_Mul) &&
+ lhs->isEvaluatable(Context)) {
+ maxVal = lhs->EvaluateKnownConstInt(Context);
+ if (EvaluatesToZero(maxVal, opc))
+ return;
+ e = rhs;
+ } else
+ return;
+ }
+ else if (isa<DeclRefExpr>(e) || isa<MemberExpr>(e))
+ break;
+ else
+ return;
+ }
+
+ if (mulop == nullptr)
+ return;
+
+ // We've found the right structure of malloc argument, now save
+ // the data so when the body of the function is completely available
+ // we can check for comparisons.
+
+ // TODO: Could push this into the innermost scope where 'e' is
+ // defined, rather than the whole function.
+ PossibleMallocOverflows.push_back(MallocOverflowCheck(mulop, e, maxVal));
+}
+
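+// Example of the pattern CheckMallocArgument records (sketch;
+// 'untrusted_input' is a placeholder name):
+//   unsigned n = untrusted_input();
+//   char *buf = (char *)malloc(n * 4); // saved: mulop = 'n * 4',
+//                                      // variable = 'n', maxVal = 4
+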
+namespace {
+// A worker class for OutputPossibleOverflows.
+class CheckOverflowOps :
+ public EvaluatedExprVisitor<CheckOverflowOps> {
+public:
+ typedef SmallVectorImpl<MallocOverflowCheck> theVecType;
+
+private:
+ theVecType &toScanFor;
+ ASTContext &Context;
+
+ bool isIntZeroExpr(const Expr *E) const {
+ if (!E->getType()->isIntegralOrEnumerationType())
+ return false;
+ llvm::APSInt Result;
+ if (E->EvaluateAsInt(Result, Context))
+ return Result == 0;
+ return false;
+ }
+
+ const Decl *getDecl(const DeclRefExpr *DR) { return DR->getDecl(); }
+
+ const Decl *getDecl(const MemberExpr *ME) { return ME->getMemberDecl(); }
+
+ template <typename T1>
+ void Erase(const T1 *DR, std::function<bool(theVecType::iterator)> pred) {
+ theVecType::iterator i = toScanFor.end();
+ theVecType::iterator e = toScanFor.begin();
+ while (i != e) {
+ --i;
+ if (const T1 *DR_i = dyn_cast<T1>(i->variable)) {
+ if ((getDecl(DR_i) == getDecl(DR)) && pred(i))
+ i = toScanFor.erase(i);
+ }
+ }
+ }
+
+ void CheckExpr(const Expr *E_p) {
+ auto PredTrue = [](theVecType::iterator) -> bool { return true; };
+ const Expr *E = E_p->IgnoreParenImpCasts();
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E))
+ Erase<DeclRefExpr>(DR, PredTrue);
+ else if (const auto *ME = dyn_cast<MemberExpr>(E)) {
+ Erase<MemberExpr>(ME, PredTrue);
+ }
+ }
+
+ // Check if the argument to malloc is assigned a value
+ // which cannot cause an overflow, e.g., for malloc(mul * x):
+ // case 1: mul = <constant value>
+ // case 2: mul = a/b, where b > x
+ void CheckAssignmentExpr(BinaryOperator *AssignEx) {
+ bool assignKnown = false;
+ bool numeratorKnown = false, denomKnown = false;
+ APSInt denomVal;
+ denomVal = 0;
+
+ // Erase if the multiplicand was assigned a constant value.
+ const Expr *rhs = AssignEx->getRHS();
+ if (rhs->isEvaluatable(Context))
+ assignKnown = true;
+
+ // Discard the report if the multiplicand was assigned a value that can
+ // never overflow after multiplication, e.g., when the assignment is a
+ // division and the denominator is greater than the other multiplicand.
+ const Expr *rhse = rhs->IgnoreParenImpCasts();
+ if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(rhse)) {
+ if (BOp->getOpcode() == BO_Div) {
+ const Expr *denom = BOp->getRHS()->IgnoreParenImpCasts();
+ if (denom->EvaluateAsInt(denomVal, Context))
+ denomKnown = true;
+ const Expr *numerator = BOp->getLHS()->IgnoreParenImpCasts();
+ if (numerator->isEvaluatable(Context))
+ numeratorKnown = true;
+ }
+ }
+ if (!assignKnown && !denomKnown)
+ return;
+ auto denomExtVal = denomVal.getExtValue();
+
+ // Ignore negative denominator.
+ if (denomExtVal < 0)
+ return;
+
+ const Expr *lhs = AssignEx->getLHS();
+ const Expr *E = lhs->IgnoreParenImpCasts();
+
+ auto pred = [assignKnown, numeratorKnown,
+ denomExtVal](theVecType::iterator i) {
+ return assignKnown ||
+ (numeratorKnown && (denomExtVal >= i->maxVal.getExtValue()));
+ };
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E))
+ Erase<DeclRefExpr>(DR, pred);
+ else if (const auto *ME = dyn_cast<MemberExpr>(E))
+ Erase<MemberExpr>(ME, pred);
+ }
+
+ public:
+ void VisitBinaryOperator(BinaryOperator *E) {
+ if (E->isComparisonOp()) {
+ const Expr * lhs = E->getLHS();
+ const Expr * rhs = E->getRHS();
+ // Ignore comparisons against zero, since they generally don't
+ // protect against an overflow.
+ if (!isIntZeroExpr(lhs) && !isIntZeroExpr(rhs)) {
+ CheckExpr(lhs);
+ CheckExpr(rhs);
+ }
+ }
+ if (E->isAssignmentOp())
+ CheckAssignmentExpr(E);
+ EvaluatedExprVisitor<CheckOverflowOps>::VisitBinaryOperator(E);
+ }
+
+ /* We specifically ignore loop conditions, because they're typically
+ not error checks. */
+ void VisitWhileStmt(WhileStmt *S) {
+ return this->Visit(S->getBody());
+ }
+ void VisitForStmt(ForStmt *S) {
+ return this->Visit(S->getBody());
+ }
+ void VisitDoStmt(DoStmt *S) {
+ return this->Visit(S->getBody());
+ }
+
+ CheckOverflowOps(theVecType &v, ASTContext &ctx)
+ : EvaluatedExprVisitor<CheckOverflowOps>(ctx),
+ toScanFor(v), Context(ctx)
+ { }
+ };
+}
+
+// OutputPossibleOverflows - We've found a possible overflow earlier,
+// now check whether Body might contain a comparison which might be
+// preventing the overflow.
+// This doesn't do flow analysis, range analysis, or points-to analysis; it's
+// just a dumb "is there a comparison" scan. The aim here is to
+// detect the most blatant cases of overflow and educate the
+// programmer.
+void MallocOverflowSecurityChecker::OutputPossibleOverflows(
+ SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
+ const Decl *D, BugReporter &BR, AnalysisManager &mgr) const {
+ // By far the most common case: nothing to check.
+ if (PossibleMallocOverflows.empty())
+ return;
+
+ // Delete any possible overflows which have a comparison.
+ CheckOverflowOps c(PossibleMallocOverflows, BR.getContext());
+ c.Visit(mgr.getAnalysisDeclContext(D)->getBody());
+
+ // Output warnings for all overflows that are left.
+ for (CheckOverflowOps::theVecType::iterator
+ i = PossibleMallocOverflows.begin(),
+ e = PossibleMallocOverflows.end();
+ i != e;
+ ++i) {
+ BR.EmitBasicReport(
+ D, this, "malloc() size overflow", categories::UnixAPI,
+ "the computation of the size of the memory allocation may overflow",
+ PathDiagnosticLocation::createOperatorLoc(i->mulop,
+ BR.getSourceManager()),
+ i->mulop->getSourceRange());
+ }
+}
+
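+// Example of a guard that suppresses a report (sketch): any comparison of the
+// multiplicand against a non-zero value erases the pending entry:
+//   if (n > 1000) return; // 'malloc(n * 4)' is no longer reported
+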
+void MallocOverflowSecurityChecker::checkASTCodeBody(const Decl *D,
+ AnalysisManager &mgr,
+ BugReporter &BR) const {
+
+ CFG *cfg = mgr.getCFG(D);
+ if (!cfg)
+ return;
+
+ // A list of variables referenced in possibly overflowing malloc operands.
+ SmallVector<MallocOverflowCheck, 2> PossibleMallocOverflows;
+
+ for (CFG::iterator it = cfg->begin(), ei = cfg->end(); it != ei; ++it) {
+ CFGBlock *block = *it;
+ for (CFGBlock::iterator bi = block->begin(), be = block->end();
+ bi != be; ++bi) {
+ if (Optional<CFGStmt> CS = bi->getAs<CFGStmt>()) {
+ if (const CallExpr *TheCall = dyn_cast<CallExpr>(CS->getStmt())) {
+ // Get the callee.
+ const FunctionDecl *FD = TheCall->getDirectCallee();
+
+ if (!FD)
+ continue;
+
+ // Get the name of the callee; skip callees without a simple identifier.
+ IdentifierInfo *FnInfo = FD->getIdentifier();
+ if (!FnInfo)
+ continue;
+
+ if (FnInfo->isStr("malloc") || FnInfo->isStr("_MALLOC")) {
+ if (TheCall->getNumArgs() == 1)
+ CheckMallocArgument(PossibleMallocOverflows, TheCall->getArg(0),
+ mgr.getASTContext());
+ }
+ }
+ }
+ }
+ }
+
+ OutputPossibleOverflows(PossibleMallocOverflows, D, BR, mgr);
+}
+
+void
+ento::registerMallocOverflowSecurityChecker(CheckerManager &mgr) {
+ mgr.registerChecker<MallocOverflowSecurityChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
new file mode 100644
index 0000000..80a3fbe
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
@@ -0,0 +1,252 @@
+// MallocSizeofChecker.cpp - Check for dubious malloc arguments ---*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Reports inconsistencies between the casted type of the return value of a
+// malloc/calloc/realloc call and the operand of any sizeof expressions
+// contained within its argument(s).
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+typedef std::pair<const TypeSourceInfo *, const CallExpr *> TypeCallPair;
+typedef llvm::PointerUnion<const Stmt *, const VarDecl *> ExprParent;
+
+class CastedAllocFinder
+ : public ConstStmtVisitor<CastedAllocFinder, TypeCallPair> {
+ IdentifierInfo *II_malloc, *II_calloc, *II_realloc;
+
+public:
+ struct CallRecord {
+ ExprParent CastedExprParent;
+ const Expr *CastedExpr;
+ const TypeSourceInfo *ExplicitCastType;
+ const CallExpr *AllocCall;
+
+ CallRecord(ExprParent CastedExprParent, const Expr *CastedExpr,
+ const TypeSourceInfo *ExplicitCastType,
+ const CallExpr *AllocCall)
+ : CastedExprParent(CastedExprParent), CastedExpr(CastedExpr),
+ ExplicitCastType(ExplicitCastType), AllocCall(AllocCall) {}
+ };
+
+ typedef std::vector<CallRecord> CallVec;
+ CallVec Calls;
+
+ CastedAllocFinder(ASTContext *Ctx) :
+ II_malloc(&Ctx->Idents.get("malloc")),
+ II_calloc(&Ctx->Idents.get("calloc")),
+ II_realloc(&Ctx->Idents.get("realloc")) {}
+
+ void VisitChild(ExprParent Parent, const Stmt *S) {
+ TypeCallPair AllocCall = Visit(S);
+ if (AllocCall.second && AllocCall.second != S)
+ Calls.push_back(CallRecord(Parent, cast<Expr>(S), AllocCall.first,
+ AllocCall.second));
+ }
+
+ void VisitChildren(const Stmt *S) {
+ for (const Stmt *Child : S->children())
+ if (Child)
+ VisitChild(S, Child);
+ }
+
+ TypeCallPair VisitCastExpr(const CastExpr *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ TypeCallPair VisitExplicitCastExpr(const ExplicitCastExpr *E) {
+ return TypeCallPair(E->getTypeInfoAsWritten(),
+ Visit(E->getSubExpr()).second);
+ }
+
+ TypeCallPair VisitParenExpr(const ParenExpr *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ TypeCallPair VisitStmt(const Stmt *S) {
+ VisitChildren(S);
+ return TypeCallPair();
+ }
+
+ TypeCallPair VisitCallExpr(const CallExpr *E) {
+ VisitChildren(E);
+ const FunctionDecl *FD = E->getDirectCallee();
+ if (FD) {
+ IdentifierInfo *II = FD->getIdentifier();
+ if (II == II_malloc || II == II_calloc || II == II_realloc)
+ return TypeCallPair((const TypeSourceInfo *)nullptr, E);
+ }
+ return TypeCallPair();
+ }
+
+ TypeCallPair VisitDeclStmt(const DeclStmt *S) {
+ for (const auto *I : S->decls())
+ if (const VarDecl *VD = dyn_cast<VarDecl>(I))
+ if (const Expr *Init = VD->getInit())
+ VisitChild(VD, Init);
+ return TypeCallPair();
+ }
+};
+
+class SizeofFinder : public ConstStmtVisitor<SizeofFinder> {
+public:
+ std::vector<const UnaryExprOrTypeTraitExpr *> Sizeofs;
+
+ void VisitBinMul(const BinaryOperator *E) {
+ Visit(E->getLHS());
+ Visit(E->getRHS());
+ }
+
+ void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ void VisitParenExpr(const ParenExpr *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ void VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E) {
+ if (E->getKind() != UETT_SizeOf)
+ return;
+
+ Sizeofs.push_back(E);
+ }
+};
+
+// Determine if the pointee and sizeof types are compatible. Here
+// we ignore constness of pointer types.
+static bool typesCompatible(ASTContext &C, QualType A, QualType B) {
+ // sizeof(void*) is compatible with any other pointer.
+ if (B->isVoidPointerType() && A->getAs<PointerType>())
+ return true;
+
+ while (true) {
+ A = A.getCanonicalType();
+ B = B.getCanonicalType();
+
+ if (A.getTypePtr() == B.getTypePtr())
+ return true;
+
+ if (const PointerType *ptrA = A->getAs<PointerType>())
+ if (const PointerType *ptrB = B->getAs<PointerType>()) {
+ A = ptrA->getPointeeType();
+ B = ptrB->getPointeeType();
+ continue;
+ }
+
+ break;
+ }
+
+ return false;
+}
+
+static bool compatibleWithArrayType(ASTContext &C, QualType PT, QualType T) {
+ // Ex: 'int a[10][2]' is compatible with 'int', 'int[2]', 'int[10][2]'.
+ while (const ArrayType *AT = T->getAsArrayTypeUnsafe()) {
+ QualType ElemType = AT->getElementType();
+    if (typesCompatible(C, PT, ElemType))
+ return true;
+ T = ElemType;
+ }
+
+ return false;
+}
+
+class MallocSizeofChecker : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ AnalysisDeclContext *ADC = mgr.getAnalysisDeclContext(D);
+ CastedAllocFinder Finder(&BR.getContext());
+ Finder.Visit(D->getBody());
+ for (CastedAllocFinder::CallVec::iterator i = Finder.Calls.begin(),
+ e = Finder.Calls.end(); i != e; ++i) {
+ QualType CastedType = i->CastedExpr->getType();
+ if (!CastedType->isPointerType())
+ continue;
+ QualType PointeeType = CastedType->getAs<PointerType>()->getPointeeType();
+ if (PointeeType->isVoidType())
+ continue;
+
+ for (CallExpr::const_arg_iterator ai = i->AllocCall->arg_begin(),
+ ae = i->AllocCall->arg_end(); ai != ae; ++ai) {
+ if (!(*ai)->getType()->isIntegralOrUnscopedEnumerationType())
+ continue;
+
+ SizeofFinder SFinder;
+ SFinder.Visit(*ai);
+ if (SFinder.Sizeofs.size() != 1)
+ continue;
+
+ QualType SizeofType = SFinder.Sizeofs[0]->getTypeOfArgument();
+
+ if (typesCompatible(BR.getContext(), PointeeType, SizeofType))
+ continue;
+
+ // If the argument to sizeof is an array, the result could be a
+ // pointer to any array element.
+ if (compatibleWithArrayType(BR.getContext(), PointeeType, SizeofType))
+ continue;
+
+ const TypeSourceInfo *TSI = nullptr;
+ if (i->CastedExprParent.is<const VarDecl *>()) {
+ TSI =
+ i->CastedExprParent.get<const VarDecl *>()->getTypeSourceInfo();
+ } else {
+ TSI = i->ExplicitCastType;
+ }
+
+ SmallString<64> buf;
+ llvm::raw_svector_ostream OS(buf);
+
+ OS << "Result of ";
+ const FunctionDecl *Callee = i->AllocCall->getDirectCallee();
+ if (Callee && Callee->getIdentifier())
+ OS << '\'' << Callee->getIdentifier()->getName() << '\'';
+ else
+ OS << "call";
+ OS << " is converted to a pointer of type '"
+ << PointeeType.getAsString() << "', which is incompatible with "
+ << "sizeof operand type '" << SizeofType.getAsString() << "'";
+ SmallVector<SourceRange, 4> Ranges;
+ Ranges.push_back(i->AllocCall->getCallee()->getSourceRange());
+ Ranges.push_back(SFinder.Sizeofs[0]->getSourceRange());
+ if (TSI)
+ Ranges.push_back(TSI->getTypeLoc().getSourceRange());
+
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::createBegin(i->AllocCall->getCallee(),
+ BR.getSourceManager(), ADC);
+
+ BR.EmitBasicReport(D, this, "Allocator sizeof operand mismatch",
+ categories::UnixAPI, OS.str(), L, Ranges);
+ }
+ }
+ }
+};
+
+}
+
+void ento::registerMallocSizeofChecker(CheckerManager &mgr) {
+ mgr.registerChecker<MallocSizeofChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
new file mode 100644
index 0000000..0e78947
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
@@ -0,0 +1,81 @@
+//=- NSAutoreleasePoolChecker.cpp --------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an NSAutoreleasePoolChecker, a small checker that warns
+// about subpar uses of NSAutoreleasePool. Note that while the check itself
+// (in its current form) could be written as a flow-insensitive check, it
+// can potentially be enhanced in the future with flow-sensitive information.
+// It is also a good example of the CheckerVisitor interface.
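+//
+// For example (a hypothetical snippet compiled with garbage collection
+// enabled, which is when this checker is registered):
+//
+//   NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+//   ...
+//   [pool release]; // warn: use -drain instead of -release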
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class NSAutoreleasePoolChecker
+ : public Checker<check::PreObjCMessage> {
+ mutable std::unique_ptr<BugType> BT;
+ mutable Selector releaseS;
+
+public:
+ void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
+};
+
+} // end anonymous namespace
+
+void NSAutoreleasePoolChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
+ CheckerContext &C) const {
+ if (!msg.isInstanceMessage())
+ return;
+
+ const ObjCInterfaceDecl *OD = msg.getReceiverInterface();
+ if (!OD)
+ return;
+ if (!OD->getIdentifier()->isStr("NSAutoreleasePool"))
+ return;
+
+ if (releaseS.isNull())
+ releaseS = GetNullarySelector("release", C.getASTContext());
+ // Sending 'release' message?
+ if (msg.getSelector() != releaseS)
+ return;
+
+ if (!BT)
+ BT.reset(new BugType(this, "Use -drain instead of -release",
+ "API Upgrade (Apple)"));
+
+ ExplodedNode *N = C.generateNonFatalErrorNode();
+ if (!N) {
+ assert(0);
+ return;
+ }
+
+ auto Report = llvm::make_unique<BugReport>(
+ *BT, "Use -drain instead of -release when using NSAutoreleasePool and "
+ "garbage collection", N);
+ Report->addRange(msg.getSourceRange());
+ C.emitReport(std::move(Report));
+}
+
+void ento::registerNSAutoreleasePoolChecker(CheckerManager &mgr) {
+ if (mgr.getLangOpts().getGC() != LangOptions::NonGC)
+ mgr.registerChecker<NSAutoreleasePoolChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
new file mode 100644
index 0000000..dab068b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
@@ -0,0 +1,324 @@
+//=- NSErrorChecker.cpp - Coding conventions for uses of NSError -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a CheckNSError, a flow-insensitive check
+// that determines if an Objective-C method or C function that takes an
+// NSError**/CFErrorRef* out parameter correctly returns a non-void value.
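+//
+// For example, a (hypothetical) method like
+//
+//   - (void)loadDataWithError:(NSError **)error { ... }
+//
+// is flagged, because callers have no return value telling them whether an
+// error occurred.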
+//
+// File under feature request PR 2600.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+static bool IsNSError(QualType T, IdentifierInfo *II);
+static bool IsCFError(QualType T, IdentifierInfo *II);
+
+//===----------------------------------------------------------------------===//
+// NSErrorMethodChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class NSErrorMethodChecker
+ : public Checker< check::ASTDecl<ObjCMethodDecl> > {
+ mutable IdentifierInfo *II;
+
+public:
+ NSErrorMethodChecker() : II(nullptr) {}
+
+ void checkASTDecl(const ObjCMethodDecl *D,
+ AnalysisManager &mgr, BugReporter &BR) const;
+};
+}
+
+void NSErrorMethodChecker::checkASTDecl(const ObjCMethodDecl *D,
+ AnalysisManager &mgr,
+ BugReporter &BR) const {
+ if (!D->isThisDeclarationADefinition())
+ return;
+ if (!D->getReturnType()->isVoidType())
+ return;
+
+ if (!II)
+ II = &D->getASTContext().Idents.get("NSError");
+
+ bool hasNSError = false;
+ for (const auto *I : D->params()) {
+ if (IsNSError(I->getType(), II)) {
+ hasNSError = true;
+ break;
+ }
+ }
+
+ if (hasNSError) {
+ const char *err = "Method accepting NSError** "
+ "should have a non-void return value to indicate whether or not an "
+ "error occurred";
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::create(D, BR.getSourceManager());
+ BR.EmitBasicReport(D, this, "Bad return type when passing NSError**",
+ "Coding conventions (Apple)", err, L);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// CFErrorFunctionChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CFErrorFunctionChecker
+ : public Checker< check::ASTDecl<FunctionDecl> > {
+ mutable IdentifierInfo *II;
+
+public:
+ CFErrorFunctionChecker() : II(nullptr) {}
+
+ void checkASTDecl(const FunctionDecl *D,
+ AnalysisManager &mgr, BugReporter &BR) const;
+};
+}
+
+void CFErrorFunctionChecker::checkASTDecl(const FunctionDecl *D,
+ AnalysisManager &mgr,
+ BugReporter &BR) const {
+ if (!D->doesThisDeclarationHaveABody())
+ return;
+ if (!D->getReturnType()->isVoidType())
+ return;
+
+ if (!II)
+ II = &D->getASTContext().Idents.get("CFErrorRef");
+
+ bool hasCFError = false;
+  for (const auto *I : D->params()) {
+ if (IsCFError(I->getType(), II)) {
+ hasCFError = true;
+ break;
+ }
+ }
+
+ if (hasCFError) {
+ const char *err = "Function accepting CFErrorRef* "
+ "should have a non-void return value to indicate whether or not an "
+ "error occurred";
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::create(D, BR.getSourceManager());
+ BR.EmitBasicReport(D, this, "Bad return type when passing CFErrorRef*",
+ "Coding conventions (Apple)", err, L);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// NSOrCFErrorDerefChecker
+//===----------------------------------------------------------------------===//
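+
+// A (hypothetical) example of the pattern this checker warns about: writing
+// through an NSError** out parameter without first checking it for null.
+//
+//   - (BOOL)doWorkAndReturnError:(NSError **)error {
+//     *error = nil; // warn: 'error' may itself be null
+//     ...
+//   }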
+
+namespace {
+
+class NSErrorDerefBug : public BugType {
+public:
+ NSErrorDerefBug(const CheckerBase *Checker)
+ : BugType(Checker, "NSError** null dereference",
+ "Coding conventions (Apple)") {}
+};
+
+class CFErrorDerefBug : public BugType {
+public:
+ CFErrorDerefBug(const CheckerBase *Checker)
+ : BugType(Checker, "CFErrorRef* null dereference",
+ "Coding conventions (Apple)") {}
+};
+
+}
+
+namespace {
+class NSOrCFErrorDerefChecker
+ : public Checker< check::Location,
+ check::Event<ImplicitNullDerefEvent> > {
+ mutable IdentifierInfo *NSErrorII, *CFErrorII;
+ mutable std::unique_ptr<NSErrorDerefBug> NSBT;
+ mutable std::unique_ptr<CFErrorDerefBug> CFBT;
+public:
+ bool ShouldCheckNSError, ShouldCheckCFError;
+  NSOrCFErrorDerefChecker() : NSErrorII(nullptr), CFErrorII(nullptr),
+                              ShouldCheckNSError(false),
+                              ShouldCheckCFError(false) {}
+
+ void checkLocation(SVal loc, bool isLoad, const Stmt *S,
+ CheckerContext &C) const;
+ void checkEvent(ImplicitNullDerefEvent event) const;
+};
+}
+
+typedef llvm::ImmutableMap<SymbolRef, unsigned> ErrorOutFlag;
+REGISTER_TRAIT_WITH_PROGRAMSTATE(NSErrorOut, ErrorOutFlag)
+REGISTER_TRAIT_WITH_PROGRAMSTATE(CFErrorOut, ErrorOutFlag)
+
+template <typename T>
+static bool hasFlag(SVal val, ProgramStateRef state) {
+ if (SymbolRef sym = val.getAsSymbol())
+ if (const unsigned *attachedFlags = state->get<T>(sym))
+ return *attachedFlags;
+ return false;
+}
+
+template <typename T>
+static void setFlag(ProgramStateRef state, SVal val, CheckerContext &C) {
+ // We tag the symbol that the SVal wraps.
+ if (SymbolRef sym = val.getAsSymbol())
+ C.addTransition(state->set<T>(sym, true));
+}
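+
+// For example: when checkLocation below sees a load from an NSError** (or
+// CFErrorRef*) parameter, setFlag<NSErrorOut>(state, val, C) tags the loaded
+// symbol; if that symbol is later stored through while possibly null,
+// hasFlag<NSErrorOut>(loc, state) lets checkEvent attribute the report to the
+// out parameter.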
+
+static QualType parameterTypeFromSVal(SVal val, CheckerContext &C) {
+  const StackFrameContext *SFC =
+      C.getLocationContext()->getCurrentStackFrame();
+ if (Optional<loc::MemRegionVal> X = val.getAs<loc::MemRegionVal>()) {
+ const MemRegion* R = X->getRegion();
+ if (const VarRegion *VR = R->getAs<VarRegion>())
+ if (const StackArgumentsSpaceRegion *
+ stackReg = dyn_cast<StackArgumentsSpaceRegion>(VR->getMemorySpace()))
+ if (stackReg->getStackFrame() == SFC)
+ return VR->getValueType();
+ }
+
+ return QualType();
+}
+
+void NSOrCFErrorDerefChecker::checkLocation(SVal loc, bool isLoad,
+ const Stmt *S,
+ CheckerContext &C) const {
+ if (!isLoad)
+ return;
+ if (loc.isUndef() || !loc.getAs<Loc>())
+ return;
+
+ ASTContext &Ctx = C.getASTContext();
+ ProgramStateRef state = C.getState();
+
+ // If we are loading from NSError**/CFErrorRef* parameter, mark the resulting
+ // SVal so that we can later check it when handling the
+ // ImplicitNullDerefEvent event.
+  // FIXME: Cumbersome! Maybe add a hook at the construction of SVals at the
+  // start of the function?
+
+ QualType parmT = parameterTypeFromSVal(loc, C);
+ if (parmT.isNull())
+ return;
+
+ if (!NSErrorII)
+ NSErrorII = &Ctx.Idents.get("NSError");
+ if (!CFErrorII)
+ CFErrorII = &Ctx.Idents.get("CFErrorRef");
+
+ if (ShouldCheckNSError && IsNSError(parmT, NSErrorII)) {
+ setFlag<NSErrorOut>(state, state->getSVal(loc.castAs<Loc>()), C);
+ return;
+ }
+
+ if (ShouldCheckCFError && IsCFError(parmT, CFErrorII)) {
+ setFlag<CFErrorOut>(state, state->getSVal(loc.castAs<Loc>()), C);
+ return;
+ }
+}
+
+void NSOrCFErrorDerefChecker::checkEvent(ImplicitNullDerefEvent event) const {
+ if (event.IsLoad)
+ return;
+
+ SVal loc = event.Location;
+ ProgramStateRef state = event.SinkNode->getState();
+ BugReporter &BR = *event.BR;
+
+ bool isNSError = hasFlag<NSErrorOut>(loc, state);
+ bool isCFError = false;
+ if (!isNSError)
+ isCFError = hasFlag<CFErrorOut>(loc, state);
+
+ if (!(isNSError || isCFError))
+ return;
+
+ // Storing to possible null NSError/CFErrorRef out parameter.
+ SmallString<128> Buf;
+ llvm::raw_svector_ostream os(Buf);
+
+ os << "Potential null dereference. According to coding standards ";
+ os << (isNSError
+ ? "in 'Creating and Returning NSError Objects' the parameter"
+ : "documented in CoreFoundation/CFError.h the parameter");
+
+ os << " may be null";
+
+ BugType *bug = nullptr;
+ if (isNSError) {
+ if (!NSBT)
+ NSBT.reset(new NSErrorDerefBug(this));
+ bug = NSBT.get();
+ }
+ else {
+ if (!CFBT)
+ CFBT.reset(new CFErrorDerefBug(this));
+ bug = CFBT.get();
+ }
+ BR.emitReport(llvm::make_unique<BugReport>(*bug, os.str(), event.SinkNode));
+}
+
+static bool IsNSError(QualType T, IdentifierInfo *II) {
+
+ const PointerType* PPT = T->getAs<PointerType>();
+ if (!PPT)
+ return false;
+
+ const ObjCObjectPointerType* PT =
+ PPT->getPointeeType()->getAs<ObjCObjectPointerType>();
+
+ if (!PT)
+ return false;
+
+ const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
+
+ // FIXME: Can ID ever be NULL?
+ if (ID)
+ return II == ID->getIdentifier();
+
+ return false;
+}
+
+static bool IsCFError(QualType T, IdentifierInfo *II) {
+ const PointerType* PPT = T->getAs<PointerType>();
+ if (!PPT) return false;
+
+ const TypedefType* TT = PPT->getPointeeType()->getAs<TypedefType>();
+ if (!TT) return false;
+
+ return TT->getDecl()->getIdentifier() == II;
+}
+
+void ento::registerNSErrorChecker(CheckerManager &mgr) {
+ mgr.registerChecker<NSErrorMethodChecker>();
+ NSOrCFErrorDerefChecker *checker =
+ mgr.registerChecker<NSOrCFErrorDerefChecker>();
+ checker->ShouldCheckNSError = true;
+}
+
+void ento::registerCFErrorChecker(CheckerManager &mgr) {
+ mgr.registerChecker<CFErrorFunctionChecker>();
+ NSOrCFErrorDerefChecker *checker =
+ mgr.registerChecker<NSOrCFErrorDerefChecker>();
+ checker->ShouldCheckCFError = true;
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
new file mode 100644
index 0000000..c1deade
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
@@ -0,0 +1,145 @@
+//=== NoReturnFunctionChecker.cpp -------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines NoReturnFunctionChecker, which evaluates functions that do not
+// return to the caller.
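+//
+// For example (hypothetical): once a call that cannot return is evaluated, the
+// path is sunk and the code after it is not analyzed further:
+//
+//   if (!p)
+//     panic("p is null"); // hardwired as noreturn by this checker
+//   *p = 1;               // analyzed only on the non-null path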
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "SelectorExtras.h"
+#include "clang/AST/Attr.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/StringSwitch.h"
+#include <cstdarg>
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class NoReturnFunctionChecker : public Checker< check::PostCall,
+ check::PostObjCMessage > {
+ mutable Selector HandleFailureInFunctionSel;
+ mutable Selector HandleFailureInMethodSel;
+public:
+ void checkPostCall(const CallEvent &CE, CheckerContext &C) const;
+ void checkPostObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
+};
+
+}
+
+void NoReturnFunctionChecker::checkPostCall(const CallEvent &CE,
+ CheckerContext &C) const {
+ bool BuildSinks = false;
+
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CE.getDecl()))
+ BuildSinks = FD->hasAttr<AnalyzerNoReturnAttr>() || FD->isNoReturn();
+
+ const Expr *Callee = CE.getOriginExpr();
+ if (!BuildSinks && Callee)
+ BuildSinks = getFunctionExtInfo(Callee->getType()).getNoReturn();
+
+ if (!BuildSinks && CE.isGlobalCFunction()) {
+ if (const IdentifierInfo *II = CE.getCalleeIdentifier()) {
+ // HACK: Some functions are not marked noreturn, and don't return.
+ // Here are a few hardwired ones. If this takes too long, we can
+ // potentially cache these results.
+ BuildSinks
+ = llvm::StringSwitch<bool>(StringRef(II->getName()))
+ .Case("exit", true)
+ .Case("panic", true)
+ .Case("error", true)
+ .Case("Assert", true)
+ // FIXME: This is just a wrapper around throwing an exception.
+ // Eventually inter-procedural analysis should handle this easily.
+ .Case("ziperr", true)
+ .Case("assfail", true)
+ .Case("db_error", true)
+ .Case("__assert", true)
+ .Case("__assert2", true)
+ // For the purpose of static analysis, we do not care that
+ // this MSVC function will return if the user decides to continue.
+ .Case("_wassert", true)
+ .Case("__assert_rtn", true)
+ .Case("__assert_fail", true)
+ .Case("dtrace_assfail", true)
+ .Case("yy_fatal_error", true)
+ .Case("_XCAssertionFailureHandler", true)
+ .Case("_DTAssertionFailureHandler", true)
+ .Case("_TSAssertionFailureHandler", true)
+ .Default(false);
+ }
+ }
+
+ if (BuildSinks)
+ C.generateSink(C.getState(), C.getPredecessor());
+}
+
+void NoReturnFunctionChecker::checkPostObjCMessage(const ObjCMethodCall &Msg,
+ CheckerContext &C) const {
+ // Check if the method is annotated with analyzer_noreturn.
+ if (const ObjCMethodDecl *MD = Msg.getDecl()) {
+ MD = MD->getCanonicalDecl();
+ if (MD->hasAttr<AnalyzerNoReturnAttr>()) {
+ C.generateSink(C.getState(), C.getPredecessor());
+ return;
+ }
+ }
+
+ // HACK: This entire check is to handle two messages in the Cocoa frameworks:
+ // -[NSAssertionHandler
+ // handleFailureInMethod:object:file:lineNumber:description:]
+ // -[NSAssertionHandler
+ // handleFailureInFunction:file:lineNumber:description:]
+ // Eventually these should be annotated with __attribute__((noreturn)).
+ // Because ObjC messages use dynamic dispatch, it is not generally safe to
+ // assume certain methods can't return. In cases where it is definitely valid,
+ // see if you can mark the methods noreturn or analyzer_noreturn instead of
+ // adding more explicit checks to this method.
+
+ if (!Msg.isInstanceMessage())
+ return;
+
+ const ObjCInterfaceDecl *Receiver = Msg.getReceiverInterface();
+ if (!Receiver)
+ return;
+ if (!Receiver->getIdentifier()->isStr("NSAssertionHandler"))
+ return;
+
+ Selector Sel = Msg.getSelector();
+ switch (Sel.getNumArgs()) {
+ default:
+ return;
+ case 4:
+ lazyInitKeywordSelector(HandleFailureInFunctionSel, C.getASTContext(),
+ "handleFailureInFunction", "file", "lineNumber",
+ "description", nullptr);
+ if (Sel != HandleFailureInFunctionSel)
+ return;
+ break;
+ case 5:
+ lazyInitKeywordSelector(HandleFailureInMethodSel, C.getASTContext(),
+ "handleFailureInMethod", "object", "file",
+ "lineNumber", "description", nullptr);
+ if (Sel != HandleFailureInMethodSel)
+ return;
+ break;
+ }
+
+ // If we got here, it's one of the messages we care about.
+ C.generateSink(C.getState(), C.getPredecessor());
+}
+
+void ento::registerNoReturnFunctionChecker(CheckerManager &mgr) {
+ mgr.registerChecker<NoReturnFunctionChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
new file mode 100644
index 0000000..1f82ab9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
@@ -0,0 +1,223 @@
+//===--- NonNullParamChecker.cpp - Undefined arguments checker -*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines NonNullParamChecker, which checks for arguments expected not to
+// be null due to:
+// - the corresponding parameters being declared with the 'nonnull' attribute
+// - the corresponding parameters being references, since the call would form
+// a reference to a null pointer
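+//
+// For example (hypothetical):
+//
+//   void f(int *p) __attribute__((nonnull));
+//   void g(int &r);
+//   ...
+//   f(0);    // warn: null passed to a 'nonnull' parameter
+//   int *q = 0;
+//   g(*q);   // warn: forming a reference to a null pointer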
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Attr.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class NonNullParamChecker
+ : public Checker< check::PreCall, EventDispatcher<ImplicitNullDerefEvent> > {
+ mutable std::unique_ptr<BugType> BTAttrNonNull;
+ mutable std::unique_ptr<BugType> BTNullRefArg;
+
+public:
+
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+
+ std::unique_ptr<BugReport>
+ genReportNullAttrNonNull(const ExplodedNode *ErrorN, const Expr *ArgE) const;
+ std::unique_ptr<BugReport>
+ genReportReferenceToNullPointer(const ExplodedNode *ErrorN,
+ const Expr *ArgE) const;
+};
+} // end anonymous namespace
+
+void NonNullParamChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ const Decl *FD = Call.getDecl();
+ if (!FD)
+ return;
+
+ // Merge all non-null attributes
+ unsigned NumArgs = Call.getNumArgs();
+ llvm::SmallBitVector AttrNonNull(NumArgs);
+ for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
+ if (!NonNull->args_size()) {
+ AttrNonNull.set(0, NumArgs);
+ break;
+ }
+ for (unsigned Val : NonNull->args()) {
+ if (Val >= NumArgs)
+ continue;
+ AttrNonNull.set(Val);
+ }
+ }
+
+ ProgramStateRef state = C.getState();
+
+ CallEvent::param_type_iterator TyI = Call.param_type_begin(),
+ TyE = Call.param_type_end();
+
+ for (unsigned idx = 0; idx < NumArgs; ++idx) {
+
+    // Check if the parameter is a reference. We want to report when a
+    // reference to a null pointer is passed as a parameter.
+ bool haveRefTypeParam = false;
+ if (TyI != TyE) {
+ haveRefTypeParam = (*TyI)->isReferenceType();
+ TyI++;
+ }
+
+ bool haveAttrNonNull = AttrNonNull[idx];
+ if (!haveAttrNonNull) {
+ // Check if the parameter is also marked 'nonnull'.
+ ArrayRef<ParmVarDecl*> parms = Call.parameters();
+ if (idx < parms.size())
+ haveAttrNonNull = parms[idx]->hasAttr<NonNullAttr>();
+ }
+
+ if (!haveRefTypeParam && !haveAttrNonNull)
+ continue;
+
+ // If the value is unknown or undefined, we can't perform this check.
+ const Expr *ArgE = Call.getArgExpr(idx);
+ SVal V = Call.getArgSVal(idx);
+ Optional<DefinedSVal> DV = V.getAs<DefinedSVal>();
+ if (!DV)
+ continue;
+
+ // Process the case when the argument is not a location.
+ assert(!haveRefTypeParam || DV->getAs<Loc>());
+
+ if (haveAttrNonNull && !DV->getAs<Loc>()) {
+ // If the argument is a union type, we want to handle a potential
+ // transparent_union GCC extension.
+ if (!ArgE)
+ continue;
+
+ QualType T = ArgE->getType();
+ const RecordType *UT = T->getAsUnionType();
+ if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ continue;
+
+ if (Optional<nonloc::CompoundVal> CSV =
+ DV->getAs<nonloc::CompoundVal>()) {
+ nonloc::CompoundVal::iterator CSV_I = CSV->begin();
+ assert(CSV_I != CSV->end());
+ V = *CSV_I;
+ DV = V.getAs<DefinedSVal>();
+ assert(++CSV_I == CSV->end());
+ // FIXME: Handle (some_union){ some_other_union_val }, which turns into
+ // a LazyCompoundVal inside a CompoundVal.
+ if (!V.getAs<Loc>())
+ continue;
+ // Retrieve the corresponding expression.
+ if (const CompoundLiteralExpr *CE = dyn_cast<CompoundLiteralExpr>(ArgE))
+ if (const InitListExpr *IE =
+ dyn_cast<InitListExpr>(CE->getInitializer()))
+ ArgE = dyn_cast<Expr>(*(IE->begin()));
+
+ } else {
+ // FIXME: Handle LazyCompoundVals?
+ continue;
+ }
+ }
+
+ ConstraintManager &CM = C.getConstraintManager();
+ ProgramStateRef stateNotNull, stateNull;
+ std::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);
+
+ if (stateNull) {
+ if (!stateNotNull) {
+ // Generate an error node. Check for a null node in case
+ // we cache out.
+ if (ExplodedNode *errorNode = C.generateErrorNode(stateNull)) {
+
+ std::unique_ptr<BugReport> R;
+ if (haveAttrNonNull)
+ R = genReportNullAttrNonNull(errorNode, ArgE);
+ else if (haveRefTypeParam)
+ R = genReportReferenceToNullPointer(errorNode, ArgE);
+
+ // Highlight the range of the argument that was null.
+ R->addRange(Call.getArgSourceRange(idx));
+
+ // Emit the bug report.
+ C.emitReport(std::move(R));
+ }
+
+ // Always return. Either we cached out or we just emitted an error.
+ return;
+ }
+ if (ExplodedNode *N = C.generateSink(stateNull, C.getPredecessor())) {
+ ImplicitNullDerefEvent event = {
+ V, false, N, &C.getBugReporter(),
+ /*IsDirectDereference=*/haveRefTypeParam};
+ dispatchEvent(event);
+ }
+ }
+
+ // If a pointer value passed the check we should assume that it is
+ // indeed not null from this point forward.
+ assert(stateNotNull);
+ state = stateNotNull;
+ }
+
+  // If we reach here, all of the arguments passed the nonnull check.
+  // If 'state' has been updated, generate a new node.
+ C.addTransition(state);
+}
+
+std::unique_ptr<BugReport>
+NonNullParamChecker::genReportNullAttrNonNull(const ExplodedNode *ErrorNode,
+ const Expr *ArgE) const {
+ // Lazily allocate the BugType object if it hasn't already been
+ // created. Ownership is transferred to the BugReporter object once
+ // the BugReport is passed to 'EmitWarning'.
+ if (!BTAttrNonNull)
+ BTAttrNonNull.reset(new BugType(
+ this, "Argument with 'nonnull' attribute passed null", "API"));
+
+ auto R = llvm::make_unique<BugReport>(
+ *BTAttrNonNull,
+ "Null pointer passed as an argument to a 'nonnull' parameter", ErrorNode);
+ if (ArgE)
+ bugreporter::trackNullOrUndefValue(ErrorNode, ArgE, *R);
+
+ return R;
+}
+
+std::unique_ptr<BugReport> NonNullParamChecker::genReportReferenceToNullPointer(
+ const ExplodedNode *ErrorNode, const Expr *ArgE) const {
+ if (!BTNullRefArg)
+ BTNullRefArg.reset(new BuiltinBug(this, "Dereference of null pointer"));
+
+ auto R = llvm::make_unique<BugReport>(
+ *BTNullRefArg, "Forming reference to null pointer", ErrorNode);
+ if (ArgE) {
+ const Expr *ArgEDeref = bugreporter::getDerefExpr(ArgE);
+ if (!ArgEDeref)
+ ArgEDeref = ArgE;
+ bugreporter::trackNullOrUndefValue(ErrorNode,
+ ArgEDeref,
+ *R);
+ }
+  return R;
+}
+
+void ento::registerNonNullParamChecker(CheckerManager &mgr) {
+ mgr.registerChecker<NonNullParamChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
new file mode 100644
index 0000000..bb86ea4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
@@ -0,0 +1,1066 @@
+//== NullabilityChecker.cpp - Nullability checker ---------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker tries to find nullability violations. There are several kinds of
+// possible violations:
+// * Null pointer is passed to a pointer which has a _Nonnull type.
+// * Null pointer is returned from a function which has a _Nonnull return type.
+// * Nullable pointer is passed to a pointer which has a _Nonnull type.
+// * Nullable pointer is returned from a function which has a _Nonnull return
+// type.
+// * Nullable pointer is dereferenced.
+//
+// This checker propagates the nullability information of the pointers and looks
+// for the patterns that are described above. Explicit casts are trusted and are
+// considered a way to suppress false positives for this checker. The other way
+// to suppress warnings would be to add asserts or guarding if statements to the
+// code. In addition to the nullability propagation this checker also uses some
+// heuristics to suppress potential false positives.
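+//
+// For example (hypothetical; 'get' stands in for any nullable-returning call):
+//
+//   int * _Nonnull p = 0;      // warn: null assigned to a nonnull pointer
+//   int * _Nullable q = get();
+//   *q = 1;                    // warn: nullable pointer is dereferenced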
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "llvm/Support/Path.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+// Do not reorder! The getMostNullable method relies on the order.
+// Optimization: Most pointers are expected to be unspecified. When a symbol
+// has an unspecified or nonnull type, none of the rules would indicate any
+// problem for that symbol. For this reason only nullable and contradicted
+// nullability are stored for a symbol. When a symbol is already contradicted,
+// it cannot be cast back to nullable.
+enum class Nullability : char {
+  Contradicted, // Tracked nullability is contradicted by an explicit cast. Do
+                // not report any nullability related issue for this symbol.
+                // This nullability is propagated aggressively to avoid false
+                // positive results. See the comment on the getMostNullable
+                // method.
+ Nullable,
+ Unspecified,
+ Nonnull
+};
+
+/// Returns the most nullable nullability. This is used for message expressions
+/// like [receiver method], where the nullability of this expression is either
+/// the nullability of the receiver or the nullability of the return type of the
+/// method, depending on which is more nullable. Contradicted is considered to
+/// be the most nullable, to avoid false positive results.
+Nullability getMostNullable(Nullability Lhs, Nullability Rhs) {
+ return static_cast<Nullability>(
+ std::min(static_cast<char>(Lhs), static_cast<char>(Rhs)));
+}
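+
+// For example, given the enum order above, getMostNullable(Nullable, Nonnull)
+// is Nullable, and Contradicted wins over everything else.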
+
+const char *getNullabilityString(Nullability Nullab) {
+ switch (Nullab) {
+ case Nullability::Contradicted:
+ return "contradicted";
+ case Nullability::Nullable:
+ return "nullable";
+ case Nullability::Unspecified:
+ return "unspecified";
+ case Nullability::Nonnull:
+ return "nonnull";
+ }
+ llvm_unreachable("Unexpected enumeration.");
+ return "";
+}
+
+// These enums are used as an index to ErrorMessages array.
+enum class ErrorKind : int {
+ NilAssignedToNonnull,
+ NilPassedToNonnull,
+ NilReturnedToNonnull,
+ NullableAssignedToNonnull,
+ NullableReturnedToNonnull,
+ NullableDereferenced,
+ NullablePassedToNonnull
+};
+
+const char *const ErrorMessages[] = {
+ "Null is assigned to a pointer which is expected to have non-null value",
+ "Null passed to a callee that requires a non-null argument",
+ "Null is returned from a function that is expected to return a non-null "
+ "value",
+ "Nullable pointer is assigned to a pointer which is expected to have "
+ "non-null value",
+ "Nullable pointer is returned from a function that is expected to return a "
+ "non-null value",
+ "Nullable pointer is dereferenced",
+ "Nullable pointer is passed to a callee that requires a non-null argument"};
+
+class NullabilityChecker
+ : public Checker<check::Bind, check::PreCall, check::PreStmt<ReturnStmt>,
+ check::PostCall, check::PostStmt<ExplicitCastExpr>,
+ check::PostObjCMessage, check::DeadSymbols,
+ check::Event<ImplicitNullDerefEvent>> {
+ mutable std::unique_ptr<BugType> BT;
+
+public:
+ void checkBind(SVal L, SVal V, const Stmt *S, CheckerContext &C) const;
+ void checkPostStmt(const ExplicitCastExpr *CE, CheckerContext &C) const;
+ void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
+ void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
+ void checkEvent(ImplicitNullDerefEvent Event) const;
+
+ void printState(raw_ostream &Out, ProgramStateRef State, const char *NL,
+ const char *Sep) const override;
+
+ struct NullabilityChecksFilter {
+ DefaultBool CheckNullPassedToNonnull;
+ DefaultBool CheckNullReturnedFromNonnull;
+ DefaultBool CheckNullableDereferenced;
+ DefaultBool CheckNullablePassedToNonnull;
+ DefaultBool CheckNullableReturnedFromNonnull;
+
+ CheckName CheckNameNullPassedToNonnull;
+ CheckName CheckNameNullReturnedFromNonnull;
+ CheckName CheckNameNullableDereferenced;
+ CheckName CheckNameNullablePassedToNonnull;
+ CheckName CheckNameNullableReturnedFromNonnull;
+ };
+
+ NullabilityChecksFilter Filter;
+  // When set to false, no nullability information will be tracked in
+  // NullabilityMap. It is possible to catch errors like passing a null pointer
+  // to a callee that expects a nonnull argument without the information that
+  // is stored in the NullabilityMap. This is an optimization.
+ DefaultBool NeedTracking;
+
+private:
+ class NullabilityBugVisitor
+ : public BugReporterVisitorImpl<NullabilityBugVisitor> {
+ public:
+ NullabilityBugVisitor(const MemRegion *M) : Region(M) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ static int X = 0;
+ ID.AddPointer(&X);
+ ID.AddPointer(Region);
+ }
+
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
+
+ private:
+ // The tracked region.
+ const MemRegion *Region;
+ };
+
+ /// When any of the nonnull arguments of the analyzed function is null, do not
+ /// report anything and turn off the check.
+ ///
+ /// When \p SuppressPath is set to true, no more bugs will be reported on this
+ /// path by this checker.
+ void reportBugIfPreconditionHolds(ErrorKind Error, ExplodedNode *N,
+ const MemRegion *Region, CheckerContext &C,
+ const Stmt *ValueExpr = nullptr,
+ bool SuppressPath = false) const;
+
+ void reportBug(ErrorKind Error, ExplodedNode *N, const MemRegion *Region,
+ BugReporter &BR, const Stmt *ValueExpr = nullptr) const {
+ if (!BT)
+ BT.reset(new BugType(this, "Nullability", "Memory error"));
+ const char *Msg = ErrorMessages[static_cast<int>(Error)];
+ std::unique_ptr<BugReport> R(new BugReport(*BT, Msg, N));
+ if (Region) {
+ R->markInteresting(Region);
+ R->addVisitor(llvm::make_unique<NullabilityBugVisitor>(Region));
+ }
+ if (ValueExpr) {
+ R->addRange(ValueExpr->getSourceRange());
+ if (Error == ErrorKind::NilAssignedToNonnull ||
+ Error == ErrorKind::NilPassedToNonnull ||
+ Error == ErrorKind::NilReturnedToNonnull)
+ bugreporter::trackNullOrUndefValue(N, ValueExpr, *R);
+ }
+ BR.emitReport(std::move(R));
+ }
+
+ /// If an SVal wraps a region that should be tracked, it will return a pointer
+ /// to the wrapped region. Otherwise it will return a nullptr.
+ const SymbolicRegion *getTrackRegion(SVal Val,
+ bool CheckSuperRegion = false) const;
+};
+
+class NullabilityState {
+public:
+ NullabilityState(Nullability Nullab, const Stmt *Source = nullptr)
+ : Nullab(Nullab), Source(Source) {}
+
+ const Stmt *getNullabilitySource() const { return Source; }
+
+ Nullability getValue() const { return Nullab; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(static_cast<char>(Nullab));
+ ID.AddPointer(Source);
+ }
+
+ void print(raw_ostream &Out) const {
+ Out << getNullabilityString(Nullab) << "\n";
+ }
+
+private:
+ Nullability Nullab;
+  // Source is the expression which determined the nullability. For example, a
+  // message like [nullable nonnull_returning] has nullable nullability because
+ // the receiver is nullable. Here the receiver will be the source of the
+ // nullability. This is useful information when the diagnostics are generated.
+ const Stmt *Source;
+};
+
+bool operator==(NullabilityState Lhs, NullabilityState Rhs) {
+ return Lhs.getValue() == Rhs.getValue() &&
+ Lhs.getNullabilitySource() == Rhs.getNullabilitySource();
+}
+
+} // end anonymous namespace
+
+REGISTER_MAP_WITH_PROGRAMSTATE(NullabilityMap, const MemRegion *,
+ NullabilityState)
+
+// If the nullability precondition of a function is violated, we should not
+// report nullability related issues on that path. For this reason, once a
+// precondition is not met on a path, this checker is essentially turned off
+// for the rest of the analysis. We do not want to generate a sink node,
+// however, so this checker does not lead to reduced coverage.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(PreconditionViolated, bool)
+
+enum class NullConstraint { IsNull, IsNotNull, Unknown };
+
+static NullConstraint getNullConstraint(DefinedOrUnknownSVal Val,
+ ProgramStateRef State) {
+ ConditionTruthVal Nullness = State->isNull(Val);
+ if (Nullness.isConstrainedFalse())
+ return NullConstraint::IsNotNull;
+ if (Nullness.isConstrainedTrue())
+ return NullConstraint::IsNull;
+ return NullConstraint::Unknown;
+}
+
+const SymbolicRegion *
+NullabilityChecker::getTrackRegion(SVal Val, bool CheckSuperRegion) const {
+ if (!NeedTracking)
+ return nullptr;
+
+ auto RegionSVal = Val.getAs<loc::MemRegionVal>();
+ if (!RegionSVal)
+ return nullptr;
+
+ const MemRegion *Region = RegionSVal->getRegion();
+
+ if (CheckSuperRegion) {
+ if (auto FieldReg = Region->getAs<FieldRegion>())
+ return dyn_cast<SymbolicRegion>(FieldReg->getSuperRegion());
+ if (auto ElementReg = Region->getAs<ElementRegion>())
+ return dyn_cast<SymbolicRegion>(ElementReg->getSuperRegion());
+ }
+
+ return dyn_cast<SymbolicRegion>(Region);
+}
+
+PathDiagnosticPiece *NullabilityChecker::NullabilityBugVisitor::VisitNode(
+ const ExplodedNode *N, const ExplodedNode *PrevN, BugReporterContext &BRC,
+ BugReport &BR) {
+ ProgramStateRef State = N->getState();
+ ProgramStateRef StatePrev = PrevN->getState();
+
+ const NullabilityState *TrackedNullab = State->get<NullabilityMap>(Region);
+ const NullabilityState *TrackedNullabPrev =
+ StatePrev->get<NullabilityMap>(Region);
+ if (!TrackedNullab)
+ return nullptr;
+
+ if (TrackedNullabPrev &&
+ TrackedNullabPrev->getValue() == TrackedNullab->getValue())
+ return nullptr;
+
+ // Retrieve the associated statement.
+ const Stmt *S = TrackedNullab->getNullabilitySource();
+ if (!S) {
+ ProgramPoint ProgLoc = N->getLocation();
+ if (Optional<StmtPoint> SP = ProgLoc.getAs<StmtPoint>()) {
+ S = SP->getStmt();
+ }
+ }
+
+ if (!S)
+ return nullptr;
+
+  std::string InfoText =
+      (llvm::Twine("Nullability '") +
+       getNullabilityString(TrackedNullab->getValue()) + "' is inferred")
+          .str();
+
+ // Generate the extra diagnostic.
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+ N->getLocationContext());
+ return new PathDiagnosticEventPiece(Pos, InfoText, true, nullptr);
+}
+
+static Nullability getNullabilityAnnotation(QualType Type) {
+ const auto *AttrType = Type->getAs<AttributedType>();
+ if (!AttrType)
+ return Nullability::Unspecified;
+ if (AttrType->getAttrKind() == AttributedType::attr_nullable)
+ return Nullability::Nullable;
+ else if (AttrType->getAttrKind() == AttributedType::attr_nonnull)
+ return Nullability::Nonnull;
+ return Nullability::Unspecified;
+}
+
+template <typename ParamVarDeclRange>
+static bool
+checkParamsForPreconditionViolation(const ParamVarDeclRange &Params,
+ ProgramStateRef State,
+ const LocationContext *LocCtxt) {
+ for (const auto *ParamDecl : Params) {
+ if (ParamDecl->isParameterPack())
+ break;
+
+ if (getNullabilityAnnotation(ParamDecl->getType()) != Nullability::Nonnull)
+ continue;
+
+ auto RegVal = State->getLValue(ParamDecl, LocCtxt)
+ .template getAs<loc::MemRegionVal>();
+ if (!RegVal)
+ continue;
+
+ auto ParamValue = State->getSVal(RegVal->getRegion())
+ .template getAs<DefinedOrUnknownSVal>();
+ if (!ParamValue)
+ continue;
+
+ if (getNullConstraint(*ParamValue, State) == NullConstraint::IsNull) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool checkPreconditionViolation(ProgramStateRef State, ExplodedNode *N,
+ CheckerContext &C) {
+ if (State->get<PreconditionViolated>())
+ return true;
+
+ const LocationContext *LocCtxt = C.getLocationContext();
+ const Decl *D = LocCtxt->getDecl();
+ if (!D)
+ return false;
+
+ if (const auto *BlockD = dyn_cast<BlockDecl>(D)) {
+ if (checkParamsForPreconditionViolation(BlockD->parameters(), State,
+ LocCtxt)) {
+ if (!N->isSink())
+ C.addTransition(State->set<PreconditionViolated>(true), N);
+ return true;
+ }
+ return false;
+ }
+
+ if (const auto *FuncDecl = dyn_cast<FunctionDecl>(D)) {
+ if (checkParamsForPreconditionViolation(FuncDecl->parameters(), State,
+ LocCtxt)) {
+ if (!N->isSink())
+ C.addTransition(State->set<PreconditionViolated>(true), N);
+ return true;
+ }
+ return false;
+ }
+ return false;
+}
+
+void NullabilityChecker::reportBugIfPreconditionHolds(
+ ErrorKind Error, ExplodedNode *N, const MemRegion *Region,
+ CheckerContext &C, const Stmt *ValueExpr, bool SuppressPath) const {
+ ProgramStateRef OriginalState = N->getState();
+
+ if (checkPreconditionViolation(OriginalState, N, C))
+ return;
+ if (SuppressPath) {
+ OriginalState = OriginalState->set<PreconditionViolated>(true);
+ N = C.addTransition(OriginalState, N);
+ }
+
+ reportBug(Error, N, Region, C.getBugReporter(), ValueExpr);
+}
+
+/// Cleaning up the program state.
+void NullabilityChecker::checkDeadSymbols(SymbolReaper &SR,
+ CheckerContext &C) const {
+ if (!SR.hasDeadSymbols())
+ return;
+
+ ProgramStateRef State = C.getState();
+ NullabilityMapTy Nullabilities = State->get<NullabilityMap>();
+ for (NullabilityMapTy::iterator I = Nullabilities.begin(),
+ E = Nullabilities.end();
+ I != E; ++I) {
+ const auto *Region = I->first->getAs<SymbolicRegion>();
+ assert(Region && "Non-symbolic region is tracked.");
+ if (SR.isDead(Region->getSymbol())) {
+ State = State->remove<NullabilityMap>(I->first);
+ }
+ }
+  // When one of the nonnull arguments is constrained to be null, nullability
+  // preconditions are violated. It is not enough to check this only when we
+  // actually report an error, because by that time interesting symbols might
+  // have been reaped.
+ if (checkPreconditionViolation(State, C.getPredecessor(), C))
+ return;
+ C.addTransition(State);
+}
+
+/// This callback triggers when a pointer is dereferenced and the analyzer does
+/// not know anything about the value of that pointer. When that pointer is
+/// nullable, this code emits a warning.
+void NullabilityChecker::checkEvent(ImplicitNullDerefEvent Event) const {
+ if (Event.SinkNode->getState()->get<PreconditionViolated>())
+ return;
+
+  const MemRegion *Region =
+      getTrackRegion(Event.Location, /*CheckSuperRegion=*/true);
+ if (!Region)
+ return;
+
+ ProgramStateRef State = Event.SinkNode->getState();
+ const NullabilityState *TrackedNullability =
+ State->get<NullabilityMap>(Region);
+
+ if (!TrackedNullability)
+ return;
+
+ if (Filter.CheckNullableDereferenced &&
+ TrackedNullability->getValue() == Nullability::Nullable) {
+ BugReporter &BR = *Event.BR;
+ // Do not suppress errors on defensive code paths, because dereferencing
+ // a nullable pointer is always an error.
+ if (Event.IsDirectDereference)
+ reportBug(ErrorKind::NullableDereferenced, Event.SinkNode, Region, BR);
+ else
+ reportBug(ErrorKind::NullablePassedToNonnull, Event.SinkNode, Region, BR);
+ }
+}
+
+/// This method checks when a nullable pointer or a null value is returned from
+/// a function that has a nonnull return type.
+///
+/// TODO: when nullability preconditions are violated, it is ok to violate the
+/// nullability postconditions (i.e., when one of the nonnull parameters is
+/// null, this check should not report any nullability related issues).
+void NullabilityChecker::checkPreStmt(const ReturnStmt *S,
+ CheckerContext &C) const {
+ auto RetExpr = S->getRetValue();
+ if (!RetExpr)
+ return;
+
+ if (!RetExpr->getType()->isAnyPointerType())
+ return;
+
+ ProgramStateRef State = C.getState();
+ if (State->get<PreconditionViolated>())
+ return;
+
+ auto RetSVal =
+ State->getSVal(S, C.getLocationContext()).getAs<DefinedOrUnknownSVal>();
+ if (!RetSVal)
+ return;
+
+ AnalysisDeclContext *DeclCtxt =
+ C.getLocationContext()->getAnalysisDeclContext();
+ const FunctionType *FuncType = DeclCtxt->getDecl()->getFunctionType();
+ if (!FuncType)
+ return;
+
+ NullConstraint Nullness = getNullConstraint(*RetSVal, State);
+
+ Nullability RequiredNullability =
+ getNullabilityAnnotation(FuncType->getReturnType());
+
+ // If the returned value is null but the type of the expression
+ // generating it is nonnull then we will suppress the diagnostic.
+ // This enables explicit suppression when returning a nil literal in a
+ // function with a _Nonnull return type:
+ // return (NSString * _Nonnull)0;
+ Nullability RetExprTypeLevelNullability =
+ getNullabilityAnnotation(RetExpr->getType());
+
+ if (Filter.CheckNullReturnedFromNonnull &&
+ Nullness == NullConstraint::IsNull &&
+ RetExprTypeLevelNullability != Nullability::Nonnull &&
+ RequiredNullability == Nullability::Nonnull) {
+ static CheckerProgramPointTag Tag(this, "NullReturnedFromNonnull");
+ ExplodedNode *N = C.generateErrorNode(State, &Tag);
+ if (!N)
+ return;
+ reportBugIfPreconditionHolds(ErrorKind::NilReturnedToNonnull, N, nullptr, C,
+ RetExpr);
+ return;
+ }
+
+ const MemRegion *Region = getTrackRegion(*RetSVal);
+ if (!Region)
+ return;
+
+ const NullabilityState *TrackedNullability =
+ State->get<NullabilityMap>(Region);
+ if (TrackedNullability) {
+ Nullability TrackedNullabValue = TrackedNullability->getValue();
+ if (Filter.CheckNullableReturnedFromNonnull &&
+ Nullness != NullConstraint::IsNotNull &&
+ TrackedNullabValue == Nullability::Nullable &&
+ RequiredNullability == Nullability::Nonnull) {
+ static CheckerProgramPointTag Tag(this, "NullableReturnedFromNonnull");
+ ExplodedNode *N = C.addTransition(State, C.getPredecessor(), &Tag);
+ reportBugIfPreconditionHolds(ErrorKind::NullableReturnedToNonnull, N,
+ Region, C);
+ }
+ return;
+ }
+ if (RequiredNullability == Nullability::Nullable) {
+ State = State->set<NullabilityMap>(Region,
+ NullabilityState(RequiredNullability,
+ S));
+ C.addTransition(State);
+ }
+}
+
+/// This callback warns when a nullable pointer or a null value is passed to a
+/// function that expects its argument to be nonnull.
+void NullabilityChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ if (!Call.getDecl())
+ return;
+
+ ProgramStateRef State = C.getState();
+ if (State->get<PreconditionViolated>())
+ return;
+
+ ProgramStateRef OrigState = State;
+
+ unsigned Idx = 0;
+ for (const ParmVarDecl *Param : Call.parameters()) {
+ if (Param->isParameterPack())
+ break;
+
+ const Expr *ArgExpr = nullptr;
+ if (Idx < Call.getNumArgs())
+ ArgExpr = Call.getArgExpr(Idx);
+ auto ArgSVal = Call.getArgSVal(Idx++).getAs<DefinedOrUnknownSVal>();
+ if (!ArgSVal)
+ continue;
+
+ if (!Param->getType()->isAnyPointerType() &&
+ !Param->getType()->isReferenceType())
+ continue;
+
+ NullConstraint Nullness = getNullConstraint(*ArgSVal, State);
+
+ Nullability RequiredNullability =
+ getNullabilityAnnotation(Param->getType());
+    Nullability ArgExprTypeLevelNullability = Nullability::Unspecified;
+    if (ArgExpr) // ArgExpr may be null if there are fewer args than params.
+      ArgExprTypeLevelNullability =
+          getNullabilityAnnotation(ArgExpr->getType());
+
+ if (Filter.CheckNullPassedToNonnull && Nullness == NullConstraint::IsNull &&
+ ArgExprTypeLevelNullability != Nullability::Nonnull &&
+ RequiredNullability == Nullability::Nonnull) {
+ ExplodedNode *N = C.generateErrorNode(State);
+ if (!N)
+ return;
+ reportBugIfPreconditionHolds(ErrorKind::NilPassedToNonnull, N, nullptr, C,
+ ArgExpr);
+ return;
+ }
+
+ const MemRegion *Region = getTrackRegion(*ArgSVal);
+ if (!Region)
+ continue;
+
+ const NullabilityState *TrackedNullability =
+ State->get<NullabilityMap>(Region);
+
+ if (TrackedNullability) {
+ if (Nullness == NullConstraint::IsNotNull ||
+ TrackedNullability->getValue() != Nullability::Nullable)
+ continue;
+
+ if (Filter.CheckNullablePassedToNonnull &&
+ RequiredNullability == Nullability::Nonnull) {
+ ExplodedNode *N = C.addTransition(State);
+ reportBugIfPreconditionHolds(ErrorKind::NullablePassedToNonnull, N,
+ Region, C, ArgExpr, /*SuppressPath=*/true);
+ return;
+ }
+ if (Filter.CheckNullableDereferenced &&
+ Param->getType()->isReferenceType()) {
+ ExplodedNode *N = C.addTransition(State);
+ reportBugIfPreconditionHolds(ErrorKind::NullableDereferenced, N, Region,
+ C, ArgExpr, /*SuppressPath=*/true);
+ return;
+ }
+ continue;
+ }
+ // No tracked nullability yet.
+ if (ArgExprTypeLevelNullability != Nullability::Nullable)
+ continue;
+ State = State->set<NullabilityMap>(
+ Region, NullabilityState(ArgExprTypeLevelNullability, ArgExpr));
+ }
+ if (State != OrigState)
+ C.addTransition(State);
+}
+
+/// Suppress the nullability warnings for some functions.
+void NullabilityChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ auto Decl = Call.getDecl();
+ if (!Decl)
+ return;
+  // ObjC messages are handled in a different callback.
+ if (Call.getKind() == CE_ObjCMessage)
+ return;
+ const FunctionType *FuncType = Decl->getFunctionType();
+ if (!FuncType)
+ return;
+ QualType ReturnType = FuncType->getReturnType();
+ if (!ReturnType->isAnyPointerType())
+ return;
+ ProgramStateRef State = C.getState();
+ if (State->get<PreconditionViolated>())
+ return;
+
+ const MemRegion *Region = getTrackRegion(Call.getReturnValue());
+ if (!Region)
+ return;
+
+ // CG headers are misannotated. Do not warn for symbols that are the results
+ // of CG calls.
+ const SourceManager &SM = C.getSourceManager();
+ StringRef FilePath = SM.getFilename(SM.getSpellingLoc(Decl->getLocStart()));
+ if (llvm::sys::path::filename(FilePath).startswith("CG")) {
+ State = State->set<NullabilityMap>(Region, Nullability::Contradicted);
+ C.addTransition(State);
+ return;
+ }
+
+ const NullabilityState *TrackedNullability =
+ State->get<NullabilityMap>(Region);
+
+ if (!TrackedNullability &&
+ getNullabilityAnnotation(ReturnType) == Nullability::Nullable) {
+ State = State->set<NullabilityMap>(Region, Nullability::Nullable);
+ C.addTransition(State);
+ }
+}
+
+static Nullability getReceiverNullability(const ObjCMethodCall &M,
+ ProgramStateRef State) {
+ if (M.isReceiverSelfOrSuper()) {
+ // For super and super class receivers we assume that the receiver is
+ // nonnull.
+ return Nullability::Nonnull;
+ }
+ // Otherwise look up nullability in the state.
+ SVal Receiver = M.getReceiverSVal();
+ if (auto DefOrUnknown = Receiver.getAs<DefinedOrUnknownSVal>()) {
+ // If the receiver is constrained to be nonnull, assume that it is nonnull
+ // regardless of its type.
+ NullConstraint Nullness = getNullConstraint(*DefOrUnknown, State);
+ if (Nullness == NullConstraint::IsNotNull)
+ return Nullability::Nonnull;
+ }
+ auto ValueRegionSVal = Receiver.getAs<loc::MemRegionVal>();
+ if (ValueRegionSVal) {
+ const MemRegion *SelfRegion = ValueRegionSVal->getRegion();
+ assert(SelfRegion);
+
+ const NullabilityState *TrackedSelfNullability =
+ State->get<NullabilityMap>(SelfRegion);
+ if (TrackedSelfNullability)
+ return TrackedSelfNullability->getValue();
+ }
+ return Nullability::Unspecified;
+}
+
+/// Calculate the nullability of the result of a message expr based on the
+/// nullability of the receiver, the nullability of the return value, and the
+/// constraints.
+void NullabilityChecker::checkPostObjCMessage(const ObjCMethodCall &M,
+ CheckerContext &C) const {
+ auto Decl = M.getDecl();
+ if (!Decl)
+ return;
+ QualType RetType = Decl->getReturnType();
+ if (!RetType->isAnyPointerType())
+ return;
+
+ ProgramStateRef State = C.getState();
+ if (State->get<PreconditionViolated>())
+ return;
+
+ const MemRegion *ReturnRegion = getTrackRegion(M.getReturnValue());
+ if (!ReturnRegion)
+ return;
+
+ auto Interface = Decl->getClassInterface();
+ auto Name = Interface ? Interface->getName() : "";
+ // In order to reduce the noise in the diagnostics generated by this checker,
+ // some framework and programming style based heuristics are used. These
+ // heuristics are for Cocoa APIs which have NS prefix.
+ if (Name.startswith("NS")) {
+    // Developers often rely on dynamic invariants such as an item being
+    // available in a collection or a collection not being empty. Those
+    // invariants cannot be inferred by any static analysis tool. To avoid
+    // bothering the users with too many false positives, every item retrieval
+    // function is ignored for collections. The instance methods of
+    // dictionaries in Cocoa are either item retrieval related or not
+    // interesting nullability-wise. Using this fact, and to keep the code
+    // easier to read, just ignore the return value of every instance method
+    // of dictionaries.
+ if (M.isInstanceMessage() && Name.find("Dictionary") != StringRef::npos) {
+ State =
+ State->set<NullabilityMap>(ReturnRegion, Nullability::Contradicted);
+ C.addTransition(State);
+ return;
+ }
+ // For similar reasons ignore some methods of Cocoa arrays.
+ StringRef FirstSelectorSlot = M.getSelector().getNameForSlot(0);
+ if (Name.find("Array") != StringRef::npos &&
+ (FirstSelectorSlot == "firstObject" ||
+ FirstSelectorSlot == "lastObject")) {
+ State =
+ State->set<NullabilityMap>(ReturnRegion, Nullability::Contradicted);
+ C.addTransition(State);
+ return;
+ }
+
+    // Encoding-related methods of NSString should not fail when lossless
+    // encodings are used. Lossless encodings are used so frequently that
+    // ignoring this class of methods reduced the emitted diagnostics by about
+    // 30% on some projects (and all of those were false positives).
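+    // For illustration, a hypothetical call this heuristic suppresses:
+    // @code
+    //   NSData *D = [S dataUsingEncoding:NSUTF8StringEncoding]; // ignored
+    // @endcode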
+ if (Name.find("String") != StringRef::npos) {
+ for (auto Param : M.parameters()) {
+ if (Param->getName() == "encoding") {
+ State = State->set<NullabilityMap>(ReturnRegion,
+ Nullability::Contradicted);
+ C.addTransition(State);
+ return;
+ }
+ }
+ }
+ }
+
+ const ObjCMessageExpr *Message = M.getOriginExpr();
+ Nullability SelfNullability = getReceiverNullability(M, State);
+
+ const NullabilityState *NullabilityOfReturn =
+ State->get<NullabilityMap>(ReturnRegion);
+
+ if (NullabilityOfReturn) {
+ // When we have a nullability tracked for the return value, the nullability
+ // of the expression will be the most nullable of the receiver and the
+ // return value.
+ Nullability RetValTracked = NullabilityOfReturn->getValue();
+ Nullability ComputedNullab =
+ getMostNullable(RetValTracked, SelfNullability);
+ if (ComputedNullab != RetValTracked &&
+ ComputedNullab != Nullability::Unspecified) {
+ const Stmt *NullabilitySource =
+ ComputedNullab == RetValTracked
+ ? NullabilityOfReturn->getNullabilitySource()
+ : Message->getInstanceReceiver();
+ State = State->set<NullabilityMap>(
+ ReturnRegion, NullabilityState(ComputedNullab, NullabilitySource));
+ C.addTransition(State);
+ }
+ return;
+ }
+
+ // No tracked information. Use static type information for return value.
+ Nullability RetNullability = getNullabilityAnnotation(RetType);
+
+  // Properties might be computed. For this reason the static analyzer creates
+  // a new symbol each time an unknown property is read. To avoid false
+  // positives, do not treat unknown properties as nullable, even when they are
+  // explicitly marked nullable.
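+  // For illustration (hypothetical code that would otherwise warn twice):
+  // @code
+  //   if (Obj.nullableProp)
+  //     use(Obj.nullableProp); // re-reading creates a fresh symbol
+  // @endcode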
+ if (M.getMessageKind() == OCM_PropertyAccess && !C.wasInlined)
+ RetNullability = Nullability::Nonnull;
+
+ Nullability ComputedNullab = getMostNullable(RetNullability, SelfNullability);
+ if (ComputedNullab == Nullability::Nullable) {
+ const Stmt *NullabilitySource = ComputedNullab == RetNullability
+ ? Message
+ : Message->getInstanceReceiver();
+ State = State->set<NullabilityMap>(
+ ReturnRegion, NullabilityState(ComputedNullab, NullabilitySource));
+ C.addTransition(State);
+ }
+}
+
+/// Explicit casts are trusted. If there is a disagreement between the
+/// nullability annotations of the destination and the source, or '0' is cast
+/// to nonnull, track the value as having contradictory nullability. This will
+/// allow users to suppress warnings.
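+///
+/// A sketch of the suppression pattern (hypothetical client code):
+/// @code
+///   P = (NSString * _Nonnull)nil; // contradicted; later warnings suppressed
+/// @endcode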
+void NullabilityChecker::checkPostStmt(const ExplicitCastExpr *CE,
+ CheckerContext &C) const {
+ QualType OriginType = CE->getSubExpr()->getType();
+ QualType DestType = CE->getType();
+ if (!OriginType->isAnyPointerType())
+ return;
+ if (!DestType->isAnyPointerType())
+ return;
+
+ ProgramStateRef State = C.getState();
+ if (State->get<PreconditionViolated>())
+ return;
+
+ Nullability DestNullability = getNullabilityAnnotation(DestType);
+
+ // No explicit nullability in the destination type, so this cast does not
+ // change the nullability.
+ if (DestNullability == Nullability::Unspecified)
+ return;
+
+ auto RegionSVal =
+ State->getSVal(CE, C.getLocationContext()).getAs<DefinedOrUnknownSVal>();
+ const MemRegion *Region = getTrackRegion(*RegionSVal);
+ if (!Region)
+ return;
+
+ // When 0 is converted to nonnull mark it as contradicted.
+ if (DestNullability == Nullability::Nonnull) {
+ NullConstraint Nullness = getNullConstraint(*RegionSVal, State);
+ if (Nullness == NullConstraint::IsNull) {
+ State = State->set<NullabilityMap>(Region, Nullability::Contradicted);
+ C.addTransition(State);
+ return;
+ }
+ }
+
+ const NullabilityState *TrackedNullability =
+ State->get<NullabilityMap>(Region);
+
+ if (!TrackedNullability) {
+ if (DestNullability != Nullability::Nullable)
+ return;
+ State = State->set<NullabilityMap>(Region,
+ NullabilityState(DestNullability, CE));
+ C.addTransition(State);
+ return;
+ }
+
+ if (TrackedNullability->getValue() != DestNullability &&
+ TrackedNullability->getValue() != Nullability::Contradicted) {
+ State = State->set<NullabilityMap>(Region, Nullability::Contradicted);
+ C.addTransition(State);
+ }
+}
+
+/// For a given statement performing a bind, attempt to syntactically
+/// match the expression resulting in the bound value.
+static const Expr *matchValueExprForBind(const Stmt *S) {
+ // For `x = e` the value expression is the right-hand side.
+ if (auto *BinOp = dyn_cast<BinaryOperator>(S)) {
+ if (BinOp->getOpcode() == BO_Assign)
+ return BinOp->getRHS();
+ }
+
+ // For `int x = e` the value expression is the initializer.
+ if (auto *DS = dyn_cast<DeclStmt>(S)) {
+ if (DS->isSingleDecl()) {
+ auto *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
+ if (!VD)
+ return nullptr;
+
+ if (const Expr *Init = VD->getInit())
+ return Init;
+ }
+ }
+
+ return nullptr;
+}
+
+/// Returns true if \param S is a DeclStmt for a local variable that was
+/// implicitly zero-initialized under ObjC automated reference counting.
+static bool isARCNilInitializedLocal(CheckerContext &C, const Stmt *S) {
+ // We suppress diagnostics for ARC zero-initialized _Nonnull locals. This
+ // prevents false positives when a _Nonnull local variable cannot be
+ // initialized with an initialization expression:
+ // NSString * _Nonnull s; // no-warning
+ // @autoreleasepool {
+ // s = ...
+ // }
+ //
+ // FIXME: We should treat implicitly zero-initialized _Nonnull locals as
+ // uninitialized in Sema's UninitializedValues analysis to warn when a use of
+ // the zero-initialized definition will unexpectedly yield nil.
+
+ // Locals are only zero-initialized when automated reference counting
+ // is turned on.
+ if (!C.getASTContext().getLangOpts().ObjCAutoRefCount)
+ return false;
+
+ auto *DS = dyn_cast<DeclStmt>(S);
+ if (!DS || !DS->isSingleDecl())
+ return false;
+
+ auto *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
+ if (!VD)
+ return false;
+
+ // Sema only zero-initializes locals with ObjCLifetimes.
+  if (!VD->getType().getQualifiers().hasObjCLifetime())
+ return false;
+
+ const Expr *Init = VD->getInit();
+ assert(Init && "ObjC local under ARC without initializer");
+
+ // Return false if the local is explicitly initialized (e.g., with '= nil').
+ if (!isa<ImplicitValueInitExpr>(Init))
+ return false;
+
+ return true;
+}
+
+/// Propagate the nullability information through binds and warn when a
+/// nullable pointer or null symbol is assigned to a pointer with a nonnull
+/// type.
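+///
+/// A minimal example of the warned pattern (hypothetical):
+/// @code
+///   NSString * _Nonnull S = nil; // warning: nil assigned to nonnull
+/// @endcode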
+void NullabilityChecker::checkBind(SVal L, SVal V, const Stmt *S,
+ CheckerContext &C) const {
+ const TypedValueRegion *TVR =
+ dyn_cast_or_null<TypedValueRegion>(L.getAsRegion());
+ if (!TVR)
+ return;
+
+ QualType LocType = TVR->getValueType();
+ if (!LocType->isAnyPointerType())
+ return;
+
+ ProgramStateRef State = C.getState();
+ if (State->get<PreconditionViolated>())
+ return;
+
+ auto ValDefOrUnknown = V.getAs<DefinedOrUnknownSVal>();
+ if (!ValDefOrUnknown)
+ return;
+
+ NullConstraint RhsNullness = getNullConstraint(*ValDefOrUnknown, State);
+
+ Nullability ValNullability = Nullability::Unspecified;
+ if (SymbolRef Sym = ValDefOrUnknown->getAsSymbol())
+ ValNullability = getNullabilityAnnotation(Sym->getType());
+
+ Nullability LocNullability = getNullabilityAnnotation(LocType);
+ if (Filter.CheckNullPassedToNonnull &&
+ RhsNullness == NullConstraint::IsNull &&
+ ValNullability != Nullability::Nonnull &&
+ LocNullability == Nullability::Nonnull &&
+ !isARCNilInitializedLocal(C, S)) {
+ static CheckerProgramPointTag Tag(this, "NullPassedToNonnull");
+ ExplodedNode *N = C.generateErrorNode(State, &Tag);
+ if (!N)
+ return;
+
+ const Stmt *ValueExpr = matchValueExprForBind(S);
+ if (!ValueExpr)
+ ValueExpr = S;
+
+ reportBugIfPreconditionHolds(ErrorKind::NilAssignedToNonnull, N, nullptr, C,
+ ValueExpr);
+ return;
+ }
+ // Intentionally missing case: '0' is bound to a reference. It is handled by
+ // the DereferenceChecker.
+
+ const MemRegion *ValueRegion = getTrackRegion(*ValDefOrUnknown);
+ if (!ValueRegion)
+ return;
+
+ const NullabilityState *TrackedNullability =
+ State->get<NullabilityMap>(ValueRegion);
+
+ if (TrackedNullability) {
+ if (RhsNullness == NullConstraint::IsNotNull ||
+ TrackedNullability->getValue() != Nullability::Nullable)
+ return;
+ if (Filter.CheckNullablePassedToNonnull &&
+ LocNullability == Nullability::Nonnull) {
+ static CheckerProgramPointTag Tag(this, "NullablePassedToNonnull");
+ ExplodedNode *N = C.addTransition(State, C.getPredecessor(), &Tag);
+ reportBugIfPreconditionHolds(ErrorKind::NullableAssignedToNonnull, N,
+ ValueRegion, C);
+ }
+ return;
+ }
+
+ const auto *BinOp = dyn_cast<BinaryOperator>(S);
+
+ if (ValNullability == Nullability::Nullable) {
+ // Trust the static information of the value more than the static
+ // information on the location.
+ const Stmt *NullabilitySource = BinOp ? BinOp->getRHS() : S;
+ State = State->set<NullabilityMap>(
+ ValueRegion, NullabilityState(ValNullability, NullabilitySource));
+ C.addTransition(State);
+ return;
+ }
+
+ if (LocNullability == Nullability::Nullable) {
+ const Stmt *NullabilitySource = BinOp ? BinOp->getLHS() : S;
+ State = State->set<NullabilityMap>(
+ ValueRegion, NullabilityState(LocNullability, NullabilitySource));
+ C.addTransition(State);
+ }
+}
+
+void NullabilityChecker::printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const {
+
+ NullabilityMapTy B = State->get<NullabilityMap>();
+
+ if (B.isEmpty())
+ return;
+
+ Out << Sep << NL;
+
+ for (NullabilityMapTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ Out << I->first << " : ";
+ I->second.print(Out);
+ Out << NL;
+ }
+}
+
+#define REGISTER_CHECKER(name, trackingRequired) \
+ void ento::register##name##Checker(CheckerManager &mgr) { \
+ NullabilityChecker *checker = mgr.registerChecker<NullabilityChecker>(); \
+ checker->Filter.Check##name = true; \
+ checker->Filter.CheckName##name = mgr.getCurrentCheckName(); \
+ checker->NeedTracking = checker->NeedTracking || trackingRequired; \
+ }
+
+// These checks are likely to be turned on by default and it is possible to do
+// them without tracking any nullability-related information. As an
+// optimization, no nullability information will be tracked when only these two
+// checks are enabled.
+REGISTER_CHECKER(NullPassedToNonnull, false)
+REGISTER_CHECKER(NullReturnedFromNonnull, false)
+
+REGISTER_CHECKER(NullableDereferenced, true)
+REGISTER_CHECKER(NullablePassedToNonnull, true)
+REGISTER_CHECKER(NullableReturnedFromNonnull, true)
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
new file mode 100644
index 0000000..cbaa5c2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
@@ -0,0 +1,94 @@
+//== ObjCAtSyncChecker.cpp - nil mutex checker for @synchronized -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines ObjCAtSyncChecker, a builtin check that checks for null pointers
+// used as mutexes for @synchronized.
+//
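+// For illustration, a hypothetical case this checker flags:
+// @code
+//   NSObject *Lock = nil;
+//   @synchronized(Lock) { /* ... */ } // no synchronization will occur
+// @endcode
+//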
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ObjCAtSyncChecker
+ : public Checker< check::PreStmt<ObjCAtSynchronizedStmt> > {
+ mutable std::unique_ptr<BuiltinBug> BT_null;
+ mutable std::unique_ptr<BuiltinBug> BT_undef;
+
+public:
+ void checkPreStmt(const ObjCAtSynchronizedStmt *S, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
+ CheckerContext &C) const {
+
+ const Expr *Ex = S->getSynchExpr();
+ ProgramStateRef state = C.getState();
+ SVal V = state->getSVal(Ex, C.getLocationContext());
+
+ // Uninitialized value used for the mutex?
+ if (V.getAs<UndefinedVal>()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
+ if (!BT_undef)
+ BT_undef.reset(new BuiltinBug(this, "Uninitialized value used as mutex "
+ "for @synchronized"));
+ auto report =
+ llvm::make_unique<BugReport>(*BT_undef, BT_undef->getDescription(), N);
+ bugreporter::trackNullOrUndefValue(N, Ex, *report);
+ C.emitReport(std::move(report));
+ }
+ return;
+ }
+
+ if (V.isUnknown())
+ return;
+
+ // Check for null mutexes.
+ ProgramStateRef notNullState, nullState;
+ std::tie(notNullState, nullState) = state->assume(V.castAs<DefinedSVal>());
+
+ if (nullState) {
+ if (!notNullState) {
+ // Generate an error node. This isn't a sink since
+ // a null mutex just means no synchronization occurs.
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(nullState)) {
+ if (!BT_null)
+ BT_null.reset(new BuiltinBug(
+ this, "Nil value used as mutex for @synchronized() "
+ "(no synchronization will occur)"));
+ auto report =
+ llvm::make_unique<BugReport>(*BT_null, BT_null->getDescription(), N);
+ bugreporter::trackNullOrUndefValue(N, Ex, *report);
+
+ C.emitReport(std::move(report));
+ return;
+ }
+ }
+ // Don't add a transition for 'nullState'. If the value is
+ // under-constrained to be null or non-null, assume it is non-null
+ // afterwards.
+ }
+
+ if (notNullState)
+ C.addTransition(notNullState);
+}
+
+void ento::registerObjCAtSyncChecker(CheckerManager &mgr) {
+ if (mgr.getLangOpts().ObjC2)
+ mgr.registerChecker<ObjCAtSyncChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
new file mode 100644
index 0000000..b10ec84
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
@@ -0,0 +1,174 @@
+//== ObjCContainersASTChecker.cpp - CoreFoundation containers API *- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// An AST checker that looks for common pitfalls when using the 'CFArray',
+// 'CFDictionary', and 'CFSet' APIs.
+//
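+// For illustration, a hypothetical invalid call this checker flags:
+// @code
+//   char Chars[] = {'a', 'b'};
+//   CFArrayCreate(NULL, (const void **)Chars, 2, NULL); // not pointer-sized
+// @endcode
+//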
+//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class WalkAST : public StmtVisitor<WalkAST> {
+ BugReporter &BR;
+ const CheckerBase *Checker;
+ AnalysisDeclContext* AC;
+ ASTContext &ASTC;
+ uint64_t PtrWidth;
+
+ /// Check if the type has pointer size (very conservative).
+ inline bool isPointerSize(const Type *T) {
+ if (!T)
+ return true;
+ if (T->isIncompleteType())
+ return true;
+ return (ASTC.getTypeSize(T) == PtrWidth);
+ }
+
+ /// Check if the type is a pointer/array to pointer sized values.
+ inline bool hasPointerToPointerSizedType(const Expr *E) {
+ QualType T = E->getType();
+
+ // The type could be either a pointer or array.
+ const Type *TP = T.getTypePtr();
+ QualType PointeeT = TP->getPointeeType();
+ if (!PointeeT.isNull()) {
+      // If the type is a pointer to an array, check the size of the array
+      // elements. This avoids false positives coming from the assumption that
+      // the values x and &x are equal when x is an array.
+ if (const Type *TElem = PointeeT->getArrayElementTypeNoTypeQual())
+ if (isPointerSize(TElem))
+ return true;
+
+ // Else, check the pointee size.
+ return isPointerSize(PointeeT.getTypePtr());
+ }
+
+ if (const Type *TElem = TP->getArrayElementTypeNoTypeQual())
+ return isPointerSize(TElem);
+
+    // A valid argument should have an array/pointer type; at this point the
+    // type is neither.
+
+    // The argument could still be a null constant, which is allowed.
+ return static_cast<bool>(
+ E->isNullPointerConstant(ASTC, Expr::NPC_ValueDependentIsNull));
+ }
+
+public:
+ WalkAST(BugReporter &br, const CheckerBase *checker, AnalysisDeclContext *ac)
+ : BR(br), Checker(checker), AC(ac), ASTC(AC->getASTContext()),
+ PtrWidth(ASTC.getTargetInfo().getPointerWidth(0)) {}
+
+ // Statement visitor methods.
+ void VisitChildren(Stmt *S);
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+ void VisitCallExpr(CallExpr *CE);
+};
+} // end anonymous namespace
+
+static StringRef getCalleeName(CallExpr *CE) {
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (!FD)
+ return StringRef();
+
+ IdentifierInfo *II = FD->getIdentifier();
+ if (!II) // if no identifier, not a simple C function
+ return StringRef();
+
+ return II->getName();
+}
+
+void WalkAST::VisitCallExpr(CallExpr *CE) {
+ StringRef Name = getCalleeName(CE);
+ if (Name.empty())
+ return;
+
+ const Expr *Arg = nullptr;
+ unsigned ArgNum;
+
+ if (Name.equals("CFArrayCreate") || Name.equals("CFSetCreate")) {
+ if (CE->getNumArgs() != 4)
+ return;
+ ArgNum = 1;
+ Arg = CE->getArg(ArgNum)->IgnoreParenCasts();
+ if (hasPointerToPointerSizedType(Arg))
+ return;
+ } else if (Name.equals("CFDictionaryCreate")) {
+ if (CE->getNumArgs() != 6)
+ return;
+ // Check first argument.
+ ArgNum = 1;
+ Arg = CE->getArg(ArgNum)->IgnoreParenCasts();
+ if (hasPointerToPointerSizedType(Arg)) {
+ // Check second argument.
+ ArgNum = 2;
+ Arg = CE->getArg(ArgNum)->IgnoreParenCasts();
+ if (hasPointerToPointerSizedType(Arg))
+ // Both are good, return.
+ return;
+ }
+ }
+
+ if (Arg) {
+ assert(ArgNum == 1 || ArgNum == 2);
+
+ SmallString<64> BufName;
+ llvm::raw_svector_ostream OsName(BufName);
+ OsName << " Invalid use of '" << Name << "'" ;
+
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream Os(Buf);
+ // Use "second" and "third" since users will expect 1-based indexing
+ // for parameter names when mentioned in prose.
+ Os << " The "<< ((ArgNum == 1) ? "second" : "third") << " argument to '"
+ << Name << "' must be a C array of pointer-sized values, not '"
+ << Arg->getType().getAsString() << "'";
+
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), Checker, OsName.str(),
+ categories::CoreFoundationObjectiveC, Os.str(), CELoc,
+ Arg->getSourceRange());
+ }
+
+ // Recurse and check children.
+ VisitChildren(CE);
+}
+
+void WalkAST::VisitChildren(Stmt *S) {
+ for (Stmt *Child : S->children())
+ if (Child)
+ Visit(Child);
+}
+
+namespace {
+class ObjCContainersASTChecker : public Checker<check::ASTCodeBody> {
+public:
+
+ void checkASTCodeBody(const Decl *D, AnalysisManager& Mgr,
+ BugReporter &BR) const {
+ WalkAST walker(BR, this, Mgr.getAnalysisDeclContext(D));
+ walker.Visit(D->getBody());
+ }
+};
+}
+
+void ento::registerObjCContainersASTChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCContainersASTChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
new file mode 100644
index 0000000..0203d79
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
@@ -0,0 +1,175 @@
+//== ObjCContainersChecker.cpp - Path sensitive checker for CFArray *- C++ -*=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Performs path-sensitive checks of Core Foundation static containers like
+// CFArray.
+// 1) Check for buffer overflows:
+//    In CFArrayGetValueAtIndex(myArray, index), if index is outside the
+//    index space of myArray (0 to N-1 inclusive, where N is the count of
+//    myArray), the behavior is undefined.
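+//
+// For illustration (hypothetical out-of-bounds access):
+// @code
+//   CFArrayRef A = CFArrayCreate(NULL, Values, 3, &kCFTypeArrayCallBacks);
+//   const void *V = CFArrayGetValueAtIndex(A, 3); // index is out of bounds
+// @endcode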
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ObjCContainersChecker : public Checker< check::PreStmt<CallExpr>,
+ check::PostStmt<CallExpr>,
+ check::PointerEscape> {
+ mutable std::unique_ptr<BugType> BT;
+ inline void initBugType() const {
+ if (!BT)
+ BT.reset(new BugType(this, "CFArray API",
+ categories::CoreFoundationObjectiveC));
+ }
+
+ inline SymbolRef getArraySym(const Expr *E, CheckerContext &C) const {
+ SVal ArrayRef = C.getState()->getSVal(E, C.getLocationContext());
+ SymbolRef ArraySym = ArrayRef.getAsSymbol();
+ return ArraySym;
+ }
+
+ void addSizeInfo(const Expr *Array, const Expr *Size,
+ CheckerContext &C) const;
+
+public:
+  /// A tag to identify this checker.
+ static void *getTag() { static int Tag; return &Tag; }
+
+ void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+ ProgramStateRef checkPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind) const;
+};
+} // end anonymous namespace
+
+// ProgramState trait - a map from array symbol to its state.
+REGISTER_MAP_WITH_PROGRAMSTATE(ArraySizeMap, SymbolRef, DefinedSVal)
+
+void ObjCContainersChecker::addSizeInfo(const Expr *Array, const Expr *Size,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SVal SizeV = State->getSVal(Size, C.getLocationContext());
+ // Undefined is reported by another checker.
+ if (SizeV.isUnknownOrUndef())
+ return;
+
+ // Get the ArrayRef symbol.
+ SVal ArrayRef = State->getSVal(Array, C.getLocationContext());
+ SymbolRef ArraySym = ArrayRef.getAsSymbol();
+ if (!ArraySym)
+ return;
+
+ C.addTransition(
+ State->set<ArraySizeMap>(ArraySym, SizeV.castAs<DefinedSVal>()));
+ return;
+}
+
+void ObjCContainersChecker::checkPostStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ StringRef Name = C.getCalleeName(CE);
+ if (Name.empty() || CE->getNumArgs() < 1)
+ return;
+
+ // Add array size information to the state.
+ if (Name.equals("CFArrayCreate")) {
+ if (CE->getNumArgs() < 3)
+ return;
+ // Note, we can visit the Create method in the post-visit because
+ // the CFIndex parameter is passed in by value and will not be invalidated
+ // by the call.
+ addSizeInfo(CE, CE->getArg(2), C);
+ return;
+ }
+
+ if (Name.equals("CFArrayGetCount")) {
+ addSizeInfo(CE->getArg(0), CE, C);
+ return;
+ }
+}
+
+void ObjCContainersChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ StringRef Name = C.getCalleeName(CE);
+ if (Name.empty() || CE->getNumArgs() < 2)
+ return;
+
+ // Check the array access.
+ if (Name.equals("CFArrayGetValueAtIndex")) {
+ ProgramStateRef State = C.getState();
+ // Retrieve the size.
+ // Find out if we saw this array symbol before and have information about
+ // it.
+ const Expr *ArrayExpr = CE->getArg(0);
+ SymbolRef ArraySym = getArraySym(ArrayExpr, C);
+ if (!ArraySym)
+ return;
+
+ const DefinedSVal *Size = State->get<ArraySizeMap>(ArraySym);
+
+ if (!Size)
+ return;
+
+ // Get the index.
+ const Expr *IdxExpr = CE->getArg(1);
+ SVal IdxVal = State->getSVal(IdxExpr, C.getLocationContext());
+ if (IdxVal.isUnknownOrUndef())
+ return;
+ DefinedSVal Idx = IdxVal.castAs<DefinedSVal>();
+
+ // Now, check if 'Idx in [0, Size-1]'.
+ const QualType T = IdxExpr->getType();
+ ProgramStateRef StInBound = State->assumeInBound(Idx, *Size, true, T);
+ ProgramStateRef StOutBound = State->assumeInBound(Idx, *Size, false, T);
+ if (StOutBound && !StInBound) {
+ ExplodedNode *N = C.generateErrorNode(StOutBound);
+ if (!N)
+ return;
+ initBugType();
+ auto R = llvm::make_unique<BugReport>(*BT, "Index is out of bounds", N);
+ R->addRange(IdxExpr->getSourceRange());
+ C.emitReport(std::move(R));
+ return;
+ }
+ }
+}
+
+ProgramStateRef
+ObjCContainersChecker::checkPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind) const {
+ for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
+ E = Escaped.end();
+ I != E; ++I) {
+ SymbolRef Sym = *I;
+ // When a symbol for a mutable array escapes, we can't reason precisely
+ // about its size any more -- so remove it from the map.
+ // Note that we aren't notified here when a CFMutableArrayRef escapes as a
+ // CFArrayRef. This is because CFArrayRef is typedef'd as a pointer to a
+ // const-qualified type.
+ State = State->remove<ArraySizeMap>(Sym);
+ }
+ return State;
+}
+/// Register checker.
+void ento::registerObjCContainersChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCContainersChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
new file mode 100644
index 0000000..32a1adb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
@@ -0,0 +1,263 @@
+//==- ObjCMissingSuperCallChecker.cpp - Check missing super-calls in ObjC --==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ObjCMissingSuperCallChecker, a checker that
+// analyzes a UIViewController implementation to determine if it
+// correctly calls super in the methods where this is mandatory.
+//
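+// For illustration, a hypothetical override that would be flagged:
+// @code
+//   - (void)viewDidLoad {
+//     [self setupViews]; // missing [super viewDidLoad]
+//   }
+// @endcode
+//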
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+struct SelectorDescriptor {
+ const char *SelectorName;
+ unsigned ArgumentCount;
+};
+
+//===----------------------------------------------------------------------===//
+// FindSuperCallVisitor - Identify specific calls to the superclass.
+//===----------------------------------------------------------------------===//
+
+class FindSuperCallVisitor : public RecursiveASTVisitor<FindSuperCallVisitor> {
+public:
+ explicit FindSuperCallVisitor(Selector S) : DoesCallSuper(false), Sel(S) {}
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ if (E->getSelector() == Sel)
+ if (E->getReceiverKind() == ObjCMessageExpr::SuperInstance)
+ DoesCallSuper = true;
+
+ // Recurse if we didn't find the super call yet.
+ return !DoesCallSuper;
+ }
+
+ bool DoesCallSuper;
+
+private:
+ Selector Sel;
+};
+
+//===----------------------------------------------------------------------===//
+// ObjCSuperCallChecker
+//===----------------------------------------------------------------------===//
+
+class ObjCSuperCallChecker : public Checker<
+ check::ASTDecl<ObjCImplementationDecl> > {
+public:
+ ObjCSuperCallChecker() : IsInitialized(false) {}
+
+ void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager &Mgr,
+ BugReporter &BR) const;
+private:
+ bool isCheckableClass(const ObjCImplementationDecl *D,
+ StringRef &SuperclassName) const;
+ void initializeSelectors(ASTContext &Ctx) const;
+ void fillSelectors(ASTContext &Ctx, ArrayRef<SelectorDescriptor> Sel,
+ StringRef ClassName) const;
+ mutable llvm::StringMap<llvm::SmallSet<Selector, 16> > SelectorsForClass;
+ mutable bool IsInitialized;
+};
+
+}
+
+/// \brief Determine whether the given class has a superclass that we want
+/// to check. The name of the found superclass is stored in SuperclassName.
+///
+/// \param D The declaration to check for superclasses.
+/// \param[out] SuperclassName On return, the found superclass name.
+bool ObjCSuperCallChecker::isCheckableClass(const ObjCImplementationDecl *D,
+ StringRef &SuperclassName) const {
+ const ObjCInterfaceDecl *ID = D->getClassInterface()->getSuperClass();
+  for (; ID; ID = ID->getSuperClass()) {
+ SuperclassName = ID->getIdentifier()->getName();
+ if (SelectorsForClass.count(SuperclassName))
+ return true;
+ }
+ return false;
+}
+
+void ObjCSuperCallChecker::fillSelectors(ASTContext &Ctx,
+ ArrayRef<SelectorDescriptor> Sel,
+ StringRef ClassName) const {
+ llvm::SmallSet<Selector, 16> &ClassSelectors = SelectorsForClass[ClassName];
+ // Fill the Selectors SmallSet with all selectors we want to check.
+ for (ArrayRef<SelectorDescriptor>::iterator I = Sel.begin(), E = Sel.end();
+ I != E; ++I) {
+ SelectorDescriptor Descriptor = *I;
+ assert(Descriptor.ArgumentCount <= 1); // No multi-argument selectors yet.
+
+ // Get the selector.
+ IdentifierInfo *II = &Ctx.Idents.get(Descriptor.SelectorName);
+
+ Selector Sel = Ctx.Selectors.getSelector(Descriptor.ArgumentCount, &II);
+ ClassSelectors.insert(Sel);
+ }
+}
+
+void ObjCSuperCallChecker::initializeSelectors(ASTContext &Ctx) const {
+
+ { // Initialize selectors for: UIViewController
+ const SelectorDescriptor Selectors[] = {
+ { "addChildViewController", 1 },
+ { "viewDidAppear", 1 },
+ { "viewDidDisappear", 1 },
+ { "viewWillAppear", 1 },
+ { "viewWillDisappear", 1 },
+ { "removeFromParentViewController", 0 },
+ { "didReceiveMemoryWarning", 0 },
+ { "viewDidUnload", 0 },
+ { "viewDidLoad", 0 },
+ { "viewWillUnload", 0 },
+ { "updateViewConstraints", 0 },
+ { "encodeRestorableStateWithCoder", 1 },
+ { "restoreStateWithCoder", 1 }};
+
+ fillSelectors(Ctx, Selectors, "UIViewController");
+ }
+
+ { // Initialize selectors for: UIResponder
+ const SelectorDescriptor Selectors[] = {
+ { "resignFirstResponder", 0 }};
+
+ fillSelectors(Ctx, Selectors, "UIResponder");
+ }
+
+ { // Initialize selectors for: NSResponder
+ const SelectorDescriptor Selectors[] = {
+ { "encodeRestorableStateWithCoder", 1 },
+ { "restoreStateWithCoder", 1 }};
+
+ fillSelectors(Ctx, Selectors, "NSResponder");
+ }
+
+ { // Initialize selectors for: NSDocument
+ const SelectorDescriptor Selectors[] = {
+ { "encodeRestorableStateWithCoder", 1 },
+ { "restoreStateWithCoder", 1 }};
+
+ fillSelectors(Ctx, Selectors, "NSDocument");
+ }
+
+ IsInitialized = true;
+}
+
+void ObjCSuperCallChecker::checkASTDecl(const ObjCImplementationDecl *D,
+ AnalysisManager &Mgr,
+ BugReporter &BR) const {
+ ASTContext &Ctx = BR.getContext();
+
+ // We need to initialize the selector table once.
+ if (!IsInitialized)
+ initializeSelectors(Ctx);
+
+ // Find out whether this class has a superclass that we are supposed to check.
+ StringRef SuperclassName;
+ if (!isCheckableClass(D, SuperclassName))
+ return;
+
+
+ // Iterate over all instance methods.
+ for (auto *MD : D->instance_methods()) {
+ Selector S = MD->getSelector();
+ // Find out whether this is a selector that we want to check.
+ if (!SelectorsForClass[SuperclassName].count(S))
+ continue;
+
+ // Check if the method calls its superclass implementation.
+    if (MD->getBody()) {
+ FindSuperCallVisitor Visitor(S);
+ Visitor.TraverseDecl(MD);
+
+ // It doesn't call super, emit a diagnostic.
+ if (!Visitor.DoesCallSuper) {
+ PathDiagnosticLocation DLoc =
+ PathDiagnosticLocation::createEnd(MD->getBody(),
+ BR.getSourceManager(),
+ Mgr.getAnalysisDeclContext(D));
+
+ const char *Name = "Missing call to superclass";
+ SmallString<320> Buf;
+ llvm::raw_svector_ostream os(Buf);
+
+ os << "The '" << S.getAsString()
+ << "' instance method in " << SuperclassName.str() << " subclass '"
+ << *D << "' is missing a [super " << S.getAsString() << "] call";
+
+ BR.EmitBasicReport(MD, this, Name, categories::CoreFoundationObjectiveC,
+ os.str(), DLoc);
+ }
+ }
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// Check registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerObjCSuperCallChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<ObjCSuperCallChecker>();
+}
+
+
+/*
+ ToDo list for expanding this check in the future, the list is not exhaustive.
+ There are also cases where calling super is suggested but not "mandatory".
+ In addition to being able to check the classes and methods below,
+ architectural improvements, like allowing the super-call to be made in a
+ called method, would be good too.
+
+UIDocument subclasses
+- finishedHandlingError:recovered: (is multi-arg)
+
+UIViewController subclasses
+- loadView (should *never* call super)
+- transitionFromViewController:toViewController:
+ duration:options:animations:completion: (is multi-arg)
+
+UICollectionViewController subclasses
+- loadView (take care because UIViewController subclasses should NOT call super
+ in loadView, but UICollectionViewController subclasses should)
+
+NSObject subclasses
+- doesNotRecognizeSelector (it only has to call super if it doesn't throw)
+
+UIPopoverBackgroundView subclasses (some of those are class methods)
+- arrowDirection (should *never* call super)
+- arrowOffset (should *never* call super)
+- arrowBase (should *never* call super)
+- arrowHeight (should *never* call super)
+- contentViewInsets (should *never* call super)
+
+UITextSelectionRect subclasses (some of those are properties)
+- rect (should *never* call super)
+- range (should *never* call super)
+- writingDirection (should *never* call super)
+- isVertical (should *never* call super)
+- containsStart (should *never* call super)
+- containsEnd (should *never* call super)
+*/
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
new file mode 100644
index 0000000..ffa3a27
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
@@ -0,0 +1,439 @@
+//== ObjCSelfInitChecker.cpp - Checker for 'self' initialization -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines ObjCSelfInitChecker, a builtin check that checks for uses of
+// 'self' before proper initialization.
+//
+//===----------------------------------------------------------------------===//
+
+// This checks initialization methods to verify that they assign 'self' to the
+// result of an initialization call (e.g. [super init], or [self initWith..])
+// before using 'self' or any instance variable.
+//
+// To perform the required checking, values are tagged with flags that indicate
+// 1) if the object is the one pointed to by 'self', and 2) if the object
+// is the result of an initializer (e.g. [super init]).
+//
+// Uses of an object that is true for 1) but not 2) trigger a diagnostic.
+// The uses that are currently checked are:
+// - Using instance variables.
+// - Returning the object.
+//
+// Note that we don't check for an invalid 'self' that is the receiver of an
+// obj-c message expression, to cut down false positives where logging
+// functions get information from self (like its class) or perform
+// "invalidation" on self when the initialization fails.
+//
+// Because the object that 'self' points to gets invalidated when a call
+// receives a reference to 'self', the checker keeps track and passes the flags
+// for 1) and 2) to the new object that 'self' points to after the call.
+//
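+// For illustration, a hypothetical initializer that triggers a diagnostic:
+// @code
+//   - (id)init {
+//     [super init];  // result not assigned back to 'self'
+//     _ivar = 0;     // warning: ivar used while 'self' is not set to the
+//                    // result of '[(super or self) init...]'
+//     return self;
+//   }
+// @endcode
+//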
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+static bool shouldRunOnFunctionOrMethod(const NamedDecl *ND);
+static bool isInitializationMethod(const ObjCMethodDecl *MD);
+static bool isInitMessage(const ObjCMethodCall &Msg);
+static bool isSelfVar(SVal location, CheckerContext &C);
+
+namespace {
+class ObjCSelfInitChecker : public Checker< check::PostObjCMessage,
+ check::PostStmt<ObjCIvarRefExpr>,
+ check::PreStmt<ReturnStmt>,
+ check::PreCall,
+ check::PostCall,
+ check::Location,
+ check::Bind > {
+ mutable std::unique_ptr<BugType> BT;
+
+ void checkForInvalidSelf(const Expr *E, CheckerContext &C,
+ const char *errorStr) const;
+
+public:
+ ObjCSelfInitChecker() {}
+ void checkPostObjCMessage(const ObjCMethodCall &Msg, CheckerContext &C) const;
+ void checkPostStmt(const ObjCIvarRefExpr *E, CheckerContext &C) const;
+ void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
+ void checkLocation(SVal location, bool isLoad, const Stmt *S,
+ CheckerContext &C) const;
+ void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
+
+ void checkPreCall(const CallEvent &CE, CheckerContext &C) const;
+ void checkPostCall(const CallEvent &CE, CheckerContext &C) const;
+
+ void printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const override;
+};
+} // end anonymous namespace
+
+namespace {
+enum SelfFlagEnum {
+ /// \brief No flag set.
+ SelfFlag_None = 0x0,
+ /// \brief Value came from 'self'.
+ SelfFlag_Self = 0x1,
+ /// \brief Value came from the result of an initializer (e.g. [super init]).
+ SelfFlag_InitRes = 0x2
+};
+}
+
+REGISTER_MAP_WITH_PROGRAMSTATE(SelfFlag, SymbolRef, unsigned)
+REGISTER_TRAIT_WITH_PROGRAMSTATE(CalledInit, bool)
+
+/// \brief A call receiving a reference to 'self' invalidates the object that
+/// 'self' contains. This keeps the "self flags" assigned to the 'self'
+/// object before the call so we can assign them to the new object that 'self'
+/// points to after the call.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(PreCallSelfFlags, unsigned)
+
+static SelfFlagEnum getSelfFlags(SVal val, ProgramStateRef state) {
+ if (SymbolRef sym = val.getAsSymbol())
+ if (const unsigned *attachedFlags = state->get<SelfFlag>(sym))
+ return (SelfFlagEnum)*attachedFlags;
+ return SelfFlag_None;
+}
+
+static SelfFlagEnum getSelfFlags(SVal val, CheckerContext &C) {
+ return getSelfFlags(val, C.getState());
+}
+
+static void addSelfFlag(ProgramStateRef state, SVal val,
+ SelfFlagEnum flag, CheckerContext &C) {
+ // We tag the symbol that the SVal wraps.
+ if (SymbolRef sym = val.getAsSymbol()) {
+ state = state->set<SelfFlag>(sym, getSelfFlags(val, state) | flag);
+ C.addTransition(state);
+ }
+}
+
+static bool hasSelfFlag(SVal val, SelfFlagEnum flag, CheckerContext &C) {
+ return getSelfFlags(val, C) & flag;
+}
+
+/// \brief Returns true if the value of the expression is the object that
+/// 'self' points to and that object did not come from the result of calling
+/// an initializer.
+static bool isInvalidSelf(const Expr *E, CheckerContext &C) {
+ SVal exprVal = C.getState()->getSVal(E, C.getLocationContext());
+ if (!hasSelfFlag(exprVal, SelfFlag_Self, C))
+ return false; // value did not come from 'self'.
+ if (hasSelfFlag(exprVal, SelfFlag_InitRes, C))
+ return false; // 'self' is properly initialized.
+
+ return true;
+}
+
+void ObjCSelfInitChecker::checkForInvalidSelf(const Expr *E, CheckerContext &C,
+ const char *errorStr) const {
+ if (!E)
+ return;
+
+ if (!C.getState()->get<CalledInit>())
+ return;
+
+ if (!isInvalidSelf(E, C))
+ return;
+
+ // Generate an error node.
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return;
+
+ if (!BT)
+ BT.reset(new BugType(this, "Missing \"self = [(super or self) init...]\"",
+ categories::CoreFoundationObjectiveC));
+ C.emitReport(llvm::make_unique<BugReport>(*BT, errorStr, N));
+}
+
+void ObjCSelfInitChecker::checkPostObjCMessage(const ObjCMethodCall &Msg,
+ CheckerContext &C) const {
+ // When encountering a message that does initialization (init rule),
+ // tag the return value so that we know later on that if self has this value
+ // then it is properly initialized.
+
+ // FIXME: A callback should disable checkers at the start of functions.
+ if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
+ C.getCurrentAnalysisDeclContext()->getDecl())))
+ return;
+
+ if (isInitMessage(Msg)) {
+ // Tag the return value as the result of an initializer.
+ ProgramStateRef state = C.getState();
+
+ // FIXME this really should be context sensitive, where we record
+ // the current stack frame (for IPA). Also, we need to clean this
+ // value out when we return from this method.
+ state = state->set<CalledInit>(true);
+
+ SVal V = state->getSVal(Msg.getOriginExpr(), C.getLocationContext());
+ addSelfFlag(state, V, SelfFlag_InitRes, C);
+ return;
+ }
+
+  // We don't check for an invalid 'self' in an obj-c message expression, to
+  // cut down false positives where logging functions get information from self
+  // (like its class) or perform "invalidation" on self when the initialization
+  // fails.
+}
+
+void ObjCSelfInitChecker::checkPostStmt(const ObjCIvarRefExpr *E,
+ CheckerContext &C) const {
+ // FIXME: A callback should disable checkers at the start of functions.
+ if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
+ C.getCurrentAnalysisDeclContext()->getDecl())))
+ return;
+
+ checkForInvalidSelf(
+ E->getBase(), C,
+ "Instance variable used while 'self' is not set to the result of "
+ "'[(super or self) init...]'");
+}
+
+void ObjCSelfInitChecker::checkPreStmt(const ReturnStmt *S,
+ CheckerContext &C) const {
+ // FIXME: A callback should disable checkers at the start of functions.
+ if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
+ C.getCurrentAnalysisDeclContext()->getDecl())))
+ return;
+
+ checkForInvalidSelf(S->getRetValue(), C,
+ "Returning 'self' while it is not set to the result of "
+ "'[(super or self) init...]'");
+}
+
+// When a call receives a reference to 'self', the [Pre/Post]Call callbacks
+// pass the SelfFlags from the object 'self' pointed to before the call to the
+// new object that 'self' points to after the call. This avoids treating 'self'
+// as invalidated by logging functions.
+// Another common pattern in classes with multiple initializers is to put the
+// subclass's common initialization bits into a static function that receives
+// the value of 'self', e.g:
+// @code
+// if (!(self = [super init]))
+// return nil;
+// if (!(self = _commonInit(self)))
+// return nil;
+// @endcode
+// Until we can use inter-procedural analysis, in such a call, transfer the
+// SelfFlags to the result of the call.
+
+void ObjCSelfInitChecker::checkPreCall(const CallEvent &CE,
+ CheckerContext &C) const {
+ // FIXME: A callback should disable checkers at the start of functions.
+ if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
+ C.getCurrentAnalysisDeclContext()->getDecl())))
+ return;
+
+ ProgramStateRef state = C.getState();
+ unsigned NumArgs = CE.getNumArgs();
+  // If we passed 'self' as an argument to the call, record it in the state
+  // to be propagated after the call.
+ // Note, we could have just given up, but try to be more optimistic here and
+ // assume that the functions are going to continue initialization or will not
+ // modify self.
+ for (unsigned i = 0; i < NumArgs; ++i) {
+ SVal argV = CE.getArgSVal(i);
+ if (isSelfVar(argV, C)) {
+ unsigned selfFlags = getSelfFlags(state->getSVal(argV.castAs<Loc>()), C);
+ C.addTransition(state->set<PreCallSelfFlags>(selfFlags));
+ return;
+ } else if (hasSelfFlag(argV, SelfFlag_Self, C)) {
+ unsigned selfFlags = getSelfFlags(argV, C);
+ C.addTransition(state->set<PreCallSelfFlags>(selfFlags));
+ return;
+ }
+ }
+}
+
+void ObjCSelfInitChecker::checkPostCall(const CallEvent &CE,
+ CheckerContext &C) const {
+ // FIXME: A callback should disable checkers at the start of functions.
+ if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
+ C.getCurrentAnalysisDeclContext()->getDecl())))
+ return;
+
+ ProgramStateRef state = C.getState();
+ SelfFlagEnum prevFlags = (SelfFlagEnum)state->get<PreCallSelfFlags>();
+ if (!prevFlags)
+ return;
+ state = state->remove<PreCallSelfFlags>();
+
+ unsigned NumArgs = CE.getNumArgs();
+ for (unsigned i = 0; i < NumArgs; ++i) {
+ SVal argV = CE.getArgSVal(i);
+ if (isSelfVar(argV, C)) {
+ // If the address of 'self' is being passed to the call, assume that the
+ // 'self' after the call will have the same flags.
+ // EX: log(&self)
+ addSelfFlag(state, state->getSVal(argV.castAs<Loc>()), prevFlags, C);
+ return;
+ } else if (hasSelfFlag(argV, SelfFlag_Self, C)) {
+ // If 'self' is passed to the call by value, assume that the function
+ // returns 'self'. So assign the flags, which were set on 'self' to the
+ // return value.
+ // EX: self = performMoreInitialization(self)
+ addSelfFlag(state, CE.getReturnValue(), prevFlags, C);
+ return;
+ }
+ }
+
+ C.addTransition(state);
+}
+
+void ObjCSelfInitChecker::checkLocation(SVal location, bool isLoad,
+ const Stmt *S,
+ CheckerContext &C) const {
+ if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
+ C.getCurrentAnalysisDeclContext()->getDecl())))
+ return;
+
+ // Tag the result of a load from 'self' so that we can easily know that the
+ // value is the object that 'self' points to.
+ ProgramStateRef state = C.getState();
+ if (isSelfVar(location, C))
+ addSelfFlag(state, state->getSVal(location.castAs<Loc>()), SelfFlag_Self,
+ C);
+}
+
+
+void ObjCSelfInitChecker::checkBind(SVal loc, SVal val, const Stmt *S,
+ CheckerContext &C) const {
+ // Allow assignment of anything to self. Self is a local variable in the
+ // initializer, so it is legal to assign anything to it, like results of
+ // static functions/method calls. After self is assigned something we cannot
+ // reason about, stop enforcing the rules.
+ // (Only continue checking if the assigned value should be treated as self.)
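+  //
+  // For illustration (hypothetical):
+  // @code
+  //   self = [MyClass new];      // untracked value: stop enforcing the rules
+  //   self = _commonInit(self);  // flagged value: keep tracking
+  // @endcode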
+ if ((isSelfVar(loc, C)) &&
+ !hasSelfFlag(val, SelfFlag_InitRes, C) &&
+ !hasSelfFlag(val, SelfFlag_Self, C) &&
+ !isSelfVar(val, C)) {
+
+ // Stop tracking the checker-specific state in the state.
+ ProgramStateRef State = C.getState();
+ State = State->remove<CalledInit>();
+ if (SymbolRef sym = loc.getAsSymbol())
+ State = State->remove<SelfFlag>(sym);
+ C.addTransition(State);
+ }
+}
+
+void ObjCSelfInitChecker::printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const {
+ SelfFlagTy FlagMap = State->get<SelfFlag>();
+ bool DidCallInit = State->get<CalledInit>();
+ SelfFlagEnum PreCallFlags = (SelfFlagEnum)State->get<PreCallSelfFlags>();
+
+ if (FlagMap.isEmpty() && !DidCallInit && !PreCallFlags)
+ return;
+
+ Out << Sep << NL << *this << " :" << NL;
+
+ if (DidCallInit)
+ Out << " An init method has been called." << NL;
+
+ if (PreCallFlags != SelfFlag_None) {
+ if (PreCallFlags & SelfFlag_Self) {
+ Out << " An argument of the current call came from the 'self' variable."
+ << NL;
+ }
+ if (PreCallFlags & SelfFlag_InitRes) {
+ Out << " An argument of the current call came from an init method."
+ << NL;
+ }
+ }
+
+ Out << NL;
+ for (SelfFlagTy::iterator I = FlagMap.begin(), E = FlagMap.end();
+ I != E; ++I) {
+ Out << I->first << " : ";
+
+ if (I->second == SelfFlag_None)
+ Out << "none";
+
+ if (I->second & SelfFlag_Self)
+ Out << "self variable";
+
+ if (I->second & SelfFlag_InitRes) {
+ if (I->second != SelfFlag_InitRes)
+ Out << " | ";
+ Out << "result of init method";
+ }
+
+ Out << NL;
+ }
+}
+
+
+// FIXME: A callback should disable checkers at the start of functions.
+static bool shouldRunOnFunctionOrMethod(const NamedDecl *ND) {
+ if (!ND)
+ return false;
+
+ const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(ND);
+ if (!MD)
+ return false;
+ if (!isInitializationMethod(MD))
+ return false;
+
+ // self = [super init] applies only to NSObject subclasses.
+ // For instance, NSProxy doesn't implement -init.
+ ASTContext &Ctx = MD->getASTContext();
+  IdentifierInfo *NSObjectII = &Ctx.Idents.get("NSObject");
+ ObjCInterfaceDecl *ID = MD->getClassInterface()->getSuperClass();
+ for ( ; ID ; ID = ID->getSuperClass()) {
+ IdentifierInfo *II = ID->getIdentifier();
+
+ if (II == NSObjectII)
+ break;
+ }
+ return ID != nullptr;
+}
+
+/// \brief Returns true if the location is 'self'.
+static bool isSelfVar(SVal location, CheckerContext &C) {
+ AnalysisDeclContext *analCtx = C.getCurrentAnalysisDeclContext();
+ if (!analCtx->getSelfDecl())
+ return false;
+ if (!location.getAs<loc::MemRegionVal>())
+ return false;
+
+ loc::MemRegionVal MRV = location.castAs<loc::MemRegionVal>();
+ if (const DeclRegion *DR = dyn_cast<DeclRegion>(MRV.stripCasts()))
+ return (DR->getDecl() == analCtx->getSelfDecl());
+
+ return false;
+}
+
+static bool isInitializationMethod(const ObjCMethodDecl *MD) {
+ return MD->getMethodFamily() == OMF_init;
+}
+
+static bool isInitMessage(const ObjCMethodCall &Call) {
+ return Call.getMethodFamily() == OMF_init;
+}
+
+//===----------------------------------------------------------------------===//
+// Registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerObjCSelfInitChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCSelfInitChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
new file mode 100644
index 0000000..c6da37e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
@@ -0,0 +1,188 @@
+//==- ObjCUnusedIVarsChecker.cpp - Check for unused ivars --------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CheckObjCUnusedIvars, a checker that
+// analyzes an Objective-C class's interface/implementation to determine if it
+// has any ivars that are never accessed.
+//
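+// For illustration, a hypothetical ivar this checker reports:
+// @code
+//   @implementation MyClass {
+//   @private
+//     int _neverUsed; // not referenced by any method in the @implementation
+//   }
+//   @end
+// @endcode
+//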
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+
+using namespace clang;
+using namespace ento;
+
+enum IVarState { Unused, Used };
+typedef llvm::DenseMap<const ObjCIvarDecl *, IVarState> IvarUsageMap;
+
+static void Scan(IvarUsageMap& M, const Stmt *S) {
+ if (!S)
+ return;
+
+ if (const ObjCIvarRefExpr *Ex = dyn_cast<ObjCIvarRefExpr>(S)) {
+ const ObjCIvarDecl *D = Ex->getDecl();
+ IvarUsageMap::iterator I = M.find(D);
+ if (I != M.end())
+ I->second = Used;
+ return;
+ }
+
+ // Blocks can reference an instance variable of a class.
+ if (const BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
+ Scan(M, BE->getBody());
+ return;
+ }
+
+ if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(S))
+ for (PseudoObjectExpr::const_semantics_iterator
+ i = POE->semantics_begin(), e = POE->semantics_end(); i != e; ++i) {
+ const Expr *sub = *i;
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(sub))
+ sub = OVE->getSourceExpr();
+ Scan(M, sub);
+ }
+
+ for (const Stmt *SubStmt : S->children())
+ Scan(M, SubStmt);
+}
+
+static void Scan(IvarUsageMap& M, const ObjCPropertyImplDecl *D) {
+ if (!D)
+ return;
+
+ const ObjCIvarDecl *ID = D->getPropertyIvarDecl();
+
+ if (!ID)
+ return;
+
+ IvarUsageMap::iterator I = M.find(ID);
+ if (I != M.end())
+ I->second = Used;
+}
+
+static void Scan(IvarUsageMap& M, const ObjCContainerDecl *D) {
+ // Scan the methods for accesses.
+ for (const auto *I : D->instance_methods())
+ Scan(M, I->getBody());
+
+ if (const ObjCImplementationDecl *ID = dyn_cast<ObjCImplementationDecl>(D)) {
+ // Scan for @synthesized property methods that act as setters/getters
+ // to an ivar.
+ for (const auto *I : ID->property_impls())
+ Scan(M, I);
+
+ // Scan the associated categories as well.
+ for (const auto *Cat : ID->getClassInterface()->visible_categories()) {
+ if (const ObjCCategoryImplDecl *CID = Cat->getImplementation())
+ Scan(M, CID);
+ }
+ }
+}
+
+static void Scan(IvarUsageMap &M, const DeclContext *C, const FileID FID,
+ SourceManager &SM) {
+ for (const auto *I : C->decls())
+ if (const auto *FD = dyn_cast<FunctionDecl>(I)) {
+ SourceLocation L = FD->getLocStart();
+ if (SM.getFileID(L) == FID)
+ Scan(M, FD->getBody());
+ }
+}
+
+static void checkObjCUnusedIvar(const ObjCImplementationDecl *D,
+ BugReporter &BR,
+ const CheckerBase *Checker) {
+
+ const ObjCInterfaceDecl *ID = D->getClassInterface();
+ IvarUsageMap M;
+
+ // Iterate over the ivars.
+ for (const auto *Ivar : ID->ivars()) {
+    // Ignore ivars that...
+    // (a) aren't private,
+    // (b) are explicitly marked unused,
+    // (c) are IBOutlets, or
+    // (d) are unnamed bitfields.
+ if (Ivar->getAccessControl() != ObjCIvarDecl::Private ||
+ Ivar->hasAttr<UnusedAttr>() || Ivar->hasAttr<IBOutletAttr>() ||
+ Ivar->hasAttr<IBOutletCollectionAttr>() ||
+ Ivar->isUnnamedBitfield())
+ continue;
+
+ M[Ivar] = Unused;
+ }
+
+ if (M.empty())
+ return;
+
+ // Now scan the implementation declaration.
+ Scan(M, D);
+
+ // Any potentially unused ivars?
+ bool hasUnused = false;
+ for (IvarUsageMap::iterator I = M.begin(), E = M.end(); I!=E; ++I)
+ if (I->second == Unused) {
+ hasUnused = true;
+ break;
+ }
+
+ if (!hasUnused)
+ return;
+
+ // We found some potentially unused ivars. Scan the entire translation unit
+ // for functions inside the @implementation that reference these ivars.
+ // FIXME: In the future hopefully we can just use the lexical DeclContext
+ // to go from the ObjCImplementationDecl to the lexically "nested"
+ // C functions.
+ SourceManager &SM = BR.getSourceManager();
+ Scan(M, D->getDeclContext(), SM.getFileID(D->getLocation()), SM);
+
+ // Find ivars that are unused.
+ for (IvarUsageMap::iterator I = M.begin(), E = M.end(); I!=E; ++I)
+ if (I->second == Unused) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ os << "Instance variable '" << *I->first << "' in class '" << *ID
+ << "' is never used by the methods in its @implementation "
+ "(although it may be used by category methods).";
+
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::create(I->first, BR.getSourceManager());
+ BR.EmitBasicReport(D, Checker, "Unused instance variable", "Optimization",
+ os.str(), L);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCUnusedIvarsChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCUnusedIvarsChecker : public Checker<
+ check::ASTDecl<ObjCImplementationDecl> > {
+public:
+ void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ checkObjCUnusedIvar(D, BR, this);
+ }
+};
+}
+
+void ento::registerObjCUnusedIvarsChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCUnusedIvarsChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
new file mode 100644
index 0000000..8ce3735
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -0,0 +1,314 @@
+//=======- PaddingChecker.cpp ------------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a checker that checks for padding that could be
+// removed by re-ordering members.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <numeric>
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class PaddingChecker : public Checker<check::ASTDecl<TranslationUnitDecl>> {
+private:
+ mutable std::unique_ptr<BugType> PaddingBug;
+ mutable int64_t AllowedPad;
+ mutable BugReporter *BR;
+
+public:
+ void checkASTDecl(const TranslationUnitDecl *TUD, AnalysisManager &MGR,
+ BugReporter &BRArg) const {
+ BR = &BRArg;
+ AllowedPad =
+ MGR.getAnalyzerOptions().getOptionAsInteger("AllowedPad", 24, this);
+ assert(AllowedPad >= 0 && "AllowedPad option should be non-negative");
+
+ // The calls to checkAST* from AnalysisConsumer don't
+ // visit template instantiations or lambda classes. We
+ // want to visit those, so we make our own RecursiveASTVisitor.
+ struct LocalVisitor : public RecursiveASTVisitor<LocalVisitor> {
+ const PaddingChecker *Checker;
+ bool shouldVisitTemplateInstantiations() const { return true; }
+ bool shouldVisitImplicitCode() const { return true; }
+ explicit LocalVisitor(const PaddingChecker *Checker) : Checker(Checker) {}
+ bool VisitRecordDecl(const RecordDecl *RD) {
+ Checker->visitRecord(RD);
+ return true;
+ }
+ bool VisitVarDecl(const VarDecl *VD) {
+ Checker->visitVariable(VD);
+ return true;
+ }
+ // TODO: Visit array new and mallocs for arrays.
+ };
+
+ LocalVisitor visitor(this);
+ visitor.TraverseDecl(const_cast<TranslationUnitDecl *>(TUD));
+ }
+
+ /// \brief Look for records of overly padded types. If padding *
+ /// PadMultiplier exceeds AllowedPad, then generate a report.
+ /// PadMultiplier is used to share code with the array padding
+ /// checker.
+ void visitRecord(const RecordDecl *RD, uint64_t PadMultiplier = 1) const {
+ if (shouldSkipDecl(RD))
+ return;
+
+ auto &ASTContext = RD->getASTContext();
+ const ASTRecordLayout &RL = ASTContext.getASTRecordLayout(RD);
+ assert(llvm::isPowerOf2_64(RL.getAlignment().getQuantity()));
+
+ CharUnits BaselinePad = calculateBaselinePad(RD, ASTContext, RL);
+ if (BaselinePad.isZero())
+ return;
+ CharUnits OptimalPad = calculateOptimalPad(RD, ASTContext, RL);
+
+ CharUnits DiffPad = PadMultiplier * (BaselinePad - OptimalPad);
+ if (DiffPad.getQuantity() <= AllowedPad) {
+ assert(!DiffPad.isNegative() && "DiffPad should not be negative");
+ // There is not enough excess padding to trigger a warning.
+ return;
+ }
+ reportRecord(RD, BaselinePad, OptimalPad);
+ }
+
+ /// \brief Look for arrays of overly padded types. If the padding of the
+ /// array type exceeds AllowedPad, then generate a report.
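+  ///
+  /// For example (illustrative), given 'struct S Arr[100];', the padding
+  /// wasted by a single S is multiplied by the 100 elements when deciding
+  /// whether to report.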
+ void visitVariable(const VarDecl *VD) const {
+ const ArrayType *ArrTy = VD->getType()->getAsArrayTypeUnsafe();
+ if (ArrTy == nullptr)
+ return;
+ uint64_t Elts = 0;
+ if (const ConstantArrayType *CArrTy = dyn_cast<ConstantArrayType>(ArrTy))
+ Elts = CArrTy->getSize().getZExtValue();
+ if (Elts == 0)
+ return;
+ const RecordType *RT = ArrTy->getElementType()->getAs<RecordType>();
+ if (RT == nullptr)
+ return;
+
+ // TODO: Recurse into the fields and base classes to see if any
+ // of those have excess padding.
+ visitRecord(RT->getDecl(), Elts);
+ }
+
+ bool shouldSkipDecl(const RecordDecl *RD) const {
+ auto Location = RD->getLocation();
+ // If the construct doesn't have a source file, then it's not something
+ // we want to diagnose.
+ if (!Location.isValid())
+ return true;
+ SrcMgr::CharacteristicKind Kind =
+ BR->getSourceManager().getFileCharacteristic(Location);
+ // Throw out all records that come from system headers.
+ if (Kind != SrcMgr::C_User)
+ return true;
+
+ // Not going to attempt to optimize unions.
+ if (RD->isUnion())
+ return true;
+ // How do you reorder fields if you haven't got any?
+ if (RD->field_empty())
+ return true;
+ if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ // Tail padding with base classes ends up being very complicated.
+ // We will skip objects with base classes for now.
+ if (CXXRD->getNumBases() != 0)
+ return true;
+ // Virtual bases are complicated, skipping those for now.
+ if (CXXRD->getNumVBases() != 0)
+ return true;
+    // Can't lay out a template, so skip it. We do still lay out the
+    // instantiations, though.
+ if (CXXRD->getTypeForDecl()->isDependentType())
+ return true;
+ if (CXXRD->getTypeForDecl()->isInstantiationDependentType())
+ return true;
+ }
+ auto IsTrickyField = [](const FieldDecl *FD) -> bool {
+ // Bitfield layout is hard.
+ if (FD->isBitField())
+ return true;
+
+ // Variable length arrays are tricky too.
+ QualType Ty = FD->getType();
+ if (Ty->isIncompleteArrayType())
+ return true;
+ return false;
+ };
+
+ if (std::any_of(RD->field_begin(), RD->field_end(), IsTrickyField))
+ return true;
+ return false;
+ }
+
+ static CharUnits calculateBaselinePad(const RecordDecl *RD,
+ const ASTContext &ASTContext,
+ const ASTRecordLayout &RL) {
+ CharUnits PaddingSum;
+ CharUnits Offset = ASTContext.toCharUnitsFromBits(RL.getFieldOffset(0));
+ for (const auto &FD : RD->fields()) {
+ // This checker only cares about the padded size of the
+ // field, and not the data size. If the field is a record
+ // with tail padding, then we won't put that number in our
+ // total because reordering fields won't fix that problem.
+ CharUnits FieldSize = ASTContext.getTypeSizeInChars(FD->getType());
+ auto FieldOffsetBits = RL.getFieldOffset(FD->getFieldIndex());
+ CharUnits FieldOffset = ASTContext.toCharUnitsFromBits(FieldOffsetBits);
+ PaddingSum += (FieldOffset - Offset);
+ Offset = FieldOffset + FieldSize;
+ }
+ PaddingSum += RL.getSize() - Offset;
+ return PaddingSum;
+ }
+
+ /// Optimal padding overview:
+ /// 1. Find a close approximation to where we can place our first field.
+ /// This will usually be at offset 0.
+ /// 2. Try to find the best field that can legally be placed at the current
+ /// offset.
+ /// a. "Best" is the largest alignment that is legal, but smallest size.
+ /// This is to account for overly aligned types.
+ /// 3. If no fields can fit, pad by rounding the current offset up to the
+ /// smallest alignment requirement of our fields. Measure and track the
+  ///     amount of padding added. Go back to 2.
+ /// 4. Increment the current offset by the size of the chosen field.
+ /// 5. Remove the chosen field from the set of future possibilities.
+ /// 6. Go back to 2 if there are still unplaced fields.
+ /// 7. Add tail padding by rounding the current offset up to the structure
+ /// alignment. Track the amount of padding added.
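+  ///
+  /// As a worked example (hypothetical, assuming a 4-byte int alignment),
+  /// 'struct S { char C1; int I; char C2; };' lays out as C1 at offset 0,
+  /// 3 bytes of padding, I at 4, C2 at 8, and 3 bytes of tail padding:
+  /// 12 bytes with 6 wasted. The greedy order above instead places I at 0,
+  /// then C1 and C2, leaving only 2 bytes of tail padding (8 bytes total).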
+
+ static CharUnits calculateOptimalPad(const RecordDecl *RD,
+ const ASTContext &ASTContext,
+ const ASTRecordLayout &RL) {
+ struct CharUnitPair {
+ CharUnits Align;
+ CharUnits Size;
+ bool operator<(const CharUnitPair &RHS) const {
+ // Order from small alignments to large alignments,
+ // then large sizes to small sizes.
+ return std::make_pair(Align, -Size) <
+ std::make_pair(RHS.Align, -RHS.Size);
+ }
+ };
+ SmallVector<CharUnitPair, 20> Fields;
+ auto GatherSizesAndAlignments = [](const FieldDecl *FD) {
+ CharUnitPair RetVal;
+ auto &Ctx = FD->getASTContext();
+ std::tie(RetVal.Size, RetVal.Align) =
+ Ctx.getTypeInfoInChars(FD->getType());
+ assert(llvm::isPowerOf2_64(RetVal.Align.getQuantity()));
+ if (auto Max = FD->getMaxAlignment())
+ RetVal.Align = std::max(Ctx.toCharUnitsFromBits(Max), RetVal.Align);
+ return RetVal;
+ };
+ std::transform(RD->field_begin(), RD->field_end(),
+ std::back_inserter(Fields), GatherSizesAndAlignments);
+ std::sort(Fields.begin(), Fields.end());
+
+ // This lets us skip over vptrs and non-virtual bases,
+ // so that we can just worry about the fields in our object.
+ // Note that this does cause us to miss some cases where we
+ // could pack more bytes in to a base class's tail padding.
+ CharUnits NewOffset = ASTContext.toCharUnitsFromBits(RL.getFieldOffset(0));
+ CharUnits NewPad;
+
+ while (!Fields.empty()) {
+ unsigned TrailingZeros =
+ llvm::countTrailingZeros((unsigned long long)NewOffset.getQuantity());
+      // If NewOffset is zero, countTrailingZeros returns 64. Shifting by
+      // 64 is undefined for our unsigned long long, and shifting by 63
+      // would make the signed long long (CharUnits' internal type)
+      // negative. So cap the shift at 62.
+ long long CurAlignmentBits = 1ull << (std::min)(TrailingZeros, 62u);
+ CharUnits CurAlignment = CharUnits::fromQuantity(CurAlignmentBits);
+ CharUnitPair InsertPoint = {CurAlignment, CharUnits::Zero()};
+ auto CurBegin = Fields.begin();
+ auto CurEnd = Fields.end();
+
+ // In the typical case, this will find the last element
+ // of the vector. We won't find a middle element unless
+ // we started on a poorly aligned address or have an overly
+ // aligned field.
+ auto Iter = std::upper_bound(CurBegin, CurEnd, InsertPoint);
+ if (Iter != CurBegin) {
+ // We found a field that we can layout with the current alignment.
+ --Iter;
+ NewOffset += Iter->Size;
+ Fields.erase(Iter);
+ } else {
+        // We are poorly aligned, and we need to pad in order to lay out
+        // another field. Round up to at least the smallest field alignment
+        // that we currently have.
+ CharUnits NextOffset = NewOffset.RoundUpToAlignment(Fields[0].Align);
+ NewPad += NextOffset - NewOffset;
+ NewOffset = NextOffset;
+ }
+ }
+ // Calculate tail padding.
+ CharUnits NewSize = NewOffset.RoundUpToAlignment(RL.getAlignment());
+ NewPad += NewSize - NewOffset;
+ return NewPad;
+ }
+
+ void reportRecord(const RecordDecl *RD, CharUnits BaselinePad,
+ CharUnits TargetPad) const {
+ if (!PaddingBug)
+ PaddingBug =
+ llvm::make_unique<BugType>(this, "Excessive Padding", "Performance");
+
+ SmallString<100> Buf;
+ llvm::raw_svector_ostream Os(Buf);
+
+ Os << "Excessive padding in '";
+ Os << QualType::getAsString(RD->getTypeForDecl(), Qualifiers()) << "'";
+
+ if (auto *TSD = dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
+      // TODO: make this show up better in the console output and in
+      // the HTML. Maybe just make it show up in the HTML the way the path
+      // diagnostics do.
+ SourceLocation ILoc = TSD->getPointOfInstantiation();
+ if (ILoc.isValid())
+ Os << " instantiated here: "
+ << ILoc.printToString(BR->getSourceManager());
+ }
+
+ Os << " (" << BaselinePad.getQuantity() << " padding bytes, where "
+ << TargetPad.getQuantity() << " is optimal). Consider reordering "
+ << "the fields or adding explicit padding members.";
+
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::create(RD, BR->getSourceManager());
+
+ auto Report = llvm::make_unique<BugReport>(*PaddingBug, Os.str(), CELoc);
+ Report->setDeclWithIssue(RD);
+ Report->addRange(RD->getSourceRange());
+
+ BR->emitReport(std::move(Report));
+ }
+};
+}
+
+void ento::registerPaddingChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<PaddingChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
new file mode 100644
index 0000000..e336967
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
@@ -0,0 +1,70 @@
+//=== PointerArithChecker.cpp - Pointer arithmetic checker -----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines PointerArithChecker, a builtin checker that checks for
+// pointer arithmetic on locations other than array elements.
+//
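+// For example (illustrative), the checker warns on code such as:
+//
+//   void test() {
+//     int X;
+//     int *P = &X + 1;  // warn: arithmetic on a non-array variable
+//   }
+//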
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class PointerArithChecker
+ : public Checker< check::PreStmt<BinaryOperator> > {
+ mutable std::unique_ptr<BuiltinBug> BT;
+
+public:
+ void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
+};
+}
+
+void PointerArithChecker::checkPreStmt(const BinaryOperator *B,
+ CheckerContext &C) const {
+ if (B->getOpcode() != BO_Sub && B->getOpcode() != BO_Add)
+ return;
+
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal LV = state->getSVal(B->getLHS(), LCtx);
+ SVal RV = state->getSVal(B->getRHS(), LCtx);
+
+ const MemRegion *LR = LV.getAsRegion();
+
+ if (!LR || !RV.isConstant())
+ return;
+
+  // If pointer arithmetic is done on variables of non-array type, the
+  // behavior often relies on memory organization, which is dangerous.
+ if (isa<VarRegion>(LR) || isa<CodeTextRegion>(LR) ||
+ isa<CompoundLiteralRegion>(LR)) {
+
+ if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
+ if (!BT)
+ BT.reset(
+ new BuiltinBug(this, "Dangerous pointer arithmetic",
+ "Pointer arithmetic done on non-array variables "
+ "means reliance on memory layout, which is "
+ "dangerous."));
+ auto R = llvm::make_unique<BugReport>(*BT, BT->getDescription(), N);
+ R->addRange(B->getSourceRange());
+ C.emitReport(std::move(R));
+ }
+ }
+}
+
+void ento::registerPointerArithChecker(CheckerManager &mgr) {
+ mgr.registerChecker<PointerArithChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
new file mode 100644
index 0000000..2d33ebc
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
@@ -0,0 +1,77 @@
+//=== PointerSubChecker.cpp - Pointer subtraction checker ------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines PointerSubChecker, a builtin checker that checks for
+// subtraction of two pointers that point to different memory chunks.
+// This check corresponds to CWE-469.
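+//
+// For example (illustrative), the following is flagged:
+//
+//   void test() {
+//     int A[10], B[10];
+//     int D = &A[2] - &B[2];  // warn: operands point to different chunks
+//   }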
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class PointerSubChecker
+ : public Checker< check::PreStmt<BinaryOperator> > {
+ mutable std::unique_ptr<BuiltinBug> BT;
+
+public:
+ void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
+};
+}
+
+void PointerSubChecker::checkPreStmt(const BinaryOperator *B,
+ CheckerContext &C) const {
+ // When doing pointer subtraction, if the two pointers do not point to the
+ // same memory chunk, emit a warning.
+ if (B->getOpcode() != BO_Sub)
+ return;
+
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal LV = state->getSVal(B->getLHS(), LCtx);
+ SVal RV = state->getSVal(B->getRHS(), LCtx);
+
+ const MemRegion *LR = LV.getAsRegion();
+ const MemRegion *RR = RV.getAsRegion();
+
+ if (!(LR && RR))
+ return;
+
+ const MemRegion *BaseLR = LR->getBaseRegion();
+ const MemRegion *BaseRR = RR->getBaseRegion();
+
+ if (BaseLR == BaseRR)
+ return;
+
+ // Allow arithmetic on different symbolic regions.
+ if (isa<SymbolicRegion>(BaseLR) || isa<SymbolicRegion>(BaseRR))
+ return;
+
+ if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
+ if (!BT)
+ BT.reset(
+ new BuiltinBug(this, "Pointer subtraction",
+ "Subtraction of two pointers that do not point to "
+                         "the same memory chunk may cause an incorrect result."));
+ auto R = llvm::make_unique<BugReport>(*BT, BT->getDescription(), N);
+ R->addRange(B->getSourceRange());
+ C.emitReport(std::move(R));
+ }
+}
+
+void ento::registerPointerSubChecker(CheckerManager &mgr) {
+ mgr.registerChecker<PointerSubChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
new file mode 100644
index 0000000..28a4a08
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -0,0 +1,334 @@
+//===--- PthreadLockChecker.cpp - Check for locking problems ---*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines PthreadLockChecker, a simple lock -> unlock checker.
+// Also handles XNU locks, which behave similarly enough to share code.
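+//
+// For example (illustrative), a double acquisition is flagged:
+//
+//   pthread_mutex_lock(&M);
+//   pthread_mutex_lock(&M);  // warn: this lock has already been acquired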
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/ImmutableList.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+struct LockState {
+ enum Kind { Destroyed, Locked, Unlocked } K;
+
+private:
+ LockState(Kind K) : K(K) {}
+
+public:
+ static LockState getLocked() { return LockState(Locked); }
+ static LockState getUnlocked() { return LockState(Unlocked); }
+ static LockState getDestroyed() { return LockState(Destroyed); }
+
+ bool operator==(const LockState &X) const {
+ return K == X.K;
+ }
+
+ bool isLocked() const { return K == Locked; }
+ bool isUnlocked() const { return K == Unlocked; }
+ bool isDestroyed() const { return K == Destroyed; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(K);
+ }
+};
+
+class PthreadLockChecker : public Checker< check::PostStmt<CallExpr> > {
+ mutable std::unique_ptr<BugType> BT_doublelock;
+ mutable std::unique_ptr<BugType> BT_doubleunlock;
+ mutable std::unique_ptr<BugType> BT_destroylock;
+ mutable std::unique_ptr<BugType> BT_initlock;
+ mutable std::unique_ptr<BugType> BT_lor;
+ enum LockingSemantics {
+ NotApplicable = 0,
+ PthreadSemantics,
+ XNUSemantics
+ };
+public:
+ void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+
+ void AcquireLock(CheckerContext &C, const CallExpr *CE, SVal lock,
+ bool isTryLock, enum LockingSemantics semantics) const;
+
+ void ReleaseLock(CheckerContext &C, const CallExpr *CE, SVal lock) const;
+ void DestroyLock(CheckerContext &C, const CallExpr *CE, SVal Lock) const;
+ void InitLock(CheckerContext &C, const CallExpr *CE, SVal Lock) const;
+ void reportUseDestroyedBug(CheckerContext &C, const CallExpr *CE) const;
+};
+} // end anonymous namespace
+
+// GDM Entry for tracking lock state.
+REGISTER_LIST_WITH_PROGRAMSTATE(LockSet, const MemRegion *)
+
+REGISTER_MAP_WITH_PROGRAMSTATE(LockMap, const MemRegion *, LockState)
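+//
+// LockSet keeps the currently held locks in acquisition order (most recent
+// first) so releases can be checked for lock-order reversal; LockMap
+// remembers each lock's last known state (Locked, Unlocked, or Destroyed).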
+
+void PthreadLockChecker::checkPostStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+ StringRef FName = C.getCalleeName(CE);
+ if (FName.empty())
+ return;
+
+ if (CE->getNumArgs() != 1 && CE->getNumArgs() != 2)
+ return;
+
+ if (FName == "pthread_mutex_lock" ||
+ FName == "pthread_rwlock_rdlock" ||
+ FName == "pthread_rwlock_wrlock")
+ AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
+ false, PthreadSemantics);
+ else if (FName == "lck_mtx_lock" ||
+ FName == "lck_rw_lock_exclusive" ||
+ FName == "lck_rw_lock_shared")
+ AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
+ false, XNUSemantics);
+ else if (FName == "pthread_mutex_trylock" ||
+ FName == "pthread_rwlock_tryrdlock" ||
+ FName == "pthread_rwlock_trywrlock")
+ AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
+ true, PthreadSemantics);
+ else if (FName == "lck_mtx_try_lock" ||
+ FName == "lck_rw_try_lock_exclusive" ||
+ FName == "lck_rw_try_lock_shared")
+ AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
+ true, XNUSemantics);
+ else if (FName == "pthread_mutex_unlock" ||
+ FName == "pthread_rwlock_unlock" ||
+ FName == "lck_mtx_unlock" ||
+ FName == "lck_rw_done")
+ ReleaseLock(C, CE, state->getSVal(CE->getArg(0), LCtx));
+ else if (FName == "pthread_mutex_destroy" ||
+ FName == "lck_mtx_destroy")
+ DestroyLock(C, CE, state->getSVal(CE->getArg(0), LCtx));
+ else if (FName == "pthread_mutex_init")
+ InitLock(C, CE, state->getSVal(CE->getArg(0), LCtx));
+}
+
+void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
+ SVal lock, bool isTryLock,
+ enum LockingSemantics semantics) const {
+
+ const MemRegion *lockR = lock.getAsRegion();
+ if (!lockR)
+ return;
+
+ ProgramStateRef state = C.getState();
+
+ SVal X = state->getSVal(CE, C.getLocationContext());
+ if (X.isUnknownOrUndef())
+ return;
+
+ DefinedSVal retVal = X.castAs<DefinedSVal>();
+
+ if (const LockState *LState = state->get<LockMap>(lockR)) {
+ if (LState->isLocked()) {
+ if (!BT_doublelock)
+ BT_doublelock.reset(new BugType(this, "Double locking",
+ "Lock checker"));
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return;
+ auto report = llvm::make_unique<BugReport>(
+ *BT_doublelock, "This lock has already been acquired", N);
+ report->addRange(CE->getArg(0)->getSourceRange());
+ C.emitReport(std::move(report));
+ return;
+ } else if (LState->isDestroyed()) {
+ reportUseDestroyedBug(C, CE);
+ return;
+ }
+ }
+
+ ProgramStateRef lockSucc = state;
+ if (isTryLock) {
+ // Bifurcate the state, and allow a mode where the lock acquisition fails.
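+    // (pthread try-locks return 0 on success, so the 'true' branch of the
+    // assume() below is the failure state; XNU try-locks return nonzero on
+    // success, hence the mirrored std::tie operands.)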
+ ProgramStateRef lockFail;
+ switch (semantics) {
+ case PthreadSemantics:
+ std::tie(lockFail, lockSucc) = state->assume(retVal);
+ break;
+ case XNUSemantics:
+ std::tie(lockSucc, lockFail) = state->assume(retVal);
+ break;
+ default:
+ llvm_unreachable("Unknown tryLock locking semantics");
+ }
+ assert(lockFail && lockSucc);
+ C.addTransition(lockFail);
+
+ } else if (semantics == PthreadSemantics) {
+ // Assume that the return value was 0.
+ lockSucc = state->assume(retVal, false);
+ assert(lockSucc);
+
+ } else {
+    // XNU locking semantics return void on non-try locks.
+ assert((semantics == XNUSemantics) && "Unknown locking semantics");
+ lockSucc = state;
+ }
+
+ // Record that the lock was acquired.
+ lockSucc = lockSucc->add<LockSet>(lockR);
+ lockSucc = lockSucc->set<LockMap>(lockR, LockState::getLocked());
+ C.addTransition(lockSucc);
+}
+
+void PthreadLockChecker::ReleaseLock(CheckerContext &C, const CallExpr *CE,
+ SVal lock) const {
+
+ const MemRegion *lockR = lock.getAsRegion();
+ if (!lockR)
+ return;
+
+ ProgramStateRef state = C.getState();
+
+ if (const LockState *LState = state->get<LockMap>(lockR)) {
+ if (LState->isUnlocked()) {
+ if (!BT_doubleunlock)
+ BT_doubleunlock.reset(new BugType(this, "Double unlocking",
+ "Lock checker"));
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return;
+ auto Report = llvm::make_unique<BugReport>(
+ *BT_doubleunlock, "This lock has already been unlocked", N);
+ Report->addRange(CE->getArg(0)->getSourceRange());
+ C.emitReport(std::move(Report));
+ return;
+ } else if (LState->isDestroyed()) {
+ reportUseDestroyedBug(C, CE);
+ return;
+ }
+ }
+
+ LockSetTy LS = state->get<LockSet>();
+
+ // FIXME: Better analysis requires IPA for wrappers.
+
+ if (!LS.isEmpty()) {
+ const MemRegion *firstLockR = LS.getHead();
+ if (firstLockR != lockR) {
+ if (!BT_lor)
+ BT_lor.reset(new BugType(this, "Lock order reversal", "Lock checker"));
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return;
+ auto report = llvm::make_unique<BugReport>(
+ *BT_lor, "This was not the most recently acquired lock. Possible "
+ "lock order reversal", N);
+ report->addRange(CE->getArg(0)->getSourceRange());
+ C.emitReport(std::move(report));
+ return;
+ }
+ // Record that the lock was released.
+ state = state->set<LockSet>(LS.getTail());
+ }
+
+ state = state->set<LockMap>(lockR, LockState::getUnlocked());
+ C.addTransition(state);
+}
+
+void PthreadLockChecker::DestroyLock(CheckerContext &C, const CallExpr *CE,
+ SVal Lock) const {
+
+ const MemRegion *LockR = Lock.getAsRegion();
+ if (!LockR)
+ return;
+
+ ProgramStateRef State = C.getState();
+
+ const LockState *LState = State->get<LockMap>(LockR);
+ if (!LState || LState->isUnlocked()) {
+ State = State->set<LockMap>(LockR, LockState::getDestroyed());
+ C.addTransition(State);
+ return;
+ }
+
+ StringRef Message;
+
+ if (LState->isLocked()) {
+ Message = "This lock is still locked";
+ } else {
+ Message = "This lock has already been destroyed";
+ }
+
+ if (!BT_destroylock)
+ BT_destroylock.reset(new BugType(this, "Destroy invalid lock",
+ "Lock checker"));
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return;
+ auto Report = llvm::make_unique<BugReport>(*BT_destroylock, Message, N);
+ Report->addRange(CE->getArg(0)->getSourceRange());
+ C.emitReport(std::move(Report));
+}
+
+void PthreadLockChecker::InitLock(CheckerContext &C, const CallExpr *CE,
+ SVal Lock) const {
+
+ const MemRegion *LockR = Lock.getAsRegion();
+ if (!LockR)
+ return;
+
+ ProgramStateRef State = C.getState();
+
+ const struct LockState *LState = State->get<LockMap>(LockR);
+ if (!LState || LState->isDestroyed()) {
+ State = State->set<LockMap>(LockR, LockState::getUnlocked());
+ C.addTransition(State);
+ return;
+ }
+
+ StringRef Message;
+
+ if (LState->isLocked()) {
+ Message = "This lock is still being held";
+ } else {
+ Message = "This lock has already been initialized";
+ }
+
+ if (!BT_initlock)
+ BT_initlock.reset(new BugType(this, "Init invalid lock",
+ "Lock checker"));
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return;
+ auto Report = llvm::make_unique<BugReport>(*BT_initlock, Message, N);
+ Report->addRange(CE->getArg(0)->getSourceRange());
+ C.emitReport(std::move(Report));
+}
+
+void PthreadLockChecker::reportUseDestroyedBug(CheckerContext &C,
+ const CallExpr *CE) const {
+ if (!BT_destroylock)
+ BT_destroylock.reset(new BugType(this, "Use destroyed lock",
+ "Lock checker"));
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return;
+ auto Report = llvm::make_unique<BugReport>(
+ *BT_destroylock, "This lock has already been destroyed", N);
+ Report->addRange(CE->getArg(0)->getSourceRange());
+ C.emitReport(std::move(Report));
+}
+
+void ento::registerPthreadLockChecker(CheckerManager &mgr) {
+ mgr.registerChecker<PthreadLockChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
new file mode 100644
index 0000000..f983c30
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
@@ -0,0 +1,4029 @@
+//==-- RetainCountChecker.cpp - Checks for leaks and other issues -*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the methods for RetainCountChecker, which implements
+// a reference count checker for Core Foundation and Cocoa (on Mac OS X).
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "AllocationDiagnostics.h"
+#include "SelectorExtras.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Checkers/ObjCRetainCount.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableList.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include <cstdarg>
+
+using namespace clang;
+using namespace ento;
+using namespace objc_retain;
+using llvm::StrInStrNoCase;
+
+//===----------------------------------------------------------------------===//
+// Adapters for FoldingSet.
+//===----------------------------------------------------------------------===//
+
+namespace llvm {
+template <> struct FoldingSetTrait<ArgEffect> {
+  static inline void Profile(const ArgEffect X, FoldingSetNodeID &ID) {
+    ID.AddInteger((unsigned) X);
+  }
+};
+template <> struct FoldingSetTrait<RetEffect> {
+  static inline void Profile(const RetEffect &X, FoldingSetNodeID &ID) {
+    ID.AddInteger((unsigned) X.getKind());
+    ID.AddInteger((unsigned) X.getObjKind());
+  }
+};
+} // end llvm namespace
+
+//===----------------------------------------------------------------------===//
+// Reference-counting logic (typestate + counts).
+//===----------------------------------------------------------------------===//
+
+/// ArgEffects summarizes the effects of a function/method call on all of
+/// its arguments.
+typedef llvm::ImmutableMap<unsigned,ArgEffect> ArgEffects;
+
+namespace {
+class RefVal {
+public:
+ enum Kind {
+ Owned = 0, // Owning reference.
+    NotOwned, // Reference is not owned but still valid (not freed).
+ Released, // Object has been released.
+ ReturnedOwned, // Returned object passes ownership to caller.
+    ReturnedNotOwned, // Returned object does not pass ownership to caller.
+ ERROR_START,
+ ErrorDeallocNotOwned, // -dealloc called on non-owned object.
+ ErrorDeallocGC, // Calling -dealloc with GC enabled.
+ ErrorUseAfterRelease, // Object used after released.
+ ErrorReleaseNotOwned, // Release of an object that was not owned.
+ ERROR_LEAK_START,
+ ErrorLeak, // A memory leak due to excessive reference counts.
+ ErrorLeakReturned, // A memory leak due to the returning method not having
+ // the correct naming conventions.
+ ErrorGCLeakReturned,
+ ErrorOverAutorelease,
+ ErrorReturnedNotOwned
+ };
+
+ /// Tracks how an object referenced by an ivar has been used.
+ ///
+ /// This accounts for us not knowing if an arbitrary ivar is supposed to be
+ /// stored at +0 or +1.
+ enum class IvarAccessHistory {
+ None,
+ AccessedDirectly,
+ ReleasedAfterDirectAccess
+ };
+
+private:
+ /// The number of outstanding retains.
+ unsigned Cnt;
+ /// The number of outstanding autoreleases.
+ unsigned ACnt;
+ /// The (static) type of the object at the time we started tracking it.
+ QualType T;
+
+ /// The current state of the object.
+ ///
+ /// See the RefVal::Kind enum for possible values.
+ unsigned RawKind : 5;
+
+ /// The kind of object being tracked (CF or ObjC), if known.
+ ///
+ /// See the RetEffect::ObjKind enum for possible values.
+ unsigned RawObjectKind : 2;
+
+ /// True if the current state and/or retain count may turn out to not be the
+ /// best possible approximation of the reference counting state.
+ ///
+ /// If true, the checker may decide to throw away ("override") this state
+ /// in favor of something else when it sees the object being used in new ways.
+ ///
+ /// This setting should not be propagated to state derived from this state.
+ /// Once we start deriving new states, it would be inconsistent to override
+ /// them.
+ unsigned RawIvarAccessHistory : 2;
+
+ RefVal(Kind k, RetEffect::ObjKind o, unsigned cnt, unsigned acnt, QualType t,
+ IvarAccessHistory IvarAccess)
+ : Cnt(cnt), ACnt(acnt), T(t), RawKind(static_cast<unsigned>(k)),
+ RawObjectKind(static_cast<unsigned>(o)),
+ RawIvarAccessHistory(static_cast<unsigned>(IvarAccess)) {
+ assert(getKind() == k && "not enough bits for the kind");
+ assert(getObjKind() == o && "not enough bits for the object kind");
+ assert(getIvarAccessHistory() == IvarAccess && "not enough bits");
+ }
+
+public:
+ Kind getKind() const { return static_cast<Kind>(RawKind); }
+
+ RetEffect::ObjKind getObjKind() const {
+ return static_cast<RetEffect::ObjKind>(RawObjectKind);
+ }
+
+ unsigned getCount() const { return Cnt; }
+ unsigned getAutoreleaseCount() const { return ACnt; }
+ unsigned getCombinedCounts() const { return Cnt + ACnt; }
+ void clearCounts() {
+ Cnt = 0;
+ ACnt = 0;
+ }
+ void setCount(unsigned i) {
+ Cnt = i;
+ }
+ void setAutoreleaseCount(unsigned i) {
+ ACnt = i;
+ }
+
+ QualType getType() const { return T; }
+
+ /// Returns what the analyzer knows about direct accesses to a particular
+ /// instance variable.
+ ///
+ /// If the object with this refcount wasn't originally from an Objective-C
+ /// ivar region, this should always return IvarAccessHistory::None.
+ IvarAccessHistory getIvarAccessHistory() const {
+ return static_cast<IvarAccessHistory>(RawIvarAccessHistory);
+ }
+
+ bool isOwned() const {
+ return getKind() == Owned;
+ }
+
+ bool isNotOwned() const {
+ return getKind() == NotOwned;
+ }
+
+ bool isReturnedOwned() const {
+ return getKind() == ReturnedOwned;
+ }
+
+ bool isReturnedNotOwned() const {
+ return getKind() == ReturnedNotOwned;
+ }
+
+ /// Create a state for an object whose lifetime is the responsibility of the
+ /// current function, at least partially.
+ ///
+ /// Most commonly, this is an owned object with a retain count of +1.
+ static RefVal makeOwned(RetEffect::ObjKind o, QualType t,
+ unsigned Count = 1) {
+ return RefVal(Owned, o, Count, 0, t, IvarAccessHistory::None);
+ }
+
+ /// Create a state for an object whose lifetime is not the responsibility of
+ /// the current function.
+ ///
+ /// Most commonly, this is an unowned object with a retain count of +0.
+ static RefVal makeNotOwned(RetEffect::ObjKind o, QualType t,
+ unsigned Count = 0) {
+ return RefVal(NotOwned, o, Count, 0, t, IvarAccessHistory::None);
+ }
+
+ RefVal operator-(size_t i) const {
+ return RefVal(getKind(), getObjKind(), getCount() - i,
+ getAutoreleaseCount(), getType(), getIvarAccessHistory());
+ }
+
+ RefVal operator+(size_t i) const {
+ return RefVal(getKind(), getObjKind(), getCount() + i,
+ getAutoreleaseCount(), getType(), getIvarAccessHistory());
+ }
+
+ RefVal operator^(Kind k) const {
+ return RefVal(k, getObjKind(), getCount(), getAutoreleaseCount(),
+ getType(), getIvarAccessHistory());
+ }
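+
+  // Illustrative (hypothetical) use of the helpers above: 'V + 1' bumps the
+  // retain count, 'V - 1' drops it, and 'V ^ K' rebinds the same counts to
+  // a new kind, e.g. 'RefVal Dropped = (V - 1) ^ Released;'.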
+
+ RefVal autorelease() const {
+ return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount()+1,
+ getType(), getIvarAccessHistory());
+ }
+
+ RefVal withIvarAccess() const {
+ assert(getIvarAccessHistory() == IvarAccessHistory::None);
+ return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount(),
+ getType(), IvarAccessHistory::AccessedDirectly);
+ }
+
+ RefVal releaseViaIvar() const {
+ assert(getIvarAccessHistory() == IvarAccessHistory::AccessedDirectly);
+ return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount(),
+ getType(), IvarAccessHistory::ReleasedAfterDirectAccess);
+ }
+
+ // Comparison, profiling, and pretty-printing.
+
+ bool hasSameState(const RefVal &X) const {
+ return getKind() == X.getKind() && Cnt == X.Cnt && ACnt == X.ACnt &&
+ getIvarAccessHistory() == X.getIvarAccessHistory();
+ }
+
+ bool operator==(const RefVal& X) const {
+ return T == X.T && hasSameState(X) && getObjKind() == X.getObjKind();
+ }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.Add(T);
+ ID.AddInteger(RawKind);
+ ID.AddInteger(Cnt);
+ ID.AddInteger(ACnt);
+ ID.AddInteger(RawObjectKind);
+ ID.AddInteger(RawIvarAccessHistory);
+ }
+
+ void print(raw_ostream &Out) const;
+};
+
+void RefVal::print(raw_ostream &Out) const {
+ if (!T.isNull())
+ Out << "Tracked " << T.getAsString() << '/';
+
+ switch (getKind()) {
+ default: llvm_unreachable("Invalid RefVal kind");
+ case Owned: {
+ Out << "Owned";
+ unsigned cnt = getCount();
+ if (cnt) Out << " (+ " << cnt << ")";
+ break;
+ }
+
+ case NotOwned: {
+ Out << "NotOwned";
+ unsigned cnt = getCount();
+ if (cnt) Out << " (+ " << cnt << ")";
+ break;
+ }
+
+ case ReturnedOwned: {
+ Out << "ReturnedOwned";
+ unsigned cnt = getCount();
+ if (cnt) Out << " (+ " << cnt << ")";
+ break;
+ }
+
+ case ReturnedNotOwned: {
+ Out << "ReturnedNotOwned";
+ unsigned cnt = getCount();
+ if (cnt) Out << " (+ " << cnt << ")";
+ break;
+ }
+
+ case Released:
+ Out << "Released";
+ break;
+
+ case ErrorDeallocGC:
+ Out << "-dealloc (GC)";
+ break;
+
+ case ErrorDeallocNotOwned:
+ Out << "-dealloc (not-owned)";
+ break;
+
+ case ErrorLeak:
+ Out << "Leaked";
+ break;
+
+ case ErrorLeakReturned:
+ Out << "Leaked (Bad naming)";
+ break;
+
+ case ErrorGCLeakReturned:
+ Out << "Leaked (GC-ed at return)";
+ break;
+
+ case ErrorUseAfterRelease:
+ Out << "Use-After-Release [ERROR]";
+ break;
+
+ case ErrorReleaseNotOwned:
+ Out << "Release of Not-Owned [ERROR]";
+ break;
+
+ case RefVal::ErrorOverAutorelease:
+ Out << "Over-autoreleased";
+ break;
+
+ case RefVal::ErrorReturnedNotOwned:
+ Out << "Non-owned object returned instead of owned";
+ break;
+ }
+
+ switch (getIvarAccessHistory()) {
+ case IvarAccessHistory::None:
+ break;
+ case IvarAccessHistory::AccessedDirectly:
+ Out << " [direct ivar access]";
+ break;
+ case IvarAccessHistory::ReleasedAfterDirectAccess:
+ Out << " [released after direct ivar access]";
+ }
+
+ if (ACnt) {
+ Out << " [autorelease -" << ACnt << ']';
+ }
+}
+} //end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// RefBindings - State used to track object reference counts.
+//===----------------------------------------------------------------------===//
+
+REGISTER_MAP_WITH_PROGRAMSTATE(RefBindings, SymbolRef, RefVal)
+
+static inline const RefVal *getRefBinding(ProgramStateRef State,
+ SymbolRef Sym) {
+ return State->get<RefBindings>(Sym);
+}
+
+static inline ProgramStateRef setRefBinding(ProgramStateRef State,
+ SymbolRef Sym, RefVal Val) {
+ return State->set<RefBindings>(Sym, Val);
+}
+
+static ProgramStateRef removeRefBinding(ProgramStateRef State, SymbolRef Sym) {
+ return State->remove<RefBindings>(Sym);
+}
+
+//===----------------------------------------------------------------------===//
+// Function/Method behavior summaries.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class RetainSummary {
+ /// Args - a map of (index, ArgEffect) pairs, where index
+ /// specifies the argument (starting from 0). This can be sparsely
+ /// populated; arguments with no entry in Args use 'DefaultArgEffect'.
+ ArgEffects Args;
+
+ /// DefaultArgEffect - The default ArgEffect to apply to arguments that
+ /// do not have an entry in Args.
+ ArgEffect DefaultArgEffect;
+
+ /// Receiver - If this summary applies to an Objective-C message expression,
+ /// this is the effect applied to the state of the receiver.
+ ArgEffect Receiver;
+
+ /// Ret - The effect on the return value. Used to indicate if the
+ /// function/method call returns a new tracked symbol.
+ RetEffect Ret;
+
+public:
+ RetainSummary(ArgEffects A, RetEffect R, ArgEffect defaultEff,
+ ArgEffect ReceiverEff)
+ : Args(A), DefaultArgEffect(defaultEff), Receiver(ReceiverEff), Ret(R) {}
+
+ /// getArg - Return the argument effect on the argument specified by
+ /// idx (starting from 0).
+ ArgEffect getArg(unsigned idx) const {
+ if (const ArgEffect *AE = Args.lookup(idx))
+ return *AE;
+
+ return DefaultArgEffect;
+ }
+
+ void addArg(ArgEffects::Factory &af, unsigned idx, ArgEffect e) {
+ Args = af.add(Args, idx, e);
+ }
+
+ /// setDefaultArgEffect - Set the default argument effect.
+ void setDefaultArgEffect(ArgEffect E) {
+ DefaultArgEffect = E;
+ }
+
+ /// getRetEffect - Returns the effect on the return value of the call.
+ RetEffect getRetEffect() const { return Ret; }
+
+ /// setRetEffect - Set the effect of the return value of the call.
+ void setRetEffect(RetEffect E) { Ret = E; }
+
+
+ /// Sets the effect on the receiver of the message.
+ void setReceiverEffect(ArgEffect e) { Receiver = e; }
+
+ /// getReceiverEffect - Returns the effect on the receiver of the call.
+ /// This is only meaningful if the summary applies to an ObjCMessageExpr*.
+ ArgEffect getReceiverEffect() const { return Receiver; }
+
+ /// Test if two retain summaries are identical. Note that merely equivalent
+ /// summaries are not necessarily identical (for example, if an explicit
+ /// argument effect matches the default effect).
+ bool operator==(const RetainSummary &Other) const {
+ return Args == Other.Args && DefaultArgEffect == Other.DefaultArgEffect &&
+ Receiver == Other.Receiver && Ret == Other.Ret;
+ }
+
+ /// Profile this summary for inclusion in a FoldingSet.
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.Add(Args);
+ ID.Add(DefaultArgEffect);
+ ID.Add(Receiver);
+ ID.Add(Ret);
+ }
+
+ /// A retain summary is simple if it has no ArgEffects other than the default.
+ bool isSimple() const {
+ return Args.isEmpty();
+ }
+
+private:
+ ArgEffects getArgEffects() const { return Args; }
+ ArgEffect getDefaultArgEffect() const { return DefaultArgEffect; }
+
+ friend class RetainSummaryManager;
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Data structures for constructing summaries.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCSummaryKey {
+ IdentifierInfo* II;
+ Selector S;
+public:
+ ObjCSummaryKey(IdentifierInfo* ii, Selector s)
+ : II(ii), S(s) {}
+
+ ObjCSummaryKey(const ObjCInterfaceDecl *d, Selector s)
+ : II(d ? d->getIdentifier() : nullptr), S(s) {}
+
+ ObjCSummaryKey(Selector s)
+ : II(nullptr), S(s) {}
+
+ IdentifierInfo *getIdentifier() const { return II; }
+ Selector getSelector() const { return S; }
+};
+} // end anonymous namespace
+
+namespace llvm {
+template <> struct DenseMapInfo<ObjCSummaryKey> {
+ static inline ObjCSummaryKey getEmptyKey() {
+ return ObjCSummaryKey(DenseMapInfo<IdentifierInfo*>::getEmptyKey(),
+ DenseMapInfo<Selector>::getEmptyKey());
+ }
+
+ static inline ObjCSummaryKey getTombstoneKey() {
+ return ObjCSummaryKey(DenseMapInfo<IdentifierInfo*>::getTombstoneKey(),
+ DenseMapInfo<Selector>::getTombstoneKey());
+ }
+
+ static unsigned getHashValue(const ObjCSummaryKey &V) {
+ typedef std::pair<IdentifierInfo*, Selector> PairTy;
+ return DenseMapInfo<PairTy>::getHashValue(PairTy(V.getIdentifier(),
+ V.getSelector()));
+ }
+
+ static bool isEqual(const ObjCSummaryKey& LHS, const ObjCSummaryKey& RHS) {
+ return LHS.getIdentifier() == RHS.getIdentifier() &&
+ LHS.getSelector() == RHS.getSelector();
+ }
+
+};
+} // end llvm namespace
+
+namespace {
+class ObjCSummaryCache {
+ typedef llvm::DenseMap<ObjCSummaryKey, const RetainSummary *> MapTy;
+ MapTy M;
+public:
+ ObjCSummaryCache() {}
+
+ const RetainSummary * find(const ObjCInterfaceDecl *D, Selector S) {
+    // Do a lookup with the (D,S) pair. If we find a match, return
+    // the summary.
+ ObjCSummaryKey K(D, S);
+ MapTy::iterator I = M.find(K);
+
+ if (I != M.end())
+ return I->second;
+ if (!D)
+ return nullptr;
+
+ // Walk the super chain. If we find a hit with a parent, we'll end
+ // up returning that summary. We actually allow that key (null,S), as
+ // we cache summaries for the null ObjCInterfaceDecl* to allow us to
+ // generate initial summaries without having to worry about NSObject
+ // being declared.
+ // FIXME: We may change this at some point.
+ for (ObjCInterfaceDecl *C=D->getSuperClass() ;; C=C->getSuperClass()) {
+ if ((I = M.find(ObjCSummaryKey(C, S))) != M.end())
+ break;
+
+ if (!C)
+ return nullptr;
+ }
+
+    // Cache the summary with the original key to make the next lookup faster,
+    // and return the summary.
+ const RetainSummary *Summ = I->second;
+ M[K] = Summ;
+ return Summ;
+ }
+
+ const RetainSummary *find(IdentifierInfo* II, Selector S) {
+    // FIXME: Class method lookup. Right now we don't have a good way
+ // of going between IdentifierInfo* and the class hierarchy.
+ MapTy::iterator I = M.find(ObjCSummaryKey(II, S));
+
+ if (I == M.end())
+ I = M.find(ObjCSummaryKey(S));
+
+ return I == M.end() ? nullptr : I->second;
+ }
+
+ const RetainSummary *& operator[](ObjCSummaryKey K) {
+ return M[K];
+ }
+
+ const RetainSummary *& operator[](Selector S) {
+ return M[ ObjCSummaryKey(S) ];
+ }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Data structures for managing collections of summaries.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class RetainSummaryManager {
+
+ //==-----------------------------------------------------------------==//
+ // Typedefs.
+ //==-----------------------------------------------------------------==//
+
+ typedef llvm::DenseMap<const FunctionDecl*, const RetainSummary *>
+ FuncSummariesTy;
+
+ typedef ObjCSummaryCache ObjCMethodSummariesTy;
+
+ typedef llvm::FoldingSetNodeWrapper<RetainSummary> CachedSummaryNode;
+
+ //==-----------------------------------------------------------------==//
+ // Data.
+ //==-----------------------------------------------------------------==//
+
+ /// Ctx - The ASTContext object for the analyzed ASTs.
+ ASTContext &Ctx;
+
+ /// GCEnabled - Records whether or not the analyzed code runs in GC mode.
+ const bool GCEnabled;
+
+ /// Records whether or not the analyzed code runs in ARC mode.
+ const bool ARCEnabled;
+
+ /// FuncSummaries - A map from FunctionDecls to summaries.
+ FuncSummariesTy FuncSummaries;
+
+  /// ObjCClassMethodSummaries - A map from selectors (for class methods)
+  /// to summaries.
+  ObjCMethodSummariesTy ObjCClassMethodSummaries;
+
+  /// ObjCMethodSummaries - A map from selectors (for instance methods) to
+  /// summaries.
+  ObjCMethodSummariesTy ObjCMethodSummaries;
+
+ /// BPAlloc - A BumpPtrAllocator used for allocating summaries, ArgEffects,
+ /// and all other data used by the checker.
+ llvm::BumpPtrAllocator BPAlloc;
+
+ /// AF - A factory for ArgEffects objects.
+ ArgEffects::Factory AF;
+
+  /// ScratchArgs - A holding buffer for constructing ArgEffects.
+ ArgEffects ScratchArgs;
+
+ /// ObjCAllocRetE - Default return effect for methods returning Objective-C
+ /// objects.
+ RetEffect ObjCAllocRetE;
+
+ /// ObjCInitRetE - Default return effect for init methods returning
+ /// Objective-C objects.
+ RetEffect ObjCInitRetE;
+
+ /// SimpleSummaries - Used for uniquing summaries that don't have special
+ /// effects.
+ llvm::FoldingSet<CachedSummaryNode> SimpleSummaries;
+
+ //==-----------------------------------------------------------------==//
+ // Methods.
+ //==-----------------------------------------------------------------==//
+
+ /// getArgEffects - Returns a persistent ArgEffects object based on the
+ /// data in ScratchArgs.
+ ArgEffects getArgEffects();
+
+ enum UnaryFuncKind { cfretain, cfrelease, cfautorelease, cfmakecollectable };
+
+ const RetainSummary *getUnarySummary(const FunctionType* FT,
+ UnaryFuncKind func);
+
+ const RetainSummary *getCFSummaryCreateRule(const FunctionDecl *FD);
+ const RetainSummary *getCFSummaryGetRule(const FunctionDecl *FD);
+ const RetainSummary *getCFCreateGetRuleSummary(const FunctionDecl *FD);
+
+ const RetainSummary *getPersistentSummary(const RetainSummary &OldSumm);
+
+ const RetainSummary *getPersistentSummary(RetEffect RetEff,
+ ArgEffect ReceiverEff = DoNothing,
+ ArgEffect DefaultEff = MayEscape) {
+ RetainSummary Summ(getArgEffects(), RetEff, DefaultEff, ReceiverEff);
+ return getPersistentSummary(Summ);
+ }
+
+ const RetainSummary *getDoNothingSummary() {
+ return getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ }
+
+ const RetainSummary *getDefaultSummary() {
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ DoNothing, MayEscape);
+ }
+
+ const RetainSummary *getPersistentStopSummary() {
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ StopTracking, StopTracking);
+ }
+
+ void InitializeClassMethodSummaries();
+ void InitializeMethodSummaries();
+private:
+ void addNSObjectClsMethSummary(Selector S, const RetainSummary *Summ) {
+ ObjCClassMethodSummaries[S] = Summ;
+ }
+
+ void addNSObjectMethSummary(Selector S, const RetainSummary *Summ) {
+ ObjCMethodSummaries[S] = Summ;
+ }
+
+ void addClassMethSummary(const char* Cls, const char* name,
+ const RetainSummary *Summ, bool isNullary = true) {
+ IdentifierInfo* ClsII = &Ctx.Idents.get(Cls);
+ Selector S = isNullary ? GetNullarySelector(name, Ctx)
+ : GetUnarySelector(name, Ctx);
+ ObjCClassMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
+ }
+
+ void addInstMethSummary(const char* Cls, const char* nullaryName,
+ const RetainSummary *Summ) {
+ IdentifierInfo* ClsII = &Ctx.Idents.get(Cls);
+ Selector S = GetNullarySelector(nullaryName, Ctx);
+ ObjCMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
+ }
+
+ void addMethodSummary(IdentifierInfo *ClsII, ObjCMethodSummariesTy &Summaries,
+ const RetainSummary *Summ, va_list argp) {
+ Selector S = getKeywordSelector(Ctx, argp);
+ Summaries[ObjCSummaryKey(ClsII, S)] = Summ;
+ }
+
+ void addInstMethSummary(const char* Cls, const RetainSummary * Summ, ...) {
+ va_list argp;
+ va_start(argp, Summ);
+ addMethodSummary(&Ctx.Idents.get(Cls), ObjCMethodSummaries, Summ, argp);
+ va_end(argp);
+ }
+
+ void addClsMethSummary(const char* Cls, const RetainSummary * Summ, ...) {
+ va_list argp;
+ va_start(argp, Summ);
+ addMethodSummary(&Ctx.Idents.get(Cls),ObjCClassMethodSummaries, Summ, argp);
+ va_end(argp);
+ }
+
+ void addClsMethSummary(IdentifierInfo *II, const RetainSummary * Summ, ...) {
+ va_list argp;
+ va_start(argp, Summ);
+ addMethodSummary(II, ObjCClassMethodSummaries, Summ, argp);
+ va_end(argp);
+ }
+
+public:
+
+ RetainSummaryManager(ASTContext &ctx, bool gcenabled, bool usesARC)
+ : Ctx(ctx),
+ GCEnabled(gcenabled),
+ ARCEnabled(usesARC),
+ AF(BPAlloc), ScratchArgs(AF.getEmptyMap()),
+ ObjCAllocRetE(gcenabled
+ ? RetEffect::MakeGCNotOwned()
+ : (usesARC ? RetEffect::MakeNotOwned(RetEffect::ObjC)
+ : RetEffect::MakeOwned(RetEffect::ObjC, true))),
+ ObjCInitRetE(gcenabled
+ ? RetEffect::MakeGCNotOwned()
+ : (usesARC ? RetEffect::MakeNotOwned(RetEffect::ObjC)
+ : RetEffect::MakeOwnedWhenTrackedReceiver())) {
+ InitializeClassMethodSummaries();
+ InitializeMethodSummaries();
+ }
+
+ const RetainSummary *getSummary(const CallEvent &Call,
+ ProgramStateRef State = nullptr);
+
+ const RetainSummary *getFunctionSummary(const FunctionDecl *FD);
+
+ const RetainSummary *getMethodSummary(Selector S, const ObjCInterfaceDecl *ID,
+ const ObjCMethodDecl *MD,
+ QualType RetTy,
+ ObjCMethodSummariesTy &CachedSummaries);
+
+ const RetainSummary *getInstanceMethodSummary(const ObjCMethodCall &M,
+ ProgramStateRef State);
+
+ const RetainSummary *getClassMethodSummary(const ObjCMethodCall &M) {
+ assert(!M.isInstanceMessage());
+ const ObjCInterfaceDecl *Class = M.getReceiverInterface();
+
+ return getMethodSummary(M.getSelector(), Class, M.getDecl(),
+ M.getResultType(), ObjCClassMethodSummaries);
+ }
+
+ /// getMethodSummary - This version of getMethodSummary is used to query
+ /// the summary for the current method being analyzed.
+ const RetainSummary *getMethodSummary(const ObjCMethodDecl *MD) {
+ const ObjCInterfaceDecl *ID = MD->getClassInterface();
+ Selector S = MD->getSelector();
+ QualType ResultTy = MD->getReturnType();
+
+ ObjCMethodSummariesTy *CachedSummaries;
+ if (MD->isInstanceMethod())
+ CachedSummaries = &ObjCMethodSummaries;
+ else
+ CachedSummaries = &ObjCClassMethodSummaries;
+
+ return getMethodSummary(S, ID, MD, ResultTy, *CachedSummaries);
+ }
+
+ const RetainSummary *getStandardMethodSummary(const ObjCMethodDecl *MD,
+ Selector S, QualType RetTy);
+
+ /// Determine if there is a special return effect for this function or method.
+ Optional<RetEffect> getRetEffectFromAnnotations(QualType RetTy,
+ const Decl *D);
+
+ void updateSummaryFromAnnotations(const RetainSummary *&Summ,
+ const ObjCMethodDecl *MD);
+
+ void updateSummaryFromAnnotations(const RetainSummary *&Summ,
+ const FunctionDecl *FD);
+
+ void updateSummaryForCall(const RetainSummary *&Summ,
+ const CallEvent &Call);
+
+ bool isGCEnabled() const { return GCEnabled; }
+
+ bool isARCEnabled() const { return ARCEnabled; }
+
+ bool isARCorGCEnabled() const { return GCEnabled || ARCEnabled; }
+
+ RetEffect getObjAllocRetEffect() const { return ObjCAllocRetE; }
+
+ friend class RetainSummaryTemplate;
+};
+
+// Used to avoid allocating long-term (BPAlloc'd) memory for default retain
+// summaries. If a function or method looks like it has a default summary, but
+// it has annotations, the annotations are added to the stack-based template
+// and then copied into managed memory.
+class RetainSummaryTemplate {
+ RetainSummaryManager &Manager;
+ const RetainSummary *&RealSummary;
+ RetainSummary ScratchSummary;
+ bool Accessed;
+public:
+ RetainSummaryTemplate(const RetainSummary *&real, RetainSummaryManager &mgr)
+ : Manager(mgr), RealSummary(real), ScratchSummary(*real), Accessed(false) {}
+
+ ~RetainSummaryTemplate() {
+ if (Accessed)
+ RealSummary = Manager.getPersistentSummary(ScratchSummary);
+ }
+
+ RetainSummary &operator*() {
+ Accessed = true;
+ return ScratchSummary;
+ }
+
+ RetainSummary *operator->() {
+ Accessed = true;
+ return &ScratchSummary;
+ }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Implementation of checker data structures.
+//===----------------------------------------------------------------------===//
+
+ArgEffects RetainSummaryManager::getArgEffects() {
+ ArgEffects AE = ScratchArgs;
+ ScratchArgs = AF.getEmptyMap();
+ return AE;
+}
+
+const RetainSummary *
+RetainSummaryManager::getPersistentSummary(const RetainSummary &OldSumm) {
+ // Unique "simple" summaries -- those without ArgEffects.
+ if (OldSumm.isSimple()) {
+ llvm::FoldingSetNodeID ID;
+ OldSumm.Profile(ID);
+
+ void *Pos;
+ CachedSummaryNode *N = SimpleSummaries.FindNodeOrInsertPos(ID, Pos);
+
+ if (!N) {
+ N = (CachedSummaryNode *) BPAlloc.Allocate<CachedSummaryNode>();
+ new (N) CachedSummaryNode(OldSumm);
+ SimpleSummaries.InsertNode(N, Pos);
+ }
+
+ return &N->getValue();
+ }
+
+ RetainSummary *Summ = (RetainSummary *) BPAlloc.Allocate<RetainSummary>();
+ new (Summ) RetainSummary(OldSumm);
+ return Summ;
+}
+
+//===----------------------------------------------------------------------===//
+// Summary creation for functions (largely uses of Core Foundation).
+//===----------------------------------------------------------------------===//
+
+static bool isRetain(const FunctionDecl *FD, StringRef FName) {
+ return FName.endswith("Retain");
+}
+
+static bool isRelease(const FunctionDecl *FD, StringRef FName) {
+ return FName.endswith("Release");
+}
+
+static bool isAutorelease(const FunctionDecl *FD, StringRef FName) {
+ return FName.endswith("Autorelease");
+}
+
+static bool isMakeCollectable(const FunctionDecl *FD, StringRef FName) {
+ // FIXME: Remove FunctionDecl parameter.
+ // FIXME: Is it really okay if MakeCollectable isn't a suffix?
+ return FName.find("MakeCollectable") != StringRef::npos;
+}
+
+static ArgEffect getStopTrackingHardEquivalent(ArgEffect E) {
+ switch (E) {
+ case DoNothing:
+ case Autorelease:
+ case DecRefBridgedTransferred:
+ case IncRef:
+ case IncRefMsg:
+ case MakeCollectable:
+ case UnretainedOutParameter:
+ case RetainedOutParameter:
+ case MayEscape:
+ case StopTracking:
+ case StopTrackingHard:
+ return StopTrackingHard;
+ case DecRef:
+ case DecRefAndStopTrackingHard:
+ return DecRefAndStopTrackingHard;
+ case DecRefMsg:
+ case DecRefMsgAndStopTrackingHard:
+ return DecRefMsgAndStopTrackingHard;
+ case Dealloc:
+ return Dealloc;
+ }
+
+ llvm_unreachable("Unknown ArgEffect kind");
+}
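+// For example (illustrative): DecRef becomes DecRefAndStopTrackingHard, so a
+// decrement at the callback call site still applies, but the symbol is no
+// longer tracked afterwards; a callback may retain or release it on its own.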
+
+void RetainSummaryManager::updateSummaryForCall(const RetainSummary *&S,
+ const CallEvent &Call) {
+ if (Call.hasNonZeroCallbackArg()) {
+ ArgEffect RecEffect =
+ getStopTrackingHardEquivalent(S->getReceiverEffect());
+ ArgEffect DefEffect =
+ getStopTrackingHardEquivalent(S->getDefaultArgEffect());
+
+ ArgEffects CustomArgEffects = S->getArgEffects();
+ for (ArgEffects::iterator I = CustomArgEffects.begin(),
+ E = CustomArgEffects.end();
+ I != E; ++I) {
+ ArgEffect Translated = getStopTrackingHardEquivalent(I->second);
+ if (Translated != DefEffect)
+ ScratchArgs = AF.add(ScratchArgs, I->first, Translated);
+ }
+
+ RetEffect RE = RetEffect::MakeNoRetHard();
+
+ // Special cases where the callback argument CANNOT free the return value.
+ // This can generally only happen if we know that the callback will only be
+ // called when the return value is already being deallocated.
+ if (const SimpleFunctionCall *FC = dyn_cast<SimpleFunctionCall>(&Call)) {
+ if (IdentifierInfo *Name = FC->getDecl()->getIdentifier()) {
+ // When the CGBitmapContext is deallocated, the callback here will free
+ // the associated data buffer.
+ if (Name->isStr("CGBitmapContextCreateWithData"))
+ RE = S->getRetEffect();
+ }
+ }
+
+ S = getPersistentSummary(RE, RecEffect, DefEffect);
+ }
+
+ // Special case '[super init];' and '[self init];'
+ //
+ // Even though calling '[super init]' without assigning the result to self
+ // and checking if the parent returns 'nil' is a bad pattern, it is common.
+ // Additionally, our Self Init checker already warns about it. To avoid
+ // overwhelming the user with messages from both checkers, we model
+ // '[super init]', when it is not consumed by another expression, as if the
+ // call preserves the value of 'self'; essentially, assuming it can never
+ // fail and return 'nil'.
+ // Note, we don't want to just stop tracking the value since we want the
+ // RetainCount checker to report leaks and use-after-free if the SelfInit
+ // checker is turned off.
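+ //
+ // For illustration, the unconsumed pattern modeled here is:
+ //   - (id)init {
+ //     [super init];  // result ignored; treated as preserving 'self'
+ //     return self;
+ //   }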
+ if (const ObjCMethodCall *MC = dyn_cast<ObjCMethodCall>(&Call)) {
+ if (MC->getMethodFamily() == OMF_init && MC->isReceiverSelfOrSuper()) {
+
+ // Check that the message is not consumed; if so, we know it will not be
+ // used in an assignment such as "self = [super init]".
+ const Expr *ME = MC->getOriginExpr();
+ const LocationContext *LCtx = MC->getLocationContext();
+ ParentMap &PM = LCtx->getAnalysisDeclContext()->getParentMap();
+ if (!PM.isConsumedExpr(ME)) {
+ RetainSummaryTemplate ModifiableSummaryTemplate(S, *this);
+ ModifiableSummaryTemplate->setReceiverEffect(DoNothing);
+ ModifiableSummaryTemplate->setRetEffect(RetEffect::MakeNoRet());
+ }
+ }
+ }
+}
+
+const RetainSummary *
+RetainSummaryManager::getSummary(const CallEvent &Call,
+ ProgramStateRef State) {
+ const RetainSummary *Summ;
+ switch (Call.getKind()) {
+ case CE_Function:
+ Summ = getFunctionSummary(cast<SimpleFunctionCall>(Call).getDecl());
+ break;
+ case CE_CXXMember:
+ case CE_CXXMemberOperator:
+ case CE_Block:
+ case CE_CXXConstructor:
+ case CE_CXXDestructor:
+ case CE_CXXAllocator:
+ // FIXME: These calls are currently unsupported.
+ return getPersistentStopSummary();
+ case CE_ObjCMessage: {
+ const ObjCMethodCall &Msg = cast<ObjCMethodCall>(Call);
+ if (Msg.isInstanceMessage())
+ Summ = getInstanceMethodSummary(Msg, State);
+ else
+ Summ = getClassMethodSummary(Msg);
+ break;
+ }
+ }
+
+ updateSummaryForCall(Summ, Call);
+
+ assert(Summ && "Unknown call type?");
+ return Summ;
+}
+
+const RetainSummary *
+RetainSummaryManager::getFunctionSummary(const FunctionDecl *FD) {
+ // If we don't know what function we're calling, use our default summary.
+ if (!FD)
+ return getDefaultSummary();
+
+ // Look up a summary in our cache of FunctionDecls -> Summaries.
+ FuncSummariesTy::iterator I = FuncSummaries.find(FD);
+ if (I != FuncSummaries.end())
+ return I->second;
+
+ // No summary? Generate one.
+ const RetainSummary *S = nullptr;
+ bool AllowAnnotations = true;
+
+ do {
+ // We generate "stop" summaries for implicitly defined functions.
+ if (FD->isImplicit()) {
+ S = getPersistentStopSummary();
+ break;
+ }
+
+ // [PR 3337] Use 'getAs<FunctionType>' to strip away any typedefs on the
+ // function's type.
+ const FunctionType* FT = FD->getType()->getAs<FunctionType>();
+ const IdentifierInfo *II = FD->getIdentifier();
+ if (!II)
+ break;
+
+ StringRef FName = II->getName();
+
+ // Strip away preceding '_'. Doing this here will affect all the checks
+ // down below.
+ FName = FName.substr(FName.find_first_not_of('_'));
+
+ // Inspect the result type.
+ QualType RetTy = FT->getReturnType();
+
+ // FIXME: This should all be refactored into a chain of "summary lookup"
+ // filters.
+ assert(ScratchArgs.isEmpty());
+
+ if (FName == "pthread_create" || FName == "pthread_setspecific") {
+ // Part of: <rdar://problem/7299394> and <rdar://problem/11282706>.
+ // This will be addressed better with IPA.
+ S = getPersistentStopSummary();
+ } else if (FName == "NSMakeCollectable") {
+ // Handle: id NSMakeCollectable(CFTypeRef)
+ S = (RetTy->isObjCIdType())
+ ? getUnarySummary(FT, cfmakecollectable)
+ : getPersistentStopSummary();
+ // The headers on OS X 10.8 use cf_consumed/ns_returns_retained,
+ // but we can fully model NSMakeCollectable ourselves.
+ AllowAnnotations = false;
+ } else if (FName == "CFPlugInInstanceCreate") {
+ S = getPersistentSummary(RetEffect::MakeNoRet());
+ } else if (FName == "IOBSDNameMatching" ||
+ FName == "IOServiceMatching" ||
+ FName == "IOServiceNameMatching" ||
+ FName == "IORegistryEntrySearchCFProperty" ||
+ FName == "IORegistryEntryIDMatching" ||
+ FName == "IOOpenFirmwarePathMatching") {
+ // Part of <rdar://problem/6961230>. (IOKit)
+ // This should be addressed using an API table.
+ S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true),
+ DoNothing, DoNothing);
+ } else if (FName == "IOServiceGetMatchingService" ||
+ FName == "IOServiceGetMatchingServices") {
+ // FIXES: <rdar://problem/6326900>
+ // This should be addressed using an API table. These string comparisons
+ // are also a little gross, but there is no need to super-optimize here.
+ ScratchArgs = AF.add(ScratchArgs, 1, DecRef);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ } else if (FName == "IOServiceAddNotification" ||
+ FName == "IOServiceAddMatchingNotification") {
+ // Part of <rdar://problem/6961230>. (IOKit)
+ // This should be addressed using an API table.
+ ScratchArgs = AF.add(ScratchArgs, 2, DecRef);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ } else if (FName == "CVPixelBufferCreateWithBytes") {
+ // FIXES: <rdar://problem/7283567>
+ // Eventually this can be improved by recognizing that the pixel
+ // buffer passed to CVPixelBufferCreateWithBytes is released via
+ // a callback and doing full IPA to make sure this is done correctly.
+ // FIXME: This function has an out parameter that returns an
+ // allocated object.
+ ScratchArgs = AF.add(ScratchArgs, 7, StopTracking);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ } else if (FName == "CGBitmapContextCreateWithData") {
+ // FIXES: <rdar://problem/7358899>
+ // Eventually this can be improved by recognizing that 'releaseInfo'
+ // passed to CGBitmapContextCreateWithData is released via
+ // a callback and doing full IPA to make sure this is done correctly.
+ ScratchArgs = AF.add(ScratchArgs, 8, StopTracking);
+ S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true),
+ DoNothing, DoNothing);
+ } else if (FName == "CVPixelBufferCreateWithPlanarBytes") {
+ // FIXES: <rdar://problem/7283567>
+ // Eventually this can be improved by recognizing that the pixel
+ // buffer passed to CVPixelBufferCreateWithPlanarBytes is released
+ // via a callback and doing full IPA to make sure this is done
+ // correctly.
+ ScratchArgs = AF.add(ScratchArgs, 12, StopTracking);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ } else if (FName == "dispatch_set_context" ||
+ FName == "xpc_connection_set_context") {
+ // <rdar://problem/11059275> - The analyzer currently doesn't have
+ // a good way to reason about the finalizer function for libdispatch.
+ // If we pass a context object that is memory managed, stop tracking it.
+ // <rdar://problem/13783514> - Same problem, but for XPC.
+ // FIXME: this hack should possibly go away once we can handle
+ // libdispatch and XPC finalizers.
+ ScratchArgs = AF.add(ScratchArgs, 1, StopTracking);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ } else if (FName.startswith("NSLog")) {
+ S = getDoNothingSummary();
+ } else if (FName.startswith("NS") &&
+ (FName.find("Insert") != StringRef::npos)) {
+ // Whitelist NSXXInsertXX, for example NSMapInsertIfAbsent, since they can
+ // be deallocated by NSMapRemove. (radar://11152419)
+ ScratchArgs = AF.add(ScratchArgs, 1, StopTracking);
+ ScratchArgs = AF.add(ScratchArgs, 2, StopTracking);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ }
+
+ // Did we get a summary?
+ if (S)
+ break;
+
+ if (RetTy->isPointerType()) {
+ // For CoreFoundation ('CF') types.
+ if (cocoa::isRefType(RetTy, "CF", FName)) {
+ if (isRetain(FD, FName)) {
+ S = getUnarySummary(FT, cfretain);
+ } else if (isAutorelease(FD, FName)) {
+ S = getUnarySummary(FT, cfautorelease);
+ // The headers use cf_consumed, but we can fully model CFAutorelease
+ // ourselves.
+ AllowAnnotations = false;
+ } else if (isMakeCollectable(FD, FName)) {
+ S = getUnarySummary(FT, cfmakecollectable);
+ AllowAnnotations = false;
+ } else {
+ S = getCFCreateGetRuleSummary(FD);
+ }
+
+ break;
+ }
+
+ // For CoreGraphics ('CG') types.
+ if (cocoa::isRefType(RetTy, "CG", FName)) {
+ if (isRetain(FD, FName))
+ S = getUnarySummary(FT, cfretain);
+ else
+ S = getCFCreateGetRuleSummary(FD);
+
+ break;
+ }
+
+ // For the Disk Arbitration API (DiskArbitration/DADisk.h)
+ if (cocoa::isRefType(RetTy, "DADisk") ||
+ cocoa::isRefType(RetTy, "DADissenter") ||
+ cocoa::isRefType(RetTy, "DASessionRef")) {
+ S = getCFCreateGetRuleSummary(FD);
+ break;
+ }
+
+ if (FD->hasAttr<CFAuditedTransferAttr>()) {
+ S = getCFCreateGetRuleSummary(FD);
+ break;
+ }
+
+ break;
+ }
+
+ // Check for release functions, the only kind of functions that we care
+ // about that don't return a pointer type.
+ if (FName[0] == 'C' && (FName[1] == 'F' || FName[1] == 'G')) {
+ // Test for 'CGCF'.
+ FName = FName.substr(FName.startswith("CGCF") ? 4 : 2);
+
+ if (isRelease(FD, FName))
+ S = getUnarySummary(FT, cfrelease);
+ else {
+ assert (ScratchArgs.isEmpty());
+ // Remaining CoreFoundation and CoreGraphics functions.
+ // We used to assume that they all strictly followed the ownership idiom
+ // and that ownership cannot be transferred. While this is technically
+ // correct, many methods allow a tracked object to escape. For example:
+ //
+ // CFMutableDictionaryRef x = CFDictionaryCreateMutable(...);
+ // CFDictionaryAddValue(y, key, x);
+ // CFRelease(x);
+ // ... it is okay to use 'x' since 'y' has a reference to it
+ //
+ // We handle this and similar cases with the following heuristic. If the
+ // function name contains "InsertValue", "SetValue", "AddValue",
+ // "AppendValue", or "SetAttribute", then we assume that arguments may
+ // "escape." This means that something else holds on to the object,
+ // allowing it to be used even after its local retain count drops to 0.
+ ArgEffect E = (StrInStrNoCase(FName, "InsertValue") != StringRef::npos||
+ StrInStrNoCase(FName, "AddValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "SetValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "AppendValue") != StringRef::npos||
+ StrInStrNoCase(FName, "SetAttribute") != StringRef::npos)
+ ? MayEscape : DoNothing;
+
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, E);
+ }
+ }
+ }
+ while (0);
+
+ // If we got all the way here without any luck, use a default summary.
+ if (!S)
+ S = getDefaultSummary();
+
+ // Annotations override defaults.
+ if (AllowAnnotations)
+ updateSummaryFromAnnotations(S, FD);
+
+ FuncSummaries[FD] = S;
+ return S;
+}
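+// For example (illustrative): CFStringCreateCopy() reaches the CF-prefix
+// branch above and, per the Create rule, gets an owned (+1) CF return
+// effect, while CFRetain()/CFRelease() match the unary summaries.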
+
+const RetainSummary *
+RetainSummaryManager::getCFCreateGetRuleSummary(const FunctionDecl *FD) {
+ if (coreFoundation::followsCreateRule(FD))
+ return getCFSummaryCreateRule(FD);
+
+ return getCFSummaryGetRule(FD);
+}
+
+const RetainSummary *
+RetainSummaryManager::getUnarySummary(const FunctionType* FT,
+ UnaryFuncKind func) {
+
+ // Sanity check that this is *really* a unary function. A mismatch can
+ // happen if people do weird things.
+ const FunctionProtoType* FTP = dyn_cast<FunctionProtoType>(FT);
+ if (!FTP || FTP->getNumParams() != 1)
+ return getPersistentStopSummary();
+
+ assert (ScratchArgs.isEmpty());
+
+ ArgEffect Effect;
+ switch (func) {
+ case cfretain: Effect = IncRef; break;
+ case cfrelease: Effect = DecRef; break;
+ case cfautorelease: Effect = Autorelease; break;
+ case cfmakecollectable: Effect = MakeCollectable; break;
+ }
+
+ ScratchArgs = AF.add(ScratchArgs, 0, Effect);
+ return getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+}
+
+const RetainSummary *
+RetainSummaryManager::getCFSummaryCreateRule(const FunctionDecl *FD) {
+ assert (ScratchArgs.isEmpty());
+
+ return getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
+}
+
+const RetainSummary *
+RetainSummaryManager::getCFSummaryGetRule(const FunctionDecl *FD) {
+ assert (ScratchArgs.isEmpty());
+ return getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::CF),
+ DoNothing, DoNothing);
+}
+
+//===----------------------------------------------------------------------===//
+// Summary creation for Selectors.
+//===----------------------------------------------------------------------===//
+
+Optional<RetEffect>
+RetainSummaryManager::getRetEffectFromAnnotations(QualType RetTy,
+ const Decl *D) {
+ if (cocoa::isCocoaObjectRef(RetTy)) {
+ if (D->hasAttr<NSReturnsRetainedAttr>())
+ return ObjCAllocRetE;
+
+ if (D->hasAttr<NSReturnsNotRetainedAttr>() ||
+ D->hasAttr<NSReturnsAutoreleasedAttr>())
+ return RetEffect::MakeNotOwned(RetEffect::ObjC);
+
+ } else if (!RetTy->isPointerType()) {
+ return None;
+ }
+
+ if (D->hasAttr<CFReturnsRetainedAttr>())
+ return RetEffect::MakeOwned(RetEffect::CF, true);
+
+ if (D->hasAttr<CFReturnsNotRetainedAttr>())
+ return RetEffect::MakeNotOwned(RetEffect::CF);
+
+ return None;
+}
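+// For example (illustrative; 'MyCreateString' is a made-up declaration):
+//   CFStringRef MyCreateString(void) CF_RETURNS_RETAINED;
+// maps to RetEffect::MakeOwned(RetEffect::CF, true), and a method marked
+// NS_RETURNS_NOT_RETAINED maps to RetEffect::MakeNotOwned(RetEffect::ObjC).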
+
+void
+RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
+ const FunctionDecl *FD) {
+ if (!FD)
+ return;
+
+ assert(Summ && "Must have a summary to add annotations to.");
+ RetainSummaryTemplate Template(Summ, *this);
+
+ // Effects on the parameters.
+ unsigned parm_idx = 0;
+ for (FunctionDecl::param_const_iterator pi = FD->param_begin(),
+ pe = FD->param_end(); pi != pe; ++pi, ++parm_idx) {
+ const ParmVarDecl *pd = *pi;
+ if (pd->hasAttr<NSConsumedAttr>())
+ Template->addArg(AF, parm_idx, DecRefMsg);
+ else if (pd->hasAttr<CFConsumedAttr>())
+ Template->addArg(AF, parm_idx, DecRef);
+ else if (pd->hasAttr<CFReturnsRetainedAttr>()) {
+ QualType PointeeTy = pd->getType()->getPointeeType();
+ if (!PointeeTy.isNull())
+ if (coreFoundation::isCFObjectRef(PointeeTy))
+ Template->addArg(AF, parm_idx, RetainedOutParameter);
+ } else if (pd->hasAttr<CFReturnsNotRetainedAttr>()) {
+ QualType PointeeTy = pd->getType()->getPointeeType();
+ if (!PointeeTy.isNull())
+ if (coreFoundation::isCFObjectRef(PointeeTy))
+ Template->addArg(AF, parm_idx, UnretainedOutParameter);
+ }
+ }
+
+ QualType RetTy = FD->getReturnType();
+ if (Optional<RetEffect> RetE = getRetEffectFromAnnotations(RetTy, FD))
+ Template->setRetEffect(*RetE);
+}
+
+void
+RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
+ const ObjCMethodDecl *MD) {
+ if (!MD)
+ return;
+
+ assert(Summ && "Must have a valid summary to add annotations to");
+ RetainSummaryTemplate Template(Summ, *this);
+
+ // Effects on the receiver.
+ if (MD->hasAttr<NSConsumesSelfAttr>())
+ Template->setReceiverEffect(DecRefMsg);
+
+ // Effects on the parameters.
+ unsigned parm_idx = 0;
+ for (ObjCMethodDecl::param_const_iterator
+ pi=MD->param_begin(), pe=MD->param_end();
+ pi != pe; ++pi, ++parm_idx) {
+ const ParmVarDecl *pd = *pi;
+ if (pd->hasAttr<NSConsumedAttr>())
+ Template->addArg(AF, parm_idx, DecRefMsg);
+ else if (pd->hasAttr<CFConsumedAttr>()) {
+ Template->addArg(AF, parm_idx, DecRef);
+ } else if (pd->hasAttr<CFReturnsRetainedAttr>()) {
+ QualType PointeeTy = pd->getType()->getPointeeType();
+ if (!PointeeTy.isNull())
+ if (coreFoundation::isCFObjectRef(PointeeTy))
+ Template->addArg(AF, parm_idx, RetainedOutParameter);
+ } else if (pd->hasAttr<CFReturnsNotRetainedAttr>()) {
+ QualType PointeeTy = pd->getType()->getPointeeType();
+ if (!PointeeTy.isNull())
+ if (coreFoundation::isCFObjectRef(PointeeTy))
+ Template->addArg(AF, parm_idx, UnretainedOutParameter);
+ }
+ }
+
+ QualType RetTy = MD->getReturnType();
+ if (Optional<RetEffect> RetE = getRetEffectFromAnnotations(RetTy, MD))
+ Template->setRetEffect(*RetE);
+}
+
+const RetainSummary *
+RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD,
+ Selector S, QualType RetTy) {
+ // Any special effects?
+ ArgEffect ReceiverEff = DoNothing;
+ RetEffect ResultEff = RetEffect::MakeNoRet();
+
+ // Check the method family, and apply any default annotations.
+ switch (MD ? MD->getMethodFamily() : S.getMethodFamily()) {
+ case OMF_None:
+ case OMF_initialize:
+ case OMF_performSelector:
+ // Assume all Objective-C methods follow Cocoa Memory Management rules.
+ // FIXME: Does the non-threaded performSelector family really belong here?
+ // The selector could be, say, @selector(copy).
+ if (cocoa::isCocoaObjectRef(RetTy))
+ ResultEff = RetEffect::MakeNotOwned(RetEffect::ObjC);
+ else if (coreFoundation::isCFObjectRef(RetTy)) {
+ // ObjCMethodDecl currently doesn't consider CF objects as valid return
+ // values for alloc, new, copy, or mutableCopy, so we have to
+ // double-check with the selector. This is ugly, but there aren't that
+ // many Objective-C methods that return CF objects, right?
+ if (MD) {
+ switch (S.getMethodFamily()) {
+ case OMF_alloc:
+ case OMF_new:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ ResultEff = RetEffect::MakeOwned(RetEffect::CF, true);
+ break;
+ default:
+ ResultEff = RetEffect::MakeNotOwned(RetEffect::CF);
+ break;
+ }
+ } else {
+ ResultEff = RetEffect::MakeNotOwned(RetEffect::CF);
+ }
+ }
+ break;
+ case OMF_init:
+ ResultEff = ObjCInitRetE;
+ ReceiverEff = DecRefMsg;
+ break;
+ case OMF_alloc:
+ case OMF_new:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ if (cocoa::isCocoaObjectRef(RetTy))
+ ResultEff = ObjCAllocRetE;
+ else if (coreFoundation::isCFObjectRef(RetTy))
+ ResultEff = RetEffect::MakeOwned(RetEffect::CF, true);
+ break;
+ case OMF_autorelease:
+ ReceiverEff = Autorelease;
+ break;
+ case OMF_retain:
+ ReceiverEff = IncRefMsg;
+ break;
+ case OMF_release:
+ ReceiverEff = DecRefMsg;
+ break;
+ case OMF_dealloc:
+ ReceiverEff = Dealloc;
+ break;
+ case OMF_self:
+ // -self is handled specially by the ExprEngine to propagate the receiver.
+ break;
+ case OMF_retainCount:
+ case OMF_finalize:
+ // These methods don't return objects.
+ break;
+ }
+
+ // If one of the keyword slots in the selector ends with 'delegate' we
+ // should stop tracking the reference count for the receiver. This is
+ // because the reference count is quite possibly handled by a delegate
+ // method.
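+ // For example (illustrative), '-initWithTarget:delegate:' has a keyword
+ // slot ending in 'delegate': for an init method the hard no-return effect
+ // is used; otherwise the receiver simply stops being tracked.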
+ if (S.isKeywordSelector()) {
+ for (unsigned i = 0, e = S.getNumArgs(); i != e; ++i) {
+ StringRef Slot = S.getNameForSlot(i);
+ if (Slot.substr(Slot.size() - 8).equals_lower("delegate")) {
+ if (ResultEff == ObjCInitRetE)
+ ResultEff = RetEffect::MakeNoRetHard();
+ else
+ ReceiverEff = StopTrackingHard;
+ }
+ }
+ }
+
+ if (ScratchArgs.isEmpty() && ReceiverEff == DoNothing &&
+ ResultEff.getKind() == RetEffect::NoRet)
+ return getDefaultSummary();
+
+ return getPersistentSummary(ResultEff, ReceiverEff, MayEscape);
+}
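+// For example (illustrative): '-copy' on a Cocoa object gets the owned (+1)
+// ObjCAllocRetE return effect, while a plain getter such as '-name' falls
+// under OMF_None and is treated as returning an unowned (+0) object.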
+
+const RetainSummary *
+RetainSummaryManager::getInstanceMethodSummary(const ObjCMethodCall &Msg,
+ ProgramStateRef State) {
+ const ObjCInterfaceDecl *ReceiverClass = nullptr;
+
+ // We do better tracking of the type of the object than the core ExprEngine.
+ // See if we have its type in our private state.
+ // FIXME: Eventually replace the use of state->get<RefBindings> with
+ // a generic API for reasoning about the Objective-C types of symbolic
+ // objects.
+ SVal ReceiverV = Msg.getReceiverSVal();
+ if (SymbolRef Sym = ReceiverV.getAsLocSymbol())
+ if (const RefVal *T = getRefBinding(State, Sym))
+ if (const ObjCObjectPointerType *PT =
+ T->getType()->getAs<ObjCObjectPointerType>())
+ ReceiverClass = PT->getInterfaceDecl();
+
+ // If we don't know what kind of object this is, fall back to its static type.
+ if (!ReceiverClass)
+ ReceiverClass = Msg.getReceiverInterface();
+
+ // FIXME: The receiver could be a reference to a class, meaning that
+ // we should use the class method.
+ // id x = [NSObject class];
+ // [x performSelector:... withObject:... afterDelay:...];
+ Selector S = Msg.getSelector();
+ const ObjCMethodDecl *Method = Msg.getDecl();
+ if (!Method && ReceiverClass)
+ Method = ReceiverClass->getInstanceMethod(S);
+
+ return getMethodSummary(S, ReceiverClass, Method, Msg.getResultType(),
+ ObjCMethodSummaries);
+}
+
+const RetainSummary *
+RetainSummaryManager::getMethodSummary(Selector S, const ObjCInterfaceDecl *ID,
+ const ObjCMethodDecl *MD, QualType RetTy,
+ ObjCMethodSummariesTy &CachedSummaries) {
+
+ // Look up a summary in our summary cache.
+ const RetainSummary *Summ = CachedSummaries.find(ID, S);
+
+ if (!Summ) {
+ Summ = getStandardMethodSummary(MD, S, RetTy);
+
+ // Annotations override defaults.
+ updateSummaryFromAnnotations(Summ, MD);
+
+ // Memoize the summary.
+ CachedSummaries[ObjCSummaryKey(ID, S)] = Summ;
+ }
+
+ return Summ;
+}
+
+void RetainSummaryManager::InitializeClassMethodSummaries() {
+ assert(ScratchArgs.isEmpty());
+ // Create the [NSAssertionHandler currentHandler] summary.
+ addClassMethSummary("NSAssertionHandler", "currentHandler",
+ getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::ObjC)));
+
+ // Create the [NSAutoreleasePool addObject:] summary.
+ ScratchArgs = AF.add(ScratchArgs, 0, Autorelease);
+ addClassMethSummary("NSAutoreleasePool", "addObject",
+ getPersistentSummary(RetEffect::MakeNoRet(),
+ DoNothing, Autorelease));
+}
+
+void RetainSummaryManager::InitializeMethodSummaries() {
+
+ assert (ScratchArgs.isEmpty());
+
+ // Create the "init" selector. It just acts as a pass-through for the
+ // receiver.
+ const RetainSummary *InitSumm = getPersistentSummary(ObjCInitRetE, DecRefMsg);
+ addNSObjectMethSummary(GetNullarySelector("init", Ctx), InitSumm);
+
+ // awakeAfterUsingCoder: behaves basically like an 'init' method. It
+ // claims the receiver and returns a retained object.
+ addNSObjectMethSummary(GetUnarySelector("awakeAfterUsingCoder", Ctx),
+ InitSumm);
+
+ // The next methods are allocators.
+ const RetainSummary *AllocSumm = getPersistentSummary(ObjCAllocRetE);
+ const RetainSummary *CFAllocSumm =
+ getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
+
+ // Create the "retain" selector.
+ RetEffect NoRet = RetEffect::MakeNoRet();
+ const RetainSummary *Summ = getPersistentSummary(NoRet, IncRefMsg);
+ addNSObjectMethSummary(GetNullarySelector("retain", Ctx), Summ);
+
+ // Create the "release" selector.
+ Summ = getPersistentSummary(NoRet, DecRefMsg);
+ addNSObjectMethSummary(GetNullarySelector("release", Ctx), Summ);
+
+ // Create the -dealloc summary.
+ Summ = getPersistentSummary(NoRet, Dealloc);
+ addNSObjectMethSummary(GetNullarySelector("dealloc", Ctx), Summ);
+
+ // Create the "autorelease" selector.
+ Summ = getPersistentSummary(NoRet, Autorelease);
+ addNSObjectMethSummary(GetNullarySelector("autorelease", Ctx), Summ);
+
+ // For NSWindow, allocated objects are (initially) self-owned.
+ // FIXME: For now we opt for false negatives with NSWindow, as these objects
+ // own themselves. However, they only do this once they are displayed.
+ // Thus, we need to track an NSWindow's display status.
+ // This is tracked in <rdar://problem/6062711>.
+ // See also http://llvm.org/bugs/show_bug.cgi?id=3714.
+ const RetainSummary *NoTrackYet = getPersistentSummary(RetEffect::MakeNoRet(),
+ StopTracking,
+ StopTracking);
+
+ addClassMethSummary("NSWindow", "alloc", NoTrackYet);
+
+ // For NSPanel (which subclasses NSWindow), allocated objects are not
+ // self-owned.
+ // FIXME: For now we don't track NSPanel objects for the same reason
+ // as NSWindow objects.
+ addClassMethSummary("NSPanel", "alloc", NoTrackYet);
+
+ // For NSNull, objects returned by +null are singletons that ignore
+ // retain/release semantics. Just don't track them.
+ // <rdar://problem/12858915>
+ addClassMethSummary("NSNull", "null", NoTrackYet);
+
+ // Don't track allocated autorelease pools, as it is okay to prematurely
+ // exit a method.
+ addClassMethSummary("NSAutoreleasePool", "alloc", NoTrackYet);
+ addClassMethSummary("NSAutoreleasePool", "allocWithZone", NoTrackYet, false);
+ addClassMethSummary("NSAutoreleasePool", "new", NoTrackYet);
+
+ // Create summaries for QCRenderer/QCView -createSnapshotImageOfType:.
+ addInstMethSummary("QCRenderer", AllocSumm,
+ "createSnapshotImageOfType", nullptr);
+ addInstMethSummary("QCView", AllocSumm,
+ "createSnapshotImageOfType", nullptr);
+
+ // Create summaries for CIContext, 'createCGImage' and
+ // 'createCGLayerWithSize'. These objects are CF objects, and are not
+ // automatically garbage collected.
+ addInstMethSummary("CIContext", CFAllocSumm,
+ "createCGImage", "fromRect", nullptr);
+ addInstMethSummary("CIContext", CFAllocSumm, "createCGImage", "fromRect",
+ "format", "colorSpace", nullptr);
+ addInstMethSummary("CIContext", CFAllocSumm, "createCGLayerWithSize", "info",
+ nullptr);
+}
+
+//===----------------------------------------------------------------------===//
+// Error reporting.
+//===----------------------------------------------------------------------===//
+namespace {
+ typedef llvm::DenseMap<const ExplodedNode *, const RetainSummary *>
+ SummaryLogTy;
+
+ //===-------------===//
+ // Bug Descriptions. //
+ //===-------------===//
+
+ class CFRefBug : public BugType {
+ protected:
+ CFRefBug(const CheckerBase *checker, StringRef name)
+ : BugType(checker, name, categories::MemoryCoreFoundationObjectiveC) {}
+
+ public:
+
+ // FIXME: Eventually remove.
+ virtual const char *getDescription() const = 0;
+
+ virtual bool isLeak() const { return false; }
+ };
+
+ class UseAfterRelease : public CFRefBug {
+ public:
+ UseAfterRelease(const CheckerBase *checker)
+ : CFRefBug(checker, "Use-after-release") {}
+
+ const char *getDescription() const override {
+ return "Reference-counted object is used after it is released";
+ }
+ };
+
+ class BadRelease : public CFRefBug {
+ public:
+ BadRelease(const CheckerBase *checker) : CFRefBug(checker, "Bad release") {}
+
+ const char *getDescription() const override {
+ return "Incorrect decrement of the reference count of an object that is "
+ "not owned at this point by the caller";
+ }
+ };
+
+ class DeallocGC : public CFRefBug {
+ public:
+ DeallocGC(const CheckerBase *checker)
+ : CFRefBug(checker, "-dealloc called while using garbage collection") {}
+
+ const char *getDescription() const override {
+ return "-dealloc called while using garbage collection";
+ }
+ };
+
+ class DeallocNotOwned : public CFRefBug {
+ public:
+ DeallocNotOwned(const CheckerBase *checker)
+ : CFRefBug(checker, "-dealloc sent to non-exclusively owned object") {}
+
+ const char *getDescription() const override {
+ return "-dealloc sent to object that may be referenced elsewhere";
+ }
+ };
+
+ class OverAutorelease : public CFRefBug {
+ public:
+ OverAutorelease(const CheckerBase *checker)
+ : CFRefBug(checker, "Object autoreleased too many times") {}
+
+ const char *getDescription() const override {
+ return "Object autoreleased too many times";
+ }
+ };
+
+ class ReturnedNotOwnedForOwned : public CFRefBug {
+ public:
+ ReturnedNotOwnedForOwned(const CheckerBase *checker)
+ : CFRefBug(checker, "Method should return an owned object") {}
+
+ const char *getDescription() const override {
+ return "Object with a +0 retain count returned to caller where a +1 "
+ "(owning) retain count is expected";
+ }
+ };
+
+ class Leak : public CFRefBug {
+ public:
+ Leak(const CheckerBase *checker, StringRef name) : CFRefBug(checker, name) {
+ // Leaks should not be reported if they are post-dominated by a sink.
+ setSuppressOnSink(true);
+ }
+
+ const char *getDescription() const override { return ""; }
+
+ bool isLeak() const override { return true; }
+ };
+
+ //===---------===//
+ // Bug Reports. //
+ //===---------===//
+
+ class CFRefReportVisitor : public BugReporterVisitorImpl<CFRefReportVisitor> {
+ protected:
+ SymbolRef Sym;
+ const SummaryLogTy &SummaryLog;
+ bool GCEnabled;
+
+ public:
+ CFRefReportVisitor(SymbolRef sym, bool gcEnabled, const SummaryLogTy &log)
+ : Sym(sym), SummaryLog(log), GCEnabled(gcEnabled) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ static int x = 0;
+ ID.AddPointer(&x);
+ ID.AddPointer(Sym);
+ }
+
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
+
+ std::unique_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *N,
+ BugReport &BR) override;
+ };
+
+ class CFRefLeakReportVisitor : public CFRefReportVisitor {
+ public:
+ CFRefLeakReportVisitor(SymbolRef sym, bool GCEnabled,
+ const SummaryLogTy &log)
+ : CFRefReportVisitor(sym, GCEnabled, log) {}
+
+ std::unique_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *N,
+ BugReport &BR) override;
+
+ std::unique_ptr<BugReporterVisitor> clone() const override {
+ // The curiously-recurring template pattern only works for one level of
+ // subclassing. Rather than make a new template base for
+ // CFRefReportVisitor, we simply override clone() to do the right thing.
+ // This could be trouble someday if BugReporterVisitorImpl is ever
+ // used for something else besides a convenient implementation of clone().
+ return llvm::make_unique<CFRefLeakReportVisitor>(*this);
+ }
+ };
+
+ class CFRefReport : public BugReport {
+ void addGCModeDescription(const LangOptions &LOpts, bool GCEnabled);
+
+ public:
+ CFRefReport(CFRefBug &D, const LangOptions &LOpts, bool GCEnabled,
+ const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
+ bool registerVisitor = true)
+ : BugReport(D, D.getDescription(), n) {
+ if (registerVisitor)
+ addVisitor(llvm::make_unique<CFRefReportVisitor>(sym, GCEnabled, Log));
+ addGCModeDescription(LOpts, GCEnabled);
+ }
+
+ CFRefReport(CFRefBug &D, const LangOptions &LOpts, bool GCEnabled,
+ const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
+ StringRef endText)
+ : BugReport(D, D.getDescription(), endText, n) {
+ addVisitor(llvm::make_unique<CFRefReportVisitor>(sym, GCEnabled, Log));
+ addGCModeDescription(LOpts, GCEnabled);
+ }
+
+ llvm::iterator_range<ranges_iterator> getRanges() override {
+ const CFRefBug& BugTy = static_cast<CFRefBug&>(getBugType());
+ if (!BugTy.isLeak())
+ return BugReport::getRanges();
+ return llvm::make_range(ranges_iterator(), ranges_iterator());
+ }
+ };
+
+ class CFRefLeakReport : public CFRefReport {
+ const MemRegion* AllocBinding;
+ public:
+ CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts, bool GCEnabled,
+ const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
+ CheckerContext &Ctx,
+ bool IncludeAllocationLine);
+
+ PathDiagnosticLocation getLocation(const SourceManager &SM) const override {
+ assert(Location.isValid());
+ return Location;
+ }
+ };
+} // end anonymous namespace
+
+void CFRefReport::addGCModeDescription(const LangOptions &LOpts,
+ bool GCEnabled) {
+ const char *GCModeDescription = nullptr;
+
+ switch (LOpts.getGC()) {
+ case LangOptions::GCOnly:
+ assert(GCEnabled);
+ GCModeDescription = "Code is compiled to only use garbage collection";
+ break;
+
+ case LangOptions::NonGC:
+ assert(!GCEnabled);
+ GCModeDescription = "Code is compiled to use reference counts";
+ break;
+
+ case LangOptions::HybridGC:
+ if (GCEnabled) {
+ GCModeDescription = "Code is compiled to use either garbage collection "
+ "(GC) or reference counts (non-GC). The bug occurs "
+ "with GC enabled";
+ break;
+ } else {
+ GCModeDescription = "Code is compiled to use either garbage collection "
+ "(GC) or reference counts (non-GC). The bug occurs "
+ "in non-GC mode";
+ break;
+ }
+ }
+
+ assert(GCModeDescription && "invalid/unknown GC mode");
+ addExtraText(GCModeDescription);
+}
+
+static bool isNumericLiteralExpression(const Expr *E) {
+ // FIXME: This set of cases was copied from SemaExprObjC.
+ return isa<IntegerLiteral>(E) ||
+ isa<CharacterLiteral>(E) ||
+ isa<FloatingLiteral>(E) ||
+ isa<ObjCBoolLiteralExpr>(E) ||
+ isa<CXXBoolLiteralExpr>(E);
+}
+
+/// Returns true if this stack frame is for an Objective-C method that is a
+/// property getter or setter whose body has been synthesized by the analyzer.
+static bool isSynthesizedAccessor(const StackFrameContext *SFC) {
+ auto Method = dyn_cast_or_null<ObjCMethodDecl>(SFC->getDecl());
+ if (!Method || !Method->isPropertyAccessor())
+ return false;
+
+ return SFC->getAnalysisDeclContext()->isBodyAutosynthesized();
+}
+
+PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+ // FIXME: We will eventually need to handle non-statement-based events
+ // (__attribute__((cleanup))).
+ if (!N->getLocation().getAs<StmtPoint>())
+ return nullptr;
+
+ // Check if the type state has changed.
+ ProgramStateRef PrevSt = PrevN->getState();
+ ProgramStateRef CurrSt = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
+
+ const RefVal* CurrT = getRefBinding(CurrSt, Sym);
+ if (!CurrT) return nullptr;
+
+ const RefVal &CurrV = *CurrT;
+ const RefVal *PrevT = getRefBinding(PrevSt, Sym);
+
+ // Create a string buffer to contain all the useful things we want
+ // to tell the user.
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ // This is the allocation site since the previous node had no bindings
+ // for this symbol.
+ if (!PrevT) {
+ const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
+
+ if (isa<ObjCIvarRefExpr>(S) &&
+ isSynthesizedAccessor(LCtx->getCurrentStackFrame())) {
+ S = LCtx->getCurrentStackFrame()->getCallSite();
+ }
+
+ if (isa<ObjCArrayLiteral>(S)) {
+ os << "NSArray literal is an object with a +0 retain count";
+ }
+ else if (isa<ObjCDictionaryLiteral>(S)) {
+ os << "NSDictionary literal is an object with a +0 retain count";
+ }
+ else if (const ObjCBoxedExpr *BL = dyn_cast<ObjCBoxedExpr>(S)) {
+ if (isNumericLiteralExpression(BL->getSubExpr()))
+ os << "NSNumber literal is an object with a +0 retain count";
+ else {
+ const ObjCInterfaceDecl *BoxClass = nullptr;
+ if (const ObjCMethodDecl *Method = BL->getBoxingMethod())
+ BoxClass = Method->getClassInterface();
+
+ // We should always be able to find the boxing class interface,
+ // but consider this future-proofing.
+ if (BoxClass)
+ os << *BoxClass << " b";
+ else
+ os << "B";
+
+ os << "oxed expression produces an object with a +0 retain count";
+ }
+ }
+ else if (isa<ObjCIvarRefExpr>(S)) {
+ os << "Object loaded from instance variable";
+ }
+ else {
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ // Get the name of the callee (if it is available).
+ SVal X = CurrSt->getSValAsScalarOrLoc(CE->getCallee(), LCtx);
+ if (const FunctionDecl *FD = X.getAsFunctionDecl())
+ os << "Call to function '" << *FD << '\'';
+ else
+ os << "function call";
+ }
+ else {
+ assert(isa<ObjCMessageExpr>(S));
+ CallEventManager &Mgr = CurrSt->getStateManager().getCallEventManager();
+ CallEventRef<ObjCMethodCall> Call
+ = Mgr.getObjCMethodCall(cast<ObjCMessageExpr>(S), CurrSt, LCtx);
+
+ switch (Call->getMessageKind()) {
+ case OCM_Message:
+ os << "Method";
+ break;
+ case OCM_PropertyAccess:
+ os << "Property";
+ break;
+ case OCM_Subscript:
+ os << "Subscript";
+ break;
+ }
+ }
+
+ if (CurrV.getObjKind() == RetEffect::CF) {
+ os << " returns a Core Foundation object with a ";
+ }
+ else {
+ assert (CurrV.getObjKind() == RetEffect::ObjC);
+ os << " returns an Objective-C object with a ";
+ }
+
+ if (CurrV.isOwned()) {
+ os << "+1 retain count";
+
+ if (GCEnabled) {
+ assert(CurrV.getObjKind() == RetEffect::CF);
+ os << ". "
+ "Core Foundation objects are not automatically garbage collected.";
+ }
+ }
+ else {
+ assert (CurrV.isNotOwned());
+ os << "+0 retain count";
+ }
+ }
+
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+ N->getLocationContext());
+ return new PathDiagnosticEventPiece(Pos, os.str());
+ }
+
+ // Gather up the effects that were performed on the object at this
+ // program point
+ SmallVector<ArgEffect, 2> AEffects;
+
+ const ExplodedNode *OrigNode = BRC.getNodeResolver().getOriginalNode(N);
+ if (const RetainSummary *Summ = SummaryLog.lookup(OrigNode)) {
+ // We only have summaries attached to nodes after evaluating CallExpr and
+ // ObjCMessageExprs.
+ const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
+
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ // Iterate through the parameter expressions and see if the symbol
+ // was ever passed as an argument.
+ unsigned i = 0;
+
+ for (CallExpr::const_arg_iterator AI=CE->arg_begin(), AE=CE->arg_end();
+ AI!=AE; ++AI, ++i) {
+
+ // Retrieve the value of the argument. Is it the symbol
+ // we are interested in?
+ if (CurrSt->getSValAsScalarOrLoc(*AI, LCtx).getAsLocSymbol() != Sym)
+ continue;
+
+ // We have an argument. Get the effect!
+ AEffects.push_back(Summ->getArg(i));
+ }
+ }
+ else if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S)) {
+ if (const Expr *receiver = ME->getInstanceReceiver())
+ if (CurrSt->getSValAsScalarOrLoc(receiver, LCtx)
+ .getAsLocSymbol() == Sym) {
+ // The symbol we are tracking is the receiver.
+ AEffects.push_back(Summ->getReceiverEffect());
+ }
+ }
+ }
+
+ do {
+ // Get the previous type state.
+ RefVal PrevV = *PrevT;
+
+ // Specially handle -dealloc.
+ if (!GCEnabled && std::find(AEffects.begin(), AEffects.end(), Dealloc) !=
+ AEffects.end()) {
+ // Determine if the object's reference count was pushed to zero.
+ assert(!PrevV.hasSameState(CurrV) && "The state should have changed.");
+ // We may not have transitioned to 'release' if we hit an error.
+ // This case is handled elsewhere.
+ if (CurrV.getKind() == RefVal::Released) {
+ assert(CurrV.getCombinedCounts() == 0);
+ os << "Object released by directly sending the '-dealloc' message";
+ break;
+ }
+ }
+
+ // Specially handle CFMakeCollectable and friends.
+ if (std::find(AEffects.begin(), AEffects.end(), MakeCollectable) !=
+ AEffects.end()) {
+ // Get the name of the function.
+ const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
+ SVal X =
+ CurrSt->getSValAsScalarOrLoc(cast<CallExpr>(S)->getCallee(), LCtx);
+ const FunctionDecl *FD = X.getAsFunctionDecl();
+
+ if (GCEnabled) {
+ // Determine if the object's reference count was pushed to zero.
+ assert(!PrevV.hasSameState(CurrV) && "The state should have changed.");
+
+ os << "In GC mode a call to '" << *FD
+ << "' decrements an object's retain count and registers the "
+ "object with the garbage collector. ";
+
+ if (CurrV.getKind() == RefVal::Released) {
+ assert(CurrV.getCount() == 0);
+ os << "Since it now has a 0 retain count the object can be "
+ "automatically collected by the garbage collector.";
+ }
+ else
+ os << "An object must have a 0 retain count to be garbage collected. "
+ "After this call its retain count is +" << CurrV.getCount()
+ << '.';
+ }
+ else
+ os << "When GC is not enabled a call to '" << *FD
+ << "' has no effect on its argument.";
+
+ // Nothing more to say.
+ break;
+ }
+
+ // Determine if the typestate has changed.
+ if (!PrevV.hasSameState(CurrV))
+ switch (CurrV.getKind()) {
+ case RefVal::Owned:
+ case RefVal::NotOwned:
+ if (PrevV.getCount() == CurrV.getCount()) {
+ // Did an autorelease message get sent?
+ if (PrevV.getAutoreleaseCount() == CurrV.getAutoreleaseCount())
+ return nullptr;
+
+ assert(PrevV.getAutoreleaseCount() < CurrV.getAutoreleaseCount());
+ os << "Object autoreleased";
+ break;
+ }
+
+ if (PrevV.getCount() > CurrV.getCount())
+ os << "Reference count decremented.";
+ else
+ os << "Reference count incremented.";
+
+ if (unsigned Count = CurrV.getCount())
+ os << " The object now has a +" << Count << " retain count.";
+
+ if (PrevV.getKind() == RefVal::Released) {
+ assert(GCEnabled && CurrV.getCount() > 0);
+ os << " The object is not eligible for garbage collection until "
+ "the retain count reaches 0 again.";
+ }
+
+ break;
+
+ case RefVal::Released:
+ if (CurrV.getIvarAccessHistory() ==
+ RefVal::IvarAccessHistory::ReleasedAfterDirectAccess &&
+ CurrV.getIvarAccessHistory() != PrevV.getIvarAccessHistory()) {
+ os << "Strong instance variable relinquished. ";
+ }
+ os << "Object released.";
+ break;
+
+ case RefVal::ReturnedOwned:
+ // Autoreleases can be applied after marking a node ReturnedOwned.
+ if (CurrV.getAutoreleaseCount())
+ return nullptr;
+
+ os << "Object returned to caller as an owning reference (single "
+ "retain count transferred to caller)";
+ break;
+
+ case RefVal::ReturnedNotOwned:
+ os << "Object returned to caller with a +0 retain count";
+ break;
+
+ default:
+ return nullptr;
+ }
+
+ // Emit any remaining diagnostics for the argument effects (if any).
+ for (SmallVectorImpl<ArgEffect>::iterator I=AEffects.begin(),
+ E=AEffects.end(); I != E; ++I) {
+
+ // A bunch of things have alternate behavior under GC.
+ if (GCEnabled)
+ switch (*I) {
+ default: break;
+ case Autorelease:
+ os << "In GC mode an 'autorelease' has no effect.";
+ continue;
+ case IncRefMsg:
+ os << "In GC mode the 'retain' message has no effect.";
+ continue;
+ case DecRefMsg:
+ os << "In GC mode the 'release' message has no effect.";
+ continue;
+ }
+ }
+ } while (0);
+
+ if (os.str().empty())
+ return nullptr; // We have nothing to say!
+
+ const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+ N->getLocationContext());
+ PathDiagnosticPiece *P = new PathDiagnosticEventPiece(Pos, os.str());
+
+ // Add the range by scanning the children of the statement for any bindings
+ // to Sym.
+ for (const Stmt *Child : S->children())
+ if (const Expr *Exp = dyn_cast_or_null<Expr>(Child))
+ if (CurrSt->getSValAsScalarOrLoc(Exp, LCtx).getAsLocSymbol() == Sym) {
+ P->addRange(Exp->getSourceRange());
+ break;
+ }
+
+ return P;
+}
+
+namespace {
+// Find the first node in the current function context that referred to the
+// tracked symbol and the memory location that value was stored to. Note, the
+// value is only reported if the allocation occurred in the same function as
+// the leak. The function can also return a location context, which should be
+// treated as interesting.
+struct AllocationInfo {
+ const ExplodedNode* N;
+ const MemRegion *R;
+ const LocationContext *InterestingMethodContext;
+ AllocationInfo(const ExplodedNode *InN,
+ const MemRegion *InR,
+ const LocationContext *InInterestingMethodContext) :
+ N(InN), R(InR), InterestingMethodContext(InInterestingMethodContext) {}
+};
+} // end anonymous namespace
+
+static AllocationInfo
+GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
+ SymbolRef Sym) {
+ const ExplodedNode *AllocationNode = N;
+ const ExplodedNode *AllocationNodeInCurrentOrParentContext = N;
+ const MemRegion *FirstBinding = nullptr;
+ const LocationContext *LeakContext = N->getLocationContext();
+
+ // The location context of the init method called on the leaked object, if
+ // available.
+ const LocationContext *InitMethodContext = nullptr;
+
+ while (N) {
+ ProgramStateRef St = N->getState();
+ const LocationContext *NContext = N->getLocationContext();
+
+ if (!getRefBinding(St, Sym))
+ break;
+
+ StoreManager::FindUniqueBinding FB(Sym);
+ StateMgr.iterBindings(St, FB);
+
+ if (FB) {
+ const MemRegion *R = FB.getRegion();
+ const VarRegion *VR = R->getBaseRegion()->getAs<VarRegion>();
+ // Do not show local variables belonging to a function other than
+ // where the error is reported.
+ if (!VR || VR->getStackFrame() == LeakContext->getCurrentStackFrame())
+ FirstBinding = R;
+ }
+
+ // AllocationNode is the last node in which the symbol was tracked.
+ AllocationNode = N;
+
+ // AllocationNodeInCurrentOrParentContext is the last node in the current
+ // or parent context in which the symbol was tracked.
+ //
+ // Note that the allocation site might be in the parent context. For example,
+ // the case where an allocation happens in a block that captures a reference
+ // to it and that reference is overwritten/dropped by another call to
+ // the block.
+ if (NContext == LeakContext || NContext->isParentOf(LeakContext))
+ AllocationNodeInCurrentOrParentContext = N;
+
+ // Find the last init that was called on the given symbol and store the
+ // init method's location context.
+ if (!InitMethodContext)
+ if (Optional<CallEnter> CEP = N->getLocation().getAs<CallEnter>()) {
+ const Stmt *CE = CEP->getCallExpr();
+ if (const ObjCMessageExpr *ME = dyn_cast_or_null<ObjCMessageExpr>(CE)) {
+ const Stmt *RecExpr = ME->getInstanceReceiver();
+ if (RecExpr) {
+ SVal RecV = St->getSVal(RecExpr, NContext);
+ if (ME->getMethodFamily() == OMF_init && RecV.getAsSymbol() == Sym)
+ InitMethodContext = CEP->getCalleeContext();
+ }
+ }
+ }
+
+ N = N->pred_empty() ? nullptr : *(N->pred_begin());
+ }
+
+ // If we are reporting a leak of the object that was allocated with alloc,
+ // mark its init method as interesting.
+ const LocationContext *InterestingMethodContext = nullptr;
+ if (InitMethodContext) {
+ const ProgramPoint AllocPP = AllocationNode->getLocation();
+ if (Optional<StmtPoint> SP = AllocPP.getAs<StmtPoint>())
+ if (const ObjCMessageExpr *ME = SP->getStmtAs<ObjCMessageExpr>())
+ if (ME->getMethodFamily() == OMF_alloc)
+ InterestingMethodContext = InitMethodContext;
+ }
+
+ // If allocation happened in a function different from the leak node context,
+ // do not report the binding.
+ assert(N && "Could not find allocation node");
+ if (N->getLocationContext() != LeakContext) {
+ FirstBinding = nullptr;
+ }
+
+ return AllocationInfo(AllocationNodeInCurrentOrParentContext,
+ FirstBinding,
+ InterestingMethodContext);
+}
+
+std::unique_ptr<PathDiagnosticPiece>
+CFRefReportVisitor::getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *EndN, BugReport &BR) {
+ BR.markInteresting(Sym);
+ return BugReporterVisitor::getDefaultEndPath(BRC, EndN, BR);
+}
+
+std::unique_ptr<PathDiagnosticPiece>
+CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *EndN, BugReport &BR) {
+
+ // Tell the BugReporterContext to report cases when the tracked symbol is
+ // assigned to different variables, etc.
+ BR.markInteresting(Sym);
+
+ // We are reporting a leak. Walk up the graph to get to the first node where
+ // the symbol appeared, and also get the first VarDecl that tracked object
+ // is stored to.
+ AllocationInfo AllocI =
+ GetAllocationSite(BRC.getStateManager(), EndN, Sym);
+
+ const MemRegion* FirstBinding = AllocI.R;
+ BR.markInteresting(AllocI.InterestingMethodContext);
+
+ SourceManager& SM = BRC.getSourceManager();
+
+ // Compute an actual location for the leak. Sometimes a leak doesn't
+ // occur at an actual statement (e.g., transition between blocks; end
+ // of function) so we need to walk the graph and compute a real location.
+ const ExplodedNode *LeakN = EndN;
+ PathDiagnosticLocation L = PathDiagnosticLocation::createEndOfPath(LeakN, SM);
+
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "Object leaked: ";
+
+ if (FirstBinding) {
+ os << "object allocated and stored into '"
+ << FirstBinding->getString() << '\'';
+ }
+ else
+ os << "allocated object";
+
+ // Get the retain count.
+ const RefVal* RV = getRefBinding(EndN->getState(), Sym);
+ assert(RV);
+
+ if (RV->getKind() == RefVal::ErrorLeakReturned) {
+ // FIXME: Per comments in rdar://6320065, "create" only applies to CF
+ // objects. Only "copy", "alloc", "retain" and "new" transfer ownership
+ // to the caller for NS objects.
+ const Decl *D = &EndN->getCodeDecl();
+
+ os << (isa<ObjCMethodDecl>(D) ? " is returned from a method "
+ : " is returned from a function ");
+
+ if (D->hasAttr<CFReturnsNotRetainedAttr>())
+ os << "that is annotated as CF_RETURNS_NOT_RETAINED";
+ else if (D->hasAttr<NSReturnsNotRetainedAttr>())
+ os << "that is annotated as NS_RETURNS_NOT_RETAINED";
+ else {
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ os << "whose name ('" << MD->getSelector().getAsString()
+ << "') does not start with 'copy', 'mutableCopy', 'alloc' or 'new'."
+ " This violates the naming convention rules"
+ " given in the Memory Management Guide for Cocoa";
+ }
+ else {
+ const FunctionDecl *FD = cast<FunctionDecl>(D);
+ os << "whose name ('" << *FD
+ << "') does not contain 'Copy' or 'Create'. This violates the naming"
+ " convention rules given in the Memory Management Guide for Core"
+ " Foundation";
+ }
+ }
+ }
+ else if (RV->getKind() == RefVal::ErrorGCLeakReturned) {
+ const ObjCMethodDecl &MD = cast<ObjCMethodDecl>(EndN->getCodeDecl());
+ os << " and returned from method '" << MD.getSelector().getAsString()
+ << "' is potentially leaked when using garbage collection. Callers "
+ "of this method do not expect a returned object with a +1 retain "
+ "count since they expect the object to be managed by the garbage "
+ "collector";
+ }
+ else
+ os << " is not referenced later in this execution path and has a retain "
+ "count of +" << RV->getCount();
+
+ return llvm::make_unique<PathDiagnosticEventPiece>(L, os.str());
+}
+
+CFRefLeakReport::CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts,
+ bool GCEnabled, const SummaryLogTy &Log,
+ ExplodedNode *n, SymbolRef sym,
+ CheckerContext &Ctx,
+ bool IncludeAllocationLine)
+ : CFRefReport(D, LOpts, GCEnabled, Log, n, sym, false) {
+
+ // Most bug reports are cached at the location where they occurred.
+ // With leaks, we want to unique them by the location where they were
+ // allocated, and only report a single path. To do this, we need to find
+ // the allocation site of a piece of tracked memory, which we do via a
+ // call to GetAllocationSite. This will walk the ExplodedGraph backwards.
+ // Note that this is *not* the trimmed graph; we are guaranteed, however,
+ // that all ancestor nodes that represent the allocation site have the
+ // same SourceLocation.
+ const ExplodedNode *AllocNode = nullptr;
+
+ const SourceManager& SMgr = Ctx.getSourceManager();
+
+ AllocationInfo AllocI =
+ GetAllocationSite(Ctx.getStateManager(), getErrorNode(), sym);
+
+ AllocNode = AllocI.N;
+ AllocBinding = AllocI.R;
+ markInteresting(AllocI.InterestingMethodContext);
+
+ // Get the SourceLocation for the allocation site.
+ // FIXME: This will crash the analyzer if an allocation comes from an
+ // implicit call (ex: a destructor call).
+ // (Currently there are no such allocations in Cocoa, though.)
+ const Stmt *AllocStmt = nullptr;
+ ProgramPoint P = AllocNode->getLocation();
+ if (Optional<CallExitEnd> Exit = P.getAs<CallExitEnd>())
+ AllocStmt = Exit->getCalleeContext()->getCallSite();
+ else
+ AllocStmt = P.castAs<PostStmt>().getStmt();
+ assert(AllocStmt && "Cannot find allocation statement");
+
+ PathDiagnosticLocation AllocLocation =
+ PathDiagnosticLocation::createBegin(AllocStmt, SMgr,
+ AllocNode->getLocationContext());
+ Location = AllocLocation;
+
+ // Set uniquing info, which will be used to unique the bug reports. The
+ // leaks should be uniqued on the allocation site.
+ UniqueingLocation = AllocLocation;
+ UniqueingDecl = AllocNode->getLocationContext()->getDecl();
+
+ // Fill in the description of the bug.
+ Description.clear();
+ llvm::raw_string_ostream os(Description);
+ os << "Potential leak ";
+ if (GCEnabled)
+ os << "(when using garbage collection) ";
+ os << "of an object";
+
+ if (AllocBinding) {
+ os << " stored into '" << AllocBinding->getString() << '\'';
+ if (IncludeAllocationLine) {
+ FullSourceLoc SL(AllocStmt->getLocStart(), Ctx.getSourceManager());
+ os << " (allocated on line " << SL.getSpellingLineNumber() << ")";
+ }
+ }
+
+ addVisitor(llvm::make_unique<CFRefLeakReportVisitor>(sym, GCEnabled, Log));
+}
+
+//===----------------------------------------------------------------------===//
+// Main checker logic.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class RetainCountChecker
+ : public Checker< check::Bind,
+ check::DeadSymbols,
+ check::EndAnalysis,
+ check::EndFunction,
+ check::PostStmt<BlockExpr>,
+ check::PostStmt<CastExpr>,
+ check::PostStmt<ObjCArrayLiteral>,
+ check::PostStmt<ObjCDictionaryLiteral>,
+ check::PostStmt<ObjCBoxedExpr>,
+ check::PostStmt<ObjCIvarRefExpr>,
+ check::PostCall,
+ check::PreStmt<ReturnStmt>,
+ check::RegionChanges,
+ eval::Assume,
+ eval::Call > {
+ mutable std::unique_ptr<CFRefBug> useAfterRelease, releaseNotOwned;
+ mutable std::unique_ptr<CFRefBug> deallocGC, deallocNotOwned;
+ mutable std::unique_ptr<CFRefBug> overAutorelease, returnNotOwnedForOwned;
+ mutable std::unique_ptr<CFRefBug> leakWithinFunction, leakAtReturn;
+ mutable std::unique_ptr<CFRefBug> leakWithinFunctionGC, leakAtReturnGC;
+
+ typedef llvm::DenseMap<SymbolRef, const CheckerProgramPointTag *> SymbolTagMap;
+
+ // This map is only used to ensure proper deletion of any allocated tags.
+ mutable SymbolTagMap DeadSymbolTags;
+
+ mutable std::unique_ptr<RetainSummaryManager> Summaries;
+ mutable std::unique_ptr<RetainSummaryManager> SummariesGC;
+ mutable SummaryLogTy SummaryLog;
+ mutable bool ShouldResetSummaryLog;
+
+ /// Optional setting to indicate if leak reports should include
+ /// the allocation line.
+ mutable bool IncludeAllocationLine;
+
+public:
+ RetainCountChecker(AnalyzerOptions &AO)
+ : ShouldResetSummaryLog(false),
+ IncludeAllocationLine(shouldIncludeAllocationSiteInLeakDiagnostics(AO)) {}
+
+ ~RetainCountChecker() override { DeleteContainerSeconds(DeadSymbolTags); }
+
+ void checkEndAnalysis(ExplodedGraph &G, BugReporter &BR,
+ ExprEngine &Eng) const {
+ // FIXME: This is a hack to make sure the summary log gets cleared between
+ // analyses of different code bodies.
+ //
+ // Why is this necessary? Because a checker's lifetime is tied to a
+ // translation unit, but an ExplodedGraph's lifetime is just a code body.
+ // Once in a blue moon, a new ExplodedNode will have the same address as an
+ // old one with an associated summary, and the bug report visitor gets very
+ // confused. (To make things worse, the summary lifetime is currently also
+ // tied to a code body, so we get a crash instead of incorrect results.)
+ //
+ // Why is this a bad solution? Because if the lifetime of the ExplodedGraph
+ // changes, things will start going wrong again. Really the lifetime of this
+ // log needs to be tied to either the specific nodes in it or the entire
+ // ExplodedGraph, not to a specific part of the code being analyzed.
+ //
+ // (Also, having stateful local data means that the same checker can't be
+ // used from multiple threads, but a lot of checkers have incorrect
+ // assumptions about that anyway. So that wasn't a priority at the time of
+ // this fix.)
+ //
+ // This happens at the end of analysis, but bug reports are emitted /after/
+ // this point. So we can't just clear the summary log now. Instead, we mark
+ // that the next time we access the summary log, it should be cleared.
+
+ // If we never reset the summary log during /this/ code body analysis,
+ // there were no new summaries. There might still have been summaries from
+ // the /last/ analysis, so clear them out to make sure the bug report
+ // visitors don't get confused.
+ if (ShouldResetSummaryLog)
+ SummaryLog.clear();
+
+ ShouldResetSummaryLog = !SummaryLog.empty();
+ }
+
+ CFRefBug *getLeakWithinFunctionBug(const LangOptions &LOpts,
+ bool GCEnabled) const {
+ if (GCEnabled) {
+ if (!leakWithinFunctionGC)
+ leakWithinFunctionGC.reset(new Leak(this, "Leak of object when using "
+ "garbage collection"));
+ return leakWithinFunctionGC.get();
+ } else {
+ if (!leakWithinFunction) {
+ if (LOpts.getGC() == LangOptions::HybridGC) {
+ leakWithinFunction.reset(new Leak(this,
+ "Leak of object when not using "
+ "garbage collection (GC) in "
+ "dual GC/non-GC code"));
+ } else {
+ leakWithinFunction.reset(new Leak(this, "Leak"));
+ }
+ }
+ return leakWithinFunction.get();
+ }
+ }
+
+ CFRefBug *getLeakAtReturnBug(const LangOptions &LOpts, bool GCEnabled) const {
+ if (GCEnabled) {
+ if (!leakAtReturnGC)
+ leakAtReturnGC.reset(new Leak(this,
+ "Leak of returned object when using "
+ "garbage collection"));
+ return leakAtReturnGC.get();
+ } else {
+ if (!leakAtReturn) {
+ if (LOpts.getGC() == LangOptions::HybridGC) {
+ leakAtReturn.reset(new Leak(this,
+ "Leak of returned object when not using "
+ "garbage collection (GC) in dual "
+ "GC/non-GC code"));
+ } else {
+ leakAtReturn.reset(new Leak(this, "Leak of returned object"));
+ }
+ }
+ return leakAtReturn.get();
+ }
+ }
+
+ RetainSummaryManager &getSummaryManager(ASTContext &Ctx,
+ bool GCEnabled) const {
+ // FIXME: We don't support ARC being turned on and off during one analysis.
+ // (nor, for that matter, do we support changing ASTContexts)
+ bool ARCEnabled = (bool)Ctx.getLangOpts().ObjCAutoRefCount;
+ if (GCEnabled) {
+ if (!SummariesGC)
+ SummariesGC.reset(new RetainSummaryManager(Ctx, true, ARCEnabled));
+ else
+ assert(SummariesGC->isARCEnabled() == ARCEnabled);
+ return *SummariesGC;
+ } else {
+ if (!Summaries)
+ Summaries.reset(new RetainSummaryManager(Ctx, false, ARCEnabled));
+ else
+ assert(Summaries->isARCEnabled() == ARCEnabled);
+ return *Summaries;
+ }
+ }
+
+ RetainSummaryManager &getSummaryManager(CheckerContext &C) const {
+ return getSummaryManager(C.getASTContext(), C.isObjCGCEnabled());
+ }
+
+ void printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const override;
+
+ void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
+ void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
+ void checkPostStmt(const CastExpr *CE, CheckerContext &C) const;
+
+ void checkPostStmt(const ObjCArrayLiteral *AL, CheckerContext &C) const;
+ void checkPostStmt(const ObjCDictionaryLiteral *DL, CheckerContext &C) const;
+ void checkPostStmt(const ObjCBoxedExpr *BE, CheckerContext &C) const;
+
+ void checkPostStmt(const ObjCIvarRefExpr *IRE, CheckerContext &C) const;
+
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+
+ void checkSummary(const RetainSummary &Summ, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void processSummaryOfInlined(const RetainSummary &Summ,
+ const CallEvent &Call,
+ CheckerContext &C) const;
+
+ bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+
+ ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond,
+ bool Assumption) const;
+
+ ProgramStateRef
+ checkRegionChanges(ProgramStateRef state,
+ const InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallEvent *Call) const;
+
+ bool wantsRegionChangeUpdate(ProgramStateRef state) const {
+ return true;
+ }
+
+ void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
+ void checkReturnWithRetEffect(const ReturnStmt *S, CheckerContext &C,
+ ExplodedNode *Pred, RetEffect RE, RefVal X,
+ SymbolRef Sym, ProgramStateRef state) const;
+
+ void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+ void checkEndFunction(CheckerContext &C) const;
+
+ ProgramStateRef updateSymbol(ProgramStateRef state, SymbolRef sym,
+ RefVal V, ArgEffect E, RefVal::Kind &hasErr,
+ CheckerContext &C) const;
+
+ void processNonLeakError(ProgramStateRef St, SourceRange ErrorRange,
+ RefVal::Kind ErrorKind, SymbolRef Sym,
+ CheckerContext &C) const;
+
+ void processObjCLiterals(CheckerContext &C, const Expr *Ex) const;
+
+ const ProgramPointTag *getDeadSymbolTag(SymbolRef sym) const;
+
+ ProgramStateRef handleSymbolDeath(ProgramStateRef state,
+ SymbolRef sid, RefVal V,
+ SmallVectorImpl<SymbolRef> &Leaked) const;
+
+ ProgramStateRef
+ handleAutoreleaseCounts(ProgramStateRef state, ExplodedNode *Pred,
+ const ProgramPointTag *Tag, CheckerContext &Ctx,
+ SymbolRef Sym, RefVal V) const;
+
+ ExplodedNode *processLeaks(ProgramStateRef state,
+ SmallVectorImpl<SymbolRef> &Leaked,
+ CheckerContext &Ctx,
+ ExplodedNode *Pred = nullptr) const;
+};
+} // end anonymous namespace
+
+namespace {
+class StopTrackingCallback final : public SymbolVisitor {
+ ProgramStateRef state;
+public:
+ StopTrackingCallback(ProgramStateRef st) : state(st) {}
+ ProgramStateRef getState() const { return state; }
+
+ bool VisitSymbol(SymbolRef sym) override {
+ state = state->remove<RefBindings>(sym);
+ return true;
+ }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Handle statements that may have an effect on refcounts.
+//===----------------------------------------------------------------------===//
+
+void RetainCountChecker::checkPostStmt(const BlockExpr *BE,
+ CheckerContext &C) const {
+
+  // Scan the BlockDeclRefExprs for any object the retain count checker
+  // may be tracking.
+ if (!BE->getBlockDecl()->hasCaptures())
+ return;
+
+ ProgramStateRef state = C.getState();
+ const BlockDataRegion *R =
+ cast<BlockDataRegion>(state->getSVal(BE,
+ C.getLocationContext()).getAsRegion());
+
+ BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
+ E = R->referenced_vars_end();
+
+ if (I == E)
+ return;
+
+ // FIXME: For now we invalidate the tracking of all symbols passed to blocks
+ // via captured variables, even though captured variables result in a copy
+ // and in implicit increment/decrement of a retain count.
+ SmallVector<const MemRegion*, 10> Regions;
+ const LocationContext *LC = C.getLocationContext();
+ MemRegionManager &MemMgr = C.getSValBuilder().getRegionManager();
+
+ for ( ; I != E; ++I) {
+ const VarRegion *VR = I.getCapturedRegion();
+ if (VR->getSuperRegion() == R) {
+ VR = MemMgr.getVarRegion(VR->getDecl(), LC);
+ }
+ Regions.push_back(VR);
+ }
+
+ state =
+ state->scanReachableSymbols<StopTrackingCallback>(Regions.data(),
+ Regions.data() + Regions.size()).getState();
+ C.addTransition(state);
+}
+
+void RetainCountChecker::checkPostStmt(const CastExpr *CE,
+ CheckerContext &C) const {
+ const ObjCBridgedCastExpr *BE = dyn_cast<ObjCBridgedCastExpr>(CE);
+ if (!BE)
+ return;
+
+ ArgEffect AE = IncRef;
+
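+  // For reference, the bridge kinds below correspond to source-level casts
+  // such as (illustrative):
+  //   id x = (__bridge id)cf;                  // OBC_Bridge: no ownership change
+  //   id y = (__bridge_transfer id)cf;         // OBC_BridgeTransfer: CF side -1
+  //   CFTypeRef z = (__bridge_retained CFTypeRef)obj; // OBC_BridgeRetained: +1
+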
+ switch (BE->getBridgeKind()) {
+ case clang::OBC_Bridge:
+ // Do nothing.
+ return;
+ case clang::OBC_BridgeRetained:
+ AE = IncRef;
+ break;
+ case clang::OBC_BridgeTransfer:
+ AE = DecRefBridgedTransferred;
+ break;
+ }
+
+ ProgramStateRef state = C.getState();
+ SymbolRef Sym = state->getSVal(CE, C.getLocationContext()).getAsLocSymbol();
+ if (!Sym)
+ return;
+ const RefVal* T = getRefBinding(state, Sym);
+ if (!T)
+ return;
+
+ RefVal::Kind hasErr = (RefVal::Kind) 0;
+ state = updateSymbol(state, Sym, *T, AE, hasErr, C);
+
+ if (hasErr) {
+ // FIXME: If we get an error during a bridge cast, should we report it?
+ return;
+ }
+
+ C.addTransition(state);
+}
+
+void RetainCountChecker::processObjCLiterals(CheckerContext &C,
+ const Expr *Ex) const {
+ ProgramStateRef state = C.getState();
+ const ExplodedNode *pred = C.getPredecessor();
+ for (const Stmt *Child : Ex->children()) {
+ SVal V = state->getSVal(Child, pred->getLocationContext());
+ if (SymbolRef sym = V.getAsSymbol())
+ if (const RefVal* T = getRefBinding(state, sym)) {
+ RefVal::Kind hasErr = (RefVal::Kind) 0;
+ state = updateSymbol(state, sym, *T, MayEscape, hasErr, C);
+ if (hasErr) {
+ processNonLeakError(state, Child->getSourceRange(), hasErr, sym, C);
+ return;
+ }
+ }
+ }
+
+ // Return the object as autoreleased.
+ // RetEffect RE = RetEffect::MakeNotOwned(RetEffect::ObjC);
+ if (SymbolRef sym =
+ state->getSVal(Ex, pred->getLocationContext()).getAsSymbol()) {
+ QualType ResultTy = Ex->getType();
+ state = setRefBinding(state, sym,
+ RefVal::makeNotOwned(RetEffect::ObjC, ResultTy));
+ }
+
+ C.addTransition(state);
+}
+
+void RetainCountChecker::checkPostStmt(const ObjCArrayLiteral *AL,
+ CheckerContext &C) const {
+  // Apply the 'MayEscape' effect to all values.
+ processObjCLiterals(C, AL);
+}
+
+void RetainCountChecker::checkPostStmt(const ObjCDictionaryLiteral *DL,
+ CheckerContext &C) const {
+  // Apply the 'MayEscape' effect to all keys and values.
+ processObjCLiterals(C, DL);
+}
+
+void RetainCountChecker::checkPostStmt(const ObjCBoxedExpr *Ex,
+ CheckerContext &C) const {
+ const ExplodedNode *Pred = C.getPredecessor();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ ProgramStateRef State = Pred->getState();
+
+ if (SymbolRef Sym = State->getSVal(Ex, LCtx).getAsSymbol()) {
+ QualType ResultTy = Ex->getType();
+ State = setRefBinding(State, Sym,
+ RefVal::makeNotOwned(RetEffect::ObjC, ResultTy));
+ }
+
+ C.addTransition(State);
+}
+
+static bool wasLoadedFromIvar(SymbolRef Sym) {
+ if (auto DerivedVal = dyn_cast<SymbolDerived>(Sym))
+ return isa<ObjCIvarRegion>(DerivedVal->getRegion());
+ if (auto RegionVal = dyn_cast<SymbolRegionValue>(Sym))
+ return isa<ObjCIvarRegion>(RegionVal->getRegion());
+ return false;
+}
+
+void RetainCountChecker::checkPostStmt(const ObjCIvarRefExpr *IRE,
+ CheckerContext &C) const {
+ Optional<Loc> IVarLoc = C.getSVal(IRE).getAs<Loc>();
+ if (!IVarLoc)
+ return;
+
+ ProgramStateRef State = C.getState();
+ SymbolRef Sym = State->getSVal(*IVarLoc).getAsSymbol();
+ if (!Sym || !wasLoadedFromIvar(Sym))
+ return;
+
+ // Accessing an ivar directly is unusual. If we've done that, be more
+ // forgiving about what the surrounding code is allowed to do.
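+  //
+  // For example (illustrative):
+  //   CFTypeRef ref = _myIvar;  // direct ivar load; may be a +1 strong ivar
+  //   CFRelease(ref);           // tolerated rather than flagged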
+
+ QualType Ty = Sym->getType();
+ RetEffect::ObjKind Kind;
+ if (Ty->isObjCRetainableType())
+ Kind = RetEffect::ObjC;
+ else if (coreFoundation::isCFObjectRef(Ty))
+ Kind = RetEffect::CF;
+ else
+ return;
+
+ // If the value is already known to be nil, don't bother tracking it.
+ ConstraintManager &CMgr = State->getConstraintManager();
+ if (CMgr.isNull(State, Sym).isConstrainedTrue())
+ return;
+
+ if (const RefVal *RV = getRefBinding(State, Sym)) {
+ // If we've seen this symbol before, or we're only seeing it now because
+ // of something the analyzer has synthesized, don't do anything.
+ if (RV->getIvarAccessHistory() != RefVal::IvarAccessHistory::None ||
+ isSynthesizedAccessor(C.getStackFrame())) {
+ return;
+ }
+
+ // Note that this value has been loaded from an ivar.
+ C.addTransition(setRefBinding(State, Sym, RV->withIvarAccess()));
+ return;
+ }
+
+ RefVal PlusZero = RefVal::makeNotOwned(Kind, Ty);
+
+ // In a synthesized accessor, the effective retain count is +0.
+ if (isSynthesizedAccessor(C.getStackFrame())) {
+ C.addTransition(setRefBinding(State, Sym, PlusZero));
+ return;
+ }
+
+ State = setRefBinding(State, Sym, PlusZero.withIvarAccess());
+ C.addTransition(State);
+}
+
+void RetainCountChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ RetainSummaryManager &Summaries = getSummaryManager(C);
+ const RetainSummary *Summ = Summaries.getSummary(Call, C.getState());
+
+ if (C.wasInlined) {
+ processSummaryOfInlined(*Summ, Call, C);
+ return;
+ }
+ checkSummary(*Summ, Call, C);
+}
+
+/// GetReturnType - Used to get the return type of a message expression or
+/// function call with the intention of affixing that type to a tracked symbol.
+/// While the return type can be queried directly from RetEx, when
+/// invoking class methods we augment the return type to be that of
+/// a pointer to the class (as opposed to it just being id).
+// FIXME: We may be able to do this with related result types instead.
+// This function is probably overestimating.
+static QualType GetReturnType(const Expr *RetE, ASTContext &Ctx) {
+ QualType RetTy = RetE->getType();
+  // If RetE is not a message expression, just return its type.
+  // If RetE is a message expression, return its type only if it is something
+  // more specific than id.
+ if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(RetE))
+ if (const ObjCObjectPointerType *PT = RetTy->getAs<ObjCObjectPointerType>())
+ if (PT->isObjCQualifiedIdType() || PT->isObjCIdType() ||
+ PT->isObjCClassType()) {
+ // At this point we know the return type of the message expression is
+ // id, id<...>, or Class. If we have an ObjCInterfaceDecl, we know this
+ // is a call to a class method whose type we can resolve. In such
+ // cases, promote the return type to XXX* (where XXX is the class).
+ const ObjCInterfaceDecl *D = ME->getReceiverInterface();
+ return !D ? RetTy :
+ Ctx.getObjCObjectPointerType(Ctx.getObjCInterfaceType(D));
+ }
+
+ return RetTy;
+}
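+
+// For example, given the message send '[NSString alloc]' (illustrative), the
+// static return type is 'id', but because the receiver interface is known to
+// be NSString, GetReturnType promotes the result type to 'NSString *'.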
+
+// Even when a function is inlined, we don't always get exact retain-count
+// modeling of it. For example, we still need to stop tracking the symbols
+// which were marked with StopTrackingHard.
+void RetainCountChecker::processSummaryOfInlined(const RetainSummary &Summ,
+ const CallEvent &CallOrMsg,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+
+ // Evaluate the effect of the arguments.
+ for (unsigned idx = 0, e = CallOrMsg.getNumArgs(); idx != e; ++idx) {
+ if (Summ.getArg(idx) == StopTrackingHard) {
+ SVal V = CallOrMsg.getArgSVal(idx);
+ if (SymbolRef Sym = V.getAsLocSymbol()) {
+ state = removeRefBinding(state, Sym);
+ }
+ }
+ }
+
+ // Evaluate the effect on the message receiver.
+ const ObjCMethodCall *MsgInvocation = dyn_cast<ObjCMethodCall>(&CallOrMsg);
+ if (MsgInvocation) {
+ if (SymbolRef Sym = MsgInvocation->getReceiverSVal().getAsLocSymbol()) {
+ if (Summ.getReceiverEffect() == StopTrackingHard) {
+ state = removeRefBinding(state, Sym);
+ }
+ }
+ }
+
+ // Consult the summary for the return value.
+ RetEffect RE = Summ.getRetEffect();
+ if (RE.getKind() == RetEffect::NoRetHard) {
+ SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol();
+ if (Sym)
+ state = removeRefBinding(state, Sym);
+ }
+
+ C.addTransition(state);
+}
+
+static ProgramStateRef updateOutParameter(ProgramStateRef State,
+ SVal ArgVal,
+ ArgEffect Effect) {
+ auto *ArgRegion = dyn_cast_or_null<TypedValueRegion>(ArgVal.getAsRegion());
+ if (!ArgRegion)
+ return State;
+
+ QualType PointeeTy = ArgRegion->getValueType();
+ if (!coreFoundation::isCFObjectRef(PointeeTy))
+ return State;
+
+ SVal PointeeVal = State->getSVal(ArgRegion);
+ SymbolRef Pointee = PointeeVal.getAsLocSymbol();
+ if (!Pointee)
+ return State;
+
+ switch (Effect) {
+ case UnretainedOutParameter:
+ State = setRefBinding(State, Pointee,
+ RefVal::makeNotOwned(RetEffect::CF, PointeeTy));
+ break;
+ case RetainedOutParameter:
+ // Do nothing. Retained out parameters will either point to a +1 reference
+ // or NULL, but the way you check for failure differs depending on the API.
+ // Consequently, we don't have a good way to track them yet.
+ break;
+
+ default:
+ llvm_unreachable("only for out parameters");
+ }
+
+ return State;
+}
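+
+// Illustrative out-parameter pattern handled above (hypothetical API):
+//   CFErrorRef error;
+//   Frobnicate(&error);  // an UnretainedOutParameter leaves '*error' at +0;
+//                        // a RetainedOutParameter is left untracked for now.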
+
+void RetainCountChecker::checkSummary(const RetainSummary &Summ,
+ const CallEvent &CallOrMsg,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+
+ // Evaluate the effect of the arguments.
+ RefVal::Kind hasErr = (RefVal::Kind) 0;
+ SourceRange ErrorRange;
+ SymbolRef ErrorSym = nullptr;
+
+ for (unsigned idx = 0, e = CallOrMsg.getNumArgs(); idx != e; ++idx) {
+ SVal V = CallOrMsg.getArgSVal(idx);
+
+ ArgEffect Effect = Summ.getArg(idx);
+ if (Effect == RetainedOutParameter || Effect == UnretainedOutParameter) {
+ state = updateOutParameter(state, V, Effect);
+ } else if (SymbolRef Sym = V.getAsLocSymbol()) {
+ if (const RefVal *T = getRefBinding(state, Sym)) {
+ state = updateSymbol(state, Sym, *T, Effect, hasErr, C);
+ if (hasErr) {
+ ErrorRange = CallOrMsg.getArgSourceRange(idx);
+ ErrorSym = Sym;
+ break;
+ }
+ }
+ }
+ }
+
+ // Evaluate the effect on the message receiver.
+ bool ReceiverIsTracked = false;
+ if (!hasErr) {
+ const ObjCMethodCall *MsgInvocation = dyn_cast<ObjCMethodCall>(&CallOrMsg);
+ if (MsgInvocation) {
+ if (SymbolRef Sym = MsgInvocation->getReceiverSVal().getAsLocSymbol()) {
+ if (const RefVal *T = getRefBinding(state, Sym)) {
+ ReceiverIsTracked = true;
+ state = updateSymbol(state, Sym, *T, Summ.getReceiverEffect(),
+ hasErr, C);
+ if (hasErr) {
+ ErrorRange = MsgInvocation->getOriginExpr()->getReceiverRange();
+ ErrorSym = Sym;
+ }
+ }
+ }
+ }
+ }
+
+ // Process any errors.
+ if (hasErr) {
+ processNonLeakError(state, ErrorRange, hasErr, ErrorSym, C);
+ return;
+ }
+
+ // Consult the summary for the return value.
+ RetEffect RE = Summ.getRetEffect();
+
+ if (RE.getKind() == RetEffect::OwnedWhenTrackedReceiver) {
+ if (ReceiverIsTracked)
+ RE = getSummaryManager(C).getObjAllocRetEffect();
+ else
+ RE = RetEffect::MakeNoRet();
+ }
+
+ switch (RE.getKind()) {
+ default:
+ llvm_unreachable("Unhandled RetEffect.");
+
+ case RetEffect::NoRet:
+ case RetEffect::NoRetHard:
+ // No work necessary.
+ break;
+
+ case RetEffect::OwnedAllocatedSymbol:
+ case RetEffect::OwnedSymbol: {
+ SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol();
+ if (!Sym)
+ break;
+
+ // Use the result type from the CallEvent as it automatically adjusts
+ // for methods/functions that return references.
+ QualType ResultTy = CallOrMsg.getResultType();
+ state = setRefBinding(state, Sym, RefVal::makeOwned(RE.getObjKind(),
+ ResultTy));
+
+ // FIXME: Add a flag to the checker where allocations are assumed to
+ // *not* fail.
+ break;
+ }
+
+ case RetEffect::GCNotOwnedSymbol:
+ case RetEffect::NotOwnedSymbol: {
+ const Expr *Ex = CallOrMsg.getOriginExpr();
+ SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol();
+ if (!Sym)
+ break;
+ assert(Ex);
+ // Use GetReturnType in order to give [NSFoo alloc] the type NSFoo *.
+ QualType ResultTy = GetReturnType(Ex, C.getASTContext());
+ state = setRefBinding(state, Sym, RefVal::makeNotOwned(RE.getObjKind(),
+ ResultTy));
+ break;
+ }
+ }
+
+ // This check is actually necessary; otherwise the statement builder thinks
+ // we've hit a previously-found path.
+ // Normally addTransition takes care of this, but we want the node pointer.
+ ExplodedNode *NewNode;
+ if (state == C.getState()) {
+ NewNode = C.getPredecessor();
+ } else {
+ NewNode = C.addTransition(state);
+ }
+
+  // Annotate the node with the summary we used.
+ if (NewNode) {
+ // FIXME: This is ugly. See checkEndAnalysis for why it's necessary.
+ if (ShouldResetSummaryLog) {
+ SummaryLog.clear();
+ ShouldResetSummaryLog = false;
+ }
+ SummaryLog[NewNode] = &Summ;
+ }
+}
+
+ProgramStateRef
+RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
+ RefVal V, ArgEffect E, RefVal::Kind &hasErr,
+ CheckerContext &C) const {
+ // In GC mode [... release] and [... retain] do nothing.
+ // In ARC mode they shouldn't exist at all, but we just ignore them.
+ bool IgnoreRetainMsg = C.isObjCGCEnabled();
+ if (!IgnoreRetainMsg)
+ IgnoreRetainMsg = (bool)C.getASTContext().getLangOpts().ObjCAutoRefCount;
+
+ switch (E) {
+ default:
+ break;
+ case IncRefMsg:
+ E = IgnoreRetainMsg ? DoNothing : IncRef;
+ break;
+ case DecRefMsg:
+ E = IgnoreRetainMsg ? DoNothing : DecRef;
+ break;
+ case DecRefMsgAndStopTrackingHard:
+ E = IgnoreRetainMsg ? StopTracking : DecRefAndStopTrackingHard;
+ break;
+ case MakeCollectable:
+ E = C.isObjCGCEnabled() ? DecRef : DoNothing;
+ break;
+ }
+
+ // Handle all use-after-releases.
+ if (!C.isObjCGCEnabled() && V.getKind() == RefVal::Released) {
+ V = V ^ RefVal::ErrorUseAfterRelease;
+ hasErr = V.getKind();
+ return setRefBinding(state, sym, V);
+ }
+
+ switch (E) {
+ case DecRefMsg:
+ case IncRefMsg:
+ case MakeCollectable:
+ case DecRefMsgAndStopTrackingHard:
+ llvm_unreachable("DecRefMsg/IncRefMsg/MakeCollectable already converted");
+
+ case UnretainedOutParameter:
+ case RetainedOutParameter:
+ llvm_unreachable("Applies to pointer-to-pointer parameters, which should "
+ "not have ref state.");
+
+ case Dealloc:
+ // Any use of -dealloc in GC is *bad*.
+ if (C.isObjCGCEnabled()) {
+ V = V ^ RefVal::ErrorDeallocGC;
+ hasErr = V.getKind();
+ break;
+ }
+
+ switch (V.getKind()) {
+ default:
+ llvm_unreachable("Invalid RefVal state for an explicit dealloc.");
+ case RefVal::Owned:
+ // The object immediately transitions to the released state.
+ V = V ^ RefVal::Released;
+ V.clearCounts();
+ return setRefBinding(state, sym, V);
+ case RefVal::NotOwned:
+ V = V ^ RefVal::ErrorDeallocNotOwned;
+ hasErr = V.getKind();
+ break;
+ }
+ break;
+
+ case MayEscape:
+ if (V.getKind() == RefVal::Owned) {
+ V = V ^ RefVal::NotOwned;
+ break;
+ }
+
+ // Fall-through.
+
+ case DoNothing:
+ return state;
+
+ case Autorelease:
+ if (C.isObjCGCEnabled())
+ return state;
+ // Update the autorelease counts.
+ V = V.autorelease();
+ break;
+
+ case StopTracking:
+ case StopTrackingHard:
+ return removeRefBinding(state, sym);
+
+ case IncRef:
+ switch (V.getKind()) {
+ default:
+ llvm_unreachable("Invalid RefVal state for a retain.");
+ case RefVal::Owned:
+ case RefVal::NotOwned:
+ V = V + 1;
+ break;
+ case RefVal::Released:
+ // Non-GC cases are handled above.
+ assert(C.isObjCGCEnabled());
+ V = (V ^ RefVal::Owned) + 1;
+ break;
+ }
+ break;
+
+ case DecRef:
+ case DecRefBridgedTransferred:
+ case DecRefAndStopTrackingHard:
+ switch (V.getKind()) {
+ default:
+ // case 'RefVal::Released' handled above.
+ llvm_unreachable("Invalid RefVal state for a release.");
+
+ case RefVal::Owned:
+ assert(V.getCount() > 0);
+ if (V.getCount() == 1) {
+ if (E == DecRefBridgedTransferred ||
+ V.getIvarAccessHistory() ==
+ RefVal::IvarAccessHistory::AccessedDirectly)
+ V = V ^ RefVal::NotOwned;
+ else
+ V = V ^ RefVal::Released;
+ } else if (E == DecRefAndStopTrackingHard) {
+ return removeRefBinding(state, sym);
+ }
+
+ V = V - 1;
+ break;
+
+ case RefVal::NotOwned:
+ if (V.getCount() > 0) {
+ if (E == DecRefAndStopTrackingHard)
+ return removeRefBinding(state, sym);
+ V = V - 1;
+ } else if (V.getIvarAccessHistory() ==
+ RefVal::IvarAccessHistory::AccessedDirectly) {
+          // Assume that the instance variable was holding on to the object
+          // at +1, and we just didn't know.
+ if (E == DecRefAndStopTrackingHard)
+ return removeRefBinding(state, sym);
+ V = V.releaseViaIvar() ^ RefVal::Released;
+ } else {
+ V = V ^ RefVal::ErrorReleaseNotOwned;
+ hasErr = V.getKind();
+ }
+ break;
+
+ case RefVal::Released:
+ // Non-GC cases are handled above.
+ assert(C.isObjCGCEnabled());
+ V = V ^ RefVal::ErrorUseAfterRelease;
+ hasErr = V.getKind();
+ break;
+ }
+ break;
+ }
+ return setRefBinding(state, sym, V);
+}
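+
+// Illustrative transitions driven by updateSymbol (non-GC mode; hypothetical
+// creator function):
+//   CFTypeRef r = CFCreateThing();  // Owned, +1
+//   CFRetain(r);                    // IncRef: +2
+//   CFRelease(r);                   // DecRef: +1
+//   CFRelease(r);                   // DecRef: Released
+//   CFRelease(r);                   // use-after-release error reported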
+
+void RetainCountChecker::processNonLeakError(ProgramStateRef St,
+ SourceRange ErrorRange,
+ RefVal::Kind ErrorKind,
+ SymbolRef Sym,
+ CheckerContext &C) const {
+ // HACK: Ignore retain-count issues on values accessed through ivars,
+ // because of cases like this:
+ // [_contentView retain];
+ // [_contentView removeFromSuperview];
+ // [self addSubview:_contentView]; // invalidates 'self'
+ // [_contentView release];
+ if (const RefVal *RV = getRefBinding(St, Sym))
+ if (RV->getIvarAccessHistory() != RefVal::IvarAccessHistory::None)
+ return;
+
+ ExplodedNode *N = C.generateErrorNode(St);
+ if (!N)
+ return;
+
+ CFRefBug *BT;
+ switch (ErrorKind) {
+ default:
+ llvm_unreachable("Unhandled error.");
+ case RefVal::ErrorUseAfterRelease:
+ if (!useAfterRelease)
+ useAfterRelease.reset(new UseAfterRelease(this));
+ BT = useAfterRelease.get();
+ break;
+ case RefVal::ErrorReleaseNotOwned:
+ if (!releaseNotOwned)
+ releaseNotOwned.reset(new BadRelease(this));
+ BT = releaseNotOwned.get();
+ break;
+ case RefVal::ErrorDeallocGC:
+ if (!deallocGC)
+ deallocGC.reset(new DeallocGC(this));
+ BT = deallocGC.get();
+ break;
+ case RefVal::ErrorDeallocNotOwned:
+ if (!deallocNotOwned)
+ deallocNotOwned.reset(new DeallocNotOwned(this));
+ BT = deallocNotOwned.get();
+ break;
+ }
+
+ assert(BT);
+ auto report = std::unique_ptr<BugReport>(
+ new CFRefReport(*BT, C.getASTContext().getLangOpts(), C.isObjCGCEnabled(),
+ SummaryLog, N, Sym));
+ report->addRange(ErrorRange);
+ C.emitReport(std::move(report));
+}
+
+//===----------------------------------------------------------------------===//
+// Handle the return values of retain-count-related functions.
+//===----------------------------------------------------------------------===//
+
+bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
+ // Get the callee. We're only interested in simple C functions.
+ ProgramStateRef state = C.getState();
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD)
+ return false;
+
+ IdentifierInfo *II = FD->getIdentifier();
+ if (!II)
+ return false;
+
+ // For now, we're only handling the functions that return aliases of their
+ // arguments: CFRetain and CFMakeCollectable (and their families).
+ // Eventually we should add other functions we can model entirely,
+ // such as CFRelease, which don't invalidate their arguments or globals.
+ if (CE->getNumArgs() != 1)
+ return false;
+
+ // Get the name of the function.
+ StringRef FName = II->getName();
+ FName = FName.substr(FName.find_first_not_of('_'));
+
+ // See if it's one of the specific functions we know how to eval.
+ bool canEval = false;
+
+ QualType ResultTy = CE->getCallReturnType(C.getASTContext());
+ if (ResultTy->isObjCIdType()) {
+ // Handle: id NSMakeCollectable(CFTypeRef)
+ canEval = II->isStr("NSMakeCollectable");
+ } else if (ResultTy->isPointerType()) {
+ // Handle: (CF|CG)Retain
+ // CFAutorelease
+ // CFMakeCollectable
+ // It's okay to be a little sloppy here (CGMakeCollectable doesn't exist).
+ if (cocoa::isRefType(ResultTy, "CF", FName) ||
+ cocoa::isRefType(ResultTy, "CG", FName)) {
+ canEval = isRetain(FD, FName) || isAutorelease(FD, FName) ||
+ isMakeCollectable(FD, FName);
+ }
+ }
+
+ if (!canEval)
+ return false;
+
+ // Bind the return value.
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal RetVal = state->getSVal(CE->getArg(0), LCtx);
+ if (RetVal.isUnknown()) {
+    // If the argument is unknown, conjure a symbol for the return value.
+ SValBuilder &SVB = C.getSValBuilder();
+ RetVal = SVB.conjureSymbolVal(nullptr, CE, LCtx, ResultTy, C.blockCount());
+ }
+ state = state->BindExpr(CE, LCtx, RetVal, false);
+
+ // FIXME: This should not be necessary, but otherwise the argument seems to be
+ // considered alive during the next statement.
+ if (const MemRegion *ArgRegion = RetVal.getAsRegion()) {
+ // Save the refcount status of the argument.
+ SymbolRef Sym = RetVal.getAsLocSymbol();
+ const RefVal *Binding = nullptr;
+ if (Sym)
+ Binding = getRefBinding(state, Sym);
+
+ // Invalidate the argument region.
+ state = state->invalidateRegions(ArgRegion, CE, C.blockCount(), LCtx,
+ /*CausesPointerEscape*/ false);
+
+ // Restore the refcount status of the argument.
+ if (Binding)
+ state = setRefBinding(state, Sym, *Binding);
+ }
+
+ C.addTransition(state);
+ return true;
+}
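+
+// The functions evaluated here return an alias of their argument, e.g.
+// (illustrative):
+//   CFTypeRef r2 = CFRetain(r1);  // 'r2' aliases 'r1'; its ref binding is kept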
+
+//===----------------------------------------------------------------------===//
+// Handle return statements.
+//===----------------------------------------------------------------------===//
+
+void RetainCountChecker::checkPreStmt(const ReturnStmt *S,
+ CheckerContext &C) const {
+
+ // Only adjust the reference count if this is the top-level call frame,
+ // and not the result of inlining. In the future, we should do
+ // better checking even for inlined calls, and see if they match
+ // with their expected semantics (e.g., the method should return a retained
+ // object, etc.).
+ if (!C.inTopFrame())
+ return;
+
+ const Expr *RetE = S->getRetValue();
+ if (!RetE)
+ return;
+
+ ProgramStateRef state = C.getState();
+ SymbolRef Sym =
+ state->getSValAsScalarOrLoc(RetE, C.getLocationContext()).getAsLocSymbol();
+ if (!Sym)
+ return;
+
+ // Get the reference count binding (if any).
+ const RefVal *T = getRefBinding(state, Sym);
+ if (!T)
+ return;
+
+ // Change the reference count.
+ RefVal X = *T;
+
+ switch (X.getKind()) {
+ case RefVal::Owned: {
+ unsigned cnt = X.getCount();
+ assert(cnt > 0);
+ X.setCount(cnt - 1);
+ X = X ^ RefVal::ReturnedOwned;
+ break;
+ }
+
+ case RefVal::NotOwned: {
+ unsigned cnt = X.getCount();
+ if (cnt) {
+ X.setCount(cnt - 1);
+ X = X ^ RefVal::ReturnedOwned;
+ }
+ else {
+ X = X ^ RefVal::ReturnedNotOwned;
+ }
+ break;
+ }
+
+ default:
+ return;
+ }
+
+ // Update the binding.
+ state = setRefBinding(state, Sym, X);
+ ExplodedNode *Pred = C.addTransition(state);
+
+ // At this point we have updated the state properly.
+ // Everything after this is merely checking to see if the return value has
+ // been over- or under-retained.
+
+ // Did we cache out?
+ if (!Pred)
+ return;
+
+ // Update the autorelease counts.
+ static CheckerProgramPointTag AutoreleaseTag(this, "Autorelease");
+ state = handleAutoreleaseCounts(state, Pred, &AutoreleaseTag, C, Sym, X);
+
+ // Did we cache out?
+ if (!state)
+ return;
+
+ // Get the updated binding.
+ T = getRefBinding(state, Sym);
+ assert(T);
+ X = *T;
+
+ // Consult the summary of the enclosing method.
+ RetainSummaryManager &Summaries = getSummaryManager(C);
+ const Decl *CD = &Pred->getCodeDecl();
+ RetEffect RE = RetEffect::MakeNoRet();
+
+ // FIXME: What is the convention for blocks? Is there one?
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(CD)) {
+ const RetainSummary *Summ = Summaries.getMethodSummary(MD);
+ RE = Summ->getRetEffect();
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CD)) {
+ if (!isa<CXXMethodDecl>(FD)) {
+ const RetainSummary *Summ = Summaries.getFunctionSummary(FD);
+ RE = Summ->getRetEffect();
+ }
+ }
+
+ checkReturnWithRetEffect(S, C, Pred, RE, X, Sym, state);
+}
+
+void RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
+ CheckerContext &C,
+ ExplodedNode *Pred,
+ RetEffect RE, RefVal X,
+ SymbolRef Sym,
+ ProgramStateRef state) const {
+ // HACK: Ignore retain-count issues on values accessed through ivars,
+ // because of cases like this:
+ // [_contentView retain];
+ // [_contentView removeFromSuperview];
+ // [self addSubview:_contentView]; // invalidates 'self'
+ // [_contentView release];
+ if (X.getIvarAccessHistory() != RefVal::IvarAccessHistory::None)
+ return;
+
+ // Any leaks or other errors?
+ if (X.isReturnedOwned() && X.getCount() == 0) {
+ if (RE.getKind() != RetEffect::NoRet) {
+ bool hasError = false;
+ if (C.isObjCGCEnabled() && RE.getObjKind() == RetEffect::ObjC) {
+ // Things are more complicated with garbage collection. If the
+        // returned object is supposed to be an Objective-C object, we have
+ // a leak (as the caller expects a GC'ed object) because no
+ // method should return ownership unless it returns a CF object.
+ hasError = true;
+ X = X ^ RefVal::ErrorGCLeakReturned;
+ }
+ else if (!RE.isOwned()) {
+ // Either we are using GC and the returned object is a CF type
+        // or we aren't using GC. In either case, the enclosing method is
+        // expected to return ownership.
+ hasError = true;
+ X = X ^ RefVal::ErrorLeakReturned;
+ }
+
+ if (hasError) {
+ // Generate an error node.
+ state = setRefBinding(state, Sym, X);
+
+ static CheckerProgramPointTag ReturnOwnLeakTag(this, "ReturnsOwnLeak");
+ ExplodedNode *N = C.addTransition(state, Pred, &ReturnOwnLeakTag);
+ if (N) {
+ const LangOptions &LOpts = C.getASTContext().getLangOpts();
+ bool GCEnabled = C.isObjCGCEnabled();
+ C.emitReport(std::unique_ptr<BugReport>(new CFRefLeakReport(
+ *getLeakAtReturnBug(LOpts, GCEnabled), LOpts, GCEnabled,
+ SummaryLog, N, Sym, C, IncludeAllocationLine)));
+ }
+ }
+ }
+ } else if (X.isReturnedNotOwned()) {
+ if (RE.isOwned()) {
+ if (X.getIvarAccessHistory() ==
+ RefVal::IvarAccessHistory::AccessedDirectly) {
+ // Assume the method was trying to transfer a +1 reference from a
+ // strong ivar to the caller.
+ state = setRefBinding(state, Sym,
+ X.releaseViaIvar() ^ RefVal::ReturnedOwned);
+ } else {
+ // Trying to return a not owned object to a caller expecting an
+ // owned object.
+ state = setRefBinding(state, Sym, X ^ RefVal::ErrorReturnedNotOwned);
+
+ static CheckerProgramPointTag
+ ReturnNotOwnedTag(this, "ReturnNotOwnedForOwned");
+
+ ExplodedNode *N = C.addTransition(state, Pred, &ReturnNotOwnedTag);
+ if (N) {
+ if (!returnNotOwnedForOwned)
+ returnNotOwnedForOwned.reset(new ReturnedNotOwnedForOwned(this));
+
+ C.emitReport(std::unique_ptr<BugReport>(new CFRefReport(
+ *returnNotOwnedForOwned, C.getASTContext().getLangOpts(),
+ C.isObjCGCEnabled(), SummaryLog, N, Sym)));
+ }
+ }
+ }
+ }
+}
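+
+// Illustrative leak-at-return caught above (ObjC, non-GC): a method whose
+// name does not imply a +1 return (per the Cocoa naming conventions), such as
+//   - (id)make { return [[NSObject alloc] init]; }
+// returns an owned (+1) object and is flagged as ErrorLeakReturned.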
+
+//===----------------------------------------------------------------------===//
+// Check various ways a symbol can be invalidated.
+//===----------------------------------------------------------------------===//
+
+void RetainCountChecker::checkBind(SVal loc, SVal val, const Stmt *S,
+ CheckerContext &C) const {
+ // Are we storing to something that causes the value to "escape"?
+ bool escapes = true;
+
+  // A value escapes in four possible cases (this may change):
+  //
+  // (1) we are binding to something that is not a memory region.
+  // (2) we are binding to a memregion that does not have stack storage
+  // (3) we are binding to a memregion with stack storage that the store
+  //     does not understand.
+  // (4) we are binding to a stack memregion that is not a variable, such as
+  //     a struct field, which we do not currently model (see Case 4 below).
+ ProgramStateRef state = C.getState();
+
+ if (Optional<loc::MemRegionVal> regionLoc = loc.getAs<loc::MemRegionVal>()) {
+ escapes = !regionLoc->getRegion()->hasStackStorage();
+
+ if (!escapes) {
+ // To test (3), generate a new state with the binding added. If it is
+ // the same state, then it escapes (since the store cannot represent
+ // the binding).
+ // Do this only if we know that the store is not supposed to generate the
+ // same state.
+ SVal StoredVal = state->getSVal(regionLoc->getRegion());
+ if (StoredVal != val)
+ escapes = (state == (state->bindLoc(*regionLoc, val)));
+ }
+ if (!escapes) {
+ // Case 4: We do not currently model what happens when a symbol is
+ // assigned to a struct field, so be conservative here and let the symbol
+ // go. TODO: This could definitely be improved upon.
+ escapes = !isa<VarRegion>(regionLoc->getRegion());
+ }
+ }
+
+ // If we are storing the value into an auto function scope variable annotated
+ // with (__attribute__((cleanup))), stop tracking the value to avoid leak
+ // false positives.
+ if (const VarRegion *LVR = dyn_cast_or_null<VarRegion>(loc.getAsRegion())) {
+ const VarDecl *VD = LVR->getDecl();
+ if (VD->hasAttr<CleanupAttr>()) {
+ escapes = true;
+ }
+ }
+
+ // If our store can represent the binding and we aren't storing to something
+ // that doesn't have local storage then just return and have the simulation
+ // state continue as is.
+ if (!escapes)
+ return;
+
+ // Otherwise, find all symbols referenced by 'val' that we are tracking
+ // and stop tracking them.
+ state = state->scanReachableSymbols<StopTrackingCallback>(val).getState();
+ C.addTransition(state);
+}
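+
+// Illustrative escape: binding a tracked value to non-stack storage stops
+// tracking it (hypothetical creator function):
+//   static CFTypeRef gRef;
+//   gRef = CFCreateThing();  // the symbol escapes; no leak is reported here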
+
+ProgramStateRef RetainCountChecker::evalAssume(ProgramStateRef state,
+ SVal Cond,
+ bool Assumption) const {
+ // FIXME: We may add to the interface of evalAssume the list of symbols
+ // whose assumptions have changed. For now we just iterate through the
+ // bindings and check if any of the tracked symbols are NULL. This isn't
+ // too bad since the number of symbols we will track in practice are
+ // probably small and evalAssume is only called at branches and a few
+ // other places.
+ RefBindingsTy B = state->get<RefBindings>();
+
+ if (B.isEmpty())
+ return state;
+
+ bool changed = false;
+ RefBindingsTy::Factory &RefBFactory = state->get_context<RefBindings>();
+
+ for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+    // If the symbol is constrained to null, stop tracking it.
+ ConstraintManager &CMgr = state->getConstraintManager();
+ ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey());
+ if (AllocFailed.isConstrainedTrue()) {
+ changed = true;
+ B = RefBFactory.remove(B, I.getKey());
+ }
+ }
+
+ if (changed)
+ state = state->set<RefBindings>(B);
+
+ return state;
+}
+
+ProgramStateRef
+RetainCountChecker::checkRegionChanges(ProgramStateRef state,
+ const InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallEvent *Call) const {
+ if (!invalidated)
+ return state;
+
+ llvm::SmallPtrSet<SymbolRef, 8> WhitelistedSymbols;
+ for (ArrayRef<const MemRegion *>::iterator I = ExplicitRegions.begin(),
+ E = ExplicitRegions.end(); I != E; ++I) {
+ if (const SymbolicRegion *SR = (*I)->StripCasts()->getAs<SymbolicRegion>())
+ WhitelistedSymbols.insert(SR->getSymbol());
+ }
+
+  for (InvalidatedSymbols::const_iterator I = invalidated->begin(),
+       E = invalidated->end(); I != E; ++I) {
+ SymbolRef sym = *I;
+ if (WhitelistedSymbols.count(sym))
+ continue;
+ // Remove any existing reference-count binding.
+ state = removeRefBinding(state, sym);
+ }
+ return state;
+}
+
+//===----------------------------------------------------------------------===//
+// Handle dead symbols and end-of-path.
+//===----------------------------------------------------------------------===//
+
+ProgramStateRef
+RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state,
+ ExplodedNode *Pred,
+ const ProgramPointTag *Tag,
+ CheckerContext &Ctx,
+ SymbolRef Sym, RefVal V) const {
+ unsigned ACnt = V.getAutoreleaseCount();
+
+ // No autorelease counts? Nothing to be done.
+ if (!ACnt)
+ return state;
+
+ assert(!Ctx.isObjCGCEnabled() && "Autorelease counts in GC mode?");
+ unsigned Cnt = V.getCount();
+
+  // FIXME: Handle sending 'autorelease' to an already released object.
+
+ if (V.getKind() == RefVal::ReturnedOwned)
+ ++Cnt;
+
+ // If we would over-release here, but we know the value came from an ivar,
+ // assume it was a strong ivar that's just been relinquished.
+ if (ACnt > Cnt &&
+ V.getIvarAccessHistory() == RefVal::IvarAccessHistory::AccessedDirectly) {
+ V = V.releaseViaIvar();
+ --ACnt;
+ }
+
+ if (ACnt <= Cnt) {
+ if (ACnt == Cnt) {
+ V.clearCounts();
+ if (V.getKind() == RefVal::ReturnedOwned)
+ V = V ^ RefVal::ReturnedNotOwned;
+ else
+ V = V ^ RefVal::NotOwned;
+ } else {
+ V.setCount(V.getCount() - ACnt);
+ V.setAutoreleaseCount(0);
+ }
+ return setRefBinding(state, Sym, V);
+ }
+
+ // HACK: Ignore retain-count issues on values accessed through ivars,
+ // because of cases like this:
+ // [_contentView retain];
+ // [_contentView removeFromSuperview];
+ // [self addSubview:_contentView]; // invalidates 'self'
+ // [_contentView release];
+ if (V.getIvarAccessHistory() != RefVal::IvarAccessHistory::None)
+ return state;
+
+  // Woah! More autorelease counts than retain counts left.
+  // Emit a hard error.
+ V = V ^ RefVal::ErrorOverAutorelease;
+ state = setRefBinding(state, Sym, V);
+
+ ExplodedNode *N = Ctx.generateSink(state, Pred, Tag);
+ if (N) {
+ SmallString<128> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+ os << "Object was autoreleased ";
+ if (V.getAutoreleaseCount() > 1)
+ os << V.getAutoreleaseCount() << " times but the object ";
+ else
+ os << "but ";
+ os << "has a +" << V.getCount() << " retain count";
+
+ if (!overAutorelease)
+ overAutorelease.reset(new OverAutorelease(this));
+
+ const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
+ Ctx.emitReport(std::unique_ptr<BugReport>(
+ new CFRefReport(*overAutorelease, LOpts, /* GCEnabled = */ false,
+ SummaryLog, N, Sym, os.str())));
+ }
+
+ return nullptr;
+}
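+
+// Illustrative over-autorelease reported above (hypothetical +0 accessor):
+//   CFTypeRef r = CFGetThing();  // NotOwned, +0
+//   [(id)r autorelease];         // autorelease count 1 > retain count 0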
+
+ProgramStateRef
+RetainCountChecker::handleSymbolDeath(ProgramStateRef state,
+ SymbolRef sid, RefVal V,
+ SmallVectorImpl<SymbolRef> &Leaked) const {
+ bool hasLeak;
+
+ // HACK: Ignore retain-count issues on values accessed through ivars,
+ // because of cases like this:
+ // [_contentView retain];
+ // [_contentView removeFromSuperview];
+ // [self addSubview:_contentView]; // invalidates 'self'
+ // [_contentView release];
+ if (V.getIvarAccessHistory() != RefVal::IvarAccessHistory::None)
+ hasLeak = false;
+ else if (V.isOwned())
+ hasLeak = true;
+ else if (V.isNotOwned() || V.isReturnedOwned())
+ hasLeak = (V.getCount() > 0);
+ else
+ hasLeak = false;
+
+ if (!hasLeak)
+ return removeRefBinding(state, sid);
+
+ Leaked.push_back(sid);
+ return setRefBinding(state, sid, V ^ RefVal::ErrorLeak);
+}
+
+ExplodedNode *
+RetainCountChecker::processLeaks(ProgramStateRef state,
+ SmallVectorImpl<SymbolRef> &Leaked,
+ CheckerContext &Ctx,
+ ExplodedNode *Pred) const {
+ // Generate an intermediate node representing the leak point.
+ ExplodedNode *N = Ctx.addTransition(state, Pred);
+
+ if (N) {
+ for (SmallVectorImpl<SymbolRef>::iterator
+ I = Leaked.begin(), E = Leaked.end(); I != E; ++I) {
+
+ const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
+ bool GCEnabled = Ctx.isObjCGCEnabled();
+ CFRefBug *BT = Pred ? getLeakWithinFunctionBug(LOpts, GCEnabled)
+ : getLeakAtReturnBug(LOpts, GCEnabled);
+ assert(BT && "BugType not initialized.");
+
+ Ctx.emitReport(std::unique_ptr<BugReport>(
+ new CFRefLeakReport(*BT, LOpts, GCEnabled, SummaryLog, N, *I, Ctx,
+ IncludeAllocationLine)));
+ }
+ }
+
+ return N;
+}
+
+void RetainCountChecker::checkEndFunction(CheckerContext &Ctx) const {
+ ProgramStateRef state = Ctx.getState();
+ RefBindingsTy B = state->get<RefBindings>();
+ ExplodedNode *Pred = Ctx.getPredecessor();
+
+ // Don't process anything within synthesized bodies.
+ const LocationContext *LCtx = Pred->getLocationContext();
+ if (LCtx->getAnalysisDeclContext()->isBodyAutosynthesized()) {
+ assert(LCtx->getParent());
+ return;
+ }
+
+ for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ state = handleAutoreleaseCounts(state, Pred, /*Tag=*/nullptr, Ctx,
+ I->first, I->second);
+ if (!state)
+ return;
+ }
+
+ // If the current LocationContext has a parent, don't check for leaks.
+ // We will do that later.
+ // FIXME: we should instead check for imbalances of the retain/releases,
+ // and suggest annotations.
+ if (LCtx->getParent())
+ return;
+
+ B = state->get<RefBindings>();
+ SmallVector<SymbolRef, 10> Leaked;
+
+ for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I)
+ state = handleSymbolDeath(state, I->first, I->second, Leaked);
+
+ processLeaks(state, Leaked, Ctx, Pred);
+}
+
+const ProgramPointTag *
+RetainCountChecker::getDeadSymbolTag(SymbolRef sym) const {
+ const CheckerProgramPointTag *&tag = DeadSymbolTags[sym];
+ if (!tag) {
+ SmallString<64> buf;
+ llvm::raw_svector_ostream out(buf);
+ out << "Dead Symbol : ";
+ sym->dumpToStream(out);
+ tag = new CheckerProgramPointTag(this, out.str());
+ }
+ return tag;
+}
+
+void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
+ CheckerContext &C) const {
+ ExplodedNode *Pred = C.getPredecessor();
+
+ ProgramStateRef state = C.getState();
+ RefBindingsTy B = state->get<RefBindings>();
+ SmallVector<SymbolRef, 10> Leaked;
+
+ // Update counts from autorelease pools
+ for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
+ E = SymReaper.dead_end(); I != E; ++I) {
+ SymbolRef Sym = *I;
+    if (const RefVal *T = B.lookup(Sym)) {
+ // Use the symbol as the tag.
+ // FIXME: This might not be as unique as we would like.
+ const ProgramPointTag *Tag = getDeadSymbolTag(Sym);
+ state = handleAutoreleaseCounts(state, Pred, Tag, C, Sym, *T);
+ if (!state)
+ return;
+
+ // Fetch the new reference count from the state, and use it to handle
+ // this symbol.
+ state = handleSymbolDeath(state, *I, *getRefBinding(state, Sym), Leaked);
+ }
+ }
+
+ if (Leaked.empty()) {
+ C.addTransition(state);
+ return;
+ }
+
+ Pred = processLeaks(state, Leaked, C, Pred);
+
+ // Did we cache out?
+ if (!Pred)
+ return;
+
+ // Now generate a new node that nukes the old bindings.
+ // The only bindings left at this point are the leaked symbols.
+ RefBindingsTy::Factory &F = state->get_context<RefBindings>();
+ B = state->get<RefBindings>();
+
+ for (SmallVectorImpl<SymbolRef>::iterator I = Leaked.begin(),
+ E = Leaked.end();
+ I != E; ++I)
+ B = F.remove(B, *I);
+
+ state = state->set<RefBindings>(B);
+ C.addTransition(state, Pred);
+}
+
+void RetainCountChecker::printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const {
+
+ RefBindingsTy B = State->get<RefBindings>();
+
+ if (B.isEmpty())
+ return;
+
+ Out << Sep << NL;
+
+ for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ Out << I->first << " : ";
+ I->second.print(Out);
+ Out << NL;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Checker registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerRetainCountChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<RetainCountChecker>(Mgr.getAnalyzerOptions());
+}
+
+//===----------------------------------------------------------------------===//
+// Implementation of the CallEffects API.
+//===----------------------------------------------------------------------===//
+
+namespace clang {
+namespace ento {
+namespace objc_retain {
+
+// This is a bit gross, but it allows us to populate CallEffects without
+// creating a bunch of accessors. Its use is very localized, so the
+// damage of this macro is limited.
+#define createCallEffect(D, KIND)\
+ ASTContext &Ctx = D->getASTContext();\
+ LangOptions L = Ctx.getLangOpts();\
+ RetainSummaryManager M(Ctx, L.GCOnly, L.ObjCAutoRefCount);\
+ const RetainSummary *S = M.get ## KIND ## Summary(D);\
+ CallEffects CE(S->getRetEffect());\
+ CE.Receiver = S->getReceiverEffect();\
+ unsigned N = D->param_size();\
+ for (unsigned i = 0; i < N; ++i) {\
+ CE.Args.push_back(S->getArg(i));\
+ }
+
+CallEffects CallEffects::getEffect(const ObjCMethodDecl *MD) {
+ createCallEffect(MD, Method);
+ return CE;
+}
+
+CallEffects CallEffects::getEffect(const FunctionDecl *FD) {
+ createCallEffect(FD, Function);
+ return CE;
+}
+
+#undef createCallEffect
+
+} // end namespace objc_retain
+} // end namespace ento
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
new file mode 100644
index 0000000..19fa0fb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
@@ -0,0 +1,92 @@
+//== ReturnPointerRangeChecker.cpp ------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ReturnPointerRangeChecker, which is a path-sensitive check
+// which looks for an out-of-bound pointer being returned to callers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ReturnPointerRangeChecker :
+ public Checker< check::PreStmt<ReturnStmt> > {
+ mutable std::unique_ptr<BuiltinBug> BT;
+
+public:
+ void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
+};
+}
+
+void ReturnPointerRangeChecker::checkPreStmt(const ReturnStmt *RS,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+
+ const Expr *RetE = RS->getRetValue();
+ if (!RetE)
+ return;
+
+ SVal V = state->getSVal(RetE, C.getLocationContext());
+ const MemRegion *R = V.getAsRegion();
+
+ const ElementRegion *ER = dyn_cast_or_null<ElementRegion>(R);
+ if (!ER)
+ return;
+
+ DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
+  // A zero index is always in bounds; this also covers ElementRegions created
+  // for pointer casts.
+ if (Idx.isZeroConstant())
+ return;
+ // FIXME: All of this out-of-bounds checking should eventually be refactored
+ // into a common place.
+
+ DefinedOrUnknownSVal NumElements
+ = C.getStoreManager().getSizeInElements(state, ER->getSuperRegion(),
+ ER->getValueType());
+
+ ProgramStateRef StInBound = state->assumeInBound(Idx, NumElements, true);
+ ProgramStateRef StOutBound = state->assumeInBound(Idx, NumElements, false);
+ if (StOutBound && !StInBound) {
+ ExplodedNode *N = C.generateErrorNode(StOutBound);
+
+ if (!N)
+ return;
+
+    // FIXME: This bug corresponds to CWE-466. Eventually we should have bug
+ // types explicitly reference such exploit categories (when applicable).
+ if (!BT)
+ BT.reset(new BuiltinBug(
+ this, "Return of pointer value outside of expected range",
+ "Returned pointer value points outside the original object "
+ "(potential buffer overflow)"));
+
+ // FIXME: It would be nice to eventually make this diagnostic more clear,
+ // e.g., by referencing the original declaration or by saying *why* this
+ // reference is outside the range.
+
+ // Generate a report for this bug.
+ auto report = llvm::make_unique<BugReport>(*BT, BT->getDescription(), N);
+
+ report->addRange(RetE->getSourceRange());
+ C.emitReport(std::move(report));
+ }
+}
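+
+// Illustrative out-of-range return flagged above:
+//   int *get(void) {
+//     static int buf[4];
+//     return buf + 10;  // ElementRegion index 10, NumElements 4
+//   }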
+
+void ento::registerReturnPointerRangeChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ReturnPointerRangeChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
new file mode 100644
index 0000000..c5e826a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
@@ -0,0 +1,123 @@
+//== ReturnUndefChecker.cpp -------------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ReturnUndefChecker, which is a path-sensitive
+// check which looks for undefined or garbage values being returned to the
+// caller.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ReturnUndefChecker : public Checker< check::PreStmt<ReturnStmt> > {
+ mutable std::unique_ptr<BuiltinBug> BT_Undef;
+ mutable std::unique_ptr<BuiltinBug> BT_NullReference;
+
+ void emitUndef(CheckerContext &C, const Expr *RetE) const;
+ void checkReference(CheckerContext &C, const Expr *RetE,
+ DefinedOrUnknownSVal RetVal) const;
+public:
+ void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
+};
+}
+
+void ReturnUndefChecker::checkPreStmt(const ReturnStmt *RS,
+ CheckerContext &C) const {
+ const Expr *RetE = RS->getRetValue();
+ if (!RetE)
+ return;
+ SVal RetVal = C.getSVal(RetE);
+
+ const StackFrameContext *SFC = C.getStackFrame();
+ QualType RT = CallEvent::getDeclaredResultType(SFC->getDecl());
+
+ if (RetVal.isUndef()) {
+ // "return;" is modeled to evaluate to an UndefinedVal. Allow UndefinedVal
+ // to be returned in functions returning void to support this pattern:
+ // void foo() {
+ // return;
+ // }
+ // void test() {
+ // return foo();
+ // }
+ if (!RT.isNull() && RT->isVoidType())
+ return;
+
+ // Not all blocks have explicitly-specified return types; if the return type
+ // is not available, but the return value expression has 'void' type, assume
+ // Sema already checked it.
+ if (RT.isNull() && isa<BlockDecl>(SFC->getDecl()) &&
+ RetE->getType()->isVoidType())
+ return;
+
+ emitUndef(C, RetE);
+ return;
+ }
+
+ if (RT.isNull())
+ return;
+
+ if (RT->isReferenceType()) {
+ checkReference(C, RetE, RetVal.castAs<DefinedOrUnknownSVal>());
+ return;
+ }
+}
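+
+// Illustrative garbage return flagged via emitUndef below:
+//   int f(void) {
+//     int x;      // never initialized
+//     return x;   // RetVal.isUndef() -> "Garbage return value"
+//   }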
+
+static void emitBug(CheckerContext &C, BuiltinBug &BT, const Expr *RetE,
+ const Expr *TrackingE = nullptr) {
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return;
+
+ auto Report = llvm::make_unique<BugReport>(BT, BT.getDescription(), N);
+
+ Report->addRange(RetE->getSourceRange());
+ bugreporter::trackNullOrUndefValue(N, TrackingE ? TrackingE : RetE, *Report);
+
+ C.emitReport(std::move(Report));
+}
+
+void ReturnUndefChecker::emitUndef(CheckerContext &C, const Expr *RetE) const {
+ if (!BT_Undef)
+ BT_Undef.reset(
+ new BuiltinBug(this, "Garbage return value",
+ "Undefined or garbage value returned to caller"));
+ emitBug(C, *BT_Undef, RetE);
+}
+
+void ReturnUndefChecker::checkReference(CheckerContext &C, const Expr *RetE,
+ DefinedOrUnknownSVal RetVal) const {
+ ProgramStateRef StNonNull, StNull;
+ std::tie(StNonNull, StNull) = C.getState()->assume(RetVal);
+
+ if (StNonNull) {
+ // Going forward, assume the location is non-null.
+ C.addTransition(StNonNull);
+ return;
+ }
+
+ // The return value is known to be null. Emit a bug report.
+ if (!BT_NullReference)
+ BT_NullReference.reset(new BuiltinBug(this, "Returning null reference"));
+
+ emitBug(C, *BT_NullReference, RetE, bugreporter::getDerefExpr(RetE));
+}
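+
+// Illustrative null-reference return flagged above: if 'p' is constrained
+// to null on this path,
+//   int &deref(int *p) { return *p; }
+// returns a null reference and is reported.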
+
+void ento::registerReturnUndefChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ReturnUndefChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/SelectorExtras.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/SelectorExtras.h
new file mode 100644
index 0000000..41f70d7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/SelectorExtras.h
@@ -0,0 +1,68 @@
+//=== SelectorExtras.h - Helpers for checkers using selectors -----*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_SELECTOREXTRAS_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_SELECTOREXTRAS_H
+
+#include "clang/AST/ASTContext.h"
+#include <cstdarg>
+
+namespace clang {
+namespace ento {
+
+static inline Selector getKeywordSelectorImpl(ASTContext &Ctx,
+ const char *First,
+ va_list argp) {
+ SmallVector<IdentifierInfo*, 10> II;
+ II.push_back(&Ctx.Idents.get(First));
+
+ while (const char *s = va_arg(argp, const char *))
+ II.push_back(&Ctx.Idents.get(s));
+
+ return Ctx.Selectors.getSelector(II.size(), &II[0]);
+}
+
+static inline Selector getKeywordSelector(ASTContext &Ctx, va_list argp) {
+ const char *First = va_arg(argp, const char *);
+ assert(First && "keyword selectors must have at least one argument");
+ return getKeywordSelectorImpl(Ctx, First, argp);
+}
+
+LLVM_END_WITH_NULL
+static inline Selector getKeywordSelector(ASTContext &Ctx,
+ const char *First, ...) {
+ va_list argp;
+ va_start(argp, First);
+ Selector result = getKeywordSelectorImpl(Ctx, First, argp);
+ va_end(argp);
+ return result;
+}
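+
+// Usage (illustrative): builds the selector 'initWithFormat:locale:'; the
+// argument list must be nullptr-terminated (LLVM_END_WITH_NULL).
+//   Selector S = getKeywordSelector(Ctx, "initWithFormat", "locale", nullptr);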
+
+LLVM_END_WITH_NULL
+static inline void lazyInitKeywordSelector(Selector &Sel, ASTContext &Ctx,
+ const char *First, ...) {
+ if (!Sel.isNull())
+ return;
+ va_list argp;
+ va_start(argp, First);
+ Sel = getKeywordSelectorImpl(Ctx, First, argp);
+ va_end(argp);
+}
+
+static inline void lazyInitNullarySelector(Selector &Sel, ASTContext &Ctx,
+ const char *Name) {
+ if (!Sel.isNull())
+ return;
+ Sel = GetNullarySelector(Name, Ctx);
+}
+
+} // end namespace ento
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
new file mode 100644
index 0000000..7026a2e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
@@ -0,0 +1,287 @@
+//===-- SimpleStreamChecker.cpp -----------------------------------------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a checker for proper use of fopen/fclose APIs.
+// - If a file has been closed with fclose, it should not be accessed again.
+// Accessing a closed file results in undefined behavior.
+// - If a file was opened with fopen, it must be closed with fclose before
+// the execution ends. Failing to do so results in a resource leak.
+//
+//===----------------------------------------------------------------------===//
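+//
+// A minimal sketch of the code patterns this checker is meant to flag
+// (illustrative only):
+//
+//   void doubleClose(void) {
+//     FILE *F = fopen("myfile.txt", "w");
+//     if (!F)
+//       return;
+//     fclose(F);
+//     fclose(F); // warn: closing a previously closed file stream
+//   }
+//
+//   void leak(void) {
+//     FILE *F = fopen("myfile.txt", "w"); // warn if F is non-null and never
+//   }                                     // closed: resource leak
+//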
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+typedef SmallVector<SymbolRef, 2> SymbolVector;
+
+struct StreamState {
+private:
+ enum Kind { Opened, Closed } K;
+ StreamState(Kind InK) : K(InK) { }
+
+public:
+ bool isOpened() const { return K == Opened; }
+ bool isClosed() const { return K == Closed; }
+
+ static StreamState getOpened() { return StreamState(Opened); }
+ static StreamState getClosed() { return StreamState(Closed); }
+
+ bool operator==(const StreamState &X) const {
+ return K == X.K;
+ }
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(K);
+ }
+};
+
+class SimpleStreamChecker : public Checker<check::PostCall,
+ check::PreCall,
+ check::DeadSymbols,
+ check::PointerEscape> {
+
+ mutable IdentifierInfo *IIfopen, *IIfclose;
+
+ std::unique_ptr<BugType> DoubleCloseBugType;
+ std::unique_ptr<BugType> LeakBugType;
+
+ void initIdentifierInfo(ASTContext &Ctx) const;
+
+ void reportDoubleClose(SymbolRef FileDescSym,
+ const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void reportLeaks(ArrayRef<SymbolRef> LeakedStreams, CheckerContext &C,
+ ExplodedNode *ErrNode) const;
+
+ bool guaranteedNotToCloseFile(const CallEvent &Call) const;
+
+public:
+ SimpleStreamChecker();
+
+ /// Process fopen.
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+ /// Process fclose.
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+
+ void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+
+ /// Stop tracking addresses which escape.
+ ProgramStateRef checkPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind) const;
+};
+
+} // end anonymous namespace
+
+/// The state of the checker is a map from tracked stream symbols to their
+/// state. Let's store it in the ProgramState.
+REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState)
+
+namespace {
+class StopTrackingCallback final : public SymbolVisitor {
+ ProgramStateRef state;
+public:
+ StopTrackingCallback(ProgramStateRef st) : state(st) {}
+ ProgramStateRef getState() const { return state; }
+
+ bool VisitSymbol(SymbolRef sym) override {
+ state = state->remove<StreamMap>(sym);
+ return true;
+ }
+};
+} // end anonymous namespace
+
+SimpleStreamChecker::SimpleStreamChecker()
+ : IIfopen(nullptr), IIfclose(nullptr) {
+ // Initialize the bug types.
+ DoubleCloseBugType.reset(
+ new BugType(this, "Double fclose", "Unix Stream API Error"));
+
+ LeakBugType.reset(
+ new BugType(this, "Resource Leak", "Unix Stream API Error"));
+ // Suppress leak reports on paths that end in a sink (e.g. a call to
+ // assert() or exit()), since a higher-importance problem has already been
+ // reported there.
+ LeakBugType->setSuppressOnSink(true);
+}
+
+void SimpleStreamChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ initIdentifierInfo(C.getASTContext());
+
+ if (!Call.isGlobalCFunction())
+ return;
+
+ if (Call.getCalleeIdentifier() != IIfopen)
+ return;
+
+ // Get the symbolic value corresponding to the file handle.
+ SymbolRef FileDesc = Call.getReturnValue().getAsSymbol();
+ if (!FileDesc)
+ return;
+
+ // Generate the next transition (an edge in the exploded graph).
+ ProgramStateRef State = C.getState();
+ State = State->set<StreamMap>(FileDesc, StreamState::getOpened());
+ C.addTransition(State);
+}
+
+void SimpleStreamChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ initIdentifierInfo(C.getASTContext());
+
+ if (!Call.isGlobalCFunction())
+ return;
+
+ if (Call.getCalleeIdentifier() != IIfclose)
+ return;
+
+ if (Call.getNumArgs() != 1)
+ return;
+
+ // Get the symbolic value corresponding to the file handle.
+ SymbolRef FileDesc = Call.getArgSVal(0).getAsSymbol();
+ if (!FileDesc)
+ return;
+
+ // Check if the stream has already been closed.
+ ProgramStateRef State = C.getState();
+ const StreamState *SS = State->get<StreamMap>(FileDesc);
+ if (SS && SS->isClosed()) {
+ reportDoubleClose(FileDesc, Call, C);
+ return;
+ }
+
+ // Generate the next transition, in which the stream is closed.
+ State = State->set<StreamMap>(FileDesc, StreamState::getClosed());
+ C.addTransition(State);
+}
+
+static bool isLeaked(SymbolRef Sym, const StreamState &SS,
+ bool IsSymDead, ProgramStateRef State) {
+ if (IsSymDead && SS.isOpened()) {
+ // If a symbol is NULL, assume that fopen failed on this path.
+ // A symbol should only be considered leaked if it is non-null.
+ ConstraintManager &CMgr = State->getConstraintManager();
+ ConditionTruthVal OpenFailed = CMgr.isNull(State, Sym);
+ return !OpenFailed.isConstrainedTrue();
+ }
+ return false;
+}
+
+void SimpleStreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SymbolVector LeakedStreams;
+ StreamMapTy TrackedStreams = State->get<StreamMap>();
+ for (StreamMapTy::iterator I = TrackedStreams.begin(),
+ E = TrackedStreams.end(); I != E; ++I) {
+ SymbolRef Sym = I->first;
+ bool IsSymDead = SymReaper.isDead(Sym);
+
+ // Collect leaked symbols.
+ if (isLeaked(Sym, I->second, IsSymDead, State))
+ LeakedStreams.push_back(Sym);
+
+ // Remove the dead symbol from the streams map.
+ if (IsSymDead)
+ State = State->remove<StreamMap>(Sym);
+ }
+
+ ExplodedNode *N = C.generateNonFatalErrorNode(State);
+ if (!N)
+ return;
+ reportLeaks(LeakedStreams, C, N);
+}
+
+void SimpleStreamChecker::reportDoubleClose(SymbolRef FileDescSym,
+ const CallEvent &Call,
+ CheckerContext &C) const {
+ // We reached a bug, stop exploring the path here by generating a sink.
+ ExplodedNode *ErrNode = C.generateErrorNode();
+ // If we've already reached this node on another path, return.
+ if (!ErrNode)
+ return;
+
+ // Generate the report.
+ auto R = llvm::make_unique<BugReport>(*DoubleCloseBugType,
+ "Closing a previously closed file stream", ErrNode);
+ R->addRange(Call.getSourceRange());
+ R->markInteresting(FileDescSym);
+ C.emitReport(std::move(R));
+}
+
+void SimpleStreamChecker::reportLeaks(ArrayRef<SymbolRef> LeakedStreams,
+ CheckerContext &C,
+ ExplodedNode *ErrNode) const {
+ // Attach bug reports to the leak node.
+ // TODO: Identify the leaked file descriptor.
+ for (SymbolRef LeakedStream : LeakedStreams) {
+ auto R = llvm::make_unique<BugReport>(*LeakBugType,
+ "Opened file is never closed; potential resource leak", ErrNode);
+ R->markInteresting(LeakedStream);
+ C.emitReport(std::move(R));
+ }
+}
+
+bool SimpleStreamChecker::guaranteedNotToCloseFile(const CallEvent &Call) const {
+ // If it's not in a system header, assume it might close a file.
+ if (!Call.isInSystemHeader())
+ return false;
+
+ // Handle cases where we know a buffer's /address/ can escape.
+ if (Call.argumentsMayEscape())
+ return false;
+
+ // Note that even though fclose closes the file, we do not treat it
+ // specially here, because the checker itself models that call.
+
+ return true;
+}
+
+// If the pointer we are tracking escaped, do not track the symbol as
+// we cannot reason about it anymore.
+ProgramStateRef
+SimpleStreamChecker::checkPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind) const {
+ // If we know that the call cannot close a file, there is nothing to do.
+ if (Kind == PSK_DirectEscapeOnCall && guaranteedNotToCloseFile(*Call)) {
+ return State;
+ }
+
+ for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
+ E = Escaped.end();
+ I != E; ++I) {
+ SymbolRef Sym = *I;
+
+ // The symbol escaped. Optimistically, assume that the corresponding file
+ // handle will be closed somewhere else.
+ State = State->remove<StreamMap>(Sym);
+ }
+ return State;
+}
+
+void SimpleStreamChecker::initIdentifierInfo(ASTContext &Ctx) const {
+ if (IIfopen)
+ return;
+ IIfopen = &Ctx.Idents.get("fopen");
+ IIfclose = &Ctx.Idents.get("fclose");
+}
+
+void ento::registerSimpleStreamChecker(CheckerManager &mgr) {
+ mgr.registerChecker<SimpleStreamChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
new file mode 100644
index 0000000..79fc701
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
@@ -0,0 +1,253 @@
+//=== StackAddrEscapeChecker.cpp ----------------------------------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the stack address leak checker, which checks whether an
+// invalid stack address is stored into a global or heap location. See
+// CERT DCL30-C.
+//
+//===----------------------------------------------------------------------===//
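+//
+// Two illustrative cases this checker is meant to flag:
+//
+//   int *returnStack(void) {
+//     int x = 0;
+//     return &x; // warn: address of stack memory returned to caller
+//   }
+//
+//   int *P;
+//   void storeStack(void) {
+//     int y = 0;
+//     P = &y; // warn: stack address stored into global variable
+//   }
+//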
+
+#include "ClangSACheckers.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace ento;
+
+namespace {
+class StackAddrEscapeChecker : public Checker< check::PreStmt<ReturnStmt>,
+ check::EndFunction > {
+ mutable std::unique_ptr<BuiltinBug> BT_stackleak;
+ mutable std::unique_ptr<BuiltinBug> BT_returnstack;
+
+public:
+ void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
+ void checkEndFunction(CheckerContext &Ctx) const;
+private:
+ void EmitStackError(CheckerContext &C, const MemRegion *R,
+ const Expr *RetE) const;
+ static SourceRange genName(raw_ostream &os, const MemRegion *R,
+ ASTContext &Ctx);
+};
+}
+
+SourceRange StackAddrEscapeChecker::genName(raw_ostream &os, const MemRegion *R,
+ ASTContext &Ctx) {
+ // Get the base region, stripping away fields and elements.
+ R = R->getBaseRegion();
+ SourceManager &SM = Ctx.getSourceManager();
+ SourceRange range;
+ os << "Address of ";
+
+ // Check if the region is a compound literal.
+ if (const CompoundLiteralRegion* CR = dyn_cast<CompoundLiteralRegion>(R)) {
+ const CompoundLiteralExpr *CL = CR->getLiteralExpr();
+ os << "stack memory associated with a compound literal "
+ "declared on line "
+ << SM.getExpansionLineNumber(CL->getLocStart())
+ << " returned to caller";
+ range = CL->getSourceRange();
+ }
+ else if (const AllocaRegion* AR = dyn_cast<AllocaRegion>(R)) {
+ const Expr *ARE = AR->getExpr();
+ SourceLocation L = ARE->getLocStart();
+ range = ARE->getSourceRange();
+ os << "stack memory allocated by call to alloca() on line "
+ << SM.getExpansionLineNumber(L);
+ }
+ else if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(R)) {
+ const BlockDecl *BD = BR->getCodeRegion()->getDecl();
+ SourceLocation L = BD->getLocStart();
+ range = BD->getSourceRange();
+ os << "stack-allocated block declared on line "
+ << SM.getExpansionLineNumber(L);
+ }
+ else if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ os << "stack memory associated with local variable '"
+ << VR->getString() << '\'';
+ range = VR->getDecl()->getSourceRange();
+ }
+ else if (const CXXTempObjectRegion *TOR = dyn_cast<CXXTempObjectRegion>(R)) {
+ QualType Ty = TOR->getValueType().getLocalUnqualifiedType();
+ os << "stack memory associated with temporary object of type '";
+ Ty.print(os, Ctx.getPrintingPolicy());
+ os << "'";
+ range = TOR->getExpr()->getSourceRange();
+ }
+ else {
+ llvm_unreachable("Invalid region in ReturnStackAddressChecker.");
+ }
+
+ return range;
+}
+
+void StackAddrEscapeChecker::EmitStackError(CheckerContext &C, const MemRegion *R,
+ const Expr *RetE) const {
+ ExplodedNode *N = C.generateErrorNode();
+
+ if (!N)
+ return;
+
+ if (!BT_returnstack)
+ BT_returnstack.reset(
+ new BuiltinBug(this, "Return of address to stack-allocated memory"));
+
+ // Generate a report for this bug.
+ SmallString<512> buf;
+ llvm::raw_svector_ostream os(buf);
+ SourceRange range = genName(os, R, C.getASTContext());
+ os << " returned to caller";
+ auto report = llvm::make_unique<BugReport>(*BT_returnstack, os.str(), N);
+ report->addRange(RetE->getSourceRange());
+ if (range.isValid())
+ report->addRange(range);
+
+ C.emitReport(std::move(report));
+}
+
+void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS,
+ CheckerContext &C) const {
+
+ const Expr *RetE = RS->getRetValue();
+ if (!RetE)
+ return;
+ RetE = RetE->IgnoreParens();
+
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal V = C.getState()->getSVal(RetE, LCtx);
+ const MemRegion *R = V.getAsRegion();
+
+ if (!R)
+ return;
+
+ const StackSpaceRegion *SS =
+ dyn_cast_or_null<StackSpaceRegion>(R->getMemorySpace());
+
+ if (!SS)
+ return;
+
+ // Returning stack memory from an ancestor stack frame is fine.
+ const StackFrameContext *CurFrame = LCtx->getCurrentStackFrame();
+ const StackFrameContext *MemFrame = SS->getStackFrame();
+ if (MemFrame != CurFrame)
+ return;
+
+ // Automatic reference counting automatically copies blocks.
+ if (C.getASTContext().getLangOpts().ObjCAutoRefCount &&
+ isa<BlockDataRegion>(R))
+ return;
+
+ // Returning a record by value is fine. (In this case, the returned
+ // expression will be a copy-constructor, possibly wrapped in an
+ // ExprWithCleanups node.)
+ if (const ExprWithCleanups *Cleanup = dyn_cast<ExprWithCleanups>(RetE))
+ RetE = Cleanup->getSubExpr();
+ if (isa<CXXConstructExpr>(RetE) && RetE->getType()->isRecordType())
+ return;
+
+ // The CK_CopyAndAutoreleaseBlockObject cast causes the block to be copied
+ // so the stack address is not escaping here.
+ if (auto *ICE = dyn_cast<ImplicitCastExpr>(RetE)) {
+ if (isa<BlockDataRegion>(R) &&
+ ICE->getCastKind() == CK_CopyAndAutoreleaseBlockObject) {
+ return;
+ }
+ }
+
+ EmitStackError(C, R, RetE);
+}
+
+void StackAddrEscapeChecker::checkEndFunction(CheckerContext &Ctx) const {
+ ProgramStateRef state = Ctx.getState();
+
+ // Iterate over all bindings to global variables and see if any of them
+ // contains a memory region in the stack space.
+ class CallBack : public StoreManager::BindingsHandler {
+ private:
+ CheckerContext &Ctx;
+ const StackFrameContext *CurSFC;
+ public:
+ SmallVector<std::pair<const MemRegion*, const MemRegion*>, 10> V;
+
+ CallBack(CheckerContext &CC) :
+ Ctx(CC),
+ CurSFC(CC.getLocationContext()->getCurrentStackFrame())
+ {}
+
+ bool HandleBinding(StoreManager &SMgr, Store store,
+ const MemRegion *region, SVal val) override {
+
+ if (!isa<GlobalsSpaceRegion>(region->getMemorySpace()))
+ return true;
+
+ const MemRegion *vR = val.getAsRegion();
+ if (!vR)
+ return true;
+
+ // Under automatic reference counting (ARC), it is okay to assign a
+ // block directly to a global variable, since ARC copies the block.
+ if (Ctx.getASTContext().getLangOpts().ObjCAutoRefCount &&
+ isa<BlockDataRegion>(vR))
+ return true;
+
+ if (const StackSpaceRegion *SSR =
+ dyn_cast<StackSpaceRegion>(vR->getMemorySpace())) {
+ // If the global variable holds a location in the current stack frame,
+ // record the binding to emit a warning.
+ if (SSR->getStackFrame() == CurSFC)
+ V.push_back(std::make_pair(region, vR));
+ }
+
+ return true;
+ }
+ };
+
+ CallBack cb(Ctx);
+ state->getStateManager().getStoreManager().iterBindings(state->getStore(),
+ cb);
+
+ if (cb.V.empty())
+ return;
+
+ // Generate an error node.
+ ExplodedNode *N = Ctx.generateNonFatalErrorNode(state);
+ if (!N)
+ return;
+
+ if (!BT_stackleak)
+ BT_stackleak.reset(
+ new BuiltinBug(this, "Stack address stored into global variable",
+ "Stack address was saved into a global variable. "
+ "This is dangerous because the address will become "
+ "invalid after returning from the function"));
+
+ for (unsigned i = 0, e = cb.V.size(); i != e; ++i) {
+ // Generate a report for this bug.
+ SmallString<512> buf;
+ llvm::raw_svector_ostream os(buf);
+ SourceRange range = genName(os, cb.V[i].second, Ctx.getASTContext());
+ os << " is still referred to by the global variable '";
+ const VarRegion *VR = cast<VarRegion>(cb.V[i].first->getBaseRegion());
+ os << *VR->getDecl()
+ << "' upon returning to the caller. This will be a dangling reference";
+ auto report = llvm::make_unique<BugReport>(*BT_stackleak, os.str(), N);
+ if (range.isValid())
+ report->addRange(range);
+
+ Ctx.emitReport(std::move(report));
+ }
+}
+
+void ento::registerStackAddrEscapeChecker(CheckerManager &mgr) {
+ mgr.registerChecker<StackAddrEscapeChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
new file mode 100644
index 0000000..82b01fe
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -0,0 +1,424 @@
+//===-- StreamChecker.cpp -----------------------------------------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines checkers that model and check stream handling functions.
+//
+//===----------------------------------------------------------------------===//
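+//
+// For illustration, two of the properties modeled here:
+//
+//   FILE *F = fopen("myfile.txt", "r");
+//   if (!F)
+//     return;
+//   fseek(F, 0, 3); // warn: whence should be SEEK_SET, SEEK_CUR, or SEEK_END
+//   fclose(F);
+//   fclose(F);      // warn: double fclose
+//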
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/ImmutableMap.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+struct StreamState {
+ enum Kind { Opened, Closed, OpenFailed, Escaped } K;
+ const Stmt *S;
+
+ StreamState(Kind k, const Stmt *s) : K(k), S(s) {}
+
+ bool isOpened() const { return K == Opened; }
+ bool isClosed() const { return K == Closed; }
+ //bool isOpenFailed() const { return K == OpenFailed; }
+ //bool isEscaped() const { return K == Escaped; }
+
+ bool operator==(const StreamState &X) const {
+ return K == X.K && S == X.S;
+ }
+
+ static StreamState getOpened(const Stmt *s) { return StreamState(Opened, s); }
+ static StreamState getClosed(const Stmt *s) { return StreamState(Closed, s); }
+ static StreamState getOpenFailed(const Stmt *s) {
+ return StreamState(OpenFailed, s);
+ }
+ static StreamState getEscaped(const Stmt *s) {
+ return StreamState(Escaped, s);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(K);
+ ID.AddPointer(S);
+ }
+};
+
+class StreamChecker : public Checker<eval::Call,
+ check::DeadSymbols > {
+ mutable IdentifierInfo *II_fopen, *II_tmpfile, *II_fclose, *II_fread,
+ *II_fwrite,
+ *II_fseek, *II_ftell, *II_rewind, *II_fgetpos, *II_fsetpos,
+ *II_clearerr, *II_feof, *II_ferror, *II_fileno;
+ mutable std::unique_ptr<BuiltinBug> BT_nullfp, BT_illegalwhence,
+ BT_doubleclose, BT_ResourceLeak;
+
+public:
+ StreamChecker()
+ : II_fopen(nullptr), II_tmpfile(nullptr), II_fclose(nullptr),
+ II_fread(nullptr), II_fwrite(nullptr), II_fseek(nullptr),
+ II_ftell(nullptr), II_rewind(nullptr), II_fgetpos(nullptr),
+ II_fsetpos(nullptr), II_clearerr(nullptr), II_feof(nullptr),
+ II_ferror(nullptr), II_fileno(nullptr) {}
+
+ bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+
+private:
+ void Fopen(CheckerContext &C, const CallExpr *CE) const;
+ void Tmpfile(CheckerContext &C, const CallExpr *CE) const;
+ void Fclose(CheckerContext &C, const CallExpr *CE) const;
+ void Fread(CheckerContext &C, const CallExpr *CE) const;
+ void Fwrite(CheckerContext &C, const CallExpr *CE) const;
+ void Fseek(CheckerContext &C, const CallExpr *CE) const;
+ void Ftell(CheckerContext &C, const CallExpr *CE) const;
+ void Rewind(CheckerContext &C, const CallExpr *CE) const;
+ void Fgetpos(CheckerContext &C, const CallExpr *CE) const;
+ void Fsetpos(CheckerContext &C, const CallExpr *CE) const;
+ void Clearerr(CheckerContext &C, const CallExpr *CE) const;
+ void Feof(CheckerContext &C, const CallExpr *CE) const;
+ void Ferror(CheckerContext &C, const CallExpr *CE) const;
+ void Fileno(CheckerContext &C, const CallExpr *CE) const;
+
+ void OpenFileAux(CheckerContext &C, const CallExpr *CE) const;
+
+ ProgramStateRef CheckNullStream(SVal SV, ProgramStateRef state,
+ CheckerContext &C) const;
+ ProgramStateRef CheckDoubleClose(const CallExpr *CE, ProgramStateRef state,
+ CheckerContext &C) const;
+};
+
+} // end anonymous namespace
+
+REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState)
+
+
+bool StreamChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD || FD->getKind() != Decl::Function)
+ return false;
+
+ ASTContext &Ctx = C.getASTContext();
+ if (!II_fopen)
+ II_fopen = &Ctx.Idents.get("fopen");
+ if (!II_tmpfile)
+ II_tmpfile = &Ctx.Idents.get("tmpfile");
+ if (!II_fclose)
+ II_fclose = &Ctx.Idents.get("fclose");
+ if (!II_fread)
+ II_fread = &Ctx.Idents.get("fread");
+ if (!II_fwrite)
+ II_fwrite = &Ctx.Idents.get("fwrite");
+ if (!II_fseek)
+ II_fseek = &Ctx.Idents.get("fseek");
+ if (!II_ftell)
+ II_ftell = &Ctx.Idents.get("ftell");
+ if (!II_rewind)
+ II_rewind = &Ctx.Idents.get("rewind");
+ if (!II_fgetpos)
+ II_fgetpos = &Ctx.Idents.get("fgetpos");
+ if (!II_fsetpos)
+ II_fsetpos = &Ctx.Idents.get("fsetpos");
+ if (!II_clearerr)
+ II_clearerr = &Ctx.Idents.get("clearerr");
+ if (!II_feof)
+ II_feof = &Ctx.Idents.get("feof");
+ if (!II_ferror)
+ II_ferror = &Ctx.Idents.get("ferror");
+ if (!II_fileno)
+ II_fileno = &Ctx.Idents.get("fileno");
+
+ if (FD->getIdentifier() == II_fopen) {
+ Fopen(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_tmpfile) {
+ Tmpfile(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fclose) {
+ Fclose(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fread) {
+ Fread(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fwrite) {
+ Fwrite(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fseek) {
+ Fseek(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_ftell) {
+ Ftell(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_rewind) {
+ Rewind(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fgetpos) {
+ Fgetpos(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fsetpos) {
+ Fsetpos(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_clearerr) {
+ Clearerr(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_feof) {
+ Feof(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_ferror) {
+ Ferror(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fileno) {
+ Fileno(C, CE);
+ return true;
+ }
+
+ return false;
+}
+
+void StreamChecker::Fopen(CheckerContext &C, const CallExpr *CE) const {
+ OpenFileAux(C, CE);
+}
+
+void StreamChecker::Tmpfile(CheckerContext &C, const CallExpr *CE) const {
+ OpenFileAux(C, CE);
+}
+
+void StreamChecker::OpenFileAux(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
+ DefinedSVal RetVal = svalBuilder.conjureSymbolVal(nullptr, CE, LCtx,
+ C.blockCount())
+ .castAs<DefinedSVal>();
+ state = state->BindExpr(CE, C.getLocationContext(), RetVal);
+
+ ConstraintManager &CM = C.getConstraintManager();
+ // Bifurcate the state into two: one in which the returned FILE* is valid,
+ // the other in which it is NULL.
+ ProgramStateRef stateNotNull, stateNull;
+ std::tie(stateNotNull, stateNull) = CM.assumeDual(state, RetVal);
+
+ if (SymbolRef Sym = RetVal.getAsSymbol()) {
+ // On the non-NULL path, mark the stream as opened; on the NULL path,
+ // record that the open failed.
+ stateNotNull =
+ stateNotNull->set<StreamMap>(Sym, StreamState::getOpened(CE));
+ stateNull =
+ stateNull->set<StreamMap>(Sym, StreamState::getOpenFailed(CE));
+
+ C.addTransition(stateNotNull);
+ C.addTransition(stateNull);
+ }
+}
+
+void StreamChecker::Fclose(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = CheckDoubleClose(CE, C.getState(), C);
+ if (state)
+ C.addTransition(state);
+}
+
+void StreamChecker::Fread(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(3), C.getLocationContext()),
+ state, C))
+ return;
+}
+
+void StreamChecker::Fwrite(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(3), C.getLocationContext()),
+ state, C))
+ return;
+}
+
+void StreamChecker::Fseek(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!(state = CheckNullStream(state->getSVal(CE->getArg(0),
+ C.getLocationContext()), state, C)))
+ return;
+ // Check the legality of the 'whence' argument of 'fseek'.
+ SVal Whence = state->getSVal(CE->getArg(2), C.getLocationContext());
+ Optional<nonloc::ConcreteInt> CI = Whence.getAs<nonloc::ConcreteInt>();
+
+ if (!CI)
+ return;
+
+ int64_t x = CI->getValue().getSExtValue();
+ if (x >= 0 && x <= 2)
+ return;
+
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(state)) {
+ if (!BT_illegalwhence)
+ BT_illegalwhence.reset(
+ new BuiltinBug(this, "Illegal whence argument",
+ "The whence argument to fseek() should be "
+ "SEEK_SET, SEEK_END, or SEEK_CUR."));
+ C.emitReport(llvm::make_unique<BugReport>(
+ *BT_illegalwhence, BT_illegalwhence->getDescription(), N));
+ }
+}
+
+void StreamChecker::Ftell(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
+ return;
+}
+
+void StreamChecker::Rewind(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
+ return;
+}
+
+void StreamChecker::Fgetpos(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
+ return;
+}
+
+void StreamChecker::Fsetpos(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
+ return;
+}
+
+void StreamChecker::Clearerr(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
+ return;
+}
+
+void StreamChecker::Feof(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
+ return;
+}
+
+void StreamChecker::Ferror(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
+ return;
+}
+
+void StreamChecker::Fileno(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
+ return;
+}
+
+ProgramStateRef StreamChecker::CheckNullStream(SVal SV, ProgramStateRef state,
+ CheckerContext &C) const {
+ Optional<DefinedSVal> DV = SV.getAs<DefinedSVal>();
+ if (!DV)
+ return nullptr;
+
+ ConstraintManager &CM = C.getConstraintManager();
+ ProgramStateRef stateNotNull, stateNull;
+ std::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);
+
+ if (!stateNotNull && stateNull) {
+ if (ExplodedNode *N = C.generateErrorNode(stateNull)) {
+ if (!BT_nullfp)
+ BT_nullfp.reset(new BuiltinBug(this, "NULL stream pointer",
+ "Stream pointer might be NULL."));
+ C.emitReport(llvm::make_unique<BugReport>(
+ *BT_nullfp, BT_nullfp->getDescription(), N));
+ }
+ return nullptr;
+ }
+ return stateNotNull;
+}
+
+ProgramStateRef StreamChecker::CheckDoubleClose(const CallExpr *CE,
+ ProgramStateRef state,
+ CheckerContext &C) const {
+ SymbolRef Sym =
+ state->getSVal(CE->getArg(0), C.getLocationContext()).getAsSymbol();
+ if (!Sym)
+ return state;
+
+ const StreamState *SS = state->get<StreamMap>(Sym);
+
+ // If the file stream is not tracked, return.
+ if (!SS)
+ return state;
+
+ // Check: double-closing a file stream causes undefined behavior, per the
+ // man pages.
+ if (SS->isClosed()) {
+ ExplodedNode *N = C.generateErrorNode();
+ if (N) {
+ if (!BT_doubleclose)
+ BT_doubleclose.reset(new BuiltinBug(
+ this, "Double fclose", "Try to close a file Descriptor already"
+ " closed. Cause undefined behaviour."));
+ C.emitReport(llvm::make_unique<BugReport>(
+ *BT_doubleclose, BT_doubleclose->getDescription(), N));
+ }
+ return nullptr;
+ }
+
+ // Mark the stream as closed.
+ return state->set<StreamMap>(Sym, StreamState::getClosed(CE));
+}
+
+void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
+ CheckerContext &C) const {
+ // TODO: Clean up the state.
+ for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
+ E = SymReaper.dead_end(); I != E; ++I) {
+ SymbolRef Sym = *I;
+ ProgramStateRef state = C.getState();
+ const StreamState *SS = state->get<StreamMap>(Sym);
+ if (!SS)
+ continue;
+
+ if (SS->isOpened()) {
+ ExplodedNode *N = C.generateErrorNode();
+ if (N) {
+ if (!BT_ResourceLeak)
+ BT_ResourceLeak.reset(new BuiltinBug(
+ this, "Resource Leak",
+ "Opened File never closed. Potential Resource leak."));
+ C.emitReport(llvm::make_unique<BugReport>(
+ *BT_ResourceLeak, BT_ResourceLeak->getDescription(), N));
+ }
+ }
+ }
+}
+
+void ento::registerStreamChecker(CheckerManager &mgr) {
+ mgr.registerChecker<StreamChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
new file mode 100644
index 0000000..2e05290
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
@@ -0,0 +1,62 @@
+//== TaintTesterChecker.cpp ----------------------------------- -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker can be used for testing how tainted data is propagated.
+//
+//===----------------------------------------------------------------------===//
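+//
+// Illustrative use, assuming a taint source modeled elsewhere (e.g. scanf in
+// GenericTaintChecker):
+//
+//   int x;
+//   scanf("%d", &x);
+//   int y = x + 1; // 'x' and 'x + 1' are reported as "tainted"
+//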
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class TaintTesterChecker : public Checker< check::PostStmt<Expr> > {
+
+ mutable std::unique_ptr<BugType> BT;
+ void initBugType() const;
+
+ /// Given a pointer argument, get the symbol of the value it contains
+ /// (points to).
+ SymbolRef getPointedToSymbol(CheckerContext &C,
+ const Expr* Arg,
+ bool IssueWarning = true) const;
+
+public:
+ void checkPostStmt(const Expr *E, CheckerContext &C) const;
+};
+}
+
+inline void TaintTesterChecker::initBugType() const {
+ if (!BT)
+ BT.reset(new BugType(this, "Tainted data", "General"));
+}
+
+void TaintTesterChecker::checkPostStmt(const Expr *E,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ if (!State)
+ return;
+
+ if (State->isTainted(E, C.getLocationContext())) {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
+ initBugType();
+ auto report = llvm::make_unique<BugReport>(*BT, "tainted", N);
+ report->addRange(E->getSourceRange());
+ C.emitReport(std::move(report));
+ }
+ }
+}
+
+void ento::registerTaintTesterChecker(CheckerManager &mgr) {
+ mgr.registerChecker<TaintTesterChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
new file mode 100644
index 0000000..b794d2f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
@@ -0,0 +1,265 @@
+//== TestAfterDivZeroChecker.cpp - Test after division by zero checker --*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines TestAfterDivZeroChecker, a builtin check that warns about
+// division by zero where the division occurs before the divisor is compared
+// with zero.
+//
+//===----------------------------------------------------------------------===//
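+//
+// A minimal example of the pattern this checker flags:
+//
+//   int f(int x) {
+//     int r = 100 / x; // the division happens first...
+//     if (x == 0)      // warn: ...and the divisor is only then compared
+//       return 0;      // against zero
+//     return r;
+//   }
+//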
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/FoldingSet.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class ZeroState {
+private:
+ SymbolRef ZeroSymbol;
+ unsigned BlockID;
+ const StackFrameContext *SFC;
+
+public:
+ ZeroState(SymbolRef S, unsigned B, const StackFrameContext *SFC)
+ : ZeroSymbol(S), BlockID(B), SFC(SFC) {}
+
+ const StackFrameContext *getStackFrameContext() const { return SFC; }
+
+ bool operator==(const ZeroState &X) const {
+ return BlockID == X.BlockID && SFC == X.SFC && ZeroSymbol == X.ZeroSymbol;
+ }
+
+ bool operator<(const ZeroState &X) const {
+ if (BlockID != X.BlockID)
+ return BlockID < X.BlockID;
+ if (SFC != X.SFC)
+ return SFC < X.SFC;
+ return ZeroSymbol < X.ZeroSymbol;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(BlockID);
+ ID.AddPointer(SFC);
+ ID.AddPointer(ZeroSymbol);
+ }
+};
+
+class DivisionBRVisitor : public BugReporterVisitorImpl<DivisionBRVisitor> {
+private:
+ SymbolRef ZeroSymbol;
+ const StackFrameContext *SFC;
+ bool Satisfied;
+
+public:
+ DivisionBRVisitor(SymbolRef ZeroSymbol, const StackFrameContext *SFC)
+ : ZeroSymbol(ZeroSymbol), SFC(SFC), Satisfied(false) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ ID.Add(ZeroSymbol);
+ ID.Add(SFC);
+ }
+
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *Succ,
+ const ExplodedNode *Pred,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
+};
+
+class TestAfterDivZeroChecker
+ : public Checker<check::PreStmt<BinaryOperator>, check::BranchCondition,
+ check::EndFunction> {
+ mutable std::unique_ptr<BuiltinBug> DivZeroBug;
+ void reportBug(SVal Val, CheckerContext &C) const;
+
+public:
+ void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
+ void checkBranchCondition(const Stmt *Condition, CheckerContext &C) const;
+ void checkEndFunction(CheckerContext &C) const;
+ void setDivZeroMap(SVal Var, CheckerContext &C) const;
+ bool hasDivZeroMap(SVal Var, const CheckerContext &C) const;
+ bool isZero(SVal S, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+REGISTER_SET_WITH_PROGRAMSTATE(DivZeroMap, ZeroState)
+
+PathDiagnosticPiece *DivisionBRVisitor::VisitNode(const ExplodedNode *Succ,
+ const ExplodedNode *Pred,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+ if (Satisfied)
+ return nullptr;
+
+ const Expr *E = nullptr;
+
+ if (Optional<PostStmt> P = Succ->getLocationAs<PostStmt>())
+ if (const BinaryOperator *BO = P->getStmtAs<BinaryOperator>()) {
+ BinaryOperator::Opcode Op = BO->getOpcode();
+ if (Op == BO_Div || Op == BO_Rem || Op == BO_DivAssign ||
+ Op == BO_RemAssign) {
+ E = BO->getRHS();
+ }
+ }
+
+ if (!E)
+ return nullptr;
+
+ ProgramStateRef State = Succ->getState();
+ SVal S = State->getSVal(E, Succ->getLocationContext());
+ if (ZeroSymbol == S.getAsSymbol() && SFC == Succ->getStackFrame()) {
+ Satisfied = true;
+
+ // Construct a new PathDiagnosticPiece.
+ ProgramPoint P = Succ->getLocation();
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::create(P, BRC.getSourceManager());
+
+ if (!L.isValid() || !L.asLocation().isValid())
+ return nullptr;
+
+ return new PathDiagnosticEventPiece(
+ L, "Division with compared value made here");
+ }
+
+ return nullptr;
+}
+
+bool TestAfterDivZeroChecker::isZero(SVal S, CheckerContext &C) const {
+ Optional<DefinedSVal> DSV = S.getAs<DefinedSVal>();
+
+ if (!DSV)
+ return false;
+
+ ConstraintManager &CM = C.getConstraintManager();
+ return !CM.assume(C.getState(), *DSV, true);
+}
+
+void TestAfterDivZeroChecker::setDivZeroMap(SVal Var, CheckerContext &C) const {
+ SymbolRef SR = Var.getAsSymbol();
+ if (!SR)
+ return;
+
+ ProgramStateRef State = C.getState();
+ State =
+ State->add<DivZeroMap>(ZeroState(SR, C.getBlockID(), C.getStackFrame()));
+ C.addTransition(State);
+}
+
+bool TestAfterDivZeroChecker::hasDivZeroMap(SVal Var,
+ const CheckerContext &C) const {
+ SymbolRef SR = Var.getAsSymbol();
+ if (!SR)
+ return false;
+
+ ZeroState ZS(SR, C.getBlockID(), C.getStackFrame());
+ return C.getState()->contains<DivZeroMap>(ZS);
+}
+
+void TestAfterDivZeroChecker::reportBug(SVal Val, CheckerContext &C) const {
+ if (ExplodedNode *N = C.generateErrorNode(C.getState())) {
+ if (!DivZeroBug)
+ DivZeroBug.reset(new BuiltinBug(this, "Division by zero"));
+
+ auto R = llvm::make_unique<BugReport>(
+ *DivZeroBug, "Value being compared against zero has already been used "
+ "for division",
+ N);
+
+ R->addVisitor(llvm::make_unique<DivisionBRVisitor>(Val.getAsSymbol(),
+ C.getStackFrame()));
+ C.emitReport(std::move(R));
+ }
+}
+
+void TestAfterDivZeroChecker::checkEndFunction(CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ DivZeroMapTy DivZeroes = State->get<DivZeroMap>();
+ if (DivZeroes.isEmpty())
+ return;
+
+ DivZeroMapTy::Factory &F = State->get_context<DivZeroMap>();
+ for (llvm::ImmutableSet<ZeroState>::iterator I = DivZeroes.begin(),
+ E = DivZeroes.end();
+ I != E; ++I) {
+ ZeroState ZS = *I;
+ if (ZS.getStackFrameContext() == C.getStackFrame())
+ DivZeroes = F.remove(DivZeroes, ZS);
+ }
+ C.addTransition(State->set<DivZeroMap>(DivZeroes));
+}
+
+void TestAfterDivZeroChecker::checkPreStmt(const BinaryOperator *B,
+ CheckerContext &C) const {
+ BinaryOperator::Opcode Op = B->getOpcode();
+ if (Op == BO_Div || Op == BO_Rem || Op == BO_DivAssign ||
+ Op == BO_RemAssign) {
+ SVal S = C.getSVal(B->getRHS());
+
+ if (!isZero(S, C))
+ setDivZeroMap(S, C);
+ }
+}
+
+void TestAfterDivZeroChecker::checkBranchCondition(const Stmt *Condition,
+ CheckerContext &C) const {
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(Condition)) {
+ if (B->isComparisonOp()) {
+ const IntegerLiteral *IntLiteral = dyn_cast<IntegerLiteral>(B->getRHS());
+ bool LRHS = true;
+ if (!IntLiteral) {
+ IntLiteral = dyn_cast<IntegerLiteral>(B->getLHS());
+ LRHS = false;
+ }
+
+ if (!IntLiteral || IntLiteral->getValue() != 0)
+ return;
+
+ SVal Val = C.getSVal(LRHS ? B->getLHS() : B->getRHS());
+ if (hasDivZeroMap(Val, C))
+ reportBug(Val, C);
+ }
+ } else if (const UnaryOperator *U = dyn_cast<UnaryOperator>(Condition)) {
+ if (U->getOpcode() == UO_LNot) {
+ SVal Val;
+ if (const ImplicitCastExpr *I =
+ dyn_cast<ImplicitCastExpr>(U->getSubExpr()))
+ Val = C.getSVal(I->getSubExpr());
+
+ if (hasDivZeroMap(Val, C))
+ reportBug(Val, C);
+ else {
+ Val = C.getSVal(U->getSubExpr());
+ if (hasDivZeroMap(Val, C))
+ reportBug(Val, C);
+ }
+ }
+ } else if (const ImplicitCastExpr *IE =
+ dyn_cast<ImplicitCastExpr>(Condition)) {
+ SVal Val = C.getSVal(IE->getSubExpr());
+
+ if (hasDivZeroMap(Val, C))
+ reportBug(Val, C);
+ else {
+ SVal Val = C.getSVal(Condition);
+
+ if (hasDivZeroMap(Val, C))
+ reportBug(Val, C);
+ }
+ }
+}
+
+void ento::registerTestAfterDivZeroChecker(CheckerManager &mgr) {
+ mgr.registerChecker<TestAfterDivZeroChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
new file mode 100644
index 0000000..d02d2df
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
@@ -0,0 +1,107 @@
+//== TraversalChecker.cpp -------------------------------------- -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These checkers print various aspects of the ExprEngine's traversal of the CFG
+// as it builds the ExplodedGraph.
+//
+//===----------------------------------------------------------------------===//
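+//
+// For example, TraversalDumper prints one line per branch condition, giving
+// the spelling line number and the statement class of the enclosing
+// construct, so "if (x) ..." on line 3 would produce something like:
+//
+//   3 IfStmt
+//
+// CallDumper prints each call, indented by its inlining depth, followed by a
+// "Returning ..." line with the returned value.
+//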
+#include "ClangSACheckers.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class TraversalDumper : public Checker< check::BranchCondition,
+ check::EndFunction > {
+public:
+ void checkBranchCondition(const Stmt *Condition, CheckerContext &C) const;
+ void checkEndFunction(CheckerContext &C) const;
+};
+}
+
+void TraversalDumper::checkBranchCondition(const Stmt *Condition,
+ CheckerContext &C) const {
+ // Special-case Objective-C's for-in loop, which uses the entire loop as its
+ // condition. In that case we print the loop statement itself.
+ const Stmt *Parent = dyn_cast<ObjCForCollectionStmt>(Condition);
+ if (!Parent) {
+ const ParentMap &Parents = C.getLocationContext()->getParentMap();
+ Parent = Parents.getParent(Condition);
+ }
+
+ // It is mildly evil to print directly to llvm::outs() rather than emitting
+ // warnings, but this ensures things do not get filtered out by the rest of
+ // the static analyzer machinery.
+ SourceLocation Loc = Parent->getLocStart();
+ llvm::outs() << C.getSourceManager().getSpellingLineNumber(Loc) << " "
+ << Parent->getStmtClassName() << "\n";
+}
+
+void TraversalDumper::checkEndFunction(CheckerContext &C) const {
+ llvm::outs() << "--END FUNCTION--\n";
+}
+
+void ento::registerTraversalDumper(CheckerManager &mgr) {
+ mgr.registerChecker<TraversalDumper>();
+}
+
+//------------------------------------------------------------------------------
+
+namespace {
+class CallDumper : public Checker< check::PreCall,
+ check::PostCall > {
+public:
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+};
+}
+
+void CallDumper::checkPreCall(const CallEvent &Call, CheckerContext &C) const {
+ unsigned Indentation = 0;
+ for (const LocationContext *LC = C.getLocationContext()->getParent();
+ LC != nullptr; LC = LC->getParent())
+ ++Indentation;
+
+ // It is mildly evil to print directly to llvm::outs() rather than emitting
+ // warnings, but this ensures things do not get filtered out by the rest of
+ // the static analyzer machinery.
+ llvm::outs().indent(Indentation);
+ Call.dump(llvm::outs());
+}
+
+void CallDumper::checkPostCall(const CallEvent &Call, CheckerContext &C) const {
+ const Expr *CallE = Call.getOriginExpr();
+ if (!CallE)
+ return;
+
+ unsigned Indentation = 0;
+ for (const LocationContext *LC = C.getLocationContext()->getParent();
+ LC != nullptr; LC = LC->getParent())
+ ++Indentation;
+
+ // It is mildly evil to print directly to llvm::outs() rather than emitting
+ // warnings, but this ensures things do not get filtered out by the rest of
+ // the static analyzer machinery.
+ llvm::outs().indent(Indentation);
+ if (Call.getResultType()->isVoidType())
+ llvm::outs() << "Returning void\n";
+ else
+ llvm::outs() << "Returning " << C.getSVal(CallE) << "\n";
+}
+
+void ento::registerCallDumper(CheckerManager &mgr) {
+ mgr.registerChecker<CallDumper>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
new file mode 100644
index 0000000..ed17610
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
@@ -0,0 +1,110 @@
+//=== UndefBranchChecker.cpp -----------------------------------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines UndefBranchChecker, which checks for undefined branch
+// conditions.
+//
+//===----------------------------------------------------------------------===//
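+//
+// For example:
+//
+//   int x;
+//   if (x) // warn: branch condition evaluates to a garbage value
+//     ;
+//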
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class UndefBranchChecker : public Checker<check::BranchCondition> {
+ mutable std::unique_ptr<BuiltinBug> BT;
+
+ struct FindUndefExpr {
+ ProgramStateRef St;
+ const LocationContext *LCtx;
+
+ FindUndefExpr(ProgramStateRef S, const LocationContext *L)
+ : St(S), LCtx(L) {}
+
+ const Expr *FindExpr(const Expr *Ex) {
+ if (!MatchesCriteria(Ex))
+ return nullptr;
+
+ for (const Stmt *SubStmt : Ex->children())
+ if (const Expr *ExI = dyn_cast_or_null<Expr>(SubStmt))
+ if (const Expr *E2 = FindExpr(ExI))
+ return E2;
+
+ return Ex;
+ }
+
+ bool MatchesCriteria(const Expr *Ex) {
+ return St->getSVal(Ex, LCtx).isUndef();
+ }
+ };
+
+public:
+ void checkBranchCondition(const Stmt *Condition, CheckerContext &Ctx) const;
+};
+
+}
+
+void UndefBranchChecker::checkBranchCondition(const Stmt *Condition,
+ CheckerContext &Ctx) const {
+ SVal X = Ctx.getState()->getSVal(Condition, Ctx.getLocationContext());
+ if (X.isUndef()) {
+ // Generate a sink node, which implicitly marks both outgoing branches as
+ // infeasible.
+ ExplodedNode *N = Ctx.generateErrorNode();
+ if (N) {
+ if (!BT)
+ BT.reset(new BuiltinBug(
+ this, "Branch condition evaluates to a garbage value"));
+
+ // What's going on here: we want to highlight the subexpression of the
+ // condition that is the most likely source of the "uninitialized
+ // branch condition." We do a recursive walk of the condition's
+ // subexpressions and roughly look for the most nested subexpression
+ // that binds to Undefined. We then highlight that expression's range.
+
+ // Get the predecessor node and check whether it is a PostStmt whose Stmt
+ // is the terminator condition. We want to inspect the state of that node
+ // instead, because it will contain more information about the
+ // subexpressions.
+
+ // Note: any predecessor will do. They should have identical state, since
+ // all the BlockEdge did was act as an error sink; the value had to be
+ // undefined already.
+ assert (!N->pred_empty());
+ const Expr *Ex = cast<Expr>(Condition);
+ ExplodedNode *PrevN = *N->pred_begin();
+ ProgramPoint P = PrevN->getLocation();
+ ProgramStateRef St = N->getState();
+
+ if (Optional<PostStmt> PS = P.getAs<PostStmt>())
+ if (PS->getStmt() == Ex)
+ St = PrevN->getState();
+
+ FindUndefExpr FindIt(St, Ctx.getLocationContext());
+ Ex = FindIt.FindExpr(Ex);
+
+ // Emit the bug report.
+ auto R = llvm::make_unique<BugReport>(*BT, BT->getDescription(), N);
+ bugreporter::trackNullOrUndefValue(N, Ex, *R);
+ R->addRange(Ex->getSourceRange());
+
+ Ctx.emitReport(std::move(R));
+ }
+ }
+}
+
+void ento::registerUndefBranchChecker(CheckerManager &mgr) {
+ mgr.registerChecker<UndefBranchChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
new file mode 100644
index 0000000..17fe861
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
@@ -0,0 +1,104 @@
+// UndefCapturedBlockVarChecker.cpp - Uninitialized captured vars -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker detects blocks that capture uninitialized values.
+//
+//===----------------------------------------------------------------------===//
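+//
+// For example:
+//
+//   int x;
+//   ^{ int y = x; }(); // warn: 'x' is uninitialized when captured by block
+//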
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Attr.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class UndefCapturedBlockVarChecker
+ : public Checker< check::PostStmt<BlockExpr> > {
+ mutable std::unique_ptr<BugType> BT;
+
+public:
+ void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+static const DeclRefExpr *FindBlockDeclRefExpr(const Stmt *S,
+ const VarDecl *VD) {
+ if (const DeclRefExpr *BR = dyn_cast<DeclRefExpr>(S))
+ if (BR->getDecl() == VD)
+ return BR;
+
+ for (const Stmt *Child : S->children())
+ if (Child)
+ if (const DeclRefExpr *BR = FindBlockDeclRefExpr(Child, VD))
+ return BR;
+
+ return nullptr;
+}
+
+void
+UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE,
+ CheckerContext &C) const {
+ if (!BE->getBlockDecl()->hasCaptures())
+ return;
+
+ ProgramStateRef state = C.getState();
+ const BlockDataRegion *R =
+ cast<BlockDataRegion>(state->getSVal(BE,
+ C.getLocationContext()).getAsRegion());
+
+ BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
+ E = R->referenced_vars_end();
+
+ for (; I != E; ++I) {
+ // This VarRegion is the region associated with the block; we need
+ // the one associated with the encompassing context.
+ const VarRegion *VR = I.getCapturedRegion();
+ const VarDecl *VD = VR->getDecl();
+
+ if (VD->hasAttr<BlocksAttr>() || !VD->hasLocalStorage())
+ continue;
+
+ // Get the VarRegion associated with VD in the local stack frame.
+ if (Optional<UndefinedVal> V =
+ state->getSVal(I.getOriginalRegion()).getAs<UndefinedVal>()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
+ if (!BT)
+ BT.reset(
+ new BuiltinBug(this, "uninitialized variable captured by block"));
+
+ // Generate a bug report.
+ SmallString<128> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << "Variable '" << VD->getName()
+ << "' is uninitialized when captured by block";
+
+ auto R = llvm::make_unique<BugReport>(*BT, os.str(), N);
+ if (const Expr *Ex = FindBlockDeclRefExpr(BE->getBody(), VD))
+ R->addRange(Ex->getSourceRange());
+ R->addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
+ *V, VR, /*EnableNullFPSuppression*/ false));
+ R->disablePathPruning();
+ // FIXME: This report should also include the location of the block.
+ C.emitReport(std::move(R));
+ }
+ }
+ }
+}
+
+void ento::registerUndefCapturedBlockVarChecker(CheckerManager &mgr) {
+ mgr.registerChecker<UndefCapturedBlockVarChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
new file mode 100644
index 0000000..38d2aa6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
@@ -0,0 +1,101 @@
+//=== UndefResultChecker.cpp ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines UndefResultChecker, a builtin check in ExprEngine that
+// performs checks for undefined results of non-assignment binary operators.
+//
+//===----------------------------------------------------------------------===//
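+//
+// For example:
+//
+//   int x;
+//   int y = x + 1; // warn: the left operand of '+' is a garbage value
+//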
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class UndefResultChecker
+ : public Checker< check::PostStmt<BinaryOperator> > {
+
+ mutable std::unique_ptr<BugType> BT;
+
+public:
+ void checkPostStmt(const BinaryOperator *B, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+ if (state->getSVal(B, LCtx).isUndef()) {
+
+ // Do not report assignments of uninitialized values inside swap functions.
+ // This should allow swapping partially uninitialized structs
+ // (radar://14129997)
+ if (const FunctionDecl *EnclosingFunctionDecl =
+ dyn_cast<FunctionDecl>(C.getStackFrame()->getDecl()))
+ if (C.getCalleeName(EnclosingFunctionDecl) == "swap")
+ return;
+
+ // Generate an error node.
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return;
+
+ if (!BT)
+ BT.reset(
+ new BuiltinBug(this, "Result of operation is garbage or undefined"));
+
+ SmallString<256> sbuf;
+ llvm::raw_svector_ostream OS(sbuf);
+ const Expr *Ex = nullptr;
+ bool isLeft = true;
+
+ if (state->getSVal(B->getLHS(), LCtx).isUndef()) {
+ Ex = B->getLHS()->IgnoreParenCasts();
+ isLeft = true;
+ }
+ else if (state->getSVal(B->getRHS(), LCtx).isUndef()) {
+ Ex = B->getRHS()->IgnoreParenCasts();
+ isLeft = false;
+ }
+
+ if (Ex) {
+ OS << "The " << (isLeft ? "left" : "right")
+ << " operand of '"
+ << BinaryOperator::getOpcodeStr(B->getOpcode())
+ << "' is a garbage value";
+ }
+ else {
+ // Neither operand was undefined, but the result is undefined.
+ OS << "The result of the '"
+ << BinaryOperator::getOpcodeStr(B->getOpcode())
+ << "' expression is undefined";
+ }
+ auto report = llvm::make_unique<BugReport>(*BT, OS.str(), N);
+ if (Ex) {
+ report->addRange(Ex->getSourceRange());
+ bugreporter::trackNullOrUndefValue(N, Ex, *report);
+ }
+ else
+ bugreporter::trackNullOrUndefValue(N, B, *report);
+
+ C.emitReport(std::move(report));
+ }
+}
+
+void ento::registerUndefResultChecker(CheckerManager &mgr) {
+ mgr.registerChecker<UndefResultChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
new file mode 100644
index 0000000..fe07eaf
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
@@ -0,0 +1,64 @@
+//===--- UndefinedArraySubscriptChecker.h ----------------------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines UndefinedArraySubscriptChecker, a builtin check in ExprEngine
+// that performs checks for undefined array subscripts.
+//
+//===----------------------------------------------------------------------===//
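+//
+// For example:
+//
+//   int a[10];
+//   int i;
+//   a[i] = 0; // warn: array subscript is undefined
+//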
+
+#include "ClangSACheckers.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class UndefinedArraySubscriptChecker
+ : public Checker< check::PreStmt<ArraySubscriptExpr> > {
+ mutable std::unique_ptr<BugType> BT;
+
+public:
+ void checkPreStmt(const ArraySubscriptExpr *A, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+void
+UndefinedArraySubscriptChecker::checkPreStmt(const ArraySubscriptExpr *A,
+ CheckerContext &C) const {
+ const Expr *Index = A->getIdx();
+ if (!C.getSVal(Index).isUndef())
+ return;
+
+ // Sema generates anonymous array variables for copying array struct fields.
+ // Don't warn if we're in an implicitly-generated constructor.
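+ // For illustration (hypothetical code): given
+ //   struct S { int a[4]; };
+ //   S b = s;   // implicit copy constructor
+ // Sema synthesizes an element-wise copy whose index must not be flagged.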
+ const Decl *D = C.getLocationContext()->getDecl();
+ if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(D))
+ if (Ctor->isDefaulted())
+ return;
+
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return;
+ if (!BT)
+ BT.reset(new BuiltinBug(this, "Array subscript is undefined"));
+
+ // Generate a report for this bug.
+ auto R = llvm::make_unique<BugReport>(*BT, BT->getName(), N);
+ R->addRange(A->getIdx()->getSourceRange());
+ bugreporter::trackNullOrUndefValue(N, A->getIdx(), *R);
+ C.emitReport(std::move(R));
+}
+
+void ento::registerUndefinedArraySubscriptChecker(CheckerManager &mgr) {
+ mgr.registerChecker<UndefinedArraySubscriptChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
new file mode 100644
index 0000000..7a31efc
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
@@ -0,0 +1,96 @@
+//===--- UndefinedAssignmentChecker.cpp -------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines UndefinedAssignmentChecker, a builtin check in ExprEngine that
+// checks for assigning undefined values.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class UndefinedAssignmentChecker
+ : public Checker<check::Bind> {
+ mutable std::unique_ptr<BugType> BT;
+
+public:
+ void checkBind(SVal location, SVal val, const Stmt *S,
+ CheckerContext &C) const;
+};
+}
+
+void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
+ const Stmt *StoreE,
+ CheckerContext &C) const {
+ if (!val.isUndef())
+ return;
+
+ // Do not report assignments of uninitialized values inside swap functions.
+ // This allows swapping partially uninitialized structs
+ // (radar://14129997)
+ if (const FunctionDecl *EnclosingFunctionDecl =
+ dyn_cast<FunctionDecl>(C.getStackFrame()->getDecl()))
+ if (C.getCalleeName(EnclosingFunctionDecl) == "swap")
+ return;
+
+ ExplodedNode *N = C.generateErrorNode();
+
+ if (!N)
+ return;
+
+ const char *str = "Assigned value is garbage or undefined";
+
+ if (!BT)
+ BT.reset(new BuiltinBug(this, str));
+
+ // Generate a report for this bug.
+ const Expr *ex = nullptr;
+
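+ // Single-pass 'while' used for early exits: pick which subexpression to
+ // highlight (the LHS of a compound assignment, the RHS of a plain
+ // assignment, or a variable initializer).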
+ while (StoreE) {
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(StoreE)) {
+ if (B->isCompoundAssignmentOp()) {
+ ProgramStateRef state = C.getState();
+ if (state->getSVal(B->getLHS(), C.getLocationContext()).isUndef()) {
+ str = "The left expression of the compound assignment is an "
+ "uninitialized value. The computed value will also be garbage";
+ ex = B->getLHS();
+ break;
+ }
+ }
+
+ ex = B->getRHS();
+ break;
+ }
+
+ if (const DeclStmt *DS = dyn_cast<DeclStmt>(StoreE)) {
+ // Guard against a non-VarDecl single declaration before dereferencing.
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl()))
+ ex = VD->getInit();
+ }
+
+ break;
+ }
+
+ auto R = llvm::make_unique<BugReport>(*BT, str, N);
+ if (ex) {
+ R->addRange(ex->getSourceRange());
+ bugreporter::trackNullOrUndefValue(N, ex, *R);
+ }
+ C.emitReport(std::move(R));
+}
+
+void ento::registerUndefinedAssignmentChecker(CheckerManager &mgr) {
+ mgr.registerChecker<UndefinedAssignmentChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
new file mode 100644
index 0000000..4b78c20
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -0,0 +1,382 @@
+//= UnixAPIChecker.cpp - Checks preconditions for various Unix APIs -*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines UnixAPIChecker, which is an assortment of checks on calls
+// to various widely used UNIX/POSIX functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/raw_ostream.h"
+#include <fcntl.h>
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class UnixAPIChecker : public Checker< check::PreStmt<CallExpr> > {
+ mutable std::unique_ptr<BugType> BT_open, BT_pthreadOnce, BT_mallocZero;
+ mutable Optional<uint64_t> Val_O_CREAT;
+
+public:
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+
+ void CheckOpen(CheckerContext &C, const CallExpr *CE) const;
+ void CheckPthreadOnce(CheckerContext &C, const CallExpr *CE) const;
+ void CheckCallocZero(CheckerContext &C, const CallExpr *CE) const;
+ void CheckMallocZero(CheckerContext &C, const CallExpr *CE) const;
+ void CheckReallocZero(CheckerContext &C, const CallExpr *CE) const;
+ void CheckReallocfZero(CheckerContext &C, const CallExpr *CE) const;
+ void CheckAllocaZero(CheckerContext &C, const CallExpr *CE) const;
+ void CheckVallocZero(CheckerContext &C, const CallExpr *CE) const;
+
+ typedef void (UnixAPIChecker::*SubChecker)(CheckerContext &,
+ const CallExpr *) const;
+private:
+ bool ReportZeroByteAllocation(CheckerContext &C,
+ ProgramStateRef falseState,
+ const Expr *arg,
+ const char *fn_name) const;
+ void BasicAllocationCheck(CheckerContext &C,
+ const CallExpr *CE,
+ const unsigned numArgs,
+ const unsigned sizeArg,
+ const char *fn) const;
+ void LazyInitialize(std::unique_ptr<BugType> &BT, const char *name) const {
+ if (BT)
+ return;
+ BT.reset(new BugType(this, name, categories::UnixAPI));
+ }
+ void ReportOpenBug(CheckerContext &C,
+ ProgramStateRef State,
+ const char *Msg,
+ SourceRange SR) const;
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// "open" (man 2 open)
+//===----------------------------------------------------------------------===//
+
+void UnixAPIChecker::ReportOpenBug(CheckerContext &C,
+ ProgramStateRef State,
+ const char *Msg,
+ SourceRange SR) const {
+ ExplodedNode *N = C.generateErrorNode(State);
+ if (!N)
+ return;
+
+ LazyInitialize(BT_open, "Improper use of 'open'");
+
+ auto Report = llvm::make_unique<BugReport>(*BT_open, Msg, N);
+ Report->addRange(SR);
+ C.emitReport(std::move(Report));
+}
+
+void UnixAPIChecker::CheckOpen(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+
+ if (CE->getNumArgs() < 2) {
+ // The frontend should issue a warning for this case, so this is a sanity
+ // check.
+ return;
+ } else if (CE->getNumArgs() == 3) {
+ const Expr *Arg = CE->getArg(2);
+ QualType QT = Arg->getType();
+ if (!QT->isIntegerType()) {
+ ReportOpenBug(C, state,
+ "Third argument to 'open' is not an integer",
+ Arg->getSourceRange());
+ return;
+ }
+ } else if (CE->getNumArgs() > 3) {
+ ReportOpenBug(C, state,
+ "Call to 'open' with more than three arguments",
+ CE->getArg(3)->getSourceRange());
+ return;
+ }
+
+ // The definition of O_CREAT is platform specific. We need a better way
+ // of querying this information from the checking environment.
+ if (!Val_O_CREAT.hasValue()) {
+ if (C.getASTContext().getTargetInfo().getTriple().getVendor()
+ == llvm::Triple::Apple)
+ Val_O_CREAT = 0x0200;
+ else {
+ // FIXME: We need a more general way of getting the O_CREAT value.
+ // We could possibly grovel through the preprocessor state, but
+ // that would require passing the Preprocessor object to the ExprEngine.
+ // See also: MallocChecker.cpp / M_ZERO.
+ return;
+ }
+ }
+
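+ // For illustration (hypothetical call sites):
+ //   open("f", O_CREAT);        // flagged below: mode argument missing
+ //   open("f", O_CREAT, 0644);  // fine
+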
+ // Now check if oflags has O_CREAT set.
+ const Expr *oflagsEx = CE->getArg(1);
+ const SVal V = state->getSVal(oflagsEx, C.getLocationContext());
+ if (!V.getAs<NonLoc>()) {
+ // The case where 'V' can be a location can only be due to a bad header,
+ // so in this case bail out.
+ return;
+ }
+ NonLoc oflags = V.castAs<NonLoc>();
+ NonLoc ocreateFlag = C.getSValBuilder()
+ .makeIntVal(Val_O_CREAT.getValue(), oflagsEx->getType()).castAs<NonLoc>();
+ SVal maskedFlagsUC = C.getSValBuilder().evalBinOpNN(state, BO_And,
+ oflags, ocreateFlag,
+ oflagsEx->getType());
+ if (maskedFlagsUC.isUnknownOrUndef())
+ return;
+ DefinedSVal maskedFlags = maskedFlagsUC.castAs<DefinedSVal>();
+
+ // Check if maskedFlags is non-zero.
+ ProgramStateRef trueState, falseState;
+ std::tie(trueState, falseState) = state->assume(maskedFlags);
+
+ // Only emit an error if the value of 'maskedFlags' is properly
+ // constrained.
+ if (!(trueState && !falseState))
+ return;
+
+ if (CE->getNumArgs() < 3) {
+ ReportOpenBug(C, trueState,
+ "Call to 'open' requires a third argument when "
+ "the 'O_CREAT' flag is set",
+ oflagsEx->getSourceRange());
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// pthread_once
+//===----------------------------------------------------------------------===//
+
+void UnixAPIChecker::CheckPthreadOnce(CheckerContext &C,
+ const CallExpr *CE) const {
+
+ // This is similar to 'CheckDispatchOnce' in the MacOSXAPIChecker.
+ // They can possibly be refactored.
+
+ if (CE->getNumArgs() < 1)
+ return;
+
+ // Check if the first argument is stack allocated. If so, issue a warning
+ // because that's likely to be bad news.
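+ // For illustration (hypothetical code):
+ //   pthread_once_t once = PTHREAD_ONCE_INIT;          // flagged: on stack
+ //   static pthread_once_t s_once = PTHREAD_ONCE_INIT; // fine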
+ ProgramStateRef state = C.getState();
+ const MemRegion *R =
+ state->getSVal(CE->getArg(0), C.getLocationContext()).getAsRegion();
+ if (!R || !isa<StackSpaceRegion>(R->getMemorySpace()))
+ return;
+
+ ExplodedNode *N = C.generateErrorNode(state);
+ if (!N)
+ return;
+
+ SmallString<256> S;
+ llvm::raw_svector_ostream os(S);
+ os << "Call to 'pthread_once' uses";
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R))
+ os << " the local variable '" << VR->getDecl()->getName() << '\'';
+ else
+ os << " stack allocated memory";
+ os << " for the \"control\" value. Using such transient memory for "
+ "the control value is potentially dangerous.";
+ if (isa<VarRegion>(R) && isa<StackLocalsSpaceRegion>(R->getMemorySpace()))
+ os << " Perhaps you intended to declare the variable as 'static'?";
+
+ LazyInitialize(BT_pthreadOnce, "Improper use of 'pthread_once'");
+
+ auto report = llvm::make_unique<BugReport>(*BT_pthreadOnce, os.str(), N);
+ report->addRange(CE->getArg(0)->getSourceRange());
+ C.emitReport(std::move(report));
+}
+
+//===----------------------------------------------------------------------===//
+// "calloc", "malloc", "realloc", "reallocf", "alloca" and "valloc"
+// with allocation size 0
+//===----------------------------------------------------------------------===//
+// FIXME: Eventually these should be rolled into the MallocChecker, but right now
+// they're more basic and valuable for widespread use.
+
+// Returns true if we try to do a zero byte allocation, false otherwise.
+// Fills in trueState and falseState.
+static bool IsZeroByteAllocation(ProgramStateRef state,
+ const SVal argVal,
+ ProgramStateRef *trueState,
+ ProgramStateRef *falseState) {
+ std::tie(*trueState, *falseState) =
+ state->assume(argVal.castAs<DefinedSVal>());
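+ // assume() bifurcates on "size != 0": trueState keeps the non-zero
+ // assumption, falseState the zero one; only a definite zero (falseState
+ // feasible, trueState infeasible) counts as a zero-byte allocation.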
+
+ return (*falseState && !*trueState);
+}
+
+// Generates an error report, indicating that the function whose name is given
+// will perform a zero byte allocation.
+// Returns false if an error occurred, true otherwise.
+bool UnixAPIChecker::ReportZeroByteAllocation(CheckerContext &C,
+ ProgramStateRef falseState,
+ const Expr *arg,
+ const char *fn_name) const {
+ ExplodedNode *N = C.generateErrorNode(falseState);
+ if (!N)
+ return false;
+
+ LazyInitialize(BT_mallocZero,
+ "Undefined allocation of 0 bytes (CERT MEM04-C; CWE-131)");
+
+ SmallString<256> S;
+ llvm::raw_svector_ostream os(S);
+ os << "Call to '" << fn_name << "' has an allocation size of 0 bytes";
+ auto report = llvm::make_unique<BugReport>(*BT_mallocZero, os.str(), N);
+
+ report->addRange(arg->getSourceRange());
+ bugreporter::trackNullOrUndefValue(N, arg, *report);
+ C.emitReport(std::move(report));
+
+ return true;
+}
+
+// Does a basic check for 0-sized allocations suitable for most of the below
+// functions (modulo "calloc")
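+// For illustration (hypothetical call sites):
+//   malloc(0);      // flagged: size argument is provably zero
+//   realloc(p, 0);  // flagged: size is argument index 1
+//   malloc(n);      // not flagged; 'n != 0' is assumed on the surviving path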
+void UnixAPIChecker::BasicAllocationCheck(CheckerContext &C,
+ const CallExpr *CE,
+ const unsigned numArgs,
+ const unsigned sizeArg,
+ const char *fn) const {
+ // Sanity check for the correct number of arguments
+ if (CE->getNumArgs() != numArgs)
+ return;
+
+ // Check if the allocation size is 0.
+ ProgramStateRef state = C.getState();
+ ProgramStateRef trueState = nullptr, falseState = nullptr;
+ const Expr *arg = CE->getArg(sizeArg);
+ SVal argVal = state->getSVal(arg, C.getLocationContext());
+
+ if (argVal.isUnknownOrUndef())
+ return;
+
+ // Is the value perfectly constrained to zero?
+ if (IsZeroByteAllocation(state, argVal, &trueState, &falseState)) {
+ (void) ReportZeroByteAllocation(C, falseState, arg, fn);
+ return;
+ }
+ // Assume the value is non-zero going forward.
+ assert(trueState);
+ if (trueState != state)
+ C.addTransition(trueState);
+}
+
+void UnixAPIChecker::CheckCallocZero(CheckerContext &C,
+ const CallExpr *CE) const {
+ unsigned int nArgs = CE->getNumArgs();
+ if (nArgs != 2)
+ return;
+
+ ProgramStateRef state = C.getState();
+ ProgramStateRef trueState = nullptr, falseState = nullptr;
+
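+ // For calloc(nmemb, size), either argument being zero yields a zero-byte
+ // allocation. An unknown first argument is tolerated (the second may still
+ // be provably zero); an unknown second argument ends the check.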
+ unsigned int i;
+ for (i = 0; i < nArgs; i++) {
+ const Expr *arg = CE->getArg(i);
+ SVal argVal = state->getSVal(arg, C.getLocationContext());
+ if (argVal.isUnknownOrUndef()) {
+ if (i == 0)
+ continue;
+ else
+ return;
+ }
+
+ if (IsZeroByteAllocation(state, argVal, &trueState, &falseState)) {
+ if (ReportZeroByteAllocation(C, falseState, arg, "calloc"))
+ return;
+ else if (i == 0)
+ continue;
+ else
+ return;
+ }
+ }
+
+ // Assume the value is non-zero going forward.
+ assert(trueState);
+ if (trueState != state)
+ C.addTransition(trueState);
+}
+
+void UnixAPIChecker::CheckMallocZero(CheckerContext &C,
+ const CallExpr *CE) const {
+ BasicAllocationCheck(C, CE, 1, 0, "malloc");
+}
+
+void UnixAPIChecker::CheckReallocZero(CheckerContext &C,
+ const CallExpr *CE) const {
+ BasicAllocationCheck(C, CE, 2, 1, "realloc");
+}
+
+void UnixAPIChecker::CheckReallocfZero(CheckerContext &C,
+ const CallExpr *CE) const {
+ BasicAllocationCheck(C, CE, 2, 1, "reallocf");
+}
+
+void UnixAPIChecker::CheckAllocaZero(CheckerContext &C,
+ const CallExpr *CE) const {
+ BasicAllocationCheck(C, CE, 1, 0, "alloca");
+}
+
+void UnixAPIChecker::CheckVallocZero(CheckerContext &C,
+ const CallExpr *CE) const {
+ BasicAllocationCheck(C, CE, 1, 0, "valloc");
+}
+
+
+//===----------------------------------------------------------------------===//
+// Central dispatch function.
+//===----------------------------------------------------------------------===//
+
+void UnixAPIChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD || FD->getKind() != Decl::Function)
+ return;
+
+ StringRef FName = C.getCalleeName(FD);
+ if (FName.empty())
+ return;
+
+ SubChecker SC =
+ llvm::StringSwitch<SubChecker>(FName)
+ .Case("open", &UnixAPIChecker::CheckOpen)
+ .Case("pthread_once", &UnixAPIChecker::CheckPthreadOnce)
+ .Case("calloc", &UnixAPIChecker::CheckCallocZero)
+ .Case("malloc", &UnixAPIChecker::CheckMallocZero)
+ .Case("realloc", &UnixAPIChecker::CheckReallocZero)
+ .Case("reallocf", &UnixAPIChecker::CheckReallocfZero)
+ .Cases("alloca", "__builtin_alloca", &UnixAPIChecker::CheckAllocaZero)
+ .Case("valloc", &UnixAPIChecker::CheckVallocZero)
+ .Default(nullptr);
+
+ if (SC)
+ (this->*SC)(C, CE);
+}
+
+//===----------------------------------------------------------------------===//
+// Registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerUnixAPIChecker(CheckerManager &mgr) {
+ mgr.registerChecker<UnixAPIChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
new file mode 100644
index 0000000..a03abce
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
@@ -0,0 +1,252 @@
+//==- UnreachableCodeChecker.cpp - Generalized dead code checker -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file implements a generalized unreachable code checker using a
+// path-sensitive analysis. We mark any path visited, and then walk the CFG as a
+// post-analysis to determine what was never visited.
+//
+// A similar, flow-sensitive-only check exists in Analysis/ReachableCode.cpp.
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/ADT/SmallSet.h"
+
+// The number of CFGBlock pointers we want to reserve memory for. This is used
+// once for each function we analyze.
+#define DEFAULT_CFGBLOCKS 256
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class UnreachableCodeChecker : public Checker<check::EndAnalysis> {
+public:
+ void checkEndAnalysis(ExplodedGraph &G, BugReporter &B,
+ ExprEngine &Eng) const;
+private:
+ typedef llvm::SmallSet<unsigned, DEFAULT_CFGBLOCKS> CFGBlocksSet;
+
+ static inline const Stmt *getUnreachableStmt(const CFGBlock *CB);
+ static void FindUnreachableEntryPoints(const CFGBlock *CB,
+ CFGBlocksSet &reachable,
+ CFGBlocksSet &visited);
+ static bool isInvalidPath(const CFGBlock *CB, const ParentMap &PM);
+ static inline bool isEmptyCFGBlock(const CFGBlock *CB);
+};
+}
+
+void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
+ BugReporter &B,
+ ExprEngine &Eng) const {
+ CFGBlocksSet reachable, visited;
+
+ if (Eng.hasWorkRemaining())
+ return;
+
+ const Decl *D = nullptr;
+ CFG *C = nullptr;
+ ParentMap *PM = nullptr;
+ const LocationContext *LC = nullptr;
+ // Iterate over ExplodedGraph
+ for (ExplodedGraph::node_iterator I = G.nodes_begin(), E = G.nodes_end();
+ I != E; ++I) {
+ const ProgramPoint &P = I->getLocation();
+ LC = P.getLocationContext();
+ if (!LC->inTopFrame())
+ continue;
+
+ if (!D)
+ D = LC->getAnalysisDeclContext()->getDecl();
+
+ // Save the CFG if we don't have it already
+ if (!C)
+ C = LC->getAnalysisDeclContext()->getUnoptimizedCFG();
+ if (!PM)
+ PM = &LC->getParentMap();
+
+ if (Optional<BlockEntrance> BE = P.getAs<BlockEntrance>()) {
+ const CFGBlock *CB = BE->getBlock();
+ reachable.insert(CB->getBlockID());
+ }
+ }
+
+ // Bail out if we didn't get the CFG or the ParentMap.
+ if (!D || !C || !PM)
+ return;
+
+ // Don't do anything for template instantiations. Proving that code
+ // in a template instantiation is unreachable means proving that it is
+ // unreachable in all instantiations.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isTemplateInstantiation())
+ return;
+
+ // Find CFGBlocks that were not covered by any node
+ for (CFG::const_iterator I = C->begin(), E = C->end(); I != E; ++I) {
+ const CFGBlock *CB = *I;
+ // Check if the block is unreachable
+ if (reachable.count(CB->getBlockID()))
+ continue;
+
+ // Check if the block is empty (an artificial block)
+ if (isEmptyCFGBlock(CB))
+ continue;
+
+ // Find the entry points for this block
+ if (!visited.count(CB->getBlockID()))
+ FindUnreachableEntryPoints(CB, reachable, visited);
+
+ // This block may have been pruned; check if we still want to report it
+ if (reachable.count(CB->getBlockID()))
+ continue;
+
+ // Check for false positives
+ if (CB->size() > 0 && isInvalidPath(CB, *PM))
+ continue;
+
+ // It is good practice to always have a "default" label in a "switch", even
+ // if we should never get there. It can be used to detect errors, for
+ // instance. Unreachable code directly under a "default" label is therefore
+ // likely to be a false positive.
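+ // For illustration (hypothetical code):
+ //   switch (e) {
+ //   case A: ...; break;
+ //   default: assert(0 && "can't happen"); // unreachable, but not reported
+ //   }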
+ if (const Stmt *label = CB->getLabel())
+ if (label->getStmtClass() == Stmt::DefaultStmtClass)
+ continue;
+
+ // Special case for __builtin_unreachable.
+ // FIXME: This should be extended to include other unreachable markers,
+ // such as llvm_unreachable.
+ if (!CB->empty()) {
+ bool foundUnreachable = false;
+ for (CFGBlock::const_iterator ci = CB->begin(), ce = CB->end();
+ ci != ce; ++ci) {
+ if (Optional<CFGStmt> S = (*ci).getAs<CFGStmt>())
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S->getStmt())) {
+ if (CE->getBuiltinCallee() == Builtin::BI__builtin_unreachable) {
+ foundUnreachable = true;
+ break;
+ }
+ }
+ }
+ if (foundUnreachable)
+ continue;
+ }
+
+ // We found a block that wasn't covered - find the statement to report
+ SourceRange SR;
+ PathDiagnosticLocation DL;
+ SourceLocation SL;
+ if (const Stmt *S = getUnreachableStmt(CB)) {
+ SR = S->getSourceRange();
+ DL = PathDiagnosticLocation::createBegin(S, B.getSourceManager(), LC);
+ SL = DL.asLocation();
+ if (SR.isInvalid() || !SL.isValid())
+ continue;
+ }
+ else
+ continue;
+
+ // Check if the SourceLocation is in a system header
+ const SourceManager &SM = B.getSourceManager();
+ if (SM.isInSystemHeader(SL) || SM.isInExternCSystemHeader(SL))
+ continue;
+
+ B.EmitBasicReport(D, this, "Unreachable code", "Dead code",
+ "This statement is never executed", DL, SR);
+ }
+}
+
+// Recursively finds the entry point(s) for this dead CFGBlock.
+void UnreachableCodeChecker::FindUnreachableEntryPoints(const CFGBlock *CB,
+ CFGBlocksSet &reachable,
+ CFGBlocksSet &visited) {
+ visited.insert(CB->getBlockID());
+
+ for (CFGBlock::const_pred_iterator I = CB->pred_begin(), E = CB->pred_end();
+ I != E; ++I) {
+ if (!*I)
+ continue;
+
+ if (!reachable.count((*I)->getBlockID())) {
+ // If we find an unreachable predecessor, mark this block as reachable so
+ // we don't report this block
+ reachable.insert(CB->getBlockID());
+ if (!visited.count((*I)->getBlockID()))
+ // If we haven't previously visited the unreachable predecessor, recurse
+ FindUnreachableEntryPoints(*I, reachable, visited);
+ }
+ }
+}
+
+// Find the Stmt* in a CFGBlock for reporting a warning
+const Stmt *UnreachableCodeChecker::getUnreachableStmt(const CFGBlock *CB) {
+ for (CFGBlock::const_iterator I = CB->begin(), E = CB->end(); I != E; ++I) {
+ if (Optional<CFGStmt> S = I->getAs<CFGStmt>())
+ return S->getStmt();
+ }
+ if (const Stmt *S = CB->getTerminator())
+ return S;
+ else
+ return nullptr;
+}
+
+// Determines if the path to this CFGBlock contained an element that implies
+// this block is a false positive. We assume that FindUnreachableEntryPoints
+// has already marked only the entry points to any dead code, so we need only
+// find the condition that led to this block (the predecessor of this block).
+// There will never be more than one predecessor.
+bool UnreachableCodeChecker::isInvalidPath(const CFGBlock *CB,
+ const ParentMap &PM) {
+ // We only expect a predecessor size of 0 or 1. If it is >1, then an external
+ // condition has broken our assumption (for example, a sink being placed by
+ // another check). In these cases, we choose not to report.
+ if (CB->pred_size() > 1)
+ return true;
+
+ // If there are no predecessors, then this block is trivially unreachable
+ if (CB->pred_size() == 0)
+ return false;
+
+ const CFGBlock *pred = *CB->pred_begin();
+ if (!pred)
+ return false;
+
+ // Get the predecessor block's terminator condition.
+ const Stmt *cond = pred->getTerminatorCondition();
+
+ //assert(cond && "CFGBlock's predecessor has a terminator condition");
+ // The assertion above does not hold in some cases (e.g. do/while). Reporting
+ // of these situations is left enabled for now to help triage them.
+ if (!cond)
+ return false;
+
+ // Run each of the checks on the conditions
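+ // (Conditions built from macros, enum constants, static locals, offsetof,
+ // or sizeof/alignof expressions are often configuration-dependent, so dead
+ // branches guarded by them are treated as deliberate rather than as bugs.)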
+ return containsMacro(cond) || containsEnum(cond) ||
+ containsStaticLocal(cond) || containsBuiltinOffsetOf(cond) ||
+ containsStmt<UnaryExprOrTypeTraitExpr>(cond);
+}
+
+// Returns true if the given CFGBlock is empty
+bool UnreachableCodeChecker::isEmptyCFGBlock(const CFGBlock *CB) {
+ return CB->getLabel() == nullptr // No labels
+ && CB->size() == 0 // No statements
+ && !CB->getTerminator(); // No terminator
+}
+
+void ento::registerUnreachableCodeChecker(CheckerManager &mgr) {
+ mgr.registerChecker<UnreachableCodeChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
new file mode 100644
index 0000000..e3b2ed2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -0,0 +1,185 @@
+//=== VLASizeChecker.cpp - Dangerous VLA size checker ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines VLASizeChecker, a builtin check in ExprEngine that
+// checks declarations of VLAs for undefined, zero, tainted, or negative
+// sizes.
+// In addition, VLASizeChecker is responsible for defining the extent
+// of the MemRegion that represents a VLA.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class VLASizeChecker : public Checker< check::PreStmt<DeclStmt> > {
+ mutable std::unique_ptr<BugType> BT;
+ enum VLASize_Kind { VLA_Garbage, VLA_Zero, VLA_Tainted, VLA_Negative };
+
+ void reportBug(VLASize_Kind Kind,
+ const Expr *SizeE,
+ ProgramStateRef State,
+ CheckerContext &C) const;
+public:
+ void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+void VLASizeChecker::reportBug(VLASize_Kind Kind,
+ const Expr *SizeE,
+ ProgramStateRef State,
+ CheckerContext &C) const {
+ // Generate an error node.
+ ExplodedNode *N = C.generateErrorNode(State);
+ if (!N)
+ return;
+
+ if (!BT)
+ BT.reset(new BuiltinBug(
+ this, "Dangerous variable-length array (VLA) declaration"));
+
+ SmallString<256> buf;
+ llvm::raw_svector_ostream os(buf);
+ os << "Declared variable-length array (VLA) ";
+ switch (Kind) {
+ case VLA_Garbage:
+ os << "uses a garbage value as its size";
+ break;
+ case VLA_Zero:
+ os << "has zero size";
+ break;
+ case VLA_Tainted:
+ os << "has tainted size";
+ break;
+ case VLA_Negative:
+ os << "has negative size";
+ break;
+ }
+
+ auto report = llvm::make_unique<BugReport>(*BT, os.str(), N);
+ report->addRange(SizeE->getSourceRange());
+ bugreporter::trackNullOrUndefValue(N, SizeE, *report);
+ C.emitReport(std::move(report));
+ return;
+}
+
+void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
+ if (!DS->isSingleDecl())
+ return;
+
+ const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
+ if (!VD)
+ return;
+
+ ASTContext &Ctx = C.getASTContext();
+ const VariableArrayType *VLA = Ctx.getAsVariableArrayType(VD->getType());
+ if (!VLA)
+ return;
+
+ // FIXME: Handle multi-dimensional VLAs.
+ const Expr *SE = VLA->getSizeExpr();
+ ProgramStateRef state = C.getState();
+ SVal sizeV = state->getSVal(SE, C.getLocationContext());
+
+ if (sizeV.isUndef()) {
+ reportBug(VLA_Garbage, SE, state, C);
+ return;
+ }
+
+ // See if the size value is known. It can't be undefined because we would have
+ // warned about that already.
+ if (sizeV.isUnknown())
+ return;
+
+ // Check if the size is tainted.
+ if (state->isTainted(sizeV)) {
+ reportBug(VLA_Tainted, SE, nullptr, C);
+ return;
+ }
+
+ // Check if the size is zero.
+ DefinedSVal sizeD = sizeV.castAs<DefinedSVal>();
+
+ ProgramStateRef stateNotZero, stateZero;
+ std::tie(stateNotZero, stateZero) = state->assume(sizeD);
+
+ if (stateZero && !stateNotZero) {
+ reportBug(VLA_Zero, SE, stateZero, C);
+ return;
+ }
+
+ // From this point on, assume that the size is not zero.
+ state = stateNotZero;
+
+ // VLASizeChecker is responsible for defining the extent of the array being
+ // declared. We do this by multiplying the array length by the element size,
+ // then matching that with the array region's extent symbol.
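+ // For illustration (hypothetical code): after 'int vla[n];' on a path where
+ // n == 5, the extent of 'vla' is constrained to 5 * sizeof(int) bytes.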
+
+ // Check if the size is negative.
+ SValBuilder &svalBuilder = C.getSValBuilder();
+
+ QualType Ty = SE->getType();
+ DefinedOrUnknownSVal Zero = svalBuilder.makeZeroVal(Ty);
+
+ SVal LessThanZeroVal = svalBuilder.evalBinOp(state, BO_LT, sizeD, Zero, Ty);
+ if (Optional<DefinedSVal> LessThanZeroDVal =
+ LessThanZeroVal.getAs<DefinedSVal>()) {
+ ConstraintManager &CM = C.getConstraintManager();
+ ProgramStateRef StatePos, StateNeg;
+
+ std::tie(StateNeg, StatePos) = CM.assumeDual(state, *LessThanZeroDVal);
+ if (StateNeg && !StatePos) {
+ reportBug(VLA_Negative, SE, state, C);
+ return;
+ }
+ state = StatePos;
+ }
+
+ // Convert the array length to size_t.
+ QualType SizeTy = Ctx.getSizeType();
+ NonLoc ArrayLength =
+ svalBuilder.evalCast(sizeD, SizeTy, SE->getType()).castAs<NonLoc>();
+
+ // Get the element size.
+ CharUnits EleSize = Ctx.getTypeSizeInChars(VLA->getElementType());
+ SVal EleSizeVal = svalBuilder.makeIntVal(EleSize.getQuantity(), SizeTy);
+
+ // Multiply the array length by the element size.
+ SVal ArraySizeVal = svalBuilder.evalBinOpNN(
+ state, BO_Mul, ArrayLength, EleSizeVal.castAs<NonLoc>(), SizeTy);
+
+ // Finally, assume that the array's extent matches the given size.
+ const LocationContext *LC = C.getLocationContext();
+ DefinedOrUnknownSVal Extent =
+ state->getRegion(VD, LC)->getExtent(svalBuilder);
+ DefinedOrUnknownSVal ArraySize = ArraySizeVal.castAs<DefinedOrUnknownSVal>();
+ DefinedOrUnknownSVal sizeIsKnown =
+ svalBuilder.evalEQ(state, Extent, ArraySize);
+ state = state->assume(sizeIsKnown, true);
+
+ // Assume should not fail at this point.
+ assert(state);
+
+ // Remember our assumptions!
+ C.addTransition(state);
+}
+
+void ento::registerVLASizeChecker(CheckerManager &mgr) {
+ mgr.registerChecker<VLASizeChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
new file mode 100644
index 0000000..26ffee8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
@@ -0,0 +1,218 @@
+//===- VforkChecker.cpp -------- Vfork usage checks --------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the vfork checker, which checks for dangerous uses of
+// vfork. A vforked process shares memory (including the stack) with its
+// parent, so its range of actions is severely limited: it must not write
+// to variables, call non-whitelisted functions, etc. For details, see
+// http://man7.org/linux/man-pages/man2/vfork.2.html
+//
+// This checker checks for prohibited constructs in vforked process.
+// The state transition diagram:
+// PARENT ---(vfork() == 0)--> CHILD
+// |
+// --(*p = ...)--> bug
+// |
+// --foo()--> bug
+// |
+// --return--> bug
+//
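+// For illustration (hypothetical code):
+//   pid_t pid = vfork();
+//   if (pid == 0) {                           // child branch
+//     x = 1;                                  // bug: write outside vfork's lhs
+//     execl("/bin/true", "true", (char *)0);  // ok: whitelisted exec
+//   }
+//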
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/AST/ParentMap.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class VforkChecker : public Checker<check::PreCall, check::PostCall,
+ check::Bind, check::PreStmt<ReturnStmt>> {
+ mutable std::unique_ptr<BuiltinBug> BT;
+ mutable llvm::SmallSet<const IdentifierInfo *, 10> VforkWhitelist;
+ mutable const IdentifierInfo *II_vfork;
+
+ static bool isChildProcess(const ProgramStateRef State);
+
+ bool isVforkCall(const Decl *D, CheckerContext &C) const;
+ bool isCallWhitelisted(const IdentifierInfo *II, CheckerContext &C) const;
+
+ void reportBug(const char *What, CheckerContext &C,
+ const char *Details = nullptr) const;
+
+public:
+ VforkChecker() : II_vfork(nullptr) {}
+
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkBind(SVal L, SVal V, const Stmt *S, CheckerContext &C) const;
+ void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
+};
+
+} // end anonymous namespace
+
+// This trait holds the region of the variable that is assigned vfork's
+// return value (the only region the child is allowed to write).
+// VFORK_RESULT_INVALID means that we are in parent process.
+// VFORK_RESULT_NONE means that vfork's return value hasn't been assigned.
+// Other values point to valid regions.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(VforkResultRegion, const void *)
+#define VFORK_RESULT_INVALID 0
+#define VFORK_RESULT_NONE ((void *)(uintptr_t)1)
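+// Note: the trait is read back as a 'const void *'; the INVALID (0) and
+// NONE (1) sentinels will not collide with a real MemRegion pointer in
+// practice.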
+
+bool VforkChecker::isChildProcess(const ProgramStateRef State) {
+ return State->get<VforkResultRegion>() != VFORK_RESULT_INVALID;
+}
+
+bool VforkChecker::isVforkCall(const Decl *D, CheckerContext &C) const {
+ auto FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD || !C.isCLibraryFunction(FD))
+ return false;
+
+ if (!II_vfork) {
+ ASTContext &AC = C.getASTContext();
+ II_vfork = &AC.Idents.get("vfork");
+ }
+
+ return FD->getIdentifier() == II_vfork;
+}
+
+// Returns true iff it is ok to call the function after a successful vfork.
+bool VforkChecker::isCallWhitelisted(const IdentifierInfo *II,
+ CheckerContext &C) const {
+ if (VforkWhitelist.empty()) {
+ // According to the man page.
+ const char *ids[] = {
+ "_exit",
+ "_Exit",
+ "execl",
+ "execlp",
+ "execle",
+ "execv",
+ "execvp",
+ "execvpe",
+ 0,
+ };
+
+ ASTContext &AC = C.getASTContext();
+ for (const char **id = ids; *id; ++id)
+ VforkWhitelist.insert(&AC.Idents.get(*id));
+ }
+
+ return VforkWhitelist.count(II);
+}
+
+void VforkChecker::reportBug(const char *What, CheckerContext &C,
+ const char *Details) const {
+ if (ExplodedNode *N = C.generateErrorNode(C.getState())) {
+ if (!BT)
+ BT.reset(new BuiltinBug(this,
+ "Dangerous construct in a vforked process"));
+
+ SmallString<256> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << What << " is prohibited after a successful vfork";
+
+ if (Details)
+ os << "; " << Details;
+
+ auto Report = llvm::make_unique<BugReport>(*BT, os.str(), N);
+ // TODO: mark vfork call in BugReportVisitor
+ C.emitReport(std::move(Report));
+ }
+}
+
+// Detect calls to vfork and split execution appropriately.
+void VforkChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ // We can't call vfork in child so don't bother
+ // (corresponding warning has already been emitted in checkPreCall).
+ ProgramStateRef State = C.getState();
+ if (isChildProcess(State))
+ return;
+
+ if (!isVforkCall(Call.getDecl(), C))
+ return;
+
+ // Get return value of vfork.
+ SVal VforkRetVal = Call.getReturnValue();
+ Optional<DefinedOrUnknownSVal> DVal =
+ VforkRetVal.getAs<DefinedOrUnknownSVal>();
+ if (!DVal)
+ return;
+
+ // Get assigned variable.
+ const ParentMap &PM = C.getLocationContext()->getParentMap();
+ const Stmt *P = PM.getParentIgnoreParenCasts(Call.getOriginExpr());
+ const VarDecl *LhsDecl;
+ std::tie(LhsDecl, std::ignore) = parseAssignment(P);
+
+ // Get assigned memory region.
+ MemRegionManager &M = C.getStoreManager().getRegionManager();
+ const MemRegion *LhsDeclReg =
+ LhsDecl
+ ? M.getVarRegion(LhsDecl, C.getLocationContext())
+ : (const MemRegion *)VFORK_RESULT_NONE;
+
+ // Parent branch gets nonzero return value (according to manpage).
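+ // Calling addTransition twice forks the exploded graph: one successor for
+ // the parent (nonzero return, trait left INVALID) and one for the child
+ // (zero return, trait set to the assigned region).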
+ ProgramStateRef ParentState, ChildState;
+ std::tie(ParentState, ChildState) = C.getState()->assume(*DVal);
+ C.addTransition(ParentState);
+ ChildState = ChildState->set<VforkResultRegion>(LhsDeclReg);
+ C.addTransition(ChildState);
+}
+
+// Prohibit calls to non-whitelist functions in child process.
+void VforkChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ if (isChildProcess(State)
+ && !isCallWhitelisted(Call.getCalleeIdentifier(), C))
+ reportBug("This function call", C);
+}
+
+// Prohibit writes in child process (except for vfork's lhs).
+void VforkChecker::checkBind(SVal L, SVal V, const Stmt *S,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ if (!isChildProcess(State))
+ return;
+
+ const MemRegion *VforkLhs =
+ static_cast<const MemRegion *>(State->get<VforkResultRegion>());
+ const MemRegion *MR = L.getAsRegion();
+
+ // Child is allowed to modify only vfork's lhs.
+ if (!MR || MR == VforkLhs)
+ return;
+
+ reportBug("This assignment", C);
+}
+
+// Prohibit return from function in child process.
+void VforkChecker::checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ if (isChildProcess(State))
+ reportBug("Return", C, "call _exit() instead");
+}
+
+void ento::registerVforkChecker(CheckerManager &mgr) {
+ mgr.registerChecker<VforkChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
new file mode 100644
index 0000000..55025030
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
@@ -0,0 +1,246 @@
+//=======- VirtualCallChecker.cpp --------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a checker that checks virtual function calls during
+// construction or destruction of C++ objects.
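+//
+// For illustration (hypothetical code, not part of this file):
+//   struct A {
+//     A() { f(); }            // flagged: dispatches to A::f, never to B::f
+//     virtual void f();
+//   };
+//   struct B : A { void f() override; };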
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/SaveAndRestore.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class WalkAST : public StmtVisitor<WalkAST> {
+ const CheckerBase *Checker;
+ BugReporter &BR;
+ AnalysisDeclContext *AC;
+
+ typedef const CallExpr * WorkListUnit;
+ typedef SmallVector<WorkListUnit, 20> DFSWorkList;
+
+ /// A vector representing the worklist which has a chain of CallExprs.
+ DFSWorkList WList;
+
+ // PreVisited : A CallExpr to this FunctionDecl is in the worklist, but the
+ // body has not been visited yet.
+ // PostVisited : A CallExpr to this FunctionDecl is in the worklist, and the
+ // body has been visited.
+ enum Kind { NotVisited,
+ PreVisited, /**< A CallExpr to this FunctionDecl is in the
+ worklist, but the body has not yet been
+ visited. */
+ PostVisited /**< A CallExpr to this FunctionDecl is in the
+ worklist, and the body has been visited. */
+ };
+
+ /// A DenseMap that records visited states of FunctionDecls.
+ llvm::DenseMap<const FunctionDecl *, Kind> VisitedFunctions;
+
+ /// The CallExpr whose body is currently being visited. This is used for
+ /// generating bug reports. This is null while visiting the body of a
+ /// constructor or destructor.
+ const CallExpr *visitingCallExpr;
+
+public:
+ WalkAST(const CheckerBase *checker, BugReporter &br,
+ AnalysisDeclContext *ac)
+ : Checker(checker), BR(br), AC(ac), visitingCallExpr(nullptr) {}
+
+ bool hasWork() const { return !WList.empty(); }
+
+ /// This method adds a CallExpr to the worklist and marks the callee as
+ /// being PreVisited.
+ void Enqueue(WorkListUnit WLUnit) {
+ const FunctionDecl *FD = WLUnit->getDirectCallee();
+ if (!FD || !FD->getBody())
+ return;
+ Kind &K = VisitedFunctions[FD];
+ if (K != NotVisited)
+ return;
+ K = PreVisited;
+ WList.push_back(WLUnit);
+ }
+
+ /// This method returns an item from the worklist without removing it.
+ WorkListUnit Dequeue() {
+ assert(!WList.empty());
+ return WList.back();
+ }
+
+ void Execute() {
+ while (hasWork()) {
+ WorkListUnit WLUnit = Dequeue();
+ const FunctionDecl *FD = WLUnit->getDirectCallee();
+ assert(FD && FD->getBody());
+
+ if (VisitedFunctions[FD] == PreVisited) {
+ // If the callee is PreVisited, walk its body.
+ // Visit the body.
+ SaveAndRestore<const CallExpr *> SaveCall(visitingCallExpr, WLUnit);
+ Visit(FD->getBody());
+
+ // Mark the function as being PostVisited to indicate we have
+ // scanned the body.
+ VisitedFunctions[FD] = PostVisited;
+ continue;
+ }
+
+ // Otherwise, the callee is PostVisited.
+ // Remove it from the worklist.
+ assert(VisitedFunctions[FD] == PostVisited);
+ WList.pop_back();
+ }
+ }
+
+ // Stmt visitor methods.
+ void VisitCallExpr(CallExpr *CE);
+ void VisitCXXMemberCallExpr(CallExpr *CE);
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+ void VisitChildren(Stmt *S);
+
+ void ReportVirtualCall(const CallExpr *CE, bool isPure);
+
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// AST walking.
+//===----------------------------------------------------------------------===//
+
+void WalkAST::VisitChildren(Stmt *S) {
+ for (Stmt *Child : S->children())
+ if (Child)
+ Visit(Child);
+}
+
+void WalkAST::VisitCallExpr(CallExpr *CE) {
+ VisitChildren(CE);
+ Enqueue(CE);
+}
+
+void WalkAST::VisitCXXMemberCallExpr(CallExpr *CE) {
+ VisitChildren(CE);
+ bool callIsNonVirtual = false;
+
+ // Several situations to elide for checking.
+ if (MemberExpr *CME = dyn_cast<MemberExpr>(CE->getCallee())) {
+ // If the member access is fully qualified (i.e., X::F), then treat
+ // this as a non-virtual call and do not warn.
+ if (CME->getQualifier())
+ callIsNonVirtual = true;
+
+ if (Expr *base = CME->getBase()->IgnoreImpCasts()) {
+ // Elide analyzing the call entirely if the base pointer is not 'this'.
+ if (!isa<CXXThisExpr>(base))
+ return;
+
+ // If the most derived class is marked 'final', we know that no subclass
+ // can override this member.
+ if (base->getBestDynamicClassType()->hasAttr<FinalAttr>())
+ callIsNonVirtual = true;
+ }
+ }
+
+ // Get the callee.
+ const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(CE->getDirectCallee());
+ if (MD && MD->isVirtual() && !callIsNonVirtual && !MD->hasAttr<FinalAttr>() &&
+ !MD->getParent()->hasAttr<FinalAttr>())
+ ReportVirtualCall(CE, MD->isPure());
+
+ Enqueue(CE);
+}
+
+void WalkAST::ReportVirtualCall(const CallExpr *CE, bool isPure) {
+ SmallString<100> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << "Call Path : ";
+ // Name of current visiting CallExpr.
+ os << *CE->getDirectCallee();
+
+ // Name of the CallExpr whose body is currently being walked.
+ if (visitingCallExpr)
+ os << " <-- " << *visitingCallExpr->getDirectCallee();
+ // Names of FunctionDecls in worklist with state PostVisited.
+ for (SmallVectorImpl<const CallExpr *>::iterator I = WList.end(),
+ E = WList.begin(); I != E; --I) {
+ const FunctionDecl *FD = (*(I-1))->getDirectCallee();
+ assert(FD);
+ if (VisitedFunctions[FD] == PostVisited)
+ os << " <-- " << *FD;
+ }
+
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ SourceRange R = CE->getCallee()->getSourceRange();
+
+ if (isPure) {
+ os << "\n" << "Calling a pure virtual function during construction or "
+ << "destruction may lead to undefined behavior";
+ BR.EmitBasicReport(AC->getDecl(), Checker,
+ "Call to pure virtual function during construction or "
+ "destruction",
+ "Cplusplus", os.str(), CELoc, R);
+ return;
+ }
+ else {
+ os << "\n" << "Calls to virtual functions during construction or "
+ << "destruction will never be dispatched to a more derived class";
+ BR.EmitBasicReport(AC->getDecl(), Checker,
+ "Call to virtual function during construction or "
+ "destruction",
+ "Cplusplus", os.str(), CELoc, R);
+ return;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// VirtualCallChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VirtualCallChecker : public Checker<check::ASTDecl<CXXRecordDecl> > {
+public:
+ void checkASTDecl(const CXXRecordDecl *RD, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ WalkAST walker(this, BR, mgr.getAnalysisDeclContext(RD));
+
+ // Check the constructors.
+ for (const auto *I : RD->ctors()) {
+ if (!I->isCopyOrMoveConstructor())
+ if (Stmt *Body = I->getBody()) {
+ walker.Visit(Body);
+ walker.Execute();
+ }
+ }
+
+ // Check the destructor.
+ if (CXXDestructorDecl *DD = RD->getDestructor())
+ if (Stmt *Body = DD->getBody()) {
+ walker.Visit(Body);
+ walker.Execute();
+ }
+ }
+};
+}
+
+void ento::registerVirtualCallChecker(CheckerManager &mgr) {
+ mgr.registerChecker<VirtualCallChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/APSIntType.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/APSIntType.cpp
new file mode 100644
index 0000000..c7e9526
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/APSIntType.cpp
@@ -0,0 +1,49 @@
+//===--- APSIntType.cpp - Simple record of the type of APSInts ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
+
+using namespace clang;
+using namespace ento;
+
+APSIntType::RangeTestResultKind
+APSIntType::testInRange(const llvm::APSInt &Value,
+ bool AllowSignConversions) const {
+
+ // Negative numbers cannot be losslessly converted to unsigned type.
+ if (IsUnsigned && !AllowSignConversions &&
+ Value.isSigned() && Value.isNegative())
+ return RTR_Below;
+
+ unsigned MinBits;
+ if (AllowSignConversions) {
+ if (Value.isSigned() && !IsUnsigned)
+ MinBits = Value.getMinSignedBits();
+ else
+ MinBits = Value.getActiveBits();
+
+ } else {
+ // Signed integers can be converted to signed integers of the same width
+ // or (if positive) unsigned integers with one fewer bit.
+ // Unsigned integers can be converted to unsigned integers of the same width
+ // or signed integers with one more bit.
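+ // For illustration (hypothetical query): against a signed 8-bit type, an
+ // unsigned 127 needs 8 bits as a signed value (RTR_Within below), while an
+ // unsigned 255 needs 9 (RTR_Above).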
+ if (Value.isSigned())
+ MinBits = Value.getMinSignedBits() - IsUnsigned;
+ else
+ MinBits = Value.getActiveBits() + !IsUnsigned;
+ }
+
+ if (MinBits <= BitWidth)
+ return RTR_Within;
+
+ if (Value.isSigned() && Value.isNegative())
+ return RTR_Below;
+ else
+ return RTR_Above;
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
new file mode 100644
index 0000000..54634fd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
@@ -0,0 +1,58 @@
+//===-- AnalysisManager.cpp -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+
+using namespace clang;
+using namespace ento;
+
+void AnalysisManager::anchor() { }
+
+AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
+ const LangOptions &lang,
+ const PathDiagnosticConsumers &PDC,
+ StoreManagerCreator storemgr,
+ ConstraintManagerCreator constraintmgr,
+ CheckerManager *checkerMgr,
+ AnalyzerOptions &Options,
+ CodeInjector *injector)
+ : AnaCtxMgr(Options.UnoptimizedCFG,
+ /*AddImplicitDtors=*/true,
+ /*AddInitializers=*/true,
+ Options.includeTemporaryDtorsInCFG(),
+ Options.shouldSynthesizeBodies(),
+ Options.shouldConditionalizeStaticInitializers(),
+ /*addCXXNewAllocator=*/true,
+ injector),
+ Ctx(ctx),
+ Diags(diags),
+ LangOpts(lang),
+ PathConsumers(PDC),
+ CreateStoreMgr(storemgr), CreateConstraintMgr(constraintmgr),
+ CheckerMgr(checkerMgr),
+ options(Options) {
+ AnaCtxMgr.getCFGBuildOptions().setAllAlwaysAdd();
+}
+
+AnalysisManager::~AnalysisManager() {
+ FlushDiagnostics();
+ for (PathDiagnosticConsumers::iterator I = PathConsumers.begin(),
+ E = PathConsumers.end(); I != E; ++I) {
+ delete *I;
+ }
+}
+
+void AnalysisManager::FlushDiagnostics() {
+ PathDiagnosticConsumer::FilesMade filesMade;
+ for (PathDiagnosticConsumers::iterator I = PathConsumers.begin(),
+ E = PathConsumers.end();
+ I != E; ++I) {
+ (*I)->FlushDiagnostics(&filesMade);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
new file mode 100644
index 0000000..54c668c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
@@ -0,0 +1,346 @@
+//===-- AnalyzerOptions.cpp - Analysis Engine Options -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains special accessors for analyzer configuration options
+// with string representations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+using namespace llvm;
+
+AnalyzerOptions::UserModeKind AnalyzerOptions::getUserMode() {
+ if (UserMode == UMK_NotSet) {
+ StringRef ModeStr =
+ Config.insert(std::make_pair("mode", "deep")).first->second;
+ UserMode = llvm::StringSwitch<UserModeKind>(ModeStr)
+ .Case("shallow", UMK_Shallow)
+ .Case("deep", UMK_Deep)
+ .Default(UMK_NotSet);
+ assert(UserMode != UMK_NotSet && "User mode is invalid.");
+ }
+ return UserMode;
+}
+
+IPAKind AnalyzerOptions::getIPAMode() {
+ if (IPAMode == IPAK_NotSet) {
+
+ // Use the User Mode to set the default IPA value.
+ // Note, we have to add the string to the Config map for the ConfigDumper
+ // checker to function properly.
+ const char *DefaultIPA = nullptr;
+ UserModeKind HighLevelMode = getUserMode();
+ if (HighLevelMode == UMK_Shallow)
+ DefaultIPA = "inlining";
+ else if (HighLevelMode == UMK_Deep)
+ DefaultIPA = "dynamic-bifurcate";
+ assert(DefaultIPA);
+
+ // Lookup the ipa configuration option, use the default from User Mode.
+ StringRef ModeStr =
+ Config.insert(std::make_pair("ipa", DefaultIPA)).first->second;
+ IPAKind IPAConfig = llvm::StringSwitch<IPAKind>(ModeStr)
+ .Case("none", IPAK_None)
+ .Case("basic-inlining", IPAK_BasicInlining)
+ .Case("inlining", IPAK_Inlining)
+ .Case("dynamic", IPAK_DynamicDispatch)
+ .Case("dynamic-bifurcate", IPAK_DynamicDispatchBifurcate)
+ .Default(IPAK_NotSet);
+ assert(IPAConfig != IPAK_NotSet && "IPA Mode is invalid.");
+
+ // Set the member variable.
+ IPAMode = IPAConfig;
+ }
+
+ return IPAMode;
+}
+
+bool
+AnalyzerOptions::mayInlineCXXMemberFunction(CXXInlineableMemberKind K) {
+ if (getIPAMode() < IPAK_Inlining)
+ return false;
+
+ if (!CXXMemberInliningMode) {
+ static const char *ModeKey = "c++-inlining";
+
+ StringRef ModeStr =
+ Config.insert(std::make_pair(ModeKey, "destructors")).first->second;
+
+ CXXInlineableMemberKind &MutableMode =
+ const_cast<CXXInlineableMemberKind &>(CXXMemberInliningMode);
+
+ MutableMode = llvm::StringSwitch<CXXInlineableMemberKind>(ModeStr)
+ .Case("constructors", CIMK_Constructors)
+ .Case("destructors", CIMK_Destructors)
+ .Case("none", CIMK_None)
+ .Case("methods", CIMK_MemberFunctions)
+ .Default(CXXInlineableMemberKind());
+
+ if (!MutableMode) {
+ // FIXME: We should emit a warning here about an unknown inlining kind,
+ // but the AnalyzerOptions doesn't have access to a diagnostic engine.
+ MutableMode = CIMK_None;
+ }
+ }
+
+ return CXXMemberInliningMode >= K;
+}
+
+static StringRef toString(bool b) { return b ? "true" : "false"; }
+
+StringRef AnalyzerOptions::getCheckerOption(StringRef CheckerName,
+ StringRef OptionName,
+ StringRef Default,
+ bool SearchInParents) {
+ // Search for a package option if the option for the checker is not specified
+ // and search in parents is enabled.
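+ // For illustration (hypothetical option name): with SearchInParents set, a
+ // lookup of "alpha.core.MyChecker:Opt" falls back to "alpha.core:Opt" and
+ // then "alpha:Opt" before giving up and returning Default.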
+ ConfigTable::const_iterator E = Config.end();
+ do {
+ ConfigTable::const_iterator I =
+ Config.find((Twine(CheckerName) + ":" + OptionName).str());
+ if (I != E)
+ return StringRef(I->getValue());
+ size_t Pos = CheckerName.rfind('.');
+ if (Pos == StringRef::npos)
+ return Default;
+ CheckerName = CheckerName.substr(0, Pos);
+ } while (!CheckerName.empty() && SearchInParents);
+ return Default;
+}
+
+bool AnalyzerOptions::getBooleanOption(StringRef Name, bool DefaultVal,
+ const CheckerBase *C,
+ bool SearchInParents) {
+ // FIXME: We should emit a warning here if the value is something other than
+ // "true", "false", or the empty string (meaning the default value),
+ // but the AnalyzerOptions doesn't have access to a diagnostic engine.
+ StringRef Default = toString(DefaultVal);
+ StringRef V =
+ C ? getCheckerOption(C->getTagDescription(), Name, Default,
+ SearchInParents)
+ : StringRef(Config.insert(std::make_pair(Name, Default)).first->second);
+ return llvm::StringSwitch<bool>(V)
+ .Case("true", true)
+ .Case("false", false)
+ .Default(DefaultVal);
+}
+
+bool AnalyzerOptions::getBooleanOption(Optional<bool> &V, StringRef Name,
+ bool DefaultVal, const CheckerBase *C,
+ bool SearchInParents) {
+ if (!V.hasValue())
+ V = getBooleanOption(Name, DefaultVal, C, SearchInParents);
+ return V.getValue();
+}
+
+bool AnalyzerOptions::includeTemporaryDtorsInCFG() {
+ return getBooleanOption(IncludeTemporaryDtorsInCFG,
+ "cfg-temporary-dtors",
+ /* Default = */ false);
+}
+
+bool AnalyzerOptions::mayInlineCXXStandardLibrary() {
+ return getBooleanOption(InlineCXXStandardLibrary,
+ "c++-stdlib-inlining",
+ /*Default=*/true);
+}
+
+bool AnalyzerOptions::mayInlineTemplateFunctions() {
+ return getBooleanOption(InlineTemplateFunctions,
+ "c++-template-inlining",
+ /*Default=*/true);
+}
+
+bool AnalyzerOptions::mayInlineCXXAllocator() {
+ return getBooleanOption(InlineCXXAllocator,
+ "c++-allocator-inlining",
+ /*Default=*/false);
+}
+
+bool AnalyzerOptions::mayInlineCXXContainerMethods() {
+ return getBooleanOption(InlineCXXContainerMethods,
+ "c++-container-inlining",
+ /*Default=*/false);
+}
+
+bool AnalyzerOptions::mayInlineCXXSharedPtrDtor() {
+ return getBooleanOption(InlineCXXSharedPtrDtor,
+ "c++-shared_ptr-inlining",
+ /*Default=*/false);
+}
+
+bool AnalyzerOptions::mayInlineObjCMethod() {
+ return getBooleanOption(ObjCInliningMode,
+ "objc-inlining",
+ /* Default = */ true);
+}
+
+bool AnalyzerOptions::shouldSuppressNullReturnPaths() {
+ return getBooleanOption(SuppressNullReturnPaths,
+ "suppress-null-return-paths",
+ /* Default = */ true);
+}
+
+bool AnalyzerOptions::shouldAvoidSuppressingNullArgumentPaths() {
+ return getBooleanOption(AvoidSuppressingNullArgumentPaths,
+ "avoid-suppressing-null-argument-paths",
+ /* Default = */ false);
+}
+
+bool AnalyzerOptions::shouldSuppressInlinedDefensiveChecks() {
+ return getBooleanOption(SuppressInlinedDefensiveChecks,
+ "suppress-inlined-defensive-checks",
+ /* Default = */ true);
+}
+
+bool AnalyzerOptions::shouldSuppressFromCXXStandardLibrary() {
+ return getBooleanOption(SuppressFromCXXStandardLibrary,
+ "suppress-c++-stdlib",
+ /* Default = */ false);
+}
+
+bool AnalyzerOptions::shouldReportIssuesInMainSourceFile() {
+ return getBooleanOption(ReportIssuesInMainSourceFile,
+ "report-in-main-source-file",
+ /* Default = */ false);
+}
+
+bool AnalyzerOptions::shouldWriteStableReportFilename() {
+ return getBooleanOption(StableReportFilename,
+ "stable-report-filename",
+ /* Default = */ false);
+}
+
+int AnalyzerOptions::getOptionAsInteger(StringRef Name, int DefaultVal,
+ const CheckerBase *C,
+ bool SearchInParents) {
+ SmallString<10> StrBuf;
+ llvm::raw_svector_ostream OS(StrBuf);
+ OS << DefaultVal;
+
+ StringRef V = C ? getCheckerOption(C->getTagDescription(), Name, OS.str(),
+ SearchInParents)
+ : StringRef(Config.insert(std::make_pair(Name, OS.str()))
+ .first->second);
+
+ int Res = DefaultVal;
+ bool b = V.getAsInteger(10, Res);
+ assert(!b && "analyzer-config option should be numeric");
+ (void)b;
+ return Res;
+}
+
+StringRef AnalyzerOptions::getOptionAsString(StringRef Name,
+ StringRef DefaultVal,
+ const CheckerBase *C,
+ bool SearchInParents) {
+ return C ? getCheckerOption(C->getTagDescription(), Name, DefaultVal,
+ SearchInParents)
+ : StringRef(
+ Config.insert(std::make_pair(Name, DefaultVal)).first->second);
+}
+
+unsigned AnalyzerOptions::getAlwaysInlineSize() {
+ if (!AlwaysInlineSize.hasValue())
+ AlwaysInlineSize = getOptionAsInteger("ipa-always-inline-size", 3);
+ return AlwaysInlineSize.getValue();
+}
+
+unsigned AnalyzerOptions::getMaxInlinableSize() {
+ if (!MaxInlinableSize.hasValue()) {
+
+ int DefaultValue = 0;
+ UserModeKind HighLevelMode = getUserMode();
+ switch (HighLevelMode) {
+ default:
+ llvm_unreachable("Invalid mode.");
+ case UMK_Shallow:
+ DefaultValue = 4;
+ break;
+ case UMK_Deep:
+ DefaultValue = 50;
+ break;
+ }
+
+ MaxInlinableSize = getOptionAsInteger("max-inlinable-size", DefaultValue);
+ }
+ return MaxInlinableSize.getValue();
+}
+
+unsigned AnalyzerOptions::getGraphTrimInterval() {
+ if (!GraphTrimInterval.hasValue())
+ GraphTrimInterval = getOptionAsInteger("graph-trim-interval", 1000);
+ return GraphTrimInterval.getValue();
+}
+
+unsigned AnalyzerOptions::getMaxTimesInlineLarge() {
+ if (!MaxTimesInlineLarge.hasValue())
+ MaxTimesInlineLarge = getOptionAsInteger("max-times-inline-large", 32);
+ return MaxTimesInlineLarge.getValue();
+}
+
+unsigned AnalyzerOptions::getMinCFGSizeTreatFunctionsAsLarge() {
+ if (!MinCFGSizeTreatFunctionsAsLarge.hasValue())
+ MinCFGSizeTreatFunctionsAsLarge = getOptionAsInteger(
+ "min-cfg-size-treat-functions-as-large", 14);
+ return MinCFGSizeTreatFunctionsAsLarge.getValue();
+}
+
+unsigned AnalyzerOptions::getMaxNodesPerTopLevelFunction() {
+ if (!MaxNodesPerTopLevelFunction.hasValue()) {
+ int DefaultValue = 0;
+ UserModeKind HighLevelMode = getUserMode();
+ switch (HighLevelMode) {
+ default:
+ llvm_unreachable("Invalid mode.");
+ case UMK_Shallow:
+ DefaultValue = 75000;
+ break;
+ case UMK_Deep:
+ DefaultValue = 150000;
+ break;
+ }
+ MaxNodesPerTopLevelFunction = getOptionAsInteger("max-nodes", DefaultValue);
+ }
+ return MaxNodesPerTopLevelFunction.getValue();
+}
+
+bool AnalyzerOptions::shouldSynthesizeBodies() {
+ return getBooleanOption("faux-bodies", true);
+}
+
+bool AnalyzerOptions::shouldPrunePaths() {
+ return getBooleanOption("prune-paths", true);
+}
+
+bool AnalyzerOptions::shouldConditionalizeStaticInitializers() {
+ return getBooleanOption("cfg-conditional-static-initializers", true);
+}
+
+bool AnalyzerOptions::shouldInlineLambdas() {
+ if (!InlineLambdas.hasValue())
+ InlineLambdas = getBooleanOption("inline-lambdas", /*Default=*/true);
+ return InlineLambdas.getValue();
+}
+
+bool AnalyzerOptions::shouldWidenLoops() {
+ if (!WidenLoops.hasValue())
+ WidenLoops = getBooleanOption("widen-loops", /*Default=*/false);
+ return WidenLoops.getValue();
+}
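+
+// Usage sketch (illustrative; assumes the user-mode key is spelled "mode"):
+// several of the options above can be toggled in a single invocation, e.g.
+//
+//   clang --analyze -Xclang -analyzer-config \
+//     -Xclang mode=shallow,widen-loops=true t.c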
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
new file mode 100644
index 0000000..3c3f41a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
@@ -0,0 +1,292 @@
+//=== BasicValueFactory.cpp - Basic values for path-sensitive analysis -*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BasicValueFactory, a class that manages the lifetime
+// of APSInt objects and symbolic constraints used by ExprEngine
+// and related classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+
+using namespace clang;
+using namespace ento;
+
+void CompoundValData::Profile(llvm::FoldingSetNodeID& ID, QualType T,
+ llvm::ImmutableList<SVal> L) {
+ T.Profile(ID);
+ ID.AddPointer(L.getInternalPointer());
+}
+
+void LazyCompoundValData::Profile(llvm::FoldingSetNodeID& ID,
+ const StoreRef &store,
+ const TypedValueRegion *region) {
+ ID.AddPointer(store.getStore());
+ ID.AddPointer(region);
+}
+
+typedef std::pair<SVal, uintptr_t> SValData;
+typedef std::pair<SVal, SVal> SValPair;
+
+namespace llvm {
+template<> struct FoldingSetTrait<SValData> {
+ static inline void Profile(const SValData& X, llvm::FoldingSetNodeID& ID) {
+ X.first.Profile(ID);
+ ID.AddPointer( (void*) X.second);
+ }
+};
+
+template<> struct FoldingSetTrait<SValPair> {
+ static inline void Profile(const SValPair& X, llvm::FoldingSetNodeID& ID) {
+ X.first.Profile(ID);
+ X.second.Profile(ID);
+ }
+};
+}
+
+typedef llvm::FoldingSet<llvm::FoldingSetNodeWrapper<SValData> >
+ PersistentSValsTy;
+
+typedef llvm::FoldingSet<llvm::FoldingSetNodeWrapper<SValPair> >
+ PersistentSValPairsTy;
+
+BasicValueFactory::~BasicValueFactory() {
+  // Note that the destructor for the contents of APSIntSet will never be
+  // called, so we iterate over the set and invoke the destructor for each
+  // APSInt. This frees any auxiliary memory allocated to represent very
+  // large constants.
+ for (APSIntSetTy::iterator I=APSIntSet.begin(), E=APSIntSet.end(); I!=E; ++I)
+ I->getValue().~APSInt();
+
+ delete (PersistentSValsTy*) PersistentSVals;
+ delete (PersistentSValPairsTy*) PersistentSValPairs;
+}
+
+const llvm::APSInt& BasicValueFactory::getValue(const llvm::APSInt& X) {
+ llvm::FoldingSetNodeID ID;
+ void *InsertPos;
+ typedef llvm::FoldingSetNodeWrapper<llvm::APSInt> FoldNodeTy;
+
+ X.Profile(ID);
+ FoldNodeTy* P = APSIntSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!P) {
+ P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
+ new (P) FoldNodeTy(X);
+ APSIntSet.InsertNode(P, InsertPos);
+ }
+
+ return *P;
+}
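+
+// Example (illustrative; "BVF" is a hypothetical BasicValueFactory instance):
+// because getValue() uniques through a FoldingSet, structurally equal
+// integers come back as the same canonical object:
+//
+//   const llvm::APSInt &A = BVF.getValue(42, 32, /*isUnsigned=*/false);
+//   const llvm::APSInt &B = BVF.getValue(42, 32, /*isUnsigned=*/false);
+//   assert(&A == &B); // uniqued, so pointer comparison suffices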
+
+const llvm::APSInt& BasicValueFactory::getValue(const llvm::APInt& X,
+ bool isUnsigned) {
+ llvm::APSInt V(X, isUnsigned);
+ return getValue(V);
+}
+
+const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, unsigned BitWidth,
+ bool isUnsigned) {
+ llvm::APSInt V(BitWidth, isUnsigned);
+ V = X;
+ return getValue(V);
+}
+
+const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, QualType T) {
+
+ return getValue(getAPSIntType(T).getValue(X));
+}
+
+const CompoundValData*
+BasicValueFactory::getCompoundValData(QualType T,
+ llvm::ImmutableList<SVal> Vals) {
+
+ llvm::FoldingSetNodeID ID;
+ CompoundValData::Profile(ID, T, Vals);
+ void *InsertPos;
+
+ CompoundValData* D = CompoundValDataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!D) {
+ D = (CompoundValData*) BPAlloc.Allocate<CompoundValData>();
+ new (D) CompoundValData(T, Vals);
+ CompoundValDataSet.InsertNode(D, InsertPos);
+ }
+
+ return D;
+}
+
+const LazyCompoundValData*
+BasicValueFactory::getLazyCompoundValData(const StoreRef &store,
+ const TypedValueRegion *region) {
+ llvm::FoldingSetNodeID ID;
+ LazyCompoundValData::Profile(ID, store, region);
+ void *InsertPos;
+
+ LazyCompoundValData *D =
+ LazyCompoundValDataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!D) {
+ D = (LazyCompoundValData*) BPAlloc.Allocate<LazyCompoundValData>();
+ new (D) LazyCompoundValData(store, region);
+ LazyCompoundValDataSet.InsertNode(D, InsertPos);
+ }
+
+ return D;
+}
+
+const llvm::APSInt*
+BasicValueFactory::evalAPSInt(BinaryOperator::Opcode Op,
+ const llvm::APSInt& V1, const llvm::APSInt& V2) {
+
+ switch (Op) {
+  default:
+    llvm_unreachable("Invalid Opcode.");
+
+ case BO_Mul:
+ return &getValue( V1 * V2 );
+
+ case BO_Div:
+ if (V2 == 0) // Avoid division by zero
+ return nullptr;
+ return &getValue( V1 / V2 );
+
+ case BO_Rem:
+ if (V2 == 0) // Avoid division by zero
+ return nullptr;
+ return &getValue( V1 % V2 );
+
+ case BO_Add:
+ return &getValue( V1 + V2 );
+
+ case BO_Sub:
+ return &getValue( V1 - V2 );
+
+ case BO_Shl: {
+
+ // FIXME: This logic should probably go higher up, where we can
+ // test these conditions symbolically.
+
+ // FIXME: Expand these checks to include all undefined behavior.
+
+ if (V2.isSigned() && V2.isNegative())
+ return nullptr;
+
+ uint64_t Amt = V2.getZExtValue();
+
+ if (Amt >= V1.getBitWidth())
+ return nullptr;
+
+    return &getValue( V1 << (unsigned) Amt );
+ }
+
+ case BO_Shr: {
+
+ // FIXME: This logic should probably go higher up, where we can
+ // test these conditions symbolically.
+
+ // FIXME: Expand these checks to include all undefined behavior.
+
+ if (V2.isSigned() && V2.isNegative())
+ return nullptr;
+
+ uint64_t Amt = V2.getZExtValue();
+
+ if (Amt >= V1.getBitWidth())
+ return nullptr;
+
+    return &getValue( V1 >> (unsigned) Amt );
+ }
+
+ case BO_LT:
+ return &getTruthValue( V1 < V2 );
+
+ case BO_GT:
+ return &getTruthValue( V1 > V2 );
+
+ case BO_LE:
+ return &getTruthValue( V1 <= V2 );
+
+ case BO_GE:
+ return &getTruthValue( V1 >= V2 );
+
+ case BO_EQ:
+ return &getTruthValue( V1 == V2 );
+
+ case BO_NE:
+ return &getTruthValue( V1 != V2 );
+
+ // Note: LAnd, LOr, Comma are handled specially by higher-level logic.
+
+ case BO_And:
+ return &getValue( V1 & V2 );
+
+ case BO_Or:
+ return &getValue( V1 | V2 );
+
+ case BO_Xor:
+ return &getValue( V1 ^ V2 );
+ }
+}
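+
+// Worked example (illustrative): for 32-bit signed operands V1 = 5 and
+// V2 = 2, evalAPSInt(BO_Shl, V1, V2) returns &getValue(20), while a shift
+// amount of 32 or more, or a negative V2, yields nullptr instead of
+// evaluating undefined behavior.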
+
+const std::pair<SVal, uintptr_t>&
+BasicValueFactory::getPersistentSValWithData(const SVal& V, uintptr_t Data) {
+
+ // Lazily create the folding set.
+ if (!PersistentSVals) PersistentSVals = new PersistentSValsTy();
+
+ llvm::FoldingSetNodeID ID;
+ void *InsertPos;
+ V.Profile(ID);
+ ID.AddPointer((void*) Data);
+
+ PersistentSValsTy& Map = *((PersistentSValsTy*) PersistentSVals);
+
+ typedef llvm::FoldingSetNodeWrapper<SValData> FoldNodeTy;
+ FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!P) {
+ P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
+ new (P) FoldNodeTy(std::make_pair(V, Data));
+ Map.InsertNode(P, InsertPos);
+ }
+
+ return P->getValue();
+}
+
+const std::pair<SVal, SVal>&
+BasicValueFactory::getPersistentSValPair(const SVal& V1, const SVal& V2) {
+
+ // Lazily create the folding set.
+ if (!PersistentSValPairs) PersistentSValPairs = new PersistentSValPairsTy();
+
+ llvm::FoldingSetNodeID ID;
+ void *InsertPos;
+ V1.Profile(ID);
+ V2.Profile(ID);
+
+ PersistentSValPairsTy& Map = *((PersistentSValPairsTy*) PersistentSValPairs);
+
+ typedef llvm::FoldingSetNodeWrapper<SValPair> FoldNodeTy;
+ FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!P) {
+ P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
+ new (P) FoldNodeTy(std::make_pair(V1, V2));
+ Map.InsertNode(P, InsertPos);
+ }
+
+ return P->getValue();
+}
+
+const SVal* BasicValueFactory::getPersistentSVal(SVal X) {
+ return &getPersistentSValWithData(X, 0).first;
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BlockCounter.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BlockCounter.cpp
new file mode 100644
index 0000000..8c99bd8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BlockCounter.cpp
@@ -0,0 +1,85 @@
+//==- BlockCounter.cpp - ADT for counting block visits -----------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BlockCounter, an abstract data type used to count
+// the number of times a given block has been visited along a path
+// analyzed by CoreEngine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h"
+#include "llvm/ADT/ImmutableMap.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class CountKey {
+ const StackFrameContext *CallSite;
+ unsigned BlockID;
+
+public:
+ CountKey(const StackFrameContext *CS, unsigned ID)
+ : CallSite(CS), BlockID(ID) {}
+
+ bool operator==(const CountKey &RHS) const {
+ return (CallSite == RHS.CallSite) && (BlockID == RHS.BlockID);
+ }
+
+ bool operator<(const CountKey &RHS) const {
+ return std::tie(CallSite, BlockID) < std::tie(RHS.CallSite, RHS.BlockID);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddPointer(CallSite);
+ ID.AddInteger(BlockID);
+ }
+};
+
+}
+
+typedef llvm::ImmutableMap<CountKey, unsigned> CountMap;
+
+static inline CountMap GetMap(void *D) {
+ return CountMap(static_cast<CountMap::TreeTy*>(D));
+}
+
+static inline CountMap::Factory& GetFactory(void *F) {
+ return *static_cast<CountMap::Factory*>(F);
+}
+
+unsigned BlockCounter::getNumVisited(const StackFrameContext *CallSite,
+ unsigned BlockID) const {
+ CountMap M = GetMap(Data);
+ CountMap::data_type* T = M.lookup(CountKey(CallSite, BlockID));
+ return T ? *T : 0;
+}
+
+BlockCounter::Factory::Factory(llvm::BumpPtrAllocator& Alloc) {
+ F = new CountMap::Factory(Alloc);
+}
+
+BlockCounter::Factory::~Factory() {
+ delete static_cast<CountMap::Factory*>(F);
+}
+
+BlockCounter
+BlockCounter::Factory::IncrementCount(BlockCounter BC,
+ const StackFrameContext *CallSite,
+ unsigned BlockID) {
+ return BlockCounter(GetFactory(F).add(GetMap(BC.Data),
+ CountKey(CallSite, BlockID),
+ BC.getNumVisited(CallSite, BlockID)+1).getRoot());
+}
+
+BlockCounter
+BlockCounter::Factory::GetEmptyCounter() {
+ return BlockCounter(GetFactory(F).getEmptyMap().getRoot());
+}
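+
+// Usage sketch (illustrative; "Alloc", "CallSite" and "BlockID" are
+// hypothetical locals): CoreEngine threads a counter through the worklist
+// roughly as follows:
+//
+//   BlockCounter::Factory CounterFactory(Alloc);
+//   BlockCounter BC = CounterFactory.GetEmptyCounter();
+//   BC = CounterFactory.IncrementCount(BC, CallSite, BlockID);
+//   unsigned Visits = BC.getNumVisited(CallSite, BlockID); // == 1
+//
+// Each IncrementCount returns a new immutable map, so earlier BlockCounter
+// values remain valid snapshots.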
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
new file mode 100644
index 0000000..11be764
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -0,0 +1,3553 @@
+//===- BugReporter.cpp - Generate PathDiagnostics for Bugs -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BugReporter, a utility class for generating
+// PathDiagnostics.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/raw_ostream.h"
+#include <memory>
+#include <queue>
+
+using namespace clang;
+using namespace ento;
+
+#define DEBUG_TYPE "BugReporter"
+
+STATISTIC(MaxBugClassSize,
+ "The maximum number of bug reports in the same equivalence class");
+STATISTIC(MaxValidBugClassSize,
+ "The maximum number of bug reports in the same equivalence class "
+ "where at least one report is valid (not suppressed)");
+
+BugReporterVisitor::~BugReporterVisitor() {}
+
+void BugReporterContext::anchor() {}
+
+//===----------------------------------------------------------------------===//
+// Helper routines for walking the ExplodedGraph and fetching statements.
+//===----------------------------------------------------------------------===//
+
+static const Stmt *GetPreviousStmt(const ExplodedNode *N) {
+ for (N = N->getFirstPred(); N; N = N->getFirstPred())
+ if (const Stmt *S = PathDiagnosticLocation::getStmt(N))
+ return S;
+
+ return nullptr;
+}
+
+static inline const Stmt*
+GetCurrentOrPreviousStmt(const ExplodedNode *N) {
+ if (const Stmt *S = PathDiagnosticLocation::getStmt(N))
+ return S;
+
+ return GetPreviousStmt(N);
+}
+
+//===----------------------------------------------------------------------===//
+// Diagnostic cleanup.
+//===----------------------------------------------------------------------===//
+
+static PathDiagnosticEventPiece *
+eventsDescribeSameCondition(PathDiagnosticEventPiece *X,
+ PathDiagnosticEventPiece *Y) {
+ // Prefer diagnostics that come from ConditionBRVisitor over
+ // those that came from TrackConstraintBRVisitor.
+ const void *tagPreferred = ConditionBRVisitor::getTag();
+ const void *tagLesser = TrackConstraintBRVisitor::getTag();
+
+ if (X->getLocation() != Y->getLocation())
+ return nullptr;
+
+ if (X->getTag() == tagPreferred && Y->getTag() == tagLesser)
+ return X;
+
+ if (Y->getTag() == tagPreferred && X->getTag() == tagLesser)
+ return Y;
+
+ return nullptr;
+}
+
+/// An optimization pass over PathPieces that removes redundant diagnostics
+/// generated by both ConditionBRVisitor and TrackConstraintBRVisitor. The two
+/// visitors produce their diagnostics by different means, and each can emit a
+/// piece in cases where the other cannot, so the same point in a path may end
+/// up described by two redundant diagnostic pieces.
+static void removeRedundantMsgs(PathPieces &path) {
+ unsigned N = path.size();
+ if (N < 2)
+ return;
+  // NOTE: this loop intentionally does not use an iterator. Instead, we
+  // stream the path and modify it in place: grab the front piece, process
+  // it, and, if we decide to keep it, append it to the end of the path.
+  // The entire path is processed this way.
+ for (unsigned i = 0; i < N; ++i) {
+ IntrusiveRefCntPtr<PathDiagnosticPiece> piece(path.front());
+ path.pop_front();
+
+ switch (piece->getKind()) {
+ case clang::ento::PathDiagnosticPiece::Call:
+ removeRedundantMsgs(cast<PathDiagnosticCallPiece>(piece)->path);
+ break;
+ case clang::ento::PathDiagnosticPiece::Macro:
+ removeRedundantMsgs(cast<PathDiagnosticMacroPiece>(piece)->subPieces);
+ break;
+ case clang::ento::PathDiagnosticPiece::ControlFlow:
+ break;
+ case clang::ento::PathDiagnosticPiece::Event: {
+ if (i == N-1)
+ break;
+
+ if (PathDiagnosticEventPiece *nextEvent =
+ dyn_cast<PathDiagnosticEventPiece>(path.front().get())) {
+ PathDiagnosticEventPiece *event =
+ cast<PathDiagnosticEventPiece>(piece);
+ // Check to see if we should keep one of the two pieces. If we
+ // come up with a preference, record which piece to keep, and consume
+ // another piece from the path.
+ if (PathDiagnosticEventPiece *pieceToKeep =
+ eventsDescribeSameCondition(event, nextEvent)) {
+ piece = pieceToKeep;
+ path.pop_front();
+ ++i;
+ }
+ }
+ break;
+ }
+ }
+ path.push_back(piece);
+ }
+}
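+
+// Example (illustrative): if ConditionBRVisitor and TrackConstraintBRVisitor
+// both emit an "Assuming 'p' is null"-style event at the same location, the
+// pass above keeps the ConditionBRVisitor piece and drops the other.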
+
+/// A map from PathDiagnosticPiece to the LocationContext of the inlined
+/// function call it represents.
+typedef llvm::DenseMap<const PathPieces *, const LocationContext *>
+ LocationContextMap;
+
+/// Recursively scan through a path and prune out calls and macros pieces
+/// that aren't needed. Return true if afterwards the path contains
+/// "interesting stuff" which means it shouldn't be pruned from the parent path.
+static bool removeUnneededCalls(PathPieces &pieces, BugReport *R,
+ LocationContextMap &LCM) {
+ bool containsSomethingInteresting = false;
+ const unsigned N = pieces.size();
+
+ for (unsigned i = 0 ; i < N ; ++i) {
+ // Remove the front piece from the path. If it is still something we
+ // want to keep once we are done, we will push it back on the end.
+ IntrusiveRefCntPtr<PathDiagnosticPiece> piece(pieces.front());
+ pieces.pop_front();
+
+ switch (piece->getKind()) {
+ case PathDiagnosticPiece::Call: {
+ PathDiagnosticCallPiece *call = cast<PathDiagnosticCallPiece>(piece);
+ // Check if the location context is interesting.
+ assert(LCM.count(&call->path));
+ if (R->isInteresting(LCM[&call->path])) {
+ containsSomethingInteresting = true;
+ break;
+ }
+
+ if (!removeUnneededCalls(call->path, R, LCM))
+ continue;
+
+ containsSomethingInteresting = true;
+ break;
+ }
+ case PathDiagnosticPiece::Macro: {
+ PathDiagnosticMacroPiece *macro = cast<PathDiagnosticMacroPiece>(piece);
+ if (!removeUnneededCalls(macro->subPieces, R, LCM))
+ continue;
+ containsSomethingInteresting = true;
+ break;
+ }
+ case PathDiagnosticPiece::Event: {
+ PathDiagnosticEventPiece *event = cast<PathDiagnosticEventPiece>(piece);
+
+      // We never throw away an individual event; it is only discarded
+      // wholesale when the entire enclosing path is thrown away.
+ containsSomethingInteresting |= !event->isPrunable();
+ break;
+ }
+ case PathDiagnosticPiece::ControlFlow:
+ break;
+ }
+
+ pieces.push_back(piece);
+ }
+
+ return containsSomethingInteresting;
+}
+
+/// Returns true if the given decl has been implicitly given a body, either by
+/// the analyzer or by the compiler proper.
+static bool hasImplicitBody(const Decl *D) {
+ assert(D);
+ return D->isImplicit() || !D->hasBody();
+}
+
+/// Recursively scan through a path and make sure that all call pieces have
+/// valid locations.
+static void
+adjustCallLocations(PathPieces &Pieces,
+ PathDiagnosticLocation *LastCallLocation = nullptr) {
+ for (PathPieces::iterator I = Pieces.begin(), E = Pieces.end(); I != E; ++I) {
+ PathDiagnosticCallPiece *Call = dyn_cast<PathDiagnosticCallPiece>(*I);
+
+ if (!Call) {
+ assert((*I)->getLocation().asLocation().isValid());
+ continue;
+ }
+
+ if (LastCallLocation) {
+ bool CallerIsImplicit = hasImplicitBody(Call->getCaller());
+ if (CallerIsImplicit || !Call->callEnter.asLocation().isValid())
+ Call->callEnter = *LastCallLocation;
+ if (CallerIsImplicit || !Call->callReturn.asLocation().isValid())
+ Call->callReturn = *LastCallLocation;
+ }
+
+ // Recursively clean out the subclass. Keep this call around if
+ // it contains any informative diagnostics.
+ PathDiagnosticLocation *ThisCallLocation;
+ if (Call->callEnterWithin.asLocation().isValid() &&
+ !hasImplicitBody(Call->getCallee()))
+ ThisCallLocation = &Call->callEnterWithin;
+ else
+ ThisCallLocation = &Call->callEnter;
+
+ assert(ThisCallLocation && "Outermost call has an invalid location");
+ adjustCallLocations(Call->path, ThisCallLocation);
+ }
+}
+
+/// Remove edges in and out of C++ default initializer expressions. These are
+/// for fields that have in-class initializers, as opposed to being initialized
+/// explicitly in a constructor or braced list.
+static void removeEdgesToDefaultInitializers(PathPieces &Pieces) {
+ for (PathPieces::iterator I = Pieces.begin(), E = Pieces.end(); I != E;) {
+ if (PathDiagnosticCallPiece *C = dyn_cast<PathDiagnosticCallPiece>(*I))
+ removeEdgesToDefaultInitializers(C->path);
+
+ if (PathDiagnosticMacroPiece *M = dyn_cast<PathDiagnosticMacroPiece>(*I))
+ removeEdgesToDefaultInitializers(M->subPieces);
+
+ if (PathDiagnosticControlFlowPiece *CF =
+ dyn_cast<PathDiagnosticControlFlowPiece>(*I)) {
+ const Stmt *Start = CF->getStartLocation().asStmt();
+ const Stmt *End = CF->getEndLocation().asStmt();
+ if (Start && isa<CXXDefaultInitExpr>(Start)) {
+ I = Pieces.erase(I);
+ continue;
+ } else if (End && isa<CXXDefaultInitExpr>(End)) {
+ PathPieces::iterator Next = std::next(I);
+ if (Next != E) {
+ if (PathDiagnosticControlFlowPiece *NextCF =
+ dyn_cast<PathDiagnosticControlFlowPiece>(*Next)) {
+ NextCF->setStartLocation(CF->getStartLocation());
+ }
+ }
+ I = Pieces.erase(I);
+ continue;
+ }
+ }
+
+    ++I;
+ }
+}
+
+/// Remove all pieces with invalid locations as these cannot be serialized.
+/// We might have pieces with invalid locations as a result of inlining Body
+/// Farm generated functions.
+static void removePiecesWithInvalidLocations(PathPieces &Pieces) {
+ for (PathPieces::iterator I = Pieces.begin(), E = Pieces.end(); I != E;) {
+ if (PathDiagnosticCallPiece *C = dyn_cast<PathDiagnosticCallPiece>(*I))
+ removePiecesWithInvalidLocations(C->path);
+
+ if (PathDiagnosticMacroPiece *M = dyn_cast<PathDiagnosticMacroPiece>(*I))
+ removePiecesWithInvalidLocations(M->subPieces);
+
+ if (!(*I)->getLocation().isValid() ||
+ !(*I)->getLocation().asLocation().isValid()) {
+ I = Pieces.erase(I);
+ continue;
+ }
+    ++I;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// PathDiagnosticBuilder and its associated routines and helper objects.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class NodeMapClosure : public BugReport::NodeResolver {
+ InterExplodedGraphMap &M;
+public:
+ NodeMapClosure(InterExplodedGraphMap &m) : M(m) {}
+
+ const ExplodedNode *getOriginalNode(const ExplodedNode *N) override {
+ return M.lookup(N);
+ }
+};
+
+class PathDiagnosticBuilder : public BugReporterContext {
+ BugReport *R;
+ PathDiagnosticConsumer *PDC;
+ NodeMapClosure NMC;
+public:
+ const LocationContext *LC;
+
+ PathDiagnosticBuilder(GRBugReporter &br,
+ BugReport *r, InterExplodedGraphMap &Backmap,
+ PathDiagnosticConsumer *pdc)
+ : BugReporterContext(br),
+ R(r), PDC(pdc), NMC(Backmap), LC(r->getErrorNode()->getLocationContext())
+ {}
+
+ PathDiagnosticLocation ExecutionContinues(const ExplodedNode *N);
+
+ PathDiagnosticLocation ExecutionContinues(llvm::raw_string_ostream &os,
+ const ExplodedNode *N);
+
+ BugReport *getBugReport() { return R; }
+
+ Decl const &getCodeDecl() { return R->getErrorNode()->getCodeDecl(); }
+
+ ParentMap& getParentMap() { return LC->getParentMap(); }
+
+ const Stmt *getParent(const Stmt *S) {
+ return getParentMap().getParent(S);
+ }
+
+ NodeMapClosure& getNodeResolver() override { return NMC; }
+
+ PathDiagnosticLocation getEnclosingStmtLocation(const Stmt *S);
+
+ PathDiagnosticConsumer::PathGenerationScheme getGenerationScheme() const {
+ return PDC ? PDC->getGenerationScheme() : PathDiagnosticConsumer::Extensive;
+ }
+
+ bool supportsLogicalOpControlFlow() const {
+ return PDC ? PDC->supportsLogicalOpControlFlow() : true;
+ }
+};
+} // end anonymous namespace
+
+PathDiagnosticLocation
+PathDiagnosticBuilder::ExecutionContinues(const ExplodedNode *N) {
+ if (const Stmt *S = PathDiagnosticLocation::getNextStmt(N))
+ return PathDiagnosticLocation(S, getSourceManager(), LC);
+
+ return PathDiagnosticLocation::createDeclEnd(N->getLocationContext(),
+ getSourceManager());
+}
+
+PathDiagnosticLocation
+PathDiagnosticBuilder::ExecutionContinues(llvm::raw_string_ostream &os,
+ const ExplodedNode *N) {
+
+ // Slow, but probably doesn't matter.
+ if (os.str().empty())
+ os << ' ';
+
+ const PathDiagnosticLocation &Loc = ExecutionContinues(N);
+
+ if (Loc.asStmt())
+ os << "Execution continues on line "
+ << getSourceManager().getExpansionLineNumber(Loc.asLocation())
+ << '.';
+ else {
+ os << "Execution jumps to the end of the ";
+ const Decl *D = N->getLocationContext()->getDecl();
+ if (isa<ObjCMethodDecl>(D))
+ os << "method";
+ else if (isa<FunctionDecl>(D))
+ os << "function";
+ else {
+ assert(isa<BlockDecl>(D));
+ os << "anonymous block";
+ }
+ os << '.';
+ }
+
+ return Loc;
+}
+
+static const Stmt *getEnclosingParent(const Stmt *S, const ParentMap &PM) {
+ if (isa<Expr>(S) && PM.isConsumedExpr(cast<Expr>(S)))
+ return PM.getParentIgnoreParens(S);
+
+ const Stmt *Parent = PM.getParentIgnoreParens(S);
+ if (!Parent)
+ return nullptr;
+
+ switch (Parent->getStmtClass()) {
+ case Stmt::ForStmtClass:
+ case Stmt::DoStmtClass:
+ case Stmt::WhileStmtClass:
+ case Stmt::ObjCForCollectionStmtClass:
+ case Stmt::CXXForRangeStmtClass:
+ return Parent;
+ default:
+ break;
+ }
+
+ return nullptr;
+}
+
+static PathDiagnosticLocation
+getEnclosingStmtLocation(const Stmt *S, SourceManager &SMgr, const ParentMap &P,
+ const LocationContext *LC, bool allowNestedContexts) {
+ if (!S)
+ return PathDiagnosticLocation();
+
+ while (const Stmt *Parent = getEnclosingParent(S, P)) {
+ switch (Parent->getStmtClass()) {
+ case Stmt::BinaryOperatorClass: {
+ const BinaryOperator *B = cast<BinaryOperator>(Parent);
+ if (B->isLogicalOp())
+ return PathDiagnosticLocation(allowNestedContexts ? B : S, SMgr, LC);
+ break;
+ }
+ case Stmt::CompoundStmtClass:
+ case Stmt::StmtExprClass:
+ return PathDiagnosticLocation(S, SMgr, LC);
+ case Stmt::ChooseExprClass:
+      // Similar to '?': if we are referring to the condition, just have the
+      // edge point to the entire choose expression.
+ if (allowNestedContexts || cast<ChooseExpr>(Parent)->getCond() == S)
+ return PathDiagnosticLocation(Parent, SMgr, LC);
+ else
+ return PathDiagnosticLocation(S, SMgr, LC);
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass:
+      // For '?', if we are referring to the condition, just have the edge
+      // point to the entire '?' expression.
+ if (allowNestedContexts ||
+ cast<AbstractConditionalOperator>(Parent)->getCond() == S)
+ return PathDiagnosticLocation(Parent, SMgr, LC);
+ else
+ return PathDiagnosticLocation(S, SMgr, LC);
+ case Stmt::CXXForRangeStmtClass:
+ if (cast<CXXForRangeStmt>(Parent)->getBody() == S)
+ return PathDiagnosticLocation(S, SMgr, LC);
+ break;
+ case Stmt::DoStmtClass:
+ return PathDiagnosticLocation(S, SMgr, LC);
+ case Stmt::ForStmtClass:
+ if (cast<ForStmt>(Parent)->getBody() == S)
+ return PathDiagnosticLocation(S, SMgr, LC);
+ break;
+ case Stmt::IfStmtClass:
+ if (cast<IfStmt>(Parent)->getCond() != S)
+ return PathDiagnosticLocation(S, SMgr, LC);
+ break;
+ case Stmt::ObjCForCollectionStmtClass:
+ if (cast<ObjCForCollectionStmt>(Parent)->getBody() == S)
+ return PathDiagnosticLocation(S, SMgr, LC);
+ break;
+ case Stmt::WhileStmtClass:
+ if (cast<WhileStmt>(Parent)->getCond() != S)
+ return PathDiagnosticLocation(S, SMgr, LC);
+ break;
+ default:
+ break;
+ }
+
+ S = Parent;
+ }
+
+ assert(S && "Cannot have null Stmt for PathDiagnosticLocation");
+
+ return PathDiagnosticLocation(S, SMgr, LC);
+}
+
+PathDiagnosticLocation
+PathDiagnosticBuilder::getEnclosingStmtLocation(const Stmt *S) {
+ assert(S && "Null Stmt passed to getEnclosingStmtLocation");
+ return ::getEnclosingStmtLocation(S, getSourceManager(), getParentMap(), LC,
+ /*allowNestedContexts=*/false);
+}
+
+//===----------------------------------------------------------------------===//
+// "Visitors only" path diagnostic generation algorithm.
+//===----------------------------------------------------------------------===//
+static bool GenerateVisitorsOnlyPathDiagnostic(
+ PathDiagnostic &PD, PathDiagnosticBuilder &PDB, const ExplodedNode *N,
+ ArrayRef<std::unique_ptr<BugReporterVisitor>> visitors) {
+ // All path generation skips the very first node (the error node).
+ // This is because there is special handling for the end-of-path note.
+ N = N->getFirstPred();
+ if (!N)
+ return true;
+
+ BugReport *R = PDB.getBugReport();
+ while (const ExplodedNode *Pred = N->getFirstPred()) {
+ for (auto &V : visitors) {
+ // Visit all the node pairs, but throw the path pieces away.
+ PathDiagnosticPiece *Piece = V->VisitNode(N, Pred, PDB, *R);
+ delete Piece;
+ }
+
+ N = Pred;
+ }
+
+ return R->isValid();
+}
+
+//===----------------------------------------------------------------------===//
+// "Minimal" path diagnostic generation algorithm.
+//===----------------------------------------------------------------------===//
+typedef std::pair<PathDiagnosticCallPiece*, const ExplodedNode*> StackDiagPair;
+typedef SmallVector<StackDiagPair, 6> StackDiagVector;
+
+static void updateStackPiecesWithMessage(PathDiagnosticPiece *P,
+ StackDiagVector &CallStack) {
+ // If the piece contains a special message, add it to all the call
+ // pieces on the active stack.
+ if (PathDiagnosticEventPiece *ep =
+ dyn_cast<PathDiagnosticEventPiece>(P)) {
+
+ if (ep->hasCallStackHint())
+ for (StackDiagVector::iterator I = CallStack.begin(),
+ E = CallStack.end(); I != E; ++I) {
+ PathDiagnosticCallPiece *CP = I->first;
+ const ExplodedNode *N = I->second;
+ std::string stackMsg = ep->getCallStackMessage(N);
+
+          // The last message on the path to the final bug is the most
+          // important one. Since we traverse the path backwards, do not add
+          // the message if one has been previously added.
+ if (!CP->hasCallStackMessage())
+ CP->setCallStackMessage(stackMsg);
+ }
+ }
+}
+
+static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM);
+
+static bool GenerateMinimalPathDiagnostic(
+ PathDiagnostic &PD, PathDiagnosticBuilder &PDB, const ExplodedNode *N,
+ LocationContextMap &LCM,
+ ArrayRef<std::unique_ptr<BugReporterVisitor>> visitors) {
+
+ SourceManager& SMgr = PDB.getSourceManager();
+ const LocationContext *LC = PDB.LC;
+ const ExplodedNode *NextNode = N->pred_empty()
+ ? nullptr : *(N->pred_begin());
+
+ StackDiagVector CallStack;
+
+ while (NextNode) {
+ N = NextNode;
+ PDB.LC = N->getLocationContext();
+ NextNode = N->getFirstPred();
+
+ ProgramPoint P = N->getLocation();
+
+ do {
+ if (Optional<CallExitEnd> CE = P.getAs<CallExitEnd>()) {
+ PathDiagnosticCallPiece *C =
+ PathDiagnosticCallPiece::construct(N, *CE, SMgr);
+ // Record the mapping from call piece to LocationContext.
+ LCM[&C->path] = CE->getCalleeContext();
+ PD.getActivePath().push_front(C);
+ PD.pushActivePath(&C->path);
+ CallStack.push_back(StackDiagPair(C, N));
+ break;
+ }
+
+ if (Optional<CallEnter> CE = P.getAs<CallEnter>()) {
+ // Flush all locations, and pop the active path.
+ bool VisitedEntireCall = PD.isWithinCall();
+ PD.popActivePath();
+
+ // Either we just added a bunch of stuff to the top-level path, or
+ // we have a previous CallExitEnd. If the former, it means that the
+ // path terminated within a function call. We must then take the
+ // current contents of the active path and place it within
+ // a new PathDiagnosticCallPiece.
+ PathDiagnosticCallPiece *C;
+ if (VisitedEntireCall) {
+ C = cast<PathDiagnosticCallPiece>(PD.getActivePath().front());
+ } else {
+ const Decl *Caller = CE->getLocationContext()->getDecl();
+ C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
+ // Record the mapping from call piece to LocationContext.
+ LCM[&C->path] = CE->getCalleeContext();
+ }
+
+ C->setCallee(*CE, SMgr);
+ if (!CallStack.empty()) {
+ assert(CallStack.back().first == C);
+ CallStack.pop_back();
+ }
+ break;
+ }
+
+ if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
+ const CFGBlock *Src = BE->getSrc();
+ const CFGBlock *Dst = BE->getDst();
+ const Stmt *T = Src->getTerminator();
+
+ if (!T)
+ break;
+
+ PathDiagnosticLocation Start =
+ PathDiagnosticLocation::createBegin(T, SMgr,
+ N->getLocationContext());
+
+ switch (T->getStmtClass()) {
+ default:
+ break;
+
+ case Stmt::GotoStmtClass:
+ case Stmt::IndirectGotoStmtClass: {
+ const Stmt *S = PathDiagnosticLocation::getNextStmt(N);
+
+ if (!S)
+ break;
+
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ const PathDiagnosticLocation &End = PDB.getEnclosingStmtLocation(S);
+
+ os << "Control jumps to line "
+ << End.asLocation().getExpansionLineNumber();
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
+ break;
+ }
+
+ case Stmt::SwitchStmtClass: {
+ // Figure out what case arm we took.
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ if (const Stmt *S = Dst->getLabel()) {
+ PathDiagnosticLocation End(S, SMgr, LC);
+
+ switch (S->getStmtClass()) {
+ default:
+ os << "No cases match in the switch statement. "
+ "Control jumps to line "
+ << End.asLocation().getExpansionLineNumber();
+ break;
+ case Stmt::DefaultStmtClass:
+ os << "Control jumps to the 'default' case at line "
+ << End.asLocation().getExpansionLineNumber();
+ break;
+
+ case Stmt::CaseStmtClass: {
+ os << "Control jumps to 'case ";
+ const CaseStmt *Case = cast<CaseStmt>(S);
+ const Expr *LHS = Case->getLHS()->IgnoreParenCasts();
+
+ // Determine if it is an enum.
+ bool GetRawInt = true;
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(LHS)) {
+ // FIXME: Maybe this should be an assertion. Are there cases
+              // where it is not an EnumConstantDecl?
+ const EnumConstantDecl *D =
+ dyn_cast<EnumConstantDecl>(DR->getDecl());
+
+ if (D) {
+ GetRawInt = false;
+ os << *D;
+ }
+ }
+
+ if (GetRawInt)
+ os << LHS->EvaluateKnownConstInt(PDB.getASTContext());
+
+ os << ":' at line "
+ << End.asLocation().getExpansionLineNumber();
+ break;
+ }
+ }
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
+ }
+ else {
+ os << "'Default' branch taken. ";
+ const PathDiagnosticLocation &End = PDB.ExecutionContinues(os, N);
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
+ }
+
+ break;
+ }
+
+ case Stmt::BreakStmtClass:
+ case Stmt::ContinueStmtClass: {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
+ break;
+ }
+
+ // Determine control-flow for ternary '?'.
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass: {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ os << "'?' condition is ";
+
+ if (*(Src->succ_begin()+1) == Dst)
+ os << "false";
+ else
+ os << "true";
+
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
+ break;
+ }
+
+ // Determine control-flow for short-circuited '&&' and '||'.
+ case Stmt::BinaryOperatorClass: {
+ if (!PDB.supportsLogicalOpControlFlow())
+ break;
+
+ const BinaryOperator *B = cast<BinaryOperator>(T);
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ os << "Left side of '";
+
+ if (B->getOpcode() == BO_LAnd) {
+ os << "&&" << "' is ";
+
+ if (*(Src->succ_begin()+1) == Dst) {
+ os << "false";
+ PathDiagnosticLocation End(B->getLHS(), SMgr, LC);
+ PathDiagnosticLocation Start =
+ PathDiagnosticLocation::createOperatorLoc(B, SMgr);
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
+ }
+ else {
+ os << "true";
+ PathDiagnosticLocation Start(B->getLHS(), SMgr, LC);
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
+ }
+ }
+ else {
+ assert(B->getOpcode() == BO_LOr);
+ os << "||" << "' is ";
+
+ if (*(Src->succ_begin()+1) == Dst) {
+ os << "false";
+ PathDiagnosticLocation Start(B->getLHS(), SMgr, LC);
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
+ }
+ else {
+ os << "true";
+ PathDiagnosticLocation End(B->getLHS(), SMgr, LC);
+ PathDiagnosticLocation Start =
+ PathDiagnosticLocation::createOperatorLoc(B, SMgr);
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
+ }
+ }
+
+ break;
+ }
+
+ case Stmt::DoStmtClass: {
+ if (*(Src->succ_begin()) == Dst) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "Loop condition is true. ";
+ PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
+
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
+ }
+ else {
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, "Loop condition is false. Exiting loop"));
+ }
+
+ break;
+ }
+
+ case Stmt::WhileStmtClass:
+ case Stmt::ForStmtClass: {
+ if (*(Src->succ_begin()+1) == Dst) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "Loop condition is false. ";
+ PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
+ }
+ else {
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, "Loop condition is true. Entering loop body"));
+ }
+
+ break;
+ }
+
+ case Stmt::IfStmtClass: {
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ if (*(Src->succ_begin()+1) == Dst)
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, "Taking false branch"));
+ else
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, "Taking true branch"));
+
+ break;
+ }
+ }
+ }
+    } while (0);
+
+ if (NextNode) {
+ // Add diagnostic pieces from custom visitors.
+ BugReport *R = PDB.getBugReport();
+ for (auto &V : visitors) {
+ if (PathDiagnosticPiece *p = V->VisitNode(N, NextNode, PDB, *R)) {
+ PD.getActivePath().push_front(p);
+ updateStackPiecesWithMessage(p, CallStack);
+ }
+ }
+ }
+ }
+
+ if (!PDB.getBugReport()->isValid())
+ return false;
+
+ // After constructing the full PathDiagnostic, do a pass over it to compact
+ // PathDiagnosticPieces that occur within a macro.
+ CompactPathDiagnostic(PD.getMutablePieces(), PDB.getSourceManager());
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// "Extensive" PathDiagnostic generation.
+//===----------------------------------------------------------------------===//
+
+static bool IsControlFlowExpr(const Stmt *S) {
+ const Expr *E = dyn_cast<Expr>(S);
+
+ if (!E)
+ return false;
+
+ E = E->IgnoreParenCasts();
+
+ if (isa<AbstractConditionalOperator>(E))
+ return true;
+
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(E))
+ if (B->isLogicalOp())
+ return true;
+
+ return false;
+}
+
+namespace {
+class ContextLocation : public PathDiagnosticLocation {
+ bool IsDead;
+public:
+ ContextLocation(const PathDiagnosticLocation &L, bool isdead = false)
+ : PathDiagnosticLocation(L), IsDead(isdead) {}
+
+ void markDead() { IsDead = true; }
+ bool isDead() const { return IsDead; }
+};
+
+static PathDiagnosticLocation cleanUpLocation(PathDiagnosticLocation L,
+ const LocationContext *LC,
+ bool firstCharOnly = false) {
+ if (const Stmt *S = L.asStmt()) {
+ const Stmt *Original = S;
+ while (1) {
+ // Adjust the location for some expressions that are best referenced
+ // by one of their subexpressions.
+ switch (S->getStmtClass()) {
+ default:
+ break;
+ case Stmt::ParenExprClass:
+ case Stmt::GenericSelectionExprClass:
+ S = cast<Expr>(S)->IgnoreParens();
+ firstCharOnly = true;
+ continue;
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass:
+ S = cast<AbstractConditionalOperator>(S)->getCond();
+ firstCharOnly = true;
+ continue;
+ case Stmt::ChooseExprClass:
+ S = cast<ChooseExpr>(S)->getCond();
+ firstCharOnly = true;
+ continue;
+ case Stmt::BinaryOperatorClass:
+ S = cast<BinaryOperator>(S)->getLHS();
+ firstCharOnly = true;
+ continue;
+ }
+
+ break;
+ }
+
+ if (S != Original)
+ L = PathDiagnosticLocation(S, L.getManager(), LC);
+ }
+
+ if (firstCharOnly)
+ L = PathDiagnosticLocation::createSingleLocation(L);
+
+ return L;
+}
+
+class EdgeBuilder {
+ std::vector<ContextLocation> CLocs;
+ typedef std::vector<ContextLocation>::iterator iterator;
+ PathDiagnostic &PD;
+ PathDiagnosticBuilder &PDB;
+ PathDiagnosticLocation PrevLoc;
+
+ bool IsConsumedExpr(const PathDiagnosticLocation &L);
+
+ bool containsLocation(const PathDiagnosticLocation &Container,
+ const PathDiagnosticLocation &Containee);
+
+ PathDiagnosticLocation getContextLocation(const PathDiagnosticLocation &L);
+
+ void popLocation() {
+ if (!CLocs.back().isDead() && CLocs.back().asLocation().isFileID()) {
+      // For contexts, we use only the first character as the range.
+ rawAddEdge(cleanUpLocation(CLocs.back(), PDB.LC, true));
+ }
+ CLocs.pop_back();
+ }
+
+public:
+ EdgeBuilder(PathDiagnostic &pd, PathDiagnosticBuilder &pdb)
+ : PD(pd), PDB(pdb) {
+
+ // If the PathDiagnostic already has pieces, add the enclosing statement
+ // of the first piece as a context as well.
+ if (!PD.path.empty()) {
+ PrevLoc = (*PD.path.begin())->getLocation();
+
+ if (const Stmt *S = PrevLoc.asStmt())
+ addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
+ }
+ }
+
+ ~EdgeBuilder() {
+ while (!CLocs.empty()) popLocation();
+
+ // Finally, add an initial edge from the start location of the first
+ // statement (if it doesn't already exist).
+ PathDiagnosticLocation L = PathDiagnosticLocation::createDeclBegin(
+ PDB.LC,
+ PDB.getSourceManager());
+ if (L.isValid())
+ rawAddEdge(L);
+ }
+
+ void flushLocations() {
+ while (!CLocs.empty())
+ popLocation();
+ PrevLoc = PathDiagnosticLocation();
+ }
+
+ void addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd = false,
+ bool IsPostJump = false);
+
+ void rawAddEdge(PathDiagnosticLocation NewLoc);
+
+ void addContext(const Stmt *S);
+ void addContext(const PathDiagnosticLocation &L);
+ void addExtendedContext(const Stmt *S);
+};
+} // end anonymous namespace
+
+PathDiagnosticLocation
+EdgeBuilder::getContextLocation(const PathDiagnosticLocation &L) {
+ if (const Stmt *S = L.asStmt()) {
+ if (IsControlFlowExpr(S))
+ return L;
+
+ return PDB.getEnclosingStmtLocation(S);
+ }
+
+ return L;
+}
+
+bool EdgeBuilder::containsLocation(const PathDiagnosticLocation &Container,
+ const PathDiagnosticLocation &Containee) {
+
+ if (Container == Containee)
+ return true;
+
+ if (Container.asDecl())
+ return true;
+
+ if (const Stmt *S = Containee.asStmt())
+ if (const Stmt *ContainerS = Container.asStmt()) {
+ while (S) {
+ if (S == ContainerS)
+ return true;
+ S = PDB.getParent(S);
+ }
+ return false;
+ }
+
+ // Less accurate: compare using source ranges.
+ SourceRange ContainerR = Container.asRange();
+ SourceRange ContaineeR = Containee.asRange();
+
+ SourceManager &SM = PDB.getSourceManager();
+ SourceLocation ContainerRBeg = SM.getExpansionLoc(ContainerR.getBegin());
+ SourceLocation ContainerREnd = SM.getExpansionLoc(ContainerR.getEnd());
+ SourceLocation ContaineeRBeg = SM.getExpansionLoc(ContaineeR.getBegin());
+ SourceLocation ContaineeREnd = SM.getExpansionLoc(ContaineeR.getEnd());
+
+ unsigned ContainerBegLine = SM.getExpansionLineNumber(ContainerRBeg);
+ unsigned ContainerEndLine = SM.getExpansionLineNumber(ContainerREnd);
+ unsigned ContaineeBegLine = SM.getExpansionLineNumber(ContaineeRBeg);
+ unsigned ContaineeEndLine = SM.getExpansionLineNumber(ContaineeREnd);
+
+ assert(ContainerBegLine <= ContainerEndLine);
+ assert(ContaineeBegLine <= ContaineeEndLine);
+
+ return (ContainerBegLine <= ContaineeBegLine &&
+ ContainerEndLine >= ContaineeEndLine &&
+ (ContainerBegLine != ContaineeBegLine ||
+ SM.getExpansionColumnNumber(ContainerRBeg) <=
+ SM.getExpansionColumnNumber(ContaineeRBeg)) &&
+ (ContainerEndLine != ContaineeEndLine ||
+ SM.getExpansionColumnNumber(ContainerREnd) >=
+ SM.getExpansionColumnNumber(ContaineeREnd)));
+}
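+
+// Worked example (illustrative): a container spanning 10:5..12:9 contains a
+// containee spanning 10:7..12:3 -- the begin lines tie, so begin columns are
+// compared (5 <= 7), and likewise on the shared end line (9 >= 3).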
+
+void EdgeBuilder::rawAddEdge(PathDiagnosticLocation NewLoc) {
+ if (!PrevLoc.isValid()) {
+ PrevLoc = NewLoc;
+ return;
+ }
+
+ const PathDiagnosticLocation &NewLocClean = cleanUpLocation(NewLoc, PDB.LC);
+ const PathDiagnosticLocation &PrevLocClean = cleanUpLocation(PrevLoc, PDB.LC);
+
+ if (PrevLocClean.asLocation().isInvalid()) {
+ PrevLoc = NewLoc;
+ return;
+ }
+
+ if (NewLocClean.asLocation() == PrevLocClean.asLocation())
+ return;
+
+ // FIXME: Ignore intra-macro edges for now.
+ if (NewLocClean.asLocation().getExpansionLoc() ==
+ PrevLocClean.asLocation().getExpansionLoc())
+ return;
+
+  PD.getActivePath().push_front(
+      new PathDiagnosticControlFlowPiece(NewLocClean, PrevLocClean));
+ PrevLoc = NewLoc;
+}
+
+void EdgeBuilder::addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd,
+ bool IsPostJump) {
+
+ if (!alwaysAdd && NewLoc.asLocation().isMacroID())
+ return;
+
+ const PathDiagnosticLocation &CLoc = getContextLocation(NewLoc);
+
+ while (!CLocs.empty()) {
+ ContextLocation &TopContextLoc = CLocs.back();
+
+ // Is the top location context the same as the one for the new location?
+ if (TopContextLoc == CLoc) {
+ if (alwaysAdd) {
+ if (IsConsumedExpr(TopContextLoc))
+ TopContextLoc.markDead();
+
+ rawAddEdge(NewLoc);
+ }
+
+ if (IsPostJump)
+ TopContextLoc.markDead();
+ return;
+ }
+
+ if (containsLocation(TopContextLoc, CLoc)) {
+ if (alwaysAdd) {
+ rawAddEdge(NewLoc);
+
+ if (IsConsumedExpr(CLoc)) {
+ CLocs.push_back(ContextLocation(CLoc, /*IsDead=*/true));
+ return;
+ }
+ }
+
+ CLocs.push_back(ContextLocation(CLoc, /*IsDead=*/IsPostJump));
+ return;
+ }
+
+ // Context does not contain the location. Flush it.
+ popLocation();
+ }
+
+ // If we reach here, there is no enclosing context. Just add the edge.
+ rawAddEdge(NewLoc);
+}
+
+bool EdgeBuilder::IsConsumedExpr(const PathDiagnosticLocation &L) {
+ if (const Expr *X = dyn_cast_or_null<Expr>(L.asStmt()))
+ return PDB.getParentMap().isConsumedExpr(X) && !IsControlFlowExpr(X);
+
+ return false;
+}
+
+void EdgeBuilder::addExtendedContext(const Stmt *S) {
+ if (!S)
+ return;
+
+ const Stmt *Parent = PDB.getParent(S);
+ while (Parent) {
+ if (isa<CompoundStmt>(Parent))
+ Parent = PDB.getParent(Parent);
+ else
+ break;
+ }
+
+ if (Parent) {
+ switch (Parent->getStmtClass()) {
+ case Stmt::DoStmtClass:
+ case Stmt::ObjCAtSynchronizedStmtClass:
+ addContext(Parent);
+ default:
+ break;
+ }
+ }
+
+ addContext(S);
+}
+
+void EdgeBuilder::addContext(const Stmt *S) {
+ if (!S)
+ return;
+
+ PathDiagnosticLocation L(S, PDB.getSourceManager(), PDB.LC);
+ addContext(L);
+}
+
+void EdgeBuilder::addContext(const PathDiagnosticLocation &L) {
+ while (!CLocs.empty()) {
+ const PathDiagnosticLocation &TopContextLoc = CLocs.back();
+
+ // Is the top location context the same as the one for the new location?
+ if (TopContextLoc == L)
+ return;
+
+ if (containsLocation(TopContextLoc, L)) {
+ CLocs.push_back(L);
+ return;
+ }
+
+ // Context does not contain the location. Flush it.
+ popLocation();
+ }
+
+ CLocs.push_back(L);
+}
+
+// Cone-of-influence: support the reverse propagation of "interesting" symbols
+// and values by tracing interesting calculations backwards through evaluated
+// expressions along a path. This is probably overly complicated, but the idea
+// is that if an expression computed an "interesting" value, its child
+// expressions are likely to be "interesting" as well (which then
+// propagates to the values they in turn compute). This reverse propagation
+// is needed to track interesting correlations across function call boundaries,
+// where formal arguments bind to actual arguments, etc. This is also needed
+// because the constraint solver sometimes simplifies certain symbolic values
+// into constants when appropriate, and this complicates reasoning about
+// interesting values.
+typedef llvm::DenseSet<const Expr *> InterestingExprs;
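+
+// Example (illustrative): if "x + y" produced an interesting value, the
+// helpers below mark the values of "x" and "y" interesting as well, so the
+// chain stays connected when, e.g., "x" was bound from a formal parameter in
+// an inlined call.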
+
+static void reversePropagateIntererstingSymbols(BugReport &R,
+ InterestingExprs &IE,
+ const ProgramState *State,
+ const Expr *Ex,
+ const LocationContext *LCtx) {
+ SVal V = State->getSVal(Ex, LCtx);
+ if (!(R.isInteresting(V) || IE.count(Ex)))
+ return;
+
+ switch (Ex->getStmtClass()) {
+ default:
+ if (!isa<CastExpr>(Ex))
+ break;
+ // Fall through.
+ case Stmt::BinaryOperatorClass:
+ case Stmt::UnaryOperatorClass: {
+ for (const Stmt *SubStmt : Ex->children()) {
+ if (const Expr *child = dyn_cast_or_null<Expr>(SubStmt)) {
+ IE.insert(child);
+ SVal ChildV = State->getSVal(child, LCtx);
+ R.markInteresting(ChildV);
+ }
+ }
+ break;
+ }
+ }
+
+ R.markInteresting(V);
+}
+
+static void reversePropagateInterestingSymbols(BugReport &R,
+ InterestingExprs &IE,
+ const ProgramState *State,
+ const LocationContext *CalleeCtx,
+ const LocationContext *CallerCtx)
+{
+ // FIXME: Handle non-CallExpr-based CallEvents.
+ const StackFrameContext *Callee = CalleeCtx->getCurrentStackFrame();
+ const Stmt *CallSite = Callee->getCallSite();
+ if (const CallExpr *CE = dyn_cast_or_null<CallExpr>(CallSite)) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeCtx->getDecl())) {
+ FunctionDecl::param_const_iterator PI = FD->param_begin(),
+ PE = FD->param_end();
+ CallExpr::const_arg_iterator AI = CE->arg_begin(), AE = CE->arg_end();
+ for (; AI != AE && PI != PE; ++AI, ++PI) {
+ if (const Expr *ArgE = *AI) {
+ if (const ParmVarDecl *PD = *PI) {
+ Loc LV = State->getLValue(PD, CalleeCtx);
+ if (R.isInteresting(LV) || R.isInteresting(State->getRawSVal(LV)))
+ IE.insert(ArgE);
+ }
+ }
+ }
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Functions for determining if a loop was executed 0 times.
+//===----------------------------------------------------------------------===//
+
+static bool isLoop(const Stmt *Term) {
+ switch (Term->getStmtClass()) {
+ case Stmt::ForStmtClass:
+ case Stmt::WhileStmtClass:
+ case Stmt::ObjCForCollectionStmtClass:
+ case Stmt::CXXForRangeStmtClass:
+ return true;
+ default:
+ // Note that we intentionally do not include do..while here.
+ return false;
+ }
+}
+
+static bool isJumpToFalseBranch(const BlockEdge *BE) {
+ const CFGBlock *Src = BE->getSrc();
+ assert(Src->succ_size() == 2);
+ return (*(Src->succ_begin()+1) == BE->getDst());
+}
+
+/// Return true if the terminator is a loop and the destination is the
+/// false branch.
+static bool isLoopJumpPastBody(const Stmt *Term, const BlockEdge *BE) {
+ if (!isLoop(Term))
+ return false;
+
+ // Did we take the false branch?
+ return isJumpToFalseBranch(BE);
+}
+
+static bool isContainedByStmt(ParentMap &PM, const Stmt *S, const Stmt *SubS) {
+ while (SubS) {
+ if (SubS == S)
+ return true;
+ SubS = PM.getParent(SubS);
+ }
+ return false;
+}
+
+static const Stmt *getStmtBeforeCond(ParentMap &PM, const Stmt *Term,
+ const ExplodedNode *N) {
+ while (N) {
+ Optional<StmtPoint> SP = N->getLocation().getAs<StmtPoint>();
+ if (SP) {
+ const Stmt *S = SP->getStmt();
+ if (!isContainedByStmt(PM, Term, S))
+ return S;
+ }
+ N = N->getFirstPred();
+ }
+ return nullptr;
+}
+
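+/// Return true if \p S is lexically contained within the body of the loop
+/// statement \p Term (or within its increment or loop variable, where
+/// applicable).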
+static bool isInLoopBody(ParentMap &PM, const Stmt *S, const Stmt *Term) {
+ const Stmt *LoopBody = nullptr;
+ switch (Term->getStmtClass()) {
+ case Stmt::CXXForRangeStmtClass: {
+ const CXXForRangeStmt *FR = cast<CXXForRangeStmt>(Term);
+ if (isContainedByStmt(PM, FR->getInc(), S))
+ return true;
+ if (isContainedByStmt(PM, FR->getLoopVarStmt(), S))
+ return true;
+ LoopBody = FR->getBody();
+ break;
+ }
+ case Stmt::ForStmtClass: {
+ const ForStmt *FS = cast<ForStmt>(Term);
+ if (isContainedByStmt(PM, FS->getInc(), S))
+ return true;
+ LoopBody = FS->getBody();
+ break;
+ }
+ case Stmt::ObjCForCollectionStmtClass: {
+ const ObjCForCollectionStmt *FC = cast<ObjCForCollectionStmt>(Term);
+ LoopBody = FC->getBody();
+ break;
+ }
+ case Stmt::WhileStmtClass:
+ LoopBody = cast<WhileStmt>(Term)->getBody();
+ break;
+ default:
+ return false;
+ }
+ return isContainedByStmt(PM, LoopBody, S);
+}
+
+//===----------------------------------------------------------------------===//
+// Top-level logic for generating extensive path diagnostics.
+//===----------------------------------------------------------------------===//
+
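+/// Generate the classic "extensive" path diagnostic by walking the bug path
+/// backwards from the error node, using an EdgeBuilder to emit control-flow
+/// edges and call pieces along the way.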
+static bool GenerateExtensivePathDiagnostic(
+ PathDiagnostic &PD, PathDiagnosticBuilder &PDB, const ExplodedNode *N,
+ LocationContextMap &LCM,
+ ArrayRef<std::unique_ptr<BugReporterVisitor>> visitors) {
+ EdgeBuilder EB(PD, PDB);
+ const SourceManager& SM = PDB.getSourceManager();
+ StackDiagVector CallStack;
+ InterestingExprs IE;
+
+ const ExplodedNode *NextNode = N->pred_empty() ? nullptr : *(N->pred_begin());
+ while (NextNode) {
+ N = NextNode;
+ NextNode = N->getFirstPred();
+ ProgramPoint P = N->getLocation();
+
+ do {
+ if (Optional<PostStmt> PS = P.getAs<PostStmt>()) {
+ if (const Expr *Ex = PS->getStmtAs<Expr>())
+ reversePropagateInterestingSymbols(*PDB.getBugReport(), IE,
+ N->getState().get(), Ex,
+ N->getLocationContext());
+ }
+
+ if (Optional<CallExitEnd> CE = P.getAs<CallExitEnd>()) {
+ const Stmt *S = CE->getCalleeContext()->getCallSite();
+ if (const Expr *Ex = dyn_cast_or_null<Expr>(S)) {
+ reversePropagateInterestingSymbols(*PDB.getBugReport(), IE,
+ N->getState().get(), Ex,
+ N->getLocationContext());
+ }
+
+ PathDiagnosticCallPiece *C =
+ PathDiagnosticCallPiece::construct(N, *CE, SM);
+ LCM[&C->path] = CE->getCalleeContext();
+
+ EB.addEdge(C->callReturn, /*AlwaysAdd=*/true, /*IsPostJump=*/true);
+ EB.flushLocations();
+
+ PD.getActivePath().push_front(C);
+ PD.pushActivePath(&C->path);
+ CallStack.push_back(StackDiagPair(C, N));
+ break;
+ }
+
+ // Pop the call hierarchy if we are done walking the contents
+ // of a function call.
+ if (Optional<CallEnter> CE = P.getAs<CallEnter>()) {
+ // Add an edge to the start of the function.
+ const Decl *D = CE->getCalleeContext()->getDecl();
+ PathDiagnosticLocation pos =
+ PathDiagnosticLocation::createBegin(D, SM);
+ EB.addEdge(pos);
+
+ // Flush all locations, and pop the active path.
+ bool VisitedEntireCall = PD.isWithinCall();
+ EB.flushLocations();
+ PD.popActivePath();
+ PDB.LC = N->getLocationContext();
+
+ // Either we just added a bunch of stuff to the top-level path, or
+ // we have a previous CallExitEnd. If the former, it means that the
+ // path terminated within a function call. We must then take the
+ // current contents of the active path and place it within
+ // a new PathDiagnosticCallPiece.
+ PathDiagnosticCallPiece *C;
+ if (VisitedEntireCall) {
+ C = cast<PathDiagnosticCallPiece>(PD.getActivePath().front());
+ } else {
+ const Decl *Caller = CE->getLocationContext()->getDecl();
+ C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
+ LCM[&C->path] = CE->getCalleeContext();
+ }
+
+ C->setCallee(*CE, SM);
+ EB.addContext(C->getLocation());
+
+ if (!CallStack.empty()) {
+ assert(CallStack.back().first == C);
+ CallStack.pop_back();
+ }
+ break;
+ }
+
+ // Note that it is important that we update the LocationContext
+ // after looking at CallExits. CallExit basically adds an
+ // edge in the *caller*, so we don't want to update the LocationContext
+ // too soon.
+ PDB.LC = N->getLocationContext();
+
+ // Block edges.
+ if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
+ // Does this represent entering a call? If so, look at propagating
+ // interesting symbols across call boundaries.
+ if (NextNode) {
+ const LocationContext *CallerCtx = NextNode->getLocationContext();
+ const LocationContext *CalleeCtx = PDB.LC;
+ if (CallerCtx != CalleeCtx) {
+ reversePropagateInterestingSymbols(*PDB.getBugReport(), IE,
+ N->getState().get(),
+ CalleeCtx, CallerCtx);
+ }
+ }
+
+ // Are we jumping to the head of a loop? Add a special diagnostic.
+ if (const Stmt *Loop = BE->getSrc()->getLoopTarget()) {
+ PathDiagnosticLocation L(Loop, SM, PDB.LC);
+ const CompoundStmt *CS = nullptr;
+
+ if (const ForStmt *FS = dyn_cast<ForStmt>(Loop))
+ CS = dyn_cast<CompoundStmt>(FS->getBody());
+ else if (const WhileStmt *WS = dyn_cast<WhileStmt>(Loop))
+ CS = dyn_cast<CompoundStmt>(WS->getBody());
+
+ PathDiagnosticEventPiece *p =
+ new PathDiagnosticEventPiece(L,
+ "Looping back to the head of the loop");
+ p->setPrunable(true);
+
+ EB.addEdge(p->getLocation(), true);
+ PD.getActivePath().push_front(p);
+
+ if (CS) {
+ PathDiagnosticLocation BL =
+ PathDiagnosticLocation::createEndBrace(CS, SM);
+ EB.addEdge(BL);
+ }
+ }
+
+ const CFGBlock *BSrc = BE->getSrc();
+ ParentMap &PM = PDB.getParentMap();
+
+ if (const Stmt *Term = BSrc->getTerminator()) {
+ // Are we jumping past the loop body without ever executing the
+ // loop (because the condition was false)?
+ if (isLoopJumpPastBody(Term, &*BE) &&
+ !isInLoopBody(PM,
+ getStmtBeforeCond(PM,
+ BSrc->getTerminatorCondition(),
+ N),
+ Term)) {
+ PathDiagnosticLocation L(Term, SM, PDB.LC);
+ PathDiagnosticEventPiece *PE =
+ new PathDiagnosticEventPiece(L, "Loop body executed 0 times");
+ PE->setPrunable(true);
+
+ EB.addEdge(PE->getLocation(), true);
+ PD.getActivePath().push_front(PE);
+ }
+
+ // In any case, add the terminator as the current statement
+ // context for control edges.
+ EB.addContext(Term);
+ }
+
+ break;
+ }
+
+ if (Optional<BlockEntrance> BE = P.getAs<BlockEntrance>()) {
+ Optional<CFGElement> First = BE->getFirstElement();
+ if (Optional<CFGStmt> S = First ? First->getAs<CFGStmt>() : None) {
+ const Stmt *stmt = S->getStmt();
+ if (IsControlFlowExpr(stmt)) {
+ // Add the proper context for '&&', '||', and '?'.
+ EB.addContext(stmt);
+ }
+ else
+ EB.addExtendedContext(PDB.getEnclosingStmtLocation(stmt).asStmt());
+ }
+
+ break;
+ }
+
+ } while (0);
+
+ if (!NextNode)
+ continue;
+
+ // Add pieces from custom visitors.
+ BugReport *R = PDB.getBugReport();
+ for (auto &V : visitors) {
+ if (PathDiagnosticPiece *p = V->VisitNode(N, NextNode, PDB, *R)) {
+ const PathDiagnosticLocation &Loc = p->getLocation();
+ EB.addEdge(Loc, true);
+ PD.getActivePath().push_front(p);
+ updateStackPiecesWithMessage(p, CallStack);
+
+ if (const Stmt *S = Loc.asStmt())
+ EB.addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
+ }
+ }
+ }
+
+ return PDB.getBugReport()->isValid();
+}
+
+/// \brief Adds a sanitized control-flow diagnostic edge to a path.
+static void addEdgeToPath(PathPieces &path,
+ PathDiagnosticLocation &PrevLoc,
+ PathDiagnosticLocation NewLoc,
+ const LocationContext *LC) {
+ if (!NewLoc.isValid())
+ return;
+
+ SourceLocation NewLocL = NewLoc.asLocation();
+ if (NewLocL.isInvalid())
+ return;
+
+ if (!PrevLoc.isValid() || !PrevLoc.asLocation().isValid()) {
+ PrevLoc = NewLoc;
+ return;
+ }
+
+ // Ignore self-edges, which occur when there are multiple nodes at the same
+ // statement.
+ if (NewLoc.asStmt() && NewLoc.asStmt() == PrevLoc.asStmt())
+ return;
+
+ path.push_front(new PathDiagnosticControlFlowPiece(NewLoc,
+ PrevLoc));
+ PrevLoc = NewLoc;
+}
+
+/// A customized wrapper for CFGBlock::getTerminatorCondition()
+/// which returns the element for ObjCForCollectionStmts.
+static const Stmt *getTerminatorCondition(const CFGBlock *B) {
+ const Stmt *S = B->getTerminatorCondition();
+ if (const ObjCForCollectionStmt *FS =
+ dyn_cast_or_null<ObjCForCollectionStmt>(S))
+ return FS->getElement();
+ return S;
+}
+
+static const char StrEnteringLoop[] = "Entering loop body";
+static const char StrLoopBodyZero[] = "Loop body executed 0 times";
+static const char StrLoopRangeEmpty[] =
+ "Loop body skipped when range is empty";
+static const char StrLoopCollectionEmpty[] =
+ "Loop body skipped when collection is empty";
+
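+/// Generate the "alternate extensive" path diagnostic by walking the bug
+/// path backwards from the error node, emitting sanitized control-flow
+/// edges via addEdgeToPath() along with special events for loop entries
+/// and loops whose bodies are never executed.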
+static bool GenerateAlternateExtensivePathDiagnostic(
+ PathDiagnostic &PD, PathDiagnosticBuilder &PDB, const ExplodedNode *N,
+ LocationContextMap &LCM,
+ ArrayRef<std::unique_ptr<BugReporterVisitor>> visitors) {
+
+ BugReport *report = PDB.getBugReport();
+ const SourceManager& SM = PDB.getSourceManager();
+ StackDiagVector CallStack;
+ InterestingExprs IE;
+
+ PathDiagnosticLocation PrevLoc = PD.getLocation();
+
+ const ExplodedNode *NextNode = N->getFirstPred();
+ while (NextNode) {
+ N = NextNode;
+ NextNode = N->getFirstPred();
+ ProgramPoint P = N->getLocation();
+
+ do {
+ // Have we encountered an entrance to a call? It may be
+ // the case that we have not encountered a matching
+ // call exit before this point. This means that the path
+ // terminated within the call itself.
+ if (Optional<CallEnter> CE = P.getAs<CallEnter>()) {
+ // Add an edge to the start of the function.
+ const StackFrameContext *CalleeLC = CE->getCalleeContext();
+ const Decl *D = CalleeLC->getDecl();
+ addEdgeToPath(PD.getActivePath(), PrevLoc,
+ PathDiagnosticLocation::createBegin(D, SM),
+ CalleeLC);
+
+ // Did we visit an entire call?
+ bool VisitedEntireCall = PD.isWithinCall();
+ PD.popActivePath();
+
+ PathDiagnosticCallPiece *C;
+ if (VisitedEntireCall) {
+ PathDiagnosticPiece *Piece = PD.getActivePath().front().get();
+ C = cast<PathDiagnosticCallPiece>(Piece);
+ } else {
+ const Decl *Caller = CE->getLocationContext()->getDecl();
+ C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
+
+ // Since we just transferred the path over to the call piece,
+ // reset the mapping from active to location context.
+ assert(PD.getActivePath().size() == 1 &&
+ PD.getActivePath().front() == C);
+ LCM[&PD.getActivePath()] = nullptr;
+
+ // Record the location context mapping for the path within
+ // the call.
+ assert(LCM[&C->path] == nullptr ||
+ LCM[&C->path] == CE->getCalleeContext());
+ LCM[&C->path] = CE->getCalleeContext();
+
+ // If this is the first item in the active path, record
+ // the new mapping from active path to location context.
+ const LocationContext *&NewLC = LCM[&PD.getActivePath()];
+ if (!NewLC)
+ NewLC = N->getLocationContext();
+
+ PDB.LC = NewLC;
+ }
+ C->setCallee(*CE, SM);
+
+ // Update the previous location in the active path.
+ PrevLoc = C->getLocation();
+
+ if (!CallStack.empty()) {
+ assert(CallStack.back().first == C);
+ CallStack.pop_back();
+ }
+ break;
+ }
+
+ // Query the location context here, after handling CallEnter, since
+ // processing a CallEnter may have changed the active path.
+ PDB.LC = N->getLocationContext();
+
+ // Record the mapping from the active path to the location
+ // context.
+ assert(!LCM[&PD.getActivePath()] ||
+ LCM[&PD.getActivePath()] == PDB.LC);
+ LCM[&PD.getActivePath()] = PDB.LC;
+
+ // Have we encountered an exit from a function call?
+ if (Optional<CallExitEnd> CE = P.getAs<CallExitEnd>()) {
+ const Stmt *S = CE->getCalleeContext()->getCallSite();
+ // Propagate the interesting symbols accordingly.
+ if (const Expr *Ex = dyn_cast_or_null<Expr>(S)) {
+ reversePropagateInterestingSymbols(*PDB.getBugReport(), IE,
+ N->getState().get(), Ex,
+ N->getLocationContext());
+ }
+
+ // We are descending into a call (backwards). Construct
+ // a new call piece to contain the path pieces for that call.
+ PathDiagnosticCallPiece *C =
+ PathDiagnosticCallPiece::construct(N, *CE, SM);
+
+ // Record the location context for this call piece.
+ LCM[&C->path] = CE->getCalleeContext();
+
+ // Add the edge to the return site.
+ addEdgeToPath(PD.getActivePath(), PrevLoc, C->callReturn, PDB.LC);
+ PD.getActivePath().push_front(C);
+ PrevLoc.invalidate();
+
+ // Make the contents of the call the active path for now.
+ PD.pushActivePath(&C->path);
+ CallStack.push_back(StackDiagPair(C, N));
+ break;
+ }
+
+ if (Optional<PostStmt> PS = P.getAs<PostStmt>()) {
+ // For expressions, make sure we propagate the
+ // interesting symbols correctly.
+ if (const Expr *Ex = PS->getStmtAs<Expr>())
+ reversePropagateInterestingSymbols(*PDB.getBugReport(), IE,
+ N->getState().get(), Ex,
+ N->getLocationContext());
+
+ // Add an edge. If this is an ObjCForCollectionStmt do
+ // not add an edge here as it appears in the CFG both
+ // as a terminator and as a terminator condition.
+ if (!isa<ObjCForCollectionStmt>(PS->getStmt())) {
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation(PS->getStmt(), SM, PDB.LC);
+ addEdgeToPath(PD.getActivePath(), PrevLoc, L, PDB.LC);
+ }
+ break;
+ }
+
+ // Block edges.
+ if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
+ // Does this represent entering a call? If so, look at propagating
+ // interesting symbols across call boundaries.
+ if (NextNode) {
+ const LocationContext *CallerCtx = NextNode->getLocationContext();
+ const LocationContext *CalleeCtx = PDB.LC;
+ if (CallerCtx != CalleeCtx) {
+ reversePropagateInterestingSymbols(*PDB.getBugReport(), IE,
+ N->getState().get(),
+ CalleeCtx, CallerCtx);
+ }
+ }
+
+ // Are we jumping to the head of a loop? Add a special diagnostic.
+ if (const Stmt *Loop = BE->getSrc()->getLoopTarget()) {
+ PathDiagnosticLocation L(Loop, SM, PDB.LC);
+ const Stmt *Body = nullptr;
+
+ if (const ForStmt *FS = dyn_cast<ForStmt>(Loop))
+ Body = FS->getBody();
+ else if (const WhileStmt *WS = dyn_cast<WhileStmt>(Loop))
+ Body = WS->getBody();
+ else if (const ObjCForCollectionStmt *OFS =
+ dyn_cast<ObjCForCollectionStmt>(Loop)) {
+ Body = OFS->getBody();
+ } else if (const CXXForRangeStmt *FRS =
+ dyn_cast<CXXForRangeStmt>(Loop)) {
+ Body = FRS->getBody();
+ }
+ // do-while statements are explicitly excluded here
+
+ PathDiagnosticEventPiece *p =
+ new PathDiagnosticEventPiece(L, "Looping back to the head "
+ "of the loop");
+ p->setPrunable(true);
+
+ addEdgeToPath(PD.getActivePath(), PrevLoc, p->getLocation(), PDB.LC);
+ PD.getActivePath().push_front(p);
+
+ if (const CompoundStmt *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
+ addEdgeToPath(PD.getActivePath(), PrevLoc,
+ PathDiagnosticLocation::createEndBrace(CS, SM),
+ PDB.LC);
+ }
+ }
+
+ const CFGBlock *BSrc = BE->getSrc();
+ ParentMap &PM = PDB.getParentMap();
+
+ if (const Stmt *Term = BSrc->getTerminator()) {
+ // Are we jumping past the loop body without ever executing the
+ // loop (because the condition was false)?
+ if (isLoop(Term)) {
+ const Stmt *TermCond = getTerminatorCondition(BSrc);
+ bool IsInLoopBody =
+ isInLoopBody(PM, getStmtBeforeCond(PM, TermCond, N), Term);
+
+ const char *str = nullptr;
+
+ if (isJumpToFalseBranch(&*BE)) {
+ if (!IsInLoopBody) {
+ if (isa<ObjCForCollectionStmt>(Term)) {
+ str = StrLoopCollectionEmpty;
+ } else if (isa<CXXForRangeStmt>(Term)) {
+ str = StrLoopRangeEmpty;
+ } else {
+ str = StrLoopBodyZero;
+ }
+ }
+ } else {
+ str = StrEnteringLoop;
+ }
+
+ if (str) {
+ PathDiagnosticLocation L(TermCond ? TermCond : Term, SM, PDB.LC);
+ PathDiagnosticEventPiece *PE =
+ new PathDiagnosticEventPiece(L, str);
+ PE->setPrunable(true);
+ addEdgeToPath(PD.getActivePath(), PrevLoc,
+ PE->getLocation(), PDB.LC);
+ PD.getActivePath().push_front(PE);
+ }
+ } else if (isa<BreakStmt>(Term) || isa<ContinueStmt>(Term) ||
+ isa<GotoStmt>(Term)) {
+ PathDiagnosticLocation L(Term, SM, PDB.LC);
+ addEdgeToPath(PD.getActivePath(), PrevLoc, L, PDB.LC);
+ }
+ }
+ break;
+ }
+ } while (0);
+
+ if (!NextNode)
+ continue;
+
+ // Add pieces from custom visitors.
+ for (auto &V : visitors) {
+ if (PathDiagnosticPiece *p = V->VisitNode(N, NextNode, PDB, *report)) {
+ addEdgeToPath(PD.getActivePath(), PrevLoc, p->getLocation(), PDB.LC);
+ PD.getActivePath().push_front(p);
+ updateStackPiecesWithMessage(p, CallStack);
+ }
+ }
+ }
+
+ // Add an edge to the start of the function.
+ // We'll prune it out later, but it helps make diagnostics more uniform.
+ const StackFrameContext *CalleeLC = PDB.LC->getCurrentStackFrame();
+ const Decl *D = CalleeLC->getDecl();
+ addEdgeToPath(PD.getActivePath(), PrevLoc,
+ PathDiagnosticLocation::createBegin(D, SM),
+ CalleeLC);
+
+ return report->isValid();
+}
+
+static const Stmt *getLocStmt(PathDiagnosticLocation L) {
+ if (!L.isValid())
+ return nullptr;
+ return L.asStmt();
+}
+
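+/// Return the parent of \p S in the parent map, skipping over parentheses
+/// and implicit wrapper nodes (cleanups, bound temporaries, and substituted
+/// template parameters) that are irrelevant for diagnostic layout.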
+static const Stmt *getStmtParent(const Stmt *S, const ParentMap &PM) {
+ if (!S)
+ return nullptr;
+
+ while (true) {
+ S = PM.getParentIgnoreParens(S);
+
+ if (!S)
+ break;
+
+ if (isa<ExprWithCleanups>(S) ||
+ isa<CXXBindTemporaryExpr>(S) ||
+ isa<SubstNonTypeTemplateParmExpr>(S))
+ continue;
+
+ break;
+ }
+
+ return S;
+}
+
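+/// Return true if \p Cond is the condition expression of the terminator
+/// statement \p S, or one of its operands for logical and conditional
+/// operators.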
+static bool isConditionForTerminator(const Stmt *S, const Stmt *Cond) {
+ switch (S->getStmtClass()) {
+ case Stmt::BinaryOperatorClass: {
+ const BinaryOperator *BO = cast<BinaryOperator>(S);
+ if (!BO->isLogicalOp())
+ return false;
+ return BO->getLHS() == Cond || BO->getRHS() == Cond;
+ }
+ case Stmt::IfStmtClass:
+ return cast<IfStmt>(S)->getCond() == Cond;
+ case Stmt::ForStmtClass:
+ return cast<ForStmt>(S)->getCond() == Cond;
+ case Stmt::WhileStmtClass:
+ return cast<WhileStmt>(S)->getCond() == Cond;
+ case Stmt::DoStmtClass:
+ return cast<DoStmt>(S)->getCond() == Cond;
+ case Stmt::ChooseExprClass:
+ return cast<ChooseExpr>(S)->getCond() == Cond;
+ case Stmt::IndirectGotoStmtClass:
+ return cast<IndirectGotoStmt>(S)->getTarget() == Cond;
+ case Stmt::SwitchStmtClass:
+ return cast<SwitchStmt>(S)->getCond() == Cond;
+ case Stmt::BinaryConditionalOperatorClass:
+ return cast<BinaryConditionalOperator>(S)->getCond() == Cond;
+ case Stmt::ConditionalOperatorClass: {
+ const ConditionalOperator *CO = cast<ConditionalOperator>(S);
+ return CO->getCond() == Cond ||
+ CO->getLHS() == Cond ||
+ CO->getRHS() == Cond;
+ }
+ case Stmt::ObjCForCollectionStmtClass:
+ return cast<ObjCForCollectionStmt>(S)->getElement() == Cond;
+ case Stmt::CXXForRangeStmtClass: {
+ const CXXForRangeStmt *FRS = cast<CXXForRangeStmt>(S);
+ return FRS->getCond() == Cond || FRS->getRangeInit() == Cond;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool isIncrementOrInitInForLoop(const Stmt *S, const Stmt *FL) {
+ if (const ForStmt *FS = dyn_cast<ForStmt>(FL))
+ return FS->getInc() == S || FS->getInit() == S;
+ if (const CXXForRangeStmt *FRS = dyn_cast<CXXForRangeStmt>(FL))
+ return FRS->getInc() == S || FRS->getRangeStmt() == S ||
+ FRS->getLoopVarStmt() == S || FRS->getRangeInit() == S;
+ return false;
+}
+
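+// Tracks the call pieces whose subpaths have already been optimized, so
+// that each call is processed at most once.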
+typedef llvm::DenseSet<const PathDiagnosticCallPiece *>
+ OptimizedCallsSet;
+
+/// Adds synthetic edges from top-level statements to their subexpressions.
+///
+/// This avoids a "swoosh" effect, where an edge from a top-level statement A
+/// points to a sub-expression B.1 that's not at the start of B. In these cases,
+/// we'd like to see an edge from A to B, then another one from B to B.1.
+static void addContextEdges(PathPieces &pieces, SourceManager &SM,
+ const ParentMap &PM, const LocationContext *LCtx) {
+ PathPieces::iterator Prev = pieces.end();
+ for (PathPieces::iterator I = pieces.begin(), E = Prev; I != E;
+ Prev = I, ++I) {
+ PathDiagnosticControlFlowPiece *Piece =
+ dyn_cast<PathDiagnosticControlFlowPiece>(*I);
+
+ if (!Piece)
+ continue;
+
+ PathDiagnosticLocation SrcLoc = Piece->getStartLocation();
+ SmallVector<PathDiagnosticLocation, 4> SrcContexts;
+
+ PathDiagnosticLocation NextSrcContext = SrcLoc;
+ const Stmt *InnerStmt = nullptr;
+ while (NextSrcContext.isValid() && NextSrcContext.asStmt() != InnerStmt) {
+ SrcContexts.push_back(NextSrcContext);
+ InnerStmt = NextSrcContext.asStmt();
+ NextSrcContext = getEnclosingStmtLocation(InnerStmt, SM, PM, LCtx,
+ /*allowNested=*/true);
+ }
+
+ // Repeatedly split the edge as necessary.
+ // This is important for nested logical expressions (||, &&, ?:) where we
+ // want to show all the levels of context.
+ while (true) {
+ const Stmt *Dst = getLocStmt(Piece->getEndLocation());
+
+ // We are looking at an edge. Is the destination within a larger
+ // expression?
+ PathDiagnosticLocation DstContext =
+ getEnclosingStmtLocation(Dst, SM, PM, LCtx, /*allowNested=*/true);
+ if (!DstContext.isValid() || DstContext.asStmt() == Dst)
+ break;
+
+ // If the source is in the same context, we're already good.
+ if (std::find(SrcContexts.begin(), SrcContexts.end(), DstContext) !=
+ SrcContexts.end())
+ break;
+
+ // Update the subexpression node to point to the context edge.
+ Piece->setStartLocation(DstContext);
+
+ // Try to extend the previous edge if it's at the same level as the source
+ // context.
+ if (Prev != E) {
+ PathDiagnosticControlFlowPiece *PrevPiece =
+ dyn_cast<PathDiagnosticControlFlowPiece>(*Prev);
+
+ if (PrevPiece) {
+ if (const Stmt *PrevSrc = getLocStmt(PrevPiece->getStartLocation())) {
+ const Stmt *PrevSrcParent = getStmtParent(PrevSrc, PM);
+ if (PrevSrcParent == getStmtParent(getLocStmt(DstContext), PM)) {
+ PrevPiece->setEndLocation(DstContext);
+ break;
+ }
+ }
+ }
+ }
+
+ // Otherwise, split the current edge into a context edge and a
+ // subexpression edge. Note that the context statement may itself have
+ // context.
+ Piece = new PathDiagnosticControlFlowPiece(SrcLoc, DstContext);
+ I = pieces.insert(I, Piece);
+ }
+ }
+}
+
+/// \brief Move edges from a branch condition to a branch target
+/// when the condition is simple.
+///
+/// This restructures some of the work of addContextEdges. That function
+/// creates edges that this one may destroy, but together they produce a more
+/// aesthetically pleasing set of edges around branches. After the call to
+/// addContextEdges, we may have (1) an edge to the branch, (2) an edge from
+/// the branch to the branch condition, and (3) an edge from the branch
+/// condition to the branch target. We keep (1), but may wish to remove (2)
+/// and move the source of (3) to the branch if the branch condition is simple.
+///
+static void simplifySimpleBranches(PathPieces &pieces) {
+ for (PathPieces::iterator I = pieces.begin(), E = pieces.end(); I != E; ++I) {
+
+ PathDiagnosticControlFlowPiece *PieceI =
+ dyn_cast<PathDiagnosticControlFlowPiece>(*I);
+
+ if (!PieceI)
+ continue;
+
+ const Stmt *s1Start = getLocStmt(PieceI->getStartLocation());
+ const Stmt *s1End = getLocStmt(PieceI->getEndLocation());
+
+ if (!s1Start || !s1End)
+ continue;
+
+ PathPieces::iterator NextI = I; ++NextI;
+ if (NextI == E)
+ break;
+
+ PathDiagnosticControlFlowPiece *PieceNextI = nullptr;
+
+ while (true) {
+ if (NextI == E)
+ break;
+
+ PathDiagnosticEventPiece *EV = dyn_cast<PathDiagnosticEventPiece>(*NextI);
+ if (EV) {
+ StringRef S = EV->getString();
+ if (S == StrEnteringLoop || S == StrLoopBodyZero ||
+ S == StrLoopCollectionEmpty || S == StrLoopRangeEmpty) {
+ ++NextI;
+ continue;
+ }
+ break;
+ }
+
+ PieceNextI = dyn_cast<PathDiagnosticControlFlowPiece>(*NextI);
+ break;
+ }
+
+ if (!PieceNextI)
+ continue;
+
+ const Stmt *s2Start = getLocStmt(PieceNextI->getStartLocation());
+ const Stmt *s2End = getLocStmt(PieceNextI->getEndLocation());
+
+ if (!s2Start || !s2End || s1End != s2Start)
+ continue;
+
+ // We only perform this transformation for specific branch kinds.
+ // We don't want to do this for do..while, for example.
+ if (!(isa<ForStmt>(s1Start) || isa<WhileStmt>(s1Start) ||
+ isa<IfStmt>(s1Start) || isa<ObjCForCollectionStmt>(s1Start) ||
+ isa<CXXForRangeStmt>(s1Start)))
+ continue;
+
+ // Is s1End the branch condition?
+ if (!isConditionForTerminator(s1Start, s1End))
+ continue;
+
+ // Perform the hoisting by eliminating (2) and changing the start
+ // location of (3).
+ PieceNextI->setStartLocation(PieceI->getStartLocation());
+ I = pieces.erase(I);
+ }
+}
+
+/// Returns the number of bytes in the given (character-based) SourceRange.
+///
+/// If the locations in the range are not on the same line, returns None.
+///
+/// Note that this does not do a precise user-visible character or column count.
+static Optional<size_t> getLengthOnSingleLine(SourceManager &SM,
+ SourceRange Range) {
+ SourceRange ExpansionRange(SM.getExpansionLoc(Range.getBegin()),
+ SM.getExpansionRange(Range.getEnd()).second);
+
+ FileID FID = SM.getFileID(ExpansionRange.getBegin());
+ if (FID != SM.getFileID(ExpansionRange.getEnd()))
+ return None;
+
+ bool Invalid;
+ const llvm::MemoryBuffer *Buffer = SM.getBuffer(FID, &Invalid);
+ if (Invalid)
+ return None;
+
+ unsigned BeginOffset = SM.getFileOffset(ExpansionRange.getBegin());
+ unsigned EndOffset = SM.getFileOffset(ExpansionRange.getEnd());
+ StringRef Snippet = Buffer->getBuffer().slice(BeginOffset, EndOffset);
+
+ // We're searching the raw bytes of the buffer here, which might include
+ // escaped newlines and such. That's okay; we're trying to decide whether the
+ // SourceRange is covering a large or small amount of space in the user's
+ // editor.
+ if (Snippet.find_first_of("\r\n") != StringRef::npos)
+ return None;
+
+ // This isn't Unicode-aware, but it doesn't need to be.
+ return Snippet.size();
+}
+
+/// \sa getLengthOnSingleLine(SourceManager, SourceRange)
+static Optional<size_t> getLengthOnSingleLine(SourceManager &SM,
+ const Stmt *S) {
+ return getLengthOnSingleLine(SM, S->getSourceRange());
+}
+
+/// Eliminate two-edge cycles created by addContextEdges().
+///
+/// Once all the context edges are in place, there are plenty of cases where
+/// there's a single edge from a top-level statement to a subexpression,
+/// followed by a single path note, and then a reverse edge to get back out to
+/// the top level. If the statement is simple enough, the subexpression edges
+/// just add noise and make it harder to understand what's going on.
+///
+/// This function only removes edges in pairs, because removing only one edge
+/// might leave other edges dangling.
+///
+/// This will not remove edges in more complicated situations:
+/// - if there is more than one "hop" leading to or from a subexpression.
+/// - if there is an inlined call between the edges instead of a single event.
+/// - if the whole statement is large enough that having subexpression arrows
+/// might be helpful.
+static void removeContextCycles(PathPieces &Path, SourceManager &SM,
+ ParentMap &PM) {
+ for (PathPieces::iterator I = Path.begin(), E = Path.end(); I != E; ) {
+ // Pattern match the current piece and its successor.
+ PathDiagnosticControlFlowPiece *PieceI =
+ dyn_cast<PathDiagnosticControlFlowPiece>(*I);
+
+ if (!PieceI) {
+ ++I;
+ continue;
+ }
+
+ const Stmt *s1Start = getLocStmt(PieceI->getStartLocation());
+ const Stmt *s1End = getLocStmt(PieceI->getEndLocation());
+
+ PathPieces::iterator NextI = I; ++NextI;
+ if (NextI == E)
+ break;
+
+ PathDiagnosticControlFlowPiece *PieceNextI =
+ dyn_cast<PathDiagnosticControlFlowPiece>(*NextI);
+
+ if (!PieceNextI) {
+ if (isa<PathDiagnosticEventPiece>(*NextI)) {
+ ++NextI;
+ if (NextI == E)
+ break;
+ PieceNextI = dyn_cast<PathDiagnosticControlFlowPiece>(*NextI);
+ }
+
+ if (!PieceNextI) {
+ ++I;
+ continue;
+ }
+ }
+
+ const Stmt *s2Start = getLocStmt(PieceNextI->getStartLocation());
+ const Stmt *s2End = getLocStmt(PieceNextI->getEndLocation());
+
+ if (s1Start && s2Start && s1Start == s2End && s2Start == s1End) {
+ const size_t MAX_SHORT_LINE_LENGTH = 80;
+ Optional<size_t> s1Length = getLengthOnSingleLine(SM, s1Start);
+ if (s1Length && *s1Length <= MAX_SHORT_LINE_LENGTH) {
+ Optional<size_t> s2Length = getLengthOnSingleLine(SM, s2Start);
+ if (s2Length && *s2Length <= MAX_SHORT_LINE_LENGTH) {
+ Path.erase(I);
+ I = Path.erase(NextI);
+ continue;
+ }
+ }
+ }
+
+ ++I;
+ }
+}
+
+/// \brief Return true if X is contained by Y.
+static bool lexicalContains(ParentMap &PM,
+ const Stmt *X,
+ const Stmt *Y) {
+ while (X) {
+ if (X == Y)
+ return true;
+ X = PM.getParent(X);
+ }
+ return false;
+}
+
+// Remove short edges whose endpoints are on the same line and fewer than
+// three columns apart.
+static void removePunyEdges(PathPieces &path,
+ SourceManager &SM,
+ ParentMap &PM) {
+
+ bool erased = false;
+
+ for (PathPieces::iterator I = path.begin(), E = path.end(); I != E;
+ erased ? I : ++I) {
+
+ erased = false;
+
+ PathDiagnosticControlFlowPiece *PieceI =
+ dyn_cast<PathDiagnosticControlFlowPiece>(*I);
+
+ if (!PieceI)
+ continue;
+
+ const Stmt *start = getLocStmt(PieceI->getStartLocation());
+ const Stmt *end = getLocStmt(PieceI->getEndLocation());
+
+ if (!start || !end)
+ continue;
+
+ const Stmt *endParent = PM.getParent(end);
+ if (!endParent)
+ continue;
+
+ if (isConditionForTerminator(end, endParent))
+ continue;
+
+ SourceLocation FirstLoc = start->getLocStart();
+ SourceLocation SecondLoc = end->getLocStart();
+
+ if (!SM.isWrittenInSameFile(FirstLoc, SecondLoc))
+ continue;
+ if (SM.isBeforeInTranslationUnit(SecondLoc, FirstLoc))
+ std::swap(SecondLoc, FirstLoc);
+
+ SourceRange EdgeRange(FirstLoc, SecondLoc);
+ Optional<size_t> ByteWidth = getLengthOnSingleLine(SM, EdgeRange);
+
+ // If the statements are on different lines, continue.
+ if (!ByteWidth)
+ continue;
+
+ const size_t MAX_PUNY_EDGE_LENGTH = 2;
+ if (*ByteWidth <= MAX_PUNY_EDGE_LENGTH) {
+ // FIXME: There are enough /bytes/ between the endpoints of the edge, but
+ // there might not be enough /columns/. A proper user-visible column count
+ // is probably too expensive, though.
+ I = path.erase(I);
+ erased = true;
+ continue;
+ }
+ }
+}
+
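+/// Remove the second of any two consecutive event pieces that carry exactly
+/// the same message text.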
+static void removeIdenticalEvents(PathPieces &path) {
+ for (PathPieces::iterator I = path.begin(), E = path.end(); I != E; ++I) {
+ PathDiagnosticEventPiece *PieceI =
+ dyn_cast<PathDiagnosticEventPiece>(*I);
+
+ if (!PieceI)
+ continue;
+
+ PathPieces::iterator NextI = I; ++NextI;
+ if (NextI == E)
+ return;
+
+ PathDiagnosticEventPiece *PieceNextI =
+ dyn_cast<PathDiagnosticEventPiece>(*NextI);
+
+ if (!PieceNextI)
+ continue;
+
+ // Erase the second piece if it has the same exact message text.
+ if (PieceI->getString() == PieceNextI->getString()) {
+ path.erase(NextI);
+ }
+ }
+}
+
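+/// Perform one pass of edge merging and trimming over \p path, recursing
+/// into call pieces. Returns true if anything changed; callers iterate
+/// until a fixed point is reached, after which the finishing passes
+/// (context edges, cycle removal, branch simplification, puny-edge and
+/// duplicate-event removal) run exactly once.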
+static bool optimizeEdges(PathPieces &path, SourceManager &SM,
+ OptimizedCallsSet &OCS,
+ LocationContextMap &LCM) {
+ bool hasChanges = false;
+ const LocationContext *LC = LCM[&path];
+ assert(LC);
+ ParentMap &PM = LC->getParentMap();
+
+ for (PathPieces::iterator I = path.begin(), E = path.end(); I != E; ) {
+ // Optimize subpaths.
+ if (PathDiagnosticCallPiece *CallI = dyn_cast<PathDiagnosticCallPiece>(*I)){
+ // Record the fact that a call has been optimized so we only do the
+ // effort once.
+ if (!OCS.count(CallI)) {
+ while (optimizeEdges(CallI->path, SM, OCS, LCM)) {}
+ OCS.insert(CallI);
+ }
+ ++I;
+ continue;
+ }
+
+ // Pattern match the current piece and its successor.
+ PathDiagnosticControlFlowPiece *PieceI =
+ dyn_cast<PathDiagnosticControlFlowPiece>(*I);
+
+ if (!PieceI) {
+ ++I;
+ continue;
+ }
+
+ const Stmt *s1Start = getLocStmt(PieceI->getStartLocation());
+ const Stmt *s1End = getLocStmt(PieceI->getEndLocation());
+ const Stmt *level1 = getStmtParent(s1Start, PM);
+ const Stmt *level2 = getStmtParent(s1End, PM);
+
+ PathPieces::iterator NextI = I; ++NextI;
+ if (NextI == E)
+ break;
+
+ PathDiagnosticControlFlowPiece *PieceNextI =
+ dyn_cast<PathDiagnosticControlFlowPiece>(*NextI);
+
+ if (!PieceNextI) {
+ ++I;
+ continue;
+ }
+
+ const Stmt *s2Start = getLocStmt(PieceNextI->getStartLocation());
+ const Stmt *s2End = getLocStmt(PieceNextI->getEndLocation());
+ const Stmt *level3 = getStmtParent(s2Start, PM);
+ const Stmt *level4 = getStmtParent(s2End, PM);
+
+ // Rule I.
+ //
+ // If we have two consecutive control edges whose end/begin locations
+ // are at the same level (e.g. statements or top-level expressions within
+ // a compound statement, or siblings that share a single ancestor
+ // expression), then merge them if they have no interesting intermediate
+ // event.
+ //
+ // For example:
+ //
+ // (1.1 -> 1.2) -> (1.2 -> 1.3) becomes (1.1 -> 1.3) because the common
+ // parent is '1'. Here 'x.y.z' represents the hierarchy of statements.
+ //
+ // NOTE: this will be limited later in cases where we add barriers
+ // to prevent this optimization.
+ //
+ if (level1 && level1 == level2 && level1 == level3 && level1 == level4) {
+ PieceI->setEndLocation(PieceNextI->getEndLocation());
+ path.erase(NextI);
+ hasChanges = true;
+ continue;
+ }
+
+ // Rule II.
+ //
+ // Eliminate edges between subexpressions and parent expressions
+ // when the subexpression is consumed.
+ //
+ // NOTE: this will be limited later in cases where we add barriers
+ // to prevent this optimization.
+ //
+ if (s1End && s1End == s2Start && level2) {
+ bool removeEdge = false;
+ // Remove edges into the increment or initialization of a
+ // loop that have no interleaving event. This means that
+ // they aren't interesting.
+ if (isIncrementOrInitInForLoop(s1End, level2))
+ removeEdge = true;
+ // Next, only consider edges that are not anchored on
+ // the condition of a terminator. These are intermediate edges
+ // that we might want to trim.
+ else if (!isConditionForTerminator(level2, s1End)) {
+ // Trim edges on expressions that are consumed by
+ // the parent expression.
+ if (isa<Expr>(s1End) && PM.isConsumedExpr(cast<Expr>(s1End))) {
+ removeEdge = true;
+ }
+ // Trim edges where a lexical containment doesn't exist.
+ // For example:
+ //
+ // X -> Y -> Z
+ //
+ // If 'Z' lexically contains Y (it is an ancestor) and
+ // 'X' does not lexically contain Y (it is a descendant OR
+ // it has no lexical relationship at all) then trim.
+ //
+ // This can eliminate edges where we dive into a subexpression
+ // and then pop back out, etc.
+ else if (s1Start && s2End &&
+ lexicalContains(PM, s2Start, s2End) &&
+ !lexicalContains(PM, s1End, s1Start)) {
+ removeEdge = true;
+ }
+ // Trim edges from a subexpression back to the top level if the
+ // subexpression is on a different line.
+ //
+ // A.1 -> A -> B
+ // becomes
+ // A.1 -> B
+ //
+ // These edges just look ugly and don't usually add anything.
+ else if (s1Start && s2End &&
+ lexicalContains(PM, s1Start, s1End)) {
+ SourceRange EdgeRange(PieceI->getEndLocation().asLocation(),
+ PieceI->getStartLocation().asLocation());
+ if (!getLengthOnSingleLine(SM, EdgeRange).hasValue())
+ removeEdge = true;
+ }
+ }
+
+ if (removeEdge) {
+ PieceI->setEndLocation(PieceNextI->getEndLocation());
+ path.erase(NextI);
+ hasChanges = true;
+ continue;
+ }
+ }
+
+ // Optimize edges for ObjC fast-enumeration loops.
+ //
+ // (X -> collection) -> (collection -> element)
+ //
+ // becomes:
+ //
+ // (X -> element)
+ if (s1End == s2Start) {
+ const ObjCForCollectionStmt *FS =
+ dyn_cast_or_null<ObjCForCollectionStmt>(level3);
+ if (FS && FS->getCollection()->IgnoreParens() == s2Start &&
+ s2End == FS->getElement()) {
+ PieceI->setEndLocation(PieceNextI->getEndLocation());
+ path.erase(NextI);
+ hasChanges = true;
+ continue;
+ }
+ }
+
+ // No changes at this index? Move to the next one.
+ ++I;
+ }
+
+ if (!hasChanges) {
+ // Adjust edges into subexpressions to make them more uniform
+ // and aesthetically pleasing.
+ addContextEdges(path, SM, PM, LC);
+ // Remove "cyclical" edges that include one or more context edges.
+ removeContextCycles(path, SM, PM);
+ // Hoist edges originating from branch conditions to branches
+ // for simple branches.
+ simplifySimpleBranches(path);
+ // Remove any puny edges left over after primary optimization pass.
+ removePunyEdges(path, SM, PM);
+ // Remove identical events.
+ removeIdenticalEvents(path);
+ }
+
+ return hasChanges;
+}
+
+/// Drop the very first edge in a path, which should be a function entry edge.
+///
+/// If the first edge is not a function entry edge (say, because the first
+/// statement had an invalid source location), this function does nothing.
+// FIXME: We should just generate invalid edges anyway and have the optimizer
+// deal with them.
+static void dropFunctionEntryEdge(PathPieces &Path,
+ LocationContextMap &LCM,
+ SourceManager &SM) {
+ const PathDiagnosticControlFlowPiece *FirstEdge =
+ dyn_cast<PathDiagnosticControlFlowPiece>(Path.front());
+ if (!FirstEdge)
+ return;
+
+ const Decl *D = LCM[&Path]->getDecl();
+ PathDiagnosticLocation EntryLoc = PathDiagnosticLocation::createBegin(D, SM);
+ if (FirstEdge->getStartLocation() != EntryLoc)
+ return;
+
+ Path.pop_front();
+}
+
+
+//===----------------------------------------------------------------------===//
+// Methods for BugType and subclasses.
+//===----------------------------------------------------------------------===//
+void BugType::anchor() { }
+
+void BugType::FlushReports(BugReporter &BR) {}
+
+void BuiltinBug::anchor() {}
+
+//===----------------------------------------------------------------------===//
+// Methods for BugReport and subclasses.
+//===----------------------------------------------------------------------===//
+
+void BugReport::NodeResolver::anchor() {}
+
+void BugReport::addVisitor(std::unique_ptr<BugReporterVisitor> visitor) {
+ if (!visitor)
+ return;
+
+ llvm::FoldingSetNodeID ID;
+ visitor->Profile(ID);
+ void *InsertPos;
+
+ if (CallbacksSet.FindNodeOrInsertPos(ID, InsertPos))
+ return;
+
+ CallbacksSet.InsertNode(visitor.get(), InsertPos);
+ Callbacks.push_back(std::move(visitor));
+ ++ConfigurationChangeToken;
+}
+
+BugReport::~BugReport() {
+ while (!interestingSymbols.empty()) {
+ popInterestingSymbolsAndRegions();
+ }
+}
+
+const Decl *BugReport::getDeclWithIssue() const {
+ if (DeclWithIssue)
+ return DeclWithIssue;
+
+ const ExplodedNode *N = getErrorNode();
+ if (!N)
+ return nullptr;
+
+ const LocationContext *LC = N->getLocationContext();
+ return LC->getCurrentStackFrame()->getDecl();
+}
+
+void BugReport::Profile(llvm::FoldingSetNodeID& hash) const {
+ hash.AddPointer(&BT);
+ hash.AddString(Description);
+ PathDiagnosticLocation UL = getUniqueingLocation();
+ if (UL.isValid()) {
+ UL.Profile(hash);
+ } else if (Location.isValid()) {
+ Location.Profile(hash);
+ } else {
+ assert(ErrorNode);
+ hash.AddPointer(GetCurrentOrPreviousStmt(ErrorNode));
+ }
+
+ for (SourceRange range : Ranges) {
+ if (!range.isValid())
+ continue;
+ hash.AddInteger(range.getBegin().getRawEncoding());
+ hash.AddInteger(range.getEnd().getRawEncoding());
+ }
+}
+
+void BugReport::markInteresting(SymbolRef sym) {
+ if (!sym)
+ return;
+
+ // If the symbol wasn't already in our set, note a configuration change.
+ if (getInterestingSymbols().insert(sym).second)
+ ++ConfigurationChangeToken;
+
+ if (const SymbolMetadata *meta = dyn_cast<SymbolMetadata>(sym))
+ getInterestingRegions().insert(meta->getRegion());
+}
+
+void BugReport::markInteresting(const MemRegion *R) {
+ if (!R)
+ return;
+
+ // If the base region wasn't already in our set, note a configuration change.
+ R = R->getBaseRegion();
+ if (getInterestingRegions().insert(R).second)
+ ++ConfigurationChangeToken;
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
+ getInterestingSymbols().insert(SR->getSymbol());
+}
+
+void BugReport::markInteresting(SVal V) {
+ markInteresting(V.getAsRegion());
+ markInteresting(V.getAsSymbol());
+}
+
+void BugReport::markInteresting(const LocationContext *LC) {
+ if (!LC)
+ return;
+ InterestingLocationContexts.insert(LC);
+}
+
+bool BugReport::isInteresting(SVal V) {
+ return isInteresting(V.getAsRegion()) || isInteresting(V.getAsSymbol());
+}
+
+bool BugReport::isInteresting(SymbolRef sym) {
+ if (!sym)
+ return false;
+ // We don't currently consider metadata symbols to be interesting
+ // even if we know their region is interesting. Is that correct behavior?
+ return getInterestingSymbols().count(sym);
+}
+
+bool BugReport::isInteresting(const MemRegion *R) {
+ if (!R)
+ return false;
+ R = R->getBaseRegion();
+ bool b = getInterestingRegions().count(R);
+ if (b)
+ return true;
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
+ return getInterestingSymbols().count(SR->getSymbol());
+ return false;
+}
+
+bool BugReport::isInteresting(const LocationContext *LC) {
+ if (!LC)
+ return false;
+ return InterestingLocationContexts.count(LC);
+}
+
+void BugReport::lazyInitializeInterestingSets() {
+ if (interestingSymbols.empty()) {
+ interestingSymbols.push_back(new Symbols());
+ interestingRegions.push_back(new Regions());
+ }
+}
+
+BugReport::Symbols &BugReport::getInterestingSymbols() {
+ lazyInitializeInterestingSets();
+ return *interestingSymbols.back();
+}
+
+BugReport::Regions &BugReport::getInterestingRegions() {
+ lazyInitializeInterestingSets();
+ return *interestingRegions.back();
+}
+
+void BugReport::pushInterestingSymbolsAndRegions() {
+ interestingSymbols.push_back(new Symbols(getInterestingSymbols()));
+ interestingRegions.push_back(new Regions(getInterestingRegions()));
+}
+
+void BugReport::popInterestingSymbolsAndRegions() {
+ delete interestingSymbols.pop_back_val();
+ delete interestingRegions.pop_back_val();
+}
+
+const Stmt *BugReport::getStmt() const {
+ if (!ErrorNode)
+ return nullptr;
+
+ ProgramPoint ProgP = ErrorNode->getLocation();
+ const Stmt *S = nullptr;
+
+ if (Optional<BlockEntrance> BE = ProgP.getAs<BlockEntrance>()) {
+ CFGBlock &Exit = ProgP.getLocationContext()->getCFG()->getExit();
+ if (BE->getBlock() == &Exit)
+ S = GetPreviousStmt(ErrorNode);
+ }
+ if (!S)
+ S = PathDiagnosticLocation::getStmt(ErrorNode);
+
+ return S;
+}
+
+llvm::iterator_range<BugReport::ranges_iterator> BugReport::getRanges() {
+ // If no custom ranges, add the range of the statement corresponding to
+ // the error node.
+ if (Ranges.empty()) {
+ if (const Expr *E = dyn_cast_or_null<Expr>(getStmt()))
+ addRange(E->getSourceRange());
+ else
+ return llvm::make_range(ranges_iterator(), ranges_iterator());
+ }
+
+ // User-specified absence of range info.
+ if (Ranges.size() == 1 && !Ranges.begin()->isValid())
+ return llvm::make_range(ranges_iterator(), ranges_iterator());
+
+ return llvm::make_range(Ranges.begin(), Ranges.end());
+}
+
+PathDiagnosticLocation BugReport::getLocation(const SourceManager &SM) const {
+ if (ErrorNode) {
+ assert(!Location.isValid() &&
+ "Either Location or ErrorNode should be specified but not both.");
+ return PathDiagnosticLocation::createEndOfPath(ErrorNode, SM);
+ }
+
+ assert(Location.isValid());
+ return Location;
+}
+
+//===----------------------------------------------------------------------===//
+// Methods for BugReporter and subclasses.
+//===----------------------------------------------------------------------===//
+
+BugReportEquivClass::~BugReportEquivClass() { }
+GRBugReporter::~GRBugReporter() { }
+BugReporterData::~BugReporterData() {}
+
+ExplodedGraph &GRBugReporter::getGraph() { return Eng.getGraph(); }
+
+ProgramStateManager&
+GRBugReporter::getStateManager() { return Eng.getStateManager(); }
+
+BugReporter::~BugReporter() {
+ FlushReports();
+
+ // Free the bug reports we are tracking.
+ typedef std::vector<BugReportEquivClass *> ContTy;
+ for (ContTy::iterator I = EQClassesVector.begin(), E = EQClassesVector.end();
+ I != E; ++I) {
+ delete *I;
+ }
+}
+
+void BugReporter::FlushReports() {
+ if (BugTypes.isEmpty())
+ return;
+
+ // First flush the warnings for each BugType. This may end up creating new
+ // warnings and new BugTypes.
+ // FIXME: Only NSErrorChecker needs BugType's FlushReports.
+ // Turn NSErrorChecker into a proper checker and remove this.
+ SmallVector<const BugType *, 16> bugTypes(BugTypes.begin(), BugTypes.end());
+ for (SmallVectorImpl<const BugType *>::iterator
+ I = bugTypes.begin(), E = bugTypes.end(); I != E; ++I)
+ const_cast<BugType*>(*I)->FlushReports(*this);
+
+ // We need to flush reports in deterministic order to ensure the order
+ // of the reports is consistent between runs.
+ typedef std::vector<BugReportEquivClass *> ContVecTy;
+ for (ContVecTy::iterator EI=EQClassesVector.begin(), EE=EQClassesVector.end();
+ EI != EE; ++EI){
+ BugReportEquivClass& EQ = **EI;
+ FlushReport(EQ);
+ }
+
+ // BugReporter owns and deletes only BugTypes created implicitly through
+ // EmitBasicReport.
+ // FIXME: There are leaks from checkers that assume that the BugTypes they
+ // create will be destroyed by the BugReporter.
+ llvm::DeleteContainerSeconds(StrBugTypes);
+
+ // Remove all references to the BugType objects.
+ BugTypes = F.getEmptySet();
+}
+
+//===----------------------------------------------------------------------===//
+// PathDiagnostics generation.
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// A wrapper around a report graph, which contains only a single path, and its
+/// node maps.
+class ReportGraph {
+public:
+ InterExplodedGraphMap BackMap;
+ std::unique_ptr<ExplodedGraph> Graph;
+ const ExplodedNode *ErrorNode;
+ size_t Index;
+};
+
+/// A wrapper around a trimmed graph and its node maps.
+class TrimmedGraph {
+ InterExplodedGraphMap InverseMap;
+
+ typedef llvm::DenseMap<const ExplodedNode *, unsigned> PriorityMapTy;
+ PriorityMapTy PriorityMap;
+
+ typedef std::pair<const ExplodedNode *, size_t> NodeIndexPair;
+ SmallVector<NodeIndexPair, 32> ReportNodes;
+
+ std::unique_ptr<ExplodedGraph> G;
+
+ /// A helper class for sorting ExplodedNodes by priority.
+ template <bool Descending>
+ class PriorityCompare {
+ const PriorityMapTy &PriorityMap;
+
+ public:
+ PriorityCompare(const PriorityMapTy &M) : PriorityMap(M) {}
+
+ bool operator()(const ExplodedNode *LHS, const ExplodedNode *RHS) const {
+ PriorityMapTy::const_iterator LI = PriorityMap.find(LHS);
+ PriorityMapTy::const_iterator RI = PriorityMap.find(RHS);
+ PriorityMapTy::const_iterator E = PriorityMap.end();
+
+ if (LI == E)
+ return Descending;
+ if (RI == E)
+ return !Descending;
+
+ return Descending ? LI->second > RI->second
+ : LI->second < RI->second;
+ }
+
+ bool operator()(const NodeIndexPair &LHS, const NodeIndexPair &RHS) const {
+ return (*this)(LHS.first, RHS.first);
+ }
+ };
+
+public:
+ TrimmedGraph(const ExplodedGraph *OriginalGraph,
+ ArrayRef<const ExplodedNode *> Nodes);
+
+ bool popNextReportGraph(ReportGraph &GraphWrapper);
+};
+}
+
+TrimmedGraph::TrimmedGraph(const ExplodedGraph *OriginalGraph,
+ ArrayRef<const ExplodedNode *> Nodes) {
+ // The trimmed graph is created in the body of the constructor to ensure
+ // that the DenseMaps have been initialized already.
+ InterExplodedGraphMap ForwardMap;
+ G = OriginalGraph->trim(Nodes, &ForwardMap, &InverseMap);
+
+ // Find the (first) error node in the trimmed graph. We just need to consult
+ // the node map which maps from nodes in the original graph to nodes
+ // in the new graph.
+ llvm::SmallPtrSet<const ExplodedNode *, 32> RemainingNodes;
+
+ for (unsigned i = 0, count = Nodes.size(); i < count; ++i) {
+ if (const ExplodedNode *NewNode = ForwardMap.lookup(Nodes[i])) {
+ ReportNodes.push_back(std::make_pair(NewNode, i));
+ RemainingNodes.insert(NewNode);
+ }
+ }
+
+ assert(!RemainingNodes.empty() && "No error node found in the trimmed graph");
+
+ // Perform a forward BFS to find all the shortest paths.
+ std::queue<const ExplodedNode *> WS;
+
+ assert(G->num_roots() == 1);
+ WS.push(*G->roots_begin());
+ unsigned Priority = 0;
+
+ while (!WS.empty()) {
+ const ExplodedNode *Node = WS.front();
+ WS.pop();
+
+ PriorityMapTy::iterator PriorityEntry;
+ bool IsNew;
+ std::tie(PriorityEntry, IsNew) =
+ PriorityMap.insert(std::make_pair(Node, Priority));
+ ++Priority;
+
+ if (!IsNew) {
+ assert(PriorityEntry->second <= Priority);
+ continue;
+ }
+
+ if (RemainingNodes.erase(Node))
+ if (RemainingNodes.empty())
+ break;
+
+ for (ExplodedNode::const_succ_iterator I = Node->succ_begin(),
+ E = Node->succ_end();
+ I != E; ++I)
+ WS.push(*I);
+ }
+
+ // Sort the error paths from longest to shortest.
+ std::sort(ReportNodes.begin(), ReportNodes.end(),
+ PriorityCompare<true>(PriorityMap));
+}
+
+bool TrimmedGraph::popNextReportGraph(ReportGraph &GraphWrapper) {
+ if (ReportNodes.empty())
+ return false;
+
+ const ExplodedNode *OrigN;
+ std::tie(OrigN, GraphWrapper.Index) = ReportNodes.pop_back_val();
+ assert(PriorityMap.find(OrigN) != PriorityMap.end() &&
+ "error node not accessible from root");
+
+ // Create a new graph with a single path. This is the graph
+ // that will be returned to the caller.
+ auto GNew = llvm::make_unique<ExplodedGraph>();
+ GraphWrapper.BackMap.clear();
+
+ // Now walk from the error node up the BFS path, always taking the
+ // predecessor with the lowest number.
+ ExplodedNode *Succ = nullptr;
+ while (true) {
+ // Create the equivalent node in the new graph with the same state
+ // and location.
+ ExplodedNode *NewN = GNew->getNode(OrigN->getLocation(), OrigN->getState(),
+ OrigN->isSink());
+
+ // Store the mapping to the original node.
+ InterExplodedGraphMap::const_iterator IMitr = InverseMap.find(OrigN);
+ assert(IMitr != InverseMap.end() && "No mapping to original node.");
+ GraphWrapper.BackMap[NewN] = IMitr->second;
+
+ // Link up the new node with the previous node.
+ if (Succ)
+ Succ->addPredecessor(NewN, *GNew);
+ else
+ GraphWrapper.ErrorNode = NewN;
+
+ Succ = NewN;
+
+ // Are we at the final node?
+ if (OrigN->pred_empty()) {
+ GNew->addRoot(NewN);
+ break;
+ }
+
+ // Find the next predecessor node. We choose the node that is marked
+ // with the lowest BFS number.
+ OrigN = *std::min_element(OrigN->pred_begin(), OrigN->pred_end(),
+ PriorityCompare<false>(PriorityMap));
+ }
+
+ GraphWrapper.Graph = std::move(GNew);
+
+ return true;
+}
+
+
+/// CompactPathDiagnostic - This function postprocesses a PathDiagnostic object
+/// and collapses PathDiagnosticPieces that are expanded by macros.
+static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM) {
+ typedef std::vector<std::pair<IntrusiveRefCntPtr<PathDiagnosticMacroPiece>,
+ SourceLocation> > MacroStackTy;
+
+ typedef std::vector<IntrusiveRefCntPtr<PathDiagnosticPiece> >
+ PiecesTy;
+
+ MacroStackTy MacroStack;
+ PiecesTy Pieces;
+
+ for (PathPieces::const_iterator I = path.begin(), E = path.end();
+ I!=E; ++I) {
+
+ PathDiagnosticPiece *piece = I->get();
+
+ // Recursively compact calls.
+ if (PathDiagnosticCallPiece *call=dyn_cast<PathDiagnosticCallPiece>(piece)){
+ CompactPathDiagnostic(call->path, SM);
+ }
+
+ // Get the location of the PathDiagnosticPiece.
+ const FullSourceLoc Loc = piece->getLocation().asLocation();
+
+ // Determine the instantiation location, which is the location we group
+ // related PathDiagnosticPieces.
+ SourceLocation InstantiationLoc = Loc.isMacroID() ?
+ SM.getExpansionLoc(Loc) :
+ SourceLocation();
+
+ if (Loc.isFileID()) {
+ MacroStack.clear();
+ Pieces.push_back(piece);
+ continue;
+ }
+
+ assert(Loc.isMacroID());
+
+ // Is the PathDiagnosticPiece within the same macro group?
+ if (!MacroStack.empty() && InstantiationLoc == MacroStack.back().second) {
+ MacroStack.back().first->subPieces.push_back(piece);
+ continue;
+ }
+
+ // We aren't in the same group. Are we descending into a new macro
+ // or are we part of an old one?
+ IntrusiveRefCntPtr<PathDiagnosticMacroPiece> MacroGroup;
+
+ SourceLocation ParentInstantiationLoc = InstantiationLoc.isMacroID() ?
+ SM.getExpansionLoc(Loc) :
+ SourceLocation();
+
+ // Walk the entire macro stack.
+ while (!MacroStack.empty()) {
+ if (InstantiationLoc == MacroStack.back().second) {
+ MacroGroup = MacroStack.back().first;
+ break;
+ }
+
+ if (ParentInstantiationLoc == MacroStack.back().second) {
+ MacroGroup = MacroStack.back().first;
+ break;
+ }
+
+ MacroStack.pop_back();
+ }
+
+ if (!MacroGroup || ParentInstantiationLoc == MacroStack.back().second) {
+ // Create a new macro group and add it to the stack.
+ PathDiagnosticMacroPiece *NewGroup =
+ new PathDiagnosticMacroPiece(
+ PathDiagnosticLocation::createSingleLocation(piece->getLocation()));
+
+ if (MacroGroup)
+ MacroGroup->subPieces.push_back(NewGroup);
+ else {
+ assert(InstantiationLoc.isFileID());
+ Pieces.push_back(NewGroup);
+ }
+
+ MacroGroup = NewGroup;
+ MacroStack.push_back(std::make_pair(MacroGroup, InstantiationLoc));
+ }
+
+ // Finally, add the PathDiagnosticPiece to the group.
+ MacroGroup->subPieces.push_back(piece);
+ }
+
+ // Now take the pieces and construct a new PathDiagnostic.
+ path.clear();
+
+ path.insert(path.end(), Pieces.begin(), Pieces.end());
+}
+
+bool GRBugReporter::generatePathDiagnostic(PathDiagnostic& PD,
+ PathDiagnosticConsumer &PC,
+ ArrayRef<BugReport *> &bugReports) {
+ assert(!bugReports.empty());
+
+ bool HasValid = false;
+ bool HasInvalid = false;
+ SmallVector<const ExplodedNode *, 32> errorNodes;
+ for (ArrayRef<BugReport*>::iterator I = bugReports.begin(),
+ E = bugReports.end(); I != E; ++I) {
+ if ((*I)->isValid()) {
+ HasValid = true;
+ errorNodes.push_back((*I)->getErrorNode());
+ } else {
+ // Keep the errorNodes list in sync with the bugReports list.
+ HasInvalid = true;
+ errorNodes.push_back(nullptr);
+ }
+ }
+
+ // If all the reports have been marked invalid by a previous path generation,
+ // we're done.
+ if (!HasValid)
+ return false;
+
+ typedef PathDiagnosticConsumer::PathGenerationScheme PathGenerationScheme;
+ PathGenerationScheme ActiveScheme = PC.getGenerationScheme();
+
+ if (ActiveScheme == PathDiagnosticConsumer::Extensive) {
+ AnalyzerOptions &options = getAnalyzerOptions();
+ if (options.getBooleanOption("path-diagnostics-alternate", true)) {
+ ActiveScheme = PathDiagnosticConsumer::AlternateExtensive;
+ }
+ }
+
+ TrimmedGraph TrimG(&getGraph(), errorNodes);
+ ReportGraph ErrorGraph;
+
+ while (TrimG.popNextReportGraph(ErrorGraph)) {
+ // Find the BugReport with the original location.
+ assert(ErrorGraph.Index < bugReports.size());
+ BugReport *R = bugReports[ErrorGraph.Index];
+ assert(R && "No original report found for sliced graph.");
+ assert(R->isValid() && "Report selected by trimmed graph marked invalid.");
+
+ // Start building the path diagnostic...
+ PathDiagnosticBuilder PDB(*this, R, ErrorGraph.BackMap, &PC);
+ const ExplodedNode *N = ErrorGraph.ErrorNode;
+
+ // Register additional node visitors.
+ R->addVisitor(llvm::make_unique<NilReceiverBRVisitor>());
+ R->addVisitor(llvm::make_unique<ConditionBRVisitor>());
+ R->addVisitor(llvm::make_unique<LikelyFalsePositiveSuppressionBRVisitor>());
+
+ BugReport::VisitorList visitors;
+ unsigned origReportConfigToken, finalReportConfigToken;
+ LocationContextMap LCM;
+
+ // While generating diagnostics, it's possible the visitors will decide
+ // new symbols and regions are interesting, or add other visitors based on
+ // the information they find. If they do, we need to regenerate the path
+ // based on our new report configuration.
+ do {
+ // Get a clean copy of all the visitors.
+ for (BugReport::visitor_iterator I = R->visitor_begin(),
+ E = R->visitor_end(); I != E; ++I)
+ visitors.push_back((*I)->clone());
+
+ // Clear out the active path from any previous work.
+ PD.resetPath();
+ origReportConfigToken = R->getConfigurationChangeToken();
+
+ // Generate the very last diagnostic piece - the piece is visible before
+ // the trace is expanded.
+ std::unique_ptr<PathDiagnosticPiece> LastPiece;
+ for (BugReport::visitor_iterator I = visitors.begin(), E = visitors.end();
+ I != E; ++I) {
+ if (std::unique_ptr<PathDiagnosticPiece> Piece =
+ (*I)->getEndPath(PDB, N, *R)) {
+ assert(!LastPiece &&
+ "There can only be one final piece in a diagnostic.");
+ LastPiece = std::move(Piece);
+ }
+ }
+
+ if (ActiveScheme != PathDiagnosticConsumer::None) {
+ if (!LastPiece)
+ LastPiece = BugReporterVisitor::getDefaultEndPath(PDB, N, *R);
+ assert(LastPiece);
+ PD.setEndOfPath(std::move(LastPiece));
+ }
+
+ // Make sure we get a clean location context map so we don't
+ // hold onto old mappings.
+ LCM.clear();
+
+ switch (ActiveScheme) {
+ case PathDiagnosticConsumer::AlternateExtensive:
+ GenerateAlternateExtensivePathDiagnostic(PD, PDB, N, LCM, visitors);
+ break;
+ case PathDiagnosticConsumer::Extensive:
+ GenerateExtensivePathDiagnostic(PD, PDB, N, LCM, visitors);
+ break;
+ case PathDiagnosticConsumer::Minimal:
+ GenerateMinimalPathDiagnostic(PD, PDB, N, LCM, visitors);
+ break;
+ case PathDiagnosticConsumer::None:
+ GenerateVisitorsOnlyPathDiagnostic(PD, PDB, N, visitors);
+ break;
+ }
+
+ // Clean up the visitors we used.
+ visitors.clear();
+
+ // Did anything change while generating this path?
+ finalReportConfigToken = R->getConfigurationChangeToken();
+ } while (finalReportConfigToken != origReportConfigToken);
+
+ if (!R->isValid())
+ continue;
+
+ // Finally, prune the diagnostic path of uninteresting stuff.
+ if (!PD.path.empty()) {
+ if (R->shouldPrunePath() && getAnalyzerOptions().shouldPrunePaths()) {
+ bool stillHasNotes = removeUnneededCalls(PD.getMutablePieces(), R, LCM);
+ assert(stillHasNotes);
+ (void)stillHasNotes;
+ }
+
+ // Redirect all call pieces to have valid locations.
+ adjustCallLocations(PD.getMutablePieces());
+ removePiecesWithInvalidLocations(PD.getMutablePieces());
+
+ if (ActiveScheme == PathDiagnosticConsumer::AlternateExtensive) {
+ SourceManager &SM = getSourceManager();
+
+ // Reduce the number of edges from a very conservative set
+ // to an aesthetically pleasing subset that conveys the
+ // necessary information.
+ OptimizedCallsSet OCS;
+ while (optimizeEdges(PD.getMutablePieces(), SM, OCS, LCM)) {}
+
+ // Drop the very first function-entry edge. It's not really necessary
+ // for top-level functions.
+ dropFunctionEntryEdge(PD.getMutablePieces(), LCM, SM);
+ }
+
+ // Remove messages that are basically the same, and edges that may not
+ // make sense.
+ // We have to do this after edge optimization in the Extensive mode.
+ removeRedundantMsgs(PD.getMutablePieces());
+ removeEdgesToDefaultInitializers(PD.getMutablePieces());
+ }
+
+ // We found a report and didn't suppress it.
+ return true;
+ }
+
+ // We suppressed all the reports in this equivalence class.
+ assert(!HasInvalid && "Inconsistent suppression");
+ (void)HasInvalid;
+ return false;
+}
+
+void BugReporter::Register(BugType *BT) {
+ BugTypes = F.add(BugTypes, BT);
+}
+
+void BugReporter::emitReport(std::unique_ptr<BugReport> R) {
+ if (const ExplodedNode *E = R->getErrorNode()) {
+ // An error node must either be a sink or have a tag, otherwise
+ // it could get reclaimed before the path diagnostic is created.
+ assert((E->isSink() || E->getLocation().getTag()) &&
+ "Error node must either be a sink or have a tag");
+
+ const AnalysisDeclContext *DeclCtx =
+ E->getLocationContext()->getAnalysisDeclContext();
+ // The source of an autosynthesized body can be a handcrafted AST or a
+ // model file. Locations from handcrafted ASTs are not valid and have to
+ // be discarded; locations from model files should be preserved for
+ // processing and reporting.
+ if (DeclCtx->isBodyAutosynthesized() &&
+ !DeclCtx->isBodyAutosynthesizedFromModelFile())
+ return;
+ }
+
+ bool ValidSourceLoc = R->getLocation(getSourceManager()).isValid();
+ assert(ValidSourceLoc);
+ // If we mess up in a release build, we'd still prefer to just drop the bug
+ // instead of trying to go on.
+ if (!ValidSourceLoc)
+ return;
+
+ // Compute the bug report's hash to determine its equivalence class.
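+ // Reports whose Profile() produces the same ID are coalesced into a single
+ // equivalence class and flushed together later.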
+ llvm::FoldingSetNodeID ID;
+ R->Profile(ID);
+
+ // Look up the equivalence class. If there isn't one, create it.
+ BugType& BT = R->getBugType();
+ Register(&BT);
+ void *InsertPos;
+ BugReportEquivClass* EQ = EQClasses.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!EQ) {
+ EQ = new BugReportEquivClass(std::move(R));
+ EQClasses.InsertNode(EQ, InsertPos);
+ EQClassesVector.push_back(EQ);
+ } else
+ EQ->AddReport(std::move(R));
+}
+
+
+//===----------------------------------------------------------------------===//
+// Emitting reports in equivalence classes.
+//===----------------------------------------------------------------------===//
+
+namespace {
+struct FRIEC_WLItem {
+ const ExplodedNode *N;
+ ExplodedNode::const_succ_iterator I, E;
+
+ FRIEC_WLItem(const ExplodedNode *n)
+ : N(n), I(N->succ_begin()), E(N->succ_end()) {}
+};
+}
+
+static BugReport *
+FindReportInEquivalenceClass(BugReportEquivClass& EQ,
+ SmallVectorImpl<BugReport*> &bugReports) {
+
+ BugReportEquivClass::iterator I = EQ.begin(), E = EQ.end();
+ assert(I != E);
+ BugType& BT = I->getBugType();
+
+ // If we don't need to suppress any of the nodes because they are
+ // post-dominated by a sink, simply add all the nodes in the equivalence class
+ // to 'bugReports'. Any of the reports will serve as a "representative" report.
+ if (!BT.isSuppressOnSink()) {
+ BugReport *R = &*I;
+ for (BugReportEquivClass::iterator I=EQ.begin(), E=EQ.end(); I!=E; ++I) {
+ const ExplodedNode *N = I->getErrorNode();
+ if (N) {
+ R = &*I;
+ bugReports.push_back(R);
+ }
+ }
+ return R;
+ }
+
+ // For bug reports that should be suppressed when all paths are post-dominated
+ // by a sink node, iterate through the reports in the equivalence class
+ // until we find one that isn't post-dominated (if one exists). We use a
+ // DFS traversal of the ExplodedGraph to find a non-sink node. We could write
+ // this as a recursive function, but we don't want to risk blowing out the
+ // stack for very long paths.
+ BugReport *exampleReport = nullptr;
+
+ for (; I != E; ++I) {
+ const ExplodedNode *errorNode = I->getErrorNode();
+
+ if (!errorNode)
+ continue;
+ if (errorNode->isSink()) {
+ llvm_unreachable(
+ "BugType::isSuppressSink() should not be 'true' for sink end nodes");
+ }
+ // No successors? By definition this node isn't post-dominated by a sink.
+ if (errorNode->succ_empty()) {
+ bugReports.push_back(&*I);
+ if (!exampleReport)
+ exampleReport = &*I;
+ continue;
+ }
+
+ // At this point we know that 'errorNode' is not a sink and that it has at
+ // least one successor. Use a DFS worklist to find a non-sink end-of-path node.
+ typedef FRIEC_WLItem WLItem;
+ typedef SmallVector<WLItem, 10> DFSWorkList;
+ llvm::DenseMap<const ExplodedNode *, unsigned> Visited;
+
+ DFSWorkList WL;
+ WL.push_back(errorNode);
+ Visited[errorNode] = 1;
+
+ while (!WL.empty()) {
+ WLItem &WI = WL.back();
+ assert(!WI.N->succ_empty());
+
+ for (; WI.I != WI.E; ++WI.I) {
+ const ExplodedNode *Succ = *WI.I;
+ // End-of-path node?
+ if (Succ->succ_empty()) {
+ // If we found an end-of-path node that is not a sink.
+ if (!Succ->isSink()) {
+ bugReports.push_back(&*I);
+ if (!exampleReport)
+ exampleReport = &*I;
+ WL.clear();
+ break;
+ }
+ // Found a sink? Continue on to the next successor.
+ continue;
+ }
+ // Mark the successor as visited. If it hasn't been explored,
+ // enqueue it to the DFS worklist.
+ unsigned &mark = Visited[Succ];
+ if (!mark) {
+ mark = 1;
+ WL.push_back(Succ);
+ break;
+ }
+ }
+
+ // The worklist may have been cleared at this point. First
+ // check if it is empty before checking the last item.
+ if (!WL.empty() && &WL.back() == &WI)
+ WL.pop_back();
+ }
+ }
+
+ // 'exampleReport' will be null if all the nodes in the equivalence class
+ // were post-dominated by sinks.
+ return exampleReport;
+}
+
+void BugReporter::FlushReport(BugReportEquivClass& EQ) {
+ SmallVector<BugReport*, 10> bugReports;
+ BugReport *exampleReport = FindReportInEquivalenceClass(EQ, bugReports);
+ if (exampleReport) {
+ for (PathDiagnosticConsumer *PDC : getPathDiagnosticConsumers()) {
+ FlushReport(exampleReport, *PDC, bugReports);
+ }
+ }
+}
+
+void BugReporter::FlushReport(BugReport *exampleReport,
+ PathDiagnosticConsumer &PD,
+ ArrayRef<BugReport*> bugReports) {
+
+ // FIXME: Make sure we use the 'R' for the path that was actually used.
+ // Probably doesn't make a difference in practice.
+ BugType& BT = exampleReport->getBugType();
+
+ std::unique_ptr<PathDiagnostic> D(new PathDiagnostic(
+ exampleReport->getBugType().getCheckName(),
+ exampleReport->getDeclWithIssue(), exampleReport->getBugType().getName(),
+ exampleReport->getDescription(),
+ exampleReport->getShortDescription(/*Fallback=*/false), BT.getCategory(),
+ exampleReport->getUniqueingLocation(),
+ exampleReport->getUniqueingDecl()));
+
+ MaxBugClassSize = std::max(bugReports.size(),
+ static_cast<size_t>(MaxBugClassSize));
+
+ // Generate the full path diagnostic, using the generation scheme
+ // specified by the PathDiagnosticConsumer. Note that we have to generate
+ // path diagnostics even for consumers which do not support paths, because
+ // the BugReporterVisitors may mark this bug as a false positive.
+ if (!bugReports.empty())
+ if (!generatePathDiagnostic(*D.get(), PD, bugReports))
+ return;
+
+ MaxValidBugClassSize = std::max(bugReports.size(),
+ static_cast<size_t>(MaxValidBugClassSize));
+
+ // Examine the report and see if the last piece is in a header. Reset the
+ // report location to the last piece in the main source file.
+ AnalyzerOptions& Opts = getAnalyzerOptions();
+ if (Opts.shouldReportIssuesInMainSourceFile() && !Opts.AnalyzeAll)
+ D->resetDiagnosticLocationToMainFile();
+
+ // If the path is empty, generate a single step path with the location
+ // of the issue.
+ if (D->path.empty()) {
+ PathDiagnosticLocation L = exampleReport->getLocation(getSourceManager());
+ auto piece = llvm::make_unique<PathDiagnosticEventPiece>(
+ L, exampleReport->getDescription());
+ for (SourceRange Range : exampleReport->getRanges())
+ piece->addRange(Range);
+ D->setEndOfPath(std::move(piece));
+ }
+
+ // Get the meta data.
+ const BugReport::ExtraTextList &Meta = exampleReport->getExtraText();
+ for (BugReport::ExtraTextList::const_iterator i = Meta.begin(),
+ e = Meta.end(); i != e; ++i) {
+ D->addMeta(*i);
+ }
+
+ PD.HandlePathDiagnostic(std::move(D));
+}
+
+void BugReporter::EmitBasicReport(const Decl *DeclWithIssue,
+ const CheckerBase *Checker,
+ StringRef Name, StringRef Category,
+ StringRef Str, PathDiagnosticLocation Loc,
+ ArrayRef<SourceRange> Ranges) {
+ EmitBasicReport(DeclWithIssue, Checker->getCheckName(), Name, Category, Str,
+ Loc, Ranges);
+}
+void BugReporter::EmitBasicReport(const Decl *DeclWithIssue,
+ CheckName CheckName,
+ StringRef name, StringRef category,
+ StringRef str, PathDiagnosticLocation Loc,
+ ArrayRef<SourceRange> Ranges) {
+
+ // 'BT' is owned by BugReporter.
+ BugType *BT = getBugTypeForName(CheckName, name, category);
+ auto R = llvm::make_unique<BugReport>(*BT, str, Loc);
+ R->setDeclWithIssue(DeclWithIssue);
+ for (ArrayRef<SourceRange>::iterator I = Ranges.begin(), E = Ranges.end();
+ I != E; ++I)
+ R->addRange(*I);
+ emitReport(std::move(R));
+}
+
+BugType *BugReporter::getBugTypeForName(CheckName CheckName, StringRef name,
+ StringRef category) {
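+ // One BugType per "<check>:<name>:<category>" key, created lazily and cached.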
+ SmallString<136> fullDesc;
+ llvm::raw_svector_ostream(fullDesc) << CheckName.getName() << ":" << name
+ << ":" << category;
+ BugType *&BT = StrBugTypes[fullDesc];
+ if (!BT)
+ BT = new BugType(CheckName, name, category);
+ return BT;
+}
+
+LLVM_DUMP_METHOD void PathPieces::dump() const {
+ unsigned index = 0;
+ for (PathPieces::const_iterator I = begin(), E = end(); I != E; ++I) {
+ llvm::errs() << "[" << index++ << "] ";
+ (*I)->dump();
+ llvm::errs() << "\n";
+ }
+}
+
+void PathDiagnosticCallPiece::dump() const {
+ llvm::errs() << "CALL\n--------------\n";
+
+ if (const Stmt *SLoc = getLocStmt(getLocation()))
+ SLoc->dump();
+ else if (const NamedDecl *ND = dyn_cast<NamedDecl>(getCallee()))
+ llvm::errs() << *ND << "\n";
+ else
+ getLocation().dump();
+}
+
+void PathDiagnosticEventPiece::dump() const {
+ llvm::errs() << "EVENT\n--------------\n";
+ llvm::errs() << getString() << "\n";
+ llvm::errs() << " ---- at ----\n";
+ getLocation().dump();
+}
+
+void PathDiagnosticControlFlowPiece::dump() const {
+ llvm::errs() << "CONTROL\n--------------\n";
+ getStartLocation().dump();
+ llvm::errs() << " ---- to ----\n";
+ getEndLocation().dump();
+}
+
+void PathDiagnosticMacroPiece::dump() const {
+ llvm::errs() << "MACRO\n--------------\n";
+ // FIXME: Print which macro is being invoked.
+}
+
+void PathDiagnosticLocation::dump() const {
+ if (!isValid()) {
+ llvm::errs() << "<INVALID>\n";
+ return;
+ }
+
+ switch (K) {
+ case RangeK:
+ // FIXME: actually print the range.
+ llvm::errs() << "<range>\n";
+ break;
+ case SingleLocK:
+ asLocation().dump();
+ llvm::errs() << "\n";
+ break;
+ case StmtK:
+ if (S)
+ S->dump();
+ else
+ llvm::errs() << "<NULL STMT>\n";
+ break;
+ case DeclK:
+ if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(D))
+ llvm::errs() << *ND << "\n";
+ else if (isa<BlockDecl>(D))
+ // FIXME: Make this nicer.
+ llvm::errs() << "<block>\n";
+ else if (D)
+ llvm::errs() << "<unknown decl>\n";
+ else
+ llvm::errs() << "<NULL DECL>\n";
+ break;
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
new file mode 100644
index 0000000..cf1e0a6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -0,0 +1,1643 @@
+//===--- BugReporterVisitors.cpp - Helpers for reporting bugs --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a set of BugReporter "visitors" which can be used to
+// enhance the diagnostics reported for a bug.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+using llvm::FoldingSetNodeID;
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
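+// Returns true if \p E is a DeclRefExpr whose declaration has reference type,
+// e.g. a use of 'r' given 'int &r = x;'.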
+bool bugreporter::isDeclRefExprToReference(const Expr *E) {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ return DRE->getDecl()->getType()->isReferenceType();
+ }
+ return false;
+}
+
+const Expr *bugreporter::getDerefExpr(const Stmt *S) {
+ // Pattern match for a few useful cases:
+ // a[0], p->f, *p
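+ // e.g. for 'p->f' we return 'p', and for 'a[0]' we return 'a'.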
+ const Expr *E = dyn_cast<Expr>(S);
+ if (!E)
+ return nullptr;
+ E = E->IgnoreParenCasts();
+
+ while (true) {
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(E)) {
+ assert(B->isAssignmentOp());
+ E = B->getLHS()->IgnoreParenCasts();
+ continue;
+ }
+ else if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) {
+ if (U->getOpcode() == UO_Deref)
+ return U->getSubExpr()->IgnoreParenCasts();
+ }
+ else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ if (ME->isArrow() || isDeclRefExprToReference(ME->getBase())) {
+ return ME->getBase()->IgnoreParenCasts();
+ } else {
+ // If we have a member expr with a dot, the base must have been
+ // dereferenced.
+ return getDerefExpr(ME->getBase());
+ }
+ }
+ else if (const ObjCIvarRefExpr *IvarRef = dyn_cast<ObjCIvarRefExpr>(E)) {
+ return IvarRef->getBase()->IgnoreParenCasts();
+ }
+ else if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(E)) {
+ return AE->getBase();
+ }
+ else if (isDeclRefExprToReference(E)) {
+ return E;
+ }
+ break;
+ }
+
+ return nullptr;
+}
+
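+// Returns the denominator of the division at \p N, e.g. 'y' for 'x / y'.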
+const Stmt *bugreporter::GetDenomExpr(const ExplodedNode *N) {
+ const Stmt *S = N->getLocationAs<PreStmt>()->getStmt();
+ if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(S))
+ return BE->getRHS();
+ return nullptr;
+}
+
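+// Returns the returned expression at \p N, e.g. 'p' for 'return p;'.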
+const Stmt *bugreporter::GetRetValExpr(const ExplodedNode *N) {
+ const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
+ if (const ReturnStmt *RS = dyn_cast<ReturnStmt>(S))
+ return RS->getRetValue();
+ return nullptr;
+}
+
+//===----------------------------------------------------------------------===//
+// Definitions for bug reporter visitors.
+//===----------------------------------------------------------------------===//
+
+std::unique_ptr<PathDiagnosticPiece>
+BugReporterVisitor::getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *EndPathNode, BugReport &BR) {
+ return nullptr;
+}
+
+std::unique_ptr<PathDiagnosticPiece> BugReporterVisitor::getDefaultEndPath(
+ BugReporterContext &BRC, const ExplodedNode *EndPathNode, BugReport &BR) {
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::createEndOfPath(EndPathNode, BRC.getSourceManager());
+
+ const auto &Ranges = BR.getRanges();
+
+ // Only add the statement itself as a range if we didn't specify any
+ // special ranges for this report.
+ auto P = llvm::make_unique<PathDiagnosticEventPiece>(
+ L, BR.getDescription(), Ranges.begin() == Ranges.end());
+ for (SourceRange Range : Ranges)
+ P->addRange(Range);
+
+ return std::move(P);
+}
+
+
+namespace {
+/// Emits an extra note at the return statement of an interesting stack frame.
+///
+/// The returned value is marked as an interesting value, and if it's null,
+/// adds a visitor to track where it became null.
+///
+/// This visitor is intended to be used when another visitor discovers that an
+/// interesting value comes from an inlined function call.
+class ReturnVisitor : public BugReporterVisitorImpl<ReturnVisitor> {
+ const StackFrameContext *StackFrame;
+ enum {
+ Initial,
+ MaybeUnsuppress,
+ Satisfied
+ } Mode;
+
+ bool EnableNullFPSuppression;
+
+public:
+ ReturnVisitor(const StackFrameContext *Frame, bool Suppressed)
+ : StackFrame(Frame), Mode(Initial), EnableNullFPSuppression(Suppressed) {}
+
+ static void *getTag() {
+ static int Tag = 0;
+ return static_cast<void *>(&Tag);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ ID.AddPointer(ReturnVisitor::getTag());
+ ID.AddPointer(StackFrame);
+ ID.AddBoolean(EnableNullFPSuppression);
+ }
+
+ /// Adds a ReturnVisitor if the given statement represents a call that was
+ /// inlined.
+ ///
+ /// This will search back through the ExplodedGraph, starting from the given
+ /// node, looking for when the given statement was processed. If it turns out
+ /// the statement is a call that was inlined, we add the visitor to the
+ /// bug report, so it can print a note later.
+ static void addVisitorIfNecessary(const ExplodedNode *Node, const Stmt *S,
+ BugReport &BR,
+ bool InEnableNullFPSuppression) {
+ if (!CallEvent::isCallStmt(S))
+ return;
+
+ // First, find when we processed the statement.
+ do {
+ if (Optional<CallExitEnd> CEE = Node->getLocationAs<CallExitEnd>())
+ if (CEE->getCalleeContext()->getCallSite() == S)
+ break;
+ if (Optional<StmtPoint> SP = Node->getLocationAs<StmtPoint>())
+ if (SP->getStmt() == S)
+ break;
+
+ Node = Node->getFirstPred();
+ } while (Node);
+
+ // Next, step over any post-statement checks.
+ while (Node && Node->getLocation().getAs<PostStmt>())
+ Node = Node->getFirstPred();
+ if (!Node)
+ return;
+
+ // Finally, see if we inlined the call.
+ Optional<CallExitEnd> CEE = Node->getLocationAs<CallExitEnd>();
+ if (!CEE)
+ return;
+
+ const StackFrameContext *CalleeContext = CEE->getCalleeContext();
+ if (CalleeContext->getCallSite() != S)
+ return;
+
+ // Check the return value.
+ ProgramStateRef State = Node->getState();
+ SVal RetVal = State->getSVal(S, Node->getLocationContext());
+
+ // Handle cases where a reference is returned and then immediately used.
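+ // e.g. given 'int &f();', the call 'f() == 0' loads through the returned
+ // lvalue before the comparison.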
+ if (cast<Expr>(S)->isGLValue())
+ if (Optional<Loc> LValue = RetVal.getAs<Loc>())
+ RetVal = State->getSVal(*LValue);
+
+ // See if the return value is NULL. If so, suppress the report.
+ SubEngine *Eng = State->getStateManager().getOwningEngine();
+ assert(Eng && "Cannot file a bug report without an owning engine");
+ AnalyzerOptions &Options = Eng->getAnalysisManager().options;
+
+ bool EnableNullFPSuppression = false;
+ if (InEnableNullFPSuppression && Options.shouldSuppressNullReturnPaths())
+ if (Optional<Loc> RetLoc = RetVal.getAs<Loc>())
+ EnableNullFPSuppression = State->isNull(*RetLoc).isConstrainedTrue();
+
+ BR.markInteresting(CalleeContext);
+ BR.addVisitor(llvm::make_unique<ReturnVisitor>(CalleeContext,
+ EnableNullFPSuppression));
+ }
+
+ /// Returns true if any counter-suppression heuristics are enabled for
+ /// ReturnVisitor.
+ static bool hasCounterSuppression(AnalyzerOptions &Options) {
+ return Options.shouldAvoidSuppressingNullArgumentPaths();
+ }
+
+ PathDiagnosticPiece *visitNodeInitial(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+ // Only print a message at the interesting return statement.
+ if (N->getLocationContext() != StackFrame)
+ return nullptr;
+
+ Optional<StmtPoint> SP = N->getLocationAs<StmtPoint>();
+ if (!SP)
+ return nullptr;
+
+ const ReturnStmt *Ret = dyn_cast<ReturnStmt>(SP->getStmt());
+ if (!Ret)
+ return nullptr;
+
+ // Okay, we're at the right return statement, but do we have the return
+ // value available?
+ ProgramStateRef State = N->getState();
+ SVal V = State->getSVal(Ret, StackFrame);
+ if (V.isUnknownOrUndef())
+ return nullptr;
+
+ // Don't print any more notes after this one.
+ Mode = Satisfied;
+
+ const Expr *RetE = Ret->getRetValue();
+ assert(RetE && "Tracking a return value for a void function");
+
+ // Handle cases where a reference is returned and then immediately used.
+ Optional<Loc> LValue;
+ if (RetE->isGLValue()) {
+ if ((LValue = V.getAs<Loc>())) {
+ SVal RValue = State->getRawSVal(*LValue, RetE->getType());
+ if (RValue.getAs<DefinedSVal>())
+ V = RValue;
+ }
+ }
+
+ // Ignore aggregate rvalues.
+ if (V.getAs<nonloc::LazyCompoundVal>() ||
+ V.getAs<nonloc::CompoundVal>())
+ return nullptr;
+
+ RetE = RetE->IgnoreParenCasts();
+
+ // If we can't prove the return value is 0, just mark it interesting, and
+ // make sure to track it into any further inner functions.
+ if (!State->isNull(V).isConstrainedTrue()) {
+ BR.markInteresting(V);
+ ReturnVisitor::addVisitorIfNecessary(N, RetE, BR,
+ EnableNullFPSuppression);
+ return nullptr;
+ }
+
+ // If we're returning 0, we should track where that 0 came from.
+ bugreporter::trackNullOrUndefValue(N, RetE, BR, /*IsArg*/ false,
+ EnableNullFPSuppression);
+
+ // Build an appropriate message based on the return value.
+ SmallString<64> Msg;
+ llvm::raw_svector_ostream Out(Msg);
+
+ if (V.getAs<Loc>()) {
+ // If we have counter-suppression enabled, make sure we keep visiting
+ // future nodes. We want to emit a path note as well, in case
+ // the report is resurrected as valid later on.
+ ExprEngine &Eng = BRC.getBugReporter().getEngine();
+ AnalyzerOptions &Options = Eng.getAnalysisManager().options;
+ if (EnableNullFPSuppression && hasCounterSuppression(Options))
+ Mode = MaybeUnsuppress;
+
+ if (RetE->getType()->isObjCObjectPointerType())
+ Out << "Returning nil";
+ else
+ Out << "Returning null pointer";
+ } else {
+ Out << "Returning zero";
+ }
+
+ if (LValue) {
+ if (const MemRegion *MR = LValue->getAsRegion()) {
+ if (MR->canPrintPretty()) {
+ Out << " (reference to ";
+ MR->printPretty(Out);
+ Out << ")";
+ }
+ }
+ } else {
+ // FIXME: We should have a more generalized location printing mechanism.
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(RetE))
+ if (const DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(DR->getDecl()))
+ Out << " (loaded from '" << *DD << "')";
+ }
+
+ PathDiagnosticLocation L(Ret, BRC.getSourceManager(), StackFrame);
+ return new PathDiagnosticEventPiece(L, Out.str());
+ }
+
+ PathDiagnosticPiece *visitNodeMaybeUnsuppress(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+#ifndef NDEBUG
+ ExprEngine &Eng = BRC.getBugReporter().getEngine();
+ AnalyzerOptions &Options = Eng.getAnalysisManager().options;
+ assert(hasCounterSuppression(Options));
+#endif
+
+ // Are we at the entry node for this call?
+ Optional<CallEnter> CE = N->getLocationAs<CallEnter>();
+ if (!CE)
+ return nullptr;
+
+ if (CE->getCalleeContext() != StackFrame)
+ return nullptr;
+
+ Mode = Satisfied;
+
+ // Don't automatically suppress a report if one of the arguments is
+ // known to be a null pointer. Instead, start tracking /that/ null
+ // value back to its origin.
+ ProgramStateManager &StateMgr = BRC.getStateManager();
+ CallEventManager &CallMgr = StateMgr.getCallEventManager();
+
+ ProgramStateRef State = N->getState();
+ CallEventRef<> Call = CallMgr.getCaller(StackFrame, State);
+ for (unsigned I = 0, E = Call->getNumArgs(); I != E; ++I) {
+ Optional<Loc> ArgV = Call->getArgSVal(I).getAs<Loc>();
+ if (!ArgV)
+ continue;
+
+ const Expr *ArgE = Call->getArgExpr(I);
+ if (!ArgE)
+ continue;
+
+ // Is it possible for this argument to be non-null?
+ if (!State->isNull(*ArgV).isConstrainedTrue())
+ continue;
+
+ if (bugreporter::trackNullOrUndefValue(N, ArgE, BR, /*IsArg=*/true,
+ EnableNullFPSuppression))
+ BR.removeInvalidation(ReturnVisitor::getTag(), StackFrame);
+
+ // If we /can't/ track the null pointer, we should err on the side of
+ // false negatives, and continue towards marking this report invalid.
+ // (We will still look at the other arguments, though.)
+ }
+
+ return nullptr;
+ }
+
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) override {
+ switch (Mode) {
+ case Initial:
+ return visitNodeInitial(N, PrevN, BRC, BR);
+ case MaybeUnsuppress:
+ return visitNodeMaybeUnsuppress(N, PrevN, BRC, BR);
+ case Satisfied:
+ return nullptr;
+ }
+
+ llvm_unreachable("Invalid visit mode!");
+ }
+
+ std::unique_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *N,
+ BugReport &BR) override {
+ if (EnableNullFPSuppression)
+ BR.markInvalid(ReturnVisitor::getTag(), StackFrame);
+ return nullptr;
+ }
+};
+} // end anonymous namespace
+
+
+void FindLastStoreBRVisitor::Profile(llvm::FoldingSetNodeID &ID) const {
+ static int tag = 0;
+ ID.AddPointer(&tag);
+ ID.AddPointer(R);
+ ID.Add(V);
+ ID.AddBoolean(EnableNullFPSuppression);
+}
+
+/// Returns true if \p N represents the DeclStmt declaring and initializing
+/// \p VR.
+static bool isInitializationOfVar(const ExplodedNode *N, const VarRegion *VR) {
+ Optional<PostStmt> P = N->getLocationAs<PostStmt>();
+ if (!P)
+ return false;
+
+ const DeclStmt *DS = P->getStmtAs<DeclStmt>();
+ if (!DS)
+ return false;
+
+ if (DS->getSingleDecl() != VR->getDecl())
+ return false;
+
+ const MemSpaceRegion *VarSpace = VR->getMemorySpace();
+ const StackSpaceRegion *FrameSpace = dyn_cast<StackSpaceRegion>(VarSpace);
+ if (!FrameSpace) {
+ // If we ever directly evaluate global DeclStmts, this assertion will be
+ // invalid, but this still seems preferable to silently accepting an
+ // initialization that may be for a path-sensitive variable.
+ assert(VR->getDecl()->isStaticLocal() && "non-static stackless VarRegion");
+ return true;
+ }
+
+ assert(VR->getDecl()->hasLocalStorage());
+ const LocationContext *LCtx = N->getLocationContext();
+ return FrameSpace->getStackFrame() == LCtx->getCurrentStackFrame();
+}
+
+PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
+ const ExplodedNode *Pred,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+
+ if (Satisfied)
+ return nullptr;
+
+ const ExplodedNode *StoreSite = nullptr;
+ const Expr *InitE = nullptr;
+ bool IsParam = false;
+
+ // First see if we reached the declaration of the region.
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ if (isInitializationOfVar(Pred, VR)) {
+ StoreSite = Pred;
+ InitE = VR->getDecl()->getInit();
+ }
+ }
+
+ // If this is a post initializer expression, initializing the region, we
+ // should track the initializer expression.
+ if (Optional<PostInitializer> PIP = Pred->getLocationAs<PostInitializer>()) {
+ const MemRegion *FieldReg = (const MemRegion *)PIP->getLocationValue();
+ if (FieldReg && FieldReg == R) {
+ StoreSite = Pred;
+ InitE = PIP->getInitializer()->getInit();
+ }
+ }
+
+ // Otherwise, see if this is the store site:
+ // (1) Succ has this binding and Pred does not, i.e. this is
+ // where the binding first occurred.
+ // (2) Succ has this binding and is a PostStore node for this region, i.e.
+ // the same binding was re-assigned here.
+ if (!StoreSite) {
+ if (Succ->getState()->getSVal(R) != V)
+ return nullptr;
+
+ if (Pred->getState()->getSVal(R) == V) {
+ Optional<PostStore> PS = Succ->getLocationAs<PostStore>();
+ if (!PS || PS->getLocationValue() != R)
+ return nullptr;
+ }
+
+ StoreSite = Succ;
+
+ // If this is an assignment expression, we can track the value
+ // being assigned.
+ if (Optional<PostStmt> P = Succ->getLocationAs<PostStmt>())
+ if (const BinaryOperator *BO = P->getStmtAs<BinaryOperator>())
+ if (BO->isAssignmentOp())
+ InitE = BO->getRHS();
+
+ // If this is a call entry, the variable should be a parameter.
+ // FIXME: Handle CXXThisRegion as well. (This is not a priority because
+ // 'this' should never be NULL, but this visitor isn't just for NULL and
+ // UndefinedVal.)
+ if (Optional<CallEnter> CE = Succ->getLocationAs<CallEnter>()) {
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ const ParmVarDecl *Param = cast<ParmVarDecl>(VR->getDecl());
+
+ ProgramStateManager &StateMgr = BRC.getStateManager();
+ CallEventManager &CallMgr = StateMgr.getCallEventManager();
+
+ CallEventRef<> Call = CallMgr.getCaller(CE->getCalleeContext(),
+ Succ->getState());
+ InitE = Call->getArgExpr(Param->getFunctionScopeIndex());
+ IsParam = true;
+ }
+ }
+
+ // If this is a CXXTempObjectRegion, the Expr responsible for its creation
+ // is wrapped inside of it.
+ if (const CXXTempObjectRegion *TmpR = dyn_cast<CXXTempObjectRegion>(R))
+ InitE = TmpR->getExpr();
+ }
+
+ if (!StoreSite)
+ return nullptr;
+ Satisfied = true;
+
+ // If we have an expression that provided the value, try to track where it
+ // came from.
+ if (InitE) {
+ if (V.isUndef() ||
+ V.getAs<loc::ConcreteInt>() || V.getAs<nonloc::ConcreteInt>()) {
+ if (!IsParam)
+ InitE = InitE->IgnoreParenCasts();
+ bugreporter::trackNullOrUndefValue(StoreSite, InitE, BR, IsParam,
+ EnableNullFPSuppression);
+ } else {
+ ReturnVisitor::addVisitorIfNecessary(StoreSite, InitE->IgnoreParenCasts(),
+ BR, EnableNullFPSuppression);
+ }
+ }
+
+ // Okay, we've found the binding. Emit an appropriate message.
+ SmallString<256> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+
+ if (Optional<PostStmt> PS = StoreSite->getLocationAs<PostStmt>()) {
+ const Stmt *S = PS->getStmt();
+ const char *action = nullptr;
+ const DeclStmt *DS = dyn_cast<DeclStmt>(S);
+ const VarRegion *VR = dyn_cast<VarRegion>(R);
+
+ if (DS) {
+ action = R->canPrintPretty() ? "initialized to " :
+ "Initializing to ";
+ } else if (isa<BlockExpr>(S)) {
+ action = R->canPrintPretty() ? "captured by block as " :
+ "Captured by block as ";
+ if (VR) {
+ // See if we can get the BlockVarRegion.
+ ProgramStateRef State = StoreSite->getState();
+ SVal V = State->getSVal(S, PS->getLocationContext());
+ if (const BlockDataRegion *BDR =
+ dyn_cast_or_null<BlockDataRegion>(V.getAsRegion())) {
+ if (const VarRegion *OriginalR = BDR->getOriginalRegion(VR)) {
+ if (Optional<KnownSVal> KV =
+ State->getSVal(OriginalR).getAs<KnownSVal>())
+ BR.addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
+ *KV, OriginalR, EnableNullFPSuppression));
+ }
+ }
+ }
+ }
+
+ if (action) {
+ if (R->canPrintPretty()) {
+ R->printPretty(os);
+ os << " ";
+ }
+
+ if (V.getAs<loc::ConcreteInt>()) {
+ bool b = false;
+ if (R->isBoundable()) {
+ if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R)) {
+ if (TR->getValueType()->isObjCObjectPointerType()) {
+ os << action << "nil";
+ b = true;
+ }
+ }
+ }
+
+ if (!b)
+ os << action << "a null pointer value";
+ } else if (Optional<nonloc::ConcreteInt> CVal =
+ V.getAs<nonloc::ConcreteInt>()) {
+ os << action << CVal->getValue();
+ }
+ else if (DS) {
+ if (V.isUndef()) {
+ if (isa<VarRegion>(R)) {
+ const VarDecl *VD = cast<VarDecl>(DS->getSingleDecl());
+ if (VD->getInit()) {
+ os << (R->canPrintPretty() ? "initialized" : "Initializing")
+ << " to a garbage value";
+ } else {
+ os << (R->canPrintPretty() ? "declared" : "Declaring")
+ << " without an initial value";
+ }
+ }
+ }
+ else {
+ os << (R->canPrintPretty() ? "initialized" : "Initialized")
+ << " here";
+ }
+ }
+ }
+ } else if (StoreSite->getLocation().getAs<CallEnter>()) {
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ const ParmVarDecl *Param = cast<ParmVarDecl>(VR->getDecl());
+
+ os << "Passing ";
+
+ if (V.getAs<loc::ConcreteInt>()) {
+ if (Param->getType()->isObjCObjectPointerType())
+ os << "nil object reference";
+ else
+ os << "null pointer value";
+ } else if (V.isUndef()) {
+ os << "uninitialized value";
+ } else if (Optional<nonloc::ConcreteInt> CI =
+ V.getAs<nonloc::ConcreteInt>()) {
+ os << "the value " << CI->getValue();
+ } else {
+ os << "value";
+ }
+
+ // Printed parameter indexes are 1-based, not 0-based.
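+ // e.g. "Passing null pointer value via 2nd parameter".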
+ unsigned Idx = Param->getFunctionScopeIndex() + 1;
+ os << " via " << Idx << llvm::getOrdinalSuffix(Idx) << " parameter";
+ if (R->canPrintPretty()) {
+ os << " ";
+ R->printPretty(os);
+ }
+ }
+ }
+
+ if (os.str().empty()) {
+ if (V.getAs<loc::ConcreteInt>()) {
+ bool b = false;
+ if (R->isBoundable()) {
+ if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R)) {
+ if (TR->getValueType()->isObjCObjectPointerType()) {
+ os << "nil object reference stored";
+ b = true;
+ }
+ }
+ }
+ if (!b) {
+ if (R->canPrintPretty())
+ os << "Null pointer value stored";
+ else
+ os << "Storing null pointer value";
+ }
+
+ } else if (V.isUndef()) {
+ if (R->canPrintPretty())
+ os << "Uninitialized value stored";
+ else
+ os << "Storing uninitialized value";
+
+ } else if (Optional<nonloc::ConcreteInt> CV =
+ V.getAs<nonloc::ConcreteInt>()) {
+ if (R->canPrintPretty())
+ os << "The value " << CV->getValue() << " is assigned";
+ else
+ os << "Assigning " << CV->getValue();
+
+ } else {
+ if (R->canPrintPretty())
+ os << "Value assigned";
+ else
+ os << "Assigning value";
+ }
+
+ if (R->canPrintPretty()) {
+ os << " to ";
+ R->printPretty(os);
+ }
+ }
+
+ // Construct a new PathDiagnosticPiece.
+ ProgramPoint P = StoreSite->getLocation();
+ PathDiagnosticLocation L;
+ if (P.getAs<CallEnter>() && InitE)
+ L = PathDiagnosticLocation(InitE, BRC.getSourceManager(),
+ P.getLocationContext());
+
+ if (!L.isValid() || !L.asLocation().isValid())
+ L = PathDiagnosticLocation::create(P, BRC.getSourceManager());
+
+ if (!L.isValid() || !L.asLocation().isValid())
+ return nullptr;
+
+ return new PathDiagnosticEventPiece(L, os.str());
+}
+
+void TrackConstraintBRVisitor::Profile(llvm::FoldingSetNodeID &ID) const {
+ static int tag = 0;
+ ID.AddPointer(&tag);
+ ID.AddBoolean(Assumption);
+ ID.Add(Constraint);
+}
+
+/// Return the tag associated with this visitor. This tag is attached to
+/// all PathDiagnosticPieces created by this visitor.
+const char *TrackConstraintBRVisitor::getTag() {
+ return "TrackConstraintBRVisitor";
+}
+
+bool TrackConstraintBRVisitor::isUnderconstrained(const ExplodedNode *N) const {
+ if (IsZeroCheck)
+ return N->getState()->isNull(Constraint).isUnderconstrained();
+ return (bool)N->getState()->assume(Constraint, !Assumption);
+}
+
+PathDiagnosticPiece *
+TrackConstraintBRVisitor::VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+ if (IsSatisfied)
+ return nullptr;
+
+ // Start tracking after we see the first state in which the value is
+ // constrained.
+ if (!IsTrackingTurnedOn)
+ if (!isUnderconstrained(N))
+ IsTrackingTurnedOn = true;
+ if (!IsTrackingTurnedOn)
+ return nullptr;
+
+ // Check if in the previous state it was feasible for this constraint
+ // to *not* be true.
+ if (isUnderconstrained(PrevN)) {
+
+ IsSatisfied = true;
+
+ // As a sanity check, make sure that the negation of the constraint
+ // was infeasible in the current state. If it is feasible, we somehow
+ // missed the transition point.
+ assert(!isUnderconstrained(N));
+
+ // We found the transition point for the constraint. We now need to
+ // pretty-print the constraint. (work-in-progress)
+ SmallString<64> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+
+ if (Constraint.getAs<Loc>()) {
+ os << "Assuming pointer value is ";
+ os << (Assumption ? "non-null" : "null");
+ }
+
+ if (os.str().empty())
+ return nullptr;
+
+ // Construct a new PathDiagnosticPiece.
+ ProgramPoint P = N->getLocation();
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::create(P, BRC.getSourceManager());
+ if (!L.isValid())
+ return nullptr;
+
+ PathDiagnosticEventPiece *X = new PathDiagnosticEventPiece(L, os.str());
+ X->setTag(getTag());
+ return X;
+ }
+
+ return nullptr;
+}
+
+SuppressInlineDefensiveChecksVisitor::
+SuppressInlineDefensiveChecksVisitor(DefinedSVal Value, const ExplodedNode *N)
+ : V(Value), IsSatisfied(false), IsTrackingTurnedOn(false) {
+
+ // Check if the visitor is disabled.
+ SubEngine *Eng = N->getState()->getStateManager().getOwningEngine();
+ assert(Eng && "Cannot file a bug report without an owning engine");
+ AnalyzerOptions &Options = Eng->getAnalysisManager().options;
+ if (!Options.shouldSuppressInlinedDefensiveChecks())
+ IsSatisfied = true;
+
+ assert(N->getState()->isNull(V).isConstrainedTrue() &&
+ "The visitor only tracks the cases where V is constrained to 0");
+}
+
+void SuppressInlineDefensiveChecksVisitor::Profile(FoldingSetNodeID &ID) const {
+ static int id = 0;
+ ID.AddPointer(&id);
+ ID.Add(V);
+}
+
+const char *SuppressInlineDefensiveChecksVisitor::getTag() {
+ return "IDCVisitor";
+}
+
+PathDiagnosticPiece *
+SuppressInlineDefensiveChecksVisitor::VisitNode(const ExplodedNode *Succ,
+ const ExplodedNode *Pred,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+ if (IsSatisfied)
+ return nullptr;
+
+ // Start tracking after we see the first state in which the value is null.
+ if (!IsTrackingTurnedOn)
+ if (Succ->getState()->isNull(V).isConstrainedTrue())
+ IsTrackingTurnedOn = true;
+ if (!IsTrackingTurnedOn)
+ return nullptr;
+
+ // Check if in the previous state it was feasible for this value
+ // to *not* be null.
+ if (!Pred->getState()->isNull(V).isConstrainedTrue()) {
+ IsSatisfied = true;
+
+ assert(Succ->getState()->isNull(V).isConstrainedTrue());
+
+ // Check if this is an inlined defensive check.
+ const LocationContext *CurLC = Succ->getLocationContext();
+ const LocationContext *ReportLC = BR.getErrorNode()->getLocationContext();
+ if (CurLC != ReportLC && !CurLC->isParentOf(ReportLC))
+ BR.markInvalid("Suppress IDC", CurLC);
+ }
+ return nullptr;
+}
+
+static const MemRegion *getLocationRegionIfReference(const Expr *E,
+ const ExplodedNode *N) {
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ if (!VD->getType()->isReferenceType())
+ return nullptr;
+ ProgramStateManager &StateMgr = N->getState()->getStateManager();
+ MemRegionManager &MRMgr = StateMgr.getRegionManager();
+ return MRMgr.getVarRegion(VD, N->getLocationContext());
+ }
+ }
+
+ // FIXME: This does not handle other kinds of null references,
+ // for example, references from FieldRegions:
+ // struct Wrapper { int &ref; };
+ // Wrapper w = { *(int *)0 };
+ // w.ref = 1;
+
+ return nullptr;
+}
+
+static const Expr *peelOffOuterExpr(const Expr *Ex,
+ const ExplodedNode *N) {
+ Ex = Ex->IgnoreParenCasts();
+ if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Ex))
+ return peelOffOuterExpr(EWC->getSubExpr(), N);
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Ex))
+ return peelOffOuterExpr(OVE->getSourceExpr(), N);
+
+ // Peel off the ternary operator.
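+ // e.g. for 'p = cond ? a : b', continue with 'a' or 'b' depending on which
+ // branch the ExplodedGraph shows was actually taken.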
+ if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(Ex)) {
+ // Find a node where the branching occurred and find out which branch
+ // we took (true/false) by looking at the ExplodedGraph.
+ const ExplodedNode *NI = N;
+ do {
+ ProgramPoint ProgPoint = NI->getLocation();
+ if (Optional<BlockEdge> BE = ProgPoint.getAs<BlockEdge>()) {
+ const CFGBlock *srcBlk = BE->getSrc();
+ if (const Stmt *term = srcBlk->getTerminator()) {
+ if (term == CO) {
+ bool TookTrueBranch = (*(srcBlk->succ_begin()) == BE->getDst());
+ if (TookTrueBranch)
+ return peelOffOuterExpr(CO->getTrueExpr(), N);
+ else
+ return peelOffOuterExpr(CO->getFalseExpr(), N);
+ }
+ }
+ }
+ NI = NI->getFirstPred();
+ } while (NI);
+ }
+ return Ex;
+}
+
+bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N,
+ const Stmt *S,
+ BugReport &report, bool IsArg,
+ bool EnableNullFPSuppression) {
+ if (!S || !N)
+ return false;
+
+ if (const Expr *Ex = dyn_cast<Expr>(S)) {
+ Ex = Ex->IgnoreParenCasts();
+ const Expr *PeeledEx = peelOffOuterExpr(Ex, N);
+ if (Ex != PeeledEx)
+ S = PeeledEx;
+ }
+
+ const Expr *Inner = nullptr;
+ if (const Expr *Ex = dyn_cast<Expr>(S)) {
+ Ex = Ex->IgnoreParenCasts();
+ if (ExplodedGraph::isInterestingLValueExpr(Ex) || CallEvent::isCallStmt(Ex))
+ Inner = Ex;
+ }
+
+ if (IsArg && !Inner) {
+ assert(N->getLocation().getAs<CallEnter>() && "Tracking arg but not at call");
+ } else {
+ // Walk through nodes until we get one that matches the statement exactly.
+ // Alternately, if we hit a known lvalue for the statement, we know we've
+ // gone too far (though we can likely track the lvalue better anyway).
+ do {
+ const ProgramPoint &pp = N->getLocation();
+ if (Optional<StmtPoint> ps = pp.getAs<StmtPoint>()) {
+ if (ps->getStmt() == S || ps->getStmt() == Inner)
+ break;
+ } else if (Optional<CallExitEnd> CEE = pp.getAs<CallExitEnd>()) {
+ if (CEE->getCalleeContext()->getCallSite() == S ||
+ CEE->getCalleeContext()->getCallSite() == Inner)
+ break;
+ }
+ N = N->getFirstPred();
+ } while (N);
+
+ if (!N)
+ return false;
+ }
+
+ ProgramStateRef state = N->getState();
+
+ // The result of the message send could be nil due to the receiver being nil.
+ // At this point in the path, the receiver should be live since we are at the
+ // message send expr. If it is nil, start tracking it.
+ if (const Expr *Receiver = NilReceiverBRVisitor::getNilReceiver(S, N))
+ trackNullOrUndefValue(N, Receiver, report, false, EnableNullFPSuppression);
+
+
+ // See if the expression we're interested in refers to a variable.
+ // If so, we can track both its contents and constraints on its value.
+ if (Inner && ExplodedGraph::isInterestingLValueExpr(Inner)) {
+ const MemRegion *R = nullptr;
+
+ // Find the ExplodedNode where the lvalue (the value of 'Ex')
+ // was computed. We need this for getting the location value.
+ const ExplodedNode *LVNode = N;
+ while (LVNode) {
+ if (Optional<PostStmt> P = LVNode->getLocation().getAs<PostStmt>()) {
+ if (P->getStmt() == Inner)
+ break;
+ }
+ LVNode = LVNode->getFirstPred();
+ }
+ assert(LVNode && "Unable to find the lvalue node.");
+ ProgramStateRef LVState = LVNode->getState();
+ SVal LVal = LVState->getSVal(Inner, LVNode->getLocationContext());
+
+ if (LVState->isNull(LVal).isConstrainedTrue()) {
+ // In case of C++ references, we want to differentiate between a null
+ // reference and reference to null pointer.
+ // If the LVal is null, check if we are dealing with null reference.
+ // For those, we want to track the location of the reference.
+ if (const MemRegion *RR = getLocationRegionIfReference(Inner, N))
+ R = RR;
+ } else {
+ R = LVState->getSVal(Inner, LVNode->getLocationContext()).getAsRegion();
+
+ // If this is a C++ reference to a null pointer, we are tracking the
+ // pointer. In addition, we should find the store at which the reference
+ // got initialized.
+ if (const MemRegion *RR = getLocationRegionIfReference(Inner, N)) {
+ if (Optional<KnownSVal> KV = LVal.getAs<KnownSVal>())
+ report.addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
+ *KV, RR, EnableNullFPSuppression));
+ }
+ }
+
+ if (R) {
+ // Mark both the variable region and its contents as interesting.
+ SVal V = LVState->getRawSVal(loc::MemRegionVal(R));
+
+ report.markInteresting(R);
+ report.markInteresting(V);
+ report.addVisitor(llvm::make_unique<UndefOrNullArgVisitor>(R));
+
+ // If the contents are symbolic, find out when they became null.
+ if (V.getAsLocSymbol(/*IncludeBaseRegions*/ true))
+ report.addVisitor(llvm::make_unique<TrackConstraintBRVisitor>(
+ V.castAs<DefinedSVal>(), false));
+
+ // Add visitor, which will suppress inline defensive checks.
+ if (Optional<DefinedSVal> DV = V.getAs<DefinedSVal>()) {
+ if (!DV->isZeroConstant() && LVState->isNull(*DV).isConstrainedTrue() &&
+ EnableNullFPSuppression) {
+ report.addVisitor(
+ llvm::make_unique<SuppressInlineDefensiveChecksVisitor>(*DV,
+ LVNode));
+ }
+ }
+
+ if (Optional<KnownSVal> KV = V.getAs<KnownSVal>())
+ report.addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
+ *KV, R, EnableNullFPSuppression));
+ return true;
+ }
+ }
+
+ // If the expression is not an "lvalue expression", we can still
+ // track the constraints on its contents.
+ SVal V = state->getSValAsScalarOrLoc(S, N->getLocationContext());
+
+ // If the value came from an inlined function call, we should at least make
+ // sure that function isn't pruned in our output.
+ if (const Expr *E = dyn_cast<Expr>(S))
+ S = E->IgnoreParenCasts();
+
+ ReturnVisitor::addVisitorIfNecessary(N, S, report, EnableNullFPSuppression);
+
+ // Uncomment this to find cases where we aren't properly getting the
+ // base value that was dereferenced.
+ // assert(!V.isUnknownOrUndef());
+ // Is it a symbolic value?
+ if (Optional<loc::MemRegionVal> L = V.getAs<loc::MemRegionVal>()) {
+ // At this point we are dealing with the region's LValue.
+ // However, if the rvalue is a symbolic region, we should track it as well.
+ // Try to use the correct type when looking up the value.
+ SVal RVal;
+ if (const Expr *E = dyn_cast<Expr>(S))
+ RVal = state->getRawSVal(L.getValue(), E->getType());
+ else
+ RVal = state->getSVal(L->getRegion());
+
+ const MemRegion *RegionRVal = RVal.getAsRegion();
+ report.addVisitor(llvm::make_unique<UndefOrNullArgVisitor>(L->getRegion()));
+
+ if (RegionRVal && isa<SymbolicRegion>(RegionRVal)) {
+ report.markInteresting(RegionRVal);
+ report.addVisitor(llvm::make_unique<TrackConstraintBRVisitor>(
+ loc::MemRegionVal(RegionRVal), false));
+ }
+ }
+
+ return true;
+}
+
+const Expr *NilReceiverBRVisitor::getNilReceiver(const Stmt *S,
+ const ExplodedNode *N) {
+ const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S);
+ if (!ME)
+ return nullptr;
+ if (const Expr *Receiver = ME->getInstanceReceiver()) {
+ ProgramStateRef state = N->getState();
+ SVal V = state->getSVal(Receiver, N->getLocationContext());
+ if (state->isNull(V).isConstrainedTrue())
+ return Receiver;
+ }
+ return nullptr;
+}
+
+PathDiagnosticPiece *NilReceiverBRVisitor::VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+ Optional<PreStmt> P = N->getLocationAs<PreStmt>();
+ if (!P)
+ return nullptr;
+
+ const Stmt *S = P->getStmt();
+ const Expr *Receiver = getNilReceiver(S, N);
+ if (!Receiver)
+ return nullptr;
+
+ llvm::SmallString<256> Buf;
+ llvm::raw_svector_ostream OS(Buf);
+
+ if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S)) {
+ OS << "'";
+ ME->getSelector().print(OS);
+ OS << "' not called";
+ }
+ else {
+ OS << "No method is called";
+ }
+ OS << " because the receiver is nil";
+
+ // The receiver was nil, and hence the method was skipped.
+ // Register a BugReporterVisitor to issue a message telling us how
+ // the receiver was null.
+ bugreporter::trackNullOrUndefValue(N, Receiver, BR, /*IsArg*/ false,
+ /*EnableNullFPSuppression*/ false);
+ // Issue a message saying that the method was skipped.
+ PathDiagnosticLocation L(Receiver, BRC.getSourceManager(),
+ N->getLocationContext());
+ return new PathDiagnosticEventPiece(L, OS.str());
+}
+
+// Registers every VarDecl inside a Stmt with a last store visitor.
+void FindLastStoreBRVisitor::registerStatementVarDecls(BugReport &BR,
+ const Stmt *S,
+ bool EnableNullFPSuppression) {
+ const ExplodedNode *N = BR.getErrorNode();
+ std::deque<const Stmt *> WorkList;
+ WorkList.push_back(S);
+
+ while (!WorkList.empty()) {
+ const Stmt *Head = WorkList.front();
+ WorkList.pop_front();
+
+ ProgramStateRef state = N->getState();
+ ProgramStateManager &StateMgr = state->getStateManager();
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Head)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ const VarRegion *R =
+ StateMgr.getRegionManager().getVarRegion(VD, N->getLocationContext());
+
+ // What did we load?
+ SVal V = state->getSVal(S, N->getLocationContext());
+
+ if (V.getAs<loc::ConcreteInt>() || V.getAs<nonloc::ConcreteInt>()) {
+ // Register a new visitor with the BugReport.
+ BR.addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
+ V.castAs<KnownSVal>(), R, EnableNullFPSuppression));
+ }
+ }
+ }
+
+ for (const Stmt *SubStmt : Head->children())
+ WorkList.push_back(SubStmt);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Visitor that tries to report interesting diagnostics from conditions.
+//===----------------------------------------------------------------------===//
+
+/// Return the tag associated with this visitor. This tag is attached to
+/// all PathDiagnosticPieces created by this visitor.
+const char *ConditionBRVisitor::getTag() {
+ return "ConditionBRVisitor";
+}
+
+PathDiagnosticPiece *ConditionBRVisitor::VisitNode(const ExplodedNode *N,
+ const ExplodedNode *Prev,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+ PathDiagnosticPiece *piece = VisitNodeImpl(N, Prev, BRC, BR);
+ if (piece) {
+ piece->setTag(getTag());
+ if (PathDiagnosticEventPiece *ev = dyn_cast<PathDiagnosticEventPiece>(piece))
+ ev->setPrunable(true, /* override */ false);
+ }
+ return piece;
+}
+
+PathDiagnosticPiece *ConditionBRVisitor::VisitNodeImpl(const ExplodedNode *N,
+ const ExplodedNode *Prev,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+
+ ProgramPoint progPoint = N->getLocation();
+ ProgramStateRef CurrentState = N->getState();
+ ProgramStateRef PrevState = Prev->getState();
+
+ // Compare the GDMs of the states, because that is where constraints
+ // are managed. Note that we only look at nodes that were generated by
+ // the analyzer engine proper, not by checkers.
+ if (CurrentState->getGDM().getRoot() ==
+ PrevState->getGDM().getRoot())
+ return nullptr;
+
+ // If an assumption was made on a branch, it should be caught
+ // here by looking at the state transition.
+ if (Optional<BlockEdge> BE = progPoint.getAs<BlockEdge>()) {
+ const CFGBlock *srcBlk = BE->getSrc();
+ if (const Stmt *term = srcBlk->getTerminator())
+ return VisitTerminator(term, N, srcBlk, BE->getDst(), BR, BRC);
+ return nullptr;
+ }
+
+ if (Optional<PostStmt> PS = progPoint.getAs<PostStmt>()) {
+ // FIXME: Assuming that BugReporter is a GRBugReporter is a layering
+ // violation.
+ const std::pair<const ProgramPointTag *, const ProgramPointTag *> &tags =
+ cast<GRBugReporter>(BRC.getBugReporter()).
+ getEngine().geteagerlyAssumeBinOpBifurcationTags();
+
+ const ProgramPointTag *tag = PS->getTag();
+ if (tag == tags.first)
+ return VisitTrueTest(cast<Expr>(PS->getStmt()), true,
+ BRC, BR, N);
+ if (tag == tags.second)
+ return VisitTrueTest(cast<Expr>(PS->getStmt()), false,
+ BRC, BR, N);
+
+ return nullptr;
+ }
+
+ return nullptr;
+}
+
+PathDiagnosticPiece *
+ConditionBRVisitor::VisitTerminator(const Stmt *Term,
+ const ExplodedNode *N,
+ const CFGBlock *srcBlk,
+ const CFGBlock *dstBlk,
+ BugReport &R,
+ BugReporterContext &BRC) {
+ const Expr *Cond = nullptr;
+
+ switch (Term->getStmtClass()) {
+ default:
+ return nullptr;
+ case Stmt::IfStmtClass:
+ Cond = cast<IfStmt>(Term)->getCond();
+ break;
+ case Stmt::ConditionalOperatorClass:
+ Cond = cast<ConditionalOperator>(Term)->getCond();
+ break;
+ }
+
+ assert(Cond);
+ assert(srcBlk->succ_size() == 2);
+ const bool tookTrue = *(srcBlk->succ_begin()) == dstBlk;
+ return VisitTrueTest(Cond, tookTrue, BRC, R, N);
+}
+
+PathDiagnosticPiece *
+ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
+ bool tookTrue,
+ BugReporterContext &BRC,
+ BugReport &R,
+ const ExplodedNode *N) {
+
+ const Expr *Ex = Cond;
+
+ while (true) {
+ Ex = Ex->IgnoreParenCasts();
+ switch (Ex->getStmtClass()) {
+ default:
+ return nullptr;
+ case Stmt::BinaryOperatorClass:
+ return VisitTrueTest(Cond, cast<BinaryOperator>(Ex), tookTrue, BRC,
+ R, N);
+ case Stmt::DeclRefExprClass:
+ return VisitTrueTest(Cond, cast<DeclRefExpr>(Ex), tookTrue, BRC,
+ R, N);
+ case Stmt::UnaryOperatorClass: {
+ const UnaryOperator *UO = cast<UnaryOperator>(Ex);
+ if (UO->getOpcode() == UO_LNot) {
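+ // e.g. taking '!p' as true means 'p' itself was false; flip the sense
+ // and continue with the subexpression.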
+ tookTrue = !tookTrue;
+ Ex = UO->getSubExpr();
+ continue;
+ }
+ return nullptr;
+ }
+ }
+ }
+}
+
+bool ConditionBRVisitor::patternMatch(const Expr *Ex, raw_ostream &Out,
+ BugReporterContext &BRC,
+ BugReport &report,
+ const ExplodedNode *N,
+ Optional<bool> &prunable) {
+ const Expr *OriginalExpr = Ex;
+ Ex = Ex->IgnoreParenCasts();
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Ex)) {
+ const bool quotes = isa<VarDecl>(DR->getDecl());
+ if (quotes) {
+ Out << '\'';
+ const LocationContext *LCtx = N->getLocationContext();
+ const ProgramState *state = N->getState().get();
+ if (const MemRegion *R = state->getLValue(cast<VarDecl>(DR->getDecl()),
+ LCtx).getAsRegion()) {
+ if (report.isInteresting(R))
+ prunable = false;
+ else {
+ const ProgramState *state = N->getState().get();
+ SVal V = state->getSVal(R);
+ if (report.isInteresting(V))
+ prunable = false;
+ }
+ }
+ }
+ Out << DR->getDecl()->getDeclName().getAsString();
+ if (quotes)
+ Out << '\'';
+ return quotes;
+ }
+
+ if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(Ex)) {
+ QualType OriginalTy = OriginalExpr->getType();
+ if (OriginalTy->isPointerType()) {
+ if (IL->getValue() == 0) {
+ Out << "null";
+ return false;
+ }
+ }
+ else if (OriginalTy->isObjCObjectPointerType()) {
+ if (IL->getValue() == 0) {
+ Out << "nil";
+ return false;
+ }
+ }
+
+ Out << IL->getValue();
+ return false;
+ }
+
+ return false;
+}
+
+PathDiagnosticPiece *
+ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
+ const BinaryOperator *BExpr,
+ const bool tookTrue,
+ BugReporterContext &BRC,
+ BugReport &R,
+ const ExplodedNode *N) {
+
+ bool shouldInvert = false;
+ Optional<bool> shouldPrune;
+
+ SmallString<128> LhsString, RhsString;
+ {
+ llvm::raw_svector_ostream OutLHS(LhsString), OutRHS(RhsString);
+ const bool isVarLHS = patternMatch(BExpr->getLHS(), OutLHS, BRC, R, N,
+ shouldPrune);
+ const bool isVarRHS = patternMatch(BExpr->getRHS(), OutRHS, BRC, R, N,
+ shouldPrune);
+
+ shouldInvert = !isVarLHS && isVarRHS;
+ }
+
+ BinaryOperator::Opcode Op = BExpr->getOpcode();
+
+ if (BinaryOperator::isAssignmentOp(Op)) {
+ // For assignment operators, all that we care about is that the LHS
+ // evaluates to "true" or "false".
+ return VisitConditionVariable(LhsString, BExpr->getLHS(), tookTrue,
+ BRC, R, N);
+ }
+
+ // For non-assignment operations, we require that we can understand
+ // both the LHS and RHS.
+ if (LhsString.empty() || RhsString.empty() ||
+ !BinaryOperator::isComparisonOp(Op))
+ return nullptr;
+
+ // Should we invert the strings if the LHS is not a variable name?
+ SmallString<256> buf;
+ llvm::raw_svector_ostream Out(buf);
+ Out << "Assuming " << (shouldInvert ? RhsString : LhsString) << " is ";
+
+ // Do we need to invert the opcode?
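+ // e.g. '0 < x' was printed with the operands swapped above, so BO_LT
+ // becomes BO_GT and we emit "Assuming 'x' is > 0".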
+ if (shouldInvert)
+ switch (Op) {
+ default: break;
+ case BO_LT: Op = BO_GT; break;
+ case BO_GT: Op = BO_LT; break;
+ case BO_LE: Op = BO_GE; break;
+ case BO_GE: Op = BO_LE; break;
+ }
+
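+ // On the false branch, report the negated relation, e.g. 'x == 0' becomes
+ // "Assuming 'x' is not equal to 0".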
+ if (!tookTrue)
+ switch (Op) {
+ case BO_EQ: Op = BO_NE; break;
+ case BO_NE: Op = BO_EQ; break;
+ case BO_LT: Op = BO_GE; break;
+ case BO_GT: Op = BO_LE; break;
+ case BO_LE: Op = BO_GT; break;
+ case BO_GE: Op = BO_LT; break;
+ default:
+ return nullptr;
+ }
+
+ switch (Op) {
+ case BO_EQ:
+ Out << "equal to ";
+ break;
+ case BO_NE:
+ Out << "not equal to ";
+ break;
+ default:
+ Out << BinaryOperator::getOpcodeStr(Op) << ' ';
+ break;
+ }
+
+ Out << (shouldInvert ? LhsString : RhsString);
+ const LocationContext *LCtx = N->getLocationContext();
+ PathDiagnosticLocation Loc(Cond, BRC.getSourceManager(), LCtx);
+ PathDiagnosticEventPiece *event =
+ new PathDiagnosticEventPiece(Loc, Out.str());
+ if (shouldPrune.hasValue())
+ event->setPrunable(shouldPrune.getValue());
+ return event;
+}
+
+PathDiagnosticPiece *
+ConditionBRVisitor::VisitConditionVariable(StringRef LhsString,
+ const Expr *CondVarExpr,
+ const bool tookTrue,
+ BugReporterContext &BRC,
+ BugReport &report,
+ const ExplodedNode *N) {
+ // FIXME: If there's already a constraint tracker for this variable,
+ // we shouldn't emit anything here (cf. the double note in
+ // test/Analysis/inlining/path-notes.c).
+ SmallString<256> buf;
+ llvm::raw_svector_ostream Out(buf);
+ Out << "Assuming " << LhsString << " is ";
+
+ QualType Ty = CondVarExpr->getType();
+
+ if (Ty->isPointerType())
+ Out << (tookTrue ? "not null" : "null");
+ else if (Ty->isObjCObjectPointerType())
+ Out << (tookTrue ? "not nil" : "nil");
+ else if (Ty->isBooleanType())
+ Out << (tookTrue ? "true" : "false");
+ else if (Ty->isIntegralOrEnumerationType())
+ Out << (tookTrue ? "non-zero" : "zero");
+ else
+ return nullptr;
+
+ const LocationContext *LCtx = N->getLocationContext();
+ PathDiagnosticLocation Loc(CondVarExpr, BRC.getSourceManager(), LCtx);
+ PathDiagnosticEventPiece *event =
+ new PathDiagnosticEventPiece(Loc, Out.str());
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(CondVarExpr)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ const ProgramState *state = N->getState().get();
+ if (const MemRegion *R = state->getLValue(VD, LCtx).getAsRegion()) {
+ if (report.isInteresting(R))
+ event->setPrunable(false);
+ }
+ }
+ }
+
+ return event;
+}
+
+PathDiagnosticPiece *
+ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
+ const DeclRefExpr *DR,
+ const bool tookTrue,
+ BugReporterContext &BRC,
+ BugReport &report,
+ const ExplodedNode *N) {
+
+ const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
+ if (!VD)
+ return nullptr;
+
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream Out(Buf);
+
+ Out << "Assuming '" << VD->getDeclName() << "' is ";
+
+ QualType VDTy = VD->getType();
+
+ if (VDTy->isPointerType())
+ Out << (tookTrue ? "non-null" : "null");
+ else if (VDTy->isObjCObjectPointerType())
+ Out << (tookTrue ? "non-nil" : "nil");
+ else if (VDTy->isScalarType())
+ Out << (tookTrue ? "not equal to 0" : "0");
+ else
+ return nullptr;
+
+ const LocationContext *LCtx = N->getLocationContext();
+ PathDiagnosticLocation Loc(Cond, BRC.getSourceManager(), LCtx);
+ PathDiagnosticEventPiece *event =
+ new PathDiagnosticEventPiece(Loc, Out.str());
+
+ const ProgramState *state = N->getState().get();
+ if (const MemRegion *R = state->getLValue(VD, LCtx).getAsRegion()) {
+ if (report.isInteresting(R))
+ event->setPrunable(false);
+ else {
+ SVal V = state->getSVal(R);
+ if (report.isInteresting(V))
+ event->setPrunable(false);
+ }
+ }
+ return event;
+}
+
+
+// FIXME: Copied from ExprEngineCallAndReturn.cpp.
+static bool isInStdNamespace(const Decl *D) {
+ const DeclContext *DC = D->getDeclContext()->getEnclosingNamespaceContext();
+ const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
+ if (!ND)
+ return false;
+
+ while (const NamespaceDecl *Parent = dyn_cast<NamespaceDecl>(ND->getParent()))
+ ND = Parent;
+
+ return ND->isStdNamespace();
+}
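The walk to the outermost enclosing namespace matters for standard library implementations that nest their declarations. An illustrative (not compilable in a conforming program) libc++-style layout:

    // A decl in std::__1 still counts as 'std' because the parent chain
    // is followed to the outermost enclosing namespace.
    namespace std {
    inline namespace __1 {
    template <class T> class list { /* ... */ };
    } // namespace __1
    } // namespace std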
+
+std::unique_ptr<PathDiagnosticPiece>
+LikelyFalsePositiveSuppressionBRVisitor::getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *N,
+ BugReport &BR) {
+ // Here we suppress false positives coming from system headers. This list is
+ // based on known issues.
+ ExprEngine &Eng = BRC.getBugReporter().getEngine();
+ AnalyzerOptions &Options = Eng.getAnalysisManager().options;
+ const Decl *D = N->getLocationContext()->getDecl();
+
+ if (isInStdNamespace(D)) {
+    // Skip reports within the 'std' namespace. Although these can sometimes be
+    // the user's fault, we currently don't report them very well. Note that
+    // this will not help for any other data structure libraries, like TR1,
+    // Boost, or llvm/ADT.

+ if (Options.shouldSuppressFromCXXStandardLibrary()) {
+ BR.markInvalid(getTag(), nullptr);
+ return nullptr;
+
+ } else {
+ // If the complete 'std' suppression is not enabled, suppress reports
+ // from the 'std' namespace that are known to produce false positives.
+
+ // The analyzer issues a false use-after-free when std::list::pop_front
+ // or std::list::pop_back are called multiple times because we cannot
+        // reason about the internal invariants of the data structure.
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ const CXXRecordDecl *CD = MD->getParent();
+ if (CD->getName() == "list") {
+ BR.markInvalid(getTag(), nullptr);
+ return nullptr;
+ }
+ }
+
+ // The analyzer issues a false positive when the constructor of
+ // std::__independent_bits_engine from algorithms is used.
+ if (const CXXConstructorDecl *MD = dyn_cast<CXXConstructorDecl>(D)) {
+ const CXXRecordDecl *CD = MD->getParent();
+ if (CD->getName() == "__independent_bits_engine") {
+ BR.markInvalid(getTag(), nullptr);
+ return nullptr;
+ }
+ }
+
+ // The analyzer issues a false positive on
+ // std::basic_string<uint8_t> v; v.push_back(1);
+ // and
+ // std::u16string s; s += u'a';
+ // because we cannot reason about the internal invariants of the
+        // data structure.
+ for (const LocationContext *LCtx = N->getLocationContext(); LCtx;
+ LCtx = LCtx->getParent()) {
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(LCtx->getDecl());
+ if (!MD)
+ continue;
+
+ const CXXRecordDecl *CD = MD->getParent();
+ if (CD->getName() == "basic_string") {
+ BR.markInvalid(getTag(), nullptr);
+ return nullptr;
+ }
+ }
+ }
+ }
+
+ // Skip reports within the sys/queue.h macros as we do not have the ability to
+ // reason about data structure shapes.
+ SourceManager &SM = BRC.getSourceManager();
+ FullSourceLoc Loc = BR.getLocation(SM).asLocation();
+ while (Loc.isMacroID()) {
+ Loc = Loc.getSpellingLoc();
+ if (SM.getFilename(Loc).endswith("sys/queue.h")) {
+ BR.markInvalid(getTag(), nullptr);
+ return nullptr;
+ }
+ }
+
+ return nullptr;
+}
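Hypothetical user code of the kind the 'list' suppression above targets; without the suppression the analyzer may flag a use-after-free on the second pop because it cannot model the container's internal node ownership:

    #include <list>

    void drainTwo(std::list<int> &L) {
      if (L.size() >= 2) {
        L.pop_front();  // analyzer loses track of the list's internal nodes
        L.pop_front();  // would otherwise be reported as use-after-free
      }
    }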
+
+PathDiagnosticPiece *
+UndefOrNullArgVisitor::VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+
+ ProgramStateRef State = N->getState();
+ ProgramPoint ProgLoc = N->getLocation();
+
+ // We are only interested in visiting CallEnter nodes.
+ Optional<CallEnter> CEnter = ProgLoc.getAs<CallEnter>();
+ if (!CEnter)
+ return nullptr;
+
+ // Check if one of the arguments is the region the visitor is tracking.
+ CallEventManager &CEMgr = BRC.getStateManager().getCallEventManager();
+ CallEventRef<> Call = CEMgr.getCaller(CEnter->getCalleeContext(), State);
+ unsigned Idx = 0;
+ ArrayRef<ParmVarDecl*> parms = Call->parameters();
+
+ for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
+ I != E; ++I, ++Idx) {
+ const MemRegion *ArgReg = Call->getArgSVal(Idx).getAsRegion();
+
+ // Are we tracking the argument or its subregion?
+    if (!ArgReg || (ArgReg != R && !R->isSubRegionOf(ArgReg->StripCasts())))
+ continue;
+
+ // Check the function parameter type.
+ const ParmVarDecl *ParamDecl = *I;
+ assert(ParamDecl && "Formal parameter has no decl?");
+ QualType T = ParamDecl->getType();
+
+ if (!(T->isAnyPointerType() || T->isReferenceType())) {
+ // Function can only change the value passed in by address.
+ continue;
+ }
+
+ // If it is a const pointer value, the function does not intend to
+ // change the value.
+ if (T->getPointeeType().isConstQualified())
+ continue;
+
+ // Mark the call site (LocationContext) as interesting if the value of the
+ // argument is undefined or '0'/'NULL'.
+ SVal BoundVal = State->getSVal(R);
+ if (BoundVal.isUndef() || BoundVal.isZeroConstant()) {
+ BR.markInteresting(CEnter->getCalleeContext());
+ return nullptr;
+ }
+ }
+ return nullptr;
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CallEvent.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
new file mode 100644
index 0000000..69af09b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -0,0 +1,1038 @@
+//===- CallEvent.cpp - Wrapper for all function and method calls --*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file defines CallEvent and its subclasses, which represent path-
+/// sensitive instances of different kinds of function and method calls
+/// (C, C++, and Objective-C).
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+QualType CallEvent::getResultType() const {
+ const Expr *E = getOriginExpr();
+ assert(E && "Calls without origin expressions do not have results");
+ QualType ResultTy = E->getType();
+
+ ASTContext &Ctx = getState()->getStateManager().getContext();
+
+ // A function that returns a reference to 'int' will have a result type
+ // of simply 'int'. Check the origin expr's value kind to recover the
+ // proper type.
+ switch (E->getValueKind()) {
+ case VK_LValue:
+ ResultTy = Ctx.getLValueReferenceType(ResultTy);
+ break;
+ case VK_XValue:
+ ResultTy = Ctx.getRValueReferenceType(ResultTy);
+ break;
+ case VK_RValue:
+ // No adjustment is necessary.
+ break;
+ }
+
+ return ResultTy;
+}
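Illustrative declarations for the three value kinds the adjustment above handles; the call expression's type is 'int' in all three cases, and the value kind recovers the reference:

    int   f();  // call is a prvalue (VK_RValue): result type stays 'int'
    int  &g();  // call is an lvalue (VK_LValue): result type becomes 'int &'
    int &&h();  // call is an xvalue (VK_XValue): result type becomes 'int &&'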
+
+static bool isCallback(QualType T) {
+  // If a parameter is a block or a callback, assume it can modify pointers.
+ if (T->isBlockPointerType() ||
+ T->isFunctionPointerType() ||
+ T->isObjCSelType())
+ return true;
+
+  // Check if a callback is passed inside a struct (whether the struct is
+  // passed by reference or by value). Dig just one level into the struct
+  // for now.
+
+ if (T->isAnyPointerType() || T->isReferenceType())
+ T = T->getPointeeType();
+
+ if (const RecordType *RT = T->getAsStructureType()) {
+ const RecordDecl *RD = RT->getDecl();
+ for (const auto *I : RD->fields()) {
+ QualType FieldT = I->getType();
+ if (FieldT->isBlockPointerType() || FieldT->isFunctionPointerType())
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool isVoidPointerToNonConst(QualType T) {
+ if (const PointerType *PT = T->getAs<PointerType>()) {
+ QualType PointeeTy = PT->getPointeeType();
+ if (PointeeTy.isConstQualified())
+ return false;
+ return PointeeTy->isVoidType();
+ } else
+ return false;
+}
+
+bool CallEvent::hasNonNullArgumentsWithType(bool (*Condition)(QualType)) const {
+ unsigned NumOfArgs = getNumArgs();
+
+  // If the call is made through a function pointer, assume the function does
+  // not satisfy the condition.
+ // TODO: We could check the types of the arguments here.
+ if (!getDecl())
+ return false;
+
+ unsigned Idx = 0;
+ for (CallEvent::param_type_iterator I = param_type_begin(),
+ E = param_type_end();
+ I != E && Idx < NumOfArgs; ++I, ++Idx) {
+    // If the argument is 0, it's harmless.
+ if (getArgSVal(Idx).isZeroConstant())
+ continue;
+
+ if (Condition(*I))
+ return true;
+ }
+ return false;
+}
+
+bool CallEvent::hasNonZeroCallbackArg() const {
+ return hasNonNullArgumentsWithType(isCallback);
+}
+
+bool CallEvent::hasVoidPointerToNonConstArg() const {
+ return hasNonNullArgumentsWithType(isVoidPointerToNonConst);
+}
+
+bool CallEvent::isGlobalCFunction(StringRef FunctionName) const {
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(getDecl());
+ if (!FD)
+ return false;
+
+ return CheckerContext::isCLibraryFunction(FD, FunctionName);
+}
+
+/// \brief Returns true if a type is a pointer-to-const or reference-to-const
+/// with no further indirection.
+static bool isPointerToConst(QualType Ty) {
+ QualType PointeeTy = Ty->getPointeeType();
+ if (PointeeTy == QualType())
+ return false;
+ if (!PointeeTy.isConstQualified())
+ return false;
+ if (PointeeTy->isAnyPointerType())
+ return false;
+ return true;
+}
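Illustrative cases for the predicate above:

    const int *p;         // -> true  (pointee is const and not a pointer)
    int *q;               // -> false (pointee is not const)
    const int *const *r;  // -> false (pointee is itself a pointer)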
+
+// Try to retrieve the function declaration and find the function parameter
+// types which are pointers/references to a non-pointer const.
+// We will not invalidate the corresponding argument regions.
+static void findPtrToConstParams(llvm::SmallSet<unsigned, 4> &PreserveArgs,
+ const CallEvent &Call) {
+ unsigned Idx = 0;
+ for (CallEvent::param_type_iterator I = Call.param_type_begin(),
+ E = Call.param_type_end();
+ I != E; ++I, ++Idx) {
+ if (isPointerToConst(*I))
+ PreserveArgs.insert(Idx);
+ }
+}
+
+ProgramStateRef CallEvent::invalidateRegions(unsigned BlockCount,
+ ProgramStateRef Orig) const {
+ ProgramStateRef Result = (Orig ? Orig : getState());
+
+ // Don't invalidate anything if the callee is marked pure/const.
+ if (const Decl *callee = getDecl())
+ if (callee->hasAttr<PureAttr>() || callee->hasAttr<ConstAttr>())
+ return Result;
+
+ SmallVector<SVal, 8> ValuesToInvalidate;
+ RegionAndSymbolInvalidationTraits ETraits;
+
+ getExtraInvalidatedValues(ValuesToInvalidate, &ETraits);
+
+ // Indexes of arguments whose values will be preserved by the call.
+ llvm::SmallSet<unsigned, 4> PreserveArgs;
+ if (!argumentsMayEscape())
+ findPtrToConstParams(PreserveArgs, *this);
+
+ for (unsigned Idx = 0, Count = getNumArgs(); Idx != Count; ++Idx) {
+ // Mark this region for invalidation. We batch invalidate regions
+ // below for efficiency.
+ if (PreserveArgs.count(Idx))
+ if (const MemRegion *MR = getArgSVal(Idx).getAsRegion())
+ ETraits.setTrait(MR->StripCasts(),
+ RegionAndSymbolInvalidationTraits::TK_PreserveContents);
+ // TODO: Factor this out + handle the lower level const pointers.
+
+ ValuesToInvalidate.push_back(getArgSVal(Idx));
+ }
+
+ // Invalidate designated regions using the batch invalidation API.
+ // NOTE: Even if RegionsToInvalidate is empty, we may still invalidate
+ // global variables.
+ return Result->invalidateRegions(ValuesToInvalidate, getOriginExpr(),
+ BlockCount, getLocationContext(),
+ /*CausedByPointerEscape*/ true,
+ /*Symbols=*/nullptr, this, &ETraits);
+}
+
+ProgramPoint CallEvent::getProgramPoint(bool IsPreVisit,
+ const ProgramPointTag *Tag) const {
+ if (const Expr *E = getOriginExpr()) {
+ if (IsPreVisit)
+ return PreStmt(E, getLocationContext(), Tag);
+ return PostStmt(E, getLocationContext(), Tag);
+ }
+
+ const Decl *D = getDecl();
+ assert(D && "Cannot get a program point without a statement or decl");
+
+ SourceLocation Loc = getSourceRange().getBegin();
+ if (IsPreVisit)
+ return PreImplicitCall(D, Loc, getLocationContext(), Tag);
+ return PostImplicitCall(D, Loc, getLocationContext(), Tag);
+}
+
+SVal CallEvent::getArgSVal(unsigned Index) const {
+ const Expr *ArgE = getArgExpr(Index);
+ if (!ArgE)
+ return UnknownVal();
+ return getSVal(ArgE);
+}
+
+SourceRange CallEvent::getArgSourceRange(unsigned Index) const {
+ const Expr *ArgE = getArgExpr(Index);
+ if (!ArgE)
+ return SourceRange();
+ return ArgE->getSourceRange();
+}
+
+SVal CallEvent::getReturnValue() const {
+ const Expr *E = getOriginExpr();
+ if (!E)
+ return UndefinedVal();
+ return getSVal(E);
+}
+
+LLVM_DUMP_METHOD void CallEvent::dump() const { dump(llvm::errs()); }
+
+void CallEvent::dump(raw_ostream &Out) const {
+ ASTContext &Ctx = getState()->getStateManager().getContext();
+ if (const Expr *E = getOriginExpr()) {
+ E->printPretty(Out, nullptr, Ctx.getPrintingPolicy());
+ Out << "\n";
+ return;
+ }
+
+ if (const Decl *D = getDecl()) {
+ Out << "Call to ";
+ D->print(Out, Ctx.getPrintingPolicy());
+ return;
+ }
+
+ // FIXME: a string representation of the kind would be nice.
+ Out << "Unknown call (type " << getKind() << ")";
+}
+
+
+bool CallEvent::isCallStmt(const Stmt *S) {
+ return isa<CallExpr>(S) || isa<ObjCMessageExpr>(S)
+ || isa<CXXConstructExpr>(S)
+ || isa<CXXNewExpr>(S);
+}
+
+QualType CallEvent::getDeclaredResultType(const Decl *D) {
+ assert(D);
+ if (const FunctionDecl* FD = dyn_cast<FunctionDecl>(D))
+ return FD->getReturnType();
+ if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(D))
+ return MD->getReturnType();
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ // Blocks are difficult because the return type may not be stored in the
+ // BlockDecl itself. The AST should probably be enhanced, but for now we
+ // just do what we can.
+ // If the block is declared without an explicit argument list, the
+ // signature-as-written just includes the return type, not the entire
+ // function type.
+ // FIXME: All blocks should have signatures-as-written, even if the return
+ // type is inferred. (That's signified with a dependent result type.)
+ if (const TypeSourceInfo *TSI = BD->getSignatureAsWritten()) {
+ QualType Ty = TSI->getType();
+ if (const FunctionType *FT = Ty->getAs<FunctionType>())
+ Ty = FT->getReturnType();
+ if (!Ty->isDependentType())
+ return Ty;
+ }
+
+ return QualType();
+ }
+
+ llvm_unreachable("unknown callable kind");
+}
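A hedged illustration of the block cases handled above (requires clang's -fblocks extension; hypothetical snippets): a literal written without an explicit return type leaves only an inferred type, while an explicitly typed literal carries it in the signature-as-written.

    typedef int (^IntBlock)(int);

    IntBlock a = ^(int x) { return x + 1; };     // return type inferred: 'int'
    IntBlock b = ^int(int x) { return x + 1; };  // written signature has 'int'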
+
+bool CallEvent::isVariadic(const Decl *D) {
+ assert(D);
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ return FD->isVariadic();
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ return MD->isVariadic();
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
+ return BD->isVariadic();
+
+ llvm_unreachable("unknown callable kind");
+}
+
+static void addParameterValuesToBindings(const StackFrameContext *CalleeCtx,
+ CallEvent::BindingsTy &Bindings,
+ SValBuilder &SVB,
+ const CallEvent &Call,
+ ArrayRef<ParmVarDecl*> parameters) {
+ MemRegionManager &MRMgr = SVB.getRegionManager();
+
+ // If the function has fewer parameters than the call has arguments, we simply
+ // do not bind any values to them.
+ unsigned NumArgs = Call.getNumArgs();
+ unsigned Idx = 0;
+ ArrayRef<ParmVarDecl*>::iterator I = parameters.begin(), E = parameters.end();
+ for (; I != E && Idx < NumArgs; ++I, ++Idx) {
+ const ParmVarDecl *ParamDecl = *I;
+ assert(ParamDecl && "Formal parameter has no decl?");
+
+ SVal ArgVal = Call.getArgSVal(Idx);
+ if (!ArgVal.isUnknown()) {
+ Loc ParamLoc = SVB.makeLoc(MRMgr.getVarRegion(ParamDecl, CalleeCtx));
+ Bindings.push_back(std::make_pair(ParamLoc, ArgVal));
+ }
+ }
+
+ // FIXME: Variadic arguments are not handled at all right now.
+}
+
+ArrayRef<ParmVarDecl*> AnyFunctionCall::parameters() const {
+ const FunctionDecl *D = getDecl();
+ if (!D)
+ return None;
+ return D->parameters();
+}
+
+void AnyFunctionCall::getInitialStackFrameContents(
+ const StackFrameContext *CalleeCtx,
+ BindingsTy &Bindings) const {
+ const FunctionDecl *D = cast<FunctionDecl>(CalleeCtx->getDecl());
+ SValBuilder &SVB = getState()->getStateManager().getSValBuilder();
+ addParameterValuesToBindings(CalleeCtx, Bindings, SVB, *this,
+ D->parameters());
+}
+
+bool AnyFunctionCall::argumentsMayEscape() const {
+ if (CallEvent::argumentsMayEscape() || hasVoidPointerToNonConstArg())
+ return true;
+
+ const FunctionDecl *D = getDecl();
+ if (!D)
+ return true;
+
+ const IdentifierInfo *II = D->getIdentifier();
+ if (!II)
+ return false;
+
+  // This set of "escaping" APIs is based on known implementations:
+
+  // - 'int pthread_setspecific(pthread_key_t k, const void *)' stores a
+  //   value into thread local storage. The value can later be retrieved with
+  //   'void *pthread_getspecific(pthread_key_t)'. So even though the
+  //   parameter is 'const void *', the region escapes through the call.
+ if (II->isStr("pthread_setspecific"))
+ return true;
+
+ // - xpc_connection_set_context stores a value which can be retrieved later
+ // with xpc_connection_get_context.
+ if (II->isStr("xpc_connection_set_context"))
+ return true;
+
+ // - funopen - sets a buffer for future IO calls.
+ if (II->isStr("funopen"))
+ return true;
+
+ StringRef FName = II->getName();
+
+ // - CoreFoundation functions that end with "NoCopy" can free a passed-in
+ // buffer even if it is const.
+ if (FName.endswith("NoCopy"))
+ return true;
+
+  // - NSXXInsertXX, for example NSMapInsertIfAbsent, since the inserted
+  //   values can later be deallocated by NSMapRemove.
+ if (FName.startswith("NS") && (FName.find("Insert") != StringRef::npos))
+ return true;
+
+ // - Many CF containers allow objects to escape through custom
+ // allocators/deallocators upon container construction. (PR12101)
+ if (FName.startswith("CF") || FName.startswith("CG")) {
+ return StrInStrNoCase(FName, "InsertValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "AddValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "SetValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "WithData") != StringRef::npos ||
+ StrInStrNoCase(FName, "AppendValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "SetAttribute") != StringRef::npos;
+ }
+
+ return false;
+}
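A hypothetical caller illustrating the pthread_setspecific escape noted above: the buffer outlives the call even though the parameter is declared 'const void *', so it must not be reported as leaked at function exit.

    #include <pthread.h>
    #include <stdlib.h>

    void stash(pthread_key_t key) {
      void *buf = malloc(16);
      pthread_setspecific(key, buf);  // 'buf' escapes into TLS; not a leak
    }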
+
+
+const FunctionDecl *SimpleFunctionCall::getDecl() const {
+ const FunctionDecl *D = getOriginExpr()->getDirectCallee();
+ if (D)
+ return D;
+
+ return getSVal(getOriginExpr()->getCallee()).getAsFunctionDecl();
+}
+
+
+const FunctionDecl *CXXInstanceCall::getDecl() const {
+ const CallExpr *CE = cast_or_null<CallExpr>(getOriginExpr());
+ if (!CE)
+ return AnyFunctionCall::getDecl();
+
+ const FunctionDecl *D = CE->getDirectCallee();
+ if (D)
+ return D;
+
+ return getSVal(CE->getCallee()).getAsFunctionDecl();
+}
+
+void CXXInstanceCall::getExtraInvalidatedValues(
+ ValueList &Values, RegionAndSymbolInvalidationTraits *ETraits) const {
+ SVal ThisVal = getCXXThisVal();
+ Values.push_back(ThisVal);
+
+ // Don't invalidate if the method is const and there are no mutable fields.
+ if (const CXXMethodDecl *D = cast_or_null<CXXMethodDecl>(getDecl())) {
+ if (!D->isConst())
+ return;
+ // Get the record decl for the class of 'This'. D->getParent() may return a
+ // base class decl, rather than the class of the instance which needs to be
+ // checked for mutable fields.
+ const Expr *Ex = getCXXThisExpr()->ignoreParenBaseCasts();
+ const CXXRecordDecl *ParentRecord = Ex->getType()->getAsCXXRecordDecl();
+ if (!ParentRecord || ParentRecord->hasMutableFields())
+ return;
+ // Preserve CXXThis.
+ const MemRegion *ThisRegion = ThisVal.getAsRegion();
+ if (!ThisRegion)
+ return;
+
+ ETraits->setTrait(ThisRegion->getBaseRegion(),
+ RegionAndSymbolInvalidationTraits::TK_PreserveContents);
+ }
+}
+
+SVal CXXInstanceCall::getCXXThisVal() const {
+ const Expr *Base = getCXXThisExpr();
+ // FIXME: This doesn't handle an overloaded ->* operator.
+ if (!Base)
+ return UnknownVal();
+
+ SVal ThisVal = getSVal(Base);
+ assert(ThisVal.isUnknownOrUndef() || ThisVal.getAs<Loc>());
+ return ThisVal;
+}
+
+
+RuntimeDefinition CXXInstanceCall::getRuntimeDefinition() const {
+ // Do we have a decl at all?
+ const Decl *D = getDecl();
+ if (!D)
+ return RuntimeDefinition();
+
+ // If the method is non-virtual, we know we can inline it.
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
+ if (!MD->isVirtual())
+ return AnyFunctionCall::getRuntimeDefinition();
+
+ // Do we know the implicit 'this' object being called?
+ const MemRegion *R = getCXXThisVal().getAsRegion();
+ if (!R)
+ return RuntimeDefinition();
+
+ // Do we know anything about the type of 'this'?
+ DynamicTypeInfo DynType = getDynamicTypeInfo(getState(), R);
+ if (!DynType.isValid())
+ return RuntimeDefinition();
+
+ // Is the type a C++ class? (This is mostly a defensive check.)
+ QualType RegionType = DynType.getType()->getPointeeType();
+ assert(!RegionType.isNull() && "DynamicTypeInfo should always be a pointer.");
+
+ const CXXRecordDecl *RD = RegionType->getAsCXXRecordDecl();
+ if (!RD || !RD->hasDefinition())
+ return RuntimeDefinition();
+
+ // Find the decl for this method in that class.
+ const CXXMethodDecl *Result = MD->getCorrespondingMethodInClass(RD, true);
+ if (!Result) {
+ // We might not even get the original statically-resolved method due to
+ // some particularly nasty casting (e.g. casts to sister classes).
+ // However, we should at least be able to search up and down our own class
+ // hierarchy, and some real bugs have been caught by checking this.
+ assert(!RD->isDerivedFrom(MD->getParent()) && "Couldn't find known method");
+
+ // FIXME: This is checking that our DynamicTypeInfo is at least as good as
+ // the static type. However, because we currently don't update
+ // DynamicTypeInfo when an object is cast, we can't actually be sure the
+ // DynamicTypeInfo is up to date. This assert should be re-enabled once
+ // this is fixed. <rdar://problem/12287087>
+ //assert(!MD->getParent()->isDerivedFrom(RD) && "Bad DynamicTypeInfo");
+
+ return RuntimeDefinition();
+ }
+
+ // Does the decl that we found have an implementation?
+ const FunctionDecl *Definition;
+ if (!Result->hasBody(Definition))
+ return RuntimeDefinition();
+
+ // We found a definition. If we're not sure that this devirtualization is
+ // actually what will happen at runtime, make sure to provide the region so
+ // that ExprEngine can decide what to do with it.
+ if (DynType.canBeASubClass())
+ return RuntimeDefinition(Definition, R->StripCasts());
+ return RuntimeDefinition(Definition, /*DispatchRegion=*/nullptr);
+}
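An illustrative devirtualization case for the logic above (assumed types): the dynamic type of the region is known precisely, so the virtual call resolves to the derived override and becomes eligible for inlining.

    struct Base    { virtual int get() { return 0; } };
    struct Derived : Base { int get() override { return 1; } };

    int call() {
      Derived d;
      Base *b = &d;     // DynamicTypeInfo for the region is 'Derived'
      return b->get();  // resolved to Derived::get
    }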
+
+void CXXInstanceCall::getInitialStackFrameContents(
+ const StackFrameContext *CalleeCtx,
+ BindingsTy &Bindings) const {
+ AnyFunctionCall::getInitialStackFrameContents(CalleeCtx, Bindings);
+
+ // Handle the binding of 'this' in the new stack frame.
+ SVal ThisVal = getCXXThisVal();
+ if (!ThisVal.isUnknown()) {
+ ProgramStateManager &StateMgr = getState()->getStateManager();
+ SValBuilder &SVB = StateMgr.getSValBuilder();
+
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CalleeCtx->getDecl());
+ Loc ThisLoc = SVB.getCXXThis(MD, CalleeCtx);
+
+ // If we devirtualized to a different member function, we need to make sure
+ // we have the proper layering of CXXBaseObjectRegions.
+ if (MD->getCanonicalDecl() != getDecl()->getCanonicalDecl()) {
+ ASTContext &Ctx = SVB.getContext();
+ const CXXRecordDecl *Class = MD->getParent();
+ QualType Ty = Ctx.getPointerType(Ctx.getRecordType(Class));
+
+ // FIXME: CallEvent maybe shouldn't be directly accessing StoreManager.
+ bool Failed;
+ ThisVal = StateMgr.getStoreManager().evalDynamicCast(ThisVal, Ty, Failed);
+ assert(!Failed && "Calling an incorrectly devirtualized method");
+ }
+
+ if (!ThisVal.isUnknown())
+ Bindings.push_back(std::make_pair(ThisLoc, ThisVal));
+ }
+}
+
+const Expr *CXXMemberCall::getCXXThisExpr() const {
+ return getOriginExpr()->getImplicitObjectArgument();
+}
+
+RuntimeDefinition CXXMemberCall::getRuntimeDefinition() const {
+ // C++11 [expr.call]p1: ...If the selected function is non-virtual, or if the
+ // id-expression in the class member access expression is a qualified-id,
+ // that function is called. Otherwise, its final overrider in the dynamic type
+ // of the object expression is called.
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(getOriginExpr()->getCallee()))
+ if (ME->hasQualifier())
+ return AnyFunctionCall::getRuntimeDefinition();
+
+ return CXXInstanceCall::getRuntimeDefinition();
+}
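A sketch of the qualified-id rule quoted above: an explicit qualifier pins the callee statically, so no devirtualization is attempted.

    struct B { virtual void f(); };
    struct D : B { void f() override; };

    void g(D &d) {
      d.f();     // virtual dispatch: final overrider in the dynamic type
      d.B::f();  // qualified-id: B::f is called regardless of dynamic type
    }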
+
+
+const Expr *CXXMemberOperatorCall::getCXXThisExpr() const {
+ return getOriginExpr()->getArg(0);
+}
+
+
+const BlockDataRegion *BlockCall::getBlockRegion() const {
+ const Expr *Callee = getOriginExpr()->getCallee();
+ const MemRegion *DataReg = getSVal(Callee).getAsRegion();
+
+ return dyn_cast_or_null<BlockDataRegion>(DataReg);
+}
+
+ArrayRef<ParmVarDecl*> BlockCall::parameters() const {
+ const BlockDecl *D = getDecl();
+ if (!D)
+ return nullptr;
+ return D->parameters();
+}
+
+void BlockCall::getExtraInvalidatedValues(ValueList &Values,
+ RegionAndSymbolInvalidationTraits *ETraits) const {
+ // FIXME: This also needs to invalidate captured globals.
+ if (const MemRegion *R = getBlockRegion())
+ Values.push_back(loc::MemRegionVal(R));
+}
+
+void BlockCall::getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
+ BindingsTy &Bindings) const {
+ SValBuilder &SVB = getState()->getStateManager().getSValBuilder();
+ ArrayRef<ParmVarDecl*> Params;
+ if (isConversionFromLambda()) {
+ auto *LambdaOperatorDecl = cast<CXXMethodDecl>(CalleeCtx->getDecl());
+ Params = LambdaOperatorDecl->parameters();
+
+ // For blocks converted from a C++ lambda, the callee declaration is the
+ // operator() method on the lambda so we bind "this" to
+ // the lambda captured by the block.
+ const VarRegion *CapturedLambdaRegion = getRegionStoringCapturedLambda();
+ SVal ThisVal = loc::MemRegionVal(CapturedLambdaRegion);
+ Loc ThisLoc = SVB.getCXXThis(LambdaOperatorDecl, CalleeCtx);
+ Bindings.push_back(std::make_pair(ThisLoc, ThisVal));
+ } else {
+ Params = cast<BlockDecl>(CalleeCtx->getDecl())->parameters();
+ }
+
+ addParameterValuesToBindings(CalleeCtx, Bindings, SVB, *this,
+ Params);
+}
+
+
+SVal CXXConstructorCall::getCXXThisVal() const {
+ if (Data)
+ return loc::MemRegionVal(static_cast<const MemRegion *>(Data));
+ return UnknownVal();
+}
+
+void CXXConstructorCall::getExtraInvalidatedValues(ValueList &Values,
+ RegionAndSymbolInvalidationTraits *ETraits) const {
+ if (Data)
+ Values.push_back(loc::MemRegionVal(static_cast<const MemRegion *>(Data)));
+}
+
+void CXXConstructorCall::getInitialStackFrameContents(
+ const StackFrameContext *CalleeCtx,
+ BindingsTy &Bindings) const {
+ AnyFunctionCall::getInitialStackFrameContents(CalleeCtx, Bindings);
+
+ SVal ThisVal = getCXXThisVal();
+ if (!ThisVal.isUnknown()) {
+ SValBuilder &SVB = getState()->getStateManager().getSValBuilder();
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CalleeCtx->getDecl());
+ Loc ThisLoc = SVB.getCXXThis(MD, CalleeCtx);
+ Bindings.push_back(std::make_pair(ThisLoc, ThisVal));
+ }
+}
+
+SVal CXXDestructorCall::getCXXThisVal() const {
+ if (Data)
+ return loc::MemRegionVal(DtorDataTy::getFromOpaqueValue(Data).getPointer());
+ return UnknownVal();
+}
+
+RuntimeDefinition CXXDestructorCall::getRuntimeDefinition() const {
+ // Base destructors are always called non-virtually.
+ // Skip CXXInstanceCall's devirtualization logic in this case.
+ if (isBaseDestructor())
+ return AnyFunctionCall::getRuntimeDefinition();
+
+ return CXXInstanceCall::getRuntimeDefinition();
+}
+
+ArrayRef<ParmVarDecl*> ObjCMethodCall::parameters() const {
+ const ObjCMethodDecl *D = getDecl();
+ if (!D)
+ return None;
+ return D->parameters();
+}
+
+void
+ObjCMethodCall::getExtraInvalidatedValues(ValueList &Values,
+ RegionAndSymbolInvalidationTraits *ETraits) const {
+ Values.push_back(getReceiverSVal());
+}
+
+SVal ObjCMethodCall::getSelfSVal() const {
+ const LocationContext *LCtx = getLocationContext();
+ const ImplicitParamDecl *SelfDecl = LCtx->getSelfDecl();
+ if (!SelfDecl)
+ return SVal();
+ return getState()->getSVal(getState()->getRegion(SelfDecl, LCtx));
+}
+
+SVal ObjCMethodCall::getReceiverSVal() const {
+ // FIXME: Is this the best way to handle class receivers?
+ if (!isInstanceMessage())
+ return UnknownVal();
+
+ if (const Expr *RecE = getOriginExpr()->getInstanceReceiver())
+ return getSVal(RecE);
+
+ // An instance message with no expression means we are sending to super.
+ // In this case the object reference is the same as 'self'.
+ assert(getOriginExpr()->getReceiverKind() == ObjCMessageExpr::SuperInstance);
+ SVal SelfVal = getSelfSVal();
+ assert(SelfVal.isValid() && "Calling super but not in ObjC method");
+ return SelfVal;
+}
+
+bool ObjCMethodCall::isReceiverSelfOrSuper() const {
+ if (getOriginExpr()->getReceiverKind() == ObjCMessageExpr::SuperInstance ||
+ getOriginExpr()->getReceiverKind() == ObjCMessageExpr::SuperClass)
+ return true;
+
+ if (!isInstanceMessage())
+ return false;
+
+ SVal RecVal = getSVal(getOriginExpr()->getInstanceReceiver());
+
+ return (RecVal == getSelfSVal());
+}
+
+SourceRange ObjCMethodCall::getSourceRange() const {
+ switch (getMessageKind()) {
+ case OCM_Message:
+ return getOriginExpr()->getSourceRange();
+ case OCM_PropertyAccess:
+ case OCM_Subscript:
+ return getContainingPseudoObjectExpr()->getSourceRange();
+ }
+ llvm_unreachable("unknown message kind");
+}
+
+typedef llvm::PointerIntPair<const PseudoObjectExpr *, 2> ObjCMessageDataTy;
+
+const PseudoObjectExpr *ObjCMethodCall::getContainingPseudoObjectExpr() const {
+ assert(Data && "Lazy lookup not yet performed.");
+ assert(getMessageKind() != OCM_Message && "Explicit message send.");
+ return ObjCMessageDataTy::getFromOpaqueValue(Data).getPointer();
+}
+
+ObjCMessageKind ObjCMethodCall::getMessageKind() const {
+ if (!Data) {
+
+ // Find the parent, ignoring implicit casts.
+ ParentMap &PM = getLocationContext()->getParentMap();
+ const Stmt *S = PM.getParentIgnoreParenCasts(getOriginExpr());
+
+ // Check if parent is a PseudoObjectExpr.
+ if (const PseudoObjectExpr *POE = dyn_cast_or_null<PseudoObjectExpr>(S)) {
+ const Expr *Syntactic = POE->getSyntacticForm();
+
+ // This handles the funny case of assigning to the result of a getter.
+ // This can happen if the getter returns a non-const reference.
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(Syntactic))
+ Syntactic = BO->getLHS();
+
+ ObjCMessageKind K;
+ switch (Syntactic->getStmtClass()) {
+ case Stmt::ObjCPropertyRefExprClass:
+ K = OCM_PropertyAccess;
+ break;
+ case Stmt::ObjCSubscriptRefExprClass:
+ K = OCM_Subscript;
+ break;
+ default:
+ // FIXME: Can this ever happen?
+ K = OCM_Message;
+ break;
+ }
+
+ if (K != OCM_Message) {
+ const_cast<ObjCMethodCall *>(this)->Data
+ = ObjCMessageDataTy(POE, K).getOpaqueValue();
+ assert(getMessageKind() == K);
+ return K;
+ }
+ }
+
+ const_cast<ObjCMethodCall *>(this)->Data
+ = ObjCMessageDataTy(nullptr, 1).getOpaqueValue();
+ assert(getMessageKind() == OCM_Message);
+ return OCM_Message;
+ }
+
+ ObjCMessageDataTy Info = ObjCMessageDataTy::getFromOpaqueValue(Data);
+ if (!Info.getPointer())
+ return OCM_Message;
+ return static_cast<ObjCMessageKind>(Info.getInt());
+}
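A minimal sketch of the caching scheme above, assuming LLVM headers and a stand-in type: a pointer and a small integer kind are packed into the one opaque void* 'Data' slot via llvm::PointerIntPair.

    #include "llvm/ADT/PointerIntPair.h"

    struct Expr { int dummy; };  // stand-in; 4-byte aligned, 2 low bits free
    typedef llvm::PointerIntPair<const Expr *, 2> PackedTy;

    void *pack(const Expr *E, unsigned Kind) {
      return PackedTy(E, Kind).getOpaqueValue();  // fits in the 'Data' slot
    }

    unsigned unpackKind(void *Data) {
      return PackedTy::getFromOpaqueValue(Data).getInt();
    }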
+
+
+bool ObjCMethodCall::canBeOverridenInSubclass(ObjCInterfaceDecl *IDecl,
+ Selector Sel) const {
+ assert(IDecl);
+ const SourceManager &SM =
+ getState()->getStateManager().getContext().getSourceManager();
+
+ // If the class interface is declared inside the main file, assume it is not
+  // subclassed.
+ // TODO: It could actually be subclassed if the subclass is private as well.
+ // This is probably very rare.
+ SourceLocation InterfLoc = IDecl->getEndOfDefinitionLoc();
+ if (InterfLoc.isValid() && SM.isInMainFile(InterfLoc))
+ return false;
+
+ // Assume that property accessors are not overridden.
+ if (getMessageKind() == OCM_PropertyAccess)
+ return false;
+
+ // We assume that if the method is public (declared outside of main file) or
+ // has a parent which publicly declares the method, the method could be
+ // overridden in a subclass.
+
+ // Find the first declaration in the class hierarchy that declares
+ // the selector.
+ ObjCMethodDecl *D = nullptr;
+ while (true) {
+ D = IDecl->lookupMethod(Sel, true);
+
+ // Cannot find a public definition.
+ if (!D)
+ return false;
+
+    // If the declaration is outside the main file, it may be overridden.
+ if (D->getLocation().isValid() && !SM.isInMainFile(D->getLocation()))
+ return true;
+
+ if (D->isOverriding()) {
+ // Search in the superclass on the next iteration.
+ IDecl = D->getClassInterface();
+ if (!IDecl)
+ return false;
+
+ IDecl = IDecl->getSuperClass();
+ if (!IDecl)
+ return false;
+
+ continue;
+ }
+
+ return false;
+  }
+
+ llvm_unreachable("The while loop should always terminate.");
+}
+
+RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
+ const ObjCMessageExpr *E = getOriginExpr();
+ assert(E);
+ Selector Sel = E->getSelector();
+
+ if (E->isInstanceMessage()) {
+
+ // Find the receiver type.
+ const ObjCObjectPointerType *ReceiverT = nullptr;
+ bool CanBeSubClassed = false;
+ QualType SupersType = E->getSuperType();
+ const MemRegion *Receiver = nullptr;
+
+ if (!SupersType.isNull()) {
+      // 'super' always refers to the superclass of the class containing the
+      // method where the call occurs.
+ ReceiverT = cast<ObjCObjectPointerType>(SupersType);
+ } else {
+ Receiver = getReceiverSVal().getAsRegion();
+ if (!Receiver)
+ return RuntimeDefinition();
+
+ DynamicTypeInfo DTI = getDynamicTypeInfo(getState(), Receiver);
+ QualType DynType = DTI.getType();
+ CanBeSubClassed = DTI.canBeASubClass();
+ ReceiverT = dyn_cast<ObjCObjectPointerType>(DynType);
+
+ if (ReceiverT && CanBeSubClassed)
+ if (ObjCInterfaceDecl *IDecl = ReceiverT->getInterfaceDecl())
+ if (!canBeOverridenInSubclass(IDecl, Sel))
+ CanBeSubClassed = false;
+ }
+
+ // Lookup the method implementation.
+ if (ReceiverT)
+ if (ObjCInterfaceDecl *IDecl = ReceiverT->getInterfaceDecl()) {
+ // Repeatedly calling lookupPrivateMethod() is expensive, especially
+ // when in many cases it returns null. We cache the results so
+        // that repeated queries on the same ObjCInterfaceDecl and Selector
+ // don't incur the same cost. On some test cases, we can see the
+ // same query being issued thousands of times.
+ //
+ // NOTE: This cache is essentially a "global" variable, but it
+ // only gets lazily created when we get here. The value of the
+ // cache probably comes from it being global across ExprEngines,
+ // where the same queries may get issued. If we are worried about
+ // concurrency, or possibly loading/unloading ASTs, etc., we may
+ // need to revisit this someday. In terms of memory, this table
+ // stays around until clang quits, which also may be bad if we
+ // need to release memory.
+ typedef std::pair<const ObjCInterfaceDecl*, Selector>
+ PrivateMethodKey;
+ typedef llvm::DenseMap<PrivateMethodKey,
+ Optional<const ObjCMethodDecl *> >
+ PrivateMethodCache;
+
+ static PrivateMethodCache PMC;
+ Optional<const ObjCMethodDecl *> &Val = PMC[std::make_pair(IDecl, Sel)];
+
+ // Query lookupPrivateMethod() if the cache does not hit.
+ if (!Val.hasValue()) {
+ Val = IDecl->lookupPrivateMethod(Sel);
+
+ // If the method is a property accessor, we should try to "inline" it
+ // even if we don't actually have an implementation.
+ if (!*Val)
+ if (const ObjCMethodDecl *CompileTimeMD = E->getMethodDecl())
+ if (CompileTimeMD->isPropertyAccessor())
+ Val = IDecl->lookupInstanceMethod(Sel);
+ }
+
+ const ObjCMethodDecl *MD = Val.getValue();
+ if (CanBeSubClassed)
+ return RuntimeDefinition(MD, Receiver);
+ else
+ return RuntimeDefinition(MD, nullptr);
+ }
+
+ } else {
+ // This is a class method.
+ // If we have type info for the receiver class, we are calling via
+ // class name.
+ if (ObjCInterfaceDecl *IDecl = E->getReceiverInterface()) {
+ // Find/Return the method implementation.
+ return RuntimeDefinition(IDecl->lookupPrivateClassMethod(Sel));
+ }
+ }
+
+ return RuntimeDefinition();
+}
+
+bool ObjCMethodCall::argumentsMayEscape() const {
+ if (isInSystemHeader() && !isInstanceMessage()) {
+ Selector Sel = getSelector();
+ if (Sel.getNumArgs() == 1 &&
+ Sel.getIdentifierInfoForSlot(0)->isStr("valueWithPointer"))
+ return true;
+ }
+
+ return CallEvent::argumentsMayEscape();
+}
+
+void ObjCMethodCall::getInitialStackFrameContents(
+ const StackFrameContext *CalleeCtx,
+ BindingsTy &Bindings) const {
+ const ObjCMethodDecl *D = cast<ObjCMethodDecl>(CalleeCtx->getDecl());
+ SValBuilder &SVB = getState()->getStateManager().getSValBuilder();
+ addParameterValuesToBindings(CalleeCtx, Bindings, SVB, *this,
+ D->parameters());
+
+ SVal SelfVal = getReceiverSVal();
+ if (!SelfVal.isUnknown()) {
+ const VarDecl *SelfD = CalleeCtx->getAnalysisDeclContext()->getSelfDecl();
+ MemRegionManager &MRMgr = SVB.getRegionManager();
+ Loc SelfLoc = SVB.makeLoc(MRMgr.getVarRegion(SelfD, CalleeCtx));
+ Bindings.push_back(std::make_pair(SelfLoc, SelfVal));
+ }
+}
+
+CallEventRef<>
+CallEventManager::getSimpleCall(const CallExpr *CE, ProgramStateRef State,
+ const LocationContext *LCtx) {
+ if (const CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(CE))
+ return create<CXXMemberCall>(MCE, State, LCtx);
+
+ if (const CXXOperatorCallExpr *OpCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
+ const FunctionDecl *DirectCallee = OpCE->getDirectCallee();
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(DirectCallee))
+ if (MD->isInstance())
+ return create<CXXMemberOperatorCall>(OpCE, State, LCtx);
+
+ } else if (CE->getCallee()->getType()->isBlockPointerType()) {
+ return create<BlockCall>(CE, State, LCtx);
+ }
+
+ // Otherwise, it's a normal function call, static member function call, or
+ // something we can't reason about.
+ return create<SimpleFunctionCall>(CE, State, LCtx);
+}
+
+
+CallEventRef<>
+CallEventManager::getCaller(const StackFrameContext *CalleeCtx,
+ ProgramStateRef State) {
+ const LocationContext *ParentCtx = CalleeCtx->getParent();
+ const LocationContext *CallerCtx = ParentCtx->getCurrentStackFrame();
+ assert(CallerCtx && "This should not be used for top-level stack frames");
+
+ const Stmt *CallSite = CalleeCtx->getCallSite();
+
+ if (CallSite) {
+ if (const CallExpr *CE = dyn_cast<CallExpr>(CallSite))
+ return getSimpleCall(CE, State, CallerCtx);
+
+ switch (CallSite->getStmtClass()) {
+ case Stmt::CXXConstructExprClass:
+ case Stmt::CXXTemporaryObjectExprClass: {
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ const CXXMethodDecl *Ctor = cast<CXXMethodDecl>(CalleeCtx->getDecl());
+ Loc ThisPtr = SVB.getCXXThis(Ctor, CalleeCtx);
+ SVal ThisVal = State->getSVal(ThisPtr);
+
+ return getCXXConstructorCall(cast<CXXConstructExpr>(CallSite),
+ ThisVal.getAsRegion(), State, CallerCtx);
+ }
+ case Stmt::CXXNewExprClass:
+ return getCXXAllocatorCall(cast<CXXNewExpr>(CallSite), State, CallerCtx);
+ case Stmt::ObjCMessageExprClass:
+ return getObjCMethodCall(cast<ObjCMessageExpr>(CallSite),
+ State, CallerCtx);
+ default:
+ llvm_unreachable("This is not an inlineable statement.");
+ }
+ }
+
+ // Fall back to the CFG. The only thing we haven't handled yet is
+ // destructors, though this could change in the future.
+ const CFGBlock *B = CalleeCtx->getCallSiteBlock();
+ CFGElement E = (*B)[CalleeCtx->getIndex()];
+ assert(E.getAs<CFGImplicitDtor>() &&
+ "All other CFG elements should have exprs");
+ assert(!E.getAs<CFGTemporaryDtor>() && "We don't handle temporaries yet");
+
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CalleeCtx->getDecl());
+ Loc ThisPtr = SVB.getCXXThis(Dtor, CalleeCtx);
+ SVal ThisVal = State->getSVal(ThisPtr);
+
+ const Stmt *Trigger;
+ if (Optional<CFGAutomaticObjDtor> AutoDtor = E.getAs<CFGAutomaticObjDtor>())
+ Trigger = AutoDtor->getTriggerStmt();
+ else if (Optional<CFGDeleteDtor> DeleteDtor = E.getAs<CFGDeleteDtor>())
+ Trigger = cast<Stmt>(DeleteDtor->getDeleteExpr());
+ else
+ Trigger = Dtor->getBody();
+
+ return getCXXDestructorCall(Dtor, Trigger, ThisVal.getAsRegion(),
+ E.getAs<CFGBaseDtor>().hasValue(), State,
+ CallerCtx);
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp
new file mode 100644
index 0000000..b422a88
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp
@@ -0,0 +1,38 @@
+//== Checker.cpp - Registration mechanism for checkers -----------*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines Checker, used to create and register checkers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+
+using namespace clang;
+using namespace ento;
+
+StringRef CheckerBase::getTagDescription() const {
+ return getCheckName().getName();
+}
+
+CheckName CheckerBase::getCheckName() const { return Name; }
+
+CheckerProgramPointTag::CheckerProgramPointTag(StringRef CheckerName,
+ StringRef Msg)
+ : SimpleProgramPointTag(CheckerName, Msg) {}
+
+CheckerProgramPointTag::CheckerProgramPointTag(const CheckerBase *Checker,
+ StringRef Msg)
+ : SimpleProgramPointTag(Checker->getCheckName().getName(), Msg) {}
+
+raw_ostream& clang::ento::operator<<(raw_ostream &Out,
+ const CheckerBase &Checker) {
+ Out << Checker.getCheckName().getName();
+ return Out;
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
new file mode 100644
index 0000000..5ec8bfa
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
@@ -0,0 +1,94 @@
+//== CheckerContext.cpp - Context info for path-sensitive checkers-----------=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CheckerContext that provides contextual info for
+// path-sensitive checkers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Lex/Lexer.h"
+
+using namespace clang;
+using namespace ento;
+
+const FunctionDecl *CheckerContext::getCalleeDecl(const CallExpr *CE) const {
+ ProgramStateRef State = getState();
+ const Expr *Callee = CE->getCallee();
+ SVal L = State->getSVal(Callee, Pred->getLocationContext());
+ return L.getAsFunctionDecl();
+}
+
+StringRef CheckerContext::getCalleeName(const FunctionDecl *FunDecl) const {
+ if (!FunDecl)
+ return StringRef();
+ IdentifierInfo *funI = FunDecl->getIdentifier();
+ if (!funI)
+ return StringRef();
+ return funI->getName();
+}
+
+
+bool CheckerContext::isCLibraryFunction(const FunctionDecl *FD,
+ StringRef Name) {
+ // To avoid false positives (Ex: finding user defined functions with
+ // similar names), only perform fuzzy name matching when it's a builtin.
+ // Using a string compare is slow, we might want to switch on BuiltinID here.
+ unsigned BId = FD->getBuiltinID();
+ if (BId != 0) {
+ if (Name.empty())
+ return true;
+ StringRef BName = FD->getASTContext().BuiltinInfo.getName(BId);
+ if (BName.find(Name) != StringRef::npos)
+ return true;
+ }
+
+ const IdentifierInfo *II = FD->getIdentifier();
+ // If this is a special C++ name without IdentifierInfo, it can't be a
+ // C library function.
+ if (!II)
+ return false;
+
+ // Look through 'extern "C"' and anything similar invented in the future.
+ // If this function is not in TU directly, it is not a C library function.
+ if (!FD->getDeclContext()->getRedeclContext()->isTranslationUnit())
+ return false;
+
+ // If this function is not externally visible, it is not a C library function.
+ // Note that we make an exception for inline functions, which may be
+ // declared in header files without external linkage.
+ if (!FD->isInlined() && !FD->isExternallyVisible())
+ return false;
+
+ if (Name.empty())
+ return true;
+
+ StringRef FName = II->getName();
+ if (FName.equals(Name))
+ return true;
+
+ if (FName.startswith("__inline") && (FName.find(Name) != StringRef::npos))
+ return true;
+
+ if (FName.startswith("__") && FName.endswith("_chk") &&
+ FName.find(Name) != StringRef::npos)
+ return true;
+
+ return false;
+}
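Names the matcher above accepts for Name == "memcpy" (illustrative):

    // memcpy            exact match on the identifier
    // __inline_memcpy   "__inline" prefix with the name embedded
    // __memcpy_chk      "__" prefix plus "_chk" suffix (fortified variant)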
+
+StringRef CheckerContext::getMacroNameOrSpelling(SourceLocation &Loc) {
+ if (Loc.isMacroID())
+ return Lexer::getImmediateMacroName(Loc, getSourceManager(),
+ getLangOpts());
+ SmallVector<char, 16> buf;
+ return Lexer::getSpelling(Loc, buf, getSourceManager(), getLangOpts());
+}
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
new file mode 100644
index 0000000..d6aeceb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
@@ -0,0 +1,96 @@
+//===---- CheckerHelpers.cpp - Helper functions for checkers ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines several static functions for use in checkers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+
+// Recursively find any substatements containing macros
+bool clang::ento::containsMacro(const Stmt *S) {
+ if (S->getLocStart().isMacroID())
+ return true;
+
+ if (S->getLocEnd().isMacroID())
+ return true;
+
+ for (const Stmt *Child : S->children())
+ if (Child && containsMacro(Child))
+ return true;
+
+ return false;
+}
+
+// Recursively find any substatements containing enum constants
+bool clang::ento::containsEnum(const Stmt *S) {
+ const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S);
+
+ if (DR && isa<EnumConstantDecl>(DR->getDecl()))
+ return true;
+
+ for (const Stmt *Child : S->children())
+ if (Child && containsEnum(Child))
+ return true;
+
+ return false;
+}
+
+// Recursively find any substatements containing static vars
+bool clang::ento::containsStaticLocal(const Stmt *S) {
+ const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S);
+
+ if (DR)
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl()))
+ if (VD->isStaticLocal())
+ return true;
+
+ for (const Stmt *Child : S->children())
+ if (Child && containsStaticLocal(Child))
+ return true;
+
+ return false;
+}
+
+// Recursively find any substatements containing __builtin_offsetof
+bool clang::ento::containsBuiltinOffsetOf(const Stmt *S) {
+ if (isa<OffsetOfExpr>(S))
+ return true;
+
+ for (const Stmt *Child : S->children())
+ if (Child && containsBuiltinOffsetOf(Child))
+ return true;
+
+ return false;
+}
+
+// Extract lhs and rhs from assignment statement
+std::pair<const clang::VarDecl *, const clang::Expr *>
+clang::ento::parseAssignment(const Stmt *S) {
+  const VarDecl *VD = nullptr;
+  const Expr *RHS = nullptr;
+
+ if (auto Assign = dyn_cast_or_null<BinaryOperator>(S)) {
+ if (Assign->isAssignmentOp()) {
+ // Ordinary assignment
+ RHS = Assign->getRHS();
+ if (auto DE = dyn_cast_or_null<DeclRefExpr>(Assign->getLHS()))
+ VD = dyn_cast_or_null<VarDecl>(DE->getDecl());
+ }
+ } else if (auto PD = dyn_cast_or_null<DeclStmt>(S)) {
+ // Initialization
+ assert(PD->isSingleDecl() && "We process decls one by one");
+ VD = dyn_cast_or_null<VarDecl>(PD->getSingleDecl());
+    if (VD)
+      RHS = VD->getAnyInitializer();
+ }
+
+ return std::make_pair(VD, RHS);
+}
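A hedged usage sketch: both statement forms below yield the same (VarDecl, Expr) pair from parseAssignment(), assuming single-decl DeclStmts; computeValue is a hypothetical name.

    // int x = computeValue();  // DeclStmt:       VD -> x, RHS -> initializer
    // x = computeValue();      // BinaryOperator: VD -> x, RHS -> getRHS()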
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
new file mode 100644
index 0000000..008e8ef
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -0,0 +1,745 @@
+//===--- CheckerManager.cpp - Static Analyzer Checker Manager -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the Static Analyzer Checker Manager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+bool CheckerManager::hasPathSensitiveCheckers() const {
+ return !StmtCheckers.empty() ||
+ !PreObjCMessageCheckers.empty() ||
+ !PostObjCMessageCheckers.empty() ||
+ !PreCallCheckers.empty() ||
+ !PostCallCheckers.empty() ||
+ !LocationCheckers.empty() ||
+ !BindCheckers.empty() ||
+ !EndAnalysisCheckers.empty() ||
+ !EndFunctionCheckers.empty() ||
+ !BranchConditionCheckers.empty() ||
+ !LiveSymbolsCheckers.empty() ||
+ !DeadSymbolsCheckers.empty() ||
+ !RegionChangesCheckers.empty() ||
+ !EvalAssumeCheckers.empty() ||
+ !EvalCallCheckers.empty();
+}
+
+void CheckerManager::finishedCheckerRegistration() {
+#ifndef NDEBUG
+ // Make sure that for every event that has listeners, there is at least
+ // one dispatcher registered for it.
+ for (llvm::DenseMap<EventTag, EventInfo>::iterator
+ I = Events.begin(), E = Events.end(); I != E; ++I)
+ assert(I->second.HasDispatcher && "No dispatcher registered for an event");
+#endif
+}
+
+//===----------------------------------------------------------------------===//
+// Functions for running checkers for AST traversal.
+//===----------------------------------------------------------------------===//
+
+void CheckerManager::runCheckersOnASTDecl(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) {
+ assert(D);
+
+ unsigned DeclKind = D->getKind();
+ CachedDeclCheckers *checkers = nullptr;
+ CachedDeclCheckersMapTy::iterator CCI = CachedDeclCheckersMap.find(DeclKind);
+ if (CCI != CachedDeclCheckersMap.end()) {
+ checkers = &(CCI->second);
+ } else {
+ // Find the checkers that should run for this Decl and cache them.
+ checkers = &CachedDeclCheckersMap[DeclKind];
+ for (unsigned i = 0, e = DeclCheckers.size(); i != e; ++i) {
+ DeclCheckerInfo &info = DeclCheckers[i];
+ if (info.IsForDeclFn(D))
+ checkers->push_back(info.CheckFn);
+ }
+ }
+
+ assert(checkers);
+ for (CachedDeclCheckers::iterator
+ I = checkers->begin(), E = checkers->end(); I != E; ++I)
+ (*I)(D, mgr, BR);
+}
+
+void CheckerManager::runCheckersOnASTBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) {
+ assert(D && D->hasBody());
+
+ for (unsigned i = 0, e = BodyCheckers.size(); i != e; ++i)
+ BodyCheckers[i](D, mgr, BR);
+}
+
+//===----------------------------------------------------------------------===//
+// Functions for running checkers for path-sensitive checking.
+//===----------------------------------------------------------------------===//
+
+template <typename CHECK_CTX>
+static void expandGraphWithCheckers(CHECK_CTX checkCtx,
+ ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src) {
+ const NodeBuilderContext &BldrCtx = checkCtx.Eng.getBuilderContext();
+ if (Src.empty())
+ return;
+
+ typename CHECK_CTX::CheckersTy::const_iterator
+ I = checkCtx.checkers_begin(), E = checkCtx.checkers_end();
+ if (I == E) {
+ Dst.insert(Src);
+ return;
+ }
+
+ ExplodedNodeSet Tmp1, Tmp2;
+ const ExplodedNodeSet *PrevSet = &Src;
+
+ for (; I != E; ++I) {
+ ExplodedNodeSet *CurrSet = nullptr;
+ if (I+1 == E)
+ CurrSet = &Dst;
+ else {
+ CurrSet = (PrevSet == &Tmp1) ? &Tmp2 : &Tmp1;
+ CurrSet->clear();
+ }
+
+ NodeBuilder B(*PrevSet, *CurrSet, BldrCtx);
+ for (ExplodedNodeSet::iterator NI = PrevSet->begin(), NE = PrevSet->end();
+ NI != NE; ++NI) {
+ checkCtx.runChecker(*I, B, *NI);
+ }
+
+ // If all the produced transitions are sinks, stop.
+ if (CurrSet->empty())
+ return;
+
+ // Update which NodeSet is the current one.
+ PrevSet = CurrSet;
+ }
+}
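A standalone sketch (hypothetical, with plain vectors standing in for node sets) of the two-buffer "ping-pong" used above: each stage consumes the previous stage's output without copying, and an empty intermediate set stops the chain early.

    #include <vector>

    template <typename T, typename Stage>
    std::vector<T> runStages(std::vector<T> Src,
                             const std::vector<Stage> &Stages) {
      std::vector<T> Tmp1, Tmp2;
      const std::vector<T> *Prev = &Src;
      std::vector<T> *Curr = nullptr;
      for (size_t i = 0; i < Stages.size(); ++i) {
        Curr = (Prev == &Tmp1) ? &Tmp2 : &Tmp1;  // alternate buffers
        Curr->clear();
        for (const T &Item : *Prev)
          Stages[i](Item, *Curr);  // a stage may emit zero or more items
        if (Curr->empty())
          return {};               // all transitions sank; stop early
        Prev = Curr;
      }
      return *Prev;
    }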
+
+namespace {
+ struct CheckStmtContext {
+ typedef SmallVectorImpl<CheckerManager::CheckStmtFunc> CheckersTy;
+ bool IsPreVisit;
+ const CheckersTy &Checkers;
+ const Stmt *S;
+ ExprEngine &Eng;
+ bool WasInlined;
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+ CheckStmtContext(bool isPreVisit, const CheckersTy &checkers,
+ const Stmt *s, ExprEngine &eng, bool wasInlined = false)
+ : IsPreVisit(isPreVisit), Checkers(checkers), S(s), Eng(eng),
+ WasInlined(wasInlined) {}
+
+ void runChecker(CheckerManager::CheckStmtFunc checkFn,
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
+ // FIXME: Remove respondsToCallback from CheckerContext;
+ ProgramPoint::Kind K = IsPreVisit ? ProgramPoint::PreStmtKind :
+ ProgramPoint::PostStmtKind;
+ const ProgramPoint &L = ProgramPoint::getProgramPoint(S, K,
+ Pred->getLocationContext(), checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L, WasInlined);
+ checkFn(S, C);
+ }
+ };
+}
+
+/// \brief Run checkers for visiting Stmts.
+void CheckerManager::runCheckersForStmt(bool isPreVisit,
+ ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ const Stmt *S,
+ ExprEngine &Eng,
+ bool WasInlined) {
+ CheckStmtContext C(isPreVisit, getCachedStmtCheckersFor(S, isPreVisit),
+ S, Eng, WasInlined);
+ expandGraphWithCheckers(C, Dst, Src);
+}
+
+namespace {
+ struct CheckObjCMessageContext {
+ typedef std::vector<CheckerManager::CheckObjCMessageFunc> CheckersTy;
+
+ ObjCMessageVisitKind Kind;
+ bool WasInlined;
+ const CheckersTy &Checkers;
+ const ObjCMethodCall &Msg;
+ ExprEngine &Eng;
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+ CheckObjCMessageContext(ObjCMessageVisitKind visitKind,
+ const CheckersTy &checkers,
+ const ObjCMethodCall &msg, ExprEngine &eng,
+ bool wasInlined)
+ : Kind(visitKind), WasInlined(wasInlined), Checkers(checkers),
+ Msg(msg), Eng(eng) { }
+
+ void runChecker(CheckerManager::CheckObjCMessageFunc checkFn,
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
+
+ bool IsPreVisit;
+
+ switch (Kind) {
+ case ObjCMessageVisitKind::Pre:
+ IsPreVisit = true;
+ break;
+ case ObjCMessageVisitKind::MessageNil:
+ case ObjCMessageVisitKind::Post:
+ IsPreVisit = false;
+ break;
+ }
+
+ const ProgramPoint &L = Msg.getProgramPoint(IsPreVisit,checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L, WasInlined);
+
+ checkFn(*Msg.cloneWithState<ObjCMethodCall>(Pred->getState()), C);
+ }
+ };
+}
+
+/// \brief Run checkers for visiting obj-c messages.
+void CheckerManager::runCheckersForObjCMessage(ObjCMessageVisitKind visitKind,
+ ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ const ObjCMethodCall &msg,
+ ExprEngine &Eng,
+ bool WasInlined) {
+ auto &checkers = getObjCMessageCheckers(visitKind);
+ CheckObjCMessageContext C(visitKind, checkers, msg, Eng, WasInlined);
+ expandGraphWithCheckers(C, Dst, Src);
+}
+
+const std::vector<CheckerManager::CheckObjCMessageFunc> &
+CheckerManager::getObjCMessageCheckers(ObjCMessageVisitKind Kind) {
+ switch (Kind) {
+ case ObjCMessageVisitKind::Pre:
+    return PreObjCMessageCheckers;
+ case ObjCMessageVisitKind::Post:
+ return PostObjCMessageCheckers;
+ case ObjCMessageVisitKind::MessageNil:
+ return ObjCMessageNilCheckers;
+ }
+ llvm_unreachable("Unknown Kind");
+}
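
The message callbacks look analogous on the checker side. A sketch with a
hypothetical class name; the dedicated nil hook is the one dispatched with the
MessageNil visit kind when the receiver is known to be nil:

    class DemoObjCChecker : public Checker<check::PreObjCMessage,
                                           check::PostObjCMessage,
                                           check::ObjCMessageNil> {
    public:
      void checkPreObjCMessage(const ObjCMethodCall &M,
                               CheckerContext &C) const {
        SVal Receiver = M.getReceiverSVal(); // receiver value before the send
        (void)Receiver;
      }
      void checkPostObjCMessage(const ObjCMethodCall &M,
                                CheckerContext &C) const {}
      void checkObjCMessageNil(const ObjCMethodCall &M,
                               CheckerContext &C) const {}
    };
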
+namespace {
+ // FIXME: This has all the same signatures as CheckObjCMessageContext.
+ // Is there a way we can merge the two?
+ struct CheckCallContext {
+ typedef std::vector<CheckerManager::CheckCallFunc> CheckersTy;
+ bool IsPreVisit, WasInlined;
+ const CheckersTy &Checkers;
+ const CallEvent &Call;
+ ExprEngine &Eng;
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+ CheckCallContext(bool isPreVisit, const CheckersTy &checkers,
+ const CallEvent &call, ExprEngine &eng,
+ bool wasInlined)
+ : IsPreVisit(isPreVisit), WasInlined(wasInlined), Checkers(checkers),
+ Call(call), Eng(eng) { }
+
+ void runChecker(CheckerManager::CheckCallFunc checkFn,
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
+ const ProgramPoint &L = Call.getProgramPoint(IsPreVisit,checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L, WasInlined);
+
+ checkFn(*Call.cloneWithState(Pred->getState()), C);
+ }
+ };
+}
+
+/// \brief Run checkers for visiting an abstract call event.
+void CheckerManager::runCheckersForCallEvent(bool isPreVisit,
+ ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ const CallEvent &Call,
+ ExprEngine &Eng,
+ bool WasInlined) {
+ CheckCallContext C(isPreVisit,
+ isPreVisit ? PreCallCheckers
+ : PostCallCheckers,
+ Call, Eng, WasInlined);
+ expandGraphWithCheckers(C, Dst, Src);
+}
+
+namespace {
+ struct CheckLocationContext {
+ typedef std::vector<CheckerManager::CheckLocationFunc> CheckersTy;
+ const CheckersTy &Checkers;
+ SVal Loc;
+ bool IsLoad;
+ const Stmt *NodeEx; /* Will become a CFGStmt */
+ const Stmt *BoundEx;
+ ExprEngine &Eng;
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+ CheckLocationContext(const CheckersTy &checkers,
+ SVal loc, bool isLoad, const Stmt *NodeEx,
+ const Stmt *BoundEx,
+ ExprEngine &eng)
+ : Checkers(checkers), Loc(loc), IsLoad(isLoad), NodeEx(NodeEx),
+ BoundEx(BoundEx), Eng(eng) {}
+
+ void runChecker(CheckerManager::CheckLocationFunc checkFn,
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
+ ProgramPoint::Kind K = IsLoad ? ProgramPoint::PreLoadKind :
+ ProgramPoint::PreStoreKind;
+ const ProgramPoint &L =
+ ProgramPoint::getProgramPoint(NodeEx, K,
+ Pred->getLocationContext(),
+ checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L);
+ checkFn(Loc, IsLoad, BoundEx, C);
+ }
+ };
+}
+
+/// \brief Run checkers for load/store of a location.
+void CheckerManager::runCheckersForLocation(ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ SVal location, bool isLoad,
+ const Stmt *NodeEx,
+ const Stmt *BoundEx,
+ ExprEngine &Eng) {
+ CheckLocationContext C(LocationCheckers, location, isLoad, NodeEx,
+ BoundEx, Eng);
+ expandGraphWithCheckers(C, Dst, Src);
+}
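
As an illustration of the load/store callback (a sketch only; the bug name
and category strings are arbitrary):

    class DemoLocChecker : public Checker<check::Location> {
      mutable std::unique_ptr<BugType> BT;
    public:
      // Invoked at the PreLoad/PreStore program point constructed above.
      void checkLocation(SVal Loc, bool IsLoad, const Stmt *S,
                         CheckerContext &C) const {
        if (!Loc.isUndef())
          return;
        if (ExplodedNode *N = C.generateErrorNode()) {
          if (!BT)
            BT.reset(new BugType(this, "Access via undefined pointer",
                                 "Logic error"));
          C.emitReport(llvm::make_unique<BugReport>(*BT, BT->getName(), N));
        }
      }
    };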
+
+namespace {
+ struct CheckBindContext {
+ typedef std::vector<CheckerManager::CheckBindFunc> CheckersTy;
+ const CheckersTy &Checkers;
+ SVal Loc;
+ SVal Val;
+ const Stmt *S;
+ ExprEngine &Eng;
+ const ProgramPoint &PP;
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+ CheckBindContext(const CheckersTy &checkers,
+ SVal loc, SVal val, const Stmt *s, ExprEngine &eng,
+ const ProgramPoint &pp)
+ : Checkers(checkers), Loc(loc), Val(val), S(s), Eng(eng), PP(pp) {}
+
+ void runChecker(CheckerManager::CheckBindFunc checkFn,
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
+ const ProgramPoint &L = PP.withTag(checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L);
+
+ checkFn(Loc, Val, S, C);
+ }
+ };
+}
+
+/// \brief Run checkers for binding of a value to a location.
+void CheckerManager::runCheckersForBind(ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ SVal location, SVal val,
+ const Stmt *S, ExprEngine &Eng,
+ const ProgramPoint &PP) {
+ CheckBindContext C(BindCheckers, location, val, S, Eng, PP);
+ expandGraphWithCheckers(C, Dst, Src);
+}
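
A matching sketch for the bind callback; ZeroedRegions is a hypothetical
program-state trait registered with the usual macro:

    REGISTER_SET_WITH_PROGRAMSTATE(ZeroedRegions, const MemRegion *)

    class DemoBindChecker : public Checker<check::Bind> {
    public:
      void checkBind(SVal Loc, SVal Val, const Stmt *S,
                     CheckerContext &C) const {
        // Remember regions that were just assigned a literal zero.
        if (!Val.isZeroConstant())
          return;
        if (const MemRegion *R = Loc.getAsRegion())
          C.addTransition(C.getState()->add<ZeroedRegions>(R));
      }
    };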
+
+void CheckerManager::runCheckersForEndAnalysis(ExplodedGraph &G,
+ BugReporter &BR,
+ ExprEngine &Eng) {
+ for (unsigned i = 0, e = EndAnalysisCheckers.size(); i != e; ++i)
+ EndAnalysisCheckers[i](G, BR, Eng);
+}
+
+/// \brief Run checkers for end of path.
+// Note: we do not chain the checker output (as in expandGraphWithCheckers)
+// for this callback, since end-of-path nodes are expected to be final.
+void CheckerManager::runCheckersForEndFunction(NodeBuilderContext &BC,
+ ExplodedNodeSet &Dst,
+ ExplodedNode *Pred,
+ ExprEngine &Eng) {
+
+  // We define the builder outside of the loop because if at least one checker
+  // creates a successor for Pred, we do not need to generate an
+  // auto-transition for it.
+ NodeBuilder Bldr(Pred, Dst, BC);
+ for (unsigned i = 0, e = EndFunctionCheckers.size(); i != e; ++i) {
+ CheckEndFunctionFunc checkFn = EndFunctionCheckers[i];
+
+ const ProgramPoint &L = BlockEntrance(BC.Block,
+ Pred->getLocationContext(),
+ checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L);
+ checkFn(C);
+ }
+}
+
+namespace {
+ struct CheckBranchConditionContext {
+ typedef std::vector<CheckerManager::CheckBranchConditionFunc> CheckersTy;
+ const CheckersTy &Checkers;
+ const Stmt *Condition;
+ ExprEngine &Eng;
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+ CheckBranchConditionContext(const CheckersTy &checkers,
+ const Stmt *Cond, ExprEngine &eng)
+ : Checkers(checkers), Condition(Cond), Eng(eng) {}
+
+ void runChecker(CheckerManager::CheckBranchConditionFunc checkFn,
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
+ ProgramPoint L = PostCondition(Condition, Pred->getLocationContext(),
+ checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L);
+ checkFn(Condition, C);
+ }
+ };
+}
+
+/// \brief Run checkers for branch condition.
+void CheckerManager::runCheckersForBranchCondition(const Stmt *Condition,
+ ExplodedNodeSet &Dst,
+ ExplodedNode *Pred,
+ ExprEngine &Eng) {
+ ExplodedNodeSet Src;
+ Src.insert(Pred);
+ CheckBranchConditionContext C(BranchConditionCheckers, Condition, Eng);
+ expandGraphWithCheckers(C, Dst, Src);
+}
+
+/// \brief Run checkers for live symbols.
+void CheckerManager::runCheckersForLiveSymbols(ProgramStateRef state,
+ SymbolReaper &SymReaper) {
+ for (unsigned i = 0, e = LiveSymbolsCheckers.size(); i != e; ++i)
+ LiveSymbolsCheckers[i](state, SymReaper);
+}
+
+namespace {
+ struct CheckDeadSymbolsContext {
+ typedef std::vector<CheckerManager::CheckDeadSymbolsFunc> CheckersTy;
+ const CheckersTy &Checkers;
+ SymbolReaper &SR;
+ const Stmt *S;
+ ExprEngine &Eng;
+    ProgramPoint::Kind ProgramPointKind;
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+ CheckDeadSymbolsContext(const CheckersTy &checkers, SymbolReaper &sr,
+ const Stmt *s, ExprEngine &eng,
+ ProgramPoint::Kind K)
+      : Checkers(checkers), SR(sr), S(s), Eng(eng), ProgramPointKind(K) { }
+
+ void runChecker(CheckerManager::CheckDeadSymbolsFunc checkFn,
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
+      const ProgramPoint &L = ProgramPoint::getProgramPoint(S, ProgramPointKind,
+ Pred->getLocationContext(), checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L);
+
+      // Note: do not pass the statement to the checkers without also letting
+      // them differentiate whether remove-dead-bindings ran before or after
+      // the statement.
+ checkFn(SR, C);
+ }
+ };
+}
+
+/// \brief Run checkers for dead symbols.
+void CheckerManager::runCheckersForDeadSymbols(ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ SymbolReaper &SymReaper,
+ const Stmt *S,
+ ExprEngine &Eng,
+ ProgramPoint::Kind K) {
+ CheckDeadSymbolsContext C(DeadSymbolsCheckers, SymReaper, S, Eng, K);
+ expandGraphWithCheckers(C, Dst, Src);
+}
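
The dead-symbols callback is typically used to clean up, and report leaks
from, a checker-maintained map, in the style of SimpleStreamChecker. A sketch,
with OpenFiles a hypothetical trait:

    REGISTER_MAP_WITH_PROGRAMSTATE(OpenFiles, SymbolRef, unsigned)

    void DemoFileChecker::checkDeadSymbols(SymbolReaper &SymReaper,
                                           CheckerContext &C) const {
      ProgramStateRef State = C.getState();
      OpenFilesTy Tracked = State->get<OpenFiles>();
      for (OpenFilesTy::iterator I = Tracked.begin(), E = Tracked.end();
           I != E; ++I) {
        if (SymReaper.isDead(I->first)) // the symbol died at this point
          State = State->remove<OpenFiles>(I->first);
      }
      C.addTransition(State);
    }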
+
+/// \brief True if at least one checker wants to check region changes.
+bool CheckerManager::wantsRegionChangeUpdate(ProgramStateRef state) {
+ for (unsigned i = 0, e = RegionChangesCheckers.size(); i != e; ++i)
+ if (RegionChangesCheckers[i].WantUpdateFn(state))
+ return true;
+
+ return false;
+}
+
+/// \brief Run checkers for region changes.
+ProgramStateRef
+CheckerManager::runCheckersForRegionChanges(ProgramStateRef state,
+ const InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallEvent *Call) {
+ for (unsigned i = 0, e = RegionChangesCheckers.size(); i != e; ++i) {
+ // If any checker declares the state infeasible (or if it starts that way),
+ // bail out.
+ if (!state)
+ return nullptr;
+ state = RegionChangesCheckers[i].CheckFn(state, invalidated,
+ ExplicitRegions, Regions, Call);
+ }
+ return state;
+}
+
+/// \brief Run checkers to process symbol escape event.
+ProgramStateRef
+CheckerManager::runCheckersForPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind,
+ RegionAndSymbolInvalidationTraits *ETraits) {
+ assert((Call != nullptr ||
+ (Kind != PSK_DirectEscapeOnCall &&
+ Kind != PSK_IndirectEscapeOnCall)) &&
+ "Call must not be NULL when escaping on call");
+ for (unsigned i = 0, e = PointerEscapeCheckers.size(); i != e; ++i) {
+ // If any checker declares the state infeasible (or if it starts that
+ // way), bail out.
+ if (!State)
+ return nullptr;
+ State = PointerEscapeCheckers[i](State, Escaped, Call, Kind, ETraits);
+ }
+ return State;
+}
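
The escape callback usually pairs with such a map: once a tracked symbol is
handed to unknown code, the checker stops tracking it rather than report a
false leak later. A sketch reusing the hypothetical OpenFiles trait from
above:

    ProgramStateRef
    DemoFileChecker::checkPointerEscape(ProgramStateRef State,
                                        const InvalidatedSymbols &Escaped,
                                        const CallEvent *Call,
                                        PointerEscapeKind Kind) const {
      for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
                                              E = Escaped.end(); I != E; ++I)
        State = State->remove<OpenFiles>(*I);
      return State;
    }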
+
+/// \brief Run checkers for handling assumptions on symbolic values.
+ProgramStateRef
+CheckerManager::runCheckersForEvalAssume(ProgramStateRef state,
+ SVal Cond, bool Assumption) {
+ for (unsigned i = 0, e = EvalAssumeCheckers.size(); i != e; ++i) {
+ // If any checker declares the state infeasible (or if it starts that way),
+ // bail out.
+ if (!state)
+ return nullptr;
+ state = EvalAssumeCheckers[i](state, Cond, Assumption);
+ }
+ return state;
+}
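
A sketch of the checker side of eval::Assume; returning null from the
callback is how a checker declares the branch infeasible, which is exactly
what the null check in the loop above guards against:

    ProgramStateRef DemoFileChecker::evalAssume(ProgramStateRef State,
                                                SVal Cond,
                                                bool Assumption) const {
      // React to the engine assuming Cond is true or false, e.g. by updating
      // checker metadata keyed on the symbols inside Cond. Returning State
      // unchanged is the common case; returning nullptr kills the path.
      return State;
    }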
+
+/// \brief Run checkers for evaluating a call.
+/// Only one checker will evaluate the call.
+void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ const CallEvent &Call,
+ ExprEngine &Eng) {
+ const CallExpr *CE = cast<CallExpr>(Call.getOriginExpr());
+ for (ExplodedNodeSet::iterator
+ NI = Src.begin(), NE = Src.end(); NI != NE; ++NI) {
+ ExplodedNode *Pred = *NI;
+ bool anyEvaluated = false;
+
+ ExplodedNodeSet checkDst;
+ NodeBuilder B(Pred, checkDst, Eng.getBuilderContext());
+
+ // Check if any of the EvalCall callbacks can evaluate the call.
+ for (std::vector<EvalCallFunc>::iterator
+ EI = EvalCallCheckers.begin(), EE = EvalCallCheckers.end();
+ EI != EE; ++EI) {
+ ProgramPoint::Kind K = ProgramPoint::PostStmtKind;
+ const ProgramPoint &L = ProgramPoint::getProgramPoint(CE, K,
+ Pred->getLocationContext(), EI->Checker);
+ bool evaluated = false;
+      { // CheckerContext generates transitions (populates checkDst) on
+        // destruction, so introduce this scope to make sure checkDst gets
+        // properly populated.
+ CheckerContext C(B, Eng, Pred, L);
+ evaluated = (*EI)(CE, C);
+ }
+      assert(!(evaluated && anyEvaluated)
+             && "More than one checker evaluated the call");
+ if (evaluated) {
+ anyEvaluated = true;
+ Dst.insert(checkDst);
+#ifdef NDEBUG
+        break; // In release builds, don't check that no other checker also
+               // evaluates the call.
+#endif
+ }
+ }
+
+ // If none of the checkers evaluated the call, ask ExprEngine to handle it.
+ if (!anyEvaluated) {
+ NodeBuilder B(Pred, Dst, Eng.getBuilderContext());
+ Eng.defaultEvalCall(B, Pred, Call);
+ }
+ }
+}
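
On the checker side, eval::Call lets a single checker take over modeling of a
call. The sketch below models a hypothetical my_noop() as returning zero;
returning false defers to other checkers and, ultimately, to
defaultEvalCall():

    class DemoEvalChecker : public Checker<eval::Call> {
    public:
      bool evalCall(const CallExpr *CE, CheckerContext &C) const {
        const FunctionDecl *FD = C.getCalleeDecl(CE);
        if (!FD || !FD->getIdentifier() ||
            FD->getIdentifier()->getName() != "my_noop")
          return false;                 // not ours; do not claim the call
        ProgramStateRef State = C.getState();
        SValBuilder &SVB = C.getSValBuilder();
        State = State->BindExpr(CE, C.getLocationContext(),
                                SVB.makeIntVal(0, CE->getType()));
        C.addTransition(State);
        return true;                    // claimed; the assert above fires if
                                        // a second checker also claims it
      }
    };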
+
+/// \brief Run checkers for the entire Translation Unit.
+void CheckerManager::runCheckersOnEndOfTranslationUnit(
+ const TranslationUnitDecl *TU,
+ AnalysisManager &mgr,
+ BugReporter &BR) {
+ for (unsigned i = 0, e = EndOfTranslationUnitCheckers.size(); i != e; ++i)
+ EndOfTranslationUnitCheckers[i](TU, mgr, BR);
+}
+
+void CheckerManager::runCheckersForPrintState(raw_ostream &Out,
+ ProgramStateRef State,
+ const char *NL, const char *Sep) {
+ for (llvm::DenseMap<CheckerTag, CheckerRef>::iterator
+ I = CheckerTags.begin(), E = CheckerTags.end(); I != E; ++I)
+ I->second->printState(Out, State, NL, Sep);
+}
+
+//===----------------------------------------------------------------------===//
+// Internal registration functions for AST traversing.
+//===----------------------------------------------------------------------===//
+
+void CheckerManager::_registerForDecl(CheckDeclFunc checkfn,
+ HandlesDeclFunc isForDeclFn) {
+ DeclCheckerInfo info = { checkfn, isForDeclFn };
+ DeclCheckers.push_back(info);
+}
+
+void CheckerManager::_registerForBody(CheckDeclFunc checkfn) {
+ BodyCheckers.push_back(checkfn);
+}
+
+//===----------------------------------------------------------------------===//
+// Internal registration functions for path-sensitive checking.
+//===----------------------------------------------------------------------===//
+
+void CheckerManager::_registerForPreStmt(CheckStmtFunc checkfn,
+ HandlesStmtFunc isForStmtFn) {
+ StmtCheckerInfo info = { checkfn, isForStmtFn, /*IsPreVisit*/true };
+ StmtCheckers.push_back(info);
+}
+void CheckerManager::_registerForPostStmt(CheckStmtFunc checkfn,
+ HandlesStmtFunc isForStmtFn) {
+ StmtCheckerInfo info = { checkfn, isForStmtFn, /*IsPreVisit*/false };
+ StmtCheckers.push_back(info);
+}
+
+void CheckerManager::_registerForPreObjCMessage(CheckObjCMessageFunc checkfn) {
+ PreObjCMessageCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForObjCMessageNil(CheckObjCMessageFunc checkfn) {
+ ObjCMessageNilCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForPostObjCMessage(CheckObjCMessageFunc checkfn) {
+ PostObjCMessageCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForPreCall(CheckCallFunc checkfn) {
+ PreCallCheckers.push_back(checkfn);
+}
+void CheckerManager::_registerForPostCall(CheckCallFunc checkfn) {
+ PostCallCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForLocation(CheckLocationFunc checkfn) {
+ LocationCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForBind(CheckBindFunc checkfn) {
+ BindCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForEndAnalysis(CheckEndAnalysisFunc checkfn) {
+ EndAnalysisCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForEndFunction(CheckEndFunctionFunc checkfn) {
+ EndFunctionCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForBranchCondition(
+ CheckBranchConditionFunc checkfn) {
+ BranchConditionCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForLiveSymbols(CheckLiveSymbolsFunc checkfn) {
+ LiveSymbolsCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForDeadSymbols(CheckDeadSymbolsFunc checkfn) {
+ DeadSymbolsCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForRegionChanges(CheckRegionChangesFunc checkfn,
+ WantsRegionChangeUpdateFunc wantUpdateFn) {
+ RegionChangesCheckerInfo info = {checkfn, wantUpdateFn};
+ RegionChangesCheckers.push_back(info);
+}
+
+void CheckerManager::_registerForPointerEscape(CheckPointerEscapeFunc checkfn){
+ PointerEscapeCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForConstPointerEscape(
+ CheckPointerEscapeFunc checkfn) {
+ PointerEscapeCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForEvalAssume(EvalAssumeFunc checkfn) {
+ EvalAssumeCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForEvalCall(EvalCallFunc checkfn) {
+ EvalCallCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForEndOfTranslationUnit(
+ CheckEndOfTranslationUnit checkfn) {
+ EndOfTranslationUnitCheckers.push_back(checkfn);
+}
+
+//===----------------------------------------------------------------------===//
+// Implementation details.
+//===----------------------------------------------------------------------===//
+
+const CheckerManager::CachedStmtCheckers &
+CheckerManager::getCachedStmtCheckersFor(const Stmt *S, bool isPreVisit) {
+ assert(S);
+
+ unsigned Key = (S->getStmtClass() << 1) | unsigned(isPreVisit);
+ CachedStmtCheckersMapTy::iterator CCI = CachedStmtCheckersMap.find(Key);
+ if (CCI != CachedStmtCheckersMap.end())
+ return CCI->second;
+
+ // Find the checkers that should run for this Stmt and cache them.
+ CachedStmtCheckers &Checkers = CachedStmtCheckersMap[Key];
+ for (unsigned i = 0, e = StmtCheckers.size(); i != e; ++i) {
+ StmtCheckerInfo &Info = StmtCheckers[i];
+ if (Info.IsPreVisit == isPreVisit && Info.IsForStmtFn(S))
+ Checkers.push_back(Info.CheckFn);
+ }
+ return Checkers;
+}
+
+CheckerManager::~CheckerManager() {
+ for (unsigned i = 0, e = CheckerDtors.size(); i != e; ++i)
+ CheckerDtors[i]();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerRegistry.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
new file mode 100644
index 0000000..a15e157
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
@@ -0,0 +1,177 @@
+//===--- CheckerRegistry.cpp - Maintains all available checkers -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/CheckerRegistry.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/CheckerOptInfo.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+static const char PackageSeparator = '.';
+typedef llvm::SetVector<const CheckerRegistry::CheckerInfo *> CheckerInfoSet;
+
+
+static bool checkerNameLT(const CheckerRegistry::CheckerInfo &a,
+ const CheckerRegistry::CheckerInfo &b) {
+ return a.FullName < b.FullName;
+}
+
+static bool isInPackage(const CheckerRegistry::CheckerInfo &checker,
+ StringRef packageName) {
+ // Does the checker's full name have the package as a prefix?
+ if (!checker.FullName.startswith(packageName))
+ return false;
+
+ // Is the package actually just the name of a specific checker?
+ if (checker.FullName.size() == packageName.size())
+ return true;
+
+ // Is the checker in the package (or a subpackage)?
+ if (checker.FullName[packageName.size()] == PackageSeparator)
+ return true;
+
+ return false;
+}
+
+static void collectCheckers(const CheckerRegistry::CheckerInfoList &checkers,
+ const llvm::StringMap<size_t> &packageSizes,
+ CheckerOptInfo &opt, CheckerInfoSet &collected) {
+ // Use a binary search to find the possible start of the package.
+ CheckerRegistry::CheckerInfo packageInfo(nullptr, opt.getName(), "");
+ CheckerRegistry::CheckerInfoList::const_iterator e = checkers.end();
+ CheckerRegistry::CheckerInfoList::const_iterator i =
+ std::lower_bound(checkers.begin(), e, packageInfo, checkerNameLT);
+
+ // If we didn't even find a possible package, give up.
+ if (i == e)
+ return;
+
+ // If what we found doesn't actually start the package, give up.
+ if (!isInPackage(*i, opt.getName()))
+ return;
+
+ // There is at least one checker in the package; claim the option.
+ opt.claim();
+
+ // See how large the package is.
+ // If the package doesn't exist, assume the option refers to a single checker.
+ size_t size = 1;
+ llvm::StringMap<size_t>::const_iterator packageSize =
+ packageSizes.find(opt.getName());
+ if (packageSize != packageSizes.end())
+ size = packageSize->getValue();
+
+ // Step through all the checkers in the package.
+ for (e = i+size; i != e; ++i) {
+ if (opt.isEnabled())
+ collected.insert(&*i);
+ else
+ collected.remove(&*i);
+ }
+}
+
+void CheckerRegistry::addChecker(InitializationFunction fn, StringRef name,
+ StringRef desc) {
+ Checkers.push_back(CheckerInfo(fn, name, desc));
+
+ // Record the presence of the checker in its packages.
+ StringRef packageName, leafName;
+ std::tie(packageName, leafName) = name.rsplit(PackageSeparator);
+ while (!leafName.empty()) {
+ Packages[packageName] += 1;
+ std::tie(packageName, leafName) = packageName.rsplit(PackageSeparator);
+ }
+}
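
Concretely, each registration seeds the Packages map that the binary search
in collectCheckers() relies on. A sketch with hypothetical registration
functions:

    CheckerRegistry Registry;
    Registry.addChecker(registerFooChecker, "alpha.core.Foo", "demo checker");
    Registry.addChecker(registerBarChecker, "alpha.core.Bar", "demo checker");
    // Packages is now { "alpha" -> 2, "alpha.core" -> 2 }, so the option
    // "-analyzer-checker=alpha.core" later enables both checkers at once.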
+
+void CheckerRegistry::initializeManager(CheckerManager &checkerMgr,
+ SmallVectorImpl<CheckerOptInfo> &opts) const {
+ // Sort checkers for efficient collection.
+ std::sort(Checkers.begin(), Checkers.end(), checkerNameLT);
+
+ // Collect checkers enabled by the options.
+ CheckerInfoSet enabledCheckers;
+ for (SmallVectorImpl<CheckerOptInfo>::iterator
+ i = opts.begin(), e = opts.end(); i != e; ++i) {
+ collectCheckers(Checkers, Packages, *i, enabledCheckers);
+ }
+
+ // Initialize the CheckerManager with all enabled checkers.
+ for (CheckerInfoSet::iterator
+ i = enabledCheckers.begin(), e = enabledCheckers.end(); i != e; ++i) {
+ checkerMgr.setCurrentCheckName(CheckName((*i)->FullName));
+ (*i)->Initialize(checkerMgr);
+ }
+}
+
+void CheckerRegistry::validateCheckerOptions(const AnalyzerOptions &opts,
+ DiagnosticsEngine &diags) const {
+ for (auto &config : opts.Config) {
+ size_t pos = config.getKey().find(':');
+ if (pos == StringRef::npos)
+ continue;
+
+ bool hasChecker = false;
+ StringRef checkerName = config.getKey().substr(0, pos);
+ for (auto &checker : Checkers) {
+ if (checker.FullName.startswith(checkerName) &&
+ (checker.FullName.size() == pos || checker.FullName[pos] == '.')) {
+ hasChecker = true;
+ break;
+ }
+ }
+ if (!hasChecker) {
+ diags.Report(diag::err_unknown_analyzer_checker) << checkerName;
+ }
+ }
+}
+
+void CheckerRegistry::printHelp(raw_ostream &out,
+ size_t maxNameChars) const {
+ // FIXME: Alphabetical sort puts 'experimental' in the middle.
+ // Would it be better to name it '~experimental' or something else
+ // that's ASCIIbetically last?
+ std::sort(Checkers.begin(), Checkers.end(), checkerNameLT);
+
+ // FIXME: Print available packages.
+
+ out << "CHECKERS:\n";
+
+ // Find the maximum option length.
+ size_t optionFieldWidth = 0;
+ for (CheckerInfoList::const_iterator i = Checkers.begin(), e = Checkers.end();
+ i != e; ++i) {
+ // Limit the amount of padding we are willing to give up for alignment.
+ // Package.Name Description [Hidden]
+ size_t nameLength = i->FullName.size();
+ if (nameLength <= maxNameChars)
+ optionFieldWidth = std::max(optionFieldWidth, nameLength);
+ }
+
+ const size_t initialPad = 2;
+ for (CheckerInfoList::const_iterator i = Checkers.begin(), e = Checkers.end();
+ i != e; ++i) {
+ out.indent(initialPad) << i->FullName;
+
+ int pad = optionFieldWidth - i->FullName.size();
+
+ // Break on long option names.
+ if (pad < 0) {
+ out << '\n';
+ pad = optionFieldWidth + initialPad;
+ }
+ out.indent(pad + 2) << i->Desc;
+
+ out << '\n';
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
new file mode 100644
index 0000000..3cb9323
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
@@ -0,0 +1,20 @@
+//=--- CommonBugCategories.cpp - Provides common issue categories -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
+
+// Common strings used for the "category" of many static analyzer issues.
+namespace clang { namespace ento { namespace categories {
+
+const char * const CoreFoundationObjectiveC = "Core Foundation/Objective-C";
+const char * const LogicError = "Logic error";
+const char * const MemoryCoreFoundationObjectiveC =
+ "Memory (Core Foundation/Objective-C)";
+const char * const UnixAPI = "Unix API";
+}}}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp
new file mode 100644
index 0000000..b7db833
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp
@@ -0,0 +1,39 @@
+//== ConstraintManager.cpp - Constraints on symbolic values -----*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface to manage constraints on symbolic values.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+
+using namespace clang;
+using namespace ento;
+
+ConstraintManager::~ConstraintManager() {}
+
+static DefinedSVal getLocFromSymbol(const ProgramStateRef &State,
+ SymbolRef Sym) {
+ const MemRegion *R = State->getStateManager().getRegionManager()
+ .getSymbolicRegion(Sym);
+ return loc::MemRegionVal(R);
+}
+
+ConditionTruthVal ConstraintManager::checkNull(ProgramStateRef State,
+ SymbolRef Sym) {
+ QualType Ty = Sym->getType();
+ DefinedSVal V = Loc::isLocType(Ty) ? getLocFromSymbol(State, Sym)
+ : nonloc::SymbolVal(Sym);
+ const ProgramStatePair &P = assumeDual(State, V);
+ if (P.first && !P.second)
+ return ConditionTruthVal(false);
+ if (!P.first && P.second)
+ return ConditionTruthVal(true);
+ return ConditionTruthVal();
+}
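
Callers typically consume the resulting ConditionTruthVal like this (a
sketch; State and Sym are already in hand inside some checker callback):

    ConstraintManager &CM = State->getStateManager().getConstraintManager();
    ConditionTruthVal Nullness = CM.checkNull(State, Sym);
    if (Nullness.isConstrainedTrue()) {
      // Sym is provably null on this path.
    } else if (Nullness.isConstrainedFalse()) {
      // Sym is provably non-null on this path.
    }
    // Otherwise the state is underconstrained: both assumptions are feasible.
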
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
new file mode 100644
index 0000000..39cf7e7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -0,0 +1,730 @@
+//==- CoreEngine.cpp - Path-Sensitive Dataflow Engine ------------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a generic engine for intraprocedural, path-sensitive,
+// dataflow analysis via graph reachability engine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/Casting.h"
+
+using namespace clang;
+using namespace ento;
+
+#define DEBUG_TYPE "CoreEngine"
+
+STATISTIC(NumSteps,
+ "The # of steps executed.");
+STATISTIC(NumReachedMaxSteps,
+ "The # of times we reached the max number of steps.");
+STATISTIC(NumPathsExplored,
+ "The # of paths explored by the analyzer.");
+
+//===----------------------------------------------------------------------===//
+// Worklist classes for exploration of reachable states.
+//===----------------------------------------------------------------------===//
+
+WorkList::Visitor::~Visitor() {}
+
+namespace {
+class DFS : public WorkList {
+ SmallVector<WorkListUnit,20> Stack;
+public:
+ bool hasWork() const override {
+ return !Stack.empty();
+ }
+
+ void enqueue(const WorkListUnit& U) override {
+ Stack.push_back(U);
+ }
+
+ WorkListUnit dequeue() override {
+ assert (!Stack.empty());
+ const WorkListUnit& U = Stack.back();
+ Stack.pop_back(); // This technically "invalidates" U, but we are fine.
+ return U;
+ }
+
+ bool visitItemsInWorkList(Visitor &V) override {
+ for (SmallVectorImpl<WorkListUnit>::iterator
+ I = Stack.begin(), E = Stack.end(); I != E; ++I) {
+ if (V.visit(*I))
+ return true;
+ }
+ return false;
+ }
+};
+
+class BFS : public WorkList {
+ std::deque<WorkListUnit> Queue;
+public:
+ bool hasWork() const override {
+ return !Queue.empty();
+ }
+
+ void enqueue(const WorkListUnit& U) override {
+ Queue.push_back(U);
+ }
+
+ WorkListUnit dequeue() override {
+ WorkListUnit U = Queue.front();
+ Queue.pop_front();
+ return U;
+ }
+
+ bool visitItemsInWorkList(Visitor &V) override {
+ for (std::deque<WorkListUnit>::iterator
+ I = Queue.begin(), E = Queue.end(); I != E; ++I) {
+ if (V.visit(*I))
+ return true;
+ }
+ return false;
+ }
+};
+
+} // end anonymous namespace
+
+// Place the destructor for WorkList here because it contains virtual member
+// functions, and we want the code for the destructor generated in one
+// compilation unit.
+WorkList::~WorkList() {}
+
+WorkList *WorkList::makeDFS() { return new DFS(); }
+WorkList *WorkList::makeBFS() { return new BFS(); }
+
+namespace {
+ class BFSBlockDFSContents : public WorkList {
+ std::deque<WorkListUnit> Queue;
+ SmallVector<WorkListUnit,20> Stack;
+ public:
+ bool hasWork() const override {
+ return !Queue.empty() || !Stack.empty();
+ }
+
+ void enqueue(const WorkListUnit& U) override {
+ if (U.getNode()->getLocation().getAs<BlockEntrance>())
+ Queue.push_front(U);
+ else
+ Stack.push_back(U);
+ }
+
+ WorkListUnit dequeue() override {
+ // Process all basic blocks to completion.
+ if (!Stack.empty()) {
+ const WorkListUnit& U = Stack.back();
+ Stack.pop_back(); // This technically "invalidates" U, but we are fine.
+ return U;
+ }
+
+ assert(!Queue.empty());
+      // Don't use a const reference; the subsequent pop_front() might make
+      // it unsafe.
+ WorkListUnit U = Queue.front();
+ Queue.pop_front();
+ return U;
+ }
+ bool visitItemsInWorkList(Visitor &V) override {
+ for (SmallVectorImpl<WorkListUnit>::iterator
+ I = Stack.begin(), E = Stack.end(); I != E; ++I) {
+ if (V.visit(*I))
+ return true;
+ }
+ for (std::deque<WorkListUnit>::iterator
+ I = Queue.begin(), E = Queue.end(); I != E; ++I) {
+ if (V.visit(*I))
+ return true;
+ }
+ return false;
+ }
+
+ };
+} // end anonymous namespace
+
+WorkList* WorkList::makeBFSBlockDFSContents() {
+ return new BFSBlockDFSContents();
+}
+
+//===----------------------------------------------------------------------===//
+// Core analysis engine.
+//===----------------------------------------------------------------------===//
+
+/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps.
+bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
+ ProgramStateRef InitState) {
+
+ if (G.num_roots() == 0) { // Initialize the analysis by constructing
+ // the root if none exists.
+
+ const CFGBlock *Entry = &(L->getCFG()->getEntry());
+
+ assert (Entry->empty() &&
+ "Entry block must be empty.");
+
+ assert (Entry->succ_size() == 1 &&
+ "Entry block must have 1 successor.");
+
+ // Mark the entry block as visited.
+ FunctionSummaries->markVisitedBasicBlock(Entry->getBlockID(),
+ L->getDecl(),
+ L->getCFG()->getNumBlockIDs());
+
+ // Get the solitary successor.
+ const CFGBlock *Succ = *(Entry->succ_begin());
+
+ // Construct an edge representing the
+ // starting location in the function.
+ BlockEdge StartLoc(Entry, Succ, L);
+
+ // Set the current block counter to being empty.
+ WList->setBlockCounter(BCounterFactory.GetEmptyCounter());
+
+ if (!InitState)
+ // Generate the root.
+ generateNode(StartLoc, SubEng.getInitialState(L), nullptr);
+ else
+ generateNode(StartLoc, InitState, nullptr);
+ }
+
+  // Check if we have a step limit.
+ bool UnlimitedSteps = Steps == 0;
+
+ while (WList->hasWork()) {
+ if (!UnlimitedSteps) {
+ if (Steps == 0) {
+ NumReachedMaxSteps++;
+ break;
+ }
+ --Steps;
+ }
+
+ NumSteps++;
+
+ const WorkListUnit& WU = WList->dequeue();
+
+ // Set the current block counter.
+ WList->setBlockCounter(WU.getBlockCounter());
+
+ // Retrieve the node.
+ ExplodedNode *Node = WU.getNode();
+
+ dispatchWorkItem(Node, Node->getLocation(), WU);
+ }
+ SubEng.processEndWorklist(hasWorkRemaining());
+ return WList->hasWork();
+}
+
+void CoreEngine::dispatchWorkItem(ExplodedNode* Pred, ProgramPoint Loc,
+ const WorkListUnit& WU) {
+ // Dispatch on the location type.
+ switch (Loc.getKind()) {
+ case ProgramPoint::BlockEdgeKind:
+ HandleBlockEdge(Loc.castAs<BlockEdge>(), Pred);
+ break;
+
+ case ProgramPoint::BlockEntranceKind:
+ HandleBlockEntrance(Loc.castAs<BlockEntrance>(), Pred);
+ break;
+
+ case ProgramPoint::BlockExitKind:
+      assert (false && "BlockExit locations never occur in forward analysis.");
+ break;
+
+ case ProgramPoint::CallEnterKind: {
+ CallEnter CEnter = Loc.castAs<CallEnter>();
+ SubEng.processCallEnter(CEnter, Pred);
+ break;
+ }
+
+ case ProgramPoint::CallExitBeginKind:
+ SubEng.processCallExit(Pred);
+ break;
+
+ case ProgramPoint::EpsilonKind: {
+ assert(Pred->hasSinglePred() &&
+ "Assume epsilon has exactly one predecessor by construction");
+ ExplodedNode *PNode = Pred->getFirstPred();
+ dispatchWorkItem(Pred, PNode->getLocation(), WU);
+ break;
+ }
+ default:
+ assert(Loc.getAs<PostStmt>() ||
+ Loc.getAs<PostInitializer>() ||
+ Loc.getAs<PostImplicitCall>() ||
+ Loc.getAs<CallExitEnd>());
+ HandlePostStmt(WU.getBlock(), WU.getIndex(), Pred);
+ break;
+ }
+}
+
+bool CoreEngine::ExecuteWorkListWithInitialState(const LocationContext *L,
+ unsigned Steps,
+ ProgramStateRef InitState,
+ ExplodedNodeSet &Dst) {
+ bool DidNotFinish = ExecuteWorkList(L, Steps, InitState);
+ for (ExplodedGraph::eop_iterator I = G.eop_begin(), E = G.eop_end(); I != E;
+ ++I) {
+ Dst.Add(*I);
+ }
+ return DidNotFinish;
+}
+
+void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
+
+ const CFGBlock *Blk = L.getDst();
+ NodeBuilderContext BuilderCtx(*this, Blk, Pred);
+
+ // Mark this block as visited.
+ const LocationContext *LC = Pred->getLocationContext();
+ FunctionSummaries->markVisitedBasicBlock(Blk->getBlockID(),
+ LC->getDecl(),
+ LC->getCFG()->getNumBlockIDs());
+
+ // Check if we are entering the EXIT block.
+ if (Blk == &(L.getLocationContext()->getCFG()->getExit())) {
+
+ assert (L.getLocationContext()->getCFG()->getExit().size() == 0
+ && "EXIT block cannot contain Stmts.");
+
+ // Process the final state transition.
+ SubEng.processEndOfFunction(BuilderCtx, Pred);
+
+ // This path is done. Don't enqueue any more nodes.
+ return;
+ }
+
+ // Call into the SubEngine to process entering the CFGBlock.
+ ExplodedNodeSet dstNodes;
+ BlockEntrance BE(Blk, Pred->getLocationContext());
+ NodeBuilderWithSinks nodeBuilder(Pred, dstNodes, BuilderCtx, BE);
+ SubEng.processCFGBlockEntrance(L, nodeBuilder, Pred);
+
+ // Auto-generate a node.
+ if (!nodeBuilder.hasGeneratedNodes()) {
+ nodeBuilder.generateNode(Pred->State, Pred);
+ }
+
+ // Enqueue nodes onto the worklist.
+ enqueue(dstNodes);
+}
+
+void CoreEngine::HandleBlockEntrance(const BlockEntrance &L,
+ ExplodedNode *Pred) {
+
+ // Increment the block counter.
+ const LocationContext *LC = Pred->getLocationContext();
+ unsigned BlockId = L.getBlock()->getBlockID();
+ BlockCounter Counter = WList->getBlockCounter();
+ Counter = BCounterFactory.IncrementCount(Counter, LC->getCurrentStackFrame(),
+ BlockId);
+ WList->setBlockCounter(Counter);
+
+ // Process the entrance of the block.
+ if (Optional<CFGElement> E = L.getFirstElement()) {
+ NodeBuilderContext Ctx(*this, L.getBlock(), Pred);
+ SubEng.processCFGElement(*E, Pred, 0, &Ctx);
+ }
+ else
+ HandleBlockExit(L.getBlock(), Pred);
+}
+
+void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
+
+ if (const Stmt *Term = B->getTerminator()) {
+ switch (Term->getStmtClass()) {
+ default:
+ llvm_unreachable("Analysis for this terminator not implemented.");
+
+ case Stmt::CXXBindTemporaryExprClass:
+ HandleCleanupTemporaryBranch(
+ cast<CXXBindTemporaryExpr>(B->getTerminator().getStmt()), B, Pred);
+ return;
+
+ // Model static initializers.
+ case Stmt::DeclStmtClass:
+ HandleStaticInit(cast<DeclStmt>(Term), B, Pred);
+ return;
+
+ case Stmt::BinaryOperatorClass: // '&&' and '||'
+ HandleBranch(cast<BinaryOperator>(Term)->getLHS(), Term, B, Pred);
+ return;
+
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass:
+ HandleBranch(cast<AbstractConditionalOperator>(Term)->getCond(),
+ Term, B, Pred);
+ return;
+
+ // FIXME: Use constant-folding in CFG construction to simplify this
+ // case.
+
+ case Stmt::ChooseExprClass:
+ HandleBranch(cast<ChooseExpr>(Term)->getCond(), Term, B, Pred);
+ return;
+
+ case Stmt::CXXTryStmtClass: {
+ // Generate a node for each of the successors.
+ // Our logic for EH analysis can certainly be improved.
+ for (CFGBlock::const_succ_iterator it = B->succ_begin(),
+ et = B->succ_end(); it != et; ++it) {
+ if (const CFGBlock *succ = *it) {
+ generateNode(BlockEdge(B, succ, Pred->getLocationContext()),
+ Pred->State, Pred);
+ }
+ }
+ return;
+ }
+
+ case Stmt::DoStmtClass:
+ HandleBranch(cast<DoStmt>(Term)->getCond(), Term, B, Pred);
+ return;
+
+ case Stmt::CXXForRangeStmtClass:
+ HandleBranch(cast<CXXForRangeStmt>(Term)->getCond(), Term, B, Pred);
+ return;
+
+ case Stmt::ForStmtClass:
+ HandleBranch(cast<ForStmt>(Term)->getCond(), Term, B, Pred);
+ return;
+
+ case Stmt::ContinueStmtClass:
+ case Stmt::BreakStmtClass:
+ case Stmt::GotoStmtClass:
+ break;
+
+ case Stmt::IfStmtClass:
+ HandleBranch(cast<IfStmt>(Term)->getCond(), Term, B, Pred);
+ return;
+
+ case Stmt::IndirectGotoStmtClass: {
+ // Only 1 successor: the indirect goto dispatch block.
+ assert (B->succ_size() == 1);
+
+ IndirectGotoNodeBuilder
+ builder(Pred, B, cast<IndirectGotoStmt>(Term)->getTarget(),
+ *(B->succ_begin()), this);
+
+ SubEng.processIndirectGoto(builder);
+ return;
+ }
+
+ case Stmt::ObjCForCollectionStmtClass: {
+ // In the case of ObjCForCollectionStmt, it appears twice in a CFG:
+ //
+ // (1) inside a basic block, which represents the binding of the
+ // 'element' variable to a value.
+ // (2) in a terminator, which represents the branch.
+ //
+ // For (1), subengines will bind a value (i.e., 0 or 1) indicating
+      // whether or not the collection contains any more elements. We cannot
+ // just test to see if the element is nil because a container can
+ // contain nil elements.
+ HandleBranch(Term, Term, B, Pred);
+ return;
+ }
+
+ case Stmt::SwitchStmtClass: {
+ SwitchNodeBuilder builder(Pred, B, cast<SwitchStmt>(Term)->getCond(),
+ this);
+
+ SubEng.processSwitch(builder);
+ return;
+ }
+
+ case Stmt::WhileStmtClass:
+ HandleBranch(cast<WhileStmt>(Term)->getCond(), Term, B, Pred);
+ return;
+ }
+ }
+
+  assert (B->succ_size() == 1 &&
+          "Blocks with no terminator should have exactly 1 successor.");
+
+ generateNode(BlockEdge(B, *(B->succ_begin()), Pred->getLocationContext()),
+ Pred->State, Pred);
+}
+
+void CoreEngine::HandleBranch(const Stmt *Cond, const Stmt *Term,
+ const CFGBlock * B, ExplodedNode *Pred) {
+ assert(B->succ_size() == 2);
+ NodeBuilderContext Ctx(*this, B, Pred);
+ ExplodedNodeSet Dst;
+ SubEng.processBranch(Cond, Term, Ctx, Pred, Dst,
+ *(B->succ_begin()), *(B->succ_begin()+1));
+ // Enqueue the new frontier onto the worklist.
+ enqueue(Dst);
+}
+
+void CoreEngine::HandleCleanupTemporaryBranch(const CXXBindTemporaryExpr *BTE,
+ const CFGBlock *B,
+ ExplodedNode *Pred) {
+ assert(B->succ_size() == 2);
+ NodeBuilderContext Ctx(*this, B, Pred);
+ ExplodedNodeSet Dst;
+ SubEng.processCleanupTemporaryBranch(BTE, Ctx, Pred, Dst, *(B->succ_begin()),
+ *(B->succ_begin() + 1));
+ // Enqueue the new frontier onto the worklist.
+ enqueue(Dst);
+}
+
+void CoreEngine::HandleStaticInit(const DeclStmt *DS, const CFGBlock *B,
+ ExplodedNode *Pred) {
+ assert(B->succ_size() == 2);
+ NodeBuilderContext Ctx(*this, B, Pred);
+ ExplodedNodeSet Dst;
+ SubEng.processStaticInitializer(DS, Ctx, Pred, Dst,
+ *(B->succ_begin()), *(B->succ_begin()+1));
+ // Enqueue the new frontier onto the worklist.
+ enqueue(Dst);
+}
+
+
+void CoreEngine::HandlePostStmt(const CFGBlock *B, unsigned StmtIdx,
+ ExplodedNode *Pred) {
+ assert(B);
+ assert(!B->empty());
+
+ if (StmtIdx == B->size())
+ HandleBlockExit(B, Pred);
+ else {
+ NodeBuilderContext Ctx(*this, B, Pred);
+ SubEng.processCFGElement((*B)[StmtIdx], Pred, StmtIdx, &Ctx);
+ }
+}
+
+/// generateNode - Utility method to generate nodes, hook up successors,
+/// and add nodes to the worklist.
+void CoreEngine::generateNode(const ProgramPoint &Loc,
+ ProgramStateRef State,
+ ExplodedNode *Pred) {
+
+ bool IsNew;
+ ExplodedNode *Node = G.getNode(Loc, State, false, &IsNew);
+
+ if (Pred)
+ Node->addPredecessor(Pred, G); // Link 'Node' with its predecessor.
+ else {
+ assert (IsNew);
+ G.addRoot(Node); // 'Node' has no predecessor. Make it a root.
+ }
+
+ // Only add 'Node' to the worklist if it was freshly generated.
+ if (IsNew) WList->enqueue(Node);
+}
+
+void CoreEngine::enqueueStmtNode(ExplodedNode *N,
+ const CFGBlock *Block, unsigned Idx) {
+ assert(Block);
+ assert (!N->isSink());
+
+ // Check if this node entered a callee.
+ if (N->getLocation().getAs<CallEnter>()) {
+ // Still use the index of the CallExpr. It's needed to create the callee
+ // StackFrameContext.
+ WList->enqueue(N, Block, Idx);
+ return;
+ }
+
+ // Do not create extra nodes. Move to the next CFG element.
+ if (N->getLocation().getAs<PostInitializer>() ||
+ N->getLocation().getAs<PostImplicitCall>()) {
+ WList->enqueue(N, Block, Idx+1);
+ return;
+ }
+
+ if (N->getLocation().getAs<EpsilonPoint>()) {
+ WList->enqueue(N, Block, Idx);
+ return;
+ }
+
+ if ((*Block)[Idx].getKind() == CFGElement::NewAllocator) {
+ WList->enqueue(N, Block, Idx+1);
+ return;
+ }
+
+ // At this point, we know we're processing a normal statement.
+ CFGStmt CS = (*Block)[Idx].castAs<CFGStmt>();
+ PostStmt Loc(CS.getStmt(), N->getLocationContext());
+
+ if (Loc == N->getLocation().withTag(nullptr)) {
+    // Note: 'N' should be a fresh node; if it were not, it should not have
+    // been a member of Deferred.
+ WList->enqueue(N, Block, Idx+1);
+ return;
+ }
+
+ bool IsNew;
+ ExplodedNode *Succ = G.getNode(Loc, N->getState(), false, &IsNew);
+ Succ->addPredecessor(N, G);
+
+ if (IsNew)
+ WList->enqueue(Succ, Block, Idx+1);
+}
+
+ExplodedNode *CoreEngine::generateCallExitBeginNode(ExplodedNode *N) {
+ // Create a CallExitBegin node and enqueue it.
+ const StackFrameContext *LocCtx
+ = cast<StackFrameContext>(N->getLocationContext());
+
+ // Use the callee location context.
+ CallExitBegin Loc(LocCtx);
+
+ bool isNew;
+ ExplodedNode *Node = G.getNode(Loc, N->getState(), false, &isNew);
+ Node->addPredecessor(N, G);
+ return isNew ? Node : nullptr;
+}
+
+
+void CoreEngine::enqueue(ExplodedNodeSet &Set) {
+ for (ExplodedNodeSet::iterator I = Set.begin(),
+ E = Set.end(); I != E; ++I) {
+ WList->enqueue(*I);
+ }
+}
+
+void CoreEngine::enqueue(ExplodedNodeSet &Set,
+ const CFGBlock *Block, unsigned Idx) {
+ for (ExplodedNodeSet::iterator I = Set.begin(),
+ E = Set.end(); I != E; ++I) {
+ enqueueStmtNode(*I, Block, Idx);
+ }
+}
+
+void CoreEngine::enqueueEndOfFunction(ExplodedNodeSet &Set) {
+ for (ExplodedNodeSet::iterator I = Set.begin(), E = Set.end(); I != E; ++I) {
+ ExplodedNode *N = *I;
+ // If we are in an inlined call, generate CallExitBegin node.
+ if (N->getLocationContext()->getParent()) {
+ N = generateCallExitBeginNode(N);
+ if (N)
+ WList->enqueue(N);
+ } else {
+ // TODO: We should run remove dead bindings here.
+ G.addEndOfPath(N);
+ NumPathsExplored++;
+ }
+ }
+}
+
+
+void NodeBuilder::anchor() { }
+
+ExplodedNode* NodeBuilder::generateNodeImpl(const ProgramPoint &Loc,
+ ProgramStateRef State,
+ ExplodedNode *FromN,
+ bool MarkAsSink) {
+ HasGeneratedNodes = true;
+ bool IsNew;
+ ExplodedNode *N = C.Eng.G.getNode(Loc, State, MarkAsSink, &IsNew);
+ N->addPredecessor(FromN, C.Eng.G);
+ Frontier.erase(FromN);
+
+ if (!IsNew)
+ return nullptr;
+
+ if (!MarkAsSink)
+ Frontier.Add(N);
+
+ return N;
+}
+
+void NodeBuilderWithSinks::anchor() { }
+
+StmtNodeBuilder::~StmtNodeBuilder() {
+ if (EnclosingBldr)
+ for (ExplodedNodeSet::iterator I = Frontier.begin(),
+ E = Frontier.end(); I != E; ++I )
+ EnclosingBldr->addNodes(*I);
+}
+
+void BranchNodeBuilder::anchor() { }
+
+ExplodedNode *BranchNodeBuilder::generateNode(ProgramStateRef State,
+ bool branch,
+ ExplodedNode *NodePred) {
+ // If the branch has been marked infeasible we should not generate a node.
+ if (!isFeasible(branch))
+ return nullptr;
+
+ ProgramPoint Loc = BlockEdge(C.Block, branch ? DstT:DstF,
+ NodePred->getLocationContext());
+ ExplodedNode *Succ = generateNodeImpl(Loc, State, NodePred);
+ return Succ;
+}
+
+ExplodedNode*
+IndirectGotoNodeBuilder::generateNode(const iterator &I,
+ ProgramStateRef St,
+ bool IsSink) {
+ bool IsNew;
+ ExplodedNode *Succ =
+ Eng.G.getNode(BlockEdge(Src, I.getBlock(), Pred->getLocationContext()),
+ St, IsSink, &IsNew);
+ Succ->addPredecessor(Pred, Eng.G);
+
+ if (!IsNew)
+ return nullptr;
+
+ if (!IsSink)
+ Eng.WList->enqueue(Succ);
+
+ return Succ;
+}
+
+
+ExplodedNode*
+SwitchNodeBuilder::generateCaseStmtNode(const iterator &I,
+ ProgramStateRef St) {
+
+ bool IsNew;
+ ExplodedNode *Succ =
+ Eng.G.getNode(BlockEdge(Src, I.getBlock(), Pred->getLocationContext()),
+ St, false, &IsNew);
+ Succ->addPredecessor(Pred, Eng.G);
+ if (!IsNew)
+ return nullptr;
+
+ Eng.WList->enqueue(Succ);
+ return Succ;
+}
+
+
+ExplodedNode*
+SwitchNodeBuilder::generateDefaultCaseNode(ProgramStateRef St,
+ bool IsSink) {
+ // Get the block for the default case.
+ assert(Src->succ_rbegin() != Src->succ_rend());
+ CFGBlock *DefaultBlock = *Src->succ_rbegin();
+
+ // Sanity check for default blocks that are unreachable and not caught
+ // by earlier stages.
+ if (!DefaultBlock)
+ return nullptr;
+
+ bool IsNew;
+ ExplodedNode *Succ =
+ Eng.G.getNode(BlockEdge(Src, DefaultBlock, Pred->getLocationContext()),
+ St, IsSink, &IsNew);
+ Succ->addPredecessor(Pred, Eng.G);
+
+ if (!IsNew)
+ return nullptr;
+
+ if (!IsSink)
+ Eng.WList->enqueue(Succ);
+
+ return Succ;
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp
new file mode 100644
index 0000000..fd35b66
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp
@@ -0,0 +1,51 @@
+//==- DynamicTypeMap.cpp - Dynamic Type Info related APIs ----------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines APIs that track and query dynamic type information. This
+// information can be used to devirtualize calls during the symbolic exection
+// or do type checking.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeMap.h"
+
+namespace clang {
+namespace ento {
+
+DynamicTypeInfo getDynamicTypeInfo(ProgramStateRef State,
+ const MemRegion *Reg) {
+ Reg = Reg->StripCasts();
+
+ // Look up the dynamic type in the GDM.
+ const DynamicTypeInfo *GDMType = State->get<DynamicTypeMap>(Reg);
+ if (GDMType)
+ return *GDMType;
+
+ // Otherwise, fall back to what we know about the region.
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(Reg))
+ return DynamicTypeInfo(TR->getLocationType(), /*CanBeSubclass=*/false);
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Reg)) {
+ SymbolRef Sym = SR->getSymbol();
+ return DynamicTypeInfo(Sym->getType());
+ }
+
+ return DynamicTypeInfo();
+}
+
+ProgramStateRef setDynamicTypeInfo(ProgramStateRef State, const MemRegion *Reg,
+ DynamicTypeInfo NewTy) {
+ Reg = Reg->StripCasts();
+ ProgramStateRef NewState = State->set<DynamicTypeMap>(Reg, NewTy);
+ assert(NewState);
+ return NewState;
+}
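
Typical use from inside a checker callback, sketched with C as the
CheckerContext and hypothetical WidgetTy QualType and region Reg:

    // Record that Reg holds exactly a Widget (no subclass possible).
    ProgramStateRef State = C.getState();
    State = setDynamicTypeInfo(State, Reg,
                               DynamicTypeInfo(WidgetTy,
                                               /*CanBeSubclass=*/false));
    C.addTransition(State);

    // Later, a devirtualization query:
    DynamicTypeInfo DTI = getDynamicTypeInfo(State, Reg);
    if (DTI.isValid() && !DTI.canBeASubClass()) {
      // Virtual calls through Reg can be resolved to WidgetTy's methods.
    }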
+
+} // namespace ento
+} // namespace clang
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp
new file mode 100644
index 0000000..e2cb52cb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp
@@ -0,0 +1,213 @@
+//== Environment.cpp - Map from Stmt* to Locations/Values -------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Environment and EnvironmentManager classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+static const Expr *ignoreTransparentExprs(const Expr *E) {
+ E = E->IgnoreParens();
+
+ switch (E->getStmtClass()) {
+ case Stmt::OpaqueValueExprClass:
+ E = cast<OpaqueValueExpr>(E)->getSourceExpr();
+ break;
+ case Stmt::ExprWithCleanupsClass:
+ E = cast<ExprWithCleanups>(E)->getSubExpr();
+ break;
+ case Stmt::CXXBindTemporaryExprClass:
+ E = cast<CXXBindTemporaryExpr>(E)->getSubExpr();
+ break;
+ case Stmt::SubstNonTypeTemplateParmExprClass:
+ E = cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement();
+ break;
+ default:
+ // This is the base case: we can't look through more than we already have.
+ return E;
+ }
+
+ return ignoreTransparentExprs(E);
+}
+
+static const Stmt *ignoreTransparentExprs(const Stmt *S) {
+ if (const Expr *E = dyn_cast<Expr>(S))
+ return ignoreTransparentExprs(E);
+ return S;
+}
+
+EnvironmentEntry::EnvironmentEntry(const Stmt *S, const LocationContext *L)
+ : std::pair<const Stmt *,
+ const StackFrameContext *>(ignoreTransparentExprs(S),
+ L ? L->getCurrentStackFrame()
+ : nullptr) {}
+
+SVal Environment::lookupExpr(const EnvironmentEntry &E) const {
+ const SVal* X = ExprBindings.lookup(E);
+ if (X) {
+ SVal V = *X;
+ return V;
+ }
+ return UnknownVal();
+}
+
+SVal Environment::getSVal(const EnvironmentEntry &Entry,
+ SValBuilder& svalBuilder) const {
+ const Stmt *S = Entry.getStmt();
+ const LocationContext *LCtx = Entry.getLocationContext();
+
+ switch (S->getStmtClass()) {
+ case Stmt::CXXBindTemporaryExprClass:
+ case Stmt::ExprWithCleanupsClass:
+ case Stmt::GenericSelectionExprClass:
+ case Stmt::OpaqueValueExprClass:
+ case Stmt::ParenExprClass:
+ case Stmt::SubstNonTypeTemplateParmExprClass:
+ llvm_unreachable("Should have been handled by ignoreTransparentExprs");
+
+ case Stmt::AddrLabelExprClass:
+ case Stmt::CharacterLiteralClass:
+ case Stmt::CXXBoolLiteralExprClass:
+ case Stmt::CXXScalarValueInitExprClass:
+ case Stmt::ImplicitValueInitExprClass:
+ case Stmt::IntegerLiteralClass:
+ case Stmt::ObjCBoolLiteralExprClass:
+ case Stmt::CXXNullPtrLiteralExprClass:
+ case Stmt::ObjCStringLiteralClass:
+ case Stmt::StringLiteralClass:
+ case Stmt::TypeTraitExprClass:
+ // Known constants; defer to SValBuilder.
+ return svalBuilder.getConstantVal(cast<Expr>(S)).getValue();
+
+ case Stmt::ReturnStmtClass: {
+ const ReturnStmt *RS = cast<ReturnStmt>(S);
+ if (const Expr *RE = RS->getRetValue())
+ return getSVal(EnvironmentEntry(RE, LCtx), svalBuilder);
+ return UndefinedVal();
+ }
+
+ // Handle all other Stmt* using a lookup.
+ default:
+ return lookupExpr(EnvironmentEntry(S, LCtx));
+ }
+}
+
+Environment EnvironmentManager::bindExpr(Environment Env,
+ const EnvironmentEntry &E,
+ SVal V,
+ bool Invalidate) {
+ if (V.isUnknown()) {
+ if (Invalidate)
+ return Environment(F.remove(Env.ExprBindings, E));
+ else
+ return Env;
+ }
+ return Environment(F.add(Env.ExprBindings, E, V));
+}
+
+namespace {
+class MarkLiveCallback final : public SymbolVisitor {
+ SymbolReaper &SymReaper;
+public:
+ MarkLiveCallback(SymbolReaper &symreaper) : SymReaper(symreaper) {}
+ bool VisitSymbol(SymbolRef sym) override {
+ SymReaper.markLive(sym);
+ return true;
+ }
+ bool VisitMemRegion(const MemRegion *R) override {
+ SymReaper.markLive(R);
+ return true;
+ }
+};
+} // end anonymous namespace
+
+// removeDeadBindings:
+// - Remove subexpression bindings.
+// - Remove dead block expression bindings.
+// - Keep live block expression bindings:
+// - Mark their reachable symbols live in SymbolReaper,
+// see ScanReachableSymbols.
+// - Mark the region in DRoots if the binding is a loc::MemRegionVal.
+Environment
+EnvironmentManager::removeDeadBindings(Environment Env,
+ SymbolReaper &SymReaper,
+ ProgramStateRef ST) {
+
+ // We construct a new Environment object entirely, as this is cheaper than
+ // individually removing all the subexpression bindings (which will greatly
+ // outnumber block-level expression bindings).
+ Environment NewEnv = getInitialEnvironment();
+
+ MarkLiveCallback CB(SymReaper);
+  ScanReachableSymbols RSScanner(ST, CB);
+
+ llvm::ImmutableMapRef<EnvironmentEntry,SVal>
+ EBMapRef(NewEnv.ExprBindings.getRootWithoutRetain(),
+ F.getTreeFactory());
+
+ // Iterate over the block-expr bindings.
+ for (Environment::iterator I = Env.begin(), E = Env.end();
+ I != E; ++I) {
+
+ const EnvironmentEntry &BlkExpr = I.getKey();
+ const SVal &X = I.getData();
+
+ if (SymReaper.isLive(BlkExpr.getStmt(), BlkExpr.getLocationContext())) {
+ // Copy the binding to the new map.
+ EBMapRef = EBMapRef.add(BlkExpr, X);
+
+ // Mark all symbols in the block expr's value live.
+      RSScanner.scan(X);
+ continue;
+ } else {
+ SymExpr::symbol_iterator SI = X.symbol_begin(), SE = X.symbol_end();
+ for (; SI != SE; ++SI)
+ SymReaper.maybeDead(*SI);
+ }
+ }
+
+ NewEnv.ExprBindings = EBMapRef.asImmutableMap();
+ return NewEnv;
+}
+
+void Environment::print(raw_ostream &Out, const char *NL,
+ const char *Sep) const {
+ bool isFirst = true;
+
+ for (Environment::iterator I = begin(), E = end(); I != E; ++I) {
+ const EnvironmentEntry &En = I.getKey();
+
+ if (isFirst) {
+ Out << NL << NL
+ << "Expressions:"
+ << NL;
+ isFirst = false;
+ } else {
+ Out << NL;
+ }
+
+ const Stmt *S = En.getStmt();
+ assert(S != nullptr && "Expected non-null Stmt");
+
+ Out << " (" << (const void*) En.getLocationContext() << ','
+ << (const void*) S << ") ";
+ LangOptions LO; // FIXME.
+ S->printPretty(Out, nullptr, PrintingPolicy(LO));
+ Out << " : " << I.getData();
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
new file mode 100644
index 0000000..8a09720
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -0,0 +1,443 @@
+//=-- ExplodedGraph.cpp - Local, Path-Sens. "Exploded Graph" -*- C++ -*------=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the template classes ExplodedNode and ExplodedGraph,
+// which represent a path-sensitive, intra-procedural "exploded graph."
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/Stmt.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include <vector>
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// Node auditing.
+//===----------------------------------------------------------------------===//
+
+// An out of line virtual method to provide a home for the class vtable.
+ExplodedNode::Auditor::~Auditor() {}
+
+#ifndef NDEBUG
+static ExplodedNode::Auditor* NodeAuditor = nullptr;
+#endif
+
+void ExplodedNode::SetAuditor(ExplodedNode::Auditor* A) {
+#ifndef NDEBUG
+ NodeAuditor = A;
+#endif
+}
+
+//===----------------------------------------------------------------------===//
+// Cleanup.
+//===----------------------------------------------------------------------===//
+
+ExplodedGraph::ExplodedGraph()
+ : NumNodes(0), ReclaimNodeInterval(0) {}
+
+ExplodedGraph::~ExplodedGraph() {}
+
+//===----------------------------------------------------------------------===//
+// Node reclamation.
+//===----------------------------------------------------------------------===//
+
+bool ExplodedGraph::isInterestingLValueExpr(const Expr *Ex) {
+ if (!Ex->isLValue())
+ return false;
+ return isa<DeclRefExpr>(Ex) ||
+ isa<MemberExpr>(Ex) ||
+ isa<ObjCIvarRefExpr>(Ex);
+}
+
+bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
+  // First, we only consider nodes for reclamation if all of the following
+  // conditions apply:
+ //
+ // (1) 1 predecessor (that has one successor)
+ // (2) 1 successor (that has one predecessor)
+ //
+  // If a node has no successor, it is on the "frontier", while a node
+ // with no predecessor is a root.
+ //
+ // After these prerequisites, we discard all "filler" nodes that
+ // are used only for intermediate processing, and are not essential
+ // for analyzer history:
+ //
+ // (a) PreStmtPurgeDeadSymbols
+ //
+ // We then discard all other nodes where *all* of the following conditions
+ // apply:
+ //
+ // (3) The ProgramPoint is for a PostStmt, but not a PostStore.
+ // (4) There is no 'tag' for the ProgramPoint.
+ // (5) The 'store' is the same as the predecessor.
+ // (6) The 'GDM' is the same as the predecessor.
+ // (7) The LocationContext is the same as the predecessor.
+ // (8) Expressions that are *not* lvalue expressions.
+  // (9) The PostStmt is for a consumed Stmt or Expr.
+ // (10) The successor is neither a CallExpr StmtPoint nor a CallEnter or
+ // PreImplicitCall (so that we would be able to find it when retrying a
+ // call with no inlining).
+ // FIXME: It may be safe to reclaim PreCall and PostCall nodes as well.
+
+ // Conditions 1 and 2.
+ if (node->pred_size() != 1 || node->succ_size() != 1)
+ return false;
+
+ const ExplodedNode *pred = *(node->pred_begin());
+ if (pred->succ_size() != 1)
+ return false;
+
+ const ExplodedNode *succ = *(node->succ_begin());
+ if (succ->pred_size() != 1)
+ return false;
+
+ // Now reclaim any nodes that are (by definition) not essential to
+ // analysis history and are not consulted by any client code.
+ ProgramPoint progPoint = node->getLocation();
+ if (progPoint.getAs<PreStmtPurgeDeadSymbols>())
+ return !progPoint.getTag();
+
+ // Condition 3.
+ if (!progPoint.getAs<PostStmt>() || progPoint.getAs<PostStore>())
+ return false;
+
+ // Condition 4.
+ if (progPoint.getTag())
+ return false;
+
+ // Conditions 5, 6, and 7.
+ ProgramStateRef state = node->getState();
+ ProgramStateRef pred_state = pred->getState();
+ if (state->store != pred_state->store || state->GDM != pred_state->GDM ||
+ progPoint.getLocationContext() != pred->getLocationContext())
+ return false;
+
+ // All further checks require expressions. As per #3, we know that we have
+ // a PostStmt.
+ const Expr *Ex = dyn_cast<Expr>(progPoint.castAs<PostStmt>().getStmt());
+ if (!Ex)
+ return false;
+
+ // Condition 8.
+ // Do not collect nodes for "interesting" lvalue expressions since they are
+ // used extensively for generating path diagnostics.
+ if (isInterestingLValueExpr(Ex))
+ return false;
+
+ // Condition 9.
+ // Do not collect nodes for non-consumed Stmt or Expr to ensure precise
+ // diagnostic generation; specifically, so that we could anchor arrows
+ // pointing to the beginning of statements (as written in code).
+ ParentMap &PM = progPoint.getLocationContext()->getParentMap();
+ if (!PM.isConsumedExpr(Ex))
+ return false;
+
+ // Condition 10.
+ const ProgramPoint SuccLoc = succ->getLocation();
+ if (Optional<StmtPoint> SP = SuccLoc.getAs<StmtPoint>())
+ if (CallEvent::isCallStmt(SP->getStmt()))
+ return false;
+
+ // Condition 10, continuation.
+ if (SuccLoc.getAs<CallEnter>() || SuccLoc.getAs<PreImplicitCall>())
+ return false;
+
+ return true;
+}
+
+void ExplodedGraph::collectNode(ExplodedNode *node) {
+ // Removing a node means:
+  // (a) changing the predecessor's successor to the successor of this node,
+  // (b) changing the successor's predecessor to the predecessor of this node,
+  // (c) putting 'node' onto FreeNodes.
+ assert(node->pred_size() == 1 || node->succ_size() == 1);
+ ExplodedNode *pred = *(node->pred_begin());
+ ExplodedNode *succ = *(node->succ_begin());
+ pred->replaceSuccessor(succ);
+ succ->replacePredecessor(pred);
+ FreeNodes.push_back(node);
+ Nodes.RemoveNode(node);
+ --NumNodes;
+ node->~ExplodedNode();
+}
+
+void ExplodedGraph::reclaimRecentlyAllocatedNodes() {
+ if (ChangedNodes.empty())
+ return;
+
+ // Only periodically reclaim nodes so that we can build up a set of
+ // nodes that meet the reclamation criteria. Freshly created nodes
+  // by definition have no successor, and thus cannot be reclaimed (see
+  // shouldCollect() above).
+ assert(ReclaimCounter > 0);
+ if (--ReclaimCounter != 0)
+ return;
+ ReclaimCounter = ReclaimNodeInterval;
+
+ for (NodeVector::iterator it = ChangedNodes.begin(), et = ChangedNodes.end();
+ it != et; ++it) {
+ ExplodedNode *node = *it;
+ if (shouldCollect(node))
+ collectNode(node);
+ }
+ ChangedNodes.clear();
+}
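+
+// Illustrative use (a sketch, not part of this change): a client that wants
+// periodic reclamation enables it once and then invokes the reclaim hook at a
+// convenient point, e.g. before processing each statement:
+//
+//   ExplodedGraph G;
+//   G.enableNodeReclamation(/*Interval=*/1000); // sets ReclaimNodeInterval
+//   ...
+//   G.reclaimRecentlyAllocatedNodes(); // frees nodes passing shouldCollect()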
+
+//===----------------------------------------------------------------------===//
+// ExplodedNode.
+//===----------------------------------------------------------------------===//
+
+// A NodeGroup's storage type is actually very much like a TinyPtrVector:
+// it can be either a pointer to a single ExplodedNode, or a pointer to a
+// BumpVector allocated with the ExplodedGraph's allocator. This allows the
+// common case of single-node NodeGroups to be implemented with no extra memory.
+//
+// Consequently, each of the NodeGroup methods has up to four cases to handle:
+// 1. The flag is set and this group does not actually contain any nodes.
+// 2. The group is empty, in which case the storage value is null.
+// 3. The group contains a single node.
+// 4. The group contains more than one node.
+typedef BumpVector<ExplodedNode *> ExplodedNodeVector;
+typedef llvm::PointerUnion<ExplodedNode *, ExplodedNodeVector *> GroupStorage;
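+
+// For illustration (a sketch mirroring the accessors below): once getFlag()
+// has been checked (case 1), reading the storage back follows the
+// PointerUnion discrimination directly:
+//
+//   const GroupStorage &Storage = ...;
+//   if (Storage.isNull())
+//     ...                                        // case 2: empty group
+//   else if (ExplodedNodeVector *V = Storage.dyn_cast<ExplodedNodeVector *>())
+//     ...                                        // case 4: iterate over *V
+//   else
+//     ...                                        // case 3: a single node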
+
+void ExplodedNode::addPredecessor(ExplodedNode *V, ExplodedGraph &G) {
+  assert(!V->isSink());
+ Preds.addNode(V, G);
+ V->Succs.addNode(this, G);
+#ifndef NDEBUG
+ if (NodeAuditor) NodeAuditor->AddEdge(V, this);
+#endif
+}
+
+void ExplodedNode::NodeGroup::replaceNode(ExplodedNode *node) {
+ assert(!getFlag());
+
+ GroupStorage &Storage = reinterpret_cast<GroupStorage&>(P);
+ assert(Storage.is<ExplodedNode *>());
+ Storage = node;
+ assert(Storage.is<ExplodedNode *>());
+}
+
+void ExplodedNode::NodeGroup::addNode(ExplodedNode *N, ExplodedGraph &G) {
+ assert(!getFlag());
+
+ GroupStorage &Storage = reinterpret_cast<GroupStorage&>(P);
+ if (Storage.isNull()) {
+ Storage = N;
+ assert(Storage.is<ExplodedNode *>());
+ return;
+ }
+
+ ExplodedNodeVector *V = Storage.dyn_cast<ExplodedNodeVector *>();
+
+ if (!V) {
+ // Switch from single-node to multi-node representation.
+ ExplodedNode *Old = Storage.get<ExplodedNode *>();
+
+ BumpVectorContext &Ctx = G.getNodeAllocator();
+ V = G.getAllocator().Allocate<ExplodedNodeVector>();
+ new (V) ExplodedNodeVector(Ctx, 4);
+ V->push_back(Old, Ctx);
+
+ Storage = V;
+ assert(!getFlag());
+ assert(Storage.is<ExplodedNodeVector *>());
+ }
+
+ V->push_back(N, G.getNodeAllocator());
+}
+
+unsigned ExplodedNode::NodeGroup::size() const {
+ if (getFlag())
+ return 0;
+
+ const GroupStorage &Storage = reinterpret_cast<const GroupStorage &>(P);
+ if (Storage.isNull())
+ return 0;
+ if (ExplodedNodeVector *V = Storage.dyn_cast<ExplodedNodeVector *>())
+ return V->size();
+ return 1;
+}
+
+ExplodedNode * const *ExplodedNode::NodeGroup::begin() const {
+ if (getFlag())
+ return nullptr;
+
+ const GroupStorage &Storage = reinterpret_cast<const GroupStorage &>(P);
+ if (Storage.isNull())
+ return nullptr;
+ if (ExplodedNodeVector *V = Storage.dyn_cast<ExplodedNodeVector *>())
+ return V->begin();
+ return Storage.getAddrOfPtr1();
+}
+
+ExplodedNode * const *ExplodedNode::NodeGroup::end() const {
+ if (getFlag())
+ return nullptr;
+
+ const GroupStorage &Storage = reinterpret_cast<const GroupStorage &>(P);
+ if (Storage.isNull())
+ return nullptr;
+ if (ExplodedNodeVector *V = Storage.dyn_cast<ExplodedNodeVector *>())
+ return V->end();
+ return Storage.getAddrOfPtr1() + 1;
+}
+
+ExplodedNode *ExplodedGraph::getNode(const ProgramPoint &L,
+ ProgramStateRef State,
+ bool IsSink,
+ bool* IsNew) {
+ // Profile 'State' to determine if we already have an existing node.
+ llvm::FoldingSetNodeID profile;
+ void *InsertPos = nullptr;
+
+ NodeTy::Profile(profile, L, State, IsSink);
+ NodeTy* V = Nodes.FindNodeOrInsertPos(profile, InsertPos);
+
+ if (!V) {
+ if (!FreeNodes.empty()) {
+ V = FreeNodes.back();
+ FreeNodes.pop_back();
+ }
+ else {
+ // Allocate a new node.
+ V = (NodeTy*) getAllocator().Allocate<NodeTy>();
+ }
+
+ new (V) NodeTy(L, State, IsSink);
+
+ if (ReclaimNodeInterval)
+ ChangedNodes.push_back(V);
+
+ // Insert the node into the node set and return it.
+ Nodes.InsertNode(V, InsertPos);
+ ++NumNodes;
+
+ if (IsNew) *IsNew = true;
+ }
+ else
+ if (IsNew) *IsNew = false;
+
+ return V;
+}
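+
+// Illustrative use (a sketch; 'WList' is a hypothetical worklist): callers
+// typically consult IsNew to decide whether the node still needs processing:
+//
+//   bool IsNew = false;
+//   ExplodedNode *N = G.getNode(Loc, State, /*IsSink=*/false, &IsNew);
+//   if (IsNew)
+//     WList->enqueue(N); // only fresh nodes are worth exploring further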
+
+std::unique_ptr<ExplodedGraph>
+ExplodedGraph::trim(ArrayRef<const NodeTy *> Sinks,
+ InterExplodedGraphMap *ForwardMap,
+ InterExplodedGraphMap *InverseMap) const {
+
+ if (Nodes.empty())
+ return nullptr;
+
+ typedef llvm::DenseSet<const ExplodedNode*> Pass1Ty;
+ Pass1Ty Pass1;
+
+ typedef InterExplodedGraphMap Pass2Ty;
+ InterExplodedGraphMap Pass2Scratch;
+ Pass2Ty &Pass2 = ForwardMap ? *ForwardMap : Pass2Scratch;
+
+ SmallVector<const ExplodedNode*, 10> WL1, WL2;
+
+ // ===- Pass 1 (reverse DFS) -===
+ for (ArrayRef<const NodeTy *>::iterator I = Sinks.begin(), E = Sinks.end();
+ I != E; ++I) {
+ if (*I)
+ WL1.push_back(*I);
+ }
+
+ // Process the first worklist until it is empty.
+ while (!WL1.empty()) {
+ const ExplodedNode *N = WL1.pop_back_val();
+
+ // Have we already visited this node? If so, continue to the next one.
+ if (!Pass1.insert(N).second)
+ continue;
+
+    // If this is a root, enqueue it to the second worklist.
+ if (N->Preds.empty()) {
+ WL2.push_back(N);
+ continue;
+ }
+
+ // Visit our predecessors and enqueue them.
+ WL1.append(N->Preds.begin(), N->Preds.end());
+ }
+
+  // If we never hit a root, return a null pointer for the new graph.
+ if (WL2.empty())
+ return nullptr;
+
+ // Create an empty graph.
+ std::unique_ptr<ExplodedGraph> G = MakeEmptyGraph();
+
+ // ===- Pass 2 (forward DFS to construct the new graph) -===
+ while (!WL2.empty()) {
+ const ExplodedNode *N = WL2.pop_back_val();
+
+ // Skip this node if we have already processed it.
+ if (Pass2.find(N) != Pass2.end())
+ continue;
+
+ // Create the corresponding node in the new graph and record the mapping
+ // from the old node to the new node.
+ ExplodedNode *NewN = G->getNode(N->getLocation(), N->State, N->isSink(),
+ nullptr);
+ Pass2[N] = NewN;
+
+ // Also record the reverse mapping from the new node to the old node.
+ if (InverseMap) (*InverseMap)[NewN] = N;
+
+ // If this node is a root, designate it as such in the graph.
+ if (N->Preds.empty())
+ G->addRoot(NewN);
+
+ // In the case that some of the intended predecessors of NewN have already
+ // been created, we should hook them up as predecessors.
+
+ // Walk through the predecessors of 'N' and hook up their corresponding
+ // nodes in the new graph (if any) to the freshly created node.
+ for (ExplodedNode::pred_iterator I = N->Preds.begin(), E = N->Preds.end();
+ I != E; ++I) {
+ Pass2Ty::iterator PI = Pass2.find(*I);
+ if (PI == Pass2.end())
+ continue;
+
+ NewN->addPredecessor(const_cast<ExplodedNode *>(PI->second), *G);
+ }
+
+ // In the case that some of the intended successors of NewN have already
+ // been created, we should hook them up as successors. Otherwise, enqueue
+ // the new nodes from the original graph that should have nodes created
+ // in the new graph.
+ for (ExplodedNode::succ_iterator I = N->Succs.begin(), E = N->Succs.end();
+ I != E; ++I) {
+ Pass2Ty::iterator PI = Pass2.find(*I);
+ if (PI != Pass2.end()) {
+ const_cast<ExplodedNode *>(PI->second)->addPredecessor(NewN, *G);
+ continue;
+ }
+
+ // Enqueue nodes to the worklist that were marked during pass 1.
+ if (Pass1.count(*I))
+ WL2.push_back(*I);
+ }
+ }
+
+ return G;
+}
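+
+// Illustrative use (a sketch, assuming 'ErrorNode' is a node of interest):
+// trim the graph down to the paths that can reach a single diagnostic node,
+// keeping the old-to-new and new-to-old node mappings:
+//
+//   InterExplodedGraphMap ForwardMap, InverseMap;
+//   std::unique_ptr<ExplodedGraph> Trimmed =
+//       G.trim(ErrorNode, &ForwardMap, &InverseMap);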
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
new file mode 100644
index 0000000..662b0a2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -0,0 +1,2749 @@
+//=-- ExprEngine.cpp - Path-Sensitive Expression-Level Dataflow ---*- C++ -*-=
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a meta-engine for path-sensitive dataflow analysis that
+// is built on CoreEngine, but provides the boilerplate to execute transfer
+// functions and build the ExplodedGraph at the expression level.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "PrettyStackTraceLocationContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h"
+#include "llvm/ADT/ImmutableList.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/raw_ostream.h"
+
+#ifndef NDEBUG
+#include "llvm/Support/GraphWriter.h"
+#endif
+
+using namespace clang;
+using namespace ento;
+using llvm::APSInt;
+
+#define DEBUG_TYPE "ExprEngine"
+
+STATISTIC(NumRemoveDeadBindings,
+ "The # of times RemoveDeadBindings is called");
+STATISTIC(NumMaxBlockCountReached,
+ "The # of aborted paths due to reaching the maximum block count in "
+ "a top level function");
+STATISTIC(NumMaxBlockCountReachedInInlined,
+ "The # of aborted paths due to reaching the maximum block count in "
+ "an inlined function");
+STATISTIC(NumTimesRetriedWithoutInlining,
+ "The # of times we re-evaluated a call without inlining");
+
+typedef std::pair<const CXXBindTemporaryExpr *, const StackFrameContext *>
+ CXXBindTemporaryContext;
+
+// Keeps track of whether CXXBindTemporaryExpr nodes have been evaluated.
+// The StackFrameContext ensures that nested calls due to inlined recursive
+// functions do not interfere.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(InitializedTemporariesSet,
+ llvm::ImmutableSet<CXXBindTemporaryContext>)
+
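+// The generated ProgramState accessors for this trait appear later in this
+// file, e.g.:
+//
+//   State->contains<InitializedTemporariesSet>(std::make_pair(BTE, SFC));
+//   State = State->add<InitializedTemporariesSet>(std::make_pair(BTE, SFC));
+//   State = State->remove<InitializedTemporariesSet>(std::make_pair(BTE, SFC));
+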
+//===----------------------------------------------------------------------===//
+// Engine construction and deletion.
+//===----------------------------------------------------------------------===//
+
+static const char* TagProviderName = "ExprEngine";
+
+ExprEngine::ExprEngine(AnalysisManager &mgr, bool gcEnabled,
+ SetOfConstDecls *VisitedCalleesIn,
+ FunctionSummariesTy *FS,
+ InliningModes HowToInlineIn)
+ : AMgr(mgr),
+ AnalysisDeclContexts(mgr.getAnalysisDeclContextManager()),
+ Engine(*this, FS),
+ G(Engine.getGraph()),
+ StateMgr(getContext(), mgr.getStoreManagerCreator(),
+ mgr.getConstraintManagerCreator(), G.getAllocator(),
+ this),
+ SymMgr(StateMgr.getSymbolManager()),
+ svalBuilder(StateMgr.getSValBuilder()),
+ currStmtIdx(0), currBldrCtx(nullptr),
+ ObjCNoRet(mgr.getASTContext()),
+ ObjCGCEnabled(gcEnabled), BR(mgr, *this),
+ VisitedCallees(VisitedCalleesIn),
+ HowToInline(HowToInlineIn)
+{
+ unsigned TrimInterval = mgr.options.getGraphTrimInterval();
+ if (TrimInterval != 0) {
+    // Enable eager node reclamation when constructing the ExplodedGraph.
+ G.enableNodeReclamation(TrimInterval);
+ }
+}
+
+ExprEngine::~ExprEngine() {
+ BR.FlushReports();
+}
+
+//===----------------------------------------------------------------------===//
+// Utility methods.
+//===----------------------------------------------------------------------===//
+
+ProgramStateRef ExprEngine::getInitialState(const LocationContext *InitLoc) {
+ ProgramStateRef state = StateMgr.getInitialState(InitLoc);
+ const Decl *D = InitLoc->getDecl();
+
+ // Preconditions.
+ // FIXME: It would be nice if we had a more general mechanism to add
+ // such preconditions. Some day.
+ do {
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // Precondition: the first argument of 'main' is an integer guaranteed
+ // to be > 0.
+ const IdentifierInfo *II = FD->getIdentifier();
+ if (!II || !(II->getName() == "main" && FD->getNumParams() > 0))
+ break;
+
+ const ParmVarDecl *PD = FD->getParamDecl(0);
+ QualType T = PD->getType();
+ const BuiltinType *BT = dyn_cast<BuiltinType>(T);
+ if (!BT || !BT->isInteger())
+ break;
+
+ const MemRegion *R = state->getRegion(PD, InitLoc);
+ if (!R)
+ break;
+
+ SVal V = state->getSVal(loc::MemRegionVal(R));
+ SVal Constraint_untested = evalBinOp(state, BO_GT, V,
+ svalBuilder.makeZeroVal(T),
+ svalBuilder.getConditionType());
+
+ Optional<DefinedOrUnknownSVal> Constraint =
+ Constraint_untested.getAs<DefinedOrUnknownSVal>();
+
+ if (!Constraint)
+ break;
+
+ if (ProgramStateRef newState = state->assume(*Constraint, true))
+ state = newState;
+ }
+ break;
+ }
+ while (0);
+
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ // Precondition: 'self' is always non-null upon entry to an Objective-C
+ // method.
+ const ImplicitParamDecl *SelfD = MD->getSelfDecl();
+ const MemRegion *R = state->getRegion(SelfD, InitLoc);
+ SVal V = state->getSVal(loc::MemRegionVal(R));
+
+ if (Optional<Loc> LV = V.getAs<Loc>()) {
+ // Assume that the pointer value in 'self' is non-null.
+ state = state->assume(*LV, true);
+ assert(state && "'self' cannot be null");
+ }
+ }
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (!MD->isStatic()) {
+ // Precondition: 'this' is always non-null upon entry to the
+ // top-level function. This is our starting assumption for
+ // analyzing an "open" program.
+ const StackFrameContext *SFC = InitLoc->getCurrentStackFrame();
+ if (SFC->getParent() == nullptr) {
+ loc::MemRegionVal L = svalBuilder.getCXXThis(MD, SFC);
+ SVal V = state->getSVal(L);
+ if (Optional<Loc> LV = V.getAs<Loc>()) {
+ state = state->assume(*LV, true);
+ assert(state && "'this' cannot be null");
+ }
+ }
+ }
+ }
+
+ return state;
+}
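+
+// For example (illustrative): when the analysis entry point is
+//
+//   int main(int argc, char **argv) { ... }
+//
+// the initial state is constrained with 'argc > 0'; likewise 'self' and
+// 'this' are assumed non-null on entry to Objective-C methods and to
+// top-level C++ instance methods, respectively.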
+
+ProgramStateRef
+ExprEngine::createTemporaryRegionIfNeeded(ProgramStateRef State,
+ const LocationContext *LC,
+ const Expr *Ex,
+ const Expr *Result) {
+ SVal V = State->getSVal(Ex, LC);
+ if (!Result) {
+ // If we don't have an explicit result expression, we're in "if needed"
+ // mode. Only create a region if the current value is a NonLoc.
+ if (!V.getAs<NonLoc>())
+ return State;
+ Result = Ex;
+ } else {
+ // We need to create a region no matter what. For sanity, make sure we don't
+ // try to stuff a Loc into a non-pointer temporary region.
+ assert(!V.getAs<Loc>() || Loc::isLocType(Result->getType()) ||
+ Result->getType()->isMemberPointerType());
+ }
+
+ ProgramStateManager &StateMgr = State->getStateManager();
+ MemRegionManager &MRMgr = StateMgr.getRegionManager();
+ StoreManager &StoreMgr = StateMgr.getStoreManager();
+
+ // We need to be careful about treating a derived type's value as
+ // bindings for a base type. Unless we're creating a temporary pointer region,
+ // start by stripping and recording base casts.
+ SmallVector<const CastExpr *, 4> Casts;
+ const Expr *Inner = Ex->IgnoreParens();
+ if (!Loc::isLocType(Result->getType())) {
+ while (const CastExpr *CE = dyn_cast<CastExpr>(Inner)) {
+ if (CE->getCastKind() == CK_DerivedToBase ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase)
+ Casts.push_back(CE);
+ else if (CE->getCastKind() != CK_NoOp)
+ break;
+
+ Inner = CE->getSubExpr()->IgnoreParens();
+ }
+ }
+
+ // Create a temporary object region for the inner expression (which may have
+ // a more derived type) and bind the value into it.
+ const TypedValueRegion *TR = nullptr;
+ if (const MaterializeTemporaryExpr *MT =
+ dyn_cast<MaterializeTemporaryExpr>(Result)) {
+ StorageDuration SD = MT->getStorageDuration();
+ // If this object is bound to a reference with static storage duration, we
+ // put it in a different region to prevent "address leakage" warnings.
+ if (SD == SD_Static || SD == SD_Thread)
+ TR = MRMgr.getCXXStaticTempObjectRegion(Inner);
+ }
+ if (!TR)
+ TR = MRMgr.getCXXTempObjectRegion(Inner, LC);
+
+ SVal Reg = loc::MemRegionVal(TR);
+
+ if (V.isUnknown())
+ V = getSValBuilder().conjureSymbolVal(Result, LC, TR->getValueType(),
+ currBldrCtx->blockCount());
+ State = State->bindLoc(Reg, V);
+
+ // Re-apply the casts (from innermost to outermost) for type sanity.
+ for (SmallVectorImpl<const CastExpr *>::reverse_iterator I = Casts.rbegin(),
+ E = Casts.rend();
+ I != E; ++I) {
+ Reg = StoreMgr.evalDerivedToBase(Reg, *I);
+ }
+
+ State = State->BindExpr(Result, LC, Reg);
+ return State;
+}
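+
+// For example (illustrative): given 'const Base &b = makeDerived();', the
+// rvalue of the call is bound into a fresh temporary region of the derived
+// type, and the stripped DerivedToBase casts are re-applied so that 'b' ends
+// up referring to the Base subobject of that region.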
+
+//===----------------------------------------------------------------------===//
+// Top-level transfer function logic (Dispatcher).
+//===----------------------------------------------------------------------===//
+
+/// evalAssume - Called by ConstraintManager. Used to call checker-specific
+/// logic for handling assumptions on symbolic values.
+ProgramStateRef ExprEngine::processAssume(ProgramStateRef state,
+ SVal cond, bool assumption) {
+ return getCheckerManager().runCheckersForEvalAssume(state, cond, assumption);
+}
+
+bool ExprEngine::wantsRegionChangeUpdate(ProgramStateRef state) {
+ return getCheckerManager().wantsRegionChangeUpdate(state);
+}
+
+ProgramStateRef
+ExprEngine::processRegionChanges(ProgramStateRef state,
+ const InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> Explicits,
+ ArrayRef<const MemRegion *> Regions,
+ const CallEvent *Call) {
+ return getCheckerManager().runCheckersForRegionChanges(state, invalidated,
+ Explicits, Regions, Call);
+}
+
+void ExprEngine::printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) {
+ getCheckerManager().runCheckersForPrintState(Out, State, NL, Sep);
+}
+
+void ExprEngine::processEndWorklist(bool hasWorkRemaining) {
+ getCheckerManager().runCheckersForEndAnalysis(G, BR, *this);
+}
+
+void ExprEngine::processCFGElement(const CFGElement E, ExplodedNode *Pred,
+ unsigned StmtIdx, NodeBuilderContext *Ctx) {
+ PrettyStackTraceLocationContext CrashInfo(Pred->getLocationContext());
+ currStmtIdx = StmtIdx;
+ currBldrCtx = Ctx;
+
+ switch (E.getKind()) {
+ case CFGElement::Statement:
+ ProcessStmt(const_cast<Stmt*>(E.castAs<CFGStmt>().getStmt()), Pred);
+ return;
+ case CFGElement::Initializer:
+ ProcessInitializer(E.castAs<CFGInitializer>().getInitializer(), Pred);
+ return;
+ case CFGElement::NewAllocator:
+ ProcessNewAllocator(E.castAs<CFGNewAllocator>().getAllocatorExpr(),
+ Pred);
+ return;
+ case CFGElement::AutomaticObjectDtor:
+ case CFGElement::DeleteDtor:
+ case CFGElement::BaseDtor:
+ case CFGElement::MemberDtor:
+ case CFGElement::TemporaryDtor:
+ ProcessImplicitDtor(E.castAs<CFGImplicitDtor>(), Pred);
+ return;
+ }
+}
+
+static bool shouldRemoveDeadBindings(AnalysisManager &AMgr,
+ const CFGStmt S,
+ const ExplodedNode *Pred,
+ const LocationContext *LC) {
+
+ // Are we never purging state values?
+ if (AMgr.options.AnalysisPurgeOpt == PurgeNone)
+ return false;
+
+ // Is this the beginning of a basic block?
+ if (Pred->getLocation().getAs<BlockEntrance>())
+ return true;
+
+  // Is this a non-expression statement?
+ if (!isa<Expr>(S.getStmt()))
+ return true;
+
+ // Run before processing a call.
+ if (CallEvent::isCallStmt(S.getStmt()))
+ return true;
+
+ // Is this an expression that is consumed by another expression? If so,
+ // postpone cleaning out the state.
+ ParentMap &PM = LC->getAnalysisDeclContext()->getParentMap();
+ return !PM.isConsumedExpr(cast<Expr>(S.getStmt()));
+}
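+
+// For example (illustrative): in 'y = x + 1;' the subexpressions 'x' and
+// 'x + 1' are consumed by their parents, so purging is postponed until the
+// whole statement; a call such as 'foo(x)' purges dead bindings first.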
+
+void ExprEngine::removeDead(ExplodedNode *Pred, ExplodedNodeSet &Out,
+ const Stmt *ReferenceStmt,
+ const LocationContext *LC,
+ const Stmt *DiagnosticStmt,
+ ProgramPoint::Kind K) {
+ assert((K == ProgramPoint::PreStmtPurgeDeadSymbolsKind ||
+ ReferenceStmt == nullptr || isa<ReturnStmt>(ReferenceStmt))
+ && "PostStmt is not generally supported by the SymbolReaper yet");
+ assert(LC && "Must pass the current (or expiring) LocationContext");
+
+ if (!DiagnosticStmt) {
+ DiagnosticStmt = ReferenceStmt;
+ assert(DiagnosticStmt && "Required for clearing a LocationContext");
+ }
+
+ NumRemoveDeadBindings++;
+ ProgramStateRef CleanedState = Pred->getState();
+
+ // LC is the location context being destroyed, but SymbolReaper wants a
+ // location context that is still live. (If this is the top-level stack
+ // frame, this will be null.)
+ if (!ReferenceStmt) {
+ assert(K == ProgramPoint::PostStmtPurgeDeadSymbolsKind &&
+ "Use PostStmtPurgeDeadSymbolsKind for clearing a LocationContext");
+ LC = LC->getParent();
+ }
+
+ const StackFrameContext *SFC = LC ? LC->getCurrentStackFrame() : nullptr;
+ SymbolReaper SymReaper(SFC, ReferenceStmt, SymMgr, getStoreManager());
+
+ getCheckerManager().runCheckersForLiveSymbols(CleanedState, SymReaper);
+
+ // Create a state in which dead bindings are removed from the environment
+ // and the store. TODO: The function should just return new env and store,
+ // not a new state.
+ CleanedState = StateMgr.removeDeadBindings(CleanedState, SFC, SymReaper);
+
+ // Process any special transfer function for dead symbols.
+ // A tag to track convenience transitions, which can be removed at cleanup.
+ static SimpleProgramPointTag cleanupTag(TagProviderName, "Clean Node");
+ if (!SymReaper.hasDeadSymbols()) {
+ // Generate a CleanedNode that has the environment and store cleaned
+ // up. Since no symbols are dead, we can optimize and not clean out
+ // the constraint manager.
+ StmtNodeBuilder Bldr(Pred, Out, *currBldrCtx);
+ Bldr.generateNode(DiagnosticStmt, Pred, CleanedState, &cleanupTag, K);
+
+ } else {
+    // Call checkers with the non-cleaned state so that they can query the
+    // values of the soon-to-be-dead symbols.
+ ExplodedNodeSet CheckedSet;
+ getCheckerManager().runCheckersForDeadSymbols(CheckedSet, Pred, SymReaper,
+ DiagnosticStmt, *this, K);
+
+ // For each node in CheckedSet, generate CleanedNodes that have the
+ // environment, the store, and the constraints cleaned up but have the
+ // user-supplied states as the predecessors.
+ StmtNodeBuilder Bldr(CheckedSet, Out, *currBldrCtx);
+ for (ExplodedNodeSet::const_iterator
+ I = CheckedSet.begin(), E = CheckedSet.end(); I != E; ++I) {
+ ProgramStateRef CheckerState = (*I)->getState();
+
+ // The constraint manager has not been cleaned up yet, so clean up now.
+ CheckerState = getConstraintManager().removeDeadBindings(CheckerState,
+ SymReaper);
+
+ assert(StateMgr.haveEqualEnvironments(CheckerState, Pred->getState()) &&
+ "Checkers are not allowed to modify the Environment as a part of "
+ "checkDeadSymbols processing.");
+ assert(StateMgr.haveEqualStores(CheckerState, Pred->getState()) &&
+ "Checkers are not allowed to modify the Store as a part of "
+ "checkDeadSymbols processing.");
+
+ // Create a state based on CleanedState with CheckerState GDM and
+ // generate a transition to that state.
+ ProgramStateRef CleanedCheckerSt =
+ StateMgr.getPersistentStateWithGDM(CleanedState, CheckerState);
+ Bldr.generateNode(DiagnosticStmt, *I, CleanedCheckerSt, &cleanupTag, K);
+ }
+ }
+}
+
+void ExprEngine::ProcessStmt(const CFGStmt S,
+ ExplodedNode *Pred) {
+ // Reclaim any unnecessary nodes in the ExplodedGraph.
+ G.reclaimRecentlyAllocatedNodes();
+
+ const Stmt *currStmt = S.getStmt();
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+ currStmt->getLocStart(),
+ "Error evaluating statement");
+
+ // Remove dead bindings and symbols.
+ ExplodedNodeSet CleanedStates;
+ if (shouldRemoveDeadBindings(AMgr, S, Pred, Pred->getLocationContext())){
+ removeDead(Pred, CleanedStates, currStmt, Pred->getLocationContext());
+ } else
+ CleanedStates.Add(Pred);
+
+ // Visit the statement.
+ ExplodedNodeSet Dst;
+ for (ExplodedNodeSet::iterator I = CleanedStates.begin(),
+ E = CleanedStates.end(); I != E; ++I) {
+ ExplodedNodeSet DstI;
+ // Visit the statement.
+ Visit(currStmt, *I, DstI);
+ Dst.insert(DstI);
+ }
+
+ // Enqueue the new nodes onto the work list.
+ Engine.enqueue(Dst, currBldrCtx->getBlock(), currStmtIdx);
+}
+
+void ExprEngine::ProcessInitializer(const CFGInitializer Init,
+ ExplodedNode *Pred) {
+ const CXXCtorInitializer *BMI = Init.getInitializer();
+
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+ BMI->getSourceLocation(),
+ "Error evaluating initializer");
+
+ // We don't clean up dead bindings here.
+ const StackFrameContext *stackFrame =
+ cast<StackFrameContext>(Pred->getLocationContext());
+ const CXXConstructorDecl *decl =
+ cast<CXXConstructorDecl>(stackFrame->getDecl());
+
+ ProgramStateRef State = Pred->getState();
+ SVal thisVal = State->getSVal(svalBuilder.getCXXThis(decl, stackFrame));
+
+ ExplodedNodeSet Tmp(Pred);
+ SVal FieldLoc;
+
+ // Evaluate the initializer, if necessary
+ if (BMI->isAnyMemberInitializer()) {
+ // Constructors build the object directly in the field,
+ // but non-objects must be copied in from the initializer.
+ if (auto *CtorExpr = findDirectConstructorForCurrentCFGElement()) {
+ assert(BMI->getInit()->IgnoreImplicit() == CtorExpr);
+ (void)CtorExpr;
+ // The field was directly constructed, so there is no need to bind.
+ } else {
+ const Expr *Init = BMI->getInit()->IgnoreImplicit();
+ const ValueDecl *Field;
+ if (BMI->isIndirectMemberInitializer()) {
+ Field = BMI->getIndirectMember();
+ FieldLoc = State->getLValue(BMI->getIndirectMember(), thisVal);
+ } else {
+ Field = BMI->getMember();
+ FieldLoc = State->getLValue(BMI->getMember(), thisVal);
+ }
+
+ SVal InitVal;
+ if (BMI->getNumArrayIndices() > 0) {
+ // Handle arrays of trivial type. We can represent this with a
+ // primitive load/copy from the base array region.
+ const ArraySubscriptExpr *ASE;
+ while ((ASE = dyn_cast<ArraySubscriptExpr>(Init)))
+ Init = ASE->getBase()->IgnoreImplicit();
+
+ SVal LValue = State->getSVal(Init, stackFrame);
+ if (Optional<Loc> LValueLoc = LValue.getAs<Loc>())
+ InitVal = State->getSVal(*LValueLoc);
+
+ // If we fail to get the value for some reason, use a symbolic value.
+ if (InitVal.isUnknownOrUndef()) {
+ SValBuilder &SVB = getSValBuilder();
+ InitVal = SVB.conjureSymbolVal(BMI->getInit(), stackFrame,
+ Field->getType(),
+ currBldrCtx->blockCount());
+ }
+ } else {
+ InitVal = State->getSVal(BMI->getInit(), stackFrame);
+ }
+
+ assert(Tmp.size() == 1 && "have not generated any new nodes yet");
+ assert(*Tmp.begin() == Pred && "have not generated any new nodes yet");
+ Tmp.clear();
+
+ PostInitializer PP(BMI, FieldLoc.getAsRegion(), stackFrame);
+ evalBind(Tmp, Init, Pred, FieldLoc, InitVal, /*isInit=*/true, &PP);
+ }
+ } else {
+ assert(BMI->isBaseInitializer() || BMI->isDelegatingInitializer());
+ // We already did all the work when visiting the CXXConstructExpr.
+ }
+
+ // Construct PostInitializer nodes whether the state changed or not,
+ // so that the diagnostics don't get confused.
+ PostInitializer PP(BMI, FieldLoc.getAsRegion(), stackFrame);
+ ExplodedNodeSet Dst;
+ NodeBuilder Bldr(Tmp, Dst, *currBldrCtx);
+ for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E; ++I) {
+ ExplodedNode *N = *I;
+ Bldr.generateNode(PP, N->getState(), N);
+ }
+
+ // Enqueue the new nodes onto the work list.
+ Engine.enqueue(Dst, currBldrCtx->getBlock(), currStmtIdx);
+}
+
+void ExprEngine::ProcessImplicitDtor(const CFGImplicitDtor D,
+ ExplodedNode *Pred) {
+ ExplodedNodeSet Dst;
+ switch (D.getKind()) {
+ case CFGElement::AutomaticObjectDtor:
+ ProcessAutomaticObjDtor(D.castAs<CFGAutomaticObjDtor>(), Pred, Dst);
+ break;
+ case CFGElement::BaseDtor:
+ ProcessBaseDtor(D.castAs<CFGBaseDtor>(), Pred, Dst);
+ break;
+ case CFGElement::MemberDtor:
+ ProcessMemberDtor(D.castAs<CFGMemberDtor>(), Pred, Dst);
+ break;
+ case CFGElement::TemporaryDtor:
+ ProcessTemporaryDtor(D.castAs<CFGTemporaryDtor>(), Pred, Dst);
+ break;
+ case CFGElement::DeleteDtor:
+ ProcessDeleteDtor(D.castAs<CFGDeleteDtor>(), Pred, Dst);
+ break;
+ default:
+ llvm_unreachable("Unexpected dtor kind.");
+ }
+
+ // Enqueue the new nodes onto the work list.
+ Engine.enqueue(Dst, currBldrCtx->getBlock(), currStmtIdx);
+}
+
+void ExprEngine::ProcessNewAllocator(const CXXNewExpr *NE,
+ ExplodedNode *Pred) {
+ ExplodedNodeSet Dst;
+ AnalysisManager &AMgr = getAnalysisManager();
+ AnalyzerOptions &Opts = AMgr.options;
+ // TODO: We're not evaluating allocators for all cases just yet as
+ // we're not handling the return value correctly, which causes false
+ // positives when the alpha.cplusplus.NewDeleteLeaks check is on.
+ if (Opts.mayInlineCXXAllocator())
+ VisitCXXNewAllocatorCall(NE, Pred, Dst);
+ else {
+ NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+ const LocationContext *LCtx = Pred->getLocationContext();
+ PostImplicitCall PP(NE->getOperatorNew(), NE->getLocStart(), LCtx);
+ Bldr.generateNode(PP, Pred->getState(), Pred);
+ }
+ Engine.enqueue(Dst, currBldrCtx->getBlock(), currStmtIdx);
+}
+
+void ExprEngine::ProcessAutomaticObjDtor(const CFGAutomaticObjDtor Dtor,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ const VarDecl *varDecl = Dtor.getVarDecl();
+ QualType varType = varDecl->getType();
+
+ ProgramStateRef state = Pred->getState();
+ SVal dest = state->getLValue(varDecl, Pred->getLocationContext());
+ const MemRegion *Region = dest.castAs<loc::MemRegionVal>().getRegion();
+
+ if (const ReferenceType *refType = varType->getAs<ReferenceType>()) {
+ varType = refType->getPointeeType();
+ Region = state->getSVal(Region).getAsRegion();
+ }
+
+ VisitCXXDestructor(varType, Region, Dtor.getTriggerStmt(), /*IsBase=*/ false,
+ Pred, Dst);
+}
+
+void ExprEngine::ProcessDeleteDtor(const CFGDeleteDtor Dtor,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ ProgramStateRef State = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ const CXXDeleteExpr *DE = Dtor.getDeleteExpr();
+ const Stmt *Arg = DE->getArgument();
+ SVal ArgVal = State->getSVal(Arg, LCtx);
+
+  // If the argument to delete is known to be a null value,
+  // don't run the destructor.
+ if (State->isNull(ArgVal).isConstrainedTrue()) {
+ QualType DTy = DE->getDestroyedType();
+ QualType BTy = getContext().getBaseElementType(DTy);
+ const CXXRecordDecl *RD = BTy->getAsCXXRecordDecl();
+ const CXXDestructorDecl *Dtor = RD->getDestructor();
+
+ PostImplicitCall PP(Dtor, DE->getLocStart(), LCtx);
+ NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+ Bldr.generateNode(PP, Pred->getState(), Pred);
+ return;
+ }
+
+ VisitCXXDestructor(DE->getDestroyedType(),
+ ArgVal.getAsRegion(),
+ DE, /*IsBase=*/ false,
+ Pred, Dst);
+}
+
+void ExprEngine::ProcessBaseDtor(const CFGBaseDtor D,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst) {
+ const LocationContext *LCtx = Pred->getLocationContext();
+
+ const CXXDestructorDecl *CurDtor = cast<CXXDestructorDecl>(LCtx->getDecl());
+ Loc ThisPtr = getSValBuilder().getCXXThis(CurDtor,
+ LCtx->getCurrentStackFrame());
+ SVal ThisVal = Pred->getState()->getSVal(ThisPtr);
+
+ // Create the base object region.
+ const CXXBaseSpecifier *Base = D.getBaseSpecifier();
+ QualType BaseTy = Base->getType();
+ SVal BaseVal = getStoreManager().evalDerivedToBase(ThisVal, BaseTy,
+ Base->isVirtual());
+
+ VisitCXXDestructor(BaseTy, BaseVal.castAs<loc::MemRegionVal>().getRegion(),
+ CurDtor->getBody(), /*IsBase=*/ true, Pred, Dst);
+}
+
+void ExprEngine::ProcessMemberDtor(const CFGMemberDtor D,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst) {
+ const FieldDecl *Member = D.getFieldDecl();
+ ProgramStateRef State = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+
+ const CXXDestructorDecl *CurDtor = cast<CXXDestructorDecl>(LCtx->getDecl());
+ Loc ThisVal = getSValBuilder().getCXXThis(CurDtor,
+ LCtx->getCurrentStackFrame());
+ SVal FieldVal =
+ State->getLValue(Member, State->getSVal(ThisVal).castAs<Loc>());
+
+ VisitCXXDestructor(Member->getType(),
+ FieldVal.castAs<loc::MemRegionVal>().getRegion(),
+ CurDtor->getBody(), /*IsBase=*/false, Pred, Dst);
+}
+
+void ExprEngine::ProcessTemporaryDtor(const CFGTemporaryDtor D,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ ExplodedNodeSet CleanDtorState;
+ StmtNodeBuilder StmtBldr(Pred, CleanDtorState, *currBldrCtx);
+ ProgramStateRef State = Pred->getState();
+ if (State->contains<InitializedTemporariesSet>(
+ std::make_pair(D.getBindTemporaryExpr(), Pred->getStackFrame()))) {
+ // FIXME: Currently we insert temporary destructors for default parameters,
+ // but we don't insert the constructors.
+ State = State->remove<InitializedTemporariesSet>(
+ std::make_pair(D.getBindTemporaryExpr(), Pred->getStackFrame()));
+ }
+ StmtBldr.generateNode(D.getBindTemporaryExpr(), Pred, State);
+
+ QualType varType = D.getBindTemporaryExpr()->getSubExpr()->getType();
+ // FIXME: Currently CleanDtorState can be empty here due to temporaries being
+ // bound to default parameters.
+ assert(CleanDtorState.size() <= 1);
+ ExplodedNode *CleanPred =
+ CleanDtorState.empty() ? Pred : *CleanDtorState.begin();
+ // FIXME: Inlining of temporary destructors is not supported yet anyway, so
+ // we just put a NULL region for now. This will need to be changed later.
+ VisitCXXDestructor(varType, nullptr, D.getBindTemporaryExpr(),
+ /*IsBase=*/false, CleanPred, Dst);
+}
+
+void ExprEngine::processCleanupTemporaryBranch(const CXXBindTemporaryExpr *BTE,
+ NodeBuilderContext &BldCtx,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst,
+ const CFGBlock *DstT,
+ const CFGBlock *DstF) {
+ BranchNodeBuilder TempDtorBuilder(Pred, Dst, BldCtx, DstT, DstF);
+ if (Pred->getState()->contains<InitializedTemporariesSet>(
+ std::make_pair(BTE, Pred->getStackFrame()))) {
+ TempDtorBuilder.markInfeasible(false);
+ TempDtorBuilder.generateNode(Pred->getState(), true, Pred);
+ } else {
+ TempDtorBuilder.markInfeasible(true);
+ TempDtorBuilder.generateNode(Pred->getState(), false, Pred);
+ }
+}
+
+void ExprEngine::VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *BTE,
+ ExplodedNodeSet &PreVisit,
+ ExplodedNodeSet &Dst) {
+ if (!getAnalysisManager().options.includeTemporaryDtorsInCFG()) {
+ // In case we don't have temporary destructors in the CFG, do not mark
+ // the initialization - we would otherwise never clean it up.
+ Dst = PreVisit;
+ return;
+ }
+ StmtNodeBuilder StmtBldr(PreVisit, Dst, *currBldrCtx);
+ for (ExplodedNode *Node : PreVisit) {
+ ProgramStateRef State = Node->getState();
+
+ if (!State->contains<InitializedTemporariesSet>(
+ std::make_pair(BTE, Node->getStackFrame()))) {
+ // FIXME: Currently the state might already contain the marker due to
+ // incorrect handling of temporaries bound to default parameters; for
+ // those, we currently skip the CXXBindTemporaryExpr but rely on adding
+ // temporary destructor nodes.
+ State = State->add<InitializedTemporariesSet>(
+ std::make_pair(BTE, Node->getStackFrame()));
+ }
+ StmtBldr.generateNode(BTE, Node, State);
+ }
+}
+
+void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
+ ExplodedNodeSet &DstTop) {
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+ S->getLocStart(),
+ "Error evaluating statement");
+ ExplodedNodeSet Dst;
+ StmtNodeBuilder Bldr(Pred, DstTop, *currBldrCtx);
+
+ assert(!isa<Expr>(S) || S == cast<Expr>(S)->IgnoreParens());
+
+ switch (S->getStmtClass()) {
+ // C++ and ARC stuff we don't support yet.
+ case Expr::ObjCIndirectCopyRestoreExprClass:
+ case Stmt::CXXDependentScopeMemberExprClass:
+ case Stmt::CXXTryStmtClass:
+ case Stmt::CXXTypeidExprClass:
+ case Stmt::CXXUuidofExprClass:
+ case Stmt::CXXFoldExprClass:
+ case Stmt::MSPropertyRefExprClass:
+ case Stmt::MSPropertySubscriptExprClass:
+ case Stmt::CXXUnresolvedConstructExprClass:
+ case Stmt::DependentScopeDeclRefExprClass:
+ case Stmt::ArrayTypeTraitExprClass:
+ case Stmt::ExpressionTraitExprClass:
+ case Stmt::UnresolvedLookupExprClass:
+ case Stmt::UnresolvedMemberExprClass:
+ case Stmt::TypoExprClass:
+ case Stmt::CXXNoexceptExprClass:
+ case Stmt::PackExpansionExprClass:
+ case Stmt::SubstNonTypeTemplateParmPackExprClass:
+ case Stmt::FunctionParmPackExprClass:
+ case Stmt::CoroutineBodyStmtClass:
+ case Stmt::CoawaitExprClass:
+ case Stmt::CoreturnStmtClass:
+ case Stmt::CoyieldExprClass:
+ case Stmt::SEHTryStmtClass:
+ case Stmt::SEHExceptStmtClass:
+ case Stmt::SEHLeaveStmtClass:
+ case Stmt::SEHFinallyStmtClass: {
+ const ExplodedNode *node = Bldr.generateSink(S, Pred, Pred->getState());
+ Engine.addAbortedBlock(node, currBldrCtx->getBlock());
+ break;
+ }
+
+ case Stmt::ParenExprClass:
+ llvm_unreachable("ParenExprs already handled.");
+ case Stmt::GenericSelectionExprClass:
+ llvm_unreachable("GenericSelectionExprs already handled.");
+ // Cases that should never be evaluated simply because they shouldn't
+ // appear in the CFG.
+ case Stmt::BreakStmtClass:
+ case Stmt::CaseStmtClass:
+ case Stmt::CompoundStmtClass:
+ case Stmt::ContinueStmtClass:
+ case Stmt::CXXForRangeStmtClass:
+ case Stmt::DefaultStmtClass:
+ case Stmt::DoStmtClass:
+ case Stmt::ForStmtClass:
+ case Stmt::GotoStmtClass:
+ case Stmt::IfStmtClass:
+ case Stmt::IndirectGotoStmtClass:
+ case Stmt::LabelStmtClass:
+ case Stmt::NoStmtClass:
+ case Stmt::NullStmtClass:
+ case Stmt::SwitchStmtClass:
+ case Stmt::WhileStmtClass:
+ case Expr::MSDependentExistsStmtClass:
+ case Stmt::CapturedStmtClass:
+ case Stmt::OMPParallelDirectiveClass:
+ case Stmt::OMPSimdDirectiveClass:
+ case Stmt::OMPForDirectiveClass:
+ case Stmt::OMPForSimdDirectiveClass:
+ case Stmt::OMPSectionsDirectiveClass:
+ case Stmt::OMPSectionDirectiveClass:
+ case Stmt::OMPSingleDirectiveClass:
+ case Stmt::OMPMasterDirectiveClass:
+ case Stmt::OMPCriticalDirectiveClass:
+ case Stmt::OMPParallelForDirectiveClass:
+ case Stmt::OMPParallelForSimdDirectiveClass:
+ case Stmt::OMPParallelSectionsDirectiveClass:
+ case Stmt::OMPTaskDirectiveClass:
+ case Stmt::OMPTaskyieldDirectiveClass:
+ case Stmt::OMPBarrierDirectiveClass:
+ case Stmt::OMPTaskwaitDirectiveClass:
+ case Stmt::OMPTaskgroupDirectiveClass:
+ case Stmt::OMPFlushDirectiveClass:
+ case Stmt::OMPOrderedDirectiveClass:
+ case Stmt::OMPAtomicDirectiveClass:
+ case Stmt::OMPTargetDirectiveClass:
+ case Stmt::OMPTargetDataDirectiveClass:
+ case Stmt::OMPTeamsDirectiveClass:
+ case Stmt::OMPCancellationPointDirectiveClass:
+ case Stmt::OMPCancelDirectiveClass:
+ case Stmt::OMPTaskLoopDirectiveClass:
+ case Stmt::OMPTaskLoopSimdDirectiveClass:
+ case Stmt::OMPDistributeDirectiveClass:
+ llvm_unreachable("Stmt should not be in analyzer evaluation loop");
+
+ case Stmt::ObjCSubscriptRefExprClass:
+ case Stmt::ObjCPropertyRefExprClass:
+ llvm_unreachable("These are handled by PseudoObjectExpr");
+
+ case Stmt::GNUNullExprClass: {
+ // GNU __null is a pointer-width integer, not an actual pointer.
+ ProgramStateRef state = Pred->getState();
+ state = state->BindExpr(S, Pred->getLocationContext(),
+ svalBuilder.makeIntValWithPtrWidth(0, false));
+ Bldr.generateNode(S, Pred, state);
+ break;
+ }
+
+ case Stmt::ObjCAtSynchronizedStmtClass:
+ Bldr.takeNodes(Pred);
+ VisitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::ExprWithCleanupsClass:
+ // Handled due to fully linearised CFG.
+ break;
+
+ case Stmt::CXXBindTemporaryExprClass: {
+ Bldr.takeNodes(Pred);
+ ExplodedNodeSet PreVisit;
+ getCheckerManager().runCheckersForPreStmt(PreVisit, Pred, S, *this);
+ ExplodedNodeSet Next;
+ VisitCXXBindTemporaryExpr(cast<CXXBindTemporaryExpr>(S), PreVisit, Next);
+ getCheckerManager().runCheckersForPostStmt(Dst, Next, S, *this);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ // Cases not handled yet; but will handle some day.
+ case Stmt::DesignatedInitExprClass:
+ case Stmt::DesignatedInitUpdateExprClass:
+ case Stmt::ExtVectorElementExprClass:
+ case Stmt::ImaginaryLiteralClass:
+ case Stmt::ObjCAtCatchStmtClass:
+ case Stmt::ObjCAtFinallyStmtClass:
+ case Stmt::ObjCAtTryStmtClass:
+ case Stmt::ObjCAutoreleasePoolStmtClass:
+ case Stmt::ObjCEncodeExprClass:
+ case Stmt::ObjCIsaExprClass:
+ case Stmt::ObjCProtocolExprClass:
+ case Stmt::ObjCSelectorExprClass:
+ case Stmt::ParenListExprClass:
+ case Stmt::ShuffleVectorExprClass:
+ case Stmt::ConvertVectorExprClass:
+ case Stmt::VAArgExprClass:
+ case Stmt::CUDAKernelCallExprClass:
+ case Stmt::OpaqueValueExprClass:
+ case Stmt::AsTypeExprClass:
+ case Stmt::AtomicExprClass:
+ // Fall through.
+
+ // Cases we intentionally don't evaluate, since they don't need
+ // to be explicitly evaluated.
+ case Stmt::PredefinedExprClass:
+ case Stmt::AddrLabelExprClass:
+ case Stmt::AttributedStmtClass:
+ case Stmt::IntegerLiteralClass:
+ case Stmt::CharacterLiteralClass:
+ case Stmt::ImplicitValueInitExprClass:
+ case Stmt::CXXScalarValueInitExprClass:
+ case Stmt::CXXBoolLiteralExprClass:
+ case Stmt::ObjCBoolLiteralExprClass:
+ case Stmt::FloatingLiteralClass:
+ case Stmt::NoInitExprClass:
+ case Stmt::SizeOfPackExprClass:
+ case Stmt::StringLiteralClass:
+ case Stmt::ObjCStringLiteralClass:
+ case Stmt::CXXPseudoDestructorExprClass:
+ case Stmt::SubstNonTypeTemplateParmExprClass:
+ case Stmt::CXXNullPtrLiteralExprClass:
+ case Stmt::OMPArraySectionExprClass:
+ case Stmt::TypeTraitExprClass: {
+ Bldr.takeNodes(Pred);
+ ExplodedNodeSet preVisit;
+ getCheckerManager().runCheckersForPreStmt(preVisit, Pred, S, *this);
+ getCheckerManager().runCheckersForPostStmt(Dst, preVisit, S, *this);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::CXXDefaultArgExprClass:
+ case Stmt::CXXDefaultInitExprClass: {
+ Bldr.takeNodes(Pred);
+ ExplodedNodeSet PreVisit;
+ getCheckerManager().runCheckersForPreStmt(PreVisit, Pred, S, *this);
+
+ ExplodedNodeSet Tmp;
+ StmtNodeBuilder Bldr2(PreVisit, Tmp, *currBldrCtx);
+
+ const Expr *ArgE;
+ if (const CXXDefaultArgExpr *DefE = dyn_cast<CXXDefaultArgExpr>(S))
+ ArgE = DefE->getExpr();
+ else if (const CXXDefaultInitExpr *DefE = dyn_cast<CXXDefaultInitExpr>(S))
+ ArgE = DefE->getExpr();
+ else
+ llvm_unreachable("unknown constant wrapper kind");
+
+ bool IsTemporary = false;
+ if (const MaterializeTemporaryExpr *MTE =
+ dyn_cast<MaterializeTemporaryExpr>(ArgE)) {
+ ArgE = MTE->GetTemporaryExpr();
+ IsTemporary = true;
+ }
+
+ Optional<SVal> ConstantVal = svalBuilder.getConstantVal(ArgE);
+ if (!ConstantVal)
+ ConstantVal = UnknownVal();
+
+ const LocationContext *LCtx = Pred->getLocationContext();
+ for (ExplodedNodeSet::iterator I = PreVisit.begin(), E = PreVisit.end();
+ I != E; ++I) {
+ ProgramStateRef State = (*I)->getState();
+ State = State->BindExpr(S, LCtx, *ConstantVal);
+ if (IsTemporary)
+ State = createTemporaryRegionIfNeeded(State, LCtx,
+ cast<Expr>(S),
+ cast<Expr>(S));
+ Bldr2.generateNode(S, *I, State);
+ }
+
+ getCheckerManager().runCheckersForPostStmt(Dst, Tmp, S, *this);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ // Cases we evaluate as opaque expressions, conjuring a symbol.
+ case Stmt::CXXStdInitializerListExprClass:
+ case Expr::ObjCArrayLiteralClass:
+ case Expr::ObjCDictionaryLiteralClass:
+ case Expr::ObjCBoxedExprClass: {
+ Bldr.takeNodes(Pred);
+
+ ExplodedNodeSet preVisit;
+ getCheckerManager().runCheckersForPreStmt(preVisit, Pred, S, *this);
+
+ ExplodedNodeSet Tmp;
+ StmtNodeBuilder Bldr2(preVisit, Tmp, *currBldrCtx);
+
+ const Expr *Ex = cast<Expr>(S);
+ QualType resultType = Ex->getType();
+
+ for (ExplodedNodeSet::iterator it = preVisit.begin(), et = preVisit.end();
+ it != et; ++it) {
+ ExplodedNode *N = *it;
+ const LocationContext *LCtx = N->getLocationContext();
+ SVal result = svalBuilder.conjureSymbolVal(nullptr, Ex, LCtx,
+ resultType,
+ currBldrCtx->blockCount());
+ ProgramStateRef state = N->getState()->BindExpr(Ex, LCtx, result);
+ Bldr2.generateNode(S, N, state);
+ }
+
+ getCheckerManager().runCheckersForPostStmt(Dst, Tmp, S, *this);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::ArraySubscriptExprClass:
+ Bldr.takeNodes(Pred);
+ VisitLvalArraySubscriptExpr(cast<ArraySubscriptExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::GCCAsmStmtClass:
+ Bldr.takeNodes(Pred);
+ VisitGCCAsmStmt(cast<GCCAsmStmt>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::MSAsmStmtClass:
+ Bldr.takeNodes(Pred);
+ VisitMSAsmStmt(cast<MSAsmStmt>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::BlockExprClass:
+ Bldr.takeNodes(Pred);
+ VisitBlockExpr(cast<BlockExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::LambdaExprClass:
+ if (AMgr.options.shouldInlineLambdas()) {
+ Bldr.takeNodes(Pred);
+ VisitLambdaExpr(cast<LambdaExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ } else {
+ const ExplodedNode *node = Bldr.generateSink(S, Pred, Pred->getState());
+ Engine.addAbortedBlock(node, currBldrCtx->getBlock());
+ }
+ break;
+
+ case Stmt::BinaryOperatorClass: {
+ const BinaryOperator* B = cast<BinaryOperator>(S);
+ if (B->isLogicalOp()) {
+ Bldr.takeNodes(Pred);
+ VisitLogicalExpr(B, Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+ else if (B->getOpcode() == BO_Comma) {
+ ProgramStateRef state = Pred->getState();
+ Bldr.generateNode(B, Pred,
+ state->BindExpr(B, Pred->getLocationContext(),
+ state->getSVal(B->getRHS(),
+ Pred->getLocationContext())));
+ break;
+ }
+
+ Bldr.takeNodes(Pred);
+
+ if (AMgr.options.eagerlyAssumeBinOpBifurcation &&
+ (B->isRelationalOp() || B->isEqualityOp())) {
+ ExplodedNodeSet Tmp;
+ VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Tmp);
+ evalEagerlyAssumeBinOpBifurcation(Dst, Tmp, cast<Expr>(S));
+ }
+ else
+ VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst);
+
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::CXXOperatorCallExprClass: {
+ const CXXOperatorCallExpr *OCE = cast<CXXOperatorCallExpr>(S);
+
+ // For instance method operators, make sure the 'this' argument has a
+ // valid region.
+ const Decl *Callee = OCE->getCalleeDecl();
+ if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(Callee)) {
+ if (MD->isInstance()) {
+ ProgramStateRef State = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ ProgramStateRef NewState =
+ createTemporaryRegionIfNeeded(State, LCtx, OCE->getArg(0));
+ if (NewState != State) {
+ Pred = Bldr.generateNode(OCE, Pred, NewState, /*Tag=*/nullptr,
+ ProgramPoint::PreStmtKind);
+ // Did we cache out?
+ if (!Pred)
+ break;
+ }
+ }
+ }
+ // FALLTHROUGH
+ }
+ case Stmt::CallExprClass:
+ case Stmt::CXXMemberCallExprClass:
+ case Stmt::UserDefinedLiteralClass: {
+ Bldr.takeNodes(Pred);
+ VisitCallExpr(cast<CallExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::CXXCatchStmtClass: {
+ Bldr.takeNodes(Pred);
+ VisitCXXCatchStmt(cast<CXXCatchStmt>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::CXXTemporaryObjectExprClass:
+ case Stmt::CXXConstructExprClass: {
+ Bldr.takeNodes(Pred);
+ VisitCXXConstructExpr(cast<CXXConstructExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::CXXNewExprClass: {
+ Bldr.takeNodes(Pred);
+ ExplodedNodeSet PostVisit;
+ VisitCXXNewExpr(cast<CXXNewExpr>(S), Pred, PostVisit);
+ getCheckerManager().runCheckersForPostStmt(Dst, PostVisit, S, *this);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::CXXDeleteExprClass: {
+ Bldr.takeNodes(Pred);
+ ExplodedNodeSet PreVisit;
+ const CXXDeleteExpr *CDE = cast<CXXDeleteExpr>(S);
+ getCheckerManager().runCheckersForPreStmt(PreVisit, Pred, S, *this);
+
+ for (ExplodedNodeSet::iterator i = PreVisit.begin(),
+ e = PreVisit.end(); i != e ; ++i)
+ VisitCXXDeleteExpr(CDE, *i, Dst);
+
+ Bldr.addNodes(Dst);
+ break;
+ }
+  // FIXME: ChooseExpr is really a constant. We need to fix the CFG so it
+  // does not model them as explicit control-flow.
+
+ case Stmt::ChooseExprClass: { // __builtin_choose_expr
+ Bldr.takeNodes(Pred);
+ const ChooseExpr *C = cast<ChooseExpr>(S);
+ VisitGuardedExpr(C, C->getLHS(), C->getRHS(), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::CompoundAssignOperatorClass:
+ Bldr.takeNodes(Pred);
+ VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::CompoundLiteralExprClass:
+ Bldr.takeNodes(Pred);
+ VisitCompoundLiteralExpr(cast<CompoundLiteralExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass: { // '?' operator
+ Bldr.takeNodes(Pred);
+ const AbstractConditionalOperator *C
+ = cast<AbstractConditionalOperator>(S);
+ VisitGuardedExpr(C, C->getTrueExpr(), C->getFalseExpr(), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::CXXThisExprClass:
+ Bldr.takeNodes(Pred);
+ VisitCXXThisExpr(cast<CXXThisExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::DeclRefExprClass: {
+ Bldr.takeNodes(Pred);
+ const DeclRefExpr *DE = cast<DeclRefExpr>(S);
+ VisitCommonDeclRefExpr(DE, DE->getDecl(), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::DeclStmtClass:
+ Bldr.takeNodes(Pred);
+ VisitDeclStmt(cast<DeclStmt>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::ImplicitCastExprClass:
+ case Stmt::CStyleCastExprClass:
+ case Stmt::CXXStaticCastExprClass:
+ case Stmt::CXXDynamicCastExprClass:
+ case Stmt::CXXReinterpretCastExprClass:
+ case Stmt::CXXConstCastExprClass:
+ case Stmt::CXXFunctionalCastExprClass:
+ case Stmt::ObjCBridgedCastExprClass: {
+ Bldr.takeNodes(Pred);
+ const CastExpr *C = cast<CastExpr>(S);
+ // Handle the previsit checks.
+ ExplodedNodeSet dstPrevisit;
+ getCheckerManager().runCheckersForPreStmt(dstPrevisit, Pred, C, *this);
+
+ // Handle the expression itself.
+ ExplodedNodeSet dstExpr;
+ for (ExplodedNodeSet::iterator i = dstPrevisit.begin(),
+ e = dstPrevisit.end(); i != e ; ++i) {
+ VisitCast(C, C->getSubExpr(), *i, dstExpr);
+ }
+
+ // Handle the postvisit checks.
+ getCheckerManager().runCheckersForPostStmt(Dst, dstExpr, C, *this);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Expr::MaterializeTemporaryExprClass: {
+ Bldr.takeNodes(Pred);
+ const MaterializeTemporaryExpr *MTE = cast<MaterializeTemporaryExpr>(S);
+ CreateCXXTemporaryObject(MTE, Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::InitListExprClass:
+ Bldr.takeNodes(Pred);
+ VisitInitListExpr(cast<InitListExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::MemberExprClass:
+ Bldr.takeNodes(Pred);
+ VisitMemberExpr(cast<MemberExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::ObjCIvarRefExprClass:
+ Bldr.takeNodes(Pred);
+ VisitLvalObjCIvarRefExpr(cast<ObjCIvarRefExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::ObjCForCollectionStmtClass:
+ Bldr.takeNodes(Pred);
+ VisitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::ObjCMessageExprClass:
+ Bldr.takeNodes(Pred);
+ VisitObjCMessage(cast<ObjCMessageExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::ObjCAtThrowStmtClass:
+ case Stmt::CXXThrowExprClass:
+ // FIXME: This is not complete. We basically treat @throw as
+ // an abort.
+ Bldr.generateSink(S, Pred, Pred->getState());
+ break;
+
+ case Stmt::ReturnStmtClass:
+ Bldr.takeNodes(Pred);
+ VisitReturnStmt(cast<ReturnStmt>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::OffsetOfExprClass:
+ Bldr.takeNodes(Pred);
+ VisitOffsetOfExpr(cast<OffsetOfExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::UnaryExprOrTypeTraitExprClass:
+ Bldr.takeNodes(Pred);
+ VisitUnaryExprOrTypeTraitExpr(cast<UnaryExprOrTypeTraitExpr>(S),
+ Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::StmtExprClass: {
+ const StmtExpr *SE = cast<StmtExpr>(S);
+
+ if (SE->getSubStmt()->body_empty()) {
+ // Empty statement expression.
+ assert(SE->getType() == getContext().VoidTy
+ && "Empty statement expression must have void type.");
+ break;
+ }
+
+ if (Expr *LastExpr = dyn_cast<Expr>(*SE->getSubStmt()->body_rbegin())) {
+ ProgramStateRef state = Pred->getState();
+ Bldr.generateNode(SE, Pred,
+ state->BindExpr(SE, Pred->getLocationContext(),
+ state->getSVal(LastExpr,
+ Pred->getLocationContext())));
+ }
+ break;
+ }
+
+ case Stmt::UnaryOperatorClass: {
+ Bldr.takeNodes(Pred);
+ const UnaryOperator *U = cast<UnaryOperator>(S);
+ if (AMgr.options.eagerlyAssumeBinOpBifurcation && (U->getOpcode() == UO_LNot)) {
+ ExplodedNodeSet Tmp;
+ VisitUnaryOperator(U, Pred, Tmp);
+ evalEagerlyAssumeBinOpBifurcation(Dst, Tmp, U);
+ }
+ else
+ VisitUnaryOperator(U, Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::PseudoObjectExprClass: {
+ Bldr.takeNodes(Pred);
+ ProgramStateRef state = Pred->getState();
+ const PseudoObjectExpr *PE = cast<PseudoObjectExpr>(S);
+ if (const Expr *Result = PE->getResultExpr()) {
+ SVal V = state->getSVal(Result, Pred->getLocationContext());
+ Bldr.generateNode(S, Pred,
+ state->BindExpr(S, Pred->getLocationContext(), V));
+ }
+ else
+ Bldr.generateNode(S, Pred,
+ state->BindExpr(S, Pred->getLocationContext(),
+ UnknownVal()));
+
+ Bldr.addNodes(Dst);
+ break;
+ }
+ }
+}
+
+bool ExprEngine::replayWithoutInlining(ExplodedNode *N,
+ const LocationContext *CalleeLC) {
+  const StackFrameContext *CalleeSF = CalleeLC->getCurrentStackFrame();
+  assert(CalleeSF);
+  const StackFrameContext *CallerSF =
+      CalleeSF->getParent()->getCurrentStackFrame();
+  assert(CallerSF);
+ ExplodedNode *BeforeProcessingCall = nullptr;
+ const Stmt *CE = CalleeSF->getCallSite();
+
+ // Find the first node before we started processing the call expression.
+ while (N) {
+ ProgramPoint L = N->getLocation();
+ BeforeProcessingCall = N;
+ N = N->pred_empty() ? nullptr : *(N->pred_begin());
+
+ // Skip the nodes corresponding to the inlined code.
+ if (L.getLocationContext()->getCurrentStackFrame() != CallerSF)
+ continue;
+ // We reached the caller. Find the node right before we started
+ // processing the call.
+ if (L.isPurgeKind())
+ continue;
+ if (L.getAs<PreImplicitCall>())
+ continue;
+ if (L.getAs<CallEnter>())
+ continue;
+ if (Optional<StmtPoint> SP = L.getAs<StmtPoint>())
+ if (SP->getStmt() == CE)
+ continue;
+ break;
+ }
+
+ if (!BeforeProcessingCall)
+ return false;
+
+ // TODO: Clean up the unneeded nodes.
+
+  // Build an Epsilon node from which we will restart the analysis.
+ // Note that CE is permitted to be NULL!
+ ProgramPoint NewNodeLoc =
+ EpsilonPoint(BeforeProcessingCall->getLocationContext(), CE);
+ // Add the special flag to GDM to signal retrying with no inlining.
+ // Note, changing the state ensures that we are not going to cache out.
+ ProgramStateRef NewNodeState = BeforeProcessingCall->getState();
+ NewNodeState =
+ NewNodeState->set<ReplayWithoutInlining>(const_cast<Stmt *>(CE));
+
+ // Make the new node a successor of BeforeProcessingCall.
+ bool IsNew = false;
+ ExplodedNode *NewNode = G.getNode(NewNodeLoc, NewNodeState, false, &IsNew);
+  // If the node already existed, we "cached out". This is common here because
+  // backtracking from the inlined function might spawn several paths.
+ if (!IsNew)
+ return true;
+
+ NewNode->addPredecessor(BeforeProcessingCall, G);
+
+ // Add the new node to the work list.
+ Engine.enqueueStmtNode(NewNode, CalleeSF->getCallSiteBlock(),
+ CalleeSF->getIndex());
+ NumTimesRetriedWithoutInlining++;
+ return true;
+}
+
+/// Block entrance. (Update counters).
+void ExprEngine::processCFGBlockEntrance(const BlockEdge &L,
+ NodeBuilderWithSinks &nodeBuilder,
+ ExplodedNode *Pred) {
+ PrettyStackTraceLocationContext CrashInfo(Pred->getLocationContext());
+
+ // If this block is terminated by a loop and it has already been visited the
+ // maximum number of times, widen the loop.
+ unsigned int BlockCount = nodeBuilder.getContext().blockCount();
+ if (BlockCount == AMgr.options.maxBlockVisitOnPath - 1 &&
+ AMgr.options.shouldWidenLoops()) {
+ const Stmt *Term = nodeBuilder.getContext().getBlock()->getTerminator();
+ if (!(Term &&
+ (isa<ForStmt>(Term) || isa<WhileStmt>(Term) || isa<DoStmt>(Term))))
+ return;
+ // Widen.
+ const LocationContext *LCtx = Pred->getLocationContext();
+ ProgramStateRef WidenedState =
+ getWidenedLoopState(Pred->getState(), LCtx, BlockCount, Term);
+ nodeBuilder.generateNode(WidenedState, Pred);
+ return;
+ }
+
+ // FIXME: Refactor this into a checker.
+ if (BlockCount >= AMgr.options.maxBlockVisitOnPath) {
+ static SimpleProgramPointTag tag(TagProviderName, "Block count exceeded");
+ const ExplodedNode *Sink =
+ nodeBuilder.generateSink(Pred->getState(), Pred, &tag);
+
+    // Check whether we stopped in the top-level function or in an inlined one.
+    // The root node carries the location context of the topmost function.
+ const LocationContext *CalleeLC = Pred->getLocation().getLocationContext();
+ const LocationContext *CalleeSF = CalleeLC->getCurrentStackFrame();
+ const LocationContext *RootLC =
+ (*G.roots_begin())->getLocation().getLocationContext();
+ if (RootLC->getCurrentStackFrame() != CalleeSF) {
+ Engine.FunctionSummaries->markReachedMaxBlockCount(CalleeSF->getDecl());
+
+ // Re-run the call evaluation without inlining it, by storing the
+ // no-inlining policy in the state and enqueuing the new work item on
+ // the list. Replay should almost never fail. Use the stats to catch it
+ // if it does.
+ if ((!AMgr.options.NoRetryExhausted &&
+ replayWithoutInlining(Pred, CalleeLC)))
+ return;
+ NumMaxBlockCountReachedInInlined++;
+ } else
+ NumMaxBlockCountReached++;
+
+    // Mark sink nodes as exhausted (for stats) only if the retry failed.
+ Engine.blocksExhausted.push_back(std::make_pair(L, Sink));
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Branch processing.
+//===----------------------------------------------------------------------===//
+
+/// RecoverCastedSymbol - A helper function for processBranch that is used
+/// to try to recover some path-sensitivity for casts of symbolic
+/// integers that promote their values (which are currently not tracked well).
+/// This function returns the SVal bound to Condition->IgnoreCasts if all the
+/// casts did was sign-extend the original value.
+static SVal RecoverCastedSymbol(ProgramStateManager& StateMgr,
+ ProgramStateRef state,
+ const Stmt *Condition,
+ const LocationContext *LCtx,
+ ASTContext &Ctx) {
+
+ const Expr *Ex = dyn_cast<Expr>(Condition);
+ if (!Ex)
+ return UnknownVal();
+
+ uint64_t bits = 0;
+ bool bitsInit = false;
+
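+  // Strip the chain of casts, recording the narrowest integral width the
+  // value was truncated to along the way.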
+ while (const CastExpr *CE = dyn_cast<CastExpr>(Ex)) {
+ QualType T = CE->getType();
+
+ if (!T->isIntegralOrEnumerationType())
+ return UnknownVal();
+
+ uint64_t newBits = Ctx.getTypeSize(T);
+ if (!bitsInit || newBits < bits) {
+ bitsInit = true;
+ bits = newBits;
+ }
+
+ Ex = CE->getSubExpr();
+ }
+
+ // We reached a non-cast. Is it a symbolic value?
+ QualType T = Ex->getType();
+
+ if (!bitsInit || !T->isIntegralOrEnumerationType() ||
+ Ctx.getTypeSize(T) > bits)
+ return UnknownVal();
+
+ return state->getSVal(Ex, LCtx);
+}
+
+#ifndef NDEBUG
+static const Stmt *getRightmostLeaf(const Stmt *Condition) {
+ while (Condition) {
+ const BinaryOperator *BO = dyn_cast<BinaryOperator>(Condition);
+ if (!BO || !BO->isLogicalOp()) {
+ return Condition;
+ }
+ Condition = BO->getRHS()->IgnoreParens();
+ }
+ return nullptr;
+}
+#endif
+
+// Returns the condition the branch at the end of 'B' depends on and whose value
+// has been evaluated within 'B'.
+// In most cases, the terminator condition of 'B' will be evaluated fully in
+// the last statement of 'B'; in those cases, the resolved condition is the
+// given 'Condition'.
+// If the condition of the branch is a logical binary operator tree, the CFG is
+// optimized: in that case, we know that the expression formed by all but the
+// rightmost leaf of the logical binary operator tree must be true, and thus
+// the branch condition is at this point equivalent to the truth value of that
+// rightmost leaf; the CFG block thus only evaluates this rightmost leaf
+// expression in its final statement. As the full condition in that case was
+// not evaluated, and is thus not in the SVal cache, we need to use that leaf
+// expression to evaluate the truth value of the condition in the current state
+// space.
+static const Stmt *ResolveCondition(const Stmt *Condition,
+ const CFGBlock *B) {
+ if (const Expr *Ex = dyn_cast<Expr>(Condition))
+ Condition = Ex->IgnoreParens();
+
+ const BinaryOperator *BO = dyn_cast<BinaryOperator>(Condition);
+ if (!BO || !BO->isLogicalOp())
+ return Condition;
+
+ assert(!B->getTerminator().isTemporaryDtorsBranch() &&
+ "Temporary destructor branches handled by processBindTemporary.");
+
+ // For logical operations, we still have the case where some branches
+ // use the traditional "merge" approach and others sink the branch
+ // directly into the basic blocks representing the logical operation.
+ // We need to distinguish between those two cases here.
+
+ // The invariants are still shifting, but it is possible that the
+ // last element in a CFGBlock is not a CFGStmt. Look for the last
+ // CFGStmt as the value of the condition.
+ CFGBlock::const_reverse_iterator I = B->rbegin(), E = B->rend();
+ for (; I != E; ++I) {
+ CFGElement Elem = *I;
+ Optional<CFGStmt> CS = Elem.getAs<CFGStmt>();
+ if (!CS)
+ continue;
+ const Stmt *LastStmt = CS->getStmt();
+ assert(LastStmt == Condition || LastStmt == getRightmostLeaf(Condition));
+ return LastStmt;
+ }
+ llvm_unreachable("could not resolve condition");
+}
+
+void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
+ NodeBuilderContext& BldCtx,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst,
+ const CFGBlock *DstT,
+ const CFGBlock *DstF) {
+ assert((!Condition || !isa<CXXBindTemporaryExpr>(Condition)) &&
+ "CXXBindTemporaryExprs are handled by processBindTemporary.");
+ const LocationContext *LCtx = Pred->getLocationContext();
+ PrettyStackTraceLocationContext StackCrashInfo(LCtx);
+ currBldrCtx = &BldCtx;
+
+ // Check for NULL conditions; e.g. "for(;;)"
+ if (!Condition) {
+ BranchNodeBuilder NullCondBldr(Pred, Dst, BldCtx, DstT, DstF);
+ NullCondBldr.markInfeasible(false);
+ NullCondBldr.generateNode(Pred->getState(), true, Pred);
+ return;
+ }
+
+ if (const Expr *Ex = dyn_cast<Expr>(Condition))
+ Condition = Ex->IgnoreParens();
+
+ Condition = ResolveCondition(Condition, BldCtx.getBlock());
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+ Condition->getLocStart(),
+ "Error evaluating branch");
+
+ ExplodedNodeSet CheckersOutSet;
+ getCheckerManager().runCheckersForBranchCondition(Condition, CheckersOutSet,
+ Pred, *this);
+ // We generated only sinks.
+ if (CheckersOutSet.empty())
+ return;
+
+ BranchNodeBuilder builder(CheckersOutSet, Dst, BldCtx, DstT, DstF);
+ for (NodeBuilder::iterator I = CheckersOutSet.begin(),
+                             E = CheckersOutSet.end(); I != E; ++I) {
+ ExplodedNode *PredI = *I;
+
+ if (PredI->isSink())
+ continue;
+
+ ProgramStateRef PrevState = PredI->getState();
+ SVal X = PrevState->getSVal(Condition, PredI->getLocationContext());
+
+ if (X.isUnknownOrUndef()) {
+ // Give it a chance to recover from unknown.
+ if (const Expr *Ex = dyn_cast<Expr>(Condition)) {
+ if (Ex->getType()->isIntegralOrEnumerationType()) {
+          // Try to recover some path-sensitivity. Casts of symbolic integers
+          // that promote their values are currently not tracked well.
+          // If 'Condition' is such an expression, try to recover the
+ // underlying value and use that instead.
+ SVal recovered = RecoverCastedSymbol(getStateManager(),
+ PrevState, Condition,
+ PredI->getLocationContext(),
+ getContext());
+
+ if (!recovered.isUnknown()) {
+ X = recovered;
+ }
+ }
+ }
+ }
+
+ // If the condition is still unknown, give up.
+ if (X.isUnknownOrUndef()) {
+ builder.generateNode(PrevState, true, PredI);
+ builder.generateNode(PrevState, false, PredI);
+ continue;
+ }
+
+ DefinedSVal V = X.castAs<DefinedSVal>();
+
+ ProgramStateRef StTrue, StFalse;
+ std::tie(StTrue, StFalse) = PrevState->assume(V);
+
+ // Process the true branch.
+ if (builder.isFeasible(true)) {
+ if (StTrue)
+ builder.generateNode(StTrue, true, PredI);
+ else
+ builder.markInfeasible(true);
+ }
+
+ // Process the false branch.
+ if (builder.isFeasible(false)) {
+ if (StFalse)
+ builder.generateNode(StFalse, false, PredI);
+ else
+ builder.markInfeasible(false);
+ }
+ }
+ currBldrCtx = nullptr;
+}
+
+/// The GDM component containing the set of global variables which have been
+/// previously initialized with explicit initializers.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(InitializedGlobalsSet,
+ llvm::ImmutableSet<const VarDecl *>)
+
+void ExprEngine::processStaticInitializer(const DeclStmt *DS,
+ NodeBuilderContext &BuilderCtx,
+ ExplodedNode *Pred,
+ clang::ento::ExplodedNodeSet &Dst,
+ const CFGBlock *DstT,
+ const CFGBlock *DstF) {
+ PrettyStackTraceLocationContext CrashInfo(Pred->getLocationContext());
+ currBldrCtx = &BuilderCtx;
+
+ const VarDecl *VD = cast<VarDecl>(DS->getSingleDecl());
+ ProgramStateRef state = Pred->getState();
+ bool initHasRun = state->contains<InitializedGlobalsSet>(VD);
+ BranchNodeBuilder builder(Pred, Dst, BuilderCtx, DstT, DstF);
+
+ if (!initHasRun) {
+ state = state->add<InitializedGlobalsSet>(VD);
+ }
+
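+  // Exactly one branch is feasible: the one matching whether this
+  // initializer has already run on the current path.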
+ builder.generateNode(state, initHasRun, Pred);
+ builder.markInfeasible(!initHasRun);
+
+ currBldrCtx = nullptr;
+}
+
+/// processIndirectGoto - Called by CoreEngine. Used to generate successor
+/// nodes by processing the 'effects' of a computed goto jump.
+void ExprEngine::processIndirectGoto(IndirectGotoNodeBuilder &builder) {
+
+ ProgramStateRef state = builder.getState();
+ SVal V = state->getSVal(builder.getTarget(), builder.getLocationContext());
+
+ // Three possibilities:
+ //
+ // (1) We know the computed label.
+ // (2) The label is NULL (or some other constant), or Undefined.
+ // (3) We have no clue about the label. Dispatch to all targets.
+ //
+
+ typedef IndirectGotoNodeBuilder::iterator iterator;
+
+ if (Optional<loc::GotoLabel> LV = V.getAs<loc::GotoLabel>()) {
+ const LabelDecl *L = LV->getLabel();
+
+ for (iterator I = builder.begin(), E = builder.end(); I != E; ++I) {
+ if (I.getLabel() == L) {
+ builder.generateNode(I, state);
+ return;
+ }
+ }
+
+ llvm_unreachable("No block with label.");
+ }
+
+ if (V.getAs<loc::ConcreteInt>() || V.getAs<UndefinedVal>()) {
+ // Dispatch to the first target and mark it as a sink.
+ //ExplodedNode* N = builder.generateNode(builder.begin(), state, true);
+ // FIXME: add checker visit.
+ // UndefBranches.insert(N);
+ return;
+ }
+
+ // This is really a catch-all. We don't support symbolics yet.
+ // FIXME: Implement dispatch for symbolic pointers.
+
+ for (iterator I=builder.begin(), E=builder.end(); I != E; ++I)
+ builder.generateNode(I, state);
+}
+
+#if 0
+static bool stackFrameDoesNotContainInitializedTemporaries(ExplodedNode &Pred) {
+ const StackFrameContext* Frame = Pred.getStackFrame();
+ const llvm::ImmutableSet<CXXBindTemporaryContext> &Set =
+ Pred.getState()->get<InitializedTemporariesSet>();
+ return std::find_if(Set.begin(), Set.end(),
+ [&](const CXXBindTemporaryContext &Ctx) {
+ if (Ctx.second == Frame) {
+ Ctx.first->dump();
+ llvm::errs() << "\n";
+ }
+ return Ctx.second == Frame;
+ }) == Set.end();
+}
+#endif
+
+/// processEndOfFunction - Called by CoreEngine. Used to generate end-of-path
+/// nodes when control reaches the end of a function.
+void ExprEngine::processEndOfFunction(NodeBuilderContext& BC,
+ ExplodedNode *Pred) {
+  // FIXME: Assert that stackFrameDoesNotContainInitializedTemporaries(*Pred).
+  // We currently cannot enable this assert, as lifetime-extended temporaries
+  // are not modeled correctly.
+ PrettyStackTraceLocationContext CrashInfo(Pred->getLocationContext());
+ StateMgr.EndPath(Pred->getState());
+
+ ExplodedNodeSet Dst;
+ if (Pred->getLocationContext()->inTopFrame()) {
+ // Remove dead symbols.
+ ExplodedNodeSet AfterRemovedDead;
+ removeDeadOnEndOfFunction(BC, Pred, AfterRemovedDead);
+
+ // Notify checkers.
+ for (ExplodedNodeSet::iterator I = AfterRemovedDead.begin(),
+ E = AfterRemovedDead.end(); I != E; ++I) {
+ getCheckerManager().runCheckersForEndFunction(BC, Dst, *I, *this);
+ }
+ } else {
+ getCheckerManager().runCheckersForEndFunction(BC, Dst, Pred, *this);
+ }
+
+ Engine.enqueueEndOfFunction(Dst);
+}
+
+/// processSwitch - Called by CoreEngine. Used to generate successor
+/// nodes by processing the 'effects' of a switch statement.
+void ExprEngine::processSwitch(SwitchNodeBuilder& builder) {
+ typedef SwitchNodeBuilder::iterator iterator;
+ ProgramStateRef state = builder.getState();
+ const Expr *CondE = builder.getCondition();
+ SVal CondV_untested = state->getSVal(CondE, builder.getLocationContext());
+
+ if (CondV_untested.isUndef()) {
+ //ExplodedNode* N = builder.generateDefaultCaseNode(state, true);
+ // FIXME: add checker
+ //UndefBranches.insert(N);
+
+ return;
+ }
+ DefinedOrUnknownSVal CondV = CondV_untested.castAs<DefinedOrUnknownSVal>();
+
+ ProgramStateRef DefaultSt = state;
+
+ iterator I = builder.begin(), EI = builder.end();
+ bool defaultIsFeasible = I == EI;
+
+ for ( ; I != EI; ++I) {
+ // Successor may be pruned out during CFG construction.
+ if (!I.getBlock())
+ continue;
+
+ const CaseStmt *Case = I.getCase();
+
+ // Evaluate the LHS of the case value.
+ llvm::APSInt V1 = Case->getLHS()->EvaluateKnownConstInt(getContext());
+ assert(V1.getBitWidth() == getContext().getTypeSize(CondE->getType()));
+
+ // Get the RHS of the case, if it exists.
+ llvm::APSInt V2;
+ if (const Expr *E = Case->getRHS())
+ V2 = E->EvaluateKnownConstInt(getContext());
+ else
+ V2 = V1;
+
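+    // Constrain the condition to the inclusive range [V1, V2] covered by
+    // this case; the leftover state feeds the remaining cases and 'default'.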
+ ProgramStateRef StateCase;
+ if (Optional<NonLoc> NL = CondV.getAs<NonLoc>())
+ std::tie(StateCase, DefaultSt) =
+ DefaultSt->assumeWithinInclusiveRange(*NL, V1, V2);
+ else // UnknownVal
+ StateCase = DefaultSt;
+
+ if (StateCase)
+ builder.generateCaseStmtNode(I, StateCase);
+
+ // Now "assume" that the case doesn't match. Add this state
+ // to the default state (if it is feasible).
+ if (DefaultSt)
+ defaultIsFeasible = true;
+ else {
+ defaultIsFeasible = false;
+ break;
+ }
+ }
+
+ if (!defaultIsFeasible)
+ return;
+
+ // If we have switch(enum value), the default branch is not
+ // feasible if all of the enum constants not covered by 'case:' statements
+ // are not feasible values for the switch condition.
+ //
+  // Note that this isn't as accurate as it could be. Even if there isn't
+  // a case for a particular enum value, as long as that enum value isn't
+  // feasible, it shouldn't be considered when deciding whether the
+  // 'default:' branch is reachable.
+ const SwitchStmt *SS = builder.getSwitch();
+ const Expr *CondExpr = SS->getCond()->IgnoreParenImpCasts();
+ if (CondExpr->getType()->getAs<EnumType>()) {
+ if (SS->isAllEnumCasesCovered())
+ return;
+ }
+
+ builder.generateDefaultCaseNode(DefaultSt);
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer functions: Loads and stores.
+//===----------------------------------------------------------------------===//
+
+void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ // C permits "extern void v", and if you cast the address to a valid type,
+    // you can even do things with it. We simply pretend such a declaration
+    // behaves like an ordinary lvalue.
+ assert(Ex->isGLValue() || VD->getType()->isVoidType());
+ const LocationContext *LocCtxt = Pred->getLocationContext();
+ const Decl *D = LocCtxt->getDecl();
+ const auto *MD = D ? dyn_cast<CXXMethodDecl>(D) : nullptr;
+ const auto *DeclRefEx = dyn_cast<DeclRefExpr>(Ex);
+ SVal V;
+ bool IsReference;
+ if (AMgr.options.shouldInlineLambdas() && DeclRefEx &&
+ DeclRefEx->refersToEnclosingVariableOrCapture() && MD &&
+ MD->getParent()->isLambda()) {
+ // Lookup the field of the lambda.
+ const CXXRecordDecl *CXXRec = MD->getParent();
+ llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
+ FieldDecl *LambdaThisCaptureField;
+ CXXRec->getCaptureFields(LambdaCaptureFields, LambdaThisCaptureField);
+ const FieldDecl *FD = LambdaCaptureFields[VD];
+ if (!FD) {
+ // When a constant is captured, sometimes no corresponding field is
+ // created in the lambda object.
+ assert(VD->getType().isConstQualified());
+ V = state->getLValue(VD, LocCtxt);
+ IsReference = false;
+ } else {
+ Loc CXXThis =
+ svalBuilder.getCXXThis(MD, LocCtxt->getCurrentStackFrame());
+ SVal CXXThisVal = state->getSVal(CXXThis);
+ V = state->getLValue(FD, CXXThisVal);
+ IsReference = FD->getType()->isReferenceType();
+ }
+ } else {
+ V = state->getLValue(VD, LocCtxt);
+ IsReference = VD->getType()->isReferenceType();
+ }
+
+ // For references, the 'lvalue' is the pointer address stored in the
+ // reference region.
+ if (IsReference) {
+ if (const MemRegion *R = V.getAsRegion())
+ V = state->getSVal(R);
+ else
+ V = UnknownVal();
+ }
+
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), nullptr,
+ ProgramPoint::PostLValueKind);
+ return;
+ }
+ if (const EnumConstantDecl *ED = dyn_cast<EnumConstantDecl>(D)) {
+ assert(!Ex->isGLValue());
+ SVal V = svalBuilder.makeIntVal(ED->getInitVal());
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V));
+ return;
+ }
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ SVal V = svalBuilder.getFunctionPointer(FD);
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), nullptr,
+ ProgramPoint::PostLValueKind);
+ return;
+ }
+ if (isa<FieldDecl>(D)) {
+ // FIXME: Compute lvalue of field pointers-to-member.
+ // Right now we just use a non-null void pointer, so that it gives proper
+ // results in boolean contexts.
+ SVal V = svalBuilder.conjureSymbolVal(Ex, LCtx, getContext().VoidPtrTy,
+ currBldrCtx->blockCount());
+ state = state->assume(V.castAs<DefinedOrUnknownSVal>(), true);
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), nullptr,
+ ProgramPoint::PostLValueKind);
+ return;
+ }
+
+ llvm_unreachable("Support for this Decl not implemented.");
+}
+
+/// VisitLvalArraySubscriptExpr - Transfer function for array accesses.
+void ExprEngine::VisitLvalArraySubscriptExpr(const ArraySubscriptExpr *A,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst){
+
+ const Expr *Base = A->getBase()->IgnoreParens();
+ const Expr *Idx = A->getIdx()->IgnoreParens();
+
+ ExplodedNodeSet checkerPreStmt;
+ getCheckerManager().runCheckersForPreStmt(checkerPreStmt, Pred, A, *this);
+
+ StmtNodeBuilder Bldr(checkerPreStmt, Dst, *currBldrCtx);
+ assert(A->isGLValue() ||
+ (!AMgr.getLangOpts().CPlusPlus &&
+ A->getType().isCForbiddenLValueType()));
+
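+  // For each surviving node, compute the element lvalue from the base and
+  // index values and bind it to the subscript expression.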
+ for (ExplodedNodeSet::iterator it = checkerPreStmt.begin(),
+ ei = checkerPreStmt.end(); it != ei; ++it) {
+ const LocationContext *LCtx = (*it)->getLocationContext();
+ ProgramStateRef state = (*it)->getState();
+ SVal V = state->getLValue(A->getType(),
+ state->getSVal(Idx, LCtx),
+ state->getSVal(Base, LCtx));
+ Bldr.generateNode(A, *it, state->BindExpr(A, LCtx, V), nullptr,
+ ProgramPoint::PostLValueKind);
+ }
+}
+
+/// VisitMemberExpr - Transfer function for member expressions.
+void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+
+ // FIXME: Prechecks eventually go in ::Visit().
+ ExplodedNodeSet CheckedSet;
+ getCheckerManager().runCheckersForPreStmt(CheckedSet, Pred, M, *this);
+
+ ExplodedNodeSet EvalSet;
+ ValueDecl *Member = M->getMemberDecl();
+
+ // Handle static member variables and enum constants accessed via
+ // member syntax.
+ if (isa<VarDecl>(Member) || isa<EnumConstantDecl>(Member)) {
+    for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
+         I != E; ++I) {
+      VisitCommonDeclRefExpr(M, Member, *I, EvalSet);
+ }
+ } else {
+ StmtNodeBuilder Bldr(CheckedSet, EvalSet, *currBldrCtx);
+ ExplodedNodeSet Tmp;
+
+ for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
+ I != E; ++I) {
+ ProgramStateRef state = (*I)->getState();
+ const LocationContext *LCtx = (*I)->getLocationContext();
+ Expr *BaseExpr = M->getBase();
+
+ // Handle C++ method calls.
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Member)) {
+ if (MD->isInstance())
+ state = createTemporaryRegionIfNeeded(state, LCtx, BaseExpr);
+
+ SVal MDVal = svalBuilder.getFunctionPointer(MD);
+ state = state->BindExpr(M, LCtx, MDVal);
+
+ Bldr.generateNode(M, *I, state);
+ continue;
+ }
+
+ // Handle regular struct fields / member variables.
+ state = createTemporaryRegionIfNeeded(state, LCtx, BaseExpr);
+ SVal baseExprVal = state->getSVal(BaseExpr, LCtx);
+
+ FieldDecl *field = cast<FieldDecl>(Member);
+ SVal L = state->getLValue(field, baseExprVal);
+
+ if (M->isGLValue() || M->getType()->isArrayType()) {
+ // We special-case rvalues of array type because the analyzer cannot
+ // reason about them, since we expect all regions to be wrapped in Locs.
+ // We instead treat these as lvalues and assume that they will decay to
+ // pointers as soon as they are used.
+ if (!M->isGLValue()) {
+ assert(M->getType()->isArrayType());
+ const ImplicitCastExpr *PE =
+ dyn_cast<ImplicitCastExpr>((*I)->getParentMap().getParent(M));
+ if (!PE || PE->getCastKind() != CK_ArrayToPointerDecay) {
+ llvm_unreachable("should always be wrapped in ArrayToPointerDecay");
+ }
+ }
+
+ if (field->getType()->isReferenceType()) {
+ if (const MemRegion *R = L.getAsRegion())
+ L = state->getSVal(R);
+ else
+ L = UnknownVal();
+ }
+
+ Bldr.generateNode(M, *I, state->BindExpr(M, LCtx, L), nullptr,
+ ProgramPoint::PostLValueKind);
+ } else {
+ Bldr.takeNodes(*I);
+ evalLoad(Tmp, M, M, *I, state, L);
+ Bldr.addNodes(Tmp);
+ }
+ }
+ }
+
+ getCheckerManager().runCheckersForPostStmt(Dst, EvalSet, M, *this);
+}
+
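+// Records every symbol reachable from a given value; used below to tell
+// checkers which symbols escape when a value is bound to an untracked
+// location.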
+namespace {
+class CollectReachableSymbolsCallback final : public SymbolVisitor {
+ InvalidatedSymbols Symbols;
+
+public:
+ CollectReachableSymbolsCallback(ProgramStateRef State) {}
+ const InvalidatedSymbols &getSymbols() const { return Symbols; }
+
+ bool VisitSymbol(SymbolRef Sym) override {
+ Symbols.insert(Sym);
+ return true;
+ }
+};
+} // end anonymous namespace
+
+// A value escapes in three possible cases:
+// (1) We are binding to something that is not a memory region.
+// (2) We are binding to a MemRegion that does not have stack storage.
+// (3) We are binding to a MemRegion with stack storage that the store
+// does not understand.
+ProgramStateRef ExprEngine::processPointerEscapedOnBind(ProgramStateRef State,
+ SVal Loc, SVal Val) {
+ // Are we storing to something that causes the value to "escape"?
+ bool escapes = true;
+
+ // TODO: Move to StoreManager.
+ if (Optional<loc::MemRegionVal> regionLoc = Loc.getAs<loc::MemRegionVal>()) {
+ escapes = !regionLoc->getRegion()->hasStackStorage();
+
+ if (!escapes) {
+ // To test (3), generate a new state with the binding added. If it is
+ // the same state, then it escapes (since the store cannot represent
+ // the binding).
+ // Do this only if we know that the store is not supposed to generate the
+ // same state.
+ SVal StoredVal = State->getSVal(regionLoc->getRegion());
+ if (StoredVal != Val)
+ escapes = (State == (State->bindLoc(*regionLoc, Val)));
+ }
+ }
+
+ // If our store can represent the binding and we aren't storing to something
+ // that doesn't have local storage then just return and have the simulation
+ // state continue as is.
+ if (!escapes)
+ return State;
+
+ // Otherwise, find all symbols referenced by 'val' that we are tracking
+ // and stop tracking them.
+ CollectReachableSymbolsCallback Scanner =
+ State->scanReachableSymbols<CollectReachableSymbolsCallback>(Val);
+ const InvalidatedSymbols &EscapedSymbols = Scanner.getSymbols();
+ State = getCheckerManager().runCheckersForPointerEscape(State,
+ EscapedSymbols,
+ /*CallEvent*/ nullptr,
+ PSK_EscapeOnBind,
+ nullptr);
+
+ return State;
+}
+
+ProgramStateRef
+ExprEngine::notifyCheckersOfPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols *Invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallEvent *Call,
+ RegionAndSymbolInvalidationTraits &ITraits) {
+
+ if (!Invalidated || Invalidated->empty())
+ return State;
+
+ if (!Call)
+ return getCheckerManager().runCheckersForPointerEscape(State,
+ *Invalidated,
+ nullptr,
+ PSK_EscapeOther,
+ &ITraits);
+
+ // If the symbols were invalidated by a call, we want to find out which ones
+ // were invalidated directly due to being arguments to the call.
+ InvalidatedSymbols SymbolsDirectlyInvalidated;
+ for (ArrayRef<const MemRegion *>::iterator I = ExplicitRegions.begin(),
+ E = ExplicitRegions.end(); I != E; ++I) {
+ if (const SymbolicRegion *R = (*I)->StripCasts()->getAs<SymbolicRegion>())
+ SymbolsDirectlyInvalidated.insert(R->getSymbol());
+ }
+
+ InvalidatedSymbols SymbolsIndirectlyInvalidated;
+ for (InvalidatedSymbols::const_iterator I=Invalidated->begin(),
+ E = Invalidated->end(); I!=E; ++I) {
+ SymbolRef sym = *I;
+ if (SymbolsDirectlyInvalidated.count(sym))
+ continue;
+ SymbolsIndirectlyInvalidated.insert(sym);
+ }
+
+ if (!SymbolsDirectlyInvalidated.empty())
+ State = getCheckerManager().runCheckersForPointerEscape(State,
+ SymbolsDirectlyInvalidated, Call, PSK_DirectEscapeOnCall, &ITraits);
+
+ // Notify about the symbols that get indirectly invalidated by the call.
+ if (!SymbolsIndirectlyInvalidated.empty())
+ State = getCheckerManager().runCheckersForPointerEscape(State,
+ SymbolsIndirectlyInvalidated, Call, PSK_IndirectEscapeOnCall, &ITraits);
+
+ return State;
+}
+
+/// evalBind - Handle the semantics of binding a value to a specific location.
+/// This method is used by evalStore and (soon) VisitDeclStmt, and others.
+void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
+ ExplodedNode *Pred,
+ SVal location, SVal Val,
+ bool atDeclInit, const ProgramPoint *PP) {
+
+ const LocationContext *LC = Pred->getLocationContext();
+ PostStmt PS(StoreE, LC);
+ if (!PP)
+ PP = &PS;
+
+ // Do a previsit of the bind.
+ ExplodedNodeSet CheckedSet;
+ getCheckerManager().runCheckersForBind(CheckedSet, Pred, location, Val,
+ StoreE, *this, *PP);
+
+ StmtNodeBuilder Bldr(CheckedSet, Dst, *currBldrCtx);
+
+ // If the location is not a 'Loc', it will already be handled by
+ // the checkers. There is nothing left to do.
+ if (!location.getAs<Loc>()) {
+ const ProgramPoint L = PostStore(StoreE, LC, /*Loc*/nullptr,
+ /*tag*/nullptr);
+ ProgramStateRef state = Pred->getState();
+ state = processPointerEscapedOnBind(state, location, Val);
+ Bldr.generateNode(L, state, Pred);
+ return;
+ }
+
+ for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
+ I!=E; ++I) {
+ ExplodedNode *PredI = *I;
+ ProgramStateRef state = PredI->getState();
+
+ state = processPointerEscapedOnBind(state, location, Val);
+
+    // When binding the value, pass on the hint that this is an initialization.
+ // For initializations, we do not need to inform clients of region
+ // changes.
+ state = state->bindLoc(location.castAs<Loc>(),
+ Val, /* notifyChanges = */ !atDeclInit);
+
+ const MemRegion *LocReg = nullptr;
+ if (Optional<loc::MemRegionVal> LocRegVal =
+ location.getAs<loc::MemRegionVal>()) {
+ LocReg = LocRegVal->getRegion();
+ }
+
+ const ProgramPoint L = PostStore(StoreE, LC, LocReg, nullptr);
+ Bldr.generateNode(L, state, PredI);
+ }
+}
+
+/// evalStore - Handle the semantics of a store via an assignment.
+/// @param Dst The node set to store generated state nodes
+/// @param AssignE The assignment expression if the store happens in an
+/// assignment.
+/// @param LocationE The location expression that is stored to.
+/// @param state The current simulation state
+/// @param location The location to store the value
+/// @param Val The value to be stored
+void ExprEngine::evalStore(ExplodedNodeSet &Dst, const Expr *AssignE,
+ const Expr *LocationE,
+ ExplodedNode *Pred,
+ ProgramStateRef state, SVal location, SVal Val,
+ const ProgramPointTag *tag) {
+ // Proceed with the store. We use AssignE as the anchor for the PostStore
+ // ProgramPoint if it is non-NULL, and LocationE otherwise.
+ const Expr *StoreE = AssignE ? AssignE : LocationE;
+
+ // Evaluate the location (checks for bad dereferences).
+ ExplodedNodeSet Tmp;
+ evalLocation(Tmp, AssignE, LocationE, Pred, state, location, tag, false);
+
+ if (Tmp.empty())
+ return;
+
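+  // The location checks above have already had a chance to report an
+  // undefined location; do not bind through it.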
+ if (location.isUndef())
+ return;
+
+ for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI)
+ evalBind(Dst, StoreE, *NI, location, Val, false);
+}
+
+void ExprEngine::evalLoad(ExplodedNodeSet &Dst,
+ const Expr *NodeEx,
+ const Expr *BoundEx,
+ ExplodedNode *Pred,
+ ProgramStateRef state,
+ SVal location,
+ const ProgramPointTag *tag,
+ QualType LoadTy)
+{
+ assert(!location.getAs<NonLoc>() && "location cannot be a NonLoc.");
+
+ // Are we loading from a region? This actually results in two loads; one
+ // to fetch the address of the referenced value and one to fetch the
+ // referenced value.
+ if (const TypedValueRegion *TR =
+ dyn_cast_or_null<TypedValueRegion>(location.getAsRegion())) {
+
+ QualType ValTy = TR->getValueType();
+ if (const ReferenceType *RT = ValTy->getAs<ReferenceType>()) {
+ static SimpleProgramPointTag
+ loadReferenceTag(TagProviderName, "Load Reference");
+ ExplodedNodeSet Tmp;
+ evalLoadCommon(Tmp, NodeEx, BoundEx, Pred, state,
+ location, &loadReferenceTag,
+ getContext().getPointerType(RT->getPointeeType()));
+
+ // Perform the load from the referenced value.
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end() ; I!=E; ++I) {
+ state = (*I)->getState();
+ location = state->getSVal(BoundEx, (*I)->getLocationContext());
+ evalLoadCommon(Dst, NodeEx, BoundEx, *I, state, location, tag, LoadTy);
+ }
+ return;
+ }
+ }
+
+ evalLoadCommon(Dst, NodeEx, BoundEx, Pred, state, location, tag, LoadTy);
+}
+
+void ExprEngine::evalLoadCommon(ExplodedNodeSet &Dst,
+ const Expr *NodeEx,
+ const Expr *BoundEx,
+ ExplodedNode *Pred,
+ ProgramStateRef state,
+ SVal location,
+ const ProgramPointTag *tag,
+ QualType LoadTy) {
+ assert(NodeEx);
+ assert(BoundEx);
+ // Evaluate the location (checks for bad dereferences).
+ ExplodedNodeSet Tmp;
+ evalLocation(Tmp, NodeEx, BoundEx, Pred, state, location, tag, true);
+ if (Tmp.empty())
+ return;
+
+ StmtNodeBuilder Bldr(Tmp, Dst, *currBldrCtx);
+ if (location.isUndef())
+ return;
+
+ // Proceed with the load.
+ for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI) {
+ state = (*NI)->getState();
+ const LocationContext *LCtx = (*NI)->getLocationContext();
+
+ SVal V = UnknownVal();
+ if (location.isValid()) {
+ if (LoadTy.isNull())
+ LoadTy = BoundEx->getType();
+ V = state->getSVal(location.castAs<Loc>(), LoadTy);
+ }
+
+ Bldr.generateNode(NodeEx, *NI, state->BindExpr(BoundEx, LCtx, V), tag,
+ ProgramPoint::PostLoadKind);
+ }
+}
+
+void ExprEngine::evalLocation(ExplodedNodeSet &Dst,
+ const Stmt *NodeEx,
+ const Stmt *BoundEx,
+ ExplodedNode *Pred,
+ ProgramStateRef state,
+ SVal location,
+ const ProgramPointTag *tag,
+ bool isLoad) {
+ StmtNodeBuilder BldrTop(Pred, Dst, *currBldrCtx);
+  // Early checks for performance reasons.
+ if (location.isUnknown()) {
+ return;
+ }
+
+ ExplodedNodeSet Src;
+ BldrTop.takeNodes(Pred);
+ StmtNodeBuilder Bldr(Pred, Src, *currBldrCtx);
+ if (Pred->getState() != state) {
+ // Associate this new state with an ExplodedNode.
+ // FIXME: If I pass null tag, the graph is incorrect, e.g for
+ // int *p;
+ // p = 0;
+ // *p = 0xDEADBEEF;
+ // "p = 0" is not noted as "Null pointer value stored to 'p'" but
+ // instead "int *p" is noted as
+ // "Variable 'p' initialized to a null pointer value"
+
+ static SimpleProgramPointTag tag(TagProviderName, "Location");
+ Bldr.generateNode(NodeEx, Pred, state, &tag);
+ }
+ ExplodedNodeSet Tmp;
+ getCheckerManager().runCheckersForLocation(Tmp, Src, location, isLoad,
+ NodeEx, BoundEx, *this);
+ BldrTop.addNodes(Tmp);
+}
+
+std::pair<const ProgramPointTag *, const ProgramPointTag*>
+ExprEngine::geteagerlyAssumeBinOpBifurcationTags() {
+ static SimpleProgramPointTag
+ eagerlyAssumeBinOpBifurcationTrue(TagProviderName,
+ "Eagerly Assume True"),
+ eagerlyAssumeBinOpBifurcationFalse(TagProviderName,
+ "Eagerly Assume False");
+ return std::make_pair(&eagerlyAssumeBinOpBifurcationTrue,
+ &eagerlyAssumeBinOpBifurcationFalse);
+}
+
+void ExprEngine::evalEagerlyAssumeBinOpBifurcation(ExplodedNodeSet &Dst,
+ ExplodedNodeSet &Src,
+ const Expr *Ex) {
+ StmtNodeBuilder Bldr(Src, Dst, *currBldrCtx);
+
+ for (ExplodedNodeSet::iterator I=Src.begin(), E=Src.end(); I!=E; ++I) {
+ ExplodedNode *Pred = *I;
+    // Test if the previous node was at the same expression. This can happen
+ // when the expression fails to evaluate to anything meaningful and
+ // (as an optimization) we don't generate a node.
+ ProgramPoint P = Pred->getLocation();
+ if (!P.getAs<PostStmt>() || P.castAs<PostStmt>().getStmt() != Ex) {
+ continue;
+ }
+
+ ProgramStateRef state = Pred->getState();
+ SVal V = state->getSVal(Ex, Pred->getLocationContext());
+ Optional<nonloc::SymbolVal> SEV = V.getAs<nonloc::SymbolVal>();
+ if (SEV && SEV->isExpression()) {
+ const std::pair<const ProgramPointTag *, const ProgramPointTag*> &tags =
+ geteagerlyAssumeBinOpBifurcationTags();
+
+ ProgramStateRef StateTrue, StateFalse;
+ std::tie(StateTrue, StateFalse) = state->assume(*SEV);
+
+ // First assume that the condition is true.
+ if (StateTrue) {
+ SVal Val = svalBuilder.makeIntVal(1U, Ex->getType());
+ StateTrue = StateTrue->BindExpr(Ex, Pred->getLocationContext(), Val);
+ Bldr.generateNode(Ex, Pred, StateTrue, tags.first);
+ }
+
+ // Next, assume that the condition is false.
+ if (StateFalse) {
+ SVal Val = svalBuilder.makeIntVal(0U, Ex->getType());
+ StateFalse = StateFalse->BindExpr(Ex, Pred->getLocationContext(), Val);
+ Bldr.generateNode(Ex, Pred, StateFalse, tags.second);
+ }
+ }
+ }
+}
+
+void ExprEngine::VisitGCCAsmStmt(const GCCAsmStmt *A, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+ // We have processed both the inputs and the outputs. All of the outputs
+ // should evaluate to Locs. Nuke all of their values.
+
+ // FIXME: Some day in the future it would be nice to allow a "plug-in"
+ // which interprets the inline asm and stores proper results in the
+ // outputs.
+
+ ProgramStateRef state = Pred->getState();
+
+ for (const Expr *O : A->outputs()) {
+ SVal X = state->getSVal(O, Pred->getLocationContext());
+    assert(!X.getAs<NonLoc>()); // Should be an LVal, unknown, or undef.
+
+ if (Optional<Loc> LV = X.getAs<Loc>())
+ state = state->bindLoc(*LV, UnknownVal());
+ }
+
+ Bldr.generateNode(A, Pred, state);
+}
+
+void ExprEngine::VisitMSAsmStmt(const MSAsmStmt *A, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+ Bldr.generateNode(A, Pred, Pred->getState());
+}
+
+//===----------------------------------------------------------------------===//
+// Visualization.
+//===----------------------------------------------------------------------===//
+
+#ifndef NDEBUG
+static ExprEngine* GraphPrintCheckerState;
+static SourceManager* GraphPrintSourceManager;
+
+namespace llvm {
+template<>
+struct DOTGraphTraits<ExplodedNode*> :
+ public DefaultDOTGraphTraits {
+
+ DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
+
+ // FIXME: Since we do not cache error nodes in ExprEngine now, this does not
+ // work.
+ static std::string getNodeAttributes(const ExplodedNode *N, void*) {
+
+#if 0
+ // FIXME: Replace with a general scheme to tell if the node is
+ // an error node.
+ if (GraphPrintCheckerState->isImplicitNullDeref(N) ||
+ GraphPrintCheckerState->isExplicitNullDeref(N) ||
+ GraphPrintCheckerState->isUndefDeref(N) ||
+ GraphPrintCheckerState->isUndefStore(N) ||
+ GraphPrintCheckerState->isUndefControlFlow(N) ||
+ GraphPrintCheckerState->isUndefResult(N) ||
+ GraphPrintCheckerState->isBadCall(N) ||
+ GraphPrintCheckerState->isUndefArg(N))
+ return "color=\"red\",style=\"filled\"";
+
+ if (GraphPrintCheckerState->isNoReturnCall(N))
+ return "color=\"blue\",style=\"filled\"";
+#endif
+ return "";
+ }
+
+ static void printLocation(raw_ostream &Out, SourceLocation SLoc) {
+ if (SLoc.isFileID()) {
+ Out << "\\lline="
+ << GraphPrintSourceManager->getExpansionLineNumber(SLoc)
+ << " col="
+ << GraphPrintSourceManager->getExpansionColumnNumber(SLoc)
+ << "\\l";
+ }
+ }
+
+ static std::string getNodeLabel(const ExplodedNode *N, void*){
+
+ std::string sbuf;
+ llvm::raw_string_ostream Out(sbuf);
+
+ // Program Location.
+ ProgramPoint Loc = N->getLocation();
+
+ switch (Loc.getKind()) {
+ case ProgramPoint::BlockEntranceKind: {
+ Out << "Block Entrance: B"
+ << Loc.castAs<BlockEntrance>().getBlock()->getBlockID();
+ if (const NamedDecl *ND =
+ dyn_cast<NamedDecl>(Loc.getLocationContext()->getDecl())) {
+ Out << " (";
+ ND->printName(Out);
+ Out << ")";
+ }
+ break;
+ }
+
+ case ProgramPoint::BlockExitKind:
+ assert (false);
+ break;
+
+ case ProgramPoint::CallEnterKind:
+ Out << "CallEnter";
+ break;
+
+ case ProgramPoint::CallExitBeginKind:
+ Out << "CallExitBegin";
+ break;
+
+ case ProgramPoint::CallExitEndKind:
+ Out << "CallExitEnd";
+ break;
+
+ case ProgramPoint::PostStmtPurgeDeadSymbolsKind:
+ Out << "PostStmtPurgeDeadSymbols";
+ break;
+
+ case ProgramPoint::PreStmtPurgeDeadSymbolsKind:
+ Out << "PreStmtPurgeDeadSymbols";
+ break;
+
+ case ProgramPoint::EpsilonKind:
+ Out << "Epsilon Point";
+ break;
+
+ case ProgramPoint::PreImplicitCallKind: {
+ ImplicitCallPoint PC = Loc.castAs<ImplicitCallPoint>();
+ Out << "PreCall: ";
+
+ // FIXME: Get proper printing options.
+ PC.getDecl()->print(Out, LangOptions());
+ printLocation(Out, PC.getLocation());
+ break;
+ }
+
+ case ProgramPoint::PostImplicitCallKind: {
+ ImplicitCallPoint PC = Loc.castAs<ImplicitCallPoint>();
+ Out << "PostCall: ";
+
+ // FIXME: Get proper printing options.
+ PC.getDecl()->print(Out, LangOptions());
+ printLocation(Out, PC.getLocation());
+ break;
+ }
+
+ case ProgramPoint::PostInitializerKind: {
+ Out << "PostInitializer: ";
+ const CXXCtorInitializer *Init =
+ Loc.castAs<PostInitializer>().getInitializer();
+ if (const FieldDecl *FD = Init->getAnyMember())
+ Out << *FD;
+ else {
+ QualType Ty = Init->getTypeSourceInfo()->getType();
+ Ty = Ty.getLocalUnqualifiedType();
+ LangOptions LO; // FIXME.
+ Ty.print(Out, LO);
+ }
+ break;
+ }
+
+ case ProgramPoint::BlockEdgeKind: {
+ const BlockEdge &E = Loc.castAs<BlockEdge>();
+ Out << "Edge: (B" << E.getSrc()->getBlockID() << ", B"
+ << E.getDst()->getBlockID() << ')';
+
+ if (const Stmt *T = E.getSrc()->getTerminator()) {
+ SourceLocation SLoc = T->getLocStart();
+
+ Out << "\\|Terminator: ";
+ LangOptions LO; // FIXME.
+ E.getSrc()->printTerminator(Out, LO);
+
+ if (SLoc.isFileID()) {
+ Out << "\\lline="
+ << GraphPrintSourceManager->getExpansionLineNumber(SLoc)
+ << " col="
+ << GraphPrintSourceManager->getExpansionColumnNumber(SLoc);
+ }
+
+ if (isa<SwitchStmt>(T)) {
+ const Stmt *Label = E.getDst()->getLabel();
+
+ if (Label) {
+ if (const CaseStmt *C = dyn_cast<CaseStmt>(Label)) {
+ Out << "\\lcase ";
+ LangOptions LO; // FIXME.
+ if (C->getLHS())
+ C->getLHS()->printPretty(Out, nullptr, PrintingPolicy(LO));
+
+ if (const Stmt *RHS = C->getRHS()) {
+ Out << " .. ";
+ RHS->printPretty(Out, nullptr, PrintingPolicy(LO));
+ }
+
+ Out << ":";
+ }
+ else {
+ assert (isa<DefaultStmt>(Label));
+ Out << "\\ldefault:";
+ }
+ }
+ else
+ Out << "\\l(implicit) default:";
+ }
+ else if (isa<IndirectGotoStmt>(T)) {
+ // FIXME
+ }
+ else {
+ Out << "\\lCondition: ";
+ if (*E.getSrc()->succ_begin() == E.getDst())
+ Out << "true";
+ else
+ Out << "false";
+ }
+
+ Out << "\\l";
+ }
+
+#if 0
+ // FIXME: Replace with a general scheme to determine
+ // the name of the check.
+ if (GraphPrintCheckerState->isUndefControlFlow(N)) {
+ Out << "\\|Control-flow based on\\lUndefined value.\\l";
+ }
+#endif
+ break;
+ }
+
+ default: {
+ const Stmt *S = Loc.castAs<StmtPoint>().getStmt();
+ assert(S != nullptr && "Expecting non-null Stmt");
+
+ Out << S->getStmtClassName() << ' ' << (const void*) S << ' ';
+ LangOptions LO; // FIXME.
+ S->printPretty(Out, nullptr, PrintingPolicy(LO));
+ printLocation(Out, S->getLocStart());
+
+      if (Loc.getAs<PreStmt>())
+        Out << "\\lPreStmt\\l";
+      else if (Loc.getAs<PostLoad>())
+        Out << "\\lPostLoad\\l";
+ else if (Loc.getAs<PostStore>())
+ Out << "\\lPostStore\\l";
+ else if (Loc.getAs<PostLValue>())
+ Out << "\\lPostLValue\\l";
+
+#if 0
+ // FIXME: Replace with a general scheme to determine
+ // the name of the check.
+ if (GraphPrintCheckerState->isImplicitNullDeref(N))
+ Out << "\\|Implicit-Null Dereference.\\l";
+ else if (GraphPrintCheckerState->isExplicitNullDeref(N))
+ Out << "\\|Explicit-Null Dereference.\\l";
+ else if (GraphPrintCheckerState->isUndefDeref(N))
+        Out << "\\|Dereference of undefined value.\\l";
+ else if (GraphPrintCheckerState->isUndefStore(N))
+ Out << "\\|Store to Undefined Loc.";
+ else if (GraphPrintCheckerState->isUndefResult(N))
+ Out << "\\|Result of operation is undefined.";
+ else if (GraphPrintCheckerState->isNoReturnCall(N))
+ Out << "\\|Call to function marked \"noreturn\".";
+ else if (GraphPrintCheckerState->isBadCall(N))
+ Out << "\\|Call to NULL/Undefined.";
+ else if (GraphPrintCheckerState->isUndefArg(N))
+ Out << "\\|Argument in call is undefined";
+#endif
+
+ break;
+ }
+ }
+
+ ProgramStateRef state = N->getState();
+ Out << "\\|StateID: " << (const void*) state.get()
+ << " NodeID: " << (const void*) N << "\\|";
+ state->printDOT(Out);
+
+ Out << "\\l";
+
+ if (const ProgramPointTag *tag = Loc.getTag()) {
+ Out << "\\|Tag: " << tag->getTagDescription();
+ Out << "\\l";
+ }
+ return Out.str();
+ }
+};
+} // end llvm namespace
+#endif
+
+void ExprEngine::ViewGraph(bool trim) {
+#ifndef NDEBUG
+ if (trim) {
+ std::vector<const ExplodedNode*> Src;
+
+ // Flush any outstanding reports to make sure we cover all the nodes.
+ // This does not cause them to get displayed.
+ for (BugReporter::iterator I=BR.begin(), E=BR.end(); I!=E; ++I)
+ const_cast<BugType*>(*I)->FlushReports(BR);
+
+ // Iterate through the reports and get their nodes.
+ for (BugReporter::EQClasses_iterator
+ EI = BR.EQClasses_begin(), EE = BR.EQClasses_end(); EI != EE; ++EI) {
+ ExplodedNode *N = const_cast<ExplodedNode*>(EI->begin()->getErrorNode());
+ if (N) Src.push_back(N);
+ }
+
+ ViewGraph(Src);
+ }
+ else {
+ GraphPrintCheckerState = this;
+ GraphPrintSourceManager = &getContext().getSourceManager();
+
+ llvm::ViewGraph(*G.roots_begin(), "ExprEngine");
+
+ GraphPrintCheckerState = nullptr;
+ GraphPrintSourceManager = nullptr;
+ }
+#endif
+}
+
+void ExprEngine::ViewGraph(ArrayRef<const ExplodedNode*> Nodes) {
+#ifndef NDEBUG
+ GraphPrintCheckerState = this;
+ GraphPrintSourceManager = &getContext().getSourceManager();
+
+ std::unique_ptr<ExplodedGraph> TrimmedG(G.trim(Nodes));
+
+ if (!TrimmedG.get())
+ llvm::errs() << "warning: Trimmed ExplodedGraph is empty.\n";
+ else
+ llvm::ViewGraph(*TrimmedG->roots_begin(), "TrimmedExprEngine");
+
+ GraphPrintCheckerState = nullptr;
+ GraphPrintSourceManager = nullptr;
+#endif
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
new file mode 100644
index 0000000..a5b5871
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -0,0 +1,1001 @@
+//=-- ExprEngineC.cpp - ExprEngine support for C expressions ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ExprEngine's support for C expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ExprCXX.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+
+using namespace clang;
+using namespace ento;
+using llvm::APSInt;
+
+void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+
+ Expr *LHS = B->getLHS()->IgnoreParens();
+ Expr *RHS = B->getRHS()->IgnoreParens();
+
+ // FIXME: Prechecks eventually go in ::Visit().
+ ExplodedNodeSet CheckedSet;
+ ExplodedNodeSet Tmp2;
+ getCheckerManager().runCheckersForPreStmt(CheckedSet, Pred, B, *this);
+
+ // With both the LHS and RHS evaluated, process the operation itself.
+ for (ExplodedNodeSet::iterator it=CheckedSet.begin(), ei=CheckedSet.end();
+ it != ei; ++it) {
+
+ ProgramStateRef state = (*it)->getState();
+ const LocationContext *LCtx = (*it)->getLocationContext();
+ SVal LeftV = state->getSVal(LHS, LCtx);
+ SVal RightV = state->getSVal(RHS, LCtx);
+
+ BinaryOperator::Opcode Op = B->getOpcode();
+
+ if (Op == BO_Assign) {
+ // EXPERIMENTAL: "Conjured" symbols.
+ // FIXME: Handle structs.
+ if (RightV.isUnknown()) {
+ unsigned Count = currBldrCtx->blockCount();
+ RightV = svalBuilder.conjureSymbolVal(nullptr, B->getRHS(), LCtx,
+ Count);
+ }
+ // Simulate the effects of a "store": bind the value of the RHS
+ // to the L-Value represented by the LHS.
+ SVal ExprVal = B->isGLValue() ? LeftV : RightV;
+ evalStore(Tmp2, B, LHS, *it, state->BindExpr(B, LCtx, ExprVal),
+ LeftV, RightV);
+ continue;
+ }
+
+ if (!B->isAssignmentOp()) {
+ StmtNodeBuilder Bldr(*it, Tmp2, *currBldrCtx);
+
+ if (B->isAdditiveOp()) {
+ // If one of the operands is a location, conjure a symbol for the other
+ // one (offset) if it's unknown so that memory arithmetic always
+ // results in an ElementRegion.
+ // TODO: This can be removed after we enable history tracking with
+ // SymSymExpr.
+ unsigned Count = currBldrCtx->blockCount();
+ if (LeftV.getAs<Loc>() &&
+ RHS->getType()->isIntegralOrEnumerationType() &&
+ RightV.isUnknown()) {
+ RightV = svalBuilder.conjureSymbolVal(RHS, LCtx, RHS->getType(),
+ Count);
+ }
+ if (RightV.getAs<Loc>() &&
+ LHS->getType()->isIntegralOrEnumerationType() &&
+ LeftV.isUnknown()) {
+ LeftV = svalBuilder.conjureSymbolVal(LHS, LCtx, LHS->getType(),
+ Count);
+ }
+ }
+
+ // Although we don't yet model pointers-to-members, we do need to make
+ // sure that the members of temporaries have a valid 'this' pointer for
+ // other checks.
+ if (B->getOpcode() == BO_PtrMemD)
+ state = createTemporaryRegionIfNeeded(state, LCtx, LHS);
+
+ // Process non-assignments except commas or short-circuited
+ // logical expressions (LAnd and LOr).
+ SVal Result = evalBinOp(state, Op, LeftV, RightV, B->getType());
+ if (Result.isUnknown()) {
+ Bldr.generateNode(B, *it, state);
+ continue;
+ }
+
+ state = state->BindExpr(B, LCtx, Result);
+ Bldr.generateNode(B, *it, state);
+ continue;
+ }
+
+ assert (B->isCompoundAssignmentOp());
+
+ switch (Op) {
+ default:
+ llvm_unreachable("Invalid opcode for compound assignment.");
+ case BO_MulAssign: Op = BO_Mul; break;
+ case BO_DivAssign: Op = BO_Div; break;
+ case BO_RemAssign: Op = BO_Rem; break;
+ case BO_AddAssign: Op = BO_Add; break;
+ case BO_SubAssign: Op = BO_Sub; break;
+ case BO_ShlAssign: Op = BO_Shl; break;
+ case BO_ShrAssign: Op = BO_Shr; break;
+ case BO_AndAssign: Op = BO_And; break;
+ case BO_XorAssign: Op = BO_Xor; break;
+ case BO_OrAssign: Op = BO_Or; break;
+ }
+
+ // Perform a load (the LHS). This performs the checks for
+ // null dereferences, and so on.
+ ExplodedNodeSet Tmp;
+ SVal location = LeftV;
+ evalLoad(Tmp, B, LHS, *it, state, location);
+
+ for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E;
+ ++I) {
+
+ state = (*I)->getState();
+ const LocationContext *LCtx = (*I)->getLocationContext();
+ SVal V = state->getSVal(LHS, LCtx);
+
+ // Get the computation type.
+ QualType CTy =
+ cast<CompoundAssignOperator>(B)->getComputationResultType();
+ CTy = getContext().getCanonicalType(CTy);
+
+ QualType CLHSTy =
+ cast<CompoundAssignOperator>(B)->getComputationLHSType();
+ CLHSTy = getContext().getCanonicalType(CLHSTy);
+
+ QualType LTy = getContext().getCanonicalType(LHS->getType());
+
+ // Promote LHS.
+ V = svalBuilder.evalCast(V, CLHSTy, LTy);
+
+ // Compute the result of the operation.
+ SVal Result = svalBuilder.evalCast(evalBinOp(state, Op, V, RightV, CTy),
+ B->getType(), CTy);
+
+ // EXPERIMENTAL: "Conjured" symbols.
+ // FIXME: Handle structs.
+
+ SVal LHSVal;
+
+ if (Result.isUnknown()) {
+ // The symbolic value is actually for the type of the left-hand side
+ // expression, not the computation type, as this is the value the
+ // LValue on the LHS will bind to.
+ LHSVal = svalBuilder.conjureSymbolVal(nullptr, B->getRHS(), LCtx, LTy,
+ currBldrCtx->blockCount());
+ // However, we need to convert the symbol to the computation type.
+ Result = svalBuilder.evalCast(LHSVal, CTy, LTy);
+ }
+ else {
+        // The left-hand side may bind to a different value than the
+        // computation type.
+ LHSVal = svalBuilder.evalCast(Result, LTy, CTy);
+ }
+
+ // In C++, assignment and compound assignment operators return an
+ // lvalue.
+ if (B->isGLValue())
+ state = state->BindExpr(B, LCtx, location);
+ else
+ state = state->BindExpr(B, LCtx, Result);
+
+ evalStore(Tmp2, B, LHS, *I, state, location, LHSVal);
+ }
+ }
+
+ // FIXME: postvisits eventually go in ::Visit()
+ getCheckerManager().runCheckersForPostStmt(Dst, Tmp2, B, *this);
+}
+
+void ExprEngine::VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+
+ CanQualType T = getContext().getCanonicalType(BE->getType());
+
+ const BlockDecl *BD = BE->getBlockDecl();
+ // Get the value of the block itself.
+ SVal V = svalBuilder.getBlockPointer(BD, T,
+ Pred->getLocationContext(),
+ currBldrCtx->blockCount());
+
+ ProgramStateRef State = Pred->getState();
+
+ // If we created a new MemRegion for the block, we should explicitly bind
+ // the captured variables.
+ if (const BlockDataRegion *BDR =
+ dyn_cast_or_null<BlockDataRegion>(V.getAsRegion())) {
+
+ BlockDataRegion::referenced_vars_iterator I = BDR->referenced_vars_begin(),
+ E = BDR->referenced_vars_end();
+
+ auto CI = BD->capture_begin();
+ auto CE = BD->capture_end();
+ for (; I != E; ++I) {
+ const VarRegion *capturedR = I.getCapturedRegion();
+ const VarRegion *originalR = I.getOriginalRegion();
+
+      // If the capture had a copy expression, use the result of evaluating
+      // that expression; otherwise, use the original value.
+ // We rely on the invariant that the block declaration's capture variables
+ // are a prefix of the BlockDataRegion's referenced vars (which may include
+ // referenced globals, etc.) to enable fast lookup of the capture for a
+ // given referenced var.
+ const Expr *copyExpr = nullptr;
+ if (CI != CE) {
+ assert(CI->getVariable() == capturedR->getDecl());
+ copyExpr = CI->getCopyExpr();
+ CI++;
+ }
+
+ if (capturedR != originalR) {
+ SVal originalV;
+ if (copyExpr) {
+ originalV = State->getSVal(copyExpr, Pred->getLocationContext());
+ } else {
+ originalV = State->getSVal(loc::MemRegionVal(originalR));
+ }
+ State = State->bindLoc(loc::MemRegionVal(capturedR), originalV);
+ }
+ }
+ }
+
+ ExplodedNodeSet Tmp;
+ StmtNodeBuilder Bldr(Pred, Tmp, *currBldrCtx);
+ Bldr.generateNode(BE, Pred,
+ State->BindExpr(BE, Pred->getLocationContext(), V),
+ nullptr, ProgramPoint::PostLValueKind);
+
+ // FIXME: Move all post/pre visits to ::Visit().
+ getCheckerManager().runCheckersForPostStmt(Dst, Tmp, BE, *this);
+}
+
+void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst) {
+
+ ExplodedNodeSet dstPreStmt;
+ getCheckerManager().runCheckersForPreStmt(dstPreStmt, Pred, CastE, *this);
+
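+  // An lvalue-to-rvalue cast is modeled as a load from the location the
+  // subexpression evaluates to.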
+ if (CastE->getCastKind() == CK_LValueToRValue) {
+ for (ExplodedNodeSet::iterator I = dstPreStmt.begin(), E = dstPreStmt.end();
+ I!=E; ++I) {
+ ExplodedNode *subExprNode = *I;
+ ProgramStateRef state = subExprNode->getState();
+ const LocationContext *LCtx = subExprNode->getLocationContext();
+ evalLoad(Dst, CastE, CastE, subExprNode, state, state->getSVal(Ex, LCtx));
+ }
+ return;
+ }
+
+ // All other casts.
+ QualType T = CastE->getType();
+ QualType ExTy = Ex->getType();
+
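+  // For explicit casts, use the type as written rather than the semantic
+  // type of the expression.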
+ if (const ExplicitCastExpr *ExCast=dyn_cast_or_null<ExplicitCastExpr>(CastE))
+ T = ExCast->getTypeAsWritten();
+
+ StmtNodeBuilder Bldr(dstPreStmt, Dst, *currBldrCtx);
+ for (ExplodedNodeSet::iterator I = dstPreStmt.begin(), E = dstPreStmt.end();
+ I != E; ++I) {
+
+ Pred = *I;
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+
+ switch (CastE->getCastKind()) {
+ case CK_LValueToRValue:
+ llvm_unreachable("LValueToRValue casts handled earlier.");
+ case CK_ToVoid:
+ continue;
+ // The analyzer doesn't do anything special with these casts,
+ // since it understands retain/release semantics already.
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject: // Fall-through.
+ case CK_CopyAndAutoreleaseBlockObject:
+      // The analyzer can ignore atomic casts for now, although some future
+ // checkers may want to make certain that you're not modifying the same
+ // value through atomic and nonatomic pointers.
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ // True no-ops.
+ case CK_NoOp:
+ case CK_ConstructorConversion:
+ case CK_UserDefinedConversion:
+ case CK_FunctionToPointerDecay:
+ case CK_BuiltinFnToFnPtr: {
+ // Copy the SVal of Ex to CastE.
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal V = state->getSVal(Ex, LCtx);
+ state = state->BindExpr(CastE, LCtx, V);
+ Bldr.generateNode(CastE, Pred, state);
+ continue;
+ }
+ case CK_MemberPointerToBoolean:
+ // FIXME: For now, member pointers are represented by void *.
+ // FALLTHROUGH
+ case CK_Dependent:
+ case CK_ArrayToPointerDecay:
+ case CK_BitCast:
+ case CK_AddressSpaceConversion:
+ case CK_IntegralCast:
+ case CK_NullToPointer:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_ZeroToOCLEvent:
+ case CK_LValueBitCast: {
+ // Delegate to SValBuilder to process.
+ SVal V = state->getSVal(Ex, LCtx);
+ V = svalBuilder.evalCast(V, T, ExTy);
+ state = state->BindExpr(CastE, LCtx, V);
+ Bldr.generateNode(CastE, Pred, state);
+ continue;
+ }
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase: {
+ // For DerivedToBase cast, delegate to the store manager.
+ SVal val = state->getSVal(Ex, LCtx);
+ val = getStoreManager().evalDerivedToBase(val, CastE);
+ state = state->BindExpr(CastE, LCtx, val);
+ Bldr.generateNode(CastE, Pred, state);
+ continue;
+ }
+ // Handle C++ dyn_cast.
+ case CK_Dynamic: {
+ SVal val = state->getSVal(Ex, LCtx);
+
+ // Compute the type of the result.
+ QualType resultType = CastE->getType();
+ if (CastE->isGLValue())
+ resultType = getContext().getPointerType(resultType);
+
+ bool Failed = false;
+
+ // Check if the value being cast evaluates to 0.
+ if (val.isZeroConstant())
+ Failed = true;
+ // Else, evaluate the cast.
+ else
+ val = getStoreManager().evalDynamicCast(val, T, Failed);
+
+ if (Failed) {
+ if (T->isReferenceType()) {
+ // A bad_cast exception is thrown if the input value is a reference.
+ // Currently, we model this by generating a sink.
+ Bldr.generateSink(CastE, Pred, state);
+ continue;
+ } else {
+ // If the cast fails on a pointer, bind to 0.
+ state = state->BindExpr(CastE, LCtx, svalBuilder.makeNull());
+ }
+ } else {
+ // If we don't know if the cast succeeded, conjure a new symbol.
+ if (val.isUnknown()) {
+ DefinedOrUnknownSVal NewSym =
+ svalBuilder.conjureSymbolVal(nullptr, CastE, LCtx, resultType,
+ currBldrCtx->blockCount());
+ state = state->BindExpr(CastE, LCtx, NewSym);
+ } else
+ // Else, bind to the derived region value.
+ state = state->BindExpr(CastE, LCtx, val);
+ }
+ Bldr.generateNode(CastE, Pred, state);
+ continue;
+ }
+ case CK_NullToMemberPointer: {
+ // FIXME: For now, member pointers are represented by void *.
+ SVal V = svalBuilder.makeNull();
+ state = state->BindExpr(CastE, LCtx, V);
+ Bldr.generateNode(CastE, Pred, state);
+ continue;
+ }
+ // Various C++ casts that are not handled yet.
+ case CK_ToUnion:
+ case CK_BaseToDerived:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_ReinterpretMemberPointer:
+ case CK_VectorSplat: {
+ // Recover some path-sensitivity by conjuring a new value.
+ QualType resultType = CastE->getType();
+ if (CastE->isGLValue())
+ resultType = getContext().getPointerType(resultType);
+ SVal result = svalBuilder.conjureSymbolVal(nullptr, CastE, LCtx,
+ resultType,
+ currBldrCtx->blockCount());
+ state = state->BindExpr(CastE, LCtx, result);
+ Bldr.generateNode(CastE, Pred, state);
+ continue;
+ }
+ }
+ }
+}
+
+void ExprEngine::VisitCompoundLiteralExpr(const CompoundLiteralExpr *CL,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder B(Pred, Dst, *currBldrCtx);
+
+ ProgramStateRef State = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+
+ const Expr *Init = CL->getInitializer();
+ SVal V = State->getSVal(CL->getInitializer(), LCtx);
+
+ if (isa<CXXConstructExpr>(Init)) {
+ // No work needed. Just pass the value up to this expression.
+ } else {
+ assert(isa<InitListExpr>(Init));
+ Loc CLLoc = State->getLValue(CL, LCtx);
+ State = State->bindLoc(CLLoc, V);
+
+ // Compound literal expressions are a GNU extension in C++.
+ // Unlike in C, where CLs are lvalues, in C++ CLs are prvalues,
+ // and like temporary objects created by the functional notation T()
+ // CLs are destroyed at the end of the containing full-expression.
+ // HOWEVER, an rvalue of array type is not something the analyzer can
+ // reason about, since we expect all regions to be wrapped in Locs.
+ // So we treat array CLs as lvalues as well, knowing that they will decay
+ // to pointers as soon as they are used.
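+ // For example (a hypothetical GNU-extension snippet in C++):
+ //   struct Point { int x, y; };
+ //   int px = ((struct Point){ 1, 2 }).x;  // prvalue in C++, lvalue in C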
+ if (CL->isGLValue() || CL->getType()->isArrayType())
+ V = CLLoc;
+ }
+
+ B.generateNode(CL, Pred, State->BindExpr(CL, LCtx, V));
+}
+
+void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ // Assumption: The CFG has one DeclStmt per Decl.
+ const VarDecl *VD = dyn_cast_or_null<VarDecl>(*DS->decl_begin());
+
+ if (!VD) {
+ // TODO(AZ): remove the explicit insertion once the refactoring is done.
+ Dst.insert(Pred);
+ return;
+ }
+
+ // FIXME: all pre/post visits should eventually be handled by ::Visit().
+ ExplodedNodeSet dstPreVisit;
+ getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, DS, *this);
+
+ ExplodedNodeSet dstEvaluated;
+ StmtNodeBuilder B(dstPreVisit, dstEvaluated, *currBldrCtx);
+ for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
+ I!=E; ++I) {
+ ExplodedNode *N = *I;
+ ProgramStateRef state = N->getState();
+ const LocationContext *LC = N->getLocationContext();
+
+ // Decls without InitExpr are not initialized explicitly.
+ if (const Expr *InitEx = VD->getInit()) {
+
+ // Note in the state that the initialization has occurred.
+ ExplodedNode *UpdatedN = N;
+ SVal InitVal = state->getSVal(InitEx, LC);
+
+ assert(DS->isSingleDecl());
+ if (auto *CtorExpr = findDirectConstructorForCurrentCFGElement()) {
+ assert(InitEx->IgnoreImplicit() == CtorExpr);
+ (void)CtorExpr;
+ // We constructed the object directly in the variable.
+ // No need to bind anything.
+ B.generateNode(DS, UpdatedN, state);
+ } else {
+ // We bound the temp obj region to the CXXConstructExpr. Now recover
+ // the lazy compound value when the variable is not a reference.
+ if (AMgr.getLangOpts().CPlusPlus && VD->getType()->isRecordType() &&
+ !VD->getType()->isReferenceType()) {
+ if (Optional<loc::MemRegionVal> M =
+ InitVal.getAs<loc::MemRegionVal>()) {
+ InitVal = state->getSVal(M->getRegion());
+ assert(InitVal.getAs<nonloc::LazyCompoundVal>());
+ }
+ }
+
+ // Recover some path-sensitivity if a scalar value evaluated to
+ // UnknownVal.
+ if (InitVal.isUnknown()) {
+ QualType Ty = InitEx->getType();
+ if (InitEx->isGLValue()) {
+ Ty = getContext().getPointerType(Ty);
+ }
+
+ InitVal = svalBuilder.conjureSymbolVal(nullptr, InitEx, LC, Ty,
+ currBldrCtx->blockCount());
+ }
+
+ B.takeNodes(UpdatedN);
+ ExplodedNodeSet Dst2;
+ evalBind(Dst2, DS, UpdatedN, state->getLValue(VD, LC), InitVal, true);
+ B.addNodes(Dst2);
+ }
+ }
+ else {
+ B.generateNode(DS, N, state);
+ }
+ }
+
+ getCheckerManager().runCheckersForPostStmt(Dst, B.getResults(), DS, *this);
+}
+
+void ExprEngine::VisitLogicalExpr(const BinaryOperator* B, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ assert(B->getOpcode() == BO_LAnd ||
+ B->getOpcode() == BO_LOr);
+
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+ ProgramStateRef state = Pred->getState();
+
+ ExplodedNode *N = Pred;
+ while (!N->getLocation().getAs<BlockEntrance>()) {
+ ProgramPoint P = N->getLocation();
+ assert(P.getAs<PreStmt>() || P.getAs<PreStmtPurgeDeadSymbols>());
+ (void) P;
+ assert(N->pred_size() == 1);
+ N = *N->pred_begin();
+ }
+ assert(N->pred_size() == 1);
+ N = *N->pred_begin();
+ BlockEdge BE = N->getLocation().castAs<BlockEdge>();
+ SVal X;
+
+ // Determine the value of the expression by introspecting how we
+ // got this location in the CFG. This requires looking at the previous
+ // block we were in and what kind of control-flow transfer was involved.
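+ // For example, for 'x && y' we can get here either via the short-circuit
+ // (false) branch of 'x', making the result 0, or after evaluating 'y',
+ // in which case the result is the value of 'y' constrained to 0 or 1.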
+ const CFGBlock *SrcBlock = BE.getSrc();
+ // The only terminator (if there is one) that makes sense is a logical op.
+ CFGTerminator T = SrcBlock->getTerminator();
+ if (const BinaryOperator *Term = cast_or_null<BinaryOperator>(T.getStmt())) {
+ (void) Term;
+ assert(Term->isLogicalOp());
+ assert(SrcBlock->succ_size() == 2);
+ // Did we take the true or false branch?
+ unsigned constant = (*SrcBlock->succ_begin() == BE.getDst()) ? 1 : 0;
+ X = svalBuilder.makeIntVal(constant, B->getType());
+ }
+ else {
+ // If there is no terminator, then by construction the value of the
+ // enclosing expression is that of the last statement in SrcBlock.
+ // However, we still need to constrain that value to be 0 or 1.
+ assert(!SrcBlock->empty());
+ CFGStmt Elem = SrcBlock->rbegin()->castAs<CFGStmt>();
+ const Expr *RHS = cast<Expr>(Elem.getStmt());
+ SVal RHSVal = N->getState()->getSVal(RHS, Pred->getLocationContext());
+
+ if (RHSVal.isUndef()) {
+ X = RHSVal;
+ } else {
+ DefinedOrUnknownSVal DefinedRHS = RHSVal.castAs<DefinedOrUnknownSVal>();
+ ProgramStateRef StTrue, StFalse;
+ std::tie(StTrue, StFalse) = N->getState()->assume(DefinedRHS);
+ if (StTrue) {
+ if (StFalse) {
+ // We can't constrain the value to 0 or 1.
+ // The best we can do is a cast.
+ X = getSValBuilder().evalCast(RHSVal, B->getType(), RHS->getType());
+ } else {
+ // The value is known to be true.
+ X = getSValBuilder().makeIntVal(1, B->getType());
+ }
+ } else {
+ // The value is known to be false.
+ assert(StFalse && "Infeasible path!");
+ X = getSValBuilder().makeIntVal(0, B->getType());
+ }
+ }
+ }
+ Bldr.generateNode(B, Pred, state->BindExpr(B, Pred->getLocationContext(), X));
+}
+
+void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder B(Pred, Dst, *currBldrCtx);
+
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ QualType T = getContext().getCanonicalType(IE->getType());
+ unsigned NumInitElements = IE->getNumInits();
+
+ if (!IE->isGLValue() &&
+ (T->isArrayType() || T->isRecordType() || T->isVectorType() ||
+ T->isAnyComplexType())) {
+ llvm::ImmutableList<SVal> vals = getBasicVals().getEmptySValList();
+
+ // Handle the base case where the initializer has no elements,
+ // e.g.: static int *myArray[] = {};
+ if (NumInitElements == 0) {
+ SVal V = svalBuilder.makeCompoundVal(T, vals);
+ B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
+ return;
+ }
+
+ for (InitListExpr::const_reverse_iterator it = IE->rbegin(),
+ ei = IE->rend(); it != ei; ++it) {
+ SVal V = state->getSVal(cast<Expr>(*it), LCtx);
+ vals = getBasicVals().consVals(V, vals);
+ }
+
+ B.generateNode(IE, Pred,
+ state->BindExpr(IE, LCtx,
+ svalBuilder.makeCompoundVal(T, vals)));
+ return;
+ }
+
+ // Handle scalars: int{5} and int{} and glvalues.
+ // Note that if the InitListExpr is a glvalue, there is an address
+ // representing it, so it must have a single init element.
+ assert(NumInitElements <= 1);
+
+ SVal V;
+ if (NumInitElements == 0)
+ V = getSValBuilder().makeZeroVal(T);
+ else
+ V = state->getSVal(IE->getInit(0), LCtx);
+
+ B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
+}
+
+void ExprEngine::VisitGuardedExpr(const Expr *Ex,
+ const Expr *L,
+ const Expr *R,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ assert(L && R);
+
+ StmtNodeBuilder B(Pred, Dst, *currBldrCtx);
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ const CFGBlock *SrcBlock = nullptr;
+
+ // Find the predecessor block.
+ ProgramStateRef SrcState = state;
+ for (const ExplodedNode *N = Pred ; N ; N = *N->pred_begin()) {
+ ProgramPoint PP = N->getLocation();
+ if (PP.getAs<PreStmtPurgeDeadSymbols>() || PP.getAs<BlockEntrance>()) {
+ assert(N->pred_size() == 1);
+ continue;
+ }
+ SrcBlock = PP.castAs<BlockEdge>().getSrc();
+ SrcState = N->getState();
+ break;
+ }
+
+ assert(SrcBlock && "missing function entry");
+
+ // Find the last expression in the predecessor block. That is the
+ // expression that is used for the value of the ternary expression.
+ bool hasValue = false;
+ SVal V;
+
+ for (CFGElement CE : llvm::reverse(*SrcBlock)) {
+ if (Optional<CFGStmt> CS = CE.getAs<CFGStmt>()) {
+ const Expr *ValEx = cast<Expr>(CS->getStmt());
+ ValEx = ValEx->IgnoreParens();
+
+ // For GNU extension '?:' operator, the left hand side will be an
+ // OpaqueValueExpr, so get the underlying expression.
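+ // For example, in 'p ?: makeDefault()' (a hypothetical snippet), the
+ // true branch reuses the condition 'p' through an OpaqueValueExpr.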
+ if (const OpaqueValueExpr *OpaqueEx = dyn_cast<OpaqueValueExpr>(L))
+ L = OpaqueEx->getSourceExpr();
+
+ // If the last expression in the predecessor block matches the true or
+ // false subexpression, get its value.
+ if (ValEx == L->IgnoreParens() || ValEx == R->IgnoreParens()) {
+ hasValue = true;
+ V = SrcState->getSVal(ValEx, LCtx);
+ }
+ break;
+ }
+ }
+
+ if (!hasValue)
+ V = svalBuilder.conjureSymbolVal(nullptr, Ex, LCtx,
+ currBldrCtx->blockCount());
+
+ // Generate a new node with the binding from the appropriate path.
+ B.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V, true));
+}
+
+void ExprEngine::
+VisitOffsetOfExpr(const OffsetOfExpr *OOE,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst) {
+ StmtNodeBuilder B(Pred, Dst, *currBldrCtx);
+ APSInt IV;
+ if (OOE->EvaluateAsInt(IV, getContext())) {
+ assert(IV.getBitWidth() == getContext().getTypeSize(OOE->getType()));
+ assert(OOE->getType()->isBuiltinType());
+ assert(OOE->getType()->getAs<BuiltinType>()->isInteger());
+ assert(IV.isSigned() == OOE->getType()->isSignedIntegerType());
+ SVal X = svalBuilder.makeIntVal(IV);
+ B.generateNode(OOE, Pred,
+ Pred->getState()->BindExpr(OOE, Pred->getLocationContext(),
+ X));
+ }
+ // FIXME: Handle the case where __builtin_offsetof is not a constant.
+}
+
+void ExprEngine::
+VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ // FIXME: Prechecks eventually go in ::Visit().
+ ExplodedNodeSet CheckedSet;
+ getCheckerManager().runCheckersForPreStmt(CheckedSet, Pred, Ex, *this);
+
+ ExplodedNodeSet EvalSet;
+ StmtNodeBuilder Bldr(CheckedSet, EvalSet, *currBldrCtx);
+
+ QualType T = Ex->getTypeOfArgument();
+
+ for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
+ I != E; ++I) {
+ if (Ex->getKind() == UETT_SizeOf) {
+ if (!T->isIncompleteType() && !T->isConstantSizeType()) {
+ assert(T->isVariableArrayType() && "Unknown non-constant-sized type.");
+
+ // FIXME: Add support for VLA type arguments and VLA expressions.
+ // When that happens, we should probably refactor VLASizeChecker's code.
+ continue;
+ } else if (T->getAs<ObjCObjectType>()) {
+ // Some code tries to take the sizeof an ObjCObjectType, relying on the
+ // compiler having laid out its representation. Just report Unknown
+ // for these.
+ continue;
+ }
+ }
+
+ APSInt Value = Ex->EvaluateKnownConstInt(getContext());
+ CharUnits amt = CharUnits::fromQuantity(Value.getZExtValue());
+
+ ProgramStateRef state = (*I)->getState();
+ state = state->BindExpr(Ex, (*I)->getLocationContext(),
+ svalBuilder.makeIntVal(amt.getQuantity(),
+ Ex->getType()));
+ Bldr.generateNode(Ex, *I, state);
+ }
+
+ getCheckerManager().runCheckersForPostStmt(Dst, EvalSet, Ex, *this);
+}
+
+void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ // FIXME: Prechecks eventually go in ::Visit().
+ ExplodedNodeSet CheckedSet;
+ getCheckerManager().runCheckersForPreStmt(CheckedSet, Pred, U, *this);
+
+ ExplodedNodeSet EvalSet;
+ StmtNodeBuilder Bldr(CheckedSet, EvalSet, *currBldrCtx);
+
+ for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
+ I != E; ++I) {
+ switch (U->getOpcode()) {
+ default: {
+ Bldr.takeNodes(*I);
+ ExplodedNodeSet Tmp;
+ VisitIncrementDecrementOperator(U, *I, Tmp);
+ Bldr.addNodes(Tmp);
+ break;
+ }
+ case UO_Real: {
+ const Expr *Ex = U->getSubExpr()->IgnoreParens();
+
+ // FIXME: We don't have complex SValues yet.
+ if (Ex->getType()->isAnyComplexType()) {
+ // Just report "Unknown."
+ break;
+ }
+
+ // For all other types, UO_Real is an identity operation.
+ assert(U->getType() == Ex->getType());
+ ProgramStateRef state = (*I)->getState();
+ const LocationContext *LCtx = (*I)->getLocationContext();
+ Bldr.generateNode(U, *I, state->BindExpr(U, LCtx,
+ state->getSVal(Ex, LCtx)));
+ break;
+ }
+
+ case UO_Imag: {
+ const Expr *Ex = U->getSubExpr()->IgnoreParens();
+ // FIXME: We don't have complex SValues yet.
+ if (Ex->getType()->isAnyComplexType()) {
+ // Just report "Unknown."
+ break;
+ }
+ // For all other types, UO_Imag returns 0.
+ ProgramStateRef state = (*I)->getState();
+ const LocationContext *LCtx = (*I)->getLocationContext();
+ SVal X = svalBuilder.makeZeroVal(Ex->getType());
+ Bldr.generateNode(U, *I, state->BindExpr(U, LCtx, X));
+ break;
+ }
+
+ case UO_Plus:
+ assert(!U->isGLValue());
+ // FALL-THROUGH.
+ case UO_Deref:
+ case UO_AddrOf:
+ case UO_Extension: {
+ // FIXME: We can probably just have some magic in Environment::getSVal()
+ // that propagates values, instead of creating a new node here.
+ //
+ // Unary "+" is a no-op, similar to parentheses. We still have places
+ // where it may be a block-level expression, so we need to
+ // generate an extra node that just propagates the value of the
+ // subexpression.
+ const Expr *Ex = U->getSubExpr()->IgnoreParens();
+ ProgramStateRef state = (*I)->getState();
+ const LocationContext *LCtx = (*I)->getLocationContext();
+ Bldr.generateNode(U, *I, state->BindExpr(U, LCtx,
+ state->getSVal(Ex, LCtx)));
+ break;
+ }
+
+ case UO_LNot:
+ case UO_Minus:
+ case UO_Not: {
+ assert(!U->isGLValue());
+ const Expr *Ex = U->getSubExpr()->IgnoreParens();
+ ProgramStateRef state = (*I)->getState();
+ const LocationContext *LCtx = (*I)->getLocationContext();
+
+ // Get the value of the subexpression.
+ SVal V = state->getSVal(Ex, LCtx);
+
+ if (V.isUnknownOrUndef()) {
+ Bldr.generateNode(U, *I, state->BindExpr(U, LCtx, V));
+ break;
+ }
+
+ switch (U->getOpcode()) {
+ default:
+ llvm_unreachable("Invalid Opcode.");
+ case UO_Not:
+ // FIXME: Do we need to handle promotions?
+ state = state->BindExpr(U, LCtx, evalComplement(V.castAs<NonLoc>()));
+ break;
+ case UO_Minus:
+ // FIXME: Do we need to handle promotions?
+ state = state->BindExpr(U, LCtx, evalMinus(V.castAs<NonLoc>()));
+ break;
+ case UO_LNot:
+ // C99 6.5.3.3: "The expression !E is equivalent to (0==E)."
+ //
+ // Note: technically we do "E == 0", but this is the same in the
+ // transfer functions as "0 == E".
+ SVal Result;
+ if (Optional<Loc> LV = V.getAs<Loc>()) {
+ Loc X = svalBuilder.makeNull();
+ Result = evalBinOp(state, BO_EQ, *LV, X, U->getType());
+ }
+ else if (Ex->getType()->isFloatingType()) {
+ // FIXME: handle floating point types.
+ Result = UnknownVal();
+ } else {
+ nonloc::ConcreteInt X(getBasicVals().getValue(0, Ex->getType()));
+ Result = evalBinOp(state, BO_EQ, V.castAs<NonLoc>(), X,
+ U->getType());
+ }
+
+ state = state->BindExpr(U, LCtx, Result);
+ break;
+ }
+ Bldr.generateNode(U, *I, state);
+ break;
+ }
+ }
+ }
+
+ getCheckerManager().runCheckersForPostStmt(Dst, EvalSet, U, *this);
+}
+
+void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ // Handle ++ and -- (both pre- and post-increment).
+ assert(U->isIncrementDecrementOp());
+ const Expr *Ex = U->getSubExpr()->IgnoreParens();
+
+ const LocationContext *LCtx = Pred->getLocationContext();
+ ProgramStateRef state = Pred->getState();
+ SVal loc = state->getSVal(Ex, LCtx);
+
+ // Perform a load.
+ ExplodedNodeSet Tmp;
+ evalLoad(Tmp, U, Ex, Pred, state, loc);
+
+ ExplodedNodeSet Dst2;
+ StmtNodeBuilder Bldr(Tmp, Dst2, *currBldrCtx);
+ for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E; ++I) {
+
+ state = (*I)->getState();
+ assert(LCtx == (*I)->getLocationContext());
+ SVal V2_untested = state->getSVal(Ex, LCtx);
+
+ // Propagate unknown and undefined values.
+ if (V2_untested.isUnknownOrUndef()) {
+ Bldr.generateNode(U, *I, state->BindExpr(U, LCtx, V2_untested));
+ continue;
+ }
+ DefinedSVal V2 = V2_untested.castAs<DefinedSVal>();
+
+ // Handle all other values.
+ BinaryOperator::Opcode Op = U->isIncrementOp() ? BO_Add : BO_Sub;
+
+ // If the UnaryOperator has non-location type, use its type to create the
+ // constant value. If the UnaryOperator has location type, create the
+ // constant with int type and pointer width.
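+ // For example, '++p' on an 'int *p' adds an array index of 1 rather
+ // than the integer constant 1.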
+ SVal RHS;
+
+ if (U->getType()->isAnyPointerType())
+ RHS = svalBuilder.makeArrayIndex(1);
+ else if (U->getType()->isIntegralOrEnumerationType())
+ RHS = svalBuilder.makeIntVal(1, U->getType());
+ else
+ RHS = UnknownVal();
+
+ SVal Result = evalBinOp(state, Op, V2, RHS, U->getType());
+
+ // Conjure a new symbol if necessary to recover precision.
+ if (Result.isUnknown()){
+ DefinedOrUnknownSVal SymVal =
+ svalBuilder.conjureSymbolVal(nullptr, Ex, LCtx,
+ currBldrCtx->blockCount());
+ Result = SymVal;
+
+ // If the value is a location, ++/-- should always preserve
+ // non-nullness. Check if the original value was non-null, and if so
+ // propagate that constraint.
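+ // For example (a hypothetical snippet):
+ //   char *p = getBuffer();
+ //   if (p) ++p;  // 'p' stays known non-null after the increment.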
+ if (Loc::isLocType(U->getType())) {
+ DefinedOrUnknownSVal Constraint =
+ svalBuilder.evalEQ(state, V2, svalBuilder.makeZeroVal(U->getType()));
+
+ if (!state->assume(Constraint, true)) {
+ // It isn't feasible for the original value to be null.
+ // Propagate this constraint.
+ Constraint = svalBuilder.evalEQ(state, SymVal,
+ svalBuilder.makeZeroVal(U->getType()));
+
+ state = state->assume(Constraint, false);
+ assert(state);
+ }
+ }
+ }
+
+ // Since the lvalue-to-rvalue conversion is explicit in the AST,
+ // we bind an lvalue if the operator is a prefix operator and the
+ // expression is an lvalue (in C++).
+ if (U->isGLValue())
+ state = state->BindExpr(U, LCtx, loc);
+ else
+ state = state->BindExpr(U, LCtx, U->isPostfix() ? V2 : Result);
+
+ // Perform the store.
+ Bldr.takeNodes(*I);
+ ExplodedNodeSet Dst3;
+ evalStore(Dst3, U, U, *I, state, loc, Result);
+ Bldr.addNodes(Dst3);
+ }
+ Dst.insert(Dst2);
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
new file mode 100644
index 0000000..556e223
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -0,0 +1,623 @@
+//===- ExprEngineCXX.cpp - ExprEngine support for C++ -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the C++ expression evaluation engine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+
+using namespace clang;
+using namespace ento;
+
+void ExprEngine::CreateCXXTemporaryObject(const MaterializeTemporaryExpr *ME,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+ const Expr *tempExpr = ME->GetTemporaryExpr()->IgnoreParens();
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+
+ state = createTemporaryRegionIfNeeded(state, LCtx, tempExpr, ME);
+ Bldr.generateNode(ME, Pred, state);
+}
+
+// FIXME: This is the sort of code that should eventually live in a Core
+// checker rather than as a special case in ExprEngine.
+void ExprEngine::performTrivialCopy(NodeBuilder &Bldr, ExplodedNode *Pred,
+ const CallEvent &Call) {
+ SVal ThisVal;
+ bool AlwaysReturnsLValue;
+ if (const CXXConstructorCall *Ctor = dyn_cast<CXXConstructorCall>(&Call)) {
+ assert(Ctor->getDecl()->isTrivial());
+ assert(Ctor->getDecl()->isCopyOrMoveConstructor());
+ ThisVal = Ctor->getCXXThisVal();
+ AlwaysReturnsLValue = false;
+ } else {
+ assert(cast<CXXMethodDecl>(Call.getDecl())->isTrivial());
+ assert(cast<CXXMethodDecl>(Call.getDecl())->getOverloadedOperator() ==
+ OO_Equal);
+ ThisVal = cast<CXXInstanceCall>(Call).getCXXThisVal();
+ AlwaysReturnsLValue = true;
+ }
+
+ const LocationContext *LCtx = Pred->getLocationContext();
+
+ ExplodedNodeSet Dst;
+ Bldr.takeNodes(Pred);
+
+ SVal V = Call.getArgSVal(0);
+
+ // If the value being copied is not unknown, load from its location to get
+ // an aggregate rvalue.
+ if (Optional<Loc> L = V.getAs<Loc>())
+ V = Pred->getState()->getSVal(*L);
+ else
+ assert(V.isUnknown());
+
+ const Expr *CallExpr = Call.getOriginExpr();
+ evalBind(Dst, CallExpr, Pred, ThisVal, V, true);
+
+ PostStmt PS(CallExpr, LCtx);
+ for (ExplodedNodeSet::iterator I = Dst.begin(), E = Dst.end();
+ I != E; ++I) {
+ ProgramStateRef State = (*I)->getState();
+ if (AlwaysReturnsLValue)
+ State = State->BindExpr(CallExpr, LCtx, ThisVal);
+ else
+ State = bindReturnValue(Call, LCtx, State);
+ Bldr.generateNode(PS, State, *I);
+ }
+}
+
+/// Returns a region representing the first element of a (possibly
+/// multi-dimensional) array.
+///
+/// On return, \p Ty will be set to the base type of the array.
+///
+/// If the type is not an array type at all, the original value is returned.
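+///
+/// For example, for 'int a[2][3]' this returns the region of a[0][0] and
+/// sets \p Ty to 'int'.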
+static SVal makeZeroElementRegion(ProgramStateRef State, SVal LValue,
+ QualType &Ty) {
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ ASTContext &Ctx = SVB.getContext();
+
+ while (const ArrayType *AT = Ctx.getAsArrayType(Ty)) {
+ Ty = AT->getElementType();
+ LValue = State->getLValue(Ty, SVB.makeZeroArrayIndex(), LValue);
+ }
+
+ return LValue;
+}
+
+const MemRegion *
+ExprEngine::getRegionForConstructedObject(const CXXConstructExpr *CE,
+ ExplodedNode *Pred) {
+ const LocationContext *LCtx = Pred->getLocationContext();
+ ProgramStateRef State = Pred->getState();
+
+ // See if we're constructing an existing region by looking at the next
+ // element in the CFG.
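+ // For example, for a hypothetical 'S s(arg);' the next CFG element is the
+ // DeclStmt for 's', so the constructor builds directly into s's region.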
+
+ if (auto Elem = findElementDirectlyInitializedByCurrentConstructor()) {
+ if (Optional<CFGStmt> StmtElem = Elem->getAs<CFGStmt>()) {
+ auto *DS = cast<DeclStmt>(StmtElem->getStmt());
+ if (const auto *Var = dyn_cast<VarDecl>(DS->getSingleDecl())) {
+ if (Var->getInit() && Var->getInit()->IgnoreImplicit() == CE) {
+ SVal LValue = State->getLValue(Var, LCtx);
+ QualType Ty = Var->getType();
+ LValue = makeZeroElementRegion(State, LValue, Ty);
+ return LValue.getAsRegion();
+ }
+ }
+ } else if (Optional<CFGInitializer> InitElem = Elem->getAs<CFGInitializer>()) {
+ const CXXCtorInitializer *Init = InitElem->getInitializer();
+ assert(Init->isAnyMemberInitializer());
+ const CXXMethodDecl *CurCtor = cast<CXXMethodDecl>(LCtx->getDecl());
+ Loc ThisPtr =
+ getSValBuilder().getCXXThis(CurCtor, LCtx->getCurrentStackFrame());
+ SVal ThisVal = State->getSVal(ThisPtr);
+
+ const ValueDecl *Field;
+ SVal FieldVal;
+ if (Init->isIndirectMemberInitializer()) {
+ Field = Init->getIndirectMember();
+ FieldVal = State->getLValue(Init->getIndirectMember(), ThisVal);
+ } else {
+ Field = Init->getMember();
+ FieldVal = State->getLValue(Init->getMember(), ThisVal);
+ }
+
+ QualType Ty = Field->getType();
+ FieldVal = makeZeroElementRegion(State, FieldVal, Ty);
+ return FieldVal.getAsRegion();
+ }
+
+ // FIXME: This will eventually need to handle new-expressions as well.
+ // Don't forget to update the pre-constructor initialization code in
+ // ExprEngine::VisitCXXConstructExpr.
+ }
+ // If we couldn't find an existing region to construct into, assume we're
+ // constructing a temporary.
+ MemRegionManager &MRMgr = getSValBuilder().getRegionManager();
+ return MRMgr.getCXXTempObjectRegion(CE, LCtx);
+}
+
+/// Returns true if the initializer for \p Elem can be a direct
+/// constructor.
+static bool canHaveDirectConstructor(CFGElement Elem) {
+ // DeclStmts and CXXCtorInitializers for fields can be directly constructed.
+
+ if (Optional<CFGStmt> StmtElem = Elem.getAs<CFGStmt>()) {
+ if (isa<DeclStmt>(StmtElem->getStmt())) {
+ return true;
+ }
+ }
+
+ if (Elem.getKind() == CFGElement::Initializer) {
+ return true;
+ }
+
+ return false;
+}
+
+Optional<CFGElement>
+ExprEngine::findElementDirectlyInitializedByCurrentConstructor() {
+ const NodeBuilderContext &CurrBldrCtx = getBuilderContext();
+ // See if we're constructing an existing region by looking at the next
+ // element in the CFG.
+ const CFGBlock *B = CurrBldrCtx.getBlock();
+ assert(isa<CXXConstructExpr>(((*B)[currStmtIdx]).castAs<CFGStmt>().getStmt()));
+ unsigned int NextStmtIdx = currStmtIdx + 1;
+ if (NextStmtIdx >= B->size())
+ return None;
+
+ CFGElement Next = (*B)[NextStmtIdx];
+
+ // Is this a destructor? If so, we might be in the middle of an assignment
+ // to a local or member: look ahead one more element to see what we find.
+ while (Next.getAs<CFGImplicitDtor>() && NextStmtIdx + 1 < B->size()) {
+ ++NextStmtIdx;
+ Next = (*B)[NextStmtIdx];
+ }
+
+ if (canHaveDirectConstructor(Next))
+ return Next;
+
+ return None;
+}
+
+const CXXConstructExpr *
+ExprEngine::findDirectConstructorForCurrentCFGElement() {
+ // Go backward in the CFG to see if the previous element (ignoring
+ // destructors) was a CXXConstructExpr. If so, that constructor
+ // was constructed directly into an existing region.
+ // This process is essentially the inverse of that performed in
+ // findElementDirectlyInitializedByCurrentConstructor().
+ if (currStmtIdx == 0)
+ return nullptr;
+
+ const CFGBlock *B = getBuilderContext().getBlock();
+ assert(canHaveDirectConstructor((*B)[currStmtIdx]));
+
+ unsigned int PreviousStmtIdx = currStmtIdx - 1;
+ CFGElement Previous = (*B)[PreviousStmtIdx];
+
+ while (Previous.getAs<CFGImplicitDtor>() && PreviousStmtIdx > 0) {
+ --PreviousStmtIdx;
+ Previous = (*B)[PreviousStmtIdx];
+ }
+
+ if (Optional<CFGStmt> PrevStmtElem = Previous.getAs<CFGStmt>()) {
+ if (auto *CtorExpr = dyn_cast<CXXConstructExpr>(PrevStmtElem->getStmt())) {
+ return CtorExpr;
+ }
+ }
+
+ return nullptr;
+}
+
+void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &destNodes) {
+ const LocationContext *LCtx = Pred->getLocationContext();
+ ProgramStateRef State = Pred->getState();
+
+ const MemRegion *Target = nullptr;
+
+ // FIXME: Handle arrays, which run the same constructor for every element.
+ // For now, we just run the first constructor (which should still invalidate
+ // the entire array).
+
+ switch (CE->getConstructionKind()) {
+ case CXXConstructExpr::CK_Complete: {
+ Target = getRegionForConstructedObject(CE, Pred);
+ break;
+ }
+ case CXXConstructExpr::CK_VirtualBase:
+ // Make sure we are not calling virtual base class initializers twice.
+ // Only the most-derived object should initialize virtual base classes.
+ if (const Stmt *Outer = LCtx->getCurrentStackFrame()->getCallSite()) {
+ const CXXConstructExpr *OuterCtor = dyn_cast<CXXConstructExpr>(Outer);
+ if (OuterCtor) {
+ switch (OuterCtor->getConstructionKind()) {
+ case CXXConstructExpr::CK_NonVirtualBase:
+ case CXXConstructExpr::CK_VirtualBase:
+ // Bail out!
+ destNodes.Add(Pred);
+ return;
+ case CXXConstructExpr::CK_Complete:
+ case CXXConstructExpr::CK_Delegating:
+ break;
+ }
+ }
+ }
+ // FALLTHROUGH
+ case CXXConstructExpr::CK_NonVirtualBase:
+ case CXXConstructExpr::CK_Delegating: {
+ const CXXMethodDecl *CurCtor = cast<CXXMethodDecl>(LCtx->getDecl());
+ Loc ThisPtr = getSValBuilder().getCXXThis(CurCtor,
+ LCtx->getCurrentStackFrame());
+ SVal ThisVal = State->getSVal(ThisPtr);
+
+ if (CE->getConstructionKind() == CXXConstructExpr::CK_Delegating) {
+ Target = ThisVal.getAsRegion();
+ } else {
+ // Cast to the base type.
+ bool IsVirtual =
+ (CE->getConstructionKind() == CXXConstructExpr::CK_VirtualBase);
+ SVal BaseVal = getStoreManager().evalDerivedToBase(ThisVal, CE->getType(),
+ IsVirtual);
+ Target = BaseVal.getAsRegion();
+ }
+ break;
+ }
+ }
+
+ CallEventManager &CEMgr = getStateManager().getCallEventManager();
+ CallEventRef<CXXConstructorCall> Call =
+ CEMgr.getCXXConstructorCall(CE, Target, State, LCtx);
+
+ ExplodedNodeSet DstPreVisit;
+ getCheckerManager().runCheckersForPreStmt(DstPreVisit, Pred, CE, *this);
+
+ ExplodedNodeSet PreInitialized;
+ {
+ StmtNodeBuilder Bldr(DstPreVisit, PreInitialized, *currBldrCtx);
+ if (CE->requiresZeroInitialization()) {
+ // Type of the zero doesn't matter.
+ SVal ZeroVal = svalBuilder.makeZeroVal(getContext().CharTy);
+
+ for (ExplodedNodeSet::iterator I = DstPreVisit.begin(),
+ E = DstPreVisit.end();
+ I != E; ++I) {
+ ProgramStateRef State = (*I)->getState();
+ // FIXME: Once we properly handle constructors in new-expressions, we'll
+ // need to invalidate the region before setting a default value, to make
+ // sure there aren't any lingering bindings around. This probably needs
+ // to happen regardless of whether or not the object is zero-initialized
+ // to handle random fields of a placement-initialized object picking up
+ // old bindings. We might only want to do it when we need to, though.
+ // FIXME: This isn't actually correct for arrays -- we need to zero-
+ // initialize the entire array, not just the first element -- but our
+ // handling of arrays everywhere else is weak as well, so this shouldn't
+ // actually make things worse. Placement new makes this tricky as well,
+ // since it's then possible to be initializing one part of a multi-
+ // dimensional array.
+ State = State->bindDefault(loc::MemRegionVal(Target), ZeroVal);
+ Bldr.generateNode(CE, *I, State, /*tag=*/nullptr,
+ ProgramPoint::PreStmtKind);
+ }
+ }
+ }
+
+ ExplodedNodeSet DstPreCall;
+ getCheckerManager().runCheckersForPreCall(DstPreCall, PreInitialized,
+ *Call, *this);
+
+ ExplodedNodeSet DstEvaluated;
+ StmtNodeBuilder Bldr(DstPreCall, DstEvaluated, *currBldrCtx);
+
+ bool IsArray = isa<ElementRegion>(Target);
+ if (CE->getConstructor()->isTrivial() &&
+ CE->getConstructor()->isCopyOrMoveConstructor() &&
+ !IsArray) {
+ // FIXME: Handle other kinds of trivial constructors as well.
+ for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
+ I != E; ++I)
+ performTrivialCopy(Bldr, *I, *Call);
+
+ } else {
+ for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
+ I != E; ++I)
+ defaultEvalCall(Bldr, *I, *Call);
+ }
+
+ ExplodedNodeSet DstPostCall;
+ getCheckerManager().runCheckersForPostCall(DstPostCall, DstEvaluated,
+ *Call, *this);
+ getCheckerManager().runCheckersForPostStmt(destNodes, DstPostCall, CE, *this);
+}
+
+void ExprEngine::VisitCXXDestructor(QualType ObjectType,
+ const MemRegion *Dest,
+ const Stmt *S,
+ bool IsBaseDtor,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ const LocationContext *LCtx = Pred->getLocationContext();
+ ProgramStateRef State = Pred->getState();
+
+ // FIXME: We need to run the same destructor on every element of the array.
+ // This workaround will just run the first destructor (which will still
+ // invalidate the entire array).
+ SVal DestVal = UnknownVal();
+ if (Dest)
+ DestVal = loc::MemRegionVal(Dest);
+ DestVal = makeZeroElementRegion(State, DestVal, ObjectType);
+ Dest = DestVal.getAsRegion();
+
+ const CXXRecordDecl *RecordDecl = ObjectType->getAsCXXRecordDecl();
+ assert(RecordDecl && "Only CXXRecordDecls should have destructors");
+ const CXXDestructorDecl *DtorDecl = RecordDecl->getDestructor();
+
+ CallEventManager &CEMgr = getStateManager().getCallEventManager();
+ CallEventRef<CXXDestructorCall> Call =
+ CEMgr.getCXXDestructorCall(DtorDecl, S, Dest, IsBaseDtor, State, LCtx);
+
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+ Call->getSourceRange().getBegin(),
+ "Error evaluating destructor");
+
+ ExplodedNodeSet DstPreCall;
+ getCheckerManager().runCheckersForPreCall(DstPreCall, Pred,
+ *Call, *this);
+
+ ExplodedNodeSet DstInvalidated;
+ StmtNodeBuilder Bldr(DstPreCall, DstInvalidated, *currBldrCtx);
+ for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
+ I != E; ++I)
+ defaultEvalCall(Bldr, *I, *Call);
+
+ ExplodedNodeSet DstPostCall;
+ getCheckerManager().runCheckersForPostCall(Dst, DstInvalidated,
+ *Call, *this);
+}
+
+void ExprEngine::VisitCXXNewAllocatorCall(const CXXNewExpr *CNE,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ ProgramStateRef State = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+ CNE->getStartLoc(),
+ "Error evaluating New Allocator Call");
+ CallEventManager &CEMgr = getStateManager().getCallEventManager();
+ CallEventRef<CXXAllocatorCall> Call =
+ CEMgr.getCXXAllocatorCall(CNE, State, LCtx);
+
+ ExplodedNodeSet DstPreCall;
+ getCheckerManager().runCheckersForPreCall(DstPreCall, Pred,
+ *Call, *this);
+
+ ExplodedNodeSet DstInvalidated;
+ StmtNodeBuilder Bldr(DstPreCall, DstInvalidated, *currBldrCtx);
+ for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
+ I != E; ++I)
+ defaultEvalCall(Bldr, *I, *Call);
+ getCheckerManager().runCheckersForPostCall(Dst, DstInvalidated,
+ *Call, *this);
+}
+
+void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ // FIXME: Much of this should eventually migrate to CXXAllocatorCall.
+ // Also, we need to decide how allocators actually work -- they're not
+ // really part of the CXXNewExpr because they happen BEFORE the
+ // CXXConstructExpr subexpression. See PR12014 for some discussion.
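+ // For example, in 'new (buf) T(arg)' the allocator call
+ // 'operator new(sizeof(T), buf)' runs before T's constructor.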
+
+ unsigned blockCount = currBldrCtx->blockCount();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ DefinedOrUnknownSVal symVal = UnknownVal();
+ FunctionDecl *FD = CNE->getOperatorNew();
+
+ bool IsStandardGlobalOpNewFunction = false;
+ if (FD && !isa<CXXMethodDecl>(FD) && !FD->isVariadic()) {
+ if (FD->getNumParams() == 2) {
+ QualType T = FD->getParamDecl(1)->getType();
+ if (const IdentifierInfo *II = T.getBaseTypeIdentifier())
+ // NoThrow placement new behaves like a standard new.
+ IsStandardGlobalOpNewFunction = II->getName().equals("nothrow_t");
+ }
+ else
+ // Placement forms are considered non-standard.
+ IsStandardGlobalOpNewFunction = (FD->getNumParams() == 1);
+ }
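+ // For example, 'operator new(size_t)' and the nothrow form
+ // 'operator new(size_t, const std::nothrow_t &)' are treated as standard,
+ // while placement 'operator new(size_t, void *)' is not.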
+
+ // We assume all standard global 'operator new' functions allocate memory
+ // on the heap. We realize this is an approximation that might not correctly
+ // model a custom global allocator.
+ if (IsStandardGlobalOpNewFunction)
+ symVal = svalBuilder.getConjuredHeapSymbolVal(CNE, LCtx, blockCount);
+ else
+ symVal = svalBuilder.conjureSymbolVal(nullptr, CNE, LCtx, CNE->getType(),
+ blockCount);
+
+ ProgramStateRef State = Pred->getState();
+ CallEventManager &CEMgr = getStateManager().getCallEventManager();
+ CallEventRef<CXXAllocatorCall> Call =
+ CEMgr.getCXXAllocatorCall(CNE, State, LCtx);
+
+ // Invalidate placement args.
+ // FIXME: Once we figure out how we want allocators to work,
+ // we should be using the usual pre-/(default-)eval-/post-call checks here.
+ State = Call->invalidateRegions(blockCount);
+ if (!State)
+ return;
+
+ // If this allocation function is not declared as non-throwing, failures
+ // /must/ be signaled by exceptions, and thus the return value will never be
+ // NULL. -fno-exceptions does not influence this semantics.
+ // FIXME: GCC has a -fcheck-new option, which forces it to consider the case
+ // where new can return NULL. If we end up supporting that option, we can
+ // consider adding a check for it here.
+ // C++11 [basic.stc.dynamic.allocation]p3.
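+ // For example:
+ //   int *p = new int;                // assumed non-null below
+ //   int *q = new (std::nothrow) int; // may legitimately be null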
+ if (FD) {
+ QualType Ty = FD->getType();
+ if (const FunctionProtoType *ProtoType = Ty->getAs<FunctionProtoType>())
+ if (!ProtoType->isNothrow(getContext()))
+ State = State->assume(symVal, true);
+ }
+
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+
+ if (CNE->isArray()) {
+ // FIXME: allocating an array requires simulating the constructors.
+ // For now, just return a symbolicated region.
+ const MemRegion *NewReg = symVal.castAs<loc::MemRegionVal>().getRegion();
+ QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();
+ const ElementRegion *EleReg =
+ getStoreManager().GetElementZeroRegion(NewReg, ObjTy);
+ State = State->BindExpr(CNE, Pred->getLocationContext(),
+ loc::MemRegionVal(EleReg));
+ Bldr.generateNode(CNE, Pred, State);
+ return;
+ }
+
+ // FIXME: Once we have proper support for CXXConstructExprs inside
+ // CXXNewExpr, we need to make sure that the constructed object is not
+ // immediately invalidated here. (The placement call should happen before
+ // the constructor call anyway.)
+ SVal Result = symVal;
+ if (FD && FD->isReservedGlobalPlacementOperator()) {
+ // Non-array placement new should always return the placement location.
+ SVal PlacementLoc = State->getSVal(CNE->getPlacementArg(0), LCtx);
+ Result = svalBuilder.evalCast(PlacementLoc, CNE->getType(),
+ CNE->getPlacementArg(0)->getType());
+ }
+
+ // Bind the address of the object, then check to see if we cached out.
+ State = State->BindExpr(CNE, LCtx, Result);
+ ExplodedNode *NewN = Bldr.generateNode(CNE, Pred, State);
+ if (!NewN)
+ return;
+
+ // If the type is not a record, we won't have a CXXConstructExpr as an
+ // initializer. Copy the value over.
+ if (const Expr *Init = CNE->getInitializer()) {
+ if (!isa<CXXConstructExpr>(Init)) {
+ assert(Bldr.getResults().size() == 1);
+ Bldr.takeNodes(NewN);
+ evalBind(Dst, CNE, NewN, Result, State->getSVal(Init, LCtx),
+ /*FirstInit=*/IsStandardGlobalOpNewFunction);
+ }
+ }
+}
+
+void ExprEngine::VisitCXXDeleteExpr(const CXXDeleteExpr *CDE,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+ ProgramStateRef state = Pred->getState();
+ Bldr.generateNode(CDE, Pred, state);
+}
+
+void ExprEngine::VisitCXXCatchStmt(const CXXCatchStmt *CS,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ const VarDecl *VD = CS->getExceptionDecl();
+ if (!VD) {
+ Dst.Add(Pred);
+ return;
+ }
+
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal V = svalBuilder.conjureSymbolVal(CS, LCtx, VD->getType(),
+ currBldrCtx->blockCount());
+ ProgramStateRef state = Pred->getState();
+ state = state->bindLoc(state->getLValue(VD, LCtx), V);
+
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+ Bldr.generateNode(CS, Pred, state);
+}
+
+void ExprEngine::VisitCXXThisExpr(const CXXThisExpr *TE, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+
+ // Get the this object region from StoreManager.
+ const LocationContext *LCtx = Pred->getLocationContext();
+ const MemRegion *R =
+ svalBuilder.getRegionManager().getCXXThisRegion(
+ getContext().getCanonicalType(TE->getType()),
+ LCtx);
+
+ ProgramStateRef state = Pred->getState();
+ SVal V = state->getSVal(loc::MemRegionVal(R));
+ Bldr.generateNode(TE, Pred, state->BindExpr(TE, LCtx, V));
+}
+
+void ExprEngine::VisitLambdaExpr(const LambdaExpr *LE, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ const LocationContext *LocCtxt = Pred->getLocationContext();
+
+ // Get the region of the lambda itself.
+ const MemRegion *R = svalBuilder.getRegionManager().getCXXTempObjectRegion(
+ LE, LocCtxt);
+ SVal V = loc::MemRegionVal(R);
+
+ ProgramStateRef State = Pred->getState();
+
+ // If we created a new MemRegion for the lambda, we should explicitly bind
+ // the captures.
+ CXXRecordDecl::field_iterator CurField = LE->getLambdaClass()->field_begin();
+ for (LambdaExpr::const_capture_init_iterator i = LE->capture_init_begin(),
+ e = LE->capture_init_end();
+ i != e; ++i, ++CurField) {
+ FieldDecl *FieldForCapture = *CurField;
+ SVal FieldLoc = State->getLValue(FieldForCapture, V);
+
+ SVal InitVal;
+ if (!FieldForCapture->hasCapturedVLAType()) {
+ Expr *InitExpr = *i;
+ assert(InitExpr && "Capture missing initialization expression");
+ InitVal = State->getSVal(InitExpr, LocCtxt);
+ } else {
+ // The field stores the length of a captured variable-length array.
+ // These captures don't have initialization expressions; instead we
+ // get the length from the VLAType size expression.
+ Expr *SizeExpr = FieldForCapture->getCapturedVLAType()->getSizeExpr();
+ InitVal = State->getSVal(SizeExpr, LocCtxt);
+ }
+
+ State = State->bindLoc(FieldLoc, InitVal);
+ }
+
+ // Decay the Loc into an RValue, because there might be a
+ // MaterializeTemporaryExpr node above this one which expects the bound value
+ // to be an RValue.
+ SVal LambdaRVal = State->getSVal(R);
+
+ ExplodedNodeSet Tmp;
+ StmtNodeBuilder Bldr(Pred, Tmp, *currBldrCtx);
+ // FIXME: is this the right program point kind?
+ Bldr.generateNode(LE, Pred,
+ State->BindExpr(LE, LocCtxt, LambdaRVal),
+ nullptr, ProgramPoint::PostLValueKind);
+
+ // FIXME: Move all post/pre visits to ::Visit().
+ getCheckerManager().runCheckersForPostStmt(Dst, Tmp, LE, *this);
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
new file mode 100644
index 0000000..74cc8d2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -0,0 +1,1009 @@
+//=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ExprEngine's support for calls and returns.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "PrettyStackTraceLocationContext.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/SaveAndRestore.h"
+
+using namespace clang;
+using namespace ento;
+
+#define DEBUG_TYPE "ExprEngine"
+
+STATISTIC(NumOfDynamicDispatchPathSplits,
+ "The # of times we split the path due to imprecise dynamic dispatch info");
+
+STATISTIC(NumInlinedCalls,
+ "The # of times we inlined a call");
+
+STATISTIC(NumReachedInlineCountMax,
+ "The # of times we reached inline count maximum");
+
+void ExprEngine::processCallEnter(CallEnter CE, ExplodedNode *Pred) {
+ // Get the entry block in the CFG of the callee.
+ const StackFrameContext *calleeCtx = CE.getCalleeContext();
+ PrettyStackTraceLocationContext CrashInfo(calleeCtx);
+
+ const CFG *CalleeCFG = calleeCtx->getCFG();
+ const CFGBlock *Entry = &(CalleeCFG->getEntry());
+
+ // Validate the CFG.
+ assert(Entry->empty());
+ assert(Entry->succ_size() == 1);
+
+ // Get the solitary successor.
+ const CFGBlock *Succ = *(Entry->succ_begin());
+
+ // Construct an edge representing the starting location in the callee.
+ BlockEdge Loc(Entry, Succ, calleeCtx);
+
+ ProgramStateRef state = Pred->getState();
+
+ // Construct a new node and add it to the worklist.
+ bool isNew;
+ ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
+ Node->addPredecessor(Pred, G);
+ if (isNew)
+ Engine.getWorkList()->enqueue(Node);
+}
+
+// Find the last statement on the path to the exploded node and the
+// corresponding Block.
+static std::pair<const Stmt*,
+ const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
+ const Stmt *S = nullptr;
+ const CFGBlock *Blk = nullptr;
+ const StackFrameContext *SF =
+ Node->getLocation().getLocationContext()->getCurrentStackFrame();
+
+ // Back up through the ExplodedGraph until we reach a statement node in this
+ // stack frame.
+ while (Node) {
+ const ProgramPoint &PP = Node->getLocation();
+
+ if (PP.getLocationContext()->getCurrentStackFrame() == SF) {
+ if (Optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
+ S = SP->getStmt();
+ break;
+ } else if (Optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
+ S = CEE->getCalleeContext()->getCallSite();
+ if (S)
+ break;
+
+ // If there is no statement, this is an implicitly-generated call.
+ // We'll walk backwards over it and then continue the loop to find
+ // an actual statement.
+ Optional<CallEnter> CE;
+ do {
+ Node = Node->getFirstPred();
+ CE = Node->getLocationAs<CallEnter>();
+ } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());
+
+ // Continue searching the graph.
+ } else if (Optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
+ Blk = BE->getSrc();
+ }
+ } else if (Optional<CallEnter> CE = PP.getAs<CallEnter>()) {
+ // If we reached the CallEnter for this function, it has no statements.
+ if (CE->getCalleeContext() == SF)
+ break;
+ }
+
+ if (Node->pred_empty())
+ return std::make_pair(nullptr, nullptr);
+
+ Node = *Node->pred_begin();
+ }
+
+ return std::make_pair(S, Blk);
+}
+
+/// Adjusts a return value when the called function's return type does not
+/// match the caller's expression type. This can happen when a dynamic call
+/// is devirtualized, and the overriding method has a covariant (more
+/// specific) return type than the parent's method. For C++ objects, this
+/// means we need to add base casts.
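+///
+/// For example (a hypothetical hierarchy):
+///   struct Base { virtual Base *clone(); };
+///   struct Derived : Base { Derived *clone() override; };
+/// If a call through 'Base *' is devirtualized to Derived::clone, the
+/// returned Derived* must be cast back up to Base*.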
+static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
+ StoreManager &StoreMgr) {
+ // For now, the only adjustments we handle apply only to locations.
+ if (!V.getAs<Loc>())
+ return V;
+
+ // If the types already match, don't do any unnecessary work.
+ ExpectedTy = ExpectedTy.getCanonicalType();
+ ActualTy = ActualTy.getCanonicalType();
+ if (ExpectedTy == ActualTy)
+ return V;
+
+ // No adjustment is needed between Objective-C pointer types.
+ if (ExpectedTy->isObjCObjectPointerType() &&
+ ActualTy->isObjCObjectPointerType())
+ return V;
+
+ // C++ object pointers may need "derived-to-base" casts.
+ const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
+ const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
+ if (ExpectedClass && ActualClass) {
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+ if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
+ !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
+ return StoreMgr.evalDerivedToBase(V, Paths.front());
+ }
+ }
+
+ // Unfortunately, Objective-C does not enforce that overridden methods have
+ // covariant return types, so we can't assert that that never happens.
+ // Be safe and return UnknownVal().
+ return UnknownVal();
+}
+
+void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ // Find the last statement in the function and the corresponding basic block.
+ const Stmt *LastSt = nullptr;
+ const CFGBlock *Blk = nullptr;
+ std::tie(LastSt, Blk) = getLastStmt(Pred);
+ if (!Blk || !LastSt) {
+ Dst.Add(Pred);
+ return;
+ }
+
+ // Here, we destroy the current location context. We use the current
+ // function's entire body as a diagnostic statement, with which the program
+ // point will be associated. However, we only want to use LastStmt as a
+ // reference for what to clean up if it's a ReturnStmt; otherwise, everything
+ // is dead.
+ SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC);
+ const LocationContext *LCtx = Pred->getLocationContext();
+ removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx,
+ LCtx->getAnalysisDeclContext()->getBody(),
+ ProgramPoint::PostStmtPurgeDeadSymbolsKind);
+}
+
+static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
+ const StackFrameContext *calleeCtx) {
+ const Decl *RuntimeCallee = calleeCtx->getDecl();
+ const Decl *StaticDecl = Call->getDecl();
+ assert(RuntimeCallee);
+ if (!StaticDecl)
+ return true;
+ return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
+}
+
+/// Returns true if the CXXConstructExpr \p E was intended to construct a
+/// prvalue for the region in \p V.
+///
+/// Note that we can't just test for rvalue vs. glvalue because
+/// CXXConstructExprs embedded in DeclStmts and initializers are considered
+/// rvalues by the AST, and the analyzer would like to treat them as lvalues.
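+///
+/// For example, in 'f(S())' the S() is constructed into a temporary region
+/// (a true prvalue), while in 'S s = S();' it is constructed directly into
+/// the region for 's'.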
+static bool isTemporaryPRValue(const CXXConstructExpr *E, SVal V) {
+ if (E->isGLValue())
+ return false;
+
+ const MemRegion *MR = V.getAsRegion();
+ if (!MR)
+ return false;
+
+ return isa<CXXTempObjectRegion>(MR);
+}
+
+/// The call exit is simulated with a sequence of nodes, which occur between
+/// CallExitBegin and CallExitEnd. The following operations occur between the
+/// two program points:
+/// 1. CallExitBegin (triggers the start of call exit sequence)
+/// 2. Bind the return value
+/// 3. Run remove-dead-bindings to clean up the dead symbols from the callee.
+/// 4. CallExitEnd (switch to the caller context)
+/// 5. PostStmt<CallExpr>
+void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
+ // Step 1 CEBNode was generated before the call.
+ PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext());
+ const StackFrameContext *calleeCtx =
+ CEBNode->getLocationContext()->getCurrentStackFrame();
+
+ // The parent context might not be a stack frame, so make sure we
+ // look up the first enclosing stack frame.
+ const StackFrameContext *callerCtx =
+ calleeCtx->getParent()->getCurrentStackFrame();
+
+ const Stmt *CE = calleeCtx->getCallSite();
+ ProgramStateRef state = CEBNode->getState();
+ // Find the last statement in the function and the corresponding basic block.
+ const Stmt *LastSt = nullptr;
+ const CFGBlock *Blk = nullptr;
+ std::tie(LastSt, Blk) = getLastStmt(CEBNode);
+
+ // Generate a CallEvent /before/ cleaning the state, so that we can get the
+ // correct value for 'this' (if necessary).
+ CallEventManager &CEMgr = getStateManager().getCallEventManager();
+ CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);
+
+ // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.
+
+ // If the callee returns an expression, bind its value to CallExpr.
+ if (CE) {
+ if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
+ const LocationContext *LCtx = CEBNode->getLocationContext();
+ SVal V = state->getSVal(RS, LCtx);
+
+ // Ensure that the return type matches the type of the returned Expr.
+ if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
+ QualType ReturnedTy =
+ CallEvent::getDeclaredResultType(calleeCtx->getDecl());
+ if (!ReturnedTy.isNull()) {
+ if (const Expr *Ex = dyn_cast<Expr>(CE)) {
+ V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
+ getStoreManager());
+ }
+ }
+ }
+
+ state = state->BindExpr(CE, callerCtx, V);
+ }
+
+ // Bind the constructed object value to CXXConstructExpr.
+ if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
+ loc::MemRegionVal This =
+ svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
+ SVal ThisV = state->getSVal(This);
+
+ // If the constructed object is a temporary prvalue, get its bindings.
+ if (isTemporaryPRValue(CCE, ThisV))
+ ThisV = state->getSVal(ThisV.castAs<Loc>());
+
+ state = state->BindExpr(CCE, callerCtx, ThisV);
+ }
+ }
+
+ // Step 3: BindedRetNode -> CleanedNodes
+ // If we can find a statement and a block in the inlined function, run remove
+ // dead bindings before returning from the call. This is important to ensure
+ // that we report issues such as leaks in the stack contexts in which
+ // they occurred.
+ ExplodedNodeSet CleanedNodes;
+ if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
+ static SimpleProgramPointTag retValBind("ExprEngine", "Bind Return Value");
+ PostStmt Loc(LastSt, calleeCtx, &retValBind);
+ bool isNew;
+ ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
+ BindedRetNode->addPredecessor(CEBNode, G);
+ if (!isNew)
+ return;
+
+ NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
+ currBldrCtx = &Ctx;
+ // Here, we call the Symbol Reaper with a null statement and the callee
+ // location context, telling it to clean up everything in the callee's context
+ // (and its children). We use the callee's function body as a diagnostic
+ // statement, with which the program point will be associated.
+ removeDead(BindedRetNode, CleanedNodes, nullptr, calleeCtx,
+ calleeCtx->getAnalysisDeclContext()->getBody(),
+ ProgramPoint::PostStmtPurgeDeadSymbolsKind);
+ currBldrCtx = nullptr;
+ } else {
+ CleanedNodes.Add(CEBNode);
+ }
+
+ for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
+ E = CleanedNodes.end(); I != E; ++I) {
+
+ // Step 4: Generate the CallExit and leave the callee's context.
+ // CleanedNodes -> CEENode
+ CallExitEnd Loc(calleeCtx, callerCtx);
+ bool isNew;
+ ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();
+ ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
+ CEENode->addPredecessor(*I, G);
+ if (!isNew)
+ return;
+
+ // Step 5: Perform the post-condition check of the CallExpr and enqueue the
+ // result onto the work list.
+ // CEENode -> Dst -> WorkList
+ NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
+ SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx,
+ &Ctx);
+ SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());
+
+ CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);
+
+ ExplodedNodeSet DstPostCall;
+ getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
+ *UpdatedCall, *this,
+ /*WasInlined=*/true);
+
+ ExplodedNodeSet Dst;
+ if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
+ getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
+ *this,
+ /*WasInlined=*/true);
+ } else if (CE) {
+ getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
+ *this, /*WasInlined=*/true);
+ } else {
+ Dst.insert(DstPostCall);
+ }
+
+ // Enqueue the next element in the block.
+ for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
+ PSI != PSE; ++PSI) {
+ Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(),
+ calleeCtx->getIndex()+1);
+ }
+ }
+}
+
+void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
+ bool &IsRecursive, unsigned &StackDepth) {
+ IsRecursive = false;
+ StackDepth = 0;
+
+ while (LCtx) {
+ if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
+ const Decl *DI = SFC->getDecl();
+
+ // Mark recursive (and mutually recursive) functions and always count
+ // them when measuring the stack depth.
+ if (DI == D) {
+ IsRecursive = true;
+ ++StackDepth;
+ LCtx = LCtx->getParent();
+ continue;
+ }
+
+      // Do not count small functions when determining the stack depth.
+ AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
+ const CFG *CalleeCFG = CalleeADC->getCFG();
+ if (CalleeCFG->getNumBlockIDs() > AMgr.options.getAlwaysInlineSize())
+ ++StackDepth;
+ }
+ LCtx = LCtx->getParent();
+ }
+}
+
+static bool IsInStdNamespace(const FunctionDecl *FD) {
+ const DeclContext *DC = FD->getEnclosingNamespaceContext();
+ const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
+ if (!ND)
+ return false;
+
+ while (const DeclContext *Parent = ND->getParent()) {
+ if (!isa<NamespaceDecl>(Parent))
+ break;
+ ND = cast<NamespaceDecl>(Parent);
+ }
+
+ return ND->isStdNamespace();
+}
+
+// The GDM component containing the dynamic dispatch bifurcation info. When
+// the exact type of the receiver is not known, we want to explore both paths:
+// one on which we inline the call and one on which we don't. This is done to
+// ensure we do not drop coverage.
+// This is a map from the receiver region to a flag specifying whether we
+// consider this region's information precise along the given path.
+namespace {
+ enum DynamicDispatchMode {
+ DynamicDispatchModeInlined = 1,
+ DynamicDispatchModeConservative
+ };
+}
+REGISTER_TRAIT_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
+ CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *,
+ unsigned))
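+// Note: the mapped value is stored as 'unsigned' but is always one of the
+// DynamicDispatchMode enumerators above; BifurcateCall below records
+// DynamicDispatchModeInlined on one path and DynamicDispatchModeConservative
+// on the other.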
+
+bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
+ NodeBuilder &Bldr, ExplodedNode *Pred,
+ ProgramStateRef State) {
+ assert(D);
+
+ const LocationContext *CurLC = Pred->getLocationContext();
+ const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
+ const LocationContext *ParentOfCallee = CallerSFC;
+ if (Call.getKind() == CE_Block &&
+ !cast<BlockCall>(Call).isConversionFromLambda()) {
+ const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
+ assert(BR && "If we have the block definition we should have its region");
+ AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
+ ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
+ cast<BlockDecl>(D),
+ BR);
+ }
+
+ // This may be NULL, but that's fine.
+ const Expr *CallE = Call.getOriginExpr();
+
+ // Construct a new stack frame for the callee.
+ AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
+ const StackFrameContext *CalleeSFC =
+ CalleeADC->getStackFrame(ParentOfCallee, CallE,
+ currBldrCtx->getBlock(),
+ currStmtIdx);
+
+ CallEnter Loc(CallE, CalleeSFC, CurLC);
+
+ // Construct a new state which contains the mapping from actual to
+ // formal arguments.
+ State = State->enterStackFrame(Call, CalleeSFC);
+
+ bool isNew;
+ if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
+ N->addPredecessor(Pred, G);
+ if (isNew)
+ Engine.getWorkList()->enqueue(N);
+ }
+
+  // If we decided to inline the call, the successor has been manually
+  // added onto the work list, so remove it from the node builder.
+ Bldr.takeNodes(Pred);
+
+ NumInlinedCalls++;
+
+ // Mark the decl as visited.
+ if (VisitedCallees)
+ VisitedCallees->insert(D);
+
+ return true;
+}
+
+static ProgramStateRef getInlineFailedState(ProgramStateRef State,
+ const Stmt *CallE) {
+ const void *ReplayState = State->get<ReplayWithoutInlining>();
+ if (!ReplayState)
+ return nullptr;
+
+ assert(ReplayState == CallE && "Backtracked to the wrong call.");
+ (void)CallE;
+
+ return State->remove<ReplayWithoutInlining>();
+}
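+// Sketch of how this is used (see defaultEvalCall below): when the state
+// carries a ReplayWithoutInlining marker for this call, inlining is skipped
+// and the call is evaluated conservatively instead.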
+
+void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
+ ExplodedNodeSet &dst) {
+ // Perform the previsit of the CallExpr.
+ ExplodedNodeSet dstPreVisit;
+ getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);
+
+ // Get the call in its initial state. We use this as a template to perform
+ // all the checks.
+ CallEventManager &CEMgr = getStateManager().getCallEventManager();
+ CallEventRef<> CallTemplate
+ = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());
+
+  // Evaluate the function call. We try each of the checkers
+  // to see if they can evaluate the function call.
+ ExplodedNodeSet dstCallEvaluated;
+ for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
+ I != E; ++I) {
+ evalCall(dstCallEvaluated, *I, *CallTemplate);
+ }
+
+ // Finally, perform the post-condition check of the CallExpr and store
+ // the created nodes in 'Dst'.
+ // Note that if the call was inlined, dstCallEvaluated will be empty.
+ // The post-CallExpr check will occur in processCallExit.
+ getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
+ *this);
+}
+
+void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
+ const CallEvent &Call) {
+ // WARNING: At this time, the state attached to 'Call' may be older than the
+ // state in 'Pred'. This is a minor optimization since CheckerManager will
+ // use an updated CallEvent instance when calling checkers, but if 'Call' is
+ // ever used directly in this function all callers should be updated to pass
+ // the most recent state. (It is probably not worth doing the work here since
+ // for some callers this will not be necessary.)
+
+ // Run any pre-call checks using the generic call interface.
+ ExplodedNodeSet dstPreVisit;
+ getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred, Call, *this);
+
+  // Actually evaluate the function call. We try each of the checkers
+  // to see if they can evaluate the function call, and get a callback at
+  // defaultEvalCall if all of them fail.
+ ExplodedNodeSet dstCallEvaluated;
+ getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
+ Call, *this);
+
+ // Finally, run any post-call checks.
+ getCheckerManager().runCheckersForPostCall(Dst, dstCallEvaluated,
+ Call, *this);
+}
+
+ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
+ const LocationContext *LCtx,
+ ProgramStateRef State) {
+ const Expr *E = Call.getOriginExpr();
+ if (!E)
+ return State;
+
+ // Some method families have known return values.
+ if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
+ switch (Msg->getMethodFamily()) {
+ default:
+ break;
+ case OMF_autorelease:
+ case OMF_retain:
+ case OMF_self: {
+ // These methods return their receivers.
+ return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
+ }
+ }
+ } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
+ SVal ThisV = C->getCXXThisVal();
+
+ // If the constructed object is a temporary prvalue, get its bindings.
+ if (isTemporaryPRValue(cast<CXXConstructExpr>(E), ThisV))
+ ThisV = State->getSVal(ThisV.castAs<Loc>());
+
+ return State->BindExpr(E, LCtx, ThisV);
+ }
+
+ // Conjure a symbol if the return value is unknown.
+ QualType ResultTy = Call.getResultType();
+ SValBuilder &SVB = getSValBuilder();
+ unsigned Count = currBldrCtx->blockCount();
+ SVal R = SVB.conjureSymbolVal(nullptr, E, LCtx, ResultTy, Count);
+ return State->BindExpr(E, LCtx, R);
+}
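+// For illustration (assumed caller-side example, not from this file): for an
+// otherwise unmodeled call `int v = getValue();`, the code above conjures a
+// fresh symbol of type 'int' for the call expression, tagged with the current
+// block count so repeated visits get distinct symbols.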
+
+// Conservatively evaluate call by invalidating regions and binding
+// a conjured return value.
+void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
+ ExplodedNode *Pred,
+ ProgramStateRef State) {
+ State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
+ State = bindReturnValue(Call, Pred->getLocationContext(), State);
+
+ // And make the result node.
+ Bldr.generateNode(Call.getProgramPoint(), State, Pred);
+}
+
+enum CallInlinePolicy {
+  CIP_Allowed,          // The call may be inlined here.
+  CIP_DisallowedOnce,   // Do not inline at this call site, but others may.
+  CIP_DisallowedAlways  // Never inline this callee; record it in the summary.
+};
+
+static CallInlinePolicy mayInlineCallKind(const CallEvent &Call,
+ const ExplodedNode *Pred,
+ AnalyzerOptions &Opts) {
+ const LocationContext *CurLC = Pred->getLocationContext();
+ const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
+ switch (Call.getKind()) {
+ case CE_Function:
+ case CE_Block:
+ break;
+ case CE_CXXMember:
+ case CE_CXXMemberOperator:
+ if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
+ return CIP_DisallowedAlways;
+ break;
+ case CE_CXXConstructor: {
+ if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
+ return CIP_DisallowedAlways;
+
+ const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);
+
+ // FIXME: We don't handle constructors or destructors for arrays properly.
+ // Even once we do, we still need to be careful about implicitly-generated
+ // initializers for array fields in default move/copy constructors.
+ const MemRegion *Target = Ctor.getCXXThisVal().getAsRegion();
+ if (Target && isa<ElementRegion>(Target))
+ return CIP_DisallowedOnce;
+
+ // FIXME: This is a hack. We don't use the correct region for a new
+ // expression, so if we inline the constructor its result will just be
+ // thrown away. This short-term hack is tracked in <rdar://problem/12180598>
+ // and the longer-term possible fix is discussed in PR12014.
+ const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
+ if (const Stmt *Parent = CurLC->getParentMap().getParent(CtorExpr))
+ if (isa<CXXNewExpr>(Parent))
+ return CIP_DisallowedOnce;
+
+ // Inlining constructors requires including initializers in the CFG.
+ const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
+ assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
+ (void)ADC;
+
+ // If the destructor is trivial, it's always safe to inline the constructor.
+ if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
+ break;
+
+ // For other types, only inline constructors if destructor inlining is
+ // also enabled.
+ if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
+ return CIP_DisallowedAlways;
+
+ // FIXME: This is a hack. We don't handle temporary destructors
+ // right now, so we shouldn't inline their constructors.
+ if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete)
+ if (!Target || !isa<DeclRegion>(Target))
+ return CIP_DisallowedOnce;
+
+ break;
+ }
+ case CE_CXXDestructor: {
+ if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
+ return CIP_DisallowedAlways;
+
+ // Inlining destructors requires building the CFG correctly.
+ const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
+ assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
+ (void)ADC;
+
+ const CXXDestructorCall &Dtor = cast<CXXDestructorCall>(Call);
+
+ // FIXME: We don't handle constructors or destructors for arrays properly.
+ const MemRegion *Target = Dtor.getCXXThisVal().getAsRegion();
+ if (Target && isa<ElementRegion>(Target))
+ return CIP_DisallowedOnce;
+
+ break;
+ }
+ case CE_CXXAllocator:
+ if (Opts.mayInlineCXXAllocator())
+ break;
+ // Do not inline allocators until we model deallocators.
+ // This is unfortunate, but basically necessary for smart pointers and such.
+ return CIP_DisallowedAlways;
+ case CE_ObjCMessage:
+ if (!Opts.mayInlineObjCMethod())
+ return CIP_DisallowedAlways;
+ if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
+ Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
+ return CIP_DisallowedAlways;
+ break;
+ }
+
+ return CIP_Allowed;
+}
+
+/// Returns true if the given C++ class contains a member with the given name.
+static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
+ StringRef Name) {
+ const IdentifierInfo &II = Ctx.Idents.get(Name);
+ DeclarationName DeclName = Ctx.DeclarationNames.getIdentifier(&II);
+ if (!RD->lookup(DeclName).empty())
+ return true;
+
+ CXXBasePaths Paths(false, false, false);
+ if (RD->lookupInBases(
+ [DeclName](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
+ return CXXRecordDecl::FindOrdinaryMember(Specifier, Path, DeclName);
+ },
+ Paths))
+ return true;
+
+ return false;
+}
+
+/// Returns true if the given C++ class is a container or iterator.
+///
+/// Our heuristic for this is whether it contains a method named 'begin()' or a
+/// nested type named 'iterator' or 'iterator_category'.
+static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) {
+ return hasMember(Ctx, RD, "begin") ||
+ hasMember(Ctx, RD, "iterator") ||
+ hasMember(Ctx, RD, "iterator_category");
+}
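+// For example, under this heuristic a class with a 'begin' member (such as a
+// std::vector-like container) or a nested 'iterator_category' type (such as a
+// hand-written iterator) is classified as a container or iterator.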
+
+/// Returns true if the given function refers to a method of a C++ container
+/// or iterator.
+///
+/// We generally do a poor job modeling most containers right now, and might
+/// prefer not to inline their methods.
+static bool isContainerMethod(const ASTContext &Ctx,
+ const FunctionDecl *FD) {
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
+ return isContainerClass(Ctx, MD->getParent());
+ return false;
+}
+
+/// Returns true if the given function is the destructor of a class named
+/// "shared_ptr".
+static bool isCXXSharedPtrDtor(const FunctionDecl *FD) {
+ const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(FD);
+ if (!Dtor)
+ return false;
+
+ const CXXRecordDecl *RD = Dtor->getParent();
+ if (const IdentifierInfo *II = RD->getDeclName().getAsIdentifierInfo())
+ if (II->isStr("shared_ptr"))
+ return true;
+
+ return false;
+}
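+// Note that this matches on the class name alone, so any class literally
+// named "shared_ptr" -- not just std::shared_ptr -- is treated this way.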
+
+/// Returns true if the function in \p CalleeADC may be inlined in general.
+///
+/// This checks static properties of the function, such as its signature and
+/// CFG, to determine whether the analyzer should ever consider inlining it,
+/// in any context.
+static bool mayInlineDecl(AnalysisDeclContext *CalleeADC,
+ AnalyzerOptions &Opts) {
+ // FIXME: Do not inline variadic calls.
+ if (CallEvent::isVariadic(CalleeADC->getDecl()))
+ return false;
+
+ // Check certain C++-related inlining policies.
+ ASTContext &Ctx = CalleeADC->getASTContext();
+ if (Ctx.getLangOpts().CPlusPlus) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
+ // Conditionally control the inlining of template functions.
+ if (!Opts.mayInlineTemplateFunctions())
+ if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
+ return false;
+
+ // Conditionally control the inlining of C++ standard library functions.
+ if (!Opts.mayInlineCXXStandardLibrary())
+ if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
+ if (IsInStdNamespace(FD))
+ return false;
+
+ // Conditionally control the inlining of methods on objects that look
+ // like C++ containers.
+ if (!Opts.mayInlineCXXContainerMethods())
+ if (!Ctx.getSourceManager().isInMainFile(FD->getLocation()))
+ if (isContainerMethod(Ctx, FD))
+ return false;
+
+ // Conditionally control the inlining of the destructor of C++ shared_ptr.
+      // We don't currently do a good job modeling shared_ptr because we
+      // can't see the reference count, so treating it as opaque is probably
+      // the best idea.
+ if (!Opts.mayInlineCXXSharedPtrDtor())
+ if (isCXXSharedPtrDtor(FD))
+ return false;
+
+ }
+ }
+
+ // It is possible that the CFG cannot be constructed.
+ // Be safe, and check if the CalleeCFG is valid.
+ const CFG *CalleeCFG = CalleeADC->getCFG();
+ if (!CalleeCFG)
+ return false;
+
+ // Do not inline large functions.
+ if (CalleeCFG->getNumBlockIDs() > Opts.getMaxInlinableSize())
+ return false;
+
+ // It is possible that the live variables analysis cannot be
+ // run. If so, bail out.
+ if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
+ return false;
+
+ return true;
+}
+
+bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
+ const ExplodedNode *Pred) {
+ if (!D)
+ return false;
+
+ AnalysisManager &AMgr = getAnalysisManager();
+ AnalyzerOptions &Opts = AMgr.options;
+ AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
+ AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);
+
+ // Temporary object destructor processing is currently broken, so we never
+ // inline them.
+ // FIXME: Remove this once temp destructors are working.
+ if (isa<CXXDestructorCall>(Call)) {
+ if ((*currBldrCtx->getBlock())[currStmtIdx].getAs<CFGTemporaryDtor>())
+ return false;
+ }
+
+  // It is essential to inline the auto-synthesized bodies, as they are
+  // usually small and commonly used. Note: we should do this check early on
+  // to ensure we always inline these calls.
+ if (CalleeADC->isBodyAutosynthesized())
+ return true;
+
+ if (!AMgr.shouldInlineCall())
+ return false;
+
+ // Check if this function has been marked as non-inlinable.
+ Optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
+ if (MayInline.hasValue()) {
+ if (!MayInline.getValue())
+ return false;
+
+ } else {
+ // We haven't actually checked the static properties of this function yet.
+ // Do that now, and record our decision in the function summaries.
+ if (mayInlineDecl(CalleeADC, Opts)) {
+ Engine.FunctionSummaries->markMayInline(D);
+ } else {
+ Engine.FunctionSummaries->markShouldNotInline(D);
+ return false;
+ }
+ }
+
+ // Check if we should inline a call based on its kind.
+ // FIXME: this checks both static and dynamic properties of the call, which
+ // means we're redoing a bit of work that could be cached in the function
+ // summary.
+ CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts);
+ if (CIP != CIP_Allowed) {
+ if (CIP == CIP_DisallowedAlways) {
+ assert(!MayInline.hasValue() || MayInline.getValue());
+ Engine.FunctionSummaries->markShouldNotInline(D);
+ }
+ return false;
+ }
+
+ const CFG *CalleeCFG = CalleeADC->getCFG();
+
+ // Do not inline if recursive or we've reached max stack frame count.
+ bool IsRecursive = false;
+ unsigned StackDepth = 0;
+ examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
+ if ((StackDepth >= Opts.InlineMaxStackDepth) &&
+ ((CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize())
+ || IsRecursive))
+ return false;
+
+ // Do not inline large functions too many times.
+ if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
+ Opts.getMaxTimesInlineLarge()) &&
+ CalleeCFG->getNumBlockIDs() >=
+ Opts.getMinCFGSizeTreatFunctionsAsLarge()) {
+ NumReachedInlineCountMax++;
+ return false;
+ }
+
+ if (HowToInline == Inline_Minimal &&
+ (CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize()
+ || IsRecursive))
+ return false;
+
+ Engine.FunctionSummaries->bumpNumTimesInlined(D);
+
+ return true;
+}
+
+static bool isTrivialObjectAssignment(const CallEvent &Call) {
+ const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
+ if (!ICall)
+ return false;
+
+ const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
+ if (!MD)
+ return false;
+ if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
+ return false;
+
+ return MD->isTrivial();
+}
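+// For illustration (hypothetical type): the implicitly-defined copy
+// assignment of `struct P { int x, y; };` is trivial, so assigning one P to
+// another is handled by performTrivialCopy below instead of being inlined.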
+
+void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
+ const CallEvent &CallTemplate) {
+ // Make sure we have the most recent state attached to the call.
+ ProgramStateRef State = Pred->getState();
+ CallEventRef<> Call = CallTemplate.cloneWithState(State);
+
+ // Special-case trivial assignment operators.
+ if (isTrivialObjectAssignment(*Call)) {
+ performTrivialCopy(Bldr, Pred, *Call);
+ return;
+ }
+
+ // Try to inline the call.
+ // The origin expression here is just used as a kind of checksum;
+ // this should still be safe even for CallEvents that don't come from exprs.
+ const Expr *E = Call->getOriginExpr();
+
+ ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
+ if (InlinedFailedState) {
+ // If we already tried once and failed, make sure we don't retry later.
+ State = InlinedFailedState;
+ } else {
+ RuntimeDefinition RD = Call->getRuntimeDefinition();
+ const Decl *D = RD.getDecl();
+ if (shouldInlineCall(*Call, D, Pred)) {
+ if (RD.mayHaveOtherDefinitions()) {
+ AnalyzerOptions &Options = getAnalysisManager().options;
+
+ // Explore with and without inlining the call.
+ if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
+ BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
+ return;
+ }
+
+ // Don't inline if we're not in any dynamic dispatch mode.
+ if (Options.getIPAMode() != IPAK_DynamicDispatch) {
+ conservativeEvalCall(*Call, Bldr, Pred, State);
+ return;
+ }
+ }
+
+ // We are not bifurcating and we do have a Decl, so just inline.
+ if (inlineCall(*Call, D, Bldr, Pred, State))
+ return;
+ }
+ }
+
+ // If we can't inline it, handle the return value and invalidate the regions.
+ conservativeEvalCall(*Call, Bldr, Pred, State);
+}
+
+void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
+ const CallEvent &Call, const Decl *D,
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
+ assert(BifurReg);
+ BifurReg = BifurReg->StripCasts();
+
+ // Check if we've performed the split already - note, we only want
+ // to split the path once per memory region.
+ ProgramStateRef State = Pred->getState();
+ const unsigned *BState =
+ State->get<DynamicDispatchBifurcationMap>(BifurReg);
+ if (BState) {
+ // If we are on "inline path", keep inlining if possible.
+ if (*BState == DynamicDispatchModeInlined)
+ if (inlineCall(Call, D, Bldr, Pred, State))
+ return;
+    // If inlining failed, or we are on the path where we assume we
+    // don't have enough info about the receiver to inline, conjure the
+    // return value and invalidate the regions.
+ conservativeEvalCall(Call, Bldr, Pred, State);
+ return;
+ }
+
+ // If we got here, this is the first time we process a message to this
+ // region, so split the path.
+ ProgramStateRef IState =
+ State->set<DynamicDispatchBifurcationMap>(BifurReg,
+ DynamicDispatchModeInlined);
+ inlineCall(Call, D, Bldr, Pred, IState);
+
+ ProgramStateRef NoIState =
+ State->set<DynamicDispatchBifurcationMap>(BifurReg,
+ DynamicDispatchModeConservative);
+ conservativeEvalCall(Call, Bldr, Pred, NoIState);
+
+ NumOfDynamicDispatchPathSplits++;
+ return;
+}
+
+void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+
+ ExplodedNodeSet dstPreVisit;
+ getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);
+
+ StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);
+
+ if (RS->getRetValue()) {
+ for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
+ ei = dstPreVisit.end(); it != ei; ++it) {
+ B.generateNode(RS, *it, (*it)->getState());
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
new file mode 100644
index 0000000..92c5fe6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
@@ -0,0 +1,262 @@
+//=-- ExprEngineObjC.cpp - ExprEngine support for Objective-C ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ExprEngine's support for Objective-C expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtObjC.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+
+using namespace clang;
+using namespace ento;
+
+void ExprEngine::VisitLvalObjCIvarRefExpr(const ObjCIvarRefExpr *Ex,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal baseVal = state->getSVal(Ex->getBase(), LCtx);
+ SVal location = state->getLValue(Ex->getDecl(), baseVal);
+
+ ExplodedNodeSet dstIvar;
+ StmtNodeBuilder Bldr(Pred, dstIvar, *currBldrCtx);
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, location));
+
+ // Perform the post-condition check of the ObjCIvarRefExpr and store
+ // the created nodes in 'Dst'.
+ getCheckerManager().runCheckersForPostStmt(Dst, dstIvar, Ex, *this);
+}
+
+void ExprEngine::VisitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt *S,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ getCheckerManager().runCheckersForPreStmt(Dst, Pred, S, *this);
+}
+
+void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+
+ // ObjCForCollectionStmts are processed in two places. This method
+ // handles the case where an ObjCForCollectionStmt* occurs as one of the
+ // statements within a basic block. This transfer function does two things:
+ //
+ // (1) binds the next container value to 'element'. This creates a new
+ // node in the ExplodedGraph.
+ //
+ // (2) binds the value 0/1 to the ObjCForCollectionStmt* itself, indicating
+ // whether or not the container has any more elements. This value
+ // will be tested in ProcessBranch. We need to explicitly bind
+ // this value because a container can contain nil elements.
+ //
+ // FIXME: Eventually this logic should actually do dispatches to
+ // 'countByEnumeratingWithState:objects:count:' (NSFastEnumeration).
+ // This will require simulating a temporary NSFastEnumerationState, either
+ // through an SVal or through the use of MemRegions. This value can
+ // be affixed to the ObjCForCollectionStmt* instead of 0/1; when the loop
+  //   terminates we reclaim the temporary (it goes out of scope) and we
+  //   can test if the SVal is 0 or if the MemRegion is null (depending
+ // on what approach we take).
+ //
+ // For now: simulate (1) by assigning either a symbol or nil if the
+ // container is empty. Thus this transfer function will by default
+ // result in state splitting.
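+  //
+  // Sketch of the resulting split (assumed example, not from this file): for
+  // `for (id x in coll) ...`, the 'hasElems' state below binds 'x' to a
+  // conjured symbol and binds 1 to the statement, while the 'noElems' state
+  // binds 'x' to nil and binds 0 to the statement.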
+
+ const Stmt *elem = S->getElement();
+ ProgramStateRef state = Pred->getState();
+ SVal elementV;
+
+ if (const DeclStmt *DS = dyn_cast<DeclStmt>(elem)) {
+ const VarDecl *elemD = cast<VarDecl>(DS->getSingleDecl());
+ assert(elemD->getInit() == nullptr);
+ elementV = state->getLValue(elemD, Pred->getLocationContext());
+ }
+ else {
+ elementV = state->getSVal(elem, Pred->getLocationContext());
+ }
+
+ ExplodedNodeSet dstLocation;
+ evalLocation(dstLocation, S, elem, Pred, state, elementV, nullptr, false);
+
+ ExplodedNodeSet Tmp;
+ StmtNodeBuilder Bldr(Pred, Tmp, *currBldrCtx);
+
+ for (ExplodedNodeSet::iterator NI = dstLocation.begin(),
+ NE = dstLocation.end(); NI!=NE; ++NI) {
+ Pred = *NI;
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+
+ // Handle the case where the container still has elements.
+ SVal TrueV = svalBuilder.makeTruthVal(1);
+ ProgramStateRef hasElems = state->BindExpr(S, LCtx, TrueV);
+
+ // Handle the case where the container has no elements.
+ SVal FalseV = svalBuilder.makeTruthVal(0);
+ ProgramStateRef noElems = state->BindExpr(S, LCtx, FalseV);
+
+ if (Optional<loc::MemRegionVal> MV = elementV.getAs<loc::MemRegionVal>())
+ if (const TypedValueRegion *R =
+ dyn_cast<TypedValueRegion>(MV->getRegion())) {
+ // FIXME: The proper thing to do is to really iterate over the
+ // container. We will do this with dispatch logic to the store.
+ // For now, just 'conjure' up a symbolic value.
+ QualType T = R->getValueType();
+ assert(Loc::isLocType(T));
+ SymbolRef Sym = SymMgr.conjureSymbol(elem, LCtx, T,
+ currBldrCtx->blockCount());
+ SVal V = svalBuilder.makeLoc(Sym);
+ hasElems = hasElems->bindLoc(elementV, V);
+
+ // Bind the location to 'nil' on the false branch.
+ SVal nilV = svalBuilder.makeIntVal(0, T);
+ noElems = noElems->bindLoc(elementV, nilV);
+ }
+
+ // Create the new nodes.
+ Bldr.generateNode(S, Pred, hasElems);
+ Bldr.generateNode(S, Pred, noElems);
+ }
+
+ // Finally, run any custom checkers.
+ // FIXME: Eventually all pre- and post-checks should live in VisitStmt.
+ getCheckerManager().runCheckersForPostStmt(Dst, Tmp, S, *this);
+}
+
+void ExprEngine::VisitObjCMessage(const ObjCMessageExpr *ME,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ CallEventManager &CEMgr = getStateManager().getCallEventManager();
+ CallEventRef<ObjCMethodCall> Msg =
+ CEMgr.getObjCMethodCall(ME, Pred->getState(), Pred->getLocationContext());
+
+ // There are three cases for the receiver:
+ // (1) it is definitely nil,
+ // (2) it is definitely non-nil, and
+ // (3) we don't know.
+ //
+ // If the receiver is definitely nil, we skip the pre/post callbacks and
+ // instead call the ObjCMessageNil callbacks and return.
+ //
+ // If the receiver is definitely non-nil, we call the pre- callbacks,
+ // evaluate the call, and call the post- callbacks.
+ //
+ // If we don't know, we drop the potential nil flow and instead
+ // continue from the assumed non-nil state as in (2). This approach
+ // intentionally drops coverage in order to prevent false alarms
+ // in the following scenario:
+ //
+ // id result = [o someMethod]
+ // if (result) {
+ // if (!o) {
+ // // <-- This program point should be unreachable because if o is nil
+  //        //  it must be the case that result is nil as well.
+ // }
+ // }
+ //
+ // We could avoid dropping coverage by performing an explicit case split
+ // on each method call -- but this would get very expensive. An alternative
+ // would be to introduce lazy constraints.
+ // FIXME: This ignores many potential bugs (<rdar://problem/11733396>).
+ // Revisit once we have lazier constraints.
+ if (Msg->isInstanceMessage()) {
+ SVal recVal = Msg->getReceiverSVal();
+ if (!recVal.isUndef()) {
+ // Bifurcate the state into nil and non-nil ones.
+ DefinedOrUnknownSVal receiverVal =
+ recVal.castAs<DefinedOrUnknownSVal>();
+ ProgramStateRef State = Pred->getState();
+
+ ProgramStateRef notNilState, nilState;
+ std::tie(notNilState, nilState) = State->assume(receiverVal);
+
+ // Receiver is definitely nil, so run ObjCMessageNil callbacks and return.
+ if (nilState && !notNilState) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+ bool HasTag = Pred->getLocation().getTag();
+ Pred = Bldr.generateNode(ME, Pred, nilState, nullptr,
+ ProgramPoint::PreStmtKind);
+ assert((Pred || HasTag) && "Should have cached out already!");
+ (void)HasTag;
+ if (!Pred)
+ return;
+ getCheckerManager().runCheckersForObjCMessageNil(Dst, Pred,
+ *Msg, *this);
+ return;
+ }
+
+ ExplodedNodeSet dstNonNil;
+ StmtNodeBuilder Bldr(Pred, dstNonNil, *currBldrCtx);
+ // Generate a transition to the non-nil state, dropping any potential
+ // nil flow.
+ if (notNilState != State) {
+ bool HasTag = Pred->getLocation().getTag();
+ Pred = Bldr.generateNode(ME, Pred, notNilState);
+ assert((Pred || HasTag) && "Should have cached out already!");
+ (void)HasTag;
+ if (!Pred)
+ return;
+ }
+ }
+ }
+
+  // Handle the previsit checks.
+ ExplodedNodeSet dstPrevisit;
+ getCheckerManager().runCheckersForPreObjCMessage(dstPrevisit, Pred,
+ *Msg, *this);
+ ExplodedNodeSet dstGenericPrevisit;
+ getCheckerManager().runCheckersForPreCall(dstGenericPrevisit, dstPrevisit,
+ *Msg, *this);
+
+  // Proceed with evaluating the message expression.
+ ExplodedNodeSet dstEval;
+ StmtNodeBuilder Bldr(dstGenericPrevisit, dstEval, *currBldrCtx);
+
+ for (ExplodedNodeSet::iterator DI = dstGenericPrevisit.begin(),
+ DE = dstGenericPrevisit.end(); DI != DE; ++DI) {
+ ExplodedNode *Pred = *DI;
+ ProgramStateRef State = Pred->getState();
+ CallEventRef<ObjCMethodCall> UpdatedMsg = Msg.cloneWithState(State);
+
+ if (UpdatedMsg->isInstanceMessage()) {
+ SVal recVal = UpdatedMsg->getReceiverSVal();
+ if (!recVal.isUndef()) {
+ if (ObjCNoRet.isImplicitNoReturn(ME)) {
+ // If we raise an exception, for now treat it as a sink.
+ // Eventually we will want to handle exceptions properly.
+ Bldr.generateSink(ME, Pred, State);
+ continue;
+ }
+ }
+ } else {
+ // Check for special class methods that are known to not return
+ // and that we should treat as a sink.
+ if (ObjCNoRet.isImplicitNoReturn(ME)) {
+ // If we raise an exception, for now treat it as a sink.
+ // Eventually we will want to handle exceptions properly.
+ Bldr.generateSink(ME, Pred, Pred->getState());
+ continue;
+ }
+ }
+
+ defaultEvalCall(Bldr, Pred, *UpdatedMsg);
+ }
+
+ ExplodedNodeSet dstPostvisit;
+ getCheckerManager().runCheckersForPostCall(dstPostvisit, dstEval,
+ *Msg, *this);
+
+ // Finally, perform the post-condition check of the ObjCMessageExpr and store
+ // the created nodes in 'Dst'.
+ getCheckerManager().runCheckersForPostObjCMessage(Dst, dstPostvisit,
+ *Msg, *this);
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/FunctionSummary.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/FunctionSummary.cpp
new file mode 100644
index 0000000..c21735b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/FunctionSummary.cpp
@@ -0,0 +1,32 @@
+//== FunctionSummary.cpp - Stores summaries of functions. ----------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a summary of a function gathered/used by static analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h"
+using namespace clang;
+using namespace ento;
+
+unsigned FunctionSummariesTy::getTotalNumBasicBlocks() {
+ unsigned Total = 0;
+ for (MapTy::iterator I = Map.begin(), E = Map.end(); I != E; ++I) {
+ Total += I->second.TotalBasicBlocks;
+ }
+ return Total;
+}
+
+unsigned FunctionSummariesTy::getTotalNumVisitedBasicBlocks() {
+ unsigned Total = 0;
+ for (MapTy::iterator I = Map.begin(), E = Map.end(); I != E; ++I) {
+ Total += I->second.VisitedBasicBlocks.count();
+ }
+ return Total;
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
new file mode 100644
index 0000000..b3edb85
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -0,0 +1,634 @@
+//===--- HTMLDiagnostics.cpp - HTML Diagnostics for Paths ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the HTMLDiagnostics object.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Rewrite/Core/HTMLRewrite.h"
+#include "clang/Rewrite/Core/Rewriter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/IssueHash.h"
+#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include <sstream>
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// Boilerplate.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class HTMLDiagnostics : public PathDiagnosticConsumer {
+ std::string Directory;
+ bool createdDir, noDir;
+ const Preprocessor &PP;
+ AnalyzerOptions &AnalyzerOpts;
+public:
+ HTMLDiagnostics(AnalyzerOptions &AnalyzerOpts, const std::string& prefix, const Preprocessor &pp);
+
+ ~HTMLDiagnostics() override { FlushDiagnostics(nullptr); }
+
+ void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+ FilesMade *filesMade) override;
+
+ StringRef getName() const override {
+ return "HTMLDiagnostics";
+ }
+
+ unsigned ProcessMacroPiece(raw_ostream &os,
+ const PathDiagnosticMacroPiece& P,
+ unsigned num);
+
+ void HandlePiece(Rewriter& R, FileID BugFileID,
+ const PathDiagnosticPiece& P, unsigned num, unsigned max);
+
+ void HighlightRange(Rewriter& R, FileID BugFileID, SourceRange Range,
+ const char *HighlightStart = "<span class=\"mrange\">",
+ const char *HighlightEnd = "</span>");
+
+ void ReportDiag(const PathDiagnostic& D,
+ FilesMade *filesMade);
+};
+
+} // end anonymous namespace
+
+HTMLDiagnostics::HTMLDiagnostics(AnalyzerOptions &AnalyzerOpts,
+ const std::string& prefix,
+ const Preprocessor &pp)
+ : Directory(prefix), createdDir(false), noDir(false), PP(pp), AnalyzerOpts(AnalyzerOpts) {
+}
+
+void ento::createHTMLDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
+ PathDiagnosticConsumers &C,
+ const std::string& prefix,
+ const Preprocessor &PP) {
+ C.push_back(new HTMLDiagnostics(AnalyzerOpts, prefix, PP));
+}
+
+//===----------------------------------------------------------------------===//
+// Report processing.
+//===----------------------------------------------------------------------===//
+
+void HTMLDiagnostics::FlushDiagnosticsImpl(
+ std::vector<const PathDiagnostic *> &Diags,
+ FilesMade *filesMade) {
+ for (std::vector<const PathDiagnostic *>::iterator it = Diags.begin(),
+ et = Diags.end(); it != et; ++it) {
+ ReportDiag(**it, filesMade);
+ }
+}
+
+void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
+ FilesMade *filesMade) {
+
+ // Create the HTML directory if it is missing.
+ if (!createdDir) {
+ createdDir = true;
+ if (std::error_code ec = llvm::sys::fs::create_directories(Directory)) {
+ llvm::errs() << "warning: could not create directory '"
+ << Directory << "': " << ec.message() << '\n';
+
+ noDir = true;
+
+ return;
+ }
+ }
+
+ if (noDir)
+ return;
+
+ // First flatten out the entire path to make it easier to use.
+ PathPieces path = D.path.flatten(/*ShouldFlattenMacros=*/false);
+
+  // The path has already been prechecked: all parts of the path are
+  // from the same file and the path is non-empty.
+ const SourceManager &SMgr = path.front()->getLocation().getManager();
+ assert(!path.empty());
+ FileID FID =
+ path.front()->getLocation().asLocation().getExpansionLoc().getFileID();
+ assert(FID.isValid());
+
+ // Create a new rewriter to generate HTML.
+ Rewriter R(const_cast<SourceManager&>(SMgr), PP.getLangOpts());
+
+ // Get the function/method name
+ SmallString<128> declName("unknown");
+ int offsetDecl = 0;
+ if (const Decl *DeclWithIssue = D.getDeclWithIssue()) {
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(DeclWithIssue)) {
+ declName = ND->getDeclName().getAsString();
+ }
+
+ if (const Stmt *Body = DeclWithIssue->getBody()) {
+      // Retrieve the relative position of the declaration, which will be
+      // used for the file name.
+ FullSourceLoc L(
+ SMgr.getExpansionLoc(path.back()->getLocation().asLocation()),
+ SMgr);
+ FullSourceLoc FunL(SMgr.getExpansionLoc(Body->getLocStart()), SMgr);
+ offsetDecl = L.getExpansionLineNumber() - FunL.getExpansionLineNumber();
+ }
+ }
+
+ // Process the path.
+ unsigned n = path.size();
+ unsigned max = n;
+
+ for (PathPieces::const_reverse_iterator I = path.rbegin(),
+ E = path.rend();
+ I != E; ++I, --n)
+ HandlePiece(R, FID, **I, n, max);
+
+ // Add line numbers, header, footer, etc.
+
+ // unsigned FID = R.getSourceMgr().getMainFileID();
+ html::EscapeText(R, FID);
+ html::AddLineNumbers(R, FID);
+
+ // If we have a preprocessor, relex the file and syntax highlight.
+ // We might not have a preprocessor if we come from a deserialized AST file,
+ // for example.
+
+ html::SyntaxHighlight(R, FID, PP);
+ html::HighlightMacros(R, FID, PP);
+
+ // Get the full directory name of the analyzed file.
+
+ const FileEntry* Entry = SMgr.getFileEntryForID(FID);
+
+  // This is a kludge; basically we want to prepend the full working
+  // directory if the analyzed file's path is relative. This is a work
+  // in progress.
+
+ llvm::SmallString<0> DirName;
+
+ if (llvm::sys::path::is_relative(Entry->getName())) {
+ llvm::sys::fs::current_path(DirName);
+ DirName += '/';
+ }
+
+ int LineNumber = path.back()->getLocation().asLocation().getExpansionLineNumber();
+ int ColumnNumber = path.back()->getLocation().asLocation().getExpansionColumnNumber();
+
+  // Add the name of the file and the bug summary at the top of the report.
+
+ {
+ std::string s;
+ llvm::raw_string_ostream os(s);
+
+ os << "<!-- REPORTHEADER -->\n"
+ << "<h3>Bug Summary</h3>\n<table class=\"simpletable\">\n"
+ "<tr><td class=\"rowname\">File:</td><td>"
+ << html::EscapeText(DirName)
+ << html::EscapeText(Entry->getName())
+ << "</td></tr>\n<tr><td class=\"rowname\">Location:</td><td>"
+ "<a href=\"#EndPath\">line "
+ << LineNumber
+ << ", column "
+ << ColumnNumber
+ << "</a></td></tr>\n"
+ "<tr><td class=\"rowname\">Description:</td><td>"
+ << D.getVerboseDescription() << "</td></tr>\n";
+
+ // Output any other meta data.
+
+ for (PathDiagnostic::meta_iterator I=D.meta_begin(), E=D.meta_end();
+ I!=E; ++I) {
+ os << "<tr><td></td><td>" << html::EscapeText(*I) << "</td></tr>\n";
+ }
+
+ os << "</table>\n<!-- REPORTSUMMARYEXTRA -->\n"
+ "<h3>Annotated Source Code</h3>\n";
+
+ R.InsertTextBefore(SMgr.getLocForStartOfFile(FID), os.str());
+ }
+
+ // Embed meta-data tags.
+ {
+ std::string s;
+ llvm::raw_string_ostream os(s);
+
+ StringRef BugDesc = D.getVerboseDescription();
+ if (!BugDesc.empty())
+ os << "\n<!-- BUGDESC " << BugDesc << " -->\n";
+
+ StringRef BugType = D.getBugType();
+ if (!BugType.empty())
+ os << "\n<!-- BUGTYPE " << BugType << " -->\n";
+
+ PathDiagnosticLocation UPDLoc = D.getUniqueingLoc();
+ FullSourceLoc L(SMgr.getExpansionLoc(UPDLoc.isValid()
+ ? UPDLoc.asLocation()
+ : D.getLocation().asLocation()),
+ SMgr);
+ const Decl *DeclWithIssue = D.getDeclWithIssue();
+
+ StringRef BugCategory = D.getCategory();
+ if (!BugCategory.empty())
+ os << "\n<!-- BUGCATEGORY " << BugCategory << " -->\n";
+
+ os << "\n<!-- BUGFILE " << DirName << Entry->getName() << " -->\n";
+
+ os << "\n<!-- FILENAME " << llvm::sys::path::filename(Entry->getName()) << " -->\n";
+
+ os << "\n<!-- FUNCTIONNAME " << declName << " -->\n";
+
+ os << "\n<!-- ISSUEHASHCONTENTOFLINEINCONTEXT "
+ << GetIssueHash(SMgr, L, D.getCheckName(), D.getBugType(), DeclWithIssue,
+ PP.getLangOpts()) << " -->\n";
+
+ os << "\n<!-- BUGLINE "
+ << LineNumber
+ << " -->\n";
+
+ os << "\n<!-- BUGCOLUMN "
+ << ColumnNumber
+ << " -->\n";
+
+ os << "\n<!-- BUGPATHLENGTH " << path.size() << " -->\n";
+
+ // Mark the end of the tags.
+ os << "\n<!-- BUGMETAEND -->\n";
+
+ // Insert the text.
+ R.InsertTextBefore(SMgr.getLocForStartOfFile(FID), os.str());
+ }
+
+ // Add CSS, header, and footer.
+
+ html::AddHeaderFooterInternalBuiltinCSS(R, FID, Entry->getName());
+
+ // Get the rewrite buffer.
+ const RewriteBuffer *Buf = R.getRewriteBufferFor(FID);
+
+ if (!Buf) {
+ llvm::errs() << "warning: no diagnostics generated for main file.\n";
+ return;
+ }
+
+ // Create a path for the target HTML file.
+ int FD;
+ SmallString<128> Model, ResultPath;
+
+ if (!AnalyzerOpts.shouldWriteStableReportFilename()) {
+ llvm::sys::path::append(Model, Directory, "report-%%%%%%.html");
+ if (std::error_code EC =
+ llvm::sys::fs::make_absolute(Model)) {
+ llvm::errs() << "warning: could not make '" << Model
+ << "' absolute: " << EC.message() << '\n';
+ return;
+ }
+ if (std::error_code EC =
+ llvm::sys::fs::createUniqueFile(Model, FD, ResultPath)) {
+ llvm::errs() << "warning: could not create file in '" << Directory
+ << "': " << EC.message() << '\n';
+ return;
+ }
+
+ } else {
+ int i = 1;
+ std::error_code EC;
+ do {
+ // Find a filename which is not already used
+ std::stringstream filename;
+ Model = "";
+ filename << "report-"
+ << llvm::sys::path::filename(Entry->getName()).str()
+ << "-" << declName.c_str()
+ << "-" << offsetDecl
+ << "-" << i << ".html";
+ llvm::sys::path::append(Model, Directory,
+ filename.str());
+ EC = llvm::sys::fs::openFileForWrite(Model,
+ FD,
+ llvm::sys::fs::F_RW |
+ llvm::sys::fs::F_Excl);
+ if (EC && EC != llvm::errc::file_exists) {
+ llvm::errs() << "warning: could not create file '" << Model
+ << "': " << EC.message() << '\n';
+ return;
+ }
+ i++;
+ } while (EC);
+ }
+
+ llvm::raw_fd_ostream os(FD, true);
+
+ if (filesMade)
+ filesMade->addDiagnostic(D, getName(),
+ llvm::sys::path::filename(ResultPath));
+
+ // Emit the HTML to disk.
+ for (RewriteBuffer::iterator I = Buf->begin(), E = Buf->end(); I!=E; ++I)
+ os << *I;
+}
+
+void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
+ const PathDiagnosticPiece& P,
+ unsigned num, unsigned max) {
+
+ // For now, just draw a box above the line in question, and emit the
+ // warning.
+ FullSourceLoc Pos = P.getLocation().asLocation();
+
+ if (!Pos.isValid())
+ return;
+
+ SourceManager &SM = R.getSourceMgr();
+ assert(&Pos.getManager() == &SM && "SourceManagers are different!");
+ std::pair<FileID, unsigned> LPosInfo = SM.getDecomposedExpansionLoc(Pos);
+
+ if (LPosInfo.first != BugFileID)
+ return;
+
+ const llvm::MemoryBuffer *Buf = SM.getBuffer(LPosInfo.first);
+ const char* FileStart = Buf->getBufferStart();
+
+ // Compute the column number. Rewind from the current position to the start
+ // of the line.
+ unsigned ColNo = SM.getColumnNumber(LPosInfo.first, LPosInfo.second);
+ const char *TokInstantiationPtr =Pos.getExpansionLoc().getCharacterData();
+ const char *LineStart = TokInstantiationPtr-ColNo;
+
+ // Compute LineEnd.
+ const char *LineEnd = TokInstantiationPtr;
+ const char* FileEnd = Buf->getBufferEnd();
+ while (*LineEnd != '\n' && LineEnd != FileEnd)
+ ++LineEnd;
+
+ // Compute the margin offset by counting tabs and non-tabs.
+ unsigned PosNo = 0;
+ for (const char* c = LineStart; c != TokInstantiationPtr; ++c)
+ PosNo += *c == '\t' ? 8 : 1;
+
+ // Create the html for the message.
+
+ const char *Kind = nullptr;
+ switch (P.getKind()) {
+ case PathDiagnosticPiece::Call:
+ llvm_unreachable("Calls should already be handled");
+ case PathDiagnosticPiece::Event: Kind = "Event"; break;
+ case PathDiagnosticPiece::ControlFlow: Kind = "Control"; break;
+ // Setting Kind to "Control" is intentional.
+ case PathDiagnosticPiece::Macro: Kind = "Control"; break;
+ }
+
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "\n<tr><td class=\"num\"></td><td class=\"line\"><div id=\"";
+
+ if (num == max)
+ os << "EndPath";
+ else
+ os << "Path" << num;
+
+ os << "\" class=\"msg";
+ if (Kind)
+ os << " msg" << Kind;
+ os << "\" style=\"margin-left:" << PosNo << "ex";
+
+ // Output a maximum size.
+ if (!isa<PathDiagnosticMacroPiece>(P)) {
+    // Get the string and determine the length of its longest
+    // whitespace-delimited token.
+ const std::string& Msg = P.getString();
+ unsigned max_token = 0;
+ unsigned cnt = 0;
+ unsigned len = Msg.size();
+
+ for (std::string::const_iterator I=Msg.begin(), E=Msg.end(); I!=E; ++I)
+ switch (*I) {
+ default:
+ ++cnt;
+ continue;
+ case ' ':
+ case '\t':
+ case '\n':
+ if (cnt > max_token) max_token = cnt;
+ cnt = 0;
+ }
+
+ if (cnt > max_token)
+ max_token = cnt;
+
+ // Determine the approximate size of the message bubble in em.
+ unsigned em;
+ const unsigned max_line = 120;
+
+ if (max_token >= max_line)
+ em = max_token / 2;
+ else {
+ unsigned characters = max_line;
+ unsigned lines = len / max_line;
+
+ if (lines > 0) {
+ for (; characters > max_token; --characters)
+ if (len / characters > lines) {
+ ++characters;
+ break;
+ }
+ }
+
+ em = characters / 2;
+ }
+
+ if (em < max_line/2)
+ os << "; max-width:" << em << "em";
+ }
+ else
+ os << "; max-width:100em";
+
+ os << "\">";
+
+ if (max > 1) {
+ os << "<table class=\"msgT\"><tr><td valign=\"top\">";
+ os << "<div class=\"PathIndex";
+ if (Kind) os << " PathIndex" << Kind;
+ os << "\">" << num << "</div>";
+
+ if (num > 1) {
+ os << "</td><td><div class=\"PathNav\"><a href=\"#Path"
+ << (num - 1)
+ << "\" title=\"Previous event ("
+ << (num - 1)
+ << ")\">&#x2190;</a></div></td>";
+ }
+
+ os << "</td><td>";
+ }
+
+ if (const PathDiagnosticMacroPiece *MP =
+ dyn_cast<PathDiagnosticMacroPiece>(&P)) {
+
+ os << "Within the expansion of the macro '";
+
+ // Get the name of the macro by relexing it.
+ {
+ FullSourceLoc L = MP->getLocation().asLocation().getExpansionLoc();
+ assert(L.isFileID());
+ StringRef BufferInfo = L.getBufferData();
+ std::pair<FileID, unsigned> LocInfo = L.getDecomposedLoc();
+ const char* MacroName = LocInfo.second + BufferInfo.data();
+ Lexer rawLexer(SM.getLocForStartOfFile(LocInfo.first), PP.getLangOpts(),
+ BufferInfo.begin(), MacroName, BufferInfo.end());
+
+ Token TheTok;
+ rawLexer.LexFromRawLexer(TheTok);
+ for (unsigned i = 0, n = TheTok.getLength(); i < n; ++i)
+ os << MacroName[i];
+ }
+
+ os << "':\n";
+
+ if (max > 1) {
+ os << "</td>";
+ if (num < max) {
+ os << "<td><div class=\"PathNav\"><a href=\"#";
+ if (num == max - 1)
+ os << "EndPath";
+ else
+ os << "Path" << (num + 1);
+ os << "\" title=\"Next event ("
+ << (num + 1)
+ << ")\">&#x2192;</a></div></td>";
+ }
+
+ os << "</tr></table>";
+ }
+
+ // Within a macro piece. Write out each event.
+ ProcessMacroPiece(os, *MP, 0);
+ }
+ else {
+ os << html::EscapeText(P.getString());
+
+ if (max > 1) {
+ os << "</td>";
+ if (num < max) {
+ os << "<td><div class=\"PathNav\"><a href=\"#";
+ if (num == max - 1)
+ os << "EndPath";
+ else
+ os << "Path" << (num + 1);
+ os << "\" title=\"Next event ("
+ << (num + 1)
+ << ")\">&#x2192;</a></div></td>";
+ }
+
+ os << "</tr></table>";
+ }
+ }
+
+ os << "</div></td></tr>";
+
+ // Insert the new html.
+ unsigned DisplayPos = LineEnd - FileStart;
+ SourceLocation Loc =
+ SM.getLocForStartOfFile(LPosInfo.first).getLocWithOffset(DisplayPos);
+
+ R.InsertTextBefore(Loc, os.str());
+
+ // Now highlight the ranges.
+ ArrayRef<SourceRange> Ranges = P.getRanges();
+ for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
+ E = Ranges.end(); I != E; ++I) {
+ HighlightRange(R, LPosInfo.first, *I);
+ }
+}
+
+static void EmitAlphaCounter(raw_ostream &os, unsigned n) {
+ unsigned x = n % ('z' - 'a');
+ n /= 'z' - 'a';
+
+ if (n > 0)
+ EmitAlphaCounter(os, n);
+
+ os << char('a' + x);
+}
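+// The resulting counters run 'a', 'b', ..., 'y', then 'ba', 'bb', ... --
+// effectively base-25 output in which 'z' is never emitted.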
+
+unsigned HTMLDiagnostics::ProcessMacroPiece(raw_ostream &os,
+ const PathDiagnosticMacroPiece& P,
+ unsigned num) {
+
+ for (PathPieces::const_iterator I = P.subPieces.begin(), E=P.subPieces.end();
+ I!=E; ++I) {
+
+ if (const PathDiagnosticMacroPiece *MP =
+ dyn_cast<PathDiagnosticMacroPiece>(*I)) {
+ num = ProcessMacroPiece(os, *MP, num);
+ continue;
+ }
+
+ if (PathDiagnosticEventPiece *EP = dyn_cast<PathDiagnosticEventPiece>(*I)) {
+ os << "<div class=\"msg msgEvent\" style=\"width:94%; "
+ "margin-left:5px\">"
+ "<table class=\"msgT\"><tr>"
+ "<td valign=\"top\"><div class=\"PathIndex PathIndexEvent\">";
+ EmitAlphaCounter(os, num++);
+ os << "</div></td><td valign=\"top\">"
+ << html::EscapeText(EP->getString())
+ << "</td></tr></table></div>\n";
+ }
+ }
+
+ return num;
+}
+
+void HTMLDiagnostics::HighlightRange(Rewriter& R, FileID BugFileID,
+ SourceRange Range,
+ const char *HighlightStart,
+ const char *HighlightEnd) {
+ SourceManager &SM = R.getSourceMgr();
+ const LangOptions &LangOpts = R.getLangOpts();
+
+ SourceLocation InstantiationStart = SM.getExpansionLoc(Range.getBegin());
+ unsigned StartLineNo = SM.getExpansionLineNumber(InstantiationStart);
+
+ SourceLocation InstantiationEnd = SM.getExpansionLoc(Range.getEnd());
+ unsigned EndLineNo = SM.getExpansionLineNumber(InstantiationEnd);
+
+ if (EndLineNo < StartLineNo)
+ return;
+
+ if (SM.getFileID(InstantiationStart) != BugFileID ||
+ SM.getFileID(InstantiationEnd) != BugFileID)
+ return;
+
+ // Compute the column number of the end.
+ unsigned EndColNo = SM.getExpansionColumnNumber(InstantiationEnd);
+ unsigned OldEndColNo = EndColNo;
+
+ if (EndColNo) {
+ // Add in the length of the token, so that we cover multi-char tokens.
+ EndColNo += Lexer::MeasureTokenLength(Range.getEnd(), SM, LangOpts)-1;
+ }
+
+ // Highlight the range. Make the span tag the outermost tag for the
+ // selected range.
+
+ SourceLocation E =
+ InstantiationEnd.getLocWithOffset(EndColNo - OldEndColNo);
+
+ html::HighlightRange(R, InstantiationStart, E, HighlightStart, HighlightEnd);
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/IssueHash.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/IssueHash.cpp
new file mode 100644
index 0000000..0a3af3d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/IssueHash.cpp
@@ -0,0 +1,196 @@
+//===---------- IssueHash.cpp - Generate identification hashes --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/StaticAnalyzer/Core/IssueHash.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/LineIterator.h"
+#include "llvm/Support/MD5.h"
+#include "llvm/Support/Path.h"
+
+#include <functional>
+#include <sstream>
+#include <string>
+
+using namespace clang;
+
+// Get a string representation of the parts of the signature that can be
+// overloaded on.
+static std::string GetSignature(const FunctionDecl *Target) {
+ if (!Target)
+ return "";
+ std::string Signature;
+
+ if (!isa<CXXConstructorDecl>(Target) && !isa<CXXDestructorDecl>(Target) &&
+ !isa<CXXConversionDecl>(Target))
+ Signature.append(Target->getReturnType().getAsString()).append(" ");
+ Signature.append(Target->getQualifiedNameAsString()).append("(");
+
+ for (int i = 0, paramsCount = Target->getNumParams(); i < paramsCount; ++i) {
+ if (i)
+ Signature.append(", ");
+ Signature.append(Target->getParamDecl(i)->getType().getAsString());
+ }
+
+ if (Target->isVariadic())
+ Signature.append(", ...");
+ Signature.append(")");
+
+ const auto *TargetT =
+ llvm::dyn_cast_or_null<FunctionType>(Target->getType().getTypePtr());
+
+ if (!TargetT || !isa<CXXMethodDecl>(Target))
+ return Signature;
+
+ if (TargetT->isConst())
+ Signature.append(" const");
+ if (TargetT->isVolatile())
+ Signature.append(" volatile");
+ if (TargetT->isRestrict())
+ Signature.append(" restrict");
+
+ if (const auto *TargetPT =
+ dyn_cast_or_null<FunctionProtoType>(Target->getType().getTypePtr())) {
+ switch (TargetPT->getRefQualifier()) {
+ case RQ_LValue:
+ Signature.append(" &");
+ break;
+ case RQ_RValue:
+ Signature.append(" &&");
+ break;
+ default:
+ break;
+ }
+ }
+
+ return Signature;
+}
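+// For example (hypothetical declaration): for
+//   struct S { int f(double) const &; };
+// this produces the string "int S::f(double) const &".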
+
+static std::string GetEnclosingDeclContextSignature(const Decl *D) {
+ if (!D)
+ return "";
+
+ if (const auto *ND = dyn_cast<NamedDecl>(D)) {
+ std::string DeclName;
+
+ switch (ND->getKind()) {
+ case Decl::Namespace:
+ case Decl::Record:
+ case Decl::CXXRecord:
+ case Decl::Enum:
+ DeclName = ND->getQualifiedNameAsString();
+ break;
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor:
+ case Decl::CXXConversion:
+ case Decl::CXXMethod:
+ case Decl::Function:
+ DeclName = GetSignature(dyn_cast_or_null<FunctionDecl>(ND));
+ break;
+ case Decl::ObjCMethod:
+      // ObjC methods cannot be overloaded; the qualified name uniquely
+      // identifies the method.
+ DeclName = ND->getQualifiedNameAsString();
+ break;
+ default:
+ break;
+ }
+
+ return DeclName;
+ }
+
+ return "";
+}
+
+static StringRef GetNthLineOfFile(llvm::MemoryBuffer *Buffer, int Line) {
+ if (!Buffer)
+ return "";
+
+ llvm::line_iterator LI(*Buffer, false);
+ for (; !LI.is_at_eof() && LI.line_number() != Line; ++LI)
+ ;
+
+  // Guard against a request for a line past the end of the buffer.
+  return LI.is_at_eof() ? StringRef() : *LI;
+}
+
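+// Produce a whitespace-normalized form of the line containing 'L': lex the
+// line as raw tokens starting at its first non-whitespace character and
+// concatenate the token spellings until the next line begins.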
+static std::string NormalizeLine(const SourceManager &SM, FullSourceLoc &L,
+ const LangOptions &LangOpts) {
+ static StringRef Whitespaces = " \t\n";
+
+ StringRef Str = GetNthLineOfFile(SM.getBuffer(L.getFileID(), L),
+ L.getExpansionLineNumber());
+ unsigned col = Str.find_first_not_of(Whitespaces);
+ col++;
+ SourceLocation StartOfLine =
+ SM.translateLineCol(SM.getFileID(L), L.getExpansionLineNumber(), col);
+ llvm::MemoryBuffer *Buffer =
+ SM.getBuffer(SM.getFileID(StartOfLine), StartOfLine);
+ if (!Buffer)
+ return {};
+
+ const char *BufferPos = SM.getCharacterData(StartOfLine);
+
+ Token Token;
+ Lexer Lexer(SM.getLocForStartOfFile(SM.getFileID(StartOfLine)), LangOpts,
+ Buffer->getBufferStart(), BufferPos, Buffer->getBufferEnd());
+
+ size_t NextStart = 0;
+ std::ostringstream LineBuff;
+ while (!Lexer.LexFromRawLexer(Token) && NextStart < 2) {
+ if (Token.isAtStartOfLine() && NextStart++ > 0)
+ continue;
+ LineBuff << std::string(SM.getCharacterData(Token.getLocation()),
+ Token.getLength());
+ }
+
+ return LineBuff.str();
+}
+
+static llvm::SmallString<32> GetHashOfContent(StringRef Content) {
+ llvm::MD5 Hash;
+ llvm::MD5::MD5Result MD5Res;
+ SmallString<32> Res;
+
+ Hash.update(Content);
+ Hash.final(MD5Res);
+ llvm::MD5::stringifyResult(MD5Res, Res);
+
+ return Res;
+}
+
+std::string clang::GetIssueString(const SourceManager &SM,
+ FullSourceLoc &IssueLoc,
+ StringRef CheckerName, StringRef BugType,
+ const Decl *D,
+ const LangOptions &LangOpts) {
+ static StringRef Delimiter = "$";
+
+ return (llvm::Twine(CheckerName) + Delimiter +
+ GetEnclosingDeclContextSignature(D) + Delimiter +
+ llvm::utostr(IssueLoc.getExpansionColumnNumber()) + Delimiter +
+ NormalizeLine(SM, IssueLoc, LangOpts) + Delimiter + BugType)
+ .str();
+}
+
+SmallString<32> clang::GetIssueHash(const SourceManager &SM,
+ FullSourceLoc &IssueLoc,
+ StringRef CheckerName, StringRef BugType,
+ const Decl *D,
+ const LangOptions &LangOpts) {
+
+ return GetHashOfContent(
+ GetIssueString(SM, IssueLoc, CheckerName, BugType, D, LangOpts));
+}
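Taken together, GetIssueString and GetIssueHash give each report a
location-independent identity: the checker name, the enclosing declaration's
signature, the column number, the normalized line text, and the bug type are
joined with '$' and MD5-hashed, so the result survives edits elsewhere in the
file that only shift absolute line numbers. A minimal sketch of the same
composition with the five components passed in directly (a hypothetical
helper, not part of the file above):

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/MD5.h"

#include <string>

// Join the five components the way GetIssueString does, then MD5-stringify
// the result the way GetIssueHash does.
static llvm::SmallString<32> hashIssue(llvm::StringRef CheckerName,
                                       llvm::StringRef DeclSignature,
                                       unsigned Column,
                                       llvm::StringRef NormalizedLine,
                                       llvm::StringRef BugType) {
  std::string Content = (llvm::Twine(CheckerName) + "$" + DeclSignature +
                         "$" + llvm::utostr(Column) + "$" + NormalizedLine +
                         "$" + BugType)
                            .str();

  llvm::MD5 Hash;
  llvm::MD5::MD5Result Result;
  Hash.update(Content);
  Hash.final(Result);

  llvm::SmallString<32> Digest;
  llvm::MD5::stringifyResult(Result, Digest);
  return Digest;
}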
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp
new file mode 100644
index 0000000..05865c2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp
@@ -0,0 +1,68 @@
+//===--- LoopWidening.cpp - Widen loops -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// This file contains functions which are used to widen loops. A loop may be
+/// widened to approximate the exit state(s), without analyzing every
+/// iteration. The widening is done by invalidating anything which might be
+/// modified by the body of the loop.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h"
+
+using namespace clang;
+using namespace ento;
+
+/// Return the loop's condition Expr, or nullptr if LoopStmt is not a loop.
+static const Expr *getLoopCondition(const Stmt *LoopStmt) {
+ switch (LoopStmt->getStmtClass()) {
+ default:
+ return nullptr;
+ case Stmt::ForStmtClass:
+ return cast<ForStmt>(LoopStmt)->getCond();
+ case Stmt::WhileStmtClass:
+ return cast<WhileStmt>(LoopStmt)->getCond();
+ case Stmt::DoStmtClass:
+ return cast<DoStmt>(LoopStmt)->getCond();
+ }
+}
+
+namespace clang {
+namespace ento {
+
+ProgramStateRef getWidenedLoopState(ProgramStateRef PrevState,
+ const LocationContext *LCtx,
+ unsigned BlockCount, const Stmt *LoopStmt) {
+
+ assert(isa<ForStmt>(LoopStmt) || isa<WhileStmt>(LoopStmt) ||
+ isa<DoStmt>(LoopStmt));
+
+ // Invalidate values in the current state.
+  // TODO Make this more precise by only invalidating values that might
+  //      be modified by the body of the loop.
+  // TODO Nested loops are currently widened as a result of the invalidation
+  //      being so imprecise. When the invalidation is improved, the handling
+ // of nested loops will also need to be improved.
+ const StackFrameContext *STC = LCtx->getCurrentStackFrame();
+ MemRegionManager &MRMgr = PrevState->getStateManager().getRegionManager();
+ const MemRegion *Regions[] = {MRMgr.getStackLocalsRegion(STC),
+ MRMgr.getStackArgumentsRegion(STC),
+ MRMgr.getGlobalsRegion()};
+ RegionAndSymbolInvalidationTraits ITraits;
+ for (auto *Region : Regions) {
+ ITraits.setTrait(Region,
+ RegionAndSymbolInvalidationTraits::TK_EntireMemSpace);
+ }
+ return PrevState->invalidateRegions(Regions, getLoopCondition(LoopStmt),
+ BlockCount, LCtx, true, nullptr, nullptr,
+ &ITraits);
+}
+
+} // end namespace ento
+} // end namespace clang
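Because getWidenedLoopState unconditionally invalidates the stack-locals,
stack-arguments, and globals spaces, a caller only has to decide when to stop
unrolling and widen. A hedged sketch of such a call site, with the visit
threshold passed in explicitly (hypothetical helper; the real wiring into the
analysis engine is not part of this file):

#include "clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h"

using namespace clang;
using namespace ento;

// Keep analyzing iterations precisely until a visit threshold is reached,
// then approximate all remaining iterations with one widened state.
// LoopStmt must be a for/while/do statement (asserted by the callee).
static ProgramStateRef maybeWidenLoop(ProgramStateRef State,
                                      const LocationContext *LCtx,
                                      unsigned BlockCount, unsigned Threshold,
                                      const Stmt *LoopStmt) {
  if (BlockCount < Threshold)
    return State; // analyze this iteration precisely
  return getWidenedLoopState(State, LCtx, BlockCount, LoopStmt);
}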
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
new file mode 100644
index 0000000..ad3f396
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -0,0 +1,1508 @@
+//== MemRegion.cpp - Abstract memory regions for static analysis --*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines MemRegion and its subclasses. MemRegion defines a
+// partially-typed abstraction of memory useful for path-sensitive dataflow
+// analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/Support/BumpVector.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// MemRegion Construction.
+//===----------------------------------------------------------------------===//
+
+template<typename RegionTy> struct MemRegionManagerTrait;
+
+template <typename RegionTy, typename A1>
+RegionTy* MemRegionManager::getRegion(const A1 a1) {
+
+ const typename MemRegionManagerTrait<RegionTy>::SuperRegionTy *superRegion =
+ MemRegionManagerTrait<RegionTy>::getSuperRegion(*this, a1);
+
+ llvm::FoldingSetNodeID ID;
+ RegionTy::ProfileRegion(ID, a1, superRegion);
+ void *InsertPos;
+ RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
+ InsertPos));
+
+ if (!R) {
+ R = (RegionTy*) A.Allocate<RegionTy>();
+ new (R) RegionTy(a1, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+template <typename RegionTy, typename A1>
+RegionTy* MemRegionManager::getSubRegion(const A1 a1,
+ const MemRegion *superRegion) {
+ llvm::FoldingSetNodeID ID;
+ RegionTy::ProfileRegion(ID, a1, superRegion);
+ void *InsertPos;
+ RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
+ InsertPos));
+
+ if (!R) {
+ R = (RegionTy*) A.Allocate<RegionTy>();
+ new (R) RegionTy(a1, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+template <typename RegionTy, typename A1, typename A2>
+RegionTy* MemRegionManager::getRegion(const A1 a1, const A2 a2) {
+
+ const typename MemRegionManagerTrait<RegionTy>::SuperRegionTy *superRegion =
+ MemRegionManagerTrait<RegionTy>::getSuperRegion(*this, a1, a2);
+
+ llvm::FoldingSetNodeID ID;
+ RegionTy::ProfileRegion(ID, a1, a2, superRegion);
+ void *InsertPos;
+ RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
+ InsertPos));
+
+ if (!R) {
+ R = (RegionTy*) A.Allocate<RegionTy>();
+ new (R) RegionTy(a1, a2, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+template <typename RegionTy, typename A1, typename A2>
+RegionTy* MemRegionManager::getSubRegion(const A1 a1, const A2 a2,
+ const MemRegion *superRegion) {
+
+ llvm::FoldingSetNodeID ID;
+ RegionTy::ProfileRegion(ID, a1, a2, superRegion);
+ void *InsertPos;
+ RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
+ InsertPos));
+
+ if (!R) {
+ R = (RegionTy*) A.Allocate<RegionTy>();
+ new (R) RegionTy(a1, a2, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+template <typename RegionTy, typename A1, typename A2, typename A3>
+RegionTy* MemRegionManager::getSubRegion(const A1 a1, const A2 a2, const A3 a3,
+ const MemRegion *superRegion) {
+
+ llvm::FoldingSetNodeID ID;
+ RegionTy::ProfileRegion(ID, a1, a2, a3, superRegion);
+ void *InsertPos;
+ RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
+ InsertPos));
+
+ if (!R) {
+ R = (RegionTy*) A.Allocate<RegionTy>();
+ new (R) RegionTy(a1, a2, a3, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+//===----------------------------------------------------------------------===//
+// Object destruction.
+//===----------------------------------------------------------------------===//
+
+MemRegion::~MemRegion() {}
+
+MemRegionManager::~MemRegionManager() {
+ // All regions and their data are BumpPtrAllocated. No need to call
+ // their destructors.
+}
+
+//===----------------------------------------------------------------------===//
+// Basic methods.
+//===----------------------------------------------------------------------===//
+
+bool SubRegion::isSubRegionOf(const MemRegion* R) const {
+ const MemRegion* r = getSuperRegion();
+ while (r != nullptr) {
+ if (r == R)
+ return true;
+ if (const SubRegion* sr = dyn_cast<SubRegion>(r))
+ r = sr->getSuperRegion();
+ else
+ break;
+ }
+ return false;
+}
+
+MemRegionManager* SubRegion::getMemRegionManager() const {
+ const SubRegion* r = this;
+ do {
+ const MemRegion *superRegion = r->getSuperRegion();
+ if (const SubRegion *sr = dyn_cast<SubRegion>(superRegion)) {
+ r = sr;
+ continue;
+ }
+ return superRegion->getMemRegionManager();
+ } while (1);
+}
+
+const StackFrameContext *VarRegion::getStackFrame() const {
+ const StackSpaceRegion *SSR = dyn_cast<StackSpaceRegion>(getMemorySpace());
+ return SSR ? SSR->getStackFrame() : nullptr;
+}
+
+//===----------------------------------------------------------------------===//
+// Region extents.
+//===----------------------------------------------------------------------===//
+
+DefinedOrUnknownSVal TypedValueRegion::getExtent(SValBuilder &svalBuilder) const {
+ ASTContext &Ctx = svalBuilder.getContext();
+ QualType T = getDesugaredValueType(Ctx);
+
+ if (isa<VariableArrayType>(T))
+ return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this));
+ if (T->isIncompleteType())
+ return UnknownVal();
+
+ CharUnits size = Ctx.getTypeSizeInChars(T);
+ QualType sizeTy = svalBuilder.getArrayIndexType();
+ return svalBuilder.makeIntVal(size.getQuantity(), sizeTy);
+}
+
+DefinedOrUnknownSVal FieldRegion::getExtent(SValBuilder &svalBuilder) const {
+ // Force callers to deal with bitfields explicitly.
+ if (getDecl()->isBitField())
+ return UnknownVal();
+
+ DefinedOrUnknownSVal Extent = DeclRegion::getExtent(svalBuilder);
+
+ // A zero-length array at the end of a struct often stands for dynamically-
+ // allocated extra memory.
+ if (Extent.isZeroConstant()) {
+ QualType T = getDesugaredValueType(svalBuilder.getContext());
+
+ if (isa<ConstantArrayType>(T))
+ return UnknownVal();
+ }
+
+ return Extent;
+}
+
+DefinedOrUnknownSVal AllocaRegion::getExtent(SValBuilder &svalBuilder) const {
+ return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this));
+}
+
+DefinedOrUnknownSVal SymbolicRegion::getExtent(SValBuilder &svalBuilder) const {
+ return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this));
+}
+
+DefinedOrUnknownSVal StringRegion::getExtent(SValBuilder &svalBuilder) const {
+ return svalBuilder.makeIntVal(getStringLiteral()->getByteLength()+1,
+ svalBuilder.getArrayIndexType());
+}
+
+ObjCIvarRegion::ObjCIvarRegion(const ObjCIvarDecl *ivd, const MemRegion* sReg)
+ : DeclRegion(ivd, sReg, ObjCIvarRegionKind) {}
+
+const ObjCIvarDecl *ObjCIvarRegion::getDecl() const {
+ return cast<ObjCIvarDecl>(D);
+}
+
+QualType ObjCIvarRegion::getValueType() const {
+ return getDecl()->getType();
+}
+
+QualType CXXBaseObjectRegion::getValueType() const {
+ return QualType(getDecl()->getTypeForDecl(), 0);
+}
+
+//===----------------------------------------------------------------------===//
+// FoldingSet profiling.
+//===----------------------------------------------------------------------===//
+
+void MemSpaceRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.AddInteger((unsigned)getKind());
+}
+
+void StackSpaceRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger((unsigned)getKind());
+ ID.AddPointer(getStackFrame());
+}
+
+void StaticGlobalSpaceRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger((unsigned)getKind());
+ ID.AddPointer(getCodeRegion());
+}
+
+void StringRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const StringLiteral* Str,
+ const MemRegion* superRegion) {
+ ID.AddInteger((unsigned) StringRegionKind);
+ ID.AddPointer(Str);
+ ID.AddPointer(superRegion);
+}
+
+void ObjCStringRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const ObjCStringLiteral* Str,
+ const MemRegion* superRegion) {
+ ID.AddInteger((unsigned) ObjCStringRegionKind);
+ ID.AddPointer(Str);
+ ID.AddPointer(superRegion);
+}
+
+void AllocaRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const Expr *Ex, unsigned cnt,
+ const MemRegion *superRegion) {
+ ID.AddInteger((unsigned) AllocaRegionKind);
+ ID.AddPointer(Ex);
+ ID.AddInteger(cnt);
+ ID.AddPointer(superRegion);
+}
+
+void AllocaRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ ProfileRegion(ID, Ex, Cnt, superRegion);
+}
+
+void CompoundLiteralRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ CompoundLiteralRegion::ProfileRegion(ID, CL, superRegion);
+}
+
+void CompoundLiteralRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const CompoundLiteralExpr *CL,
+ const MemRegion* superRegion) {
+ ID.AddInteger((unsigned) CompoundLiteralRegionKind);
+ ID.AddPointer(CL);
+ ID.AddPointer(superRegion);
+}
+
+void CXXThisRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
+ const PointerType *PT,
+ const MemRegion *sRegion) {
+ ID.AddInteger((unsigned) CXXThisRegionKind);
+ ID.AddPointer(PT);
+ ID.AddPointer(sRegion);
+}
+
+void CXXThisRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ CXXThisRegion::ProfileRegion(ID, ThisPointerTy, superRegion);
+}
+
+void ObjCIvarRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const ObjCIvarDecl *ivd,
+ const MemRegion* superRegion) {
+ DeclRegion::ProfileRegion(ID, ivd, superRegion, ObjCIvarRegionKind);
+}
+
+void DeclRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, const Decl *D,
+ const MemRegion* superRegion, Kind k) {
+ ID.AddInteger((unsigned) k);
+ ID.AddPointer(D);
+ ID.AddPointer(superRegion);
+}
+
+void DeclRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ DeclRegion::ProfileRegion(ID, D, superRegion, getKind());
+}
+
+void VarRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ VarRegion::ProfileRegion(ID, getDecl(), superRegion);
+}
+
+void SymbolicRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, SymbolRef sym,
+ const MemRegion *sreg) {
+ ID.AddInteger((unsigned) MemRegion::SymbolicRegionKind);
+ ID.Add(sym);
+ ID.AddPointer(sreg);
+}
+
+void SymbolicRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ SymbolicRegion::ProfileRegion(ID, sym, getSuperRegion());
+}
+
+void ElementRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ QualType ElementType, SVal Idx,
+ const MemRegion* superRegion) {
+ ID.AddInteger(MemRegion::ElementRegionKind);
+ ID.Add(ElementType);
+ ID.AddPointer(superRegion);
+ Idx.Profile(ID);
+}
+
+void ElementRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ ElementRegion::ProfileRegion(ID, ElementType, Index, superRegion);
+}
+
+void FunctionTextRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const NamedDecl *FD,
+ const MemRegion*) {
+ ID.AddInteger(MemRegion::FunctionTextRegionKind);
+ ID.AddPointer(FD);
+}
+
+void FunctionTextRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ FunctionTextRegion::ProfileRegion(ID, FD, superRegion);
+}
+
+void BlockTextRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const BlockDecl *BD, CanQualType,
+ const AnalysisDeclContext *AC,
+ const MemRegion*) {
+ ID.AddInteger(MemRegion::BlockTextRegionKind);
+ ID.AddPointer(BD);
+}
+
+void BlockTextRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ BlockTextRegion::ProfileRegion(ID, BD, locTy, AC, superRegion);
+}
+
+void BlockDataRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const BlockTextRegion *BC,
+ const LocationContext *LC,
+ unsigned BlkCount,
+ const MemRegion *sReg) {
+ ID.AddInteger(MemRegion::BlockDataRegionKind);
+ ID.AddPointer(BC);
+ ID.AddPointer(LC);
+ ID.AddInteger(BlkCount);
+ ID.AddPointer(sReg);
+}
+
+void BlockDataRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ BlockDataRegion::ProfileRegion(ID, BC, LC, BlockCount, getSuperRegion());
+}
+
+void CXXTempObjectRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
+ Expr const *Ex,
+ const MemRegion *sReg) {
+ ID.AddPointer(Ex);
+ ID.AddPointer(sReg);
+}
+
+void CXXTempObjectRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ProfileRegion(ID, Ex, getSuperRegion());
+}
+
+void CXXBaseObjectRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
+ const CXXRecordDecl *RD,
+ bool IsVirtual,
+ const MemRegion *SReg) {
+ ID.AddPointer(RD);
+ ID.AddBoolean(IsVirtual);
+ ID.AddPointer(SReg);
+}
+
+void CXXBaseObjectRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ProfileRegion(ID, getDecl(), isVirtual(), superRegion);
+}
+
+//===----------------------------------------------------------------------===//
+// Region anchors.
+//===----------------------------------------------------------------------===//
+
+void GlobalsSpaceRegion::anchor() { }
+void HeapSpaceRegion::anchor() { }
+void UnknownSpaceRegion::anchor() { }
+void StackLocalsSpaceRegion::anchor() { }
+void StackArgumentsSpaceRegion::anchor() { }
+void TypedRegion::anchor() { }
+void TypedValueRegion::anchor() { }
+void CodeTextRegion::anchor() { }
+void SubRegion::anchor() { }
+
+//===----------------------------------------------------------------------===//
+// Region pretty-printing.
+//===----------------------------------------------------------------------===//
+
+void MemRegion::dump() const {
+ dumpToStream(llvm::errs());
+}
+
+std::string MemRegion::getString() const {
+ std::string s;
+ llvm::raw_string_ostream os(s);
+ dumpToStream(os);
+ return os.str();
+}
+
+void MemRegion::dumpToStream(raw_ostream &os) const {
+ os << "<Unknown Region>";
+}
+
+void AllocaRegion::dumpToStream(raw_ostream &os) const {
+ os << "alloca{" << (const void*) Ex << ',' << Cnt << '}';
+}
+
+void FunctionTextRegion::dumpToStream(raw_ostream &os) const {
+ os << "code{" << getDecl()->getDeclName().getAsString() << '}';
+}
+
+void BlockTextRegion::dumpToStream(raw_ostream &os) const {
+ os << "block_code{" << (const void*) this << '}';
+}
+
+void BlockDataRegion::dumpToStream(raw_ostream &os) const {
+ os << "block_data{" << BC;
+ os << "; ";
+ for (BlockDataRegion::referenced_vars_iterator
+ I = referenced_vars_begin(),
+ E = referenced_vars_end(); I != E; ++I)
+ os << "(" << I.getCapturedRegion() << "," <<
+ I.getOriginalRegion() << ") ";
+ os << '}';
+}
+
+void CompoundLiteralRegion::dumpToStream(raw_ostream &os) const {
+ // FIXME: More elaborate pretty-printing.
+ os << "{ " << (const void*) CL << " }";
+}
+
+void CXXTempObjectRegion::dumpToStream(raw_ostream &os) const {
+ os << "temp_object{" << getValueType().getAsString() << ','
+ << (const void*) Ex << '}';
+}
+
+void CXXBaseObjectRegion::dumpToStream(raw_ostream &os) const {
+ os << "base{" << superRegion << ',' << getDecl()->getName() << '}';
+}
+
+void CXXThisRegion::dumpToStream(raw_ostream &os) const {
+ os << "this";
+}
+
+void ElementRegion::dumpToStream(raw_ostream &os) const {
+ os << "element{" << superRegion << ','
+ << Index << ',' << getElementType().getAsString() << '}';
+}
+
+void FieldRegion::dumpToStream(raw_ostream &os) const {
+ os << superRegion << "->" << *getDecl();
+}
+
+void ObjCIvarRegion::dumpToStream(raw_ostream &os) const {
+ os << "ivar{" << superRegion << ',' << *getDecl() << '}';
+}
+
+void StringRegion::dumpToStream(raw_ostream &os) const {
+ assert(Str != nullptr && "Expecting non-null StringLiteral");
+ Str->printPretty(os, nullptr, PrintingPolicy(getContext().getLangOpts()));
+}
+
+void ObjCStringRegion::dumpToStream(raw_ostream &os) const {
+ assert(Str != nullptr && "Expecting non-null ObjCStringLiteral");
+ Str->printPretty(os, nullptr, PrintingPolicy(getContext().getLangOpts()));
+}
+
+void SymbolicRegion::dumpToStream(raw_ostream &os) const {
+ os << "SymRegion{" << sym << '}';
+}
+
+void VarRegion::dumpToStream(raw_ostream &os) const {
+ os << *cast<VarDecl>(D);
+}
+
+void RegionRawOffset::dump() const {
+ dumpToStream(llvm::errs());
+}
+
+void RegionRawOffset::dumpToStream(raw_ostream &os) const {
+ os << "raw_offset{" << getRegion() << ',' << getOffset().getQuantity() << '}';
+}
+
+void StaticGlobalSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "StaticGlobalsMemSpace{" << CR << '}';
+}
+
+void GlobalInternalSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "GlobalInternalSpaceRegion";
+}
+
+void GlobalSystemSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "GlobalSystemSpaceRegion";
+}
+
+void GlobalImmutableSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "GlobalImmutableSpaceRegion";
+}
+
+void HeapSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "HeapSpaceRegion";
+}
+
+void UnknownSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "UnknownSpaceRegion";
+}
+
+void StackArgumentsSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "StackArgumentsSpaceRegion";
+}
+
+void StackLocalsSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "StackLocalsSpaceRegion";
+}
+
+bool MemRegion::canPrintPretty() const {
+ return canPrintPrettyAsExpr();
+}
+
+bool MemRegion::canPrintPrettyAsExpr() const {
+ return false;
+}
+
+void MemRegion::printPretty(raw_ostream &os) const {
+ assert(canPrintPretty() && "This region cannot be printed pretty.");
+ os << "'";
+ printPrettyAsExpr(os);
+ os << "'";
+}
+
+void MemRegion::printPrettyAsExpr(raw_ostream &os) const {
+ llvm_unreachable("This region cannot be printed pretty.");
+}
+
+bool VarRegion::canPrintPrettyAsExpr() const {
+ return true;
+}
+
+void VarRegion::printPrettyAsExpr(raw_ostream &os) const {
+ os << getDecl()->getName();
+}
+
+bool ObjCIvarRegion::canPrintPrettyAsExpr() const {
+ return true;
+}
+
+void ObjCIvarRegion::printPrettyAsExpr(raw_ostream &os) const {
+ os << getDecl()->getName();
+}
+
+bool FieldRegion::canPrintPretty() const {
+ return true;
+}
+
+bool FieldRegion::canPrintPrettyAsExpr() const {
+ return superRegion->canPrintPrettyAsExpr();
+}
+
+void FieldRegion::printPrettyAsExpr(raw_ostream &os) const {
+ assert(canPrintPrettyAsExpr());
+ superRegion->printPrettyAsExpr(os);
+ os << "." << getDecl()->getName();
+}
+
+void FieldRegion::printPretty(raw_ostream &os) const {
+  if (canPrintPrettyAsExpr()) {
+    os << "'";
+    printPrettyAsExpr(os);
+    os << "'";
+  } else {
+    os << "field '" << getDecl()->getName() << "'";
+  }
+}
+
+bool CXXBaseObjectRegion::canPrintPrettyAsExpr() const {
+ return superRegion->canPrintPrettyAsExpr();
+}
+
+void CXXBaseObjectRegion::printPrettyAsExpr(raw_ostream &os) const {
+ superRegion->printPrettyAsExpr(os);
+}
+
+//===----------------------------------------------------------------------===//
+// MemRegionManager methods.
+//===----------------------------------------------------------------------===//
+
+template <typename REG>
+const REG *MemRegionManager::LazyAllocate(REG*& region) {
+ if (!region) {
+ region = (REG*) A.Allocate<REG>();
+ new (region) REG(this);
+ }
+
+ return region;
+}
+
+template <typename REG, typename ARG>
+const REG *MemRegionManager::LazyAllocate(REG*& region, ARG a) {
+ if (!region) {
+ region = (REG*) A.Allocate<REG>();
+ new (region) REG(this, a);
+ }
+
+ return region;
+}
+
+const StackLocalsSpaceRegion*
+MemRegionManager::getStackLocalsRegion(const StackFrameContext *STC) {
+ assert(STC);
+ StackLocalsSpaceRegion *&R = StackLocalsSpaceRegions[STC];
+
+ if (R)
+ return R;
+
+ R = A.Allocate<StackLocalsSpaceRegion>();
+ new (R) StackLocalsSpaceRegion(this, STC);
+ return R;
+}
+
+const StackArgumentsSpaceRegion *
+MemRegionManager::getStackArgumentsRegion(const StackFrameContext *STC) {
+ assert(STC);
+ StackArgumentsSpaceRegion *&R = StackArgumentsSpaceRegions[STC];
+
+ if (R)
+ return R;
+
+ R = A.Allocate<StackArgumentsSpaceRegion>();
+ new (R) StackArgumentsSpaceRegion(this, STC);
+ return R;
+}
+
+const GlobalsSpaceRegion *
+MemRegionManager::getGlobalsRegion(MemRegion::Kind K,
+                                   const CodeTextRegion *CR) {
+ if (!CR) {
+ if (K == MemRegion::GlobalSystemSpaceRegionKind)
+ return LazyAllocate(SystemGlobals);
+ if (K == MemRegion::GlobalImmutableSpaceRegionKind)
+ return LazyAllocate(ImmutableGlobals);
+ assert(K == MemRegion::GlobalInternalSpaceRegionKind);
+ return LazyAllocate(InternalGlobals);
+ }
+
+ assert(K == MemRegion::StaticGlobalSpaceRegionKind);
+ StaticGlobalSpaceRegion *&R = StaticsGlobalSpaceRegions[CR];
+ if (R)
+ return R;
+
+ R = A.Allocate<StaticGlobalSpaceRegion>();
+ new (R) StaticGlobalSpaceRegion(this, CR);
+ return R;
+}
+
+const HeapSpaceRegion *MemRegionManager::getHeapRegion() {
+ return LazyAllocate(heap);
+}
+
+const MemSpaceRegion *MemRegionManager::getUnknownRegion() {
+ return LazyAllocate(unknown);
+}
+
+const MemSpaceRegion *MemRegionManager::getCodeRegion() {
+ return LazyAllocate(code);
+}
+
+//===----------------------------------------------------------------------===//
+// Constructing regions.
+//===----------------------------------------------------------------------===//
+const StringRegion* MemRegionManager::getStringRegion(const StringLiteral* Str){
+ return getSubRegion<StringRegion>(Str, getGlobalsRegion());
+}
+
+const ObjCStringRegion *
+MemRegionManager::getObjCStringRegion(const ObjCStringLiteral* Str){
+ return getSubRegion<ObjCStringRegion>(Str, getGlobalsRegion());
+}
+
+/// Look through a chain of LocationContexts to either find the
+/// StackFrameContext that matches a DeclContext, or find a VarRegion
+/// for a variable captured by a block.
+static llvm::PointerUnion<const StackFrameContext *, const VarRegion *>
+getStackOrCaptureRegionForDeclContext(const LocationContext *LC,
+ const DeclContext *DC,
+ const VarDecl *VD) {
+ while (LC) {
+ if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LC)) {
+ if (cast<DeclContext>(SFC->getDecl()) == DC)
+ return SFC;
+ }
+ if (const BlockInvocationContext *BC =
+ dyn_cast<BlockInvocationContext>(LC)) {
+ const BlockDataRegion *BR =
+ static_cast<const BlockDataRegion*>(BC->getContextData());
+ // FIXME: This can be made more efficient.
+ for (BlockDataRegion::referenced_vars_iterator
+ I = BR->referenced_vars_begin(),
+ E = BR->referenced_vars_end(); I != E; ++I) {
+ if (const VarRegion *VR = dyn_cast<VarRegion>(I.getOriginalRegion()))
+ if (VR->getDecl() == VD)
+ return cast<VarRegion>(I.getCapturedRegion());
+ }
+ }
+
+ LC = LC->getParent();
+ }
+ return (const StackFrameContext *)nullptr;
+}
+
+const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
+ const LocationContext *LC) {
+ const MemRegion *sReg = nullptr;
+
+ if (D->hasGlobalStorage() && !D->isStaticLocal()) {
+
+ // First handle the globals defined in system headers.
+ if (C.getSourceManager().isInSystemHeader(D->getLocation())) {
+      // Whitelist the system globals which often DO GET modified; assume the
+ // rest are immutable.
+ if (D->getName().find("errno") != StringRef::npos)
+ sReg = getGlobalsRegion(MemRegion::GlobalSystemSpaceRegionKind);
+ else
+ sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
+
+ // Treat other globals as GlobalInternal unless they are constants.
+ } else {
+ QualType GQT = D->getType();
+ const Type *GT = GQT.getTypePtrOrNull();
+ // TODO: We could walk the complex types here and see if everything is
+ // constified.
+ if (GT && GQT.isConstQualified() && GT->isArithmeticType())
+ sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
+ else
+ sReg = getGlobalsRegion();
+ }
+
+ // Finally handle static locals.
+ } else {
+ // FIXME: Once we implement scope handling, we will need to properly lookup
+ // 'D' to the proper LocationContext.
+ const DeclContext *DC = D->getDeclContext();
+ llvm::PointerUnion<const StackFrameContext *, const VarRegion *> V =
+ getStackOrCaptureRegionForDeclContext(LC, DC, D);
+
+ if (V.is<const VarRegion*>())
+ return V.get<const VarRegion*>();
+
+ const StackFrameContext *STC = V.get<const StackFrameContext*>();
+
+ if (!STC)
+ sReg = getUnknownRegion();
+ else {
+ if (D->hasLocalStorage()) {
+ sReg = isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)
+ ? static_cast<const MemRegion*>(getStackArgumentsRegion(STC))
+ : static_cast<const MemRegion*>(getStackLocalsRegion(STC));
+ }
+ else {
+ assert(D->isStaticLocal());
+ const Decl *STCD = STC->getDecl();
+ if (isa<FunctionDecl>(STCD) || isa<ObjCMethodDecl>(STCD))
+ sReg = getGlobalsRegion(MemRegion::StaticGlobalSpaceRegionKind,
+ getFunctionTextRegion(cast<NamedDecl>(STCD)));
+ else if (const BlockDecl *BD = dyn_cast<BlockDecl>(STCD)) {
+ // FIXME: The fallback type here is totally bogus -- though it should
+ // never be queried, it will prevent uniquing with the real
+ // BlockTextRegion. Ideally we'd fix the AST so that we always had a
+ // signature.
+ QualType T;
+ if (const TypeSourceInfo *TSI = BD->getSignatureAsWritten())
+ T = TSI->getType();
+ if (T.isNull())
+ T = getContext().VoidTy;
+ if (!T->getAs<FunctionType>())
+ T = getContext().getFunctionNoProtoType(T);
+ T = getContext().getBlockPointerType(T);
+
+ const BlockTextRegion *BTR =
+ getBlockTextRegion(BD, C.getCanonicalType(T),
+ STC->getAnalysisDeclContext());
+ sReg = getGlobalsRegion(MemRegion::StaticGlobalSpaceRegionKind,
+ BTR);
+ }
+ else {
+ sReg = getGlobalsRegion();
+ }
+ }
+ }
+ }
+
+ return getSubRegion<VarRegion>(D, sReg);
+}
+
+const VarRegion *MemRegionManager::getVarRegion(const VarDecl *D,
+ const MemRegion *superR) {
+ return getSubRegion<VarRegion>(D, superR);
+}
+
+const BlockDataRegion *
+MemRegionManager::getBlockDataRegion(const BlockTextRegion *BC,
+ const LocationContext *LC,
+ unsigned blockCount) {
+ const MemRegion *sReg = nullptr;
+ const BlockDecl *BD = BC->getDecl();
+ if (!BD->hasCaptures()) {
+ // This handles 'static' blocks.
+ sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
+ }
+ else {
+ if (LC) {
+ // FIXME: Once we implement scope handling, we want the parent region
+ // to be the scope.
+ const StackFrameContext *STC = LC->getCurrentStackFrame();
+ assert(STC);
+ sReg = getStackLocalsRegion(STC);
+ }
+ else {
+      // We allow 'LC' to be NULL for cases where we want BlockDataRegions
+ // without context-sensitivity.
+ sReg = getUnknownRegion();
+ }
+ }
+
+ return getSubRegion<BlockDataRegion>(BC, LC, blockCount, sReg);
+}
+
+const CXXTempObjectRegion *
+MemRegionManager::getCXXStaticTempObjectRegion(const Expr *Ex) {
+ return getSubRegion<CXXTempObjectRegion>(
+ Ex, getGlobalsRegion(MemRegion::GlobalInternalSpaceRegionKind, nullptr));
+}
+
+const CompoundLiteralRegion*
+MemRegionManager::getCompoundLiteralRegion(const CompoundLiteralExpr *CL,
+ const LocationContext *LC) {
+
+ const MemRegion *sReg = nullptr;
+
+ if (CL->isFileScope())
+ sReg = getGlobalsRegion();
+ else {
+ const StackFrameContext *STC = LC->getCurrentStackFrame();
+ assert(STC);
+ sReg = getStackLocalsRegion(STC);
+ }
+
+ return getSubRegion<CompoundLiteralRegion>(CL, sReg);
+}
+
+const ElementRegion*
+MemRegionManager::getElementRegion(QualType elementType, NonLoc Idx,
+ const MemRegion* superRegion,
+ ASTContext &Ctx){
+
+ QualType T = Ctx.getCanonicalType(elementType).getUnqualifiedType();
+
+ llvm::FoldingSetNodeID ID;
+ ElementRegion::ProfileRegion(ID, T, Idx, superRegion);
+
+ void *InsertPos;
+ MemRegion* data = Regions.FindNodeOrInsertPos(ID, InsertPos);
+ ElementRegion* R = cast_or_null<ElementRegion>(data);
+
+ if (!R) {
+ R = (ElementRegion*) A.Allocate<ElementRegion>();
+ new (R) ElementRegion(T, Idx, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+const FunctionTextRegion *
+MemRegionManager::getFunctionTextRegion(const NamedDecl *FD) {
+ return getSubRegion<FunctionTextRegion>(FD, getCodeRegion());
+}
+
+const BlockTextRegion *
+MemRegionManager::getBlockTextRegion(const BlockDecl *BD, CanQualType locTy,
+ AnalysisDeclContext *AC) {
+ return getSubRegion<BlockTextRegion>(BD, locTy, AC, getCodeRegion());
+}
+
+/// getSymbolicRegion - Retrieve or create a "symbolic" memory region.
+const SymbolicRegion *MemRegionManager::getSymbolicRegion(SymbolRef sym) {
+ return getSubRegion<SymbolicRegion>(sym, getUnknownRegion());
+}
+
+const SymbolicRegion *MemRegionManager::getSymbolicHeapRegion(SymbolRef Sym) {
+ return getSubRegion<SymbolicRegion>(Sym, getHeapRegion());
+}
+
+const FieldRegion*
+MemRegionManager::getFieldRegion(const FieldDecl *d,
+ const MemRegion* superRegion){
+ return getSubRegion<FieldRegion>(d, superRegion);
+}
+
+const ObjCIvarRegion*
+MemRegionManager::getObjCIvarRegion(const ObjCIvarDecl *d,
+ const MemRegion* superRegion) {
+ return getSubRegion<ObjCIvarRegion>(d, superRegion);
+}
+
+const CXXTempObjectRegion*
+MemRegionManager::getCXXTempObjectRegion(Expr const *E,
+ LocationContext const *LC) {
+ const StackFrameContext *SFC = LC->getCurrentStackFrame();
+ assert(SFC);
+ return getSubRegion<CXXTempObjectRegion>(E, getStackLocalsRegion(SFC));
+}
+
+/// Checks whether \p BaseClass is a valid virtual or direct non-virtual base
+/// class of the type of \p Super.
+static bool isValidBaseClass(const CXXRecordDecl *BaseClass,
+ const TypedValueRegion *Super,
+ bool IsVirtual) {
+ BaseClass = BaseClass->getCanonicalDecl();
+
+ const CXXRecordDecl *Class = Super->getValueType()->getAsCXXRecordDecl();
+ if (!Class)
+ return true;
+
+ if (IsVirtual)
+ return Class->isVirtuallyDerivedFrom(BaseClass);
+
+ for (const auto &I : Class->bases()) {
+ if (I.getType()->getAsCXXRecordDecl()->getCanonicalDecl() == BaseClass)
+ return true;
+ }
+
+ return false;
+}
+
+const CXXBaseObjectRegion *
+MemRegionManager::getCXXBaseObjectRegion(const CXXRecordDecl *RD,
+ const MemRegion *Super,
+ bool IsVirtual) {
+ if (isa<TypedValueRegion>(Super)) {
+ assert(isValidBaseClass(RD, dyn_cast<TypedValueRegion>(Super), IsVirtual));
+ (void)&isValidBaseClass;
+
+ if (IsVirtual) {
+ // Virtual base regions should not be layered, since the layout rules
+ // are different.
+ while (const CXXBaseObjectRegion *Base =
+ dyn_cast<CXXBaseObjectRegion>(Super)) {
+ Super = Base->getSuperRegion();
+ }
+ assert(Super && !isa<MemSpaceRegion>(Super));
+ }
+ }
+
+ return getSubRegion<CXXBaseObjectRegion>(RD, IsVirtual, Super);
+}
+
+const CXXThisRegion*
+MemRegionManager::getCXXThisRegion(QualType thisPointerTy,
+ const LocationContext *LC) {
+ const PointerType *PT = thisPointerTy->getAs<PointerType>();
+ assert(PT);
+  // Inside the body of a lambda's operator(), a 'this' expression might
+  // refer to an object in one of the parent location contexts.
+ const auto *D = dyn_cast<CXXMethodDecl>(LC->getDecl());
+  // FIXME: when the operator() of a lambda is analyzed as a top-level
+  // function and 'this' refers to the 'this' of the enclosing scope, there
+  // is no right region to return.
+ while (!LC->inTopFrame() &&
+ (!D || D->isStatic() ||
+ PT != D->getThisType(getContext())->getAs<PointerType>())) {
+ LC = LC->getParent();
+ D = dyn_cast<CXXMethodDecl>(LC->getDecl());
+ }
+ const StackFrameContext *STC = LC->getCurrentStackFrame();
+ assert(STC);
+ return getSubRegion<CXXThisRegion>(PT, getStackArgumentsRegion(STC));
+}
+
+const AllocaRegion*
+MemRegionManager::getAllocaRegion(const Expr *E, unsigned cnt,
+ const LocationContext *LC) {
+ const StackFrameContext *STC = LC->getCurrentStackFrame();
+ assert(STC);
+ return getSubRegion<AllocaRegion>(E, cnt, getStackLocalsRegion(STC));
+}
+
+const MemSpaceRegion *MemRegion::getMemorySpace() const {
+ const MemRegion *R = this;
+ const SubRegion* SR = dyn_cast<SubRegion>(this);
+
+ while (SR) {
+ R = SR->getSuperRegion();
+ SR = dyn_cast<SubRegion>(R);
+ }
+
+ return dyn_cast<MemSpaceRegion>(R);
+}
+
+bool MemRegion::hasStackStorage() const {
+ return isa<StackSpaceRegion>(getMemorySpace());
+}
+
+bool MemRegion::hasStackNonParametersStorage() const {
+ return isa<StackLocalsSpaceRegion>(getMemorySpace());
+}
+
+bool MemRegion::hasStackParametersStorage() const {
+ return isa<StackArgumentsSpaceRegion>(getMemorySpace());
+}
+
+bool MemRegion::hasGlobalsOrParametersStorage() const {
+ const MemSpaceRegion *MS = getMemorySpace();
+ return isa<StackArgumentsSpaceRegion>(MS) ||
+ isa<GlobalsSpaceRegion>(MS);
+}
+
+// getBaseRegion strips away all elements and fields and returns the base
+// region underneath them.
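+// For example, the region for 'x.f[1]' is an ElementRegion inside a
+// FieldRegion inside the VarRegion for 'x'; getBaseRegion returns that
+// VarRegion.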
+const MemRegion *MemRegion::getBaseRegion() const {
+ const MemRegion *R = this;
+ while (true) {
+ switch (R->getKind()) {
+ case MemRegion::ElementRegionKind:
+ case MemRegion::FieldRegionKind:
+ case MemRegion::ObjCIvarRegionKind:
+ case MemRegion::CXXBaseObjectRegionKind:
+ R = cast<SubRegion>(R)->getSuperRegion();
+ continue;
+ default:
+ break;
+ }
+ break;
+ }
+ return R;
+}
+
+bool MemRegion::isSubRegionOf(const MemRegion *R) const {
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// View handling.
+//===----------------------------------------------------------------------===//
+
+const MemRegion *MemRegion::StripCasts(bool StripBaseCasts) const {
+ const MemRegion *R = this;
+ while (true) {
+ switch (R->getKind()) {
+ case ElementRegionKind: {
+ const ElementRegion *ER = cast<ElementRegion>(R);
+ if (!ER->getIndex().isZeroConstant())
+ return R;
+ R = ER->getSuperRegion();
+ break;
+ }
+ case CXXBaseObjectRegionKind:
+ if (!StripBaseCasts)
+ return R;
+ R = cast<CXXBaseObjectRegion>(R)->getSuperRegion();
+ break;
+ default:
+ return R;
+ }
+ }
+}
+
+const SymbolicRegion *MemRegion::getSymbolicBase() const {
+ const SubRegion *SubR = dyn_cast<SubRegion>(this);
+
+ while (SubR) {
+ if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SubR))
+ return SymR;
+ SubR = dyn_cast<SubRegion>(SubR->getSuperRegion());
+ }
+ return nullptr;
+}
+
+RegionRawOffset ElementRegion::getAsArrayOffset() const {
+ CharUnits offset = CharUnits::Zero();
+ const ElementRegion *ER = this;
+ const MemRegion *superR = nullptr;
+ ASTContext &C = getContext();
+
+ // FIXME: Handle multi-dimensional arrays.
+
+ while (ER) {
+ superR = ER->getSuperRegion();
+
+ // FIXME: generalize to symbolic offsets.
+ SVal index = ER->getIndex();
+ if (Optional<nonloc::ConcreteInt> CI = index.getAs<nonloc::ConcreteInt>()) {
+ // Update the offset.
+ int64_t i = CI->getValue().getSExtValue();
+
+ if (i != 0) {
+ QualType elemType = ER->getElementType();
+
+ // If we are pointing to an incomplete type, go no further.
+ if (elemType->isIncompleteType()) {
+ superR = ER;
+ break;
+ }
+
+ CharUnits size = C.getTypeSizeInChars(elemType);
+ offset += (i * size);
+ }
+
+ // Go to the next ElementRegion (if any).
+ ER = dyn_cast<ElementRegion>(superR);
+ continue;
+ }
+
+ return nullptr;
+ }
+
+ assert(superR && "super region cannot be NULL");
+ return RegionRawOffset(superR, offset);
+}
+
+
+/// Returns true if \p Base is an immediate base class of \p Child.
+static bool isImmediateBase(const CXXRecordDecl *Child,
+ const CXXRecordDecl *Base) {
+ assert(Child && "Child must not be null");
+ // Note that we do NOT canonicalize the base class here, because
+ // ASTRecordLayout doesn't either. If that leads us down the wrong path,
+ // so be it; at least we won't crash.
+ for (const auto &I : Child->bases()) {
+ if (I.getType()->getAsCXXRecordDecl() == Base)
+ return true;
+ }
+
+ return false;
+}
+
+RegionOffset MemRegion::getAsOffset() const {
+ const MemRegion *R = this;
+ const MemRegion *SymbolicOffsetBase = nullptr;
+ int64_t Offset = 0;
+
+ while (1) {
+ switch (R->getKind()) {
+ case GenericMemSpaceRegionKind:
+ case StackLocalsSpaceRegionKind:
+ case StackArgumentsSpaceRegionKind:
+ case HeapSpaceRegionKind:
+ case UnknownSpaceRegionKind:
+ case StaticGlobalSpaceRegionKind:
+ case GlobalInternalSpaceRegionKind:
+ case GlobalSystemSpaceRegionKind:
+ case GlobalImmutableSpaceRegionKind:
+ // Stores can bind directly to a region space to set a default value.
+ assert(Offset == 0 && !SymbolicOffsetBase);
+ goto Finish;
+
+ case FunctionTextRegionKind:
+ case BlockTextRegionKind:
+ case BlockDataRegionKind:
+ // These will never have bindings, but may end up having values requested
+ // if the user does some strange casting.
+ if (Offset != 0)
+ SymbolicOffsetBase = R;
+ goto Finish;
+
+ case SymbolicRegionKind:
+ case AllocaRegionKind:
+ case CompoundLiteralRegionKind:
+ case CXXThisRegionKind:
+ case StringRegionKind:
+ case ObjCStringRegionKind:
+ case VarRegionKind:
+ case CXXTempObjectRegionKind:
+ // Usual base regions.
+ goto Finish;
+
+ case ObjCIvarRegionKind:
+ // This is a little strange, but it's a compromise between
+ // ObjCIvarRegions having unknown compile-time offsets (when using the
+ // non-fragile runtime) and yet still being distinct, non-overlapping
+ // regions. Thus we treat them as "like" base regions for the purposes
+ // of computing offsets.
+ goto Finish;
+
+ case CXXBaseObjectRegionKind: {
+ const CXXBaseObjectRegion *BOR = cast<CXXBaseObjectRegion>(R);
+ R = BOR->getSuperRegion();
+
+ QualType Ty;
+ bool RootIsSymbolic = false;
+ if (const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(R)) {
+ Ty = TVR->getDesugaredValueType(getContext());
+ } else if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
+ // If our base region is symbolic, we don't know what type it really is.
+ // Pretend the type of the symbol is the true dynamic type.
+ // (This will at least be self-consistent for the life of the symbol.)
+ Ty = SR->getSymbol()->getType()->getPointeeType();
+ RootIsSymbolic = true;
+ }
+
+ const CXXRecordDecl *Child = Ty->getAsCXXRecordDecl();
+ if (!Child) {
+ // We cannot compute the offset of the base class.
+ SymbolicOffsetBase = R;
+ } else {
+ if (RootIsSymbolic) {
+ // Base layers on symbolic regions may not be type-correct.
+ // Double-check the inheritance here, and revert to a symbolic offset
+ // if it's invalid (e.g. due to a reinterpret_cast).
+ if (BOR->isVirtual()) {
+ if (!Child->isVirtuallyDerivedFrom(BOR->getDecl()))
+ SymbolicOffsetBase = R;
+ } else {
+ if (!isImmediateBase(Child, BOR->getDecl()))
+ SymbolicOffsetBase = R;
+ }
+ }
+ }
+
+ // Don't bother calculating precise offsets if we already have a
+ // symbolic offset somewhere in the chain.
+ if (SymbolicOffsetBase)
+ continue;
+
+ CharUnits BaseOffset;
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Child);
+ if (BOR->isVirtual())
+ BaseOffset = Layout.getVBaseClassOffset(BOR->getDecl());
+ else
+ BaseOffset = Layout.getBaseClassOffset(BOR->getDecl());
+
+ // The base offset is in chars, not in bits.
+ Offset += BaseOffset.getQuantity() * getContext().getCharWidth();
+ break;
+ }
+ case ElementRegionKind: {
+ const ElementRegion *ER = cast<ElementRegion>(R);
+ R = ER->getSuperRegion();
+
+ QualType EleTy = ER->getValueType();
+ if (EleTy->isIncompleteType()) {
+        // We cannot compute the offset for an incomplete element type.
+ SymbolicOffsetBase = R;
+ continue;
+ }
+
+ SVal Index = ER->getIndex();
+ if (Optional<nonloc::ConcreteInt> CI =
+ Index.getAs<nonloc::ConcreteInt>()) {
+ // Don't bother calculating precise offsets if we already have a
+ // symbolic offset somewhere in the chain.
+ if (SymbolicOffsetBase)
+ continue;
+
+ int64_t i = CI->getValue().getSExtValue();
+ // This type size is in bits.
+ Offset += i * getContext().getTypeSize(EleTy);
+ } else {
+ // We cannot compute offset for non-concrete index.
+ SymbolicOffsetBase = R;
+ }
+ break;
+ }
+ case FieldRegionKind: {
+ const FieldRegion *FR = cast<FieldRegion>(R);
+ R = FR->getSuperRegion();
+
+ const RecordDecl *RD = FR->getDecl()->getParent();
+ if (RD->isUnion() || !RD->isCompleteDefinition()) {
+        // We cannot compute the offset for a union or an incomplete type.
+ // For unions, we could treat everything as offset 0, but we'd rather
+ // treat each field as a symbolic offset so they aren't stored on top
+ // of each other, since we depend on things in typed regions actually
+ // matching their types.
+ SymbolicOffsetBase = R;
+ }
+
+ // Don't bother calculating precise offsets if we already have a
+ // symbolic offset somewhere in the chain.
+ if (SymbolicOffsetBase)
+ continue;
+
+ // Get the field number.
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator FI = RD->field_begin(),
+ FE = RD->field_end(); FI != FE; ++FI, ++idx)
+ if (FR->getDecl() == *FI)
+ break;
+
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ // This is offset in bits.
+ Offset += Layout.getFieldOffset(idx);
+ break;
+ }
+ }
+ }
+
+ Finish:
+ if (SymbolicOffsetBase)
+ return RegionOffset(SymbolicOffsetBase, RegionOffset::Symbolic);
+ return RegionOffset(R, Offset);
+}
+
+//===----------------------------------------------------------------------===//
+// BlockDataRegion
+//===----------------------------------------------------------------------===//
+
+std::pair<const VarRegion *, const VarRegion *>
+BlockDataRegion::getCaptureRegions(const VarDecl *VD) {
+ MemRegionManager &MemMgr = *getMemRegionManager();
+ const VarRegion *VR = nullptr;
+ const VarRegion *OriginalVR = nullptr;
+
+ if (!VD->hasAttr<BlocksAttr>() && VD->hasLocalStorage()) {
+ VR = MemMgr.getVarRegion(VD, this);
+ OriginalVR = MemMgr.getVarRegion(VD, LC);
+ }
+ else {
+ if (LC) {
+ VR = MemMgr.getVarRegion(VD, LC);
+ OriginalVR = VR;
+ }
+ else {
+ VR = MemMgr.getVarRegion(VD, MemMgr.getUnknownRegion());
+ OriginalVR = MemMgr.getVarRegion(VD, LC);
+ }
+ }
+ return std::make_pair(VR, OriginalVR);
+}
+
+void BlockDataRegion::LazyInitializeReferencedVars() {
+ if (ReferencedVars)
+ return;
+
+ AnalysisDeclContext *AC = getCodeRegion()->getAnalysisDeclContext();
+ const auto &ReferencedBlockVars = AC->getReferencedBlockVars(BC->getDecl());
+ auto NumBlockVars =
+ std::distance(ReferencedBlockVars.begin(), ReferencedBlockVars.end());
+
+ if (NumBlockVars == 0) {
+ ReferencedVars = (void*) 0x1;
+ return;
+ }
+
+ MemRegionManager &MemMgr = *getMemRegionManager();
+ llvm::BumpPtrAllocator &A = MemMgr.getAllocator();
+ BumpVectorContext BC(A);
+
+ typedef BumpVector<const MemRegion*> VarVec;
+ VarVec *BV = (VarVec*) A.Allocate<VarVec>();
+ new (BV) VarVec(BC, NumBlockVars);
+ VarVec *BVOriginal = (VarVec*) A.Allocate<VarVec>();
+ new (BVOriginal) VarVec(BC, NumBlockVars);
+
+ for (const VarDecl *VD : ReferencedBlockVars) {
+ const VarRegion *VR = nullptr;
+ const VarRegion *OriginalVR = nullptr;
+ std::tie(VR, OriginalVR) = getCaptureRegions(VD);
+ assert(VR);
+ assert(OriginalVR);
+ BV->push_back(VR, BC);
+ BVOriginal->push_back(OriginalVR, BC);
+ }
+
+ ReferencedVars = BV;
+ OriginalVars = BVOriginal;
+}
+
+BlockDataRegion::referenced_vars_iterator
+BlockDataRegion::referenced_vars_begin() const {
+ const_cast<BlockDataRegion*>(this)->LazyInitializeReferencedVars();
+
+ BumpVector<const MemRegion*> *Vec =
+ static_cast<BumpVector<const MemRegion*>*>(ReferencedVars);
+
+ if (Vec == (void*) 0x1)
+ return BlockDataRegion::referenced_vars_iterator(nullptr, nullptr);
+
+ BumpVector<const MemRegion*> *VecOriginal =
+ static_cast<BumpVector<const MemRegion*>*>(OriginalVars);
+
+ return BlockDataRegion::referenced_vars_iterator(Vec->begin(),
+ VecOriginal->begin());
+}
+
+BlockDataRegion::referenced_vars_iterator
+BlockDataRegion::referenced_vars_end() const {
+ const_cast<BlockDataRegion*>(this)->LazyInitializeReferencedVars();
+
+ BumpVector<const MemRegion*> *Vec =
+ static_cast<BumpVector<const MemRegion*>*>(ReferencedVars);
+
+ if (Vec == (void*) 0x1)
+ return BlockDataRegion::referenced_vars_iterator(nullptr, nullptr);
+
+ BumpVector<const MemRegion*> *VecOriginal =
+ static_cast<BumpVector<const MemRegion*>*>(OriginalVars);
+
+ return BlockDataRegion::referenced_vars_iterator(Vec->end(),
+ VecOriginal->end());
+}
+
+const VarRegion *BlockDataRegion::getOriginalRegion(const VarRegion *R) const {
+ for (referenced_vars_iterator I = referenced_vars_begin(),
+ E = referenced_vars_end();
+ I != E; ++I) {
+ if (I.getCapturedRegion() == R)
+ return I.getOriginalRegion();
+ }
+ return nullptr;
+}
+
+//===----------------------------------------------------------------------===//
+// RegionAndSymbolInvalidationTraits
+//===----------------------------------------------------------------------===//
+
+void RegionAndSymbolInvalidationTraits::setTrait(SymbolRef Sym,
+ InvalidationKinds IK) {
+ SymTraitsMap[Sym] |= IK;
+}
+
+void RegionAndSymbolInvalidationTraits::setTrait(const MemRegion *MR,
+ InvalidationKinds IK) {
+ assert(MR);
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(MR))
+ setTrait(SR->getSymbol(), IK);
+ else
+ MRTraitsMap[MR] |= IK;
+}
+
+bool RegionAndSymbolInvalidationTraits::hasTrait(SymbolRef Sym,
+ InvalidationKinds IK) {
+ const_symbol_iterator I = SymTraitsMap.find(Sym);
+ if (I != SymTraitsMap.end())
+ return I->second & IK;
+
+ return false;
+}
+
+bool RegionAndSymbolInvalidationTraits::hasTrait(const MemRegion *MR,
+ InvalidationKinds IK) {
+ if (!MR)
+ return false;
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(MR))
+ return hasTrait(SR->getSymbol(), IK);
+
+ const_region_iterator I = MRTraitsMap.find(MR);
+ if (I != MRTraitsMap.end())
+ return I->second & IK;
+
+ return false;
+}
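For orientation, client code never constructs regions directly: it asks
MemRegionManager, which uniques every region in a FoldingSet, so structural
questions become cheap pointer walks. A hedged sketch for the lvalue 'p->f',
where 'p' is an unknown pointer symbol (hypothetical helper; 'PointerSym' and
'FD' are assumed to come from the surrounding analysis):

#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"

#include <cassert>

using namespace clang;
using namespace ento;

// Build the region for 'p->f': the pointee of an unknown pointer lives in a
// SymbolicRegion, and the field is layered on top of it.
static const FieldRegion *regionForArrowField(MemRegionManager &MRMgr,
                                              SymbolRef PointerSym,
                                              const FieldDecl *FD) {
  const SymbolicRegion *PointeeR = MRMgr.getSymbolicRegion(PointerSym);
  const FieldRegion *FR = MRMgr.getFieldRegion(FD, PointeeR);

  // Both calls return uniqued pointers, so identity checks are pointer
  // comparisons.
  assert(FR->isSubRegionOf(PointeeR) && "field is laid out inside '*p'");
  assert(FR->getBaseRegion() == PointeeR && "getBaseRegion strips the field");
  return FR;
}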
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
new file mode 100644
index 0000000..504df30
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
@@ -0,0 +1,1183 @@
+//===--- PathDiagnostic.cpp - Path-Specific Diagnostic Handling -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PathDiagnostic-related interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+bool PathDiagnosticMacroPiece::containsEvent() const {
+ for (PathPieces::const_iterator I = subPieces.begin(), E = subPieces.end();
+ I!=E; ++I) {
+ if (isa<PathDiagnosticEventPiece>(*I))
+ return true;
+ if (PathDiagnosticMacroPiece *MP = dyn_cast<PathDiagnosticMacroPiece>(*I))
+ if (MP->containsEvent())
+ return true;
+ }
+ return false;
+}
+
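+// Strip trailing '.' characters; e.g. "Leak of object..." becomes
+// "Leak of object".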
+static StringRef StripTrailingDots(StringRef s) {
+ for (StringRef::size_type i = s.size(); i != 0; --i)
+ if (s[i - 1] != '.')
+ return s.substr(0, i);
+ return "";
+}
+
+PathDiagnosticPiece::PathDiagnosticPiece(StringRef s,
+ Kind k, DisplayHint hint)
+ : str(StripTrailingDots(s)), kind(k), Hint(hint),
+ LastInMainSourceFile(false) {}
+
+PathDiagnosticPiece::PathDiagnosticPiece(Kind k, DisplayHint hint)
+ : kind(k), Hint(hint), LastInMainSourceFile(false) {}
+
+PathDiagnosticPiece::~PathDiagnosticPiece() {}
+PathDiagnosticEventPiece::~PathDiagnosticEventPiece() {}
+PathDiagnosticCallPiece::~PathDiagnosticCallPiece() {}
+PathDiagnosticControlFlowPiece::~PathDiagnosticControlFlowPiece() {}
+PathDiagnosticMacroPiece::~PathDiagnosticMacroPiece() {}
+
+void PathPieces::flattenTo(PathPieces &Primary, PathPieces &Current,
+ bool ShouldFlattenMacros) const {
+ for (PathPieces::const_iterator I = begin(), E = end(); I != E; ++I) {
+ PathDiagnosticPiece *Piece = I->get();
+
+ switch (Piece->getKind()) {
+ case PathDiagnosticPiece::Call: {
+ PathDiagnosticCallPiece *Call = cast<PathDiagnosticCallPiece>(Piece);
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> CallEnter =
+ Call->getCallEnterEvent();
+ if (CallEnter)
+ Current.push_back(CallEnter);
+ Call->path.flattenTo(Primary, Primary, ShouldFlattenMacros);
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> callExit =
+ Call->getCallExitEvent();
+ if (callExit)
+ Current.push_back(callExit);
+ break;
+ }
+ case PathDiagnosticPiece::Macro: {
+ PathDiagnosticMacroPiece *Macro = cast<PathDiagnosticMacroPiece>(Piece);
+ if (ShouldFlattenMacros) {
+ Macro->subPieces.flattenTo(Primary, Primary, ShouldFlattenMacros);
+ } else {
+ Current.push_back(Piece);
+ PathPieces NewPath;
+ Macro->subPieces.flattenTo(Primary, NewPath, ShouldFlattenMacros);
+ // FIXME: This probably shouldn't mutate the original path piece.
+ Macro->subPieces = NewPath;
+ }
+ break;
+ }
+ case PathDiagnosticPiece::Event:
+ case PathDiagnosticPiece::ControlFlow:
+ Current.push_back(Piece);
+ break;
+ }
+ }
+}
+
+PathDiagnostic::~PathDiagnostic() {}
+
+PathDiagnostic::PathDiagnostic(StringRef CheckName, const Decl *declWithIssue,
+ StringRef bugtype, StringRef verboseDesc,
+ StringRef shortDesc, StringRef category,
+ PathDiagnosticLocation LocationToUnique,
+ const Decl *DeclToUnique)
+ : CheckName(CheckName),
+ DeclWithIssue(declWithIssue),
+ BugType(StripTrailingDots(bugtype)),
+ VerboseDesc(StripTrailingDots(verboseDesc)),
+ ShortDesc(StripTrailingDots(shortDesc)),
+ Category(StripTrailingDots(category)),
+ UniqueingLoc(LocationToUnique),
+ UniqueingDecl(DeclToUnique),
+ path(pathImpl) {}
+
+static PathDiagnosticCallPiece *
+getFirstStackedCallToHeaderFile(PathDiagnosticCallPiece *CP,
+ const SourceManager &SMgr) {
+ SourceLocation CallLoc = CP->callEnter.asLocation();
+
+ // If the call is within a macro, don't do anything (for now).
+ if (CallLoc.isMacroID())
+ return nullptr;
+
+ assert(SMgr.isInMainFile(CallLoc) &&
+ "The call piece should be in the main file.");
+
+ // Check if CP represents a path through a function outside of the main file.
+ if (!SMgr.isInMainFile(CP->callEnterWithin.asLocation()))
+ return CP;
+
+ const PathPieces &Path = CP->path;
+ if (Path.empty())
+ return nullptr;
+
+ // Check if the last piece in the callee path is a call to a function outside
+ // of the main file.
+ if (PathDiagnosticCallPiece *CPInner =
+ dyn_cast<PathDiagnosticCallPiece>(Path.back())) {
+ return getFirstStackedCallToHeaderFile(CPInner, SMgr);
+ }
+
+ // Otherwise, the last piece is in the main file.
+ return nullptr;
+}
+
+void PathDiagnostic::resetDiagnosticLocationToMainFile() {
+ if (path.empty())
+ return;
+
+ PathDiagnosticPiece *LastP = path.back().get();
+ assert(LastP);
+ const SourceManager &SMgr = LastP->getLocation().getManager();
+
+ // We only need to check if the report ends inside headers, if the last piece
+ // is a call piece.
+ if (PathDiagnosticCallPiece *CP = dyn_cast<PathDiagnosticCallPiece>(LastP)) {
+ CP = getFirstStackedCallToHeaderFile(CP, SMgr);
+ if (CP) {
+ // Mark the piece.
+ CP->setAsLastInMainSourceFile();
+
+ // Update the path diagnostic message.
+ const NamedDecl *ND = dyn_cast<NamedDecl>(CP->getCallee());
+ if (ND) {
+ SmallString<200> buf;
+ llvm::raw_svector_ostream os(buf);
+ os << " (within a call to '" << ND->getDeclName() << "')";
+ appendToDesc(os.str());
+ }
+
+ // Reset the report containing declaration and location.
+ DeclWithIssue = CP->getCaller();
+ Loc = CP->getLocation();
+
+ return;
+ }
+ }
+}
+
+void PathDiagnosticConsumer::anchor() { }
+
+PathDiagnosticConsumer::~PathDiagnosticConsumer() {
+ // Delete the contents of the FoldingSet if it isn't empty already.
+  for (llvm::FoldingSet<PathDiagnostic>::iterator it =
+       Diags.begin(), et = Diags.end(); it != et; ++it) {
+ delete &*it;
+ }
+}
+
+void PathDiagnosticConsumer::HandlePathDiagnostic(
+ std::unique_ptr<PathDiagnostic> D) {
+ if (!D || D->path.empty())
+ return;
+
+ // We need to flatten the locations (convert Stmt* to locations) because
+ // the referenced statements may be freed by the time the diagnostics
+ // are emitted.
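+ // (See PathDiagnosticLocation::flatten() below, which converts Stmt- and
+ // Decl-based locations into plain source ranges.)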
+ D->flattenLocations();
+
+ // If the PathDiagnosticConsumer does not support diagnostics that
+ // cross file boundaries, prune out such diagnostics now.
+ if (!supportsCrossFileDiagnostics()) {
+ // Verify that the entire path is from the same FileID.
+ FileID FID;
+ const SourceManager &SMgr = D->path.front()->getLocation().getManager();
+ SmallVector<const PathPieces *, 5> WorkList;
+ WorkList.push_back(&D->path);
+
+ while (!WorkList.empty()) {
+ const PathPieces &path = *WorkList.pop_back_val();
+
+ for (PathPieces::const_iterator I = path.begin(), E = path.end(); I != E;
+ ++I) {
+ const PathDiagnosticPiece *piece = I->get();
+ FullSourceLoc L = piece->getLocation().asLocation().getExpansionLoc();
+
+ if (FID.isInvalid()) {
+ FID = SMgr.getFileID(L);
+ } else if (SMgr.getFileID(L) != FID)
+ return; // FIXME: Emit a warning?
+
+ // Check the source ranges.
+ ArrayRef<SourceRange> Ranges = piece->getRanges();
+ for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
+ E = Ranges.end(); I != E; ++I) {
+ SourceLocation L = SMgr.getExpansionLoc(I->getBegin());
+ if (!L.isFileID() || SMgr.getFileID(L) != FID)
+ return; // FIXME: Emit a warning?
+ L = SMgr.getExpansionLoc(I->getEnd());
+ if (!L.isFileID() || SMgr.getFileID(L) != FID)
+ return; // FIXME: Emit a warning?
+ }
+
+ if (const PathDiagnosticCallPiece *call =
+ dyn_cast<PathDiagnosticCallPiece>(piece)) {
+ WorkList.push_back(&call->path);
+ }
+ else if (const PathDiagnosticMacroPiece *macro =
+ dyn_cast<PathDiagnosticMacroPiece>(piece)) {
+ WorkList.push_back(&macro->subPieces);
+ }
+ }
+ }
+
+ if (FID.isInvalid())
+ return; // FIXME: Emit a warning?
+ }
+
+ // Profile the node to see if we already have something matching it.
+ llvm::FoldingSetNodeID profile;
+ D->Profile(profile);
+ void *InsertPos = nullptr;
+
+ if (PathDiagnostic *orig = Diags.FindNodeOrInsertPos(profile, InsertPos)) {
+ // Keep the PathDiagnostic with the shorter path.
+ // Note that the enclosing routine is called in a deterministic order, so
+ // the results will be consistent between runs (no reason to break ties if
+ // the size is the same).
+ const unsigned orig_size = orig->full_size();
+ const unsigned new_size = D->full_size();
+ if (orig_size <= new_size)
+ return;
+
+ assert(orig != D.get());
+ Diags.RemoveNode(orig);
+ delete orig;
+ }
+
+ Diags.InsertNode(D.release());
+}
+
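+// Three-way comparison helpers for ordering diagnostics deterministically:
+// a result of true/false means X should be ordered before/after Y, while
+// None means the two operands are indistinguishable at this level.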
+static Optional<bool> comparePath(const PathPieces &X, const PathPieces &Y);
+static Optional<bool>
+compareControlFlow(const PathDiagnosticControlFlowPiece &X,
+ const PathDiagnosticControlFlowPiece &Y) {
+ FullSourceLoc XSL = X.getStartLocation().asLocation();
+ FullSourceLoc YSL = Y.getStartLocation().asLocation();
+ if (XSL != YSL)
+ return XSL.isBeforeInTranslationUnitThan(YSL);
+ FullSourceLoc XEL = X.getEndLocation().asLocation();
+ FullSourceLoc YEL = Y.getEndLocation().asLocation();
+ if (XEL != YEL)
+ return XEL.isBeforeInTranslationUnitThan(YEL);
+ return None;
+}
+
+static Optional<bool> compareMacro(const PathDiagnosticMacroPiece &X,
+ const PathDiagnosticMacroPiece &Y) {
+ return comparePath(X.subPieces, Y.subPieces);
+}
+
+static Optional<bool> compareCall(const PathDiagnosticCallPiece &X,
+ const PathDiagnosticCallPiece &Y) {
+ FullSourceLoc X_CEL = X.callEnter.asLocation();
+ FullSourceLoc Y_CEL = Y.callEnter.asLocation();
+ if (X_CEL != Y_CEL)
+ return X_CEL.isBeforeInTranslationUnitThan(Y_CEL);
+ FullSourceLoc X_CEWL = X.callEnterWithin.asLocation();
+ FullSourceLoc Y_CEWL = Y.callEnterWithin.asLocation();
+ if (X_CEWL != Y_CEWL)
+ return X_CEWL.isBeforeInTranslationUnitThan(Y_CEWL);
+ FullSourceLoc X_CRL = X.callReturn.asLocation();
+ FullSourceLoc Y_CRL = Y.callReturn.asLocation();
+ if (X_CRL != Y_CRL)
+ return X_CRL.isBeforeInTranslationUnitThan(Y_CRL);
+ return comparePath(X.path, Y.path);
+}
+
+static Optional<bool> comparePiece(const PathDiagnosticPiece &X,
+ const PathDiagnosticPiece &Y) {
+ if (X.getKind() != Y.getKind())
+ return X.getKind() < Y.getKind();
+
+ FullSourceLoc XL = X.getLocation().asLocation();
+ FullSourceLoc YL = Y.getLocation().asLocation();
+ if (XL != YL)
+ return XL.isBeforeInTranslationUnitThan(YL);
+
+ if (X.getString() != Y.getString())
+ return X.getString() < Y.getString();
+
+ if (X.getRanges().size() != Y.getRanges().size())
+ return X.getRanges().size() < Y.getRanges().size();
+
+ const SourceManager &SM = XL.getManager();
+
+ for (unsigned i = 0, n = X.getRanges().size(); i < n; ++i) {
+ SourceRange XR = X.getRanges()[i];
+ SourceRange YR = Y.getRanges()[i];
+ if (XR != YR) {
+ if (XR.getBegin() != YR.getBegin())
+ return SM.isBeforeInTranslationUnit(XR.getBegin(), YR.getBegin());
+ return SM.isBeforeInTranslationUnit(XR.getEnd(), YR.getEnd());
+ }
+ }
+
+ switch (X.getKind()) {
+ case clang::ento::PathDiagnosticPiece::ControlFlow:
+ return compareControlFlow(cast<PathDiagnosticControlFlowPiece>(X),
+ cast<PathDiagnosticControlFlowPiece>(Y));
+ case clang::ento::PathDiagnosticPiece::Event:
+ return None;
+ case clang::ento::PathDiagnosticPiece::Macro:
+ return compareMacro(cast<PathDiagnosticMacroPiece>(X),
+ cast<PathDiagnosticMacroPiece>(Y));
+ case clang::ento::PathDiagnosticPiece::Call:
+ return compareCall(cast<PathDiagnosticCallPiece>(X),
+ cast<PathDiagnosticCallPiece>(Y));
+ }
+ llvm_unreachable("all cases handled");
+}
+
+static Optional<bool> comparePath(const PathPieces &X, const PathPieces &Y) {
+ if (X.size() != Y.size())
+ return X.size() < Y.size();
+
+ PathPieces::const_iterator X_I = X.begin(), X_end = X.end();
+ PathPieces::const_iterator Y_I = Y.begin(), Y_end = Y.end();
+
+ for ( ; X_I != X_end && Y_I != Y_end; ++X_I, ++Y_I) {
+ Optional<bool> b = comparePiece(**X_I, **Y_I);
+ if (b.hasValue())
+ return b.getValue();
+ }
+
+ return None;
+}
+
+static bool compare(const PathDiagnostic &X, const PathDiagnostic &Y) {
+ FullSourceLoc XL = X.getLocation().asLocation();
+ FullSourceLoc YL = Y.getLocation().asLocation();
+ if (XL != YL)
+ return XL.isBeforeInTranslationUnitThan(YL);
+ if (X.getBugType() != Y.getBugType())
+ return X.getBugType() < Y.getBugType();
+ if (X.getCategory() != Y.getCategory())
+ return X.getCategory() < Y.getCategory();
+ if (X.getVerboseDescription() != Y.getVerboseDescription())
+ return X.getVerboseDescription() < Y.getVerboseDescription();
+ if (X.getShortDescription() != Y.getShortDescription())
+ return X.getShortDescription() < Y.getShortDescription();
+ if (X.getDeclWithIssue() != Y.getDeclWithIssue()) {
+ const Decl *XD = X.getDeclWithIssue();
+ if (!XD)
+ return true;
+ const Decl *YD = Y.getDeclWithIssue();
+ if (!YD)
+ return false;
+ SourceLocation XDL = XD->getLocation();
+ SourceLocation YDL = YD->getLocation();
+ if (XDL != YDL) {
+ const SourceManager &SM = XL.getManager();
+ return SM.isBeforeInTranslationUnit(XDL, YDL);
+ }
+ }
+ PathDiagnostic::meta_iterator XI = X.meta_begin(), XE = X.meta_end();
+ PathDiagnostic::meta_iterator YI = Y.meta_begin(), YE = Y.meta_end();
+ if (XE - XI != YE - YI)
+ return (XE - XI) < (YE - YI);
+ for ( ; XI != XE ; ++XI, ++YI) {
+ if (*XI != *YI)
+ return (*XI) < (*YI);
+ }
+ Optional<bool> b = comparePath(X.path, Y.path);
+ assert(b.hasValue());
+ return b.getValue();
+}
+
+void PathDiagnosticConsumer::FlushDiagnostics(
+ PathDiagnosticConsumer::FilesMade *Files) {
+ if (flushed)
+ return;
+
+ flushed = true;
+
+ std::vector<const PathDiagnostic *> BatchDiags;
+ for (llvm::FoldingSet<PathDiagnostic>::iterator it = Diags.begin(),
+ et = Diags.end(); it != et; ++it) {
+ const PathDiagnostic *D = &*it;
+ BatchDiags.push_back(D);
+ }
+
+ // Sort the diagnostics so that they are always emitted in a deterministic
+ // order.
+ int (*Comp)(const PathDiagnostic *const *, const PathDiagnostic *const *) =
+ [](const PathDiagnostic *const *X, const PathDiagnostic *const *Y) {
+ assert(*X != *Y && "PathDiagnostics not uniqued!");
+ if (compare(**X, **Y))
+ return -1;
+ assert(compare(**Y, **X) && "Not a total order!");
+ return 1;
+ };
+ array_pod_sort(BatchDiags.begin(), BatchDiags.end(), Comp);
+
+ FlushDiagnosticsImpl(BatchDiags, Files);
+
+ // Delete the flushed diagnostics.
+ for (std::vector<const PathDiagnostic *>::iterator it = BatchDiags.begin(),
+ et = BatchDiags.end(); it != et; ++it) {
+ const PathDiagnostic *D = *it;
+ delete D;
+ }
+
+ // Clear out the FoldingSet.
+ Diags.clear();
+}
+
+PathDiagnosticConsumer::FilesMade::~FilesMade() {
+ for (PDFileEntry &Entry : Set)
+ Entry.~PDFileEntry();
+}
+
+void PathDiagnosticConsumer::FilesMade::addDiagnostic(const PathDiagnostic &PD,
+ StringRef ConsumerName,
+ StringRef FileName) {
+ llvm::FoldingSetNodeID NodeID;
+ NodeID.Add(PD);
+ void *InsertPos;
+ PDFileEntry *Entry = Set.FindNodeOrInsertPos(NodeID, InsertPos);
+ if (!Entry) {
+ Entry = Alloc.Allocate<PDFileEntry>();
+ Entry = new (Entry) PDFileEntry(NodeID);
+ Set.InsertNode(Entry, InsertPos);
+ }
+
+ // Allocate persistent storage for the file name.
+ char *FileName_cstr = (char*) Alloc.Allocate(FileName.size(), 1);
+ memcpy(FileName_cstr, FileName.data(), FileName.size());
+
+ Entry->files.push_back(std::make_pair(ConsumerName,
+ StringRef(FileName_cstr,
+ FileName.size())));
+}
+
+PathDiagnosticConsumer::PDFileEntry::ConsumerFiles *
+PathDiagnosticConsumer::FilesMade::getFiles(const PathDiagnostic &PD) {
+ llvm::FoldingSetNodeID NodeID;
+ NodeID.Add(PD);
+ void *InsertPos;
+ PDFileEntry *Entry = Set.FindNodeOrInsertPos(NodeID, InsertPos);
+ if (!Entry)
+ return nullptr;
+ return &Entry->files;
+}
+
+//===----------------------------------------------------------------------===//
+// PathDiagnosticLocation methods.
+//===----------------------------------------------------------------------===//
+
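+// Return a valid source location for S, walking up the parent map when S
+// itself has no valid location (e.g. an implicit expression synthesized by
+// the compiler).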
+static SourceLocation getValidSourceLocation(const Stmt* S,
+ LocationOrAnalysisDeclContext LAC,
+ bool UseEnd = false) {
+ SourceLocation L = UseEnd ? S->getLocEnd() : S->getLocStart();
+ assert(!LAC.isNull() && "A valid LocationContext or AnalysisDeclContext should "
+ "be passed to PathDiagnosticLocation upon creation.");
+
+ // S might be a temporary statement that does not have a location in the
+ // source code, so find an enclosing statement and use its location.
+ if (!L.isValid()) {
+
+ AnalysisDeclContext *ADC;
+ if (LAC.is<const LocationContext*>())
+ ADC = LAC.get<const LocationContext*>()->getAnalysisDeclContext();
+ else
+ ADC = LAC.get<AnalysisDeclContext*>();
+
+ ParentMap &PM = ADC->getParentMap();
+
+ const Stmt *Parent = S;
+ do {
+ Parent = PM.getParent(Parent);
+
+ // In rare cases, we have implicit top-level expressions,
+ // such as arguments for implicit member initializers.
+ // In this case, fall back to the start of the body (even if we were
+ // asked for the statement end location).
+ if (!Parent) {
+ const Stmt *Body = ADC->getBody();
+ if (Body)
+ L = Body->getLocStart();
+ else
+ L = ADC->getDecl()->getLocEnd();
+ break;
+ }
+
+ L = UseEnd ? Parent->getLocEnd() : Parent->getLocStart();
+ } while (!L.isValid());
+ }
+
+ return L;
+}
+
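+// Compute the location of the call site within the caller from the CFG
+// element that transferred control to the callee: a call statement, a
+// member initializer, or an implicit destructor invocation.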
+static PathDiagnosticLocation
+getLocationForCaller(const StackFrameContext *SFC,
+ const LocationContext *CallerCtx,
+ const SourceManager &SM) {
+ const CFGBlock &Block = *SFC->getCallSiteBlock();
+ CFGElement Source = Block[SFC->getIndex()];
+
+ switch (Source.getKind()) {
+ case CFGElement::Statement:
+ return PathDiagnosticLocation(Source.castAs<CFGStmt>().getStmt(),
+ SM, CallerCtx);
+ case CFGElement::Initializer: {
+ const CFGInitializer &Init = Source.castAs<CFGInitializer>();
+ return PathDiagnosticLocation(Init.getInitializer()->getInit(),
+ SM, CallerCtx);
+ }
+ case CFGElement::AutomaticObjectDtor: {
+ const CFGAutomaticObjDtor &Dtor = Source.castAs<CFGAutomaticObjDtor>();
+ return PathDiagnosticLocation::createEnd(Dtor.getTriggerStmt(),
+ SM, CallerCtx);
+ }
+ case CFGElement::DeleteDtor: {
+ const CFGDeleteDtor &Dtor = Source.castAs<CFGDeleteDtor>();
+ return PathDiagnosticLocation(Dtor.getDeleteExpr(), SM, CallerCtx);
+ }
+ case CFGElement::BaseDtor:
+ case CFGElement::MemberDtor: {
+ const AnalysisDeclContext *CallerInfo = CallerCtx->getAnalysisDeclContext();
+ if (const Stmt *CallerBody = CallerInfo->getBody())
+ return PathDiagnosticLocation::createEnd(CallerBody, SM, CallerCtx);
+ return PathDiagnosticLocation::create(CallerInfo->getDecl(), SM);
+ }
+ case CFGElement::TemporaryDtor:
+ case CFGElement::NewAllocator:
+ llvm_unreachable("not yet implemented!");
+ }
+
+ llvm_unreachable("Unknown CFGElement kind");
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::createBegin(const Decl *D,
+ const SourceManager &SM) {
+ return PathDiagnosticLocation(D->getLocStart(), SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::createBegin(const Stmt *S,
+ const SourceManager &SM,
+ LocationOrAnalysisDeclContext LAC) {
+ return PathDiagnosticLocation(getValidSourceLocation(S, LAC),
+ SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+PathDiagnosticLocation::createEnd(const Stmt *S,
+ const SourceManager &SM,
+ LocationOrAnalysisDeclContext LAC) {
+ if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S))
+ return createEndBrace(CS, SM);
+ return PathDiagnosticLocation(getValidSourceLocation(S, LAC, /*End=*/true),
+ SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::createOperatorLoc(const BinaryOperator *BO,
+ const SourceManager &SM) {
+ return PathDiagnosticLocation(BO->getOperatorLoc(), SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::createConditionalColonLoc(
+ const ConditionalOperator *CO,
+ const SourceManager &SM) {
+ return PathDiagnosticLocation(CO->getColonLoc(), SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::createMemberLoc(const MemberExpr *ME,
+ const SourceManager &SM) {
+ return PathDiagnosticLocation(ME->getMemberLoc(), SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::createBeginBrace(const CompoundStmt *CS,
+ const SourceManager &SM) {
+ SourceLocation L = CS->getLBracLoc();
+ return PathDiagnosticLocation(L, SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::createEndBrace(const CompoundStmt *CS,
+ const SourceManager &SM) {
+ SourceLocation L = CS->getRBracLoc();
+ return PathDiagnosticLocation(L, SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::createDeclBegin(const LocationContext *LC,
+ const SourceManager &SM) {
+ // FIXME: Should handle CXXTryStmt if the analyzer starts supporting C++.
+ if (const CompoundStmt *CS =
+ dyn_cast_or_null<CompoundStmt>(LC->getDecl()->getBody()))
+ if (!CS->body_empty()) {
+ SourceLocation Loc = (*CS->body_begin())->getLocStart();
+ return PathDiagnosticLocation(Loc, SM, SingleLocK);
+ }
+
+ return PathDiagnosticLocation();
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::createDeclEnd(const LocationContext *LC,
+ const SourceManager &SM) {
+ SourceLocation L = LC->getDecl()->getBodyRBrace();
+ return PathDiagnosticLocation(L, SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::create(const ProgramPoint& P,
+ const SourceManager &SMng) {
+
+ const Stmt* S = nullptr;
+ if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
+ const CFGBlock *BSrc = BE->getSrc();
+ S = BSrc->getTerminatorCondition();
+ } else if (Optional<StmtPoint> SP = P.getAs<StmtPoint>()) {
+ S = SP->getStmt();
+ if (P.getAs<PostStmtPurgeDeadSymbols>())
+ return PathDiagnosticLocation::createEnd(S, SMng, P.getLocationContext());
+ } else if (Optional<PostInitializer> PIP = P.getAs<PostInitializer>()) {
+ return PathDiagnosticLocation(PIP->getInitializer()->getSourceLocation(),
+ SMng);
+ } else if (Optional<PostImplicitCall> PIE = P.getAs<PostImplicitCall>()) {
+ return PathDiagnosticLocation(PIE->getLocation(), SMng);
+ } else if (Optional<CallEnter> CE = P.getAs<CallEnter>()) {
+ return getLocationForCaller(CE->getCalleeContext(),
+ CE->getLocationContext(),
+ SMng);
+ } else if (Optional<CallExitEnd> CEE = P.getAs<CallExitEnd>()) {
+ return getLocationForCaller(CEE->getCalleeContext(),
+ CEE->getLocationContext(),
+ SMng);
+ } else {
+ llvm_unreachable("Unexpected ProgramPoint");
+ }
+
+ return PathDiagnosticLocation(S, SMng, P.getLocationContext());
+}
+
+const Stmt *PathDiagnosticLocation::getStmt(const ExplodedNode *N) {
+ ProgramPoint P = N->getLocation();
+ if (Optional<StmtPoint> SP = P.getAs<StmtPoint>())
+ return SP->getStmt();
+ if (Optional<BlockEdge> BE = P.getAs<BlockEdge>())
+ return BE->getSrc()->getTerminator();
+ if (Optional<CallEnter> CE = P.getAs<CallEnter>())
+ return CE->getCallExpr();
+ if (Optional<CallExitEnd> CEE = P.getAs<CallExitEnd>())
+ return CEE->getCalleeContext()->getCallSite();
+ if (Optional<PostInitializer> PIPP = P.getAs<PostInitializer>())
+ return PIPP->getInitializer()->getInit();
+
+ return nullptr;
+}
+
+const Stmt *PathDiagnosticLocation::getNextStmt(const ExplodedNode *N) {
+ for (N = N->getFirstSucc(); N; N = N->getFirstSucc()) {
+ if (const Stmt *S = getStmt(N)) {
+ // Check if the statement is '?' or '&&'/'||'. These are "merges",
+ // not actual statement points.
+ switch (S->getStmtClass()) {
+ case Stmt::ChooseExprClass:
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass:
+ continue;
+ case Stmt::BinaryOperatorClass: {
+ BinaryOperatorKind Op = cast<BinaryOperator>(S)->getOpcode();
+ if (Op == BO_LAnd || Op == BO_LOr)
+ continue;
+ break;
+ }
+ default:
+ break;
+ }
+ // We found the statement, so return it.
+ return S;
+ }
+ }
+
+ return nullptr;
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::createEndOfPath(const ExplodedNode *N,
+ const SourceManager &SM) {
+ assert(N && "Cannot create a location with a null node.");
+ const Stmt *S = getStmt(N);
+
+ if (!S) {
+ // If this is an implicit call, return the implicit call point location.
+ if (Optional<PreImplicitCall> PIE = N->getLocationAs<PreImplicitCall>())
+ return PathDiagnosticLocation(PIE->getLocation(), SM);
+ S = getNextStmt(N);
+ }
+
+ if (S) {
+ ProgramPoint P = N->getLocation();
+ const LocationContext *LC = N->getLocationContext();
+
+ // For member expressions, return the location of the '.' or '->'.
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(S))
+ return PathDiagnosticLocation::createMemberLoc(ME, SM);
+
+ // For binary operators, return the location of the operator.
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(S))
+ return PathDiagnosticLocation::createOperatorLoc(B, SM);
+
+ if (P.getAs<PostStmtPurgeDeadSymbols>())
+ return PathDiagnosticLocation::createEnd(S, SM, LC);
+
+ if (S->getLocStart().isValid())
+ return PathDiagnosticLocation(S, SM, LC);
+ return PathDiagnosticLocation(getValidSourceLocation(S, LC), SM);
+ }
+
+ return createDeclEnd(N->getLocationContext(), SM);
+}
+
+PathDiagnosticLocation PathDiagnosticLocation::createSingleLocation(
+ const PathDiagnosticLocation &PDL) {
+ FullSourceLoc L = PDL.asLocation();
+ return PathDiagnosticLocation(L, L.getManager(), SingleLocK);
+}
+
+FullSourceLoc
+ PathDiagnosticLocation::genLocation(SourceLocation L,
+ LocationOrAnalysisDeclContext LAC) const {
+ assert(isValid());
+ // Note that we want a 'switch' here so that the compiler can warn us in
+ // case we add more cases.
+ switch (K) {
+ case SingleLocK:
+ case RangeK:
+ break;
+ case StmtK:
+ // Defensive checking.
+ if (!S)
+ break;
+ return FullSourceLoc(getValidSourceLocation(S, LAC),
+ const_cast<SourceManager&>(*SM));
+ case DeclK:
+ // Defensive checking.
+ if (!D)
+ break;
+ return FullSourceLoc(D->getLocation(), const_cast<SourceManager&>(*SM));
+ }
+
+ return FullSourceLoc(L, const_cast<SourceManager&>(*SM));
+}
+
+PathDiagnosticRange
+ PathDiagnosticLocation::genRange(LocationOrAnalysisDeclContext LAC) const {
+ assert(isValid());
+ // Note that we want a 'switch' here so that the compiler can warn us in
+ // case we add more cases.
+ switch (K) {
+ case SingleLocK:
+ return PathDiagnosticRange(SourceRange(Loc,Loc), true);
+ case RangeK:
+ break;
+ case StmtK: {
+ const Stmt *S = asStmt();
+ switch (S->getStmtClass()) {
+ default:
+ break;
+ case Stmt::DeclStmtClass: {
+ const DeclStmt *DS = cast<DeclStmt>(S);
+ if (DS->isSingleDecl()) {
+ // Should always be the case, but we'll be defensive.
+ return SourceRange(DS->getLocStart(),
+ DS->getSingleDecl()->getLocation());
+ }
+ break;
+ }
+ // FIXME: Provide better range information for different
+ // terminators.
+ case Stmt::IfStmtClass:
+ case Stmt::WhileStmtClass:
+ case Stmt::DoStmtClass:
+ case Stmt::ForStmtClass:
+ case Stmt::ChooseExprClass:
+ case Stmt::IndirectGotoStmtClass:
+ case Stmt::SwitchStmtClass:
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass:
+ case Stmt::ObjCForCollectionStmtClass: {
+ SourceLocation L = getValidSourceLocation(S, LAC);
+ return SourceRange(L, L);
+ }
+ }
+ SourceRange R = S->getSourceRange();
+ if (R.isValid())
+ return R;
+ break;
+ }
+ case DeclK:
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ return MD->getSourceRange();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (Stmt *Body = FD->getBody())
+ return Body->getSourceRange();
+ }
+ else {
+ SourceLocation L = D->getLocation();
+ return PathDiagnosticRange(SourceRange(L, L), true);
+ }
+ }
+
+ return SourceRange(Loc,Loc);
+}
+
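+// Discard the Stmt/Decl pointers, keeping only the already-computed source
+// locations and ranges, so the location no longer references the AST and
+// can safely outlive it.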
+void PathDiagnosticLocation::flatten() {
+ if (K == StmtK) {
+ K = RangeK;
+ S = nullptr;
+ D = nullptr;
+ }
+ else if (K == DeclK) {
+ K = SingleLocK;
+ S = nullptr;
+ D = nullptr;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Manipulation of PathDiagnosticCallPieces.
+//===----------------------------------------------------------------------===//
+
+PathDiagnosticCallPiece *
+PathDiagnosticCallPiece::construct(const ExplodedNode *N,
+ const CallExitEnd &CE,
+ const SourceManager &SM) {
+ const Decl *caller = CE.getLocationContext()->getDecl();
+ PathDiagnosticLocation pos = getLocationForCaller(CE.getCalleeContext(),
+ CE.getLocationContext(),
+ SM);
+ return new PathDiagnosticCallPiece(caller, pos);
+}
+
+PathDiagnosticCallPiece *
+PathDiagnosticCallPiece::construct(PathPieces &path,
+ const Decl *caller) {
+ PathDiagnosticCallPiece *C = new PathDiagnosticCallPiece(path, caller);
+ path.clear();
+ path.push_front(C);
+ return C;
+}
+
+void PathDiagnosticCallPiece::setCallee(const CallEnter &CE,
+ const SourceManager &SM) {
+ const StackFrameContext *CalleeCtx = CE.getCalleeContext();
+ Callee = CalleeCtx->getDecl();
+
+ callEnterWithin = PathDiagnosticLocation::createBegin(Callee, SM);
+ callEnter = getLocationForCaller(CalleeCtx, CE.getLocationContext(), SM);
+}
+
+static inline void describeClass(raw_ostream &Out, const CXXRecordDecl *D,
+ StringRef Prefix = StringRef()) {
+ if (!D->getIdentifier())
+ return;
+ Out << Prefix << '\'' << *D << '\'';
+}
+
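+// Print a user-facing description of D to Out, e.g. "copy constructor for
+// 'Foo'", "implicit default constructor for 'Bar'", or "'NS::func'".
+// Returns true if any description was printed.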
+static bool describeCodeDecl(raw_ostream &Out, const Decl *D,
+ bool ExtendedDescription,
+ StringRef Prefix = StringRef()) {
+ if (!D)
+ return false;
+
+ if (isa<BlockDecl>(D)) {
+ if (ExtendedDescription)
+ Out << Prefix << "anonymous block";
+ return ExtendedDescription;
+ }
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ Out << Prefix;
+ if (ExtendedDescription && !MD->isUserProvided()) {
+ if (MD->isExplicitlyDefaulted())
+ Out << "defaulted ";
+ else
+ Out << "implicit ";
+ }
+
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(MD)) {
+ if (CD->isDefaultConstructor())
+ Out << "default ";
+ else if (CD->isCopyConstructor())
+ Out << "copy ";
+ else if (CD->isMoveConstructor())
+ Out << "move ";
+
+ Out << "constructor";
+ describeClass(Out, MD->getParent(), " for ");
+
+ } else if (isa<CXXDestructorDecl>(MD)) {
+ if (!MD->isUserProvided()) {
+ Out << "destructor";
+ describeClass(Out, MD->getParent(), " for ");
+ } else {
+ // Use ~Foo for explicitly-written destructors.
+ Out << "'" << *MD << "'";
+ }
+
+ } else if (MD->isCopyAssignmentOperator()) {
+ Out << "copy assignment operator";
+ describeClass(Out, MD->getParent(), " for ");
+
+ } else if (MD->isMoveAssignmentOperator()) {
+ Out << "move assignment operator";
+ describeClass(Out, MD->getParent(), " for ");
+
+ } else {
+ if (MD->getParent()->getIdentifier())
+ Out << "'" << *MD->getParent() << "::" << *MD << "'";
+ else
+ Out << "'" << *MD << "'";
+ }
+
+ return true;
+ }
+
+ Out << Prefix << '\'' << cast<NamedDecl>(*D) << '\'';
+ return true;
+}
+
+IntrusiveRefCntPtr<PathDiagnosticEventPiece>
+PathDiagnosticCallPiece::getCallEnterEvent() const {
+ if (!Callee)
+ return nullptr;
+
+ SmallString<256> buf;
+ llvm::raw_svector_ostream Out(buf);
+
+ Out << "Calling ";
+ describeCodeDecl(Out, Callee, /*ExtendedDescription=*/true);
+
+ assert(callEnter.asLocation().isValid());
+ return new PathDiagnosticEventPiece(callEnter, Out.str());
+}
+
+IntrusiveRefCntPtr<PathDiagnosticEventPiece>
+PathDiagnosticCallPiece::getCallEnterWithinCallerEvent() const {
+ if (!callEnterWithin.asLocation().isValid())
+ return nullptr;
+ if (Callee->isImplicit() || !Callee->hasBody())
+ return nullptr;
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Callee))
+ if (MD->isDefaulted())
+ return nullptr;
+
+ SmallString<256> buf;
+ llvm::raw_svector_ostream Out(buf);
+
+ Out << "Entered call";
+ describeCodeDecl(Out, Caller, /*ExtendedDescription=*/false, " from ");
+
+ return new PathDiagnosticEventPiece(callEnterWithin, Out.str());
+}
+
+IntrusiveRefCntPtr<PathDiagnosticEventPiece>
+PathDiagnosticCallPiece::getCallExitEvent() const {
+ if (NoExit)
+ return nullptr;
+
+ SmallString<256> buf;
+ llvm::raw_svector_ostream Out(buf);
+
+ if (!CallStackMessage.empty()) {
+ Out << CallStackMessage;
+ } else {
+ bool DidDescribe = describeCodeDecl(Out, Callee,
+ /*ExtendedDescription=*/false,
+ "Returning from ");
+ if (!DidDescribe)
+ Out << "Returning to caller";
+ }
+
+ assert(callReturn.asLocation().isValid());
+ return new PathDiagnosticEventPiece(callReturn, Out.str());
+}
+
+static void compute_path_size(const PathPieces &pieces, unsigned &size) {
+ for (PathPieces::const_iterator it = pieces.begin(),
+ et = pieces.end(); it != et; ++it) {
+ const PathDiagnosticPiece *piece = it->get();
+ if (const PathDiagnosticCallPiece *cp =
+ dyn_cast<PathDiagnosticCallPiece>(piece)) {
+ compute_path_size(cp->path, size);
+ }
+ else
+ ++size;
+ }
+}
+
+unsigned PathDiagnostic::full_size() {
+ unsigned size = 0;
+ compute_path_size(path, size);
+ return size;
+}
+
+//===----------------------------------------------------------------------===//
+// FoldingSet profiling methods.
+//===----------------------------------------------------------------------===//
+
+void PathDiagnosticLocation::Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(Range.getBegin().getRawEncoding());
+ ID.AddInteger(Range.getEnd().getRawEncoding());
+ ID.AddInteger(Loc.getRawEncoding());
+ return;
+}
+
+void PathDiagnosticPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger((unsigned) getKind());
+ ID.AddString(str);
+ // FIXME: Add profiling support for code hints.
+ ID.AddInteger((unsigned) getDisplayHint());
+ ArrayRef<SourceRange> Ranges = getRanges();
+ for (ArrayRef<SourceRange>::iterator I = Ranges.begin(), E = Ranges.end();
+ I != E; ++I) {
+ ID.AddInteger(I->getBegin().getRawEncoding());
+ ID.AddInteger(I->getEnd().getRawEncoding());
+ }
+}
+
+void PathDiagnosticCallPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ PathDiagnosticPiece::Profile(ID);
+ for (PathPieces::const_iterator it = path.begin(),
+ et = path.end(); it != et; ++it) {
+ ID.Add(**it);
+ }
+}
+
+void PathDiagnosticSpotPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ PathDiagnosticPiece::Profile(ID);
+ ID.Add(Pos);
+}
+
+void PathDiagnosticControlFlowPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ PathDiagnosticPiece::Profile(ID);
+ for (const_iterator I = begin(), E = end(); I != E; ++I)
+ ID.Add(*I);
+}
+
+void PathDiagnosticMacroPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ PathDiagnosticSpotPiece::Profile(ID);
+ for (PathPieces::const_iterator I = subPieces.begin(), E = subPieces.end();
+ I != E; ++I)
+ ID.Add(**I);
+}
+
+void PathDiagnostic::Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.Add(getLocation());
+ ID.AddString(BugType);
+ ID.AddString(VerboseDesc);
+ ID.AddString(Category);
+}
+
+void PathDiagnostic::FullProfile(llvm::FoldingSetNodeID &ID) const {
+ Profile(ID);
+ for (PathPieces::const_iterator I = path.begin(), E = path.end(); I != E; ++I)
+ ID.Add(**I);
+ for (meta_iterator I = meta_begin(), E = meta_end(); I != E; ++I)
+ ID.AddString(*I);
+}
+
+StackHintGenerator::~StackHintGenerator() {}
+
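+// Produce the stack-hint message for the call exit node N: report whether
+// the interesting symbol was passed to the call as an argument (directly or
+// through a pointer), returned from it, or not found at all.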
+std::string StackHintGeneratorForSymbol::getMessage(const ExplodedNode *N){
+ // Check for a null node before it is dereferenced below.
+ if (!N)
+ return getMessageForSymbolNotFound();
+
+ ProgramPoint P = N->getLocation();
+ CallExitEnd CExit = P.castAs<CallExitEnd>();
+
+ // FIXME: Use CallEvent to abstract this over all calls.
+ const Stmt *CallSite = CExit.getCalleeContext()->getCallSite();
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(CallSite);
+ if (!CE)
+ return "";
+
+ // Check if one of the parameters is set to the interesting symbol.
+ ProgramStateRef State = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
+ unsigned ArgIndex = 0;
+ for (CallExpr::const_arg_iterator I = CE->arg_begin(),
+ E = CE->arg_end(); I != E; ++I, ++ArgIndex){
+ SVal SV = State->getSVal(*I, LCtx);
+
+ // Check if the variable corresponding to the symbol is passed by value.
+ SymbolRef AS = SV.getAsLocSymbol();
+ if (AS == Sym) {
+ return getMessageForArg(*I, ArgIndex);
+ }
+
+ // Check if the parameter is a pointer to the symbol.
+ if (Optional<loc::MemRegionVal> Reg = SV.getAs<loc::MemRegionVal>()) {
+ SVal PSV = State->getSVal(Reg->getRegion());
+ SymbolRef AS = PSV.getAsLocSymbol();
+ if (AS == Sym) {
+ return getMessageForArg(*I, ArgIndex);
+ }
+ }
+ }
+
+ // Check if we are returning the interesting symbol.
+ SVal SV = State->getSVal(CE, LCtx);
+ SymbolRef RetSym = SV.getAsLocSymbol();
+ if (RetSym == Sym) {
+ return getMessageForReturn(CE);
+ }
+
+ return getMessageForSymbolNotFound();
+}
+
+std::string StackHintGeneratorForSymbol::getMessageForArg(const Expr *ArgE,
+ unsigned ArgIndex) {
+ // Printed parameters start at 1, not 0.
+ ++ArgIndex;
+
+ SmallString<200> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << Msg << " via " << ArgIndex << llvm::getOrdinalSuffix(ArgIndex)
+ << " parameter";
+
+ return os.str();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
new file mode 100644
index 0000000..55e1222
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -0,0 +1,497 @@
+//===--- PlistDiagnostics.cpp - Plist Diagnostics for Paths -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PlistDiagnostics object.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/PlistSupport.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Version.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/IssueHash.h"
+#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Casting.h"
+using namespace clang;
+using namespace ento;
+using namespace markup;
+
+namespace {
+ class PlistDiagnostics : public PathDiagnosticConsumer {
+ const std::string OutputFile;
+ const LangOptions &LangOpts;
+ const bool SupportsCrossFileDiagnostics;
+ public:
+ PlistDiagnostics(AnalyzerOptions &AnalyzerOpts,
+ const std::string& prefix,
+ const LangOptions &LangOpts,
+ bool supportsMultipleFiles);
+
+ ~PlistDiagnostics() override {}
+
+ void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+ FilesMade *filesMade) override;
+
+ StringRef getName() const override {
+ return "PlistDiagnostics";
+ }
+
+ PathGenerationScheme getGenerationScheme() const override {
+ return Extensive;
+ }
+ bool supportsLogicalOpControlFlow() const override { return true; }
+ bool supportsCrossFileDiagnostics() const override {
+ return SupportsCrossFileDiagnostics;
+ }
+ };
+} // end anonymous namespace
+
+PlistDiagnostics::PlistDiagnostics(AnalyzerOptions &AnalyzerOpts,
+ const std::string& output,
+ const LangOptions &LO,
+ bool supportsMultipleFiles)
+ : OutputFile(output),
+ LangOpts(LO),
+ SupportsCrossFileDiagnostics(supportsMultipleFiles) {}
+
+void ento::createPlistDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
+ PathDiagnosticConsumers &C,
+ const std::string& s,
+ const Preprocessor &PP) {
+ C.push_back(new PlistDiagnostics(AnalyzerOpts, s,
+ PP.getLangOpts(), false));
+}
+
+void ento::createPlistMultiFileDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
+ PathDiagnosticConsumers &C,
+ const std::string &s,
+ const Preprocessor &PP) {
+ C.push_back(new PlistDiagnostics(AnalyzerOpts, s,
+ PP.getLangOpts(), true));
+}
+
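+// Emit a "control" dict for a control-flow piece: an array of edges, each a
+// start/end pair of character ranges, plus optional alternate text.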
+static void ReportControlFlow(raw_ostream &o,
+ const PathDiagnosticControlFlowPiece& P,
+ const FIDMap& FM,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ unsigned indent) {
+
+ Indent(o, indent) << "<dict>\n";
+ ++indent;
+
+ Indent(o, indent) << "<key>kind</key><string>control</string>\n";
+
+ // Emit edges.
+ Indent(o, indent) << "<key>edges</key>\n";
+ ++indent;
+ Indent(o, indent) << "<array>\n";
+ ++indent;
+ for (PathDiagnosticControlFlowPiece::const_iterator I=P.begin(), E=P.end();
+ I!=E; ++I) {
+ Indent(o, indent) << "<dict>\n";
+ ++indent;
+
+ // Make the ranges of the start and end point self-consistent with
+ // adjacent edges by forcing them to use only the beginning of the range.
+ // This simplifies the layout logic for clients.
+ Indent(o, indent) << "<key>start</key>\n";
+ SourceRange StartEdge(
+ SM.getExpansionLoc(I->getStart().asRange().getBegin()));
+ EmitRange(o, SM, Lexer::getAsCharRange(StartEdge, SM, LangOpts), FM,
+ indent + 1);
+
+ Indent(o, indent) << "<key>end</key>\n";
+ SourceRange EndEdge(SM.getExpansionLoc(I->getEnd().asRange().getBegin()));
+ EmitRange(o, SM, Lexer::getAsCharRange(EndEdge, SM, LangOpts), FM,
+ indent + 1);
+
+ --indent;
+ Indent(o, indent) << "</dict>\n";
+ }
+ --indent;
+ Indent(o, indent) << "</array>\n";
+ --indent;
+
+ // Output any helper text.
+ const std::string& s = P.getString();
+ if (!s.empty()) {
+ Indent(o, indent) << "<key>alternate</key>";
+ EmitString(o, s) << '\n';
+ }
+
+ --indent;
+ Indent(o, indent) << "</dict>\n";
+}
+
+static void ReportEvent(raw_ostream &o, const PathDiagnosticPiece& P,
+ const FIDMap& FM,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ unsigned indent,
+ unsigned depth,
+ bool isKeyEvent = false) {
+
+ Indent(o, indent) << "<dict>\n";
+ ++indent;
+
+ Indent(o, indent) << "<key>kind</key><string>event</string>\n";
+
+ if (isKeyEvent) {
+ Indent(o, indent) << "<key>key_event</key><true/>\n";
+ }
+
+ // Output the location.
+ FullSourceLoc L = P.getLocation().asLocation();
+
+ Indent(o, indent) << "<key>location</key>\n";
+ EmitLocation(o, SM, L, FM, indent);
+
+ // Output the ranges (if any).
+ ArrayRef<SourceRange> Ranges = P.getRanges();
+
+ if (!Ranges.empty()) {
+ Indent(o, indent) << "<key>ranges</key>\n";
+ Indent(o, indent) << "<array>\n";
+ ++indent;
+ for (auto &R : Ranges)
+ EmitRange(o, SM,
+ Lexer::getAsCharRange(SM.getExpansionRange(R), SM, LangOpts),
+ FM, indent + 1);
+ --indent;
+ Indent(o, indent) << "</array>\n";
+ }
+
+ // Output the call depth.
+ Indent(o, indent) << "<key>depth</key>";
+ EmitInteger(o, depth) << '\n';
+
+ // Output the text.
+ assert(!P.getString().empty());
+ Indent(o, indent) << "<key>extended_message</key>\n";
+ Indent(o, indent);
+ EmitString(o, P.getString()) << '\n';
+
+ // Output the short text.
+ // FIXME: Really use a short string.
+ Indent(o, indent) << "<key>message</key>\n";
+ Indent(o, indent);
+ EmitString(o, P.getString()) << '\n';
+
+ // Finish up.
+ --indent;
+ Indent(o, indent); o << "</dict>\n";
+}
+
+static void ReportPiece(raw_ostream &o,
+ const PathDiagnosticPiece &P,
+ const FIDMap& FM, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ unsigned indent,
+ unsigned depth,
+ bool includeControlFlow,
+ bool isKeyEvent = false);
+
+static void ReportCall(raw_ostream &o,
+ const PathDiagnosticCallPiece &P,
+ const FIDMap& FM, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ unsigned indent,
+ unsigned depth) {
+
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> callEnter =
+ P.getCallEnterEvent();
+
+ if (callEnter)
+ ReportPiece(o, *callEnter, FM, SM, LangOpts, indent, depth, true,
+ P.isLastInMainSourceFile());
+
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> callEnterWithinCaller =
+ P.getCallEnterWithinCallerEvent();
+
+ ++depth;
+
+ if (callEnterWithinCaller)
+ ReportPiece(o, *callEnterWithinCaller, FM, SM, LangOpts,
+ indent, depth, true);
+
+ for (PathPieces::const_iterator I = P.path.begin(), E = P.path.end();I!=E;++I)
+ ReportPiece(o, **I, FM, SM, LangOpts, indent, depth, true);
+
+ --depth;
+
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> callExit =
+ P.getCallExitEvent();
+
+ if (callExit)
+ ReportPiece(o, *callExit, FM, SM, LangOpts, indent, depth, true);
+}
+
+static void ReportMacro(raw_ostream &o,
+ const PathDiagnosticMacroPiece& P,
+ const FIDMap& FM, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ unsigned indent,
+ unsigned depth) {
+
+ for (PathPieces::const_iterator I = P.subPieces.begin(), E=P.subPieces.end();
+ I!=E; ++I) {
+ ReportPiece(o, **I, FM, SM, LangOpts, indent, depth, false);
+ }
+}
+
+static void ReportDiag(raw_ostream &o, const PathDiagnosticPiece& P,
+ const FIDMap& FM, const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ ReportPiece(o, P, FM, SM, LangOpts, 4, 0, true);
+}
+
+static void ReportPiece(raw_ostream &o,
+ const PathDiagnosticPiece &P,
+ const FIDMap& FM, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ unsigned indent,
+ unsigned depth,
+ bool includeControlFlow,
+ bool isKeyEvent) {
+ switch (P.getKind()) {
+ case PathDiagnosticPiece::ControlFlow:
+ if (includeControlFlow)
+ ReportControlFlow(o, cast<PathDiagnosticControlFlowPiece>(P), FM, SM,
+ LangOpts, indent);
+ break;
+ case PathDiagnosticPiece::Call:
+ ReportCall(o, cast<PathDiagnosticCallPiece>(P), FM, SM, LangOpts,
+ indent, depth);
+ break;
+ case PathDiagnosticPiece::Event:
+ ReportEvent(o, cast<PathDiagnosticSpotPiece>(P), FM, SM, LangOpts,
+ indent, depth, isKeyEvent);
+ break;
+ case PathDiagnosticPiece::Macro:
+ ReportMacro(o, cast<PathDiagnosticMacroPiece>(P), FM, SM, LangOpts,
+ indent, depth);
+ break;
+ }
+}
+
+void PlistDiagnostics::FlushDiagnosticsImpl(
+ std::vector<const PathDiagnostic *> &Diags,
+ FilesMade *filesMade) {
+ // Build up a set of FIDs that we use by scanning the locations and
+ // ranges of the diagnostics.
+ FIDMap FM;
+ SmallVector<FileID, 10> Fids;
+ const SourceManager* SM = nullptr;
+
+ if (!Diags.empty())
+ SM = &Diags.front()->path.front()->getLocation().getManager();
+
+ for (std::vector<const PathDiagnostic*>::iterator DI = Diags.begin(),
+ DE = Diags.end(); DI != DE; ++DI) {
+
+ const PathDiagnostic *D = *DI;
+
+ SmallVector<const PathPieces *, 5> WorkList;
+ WorkList.push_back(&D->path);
+
+ while (!WorkList.empty()) {
+ const PathPieces &path = *WorkList.pop_back_val();
+
+ for (PathPieces::const_iterator I = path.begin(), E = path.end(); I != E;
+ ++I) {
+ const PathDiagnosticPiece *piece = I->get();
+ AddFID(FM, Fids, *SM, piece->getLocation().asLocation());
+ ArrayRef<SourceRange> Ranges = piece->getRanges();
+ for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
+ E = Ranges.end(); I != E; ++I) {
+ AddFID(FM, Fids, *SM, I->getBegin());
+ AddFID(FM, Fids, *SM, I->getEnd());
+ }
+
+ if (const PathDiagnosticCallPiece *call =
+ dyn_cast<PathDiagnosticCallPiece>(piece)) {
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece>
+ callEnterWithin = call->getCallEnterWithinCallerEvent();
+ if (callEnterWithin)
+ AddFID(FM, Fids, *SM, callEnterWithin->getLocation().asLocation());
+
+ WorkList.push_back(&call->path);
+ }
+ else if (const PathDiagnosticMacroPiece *macro =
+ dyn_cast<PathDiagnosticMacroPiece>(piece)) {
+ WorkList.push_back(&macro->subPieces);
+ }
+ }
+ }
+ }
+
+ // Open the file.
+ std::error_code EC;
+ llvm::raw_fd_ostream o(OutputFile, EC, llvm::sys::fs::F_Text);
+ if (EC) {
+ llvm::errs() << "warning: could not create file: " << EC.message() << '\n';
+ return;
+ }
+
+ EmitPlistHeader(o);
+
+ // Write the root object: a <dict> containing...
+ // - "clang_version", the string representation of clang version
+ // - "files", an <array> mapping from FIDs to file names
+ // - "diagnostics", an <array> containing the path diagnostics
+ o << "<dict>\n" <<
+ " <key>clang_version</key>\n";
+ EmitString(o, getClangFullVersion()) << '\n';
+ o << " <key>files</key>\n"
+ " <array>\n";
+
+ for (FileID FID : Fids)
+ EmitString(o << " ", SM->getFileEntryForID(FID)->getName()) << '\n';
+
+ o << " </array>\n"
+ " <key>diagnostics</key>\n"
+ " <array>\n";
+
+ for (std::vector<const PathDiagnostic*>::iterator DI=Diags.begin(),
+ DE = Diags.end(); DI!=DE; ++DI) {
+
+ o << " <dict>\n"
+ " <key>path</key>\n";
+
+ const PathDiagnostic *D = *DI;
+
+ o << " <array>\n";
+
+ for (PathPieces::const_iterator I = D->path.begin(), E = D->path.end();
+ I != E; ++I)
+ ReportDiag(o, **I, FM, *SM, LangOpts);
+
+ o << " </array>\n";
+
+ // Output the bug type and bug category.
+ o << " <key>description</key>";
+ EmitString(o, D->getShortDescription()) << '\n';
+ o << " <key>category</key>";
+ EmitString(o, D->getCategory()) << '\n';
+ o << " <key>type</key>";
+ EmitString(o, D->getBugType()) << '\n';
+ o << " <key>check_name</key>";
+ EmitString(o, D->getCheckName()) << '\n';
+
+ o << " <!-- This hash is experimental and going to change! -->\n";
+ o << " <key>issue_hash_content_of_line_in_context</key>";
+ PathDiagnosticLocation UPDLoc = D->getUniqueingLoc();
+ FullSourceLoc L(SM->getExpansionLoc(UPDLoc.isValid()
+ ? UPDLoc.asLocation()
+ : D->getLocation().asLocation()),
+ *SM);
+ const Decl *DeclWithIssue = D->getDeclWithIssue();
+ EmitString(o, GetIssueHash(*SM, L, D->getCheckName(), D->getBugType(),
+ DeclWithIssue, LangOpts))
+ << '\n';
+
+ // Output information about the semantic context where
+ // the issue occurred.
+ if (const Decl *DeclWithIssue = D->getDeclWithIssue()) {
+ // FIXME: handle blocks, which have no name.
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(DeclWithIssue)) {
+ StringRef declKind;
+ switch (ND->getKind()) {
+ case Decl::CXXRecord:
+ declKind = "C++ class";
+ break;
+ case Decl::CXXMethod:
+ declKind = "C++ method";
+ break;
+ case Decl::ObjCMethod:
+ declKind = "Objective-C method";
+ break;
+ case Decl::Function:
+ declKind = "function";
+ break;
+ default:
+ break;
+ }
+ if (!declKind.empty()) {
+ const std::string &declName = ND->getDeclName().getAsString();
+ o << " <key>issue_context_kind</key>";
+ EmitString(o, declKind) << '\n';
+ o << " <key>issue_context</key>";
+ EmitString(o, declName) << '\n';
+ }
+
+ // Output the bug hash for issue uniqueing. Currently, it's just an
+ // offset from the beginning of the function.
+ if (const Stmt *Body = DeclWithIssue->getBody()) {
+
+ // If the bug uniqueing location exists, use it for the hash.
+ // For example, this ensures that two leaks reported on the same line
+ // will have different issue_hashes and that the hash will identify
+ // the leak location even after code is added between the allocation
+ // site and the end of scope (leak report location).
+ if (UPDLoc.isValid()) {
+ FullSourceLoc UFunL(SM->getExpansionLoc(
+ D->getUniqueingDecl()->getBody()->getLocStart()), *SM);
+ o << " <key>issue_hash_function_offset</key><string>"
+ << L.getExpansionLineNumber() - UFunL.getExpansionLineNumber()
+ << "</string>\n";
+
+ // Otherwise, use the location on which the bug is reported.
+ } else {
+ FullSourceLoc FunL(SM->getExpansionLoc(Body->getLocStart()), *SM);
+ o << " <key>issue_hash_function_offset</key><string>"
+ << L.getExpansionLineNumber() - FunL.getExpansionLineNumber()
+ << "</string>\n";
+ }
+
+ }
+ }
+ }
+
+ // Output the location of the bug.
+ o << " <key>location</key>\n";
+ EmitLocation(o, *SM, D->getLocation().asLocation(), FM, 2);
+
+ // Output the diagnostic to the sub-diagnostic client, if any.
+ if (!filesMade->empty()) {
+ StringRef lastName;
+ PDFileEntry::ConsumerFiles *files = filesMade->getFiles(*D);
+ if (files) {
+ for (PDFileEntry::ConsumerFiles::const_iterator CI = files->begin(),
+ CE = files->end(); CI != CE; ++CI) {
+ StringRef newName = CI->first;
+ if (newName != lastName) {
+ if (!lastName.empty()) {
+ o << " </array>\n";
+ }
+ lastName = newName;
+ o << " <key>" << lastName << "_files</key>\n";
+ o << " <array>\n";
+ }
+ o << " <string>" << CI->second << "</string>\n";
+ }
+ o << " </array>\n";
+ }
+ }
+
+ // Close up the entry.
+ o << " </dict>\n";
+ }
+
+ o << " </array>\n";
+
+ // Finish.
+ o << "</dict>\n</plist>";
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PrettyStackTraceLocationContext.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PrettyStackTraceLocationContext.h
new file mode 100644
index 0000000..e7cc23c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PrettyStackTraceLocationContext.h
@@ -0,0 +1,45 @@
+//==- PrettyStackTraceLocationContext.h - show analysis backtrace --*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CORE_PRETTYSTACKTRACELOCATIONCONTEXT_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CORE_PRETTYSTACKTRACELOCATIONCONTEXT_H
+
+#include "clang/Analysis/AnalysisContext.h"
+
+namespace clang {
+namespace ento {
+
+/// While alive, includes the current analysis stack in a crash trace.
+///
+/// Example:
+/// \code
+/// 0. Program arguments: ...
+/// 1. <eof> parser at end of file
+/// 2. While analyzing stack:
+/// #0 void inlined()
+/// #1 void test()
+/// 3. crash-trace.c:6:3: Error evaluating statement
+/// \endcode
+class PrettyStackTraceLocationContext : public llvm::PrettyStackTraceEntry {
+ const LocationContext *LCtx;
+public:
+ PrettyStackTraceLocationContext(const LocationContext *LC) : LCtx(LC) {
+ assert(LCtx);
+ }
+
+ void print(raw_ostream &OS) const override {
+ OS << "While analyzing stack: \n";
+ LCtx->dumpStack(OS, "\t");
+ }
+};
+
+} // end ento namespace
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
new file mode 100644
index 0000000..4f9ad9e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -0,0 +1,754 @@
+//= ProgramState.cpp - Path-Sensitive "State" for tracking values --*- C++ -*--=
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements ProgramState and ProgramStateManager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace clang { namespace ento {
+/// Increment the number of times this state is referenced.
+void ProgramStateRetain(const ProgramState *state) {
+ ++const_cast<ProgramState*>(state)->refCount;
+}
+
+/// Decrement the number of times this state is referenced.
+void ProgramStateRelease(const ProgramState *state) {
+ assert(state->refCount > 0);
+ ProgramState *s = const_cast<ProgramState*>(state);
+ if (--s->refCount == 0) {
+ ProgramStateManager &Mgr = s->getStateManager();
+ Mgr.StateSet.RemoveNode(s);
+ s->~ProgramState();
+ Mgr.freeStates.push_back(s);
+ }
+}
+}}
+
+ProgramState::ProgramState(ProgramStateManager *mgr, const Environment& env,
+ StoreRef st, GenericDataMap gdm)
+ : stateMgr(mgr),
+ Env(env),
+ store(st.getStore()),
+ GDM(gdm),
+ refCount(0) {
+ stateMgr->getStoreManager().incrementReferenceCount(store);
+}
+
+ProgramState::ProgramState(const ProgramState &RHS)
+ : llvm::FoldingSetNode(),
+ stateMgr(RHS.stateMgr),
+ Env(RHS.Env),
+ store(RHS.store),
+ GDM(RHS.GDM),
+ refCount(0) {
+ stateMgr->getStoreManager().incrementReferenceCount(store);
+}
+
+ProgramState::~ProgramState() {
+ if (store)
+ stateMgr->getStoreManager().decrementReferenceCount(store);
+}
+
+ProgramStateManager::ProgramStateManager(ASTContext &Ctx,
+ StoreManagerCreator CreateSMgr,
+ ConstraintManagerCreator CreateCMgr,
+ llvm::BumpPtrAllocator &alloc,
+ SubEngine *SubEng)
+ : Eng(SubEng), EnvMgr(alloc), GDMFactory(alloc),
+ svalBuilder(createSimpleSValBuilder(alloc, Ctx, *this)),
+ CallEventMgr(new CallEventManager(alloc)), Alloc(alloc) {
+ StoreMgr = (*CreateSMgr)(*this);
+ ConstraintMgr = (*CreateCMgr)(*this, SubEng);
+}
+
+ProgramStateManager::~ProgramStateManager() {
+ for (GDMContextsTy::iterator I=GDMContexts.begin(), E=GDMContexts.end();
+ I!=E; ++I)
+ I->second.second(I->second.first);
+}
+
+ProgramStateRef
+ProgramStateManager::removeDeadBindings(ProgramStateRef state,
+ const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper) {
+
+ // This code essentially performs a "mark-and-sweep" of the VariableBindings.
+ // The roots are any Block-level exprs and Decls that our liveness algorithm
+ // tells us are live. We then see what Decls they may reference, and keep
+ // those around. This code more than likely can be made faster, and the
+ // frequency of which this method is called should be experimented with
+ // for optimum performance.
+ ProgramState NewState = *state;
+
+ NewState.Env = EnvMgr.removeDeadBindings(NewState.Env, SymReaper, state);
+
+ // Clean up the store.
+ StoreRef newStore = StoreMgr->removeDeadBindings(NewState.getStore(), LCtx,
+ SymReaper);
+ NewState.setStore(newStore);
+ SymReaper.setReapedStore(newStore);
+
+ ProgramStateRef Result = getPersistentState(NewState);
+ return ConstraintMgr->removeDeadBindings(Result, SymReaper);
+}
+
+ProgramStateRef ProgramState::bindLoc(Loc LV, SVal V, bool notifyChanges) const {
+ ProgramStateManager &Mgr = getStateManager();
+ ProgramStateRef newState = makeWithStore(Mgr.StoreMgr->Bind(getStore(),
+ LV, V));
+ const MemRegion *MR = LV.getAsRegion();
+ if (MR && Mgr.getOwningEngine() && notifyChanges)
+ return Mgr.getOwningEngine()->processRegionChange(newState, MR);
+
+ return newState;
+}
+
+ProgramStateRef ProgramState::bindDefault(SVal loc, SVal V) const {
+ ProgramStateManager &Mgr = getStateManager();
+ const MemRegion *R = loc.castAs<loc::MemRegionVal>().getRegion();
+ const StoreRef &newStore = Mgr.StoreMgr->BindDefault(getStore(), R, V);
+ ProgramStateRef new_state = makeWithStore(newStore);
+ return Mgr.getOwningEngine() ?
+ Mgr.getOwningEngine()->processRegionChange(new_state, R) :
+ new_state;
+}
+
+typedef ArrayRef<const MemRegion *> RegionList;
+typedef ArrayRef<SVal> ValueList;
+
+ProgramStateRef
+ProgramState::invalidateRegions(RegionList Regions,
+ const Expr *E, unsigned Count,
+ const LocationContext *LCtx,
+ bool CausedByPointerEscape,
+ InvalidatedSymbols *IS,
+ const CallEvent *Call,
+ RegionAndSymbolInvalidationTraits *ITraits) const {
+ SmallVector<SVal, 8> Values;
+ for (RegionList::const_iterator I = Regions.begin(),
+ End = Regions.end(); I != End; ++I)
+ Values.push_back(loc::MemRegionVal(*I));
+
+ return invalidateRegionsImpl(Values, E, Count, LCtx, CausedByPointerEscape,
+ IS, ITraits, Call);
+}
+
+ProgramStateRef
+ProgramState::invalidateRegions(ValueList Values,
+ const Expr *E, unsigned Count,
+ const LocationContext *LCtx,
+ bool CausedByPointerEscape,
+ InvalidatedSymbols *IS,
+ const CallEvent *Call,
+ RegionAndSymbolInvalidationTraits *ITraits) const {
+
+ return invalidateRegionsImpl(Values, E, Count, LCtx, CausedByPointerEscape,
+ IS, ITraits, Call);
+}
+
+ProgramStateRef
+ProgramState::invalidateRegionsImpl(ValueList Values,
+ const Expr *E, unsigned Count,
+ const LocationContext *LCtx,
+ bool CausedByPointerEscape,
+ InvalidatedSymbols *IS,
+ RegionAndSymbolInvalidationTraits *ITraits,
+ const CallEvent *Call) const {
+ ProgramStateManager &Mgr = getStateManager();
+ SubEngine* Eng = Mgr.getOwningEngine();
+
+ InvalidatedSymbols Invalidated;
+ if (!IS)
+ IS = &Invalidated;
+
+ RegionAndSymbolInvalidationTraits ITraitsLocal;
+ if (!ITraits)
+ ITraits = &ITraitsLocal;
+
+ if (Eng) {
+ StoreManager::InvalidatedRegions TopLevelInvalidated;
+ StoreManager::InvalidatedRegions Invalidated;
+ const StoreRef &newStore
+ = Mgr.StoreMgr->invalidateRegions(getStore(), Values, E, Count, LCtx, Call,
+ *IS, *ITraits, &TopLevelInvalidated,
+ &Invalidated);
+
+ ProgramStateRef newState = makeWithStore(newStore);
+
+ if (CausedByPointerEscape) {
+ newState = Eng->notifyCheckersOfPointerEscape(newState, IS,
+ TopLevelInvalidated,
+ Invalidated, Call,
+ *ITraits);
+ }
+
+ return Eng->processRegionChanges(newState, IS, TopLevelInvalidated,
+ Invalidated, Call);
+ }
+
+ const StoreRef &newStore =
+ Mgr.StoreMgr->invalidateRegions(getStore(), Values, E, Count, LCtx, Call,
+ *IS, *ITraits, nullptr, nullptr);
+ return makeWithStore(newStore);
+}
+
+ProgramStateRef ProgramState::killBinding(Loc LV) const {
+ assert(!LV.getAs<loc::MemRegionVal>() && "Use invalidateRegion instead.");
+
+ Store OldStore = getStore();
+ const StoreRef &newStore =
+ getStateManager().StoreMgr->killBinding(OldStore, LV);
+
+ if (newStore.getStore() == OldStore)
+ return this;
+
+ return makeWithStore(newStore);
+}
+
+ProgramStateRef
+ProgramState::enterStackFrame(const CallEvent &Call,
+ const StackFrameContext *CalleeCtx) const {
+ const StoreRef &NewStore =
+ getStateManager().StoreMgr->enterStackFrame(getStore(), Call, CalleeCtx);
+ return makeWithStore(NewStore);
+}
+
+SVal ProgramState::getSValAsScalarOrLoc(const MemRegion *R) const {
+ // We only want to do fetches from regions to which we can actually bind
+ // values. For example, SymbolicRegions of type 'id<...>' cannot
+ // have direct bindings (but there can be bindings on their subregions).
+ if (!R->isBoundable())
+ return UnknownVal();
+
+ if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R)) {
+ QualType T = TR->getValueType();
+ if (Loc::isLocType(T) || T->isIntegralOrEnumerationType())
+ return getSVal(R);
+ }
+
+ return UnknownVal();
+}
+
+SVal ProgramState::getSVal(Loc location, QualType T) const {
+ SVal V = getRawSVal(cast<Loc>(location), T);
+
+ // If 'V' is a symbolic value that is *perfectly* constrained to
+ // be a constant value, use that value instead to lessen the burden
+ // on later analysis stages (so we have fewer symbolic values to reason
+ // about).
+ if (!T.isNull()) {
+ if (SymbolRef sym = V.getAsSymbol()) {
+ if (const llvm::APSInt *Int = getStateManager()
+ .getConstraintManager()
+ .getSymVal(this, sym)) {
+ // FIXME: Because we don't correctly model (yet) sign-extension
+ // and truncation of symbolic values, we need to convert
+ // the integer value to the correct signedness and bitwidth.
+ //
+ // This shows up in the following:
+ //
+ // char foo();
+ // unsigned x = foo();
+ // if (x == 54)
+ // ...
+ //
+ // The symbolic value stored to 'x' is actually the conjured
+ // symbol for the call to foo(); the type of that symbol is 'char',
+ // not unsigned.
+ const llvm::APSInt &NewV = getBasicVals().Convert(T, *Int);
+
+ if (V.getAs<Loc>())
+ return loc::ConcreteInt(NewV);
+ else
+ return nonloc::ConcreteInt(NewV);
+ }
+ }
+ }
+
+ return V;
+}
+
+ProgramStateRef ProgramState::BindExpr(const Stmt *S,
+ const LocationContext *LCtx,
+ SVal V, bool Invalidate) const{
+ Environment NewEnv =
+ getStateManager().EnvMgr.bindExpr(Env, EnvironmentEntry(S, LCtx), V,
+ Invalidate);
+ if (NewEnv == Env)
+ return this;
+
+ ProgramState NewSt = *this;
+ NewSt.Env = NewEnv;
+ return getStateManager().getPersistentState(NewSt);
+}
+
+ProgramStateRef ProgramState::assumeInBound(DefinedOrUnknownSVal Idx,
+ DefinedOrUnknownSVal UpperBound,
+ bool Assumption,
+ QualType indexTy) const {
+ if (Idx.isUnknown() || UpperBound.isUnknown())
+ return this;
+
+ // Build an expression for 0 <= Idx < UpperBound.
+ // This is the same as Idx + MIN < UpperBound + MIN, if overflow is allowed.
+ // FIXME: This should probably be part of SValBuilder.
+ ProgramStateManager &SM = getStateManager();
+ SValBuilder &svalBuilder = SM.getSValBuilder();
+ ASTContext &Ctx = svalBuilder.getContext();
+
+ // Get the offset: the minimum value of the array index type.
+ BasicValueFactory &BVF = svalBuilder.getBasicValueFactory();
+  // FIXME: This should be using ValueManager::ArrayIndexTy...somehow.
+ if (indexTy.isNull())
+ indexTy = Ctx.IntTy;
+ nonloc::ConcreteInt Min(BVF.getMinValue(indexTy));
+
+ // Adjust the index.
+ SVal newIdx = svalBuilder.evalBinOpNN(this, BO_Add,
+ Idx.castAs<NonLoc>(), Min, indexTy);
+ if (newIdx.isUnknownOrUndef())
+ return this;
+
+ // Adjust the upper bound.
+ SVal newBound =
+ svalBuilder.evalBinOpNN(this, BO_Add, UpperBound.castAs<NonLoc>(),
+ Min, indexTy);
+
+ if (newBound.isUnknownOrUndef())
+ return this;
+
+ // Build the actual comparison.
+ SVal inBound = svalBuilder.evalBinOpNN(this, BO_LT, newIdx.castAs<NonLoc>(),
+ newBound.castAs<NonLoc>(), Ctx.IntTy);
+ if (inBound.isUnknownOrUndef())
+ return this;
+
+ // Finally, let the constraint manager take care of it.
+ ConstraintManager &CM = SM.getConstraintManager();
+ return CM.assume(this, inBound.castAs<DefinedSVal>(), Assumption);
+}
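+
+// Illustrative usage (a sketch, not part of this file's interface): a
+// hypothetical checker would typically fork the state on an array access:
+//
+//   DefinedOrUnknownSVal Idx = ...;   // index being accessed
+//   DefinedOrUnknownSVal Size = ...;  // extent of the array
+//   if (ProgramStateRef InBound = State->assumeInBound(Idx, Size, true)) {
+//     // Proceed on the path where 0 <= Idx < Size holds.
+//   }
+//   if (ProgramStateRef OutBound = State->assumeInBound(Idx, Size, false)) {
+//     // The index may be out of bounds on this path; report a bug here.
+//   }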
+
+ConditionTruthVal ProgramState::isNull(SVal V) const {
+ if (V.isZeroConstant())
+ return true;
+
+ if (V.isConstant())
+ return false;
+
+ SymbolRef Sym = V.getAsSymbol(/* IncludeBaseRegion */ true);
+ if (!Sym)
+ return ConditionTruthVal();
+
+ return getStateManager().ConstraintMgr->isNull(this, Sym);
+}
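+
+// ConditionTruthVal is a tri-state answer. A minimal usage sketch:
+//
+//   ConditionTruthVal Nullness = State->isNull(V);
+//   if (Nullness.isConstrainedTrue()) {
+//     // V is known to be null on this path.
+//   } else if (Nullness.isConstrainedFalse()) {
+//     // V is known to be non-null.
+//   } else {
+//     // Underconstrained: V may or may not be null.
+//   }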
+
+ProgramStateRef ProgramStateManager::getInitialState(const LocationContext *InitLoc) {
+ ProgramState State(this,
+ EnvMgr.getInitialEnvironment(),
+ StoreMgr->getInitialStore(InitLoc),
+ GDMFactory.getEmptyMap());
+
+ return getPersistentState(State);
+}
+
+ProgramStateRef ProgramStateManager::getPersistentStateWithGDM(
+ ProgramStateRef FromState,
+ ProgramStateRef GDMState) {
+ ProgramState NewState(*FromState);
+ NewState.GDM = GDMState->GDM;
+ return getPersistentState(NewState);
+}
+
+ProgramStateRef ProgramStateManager::getPersistentState(ProgramState &State) {
+
+ llvm::FoldingSetNodeID ID;
+ State.Profile(ID);
+ void *InsertPos;
+
+ if (ProgramState *I = StateSet.FindNodeOrInsertPos(ID, InsertPos))
+ return I;
+
+ ProgramState *newState = nullptr;
+ if (!freeStates.empty()) {
+ newState = freeStates.back();
+ freeStates.pop_back();
+  } else {
+ newState = (ProgramState*) Alloc.Allocate<ProgramState>();
+ }
+ new (newState) ProgramState(State);
+ StateSet.InsertNode(newState, InsertPos);
+ return newState;
+}
+
+ProgramStateRef ProgramState::makeWithStore(const StoreRef &store) const {
+ ProgramState NewSt(*this);
+ NewSt.setStore(store);
+ return getStateManager().getPersistentState(NewSt);
+}
+
+void ProgramState::setStore(const StoreRef &newStore) {
+ Store newStoreStore = newStore.getStore();
+ if (newStoreStore)
+ stateMgr->getStoreManager().incrementReferenceCount(newStoreStore);
+ if (store)
+ stateMgr->getStoreManager().decrementReferenceCount(store);
+ store = newStoreStore;
+}
+
+//===----------------------------------------------------------------------===//
+// State pretty-printing.
+//===----------------------------------------------------------------------===//
+
+void ProgramState::print(raw_ostream &Out,
+ const char *NL, const char *Sep) const {
+ // Print the store.
+ ProgramStateManager &Mgr = getStateManager();
+ Mgr.getStoreManager().print(getStore(), Out, NL, Sep);
+
+ // Print out the environment.
+ Env.print(Out, NL, Sep);
+
+ // Print out the constraints.
+ Mgr.getConstraintManager().print(this, Out, NL, Sep);
+
+ // Print checker-specific data.
+ Mgr.getOwningEngine()->printState(Out, this, NL, Sep);
+}
+
+void ProgramState::printDOT(raw_ostream &Out) const {
+ print(Out, "\\l", "\\|");
+}
+
+void ProgramState::dump() const {
+ print(llvm::errs());
+}
+
+void ProgramState::printTaint(raw_ostream &Out,
+ const char *NL, const char *Sep) const {
+ TaintMapImpl TM = get<TaintMap>();
+
+ if (!TM.isEmpty())
+ Out <<"Tainted Symbols:" << NL;
+
+ for (TaintMapImpl::iterator I = TM.begin(), E = TM.end(); I != E; ++I) {
+ Out << I->first << " : " << I->second << NL;
+ }
+}
+
+void ProgramState::dumpTaint() const {
+ printTaint(llvm::errs());
+}
+
+//===----------------------------------------------------------------------===//
+// Generic Data Map.
+//===----------------------------------------------------------------------===//
+
+void *const* ProgramState::FindGDM(void *K) const {
+ return GDM.lookup(K);
+}
+
+void*
+ProgramStateManager::FindGDMContext(void *K,
+ void *(*CreateContext)(llvm::BumpPtrAllocator&),
+ void (*DeleteContext)(void*)) {
+
+ std::pair<void*, void (*)(void*)>& p = GDMContexts[K];
+ if (!p.first) {
+ p.first = CreateContext(Alloc);
+ p.second = DeleteContext;
+ }
+
+ return p.first;
+}
+
+ProgramStateRef ProgramStateManager::addGDM(ProgramStateRef St, void *Key, void *Data){
+ ProgramState::GenericDataMap M1 = St->getGDM();
+ ProgramState::GenericDataMap M2 = GDMFactory.add(M1, Key, Data);
+
+ if (M1 == M2)
+ return St;
+
+ ProgramState NewSt = *St;
+ NewSt.GDM = M2;
+ return getPersistentState(NewSt);
+}
+
+ProgramStateRef ProgramStateManager::removeGDM(ProgramStateRef state, void *Key) {
+ ProgramState::GenericDataMap OldM = state->getGDM();
+ ProgramState::GenericDataMap NewM = GDMFactory.remove(OldM, Key);
+
+ if (NewM == OldM)
+ return state;
+
+ ProgramState NewState = *state;
+ NewState.GDM = NewM;
+ return getPersistentState(NewState);
+}
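+
+// The generic data map (GDM) is normally accessed through typed traits
+// rather than these raw void* entry points. A minimal sketch, assuming a
+// checker wants a per-symbol flag (the trait name is hypothetical):
+//
+//   REGISTER_MAP_WITH_PROGRAMSTATE(ExampleFlagMap, SymbolRef, bool)
+//   ...
+//   ProgramStateRef NewState = State->set<ExampleFlagMap>(Sym, true);
+//   if (const bool *Flag = NewState->get<ExampleFlagMap>(Sym)) { ... }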
+
+bool ScanReachableSymbols::scan(nonloc::LazyCompoundVal val) {
+ bool wasVisited = !visited.insert(val.getCVData()).second;
+ if (wasVisited)
+ return true;
+
+ StoreManager &StoreMgr = state->getStateManager().getStoreManager();
+ // FIXME: We don't really want to use getBaseRegion() here because pointer
+ // arithmetic doesn't apply, but scanReachableSymbols only accepts base
+ // regions right now.
+ const MemRegion *R = val.getRegion()->getBaseRegion();
+ return StoreMgr.scanReachableSymbols(val.getStore(), R, *this);
+}
+
+bool ScanReachableSymbols::scan(nonloc::CompoundVal val) {
+  for (nonloc::CompoundVal::iterator I = val.begin(), E = val.end();
+       I != E; ++I)
+ if (!scan(*I))
+ return false;
+
+ return true;
+}
+
+bool ScanReachableSymbols::scan(const SymExpr *sym) {
+ bool wasVisited = !visited.insert(sym).second;
+ if (wasVisited)
+ return true;
+
+ if (!visitor.VisitSymbol(sym))
+ return false;
+
+ // TODO: should be rewritten using SymExpr::symbol_iterator.
+ switch (sym->getKind()) {
+ case SymExpr::RegionValueKind:
+ case SymExpr::ConjuredKind:
+ case SymExpr::DerivedKind:
+ case SymExpr::ExtentKind:
+ case SymExpr::MetadataKind:
+ break;
+ case SymExpr::CastSymbolKind:
+ return scan(cast<SymbolCast>(sym)->getOperand());
+ case SymExpr::SymIntKind:
+ return scan(cast<SymIntExpr>(sym)->getLHS());
+ case SymExpr::IntSymKind:
+ return scan(cast<IntSymExpr>(sym)->getRHS());
+ case SymExpr::SymSymKind: {
+ const SymSymExpr *x = cast<SymSymExpr>(sym);
+ return scan(x->getLHS()) && scan(x->getRHS());
+ }
+ }
+ return true;
+}
+
+bool ScanReachableSymbols::scan(SVal val) {
+ if (Optional<loc::MemRegionVal> X = val.getAs<loc::MemRegionVal>())
+ return scan(X->getRegion());
+
+ if (Optional<nonloc::LazyCompoundVal> X =
+ val.getAs<nonloc::LazyCompoundVal>())
+ return scan(*X);
+
+ if (Optional<nonloc::LocAsInteger> X = val.getAs<nonloc::LocAsInteger>())
+ return scan(X->getLoc());
+
+ if (SymbolRef Sym = val.getAsSymbol())
+ return scan(Sym);
+
+ if (const SymExpr *Sym = val.getAsSymbolicExpression())
+ return scan(Sym);
+
+ if (Optional<nonloc::CompoundVal> X = val.getAs<nonloc::CompoundVal>())
+ return scan(*X);
+
+ return true;
+}
+
+bool ScanReachableSymbols::scan(const MemRegion *R) {
+ if (isa<MemSpaceRegion>(R))
+ return true;
+
+ bool wasVisited = !visited.insert(R).second;
+ if (wasVisited)
+ return true;
+
+ if (!visitor.VisitMemRegion(R))
+ return false;
+
+ // If this is a symbolic region, visit the symbol for the region.
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
+ if (!visitor.VisitSymbol(SR->getSymbol()))
+ return false;
+
+ // If this is a subregion, also visit the parent regions.
+ if (const SubRegion *SR = dyn_cast<SubRegion>(R)) {
+ const MemRegion *Super = SR->getSuperRegion();
+ if (!scan(Super))
+ return false;
+
+ // When we reach the topmost region, scan all symbols in it.
+ if (isa<MemSpaceRegion>(Super)) {
+ StoreManager &StoreMgr = state->getStateManager().getStoreManager();
+ if (!StoreMgr.scanReachableSymbols(state->getStore(), SR, *this))
+ return false;
+ }
+ }
+
+ // Regions captured by a block are also implicitly reachable.
+ if (const BlockDataRegion *BDR = dyn_cast<BlockDataRegion>(R)) {
+ BlockDataRegion::referenced_vars_iterator I = BDR->referenced_vars_begin(),
+ E = BDR->referenced_vars_end();
+ for ( ; I != E; ++I) {
+ if (!scan(I.getCapturedRegion()))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool ProgramState::scanReachableSymbols(SVal val, SymbolVisitor& visitor) const {
+ ScanReachableSymbols S(this, visitor);
+ return S.scan(val);
+}
+
+bool ProgramState::scanReachableSymbols(const SVal *I, const SVal *E,
+ SymbolVisitor &visitor) const {
+ ScanReachableSymbols S(this, visitor);
+ for ( ; I != E; ++I) {
+ if (!S.scan(*I))
+ return false;
+ }
+ return true;
+}
+
+bool ProgramState::scanReachableSymbols(const MemRegion * const *I,
+ const MemRegion * const *E,
+ SymbolVisitor &visitor) const {
+ ScanReachableSymbols S(this, visitor);
+ for ( ; I != E; ++I) {
+ if (!S.scan(*I))
+ return false;
+ }
+ return true;
+}
+
+ProgramStateRef ProgramState::addTaint(const Stmt *S,
+ const LocationContext *LCtx,
+ TaintTagType Kind) const {
+ if (const Expr *E = dyn_cast_or_null<Expr>(S))
+ S = E->IgnoreParens();
+
+ SymbolRef Sym = getSVal(S, LCtx).getAsSymbol();
+ if (Sym)
+ return addTaint(Sym, Kind);
+
+  // Otherwise fall back to the region; addTaint(R, Kind) returns the
+  // original state unchanged when taint cannot be added.
+  const MemRegion *R = getSVal(S, LCtx).getAsRegion();
+  return addTaint(R, Kind);
+}
+
+ProgramStateRef ProgramState::addTaint(const MemRegion *R,
+ TaintTagType Kind) const {
+ if (const SymbolicRegion *SR = dyn_cast_or_null<SymbolicRegion>(R))
+ return addTaint(SR->getSymbol(), Kind);
+ return this;
+}
+
+ProgramStateRef ProgramState::addTaint(SymbolRef Sym,
+ TaintTagType Kind) const {
+ // If this is a symbol cast, remove the cast before adding the taint. Taint
+ // is cast agnostic.
+ while (const SymbolCast *SC = dyn_cast<SymbolCast>(Sym))
+ Sym = SC->getOperand();
+
+ ProgramStateRef NewState = set<TaintMap>(Sym, Kind);
+ assert(NewState);
+ return NewState;
+}
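+
+// Because taint is cast agnostic, tainting a SymbolCast is recorded against
+// its underlying operand. A sketch (the symbols are hypothetical):
+//
+//   SymbolRef Raw = ...;      // e.g. a conjured 'char' symbol
+//   SymbolRef Widened = ...;  // a SymbolCast of Raw to 'int'
+//   State = State->addTaint(Widened, TaintTagGeneric);
+//   // State->isTainted(Raw, TaintTagGeneric) is now true as well.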
+
+bool ProgramState::isTainted(const Stmt *S, const LocationContext *LCtx,
+ TaintTagType Kind) const {
+ if (const Expr *E = dyn_cast_or_null<Expr>(S))
+ S = E->IgnoreParens();
+
+ SVal val = getSVal(S, LCtx);
+ return isTainted(val, Kind);
+}
+
+bool ProgramState::isTainted(SVal V, TaintTagType Kind) const {
+ if (const SymExpr *Sym = V.getAsSymExpr())
+ return isTainted(Sym, Kind);
+ if (const MemRegion *Reg = V.getAsRegion())
+ return isTainted(Reg, Kind);
+ return false;
+}
+
+bool ProgramState::isTainted(const MemRegion *Reg, TaintTagType K) const {
+ if (!Reg)
+ return false;
+
+ // Element region (array element) is tainted if either the base or the offset
+ // are tainted.
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(Reg))
+ return isTainted(ER->getSuperRegion(), K) || isTainted(ER->getIndex(), K);
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Reg))
+ return isTainted(SR->getSymbol(), K);
+
+ if (const SubRegion *ER = dyn_cast<SubRegion>(Reg))
+ return isTainted(ER->getSuperRegion(), K);
+
+ return false;
+}
+
+bool ProgramState::isTainted(SymbolRef Sym, TaintTagType Kind) const {
+ if (!Sym)
+ return false;
+
+ // Traverse all the symbols this symbol depends on to see if any are tainted.
+ bool Tainted = false;
+  for (SymExpr::symbol_iterator SI = Sym->symbol_begin(),
+                                SE = Sym->symbol_end();
+       SI != SE; ++SI) {
+ if (!isa<SymbolData>(*SI))
+ continue;
+
+ const TaintTagType *Tag = get<TaintMap>(*SI);
+ Tainted = (Tag && *Tag == Kind);
+
+ // If this is a SymbolDerived with a tainted parent, it's also tainted.
+ if (const SymbolDerived *SD = dyn_cast<SymbolDerived>(*SI))
+ Tainted = Tainted || isTainted(SD->getParentSymbol(), Kind);
+
+ // If memory region is tainted, data is also tainted.
+ if (const SymbolRegionValue *SRV = dyn_cast<SymbolRegionValue>(*SI))
+ Tainted = Tainted || isTainted(SRV->getRegion(), Kind);
+
+    // If this is a SymbolCast from a tainted value, it's also tainted.
+ if (const SymbolCast *SC = dyn_cast<SymbolCast>(*SI))
+ Tainted = Tainted || isTainted(SC->getOperand(), Kind);
+
+ if (Tainted)
+ return true;
+ }
+
+ return Tainted;
+}
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
new file mode 100644
index 0000000..0a2b2e6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -0,0 +1,697 @@
+//== RangeConstraintManager.cpp - Manage range constraints.------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines RangeConstraintManager, a class that tracks simple
+// equality and inequality constraints on symbolic values of ProgramState.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SimpleConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableSet.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+/// A Range represents the closed range [from, to]. The caller must
+/// guarantee that from <= to. Note that Range is immutable, so as not
+/// to subvert RangeSet's immutability.
+namespace {
+class Range : public std::pair<const llvm::APSInt*,
+ const llvm::APSInt*> {
+public:
+ Range(const llvm::APSInt &from, const llvm::APSInt &to)
+ : std::pair<const llvm::APSInt*, const llvm::APSInt*>(&from, &to) {
+ assert(from <= to);
+ }
+ bool Includes(const llvm::APSInt &v) const {
+ return *first <= v && v <= *second;
+ }
+ const llvm::APSInt &From() const {
+ return *first;
+ }
+ const llvm::APSInt &To() const {
+ return *second;
+ }
+ const llvm::APSInt *getConcreteValue() const {
+ return &From() == &To() ? &From() : nullptr;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddPointer(&From());
+ ID.AddPointer(&To());
+ }
+};
+
+
+class RangeTrait : public llvm::ImutContainerInfo<Range> {
+public:
+ // When comparing if one Range is less than another, we should compare
+ // the actual APSInt values instead of their pointers. This keeps the order
+ // consistent (instead of comparing by pointer values) and can potentially
+ // be used to speed up some of the operations in RangeSet.
+ static inline bool isLess(key_type_ref lhs, key_type_ref rhs) {
+ return *lhs.first < *rhs.first || (!(*rhs.first < *lhs.first) &&
+ *lhs.second < *rhs.second);
+ }
+};
+
+/// RangeSet contains a set of ranges. If the set is empty, then the
+/// value of a symbol is overly constrained and there are no possible
+/// values for that symbol.
+class RangeSet {
+ typedef llvm::ImmutableSet<Range, RangeTrait> PrimRangeSet;
+ PrimRangeSet ranges; // no need to make const, since it is an
+ // ImmutableSet - this allows default operator=
+ // to work.
+public:
+ typedef PrimRangeSet::Factory Factory;
+ typedef PrimRangeSet::iterator iterator;
+
+ RangeSet(PrimRangeSet RS) : ranges(RS) {}
+
+ /// Create a new set with all ranges of this set and RS.
+ /// Possible intersections are not checked here.
+ RangeSet addRange(Factory &F, const RangeSet &RS) {
+ PrimRangeSet Ranges(RS.ranges);
+ for (const auto &range : ranges)
+ Ranges = F.add(Ranges, range);
+ return RangeSet(Ranges);
+ }
+
+ iterator begin() const { return ranges.begin(); }
+ iterator end() const { return ranges.end(); }
+
+ bool isEmpty() const { return ranges.isEmpty(); }
+
+ /// Construct a new RangeSet representing '{ [from, to] }'.
+ RangeSet(Factory &F, const llvm::APSInt &from, const llvm::APSInt &to)
+ : ranges(F.add(F.getEmptySet(), Range(from, to))) {}
+
+ /// Profile - Generates a hash profile of this RangeSet for use
+ /// by FoldingSet.
+ void Profile(llvm::FoldingSetNodeID &ID) const { ranges.Profile(ID); }
+
+  /// getConcreteValue - If a symbol is constrained to equal a specific
+  /// integer constant then this method returns that value. Otherwise, it
+  /// returns nullptr.
+ const llvm::APSInt* getConcreteValue() const {
+ return ranges.isSingleton() ? ranges.begin()->getConcreteValue() : nullptr;
+ }
+
+private:
+ void IntersectInRange(BasicValueFactory &BV, Factory &F,
+ const llvm::APSInt &Lower,
+ const llvm::APSInt &Upper,
+ PrimRangeSet &newRanges,
+ PrimRangeSet::iterator &i,
+ PrimRangeSet::iterator &e) const {
+ // There are six cases for each range R in the set:
+ // 1. R is entirely before the intersection range.
+ // 2. R is entirely after the intersection range.
+ // 3. R contains the entire intersection range.
+ // 4. R starts before the intersection range and ends in the middle.
+ // 5. R starts in the middle of the intersection range and ends after it.
+ // 6. R is entirely contained in the intersection range.
+ // These correspond to each of the conditions below.
+ for (/* i = begin(), e = end() */; i != e; ++i) {
+ if (i->To() < Lower) {
+ continue;
+ }
+ if (i->From() > Upper) {
+ break;
+ }
+
+ if (i->Includes(Lower)) {
+ if (i->Includes(Upper)) {
+ newRanges = F.add(newRanges, Range(BV.getValue(Lower),
+ BV.getValue(Upper)));
+ break;
+ } else
+ newRanges = F.add(newRanges, Range(BV.getValue(Lower), i->To()));
+ } else {
+ if (i->Includes(Upper)) {
+ newRanges = F.add(newRanges, Range(i->From(), BV.getValue(Upper)));
+ break;
+ } else
+ newRanges = F.add(newRanges, *i);
+ }
+ }
+ }
+
+ const llvm::APSInt &getMinValue() const {
+ assert(!isEmpty());
+ return ranges.begin()->From();
+ }
+
+ bool pin(llvm::APSInt &Lower, llvm::APSInt &Upper) const {
+ // This function has nine cases, the cartesian product of range-testing
+ // both the upper and lower bounds against the symbol's type.
+ // Each case requires a different pinning operation.
+ // The function returns false if the described range is entirely outside
+ // the range of values for the associated symbol.
+ APSIntType Type(getMinValue());
+ APSIntType::RangeTestResultKind LowerTest = Type.testInRange(Lower, true);
+ APSIntType::RangeTestResultKind UpperTest = Type.testInRange(Upper, true);
+
+ switch (LowerTest) {
+ case APSIntType::RTR_Below:
+ switch (UpperTest) {
+ case APSIntType::RTR_Below:
+ // The entire range is outside the symbol's set of possible values.
+ // If this is a conventionally-ordered range, the state is infeasible.
+ if (Lower < Upper)
+ return false;
+
+ // However, if the range wraps around, it spans all possible values.
+ Lower = Type.getMinValue();
+ Upper = Type.getMaxValue();
+ break;
+ case APSIntType::RTR_Within:
+ // The range starts below what's possible but ends within it. Pin.
+ Lower = Type.getMinValue();
+ Type.apply(Upper);
+ break;
+ case APSIntType::RTR_Above:
+ // The range spans all possible values for the symbol. Pin.
+ Lower = Type.getMinValue();
+ Upper = Type.getMaxValue();
+ break;
+ }
+ break;
+ case APSIntType::RTR_Within:
+ switch (UpperTest) {
+ case APSIntType::RTR_Below:
+      // The range wraps around, but the wrapped portion lies entirely below
+      // the symbol's possible values; only [Lower, Max] remains.
+ Type.apply(Lower);
+ Upper = Type.getMaxValue();
+ break;
+ case APSIntType::RTR_Within:
+ // The range may or may not wrap around, but both limits are valid.
+ Type.apply(Lower);
+ Type.apply(Upper);
+ break;
+ case APSIntType::RTR_Above:
+ // The range starts within what's possible but ends above it. Pin.
+ Type.apply(Lower);
+ Upper = Type.getMaxValue();
+ break;
+ }
+ break;
+ case APSIntType::RTR_Above:
+ switch (UpperTest) {
+ case APSIntType::RTR_Below:
+ // The range wraps but is outside the symbol's set of possible values.
+ return false;
+ case APSIntType::RTR_Within:
+ // The range starts above what's possible but ends within it (wrap).
+ Lower = Type.getMinValue();
+ Type.apply(Upper);
+ break;
+ case APSIntType::RTR_Above:
+ // The entire range is outside the symbol's set of possible values.
+ // If this is a conventionally-ordered range, the state is infeasible.
+ if (Lower < Upper)
+ return false;
+
+ // However, if the range wraps around, it spans all possible values.
+ Lower = Type.getMinValue();
+ Upper = Type.getMaxValue();
+ break;
+ }
+ break;
+ }
+
+ return true;
+ }
+
+public:
+ // Returns a set containing the values in the receiving set, intersected with
+ // the closed range [Lower, Upper]. Unlike the Range type, this range uses
+ // modular arithmetic, corresponding to the common treatment of C integer
+ // overflow. Thus, if the Lower bound is greater than the Upper bound, the
+ // range is taken to wrap around. This is equivalent to taking the
+ // intersection with the two ranges [Min, Upper] and [Lower, Max],
+ // or, alternatively, /removing/ all integers between Upper and Lower.
+ RangeSet Intersect(BasicValueFactory &BV, Factory &F,
+ llvm::APSInt Lower, llvm::APSInt Upper) const {
+ if (!pin(Lower, Upper))
+ return F.getEmptySet();
+
+ PrimRangeSet newRanges = F.getEmptySet();
+
+ PrimRangeSet::iterator i = begin(), e = end();
+ if (Lower <= Upper)
+ IntersectInRange(BV, F, Lower, Upper, newRanges, i, e);
+ else {
+ // The order of the next two statements is important!
+ // IntersectInRange() does not reset the iteration state for i and e.
+      // Therefore, the lower range must be handled first.
+ IntersectInRange(BV, F, BV.getMinValue(Upper), Upper, newRanges, i, e);
+ IntersectInRange(BV, F, Lower, BV.getMaxValue(Lower), newRanges, i, e);
+ }
+
+ return newRanges;
+ }
+
+ void print(raw_ostream &os) const {
+ bool isFirst = true;
+ os << "{ ";
+ for (iterator i = begin(), e = end(); i != e; ++i) {
+ if (isFirst)
+ isFirst = false;
+ else
+ os << ", ";
+
+ os << '[' << i->From().toString(10) << ", " << i->To().toString(10)
+ << ']';
+ }
+ os << " }";
+ }
+
+ bool operator==(const RangeSet &other) const {
+ return ranges == other.ranges;
+ }
+};
+} // end anonymous namespace
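+
+// Wraparound sketch (illustrative): for an 8-bit unsigned symbol currently
+// constrained to { [0, 255] }, intersecting with the modular range [254, 1]
+// keeps the union of [Min, 1] and [254, Max] rather than an empty set:
+//
+//   RangeSet R(F, BV.getValue(0, Ty), BV.getValue(255, Ty)); // Ty: uint8_t
+//   RangeSet W = R.Intersect(BV, F, BV.getValue(254, Ty), BV.getValue(1, Ty));
+//   // W is { [0, 1], [254, 255] }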
+
+REGISTER_TRAIT_WITH_PROGRAMSTATE(ConstraintRange,
+ CLANG_ENTO_PROGRAMSTATE_MAP(SymbolRef,
+ RangeSet))
+
+namespace {
+class RangeConstraintManager : public SimpleConstraintManager {
+ RangeSet GetRange(ProgramStateRef state, SymbolRef sym);
+public:
+ RangeConstraintManager(SubEngine *subengine, SValBuilder &SVB)
+ : SimpleConstraintManager(subengine, SVB) {}
+
+ ProgramStateRef assumeSymNE(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) override;
+
+ ProgramStateRef assumeSymEQ(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) override;
+
+ ProgramStateRef assumeSymLT(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) override;
+
+ ProgramStateRef assumeSymGT(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) override;
+
+ ProgramStateRef assumeSymGE(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) override;
+
+ ProgramStateRef assumeSymLE(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) override;
+
+ ProgramStateRef assumeSymbolWithinInclusiveRange(
+ ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
+ const llvm::APSInt &To, const llvm::APSInt &Adjustment) override;
+
+ ProgramStateRef assumeSymbolOutOfInclusiveRange(
+ ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
+ const llvm::APSInt &To, const llvm::APSInt &Adjustment) override;
+
+ const llvm::APSInt* getSymVal(ProgramStateRef St,
+ SymbolRef sym) const override;
+ ConditionTruthVal checkNull(ProgramStateRef State, SymbolRef Sym) override;
+
+ ProgramStateRef removeDeadBindings(ProgramStateRef St,
+ SymbolReaper& SymReaper) override;
+
+ void print(ProgramStateRef St, raw_ostream &Out,
+ const char* nl, const char *sep) override;
+
+private:
+ RangeSet::Factory F;
+ RangeSet getSymLTRange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment);
+ RangeSet getSymGTRange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment);
+ RangeSet getSymLERange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment);
+ RangeSet getSymLERange(const RangeSet &RS, const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment);
+ RangeSet getSymGERange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment);
+};
+
+} // end anonymous namespace
+
+std::unique_ptr<ConstraintManager>
+ento::CreateRangeConstraintManager(ProgramStateManager &StMgr, SubEngine *Eng) {
+ return llvm::make_unique<RangeConstraintManager>(Eng, StMgr.getSValBuilder());
+}
+
+const llvm::APSInt* RangeConstraintManager::getSymVal(ProgramStateRef St,
+ SymbolRef sym) const {
+ const ConstraintRangeTy::data_type *T = St->get<ConstraintRange>(sym);
+ return T ? T->getConcreteValue() : nullptr;
+}
+
+ConditionTruthVal RangeConstraintManager::checkNull(ProgramStateRef State,
+ SymbolRef Sym) {
+ const RangeSet *Ranges = State->get<ConstraintRange>(Sym);
+
+ // If we don't have any information about this symbol, it's underconstrained.
+ if (!Ranges)
+ return ConditionTruthVal();
+
+ // If we have a concrete value, see if it's zero.
+ if (const llvm::APSInt *Value = Ranges->getConcreteValue())
+ return *Value == 0;
+
+ BasicValueFactory &BV = getBasicVals();
+ APSIntType IntType = BV.getAPSIntType(Sym->getType());
+ llvm::APSInt Zero = IntType.getZeroValue();
+
+ // Check if zero is in the set of possible values.
+ if (Ranges->Intersect(BV, F, Zero, Zero).isEmpty())
+ return false;
+
+ // Zero is a possible value, but it is not the /only/ possible value.
+ return ConditionTruthVal();
+}
+
+/// Scan all symbols referenced by the constraints. If a symbol is not alive
+/// according to the SymbolReaper, remove its constraints.
+ProgramStateRef
+RangeConstraintManager::removeDeadBindings(ProgramStateRef state,
+ SymbolReaper& SymReaper) {
+
+ ConstraintRangeTy CR = state->get<ConstraintRange>();
+ ConstraintRangeTy::Factory& CRFactory = state->get_context<ConstraintRange>();
+
+ for (ConstraintRangeTy::iterator I = CR.begin(), E = CR.end(); I != E; ++I) {
+ SymbolRef sym = I.getKey();
+ if (SymReaper.maybeDead(sym))
+ CR = CRFactory.remove(CR, sym);
+ }
+
+ return state->set<ConstraintRange>(CR);
+}
+
+RangeSet
+RangeConstraintManager::GetRange(ProgramStateRef state, SymbolRef sym) {
+ if (ConstraintRangeTy::data_type* V = state->get<ConstraintRange>(sym))
+ return *V;
+
+ // Lazily generate a new RangeSet representing all possible values for the
+ // given symbol type.
+ BasicValueFactory &BV = getBasicVals();
+ QualType T = sym->getType();
+
+ RangeSet Result(F, BV.getMinValue(T), BV.getMaxValue(T));
+
+ // Special case: references are known to be non-zero.
+ if (T->isReferenceType()) {
+ APSIntType IntType = BV.getAPSIntType(T);
+ Result = Result.Intersect(BV, F, ++IntType.getZeroValue(),
+ --IntType.getZeroValue());
+ }
+
+ return Result;
+}
+
+//===----------------------------------------------------------------------===//
+// assumeSymX methods: public interface for RangeConstraintManager.
+//===----------------------------------------------------------------------===//
+
+// The syntax for ranges below is mathematical, using [x, y] for closed ranges
+// and (x, y) for open ranges. These ranges are modular, corresponding to
+// a common treatment of C integer overflow. This means that these methods
+// do not have to worry about overflow; RangeSet::Intersect can handle such a
+// "wraparound" range.
+// As an example, the range [UINT_MAX-1, 3) contains five values: UINT_MAX-1,
+// UINT_MAX, 0, 1, and 2.
+
+ProgramStateRef
+RangeConstraintManager::assumeSymNE(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ // Before we do any real work, see if the value can even show up.
+ APSIntType AdjustmentType(Adjustment);
+ if (AdjustmentType.testInRange(Int, true) != APSIntType::RTR_Within)
+ return St;
+
+ llvm::APSInt Lower = AdjustmentType.convert(Int) - Adjustment;
+ llvm::APSInt Upper = Lower;
+ --Lower;
+ ++Upper;
+
+ // [Int-Adjustment+1, Int-Adjustment-1]
+ // Notice that the lower bound is greater than the upper bound.
+ RangeSet New = GetRange(St, Sym).Intersect(getBasicVals(), F, Upper, Lower);
+ return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
+}
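+
+// For example: assuming 'Sym != 42' with a zero adjustment intersects the
+// existing range with the wrapped range [43, 41], i.e. every representable
+// value except 42. If the prior range was exactly { [42, 42] }, the
+// intersection is empty and nullptr signals an infeasible state.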
+
+ProgramStateRef
+RangeConstraintManager::assumeSymEQ(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ // Before we do any real work, see if the value can even show up.
+ APSIntType AdjustmentType(Adjustment);
+ if (AdjustmentType.testInRange(Int, true) != APSIntType::RTR_Within)
+ return nullptr;
+
+ // [Int-Adjustment, Int-Adjustment]
+ llvm::APSInt AdjInt = AdjustmentType.convert(Int) - Adjustment;
+ RangeSet New = GetRange(St, Sym).Intersect(getBasicVals(), F, AdjInt, AdjInt);
+ return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
+}
+
+RangeSet RangeConstraintManager::getSymLTRange(ProgramStateRef St,
+ SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ // Before we do any real work, see if the value can even show up.
+ APSIntType AdjustmentType(Adjustment);
+ switch (AdjustmentType.testInRange(Int, true)) {
+ case APSIntType::RTR_Below:
+ return F.getEmptySet();
+ case APSIntType::RTR_Within:
+ break;
+ case APSIntType::RTR_Above:
+ return GetRange(St, Sym);
+ }
+
+ // Special case for Int == Min. This is always false.
+ llvm::APSInt ComparisonVal = AdjustmentType.convert(Int);
+ llvm::APSInt Min = AdjustmentType.getMinValue();
+ if (ComparisonVal == Min)
+ return F.getEmptySet();
+
+ llvm::APSInt Lower = Min - Adjustment;
+ llvm::APSInt Upper = ComparisonVal - Adjustment;
+ --Upper;
+
+ return GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+}
+
+ProgramStateRef
+RangeConstraintManager::assumeSymLT(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ RangeSet New = getSymLTRange(St, Sym, Int, Adjustment);
+ return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
+}
+
+RangeSet
+RangeConstraintManager::getSymGTRange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ // Before we do any real work, see if the value can even show up.
+ APSIntType AdjustmentType(Adjustment);
+ switch (AdjustmentType.testInRange(Int, true)) {
+ case APSIntType::RTR_Below:
+ return GetRange(St, Sym);
+ case APSIntType::RTR_Within:
+ break;
+ case APSIntType::RTR_Above:
+ return F.getEmptySet();
+ }
+
+ // Special case for Int == Max. This is always false.
+ llvm::APSInt ComparisonVal = AdjustmentType.convert(Int);
+ llvm::APSInt Max = AdjustmentType.getMaxValue();
+ if (ComparisonVal == Max)
+ return F.getEmptySet();
+
+ llvm::APSInt Lower = ComparisonVal - Adjustment;
+ llvm::APSInt Upper = Max - Adjustment;
+ ++Lower;
+
+ return GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+}
+
+ProgramStateRef
+RangeConstraintManager::assumeSymGT(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ RangeSet New = getSymGTRange(St, Sym, Int, Adjustment);
+ return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
+}
+
+RangeSet
+RangeConstraintManager::getSymGERange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ // Before we do any real work, see if the value can even show up.
+ APSIntType AdjustmentType(Adjustment);
+ switch (AdjustmentType.testInRange(Int, true)) {
+ case APSIntType::RTR_Below:
+ return GetRange(St, Sym);
+ case APSIntType::RTR_Within:
+ break;
+ case APSIntType::RTR_Above:
+ return F.getEmptySet();
+ }
+
+ // Special case for Int == Min. This is always feasible.
+ llvm::APSInt ComparisonVal = AdjustmentType.convert(Int);
+ llvm::APSInt Min = AdjustmentType.getMinValue();
+ if (ComparisonVal == Min)
+ return GetRange(St, Sym);
+
+ llvm::APSInt Max = AdjustmentType.getMaxValue();
+ llvm::APSInt Lower = ComparisonVal - Adjustment;
+ llvm::APSInt Upper = Max - Adjustment;
+
+ return GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+}
+
+ProgramStateRef
+RangeConstraintManager::assumeSymGE(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ RangeSet New = getSymGERange(St, Sym, Int, Adjustment);
+ return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
+}
+
+RangeSet
+RangeConstraintManager::getSymLERange(const RangeSet &RS,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ // Before we do any real work, see if the value can even show up.
+ APSIntType AdjustmentType(Adjustment);
+ switch (AdjustmentType.testInRange(Int, true)) {
+ case APSIntType::RTR_Below:
+ return F.getEmptySet();
+ case APSIntType::RTR_Within:
+ break;
+ case APSIntType::RTR_Above:
+ return RS;
+ }
+
+ // Special case for Int == Max. This is always feasible.
+ llvm::APSInt ComparisonVal = AdjustmentType.convert(Int);
+ llvm::APSInt Max = AdjustmentType.getMaxValue();
+ if (ComparisonVal == Max)
+ return RS;
+
+ llvm::APSInt Min = AdjustmentType.getMinValue();
+ llvm::APSInt Lower = Min - Adjustment;
+ llvm::APSInt Upper = ComparisonVal - Adjustment;
+
+ return RS.Intersect(getBasicVals(), F, Lower, Upper);
+}
+
+RangeSet
+RangeConstraintManager::getSymLERange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ // Before we do any real work, see if the value can even show up.
+ APSIntType AdjustmentType(Adjustment);
+ switch (AdjustmentType.testInRange(Int, true)) {
+ case APSIntType::RTR_Below:
+ return F.getEmptySet();
+ case APSIntType::RTR_Within:
+ break;
+ case APSIntType::RTR_Above:
+ return GetRange(St, Sym);
+ }
+
+ // Special case for Int == Max. This is always feasible.
+ llvm::APSInt ComparisonVal = AdjustmentType.convert(Int);
+ llvm::APSInt Max = AdjustmentType.getMaxValue();
+ if (ComparisonVal == Max)
+ return GetRange(St, Sym);
+
+ llvm::APSInt Min = AdjustmentType.getMinValue();
+ llvm::APSInt Lower = Min - Adjustment;
+ llvm::APSInt Upper = ComparisonVal - Adjustment;
+
+ return GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+}
+
+ProgramStateRef
+RangeConstraintManager::assumeSymLE(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ RangeSet New = getSymLERange(St, Sym, Int, Adjustment);
+ return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
+}
+
+ProgramStateRef
+RangeConstraintManager::assumeSymbolWithinInclusiveRange(
+ ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
+ const llvm::APSInt &To, const llvm::APSInt &Adjustment) {
+ RangeSet New = getSymGERange(State, Sym, From, Adjustment);
+ if (New.isEmpty())
+ return nullptr;
+ New = getSymLERange(New, To, Adjustment);
+ return New.isEmpty() ? nullptr : State->set<ConstraintRange>(Sym, New);
+}
+
+ProgramStateRef
+RangeConstraintManager::assumeSymbolOutOfInclusiveRange(
+ ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
+ const llvm::APSInt &To, const llvm::APSInt &Adjustment) {
+ RangeSet RangeLT = getSymLTRange(State, Sym, From, Adjustment);
+ RangeSet RangeGT = getSymGTRange(State, Sym, To, Adjustment);
+ RangeSet New(RangeLT.addRange(F, RangeGT));
+ return New.isEmpty() ? nullptr : State->set<ConstraintRange>(Sym, New);
+}
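+
+// For example: for an 'int' symbol with no prior constraints, assuming it
+// lies outside [10, 20] computes the LT range { [INT_MIN, 9] } and the GT
+// range { [21, INT_MAX] } independently, then unions them via addRange(),
+// producing { [INT_MIN, 9], [21, INT_MAX] }.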
+
+//===----------------------------------------------------------------------===//
+// Pretty-printing.
+//===----------------------------------------------------------------------===//
+
+void RangeConstraintManager::print(ProgramStateRef St, raw_ostream &Out,
+ const char* nl, const char *sep) {
+
+ ConstraintRangeTy Ranges = St->get<ConstraintRange>();
+
+ if (Ranges.isEmpty()) {
+ Out << nl << sep << "Ranges are empty." << nl;
+ return;
+ }
+
+ Out << nl << sep << "Ranges of symbol values:";
+  for (ConstraintRangeTy::iterator I = Ranges.begin(), E = Ranges.end();
+       I != E; ++I) {
+ Out << nl << ' ' << I.getKey() << " : ";
+ I.getData().print(Out);
+ }
+ Out << nl;
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
new file mode 100644
index 0000000..a63f6e4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -0,0 +1,2469 @@
+//== RegionStore.cpp - Field-sensitive store model --------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a basic region store model. In this model we do have
+// field sensitivity, but we assume nothing about the heap shape, so
+// recursive data structures are largely ignored. Basically we do 1-limiting
+// analysis. Parameter pointers are assumed not to alias. Pointee objects of
+// parameters are created lazily.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/Attr.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
+#include "llvm/ADT/ImmutableList.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// Representation of binding keys.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class BindingKey {
+public:
+ enum Kind { Default = 0x0, Direct = 0x1 };
+private:
+ enum { Symbolic = 0x2 };
+
+ llvm::PointerIntPair<const MemRegion *, 2> P;
+ uint64_t Data;
+
+ /// Create a key for a binding to region \p r, which has a symbolic offset
+ /// from region \p Base.
+ explicit BindingKey(const SubRegion *r, const SubRegion *Base, Kind k)
+ : P(r, k | Symbolic), Data(reinterpret_cast<uintptr_t>(Base)) {
+ assert(r && Base && "Must have known regions.");
+ assert(getConcreteOffsetRegion() == Base && "Failed to store base region");
+ }
+
+ /// Create a key for a binding at \p offset from base region \p r.
+ explicit BindingKey(const MemRegion *r, uint64_t offset, Kind k)
+ : P(r, k), Data(offset) {
+ assert(r && "Must have known regions.");
+ assert(getOffset() == offset && "Failed to store offset");
+ assert((r == r->getBaseRegion() || isa<ObjCIvarRegion>(r)) && "Not a base");
+ }
+public:
+
+ bool isDirect() const { return P.getInt() & Direct; }
+ bool hasSymbolicOffset() const { return P.getInt() & Symbolic; }
+
+ const MemRegion *getRegion() const { return P.getPointer(); }
+ uint64_t getOffset() const {
+ assert(!hasSymbolicOffset());
+ return Data;
+ }
+
+ const SubRegion *getConcreteOffsetRegion() const {
+ assert(hasSymbolicOffset());
+ return reinterpret_cast<const SubRegion *>(static_cast<uintptr_t>(Data));
+ }
+
+ const MemRegion *getBaseRegion() const {
+ if (hasSymbolicOffset())
+ return getConcreteOffsetRegion()->getBaseRegion();
+ return getRegion()->getBaseRegion();
+ }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.AddPointer(P.getOpaqueValue());
+ ID.AddInteger(Data);
+ }
+
+ static BindingKey Make(const MemRegion *R, Kind k);
+
+ bool operator<(const BindingKey &X) const {
+ if (P.getOpaqueValue() < X.P.getOpaqueValue())
+ return true;
+ if (P.getOpaqueValue() > X.P.getOpaqueValue())
+ return false;
+ return Data < X.Data;
+ }
+
+ bool operator==(const BindingKey &X) const {
+ return P.getOpaqueValue() == X.P.getOpaqueValue() &&
+ Data == X.Data;
+ }
+
+ void dump() const;
+};
+} // end anonymous namespace
+
+BindingKey BindingKey::Make(const MemRegion *R, Kind k) {
+ const RegionOffset &RO = R->getAsOffset();
+ if (RO.hasSymbolicOffset())
+ return BindingKey(cast<SubRegion>(R), cast<SubRegion>(RO.getRegion()), k);
+
+ return BindingKey(RO.getRegion(), RO.getOffset(), k);
+}
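+
+// For example (a sketch): given 'struct S { int x; int y; } s;', a direct
+// binding to 's.y' is keyed by the base region of 's' plus a concrete bit
+// offset (e.g. 32 on a target with 32-bit 'int'), whereas a binding to
+// 'buf[i]' with a symbolic index 'i' produces a symbolic-offset key that
+// records the region at which the offset became symbolic.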
+
+namespace llvm {
+ static inline
+ raw_ostream &operator<<(raw_ostream &os, BindingKey K) {
+ os << '(' << K.getRegion();
+ if (!K.hasSymbolicOffset())
+ os << ',' << K.getOffset();
+ os << ',' << (K.isDirect() ? "direct" : "default")
+ << ')';
+ return os;
+ }
+
+ template <typename T> struct isPodLike;
+ template <> struct isPodLike<BindingKey> {
+ static const bool value = true;
+ };
+} // end llvm namespace
+
+LLVM_DUMP_METHOD void BindingKey::dump() const { llvm::errs() << *this; }
+
+//===----------------------------------------------------------------------===//
+// Actual Store type.
+//===----------------------------------------------------------------------===//
+
+typedef llvm::ImmutableMap<BindingKey, SVal> ClusterBindings;
+typedef llvm::ImmutableMapRef<BindingKey, SVal> ClusterBindingsRef;
+typedef std::pair<BindingKey, SVal> BindingPair;
+
+typedef llvm::ImmutableMap<const MemRegion *, ClusterBindings>
+ RegionBindings;
+
+namespace {
+class RegionBindingsRef : public llvm::ImmutableMapRef<const MemRegion *,
+ ClusterBindings> {
+ ClusterBindings::Factory *CBFactory;
+
+public:
+ typedef llvm::ImmutableMapRef<const MemRegion *, ClusterBindings>
+ ParentTy;
+
+ RegionBindingsRef(ClusterBindings::Factory &CBFactory,
+ const RegionBindings::TreeTy *T,
+ RegionBindings::TreeTy::Factory *F)
+ : llvm::ImmutableMapRef<const MemRegion *, ClusterBindings>(T, F),
+ CBFactory(&CBFactory) {}
+
+ RegionBindingsRef(const ParentTy &P, ClusterBindings::Factory &CBFactory)
+ : llvm::ImmutableMapRef<const MemRegion *, ClusterBindings>(P),
+ CBFactory(&CBFactory) {}
+
+ RegionBindingsRef add(key_type_ref K, data_type_ref D) const {
+ return RegionBindingsRef(static_cast<const ParentTy *>(this)->add(K, D),
+ *CBFactory);
+ }
+
+ RegionBindingsRef remove(key_type_ref K) const {
+ return RegionBindingsRef(static_cast<const ParentTy *>(this)->remove(K),
+ *CBFactory);
+ }
+
+ RegionBindingsRef addBinding(BindingKey K, SVal V) const;
+
+ RegionBindingsRef addBinding(const MemRegion *R,
+ BindingKey::Kind k, SVal V) const;
+
+ const SVal *lookup(BindingKey K) const;
+ const SVal *lookup(const MemRegion *R, BindingKey::Kind k) const;
+ using llvm::ImmutableMapRef<const MemRegion *, ClusterBindings>::lookup;
+
+ RegionBindingsRef removeBinding(BindingKey K);
+
+ RegionBindingsRef removeBinding(const MemRegion *R,
+ BindingKey::Kind k);
+
+ RegionBindingsRef removeBinding(const MemRegion *R) {
+ return removeBinding(R, BindingKey::Direct).
+ removeBinding(R, BindingKey::Default);
+ }
+
+ Optional<SVal> getDirectBinding(const MemRegion *R) const;
+
+ /// getDefaultBinding - Returns an SVal* representing an optional default
+ /// binding associated with a region and its subregions.
+ Optional<SVal> getDefaultBinding(const MemRegion *R) const;
+
+ /// Return the internal tree as a Store.
+ Store asStore() const {
+ return asImmutableMap().getRootWithoutRetain();
+ }
+
+ void dump(raw_ostream &OS, const char *nl) const {
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ const ClusterBindings &Cluster = I.getData();
+ for (ClusterBindings::iterator CI = Cluster.begin(), CE = Cluster.end();
+ CI != CE; ++CI) {
+ OS << ' ' << CI.getKey() << " : " << CI.getData() << nl;
+ }
+ OS << nl;
+ }
+ }
+
+ LLVM_DUMP_METHOD void dump() const { dump(llvm::errs(), "\n"); }
+};
+} // end anonymous namespace
+
+typedef const RegionBindingsRef& RegionBindingsConstRef;
+
+Optional<SVal> RegionBindingsRef::getDirectBinding(const MemRegion *R) const {
+ return Optional<SVal>::create(lookup(R, BindingKey::Direct));
+}
+
+Optional<SVal> RegionBindingsRef::getDefaultBinding(const MemRegion *R) const {
+ if (R->isBoundable())
+ if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R))
+ if (TR->getValueType()->isUnionType())
+ return UnknownVal();
+
+ return Optional<SVal>::create(lookup(R, BindingKey::Default));
+}
+
+RegionBindingsRef RegionBindingsRef::addBinding(BindingKey K, SVal V) const {
+ const MemRegion *Base = K.getBaseRegion();
+
+ const ClusterBindings *ExistingCluster = lookup(Base);
+ ClusterBindings Cluster =
+ (ExistingCluster ? *ExistingCluster : CBFactory->getEmptyMap());
+
+ ClusterBindings NewCluster = CBFactory->add(Cluster, K, V);
+ return add(Base, NewCluster);
+}
+
+
+RegionBindingsRef RegionBindingsRef::addBinding(const MemRegion *R,
+ BindingKey::Kind k,
+ SVal V) const {
+ return addBinding(BindingKey::Make(R, k), V);
+}
+
+const SVal *RegionBindingsRef::lookup(BindingKey K) const {
+ const ClusterBindings *Cluster = lookup(K.getBaseRegion());
+ if (!Cluster)
+ return nullptr;
+ return Cluster->lookup(K);
+}
+
+const SVal *RegionBindingsRef::lookup(const MemRegion *R,
+ BindingKey::Kind k) const {
+ return lookup(BindingKey::Make(R, k));
+}
+
+RegionBindingsRef RegionBindingsRef::removeBinding(BindingKey K) {
+ const MemRegion *Base = K.getBaseRegion();
+ const ClusterBindings *Cluster = lookup(Base);
+ if (!Cluster)
+ return *this;
+
+ ClusterBindings NewCluster = CBFactory->remove(*Cluster, K);
+ if (NewCluster.isEmpty())
+ return remove(Base);
+ return add(Base, NewCluster);
+}
+
+RegionBindingsRef RegionBindingsRef::removeBinding(const MemRegion *R,
+ BindingKey::Kind k){
+ return removeBinding(BindingKey::Make(R, k));
+}
+
+//===----------------------------------------------------------------------===//
+// Fine-grained control of RegionStoreManager.
+//===----------------------------------------------------------------------===//
+
+namespace {
+struct minimal_features_tag {};
+struct maximal_features_tag {};
+
+class RegionStoreFeatures {
+ bool SupportsFields;
+public:
+ RegionStoreFeatures(minimal_features_tag) :
+ SupportsFields(false) {}
+
+ RegionStoreFeatures(maximal_features_tag) :
+ SupportsFields(true) {}
+
+ void enableFields(bool t) { SupportsFields = t; }
+
+ bool supportsFields() const { return SupportsFields; }
+};
+}
+
+//===----------------------------------------------------------------------===//
+// Main RegionStore logic.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class invalidateRegionsWorker;
+
+class RegionStoreManager : public StoreManager {
+public:
+ const RegionStoreFeatures Features;
+
+ RegionBindings::Factory RBFactory;
+ mutable ClusterBindings::Factory CBFactory;
+
+ typedef std::vector<SVal> SValListTy;
+private:
+ typedef llvm::DenseMap<const LazyCompoundValData *,
+ SValListTy> LazyBindingsMapTy;
+ LazyBindingsMapTy LazyBindingsMap;
+
+ /// The largest number of fields a struct can have and still be
+ /// considered "small".
+ ///
+ /// This is currently used to decide whether or not it is worth "forcing" a
+ /// LazyCompoundVal on bind.
+ ///
+  /// This is controlled by the 'region-store-small-struct-limit' option.
+ /// To disable all small-struct-dependent behavior, set the option to "0".
+ unsigned SmallStructLimit;
+
+ /// \brief A helper used to populate the work list with the given set of
+ /// regions.
+ void populateWorkList(invalidateRegionsWorker &W,
+ ArrayRef<SVal> Values,
+ InvalidatedRegions *TopLevelRegions);
+
+public:
+ RegionStoreManager(ProgramStateManager& mgr, const RegionStoreFeatures &f)
+ : StoreManager(mgr), Features(f),
+ RBFactory(mgr.getAllocator()), CBFactory(mgr.getAllocator()),
+ SmallStructLimit(0) {
+ if (SubEngine *Eng = StateMgr.getOwningEngine()) {
+ AnalyzerOptions &Options = Eng->getAnalysisManager().options;
+ SmallStructLimit =
+ Options.getOptionAsInteger("region-store-small-struct-limit", 2);
+ }
+ }
+
+
+ /// setImplicitDefaultValue - Set the default binding for the provided
+ /// MemRegion to the value implicitly defined for compound literals when
+ /// the value is not specified.
+ RegionBindingsRef setImplicitDefaultValue(RegionBindingsConstRef B,
+ const MemRegion *R, QualType T);
+
+ /// ArrayToPointer - Emulates the "decay" of an array to a pointer
+ /// type. 'Array' represents the lvalue of the array being decayed
+ /// to a pointer, and the returned SVal represents the decayed
+ /// version of that lvalue (i.e., a pointer to the first element of
+ /// the array). This is called by ExprEngine when evaluating
+ /// casts from arrays to pointers.
+ SVal ArrayToPointer(Loc Array, QualType ElementTy) override;
+
+ StoreRef getInitialStore(const LocationContext *InitLoc) override {
+ return StoreRef(RBFactory.getEmptyMap().getRootWithoutRetain(), *this);
+ }
+
+ //===-------------------------------------------------------------------===//
+ // Binding values to regions.
+ //===-------------------------------------------------------------------===//
+ RegionBindingsRef invalidateGlobalRegion(MemRegion::Kind K,
+ const Expr *Ex,
+ unsigned Count,
+ const LocationContext *LCtx,
+ RegionBindingsRef B,
+ InvalidatedRegions *Invalidated);
+
+ StoreRef invalidateRegions(Store store,
+ ArrayRef<SVal> Values,
+ const Expr *E, unsigned Count,
+ const LocationContext *LCtx,
+ const CallEvent *Call,
+ InvalidatedSymbols &IS,
+ RegionAndSymbolInvalidationTraits &ITraits,
+ InvalidatedRegions *Invalidated,
+ InvalidatedRegions *InvalidatedTopLevel) override;
+
+ bool scanReachableSymbols(Store S, const MemRegion *R,
+ ScanReachableSymbols &Callbacks) override;
+
+ RegionBindingsRef removeSubRegionBindings(RegionBindingsConstRef B,
+ const SubRegion *R);
+
+public: // Part of public interface to class.
+
+ StoreRef Bind(Store store, Loc LV, SVal V) override {
+ return StoreRef(bind(getRegionBindings(store), LV, V).asStore(), *this);
+ }
+
+ RegionBindingsRef bind(RegionBindingsConstRef B, Loc LV, SVal V);
+
+ // BindDefault is only used to initialize a region with a default value.
+ StoreRef BindDefault(Store store, const MemRegion *R, SVal V) override {
+ RegionBindingsRef B = getRegionBindings(store);
+ assert(!B.lookup(R, BindingKey::Direct));
+
+ BindingKey Key = BindingKey::Make(R, BindingKey::Default);
+ if (B.lookup(Key)) {
+ const SubRegion *SR = cast<SubRegion>(R);
+ assert(SR->getAsOffset().getOffset() ==
+ SR->getSuperRegion()->getAsOffset().getOffset() &&
+ "A default value must come from a super-region");
+ B = removeSubRegionBindings(B, SR);
+ } else {
+ B = B.addBinding(Key, V);
+ }
+
+ return StoreRef(B.asImmutableMap().getRootWithoutRetain(), *this);
+ }
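+
+  // Usage sketch: a default binding lets one value stand in for an entire
+  // region. After zero-initializing 'struct S s = {};', the store can keep a
+  // single default binding of zero for 's' instead of a direct binding per
+  // field; a later direct binding (e.g. 's.x = 1') shadows the default for
+  // that subregion on lookup.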
+
+ /// Attempt to extract the fields of \p LCV and bind them to the struct region
+ /// \p R.
+ ///
+ /// This path is used when it seems advantageous to "force" loading the values
+ /// within a LazyCompoundVal to bind memberwise to the struct region, rather
+ /// than using a Default binding at the base of the entire region. This is a
+ /// heuristic attempting to avoid building long chains of LazyCompoundVals.
+ ///
+ /// \returns The updated store bindings, or \c None if binding non-lazily
+ /// would be too expensive.
+ Optional<RegionBindingsRef> tryBindSmallStruct(RegionBindingsConstRef B,
+ const TypedValueRegion *R,
+ const RecordDecl *RD,
+ nonloc::LazyCompoundVal LCV);
+
+ /// BindStruct - Bind a compound value to a structure.
+ RegionBindingsRef bindStruct(RegionBindingsConstRef B,
+ const TypedValueRegion* R, SVal V);
+
+ /// BindVector - Bind a compound value to a vector.
+ RegionBindingsRef bindVector(RegionBindingsConstRef B,
+ const TypedValueRegion* R, SVal V);
+
+ RegionBindingsRef bindArray(RegionBindingsConstRef B,
+ const TypedValueRegion* R,
+ SVal V);
+
+ /// Clears out all bindings in the given region and assigns a new value
+ /// as a Default binding.
+ RegionBindingsRef bindAggregate(RegionBindingsConstRef B,
+ const TypedRegion *R,
+ SVal DefaultVal);
+
+ /// \brief Create a new store with the specified binding removed.
+ /// \param ST the original store, that is the basis for the new store.
+ /// \param L the location whose binding should be removed.
+ StoreRef killBinding(Store ST, Loc L) override;
+
+ void incrementReferenceCount(Store store) override {
+ getRegionBindings(store).manualRetain();
+ }
+
+ /// If the StoreManager supports it, decrement the reference count of
+ /// the specified Store object. If the reference count hits 0, the memory
+ /// associated with the object is recycled.
+ void decrementReferenceCount(Store store) override {
+ getRegionBindings(store).manualRelease();
+ }
+
+ bool includedInBindings(Store store, const MemRegion *region) const override;
+
+ /// \brief Return the value bound to specified location in a given state.
+ ///
+ /// The high level logic for this method is this:
+ /// getBinding (L)
+ /// if L has binding
+ /// return L's binding
+ /// else if L is in killset
+ /// return unknown
+ /// else
+ /// if L is on stack or heap
+ /// return undefined
+ /// else
+ /// return symbolic
+ SVal getBinding(Store S, Loc L, QualType T) override {
+ return getBinding(getRegionBindings(S), L, T);
+ }
+
+ SVal getBinding(RegionBindingsConstRef B, Loc L, QualType T = QualType());
+
+ SVal getBindingForElement(RegionBindingsConstRef B, const ElementRegion *R);
+
+ SVal getBindingForField(RegionBindingsConstRef B, const FieldRegion *R);
+
+ SVal getBindingForObjCIvar(RegionBindingsConstRef B, const ObjCIvarRegion *R);
+
+ SVal getBindingForVar(RegionBindingsConstRef B, const VarRegion *R);
+
+ SVal getBindingForLazySymbol(const TypedValueRegion *R);
+
+ SVal getBindingForFieldOrElementCommon(RegionBindingsConstRef B,
+ const TypedValueRegion *R,
+ QualType Ty);
+
+ SVal getLazyBinding(const SubRegion *LazyBindingRegion,
+ RegionBindingsRef LazyBinding);
+
+ /// Get bindings for the values in a struct and return a CompoundVal, used
+ /// when doing struct copy:
+ /// struct s x, y;
+ /// x = y;
+ /// y's value is retrieved by this method.
+ SVal getBindingForStruct(RegionBindingsConstRef B, const TypedValueRegion *R);
+ SVal getBindingForArray(RegionBindingsConstRef B, const TypedValueRegion *R);
+ NonLoc createLazyBinding(RegionBindingsConstRef B, const TypedValueRegion *R);
+
+ /// Used to lazily generate derived symbols for bindings that are defined
+ /// implicitly by default bindings in a super region.
+ ///
+ /// Note that callers may need to specially handle LazyCompoundVals, which
+ /// are returned as is in case the caller needs to treat them differently.
+ Optional<SVal> getBindingForDerivedDefaultValue(RegionBindingsConstRef B,
+ const MemRegion *superR,
+ const TypedValueRegion *R,
+ QualType Ty);
+
+ /// Get the state and region whose binding this region \p R corresponds to.
+ ///
+ /// If there is no lazy binding for \p R, the returned value will have a null
+  /// \c second. Note that a null pointer can represent a valid Store.
+ std::pair<Store, const SubRegion *>
+ findLazyBinding(RegionBindingsConstRef B, const SubRegion *R,
+ const SubRegion *originalRegion);
+
+ /// Returns the cached set of interesting SVals contained within a lazy
+ /// binding.
+ ///
+ /// The precise value of "interesting" is determined for the purposes of
+ /// RegionStore's internal analysis. It must always contain all regions and
+ /// symbols, but may omit constants and other kinds of SVal.
+ const SValListTy &getInterestingValues(nonloc::LazyCompoundVal LCV);
+
+ //===------------------------------------------------------------------===//
+ // State pruning.
+ //===------------------------------------------------------------------===//
+
+ /// removeDeadBindings - Scans the RegionStore of 'state' for dead values.
+ /// It returns a new Store with these values removed.
+ StoreRef removeDeadBindings(Store store, const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper) override;
+
+ //===------------------------------------------------------------------===//
+ // Region "extents".
+ //===------------------------------------------------------------------===//
+
+ // FIXME: This method will soon be eliminated; see the note in Store.h.
+ DefinedOrUnknownSVal getSizeInElements(ProgramStateRef state,
+ const MemRegion* R,
+ QualType EleTy) override;
+
+ //===------------------------------------------------------------------===//
+ // Utility methods.
+ //===------------------------------------------------------------------===//
+
+ RegionBindingsRef getRegionBindings(Store store) const {
+ return RegionBindingsRef(CBFactory,
+ static_cast<const RegionBindings::TreeTy*>(store),
+ RBFactory.getTreeFactory());
+ }
+
+ void print(Store store, raw_ostream &Out, const char* nl,
+ const char *sep) override;
+
+ void iterBindings(Store store, BindingsHandler& f) override {
+ RegionBindingsRef B = getRegionBindings(store);
+ for (RegionBindingsRef::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ const ClusterBindings &Cluster = I.getData();
+ for (ClusterBindings::iterator CI = Cluster.begin(), CE = Cluster.end();
+ CI != CE; ++CI) {
+ const BindingKey &K = CI.getKey();
+ if (!K.isDirect())
+ continue;
+ if (const SubRegion *R = dyn_cast<SubRegion>(K.getRegion())) {
+ // FIXME: Possibly incorporate the offset?
+ if (!f.HandleBinding(*this, store, R, CI.getData()))
+ return;
+ }
+ }
+ }
+ }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// RegionStore creation.
+//===----------------------------------------------------------------------===//
+
+std::unique_ptr<StoreManager>
+ento::CreateRegionStoreManager(ProgramStateManager &StMgr) {
+ RegionStoreFeatures F = maximal_features_tag();
+ return llvm::make_unique<RegionStoreManager>(StMgr, F);
+}
+
+std::unique_ptr<StoreManager>
+ento::CreateFieldsOnlyRegionStoreManager(ProgramStateManager &StMgr) {
+ RegionStoreFeatures F = minimal_features_tag();
+ F.enableFields(true);
+ return llvm::make_unique<RegionStoreManager>(StMgr, F);
+}
+
+//===----------------------------------------------------------------------===//
+// Region Cluster analysis.
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// Used to determine which global regions are automatically included in the
+/// initial worklist of a ClusterAnalysis.
+enum GlobalsFilterKind {
+ /// Don't include any global regions.
+ GFK_None,
+ /// Only include system globals.
+ GFK_SystemOnly,
+ /// Include all global regions.
+ GFK_All
+};
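+
+// The filter is chosen per invalidation: calls into system headers use
+// GFK_SystemOnly, other calls use GFK_All, and non-call invalidations use
+// GFK_None (see RegionStoreManager::invalidateRegions below).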
+
+template <typename DERIVED>
+class ClusterAnalysis {
+protected:
+ typedef llvm::DenseMap<const MemRegion *, const ClusterBindings *> ClusterMap;
+ typedef const MemRegion * WorkListElement;
+ typedef SmallVector<WorkListElement, 10> WorkList;
+
+ llvm::SmallPtrSet<const ClusterBindings *, 16> Visited;
+
+ WorkList WL;
+
+ RegionStoreManager &RM;
+ ASTContext &Ctx;
+ SValBuilder &svalBuilder;
+
+ RegionBindingsRef B;
+
+protected:
+ const ClusterBindings *getCluster(const MemRegion *R) {
+ return B.lookup(R);
+ }
+
+ /// Returns true if all clusters in the given memspace should be initially
+ /// included in the cluster analysis. Subclasses may provide their
+ /// own implementation.
+ bool includeEntireMemorySpace(const MemRegion *Base) {
+ return false;
+ }
+
+public:
+ ClusterAnalysis(RegionStoreManager &rm, ProgramStateManager &StateMgr,
+                  RegionBindingsRef b)
+ : RM(rm), Ctx(StateMgr.getContext()),
+ svalBuilder(StateMgr.getSValBuilder()),
+ B(b) {}
+
+ RegionBindingsRef getRegionBindings() const { return B; }
+
+ bool isVisited(const MemRegion *R) {
+ return Visited.count(getCluster(R));
+ }
+
+ void GenerateClusters() {
+ // Scan the entire set of bindings and record the region clusters.
+ for (RegionBindingsRef::iterator RI = B.begin(), RE = B.end();
+ RI != RE; ++RI){
+ const MemRegion *Base = RI.getKey();
+
+ const ClusterBindings &Cluster = RI.getData();
+ assert(!Cluster.isEmpty() && "Empty clusters should be removed");
+ static_cast<DERIVED*>(this)->VisitAddedToCluster(Base, Cluster);
+
+      // If the base's memspace should be entirely invalidated, add the cluster
+      // to the worklist up front.
+ if (static_cast<DERIVED*>(this)->includeEntireMemorySpace(Base))
+ AddToWorkList(WorkListElement(Base), &Cluster);
+ }
+ }
+
+ bool AddToWorkList(WorkListElement E, const ClusterBindings *C) {
+ if (C && !Visited.insert(C).second)
+ return false;
+ WL.push_back(E);
+ return true;
+ }
+
+ bool AddToWorkList(const MemRegion *R) {
+ return static_cast<DERIVED*>(this)->AddToWorkList(R);
+ }
+
+ void RunWorkList() {
+ while (!WL.empty()) {
+ WorkListElement E = WL.pop_back_val();
+ const MemRegion *BaseR = E;
+
+ static_cast<DERIVED*>(this)->VisitCluster(BaseR, getCluster(BaseR));
+ }
+ }
+
+ void VisitAddedToCluster(const MemRegion *baseR, const ClusterBindings &C) {}
+ void VisitCluster(const MemRegion *baseR, const ClusterBindings *C) {}
+
+ void VisitCluster(const MemRegion *BaseR, const ClusterBindings *C,
+ bool Flag) {
+ static_cast<DERIVED*>(this)->VisitCluster(BaseR, C);
+ }
+};
+} // end anonymous namespace
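+
+// ClusterAnalysis is used via CRTP: a derived worker (such as the
+// invalidateRegionsWorker and removeDeadBindingsWorker below) supplies its
+// own VisitCluster/AddToWorkList overrides and then drives the traversal
+// with GenerateClusters() followed by RunWorkList().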
+
+//===----------------------------------------------------------------------===//
+// Binding invalidation.
+//===----------------------------------------------------------------------===//
+
+bool RegionStoreManager::scanReachableSymbols(Store S, const MemRegion *R,
+ ScanReachableSymbols &Callbacks) {
+ assert(R == R->getBaseRegion() && "Should only be called for base regions");
+ RegionBindingsRef B = getRegionBindings(S);
+ const ClusterBindings *Cluster = B.lookup(R);
+
+ if (!Cluster)
+ return true;
+
+ for (ClusterBindings::iterator RI = Cluster->begin(), RE = Cluster->end();
+ RI != RE; ++RI) {
+ if (!Callbacks.scan(RI.getData()))
+ return false;
+ }
+
+ return true;
+}
+
+static inline bool isUnionField(const FieldRegion *FR) {
+ return FR->getDecl()->getParent()->isUnion();
+}
+
+typedef SmallVector<const FieldDecl *, 8> FieldVector;
+
+static void getSymbolicOffsetFields(BindingKey K, FieldVector &Fields) {
+ assert(K.hasSymbolicOffset() && "Not implemented for concrete offset keys");
+
+ const MemRegion *Base = K.getConcreteOffsetRegion();
+ const MemRegion *R = K.getRegion();
+
+ while (R != Base) {
+ if (const FieldRegion *FR = dyn_cast<FieldRegion>(R))
+ if (!isUnionField(FR))
+ Fields.push_back(FR->getDecl());
+
+ R = cast<SubRegion>(R)->getSuperRegion();
+ }
+}
+
+static bool isCompatibleWithFields(BindingKey K, const FieldVector &Fields) {
+ assert(K.hasSymbolicOffset() && "Not implemented for concrete offset keys");
+
+ if (Fields.empty())
+ return true;
+
+ FieldVector FieldsInBindingKey;
+ getSymbolicOffsetFields(K, FieldsInBindingKey);
+
+ ptrdiff_t Delta = FieldsInBindingKey.size() - Fields.size();
+ if (Delta >= 0)
+ return std::equal(FieldsInBindingKey.begin() + Delta,
+ FieldsInBindingKey.end(),
+ Fields.begin());
+ else
+ return std::equal(FieldsInBindingKey.begin(), FieldsInBindingKey.end(),
+ Fields.begin() - Delta);
+}
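+
+// Compatibility here means that one field chain is a suffix of the other;
+// e.g. a key for 'points[i].y' (chain {y}) is compatible with {y} but not
+// with a chain for 'points[i].x'.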
+
+/// Collects all bindings in \p Cluster that may refer to bindings within
+/// \p Top.
+///
+/// Each binding is a pair whose \c first is the key (a BindingKey) and whose
+/// \c second is the value (an SVal).
+///
+/// The \p IncludeAllDefaultBindings parameter specifies whether to include
+/// default bindings that may extend beyond \p Top itself, e.g. if \p Top is
+/// an aggregate within a larger aggregate with a default binding.
+static void
+collectSubRegionBindings(SmallVectorImpl<BindingPair> &Bindings,
+ SValBuilder &SVB, const ClusterBindings &Cluster,
+ const SubRegion *Top, BindingKey TopKey,
+ bool IncludeAllDefaultBindings) {
+ FieldVector FieldsInSymbolicSubregions;
+ if (TopKey.hasSymbolicOffset()) {
+ getSymbolicOffsetFields(TopKey, FieldsInSymbolicSubregions);
+ Top = cast<SubRegion>(TopKey.getConcreteOffsetRegion());
+ TopKey = BindingKey::Make(Top, BindingKey::Default);
+ }
+
+ // Find the length (in bits) of the region being invalidated.
+ uint64_t Length = UINT64_MAX;
+ SVal Extent = Top->getExtent(SVB);
+ if (Optional<nonloc::ConcreteInt> ExtentCI =
+ Extent.getAs<nonloc::ConcreteInt>()) {
+ const llvm::APSInt &ExtentInt = ExtentCI->getValue();
+ assert(ExtentInt.isNonNegative() || ExtentInt.isUnsigned());
+ // Extents are in bytes but region offsets are in bits. Be careful!
+ Length = ExtentInt.getLimitedValue() * SVB.getContext().getCharWidth();
+ } else if (const FieldRegion *FR = dyn_cast<FieldRegion>(Top)) {
+ if (FR->getDecl()->isBitField())
+ Length = FR->getDecl()->getBitWidthValue(SVB.getContext());
+ }
+
+ for (ClusterBindings::iterator I = Cluster.begin(), E = Cluster.end();
+ I != E; ++I) {
+ BindingKey NextKey = I.getKey();
+ if (NextKey.getRegion() == TopKey.getRegion()) {
+ // FIXME: This doesn't catch the case where we're really invalidating a
+ // region with a symbolic offset. Example:
+ // R: points[i].y
+ // Next: points[0].x
+
+ if (NextKey.getOffset() > TopKey.getOffset() &&
+ NextKey.getOffset() - TopKey.getOffset() < Length) {
+ // Case 1: The next binding is inside the region we're invalidating.
+ // Include it.
+ Bindings.push_back(*I);
+
+ } else if (NextKey.getOffset() == TopKey.getOffset()) {
+ // Case 2: The next binding is at the same offset as the region we're
+ // invalidating. In this case, we need to leave default bindings alone,
+        // since they may be providing a default value for regions beyond what
+ // we're invalidating.
+ // FIXME: This is probably incorrect; consider invalidating an outer
+ // struct whose first field is bound to a LazyCompoundVal.
+ if (IncludeAllDefaultBindings || NextKey.isDirect())
+ Bindings.push_back(*I);
+ }
+
+ } else if (NextKey.hasSymbolicOffset()) {
+ const MemRegion *Base = NextKey.getConcreteOffsetRegion();
+ if (Top->isSubRegionOf(Base)) {
+ // Case 3: The next key is symbolic and we just changed something within
+ // its concrete region. We don't know if the binding is still valid, so
+ // we'll be conservative and include it.
+ if (IncludeAllDefaultBindings || NextKey.isDirect())
+ if (isCompatibleWithFields(NextKey, FieldsInSymbolicSubregions))
+ Bindings.push_back(*I);
+ } else if (const SubRegion *BaseSR = dyn_cast<SubRegion>(Base)) {
+ // Case 4: The next key is symbolic, but we changed a known
+ // super-region. In this case the binding is certainly included.
+ if (Top == Base || BaseSR->isSubRegionOf(Top))
+ if (isCompatibleWithFields(NextKey, FieldsInSymbolicSubregions))
+ Bindings.push_back(*I);
+ }
+ }
+ }
+}
+
+static void
+collectSubRegionBindings(SmallVectorImpl<BindingPair> &Bindings,
+ SValBuilder &SVB, const ClusterBindings &Cluster,
+ const SubRegion *Top, bool IncludeAllDefaultBindings) {
+ collectSubRegionBindings(Bindings, SVB, Cluster, Top,
+ BindingKey::Make(Top, BindingKey::Default),
+ IncludeAllDefaultBindings);
+}
+
+RegionBindingsRef
+RegionStoreManager::removeSubRegionBindings(RegionBindingsConstRef B,
+ const SubRegion *Top) {
+ BindingKey TopKey = BindingKey::Make(Top, BindingKey::Default);
+ const MemRegion *ClusterHead = TopKey.getBaseRegion();
+
+ if (Top == ClusterHead) {
+ // We can remove an entire cluster's bindings all in one go.
+ return B.remove(Top);
+ }
+
+ const ClusterBindings *Cluster = B.lookup(ClusterHead);
+ if (!Cluster) {
+ // If we're invalidating a region with a symbolic offset, we need to make
+ // sure we don't treat the base region as uninitialized anymore.
+ if (TopKey.hasSymbolicOffset()) {
+ const SubRegion *Concrete = TopKey.getConcreteOffsetRegion();
+ return B.addBinding(Concrete, BindingKey::Default, UnknownVal());
+ }
+ return B;
+ }
+
+ SmallVector<BindingPair, 32> Bindings;
+ collectSubRegionBindings(Bindings, svalBuilder, *Cluster, Top, TopKey,
+ /*IncludeAllDefaultBindings=*/false);
+
+ ClusterBindingsRef Result(*Cluster, CBFactory);
+ for (SmallVectorImpl<BindingPair>::const_iterator I = Bindings.begin(),
+ E = Bindings.end();
+ I != E; ++I)
+ Result = Result.remove(I->first);
+
+ // If we're invalidating a region with a symbolic offset, we need to make sure
+ // we don't treat the base region as uninitialized anymore.
+ // FIXME: This isn't very precise; see the example in
+ // collectSubRegionBindings.
+ if (TopKey.hasSymbolicOffset()) {
+ const SubRegion *Concrete = TopKey.getConcreteOffsetRegion();
+ Result = Result.add(BindingKey::Make(Concrete, BindingKey::Default),
+ UnknownVal());
+ }
+
+ if (Result.isEmpty())
+ return B.remove(ClusterHead);
+ return B.add(ClusterHead, Result.asImmutableMap());
+}
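+
+// For example, rebinding 's.x' removes the bindings that lie inside 's.x',
+// leaves sibling bindings such as 's.y' untouched, and preserves default
+// bindings at the same offset since they may cover more than 's.x' itself.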
+
+namespace {
+class invalidateRegionsWorker : public ClusterAnalysis<invalidateRegionsWorker>
+{
+ const Expr *Ex;
+ unsigned Count;
+ const LocationContext *LCtx;
+ InvalidatedSymbols &IS;
+ RegionAndSymbolInvalidationTraits &ITraits;
+ StoreManager::InvalidatedRegions *Regions;
+ GlobalsFilterKind GlobalsFilter;
+public:
+ invalidateRegionsWorker(RegionStoreManager &rm,
+ ProgramStateManager &stateMgr,
+ RegionBindingsRef b,
+ const Expr *ex, unsigned count,
+ const LocationContext *lctx,
+ InvalidatedSymbols &is,
+ RegionAndSymbolInvalidationTraits &ITraitsIn,
+ StoreManager::InvalidatedRegions *r,
+ GlobalsFilterKind GFK)
+ : ClusterAnalysis<invalidateRegionsWorker>(rm, stateMgr, b),
+ Ex(ex), Count(count), LCtx(lctx), IS(is), ITraits(ITraitsIn), Regions(r),
+ GlobalsFilter(GFK) {}
+
+ void VisitCluster(const MemRegion *baseR, const ClusterBindings *C);
+ void VisitBinding(SVal V);
+
+ using ClusterAnalysis::AddToWorkList;
+
+ bool AddToWorkList(const MemRegion *R);
+
+  /// Returns true if all clusters in the memory space for \p Base should be
+  /// invalidated.
+ bool includeEntireMemorySpace(const MemRegion *Base);
+
+ /// Returns true if the memory space of the given region is one of the global
+ /// regions specially included at the start of invalidation.
+ bool isInitiallyIncludedGlobalRegion(const MemRegion *R);
+};
+} // end anonymous namespace
+
+bool invalidateRegionsWorker::AddToWorkList(const MemRegion *R) {
+ bool doNotInvalidateSuperRegion = ITraits.hasTrait(
+ R, RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
+ const MemRegion *BaseR = doNotInvalidateSuperRegion ? R : R->getBaseRegion();
+ return AddToWorkList(WorkListElement(BaseR), getCluster(BaseR));
+}
+
+void invalidateRegionsWorker::VisitBinding(SVal V) {
+ // A symbol? Mark it touched by the invalidation.
+ if (SymbolRef Sym = V.getAsSymbol())
+ IS.insert(Sym);
+
+ if (const MemRegion *R = V.getAsRegion()) {
+ AddToWorkList(R);
+ return;
+ }
+
+ // Is it a LazyCompoundVal? All references get invalidated as well.
+ if (Optional<nonloc::LazyCompoundVal> LCS =
+ V.getAs<nonloc::LazyCompoundVal>()) {
+
+ const RegionStoreManager::SValListTy &Vals = RM.getInterestingValues(*LCS);
+
+ for (RegionStoreManager::SValListTy::const_iterator I = Vals.begin(),
+ E = Vals.end();
+ I != E; ++I)
+ VisitBinding(*I);
+
+ return;
+ }
+}
+
+void invalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
+ const ClusterBindings *C) {
+
+ bool PreserveRegionsContents =
+ ITraits.hasTrait(baseR,
+ RegionAndSymbolInvalidationTraits::TK_PreserveContents);
+
+ if (C) {
+ for (ClusterBindings::iterator I = C->begin(), E = C->end(); I != E; ++I)
+ VisitBinding(I.getData());
+
+ // Invalidate regions contents.
+ if (!PreserveRegionsContents)
+ B = B.remove(baseR);
+ }
+
+ // BlockDataRegion? If so, invalidate captured variables that are passed
+ // by reference.
+ if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(baseR)) {
+ for (BlockDataRegion::referenced_vars_iterator
+ BI = BR->referenced_vars_begin(), BE = BR->referenced_vars_end() ;
+ BI != BE; ++BI) {
+ const VarRegion *VR = BI.getCapturedRegion();
+ const VarDecl *VD = VR->getDecl();
+ if (VD->hasAttr<BlocksAttr>() || !VD->hasLocalStorage()) {
+ AddToWorkList(VR);
+ }
+ else if (Loc::isLocType(VR->getValueType())) {
+ // Map the current bindings to a Store to retrieve the value
+ // of the binding. If that binding itself is a region, we should
+ // invalidate that region. This is because a block may capture
+        // a pointer value, but the thing pointed to by that pointer may
+ // get invalidated.
+ SVal V = RM.getBinding(B, loc::MemRegionVal(VR));
+ if (Optional<Loc> L = V.getAs<Loc>()) {
+ if (const MemRegion *LR = L->getAsRegion())
+ AddToWorkList(LR);
+ }
+ }
+ }
+ return;
+ }
+
+ // Symbolic region?
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(baseR))
+ IS.insert(SR->getSymbol());
+
+  // Nothing else should be done when we preserve the region's contents.
+ if (PreserveRegionsContents)
+ return;
+
+ // Otherwise, we have a normal data region. Record that we touched the region.
+ if (Regions)
+ Regions->push_back(baseR);
+
+ if (isa<AllocaRegion>(baseR) || isa<SymbolicRegion>(baseR)) {
+ // Invalidate the region by setting its default value to
+    // a conjured symbol. The type of the symbol is irrelevant.
+ DefinedOrUnknownSVal V =
+ svalBuilder.conjureSymbolVal(baseR, Ex, LCtx, Ctx.IntTy, Count);
+ B = B.addBinding(baseR, BindingKey::Default, V);
+ return;
+ }
+
+ if (!baseR->isBoundable())
+ return;
+
+ const TypedValueRegion *TR = cast<TypedValueRegion>(baseR);
+ QualType T = TR->getValueType();
+
+ if (isInitiallyIncludedGlobalRegion(baseR)) {
+ // If the region is a global and we are invalidating all globals,
+ // erasing the entry is good enough. This causes all globals to be lazily
+ // symbolicated from the same base symbol.
+ return;
+ }
+
+ if (T->isStructureOrClassType()) {
+ // Invalidate the region by setting its default value to
+    // a conjured symbol. The type of the symbol is irrelevant.
+ DefinedOrUnknownSVal V = svalBuilder.conjureSymbolVal(baseR, Ex, LCtx,
+ Ctx.IntTy, Count);
+ B = B.addBinding(baseR, BindingKey::Default, V);
+ return;
+ }
+
+ if (const ArrayType *AT = Ctx.getAsArrayType(T)) {
+ bool doNotInvalidateSuperRegion = ITraits.hasTrait(
+ baseR,
+ RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
+
+ if (doNotInvalidateSuperRegion) {
+ // We are not doing blank invalidation of the whole array region so we
+      // have to invalidate each element manually.
+ Optional<uint64_t> NumElements;
+
+ // Compute lower and upper offsets for region within array.
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
+ NumElements = CAT->getSize().getZExtValue();
+ if (!NumElements) // We are not dealing with a constant size array
+ goto conjure_default;
+ QualType ElementTy = AT->getElementType();
+ uint64_t ElemSize = Ctx.getTypeSize(ElementTy);
+ const RegionOffset &RO = baseR->getAsOffset();
+ const MemRegion *SuperR = baseR->getBaseRegion();
+ if (RO.hasSymbolicOffset()) {
+        // If the base region has a symbolic offset,
+        // we revert to invalidating the super region.
+ if (SuperR)
+ AddToWorkList(SuperR);
+ goto conjure_default;
+ }
+
+ uint64_t LowerOffset = RO.getOffset();
+ uint64_t UpperOffset = LowerOffset + *NumElements * ElemSize;
+ bool UpperOverflow = UpperOffset < LowerOffset;
+
+ // Invalidate regions which are within array boundaries,
+ // or have a symbolic offset.
+ if (!SuperR)
+ goto conjure_default;
+
+ const ClusterBindings *C = B.lookup(SuperR);
+ if (!C)
+ goto conjure_default;
+
+ for (ClusterBindings::iterator I = C->begin(), E = C->end(); I != E;
+ ++I) {
+ const BindingKey &BK = I.getKey();
+ Optional<uint64_t> ROffset =
+ BK.hasSymbolicOffset() ? Optional<uint64_t>() : BK.getOffset();
+
+        // Check that the offset is not symbolic and is within the array's bounds.
+ // Handles arrays of 0 elements and of 0-sized elements as well.
+        if (!ROffset ||
+            (*ROffset >= LowerOffset && *ROffset < UpperOffset) ||
+            (UpperOverflow &&
+             (*ROffset >= LowerOffset || *ROffset < UpperOffset)) ||
+            (LowerOffset == UpperOffset && *ROffset == LowerOffset)) {
+ B = B.removeBinding(I.getKey());
+ // Bound symbolic regions need to be invalidated for dead symbol
+ // detection.
+ SVal V = I.getData();
+ const MemRegion *R = V.getAsRegion();
+ if (R && isa<SymbolicRegion>(R))
+ VisitBinding(V);
+ }
+ }
+ }
+ conjure_default:
+    // Set the default value of the array to a conjured symbol.
+ DefinedOrUnknownSVal V =
+ svalBuilder.conjureSymbolVal(baseR, Ex, LCtx,
+ AT->getElementType(), Count);
+ B = B.addBinding(baseR, BindingKey::Default, V);
+ return;
+ }
+
+ DefinedOrUnknownSVal V = svalBuilder.conjureSymbolVal(baseR, Ex, LCtx,
+                                                        T, Count);
+ assert(SymbolManager::canSymbolicate(T) || V.isUnknown());
+ B = B.addBinding(baseR, BindingKey::Direct, V);
+}
+
+bool invalidateRegionsWorker::isInitiallyIncludedGlobalRegion(
+ const MemRegion *R) {
+ switch (GlobalsFilter) {
+ case GFK_None:
+ return false;
+ case GFK_SystemOnly:
+ return isa<GlobalSystemSpaceRegion>(R->getMemorySpace());
+ case GFK_All:
+ return isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace());
+ }
+
+ llvm_unreachable("unknown globals filter");
+}
+
+bool invalidateRegionsWorker::includeEntireMemorySpace(const MemRegion *Base) {
+ if (isInitiallyIncludedGlobalRegion(Base))
+ return true;
+
+ const MemSpaceRegion *MemSpace = Base->getMemorySpace();
+ return ITraits.hasTrait(MemSpace,
+ RegionAndSymbolInvalidationTraits::TK_EntireMemSpace);
+}
+
+RegionBindingsRef
+RegionStoreManager::invalidateGlobalRegion(MemRegion::Kind K,
+ const Expr *Ex,
+ unsigned Count,
+ const LocationContext *LCtx,
+ RegionBindingsRef B,
+ InvalidatedRegions *Invalidated) {
+ // Bind the globals memory space to a new symbol that we will use to derive
+ // the bindings for all globals.
+ const GlobalsSpaceRegion *GS = MRMgr.getGlobalsRegion(K);
+ SVal V = svalBuilder.conjureSymbolVal(/* SymbolTag = */ (const void*) GS, Ex, LCtx,
+ /* type does not matter */ Ctx.IntTy,
+ Count);
+
+ B = B.removeBinding(GS)
+ .addBinding(BindingKey::Make(GS, BindingKey::Default), V);
+
+ // Even if there are no bindings in the global scope, we still need to
+ // record that we touched it.
+ if (Invalidated)
+ Invalidated->push_back(GS);
+
+ return B;
+}
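+
+// After this, any global in that memory space with no explicit binding is
+// read back through getBindingForDerivedDefaultValue(), which derives a
+// fresh symbol for the region from the space-wide conjured symbol above.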
+
+void RegionStoreManager::populateWorkList(invalidateRegionsWorker &W,
+ ArrayRef<SVal> Values,
+ InvalidatedRegions *TopLevelRegions) {
+ for (ArrayRef<SVal>::iterator I = Values.begin(),
+ E = Values.end(); I != E; ++I) {
+ SVal V = *I;
+ if (Optional<nonloc::LazyCompoundVal> LCS =
+ V.getAs<nonloc::LazyCompoundVal>()) {
+
+ const SValListTy &Vals = getInterestingValues(*LCS);
+
+ for (SValListTy::const_iterator I = Vals.begin(),
+ E = Vals.end(); I != E; ++I) {
+        // Note: these are non-top-level regions, so they are not recorded in
+        // TopLevelRegions.
+ if (const MemRegion *R = (*I).getAsRegion())
+ W.AddToWorkList(R);
+ }
+ continue;
+ }
+
+ if (const MemRegion *R = V.getAsRegion()) {
+ if (TopLevelRegions)
+ TopLevelRegions->push_back(R);
+ W.AddToWorkList(R);
+ continue;
+ }
+ }
+}
+
+StoreRef
+RegionStoreManager::invalidateRegions(Store store,
+ ArrayRef<SVal> Values,
+ const Expr *Ex, unsigned Count,
+ const LocationContext *LCtx,
+ const CallEvent *Call,
+ InvalidatedSymbols &IS,
+ RegionAndSymbolInvalidationTraits &ITraits,
+ InvalidatedRegions *TopLevelRegions,
+ InvalidatedRegions *Invalidated) {
+ GlobalsFilterKind GlobalsFilter;
+ if (Call) {
+ if (Call->isInSystemHeader())
+ GlobalsFilter = GFK_SystemOnly;
+ else
+ GlobalsFilter = GFK_All;
+ } else {
+ GlobalsFilter = GFK_None;
+ }
+
+ RegionBindingsRef B = getRegionBindings(store);
+ invalidateRegionsWorker W(*this, StateMgr, B, Ex, Count, LCtx, IS, ITraits,
+ Invalidated, GlobalsFilter);
+
+ // Scan the bindings and generate the clusters.
+ W.GenerateClusters();
+
+ // Add the regions to the worklist.
+ populateWorkList(W, Values, TopLevelRegions);
+
+ W.RunWorkList();
+
+ // Return the new bindings.
+ B = W.getRegionBindings();
+
+ // For calls, determine which global regions should be invalidated and
+ // invalidate them. (Note that function-static and immutable globals are never
+ // invalidated by this.)
+ // TODO: This could possibly be more precise with modules.
+ switch (GlobalsFilter) {
+ case GFK_All:
+ B = invalidateGlobalRegion(MemRegion::GlobalInternalSpaceRegionKind,
+ Ex, Count, LCtx, B, Invalidated);
+ // FALLTHROUGH
+ case GFK_SystemOnly:
+ B = invalidateGlobalRegion(MemRegion::GlobalSystemSpaceRegionKind,
+ Ex, Count, LCtx, B, Invalidated);
+ // FALLTHROUGH
+ case GFK_None:
+ break;
+ }
+
+ return StoreRef(B.asStore(), *this);
+}
+
+//===----------------------------------------------------------------------===//
+// Extents for regions.
+//===----------------------------------------------------------------------===//
+
+DefinedOrUnknownSVal
+RegionStoreManager::getSizeInElements(ProgramStateRef state,
+ const MemRegion *R,
+ QualType EleTy) {
+ SVal Size = cast<SubRegion>(R)->getExtent(svalBuilder);
+ const llvm::APSInt *SizeInt = svalBuilder.getKnownValue(state, Size);
+ if (!SizeInt)
+ return UnknownVal();
+
+ CharUnits RegionSize = CharUnits::fromQuantity(SizeInt->getSExtValue());
+
+ if (Ctx.getAsVariableArrayType(EleTy)) {
+ // FIXME: We need to track extra state to properly record the size
+ // of VLAs. Returning UnknownVal here, however, is a stop-gap so that
+ // we don't have a divide-by-zero below.
+ return UnknownVal();
+ }
+
+ CharUnits EleSize = Ctx.getTypeSizeInChars(EleTy);
+
+  // If the region's size is not an even multiple of the element size, round
+  // the element count down.
+ // This is a signed value, since it's used in arithmetic with signed indices.
+ return svalBuilder.makeIntVal(RegionSize / EleSize, false);
+}
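+
+// For example, a 10-byte region viewed through a 4-byte element type reports
+// 10 / 4 == 2 elements; the remainder is discarded by the division above.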
+
+//===----------------------------------------------------------------------===//
+// Location and region casting.
+//===----------------------------------------------------------------------===//
+
+/// ArrayToPointer - Emulates the "decay" of an array to a pointer
+/// type. 'Array' represents the lvalue of the array being decayed
+/// to a pointer, and the returned SVal represents the decayed
+/// version of that lvalue (i.e., a pointer to the first element of
+/// the array). This is called by ExprEngine when evaluating casts
+/// from arrays to pointers.
+SVal RegionStoreManager::ArrayToPointer(Loc Array, QualType T) {
+ if (!Array.getAs<loc::MemRegionVal>())
+ return UnknownVal();
+
+ const MemRegion* R = Array.castAs<loc::MemRegionVal>().getRegion();
+ NonLoc ZeroIdx = svalBuilder.makeZeroArrayIndex();
+ return loc::MemRegionVal(MRMgr.getElementRegion(T, ZeroIdx, R, Ctx));
+}
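+
+// For 'int a[4]', the lvalue of 'a' decays to the address of its first
+// element, i.e. an ElementRegion with index 0 whose super-region is the
+// region of 'a'.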
+
+//===----------------------------------------------------------------------===//
+// Loading values from regions.
+//===----------------------------------------------------------------------===//
+
+SVal RegionStoreManager::getBinding(RegionBindingsConstRef B, Loc L, QualType T) {
+ assert(!L.getAs<UnknownVal>() && "location unknown");
+ assert(!L.getAs<UndefinedVal>() && "location undefined");
+
+ // For access to concrete addresses, return UnknownVal. Checks
+ // for null dereferences (and similar errors) are done by checkers, not
+ // the Store.
+ // FIXME: We can consider lazily symbolicating such memory, but we really
+  // should defer this until we can reason easily about symbolicating arrays
+ // of bytes.
+ if (L.getAs<loc::ConcreteInt>()) {
+ return UnknownVal();
+ }
+ if (!L.getAs<loc::MemRegionVal>()) {
+ return UnknownVal();
+ }
+
+ const MemRegion *MR = L.castAs<loc::MemRegionVal>().getRegion();
+
+ if (isa<BlockDataRegion>(MR)) {
+ return UnknownVal();
+ }
+
+ if (isa<AllocaRegion>(MR) ||
+ isa<SymbolicRegion>(MR) ||
+ isa<CodeTextRegion>(MR)) {
+ if (T.isNull()) {
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(MR))
+ T = TR->getLocationType();
+ else {
+ const SymbolicRegion *SR = cast<SymbolicRegion>(MR);
+ T = SR->getSymbol()->getType();
+ }
+ }
+ MR = GetElementZeroRegion(MR, T);
+ }
+
+ // FIXME: Perhaps this method should just take a 'const MemRegion*' argument
+ // instead of 'Loc', and have the other Loc cases handled at a higher level.
+ const TypedValueRegion *R = cast<TypedValueRegion>(MR);
+ QualType RTy = R->getValueType();
+
+ // FIXME: we do not yet model the parts of a complex type, so treat the
+ // whole thing as "unknown".
+ if (RTy->isAnyComplexType())
+ return UnknownVal();
+
+ // FIXME: We should eventually handle funny addressing. e.g.:
+ //
+ // int x = ...;
+ // int *p = &x;
+ // char *q = (char*) p;
+ // char c = *q; // returns the first byte of 'x'.
+ //
+ // Such funny addressing will occur due to layering of regions.
+ if (RTy->isStructureOrClassType())
+ return getBindingForStruct(B, R);
+
+ // FIXME: Handle unions.
+ if (RTy->isUnionType())
+ return createLazyBinding(B, R);
+
+ if (RTy->isArrayType()) {
+ if (RTy->isConstantArrayType())
+ return getBindingForArray(B, R);
+ else
+ return UnknownVal();
+ }
+
+ // FIXME: handle Vector types.
+ if (RTy->isVectorType())
+ return UnknownVal();
+
+ if (const FieldRegion* FR = dyn_cast<FieldRegion>(R))
+ return CastRetrievedVal(getBindingForField(B, FR), FR, T, false);
+
+ if (const ElementRegion* ER = dyn_cast<ElementRegion>(R)) {
+ // FIXME: Here we actually perform an implicit conversion from the loaded
+ // value to the element type. Eventually we want to compose these values
+ // more intelligently. For example, an 'element' can encompass multiple
+ // bound regions (e.g., several bound bytes), or could be a subset of
+ // a larger value.
+ return CastRetrievedVal(getBindingForElement(B, ER), ER, T, false);
+ }
+
+ if (const ObjCIvarRegion *IVR = dyn_cast<ObjCIvarRegion>(R)) {
+ // FIXME: Here we actually perform an implicit conversion from the loaded
+ // value to the ivar type. What we should model is stores to ivars
+ // that blow past the extent of the ivar. If the address of the ivar is
+    // reinterpreted, it is possible we stored a different value that could
+ // fit within the ivar. Either we need to cast these when storing them
+ // or reinterpret them lazily (as we do here).
+ return CastRetrievedVal(getBindingForObjCIvar(B, IVR), IVR, T, false);
+ }
+
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ // FIXME: Here we actually perform an implicit conversion from the loaded
+ // value to the variable type. What we should model is stores to variables
+ // that blow past the extent of the variable. If the address of the
+    // variable is reinterpreted, it is possible we stored a different value
+ // that could fit within the variable. Either we need to cast these when
+ // storing them or reinterpret them lazily (as we do here).
+ return CastRetrievedVal(getBindingForVar(B, VR), VR, T, false);
+ }
+
+ const SVal *V = B.lookup(R, BindingKey::Direct);
+
+ // Check if the region has a binding.
+ if (V)
+ return *V;
+
+ // The location does not have a bound value. This means that it has
+ // the value it had upon its creation and/or entry to the analyzed
+ // function/method. These are either symbolic values or 'undefined'.
+ if (R->hasStackNonParametersStorage()) {
+ // All stack variables are considered to have undefined values
+ // upon creation. All heap allocated blocks are considered to
+ // have undefined values as well unless they are explicitly bound
+ // to specific values.
+ return UndefinedVal();
+ }
+
+ // All other values are symbolic.
+ return svalBuilder.getRegionValueSymbolVal(R);
+}
+
+static QualType getUnderlyingType(const SubRegion *R) {
+ QualType RegionTy;
+ if (const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(R))
+ RegionTy = TVR->getValueType();
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
+ RegionTy = SR->getSymbol()->getType();
+
+ return RegionTy;
+}
+
+/// Checks to see if store \p B has a lazy binding for region \p R.
+///
+/// If \p AllowSubregionBindings is \c false, a lazy binding will be rejected
+/// if there are additional bindings within \p R.
+///
+/// Note that unlike RegionStoreManager::findLazyBinding, this will not search
+/// for lazy bindings for super-regions of \p R.
+static Optional<nonloc::LazyCompoundVal>
+getExistingLazyBinding(SValBuilder &SVB, RegionBindingsConstRef B,
+ const SubRegion *R, bool AllowSubregionBindings) {
+ Optional<SVal> V = B.getDefaultBinding(R);
+ if (!V)
+ return None;
+
+ Optional<nonloc::LazyCompoundVal> LCV = V->getAs<nonloc::LazyCompoundVal>();
+ if (!LCV)
+ return None;
+
+ // If the LCV is for a subregion, the types might not match, and we shouldn't
+ // reuse the binding.
+ QualType RegionTy = getUnderlyingType(R);
+ if (!RegionTy.isNull() &&
+ !RegionTy->isVoidPointerType()) {
+ QualType SourceRegionTy = LCV->getRegion()->getValueType();
+ if (!SVB.getContext().hasSameUnqualifiedType(RegionTy, SourceRegionTy))
+ return None;
+ }
+
+ if (!AllowSubregionBindings) {
+ // If there are any other bindings within this region, we shouldn't reuse
+ // the top-level binding.
+ SmallVector<BindingPair, 16> Bindings;
+ collectSubRegionBindings(Bindings, SVB, *B.lookup(R->getBaseRegion()), R,
+ /*IncludeAllDefaultBindings=*/true);
+ if (Bindings.size() > 1)
+ return None;
+ }
+
+ return *LCV;
+}
+
+std::pair<Store, const SubRegion *>
+RegionStoreManager::findLazyBinding(RegionBindingsConstRef B,
+ const SubRegion *R,
+ const SubRegion *originalRegion) {
+ if (originalRegion != R) {
+ if (Optional<nonloc::LazyCompoundVal> V =
+ getExistingLazyBinding(svalBuilder, B, R, true))
+ return std::make_pair(V->getStore(), V->getRegion());
+ }
+
+ typedef std::pair<Store, const SubRegion *> StoreRegionPair;
+ StoreRegionPair Result = StoreRegionPair();
+
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ Result = findLazyBinding(B, cast<SubRegion>(ER->getSuperRegion()),
+ originalRegion);
+
+ if (Result.second)
+ Result.second = MRMgr.getElementRegionWithSuper(ER, Result.second);
+
+ } else if (const FieldRegion *FR = dyn_cast<FieldRegion>(R)) {
+ Result = findLazyBinding(B, cast<SubRegion>(FR->getSuperRegion()),
+ originalRegion);
+
+ if (Result.second)
+ Result.second = MRMgr.getFieldRegionWithSuper(FR, Result.second);
+
+ } else if (const CXXBaseObjectRegion *BaseReg =
+ dyn_cast<CXXBaseObjectRegion>(R)) {
+ // C++ base object region is another kind of region that we should blast
+ // through to look for lazy compound value. It is like a field region.
+ Result = findLazyBinding(B, cast<SubRegion>(BaseReg->getSuperRegion()),
+ originalRegion);
+
+ if (Result.second)
+ Result.second = MRMgr.getCXXBaseObjectRegionWithSuper(BaseReg,
+ Result.second);
+ }
+
+ return Result;
+}
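+
+// For example, after a lazy copy 'x = y;', a read of 'x.f.g' walks up from
+// the region of 'g' until it finds the LazyCompoundVal installed on 'x',
+// then rebuilds the path to read 'y.f.g' out of the snapshotted store.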
+
+SVal RegionStoreManager::getBindingForElement(RegionBindingsConstRef B,
+ const ElementRegion* R) {
+  // We do not currently model bindings of the CompoundLiteralRegion.
+ if (isa<CompoundLiteralRegion>(R->getBaseRegion()))
+ return UnknownVal();
+
+ // Check if the region has a binding.
+ if (const Optional<SVal> &V = B.getDirectBinding(R))
+ return *V;
+
+ const MemRegion* superR = R->getSuperRegion();
+
+ // Check if the region is an element region of a string literal.
+  if (const StringRegion *StrR = dyn_cast<StringRegion>(superR)) {
+ // FIXME: Handle loads from strings where the literal is treated as
+ // an integer, e.g., *((unsigned int*)"hello")
+ QualType T = Ctx.getAsArrayType(StrR->getValueType())->getElementType();
+ if (!Ctx.hasSameUnqualifiedType(T, R->getElementType()))
+ return UnknownVal();
+
+ const StringLiteral *Str = StrR->getStringLiteral();
+ SVal Idx = R->getIndex();
+ if (Optional<nonloc::ConcreteInt> CI = Idx.getAs<nonloc::ConcreteInt>()) {
+ int64_t i = CI->getValue().getSExtValue();
+      // Abort on string underrun. This can happen with arbitrary
+ // clients of getBindingForElement().
+ if (i < 0)
+ return UndefinedVal();
+ int64_t length = Str->getLength();
+ // Technically, only i == length is guaranteed to be null.
+ // However, such overflows should be caught before reaching this point;
+ // the only time such an access would be made is if a string literal was
+ // used to initialize a larger array.
+ char c = (i >= length) ? '\0' : Str->getCodeUnit(i);
+ return svalBuilder.makeIntVal(c, T);
+ }
+ }
+
+ // Check for loads from a code text region. For such loads, just give up.
+ if (isa<CodeTextRegion>(superR))
+ return UnknownVal();
+
+ // Handle the case where we are indexing into a larger scalar object.
+ // For example, this handles:
+ // int x = ...
+ // char *y = &x;
+ // return *y;
+ // FIXME: This is a hack, and doesn't do anything really intelligent yet.
+ const RegionRawOffset &O = R->getAsArrayOffset();
+
+ // If we cannot reason about the offset, return an unknown value.
+ if (!O.getRegion())
+ return UnknownVal();
+
+ if (const TypedValueRegion *baseR =
+ dyn_cast_or_null<TypedValueRegion>(O.getRegion())) {
+ QualType baseT = baseR->getValueType();
+ if (baseT->isScalarType()) {
+ QualType elemT = R->getElementType();
+ if (elemT->isScalarType()) {
+ if (Ctx.getTypeSizeInChars(baseT) >= Ctx.getTypeSizeInChars(elemT)) {
+ if (const Optional<SVal> &V = B.getDirectBinding(superR)) {
+ if (SymbolRef parentSym = V->getAsSymbol())
+ return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R);
+
+ if (V->isUnknownOrUndef())
+ return *V;
+ // Other cases: give up. We are indexing into a larger object
+ // that has some value, but we don't know how to handle that yet.
+ return UnknownVal();
+ }
+ }
+ }
+ }
+ }
+ return getBindingForFieldOrElementCommon(B, R, R->getElementType());
+}
+
+SVal RegionStoreManager::getBindingForField(RegionBindingsConstRef B,
+ const FieldRegion* R) {
+
+ // Check if the region has a binding.
+ if (const Optional<SVal> &V = B.getDirectBinding(R))
+ return *V;
+
+ QualType Ty = R->getValueType();
+ return getBindingForFieldOrElementCommon(B, R, Ty);
+}
+
+Optional<SVal>
+RegionStoreManager::getBindingForDerivedDefaultValue(RegionBindingsConstRef B,
+ const MemRegion *superR,
+ const TypedValueRegion *R,
+ QualType Ty) {
+
+ if (const Optional<SVal> &D = B.getDefaultBinding(superR)) {
+ const SVal &val = D.getValue();
+ if (SymbolRef parentSym = val.getAsSymbol())
+ return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R);
+
+ if (val.isZeroConstant())
+ return svalBuilder.makeZeroVal(Ty);
+
+ if (val.isUnknownOrUndef())
+ return val;
+
+ // Lazy bindings are usually handled through getExistingLazyBinding().
+ // We should unify these two code paths at some point.
+ if (val.getAs<nonloc::LazyCompoundVal>())
+ return val;
+
+ llvm_unreachable("Unknown default value");
+ }
+
+ return None;
+}
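+
+// For example, if the super-region carries a default binding of zero, a field
+// read yields a zero of the field's type; if it carries a conjured symbol,
+// the read yields a SymbolDerived value specific to the queried region.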
+
+SVal RegionStoreManager::getLazyBinding(const SubRegion *LazyBindingRegion,
+ RegionBindingsRef LazyBinding) {
+ SVal Result;
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(LazyBindingRegion))
+ Result = getBindingForElement(LazyBinding, ER);
+ else
+ Result = getBindingForField(LazyBinding,
+ cast<FieldRegion>(LazyBindingRegion));
+
+ // FIXME: This is a hack to deal with RegionStore's inability to distinguish a
+ // default value for /part/ of an aggregate from a default value for the
+ // /entire/ aggregate. The most common case of this is when struct Outer
+ // has as its first member a struct Inner, which is copied in from a stack
+ // variable. In this case, even if the Outer's default value is symbolic, 0,
+ // or unknown, it gets overridden by the Inner's default value of undefined.
+ //
+ // This is a general problem -- if the Inner is zero-initialized, the Outer
+ // will now look zero-initialized. The proper way to solve this is with a
+ // new version of RegionStore that tracks the extent of a binding as well
+ // as the offset.
+ //
+ // This hack only takes care of the undefined case because that can very
+ // quickly result in a warning.
+ if (Result.isUndef())
+ Result = UnknownVal();
+
+ return Result;
+}
+
+SVal
+RegionStoreManager::getBindingForFieldOrElementCommon(RegionBindingsConstRef B,
+ const TypedValueRegion *R,
+ QualType Ty) {
+
+ // At this point we have already checked in either getBindingForElement or
+ // getBindingForField if 'R' has a direct binding.
+
+ // Lazy binding?
+ Store lazyBindingStore = nullptr;
+ const SubRegion *lazyBindingRegion = nullptr;
+ std::tie(lazyBindingStore, lazyBindingRegion) = findLazyBinding(B, R, R);
+ if (lazyBindingRegion)
+ return getLazyBinding(lazyBindingRegion,
+ getRegionBindings(lazyBindingStore));
+
+  // Record whether or not we see a symbolic index. Such an index may be
+  // entirely outside the scope of our lookup.
+ bool hasSymbolicIndex = false;
+
+ // FIXME: This is a hack to deal with RegionStore's inability to distinguish a
+ // default value for /part/ of an aggregate from a default value for the
+ // /entire/ aggregate. The most common case of this is when struct Outer
+ // has as its first member a struct Inner, which is copied in from a stack
+ // variable. In this case, even if the Outer's default value is symbolic, 0,
+ // or unknown, it gets overridden by the Inner's default value of undefined.
+ //
+ // This is a general problem -- if the Inner is zero-initialized, the Outer
+ // will now look zero-initialized. The proper way to solve this is with a
+ // new version of RegionStore that tracks the extent of a binding as well
+ // as the offset.
+ //
+ // This hack only takes care of the undefined case because that can very
+ // quickly result in a warning.
+ bool hasPartialLazyBinding = false;
+
+ const SubRegion *SR = dyn_cast<SubRegion>(R);
+ while (SR) {
+ const MemRegion *Base = SR->getSuperRegion();
+ if (Optional<SVal> D = getBindingForDerivedDefaultValue(B, Base, R, Ty)) {
+ if (D->getAs<nonloc::LazyCompoundVal>()) {
+ hasPartialLazyBinding = true;
+ break;
+ }
+
+ return *D;
+ }
+
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(Base)) {
+ NonLoc index = ER->getIndex();
+ if (!index.isConstant())
+ hasSymbolicIndex = true;
+ }
+
+ // If our super region is a field or element itself, walk up the region
+ // hierarchy to see if there is a default value installed in an ancestor.
+ SR = dyn_cast<SubRegion>(Base);
+ }
+
+ if (R->hasStackNonParametersStorage()) {
+ if (isa<ElementRegion>(R)) {
+ // Currently we don't reason specially about Clang-style vectors. Check
+ // if superR is a vector and if so return Unknown.
+ if (const TypedValueRegion *typedSuperR =
+ dyn_cast<TypedValueRegion>(R->getSuperRegion())) {
+ if (typedSuperR->getValueType()->isVectorType())
+ return UnknownVal();
+ }
+ }
+
+ // FIXME: We also need to take ElementRegions with symbolic indexes into
+ // account. This case handles both directly accessing an ElementRegion
+ // with a symbolic offset, but also fields within an element with
+ // a symbolic offset.
+ if (hasSymbolicIndex)
+ return UnknownVal();
+
+ if (!hasPartialLazyBinding)
+ return UndefinedVal();
+ }
+
+ // All other values are symbolic.
+ return svalBuilder.getRegionValueSymbolVal(R);
+}
+
+SVal RegionStoreManager::getBindingForObjCIvar(RegionBindingsConstRef B,
+ const ObjCIvarRegion* R) {
+ // Check if the region has a binding.
+ if (const Optional<SVal> &V = B.getDirectBinding(R))
+ return *V;
+
+ const MemRegion *superR = R->getSuperRegion();
+
+ // Check if the super region has a default binding.
+ if (const Optional<SVal> &V = B.getDefaultBinding(superR)) {
+ if (SymbolRef parentSym = V->getAsSymbol())
+ return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R);
+
+ // Other cases: give up.
+ return UnknownVal();
+ }
+
+ return getBindingForLazySymbol(R);
+}
+
+SVal RegionStoreManager::getBindingForVar(RegionBindingsConstRef B,
+ const VarRegion *R) {
+
+ // Check if the region has a binding.
+ if (const Optional<SVal> &V = B.getDirectBinding(R))
+ return *V;
+
+ // Lazily derive a value for the VarRegion.
+ const VarDecl *VD = R->getDecl();
+ const MemSpaceRegion *MS = R->getMemorySpace();
+
+ // Arguments are always symbolic.
+ if (isa<StackArgumentsSpaceRegion>(MS))
+ return svalBuilder.getRegionValueSymbolVal(R);
+
+ // Is 'VD' declared constant? If so, retrieve the constant value.
+ if (VD->getType().isConstQualified())
+ if (const Expr *Init = VD->getInit())
+ if (Optional<SVal> V = svalBuilder.getConstantVal(Init))
+ return *V;
+
+ // This must come after the check for constants because closure-captured
+ // constant variables may appear in UnknownSpaceRegion.
+ if (isa<UnknownSpaceRegion>(MS))
+ return svalBuilder.getRegionValueSymbolVal(R);
+
+ if (isa<GlobalsSpaceRegion>(MS)) {
+ QualType T = VD->getType();
+
+ // Function-scoped static variables are default-initialized to 0; if they
+ // have an initializer, it would have been processed by now.
+ if (isa<StaticGlobalSpaceRegion>(MS))
+ return svalBuilder.makeZeroVal(T);
+
+ if (Optional<SVal> V = getBindingForDerivedDefaultValue(B, MS, R, T)) {
+ assert(!V->getAs<nonloc::LazyCompoundVal>());
+ return V.getValue();
+ }
+
+ return svalBuilder.getRegionValueSymbolVal(R);
+ }
+
+ return UndefinedVal();
+}
+
+SVal RegionStoreManager::getBindingForLazySymbol(const TypedValueRegion *R) {
+ // All other values are symbolic.
+ return svalBuilder.getRegionValueSymbolVal(R);
+}
+
+const RegionStoreManager::SValListTy &
+RegionStoreManager::getInterestingValues(nonloc::LazyCompoundVal LCV) {
+ // First, check the cache.
+ LazyBindingsMapTy::iterator I = LazyBindingsMap.find(LCV.getCVData());
+ if (I != LazyBindingsMap.end())
+ return I->second;
+
+ // If we don't have a list of values cached, start constructing it.
+ SValListTy List;
+
+ const SubRegion *LazyR = LCV.getRegion();
+ RegionBindingsRef B = getRegionBindings(LCV.getStore());
+
+ // If this region had /no/ bindings at the time, there are no interesting
+ // values to return.
+ const ClusterBindings *Cluster = B.lookup(LazyR->getBaseRegion());
+ if (!Cluster)
+ return (LazyBindingsMap[LCV.getCVData()] = std::move(List));
+
+ SmallVector<BindingPair, 32> Bindings;
+ collectSubRegionBindings(Bindings, svalBuilder, *Cluster, LazyR,
+ /*IncludeAllDefaultBindings=*/true);
+ for (SmallVectorImpl<BindingPair>::const_iterator I = Bindings.begin(),
+ E = Bindings.end();
+ I != E; ++I) {
+ SVal V = I->second;
+ if (V.isUnknownOrUndef() || V.isConstant())
+ continue;
+
+ if (Optional<nonloc::LazyCompoundVal> InnerLCV =
+ V.getAs<nonloc::LazyCompoundVal>()) {
+ const SValListTy &InnerList = getInterestingValues(*InnerLCV);
+ List.insert(List.end(), InnerList.begin(), InnerList.end());
+ continue;
+ }
+
+ List.push_back(V);
+ }
+
+ return (LazyBindingsMap[LCV.getCVData()] = std::move(List));
+}
+
+NonLoc RegionStoreManager::createLazyBinding(RegionBindingsConstRef B,
+ const TypedValueRegion *R) {
+ if (Optional<nonloc::LazyCompoundVal> V =
+ getExistingLazyBinding(svalBuilder, B, R, false))
+ return *V;
+
+ return svalBuilder.makeLazyCompoundVal(StoreRef(B.asStore(), *this), R);
+}
+
+static bool isRecordEmpty(const RecordDecl *RD) {
+ if (!RD->field_empty())
+ return false;
+ if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD))
+ return CRD->getNumBases() == 0;
+ return true;
+}
+
+SVal RegionStoreManager::getBindingForStruct(RegionBindingsConstRef B,
+ const TypedValueRegion *R) {
+ const RecordDecl *RD = R->getValueType()->castAs<RecordType>()->getDecl();
+ if (!RD->getDefinition() || isRecordEmpty(RD))
+ return UnknownVal();
+
+ return createLazyBinding(B, R);
+}
+
+SVal RegionStoreManager::getBindingForArray(RegionBindingsConstRef B,
+ const TypedValueRegion *R) {
+ assert(Ctx.getAsConstantArrayType(R->getValueType()) &&
+ "Only constant array types can have compound bindings.");
+
+ return createLazyBinding(B, R);
+}
+
+bool RegionStoreManager::includedInBindings(Store store,
+ const MemRegion *region) const {
+ RegionBindingsRef B = getRegionBindings(store);
+ region = region->getBaseRegion();
+
+  // Quick path: if the base is the head of a cluster, the region is included.
+ if (B.lookup(region))
+ return true;
+
+  // Slow path: if the region is the VALUE of any binding, it is included.
+  for (RegionBindingsRef::iterator RI = B.begin(), RE = B.end();
+       RI != RE; ++RI) {
+ const ClusterBindings &Cluster = RI.getData();
+ for (ClusterBindings::iterator CI = Cluster.begin(), CE = Cluster.end();
+ CI != CE; ++CI) {
+ const SVal &D = CI.getData();
+ if (const MemRegion *R = D.getAsRegion())
+ if (R->getBaseRegion() == region)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Binding values to regions.
+//===----------------------------------------------------------------------===//
+
+StoreRef RegionStoreManager::killBinding(Store ST, Loc L) {
+ if (Optional<loc::MemRegionVal> LV = L.getAs<loc::MemRegionVal>())
+ if (const MemRegion* R = LV->getRegion())
+ return StoreRef(getRegionBindings(ST).removeBinding(R)
+ .asImmutableMap()
+ .getRootWithoutRetain(),
+ *this);
+
+ return StoreRef(ST, *this);
+}
+
+RegionBindingsRef
+RegionStoreManager::bind(RegionBindingsConstRef B, Loc L, SVal V) {
+ if (L.getAs<loc::ConcreteInt>())
+ return B;
+
+ // If we get here, the location should be a region.
+ const MemRegion *R = L.castAs<loc::MemRegionVal>().getRegion();
+
+ // Check if the region is a struct region.
+ if (const TypedValueRegion* TR = dyn_cast<TypedValueRegion>(R)) {
+ QualType Ty = TR->getValueType();
+ if (Ty->isArrayType())
+ return bindArray(B, TR, V);
+ if (Ty->isStructureOrClassType())
+ return bindStruct(B, TR, V);
+ if (Ty->isVectorType())
+ return bindVector(B, TR, V);
+ if (Ty->isUnionType())
+ return bindAggregate(B, TR, V);
+ }
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
+ // Binding directly to a symbolic region should be treated as binding
+ // to element 0.
+ QualType T = SR->getSymbol()->getType();
+ if (T->isAnyPointerType() || T->isReferenceType())
+ T = T->getPointeeType();
+
+ R = GetElementZeroRegion(SR, T);
+ }
+
+ // Clear out bindings that may overlap with this binding.
+ RegionBindingsRef NewB = removeSubRegionBindings(B, cast<SubRegion>(R));
+ return NewB.addBinding(BindingKey::Make(R, BindingKey::Direct), V);
+}
+
+RegionBindingsRef
+RegionStoreManager::setImplicitDefaultValue(RegionBindingsConstRef B,
+ const MemRegion *R,
+ QualType T) {
+ SVal V;
+
+ if (Loc::isLocType(T))
+ V = svalBuilder.makeNull();
+ else if (T->isIntegralOrEnumerationType())
+ V = svalBuilder.makeZeroVal(T);
+ else if (T->isStructureOrClassType() || T->isArrayType()) {
+ // Set the default value to a zero constant when it is a structure
+ // or array. The type doesn't really matter.
+ V = svalBuilder.makeZeroVal(Ctx.IntTy);
+ }
+ else {
+ // We can't represent values of this type, but we still need to set a value
+ // to record that the region has been initialized.
+ // If this assertion ever fires, a new case should be added above -- we
+ // should know how to default-initialize any value we can symbolicate.
+ assert(!SymbolManager::canSymbolicate(T) && "This type is representable");
+ V = UnknownVal();
+ }
+
+ return B.addBinding(R, BindingKey::Default, V);
+}
+
+RegionBindingsRef
+RegionStoreManager::bindArray(RegionBindingsConstRef B,
+ const TypedValueRegion* R,
+ SVal Init) {
+
+  const ArrayType *AT = cast<ArrayType>(Ctx.getCanonicalType(R->getValueType()));
+ QualType ElementTy = AT->getElementType();
+ Optional<uint64_t> Size;
+
+ if (const ConstantArrayType* CAT = dyn_cast<ConstantArrayType>(AT))
+ Size = CAT->getSize().getZExtValue();
+
+ // Check if the init expr is a string literal.
+ if (Optional<loc::MemRegionVal> MRV = Init.getAs<loc::MemRegionVal>()) {
+ const StringRegion *S = cast<StringRegion>(MRV->getRegion());
+
+ // Treat the string as a lazy compound value.
+ StoreRef store(B.asStore(), *this);
+ nonloc::LazyCompoundVal LCV = svalBuilder.makeLazyCompoundVal(store, S)
+ .castAs<nonloc::LazyCompoundVal>();
+ return bindAggregate(B, R, LCV);
+ }
+
+ // Handle lazy compound values.
+ if (Init.getAs<nonloc::LazyCompoundVal>())
+ return bindAggregate(B, R, Init);
+
+ // Remaining case: explicit compound values.
+
+ if (Init.isUnknown())
+ return setImplicitDefaultValue(B, R, ElementTy);
+
+ const nonloc::CompoundVal& CV = Init.castAs<nonloc::CompoundVal>();
+ nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
+ uint64_t i = 0;
+
+ RegionBindingsRef NewB(B);
+
+  for (; Size.hasValue() ? i < Size.getValue() : true; ++i, ++VI) {
+ // The init list might be shorter than the array length.
+ if (VI == VE)
+ break;
+
+ const NonLoc &Idx = svalBuilder.makeArrayIndex(i);
+ const ElementRegion *ER = MRMgr.getElementRegion(ElementTy, Idx, R, Ctx);
+
+ if (ElementTy->isStructureOrClassType())
+ NewB = bindStruct(NewB, ER, *VI);
+ else if (ElementTy->isArrayType())
+ NewB = bindArray(NewB, ER, *VI);
+ else
+ NewB = bind(NewB, loc::MemRegionVal(ER), *VI);
+ }
+
+ // If the init list is shorter than the array length, set the
+ // array default value.
+ if (Size.hasValue() && i < Size.getValue())
+ NewB = setImplicitDefaultValue(NewB, R, ElementTy);
+
+ return NewB;
+}
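+
+// For example, for 'int a[4] = {1, 2};' elements 0 and 1 receive direct
+// bindings and the remaining elements are covered by the implicit default
+// value of 0 set above.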
+
+RegionBindingsRef RegionStoreManager::bindVector(RegionBindingsConstRef B,
+ const TypedValueRegion* R,
+ SVal V) {
+ QualType T = R->getValueType();
+ assert(T->isVectorType());
+ const VectorType *VT = T->getAs<VectorType>(); // Use getAs for typedefs.
+
+ // Handle lazy compound values and symbolic values.
+ if (V.getAs<nonloc::LazyCompoundVal>() || V.getAs<nonloc::SymbolVal>())
+ return bindAggregate(B, R, V);
+
+  // We may get a non-CompoundVal accidentally, either due to imprecise cast
+  // logic or because we are binding a symbolic struct value. Kill the field
+  // values, and if the value is symbolic go and bind it as a "default" binding.
+ if (!V.getAs<nonloc::CompoundVal>()) {
+ return bindAggregate(B, R, UnknownVal());
+ }
+
+ QualType ElemType = VT->getElementType();
+ nonloc::CompoundVal CV = V.castAs<nonloc::CompoundVal>();
+ nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
+ unsigned index = 0, numElements = VT->getNumElements();
+ RegionBindingsRef NewB(B);
+
+  for (; index != numElements; ++index) {
+ if (VI == VE)
+ break;
+
+ NonLoc Idx = svalBuilder.makeArrayIndex(index);
+ const ElementRegion *ER = MRMgr.getElementRegion(ElemType, Idx, R, Ctx);
+
+ if (ElemType->isArrayType())
+ NewB = bindArray(NewB, ER, *VI);
+ else if (ElemType->isStructureOrClassType())
+ NewB = bindStruct(NewB, ER, *VI);
+ else
+ NewB = bind(NewB, loc::MemRegionVal(ER), *VI);
+ }
+ return NewB;
+}
+
+Optional<RegionBindingsRef>
+RegionStoreManager::tryBindSmallStruct(RegionBindingsConstRef B,
+ const TypedValueRegion *R,
+ const RecordDecl *RD,
+ nonloc::LazyCompoundVal LCV) {
+ FieldVector Fields;
+
+ if (const CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(RD))
+ if (Class->getNumBases() != 0 || Class->getNumVBases() != 0)
+ return None;
+
+ for (const auto *FD : RD->fields()) {
+ if (FD->isUnnamedBitfield())
+ continue;
+
+ // If there are too many fields, or if any of the fields are aggregates,
+ // just use the LCV as a default binding.
+ if (Fields.size() == SmallStructLimit)
+ return None;
+
+ QualType Ty = FD->getType();
+ if (!(Ty->isScalarType() || Ty->isReferenceType()))
+ return None;
+
+ Fields.push_back(FD);
+ }
+
+ RegionBindingsRef NewB = B;
+
+ for (FieldVector::iterator I = Fields.begin(), E = Fields.end(); I != E; ++I){
+ const FieldRegion *SourceFR = MRMgr.getFieldRegion(*I, LCV.getRegion());
+ SVal V = getBindingForField(getRegionBindings(LCV.getStore()), SourceFR);
+
+ const FieldRegion *DestFR = MRMgr.getFieldRegion(*I, R);
+ NewB = bind(NewB, loc::MemRegionVal(DestFR), V);
+ }
+
+ return NewB;
+}
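+
+// The effect: copying a small, all-scalar struct binds each field eagerly,
+// while larger records or records containing aggregates fall back to a
+// single default LazyCompoundVal binding via bindAggregate() (see below).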
+
+RegionBindingsRef RegionStoreManager::bindStruct(RegionBindingsConstRef B,
+ const TypedValueRegion* R,
+ SVal V) {
+ if (!Features.supportsFields())
+ return B;
+
+ QualType T = R->getValueType();
+ assert(T->isStructureOrClassType());
+
+ const RecordType* RT = T->getAs<RecordType>();
+ const RecordDecl *RD = RT->getDecl();
+
+ if (!RD->isCompleteDefinition())
+ return B;
+
+ // Handle lazy compound values and symbolic values.
+ if (Optional<nonloc::LazyCompoundVal> LCV =
+ V.getAs<nonloc::LazyCompoundVal>()) {
+ if (Optional<RegionBindingsRef> NewB = tryBindSmallStruct(B, R, RD, *LCV))
+ return *NewB;
+ return bindAggregate(B, R, V);
+ }
+ if (V.getAs<nonloc::SymbolVal>())
+ return bindAggregate(B, R, V);
+
+  // We may get a non-CompoundVal accidentally, either due to imprecise cast
+  // logic or because we are binding a symbolic struct value. Kill the field
+  // values, and if the value is symbolic go and bind it as a "default" binding.
+ if (V.isUnknown() || !V.getAs<nonloc::CompoundVal>())
+ return bindAggregate(B, R, UnknownVal());
+
+ const nonloc::CompoundVal& CV = V.castAs<nonloc::CompoundVal>();
+ nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
+
+ RecordDecl::field_iterator FI, FE;
+ RegionBindingsRef NewB(B);
+
+ for (FI = RD->field_begin(), FE = RD->field_end(); FI != FE; ++FI) {
+
+ if (VI == VE)
+ break;
+
+ // Skip any unnamed bitfields to stay in sync with the initializers.
+ if (FI->isUnnamedBitfield())
+ continue;
+
+ QualType FTy = FI->getType();
+ const FieldRegion* FR = MRMgr.getFieldRegion(*FI, R);
+
+ if (FTy->isArrayType())
+ NewB = bindArray(NewB, FR, *VI);
+ else if (FTy->isStructureOrClassType())
+ NewB = bindStruct(NewB, FR, *VI);
+ else
+ NewB = bind(NewB, loc::MemRegionVal(FR), *VI);
+ ++VI;
+ }
+
+  // There may be fewer values in the initializer list than fields in the struct.
+ if (FI != FE) {
+ NewB = NewB.addBinding(R, BindingKey::Default,
+ svalBuilder.makeIntVal(0, false));
+ }
+
+ return NewB;
+}
+
+RegionBindingsRef
+RegionStoreManager::bindAggregate(RegionBindingsConstRef B,
+ const TypedRegion *R,
+ SVal Val) {
+ // Remove the old bindings, using 'R' as the root of all regions
+ // we will invalidate. Then add the new binding.
+ return removeSubRegionBindings(B, R).addBinding(R, BindingKey::Default, Val);
+}
+
+//===----------------------------------------------------------------------===//
+// State pruning.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class removeDeadBindingsWorker :
+ public ClusterAnalysis<removeDeadBindingsWorker> {
+ SmallVector<const SymbolicRegion*, 12> Postponed;
+ SymbolReaper &SymReaper;
+ const StackFrameContext *CurrentLCtx;
+
+public:
+ removeDeadBindingsWorker(RegionStoreManager &rm,
+ ProgramStateManager &stateMgr,
+ RegionBindingsRef b, SymbolReaper &symReaper,
+ const StackFrameContext *LCtx)
+ : ClusterAnalysis<removeDeadBindingsWorker>(rm, stateMgr, b),
+ SymReaper(symReaper), CurrentLCtx(LCtx) {}
+
+ // Called by ClusterAnalysis.
+ void VisitAddedToCluster(const MemRegion *baseR, const ClusterBindings &C);
+ void VisitCluster(const MemRegion *baseR, const ClusterBindings *C);
+ using ClusterAnalysis<removeDeadBindingsWorker>::VisitCluster;
+
+ using ClusterAnalysis::AddToWorkList;
+
+ bool AddToWorkList(const MemRegion *R);
+
+ bool UpdatePostponed();
+ void VisitBinding(SVal V);
+};
+}
+
+bool removeDeadBindingsWorker::AddToWorkList(const MemRegion *R) {
+ const MemRegion *BaseR = R->getBaseRegion();
+ return AddToWorkList(WorkListElement(BaseR), getCluster(BaseR));
+}
+
+void removeDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
+ const ClusterBindings &C) {
+
+ if (const VarRegion *VR = dyn_cast<VarRegion>(baseR)) {
+ if (SymReaper.isLive(VR))
+ AddToWorkList(baseR, &C);
+
+ return;
+ }
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(baseR)) {
+ if (SymReaper.isLive(SR->getSymbol()))
+ AddToWorkList(SR, &C);
+ else
+ Postponed.push_back(SR);
+
+ return;
+ }
+
+ if (isa<NonStaticGlobalSpaceRegion>(baseR)) {
+ AddToWorkList(baseR, &C);
+ return;
+ }
+
+ // CXXThisRegion in the current or parent location context is live.
+ if (const CXXThisRegion *TR = dyn_cast<CXXThisRegion>(baseR)) {
+ const StackArgumentsSpaceRegion *StackReg =
+ cast<StackArgumentsSpaceRegion>(TR->getSuperRegion());
+ const StackFrameContext *RegCtx = StackReg->getStackFrame();
+ if (CurrentLCtx &&
+ (RegCtx == CurrentLCtx || RegCtx->isParentOf(CurrentLCtx)))
+ AddToWorkList(TR, &C);
+ }
+}
+
+void removeDeadBindingsWorker::VisitCluster(const MemRegion *baseR,
+ const ClusterBindings *C) {
+ if (!C)
+ return;
+
+ // Mark the symbol for any SymbolicRegion with live bindings as live itself.
+ // This means we should continue to track that symbol.
+ if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(baseR))
+ SymReaper.markLive(SymR->getSymbol());
+
+ for (ClusterBindings::iterator I = C->begin(), E = C->end(); I != E; ++I) {
+ // The element indices of the binding key's region are live.
+ SymReaper.markElementIndicesLive(I.getKey().getRegion());
+
+ VisitBinding(I.getData());
+ }
+}
+
+void removeDeadBindingsWorker::VisitBinding(SVal V) {
+ // Is it a LazyCompoundVal? All referenced regions are live as well.
+ if (Optional<nonloc::LazyCompoundVal> LCS =
+ V.getAs<nonloc::LazyCompoundVal>()) {
+
+ const RegionStoreManager::SValListTy &Vals = RM.getInterestingValues(*LCS);
+
+ for (RegionStoreManager::SValListTy::const_iterator I = Vals.begin(),
+ E = Vals.end();
+ I != E; ++I)
+ VisitBinding(*I);
+
+ return;
+ }
+
+ // If V is a region, then add it to the worklist.
+ if (const MemRegion *R = V.getAsRegion()) {
+ AddToWorkList(R);
+ SymReaper.markLive(R);
+
+ // All regions captured by a block are also live.
+ if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(R)) {
+ BlockDataRegion::referenced_vars_iterator I = BR->referenced_vars_begin(),
+ E = BR->referenced_vars_end();
+ for ( ; I != E; ++I)
+ AddToWorkList(I.getCapturedRegion());
+ }
+ }
+
+
+ // Update the set of live symbols.
+ for (SymExpr::symbol_iterator SI = V.symbol_begin(), SE = V.symbol_end();
+ SI!=SE; ++SI)
+ SymReaper.markLive(*SI);
+}
+
+bool removeDeadBindingsWorker::UpdatePostponed() {
+ // See if any postponed SymbolicRegions are actually live now, after
+ // having done a scan.
+ bool changed = false;
+
+ for (SmallVectorImpl<const SymbolicRegion*>::iterator
+ I = Postponed.begin(), E = Postponed.end() ; I != E ; ++I) {
+ if (const SymbolicRegion *SR = *I) {
+ if (SymReaper.isLive(SR->getSymbol())) {
+ changed |= AddToWorkList(SR);
+ *I = nullptr;
+ }
+ }
+ }
+
+ return changed;
+}
+
+StoreRef RegionStoreManager::removeDeadBindings(Store store,
+ const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper) {
+ RegionBindingsRef B = getRegionBindings(store);
+ removeDeadBindingsWorker W(*this, StateMgr, B, SymReaper, LCtx);
+ W.GenerateClusters();
+
+ // Enqueue the region roots onto the worklist.
+ for (SymbolReaper::region_iterator I = SymReaper.region_begin(),
+ E = SymReaper.region_end(); I != E; ++I) {
+ W.AddToWorkList(*I);
+ }
+
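+ // Iterate to a fixed point: marking a symbol live may make a postponed
+ // SymbolicRegion live, whose bindings can in turn reach further symbols.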
+ do W.RunWorkList(); while (W.UpdatePostponed());
+
+ // We have now scanned the store, marking reachable regions and symbols
+ // as live. We now remove all the regions that are dead from the store
+ // and notify SymReaper of the symbols that may now be dead.
+ for (RegionBindingsRef::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ const MemRegion *Base = I.getKey();
+
+ // If the cluster has been visited, we know the region has been marked.
+ if (W.isVisited(Base))
+ continue;
+
+ // Remove the dead entry.
+ B = B.remove(Base);
+
+ if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(Base))
+ SymReaper.maybeDead(SymR->getSymbol());
+
+ // Mark all non-live symbols that this binding references as dead.
+ const ClusterBindings &Cluster = I.getData();
+ for (ClusterBindings::iterator CI = Cluster.begin(), CE = Cluster.end();
+ CI != CE; ++CI) {
+ SVal X = CI.getData();
+ SymExpr::symbol_iterator SI = X.symbol_begin(), SE = X.symbol_end();
+ for (; SI != SE; ++SI)
+ SymReaper.maybeDead(*SI);
+ }
+ }
+
+ return StoreRef(B.asStore(), *this);
+}
+
+//===----------------------------------------------------------------------===//
+// Utility methods.
+//===----------------------------------------------------------------------===//
+
+void RegionStoreManager::print(Store store, raw_ostream &OS,
+ const char* nl, const char *sep) {
+ RegionBindingsRef B = getRegionBindings(store);
+ OS << "Store (direct and default bindings), "
+ << B.asStore()
+ << " :" << nl;
+ B.dump(OS, nl);
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
new file mode 100644
index 0000000..cdae040
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -0,0 +1,556 @@
+// SValBuilder.cpp - Basic class for all SValBuilder implementations -*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SValBuilder, the base class for all (complete) SValBuilder
+// implementations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// Basic SVal creation.
+//===----------------------------------------------------------------------===//
+
+void SValBuilder::anchor() { }
+
+DefinedOrUnknownSVal SValBuilder::makeZeroVal(QualType type) {
+ if (Loc::isLocType(type))
+ return makeNull();
+
+ if (type->isIntegralOrEnumerationType())
+ return makeIntVal(0, type);
+
+ // FIXME: Handle floats.
+ // FIXME: Handle structs.
+ return UnknownVal();
+}
+
+NonLoc SValBuilder::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
+ const llvm::APSInt& rhs, QualType type) {
+ // The Environment ensures we always get a persistent APSInt in
+ // BasicValueFactory, so we don't need to get the APSInt from
+ // BasicValueFactory again.
+ assert(lhs);
+ assert(!Loc::isLocType(type));
+ return nonloc::SymbolVal(SymMgr.getSymIntExpr(lhs, op, rhs, type));
+}
+
+NonLoc SValBuilder::makeNonLoc(const llvm::APSInt& lhs,
+ BinaryOperator::Opcode op, const SymExpr *rhs,
+ QualType type) {
+ assert(rhs);
+ assert(!Loc::isLocType(type));
+ return nonloc::SymbolVal(SymMgr.getIntSymExpr(lhs, op, rhs, type));
+}
+
+NonLoc SValBuilder::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
+ const SymExpr *rhs, QualType type) {
+ assert(lhs && rhs);
+ assert(!Loc::isLocType(type));
+ return nonloc::SymbolVal(SymMgr.getSymSymExpr(lhs, op, rhs, type));
+}
+
+NonLoc SValBuilder::makeNonLoc(const SymExpr *operand,
+ QualType fromTy, QualType toTy) {
+ assert(operand);
+ assert(!Loc::isLocType(toTy));
+ return nonloc::SymbolVal(SymMgr.getCastSymbol(operand, fromTy, toTy));
+}
+
+SVal SValBuilder::convertToArrayIndex(SVal val) {
+ if (val.isUnknownOrUndef())
+ return val;
+
+ // Common case: we have an appropriately sized integer.
+ if (Optional<nonloc::ConcreteInt> CI = val.getAs<nonloc::ConcreteInt>()) {
+ const llvm::APSInt& I = CI->getValue();
+ if (I.getBitWidth() == ArrayIndexWidth && I.isSigned())
+ return val;
+ }
+
+ return evalCastFromNonLoc(val.castAs<NonLoc>(), ArrayIndexTy);
+}
+
+nonloc::ConcreteInt SValBuilder::makeBoolVal(const CXXBoolLiteralExpr *boolean){
+ return makeTruthVal(boolean->getValue());
+}
+
+DefinedOrUnknownSVal
+SValBuilder::getRegionValueSymbolVal(const TypedValueRegion* region) {
+ QualType T = region->getValueType();
+
+ if (T->isNullPtrType())
+ return makeZeroVal(T);
+
+ if (!SymbolManager::canSymbolicate(T))
+ return UnknownVal();
+
+ SymbolRef sym = SymMgr.getRegionValueSymbol(region);
+
+ if (Loc::isLocType(T))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
+}
+
+DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *SymbolTag,
+ const Expr *Ex,
+ const LocationContext *LCtx,
+ unsigned Count) {
+ QualType T = Ex->getType();
+
+ if (T->isNullPtrType())
+ return makeZeroVal(T);
+
+ // Compute the type of the result. If the expression is a glvalue, the
+ // result should be a location.
+ QualType ExType = Ex->getType();
+ if (Ex->isGLValue())
+ T = LCtx->getAnalysisDeclContext()->getASTContext().getPointerType(ExType);
+
+ return conjureSymbolVal(SymbolTag, Ex, LCtx, T, Count);
+}
+
+DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *symbolTag,
+ const Expr *expr,
+ const LocationContext *LCtx,
+ QualType type,
+ unsigned count) {
+ if (type->isNullPtrType())
+ return makeZeroVal(type);
+
+ if (!SymbolManager::canSymbolicate(type))
+ return UnknownVal();
+
+ SymbolRef sym = SymMgr.conjureSymbol(expr, LCtx, type, count, symbolTag);
+
+ if (Loc::isLocType(type))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
+}
+
+
+DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const Stmt *stmt,
+ const LocationContext *LCtx,
+ QualType type,
+ unsigned visitCount) {
+ if (type->isNullPtrType())
+ return makeZeroVal(type);
+
+ if (!SymbolManager::canSymbolicate(type))
+ return UnknownVal();
+
+ SymbolRef sym = SymMgr.conjureSymbol(stmt, LCtx, type, visitCount);
+
+ if (Loc::isLocType(type))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
+}
+
+DefinedOrUnknownSVal
+SValBuilder::getConjuredHeapSymbolVal(const Expr *E,
+ const LocationContext *LCtx,
+ unsigned VisitCount) {
+ QualType T = E->getType();
+ assert(Loc::isLocType(T));
+ assert(SymbolManager::canSymbolicate(T));
+ if (T->isNullPtrType())
+ return makeZeroVal(T);
+
+ SymbolRef sym = SymMgr.conjureSymbol(E, LCtx, T, VisitCount);
+ return loc::MemRegionVal(MemMgr.getSymbolicHeapRegion(sym));
+}
+
+DefinedSVal SValBuilder::getMetadataSymbolVal(const void *symbolTag,
+ const MemRegion *region,
+ const Expr *expr, QualType type,
+ unsigned count) {
+ assert(SymbolManager::canSymbolicate(type) && "Invalid metadata symbol type");
+
+ SymbolRef sym =
+ SymMgr.getMetadataSymbol(region, expr, type, count, symbolTag);
+
+ if (Loc::isLocType(type))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
+}
+
+DefinedOrUnknownSVal
+SValBuilder::getDerivedRegionValueSymbolVal(SymbolRef parentSymbol,
+ const TypedValueRegion *region) {
+ QualType T = region->getValueType();
+
+ if (T->isNullPtrType())
+ return makeZeroVal(T);
+
+ if (!SymbolManager::canSymbolicate(T))
+ return UnknownVal();
+
+ SymbolRef sym = SymMgr.getDerivedSymbol(parentSymbol, region);
+
+ if (Loc::isLocType(T))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
+}
+
+DefinedSVal SValBuilder::getFunctionPointer(const FunctionDecl *func) {
+ return loc::MemRegionVal(MemMgr.getFunctionTextRegion(func));
+}
+
+DefinedSVal SValBuilder::getBlockPointer(const BlockDecl *block,
+ CanQualType locTy,
+ const LocationContext *locContext,
+ unsigned blockCount) {
+ const BlockTextRegion *BC =
+ MemMgr.getBlockTextRegion(block, locTy, locContext->getAnalysisDeclContext());
+ const BlockDataRegion *BD = MemMgr.getBlockDataRegion(BC, locContext,
+ blockCount);
+ return loc::MemRegionVal(BD);
+}
+
+/// Return a memory region for the 'this' object reference.
+loc::MemRegionVal SValBuilder::getCXXThis(const CXXMethodDecl *D,
+ const StackFrameContext *SFC) {
+ return loc::MemRegionVal(getRegionManager().
+ getCXXThisRegion(D->getThisType(getContext()), SFC));
+}
+
+/// Return a memory region for the 'this' object reference.
+loc::MemRegionVal SValBuilder::getCXXThis(const CXXRecordDecl *D,
+ const StackFrameContext *SFC) {
+ const Type *T = D->getTypeForDecl();
+ QualType PT = getContext().getPointerType(QualType(T, 0));
+ return loc::MemRegionVal(getRegionManager().getCXXThisRegion(PT, SFC));
+}
+
+Optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
+ E = E->IgnoreParens();
+
+ switch (E->getStmtClass()) {
+ // Handle expressions that we treat differently from the AST's constant
+ // evaluator.
+ case Stmt::AddrLabelExprClass:
+ return makeLoc(cast<AddrLabelExpr>(E));
+
+ case Stmt::CXXScalarValueInitExprClass:
+ case Stmt::ImplicitValueInitExprClass:
+ return makeZeroVal(E->getType());
+
+ case Stmt::ObjCStringLiteralClass: {
+ const ObjCStringLiteral *SL = cast<ObjCStringLiteral>(E);
+ return makeLoc(getRegionManager().getObjCStringRegion(SL));
+ }
+
+ case Stmt::StringLiteralClass: {
+ const StringLiteral *SL = cast<StringLiteral>(E);
+ return makeLoc(getRegionManager().getStringRegion(SL));
+ }
+
+ // Fast-path some expressions to avoid the overhead of going through the
+ // AST's constant evaluator.
+ case Stmt::CharacterLiteralClass: {
+ const CharacterLiteral *C = cast<CharacterLiteral>(E);
+ return makeIntVal(C->getValue(), C->getType());
+ }
+
+ case Stmt::CXXBoolLiteralExprClass:
+ return makeBoolVal(cast<CXXBoolLiteralExpr>(E));
+
+ case Stmt::TypeTraitExprClass: {
+ const TypeTraitExpr *TE = cast<TypeTraitExpr>(E);
+ return makeTruthVal(TE->getValue(), TE->getType());
+ }
+
+ case Stmt::IntegerLiteralClass:
+ return makeIntVal(cast<IntegerLiteral>(E));
+
+ case Stmt::ObjCBoolLiteralExprClass:
+ return makeBoolVal(cast<ObjCBoolLiteralExpr>(E));
+
+ case Stmt::CXXNullPtrLiteralExprClass:
+ return makeNull();
+
+ case Stmt::ImplicitCastExprClass: {
+ const CastExpr *CE = cast<CastExpr>(E);
+ switch (CE->getCastKind()) {
+ default:
+ break;
+ case CK_ArrayToPointerDecay:
+ case CK_BitCast: {
+ const Expr *SE = CE->getSubExpr();
+ Optional<SVal> Val = getConstantVal(SE);
+ if (!Val)
+ return None;
+ return evalCast(*Val, CE->getType(), SE->getType());
+ }
+ }
+ // FALLTHROUGH
+ }
+
+ // If we don't have a special case, fall back to the AST's constant evaluator.
+ default: {
+ // Don't try to come up with a value for materialized temporaries.
+ if (E->isGLValue())
+ return None;
+
+ ASTContext &Ctx = getContext();
+ llvm::APSInt Result;
+ if (E->EvaluateAsInt(Result, Ctx))
+ return makeIntVal(Result);
+
+ if (Loc::isLocType(E->getType()))
+ if (E->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNotNull))
+ return makeNull();
+
+ return None;
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+
+SVal SValBuilder::makeSymExprValNN(ProgramStateRef State,
+ BinaryOperator::Opcode Op,
+ NonLoc LHS, NonLoc RHS,
+ QualType ResultTy) {
+ if (!State->isTainted(RHS) && !State->isTainted(LHS))
+ return UnknownVal();
+
+ const SymExpr *symLHS = LHS.getAsSymExpr();
+ const SymExpr *symRHS = RHS.getAsSymExpr();
+ // TODO: When the Max Complexity is reached, we should conjure a symbol
+ // instead of generating an Unknown value and propagate the taint info to it.
+ const unsigned MaxComp = 10000; // 100000 28X
+
+ if (symLHS && symRHS &&
+ (symLHS->computeComplexity() + symRHS->computeComplexity()) < MaxComp)
+ return makeNonLoc(symLHS, Op, symRHS, ResultTy);
+
+ if (symLHS && symLHS->computeComplexity() < MaxComp)
+ if (Optional<nonloc::ConcreteInt> rInt = RHS.getAs<nonloc::ConcreteInt>())
+ return makeNonLoc(symLHS, Op, rInt->getValue(), ResultTy);
+
+ if (symRHS && symRHS->computeComplexity() < MaxComp)
+ if (Optional<nonloc::ConcreteInt> lInt = LHS.getAs<nonloc::ConcreteInt>())
+ return makeNonLoc(lInt->getValue(), Op, symRHS, ResultTy);
+
+ return UnknownVal();
+}
+
+
+SVal SValBuilder::evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
+ SVal lhs, SVal rhs, QualType type) {
+
+ if (lhs.isUndef() || rhs.isUndef())
+ return UndefinedVal();
+
+ if (lhs.isUnknown() || rhs.isUnknown())
+ return UnknownVal();
+
+ if (Optional<Loc> LV = lhs.getAs<Loc>()) {
+ if (Optional<Loc> RV = rhs.getAs<Loc>())
+ return evalBinOpLL(state, op, *LV, *RV, type);
+
+ return evalBinOpLN(state, op, *LV, rhs.castAs<NonLoc>(), type);
+ }
+
+ if (Optional<Loc> RV = rhs.getAs<Loc>()) {
+ // Support pointer arithmetic where the addend is on the left
+ // and the pointer on the right.
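+ // For example, '1 + p' is commuted and evaluated as 'p + 1'.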
+ assert(op == BO_Add);
+
+ // Commute the operands.
+ return evalBinOpLN(state, op, *RV, lhs.castAs<NonLoc>(), type);
+ }
+
+ return evalBinOpNN(state, op, lhs.castAs<NonLoc>(), rhs.castAs<NonLoc>(),
+ type);
+}
+
+DefinedOrUnknownSVal SValBuilder::evalEQ(ProgramStateRef state,
+ DefinedOrUnknownSVal lhs,
+ DefinedOrUnknownSVal rhs) {
+ return evalBinOp(state, BO_EQ, lhs, rhs, getConditionType())
+ .castAs<DefinedOrUnknownSVal>();
+}
+
+/// Recursively check if the pointer types are equal modulo const, volatile,
+/// and restrict qualifiers. Also, assume that all types are similar to 'void'.
+/// Assumes the input types are canonical.
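+/// For example, a cast from 'char **' to 'const char **' is modeled as a
+/// no-op, while a cast between pointers into different address spaces is not.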
+static bool shouldBeModeledWithNoOp(ASTContext &Context, QualType ToTy,
+ QualType FromTy) {
+ while (Context.UnwrapSimilarPointerTypes(ToTy, FromTy)) {
+ Qualifiers Quals1, Quals2;
+ ToTy = Context.getUnqualifiedArrayType(ToTy, Quals1);
+ FromTy = Context.getUnqualifiedArrayType(FromTy, Quals2);
+
+ // Make sure that the non-CVR qualifiers (e.g., address spaces) are
+ // identical.
+ Quals1.removeCVRQualifiers();
+ Quals2.removeCVRQualifiers();
+ if (Quals1 != Quals2)
+ return false;
+ }
+
+ // If we are casting to void, the 'From' value can be used to represent the
+ // 'To' value.
+ if (ToTy->isVoidType())
+ return true;
+
+ if (ToTy != FromTy)
+ return false;
+
+ return true;
+}
+
+// FIXME: should rewrite according to the cast kind.
+SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
+ castTy = Context.getCanonicalType(castTy);
+ originalTy = Context.getCanonicalType(originalTy);
+ if (val.isUnknownOrUndef() || castTy == originalTy)
+ return val;
+
+ if (castTy->isBooleanType()) {
+ if (val.isUnknownOrUndef())
+ return val;
+ if (val.isConstant())
+ return makeTruthVal(!val.isZeroConstant(), castTy);
+ if (!Loc::isLocType(originalTy) &&
+ !originalTy->isIntegralOrEnumerationType() &&
+ !originalTy->isMemberPointerType())
+ return UnknownVal();
+ if (SymbolRef Sym = val.getAsSymbol(true)) {
+ BasicValueFactory &BVF = getBasicValueFactory();
+ // FIXME: If we had a state here, we could see if the symbol is known to
+ // be zero, but we don't.
+ return makeNonLoc(Sym, BO_NE, BVF.getValue(0, Sym->getType()), castTy);
+ }
+ // Loc values are not always true; they could be weakly linked functions.
+ if (Optional<Loc> L = val.getAs<Loc>())
+ return evalCastFromLoc(*L, castTy);
+
+ Loc L = val.castAs<nonloc::LocAsInteger>().getLoc();
+ return evalCastFromLoc(L, castTy);
+ }
+
+ // For const casts and casts to void, just propagate the value.
+ if (!castTy->isVariableArrayType() && !originalTy->isVariableArrayType())
+ if (shouldBeModeledWithNoOp(Context, Context.getPointerType(castTy),
+ Context.getPointerType(originalTy)))
+ return val;
+
+ // Check for casts from pointers to integers.
+ if (castTy->isIntegralOrEnumerationType() && Loc::isLocType(originalTy))
+ return evalCastFromLoc(val.castAs<Loc>(), castTy);
+
+ // Check for casts from integers to pointers.
+ if (Loc::isLocType(castTy) && originalTy->isIntegralOrEnumerationType()) {
+ if (Optional<nonloc::LocAsInteger> LV = val.getAs<nonloc::LocAsInteger>()) {
+ if (const MemRegion *R = LV->getLoc().getAsRegion()) {
+ StoreManager &storeMgr = StateMgr.getStoreManager();
+ R = storeMgr.castRegion(R, castTy);
+ return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
+ }
+ return LV->getLoc();
+ }
+ return dispatchCast(val, castTy);
+ }
+
+ // Just pass through function and block pointers.
+ if (originalTy->isBlockPointerType() || originalTy->isFunctionPointerType()) {
+ assert(Loc::isLocType(castTy));
+ return val;
+ }
+
+ // Check for casts from array type to another type.
+ if (const ArrayType *arrayT =
+ dyn_cast<ArrayType>(originalTy.getCanonicalType())) {
+ // We will always decay to a pointer.
+ QualType elemTy = arrayT->getElementType();
+ val = StateMgr.ArrayToPointer(val.castAs<Loc>(), elemTy);
+
+ // Are we casting from an array to a pointer? If so just pass on
+ // the decayed value.
+ if (castTy->isPointerType() || castTy->isReferenceType())
+ return val;
+
+ // Are we casting from an array to an integer? If so, cast the decayed
+ // pointer value to an integer.
+ assert(castTy->isIntegralOrEnumerationType());
+
+ // FIXME: Keep these here for now in case we decide soon that we
+ // need the original decayed type.
+ // QualType elemTy = cast<ArrayType>(originalTy)->getElementType();
+ // QualType pointerTy = C.getPointerType(elemTy);
+ return evalCastFromLoc(val.castAs<Loc>(), castTy);
+ }
+
+ // Check for casts from a region to a specific type.
+ if (const MemRegion *R = val.getAsRegion()) {
+ // Handle other casts of locations to integers.
+ if (castTy->isIntegralOrEnumerationType())
+ return evalCastFromLoc(loc::MemRegionVal(R), castTy);
+
+ // FIXME: We should handle the case where we strip off view layers to get
+ // to a desugared type.
+ if (!Loc::isLocType(castTy)) {
+ // FIXME: There can be gross cases where one casts the result of a function
+ // (that returns a pointer) to some other value that happens to fit
+ // within that pointer value. We currently have no good way to
+ // model such operations. When this happens, the underlying operation
+ // is that the caller is reasoning about bits. Conceptually we are
+ // layering a "view" of a location on top of those bits. Perhaps
+ // we need to be more lazy about mutual possible views, even on an
+ // SVal? This may be necessary for bit-level reasoning as well.
+ return UnknownVal();
+ }
+
+ // We get a symbolic function pointer for a dereference of a function
+ // pointer, but it is of function type. Example:
+
+ // struct FPRec {
+ // void (*my_func)(int * x);
+ // };
+ //
+ // int bar(int x);
+ //
+ // int f1_a(struct FPRec* foo) {
+ // int x;
+ // (*foo->my_func)(&x);
+ // return bar(x)+1; // no-warning
+ // }
+
+ assert(Loc::isLocType(originalTy) || originalTy->isFunctionType() ||
+ originalTy->isBlockPointerType() || castTy->isReferenceType());
+
+ StoreManager &storeMgr = StateMgr.getStoreManager();
+
+ // Delegate to the store manager to get the result of casting a region to a
+ // different type. If the MemRegion* returned is NULL, this expression
+ // evaluates to UnknownVal.
+ R = storeMgr.castRegion(R, castTy);
+ return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
+ }
+
+ return dispatchCast(val, castTy);
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp
new file mode 100644
index 0000000..8de939f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp
@@ -0,0 +1,322 @@
+//== SVals.cpp - Abstract RValues for Path-Sens. Value Tracking ----*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SVal, Loc, and NonLoc, classes that represent
+// abstract r-values for use with path-sensitive value tracking.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace ento;
+using llvm::APSInt;
+
+//===----------------------------------------------------------------------===//
+// Symbol iteration within an SVal.
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// Utility methods.
+//===----------------------------------------------------------------------===//
+
+bool SVal::hasConjuredSymbol() const {
+ if (Optional<nonloc::SymbolVal> SV = getAs<nonloc::SymbolVal>()) {
+ SymbolRef sym = SV->getSymbol();
+ if (isa<SymbolConjured>(sym))
+ return true;
+ }
+
+ if (Optional<loc::MemRegionVal> RV = getAs<loc::MemRegionVal>()) {
+ const MemRegion *R = RV->getRegion();
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
+ SymbolRef sym = SR->getSymbol();
+ if (isa<SymbolConjured>(sym))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+const FunctionDecl *SVal::getAsFunctionDecl() const {
+ if (Optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>()) {
+ const MemRegion* R = X->getRegion();
+ if (const FunctionTextRegion *CTR = R->getAs<FunctionTextRegion>())
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CTR->getDecl()))
+ return FD;
+ }
+
+ return nullptr;
+}
+
+/// \brief If this SVal is a location (subclasses Loc) and wraps a symbol,
+/// return that SymbolRef. Otherwise return a null SymbolRef.
+///
+/// Implicit casts (e.g. void* -> char*) can turn a symbolic region into an
+/// element region. If that is the case, get the underlying region.
+/// When IncludeBaseRegions is true and the region is a non-symbolic
+/// SubRegion, the first symbolic parent region is returned.
+SymbolRef SVal::getAsLocSymbol(bool IncludeBaseRegions) const {
+ // FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
+ if (Optional<nonloc::LocAsInteger> X = getAs<nonloc::LocAsInteger>())
+ return X->getLoc().getAsLocSymbol();
+
+ if (Optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>()) {
+ const MemRegion *R = X->getRegion();
+ if (const SymbolicRegion *SymR = IncludeBaseRegions ?
+ R->getSymbolicBase() :
+ dyn_cast<SymbolicRegion>(R->StripCasts()))
+ return SymR->getSymbol();
+ }
+ return nullptr;
+}
+
+/// Get the symbol in the SVal or its base region.
+SymbolRef SVal::getLocSymbolInBase() const {
+ Optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>();
+
+ if (!X)
+ return nullptr;
+
+ const MemRegion *R = X->getRegion();
+
+ while (const SubRegion *SR = dyn_cast<SubRegion>(R)) {
+ if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SR))
+ return SymR->getSymbol();
+ else
+ R = SR->getSuperRegion();
+ }
+
+ return nullptr;
+}
+
+// TODO: The next 3 functions have to be simplified.
+
+/// \brief If this SVal wraps a symbol, return that SymbolRef.
+/// Otherwise, return a null SymbolRef.
+///
+/// Casts are ignored during lookup.
+/// \param IncludeBaseRegion Controls whether the search should continue to
+/// the base regions if the region is not symbolic.
+SymbolRef SVal::getAsSymbol(bool IncludeBaseRegion) const {
+ // FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
+ if (Optional<nonloc::SymbolVal> X = getAs<nonloc::SymbolVal>())
+ return X->getSymbol();
+
+ return getAsLocSymbol(IncludeBaseRegion);
+}
+
+/// getAsSymbolicExpression - If this SVal wraps a symbolic expression then
+/// return that expression. Otherwise return NULL.
+const SymExpr *SVal::getAsSymbolicExpression() const {
+ if (Optional<nonloc::SymbolVal> X = getAs<nonloc::SymbolVal>())
+ return X->getSymbol();
+
+ return getAsSymbol();
+}
+
+const SymExpr* SVal::getAsSymExpr() const {
+ const SymExpr* Sym = getAsSymbol();
+ if (!Sym)
+ Sym = getAsSymbolicExpression();
+ return Sym;
+}
+
+const MemRegion *SVal::getAsRegion() const {
+ if (Optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>())
+ return X->getRegion();
+
+ if (Optional<nonloc::LocAsInteger> X = getAs<nonloc::LocAsInteger>())
+ return X->getLoc().getAsRegion();
+
+ return nullptr;
+}
+
+const MemRegion *loc::MemRegionVal::stripCasts(bool StripBaseCasts) const {
+ const MemRegion *R = getRegion();
+ return R ? R->StripCasts(StripBaseCasts) : nullptr;
+}
+
+const void *nonloc::LazyCompoundVal::getStore() const {
+ return static_cast<const LazyCompoundValData*>(Data)->getStore();
+}
+
+const TypedValueRegion *nonloc::LazyCompoundVal::getRegion() const {
+ return static_cast<const LazyCompoundValData*>(Data)->getRegion();
+}
+
+//===----------------------------------------------------------------------===//
+// Other Iterators.
+//===----------------------------------------------------------------------===//
+
+nonloc::CompoundVal::iterator nonloc::CompoundVal::begin() const {
+ return getValue()->begin();
+}
+
+nonloc::CompoundVal::iterator nonloc::CompoundVal::end() const {
+ return getValue()->end();
+}
+
+//===----------------------------------------------------------------------===//
+// Useful predicates.
+//===----------------------------------------------------------------------===//
+
+bool SVal::isConstant() const {
+ return getAs<nonloc::ConcreteInt>() || getAs<loc::ConcreteInt>();
+}
+
+bool SVal::isConstant(int I) const {
+ if (Optional<loc::ConcreteInt> LV = getAs<loc::ConcreteInt>())
+ return LV->getValue() == I;
+ if (Optional<nonloc::ConcreteInt> NV = getAs<nonloc::ConcreteInt>())
+ return NV->getValue() == I;
+ return false;
+}
+
+bool SVal::isZeroConstant() const {
+ return isConstant(0);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Transfer function dispatch for Non-Locs.
+//===----------------------------------------------------------------------===//
+
+SVal nonloc::ConcreteInt::evalBinOp(SValBuilder &svalBuilder,
+ BinaryOperator::Opcode Op,
+ const nonloc::ConcreteInt& R) const {
+ const llvm::APSInt* X =
+ svalBuilder.getBasicValueFactory().evalAPSInt(Op, getValue(), R.getValue());
+
+ if (X)
+ return nonloc::ConcreteInt(*X);
+ else
+ return UndefinedVal();
+}
+
+nonloc::ConcreteInt
+nonloc::ConcreteInt::evalComplement(SValBuilder &svalBuilder) const {
+ return svalBuilder.makeIntVal(~getValue());
+}
+
+nonloc::ConcreteInt
+nonloc::ConcreteInt::evalMinus(SValBuilder &svalBuilder) const {
+ return svalBuilder.makeIntVal(-getValue());
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function dispatch for Locs.
+//===----------------------------------------------------------------------===//
+
+SVal loc::ConcreteInt::evalBinOp(BasicValueFactory& BasicVals,
+ BinaryOperator::Opcode Op,
+ const loc::ConcreteInt& R) const {
+
+ assert(BinaryOperator::isComparisonOp(Op) || Op == BO_Sub);
+
+ const llvm::APSInt *X = BasicVals.evalAPSInt(Op, getValue(), R.getValue());
+
+ if (X)
+ return nonloc::ConcreteInt(*X);
+ else
+ return UndefinedVal();
+}
+
+//===----------------------------------------------------------------------===//
+// Pretty-Printing.
+//===----------------------------------------------------------------------===//
+
+void SVal::dump() const { dumpToStream(llvm::errs()); }
+
+void SVal::dumpToStream(raw_ostream &os) const {
+ switch (getBaseKind()) {
+ case UnknownKind:
+ os << "Unknown";
+ break;
+ case NonLocKind:
+ castAs<NonLoc>().dumpToStream(os);
+ break;
+ case LocKind:
+ castAs<Loc>().dumpToStream(os);
+ break;
+ case UndefinedKind:
+ os << "Undefined";
+ break;
+ }
+}
+
+void NonLoc::dumpToStream(raw_ostream &os) const {
+ switch (getSubKind()) {
+ case nonloc::ConcreteIntKind: {
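+ // Printed as the value followed by signedness and bit width, e.g. "5 S32b".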
+ const nonloc::ConcreteInt& C = castAs<nonloc::ConcreteInt>();
+ if (C.getValue().isUnsigned())
+ os << C.getValue().getZExtValue();
+ else
+ os << C.getValue().getSExtValue();
+ os << ' ' << (C.getValue().isUnsigned() ? 'U' : 'S')
+ << C.getValue().getBitWidth() << 'b';
+ break;
+ }
+ case nonloc::SymbolValKind: {
+ os << castAs<nonloc::SymbolVal>().getSymbol();
+ break;
+ }
+ case nonloc::LocAsIntegerKind: {
+ const nonloc::LocAsInteger& C = castAs<nonloc::LocAsInteger>();
+ os << C.getLoc() << " [as " << C.getNumBits() << " bit integer]";
+ break;
+ }
+ case nonloc::CompoundValKind: {
+ const nonloc::CompoundVal& C = castAs<nonloc::CompoundVal>();
+ os << "compoundVal{";
+ bool first = true;
+ for (nonloc::CompoundVal::iterator I=C.begin(), E=C.end(); I!=E; ++I) {
+ if (first) {
+ os << ' '; first = false;
+ }
+ else
+ os << ", ";
+
+ (*I).dumpToStream(os);
+ }
+ os << "}";
+ break;
+ }
+ case nonloc::LazyCompoundValKind: {
+ const nonloc::LazyCompoundVal &C = castAs<nonloc::LazyCompoundVal>();
+ os << "lazyCompoundVal{" << const_cast<void *>(C.getStore())
+ << ',' << C.getRegion()
+ << '}';
+ break;
+ }
+ default:
+ assert(false && "Pretty-printing not implemented for this NonLoc.");
+ break;
+ }
+}
+
+void Loc::dumpToStream(raw_ostream &os) const {
+ switch (getSubKind()) {
+ case loc::ConcreteIntKind:
+ os << castAs<loc::ConcreteInt>().getValue().getZExtValue() << " (Loc)";
+ break;
+ case loc::GotoLabelKind:
+ os << "&&" << castAs<loc::GotoLabel>().getLabel()->getName();
+ break;
+ case loc::MemRegionKind:
+ os << '&' << castAs<loc::MemRegionVal>().getRegion()->getString();
+ break;
+ default:
+ llvm_unreachable("Pretty-printing not implemented for this Loc.");
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
new file mode 100644
index 0000000..4051242
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
@@ -0,0 +1,334 @@
+//== SimpleConstraintManager.cpp --------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SimpleConstraintManager, a class that holds code shared
+// between BasicConstraintManager and RangeConstraintManager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SimpleConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+
+namespace clang {
+
+namespace ento {
+
+SimpleConstraintManager::~SimpleConstraintManager() {}
+
+bool SimpleConstraintManager::canReasonAbout(SVal X) const {
+ Optional<nonloc::SymbolVal> SymVal = X.getAs<nonloc::SymbolVal>();
+ if (SymVal && SymVal->isExpression()) {
+ const SymExpr *SE = SymVal->getSymbol();
+
+ if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(SE)) {
+ switch (SIE->getOpcode()) {
+ // We don't reason yet about bitwise-constraints on symbolic values.
+ case BO_And:
+ case BO_Or:
+ case BO_Xor:
+ return false;
+ // We don't reason yet about these arithmetic constraints on
+ // symbolic values.
+ case BO_Mul:
+ case BO_Div:
+ case BO_Rem:
+ case BO_Shl:
+ case BO_Shr:
+ return false;
+ // All other cases.
+ default:
+ return true;
+ }
+ }
+
+ if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(SE)) {
+ if (BinaryOperator::isComparisonOp(SSE->getOpcode())) {
+ // We handle Loc <> Loc comparisons, but not (yet) NonLoc <> NonLoc.
+ if (Loc::isLocType(SSE->getLHS()->getType())) {
+ assert(Loc::isLocType(SSE->getRHS()->getType()));
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ return true;
+}
+
+ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state,
+ DefinedSVal Cond,
+ bool Assumption) {
+ // If we have a Loc value, cast it to a bool NonLoc first.
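+ // This covers conditions such as 'if (p)' for a pointer 'p': the pointer
+ // value is converted to a boolean before the constraint is recorded.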
+ if (Optional<Loc> LV = Cond.getAs<Loc>()) {
+ SValBuilder &SVB = state->getStateManager().getSValBuilder();
+ QualType T;
+ const MemRegion *MR = LV->getAsRegion();
+ if (const TypedRegion *TR = dyn_cast_or_null<TypedRegion>(MR))
+ T = TR->getLocationType();
+ else
+ T = SVB.getContext().VoidPtrTy;
+
+ Cond = SVB.evalCast(*LV, SVB.getContext().BoolTy, T).castAs<DefinedSVal>();
+ }
+
+ return assume(state, Cond.castAs<NonLoc>(), Assumption);
+}
+
+ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state,
+ NonLoc cond,
+ bool assumption) {
+ state = assumeAux(state, cond, assumption);
+ if (NotifyAssumeClients && SU)
+ return SU->processAssume(state, cond, assumption);
+ return state;
+}
+
+
+ProgramStateRef
+SimpleConstraintManager::assumeAuxForSymbol(ProgramStateRef State,
+ SymbolRef Sym, bool Assumption) {
+ BasicValueFactory &BVF = getBasicVals();
+ QualType T = Sym->getType();
+
+ // None of the constraint solvers currently support non-integer types.
+ if (!T->isIntegralOrEnumerationType())
+ return State;
+
+ const llvm::APSInt &zero = BVF.getValue(0, T);
+ if (Assumption)
+ return assumeSymNE(State, Sym, zero, zero);
+ else
+ return assumeSymEQ(State, Sym, zero, zero);
+}
+
+ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
+ NonLoc Cond,
+ bool Assumption) {
+
+ // We cannot reason about SymSymExprs, and can only reason about some
+ // SymIntExprs.
+ if (!canReasonAbout(Cond)) {
+ // Just add the constraint to the expression without trying to simplify.
+ SymbolRef sym = Cond.getAsSymExpr();
+ return assumeAuxForSymbol(state, sym, Assumption);
+ }
+
+ switch (Cond.getSubKind()) {
+ default:
+ llvm_unreachable("'Assume' not implemented for this NonLoc");
+
+ case nonloc::SymbolValKind: {
+ nonloc::SymbolVal SV = Cond.castAs<nonloc::SymbolVal>();
+ SymbolRef sym = SV.getSymbol();
+ assert(sym);
+
+ // Handle SymbolData.
+ if (!SV.isExpression()) {
+ return assumeAuxForSymbol(state, sym, Assumption);
+
+ // Handle symbolic expression.
+ } else if (const SymIntExpr *SE = dyn_cast<SymIntExpr>(sym)) {
+ // We can only simplify expressions whose RHS is an integer.
+
+ BinaryOperator::Opcode op = SE->getOpcode();
+ if (BinaryOperator::isComparisonOp(op)) {
+ if (!Assumption)
+ op = BinaryOperator::negateComparisonOp(op);
+
+ return assumeSymRel(state, SE->getLHS(), op, SE->getRHS());
+ }
+
+ } else if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(sym)) {
+ // Translate "a != b" to "(b - a) != 0".
+ // We invert the order of the operands as a heuristic for how loop
+ // conditions are usually written ("begin != end") as compared to length
+ // calculations ("end - begin"). The more correct thing to do would be to
+ // canonicalize "a - b" and "b - a", which would allow us to treat
+ // "a != b" and "b != a" the same.
+ SymbolManager &SymMgr = getSymbolManager();
+ BinaryOperator::Opcode Op = SSE->getOpcode();
+ assert(BinaryOperator::isComparisonOp(Op));
+
+ // For now, we only support comparing pointers.
+ assert(Loc::isLocType(SSE->getLHS()->getType()));
+ assert(Loc::isLocType(SSE->getRHS()->getType()));
+ QualType DiffTy = SymMgr.getContext().getPointerDiffType();
+ SymbolRef Subtraction = SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub,
+ SSE->getLHS(), DiffTy);
+
+ const llvm::APSInt &Zero = getBasicVals().getValue(0, DiffTy);
+ Op = BinaryOperator::reverseComparisonOp(Op);
+ if (!Assumption)
+ Op = BinaryOperator::negateComparisonOp(Op);
+ return assumeSymRel(state, Subtraction, Op, Zero);
+ }
+
+ // If we get here, there's nothing else we can do but treat the symbol as
+ // opaque.
+ return assumeAuxForSymbol(state, sym, Assumption);
+ }
+
+ case nonloc::ConcreteIntKind: {
+ bool b = Cond.castAs<nonloc::ConcreteInt>().getValue() != 0;
+ bool isFeasible = b ? Assumption : !Assumption;
+ return isFeasible ? state : nullptr;
+ }
+
+ case nonloc::LocAsIntegerKind:
+ return assume(state, Cond.castAs<nonloc::LocAsInteger>().getLoc(),
+ Assumption);
+ } // end switch
+}
+
+ProgramStateRef SimpleConstraintManager::assumeWithinInclusiveRange(
+ ProgramStateRef State, NonLoc Value, const llvm::APSInt &From,
+ const llvm::APSInt &To, bool InRange) {
+
+ assert(From.isUnsigned() == To.isUnsigned() &&
+ From.getBitWidth() == To.getBitWidth() &&
+ "Values should have same types!");
+
+ if (!canReasonAbout(Value)) {
+ // Just add the constraint to the expression without trying to simplify.
+ SymbolRef Sym = Value.getAsSymExpr();
+ assert(Sym);
+ return assumeSymWithinInclusiveRange(State, Sym, From, To, InRange);
+ }
+
+ switch (Value.getSubKind()) {
+ default:
+ llvm_unreachable("'assumeWithinInclusiveRange' is not implemented"
+ "for this NonLoc");
+
+ case nonloc::LocAsIntegerKind:
+ case nonloc::SymbolValKind: {
+ if (SymbolRef Sym = Value.getAsSymbol())
+ return assumeSymWithinInclusiveRange(State, Sym, From, To, InRange);
+ return State;
+ }
+
+ case nonloc::ConcreteIntKind: {
+ const llvm::APSInt &IntVal = Value.castAs<nonloc::ConcreteInt>().getValue();
+ bool IsInRange = IntVal >= From && IntVal <= To;
+ bool isFeasible = (IsInRange == InRange);
+ return isFeasible ? State : nullptr;
+ }
+ } // end switch
+}
+
+static void computeAdjustment(SymbolRef &Sym, llvm::APSInt &Adjustment) {
+ // Is it a "($sym+constant1)" expression?
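+ // For example, "$x - 5" is split into Sym = $x and Adjustment = -5, so a
+ // constraint such as "$x - 5 == 0" is recorded as "$x == 5".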
+ if (const SymIntExpr *SE = dyn_cast<SymIntExpr>(Sym)) {
+ BinaryOperator::Opcode Op = SE->getOpcode();
+ if (Op == BO_Add || Op == BO_Sub) {
+ Sym = SE->getLHS();
+ Adjustment = APSIntType(Adjustment).convert(SE->getRHS());
+
+ // Don't forget to negate the adjustment if it's being subtracted.
+ // This should happen /after/ promotion, in case the value being
+ // subtracted is, say, CHAR_MIN, and the promoted type is 'int'.
+ if (Op == BO_Sub)
+ Adjustment = -Adjustment;
+ }
+ }
+}
+
+ProgramStateRef SimpleConstraintManager::assumeSymRel(ProgramStateRef state,
+ const SymExpr *LHS,
+ BinaryOperator::Opcode op,
+ const llvm::APSInt& Int) {
+ assert(BinaryOperator::isComparisonOp(op) &&
+ "Non-comparison ops should be rewritten as comparisons to zero.");
+
+ // Get the type used for calculating wraparound.
+ BasicValueFactory &BVF = getBasicVals();
+ APSIntType WraparoundType = BVF.getAPSIntType(LHS->getType());
+
+ // We only handle simple comparisons of the form "$sym == constant"
+ // or "($sym+constant1) == constant2".
+ // The adjustment is "constant1" in the above expression. It's used to
+ // "slide" the solution range around for modular arithmetic. For example,
+ // x < 4 has the solution [0, 3]. x+2 < 4 has the solution [0-2, 3-2], which
+ // in modular arithmetic is [0, 1] U [UINT_MAX-1, UINT_MAX]. It's up to
+ // the subclasses of SimpleConstraintManager to handle the adjustment.
+ SymbolRef Sym = LHS;
+ llvm::APSInt Adjustment = WraparoundType.getZeroValue();
+ computeAdjustment(Sym, Adjustment);
+
+ // Convert the right-hand side integer as necessary.
+ APSIntType ComparisonType = std::max(WraparoundType, APSIntType(Int));
+ llvm::APSInt ConvertedInt = ComparisonType.convert(Int);
+
+ // Prefer unsigned comparisons.
+ if (ComparisonType.getBitWidth() == WraparoundType.getBitWidth() &&
+ ComparisonType.isUnsigned() && !WraparoundType.isUnsigned())
+ Adjustment.setIsSigned(false);
+
+ switch (op) {
+ default:
+ llvm_unreachable("invalid operation not caught by assertion above");
+
+ case BO_EQ:
+ return assumeSymEQ(state, Sym, ConvertedInt, Adjustment);
+
+ case BO_NE:
+ return assumeSymNE(state, Sym, ConvertedInt, Adjustment);
+
+ case BO_GT:
+ return assumeSymGT(state, Sym, ConvertedInt, Adjustment);
+
+ case BO_GE:
+ return assumeSymGE(state, Sym, ConvertedInt, Adjustment);
+
+ case BO_LT:
+ return assumeSymLT(state, Sym, ConvertedInt, Adjustment);
+
+ case BO_LE:
+ return assumeSymLE(state, Sym, ConvertedInt, Adjustment);
+ } // end switch
+}
+
+ProgramStateRef
+SimpleConstraintManager::assumeSymWithinInclusiveRange(ProgramStateRef State,
+ SymbolRef Sym,
+ const llvm::APSInt &From,
+ const llvm::APSInt &To,
+ bool InRange) {
+ // Get the type used for calculating wraparound.
+ BasicValueFactory &BVF = getBasicVals();
+ APSIntType WraparoundType = BVF.getAPSIntType(Sym->getType());
+
+ llvm::APSInt Adjustment = WraparoundType.getZeroValue();
+ SymbolRef AdjustedSym = Sym;
+ computeAdjustment(AdjustedSym, Adjustment);
+
+ // Convert the right-hand side integer as necessary.
+ APSIntType ComparisonType = std::max(WraparoundType, APSIntType(From));
+ llvm::APSInt ConvertedFrom = ComparisonType.convert(From);
+ llvm::APSInt ConvertedTo = ComparisonType.convert(To);
+
+ // Prefer unsigned comparisons.
+ if (ComparisonType.getBitWidth() == WraparoundType.getBitWidth() &&
+ ComparisonType.isUnsigned() && !WraparoundType.isUnsigned())
+ Adjustment.setIsSigned(false);
+
+ if (InRange)
+ return assumeSymbolWithinInclusiveRange(State, AdjustedSym, ConvertedFrom,
+ ConvertedTo, Adjustment);
+ return assumeSymbolOutOfInclusiveRange(State, AdjustedSym, ConvertedFrom,
+ ConvertedTo, Adjustment);
+}
+
+} // end of namespace ento
+
+} // end of namespace clang
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
new file mode 100644
index 0000000..b26bc94
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
@@ -0,0 +1,121 @@
+//== SimpleConstraintManager.h ----------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Code shared between BasicConstraintManager and RangeConstraintManager.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CORE_SIMPLECONSTRAINTMANAGER_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CORE_SIMPLECONSTRAINTMANAGER_H
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+
+namespace clang {
+
+namespace ento {
+
+class SimpleConstraintManager : public ConstraintManager {
+ SubEngine *SU;
+ SValBuilder &SVB;
+public:
+ SimpleConstraintManager(SubEngine *subengine, SValBuilder &SB)
+ : SU(subengine), SVB(SB) {}
+ ~SimpleConstraintManager() override;
+
+ //===------------------------------------------------------------------===//
+ // Common implementation for the interface provided by ConstraintManager.
+ //===------------------------------------------------------------------===//
+
+ ProgramStateRef assume(ProgramStateRef state, DefinedSVal Cond,
+ bool Assumption) override;
+
+ ProgramStateRef assume(ProgramStateRef state, NonLoc Cond, bool Assumption);
+
+ ProgramStateRef assumeWithinInclusiveRange(ProgramStateRef State,
+ NonLoc Value,
+ const llvm::APSInt &From,
+ const llvm::APSInt &To,
+ bool InRange) override;
+
+ ProgramStateRef assumeSymRel(ProgramStateRef state,
+ const SymExpr *LHS,
+ BinaryOperator::Opcode op,
+ const llvm::APSInt& Int);
+
+ ProgramStateRef assumeSymWithinInclusiveRange(ProgramStateRef State,
+ SymbolRef Sym,
+ const llvm::APSInt &From,
+ const llvm::APSInt &To,
+ bool InRange);
+
+
+protected:
+
+ //===------------------------------------------------------------------===//
+ // Interface that subclasses must implement.
+ //===------------------------------------------------------------------===//
+
+ // Each of these is of the form "$sym+Adj <> V", where "<>" is the comparison
+ // operation for the method being invoked.
+ virtual ProgramStateRef assumeSymNE(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
+
+ virtual ProgramStateRef assumeSymEQ(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
+
+ virtual ProgramStateRef assumeSymLT(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
+
+ virtual ProgramStateRef assumeSymGT(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
+
+ virtual ProgramStateRef assumeSymLE(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
+
+ virtual ProgramStateRef assumeSymGE(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
+
+
+ virtual ProgramStateRef assumeSymbolWithinInclusiveRange(
+ ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
+ const llvm::APSInt &To, const llvm::APSInt &Adjustment) = 0;
+
+ virtual ProgramStateRef assumeSymbolOutOfInclusiveRange(
+ ProgramStateRef state, SymbolRef Sym, const llvm::APSInt &From,
+ const llvm::APSInt &To, const llvm::APSInt &Adjustment) = 0;
+ //===------------------------------------------------------------------===//
+ // Internal implementation.
+ //===------------------------------------------------------------------===//
+
+ BasicValueFactory &getBasicVals() const { return SVB.getBasicValueFactory(); }
+ SymbolManager &getSymbolManager() const { return SVB.getSymbolManager(); }
+
+ bool canReasonAbout(SVal X) const override;
+
+ ProgramStateRef assumeAux(ProgramStateRef state,
+ NonLoc Cond,
+ bool Assumption);
+
+ ProgramStateRef assumeAuxForSymbol(ProgramStateRef State,
+ SymbolRef Sym,
+ bool Assumption);
+};
+
+} // end namespace ento
+
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
new file mode 100644
index 0000000..a704ce2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -0,0 +1,945 @@
+// SimpleSValBuilder.cpp - A basic SValBuilder -----------------------*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SimpleSValBuilder, a basic implementation of SValBuilder.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class SimpleSValBuilder : public SValBuilder {
+protected:
+ SVal dispatchCast(SVal val, QualType castTy) override;
+ SVal evalCastFromNonLoc(NonLoc val, QualType castTy) override;
+ SVal evalCastFromLoc(Loc val, QualType castTy) override;
+
+public:
+ SimpleSValBuilder(llvm::BumpPtrAllocator &alloc, ASTContext &context,
+ ProgramStateManager &stateMgr)
+ : SValBuilder(alloc, context, stateMgr) {}
+ ~SimpleSValBuilder() override {}
+
+ SVal evalMinus(NonLoc val) override;
+ SVal evalComplement(NonLoc val) override;
+ SVal evalBinOpNN(ProgramStateRef state, BinaryOperator::Opcode op,
+ NonLoc lhs, NonLoc rhs, QualType resultTy) override;
+ SVal evalBinOpLL(ProgramStateRef state, BinaryOperator::Opcode op,
+ Loc lhs, Loc rhs, QualType resultTy) override;
+ SVal evalBinOpLN(ProgramStateRef state, BinaryOperator::Opcode op,
+ Loc lhs, NonLoc rhs, QualType resultTy) override;
+
+ /// getKnownValue - evaluates a given SVal. If the SVal has only one possible
+ /// (integer) value, that value is returned. Otherwise, returns NULL.
+ const llvm::APSInt *getKnownValue(ProgramStateRef state, SVal V) override;
+
+ SVal MakeSymIntVal(const SymExpr *LHS, BinaryOperator::Opcode op,
+ const llvm::APSInt &RHS, QualType resultTy);
+};
+} // end anonymous namespace
+
+SValBuilder *ento::createSimpleSValBuilder(llvm::BumpPtrAllocator &alloc,
+ ASTContext &context,
+ ProgramStateManager &stateMgr) {
+ return new SimpleSValBuilder(alloc, context, stateMgr);
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function for Casts.
+//===----------------------------------------------------------------------===//
+
+SVal SimpleSValBuilder::dispatchCast(SVal Val, QualType CastTy) {
+ assert(Val.getAs<Loc>() || Val.getAs<NonLoc>());
+ return Val.getAs<Loc>() ? evalCastFromLoc(Val.castAs<Loc>(), CastTy)
+ : evalCastFromNonLoc(Val.castAs<NonLoc>(), CastTy);
+}
+
+SVal SimpleSValBuilder::evalCastFromNonLoc(NonLoc val, QualType castTy) {
+
+ bool isLocType = Loc::isLocType(castTy);
+
+ if (Optional<nonloc::LocAsInteger> LI = val.getAs<nonloc::LocAsInteger>()) {
+ if (isLocType)
+ return LI->getLoc();
+
+ // FIXME: Correctly support promotions/truncations.
+ unsigned castSize = Context.getTypeSize(castTy);
+ if (castSize == LI->getNumBits())
+ return val;
+ return makeLocAsInteger(LI->getLoc(), castSize);
+ }
+
+ if (const SymExpr *se = val.getAsSymbolicExpression()) {
+ QualType T = Context.getCanonicalType(se->getType());
+ // If types are the same or both are integers, ignore the cast.
+ // FIXME: Remove this hack when we support symbolic truncation/extension.
+ // HACK: If both castTy and T are integers, ignore the cast. This is
+ // not a permanent solution. Eventually we want to precisely handle
+ // extension/truncation of symbolic integers. This prevents us from losing
+ // precision when we assign 'x = y' and 'y' is symbolic and x and y are
+ // different integer types.
+ if (haveSameType(T, castTy))
+ return val;
+
+ if (!isLocType)
+ return makeNonLoc(se, T, castTy);
+ return UnknownVal();
+ }
+
+ // If value is a non-integer constant, produce unknown.
+ if (!val.getAs<nonloc::ConcreteInt>())
+ return UnknownVal();
+
+ // Handle casts to a boolean type.
+ if (castTy->isBooleanType()) {
+ bool b = val.castAs<nonloc::ConcreteInt>().getValue().getBoolValue();
+ return makeTruthVal(b, castTy);
+ }
+
+ // Only handle casts from integers to integers - if val is an integer constant
+ // being cast to a non-integer type, produce unknown.
+ if (!isLocType && !castTy->isIntegralOrEnumerationType())
+ return UnknownVal();
+
+ llvm::APSInt i = val.castAs<nonloc::ConcreteInt>().getValue();
+ BasicVals.getAPSIntType(castTy).apply(i);
+
+ if (isLocType)
+ return makeIntLocVal(i);
+ else
+ return makeIntVal(i);
+}
+
+SVal SimpleSValBuilder::evalCastFromLoc(Loc val, QualType castTy) {
+
+ // Casts from pointers -> pointers, just return the lval.
+ //
+ // Casts from pointers -> references, just return the lval. These
+ // can be introduced by the frontend for corner cases, e.g
+ // casting from va_list* to __builtin_va_list&.
+ //
+ if (Loc::isLocType(castTy) || castTy->isReferenceType())
+ return val;
+
+ // FIXME: Handle transparent unions where a value can be "transparently"
+ // lifted into a union type.
+ if (castTy->isUnionType())
+ return UnknownVal();
+
+ // Casting a Loc to a bool will almost always be true,
+ // unless this is a weak function or a symbolic region.
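+ // For example, 'if (weak_fn)' can legitimately be false when 'weak_fn' is
+ // declared with __attribute__((weak)) and left undefined at link time.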
+ if (castTy->isBooleanType()) {
+ switch (val.getSubKind()) {
+ case loc::MemRegionKind: {
+ const MemRegion *R = val.castAs<loc::MemRegionVal>().getRegion();
+ if (const FunctionTextRegion *FTR = dyn_cast<FunctionTextRegion>(R))
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FTR->getDecl()))
+ if (FD->isWeak())
+ // FIXME: Currently we are using an extent symbol here,
+ // because there are no generic region address metadata
+ // symbols to use, only content metadata.
+ return nonloc::SymbolVal(SymMgr.getExtentSymbol(FTR));
+
+ if (const SymbolicRegion *SymR = R->getSymbolicBase())
+ return nonloc::SymbolVal(SymR->getSymbol());
+
+ // FALL-THROUGH
+ }
+
+ case loc::GotoLabelKind:
+ // Labels and non-symbolic memory regions are always true.
+ return makeTruthVal(true, castTy);
+ }
+ }
+
+ if (castTy->isIntegralOrEnumerationType()) {
+ unsigned BitWidth = Context.getTypeSize(castTy);
+
+ if (!val.getAs<loc::ConcreteInt>())
+ return makeLocAsInteger(val, BitWidth);
+
+ llvm::APSInt i = val.castAs<loc::ConcreteInt>().getValue();
+ BasicVals.getAPSIntType(castTy).apply(i);
+ return makeIntVal(i);
+ }
+
+ // All other cases: return 'UnknownVal'. This includes casting pointers
+  // to floats, which is probably badness in itself, but this is a good
+ // intermediate solution until we do something better.
+ return UnknownVal();
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function for unary operators.
+//===----------------------------------------------------------------------===//
+
+SVal SimpleSValBuilder::evalMinus(NonLoc val) {
+ switch (val.getSubKind()) {
+ case nonloc::ConcreteIntKind:
+ return val.castAs<nonloc::ConcreteInt>().evalMinus(*this);
+ default:
+ return UnknownVal();
+ }
+}
+
+SVal SimpleSValBuilder::evalComplement(NonLoc X) {
+ switch (X.getSubKind()) {
+ case nonloc::ConcreteIntKind:
+ return X.castAs<nonloc::ConcreteInt>().evalComplement(*this);
+ default:
+ return UnknownVal();
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function for binary operators.
+//===----------------------------------------------------------------------===//
+
+SVal SimpleSValBuilder::MakeSymIntVal(const SymExpr *LHS,
+ BinaryOperator::Opcode op,
+ const llvm::APSInt &RHS,
+ QualType resultTy) {
+ bool isIdempotent = false;
+
+ // Check for a few special cases with known reductions first.
+ switch (op) {
+ default:
+ // We can't reduce this case; just treat it normally.
+ break;
+ case BO_Mul:
+ // a*0 and a*1
+ if (RHS == 0)
+ return makeIntVal(0, resultTy);
+ else if (RHS == 1)
+ isIdempotent = true;
+ break;
+ case BO_Div:
+ // a/0 and a/1
+ if (RHS == 0)
+ // This is also handled elsewhere.
+ return UndefinedVal();
+ else if (RHS == 1)
+ isIdempotent = true;
+ break;
+ case BO_Rem:
+ // a%0 and a%1
+ if (RHS == 0)
+ // This is also handled elsewhere.
+ return UndefinedVal();
+ else if (RHS == 1)
+ return makeIntVal(0, resultTy);
+ break;
+ case BO_Add:
+ case BO_Sub:
+ case BO_Shl:
+ case BO_Shr:
+ case BO_Xor:
+ // a+0, a-0, a<<0, a>>0, a^0
+ if (RHS == 0)
+ isIdempotent = true;
+ break;
+ case BO_And:
+ // a&0 and a&(~0)
+ if (RHS == 0)
+ return makeIntVal(0, resultTy);
+ else if (RHS.isAllOnesValue())
+ isIdempotent = true;
+ break;
+ case BO_Or:
+ // a|0 and a|(~0)
+ if (RHS == 0)
+ isIdempotent = true;
+ else if (RHS.isAllOnesValue()) {
+ const llvm::APSInt &Result = BasicVals.Convert(resultTy, RHS);
+ return nonloc::ConcreteInt(Result);
+ }
+ break;
+ }
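+
+  // To illustrate the reductions above: 'a*0', 'a&0', and 'a%1' fold to the
+  // constant 0, while 'a*1', 'a+0', and 'a&(~0)' merely reduce to 'a' and
+  // take the idempotent path below.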
+
+ // Idempotent ops (like a*1) can still change the type of an expression.
+ // Wrap the LHS up in a NonLoc again and let evalCastFromNonLoc do the
+ // dirty work.
+ if (isIdempotent)
+ return evalCastFromNonLoc(nonloc::SymbolVal(LHS), resultTy);
+
+ // If we reach this point, the expression cannot be simplified.
+ // Make a SymbolVal for the entire expression, after converting the RHS.
+ const llvm::APSInt *ConvertedRHS = &RHS;
+ if (BinaryOperator::isComparisonOp(op)) {
+ // We're looking for a type big enough to compare the symbolic value
+ // with the given constant.
+ // FIXME: This is an approximation of Sema::UsualArithmeticConversions.
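+    // E.g., comparing an 'unsigned int' symbol against the signed literal
+    // -1 converts the constant to 4294967295U (assuming 32-bit 'int'), so
+    // the comparison happens in unsigned space.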
+ ASTContext &Ctx = getContext();
+ QualType SymbolType = LHS->getType();
+ uint64_t ValWidth = RHS.getBitWidth();
+ uint64_t TypeWidth = Ctx.getTypeSize(SymbolType);
+
+ if (ValWidth < TypeWidth) {
+ // If the value is too small, extend it.
+ ConvertedRHS = &BasicVals.Convert(SymbolType, RHS);
+ } else if (ValWidth == TypeWidth) {
+ // If the value is signed but the symbol is unsigned, do the comparison
+ // in unsigned space. [C99 6.3.1.8]
+ // (For the opposite case, the value is already unsigned.)
+ if (RHS.isSigned() && !SymbolType->isSignedIntegerOrEnumerationType())
+ ConvertedRHS = &BasicVals.Convert(SymbolType, RHS);
+ }
+ } else
+ ConvertedRHS = &BasicVals.Convert(resultTy, RHS);
+
+ return makeNonLoc(LHS, op, *ConvertedRHS, resultTy);
+}
+
+SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
+ BinaryOperator::Opcode op,
+ NonLoc lhs, NonLoc rhs,
+ QualType resultTy) {
+ NonLoc InputLHS = lhs;
+ NonLoc InputRHS = rhs;
+
+ // Handle trivial case where left-side and right-side are the same.
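+  // E.g., for a symbolic 'x', 'x == x' folds to true, 'x - x' to 0, and
+  // 'x & x' to (a possibly re-typed) 'x', with no constraint solving needed.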
+ if (lhs == rhs)
+ switch (op) {
+ default:
+ break;
+ case BO_EQ:
+ case BO_LE:
+ case BO_GE:
+ return makeTruthVal(true, resultTy);
+ case BO_LT:
+ case BO_GT:
+ case BO_NE:
+ return makeTruthVal(false, resultTy);
+ case BO_Xor:
+ case BO_Sub:
+ if (resultTy->isIntegralOrEnumerationType())
+ return makeIntVal(0, resultTy);
+ return evalCastFromNonLoc(makeIntVal(0, /*Unsigned=*/false), resultTy);
+ case BO_Or:
+ case BO_And:
+ return evalCastFromNonLoc(lhs, resultTy);
+ }
+
+ while (1) {
+ switch (lhs.getSubKind()) {
+ default:
+ return makeSymExprValNN(state, op, lhs, rhs, resultTy);
+ case nonloc::LocAsIntegerKind: {
+ Loc lhsL = lhs.castAs<nonloc::LocAsInteger>().getLoc();
+ switch (rhs.getSubKind()) {
+ case nonloc::LocAsIntegerKind:
+ return evalBinOpLL(state, op, lhsL,
+ rhs.castAs<nonloc::LocAsInteger>().getLoc(),
+ resultTy);
+ case nonloc::ConcreteIntKind: {
+ // Transform the integer into a location and compare.
+ llvm::APSInt i = rhs.castAs<nonloc::ConcreteInt>().getValue();
+ BasicVals.getAPSIntType(Context.VoidPtrTy).apply(i);
+ return evalBinOpLL(state, op, lhsL, makeLoc(i), resultTy);
+ }
+ default:
+ switch (op) {
+ case BO_EQ:
+ return makeTruthVal(false, resultTy);
+ case BO_NE:
+ return makeTruthVal(true, resultTy);
+ default:
+ // This case also handles pointer arithmetic.
+ return makeSymExprValNN(state, op, InputLHS, InputRHS, resultTy);
+ }
+ }
+ }
+ case nonloc::ConcreteIntKind: {
+ llvm::APSInt LHSValue = lhs.castAs<nonloc::ConcreteInt>().getValue();
+
+ // If we're dealing with two known constants, just perform the operation.
+ if (const llvm::APSInt *KnownRHSValue = getKnownValue(state, rhs)) {
+ llvm::APSInt RHSValue = *KnownRHSValue;
+ if (BinaryOperator::isComparisonOp(op)) {
+ // We're looking for a type big enough to compare the two values.
+ // FIXME: This is not correct. char + short will result in a promotion
+ // to int. Unfortunately we have lost types by this point.
+ APSIntType CompareType = std::max(APSIntType(LHSValue),
+ APSIntType(RHSValue));
+ CompareType.apply(LHSValue);
+ CompareType.apply(RHSValue);
+ } else if (!BinaryOperator::isShiftOp(op)) {
+ APSIntType IntType = BasicVals.getAPSIntType(resultTy);
+ IntType.apply(LHSValue);
+ IntType.apply(RHSValue);
+ }
+
+ const llvm::APSInt *Result =
+ BasicVals.evalAPSInt(op, LHSValue, RHSValue);
+ if (!Result)
+ return UndefinedVal();
+
+ return nonloc::ConcreteInt(*Result);
+ }
+
+ // Swap the left and right sides and flip the operator if doing so
+ // allows us to better reason about the expression (this is a form
+ // of expression canonicalization).
+ // While we're at it, catch some special cases for non-commutative ops.
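+      // E.g., '3 < x' is canonicalized to 'x > 3', and '5 + x' to 'x + 5',
+      // so the symbol ends up on the left-hand side where MakeSymIntVal
+      // can handle it.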
+ switch (op) {
+ case BO_LT:
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ op = BinaryOperator::reverseComparisonOp(op);
+ // FALL-THROUGH
+ case BO_EQ:
+ case BO_NE:
+ case BO_Add:
+ case BO_Mul:
+ case BO_And:
+ case BO_Xor:
+ case BO_Or:
+ std::swap(lhs, rhs);
+ continue;
+ case BO_Shr:
+ // (~0)>>a
+ if (LHSValue.isAllOnesValue() && LHSValue.isSigned())
+ return evalCastFromNonLoc(lhs, resultTy);
+ // FALL-THROUGH
+ case BO_Shl:
+ // 0<<a and 0>>a
+ if (LHSValue == 0)
+ return evalCastFromNonLoc(lhs, resultTy);
+ return makeSymExprValNN(state, op, InputLHS, InputRHS, resultTy);
+ default:
+ return makeSymExprValNN(state, op, InputLHS, InputRHS, resultTy);
+ }
+ }
+ case nonloc::SymbolValKind: {
+ // We only handle LHS as simple symbols or SymIntExprs.
+ SymbolRef Sym = lhs.castAs<nonloc::SymbolVal>().getSymbol();
+
+ // LHS is a symbolic expression.
+ if (const SymIntExpr *symIntExpr = dyn_cast<SymIntExpr>(Sym)) {
+
+ // Is this a logical not? (!x is represented as x == 0.)
+ if (op == BO_EQ && rhs.isZeroConstant()) {
+ // We know how to negate certain expressions. Simplify them here.
+
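+          // E.g., '!(x < 5)' reaches here as '(x < 5) == 0' and is rewritten
+          // to 'x >= 5' by negating the comparison opcode below.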
+ BinaryOperator::Opcode opc = symIntExpr->getOpcode();
+ switch (opc) {
+ default:
+ // We don't know how to negate this operation.
+ // Just handle it as if it were a normal comparison to 0.
+ break;
+ case BO_LAnd:
+ case BO_LOr:
+ llvm_unreachable("Logical operators handled by branching logic.");
+ case BO_Assign:
+ case BO_MulAssign:
+ case BO_DivAssign:
+ case BO_RemAssign:
+ case BO_AddAssign:
+ case BO_SubAssign:
+ case BO_ShlAssign:
+ case BO_ShrAssign:
+ case BO_AndAssign:
+ case BO_XorAssign:
+ case BO_OrAssign:
+ case BO_Comma:
+ llvm_unreachable("'=' and ',' operators handled by ExprEngine.");
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ llvm_unreachable("Pointer arithmetic not handled here.");
+ case BO_LT:
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ case BO_EQ:
+ case BO_NE:
+ assert(resultTy->isBooleanType() ||
+ resultTy == getConditionType());
+ assert(symIntExpr->getType()->isBooleanType() ||
+ getContext().hasSameUnqualifiedType(symIntExpr->getType(),
+ getConditionType()));
+ // Negate the comparison and make a value.
+ opc = BinaryOperator::negateComparisonOp(opc);
+ return makeNonLoc(symIntExpr->getLHS(), opc,
+ symIntExpr->getRHS(), resultTy);
+ }
+ }
+
+ // For now, only handle expressions whose RHS is a constant.
+ if (const llvm::APSInt *RHSValue = getKnownValue(state, rhs)) {
+ // If both the LHS and the current expression are additive,
+ // fold their constants and try again.
+ if (BinaryOperator::isAdditiveOp(op)) {
+ BinaryOperator::Opcode lop = symIntExpr->getOpcode();
+ if (BinaryOperator::isAdditiveOp(lop)) {
+ // Convert the two constants to a common type, then combine them.
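+          // E.g., '(x + 1) + 2' folds to 'x + 3', while '(x - 1) + 3'
+          // becomes 'x - (-2)': the constants are combined under the LHS
+          // opcode 'lop'.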
+
+ // resultTy may not be the best type to convert to, but it's
+ // probably the best choice in expressions with mixed type
+ // (such as x+1U+2LL). The rules for implicit conversions should
+ // choose a reasonable type to preserve the expression, and will
+ // at least match how the value is going to be used.
+ APSIntType IntType = BasicVals.getAPSIntType(resultTy);
+ const llvm::APSInt &first = IntType.convert(symIntExpr->getRHS());
+ const llvm::APSInt &second = IntType.convert(*RHSValue);
+
+ const llvm::APSInt *newRHS;
+ if (lop == op)
+ newRHS = BasicVals.evalAPSInt(BO_Add, first, second);
+ else
+ newRHS = BasicVals.evalAPSInt(BO_Sub, first, second);
+
+ assert(newRHS && "Invalid operation despite common type!");
+ rhs = nonloc::ConcreteInt(*newRHS);
+ lhs = nonloc::SymbolVal(symIntExpr->getLHS());
+ op = lop;
+ continue;
+ }
+ }
+
+ // Otherwise, make a SymIntExpr out of the expression.
+ return MakeSymIntVal(symIntExpr, op, *RHSValue, resultTy);
+ }
+ }
+
+ // Does the symbolic expression simplify to a constant?
+ // If so, "fold" the constant by setting 'lhs' to a ConcreteInt
+ // and try again.
+ ConstraintManager &CMgr = state->getConstraintManager();
+ if (const llvm::APSInt *Constant = CMgr.getSymVal(state, Sym)) {
+ lhs = nonloc::ConcreteInt(*Constant);
+ continue;
+ }
+
+ // Is the RHS a constant?
+ if (const llvm::APSInt *RHSValue = getKnownValue(state, rhs))
+ return MakeSymIntVal(Sym, op, *RHSValue, resultTy);
+
+ // Give up -- this is not a symbolic expression we can handle.
+ return makeSymExprValNN(state, op, InputLHS, InputRHS, resultTy);
+ }
+ }
+ }
+}
+
+static SVal evalBinOpFieldRegionFieldRegion(const FieldRegion *LeftFR,
+ const FieldRegion *RightFR,
+ BinaryOperator::Opcode op,
+ QualType resultTy,
+ SimpleSValBuilder &SVB) {
+ // Only comparisons are meaningful here!
+ if (!BinaryOperator::isComparisonOp(op))
+ return UnknownVal();
+
+ // Next, see if the two FRs have the same super-region.
+ // FIXME: This doesn't handle casts yet, and simply stripping the casts
+ // doesn't help.
+ if (LeftFR->getSuperRegion() != RightFR->getSuperRegion())
+ return UnknownVal();
+
+ const FieldDecl *LeftFD = LeftFR->getDecl();
+ const FieldDecl *RightFD = RightFR->getDecl();
+ const RecordDecl *RD = LeftFD->getParent();
+
+ // Make sure the two FRs are from the same kind of record. Just in case!
+ // FIXME: This is probably where inheritance would be a problem.
+ if (RD != RightFD->getParent())
+ return UnknownVal();
+
+ // We know for sure that the two fields are not the same, since that
+ // would have given us the same SVal.
+ if (op == BO_EQ)
+ return SVB.makeTruthVal(false, resultTy);
+ if (op == BO_NE)
+ return SVB.makeTruthVal(true, resultTy);
+
+ // Iterate through the fields and see which one comes first.
+ // [C99 6.7.2.1.13] "Within a structure object, the non-bit-field
+ // members and the units in which bit-fields reside have addresses that
+ // increase in the order in which they are declared."
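+  // E.g., given 'struct S { int a; int b; };', a comparison '&s.a < &s.b'
+  // evaluates to true here because 'a' is declared before 'b'.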
+ bool leftFirst = (op == BO_LT || op == BO_LE);
+ for (const auto *I : RD->fields()) {
+ if (I == LeftFD)
+ return SVB.makeTruthVal(leftFirst, resultTy);
+ if (I == RightFD)
+ return SVB.makeTruthVal(!leftFirst, resultTy);
+ }
+
+ llvm_unreachable("Fields not found in parent record's definition");
+}
+
+// FIXME: all this logic will change if/when we have MemRegion::getLocation().
+SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
+ BinaryOperator::Opcode op,
+ Loc lhs, Loc rhs,
+ QualType resultTy) {
+ // Only comparisons and subtractions are valid operations on two pointers.
+ // See [C99 6.5.5 through 6.5.14] or [C++0x 5.6 through 5.15].
+  // However, if a pointer is cast to an integer, evalBinOpNN may end up
+ // calling this function with another operation (PR7527). We don't attempt to
+ // model this for now, but it could be useful, particularly when the
+ // "location" is actually an integer value that's been passed through a void*.
+ if (!(BinaryOperator::isComparisonOp(op) || op == BO_Sub))
+ return UnknownVal();
+
+ // Special cases for when both sides are identical.
+ if (lhs == rhs) {
+ switch (op) {
+ default:
+ llvm_unreachable("Unimplemented operation for two identical values");
+ case BO_Sub:
+ return makeZeroVal(resultTy);
+ case BO_EQ:
+ case BO_LE:
+ case BO_GE:
+ return makeTruthVal(true, resultTy);
+ case BO_NE:
+ case BO_LT:
+ case BO_GT:
+ return makeTruthVal(false, resultTy);
+ }
+ }
+
+ switch (lhs.getSubKind()) {
+ default:
+ llvm_unreachable("Ordering not implemented for this Loc.");
+
+ case loc::GotoLabelKind:
+ // The only thing we know about labels is that they're non-null.
+ if (rhs.isZeroConstant()) {
+ switch (op) {
+ default:
+ break;
+ case BO_Sub:
+ return evalCastFromLoc(lhs, resultTy);
+ case BO_EQ:
+ case BO_LE:
+ case BO_LT:
+ return makeTruthVal(false, resultTy);
+ case BO_NE:
+ case BO_GT:
+ case BO_GE:
+ return makeTruthVal(true, resultTy);
+ }
+ }
+ // There may be two labels for the same location, and a function region may
+ // have the same address as a label at the start of the function (depending
+ // on the ABI).
+ // FIXME: we can probably do a comparison against other MemRegions, though.
+ // FIXME: is there a way to tell if two labels refer to the same location?
+ return UnknownVal();
+
+ case loc::ConcreteIntKind: {
+ // If one of the operands is a symbol and the other is a constant,
+ // build an expression for use by the constraint manager.
+ if (SymbolRef rSym = rhs.getAsLocSymbol()) {
+ // We can only build expressions with symbols on the left,
+ // so we need a reversible operator.
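+      // E.g., '5 < p' (with 'p' symbolic) is turned into 'p > 5'.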
+ if (!BinaryOperator::isComparisonOp(op))
+ return UnknownVal();
+
+ const llvm::APSInt &lVal = lhs.castAs<loc::ConcreteInt>().getValue();
+ op = BinaryOperator::reverseComparisonOp(op);
+ return makeNonLoc(rSym, op, lVal, resultTy);
+ }
+
+ // If both operands are constants, just perform the operation.
+ if (Optional<loc::ConcreteInt> rInt = rhs.getAs<loc::ConcreteInt>()) {
+ SVal ResultVal =
+ lhs.castAs<loc::ConcreteInt>().evalBinOp(BasicVals, op, *rInt);
+ if (Optional<NonLoc> Result = ResultVal.getAs<NonLoc>())
+ return evalCastFromNonLoc(*Result, resultTy);
+
+ assert(!ResultVal.getAs<Loc>() && "Loc-Loc ops should not produce Locs");
+ return UnknownVal();
+ }
+
+ // Special case comparisons against NULL.
+ // This must come after the test if the RHS is a symbol, which is used to
+ // build constraints. The address of any non-symbolic region is guaranteed
+ // to be non-NULL, as is any label.
+ assert(rhs.getAs<loc::MemRegionVal>() || rhs.getAs<loc::GotoLabel>());
+ if (lhs.isZeroConstant()) {
+ switch (op) {
+ default:
+ break;
+ case BO_EQ:
+ case BO_GT:
+ case BO_GE:
+ return makeTruthVal(false, resultTy);
+ case BO_NE:
+ case BO_LT:
+ case BO_LE:
+ return makeTruthVal(true, resultTy);
+ }
+ }
+
+ // Comparing an arbitrary integer to a region or label address is
+ // completely unknowable.
+ return UnknownVal();
+ }
+ case loc::MemRegionKind: {
+ if (Optional<loc::ConcreteInt> rInt = rhs.getAs<loc::ConcreteInt>()) {
+ // If one of the operands is a symbol and the other is a constant,
+ // build an expression for use by the constraint manager.
+ if (SymbolRef lSym = lhs.getAsLocSymbol(true))
+ return MakeSymIntVal(lSym, op, rInt->getValue(), resultTy);
+
+ // Special case comparisons to NULL.
+ // This must come after the test if the LHS is a symbol, which is used to
+ // build constraints. The address of any non-symbolic region is guaranteed
+ // to be non-NULL.
+ if (rInt->isZeroConstant()) {
+ if (op == BO_Sub)
+ return evalCastFromLoc(lhs, resultTy);
+
+ if (BinaryOperator::isComparisonOp(op)) {
+ QualType boolType = getContext().BoolTy;
+ NonLoc l = evalCastFromLoc(lhs, boolType).castAs<NonLoc>();
+ NonLoc r = makeTruthVal(false, boolType).castAs<NonLoc>();
+ return evalBinOpNN(state, op, l, r, resultTy);
+ }
+ }
+
+ // Comparing a region to an arbitrary integer is completely unknowable.
+ return UnknownVal();
+ }
+
+ // Get both values as regions, if possible.
+ const MemRegion *LeftMR = lhs.getAsRegion();
+ assert(LeftMR && "MemRegionKind SVal doesn't have a region!");
+
+ const MemRegion *RightMR = rhs.getAsRegion();
+ if (!RightMR)
+ // The RHS is probably a label, which in theory could address a region.
+ // FIXME: we can probably make a more useful statement about non-code
+ // regions, though.
+ return UnknownVal();
+
+ const MemRegion *LeftBase = LeftMR->getBaseRegion();
+ const MemRegion *RightBase = RightMR->getBaseRegion();
+ const MemSpaceRegion *LeftMS = LeftBase->getMemorySpace();
+ const MemSpaceRegion *RightMS = RightBase->getMemorySpace();
+ const MemSpaceRegion *UnknownMS = MemMgr.getUnknownRegion();
+
+ // If the two regions are from different known memory spaces they cannot be
+ // equal. Also, assume that no symbolic region (whose memory space is
+ // unknown) is on the stack.
+ if (LeftMS != RightMS &&
+ ((LeftMS != UnknownMS && RightMS != UnknownMS) ||
+ (isa<StackSpaceRegion>(LeftMS) || isa<StackSpaceRegion>(RightMS)))) {
+ switch (op) {
+ default:
+ return UnknownVal();
+ case BO_EQ:
+ return makeTruthVal(false, resultTy);
+ case BO_NE:
+ return makeTruthVal(true, resultTy);
+ }
+ }
+
+ // If both values wrap regions, see if they're from different base regions.
+    // Note that heap-based symbolic regions are assumed not to alias with
+    // each other; for example, we assume that malloc returns a different
+    // address on each invocation.
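+    // E.g., the pointers returned by two distinct calls to 'malloc', or the
+    // addresses of two different global variables, compare unequal here.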
+ if (LeftBase != RightBase &&
+ ((!isa<SymbolicRegion>(LeftBase) && !isa<SymbolicRegion>(RightBase)) ||
+        (isa<HeapSpaceRegion>(LeftMS) || isa<HeapSpaceRegion>(RightMS)))) {
+ switch (op) {
+ default:
+ return UnknownVal();
+ case BO_EQ:
+ return makeTruthVal(false, resultTy);
+ case BO_NE:
+ return makeTruthVal(true, resultTy);
+ }
+ }
+
+ // Handle special cases for when both regions are element regions.
+ const ElementRegion *RightER = dyn_cast<ElementRegion>(RightMR);
+ const ElementRegion *LeftER = dyn_cast<ElementRegion>(LeftMR);
+ if (RightER && LeftER) {
+ // Next, see if the two ERs have the same super-region and matching types.
+ // FIXME: This should do something useful even if the types don't match,
+ // though if both indexes are constant the RegionRawOffset path will
+ // give the correct answer.
+ if (LeftER->getSuperRegion() == RightER->getSuperRegion() &&
+ LeftER->getElementType() == RightER->getElementType()) {
+ // Get the left index and cast it to the correct type.
+ // If the index is unknown or undefined, bail out here.
+ SVal LeftIndexVal = LeftER->getIndex();
+ Optional<NonLoc> LeftIndex = LeftIndexVal.getAs<NonLoc>();
+ if (!LeftIndex)
+ return UnknownVal();
+ LeftIndexVal = evalCastFromNonLoc(*LeftIndex, ArrayIndexTy);
+ LeftIndex = LeftIndexVal.getAs<NonLoc>();
+ if (!LeftIndex)
+ return UnknownVal();
+
+ // Do the same for the right index.
+ SVal RightIndexVal = RightER->getIndex();
+ Optional<NonLoc> RightIndex = RightIndexVal.getAs<NonLoc>();
+ if (!RightIndex)
+ return UnknownVal();
+ RightIndexVal = evalCastFromNonLoc(*RightIndex, ArrayIndexTy);
+ RightIndex = RightIndexVal.getAs<NonLoc>();
+ if (!RightIndex)
+ return UnknownVal();
+
+ // Actually perform the operation.
+ // evalBinOpNN expects the two indexes to already be the right type.
+ return evalBinOpNN(state, op, *LeftIndex, *RightIndex, resultTy);
+ }
+ }
+
+ // Special handling of the FieldRegions, even with symbolic offsets.
+ const FieldRegion *RightFR = dyn_cast<FieldRegion>(RightMR);
+ const FieldRegion *LeftFR = dyn_cast<FieldRegion>(LeftMR);
+ if (RightFR && LeftFR) {
+ SVal R = evalBinOpFieldRegionFieldRegion(LeftFR, RightFR, op, resultTy,
+ *this);
+ if (!R.isUnknown())
+ return R;
+ }
+
+ // Compare the regions using the raw offsets.
+ RegionOffset LeftOffset = LeftMR->getAsOffset();
+ RegionOffset RightOffset = RightMR->getAsOffset();
+
+ if (LeftOffset.getRegion() != nullptr &&
+ LeftOffset.getRegion() == RightOffset.getRegion() &&
+ !LeftOffset.hasSymbolicOffset() && !RightOffset.hasSymbolicOffset()) {
+ int64_t left = LeftOffset.getOffset();
+ int64_t right = RightOffset.getOffset();
+
+ switch (op) {
+ default:
+ return UnknownVal();
+ case BO_LT:
+ return makeTruthVal(left < right, resultTy);
+ case BO_GT:
+ return makeTruthVal(left > right, resultTy);
+ case BO_LE:
+ return makeTruthVal(left <= right, resultTy);
+ case BO_GE:
+ return makeTruthVal(left >= right, resultTy);
+ case BO_EQ:
+ return makeTruthVal(left == right, resultTy);
+ case BO_NE:
+ return makeTruthVal(left != right, resultTy);
+ }
+ }
+
+ // At this point we're not going to get a good answer, but we can try
+ // conjuring an expression instead.
+ SymbolRef LHSSym = lhs.getAsLocSymbol();
+ SymbolRef RHSSym = rhs.getAsLocSymbol();
+ if (LHSSym && RHSSym)
+ return makeNonLoc(LHSSym, op, RHSSym, resultTy);
+
+ // If we get here, we have no way of comparing the regions.
+ return UnknownVal();
+ }
+ }
+}
+
+SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
+ BinaryOperator::Opcode op,
+ Loc lhs, NonLoc rhs, QualType resultTy) {
+ assert(!BinaryOperator::isComparisonOp(op) &&
+ "arguments to comparison ops must be of the same type");
+
+ // Special case: rhs is a zero constant.
+ if (rhs.isZeroConstant())
+ return lhs;
+
+ // We are dealing with pointer arithmetic.
+
+ // Handle pointer arithmetic on constant values.
+ if (Optional<nonloc::ConcreteInt> rhsInt = rhs.getAs<nonloc::ConcreteInt>()) {
+ if (Optional<loc::ConcreteInt> lhsInt = lhs.getAs<loc::ConcreteInt>()) {
+ const llvm::APSInt &leftI = lhsInt->getValue();
+ assert(leftI.isUnsigned());
+ llvm::APSInt rightI(rhsInt->getValue(), /* isUnsigned */ true);
+
+ // Convert the bitwidth of rightI. This should deal with overflow
+ // since we are dealing with concrete values.
+ rightI = rightI.extOrTrunc(leftI.getBitWidth());
+
+      // Scale the increment by the pointee size, as pointer arithmetic
+      // requires (this assumes resultTy is a pointer type here).
+      llvm::APSInt Multiplicand(rightI.getBitWidth(), /* isUnsigned */ true);
+      Multiplicand =
+          getContext().getTypeSizeInChars(resultTy->getPointeeType()).getQuantity();
+      rightI *= Multiplicand;
+
+ // Compute the adjusted pointer.
+ switch (op) {
+ case BO_Add:
+ rightI = leftI + rightI;
+ break;
+ case BO_Sub:
+ rightI = leftI - rightI;
+ break;
+ default:
+ llvm_unreachable("Invalid pointer arithmetic operation");
+ }
+ return loc::ConcreteInt(getBasicValueFactory().getValue(rightI));
+ }
+ }
+
+ // Handle cases where 'lhs' is a region.
+ if (const MemRegion *region = lhs.getAsRegion()) {
+ rhs = convertToArrayIndex(rhs).castAs<NonLoc>();
+ SVal index = UnknownVal();
+ const MemRegion *superR = nullptr;
+ QualType elementType;
+
+ if (const ElementRegion *elemReg = dyn_cast<ElementRegion>(region)) {
+ assert(op == BO_Add || op == BO_Sub);
+ index = evalBinOpNN(state, op, elemReg->getIndex(), rhs,
+ getArrayIndexType());
+ superR = elemReg->getSuperRegion();
+ elementType = elemReg->getElementType();
+ }
+ else if (isa<SubRegion>(region)) {
+ assert(op == BO_Add || op == BO_Sub);
+ index = (op == BO_Add) ? rhs : evalMinus(rhs);
+ superR = region;
+ if (resultTy->isAnyPointerType())
+ elementType = resultTy->getPointeeType();
+ }
+
+ if (Optional<NonLoc> indexV = index.getAs<NonLoc>()) {
+ return loc::MemRegionVal(MemMgr.getElementRegion(elementType, *indexV,
+ superR, getContext()));
+ }
+ }
+ return UnknownVal();
+}
+
+const llvm::APSInt *SimpleSValBuilder::getKnownValue(ProgramStateRef state,
+ SVal V) {
+ if (V.isUnknownOrUndef())
+ return nullptr;
+
+ if (Optional<loc::ConcreteInt> X = V.getAs<loc::ConcreteInt>())
+ return &X->getValue();
+
+ if (Optional<nonloc::ConcreteInt> X = V.getAs<nonloc::ConcreteInt>())
+ return &X->getValue();
+
+ if (SymbolRef Sym = V.getAsSymbol())
+ return state->getConstraintManager().getSymVal(state, Sym);
+
+ // FIXME: Add support for SymExprs.
+ return nullptr;
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp
new file mode 100644
index 0000000..7cdb55a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp
@@ -0,0 +1,510 @@
+//== Store.cpp - Interface for maps from Locations to Values ----*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the types Store and StoreManager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+
+using namespace clang;
+using namespace ento;
+
+StoreManager::StoreManager(ProgramStateManager &stateMgr)
+ : svalBuilder(stateMgr.getSValBuilder()), StateMgr(stateMgr),
+ MRMgr(svalBuilder.getRegionManager()), Ctx(stateMgr.getContext()) {}
+
+StoreRef StoreManager::enterStackFrame(Store OldStore,
+ const CallEvent &Call,
+ const StackFrameContext *LCtx) {
+ StoreRef Store = StoreRef(OldStore, *this);
+
+ SmallVector<CallEvent::FrameBindingTy, 16> InitialBindings;
+ Call.getInitialStackFrameContents(LCtx, InitialBindings);
+
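+  // These initial bindings map the callee's parameter regions (and, for C++
+  // instance calls, the 'this' region) to the caller-side argument values;
+  // bind each of them into the new frame's store.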
+ for (CallEvent::BindingsTy::iterator I = InitialBindings.begin(),
+ E = InitialBindings.end();
+ I != E; ++I) {
+ Store = Bind(Store.getStore(), I->first, I->second);
+ }
+
+ return Store;
+}
+
+const MemRegion *StoreManager::MakeElementRegion(const MemRegion *Base,
+ QualType EleTy, uint64_t index) {
+ NonLoc idx = svalBuilder.makeArrayIndex(index);
+ return MRMgr.getElementRegion(EleTy, idx, Base, svalBuilder.getContext());
+}
+
+StoreRef StoreManager::BindDefault(Store store, const MemRegion *R, SVal V) {
+ return StoreRef(store, *this);
+}
+
+const ElementRegion *StoreManager::GetElementZeroRegion(const MemRegion *R,
+ QualType T) {
+ NonLoc idx = svalBuilder.makeZeroArrayIndex();
+ assert(!T.isNull());
+ return MRMgr.getElementRegion(T, idx, R, Ctx);
+}
+
+const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy) {
+
+ ASTContext &Ctx = StateMgr.getContext();
+
+ // Handle casts to Objective-C objects.
+ if (CastToTy->isObjCObjectPointerType())
+ return R->StripCasts();
+
+ if (CastToTy->isBlockPointerType()) {
+ // FIXME: We may need different solutions, depending on the symbol
+    // involved. Blocks can be cast to/from 'id', as they can be treated
+ // as Objective-C objects. This could possibly be handled by enhancing
+ // our reasoning of downcasts of symbolic objects.
+ if (isa<CodeTextRegion>(R) || isa<SymbolicRegion>(R))
+ return R;
+
+ // We don't know what to make of it. Return a NULL region, which
+    // will be interpreted as UnknownVal.
+ return nullptr;
+ }
+
+ // Now assume we are casting from pointer to pointer. Other cases should
+ // already be handled.
+ QualType PointeeTy = CastToTy->getPointeeType();
+ QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
+
+ // Handle casts to void*. We just pass the region through.
+ if (CanonPointeeTy.getLocalUnqualifiedType() == Ctx.VoidTy)
+ return R;
+
+ // Handle casts from compatible types.
+ if (R->isBoundable())
+ if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R)) {
+ QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
+ if (CanonPointeeTy == ObjTy)
+ return R;
+ }
+
+ // Process region cast according to the kind of the region being cast.
+ switch (R->getKind()) {
+ case MemRegion::CXXThisRegionKind:
+ case MemRegion::GenericMemSpaceRegionKind:
+ case MemRegion::StackLocalsSpaceRegionKind:
+ case MemRegion::StackArgumentsSpaceRegionKind:
+ case MemRegion::HeapSpaceRegionKind:
+ case MemRegion::UnknownSpaceRegionKind:
+ case MemRegion::StaticGlobalSpaceRegionKind:
+ case MemRegion::GlobalInternalSpaceRegionKind:
+ case MemRegion::GlobalSystemSpaceRegionKind:
+ case MemRegion::GlobalImmutableSpaceRegionKind: {
+ llvm_unreachable("Invalid region cast");
+ }
+
+ case MemRegion::FunctionTextRegionKind:
+ case MemRegion::BlockTextRegionKind:
+ case MemRegion::BlockDataRegionKind:
+ case MemRegion::StringRegionKind:
+ // FIXME: Need to handle arbitrary downcasts.
+ case MemRegion::SymbolicRegionKind:
+ case MemRegion::AllocaRegionKind:
+ case MemRegion::CompoundLiteralRegionKind:
+ case MemRegion::FieldRegionKind:
+ case MemRegion::ObjCIvarRegionKind:
+ case MemRegion::ObjCStringRegionKind:
+ case MemRegion::VarRegionKind:
+ case MemRegion::CXXTempObjectRegionKind:
+ case MemRegion::CXXBaseObjectRegionKind:
+ return MakeElementRegion(R, PointeeTy);
+
+ case MemRegion::ElementRegionKind: {
+ // If we are casting from an ElementRegion to another type, the
+ // algorithm is as follows:
+ //
+ // (1) Compute the "raw offset" of the ElementRegion from the
+ // base region. This is done by calling 'getAsRawOffset()'.
+ //
+ // (2a) If we get a 'RegionRawOffset' after calling
+ // 'getAsRawOffset()', determine if the absolute offset
+ // can be exactly divided into chunks of the size of the
+ // casted-pointee type. If so, create a new ElementRegion with
+ // the pointee-cast type as the new ElementType and the index
+    //      being the offset divided by the chunk size. If not, create
+ // a new ElementRegion at offset 0 off the raw offset region.
+ //
+    // (2b) If we don't get a 'RegionRawOffset' after calling
+ // 'getAsRawOffset()', it means that we are at offset 0.
+ //
+ // FIXME: Handle symbolic raw offsets.
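+    //
+    // For illustration: an 'int buf[4]' viewed through a 'char *' at raw
+    // byte offset 8, when cast back to 'int *', yields an ElementRegion
+    // equivalent to '&buf[2]' (assuming sizeof(int) == 4), since 8 is a
+    // multiple of the element size.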
+
+ const ElementRegion *elementR = cast<ElementRegion>(R);
+ const RegionRawOffset &rawOff = elementR->getAsArrayOffset();
+ const MemRegion *baseR = rawOff.getRegion();
+
+ // If we cannot compute a raw offset, throw up our hands and return
+ // a NULL MemRegion*.
+ if (!baseR)
+ return nullptr;
+
+ CharUnits off = rawOff.getOffset();
+
+ if (off.isZero()) {
+ // Edge case: we are at 0 bytes off the beginning of baseR. We
+      // check to see if the type we are casting to is the same as the base
+ // region. If so, just return the base region.
+ if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(baseR)) {
+ QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
+ QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
+ if (CanonPointeeTy == ObjTy)
+ return baseR;
+ }
+
+ // Otherwise, create a new ElementRegion at offset 0.
+ return MakeElementRegion(baseR, PointeeTy);
+ }
+
+ // We have a non-zero offset from the base region. We want to determine
+ // if the offset can be evenly divided by sizeof(PointeeTy). If so,
+ // we create an ElementRegion whose index is that value. Otherwise, we
+ // create two ElementRegions, one that reflects a raw offset and the other
+ // that reflects the cast.
+
+ // Compute the index for the new ElementRegion.
+ int64_t newIndex = 0;
+ const MemRegion *newSuperR = nullptr;
+
+ // We can only compute sizeof(PointeeTy) if it is a complete type.
+ if (!PointeeTy->isIncompleteType()) {
+ // Compute the size in **bytes**.
+ CharUnits pointeeTySize = Ctx.getTypeSizeInChars(PointeeTy);
+ if (!pointeeTySize.isZero()) {
+ // Is the offset a multiple of the size? If so, we can layer the
+ // ElementRegion (with elementType == PointeeTy) directly on top of
+ // the base region.
+ if (off % pointeeTySize == 0) {
+ newIndex = off / pointeeTySize;
+ newSuperR = baseR;
+ }
+ }
+ }
+
+ if (!newSuperR) {
+ // Create an intermediate ElementRegion to represent the raw byte.
+ // This will be the super region of the final ElementRegion.
+ newSuperR = MakeElementRegion(baseR, Ctx.CharTy, off.getQuantity());
+ }
+
+ return MakeElementRegion(newSuperR, PointeeTy, newIndex);
+ }
+ }
+
+ llvm_unreachable("unreachable");
+}
+
+static bool regionMatchesCXXRecordType(SVal V, QualType Ty) {
+ const MemRegion *MR = V.getAsRegion();
+ if (!MR)
+ return true;
+
+ const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(MR);
+ if (!TVR)
+ return true;
+
+ const CXXRecordDecl *RD = TVR->getValueType()->getAsCXXRecordDecl();
+ if (!RD)
+ return true;
+
+ const CXXRecordDecl *Expected = Ty->getPointeeCXXRecordDecl();
+ if (!Expected)
+ Expected = Ty->getAsCXXRecordDecl();
+
+ return Expected->getCanonicalDecl() == RD->getCanonicalDecl();
+}
+
+SVal StoreManager::evalDerivedToBase(SVal Derived, const CastExpr *Cast) {
+ // Sanity check to avoid doing the wrong thing in the face of
+ // reinterpret_cast.
+ if (!regionMatchesCXXRecordType(Derived, Cast->getSubExpr()->getType()))
+ return UnknownVal();
+
+ // Walk through the cast path to create nested CXXBaseRegions.
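+  // E.g., for 'struct A {}; struct B : A {}; struct C : B {};', a
+  // DerivedToBase cast from 'C' to 'A' produces base{A, base{B, C-object}}.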
+ SVal Result = Derived;
+ for (CastExpr::path_const_iterator I = Cast->path_begin(),
+ E = Cast->path_end();
+ I != E; ++I) {
+ Result = evalDerivedToBase(Result, (*I)->getType(), (*I)->isVirtual());
+ }
+ return Result;
+}
+
+SVal StoreManager::evalDerivedToBase(SVal Derived, const CXXBasePath &Path) {
+ // Walk through the path to create nested CXXBaseRegions.
+ SVal Result = Derived;
+ for (CXXBasePath::const_iterator I = Path.begin(), E = Path.end();
+ I != E; ++I) {
+ Result = evalDerivedToBase(Result, I->Base->getType(),
+ I->Base->isVirtual());
+ }
+ return Result;
+}
+
+SVal StoreManager::evalDerivedToBase(SVal Derived, QualType BaseType,
+ bool IsVirtual) {
+ Optional<loc::MemRegionVal> DerivedRegVal =
+ Derived.getAs<loc::MemRegionVal>();
+ if (!DerivedRegVal)
+ return Derived;
+
+ const CXXRecordDecl *BaseDecl = BaseType->getPointeeCXXRecordDecl();
+ if (!BaseDecl)
+ BaseDecl = BaseType->getAsCXXRecordDecl();
+ assert(BaseDecl && "not a C++ object?");
+
+ const MemRegion *BaseReg =
+ MRMgr.getCXXBaseObjectRegion(BaseDecl, DerivedRegVal->getRegion(),
+ IsVirtual);
+
+ return loc::MemRegionVal(BaseReg);
+}
+
+/// Returns the static type of the given region, if it represents a C++ class
+/// object.
+///
+/// This handles both fully-typed regions, where the dynamic type is known, and
+/// symbolic regions, where the dynamic type is merely bounded (and even then,
+/// only ostensibly!), but does not take advantage of any dynamic type info.
+static const CXXRecordDecl *getCXXRecordType(const MemRegion *MR) {
+ if (const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(MR))
+ return TVR->getValueType()->getAsCXXRecordDecl();
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(MR))
+ return SR->getSymbol()->getType()->getPointeeCXXRecordDecl();
+ return nullptr;
+}
+
+SVal StoreManager::evalDynamicCast(SVal Base, QualType TargetType,
+ bool &Failed) {
+ Failed = false;
+
+ const MemRegion *MR = Base.getAsRegion();
+ if (!MR)
+ return UnknownVal();
+
+ // Assume the derived class is a pointer or a reference to a CXX record.
+ TargetType = TargetType->getPointeeType();
+ assert(!TargetType.isNull());
+ const CXXRecordDecl *TargetClass = TargetType->getAsCXXRecordDecl();
+ if (!TargetClass && !TargetType->isVoidType())
+ return UnknownVal();
+
+ // Drill down the CXXBaseObject chains, which represent upcasts (casts from
+ // derived to base).
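+  // E.g., if 'MR' is base{A, base{B, obj}} where 'obj' is of class D, and
+  // the target is 'D *', peeling off the base regions uncovers 'obj' and
+  // the cast succeeds.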
+ while (const CXXRecordDecl *MRClass = getCXXRecordType(MR)) {
+    // If we found the derived class, the cast succeeds.
+ if (MRClass == TargetClass)
+ return loc::MemRegionVal(MR);
+
+ // We skip over incomplete types. They must be the result of an earlier
+ // reinterpret_cast, as one can only dynamic_cast between types in the same
+ // class hierarchy.
+ if (!TargetType->isVoidType() && MRClass->hasDefinition()) {
+ // Static upcasts are marked as DerivedToBase casts by Sema, so this will
+ // only happen when multiple or virtual inheritance is involved.
+ CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+ if (MRClass->isDerivedFrom(TargetClass, Paths))
+ return evalDerivedToBase(loc::MemRegionVal(MR), Paths.front());
+ }
+
+ if (const CXXBaseObjectRegion *BaseR = dyn_cast<CXXBaseObjectRegion>(MR)) {
+ // Drill down the chain to get the derived classes.
+ MR = BaseR->getSuperRegion();
+ continue;
+ }
+
+ // If this is a cast to void*, return the region.
+ if (TargetType->isVoidType())
+ return loc::MemRegionVal(MR);
+
+ // Strange use of reinterpret_cast can give us paths we don't reason
+ // about well, by putting in ElementRegions where we'd expect
+ // CXXBaseObjectRegions. If it's a valid reinterpret_cast (i.e. if the
+ // derived class has a zero offset from the base class), then it's safe
+ // to strip the cast; if it's invalid, -Wreinterpret-base-class should
+ // catch it. In the interest of performance, the analyzer will silently
+ // do the wrong thing in the invalid case (because offsets for subregions
+ // will be wrong).
+ const MemRegion *Uncasted = MR->StripCasts(/*IncludeBaseCasts=*/false);
+ if (Uncasted == MR) {
+ // We reached the bottom of the hierarchy and did not find the derived
+      // class. We must be casting the base to derived, so the cast should
+ // fail.
+ break;
+ }
+
+ MR = Uncasted;
+ }
+
+ // We failed if the region we ended up with has perfect type info.
+ Failed = isa<TypedValueRegion>(MR);
+ return UnknownVal();
+}
+
+
+/// CastRetrievedVal - Used by subclasses of StoreManager to implement
+/// implicit casts that arise from loads from regions that are reinterpreted
+/// as another region.
+SVal StoreManager::CastRetrievedVal(SVal V, const TypedValueRegion *R,
+ QualType castTy, bool performTestOnly) {
+
+ if (castTy.isNull() || V.isUnknownOrUndef())
+ return V;
+
+ ASTContext &Ctx = svalBuilder.getContext();
+
+ if (performTestOnly) {
+ // Automatically translate references to pointers.
+ QualType T = R->getValueType();
+ if (const ReferenceType *RT = T->getAs<ReferenceType>())
+ T = Ctx.getPointerType(RT->getPointeeType());
+
+ assert(svalBuilder.getContext().hasSameUnqualifiedType(castTy, T));
+ return V;
+ }
+
+ return svalBuilder.dispatchCast(V, castTy);
+}
+
+SVal StoreManager::getLValueFieldOrIvar(const Decl *D, SVal Base) {
+ if (Base.isUnknownOrUndef())
+ return Base;
+
+ Loc BaseL = Base.castAs<Loc>();
+ const MemRegion* BaseR = nullptr;
+
+ switch (BaseL.getSubKind()) {
+ case loc::MemRegionKind:
+ BaseR = BaseL.castAs<loc::MemRegionVal>().getRegion();
+ break;
+
+ case loc::GotoLabelKind:
+    // These are abnormal cases. Flag an undefined value.
+ return UndefinedVal();
+
+ case loc::ConcreteIntKind:
+ // While these seem funny, this can happen through casts.
+ // FIXME: What we should return is the field offset. For example,
+ // add the field offset to the integer value. That way funny things
+ // like this work properly: &(((struct foo *) 0xa)->f)
+ return Base;
+
+ default:
+ llvm_unreachable("Unhandled Base.");
+ }
+
+ // NOTE: We must have this check first because ObjCIvarDecl is a subclass
+ // of FieldDecl.
+ if (const ObjCIvarDecl *ID = dyn_cast<ObjCIvarDecl>(D))
+ return loc::MemRegionVal(MRMgr.getObjCIvarRegion(ID, BaseR));
+
+ return loc::MemRegionVal(MRMgr.getFieldRegion(cast<FieldDecl>(D), BaseR));
+}
+
+SVal StoreManager::getLValueIvar(const ObjCIvarDecl *decl, SVal base) {
+ return getLValueFieldOrIvar(decl, base);
+}
+
+SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
+ SVal Base) {
+
+ // If the base is an unknown or undefined value, just return it back.
+ // FIXME: For absolute pointer addresses, we just return that value back as
+ // well, although in reality we should return the offset added to that
+ // value.
+ if (Base.isUnknownOrUndef() || Base.getAs<loc::ConcreteInt>())
+ return Base;
+
+ const MemRegion* BaseRegion = Base.castAs<loc::MemRegionVal>().getRegion();
+
+ // Pointer of any type can be cast and used as array base.
+ const ElementRegion *ElemR = dyn_cast<ElementRegion>(BaseRegion);
+
+ // Convert the offset to the appropriate size and signedness.
+ Offset = svalBuilder.convertToArrayIndex(Offset).castAs<NonLoc>();
+
+ if (!ElemR) {
+ //
+ // If the base region is not an ElementRegion, create one.
+ // This can happen in the following example:
+ //
+  //  char *p = __builtin_alloca(10);
+ // p[1] = 8;
+ //
+ // Observe that 'p' binds to an AllocaRegion.
+ //
+ return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
+ BaseRegion, Ctx));
+ }
+
+ SVal BaseIdx = ElemR->getIndex();
+
+ if (!BaseIdx.getAs<nonloc::ConcreteInt>())
+ return UnknownVal();
+
+ const llvm::APSInt &BaseIdxI =
+ BaseIdx.castAs<nonloc::ConcreteInt>().getValue();
+
+ // Only allow non-integer offsets if the base region has no offset itself.
+ // FIXME: This is a somewhat arbitrary restriction. We should be using
+ // SValBuilder here to add the two offsets without checking their types.
+ if (!Offset.getAs<nonloc::ConcreteInt>()) {
+ if (isa<ElementRegion>(BaseRegion->StripCasts()))
+ return UnknownVal();
+
+ return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
+ ElemR->getSuperRegion(),
+ Ctx));
+ }
+
+ const llvm::APSInt& OffI = Offset.castAs<nonloc::ConcreteInt>().getValue();
+ assert(BaseIdxI.isSigned());
+
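+  // E.g., starting from 'Element{buf, 2, int}' with a concrete offset of 3,
+  // the code below produces 'Element{buf, 5, int}'.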
+ // Compute the new index.
+  nonloc::ConcreteInt NewIdx(
+      svalBuilder.getBasicValueFactory().getValue(BaseIdxI + OffI));
+
+ // Construct the new ElementRegion.
+ const MemRegion *ArrayR = ElemR->getSuperRegion();
+ return loc::MemRegionVal(MRMgr.getElementRegion(elementType, NewIdx, ArrayR,
+ Ctx));
+}
+
+StoreManager::BindingsHandler::~BindingsHandler() {}
+
+bool StoreManager::FindUniqueBinding::HandleBinding(StoreManager& SMgr,
+ Store store,
+ const MemRegion* R,
+ SVal val) {
+ SymbolRef SymV = val.getAsLocSymbol();
+ if (!SymV || SymV != Sym)
+ return true;
+
+ if (Binding) {
+ First = false;
+ return false;
+  } else
+    Binding = R;
+
+ return true;
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SubEngine.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SubEngine.cpp
new file mode 100644
index 0000000..350f4b8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SubEngine.cpp
@@ -0,0 +1,14 @@
+//== SubEngine.cpp - Interface of the subengine of CoreEngine ------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
+
+using namespace clang::ento;
+
+void SubEngine::anchor() { }
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
new file mode 100644
index 0000000..99b2e14
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -0,0 +1,560 @@
+//== SymbolManager.cpp - Management of Symbolic Values ----------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SymbolManager, a class that manages symbolic values
+// created for use by ExprEngine and related classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+void SymExpr::anchor() { }
+
+void SymExpr::dump() const {
+ dumpToStream(llvm::errs());
+}
+
+void SymIntExpr::dumpToStream(raw_ostream &os) const {
+ os << '(';
+ getLHS()->dumpToStream(os);
+ os << ") "
+ << BinaryOperator::getOpcodeStr(getOpcode()) << ' '
+ << getRHS().getZExtValue();
+ if (getRHS().isUnsigned())
+ os << 'U';
+}
+
+void IntSymExpr::dumpToStream(raw_ostream &os) const {
+ os << getLHS().getZExtValue();
+ if (getLHS().isUnsigned())
+ os << 'U';
+ os << ' '
+ << BinaryOperator::getOpcodeStr(getOpcode())
+ << " (";
+ getRHS()->dumpToStream(os);
+ os << ')';
+}
+
+void SymSymExpr::dumpToStream(raw_ostream &os) const {
+ os << '(';
+ getLHS()->dumpToStream(os);
+ os << ") "
+ << BinaryOperator::getOpcodeStr(getOpcode())
+ << " (";
+ getRHS()->dumpToStream(os);
+ os << ')';
+}
+
+void SymbolCast::dumpToStream(raw_ostream &os) const {
+ os << '(' << ToTy.getAsString() << ") (";
+ Operand->dumpToStream(os);
+ os << ')';
+}
+
+void SymbolConjured::dumpToStream(raw_ostream &os) const {
+ os << "conj_$" << getSymbolID() << '{' << T.getAsString() << '}';
+}
+
+void SymbolDerived::dumpToStream(raw_ostream &os) const {
+ os << "derived_$" << getSymbolID() << '{'
+ << getParentSymbol() << ',' << getRegion() << '}';
+}
+
+void SymbolExtent::dumpToStream(raw_ostream &os) const {
+ os << "extent_$" << getSymbolID() << '{' << getRegion() << '}';
+}
+
+void SymbolMetadata::dumpToStream(raw_ostream &os) const {
+ os << "meta_$" << getSymbolID() << '{'
+ << getRegion() << ',' << T.getAsString() << '}';
+}
+
+void SymbolData::anchor() { }
+
+void SymbolRegionValue::dumpToStream(raw_ostream &os) const {
+ os << "reg_$" << getSymbolID() << "<" << R << ">";
+}
+
+bool SymExpr::symbol_iterator::operator==(const symbol_iterator &X) const {
+ return itr == X.itr;
+}
+
+bool SymExpr::symbol_iterator::operator!=(const symbol_iterator &X) const {
+ return itr != X.itr;
+}
+
+SymExpr::symbol_iterator::symbol_iterator(const SymExpr *SE) {
+ itr.push_back(SE);
+}
+
+SymExpr::symbol_iterator &SymExpr::symbol_iterator::operator++() {
+ assert(!itr.empty() && "attempting to iterate on an 'end' iterator");
+ expand();
+ return *this;
+}
+
+SymbolRef SymExpr::symbol_iterator::operator*() {
+ assert(!itr.empty() && "attempting to dereference an 'end' iterator");
+ return itr.back();
+}
+
+void SymExpr::symbol_iterator::expand() {
+ const SymExpr *SE = itr.pop_back_val();
+
+ switch (SE->getKind()) {
+ case SymExpr::RegionValueKind:
+ case SymExpr::ConjuredKind:
+ case SymExpr::DerivedKind:
+ case SymExpr::ExtentKind:
+ case SymExpr::MetadataKind:
+ return;
+ case SymExpr::CastSymbolKind:
+ itr.push_back(cast<SymbolCast>(SE)->getOperand());
+ return;
+ case SymExpr::SymIntKind:
+ itr.push_back(cast<SymIntExpr>(SE)->getLHS());
+ return;
+ case SymExpr::IntSymKind:
+ itr.push_back(cast<IntSymExpr>(SE)->getRHS());
+ return;
+ case SymExpr::SymSymKind: {
+ const SymSymExpr *x = cast<SymSymExpr>(SE);
+ itr.push_back(x->getLHS());
+ itr.push_back(x->getRHS());
+ return;
+ }
+ }
+ llvm_unreachable("unhandled expansion case");
+}
+
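+// computeComplexity() counts the nodes of the symbolic-expression tree by
+// walking the symbol iterator. E.g., '(x + 1) < y' has complexity 4: the
+// comparison, the inner 'x + 1', and the atomic symbols 'x' and 'y' (the
+// integer constant is not counted as a separate node).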
+unsigned SymExpr::computeComplexity() const {
+ unsigned R = 0;
+ for (symbol_iterator I = symbol_begin(), E = symbol_end(); I != E; ++I)
+ R++;
+ return R;
+}
+
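+// Each factory method below uniques its symbols through a FoldingSet:
+// Profile() fills a FoldingSetNodeID from the constituents,
+// FindNodeOrInsertPos() looks for an existing node, and only on a miss is a
+// new symbol placement-constructed in the BumpPtrAllocator. Pointer equality
+// between SymExprs therefore implies structural equality.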
+const SymbolRegionValue*
+SymbolManager::getRegionValueSymbol(const TypedValueRegion* R) {
+ llvm::FoldingSetNodeID profile;
+ SymbolRegionValue::Profile(profile, R);
+ void *InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
+ SD = (SymExpr*) BPAlloc.Allocate<SymbolRegionValue>();
+ new (SD) SymbolRegionValue(SymbolCounter, R);
+ DataSet.InsertNode(SD, InsertPos);
+ ++SymbolCounter;
+ }
+
+ return cast<SymbolRegionValue>(SD);
+}
+
+const SymbolConjured* SymbolManager::conjureSymbol(const Stmt *E,
+ const LocationContext *LCtx,
+ QualType T,
+ unsigned Count,
+ const void *SymbolTag) {
+ llvm::FoldingSetNodeID profile;
+ SymbolConjured::Profile(profile, E, T, Count, LCtx, SymbolTag);
+ void *InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
+ SD = (SymExpr*) BPAlloc.Allocate<SymbolConjured>();
+ new (SD) SymbolConjured(SymbolCounter, E, LCtx, T, Count, SymbolTag);
+ DataSet.InsertNode(SD, InsertPos);
+ ++SymbolCounter;
+ }
+
+ return cast<SymbolConjured>(SD);
+}
+
+const SymbolDerived*
+SymbolManager::getDerivedSymbol(SymbolRef parentSymbol,
+ const TypedValueRegion *R) {
+
+ llvm::FoldingSetNodeID profile;
+ SymbolDerived::Profile(profile, parentSymbol, R);
+ void *InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
+ SD = (SymExpr*) BPAlloc.Allocate<SymbolDerived>();
+ new (SD) SymbolDerived(SymbolCounter, parentSymbol, R);
+ DataSet.InsertNode(SD, InsertPos);
+ ++SymbolCounter;
+ }
+
+ return cast<SymbolDerived>(SD);
+}
+
+const SymbolExtent*
+SymbolManager::getExtentSymbol(const SubRegion *R) {
+ llvm::FoldingSetNodeID profile;
+ SymbolExtent::Profile(profile, R);
+ void *InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
+ SD = (SymExpr*) BPAlloc.Allocate<SymbolExtent>();
+ new (SD) SymbolExtent(SymbolCounter, R);
+ DataSet.InsertNode(SD, InsertPos);
+ ++SymbolCounter;
+ }
+
+ return cast<SymbolExtent>(SD);
+}
+
+const SymbolMetadata*
+SymbolManager::getMetadataSymbol(const MemRegion* R, const Stmt *S, QualType T,
+ unsigned Count, const void *SymbolTag) {
+
+ llvm::FoldingSetNodeID profile;
+ SymbolMetadata::Profile(profile, R, S, T, Count, SymbolTag);
+ void *InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
+ SD = (SymExpr*) BPAlloc.Allocate<SymbolMetadata>();
+ new (SD) SymbolMetadata(SymbolCounter, R, S, T, Count, SymbolTag);
+ DataSet.InsertNode(SD, InsertPos);
+ ++SymbolCounter;
+ }
+
+ return cast<SymbolMetadata>(SD);
+}
+
+const SymbolCast*
+SymbolManager::getCastSymbol(const SymExpr *Op,
+ QualType From, QualType To) {
+ llvm::FoldingSetNodeID ID;
+ SymbolCast::Profile(ID, Op, From, To);
+ void *InsertPos;
+ SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+ if (!data) {
+ data = (SymbolCast*) BPAlloc.Allocate<SymbolCast>();
+ new (data) SymbolCast(Op, From, To);
+ DataSet.InsertNode(data, InsertPos);
+ }
+
+ return cast<SymbolCast>(data);
+}
+
+const SymIntExpr *SymbolManager::getSymIntExpr(const SymExpr *lhs,
+ BinaryOperator::Opcode op,
+ const llvm::APSInt& v,
+ QualType t) {
+ llvm::FoldingSetNodeID ID;
+ SymIntExpr::Profile(ID, lhs, op, v, t);
+ void *InsertPos;
+ SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!data) {
+ data = (SymIntExpr*) BPAlloc.Allocate<SymIntExpr>();
+ new (data) SymIntExpr(lhs, op, v, t);
+ DataSet.InsertNode(data, InsertPos);
+ }
+
+ return cast<SymIntExpr>(data);
+}
+
+const IntSymExpr *SymbolManager::getIntSymExpr(const llvm::APSInt& lhs,
+ BinaryOperator::Opcode op,
+ const SymExpr *rhs,
+ QualType t) {
+ llvm::FoldingSetNodeID ID;
+ IntSymExpr::Profile(ID, lhs, op, rhs, t);
+ void *InsertPos;
+ SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!data) {
+ data = (IntSymExpr*) BPAlloc.Allocate<IntSymExpr>();
+ new (data) IntSymExpr(lhs, op, rhs, t);
+ DataSet.InsertNode(data, InsertPos);
+ }
+
+ return cast<IntSymExpr>(data);
+}
+
+const SymSymExpr *SymbolManager::getSymSymExpr(const SymExpr *lhs,
+ BinaryOperator::Opcode op,
+ const SymExpr *rhs,
+ QualType t) {
+ llvm::FoldingSetNodeID ID;
+ SymSymExpr::Profile(ID, lhs, op, rhs, t);
+ void *InsertPos;
+ SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!data) {
+ data = (SymSymExpr*) BPAlloc.Allocate<SymSymExpr>();
+ new (data) SymSymExpr(lhs, op, rhs, t);
+ DataSet.InsertNode(data, InsertPos);
+ }
+
+ return cast<SymSymExpr>(data);
+}
+
+QualType SymbolConjured::getType() const {
+ return T;
+}
+
+QualType SymbolDerived::getType() const {
+ return R->getValueType();
+}
+
+QualType SymbolExtent::getType() const {
+ ASTContext &Ctx = R->getMemRegionManager()->getContext();
+ return Ctx.getSizeType();
+}
+
+QualType SymbolMetadata::getType() const {
+ return T;
+}
+
+QualType SymbolRegionValue::getType() const {
+ return R->getValueType();
+}
+
+SymbolManager::~SymbolManager() {
+ llvm::DeleteContainerSeconds(SymbolDependencies);
+}
+
+bool SymbolManager::canSymbolicate(QualType T) {
+ T = T.getCanonicalType();
+
+ if (Loc::isLocType(T))
+ return true;
+
+ if (T->isIntegralOrEnumerationType())
+ return true;
+
+ if (T->isRecordType() && !T->isUnionType())
+ return true;
+
+ return false;
+}
+
+void SymbolManager::addSymbolDependency(const SymbolRef Primary,
+ const SymbolRef Dependent) {
+ SymbolDependTy::iterator I = SymbolDependencies.find(Primary);
+ SymbolRefSmallVectorTy *dependencies = nullptr;
+ if (I == SymbolDependencies.end()) {
+ dependencies = new SymbolRefSmallVectorTy();
+ SymbolDependencies[Primary] = dependencies;
+ } else {
+ dependencies = I->second;
+ }
+ dependencies->push_back(Dependent);
+}
+
+const SymbolRefSmallVectorTy *SymbolManager::getDependentSymbols(
+ const SymbolRef Primary) {
+ SymbolDependTy::const_iterator I = SymbolDependencies.find(Primary);
+ if (I == SymbolDependencies.end())
+ return nullptr;
+ return I->second;
+}
+
+void SymbolReaper::markDependentsLive(SymbolRef sym) {
+  // Do not mark dependents more than once.
+ SymbolMapTy::iterator LI = TheLiving.find(sym);
+ assert(LI != TheLiving.end() && "The primary symbol is not live.");
+ if (LI->second == HaveMarkedDependents)
+ return;
+ LI->second = HaveMarkedDependents;
+
+ if (const SymbolRefSmallVectorTy *Deps = SymMgr.getDependentSymbols(sym)) {
+ for (SymbolRefSmallVectorTy::const_iterator I = Deps->begin(),
+ E = Deps->end(); I != E; ++I) {
+ if (TheLiving.find(*I) != TheLiving.end())
+ continue;
+ markLive(*I);
+ }
+ }
+}
+
+void SymbolReaper::markLive(SymbolRef sym) {
+ TheLiving[sym] = NotProcessed;
+ TheDead.erase(sym);
+ markDependentsLive(sym);
+}
+
+void SymbolReaper::markLive(const MemRegion *region) {
+ RegionRoots.insert(region);
+ markElementIndicesLive(region);
+}
+
+void SymbolReaper::markElementIndicesLive(const MemRegion *region) {
+ for (auto SR = dyn_cast<SubRegion>(region); SR;
+ SR = dyn_cast<SubRegion>(SR->getSuperRegion())) {
+ if (auto ER = dyn_cast<ElementRegion>(SR)) {
+ SVal Idx = ER->getIndex();
+ for (auto SI = Idx.symbol_begin(), SE = Idx.symbol_end(); SI != SE; ++SI)
+ markLive(*SI);
+ }
+ }
+}
+
+void SymbolReaper::markInUse(SymbolRef sym) {
+ if (isa<SymbolMetadata>(sym))
+ MetadataInUse.insert(sym);
+}
+
+bool SymbolReaper::maybeDead(SymbolRef sym) {
+ if (isLive(sym))
+ return false;
+
+ TheDead.insert(sym);
+ return true;
+}
+
+bool SymbolReaper::isLiveRegion(const MemRegion *MR) {
+ if (RegionRoots.count(MR))
+ return true;
+
+ MR = MR->getBaseRegion();
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(MR))
+ return isLive(SR->getSymbol());
+
+ if (const VarRegion *VR = dyn_cast<VarRegion>(MR))
+ return isLive(VR, true);
+
+ // FIXME: This is a gross over-approximation. What we really need is a way to
+ // tell if anything still refers to this region. Unlike SymbolicRegions,
+ // AllocaRegions don't have associated symbols, though, so we don't actually
+ // have a way to track their liveness.
+ if (isa<AllocaRegion>(MR))
+ return true;
+
+ if (isa<CXXThisRegion>(MR))
+ return true;
+
+ if (isa<MemSpaceRegion>(MR))
+ return true;
+
+ if (isa<CodeTextRegion>(MR))
+ return true;
+
+ return false;
+}
+
+bool SymbolReaper::isLive(SymbolRef sym) {
+ if (TheLiving.count(sym)) {
+ markDependentsLive(sym);
+ return true;
+ }
+
+ bool KnownLive;
+
+ switch (sym->getKind()) {
+ case SymExpr::RegionValueKind:
+ KnownLive = isLiveRegion(cast<SymbolRegionValue>(sym)->getRegion());
+ break;
+ case SymExpr::ConjuredKind:
+ KnownLive = false;
+ break;
+ case SymExpr::DerivedKind:
+ KnownLive = isLive(cast<SymbolDerived>(sym)->getParentSymbol());
+ break;
+ case SymExpr::ExtentKind:
+ KnownLive = isLiveRegion(cast<SymbolExtent>(sym)->getRegion());
+ break;
+ case SymExpr::MetadataKind:
+ KnownLive = MetadataInUse.count(sym) &&
+ isLiveRegion(cast<SymbolMetadata>(sym)->getRegion());
+ if (KnownLive)
+ MetadataInUse.erase(sym);
+ break;
+ case SymExpr::SymIntKind:
+ KnownLive = isLive(cast<SymIntExpr>(sym)->getLHS());
+ break;
+ case SymExpr::IntSymKind:
+ KnownLive = isLive(cast<IntSymExpr>(sym)->getRHS());
+ break;
+ case SymExpr::SymSymKind:
+ KnownLive = isLive(cast<SymSymExpr>(sym)->getLHS()) &&
+ isLive(cast<SymSymExpr>(sym)->getRHS());
+ break;
+ case SymExpr::CastSymbolKind:
+ KnownLive = isLive(cast<SymbolCast>(sym)->getOperand());
+ break;
+ }
+
+ if (KnownLive)
+ markLive(sym);
+
+ return KnownLive;
+}
+
+bool
+SymbolReaper::isLive(const Stmt *ExprVal, const LocationContext *ELCtx) const {
+ if (LCtx == nullptr)
+ return false;
+
+ if (LCtx != ELCtx) {
+ // If the reaper's location context is a parent of the expression's
+ // location context, then the expression value is now "out of scope".
+ if (LCtx->isParentOf(ELCtx))
+ return false;
+ return true;
+ }
+
+  // If no statement is provided, everything in this and parent contexts is live.
+ if (!Loc)
+ return true;
+
+ return LCtx->getAnalysis<RelaxedLiveVariables>()->isLive(Loc, ExprVal);
+}
+
+bool SymbolReaper::isLive(const VarRegion *VR, bool includeStoreBindings) const{
+ const StackFrameContext *VarContext = VR->getStackFrame();
+
+ if (!VarContext)
+ return true;
+
+ if (!LCtx)
+ return false;
+ const StackFrameContext *CurrentContext = LCtx->getCurrentStackFrame();
+
+ if (VarContext == CurrentContext) {
+ // If no statement is provided, everything is live.
+ if (!Loc)
+ return true;
+
+ if (LCtx->getAnalysis<RelaxedLiveVariables>()->isLive(Loc, VR->getDecl()))
+ return true;
+
+ if (!includeStoreBindings)
+ return false;
+
+ unsigned &cachedQuery =
+ const_cast<SymbolReaper*>(this)->includedRegionCache[VR];
+
+ if (cachedQuery) {
+ return cachedQuery == 1;
+ }
+
+ // Query the store to see if the region occurs in any live bindings.
+ if (Store store = reapedStore.getStore()) {
+ bool hasRegion =
+ reapedStore.getStoreManager().includedInBindings(store, VR);
+ cachedQuery = hasRegion ? 1 : 2;
+ return hasRegion;
+ }
+
+ return false;
+ }
+
+ return VarContext->isParentOf(CurrentContext);
+}
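That completes the SymbolReaper surface: markLive()/markInUse() implement the mark phase and maybeDead() the sweep. A compressed sketch of how a dead-symbol pass is expected to drive it (illustrative only, not the actual ExprEngine code):

    #include "llvm/ADT/ArrayRef.h"

    // Illustrative sweep: offer each candidate to the reaper; those it
    // cannot prove live land in TheDead and can be purged from state.
    void sweepSymbols(clang::ento::SymbolReaper &SR,
                      llvm::ArrayRef<clang::ento::SymbolRef> Candidates) {
      for (clang::ento::SymbolRef Sym : Candidates) {
        if (SR.maybeDead(Sym)) {
          // Sym stayed dead; a checker would now drop any program-state
          // entries keyed on it (hypothetical cleanup step).
        }
      }
    }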
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
new file mode 100644
index 0000000..bf85c4c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -0,0 +1,813 @@
+//===--- AnalysisConsumer.cpp - ASTConsumer for running Analyses ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// "Meta" ASTConsumer for running different source analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Frontend/AnalysisConsumer.h"
+#include "ModelInjector.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/CallGraph.h"
+#include "clang/Analysis/CodeInjector.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/StaticAnalyzer/Checkers/LocalCheckers.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Frontend/CheckerRegistration.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <memory>
+#include <queue>
+
+using namespace clang;
+using namespace ento;
+using llvm::SmallPtrSet;
+
+#define DEBUG_TYPE "AnalysisConsumer"
+
+static std::unique_ptr<ExplodedNode::Auditor> CreateUbiViz();
+
+STATISTIC(NumFunctionTopLevel, "The # of functions at top level.");
+STATISTIC(NumFunctionsAnalyzed,
+ "The # of functions and blocks analyzed (as top level "
+ "with inlining turned on).");
+STATISTIC(NumBlocksInAnalyzedFunctions,
+ "The # of basic blocks in the analyzed functions.");
+STATISTIC(PercentReachableBlocks, "The % of reachable basic blocks.");
+STATISTIC(MaxCFGSize, "The maximum number of basic blocks in a function.");
+
+//===----------------------------------------------------------------------===//
+// Special PathDiagnosticConsumers.
+//===----------------------------------------------------------------------===//
+
+void ento::createPlistHTMLDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
+ PathDiagnosticConsumers &C,
+ const std::string &prefix,
+ const Preprocessor &PP) {
+ createHTMLDiagnosticConsumer(AnalyzerOpts, C,
+ llvm::sys::path::parent_path(prefix), PP);
+ createPlistDiagnosticConsumer(AnalyzerOpts, C, prefix, PP);
+}
+
+void ento::createTextPathDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
+ PathDiagnosticConsumers &C,
+ const std::string &Prefix,
+ const clang::Preprocessor &PP) {
+ llvm_unreachable("'text' consumer should be enabled on ClangDiags");
+}
+
+namespace {
+class ClangDiagPathDiagConsumer : public PathDiagnosticConsumer {
+ DiagnosticsEngine &Diag;
+ bool IncludePath;
+public:
+ ClangDiagPathDiagConsumer(DiagnosticsEngine &Diag)
+ : Diag(Diag), IncludePath(false) {}
+ ~ClangDiagPathDiagConsumer() override {}
+ StringRef getName() const override { return "ClangDiags"; }
+
+ bool supportsLogicalOpControlFlow() const override { return true; }
+ bool supportsCrossFileDiagnostics() const override { return true; }
+
+ PathGenerationScheme getGenerationScheme() const override {
+ return IncludePath ? Minimal : None;
+ }
+
+ void enablePaths() {
+ IncludePath = true;
+ }
+
+ void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+ FilesMade *filesMade) override {
+ unsigned WarnID = Diag.getCustomDiagID(DiagnosticsEngine::Warning, "%0");
+ unsigned NoteID = Diag.getCustomDiagID(DiagnosticsEngine::Note, "%0");
+
+ for (std::vector<const PathDiagnostic*>::iterator I = Diags.begin(),
+ E = Diags.end(); I != E; ++I) {
+ const PathDiagnostic *PD = *I;
+ SourceLocation WarnLoc = PD->getLocation().asLocation();
+ Diag.Report(WarnLoc, WarnID) << PD->getShortDescription()
+ << PD->path.back()->getRanges();
+
+ if (!IncludePath)
+ continue;
+
+ PathPieces FlatPath = PD->path.flatten(/*ShouldFlattenMacros=*/true);
+ for (PathPieces::const_iterator PI = FlatPath.begin(),
+ PE = FlatPath.end();
+ PI != PE; ++PI) {
+ SourceLocation NoteLoc = (*PI)->getLocation().asLocation();
+ Diag.Report(NoteLoc, NoteID) << (*PI)->getString()
+ << (*PI)->getRanges();
+ }
+ }
+ }
+};
+} // end anonymous namespace
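The getCustomDiagID()/Report() pair used above is the standard way to surface diagnostics that have no tablegen'd ID; in isolation the pattern looks like this (a minimal sketch with a made-up helper name):

    // Sketch: emit an ad-hoc warning through DiagnosticsEngine using a
    // custom diagnostic ID, mirroring FlushDiagnosticsImpl() above.
    void reportAt(clang::DiagnosticsEngine &Diag, clang::SourceLocation Loc,
                  llvm::StringRef Msg) {
      unsigned ID =
          Diag.getCustomDiagID(clang::DiagnosticsEngine::Warning, "%0");
      Diag.Report(Loc, ID) << Msg;
    }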
+
+//===----------------------------------------------------------------------===//
+// AnalysisConsumer declaration.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class AnalysisConsumer : public AnalysisASTConsumer,
+ public RecursiveASTVisitor<AnalysisConsumer> {
+ enum {
+ AM_None = 0,
+ AM_Syntax = 0x1,
+ AM_Path = 0x2
+ };
+ typedef unsigned AnalysisMode;
+
+  /// Mode of the analyses while recursively visiting Decls.
+ AnalysisMode RecVisitorMode;
+ /// Bug Reporter to use while recursively visiting Decls.
+ BugReporter *RecVisitorBR;
+
+public:
+ ASTContext *Ctx;
+ const Preprocessor &PP;
+ const std::string OutDir;
+ AnalyzerOptionsRef Opts;
+ ArrayRef<std::string> Plugins;
+ CodeInjector *Injector;
+
+ /// \brief Stores the declarations from the local translation unit.
+ /// Note, we pre-compute the local declarations at parse time as an
+ /// optimization to make sure we do not deserialize everything from disk.
+ /// The local declaration to all declarations ratio might be very small when
+ /// working with a PCH file.
+ SetOfDecls LocalTUDecls;
+
+ // Set of PathDiagnosticConsumers. Owned by AnalysisManager.
+ PathDiagnosticConsumers PathConsumers;
+
+ StoreManagerCreator CreateStoreMgr;
+ ConstraintManagerCreator CreateConstraintMgr;
+
+ std::unique_ptr<CheckerManager> checkerMgr;
+ std::unique_ptr<AnalysisManager> Mgr;
+
+  /// Timer that tracks the analysis time of each translation unit.
+ static llvm::Timer* TUTotalTimer;
+
+ /// The information about analyzed functions shared throughout the
+ /// translation unit.
+ FunctionSummariesTy FunctionSummaries;
+
+ AnalysisConsumer(const Preprocessor& pp,
+ const std::string& outdir,
+ AnalyzerOptionsRef opts,
+ ArrayRef<std::string> plugins,
+ CodeInjector *injector)
+ : RecVisitorMode(0), RecVisitorBR(nullptr), Ctx(nullptr), PP(pp),
+ OutDir(outdir), Opts(opts), Plugins(plugins), Injector(injector) {
+ DigestAnalyzerOptions();
+ if (Opts->PrintStats) {
+ llvm::EnableStatistics();
+ TUTotalTimer = new llvm::Timer("Analyzer Total Time");
+ }
+ }
+
+ ~AnalysisConsumer() override {
+ if (Opts->PrintStats)
+ delete TUTotalTimer;
+ }
+
+ void DigestAnalyzerOptions() {
+ if (Opts->AnalysisDiagOpt != PD_NONE) {
+ // Create the PathDiagnosticConsumer.
+ ClangDiagPathDiagConsumer *clangDiags =
+ new ClangDiagPathDiagConsumer(PP.getDiagnostics());
+ PathConsumers.push_back(clangDiags);
+
+ if (Opts->AnalysisDiagOpt == PD_TEXT) {
+ clangDiags->enablePaths();
+
+ } else if (!OutDir.empty()) {
+ switch (Opts->AnalysisDiagOpt) {
+ default:
+#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN) \
+ case PD_##NAME: \
+ CREATEFN(*Opts.get(), PathConsumers, OutDir, PP); \
+ break;
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+ }
+ }
+ }
+
+ // Create the analyzer component creators.
+ switch (Opts->AnalysisStoreOpt) {
+ default:
+ llvm_unreachable("Unknown store manager.");
+#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATEFN) \
+ case NAME##Model: CreateStoreMgr = CREATEFN; break;
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+ }
+
+ switch (Opts->AnalysisConstraintsOpt) {
+ default:
+ llvm_unreachable("Unknown constraint manager.");
+#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATEFN) \
+ case NAME##Model: CreateConstraintMgr = CREATEFN; break;
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+ }
+ }
+
+ void DisplayFunction(const Decl *D, AnalysisMode Mode,
+ ExprEngine::InliningModes IMode) {
+ if (!Opts->AnalyzerDisplayProgress)
+ return;
+
+ SourceManager &SM = Mgr->getASTContext().getSourceManager();
+ PresumedLoc Loc = SM.getPresumedLoc(D->getLocation());
+ if (Loc.isValid()) {
+ llvm::errs() << "ANALYZE";
+
+ if (Mode == AM_Syntax)
+ llvm::errs() << " (Syntax)";
+ else if (Mode == AM_Path) {
+ llvm::errs() << " (Path, ";
+ switch (IMode) {
+ case ExprEngine::Inline_Minimal:
+ llvm::errs() << " Inline_Minimal";
+ break;
+ case ExprEngine::Inline_Regular:
+ llvm::errs() << " Inline_Regular";
+ break;
+ }
+ llvm::errs() << ")";
+ }
+ else
+ assert(Mode == (AM_Syntax | AM_Path) && "Unexpected mode!");
+
+ llvm::errs() << ": " << Loc.getFilename();
+ if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
+ const NamedDecl *ND = cast<NamedDecl>(D);
+ llvm::errs() << ' ' << *ND << '\n';
+ }
+      else if (isa<BlockDecl>(D)) {
+        llvm::errs() << ' ' << "block(line:" << Loc.getLine() << ",col:"
+                     << Loc.getColumn() << ")" << '\n';
+      }
+      // Note: a dyn_cast<ObjCMethodDecl> branch here would be dead code,
+      // since the first branch above already matches ObjCMethodDecls.
+ }
+ }
+
+ void Initialize(ASTContext &Context) override {
+ Ctx = &Context;
+ checkerMgr = createCheckerManager(*Opts, PP.getLangOpts(), Plugins,
+ PP.getDiagnostics());
+
+ Mgr = llvm::make_unique<AnalysisManager>(
+ *Ctx, PP.getDiagnostics(), PP.getLangOpts(), PathConsumers,
+ CreateStoreMgr, CreateConstraintMgr, checkerMgr.get(), *Opts, Injector);
+ }
+
+ /// \brief Store the top level decls in the set to be processed later on.
+ /// (Doing this pre-processing avoids deserialization of data from PCH.)
+ bool HandleTopLevelDecl(DeclGroupRef D) override;
+ void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) override;
+
+ void HandleTranslationUnit(ASTContext &C) override;
+
+ /// \brief Determine which inlining mode should be used when this function is
+  /// analyzed. This allows redefining the default inlining policies when
+ /// analyzing a given function.
+ ExprEngine::InliningModes
+ getInliningModeForFunction(const Decl *D, const SetOfConstDecls &Visited);
+
+ /// \brief Build the call graph for all the top level decls of this TU and
+ /// use it to define the order in which the functions should be visited.
+ void HandleDeclsCallGraph(const unsigned LocalTUDeclsSize);
+
+  /// \brief Run analyses (syntax or path-sensitive) on the given function.
+ /// \param Mode - determines if we are requesting syntax only or path
+ /// sensitive only analysis.
+ /// \param VisitedCallees - The output parameter, which is populated with the
+ /// set of functions which should be considered analyzed after analyzing the
+ /// given root function.
+ void HandleCode(Decl *D, AnalysisMode Mode,
+ ExprEngine::InliningModes IMode = ExprEngine::Inline_Minimal,
+ SetOfConstDecls *VisitedCallees = nullptr);
+
+ void RunPathSensitiveChecks(Decl *D,
+ ExprEngine::InliningModes IMode,
+ SetOfConstDecls *VisitedCallees);
+ void ActionExprEngine(Decl *D, bool ObjCGCEnabled,
+ ExprEngine::InliningModes IMode,
+ SetOfConstDecls *VisitedCallees);
+
+ /// Visitors for the RecursiveASTVisitor.
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ /// Handle callbacks for arbitrary Decls.
+ bool VisitDecl(Decl *D) {
+ AnalysisMode Mode = getModeForDecl(D, RecVisitorMode);
+ if (Mode & AM_Syntax)
+ checkerMgr->runCheckersOnASTDecl(D, *Mgr, *RecVisitorBR);
+ return true;
+ }
+
+ bool VisitFunctionDecl(FunctionDecl *FD) {
+ IdentifierInfo *II = FD->getIdentifier();
+ if (II && II->getName().startswith("__inline"))
+ return true;
+
+    // We skip function template definitions, as their semantics are
+    // only determined when they are instantiated.
+ if (FD->isThisDeclarationADefinition() &&
+ !FD->isDependentContext()) {
+ assert(RecVisitorMode == AM_Syntax || Mgr->shouldInlineCall() == false);
+ HandleCode(FD, RecVisitorMode);
+ }
+ return true;
+ }
+
+ bool VisitObjCMethodDecl(ObjCMethodDecl *MD) {
+ if (MD->isThisDeclarationADefinition()) {
+ assert(RecVisitorMode == AM_Syntax || Mgr->shouldInlineCall() == false);
+ HandleCode(MD, RecVisitorMode);
+ }
+ return true;
+ }
+
+ bool VisitBlockDecl(BlockDecl *BD) {
+ if (BD->hasBody()) {
+ assert(RecVisitorMode == AM_Syntax || Mgr->shouldInlineCall() == false);
+ // Since we skip function template definitions, we should skip blocks
+ // declared in those functions as well.
+ if (!BD->isDependentContext()) {
+ HandleCode(BD, RecVisitorMode);
+ }
+ }
+ return true;
+ }
+
+ void AddDiagnosticConsumer(PathDiagnosticConsumer *Consumer) override {
+ PathConsumers.push_back(Consumer);
+ }
+
+private:
+ void storeTopLevelDecls(DeclGroupRef DG);
+
+ /// \brief Check if we should skip (not analyze) the given function.
+ AnalysisMode getModeForDecl(Decl *D, AnalysisMode Mode);
+
+};
+} // end anonymous namespace
+
+
+//===----------------------------------------------------------------------===//
+// AnalysisConsumer implementation.
+//===----------------------------------------------------------------------===//
+llvm::Timer* AnalysisConsumer::TUTotalTimer = nullptr;
+
+bool AnalysisConsumer::HandleTopLevelDecl(DeclGroupRef DG) {
+ storeTopLevelDecls(DG);
+ return true;
+}
+
+void AnalysisConsumer::HandleTopLevelDeclInObjCContainer(DeclGroupRef DG) {
+ storeTopLevelDecls(DG);
+}
+
+void AnalysisConsumer::storeTopLevelDecls(DeclGroupRef DG) {
+ for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I) {
+
+    // Skip ObjCMethodDecls; wait for the ObjC container to avoid
+    // analyzing them twice.
+ if (isa<ObjCMethodDecl>(*I))
+ continue;
+
+ LocalTUDecls.push_back(*I);
+ }
+}
+
+static bool shouldSkipFunction(const Decl *D,
+ const SetOfConstDecls &Visited,
+ const SetOfConstDecls &VisitedAsTopLevel) {
+ if (VisitedAsTopLevel.count(D))
+ return true;
+
+  // We want to re-analyze these functions as top level in the following cases:
+ // - The 'init' methods should be reanalyzed because
+ // ObjCNonNilReturnValueChecker assumes that '[super init]' never returns
+ // 'nil' and unless we analyze the 'init' functions as top level, we will
+ // not catch errors within defensive code.
+ // - We want to reanalyze all ObjC methods as top level to report Retain
+ // Count naming convention errors more aggressively.
+ if (isa<ObjCMethodDecl>(D))
+ return false;
+
+ // Otherwise, if we visited the function before, do not reanalyze it.
+ return Visited.count(D);
+}
+
+ExprEngine::InliningModes
+AnalysisConsumer::getInliningModeForFunction(const Decl *D,
+ const SetOfConstDecls &Visited) {
+ // We want to reanalyze all ObjC methods as top level to report Retain
+ // Count naming convention errors more aggressively. But we should tune down
+ // inlining when reanalyzing an already inlined function.
+ if (Visited.count(D)) {
+ assert(isa<ObjCMethodDecl>(D) &&
+ "We are only reanalyzing ObjCMethods.");
+ const ObjCMethodDecl *ObjCM = cast<ObjCMethodDecl>(D);
+ if (ObjCM->getMethodFamily() != OMF_init)
+ return ExprEngine::Inline_Minimal;
+ }
+
+ return ExprEngine::Inline_Regular;
+}
+
+void AnalysisConsumer::HandleDeclsCallGraph(const unsigned LocalTUDeclsSize) {
+ // Build the Call Graph by adding all the top level declarations to the graph.
+  // Note: CallGraph can trigger deserialization of more items from a PCH
+  // (through HandleInterestingDecl), triggering additions to LocalTUDecls.
+ // We rely on random access to add the initially processed Decls to CG.
+ CallGraph CG;
+ for (unsigned i = 0 ; i < LocalTUDeclsSize ; ++i) {
+ CG.addToCallGraph(LocalTUDecls[i]);
+ }
+
+ // Walk over all of the call graph nodes in topological order, so that we
+ // analyze parents before the children. Skip the functions inlined into
+ // the previously processed functions. Use external Visited set to identify
+ // inlined functions. The topological order allows the "do not reanalyze
+ // previously inlined function" performance heuristic to be triggered more
+ // often.
+ SetOfConstDecls Visited;
+ SetOfConstDecls VisitedAsTopLevel;
+ llvm::ReversePostOrderTraversal<clang::CallGraph*> RPOT(&CG);
+ for (llvm::ReversePostOrderTraversal<clang::CallGraph*>::rpo_iterator
+ I = RPOT.begin(), E = RPOT.end(); I != E; ++I) {
+ NumFunctionTopLevel++;
+
+ CallGraphNode *N = *I;
+ Decl *D = N->getDecl();
+
+ // Skip the abstract root node.
+ if (!D)
+ continue;
+
+ // Skip the functions which have been processed already or previously
+ // inlined.
+ if (shouldSkipFunction(D, Visited, VisitedAsTopLevel))
+ continue;
+
+ // Analyze the function.
+ SetOfConstDecls VisitedCallees;
+
+ HandleCode(D, AM_Path, getInliningModeForFunction(D, Visited),
+ (Mgr->options.InliningMode == All ? nullptr : &VisitedCallees));
+
+ // Add the visited callees to the global visited set.
+ for (SetOfConstDecls::iterator I = VisitedCallees.begin(),
+ E = VisitedCallees.end(); I != E; ++I) {
+ Visited.insert(*I);
+ }
+ VisitedAsTopLevel.insert(D);
+ }
+}
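The reverse post-order walk is what makes the skip heuristic pay off: callers are analyzed before their callees, so callees are usually covered by inlining first. The same bookkeeping in miniature, over a plain adjacency list (standard C++ only, not the CallGraph API, and ignoring the ObjC re-analysis exception):

    #include <set>
    #include <vector>

    // Toy version of the traversal above: visit callers first, skip
    // anything already covered as somebody's inlined callee.
    void analyzeTopDown(const std::vector<std::vector<int>> &Callees,
                        const std::vector<int> &CallerFirstOrder) {
      std::set<int> Visited;           // covered by inlining
      std::set<int> VisitedAsTopLevel; // analyzed as a root
      for (int Fn : CallerFirstOrder) {
        if (Visited.count(Fn) || VisitedAsTopLevel.count(Fn))
          continue; // the skip heuristic
        for (int Callee : Callees[Fn])
          Visited.insert(Callee); // callees folded into this analysis
        VisitedAsTopLevel.insert(Fn);
      }
    }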
+
+void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
+ // Don't run the actions if an error has occurred with parsing the file.
+ DiagnosticsEngine &Diags = PP.getDiagnostics();
+ if (Diags.hasErrorOccurred() || Diags.hasFatalErrorOccurred())
+ return;
+
+ // Don't analyze if the user explicitly asked for no checks to be performed
+ // on this file.
+ if (Opts->DisableAllChecks)
+ return;
+
+ {
+ if (TUTotalTimer) TUTotalTimer->startTimer();
+
+ // Introduce a scope to destroy BR before Mgr.
+ BugReporter BR(*Mgr);
+ TranslationUnitDecl *TU = C.getTranslationUnitDecl();
+ checkerMgr->runCheckersOnASTDecl(TU, *Mgr, BR);
+
+ // Run the AST-only checks using the order in which functions are defined.
+    // If inlining is not turned on, use the simplest function order for
+    // path-sensitive analyses as well.
+ RecVisitorMode = AM_Syntax;
+ if (!Mgr->shouldInlineCall())
+ RecVisitorMode |= AM_Path;
+ RecVisitorBR = &BR;
+
+ // Process all the top level declarations.
+ //
+ // Note: TraverseDecl may modify LocalTUDecls, but only by appending more
+ // entries. Thus we don't use an iterator, but rely on LocalTUDecls
+ // random access. By doing so, we automatically compensate for iterators
+ // possibly being invalidated, although this is a bit slower.
+ const unsigned LocalTUDeclsSize = LocalTUDecls.size();
+ for (unsigned i = 0 ; i < LocalTUDeclsSize ; ++i) {
+ TraverseDecl(LocalTUDecls[i]);
+ }
+
+ if (Mgr->shouldInlineCall())
+ HandleDeclsCallGraph(LocalTUDeclsSize);
+
+ // After all decls handled, run checkers on the entire TranslationUnit.
+ checkerMgr->runCheckersOnEndOfTranslationUnit(TU, *Mgr, BR);
+
+ RecVisitorBR = nullptr;
+ }
+
+ // Explicitly destroy the PathDiagnosticConsumer. This will flush its output.
+ // FIXME: This should be replaced with something that doesn't rely on
+ // side-effects in PathDiagnosticConsumer's destructor. This is required when
+ // used with option -disable-free.
+ Mgr.reset();
+
+ if (TUTotalTimer) TUTotalTimer->stopTimer();
+
+ // Count how many basic blocks we have not covered.
+ NumBlocksInAnalyzedFunctions = FunctionSummaries.getTotalNumBasicBlocks();
+ if (NumBlocksInAnalyzedFunctions > 0)
+ PercentReachableBlocks =
+ (FunctionSummaries.getTotalNumVisitedBasicBlocks() * 100) /
+ NumBlocksInAnalyzedFunctions;
+
+}
+
+static std::string getFunctionName(const Decl *D) {
+ if (const ObjCMethodDecl *ID = dyn_cast<ObjCMethodDecl>(D)) {
+ return ID->getSelector().getAsString();
+ }
+ if (const FunctionDecl *ND = dyn_cast<FunctionDecl>(D)) {
+ IdentifierInfo *II = ND->getIdentifier();
+ if (II)
+ return II->getName();
+ }
+ return "";
+}
+
+AnalysisConsumer::AnalysisMode
+AnalysisConsumer::getModeForDecl(Decl *D, AnalysisMode Mode) {
+ if (!Opts->AnalyzeSpecificFunction.empty() &&
+ getFunctionName(D) != Opts->AnalyzeSpecificFunction)
+ return AM_None;
+
+ // Unless -analyze-all is specified, treat decls differently depending on
+ // where they came from:
+ // - Main source file: run both path-sensitive and non-path-sensitive checks.
+ // - Header files: run non-path-sensitive checks only.
+ // - System headers: don't run any checks.
+ SourceManager &SM = Ctx->getSourceManager();
+ const Stmt *Body = D->getBody();
+ SourceLocation SL = Body ? Body->getLocStart() : D->getLocation();
+ SL = SM.getExpansionLoc(SL);
+
+ if (!Opts->AnalyzeAll && !SM.isWrittenInMainFile(SL)) {
+ if (SL.isInvalid() || SM.isInSystemHeader(SL))
+ return AM_None;
+ return Mode & ~AM_Path;
+ }
+
+ return Mode;
+}
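Stated as a standalone policy, the logic above reduces to a three-way split; a condensed sketch (not the real API, ignoring the AnalyzeSpecificFunction filter and invalid locations):

    // Condensed restatement of getModeForDecl()'s location policy.
    // Returns a bitmask in the spirit of AM_Syntax / AM_Path.
    unsigned modeForLocation(bool InMainFile, bool InSystemHeader,
                             bool AnalyzeAll) {
      const unsigned Syntax = 0x1, Path = 0x2;
      if (AnalyzeAll || InMainFile)
        return Syntax | Path; // run everything
      if (InSystemHeader)
        return 0;             // AM_None: skip entirely
      return Syntax;          // other headers: non-path-sensitive only
    }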
+
+void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
+ ExprEngine::InliningModes IMode,
+ SetOfConstDecls *VisitedCallees) {
+ if (!D->hasBody())
+ return;
+ Mode = getModeForDecl(D, Mode);
+ if (Mode == AM_None)
+ return;
+
+ DisplayFunction(D, Mode, IMode);
+ CFG *DeclCFG = Mgr->getCFG(D);
+ if (DeclCFG) {
+ unsigned CFGSize = DeclCFG->size();
+ MaxCFGSize = MaxCFGSize < CFGSize ? CFGSize : MaxCFGSize;
+ }
+
+ // Clear the AnalysisManager of old AnalysisDeclContexts.
+ Mgr->ClearContexts();
+ BugReporter BR(*Mgr);
+
+ if (Mode & AM_Syntax)
+ checkerMgr->runCheckersOnASTBody(D, *Mgr, BR);
+ if ((Mode & AM_Path) && checkerMgr->hasPathSensitiveCheckers()) {
+ RunPathSensitiveChecks(D, IMode, VisitedCallees);
+ if (IMode != ExprEngine::Inline_Minimal)
+ NumFunctionsAnalyzed++;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Path-sensitive checking.
+//===----------------------------------------------------------------------===//
+
+void AnalysisConsumer::ActionExprEngine(Decl *D, bool ObjCGCEnabled,
+ ExprEngine::InliningModes IMode,
+ SetOfConstDecls *VisitedCallees) {
+ // Construct the analysis engine. First check if the CFG is valid.
+ // FIXME: Inter-procedural analysis will need to handle invalid CFGs.
+ if (!Mgr->getCFG(D))
+ return;
+
+ // See if the LiveVariables analysis scales.
+ if (!Mgr->getAnalysisDeclContext(D)->getAnalysis<RelaxedLiveVariables>())
+ return;
+
+ ExprEngine Eng(*Mgr, ObjCGCEnabled, VisitedCallees, &FunctionSummaries,IMode);
+
+ // Set the graph auditor.
+ std::unique_ptr<ExplodedNode::Auditor> Auditor;
+ if (Mgr->options.visualizeExplodedGraphWithUbiGraph) {
+ Auditor = CreateUbiViz();
+ ExplodedNode::SetAuditor(Auditor.get());
+ }
+
+ // Execute the worklist algorithm.
+ Eng.ExecuteWorkList(Mgr->getAnalysisDeclContextManager().getStackFrame(D),
+ Mgr->options.getMaxNodesPerTopLevelFunction());
+
+  // Release the auditor (if any) so that it doesn't monitor the graph
+  // created by BugReporter.
+ ExplodedNode::SetAuditor(nullptr);
+
+ // Visualize the exploded graph.
+ if (Mgr->options.visualizeExplodedGraphWithGraphViz)
+ Eng.ViewGraph(Mgr->options.TrimGraph);
+
+ // Display warnings.
+ Eng.getBugReporter().FlushReports();
+}
+
+void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
+ ExprEngine::InliningModes IMode,
+ SetOfConstDecls *Visited) {
+
+ switch (Mgr->getLangOpts().getGC()) {
+ case LangOptions::NonGC:
+ ActionExprEngine(D, false, IMode, Visited);
+ break;
+
+ case LangOptions::GCOnly:
+ ActionExprEngine(D, true, IMode, Visited);
+ break;
+
+ case LangOptions::HybridGC:
+ ActionExprEngine(D, false, IMode, Visited);
+ ActionExprEngine(D, true, IMode, Visited);
+ break;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// AnalysisConsumer creation.
+//===----------------------------------------------------------------------===//
+
+std::unique_ptr<AnalysisASTConsumer>
+ento::CreateAnalysisConsumer(CompilerInstance &CI) {
+ // Disable the effects of '-Werror' when using the AnalysisConsumer.
+ CI.getPreprocessor().getDiagnostics().setWarningsAsErrors(false);
+
+ AnalyzerOptionsRef analyzerOpts = CI.getAnalyzerOpts();
+ bool hasModelPath = analyzerOpts->Config.count("model-path") > 0;
+
+ return llvm::make_unique<AnalysisConsumer>(
+ CI.getPreprocessor(), CI.getFrontendOpts().OutputFile, analyzerOpts,
+ CI.getFrontendOpts().Plugins,
+ hasModelPath ? new ModelInjector(CI) : nullptr);
+}
+
+//===----------------------------------------------------------------------===//
+// Ubigraph Visualization. FIXME: Move to separate file.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class UbigraphViz : public ExplodedNode::Auditor {
+ std::unique_ptr<raw_ostream> Out;
+ std::string Filename;
+ unsigned Cntr;
+
+ typedef llvm::DenseMap<void*,unsigned> VMap;
+ VMap M;
+
+public:
+ UbigraphViz(std::unique_ptr<raw_ostream> Out, StringRef Filename);
+
+ ~UbigraphViz() override;
+
+ void AddEdge(ExplodedNode *Src, ExplodedNode *Dst) override;
+};
+
+} // end anonymous namespace
+
+static std::unique_ptr<ExplodedNode::Auditor> CreateUbiViz() {
+ SmallString<128> P;
+ int FD;
+ llvm::sys::fs::createTemporaryFile("llvm_ubi", "", FD, P);
+ llvm::errs() << "Writing '" << P << "'.\n";
+
+ auto Stream = llvm::make_unique<llvm::raw_fd_ostream>(FD, true);
+
+ return llvm::make_unique<UbigraphViz>(std::move(Stream), P);
+}
+
+void UbigraphViz::AddEdge(ExplodedNode *Src, ExplodedNode *Dst) {
+
+  assert(Src != Dst && "Self-edges are not allowed.");
+
+ // Lookup the Src. If it is a new node, it's a root.
+  VMap::iterator SrcI = M.find(Src);
+ unsigned SrcID;
+
+ if (SrcI == M.end()) {
+ M[Src] = SrcID = Cntr++;
+ *Out << "('vertex', " << SrcID << ", ('color','#00ff00'))\n";
+ }
+ else
+ SrcID = SrcI->second;
+
+ // Lookup the Dst.
+  VMap::iterator DstI = M.find(Dst);
+ unsigned DstID;
+
+ if (DstI == M.end()) {
+ M[Dst] = DstID = Cntr++;
+ *Out << "('vertex', " << DstID << ")\n";
+ }
+ else {
+ // We have hit DstID before. Change its style to reflect a cache hit.
+ DstID = DstI->second;
+ *Out << "('change_vertex_style', " << DstID << ", 1)\n";
+ }
+
+ // Add the edge.
+ *Out << "('edge', " << SrcID << ", " << DstID
+ << ", ('arrow','true'), ('oriented', 'true'))\n";
+}
+
+UbigraphViz::UbigraphViz(std::unique_ptr<raw_ostream> OutStream,
+ StringRef Filename)
+ : Out(std::move(OutStream)), Filename(Filename), Cntr(0) {
+
+ *Out << "('vertex_style_attribute', 0, ('shape', 'icosahedron'))\n";
+ *Out << "('vertex_style', 1, 0, ('shape', 'sphere'), ('color', '#ffcc66'),"
+ " ('size', '1.5'))\n";
+}
+
+UbigraphViz::~UbigraphViz() {
+ Out.reset();
+ llvm::errs() << "Running 'ubiviz' program... ";
+ std::string ErrMsg;
+ std::string Ubiviz;
+ if (auto Path = llvm::sys::findProgramByName("ubiviz"))
+ Ubiviz = *Path;
+ std::vector<const char*> args;
+ args.push_back(Ubiviz.c_str());
+ args.push_back(Filename.c_str());
+ args.push_back(nullptr);
+
+ if (llvm::sys::ExecuteAndWait(Ubiviz, &args[0], nullptr, nullptr, 0, 0,
+ &ErrMsg)) {
+ llvm::errs() << "Error viewing graph: " << ErrMsg << "\n";
+ }
+
+ // Delete the file.
+ llvm::sys::fs::remove(Filename);
+}
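For quick experiments, the whole pipeline can also be driven from LibTooling; a sketch assuming this era's raw-pointer runToolOnCodeWithArgs() overload and that a default-constructed AnalysisAction suffices:

    #include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
    #include "clang/Tooling/Tooling.h"

    // Sketch: run the analyzer over a code snippet, e.g. in a unit test.
    // Checker selection reaches the cc1 invocation via -Xclang.
    bool analyzeSnippet(const std::string &Code) {
      return clang::tooling::runToolOnCodeWithArgs(
          new clang::ento::AnalysisAction, Code,
          {"-Xclang", "-analyzer-checker=core"});
    }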
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
new file mode 100644
index 0000000..75fa4c6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
@@ -0,0 +1,139 @@
+//===--- CheckerRegistration.cpp - Registration for the Analyzer Checkers -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the registration function for the analyzer checkers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Frontend/CheckerRegistration.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/StaticAnalyzer/Checkers/ClangCheckers.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/CheckerOptInfo.h"
+#include "clang/StaticAnalyzer/Core/CheckerRegistry.h"
+#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include <memory>
+
+using namespace clang;
+using namespace ento;
+using llvm::sys::DynamicLibrary;
+
+namespace {
+class ClangCheckerRegistry : public CheckerRegistry {
+ typedef void (*RegisterCheckersFn)(CheckerRegistry &);
+
+ static bool isCompatibleAPIVersion(const char *versionString);
+ static void warnIncompatible(DiagnosticsEngine *diags, StringRef pluginPath,
+ const char *pluginAPIVersion);
+
+public:
+ ClangCheckerRegistry(ArrayRef<std::string> plugins,
+ DiagnosticsEngine *diags = nullptr);
+};
+
+} // end anonymous namespace
+
+ClangCheckerRegistry::ClangCheckerRegistry(ArrayRef<std::string> plugins,
+ DiagnosticsEngine *diags) {
+ registerBuiltinCheckers(*this);
+
+ for (ArrayRef<std::string>::iterator i = plugins.begin(), e = plugins.end();
+ i != e; ++i) {
+ // Get access to the plugin.
+ std::string err;
+ DynamicLibrary lib = DynamicLibrary::getPermanentLibrary(i->c_str(), &err);
+ if (!lib.isValid()) {
+ diags->Report(diag::err_fe_unable_to_load_plugin) << *i << err;
+ continue;
+ }
+
+ // See if it's compatible with this build of clang.
+ const char *pluginAPIVersion =
+ (const char *) lib.getAddressOfSymbol("clang_analyzerAPIVersionString");
+ if (!isCompatibleAPIVersion(pluginAPIVersion)) {
+ warnIncompatible(diags, *i, pluginAPIVersion);
+ continue;
+ }
+
+ // Register its checkers.
+ RegisterCheckersFn registerPluginCheckers =
+ (RegisterCheckersFn) (intptr_t) lib.getAddressOfSymbol(
+ "clang_registerCheckers");
+ if (registerPluginCheckers)
+ registerPluginCheckers(*this);
+ }
+}
+
+bool ClangCheckerRegistry::isCompatibleAPIVersion(const char *versionString) {
+ // If the version string is null, it's not an analyzer plugin.
+ if (!versionString)
+ return false;
+
+ // For now, none of the static analyzer API is considered stable.
+ // Versions must match exactly.
+ return strcmp(versionString, CLANG_ANALYZER_API_VERSION_STRING) == 0;
+}
+
+void ClangCheckerRegistry::warnIncompatible(DiagnosticsEngine *diags,
+ StringRef pluginPath,
+ const char *pluginAPIVersion) {
+ if (!diags)
+ return;
+ if (!pluginAPIVersion)
+ return;
+
+ diags->Report(diag::warn_incompatible_analyzer_plugin_api)
+ << llvm::sys::path::filename(pluginPath);
+ diags->Report(diag::note_incompatible_analyzer_plugin_api)
+ << CLANG_ANALYZER_API_VERSION_STRING
+ << pluginAPIVersion;
+}
+
+std::unique_ptr<CheckerManager>
+ento::createCheckerManager(AnalyzerOptions &opts, const LangOptions &langOpts,
+ ArrayRef<std::string> plugins,
+ DiagnosticsEngine &diags) {
+ std::unique_ptr<CheckerManager> checkerMgr(
+ new CheckerManager(langOpts, &opts));
+
+ SmallVector<CheckerOptInfo, 8> checkerOpts;
+ for (unsigned i = 0, e = opts.CheckersControlList.size(); i != e; ++i) {
+ const std::pair<std::string, bool> &opt = opts.CheckersControlList[i];
+ checkerOpts.push_back(CheckerOptInfo(opt.first.c_str(), opt.second));
+ }
+
+ ClangCheckerRegistry allCheckers(plugins, &diags);
+ allCheckers.initializeManager(*checkerMgr, checkerOpts);
+ allCheckers.validateCheckerOptions(opts, diags);
+ checkerMgr->finishedCheckerRegistration();
+
+ for (unsigned i = 0, e = checkerOpts.size(); i != e; ++i) {
+ if (checkerOpts[i].isUnclaimed()) {
+ diags.Report(diag::err_unknown_analyzer_checker)
+ << checkerOpts[i].getName();
+ diags.Report(diag::note_suggest_disabling_all_checkers);
+ }
+
+ }
+
+ return checkerMgr;
+}
+
+void ento::printCheckerHelp(raw_ostream &out, ArrayRef<std::string> plugins) {
+ out << "OVERVIEW: Clang Static Analyzer Checkers List\n\n";
+ out << "USAGE: -analyzer-checker <CHECKER or PACKAGE,...>\n\n";
+
+ ClangCheckerRegistry(plugins).printHelp(out);
+}
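The plugin side of this handshake is small. A skeleton of a loadable checker plugin exporting exactly the two symbols ClangCheckerRegistry looks up (the checker itself is a placeholder):

    // Skeleton analyzer plugin, built as a shared library.
    #include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
    #include "clang/StaticAnalyzer/Core/Checker.h"
    #include "clang/StaticAnalyzer/Core/CheckerManager.h"
    #include "clang/StaticAnalyzer/Core/CheckerRegistry.h"

    using namespace clang;
    using namespace ento;

    namespace {
    class ExampleChecker : public Checker<check::ASTCodeBody> {
    public:
      void checkASTCodeBody(const Decl *D, AnalysisManager &Mgr,
                            BugReporter &BR) const {
        // Placeholder: a real checker would inspect D and file reports.
      }
    };
    } // end anonymous namespace

    extern "C" void clang_registerCheckers(CheckerRegistry &Registry) {
      Registry.addChecker<ExampleChecker>("example.PluginChecker",
                                          "Hypothetical demo checker");
    }

    extern "C" const char clang_analyzerAPIVersionString[] =
        CLANG_ANALYZER_API_VERSION_STRING;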
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/FrontendActions.cpp
new file mode 100644
index 0000000..b336080
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/FrontendActions.cpp
@@ -0,0 +1,28 @@
+//===--- FrontendActions.cpp ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
+#include "clang/StaticAnalyzer/Frontend/AnalysisConsumer.h"
+#include "clang/StaticAnalyzer/Frontend/ModelConsumer.h"
+using namespace clang;
+using namespace ento;
+
+std::unique_ptr<ASTConsumer>
+AnalysisAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ return CreateAnalysisConsumer(CI);
+}
+
+ParseModelFileAction::ParseModelFileAction(llvm::StringMap<Stmt *> &Bodies)
+ : Bodies(Bodies) {}
+
+std::unique_ptr<ASTConsumer>
+ParseModelFileAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return llvm::make_unique<ModelConsumer>(Bodies);
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp
new file mode 100644
index 0000000..a65a5ee
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp
@@ -0,0 +1,42 @@
+//===--- ModelConsumer.cpp - ASTConsumer for consuming model files --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements an ASTConsumer for consuming model files.
+///
+/// This ASTConsumer handles the AST of a parsed model file. All top level
+/// function definitions will be collected from that model file for later
+/// retrieval during the static analysis. The body of these functions will not
+/// be injected into the ASTUnit of the analyzed translation unit. It will be
+/// available through the BodyFarm which is utilized by the AnalysisDeclContext
+/// class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Frontend/ModelConsumer.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclGroup.h"
+
+using namespace clang;
+using namespace ento;
+
+ModelConsumer::ModelConsumer(llvm::StringMap<Stmt *> &Bodies)
+ : Bodies(Bodies) {}
+
+bool ModelConsumer::HandleTopLevelDecl(DeclGroupRef D) {
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
+
+ // Only interested in definitions.
+ const FunctionDecl *func = llvm::dyn_cast<FunctionDecl>(*I);
+ if (func && func->hasBody()) {
+ Bodies.insert(std::make_pair(func->getName(), func->getBody()));
+ }
+ }
+ return true;
+}
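Concretely, a model file is ordinary code whose file name matches the modeled function; a hypothetical example (the function and its semantics are made up):

    // isOdd.model -- hypothetical model file. ModelConsumer stores this
    // body under the name "isOdd"; the analyzer inlines it wherever the
    // analyzed TU declares isOdd() without a definition.
    bool isOdd(int i) {
      return i % 2;  // approximates the real library's behavior
    }

The directory holding such files is presumably supplied through the model-path config entry that ModelInjector reads below.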
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/ModelInjector.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
new file mode 100644
index 0000000..ee2c3f5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
@@ -0,0 +1,117 @@
+//===-- ModelInjector.cpp ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ModelInjector.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Frontend/ASTUnit.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendAction.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Serialization/ASTReader.h"
+#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/CrashRecoveryContext.h"
+#include "llvm/Support/FileSystem.h"
+#include <string>
+#include <utility>
+
+using namespace clang;
+using namespace ento;
+
+ModelInjector::ModelInjector(CompilerInstance &CI) : CI(CI) {}
+
+Stmt *ModelInjector::getBody(const FunctionDecl *D) {
+ onBodySynthesis(D);
+ return Bodies[D->getName()];
+}
+
+Stmt *ModelInjector::getBody(const ObjCMethodDecl *D) {
+ onBodySynthesis(D);
+ return Bodies[D->getName()];
+}
+
+void ModelInjector::onBodySynthesis(const NamedDecl *D) {
+
+ // FIXME: what about overloads? Declarations can be used as keys but what
+ // about file name index? Mangled names may not be suitable for that either.
+ if (Bodies.count(D->getName()) != 0)
+ return;
+
+ SourceManager &SM = CI.getSourceManager();
+ FileID mainFileID = SM.getMainFileID();
+
+ AnalyzerOptionsRef analyzerOpts = CI.getAnalyzerOpts();
+ llvm::StringRef modelPath = analyzerOpts->Config["model-path"];
+
+ llvm::SmallString<128> fileName;
+
+ if (!modelPath.empty())
+ fileName =
+ llvm::StringRef(modelPath.str() + "/" + D->getName().str() + ".model");
+ else
+ fileName = llvm::StringRef(D->getName().str() + ".model");
+
+ if (!llvm::sys::fs::exists(fileName.str())) {
+ Bodies[D->getName()] = nullptr;
+ return;
+ }
+
+ IntrusiveRefCntPtr<CompilerInvocation> Invocation(
+ new CompilerInvocation(CI.getInvocation()));
+
+ FrontendOptions &FrontendOpts = Invocation->getFrontendOpts();
+ InputKind IK = IK_CXX; // FIXME
+ FrontendOpts.Inputs.clear();
+ FrontendOpts.Inputs.emplace_back(fileName, IK);
+ FrontendOpts.DisableFree = true;
+
+ Invocation->getDiagnosticOpts().VerifyDiagnostics = 0;
+
+  // Modules are parsed by a separate CompilerInstance, so this code mimics
+  // that behavior for model files.
+ CompilerInstance Instance(CI.getPCHContainerOperations());
+ Instance.setInvocation(&*Invocation);
+ Instance.createDiagnostics(
+ new ForwardingDiagnosticConsumer(CI.getDiagnosticClient()),
+ /*ShouldOwnClient=*/true);
+
+ Instance.getDiagnostics().setSourceManager(&SM);
+
+ Instance.setVirtualFileSystem(&CI.getVirtualFileSystem());
+
+  // The instance wants to take ownership; however, the DisableFree frontend
+  // option is set to true to avoid double-free issues.
+ Instance.setFileManager(&CI.getFileManager());
+ Instance.setSourceManager(&SM);
+ Instance.setPreprocessor(&CI.getPreprocessor());
+ Instance.setASTContext(&CI.getASTContext());
+
+ Instance.getPreprocessor().InitializeForModelFile();
+
+ ParseModelFileAction parseModelFile(Bodies);
+
+ const unsigned ThreadStackSize = 8 << 20;
+ llvm::CrashRecoveryContext CRC;
+
+ CRC.RunSafelyOnThread([&]() { Instance.ExecuteAction(parseModelFile); },
+ ThreadStackSize);
+
+ Instance.getPreprocessor().FinalizeForModelFile();
+
+ Instance.resetAndLeakSourceManager();
+ Instance.resetAndLeakFileManager();
+ Instance.resetAndLeakPreprocessor();
+
+  // The preprocessor enters the main file ID when parsing starts, so the
+  // main file ID is changed to the model file during parsing and needs to be
+  // reset to the former main file ID after parsing of the model file is done.
+ SM.setMainFileID(mainFileID);
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/ModelInjector.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/ModelInjector.h
new file mode 100644
index 0000000..e23bf8a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/ModelInjector.h
@@ -0,0 +1,74 @@
+//===-- ModelInjector.h -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file defines the clang::ento::ModelInjector class which implements the
+/// clang::CodeInjector interface. This class is responsible for injecting
+/// function definitions that were synthesized from model files.
+///
+/// Model files allow function definitions to be lazily constituted for functions
+/// that lack bodies in the original source code. This allows the analyzer
+/// to more precisely analyze code that calls such functions, analyzing the
+/// artificial definitions (which typically approximate the semantics of the
+/// called function) when called by client code. These definitions are
+/// reconstituted lazily, on-demand, by the static analyzer engine.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SA_FRONTEND_MODELINJECTOR_H
+#define LLVM_CLANG_SA_FRONTEND_MODELINJECTOR_H
+
+#include "clang/Analysis/CodeInjector.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/StringMap.h"
+#include <map>
+#include <memory>
+#include <vector>
+
+namespace clang {
+
+class CompilerInstance;
+class ASTUnit;
+class ASTReader;
+class NamedDecl;
+class Module;
+
+namespace ento {
+class ModelInjector : public CodeInjector {
+public:
+ ModelInjector(CompilerInstance &CI);
+ Stmt *getBody(const FunctionDecl *D) override;
+ Stmt *getBody(const ObjCMethodDecl *D) override;
+
+private:
+ /// \brief Synthesize a body for a declaration
+ ///
+  /// This method first looks up the appropriate model file based on the
+  /// model-path configuration option and the name of the declaration being
+  /// looked up. If no model has been synthesized yet for a function with that
+  /// name, it will create a new compiler instance to parse the model file,
+  /// using the ASTContext, Preprocessor, and SourceManager of the original
+  /// compiler instance. These resources are shared between the two compiler
+  /// instances, so the newly created instance has to "leak" them, since they
+  /// are owned by the original instance.
+ ///
+ /// The model-path should be either an absolute path or relative to the
+ /// working directory of the compiler.
+ void onBodySynthesis(const NamedDecl *D);
+
+ CompilerInstance &CI;
+
+ // FIXME: double memoization is redundant, with memoization both here and in
+ // BodyFarm.
+ llvm::StringMap<Stmt *> Bodies;
+};
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Tooling/ArgumentsAdjusters.cpp b/contrib/llvm/tools/clang/lib/Tooling/ArgumentsAdjusters.cpp
new file mode 100644
index 0000000..2f3d829
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/ArgumentsAdjusters.cpp
@@ -0,0 +1,86 @@
+//===--- ArgumentsAdjusters.cpp - Command line arguments adjuster ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains definitions of classes which implement ArgumentsAdjuster
+// interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/ArgumentsAdjusters.h"
+
+namespace clang {
+namespace tooling {
+
+/// Add the -fsyntax-only option to the command line arguments.
+ArgumentsAdjuster getClangSyntaxOnlyAdjuster() {
+ return [](const CommandLineArguments &Args, StringRef /*unused*/) {
+ CommandLineArguments AdjustedArgs;
+ for (size_t i = 0, e = Args.size(); i != e; ++i) {
+ StringRef Arg = Args[i];
+ // FIXME: Remove options that generate output.
+ if (!Arg.startswith("-fcolor-diagnostics") &&
+ !Arg.startswith("-fdiagnostics-color"))
+ AdjustedArgs.push_back(Args[i]);
+ }
+ AdjustedArgs.push_back("-fsyntax-only");
+ return AdjustedArgs;
+ };
+}
+
+ArgumentsAdjuster getClangStripOutputAdjuster() {
+ return [](const CommandLineArguments &Args, StringRef /*unused*/) {
+ CommandLineArguments AdjustedArgs;
+ for (size_t i = 0, e = Args.size(); i < e; ++i) {
+ StringRef Arg = Args[i];
+ if (!Arg.startswith("-o"))
+ AdjustedArgs.push_back(Args[i]);
+
+ if (Arg == "-o") {
+ // Output is specified as -o foo. Skip the next argument also.
+ ++i;
+ }
+ // Else, the output is specified as -ofoo. Just do nothing.
+ }
+ return AdjustedArgs;
+ };
+}
+
+ArgumentsAdjuster getInsertArgumentAdjuster(const CommandLineArguments &Extra,
+ ArgumentInsertPosition Pos) {
+ return [Extra, Pos](const CommandLineArguments &Args, StringRef /*unused*/) {
+ CommandLineArguments Return(Args);
+
+ CommandLineArguments::iterator I;
+ if (Pos == ArgumentInsertPosition::END) {
+ I = Return.end();
+ } else {
+ I = Return.begin();
+ ++I; // To leave the program name in place
+ }
+
+ Return.insert(I, Extra.begin(), Extra.end());
+ return Return;
+ };
+}
+
+ArgumentsAdjuster getInsertArgumentAdjuster(const char *Extra,
+ ArgumentInsertPosition Pos) {
+ return getInsertArgumentAdjuster(CommandLineArguments(1, Extra), Pos);
+}
+
+ArgumentsAdjuster combineAdjusters(ArgumentsAdjuster First,
+ ArgumentsAdjuster Second) {
+ return [First, Second](const CommandLineArguments &Args, StringRef File) {
+ return Second(First(Args, File), File);
+ };
+}
+
+} // end namespace tooling
+} // end namespace clang
+
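These adjusters compose with combineAdjusters(); a quick sketch that stacks the two built-ins above and applies them to one captured command line (the wrapper function name is made up):

    #include "clang/Tooling/ArgumentsAdjusters.h"

    using namespace clang::tooling;

    // Sketch: strip -o options, then force -fsyntax-only.
    CommandLineArguments adjustForSyntaxCheck(CommandLineArguments Args) {
      ArgumentsAdjuster Adjuster = combineAdjusters(
          getClangStripOutputAdjuster(), getClangSyntaxOnlyAdjuster());
      return Adjuster(Args, /*Filename=*/"input.cc");
    }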
diff --git a/contrib/llvm/tools/clang/lib/Tooling/CommonOptionsParser.cpp b/contrib/llvm/tools/clang/lib/Tooling/CommonOptionsParser.cpp
new file mode 100644
index 0000000..82f5601
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/CommonOptionsParser.cpp
@@ -0,0 +1,149 @@
+//===--- CommonOptionsParser.cpp - common options for clang tools ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the CommonOptionsParser class used to parse common
+// command-line options for clang tools, so that they can be run as separate
+// command-line applications with a consistent common interface for handling
+// compilation database and input files.
+//
+// It provides a common subset of command-line options, common algorithm
+// for locating a compilation database and source files, and help messages
+// for the basic command-line interface.
+//
+// It creates a CompilationDatabase and reads common command-line options.
+//
+// This class uses the Clang Tooling infrastructure, see
+// http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html
+// for details on setting it up with LLVM source tree.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/CommandLine.h"
+#include "clang/Tooling/ArgumentsAdjusters.h"
+#include "clang/Tooling/CommonOptionsParser.h"
+#include "clang/Tooling/Tooling.h"
+
+using namespace clang::tooling;
+using namespace llvm;
+
+const char *const CommonOptionsParser::HelpMessage =
+ "\n"
+ "-p <build-path> is used to read a compile command database.\n"
+ "\n"
+ "\tFor example, it can be a CMake build directory in which a file named\n"
+ "\tcompile_commands.json exists (use -DCMAKE_EXPORT_COMPILE_COMMANDS=ON\n"
+ "\tCMake option to get this output). When no build path is specified,\n"
+ "\ta search for compile_commands.json will be attempted through all\n"
+ "\tparent paths of the first input file . See:\n"
+ "\thttp://clang.llvm.org/docs/HowToSetupToolingForLLVM.html for an\n"
+ "\texample of setting up Clang Tooling on a source tree.\n"
+ "\n"
+ "<source0> ... specify the paths of source files. These paths are\n"
+ "\tlooked up in the compile command database. If the path of a file is\n"
+ "\tabsolute, it needs to point into CMake's source tree. If the path is\n"
+ "\trelative, the current working directory needs to be in the CMake\n"
+ "\tsource tree and the file must be in a subdirectory of the current\n"
+ "\tworking directory. \"./\" prefixes in the relative files will be\n"
+ "\tautomatically removed, but the rest of a relative path must be a\n"
+ "\tsuffix of a path in the compile command database.\n"
+ "\n";
+
+namespace {
+class ArgumentsAdjustingCompilations : public CompilationDatabase {
+public:
+ ArgumentsAdjustingCompilations(
+ std::unique_ptr<CompilationDatabase> Compilations)
+ : Compilations(std::move(Compilations)) {}
+
+ void appendArgumentsAdjuster(ArgumentsAdjuster Adjuster) {
+ Adjusters.push_back(Adjuster);
+ }
+
+ std::vector<CompileCommand>
+ getCompileCommands(StringRef FilePath) const override {
+ return adjustCommands(Compilations->getCompileCommands(FilePath));
+ }
+
+ std::vector<std::string> getAllFiles() const override {
+ return Compilations->getAllFiles();
+ }
+
+ std::vector<CompileCommand> getAllCompileCommands() const override {
+ return adjustCommands(Compilations->getAllCompileCommands());
+ }
+
+private:
+ std::unique_ptr<CompilationDatabase> Compilations;
+ std::vector<ArgumentsAdjuster> Adjusters;
+
+ std::vector<CompileCommand>
+ adjustCommands(std::vector<CompileCommand> Commands) const {
+ for (CompileCommand &Command : Commands)
+ for (const auto &Adjuster : Adjusters)
+ Command.CommandLine = Adjuster(Command.CommandLine, Command.Filename);
+ return Commands;
+ }
+};
+} // namespace
+
+CommonOptionsParser::CommonOptionsParser(
+ int &argc, const char **argv, cl::OptionCategory &Category,
+ llvm::cl::NumOccurrencesFlag OccurrencesFlag, const char *Overview) {
+ static cl::opt<bool> Help("h", cl::desc("Alias for -help"), cl::Hidden);
+
+ static cl::opt<std::string> BuildPath("p", cl::desc("Build path"),
+ cl::Optional, cl::cat(Category));
+
+ static cl::list<std::string> SourcePaths(
+ cl::Positional, cl::desc("<source0> [... <sourceN>]"), OccurrencesFlag,
+ cl::cat(Category));
+
+ static cl::list<std::string> ArgsAfter(
+ "extra-arg",
+ cl::desc("Additional argument to append to the compiler command line"),
+ cl::cat(Category));
+
+ static cl::list<std::string> ArgsBefore(
+ "extra-arg-before",
+ cl::desc("Additional argument to prepend to the compiler command line"),
+ cl::cat(Category));
+
+ cl::HideUnrelatedOptions(Category);
+
+ Compilations.reset(FixedCompilationDatabase::loadFromCommandLine(argc, argv));
+ cl::ParseCommandLineOptions(argc, argv, Overview);
+ SourcePathList = SourcePaths;
+ if ((OccurrencesFlag == cl::ZeroOrMore || OccurrencesFlag == cl::Optional) &&
+ SourcePathList.empty())
+ return;
+ if (!Compilations) {
+ std::string ErrorMessage;
+ if (!BuildPath.empty()) {
+ Compilations =
+ CompilationDatabase::autoDetectFromDirectory(BuildPath, ErrorMessage);
+ } else {
+ Compilations = CompilationDatabase::autoDetectFromSource(SourcePaths[0],
+ ErrorMessage);
+ }
+ if (!Compilations) {
+ llvm::errs() << "Error while trying to load a compilation database:\n"
+ << ErrorMessage << "Running without flags.\n";
+ Compilations.reset(
+ new FixedCompilationDatabase(".", std::vector<std::string>()));
+ }
+ }
+ auto AdjustingCompilations =
+ llvm::make_unique<ArgumentsAdjustingCompilations>(
+ std::move(Compilations));
+ AdjustingCompilations->appendArgumentsAdjuster(
+ getInsertArgumentAdjuster(ArgsBefore, ArgumentInsertPosition::BEGIN));
+ AdjustingCompilations->appendArgumentsAdjuster(
+ getInsertArgumentAdjuster(ArgsAfter, ArgumentInsertPosition::END));
+ Compilations = std::move(AdjustingCompilations);
+}
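The expected client of this parser is the few-line main() of a clang tool; a minimal sketch, assuming the convenience constructor overload from the header (the category string is arbitrary, and SyntaxOnlyAction stands in for a real action):

    #include "clang/Frontend/FrontendActions.h"
    #include "clang/Tooling/CommonOptionsParser.h"
    #include "clang/Tooling/Tooling.h"

    using namespace clang::tooling;

    static llvm::cl::OptionCategory ToolCategory("example-tool options");

    // Sketch: a minimal tool that just syntax-checks the given sources.
    int main(int argc, const char **argv) {
      CommonOptionsParser Options(argc, argv, ToolCategory);
      ClangTool Tool(Options.getCompilations(), Options.getSourcePathList());
      return Tool.run(
          newFrontendActionFactory<clang::SyntaxOnlyAction>().get());
    }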
diff --git a/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp b/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp
new file mode 100644
index 0000000..957e401
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp
@@ -0,0 +1,333 @@
+//===--- CompilationDatabase.cpp - ----------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains implementations of the CompilationDatabase base class
+// and the FixedCompilationDatabase.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/CompilationDatabase.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Driver/Action.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Job.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Tooling/CompilationDatabasePluginRegistry.h"
+#include "clang/Tooling/Tooling.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Path.h"
+#include <sstream>
+#include <system_error>
+using namespace clang;
+using namespace tooling;
+
+CompilationDatabase::~CompilationDatabase() {}
+
+std::unique_ptr<CompilationDatabase>
+CompilationDatabase::loadFromDirectory(StringRef BuildDirectory,
+ std::string &ErrorMessage) {
+ std::stringstream ErrorStream;
+ for (CompilationDatabasePluginRegistry::iterator
+ It = CompilationDatabasePluginRegistry::begin(),
+ Ie = CompilationDatabasePluginRegistry::end();
+ It != Ie; ++It) {
+ std::string DatabaseErrorMessage;
+ std::unique_ptr<CompilationDatabasePlugin> Plugin(It->instantiate());
+ if (std::unique_ptr<CompilationDatabase> DB =
+ Plugin->loadFromDirectory(BuildDirectory, DatabaseErrorMessage))
+ return DB;
+ ErrorStream << It->getName() << ": " << DatabaseErrorMessage << "\n";
+ }
+ ErrorMessage = ErrorStream.str();
+ return nullptr;
+}
+
+static std::unique_ptr<CompilationDatabase>
+findCompilationDatabaseFromDirectory(StringRef Directory,
+ std::string &ErrorMessage) {
+ std::stringstream ErrorStream;
+ bool HasErrorMessage = false;
+ while (!Directory.empty()) {
+ std::string LoadErrorMessage;
+
+ if (std::unique_ptr<CompilationDatabase> DB =
+ CompilationDatabase::loadFromDirectory(Directory, LoadErrorMessage))
+ return DB;
+
+ if (!HasErrorMessage) {
+ ErrorStream << "No compilation database found in " << Directory.str()
+ << " or any parent directory\n" << LoadErrorMessage;
+ HasErrorMessage = true;
+ }
+
+ Directory = llvm::sys::path::parent_path(Directory);
+ }
+ ErrorMessage = ErrorStream.str();
+ return nullptr;
+}
+
+std::unique_ptr<CompilationDatabase>
+CompilationDatabase::autoDetectFromSource(StringRef SourceFile,
+ std::string &ErrorMessage) {
+ SmallString<1024> AbsolutePath(getAbsolutePath(SourceFile));
+ StringRef Directory = llvm::sys::path::parent_path(AbsolutePath);
+
+ std::unique_ptr<CompilationDatabase> DB =
+ findCompilationDatabaseFromDirectory(Directory, ErrorMessage);
+
+ if (!DB)
+ ErrorMessage = ("Could not auto-detect compilation database for file \"" +
+ SourceFile + "\"\n" + ErrorMessage).str();
+ return DB;
+}
+
+std::unique_ptr<CompilationDatabase>
+CompilationDatabase::autoDetectFromDirectory(StringRef SourceDir,
+ std::string &ErrorMessage) {
+ SmallString<1024> AbsolutePath(getAbsolutePath(SourceDir));
+
+ std::unique_ptr<CompilationDatabase> DB =
+ findCompilationDatabaseFromDirectory(AbsolutePath, ErrorMessage);
+
+ if (!DB)
+ ErrorMessage = ("Could not auto-detect compilation database from directory \"" +
+ SourceDir + "\"\n" + ErrorMessage).str();
+ return DB;
+}
+
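+// Example (illustrative): for a source file /project/src/foo.cpp, the
+// auto-detection above asks every registered plugin (e.g. the JSON plugin,
+// which looks for compile_commands.json) first in /project/src, then in
+// /project, then in /, stopping at the first directory that yields a
+// database.
+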
+CompilationDatabasePlugin::~CompilationDatabasePlugin() {}
+
+namespace {
+// Helper for recursively searching through a chain of actions and collecting
+// all inputs, direct and indirect, of compile jobs.
+struct CompileJobAnalyzer {
+ void run(const driver::Action *A) {
+ runImpl(A, false);
+ }
+
+ SmallVector<std::string, 2> Inputs;
+
+private:
+
+ void runImpl(const driver::Action *A, bool Collect) {
+ bool CollectChildren = Collect;
+ switch (A->getKind()) {
+ case driver::Action::CompileJobClass:
+ CollectChildren = true;
+ break;
+
+ case driver::Action::InputClass: {
+ if (Collect) {
+ const driver::InputAction *IA = cast<driver::InputAction>(A);
+ Inputs.push_back(IA->getInputArg().getSpelling());
+ }
+ } break;
+
+    default:
+      // Don't care about other action kinds.
+      break;
+ }
+
+ for (driver::ActionList::const_iterator I = A->begin(), E = A->end();
+ I != E; ++I)
+ runImpl(*I, CollectChildren);
+ }
+};
+
+// Special DiagnosticConsumer that looks for warn_drv_input_file_unused
+// diagnostics from the driver and collects the option strings for those unused
+// options.
+class UnusedInputDiagConsumer : public DiagnosticConsumer {
+public:
+ UnusedInputDiagConsumer() : Other(nullptr) {}
+
+  // Useful for debugging: chains the diagnostics to another consumer after
+  // recording them for our own purposes.
+ UnusedInputDiagConsumer(DiagnosticConsumer *Other) : Other(Other) {}
+
+ void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info) override {
+ if (Info.getID() == clang::diag::warn_drv_input_file_unused) {
+      // The diagnostic's first argument (index 0) is the option that didn't
+      // get used.
+ UnusedInputs.push_back(Info.getArgStdStr(0));
+ }
+ if (Other)
+ Other->HandleDiagnostic(DiagLevel, Info);
+ }
+
+ DiagnosticConsumer *Other;
+ SmallVector<std::string, 2> UnusedInputs;
+};
+
+// Unary functor for asking "Given a StringRef S1, does there exist a string
+// S2 in Arr where S1 == S2?"
+struct MatchesAny {
+ MatchesAny(ArrayRef<std::string> Arr) : Arr(Arr) {}
+ bool operator() (StringRef S) {
+ for (const std::string *I = Arr.begin(), *E = Arr.end(); I != E; ++I)
+ if (*I == S)
+ return true;
+ return false;
+ }
+private:
+ ArrayRef<std::string> Arr;
+};
+} // namespace
+
+/// \brief Strips any positional args and possible argv[0] from a command-line
+/// provided by the user to construct a FixedCompilationDatabase.
+///
+/// FixedCompilationDatabase requires a command line to be in this format as it
+/// constructs the command line for each file by appending the name of the file
+/// to be compiled. FixedCompilationDatabase also adds its own argv[0] to the
+/// start of the command line although its value is not important as it's just
+/// ignored by the Driver invoked by the ClangTool using the
+/// FixedCompilationDatabase.
+///
+/// FIXME: This functionality should probably be made available by
+/// clang::driver::Driver although what the interface should look like is not
+/// clear.
+///
+/// \param[in] Args Args as provided by the user.
+/// \param[out] Result Resulting stripped command line.
+/// \return \li true if successful.
+///         \li false if \c Args cannot be used for compilation jobs (e.g.
+///             contains an option like -E or -version).
+static bool stripPositionalArgs(std::vector<const char *> Args,
+ std::vector<std::string> &Result) {
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
+ UnusedInputDiagConsumer DiagClient;
+ DiagnosticsEngine Diagnostics(
+ IntrusiveRefCntPtr<clang::DiagnosticIDs>(new DiagnosticIDs()),
+ &*DiagOpts, &DiagClient, false);
+
+ // The clang executable path isn't required since the jobs the driver builds
+ // will not be executed.
+ std::unique_ptr<driver::Driver> NewDriver(new driver::Driver(
+ /* ClangExecutable= */ "", llvm::sys::getDefaultTargetTriple(),
+ Diagnostics));
+ NewDriver->setCheckInputsExist(false);
+
+ // This becomes the new argv[0]. The value is actually not important as it
+ // isn't used for invoking Tools.
+ Args.insert(Args.begin(), "clang-tool");
+
+  // By adding -c, we force the driver to treat compilation as the last phase.
+  // It will then issue warnings via Diagnostics about unused options that
+  // would have been used for linking. If the user provided a compiler name as
+  // the original argv[0], it will be treated as a linker input thanks to
+  // inserting a new argv[0] above. All unused options get collected by
+  // UnusedInputDiagConsumer and get stripped out later.
+ Args.push_back("-c");
+
+  // Append a dummy C++ file to ensure there's at least one compile job for
+  // the driver to construct. If the user specified some other argument that
+  // prevents compilation, e.g. -E or something like -version, we may still
+  // end up with no jobs, but then this is the user's fault.
+ Args.push_back("placeholder.cpp");
+
+ // Remove -no-integrated-as; it's not used for syntax checking,
+ // and it confuses targets which don't support this option.
+ Args.erase(std::remove_if(Args.begin(), Args.end(),
+ MatchesAny(std::string("-no-integrated-as"))),
+ Args.end());
+
+ const std::unique_ptr<driver::Compilation> Compilation(
+ NewDriver->BuildCompilation(Args));
+
+ const driver::JobList &Jobs = Compilation->getJobs();
+
+ CompileJobAnalyzer CompileAnalyzer;
+
+ for (const auto &Cmd : Jobs) {
+ // Collect only for Assemble jobs. If we do all jobs we get duplicates
+ // since Link jobs point to Assemble jobs as inputs.
+ if (Cmd.getSource().getKind() == driver::Action::AssembleJobClass)
+ CompileAnalyzer.run(&Cmd.getSource());
+ }
+
+ if (CompileAnalyzer.Inputs.empty()) {
+ // No compile jobs found.
+ // FIXME: Emit a warning of some kind?
+ return false;
+ }
+
+ // Remove all compilation input files from the command line. This is
+ // necessary so that getCompileCommands() can construct a command line for
+ // each file.
+ std::vector<const char *>::iterator End = std::remove_if(
+ Args.begin(), Args.end(), MatchesAny(CompileAnalyzer.Inputs));
+
+ // Remove all inputs deemed unused for compilation.
+ End = std::remove_if(Args.begin(), End, MatchesAny(DiagClient.UnusedInputs));
+
+  // Remove the -c added above as well. It will be at the end right now.
+ assert(strcmp(*(End - 1), "-c") == 0);
+ --End;
+
+ Result = std::vector<std::string>(Args.begin() + 1, End);
+ return true;
+}
+
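+// Illustrative walk-through (assuming the user passed "g++ -DNDEBUG -c
+// foo.cpp" after "--"): the driver sees
+//   clang-tool g++ -DNDEBUG -c foo.cpp -c placeholder.cpp
+// "g++" is diagnosed as an unused (linker) input, "foo.cpp" and
+// "placeholder.cpp" are collected as compile-job inputs and removed, and the
+// trailing "-c" is popped, leaving the stripped result: -DNDEBUG -c
+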
+FixedCompilationDatabase *FixedCompilationDatabase::loadFromCommandLine(
+ int &Argc, const char *const *Argv, Twine Directory) {
+ const char *const *DoubleDash = std::find(Argv, Argv + Argc, StringRef("--"));
+ if (DoubleDash == Argv + Argc)
+ return nullptr;
+ std::vector<const char *> CommandLine(DoubleDash + 1, Argv + Argc);
+ Argc = DoubleDash - Argv;
+
+ std::vector<std::string> StrippedArgs;
+ if (!stripPositionalArgs(CommandLine, StrippedArgs))
+ return nullptr;
+ return new FixedCompilationDatabase(Directory, StrippedArgs);
+}
+
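+// Example (illustrative) invocation consumed by loadFromCommandLine, with
+// "my-tool" standing in for any ClangTool-based binary:
+//   my-tool foo.cpp -- g++ -DNDEBUG -Iinclude -c foo.cpp
+// Everything after "--" is stripped of positional arguments and becomes the
+// fixed command line; Argc is truncated so only "my-tool foo.cpp" remains
+// visible to the option parser.
+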
+FixedCompilationDatabase::
+FixedCompilationDatabase(Twine Directory, ArrayRef<std::string> CommandLine) {
+ std::vector<std::string> ToolCommandLine(1, "clang-tool");
+ ToolCommandLine.insert(ToolCommandLine.end(),
+ CommandLine.begin(), CommandLine.end());
+ CompileCommands.emplace_back(Directory, StringRef(),
+ std::move(ToolCommandLine));
+}
+
+std::vector<CompileCommand>
+FixedCompilationDatabase::getCompileCommands(StringRef FilePath) const {
+ std::vector<CompileCommand> Result(CompileCommands);
+ Result[0].CommandLine.push_back(FilePath);
+ Result[0].Filename = FilePath;
+ return Result;
+}
+
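+// Example (illustrative): a database constructed with directory "." and the
+// command line {"-DNDEBUG"} answers getCompileCommands("foo.cpp") with the
+// single command: clang-tool -DNDEBUG foo.cpp
+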
+std::vector<std::string>
+FixedCompilationDatabase::getAllFiles() const {
+ return std::vector<std::string>();
+}
+
+std::vector<CompileCommand>
+FixedCompilationDatabase::getAllCompileCommands() const {
+ return std::vector<CompileCommand>();
+}
+
+namespace clang {
+namespace tooling {
+
+// This anchor is used to force the linker to link in the generated object file
+// and thus register the JSONCompilationDatabasePlugin.
+extern volatile int JSONAnchorSource;
+static int LLVM_ATTRIBUTE_UNUSED JSONAnchorDest = JSONAnchorSource;
+
+} // end namespace tooling
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Tooling/Core/Lookup.cpp b/contrib/llvm/tools/clang/lib/Tooling/Core/Lookup.cpp
new file mode 100644
index 0000000..697eeb4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/Core/Lookup.cpp
@@ -0,0 +1,113 @@
+//===--- Lookup.cpp - Framework for clang refactoring tools ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines helper methods for clang tools performing name lookup.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/Core/Lookup.h"
+#include "clang/AST/Decl.h"
+using namespace clang;
+using namespace clang::tooling;
+
+static bool isInsideDifferentNamespaceWithSameName(const DeclContext *DeclA,
+ const DeclContext *DeclB) {
+ while (true) {
+ // Look past non-namespaces on DeclA.
+ while (DeclA && !isa<NamespaceDecl>(DeclA))
+ DeclA = DeclA->getParent();
+
+ // Look past non-namespaces on DeclB.
+ while (DeclB && !isa<NamespaceDecl>(DeclB))
+ DeclB = DeclB->getParent();
+
+ // We hit the root, no namespace collision.
+ if (!DeclA || !DeclB)
+ return false;
+
+ // Literally the same namespace, not a collision.
+ if (DeclA == DeclB)
+ return false;
+
+    // Now check the names. If they match, we have a different namespace with
+    // the same name.
+ if (cast<NamespaceDecl>(DeclA)->getDeclName() ==
+ cast<NamespaceDecl>(DeclB)->getDeclName())
+ return true;
+
+ DeclA = DeclA->getParent();
+ DeclB = DeclB->getParent();
+ }
+}
+
+static StringRef getBestNamespaceSubstr(const DeclContext *DeclA,
+ StringRef NewName,
+ bool HadLeadingColonColon) {
+ while (true) {
+ while (DeclA && !isa<NamespaceDecl>(DeclA))
+ DeclA = DeclA->getParent();
+
+ // Fully qualified it is! Leave :: in place if it's there already.
+ if (!DeclA)
+ return HadLeadingColonColon ? NewName : NewName.substr(2);
+
+ // Otherwise strip off redundant namespace qualifications from the new name.
+ // We use the fully qualified name of the namespace and remove that part
+ // from NewName if it has an identical prefix.
+ std::string NS =
+ "::" + cast<NamespaceDecl>(DeclA)->getQualifiedNameAsString() + "::";
+ if (NewName.startswith(NS))
+ return NewName.substr(NS.size());
+
+    // No match yet. Strip off a namespace from the end of the chain and try
+    // again. This allows us to get optimal qualifications even if the old and
+    // new decl only share common namespaces at a higher level.
+ DeclA = DeclA->getParent();
+ }
+}
+
+/// Check if the name specifier begins with a written "::".
+static bool isFullyQualified(const NestedNameSpecifier *NNS) {
+ while (NNS) {
+ if (NNS->getKind() == NestedNameSpecifier::Global)
+ return true;
+ NNS = NNS->getPrefix();
+ }
+ return false;
+}
+
+std::string tooling::replaceNestedName(const NestedNameSpecifier *Use,
+ const DeclContext *UseContext,
+ const NamedDecl *FromDecl,
+ StringRef ReplacementString) {
+ assert(ReplacementString.startswith("::") &&
+ "Expected fully-qualified name!");
+
+  // We can do a raw name replacement when we are not inside the namespace for
+  // the original function and it is not in the global namespace. The
+  // assumption is that outside the original namespace we must have a using
+  // statement that makes this work out and that other parts of this refactor
+  // will automatically fix using statements to point to the new function.
+ const bool class_name_only = !Use;
+ const bool in_global_namespace =
+ isa<TranslationUnitDecl>(FromDecl->getDeclContext());
+ if (class_name_only && !in_global_namespace &&
+ !isInsideDifferentNamespaceWithSameName(FromDecl->getDeclContext(),
+ UseContext)) {
+ auto Pos = ReplacementString.rfind("::");
+ return Pos != StringRef::npos ? ReplacementString.substr(Pos + 2)
+ : ReplacementString;
+ }
+ // We did not match this because of a using statement, so we will need to
+ // figure out how good a namespace match we have with our destination type.
+ // We work backwards (from most specific possible namespace to least
+ // specific).
+ return getBestNamespaceSubstr(UseContext, ReplacementString,
+ isFullyQualified(Use));
+}
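+
+// Example (illustrative): when renaming ::a::b::foo to ::a::b::bar, a use
+// written as plain "foo" (Use == nullptr) outside namespace ::a::b is
+// rewritten to just "bar", while a qualified use inside namespace ::a goes
+// through getBestNamespaceSubstr() and is rewritten to "b::bar".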
diff --git a/contrib/llvm/tools/clang/lib/Tooling/Core/Replacement.cpp b/contrib/llvm/tools/clang/lib/Tooling/Core/Replacement.cpp
new file mode 100644
index 0000000..47bbdeb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/Core/Replacement.cpp
@@ -0,0 +1,419 @@
+//===--- Replacement.cpp - Framework for clang refactoring tools ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements classes to support/store refactorings.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticIDs.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Rewrite/Core/Rewriter.h"
+#include "clang/Tooling/Core/Replacement.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_os_ostream.h"
+
+namespace clang {
+namespace tooling {
+
+static const char * const InvalidLocation = "";
+
+Replacement::Replacement()
+ : FilePath(InvalidLocation) {}
+
+Replacement::Replacement(StringRef FilePath, unsigned Offset, unsigned Length,
+ StringRef ReplacementText)
+ : FilePath(FilePath), ReplacementRange(Offset, Length),
+ ReplacementText(ReplacementText) {}
+
+Replacement::Replacement(const SourceManager &Sources, SourceLocation Start,
+ unsigned Length, StringRef ReplacementText) {
+ setFromSourceLocation(Sources, Start, Length, ReplacementText);
+}
+
+Replacement::Replacement(const SourceManager &Sources,
+ const CharSourceRange &Range,
+ StringRef ReplacementText,
+ const LangOptions &LangOpts) {
+ setFromSourceRange(Sources, Range, ReplacementText, LangOpts);
+}
+
+bool Replacement::isApplicable() const {
+ return FilePath != InvalidLocation;
+}
+
+bool Replacement::apply(Rewriter &Rewrite) const {
+ SourceManager &SM = Rewrite.getSourceMgr();
+ const FileEntry *Entry = SM.getFileManager().getFile(FilePath);
+ if (!Entry)
+ return false;
+ FileID ID;
+ // FIXME: Use SM.translateFile directly.
+ SourceLocation Location = SM.translateFileLineCol(Entry, 1, 1);
+ ID = Location.isValid() ?
+ SM.getFileID(Location) :
+ SM.createFileID(Entry, SourceLocation(), SrcMgr::C_User);
+ // FIXME: We cannot check whether Offset + Length is in the file, as
+ // the remapping API is not public in the RewriteBuffer.
+ const SourceLocation Start =
+ SM.getLocForStartOfFile(ID).
+ getLocWithOffset(ReplacementRange.getOffset());
+ // ReplaceText returns false on success.
+ // ReplaceText only fails if the source location is not a file location, in
+ // which case we already returned false earlier.
+ bool RewriteSucceeded = !Rewrite.ReplaceText(
+ Start, ReplacementRange.getLength(), ReplacementText);
+ assert(RewriteSucceeded);
+ return RewriteSucceeded;
+}
+
+std::string Replacement::toString() const {
+ std::string Result;
+ llvm::raw_string_ostream Stream(Result);
+ Stream << FilePath << ": " << ReplacementRange.getOffset() << ":+"
+ << ReplacementRange.getLength() << ":\"" << ReplacementText << "\"";
+ return Stream.str();
+}
+
+bool operator<(const Replacement &LHS, const Replacement &RHS) {
+ if (LHS.getOffset() != RHS.getOffset())
+ return LHS.getOffset() < RHS.getOffset();
+
+ // Apply longer replacements first, specifically so that deletions are
+ // executed before insertions. It is (hopefully) never the intention to
+ // delete parts of newly inserted code.
+ if (LHS.getLength() != RHS.getLength())
+ return LHS.getLength() > RHS.getLength();
+
+ if (LHS.getFilePath() != RHS.getFilePath())
+ return LHS.getFilePath() < RHS.getFilePath();
+ return LHS.getReplacementText() < RHS.getReplacementText();
+}
+
+bool operator==(const Replacement &LHS, const Replacement &RHS) {
+ return LHS.getOffset() == RHS.getOffset() &&
+ LHS.getLength() == RHS.getLength() &&
+ LHS.getFilePath() == RHS.getFilePath() &&
+ LHS.getReplacementText() == RHS.getReplacementText();
+}
+
+void Replacement::setFromSourceLocation(const SourceManager &Sources,
+ SourceLocation Start, unsigned Length,
+ StringRef ReplacementText) {
+ const std::pair<FileID, unsigned> DecomposedLocation =
+ Sources.getDecomposedLoc(Start);
+ const FileEntry *Entry = Sources.getFileEntryForID(DecomposedLocation.first);
+ this->FilePath = Entry ? Entry->getName() : InvalidLocation;
+ this->ReplacementRange = Range(DecomposedLocation.second, Length);
+ this->ReplacementText = ReplacementText;
+}
+
+// FIXME: This should go into the Lexer, but we need to figure out how
+// to handle ranges for refactoring in general first - there is no obvious
+// good way to integrate this into the Lexer yet.
+static int getRangeSize(const SourceManager &Sources,
+ const CharSourceRange &Range,
+ const LangOptions &LangOpts) {
+ SourceLocation SpellingBegin = Sources.getSpellingLoc(Range.getBegin());
+ SourceLocation SpellingEnd = Sources.getSpellingLoc(Range.getEnd());
+ std::pair<FileID, unsigned> Start = Sources.getDecomposedLoc(SpellingBegin);
+ std::pair<FileID, unsigned> End = Sources.getDecomposedLoc(SpellingEnd);
+ if (Start.first != End.first) return -1;
+ if (Range.isTokenRange())
+ End.second += Lexer::MeasureTokenLength(SpellingEnd, Sources, LangOpts);
+ return End.second - Start.second;
+}
+
+void Replacement::setFromSourceRange(const SourceManager &Sources,
+ const CharSourceRange &Range,
+ StringRef ReplacementText,
+ const LangOptions &LangOpts) {
+ setFromSourceLocation(Sources, Sources.getSpellingLoc(Range.getBegin()),
+ getRangeSize(Sources, Range, LangOpts),
+ ReplacementText);
+}
+
+template <typename T>
+unsigned shiftedCodePositionInternal(const T &Replaces, unsigned Position) {
+ unsigned Offset = 0;
+ for (const auto& R : Replaces) {
+ if (R.getOffset() + R.getLength() <= Position) {
+ Offset += R.getReplacementText().size() - R.getLength();
+ continue;
+ }
+ if (R.getOffset() < Position &&
+ R.getOffset() + R.getReplacementText().size() <= Position) {
+ Position = R.getOffset() + R.getReplacementText().size() - 1;
+ }
+ break;
+ }
+ return Position + Offset;
+}
+
+unsigned shiftedCodePosition(const Replacements &Replaces, unsigned Position) {
+ return shiftedCodePositionInternal(Replaces, Position);
+}
+
+// FIXME: Remove this function when Replacements is implemented as std::vector
+// instead of std::set.
+unsigned shiftedCodePosition(const std::vector<Replacement> &Replaces,
+ unsigned Position) {
+ return shiftedCodePositionInternal(Replaces, Position);
+}
+
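+// Example (illustrative): a replacement at offset 10 that substitutes 2
+// characters with the 5-character text "hello" grows the text by 3, so an
+// original position 20 is shifted to 23; a position inside a replacement
+// that shrank the text is clamped to the last character of the new text.
+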
+void deduplicate(std::vector<Replacement> &Replaces,
+ std::vector<Range> &Conflicts) {
+ if (Replaces.empty())
+ return;
+
+ auto LessNoPath = [](const Replacement &LHS, const Replacement &RHS) {
+ if (LHS.getOffset() != RHS.getOffset())
+ return LHS.getOffset() < RHS.getOffset();
+ if (LHS.getLength() != RHS.getLength())
+ return LHS.getLength() < RHS.getLength();
+ return LHS.getReplacementText() < RHS.getReplacementText();
+ };
+
+ auto EqualNoPath = [](const Replacement &LHS, const Replacement &RHS) {
+ return LHS.getOffset() == RHS.getOffset() &&
+ LHS.getLength() == RHS.getLength() &&
+ LHS.getReplacementText() == RHS.getReplacementText();
+ };
+
+  // Deduplicate. We don't want to deduplicate based on the path as we assume
+  // that all replacements refer to the same file (possibly via symlinks).
+ std::sort(Replaces.begin(), Replaces.end(), LessNoPath);
+ Replaces.erase(std::unique(Replaces.begin(), Replaces.end(), EqualNoPath),
+ Replaces.end());
+
+ // Detect conflicts
+ Range ConflictRange(Replaces.front().getOffset(),
+ Replaces.front().getLength());
+ unsigned ConflictStart = 0;
+ unsigned ConflictLength = 1;
+ for (unsigned i = 1; i < Replaces.size(); ++i) {
+ Range Current(Replaces[i].getOffset(), Replaces[i].getLength());
+ if (ConflictRange.overlapsWith(Current)) {
+ // Extend conflicted range
+ ConflictRange = Range(ConflictRange.getOffset(),
+ std::max(ConflictRange.getLength(),
+ Current.getOffset() + Current.getLength() -
+ ConflictRange.getOffset()));
+ ++ConflictLength;
+ } else {
+ if (ConflictLength > 1)
+ Conflicts.push_back(Range(ConflictStart, ConflictLength));
+ ConflictRange = Current;
+ ConflictStart = i;
+ ConflictLength = 1;
+ }
+ }
+
+ if (ConflictLength > 1)
+ Conflicts.push_back(Range(ConflictStart, ConflictLength));
+}
+
+bool applyAllReplacements(const Replacements &Replaces, Rewriter &Rewrite) {
+ bool Result = true;
+ for (Replacements::const_iterator I = Replaces.begin(),
+ E = Replaces.end();
+ I != E; ++I) {
+ if (I->isApplicable()) {
+ Result = I->apply(Rewrite) && Result;
+ } else {
+ Result = false;
+ }
+ }
+ return Result;
+}
+
+// FIXME: Remove this function when Replacements is implemented as std::vector
+// instead of std::set.
+bool applyAllReplacements(const std::vector<Replacement> &Replaces,
+ Rewriter &Rewrite) {
+ bool Result = true;
+ for (std::vector<Replacement>::const_iterator I = Replaces.begin(),
+ E = Replaces.end();
+ I != E; ++I) {
+ if (I->isApplicable()) {
+ Result = I->apply(Rewrite) && Result;
+ } else {
+ Result = false;
+ }
+ }
+ return Result;
+}
+
+std::string applyAllReplacements(StringRef Code, const Replacements &Replaces) {
+ IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
+ new vfs::InMemoryFileSystem);
+ FileManager Files(FileSystemOptions(), InMemoryFileSystem);
+ DiagnosticsEngine Diagnostics(
+ IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs),
+ new DiagnosticOptions);
+ SourceManager SourceMgr(Diagnostics, Files);
+ Rewriter Rewrite(SourceMgr, LangOptions());
+ InMemoryFileSystem->addFile(
+ "<stdin>", 0, llvm::MemoryBuffer::getMemBuffer(Code, "<stdin>"));
+ FileID ID = SourceMgr.createFileID(Files.getFile("<stdin>"), SourceLocation(),
+ clang::SrcMgr::C_User);
+ for (Replacements::const_iterator I = Replaces.begin(), E = Replaces.end();
+ I != E; ++I) {
+ Replacement Replace("<stdin>", I->getOffset(), I->getLength(),
+ I->getReplacementText());
+ if (!Replace.apply(Rewrite))
+ return "";
+ }
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+ Rewrite.getEditBuffer(ID).write(OS);
+ OS.flush();
+ return Result;
+}
+
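+// Example (illustrative):
+//   applyAllReplacements("int x;", {Replacement("<stdin>", 4, 1, "y")})
+// rewrites the in-memory buffer and returns "int y;".
+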
+namespace {
+// Represents a merged replacement, i.e. a replacement consisting of multiple
+// overlapping replacements from 'First' and 'Second' in mergeReplacements.
+//
+// Position projection:
+// Offsets and lengths of the replacements can generally refer to two different
+// coordinate spaces. Replacements from 'First' refer to the original text
+// whereas replacements from 'Second' refer to the text after applying 'First'.
+//
+// MergedReplacement always operates in the coordinate space of the original
+// text, i.e. transforms elements from 'Second' to take into account what was
+// changed based on the elements from 'First'.
+//
+// We can correctly calculate this projection as we look at the replacements in
+// order of strictly increasing offsets.
+//
+// Invariants:
+// * We always merge elements from 'First' into elements from 'Second' and vice
+// versa. Within each set, the replacements are non-overlapping.
+// * We only extend to the right, i.e. merge elements with strictly increasing
+// offsets.
+class MergedReplacement {
+public:
+ MergedReplacement(const Replacement &R, bool MergeSecond, int D)
+ : MergeSecond(MergeSecond), Delta(D), FilePath(R.getFilePath()),
+ Offset(R.getOffset() + (MergeSecond ? 0 : Delta)), Length(R.getLength()),
+ Text(R.getReplacementText()) {
+ Delta += MergeSecond ? 0 : Text.size() - Length;
+ DeltaFirst = MergeSecond ? Text.size() - Length : 0;
+ }
+
+ // Merges the next element 'R' into this merged element. As we always merge
+ // from 'First' into 'Second' or vice versa, the MergedReplacement knows what
+ // set the next element is coming from.
+ void merge(const Replacement &R) {
+ if (MergeSecond) {
+ unsigned REnd = R.getOffset() + Delta + R.getLength();
+ unsigned End = Offset + Text.size();
+ if (REnd > End) {
+ Length += REnd - End;
+ MergeSecond = false;
+ }
+ StringRef TextRef = Text;
+ StringRef Head = TextRef.substr(0, R.getOffset() + Delta - Offset);
+ StringRef Tail = TextRef.substr(REnd - Offset);
+ Text = (Head + R.getReplacementText() + Tail).str();
+ Delta += R.getReplacementText().size() - R.getLength();
+ } else {
+ unsigned End = Offset + Length;
+ StringRef RText = R.getReplacementText();
+ StringRef Tail = RText.substr(End - R.getOffset());
+ Text = (Text + Tail).str();
+ if (R.getOffset() + RText.size() > End) {
+ Length = R.getOffset() + R.getLength() - Offset;
+ MergeSecond = true;
+ } else {
+ Length += R.getLength() - RText.size();
+ }
+ DeltaFirst += RText.size() - R.getLength();
+ }
+ }
+
+ // Returns 'true' if 'R' starts strictly after the MergedReplacement and thus
+ // doesn't need to be merged.
+ bool endsBefore(const Replacement &R) const {
+ if (MergeSecond)
+ return Offset + Text.size() < R.getOffset() + Delta;
+ return Offset + Length < R.getOffset();
+ }
+
+ // Returns 'true' if an element from the second set should be merged next.
+ bool mergeSecond() const { return MergeSecond; }
+ int deltaFirst() const { return DeltaFirst; }
+ Replacement asReplacement() const { return {FilePath, Offset, Length, Text}; }
+
+private:
+ bool MergeSecond;
+
+ // Amount of characters that elements from 'Second' need to be shifted by in
+ // order to refer to the original text.
+ int Delta;
+
+ // Sum of all deltas (text-length - length) of elements from 'First' merged
+ // into this element. This is used to update 'Delta' once the
+ // MergedReplacement is completed.
+ int DeltaFirst;
+
+ // Data of the actually merged replacement. FilePath and Offset aren't changed
+ // as the element is only extended to the right.
+ const StringRef FilePath;
+ const unsigned Offset;
+ unsigned Length;
+ std::string Text;
+};
+} // namespace
+
+Replacements mergeReplacements(const Replacements &First,
+ const Replacements &Second) {
+ if (First.empty() || Second.empty())
+ return First.empty() ? Second : First;
+
+ // Delta is the amount of characters that replacements from 'Second' need to
+ // be shifted so that their offsets refer to the original text.
+ int Delta = 0;
+ Replacements Result;
+
+ // Iterate over both sets and always add the next element (smallest total
+ // Offset) from either 'First' or 'Second'. Merge that element with
+ // subsequent replacements as long as they overlap. See more details in the
+ // comment on MergedReplacement.
+ for (auto FirstI = First.begin(), SecondI = Second.begin();
+ FirstI != First.end() || SecondI != Second.end();) {
+ bool NextIsFirst = SecondI == Second.end() ||
+ (FirstI != First.end() &&
+ FirstI->getOffset() < SecondI->getOffset() + Delta);
+ MergedReplacement Merged(NextIsFirst ? *FirstI : *SecondI, NextIsFirst,
+ Delta);
+ ++(NextIsFirst ? FirstI : SecondI);
+
+ while ((Merged.mergeSecond() && SecondI != Second.end()) ||
+ (!Merged.mergeSecond() && FirstI != First.end())) {
+ auto &I = Merged.mergeSecond() ? SecondI : FirstI;
+ if (Merged.endsBefore(*I))
+ break;
+ Merged.merge(*I);
+ ++I;
+ }
+ Delta -= Merged.deltaFirst();
+ Result.insert(Merged.asReplacement());
+ }
+ return Result;
+}
+
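+// Example (illustrative) of the Delta bookkeeping: if an element of 'First'
+// replaced 2 characters at offset 0 with a 4-character text (growing the
+// text by 2), then a 'Second' replacement recorded at offset 10 refers to
+// offset 8 of the original text, i.e. Delta is -2 by the time it is merged.
+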
+} // end namespace tooling
+} // end namespace clang
+
diff --git a/contrib/llvm/tools/clang/lib/Tooling/FileMatchTrie.cpp b/contrib/llvm/tools/clang/lib/Tooling/FileMatchTrie.cpp
new file mode 100644
index 0000000..86ed036
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/FileMatchTrie.cpp
@@ -0,0 +1,189 @@
+//===--- FileMatchTrie.cpp - ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation of a FileMatchTrie.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/FileMatchTrie.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include <sstream>
+using namespace clang;
+using namespace tooling;
+
+namespace {
+/// \brief Default \c PathComparator using \c llvm::sys::fs::equivalent().
+struct DefaultPathComparator : public PathComparator {
+ bool equivalent(StringRef FileA, StringRef FileB) const override {
+ return FileA == FileB || llvm::sys::fs::equivalent(FileA, FileB);
+ }
+};
+}
+
+namespace clang {
+namespace tooling {
+/// \brief A node of the \c FileMatchTrie.
+///
+/// Each node has storage for up to one path and a map mapping a path segment to
+/// child nodes. The trie starts with an empty root node.
+class FileMatchTrieNode {
+public:
+ /// \brief Inserts 'NewPath' into this trie. \c ConsumedLength denotes
+ /// the number of \c NewPath's trailing characters already consumed during
+ /// recursion.
+ ///
+  /// An insert of a path
+  /// 'p' starts at the root node and does the following:
+  /// - If the node is empty, insert 'p' into its storage and abort.
+  /// - If the node has a path 'p2' but no children, take the last path segment
+  ///   's' of 'p2', put a new child into the map at 's' and insert the rest of
+  ///   'p2' there.
+ /// - Insert a new child for the last segment of 'p' and insert the rest of
+ /// 'p' there.
+ ///
+ /// An insert operation is linear in the number of a path's segments.
+ void insert(StringRef NewPath, unsigned ConsumedLength = 0) {
+ // We cannot put relative paths into the FileMatchTrie as then a path can be
+ // a postfix of another path, violating a core assumption of the trie.
+ if (llvm::sys::path::is_relative(NewPath))
+ return;
+ if (Path.empty()) {
+ // This is an empty leaf. Store NewPath and return.
+ Path = NewPath;
+ return;
+ }
+ if (Children.empty()) {
+ // This is a leaf, ignore duplicate entry if 'Path' equals 'NewPath'.
+ if (NewPath == Path)
+ return;
+ // Make this a node and create a child-leaf with 'Path'.
+ StringRef Element(llvm::sys::path::filename(
+ StringRef(Path).drop_back(ConsumedLength)));
+ Children[Element].Path = Path;
+ }
+ StringRef Element(llvm::sys::path::filename(
+ StringRef(NewPath).drop_back(ConsumedLength)));
+ Children[Element].insert(NewPath, ConsumedLength + Element.size() + 1);
+ }
+
+ /// \brief Tries to find the node under this \c FileMatchTrieNode that best
+ /// matches 'FileName'.
+ ///
+  /// If multiple paths fit 'FileName' equally well, \c IsAmbiguous is set to
+  /// \c true and an empty string is returned. If no path fits 'FileName', an
+  /// empty string is returned. \c ConsumedLength denotes the number of
+  /// \c FileName's trailing characters already consumed during recursion.
+  ///
+  /// To find the best matching node for a given path 'p', the
+  /// \c findEquivalent() function is called recursively for each path segment
+  /// (back to front) of 'p' until a node 'n' is reached that does not ..
+  /// - .. have children. In this case it is checked
+  ///   whether the stored path is equivalent to 'p'. If yes, the best match is
+  ///   found. Otherwise continue with the parent node as if this node did not
+  ///   exist.
+  /// - .. have a child matching the next path segment. In this case, all
+  ///   children of 'n' are an equally good match for 'p'. All children of 'n'
+  ///   are found recursively and their equivalence to 'p' is determined. If
+  ///   none are equivalent, continue with the parent node as if 'n' didn't
+  ///   exist. If one is equivalent, the best match is found. Otherwise, report
+  ///   an ambiguity error.
+ StringRef findEquivalent(const PathComparator& Comparator,
+ StringRef FileName,
+ bool &IsAmbiguous,
+ unsigned ConsumedLength = 0) const {
+ if (Children.empty()) {
+ if (Comparator.equivalent(StringRef(Path), FileName))
+ return StringRef(Path);
+ return StringRef();
+ }
+ StringRef Element(llvm::sys::path::filename(FileName.drop_back(
+ ConsumedLength)));
+ llvm::StringMap<FileMatchTrieNode>::const_iterator MatchingChild =
+ Children.find(Element);
+ if (MatchingChild != Children.end()) {
+ StringRef Result = MatchingChild->getValue().findEquivalent(
+ Comparator, FileName, IsAmbiguous,
+ ConsumedLength + Element.size() + 1);
+ if (!Result.empty() || IsAmbiguous)
+ return Result;
+ }
+ std::vector<StringRef> AllChildren;
+ getAll(AllChildren, MatchingChild);
+ StringRef Result;
+ for (unsigned i = 0; i < AllChildren.size(); i++) {
+ if (Comparator.equivalent(AllChildren[i], FileName)) {
+ if (Result.empty()) {
+ Result = AllChildren[i];
+ } else {
+ IsAmbiguous = true;
+ return StringRef();
+ }
+ }
+ }
+ return Result;
+ }
+
+private:
+ /// \brief Gets all paths under this FileMatchTrieNode.
+ void getAll(std::vector<StringRef> &Results,
+ llvm::StringMap<FileMatchTrieNode>::const_iterator Except) const {
+ if (Path.empty())
+ return;
+ if (Children.empty()) {
+ Results.push_back(StringRef(Path));
+ return;
+ }
+ for (llvm::StringMap<FileMatchTrieNode>::const_iterator
+ It = Children.begin(), E = Children.end();
+ It != E; ++It) {
+ if (It == Except)
+ continue;
+ It->getValue().getAll(Results, Children.end());
+ }
+ }
+
+ // The stored absolute path in this node. Only valid for leaf nodes, i.e.
+ // nodes where Children.empty().
+ std::string Path;
+
+ // The children of this node stored in a map based on the next path segment.
+ llvm::StringMap<FileMatchTrieNode> Children;
+};
+} // end namespace tooling
+} // end namespace clang
+
+FileMatchTrie::FileMatchTrie()
+ : Root(new FileMatchTrieNode), Comparator(new DefaultPathComparator()) {}
+
+FileMatchTrie::FileMatchTrie(PathComparator *Comparator)
+ : Root(new FileMatchTrieNode), Comparator(Comparator) {}
+
+FileMatchTrie::~FileMatchTrie() {
+ delete Root;
+}
+
+void FileMatchTrie::insert(StringRef NewPath) {
+ Root->insert(NewPath);
+}
+
+StringRef FileMatchTrie::findEquivalent(StringRef FileName,
+ raw_ostream &Error) const {
+ if (llvm::sys::path::is_relative(FileName)) {
+ Error << "Cannot resolve relative paths";
+ return StringRef();
+ }
+ bool IsAmbiguous = false;
+ StringRef Result = Root->findEquivalent(*Comparator, FileName, IsAmbiguous);
+ if (IsAmbiguous)
+ Error << "Path is ambiguous";
+ return Result;
+}
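+
+// Example (illustrative): after inserting "/src/a/foo.cpp" and
+// "/src/b/foo.cpp", findEquivalent("/src/a/foo.cpp") walks the segments
+// "foo.cpp" and then "a" down to a unique leaf. A query that matches only on
+// "foo.cpp" but is equivalent to both stored paths (e.g. on a
+// case-insensitive file system) reports "Path is ambiguous".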
diff --git a/contrib/llvm/tools/clang/lib/Tooling/JSONCompilationDatabase.cpp b/contrib/llvm/tools/clang/lib/Tooling/JSONCompilationDatabase.cpp
new file mode 100644
index 0000000..299fbdc
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/JSONCompilationDatabase.cpp
@@ -0,0 +1,345 @@
+//===--- JSONCompilationDatabase.cpp - ------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation of the JSONCompilationDatabase.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/JSONCompilationDatabase.h"
+#include "clang/Tooling/CompilationDatabase.h"
+#include "clang/Tooling/CompilationDatabasePluginRegistry.h"
+#include "clang/Tooling/Tooling.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Path.h"
+#include <system_error>
+
+namespace clang {
+namespace tooling {
+
+namespace {
+
+/// \brief A parser for escaped strings of command line arguments.
+///
+/// Assumes \-escaping for quoted arguments (see the documentation of
+/// unescapeCommandLine(...)).
+class CommandLineArgumentParser {
+ public:
+ CommandLineArgumentParser(StringRef CommandLine)
+ : Input(CommandLine), Position(Input.begin()-1) {}
+
+ std::vector<std::string> parse() {
+ bool HasMoreInput = true;
+ while (HasMoreInput && nextNonWhitespace()) {
+ std::string Argument;
+ HasMoreInput = parseStringInto(Argument);
+ CommandLine.push_back(Argument);
+ }
+ return CommandLine;
+ }
+
+ private:
+ // All private methods return true if there is more input available.
+
+ bool parseStringInto(std::string &String) {
+ do {
+ if (*Position == '"') {
+ if (!parseDoubleQuotedStringInto(String)) return false;
+ } else if (*Position == '\'') {
+ if (!parseSingleQuotedStringInto(String)) return false;
+ } else {
+ if (!parseFreeStringInto(String)) return false;
+ }
+ } while (*Position != ' ');
+ return true;
+ }
+
+ bool parseDoubleQuotedStringInto(std::string &String) {
+ if (!next()) return false;
+ while (*Position != '"') {
+ if (!skipEscapeCharacter()) return false;
+ String.push_back(*Position);
+ if (!next()) return false;
+ }
+ return next();
+ }
+
+ bool parseSingleQuotedStringInto(std::string &String) {
+ if (!next()) return false;
+ while (*Position != '\'') {
+ String.push_back(*Position);
+ if (!next()) return false;
+ }
+ return next();
+ }
+
+ bool parseFreeStringInto(std::string &String) {
+ do {
+ if (!skipEscapeCharacter()) return false;
+ String.push_back(*Position);
+ if (!next()) return false;
+ } while (*Position != ' ' && *Position != '"' && *Position != '\'');
+ return true;
+ }
+
+ bool skipEscapeCharacter() {
+ if (*Position == '\\') {
+ return next();
+ }
+ return true;
+ }
+
+ bool nextNonWhitespace() {
+ do {
+ if (!next()) return false;
+ } while (*Position == ' ');
+ return true;
+ }
+
+ bool next() {
+ ++Position;
+ return Position != Input.end();
+ }
+
+ const StringRef Input;
+ StringRef::iterator Position;
+ std::vector<std::string> CommandLine;
+};
+
+std::vector<std::string> unescapeCommandLine(
+ StringRef EscapedCommandLine) {
+ CommandLineArgumentParser parser(EscapedCommandLine);
+ return parser.parse();
+}
+
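+// Example (illustrative):
+//   unescapeCommandLine("g++ -DMSG=\"a b\" foo.cpp")
+// yields the three arguments: g++ | -DMSG=a b | foo.cpp
+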
+class JSONCompilationDatabasePlugin : public CompilationDatabasePlugin {
+ std::unique_ptr<CompilationDatabase>
+ loadFromDirectory(StringRef Directory, std::string &ErrorMessage) override {
+ SmallString<1024> JSONDatabasePath(Directory);
+ llvm::sys::path::append(JSONDatabasePath, "compile_commands.json");
+ std::unique_ptr<CompilationDatabase> Database(
+ JSONCompilationDatabase::loadFromFile(JSONDatabasePath, ErrorMessage));
+ if (!Database)
+ return nullptr;
+ return Database;
+ }
+};
+
+} // end namespace
+
+// Register the JSONCompilationDatabasePlugin with the
+// CompilationDatabasePluginRegistry using this statically initialized variable.
+static CompilationDatabasePluginRegistry::Add<JSONCompilationDatabasePlugin>
+X("json-compilation-database", "Reads JSON formatted compilation databases");
+
+// This anchor is used to force the linker to link in the generated object file
+// and thus register the JSONCompilationDatabasePlugin.
+volatile int JSONAnchorSource = 0;
+
+std::unique_ptr<JSONCompilationDatabase>
+JSONCompilationDatabase::loadFromFile(StringRef FilePath,
+ std::string &ErrorMessage) {
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> DatabaseBuffer =
+ llvm::MemoryBuffer::getFile(FilePath);
+ if (std::error_code Result = DatabaseBuffer.getError()) {
+ ErrorMessage = "Error while opening JSON database: " + Result.message();
+ return nullptr;
+ }
+ std::unique_ptr<JSONCompilationDatabase> Database(
+ new JSONCompilationDatabase(std::move(*DatabaseBuffer)));
+ if (!Database->parse(ErrorMessage))
+ return nullptr;
+ return Database;
+}
+
+std::unique_ptr<JSONCompilationDatabase>
+JSONCompilationDatabase::loadFromBuffer(StringRef DatabaseString,
+ std::string &ErrorMessage) {
+ std::unique_ptr<llvm::MemoryBuffer> DatabaseBuffer(
+ llvm::MemoryBuffer::getMemBuffer(DatabaseString));
+ std::unique_ptr<JSONCompilationDatabase> Database(
+ new JSONCompilationDatabase(std::move(DatabaseBuffer)));
+ if (!Database->parse(ErrorMessage))
+ return nullptr;
+ return Database;
+}
+
+std::vector<CompileCommand>
+JSONCompilationDatabase::getCompileCommands(StringRef FilePath) const {
+ SmallString<128> NativeFilePath;
+ llvm::sys::path::native(FilePath, NativeFilePath);
+
+ std::string Error;
+ llvm::raw_string_ostream ES(Error);
+ StringRef Match = MatchTrie.findEquivalent(NativeFilePath, ES);
+ if (Match.empty())
+ return std::vector<CompileCommand>();
+ llvm::StringMap< std::vector<CompileCommandRef> >::const_iterator
+ CommandsRefI = IndexByFile.find(Match);
+ if (CommandsRefI == IndexByFile.end())
+ return std::vector<CompileCommand>();
+ std::vector<CompileCommand> Commands;
+ getCommands(CommandsRefI->getValue(), Commands);
+ return Commands;
+}
+
+std::vector<std::string>
+JSONCompilationDatabase::getAllFiles() const {
+ std::vector<std::string> Result;
+
+ llvm::StringMap< std::vector<CompileCommandRef> >::const_iterator
+ CommandsRefI = IndexByFile.begin();
+ const llvm::StringMap< std::vector<CompileCommandRef> >::const_iterator
+ CommandsRefEnd = IndexByFile.end();
+ for (; CommandsRefI != CommandsRefEnd; ++CommandsRefI) {
+ Result.push_back(CommandsRefI->first().str());
+ }
+
+ return Result;
+}
+
+std::vector<CompileCommand>
+JSONCompilationDatabase::getAllCompileCommands() const {
+ std::vector<CompileCommand> Commands;
+ getCommands(AllCommands, Commands);
+ return Commands;
+}
+
+static std::vector<std::string>
+nodeToCommandLine(const std::vector<llvm::yaml::ScalarNode *> &Nodes) {
+ SmallString<1024> Storage;
+ if (Nodes.size() == 1) {
+ return unescapeCommandLine(Nodes[0]->getValue(Storage));
+ }
+ std::vector<std::string> Arguments;
+ for (auto *Node : Nodes) {
+ Arguments.push_back(Node->getValue(Storage));
+ }
+ return Arguments;
+}
+
+void JSONCompilationDatabase::getCommands(
+ ArrayRef<CompileCommandRef> CommandsRef,
+ std::vector<CompileCommand> &Commands) const {
+ for (int I = 0, E = CommandsRef.size(); I != E; ++I) {
+ SmallString<8> DirectoryStorage;
+ SmallString<32> FilenameStorage;
+ Commands.emplace_back(
+ std::get<0>(CommandsRef[I])->getValue(DirectoryStorage),
+ std::get<1>(CommandsRef[I])->getValue(FilenameStorage),
+ nodeToCommandLine(std::get<2>(CommandsRef[I])));
+ }
+}
+
+bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
+ llvm::yaml::document_iterator I = YAMLStream.begin();
+ if (I == YAMLStream.end()) {
+ ErrorMessage = "Error while parsing YAML.";
+ return false;
+ }
+ llvm::yaml::Node *Root = I->getRoot();
+ if (!Root) {
+ ErrorMessage = "Error while parsing YAML.";
+ return false;
+ }
+ llvm::yaml::SequenceNode *Array = dyn_cast<llvm::yaml::SequenceNode>(Root);
+ if (!Array) {
+ ErrorMessage = "Expected array.";
+ return false;
+ }
+ for (auto& NextObject : *Array) {
+ llvm::yaml::MappingNode *Object = dyn_cast<llvm::yaml::MappingNode>(&NextObject);
+ if (!Object) {
+ ErrorMessage = "Expected object.";
+ return false;
+ }
+ llvm::yaml::ScalarNode *Directory = nullptr;
+ llvm::Optional<std::vector<llvm::yaml::ScalarNode *>> Command;
+ llvm::yaml::ScalarNode *File = nullptr;
+ for (auto& NextKeyValue : *Object) {
+ llvm::yaml::ScalarNode *KeyString =
+ dyn_cast<llvm::yaml::ScalarNode>(NextKeyValue.getKey());
+ if (!KeyString) {
+ ErrorMessage = "Expected strings as key.";
+ return false;
+ }
+ SmallString<10> KeyStorage;
+ StringRef KeyValue = KeyString->getValue(KeyStorage);
+ llvm::yaml::Node *Value = NextKeyValue.getValue();
+ if (!Value) {
+ ErrorMessage = "Expected value.";
+ return false;
+ }
+ llvm::yaml::ScalarNode *ValueString =
+ dyn_cast<llvm::yaml::ScalarNode>(Value);
+ llvm::yaml::SequenceNode *SequenceString =
+ dyn_cast<llvm::yaml::SequenceNode>(Value);
+ if (KeyValue == "arguments" && !SequenceString) {
+ ErrorMessage = "Expected sequence as value.";
+ return false;
+ } else if (KeyValue != "arguments" && !ValueString) {
+ ErrorMessage = "Expected string as value.";
+ return false;
+ }
+ if (KeyValue == "directory") {
+ Directory = ValueString;
+ } else if (KeyValue == "arguments") {
+ Command = std::vector<llvm::yaml::ScalarNode *>();
+ for (auto &Argument : *SequenceString) {
+ auto Scalar = dyn_cast<llvm::yaml::ScalarNode>(&Argument);
+ if (!Scalar) {
+ ErrorMessage = "Only strings are allowed in 'arguments'.";
+ return false;
+ }
+ Command->push_back(Scalar);
+ }
+ } else if (KeyValue == "command") {
+ if (!Command)
+ Command = std::vector<llvm::yaml::ScalarNode *>(1, ValueString);
+ } else if (KeyValue == "file") {
+ File = ValueString;
+ } else {
+ ErrorMessage = ("Unknown key: \"" +
+ KeyString->getRawValue() + "\"").str();
+ return false;
+ }
+ }
+ if (!File) {
+ ErrorMessage = "Missing key: \"file\".";
+ return false;
+ }
+ if (!Command) {
+ ErrorMessage = "Missing key: \"command\" or \"arguments\".";
+ return false;
+ }
+ if (!Directory) {
+ ErrorMessage = "Missing key: \"directory\".";
+ return false;
+ }
+ SmallString<8> FileStorage;
+ StringRef FileName = File->getValue(FileStorage);
+ SmallString<128> NativeFilePath;
+ if (llvm::sys::path::is_relative(FileName)) {
+ SmallString<8> DirectoryStorage;
+ SmallString<128> AbsolutePath(
+ Directory->getValue(DirectoryStorage));
+ llvm::sys::path::append(AbsolutePath, FileName);
+ llvm::sys::path::native(AbsolutePath, NativeFilePath);
+ } else {
+ llvm::sys::path::native(FileName, NativeFilePath);
+ }
+ auto Cmd = CompileCommandRef(Directory, File, *Command);
+ IndexByFile[NativeFilePath].push_back(Cmd);
+ AllCommands.push_back(Cmd);
+ MatchTrie.insert(NativeFilePath);
+ }
+ return true;
+}
+
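+// Example (illustrative) of an accepted database, mixing both command forms:
+//   [
+//     { "directory": "/home/user/build",
+//       "command": "clang++ -Iinclude -c ../src/main.cpp",
+//       "file": "../src/main.cpp" },
+//     { "directory": "/home/user/build",
+//       "arguments": ["clang++", "-c", "../src/util.cpp"],
+//       "file": "../src/util.cpp" }
+//   ]
+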
+} // end namespace tooling
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Tooling/Refactoring.cpp b/contrib/llvm/tools/clang/lib/Tooling/Refactoring.cpp
new file mode 100644
index 0000000..d32452f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/Refactoring.cpp
@@ -0,0 +1,65 @@
+//===--- Refactoring.cpp - Framework for clang refactoring tools ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements tools to support refactorings.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Rewrite/Core/Rewriter.h"
+#include "clang/Tooling/Refactoring.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_os_ostream.h"
+
+namespace clang {
+namespace tooling {
+
+RefactoringTool::RefactoringTool(
+ const CompilationDatabase &Compilations, ArrayRef<std::string> SourcePaths,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps)
+ : ClangTool(Compilations, SourcePaths, PCHContainerOps) {}
+
+Replacements &RefactoringTool::getReplacements() { return Replace; }
+
+int RefactoringTool::runAndSave(FrontendActionFactory *ActionFactory) {
+ if (int Result = run(ActionFactory)) {
+ return Result;
+ }
+
+ LangOptions DefaultLangOptions;
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
+ TextDiagnosticPrinter DiagnosticPrinter(llvm::errs(), &*DiagOpts);
+ DiagnosticsEngine Diagnostics(
+ IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs()),
+ &*DiagOpts, &DiagnosticPrinter, false);
+ SourceManager Sources(Diagnostics, getFiles());
+ Rewriter Rewrite(Sources, DefaultLangOptions);
+
+ if (!applyAllReplacements(Rewrite)) {
+ llvm::errs() << "Skipped some replacements.\n";
+ }
+
+ return saveRewrittenFiles(Rewrite);
+}
+
+bool RefactoringTool::applyAllReplacements(Rewriter &Rewrite) {
+ return tooling::applyAllReplacements(Replace, Rewrite);
+}
+
+int RefactoringTool::saveRewrittenFiles(Rewriter &Rewrite) {
+ return Rewrite.overwriteChangedFiles() ? 1 : 0;
+}
+
+} // end namespace tooling
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Tooling/RefactoringCallbacks.cpp b/contrib/llvm/tools/clang/lib/Tooling/RefactoringCallbacks.cpp
new file mode 100644
index 0000000..4de125e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/RefactoringCallbacks.cpp
@@ -0,0 +1,81 @@
+//===--- RefactoringCallbacks.cpp - Structural query framework ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the refactoring callbacks.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Lex/Lexer.h"
+#include "clang/Tooling/RefactoringCallbacks.h"
+
+namespace clang {
+namespace tooling {
+
+RefactoringCallback::RefactoringCallback() {}
+tooling::Replacements &RefactoringCallback::getReplacements() {
+ return Replace;
+}
+
+static Replacement replaceStmtWithText(SourceManager &Sources,
+ const Stmt &From,
+ StringRef Text) {
+ return tooling::Replacement(Sources, CharSourceRange::getTokenRange(
+ From.getSourceRange()), Text);
+}
+static Replacement replaceStmtWithStmt(SourceManager &Sources,
+ const Stmt &From,
+ const Stmt &To) {
+ return replaceStmtWithText(Sources, From, Lexer::getSourceText(
+ CharSourceRange::getTokenRange(To.getSourceRange()),
+ Sources, LangOptions()));
+}
+
+ReplaceStmtWithText::ReplaceStmtWithText(StringRef FromId, StringRef ToText)
+ : FromId(FromId), ToText(ToText) {}
+
+void ReplaceStmtWithText::run(
+ const ast_matchers::MatchFinder::MatchResult &Result) {
+ if (const Stmt *FromMatch = Result.Nodes.getStmtAs<Stmt>(FromId)) {
+ Replace.insert(tooling::Replacement(
+ *Result.SourceManager,
+ CharSourceRange::getTokenRange(FromMatch->getSourceRange()),
+ ToText));
+ }
+}
+
+ReplaceStmtWithStmt::ReplaceStmtWithStmt(StringRef FromId, StringRef ToId)
+ : FromId(FromId), ToId(ToId) {}
+
+void ReplaceStmtWithStmt::run(
+ const ast_matchers::MatchFinder::MatchResult &Result) {
+ const Stmt *FromMatch = Result.Nodes.getStmtAs<Stmt>(FromId);
+ const Stmt *ToMatch = Result.Nodes.getStmtAs<Stmt>(ToId);
+ if (FromMatch && ToMatch)
+ Replace.insert(replaceStmtWithStmt(
+ *Result.SourceManager, *FromMatch, *ToMatch));
+}
+
+ReplaceIfStmtWithItsBody::ReplaceIfStmtWithItsBody(StringRef Id,
+ bool PickTrueBranch)
+ : Id(Id), PickTrueBranch(PickTrueBranch) {}
+
+void ReplaceIfStmtWithItsBody::run(
+ const ast_matchers::MatchFinder::MatchResult &Result) {
+ if (const IfStmt *Node = Result.Nodes.getStmtAs<IfStmt>(Id)) {
+ const Stmt *Body = PickTrueBranch ? Node->getThen() : Node->getElse();
+ if (Body) {
+ Replace.insert(replaceStmtWithStmt(*Result.SourceManager, *Node, *Body));
+ } else if (!PickTrueBranch) {
+ // If we want to use the 'else'-branch, but it doesn't exist, delete
+ // the whole 'if'.
+ Replace.insert(replaceStmtWithText(*Result.SourceManager, *Node, ""));
+ }
+ }
+}
+
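+// Example (illustrative) wiring with the AST matchers; ifStmt() and bind()
+// come from clang/ASTMatchers/ASTMatchers.h:
+//   ReplaceIfStmtWithItsBody Callback("if", /*PickTrueBranch=*/true);
+//   ast_matchers::MatchFinder Finder;
+//   Finder.addMatcher(ast_matchers::ifStmt().bind("if"), &Callback);
+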
+} // end namespace tooling
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp b/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp
new file mode 100644
index 0000000..fd5596e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp
@@ -0,0 +1,507 @@
+//===--- Tooling.cpp - Running clang standalone tools ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements functions to run clang tools standalone instead
+// of running them as a plugin.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/Tooling.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
+#include "clang/Frontend/ASTUnit.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Tooling/ArgumentsAdjusters.h"
+#include "clang/Tooling/CompilationDatabase.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Option/Option.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "clang-tooling"
+
+namespace clang {
+namespace tooling {
+
+ToolAction::~ToolAction() {}
+
+FrontendActionFactory::~FrontendActionFactory() {}
+
+// FIXME: This file contains structural duplication with other parts of the
+// code that sets up a compiler to run tools on it, and we should refactor
+// it to be based on the same framework.
+
+/// \brief Builds a clang driver initialized for running clang tools.
+static clang::driver::Driver *newDriver(
+ clang::DiagnosticsEngine *Diagnostics, const char *BinaryName,
+ IntrusiveRefCntPtr<vfs::FileSystem> VFS) {
+ clang::driver::Driver *CompilerDriver = new clang::driver::Driver(
+ BinaryName, llvm::sys::getDefaultTargetTriple(), *Diagnostics, VFS);
+ CompilerDriver->setTitle("clang_based_tool");
+ return CompilerDriver;
+}
+
+/// \brief Retrieves the clang CC1 specific flags out of the compilation's jobs.
+///
+/// Returns NULL on error.
+static const llvm::opt::ArgStringList *getCC1Arguments(
+ clang::DiagnosticsEngine *Diagnostics,
+ clang::driver::Compilation *Compilation) {
+  // We expect to get back exactly one Command job; if we didn't, something
+  // failed. Extract that job from the Compilation.
+ const clang::driver::JobList &Jobs = Compilation->getJobs();
+ if (Jobs.size() != 1 || !isa<clang::driver::Command>(*Jobs.begin())) {
+ SmallString<256> error_msg;
+ llvm::raw_svector_ostream error_stream(error_msg);
+ Jobs.Print(error_stream, "; ", true);
+ Diagnostics->Report(clang::diag::err_fe_expected_compiler_job)
+ << error_stream.str();
+ return nullptr;
+ }
+
+ // The one job we find should be to invoke clang again.
+ const clang::driver::Command &Cmd =
+ cast<clang::driver::Command>(*Jobs.begin());
+ if (StringRef(Cmd.getCreator().getName()) != "clang") {
+ Diagnostics->Report(clang::diag::err_fe_expected_clang_command);
+ return nullptr;
+ }
+
+ return &Cmd.getArguments();
+}
+
+/// \brief Returns a clang build invocation initialized from the CC1 flags.
+clang::CompilerInvocation *newInvocation(
+ clang::DiagnosticsEngine *Diagnostics,
+ const llvm::opt::ArgStringList &CC1Args) {
+ assert(!CC1Args.empty() && "Must at least contain the program name!");
+ clang::CompilerInvocation *Invocation = new clang::CompilerInvocation;
+ clang::CompilerInvocation::CreateFromArgs(
+ *Invocation, CC1Args.data() + 1, CC1Args.data() + CC1Args.size(),
+ *Diagnostics);
+ Invocation->getFrontendOpts().DisableFree = false;
+ Invocation->getCodeGenOpts().DisableFree = false;
+ Invocation->getDependencyOutputOpts() = DependencyOutputOptions();
+ return Invocation;
+}
+
+bool runToolOnCode(clang::FrontendAction *ToolAction, const Twine &Code,
+ const Twine &FileName,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps) {
+ return runToolOnCodeWithArgs(ToolAction, Code, std::vector<std::string>(),
+ FileName, PCHContainerOps);
+}
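+
+// A minimal usage sketch, mirroring the example in Tooling.h;
+// SyntaxOnlyAction is declared in clang/Frontend/FrontendActions.h, and the
+// action is deleted by the invocation machinery (see
+// FrontendActionFactory::runInvocation below):
+// \code
+//   bool Success =
+//       clang::tooling::runToolOnCode(new clang::SyntaxOnlyAction,
+//                                     "class X {};");
+// \endcode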
+
+static std::vector<std::string>
+getSyntaxOnlyToolArgs(const std::vector<std::string> &ExtraArgs,
+ StringRef FileName) {
+ std::vector<std::string> Args;
+ Args.push_back("clang-tool");
+ Args.push_back("-fsyntax-only");
+ Args.insert(Args.end(), ExtraArgs.begin(), ExtraArgs.end());
+ Args.push_back(FileName.str());
+ return Args;
+}
+
+bool runToolOnCodeWithArgs(
+ clang::FrontendAction *ToolAction, const Twine &Code,
+ const std::vector<std::string> &Args, const Twine &FileName,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ const FileContentMappings &VirtualMappedFiles) {
+
+ SmallString<16> FileNameStorage;
+ StringRef FileNameRef = FileName.toNullTerminatedStringRef(FileNameStorage);
+ llvm::IntrusiveRefCntPtr<vfs::OverlayFileSystem> OverlayFileSystem(
+ new vfs::OverlayFileSystem(vfs::getRealFileSystem()));
+ llvm::IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
+ new vfs::InMemoryFileSystem);
+ OverlayFileSystem->pushOverlay(InMemoryFileSystem);
+ llvm::IntrusiveRefCntPtr<FileManager> Files(
+ new FileManager(FileSystemOptions(), OverlayFileSystem));
+ ToolInvocation Invocation(getSyntaxOnlyToolArgs(Args, FileNameRef),
+ ToolAction, Files.get(), PCHContainerOps);
+
+ SmallString<1024> CodeStorage;
+ InMemoryFileSystem->addFile(FileNameRef, 0,
+ llvm::MemoryBuffer::getMemBuffer(
+ Code.toNullTerminatedStringRef(CodeStorage)));
+
+ for (auto &FilenameWithContent : VirtualMappedFiles) {
+ InMemoryFileSystem->addFile(
+ FilenameWithContent.first, 0,
+ llvm::MemoryBuffer::getMemBuffer(FilenameWithContent.second));
+ }
+
+ return Invocation.run();
+}
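+
+// A usage sketch for the WithArgs variant; the virtual header path and the
+// flag list are illustrative only:
+// \code
+//   FileContentMappings Mappings;
+//   Mappings.push_back(std::make_pair("/virtual.h", "int f();\n"));
+//   bool Success = runToolOnCodeWithArgs(
+//       new clang::SyntaxOnlyAction,
+//       "#include \"/virtual.h\"\nint g() { return f(); }",
+//       {"-std=c++11"}, "input.cc",
+//       std::make_shared<PCHContainerOperations>(), Mappings);
+// \endcode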
+
+std::string getAbsolutePath(StringRef File) {
+ StringRef RelativePath(File);
+ // FIXME: Should '.\\' be accepted on Win32?
+ if (RelativePath.startswith("./")) {
+ RelativePath = RelativePath.substr(strlen("./"));
+ }
+
+ SmallString<1024> AbsolutePath = RelativePath;
+ std::error_code EC = llvm::sys::fs::make_absolute(AbsolutePath);
+ assert(!EC);
+ (void)EC;
+ llvm::sys::path::native(AbsolutePath);
+ return AbsolutePath.str();
+}
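+
+// For example (a sketch; the result depends on the process's working
+// directory, here assumed to be "/home/user"):
+// \code
+//   std::string P = getAbsolutePath("./foo/bar.cc");
+//   // P == "/home/user/foo/bar.cc", in native path syntax.
+// \endcode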
+
+void addTargetAndModeForProgramName(std::vector<std::string> &CommandLine,
+ StringRef InvokedAs) {
+ if (!CommandLine.empty() && !InvokedAs.empty()) {
+ bool AlreadyHasTarget = false;
+ bool AlreadyHasMode = false;
+ // Skip CommandLine[0].
+ for (auto Token = ++CommandLine.begin(); Token != CommandLine.end();
+ ++Token) {
+ StringRef TokenRef(*Token);
+ AlreadyHasTarget |=
+ (TokenRef == "-target" || TokenRef.startswith("-target="));
+ AlreadyHasMode |= (TokenRef == "--driver-mode" ||
+ TokenRef.startswith("--driver-mode="));
+ }
+ auto TargetMode =
+ clang::driver::ToolChain::getTargetAndModeFromProgramName(InvokedAs);
+ if (!AlreadyHasMode && !TargetMode.second.empty()) {
+ CommandLine.insert(++CommandLine.begin(), TargetMode.second);
+ }
+ if (!AlreadyHasTarget && !TargetMode.first.empty()) {
+ CommandLine.insert(++CommandLine.begin(), {"-target", TargetMode.first});
+ }
+ }
+}
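+
+// A sketch of the effect; the inserted triple is derived from the
+// program-name prefix, so the exact result depends on InvokedAs:
+// \code
+//   std::vector<std::string> Args = {"clang++", "-c", "foo.cpp"};
+//   addTargetAndModeForProgramName(Args, "x86_64-linux-gnu-clang++");
+//   // Args is now approximately:
+//   //   {"clang++", "-target", "x86_64-linux-gnu",
+//   //    "--driver-mode=g++", "-c", "foo.cpp"}
+// \endcode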
+
+namespace {
+
+class SingleFrontendActionFactory : public FrontendActionFactory {
+ FrontendAction *Action;
+
+public:
+ SingleFrontendActionFactory(FrontendAction *Action) : Action(Action) {}
+
+ FrontendAction *create() override { return Action; }
+};
+
+} // end anonymous namespace
+
+ToolInvocation::ToolInvocation(
+ std::vector<std::string> CommandLine, ToolAction *Action,
+ FileManager *Files, std::shared_ptr<PCHContainerOperations> PCHContainerOps)
+ : CommandLine(std::move(CommandLine)), Action(Action), OwnsAction(false),
+ Files(Files), PCHContainerOps(PCHContainerOps), DiagConsumer(nullptr) {}
+
+ToolInvocation::ToolInvocation(
+ std::vector<std::string> CommandLine, FrontendAction *FAction,
+ FileManager *Files, std::shared_ptr<PCHContainerOperations> PCHContainerOps)
+ : CommandLine(std::move(CommandLine)),
+ Action(new SingleFrontendActionFactory(FAction)), OwnsAction(true),
+ Files(Files), PCHContainerOps(PCHContainerOps), DiagConsumer(nullptr) {}
+
+ToolInvocation::~ToolInvocation() {
+ if (OwnsAction)
+ delete Action;
+}
+
+void ToolInvocation::mapVirtualFile(StringRef FilePath, StringRef Content) {
+ SmallString<1024> PathStorage;
+ llvm::sys::path::native(FilePath, PathStorage);
+ MappedFileContents[PathStorage] = Content;
+}
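+
+// A minimal sketch of mapping a virtual file before running; Files is a
+// FileManager set up as in runToolOnCodeWithArgs() above:
+// \code
+//   ToolInvocation Invocation({"clang-tool", "-fsyntax-only", "test.cc"},
+//                             new clang::SyntaxOnlyAction, Files.get(),
+//                             std::make_shared<PCHContainerOperations>());
+//   Invocation.mapVirtualFile("test.cc", "int main() { return 0; }");
+//   bool Success = Invocation.run();
+// \endcode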
+
+bool ToolInvocation::run() {
+ std::vector<const char*> Argv;
+ for (const std::string &Str : CommandLine)
+ Argv.push_back(Str.c_str());
+ const char *const BinaryName = Argv[0];
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
+ TextDiagnosticPrinter DiagnosticPrinter(llvm::errs(), &*DiagOpts);
+ DiagnosticsEngine Diagnostics(
+ IntrusiveRefCntPtr<clang::DiagnosticIDs>(new DiagnosticIDs()), &*DiagOpts,
+ DiagConsumer ? DiagConsumer : &DiagnosticPrinter, false);
+
+ const std::unique_ptr<clang::driver::Driver> Driver(
+ newDriver(&Diagnostics, BinaryName, Files->getVirtualFileSystem()));
+ // Since the input might only be virtual, don't check whether it exists.
+ Driver->setCheckInputsExist(false);
+ const std::unique_ptr<clang::driver::Compilation> Compilation(
+ Driver->BuildCompilation(llvm::makeArrayRef(Argv)));
+ const llvm::opt::ArgStringList *const CC1Args = getCC1Arguments(
+ &Diagnostics, Compilation.get());
+ if (!CC1Args) {
+ return false;
+ }
+ std::unique_ptr<clang::CompilerInvocation> Invocation(
+ newInvocation(&Diagnostics, *CC1Args));
+ // FIXME: remove this when all users have migrated!
+ for (const auto &It : MappedFileContents) {
+ // Inject the code as the given file name into the preprocessor options.
+ std::unique_ptr<llvm::MemoryBuffer> Input =
+ llvm::MemoryBuffer::getMemBuffer(It.getValue());
+ Invocation->getPreprocessorOpts().addRemappedFile(It.getKey(),
+ Input.release());
+ }
+ return runInvocation(BinaryName, Compilation.get(), Invocation.release(),
+ PCHContainerOps);
+}
+
+bool ToolInvocation::runInvocation(
+ const char *BinaryName, clang::driver::Compilation *Compilation,
+ clang::CompilerInvocation *Invocation,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps) {
+ // Show the invocation when running with -v.
+ if (Invocation->getHeaderSearchOpts().Verbose) {
+ llvm::errs() << "clang Invocation:\n";
+ Compilation->getJobs().Print(llvm::errs(), "\n", true);
+ llvm::errs() << "\n";
+ }
+
+ return Action->runInvocation(Invocation, Files, PCHContainerOps,
+ DiagConsumer);
+}
+
+bool FrontendActionFactory::runInvocation(
+ CompilerInvocation *Invocation, FileManager *Files,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ DiagnosticConsumer *DiagConsumer) {
+ // Create a compiler instance to handle the actual work.
+ clang::CompilerInstance Compiler(PCHContainerOps);
+ Compiler.setInvocation(Invocation);
+ Compiler.setFileManager(Files);
+
+ // The FrontendAction can have lifetime requirements for Compiler or its
+ // members, and we need to ensure it's deleted earlier than Compiler. So we
+ // pass it to an std::unique_ptr declared after the Compiler variable.
+ std::unique_ptr<FrontendAction> ScopedToolAction(create());
+
+ // Create the compiler's actual diagnostics engine.
+ Compiler.createDiagnostics(DiagConsumer, /*ShouldOwnClient=*/false);
+ if (!Compiler.hasDiagnostics())
+ return false;
+
+ Compiler.createSourceManager(*Files);
+
+ const bool Success = Compiler.ExecuteAction(*ScopedToolAction);
+
+ Files->clearStatCaches();
+ return Success;
+}
+
+ClangTool::ClangTool(const CompilationDatabase &Compilations,
+ ArrayRef<std::string> SourcePaths,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps)
+ : Compilations(Compilations), SourcePaths(SourcePaths),
+ PCHContainerOps(PCHContainerOps),
+ OverlayFileSystem(new vfs::OverlayFileSystem(vfs::getRealFileSystem())),
+ InMemoryFileSystem(new vfs::InMemoryFileSystem),
+ Files(new FileManager(FileSystemOptions(), OverlayFileSystem)),
+ DiagConsumer(nullptr) {
+ OverlayFileSystem->pushOverlay(InMemoryFileSystem);
+ appendArgumentsAdjuster(getClangStripOutputAdjuster());
+ appendArgumentsAdjuster(getClangSyntaxOnlyAdjuster());
+}
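+
+// A typical construction sketch; MyToolCategory is a hypothetical
+// llvm::cl::OptionCategory, and CommonOptionsParser comes from
+// clang/Tooling/CommonOptionsParser.h:
+// \code
+//   CommonOptionsParser OptionsParser(argc, argv, MyToolCategory);
+//   ClangTool Tool(OptionsParser.getCompilations(),
+//                  OptionsParser.getSourcePathList());
+// \endcode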
+
+ClangTool::~ClangTool() {}
+
+void ClangTool::mapVirtualFile(StringRef FilePath, StringRef Content) {
+ MappedFileContents.push_back(std::make_pair(FilePath, Content));
+}
+
+void ClangTool::appendArgumentsAdjuster(ArgumentsAdjuster Adjuster) {
+ if (ArgsAdjuster)
+ ArgsAdjuster = combineAdjusters(ArgsAdjuster, Adjuster);
+ else
+ ArgsAdjuster = Adjuster;
+}
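+
+// For example (a sketch using getInsertArgumentAdjuster from
+// clang/Tooling/ArgumentsAdjusters.h):
+// \code
+//   Tool.appendArgumentsAdjuster(getInsertArgumentAdjuster(
+//       "-DDEBUG", ArgumentInsertPosition::BEGIN));
+// \endcode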
+
+void ClangTool::clearArgumentsAdjusters() {
+ ArgsAdjuster = nullptr;
+}
+
+int ClangTool::run(ToolAction *Action) {
+ // Exists solely for the purpose of looking up the resource path; this just
+ // needs to be some symbol in the binary.
+ static int StaticSymbol;
+ // The driver detects the builtin header path based on the path of the
+ // executable.
+ // FIXME: On Linux, GetMainExecutable is independent of the value of the
+ // first argument, thus allowing ClangTool and runToolOnCode to just
+ // pass in made-up names here. Make sure this works on other platforms.
+ std::string MainExecutable =
+ llvm::sys::fs::getMainExecutable("clang_tool", &StaticSymbol);
+
+ llvm::SmallString<128> InitialDirectory;
+ if (std::error_code EC = llvm::sys::fs::current_path(InitialDirectory))
+ llvm::report_fatal_error("Cannot detect current path: " +
+ Twine(EC.message()));
+
+ // First insert all absolute paths into the in-memory VFS. These are global
+ // for all compile commands.
+ if (SeenWorkingDirectories.insert("/").second)
+ for (const auto &MappedFile : MappedFileContents)
+ if (llvm::sys::path::is_absolute(MappedFile.first))
+ InMemoryFileSystem->addFile(
+ MappedFile.first, 0,
+ llvm::MemoryBuffer::getMemBuffer(MappedFile.second));
+
+ bool ProcessingFailed = false;
+ for (const auto &SourcePath : SourcePaths) {
+ std::string File(getAbsolutePath(SourcePath));
+
+ // Currently implementations of CompilationDatabase::getCompileCommands can
+ // change the state of the file system (e.g. prepare generated headers), so
+ // this method needs to run right before we invoke the tool, as the next
+ // file may require a different (incompatible) state of the file system.
+ //
+ // FIXME: Make the compilation database interface more explicit about the
+ // requirements to the order of invocation of its members.
+ std::vector<CompileCommand> CompileCommandsForFile =
+ Compilations.getCompileCommands(File);
+ if (CompileCommandsForFile.empty()) {
+ // FIXME: There are two use cases here: doing a fuzzy
+ // "find . -name '*.cc' | xargs tool" match, where as a user I don't care
+ // about the .cc files that were not found, and the use case where I
+ // specify all files I want to run over explicitly, where this should
+ // be an error. We'll want to add an option for this.
+ llvm::errs() << "Skipping " << File << ". Compile command not found.\n";
+ continue;
+ }
+ for (CompileCommand &CompileCommand : CompileCommandsForFile) {
+ // FIXME: chdir is thread hostile; on the other hand, creating the same
+ // behavior as chdir is complex: chdir resolves the path once, thus
+ // guaranteeing that all subsequent relative path operations work
+ // on the same path the original chdir resulted in. This makes a
+ // difference for example on network filesystems, where symlinks might be
+ // switched during runtime of the tool. Fixing this depends on having a
+ // file system abstraction that allows openat() style interactions.
+ if (OverlayFileSystem->setCurrentWorkingDirectory(
+ CompileCommand.Directory))
+ llvm::report_fatal_error("Cannot chdir into \"" +
+ Twine(CompileCommand.Directory) + "\"!");
+
+ // Now fill the in-memory VFS with the relative file mappings so it will
+ // have the correct relative paths. We never remove mappings but that
+ // should be fine.
+ if (SeenWorkingDirectories.insert(CompileCommand.Directory).second)
+ for (const auto &MappedFile : MappedFileContents)
+ if (!llvm::sys::path::is_absolute(MappedFile.first))
+ InMemoryFileSystem->addFile(
+ MappedFile.first, 0,
+ llvm::MemoryBuffer::getMemBuffer(MappedFile.second));
+
+ std::vector<std::string> CommandLine = CompileCommand.CommandLine;
+ if (ArgsAdjuster)
+ CommandLine = ArgsAdjuster(CommandLine, CompileCommand.Filename);
+ assert(!CommandLine.empty());
+ CommandLine[0] = MainExecutable;
+ // FIXME: We need a callback mechanism for the tool writer to output a
+ // customized message for each file.
+ DEBUG({ llvm::dbgs() << "Processing: " << File << ".\n"; });
+ ToolInvocation Invocation(std::move(CommandLine), Action, Files.get(),
+ PCHContainerOps);
+ Invocation.setDiagnosticConsumer(DiagConsumer);
+
+ if (!Invocation.run()) {
+ // FIXME: Diagnostics should be used instead.
+ llvm::errs() << "Error while processing " << File << ".\n";
+ ProcessingFailed = true;
+ }
+ // Return to the initial directory to correctly resolve the next file by
+ // its relative path.
+ if (OverlayFileSystem->setCurrentWorkingDirectory(InitialDirectory.c_str()))
+ llvm::report_fatal_error("Cannot chdir into \"" +
+ Twine(InitialDirectory) + "\"!");
+ }
+ }
+ return ProcessingFailed ? 1 : 0;
+}
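+
+// A minimal run sketch, continuing the construction sketch above; the factory
+// returned by newFrontendActionFactory is owned by the caller, so .get() is
+// sufficient here:
+// \code
+//   int Result = Tool.run(
+//       newFrontendActionFactory<clang::SyntaxOnlyAction>().get());
+// \endcode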
+
+namespace {
+
+class ASTBuilderAction : public ToolAction {
+ std::vector<std::unique_ptr<ASTUnit>> &ASTs;
+
+public:
+ ASTBuilderAction(std::vector<std::unique_ptr<ASTUnit>> &ASTs) : ASTs(ASTs) {}
+
+ bool runInvocation(CompilerInvocation *Invocation, FileManager *Files,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ DiagnosticConsumer *DiagConsumer) override {
+ std::unique_ptr<ASTUnit> AST = ASTUnit::LoadFromCompilerInvocation(
+ Invocation, PCHContainerOps,
+ CompilerInstance::createDiagnostics(&Invocation->getDiagnosticOpts(),
+ DiagConsumer,
+ /*ShouldOwnClient=*/false),
+ Files);
+ if (!AST)
+ return false;
+
+ ASTs.push_back(std::move(AST));
+ return true;
+ }
+};
+
+} // end anonymous namespace
+
+int ClangTool::buildASTs(std::vector<std::unique_ptr<ASTUnit>> &ASTs) {
+ ASTBuilderAction Action(ASTs);
+ return run(&Action);
+}
+
+std::unique_ptr<ASTUnit>
+buildASTFromCode(const Twine &Code, const Twine &FileName,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps) {
+ return buildASTFromCodeWithArgs(Code, std::vector<std::string>(), FileName,
+ PCHContainerOps);
+}
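+
+// A minimal usage sketch, relying on the default FileName declared in
+// Tooling.h:
+// \code
+//   std::unique_ptr<ASTUnit> AST = buildASTFromCode("int x = 0;");
+//   if (AST)
+//     AST->getASTContext().getTranslationUnitDecl()->dump();
+// \endcode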
+
+std::unique_ptr<ASTUnit> buildASTFromCodeWithArgs(
+ const Twine &Code, const std::vector<std::string> &Args,
+ const Twine &FileName,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps) {
+ SmallString<16> FileNameStorage;
+ StringRef FileNameRef = FileName.toNullTerminatedStringRef(FileNameStorage);
+
+ std::vector<std::unique_ptr<ASTUnit>> ASTs;
+ ASTBuilderAction Action(ASTs);
+ llvm::IntrusiveRefCntPtr<vfs::OverlayFileSystem> OverlayFileSystem(
+ new vfs::OverlayFileSystem(vfs::getRealFileSystem()));
+ llvm::IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
+ new vfs::InMemoryFileSystem);
+ OverlayFileSystem->pushOverlay(InMemoryFileSystem);
+ llvm::IntrusiveRefCntPtr<FileManager> Files(
+ new FileManager(FileSystemOptions(), OverlayFileSystem));
+ ToolInvocation Invocation(getSyntaxOnlyToolArgs(Args, FileNameRef), &Action,
+ Files.get(), PCHContainerOps);
+
+ SmallString<1024> CodeStorage;
+ InMemoryFileSystem->addFile(FileNameRef, 0,
+ llvm::MemoryBuffer::getMemBuffer(
+ Code.toNullTerminatedStringRef(CodeStorage)));
+ if (!Invocation.run())
+ return nullptr;
+
+ assert(ASTs.size() == 1);
+ return std::move(ASTs[0]);
+}
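+
+// The WithArgs variant is called the same way, with extra compiler flags; a
+// brief sketch:
+// \code
+//   std::unique_ptr<ASTUnit> AST =
+//       buildASTFromCodeWithArgs("template <typename T> T id(T x);",
+//                                {"-std=c++14"});
+// \endcode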
+
+} // end namespace tooling
+} // end namespace clang